author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /compiler
parent     Initial commit. (diff)
download   rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
           rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip

Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc/Cargo.toml | 25
-rw-r--r--  compiler/rustc/Windows Manifest.xml | 28
-rw-r--r--  compiler/rustc/build.rs | 27
-rw-r--r--  compiler/rustc/src/main.rs | 63
-rw-r--r--  compiler/rustc_apfloat/Cargo.toml | 8
-rw-r--r--  compiler/rustc_apfloat/src/ieee.rs | 2758
-rw-r--r--  compiler/rustc_apfloat/src/lib.rs | 693
-rw-r--r--  compiler/rustc_apfloat/src/ppc.rs | 434
-rw-r--r--  compiler/rustc_apfloat/tests/ieee.rs | 3301
-rw-r--r--  compiler/rustc_apfloat/tests/ppc.rs | 530
-rw-r--r--  compiler/rustc_arena/Cargo.toml | 7
-rw-r--r--  compiler/rustc_arena/src/lib.rs | 645
-rw-r--r--  compiler/rustc_arena/src/tests.rs | 248
-rw-r--r--  compiler/rustc_ast/Cargo.toml | 18
-rw-r--r--  compiler/rustc_ast/README.md | 8
-rw-r--r--  compiler/rustc_ast/src/ast.rs | 3051
-rw-r--r--  compiler/rustc_ast/src/ast_traits.rs | 442
-rw-r--r--  compiler/rustc_ast/src/attr/mod.rs | 634
-rw-r--r--  compiler/rustc_ast/src/entry.rs | 8
-rw-r--r--  compiler/rustc_ast/src/expand/allocator.rs | 53
-rw-r--r--  compiler/rustc_ast/src/expand/mod.rs | 3
-rw-r--r--  compiler/rustc_ast/src/lib.rs | 63
-rw-r--r--  compiler/rustc_ast/src/mut_visit.rs | 1601
-rw-r--r--  compiler/rustc_ast/src/node_id.rs | 40
-rw-r--r--  compiler/rustc_ast/src/ptr.rs | 212
-rw-r--r--  compiler/rustc_ast/src/token.rs | 851
-rw-r--r--  compiler/rustc_ast/src/tokenstream.rs | 681
-rw-r--r--  compiler/rustc_ast/src/util/classify.rs | 52
-rw-r--r--  compiler/rustc_ast/src/util/comments.rs | 255
-rw-r--r--  compiler/rustc_ast/src/util/comments/tests.rs | 61
-rw-r--r--  compiler/rustc_ast/src/util/literal.rs | 336
-rw-r--r--  compiler/rustc_ast/src/util/parser.rs | 406
-rw-r--r--  compiler/rustc_ast/src/util/unicode.rs | 35
-rw-r--r--  compiler/rustc_ast/src/visit.rs | 959
-rw-r--r--  compiler/rustc_ast_lowering/Cargo.toml | 23
-rw-r--r--  compiler/rustc_ast_lowering/src/asm.rs | 485
-rw-r--r--  compiler/rustc_ast_lowering/src/block.rs | 122
-rw-r--r--  compiler/rustc_ast_lowering/src/expr.rs | 1914
-rw-r--r--  compiler/rustc_ast_lowering/src/index.rs | 346
-rw-r--r--  compiler/rustc_ast_lowering/src/item.rs | 1513
-rw-r--r--  compiler/rustc_ast_lowering/src/lib.rs | 2501
-rw-r--r--  compiler/rustc_ast_lowering/src/lifetime_collector.rs | 115
-rw-r--r--  compiler/rustc_ast_lowering/src/pat.rs | 350
-rw-r--r--  compiler/rustc_ast_lowering/src/path.rs | 406
-rw-r--r--  compiler/rustc_ast_passes/Cargo.toml | 18
-rw-r--r--  compiler/rustc_ast_passes/src/ast_validation.rs | 1909
-rw-r--r--  compiler/rustc_ast_passes/src/feature_gate.rs | 901
-rw-r--r--  compiler/rustc_ast_passes/src/lib.rs | 18
-rw-r--r--  compiler/rustc_ast_passes/src/node_count.rs | 135
-rw-r--r--  compiler/rustc_ast_passes/src/show_span.rs | 65
-rw-r--r--  compiler/rustc_ast_pretty/Cargo.toml | 11
-rw-r--r--  compiler/rustc_ast_pretty/src/helpers.rs | 48
-rw-r--r--  compiler/rustc_ast_pretty/src/lib.rs | 8
-rw-r--r--  compiler/rustc_ast_pretty/src/pp.rs | 451
-rw-r--r--  compiler/rustc_ast_pretty/src/pp/convenience.rs | 94
-rw-r--r--  compiler/rustc_ast_pretty/src/pp/ring.rs | 77
-rw-r--r--  compiler/rustc_ast_pretty/src/pprust/mod.rs | 86
-rw-r--r--  compiler/rustc_ast_pretty/src/pprust/state.rs | 1770
-rw-r--r--  compiler/rustc_ast_pretty/src/pprust/state/delimited.rs | 41
-rw-r--r--  compiler/rustc_ast_pretty/src/pprust/state/expr.rs | 621
-rw-r--r--  compiler/rustc_ast_pretty/src/pprust/state/item.rs | 708
-rw-r--r--  compiler/rustc_ast_pretty/src/pprust/tests.rs | 63
-rw-r--r--  compiler/rustc_attr/Cargo.toml | 19
-rw-r--r--  compiler/rustc_attr/src/builtin.rs | 1274
-rw-r--r--  compiler/rustc_attr/src/lib.rs | 22
-rw-r--r--  compiler/rustc_borrowck/Cargo.toml | 31
-rw-r--r--  compiler/rustc_borrowck/src/borrow_set.rs | 345
-rw-r--r--  compiler/rustc_borrowck/src/borrowck_errors.rs | 486
-rw-r--r--  compiler/rustc_borrowck/src/constraint_generation.rs | 250
-rw-r--r--  compiler/rustc_borrowck/src/constraints/graph.rs | 235
-rw-r--r--  compiler/rustc_borrowck/src/constraints/mod.rs | 124
-rw-r--r--  compiler/rustc_borrowck/src/consumers.rs | 39
-rw-r--r--  compiler/rustc_borrowck/src/dataflow.rs | 438
-rw-r--r--  compiler/rustc_borrowck/src/def_use.rs | 80
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs | 494
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs | 2773
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs | 744
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/find_all_local_uses.rs | 26
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/find_use.rs | 128
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/mod.rs | 1127
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/move_errors.rs | 529
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs | 1115
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/outlives_suggestion.rs | 261
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/region_errors.rs | 904
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/region_name.rs | 896
-rw-r--r--  compiler/rustc_borrowck/src/diagnostics/var_name.rs | 133
-rw-r--r--  compiler/rustc_borrowck/src/facts.rs | 212
-rw-r--r--  compiler/rustc_borrowck/src/invalidation.rs | 442
-rw-r--r--  compiler/rustc_borrowck/src/lib.rs | 2380
-rw-r--r--  compiler/rustc_borrowck/src/location.rs | 107
-rw-r--r--  compiler/rustc_borrowck/src/member_constraints.rs | 230
-rw-r--r--  compiler/rustc_borrowck/src/nll.rs | 462
-rw-r--r--  compiler/rustc_borrowck/src/path_utils.rs | 171
-rw-r--r--  compiler/rustc_borrowck/src/place_ext.rs | 81
-rw-r--r--  compiler/rustc_borrowck/src/places_conflict.rs | 537
-rw-r--r--  compiler/rustc_borrowck/src/prefixes.rs | 145
-rw-r--r--  compiler/rustc_borrowck/src/region_infer/dump_mir.rs | 93
-rw-r--r--  compiler/rustc_borrowck/src/region_infer/graphviz.rs | 140
-rw-r--r--  compiler/rustc_borrowck/src/region_infer/mod.rs | 2365
-rw-r--r--  compiler/rustc_borrowck/src/region_infer/opaque_types.rs | 662
-rw-r--r--  compiler/rustc_borrowck/src/region_infer/reverse_sccs.rs | 68
-rw-r--r--  compiler/rustc_borrowck/src/region_infer/values.rs | 488
-rw-r--r--  compiler/rustc_borrowck/src/renumber.rs | 83
-rw-r--r--  compiler/rustc_borrowck/src/session_diagnostics.rs | 44
-rw-r--r--  compiler/rustc_borrowck/src/type_check/canonical.rs | 171
-rw-r--r--  compiler/rustc_borrowck/src/type_check/constraint_conversion.rs | 204
-rw-r--r--  compiler/rustc_borrowck/src/type_check/free_region_relations.rs | 374
-rw-r--r--  compiler/rustc_borrowck/src/type_check/input_output.rs | 245
-rw-r--r--  compiler/rustc_borrowck/src/type_check/liveness/local_use_map.rs | 170
-rw-r--r--  compiler/rustc_borrowck/src/type_check/liveness/mod.rs | 139
-rw-r--r--  compiler/rustc_borrowck/src/type_check/liveness/polonius.rs | 140
-rw-r--r--  compiler/rustc_borrowck/src/type_check/liveness/trace.rs | 578
-rw-r--r--  compiler/rustc_borrowck/src/type_check/mod.rs | 2721
-rw-r--r--  compiler/rustc_borrowck/src/type_check/relate_tys.rs | 187
-rw-r--r--  compiler/rustc_borrowck/src/universal_regions.rs | 841
-rw-r--r--  compiler/rustc_borrowck/src/used_muts.rs | 110
-rw-r--r--  compiler/rustc_builtin_macros/Cargo.toml | 26
-rw-r--r--  compiler/rustc_builtin_macros/src/asm.rs | 875
-rw-r--r--  compiler/rustc_builtin_macros/src/assert.rs | 178
-rw-r--r--  compiler/rustc_builtin_macros/src/assert/context.rs | 453
-rw-r--r--  compiler/rustc_builtin_macros/src/cfg.rs | 69
-rw-r--r--  compiler/rustc_builtin_macros/src/cfg_accessible.rs | 61
-rw-r--r--  compiler/rustc_builtin_macros/src/cfg_eval.rs | 269
-rw-r--r--  compiler/rustc_builtin_macros/src/cmdline_attrs.rs | 35
-rw-r--r--  compiler/rustc_builtin_macros/src/compile_error.rs | 19
-rw-r--r--  compiler/rustc_builtin_macros/src/concat.rs | 64
-rw-r--r--  compiler/rustc_builtin_macros/src/concat_bytes.rs | 189
-rw-r--r--  compiler/rustc_builtin_macros/src/concat_idents.rs | 70
-rw-r--r--  compiler/rustc_builtin_macros/src/derive.rs | 158
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/bounds.rs | 28
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/clone.rs | 212
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs | 90
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs | 79
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs | 110
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs | 88
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/debug.rs | 181
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/decodable.rs | 224
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/default.rs | 267
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/encodable.rs | 295
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/generic/mod.rs | 1655
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/generic/ty.rs | 203
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/hash.rs | 80
-rw-r--r--  compiler/rustc_builtin_macros/src/deriving/mod.rs | 208
-rw-r--r--  compiler/rustc_builtin_macros/src/edition_panic.rs | 86
-rw-r--r--  compiler/rustc_builtin_macros/src/env.rs | 91
-rw-r--r--  compiler/rustc_builtin_macros/src/format.rs | 1573
-rw-r--r--  compiler/rustc_builtin_macros/src/format_foreign.rs | 829
-rw-r--r--  compiler/rustc_builtin_macros/src/format_foreign/printf/tests.rs | 145
-rw-r--r--  compiler/rustc_builtin_macros/src/format_foreign/shell/tests.rs | 56
-rw-r--r--  compiler/rustc_builtin_macros/src/global_allocator.rs | 194
-rw-r--r--  compiler/rustc_builtin_macros/src/lib.rs | 119
-rw-r--r--  compiler/rustc_builtin_macros/src/log_syntax.rs | 14
-rw-r--r--  compiler/rustc_builtin_macros/src/proc_macro_harness.rs | 393
-rw-r--r--  compiler/rustc_builtin_macros/src/source_util.rs | 225
-rw-r--r--  compiler/rustc_builtin_macros/src/standard_library_imports.rs | 92
-rw-r--r--  compiler/rustc_builtin_macros/src/test.rs | 529
-rw-r--r--  compiler/rustc_builtin_macros/src/test_harness.rs | 390
-rw-r--r--  compiler/rustc_builtin_macros/src/trace_macros.rs | 29
-rw-r--r--  compiler/rustc_builtin_macros/src/util.rs | 43
-rw-r--r--  compiler/rustc_codegen_cranelift/.cirrus.yml | 25
-rw-r--r--  compiler/rustc_codegen_cranelift/.github/workflows/main.yml | 184
-rw-r--r--  compiler/rustc_codegen_cranelift/.github/workflows/nightly-cranelift.yml | 59
-rw-r--r--  compiler/rustc_codegen_cranelift/.github/workflows/rustc.yml | 82
-rw-r--r--  compiler/rustc_codegen_cranelift/.vscode/settings.json | 73
-rw-r--r--  compiler/rustc_codegen_cranelift/Cargo.lock | 385
-rw-r--r--  compiler/rustc_codegen_cranelift/Cargo.toml | 45
-rw-r--r--  compiler/rustc_codegen_cranelift/LICENSE-APACHE | 201
-rw-r--r--  compiler/rustc_codegen_cranelift/LICENSE-MIT | 23
-rw-r--r--  compiler/rustc_codegen_cranelift/Readme.md | 75
-rw-r--r--  compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock | 333
-rw-r--r--  compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml | 35
-rw-r--r--  compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs | 1
-rw-r--r--  compiler/rustc_codegen_cranelift/build_system/build_backend.rs | 43
-rw-r--r--  compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs | 219
-rw-r--r--  compiler/rustc_codegen_cranelift/build_system/config.rs | 55
-rw-r--r--  compiler/rustc_codegen_cranelift/build_system/mod.rs | 128
-rw-r--r--  compiler/rustc_codegen_cranelift/build_system/prepare.rs | 174
-rw-r--r--  compiler/rustc_codegen_cranelift/build_system/rustc_info.rs | 65
-rw-r--r--  compiler/rustc_codegen_cranelift/build_system/utils.rs | 35
-rwxr-xr-x  compiler/rustc_codegen_cranelift/clean_all.sh | 6
-rw-r--r--  compiler/rustc_codegen_cranelift/config.txt | 17
-rw-r--r--  compiler/rustc_codegen_cranelift/docs/dwarf.md | 153
-rw-r--r--  compiler/rustc_codegen_cranelift/docs/usage.md | 67
-rw-r--r--  compiler/rustc_codegen_cranelift/example/alloc_example.rs | 38
-rw-r--r--  compiler/rustc_codegen_cranelift/example/alloc_system.rs | 130
-rw-r--r--  compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs | 68
-rw-r--r--  compiler/rustc_codegen_cranelift/example/dst-field-align.rs | 67
-rw-r--r--  compiler/rustc_codegen_cranelift/example/example.rs | 208
-rw-r--r--  compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs | 53
-rw-r--r--  compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs | 59
-rw-r--r--  compiler/rustc_codegen_cranelift/example/mini_core.rs | 657
-rw-r--r--  compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs | 515
-rw-r--r--  compiler/rustc_codegen_cranelift/example/mod_bench.rs | 36
-rw-r--r--  compiler/rustc_codegen_cranelift/example/std_example.rs | 356
-rw-r--r--  compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs | 97
-rw-r--r--  compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs | 40
-rw-r--r--  compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Disable-unsupported-tests.patch | 107
-rw-r--r--  compiler/rustc_codegen_cranelift/patches/0002-rand-Disable-failing-test.patch | 24
-rw-r--r--  compiler/rustc_codegen_cranelift/patches/0022-sysroot-Disable-not-compiling-tests.patch | 34
-rw-r--r--  compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch | 50
-rw-r--r--  compiler/rustc_codegen_cranelift/patches/0027-sysroot-128bit-atomic-operations.patch | 105
-rw-r--r--  compiler/rustc_codegen_cranelift/patches/0028-sysroot-Disable-long-running-tests.patch | 48
-rw-r--r--  compiler/rustc_codegen_cranelift/rust-toolchain | 3
-rw-r--r--  compiler/rustc_codegen_cranelift/rustfmt.toml | 4
-rw-r--r--  compiler/rustc_codegen_cranelift/scripts/Readme.md | 2
-rw-r--r--  compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs | 70
-rwxr-xr-x  compiler/rustc_codegen_cranelift/scripts/filter_profile.rs | 125
-rw-r--r--  compiler/rustc_codegen_cranelift/scripts/rustc-clif.rs | 36
-rwxr-xr-x  compiler/rustc_codegen_cranelift/scripts/rustup.sh | 58
-rw-r--r--  compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh | 68
-rwxr-xr-x  compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh | 15
-rwxr-xr-x  compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh | 121
-rwxr-xr-x  compiler/rustc_codegen_cranelift/scripts/tests.sh | 203
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/comments.rs | 135
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/mod.rs | 611
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs | 299
-rw-r--r--  compiler/rustc_codegen_cranelift/src/abi/returning.rs | 141
-rw-r--r--  compiler/rustc_codegen_cranelift/src/allocator.rs | 147
-rw-r--r--  compiler/rustc_codegen_cranelift/src/analyze.rs | 48
-rw-r--r--  compiler/rustc_codegen_cranelift/src/archive.rs | 236
-rw-r--r--  compiler/rustc_codegen_cranelift/src/base.rs | 955
-rw-r--r--  compiler/rustc_codegen_cranelift/src/cast.rs | 164
-rw-r--r--  compiler/rustc_codegen_cranelift/src/codegen_i128.rs | 153
-rw-r--r--  compiler/rustc_codegen_cranelift/src/common.rs | 475
-rw-r--r--  compiler/rustc_codegen_cranelift/src/compiler_builtins.rs | 43
-rw-r--r--  compiler/rustc_codegen_cranelift/src/config.rs | 107
-rw-r--r--  compiler/rustc_codegen_cranelift/src/constant.rs | 580
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs | 190
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs | 218
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs | 357
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/object.rs | 83
-rw-r--r--  compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs | 136
-rw-r--r--  compiler/rustc_codegen_cranelift/src/discriminant.rs | 176
-rw-r--r--  compiler/rustc_codegen_cranelift/src/driver/aot.rs | 436
-rw-r--r--  compiler/rustc_codegen_cranelift/src/driver/jit.rs | 385
-rw-r--r--  compiler/rustc_codegen_cranelift/src/driver/mod.rs | 53
-rw-r--r--  compiler/rustc_codegen_cranelift/src/inline_asm.rs | 677
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs | 74
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs | 192
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs | 1292
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs | 659
-rw-r--r--  compiler/rustc_codegen_cranelift/src/lib.rs | 316
-rw-r--r--  compiler/rustc_codegen_cranelift/src/linkage.rs | 36
-rw-r--r--  compiler/rustc_codegen_cranelift/src/main_shim.rs | 161
-rw-r--r--  compiler/rustc_codegen_cranelift/src/num.rs | 440
-rw-r--r--  compiler/rustc_codegen_cranelift/src/optimize/mod.rs | 20
-rw-r--r--  compiler/rustc_codegen_cranelift/src/optimize/peephole.rs | 67
-rw-r--r--  compiler/rustc_codegen_cranelift/src/pointer.rs | 134
-rw-r--r--  compiler/rustc_codegen_cranelift/src/pretty_clif.rs | 278
-rw-r--r--  compiler/rustc_codegen_cranelift/src/toolchain.rs | 31
-rw-r--r--  compiler/rustc_codegen_cranelift/src/trap.rs | 57
-rw-r--r--  compiler/rustc_codegen_cranelift/src/unsize.rs | 227
-rw-r--r--  compiler/rustc_codegen_cranelift/src/value_and_place.rs | 883
-rw-r--r--  compiler/rustc_codegen_cranelift/src/vtable.rs | 79
-rwxr-xr-x  compiler/rustc_codegen_cranelift/test.sh | 13
-rwxr-xr-x  compiler/rustc_codegen_cranelift/y.rs | 31
-rw-r--r--  compiler/rustc_codegen_gcc/.github/workflows/ci.yml | 131
-rw-r--r--  compiler/rustc_codegen_gcc/.rustfmt.toml | 1
-rw-r--r--  compiler/rustc_codegen_gcc/Cargo.lock | 330
-rw-r--r--  compiler/rustc_codegen_gcc/Cargo.toml | 54
-rw-r--r--  compiler/rustc_codegen_gcc/LICENSE-APACHE | 176
-rw-r--r--  compiler/rustc_codegen_gcc/LICENSE-MIT | 23
-rw-r--r--  compiler/rustc_codegen_gcc/Readme.md | 147
-rwxr-xr-x  compiler/rustc_codegen_gcc/build.sh | 67
-rw-r--r--  compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml | 19
-rwxr-xr-x  compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh | 30
-rwxr-xr-x  compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh | 39
-rw-r--r--  compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs | 1
-rwxr-xr-x  compiler/rustc_codegen_gcc/cargo.sh | 23
-rwxr-xr-x  compiler/rustc_codegen_gcc/clean_all.sh | 6
-rw-r--r--  compiler/rustc_codegen_gcc/config.sh | 52
-rw-r--r--  compiler/rustc_codegen_gcc/crate_patches/0002-rand-Disable-failing-test.patch | 32
-rw-r--r--  compiler/rustc_codegen_gcc/example/alloc_example.rs | 41
-rw-r--r--  compiler/rustc_codegen_gcc/example/alloc_system.rs | 212
-rw-r--r--  compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs | 69
-rw-r--r--  compiler/rustc_codegen_gcc/example/dst-field-align.rs | 67
-rw-r--r--  compiler/rustc_codegen_gcc/example/example.rs | 208
-rw-r--r--  compiler/rustc_codegen_gcc/example/mini_core.rs | 599
-rw-r--r--  compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs | 431
-rw-r--r--  compiler/rustc_codegen_gcc/example/mod_bench.rs | 37
-rw-r--r--  compiler/rustc_codegen_gcc/example/std_example.rs | 286
-rw-r--r--  compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs | 97
-rw-r--r--  compiler/rustc_codegen_gcc/example/track-caller-attribute.rs | 40
-rw-r--r--  compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch | 63
-rw-r--r--  compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch | 49
-rw-r--r--  compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch | 29
-rw-r--r--  compiler/rustc_codegen_gcc/patches/0028-core-Disable-long-running-tests.patch | 32
-rwxr-xr-x  compiler/rustc_codegen_gcc/prepare.sh | 30
-rwxr-xr-x  compiler/rustc_codegen_gcc/prepare_build.sh | 5
-rw-r--r--  compiler/rustc_codegen_gcc/rust-toolchain | 3
-rw-r--r--  compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch | 14
-rwxr-xr-x  compiler/rustc_codegen_gcc/rustup.sh | 29
-rw-r--r--  compiler/rustc_codegen_gcc/src/abi.rs | 179
-rw-r--r--  compiler/rustc_codegen_gcc/src/allocator.rs | 123
-rw-r--r--  compiler/rustc_codegen_gcc/src/archive.rs | 189
-rw-r--r--  compiler/rustc_codegen_gcc/src/asm.rs | 817
-rw-r--r--  compiler/rustc_codegen_gcc/src/back/mod.rs | 1
-rw-r--r--  compiler/rustc_codegen_gcc/src/back/write.rs | 83
-rw-r--r--  compiler/rustc_codegen_gcc/src/base.rs | 154
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs | 1561
-rw-r--r--  compiler/rustc_codegen_gcc/src/callee.rs | 77
-rw-r--r--  compiler/rustc_codegen_gcc/src/common.rs | 479
-rw-r--r--  compiler/rustc_codegen_gcc/src/consts.rs | 405
-rw-r--r--  compiler/rustc_codegen_gcc/src/context.rs | 553
-rw-r--r--  compiler/rustc_codegen_gcc/src/coverageinfo.rs | 69
-rw-r--r--  compiler/rustc_codegen_gcc/src/debuginfo.rs | 62
-rw-r--r--  compiler/rustc_codegen_gcc/src/declare.rs | 145
-rw-r--r--  compiler/rustc_codegen_gcc/src/int.rs | 742
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/archs.rs | 5722
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs | 250
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/mod.rs | 1134
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/simd.rs | 751
-rw-r--r--  compiler/rustc_codegen_gcc/src/lib.rs | 331
-rw-r--r--  compiler/rustc_codegen_gcc/src/mono_item.rs | 38
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_.rs | 303
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_of.rs | 385
-rwxr-xr-x  compiler/rustc_codegen_gcc/test.sh | 291
-rw-r--r--  compiler/rustc_codegen_gcc/tests/lang_tests_common.rs | 68
-rw-r--r--  compiler/rustc_codegen_gcc/tests/lang_tests_debug.rs | 5
-rw-r--r--  compiler/rustc_codegen_gcc/tests/lang_tests_release.rs | 5
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/abort1.rs | 51
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/abort2.rs | 53
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/array.rs | 229
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/asm.rs | 172
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/assign.rs | 153
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/closure.rs | 230
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/condition.rs | 320
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/empty_main.rs | 39
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/exit.rs | 49
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/exit_code.rs | 39
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs | 223
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/int.rs | 340
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/int_overflow.rs | 140
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/mut_ref.rs | 165
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/operations.rs | 221
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs | 222
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/return-tuple.rs | 72
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/slice.rs | 128
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/static.rs | 112
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/structs.rs | 70
-rw-r--r--  compiler/rustc_codegen_gcc/tests/run/tuple.rs | 51
-rw-r--r--  compiler/rustc_codegen_gcc/tools/generate_intrinsics.py | 238
-rw-r--r--  compiler/rustc_codegen_llvm/Cargo.toml | 36
-rw-r--r--  compiler/rustc_codegen_llvm/README.md | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/abi.rs | 599
-rw-r--r--  compiler/rustc_codegen_llvm/src/allocator.rs | 157
-rw-r--r--  compiler/rustc_codegen_llvm/src/asm.rs | 1037
-rw-r--r--  compiler/rustc_codegen_llvm/src/attributes.rs | 449
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/archive.rs | 383
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/lto.rs | 936
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/profiling.rs | 58
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs | 1212
-rw-r--r--  compiler/rustc_codegen_llvm/src/base.rs | 173
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 1508
-rw-r--r--  compiler/rustc_codegen_llvm/src/callee.rs | 194
-rw-r--r--  compiler/rustc_codegen_llvm/src/common.rs | 359
-rw-r--r--  compiler/rustc_codegen_llvm/src/consts.rs | 577
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs | 1014
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 334
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs | 385
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs | 126
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/doc.md | 131
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 120
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs | 1618
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs | 514
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs | 437
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs | 441
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs | 267
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 614
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs | 48
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/utils.rs | 99
-rw-r--r--  compiler/rustc_codegen_llvm/src/declare.rs | 146
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs | 1924
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs | 442
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs | 105
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs | 213
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 2547
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/mod.rs | 318
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs | 562
-rw-r--r--  compiler/rustc_codegen_llvm/src/mono_item.rs | 150
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs | 319
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs | 418
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs | 214
-rw-r--r--  compiler/rustc_codegen_llvm/src/value.rs | 32
-rw-r--r--  compiler/rustc_codegen_ssa/Cargo.toml | 48
-rw-r--r--  compiler/rustc_codegen_ssa/README.md | 3
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/archive.rs | 69
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/command.rs | 178
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/link.rs | 2800
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/linker.rs | 1788
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/lto.rs | 104
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/metadata.rs | 314
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/mod.rs | 9
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/rpath.rs | 114
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/rpath/tests.rs | 72
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/symbol_export.rs | 590
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/write.rs | 2015
-rw-r--r--  compiler/rustc_codegen_ssa/src/base.rs | 961
-rw-r--r--  compiler/rustc_codegen_ssa/src/common.rs | 223
-rw-r--r--  compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs | 85
-rw-r--r--  compiler/rustc_codegen_ssa/src/coverageinfo/map.rs | 347
-rw-r--r--  compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs | 2
-rw-r--r--  compiler/rustc_codegen_ssa/src/debuginfo/mod.rs | 34
-rw-r--r--  compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs | 821
-rw-r--r--  compiler/rustc_codegen_ssa/src/glue.rs | 123
-rw-r--r--  compiler/rustc_codegen_ssa/src/lib.rs | 244
-rw-r--r--  compiler/rustc_codegen_ssa/src/meth.rs | 116
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/analyze.rs | 368
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs | 1654
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/constant.rs | 90
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs | 55
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/debuginfo.rs | 418
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs | 636
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/mod.rs | 410
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/operand.rs | 461
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/place.rs | 549
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 729
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/statement.rs | 102
-rw-r--r--  compiler/rustc_codegen_ssa/src/mono_item.rs | 147
-rw-r--r--  compiler/rustc_codegen_ssa/src/target_features.rs | 308
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/abi.rs | 8
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/asm.rs | 66
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/backend.rs | 161
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/builder.rs | 481
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/consts.rs | 41
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs | 57
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/debuginfo.rs | 79
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/declare.rs | 21
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/intrinsic.rs | 39
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/misc.rs | 26
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/mod.rs | 102
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/statics.rs | 24
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/type_.rs | 151
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/write.rs | 68
-rw-r--r--  compiler/rustc_const_eval/Cargo.toml | 27
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/error.rs | 252
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs | 395
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/fn_queries.rs | 82
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/machine.rs | 527
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/mod.rs | 163
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/valtrees.rs | 475
-rw-r--r--  compiler/rustc_const_eval/src/errors.rs | 89
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs | 365
-rw-r--r--  compiler/rustc_const_eval/src/interpret/eval_context.rs | 1019
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs | 486
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs | 696
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs | 130
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs | 196
-rw-r--r--  compiler/rustc_const_eval/src/interpret/machine.rs | 525
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs | 1224
-rw-r--r--  compiler/rustc_const_eval/src/interpret/mod.rs | 34
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operand.rs | 831
-rw-r--r--  compiler/rustc_const_eval/src/interpret/operator.rs | 463
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs | 900
-rw-r--r--  compiler/rustc_const_eval/src/interpret/projection.rs | 391
-rw-r--r--  compiler/rustc_const_eval/src/interpret/step.rs | 320
-rw-r--r--  compiler/rustc_const_eval/src/interpret/terminator.rs | 671
-rw-r--r--  compiler/rustc_const_eval/src/interpret/traits.rs | 59
-rw-r--r--  compiler/rustc_const_eval/src/interpret/util.rs | 73
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs | 986
-rw-r--r--  compiler/rustc_const_eval/src/interpret/visitor.rs | 534
-rw-r--r--  compiler/rustc_const_eval/src/lib.rs | 68
-rw-r--r--  compiler/rustc_const_eval/src/might_permit_raw_init.rs | 40
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/check.rs | 1032
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/mod.rs | 132
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/ops.rs | 771
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs | 123
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs | 384
-rw-r--r--  compiler/rustc_const_eval/src/transform/check_consts/resolver.rs | 384
-rw-r--r--  compiler/rustc_const_eval/src/transform/mod.rs | 3
-rw-r--r--  compiler/rustc_const_eval/src/transform/promote_consts.rs | 1066
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs | 913
-rw-r--r--  compiler/rustc_const_eval/src/util/aggregate.rs | 77
-rw-r--r--  compiler/rustc_const_eval/src/util/alignment.rs | 63
-rw-r--r--  compiler/rustc_const_eval/src/util/call_kind.rs | 146
-rw-r--r--  compiler/rustc_const_eval/src/util/collect_writes.rs | 36
-rw-r--r--  compiler/rustc_const_eval/src/util/find_self_call.rs | 36
-rw-r--r--  compiler/rustc_const_eval/src/util/mod.rs | 10
-rw-r--r--  compiler/rustc_data_structures/Cargo.toml | 41
-rw-r--r--  compiler/rustc_data_structures/src/atomic_ref.rs | 26
-rw-r--r--  compiler/rustc_data_structures/src/base_n.rs | 42
-rw-r--r--  compiler/rustc_data_structures/src/base_n/tests.rs | 24
-rw-r--r--  compiler/rustc_data_structures/src/binary_search_util/mod.rs | 68
-rw-r--r--  compiler/rustc_data_structures/src/binary_search_util/tests.rs | 23
-rw-r--r--  compiler/rustc_data_structures/src/captures.rs | 8
-rw-r--r--  compiler/rustc_data_structures/src/fingerprint.rs | 215
-rw-r--r--  compiler/rustc_data_structures/src/fingerprint/tests.rs | 14
-rw-r--r--  compiler/rustc_data_structures/src/flock.rs | 26
-rw-r--r--  compiler/rustc_data_structures/src/flock/linux.rs | 40
-rw-r--r--  compiler/rustc_data_structures/src/flock/unix.rs | 51
-rw-r--r--  compiler/rustc_data_structures/src/flock/unsupported.rs | 16
-rw-r--r--  compiler/rustc_data_structures/src/flock/windows.rs | 77
-rw-r--r--  compiler/rustc_data_structures/src/frozen.rs | 64
-rw-r--r--  compiler/rustc_data_structures/src/functor.rs | 99
-rw-r--r--  compiler/rustc_data_structures/src/fx.rs | 14
-rw-r--r--  compiler/rustc_data_structures/src/graph/dominators/mod.rs | 324
-rw-r--r--  compiler/rustc_data_structures/src/graph/dominators/tests.rs | 45
-rw-r--r--  compiler/rustc_data_structures/src/graph/implementation/mod.rs | 366
-rw-r--r--  compiler/rustc_data_structures/src/graph/implementation/tests.rs | 131
-rw-r--r--  compiler/rustc_data_structures/src/graph/iterate/mod.rs | 353
-rw-r--r--  compiler/rustc_data_structures/src/graph/iterate/tests.rs | 38
-rw-r--r--  compiler/rustc_data_structures/src/graph/mod.rs | 81
-rw-r--r--  compiler/rustc_data_structures/src/graph/reference.rs | 39
-rw-r--r--  compiler/rustc_data_structures/src/graph/scc/mod.rs | 567
-rw-r--r--  compiler/rustc_data_structures/src/graph/scc/tests.rs | 216
-rw-r--r--  compiler/rustc_data_structures/src/graph/tests.rs | 73
-rw-r--r--  compiler/rustc_data_structures/src/graph/vec_graph/mod.rs | 109
-rw-r--r--  compiler/rustc_data_structures/src/graph/vec_graph/tests.rs | 42
-rw-r--r--  compiler/rustc_data_structures/src/intern.rs | 196
-rw-r--r--  compiler/rustc_data_structures/src/intern/tests.rs | 59
-rw-r--r--  compiler/rustc_data_structures/src/jobserver.rs | 40
-rw-r--r--  compiler/rustc_data_structures/src/lib.rs | 113
-rw-r--r--  compiler/rustc_data_structures/src/macros.rs | 37
-rw-r--r--  compiler/rustc_data_structures/src/map_in_place.rs | 108
-rw-r--r--  compiler/rustc_data_structures/src/memmap.rs | 108
-rw-r--r--  compiler/rustc_data_structures/src/obligation_forest/graphviz.rs | 90
-rw-r--r--  compiler/rustc_data_structures/src/obligation_forest/mod.rs | 698
-rw-r--r--  compiler/rustc_data_structures/src/obligation_forest/tests.rs | 479
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/LICENSE | 21
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/mod.rs | 1214
-rw-r--r--  compiler/rustc_data_structures/src/owning_ref/tests.rs | 711
-rw-r--r--  compiler/rustc_data_structures/src/profiling.rs | 861
-rw-r--r--  compiler/rustc_data_structures/src/sharded.rs | 150
-rw-r--r--  compiler/rustc_data_structures/src/sip128.rs | 496
-rw-r--r--  compiler/rustc_data_structures/src/sip128/tests.rs | 497
-rw-r--r--  compiler/rustc_data_structures/src/small_c_str.rs | 81
-rw-r--r--  compiler/rustc_data_structures/src/small_c_str/tests.rs | 45
-rw-r--r--  compiler/rustc_data_structures/src/small_str.rs | 68
-rw-r--r--  compiler/rustc_data_structures/src/small_str/tests.rs | 20
-rw-r--r--  compiler/rustc_data_structures/src/snapshot_map/mod.rs | 143
-rw-r--r--  compiler/rustc_data_structures/src/snapshot_map/tests.rs | 43
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map.rs | 302
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map/index_map.rs | 154
-rw-r--r--  compiler/rustc_data_structures/src/sorted_map/tests.rs | 222
-rw-r--r--  compiler/rustc_data_structures/src/sso/either_iter.rs | 75
-rw-r--r--  compiler/rustc_data_structures/src/sso/map.rs | 557
-rw-r--r--  compiler/rustc_data_structures/src/sso/mod.rs | 6
-rw-r--r--  compiler/rustc_data_structures/src/sso/set.rs | 238
-rw-r--r--  compiler/rustc_data_structures/src/stable_hasher.rs | 650
-rw-r--r--  compiler/rustc_data_structures/src/stable_hasher/tests.rs | 163
-rw-r--r--  compiler/rustc_data_structures/src/stack.rs | 18
-rw-r--r--  compiler/rustc_data_structures/src/steal.rs | 55
-rw-r--r--  compiler/rustc_data_structures/src/svh.rs | 69
-rw-r--r--  compiler/rustc_data_structures/src/sync.rs | 630
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr.rs | 168
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/copy.rs | 185
-rw-r--r--  compiler/rustc_data_structures/src/tagged_ptr/drop.rs | 133
-rw-r--r--  compiler/rustc_data_structures/src/temp_dir.rs | 34
-rw-r--r--  compiler/rustc_data_structures/src/thin_vec.rs | 135
-rw-r--r--  compiler/rustc_data_structures/src/thin_vec/tests.rs | 42
-rw-r--r--  compiler/rustc_data_structures/src/tiny_list.rs | 81
-rw-r--r--  compiler/rustc_data_structures/src/tiny_list/tests.rs | 155
-rw-r--r--  compiler/rustc_data_structures/src/transitive_relation.rs | 392
-rw-r--r--  compiler/rustc_data_structures/src/transitive_relation/tests.rs | 362
-rw-r--r--  compiler/rustc_data_structures/src/unhash.rs | 29
-rw-r--r--  compiler/rustc_data_structures/src/vec_linked_list.rs | 70
-rw-r--r--  compiler/rustc_data_structures/src/vec_map.rs | 194
-rw-r--r--  compiler/rustc_data_structures/src/vec_map/tests.rs | 48
-rw-r--r--  compiler/rustc_data_structures/src/work_queue.rs | 44
-rw-r--r--  compiler/rustc_driver/Cargo.toml | 44
-rw-r--r--  compiler/rustc_driver/README.md | 10
-rw-r--r--  compiler/rustc_driver/src/args.rs | 51
-rw-r--r--  compiler/rustc_driver/src/lib.rs | 1340
-rw-r--r--  compiler/rustc_driver/src/pretty.rs | 518
-rw-r--r--  compiler/rustc_error_codes/Cargo.toml | 4
-rw-r--r--  compiler/rustc_error_codes/src/error_codes.rs | 649
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0001.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0002.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0004.md | 46
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0005.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0007.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0009.md | 52
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0010.md | 11
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0013.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0014.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0015.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0023.md | 57
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0025.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0026.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0027.md | 39
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0029.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0030.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0033.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0034.md | 86
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0038.md | 324
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0040.md | 39
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0044.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0045.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0046.md | 31
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0049.md | 36
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0050.md | 36
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0053.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0054.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0055.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0057.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0059.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0060.md | 38
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0061.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0062.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0063.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0067.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0069.md | 14
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0070.md | 49
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0071.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0072.md | 37
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0073.md | 19
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0074.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0075.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0076.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0077.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0080.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0081.md | 37
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0084.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0087.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0088.md | 45
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0089.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0090.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0091.md | 16
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0092.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0093.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0094.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0106.md | 53
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0107.md | 44
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0109.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0110.md | 4
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0116.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0117.md | 50
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0118.md | 43
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0119.md | 59
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0120.md | 38
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0121.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0124.md | 19
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0128.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0130.md | 31
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0131.md | 11
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0132.md | 32
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0133.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0136.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0137.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0138.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0139.md | 84
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0152.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0154.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0158.md | 38
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0161.md | 45
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0162.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0164.md | 44
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0165.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0170.md | 67
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0178.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0183.md | 39
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0184.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0185.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0186.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0191.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0192.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0193.md | 44
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0195.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0197.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0198.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0199.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0200.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0201.md | 45
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0203.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0204.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0205.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0206.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0207.md | 135
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0210.md | 82
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0211.md | 79
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0212.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0214.md | 17
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0220.md | 42
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0221.md | 44
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0222.md | 52
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0223.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0224.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0225.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0226.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0227.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0228.md | 40
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0229.md | 38
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0230.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0231.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0232.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0243.md | 13
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0244.md | 13
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0251.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0252.md | 54
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0253.md | 19
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0254.md | 36
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0255.md | 44
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0256.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0259.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0260.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0261.md | 52
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0262.md | 12
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0263.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0264.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0267.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0268.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0271.md | 65
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0275.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0276.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0277.md | 87
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0281.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0282.md | 69
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0283.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0284.md | 32
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0297.md | 40
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0301.md | 17
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0302.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0303.md | 38
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0307.md | 92
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0308.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0309.md | 54
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0310.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0312.md | 32
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0316.md | 32
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0317.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0321.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0322.md | 14
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0323.md | 46
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0324.md | 38
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0325.md | 46
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0326.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0328.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0329.md | 40
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0364.md | 32
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0365.md | 32
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0366.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0367.md | 36
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0368.md | 49
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0369.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0370.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0371.md | 19
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0373.md | 73
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0374.md | 57
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0375.md | 55
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0376.md | 46
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0378.md | 59
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0379.md | 14
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0380.md | 14
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0381.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0382.md | 109
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0383.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0384.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0386.md | 31
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0387.md | 57
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0388.md | 1
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0389.md | 63
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0390.md | 51
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0391.md | 16
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0392.md | 58
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0393.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0398.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0399.md | 37
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0401.md | 106
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0403.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0404.md | 57
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0405.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0407.md | 52
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0408.md | 41
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0409.md | 38
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0411.md | 56
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0412.md | 64
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0415.md | 14
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0416.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0422.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0423.md | 46
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0424.md | 40
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0425.md | 60
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0426.md | 17
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0428.md | 16
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0429.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0430.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0431.md | 11
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0432.md | 47
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0433.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0434.md | 40
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0435.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0436.md | 48
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0437.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0438.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0439.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0445.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0446.md | 50
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0447.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0448.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0449.md | 38
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0451.md | 48
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0452.md | 16
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0453.md | 42
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0454.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0455.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0458.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0459.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0463.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0464.md | 6
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0466.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0468.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0469.md | 39
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0477.md | 46
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0478.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0482.md | 75
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0491.md | 36
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0492.md | 71
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0493.md | 42
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0495.md | 40
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0496.md | 31
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0497.md | 14
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0498.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0499.md | 32
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0500.md | 41
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0501.md | 79
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0502.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0503.md | 54
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0504.md | 103
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0505.md | 87
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0506.md | 73
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0507.md | 133
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0508.md | 54
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0509.md | 92
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0510.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0511.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0512.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0515.md | 37
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0516.md | 17
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0517.md | 44
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0518.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0520.md | 61
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0521.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0522.md | 17
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0524.md | 60
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0525.md | 42
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0527.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0528.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0529.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0530.md | 57
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0531.md | 42
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0532.md | 38
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0533.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0534.md | 37
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0535.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0536.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0537.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0538.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0539.md | 48
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0541.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0542.md | 47
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0543.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0544.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0545.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0546.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0547.md | 37
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0549.md | 37
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0550.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0551.md | 16
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0552.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0554.md | 12
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0556.md | 16
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0557.md | 9
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0559.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0560.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0561.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0562.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0565.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0566.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0567.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0568.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0569.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0570.md | 7
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0571.md | 31
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0572.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0573.md | 71
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0574.md | 47
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0575.md | 50
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0576.md | 22
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0577.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0578.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0579.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0580.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0581.md | 31
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0582.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0583.md | 14
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0584.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0585.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0586.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0587.md | 16
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0588.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0589.md | 11
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0590.md | 17
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0591.md | 81
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0592.md | 31
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0593.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0594.md | 23
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0595.md | 17
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0596.md | 16
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0597.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0599.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0600.md | 40
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0601.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0602.md | 10
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0603.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0604.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0605.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0606.md | 21
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0607.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0608.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0609.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0610.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0614.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0615.md | 35
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0616.md | 57
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0617.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0618.md | 26
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0619.md | 39
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0620.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0621.md | 34
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0622.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0623.md | 72
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0624.md | 53
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0625.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0626.md | 90
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0627.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0628.md | 30
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0631.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0632.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0633.md | 27
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0634.md | 20
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0635.md | 7
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0636.md | 9
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0637.md | 51
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0638.md | 47
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0639.md | 19
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0641.md | 19
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0642.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0643.md | 12
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0644.md | 29
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0646.md | 9
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0647.md | 13
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0648.md | 15
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0657.md | 57
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0658.md | 28
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0659.md | 50
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0660.md | 9
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0661.md | 10
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0662.md | 13
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0663.md | 13
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0664.md | 13
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0665.md | 33
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0666.md | 25
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0667.md | 18
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0668.md | 24
-rw-r--r--  compiler/rustc_error_codes/src/error_codes/E0669.md | 22
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0670.md9
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0671.md10
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0687.md38
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0688.md38
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0689.md29
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0690.md33
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0691.md48
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0692.md49
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0693.md19
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0695.md35
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0696.md49
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0697.md15
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0698.md25
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0699.md46
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0700.md44
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0701.md9
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0703.md17
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0704.md27
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0705.md11
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0706.md59
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0708.md26
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0710.md34
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0712.md19
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0713.md49
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0714.md19
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0715.md23
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0716.md79
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0718.md10
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0719.md35
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0720.md13
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0722.md31
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0724.md25
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0725.md12
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0726.md46
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0727.md30
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0728.md75
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0729.md30
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0730.md39
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0731.md17
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0732.md44
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0733.md44
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0734.md12
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0735.md11
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0736.md14
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0737.md12
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0739.md12
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0740.md18
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0741.md25
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0742.md35
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0743.md16
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0744.md16
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0745.md23
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0746.md139
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0747.md20
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0748.md16
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0749.md29
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0750.md18
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0751.md12
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0752.md19
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0753.md31
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0754.md24
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0755.md28
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0756.md29
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0757.md33
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0758.md20
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0759.md63
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0760.md32
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0761.md21
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0762.md13
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0763.md13
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0764.md41
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0765.md13
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0766.md13
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0767.md20
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0768.md13
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0769.md47
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0770.md13
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0771.md23
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0772.md91
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0773.md38
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0774.md24
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0775.md17
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0776.md13
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0777.md19
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0778.md35
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0779.md32
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0780.md19
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0781.md12
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0782.md26
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0783.md22
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0784.md32
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0785.md30
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0786.md14
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0787.md28
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0788.md26
-rw-r--r--compiler/rustc_error_codes/src/error_codes/E0790.md47
-rw-r--r--compiler/rustc_error_codes/src/lib.rs15
-rw-r--r--compiler/rustc_error_messages/Cargo.toml18
-rw-r--r--compiler/rustc_error_messages/locales/en-US/borrowck.ftl18
-rw-r--r--compiler/rustc_error_messages/locales/en-US/builtin_macros.ftl5
-rw-r--r--compiler/rustc_error_messages/locales/en-US/const_eval.ftl31
-rw-r--r--compiler/rustc_error_messages/locales/en-US/expand.ftl5
-rw-r--r--compiler/rustc_error_messages/locales/en-US/lint.ftl395
-rw-r--r--compiler/rustc_error_messages/locales/en-US/parser.ftl34
-rw-r--r--compiler/rustc_error_messages/locales/en-US/passes.ftl264
-rw-r--r--compiler/rustc_error_messages/locales/en-US/privacy.ftl21
-rw-r--r--compiler/rustc_error_messages/locales/en-US/typeck.ftl125
-rw-r--r--compiler/rustc_error_messages/src/lib.rs489
-rw-r--r--compiler/rustc_errors/Cargo.toml27
-rw-r--r--compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs215
-rw-r--r--compiler/rustc_errors/src/diagnostic.rs1016
-rw-r--r--compiler/rustc_errors/src/diagnostic_builder.rs616
-rw-r--r--compiler/rustc_errors/src/emitter.rs2687
-rw-r--r--compiler/rustc_errors/src/json.rs561
-rw-r--r--compiler/rustc_errors/src/json/tests.rs204
-rw-r--r--compiler/rustc_errors/src/lib.rs1589
-rw-r--r--compiler/rustc_errors/src/lock.rs93
-rw-r--r--compiler/rustc_errors/src/registry.rs25
-rw-r--r--compiler/rustc_errors/src/snippet.rs179
-rw-r--r--compiler/rustc_errors/src/styled_buffer.rs151
-rw-r--r--compiler/rustc_expand/Cargo.toml27
-rw-r--r--compiler/rustc_expand/src/base.rs1444
-rw-r--r--compiler/rustc_expand/src/build.rs616
-rw-r--r--compiler/rustc_expand/src/config.rs535
-rw-r--r--compiler/rustc_expand/src/expand.rs1888
-rw-r--r--compiler/rustc_expand/src/lib.rs53
-rw-r--r--compiler/rustc_expand/src/mbe.rs110
-rw-r--r--compiler/rustc_expand/src/mbe/macro_check.rs652
-rw-r--r--compiler/rustc_expand/src/mbe/macro_parser.rs704
-rw-r--r--compiler/rustc_expand/src/mbe/macro_rules.rs1420
-rw-r--r--compiler/rustc_expand/src/mbe/metavar_expr.rs161
-rw-r--r--compiler/rustc_expand/src/mbe/quoted.rs366
-rw-r--r--compiler/rustc_expand/src/mbe/transcribe.rs580
-rw-r--r--compiler/rustc_expand/src/module.rs298
-rw-r--r--compiler/rustc_expand/src/mut_visit/tests.rs72
-rw-r--r--compiler/rustc_expand/src/parse/tests.rs358
-rw-r--r--compiler/rustc_expand/src/placeholders.rs373
-rw-r--r--compiler/rustc_expand/src/proc_macro.rs181
-rw-r--r--compiler/rustc_expand/src/proc_macro_server.rs766
-rw-r--r--compiler/rustc_expand/src/tests.rs1016
-rw-r--r--compiler/rustc_expand/src/tokenstream/tests.rs110
-rw-r--r--compiler/rustc_feature/Cargo.toml11
-rw-r--r--compiler/rustc_feature/src/accepted.rs344
-rw-r--r--compiler/rustc_feature/src/active.rs552
-rw-r--r--compiler/rustc_feature/src/builtin_attrs.rs833
-rw-r--r--compiler/rustc_feature/src/lib.rs155
-rw-r--r--compiler/rustc_feature/src/removed.rs203
-rw-r--r--compiler/rustc_feature/src/tests.rs23
-rw-r--r--compiler/rustc_fs_util/Cargo.toml4
-rw-r--r--compiler/rustc_fs_util/src/lib.rs90
-rw-r--r--compiler/rustc_graphviz/Cargo.toml4
-rw-r--r--compiler/rustc_graphviz/src/lib.rs693
-rw-r--r--compiler/rustc_graphviz/src/tests.rs408
-rw-r--r--compiler/rustc_hir/Cargo.toml21
-rw-r--r--compiler/rustc_hir/src/arena.rs55
-rw-r--r--compiler/rustc_hir/src/def.rs749
-rw-r--r--compiler/rustc_hir/src/def_path_hash_map.rs37
-rw-r--r--compiler/rustc_hir/src/definitions.rs440
-rw-r--r--compiler/rustc_hir/src/diagnostic_items.rs17
-rw-r--r--compiler/rustc_hir/src/hir.rs3506
-rw-r--r--compiler/rustc_hir/src/hir_id.rs89
-rw-r--r--compiler/rustc_hir/src/intravisit.rs1232
-rw-r--r--compiler/rustc_hir/src/lang_items.rs339
-rw-r--r--compiler/rustc_hir/src/lib.rs47
-rw-r--r--compiler/rustc_hir/src/pat_util.rs157
-rw-r--r--compiler/rustc_hir/src/stable_hash_impls.rs143
-rw-r--r--compiler/rustc_hir/src/target.rs188
-rw-r--r--compiler/rustc_hir/src/tests.rs36
-rw-r--r--compiler/rustc_hir/src/weak_lang_items.rs47
-rw-r--r--compiler/rustc_hir_pretty/Cargo.toml14
-rw-r--r--compiler/rustc_hir_pretty/src/lib.rs2413
-rw-r--r--compiler/rustc_incremental/Cargo.toml22
-rw-r--r--compiler/rustc_incremental/src/assert_dep_graph.rs446
-rw-r--r--compiler/rustc_incremental/src/assert_module_sources.rs178
-rw-r--r--compiler/rustc_incremental/src/lib.rs30
-rw-r--r--compiler/rustc_incremental/src/persist/README.md3
-rw-r--r--compiler/rustc_incremental/src/persist/data.rs13
-rw-r--r--compiler/rustc_incremental/src/persist/dirty_clean.rs480
-rw-r--r--compiler/rustc_incremental/src/persist/file_format.rs195
-rw-r--r--compiler/rustc_incremental/src/persist/fs.rs984
-rw-r--r--compiler/rustc_incremental/src/persist/fs/tests.rs84
-rw-r--r--compiler/rustc_incremental/src/persist/load.rs235
-rw-r--r--compiler/rustc_incremental/src/persist/mod.rs25
-rw-r--r--compiler/rustc_incremental/src/persist/save.rs188
-rw-r--r--compiler/rustc_incremental/src/persist/work_product.rs59
-rw-r--r--compiler/rustc_index/Cargo.toml13
-rw-r--r--compiler/rustc_index/src/bit_set.rs2098
-rw-r--r--compiler/rustc_index/src/bit_set/tests.rs873
-rw-r--r--compiler/rustc_index/src/interval.rs305
-rw-r--r--compiler/rustc_index/src/interval/tests.rs199
-rw-r--r--compiler/rustc_index/src/lib.rs23
-rw-r--r--compiler/rustc_index/src/vec.rs409
-rw-r--r--compiler/rustc_index/src/vec/tests.rs55
-rw-r--r--compiler/rustc_infer/Cargo.toml20
-rw-r--r--compiler/rustc_infer/src/infer/at.rs429
-rw-r--r--compiler/rustc_infer/src/infer/canonical/canonicalizer.rs791
-rw-r--r--compiler/rustc_infer/src/infer/canonical/mod.rs159
-rw-r--r--compiler/rustc_infer/src/infer/canonical/query_response.rs741
-rw-r--r--compiler/rustc_infer/src/infer/canonical/substitute.rs91
-rw-r--r--compiler/rustc_infer/src/infer/combine.rs1001
-rw-r--r--compiler/rustc_infer/src/infer/equate.rs169
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/mod.rs3121
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs1134
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs234
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs234
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs102
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs77
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs116
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs501
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs577
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs176
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs167
-rw-r--r--compiler/rustc_infer/src/infer/error_reporting/note.rs414
-rw-r--r--compiler/rustc_infer/src/infer/free_regions.rs119
-rw-r--r--compiler/rustc_infer/src/infer/freshen.rs255
-rw-r--r--compiler/rustc_infer/src/infer/fudge.rs246
-rw-r--r--compiler/rustc_infer/src/infer/glb.rs144
-rw-r--r--compiler/rustc_infer/src/infer/higher_ranked/README.md8
-rw-r--r--compiler/rustc_infer/src/infer/higher_ranked/mod.rs137
-rw-r--r--compiler/rustc_infer/src/infer/lattice.rs124
-rw-r--r--compiler/rustc_infer/src/infer/lexical_region_resolve/README.md6
-rw-r--r--compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs891
-rw-r--r--compiler/rustc_infer/src/infer/lub.rs144
-rw-r--r--compiler/rustc_infer/src/infer/mod.rs2066
-rw-r--r--compiler/rustc_infer/src/infer/nll_relate/mod.rs1080
-rw-r--r--compiler/rustc_infer/src/infer/opaque_types.rs649
-rw-r--r--compiler/rustc_infer/src/infer/opaque_types/table.rs80
-rw-r--r--compiler/rustc_infer/src/infer/outlives/components.rs219
-rw-r--r--compiler/rustc_infer/src/infer/outlives/env.rs131
-rw-r--r--compiler/rustc_infer/src/infer/outlives/mod.rs37
-rw-r--r--compiler/rustc_infer/src/infer/outlives/obligations.rs470
-rw-r--r--compiler/rustc_infer/src/infer/outlives/test_type_match.rs207
-rw-r--r--compiler/rustc_infer/src/infer/outlives/verify.rs373
-rw-r--r--compiler/rustc_infer/src/infer/projection.rs40
-rw-r--r--compiler/rustc_infer/src/infer/region_constraints/README.md3
-rw-r--r--compiler/rustc_infer/src/infer/region_constraints/leak_check.rs447
-rw-r--r--compiler/rustc_infer/src/infer/region_constraints/mod.rs821
-rw-r--r--compiler/rustc_infer/src/infer/resolve.rs237
-rw-r--r--compiler/rustc_infer/src/infer/sub.rs210
-rw-r--r--compiler/rustc_infer/src/infer/type_variable.rs460
-rw-r--r--compiler/rustc_infer/src/infer/undo_log.rs220
-rw-r--r--compiler/rustc_infer/src/lib.rs39
-rw-r--r--compiler/rustc_infer/src/traits/engine.rs76
-rw-r--r--compiler/rustc_infer/src/traits/error_reporting/mod.rs108
-rw-r--r--compiler/rustc_infer/src/traits/mod.rs170
-rw-r--r--compiler/rustc_infer/src/traits/project.rs255
-rw-r--r--compiler/rustc_infer/src/traits/structural_impls.rs79
-rw-r--r--compiler/rustc_infer/src/traits/util.rs390
-rw-r--r--compiler/rustc_interface/Cargo.toml61
-rw-r--r--compiler/rustc_interface/src/callbacks.rs59
-rw-r--r--compiler/rustc_interface/src/interface.rs362
-rw-r--r--compiler/rustc_interface/src/lib.rs22
-rw-r--r--compiler/rustc_interface/src/passes.rs1046
-rw-r--r--compiler/rustc_interface/src/proc_macro_decls.rs27
-rw-r--r--compiler/rustc_interface/src/queries.rs402
-rw-r--r--compiler/rustc_interface/src/tests.rs830
-rw-r--r--compiler/rustc_interface/src/util.rs672
-rw-r--r--compiler/rustc_lexer/Cargo.toml23
-rw-r--r--compiler/rustc_lexer/src/cursor.rs93
-rw-r--r--compiler/rustc_lexer/src/lib.rs843
-rw-r--r--compiler/rustc_lexer/src/tests.rs298
-rw-r--r--compiler/rustc_lexer/src/unescape.rs377
-rw-r--r--compiler/rustc_lexer/src/unescape/tests.rs297
-rw-r--r--compiler/rustc_lint/Cargo.toml25
-rw-r--r--compiler/rustc_lint/src/array_into_iter.rs155
-rw-r--r--compiler/rustc_lint/src/builtin.rs3172
-rw-r--r--compiler/rustc_lint/src/context.rs1259
-rw-r--r--compiler/rustc_lint/src/early.rs456
-rw-r--r--compiler/rustc_lint/src/enum_intrinsics_non_enums.rs88
-rw-r--r--compiler/rustc_lint/src/expect.rs59
-rw-r--r--compiler/rustc_lint/src/hidden_unicode_codepoints.rs141
-rw-r--r--compiler/rustc_lint/src/internal.rs469
-rw-r--r--compiler/rustc_lint/src/late.rs482
-rw-r--r--compiler/rustc_lint/src/levels.rs813
-rw-r--r--compiler/rustc_lint/src/lib.rs538
-rw-r--r--compiler/rustc_lint/src/methods.rs103
-rw-r--r--compiler/rustc_lint/src/non_ascii_idents.rs345
-rw-r--r--compiler/rustc_lint/src/non_fmt_panic.rs357
-rw-r--r--compiler/rustc_lint/src/nonstandard_style.rs565
-rw-r--r--compiler/rustc_lint/src/nonstandard_style/tests.rs21
-rw-r--r--compiler/rustc_lint/src/noop_method_call.rs103
-rw-r--r--compiler/rustc_lint/src/pass_by_value.rs96
-rw-r--r--compiler/rustc_lint/src/passes.rs249
-rw-r--r--compiler/rustc_lint/src/redundant_semicolon.rs58
-rw-r--r--compiler/rustc_lint/src/tests.rs26
-rw-r--r--compiler/rustc_lint/src/traits.rs134
-rw-r--r--compiler/rustc_lint/src/types.rs1576
-rw-r--r--compiler/rustc_lint/src/unused.rs1197
-rw-r--r--compiler/rustc_lint_defs/Cargo.toml15
-rw-r--r--compiler/rustc_lint_defs/src/builtin.rs4056
-rw-r--r--compiler/rustc_lint_defs/src/lib.rs722
-rw-r--r--compiler/rustc_llvm/Cargo.toml14
-rw-r--r--compiler/rustc_llvm/build.rs368
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/.editorconfig6
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp222
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp117
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h121
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/Linker.cpp48
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp1763
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/README16
-rw-r--r--compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp1972
-rw-r--r--compiler/rustc_llvm/src/lib.rs188
-rw-r--r--compiler/rustc_log/Cargo.toml16
-rw-r--r--compiler/rustc_log/src/lib.rs122
-rw-r--r--compiler/rustc_macros/Cargo.toml17
-rw-r--r--compiler/rustc_macros/src/diagnostics/diagnostic.rs225
-rw-r--r--compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs630
-rw-r--r--compiler/rustc_macros/src/diagnostics/error.rs141
-rw-r--r--compiler/rustc_macros/src/diagnostics/fluent.rs277
-rw-r--r--compiler/rustc_macros/src/diagnostics/mod.rs159
-rw-r--r--compiler/rustc_macros/src/diagnostics/subdiagnostic.rs491
-rw-r--r--compiler/rustc_macros/src/diagnostics/utils.rs356
-rw-r--r--compiler/rustc_macros/src/hash_stable.rs131
-rw-r--r--compiler/rustc_macros/src/lib.rs180
-rw-r--r--compiler/rustc_macros/src/lift.rs52
-rw-r--r--compiler/rustc_macros/src/newtype.rs333
-rw-r--r--compiler/rustc_macros/src/query.rs566
-rw-r--r--compiler/rustc_macros/src/serialize.rs224
-rw-r--r--compiler/rustc_macros/src/symbols.rs236
-rw-r--r--compiler/rustc_macros/src/symbols/tests.rs102
-rw-r--r--compiler/rustc_macros/src/type_foldable.rs36
-rw-r--r--compiler/rustc_macros/src/type_visitable.rs33
-rw-r--r--compiler/rustc_metadata/Cargo.toml31
-rw-r--r--compiler/rustc_metadata/src/creader.rs1041
-rw-r--r--compiler/rustc_metadata/src/dependency_format.rs435
-rw-r--r--compiler/rustc_metadata/src/foreign_modules.rs19
-rw-r--r--compiler/rustc_metadata/src/fs.rs137
-rw-r--r--compiler/rustc_metadata/src/lib.rs41
-rw-r--r--compiler/rustc_metadata/src/locator.rs1222
-rw-r--r--compiler/rustc_metadata/src/native_libs.rs504
-rw-r--r--compiler/rustc_metadata/src/rmeta/decoder.rs1820
-rw-r--r--compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs680
-rw-r--r--compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs65
-rw-r--r--compiler/rustc_metadata/src/rmeta/encoder.rs2302
-rw-r--r--compiler/rustc_metadata/src/rmeta/mod.rs460
-rw-r--r--compiler/rustc_metadata/src/rmeta/table.rs330
-rw-r--r--compiler/rustc_middle/Cargo.toml40
-rw-r--r--compiler/rustc_middle/README.md3
-rw-r--r--compiler/rustc_middle/benches/lib.rs54
-rw-r--r--compiler/rustc_middle/src/arena.rs108
-rw-r--r--compiler/rustc_middle/src/dep_graph/dep_node.rs435
-rw-r--r--compiler/rustc_middle/src/dep_graph/mod.rs140
-rw-r--r--compiler/rustc_middle/src/hir/map/mod.rs1405
-rw-r--r--compiler/rustc_middle/src/hir/mod.rs182
-rw-r--r--compiler/rustc_middle/src/hir/nested_filter.rs31
-rw-r--r--compiler/rustc_middle/src/hir/place.rs117
-rw-r--r--compiler/rustc_middle/src/infer/canonical.rs363
-rw-r--r--compiler/rustc_middle/src/infer/mod.rs32
-rw-r--r--compiler/rustc_middle/src/infer/unify_key.rs162
-rw-r--r--compiler/rustc_middle/src/lib.rs106
-rw-r--r--compiler/rustc_middle/src/lint.rs443
-rw-r--r--compiler/rustc_middle/src/macros.rs232
-rw-r--r--compiler/rustc_middle/src/metadata.rs26
-rw-r--r--compiler/rustc_middle/src/middle/codegen_fn_attrs.rs146
-rw-r--r--compiler/rustc_middle/src/middle/dependency_format.rs28
-rw-r--r--compiler/rustc_middle/src/middle/exported_symbols.rs72
-rw-r--r--compiler/rustc_middle/src/middle/lang_items.rs61
-rw-r--r--compiler/rustc_middle/src/middle/limits.rs85
-rw-r--r--compiler/rustc_middle/src/middle/mod.rs37
-rw-r--r--compiler/rustc_middle/src/middle/privacy.rs64
-rw-r--r--compiler/rustc_middle/src/middle/region.rs443
-rw-r--r--compiler/rustc_middle/src/middle/resolve_lifetime.rs54
-rw-r--r--compiler/rustc_middle/src/middle/stability.rs591
-rw-r--r--compiler/rustc_middle/src/mir/basic_blocks.rs147
-rw-r--r--compiler/rustc_middle/src/mir/coverage.rs186
-rw-r--r--compiler/rustc_middle/src/mir/generic_graph.rs69
-rw-r--r--compiler/rustc_middle/src/mir/generic_graphviz.rs173
-rw-r--r--compiler/rustc_middle/src/mir/graph_cyclic_cache.rs63
-rw-r--r--compiler/rustc_middle/src/mir/graphviz.rs134
-rw-r--r--compiler/rustc_middle/src/mir/interpret/allocation.rs1300
-rw-r--r--compiler/rustc_middle/src/mir/interpret/error.rs551
-rw-r--r--compiler/rustc_middle/src/mir/interpret/mod.rs633
-rw-r--r--compiler/rustc_middle/src/mir/interpret/pointer.rs307
-rw-r--r--compiler/rustc_middle/src/mir/interpret/queries.rs217
-rw-r--r--compiler/rustc_middle/src/mir/interpret/value.rs651
-rw-r--r--compiler/rustc_middle/src/mir/mod.rs2900
-rw-r--r--compiler/rustc_middle/src/mir/mono.rs527
-rw-r--r--compiler/rustc_middle/src/mir/patch.rs196
-rw-r--r--compiler/rustc_middle/src/mir/predecessors.rs78
-rw-r--r--compiler/rustc_middle/src/mir/pretty.rs1067
-rw-r--r--compiler/rustc_middle/src/mir/query.rs476
-rw-r--r--compiler/rustc_middle/src/mir/spanview.rs691
-rw-r--r--compiler/rustc_middle/src/mir/switch_sources.rs78
-rw-r--r--compiler/rustc_middle/src/mir/syntax.rs1168
-rw-r--r--compiler/rustc_middle/src/mir/tcx.rs307
-rw-r--r--compiler/rustc_middle/src/mir/terminator.rs448
-rw-r--r--compiler/rustc_middle/src/mir/traversal.rs388
-rw-r--r--compiler/rustc_middle/src/mir/type_foldable.rs240
-rw-r--r--compiler/rustc_middle/src/mir/type_visitable.rs190
-rw-r--r--compiler/rustc_middle/src/mir/visit.rs1330
-rw-r--r--compiler/rustc_middle/src/query/mod.rs2060
-rw-r--r--compiler/rustc_middle/src/tests.rs13
-rw-r--r--compiler/rustc_middle/src/thir.rs821
-rw-r--r--compiler/rustc_middle/src/thir/visit.rs244
-rw-r--r--compiler/rustc_middle/src/traits/chalk.rs403
-rw-r--r--compiler/rustc_middle/src/traits/mod.rs1026
-rw-r--r--compiler/rustc_middle/src/traits/query.rs230
-rw-r--r--compiler/rustc_middle/src/traits/select.rs312
-rw-r--r--compiler/rustc_middle/src/traits/specialization_graph.rs261
-rw-r--r--compiler/rustc_middle/src/traits/structural_impls.rs135
-rw-r--r--compiler/rustc_middle/src/traits/util.rs49
-rw-r--r--compiler/rustc_middle/src/ty/_match.rs124
-rw-r--r--compiler/rustc_middle/src/ty/abstract_const.rs194
-rw-r--r--compiler/rustc_middle/src/ty/adjustment.rs198
-rw-r--r--compiler/rustc_middle/src/ty/adt.rs569
-rw-r--r--compiler/rustc_middle/src/ty/assoc.rs195
-rw-r--r--compiler/rustc_middle/src/ty/binding.rs22
-rw-r--r--compiler/rustc_middle/src/ty/cast.rs73
-rw-r--r--compiler/rustc_middle/src/ty/closure.rs454
-rw-r--r--compiler/rustc_middle/src/ty/codec.rs527
-rw-r--r--compiler/rustc_middle/src/ty/consts.rs326
-rw-r--r--compiler/rustc_middle/src/ty/consts/int.rs483
-rw-r--r--compiler/rustc_middle/src/ty/consts/kind.rs239
-rw-r--r--compiler/rustc_middle/src/ty/consts/valtree.rs104
-rw-r--r--compiler/rustc_middle/src/ty/context.rs3018
-rw-r--r--compiler/rustc_middle/src/ty/diagnostics.rs501
-rw-r--r--compiler/rustc_middle/src/ty/erase_regions.rs74
-rw-r--r--compiler/rustc_middle/src/ty/error.rs965
-rw-r--r--compiler/rustc_middle/src/ty/fast_reject.rs405
-rw-r--r--compiler/rustc_middle/src/ty/flags.rs342
-rw-r--r--compiler/rustc_middle/src/ty/fold.rs797
-rw-r--r--compiler/rustc_middle/src/ty/generics.rs349
-rw-r--r--compiler/rustc_middle/src/ty/impls_ty.rs135
-rw-r--r--compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs145
-rw-r--r--compiler/rustc_middle/src/ty/inhabitedness/mod.rs234
-rw-r--r--compiler/rustc_middle/src/ty/instance.rs746
-rw-r--r--compiler/rustc_middle/src/ty/layout.rs3504
-rw-r--r--compiler/rustc_middle/src/ty/list.rs215
-rw-r--r--compiler/rustc_middle/src/ty/mod.rs2518
-rw-r--r--compiler/rustc_middle/src/ty/normalize_erasing_regions.rs283
-rw-r--r--compiler/rustc_middle/src/ty/parameterized.rs119
-rw-r--r--compiler/rustc_middle/src/ty/print/mod.rs327
-rw-r--r--compiler/rustc_middle/src/ty/print/pretty.rs2789
-rw-r--r--compiler/rustc_middle/src/ty/query.rs386
-rw-r--r--compiler/rustc_middle/src/ty/relate.rs841
-rw-r--r--compiler/rustc_middle/src/ty/rvalue_scopes.rs57
-rw-r--r--compiler/rustc_middle/src/ty/structural_impls.rs1304
-rw-r--r--compiler/rustc_middle/src/ty/sty.rs2295
-rw-r--r--compiler/rustc_middle/src/ty/subst.rs785
-rw-r--r--compiler/rustc_middle/src/ty/trait_def.rs272
-rw-r--r--compiler/rustc_middle/src/ty/util.rs1294
-rw-r--r--compiler/rustc_middle/src/ty/visit.rs745
-rw-r--r--compiler/rustc_middle/src/ty/vtable.rs117
-rw-r--r--compiler/rustc_middle/src/ty/walk.rs207
-rw-r--r--compiler/rustc_middle/src/util/bug.rs54
-rw-r--r--compiler/rustc_middle/src/util/common.rs67
-rw-r--r--compiler/rustc_middle/src/util/common/tests.rs14
-rw-r--r--compiler/rustc_mir_build/Cargo.toml26
-rw-r--r--compiler/rustc_mir_build/src/build/block.rs240
-rw-r--r--compiler/rustc_mir_build/src/build/cfg.rs113
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_constant.rs152
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_operand.rs184
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_place.rs820
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_rvalue.rs694
-rw-r--r--compiler/rustc_mir_build/src/build/expr/as_temp.rs119
-rw-r--r--compiler/rustc_mir_build/src/build/expr/category.rs92
-rw-r--r--compiler/rustc_mir_build/src/build/expr/into.rs599
-rw-r--r--compiler/rustc_mir_build/src/build/expr/mod.rs70
-rw-r--r--compiler/rustc_mir_build/src/build/expr/stmt.rs149
-rw-r--r--compiler/rustc_mir_build/src/build/matches/mod.rs2354
-rw-r--r--compiler/rustc_mir_build/src/build/matches/simplify.rs318
-rw-r--r--compiler/rustc_mir_build/src/build/matches/test.rs837
-rw-r--r--compiler/rustc_mir_build/src/build/matches/util.rs109
-rw-r--r--compiler/rustc_mir_build/src/build/misc.rs75
-rw-r--r--compiler/rustc_mir_build/src/build/mod.rs1171
-rw-r--r--compiler/rustc_mir_build/src/build/scope.rs1395
-rw-r--r--compiler/rustc_mir_build/src/check_unsafety.rs680
-rw-r--r--compiler/rustc_mir_build/src/lib.rs35
-rw-r--r--compiler/rustc_mir_build/src/lints.rs166
-rw-r--r--compiler/rustc_mir_build/src/thir/constant.rs52
-rw-r--r--compiler/rustc_mir_build/src/thir/cx/block.rs126
-rw-r--r--compiler/rustc_mir_build/src/thir/cx/expr.rs1117
-rw-r--r--compiler/rustc_mir_build/src/thir/cx/mod.rs101
-rw-r--r--compiler/rustc_mir_build/src/thir/mod.rs13
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/check_match.rs1162
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs601
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs1711
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/mod.rs802
-rw-r--r--compiler/rustc_mir_build/src/thir/pattern/usefulness.rs978
-rw-r--r--compiler/rustc_mir_build/src/thir/util.rs31
-rw-r--r--compiler/rustc_mir_dataflow/Cargo.toml22
-rw-r--r--compiler/rustc_mir_dataflow/src/drop_flag_effects.rs268
-rw-r--r--compiler/rustc_mir_dataflow/src/elaborate_drops.rs1056
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/cursor.rs235
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/direction.rs656
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/engine.rs413
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/fmt.rs211
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/graphviz.rs667
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/lattice.rs252
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/mod.rs624
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/tests.rs322
-rw-r--r--compiler/rustc_mir_dataflow/src/framework/visitor.rs187
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs162
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/init_locals.rs122
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/liveness.rs297
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/mod.rs766
-rw-r--r--compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs300
-rw-r--r--compiler/rustc_mir_dataflow/src/lib.rs63
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs61
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/builder.rs559
-rw-r--r--compiler/rustc_mir_dataflow/src/move_paths/mod.rs422
-rw-r--r--compiler/rustc_mir_dataflow/src/rustc_peek.rs287
-rw-r--r--compiler/rustc_mir_dataflow/src/storage.rs20
-rw-r--r--compiler/rustc_mir_dataflow/src/un_derefer.rs22
-rw-r--r--compiler/rustc_mir_transform/Cargo.toml29
-rw-r--r--compiler/rustc_mir_transform/src/abort_unwinding_calls.rs140
-rw-r--r--compiler/rustc_mir_transform/src/add_call_guards.rs81
-rw-r--r--compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs107
-rw-r--r--compiler/rustc_mir_transform/src/add_retag.rs186
-rw-r--r--compiler/rustc_mir_transform/src/check_const_item_mutation.rs156
-rw-r--r--compiler/rustc_mir_transform/src/check_packed_ref.rs108
-rw-r--r--compiler/rustc_mir_transform/src/check_unsafety.rs619
-rw-r--r--compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs59
-rw-r--r--compiler/rustc_mir_transform/src/const_debuginfo.rs100
-rw-r--r--compiler/rustc_mir_transform/src/const_goto.rs117
-rw-r--r--compiler/rustc_mir_transform/src/const_prop.rs1142
-rw-r--r--compiler/rustc_mir_transform/src/const_prop_lint.rs734
-rw-r--r--compiler/rustc_mir_transform/src/coverage/counters.rs614
-rw-r--r--compiler/rustc_mir_transform/src/coverage/debug.rs831
-rw-r--r--compiler/rustc_mir_transform/src/coverage/graph.rs753
-rw-r--r--compiler/rustc_mir_transform/src/coverage/mod.rs580
-rw-r--r--compiler/rustc_mir_transform/src/coverage/query.rs170
-rw-r--r--compiler/rustc_mir_transform/src/coverage/spans.rs892
-rw-r--r--compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml8
-rw-r--r--compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs6
-rw-r--r--compiler/rustc_mir_transform/src/coverage/tests.rs710
-rw-r--r--compiler/rustc_mir_transform/src/dead_store_elimination.rs86
-rw-r--r--compiler/rustc_mir_transform/src/deaggregator.rs49
-rw-r--r--compiler/rustc_mir_transform/src/deduplicate_blocks.rs190
-rw-r--r--compiler/rustc_mir_transform/src/deref_separator.rs105
-rw-r--r--compiler/rustc_mir_transform/src/dest_prop.rs917
-rw-r--r--compiler/rustc_mir_transform/src/dump_mir.rs28
-rw-r--r--compiler/rustc_mir_transform/src/early_otherwise_branch.rs429
-rw-r--r--compiler/rustc_mir_transform/src/elaborate_box_derefs.rs184
-rw-r--r--compiler/rustc_mir_transform/src/elaborate_drops.rs613
-rw-r--r--compiler/rustc_mir_transform/src/ffi_unwind_calls.rs170
-rw-r--r--compiler/rustc_mir_transform/src/function_item_references.rs205
-rw-r--r--compiler/rustc_mir_transform/src/generator.rs1581
-rw-r--r--compiler/rustc_mir_transform/src/inline.rs1006
-rw-r--r--compiler/rustc_mir_transform/src/inline/cycle.rs168
-rw-r--r--compiler/rustc_mir_transform/src/instcombine.rs203
-rw-r--r--compiler/rustc_mir_transform/src/lib.rs575
-rw-r--r--compiler/rustc_mir_transform/src/lower_intrinsics.rs156
-rw-r--r--compiler/rustc_mir_transform/src/lower_slice_len.rs99
-rw-r--r--compiler/rustc_mir_transform/src/marker.rs20
-rw-r--r--compiler/rustc_mir_transform/src/match_branches.rs176
-rw-r--r--compiler/rustc_mir_transform/src/multiple_return_terminators.rs43
-rw-r--r--compiler/rustc_mir_transform/src/normalize_array_len.rs287
-rw-r--r--compiler/rustc_mir_transform/src/nrvo.rs236
-rw-r--r--compiler/rustc_mir_transform/src/pass_manager.rs157
-rw-r--r--compiler/rustc_mir_transform/src/remove_false_edges.rs29
-rw-r--r--compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs131
-rw-r--r--compiler/rustc_mir_transform/src/remove_storage_markers.rs29
-rw-r--r--compiler/rustc_mir_transform/src/remove_uninit_drops.rs171
-rw-r--r--compiler/rustc_mir_transform/src/remove_unneeded_drops.rs45
-rw-r--r--compiler/rustc_mir_transform/src/remove_zsts.rs86
-rw-r--r--compiler/rustc_mir_transform/src/required_consts.rs22
-rw-r--r--compiler/rustc_mir_transform/src/reveal_all.rs44
-rw-r--r--compiler/rustc_mir_transform/src/separate_const_switch.rs341
-rw-r--r--compiler/rustc_mir_transform/src/shim.rs790
-rw-r--r--compiler/rustc_mir_transform/src/simplify.rs590
-rw-r--r--compiler/rustc_mir_transform/src/simplify_branches.rs52
-rw-r--r--compiler/rustc_mir_transform/src/simplify_comparison_integral.rs242
-rw-r--r--compiler/rustc_mir_transform/src/simplify_try.rs822
-rw-r--r--compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs149
-rw-r--r--compiler/rustc_mir_transform/src/unreachable_prop.rs102
-rw-r--r--compiler/rustc_monomorphize/Cargo.toml18
-rw-r--r--compiler/rustc_monomorphize/src/collector.rs1463
-rw-r--r--compiler/rustc_monomorphize/src/lib.rs49
-rw-r--r--compiler/rustc_monomorphize/src/partitioning/default.rs560
-rw-r--r--compiler/rustc_monomorphize/src/partitioning/merging.rs111
-rw-r--r--compiler/rustc_monomorphize/src/partitioning/mod.rs515
-rw-r--r--compiler/rustc_monomorphize/src/polymorphize.rs385
-rw-r--r--compiler/rustc_monomorphize/src/util.rs70
-rw-r--r--compiler/rustc_parse/Cargo.toml22
-rw-r--r--compiler/rustc_parse/src/lexer/mod.rs717
-rw-r--r--compiler/rustc_parse/src/lexer/tokentrees.rs296
-rw-r--r--compiler/rustc_parse/src/lexer/unescape_error_reporting.rs381
-rw-r--r--compiler/rustc_parse/src/lexer/unicode_chars.rs386
-rw-r--r--compiler/rustc_parse/src/lib.rs291
-rw-r--r--compiler/rustc_parse/src/parser/attr.rs444
-rw-r--r--compiler/rustc_parse/src/parser/attr_wrapper.rs464
-rw-r--r--compiler/rustc_parse/src/parser/diagnostics.rs2740
-rw-r--r--compiler/rustc_parse/src/parser/expr.rs3288
-rw-r--r--compiler/rustc_parse/src/parser/generics.rs350
-rw-r--r--compiler/rustc_parse/src/parser/item.rs2426
-rw-r--r--compiler/rustc_parse/src/parser/mod.rs1481
-rw-r--r--compiler/rustc_parse/src/parser/nonterminal.rs203
-rw-r--r--compiler/rustc_parse/src/parser/pat.rs1151
-rw-r--r--compiler/rustc_parse/src/parser/path.rs754
-rw-r--r--compiler/rustc_parse/src/parser/stmt.rs648
-rw-r--r--compiler/rustc_parse/src/parser/ty.rs891
-rw-r--r--compiler/rustc_parse/src/validate_attr.rs200
-rw-r--r--compiler/rustc_parse_format/Cargo.toml7
-rw-r--r--compiler/rustc_parse_format/src/lib.rs884
-rw-r--r--compiler/rustc_parse_format/src/tests.rs374
-rw-r--r--compiler/rustc_passes/Cargo.toml24
-rw-r--r--compiler/rustc_passes/src/check_attr.rs2217
-rw-r--r--compiler/rustc_passes/src/check_const.rs236
-rw-r--r--compiler/rustc_passes/src/dead.rs960
-rw-r--r--compiler/rustc_passes/src/debugger_visualizer.rs99
-rw-r--r--compiler/rustc_passes/src/diagnostic_items.rs113
-rw-r--r--compiler/rustc_passes/src/entry.rs231
-rw-r--r--compiler/rustc_passes/src/errors.rs645
-rw-r--r--compiler/rustc_passes/src/hir_id_validator.rs164
-rw-r--r--compiler/rustc_passes/src/hir_stats.rs344
-rw-r--r--compiler/rustc_passes/src/lang_items.rs278
-rw-r--r--compiler/rustc_passes/src/layout_test.rs132
-rw-r--r--compiler/rustc_passes/src/lib.rs59
-rw-r--r--compiler/rustc_passes/src/lib_features.rs138
-rw-r--r--compiler/rustc_passes/src/liveness.rs1687
-rw-r--r--compiler/rustc_passes/src/liveness/rwu_table.rs145
-rw-r--r--compiler/rustc_passes/src/loops.rs287
-rw-r--r--compiler/rustc_passes/src/naked_functions.rs335
-rw-r--r--compiler/rustc_passes/src/reachable.rs417
-rw-r--r--compiler/rustc_passes/src/stability.rs1063
-rw-r--r--compiler/rustc_passes/src/upvars.rs97
-rw-r--r--compiler/rustc_passes/src/weak_lang_items.rs91
-rw-r--r--compiler/rustc_plugin_impl/Cargo.toml17
-rw-r--r--compiler/rustc_plugin_impl/src/lib.rs23
-rw-r--r--compiler/rustc_plugin_impl/src/load.rs84
-rw-r--r--compiler/rustc_privacy/Cargo.toml18
-rw-r--r--compiler/rustc_privacy/src/errors.rs92
-rw-r--r--compiler/rustc_privacy/src/lib.rs2093
-rw-r--r--compiler/rustc_query_impl/Cargo.toml26
-rw-r--r--compiler/rustc_query_impl/src/README.md3
-rw-r--r--compiler/rustc_query_impl/src/keys.rs545
-rw-r--r--compiler/rustc_query_impl/src/lib.rs63
-rw-r--r--compiler/rustc_query_impl/src/on_disk_cache.rs1012
-rw-r--r--compiler/rustc_query_impl/src/plumbing.rs558
-rw-r--r--compiler/rustc_query_impl/src/profiling_support.rs324
-rw-r--r--compiler/rustc_query_impl/src/values.rs45
-rw-r--r--compiler/rustc_query_system/Cargo.toml28
-rw-r--r--compiler/rustc_query_system/src/cache.rs53
-rw-r--r--compiler/rustc_query_system/src/dep_graph/README.md4
-rw-r--r--compiler/rustc_query_system/src/dep_graph/debug.rs63
-rw-r--r--compiler/rustc_query_system/src/dep_graph/dep_node.rs176
-rw-r--r--compiler/rustc_query_system/src/dep_graph/graph.rs1288
-rw-r--r--compiler/rustc_query_system/src/dep_graph/mod.rs106
-rw-r--r--compiler/rustc_query_system/src/dep_graph/query.rs68
-rw-r--r--compiler/rustc_query_system/src/dep_graph/serialized.rs330
-rw-r--r--compiler/rustc_query_system/src/ich/hcx.rs223
-rw-r--r--compiler/rustc_query_system/src/ich/impls_hir.rs42
-rw-r--r--compiler/rustc_query_system/src/ich/impls_syntax.rs150
-rw-r--r--compiler/rustc_query_system/src/ich/mod.rs19
-rw-r--r--compiler/rustc_query_system/src/lib.rs19
-rw-r--r--compiler/rustc_query_system/src/query/README.md3
-rw-r--r--compiler/rustc_query_system/src/query/caches.rs226
-rw-r--r--compiler/rustc_query_system/src/query/config.rs75
-rw-r--r--compiler/rustc_query_system/src/query/job.rs612
-rw-r--r--compiler/rustc_query_system/src/query/mod.rs125
-rw-r--r--compiler/rustc_query_system/src/query/plumbing.rs742
-rw-r--r--compiler/rustc_resolve/Cargo.toml27
-rw-r--r--compiler/rustc_resolve/src/access_levels.rs237
-rw-r--r--compiler/rustc_resolve/src/build_reduced_graph.rs1546
-rw-r--r--compiler/rustc_resolve/src/check_unused.rs350
-rw-r--r--compiler/rustc_resolve/src/def_collector.rs354
-rw-r--r--compiler/rustc_resolve/src/diagnostics.rs2714
-rw-r--r--compiler/rustc_resolve/src/diagnostics/tests.rs40
-rw-r--r--compiler/rustc_resolve/src/ident.rs1556
-rw-r--r--compiler/rustc_resolve/src/imports.rs1151
-rw-r--r--compiler/rustc_resolve/src/late.rs3984
-rw-r--r--compiler/rustc_resolve/src/late/diagnostics.rs2369
-rw-r--r--compiler/rustc_resolve/src/late/lifetimes.rs2144
-rw-r--r--compiler/rustc_resolve/src/lib.rs2089
-rw-r--r--compiler/rustc_resolve/src/macros.rs921
-rw-r--r--compiler/rustc_save_analysis/Cargo.toml19
-rw-r--r--compiler/rustc_save_analysis/src/dump_visitor.rs1463
-rw-r--r--compiler/rustc_save_analysis/src/dumper.rs91
-rw-r--r--compiler/rustc_save_analysis/src/lib.rs1075
-rw-r--r--compiler/rustc_save_analysis/src/sig.rs917
-rw-r--r--compiler/rustc_save_analysis/src/span_utils.rs102
-rw-r--r--compiler/rustc_serialize/Cargo.toml11
-rw-r--r--compiler/rustc_serialize/src/collection_impls.rs267
-rw-r--r--compiler/rustc_serialize/src/leb128.rs163
-rw-r--r--compiler/rustc_serialize/src/lib.rs28
-rw-r--r--compiler/rustc_serialize/src/opaque.rs750
-rw-r--r--compiler/rustc_serialize/src/serialize.rs469
-rw-r--r--compiler/rustc_serialize/tests/leb128.rs91
-rw-r--r--compiler/rustc_serialize/tests/opaque.rs277
-rw-r--r--compiler/rustc_session/Cargo.toml20
-rw-r--r--compiler/rustc_session/src/cgu_reuse_tracker.rs118
-rw-r--r--compiler/rustc_session/src/code_stats.rs182
-rw-r--r--compiler/rustc_session/src/config.rs2970
-rw-r--r--compiler/rustc_session/src/cstore.rs218
-rw-r--r--compiler/rustc_session/src/filesearch.rs125
-rw-r--r--compiler/rustc_session/src/lib.rs40
-rw-r--r--compiler/rustc_session/src/options.rs1673
-rw-r--r--compiler/rustc_session/src/output.rs202
-rw-r--r--compiler/rustc_session/src/parse.rs326
-rw-r--r--compiler/rustc_session/src/search_paths.rs93
-rw-r--r--compiler/rustc_session/src/session.rs1599
-rw-r--r--compiler/rustc_session/src/utils.rs93
-rw-r--r--compiler/rustc_smir/Cargo.toml28
-rw-r--r--compiler/rustc_smir/README.md75
-rw-r--r--compiler/rustc_smir/rust-toolchain.toml3
-rw-r--r--compiler/rustc_smir/src/lib.rs17
-rw-r--r--compiler/rustc_smir/src/mir.rs10
-rw-r--r--compiler/rustc_smir/src/very_unstable.rs27
-rw-r--r--compiler/rustc_span/Cargo.toml21
-rw-r--r--compiler/rustc_span/src/analyze_source_file.rs274
-rw-r--r--compiler/rustc_span/src/analyze_source_file/tests.rs142
-rw-r--r--compiler/rustc_span/src/caching_source_map_view.rs293
-rw-r--r--compiler/rustc_span/src/def_id.rs444
-rw-r--r--compiler/rustc_span/src/edition.rs110
-rw-r--r--compiler/rustc_span/src/fatal_error.rs26
-rw-r--r--compiler/rustc_span/src/hygiene.rs1528
-rw-r--r--compiler/rustc_span/src/lev_distance.rs177
-rw-r--r--compiler/rustc_span/src/lev_distance/tests.rs71
-rw-r--r--compiler/rustc_span/src/lib.rs2116
-rw-r--r--compiler/rustc_span/src/profiling.rs35
-rw-r--r--compiler/rustc_span/src/source_map.rs1281
-rw-r--r--compiler/rustc_span/src/source_map/tests.rs481
-rw-r--r--compiler/rustc_span/src/span_encoding.rs150
-rw-r--r--compiler/rustc_span/src/symbol.rs2067
-rw-r--r--compiler/rustc_span/src/symbol/tests.rs25
-rw-r--r--compiler/rustc_span/src/tests.rs43
-rw-r--r--compiler/rustc_symbol_mangling/Cargo.toml20
-rw-r--r--compiler/rustc_symbol_mangling/src/legacy.rs464
-rw-r--r--compiler/rustc_symbol_mangling/src/lib.rs277
-rw-r--r--compiler/rustc_symbol_mangling/src/test.rs74
-rw-r--r--compiler/rustc_symbol_mangling/src/typeid.rs18
-rw-r--r--compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs929
-rw-r--r--compiler/rustc_symbol_mangling/src/v0.rs844
-rw-r--r--compiler/rustc_target/Cargo.toml14
-rw-r--r--compiler/rustc_target/README.md6
-rw-r--r--compiler/rustc_target/src/abi/call/aarch64.rs86
-rw-r--r--compiler/rustc_target/src/abi/call/amdgpu.rs35
-rw-r--r--compiler/rustc_target/src/abi/call/arm.rs97
-rw-r--r--compiler/rustc_target/src/abi/call/avr.rs59
-rw-r--r--compiler/rustc_target/src/abi/call/bpf.rs31
-rw-r--r--compiler/rustc_target/src/abi/call/hexagon.rs30
-rw-r--r--compiler/rustc_target/src/abi/call/m68k.rs30
-rw-r--r--compiler/rustc_target/src/abi/call/mips.rs51
-rw-r--r--compiler/rustc_target/src/abi/call/mips64.rs167
-rw-r--r--compiler/rustc_target/src/abi/call/mod.rs734
-rw-r--r--compiler/rustc_target/src/abi/call/msp430.rs39
-rw-r--r--compiler/rustc_target/src/abi/call/nvptx.rs33
-rw-r--r--compiler/rustc_target/src/abi/call/nvptx64.rs64
-rw-r--r--compiler/rustc_target/src/abi/call/powerpc.rs30
-rw-r--r--compiler/rustc_target/src/abi/call/powerpc64.rs141
-rw-r--r--compiler/rustc_target/src/abi/call/riscv.rs348
-rw-r--r--compiler/rustc_target/src/abi/call/s390x.rs57
-rw-r--r--compiler/rustc_target/src/abi/call/sparc.rs51
-rw-r--r--compiler/rustc_target/src/abi/call/sparc64.rs226
-rw-r--r--compiler/rustc_target/src/abi/call/wasm.rs83
-rw-r--r--compiler/rustc_target/src/abi/call/x86.rs117
-rw-r--r--compiler/rustc_target/src/abi/call/x86_64.rs248
-rw-r--r--compiler/rustc_target/src/abi/call/x86_win64.rs40
-rw-r--r--compiler/rustc_target/src/abi/mod.rs1558
-rw-r--r--compiler/rustc_target/src/asm/aarch64.rs200
-rw-r--r--compiler/rustc_target/src/asm/arm.rs340
-rw-r--r--compiler/rustc_target/src/asm/avr.rs197
-rw-r--r--compiler/rustc_target/src/asm/bpf.rs118
-rw-r--r--compiler/rustc_target/src/asm/hexagon.rs95
-rw-r--r--compiler/rustc_target/src/asm/mips.rs135
-rw-r--r--compiler/rustc_target/src/asm/mod.rs976
-rw-r--r--compiler/rustc_target/src/asm/msp430.rs81
-rw-r--r--compiler/rustc_target/src/asm/nvptx.rs50
-rw-r--r--compiler/rustc_target/src/asm/powerpc.rs204
-rw-r--r--compiler/rustc_target/src/asm/riscv.rs185
-rw-r--r--compiler/rustc_target/src/asm/s390x.rs107
-rw-r--r--compiler/rustc_target/src/asm/spirv.rs47
-rw-r--r--compiler/rustc_target/src/asm/wasm.rs47
-rw-r--r--compiler/rustc_target/src/asm/x86.rs492
-rw-r--r--compiler/rustc_target/src/json.rs91
-rw-r--r--compiler/rustc_target/src/lib.rs88
-rw-r--r--compiler/rustc_target/src/spec/aarch64_apple_darwin.rs30
-rw-r--r--compiler/rustc_target/src/spec/aarch64_apple_ios.rs36
-rw-r--r--compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs32
-rw-r--r--compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs38
-rw-r--r--compiler/rustc_target/src/spec/aarch64_apple_tvos.rs18
-rw-r--r--compiler/rustc_target/src/spec/aarch64_apple_watchos_sim.rs38
-rw-r--r--compiler/rustc_target/src/spec/aarch64_be_unknown_linux_gnu.rs18
-rw-r--r--compiler/rustc_target/src/spec/aarch64_be_unknown_linux_gnu_ilp32.rs21
-rw-r--r--compiler/rustc_target/src/spec/aarch64_fuchsia.rs15
-rw-r--r--compiler/rustc_target/src/spec/aarch64_kmc_solid_asp3.rs19
-rw-r--r--compiler/rustc_target/src/spec/aarch64_linux_android.rs25
-rw-r--r--compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding.rs26
-rw-r--r--compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding_linker_script.ld78
-rw-r--r--compiler/rustc_target/src/spec/aarch64_pc_windows_gnullvm.rs16
-rw-r--r--compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs15
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_freebsd.rs18
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs15
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs23
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu_ilp32.rs17
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs14
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs15
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_none.rs29
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs30
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_openbsd.rs11
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_redox.rs14
-rw-r--r--compiler/rustc_target/src/spec/aarch64_unknown_uefi.rs20
-rw-r--r--compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs14
-rw-r--r--compiler/rustc_target/src/spec/aarch64_wrs_vxworks.rs11
-rw-r--r--compiler/rustc_target/src/spec/abi.rs175
-rw-r--r--compiler/rustc_target/src/spec/abi/tests.rs27
-rw-r--r--compiler/rustc_target/src/spec/android_base.rs15
-rw-r--r--compiler/rustc_target/src/spec/apple_base.rs143
-rw-r--r--compiler/rustc_target/src/spec/apple_sdk_base.rs113
-rw-r--r--compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs28
-rw-r--r--compiler/rustc_target/src/spec/arm_linux_androideabi.rs18
-rw-r--r--compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs17
-rw-r--r--compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs17
-rw-r--r--compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs22
-rw-r--r--compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs22
-rw-r--r--compiler/rustc_target/src/spec/armebv7r_none_eabi.rs27
-rw-r--r--compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs28
-rw-r--r--compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs19
-rw-r--r--compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs19
-rw-r--r--compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs23
-rw-r--r--compiler/rustc_target/src/spec/armv5te_unknown_linux_uclibceabi.rs19
-rw-r--r--compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs19
-rw-r--r--compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs19
-rw-r--r--compiler/rustc_target/src/spec/armv6k_nintendo_3ds.rs38
-rw-r--r--compiler/rustc_target/src/spec/armv7_apple_ios.rs18
-rw-r--r--compiler/rustc_target/src/spec/armv7_linux_androideabi.rs27
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs19
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs20
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs21
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs26
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs25
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_linux_uclibceabi.rs23
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_linux_uclibceabihf.rs24
-rw-r--r--compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs19
-rw-r--r--compiler/rustc_target/src/spec/armv7_wrs_vxworks_eabihf.rs17
-rw-r--r--compiler/rustc_target/src/spec/armv7a_kmc_solid_asp3_eabi.rs19
-rw-r--r--compiler/rustc_target/src/spec/armv7a_kmc_solid_asp3_eabihf.rs19
-rw-r--r--compiler/rustc_target/src/spec/armv7a_none_eabi.rs40
-rw-r--r--compiler/rustc_target/src/spec/armv7a_none_eabihf.rs32
-rw-r--r--compiler/rustc_target/src/spec/armv7k_apple_watchos.rs28
-rw-r--r--compiler/rustc_target/src/spec/armv7r_none_eabi.rs26
-rw-r--r--compiler/rustc_target/src/spec/armv7r_none_eabihf.rs27
-rw-r--r--compiler/rustc_target/src/spec/armv7s_apple_ios.rs16
-rw-r--r--compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs7
-rw-r--r--compiler/rustc_target/src/spec/avr_gnu_base.rs27
-rw-r--r--compiler/rustc_target/src/spec/avr_unknown_gnu_atmega328.rs5
-rw-r--r--compiler/rustc_target/src/spec/bpf_base.rs25
-rw-r--r--compiler/rustc_target/src/spec/bpfeb_unknown_none.rs12
-rw-r--r--compiler/rustc_target/src/spec/bpfel_unknown_none.rs12
-rw-r--r--compiler/rustc_target/src/spec/crt_objects.rs157
-rw-r--r--compiler/rustc_target/src/spec/dragonfly_base.rs14
-rw-r--r--compiler/rustc_target/src/spec/freebsd_base.rs15
-rw-r--r--compiler/rustc_target/src/spec/fuchsia_base.rs38
-rw-r--r--compiler/rustc_target/src/spec/haiku_base.rs11
-rw-r--r--compiler/rustc_target/src/spec/hermit_base.rs21
-rw-r--r--compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs30
-rw-r--r--compiler/rustc_target/src/spec/i386_apple_ios.rs22
-rw-r--r--compiler/rustc_target/src/spec/i386_unknown_linux_gnu.rs8
-rw-r--r--compiler/rustc_target/src/spec/i486_unknown_linux_gnu.rs8
-rw-r--r--compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs8
-rw-r--r--compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs8
-rw-r--r--compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs8
-rw-r--r--compiler/rustc_target/src/spec/i686_apple_darwin.rs28
-rw-r--r--compiler/rustc_target/src/spec/i686_linux_android.rs26
-rw-r--r--compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs24
-rw-r--r--compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs32
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_freebsd.rs20
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_haiku.rs20
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs20
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs34
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_netbsd.rs20
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_openbsd.rs20
-rw-r--r--compiler/rustc_target/src/spec/i686_unknown_uefi.rs89
-rw-r--r--compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs23
-rw-r--r--compiler/rustc_target/src/spec/i686_uwp_windows_msvc.rs17
-rw-r--r--compiler/rustc_target/src/spec/i686_wrs_vxworks.rs20
-rw-r--r--compiler/rustc_target/src/spec/illumos_base.rs59
-rw-r--r--compiler/rustc_target/src/spec/l4re_base.rs14
-rw-r--r--compiler/rustc_target/src/spec/linux_base.rs15
-rw-r--r--compiler/rustc_target/src/spec/linux_gnu_base.rs5
-rw-r--r--compiler/rustc_target/src/spec/linux_kernel_base.rs19
-rw-r--r--compiler/rustc_target/src/spec/linux_musl_base.rs16
-rw-r--r--compiler/rustc_target/src/spec/linux_uclibc_base.rs5
-rw-r--r--compiler/rustc_target/src/spec/m68k_unknown_linux_gnu.rs15
-rw-r--r--compiler/rustc_target/src/spec/mips64_openwrt_linux_musl.rs26
-rw-r--r--compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs22
-rw-r--r--compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs22
-rw-r--r--compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs20
-rw-r--r--compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs16
-rw-r--r--compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs20
-rw-r--r--compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs17
-rw-r--r--compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs20
-rw-r--r--compiler/rustc_target/src/spec/mipsel_sony_psp.rs34
-rw-r--r--compiler/rustc_target/src/spec/mipsel_sony_psp_linker_script.ld45
-rw-r--r--compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs19
-rw-r--r--compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs16
-rw-r--r--compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs19
-rw-r--r--compiler/rustc_target/src/spec/mipsel_unknown_none.rs27
-rw-r--r--compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs20
-rw-r--r--compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs19
-rw-r--r--compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs22
-rw-r--r--compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs20
-rw-r--r--compiler/rustc_target/src/spec/mod.rs2585
-rw-r--r--compiler/rustc_target/src/spec/msp430_none_elf.rs59
-rw-r--r--compiler/rustc_target/src/spec/msvc_base.rs24
-rw-r--r--compiler/rustc_target/src/spec/netbsd_base.rs16
-rw-r--r--compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs53
-rw-r--r--compiler/rustc_target/src/spec/openbsd_base.rs16
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs17
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs21
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs17
-rw-r--r--compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs17
-rw-r--r--compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs16
-rw-r--r--compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs16
-rw-r--r--compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs16
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs23
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs16
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs21
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs16
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs16
-rw-r--r--compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs16
-rw-r--r--compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs16
-rw-r--r--compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs22
-rw-r--r--compiler/rustc_target/src/spec/redox_base.rs17
-rw-r--r--compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs18
-rw-r--r--compiler/rustc_target/src/spec/riscv32gc_unknown_linux_musl.rs18
-rw-r--r--compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs24
-rw-r--r--compiler/rustc_target/src/spec/riscv32im_unknown_none_elf.rs25
-rw-r--r--compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs24
-rw-r--r--compiler/rustc_target/src/spec/riscv32imac_unknown_xous_elf.rs23
-rw-r--r--compiler/rustc_target/src/spec/riscv32imc_esp_espidf.rs36
-rw-r--r--compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs25
-rw-r--r--compiler/rustc_target/src/spec/riscv64gc_unknown_freebsd.rs18
-rw-r--r--compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs18
-rw-r--r--compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs18
-rw-r--r--compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs26
-rw-r--r--compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs25
-rw-r--r--compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs23
-rw-r--r--compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs24
-rw-r--r--compiler/rustc_target/src/spec/solaris_base.rs16
-rw-r--r--compiler/rustc_target/src/spec/solid_base.rs13
-rw-r--r--compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs17
-rw-r--r--compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs17
-rw-r--r--compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs18
-rw-r--r--compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs18
-rw-r--r--compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs24
-rw-r--r--compiler/rustc_target/src/spec/tests/tests_impl.rs135
-rw-r--r--compiler/rustc_target/src/spec/thumb_base.rs60
-rw-r--r--compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs69
-rw-r--r--compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs23
-rw-r--r--compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs28
-rw-r--r--compiler/rustc_target/src/spec/thumbv7a_uwp_windows_msvc.rs18
-rw-r--r--compiler/rustc_target/src/spec/thumbv7em_none_eabi.rs27
-rw-r--r--compiler/rustc_target/src/spec/thumbv7em_none_eabihf.rs36
-rw-r--r--compiler/rustc_target/src/spec/thumbv7m_none_eabi.rs18
-rw-r--r--compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs26
-rw-r--r--compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs23
-rw-r--r--compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs29
-rw-r--r--compiler/rustc_target/src/spec/thumbv8m_base_none_eabi.rs21
-rw-r--r--compiler/rustc_target/src/spec/thumbv8m_main_none_eabi.rs19
-rw-r--r--compiler/rustc_target/src/spec/thumbv8m_main_none_eabihf.rs25
-rw-r--r--compiler/rustc_target/src/spec/uefi_msvc_base.rs51
-rw-r--r--compiler/rustc_target/src/spec/vxworks_base.rs21
-rw-r--r--compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs34
-rw-r--r--compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs64
-rw-r--r--compiler/rustc_target/src/spec/wasm32_wasi.rs112
-rw-r--r--compiler/rustc_target/src/spec/wasm64_unknown_unknown.rs49
-rw-r--r--compiler/rustc_target/src/spec/wasm_base.rs129
-rw-r--r--compiler/rustc_target/src/spec/windows_gnu_base.rs91
-rw-r--r--compiler/rustc_target/src/spec/windows_gnullvm_base.rs40
-rw-r--r--compiler/rustc_target/src/spec/windows_msvc_base.rs34
-rw-r--r--compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs34
-rw-r--r--compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs11
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_darwin.rs30
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_ios.rs21
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs23
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_tvos.rs18
-rw-r--r--compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs35
-rw-r--r--compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs84
-rw-r--r--compiler/rustc_target/src/spec/x86_64_fuchsia.rs19
-rw-r--r--compiler/rustc_target/src/spec/x86_64_linux_android.rs21
-rw-r--r--compiler/rustc_target/src/spec/x86_64_pc_solaris.rs21
-rw-r--r--compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs20
-rw-r--r--compiler/rustc_target/src/spec/x86_64_pc_windows_gnullvm.rs18
-rw-r--r--compiler/rustc_target/src/spec/x86_64_pc_windows_msvc.rs16
-rw-r--r--compiler/rustc_target/src/spec/x86_64_sun_solaris.rs20
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs19
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs21
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs21
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs19
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs20
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs19
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs25
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs25
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs25
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs24
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_none.rs40
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs28
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs19
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_redox.rs19
-rw-r--r--compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs36
-rw-r--r--compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs19
-rw-r--r--compiler/rustc_target/src/spec/x86_64_uwp_windows_msvc.rs16
-rw-r--r--compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs20
-rw-r--r--compiler/rustc_target/src/tests.rs57
-rw-r--r--compiler/rustc_trait_selection/Cargo.toml27
-rw-r--r--compiler/rustc_trait_selection/src/autoderef.rs240
-rw-r--r--compiler/rustc_trait_selection/src/infer.rs177
-rw-r--r--compiler/rustc_trait_selection/src/lib.rs40
-rw-r--r--compiler/rustc_trait_selection/src/traits/auto_trait.rs903
-rw-r--r--compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs163
-rw-r--r--compiler/rustc_trait_selection/src/traits/codegen.rs80
-rw-r--r--compiler/rustc_trait_selection/src/traits/coherence.rs747
-rw-r--r--compiler/rustc_trait_selection/src/traits/const_evaluatable.rs308
-rw-r--r--compiler/rustc_trait_selection/src/traits/engine.rs112
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs2765
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs272
-rw-r--r--compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs3119
-rw-r--r--compiler/rustc_trait_selection/src/traits/fulfill.rs757
-rw-r--r--compiler/rustc_trait_selection/src/traits/misc.rs88
-rw-r--r--compiler/rustc_trait_selection/src/traits/mod.rs863
-rw-r--r--compiler/rustc_trait_selection/src/traits/object_safety.rs866
-rw-r--r--compiler/rustc_trait_selection/src/traits/on_unimplemented.rs425
-rw-r--r--compiler/rustc_trait_selection/src/traits/project.rs2150
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs73
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs118
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/method_autoderef.rs3
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/mod.rs14
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/normalize.rs354
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs23
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs117
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/eq.rs23
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs42
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs168
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/normalize.rs68
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs55
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/prove_predicate.rs43
-rw-r--r--compiler/rustc_trait_selection/src/traits/query/type_op/subtype.rs20
-rw-r--r--compiler/rustc_trait_selection/src/traits/relationships.rs58
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs1009
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/confirmation.rs1266
-rw-r--r--compiler/rustc_trait_selection/src/traits/select/mod.rs2698
-rw-r--r--compiler/rustc_trait_selection/src/traits/specialize/mod.rs531
-rw-r--r--compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs395
-rw-r--r--compiler/rustc_trait_selection/src/traits/structural_match.rs273
-rw-r--r--compiler/rustc_trait_selection/src/traits/util.rs368
-rw-r--r--compiler/rustc_trait_selection/src/traits/wf.rs888
-rw-r--r--compiler/rustc_traits/Cargo.toml20
-rw-r--r--compiler/rustc_traits/src/chalk/db.rs796
-rw-r--r--compiler/rustc_traits/src/chalk/lowering.rs1172
-rw-r--r--compiler/rustc_traits/src/chalk/mod.rs176
-rw-r--r--compiler/rustc_traits/src/dropck_outlives.rs348
-rw-r--r--compiler/rustc_traits/src/evaluate_obligation.rs34
-rw-r--r--compiler/rustc_traits/src/implied_outlives_bounds.rs172
-rw-r--r--compiler/rustc_traits/src/lib.rs32
-rw-r--r--compiler/rustc_traits/src/normalize_erasing_regions.rs73
-rw-r--r--compiler/rustc_traits/src/normalize_projection_ty.rs45
-rw-r--r--compiler/rustc_traits/src/type_op.rs283
-rw-r--r--compiler/rustc_transmute/Cargo.toml28
-rw-r--r--compiler/rustc_transmute/src/layout/dfa.rs184
-rw-r--r--compiler/rustc_transmute/src/layout/mod.rs71
-rw-r--r--compiler/rustc_transmute/src/layout/nfa.rs179
-rw-r--r--compiler/rustc_transmute/src/layout/tree.rs471
-rw-r--r--compiler/rustc_transmute/src/layout/tree/tests.rs80
-rw-r--r--compiler/rustc_transmute/src/lib.rs117
-rw-r--r--compiler/rustc_transmute/src/maybe_transmutable/mod.rs320
-rw-r--r--compiler/rustc_transmute/src/maybe_transmutable/query_context.rs93
-rw-r--r--compiler/rustc_transmute/src/maybe_transmutable/tests.rs115
-rw-r--r--compiler/rustc_ty_utils/Cargo.toml18
-rw-r--r--compiler/rustc_ty_utils/src/assoc.rs111
-rw-r--r--compiler/rustc_ty_utils/src/common_traits.rs51
-rw-r--r--compiler/rustc_ty_utils/src/consts.rs469
-rw-r--r--compiler/rustc_ty_utils/src/instance.rs407
-rw-r--r--compiler/rustc_ty_utils/src/lib.rs36
-rw-r--r--compiler/rustc_ty_utils/src/needs_drop.rs323
-rw-r--r--compiler/rustc_ty_utils/src/representability.rs386
-rw-r--r--compiler/rustc_ty_utils/src/ty.rs481
-rw-r--r--compiler/rustc_type_ir/Cargo.toml15
-rw-r--r--compiler/rustc_type_ir/src/codec.rs64
-rw-r--r--compiler/rustc_type_ir/src/lib.rs856
-rw-r--r--compiler/rustc_type_ir/src/sty.rs1329
-rw-r--r--compiler/rustc_typeck/Cargo.toml32
-rw-r--r--compiler/rustc_typeck/README.md5
-rw-r--r--compiler/rustc_typeck/src/astconv/errors.rs410
-rw-r--r--compiler/rustc_typeck/src/astconv/generics.rs664
-rw-r--r--compiler/rustc_typeck/src/astconv/mod.rs3091
-rw-r--r--compiler/rustc_typeck/src/bounds.rs90
-rw-r--r--compiler/rustc_typeck/src/check/_match.rs529
-rw-r--r--compiler/rustc_typeck/src/check/autoderef.rs78
-rw-r--r--compiler/rustc_typeck/src/check/callee.rs675
-rw-r--r--compiler/rustc_typeck/src/check/cast.rs1072
-rw-r--r--compiler/rustc_typeck/src/check/check.rs1712
-rw-r--r--compiler/rustc_typeck/src/check/closure.rs805
-rw-r--r--compiler/rustc_typeck/src/check/coercion.rs1804
-rw-r--r--compiler/rustc_typeck/src/check/compare_method.rs1547
-rw-r--r--compiler/rustc_typeck/src/check/demand.rs1442
-rw-r--r--compiler/rustc_typeck/src/check/diverges.rs78
-rw-r--r--compiler/rustc_typeck/src/check/dropck.rs327
-rw-r--r--compiler/rustc_typeck/src/check/expectation.rs122
-rw-r--r--compiler/rustc_typeck/src/check/expr.rs2824
-rw-r--r--compiler/rustc_typeck/src/check/fallback.rs398
-rw-r--r--compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs1510
-rw-r--r--compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs376
-rw-r--r--compiler/rustc_typeck/src/check/fn_ctxt/checks.rs1900
-rw-r--r--compiler/rustc_typeck/src/check/fn_ctxt/mod.rs296
-rw-r--r--compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs912
-rw-r--r--compiler/rustc_typeck/src/check/gather_locals.rs160
-rw-r--r--compiler/rustc_typeck/src/check/generator_interior.rs632
-rw-r--r--compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs309
-rw-r--r--compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs560
-rw-r--r--compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs92
-rw-r--r--compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs91
-rw-r--r--compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs232
-rw-r--r--compiler/rustc_typeck/src/check/inherited.rs183
-rw-r--r--compiler/rustc_typeck/src/check/intrinsic.rs517
-rw-r--r--compiler/rustc_typeck/src/check/intrinsicck.rs530
-rw-r--r--compiler/rustc_typeck/src/check/method/confirm.rs582
-rw-r--r--compiler/rustc_typeck/src/check/method/mod.rs658
-rw-r--r--compiler/rustc_typeck/src/check/method/prelude2021.rs419
-rw-r--r--compiler/rustc_typeck/src/check/method/probe.rs1932
-rw-r--r--compiler/rustc_typeck/src/check/method/suggest.rs2286
-rw-r--r--compiler/rustc_typeck/src/check/mod.rs970
-rw-r--r--compiler/rustc_typeck/src/check/op.rs1076
-rw-r--r--compiler/rustc_typeck/src/check/pat.rs2142
-rw-r--r--compiler/rustc_typeck/src/check/place_op.rs451
-rw-r--r--compiler/rustc_typeck/src/check/region.rs837
-rw-r--r--compiler/rustc_typeck/src/check/regionck.rs47
-rw-r--r--compiler/rustc_typeck/src/check/rvalue_scopes.rs83
-rw-r--r--compiler/rustc_typeck/src/check/upvar.rs2272
-rw-r--r--compiler/rustc_typeck/src/check/wfcheck.rs1973
-rw-r--r--compiler/rustc_typeck/src/check/writeback.rs783
-rw-r--r--compiler/rustc_typeck/src/check_unused.rs196
-rw-r--r--compiler/rustc_typeck/src/coherence/builtin.rs603
-rw-r--r--compiler/rustc_typeck/src/coherence/inherent_impls.rs249
-rw-r--r--compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs307
-rw-r--r--compiler/rustc_typeck/src/coherence/mod.rs237
-rw-r--r--compiler/rustc_typeck/src/coherence/orphan.rs507
-rw-r--r--compiler/rustc_typeck/src/coherence/unsafety.rs66
-rw-r--r--compiler/rustc_typeck/src/collect.rs3361
-rw-r--r--compiler/rustc_typeck/src/collect/item_bounds.rs102
-rw-r--r--compiler/rustc_typeck/src/collect/type_of.rs877
-rw-r--r--compiler/rustc_typeck/src/constrained_generic_params.rs221
-rw-r--r--compiler/rustc_typeck/src/errors.rs326
-rw-r--r--compiler/rustc_typeck/src/expr_use_visitor.rs914
-rw-r--r--compiler/rustc_typeck/src/hir_wf_check.rs188
-rw-r--r--compiler/rustc_typeck/src/impl_wf_check.rs228
-rw-r--r--compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs439
-rw-r--r--compiler/rustc_typeck/src/lib.rs579
-rw-r--r--compiler/rustc_typeck/src/mem_categorization.rs786
-rw-r--r--compiler/rustc_typeck/src/outlives/explicit.rs69
-rw-r--r--compiler/rustc_typeck/src/outlives/implicit_infer.rs300
-rw-r--r--compiler/rustc_typeck/src/outlives/mod.rs130
-rw-r--r--compiler/rustc_typeck/src/outlives/outlives_bounds.rs90
-rw-r--r--compiler/rustc_typeck/src/outlives/test.rs21
-rw-r--r--compiler/rustc_typeck/src/outlives/utils.rs175
-rw-r--r--compiler/rustc_typeck/src/structured_errors.rs42
-rw-r--r--compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs61
-rw-r--r--compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs62
-rw-r--r--compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs890
-rw-r--r--compiler/rustc_typeck/src/variance/constraints.rs449
-rw-r--r--compiler/rustc_typeck/src/variance/mod.rs63
-rw-r--r--compiler/rustc_typeck/src/variance/solve.rs135
-rw-r--r--compiler/rustc_typeck/src/variance/terms.rs145
-rw-r--r--compiler/rustc_typeck/src/variance/test.rs14
-rw-r--r--compiler/rustc_typeck/src/variance/xform.rs22
2111 files changed, 592314 insertions, 0 deletions
diff --git a/compiler/rustc/Cargo.toml b/compiler/rustc/Cargo.toml
new file mode 100644
index 000000000..27ee3dd2a
--- /dev/null
+++ b/compiler/rustc/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "rustc-main"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+rustc_driver = { path = "../rustc_driver" }
+
+# Make sure rustc_codegen_ssa ends up in the sysroot, because this
+# crate is intended to be used by codegen backends, which may not be in-tree.
+rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
+# Make sure rustc_smir ends up in the sysroot, because this
+# crate is intended to be used by stable MIR consumers, which are not in-tree.
+rustc_smir = { path = "../rustc_smir" }
+
+[dependencies.jemalloc-sys]
+version = "0.5.0"
+optional = true
+features = ['unprefixed_malloc_on_supported_platforms']
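+# On supported platforms this feature builds jemalloc without its usual
+# symbol prefix, so its `malloc`/`free` definitions can override the libc
+# allocator (see the long comment in src/main.rs).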
+
+[features]
+jemalloc = ['jemalloc-sys']
+llvm = ['rustc_driver/llvm']
+max_level_info = ['rustc_driver/max_level_info']
+rustc_use_parallel_compiler = ['rustc_driver/rustc_use_parallel_compiler']
diff --git a/compiler/rustc/Windows Manifest.xml b/compiler/rustc/Windows Manifest.xml
new file mode 100644
index 000000000..b37a2fd4c
--- /dev/null
+++ b/compiler/rustc/Windows Manifest.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!--
+This is a Windows application manifest file.
+See: https://docs.microsoft.com/en-us/windows/win32/sbscs/application-manifests
+-->
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0" xmlns:asmv3="urn:schemas-microsoft-com:asm.v3">
+ <!-- Versions rustc supports as compiler hosts -->
+ <compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
+ <application>
+ <!-- Windows 7 --><supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
+ <!-- Windows 8 --><supportedOS Id="{4a2f28e3-53b9-4441-ba9c-d69d4a4a6e38}"/>
+ <!-- Windows 8.1 --><supportedOS Id="{1f676c76-80e1-4239-95bb-83d0f6d0da78}"/>
+ <!-- Windows 10 and 11 --><supportedOS Id="{8e0f7a12-bfb3-4fe8-b9a5-48fd50a15a9a}"/>
+ </application>
+ </compatibility>
+ <!-- Use UTF-8 code page -->
+ <asmv3:application>
+ <asmv3:windowsSettings xmlns="http://schemas.microsoft.com/SMI/2019/WindowsSettings">
+ <activeCodePage>UTF-8</activeCodePage>
+ </asmv3:windowsSettings>
+ </asmv3:application>
+ <!-- Remove (most) legacy path limits -->
+ <asmv3:application>
+ <asmv3:windowsSettings xmlns:ws2="http://schemas.microsoft.com/SMI/2016/WindowsSettings">
+ <ws2:longPathAware>true</ws2:longPathAware>
+ </asmv3:windowsSettings>
+ </asmv3:application>
+</assembly>
diff --git a/compiler/rustc/build.rs b/compiler/rustc/build.rs
new file mode 100644
index 000000000..39cf3e094
--- /dev/null
+++ b/compiler/rustc/build.rs
@@ -0,0 +1,27 @@
+use std::env;
+
+fn main() {
+ let target_os = env::var("CARGO_CFG_TARGET_OS");
+ let target_env = env::var("CARGO_CFG_TARGET_ENV");
+ if Ok("windows") == target_os.as_deref() && Ok("msvc") == target_env.as_deref() {
+ set_windows_exe_options();
+ } else {
+ // Avoid rerunning the build script every time.
+ println!("cargo:rerun-if-changed=build.rs");
+ }
+}
+
+// Add a manifest file to rustc.exe.
+fn set_windows_exe_options() {
+ static WINDOWS_MANIFEST_FILE: &str = "Windows Manifest.xml";
+
+ let mut manifest = env::current_dir().unwrap();
+ manifest.push(WINDOWS_MANIFEST_FILE);
+
+ println!("cargo:rerun-if-changed={}", WINDOWS_MANIFEST_FILE);
+ // Embed the Windows application manifest file.
+ println!("cargo:rustc-link-arg-bin=rustc-main=/MANIFEST:EMBED");
+ println!("cargo:rustc-link-arg-bin=rustc-main=/MANIFESTINPUT:{}", manifest.to_str().unwrap());
+ // Turn linker warnings into errors.
+ println!("cargo:rustc-link-arg-bin=rustc-main=/WX");
+}
diff --git a/compiler/rustc/src/main.rs b/compiler/rustc/src/main.rs
new file mode 100644
index 000000000..0de1a7819
--- /dev/null
+++ b/compiler/rustc/src/main.rs
@@ -0,0 +1,63 @@
+// A note about jemalloc: rustc uses jemalloc when built for CI and
+// distribution. The obvious way to do this is with the `#[global_allocator]`
+// mechanism. However, for complicated reasons (see
+// https://github.com/rust-lang/rust/pull/81782#issuecomment-784438001 for some
+// details) that mechanism doesn't work here. Also, we must use a consistent
+// allocator across the rustc <-> llvm boundary, and `#[global_allocator]`
+// wouldn't provide that.
+//
+// Instead, we use a lower-level mechanism. rustc is linked with jemalloc in a
+// way such that jemalloc's implementation of `malloc`, `free`, etc., override
+// the libc allocator's implementation. This means that Rust's `System`
+// allocator, which calls `libc::malloc()` et al., is actually calling into
+// jemalloc.
+//
+// A consequence of not using `GlobalAlloc` (and the `tikv-jemallocator` crate
+// provides an impl of that trait, which is called `Jemalloc`) is that we
+// cannot use the sized deallocation APIs (`sdallocx`) that jemalloc provides.
+// It's unclear how much performance is lost because of this.
+//
+// As for the symbol overrides in `main` below: we're pulling in a static copy
+// of jemalloc. We need to actually reference its symbols for it to get linked.
+// The two crates we link to here, `std` and `rustc_driver`, are both dynamic
+// libraries. So we must reference jemalloc symbols one way or another, because
+// this file is the only object code in the rustc executable.
+
+fn main() {
+ // See the comment at the top of this file for an explanation of this.
+ #[cfg(feature = "jemalloc-sys")]
+ {
+ use std::os::raw::{c_int, c_void};
+
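+ // `#[used]` keeps each static below in the emitted object file even
+ // though nothing reads it, so the linker sees a reference to the
+ // corresponding jemalloc symbol and links in its definition.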
+ #[used]
+ static _F1: unsafe extern "C" fn(usize, usize) -> *mut c_void = jemalloc_sys::calloc;
+ #[used]
+ static _F2: unsafe extern "C" fn(*mut *mut c_void, usize, usize) -> c_int =
+ jemalloc_sys::posix_memalign;
+ #[used]
+ static _F3: unsafe extern "C" fn(usize, usize) -> *mut c_void = jemalloc_sys::aligned_alloc;
+ #[used]
+ static _F4: unsafe extern "C" fn(usize) -> *mut c_void = jemalloc_sys::malloc;
+ #[used]
+ static _F5: unsafe extern "C" fn(*mut c_void, usize) -> *mut c_void = jemalloc_sys::realloc;
+ #[used]
+ static _F6: unsafe extern "C" fn(*mut c_void) = jemalloc_sys::free;
+
+ // On OSX, jemalloc doesn't directly override malloc/free, but instead
+ // registers itself with the allocator's zone APIs in a ctor. However,
+ // the linker doesn't seem to consider ctors as "used" when statically
+ // linking, so we need to explicitly depend on the function.
+ #[cfg(target_os = "macos")]
+ {
+ extern "C" {
+ fn _rjem_je_zone_register();
+ }
+
+ #[used]
+ static _F7: unsafe extern "C" fn() = _rjem_je_zone_register;
+ }
+ }
+
+ rustc_driver::set_sigpipe_handler();
+ rustc_driver::main()
+}
diff --git a/compiler/rustc_apfloat/Cargo.toml b/compiler/rustc_apfloat/Cargo.toml
new file mode 100644
index 000000000..98305201b
--- /dev/null
+++ b/compiler/rustc_apfloat/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "rustc_apfloat"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+bitflags = "1.2.1"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_apfloat/src/ieee.rs b/compiler/rustc_apfloat/src/ieee.rs
new file mode 100644
index 000000000..3db8adb2a
--- /dev/null
+++ b/compiler/rustc_apfloat/src/ieee.rs
@@ -0,0 +1,2758 @@
+use crate::{Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO};
+use crate::{Float, FloatConvert, ParseError, Round, Status, StatusAnd};
+
+use core::cmp::{self, Ordering};
+use core::convert::TryFrom;
+use core::fmt::{self, Write};
+use core::marker::PhantomData;
+use core::mem;
+use core::ops::Neg;
+use smallvec::{smallvec, SmallVec};
+
+#[must_use]
+pub struct IeeeFloat<S> {
+ /// Absolute significand value (including the integer bit).
+ sig: [Limb; 1],
+
+ /// The signed unbiased exponent of the value.
+ exp: ExpInt,
+
+ /// What kind of floating point number this is.
+ category: Category,
+
+ /// Sign bit of the number.
+ sign: bool,
+
+ marker: PhantomData<S>,
+}
+
+/// Fundamental unit of big integer arithmetic, but also
+/// large enough to store the largest significands by itself.
+type Limb = u128;
+const LIMB_BITS: usize = 128;
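+/// Ceiling division: the number of limbs needed to hold `bits` bits,
+/// e.g. `limbs_for_bits(128) == 1` and `limbs_for_bits(129) == 2`.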
+fn limbs_for_bits(bits: usize) -> usize {
+ (bits + LIMB_BITS - 1) / LIMB_BITS
+}
+
+/// Enum that represents what fraction of the LSB the truncated bits of an
+/// fp number represent.
+///
+/// This essentially combines the roles of guard and sticky bits.
+#[must_use]
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum Loss {
+ // Example of truncated bits:
+ ExactlyZero, // 000000
+ LessThanHalf, // 0xxxxx x's not all zero
+ ExactlyHalf, // 100000
+ MoreThanHalf, // 1xxxxx x's not all zero
+}
+
+/// Represents floating point arithmetic semantics.
+pub trait Semantics: Sized {
+ /// Total number of bits in the in-memory format.
+ const BITS: usize;
+
+ /// Number of bits in the significand. This includes the integer bit.
+ const PRECISION: usize;
+
+ /// The largest E such that 2<sup>E</sup> is representable; this matches the
+ /// definition of IEEE 754.
+ const MAX_EXP: ExpInt;
+
+ /// The smallest E such that 2<sup>E</sup> is a normalized number; this
+ /// matches the definition of IEEE 754.
+ const MIN_EXP: ExpInt = -Self::MAX_EXP + 1;
+
+ /// The significand bit that marks NaN as quiet.
+ const QNAN_BIT: usize = Self::PRECISION - 2;
+
+ /// The significand bitpattern to mark a NaN as quiet.
+/// NOTE: for X87DoubleExtended we need to set two bits instead of one.
+ const QNAN_SIGNIFICAND: Limb = 1 << Self::QNAN_BIT;
+
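+ /// Decodes an IEEE interchange-format bit pattern into an `IeeeFloat`.
+ /// E.g. under `Single` (f32) semantics, `0x3F80_0000` splits into sign 0,
+ /// biased exponent 127 (unbiased `exp = 127 - MAX_EXP = 0`), and an
+ /// all-zero trailing significand whose implicit integer bit is set
+ /// below, i.e. the value 1.0.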
+ fn from_bits(bits: u128) -> IeeeFloat<Self> {
+ assert!(Self::BITS > Self::PRECISION);
+
+ let sign = bits & (1 << (Self::BITS - 1));
+ let exponent = (bits & !sign) >> (Self::PRECISION - 1);
+ let mut r = IeeeFloat {
+ sig: [bits & ((1 << (Self::PRECISION - 1)) - 1)],
+ // Convert the exponent from its bias representation to a signed integer.
+ exp: (exponent as ExpInt) - Self::MAX_EXP,
+ category: Category::Zero,
+ sign: sign != 0,
+ marker: PhantomData,
+ };
+
+ if r.exp == Self::MIN_EXP - 1 && r.sig == [0] {
+ // Exponent, significand meaningless.
+ r.category = Category::Zero;
+ } else if r.exp == Self::MAX_EXP + 1 && r.sig == [0] {
+ // Exponent, significand meaningless.
+ r.category = Category::Infinity;
+ } else if r.exp == Self::MAX_EXP + 1 && r.sig != [0] {
+ // Sign, exponent, significand meaningless.
+ r.category = Category::NaN;
+ } else {
+ r.category = Category::Normal;
+ if r.exp == Self::MIN_EXP - 1 {
+ // Denormal.
+ r.exp = Self::MIN_EXP;
+ } else {
+ // Set integer bit.
+ sig::set_bit(&mut r.sig, Self::PRECISION - 1);
+ }
+ }
+
+ r
+ }
+
+ fn to_bits(x: IeeeFloat<Self>) -> u128 {
+ assert!(Self::BITS > Self::PRECISION);
+
+ // Split integer bit from significand.
+ let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1);
+ let mut significand = x.sig[0] & ((1 << (Self::PRECISION - 1)) - 1);
+ let exponent = match x.category {
+ Category::Normal => {
+ if x.exp == Self::MIN_EXP && !integer_bit {
+ // Denormal.
+ Self::MIN_EXP - 1
+ } else {
+ x.exp
+ }
+ }
+ Category::Zero => {
+ // FIXME(eddyb) Maybe we should guarantee an invariant instead?
+ significand = 0;
+ Self::MIN_EXP - 1
+ }
+ Category::Infinity => {
+ // FIXME(eddyb) Maybe we should guarantee an invariant instead?
+ significand = 0;
+ Self::MAX_EXP + 1
+ }
+ Category::NaN => Self::MAX_EXP + 1,
+ };
+
+ // Convert the exponent from a signed integer to its bias representation.
+ let exponent = (exponent + Self::MAX_EXP) as u128;
+
+ ((x.sign as u128) << (Self::BITS - 1)) | (exponent << (Self::PRECISION - 1)) | significand
+ }
+}
+
+impl<S> Copy for IeeeFloat<S> {}
+impl<S> Clone for IeeeFloat<S> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+macro_rules! ieee_semantics {
+ ($($name:ident = $sem:ident($bits:tt : $exp_bits:tt)),*) => {
+ $(pub struct $sem;)*
+ $(pub type $name = IeeeFloat<$sem>;)*
+ $(impl Semantics for $sem {
+ const BITS: usize = $bits;
+ const PRECISION: usize = ($bits - 1 - $exp_bits) + 1;
+ const MAX_EXP: ExpInt = (1 << ($exp_bits - 1)) - 1;
+ })*
+ }
+}
+
+ieee_semantics! {
+ Half = HalfS(16:5),
+ Single = SingleS(32:8),
+ Double = DoubleS(64:11),
+ Quad = QuadS(128:15)
+}
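+// For reference, these parameters expand to:
+// Half: PRECISION = 11, MAX_EXP = 15, MIN_EXP = -14
+// Single: PRECISION = 24, MAX_EXP = 127, MIN_EXP = -126
+// Double: PRECISION = 53, MAX_EXP = 1023, MIN_EXP = -1022
+// Quad: PRECISION = 113, MAX_EXP = 16383, MIN_EXP = -16382
+// matching the IEEE 754 binary16/32/64/128 interchange formats.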
+
+pub struct X87DoubleExtendedS;
+pub type X87DoubleExtended = IeeeFloat<X87DoubleExtendedS>;
+impl Semantics for X87DoubleExtendedS {
+ const BITS: usize = 80;
+ const PRECISION: usize = 64;
+ const MAX_EXP: ExpInt = (1 << (15 - 1)) - 1;
+
+ /// For x87 extended precision, we want to make a NaN, not a
+ /// pseudo-NaN. Maybe we should expose the ability to make
+ /// pseudo-NaNs?
+ const QNAN_SIGNIFICAND: Limb = 0b11 << Self::QNAN_BIT;
+
+ /// Integer bit is explicit in this format. Intel hardware (387 and later)
+ /// does not support these bit patterns:
+ /// exponent = all 1's, integer bit 0, significand 0 ("pseudoinfinity")
+ /// exponent = all 1's, integer bit 0, significand nonzero ("pseudoNaN")
+ /// exponent = 0, integer bit 1 ("pseudodenormal")
+ /// exponent != 0 nor all 1's, integer bit 0 ("unnormal")
+ /// At the moment, the first two are treated as NaNs, the second two as Normal.
+ fn from_bits(bits: u128) -> IeeeFloat<Self> {
+ let sign = bits & (1 << (Self::BITS - 1));
+ let exponent = (bits & !sign) >> Self::PRECISION;
+ let mut r = IeeeFloat {
+ sig: [bits & ((1 << (Self::PRECISION - 1)) - 1)],
+ // Convert the exponent from its bias representation to a signed integer.
+ exp: (exponent as ExpInt) - Self::MAX_EXP,
+ category: Category::Zero,
+ sign: sign != 0,
+ marker: PhantomData,
+ };
+
+ if r.exp == Self::MIN_EXP - 1 && r.sig == [0] {
+ // Exponent, significand meaningless.
+ r.category = Category::Zero;
+ } else if r.exp == Self::MAX_EXP + 1 && r.sig == [1 << (Self::PRECISION - 1)] {
+ // Exponent, significand meaningless.
+ r.category = Category::Infinity;
+ } else if r.exp == Self::MAX_EXP + 1 && r.sig != [1 << (Self::PRECISION - 1)] {
+ // Sign, exponent, significand meaningless.
+ r.category = Category::NaN;
+ } else {
+ r.category = Category::Normal;
+ if r.exp == Self::MIN_EXP - 1 {
+ // Denormal.
+ r.exp = Self::MIN_EXP;
+ }
+ }
+
+ r
+ }
+
+ fn to_bits(x: IeeeFloat<Self>) -> u128 {
+ // Get integer bit from significand.
+ let integer_bit = sig::get_bit(&x.sig, Self::PRECISION - 1);
+ let mut significand = x.sig[0] & ((1 << Self::PRECISION) - 1);
+ let exponent = match x.category {
+ Category::Normal => {
+ if x.exp == Self::MIN_EXP && !integer_bit {
+ // Denormal.
+ Self::MIN_EXP - 1
+ } else {
+ x.exp
+ }
+ }
+ Category::Zero => {
+ // FIXME(eddyb) Maybe we should guarantee an invariant instead?
+ significand = 0;
+ Self::MIN_EXP - 1
+ }
+ Category::Infinity => {
+ // FIXME(eddyb) Maybe we should guarantee an invariant instead?
+ significand = 1 << (Self::PRECISION - 1);
+ Self::MAX_EXP + 1
+ }
+ Category::NaN => Self::MAX_EXP + 1,
+ };
+
+ // Convert the exponent from a signed integer to its bias representation.
+ let exponent = (exponent + Self::MAX_EXP) as u128;
+
+ ((x.sign as u128) << (Self::BITS - 1)) | (exponent << Self::PRECISION) | significand
+ }
+}
+
+float_common_impls!(IeeeFloat<S>);
+
+impl<S: Semantics> PartialEq for IeeeFloat<S> {
+ fn eq(&self, rhs: &Self) -> bool {
+ self.partial_cmp(rhs) == Some(Ordering::Equal)
+ }
+}
+
+impl<S: Semantics> PartialOrd for IeeeFloat<S> {
+ fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
+ match (self.category, rhs.category) {
+ (Category::NaN, _) | (_, Category::NaN) => None,
+
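+ // Note that `false < true` for `bool`, so comparing `!sign` values
+ // orders a negative number (sign = true, `!sign = false`) before a
+ // positive one.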
+ (Category::Infinity, Category::Infinity) => Some((!self.sign).cmp(&(!rhs.sign))),
+
+ (Category::Zero, Category::Zero) => Some(Ordering::Equal),
+
+ (Category::Infinity, _) | (Category::Normal, Category::Zero) => {
+ Some((!self.sign).cmp(&self.sign))
+ }
+
+ (_, Category::Infinity) | (Category::Zero, Category::Normal) => {
+ Some(rhs.sign.cmp(&(!rhs.sign)))
+ }
+
+ (Category::Normal, Category::Normal) => {
+ // Two normal numbers. Do they have the same sign?
+ Some((!self.sign).cmp(&(!rhs.sign)).then_with(|| {
+ // Compare absolute values; invert result if negative.
+ let result = self.cmp_abs_normal(*rhs);
+
+ if self.sign { result.reverse() } else { result }
+ }))
+ }
+ }
+ }
+}
+
+impl<S> Neg for IeeeFloat<S> {
+ type Output = Self;
+ fn neg(mut self) -> Self {
+ self.sign = !self.sign;
+ self
+ }
+}
+
+/// Prints this value as a decimal string.
+///
+/// \param precision The maximum number of digits of
+/// precision to output. If there are fewer digits available,
+/// zero padding will not be used unless the value is
+/// integral and small enough to be expressed in
+/// precision digits. 0 means to use the natural
+/// precision of the number.
+/// \param width The maximum number of zeros to
+/// consider inserting before falling back to scientific
+/// notation. 0 means to always use scientific notation.
+///
+/// \param alternate Indicates whether trailing zeros in the fraction part
+/// are kept rather than removed. Setting this parameter to true also forces
+/// output closer to the default printf behavior: specifically, a lowercase
+/// 'e' is used as the exponent delimiter and the exponent always contains
+/// at least two digits.
+///
+/// Number precision width Result
+/// ------ --------- ----- ------
+/// 1.01E+4 5 2 10100
+/// 1.01E+4 4 2 1.01E+4
+/// 1.01E+4 5 1 1.01E+4
+/// 1.01E-2 5 2 0.0101
+/// 1.01E-2 4 2 0.0101
+/// 1.01E-2 4 1 1.01E-2
+impl<S: Semantics> fmt::Display for IeeeFloat<S> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let width = f.width().unwrap_or(3);
+ let alternate = f.alternate();
+
+ match self.category {
+ Category::Infinity => {
+ if self.sign {
+ return f.write_str("-Inf");
+ } else {
+ return f.write_str("+Inf");
+ }
+ }
+
+ Category::NaN => return f.write_str("NaN"),
+
+ Category::Zero => {
+ if self.sign {
+ f.write_char('-')?;
+ }
+
+ if width == 0 {
+ if alternate {
+ f.write_str("0.0")?;
+ if let Some(n) = f.precision() {
+ for _ in 1..n {
+ f.write_char('0')?;
+ }
+ }
+ f.write_str("e+00")?;
+ } else {
+ f.write_str("0.0E+0")?;
+ }
+ } else {
+ f.write_char('0')?;
+ }
+ return Ok(());
+ }
+
+ Category::Normal => {}
+ }
+
+ if self.sign {
+ f.write_char('-')?;
+ }
+
+ // We use enough digits so the number can be round-tripped back to an
+ // APFloat. The formula comes from "How to Print Floating-Point Numbers
+ // Accurately" by Steele and White.
+ // FIXME: Using a formula based purely on the precision is conservative;
+ // we can print fewer digits depending on the actual value being printed.
+
+ // precision = 2 + floor(S::PRECISION / lg_2(10))
+ let precision = f.precision().unwrap_or(2 + S::PRECISION * 59 / 196);
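+ // E.g. for Double (S::PRECISION = 53): 2 + 53 * 59 / 196 = 17 decimal
+ // digits, the familiar round-trip digit count for binary64.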
+
+ // Decompose the number into an APInt and an exponent.
+ let mut exp = self.exp - (S::PRECISION as ExpInt - 1);
+ let mut sig = vec![self.sig[0]];
+
+ // Ignore trailing binary zeros.
+ let trailing_zeros = sig[0].trailing_zeros();
+ let _: Loss = sig::shift_right(&mut sig, &mut exp, trailing_zeros as usize);
+
+ // Change the exponent from 2^e to 10^e.
+ if exp == 0 {
+ // Nothing to do.
+ } else if exp > 0 {
+ // Just shift left.
+ let shift = exp as usize;
+ sig.resize(limbs_for_bits(S::PRECISION + shift), 0);
+ sig::shift_left(&mut sig, &mut exp, shift);
+ } else {
+ // exp < 0
+ let mut texp = -exp as usize;
+
+ // We transform this using the identity:
+ // (N)(2^-e) == (N)(5^e)(10^-e)
+
+ // Multiply significand by 5^e.
+ // N * 5^0101 == N * 5^(1*1) * 5^(0*2) * 5^(1*4) * 5^(0*8)
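+ // E.g. for exp = -3: N * 2^-3 == N * 5^3 * 10^-3 == N * 125 / 1000.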
+ let mut sig_scratch = vec![];
+ let mut p5 = vec![];
+ let mut p5_scratch = vec![];
+ while texp != 0 {
+ if p5.is_empty() {
+ p5.push(5);
+ } else {
+ p5_scratch.resize(p5.len() * 2, 0);
+ let _: Loss =
+ sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS);
+ while p5_scratch.last() == Some(&0) {
+ p5_scratch.pop();
+ }
+ mem::swap(&mut p5, &mut p5_scratch);
+ }
+ if texp & 1 != 0 {
+ sig_scratch.resize(sig.len() + p5.len(), 0);
+ let _: Loss = sig::mul(
+ &mut sig_scratch,
+ &mut 0,
+ &sig,
+ &p5,
+ (sig.len() + p5.len()) * LIMB_BITS,
+ );
+ while sig_scratch.last() == Some(&0) {
+ sig_scratch.pop();
+ }
+ mem::swap(&mut sig, &mut sig_scratch);
+ }
+ texp >>= 1;
+ }
+ }
+
+ // Fill the buffer.
+ let mut buffer = vec![];
+
+ // Ignore digits from the significand until it is no more
+ // precise than is required for the desired precision.
+ // 196/59 is a very slight overestimate of lg_2(10).
+ let required = (precision * 196 + 58) / 59;
+ let mut discard_digits = sig::omsb(&sig).saturating_sub(required) * 59 / 196;
+ let mut in_trail = true;
+ while !sig.is_empty() {
+ // Perform short division by 10 to extract the rightmost digit.
+ // rem <- sig % 10
+ // sig <- sig / 10
+ let mut rem = 0;
+
+ // Use 64-bit division and remainder, with 32-bit chunks from sig.
+ sig::each_chunk(&mut sig, 32, |chunk| {
+ let chunk = chunk as u32;
+ let combined = ((rem as u64) << 32) | (chunk as u64);
+ rem = (combined % 10) as u8;
+ (combined / 10) as u32 as Limb
+ });
+
+ // Reduce the significand to avoid wasting time dividing 0's.
+ while sig.last() == Some(&0) {
+ sig.pop();
+ }
+
+ let digit = rem;
+
+ // Ignore digits we don't need.
+ if discard_digits > 0 {
+ discard_digits -= 1;
+ exp += 1;
+ continue;
+ }
+
+ // Drop trailing zeros.
+ if in_trail && digit == 0 {
+ exp += 1;
+ } else {
+ in_trail = false;
+ buffer.push(b'0' + digit);
+ }
+ }
+
+ assert!(!buffer.is_empty(), "no characters in buffer!");
+
+ // Drop down to precision.
+ // FIXME: don't do more precise calculations above than are required.
+ if buffer.len() > precision {
+ // The most significant figures are the last ones in the buffer.
+ let mut first_sig = buffer.len() - precision;
+
+ // Round.
+ // FIXME: this probably shouldn't use 'round half up'.
+
+ // Rounding down is just a truncation, except we also want to drop
+ // trailing zeros from the new result.
+ if buffer[first_sig - 1] < b'5' {
+ while first_sig < buffer.len() && buffer[first_sig] == b'0' {
+ first_sig += 1;
+ }
+ } else {
+ // Rounding up requires a decimal add-with-carry. If we continue
+ // the carry, the newly-introduced zeros will just be truncated.
+ for x in &mut buffer[first_sig..] {
+ if *x == b'9' {
+ first_sig += 1;
+ } else {
+ *x += 1;
+ break;
+ }
+ }
+ }
+
+ exp += first_sig as ExpInt;
+ buffer.drain(..first_sig);
+
+ // If we carried through, we have exactly one digit of precision.
+ if buffer.is_empty() {
+ buffer.push(b'1');
+ }
+ }
+
+ let digits = buffer.len();
+
+ // Check whether we should use scientific notation.
+ let scientific = if width == 0 {
+ true
+ } else if exp >= 0 {
+ // 765e3 --> 765000
+ // ^^^
+ // But we shouldn't make the number look more precise than it is.
+ exp as usize > width || digits + exp as usize > precision
+ } else {
+ // Power of the most significant digit.
+ let msd = exp + (digits - 1) as ExpInt;
+ if msd >= 0 {
+ // 765e-2 == 7.65
+ false
+ } else {
+ // 765e-5 == 0.00765
+ // ^ ^^
+ -msd as usize > width
+ }
+ };
+
+ // Scientific formatting is pretty straightforward.
+ if scientific {
+ exp += digits as ExpInt - 1;
+
+ f.write_char(buffer[digits - 1] as char)?;
+ f.write_char('.')?;
+ let truncate_zero = !alternate;
+ if digits == 1 && truncate_zero {
+ f.write_char('0')?;
+ } else {
+ for &d in buffer[..digits - 1].iter().rev() {
+ f.write_char(d as char)?;
+ }
+ }
+ // Fill with zeros up to precision.
+ if !truncate_zero && precision > digits - 1 {
+ for _ in 0..=precision - digits {
+ f.write_char('0')?;
+ }
+ }
+ // For alternate we use lower 'e'.
+ f.write_char(if alternate { 'e' } else { 'E' })?;
+
+ // The exponent always has at least two digits if we do not truncate zeros.
+ if truncate_zero {
+ write!(f, "{:+}", exp)?;
+ } else {
+ write!(f, "{:+03}", exp)?;
+ }
+
+ return Ok(());
+ }
+
+ // Non-scientific, positive exponents.
+ if exp >= 0 {
+ for &d in buffer.iter().rev() {
+ f.write_char(d as char)?;
+ }
+ for _ in 0..exp {
+ f.write_char('0')?;
+ }
+ return Ok(());
+ }
+
+ // Non-scientific, negative exponents.
+ let unit_place = -exp as usize;
+ if unit_place < digits {
+ for &d in buffer[unit_place..].iter().rev() {
+ f.write_char(d as char)?;
+ }
+ f.write_char('.')?;
+ for &d in buffer[..unit_place].iter().rev() {
+ f.write_char(d as char)?;
+ }
+ } else {
+ f.write_str("0.")?;
+ for _ in digits..unit_place {
+ f.write_char('0')?;
+ }
+ for &d in buffer.iter().rev() {
+ f.write_char(d as char)?;
+ }
+ }
+
+ Ok(())
+ }
+}
+
+impl<S: Semantics> fmt::Debug for IeeeFloat<S> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "{}({:?} | {}{:?} * 2^{})",
+ self,
+ self.category,
+ if self.sign { "-" } else { "+" },
+ self.sig,
+ self.exp
+ )
+ }
+}
+
+impl<S: Semantics> Float for IeeeFloat<S> {
+ const BITS: usize = S::BITS;
+ const PRECISION: usize = S::PRECISION;
+ const MAX_EXP: ExpInt = S::MAX_EXP;
+ const MIN_EXP: ExpInt = S::MIN_EXP;
+
+ const ZERO: Self = IeeeFloat {
+ sig: [0],
+ exp: S::MIN_EXP - 1,
+ category: Category::Zero,
+ sign: false,
+ marker: PhantomData,
+ };
+
+ const INFINITY: Self = IeeeFloat {
+ sig: [0],
+ exp: S::MAX_EXP + 1,
+ category: Category::Infinity,
+ sign: false,
+ marker: PhantomData,
+ };
+
+ // FIXME(eddyb) remove when qnan becomes const fn.
+ const NAN: Self = IeeeFloat {
+ sig: [S::QNAN_SIGNIFICAND],
+ exp: S::MAX_EXP + 1,
+ category: Category::NaN,
+ sign: false,
+ marker: PhantomData,
+ };
+
+ fn qnan(payload: Option<u128>) -> Self {
+ IeeeFloat {
+ sig: [S::QNAN_SIGNIFICAND
+ | payload.map_or(0, |payload| {
+ // Zero out the excess bits of the significand.
+ payload & ((1 << S::QNAN_BIT) - 1)
+ })],
+ exp: S::MAX_EXP + 1,
+ category: Category::NaN,
+ sign: false,
+ marker: PhantomData,
+ }
+ }
+
+ fn snan(payload: Option<u128>) -> Self {
+ let mut snan = Self::qnan(payload);
+
+ // We always have to clear the QNaN bit to make it an SNaN.
+ sig::clear_bit(&mut snan.sig, S::QNAN_BIT);
+
+ // If there are no bits set in the payload, we have to set
+ // *something* to make it a NaN instead of an infinity;
+ // conventionally, this is the next bit down from the QNaN bit.
+ if snan.sig[0] & !S::QNAN_SIGNIFICAND == 0 {
+ sig::set_bit(&mut snan.sig, S::QNAN_BIT - 1);
+ }
+
+ snan
+ }
+
+ fn largest() -> Self {
+ // We want (in interchange format):
+ // exponent = 1..10
+ // significand = 1..1
+ IeeeFloat {
+ sig: [(1 << S::PRECISION) - 1],
+ exp: S::MAX_EXP,
+ category: Category::Normal,
+ sign: false,
+ marker: PhantomData,
+ }
+ }
+
+ // We want (in interchange format):
+ // exponent = 0..0
+ // significand = 0..01
+ const SMALLEST: Self = IeeeFloat {
+ sig: [1],
+ exp: S::MIN_EXP,
+ category: Category::Normal,
+ sign: false,
+ marker: PhantomData,
+ };
+
+ fn smallest_normalized() -> Self {
+ // We want (in interchange format):
+ // exponent = 0..0
+ // significand = 10..0
+ IeeeFloat {
+ sig: [1 << (S::PRECISION - 1)],
+ exp: S::MIN_EXP,
+ category: Category::Normal,
+ sign: false,
+ marker: PhantomData,
+ }
+ }
+
+ fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
+ let status = match (self.category, rhs.category) {
+ (Category::Infinity, Category::Infinity) => {
+ // Differently signed infinities can only be validly
+ // subtracted.
+ if self.sign != rhs.sign {
+ self = Self::NAN;
+ Status::INVALID_OP
+ } else {
+ Status::OK
+ }
+ }
+
+ // Sign may depend on rounding mode; handled below.
+ (_, Category::Zero) | (Category::NaN, _) | (Category::Infinity, Category::Normal) => {
+ Status::OK
+ }
+
+ (Category::Zero, _) | (_, Category::NaN | Category::Infinity) => {
+ self = rhs;
+ Status::OK
+ }
+
+ // This return code means it was not a simple case.
+ (Category::Normal, Category::Normal) => {
+ let loss = sig::add_or_sub(
+ &mut self.sig,
+ &mut self.exp,
+ &mut self.sign,
+ &mut [rhs.sig[0]],
+ rhs.exp,
+ rhs.sign,
+ );
+ let status;
+ self = unpack!(status=, self.normalize(round, loss));
+
+ // Can only be zero if we lost no fraction.
+ assert!(self.category != Category::Zero || loss == Loss::ExactlyZero);
+
+ status
+ }
+ };
+
+ // If two numbers add (exactly) to zero, IEEE 754 decrees it is a
+ // positive zero unless rounding to minus infinity, except that
+ // adding two like-signed zeroes gives that zero.
+ if self.category == Category::Zero
+ && (rhs.category != Category::Zero || self.sign != rhs.sign)
+ {
+ self.sign = round == Round::TowardNegative;
+ }
+
+ status.and(self)
+ }
+
+ fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
+ self.sign ^= rhs.sign;
+
+ match (self.category, rhs.category) {
+ (Category::NaN, _) => {
+ self.sign = false;
+ Status::OK.and(self)
+ }
+
+ (_, Category::NaN) => {
+ self.sign = false;
+ self.category = Category::NaN;
+ self.sig = rhs.sig;
+ Status::OK.and(self)
+ }
+
+ (Category::Zero, Category::Infinity) | (Category::Infinity, Category::Zero) => {
+ Status::INVALID_OP.and(Self::NAN)
+ }
+
+ (_, Category::Infinity) | (Category::Infinity, _) => {
+ self.category = Category::Infinity;
+ Status::OK.and(self)
+ }
+
+ (Category::Zero, _) | (_, Category::Zero) => {
+ self.category = Category::Zero;
+ Status::OK.and(self)
+ }
+
+ (Category::Normal, Category::Normal) => {
+ self.exp += rhs.exp;
+ let mut wide_sig = [0; 2];
+ let loss =
+ sig::mul(&mut wide_sig, &mut self.exp, &self.sig, &rhs.sig, S::PRECISION);
+ self.sig = [wide_sig[0]];
+ let mut status;
+ self = unpack!(status=, self.normalize(round, loss));
+ if loss != Loss::ExactlyZero {
+ status |= Status::INEXACT;
+ }
+ status.and(self)
+ }
+ }
+ }
+
+ fn mul_add_r(mut self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self> {
+ // If and only if all arguments are normal do we need to do an
+ // extended-precision calculation.
+ if !self.is_finite_non_zero() || !multiplicand.is_finite_non_zero() || !addend.is_finite() {
+ let mut status;
+ self = unpack!(status=, self.mul_r(multiplicand, round));
+
+ // `status` can only be Status::OK or Status::INVALID_OP. There is no more
+ // work to do in the latter case. The IEEE-754R standard says it is
+ // implementation-defined in this case whether, if `addend` is a
+ // quiet NaN, we raise invalid op; this implementation does so.
+ //
+ // If we need to do the addition we can do so with normal
+ // precision.
+ if status == Status::OK {
+ self = unpack!(status=, self.add_r(addend, round));
+ }
+ return status.and(self);
+ }
+
+ // Post-multiplication sign, before addition.
+ self.sign ^= multiplicand.sign;
+
+ // Allocate space for twice as many bits as the original significand, plus one
+ // extra bit for the addition to overflow into.
+ assert!(limbs_for_bits(S::PRECISION * 2 + 1) <= 2);
+ let mut wide_sig = sig::widening_mul(self.sig[0], multiplicand.sig[0]);
+
+ let mut loss = Loss::ExactlyZero;
+ let mut omsb = sig::omsb(&wide_sig);
+ self.exp += multiplicand.exp;
+
+ // Assume the operands involved in the multiplication are single-precision
+ // FP, and the two multiplicands are:
+ // lhs = a23 . a22 ... a0 * 2^e1
+ // rhs = b23 . b22 ... b0 * 2^e2
+ // the result of the multiplication is:
+ // lhs = c48 c47 c46 . c45 ... c0 * 2^(e1+e2)
+ // Note that there are three significant bits at the left-hand side of the
+ // radix point: two for the multiplication, and an overflow bit for the
+ // addition (that will always be zero at this point). Move the radix point
+ // toward left by two bits, and adjust exponent accordingly.
+ self.exp += 2;
+
+ if addend.is_non_zero() {
+ // Normalize our MSB to one below the top bit to allow for overflow.
+ let ext_precision = 2 * S::PRECISION + 1;
+ if omsb != ext_precision - 1 {
+ assert!(ext_precision > omsb);
+ sig::shift_left(&mut wide_sig, &mut self.exp, (ext_precision - 1) - omsb);
+ }
+
+ // The intermediate result of the multiplication has "2 * S::PRECISION"
+ // significant bits; adjust the addend to be consistent with the mul result.
+ let mut ext_addend_sig = [addend.sig[0], 0];
+
+ // Extend the addend significand to ext_precision - 1. This guarantees
+ // that the high bit of the significand is zero (same as wide_sig),
+ // so the addition will overflow (if it does overflow at all) into the top bit.
+ sig::shift_left(&mut ext_addend_sig, &mut 0, ext_precision - 1 - S::PRECISION);
+ loss = sig::add_or_sub(
+ &mut wide_sig,
+ &mut self.exp,
+ &mut self.sign,
+ &mut ext_addend_sig,
+ addend.exp + 1,
+ addend.sign,
+ );
+
+ omsb = sig::omsb(&wide_sig);
+ }
+
+ // Convert the result having "2 * S::PRECISION" significant bits back to one
+ // having "S::PRECISION" significant bits. First, move the radix point from
+ // position "2*S::PRECISION - 1" to "S::PRECISION - 1". The exponent needs to
+ // be adjusted by "2*S::PRECISION - 1" - "S::PRECISION - 1" = "S::PRECISION".
+ self.exp -= S::PRECISION as ExpInt + 1;
+
+ // In case the MSB resides on the left-hand side of the radix point, shift
+ // the mantissa right by some amount to make sure the MSB resides right
+ // before the radix point (i.e., "MSB . rest-significant-bits").
+ if omsb > S::PRECISION {
+ let bits = omsb - S::PRECISION;
+ loss = sig::shift_right(&mut wide_sig, &mut self.exp, bits).combine(loss);
+ }
+
+ self.sig[0] = wide_sig[0];
+
+ let mut status;
+ self = unpack!(status=, self.normalize(round, loss));
+ if loss != Loss::ExactlyZero {
+ status |= Status::INEXACT;
+ }
+
+ // If two numbers add (exactly) to zero, IEEE 754 decrees it is a
+ // positive zero unless rounding to minus infinity, except that
+ // adding two like-signed zeroes gives that zero.
+ if self.category == Category::Zero
+ && !status.intersects(Status::UNDERFLOW)
+ && self.sign != addend.sign
+ {
+ self.sign = round == Round::TowardNegative;
+ }
+
+ status.and(self)
+ }
+
+ fn div_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
+ self.sign ^= rhs.sign;
+
+ match (self.category, rhs.category) {
+ (Category::NaN, _) => {
+ self.sign = false;
+ Status::OK.and(self)
+ }
+
+ (_, Category::NaN) => {
+ self.category = Category::NaN;
+ self.sig = rhs.sig;
+ self.sign = false;
+ Status::OK.and(self)
+ }
+
+ (Category::Infinity, Category::Infinity) | (Category::Zero, Category::Zero) => {
+ Status::INVALID_OP.and(Self::NAN)
+ }
+
+ (Category::Infinity | Category::Zero, _) => Status::OK.and(self),
+
+ (Category::Normal, Category::Infinity) => {
+ self.category = Category::Zero;
+ Status::OK.and(self)
+ }
+
+ (Category::Normal, Category::Zero) => {
+ self.category = Category::Infinity;
+ Status::DIV_BY_ZERO.and(self)
+ }
+
+ (Category::Normal, Category::Normal) => {
+ self.exp -= rhs.exp;
+ let dividend = self.sig[0];
+ let loss = sig::div(
+ &mut self.sig,
+ &mut self.exp,
+ &mut [dividend],
+ &mut [rhs.sig[0]],
+ S::PRECISION,
+ );
+ let mut status;
+ self = unpack!(status=, self.normalize(round, loss));
+ if loss != Loss::ExactlyZero {
+ status |= Status::INEXACT;
+ }
+ status.and(self)
+ }
+ }
+ }
+
+ fn c_fmod(mut self, rhs: Self) -> StatusAnd<Self> {
+ match (self.category, rhs.category) {
+ (Category::NaN, _)
+ | (Category::Zero, Category::Infinity | Category::Normal)
+ | (Category::Normal, Category::Infinity) => Status::OK.and(self),
+
+ (_, Category::NaN) => {
+ self.sign = false;
+ self.category = Category::NaN;
+ self.sig = rhs.sig;
+ Status::OK.and(self)
+ }
+
+ (Category::Infinity, _) | (_, Category::Zero) => Status::INVALID_OP.and(Self::NAN),
+
+ (Category::Normal, Category::Normal) => {
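+ // Classic shift-and-subtract: repeatedly subtract the largest
+ // power-of-two multiple of rhs that fits, until |self| < |rhs|.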
+ while self.is_finite_non_zero()
+ && rhs.is_finite_non_zero()
+ && self.cmp_abs_normal(rhs) != Ordering::Less
+ {
+ let mut v = rhs.scalbn(self.ilogb() - rhs.ilogb());
+ if self.cmp_abs_normal(v) == Ordering::Less {
+ v = v.scalbn(-1);
+ }
+ v.sign = self.sign;
+
+ let status;
+ self = unpack!(status=, self - v);
+ assert_eq!(status, Status::OK);
+ }
+ Status::OK.and(self)
+ }
+ }
+ }
+
+ fn round_to_integral(self, round: Round) -> StatusAnd<Self> {
+ // If the exponent is large enough, we know that this value is already
+ // integral, and the arithmetic below would potentially cause it to saturate
+ // to +/-Inf. Bail out early instead.
+ if self.is_finite_non_zero() && self.exp + 1 >= S::PRECISION as ExpInt {
+ return Status::OK.and(self);
+ }
+
+ // The algorithm here is quite simple: we add 2^(p-1), where p is the
+ // precision of our format, and then subtract it back off again. The choice
+ // of rounding modes for the addition/subtraction determines the rounding mode
+ // for our integral rounding as well.
+ // NOTE: When the input value is negative, we do subtraction followed by
+ // addition instead.
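+ // E.g. with Double semantics (PRECISION = 53) and round-to-nearest-even,
+ // rounding 2.5 computes 2.5 + 2^52 = 4503599627370498.5, which rounds
+ // to the even value 4503599627370498.0; subtracting 2^52 back off
+ // yields 2.0.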
+ assert!(S::PRECISION <= 128);
+ let mut status;
+ let magic_const = unpack!(status=, Self::from_u128(1 << (S::PRECISION - 1)));
+ let magic_const = magic_const.copy_sign(self);
+
+ if status != Status::OK {
+ return status.and(self);
+ }
+
+ let mut r = self;
+ r = unpack!(status=, r.add_r(magic_const, round));
+ if status != Status::OK && status != Status::INEXACT {
+ return status.and(self);
+ }
+
+ // Restore the input sign to handle 0.0/-0.0 cases correctly.
+ r.sub_r(magic_const, round).map(|r| r.copy_sign(self))
+ }
+
+ fn next_up(mut self) -> StatusAnd<Self> {
+ // Compute nextUp(x), handling each float category separately.
+ match self.category {
+ Category::Infinity => {
+ if self.sign {
+ // nextUp(-inf) = -largest
+ Status::OK.and(-Self::largest())
+ } else {
+ // nextUp(+inf) = +inf
+ Status::OK.and(self)
+ }
+ }
+ Category::NaN => {
+ // IEEE-754R 2008 6.2 Par 2: nextUp(sNaN) = qNaN. Set Invalid flag.
+ // IEEE-754R 2008 6.2: nextUp(qNaN) = qNaN. Must be identity so we do not
+ // change the payload.
+ if self.is_signaling() {
+ // For consistency, propagate the sign of the sNaN to the qNaN.
+ Status::INVALID_OP.and(Self::NAN.copy_sign(self))
+ } else {
+ Status::OK.and(self)
+ }
+ }
+ Category::Zero => {
+ // nextUp(pm 0) = +smallest
+ Status::OK.and(Self::SMALLEST)
+ }
+ Category::Normal => {
+ // nextUp(-smallest) = -0
+ if self.is_smallest() && self.sign {
+ return Status::OK.and(-Self::ZERO);
+ }
+
+ // nextUp(largest) == INFINITY
+ if self.is_largest() && !self.sign {
+ return Status::OK.and(Self::INFINITY);
+ }
+
+ // A mask excluding the integral bit; this allows us to test for binade boundaries.
+ let sig_mask = (1 << (S::PRECISION - 1)) - 1;
+
+ // nextUp(normal) == normal + inc.
+ if self.sign {
+ // If we are negative, we need to decrement the significand.
+
+ // We only cross a binade boundary that requires adjusting the exponent
+ // if:
+ // 1. exponent != S::MIN_EXP. This implies we are not in the
+ // smallest binade or are dealing with denormals.
+ // 2. Our significand excluding the integral bit is all zeros.
+ let crossing_binade_boundary =
+ self.exp != S::MIN_EXP && self.sig[0] & sig_mask == 0;
+
+ // Decrement the significand.
+ //
+ // We always do this since:
+ // 1. If we are dealing with a non-binade decrement, by definition we
+ // just decrement the significand.
+ // 2. If we are dealing with a normal -> normal binade decrement, since
+ // we have an explicit integral bit the fact that all bits but the
+ // integral bit are zero implies that subtracting one will yield a
+ // significand with 0 integral bit and 1 in all other spots. Thus we
+ // must just adjust the exponent and set the integral bit to 1.
+ // 3. If we are dealing with a normal -> denormal binade decrement,
+ // since we set the integral bit to 0 when we represent denormals, we
+ // just decrement the significand.
+ sig::decrement(&mut self.sig);
+
+ if crossing_binade_boundary {
+ // Our result is a normal number. Do the following:
+ // 1. Set the integral bit to 1.
+ // 2. Decrement the exponent.
+ sig::set_bit(&mut self.sig, S::PRECISION - 1);
+ self.exp -= 1;
+ }
+ } else {
+ // If we are positive, we need to increment the significand.
+
+ // We only cross a binade boundary that requires adjusting the exponent if
+ // the input is not a denormal and all of said input's significand bits
+ // are set. If all of said conditions are true: clear the significand, set
+ // the integral bit to 1, and increment the exponent. If we have a
+ // denormal, always increment, since denormals and the numbers in the
+ // smallest normal binade have the same exponent in our representation.
+ let crossing_binade_boundary =
+ !self.is_denormal() && self.sig[0] & sig_mask == sig_mask;
+
+ if crossing_binade_boundary {
+ self.sig = [0];
+ sig::set_bit(&mut self.sig, S::PRECISION - 1);
+ assert_ne!(
+ self.exp,
+ S::MAX_EXP,
+ "We cannot increment an exponent beyond the MAX_EXP \
+ allowed by the given floating-point semantics."
+ );
+ self.exp += 1;
+ } else {
+ sig::increment(&mut self.sig);
+ }
+ }
+ Status::OK.and(self)
+ }
+ }
+ }
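+
+    // An illustrative sketch (not part of the original sources; it assumes the
+    // `ieee::Single` semantics type defined elsewhere in this crate): stepping
+    // up from 1.0 increments the significand by one ULP.
+    //
+    //     let one = Single::from_u128(1).value;
+    //     let next = one.next_up().value;
+    //     // `next` is 1.0 + 2^-23, the smallest single-precision value > 1.0.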
+
+ fn from_bits(input: u128) -> Self {
+ // Dispatch to semantics.
+ S::from_bits(input)
+ }
+
+ fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self> {
+ IeeeFloat {
+ sig: [input],
+ exp: S::PRECISION as ExpInt - 1,
+ category: Category::Normal,
+ sign: false,
+ marker: PhantomData,
+ }
+ .normalize(round, Loss::ExactlyZero)
+ }
+
+ fn from_str_r(mut s: &str, mut round: Round) -> Result<StatusAnd<Self>, ParseError> {
+ if s.is_empty() {
+ return Err(ParseError("Invalid string length"));
+ }
+
+ // Handle special cases.
+ match s {
+ "inf" | "INFINITY" => return Ok(Status::OK.and(Self::INFINITY)),
+ "-inf" | "-INFINITY" => return Ok(Status::OK.and(-Self::INFINITY)),
+ "nan" | "NaN" => return Ok(Status::OK.and(Self::NAN)),
+ "-nan" | "-NaN" => return Ok(Status::OK.and(-Self::NAN)),
+ _ => {}
+ }
+
+ // Handle a leading minus sign.
+ let minus = s.starts_with('-');
+ if minus || s.starts_with('+') {
+ s = &s[1..];
+ if s.is_empty() {
+ return Err(ParseError("String has no digits"));
+ }
+ }
+
+ // Adjust the rounding mode for the absolute value below.
+ if minus {
+ round = -round;
+ }
+
+ let r = if s.starts_with("0x") || s.starts_with("0X") {
+ s = &s[2..];
+ if s.is_empty() {
+ return Err(ParseError("Invalid string"));
+ }
+ Self::from_hexadecimal_string(s, round)?
+ } else {
+ Self::from_decimal_string(s, round)?
+ };
+
+ Ok(r.map(|r| if minus { -r } else { r }))
+ }
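+
+    // Illustrative usage sketch (assuming the `Double` semantics type from
+    // this crate): exactly representable inputs parse with `Status::OK`.
+    //
+    //     let r = Double::from_str_r("-1.5", Round::NearestTiesToEven)?;
+    //     assert_eq!(r.status, Status::OK);
+    //     assert!(r.value.is_negative());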
+
+ fn to_bits(self) -> u128 {
+ // Dispatch to semantics.
+ S::to_bits(self)
+ }
+
+ fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128> {
+        // The result of trying to convert a number that is too large.
+ let overflow = if self.sign {
+ // Negative numbers cannot be represented as unsigned.
+ 0
+ } else {
+ // Largest unsigned integer of the given width.
+ !0 >> (128 - width)
+ };
+
+ *is_exact = false;
+
+ match self.category {
+ Category::NaN => Status::INVALID_OP.and(0),
+
+ Category::Infinity => Status::INVALID_OP.and(overflow),
+
+ Category::Zero => {
+ // Negative zero can't be represented as an int.
+ *is_exact = !self.sign;
+ Status::OK.and(0)
+ }
+
+ Category::Normal => {
+ let mut r = 0;
+
+ // Step 1: place our absolute value, with any fraction truncated, in
+ // the destination.
+ let truncated_bits = if self.exp < 0 {
+ // Our absolute value is less than one; truncate everything.
+                    // For exponent -1 the integer bit represents .5, so look at that bit.
+                    // For smaller exponents the leftmost truncated bit is 0.
+ S::PRECISION - 1 + (-self.exp) as usize
+ } else {
+ // We want the most significant (exponent + 1) bits; the rest are
+ // truncated.
+ let bits = self.exp as usize + 1;
+
+ // Hopelessly large in magnitude?
+ if bits > width {
+ return Status::INVALID_OP.and(overflow);
+ }
+
+ if bits < S::PRECISION {
+ // We truncate (S::PRECISION - bits) bits.
+ r = self.sig[0] >> (S::PRECISION - bits);
+ S::PRECISION - bits
+ } else {
+ // We want at least as many bits as are available.
+ r = self.sig[0] << (bits - S::PRECISION);
+ 0
+ }
+ };
+
+ // Step 2: work out any lost fraction, and increment the absolute
+ // value if we would round away from zero.
+ let mut loss = Loss::ExactlyZero;
+ if truncated_bits > 0 {
+ loss = Loss::through_truncation(&self.sig, truncated_bits);
+ if loss != Loss::ExactlyZero
+ && self.round_away_from_zero(round, loss, truncated_bits)
+ {
+ r = r.wrapping_add(1);
+ if r == 0 {
+ return Status::INVALID_OP.and(overflow); // Overflow.
+ }
+ }
+ }
+
+ // Step 3: check if we fit in the destination.
+ if r > overflow {
+ return Status::INVALID_OP.and(overflow);
+ }
+
+ if loss == Loss::ExactlyZero {
+ *is_exact = true;
+ Status::OK.and(r)
+ } else {
+ Status::INEXACT.and(r)
+ }
+ }
+ }
+ }
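+
+    // Worked example (illustrative, assuming the `Double` type from this
+    // crate): truncating conversion of 2.5 to a 64-bit unsigned integer.
+    //
+    //     let mut exact = false;
+    //     let x = Double::from_str_r("2.5", Round::NearestTiesToEven)?.value;
+    //     let r = x.to_u128_r(64, Round::TowardZero, &mut exact);
+    //     // r.value == 2, r.status == Status::INEXACT, exact == false.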
+
+ fn cmp_abs_normal(self, rhs: Self) -> Ordering {
+ assert!(self.is_finite_non_zero());
+ assert!(rhs.is_finite_non_zero());
+
+ // If exponents are equal, do an unsigned comparison of the significands.
+ self.exp.cmp(&rhs.exp).then_with(|| sig::cmp(&self.sig, &rhs.sig))
+ }
+
+ fn bitwise_eq(self, rhs: Self) -> bool {
+ if self.category != rhs.category || self.sign != rhs.sign {
+ return false;
+ }
+
+ if self.category == Category::Zero || self.category == Category::Infinity {
+ return true;
+ }
+
+ if self.is_finite_non_zero() && self.exp != rhs.exp {
+ return false;
+ }
+
+ self.sig == rhs.sig
+ }
+
+ fn is_negative(self) -> bool {
+ self.sign
+ }
+
+ fn is_denormal(self) -> bool {
+ self.is_finite_non_zero()
+ && self.exp == S::MIN_EXP
+ && !sig::get_bit(&self.sig, S::PRECISION - 1)
+ }
+
+ fn is_signaling(self) -> bool {
+ // IEEE-754R 2008 6.2.1: A signaling NaN bit string should be encoded with the
+ // first bit of the trailing significand being 0.
+ self.is_nan() && !sig::get_bit(&self.sig, S::QNAN_BIT)
+ }
+
+ fn category(self) -> Category {
+ self.category
+ }
+
+ fn get_exact_inverse(self) -> Option<Self> {
+ // Special floats and denormals have no exact inverse.
+ if !self.is_finite_non_zero() {
+ return None;
+ }
+
+ // Check that the number is a power of two by making sure that only the
+ // integer bit is set in the significand.
+ if self.sig != [1 << (S::PRECISION - 1)] {
+ return None;
+ }
+
+ // Get the inverse.
+ let mut reciprocal = Self::from_u128(1).value;
+ let status;
+ reciprocal = unpack!(status=, reciprocal / self);
+ if status != Status::OK {
+ return None;
+ }
+
+ // Avoid multiplication with a denormal, it is not safe on all platforms and
+ // may be slower than a normal division.
+ if reciprocal.is_denormal() {
+ return None;
+ }
+
+ assert!(reciprocal.is_finite_non_zero());
+ assert_eq!(reciprocal.sig, [1 << (S::PRECISION - 1)]);
+
+ Some(reciprocal)
+ }
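+
+    // Illustrative: only powers of two have an exact inverse, e.g. 2.0
+    // yields Some(0.5), while 3.0 yields None (1/3 is not representable).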
+
+ fn ilogb(mut self) -> ExpInt {
+ if self.is_nan() {
+ return IEK_NAN;
+ }
+ if self.is_zero() {
+ return IEK_ZERO;
+ }
+ if self.is_infinite() {
+ return IEK_INF;
+ }
+ if !self.is_denormal() {
+ return self.exp;
+ }
+
+ let sig_bits = (S::PRECISION - 1) as ExpInt;
+ self.exp += sig_bits;
+ self = self.normalize(Round::NearestTiesToEven, Loss::ExactlyZero).value;
+ self.exp - sig_bits
+ }
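+
+    // Illustrative: for a normal number this is the unbiased exponent, e.g.
+    // ilogb(8.0) == 3 and ilogb(0.25) == -2; the special cases return the
+    // IEK_* sentinels defined in the crate root.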
+
+ fn scalbn_r(mut self, exp: ExpInt, round: Round) -> Self {
+ // If exp is wildly out-of-scale, simply adding it to self.exp will
+ // overflow; clamp it to a safe range before adding, but ensure that the range
+ // is large enough that the clamp does not change the result. The range we
+ // need to support is the difference between the largest possible exponent and
+ // the normalized exponent of half the smallest denormal.
+
+ let sig_bits = (S::PRECISION - 1) as i32;
+ let max_change = S::MAX_EXP as i32 - (S::MIN_EXP as i32 - sig_bits) + 1;
+
+ // Clamp to one past the range ends to let normalize handle overflow.
+ let exp_change = cmp::min(cmp::max(exp as i32, -max_change - 1), max_change);
+ self.exp = self.exp.saturating_add(exp_change as ExpInt);
+ self = self.normalize(round, Loss::ExactlyZero).value;
+ if self.is_nan() {
+ sig::set_bit(&mut self.sig, S::QNAN_BIT);
+ }
+ self
+ }
+
+ fn frexp_r(mut self, exp: &mut ExpInt, round: Round) -> Self {
+ *exp = self.ilogb();
+
+        // Quiet signaling NaNs.
+ if *exp == IEK_NAN {
+ sig::set_bit(&mut self.sig, S::QNAN_BIT);
+ return self;
+ }
+
+ if *exp == IEK_INF {
+ return self;
+ }
+
+ // 1 is added because frexp is defined to return a normalized fraction in
+ // +/-[0.5, 1.0), rather than the usual +/-[1.0, 2.0).
+ if *exp == IEK_ZERO {
+ *exp = 0;
+ } else {
+ *exp += 1;
+ }
+ self.scalbn_r(-*exp, round)
+}
+
+impl<S: Semantics, T: Semantics> FloatConvert<IeeeFloat<T>> for IeeeFloat<S> {
+ fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd<IeeeFloat<T>> {
+ let mut r = IeeeFloat {
+ sig: self.sig,
+ exp: self.exp,
+ category: self.category,
+ sign: self.sign,
+ marker: PhantomData,
+ };
+
+ // x86 has some unusual NaNs which cannot be represented in any other
+ // format; note them here.
+ fn is_x87_double_extended<S: Semantics>() -> bool {
+ S::QNAN_SIGNIFICAND == X87DoubleExtendedS::QNAN_SIGNIFICAND
+ }
+ let x87_special_nan = is_x87_double_extended::<S>()
+ && !is_x87_double_extended::<T>()
+ && r.category == Category::NaN
+ && (r.sig[0] & S::QNAN_SIGNIFICAND) != S::QNAN_SIGNIFICAND;
+
+ // If this is a truncation of a denormal number, and the target semantics
+ // has larger exponent range than the source semantics (this can happen
+ // when truncating from PowerPC double-double to double format), the
+ // right shift could lose result mantissa bits. Adjust exponent instead
+ // of performing excessive shift.
+ let mut shift = T::PRECISION as ExpInt - S::PRECISION as ExpInt;
+ if shift < 0 && r.is_finite_non_zero() {
+ let mut exp_change = sig::omsb(&r.sig) as ExpInt - S::PRECISION as ExpInt;
+ if r.exp + exp_change < T::MIN_EXP {
+ exp_change = T::MIN_EXP - r.exp;
+ }
+ if exp_change < shift {
+ exp_change = shift;
+ }
+ if exp_change < 0 {
+ shift -= exp_change;
+ r.exp += exp_change;
+ }
+ }
+
+ // If this is a truncation, perform the shift.
+ let loss = if shift < 0 && (r.is_finite_non_zero() || r.category == Category::NaN) {
+ sig::shift_right(&mut r.sig, &mut 0, -shift as usize)
+ } else {
+ Loss::ExactlyZero
+ };
+
+ // If this is an extension, perform the shift.
+ if shift > 0 && (r.is_finite_non_zero() || r.category == Category::NaN) {
+ sig::shift_left(&mut r.sig, &mut 0, shift as usize);
+ }
+
+ let status;
+ if r.is_finite_non_zero() {
+ r = unpack!(status=, r.normalize(round, loss));
+ *loses_info = status != Status::OK;
+ } else if r.category == Category::NaN {
+ *loses_info = loss != Loss::ExactlyZero || x87_special_nan;
+
+ // For x87 extended precision, we want to make a NaN, not a special NaN if
+ // the input wasn't special either.
+ if !x87_special_nan && is_x87_double_extended::<T>() {
+ sig::set_bit(&mut r.sig, T::PRECISION - 1);
+ }
+
+            // Converting an sNaN creates a qNaN and raises an exception (invalid op).
+            // This also guarantees that an sNaN does not become Inf on a truncation
+            // that loses all payload bits.
+ if self.is_signaling() {
+ // Quiet signaling NaN.
+ sig::set_bit(&mut r.sig, T::QNAN_BIT);
+ status = Status::INVALID_OP;
+ } else {
+ status = Status::OK;
+ }
+ } else {
+ *loses_info = false;
+ status = Status::OK;
+ }
+
+ status.and(r)
+ }
+}
+
+impl<S: Semantics> IeeeFloat<S> {
+ /// Handle positive overflow. We either return infinity or
+ /// the largest finite number. For negative overflow,
+ /// negate the `round` argument before calling.
+ fn overflow_result(round: Round) -> StatusAnd<Self> {
+ match round {
+ // Infinity?
+ Round::NearestTiesToEven | Round::NearestTiesToAway | Round::TowardPositive => {
+ (Status::OVERFLOW | Status::INEXACT).and(Self::INFINITY)
+ }
+ // Otherwise we become the largest finite number.
+ Round::TowardNegative | Round::TowardZero => Status::INEXACT.and(Self::largest()),
+ }
+ }
+
+ /// Returns `true` if, when truncating the current number, with `bit` the
+ /// new LSB, with the given lost fraction and rounding mode, the result
+ /// would need to be rounded away from zero (i.e., by increasing the
+ /// signficand). This routine must work for `Category::Zero` of both signs, and
+ /// `Category::Normal` numbers.
+ fn round_away_from_zero(&self, round: Round, loss: Loss, bit: usize) -> bool {
+ // NaNs and infinities should not have lost fractions.
+ assert!(self.is_finite_non_zero() || self.is_zero());
+
+ // Current callers never pass this so we don't handle it.
+ assert_ne!(loss, Loss::ExactlyZero);
+
+ match round {
+ Round::NearestTiesToAway => loss == Loss::ExactlyHalf || loss == Loss::MoreThanHalf,
+ Round::NearestTiesToEven => {
+ if loss == Loss::MoreThanHalf {
+ return true;
+ }
+
+ // Our zeros don't have a significand to test.
+ if loss == Loss::ExactlyHalf && self.category != Category::Zero {
+ return sig::get_bit(&self.sig, bit);
+ }
+
+ false
+ }
+ Round::TowardZero => false,
+ Round::TowardPositive => !self.sign,
+ Round::TowardNegative => self.sign,
+ }
+ }
+
+ fn normalize(mut self, round: Round, mut loss: Loss) -> StatusAnd<Self> {
+ if !self.is_finite_non_zero() {
+ return Status::OK.and(self);
+ }
+
+ // Before rounding normalize the exponent of Category::Normal numbers.
+ let mut omsb = sig::omsb(&self.sig);
+
+ if omsb > 0 {
+ // OMSB is numbered from 1. We want to place it in the integer
+ // bit numbered PRECISION if possible, with a compensating change in
+ // the exponent.
+ let mut final_exp = self.exp.saturating_add(omsb as ExpInt - S::PRECISION as ExpInt);
+
+ // If the resulting exponent is too high, overflow according to
+ // the rounding mode.
+ if final_exp > S::MAX_EXP {
+ let round = if self.sign { -round } else { round };
+ return Self::overflow_result(round).map(|r| r.copy_sign(self));
+ }
+
+ // Subnormal numbers have exponent MIN_EXP, and their MSB
+ // is forced based on that.
+ if final_exp < S::MIN_EXP {
+ final_exp = S::MIN_EXP;
+ }
+
+ // Shifting left is easy as we don't lose precision.
+ if final_exp < self.exp {
+ assert_eq!(loss, Loss::ExactlyZero);
+
+ let exp_change = (self.exp - final_exp) as usize;
+ sig::shift_left(&mut self.sig, &mut self.exp, exp_change);
+
+ return Status::OK.and(self);
+ }
+
+ // Shift right and capture any new lost fraction.
+ if final_exp > self.exp {
+ let exp_change = (final_exp - self.exp) as usize;
+ loss = sig::shift_right(&mut self.sig, &mut self.exp, exp_change).combine(loss);
+
+ // Keep OMSB up-to-date.
+ omsb = omsb.saturating_sub(exp_change);
+ }
+ }
+
+ // Now round the number according to round given the lost
+ // fraction.
+
+ // As specified in IEEE 754, since we do not trap we do not report
+ // underflow for exact results.
+ if loss == Loss::ExactlyZero {
+ // Canonicalize zeros.
+ if omsb == 0 {
+ self.category = Category::Zero;
+ }
+
+ return Status::OK.and(self);
+ }
+
+ // Increment the significand if we're rounding away from zero.
+ if self.round_away_from_zero(round, loss, 0) {
+ if omsb == 0 {
+ self.exp = S::MIN_EXP;
+ }
+
+ // We should never overflow.
+ assert_eq!(sig::increment(&mut self.sig), 0);
+ omsb = sig::omsb(&self.sig);
+
+ // Did the significand increment overflow?
+ if omsb == S::PRECISION + 1 {
+ // Renormalize by incrementing the exponent and shifting our
+ // significand right one. However if we already have the
+ // maximum exponent we overflow to infinity.
+ if self.exp == S::MAX_EXP {
+ self.category = Category::Infinity;
+
+ return (Status::OVERFLOW | Status::INEXACT).and(self);
+ }
+
+ let _: Loss = sig::shift_right(&mut self.sig, &mut self.exp, 1);
+
+ return Status::INEXACT.and(self);
+ }
+ }
+
+ // The normal case - we were and are not denormal, and any
+ // significand increment above didn't overflow.
+ if omsb == S::PRECISION {
+ return Status::INEXACT.and(self);
+ }
+
+ // We have a non-zero denormal.
+ assert!(omsb < S::PRECISION);
+
+ // Canonicalize zeros.
+ if omsb == 0 {
+ self.category = Category::Zero;
+ }
+
+ // The Category::Zero case is a denormal that underflowed to zero.
+ (Status::UNDERFLOW | Status::INEXACT).and(self)
+ }
+
+ fn from_hexadecimal_string(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
+ let mut r = IeeeFloat {
+ sig: [0],
+ exp: 0,
+ category: Category::Normal,
+ sign: false,
+ marker: PhantomData,
+ };
+
+ let mut any_digits = false;
+ let mut has_exp = false;
+ let mut bit_pos = LIMB_BITS as isize;
+ let mut loss = None;
+
+ // Without leading or trailing zeros, irrespective of the dot.
+ let mut first_sig_digit = None;
+ let mut dot = s.len();
+
+ for (p, c) in s.char_indices() {
+ // Skip leading zeros and any (hexa)decimal point.
+ if c == '.' {
+ if dot != s.len() {
+ return Err(ParseError("String contains multiple dots"));
+ }
+ dot = p;
+ } else if let Some(hex_value) = c.to_digit(16) {
+ any_digits = true;
+
+ if first_sig_digit.is_none() {
+ if hex_value == 0 {
+ continue;
+ }
+ first_sig_digit = Some(p);
+ }
+
+ // Store the number while we have space.
+ bit_pos -= 4;
+ if bit_pos >= 0 {
+ r.sig[0] |= (hex_value as Limb) << bit_pos;
+ // If zero or one-half (the hexadecimal digit 8) are followed
+ // by non-zero, they're a little more than zero or one-half.
+ } else if let Some(ref mut loss) = loss {
+ if hex_value != 0 {
+ if *loss == Loss::ExactlyZero {
+ *loss = Loss::LessThanHalf;
+ }
+ if *loss == Loss::ExactlyHalf {
+ *loss = Loss::MoreThanHalf;
+ }
+ }
+ } else {
+ loss = Some(match hex_value {
+ 0 => Loss::ExactlyZero,
+ 1..=7 => Loss::LessThanHalf,
+ 8 => Loss::ExactlyHalf,
+ 9..=15 => Loss::MoreThanHalf,
+ _ => unreachable!(),
+ });
+ }
+ } else if c == 'p' || c == 'P' {
+ if !any_digits {
+ return Err(ParseError("Significand has no digits"));
+ }
+
+ if dot == s.len() {
+ dot = p;
+ }
+
+ let mut chars = s[p + 1..].chars().peekable();
+
+ // Adjust for the given exponent.
+ let exp_minus = chars.peek() == Some(&'-');
+ if exp_minus || chars.peek() == Some(&'+') {
+ chars.next();
+ }
+
+ for c in chars {
+ if let Some(value) = c.to_digit(10) {
+ has_exp = true;
+ r.exp = r.exp.saturating_mul(10).saturating_add(value as ExpInt);
+ } else {
+ return Err(ParseError("Invalid character in exponent"));
+ }
+ }
+ if !has_exp {
+ return Err(ParseError("Exponent has no digits"));
+ }
+
+ if exp_minus {
+ r.exp = -r.exp;
+ }
+
+ break;
+ } else {
+ return Err(ParseError("Invalid character in significand"));
+ }
+ }
+ if !any_digits {
+ return Err(ParseError("Significand has no digits"));
+ }
+
+ // Hex floats require an exponent but not a hexadecimal point.
+ if !has_exp {
+ return Err(ParseError("Hex strings require an exponent"));
+ }
+
+ // Ignore the exponent if we are zero.
+ let first_sig_digit = match first_sig_digit {
+ Some(p) => p,
+ None => return Ok(Status::OK.and(Self::ZERO)),
+ };
+
+ // Calculate the exponent adjustment implicit in the number of
+ // significant digits and adjust for writing the significand starting
+ // at the most significant nibble.
+ let exp_adjustment = if dot > first_sig_digit {
+ ExpInt::try_from(dot - first_sig_digit).unwrap()
+ } else {
+ -ExpInt::try_from(first_sig_digit - dot - 1).unwrap()
+ };
+ let exp_adjustment = exp_adjustment
+ .saturating_mul(4)
+ .saturating_sub(1)
+ .saturating_add(S::PRECISION as ExpInt)
+ .saturating_sub(LIMB_BITS as ExpInt);
+ r.exp = r.exp.saturating_add(exp_adjustment);
+
+ Ok(r.normalize(round, loss.unwrap_or(Loss::ExactlyZero)))
+ }
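+
+    // Worked example (illustrative): "0x1.8p1" has significand 0x1.8 = 1.5
+    // and binary exponent 1, so it parses to exactly 3.0 with `Status::OK`;
+    // hex literals always require the `p` exponent, but the hexadecimal
+    // point is optional ("0x3p0" is also exactly 3.0).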
+
+ fn from_decimal_string(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
+ // Given a normal decimal floating point number of the form
+ //
+ // dddd.dddd[eE][+-]ddd
+ //
+ // where the decimal point and exponent are optional, fill out the
+ // variables below. Exponent is appropriate if the significand is
+ // treated as an integer, and normalized_exp if the significand
+ // is taken to have the decimal point after a single leading
+ // non-zero digit.
+ //
+ // If the value is zero, first_sig_digit is None.
+
+ let mut any_digits = false;
+ let mut dec_exp = 0i32;
+
+ // Without leading or trailing zeros, irrespective of the dot.
+ let mut first_sig_digit = None;
+ let mut last_sig_digit = 0;
+ let mut dot = s.len();
+
+ for (p, c) in s.char_indices() {
+ if c == '.' {
+ if dot != s.len() {
+ return Err(ParseError("String contains multiple dots"));
+ }
+ dot = p;
+ } else if let Some(dec_value) = c.to_digit(10) {
+ any_digits = true;
+
+ if dec_value != 0 {
+ if first_sig_digit.is_none() {
+ first_sig_digit = Some(p);
+ }
+ last_sig_digit = p;
+ }
+ } else if c == 'e' || c == 'E' {
+ if !any_digits {
+ return Err(ParseError("Significand has no digits"));
+ }
+
+ if dot == s.len() {
+ dot = p;
+ }
+
+ let mut chars = s[p + 1..].chars().peekable();
+
+ // Adjust for the given exponent.
+ let exp_minus = chars.peek() == Some(&'-');
+ if exp_minus || chars.peek() == Some(&'+') {
+ chars.next();
+ }
+
+ any_digits = false;
+ for c in chars {
+ if let Some(value) = c.to_digit(10) {
+ any_digits = true;
+ dec_exp = dec_exp.saturating_mul(10).saturating_add(value as i32);
+ } else {
+ return Err(ParseError("Invalid character in exponent"));
+ }
+ }
+ if !any_digits {
+ return Err(ParseError("Exponent has no digits"));
+ }
+
+ if exp_minus {
+ dec_exp = -dec_exp;
+ }
+
+ break;
+ } else {
+ return Err(ParseError("Invalid character in significand"));
+ }
+ }
+ if !any_digits {
+ return Err(ParseError("Significand has no digits"));
+ }
+
+ // Test if we have a zero number allowing for non-zero exponents.
+ let first_sig_digit = match first_sig_digit {
+ Some(p) => p,
+ None => return Ok(Status::OK.and(Self::ZERO)),
+ };
+
+ // Adjust the exponents for any decimal point.
+ if dot > last_sig_digit {
+ dec_exp = dec_exp.saturating_add((dot - last_sig_digit - 1) as i32);
+ } else {
+ dec_exp = dec_exp.saturating_sub((last_sig_digit - dot) as i32);
+ }
+ let significand_digits = last_sig_digit - first_sig_digit + 1
+ - (dot > first_sig_digit && dot < last_sig_digit) as usize;
+ let normalized_exp = dec_exp.saturating_add(significand_digits as i32 - 1);
+
+ // Handle the cases where exponents are obviously too large or too
+ // small. Writing L for log 10 / log 2, a number d.ddddd*10^dec_exp
+ // definitely overflows if
+ //
+ // (dec_exp - 1) * L >= MAX_EXP
+ //
+ // and definitely underflows to zero where
+ //
+ // (dec_exp + 1) * L <= MIN_EXP - PRECISION
+ //
+ // With integer arithmetic the tightest bounds for L are
+ //
+ // 93/28 < L < 196/59 [ numerator <= 256 ]
+ // 42039/12655 < L < 28738/8651 [ numerator <= 65536 ]
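+        //
+        // Worked example (illustrative): for IEEE double, MAX_EXP = 1023, so
+        // the first check fires once normalized_exp >= 309; d.ddd... * 10^309
+        // always exceeds the largest double (~1.8e308). Borderline values
+        // such as 9.9e308 fall through to the full-precision path below,
+        // which still reports overflow after computing the result.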
+
+ // Check for MAX_EXP.
+ if normalized_exp.saturating_sub(1).saturating_mul(42039) >= 12655 * S::MAX_EXP as i32 {
+ // Overflow and round.
+ return Ok(Self::overflow_result(round));
+ }
+
+ // Check for MIN_EXP.
+ if normalized_exp.saturating_add(1).saturating_mul(28738)
+ <= 8651 * (S::MIN_EXP as i32 - S::PRECISION as i32)
+ {
+ // Underflow to zero and round.
+ let r =
+ if round == Round::TowardPositive { IeeeFloat::SMALLEST } else { IeeeFloat::ZERO };
+ return Ok((Status::UNDERFLOW | Status::INEXACT).and(r));
+ }
+
+ // A tight upper bound on number of bits required to hold an
+ // N-digit decimal integer is N * 196 / 59. Allocate enough space
+ // to hold the full significand, and an extra limb required by
+ // tcMultiplyPart.
+ let max_limbs = limbs_for_bits(1 + 196 * significand_digits / 59);
+ let mut dec_sig: SmallVec<[Limb; 1]> = SmallVec::with_capacity(max_limbs);
+
+ // Convert to binary efficiently - we do almost all multiplication
+        // in a Limb. Only when this would overflow do we do a single
+        // bignum multiplication, and then we revert again to multiplication
+ // in a Limb.
+ let mut chars = s[first_sig_digit..=last_sig_digit].chars();
+ loop {
+ let mut val = 0;
+ let mut multiplier = 1;
+
+ loop {
+ let dec_value = match chars.next() {
+ Some('.') => continue,
+ Some(c) => c.to_digit(10).unwrap(),
+ None => break,
+ };
+
+ multiplier *= 10;
+ val = val * 10 + dec_value as Limb;
+
+ // The maximum number that can be multiplied by ten with any
+ // digit added without overflowing a Limb.
+ if multiplier > (!0 - 9) / 10 {
+ break;
+ }
+ }
+
+ // If we've consumed no digits, we're done.
+ if multiplier == 1 {
+ break;
+ }
+
+ // Multiply out the current limb.
+ let mut carry = val;
+ for x in &mut dec_sig {
+ let [low, mut high] = sig::widening_mul(*x, multiplier);
+
+ // Now add carry.
+ let (low, overflow) = low.overflowing_add(carry);
+ high += overflow as Limb;
+
+ *x = low;
+ carry = high;
+ }
+
+ // If we had carry, we need another limb (likely but not guaranteed).
+ if carry > 0 {
+ dec_sig.push(carry);
+ }
+ }
+
+ // Calculate pow(5, abs(dec_exp)) into `pow5_full`.
+ // The *_calc Vec's are reused scratch space, as an optimization.
+ let (pow5_full, mut pow5_calc, mut sig_calc, mut sig_scratch_calc) = {
+ let mut power = dec_exp.abs() as usize;
+
+ const FIRST_EIGHT_POWERS: [Limb; 8] = [1, 5, 25, 125, 625, 3125, 15625, 78125];
+
+ let mut p5_scratch = smallvec![];
+ let mut p5: SmallVec<[Limb; 1]> = smallvec![FIRST_EIGHT_POWERS[4]];
+
+ let mut r_scratch = smallvec![];
+ let mut r: SmallVec<[Limb; 1]> = smallvec![FIRST_EIGHT_POWERS[power & 7]];
+ power >>= 3;
+
+ while power > 0 {
+ // Calculate pow(5,pow(2,n+3)).
+ p5_scratch.resize(p5.len() * 2, 0);
+ let _: Loss = sig::mul(&mut p5_scratch, &mut 0, &p5, &p5, p5.len() * 2 * LIMB_BITS);
+ while p5_scratch.last() == Some(&0) {
+ p5_scratch.pop();
+ }
+ mem::swap(&mut p5, &mut p5_scratch);
+
+ if power & 1 != 0 {
+ r_scratch.resize(r.len() + p5.len(), 0);
+ let _: Loss =
+ sig::mul(&mut r_scratch, &mut 0, &r, &p5, (r.len() + p5.len()) * LIMB_BITS);
+ while r_scratch.last() == Some(&0) {
+ r_scratch.pop();
+ }
+ mem::swap(&mut r, &mut r_scratch);
+ }
+
+ power >>= 1;
+ }
+
+ (r, r_scratch, p5, p5_scratch)
+ };
+
+ // Attempt dec_sig * 10^dec_exp with increasing precision.
+ let mut attempt = 0;
+ loop {
+ let calc_precision = (LIMB_BITS << attempt) - 1;
+ attempt += 1;
+
+ let calc_normal_from_limbs = |sig: &mut SmallVec<[Limb; 1]>,
+ limbs: &[Limb]|
+ -> StatusAnd<ExpInt> {
+ sig.resize(limbs_for_bits(calc_precision), 0);
+ let (mut loss, mut exp) = sig::from_limbs(sig, limbs, calc_precision);
+
+ // Before rounding normalize the exponent of Category::Normal numbers.
+ let mut omsb = sig::omsb(sig);
+
+ assert_ne!(omsb, 0);
+
+ // OMSB is numbered from 1. We want to place it in the integer
+ // bit numbered PRECISION if possible, with a compensating change in
+ // the exponent.
+ let final_exp = exp.saturating_add(omsb as ExpInt - calc_precision as ExpInt);
+
+ // Shifting left is easy as we don't lose precision.
+ if final_exp < exp {
+ assert_eq!(loss, Loss::ExactlyZero);
+
+ let exp_change = (exp - final_exp) as usize;
+ sig::shift_left(sig, &mut exp, exp_change);
+
+ return Status::OK.and(exp);
+ }
+
+ // Shift right and capture any new lost fraction.
+ if final_exp > exp {
+ let exp_change = (final_exp - exp) as usize;
+ loss = sig::shift_right(sig, &mut exp, exp_change).combine(loss);
+
+ // Keep OMSB up-to-date.
+ omsb = omsb.saturating_sub(exp_change);
+ }
+
+ assert_eq!(omsb, calc_precision);
+
+ // Now round the number according to round given the lost
+ // fraction.
+
+ // As specified in IEEE 754, since we do not trap we do not report
+ // underflow for exact results.
+ if loss == Loss::ExactlyZero {
+ return Status::OK.and(exp);
+ }
+
+ // Increment the significand if we're rounding away from zero.
+ if loss == Loss::MoreThanHalf || loss == Loss::ExactlyHalf && sig::get_bit(sig, 0) {
+ // We should never overflow.
+ assert_eq!(sig::increment(sig), 0);
+ omsb = sig::omsb(sig);
+
+ // Did the significand increment overflow?
+ if omsb == calc_precision + 1 {
+ let _: Loss = sig::shift_right(sig, &mut exp, 1);
+
+ return Status::INEXACT.and(exp);
+ }
+ }
+
+ // The normal case - we were and are not denormal, and any
+ // significand increment above didn't overflow.
+ Status::INEXACT.and(exp)
+ };
+
+ let status;
+ let mut exp = unpack!(status=,
+ calc_normal_from_limbs(&mut sig_calc, &dec_sig));
+ let pow5_status;
+ let pow5_exp = unpack!(pow5_status=,
+ calc_normal_from_limbs(&mut pow5_calc, &pow5_full));
+
+ // Add dec_exp, as 10^n = 5^n * 2^n.
+ exp += dec_exp as ExpInt;
+
+ let mut used_bits = S::PRECISION;
+ let mut truncated_bits = calc_precision - used_bits;
+
+ let half_ulp_err1 = (status != Status::OK) as Limb;
+ let (calc_loss, half_ulp_err2);
+ if dec_exp >= 0 {
+ exp += pow5_exp;
+
+ sig_scratch_calc.resize(sig_calc.len() + pow5_calc.len(), 0);
+ calc_loss = sig::mul(
+ &mut sig_scratch_calc,
+ &mut exp,
+ &sig_calc,
+ &pow5_calc,
+ calc_precision,
+ );
+ mem::swap(&mut sig_calc, &mut sig_scratch_calc);
+
+ half_ulp_err2 = (pow5_status != Status::OK) as Limb;
+ } else {
+ exp -= pow5_exp;
+
+ sig_scratch_calc.resize(sig_calc.len(), 0);
+ calc_loss = sig::div(
+ &mut sig_scratch_calc,
+ &mut exp,
+ &mut sig_calc,
+ &mut pow5_calc,
+ calc_precision,
+ );
+ mem::swap(&mut sig_calc, &mut sig_scratch_calc);
+
+ // Denormal numbers have less precision.
+ if exp < S::MIN_EXP {
+ truncated_bits += (S::MIN_EXP - exp) as usize;
+ used_bits = calc_precision.saturating_sub(truncated_bits);
+ }
+ // Extra half-ulp lost in reciprocal of exponent.
+ half_ulp_err2 =
+ 2 * (pow5_status != Status::OK || calc_loss != Loss::ExactlyZero) as Limb;
+ }
+
+ // Both sig::mul and sig::div return the
+ // result with the integer bit set.
+ assert!(sig::get_bit(&sig_calc, calc_precision - 1));
+
+ // The error from the true value, in half-ulps, on multiplying two
+ // floating point numbers, which differ from the value they
+            // approximate by at most half_ulp_err1 and half_ulp_err2 half-ulps,
+            // is strictly less than the `half_ulp_err` value computed below.
+ //
+ // See "How to Read Floating Point Numbers Accurately" by William D Clinger.
+ assert!(half_ulp_err1 < 2 || half_ulp_err2 < 2 || (half_ulp_err1 + half_ulp_err2 < 8));
+
+ let inexact = (calc_loss != Loss::ExactlyZero) as Limb;
+ let half_ulp_err = if half_ulp_err1 + half_ulp_err2 == 0 {
+ inexact * 2 // <= inexact half-ulps.
+ } else {
+ inexact + 2 * (half_ulp_err1 + half_ulp_err2)
+ };
+
+ let ulps_from_boundary = {
+ let bits = calc_precision - used_bits - 1;
+
+ let i = bits / LIMB_BITS;
+ let limb = sig_calc[i] & (!0 >> (LIMB_BITS - 1 - bits % LIMB_BITS));
+ let boundary = match round {
+ Round::NearestTiesToEven | Round::NearestTiesToAway => 1 << (bits % LIMB_BITS),
+ _ => 0,
+ };
+ if i == 0 {
+ let delta = limb.wrapping_sub(boundary);
+ cmp::min(delta, delta.wrapping_neg())
+ } else if limb == boundary {
+ if !sig::is_all_zeros(&sig_calc[1..i]) {
+ !0 // A lot.
+ } else {
+ sig_calc[0]
+ }
+ } else if limb == boundary.wrapping_sub(1) {
+ if sig_calc[1..i].iter().any(|&x| x.wrapping_neg() != 1) {
+ !0 // A lot.
+ } else {
+ sig_calc[0].wrapping_neg()
+ }
+ } else {
+ !0 // A lot.
+ }
+ };
+
+ // Are we guaranteed to round correctly if we truncate?
+ if ulps_from_boundary.saturating_mul(2) >= half_ulp_err {
+ let mut r = IeeeFloat {
+ sig: [0],
+ exp,
+ category: Category::Normal,
+ sign: false,
+ marker: PhantomData,
+ };
+ sig::extract(&mut r.sig, &sig_calc, used_bits, calc_precision - used_bits);
+ // If we extracted less bits above we must adjust our exponent
+ // to compensate for the implicit right shift.
+ r.exp += (S::PRECISION - used_bits) as ExpInt;
+ let loss = Loss::through_truncation(&sig_calc, truncated_bits);
+ return Ok(r.normalize(round, loss));
+ }
+ }
+ }
+}
+
+impl Loss {
+ /// Combine the effect of two lost fractions.
+ fn combine(self, less_significant: Loss) -> Loss {
+ let mut more_significant = self;
+ if less_significant != Loss::ExactlyZero {
+ if more_significant == Loss::ExactlyZero {
+ more_significant = Loss::LessThanHalf;
+ } else if more_significant == Loss::ExactlyHalf {
+ more_significant = Loss::MoreThanHalf;
+ }
+ }
+
+ more_significant
+ }
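+
+    // Illustrative: a non-zero less-significant loss "bumps" an exact or
+    // half-way more-significant loss, e.g.
+    //
+    //     assert_eq!(Loss::ExactlyZero.combine(Loss::ExactlyHalf), Loss::LessThanHalf);
+    //     assert_eq!(Loss::ExactlyHalf.combine(Loss::LessThanHalf), Loss::MoreThanHalf);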
+
+ /// Returns the fraction lost were a bignum truncated losing the least
+ /// significant `bits` bits.
+ fn through_truncation(limbs: &[Limb], bits: usize) -> Loss {
+ if bits == 0 {
+ return Loss::ExactlyZero;
+ }
+
+ let half_bit = bits - 1;
+ let half_limb = half_bit / LIMB_BITS;
+ let (half_limb, rest) = if half_limb < limbs.len() {
+ (limbs[half_limb], &limbs[..half_limb])
+ } else {
+ (0, limbs)
+ };
+ let half = 1 << (half_bit % LIMB_BITS);
+ let has_half = half_limb & half != 0;
+ let has_rest = half_limb & (half - 1) != 0 || !sig::is_all_zeros(rest);
+
+ match (has_half, has_rest) {
+ (false, false) => Loss::ExactlyZero,
+ (false, true) => Loss::LessThanHalf,
+ (true, false) => Loss::ExactlyHalf,
+ (true, true) => Loss::MoreThanHalf,
+ }
+ }
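+
+    // Illustrative: truncating the low 4 bits of 0b1000 loses exactly half
+    // (the dropped bits are 1000, i.e., one half of the new LSB), so
+    //
+    //     assert_eq!(Loss::through_truncation(&[0b1000], 4), Loss::ExactlyHalf);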
+}
+
+/// Implementation details of IeeeFloat significands, such as big integer arithmetic.
+/// As a rule of thumb, no functions in this module should dynamically allocate.
+mod sig {
+ use super::{limbs_for_bits, ExpInt, Limb, Loss, LIMB_BITS};
+ use core::cmp::Ordering;
+ use core::iter;
+ use core::mem;
+
+ pub(super) fn is_all_zeros(limbs: &[Limb]) -> bool {
+ limbs.iter().all(|&l| l == 0)
+ }
+
+    /// One-based (not zero-based) LSB. That is, returns 0 for a zeroed significand.
+ pub(super) fn olsb(limbs: &[Limb]) -> usize {
+ limbs
+ .iter()
+ .enumerate()
+ .find(|(_, &limb)| limb != 0)
+ .map_or(0, |(i, limb)| i * LIMB_BITS + limb.trailing_zeros() as usize + 1)
+ }
+
+    /// One-based (not zero-based) MSB. That is, returns 0 for a zeroed significand.
+ pub(super) fn omsb(limbs: &[Limb]) -> usize {
+ limbs
+ .iter()
+ .enumerate()
+ .rfind(|(_, &limb)| limb != 0)
+ .map_or(0, |(i, limb)| (i + 1) * LIMB_BITS - limb.leading_zeros() as usize)
+ }
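+
+    // Illustrative: for a single limb holding 0b0110, the one-based bit
+    // positions are olsb == 2 and omsb == 3, while a zeroed significand
+    // yields 0 for both.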
+
+ /// Comparison (unsigned) of two significands.
+ pub(super) fn cmp(a: &[Limb], b: &[Limb]) -> Ordering {
+ assert_eq!(a.len(), b.len());
+ for (a, b) in a.iter().zip(b).rev() {
+ match a.cmp(b) {
+ Ordering::Equal => {}
+ o => return o,
+ }
+ }
+
+ Ordering::Equal
+ }
+
+ /// Extracts the given bit.
+ pub(super) fn get_bit(limbs: &[Limb], bit: usize) -> bool {
+ limbs[bit / LIMB_BITS] & (1 << (bit % LIMB_BITS)) != 0
+ }
+
+ /// Sets the given bit.
+ pub(super) fn set_bit(limbs: &mut [Limb], bit: usize) {
+ limbs[bit / LIMB_BITS] |= 1 << (bit % LIMB_BITS);
+ }
+
+ /// Clear the given bit.
+ pub(super) fn clear_bit(limbs: &mut [Limb], bit: usize) {
+ limbs[bit / LIMB_BITS] &= !(1 << (bit % LIMB_BITS));
+ }
+
+    /// Shifts `dst` left by `bits` bits, subtracting `bits` from its exponent.
+ pub(super) fn shift_left(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) {
+ if bits > 0 {
+ // Our exponent should not underflow.
+ *exp = exp.checked_sub(bits as ExpInt).unwrap();
+
+ // Jump is the inter-limb jump; shift is the intra-limb shift.
+ let jump = bits / LIMB_BITS;
+ let shift = bits % LIMB_BITS;
+
+ for i in (0..dst.len()).rev() {
+ let mut limb;
+
+ if i < jump {
+ limb = 0;
+ } else {
+ // dst[i] comes from the two limbs src[i - jump] and, if we have
+ // an intra-limb shift, src[i - jump - 1].
+ limb = dst[i - jump];
+ if shift > 0 {
+ limb <<= shift;
+ if i > jump {
+ limb |= dst[i - jump - 1] >> (LIMB_BITS - shift);
+ }
+ }
+ }
+
+ dst[i] = limb;
+ }
+ }
+ }
+
+    /// Shifts `dst` right by `bits` bits, noting the lost fraction.
+ pub(super) fn shift_right(dst: &mut [Limb], exp: &mut ExpInt, bits: usize) -> Loss {
+ let loss = Loss::through_truncation(dst, bits);
+
+ if bits > 0 {
+ // Our exponent should not overflow.
+ *exp = exp.checked_add(bits as ExpInt).unwrap();
+
+ // Jump is the inter-limb jump; shift is the intra-limb shift.
+ let jump = bits / LIMB_BITS;
+ let shift = bits % LIMB_BITS;
+
+ // Perform the shift. This leaves the most significant `bits` bits
+ // of the result at zero.
+ for i in 0..dst.len() {
+ let mut limb;
+
+ if i + jump >= dst.len() {
+ limb = 0;
+ } else {
+ limb = dst[i + jump];
+ if shift > 0 {
+ limb >>= shift;
+ if i + jump + 1 < dst.len() {
+ limb |= dst[i + jump + 1] << (LIMB_BITS - shift);
+ }
+ }
+ }
+
+ dst[i] = limb;
+ }
+ }
+
+ loss
+ }
+
+    /// Copies the bit vector of width `src_bits` from `src`, starting at bit `src_lsb`,
+    /// to `dst`, such that bit `src_lsb` becomes the least significant bit of `dst`.
+ /// All high bits above `src_bits` in `dst` are zero-filled.
+ pub(super) fn extract(dst: &mut [Limb], src: &[Limb], src_bits: usize, src_lsb: usize) {
+ if src_bits == 0 {
+ return;
+ }
+
+ let dst_limbs = limbs_for_bits(src_bits);
+ assert!(dst_limbs <= dst.len());
+
+ let src = &src[src_lsb / LIMB_BITS..];
+ dst[..dst_limbs].copy_from_slice(&src[..dst_limbs]);
+
+ let shift = src_lsb % LIMB_BITS;
+ let _: Loss = shift_right(&mut dst[..dst_limbs], &mut 0, shift);
+
+ // We now have (dst_limbs * LIMB_BITS - shift) bits from `src`
+        // in `dst`. If this is less than `src_bits`, append the rest; otherwise
+ // clear the high bits.
+ let n = dst_limbs * LIMB_BITS - shift;
+ if n < src_bits {
+ let mask = (1 << (src_bits - n)) - 1;
+ dst[dst_limbs - 1] |= (src[dst_limbs] & mask) << (n % LIMB_BITS);
+ } else if n > src_bits && src_bits % LIMB_BITS > 0 {
+ dst[dst_limbs - 1] &= (1 << (src_bits % LIMB_BITS)) - 1;
+ }
+
+ // Clear high limbs.
+ for x in &mut dst[dst_limbs..] {
+ *x = 0;
+ }
+ }
+
+    /// We want the most significant `precision` bits of `src`. There may not
+ /// be that many; extract what we can.
+ pub(super) fn from_limbs(dst: &mut [Limb], src: &[Limb], precision: usize) -> (Loss, ExpInt) {
+ let omsb = omsb(src);
+
+ if precision <= omsb {
+ extract(dst, src, precision, omsb - precision);
+ (Loss::through_truncation(src, omsb - precision), omsb as ExpInt - 1)
+ } else {
+ extract(dst, src, omsb, 0);
+ (Loss::ExactlyZero, precision as ExpInt - 1)
+ }
+ }
+
+ /// For every consecutive chunk of `bits` bits from `limbs`,
+ /// going from most significant to the least significant bits,
+ /// call `f` to transform those bits and store the result back.
+ pub(super) fn each_chunk<F: FnMut(Limb) -> Limb>(limbs: &mut [Limb], bits: usize, mut f: F) {
+ assert_eq!(LIMB_BITS % bits, 0);
+ for limb in limbs.iter_mut().rev() {
+ let mut r = 0;
+ for i in (0..LIMB_BITS / bits).rev() {
+ r |= f((*limb >> (i * bits)) & ((1 << bits) - 1)) << (i * bits);
+ }
+ *limb = r;
+ }
+ }
+
+ /// Increment in-place, return the carry flag.
+ pub(super) fn increment(dst: &mut [Limb]) -> Limb {
+ for x in dst {
+ *x = x.wrapping_add(1);
+ if *x != 0 {
+ return 0;
+ }
+ }
+
+ 1
+ }
+
+ /// Decrement in-place, return the borrow flag.
+ pub(super) fn decrement(dst: &mut [Limb]) -> Limb {
+ for x in dst {
+ *x = x.wrapping_sub(1);
+ if *x != !0 {
+ return 0;
+ }
+ }
+
+ 1
+ }
+
+ /// `a += b + c` where `c` is zero or one. Returns the carry flag.
+ pub(super) fn add(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb {
+ assert!(c <= 1);
+
+ for (a, &b) in iter::zip(a, b) {
+ let (r, overflow) = a.overflowing_add(b);
+ let (r, overflow2) = r.overflowing_add(c);
+ *a = r;
+ c = (overflow | overflow2) as Limb;
+ }
+
+ c
+ }
+
+ /// `a -= b + c` where `c` is zero or one. Returns the borrow flag.
+ pub(super) fn sub(a: &mut [Limb], b: &[Limb], mut c: Limb) -> Limb {
+ assert!(c <= 1);
+
+ for (a, &b) in iter::zip(a, b) {
+ let (r, overflow) = a.overflowing_sub(b);
+ let (r, overflow2) = r.overflowing_sub(c);
+ *a = r;
+ c = (overflow | overflow2) as Limb;
+ }
+
+ c
+ }
+
+ /// `a += b` or `a -= b`. Does not preserve `b`.
+ pub(super) fn add_or_sub(
+ a_sig: &mut [Limb],
+ a_exp: &mut ExpInt,
+ a_sign: &mut bool,
+ b_sig: &mut [Limb],
+ b_exp: ExpInt,
+ b_sign: bool,
+ ) -> Loss {
+ // Are we bigger exponent-wise than the RHS?
+ let bits = *a_exp - b_exp;
+
+ // Determine if the operation on the absolute values is effectively
+ // an addition or subtraction.
+ // Subtraction is more subtle than one might naively expect.
+ if *a_sign ^ b_sign {
+ let (reverse, loss);
+
+ if bits == 0 {
+ reverse = cmp(a_sig, b_sig) == Ordering::Less;
+ loss = Loss::ExactlyZero;
+ } else if bits > 0 {
+ loss = shift_right(b_sig, &mut 0, (bits - 1) as usize);
+ shift_left(a_sig, a_exp, 1);
+ reverse = false;
+ } else {
+ loss = shift_right(a_sig, a_exp, (-bits - 1) as usize);
+ shift_left(b_sig, &mut 0, 1);
+ reverse = true;
+ }
+
+ let borrow = (loss != Loss::ExactlyZero) as Limb;
+ if reverse {
+ // The code above is intended to ensure that no borrow is necessary.
+ assert_eq!(sub(b_sig, a_sig, borrow), 0);
+ a_sig.copy_from_slice(b_sig);
+ *a_sign = !*a_sign;
+ } else {
+ // The code above is intended to ensure that no borrow is necessary.
+ assert_eq!(sub(a_sig, b_sig, borrow), 0);
+ }
+
+ // Invert the lost fraction - it was on the RHS and subtracted.
+ match loss {
+ Loss::LessThanHalf => Loss::MoreThanHalf,
+ Loss::MoreThanHalf => Loss::LessThanHalf,
+ _ => loss,
+ }
+ } else {
+ let loss = if bits > 0 {
+ shift_right(b_sig, &mut 0, bits as usize)
+ } else {
+ shift_right(a_sig, a_exp, -bits as usize)
+ };
+ // We have a guard bit; generating a carry cannot happen.
+ assert_eq!(add(a_sig, b_sig, 0), 0);
+ loss
+ }
+ }
+
+ /// `[low, high] = a * b`.
+ ///
+ /// This cannot overflow, because
+ ///
+ /// `(n - 1) * (n - 1) + 2 * (n - 1) == (n - 1) * (n + 1)`
+ ///
+ /// which is less than n<sup>2</sup>.
+ pub(super) fn widening_mul(a: Limb, b: Limb) -> [Limb; 2] {
+ let mut wide = [0, 0];
+
+ if a == 0 || b == 0 {
+ return wide;
+ }
+
+ const HALF_BITS: usize = LIMB_BITS / 2;
+
+ let select = |limb, i| (limb >> (i * HALF_BITS)) & ((1 << HALF_BITS) - 1);
+ for i in 0..2 {
+ for j in 0..2 {
+ let mut x = [select(a, i) * select(b, j), 0];
+ shift_left(&mut x, &mut 0, (i + j) * HALF_BITS);
+ assert_eq!(add(&mut wide, &x, 0), 0);
+ }
+ }
+
+ wide
+ }
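+
+    // Illustrative: with `Limb = u128`, the product is returned as
+    // little-endian halves, e.g.
+    //
+    //     assert_eq!(widening_mul(1 << 127, 2), [0, 1]); // 2^128 = [low, high]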
+
+ /// `dst = a * b` (for normal `a` and `b`). Returns the lost fraction.
+ pub(super) fn mul<'a>(
+ dst: &mut [Limb],
+ exp: &mut ExpInt,
+ mut a: &'a [Limb],
+ mut b: &'a [Limb],
+ precision: usize,
+ ) -> Loss {
+        // Put the narrower number in `a`, for fewer loop iterations below.
+ if a.len() > b.len() {
+ mem::swap(&mut a, &mut b);
+ }
+
+ for x in &mut dst[..b.len()] {
+ *x = 0;
+ }
+
+ for i in 0..a.len() {
+ let mut carry = 0;
+ for j in 0..b.len() {
+ let [low, mut high] = widening_mul(a[i], b[j]);
+
+ // Now add carry.
+ let (low, overflow) = low.overflowing_add(carry);
+ high += overflow as Limb;
+
+ // And now `dst[i + j]`, and store the new low part there.
+ let (low, overflow) = low.overflowing_add(dst[i + j]);
+ high += overflow as Limb;
+
+ dst[i + j] = low;
+ carry = high;
+ }
+ dst[i + b.len()] = carry;
+ }
+
+ // Assume the operands involved in the multiplication are single-precision
+        // FP, and the two multiplicands are:
+ // a = a23 . a22 ... a0 * 2^e1
+ // b = b23 . b22 ... b0 * 2^e2
+ // the result of multiplication is:
+ // dst = c48 c47 c46 . c45 ... c0 * 2^(e1+e2)
+ // Note that there are three significant bits at the left-hand side of the
+ // radix point: two for the multiplication, and an overflow bit for the
+ // addition (that will always be zero at this point). Move the radix point
+        // toward the left by two bits, and adjust the exponent accordingly.
+ *exp += 2;
+
+        // Convert the result having "2 * precision" significant bits back to one
+        // having "precision" significant bits. First, move the radix point from
+        // position "2*precision - 1" to "precision - 1". The exponent needs to be
+        // adjusted by "2*precision - 1" - "precision - 1" = "precision".
+ *exp -= precision as ExpInt + 1;
+
+        // In case the MSB resides at the left-hand side of the radix point, shift the
+        // mantissa right by some amount to make sure the MSB resides right before
+        // the radix point (i.e., "MSB . rest-significant-bits").
+ //
+ // Note that the result is not normalized when "omsb < precision". So, the
+ // caller needs to call IeeeFloat::normalize() if normalized value is
+ // expected.
+ let omsb = omsb(dst);
+ if omsb <= precision { Loss::ExactlyZero } else { shift_right(dst, exp, omsb - precision) }
+ }
+
+ /// `quotient = dividend / divisor`. Returns the lost fraction.
+ /// Does not preserve `dividend` or `divisor`.
+ pub(super) fn div(
+ quotient: &mut [Limb],
+ exp: &mut ExpInt,
+ dividend: &mut [Limb],
+ divisor: &mut [Limb],
+ precision: usize,
+ ) -> Loss {
+ // Normalize the divisor.
+ let bits = precision - omsb(divisor);
+ shift_left(divisor, &mut 0, bits);
+ *exp += bits as ExpInt;
+
+ // Normalize the dividend.
+ let bits = precision - omsb(dividend);
+ shift_left(dividend, exp, bits);
+
+ // Division by 1.
+ let olsb_divisor = olsb(divisor);
+ if olsb_divisor == precision {
+ quotient.copy_from_slice(dividend);
+ return Loss::ExactlyZero;
+ }
+
+ // Ensure the dividend >= divisor initially for the loop below.
+ // Incidentally, this means that the division loop below is
+ // guaranteed to set the integer bit to one.
+ if cmp(dividend, divisor) == Ordering::Less {
+ shift_left(dividend, exp, 1);
+ assert_ne!(cmp(dividend, divisor), Ordering::Less)
+ }
+
+ // Helper for figuring out the lost fraction.
+ let lost_fraction = |dividend: &[Limb], divisor: &[Limb]| match cmp(dividend, divisor) {
+ Ordering::Greater => Loss::MoreThanHalf,
+ Ordering::Equal => Loss::ExactlyHalf,
+ Ordering::Less => {
+ if is_all_zeros(dividend) {
+ Loss::ExactlyZero
+ } else {
+ Loss::LessThanHalf
+ }
+ }
+ };
+
+ // Try to perform a (much faster) short division for small divisors.
+ let divisor_bits = precision - (olsb_divisor - 1);
+ macro_rules! try_short_div {
+ ($W:ty, $H:ty, $half:expr) => {
+ if divisor_bits * 2 <= $half {
+ // Extract the small divisor.
+ let _: Loss = shift_right(divisor, &mut 0, olsb_divisor - 1);
+ let divisor = divisor[0] as $H as $W;
+
+ // Shift the dividend to produce a quotient with the unit bit set.
+ let top_limb = *dividend.last().unwrap();
+ let mut rem = (top_limb >> (LIMB_BITS - (divisor_bits - 1))) as $H;
+ shift_left(dividend, &mut 0, divisor_bits - 1);
+
+ // Apply short division in place on $H (of $half bits) chunks.
+ each_chunk(dividend, $half, |chunk| {
+ let chunk = chunk as $H;
+ let combined = ((rem as $W) << $half) | (chunk as $W);
+ rem = (combined % divisor) as $H;
+ (combined / divisor) as $H as Limb
+ });
+ quotient.copy_from_slice(dividend);
+
+ return lost_fraction(&[(rem as Limb) << 1], &[divisor as Limb]);
+ }
+ };
+ }
+
+ try_short_div!(u32, u16, 16);
+ try_short_div!(u64, u32, 32);
+ try_short_div!(u128, u64, 64);
+
+ // Zero the quotient before setting bits in it.
+ for x in &mut quotient[..limbs_for_bits(precision)] {
+ *x = 0;
+ }
+
+ // Long division.
+ for bit in (0..precision).rev() {
+ if cmp(dividend, divisor) != Ordering::Less {
+ sub(dividend, divisor, 0);
+ set_bit(quotient, bit);
+ }
+ shift_left(dividend, &mut 0, 1);
+ }
+
+ lost_fraction(dividend, divisor)
+ }
+}
diff --git a/compiler/rustc_apfloat/src/lib.rs b/compiler/rustc_apfloat/src/lib.rs
new file mode 100644
index 000000000..cfc3d5b15
--- /dev/null
+++ b/compiler/rustc_apfloat/src/lib.rs
@@ -0,0 +1,693 @@
+//! Port of LLVM's APFloat software floating-point implementation from the
+//! following C++ sources (please update commit hash when backporting):
+//! <https://github.com/llvm-mirror/llvm/tree/23efab2bbd424ed13495a420ad8641cb2c6c28f9>
+//!
+//! * `include/llvm/ADT/APFloat.h` -> `Float` and `FloatConvert` traits
+//! * `lib/Support/APFloat.cpp` -> `ieee` and `ppc` modules
+//! * `unittests/ADT/APFloatTest.cpp` -> `tests` directory
+//!
+//! The port contains no unsafe code, global state, or side-effects in general,
+//! and the only allocations are in the conversion to/from decimal strings.
+//!
+//! Most of the API and the testcases are intact in some form or another,
+//! with some ergonomic changes, such as idiomatic short names, returning
+//! new values instead of mutating the receiver, and having separate method
+//! variants that take a non-default rounding mode (with the suffix `_r`).
+//! Comments have been preserved where possible, only slightly adapted.
+//!
+//! Instead of keeping a pointer to a configuration struct and inspecting it
+//! dynamically on every operation, types (e.g., `ieee::Double`), traits
+//! (e.g., `ieee::Semantics`) and associated constants are employed for
+//! increased type safety and performance.
+//!
+//! On-heap bigints are replaced everywhere (except in decimal conversion)
+//! with short arrays of `type Limb = u128` elements (instead of `u64`).
+//! This allows fitting the largest supported significands in one integer
+//! (`ieee::Quad` and `ppc::Fallback` use slightly less than 128 bits).
+//! All of the functions in the `ieee::sig` module operate on slices.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![no_std]
+#![forbid(unsafe_code)]
+
+#[macro_use]
+extern crate alloc;
+
+use core::cmp::Ordering;
+use core::fmt;
+use core::ops::{Add, Div, Mul, Neg, Rem, Sub};
+use core::ops::{AddAssign, DivAssign, MulAssign, RemAssign, SubAssign};
+use core::str::FromStr;
+
+bitflags::bitflags! {
+ /// IEEE-754R 7: Default exception handling.
+ ///
+ /// UNDERFLOW or OVERFLOW are always returned or-ed with INEXACT.
+ #[must_use]
+ pub struct Status: u8 {
+ const OK = 0x00;
+ const INVALID_OP = 0x01;
+ const DIV_BY_ZERO = 0x02;
+ const OVERFLOW = 0x04;
+ const UNDERFLOW = 0x08;
+ const INEXACT = 0x10;
+ }
+}
+
+#[must_use]
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
+pub struct StatusAnd<T> {
+ pub status: Status,
+ pub value: T,
+}
+
+impl Status {
+ pub fn and<T>(self, value: T) -> StatusAnd<T> {
+ StatusAnd { status: self, value }
+ }
+}
+
+impl<T> StatusAnd<T> {
+ pub fn map<F: FnOnce(T) -> U, U>(self, f: F) -> StatusAnd<U> {
+ StatusAnd { status: self.status, value: f(self.value) }
+ }
+}
+
+#[macro_export]
+macro_rules! unpack {
+ ($status:ident|=, $e:expr) => {
+ match $e {
+ $crate::StatusAnd { status, value } => {
+ $status |= status;
+ value
+ }
+ }
+ };
+ ($status:ident=, $e:expr) => {
+ match $e {
+ $crate::StatusAnd { status, value } => {
+ $status = status;
+ value
+ }
+ }
+ };
+}
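+
+// Illustrative usage sketch of `unpack!` (for some floats `a` and `b`):
+// extract the value while accumulating the status flags.
+//
+//     let mut status = Status::OK;
+//     let sum = unpack!(status|=, a.add_r(b, Round::NearestTiesToEven));
+//     // `sum` is the result; `status` has the exception flags or-ed in.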
+
+/// Category of internally-represented number.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Category {
+ Infinity,
+ NaN,
+ Normal,
+ Zero,
+}
+
+/// IEEE-754R 4.3: Rounding-direction attributes.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Round {
+ NearestTiesToEven,
+ TowardPositive,
+ TowardNegative,
+ TowardZero,
+ NearestTiesToAway,
+}
+
+impl Neg for Round {
+ type Output = Round;
+ fn neg(self) -> Round {
+ match self {
+ Round::TowardPositive => Round::TowardNegative,
+ Round::TowardNegative => Round::TowardPositive,
+ Round::NearestTiesToEven | Round::TowardZero | Round::NearestTiesToAway => self,
+ }
+ }
+}
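+
+// Illustrative: negating a rounding mode mirrors the directed modes, e.g.
+// -Round::TowardPositive == Round::TowardNegative; this is what lets the
+// parser round the absolute value and then flip the sign.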
+
+/// A signed type to represent a floating point number's unbiased exponent.
+pub type ExpInt = i16;
+
+// `ilogb` error results.
+pub const IEK_INF: ExpInt = ExpInt::MAX;
+pub const IEK_NAN: ExpInt = ExpInt::MIN;
+pub const IEK_ZERO: ExpInt = ExpInt::MIN + 1;
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct ParseError(pub &'static str);
+
+/// A self-contained host- and target-independent arbitrary-precision
+/// floating-point software implementation.
+///
+/// `apfloat` uses significand bignum integer arithmetic as provided by the
+/// functions in the `ieee::sig` module.
+///
+/// Written for clarity rather than speed, in particular with a view to use in
+/// the front-end of a cross compiler so that target arithmetic can be correctly
+/// performed on the host. Performance should nonetheless be reasonable,
+/// particularly for its intended use. It may be useful as a base
+/// implementation for a run-time library during development of a faster
+/// target-specific one.
+///
+/// All 5 rounding modes in the IEEE-754R draft are handled correctly for all
+/// implemented operations. Currently implemented operations are add, subtract,
+/// multiply, divide, fused-multiply-add, conversion-to-float,
+/// conversion-to-integer and conversion-from-integer. New rounding modes
+/// (e.g., away from zero) can be added with three or four lines of code.
+///
+/// Four formats are built-in: IEEE single precision, double precision,
+/// quadruple precision, and x87 80-bit extended double (when operating with
+/// full extended precision). Adding a new format that obeys IEEE semantics
+/// only requires adding two lines of code: a declaration and definition of the
+/// format.
+///
+/// All operations return the status of that operation as an exception bit-mask,
+/// so multiple operations can be done consecutively with their results or-ed
+/// together. The returned status can be useful for compiler diagnostics; e.g.,
+/// inexact, underflow and overflow can be easily diagnosed on constant folding,
+/// and compiler optimizers can determine what exceptions would be raised by
+/// folding operations and optimize, or perhaps not optimize, accordingly.
+///
+/// At present, underflow tininess is detected after rounding; it should be
+/// straightforward to add support for the before-rounding case too.
+///
+/// The library reads hexadecimal floating point numbers as per C99, and
+/// correctly rounds if necessary according to the specified rounding mode.
+/// Syntax is required to have been validated by the caller.
+///
+/// It also reads decimal floating point numbers and correctly rounds according
+/// to the specified rounding mode.
+///
+/// Non-zero finite numbers are represented internally as a sign bit, a 16-bit
+/// signed exponent, and the significand as an array of integer limbs. After
+/// normalization of a number of precision P the exponent is within the range of
+/// the format, and if the number is not denormal the P-th bit of the
+/// significand is set as an explicit integer bit. For denormals the most
+/// significant bit is shifted right so that the exponent is maintained at the
+/// format's minimum, so that the smallest denormal has just the least
+/// significant bit of the significand set. The sign of zeros and infinities
+/// is significant; the exponent and significand of such numbers are not stored,
+/// but have a known implicit (deterministic) value: 0 for the significands, 0
+/// for the zero exponent, all 1 bits for the infinity exponent. For NaNs the sign and
+/// significand are deterministic, although not really meaningful, and preserved
+/// in non-conversion operations. The exponent is implicitly all 1 bits.
+///
+/// `apfloat` does not provide any exception handling beyond default exception
+/// handling. We represent signaling NaNs per the IEEE-754R 2008 6.2.1 "should"
+/// clause, by encoding them with the first bit of the trailing significand
+/// as 0.
+///
+/// Future work
+/// ===========
+///
+/// Some features that may or may not be worth adding:
+///
+/// Optional ability to detect underflow tininess before rounding.
+///
+/// New formats: x87 in single and double precision mode (IEEE apart from
+/// extended exponent range) (hard).
+///
+/// New operations: sqrt, nexttoward.
+///
+pub trait Float:
+ Copy
+ + Default
+ + FromStr<Err = ParseError>
+ + PartialOrd
+ + fmt::Display
+ + Neg<Output = Self>
+ + AddAssign
+ + SubAssign
+ + MulAssign
+ + DivAssign
+ + RemAssign
+ + Add<Output = StatusAnd<Self>>
+ + Sub<Output = StatusAnd<Self>>
+ + Mul<Output = StatusAnd<Self>>
+ + Div<Output = StatusAnd<Self>>
+ + Rem<Output = StatusAnd<Self>>
+{
+ /// Total number of bits in the in-memory format.
+ const BITS: usize;
+
+ /// Number of bits in the significand. This includes the integer bit.
+ const PRECISION: usize;
+
+ /// The largest E such that 2<sup>E</sup> is representable; this matches the
+ /// definition of IEEE 754.
+ const MAX_EXP: ExpInt;
+
+ /// The smallest E such that 2<sup>E</sup> is a normalized number; this
+ /// matches the definition of IEEE 754.
+ const MIN_EXP: ExpInt;
+
+ /// Positive Zero.
+ const ZERO: Self;
+
+ /// Positive Infinity.
+ const INFINITY: Self;
+
+ /// NaN (Not a Number).
+ // FIXME(eddyb) provide a default when qnan becomes const fn.
+ const NAN: Self;
+
+ /// Factory for QNaN values.
+ // FIXME(eddyb) should be const fn.
+ fn qnan(payload: Option<u128>) -> Self;
+
+ /// Factory for SNaN values.
+ // FIXME(eddyb) should be const fn.
+ fn snan(payload: Option<u128>) -> Self;
+
+ /// Largest finite number.
+ // FIXME(eddyb) should be const (but FloatPair::largest is nontrivial).
+ fn largest() -> Self;
+
+ /// Smallest (by magnitude) finite number.
+ /// Might be denormalized, which implies a relative loss of precision.
+ const SMALLEST: Self;
+
+ /// Smallest (by magnitude) normalized finite number.
+ // FIXME(eddyb) should be const (but FloatPair::smallest_normalized is nontrivial).
+ fn smallest_normalized() -> Self;
+
+ // Arithmetic
+
+ fn add_r(self, rhs: Self, round: Round) -> StatusAnd<Self>;
+ fn sub_r(self, rhs: Self, round: Round) -> StatusAnd<Self> {
+ self.add_r(-rhs, round)
+ }
+ fn mul_r(self, rhs: Self, round: Round) -> StatusAnd<Self>;
+ fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self>;
+ fn mul_add(self, multiplicand: Self, addend: Self) -> StatusAnd<Self> {
+ self.mul_add_r(multiplicand, addend, Round::NearestTiesToEven)
+ }
+ fn div_r(self, rhs: Self, round: Round) -> StatusAnd<Self>;
+ /// IEEE remainder.
+ // This is not currently correct in all cases.
+ fn ieee_rem(self, rhs: Self) -> StatusAnd<Self> {
+ let mut v = self;
+
+ let status;
+ v = unpack!(status=, v / rhs);
+ if status == Status::DIV_BY_ZERO {
+ return status.and(self);
+ }
+
+ assert!(Self::PRECISION < 128);
+
+ let status;
+ let x = unpack!(status=, v.to_i128_r(128, Round::NearestTiesToEven, &mut false));
+ if status == Status::INVALID_OP {
+ return status.and(self);
+ }
+
+ let status;
+ let mut v = unpack!(status=, Self::from_i128(x));
+ assert_eq!(status, Status::OK); // should always work
+
+ let status;
+ v = unpack!(status=, v * rhs);
+ assert_eq!(status - Status::INEXACT, Status::OK); // should not overflow or underflow
+
+ let status;
+ v = unpack!(status=, self - v);
+ assert_eq!(status - Status::INEXACT, Status::OK); // likewise
+
+ if v.is_zero() {
+ status.and(v.copy_sign(self)) // IEEE754 requires this
+ } else {
+ status.and(v)
+ }
+ }
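+
+    // Worked example (illustrative): ieee_rem(5.0, 3.0) == -1.0, because the
+    // quotient 5/3 rounds to the nearest integer 2 and 5 - 2*3 = -1; contrast
+    // with C fmod, where fmod(5.0, 3.0) == 2.0 (quotient truncated toward zero).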
+ /// C fmod, or llvm frem.
+ fn c_fmod(self, rhs: Self) -> StatusAnd<Self>;
+ fn round_to_integral(self, round: Round) -> StatusAnd<Self>;
+
+ /// IEEE-754R 2008 5.3.1: nextUp.
+ fn next_up(self) -> StatusAnd<Self>;
+
+ /// IEEE-754R 2008 5.3.1: nextDown.
+ ///
+ /// *NOTE* since nextDown(x) = -nextUp(-x), we only implement nextUp with
+ /// appropriate sign switching before/after the computation.
+ fn next_down(self) -> StatusAnd<Self> {
+ (-self).next_up().map(|r| -r)
+ }
+
+ fn abs(self) -> Self {
+ if self.is_negative() { -self } else { self }
+ }
+ fn copy_sign(self, rhs: Self) -> Self {
+ if self.is_negative() != rhs.is_negative() { -self } else { self }
+ }
+
+ // Conversions
+ fn from_bits(input: u128) -> Self;
+ fn from_i128_r(input: i128, round: Round) -> StatusAnd<Self> {
+ if input < 0 {
+ Self::from_u128_r(input.wrapping_neg() as u128, -round).map(|r| -r)
+ } else {
+ Self::from_u128_r(input as u128, round)
+ }
+ }
+ fn from_i128(input: i128) -> StatusAnd<Self> {
+ Self::from_i128_r(input, Round::NearestTiesToEven)
+ }
+ fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self>;
+ fn from_u128(input: u128) -> StatusAnd<Self> {
+ Self::from_u128_r(input, Round::NearestTiesToEven)
+ }
+ fn from_str_r(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError>;
+ fn to_bits(self) -> u128;
+
+ /// Converts a floating point number to an integer according to the
+ /// rounding mode. In case of an invalid operation exception,
+ /// deterministic values are returned, namely zero for NaNs and the
+ /// minimal or maximal value respectively for underflow or overflow.
+ /// If the rounded value is in range but the floating point number is
+ /// not the exact integer, the C standard doesn't require an inexact
+ /// exception to be raised. IEEE-854 does require it so we do that.
+ ///
+ /// Note that for conversions to integer type the C standard requires
+ /// round-to-zero to always be used.
+ ///
+ /// The `is_exact` output tells whether the result is exact, in the sense
+ /// that converting it back to the original floating point type produces
+ /// the original value. This is almost equivalent to `result == Status::OK`,
+ /// except for negative zeroes.
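+ ///
+ /// A hedged sketch of the round-toward-zero behavior (illustrative;
+ /// `to_i128` below defaults to `Round::TowardZero`):
+ ///
+ /// ```ignore (illustrative)
+ /// let x = "1.5".parse::<Double>().unwrap();
+ /// let r = x.to_i128(64);
+ /// assert_eq!(r.value, 1); // truncated toward zero
+ /// assert_eq!(r.status, Status::INEXACT); // 1.5 is not an exact integer
+ /// ```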
+ fn to_i128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<i128> {
+ let status;
+ if self.is_negative() {
+ if self.is_zero() {
+ // Negative zero can't be represented as an int.
+ *is_exact = false;
+ }
+ let r = unpack!(status=, (-self).to_u128_r(width, -round, is_exact));
+
+ // Check for values that don't fit in the signed integer.
+ if r > (1 << (width - 1)) {
+ // Return the most negative integer for the given width.
+ *is_exact = false;
+ Status::INVALID_OP.and(-1 << (width - 1))
+ } else {
+ status.and(r.wrapping_neg() as i128)
+ }
+ } else {
+ // Positive case is simpler, can pretend it's a smaller unsigned
+ // integer, and `to_u128` will take care of all the edge cases.
+ self.to_u128_r(width - 1, round, is_exact).map(|r| r as i128)
+ }
+ }
+ fn to_i128(self, width: usize) -> StatusAnd<i128> {
+ self.to_i128_r(width, Round::TowardZero, &mut true)
+ }
+ fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128>;
+ fn to_u128(self, width: usize) -> StatusAnd<u128> {
+ self.to_u128_r(width, Round::TowardZero, &mut true)
+ }
+
+ fn cmp_abs_normal(self, rhs: Self) -> Ordering;
+
+ /// Bitwise comparison for equality (QNaNs compare equal, 0!=-0).
+ fn bitwise_eq(self, rhs: Self) -> bool;
+
+ // IEEE-754R 5.7.2 General operations.
+
+ /// Implements IEEE minNum semantics. Returns the smaller of the two arguments
+ /// if both are not NaN. If either argument is a NaN, returns the other argument.
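+ ///
+ /// A small sketch of the NaN handling (illustrative; `Double` is the
+ /// `ieee::Double` semantics from this crate):
+ ///
+ /// ```ignore (illustrative)
+ /// let one = "1.0".parse::<Double>().unwrap();
+ /// assert!(one.min(Double::NAN).bitwise_eq(one)); // the NaN is ignored
+ /// ```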
+ fn min(self, other: Self) -> Self {
+ if self.is_nan() {
+ other
+ } else if other.is_nan() {
+ self
+ } else if other.partial_cmp(&self) == Some(Ordering::Less) {
+ other
+ } else {
+ self
+ }
+ }
+
+ /// Implements IEEE maxNum semantics. Returns the larger of the two arguments
+ /// if both are not NaN. If either argument is a NaN, returns the other argument.
+ fn max(self, other: Self) -> Self {
+ if self.is_nan() {
+ other
+ } else if other.is_nan() {
+ self
+ } else if self.partial_cmp(&other) == Some(Ordering::Less) {
+ other
+ } else {
+ self
+ }
+ }
+
+ /// IEEE-754R isSignMinus: Returns whether the current value is
+ /// negative.
+ ///
+ /// This applies to zeros and NaNs as well.
+ fn is_negative(self) -> bool;
+
+ /// IEEE-754R isNormal: Returns whether the current value is normal.
+ ///
+ /// This implies that the current value of the float is not zero, subnormal,
+ /// infinite, or NaN following the definition of normality from IEEE-754R.
+ fn is_normal(self) -> bool {
+ !self.is_denormal() && self.is_finite_non_zero()
+ }
+
+ /// Returns `true` if the current value is zero, subnormal, or
+ /// normal.
+ ///
+ /// This means that the value is not infinite or NaN.
+ fn is_finite(self) -> bool {
+ !self.is_nan() && !self.is_infinite()
+ }
+
+ /// Returns `true` if the float is plus or minus zero.
+ fn is_zero(self) -> bool {
+ self.category() == Category::Zero
+ }
+
+ /// IEEE-754R isSubnormal(): Returns whether the float is a
+ /// denormal.
+ fn is_denormal(self) -> bool;
+
+ /// IEEE-754R isInfinite(): Returns whether the float is infinity.
+ fn is_infinite(self) -> bool {
+ self.category() == Category::Infinity
+ }
+
+ /// Returns `true` if the float is a quiet or signaling NaN.
+ fn is_nan(self) -> bool {
+ self.category() == Category::NaN
+ }
+
+ /// Returns `true` if the float is a signaling NaN.
+ fn is_signaling(self) -> bool;
+
+ // Simple Queries
+
+ fn category(self) -> Category;
+ fn is_non_zero(self) -> bool {
+ !self.is_zero()
+ }
+ fn is_finite_non_zero(self) -> bool {
+ self.is_finite() && !self.is_zero()
+ }
+ fn is_pos_zero(self) -> bool {
+ self.is_zero() && !self.is_negative()
+ }
+ fn is_neg_zero(self) -> bool {
+ self.is_zero() && self.is_negative()
+ }
+
+ /// Returns `true` if the number has the smallest possible non-zero
+ /// magnitude in the current semantics.
+ fn is_smallest(self) -> bool {
+ Self::SMALLEST.copy_sign(self).bitwise_eq(self)
+ }
+
+ /// Returns `true` if the number has the largest possible finite
+ /// magnitude in the current semantics.
+ fn is_largest(self) -> bool {
+ Self::largest().copy_sign(self).bitwise_eq(self)
+ }
+
+ /// Returns `true` if the number is an exact integer.
+ fn is_integer(self) -> bool {
+ // This could be made more efficient; I'm going for obviously correct.
+ if !self.is_finite() {
+ return false;
+ }
+ self.round_to_integral(Round::TowardZero).value.bitwise_eq(self)
+ }
+
+ /// If this value has an exact multiplicative inverse, return it.
+ fn get_exact_inverse(self) -> Option<Self>;
+
+ /// Returns the exponent of the internal representation of the Float.
+ ///
+ /// Because the radix of Float is 2, this is equivalent to floor(log2(x)).
+ /// For special Float values, this returns special error codes:
+ ///
+ /// NaN -> `IEK_NAN`
+ /// 0 -> `IEK_ZERO`
+ /// Inf -> `IEK_INF`
+ ///
+ fn ilogb(self) -> ExpInt;
+
+ /// Returns: self * 2<sup>exp</sup> for integral exponents.
+ /// Equivalent to C standard library function `ldexp`.
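+ ///
+ /// A hedged sketch (illustrative; the result is exact because no rounding
+ /// is needed):
+ ///
+ /// ```ignore (illustrative)
+ /// let x = "1.5".parse::<Double>().unwrap();
+ /// let y = x.scalbn(3); // 1.5 * 2^3
+ /// assert!(y.bitwise_eq("12.0".parse::<Double>().unwrap()));
+ /// ```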
+ fn scalbn_r(self, exp: ExpInt, round: Round) -> Self;
+ fn scalbn(self, exp: ExpInt) -> Self {
+ self.scalbn_r(exp, Round::NearestTiesToEven)
+ }
+
+ /// Equivalent to C standard library function with the same name.
+ ///
+ /// While the C standard says exp is an unspecified value for infinity and nan,
+ /// this returns INT_MAX for infinities, and INT_MIN for NaNs (see `ilogb`).
+ fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self;
+ fn frexp(self, exp: &mut ExpInt) -> Self {
+ self.frexp_r(exp, Round::NearestTiesToEven)
+ }
+}
+
+pub trait FloatConvert<T: Float>: Float {
+ /// Converts a value of one floating point type to another.
+ /// The return value corresponds to the IEEE754 exceptions. `loses_info`
+ /// records whether the transformation lost information, i.e., whether
+ /// converting the result back to the original type will fail to produce
+ /// the original value (this is almost the same as the status being
+ /// `Status::OK`, but there are edge cases where this is not so).
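+ ///
+ /// A hedged usage sketch (illustrative; the same pattern as this crate's
+ /// tests):
+ ///
+ /// ```ignore (illustrative)
+ /// use rustc_apfloat::ieee::{Double, Single};
+ ///
+ /// let x = "1.5".parse::<Double>().unwrap();
+ /// let mut loses_info = false;
+ /// let y: Single = x.convert(&mut loses_info).value;
+ /// assert!(!loses_info); // 1.5 is exactly representable in `Single`
+ /// ```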
+ fn convert_r(self, round: Round, loses_info: &mut bool) -> StatusAnd<T>;
+ fn convert(self, loses_info: &mut bool) -> StatusAnd<T> {
+ self.convert_r(Round::NearestTiesToEven, loses_info)
+ }
+}
+
+macro_rules! float_common_impls {
+ ($ty:ident<$t:tt>) => {
+ impl<$t> Default for $ty<$t>
+ where
+ Self: Float,
+ {
+ fn default() -> Self {
+ Self::ZERO
+ }
+ }
+
+ impl<$t> ::core::str::FromStr for $ty<$t>
+ where
+ Self: Float,
+ {
+ type Err = ParseError;
+ fn from_str(s: &str) -> Result<Self, ParseError> {
+ Self::from_str_r(s, Round::NearestTiesToEven).map(|x| x.value)
+ }
+ }
+
+ // These operator impls round to nearest, with ties to even, by default.
+
+ impl<$t> ::core::ops::Add for $ty<$t>
+ where
+ Self: Float,
+ {
+ type Output = StatusAnd<Self>;
+ fn add(self, rhs: Self) -> StatusAnd<Self> {
+ self.add_r(rhs, Round::NearestTiesToEven)
+ }
+ }
+
+ impl<$t> ::core::ops::Sub for $ty<$t>
+ where
+ Self: Float,
+ {
+ type Output = StatusAnd<Self>;
+ fn sub(self, rhs: Self) -> StatusAnd<Self> {
+ self.sub_r(rhs, Round::NearestTiesToEven)
+ }
+ }
+
+ impl<$t> ::core::ops::Mul for $ty<$t>
+ where
+ Self: Float,
+ {
+ type Output = StatusAnd<Self>;
+ fn mul(self, rhs: Self) -> StatusAnd<Self> {
+ self.mul_r(rhs, Round::NearestTiesToEven)
+ }
+ }
+
+ impl<$t> ::core::ops::Div for $ty<$t>
+ where
+ Self: Float,
+ {
+ type Output = StatusAnd<Self>;
+ fn div(self, rhs: Self) -> StatusAnd<Self> {
+ self.div_r(rhs, Round::NearestTiesToEven)
+ }
+ }
+
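+ // Note: `%` maps to `c_fmod` (C `fmod` semantics, matching Rust's `%` on
+ // primitive floats), not to the IEEE remainder (`ieee_rem`).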
+ impl<$t> ::core::ops::Rem for $ty<$t>
+ where
+ Self: Float,
+ {
+ type Output = StatusAnd<Self>;
+ fn rem(self, rhs: Self) -> StatusAnd<Self> {
+ self.c_fmod(rhs)
+ }
+ }
+
+ impl<$t> ::core::ops::AddAssign for $ty<$t>
+ where
+ Self: Float,
+ {
+ fn add_assign(&mut self, rhs: Self) {
+ *self = (*self + rhs).value;
+ }
+ }
+
+ impl<$t> ::core::ops::SubAssign for $ty<$t>
+ where
+ Self: Float,
+ {
+ fn sub_assign(&mut self, rhs: Self) {
+ *self = (*self - rhs).value;
+ }
+ }
+
+ impl<$t> ::core::ops::MulAssign for $ty<$t>
+ where
+ Self: Float,
+ {
+ fn mul_assign(&mut self, rhs: Self) {
+ *self = (*self * rhs).value;
+ }
+ }
+
+ impl<$t> ::core::ops::DivAssign for $ty<$t>
+ where
+ Self: Float,
+ {
+ fn div_assign(&mut self, rhs: Self) {
+ *self = (*self / rhs).value;
+ }
+ }
+
+ impl<$t> ::core::ops::RemAssign for $ty<$t>
+ where
+ Self: Float,
+ {
+ fn rem_assign(&mut self, rhs: Self) {
+ *self = (*self % rhs).value;
+ }
+ }
+ };
+}
+
+pub mod ieee;
+pub mod ppc;
diff --git a/compiler/rustc_apfloat/src/ppc.rs b/compiler/rustc_apfloat/src/ppc.rs
new file mode 100644
index 000000000..65a0f6664
--- /dev/null
+++ b/compiler/rustc_apfloat/src/ppc.rs
@@ -0,0 +1,434 @@
+use crate::ieee;
+use crate::{Category, ExpInt, Float, FloatConvert, ParseError, Round, Status, StatusAnd};
+
+use core::cmp::Ordering;
+use core::fmt;
+use core::ops::Neg;
+
+#[must_use]
+#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)]
+pub struct DoubleFloat<F>(F, F);
+pub type DoubleDouble = DoubleFloat<ieee::Double>;
+
+// These are legacy semantics for the fallback, inaccurate implementation of
+// IBM double-double, used if the accurate DoubleDouble doesn't handle the
+// operation. It's equivalent to having an IEEE number with consecutive 106
+// bits of mantissa and 11 bits of exponent.
+//
+// It's not equivalent to IBM double-double. For example, a valid IBM
+// double-double, 1 + epsilon:
+//
+// 1 + epsilon = 1 + (1 >> 1076)
+//
+// is not representable by 106 consecutive bits of mantissa, since its
+// significant bits would have to span from 2^0 all the way down to 2^-1076.
+//
+// Currently, these semantics are used in the following way:
+//
+// DoubleDouble -> (Double, Double) ->
+// DoubleDouble's Fallback -> IEEE operations
+//
+// FIXME: Implement all operations in DoubleDouble, and delete these
+// semantics.
+// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds.
+pub struct FallbackS<F>(#[allow(unused)] F);
+type Fallback<F> = ieee::IeeeFloat<FallbackS<F>>;
+impl<F: Float> ieee::Semantics for FallbackS<F> {
+ // Forbid any conversion to/from bits.
+ const BITS: usize = 0;
+ const PRECISION: usize = F::PRECISION * 2;
+ const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt;
+ const MIN_EXP: ExpInt = F::MIN_EXP as ExpInt + F::PRECISION as ExpInt;
+}
+
+// Convert number to F. To avoid spurious underflows, we re-
+// normalize against the F exponent range first, and only *then*
+// truncate the mantissa. The result of that second conversion
+// may be inexact, but should never underflow.
+// FIXME(eddyb) This shouldn't need to be `pub`, it's only used in bounds.
+pub struct FallbackExtendedS<F>(#[allow(unused)] F);
+type FallbackExtended<F> = ieee::IeeeFloat<FallbackExtendedS<F>>;
+impl<F: Float> ieee::Semantics for FallbackExtendedS<F> {
+ // Forbid any conversion to/from bits.
+ const BITS: usize = 0;
+ const PRECISION: usize = Fallback::<F>::PRECISION;
+ const MAX_EXP: ExpInt = F::MAX_EXP as ExpInt;
+}
+
+impl<F: Float> From<Fallback<F>> for DoubleFloat<F>
+where
+ F: FloatConvert<FallbackExtended<F>>,
+ FallbackExtended<F>: FloatConvert<F>,
+{
+ fn from(x: Fallback<F>) -> Self {
+ let mut status;
+ let mut loses_info = false;
+
+ let extended: FallbackExtended<F> = unpack!(status=, x.convert(&mut loses_info));
+ assert_eq!((status, loses_info), (Status::OK, false));
+
+ let a = unpack!(status=, extended.convert(&mut loses_info));
+ assert_eq!(status - Status::INEXACT, Status::OK);
+
+ // If conversion was exact or resulted in a special case, we're done;
+ // just set the second double to zero. Otherwise, re-convert back to
+ // the extended format and compute the difference. This now should
+ // convert exactly to double.
+ let b = if a.is_finite_non_zero() && loses_info {
+ let u: FallbackExtended<F> = unpack!(status=, a.convert(&mut loses_info));
+ assert_eq!((status, loses_info), (Status::OK, false));
+ let v = unpack!(status=, extended - u);
+ assert_eq!(status, Status::OK);
+ let v = unpack!(status=, v.convert(&mut loses_info));
+ assert_eq!((status, loses_info), (Status::OK, false));
+ v
+ } else {
+ F::ZERO
+ };
+
+ DoubleFloat(a, b)
+ }
+}
+
+impl<F: FloatConvert<Self>> From<DoubleFloat<F>> for Fallback<F> {
+ fn from(DoubleFloat(a, b): DoubleFloat<F>) -> Self {
+ let mut status;
+ let mut loses_info = false;
+
+ // Get the first F and convert to our format.
+ let a = unpack!(status=, a.convert(&mut loses_info));
+ assert_eq!((status, loses_info), (Status::OK, false));
+
+ // Unless we have a special case, add in second F.
+ if a.is_finite_non_zero() {
+ let b = unpack!(status=, b.convert(&mut loses_info));
+ assert_eq!((status, loses_info), (Status::OK, false));
+
+ (a + b).value
+ } else {
+ a
+ }
+ }
+}
+
+float_common_impls!(DoubleFloat<F>);
+
+impl<F: Float> Neg for DoubleFloat<F> {
+ type Output = Self;
+ fn neg(self) -> Self {
+ if self.1.is_finite_non_zero() {
+ DoubleFloat(-self.0, -self.1)
+ } else {
+ DoubleFloat(-self.0, self.1)
+ }
+ }
+}
+
+impl<F: FloatConvert<Fallback<F>>> fmt::Display for DoubleFloat<F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&Fallback::from(*self), f)
+ }
+}
+
+impl<F: FloatConvert<Fallback<F>>> Float for DoubleFloat<F>
+where
+ Self: From<Fallback<F>>,
+{
+ const BITS: usize = F::BITS * 2;
+ const PRECISION: usize = Fallback::<F>::PRECISION;
+ const MAX_EXP: ExpInt = Fallback::<F>::MAX_EXP;
+ const MIN_EXP: ExpInt = Fallback::<F>::MIN_EXP;
+
+ const ZERO: Self = DoubleFloat(F::ZERO, F::ZERO);
+
+ const INFINITY: Self = DoubleFloat(F::INFINITY, F::ZERO);
+
+ // FIXME(eddyb) remove when qnan becomes const fn.
+ const NAN: Self = DoubleFloat(F::NAN, F::ZERO);
+
+ fn qnan(payload: Option<u128>) -> Self {
+ DoubleFloat(F::qnan(payload), F::ZERO)
+ }
+
+ fn snan(payload: Option<u128>) -> Self {
+ DoubleFloat(F::snan(payload), F::ZERO)
+ }
+
+ fn largest() -> Self {
+ let status;
+ let mut r = DoubleFloat(F::largest(), F::largest());
+ r.1 = r.1.scalbn(-(F::PRECISION as ExpInt + 1));
+ r.1 = unpack!(status=, r.1.next_down());
+ assert_eq!(status, Status::OK);
+ r
+ }
+
+ const SMALLEST: Self = DoubleFloat(F::SMALLEST, F::ZERO);
+
+ fn smallest_normalized() -> Self {
+ DoubleFloat(F::smallest_normalized().scalbn(F::PRECISION as ExpInt), F::ZERO)
+ }
+
+ // Implement addition, subtraction, multiplication and division based on:
+ // "Software for Doubled-Precision Floating-Point Computations",
+ // by Seppo Linnainmaa, ACM TOMS vol 7 no 3, September 1981, pages 272-283.
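+ //
+ // The overall scheme (a sketch, not a formal statement): each DoubleFloat
+ // represents hi + lo exactly, and the extra additions/subtractions below
+ // recover the rounding error of each individual operation so that it can
+ // be folded into the low-order component instead of being lost.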
+
+ fn add_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
+ match (self.category(), rhs.category()) {
+ (Category::Infinity, Category::Infinity) => {
+ if self.is_negative() != rhs.is_negative() {
+ Status::INVALID_OP.and(Self::NAN.copy_sign(self))
+ } else {
+ Status::OK.and(self)
+ }
+ }
+
+ (_, Category::Zero) | (Category::NaN, _) | (Category::Infinity, Category::Normal) => {
+ Status::OK.and(self)
+ }
+
+ (Category::Zero, _) | (_, Category::NaN | Category::Infinity) => Status::OK.and(rhs),
+
+ (Category::Normal, Category::Normal) => {
+ let mut status = Status::OK;
+ let (a, aa, c, cc) = (self.0, self.1, rhs.0, rhs.1);
+ let mut z = a;
+ z = unpack!(status|=, z.add_r(c, round));
+ if !z.is_finite() {
+ if !z.is_infinite() {
+ return status.and(DoubleFloat(z, F::ZERO));
+ }
+ status = Status::OK;
+ let a_cmp_c = a.cmp_abs_normal(c);
+ z = cc;
+ z = unpack!(status|=, z.add_r(aa, round));
+ if a_cmp_c == Ordering::Greater {
+ // z = cc + aa + c + a;
+ z = unpack!(status|=, z.add_r(c, round));
+ z = unpack!(status|=, z.add_r(a, round));
+ } else {
+ // z = cc + aa + a + c;
+ z = unpack!(status|=, z.add_r(a, round));
+ z = unpack!(status|=, z.add_r(c, round));
+ }
+ if !z.is_finite() {
+ return status.and(DoubleFloat(z, F::ZERO));
+ }
+ self.0 = z;
+ let mut zz = aa;
+ zz = unpack!(status|=, zz.add_r(cc, round));
+ if a_cmp_c == Ordering::Greater {
+ // self.1 = a - z + c + zz;
+ self.1 = a;
+ self.1 = unpack!(status|=, self.1.sub_r(z, round));
+ self.1 = unpack!(status|=, self.1.add_r(c, round));
+ self.1 = unpack!(status|=, self.1.add_r(zz, round));
+ } else {
+ // self.1 = c - z + a + zz;
+ self.1 = c;
+ self.1 = unpack!(status|=, self.1.sub_r(z, round));
+ self.1 = unpack!(status|=, self.1.add_r(a, round));
+ self.1 = unpack!(status|=, self.1.add_r(zz, round));
+ }
+ } else {
+ // q = a - z;
+ let mut q = a;
+ q = unpack!(status|=, q.sub_r(z, round));
+
+ // zz = q + c + (a - (q + z)) + aa + cc;
+ // Compute a - (q + z) as -((q + z) - a) to avoid temporary copies.
+ let mut zz = q;
+ zz = unpack!(status|=, zz.add_r(c, round));
+ q = unpack!(status|=, q.add_r(z, round));
+ q = unpack!(status|=, q.sub_r(a, round));
+ q = -q;
+ zz = unpack!(status|=, zz.add_r(q, round));
+ zz = unpack!(status|=, zz.add_r(aa, round));
+ zz = unpack!(status|=, zz.add_r(cc, round));
+ if zz.is_zero() && !zz.is_negative() {
+ return Status::OK.and(DoubleFloat(z, F::ZERO));
+ }
+ self.0 = z;
+ self.0 = unpack!(status|=, self.0.add_r(zz, round));
+ if !self.0.is_finite() {
+ self.1 = F::ZERO;
+ return status.and(self);
+ }
+ self.1 = z;
+ self.1 = unpack!(status|=, self.1.sub_r(self.0, round));
+ self.1 = unpack!(status|=, self.1.add_r(zz, round));
+ }
+ status.and(self)
+ }
+ }
+ }
+
+ fn mul_r(mut self, rhs: Self, round: Round) -> StatusAnd<Self> {
+ // Interesting observation: For special categories, finding the lowest
+ // common ancestor of the following layered graph gives the correct
+ // return category:
+ //
+ // NaN
+ // / \
+ // Zero Inf
+ // \ /
+ // Normal
+ //
+ // e.g., NaN * NaN = NaN
+ // Zero * Inf = NaN
+ // Normal * Zero = Zero
+ // Normal * Inf = Inf
+ match (self.category(), rhs.category()) {
+ (Category::NaN, _) => Status::OK.and(self),
+
+ (_, Category::NaN) => Status::OK.and(rhs),
+
+ (Category::Zero, Category::Infinity) | (Category::Infinity, Category::Zero) => {
+ Status::OK.and(Self::NAN)
+ }
+
+ (Category::Zero | Category::Infinity, _) => Status::OK.and(self),
+
+ (_, Category::Zero | Category::Infinity) => Status::OK.and(rhs),
+
+ (Category::Normal, Category::Normal) => {
+ let mut status = Status::OK;
+ let (a, b, c, d) = (self.0, self.1, rhs.0, rhs.1);
+ // t = a * c
+ let mut t = a;
+ t = unpack!(status|=, t.mul_r(c, round));
+ if !t.is_finite_non_zero() {
+ return status.and(DoubleFloat(t, F::ZERO));
+ }
+
+ // tau = fmsub(a, c, t), that is -fmadd(-a, c, t).
+ let mut tau = a;
+ tau = unpack!(status|=, tau.mul_add_r(c, -t, round));
+ // v = a * d
+ let mut v = a;
+ v = unpack!(status|=, v.mul_r(d, round));
+ // w = b * c
+ let mut w = b;
+ w = unpack!(status|=, w.mul_r(c, round));
+ v = unpack!(status|=, v.add_r(w, round));
+ // tau += v + w
+ tau = unpack!(status|=, tau.add_r(v, round));
+ // u = t + tau
+ let mut u = t;
+ u = unpack!(status|=, u.add_r(tau, round));
+
+ self.0 = u;
+ if !u.is_finite() {
+ self.1 = F::ZERO;
+ } else {
+ // self.1 = (t - u) + tau
+ t = unpack!(status|=, t.sub_r(u, round));
+ t = unpack!(status|=, t.add_r(tau, round));
+ self.1 = t;
+ }
+ status.and(self)
+ }
+ }
+ }
+
+ fn mul_add_r(self, multiplicand: Self, addend: Self, round: Round) -> StatusAnd<Self> {
+ Fallback::from(self)
+ .mul_add_r(Fallback::from(multiplicand), Fallback::from(addend), round)
+ .map(Self::from)
+ }
+
+ fn div_r(self, rhs: Self, round: Round) -> StatusAnd<Self> {
+ Fallback::from(self).div_r(Fallback::from(rhs), round).map(Self::from)
+ }
+
+ fn c_fmod(self, rhs: Self) -> StatusAnd<Self> {
+ Fallback::from(self).c_fmod(Fallback::from(rhs)).map(Self::from)
+ }
+
+ fn round_to_integral(self, round: Round) -> StatusAnd<Self> {
+ Fallback::from(self).round_to_integral(round).map(Self::from)
+ }
+
+ fn next_up(self) -> StatusAnd<Self> {
+ Fallback::from(self).next_up().map(Self::from)
+ }
+
+ fn from_bits(input: u128) -> Self {
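+ // Split the input into two F-sized halves: the low F::BITS bits become
+ // the first (high-order) component, the next F::BITS bits the second.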
+ let (a, b) = (input, input >> F::BITS);
+ DoubleFloat(F::from_bits(a & ((1 << F::BITS) - 1)), F::from_bits(b & ((1 << F::BITS) - 1)))
+ }
+
+ fn from_u128_r(input: u128, round: Round) -> StatusAnd<Self> {
+ Fallback::from_u128_r(input, round).map(Self::from)
+ }
+
+ fn from_str_r(s: &str, round: Round) -> Result<StatusAnd<Self>, ParseError> {
+ Fallback::from_str_r(s, round).map(|r| r.map(Self::from))
+ }
+
+ fn to_bits(self) -> u128 {
+ self.0.to_bits() | (self.1.to_bits() << F::BITS)
+ }
+
+ fn to_u128_r(self, width: usize, round: Round, is_exact: &mut bool) -> StatusAnd<u128> {
+ Fallback::from(self).to_u128_r(width, round, is_exact)
+ }
+
+ fn cmp_abs_normal(self, rhs: Self) -> Ordering {
+ self.0.cmp_abs_normal(rhs.0).then_with(|| {
+ let result = self.1.cmp_abs_normal(rhs.1);
+ if result != Ordering::Equal {
+ let against = self.0.is_negative() ^ self.1.is_negative();
+ let rhs_against = rhs.0.is_negative() ^ rhs.1.is_negative();
+ (!against)
+ .cmp(&!rhs_against)
+ .then_with(|| if against { result.reverse() } else { result })
+ } else {
+ result
+ }
+ })
+ }
+
+ fn bitwise_eq(self, rhs: Self) -> bool {
+ self.0.bitwise_eq(rhs.0) && self.1.bitwise_eq(rhs.1)
+ }
+
+ fn is_negative(self) -> bool {
+ self.0.is_negative()
+ }
+
+ fn is_denormal(self) -> bool {
+ self.category() == Category::Normal
+ && (self.0.is_denormal() || self.1.is_denormal() ||
+ // (double)(Hi + Lo) == Hi defines a normal number.
+ !(self.0 + self.1).value.bitwise_eq(self.0))
+ }
+
+ fn is_signaling(self) -> bool {
+ self.0.is_signaling()
+ }
+
+ fn category(self) -> Category {
+ self.0.category()
+ }
+
+ fn get_exact_inverse(self) -> Option<Self> {
+ Fallback::from(self).get_exact_inverse().map(Self::from)
+ }
+
+ fn ilogb(self) -> ExpInt {
+ self.0.ilogb()
+ }
+
+ fn scalbn_r(self, exp: ExpInt, round: Round) -> Self {
+ DoubleFloat(self.0.scalbn_r(exp, round), self.1.scalbn_r(exp, round))
+ }
+
+ fn frexp_r(self, exp: &mut ExpInt, round: Round) -> Self {
+ let a = self.0.frexp_r(exp, round);
+ let mut b = self.1;
+ if self.category() == Category::Normal {
+ b = b.scalbn_r(-*exp, round);
+ }
+ DoubleFloat(a, b)
+ }
+}
diff --git a/compiler/rustc_apfloat/tests/ieee.rs b/compiler/rustc_apfloat/tests/ieee.rs
new file mode 100644
index 000000000..f8fac0c23
--- /dev/null
+++ b/compiler/rustc_apfloat/tests/ieee.rs
@@ -0,0 +1,3301 @@
+// ignore-tidy-filelength
+
+use rustc_apfloat::ieee::{Double, Half, Quad, Single, X87DoubleExtended};
+use rustc_apfloat::unpack;
+use rustc_apfloat::{Category, ExpInt, IEK_INF, IEK_NAN, IEK_ZERO};
+use rustc_apfloat::{Float, FloatConvert, ParseError, Round, Status};
+
+trait SingleExt {
+ fn from_f32(input: f32) -> Self;
+ fn to_f32(self) -> f32;
+}
+
+impl SingleExt for Single {
+ fn from_f32(input: f32) -> Self {
+ Self::from_bits(input.to_bits() as u128)
+ }
+
+ fn to_f32(self) -> f32 {
+ f32::from_bits(self.to_bits() as u32)
+ }
+}
+
+trait DoubleExt {
+ fn from_f64(input: f64) -> Self;
+ fn to_f64(self) -> f64;
+}
+
+impl DoubleExt for Double {
+ fn from_f64(input: f64) -> Self {
+ Self::from_bits(input.to_bits() as u128)
+ }
+
+ fn to_f64(self) -> f64 {
+ f64::from_bits(self.to_bits() as u64)
+ }
+}
+
+#[test]
+fn is_signaling() {
+ // We test qNaN, -qNaN, +sNaN, -sNaN with and without payloads.
+ let payload = 4;
+ assert!(!Single::qnan(None).is_signaling());
+ assert!(!(-Single::qnan(None)).is_signaling());
+ assert!(!Single::qnan(Some(payload)).is_signaling());
+ assert!(!(-Single::qnan(Some(payload))).is_signaling());
+ assert!(Single::snan(None).is_signaling());
+ assert!((-Single::snan(None)).is_signaling());
+ assert!(Single::snan(Some(payload)).is_signaling());
+ assert!((-Single::snan(Some(payload))).is_signaling());
+}
+
+#[test]
+fn next() {
+ // 1. Test Special Cases Values.
+ //
+ // Test all special values for nextUp and nextDown prescribed by IEEE-754R
+ // 2008. These are:
+ // 1. +inf
+ // 2. -inf
+ // 3. largest
+ // 4. -largest
+ // 5. smallest
+ // 6. -smallest
+ // 7. qNaN
+ // 8. sNaN
+ // 9. +0
+ // 10. -0
+
+ let mut status;
+
+ // nextUp(+inf) = +inf.
+ let test = unpack!(status=, Quad::INFINITY.next_up());
+ let expected = Quad::INFINITY;
+ assert_eq!(status, Status::OK);
+ assert!(test.is_infinite());
+ assert!(!test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(+inf) = -nextUp(-inf) = -(-largest) = largest
+ let test = unpack!(status=, Quad::INFINITY.next_down());
+ let expected = Quad::largest();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(-inf) = -largest
+ let test = unpack!(status=, (-Quad::INFINITY).next_up());
+ let expected = -Quad::largest();
+ assert_eq!(status, Status::OK);
+ assert!(test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-inf) = -nextUp(+inf) = -(+inf) = -inf.
+ let test = unpack!(status=, (-Quad::INFINITY).next_down());
+ let expected = -Quad::INFINITY;
+ assert_eq!(status, Status::OK);
+ assert!(test.is_infinite() && test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(largest) = +inf
+ let test = unpack!(status=, Quad::largest().next_up());
+ let expected = Quad::INFINITY;
+ assert_eq!(status, Status::OK);
+ assert!(test.is_infinite() && !test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(largest) = -nextUp(-largest)
+ // = -(-largest + inc)
+ // = largest - inc.
+ let test = unpack!(status=, Quad::largest().next_down());
+ let expected = "0x1.fffffffffffffffffffffffffffep+16383".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_infinite() && !test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(-largest) = -largest + inc.
+ let test = unpack!(status=, (-Quad::largest()).next_up());
+ let expected = "-0x1.fffffffffffffffffffffffffffep+16383".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-largest) = -nextUp(largest) = -(inf) = -inf.
+ let test = unpack!(status=, (-Quad::largest()).next_down());
+ let expected = -Quad::INFINITY;
+ assert_eq!(status, Status::OK);
+ assert!(test.is_infinite() && test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(smallest) = smallest + inc.
+ let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "0x0.0000000000000000000000000002p-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(smallest) = -nextUp(-smallest) = -(-0) = +0.
+ let test = unpack!(status=, "0x0.0000000000000000000000000001p-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = Quad::ZERO;
+ assert_eq!(status, Status::OK);
+ assert!(test.is_pos_zero());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(-smallest) = -0.
+ let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = -Quad::ZERO;
+ assert_eq!(status, Status::OK);
+ assert!(test.is_neg_zero());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-smallest) = -nextUp(smallest) = -smallest - inc.
+ let test = unpack!(status=, "-0x0.0000000000000000000000000001p-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "-0x0.0000000000000000000000000002p-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(qNaN) = qNaN
+ let test = unpack!(status=, Quad::qnan(None).next_up());
+ let expected = Quad::qnan(None);
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(qNaN) = qNaN
+ let test = unpack!(status=, Quad::qnan(None).next_down());
+ let expected = Quad::qnan(None);
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(sNaN) = qNaN
+ let test = unpack!(status=, Quad::snan(None).next_up());
+ let expected = Quad::qnan(None);
+ assert_eq!(status, Status::INVALID_OP);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(sNaN) = qNaN
+ let test = unpack!(status=, Quad::snan(None).next_down());
+ let expected = Quad::qnan(None);
+ assert_eq!(status, Status::INVALID_OP);
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(+0) = +smallest
+ let test = unpack!(status=, Quad::ZERO.next_up());
+ let expected = Quad::SMALLEST;
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(+0) = -nextUp(-0) = -smallest
+ let test = unpack!(status=, Quad::ZERO.next_down());
+ let expected = -Quad::SMALLEST;
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(-0) = +smallest
+ let test = unpack!(status=, (-Quad::ZERO).next_up());
+ let expected = Quad::SMALLEST;
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-0) = -nextUp(0) = -smallest
+ let test = unpack!(status=, (-Quad::ZERO).next_down());
+ let expected = -Quad::SMALLEST;
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // 2. Binade Boundary Tests.
+
+ // 2a. Test denormal <-> normal binade boundaries.
+ // * nextUp(+Largest Denormal) -> +Smallest Normal.
+ // * nextDown(-Largest Denormal) -> -Smallest Normal.
+ // * nextUp(-Smallest Normal) -> -Largest Denormal.
+ // * nextDown(+Smallest Normal) -> +Largest Denormal.
+
+ // nextUp(+Largest Denormal) -> +Smallest Normal.
+ let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "0x1.0000000000000000000000000000p-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-Largest Denormal) -> -Smallest Normal.
+ let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "-0x1.0000000000000000000000000000p-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(-Smallest Normal) -> -Largest Denormal.
+ let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "-0x0.ffffffffffffffffffffffffffffp-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(+Smallest Normal) -> +Largest Denormal.
+ let test = unpack!(status=, "+0x1.0000000000000000000000000000p-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "+0x0.ffffffffffffffffffffffffffffp-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ // 2b. Test normal <-> normal binade boundaries.
+ // * nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1.
+ // * nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1.
+ // * nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary.
+ // * nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary.
+
+ // nextUp(-Normal Binade Boundary) -> -Normal Binade Boundary + 1.
+ let test = unpack!(status=, "-0x1p+1".parse::<Quad>().unwrap().next_up());
+ let expected = "-0x1.ffffffffffffffffffffffffffffp+0".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(+Normal Binade Boundary) -> +Normal Binade Boundary - 1.
+ let test = unpack!(status=, "0x1p+1".parse::<Quad>().unwrap().next_down());
+ let expected = "0x1.ffffffffffffffffffffffffffffp+0".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(+Normal Binade Boundary - 1) -> +Normal Binade Boundary.
+ let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp+0"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "0x1p+1".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-Normal Binade Boundary + 1) -> -Normal Binade Boundary.
+ let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp+0"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "-0x1p+1".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // 2c. Test using next at binade boundaries with a direction away from the
+ // binade boundary. Away from denormal <-> normal boundaries.
+ //
+ // This is to make sure that even though we are at a binade boundary, since
+ // we are rounding away, we do not trigger the binade boundary code. Thus we
+ // test:
+ // * nextUp(-Largest Denormal) -> -Largest Denormal + inc.
+ // * nextDown(+Largest Denormal) -> +Largest Denormal - inc.
+ // * nextUp(+Smallest Normal) -> +Smallest Normal + inc.
+ // * nextDown(-Smallest Normal) -> -Smallest Normal - inc.
+
+ // nextUp(-Largest Denormal) -> -Largest Denormal + inc.
+ let test = unpack!(status=, "-0x0.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "-0x0.fffffffffffffffffffffffffffep-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.is_denormal());
+ assert!(test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(+Largest Denormal) -> +Largest Denormal - inc.
+ let test = unpack!(status=, "0x0.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "0x0.fffffffffffffffffffffffffffep-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.is_denormal());
+ assert!(!test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(+Smallest Normal) -> +Smallest Normal + inc.
+ let test = unpack!(status=, "0x1.0000000000000000000000000000p-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "0x1.0000000000000000000000000001p-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_denormal());
+ assert!(!test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-Smallest Normal) -> -Smallest Normal - inc.
+ let test = unpack!(status=, "-0x1.0000000000000000000000000000p-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "-0x1.0000000000000000000000000001p-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_denormal());
+ assert!(test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // 2d. Test values which cause our exponent to go to min exponent. This
+ // is to ensure that guards in the code to check for min exponent
+ // trigger properly.
+ // * nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382
+ // * nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) ->
+ // -0x1p-16381
+ // * nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16381
+ // * nextDown(0x1p-16381) -> 0x1.ffffffffffffffffffffffffffffp-16382
+
+ // nextUp(-0x1p-16381) -> -0x1.ffffffffffffffffffffffffffffp-16382
+ let test = unpack!(status=, "-0x1p-16381".parse::<Quad>().unwrap().next_up());
+ let expected = "-0x1.ffffffffffffffffffffffffffffp-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-0x1.ffffffffffffffffffffffffffffp-16382) ->
+ // -0x1p-16381
+ let test = unpack!(status=, "-0x1.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "-0x1p-16381".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(0x1.ffffffffffffffffffffffffffffp-16382) -> 0x1p-16381
+ let test = unpack!(status=, "0x1.ffffffffffffffffffffffffffffp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "0x1p-16381".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(0x1p-16381) -> 0x1.ffffffffffffffffffffffffffffp-16382
+ let test = unpack!(status=, "0x1p-16381".parse::<Quad>().unwrap().next_down());
+ let expected = "0x1.ffffffffffffffffffffffffffffp-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.bitwise_eq(expected));
+
+ // 3. Now we test both denormal/normal computation which will not cause us
+ // to go across binade boundaries. Specifically we test:
+ // * nextUp(+Denormal) -> +Denormal.
+ // * nextDown(+Denormal) -> +Denormal.
+ // * nextUp(-Denormal) -> -Denormal.
+ // * nextDown(-Denormal) -> -Denormal.
+ // * nextUp(+Normal) -> +Normal.
+ // * nextDown(+Normal) -> +Normal.
+ // * nextUp(-Normal) -> -Normal.
+ // * nextDown(-Normal) -> -Normal.
+
+ // nextUp(+Denormal) -> +Denormal.
+ let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "0x0.ffffffffffffffffffffffff000dp-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.is_denormal());
+ assert!(!test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(+Denormal) -> +Denormal.
+ let test = unpack!(status=, "0x0.ffffffffffffffffffffffff000cp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "0x0.ffffffffffffffffffffffff000bp-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.is_denormal());
+ assert!(!test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(-Denormal) -> -Denormal.
+ let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "-0x0.ffffffffffffffffffffffff000bp-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.is_denormal());
+ assert!(test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-Denormal) -> -Denormal
+ let test = unpack!(status=, "-0x0.ffffffffffffffffffffffff000cp-16382"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "-0x0.ffffffffffffffffffffffff000dp-16382".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(test.is_denormal());
+ assert!(test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(+Normal) -> +Normal.
+ let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "0x1.ffffffffffffffffffffffff000dp-16000".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_denormal());
+ assert!(!test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(+Normal) -> +Normal.
+ let test = unpack!(status=, "0x1.ffffffffffffffffffffffff000cp-16000"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "0x1.ffffffffffffffffffffffff000bp-16000".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_denormal());
+ assert!(!test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextUp(-Normal) -> -Normal.
+ let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000"
+ .parse::<Quad>()
+ .unwrap()
+ .next_up());
+ let expected = "-0x1.ffffffffffffffffffffffff000bp-16000".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_denormal());
+ assert!(test.is_negative());
+ assert!(test.bitwise_eq(expected));
+
+ // nextDown(-Normal) -> -Normal.
+ let test = unpack!(status=, "-0x1.ffffffffffffffffffffffff000cp-16000"
+ .parse::<Quad>()
+ .unwrap()
+ .next_down());
+ let expected = "-0x1.ffffffffffffffffffffffff000dp-16000".parse::<Quad>().unwrap();
+ assert_eq!(status, Status::OK);
+ assert!(!test.is_denormal());
+ assert!(test.is_negative());
+ assert!(test.bitwise_eq(expected));
+}
+
+#[test]
+fn fma() {
+ {
+ let mut f1 = Single::from_f32(14.5);
+ let f2 = Single::from_f32(-14.5);
+ let f3 = Single::from_f32(225.0);
+ f1 = f1.mul_add(f2, f3).value;
+ assert_eq!(14.75, f1.to_f32());
+ }
+
+ {
+ let val2 = Single::from_f32(2.0);
+ let mut f1 = Single::from_f32(1.17549435e-38);
+ let mut f2 = Single::from_f32(1.17549435e-38);
+ f1 /= val2;
+ f2 /= val2;
+ let f3 = Single::from_f32(12.0);
+ f1 = f1.mul_add(f2, f3).value;
+ assert_eq!(12.0, f1.to_f32());
+ }
+
+ // Test for correct zero sign when answer is exactly zero.
+ // fma(1.0, -1.0, 1.0) -> +ve 0.
+ {
+ let mut f1 = Double::from_f64(1.0);
+ let f2 = Double::from_f64(-1.0);
+ let f3 = Double::from_f64(1.0);
+ f1 = f1.mul_add(f2, f3).value;
+ assert!(!f1.is_negative() && f1.is_zero());
+ }
+
+ // Test for correct zero sign when answer is exactly zero and rounding towards
+ // negative.
+ // fma(1.0, -1.0, 1.0) -> -ve 0.
+ {
+ let mut f1 = Double::from_f64(1.0);
+ let f2 = Double::from_f64(-1.0);
+ let f3 = Double::from_f64(1.0);
+ f1 = f1.mul_add_r(f2, f3, Round::TowardNegative).value;
+ assert!(f1.is_negative() && f1.is_zero());
+ }
+
+ // Test for correct (in this case -ve) sign when adding like signed zeros.
+ // Test fma(0.0, -0.0, -0.0) -> -ve 0.
+ {
+ let mut f1 = Double::from_f64(0.0);
+ let f2 = Double::from_f64(-0.0);
+ let f3 = Double::from_f64(-0.0);
+ f1 = f1.mul_add(f2, f3).value;
+ assert!(f1.is_negative() && f1.is_zero());
+ }
+
+ // Test -ve sign preservation when small negative results underflow.
+ {
+ let mut f1 = "-0x1p-1074".parse::<Double>().unwrap();
+ let f2 = "+0x1p-1074".parse::<Double>().unwrap();
+ let f3 = Double::from_f64(0.0);
+ f1 = f1.mul_add(f2, f3).value;
+ assert!(f1.is_negative() && f1.is_zero());
+ }
+
+ // Test x87 extended precision case from https://llvm.org/PR20728.
+ {
+ let mut m1 = X87DoubleExtended::from_u128(1).value;
+ let m2 = X87DoubleExtended::from_u128(1).value;
+ let a = X87DoubleExtended::from_u128(3).value;
+
+ let mut loses_info = false;
+ m1 = m1.mul_add(m2, a).value;
+ let r: Single = m1.convert(&mut loses_info).value;
+ assert!(!loses_info);
+ assert_eq!(4.0, r.to_f32());
+ }
+}
+
+#[test]
+fn issue_69532() {
+ let f = Double::from_bits(0x7FF0_0000_0000_0001u64 as u128);
+ let mut loses_info = false;
+ let sta = f.convert(&mut loses_info);
+ let r: Single = sta.value;
+ assert!(loses_info);
+ assert!(r.is_nan());
+ assert_eq!(sta.status, Status::INVALID_OP);
+}
+
+#[test]
+fn min_num() {
+ let f1 = Double::from_f64(1.0);
+ let f2 = Double::from_f64(2.0);
+ let nan = Double::NAN;
+
+ assert_eq!(1.0, f1.min(f2).to_f64());
+ assert_eq!(1.0, f2.min(f1).to_f64());
+ assert_eq!(1.0, f1.min(nan).to_f64());
+ assert_eq!(1.0, nan.min(f1).to_f64());
+}
+
+#[test]
+fn max_num() {
+ let f1 = Double::from_f64(1.0);
+ let f2 = Double::from_f64(2.0);
+ let nan = Double::NAN;
+
+ assert_eq!(2.0, f1.max(f2).to_f64());
+ assert_eq!(2.0, f2.max(f1).to_f64());
+ assert_eq!(1.0, f1.max(nan).to_f64());
+ assert_eq!(1.0, nan.max(f1).to_f64());
+}
+
+#[test]
+fn denormal() {
+ // Test single precision
+ {
+ assert!(!Single::from_f32(0.0).is_denormal());
+
+ let mut t = "1.17549435082228750797e-38".parse::<Single>().unwrap();
+ assert!(!t.is_denormal());
+
+ let val2 = Single::from_f32(2.0e0);
+ t /= val2;
+ assert!(t.is_denormal());
+ }
+
+ // Test double precision
+ {
+ assert!(!Double::from_f64(0.0).is_denormal());
+
+ let mut t = "2.22507385850720138309e-308".parse::<Double>().unwrap();
+ assert!(!t.is_denormal());
+
+ let val2 = Double::from_f64(2.0e0);
+ t /= val2;
+ assert!(t.is_denormal());
+ }
+
+ // Test Intel double-ext
+ {
+ assert!(!X87DoubleExtended::from_u128(0).value.is_denormal());
+
+ let mut t = "3.36210314311209350626e-4932".parse::<X87DoubleExtended>().unwrap();
+ assert!(!t.is_denormal());
+
+ t /= X87DoubleExtended::from_u128(2).value;
+ assert!(t.is_denormal());
+ }
+
+ // Test quadruple precision
+ {
+ assert!(!Quad::from_u128(0).value.is_denormal());
+
+ let mut t = "3.36210314311209350626267781732175260e-4932".parse::<Quad>().unwrap();
+ assert!(!t.is_denormal());
+
+ t /= Quad::from_u128(2).value;
+ assert!(t.is_denormal());
+ }
+}
+
+#[test]
+fn decimal_strings_without_null_terminators() {
+ // Make sure that we can parse strings without null terminators.
+ // rdar://14323230.
+ let val = "0.00"[..3].parse::<Double>().unwrap();
+ assert_eq!(val.to_f64(), 0.0);
+ let val = "0.01"[..3].parse::<Double>().unwrap();
+ assert_eq!(val.to_f64(), 0.0);
+ let val = "0.09"[..3].parse::<Double>().unwrap();
+ assert_eq!(val.to_f64(), 0.0);
+ let val = "0.095"[..4].parse::<Double>().unwrap();
+ assert_eq!(val.to_f64(), 0.09);
+ let val = "0.00e+3"[..7].parse::<Double>().unwrap();
+ assert_eq!(val.to_f64(), 0.00);
+ let val = "0e+3"[..4].parse::<Double>().unwrap();
+ assert_eq!(val.to_f64(), 0.00);
+}
+
+#[test]
+fn from_zero_decimal_string() {
+ assert_eq!(0.0, "0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0.".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0.".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0.".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, ".0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+.0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-.0".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0.0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0.0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0.0".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "00000.".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+00000.".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-00000.".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, ".00000".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+.00000".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-.00000".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0000.00000".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0000.00000".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0000.00000".parse::<Double>().unwrap().to_f64());
+}
+
+#[test]
+fn from_zero_decimal_single_exponent_string() {
+ assert_eq!(0.0, "0e1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0e1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0e1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0e+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0e+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0e+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0e-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0e-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0e-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0.e1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0.e1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0.e1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0.e+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0.e+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0.e+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0.e-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0.e-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0.e-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, ".0e1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+.0e1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-.0e1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, ".0e+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+.0e+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-.0e+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, ".0e-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+.0e-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-.0e-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0.0e1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0.0e1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0.0e1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0.0e+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0.0e+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0.0e+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0.0e-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0.0e-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0.0e-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "000.0000e1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+000.0000e+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-000.0000e+1".parse::<Double>().unwrap().to_f64());
+}
+
+#[test]
+fn from_zero_decimal_large_exponent_string() {
+ assert_eq!(0.0, "0e1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0e1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0e1234".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0e+1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0e+1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0e+1234".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0e-1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0e-1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0e-1234".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "000.0000e1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "000.0000e-1234".parse::<Double>().unwrap().to_f64());
+}
+
+#[test]
+fn from_zero_hexadecimal_string() {
+ assert_eq!(0.0, "0x0p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x0p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0p1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x0p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x0p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0p+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x0p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x0p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0p-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x0.p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x0.p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0.p1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x0.p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x0.p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0.p+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x0.p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x0.p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0.p-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x.0p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x.0p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x.0p1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x.0p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x.0p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x.0p+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x.0p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x.0p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x.0p-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x0.0p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x0.0p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0.0p1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x0.0p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x0.0p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0.0p+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x0.0p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "+0x0.0p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0.0p-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.0, "0x00000.p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "0x0000.00000p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "0x.00000p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "0x0.p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "0x0p1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.0, "-0x0p1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "0x00000.p1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "0x0000.00000p1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "0x.00000p1234".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.0, "0x0.p1234".parse::<Double>().unwrap().to_f64());
+}
+
+#[test]
+fn from_decimal_string() {
+ assert_eq!(1.0, "1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.0, "2.".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.5, ".5".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1.0, "1.0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-2.0, "-2".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-4.0, "-4.".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.5, "-.5".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-1.5, "-1.5".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1.25e12, "1.25e12".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1.25e+12, "1.25e+12".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1.25e-12, "1.25e-12".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1024.0, "1024.".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1024.05, "1024.05000".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.05, ".05000".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.0, "2.".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.0e2, "2.e2".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.0e+2, "2.e+2".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.0e-2, "2.e-2".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.05e2, "002.05000e2".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.05e+2, "002.05000e+2".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.05e-2, "002.05000e-2".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.05e12, "002.05000e12".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.05e+12, "002.05000e+12".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.05e-12, "002.05000e-12".parse::<Double>().unwrap().to_f64());
+
+ // These are "carefully selected" to overflow the fast log-base
+ // calculations in the implementation.
+ assert!("99e99999".parse::<Double>().unwrap().is_infinite());
+ assert!("-99e99999".parse::<Double>().unwrap().is_infinite());
+ assert!("1e-99999".parse::<Double>().unwrap().is_pos_zero());
+ assert!("-1e-99999".parse::<Double>().unwrap().is_neg_zero());
+
+ assert_eq!(2.71828, "2.71828".parse::<Double>().unwrap().to_f64());
+}
+
+#[test]
+fn from_hexadecimal_string() {
+ assert_eq!(1.0, "0x1p0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1.0, "+0x1p0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-1.0, "-0x1p0".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(1.0, "0x1p+0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1.0, "+0x1p+0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-1.0, "-0x1p+0".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(1.0, "0x1p-0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1.0, "+0x1p-0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-1.0, "-0x1p-0".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(2.0, "0x1p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.0, "+0x1p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-2.0, "-0x1p1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(2.0, "0x1p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2.0, "+0x1p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-2.0, "-0x1p+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.5, "0x1p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.5, "+0x1p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.5, "-0x1p-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(3.0, "0x1.8p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(3.0, "+0x1.8p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-3.0, "-0x1.8p1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(3.0, "0x1.8p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(3.0, "+0x1.8p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-3.0, "-0x1.8p+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.75, "0x1.8p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.75, "+0x1.8p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.75, "-0x1.8p-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(8192.0, "0x1000.000p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(8192.0, "+0x1000.000p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-8192.0, "-0x1000.000p1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(8192.0, "0x1000.000p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(8192.0, "+0x1000.000p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-8192.0, "-0x1000.000p+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(2048.0, "0x1000.000p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2048.0, "+0x1000.000p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-2048.0, "-0x1000.000p-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(8192.0, "0x1000p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(8192.0, "+0x1000p1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-8192.0, "-0x1000p1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(8192.0, "0x1000p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(8192.0, "+0x1000p+1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-8192.0, "-0x1000p+1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(2048.0, "0x1000p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(2048.0, "+0x1000p-1".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-2048.0, "-0x1000p-1".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(16384.0, "0x10p10".parse::<Double>().unwrap().to_f64());
+ assert_eq!(16384.0, "+0x10p10".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-16384.0, "-0x10p10".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(16384.0, "0x10p+10".parse::<Double>().unwrap().to_f64());
+ assert_eq!(16384.0, "+0x10p+10".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-16384.0, "-0x10p+10".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(0.015625, "0x10p-10".parse::<Double>().unwrap().to_f64());
+ assert_eq!(0.015625, "+0x10p-10".parse::<Double>().unwrap().to_f64());
+ assert_eq!(-0.015625, "-0x10p-10".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(1.0625, "0x1.1p0".parse::<Double>().unwrap().to_f64());
+ assert_eq!(1.0, "0x1p0".parse::<Double>().unwrap().to_f64());
+
+ assert_eq!(
+ "0x1p-150".parse::<Double>().unwrap().to_f64(),
+ "+0x800000000000000001.p-221".parse::<Double>().unwrap().to_f64()
+ );
+ assert_eq!(
+ 2251799813685248.5,
+ "0x80000000000004000000.010p-28".parse::<Double>().unwrap().to_f64()
+ );
+}
+
+#[test]
+fn to_string() {
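+ // Helper: render a Double through its Display impl. In the format string,
+ // `.1$` takes the precision from positional argument 1 and `2$` takes the
+ // width from positional argument 2; precision == 0 falls through to the
+ // no-precision form, which prints enough digits to round-trip the value.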
+ let to_string = |d: f64, precision: usize, width: usize| {
+ let x = Double::from_f64(d);
+ if precision == 0 {
+ format!("{:1$}", x, width)
+ } else {
+ format!("{:2$.1$}", x, precision, width)
+ }
+ };
+ assert_eq!("10", to_string(10.0, 6, 3));
+ assert_eq!("1.0E+1", to_string(10.0, 6, 0));
+ assert_eq!("10100", to_string(1.01E+4, 5, 2));
+ assert_eq!("1.01E+4", to_string(1.01E+4, 4, 2));
+ assert_eq!("1.01E+4", to_string(1.01E+4, 5, 1));
+ assert_eq!("0.0101", to_string(1.01E-2, 5, 2));
+ assert_eq!("0.0101", to_string(1.01E-2, 4, 2));
+ assert_eq!("1.01E-2", to_string(1.01E-2, 5, 1));
+ assert_eq!("0.78539816339744828", to_string(0.78539816339744830961, 0, 3));
+ assert_eq!("4.9406564584124654E-324", to_string(4.9406564584124654e-324, 0, 3));
+ assert_eq!("873.18340000000001", to_string(873.1834, 0, 1));
+ assert_eq!("8.7318340000000001E+2", to_string(873.1834, 0, 0));
+ assert_eq!("1.7976931348623157E+308", to_string(1.7976931348623157E+308, 0, 0));
+
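+ // Same checks with the `#` (alternate) flag which, as the expected strings
+ // below show, switches to the lowercase scientific style with a padded
+ // exponent (e.g. "1.000000e+01" rather than "1.0E+1").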
+ let to_string = |d: f64, precision: usize, width: usize| {
+ let x = Double::from_f64(d);
+ if precision == 0 {
+ format!("{:#1$}", x, width)
+ } else {
+ format!("{:#2$.1$}", x, precision, width)
+ }
+ };
+ assert_eq!("10", to_string(10.0, 6, 3));
+ assert_eq!("1.000000e+01", to_string(10.0, 6, 0));
+ assert_eq!("10100", to_string(1.01E+4, 5, 2));
+ assert_eq!("1.0100e+04", to_string(1.01E+4, 4, 2));
+ assert_eq!("1.01000e+04", to_string(1.01E+4, 5, 1));
+ assert_eq!("0.0101", to_string(1.01E-2, 5, 2));
+ assert_eq!("0.0101", to_string(1.01E-2, 4, 2));
+ assert_eq!("1.01000e-02", to_string(1.01E-2, 5, 1));
+ assert_eq!("0.78539816339744828", to_string(0.78539816339744830961, 0, 3));
+ assert_eq!("4.94065645841246540e-324", to_string(4.9406564584124654e-324, 0, 3));
+ assert_eq!("873.18340000000001", to_string(873.1834, 0, 1));
+ assert_eq!("8.73183400000000010e+02", to_string(873.1834, 0, 0));
+ assert_eq!("1.79769313486231570e+308", to_string(1.7976931348623157E+308, 0, 0));
+}
+
+#[test]
+fn to_integer() {
+ let mut is_exact = false;
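+ // The first argument of to_u128_r/to_i128_r is the bit width of the target
+ // integer: with width 5 the unsigned range is 0..=31 and the signed range
+ // is -16..=15. Out-of-range inputs raise INVALID_OP and clamp to the
+ // nearest bound, while in-range non-integers raise INEXACT and truncate.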
+
+ assert_eq!(
+ Status::OK.and(10),
+ "10".parse::<Double>().unwrap().to_u128_r(5, Round::TowardZero, &mut is_exact,)
+ );
+ assert!(is_exact);
+
+ assert_eq!(
+ Status::INVALID_OP.and(0),
+ "-10".parse::<Double>().unwrap().to_u128_r(5, Round::TowardZero, &mut is_exact,)
+ );
+ assert!(!is_exact);
+
+ assert_eq!(
+ Status::INVALID_OP.and(31),
+ "32".parse::<Double>().unwrap().to_u128_r(5, Round::TowardZero, &mut is_exact,)
+ );
+ assert!(!is_exact);
+
+ assert_eq!(
+ Status::INEXACT.and(7),
+ "7.9".parse::<Double>().unwrap().to_u128_r(5, Round::TowardZero, &mut is_exact,)
+ );
+ assert!(!is_exact);
+
+ assert_eq!(
+ Status::OK.and(-10),
+ "-10".parse::<Double>().unwrap().to_i128_r(5, Round::TowardZero, &mut is_exact,)
+ );
+ assert!(is_exact);
+
+ assert_eq!(
+ Status::INVALID_OP.and(-16),
+ "-17".parse::<Double>().unwrap().to_i128_r(5, Round::TowardZero, &mut is_exact,)
+ );
+ assert!(!is_exact);
+
+ assert_eq!(
+ Status::INVALID_OP.and(15),
+ "16".parse::<Double>().unwrap().to_i128_r(5, Round::TowardZero, &mut is_exact,)
+ );
+ assert!(!is_exact);
+}
+
+#[test]
+fn nan() {
+ fn nanbits<T: Float>(signaling: bool, negative: bool, fill: u128) -> u128 {
+ let x = if signaling { T::snan(Some(fill)) } else { T::qnan(Some(fill)) };
+ if negative { (-x).to_bits() } else { x.to_bits() }
+ }
+
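+ // For IEEE formats a NaN has an all-ones exponent and a non-zero
+ // significand; the top significand bit is the quiet bit. qnan() sets it,
+ // snan() clears it and keeps the next bit set so the significand stays
+ // non-zero, and the fill payload is masked into the remaining bits.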
+ assert_eq!(0x7fc00000, nanbits::<Single>(false, false, 0));
+ assert_eq!(0xffc00000, nanbits::<Single>(false, true, 0));
+ assert_eq!(0x7fc0ae72, nanbits::<Single>(false, false, 0xae72));
+ assert_eq!(0x7fffae72, nanbits::<Single>(false, false, 0xffffae72));
+ assert_eq!(0x7fa00000, nanbits::<Single>(true, false, 0));
+ assert_eq!(0xffa00000, nanbits::<Single>(true, true, 0));
+ assert_eq!(0x7f80ae72, nanbits::<Single>(true, false, 0xae72));
+ assert_eq!(0x7fbfae72, nanbits::<Single>(true, false, 0xffffae72));
+
+ assert_eq!(0x7ff8000000000000, nanbits::<Double>(false, false, 0));
+ assert_eq!(0xfff8000000000000, nanbits::<Double>(false, true, 0));
+ assert_eq!(0x7ff800000000ae72, nanbits::<Double>(false, false, 0xae72));
+ assert_eq!(0x7fffffffffffae72, nanbits::<Double>(false, false, 0xffffffffffffae72));
+ assert_eq!(0x7ff4000000000000, nanbits::<Double>(true, false, 0));
+ assert_eq!(0xfff4000000000000, nanbits::<Double>(true, true, 0));
+ assert_eq!(0x7ff000000000ae72, nanbits::<Double>(true, false, 0xae72));
+ assert_eq!(0x7ff7ffffffffae72, nanbits::<Double>(true, false, 0xffffffffffffae72));
+}
+
+#[test]
+fn string_decimal_death() {
+ assert_eq!("".parse::<Double>(), Err(ParseError("Invalid string length")));
+ assert_eq!("+".parse::<Double>(), Err(ParseError("String has no digits")));
+ assert_eq!("-".parse::<Double>(), Err(ParseError("String has no digits")));
+
+ assert_eq!("\0".parse::<Double>(), Err(ParseError("Invalid character in significand")));
+ assert_eq!("1\0".parse::<Double>(), Err(ParseError("Invalid character in significand")));
+ assert_eq!("1\02".parse::<Double>(), Err(ParseError("Invalid character in significand")));
+ assert_eq!("1\02e1".parse::<Double>(), Err(ParseError("Invalid character in significand")));
+ assert_eq!("1e\0".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
+ assert_eq!("1e1\0".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
+ assert_eq!("1e1\02".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
+
+ assert_eq!("1.0f".parse::<Double>(), Err(ParseError("Invalid character in significand")));
+
+ assert_eq!("..".parse::<Double>(), Err(ParseError("String contains multiple dots")));
+ assert_eq!("..0".parse::<Double>(), Err(ParseError("String contains multiple dots")));
+ assert_eq!("1.0.0".parse::<Double>(), Err(ParseError("String contains multiple dots")));
+}
+
+#[test]
+fn string_decimal_significand_death() {
+ assert_eq!(".".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+.".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-.".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!("e".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+e".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-e".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!("e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!(".e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+.e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-.e1".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!(".e".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+.e".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-.e".parse::<Double>(), Err(ParseError("Significand has no digits")));
+}
+
+#[test]
+fn string_decimal_exponent_death() {
+ assert_eq!("1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("1.e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+1.e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-1.e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!(".1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("1.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+1.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-1.1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("1e+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("1e-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!(".1e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!(".1e+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!(".1e-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("1.0e".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("1.0e+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("1.0e-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+}
+
+#[test]
+fn string_hexadecimal_death() {
+ assert_eq!("0x".parse::<Double>(), Err(ParseError("Invalid string")));
+ assert_eq!("+0x".parse::<Double>(), Err(ParseError("Invalid string")));
+ assert_eq!("-0x".parse::<Double>(), Err(ParseError("Invalid string")));
+
+ assert_eq!("0x0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+ assert_eq!("+0x0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+ assert_eq!("-0x0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+
+ assert_eq!("0x0.".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+ assert_eq!("+0x0.".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+ assert_eq!("-0x0.".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+
+ assert_eq!("0x.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+ assert_eq!("+0x.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+ assert_eq!("-0x.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+
+ assert_eq!("0x0.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+ assert_eq!("+0x0.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+ assert_eq!("-0x0.0".parse::<Double>(), Err(ParseError("Hex strings require an exponent")));
+
+ assert_eq!("0x\0".parse::<Double>(), Err(ParseError("Invalid character in significand")));
+ assert_eq!("0x1\0".parse::<Double>(), Err(ParseError("Invalid character in significand")));
+ assert_eq!("0x1\02".parse::<Double>(), Err(ParseError("Invalid character in significand")));
+ assert_eq!("0x1\02p1".parse::<Double>(), Err(ParseError("Invalid character in significand")));
+ assert_eq!("0x1p\0".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
+ assert_eq!("0x1p1\0".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
+ assert_eq!("0x1p1\02".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
+
+ assert_eq!("0x1p0f".parse::<Double>(), Err(ParseError("Invalid character in exponent")));
+
+ assert_eq!("0x..p1".parse::<Double>(), Err(ParseError("String contains multiple dots")));
+ assert_eq!("0x..0p1".parse::<Double>(), Err(ParseError("String contains multiple dots")));
+ assert_eq!("0x1.0.0p1".parse::<Double>(), Err(ParseError("String contains multiple dots")));
+}
+
+#[test]
+fn string_hexadecimal_significand_death() {
+ assert_eq!("0x.".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+0x.".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-0x.".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!("0xp".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+0xp".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-0xp".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!("0xp+".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+0xp+".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-0xp+".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!("0xp-".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+0xp-".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-0xp-".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!("0x.p".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+0x.p".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-0x.p".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!("0x.p+".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+0x.p+".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-0x.p+".parse::<Double>(), Err(ParseError("Significand has no digits")));
+
+ assert_eq!("0x.p-".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("+0x.p-".parse::<Double>(), Err(ParseError("Significand has no digits")));
+ assert_eq!("-0x.p-".parse::<Double>(), Err(ParseError("Significand has no digits")));
+}
+
+#[test]
+fn string_hexadecimal_exponent_death() {
+ assert_eq!("0x1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x1.p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x1.p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x1.p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x1.p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x1.p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x1.p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x1.p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x1.p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x1.p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x1.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x1.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x1.1p".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x1.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x1.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x1.1p+".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+
+ assert_eq!("0x1.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("+0x1.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+ assert_eq!("-0x1.1p-".parse::<Double>(), Err(ParseError("Exponent has no digits")));
+}
+
+#[test]
+fn exact_inverse() {
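+ // get_exact_inverse returns Some only when 1/x is exactly representable as
+ // a normal number in the same format, which in practice means x is a power
+ // of two whose reciprocal neither overflows nor lands in the denormal range.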
+ // Trivial operation.
+ assert!(Double::from_f64(2.0).get_exact_inverse().unwrap().bitwise_eq(Double::from_f64(0.5)));
+ assert!(Single::from_f32(2.0).get_exact_inverse().unwrap().bitwise_eq(Single::from_f32(0.5)));
+ assert!(
+ "2.0"
+ .parse::<Quad>()
+ .unwrap()
+ .get_exact_inverse()
+ .unwrap()
+ .bitwise_eq("0.5".parse::<Quad>().unwrap())
+ );
+ assert!(
+ "2.0"
+ .parse::<X87DoubleExtended>()
+ .unwrap()
+ .get_exact_inverse()
+ .unwrap()
+ .bitwise_eq("0.5".parse::<X87DoubleExtended>().unwrap())
+ );
+
+ // FLT_MIN
+ assert!(
+ Single::from_f32(1.17549435e-38)
+ .get_exact_inverse()
+ .unwrap()
+ .bitwise_eq(Single::from_f32(8.5070592e+37))
+ );
+
+ // Large float, inverse is a denormal.
+ assert!(Single::from_f32(1.7014118e38).get_exact_inverse().is_none());
+ // Zero
+ assert!(Double::from_f64(0.0).get_exact_inverse().is_none());
+ // Denormalized float
+ assert!(Single::from_f32(1.40129846e-45).get_exact_inverse().is_none());
+}
+
+#[test]
+fn round_to_integral() {
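+ // round_to_integral returns a StatusAnd; only the value is checked here.
+ // Note that rounding preserves the sign of the operand, so -0.5 becomes
+ // -0.0 (not +0.0) under every mode that rounds it to zero, including
+ // TowardPositive.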
+ let t = Double::from_f64(-0.5);
+ assert_eq!(-0.0, t.round_to_integral(Round::TowardZero).value.to_f64());
+ assert_eq!(-1.0, t.round_to_integral(Round::TowardNegative).value.to_f64());
+ assert_eq!(-0.0, t.round_to_integral(Round::TowardPositive).value.to_f64());
+ assert_eq!(-0.0, t.round_to_integral(Round::NearestTiesToEven).value.to_f64());
+
+ let s = Double::from_f64(3.14);
+ assert_eq!(3.0, s.round_to_integral(Round::TowardZero).value.to_f64());
+ assert_eq!(3.0, s.round_to_integral(Round::TowardNegative).value.to_f64());
+ assert_eq!(4.0, s.round_to_integral(Round::TowardPositive).value.to_f64());
+ assert_eq!(3.0, s.round_to_integral(Round::NearestTiesToEven).value.to_f64());
+
+ let r = Double::largest();
+ assert_eq!(r.to_f64(), r.round_to_integral(Round::TowardZero).value.to_f64());
+ assert_eq!(r.to_f64(), r.round_to_integral(Round::TowardNegative).value.to_f64());
+ assert_eq!(r.to_f64(), r.round_to_integral(Round::TowardPositive).value.to_f64());
+ assert_eq!(r.to_f64(), r.round_to_integral(Round::NearestTiesToEven).value.to_f64());
+
+ let p = Double::ZERO.round_to_integral(Round::TowardZero).value;
+ assert_eq!(0.0, p.to_f64());
+ let p = (-Double::ZERO).round_to_integral(Round::TowardZero).value;
+ assert_eq!(-0.0, p.to_f64());
+ let p = Double::NAN.round_to_integral(Round::TowardZero).value;
+ assert!(p.to_f64().is_nan());
+ let p = Double::INFINITY.round_to_integral(Round::TowardZero).value;
+ assert!(p.to_f64().is_infinite() && p.to_f64() > 0.0);
+ let p = (-Double::INFINITY).round_to_integral(Round::TowardZero).value;
+ assert!(p.to_f64().is_infinite() && p.to_f64() < 0.0);
+}
+
+#[test]
+fn is_integer() {
+ let t = Double::from_f64(-0.0);
+ assert!(t.is_integer());
+ let t = Double::from_f64(3.14159);
+ assert!(!t.is_integer());
+ let t = Double::NAN;
+ assert!(!t.is_integer());
+ let t = Double::INFINITY;
+ assert!(!t.is_integer());
+ let t = -Double::INFINITY;
+ assert!(!t.is_integer());
+ let t = Double::largest();
+ assert!(t.is_integer());
+}
+
+#[test]
+fn largest() {
+ assert_eq!(3.402823466e+38, Single::largest().to_f32());
+ assert_eq!(1.7976931348623158e+308, Double::largest().to_f64());
+}
+
+#[test]
+fn smallest() {
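+ // SMALLEST is the minimum positive denormal: 2^-149 for Single (written
+ // below as 0x0.000002p-126) and 2^-16494 for Quad.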
+ let test = Single::SMALLEST;
+ let expected = "0x0.000002p-126".parse::<Single>().unwrap();
+ assert!(!test.is_negative());
+ assert!(test.is_finite_non_zero());
+ assert!(test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ let test = -Single::SMALLEST;
+ let expected = "-0x0.000002p-126".parse::<Single>().unwrap();
+ assert!(test.is_negative());
+ assert!(test.is_finite_non_zero());
+ assert!(test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ let test = Quad::SMALLEST;
+ let expected = "0x0.0000000000000000000000000001p-16382".parse::<Quad>().unwrap();
+ assert!(!test.is_negative());
+ assert!(test.is_finite_non_zero());
+ assert!(test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ let test = -Quad::SMALLEST;
+ let expected = "-0x0.0000000000000000000000000001p-16382".parse::<Quad>().unwrap();
+ assert!(test.is_negative());
+ assert!(test.is_finite_non_zero());
+ assert!(test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+}
+
+#[test]
+fn smallest_normalized() {
+ let test = Single::smallest_normalized();
+ let expected = "0x1p-126".parse::<Single>().unwrap();
+ assert!(!test.is_negative());
+ assert!(test.is_finite_non_zero());
+ assert!(!test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ let test = -Single::smallest_normalized();
+ let expected = "-0x1p-126".parse::<Single>().unwrap();
+ assert!(test.is_negative());
+ assert!(test.is_finite_non_zero());
+ assert!(!test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ let test = Quad::smallest_normalized();
+ let expected = "0x1p-16382".parse::<Quad>().unwrap();
+ assert!(!test.is_negative());
+ assert!(test.is_finite_non_zero());
+ assert!(!test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+
+ let test = -Quad::smallest_normalized();
+ let expected = "-0x1p-16382".parse::<Quad>().unwrap();
+ assert!(test.is_negative());
+ assert!(test.is_finite_non_zero());
+ assert!(!test.is_denormal());
+ assert!(test.bitwise_eq(expected));
+}
+
+#[test]
+fn zero() {
+ assert_eq!(0.0, Single::from_f32(0.0).to_f32());
+ assert_eq!(-0.0, Single::from_f32(-0.0).to_f32());
+ assert!(Single::from_f32(-0.0).is_negative());
+
+ assert_eq!(0.0, Double::from_f64(0.0).to_f64());
+ assert_eq!(-0.0, Double::from_f64(-0.0).to_f64());
+ assert!(Double::from_f64(-0.0).is_negative());
+
+ fn test<T: Float>(sign: bool, bits: u128) {
+ let test = if sign { -T::ZERO } else { T::ZERO };
+ let pattern = if sign { "-0x0p+0" } else { "0x0p+0" };
+ let expected = pattern.parse::<T>().unwrap();
+ assert!(test.is_zero());
+ assert_eq!(sign, test.is_negative());
+ assert!(test.bitwise_eq(expected));
+ assert_eq!(bits, test.to_bits());
+ }
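+ // The expected bit pattern for a zero is just the sign bit of each format
+ // (or 0 for +0.0), e.g. bit 15 for Half and bit 79 for X87DoubleExtended.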
+ test::<Half>(false, 0);
+ test::<Half>(true, 0x8000);
+ test::<Single>(false, 0);
+ test::<Single>(true, 0x80000000);
+ test::<Double>(false, 0);
+ test::<Double>(true, 0x8000000000000000);
+ test::<Quad>(false, 0);
+ test::<Quad>(true, 0x8000000000000000_0000000000000000);
+ test::<X87DoubleExtended>(false, 0);
+ test::<X87DoubleExtended>(true, 0x8000_0000000000000000);
+}
+
+#[test]
+fn copy_sign() {
+ assert!(
+ Double::from_f64(-42.0)
+ .bitwise_eq(Double::from_f64(42.0).copy_sign(Double::from_f64(-1.0)))
+ );
+ assert!(
+ Double::from_f64(42.0)
+ .bitwise_eq(Double::from_f64(-42.0).copy_sign(Double::from_f64(1.0)))
+ );
+ assert!(
+ Double::from_f64(-42.0)
+ .bitwise_eq(Double::from_f64(-42.0).copy_sign(Double::from_f64(-1.0)))
+ );
+ assert!(
+ Double::from_f64(42.0)
+ .bitwise_eq(Double::from_f64(42.0).copy_sign(Double::from_f64(1.0)))
+ );
+}
+
+#[test]
+fn convert() {
+ let mut loses_info = false;
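+ // convert() returns a StatusAnd and sets loses_info when the destination
+ // cannot represent the source exactly, e.g. the 1 + 2^-53 sums below lose
+ // their low bit when narrowed to Double.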
+ let test = "1.0".parse::<Double>().unwrap();
+ let test: Single = test.convert(&mut loses_info).value;
+ assert_eq!(1.0, test.to_f32());
+ assert!(!loses_info);
+
+ let mut test = "0x1p-53".parse::<X87DoubleExtended>().unwrap();
+ let one = "1.0".parse::<X87DoubleExtended>().unwrap();
+ test += one;
+ let test: Double = test.convert(&mut loses_info).value;
+ assert_eq!(1.0, test.to_f64());
+ assert!(loses_info);
+
+ let mut test = "0x1p-53".parse::<Quad>().unwrap();
+ let one = "1.0".parse::<Quad>().unwrap();
+ test += one;
+ let test: Double = test.convert(&mut loses_info).value;
+ assert_eq!(1.0, test.to_f64());
+ assert!(loses_info);
+
+ let test = "0xf.fffffffp+28".parse::<X87DoubleExtended>().unwrap();
+ let test: Double = test.convert(&mut loses_info).value;
+ assert_eq!(4294967295.0, test.to_f64());
+ assert!(!loses_info);
+
+ let test = Single::qnan(None);
+ let x87_qnan = X87DoubleExtended::qnan(None);
+ let test: X87DoubleExtended = test.convert(&mut loses_info).value;
+ assert!(test.bitwise_eq(x87_qnan));
+ assert!(!loses_info);
+
+ let test = Single::snan(None);
+ let sta = test.convert(&mut loses_info);
+ let test: X87DoubleExtended = sta.value;
+ assert!(test.is_nan());
+ assert!(!test.is_signaling());
+ assert!(!loses_info);
+ assert_eq!(sta.status, Status::INVALID_OP);
+
+ let test = X87DoubleExtended::qnan(None);
+ let test: X87DoubleExtended = test.convert(&mut loses_info).value;
+ assert!(test.bitwise_eq(x87_qnan));
+ assert!(!loses_info);
+
+ let test = X87DoubleExtended::snan(None);
+ let sta = test.convert(&mut loses_info);
+ let test: X87DoubleExtended = sta.value;
+ assert!(test.is_nan());
+ assert!(!test.is_signaling());
+ assert!(!loses_info);
+ assert_eq!(sta.status, Status::INVALID_OP);
+}
+
+#[test]
+fn is_negative() {
+ let t = "0x1p+0".parse::<Single>().unwrap();
+ assert!(!t.is_negative());
+ let t = "-0x1p+0".parse::<Single>().unwrap();
+ assert!(t.is_negative());
+
+ assert!(!Single::INFINITY.is_negative());
+ assert!((-Single::INFINITY).is_negative());
+
+ assert!(!Single::ZERO.is_negative());
+ assert!((-Single::ZERO).is_negative());
+
+ assert!(!Single::NAN.is_negative());
+ assert!((-Single::NAN).is_negative());
+
+ assert!(!Single::snan(None).is_negative());
+ assert!((-Single::snan(None)).is_negative());
+}
+
+#[test]
+fn is_normal() {
+ let t = "0x1p+0".parse::<Single>().unwrap();
+ assert!(t.is_normal());
+
+ assert!(!Single::INFINITY.is_normal());
+ assert!(!Single::ZERO.is_normal());
+ assert!(!Single::NAN.is_normal());
+ assert!(!Single::snan(None).is_normal());
+ assert!(!"0x1p-149".parse::<Single>().unwrap().is_normal());
+}
+
+#[test]
+fn is_finite() {
+ let t = "0x1p+0".parse::<Single>().unwrap();
+ assert!(t.is_finite());
+ assert!(!Single::INFINITY.is_finite());
+ assert!(Single::ZERO.is_finite());
+ assert!(!Single::NAN.is_finite());
+ assert!(!Single::snan(None).is_finite());
+ assert!("0x1p-149".parse::<Single>().unwrap().is_finite());
+}
+
+#[test]
+fn is_infinite() {
+ let t = "0x1p+0".parse::<Single>().unwrap();
+ assert!(!t.is_infinite());
+ assert!(Single::INFINITY.is_infinite());
+ assert!(!Single::ZERO.is_infinite());
+ assert!(!Single::NAN.is_infinite());
+ assert!(!Single::snan(None).is_infinite());
+ assert!(!"0x1p-149".parse::<Single>().unwrap().is_infinite());
+}
+
+#[test]
+fn is_nan() {
+ let t = "0x1p+0".parse::<Single>().unwrap();
+ assert!(!t.is_nan());
+ assert!(!Single::INFINITY.is_nan());
+ assert!(!Single::ZERO.is_nan());
+ assert!(Single::NAN.is_nan());
+ assert!(Single::snan(None).is_nan());
+ assert!(!"0x1p-149".parse::<Single>().unwrap().is_nan());
+}
+
+#[test]
+fn is_finite_non_zero() {
+ // Test positive/negative normal value.
+ assert!("0x1p+0".parse::<Single>().unwrap().is_finite_non_zero());
+ assert!("-0x1p+0".parse::<Single>().unwrap().is_finite_non_zero());
+
+ // Test positive/negative denormal value.
+ assert!("0x1p-149".parse::<Single>().unwrap().is_finite_non_zero());
+ assert!("-0x1p-149".parse::<Single>().unwrap().is_finite_non_zero());
+
+ // Test +/- Infinity.
+ assert!(!Single::INFINITY.is_finite_non_zero());
+ assert!(!(-Single::INFINITY).is_finite_non_zero());
+
+ // Test +/- Zero.
+ assert!(!Single::ZERO.is_finite_non_zero());
+ assert!(!(-Single::ZERO).is_finite_non_zero());
+
+ // Test +/- qNaN. The sign doesn't mean anything for a qNaN, but paranoia
+ // can't hurt in this instance.
+ assert!(!Single::NAN.is_finite_non_zero());
+ assert!(!(-Single::NAN).is_finite_non_zero());
+
+ // Test +/- sNaN. The sign doesn't mean anything for an sNaN, but paranoia
+ // can't hurt in this instance.
+ assert!(!Single::snan(None).is_finite_non_zero());
+ assert!(!(-Single::snan(None)).is_finite_non_zero());
+}
+
+#[test]
+fn add() {
+ // Test special cases against each other and against normal values.
+
+ // FIXMES/NOTES:
+ // 1. Since we perform only default exception handling, all operations with
+ // signaling NaNs should have a result that is a quiet NaN. Currently they
+ // return an sNaN.
+
+ let p_inf = Single::INFINITY;
+ let m_inf = -Single::INFINITY;
+ let p_zero = Single::ZERO;
+ let m_zero = -Single::ZERO;
+ let qnan = Single::NAN;
+ let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
+ let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
+ let p_largest_value = Single::largest();
+ let m_largest_value = -Single::largest();
+ let p_smallest_value = Single::SMALLEST;
+ let m_smallest_value = -Single::SMALLEST;
+ let p_smallest_normalized = Single::smallest_normalized();
+ let m_smallest_normalized = -Single::smallest_normalized();
+
+ let overflow_status = Status::OVERFLOW | Status::INEXACT;
+
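+ // Each entry is (lhs, rhs, expected sum, expected status, expected
+ // category), with the expected sum given as a string to be parsed as a
+ // Single.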
+ let special_cases = [
+ (p_inf, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (p_inf, p_zero, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_zero, "inf", Status::OK, Category::Infinity),
+ (p_inf, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, p_largest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_largest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, p_smallest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_smallest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, p_smallest_normalized, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_smallest_normalized, "inf", Status::OK, Category::Infinity),
+ (m_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (m_inf, m_inf, "-inf", Status::OK, Category::Infinity),
+ (m_inf, p_zero, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_zero, "-inf", Status::OK, Category::Infinity),
+ (m_inf, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_inf, p_normal_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_normal_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, p_largest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_largest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, p_smallest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_smallest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, p_smallest_normalized, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_smallest_normalized, "-inf", Status::OK, Category::Infinity),
+ (p_zero, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_zero, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_zero, p_normal_value, "0x1p+0", Status::OK, Category::Normal),
+ (p_zero, m_normal_value, "-0x1p+0", Status::OK, Category::Normal),
+ (p_zero, p_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_zero, m_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_zero, p_smallest_value, "0x1p-149", Status::OK, Category::Normal),
+ (p_zero, m_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
+ (p_zero, p_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
+ (p_zero, m_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
+ (m_zero, p_inf, "inf", Status::OK, Category::Infinity),
+ (m_zero, m_inf, "-inf", Status::OK, Category::Infinity),
+ (m_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_zero, p_normal_value, "0x1p+0", Status::OK, Category::Normal),
+ (m_zero, m_normal_value, "-0x1p+0", Status::OK, Category::Normal),
+ (m_zero, p_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_zero, m_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_zero, p_smallest_value, "0x1p-149", Status::OK, Category::Normal),
+ (m_zero, m_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
+ (m_zero, p_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
+ (m_zero, m_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
+ (qnan, p_inf, "nan", Status::OK, Category::NaN),
+ (qnan, m_inf, "nan", Status::OK, Category::NaN),
+ (qnan, p_zero, "nan", Status::OK, Category::NaN),
+ (qnan, m_zero, "nan", Status::OK, Category::NaN),
+ (qnan, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (qnan, p_normal_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_normal_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_largest_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_largest_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_smallest_normalized, "nan", Status::OK, Category::NaN),
+ (qnan, m_smallest_normalized, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, snan, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_normal_value, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_normal_value, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_normal_value, p_zero, "0x1p+0", Status::OK, Category::Normal),
+ (p_normal_value, m_zero, "0x1p+0", Status::OK, Category::Normal),
+ (p_normal_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_normal_value, p_normal_value, "0x1p+1", Status::OK, Category::Normal),
+ (p_normal_value, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_normal_value, p_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_normal_value, m_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_normal_value, p_smallest_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (p_normal_value, m_smallest_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (p_normal_value, p_smallest_normalized, "0x1p+0", Status::INEXACT, Category::Normal),
+ (p_normal_value, m_smallest_normalized, "0x1p+0", Status::INEXACT, Category::Normal),
+ (m_normal_value, p_inf, "inf", Status::OK, Category::Infinity),
+ (m_normal_value, m_inf, "-inf", Status::OK, Category::Infinity),
+ (m_normal_value, p_zero, "-0x1p+0", Status::OK, Category::Normal),
+ (m_normal_value, m_zero, "-0x1p+0", Status::OK, Category::Normal),
+ (m_normal_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_normal_value, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_normal_value, m_normal_value, "-0x1p+1", Status::OK, Category::Normal),
+ (m_normal_value, p_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_normal_value, m_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_normal_value, p_smallest_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (m_normal_value, m_smallest_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (m_normal_value, p_smallest_normalized, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (m_normal_value, m_smallest_normalized, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (p_largest_value, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_largest_value, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_largest_value, p_zero, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_largest_value, m_zero, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_largest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_largest_value, p_normal_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_largest_value, m_normal_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_largest_value, p_largest_value, "inf", overflow_status, Category::Infinity),
+ (p_largest_value, m_largest_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_largest_value, p_smallest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_largest_value, m_smallest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (
+ p_largest_value,
+ p_smallest_normalized,
+ "0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (
+ p_largest_value,
+ m_smallest_normalized,
+ "0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (m_largest_value, p_inf, "inf", Status::OK, Category::Infinity),
+ (m_largest_value, m_inf, "-inf", Status::OK, Category::Infinity),
+ (m_largest_value, p_zero, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_largest_value, m_zero, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_largest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_largest_value, p_normal_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_largest_value, m_normal_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_largest_value, p_largest_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_largest_value, m_largest_value, "-inf", overflow_status, Category::Infinity),
+ (m_largest_value, p_smallest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_largest_value, m_smallest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (
+ m_largest_value,
+ p_smallest_normalized,
+ "-0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (
+ m_largest_value,
+ m_smallest_normalized,
+ "-0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (p_smallest_value, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_smallest_value, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_smallest_value, p_zero, "0x1p-149", Status::OK, Category::Normal),
+ (p_smallest_value, m_zero, "0x1p-149", Status::OK, Category::Normal),
+ (p_smallest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_smallest_value, p_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (p_smallest_value, m_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (p_smallest_value, p_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_smallest_value, m_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_smallest_value, p_smallest_value, "0x1p-148", Status::OK, Category::Normal),
+ (p_smallest_value, m_smallest_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_value, p_smallest_normalized, "0x1.000002p-126", Status::OK, Category::Normal),
+ (p_smallest_value, m_smallest_normalized, "-0x1.fffffcp-127", Status::OK, Category::Normal),
+ (m_smallest_value, p_inf, "inf", Status::OK, Category::Infinity),
+ (m_smallest_value, m_inf, "-inf", Status::OK, Category::Infinity),
+ (m_smallest_value, p_zero, "-0x1p-149", Status::OK, Category::Normal),
+ (m_smallest_value, m_zero, "-0x1p-149", Status::OK, Category::Normal),
+ (m_smallest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_smallest_value, p_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (m_smallest_value, m_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (m_smallest_value, p_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_smallest_value, m_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_smallest_value, p_smallest_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_value, m_smallest_value, "-0x1p-148", Status::OK, Category::Normal),
+ (m_smallest_value, p_smallest_normalized, "0x1.fffffcp-127", Status::OK, Category::Normal),
+ (m_smallest_value, m_smallest_normalized, "-0x1.000002p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_smallest_normalized, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_smallest_normalized, p_zero, "0x1p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_zero, "0x1p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_smallest_normalized, p_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (p_smallest_normalized, m_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (
+ p_smallest_normalized,
+ p_largest_value,
+ "0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (
+ p_smallest_normalized,
+ m_largest_value,
+ "-0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (p_smallest_normalized, p_smallest_value, "0x1.000002p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_smallest_value, "0x1.fffffcp-127", Status::OK, Category::Normal),
+ (p_smallest_normalized, p_smallest_normalized, "0x1p-125", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_normalized, p_inf, "inf", Status::OK, Category::Infinity),
+ (m_smallest_normalized, m_inf, "-inf", Status::OK, Category::Infinity),
+ (m_smallest_normalized, p_zero, "-0x1p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_zero, "-0x1p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_smallest_normalized, p_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (m_smallest_normalized, m_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (
+ m_smallest_normalized,
+ p_largest_value,
+ "0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (
+ m_smallest_normalized,
+ m_largest_value,
+ "-0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (m_smallest_normalized, p_smallest_value, "-0x1.fffffcp-127", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_smallest_value, "-0x1.000002p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, p_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_normalized, m_smallest_normalized, "-0x1p-125", Status::OK, Category::Normal),
+ ];
+
+ for (x, y, e_result, e_status, e_category) in special_cases {
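+ // unpack! (a helper macro defined earlier in this file) splits the
+ // StatusAnd returned by the addition into its status and its value.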
+ let status;
+ let result = unpack!(status=, x + y);
+ assert_eq!(status, e_status);
+ assert_eq!(result.category(), e_category);
+ assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
+ }
+}
+
+#[test]
+fn subtract() {
+ // Test special cases against each other and against normal values.
+
+ // FIXMES/NOTES:
+ // 1. Since we perform only default exception handling, all operations with
+ // signaling NaNs should have a result that is a quiet NaN. Currently they
+ // return an sNaN.
+
+ let p_inf = Single::INFINITY;
+ let m_inf = -Single::INFINITY;
+ let p_zero = Single::ZERO;
+ let m_zero = -Single::ZERO;
+ let qnan = Single::NAN;
+ let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
+ let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
+ let p_largest_value = Single::largest();
+ let m_largest_value = -Single::largest();
+ let p_smallest_value = Single::SMALLEST;
+ let m_smallest_value = -Single::SMALLEST;
+ let p_smallest_normalized = Single::smallest_normalized();
+ let m_smallest_normalized = -Single::smallest_normalized();
+
+ let overflow_status = Status::OVERFLOW | Status::INEXACT;
+
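+ // Same (lhs, rhs, expected result, status, category) layout as in `add`.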
+ let special_cases = [
+ (p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (p_inf, m_inf, "inf", Status::OK, Category::Infinity),
+ (p_inf, p_zero, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_zero, "inf", Status::OK, Category::Infinity),
+ (p_inf, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_inf, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, p_largest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_largest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, p_smallest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_smallest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, p_smallest_normalized, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_smallest_normalized, "inf", Status::OK, Category::Infinity),
+ (m_inf, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (m_inf, p_zero, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_zero, "-inf", Status::OK, Category::Infinity),
+ (m_inf, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_inf, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_inf, p_normal_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_normal_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, p_largest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_largest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, p_smallest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_smallest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, p_smallest_normalized, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_smallest_normalized, "-inf", Status::OK, Category::Infinity),
+ (p_zero, p_inf, "-inf", Status::OK, Category::Infinity),
+ (p_zero, m_inf, "inf", Status::OK, Category::Infinity),
+ (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_zero, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_zero, p_normal_value, "-0x1p+0", Status::OK, Category::Normal),
+ (p_zero, m_normal_value, "0x1p+0", Status::OK, Category::Normal),
+ (p_zero, p_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_zero, m_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_zero, p_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
+ (p_zero, m_smallest_value, "0x1p-149", Status::OK, Category::Normal),
+ (p_zero, p_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
+ (p_zero, m_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
+ (m_zero, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_zero, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_zero, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_zero, p_normal_value, "-0x1p+0", Status::OK, Category::Normal),
+ (m_zero, m_normal_value, "0x1p+0", Status::OK, Category::Normal),
+ (m_zero, p_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_zero, m_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_zero, p_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
+ (m_zero, m_smallest_value, "0x1p-149", Status::OK, Category::Normal),
+ (m_zero, p_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
+ (m_zero, m_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
+ (qnan, p_inf, "nan", Status::OK, Category::NaN),
+ (qnan, m_inf, "nan", Status::OK, Category::NaN),
+ (qnan, p_zero, "nan", Status::OK, Category::NaN),
+ (qnan, m_zero, "nan", Status::OK, Category::NaN),
+ (qnan, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (qnan, p_normal_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_normal_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_largest_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_largest_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_smallest_normalized, "nan", Status::OK, Category::NaN),
+ (qnan, m_smallest_normalized, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, snan, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_normal_value, p_inf, "-inf", Status::OK, Category::Infinity),
+ (p_normal_value, m_inf, "inf", Status::OK, Category::Infinity),
+ (p_normal_value, p_zero, "0x1p+0", Status::OK, Category::Normal),
+ (p_normal_value, m_zero, "0x1p+0", Status::OK, Category::Normal),
+ (p_normal_value, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_normal_value, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_normal_value, m_normal_value, "0x1p+1", Status::OK, Category::Normal),
+ (p_normal_value, p_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_normal_value, m_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_normal_value, p_smallest_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (p_normal_value, m_smallest_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (p_normal_value, p_smallest_normalized, "0x1p+0", Status::INEXACT, Category::Normal),
+ (p_normal_value, m_smallest_normalized, "0x1p+0", Status::INEXACT, Category::Normal),
+ (m_normal_value, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_normal_value, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_normal_value, p_zero, "-0x1p+0", Status::OK, Category::Normal),
+ (m_normal_value, m_zero, "-0x1p+0", Status::OK, Category::Normal),
+ (m_normal_value, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_normal_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_normal_value, p_normal_value, "-0x1p+1", Status::OK, Category::Normal),
+ (m_normal_value, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_normal_value, p_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_normal_value, m_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_normal_value, p_smallest_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (m_normal_value, m_smallest_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (m_normal_value, p_smallest_normalized, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (m_normal_value, m_smallest_normalized, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (p_largest_value, p_inf, "-inf", Status::OK, Category::Infinity),
+ (p_largest_value, m_inf, "inf", Status::OK, Category::Infinity),
+ (p_largest_value, p_zero, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_largest_value, m_zero, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_largest_value, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_largest_value, p_normal_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_largest_value, m_normal_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_largest_value, p_largest_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_largest_value, m_largest_value, "inf", overflow_status, Category::Infinity),
+ (p_largest_value, p_smallest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_largest_value, m_smallest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (
+ p_largest_value,
+ p_smallest_normalized,
+ "0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (
+ p_largest_value,
+ m_smallest_normalized,
+ "0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (m_largest_value, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_largest_value, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_largest_value, p_zero, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_largest_value, m_zero, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_largest_value, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_largest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_largest_value, p_normal_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_largest_value, m_normal_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_largest_value, p_largest_value, "-inf", overflow_status, Category::Infinity),
+ (m_largest_value, m_largest_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_largest_value, p_smallest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_largest_value, m_smallest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (
+ m_largest_value,
+ p_smallest_normalized,
+ "-0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (
+ m_largest_value,
+ m_smallest_normalized,
+ "-0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (p_smallest_value, p_inf, "-inf", Status::OK, Category::Infinity),
+ (p_smallest_value, m_inf, "inf", Status::OK, Category::Infinity),
+ (p_smallest_value, p_zero, "0x1p-149", Status::OK, Category::Normal),
+ (p_smallest_value, m_zero, "0x1p-149", Status::OK, Category::Normal),
+ (p_smallest_value, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_smallest_value, p_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (p_smallest_value, m_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (p_smallest_value, p_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_smallest_value, m_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (p_smallest_value, p_smallest_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_value, m_smallest_value, "0x1p-148", Status::OK, Category::Normal),
+ (p_smallest_value, p_smallest_normalized, "-0x1.fffffcp-127", Status::OK, Category::Normal),
+ (p_smallest_value, m_smallest_normalized, "0x1.000002p-126", Status::OK, Category::Normal),
+ (m_smallest_value, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_smallest_value, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_smallest_value, p_zero, "-0x1p-149", Status::OK, Category::Normal),
+ (m_smallest_value, m_zero, "-0x1p-149", Status::OK, Category::Normal),
+ (m_smallest_value, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_smallest_value, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_smallest_value, p_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (m_smallest_value, m_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (m_smallest_value, p_largest_value, "-0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_smallest_value, m_largest_value, "0x1.fffffep+127", Status::INEXACT, Category::Normal),
+ (m_smallest_value, p_smallest_value, "-0x1p-148", Status::OK, Category::Normal),
+ (m_smallest_value, m_smallest_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_value, p_smallest_normalized, "-0x1.000002p-126", Status::OK, Category::Normal),
+ (m_smallest_value, m_smallest_normalized, "0x1.fffffcp-127", Status::OK, Category::Normal),
+ (p_smallest_normalized, p_inf, "-inf", Status::OK, Category::Infinity),
+ (p_smallest_normalized, m_inf, "inf", Status::OK, Category::Infinity),
+ (p_smallest_normalized, p_zero, "0x1p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_zero, "0x1p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_smallest_normalized, p_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (p_smallest_normalized, m_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (
+ p_smallest_normalized,
+ p_largest_value,
+ "-0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (
+ p_smallest_normalized,
+ m_largest_value,
+ "0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (p_smallest_normalized, p_smallest_value, "0x1.fffffcp-127", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_smallest_value, "0x1.000002p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, p_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_normalized, m_smallest_normalized, "0x1p-125", Status::OK, Category::Normal),
+ (m_smallest_normalized, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_smallest_normalized, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_smallest_normalized, p_zero, "-0x1p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_zero, "-0x1p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, qnan, "-nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_smallest_normalized, snan, "-nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_smallest_normalized, p_normal_value, "-0x1p+0", Status::INEXACT, Category::Normal),
+ (m_smallest_normalized, m_normal_value, "0x1p+0", Status::INEXACT, Category::Normal),
+ (
+ m_smallest_normalized,
+ p_largest_value,
+ "-0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (
+ m_smallest_normalized,
+ m_largest_value,
+ "0x1.fffffep+127",
+ Status::INEXACT,
+ Category::Normal,
+ ),
+ (m_smallest_normalized, p_smallest_value, "-0x1.000002p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_smallest_value, "-0x1.fffffcp-127", Status::OK, Category::Normal),
+ (m_smallest_normalized, p_smallest_normalized, "-0x1p-125", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
+ ];
+
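+    // Each arithmetic operator on these types yields rustc_apfloat's
+    // `StatusAnd<T>`: the rounded value together with the IEEE exception
+    // flags raised while computing it. `unpack!` merely splits the two;
+    // written out by hand (a sketch using the crate's public `StatusAnd`
+    // fields), each iteration amounts to:
+    //
+    //     let r = x - y;          // r: StatusAnd<Single>
+    //     let status = r.status;  // flags raised by the subtraction
+    //     let result = r.value;   // the rounded result itself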
+ for (x, y, e_result, e_status, e_category) in special_cases {
+ let status;
+ let result = unpack!(status=, x - y);
+ assert_eq!(status, e_status);
+ assert_eq!(result.category(), e_category);
+ assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
+ }
+}
+
+#[test]
+fn multiply() {
+ // Test Special Cases against each other and normal values.
+
+ // FIXMES/NOTES:
+    // 1. Since we perform only default exception handling, all operations with
+    //    signaling NaNs should produce a quiet NaN result. Currently they
+    //    return the sNaN unchanged.
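+    //    Once that is fixed, an sNaN operand should quiet and raise
+    //    INVALID_OP; a sketch of the intended behavior, using the crate's
+    //    real `snan`/`is_signaling` API:
+    //
+    //        let status;
+    //        let prod = unpack!(status=, Single::snan(None) * Single::NAN);
+    //        assert_eq!(status, Status::INVALID_OP);
+    //        assert!(prod.is_nan() && !prod.is_signaling());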
+
+ let p_inf = Single::INFINITY;
+ let m_inf = -Single::INFINITY;
+ let p_zero = Single::ZERO;
+ let m_zero = -Single::ZERO;
+ let qnan = Single::NAN;
+ let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
+ let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
+ let p_largest_value = Single::largest();
+ let m_largest_value = -Single::largest();
+ let p_smallest_value = Single::SMALLEST;
+ let m_smallest_value = -Single::SMALLEST;
+ let p_smallest_normalized = Single::smallest_normalized();
+ let m_smallest_normalized = -Single::smallest_normalized();
+
+ let overflow_status = Status::OVERFLOW | Status::INEXACT;
+ let underflow_status = Status::UNDERFLOW | Status::INEXACT;
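+    // Under IEEE 754 default exception handling an overflowed result is
+    // always inexact, and underflow is only signaled together with inexact,
+    // which is why both masks pair the flags.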
+
+ let special_cases = [
+ (p_inf, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (p_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (p_inf, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_normal_value, "-inf", Status::OK, Category::Infinity),
+ (p_inf, p_largest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_largest_value, "-inf", Status::OK, Category::Infinity),
+ (p_inf, p_smallest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_smallest_value, "-inf", Status::OK, Category::Infinity),
+ (p_inf, p_smallest_normalized, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_smallest_normalized, "-inf", Status::OK, Category::Infinity),
+ (m_inf, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_inf, p_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (m_inf, m_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (m_inf, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_inf, p_normal_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
+ (m_inf, p_largest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_largest_value, "inf", Status::OK, Category::Infinity),
+ (m_inf, p_smallest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_smallest_value, "inf", Status::OK, Category::Infinity),
+ (m_inf, p_smallest_normalized, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_smallest_normalized, "inf", Status::OK, Category::Infinity),
+ (p_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (p_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (p_zero, p_zero, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (p_zero, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_normal_value, "-0x0p+0", Status::OK, Category::Zero),
+ (p_zero, p_largest_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_largest_value, "-0x0p+0", Status::OK, Category::Zero),
+ (p_zero, p_smallest_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
+ (p_zero, p_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (m_zero, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (m_zero, p_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_zero, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_zero, p_normal_value, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, p_largest_value, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_largest_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, p_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_smallest_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, p_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
+ (qnan, p_inf, "nan", Status::OK, Category::NaN),
+ (qnan, m_inf, "nan", Status::OK, Category::NaN),
+ (qnan, p_zero, "nan", Status::OK, Category::NaN),
+ (qnan, m_zero, "nan", Status::OK, Category::NaN),
+ (qnan, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (qnan, p_normal_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_normal_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_largest_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_largest_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_smallest_normalized, "nan", Status::OK, Category::NaN),
+ (qnan, m_smallest_normalized, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, snan, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_normal_value, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_normal_value, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_normal_value, p_zero, "0x0p+0", Status::OK, Category::Zero),
+ (p_normal_value, m_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (p_normal_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_normal_value, p_normal_value, "0x1p+0", Status::OK, Category::Normal),
+ (p_normal_value, m_normal_value, "-0x1p+0", Status::OK, Category::Normal),
+ (p_normal_value, p_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_normal_value, m_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_normal_value, p_smallest_value, "0x1p-149", Status::OK, Category::Normal),
+ (p_normal_value, m_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
+ (p_normal_value, p_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
+ (p_normal_value, m_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
+ (m_normal_value, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_normal_value, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_normal_value, p_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (m_normal_value, m_zero, "0x0p+0", Status::OK, Category::Zero),
+ (m_normal_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_normal_value, p_normal_value, "-0x1p+0", Status::OK, Category::Normal),
+ (m_normal_value, m_normal_value, "0x1p+0", Status::OK, Category::Normal),
+ (m_normal_value, p_largest_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_normal_value, m_largest_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_normal_value, p_smallest_value, "-0x1p-149", Status::OK, Category::Normal),
+ (m_normal_value, m_smallest_value, "0x1p-149", Status::OK, Category::Normal),
+ (m_normal_value, p_smallest_normalized, "-0x1p-126", Status::OK, Category::Normal),
+ (m_normal_value, m_smallest_normalized, "0x1p-126", Status::OK, Category::Normal),
+ (p_largest_value, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_largest_value, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_largest_value, p_zero, "0x0p+0", Status::OK, Category::Zero),
+ (p_largest_value, m_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (p_largest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_largest_value, p_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_largest_value, m_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_largest_value, p_largest_value, "inf", overflow_status, Category::Infinity),
+ (p_largest_value, m_largest_value, "-inf", overflow_status, Category::Infinity),
+ (p_largest_value, p_smallest_value, "0x1.fffffep-22", Status::OK, Category::Normal),
+ (p_largest_value, m_smallest_value, "-0x1.fffffep-22", Status::OK, Category::Normal),
+ (p_largest_value, p_smallest_normalized, "0x1.fffffep+1", Status::OK, Category::Normal),
+ (p_largest_value, m_smallest_normalized, "-0x1.fffffep+1", Status::OK, Category::Normal),
+ (m_largest_value, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_largest_value, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_largest_value, p_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (m_largest_value, m_zero, "0x0p+0", Status::OK, Category::Zero),
+ (m_largest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_largest_value, p_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_largest_value, m_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_largest_value, p_largest_value, "-inf", overflow_status, Category::Infinity),
+ (m_largest_value, m_largest_value, "inf", overflow_status, Category::Infinity),
+ (m_largest_value, p_smallest_value, "-0x1.fffffep-22", Status::OK, Category::Normal),
+ (m_largest_value, m_smallest_value, "0x1.fffffep-22", Status::OK, Category::Normal),
+ (m_largest_value, p_smallest_normalized, "-0x1.fffffep+1", Status::OK, Category::Normal),
+ (m_largest_value, m_smallest_normalized, "0x1.fffffep+1", Status::OK, Category::Normal),
+ (p_smallest_value, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_smallest_value, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_smallest_value, p_zero, "0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_value, m_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_smallest_value, p_normal_value, "0x1p-149", Status::OK, Category::Normal),
+ (p_smallest_value, m_normal_value, "-0x1p-149", Status::OK, Category::Normal),
+ (p_smallest_value, p_largest_value, "0x1.fffffep-22", Status::OK, Category::Normal),
+ (p_smallest_value, m_largest_value, "-0x1.fffffep-22", Status::OK, Category::Normal),
+ (p_smallest_value, p_smallest_value, "0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_value, m_smallest_value, "-0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_value, p_smallest_normalized, "0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_value, m_smallest_normalized, "-0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_value, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_smallest_value, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_smallest_value, p_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_value, m_zero, "0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_smallest_value, p_normal_value, "-0x1p-149", Status::OK, Category::Normal),
+ (m_smallest_value, m_normal_value, "0x1p-149", Status::OK, Category::Normal),
+ (m_smallest_value, p_largest_value, "-0x1.fffffep-22", Status::OK, Category::Normal),
+ (m_smallest_value, m_largest_value, "0x1.fffffep-22", Status::OK, Category::Normal),
+ (m_smallest_value, p_smallest_value, "-0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_value, m_smallest_value, "0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_value, p_smallest_normalized, "-0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_value, m_smallest_normalized, "0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_normalized, p_inf, "inf", Status::OK, Category::Infinity),
+ (p_smallest_normalized, m_inf, "-inf", Status::OK, Category::Infinity),
+ (p_smallest_normalized, p_zero, "0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_normalized, m_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_smallest_normalized, p_normal_value, "0x1p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_normal_value, "-0x1p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, p_largest_value, "0x1.fffffep+1", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_largest_value, "-0x1.fffffep+1", Status::OK, Category::Normal),
+ (p_smallest_normalized, p_smallest_value, "0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_normalized, m_smallest_value, "-0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_normalized, p_smallest_normalized, "0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_normalized, m_smallest_normalized, "-0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_normalized, p_inf, "-inf", Status::OK, Category::Infinity),
+ (m_smallest_normalized, m_inf, "inf", Status::OK, Category::Infinity),
+ (m_smallest_normalized, p_zero, "-0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_normalized, m_zero, "0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_smallest_normalized, p_normal_value, "-0x1p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_normal_value, "0x1p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, p_largest_value, "-0x1.fffffep+1", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_largest_value, "0x1.fffffep+1", Status::OK, Category::Normal),
+ (m_smallest_normalized, p_smallest_value, "-0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_normalized, m_smallest_value, "0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_normalized, p_smallest_normalized, "-0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_normalized, m_smallest_normalized, "0x0p+0", underflow_status, Category::Zero),
+ ];
+
+ for (x, y, e_result, e_status, e_category) in special_cases {
+ let status;
+ let result = unpack!(status=, x * y);
+ assert_eq!(status, e_status);
+ assert_eq!(result.category(), e_category);
+ assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
+ }
+}
+
+#[test]
+fn divide() {
+ // Test Special Cases against each other and normal values.
+
+ // FIXMES/NOTES:
+    // 1. Since we perform only default exception handling, all operations with
+    //    signaling NaNs should produce a quiet NaN result. Currently they
+    //    return the sNaN unchanged.
+
+ let p_inf = Single::INFINITY;
+ let m_inf = -Single::INFINITY;
+ let p_zero = Single::ZERO;
+ let m_zero = -Single::ZERO;
+ let qnan = Single::NAN;
+ let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
+ let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
+ let p_largest_value = Single::largest();
+ let m_largest_value = -Single::largest();
+ let p_smallest_value = Single::SMALLEST;
+ let m_smallest_value = -Single::SMALLEST;
+ let p_smallest_normalized = Single::smallest_normalized();
+ let m_smallest_normalized = -Single::smallest_normalized();
+
+ let overflow_status = Status::OVERFLOW | Status::INEXACT;
+ let underflow_status = Status::UNDERFLOW | Status::INEXACT;
+
+ let special_cases = [
+ (p_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (p_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (p_inf, p_zero, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_zero, "-inf", Status::OK, Category::Infinity),
+ (p_inf, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_inf, p_normal_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_normal_value, "-inf", Status::OK, Category::Infinity),
+ (p_inf, p_largest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_largest_value, "-inf", Status::OK, Category::Infinity),
+ (p_inf, p_smallest_value, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_smallest_value, "-inf", Status::OK, Category::Infinity),
+ (p_inf, p_smallest_normalized, "inf", Status::OK, Category::Infinity),
+ (p_inf, m_smallest_normalized, "-inf", Status::OK, Category::Infinity),
+ (m_inf, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (m_inf, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (m_inf, p_zero, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_zero, "inf", Status::OK, Category::Infinity),
+ (m_inf, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_inf, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_inf, p_normal_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_normal_value, "inf", Status::OK, Category::Infinity),
+ (m_inf, p_largest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_largest_value, "inf", Status::OK, Category::Infinity),
+ (m_inf, p_smallest_value, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_smallest_value, "inf", Status::OK, Category::Infinity),
+ (m_inf, p_smallest_normalized, "-inf", Status::OK, Category::Infinity),
+ (m_inf, m_smallest_normalized, "inf", Status::OK, Category::Infinity),
+ (p_zero, p_inf, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (p_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (p_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (p_zero, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_zero, p_normal_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_normal_value, "-0x0p+0", Status::OK, Category::Zero),
+ (p_zero, p_largest_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_largest_value, "-0x0p+0", Status::OK, Category::Zero),
+ (p_zero, p_smallest_value, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
+ (p_zero, p_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
+ (p_zero, m_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, p_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_inf, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, p_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (m_zero, m_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (m_zero, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_zero, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_zero, p_normal_value, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_normal_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, p_largest_value, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_largest_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, p_smallest_value, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_smallest_value, "0x0p+0", Status::OK, Category::Zero),
+ (m_zero, p_smallest_normalized, "-0x0p+0", Status::OK, Category::Zero),
+ (m_zero, m_smallest_normalized, "0x0p+0", Status::OK, Category::Zero),
+ (qnan, p_inf, "nan", Status::OK, Category::NaN),
+ (qnan, m_inf, "nan", Status::OK, Category::NaN),
+ (qnan, p_zero, "nan", Status::OK, Category::NaN),
+ (qnan, m_zero, "nan", Status::OK, Category::NaN),
+ (qnan, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (qnan, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (qnan, p_normal_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_normal_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_largest_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_largest_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_smallest_value, "nan", Status::OK, Category::NaN),
+ (qnan, m_smallest_value, "nan", Status::OK, Category::NaN),
+ (qnan, p_smallest_normalized, "nan", Status::OK, Category::NaN),
+ (qnan, m_smallest_normalized, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (snan, p_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_inf, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_zero, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, qnan, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, snan, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_normal_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_normal_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_largest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_largest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_smallest_value, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, p_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
+ (snan, m_smallest_normalized, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_normal_value, p_inf, "0x0p+0", Status::OK, Category::Zero),
+ (p_normal_value, m_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (p_normal_value, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (p_normal_value, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (p_normal_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_normal_value, p_normal_value, "0x1p+0", Status::OK, Category::Normal),
+ (p_normal_value, m_normal_value, "-0x1p+0", Status::OK, Category::Normal),
+ (p_normal_value, p_largest_value, "0x1p-128", underflow_status, Category::Normal),
+ (p_normal_value, m_largest_value, "-0x1p-128", underflow_status, Category::Normal),
+ (p_normal_value, p_smallest_value, "inf", overflow_status, Category::Infinity),
+ (p_normal_value, m_smallest_value, "-inf", overflow_status, Category::Infinity),
+ (p_normal_value, p_smallest_normalized, "0x1p+126", Status::OK, Category::Normal),
+ (p_normal_value, m_smallest_normalized, "-0x1p+126", Status::OK, Category::Normal),
+ (m_normal_value, p_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (m_normal_value, m_inf, "0x0p+0", Status::OK, Category::Zero),
+ (m_normal_value, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (m_normal_value, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (m_normal_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_normal_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_normal_value, p_normal_value, "-0x1p+0", Status::OK, Category::Normal),
+ (m_normal_value, m_normal_value, "0x1p+0", Status::OK, Category::Normal),
+ (m_normal_value, p_largest_value, "-0x1p-128", underflow_status, Category::Normal),
+ (m_normal_value, m_largest_value, "0x1p-128", underflow_status, Category::Normal),
+ (m_normal_value, p_smallest_value, "-inf", overflow_status, Category::Infinity),
+ (m_normal_value, m_smallest_value, "inf", overflow_status, Category::Infinity),
+ (m_normal_value, p_smallest_normalized, "-0x1p+126", Status::OK, Category::Normal),
+ (m_normal_value, m_smallest_normalized, "0x1p+126", Status::OK, Category::Normal),
+ (p_largest_value, p_inf, "0x0p+0", Status::OK, Category::Zero),
+ (p_largest_value, m_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (p_largest_value, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (p_largest_value, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (p_largest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_largest_value, p_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_largest_value, m_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (p_largest_value, p_largest_value, "0x1p+0", Status::OK, Category::Normal),
+ (p_largest_value, m_largest_value, "-0x1p+0", Status::OK, Category::Normal),
+ (p_largest_value, p_smallest_value, "inf", overflow_status, Category::Infinity),
+ (p_largest_value, m_smallest_value, "-inf", overflow_status, Category::Infinity),
+ (p_largest_value, p_smallest_normalized, "inf", overflow_status, Category::Infinity),
+ (p_largest_value, m_smallest_normalized, "-inf", overflow_status, Category::Infinity),
+ (m_largest_value, p_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (m_largest_value, m_inf, "0x0p+0", Status::OK, Category::Zero),
+ (m_largest_value, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (m_largest_value, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (m_largest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_largest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_largest_value, p_normal_value, "-0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_largest_value, m_normal_value, "0x1.fffffep+127", Status::OK, Category::Normal),
+ (m_largest_value, p_largest_value, "-0x1p+0", Status::OK, Category::Normal),
+ (m_largest_value, m_largest_value, "0x1p+0", Status::OK, Category::Normal),
+ (m_largest_value, p_smallest_value, "-inf", overflow_status, Category::Infinity),
+ (m_largest_value, m_smallest_value, "inf", overflow_status, Category::Infinity),
+ (m_largest_value, p_smallest_normalized, "-inf", overflow_status, Category::Infinity),
+ (m_largest_value, m_smallest_normalized, "inf", overflow_status, Category::Infinity),
+ (p_smallest_value, p_inf, "0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_value, m_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_value, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (p_smallest_value, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (p_smallest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_smallest_value, p_normal_value, "0x1p-149", Status::OK, Category::Normal),
+ (p_smallest_value, m_normal_value, "-0x1p-149", Status::OK, Category::Normal),
+ (p_smallest_value, p_largest_value, "0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_value, m_largest_value, "-0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_value, p_smallest_value, "0x1p+0", Status::OK, Category::Normal),
+ (p_smallest_value, m_smallest_value, "-0x1p+0", Status::OK, Category::Normal),
+ (p_smallest_value, p_smallest_normalized, "0x1p-23", Status::OK, Category::Normal),
+ (p_smallest_value, m_smallest_normalized, "-0x1p-23", Status::OK, Category::Normal),
+ (m_smallest_value, p_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_value, m_inf, "0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_value, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (m_smallest_value, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (m_smallest_value, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_smallest_value, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_smallest_value, p_normal_value, "-0x1p-149", Status::OK, Category::Normal),
+ (m_smallest_value, m_normal_value, "0x1p-149", Status::OK, Category::Normal),
+ (m_smallest_value, p_largest_value, "-0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_value, m_largest_value, "0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_value, p_smallest_value, "-0x1p+0", Status::OK, Category::Normal),
+ (m_smallest_value, m_smallest_value, "0x1p+0", Status::OK, Category::Normal),
+ (m_smallest_value, p_smallest_normalized, "-0x1p-23", Status::OK, Category::Normal),
+ (m_smallest_value, m_smallest_normalized, "0x1p-23", Status::OK, Category::Normal),
+ (p_smallest_normalized, p_inf, "0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_normalized, m_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (p_smallest_normalized, p_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (p_smallest_normalized, m_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (p_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (p_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (p_smallest_normalized, p_normal_value, "0x1p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_normal_value, "-0x1p-126", Status::OK, Category::Normal),
+ (p_smallest_normalized, p_largest_value, "0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_normalized, m_largest_value, "-0x0p+0", underflow_status, Category::Zero),
+ (p_smallest_normalized, p_smallest_value, "0x1p+23", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_smallest_value, "-0x1p+23", Status::OK, Category::Normal),
+ (p_smallest_normalized, p_smallest_normalized, "0x1p+0", Status::OK, Category::Normal),
+ (p_smallest_normalized, m_smallest_normalized, "-0x1p+0", Status::OK, Category::Normal),
+ (m_smallest_normalized, p_inf, "-0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_normalized, m_inf, "0x0p+0", Status::OK, Category::Zero),
+ (m_smallest_normalized, p_zero, "-inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (m_smallest_normalized, m_zero, "inf", Status::DIV_BY_ZERO, Category::Infinity),
+ (m_smallest_normalized, qnan, "nan", Status::OK, Category::NaN),
+ /*
+ // See Note 1.
+ (m_smallest_normalized, snan, "nan", Status::INVALID_OP, Category::NaN),
+ */
+ (m_smallest_normalized, p_normal_value, "-0x1p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_normal_value, "0x1p-126", Status::OK, Category::Normal),
+ (m_smallest_normalized, p_largest_value, "-0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_normalized, m_largest_value, "0x0p+0", underflow_status, Category::Zero),
+ (m_smallest_normalized, p_smallest_value, "-0x1p+23", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_smallest_value, "0x1p+23", Status::OK, Category::Normal),
+ (m_smallest_normalized, p_smallest_normalized, "-0x1p+0", Status::OK, Category::Normal),
+ (m_smallest_normalized, m_smallest_normalized, "0x1p+0", Status::OK, Category::Normal),
+ ];
+
+ for (x, y, e_result, e_status, e_category) in special_cases {
+ let status;
+ let result = unpack!(status=, x / y);
+ assert_eq!(status, e_status);
+ assert_eq!(result.category(), e_category);
+ assert!(result.bitwise_eq(e_result.parse::<Single>().unwrap()));
+ }
+}
+
+#[test]
+fn operator_overloads() {
+ // This is mostly testing that these operator overloads compile.
+ let one = "0x1p+0".parse::<Single>().unwrap();
+ let two = "0x2p+0".parse::<Single>().unwrap();
+ assert!(two.bitwise_eq((one + one).value));
+ assert!(one.bitwise_eq((two - one).value));
+ assert!(two.bitwise_eq((one * two).value));
+ assert!(one.bitwise_eq((two / two).value));
+}
+
+#[test]
+fn abs() {
+ let p_inf = Single::INFINITY;
+ let m_inf = -Single::INFINITY;
+ let p_zero = Single::ZERO;
+ let m_zero = -Single::ZERO;
+ let p_qnan = Single::NAN;
+ let m_qnan = -Single::NAN;
+ let p_snan = Single::snan(None);
+ let m_snan = -Single::snan(None);
+ let p_normal_value = "0x1p+0".parse::<Single>().unwrap();
+ let m_normal_value = "-0x1p+0".parse::<Single>().unwrap();
+ let p_largest_value = Single::largest();
+ let m_largest_value = -Single::largest();
+ let p_smallest_value = Single::SMALLEST;
+ let m_smallest_value = -Single::SMALLEST;
+ let p_smallest_normalized = Single::smallest_normalized();
+ let m_smallest_normalized = -Single::smallest_normalized();
+
+ assert!(p_inf.bitwise_eq(p_inf.abs()));
+ assert!(p_inf.bitwise_eq(m_inf.abs()));
+ assert!(p_zero.bitwise_eq(p_zero.abs()));
+ assert!(p_zero.bitwise_eq(m_zero.abs()));
+ assert!(p_qnan.bitwise_eq(p_qnan.abs()));
+ assert!(p_qnan.bitwise_eq(m_qnan.abs()));
+ assert!(p_snan.bitwise_eq(p_snan.abs()));
+ assert!(p_snan.bitwise_eq(m_snan.abs()));
+ assert!(p_normal_value.bitwise_eq(p_normal_value.abs()));
+ assert!(p_normal_value.bitwise_eq(m_normal_value.abs()));
+ assert!(p_largest_value.bitwise_eq(p_largest_value.abs()));
+ assert!(p_largest_value.bitwise_eq(m_largest_value.abs()));
+ assert!(p_smallest_value.bitwise_eq(p_smallest_value.abs()));
+ assert!(p_smallest_value.bitwise_eq(m_smallest_value.abs()));
+    assert!(p_smallest_normalized.bitwise_eq(p_smallest_normalized.abs()));
+    assert!(p_smallest_normalized.bitwise_eq(m_smallest_normalized.abs()));
+}
+
+#[test]
+fn neg() {
+ let one = "1.0".parse::<Single>().unwrap();
+ let neg_one = "-1.0".parse::<Single>().unwrap();
+ let zero = Single::ZERO;
+ let neg_zero = -Single::ZERO;
+ let inf = Single::INFINITY;
+ let neg_inf = -Single::INFINITY;
+ let qnan = Single::NAN;
+ let neg_qnan = -Single::NAN;
+
+ assert!(neg_one.bitwise_eq(-one));
+ assert!(one.bitwise_eq(-neg_one));
+ assert!(neg_zero.bitwise_eq(-zero));
+ assert!(zero.bitwise_eq(-neg_zero));
+ assert!(neg_inf.bitwise_eq(-inf));
+ assert!(inf.bitwise_eq(-neg_inf));
+ assert!(neg_qnan.bitwise_eq(-qnan));
+ assert!(qnan.bitwise_eq(-neg_qnan));
+}
+
+#[test]
+fn ilogb() {
+ assert_eq!(-1074, Double::SMALLEST.ilogb());
+ assert_eq!(-1074, (-Double::SMALLEST).ilogb());
+ assert_eq!(-1023, "0x1.ffffffffffffep-1024".parse::<Double>().unwrap().ilogb());
+ assert_eq!(-1023, "0x1.ffffffffffffep-1023".parse::<Double>().unwrap().ilogb());
+ assert_eq!(-1023, "-0x1.ffffffffffffep-1023".parse::<Double>().unwrap().ilogb());
+ assert_eq!(-51, "0x1p-51".parse::<Double>().unwrap().ilogb());
+ assert_eq!(-1023, "0x1.c60f120d9f87cp-1023".parse::<Double>().unwrap().ilogb());
+ assert_eq!(-2, "0x0.ffffp-1".parse::<Double>().unwrap().ilogb());
+ assert_eq!(-1023, "0x1.fffep-1023".parse::<Double>().unwrap().ilogb());
+ assert_eq!(1023, Double::largest().ilogb());
+ assert_eq!(1023, (-Double::largest()).ilogb());
+
+ assert_eq!(0, "0x1p+0".parse::<Single>().unwrap().ilogb());
+ assert_eq!(0, "-0x1p+0".parse::<Single>().unwrap().ilogb());
+ assert_eq!(42, "0x1p+42".parse::<Single>().unwrap().ilogb());
+ assert_eq!(-42, "0x1p-42".parse::<Single>().unwrap().ilogb());
+
+ assert_eq!(IEK_INF, Single::INFINITY.ilogb());
+ assert_eq!(IEK_INF, (-Single::INFINITY).ilogb());
+ assert_eq!(IEK_ZERO, Single::ZERO.ilogb());
+ assert_eq!(IEK_ZERO, (-Single::ZERO).ilogb());
+ assert_eq!(IEK_NAN, Single::NAN.ilogb());
+ assert_eq!(IEK_NAN, Single::snan(None).ilogb());
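+    // (IEK_ZERO, IEK_NAN and IEK_INF are out-of-band ExpInt sentinels,
+    // mirroring LLVM APFloat's IEK_Zero/IEK_NaN/IEK_Inf, so these
+    // categories can never collide with a real exponent.)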
+
+ assert_eq!(127, Single::largest().ilogb());
+ assert_eq!(127, (-Single::largest()).ilogb());
+
+ assert_eq!(-149, Single::SMALLEST.ilogb());
+ assert_eq!(-149, (-Single::SMALLEST).ilogb());
+ assert_eq!(-126, Single::smallest_normalized().ilogb());
+ assert_eq!(-126, (-Single::smallest_normalized()).ilogb());
+}
+
+#[test]
+fn scalbn() {
+ assert!(
+ "0x1p+0"
+ .parse::<Single>()
+ .unwrap()
+ .bitwise_eq("0x1p+0".parse::<Single>().unwrap().scalbn(0),)
+ );
+ assert!(
+ "0x1p+42"
+ .parse::<Single>()
+ .unwrap()
+ .bitwise_eq("0x1p+0".parse::<Single>().unwrap().scalbn(42),)
+ );
+ assert!(
+ "0x1p-42"
+ .parse::<Single>()
+ .unwrap()
+ .bitwise_eq("0x1p+0".parse::<Single>().unwrap().scalbn(-42),)
+ );
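+    // scalbn scales by 2^n and is exact whenever the scaled value is
+    // representable; the cases below exercise rounding on entry into the
+    // subnormal range and saturation to zero or infinity beyond either end
+    // of the exponent range.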
+
+ let p_inf = Single::INFINITY;
+ let m_inf = -Single::INFINITY;
+ let p_zero = Single::ZERO;
+ let m_zero = -Single::ZERO;
+ let p_qnan = Single::NAN;
+ let m_qnan = -Single::NAN;
+ let snan = Single::snan(None);
+
+ assert!(p_inf.bitwise_eq(p_inf.scalbn(0)));
+ assert!(m_inf.bitwise_eq(m_inf.scalbn(0)));
+ assert!(p_zero.bitwise_eq(p_zero.scalbn(0)));
+ assert!(m_zero.bitwise_eq(m_zero.scalbn(0)));
+ assert!(p_qnan.bitwise_eq(p_qnan.scalbn(0)));
+ assert!(m_qnan.bitwise_eq(m_qnan.scalbn(0)));
+ assert!(!snan.scalbn(0).is_signaling());
+
+ let scalbn_snan = snan.scalbn(1);
+ assert!(scalbn_snan.is_nan() && !scalbn_snan.is_signaling());
+
+ // Make sure highest bit of payload is preserved.
+ let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1;
+
+ let snan_with_payload = Double::snan(Some(payload));
+ let quiet_payload = snan_with_payload.scalbn(1);
+ assert!(quiet_payload.is_nan() && !quiet_payload.is_signaling());
+ assert_eq!(payload, quiet_payload.to_bits() & ((1 << 51) - 1));
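+    // (In an IEEE double the significand occupies bits 0..=51 and bit 51
+    // doubles as the quiet-NaN flag, so `(1 << 51) - 1` masks the 51
+    // payload bits below it; `1 << 50` above is thus the highest payload
+    // bit that must survive quieting.)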
+
+ assert!(p_inf.bitwise_eq("0x1p+0".parse::<Single>().unwrap().scalbn(128),));
+ assert!(m_inf.bitwise_eq("-0x1p+0".parse::<Single>().unwrap().scalbn(128),));
+ assert!(p_inf.bitwise_eq("0x1p+127".parse::<Single>().unwrap().scalbn(1),));
+ assert!(p_zero.bitwise_eq("0x1p-127".parse::<Single>().unwrap().scalbn(-127),));
+ assert!(m_zero.bitwise_eq("-0x1p-127".parse::<Single>().unwrap().scalbn(-127),));
+ assert!(
+ "-0x1p-149"
+ .parse::<Single>()
+ .unwrap()
+ .bitwise_eq("-0x1p-127".parse::<Single>().unwrap().scalbn(-22),)
+ );
+ assert!(p_zero.bitwise_eq("0x1p-126".parse::<Single>().unwrap().scalbn(-24),));
+
+ let smallest_f64 = Double::SMALLEST;
+ let neg_smallest_f64 = -Double::SMALLEST;
+
+ let largest_f64 = Double::largest();
+ let neg_largest_f64 = -Double::largest();
+
+ let largest_denormal_f64 = "0x1.ffffffffffffep-1023".parse::<Double>().unwrap();
+ let neg_largest_denormal_f64 = "-0x1.ffffffffffffep-1023".parse::<Double>().unwrap();
+
+    assert!(smallest_f64.bitwise_eq("0x1p-1074".parse::<Double>().unwrap().scalbn(0)));
+    assert!(neg_smallest_f64.bitwise_eq("-0x1p-1074".parse::<Double>().unwrap().scalbn(0)));
+
+ assert!("0x1p+1023".parse::<Double>().unwrap().bitwise_eq(smallest_f64.scalbn(2097,),));
+
+ assert!(smallest_f64.scalbn(-2097).is_pos_zero());
+ assert!(smallest_f64.scalbn(-2098).is_pos_zero());
+ assert!(smallest_f64.scalbn(-2099).is_pos_zero());
+ assert!("0x1p+1022".parse::<Double>().unwrap().bitwise_eq(smallest_f64.scalbn(2096,),));
+ assert!("0x1p+1023".parse::<Double>().unwrap().bitwise_eq(smallest_f64.scalbn(2097,),));
+ assert!(smallest_f64.scalbn(2098).is_infinite());
+ assert!(smallest_f64.scalbn(2099).is_infinite());
+
+ // Test for integer overflows when adding to exponent.
+ assert!(smallest_f64.scalbn(-ExpInt::MAX).is_pos_zero());
+ assert!(largest_f64.scalbn(ExpInt::MAX).is_infinite());
+
+    assert!(largest_denormal_f64.bitwise_eq(largest_denormal_f64.scalbn(0)));
+    assert!(neg_largest_denormal_f64.bitwise_eq(neg_largest_denormal_f64.scalbn(0)));
+
+ assert!(
+ "0x1.ffffffffffffep-1022"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(largest_denormal_f64.scalbn(1))
+ );
+ assert!(
+ "-0x1.ffffffffffffep-1021"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(neg_largest_denormal_f64.scalbn(2))
+ );
+
+ assert!(
+ "0x1.ffffffffffffep+1"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(largest_denormal_f64.scalbn(1024))
+ );
+ assert!(largest_denormal_f64.scalbn(-1023).is_pos_zero());
+ assert!(largest_denormal_f64.scalbn(-1024).is_pos_zero());
+ assert!(largest_denormal_f64.scalbn(-2048).is_pos_zero());
+ assert!(largest_denormal_f64.scalbn(2047).is_infinite());
+ assert!(largest_denormal_f64.scalbn(2098).is_infinite());
+ assert!(largest_denormal_f64.scalbn(2099).is_infinite());
+
+ assert!(
+ "0x1.ffffffffffffep-2"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(largest_denormal_f64.scalbn(1021))
+ );
+ assert!(
+ "0x1.ffffffffffffep-1"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(largest_denormal_f64.scalbn(1022))
+ );
+ assert!(
+ "0x1.ffffffffffffep+0"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(largest_denormal_f64.scalbn(1023))
+ );
+ assert!(
+ "0x1.ffffffffffffep+1023"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(largest_denormal_f64.scalbn(2046))
+ );
+ assert!("0x1p+974".parse::<Double>().unwrap().bitwise_eq(smallest_f64.scalbn(2048,),));
+
+ let random_denormal_f64 = "0x1.c60f120d9f87cp+51".parse::<Double>().unwrap();
+ assert!(
+ "0x1.c60f120d9f87cp-972"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(random_denormal_f64.scalbn(-1023))
+ );
+ assert!(
+ "0x1.c60f120d9f87cp-1"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(random_denormal_f64.scalbn(-52))
+ );
+ assert!(
+ "0x1.c60f120d9f87cp-2"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(random_denormal_f64.scalbn(-53))
+ );
+ assert!(
+ "0x1.c60f120d9f87cp+0"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq(random_denormal_f64.scalbn(-51))
+ );
+
+ assert!(random_denormal_f64.scalbn(-2097).is_pos_zero());
+ assert!(random_denormal_f64.scalbn(-2090).is_pos_zero());
+
+ assert!("-0x1p-1073".parse::<Double>().unwrap().bitwise_eq(neg_largest_f64.scalbn(-2097),));
+
+ assert!("-0x1p-1024".parse::<Double>().unwrap().bitwise_eq(neg_largest_f64.scalbn(-2048),));
+
+ assert!("0x1p-1073".parse::<Double>().unwrap().bitwise_eq(largest_f64.scalbn(-2097,),));
+
+ assert!("0x1p-1074".parse::<Double>().unwrap().bitwise_eq(largest_f64.scalbn(-2098,),));
+ assert!("-0x1p-1074".parse::<Double>().unwrap().bitwise_eq(neg_largest_f64.scalbn(-2098),));
+ assert!(neg_largest_f64.scalbn(-2099).is_neg_zero());
+ assert!(largest_f64.scalbn(1).is_infinite());
+
+ assert!(
+ "0x1p+0"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq("0x1p+52".parse::<Double>().unwrap().scalbn(-52),)
+ );
+
+ assert!(
+ "0x1p-103"
+ .parse::<Double>()
+ .unwrap()
+ .bitwise_eq("0x1p-51".parse::<Double>().unwrap().scalbn(-52),)
+ );
+}
+
+#[test]
+fn frexp() {
+ let p_zero = Double::ZERO;
+ let m_zero = -Double::ZERO;
+ let one = Double::from_f64(1.0);
+ let m_one = Double::from_f64(-1.0);
+
+ let largest_denormal = "0x1.ffffffffffffep-1023".parse::<Double>().unwrap();
+ let neg_largest_denormal = "-0x1.ffffffffffffep-1023".parse::<Double>().unwrap();
+
+ let smallest = Double::SMALLEST;
+ let neg_smallest = -Double::SMALLEST;
+
+ let largest = Double::largest();
+ let neg_largest = -Double::largest();
+
+ let p_inf = Double::INFINITY;
+ let m_inf = -Double::INFINITY;
+
+ let p_qnan = Double::NAN;
+ let m_qnan = -Double::NAN;
+ let snan = Double::snan(None);
+
+ // Make sure highest bit of payload is preserved.
+ let payload = (1 << 50) | (1 << 49) | (1234 << 32) | 1;
+
+ let snan_with_payload = Double::snan(Some(payload));
+
+ let mut exp = 0;
+
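+    // frexp decomposes x into frac * 2^exp with |frac| in [0.5, 1.0) for
+    // finite non-zero x; zeroes keep exp == 0, while infinities and NaNs
+    // report the IEK_* sentinels checked further down.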
+ let frac = p_zero.frexp(&mut exp);
+ assert_eq!(0, exp);
+ assert!(frac.is_pos_zero());
+
+ let frac = m_zero.frexp(&mut exp);
+ assert_eq!(0, exp);
+ assert!(frac.is_neg_zero());
+
+ let frac = one.frexp(&mut exp);
+ assert_eq!(1, exp);
+ assert!("0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = m_one.frexp(&mut exp);
+ assert_eq!(1, exp);
+ assert!("-0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = largest_denormal.frexp(&mut exp);
+ assert_eq!(-1022, exp);
+ assert!("0x1.ffffffffffffep-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = neg_largest_denormal.frexp(&mut exp);
+ assert_eq!(-1022, exp);
+ assert!("-0x1.ffffffffffffep-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = smallest.frexp(&mut exp);
+ assert_eq!(-1073, exp);
+ assert!("0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = neg_smallest.frexp(&mut exp);
+ assert_eq!(-1073, exp);
+ assert!("-0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = largest.frexp(&mut exp);
+ assert_eq!(1024, exp);
+ assert!("0x1.fffffffffffffp-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = neg_largest.frexp(&mut exp);
+ assert_eq!(1024, exp);
+ assert!("-0x1.fffffffffffffp-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = p_inf.frexp(&mut exp);
+ assert_eq!(IEK_INF, exp);
+ assert!(frac.is_infinite() && !frac.is_negative());
+
+ let frac = m_inf.frexp(&mut exp);
+ assert_eq!(IEK_INF, exp);
+ assert!(frac.is_infinite() && frac.is_negative());
+
+ let frac = p_qnan.frexp(&mut exp);
+ assert_eq!(IEK_NAN, exp);
+ assert!(frac.is_nan());
+
+ let frac = m_qnan.frexp(&mut exp);
+ assert_eq!(IEK_NAN, exp);
+ assert!(frac.is_nan());
+
+ let frac = snan.frexp(&mut exp);
+ assert_eq!(IEK_NAN, exp);
+ assert!(frac.is_nan() && !frac.is_signaling());
+
+ let frac = snan_with_payload.frexp(&mut exp);
+ assert_eq!(IEK_NAN, exp);
+ assert!(frac.is_nan() && !frac.is_signaling());
+ assert_eq!(payload, frac.to_bits() & ((1 << 51) - 1));
+
+ let frac = "0x0.ffffp-1".parse::<Double>().unwrap().frexp(&mut exp);
+ assert_eq!(-1, exp);
+ assert!("0x1.fffep-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = "0x1p-51".parse::<Double>().unwrap().frexp(&mut exp);
+ assert_eq!(-50, exp);
+ assert!("0x1p-1".parse::<Double>().unwrap().bitwise_eq(frac));
+
+ let frac = "0x1.c60f120d9f87cp+51".parse::<Double>().unwrap().frexp(&mut exp);
+ assert_eq!(52, exp);
+ assert!("0x1.c60f120d9f87cp-1".parse::<Double>().unwrap().bitwise_eq(frac));
+}
+
+#[test]
+fn modulo() {
+ let mut status;
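+    // `%` on these types follows C fmod semantics: the result keeps the
+    // dividend's sign and equals f1 - trunc(f1/f2) * f2 computed exactly,
+    // so 1.5 % 1.0 is 0.5 (IEEE remainder(1.5, 1.0) would give -0.5).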
+ {
+ let f1 = "1.5".parse::<Double>().unwrap();
+ let f2 = "1.0".parse::<Double>().unwrap();
+ let expected = "0.5".parse::<Double>().unwrap();
+ assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
+ assert_eq!(status, Status::OK);
+ }
+ {
+ let f1 = "0.5".parse::<Double>().unwrap();
+ let f2 = "1.0".parse::<Double>().unwrap();
+ let expected = "0.5".parse::<Double>().unwrap();
+ assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
+ assert_eq!(status, Status::OK);
+ }
+ {
+ let f1 = "0x1.3333333333333p-2".parse::<Double>().unwrap(); // 0.3
+ let f2 = "0x1.47ae147ae147bp-7".parse::<Double>().unwrap(); // 0.01
+ // 0.009999999999999983
+ let expected = "0x1.47ae147ae1471p-7".parse::<Double>().unwrap();
+ assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
+ assert_eq!(status, Status::OK);
+ }
+ {
+ let f1 = "0x1p64".parse::<Double>().unwrap(); // 1.8446744073709552e19
+ let f2 = "1.5".parse::<Double>().unwrap();
+ let expected = "1.0".parse::<Double>().unwrap();
+ assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
+ assert_eq!(status, Status::OK);
+ }
+ {
+ let f1 = "0x1p1000".parse::<Double>().unwrap();
+ let f2 = "0x1p-1000".parse::<Double>().unwrap();
+ let expected = "0.0".parse::<Double>().unwrap();
+ assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
+ assert_eq!(status, Status::OK);
+ }
+ {
+ let f1 = "0.0".parse::<Double>().unwrap();
+ let f2 = "1.0".parse::<Double>().unwrap();
+ let expected = "0.0".parse::<Double>().unwrap();
+ assert!(unpack!(status=, f1 % f2).bitwise_eq(expected));
+ assert_eq!(status, Status::OK);
+ }
+ {
+ let f1 = "1.0".parse::<Double>().unwrap();
+ let f2 = "0.0".parse::<Double>().unwrap();
+ assert!(unpack!(status=, f1 % f2).is_nan());
+ assert_eq!(status, Status::INVALID_OP);
+ }
+ {
+ let f1 = "0.0".parse::<Double>().unwrap();
+ let f2 = "0.0".parse::<Double>().unwrap();
+ assert!(unpack!(status=, f1 % f2).is_nan());
+ assert_eq!(status, Status::INVALID_OP);
+ }
+ {
+ let f1 = Double::INFINITY;
+ let f2 = "1.0".parse::<Double>().unwrap();
+ assert!(unpack!(status=, f1 % f2).is_nan());
+ assert_eq!(status, Status::INVALID_OP);
+ }
+}
diff --git a/compiler/rustc_apfloat/tests/ppc.rs b/compiler/rustc_apfloat/tests/ppc.rs
new file mode 100644
index 000000000..c769d2654
--- /dev/null
+++ b/compiler/rustc_apfloat/tests/ppc.rs
@@ -0,0 +1,530 @@
+use rustc_apfloat::ppc::DoubleDouble;
+use rustc_apfloat::{Category, Float, Round};
+
+use std::cmp::Ordering;
+
+#[test]
+fn ppc_double_double() {
+ let test = DoubleDouble::ZERO;
+ let expected = "0x0p+0".parse::<DoubleDouble>().unwrap();
+ assert!(test.is_zero());
+ assert!(!test.is_negative());
+ assert!(test.bitwise_eq(expected));
+ assert_eq!(0, test.to_bits());
+
+ let test = -DoubleDouble::ZERO;
+ let expected = "-0x0p+0".parse::<DoubleDouble>().unwrap();
+ assert!(test.is_zero());
+ assert!(test.is_negative());
+ assert!(test.bitwise_eq(expected));
+ assert_eq!(0x8000000000000000, test.to_bits());
+
+ let test = "1.0".parse::<DoubleDouble>().unwrap();
+ assert_eq!(0x3ff0000000000000, test.to_bits());
+
+ // LDBL_MAX
+ let test = "1.79769313486231580793728971405301e+308".parse::<DoubleDouble>().unwrap();
+ assert_eq!(0x7c8ffffffffffffe_7fefffffffffffff, test.to_bits());
+
+ // LDBL_MIN
+ let test = "2.00416836000897277799610805135016e-292".parse::<DoubleDouble>().unwrap();
+ assert_eq!(0x0000000000000000_0360000000000000, test.to_bits());
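+    // to_bits packs the pair with the head f64 (which carries most of the
+    // value) in the low 64 bits and the tail f64 in the high 64 bits, which
+    // is why LDBL_MAX above shows DBL_MAX, 0x7fefffffffffffff, in its low
+    // half.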
+}
+
+#[test]
+fn ppc_double_double_add_special() {
+ let data = [
+ // (1 + 0) + (-1 + 0) = Category::Zero
+ (0x3ff0000000000000, 0xbff0000000000000, Category::Zero, Round::NearestTiesToEven),
+        // LDBL_MAX + (1.1 >> (1023 - 106) + 0) = Category::Infinity
+ (
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ 0x7948000000000000,
+ Category::Infinity,
+ Round::NearestTiesToEven,
+ ),
+ // FIXME: change the 4th 0x75effffffffffffe to 0x75efffffffffffff when
+ // DoubleDouble's fallback is gone.
+ // LDBL_MAX + (1.011111... >> (1023 - 106) + (1.1111111...0 >> (1023 -
+ // 160))) = Category::Normal
+ (
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ 0x75effffffffffffe_7947ffffffffffff,
+ Category::Normal,
+ Round::NearestTiesToEven,
+ ),
+        // LDBL_MAX + LDBL_MAX = Category::Infinity
+ (
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ Category::Infinity,
+ Round::NearestTiesToEven,
+ ),
+ // NaN + (1 + 0) = Category::NaN
+ (0x7ff8000000000000, 0x3ff0000000000000, Category::NaN, Round::NearestTiesToEven),
+ ];
+
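+    // Each case runs twice with the operands swapped, so the table also
+    // checks that add_r treats these special values commutatively.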
+ for (op1, op2, expected, round) in data {
+ {
+ let mut a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ a1 = a1.add_r(a2, round).value;
+
+ assert_eq!(expected, a1.category(), "{:#x} + {:#x}", op1, op2);
+ }
+ {
+ let a1 = DoubleDouble::from_bits(op1);
+ let mut a2 = DoubleDouble::from_bits(op2);
+ a2 = a2.add_r(a1, round).value;
+
+ assert_eq!(expected, a2.category(), "{:#x} + {:#x}", op2, op1);
+ }
+ }
+}
+
+#[test]
+fn ppc_double_double_add() {
+ let data = [
+ // (1 + 0) + (1e-105 + 0) = (1 + 1e-105)
+ (
+ 0x3ff0000000000000,
+ 0x3960000000000000,
+ 0x3960000000000000_3ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // (1 + 0) + (1e-106 + 0) = (1 + 1e-106)
+ (
+ 0x3ff0000000000000,
+ 0x3950000000000000,
+ 0x3950000000000000_3ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // (1 + 1e-106) + (1e-106 + 0) = (1 + 1e-105)
+ (
+ 0x3950000000000000_3ff0000000000000,
+ 0x3950000000000000,
+ 0x3960000000000000_3ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // (1 + 0) + (epsilon + 0) = (1 + epsilon)
+ (
+ 0x3ff0000000000000,
+ 0x0000000000000001,
+ 0x0000000000000001_3ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // FIXME: change 0xf950000000000000 to 0xf940000000000000, when
+ // DoubleDouble's fallback is gone.
+ // (DBL_MAX - 1 << (1023 - 105)) + (1 << (1023 - 53) + 0) = DBL_MAX +
+ // 1.11111... << (1023 - 52)
+ (
+ 0xf950000000000000_7fefffffffffffff,
+ 0x7c90000000000000,
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ Round::NearestTiesToEven,
+ ),
+ // FIXME: change 0xf950000000000000 to 0xf940000000000000, when
+ // DoubleDouble's fallback is gone.
+ // (1 << (1023 - 53) + 0) + (DBL_MAX - 1 << (1023 - 105)) = DBL_MAX +
+ // 1.11111... << (1023 - 52)
+ (
+ 0x7c90000000000000,
+ 0xf950000000000000_7fefffffffffffff,
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ Round::NearestTiesToEven,
+ ),
+ ];
+
+ for (op1, op2, expected, round) in data {
+ {
+ let mut a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ a1 = a1.add_r(a2, round).value;
+
+ assert_eq!(expected, a1.to_bits(), "{:#x} + {:#x}", op1, op2);
+ }
+ {
+ let a1 = DoubleDouble::from_bits(op1);
+ let mut a2 = DoubleDouble::from_bits(op2);
+ a2 = a2.add_r(a1, round).value;
+
+ assert_eq!(expected, a2.to_bits(), "{:#x} + {:#x}", op2, op1);
+ }
+ }
+}
+
+#[test]
+fn ppc_double_double_subtract() {
+ let data = [
+ // (1 + 0) - (-1e-105 + 0) = (1 + 1e-105)
+ (
+ 0x3ff0000000000000,
+ 0xb960000000000000,
+ 0x3960000000000000_3ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // (1 + 0) - (-1e-106 + 0) = (1 + 1e-106)
+ (
+ 0x3ff0000000000000,
+ 0xb950000000000000,
+ 0x3950000000000000_3ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ ];
+
+ for (op1, op2, expected, round) in data {
+ let mut a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ a1 = a1.sub_r(a2, round).value;
+
+ assert_eq!(expected, a1.to_bits(), "{:#x} - {:#x}", op1, op2);
+ }
+}
+
+#[test]
+fn ppc_double_double_multiply_special() {
+ let data = [
+ // Category::NaN * Category::NaN = Category::NaN
+ (0x7ff8000000000000, 0x7ff8000000000000, Category::NaN, Round::NearestTiesToEven),
+ // Category::NaN * Category::Zero = Category::NaN
+ (0x7ff8000000000000, 0, Category::NaN, Round::NearestTiesToEven),
+ // Category::NaN * Category::Infinity = Category::NaN
+ (0x7ff8000000000000, 0x7ff0000000000000, Category::NaN, Round::NearestTiesToEven),
+ // Category::NaN * Category::Normal = Category::NaN
+ (0x7ff8000000000000, 0x3ff0000000000000, Category::NaN, Round::NearestTiesToEven),
+ // Category::Infinity * Category::Infinity = Category::Infinity
+ (0x7ff0000000000000, 0x7ff0000000000000, Category::Infinity, Round::NearestTiesToEven),
+ // Category::Infinity * Category::Zero = Category::NaN
+ (0x7ff0000000000000, 0, Category::NaN, Round::NearestTiesToEven),
+ // Category::Infinity * Category::Normal = Category::Infinity
+ (0x7ff0000000000000, 0x3ff0000000000000, Category::Infinity, Round::NearestTiesToEven),
+ // Category::Zero * Category::Zero = Category::Zero
+ (0, 0, Category::Zero, Round::NearestTiesToEven),
+ // Category::Zero * Category::Normal = Category::Zero
+ (0, 0x3ff0000000000000, Category::Zero, Round::NearestTiesToEven),
+ ];
+
+ for (op1, op2, expected, round) in data {
+ {
+ let mut a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ a1 = a1.mul_r(a2, round).value;
+
+ assert_eq!(expected, a1.category(), "{:#x} * {:#x}", op1, op2);
+ }
+ {
+ let a1 = DoubleDouble::from_bits(op1);
+ let mut a2 = DoubleDouble::from_bits(op2);
+ a2 = a2.mul_r(a1, round).value;
+
+ assert_eq!(expected, a2.category(), "{:#x} * {:#x}", op2, op1);
+ }
+ }
+}
+
+#[test]
+fn ppc_double_double_multiply() {
+ let data = [
+ // 1/3 * 3 = 1.0
+ (
+ 0x3c75555555555556_3fd5555555555555,
+ 0x4008000000000000,
+ 0x3ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // (1 + epsilon) * (1 + 0) = (1 + epsilon)
+ (
+ 0x0000000000000001_3ff0000000000000,
+ 0x3ff0000000000000,
+ 0x0000000000000001_3ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // (1 + epsilon) * (1 + epsilon) = 1 + 2 * epsilon
+ (
+ 0x0000000000000001_3ff0000000000000,
+ 0x0000000000000001_3ff0000000000000,
+ 0x0000000000000002_3ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // -(1 + epsilon) * (1 + epsilon) = -1
+ (
+ 0x0000000000000001_bff0000000000000,
+ 0x0000000000000001_3ff0000000000000,
+ 0xbff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // (0.5 + 0) * (1 + 2 * epsilon) = 0.5 + epsilon
+ (
+ 0x3fe0000000000000,
+ 0x0000000000000002_3ff0000000000000,
+ 0x0000000000000001_3fe0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // (0.5 + 0) * (1 + epsilon) = 0.5
+ (
+ 0x3fe0000000000000,
+ 0x0000000000000001_3ff0000000000000,
+ 0x3fe0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // __LDBL_MAX__ * (1 + 1 << 106) = inf
+ (
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ 0x3950000000000000_3ff0000000000000,
+ 0x7ff0000000000000,
+ Round::NearestTiesToEven,
+ ),
+ // __LDBL_MAX__ * (1 + 1 << 107) > __LDBL_MAX__, but is not inf
+ (
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ 0x3940000000000000_3ff0000000000000,
+ 0x7c8fffffffffffff_7fefffffffffffff,
+ Round::NearestTiesToEven,
+ ),
+ // __LDBL_MAX__ * (1 + 1 << 108) = __LDBL_MAX__
+ (
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ 0x3930000000000000_3ff0000000000000,
+ 0x7c8ffffffffffffe_7fefffffffffffff,
+ Round::NearestTiesToEven,
+ ),
+ ];
+
+ for (op1, op2, expected, round) in data {
+ {
+ let mut a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ a1 = a1.mul_r(a2, round).value;
+
+ assert_eq!(expected, a1.to_bits(), "{:#x} * {:#x}", op1, op2);
+ }
+ {
+ let a1 = DoubleDouble::from_bits(op1);
+ let mut a2 = DoubleDouble::from_bits(op2);
+ a2 = a2.mul_r(a1, round).value;
+
+ assert_eq!(expected, a2.to_bits(), "{:#x} * {:#x}", op2, op1);
+ }
+ }
+}
+
+#[test]
+fn ppc_double_double_divide() {
+ // FIXME: Only a sanity check for now. Add more edge cases when the
+ // double-double algorithm is implemented.
+ let data = [
+ // 1 / 3 = 1/3
+ (
+ 0x3ff0000000000000,
+ 0x4008000000000000,
+ 0x3c75555555555556_3fd5555555555555,
+ Round::NearestTiesToEven,
+ ),
+ ];
+
+ for (op1, op2, expected, round) in data {
+ let mut a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ a1 = a1.div_r(a2, round).value;
+
+ assert_eq!(expected, a1.to_bits(), "{:#x} / {:#x}", op1, op2);
+ }
+}
+
+#[test]
+fn ppc_double_double_remainder() {
+ let data = [
+ // ieee_rem(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53)
+ (
+ 0x3cb8000000000000_4008000000000000,
+ 0x3ca4000000000000_3ff4000000000000,
+ 0x3c90000000000000_3fe0000000000000,
+ ),
+ // ieee_rem(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (-0.5 - 0.5 << 53)
+ (
+ 0x3cb8000000000000_4008000000000000,
+ 0x3cac000000000000_3ffc000000000000,
+ 0xbc90000000000000_bfe0000000000000,
+ ),
+ ];
+
+ for (op1, op2, expected) in data {
+ let a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ let result = a1.ieee_rem(a2).value;
+
+ assert_eq!(expected, result.to_bits(), "ieee_rem({:#x}, {:#x})", op1, op2);
+ }
+}
+
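+// Why `ieee_rem` above and `%` below disagree on 3.0 rem 1.75 (illustrative
+// arithmetic): IEEE remainder rounds the quotient to nearest (3.0 / 1.75 is
+// about 1.71, which rounds to 2, giving 3.0 - 2 * 1.75 = -0.5), while fmod
+// truncates it (quotient 1, giving 3.0 - 1 * 1.75 = 1.25).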
+#[test]
+fn ppc_double_double_mod() {
+ let data = [
+ // mod(3.0 + 3.0 << 53, 1.25 + 1.25 << 53) = (0.5 + 0.5 << 53)
+ (
+ 0x3cb8000000000000_4008000000000000,
+ 0x3ca4000000000000_3ff4000000000000,
+ 0x3c90000000000000_3fe0000000000000,
+ ),
+ // mod(3.0 + 3.0 << 53, 1.75 + 1.75 << 53) = (1.25 + 1.25 << 53)
+ // 0xbc98000000000000 doesn't seem right, but it's what we currently have.
+ // FIXME: investigate
+ (
+ 0x3cb8000000000000_4008000000000000,
+ 0x3cac000000000000_3ffc000000000000,
+ 0xbc98000000000000_3ff4000000000001,
+ ),
+ ];
+
+ for (op1, op2, expected) in data {
+ let a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ let r = (a1 % a2).value;
+
+ assert_eq!(expected, r.to_bits(), "fmod({:#x}, {:#x})", op1, op2);
+ }
+}
+
+#[test]
+fn ppc_double_double_fma() {
+ // Sanity check for now.
+ let mut a = "2".parse::<DoubleDouble>().unwrap();
+ a = a.mul_add("3".parse::<DoubleDouble>().unwrap(), "4".parse::<DoubleDouble>().unwrap()).value;
+ assert_eq!(Some(Ordering::Equal), "10".parse::<DoubleDouble>().unwrap().partial_cmp(&a));
+}
+
+#[test]
+fn ppc_double_double_round_to_integral() {
+ {
+ let a = "1.5".parse::<DoubleDouble>().unwrap();
+ let a = a.round_to_integral(Round::NearestTiesToEven).value;
+ assert_eq!(Some(Ordering::Equal), "2".parse::<DoubleDouble>().unwrap().partial_cmp(&a));
+ }
+ {
+ let a = "2.5".parse::<DoubleDouble>().unwrap();
+ let a = a.round_to_integral(Round::NearestTiesToEven).value;
+ assert_eq!(Some(Ordering::Equal), "2".parse::<DoubleDouble>().unwrap().partial_cmp(&a));
+ }
+}
+
+#[test]
+fn ppc_double_double_compare() {
+ let data = [
+ // (1 + 0) = (1 + 0)
+ (0x3ff0000000000000, 0x3ff0000000000000, Some(Ordering::Equal)),
+ // (1 + 0) < (1.00...1 + 0)
+ (0x3ff0000000000000, 0x3ff0000000000001, Some(Ordering::Less)),
+ // (1.00...1 + 0) > (1 + 0)
+ (0x3ff0000000000001, 0x3ff0000000000000, Some(Ordering::Greater)),
+ // (1 + 0) < (1 + epsilon)
+ (0x3ff0000000000000, 0x0000000000000001_3ff0000000000001, Some(Ordering::Less)),
+ // NaN != NaN
+ (0x7ff8000000000000, 0x7ff8000000000000, None),
+ // (1 + 0) != NaN
+ (0x3ff0000000000000, 0x7ff8000000000000, None),
+ // Inf = Inf
+ (0x7ff0000000000000, 0x7ff0000000000000, Some(Ordering::Equal)),
+ ];
+
+ for (op1, op2, expected) in data {
+ let a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ assert_eq!(expected, a1.partial_cmp(&a2), "compare({:#x}, {:#x})", op1, op2,);
+ }
+}
+
+#[test]
+fn ppc_double_double_bitwise_eq() {
+ let data = [
+ // (1 + 0) = (1 + 0)
+ (0x3ff0000000000000, 0x3ff0000000000000, true),
+ // (1 + 0) != (1.00...1 + 0)
+ (0x3ff0000000000000, 0x3ff0000000000001, false),
+ // NaN = NaN
+ (0x7ff8000000000000, 0x7ff8000000000000, true),
+ // NaN != NaN with a different bit pattern
+ (0x7ff8000000000000, 0x3ff0000000000000_7ff8000000000000, false),
+ // Inf = Inf
+ (0x7ff0000000000000, 0x7ff0000000000000, true),
+ ];
+
+ for (op1, op2, expected) in data {
+ let a1 = DoubleDouble::from_bits(op1);
+ let a2 = DoubleDouble::from_bits(op2);
+ assert_eq!(expected, a1.bitwise_eq(a2), "{:#x} = {:#x}", op1, op2);
+ }
+}
+
+#[test]
+fn ppc_double_double_change_sign() {
+ let float = DoubleDouble::from_bits(0xbcb0000000000000_400f000000000000);
+ {
+ let actual = float.copy_sign("1".parse::<DoubleDouble>().unwrap());
+ assert_eq!(0xbcb0000000000000_400f000000000000, actual.to_bits());
+ }
+ {
+ let actual = float.copy_sign("-1".parse::<DoubleDouble>().unwrap());
+ assert_eq!(0x3cb0000000000000_c00f000000000000, actual.to_bits());
+ }
+}
+
+#[test]
+fn ppc_double_double_factories() {
+ assert_eq!(0, DoubleDouble::ZERO.to_bits());
+ assert_eq!(0x7c8ffffffffffffe_7fefffffffffffff, DoubleDouble::largest().to_bits());
+ assert_eq!(0x0000000000000001, DoubleDouble::SMALLEST.to_bits());
+ assert_eq!(0x0360000000000000, DoubleDouble::smallest_normalized().to_bits());
+ assert_eq!(0x0000000000000000_8000000000000000, (-DoubleDouble::ZERO).to_bits());
+ assert_eq!(0xfc8ffffffffffffe_ffefffffffffffff, (-DoubleDouble::largest()).to_bits());
+ assert_eq!(0x0000000000000000_8000000000000001, (-DoubleDouble::SMALLEST).to_bits());
+ assert_eq!(
+ 0x0000000000000000_8360000000000000,
+ (-DoubleDouble::smallest_normalized()).to_bits()
+ );
+ assert!(DoubleDouble::SMALLEST.is_smallest());
+ assert!(DoubleDouble::largest().is_largest());
+}
+
+#[test]
+fn ppc_double_double_is_denormal() {
+ assert!(DoubleDouble::SMALLEST.is_denormal());
+ assert!(!DoubleDouble::largest().is_denormal());
+ assert!(!DoubleDouble::smallest_normalized().is_denormal());
+ {
+ // (4 + 3) is not normalized
+ let data = 0x4008000000000000_4010000000000000;
+ assert!(DoubleDouble::from_bits(data).is_denormal());
+ }
+}
+
+#[test]
+fn ppc_double_double_exact_inverse() {
+ assert!(
+ "2.0"
+ .parse::<DoubleDouble>()
+ .unwrap()
+ .get_exact_inverse()
+ .unwrap()
+ .bitwise_eq("0.5".parse::<DoubleDouble>().unwrap())
+ );
+}
+
+#[test]
+fn ppc_double_double_scalbn() {
+ // 3.0 + 3.0 << 53
+ let input = 0x3cb8000000000000_4008000000000000;
+ let result = DoubleDouble::from_bits(input).scalbn(1);
+ // 6.0 + 6.0 << 53
+ assert_eq!(0x3cc8000000000000_4018000000000000, result.to_bits());
+}
+
+#[test]
+fn ppc_double_double_frexp() {
+ // 3.0 + 3.0 << 53
+ let input = 0x3cb8000000000000_4008000000000000;
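+ // frexp is expected to return a fraction in [0.5, 1) and an exponent such
+ // that fraction * 2^exp == input; here 0.75 * 2^2 == 3.0.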
+ let mut exp = 0;
+ // 0.75 + 0.75 << 53
+ let result = DoubleDouble::from_bits(input).frexp(&mut exp);
+ assert_eq!(2, exp);
+ assert_eq!(0x3c98000000000000_3fe8000000000000, result.to_bits());
+}
diff --git a/compiler/rustc_arena/Cargo.toml b/compiler/rustc_arena/Cargo.toml
new file mode 100644
index 000000000..5c2aee6b4
--- /dev/null
+++ b/compiler/rustc_arena/Cargo.toml
@@ -0,0 +1,7 @@
+[package]
+name = "rustc_arena"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
new file mode 100644
index 000000000..a5f1cbc96
--- /dev/null
+++ b/compiler/rustc_arena/src/lib.rs
@@ -0,0 +1,645 @@
+//! The arena, a fast but limited type of allocator.
+//!
+//! Arenas are a type of allocator that destroy all of the objects within
+//! them at once, when the arena itself is destroyed. They do not support
+//! deallocation of individual objects while the arena itself is still alive.
+//! The benefit of an arena is very fast allocation; just a pointer bump.
+//!
+//! This crate implements several kinds of arena.
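+//!
+//! A minimal sketch of how a typed arena is used (illustrative only; the
+//! type and method names below are the ones defined in this crate):
+//!
+//! ```ignore (illustrative)
+//! use rustc_arena::TypedArena;
+//!
+//! let arena = TypedArena::default();
+//! let x: &mut u32 = arena.alloc(41);
+//! *x += 1;
+//! // Everything allocated above is freed in one go when `arena` is dropped.
+//! ```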
+
+#![doc(
+ html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
+ test(no_crate_inject, attr(deny(warnings)))
+)]
+#![feature(dropck_eyepatch)]
+#![feature(new_uninit)]
+#![feature(maybe_uninit_slice)]
+#![feature(min_specialization)]
+#![feature(decl_macro)]
+#![feature(rustc_attrs)]
+#![cfg_attr(test, feature(test))]
+#![feature(strict_provenance)]
+#![feature(ptr_const_cast)]
+
+use smallvec::SmallVec;
+
+use std::alloc::Layout;
+use std::cell::{Cell, RefCell};
+use std::cmp;
+use std::marker::{PhantomData, Send};
+use std::mem::{self, MaybeUninit};
+use std::ptr::{self, NonNull};
+use std::slice;
+
+#[inline(never)]
+#[cold]
+fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
+ f()
+}
+
+/// An arena that can hold objects of only one type.
+pub struct TypedArena<T> {
+ /// A pointer to the next object to be allocated.
+ ptr: Cell<*mut T>,
+
+ /// A pointer to the end of the allocated area. When this pointer is
+ /// reached, a new chunk is allocated.
+ end: Cell<*mut T>,
+
+ /// A vector of arena chunks.
+ chunks: RefCell<Vec<ArenaChunk<T>>>,
+
+ /// Marker indicating that dropping the arena causes its owned
+ /// instances of `T` to be dropped.
+ _own: PhantomData<T>,
+}
+
+struct ArenaChunk<T = u8> {
+ /// The raw storage for the arena chunk.
+ storage: NonNull<[MaybeUninit<T>]>,
+ /// The number of valid entries in the chunk.
+ entries: usize,
+}
+
+unsafe impl<#[may_dangle] T> Drop for ArenaChunk<T> {
+ fn drop(&mut self) {
+ unsafe { Box::from_raw(self.storage.as_mut()) };
+ }
+}
+
+impl<T> ArenaChunk<T> {
+ #[inline]
+ unsafe fn new(capacity: usize) -> ArenaChunk<T> {
+ ArenaChunk {
+ storage: NonNull::new(Box::into_raw(Box::new_uninit_slice(capacity))).unwrap(),
+ entries: 0,
+ }
+ }
+
+ /// Destroys this arena chunk.
+ #[inline]
+ unsafe fn destroy(&mut self, len: usize) {
+ // The branch on needs_drop() is an -O1 performance optimization.
+ // Without the branch, dropping TypedArena<u8> takes linear time.
+ if mem::needs_drop::<T>() {
+ let slice = &mut *(self.storage.as_mut());
+ ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut slice[..len]));
+ }
+ }
+
+ // Returns a pointer to the first allocated object.
+ #[inline]
+ fn start(&mut self) -> *mut T {
+ self.storage.as_ptr() as *mut T
+ }
+
+ // Returns a pointer to the end of the allocated space.
+ #[inline]
+ fn end(&mut self) -> *mut T {
+ unsafe {
+ if mem::size_of::<T>() == 0 {
+ // A pointer as large as possible for zero-sized elements.
+ ptr::invalid_mut(!0)
+ } else {
+ self.start().add((*self.storage.as_ptr()).len())
+ }
+ }
+ }
+}
+
+// The arenas start with PAGE-sized chunks, and then each new chunk is twice as
+// big as its predecessor, up until we reach HUGE_PAGE-sized chunks, whereupon
+// we stop growing. This scales well, from arenas that are barely used up to
+// arenas that are used for 100s of MiBs. Note also that the chosen sizes match
+// the usual sizes of pages and huge pages on Linux.
+const PAGE: usize = 4096;
+const HUGE_PAGE: usize = 2 * 1024 * 1024;
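+
+// For example (illustrative): with 16-byte elements and ignoring oversized
+// `additional` requests, chunk capacities grow as 256, 512, 1024, ...
+// elements until a chunk reaches HUGE_PAGE / 16 = 131072 elements (2 MiB);
+// every later chunk then stays at that size.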
+
+impl<T> Default for TypedArena<T> {
+ /// Creates a new `TypedArena`.
+ fn default() -> TypedArena<T> {
+ TypedArena {
+ // We set both `ptr` and `end` to 0 so that the first call to
+ // alloc() will trigger a grow().
+ ptr: Cell::new(ptr::null_mut()),
+ end: Cell::new(ptr::null_mut()),
+ chunks: Default::default(),
+ _own: PhantomData,
+ }
+ }
+}
+
+trait IterExt<T> {
+ fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T];
+}
+
+impl<I, T> IterExt<T> for I
+where
+ I: IntoIterator<Item = T>,
+{
+ // This default collects into a `SmallVec` and then allocates by copying
+ // from it. The specializations below for types like `Vec` are more
+ // efficient, copying directly without the intermediate collecting step.
+ // This default could be made more efficient, like
+ // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
+ #[inline]
+ default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
+ let vec: SmallVec<[_; 8]> = self.into_iter().collect();
+ vec.alloc_from_iter(arena)
+ }
+}
+
+impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
+ #[inline]
+ fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
+ let len = self.len();
+ if len == 0 {
+ return &mut [];
+ }
+ // Move the content to the arena by copying and then forgetting it.
+ unsafe {
+ let start_ptr = arena.alloc_raw_slice(len);
+ self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
+ mem::forget(self);
+ slice::from_raw_parts_mut(start_ptr, len)
+ }
+ }
+}
+
+impl<T> IterExt<T> for Vec<T> {
+ #[inline]
+ fn alloc_from_iter(mut self, arena: &TypedArena<T>) -> &mut [T] {
+ let len = self.len();
+ if len == 0 {
+ return &mut [];
+ }
+ // Move the content to the arena by copying and then forgetting it.
+ unsafe {
+ let start_ptr = arena.alloc_raw_slice(len);
+ self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+ self.set_len(0);
+ slice::from_raw_parts_mut(start_ptr, len)
+ }
+ }
+}
+
+impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
+ #[inline]
+ fn alloc_from_iter(mut self, arena: &TypedArena<A::Item>) -> &mut [A::Item] {
+ let len = self.len();
+ if len == 0 {
+ return &mut [];
+ }
+ // Move the content to the arena by copying and then forgetting it.
+ unsafe {
+ let start_ptr = arena.alloc_raw_slice(len);
+ self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+ self.set_len(0);
+ slice::from_raw_parts_mut(start_ptr, len)
+ }
+ }
+}
+
+impl<T> TypedArena<T> {
+ /// Allocates an object in the `TypedArena`, returning a reference to it.
+ #[inline]
+ pub fn alloc(&self, object: T) -> &mut T {
+ if self.ptr == self.end {
+ self.grow(1)
+ }
+
+ unsafe {
+ if mem::size_of::<T>() == 0 {
+ self.ptr.set((self.ptr.get() as *mut u8).wrapping_offset(1) as *mut T);
+ let ptr = ptr::NonNull::<T>::dangling().as_ptr();
+ // Don't drop the object. This `write` is equivalent to `forget`.
+ ptr::write(ptr, object);
+ &mut *ptr
+ } else {
+ let ptr = self.ptr.get();
+ // Advance the pointer.
+ self.ptr.set(self.ptr.get().offset(1));
+ // Write into uninitialized memory.
+ ptr::write(ptr, object);
+ &mut *ptr
+ }
+ }
+ }
+
+ #[inline]
+ fn can_allocate(&self, additional: usize) -> bool {
+ // FIXME: this should *likely* use `offset_from`, but more
+ // investigation is needed (including running tests in miri).
+ let available_bytes = self.end.get().addr() - self.ptr.get().addr();
+ let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap();
+ available_bytes >= additional_bytes
+ }
+
+ /// Ensures there's enough space in the current chunk to fit `additional` objects.
+ #[inline]
+ fn ensure_capacity(&self, additional: usize) {
+ if !self.can_allocate(additional) {
+ self.grow(additional);
+ debug_assert!(self.can_allocate(additional));
+ }
+ }
+
+ #[inline]
+ unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
+ assert!(mem::size_of::<T>() != 0);
+ assert!(len != 0);
+
+ self.ensure_capacity(len);
+
+ let start_ptr = self.ptr.get();
+ self.ptr.set(start_ptr.add(len));
+ start_ptr
+ }
+
+ #[inline]
+ pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
+ assert!(mem::size_of::<T>() != 0);
+ iter.alloc_from_iter(self)
+ }
+
+ /// Grows the arena.
+ #[inline(never)]
+ #[cold]
+ fn grow(&self, additional: usize) {
+ unsafe {
+ // We need the element size to convert chunk sizes (ranging from
+ // PAGE to HUGE_PAGE bytes) to element counts.
+ let elem_size = cmp::max(1, mem::size_of::<T>());
+ let mut chunks = self.chunks.borrow_mut();
+ let mut new_cap;
+ if let Some(last_chunk) = chunks.last_mut() {
+ // If a type is `!needs_drop`, we don't need to keep track of how many elements
+ // the chunk stores - the field will be ignored anyway.
+ if mem::needs_drop::<T>() {
+ // FIXME: this should *likely* use `offset_from`, but more
+ // investigation is needed (including running tests in miri).
+ let used_bytes = self.ptr.get().addr() - last_chunk.start().addr();
+ last_chunk.entries = used_bytes / mem::size_of::<T>();
+ }
+
+ // If the previous chunk's len is less than HUGE_PAGE
+ // bytes, then this chunk will be at least double the previous
+ // chunk's size.
+ new_cap = (*last_chunk.storage.as_ptr()).len().min(HUGE_PAGE / elem_size / 2);
+ new_cap *= 2;
+ } else {
+ new_cap = PAGE / elem_size;
+ }
+ // Also ensure that this chunk can fit `additional`.
+ new_cap = cmp::max(additional, new_cap);
+
+ let mut chunk = ArenaChunk::<T>::new(new_cap);
+ self.ptr.set(chunk.start());
+ self.end.set(chunk.end());
+ chunks.push(chunk);
+ }
+ }
+
+ // Drops the contents of the last chunk. The last chunk is partially empty, unlike all other
+ // chunks.
+ fn clear_last_chunk(&self, last_chunk: &mut ArenaChunk<T>) {
+ // Determine how much was filled.
+ let start = last_chunk.start().addr();
+ // We obtain the value of the pointer to the first uninitialized element.
+ let end = self.ptr.get().addr();
+ // We then calculate the number of elements to be dropped in the last chunk,
+ // which is the filled area's length.
+ let diff = if mem::size_of::<T>() == 0 {
+ // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get
+ // the number of zero-sized values in the last and only chunk, just out of caution.
+ // Recall that `end` was incremented for each allocated value.
+ end - start
+ } else {
+ // FIXME: this should *likely* use `offset_from`, but more
+ // investigation is needed (including running tests in miri).
+ (end - start) / mem::size_of::<T>()
+ };
+ // Pass that to the `destroy` method.
+ unsafe {
+ last_chunk.destroy(diff);
+ }
+ // Reset the chunk.
+ self.ptr.set(last_chunk.start());
+ }
+}
+
+unsafe impl<#[may_dangle] T> Drop for TypedArena<T> {
+ fn drop(&mut self) {
+ unsafe {
+ // Determine how much was filled.
+ let mut chunks_borrow = self.chunks.borrow_mut();
+ if let Some(mut last_chunk) = chunks_borrow.pop() {
+ // Drop the contents of the last chunk.
+ self.clear_last_chunk(&mut last_chunk);
+ // The last chunk will be dropped. Destroy all other chunks.
+ for chunk in chunks_borrow.iter_mut() {
+ chunk.destroy(chunk.entries);
+ }
+ }
+ // Box handles deallocation of `last_chunk` and `self.chunks`.
+ }
+ }
+}
+
+unsafe impl<T: Send> Send for TypedArena<T> {}
+
+/// An arena that can hold objects of multiple different types that impl `Copy`
+/// and/or satisfy `!mem::needs_drop`.
+pub struct DroplessArena {
+ /// A pointer to the start of the free space.
+ start: Cell<*mut u8>,
+
+ /// A pointer to the end of free space.
+ ///
+ /// The allocation proceeds downwards from the end of the chunk towards the
+ /// start. (This is slightly simpler and faster than allocating upwards,
+ /// see <https://fitzgeraldnick.com/2019/11/01/always-bump-downwards.html>.)
+ /// When this pointer crosses the start pointer, a new chunk is allocated.
+ end: Cell<*mut u8>,
+
+ /// A vector of arena chunks.
+ chunks: RefCell<Vec<ArenaChunk>>,
+}
+
+unsafe impl Send for DroplessArena {}
+
+impl Default for DroplessArena {
+ #[inline]
+ fn default() -> DroplessArena {
+ DroplessArena {
+ start: Cell::new(ptr::null_mut()),
+ end: Cell::new(ptr::null_mut()),
+ chunks: Default::default(),
+ }
+ }
+}
+
+impl DroplessArena {
+ #[inline(never)]
+ #[cold]
+ fn grow(&self, additional: usize) {
+ unsafe {
+ let mut chunks = self.chunks.borrow_mut();
+ let mut new_cap;
+ if let Some(last_chunk) = chunks.last_mut() {
+ // There is no need to update `last_chunk.entries` because that
+ // field isn't used by `DroplessArena`.
+
+ // If the previous chunk's len is less than HUGE_PAGE
+ // bytes, then this chunk will be at least double the previous
+ // chunk's size.
+ new_cap = (*last_chunk.storage.as_ptr()).len().min(HUGE_PAGE / 2);
+ new_cap *= 2;
+ } else {
+ new_cap = PAGE;
+ }
+ // Also ensure that this chunk can fit `additional`.
+ new_cap = cmp::max(additional, new_cap);
+
+ let mut chunk = ArenaChunk::new(new_cap);
+ self.start.set(chunk.start());
+ self.end.set(chunk.end());
+ chunks.push(chunk);
+ }
+ }
+
+ /// Allocates a byte slice with specified layout from the current memory
+ /// chunk. Returns `None` if there is no free space left to satisfy the
+ /// request.
+ #[inline]
+ fn alloc_raw_without_grow(&self, layout: Layout) -> Option<*mut u8> {
+ let start = self.start.get().addr();
+ let old_end = self.end.get();
+ let end = old_end.addr();
+
+ let align = layout.align();
+ let bytes = layout.size();
+
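+ // Bump downwards, then round down to the required alignment. Worked
+ // example (illustrative): with `end` at 0x1000 and a request of size 24,
+ // align 8: 0x1000 - 24 = 0xfe8, and 0xfe8 & !7 == 0xfe8 is already
+ // aligned, so 0xfe8 becomes the new `end` and the allocation's address.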
+ let new_end = end.checked_sub(bytes)? & !(align - 1);
+ if start <= new_end {
+ let new_end = old_end.with_addr(new_end);
+ self.end.set(new_end);
+ Some(new_end)
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
+ assert!(layout.size() != 0);
+ loop {
+ if let Some(a) = self.alloc_raw_without_grow(layout) {
+ break a;
+ }
+ // No free space left. Allocate a new chunk to satisfy the request.
+ // On failure the grow will panic or abort.
+ self.grow(layout.size());
+ }
+ }
+
+ #[inline]
+ pub fn alloc<T>(&self, object: T) -> &mut T {
+ assert!(!mem::needs_drop::<T>());
+
+ let mem = self.alloc_raw(Layout::for_value::<T>(&object)) as *mut T;
+
+ unsafe {
+ // Write into uninitialized memory.
+ ptr::write(mem, object);
+ &mut *mem
+ }
+ }
+
+ /// Allocates a slice of objects that are copied into the `DroplessArena`,
+ /// returning a mutable reference to it.
+ ///
+ /// # Panics
+ ///
+ /// - On zero-sized types
+ /// - On zero-length slices
+ #[inline]
+ pub fn alloc_slice<T>(&self, slice: &[T]) -> &mut [T]
+ where
+ T: Copy,
+ {
+ assert!(!mem::needs_drop::<T>());
+ assert!(mem::size_of::<T>() != 0);
+ assert!(!slice.is_empty());
+
+ let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T;
+
+ unsafe {
+ mem.copy_from_nonoverlapping(slice.as_ptr(), slice.len());
+ slice::from_raw_parts_mut(mem, slice.len())
+ }
+ }
+
+ #[inline]
+ unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
+ &self,
+ mut iter: I,
+ len: usize,
+ mem: *mut T,
+ ) -> &mut [T] {
+ let mut i = 0;
+ // Use a manual loop since LLVM manages to optimize it better for
+ // slice iterators
+ loop {
+ let value = iter.next();
+ if i >= len || value.is_none() {
+ // We only return as many items as the iterator gave us, even
+ // though it was supposed to give us `len`
+ return slice::from_raw_parts_mut(mem, i);
+ }
+ ptr::write(mem.add(i), value.unwrap());
+ i += 1;
+ }
+ }
+
+ #[inline]
+ pub fn alloc_from_iter<T, I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
+ let iter = iter.into_iter();
+ assert!(mem::size_of::<T>() != 0);
+ assert!(!mem::needs_drop::<T>());
+
+ let size_hint = iter.size_hint();
+
+ match size_hint {
+ (min, Some(max)) if min == max => {
+ // We know the exact number of elements the iterator will produce here
+ let len = min;
+
+ if len == 0 {
+ return &mut [];
+ }
+
+ let mem = self.alloc_raw(Layout::array::<T>(len).unwrap()) as *mut T;
+ unsafe { self.write_from_iter(iter, len, mem) }
+ }
+ (_, _) => {
+ cold_path(move || -> &mut [T] {
+ let mut vec: SmallVec<[_; 8]> = iter.collect();
+ if vec.is_empty() {
+ return &mut [];
+ }
+ // Move the content to the arena by copying it and then forgetting
+ // the content of the SmallVec
+ unsafe {
+ let len = vec.len();
+ let start_ptr =
+ self.alloc_raw(Layout::for_value::<[T]>(vec.as_slice())) as *mut T;
+ vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+ vec.set_len(0);
+ slice::from_raw_parts_mut(start_ptr, len)
+ }
+ })
+ }
+ }
+ }
+}
+
+/// Declare an `Arena` containing one dropless arena and many typed arenas (the
+/// types of the typed arenas are specified by the arguments).
+///
+/// There are three cases of interest.
+/// - Types that are `Copy`: these need not be specified in the arguments. They
+/// will use the `DroplessArena`.
+/// - Types that are `!Copy` and `!Drop`: these must be specified in the
+/// arguments. An empty `TypedArena` will be created for each one, but the
+/// `DroplessArena` will always be used and the `TypedArena` will stay empty.
+/// This is odd but harmless, because an empty arena allocates no memory.
+/// - Types that are `!Copy` and `Drop`: these must be specified in the
+/// arguments. The `TypedArena` will be used for them.
+///
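+/// A hypothetical invocation, for illustration only (the real argument lists
+/// live in the crates that use this macro, and `MyExpr`/`MyDropType` are
+/// made-up names):
+///
+/// ```ignore (illustrative)
+/// declare_arena!([
+///     [] exprs: MyExpr,     // `!Copy + !Drop`: ends up in the DroplessArena
+///     [] drops: MyDropType, // `Drop`: allocated in its own TypedArena
+/// ]);
+/// ```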
+#[rustc_macro_transparency = "semitransparent"]
+pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
+ #[derive(Default)]
+ pub struct Arena<'tcx> {
+ pub dropless: $crate::DroplessArena,
+ $($name: $crate::TypedArena<$ty>,)*
+ }
+
+ pub trait ArenaAllocatable<'tcx, C = rustc_arena::IsNotCopy>: Sized {
+ fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self;
+ fn allocate_from_iter<'a>(
+ arena: &'a Arena<'tcx>,
+ iter: impl ::std::iter::IntoIterator<Item = Self>,
+ ) -> &'a mut [Self];
+ }
+
+ // Any type that impls `Copy` can be arena-allocated in the `DroplessArena`.
+ impl<'tcx, T: Copy> ArenaAllocatable<'tcx, rustc_arena::IsCopy> for T {
+ #[inline]
+ fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self {
+ arena.dropless.alloc(self)
+ }
+ #[inline]
+ fn allocate_from_iter<'a>(
+ arena: &'a Arena<'tcx>,
+ iter: impl ::std::iter::IntoIterator<Item = Self>,
+ ) -> &'a mut [Self] {
+ arena.dropless.alloc_from_iter(iter)
+ }
+ }
+ $(
+ impl<'tcx> ArenaAllocatable<'tcx, rustc_arena::IsNotCopy> for $ty {
+ #[inline]
+ fn allocate_on<'a>(self, arena: &'a Arena<'tcx>) -> &'a mut Self {
+ if !::std::mem::needs_drop::<Self>() {
+ arena.dropless.alloc(self)
+ } else {
+ arena.$name.alloc(self)
+ }
+ }
+
+ #[inline]
+ fn allocate_from_iter<'a>(
+ arena: &'a Arena<'tcx>,
+ iter: impl ::std::iter::IntoIterator<Item = Self>,
+ ) -> &'a mut [Self] {
+ if !::std::mem::needs_drop::<Self>() {
+ arena.dropless.alloc_from_iter(iter)
+ } else {
+ arena.$name.alloc_from_iter(iter)
+ }
+ }
+ }
+ )*
+
+ impl<'tcx> Arena<'tcx> {
+ #[inline]
+ pub fn alloc<T: ArenaAllocatable<'tcx, C>, C>(&self, value: T) -> &mut T {
+ value.allocate_on(self)
+ }
+
+ // Any type that impls `Copy` can have slices be arena-allocated in the `DroplessArena`.
+ #[inline]
+ pub fn alloc_slice<T: ::std::marker::Copy>(&self, value: &[T]) -> &mut [T] {
+ if value.is_empty() {
+ return &mut [];
+ }
+ self.dropless.alloc_slice(value)
+ }
+
+ pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, C>, C>(
+ &'a self,
+ iter: impl ::std::iter::IntoIterator<Item = T>,
+ ) -> &'a mut [T] {
+ T::allocate_from_iter(self, iter)
+ }
+ }
+}
+
+// Marker types that let us give different behaviour for arenas allocating
+// `Copy` types vs `!Copy` types.
+pub struct IsCopy;
+pub struct IsNotCopy;
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_arena/src/tests.rs b/compiler/rustc_arena/src/tests.rs
new file mode 100644
index 000000000..ad6146434
--- /dev/null
+++ b/compiler/rustc_arena/src/tests.rs
@@ -0,0 +1,248 @@
+extern crate test;
+use super::TypedArena;
+use std::cell::Cell;
+use test::Bencher;
+
+#[allow(dead_code)]
+#[derive(Debug, Eq, PartialEq)]
+struct Point {
+ x: i32,
+ y: i32,
+ z: i32,
+}
+
+impl<T> TypedArena<T> {
+ /// Clears the arena. Deallocates all but the longest chunk, which may be reused.
+ fn clear(&mut self) {
+ unsafe {
+ // Clear the last chunk, which is partially filled.
+ let mut chunks_borrow = self.chunks.borrow_mut();
+ if let Some(mut last_chunk) = chunks_borrow.last_mut() {
+ self.clear_last_chunk(&mut last_chunk);
+ let len = chunks_borrow.len();
+ // If `T` is ZST, code below has no effect.
+ for mut chunk in chunks_borrow.drain(..len - 1) {
+ chunk.destroy(chunk.entries);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+pub fn test_unused() {
+ let arena: TypedArena<Point> = TypedArena::default();
+ assert!(arena.chunks.borrow().is_empty());
+}
+
+#[test]
+fn test_arena_alloc_nested() {
+ struct Inner {
+ value: u8,
+ }
+ struct Outer<'a> {
+ inner: &'a Inner,
+ }
+ enum EI<'e> {
+ I(Inner),
+ O(Outer<'e>),
+ }
+
+ struct Wrap<'a>(TypedArena<EI<'a>>);
+
+ impl<'a> Wrap<'a> {
+ fn alloc_inner<F: Fn() -> Inner>(&self, f: F) -> &Inner {
+ let r: &EI<'_> = self.0.alloc(EI::I(f()));
+ if let &EI::I(ref i) = r {
+ i
+ } else {
+ panic!("mismatch");
+ }
+ }
+ fn alloc_outer<F: Fn() -> Outer<'a>>(&self, f: F) -> &Outer<'_> {
+ let r: &EI<'_> = self.0.alloc(EI::O(f()));
+ if let &EI::O(ref o) = r {
+ o
+ } else {
+ panic!("mismatch");
+ }
+ }
+ }
+
+ let arena = Wrap(TypedArena::default());
+
+ let result = arena.alloc_outer(|| Outer { inner: arena.alloc_inner(|| Inner { value: 10 }) });
+
+ assert_eq!(result.inner.value, 10);
+}
+
+#[test]
+pub fn test_copy() {
+ let arena = TypedArena::default();
+ #[cfg(not(miri))]
+ const N: usize = 100000;
+ #[cfg(miri)]
+ const N: usize = 1000;
+ for _ in 0..N {
+ arena.alloc(Point { x: 1, y: 2, z: 3 });
+ }
+}
+
+#[bench]
+pub fn bench_copy(b: &mut Bencher) {
+ let arena = TypedArena::default();
+ b.iter(|| arena.alloc(Point { x: 1, y: 2, z: 3 }))
+}
+
+#[bench]
+pub fn bench_copy_nonarena(b: &mut Bencher) {
+ b.iter(|| {
+ let _: Box<_> = Box::new(Point { x: 1, y: 2, z: 3 });
+ })
+}
+
+#[allow(dead_code)]
+struct Noncopy {
+ string: String,
+ array: Vec<i32>,
+}
+
+#[test]
+pub fn test_noncopy() {
+ let arena = TypedArena::default();
+ #[cfg(not(miri))]
+ const N: usize = 100000;
+ #[cfg(miri)]
+ const N: usize = 1000;
+ for _ in 0..N {
+ arena.alloc(Noncopy { string: "hello world".to_string(), array: vec![1, 2, 3, 4, 5] });
+ }
+}
+
+#[test]
+pub fn test_typed_arena_zero_sized() {
+ let arena = TypedArena::default();
+ #[cfg(not(miri))]
+ const N: usize = 100000;
+ #[cfg(miri)]
+ const N: usize = 1000;
+ for _ in 0..N {
+ arena.alloc(());
+ }
+}
+
+#[test]
+pub fn test_typed_arena_clear() {
+ let mut arena = TypedArena::default();
+ for _ in 0..10 {
+ arena.clear();
+ #[cfg(not(miri))]
+ const N: usize = 10000;
+ #[cfg(miri)]
+ const N: usize = 100;
+ for _ in 0..N {
+ arena.alloc(Point { x: 1, y: 2, z: 3 });
+ }
+ }
+}
+
+#[bench]
+pub fn bench_typed_arena_clear(b: &mut Bencher) {
+ let mut arena = TypedArena::default();
+ b.iter(|| {
+ arena.alloc(Point { x: 1, y: 2, z: 3 });
+ arena.clear();
+ })
+}
+
+#[bench]
+pub fn bench_typed_arena_clear_100(b: &mut Bencher) {
+ let mut arena = TypedArena::default();
+ b.iter(|| {
+ for _ in 0..100 {
+ arena.alloc(Point { x: 1, y: 2, z: 3 });
+ }
+ arena.clear();
+ })
+}
+
+// Drop tests
+
+struct DropCounter<'a> {
+ count: &'a Cell<u32>,
+}
+
+impl Drop for DropCounter<'_> {
+ fn drop(&mut self) {
+ self.count.set(self.count.get() + 1);
+ }
+}
+
+#[test]
+fn test_typed_arena_drop_count() {
+ let counter = Cell::new(0);
+ {
+ let arena: TypedArena<DropCounter<'_>> = TypedArena::default();
+ for _ in 0..100 {
+ // Allocate something with drop glue to make sure it doesn't leak.
+ arena.alloc(DropCounter { count: &counter });
+ }
+ };
+ assert_eq!(counter.get(), 100);
+}
+
+#[test]
+fn test_typed_arena_drop_on_clear() {
+ let counter = Cell::new(0);
+ let mut arena: TypedArena<DropCounter<'_>> = TypedArena::default();
+ for i in 0..10 {
+ for _ in 0..100 {
+ // Allocate something with drop glue to make sure it doesn't leak.
+ arena.alloc(DropCounter { count: &counter });
+ }
+ arena.clear();
+ assert_eq!(counter.get(), i * 100 + 100);
+ }
+}
+
+thread_local! {
+ static DROP_COUNTER: Cell<u32> = Cell::new(0)
+}
+
+struct SmallDroppable;
+
+impl Drop for SmallDroppable {
+ fn drop(&mut self) {
+ DROP_COUNTER.with(|c| c.set(c.get() + 1));
+ }
+}
+
+#[test]
+fn test_typed_arena_drop_small_count() {
+ DROP_COUNTER.with(|c| c.set(0));
+ {
+ let arena: TypedArena<SmallDroppable> = TypedArena::default();
+ for _ in 0..100 {
+ // Allocate something with drop glue to make sure it doesn't leak.
+ arena.alloc(SmallDroppable);
+ }
+ // dropping
+ };
+ assert_eq!(DROP_COUNTER.with(|c| c.get()), 100);
+}
+
+#[bench]
+pub fn bench_noncopy(b: &mut Bencher) {
+ let arena = TypedArena::default();
+ b.iter(|| {
+ arena.alloc(Noncopy { string: "hello world".to_string(), array: vec![1, 2, 3, 4, 5] })
+ })
+}
+
+#[bench]
+pub fn bench_noncopy_nonarena(b: &mut Bencher) {
+ b.iter(|| {
+ let _: Box<_> =
+ Box::new(Noncopy { string: "hello world".to_string(), array: vec![1, 2, 3, 4, 5] });
+ })
+}
diff --git a/compiler/rustc_ast/Cargo.toml b/compiler/rustc_ast/Cargo.toml
new file mode 100644
index 000000000..9822e9864
--- /dev/null
+++ b/compiler/rustc_ast/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "rustc_ast"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_serialize = { path = "../rustc_serialize" }
+tracing = "0.1"
+rustc_span = { path = "../rustc_span" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_index = { path = "../rustc_index" }
+rustc_lexer = { path = "../rustc_lexer" }
+rustc_macros = { path = "../rustc_macros" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+bitflags = "1.2.1"
diff --git a/compiler/rustc_ast/README.md b/compiler/rustc_ast/README.md
new file mode 100644
index 000000000..b2b90fed0
--- /dev/null
+++ b/compiler/rustc_ast/README.md
@@ -0,0 +1,8 @@
+The `rustc_ast` crate contains those things concerned purely with syntax
+– that is, the AST ("abstract syntax tree"), along with some definitions for tokens and token streams, data structures/traits for mutating ASTs, and shared definitions for other AST-related parts of the compiler (like the lexer and macro-expansion).
+
+For more information about how these things work in rustc, see the
+rustc dev guide:
+
+- [Parsing](https://rustc-dev-guide.rust-lang.org/the-parser.html)
+- [Macro Expansion](https://rustc-dev-guide.rust-lang.org/macro-expansion.html)
diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs
new file mode 100644
index 000000000..870a7c0be
--- /dev/null
+++ b/compiler/rustc_ast/src/ast.rs
@@ -0,0 +1,3051 @@
+//! The Rust abstract syntax tree module.
+//!
+//! This module contains common structures forming the language AST.
+//! Two main entities in the module are [`Item`] (which represents an AST element with
+//! additional metadata), and [`ItemKind`] (which represents a concrete type and contains
+//! information specific to the type of the item).
+//!
+//! Other module items worth mentioning:
+//! - [`Ty`] and [`TyKind`]: A parsed Rust type.
+//! - [`Expr`] and [`ExprKind`]: A parsed Rust expression.
+//! - [`Pat`] and [`PatKind`]: A parsed Rust pattern. Patterns are often dual to expressions.
+//! - [`Stmt`] and [`StmtKind`]: An executable action that does not return a value.
+//! - [`FnDecl`], [`FnHeader`] and [`Param`]: Metadata associated with a function declaration.
+//! - [`Generics`], [`GenericParam`], [`WhereClause`]: Metadata associated with generic parameters.
+//! - [`EnumDef`] and [`Variant`]: Enum declaration.
+//! - [`Lit`] and [`LitKind`]: Literal expressions.
+//! - [`MacroDef`], [`MacStmtStyle`], [`MacCall`], [`MacDelimiter`]: Macro definition and invocation.
+//! - [`Attribute`]: Metadata associated with item.
+//! - [`UnOp`], [`BinOp`], and [`BinOpKind`]: Unary and binary operators.
+
+pub use crate::util::parser::ExprPrecedence;
+pub use GenericArgs::*;
+pub use UnsafeSource::*;
+
+use crate::ptr::P;
+use crate::token::{self, CommentKind, Delimiter};
+use crate::tokenstream::{DelimSpan, LazyTokenStream, TokenStream};
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_macros::HashStable_Generic;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_span::source_map::{respan, Spanned};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+
+use std::cmp::Ordering;
+use std::convert::TryFrom;
+use std::fmt;
+use std::mem;
+
+/// A "Label" is an identifier of some point in sources,
+/// e.g. in the following code:
+///
+/// ```rust
+/// 'outer: loop {
+/// break 'outer;
+/// }
+/// ```
+///
+/// `'outer` is a label.
+#[derive(Clone, Encodable, Decodable, Copy, HashStable_Generic, Eq, PartialEq)]
+pub struct Label {
+ pub ident: Ident,
+}
+
+impl fmt::Debug for Label {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "label({:?})", self.ident)
+ }
+}
+
+/// A "Lifetime" is an annotation of the scope in which variable
+/// can be used, e.g. `'a` in `&'a i32`.
+#[derive(Clone, Encodable, Decodable, Copy, PartialEq, Eq)]
+pub struct Lifetime {
+ pub id: NodeId,
+ pub ident: Ident,
+}
+
+impl fmt::Debug for Lifetime {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "lifetime({}: {})", self.id, self)
+ }
+}
+
+impl fmt::Display for Lifetime {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.ident.name)
+ }
+}
+
+/// A "Path" is essentially Rust's notion of a name.
+///
+/// It's represented as a sequence of identifiers,
+/// along with a bunch of supporting information.
+///
+/// E.g., `std::cmp::PartialEq`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Path {
+ pub span: Span,
+ /// The segments in the path: the things separated by `::`.
+ /// Global paths begin with `kw::PathRoot`.
+ pub segments: Vec<PathSegment>,
+ pub tokens: Option<LazyTokenStream>,
+}
+
+impl PartialEq<Symbol> for Path {
+ #[inline]
+ fn eq(&self, symbol: &Symbol) -> bool {
+ self.segments.len() == 1 && { self.segments[0].ident.name == *symbol }
+ }
+}
+
+impl<CTX: rustc_span::HashStableContext> HashStable<CTX> for Path {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.segments.len().hash_stable(hcx, hasher);
+ for segment in &self.segments {
+ segment.ident.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+impl Path {
+ // Convert an identifier to the corresponding one-segment path;
+ // the path's span is taken from the identifier's span.
+ pub fn from_ident(ident: Ident) -> Path {
+ Path { segments: vec![PathSegment::from_ident(ident)], span: ident.span, tokens: None }
+ }
+
+ pub fn is_global(&self) -> bool {
+ !self.segments.is_empty() && self.segments[0].ident.name == kw::PathRoot
+ }
+}
+
+/// A segment of a path: an identifier, an optional lifetime, and a set of types.
+///
+/// E.g., `std`, `String` or `Box<T>`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct PathSegment {
+ /// The identifier portion of this path segment.
+ pub ident: Ident,
+
+ pub id: NodeId,
+
+ /// Type/lifetime parameters attached to this path. They come in
+ /// two flavors: `Path<A,B,C>` and `Path(A,B) -> C`.
+ /// `None` means that no parameter list is supplied (`Path`),
+ /// `Some` means that a parameter list is supplied (`Path<X, Y>`)
+ /// but it can be empty (`Path<>`).
+ /// `P` is used as a size optimization for the common case with no parameters.
+ pub args: Option<P<GenericArgs>>,
+}
+
+impl PathSegment {
+ pub fn from_ident(ident: Ident) -> Self {
+ PathSegment { ident, id: DUMMY_NODE_ID, args: None }
+ }
+
+ pub fn path_root(span: Span) -> Self {
+ PathSegment::from_ident(Ident::new(kw::PathRoot, span))
+ }
+
+ pub fn span(&self) -> Span {
+ match &self.args {
+ Some(args) => self.ident.span.to(args.span()),
+ None => self.ident.span,
+ }
+ }
+}
+
+/// The arguments of a path segment.
+///
+/// E.g., `<A, B>` as in `Foo<A, B>` or `(A, B)` as in `Foo(A, B)`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum GenericArgs {
+ /// The `<'a, A, B, C>` in `foo::bar::baz::<'a, A, B, C>`.
+ AngleBracketed(AngleBracketedArgs),
+ /// The `(A, B)` and `C` in `Foo(A, B) -> C`.
+ Parenthesized(ParenthesizedArgs),
+}
+
+impl GenericArgs {
+ pub fn is_angle_bracketed(&self) -> bool {
+ matches!(self, AngleBracketed(..))
+ }
+
+ pub fn span(&self) -> Span {
+ match *self {
+ AngleBracketed(ref data) => data.span,
+ Parenthesized(ref data) => data.span,
+ }
+ }
+}
+
+/// Concrete argument in the sequence of generic args.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum GenericArg {
+ /// `'a` in `Foo<'a>`
+ Lifetime(Lifetime),
+ /// `Bar` in `Foo<Bar>`
+ Type(P<Ty>),
+ /// `1` in `Foo<1>`
+ Const(AnonConst),
+}
+
+impl GenericArg {
+ pub fn span(&self) -> Span {
+ match self {
+ GenericArg::Lifetime(lt) => lt.ident.span,
+ GenericArg::Type(ty) => ty.span,
+ GenericArg::Const(ct) => ct.value.span,
+ }
+ }
+}
+
+/// A path like `Foo<'a, T>`.
+#[derive(Clone, Encodable, Decodable, Debug, Default)]
+pub struct AngleBracketedArgs {
+ /// The overall span.
+ pub span: Span,
+ /// The comma separated parts in the `<...>`.
+ pub args: Vec<AngleBracketedArg>,
+}
+
+/// Either an argument for a parameter e.g., `'a`, `Vec<u8>`, `0`,
+/// or a constraint on an associated item, e.g., `Item = String` or `Item: Bound`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum AngleBracketedArg {
+ /// Argument for a generic parameter.
+ Arg(GenericArg),
+ /// Constraint for an associated item.
+ Constraint(AssocConstraint),
+}
+
+impl AngleBracketedArg {
+ pub fn span(&self) -> Span {
+ match self {
+ AngleBracketedArg::Arg(arg) => arg.span(),
+ AngleBracketedArg::Constraint(constraint) => constraint.span,
+ }
+ }
+}
+
+impl Into<Option<P<GenericArgs>>> for AngleBracketedArgs {
+ fn into(self) -> Option<P<GenericArgs>> {
+ Some(P(GenericArgs::AngleBracketed(self)))
+ }
+}
+
+impl Into<Option<P<GenericArgs>>> for ParenthesizedArgs {
+ fn into(self) -> Option<P<GenericArgs>> {
+ Some(P(GenericArgs::Parenthesized(self)))
+ }
+}
+
+/// A path like `Foo(A, B) -> C`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct ParenthesizedArgs {
+ /// ```text
+ /// Foo(A, B) -> C
+ /// ^^^^^^^^^^^^^^
+ /// ```
+ pub span: Span,
+
+ /// `(A, B)`
+ pub inputs: Vec<P<Ty>>,
+
+ /// ```text
+ /// Foo(A, B) -> C
+ /// ^^^^^^
+ /// ```
+ pub inputs_span: Span,
+
+ /// `C`
+ pub output: FnRetTy,
+}
+
+impl ParenthesizedArgs {
+ pub fn as_angle_bracketed_args(&self) -> AngleBracketedArgs {
+ let args = self
+ .inputs
+ .iter()
+ .cloned()
+ .map(|input| AngleBracketedArg::Arg(GenericArg::Type(input)))
+ .collect();
+ AngleBracketedArgs { span: self.inputs_span, args }
+ }
+}
+
+pub use crate::node_id::{NodeId, CRATE_NODE_ID, DUMMY_NODE_ID};
+
+/// A modifier on a bound, e.g., `?Trait` or `~const Trait`.
+///
+/// Negative bounds should also be handled here.
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug)]
+pub enum TraitBoundModifier {
+ /// No modifiers
+ None,
+
+ /// `?Trait`
+ Maybe,
+
+ /// `~const Trait`
+ MaybeConst,
+
+ /// `~const ?Trait`
+ //
+ // This parses but will be rejected during AST validation.
+ MaybeConstMaybe,
+}
+
+/// The AST represents all type param bounds as types.
+/// `typeck::collect::compute_bounds` matches these against
+/// the "special" built-in traits (see `middle::lang_items`) and
+/// detects `Copy`, `Send` and `Sync`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum GenericBound {
+ Trait(PolyTraitRef, TraitBoundModifier),
+ Outlives(Lifetime),
+}
+
+impl GenericBound {
+ pub fn span(&self) -> Span {
+ match self {
+ GenericBound::Trait(ref t, ..) => t.span,
+ GenericBound::Outlives(ref l) => l.ident.span,
+ }
+ }
+}
+
+pub type GenericBounds = Vec<GenericBound>;
+
+/// Specifies the enforced ordering for generic parameters. In the future,
+/// if we wanted to relax this order, we could override `PartialEq` and
+/// `PartialOrd`, to allow the kinds to be unordered.
+#[derive(Hash, Clone, Copy)]
+pub enum ParamKindOrd {
+ Lifetime,
+ Type,
+ Const,
+ // `Infer` is not actually constructed directly from the AST, but is implicitly constructed
+ // during HIR lowering, and `ParamKindOrd` will implicitly order inferred variables last.
+ Infer,
+}
+
+impl Ord for ParamKindOrd {
+ fn cmp(&self, other: &Self) -> Ordering {
+ use ParamKindOrd::*;
+ let to_int = |v| match v {
+ Lifetime => 0,
+ Infer | Type | Const => 1,
+ };
+
+ to_int(*self).cmp(&to_int(*other))
+ }
+}
+impl PartialOrd for ParamKindOrd {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+impl PartialEq for ParamKindOrd {
+ fn eq(&self, other: &Self) -> bool {
+ self.cmp(other) == Ordering::Equal
+ }
+}
+impl Eq for ParamKindOrd {}
+
+impl fmt::Display for ParamKindOrd {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ ParamKindOrd::Lifetime => "lifetime".fmt(f),
+ ParamKindOrd::Type => "type".fmt(f),
+ ParamKindOrd::Const { .. } => "const".fmt(f),
+ ParamKindOrd::Infer => "infer".fmt(f),
+ }
+ }
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum GenericParamKind {
+ /// A lifetime definition (e.g., `'a: 'b + 'c + 'd`).
+ Lifetime,
+ Type {
+ default: Option<P<Ty>>,
+ },
+ Const {
+ ty: P<Ty>,
+ /// Span of the `const` keyword.
+ kw_span: Span,
+ /// Optional default value for the const generic param
+ default: Option<AnonConst>,
+ },
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct GenericParam {
+ pub id: NodeId,
+ pub ident: Ident,
+ pub attrs: AttrVec,
+ pub bounds: GenericBounds,
+ pub is_placeholder: bool,
+ pub kind: GenericParamKind,
+ pub colon_span: Option<Span>,
+}
+
+impl GenericParam {
+ pub fn span(&self) -> Span {
+ match &self.kind {
+ GenericParamKind::Lifetime | GenericParamKind::Type { default: None } => {
+ self.ident.span
+ }
+ GenericParamKind::Type { default: Some(ty) } => self.ident.span.to(ty.span),
+ GenericParamKind::Const { kw_span, default: Some(default), .. } => {
+ kw_span.to(default.value.span)
+ }
+ GenericParamKind::Const { kw_span, default: None, ty } => kw_span.to(ty.span),
+ }
+ }
+}
+
+/// Represents lifetime, type and const parameters attached to a declaration of
+/// a function, enum, trait, etc.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Generics {
+ pub params: Vec<GenericParam>,
+ pub where_clause: WhereClause,
+ pub span: Span,
+}
+
+impl Default for Generics {
+ /// Creates an instance of `Generics`.
+ fn default() -> Generics {
+ Generics {
+ params: Vec::new(),
+ where_clause: WhereClause {
+ has_where_token: false,
+ predicates: Vec::new(),
+ span: DUMMY_SP,
+ },
+ span: DUMMY_SP,
+ }
+ }
+}
+
+/// A where-clause in a definition.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct WhereClause {
+ /// `true` if we ate a `where` token: this can happen
+ /// if we parsed no predicates (e.g. `struct Foo where {}`).
+ /// This allows us to pretty-print accurately.
+ pub has_where_token: bool,
+ pub predicates: Vec<WherePredicate>,
+ pub span: Span,
+}
+
+/// A single predicate in a where-clause.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum WherePredicate {
+ /// A type binding (e.g., `for<'c> Foo: Send + Clone + 'c`).
+ BoundPredicate(WhereBoundPredicate),
+ /// A lifetime predicate (e.g., `'a: 'b + 'c`).
+ RegionPredicate(WhereRegionPredicate),
+ /// An equality predicate (unsupported).
+ EqPredicate(WhereEqPredicate),
+}
+
+impl WherePredicate {
+ pub fn span(&self) -> Span {
+ match self {
+ WherePredicate::BoundPredicate(p) => p.span,
+ WherePredicate::RegionPredicate(p) => p.span,
+ WherePredicate::EqPredicate(p) => p.span,
+ }
+ }
+}
+
+/// A type bound.
+///
+/// E.g., `for<'c> Foo: Send + Clone + 'c`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct WhereBoundPredicate {
+ pub span: Span,
+ /// Any generics from a `for` binding.
+ pub bound_generic_params: Vec<GenericParam>,
+ /// The type being bounded.
+ pub bounded_ty: P<Ty>,
+ /// Trait and lifetime bounds (`Clone + Send + 'static`).
+ pub bounds: GenericBounds,
+}
+
+/// A lifetime predicate.
+///
+/// E.g., `'a: 'b + 'c`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct WhereRegionPredicate {
+ pub span: Span,
+ pub lifetime: Lifetime,
+ pub bounds: GenericBounds,
+}
+
+/// An equality predicate (unsupported).
+///
+/// E.g., `T = int`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct WhereEqPredicate {
+ pub id: NodeId,
+ pub span: Span,
+ pub lhs_ty: P<Ty>,
+ pub rhs_ty: P<Ty>,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Crate {
+ pub attrs: Vec<Attribute>,
+ pub items: Vec<P<Item>>,
+ pub spans: ModSpans,
+ /// Must be equal to `CRATE_NODE_ID` after the crate root is expanded, but may hold
+ /// expansion placeholders or an unassigned value (`DUMMY_NODE_ID`) before that.
+ pub id: NodeId,
+ pub is_placeholder: bool,
+}
+
+/// Possible values inside of compile-time attribute lists.
+///
+/// E.g., the '..' in `#[name(..)]`.
+#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum NestedMetaItem {
+ /// A full MetaItem, for recursive meta items.
+ MetaItem(MetaItem),
+ /// A literal.
+ ///
+ /// E.g., `"foo"`, `64`, `true`.
+ Literal(Lit),
+}
+
+/// A spanned compile-time attribute item.
+///
+/// E.g., `#[test]`, `#[derive(..)]`, `#[rustfmt::skip]` or `#[feature = "foo"]`.
+#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
+pub struct MetaItem {
+ pub path: Path,
+ pub kind: MetaItemKind,
+ pub span: Span,
+}
+
+/// A compile-time attribute item.
+///
+/// E.g., `#[test]`, `#[derive(..)]` or `#[feature = "foo"]`.
+#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum MetaItemKind {
+ /// Word meta item.
+ ///
+ /// E.g., `test` as in `#[test]`.
+ Word,
+ /// List meta item.
+ ///
+ /// E.g., `derive(..)` as in `#[derive(..)]`.
+ List(Vec<NestedMetaItem>),
+ /// Name value meta item.
+ ///
+ /// E.g., `feature = "foo"` as in `#[feature = "foo"]`.
+ NameValue(Lit),
+}
+
+/// A block (`{ .. }`).
+///
+/// E.g., `{ .. }` as in `fn foo() { .. }`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Block {
+ /// The statements in the block.
+ pub stmts: Vec<Stmt>,
+ pub id: NodeId,
+ /// Distinguishes between `unsafe { ... }` and `{ ... }`.
+ pub rules: BlockCheckMode,
+ pub span: Span,
+ pub tokens: Option<LazyTokenStream>,
+ /// The following *isn't* a parse error, but will cause multiple errors in following stages.
+ /// ```compile_fail
+ /// let x = {
+ /// foo: var
+ /// };
+ /// ```
+ /// #34255
+ pub could_be_bare_literal: bool,
+}
+
+/// A match pattern.
+///
+/// Patterns appear in match statements and some other contexts, such as `let` and `if let`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Pat {
+ pub id: NodeId,
+ pub kind: PatKind,
+ pub span: Span,
+ pub tokens: Option<LazyTokenStream>,
+}
+
+impl Pat {
+ /// Attempt reparsing the pattern as a type.
+ /// This is intended for use by diagnostics.
+ pub fn to_ty(&self) -> Option<P<Ty>> {
+ let kind = match &self.kind {
+ // In a type expression `_` is an inference variable.
+ PatKind::Wild => TyKind::Infer,
+ // An IDENT pattern with no binding mode would be valid as path to a type. E.g. `u32`.
+ PatKind::Ident(BindingMode::ByValue(Mutability::Not), ident, None) => {
+ TyKind::Path(None, Path::from_ident(*ident))
+ }
+ PatKind::Path(qself, path) => TyKind::Path(qself.clone(), path.clone()),
+ PatKind::MacCall(mac) => TyKind::MacCall(mac.clone()),
+ // `&mut? P` can be reinterpreted as `&mut? T` where `T` is `P` reparsed as a type.
+ PatKind::Ref(pat, mutbl) => {
+ pat.to_ty().map(|ty| TyKind::Rptr(None, MutTy { ty, mutbl: *mutbl }))?
+ }
+ // A slice/array pattern `[P]` can be reparsed as `[T]`, an unsized array,
+ // when `P` can be reparsed as a type `T`.
+ PatKind::Slice(pats) if pats.len() == 1 => pats[0].to_ty().map(TyKind::Slice)?,
+ // A tuple pattern `(P0, .., Pn)` can be reparsed as `(T0, .., Tn)`
+ // assuming `T0` to `Tn` are all syntactically valid as types.
+ PatKind::Tuple(pats) => {
+ let mut tys = Vec::with_capacity(pats.len());
+ // FIXME(#48994) - could just be collected into an Option<Vec>
+ for pat in pats {
+ tys.push(pat.to_ty()?);
+ }
+ TyKind::Tup(tys)
+ }
+ _ => return None,
+ };
+
+ Some(P(Ty { kind, id: self.id, span: self.span, tokens: None }))
+ }
+
+ /// Walks the pattern top-down, calling `it` on every subpattern, starting
+ /// with the root pattern that `walk` is called on. If `it` returns `false`,
+ /// we descend no further into that subpattern, but siblings are still processed.
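+ ///
+ /// A hedged usage sketch (the closure and `count` are illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// let mut count = 0;
+ /// pat.walk(&mut |p| {
+ /// count += 1;
+ /// !p.is_rest() // don't descend into `..` (it has no subpatterns anyway)
+ /// });
+ /// ```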
+ pub fn walk(&self, it: &mut impl FnMut(&Pat) -> bool) {
+ if !it(self) {
+ return;
+ }
+
+ match &self.kind {
+ // Walk into the pattern associated with `Ident` (if any).
+ PatKind::Ident(_, _, Some(p)) => p.walk(it),
+
+ // Walk into each field of struct.
+ PatKind::Struct(_, _, fields, _) => fields.iter().for_each(|field| field.pat.walk(it)),
+
+ // Sequence of patterns.
+ PatKind::TupleStruct(_, _, s)
+ | PatKind::Tuple(s)
+ | PatKind::Slice(s)
+ | PatKind::Or(s) => s.iter().for_each(|p| p.walk(it)),
+
+ // Trivial wrappers over inner patterns.
+ PatKind::Box(s) | PatKind::Ref(s, _) | PatKind::Paren(s) => s.walk(it),
+
+ // These patterns do not contain subpatterns, skip.
+ PatKind::Wild
+ | PatKind::Rest
+ | PatKind::Lit(_)
+ | PatKind::Range(..)
+ | PatKind::Ident(..)
+ | PatKind::Path(..)
+ | PatKind::MacCall(_) => {}
+ }
+ }
+
+ /// Is this a `..` pattern?
+ pub fn is_rest(&self) -> bool {
+ matches!(self.kind, PatKind::Rest)
+ }
+}
+
+/// A single field in a struct pattern.
+///
+/// Patterns like the fields of `Foo { x, ref y, ref mut z }`
+/// are treated the same as `x: x, y: ref y, z: ref mut z`,
+/// but with `is_shorthand` set to `true`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct PatField {
+ /// The identifier for the field.
+ pub ident: Ident,
+ /// The pattern the field is destructured to.
+ pub pat: P<Pat>,
+ pub is_shorthand: bool,
+ pub attrs: AttrVec,
+ pub id: NodeId,
+ pub span: Span,
+ pub is_placeholder: bool,
+}
+
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
+pub enum BindingMode {
+ ByRef(Mutability),
+ ByValue(Mutability),
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum RangeEnd {
+ /// `..=` or `...`
+ Included(RangeSyntax),
+ /// `..`
+ Excluded,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum RangeSyntax {
+ /// `...`
+ DotDotDot,
+ /// `..=`
+ DotDotEq,
+}
+
+/// All the different flavors of pattern that Rust recognizes.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum PatKind {
+ /// Represents a wildcard pattern (`_`).
+ Wild,
+
+ /// A `PatKind::Ident` may either be a new bound variable (`ref mut binding @ OPT_SUBPATTERN`),
+ /// or a unit struct/variant pattern, or a const pattern (in the last two cases the third
+ /// field must be `None`). Disambiguation cannot be done with the parser alone, so it happens
+ /// during name resolution.
+ Ident(BindingMode, Ident, Option<P<Pat>>),
+
+ /// A struct or struct variant pattern (e.g., `Variant {x, y, ..}`).
+ /// The `bool` is `true` in the presence of a `..`.
+ Struct(Option<QSelf>, Path, Vec<PatField>, /* recovered */ bool),
+
+ /// A tuple struct/variant pattern (`Variant(x, y, .., z)`).
+ TupleStruct(Option<QSelf>, Path, Vec<P<Pat>>),
+
+ /// An or-pattern `A | B | C`.
+ /// Invariant: `pats.len() >= 2`.
+ Or(Vec<P<Pat>>),
+
+ /// A possibly qualified path pattern.
+ /// Unqualified path patterns `A::B::C` can legally refer to variants, structs, constants
+ /// or associated constants. Qualified path patterns `<A>::B::C`/`<A as Trait>::B::C` can
+ /// only legally refer to associated constants.
+ Path(Option<QSelf>, Path),
+
+ /// A tuple pattern (`(a, b)`).
+ Tuple(Vec<P<Pat>>),
+
+ /// A `box` pattern.
+ Box(P<Pat>),
+
+ /// A reference pattern (e.g., `&mut (a, b)`).
+ Ref(P<Pat>, Mutability),
+
+ /// A literal.
+ Lit(P<Expr>),
+
+ /// A range pattern (e.g., `1...2`, `1..2`, `1..`, `..2`, `1..=2`, `..=2`).
+ Range(Option<P<Expr>>, Option<P<Expr>>, Spanned<RangeEnd>),
+
+ /// A slice pattern `[a, b, c]`.
+ Slice(Vec<P<Pat>>),
+
+ /// A rest pattern `..`.
+ ///
+ /// Syntactically it is valid anywhere.
+ ///
+ /// Semantically however, it only has meaning immediately inside:
+ /// - a slice pattern: `[a, .., b]`,
+ /// - a binding pattern immediately inside a slice pattern: `[a, r @ ..]`,
+ /// - a tuple pattern: `(a, .., b)`,
+ /// - a tuple struct/variant pattern: `$path(a, .., b)`.
+ ///
+ /// In all of these cases, an additional restriction applies:
+ /// only one rest pattern may occur in the pattern sequence.
+ Rest,
+
+ /// Parentheses in patterns used for grouping (i.e., `(PAT)`).
+ Paren(P<Pat>),
+
+ /// A macro pattern; pre-expansion.
+ MacCall(MacCall),
+}
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Copy)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub enum Mutability {
+ Mut,
+ Not,
+}
+
+impl Mutability {
+ pub fn invert(self) -> Self {
+ match self {
+ Mutability::Mut => Mutability::Not,
+ Mutability::Not => Mutability::Mut,
+ }
+ }
+
+ pub fn prefix_str(&self) -> &'static str {
+ match self {
+ Mutability::Mut => "mut ",
+ Mutability::Not => "",
+ }
+ }
+}
+
+/// The kind of borrow in an `AddrOf` expression,
+/// e.g., `&place` or `&raw const place`.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+pub enum BorrowKind {
+ /// A normal borrow, `&$expr` or `&mut $expr`.
+ /// The resulting type is either `&'a T` or `&'a mut T`
+ /// where `T = typeof($expr)` and `'a` is some lifetime.
+ Ref,
+ /// A raw borrow, `&raw const $expr` or `&raw mut $expr`.
+ /// The resulting type is either `*const T` or `*mut T`
+ /// where `T = typeof($expr)`.
+ Raw,
+}
+
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
+pub enum BinOpKind {
+ /// The `+` operator (addition)
+ Add,
+ /// The `-` operator (subtraction)
+ Sub,
+ /// The `*` operator (multiplication)
+ Mul,
+ /// The `/` operator (division)
+ Div,
+ /// The `%` operator (modulus)
+ Rem,
+ /// The `&&` operator (logical and)
+ And,
+ /// The `||` operator (logical or)
+ Or,
+ /// The `^` operator (bitwise xor)
+ BitXor,
+ /// The `&` operator (bitwise and)
+ BitAnd,
+ /// The `|` operator (bitwise or)
+ BitOr,
+ /// The `<<` operator (shift left)
+ Shl,
+ /// The `>>` operator (shift right)
+ Shr,
+ /// The `==` operator (equality)
+ Eq,
+ /// The `<` operator (less than)
+ Lt,
+ /// The `<=` operator (less than or equal to)
+ Le,
+ /// The `!=` operator (not equal to)
+ Ne,
+ /// The `>=` operator (greater than or equal to)
+ Ge,
+ /// The `>` operator (greater than)
+ Gt,
+}
+
+impl BinOpKind {
+ pub fn to_string(&self) -> &'static str {
+ use BinOpKind::*;
+ match *self {
+ Add => "+",
+ Sub => "-",
+ Mul => "*",
+ Div => "/",
+ Rem => "%",
+ And => "&&",
+ Or => "||",
+ BitXor => "^",
+ BitAnd => "&",
+ BitOr => "|",
+ Shl => "<<",
+ Shr => ">>",
+ Eq => "==",
+ Lt => "<",
+ Le => "<=",
+ Ne => "!=",
+ Ge => ">=",
+ Gt => ">",
+ }
+ }
+
+ /// Returns `true` if the operator short-circuits, i.e., is `&&` or `||`.
+ pub fn lazy(&self) -> bool {
+ matches!(self, BinOpKind::And | BinOpKind::Or)
+ }
+
+ pub fn is_comparison(&self) -> bool {
+ use BinOpKind::*;
+ // Note for developers: please keep this as is;
+ // we want compilation to fail if another variant is added.
+ match *self {
+ Eq | Lt | Le | Ne | Gt | Ge => true,
+ And | Or | Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Shl | Shr => false,
+ }
+ }
+}
+
+pub type BinOp = Spanned<BinOpKind>;
+
+/// Unary operator.
+///
+/// Note that `&data` is not an operator, it's an `AddrOf` expression.
+#[derive(Clone, Encodable, Decodable, Debug, Copy)]
+pub enum UnOp {
+ /// The `*` operator for dereferencing
+ Deref,
+ /// The `!` operator for logical inversion
+ Not,
+ /// The `-` operator for negation
+ Neg,
+}
+
+impl UnOp {
+ pub fn to_string(op: UnOp) -> &'static str {
+ match op {
+ UnOp::Deref => "*",
+ UnOp::Not => "!",
+ UnOp::Neg => "-",
+ }
+ }
+}
+
+/// A statement.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Stmt {
+ pub id: NodeId,
+ pub kind: StmtKind,
+ pub span: Span,
+}
+
+impl Stmt {
+ pub fn has_trailing_semicolon(&self) -> bool {
+ match &self.kind {
+ StmtKind::Semi(_) => true,
+ StmtKind::MacCall(mac) => matches!(mac.style, MacStmtStyle::Semicolon),
+ _ => false,
+ }
+ }
+
+ /// Converts a parsed `Stmt` to a `Stmt` with
+ /// a trailing semicolon.
+ ///
+ /// This only modifies the parsed AST struct, not the attached
+ /// `LazyTokenStream`. The parser is responsible for calling
+ /// `CreateTokenStream::add_trailing_semi` when there is actually
+ /// a semicolon in the tokenstream.
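+ ///
+ /// E.g., `StmtKind::Expr(e)` becomes `StmtKind::Semi(e)`, and a macro
+ /// statement's style becomes `MacStmtStyle::Semicolon`; other statement
+ /// kinds are returned unchanged.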
+ pub fn add_trailing_semicolon(mut self) -> Self {
+ self.kind = match self.kind {
+ StmtKind::Expr(expr) => StmtKind::Semi(expr),
+ StmtKind::MacCall(mac) => {
+ StmtKind::MacCall(mac.map(|MacCallStmt { mac, style: _, attrs, tokens }| {
+ MacCallStmt { mac, style: MacStmtStyle::Semicolon, attrs, tokens }
+ }))
+ }
+ kind => kind,
+ };
+
+ self
+ }
+
+ pub fn is_item(&self) -> bool {
+ matches!(self.kind, StmtKind::Item(_))
+ }
+
+ pub fn is_expr(&self) -> bool {
+ matches!(self.kind, StmtKind::Expr(_))
+ }
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum StmtKind {
+ /// A local (let) binding.
+ Local(P<Local>),
+ /// An item definition.
+ Item(P<Item>),
+ /// Expr without trailing semicolon.
+ Expr(P<Expr>),
+ /// Expr with a trailing semicolon.
+ Semi(P<Expr>),
+ /// Just a trailing semicolon.
+ Empty,
+ /// Macro.
+ MacCall(P<MacCallStmt>),
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct MacCallStmt {
+ pub mac: MacCall,
+ pub style: MacStmtStyle,
+ pub attrs: AttrVec,
+ pub tokens: Option<LazyTokenStream>,
+}
+
+#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug)]
+pub enum MacStmtStyle {
+ /// The macro statement had a trailing semicolon (e.g., `foo! { ... };`,
+ /// `foo!(...);`, `foo![...];`).
+ Semicolon,
+ /// The macro statement had braces (e.g., `foo! { ... }`).
+ Braces,
+ /// The macro statement had parentheses or brackets and no semicolon (e.g.,
+ /// `foo!(...)`). All of these will end up being converted into macro
+ /// expressions.
+ NoBraces,
+}
+
+/// Local represents a `let` statement, e.g., `let <pat>: <ty> = <expr>;`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Local {
+ pub id: NodeId,
+ pub pat: P<Pat>,
+ pub ty: Option<P<Ty>>,
+ pub kind: LocalKind,
+ pub span: Span,
+ pub attrs: AttrVec,
+ pub tokens: Option<LazyTokenStream>,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum LocalKind {
+ /// Local declaration.
+ /// Example: `let x;`
+ Decl,
+ /// Local declaration with an initializer.
+ /// Example: `let x = y;`
+ Init(P<Expr>),
+ /// Local declaration with an initializer and an `else` clause.
+ /// Example: `let Some(x) = y else { return };`
+ InitElse(P<Expr>, P<Block>),
+}
+
+impl LocalKind {
+ pub fn init(&self) -> Option<&Expr> {
+ match self {
+ Self::Decl => None,
+ Self::Init(i) | Self::InitElse(i, _) => Some(i),
+ }
+ }
+
+ pub fn init_else_opt(&self) -> Option<(&Expr, Option<&Block>)> {
+ match self {
+ Self::Decl => None,
+ Self::Init(init) => Some((init, None)),
+ Self::InitElse(init, els) => Some((init, Some(els))),
+ }
+ }
+}
+
+/// An arm of a 'match'.
+///
+/// E.g., `0..=10 => { println!("match!") }` as in
+///
+/// ```
+/// match 123 {
+/// 0..=10 => { println!("match!") },
+/// _ => { println!("no match!") },
+/// }
+/// ```
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Arm {
+ pub attrs: AttrVec,
+ /// Match arm pattern, e.g. `10` in `match foo { 10 => {}, _ => {} }`
+ pub pat: P<Pat>,
+ /// Match arm guard, e.g. `n > 10` in `match foo { n if n > 10 => {}, _ => {} }`
+ pub guard: Option<P<Expr>>,
+ /// Match arm body.
+ pub body: P<Expr>,
+ pub span: Span,
+ pub id: NodeId,
+ pub is_placeholder: bool,
+}
+
+/// A single field in a struct expression, e.g. `x: value` and `y` in `Foo { x: value, y }`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct ExprField {
+ pub attrs: AttrVec,
+ pub id: NodeId,
+ pub span: Span,
+ pub ident: Ident,
+ pub expr: P<Expr>,
+ pub is_shorthand: bool,
+ pub is_placeholder: bool,
+}
+
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
+pub enum BlockCheckMode {
+ Default,
+ Unsafe(UnsafeSource),
+}
+
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
+pub enum UnsafeSource {
+ CompilerGenerated,
+ UserProvided,
+}
+
+/// A constant (expression) that's not an item or associated item,
+/// but needs its own `DefId` for type-checking, const-eval, etc.
+/// These are usually found nested inside types (e.g., array lengths)
+/// or expressions (e.g., repeat counts), and also used to define
+/// explicit discriminant values for enum variants.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct AnonConst {
+ pub id: NodeId,
+ pub value: P<Expr>,
+}
+
+/// An expression.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Expr {
+ pub id: NodeId,
+ pub kind: ExprKind,
+ pub span: Span,
+ pub attrs: AttrVec,
+ pub tokens: Option<LazyTokenStream>,
+}
+
+impl Expr {
+ /// Returns `true` if this expression would be valid somewhere that expects a value;
+ /// for example, an `if` condition.
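+ ///
+ /// E.g., `{ 1 + 1 }` and `{ return x; }` both return a value, while
+ /// `{ let y = 1; }` does not.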
+ pub fn returns(&self) -> bool {
+ if let ExprKind::Block(ref block, _) = self.kind {
+ match block.stmts.last().map(|last_stmt| &last_stmt.kind) {
+ // Implicit return
+ Some(StmtKind::Expr(_)) => true,
+ // Last statement is an explicit return?
+ Some(StmtKind::Semi(expr)) => matches!(expr.kind, ExprKind::Ret(_)),
+ // This is a block that doesn't end in either an implicit or explicit return.
+ _ => false,
+ }
+ } else {
+ // This is not a block, it is a value.
+ true
+ }
+ }
+
+ /// Is this expr either `N` or `{ N }`?
+ ///
+ /// If this is not the case, name resolution does not resolve `N` when using
+ /// `min_const_generics`, as more complex expressions are not supported.
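+ ///
+ /// E.g., both `N` and `{ N }` count as trivial, while `{ N + 1 }` and
+ /// `M::N` do not.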
+ pub fn is_potential_trivial_const_param(&self) -> bool {
+ let this = if let ExprKind::Block(ref block, None) = self.kind {
+ if block.stmts.len() == 1 {
+ if let StmtKind::Expr(ref expr) = block.stmts[0].kind { expr } else { self }
+ } else {
+ self
+ }
+ } else {
+ self
+ };
+
+ if let ExprKind::Path(None, ref path) = this.kind {
+ if path.segments.len() == 1 && path.segments[0].args.is_none() {
+ return true;
+ }
+ }
+
+ false
+ }
+
+ pub fn to_bound(&self) -> Option<GenericBound> {
+ match &self.kind {
+ ExprKind::Path(None, path) => Some(GenericBound::Trait(
+ PolyTraitRef::new(Vec::new(), path.clone(), self.span),
+ TraitBoundModifier::None,
+ )),
+ _ => None,
+ }
+ }
+
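+ /// Strips any number of grouping parentheses: e.g., `((x))` peels to `x`.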
+ pub fn peel_parens(&self) -> &Expr {
+ let mut expr = self;
+ while let ExprKind::Paren(inner) = &expr.kind {
+ expr = inner;
+ }
+ expr
+ }
+
+ /// Attempts to reparse as `Ty` (for diagnostic purposes).
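+ ///
+ /// E.g., `&mut (a, b)` reparses as a reference type when `a` and `b` reparse
+ /// as types, and `A + B` reparses as the trait object `A + B` when both
+ /// operands reparse as bounds.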
+ pub fn to_ty(&self) -> Option<P<Ty>> {
+ let kind = match &self.kind {
+ // Trivial conversions.
+ ExprKind::Path(qself, path) => TyKind::Path(qself.clone(), path.clone()),
+ ExprKind::MacCall(mac) => TyKind::MacCall(mac.clone()),
+
+ ExprKind::Paren(expr) => expr.to_ty().map(TyKind::Paren)?,
+
+ ExprKind::AddrOf(BorrowKind::Ref, mutbl, expr) => {
+ expr.to_ty().map(|ty| TyKind::Rptr(None, MutTy { ty, mutbl: *mutbl }))?
+ }
+
+ ExprKind::Repeat(expr, expr_len) => {
+ expr.to_ty().map(|ty| TyKind::Array(ty, expr_len.clone()))?
+ }
+
+ ExprKind::Array(exprs) if exprs.len() == 1 => exprs[0].to_ty().map(TyKind::Slice)?,
+
+ ExprKind::Tup(exprs) => {
+ let tys = exprs.iter().map(|expr| expr.to_ty()).collect::<Option<Vec<_>>>()?;
+ TyKind::Tup(tys)
+ }
+
+ // If the binary operator is `Add` and both `lhs` and `rhs` are trait bounds,
+ // then the type of the result is a trait object.
+ // Otherwise we don't assume the result type.
+ ExprKind::Binary(binop, lhs, rhs) if binop.node == BinOpKind::Add => {
+ if let (Some(lhs), Some(rhs)) = (lhs.to_bound(), rhs.to_bound()) {
+ TyKind::TraitObject(vec![lhs, rhs], TraitObjectSyntax::None)
+ } else {
+ return None;
+ }
+ }
+
+ ExprKind::Underscore => TyKind::Infer,
+
+ // This expression doesn't look like a type syntactically.
+ _ => return None,
+ };
+
+ Some(P(Ty { kind, id: self.id, span: self.span, tokens: None }))
+ }
+
+ pub fn precedence(&self) -> ExprPrecedence {
+ match self.kind {
+ ExprKind::Box(_) => ExprPrecedence::Box,
+ ExprKind::Array(_) => ExprPrecedence::Array,
+ ExprKind::ConstBlock(_) => ExprPrecedence::ConstBlock,
+ ExprKind::Call(..) => ExprPrecedence::Call,
+ ExprKind::MethodCall(..) => ExprPrecedence::MethodCall,
+ ExprKind::Tup(_) => ExprPrecedence::Tup,
+ ExprKind::Binary(op, ..) => ExprPrecedence::Binary(op.node),
+ ExprKind::Unary(..) => ExprPrecedence::Unary,
+ ExprKind::Lit(_) => ExprPrecedence::Lit,
+ ExprKind::Type(..) | ExprKind::Cast(..) => ExprPrecedence::Cast,
+ ExprKind::Let(..) => ExprPrecedence::Let,
+ ExprKind::If(..) => ExprPrecedence::If,
+ ExprKind::While(..) => ExprPrecedence::While,
+ ExprKind::ForLoop(..) => ExprPrecedence::ForLoop,
+ ExprKind::Loop(..) => ExprPrecedence::Loop,
+ ExprKind::Match(..) => ExprPrecedence::Match,
+ ExprKind::Closure(..) => ExprPrecedence::Closure,
+ ExprKind::Block(..) => ExprPrecedence::Block,
+ ExprKind::TryBlock(..) => ExprPrecedence::TryBlock,
+ ExprKind::Async(..) => ExprPrecedence::Async,
+ ExprKind::Await(..) => ExprPrecedence::Await,
+ ExprKind::Assign(..) => ExprPrecedence::Assign,
+ ExprKind::AssignOp(..) => ExprPrecedence::AssignOp,
+ ExprKind::Field(..) => ExprPrecedence::Field,
+ ExprKind::Index(..) => ExprPrecedence::Index,
+ ExprKind::Range(..) => ExprPrecedence::Range,
+ ExprKind::Underscore => ExprPrecedence::Path,
+ ExprKind::Path(..) => ExprPrecedence::Path,
+ ExprKind::AddrOf(..) => ExprPrecedence::AddrOf,
+ ExprKind::Break(..) => ExprPrecedence::Break,
+ ExprKind::Continue(..) => ExprPrecedence::Continue,
+ ExprKind::Ret(..) => ExprPrecedence::Ret,
+ ExprKind::InlineAsm(..) => ExprPrecedence::InlineAsm,
+ ExprKind::MacCall(..) => ExprPrecedence::Mac,
+ ExprKind::Struct(..) => ExprPrecedence::Struct,
+ ExprKind::Repeat(..) => ExprPrecedence::Repeat,
+ ExprKind::Paren(..) => ExprPrecedence::Paren,
+ ExprKind::Try(..) => ExprPrecedence::Try,
+ ExprKind::Yield(..) => ExprPrecedence::Yield,
+ ExprKind::Yeet(..) => ExprPrecedence::Yeet,
+ ExprKind::Err => ExprPrecedence::Err,
+ }
+ }
+
+ pub fn take(&mut self) -> Self {
+ mem::replace(
+ self,
+ Expr {
+ id: DUMMY_NODE_ID,
+ kind: ExprKind::Err,
+ span: DUMMY_SP,
+ attrs: ThinVec::new(),
+ tokens: None,
+ },
+ )
+ }
+
+ /// To a first-order approximation, is this a pattern?
+ pub fn is_approximately_pattern(&self) -> bool {
+ match &self.peel_parens().kind {
+ ExprKind::Box(_)
+ | ExprKind::Array(_)
+ | ExprKind::Call(_, _)
+ | ExprKind::Tup(_)
+ | ExprKind::Lit(_)
+ | ExprKind::Range(_, _, _)
+ | ExprKind::Underscore
+ | ExprKind::Path(_, _)
+ | ExprKind::Struct(_) => true,
+ _ => false,
+ }
+ }
+}
+
+/// Limit types of a range (inclusive or exclusive)
+#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug)]
+pub enum RangeLimits {
+ /// Inclusive at the beginning, exclusive at the end
+ HalfOpen,
+ /// Inclusive at the beginning and end
+ Closed,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum StructRest {
+ /// `..x`.
+ Base(P<Expr>),
+ /// `..`.
+ Rest(Span),
+ /// No trailing `..` or expression.
+ None,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct StructExpr {
+ pub qself: Option<QSelf>,
+ pub path: Path,
+ pub fields: Vec<ExprField>,
+ pub rest: StructRest,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum ExprKind {
+ /// A `box x` expression.
+ Box(P<Expr>),
+ /// An array (`[a, b, c, d]`)
+ Array(Vec<P<Expr>>),
+ /// An anonymous constant from an inline `const` block.
+ ConstBlock(AnonConst),
+ /// A function call
+ ///
+ /// The first field resolves to the function itself,
+ /// and the second field is the list of arguments.
+ /// This also represents calling the constructor of
+ /// tuple-like ADTs such as tuple structs and enum variants.
+ Call(P<Expr>, Vec<P<Expr>>),
+ /// A method call (`x.foo::<'static, Bar, Baz>(a, b, c, d)`)
+ ///
+ /// The `PathSegment` represents the method name and its generic arguments
+ /// (within the angle brackets).
+ /// The first element of the `Expr` vector is the expression that evaluates
+ /// to the object on which the method is being called (the receiver),
+ /// and the remaining elements are the rest of the arguments.
+ /// Thus, `x.foo::<Bar, Baz>(a, b, c, d)` is represented as
+ /// `ExprKind::MethodCall(PathSegment { foo, [Bar, Baz] }, [x, a, b, c, d])`.
+ /// The `Span` is the span of the function, without the dot and receiver
+ /// (e.g., `foo(a, b)` in `x.foo(a, b)`).
+ MethodCall(PathSegment, Vec<P<Expr>>, Span),
+ /// A tuple (e.g., `(a, b, c, d)`).
+ Tup(Vec<P<Expr>>),
+ /// A binary operation (e.g., `a + b`, `a * b`).
+ Binary(BinOp, P<Expr>, P<Expr>),
+ /// A unary operation (e.g., `!x`, `*x`).
+ Unary(UnOp, P<Expr>),
+ /// A literal (e.g., `1`, `"foo"`).
+ Lit(Lit),
+ /// A cast (e.g., `foo as f64`).
+ Cast(P<Expr>, P<Ty>),
+ /// A type ascription (e.g., `42: usize`).
+ Type(P<Expr>, P<Ty>),
+ /// A `let pat = expr` expression that is only semantically allowed in the condition
+ /// of `if` / `while` expressions. (e.g., `if let 0 = x { .. }`).
+ ///
+ /// `Span` represents the whole `let pat = expr` statement.
+ Let(P<Pat>, P<Expr>, Span),
+ /// An `if` block, with an optional `else` block.
+ ///
+ /// `if expr { block } else { expr }`
+ If(P<Expr>, P<Block>, Option<P<Expr>>),
+ /// A while loop, with an optional label.
+ ///
+ /// `'label: while expr { block }`
+ While(P<Expr>, P<Block>, Option<Label>),
+ /// A `for` loop, with an optional label.
+ ///
+ /// `'label: for pat in expr { block }`
+ ///
+ /// This is desugared to a combination of `loop` and `match` expressions.
+ ForLoop(P<Pat>, P<Expr>, P<Block>, Option<Label>),
+ /// Conditionless loop (can be exited with `break`, `continue`, or `return`).
+ ///
+ /// `'label: loop { block }`
+ Loop(P<Block>, Option<Label>),
+ /// A `match` block.
+ Match(P<Expr>, Vec<Arm>),
+ /// A closure (e.g., `move |a, b, c| a + b + c`).
+ ///
+ /// The final span is the span of the argument block `|...|`.
+ Closure(ClosureBinder, CaptureBy, Async, Movability, P<FnDecl>, P<Expr>, Span),
+ /// A block (`'label: { ... }`).
+ Block(P<Block>, Option<Label>),
+ /// An async block (`async move { ... }`).
+ ///
+ /// The `NodeId` is the `NodeId` for the closure that results from
+ /// desugaring an async block, just like the `NodeId` field in the
+ /// `Async::Yes` variant. This is necessary in order to create a def for the
+ /// closure which can be used as a parent of any child defs. Defs
+ /// created during lowering cannot be made the parent of any other
+ /// preexisting defs.
+ Async(CaptureBy, NodeId, P<Block>),
+ /// An await expression (`my_future.await`).
+ Await(P<Expr>),
+
+ /// A try block (`try { ... }`).
+ TryBlock(P<Block>),
+
+ /// An assignment (`a = foo()`).
+ /// The `Span` argument is the span of the `=` token.
+ Assign(P<Expr>, P<Expr>, Span),
+ /// An assignment with an operator.
+ ///
+ /// E.g., `a += 1`.
+ AssignOp(BinOp, P<Expr>, P<Expr>),
+ /// Access of a named (e.g., `obj.foo`) or unnamed (e.g., `obj.0`) struct field.
+ Field(P<Expr>, Ident),
+ /// An indexing operation (e.g., `foo[2]`).
+ Index(P<Expr>, P<Expr>),
+ /// A range (e.g., `1..2`, `1..`, `..2`, `1..=2`, `..=2`; and `..` in destructuring assignment).
+ Range(Option<P<Expr>>, Option<P<Expr>>, RangeLimits),
+ /// An underscore, used in destructuring assignment to ignore a value.
+ Underscore,
+
+ /// Variable reference, possibly containing `::` and/or type
+ /// parameters (e.g., `foo::bar::<baz>`).
+ ///
+ /// Optionally "qualified" (e.g., `<Vec<T> as SomeTrait>::SomeType`).
+ Path(Option<QSelf>, Path),
+
+ /// A referencing operation (`&a`, `&mut a`, `&raw const a` or `&raw mut a`).
+ AddrOf(BorrowKind, Mutability, P<Expr>),
+ /// A `break`, with an optional label to break, and an optional expression.
+ Break(Option<Label>, Option<P<Expr>>),
+ /// A `continue`, with an optional label.
+ Continue(Option<Label>),
+ /// A `return`, with an optional value to be returned.
+ Ret(Option<P<Expr>>),
+
+ /// Output of the `asm!()` macro.
+ InlineAsm(P<InlineAsm>),
+
+ /// A macro invocation; pre-expansion.
+ MacCall(MacCall),
+
+ /// A struct literal expression.
+ ///
+ /// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. rest}`.
+ Struct(P<StructExpr>),
+
+ /// An array literal constructed from one repeated element.
+ ///
+ /// E.g., `[1; 5]`. The expression is the element to be
+ /// repeated; the constant is the number of times to repeat it.
+ Repeat(P<Expr>, AnonConst),
+
+ /// No-op: used solely so we can pretty-print faithfully.
+ Paren(P<Expr>),
+
+ /// A try expression (`expr?`).
+ Try(P<Expr>),
+
+ /// A `yield`, with an optional value to be yielded.
+ Yield(Option<P<Expr>>),
+
+ /// A `do yeet` (aka `throw`/`fail`/`bail`/`raise`/whatever),
+ /// with an optional value to be returned.
+ Yeet(Option<P<Expr>>),
+
+ /// Placeholder for an expression that wasn't syntactically well-formed in some way.
+ Err,
+}
+
+/// The explicit `Self` type in a "qualified path". The actual
+/// path, including the trait and the associated item, is stored
+/// separately. `position` represents the index of the associated
+/// item qualified with this `Self` type.
+///
+/// ```ignore (only-for-syntax-highlight)
+/// <Vec<T> as a::b::Trait>::AssociatedItem
+/// ^~~~~ ~~~~~~~~~~~~~~^
+/// ty position = 3
+///
+/// <Vec<T>>::AssociatedItem
+/// ^~~~~ ^
+/// ty position = 0
+/// ```
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct QSelf {
+ pub ty: P<Ty>,
+
+ /// The span of `a::b::Trait` in a path like `<Vec<T> as
+ /// a::b::Trait>::AssociatedItem`; in the case where `position ==
+ /// 0`, this is an empty span.
+ pub path_span: Span,
+ pub position: usize,
+}
+
+/// A capture clause used in closures and `async` blocks.
+#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum CaptureBy {
+ /// `move |x| y + x`.
+ Value,
+ /// `move` keyword was not specified.
+ Ref,
+}
+
+/// The movability of a generator / closure literal:
+/// whether a generator contains self-references, causing it to be `!Unpin`.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable, Debug, Copy)]
+#[derive(HashStable_Generic)]
+pub enum Movability {
+ /// May contain self-references, `!Unpin`.
+ Static,
+ /// Must not contain self-references, `Unpin`.
+ Movable,
+}
+
+/// Closure lifetime binder, `for<'a, 'b>` in `for<'a, 'b> |_: &'a (), _: &'b ()|`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum ClosureBinder {
+ /// The binder is not present, all closure lifetimes are inferred.
+ NotPresent,
+ /// The binder is present.
+ For {
+ /// Span of the whole `for<>` clause
+ ///
+ /// ```text
+ /// for<'a, 'b> |_: &'a (), _: &'b ()| { ... }
+ /// ^^^^^^^^^^^ -- this
+ /// ```
+ span: Span,
+
+ /// Lifetimes in the `for<>` closure
+ ///
+ /// ```text
+ /// for<'a, 'b> |_: &'a (), _: &'b ()| { ... }
+ /// ^^^^^^ -- this
+ /// ```
+ generic_params: P<[GenericParam]>,
+ },
+}
+
+/// Represents a macro invocation. The `path` indicates which macro
+/// is being invoked, and the `args` are arguments passed to it.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct MacCall {
+ pub path: Path,
+ pub args: P<MacArgs>,
+ pub prior_type_ascription: Option<(Span, bool)>,
+}
+
+impl MacCall {
+ pub fn span(&self) -> Span {
+ self.path.span.to(self.args.span().unwrap_or(self.path.span))
+ }
+}
+
+/// Arguments passed to an attribute or a function-like macro.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum MacArgs {
+ /// No arguments - `#[attr]`.
+ Empty,
+ /// Delimited arguments - `#[attr()/[]/{}]` or `mac!()/[]/{}`.
+ Delimited(DelimSpan, MacDelimiter, TokenStream),
+ /// Arguments of a key-value attribute - `#[attr = "value"]`.
+ Eq(
+ /// Span of the `=` token.
+ Span,
+ /// The "value".
+ MacArgsEq,
+ ),
+}
+
+// The RHS of a `MacArgs::Eq` starts out as an expression. Once macro expansion
+// is completed, all cases end up either as a literal, which is the form used
+// after lowering to HIR, or as an error.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum MacArgsEq {
+ Ast(P<Expr>),
+ Hir(Lit),
+}
+
+impl MacArgs {
+ pub fn delim(&self) -> Option<Delimiter> {
+ match self {
+ MacArgs::Delimited(_, delim, _) => Some(delim.to_token()),
+ MacArgs::Empty | MacArgs::Eq(..) => None,
+ }
+ }
+
+ pub fn span(&self) -> Option<Span> {
+ match self {
+ MacArgs::Empty => None,
+ MacArgs::Delimited(dspan, ..) => Some(dspan.entire()),
+ MacArgs::Eq(eq_span, MacArgsEq::Ast(expr)) => Some(eq_span.to(expr.span)),
+ MacArgs::Eq(_, MacArgsEq::Hir(lit)) => {
+ unreachable!("in literal form when getting span: {:?}", lit);
+ }
+ }
+ }
+
+ /// Tokens inside the delimiters or after `=`.
+ /// Proc macros see these tokens, for example.
+ pub fn inner_tokens(&self) -> TokenStream {
+ match self {
+ MacArgs::Empty => TokenStream::default(),
+ MacArgs::Delimited(.., tokens) => tokens.clone(),
+ MacArgs::Eq(_, MacArgsEq::Ast(expr)) => TokenStream::from_ast(expr),
+ MacArgs::Eq(_, MacArgsEq::Hir(lit)) => {
+ unreachable!("in literal form when getting inner tokens: {:?}", lit)
+ }
+ }
+ }
+
+ /// Whether a macro with these arguments needs a semicolon
+ /// when used as a standalone item or statement.
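+ ///
+ /// E.g., `foo!(..)` needs a trailing semicolon when used as a statement,
+ /// while `foo! { .. }` does not.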
+ pub fn need_semicolon(&self) -> bool {
+ !matches!(self, MacArgs::Delimited(_, MacDelimiter::Brace, _))
+ }
+}
+
+impl<CTX> HashStable<CTX> for MacArgs
+where
+ CTX: crate::HashStableContext,
+{
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ mem::discriminant(self).hash_stable(ctx, hasher);
+ match self {
+ MacArgs::Empty => {}
+ MacArgs::Delimited(dspan, delim, tokens) => {
+ dspan.hash_stable(ctx, hasher);
+ delim.hash_stable(ctx, hasher);
+ tokens.hash_stable(ctx, hasher);
+ }
+ MacArgs::Eq(_eq_span, MacArgsEq::Ast(expr)) => {
+ unreachable!("hash_stable {:?}", expr);
+ }
+ MacArgs::Eq(eq_span, MacArgsEq::Hir(lit)) => {
+ eq_span.hash_stable(ctx, hasher);
+ lit.hash_stable(ctx, hasher);
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum MacDelimiter {
+ Parenthesis,
+ Bracket,
+ Brace,
+}
+
+impl MacDelimiter {
+ pub fn to_token(self) -> Delimiter {
+ match self {
+ MacDelimiter::Parenthesis => Delimiter::Parenthesis,
+ MacDelimiter::Bracket => Delimiter::Bracket,
+ MacDelimiter::Brace => Delimiter::Brace,
+ }
+ }
+
+ pub fn from_token(delim: Delimiter) -> Option<MacDelimiter> {
+ match delim {
+ Delimiter::Parenthesis => Some(MacDelimiter::Parenthesis),
+ Delimiter::Bracket => Some(MacDelimiter::Bracket),
+ Delimiter::Brace => Some(MacDelimiter::Brace),
+ Delimiter::Invisible => None,
+ }
+ }
+}
+
+/// Represents a macro definition.
+#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
+pub struct MacroDef {
+ pub body: P<MacArgs>,
+ /// `true` if the macro was defined with `macro_rules!`.
+ pub macro_rules: bool,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug, Copy, Hash, Eq, PartialEq)]
+#[derive(HashStable_Generic)]
+pub enum StrStyle {
+ /// A regular string, like `"foo"`.
+ Cooked,
+ /// A raw string, like `r##"foo"##`.
+ ///
+ /// The value is the number of `#` symbols used.
+ Raw(u8),
+}
+
+/// An AST literal.
+#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
+pub struct Lit {
+ /// The original literal token as written in source code.
+ pub token: token::Lit,
+ /// The "semantic" representation of the literal lowered from the original tokens.
+ /// Strings are unescaped, hexadecimal forms are eliminated, etc.
+ /// FIXME: Remove this and only create the semantic representation during lowering to HIR.
+ pub kind: LitKind,
+ pub span: Span,
+}
+
+/// Same as `Lit`, but restricted to string literals.
+#[derive(Clone, Copy, Encodable, Decodable, Debug)]
+pub struct StrLit {
+ /// The original literal token as written in source code.
+ pub style: StrStyle,
+ pub symbol: Symbol,
+ pub suffix: Option<Symbol>,
+ pub span: Span,
+ /// The unescaped "semantic" representation of the literal lowered from the original token.
+ /// FIXME: Remove this and only create the semantic representation during lowering to HIR.
+ pub symbol_unescaped: Symbol,
+}
+
+impl StrLit {
+ pub fn as_lit(&self) -> Lit {
+ let token_kind = match self.style {
+ StrStyle::Cooked => token::Str,
+ StrStyle::Raw(n) => token::StrRaw(n),
+ };
+ Lit {
+ token: token::Lit::new(token_kind, self.symbol, self.suffix),
+ span: self.span,
+ kind: LitKind::Str(self.symbol_unescaped, self.style),
+ }
+ }
+}
+
+/// Type of the integer literal based on provided suffix.
+#[derive(Clone, Copy, Encodable, Decodable, Debug, Hash, Eq, PartialEq)]
+#[derive(HashStable_Generic)]
+pub enum LitIntType {
+ /// e.g. `42_i32`.
+ Signed(IntTy),
+ /// e.g. `42_u32`.
+ Unsigned(UintTy),
+ /// e.g. `42`.
+ Unsuffixed,
+}
+
+/// Type of the float literal based on provided suffix.
+#[derive(Clone, Copy, Encodable, Decodable, Debug, Hash, Eq, PartialEq)]
+#[derive(HashStable_Generic)]
+pub enum LitFloatType {
+ /// A float literal with a suffix (`1f32` or `1E10f32`).
+ Suffixed(FloatTy),
+ /// A float literal without a suffix (`1.0` or `1.0E10`).
+ Unsuffixed,
+}
+
+/// Literal kind.
+///
+/// E.g., `"foo"`, `42`, `12.34`, or `bool`.
+#[derive(Clone, Encodable, Decodable, Debug, Hash, Eq, PartialEq, HashStable_Generic)]
+pub enum LitKind {
+ /// A string literal (`"foo"`).
+ Str(Symbol, StrStyle),
+ /// A byte string (`b"foo"`).
+ ByteStr(Lrc<[u8]>),
+ /// A byte char (`b'f'`).
+ Byte(u8),
+ /// A character literal (`'a'`).
+ Char(char),
+ /// An integer literal (`1`).
+ Int(u128, LitIntType),
+ /// A float literal (`1f64` or `1E10f64`).
+ Float(Symbol, LitFloatType),
+ /// A boolean literal.
+ Bool(bool),
+ /// Placeholder for a literal that wasn't well-formed in some way.
+ Err(Symbol),
+}
+
+impl LitKind {
+ /// Returns `true` if this literal is a string.
+ pub fn is_str(&self) -> bool {
+ matches!(self, LitKind::Str(..))
+ }
+
+ /// Returns `true` if this literal is a byte string literal.
+ pub fn is_bytestr(&self) -> bool {
+ matches!(self, LitKind::ByteStr(_))
+ }
+
+ /// Returns `true` if this is a numeric literal.
+ pub fn is_numeric(&self) -> bool {
+ matches!(self, LitKind::Int(..) | LitKind::Float(..))
+ }
+
+ /// Returns `true` if this literal has no suffix.
+ /// Note: this will return `true` for literals with prefixes such as raw strings and byte strings.
+ pub fn is_unsuffixed(&self) -> bool {
+ !self.is_suffixed()
+ }
+
+ /// Returns `true` if this literal has a suffix.
+ pub fn is_suffixed(&self) -> bool {
+ match *self {
+ // suffixed variants
+ LitKind::Int(_, LitIntType::Signed(..) | LitIntType::Unsigned(..))
+ | LitKind::Float(_, LitFloatType::Suffixed(..)) => true,
+ // unsuffixed variants
+ LitKind::Str(..)
+ | LitKind::ByteStr(..)
+ | LitKind::Byte(..)
+ | LitKind::Char(..)
+ | LitKind::Int(_, LitIntType::Unsuffixed)
+ | LitKind::Float(_, LitFloatType::Unsuffixed)
+ | LitKind::Bool(..)
+ | LitKind::Err(..) => false,
+ }
+ }
+}
+
+// N.B., If you change this, you'll probably want to change the corresponding
+// type structure in `middle/ty.rs` as well.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct MutTy {
+ pub ty: P<Ty>,
+ pub mutbl: Mutability,
+}
+
+/// Represents a function's signature in a trait declaration,
+/// trait implementation, or free function.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct FnSig {
+ pub header: FnHeader,
+ pub decl: P<FnDecl>,
+ pub span: Span,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+pub enum FloatTy {
+ F32,
+ F64,
+}
+
+impl FloatTy {
+ pub fn name_str(self) -> &'static str {
+ match self {
+ FloatTy::F32 => "f32",
+ FloatTy::F64 => "f64",
+ }
+ }
+
+ pub fn name(self) -> Symbol {
+ match self {
+ FloatTy::F32 => sym::f32,
+ FloatTy::F64 => sym::f64,
+ }
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+pub enum IntTy {
+ Isize,
+ I8,
+ I16,
+ I32,
+ I64,
+ I128,
+}
+
+impl IntTy {
+ pub fn name_str(&self) -> &'static str {
+ match *self {
+ IntTy::Isize => "isize",
+ IntTy::I8 => "i8",
+ IntTy::I16 => "i16",
+ IntTy::I32 => "i32",
+ IntTy::I64 => "i64",
+ IntTy::I128 => "i128",
+ }
+ }
+
+ pub fn name(&self) -> Symbol {
+ match *self {
+ IntTy::Isize => sym::isize,
+ IntTy::I8 => sym::i8,
+ IntTy::I16 => sym::i16,
+ IntTy::I32 => sym::i32,
+ IntTy::I64 => sym::i64,
+ IntTy::I128 => sym::i128,
+ }
+ }
+}
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Debug)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+pub enum UintTy {
+ Usize,
+ U8,
+ U16,
+ U32,
+ U64,
+ U128,
+}
+
+impl UintTy {
+ pub fn name_str(&self) -> &'static str {
+ match *self {
+ UintTy::Usize => "usize",
+ UintTy::U8 => "u8",
+ UintTy::U16 => "u16",
+ UintTy::U32 => "u32",
+ UintTy::U64 => "u64",
+ UintTy::U128 => "u128",
+ }
+ }
+
+ pub fn name(&self) -> Symbol {
+ match *self {
+ UintTy::Usize => sym::usize,
+ UintTy::U8 => sym::u8,
+ UintTy::U16 => sym::u16,
+ UintTy::U32 => sym::u32,
+ UintTy::U64 => sym::u64,
+ UintTy::U128 => sym::u128,
+ }
+ }
+}
+
+/// A constraint on an associated type (e.g., `A = Bar` in `Foo<A = Bar>` or
+/// `A: TraitA + TraitB` in `Foo<A: TraitA + TraitB>`).
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct AssocConstraint {
+ pub id: NodeId,
+ pub ident: Ident,
+ pub gen_args: Option<GenericArgs>,
+ pub kind: AssocConstraintKind,
+ pub span: Span,
+}
+
+/// The kinds of an `AssocConstraint`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum Term {
+ Ty(P<Ty>),
+ Const(AnonConst),
+}
+
+impl From<P<Ty>> for Term {
+ fn from(v: P<Ty>) -> Self {
+ Term::Ty(v)
+ }
+}
+
+impl From<AnonConst> for Term {
+ fn from(v: AnonConst) -> Self {
+ Term::Const(v)
+ }
+}
+
+/// The kinds of an `AssocConstraint`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum AssocConstraintKind {
+ /// E.g., `A = Bar`, `A = 3` in `Foo<A = Bar>` where A is an associated type.
+ Equality { term: Term },
+ /// E.g. `A: TraitA + TraitB` in `Foo<A: TraitA + TraitB>`.
+ Bound { bounds: GenericBounds },
+}
+
+#[derive(Encodable, Decodable, Debug)]
+pub struct Ty {
+ pub id: NodeId,
+ pub kind: TyKind,
+ pub span: Span,
+ pub tokens: Option<LazyTokenStream>,
+}
+
+impl Clone for Ty {
+ fn clone(&self) -> Self {
+ ensure_sufficient_stack(|| Self {
+ id: self.id,
+ kind: self.kind.clone(),
+ span: self.span,
+ tokens: self.tokens.clone(),
+ })
+ }
+}
+
+impl Ty {
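+ /// Strips reference types: e.g., `&&mut T` peels to `T`. Only `Rptr` layers
+ /// are removed; raw pointers (`*const T`/`*mut T`) are kept.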
+ pub fn peel_refs(&self) -> &Self {
+ let mut final_ty = self;
+ while let TyKind::Rptr(_, MutTy { ty, .. }) = &final_ty.kind {
+ final_ty = ty;
+ }
+ final_ty
+ }
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct BareFnTy {
+ pub unsafety: Unsafe,
+ pub ext: Extern,
+ pub generic_params: Vec<GenericParam>,
+ pub decl: P<FnDecl>,
+ /// Span of the `fn(...) -> ...` part.
+ pub decl_span: Span,
+}
+
+/// The various kinds of type recognized by the compiler.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum TyKind {
+ /// A variable-length slice (`[T]`).
+ Slice(P<Ty>),
+ /// A fixed length array (`[T; n]`).
+ Array(P<Ty>, AnonConst),
+ /// A raw pointer (`*const T` or `*mut T`).
+ Ptr(MutTy),
+ /// A reference (`&'a T` or `&'a mut T`).
+ Rptr(Option<Lifetime>, MutTy),
+ /// A bare function (e.g., `fn(usize) -> bool`).
+ BareFn(P<BareFnTy>),
+ /// The never type (`!`).
+ Never,
+ /// A tuple (`(A, B, C, D,...)`).
+ Tup(Vec<P<Ty>>),
+ /// A path (`module::module::...::Type`), optionally
+ /// "qualified", e.g., `<Vec<T> as SomeTrait>::SomeType`.
+ ///
+ /// Type parameters are stored in the `Path` itself.
+ Path(Option<QSelf>, Path),
+ /// A trait object type `Bound1 + Bound2 + Bound3`
+ /// where `Bound` is a trait or a lifetime.
+ TraitObject(GenericBounds, TraitObjectSyntax),
+ /// An `impl Bound1 + Bound2 + Bound3` type
+ /// where `Bound` is a trait or a lifetime.
+ ///
+ /// The `NodeId` exists to prevent lowering from having to
+ /// generate `NodeId`s on the fly, which would complicate
+ /// the generation of opaque `type Foo = impl Trait` items significantly.
+ ImplTrait(NodeId, GenericBounds),
+ /// No-op; kept solely so that we can pretty-print faithfully.
+ Paren(P<Ty>),
+ /// Unused for now.
+ Typeof(AnonConst),
+ /// This means the type should be inferred instead of it having been
+ /// specified. This can appear anywhere in a type.
+ Infer,
+ /// Inferred type of a `self` or `&self` argument in a method.
+ ImplicitSelf,
+ /// A macro in the type position.
+ MacCall(MacCall),
+ /// Placeholder for a kind that has failed to be defined.
+ Err,
+ /// Placeholder for a `va_list`.
+ CVarArgs,
+}
+
+impl TyKind {
+ pub fn is_implicit_self(&self) -> bool {
+ matches!(self, TyKind::ImplicitSelf)
+ }
+
+ pub fn is_unit(&self) -> bool {
+ matches!(self, TyKind::Tup(tys) if tys.is_empty())
+ }
+
+ pub fn is_simple_path(&self) -> Option<Symbol> {
+ if let TyKind::Path(None, Path { segments, .. }) = &self && segments.len() == 1 {
+ Some(segments[0].ident.name)
+ } else {
+ None
+ }
+ }
+}
+
+/// Syntax used to declare a trait object.
+#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum TraitObjectSyntax {
+ Dyn,
+ None,
+}
+
+/// Inline assembly operand explicit register or register class.
+///
+/// E.g., `"eax"` as in `asm!("mov eax, 2", out("eax") result)`.
+#[derive(Clone, Copy, Encodable, Decodable, Debug)]
+pub enum InlineAsmRegOrRegClass {
+ Reg(Symbol),
+ RegClass(Symbol),
+}
+
+bitflags::bitflags! {
+ #[derive(Encodable, Decodable, HashStable_Generic)]
+ pub struct InlineAsmOptions: u16 {
+ const PURE = 1 << 0;
+ const NOMEM = 1 << 1;
+ const READONLY = 1 << 2;
+ const PRESERVES_FLAGS = 1 << 3;
+ const NORETURN = 1 << 4;
+ const NOSTACK = 1 << 5;
+ const ATT_SYNTAX = 1 << 6;
+ const RAW = 1 << 7;
+ const MAY_UNWIND = 1 << 8;
+ }
+}
+
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Hash, HashStable_Generic)]
+pub enum InlineAsmTemplatePiece {
+ String(String),
+ Placeholder { operand_idx: usize, modifier: Option<char>, span: Span },
+}
+
+impl fmt::Display for InlineAsmTemplatePiece {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::String(s) => {
+ for c in s.chars() {
+ match c {
+ '{' => f.write_str("{{")?,
+ '}' => f.write_str("}}")?,
+ _ => c.fmt(f)?,
+ }
+ }
+ Ok(())
+ }
+ Self::Placeholder { operand_idx, modifier: Some(modifier), .. } => {
+ write!(f, "{{{}:{}}}", operand_idx, modifier)
+ }
+ Self::Placeholder { operand_idx, modifier: None, .. } => {
+ write!(f, "{{{}}}", operand_idx)
+ }
+ }
+ }
+}
+
+impl InlineAsmTemplatePiece {
+ /// Rebuilds the asm template string from its pieces.
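+ ///
+ /// A hedged round-trip sketch (the pieces are invented for illustration):
+ /// `String("mov {r}, ")` followed by `Placeholder { operand_idx: 0,
+ /// modifier: None, .. }` renders as `"mov {{r}}, {0}"`, since literal braces
+ /// are re-escaped by the `Display` impl above.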
+ pub fn to_string(s: &[Self]) -> String {
+ use fmt::Write;
+ let mut out = String::new();
+ for p in s.iter() {
+ let _ = write!(out, "{}", p);
+ }
+ out
+ }
+}
+
+/// Inline assembly symbol operands get their own AST node that is somewhat
+/// similar to `AnonConst`.
+///
+/// The main difference is that we specifically don't assign it a `DefId` in
+/// `DefCollector`. Instead this is deferred until AST lowering where we
+/// lower it to an `AnonConst` (for functions) or a `Path` (for statics)
+/// depending on what the path resolves to.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct InlineAsmSym {
+ pub id: NodeId,
+ pub qself: Option<QSelf>,
+ pub path: Path,
+}
+
+/// Inline assembly operand.
+///
+/// E.g., `out("eax") result` as in `asm!("mov eax, 2", out("eax") result)`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum InlineAsmOperand {
+ In {
+ reg: InlineAsmRegOrRegClass,
+ expr: P<Expr>,
+ },
+ Out {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ expr: Option<P<Expr>>,
+ },
+ InOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ expr: P<Expr>,
+ },
+ SplitInOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ in_expr: P<Expr>,
+ out_expr: Option<P<Expr>>,
+ },
+ Const {
+ anon_const: AnonConst,
+ },
+ Sym {
+ sym: InlineAsmSym,
+ },
+}
+
+/// Inline assembly.
+///
+/// E.g., `asm!("NOP");`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct InlineAsm {
+ pub template: Vec<InlineAsmTemplatePiece>,
+ pub template_strs: Box<[(Symbol, Option<Symbol>, Span)]>,
+ pub operands: Vec<(InlineAsmOperand, Span)>,
+ pub clobber_abis: Vec<(Symbol, Span)>,
+ pub options: InlineAsmOptions,
+ pub line_spans: Vec<Span>,
+}
+
+/// A parameter in a function header.
+///
+/// E.g., `bar: usize` as in `fn foo(bar: usize)`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Param {
+ pub attrs: AttrVec,
+ pub ty: P<Ty>,
+ pub pat: P<Pat>,
+ pub id: NodeId,
+ pub span: Span,
+ pub is_placeholder: bool,
+}
+
+/// Alternative representation for `Param`s describing the `self` parameter of methods.
+///
+/// E.g., `&mut self` as in `fn foo(&mut self)`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum SelfKind {
+ /// `self`, `mut self`
+ Value(Mutability),
+ /// `&'lt self`, `&'lt mut self`
+ Region(Option<Lifetime>, Mutability),
+ /// `self: TYPE`, `mut self: TYPE`
+ Explicit(P<Ty>, Mutability),
+}
+
+pub type ExplicitSelf = Spanned<SelfKind>;
+
+impl Param {
+ /// Attempts to cast parameter to `ExplicitSelf`.
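+ ///
+ /// A sketch of the mapping:
+ /// - `self` / `mut self` => `SelfKind::Value(..)`
+ /// - `&self` / `&'a mut self` => `SelfKind::Region(..)`
+ /// - `self: Ty` / `mut self: Ty` => `SelfKind::Explicit(..)`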
+ pub fn to_self(&self) -> Option<ExplicitSelf> {
+ if let PatKind::Ident(BindingMode::ByValue(mutbl), ident, _) = self.pat.kind {
+ if ident.name == kw::SelfLower {
+ return match self.ty.kind {
+ TyKind::ImplicitSelf => Some(respan(self.pat.span, SelfKind::Value(mutbl))),
+ TyKind::Rptr(lt, MutTy { ref ty, mutbl }) if ty.kind.is_implicit_self() => {
+ Some(respan(self.pat.span, SelfKind::Region(lt, mutbl)))
+ }
+ _ => Some(respan(
+ self.pat.span.to(self.ty.span),
+ SelfKind::Explicit(self.ty.clone(), mutbl),
+ )),
+ };
+ }
+ }
+ None
+ }
+
+ /// Returns `true` if parameter is `self`.
+ pub fn is_self(&self) -> bool {
+ if let PatKind::Ident(_, ident, _) = self.pat.kind {
+ ident.name == kw::SelfLower
+ } else {
+ false
+ }
+ }
+
+ /// Builds a `Param` object from `ExplicitSelf`.
+ pub fn from_self(attrs: AttrVec, eself: ExplicitSelf, eself_ident: Ident) -> Param {
+ let span = eself.span.to(eself_ident.span);
+ let infer_ty = P(Ty { id: DUMMY_NODE_ID, kind: TyKind::ImplicitSelf, span, tokens: None });
+ let param = |mutbl, ty| Param {
+ attrs,
+ pat: P(Pat {
+ id: DUMMY_NODE_ID,
+ kind: PatKind::Ident(BindingMode::ByValue(mutbl), eself_ident, None),
+ span,
+ tokens: None,
+ }),
+ span,
+ ty,
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
+ };
+ match eself.node {
+ SelfKind::Explicit(ty, mutbl) => param(mutbl, ty),
+ SelfKind::Value(mutbl) => param(mutbl, infer_ty),
+ SelfKind::Region(lt, mutbl) => param(
+ Mutability::Not,
+ P(Ty {
+ id: DUMMY_NODE_ID,
+ kind: TyKind::Rptr(lt, MutTy { ty: infer_ty, mutbl }),
+ span,
+ tokens: None,
+ }),
+ ),
+ }
+ }
+}
+
+/// A signature (not the body) of a function declaration.
+///
+/// E.g., `fn foo(bar: baz)`.
+///
+/// Note that it differs from the `FnHeader` struct,
+/// which contains metadata about a function's safety, asyncness, constness, and ABI.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct FnDecl {
+ pub inputs: Vec<Param>,
+ pub output: FnRetTy,
+}
+
+impl FnDecl {
+ pub fn has_self(&self) -> bool {
+ self.inputs.get(0).map_or(false, Param::is_self)
+ }
+ pub fn c_variadic(&self) -> bool {
+ self.inputs.last().map_or(false, |arg| matches!(arg.ty.kind, TyKind::CVarArgs))
+ }
+}
+
+/// Is the trait definition an auto trait?
+#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum IsAuto {
+ Yes,
+ No,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Encodable, Decodable, Debug)]
+#[derive(HashStable_Generic)]
+pub enum Unsafe {
+ Yes(Span),
+ No,
+}
+
+#[derive(Copy, Clone, Encodable, Decodable, Debug)]
+pub enum Async {
+ Yes { span: Span, closure_id: NodeId, return_impl_trait_id: NodeId },
+ No,
+}
+
+impl Async {
+ pub fn is_async(self) -> bool {
+ matches!(self, Async::Yes { .. })
+ }
+
+ /// If this is `Async::Yes`, returns the `NodeId` of the generated `impl Trait` item
+ /// for the `async` return type.
+ pub fn opt_return_id(self) -> Option<NodeId> {
+ match self {
+ Async::Yes { return_impl_trait_id, .. } => Some(return_impl_trait_id),
+ Async::No => None,
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Encodable, Decodable, Debug)]
+#[derive(HashStable_Generic)]
+pub enum Const {
+ Yes(Span),
+ No,
+}
+
+/// Item defaultness.
+/// For details see the [RFC #2532](https://github.com/rust-lang/rfcs/pull/2532).
+#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum Defaultness {
+ Default(Span),
+ Final,
+}
+
+#[derive(Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
+pub enum ImplPolarity {
+ /// `impl Trait for Type`
+ Positive,
+ /// `impl !Trait for Type`
+ Negative(Span),
+}
+
+impl fmt::Debug for ImplPolarity {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ ImplPolarity::Positive => "positive".fmt(f),
+ ImplPolarity::Negative(_) => "negative".fmt(f),
+ }
+ }
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum FnRetTy {
+ /// Return type is not specified.
+ ///
+ /// Functions default to `()` and closures default to inference.
+ /// The span points to where the return type would be inserted.
+ Default(Span),
+ /// Everything else.
+ Ty(P<Ty>),
+}
+
+impl FnRetTy {
+ pub fn span(&self) -> Span {
+ match *self {
+ FnRetTy::Default(span) => span,
+ FnRetTy::Ty(ref ty) => ty.span,
+ }
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug)]
+pub enum Inline {
+ Yes,
+ No,
+}
+
+/// Module item kind.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum ModKind {
+ /// Module with inlined definition `mod foo { ... }`,
+ /// or with definition outlined to a separate file `mod foo;` and already loaded from it.
+/// The inner span is from the first token after `{` to the last token before `}`,
+/// or from the first to the last token in the loaded file.
+ Loaded(Vec<P<Item>>, Inline, ModSpans),
+ /// Module with definition outlined to a separate file `mod foo;` but not yet loaded from it.
+ Unloaded,
+}
+
+#[derive(Copy, Clone, Encodable, Decodable, Debug)]
+pub struct ModSpans {
+ /// `inner_span` covers the body of the module; for a file module, it's the whole file.
+ /// For an inline module, it's the span inside the `{ ... }`, not including the curly braces.
+ pub inner_span: Span,
+ pub inject_use_span: Span,
+}
+
+impl Default for ModSpans {
+ fn default() -> ModSpans {
+ ModSpans { inner_span: Default::default(), inject_use_span: Default::default() }
+ }
+}
+
+/// Foreign module declaration.
+///
+/// E.g., `extern { .. }` or `extern "C" { .. }`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct ForeignMod {
+ /// The `unsafe` keyword is accepted syntactically for macro DSLs, but not
+ /// semantically by Rust.
+ pub unsafety: Unsafe,
+ pub abi: Option<StrLit>,
+ pub items: Vec<P<ForeignItem>>,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct EnumDef {
+ pub variants: Vec<Variant>,
+}
+
+/// Enum variant.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Variant {
+ /// Attributes of the variant.
+ pub attrs: AttrVec,
+ /// Id of the variant (not the constructor, see `VariantData::ctor_id()`).
+ pub id: NodeId,
+ /// Span of the variant definition.
+ pub span: Span,
+ /// The visibility of the variant. Syntactically accepted but not semantically.
+ pub vis: Visibility,
+ /// Name of the variant.
+ pub ident: Ident,
+
+ /// Fields and constructor id of the variant.
+ pub data: VariantData,
+ /// Explicit discriminant, e.g., `Foo = 1`.
+ pub disr_expr: Option<AnonConst>,
+ /// Is a macro placeholder
+ pub is_placeholder: bool,
+}
+
+/// Part of `use` item to the right of its prefix.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum UseTreeKind {
+ /// `use prefix` or `use prefix as rename`
+ ///
+ /// The extra `NodeId`s are for HIR lowering, when additional statements are created for each
+ /// namespace.
+ Simple(Option<Ident>, NodeId, NodeId),
+ /// `use prefix::{...}`
+ Nested(Vec<(UseTree, NodeId)>),
+ /// `use prefix::*`
+ Glob,
+}
+
+/// A tree of paths sharing common prefixes.
+/// Used in `use` items both at top-level and inside of braces in import groups.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct UseTree {
+ pub prefix: Path,
+ pub kind: UseTreeKind,
+ pub span: Span,
+}
+
+impl UseTree {
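+ /// Returns the name this simple import is bound to: the rename in
+ /// `use prefix as rename`, or the last segment of the prefix otherwise.
+ /// Panics when called on a glob or nested import.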
+ pub fn ident(&self) -> Ident {
+ match self.kind {
+ UseTreeKind::Simple(Some(rename), ..) => rename,
+ UseTreeKind::Simple(None, ..) => {
+ self.prefix.segments.last().expect("empty prefix in a simple import").ident
+ }
+ _ => panic!("`UseTree::ident` can only be used on a simple import"),
+ }
+ }
+}
+
+/// Distinguishes between `Attribute`s that decorate items and `Attribute`s that
+/// are contained as statements within items. These two cases need to be
+/// distinguished for pretty-printing.
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy, HashStable_Generic)]
+pub enum AttrStyle {
+ Outer,
+ Inner,
+}
+
+rustc_index::newtype_index! {
+ pub struct AttrId {
+ ENCODABLE = custom
+ DEBUG_FORMAT = "AttrId({})"
+ }
+}
+
+impl<S: Encoder> Encodable<S> for AttrId {
+ fn encode(&self, _s: &mut S) {}
+}
+
+impl<D: Decoder> Decodable<D> for AttrId {
+ fn decode(_: &mut D) -> AttrId {
+ crate::attr::mk_attr_id()
+ }
+}
+
+#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
+pub struct AttrItem {
+ pub path: Path,
+ pub args: MacArgs,
+ pub tokens: Option<LazyTokenStream>,
+}
+
+/// A list of attributes.
+pub type AttrVec = ThinVec<Attribute>;
+
+/// Metadata associated with an item.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Attribute {
+ pub kind: AttrKind,
+ pub id: AttrId,
+ /// Denotes if the attribute decorates the following construct (outer)
+ /// or the construct this attribute is contained within (inner).
+ pub style: AttrStyle,
+ pub span: Span,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum AttrKind {
+ /// A normal attribute.
+ Normal(AttrItem, Option<LazyTokenStream>),
+
+ /// A doc comment (e.g. `/// ...`, `//! ...`, `/** ... */`, `/*! ... */`).
+ /// Doc attributes (e.g. `#[doc="..."]`) are represented with the `Normal`
+ /// variant (which is much less compact and thus more expensive).
+ DocComment(CommentKind, Symbol),
+}
+
+/// `TraitRef`s appear in impls.
+///
+/// Resolution maps each `TraitRef`'s `ref_id` to its defining trait; that's all
+/// that the `ref_id` is for. The `impl_id` maps to the "self type" of this impl.
+/// If this impl is an `ItemKind::Impl`, the `impl_id` is redundant (it could be the
+/// same as the impl's `NodeId`).
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct TraitRef {
+ pub path: Path,
+ pub ref_id: NodeId,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct PolyTraitRef {
+ /// The `'a` in `for<'a> Foo<&'a T>`.
+ pub bound_generic_params: Vec<GenericParam>,
+
+ /// The `Foo<&'a T>` in `for<'a> Foo<&'a T>`.
+ pub trait_ref: TraitRef,
+
+ pub span: Span,
+}
+
+impl PolyTraitRef {
+ pub fn new(generic_params: Vec<GenericParam>, path: Path, span: Span) -> Self {
+ PolyTraitRef {
+ bound_generic_params: generic_params,
+ trait_ref: TraitRef { path, ref_id: DUMMY_NODE_ID },
+ span,
+ }
+ }
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Visibility {
+ pub kind: VisibilityKind,
+ pub span: Span,
+ pub tokens: Option<LazyTokenStream>,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum VisibilityKind {
+ Public,
+ Restricted { path: P<Path>, id: NodeId },
+ Inherited,
+}
+
+impl VisibilityKind {
+ pub fn is_pub(&self) -> bool {
+ matches!(self, VisibilityKind::Public)
+ }
+}
+
+/// Field definition in a struct, variant or union.
+///
+/// E.g., `bar: usize` as in `struct Foo { bar: usize }`.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct FieldDef {
+ pub attrs: AttrVec,
+ pub id: NodeId,
+ pub span: Span,
+ pub vis: Visibility,
+ pub ident: Option<Ident>,
+
+ pub ty: P<Ty>,
+ pub is_placeholder: bool,
+}
+
+/// Fields and constructor ids of enum variants and structs.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum VariantData {
+ /// Struct variant.
+ ///
+ /// E.g., `Bar { .. }` as in `enum Foo { Bar { .. } }`.
+ Struct(Vec<FieldDef>, bool),
+ /// Tuple variant.
+ ///
+ /// E.g., `Bar(..)` as in `enum Foo { Bar(..) }`.
+ Tuple(Vec<FieldDef>, NodeId),
+ /// Unit variant.
+ ///
+ /// E.g., `Bar = ..` as in `enum Foo { Bar = .. }`.
+ Unit(NodeId),
+}
+
+impl VariantData {
+ /// Return the fields of this variant.
+ pub fn fields(&self) -> &[FieldDef] {
+ match *self {
+ VariantData::Struct(ref fields, ..) | VariantData::Tuple(ref fields, _) => fields,
+ _ => &[],
+ }
+ }
+
+ /// Return the `NodeId` of this variant's constructor, if it has one.
+ pub fn ctor_id(&self) -> Option<NodeId> {
+ match *self {
+ VariantData::Struct(..) => None,
+ VariantData::Tuple(_, id) | VariantData::Unit(id) => Some(id),
+ }
+ }
+}
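
A quick map from surface syntax to the three shapes (illustrative; the `bool` in `Struct` is, to our reading, a parse-recovery flag):

```rust
// Each variant below parses to one `VariantData` shape.
enum Shape {
    Struct { x: u8 }, // VariantData::Struct(fields, _): ctor_id() is None
    Tuple(u8),        // VariantData::Tuple(fields, id): ctor_id() is Some(id)
    Unit,             // VariantData::Unit(id): no fields, ctor_id() is Some(id)
}

fn main() {
    let _ = (Shape::Struct { x: 1 }, Shape::Tuple(2), Shape::Unit);
}
```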
+
+/// An item definition.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Item<K = ItemKind> {
+ pub attrs: Vec<Attribute>,
+ pub id: NodeId,
+ pub span: Span,
+ pub vis: Visibility,
+ /// The name of the item.
+ /// It might be a dummy name in case of anonymous items.
+ pub ident: Ident,
+
+ pub kind: K,
+
+ /// Original tokens this item was parsed from. This isn't necessarily
+ /// available for all items, although over time more and more items should
+ /// have this be `Some`. Right now this is primarily used for procedural
+ /// macros, notably custom attributes.
+ ///
+ /// Note that the tokens here do not include the outer attributes, but will
+ /// include inner attributes.
+ pub tokens: Option<LazyTokenStream>,
+}
+
+impl Item {
+ /// Return the span that encompasses the attributes.
+ pub fn span_with_attributes(&self) -> Span {
+ self.attrs.iter().fold(self.span, |acc, attr| acc.to(attr.span))
+ }
+}
+
+/// `extern` qualifier on a function item or function type.
+#[derive(Clone, Copy, Encodable, Decodable, Debug)]
+pub enum Extern {
+ None,
+ Implicit(Span),
+ Explicit(StrLit, Span),
+}
+
+impl Extern {
+ pub fn from_abi(abi: Option<StrLit>, span: Span) -> Extern {
+ match abi {
+ Some(name) => Extern::Explicit(name, span),
+ None => Extern::Implicit(span),
+ }
+ }
+}
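
The mapping to surface syntax, as a minimal sketch:

```rust
// One function per `Extern` variant.
fn plain() {}               // Extern::None
extern fn implicit() {}     // Extern::Implicit(_): the ABI defaults to "C"
extern "C" fn explicit() {} // Extern::Explicit("C", _)

fn main() {
    plain();
    implicit();
    explicit();
}
```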
+
+/// A function header.
+///
+/// All the information between the visibility and the name of the function is
+/// included in this struct (e.g., `async unsafe fn` or `const extern "C" fn`).
+#[derive(Clone, Copy, Encodable, Decodable, Debug)]
+pub struct FnHeader {
+ pub unsafety: Unsafe,
+ pub asyncness: Async,
+ pub constness: Const,
+ pub ext: Extern,
+}
+
+impl FnHeader {
+ /// Does this function header have any qualifiers or is it empty?
+ pub fn has_qualifiers(&self) -> bool {
+ let Self { unsafety, asyncness, constness, ext } = self;
+ matches!(unsafety, Unsafe::Yes(_))
+ || asyncness.is_async()
+ || matches!(constness, Const::Yes(_))
+ || !matches!(ext, Extern::None)
+ }
+}
+
+impl Default for FnHeader {
+ fn default() -> FnHeader {
+ FnHeader {
+ unsafety: Unsafe::No,
+ asyncness: Async::No,
+ constness: Const::No,
+ ext: Extern::None,
+ }
+ }
+}
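
For example, these two declarations populate the header differently (a sketch; `has_qualifiers` is the method above):

```rust
fn unqualified() {}            // FnHeader::default(): has_qualifiers() == false
const unsafe fn qualified() {} // constness and unsafety set: has_qualifiers() == true

fn main() {
    unqualified();
    unsafe { qualified() };
}
```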
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Trait {
+ pub unsafety: Unsafe,
+ pub is_auto: IsAuto,
+ pub generics: Generics,
+ pub bounds: GenericBounds,
+ pub items: Vec<P<AssocItem>>,
+}
+
+/// The location of a where clause on a `TyAlias` (`Span`) and whether there was
+/// a `where` keyword (`bool`). This is split out from `WhereClause`, since there
+/// are two possible locations for a where clause on type aliases, but their predicates
+/// are concatenated together.
+///
+/// Take this example:
+/// ```ignore (only-for-syntax-highlight)
+/// trait Foo {
+///     type Assoc<'a, 'b> where Self: 'a, Self: 'b;
+/// }
+/// impl Foo for () {
+///     type Assoc<'a, 'b> where Self: 'a = () where Self: 'b;
+///     //                 ^^^^^^^^^^^^^^ first where clause
+///     //                                     ^^^^^^^^^^^^^^ second where clause
+/// }
+/// ```
+///
+/// If there is no where clause, then this is `false` with `DUMMY_SP`.
+#[derive(Copy, Clone, Encodable, Decodable, Debug, Default)]
+pub struct TyAliasWhereClause(pub bool, pub Span);
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct TyAlias {
+ pub defaultness: Defaultness,
+ pub generics: Generics,
+ /// The span information for the two where clauses (before equals, after equals)
+ pub where_clauses: (TyAliasWhereClause, TyAliasWhereClause),
+ /// The index in `generics.where_clause.predicates` at which the predicates
+ /// split: entries before the index come from the where clause before the
+ /// equals sign, and entries from the index on come from the one after it.
+ pub where_predicates_split: usize,
+ pub bounds: GenericBounds,
+ pub ty: Option<P<Ty>>,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Impl {
+ pub defaultness: Defaultness,
+ pub unsafety: Unsafe,
+ pub generics: Generics,
+ pub constness: Const,
+ pub polarity: ImplPolarity,
+ /// The trait being implemented, if any.
+ pub of_trait: Option<TraitRef>,
+ pub self_ty: P<Ty>,
+ pub items: Vec<P<AssocItem>>,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct Fn {
+ pub defaultness: Defaultness,
+ pub generics: Generics,
+ pub sig: FnSig,
+ pub body: Option<P<Block>>,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum ItemKind {
+ /// An `extern crate` item, with the optional *original* crate name if the crate was renamed.
+ ///
+ /// E.g., `extern crate foo` or `extern crate foo_bar as foo`.
+ ExternCrate(Option<Symbol>),
+ /// A use declaration item (`use`).
+ ///
+ /// E.g., `use foo;`, `use foo::bar;` or `use foo::bar as FooBar;`.
+ Use(UseTree),
+ /// A static item (`static`).
+ ///
+ /// E.g., `static FOO: i32 = 42;` or `static FOO: &'static str = "bar";`.
+ Static(P<Ty>, Mutability, Option<P<Expr>>),
+ /// A constant item (`const`).
+ ///
+ /// E.g., `const FOO: i32 = 42;`.
+ Const(Defaultness, P<Ty>, Option<P<Expr>>),
+ /// A function declaration (`fn`).
+ ///
+ /// E.g., `fn foo(bar: usize) -> usize { .. }`.
+ Fn(Box<Fn>),
+ /// A module declaration (`mod`).
+ ///
+ /// E.g., `mod foo;` or `mod foo { .. }`.
+ /// `unsafe` keyword on modules is accepted syntactically for macro DSLs, but not
+ /// semantically by Rust.
+ Mod(Unsafe, ModKind),
+ /// An external module (`extern`).
+ ///
+ /// E.g., `extern {}` or `extern "C" {}`.
+ ForeignMod(ForeignMod),
+ /// Module-level inline assembly (from `global_asm!()`).
+ GlobalAsm(Box<InlineAsm>),
+ /// A type alias (`type`).
+ ///
+ /// E.g., `type Foo = Bar<u8>;`.
+ TyAlias(Box<TyAlias>),
+ /// An enum definition (`enum`).
+ ///
+ /// E.g., `enum Foo<A, B> { C<A>, D<B> }`.
+ Enum(EnumDef, Generics),
+ /// A struct definition (`struct`).
+ ///
+ /// E.g., `struct Foo<A> { x: A }`.
+ Struct(VariantData, Generics),
+ /// A union definition (`union`).
+ ///
+ /// E.g., `union Foo<A, B> { x: A, y: B }`.
+ Union(VariantData, Generics),
+ /// A trait declaration (`trait`).
+ ///
+ /// E.g., `trait Foo { .. }`, `trait Foo<T> { .. }` or `auto trait Foo {}`.
+ Trait(Box<Trait>),
+ /// Trait alias
+ ///
+ /// E.g., `trait Foo = Bar + Quux;`.
+ TraitAlias(Generics, GenericBounds),
+ /// An implementation.
+ ///
+ /// E.g., `impl<A> Foo<A> { .. }` or `impl<A> Trait for Foo<A> { .. }`.
+ Impl(Box<Impl>),
+ /// A macro invocation.
+ ///
+ /// E.g., `foo!(..)`.
+ MacCall(MacCall),
+
+ /// A macro definition.
+ MacroDef(MacroDef),
+}
+
+impl ItemKind {
+ pub fn article(&self) -> &str {
+ use ItemKind::*;
+ match self {
+ Use(..) | Static(..) | Const(..) | Fn(..) | Mod(..) | GlobalAsm(..) | TyAlias(..)
+ | Struct(..) | Union(..) | Trait(..) | TraitAlias(..) | MacroDef(..) => "a",
+ ExternCrate(..) | ForeignMod(..) | MacCall(..) | Enum(..) | Impl { .. } => "an",
+ }
+ }
+
+ pub fn descr(&self) -> &str {
+ match self {
+ ItemKind::ExternCrate(..) => "extern crate",
+ ItemKind::Use(..) => "`use` import",
+ ItemKind::Static(..) => "static item",
+ ItemKind::Const(..) => "constant item",
+ ItemKind::Fn(..) => "function",
+ ItemKind::Mod(..) => "module",
+ ItemKind::ForeignMod(..) => "extern block",
+ ItemKind::GlobalAsm(..) => "global asm item",
+ ItemKind::TyAlias(..) => "type alias",
+ ItemKind::Enum(..) => "enum",
+ ItemKind::Struct(..) => "struct",
+ ItemKind::Union(..) => "union",
+ ItemKind::Trait(..) => "trait",
+ ItemKind::TraitAlias(..) => "trait alias",
+ ItemKind::MacCall(..) => "item macro invocation",
+ ItemKind::MacroDef(..) => "macro definition",
+ ItemKind::Impl { .. } => "implementation",
+ }
+ }
+
+ pub fn generics(&self) -> Option<&Generics> {
+ match self {
+ Self::Fn(box Fn { generics, .. })
+ | Self::TyAlias(box TyAlias { generics, .. })
+ | Self::Enum(_, generics)
+ | Self::Struct(_, generics)
+ | Self::Union(_, generics)
+ | Self::Trait(box Trait { generics, .. })
+ | Self::TraitAlias(generics, _)
+ | Self::Impl(box Impl { generics, .. }) => Some(generics),
+ _ => None,
+ }
+ }
+}
+
+/// Represents associated items.
+/// These include items in `impl` and `trait` definitions.
+pub type AssocItem = Item<AssocItemKind>;
+
+/// Represents associated item kinds.
+///
+/// The term "provided" in the variants below refers to the item having a default
+/// definition / body. Meanwhile, a "required" item lacks a definition / body.
+/// In an implementation, all items must be provided.
+/// The `Option`s below denote the bodies, where `Some(_)`
+/// means "provided" and conversely `None` means "required".
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum AssocItemKind {
+ /// An associated constant, `const $ident: $ty $def?;` where `def ::= "=" $expr? ;`.
+ /// If `def` is parsed, then the constant is provided, and otherwise required.
+ Const(Defaultness, P<Ty>, Option<P<Expr>>),
+ /// An associated function.
+ Fn(Box<Fn>),
+ /// An associated type.
+ TyAlias(Box<TyAlias>),
+ /// A macro expanding to associated items.
+ MacCall(MacCall),
+}
+
+impl AssocItemKind {
+ pub fn defaultness(&self) -> Defaultness {
+ match *self {
+ Self::Const(defaultness, ..)
+ | Self::Fn(box Fn { defaultness, .. })
+ | Self::TyAlias(box TyAlias { defaultness, .. }) => defaultness,
+ Self::MacCall(..) => Defaultness::Final,
+ }
+ }
+}
+
+impl From<AssocItemKind> for ItemKind {
+ fn from(assoc_item_kind: AssocItemKind) -> ItemKind {
+ match assoc_item_kind {
+ AssocItemKind::Const(a, b, c) => ItemKind::Const(a, b, c),
+ AssocItemKind::Fn(fn_kind) => ItemKind::Fn(fn_kind),
+ AssocItemKind::TyAlias(ty_alias_kind) => ItemKind::TyAlias(ty_alias_kind),
+ AssocItemKind::MacCall(a) => ItemKind::MacCall(a),
+ }
+ }
+}
+
+impl TryFrom<ItemKind> for AssocItemKind {
+ type Error = ItemKind;
+
+ fn try_from(item_kind: ItemKind) -> Result<AssocItemKind, ItemKind> {
+ Ok(match item_kind {
+ ItemKind::Const(a, b, c) => AssocItemKind::Const(a, b, c),
+ ItemKind::Fn(fn_kind) => AssocItemKind::Fn(fn_kind),
+ ItemKind::TyAlias(ty_alias_kind) => AssocItemKind::TyAlias(ty_alias_kind),
+ ItemKind::MacCall(a) => AssocItemKind::MacCall(a),
+ _ => return Err(item_kind),
+ })
+ }
+}
+
+/// An item in an `extern` block.
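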
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub enum ForeignItemKind {
+ /// A foreign static item (`static FOO: u8`).
+ Static(P<Ty>, Mutability, Option<P<Expr>>),
+ /// A foreign function.
+ Fn(Box<Fn>),
+ /// A foreign type.
+ TyAlias(Box<TyAlias>),
+ /// A macro expanding to foreign items.
+ MacCall(MacCall),
+}
+
+impl From<ForeignItemKind> for ItemKind {
+ fn from(foreign_item_kind: ForeignItemKind) -> ItemKind {
+ match foreign_item_kind {
+ ForeignItemKind::Static(a, b, c) => ItemKind::Static(a, b, c),
+ ForeignItemKind::Fn(fn_kind) => ItemKind::Fn(fn_kind),
+ ForeignItemKind::TyAlias(ty_alias_kind) => ItemKind::TyAlias(ty_alias_kind),
+ ForeignItemKind::MacCall(a) => ItemKind::MacCall(a),
+ }
+ }
+}
+
+impl TryFrom<ItemKind> for ForeignItemKind {
+ type Error = ItemKind;
+
+ fn try_from(item_kind: ItemKind) -> Result<ForeignItemKind, ItemKind> {
+ Ok(match item_kind {
+ ItemKind::Static(a, b, c) => ForeignItemKind::Static(a, b, c),
+ ItemKind::Fn(fn_kind) => ForeignItemKind::Fn(fn_kind),
+ ItemKind::TyAlias(ty_alias_kind) => ForeignItemKind::TyAlias(ty_alias_kind),
+ ItemKind::MacCall(a) => ForeignItemKind::MacCall(a),
+ _ => return Err(item_kind),
+ })
+ }
+}
+
+pub type ForeignItem = Item<ForeignItemKind>;
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ // These are in alphabetical order, which is easy to maintain.
+ rustc_data_structures::static_assert_size!(AssocItemKind, 72);
+ rustc_data_structures::static_assert_size!(Attribute, 152);
+ rustc_data_structures::static_assert_size!(Block, 48);
+ rustc_data_structures::static_assert_size!(Expr, 104);
+ rustc_data_structures::static_assert_size!(Fn, 192);
+ rustc_data_structures::static_assert_size!(ForeignItemKind, 72);
+ rustc_data_structures::static_assert_size!(GenericBound, 88);
+ rustc_data_structures::static_assert_size!(Generics, 72);
+ rustc_data_structures::static_assert_size!(Impl, 200);
+ rustc_data_structures::static_assert_size!(Item, 200);
+ rustc_data_structures::static_assert_size!(ItemKind, 112);
+ rustc_data_structures::static_assert_size!(Lit, 48);
+ rustc_data_structures::static_assert_size!(Pat, 120);
+ rustc_data_structures::static_assert_size!(Path, 40);
+ rustc_data_structures::static_assert_size!(PathSegment, 24);
+ rustc_data_structures::static_assert_size!(Stmt, 32);
+ rustc_data_structures::static_assert_size!(Ty, 96);
+}
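
The mechanism behind `static_assert_size!` can be modeled on stable Rust with a compile-time assertion; this is a sketch of the idea (assuming a 64-bit target), not the macro's actual expansion:

```rust
// Fails at compile time, not at runtime, if the watched type grows.
const _: () = assert!(std::mem::size_of::<Option<Box<u32>>>() == 8);

fn main() {}
```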
diff --git a/compiler/rustc_ast/src/ast_traits.rs b/compiler/rustc_ast/src/ast_traits.rs
new file mode 100644
index 000000000..5c30a75a1
--- /dev/null
+++ b/compiler/rustc_ast/src/ast_traits.rs
@@ -0,0 +1,442 @@
+//! A set of traits implemented for various AST nodes,
+//! typically those used in AST fragments during macro expansion.
+//! The traits are not implemented exhaustively, only when actually necessary.
+
+use crate::ptr::P;
+use crate::token::Nonterminal;
+use crate::tokenstream::LazyTokenStream;
+use crate::{Arm, Crate, ExprField, FieldDef, GenericParam, Param, PatField, Variant};
+use crate::{AssocItem, Expr, ForeignItem, Item, NodeId};
+use crate::{AttrItem, AttrKind, Block, Pat, Path, Ty, Visibility};
+use crate::{AttrVec, Attribute, Stmt, StmtKind};
+
+use rustc_span::Span;
+
+use std::fmt;
+use std::marker::PhantomData;
+
+/// A utility trait to reduce boilerplate.
+/// Standard `Deref(Mut)` cannot be reused due to coherence.
+pub trait AstDeref {
+ type Target;
+ fn ast_deref(&self) -> &Self::Target;
+ fn ast_deref_mut(&mut self) -> &mut Self::Target;
+}
+
+macro_rules! impl_not_ast_deref {
+ ($($T:ty),+ $(,)?) => {
+ $(
+ impl !AstDeref for $T {}
+ )+
+ };
+}
+
+impl_not_ast_deref!(AssocItem, Expr, ForeignItem, Item, Stmt);
+
+impl<T> AstDeref for P<T> {
+ type Target = T;
+ fn ast_deref(&self) -> &Self::Target {
+ self
+ }
+ fn ast_deref_mut(&mut self) -> &mut Self::Target {
+ self
+ }
+}
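
A minimal stable-Rust model of the forwarding pattern, with hypothetical names (`AstDerefModel`, `Ptr`, `target_of`). Note that the real crate's blanket impls, such as `HasSpan for T: AstDeref<Target: HasSpan>` below, stay coherent with the per-type impls only thanks to the `impl !AstDeref for ...` negative impls above:

```rust
trait AstDerefModel {
    type Target;
    fn ast_deref(&self) -> &Self::Target;
}

struct Ptr<T>(Box<T>); // stand-in for the AST's owning pointer `P<T>`

impl<T> AstDerefModel for Ptr<T> {
    type Target = T;
    fn ast_deref(&self) -> &T {
        &self.0 // deref-coerced from `&Box<T>`
    }
}

// Anything reachable through the local deref trait can be forwarded to,
// without touching `std::ops::Deref` (and thus without coherence clashes).
fn target_of<W: AstDerefModel>(w: &W) -> &W::Target {
    w.ast_deref()
}

fn main() {
    let p = Ptr(Box::new(41usize));
    assert_eq!(*target_of(&p) + 1, 42);
}
```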
+
+/// A trait for AST nodes having an ID.
+pub trait HasNodeId {
+ fn node_id(&self) -> NodeId;
+ fn node_id_mut(&mut self) -> &mut NodeId;
+}
+
+macro_rules! impl_has_node_id {
+ ($($T:ty),+ $(,)?) => {
+ $(
+ impl HasNodeId for $T {
+ fn node_id(&self) -> NodeId {
+ self.id
+ }
+ fn node_id_mut(&mut self) -> &mut NodeId {
+ &mut self.id
+ }
+ }
+ )+
+ };
+}
+
+impl_has_node_id!(
+ Arm,
+ AssocItem,
+ Crate,
+ Expr,
+ ExprField,
+ FieldDef,
+ ForeignItem,
+ GenericParam,
+ Item,
+ Param,
+ Pat,
+ PatField,
+ Stmt,
+ Ty,
+ Variant,
+);
+
+impl<T: AstDeref<Target: HasNodeId>> HasNodeId for T {
+ fn node_id(&self) -> NodeId {
+ self.ast_deref().node_id()
+ }
+ fn node_id_mut(&mut self) -> &mut NodeId {
+ self.ast_deref_mut().node_id_mut()
+ }
+}
+
+/// A trait for AST nodes having a span.
+pub trait HasSpan {
+ fn span(&self) -> Span;
+}
+
+macro_rules! impl_has_span {
+ ($($T:ty),+ $(,)?) => {
+ $(
+ impl HasSpan for $T {
+ fn span(&self) -> Span {
+ self.span
+ }
+ }
+ )+
+ };
+}
+
+impl_has_span!(AssocItem, Block, Expr, ForeignItem, Item, Pat, Path, Stmt, Ty, Visibility);
+
+impl<T: AstDeref<Target: HasSpan>> HasSpan for T {
+ fn span(&self) -> Span {
+ self.ast_deref().span()
+ }
+}
+
+impl HasSpan for AttrItem {
+ fn span(&self) -> Span {
+ // Not infinite recursion: inherent methods take precedence over trait
+ // methods, so this resolves to the inherent `AttrItem::span` in `attr`.
+ self.span()
+ }
+}
+
+/// A trait for AST nodes having (or not having) collected tokens.
+pub trait HasTokens {
+ fn tokens(&self) -> Option<&LazyTokenStream>;
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>>;
+}
+
+macro_rules! impl_has_tokens {
+ ($($T:ty),+ $(,)?) => {
+ $(
+ impl HasTokens for $T {
+ fn tokens(&self) -> Option<&LazyTokenStream> {
+ self.tokens.as_ref()
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ Some(&mut self.tokens)
+ }
+ }
+ )+
+ };
+}
+
+macro_rules! impl_has_tokens_none {
+ ($($T:ty),+ $(,)?) => {
+ $(
+ impl HasTokens for $T {
+ fn tokens(&self) -> Option<&LazyTokenStream> {
+ None
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ None
+ }
+ }
+ )+
+ };
+}
+
+impl_has_tokens!(AssocItem, AttrItem, Block, Expr, ForeignItem, Item, Pat, Path, Ty, Visibility);
+impl_has_tokens_none!(Arm, ExprField, FieldDef, GenericParam, Param, PatField, Variant);
+
+impl<T: AstDeref<Target: HasTokens>> HasTokens for T {
+ fn tokens(&self) -> Option<&LazyTokenStream> {
+ self.ast_deref().tokens()
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ self.ast_deref_mut().tokens_mut()
+ }
+}
+
+impl<T: HasTokens> HasTokens for Option<T> {
+ fn tokens(&self) -> Option<&LazyTokenStream> {
+ self.as_ref().and_then(|inner| inner.tokens())
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ self.as_mut().and_then(|inner| inner.tokens_mut())
+ }
+}
+
+impl HasTokens for StmtKind {
+ fn tokens(&self) -> Option<&LazyTokenStream> {
+ match self {
+ StmtKind::Local(local) => local.tokens.as_ref(),
+ StmtKind::Item(item) => item.tokens(),
+ StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr.tokens(),
+ StmtKind::Empty => return None,
+ StmtKind::MacCall(mac) => mac.tokens.as_ref(),
+ }
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ match self {
+ StmtKind::Local(local) => Some(&mut local.tokens),
+ StmtKind::Item(item) => item.tokens_mut(),
+ StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr.tokens_mut(),
+ StmtKind::Empty => return None,
+ StmtKind::MacCall(mac) => Some(&mut mac.tokens),
+ }
+ }
+}
+
+impl HasTokens for Stmt {
+ fn tokens(&self) -> Option<&LazyTokenStream> {
+ self.kind.tokens()
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ self.kind.tokens_mut()
+ }
+}
+
+impl HasTokens for Attribute {
+ fn tokens(&self) -> Option<&LazyTokenStream> {
+ match &self.kind {
+ AttrKind::Normal(_, tokens) => tokens.as_ref(),
+ kind @ AttrKind::DocComment(..) => {
+ panic!("Called tokens on doc comment attr {:?}", kind)
+ }
+ }
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ Some(match &mut self.kind {
+ AttrKind::Normal(_, tokens) => tokens,
+ kind @ AttrKind::DocComment(..) => {
+ panic!("Called tokens_mut on doc comment attr {:?}", kind)
+ }
+ })
+ }
+}
+
+impl HasTokens for Nonterminal {
+ fn tokens(&self) -> Option<&LazyTokenStream> {
+ match self {
+ Nonterminal::NtItem(item) => item.tokens(),
+ Nonterminal::NtStmt(stmt) => stmt.tokens(),
+ Nonterminal::NtExpr(expr) | Nonterminal::NtLiteral(expr) => expr.tokens(),
+ Nonterminal::NtPat(pat) => pat.tokens(),
+ Nonterminal::NtTy(ty) => ty.tokens(),
+ Nonterminal::NtMeta(attr_item) => attr_item.tokens(),
+ Nonterminal::NtPath(path) => path.tokens(),
+ Nonterminal::NtVis(vis) => vis.tokens(),
+ Nonterminal::NtBlock(block) => block.tokens(),
+ Nonterminal::NtIdent(..) | Nonterminal::NtLifetime(..) => None,
+ }
+ }
+ fn tokens_mut(&mut self) -> Option<&mut Option<LazyTokenStream>> {
+ match self {
+ Nonterminal::NtItem(item) => item.tokens_mut(),
+ Nonterminal::NtStmt(stmt) => stmt.tokens_mut(),
+ Nonterminal::NtExpr(expr) | Nonterminal::NtLiteral(expr) => expr.tokens_mut(),
+ Nonterminal::NtPat(pat) => pat.tokens_mut(),
+ Nonterminal::NtTy(ty) => ty.tokens_mut(),
+ Nonterminal::NtMeta(attr_item) => attr_item.tokens_mut(),
+ Nonterminal::NtPath(path) => path.tokens_mut(),
+ Nonterminal::NtVis(vis) => vis.tokens_mut(),
+ Nonterminal::NtBlock(block) => block.tokens_mut(),
+ Nonterminal::NtIdent(..) | Nonterminal::NtLifetime(..) => None,
+ }
+ }
+}
+
+/// A trait for AST nodes having (or not having) attributes.
+pub trait HasAttrs {
+ /// This is `true` if this `HasAttrs` might support 'custom' (proc-macro) inner
+ /// attributes. Attributes like `#![cfg]` and `#![cfg_attr]` are not
+ /// considered 'custom' attributes.
+ ///
+ /// If this is `false`, then this `HasAttrs` definitely does
+ /// not support 'custom' inner attributes, which enables some optimizations
+ /// during token collection.
+ const SUPPORTS_CUSTOM_INNER_ATTRS: bool;
+ fn attrs(&self) -> &[Attribute];
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>));
+}
+
+macro_rules! impl_has_attrs {
+ (const SUPPORTS_CUSTOM_INNER_ATTRS: bool = $inner:literal, $($T:ty),+ $(,)?) => {
+ $(
+ impl HasAttrs for $T {
+ const SUPPORTS_CUSTOM_INNER_ATTRS: bool = $inner;
+
+ fn attrs(&self) -> &[Attribute] {
+ &self.attrs
+ }
+
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ VecOrAttrVec::visit(&mut self.attrs, f)
+ }
+ }
+ )+
+ };
+}
+
+macro_rules! impl_has_attrs_none {
+ ($($T:ty),+ $(,)?) => {
+ $(
+ impl HasAttrs for $T {
+ const SUPPORTS_CUSTOM_INNER_ATTRS: bool = false;
+ fn attrs(&self) -> &[Attribute] {
+ &[]
+ }
+ fn visit_attrs(&mut self, _f: impl FnOnce(&mut Vec<Attribute>)) {}
+ }
+ )+
+ };
+}
+
+impl_has_attrs!(
+ const SUPPORTS_CUSTOM_INNER_ATTRS: bool = true,
+ AssocItem,
+ ForeignItem,
+ Item,
+);
+impl_has_attrs!(
+ const SUPPORTS_CUSTOM_INNER_ATTRS: bool = false,
+ Arm,
+ Crate,
+ Expr,
+ ExprField,
+ FieldDef,
+ GenericParam,
+ Param,
+ PatField,
+ Variant,
+);
+impl_has_attrs_none!(Attribute, AttrItem, Block, Pat, Path, Ty, Visibility);
+
+impl<T: AstDeref<Target: HasAttrs>> HasAttrs for T {
+ const SUPPORTS_CUSTOM_INNER_ATTRS: bool = T::Target::SUPPORTS_CUSTOM_INNER_ATTRS;
+ fn attrs(&self) -> &[Attribute] {
+ self.ast_deref().attrs()
+ }
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ self.ast_deref_mut().visit_attrs(f)
+ }
+}
+
+impl<T: HasAttrs> HasAttrs for Option<T> {
+ const SUPPORTS_CUSTOM_INNER_ATTRS: bool = T::SUPPORTS_CUSTOM_INNER_ATTRS;
+ fn attrs(&self) -> &[Attribute] {
+ self.as_ref().map(|inner| inner.attrs()).unwrap_or(&[])
+ }
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ if let Some(inner) = self.as_mut() {
+ inner.visit_attrs(f);
+ }
+ }
+}
+
+impl HasAttrs for StmtKind {
+ // This might be a `StmtKind::Item`, which contains
+ // an item that supports inner attrs.
+ const SUPPORTS_CUSTOM_INNER_ATTRS: bool = true;
+
+ fn attrs(&self) -> &[Attribute] {
+ match self {
+ StmtKind::Local(local) => &local.attrs,
+ StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr.attrs(),
+ StmtKind::Item(item) => item.attrs(),
+ StmtKind::Empty => &[],
+ StmtKind::MacCall(mac) => &mac.attrs,
+ }
+ }
+
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ match self {
+ StmtKind::Local(local) => visit_attrvec(&mut local.attrs, f),
+ StmtKind::Expr(expr) | StmtKind::Semi(expr) => expr.visit_attrs(f),
+ StmtKind::Item(item) => item.visit_attrs(f),
+ StmtKind::Empty => {}
+ StmtKind::MacCall(mac) => visit_attrvec(&mut mac.attrs, f),
+ }
+ }
+}
+
+impl HasAttrs for Stmt {
+ const SUPPORTS_CUSTOM_INNER_ATTRS: bool = StmtKind::SUPPORTS_CUSTOM_INNER_ATTRS;
+ fn attrs(&self) -> &[Attribute] {
+ self.kind.attrs()
+ }
+ fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ self.kind.visit_attrs(f);
+ }
+}
+
+/// Helper trait for the impls above. Abstracts over
+/// the two types of attribute fields that AST nodes
+/// may have (`Vec<Attribute>` or `AttrVec`).
+trait VecOrAttrVec {
+ fn visit(&mut self, f: impl FnOnce(&mut Vec<Attribute>));
+}
+
+impl VecOrAttrVec for Vec<Attribute> {
+ fn visit(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ f(self)
+ }
+}
+
+impl VecOrAttrVec for AttrVec {
+ fn visit(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ visit_attrvec(self, f)
+ }
+}
+
+fn visit_attrvec(attrs: &mut AttrVec, f: impl FnOnce(&mut Vec<Attribute>)) {
+ crate::mut_visit::visit_clobber(attrs, |attrs| {
+ let mut vec = attrs.into();
+ f(&mut vec);
+ vec.into()
+ });
+}
+
+/// A newtype around an AST node that implements the traits above if the node implements them.
+pub struct AstNodeWrapper<Wrapped, Tag> {
+ pub wrapped: Wrapped,
+ pub tag: PhantomData<Tag>,
+}
+
+impl<Wrapped, Tag> AstNodeWrapper<Wrapped, Tag> {
+ pub fn new(wrapped: Wrapped, _tag: Tag) -> AstNodeWrapper<Wrapped, Tag> {
+ AstNodeWrapper { wrapped, tag: Default::default() }
+ }
+}
+
+impl<Wrapped, Tag> AstDeref for AstNodeWrapper<Wrapped, Tag> {
+ type Target = Wrapped;
+ fn ast_deref(&self) -> &Self::Target {
+ &self.wrapped
+ }
+ fn ast_deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.wrapped
+ }
+}
+
+impl<Wrapped: fmt::Debug, Tag> fmt::Debug for AstNodeWrapper<Wrapped, Tag> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("AstNodeWrapper")
+ .field("wrapped", &self.wrapped)
+ .field("tag", &self.tag)
+ .finish()
+ }
+}
diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs
new file mode 100644
index 000000000..86af7769d
--- /dev/null
+++ b/compiler/rustc_ast/src/attr/mod.rs
@@ -0,0 +1,634 @@
+//! Functions dealing with attributes and meta items.
+
+use crate::ast;
+use crate::ast::{AttrId, AttrItem, AttrKind, AttrStyle, Attribute};
+use crate::ast::{Lit, LitKind};
+use crate::ast::{MacArgs, MacArgsEq, MacDelimiter, MetaItem, MetaItemKind, NestedMetaItem};
+use crate::ast::{Path, PathSegment};
+use crate::ptr::P;
+use crate::token::{self, CommentKind, Delimiter, Token};
+use crate::tokenstream::{AttrAnnotatedTokenStream, AttrAnnotatedTokenTree};
+use crate::tokenstream::{DelimSpan, Spacing, TokenTree};
+use crate::tokenstream::{LazyTokenStream, TokenStream};
+use crate::util::comments;
+
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_index::bit_set::GrowableBitSet;
+use rustc_span::source_map::BytePos;
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::Span;
+
+use std::iter;
+
+pub struct MarkedAttrs(GrowableBitSet<AttrId>);
+
+impl MarkedAttrs {
+ // We have no idea how many attributes there will be, so just
+ // start with an empty bit set and grow it as necessary.
+ pub fn new() -> Self {
+ MarkedAttrs(GrowableBitSet::new_empty())
+ }
+
+ pub fn mark(&mut self, attr: &Attribute) {
+ self.0.insert(attr.id);
+ }
+
+ pub fn is_marked(&self, attr: &Attribute) -> bool {
+ self.0.contains(attr.id)
+ }
+}
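
Outside rustc, the same marking scheme can be modeled with a growable bit vector keyed by the attribute id (a hypothetical, self-contained sketch of what `GrowableBitSet<AttrId>` provides here):

```rust
struct Marked(Vec<bool>); // one bit per attribute id, grown on demand

impl Marked {
    fn new() -> Self {
        Marked(Vec::new())
    }
    fn mark(&mut self, id: u32) {
        let idx = id as usize;
        if idx >= self.0.len() {
            self.0.resize(idx + 1, false); // grow as necessary
        }
        self.0[idx] = true;
    }
    fn is_marked(&self, id: u32) -> bool {
        self.0.get(id as usize).copied().unwrap_or(false)
    }
}

fn main() {
    let mut m = Marked::new();
    m.mark(7);
    assert!(m.is_marked(7));
    assert!(!m.is_marked(3));
}
```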
+
+impl NestedMetaItem {
+ /// Returns the `MetaItem` if `self` is a `NestedMetaItem::MetaItem`.
+ pub fn meta_item(&self) -> Option<&MetaItem> {
+ match *self {
+ NestedMetaItem::MetaItem(ref item) => Some(item),
+ _ => None,
+ }
+ }
+
+ /// Returns the `Lit` if `self` is a `NestedMetaItem::Literal`.
+ pub fn literal(&self) -> Option<&Lit> {
+ match *self {
+ NestedMetaItem::Literal(ref lit) => Some(lit),
+ _ => None,
+ }
+ }
+
+ /// Returns `true` if this list item is a MetaItem with a name of `name`.
+ pub fn has_name(&self, name: Symbol) -> bool {
+ self.meta_item().map_or(false, |meta_item| meta_item.has_name(name))
+ }
+
+ /// For a single-segment meta item, returns its name; otherwise, returns `None`.
+ pub fn ident(&self) -> Option<Ident> {
+ self.meta_item().and_then(|meta_item| meta_item.ident())
+ }
+ pub fn name_or_empty(&self) -> Symbol {
+ self.ident().unwrap_or_else(Ident::empty).name
+ }
+
+ /// Gets the string value if `self` is a `MetaItem` and the `MetaItem` is a
+ /// `MetaItemKind::NameValue` variant containing a string, otherwise `None`.
+ pub fn value_str(&self) -> Option<Symbol> {
+ self.meta_item().and_then(|meta_item| meta_item.value_str())
+ }
+
+ /// Returns the name and the single literal value of the `MetaItem`, if it
+ /// is a list with exactly one literal element.
+ pub fn name_value_literal(&self) -> Option<(Symbol, &Lit)> {
+ self.meta_item().and_then(|meta_item| {
+ meta_item.meta_item_list().and_then(|meta_item_list| {
+ if meta_item_list.len() == 1
+ && let Some(ident) = meta_item.ident()
+ && let Some(lit) = meta_item_list[0].literal()
+ {
+ return Some((ident.name, lit));
+ }
+ None
+ })
+ })
+ }
+
+ /// Gets a list of inner meta items from a list `MetaItem` type.
+ pub fn meta_item_list(&self) -> Option<&[NestedMetaItem]> {
+ self.meta_item().and_then(|meta_item| meta_item.meta_item_list())
+ }
+
+ /// Returns `true` if the variant is `MetaItem`.
+ pub fn is_meta_item(&self) -> bool {
+ self.meta_item().is_some()
+ }
+
+ /// Returns `true` if `self` is a `MetaItem` and the meta item is a word.
+ pub fn is_word(&self) -> bool {
+ self.meta_item().map_or(false, |meta_item| meta_item.is_word())
+ }
+
+ /// See [`MetaItem::name_value_literal_span`].
+ pub fn name_value_literal_span(&self) -> Option<Span> {
+ self.meta_item()?.name_value_literal_span()
+ }
+}
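
To ground the accessors above, here is how the pieces of one attribute map onto `NestedMetaItem` (illustrative):

```rust
// Each comma-separated piece inside `cfg_attr(...)` is a `NestedMetaItem`:
//   `test`           is a word            (is_word() == true)
//   `derive(Debug)`  is a list            (meta_item_list() is Some)
//   `doc = "hidden"` is a name/value pair (value_str() == Some("hidden"))
#[cfg_attr(test, derive(Debug), doc = "hidden")]
struct S;

fn main() {
    let _ = S;
}
```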
+
+impl Attribute {
+ #[inline]
+ pub fn has_name(&self, name: Symbol) -> bool {
+ match self.kind {
+ AttrKind::Normal(ref item, _) => item.path == name,
+ AttrKind::DocComment(..) => false,
+ }
+ }
+
+ /// For a single-segment attribute, returns its name; otherwise, returns `None`.
+ pub fn ident(&self) -> Option<Ident> {
+ match self.kind {
+ AttrKind::Normal(ref item, _) => {
+ if item.path.segments.len() == 1 {
+ Some(item.path.segments[0].ident)
+ } else {
+ None
+ }
+ }
+ AttrKind::DocComment(..) => None,
+ }
+ }
+ pub fn name_or_empty(&self) -> Symbol {
+ self.ident().unwrap_or_else(Ident::empty).name
+ }
+
+ pub fn value_str(&self) -> Option<Symbol> {
+ match self.kind {
+ AttrKind::Normal(ref item, _) => item.meta_kind().and_then(|kind| kind.value_str()),
+ AttrKind::DocComment(..) => None,
+ }
+ }
+
+ pub fn meta_item_list(&self) -> Option<Vec<NestedMetaItem>> {
+ match self.kind {
+ AttrKind::Normal(ref item, _) => match item.meta_kind() {
+ Some(MetaItemKind::List(list)) => Some(list),
+ _ => None,
+ },
+ AttrKind::DocComment(..) => None,
+ }
+ }
+
+ pub fn is_word(&self) -> bool {
+ if let AttrKind::Normal(item, _) = &self.kind {
+ matches!(item.args, MacArgs::Empty)
+ } else {
+ false
+ }
+ }
+}
+
+impl MetaItem {
+ /// For a single-segment meta item, returns its name; otherwise, returns `None`.
+ pub fn ident(&self) -> Option<Ident> {
+ if self.path.segments.len() == 1 { Some(self.path.segments[0].ident) } else { None }
+ }
+ pub fn name_or_empty(&self) -> Symbol {
+ self.ident().unwrap_or_else(Ident::empty).name
+ }
+
+ // Example:
+ //     #[attribute(name = "value")]
+ //                 ^^^^^^^^^^^^^^
+ pub fn name_value_literal(&self) -> Option<&Lit> {
+ match &self.kind {
+ MetaItemKind::NameValue(v) => Some(v),
+ _ => None,
+ }
+ }
+
+ pub fn value_str(&self) -> Option<Symbol> {
+ match self.kind {
+ MetaItemKind::NameValue(ref v) => match v.kind {
+ LitKind::Str(ref s, _) => Some(*s),
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+
+ pub fn meta_item_list(&self) -> Option<&[NestedMetaItem]> {
+ match self.kind {
+ MetaItemKind::List(ref l) => Some(&l[..]),
+ _ => None,
+ }
+ }
+
+ pub fn is_word(&self) -> bool {
+ matches!(self.kind, MetaItemKind::Word)
+ }
+
+ pub fn has_name(&self, name: Symbol) -> bool {
+ self.path == name
+ }
+
+ /// This is used in case you want the value span instead of the whole attribute. Example:
+ ///
+ /// ```text
+ /// #[doc(alias = "foo")]
+ /// ```
+ ///
+ /// In here, it'll return a span for `"foo"`.
+ pub fn name_value_literal_span(&self) -> Option<Span> {
+ Some(self.name_value_literal()?.span)
+ }
+}
+
+impl AttrItem {
+ pub fn span(&self) -> Span {
+ self.args.span().map_or(self.path.span, |args_span| self.path.span.to(args_span))
+ }
+
+ pub fn meta(&self, span: Span) -> Option<MetaItem> {
+ Some(MetaItem {
+ path: self.path.clone(),
+ kind: MetaItemKind::from_mac_args(&self.args)?,
+ span,
+ })
+ }
+
+ pub fn meta_kind(&self) -> Option<MetaItemKind> {
+ MetaItemKind::from_mac_args(&self.args)
+ }
+}
+
+impl Attribute {
+ pub fn is_doc_comment(&self) -> bool {
+ match self.kind {
+ AttrKind::Normal(..) => false,
+ AttrKind::DocComment(..) => true,
+ }
+ }
+
+ pub fn doc_str_and_comment_kind(&self) -> Option<(Symbol, CommentKind)> {
+ match self.kind {
+ AttrKind::DocComment(kind, data) => Some((data, kind)),
+ AttrKind::Normal(ref item, _) if item.path == sym::doc => item
+ .meta_kind()
+ .and_then(|kind| kind.value_str())
+ .map(|data| (data, CommentKind::Line)),
+ _ => None,
+ }
+ }
+
+ pub fn doc_str(&self) -> Option<Symbol> {
+ match self.kind {
+ AttrKind::DocComment(.., data) => Some(data),
+ AttrKind::Normal(ref item, _) if item.path == sym::doc => {
+ item.meta_kind().and_then(|kind| kind.value_str())
+ }
+ _ => None,
+ }
+ }
+
+ pub fn may_have_doc_links(&self) -> bool {
+ self.doc_str().map_or(false, |s| comments::may_have_doc_links(s.as_str()))
+ }
+
+ pub fn get_normal_item(&self) -> &AttrItem {
+ match self.kind {
+ AttrKind::Normal(ref item, _) => item,
+ AttrKind::DocComment(..) => panic!("unexpected doc comment"),
+ }
+ }
+
+ pub fn unwrap_normal_item(self) -> AttrItem {
+ match self.kind {
+ AttrKind::Normal(item, _) => item,
+ AttrKind::DocComment(..) => panic!("unexpected doc comment"),
+ }
+ }
+
+ /// Extracts the MetaItem from inside this Attribute.
+ pub fn meta(&self) -> Option<MetaItem> {
+ match self.kind {
+ AttrKind::Normal(ref item, _) => item.meta(self.span),
+ AttrKind::DocComment(..) => None,
+ }
+ }
+
+ pub fn meta_kind(&self) -> Option<MetaItemKind> {
+ match self.kind {
+ AttrKind::Normal(ref item, _) => item.meta_kind(),
+ AttrKind::DocComment(..) => None,
+ }
+ }
+
+ pub fn tokens(&self) -> AttrAnnotatedTokenStream {
+ match self.kind {
+ AttrKind::Normal(_, ref tokens) => tokens
+ .as_ref()
+ .unwrap_or_else(|| panic!("attribute is missing tokens: {:?}", self))
+ .create_token_stream(),
+ AttrKind::DocComment(comment_kind, data) => AttrAnnotatedTokenStream::from((
+ AttrAnnotatedTokenTree::Token(Token::new(
+ token::DocComment(comment_kind, self.style, data),
+ self.span,
+ )),
+ Spacing::Alone,
+ )),
+ }
+ }
+}
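
The two spellings that `doc_str` and `doc_str_and_comment_kind` above unify, as surface syntax (a sketch):

```rust
/// Stored as `AttrKind::DocComment(CommentKind::Line, ...)`.
#[doc = "Stored as `AttrKind::Normal` with path `doc`."]
struct Documented;

fn main() {
    let _ = Documented;
}
```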
+
+/* Constructors */
+
+pub fn mk_name_value_item_str(ident: Ident, str: Symbol, str_span: Span) -> MetaItem {
+ let lit_kind = LitKind::Str(str, ast::StrStyle::Cooked);
+ mk_name_value_item(ident, lit_kind, str_span)
+}
+
+pub fn mk_name_value_item(ident: Ident, lit_kind: LitKind, lit_span: Span) -> MetaItem {
+ let lit = Lit::from_lit_kind(lit_kind, lit_span);
+ let span = ident.span.to(lit_span);
+ MetaItem { path: Path::from_ident(ident), span, kind: MetaItemKind::NameValue(lit) }
+}
+
+pub fn mk_list_item(ident: Ident, items: Vec<NestedMetaItem>) -> MetaItem {
+ MetaItem { path: Path::from_ident(ident), span: ident.span, kind: MetaItemKind::List(items) }
+}
+
+pub fn mk_word_item(ident: Ident) -> MetaItem {
+ MetaItem { path: Path::from_ident(ident), span: ident.span, kind: MetaItemKind::Word }
+}
+
+pub fn mk_nested_word_item(ident: Ident) -> NestedMetaItem {
+ NestedMetaItem::MetaItem(mk_word_item(ident))
+}
+
+pub(crate) fn mk_attr_id() -> AttrId {
+ use std::sync::atomic::AtomicU32;
+ use std::sync::atomic::Ordering;
+
+ static NEXT_ATTR_ID: AtomicU32 = AtomicU32::new(0);
+
+ let id = NEXT_ATTR_ID.fetch_add(1, Ordering::SeqCst);
+ assert!(id != u32::MAX);
+ AttrId::from_u32(id)
+}
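
The id-minting pattern in isolation (a self-contained sketch): a process-global atomic counter that fails loudly rather than handing out a wrapped-around, duplicate id.

```rust
use std::sync::atomic::{AtomicU32, Ordering};

static NEXT_ID: AtomicU32 = AtomicU32::new(0);

fn mk_id() -> u32 {
    // `fetch_add` returns the previous value, so ids start at 0.
    let id = NEXT_ID.fetch_add(1, Ordering::SeqCst);
    assert!(id != u32::MAX); // panic instead of silently reusing ids
    id
}

fn main() {
    assert_eq!(mk_id(), 0);
    assert_eq!(mk_id(), 1);
}
```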
+
+pub fn mk_attr(style: AttrStyle, path: Path, args: MacArgs, span: Span) -> Attribute {
+ mk_attr_from_item(AttrItem { path, args, tokens: None }, None, style, span)
+}
+
+pub fn mk_attr_from_item(
+ item: AttrItem,
+ tokens: Option<LazyTokenStream>,
+ style: AttrStyle,
+ span: Span,
+) -> Attribute {
+ Attribute { kind: AttrKind::Normal(item, tokens), id: mk_attr_id(), style, span }
+}
+
+/// Returns an inner attribute with the given value and span.
+pub fn mk_attr_inner(item: MetaItem) -> Attribute {
+ mk_attr(AttrStyle::Inner, item.path, item.kind.mac_args(item.span), item.span)
+}
+
+/// Returns an outer attribute with the given value and span.
+pub fn mk_attr_outer(item: MetaItem) -> Attribute {
+ mk_attr(AttrStyle::Outer, item.path, item.kind.mac_args(item.span), item.span)
+}
+
+pub fn mk_doc_comment(
+ comment_kind: CommentKind,
+ style: AttrStyle,
+ data: Symbol,
+ span: Span,
+) -> Attribute {
+ Attribute { kind: AttrKind::DocComment(comment_kind, data), id: mk_attr_id(), style, span }
+}
+
+pub fn list_contains_name(items: &[NestedMetaItem], name: Symbol) -> bool {
+ items.iter().any(|item| item.has_name(name))
+}
+
+impl MetaItem {
+ fn token_trees(&self) -> Vec<TokenTree> {
+ let mut idents = vec![];
+ let mut last_pos = BytePos(0_u32);
+ for (i, segment) in self.path.segments.iter().enumerate() {
+ let is_first = i == 0;
+ if !is_first {
+ let mod_sep_span =
+ Span::new(last_pos, segment.ident.span.lo(), segment.ident.span.ctxt(), None);
+ idents.push(TokenTree::token_alone(token::ModSep, mod_sep_span));
+ }
+ idents.push(TokenTree::Token(Token::from_ast_ident(segment.ident), Spacing::Alone));
+ last_pos = segment.ident.span.hi();
+ }
+ idents.extend(self.kind.token_trees(self.span));
+ idents
+ }
+
+ fn from_tokens<I>(tokens: &mut iter::Peekable<I>) -> Option<MetaItem>
+ where
+ I: Iterator<Item = TokenTree>,
+ {
+ // FIXME: Share code with `parse_path`.
+ let path = match tokens.next().map(TokenTree::uninterpolate) {
+ Some(TokenTree::Token(
+ Token { kind: kind @ (token::Ident(..) | token::ModSep), span },
+ _,
+ )) => 'arm: {
+ let mut segments = if let token::Ident(name, _) = kind {
+ if let Some(TokenTree::Token(Token { kind: token::ModSep, .. }, _)) =
+ tokens.peek()
+ {
+ tokens.next();
+ vec![PathSegment::from_ident(Ident::new(name, span))]
+ } else {
+ break 'arm Path::from_ident(Ident::new(name, span));
+ }
+ } else {
+ vec![PathSegment::path_root(span)]
+ };
+ loop {
+ if let Some(TokenTree::Token(Token { kind: token::Ident(name, _), span }, _)) =
+ tokens.next().map(TokenTree::uninterpolate)
+ {
+ segments.push(PathSegment::from_ident(Ident::new(name, span)));
+ } else {
+ return None;
+ }
+ if let Some(TokenTree::Token(Token { kind: token::ModSep, .. }, _)) =
+ tokens.peek()
+ {
+ tokens.next();
+ } else {
+ break;
+ }
+ }
+ let span = span.with_hi(segments.last().unwrap().ident.span.hi());
+ Path { span, segments, tokens: None }
+ }
+ Some(TokenTree::Token(Token { kind: token::Interpolated(nt), .. }, _)) => match *nt {
+ token::Nonterminal::NtMeta(ref item) => return item.meta(item.path.span),
+ token::Nonterminal::NtPath(ref path) => (**path).clone(),
+ _ => return None,
+ },
+ _ => return None,
+ };
+ let list_closing_paren_pos = tokens.peek().map(|tt| tt.span().hi());
+ let kind = MetaItemKind::from_tokens(tokens)?;
+ let hi = match kind {
+ MetaItemKind::NameValue(ref lit) => lit.span.hi(),
+ MetaItemKind::List(..) => list_closing_paren_pos.unwrap_or(path.span.hi()),
+ _ => path.span.hi(),
+ };
+ let span = path.span.with_hi(hi);
+ Some(MetaItem { path, kind, span })
+ }
+}
+
+impl MetaItemKind {
+ pub fn value_str(&self) -> Option<Symbol> {
+ match self {
+ MetaItemKind::NameValue(ref v) => match v.kind {
+ LitKind::Str(ref s, _) => Some(*s),
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+
+ pub fn mac_args(&self, span: Span) -> MacArgs {
+ match self {
+ MetaItemKind::Word => MacArgs::Empty,
+ MetaItemKind::NameValue(lit) => {
+ let expr = P(ast::Expr {
+ id: ast::DUMMY_NODE_ID,
+ kind: ast::ExprKind::Lit(lit.clone()),
+ span: lit.span,
+ attrs: ThinVec::new(),
+ tokens: None,
+ });
+ MacArgs::Eq(span, MacArgsEq::Ast(expr))
+ }
+ MetaItemKind::List(list) => {
+ let mut tts = Vec::new();
+ for (i, item) in list.iter().enumerate() {
+ if i > 0 {
+ tts.push(TokenTree::token_alone(token::Comma, span));
+ }
+ tts.extend(item.token_trees())
+ }
+ MacArgs::Delimited(
+ DelimSpan::from_single(span),
+ MacDelimiter::Parenthesis,
+ TokenStream::new(tts),
+ )
+ }
+ }
+ }
+
+ fn token_trees(&self, span: Span) -> Vec<TokenTree> {
+ match *self {
+ MetaItemKind::Word => vec![],
+ MetaItemKind::NameValue(ref lit) => {
+ vec![
+ TokenTree::token_alone(token::Eq, span),
+ TokenTree::Token(lit.to_token(), Spacing::Alone),
+ ]
+ }
+ MetaItemKind::List(ref list) => {
+ let mut tokens = Vec::new();
+ for (i, item) in list.iter().enumerate() {
+ if i > 0 {
+ tokens.push(TokenTree::token_alone(token::Comma, span));
+ }
+ tokens.extend(item.token_trees())
+ }
+ vec![TokenTree::Delimited(
+ DelimSpan::from_single(span),
+ Delimiter::Parenthesis,
+ TokenStream::new(tokens),
+ )]
+ }
+ }
+ }
+
+ fn list_from_tokens(tokens: TokenStream) -> Option<MetaItemKind> {
+ let mut tokens = tokens.into_trees().peekable();
+ let mut result = Vec::new();
+ while tokens.peek().is_some() {
+ let item = NestedMetaItem::from_tokens(&mut tokens)?;
+ result.push(item);
+ match tokens.next() {
+ None | Some(TokenTree::Token(Token { kind: token::Comma, .. }, _)) => {}
+ _ => return None,
+ }
+ }
+ Some(MetaItemKind::List(result))
+ }
+
+ fn name_value_from_tokens(
+ tokens: &mut impl Iterator<Item = TokenTree>,
+ ) -> Option<MetaItemKind> {
+ match tokens.next() {
+ Some(TokenTree::Delimited(_, Delimiter::Invisible, inner_tokens)) => {
+ MetaItemKind::name_value_from_tokens(&mut inner_tokens.into_trees())
+ }
+ Some(TokenTree::Token(token, _)) => {
+ Lit::from_token(&token).ok().map(MetaItemKind::NameValue)
+ }
+ _ => None,
+ }
+ }
+
+ fn from_mac_args(args: &MacArgs) -> Option<MetaItemKind> {
+ match args {
+ MacArgs::Empty => Some(MetaItemKind::Word),
+ MacArgs::Delimited(_, MacDelimiter::Parenthesis, tokens) => {
+ MetaItemKind::list_from_tokens(tokens.clone())
+ }
+ MacArgs::Delimited(..) => None,
+ MacArgs::Eq(_, MacArgsEq::Ast(expr)) => match &expr.kind {
+ ast::ExprKind::Lit(lit) => Some(MetaItemKind::NameValue(lit.clone())),
+ _ => None,
+ },
+ MacArgs::Eq(_, MacArgsEq::Hir(lit)) => Some(MetaItemKind::NameValue(lit.clone())),
+ }
+ }
+
+ fn from_tokens(
+ tokens: &mut iter::Peekable<impl Iterator<Item = TokenTree>>,
+ ) -> Option<MetaItemKind> {
+ match tokens.peek() {
+ Some(TokenTree::Delimited(_, Delimiter::Parenthesis, inner_tokens)) => {
+ let inner_tokens = inner_tokens.clone();
+ tokens.next();
+ MetaItemKind::list_from_tokens(inner_tokens)
+ }
+ Some(TokenTree::Delimited(..)) => None,
+ Some(TokenTree::Token(Token { kind: token::Eq, .. }, _)) => {
+ tokens.next();
+ MetaItemKind::name_value_from_tokens(tokens)
+ }
+ _ => Some(MetaItemKind::Word),
+ }
+ }
+}
+
+impl NestedMetaItem {
+ pub fn span(&self) -> Span {
+ match *self {
+ NestedMetaItem::MetaItem(ref item) => item.span,
+ NestedMetaItem::Literal(ref lit) => lit.span,
+ }
+ }
+
+ fn token_trees(&self) -> Vec<TokenTree> {
+ match *self {
+ NestedMetaItem::MetaItem(ref item) => item.token_trees(),
+ NestedMetaItem::Literal(ref lit) => {
+ vec![TokenTree::Token(lit.to_token(), Spacing::Alone)]
+ }
+ }
+ }
+
+ fn from_tokens<I>(tokens: &mut iter::Peekable<I>) -> Option<NestedMetaItem>
+ where
+ I: Iterator<Item = TokenTree>,
+ {
+ match tokens.peek() {
+ Some(TokenTree::Token(token, _))
+ if let Ok(lit) = Lit::from_token(token) =>
+ {
+ tokens.next();
+ return Some(NestedMetaItem::Literal(lit));
+ }
+ Some(TokenTree::Delimited(_, Delimiter::Invisible, inner_tokens)) => {
+ let inner_tokens = inner_tokens.clone();
+ tokens.next();
+ return NestedMetaItem::from_tokens(&mut inner_tokens.into_trees().peekable());
+ }
+ _ => {}
+ }
+ MetaItem::from_tokens(tokens).map(NestedMetaItem::MetaItem)
+ }
+}
diff --git a/compiler/rustc_ast/src/entry.rs b/compiler/rustc_ast/src/entry.rs
new file mode 100644
index 000000000..337014619
--- /dev/null
+++ b/compiler/rustc_ast/src/entry.rs
@@ -0,0 +1,8 @@
+#[derive(Debug)]
+pub enum EntryPointType {
+ None,
+ MainNamed,
+ RustcMainAttr,
+ Start,
+ OtherMain, // Not an entry point, but some other function named main
+}
diff --git a/compiler/rustc_ast/src/expand/allocator.rs b/compiler/rustc_ast/src/expand/allocator.rs
new file mode 100644
index 000000000..1976e4ad3
--- /dev/null
+++ b/compiler/rustc_ast/src/expand/allocator.rs
@@ -0,0 +1,53 @@
+use rustc_span::symbol::{sym, Symbol};
+
+#[derive(Clone, Debug, Copy, HashStable_Generic)]
+pub enum AllocatorKind {
+ Global,
+ Default,
+}
+
+impl AllocatorKind {
+ pub fn fn_name(&self, base: Symbol) -> String {
+ match *self {
+ AllocatorKind::Global => format!("__rg_{}", base),
+ AllocatorKind::Default => format!("__rdl_{}", base),
+ }
+ }
+}
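
The shim-naming scheme in isolation (an illustrative helper, not rustc's API): the user-registered global allocator gets `__rg_*` symbols, the default allocator `__rdl_*`.

```rust
fn shim_name(global: bool, base: &str) -> String {
    if global { format!("__rg_{}", base) } else { format!("__rdl_{}", base) }
}

fn main() {
    assert_eq!(shim_name(true, "alloc"), "__rg_alloc");
    assert_eq!(shim_name(false, "dealloc"), "__rdl_dealloc");
}
```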
+
+pub enum AllocatorTy {
+ Layout,
+ Ptr,
+ ResultPtr,
+ Unit,
+ Usize,
+}
+
+pub struct AllocatorMethod {
+ pub name: Symbol,
+ pub inputs: &'static [AllocatorTy],
+ pub output: AllocatorTy,
+}
+
+pub static ALLOCATOR_METHODS: &[AllocatorMethod] = &[
+ AllocatorMethod {
+ name: sym::alloc,
+ inputs: &[AllocatorTy::Layout],
+ output: AllocatorTy::ResultPtr,
+ },
+ AllocatorMethod {
+ name: sym::dealloc,
+ inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout],
+ output: AllocatorTy::Unit,
+ },
+ AllocatorMethod {
+ name: sym::realloc,
+ inputs: &[AllocatorTy::Ptr, AllocatorTy::Layout, AllocatorTy::Usize],
+ output: AllocatorTy::ResultPtr,
+ },
+ AllocatorMethod {
+ name: sym::alloc_zeroed,
+ inputs: &[AllocatorTy::Layout],
+ output: AllocatorTy::ResultPtr,
+ },
+];
diff --git a/compiler/rustc_ast/src/expand/mod.rs b/compiler/rustc_ast/src/expand/mod.rs
new file mode 100644
index 000000000..2ee1bfe0a
--- /dev/null
+++ b/compiler/rustc_ast/src/expand/mod.rs
@@ -0,0 +1,3 @@
+//! Definitions shared by macros / syntax extensions and e.g. `rustc_middle`.
+
+pub mod allocator;
diff --git a/compiler/rustc_ast/src/lib.rs b/compiler/rustc_ast/src/lib.rs
new file mode 100644
index 000000000..4b94ec0d6
--- /dev/null
+++ b/compiler/rustc_ast/src/lib.rs
@@ -0,0 +1,63 @@
+//! The Rust Abstract Syntax Tree (AST).
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(
+ html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
+ test(attr(deny(warnings)))
+)]
+#![feature(associated_type_bounds)]
+#![feature(box_patterns)]
+#![feature(const_default_impls)]
+#![feature(const_trait_impl)]
+#![feature(if_let_guard)]
+#![feature(label_break_value)]
+#![feature(let_chains)]
+#![feature(min_specialization)]
+#![feature(negative_impls)]
+#![feature(slice_internals)]
+#![feature(stmt_expr_attributes)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate rustc_macros;
+
+pub mod util {
+ pub mod classify;
+ pub mod comments;
+ pub mod literal;
+ pub mod parser;
+ pub mod unicode;
+}
+
+pub mod ast;
+pub mod ast_traits;
+pub mod attr;
+pub mod entry;
+pub mod expand;
+pub mod mut_visit;
+pub mod node_id;
+pub mod ptr;
+pub mod token;
+pub mod tokenstream;
+pub mod visit;
+
+pub use self::ast::*;
+pub use self::ast_traits::{AstDeref, AstNodeWrapper, HasAttrs, HasNodeId, HasSpan, HasTokens};
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+
+/// Requirements for a `StableHashingContext` to be used in this crate.
+/// This is a hack to allow using the `HashStable_Generic` derive macro
+/// instead of implementing everything in `rustc_middle`.
+pub trait HashStableContext: rustc_span::HashStableContext {
+ fn hash_attr(&mut self, _: &ast::Attribute, hasher: &mut StableHasher);
+}
+
+impl<AstCtx: crate::HashStableContext> HashStable<AstCtx> for ast::Attribute {
+ fn hash_stable(&self, hcx: &mut AstCtx, hasher: &mut StableHasher) {
+ hcx.hash_attr(self, hasher)
+ }
+}
diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs
new file mode 100644
index 000000000..01bd498b3
--- /dev/null
+++ b/compiler/rustc_ast/src/mut_visit.rs
@@ -0,0 +1,1601 @@
+//! A `MutVisitor` represents an AST modification; it accepts an AST piece and
+//! mutates it in place. So, for instance, macro expansion is a `MutVisitor`
+//! that walks over an AST and modifies it.
+//!
+//! Note: using a `MutVisitor` (other than the `MacroExpander` `MutVisitor`) on
+//! an AST before macro expansion is probably a bad idea. For instance,
+//! a `MutVisitor` renaming item names in a module will miss all of those
+//! that are created by the expansion of a macro.
+
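
A toy model of the idea on a miniature expression AST (names here are hypothetical; the real trait has one method per AST node kind, with the `noop_*` walkers playing the role of `walk_expr` below):

```rust
enum Expr {
    Lit(i64),
    Add(Box<Expr>, Box<Expr>),
}

trait MutVisit {
    fn visit_expr(&mut self, e: &mut Expr) {
        walk_expr(self, e); // default behavior: just recurse
    }
}

// Free walker, mirroring the `noop_visit_*` functions: overriding methods
// call back into it to get the default recursion.
fn walk_expr<V: MutVisit + ?Sized>(v: &mut V, e: &mut Expr) {
    if let Expr::Add(a, b) = e {
        v.visit_expr(a);
        v.visit_expr(b);
    }
}

// An example pass: constant-fold `Lit + Lit` in place.
struct ConstFold;

impl MutVisit for ConstFold {
    fn visit_expr(&mut self, e: &mut Expr) {
        walk_expr(self, e); // fold the children first
        if let Expr::Add(a, b) = e {
            if let (Expr::Lit(x), Expr::Lit(y)) = (&**a, &**b) {
                *e = Expr::Lit(x + y);
            }
        }
    }
}

fn main() {
    let mut e = Expr::Add(Box::new(Expr::Lit(2)), Box::new(Expr::Lit(40)));
    ConstFold.visit_expr(&mut e);
    assert!(matches!(e, Expr::Lit(42)));
}
```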
+use crate::ast::*;
+use crate::ptr::P;
+use crate::token::{self, Token};
+use crate::tokenstream::*;
+
+use rustc_data_structures::map_in_place::MapInPlace;
+use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+use smallvec::{smallvec, Array, SmallVec};
+use std::ops::DerefMut;
+use std::{panic, ptr};
+
+pub trait ExpectOne<A: Array> {
+ fn expect_one(self, err: &'static str) -> A::Item;
+}
+
+impl<A: Array> ExpectOne<A> for SmallVec<A> {
+ fn expect_one(self, err: &'static str) -> A::Item {
+ assert!(self.len() == 1, "{}", err);
+ self.into_iter().next().unwrap()
+ }
+}
+
+pub trait MutVisitor: Sized {
+ /// Mutable token visiting only exists for the `macro_rules` token marker and should not be
+ /// used otherwise. Token visitor would be entirely separate from the regular visitor if
+ /// the marker didn't have to visit AST fragments in nonterminal tokens.
+ const VISIT_TOKENS: bool = false;
+
+ // Methods in this trait have one of three forms:
+ //
+ // fn visit_t(&mut self, t: &mut T); // common
+ // fn flat_map_t(&mut self, t: T) -> SmallVec<[T; 1]>; // rare
+ // fn filter_map_t(&mut self, t: T) -> Option<T>; // rarest
+ //
+ // Any additions to this trait should happen in the form of a call to a public
+ // `noop_*` function that only calls out to the visitor again, not other
+ // `noop_*` functions. This is a necessary API workaround to the problem of
+ // not being able to call out to the super default method in an overridden
+ // default method.
+ //
+ // When writing these methods, it is better to use destructuring like this:
+ //
+ // fn visit_abc(&mut self, ABC { a, b, c: _ }: &mut ABC) {
+ // visit_a(a);
+ // visit_b(b);
+ // }
+ //
+ // than to use field access like this:
+ //
+ // fn visit_abc(&mut self, abc: &mut ABC) {
+ // visit_a(&mut abc.a);
+ // visit_b(&mut abc.b);
+ // // ignore abc.c
+ // }
+ //
+ // As well as being more concise, the former is explicit about which fields
+ // are skipped. Furthermore, if a new field is added, the destructuring
+ // version will cause a compile error, which is good. In comparison, the
+ // field access version will continue working and it would be easy to
+ // forget to add handling for it.
+
+ fn visit_crate(&mut self, c: &mut Crate) {
+ noop_visit_crate(c, self)
+ }
+
+ fn visit_meta_list_item(&mut self, list_item: &mut NestedMetaItem) {
+ noop_visit_meta_list_item(list_item, self);
+ }
+
+ fn visit_meta_item(&mut self, meta_item: &mut MetaItem) {
+ noop_visit_meta_item(meta_item, self);
+ }
+
+ fn visit_use_tree(&mut self, use_tree: &mut UseTree) {
+ noop_visit_use_tree(use_tree, self);
+ }
+
+ fn flat_map_foreign_item(&mut self, ni: P<ForeignItem>) -> SmallVec<[P<ForeignItem>; 1]> {
+ noop_flat_map_foreign_item(ni, self)
+ }
+
+ fn flat_map_item(&mut self, i: P<Item>) -> SmallVec<[P<Item>; 1]> {
+ noop_flat_map_item(i, self)
+ }
+
+ fn visit_fn_header(&mut self, header: &mut FnHeader) {
+ noop_visit_fn_header(header, self);
+ }
+
+ fn flat_map_field_def(&mut self, fd: FieldDef) -> SmallVec<[FieldDef; 1]> {
+ noop_flat_map_field_def(fd, self)
+ }
+
+ fn visit_item_kind(&mut self, i: &mut ItemKind) {
+ noop_visit_item_kind(i, self);
+ }
+
+ fn flat_map_trait_item(&mut self, i: P<AssocItem>) -> SmallVec<[P<AssocItem>; 1]> {
+ noop_flat_map_assoc_item(i, self)
+ }
+
+ fn flat_map_impl_item(&mut self, i: P<AssocItem>) -> SmallVec<[P<AssocItem>; 1]> {
+ noop_flat_map_assoc_item(i, self)
+ }
+
+ fn visit_fn_decl(&mut self, d: &mut P<FnDecl>) {
+ noop_visit_fn_decl(d, self);
+ }
+
+ fn visit_asyncness(&mut self, a: &mut Async) {
+ noop_visit_asyncness(a, self);
+ }
+
+ fn visit_closure_binder(&mut self, b: &mut ClosureBinder) {
+ noop_visit_closure_binder(b, self);
+ }
+
+ fn visit_block(&mut self, b: &mut P<Block>) {
+ noop_visit_block(b, self);
+ }
+
+ fn flat_map_stmt(&mut self, s: Stmt) -> SmallVec<[Stmt; 1]> {
+ noop_flat_map_stmt(s, self)
+ }
+
+ fn flat_map_arm(&mut self, arm: Arm) -> SmallVec<[Arm; 1]> {
+ noop_flat_map_arm(arm, self)
+ }
+
+ fn visit_pat(&mut self, p: &mut P<Pat>) {
+ noop_visit_pat(p, self);
+ }
+
+ fn visit_anon_const(&mut self, c: &mut AnonConst) {
+ noop_visit_anon_const(c, self);
+ }
+
+ fn visit_expr(&mut self, e: &mut P<Expr>) {
+ noop_visit_expr(e, self);
+ }
+
+ fn filter_map_expr(&mut self, e: P<Expr>) -> Option<P<Expr>> {
+ noop_filter_map_expr(e, self)
+ }
+
+ fn visit_generic_arg(&mut self, arg: &mut GenericArg) {
+ noop_visit_generic_arg(arg, self);
+ }
+
+ fn visit_ty(&mut self, t: &mut P<Ty>) {
+ noop_visit_ty(t, self);
+ }
+
+ fn visit_lifetime(&mut self, l: &mut Lifetime) {
+ noop_visit_lifetime(l, self);
+ }
+
+ fn visit_constraint(&mut self, t: &mut AssocConstraint) {
+ noop_visit_constraint(t, self);
+ }
+
+ fn visit_foreign_mod(&mut self, nm: &mut ForeignMod) {
+ noop_visit_foreign_mod(nm, self);
+ }
+
+ fn flat_map_variant(&mut self, v: Variant) -> SmallVec<[Variant; 1]> {
+ noop_flat_map_variant(v, self)
+ }
+
+ fn visit_ident(&mut self, i: &mut Ident) {
+ noop_visit_ident(i, self);
+ }
+
+ fn visit_path(&mut self, p: &mut Path) {
+ noop_visit_path(p, self);
+ }
+
+ fn visit_qself(&mut self, qs: &mut Option<QSelf>) {
+ noop_visit_qself(qs, self);
+ }
+
+ fn visit_generic_args(&mut self, p: &mut GenericArgs) {
+ noop_visit_generic_args(p, self);
+ }
+
+ fn visit_angle_bracketed_parameter_data(&mut self, p: &mut AngleBracketedArgs) {
+ noop_visit_angle_bracketed_parameter_data(p, self);
+ }
+
+ fn visit_parenthesized_parameter_data(&mut self, p: &mut ParenthesizedArgs) {
+ noop_visit_parenthesized_parameter_data(p, self);
+ }
+
+ fn visit_local(&mut self, l: &mut P<Local>) {
+ noop_visit_local(l, self);
+ }
+
+ fn visit_mac_call(&mut self, mac: &mut MacCall) {
+ noop_visit_mac(mac, self);
+ }
+
+ fn visit_macro_def(&mut self, def: &mut MacroDef) {
+ noop_visit_macro_def(def, self);
+ }
+
+ fn visit_label(&mut self, label: &mut Label) {
+ noop_visit_label(label, self);
+ }
+
+ fn visit_attribute(&mut self, at: &mut Attribute) {
+ noop_visit_attribute(at, self);
+ }
+
+ fn flat_map_param(&mut self, param: Param) -> SmallVec<[Param; 1]> {
+ noop_flat_map_param(param, self)
+ }
+
+ fn visit_generics(&mut self, generics: &mut Generics) {
+ noop_visit_generics(generics, self);
+ }
+
+ fn visit_trait_ref(&mut self, tr: &mut TraitRef) {
+ noop_visit_trait_ref(tr, self);
+ }
+
+ fn visit_poly_trait_ref(&mut self, p: &mut PolyTraitRef) {
+ noop_visit_poly_trait_ref(p, self);
+ }
+
+ fn visit_variant_data(&mut self, vdata: &mut VariantData) {
+ noop_visit_variant_data(vdata, self);
+ }
+
+ fn flat_map_generic_param(&mut self, param: GenericParam) -> SmallVec<[GenericParam; 1]> {
+ noop_flat_map_generic_param(param, self)
+ }
+
+ fn visit_param_bound(&mut self, tpb: &mut GenericBound) {
+ noop_visit_param_bound(tpb, self);
+ }
+
+ fn visit_mt(&mut self, mt: &mut MutTy) {
+ noop_visit_mt(mt, self);
+ }
+
+ fn flat_map_expr_field(&mut self, f: ExprField) -> SmallVec<[ExprField; 1]> {
+ noop_flat_map_expr_field(f, self)
+ }
+
+ fn visit_where_clause(&mut self, where_clause: &mut WhereClause) {
+ noop_visit_where_clause(where_clause, self);
+ }
+
+ fn visit_where_predicate(&mut self, where_predicate: &mut WherePredicate) {
+ noop_visit_where_predicate(where_predicate, self);
+ }
+
+ fn visit_vis(&mut self, vis: &mut Visibility) {
+ noop_visit_vis(vis, self);
+ }
+
+ fn visit_id(&mut self, _id: &mut NodeId) {
+ // Do nothing.
+ }
+
+ fn visit_span(&mut self, _sp: &mut Span) {
+ // Do nothing.
+ }
+
+ fn flat_map_pat_field(&mut self, fp: PatField) -> SmallVec<[PatField; 1]> {
+ noop_flat_map_pat_field(fp, self)
+ }
+
+ fn visit_inline_asm(&mut self, asm: &mut InlineAsm) {
+ noop_visit_inline_asm(asm, self)
+ }
+
+ fn visit_inline_asm_sym(&mut self, sym: &mut InlineAsmSym) {
+ noop_visit_inline_asm_sym(sym, self)
+ }
+}
+
+/// Use a map-style function (`FnOnce(T) -> T`) to overwrite a `&mut T`. Useful
+/// when using a `flat_map_*` or `filter_map_*` method within a `visit_`
+/// method.
+//
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_clobber<T: DummyAstNode>(t: &mut T, f: impl FnOnce(T) -> T) {
+ unsafe {
+ // Safe because `t` is used in a read-only fashion by `read()` before
+ // being overwritten by `write()`.
+ let old_t = ptr::read(t);
+ let new_t =
+ panic::catch_unwind(panic::AssertUnwindSafe(|| f(old_t))).unwrap_or_else(|err| {
+ // Set `t` to some valid but possibly meaningless value,
+ // and propagate the panic further.
+ ptr::write(t, T::dummy());
+ panic::resume_unwind(err);
+ });
+ ptr::write(t, new_t);
+ }
+}
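
The take-modify-put-back trick in isolation, with `Default` standing in for the `DummyAstNode` bound (a self-contained sketch, not the rustc definition):

```rust
use std::{panic, ptr};

fn clobber<T: Default>(t: &mut T, f: impl FnOnce(T) -> T) {
    unsafe {
        // Move the value out of the `&mut T` without a placeholder...
        let old = ptr::read(t);
        let new = panic::catch_unwind(panic::AssertUnwindSafe(|| f(old))).unwrap_or_else(|err| {
            // ...and if `f` panics, restore *some* valid value before
            // continuing to unwind, so `*t` is never left logically dead.
            ptr::write(t, T::default());
            panic::resume_unwind(err);
        });
        ptr::write(t, new);
    }
}

fn main() {
    let mut v = vec![1, 2, 3];
    clobber(&mut v, |mut v| {
        v.push(4);
        v
    });
    assert_eq!(v, [1, 2, 3, 4]);
}
```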
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+#[inline]
+pub fn visit_vec<T, F>(elems: &mut Vec<T>, mut visit_elem: F)
+where
+ F: FnMut(&mut T),
+{
+ for elem in elems {
+ visit_elem(elem);
+ }
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+#[inline]
+pub fn visit_opt<T, F>(opt: &mut Option<T>, mut visit_elem: F)
+where
+ F: FnMut(&mut T),
+{
+ if let Some(elem) = opt {
+ visit_elem(elem);
+ }
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_attrs<T: MutVisitor>(attrs: &mut Vec<Attribute>, vis: &mut T) {
+ visit_vec(attrs, |attr| vis.visit_attribute(attr));
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_thin_attrs<T: MutVisitor>(attrs: &mut AttrVec, vis: &mut T) {
+ for attr in attrs.iter_mut() {
+ vis.visit_attribute(attr);
+ }
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_exprs<T: MutVisitor>(exprs: &mut Vec<P<Expr>>, vis: &mut T) {
+ exprs.flat_map_in_place(|expr| vis.filter_map_expr(expr))
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_bounds<T: MutVisitor>(bounds: &mut GenericBounds, vis: &mut T) {
+ visit_vec(bounds, |bound| vis.visit_param_bound(bound));
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_fn_sig<T: MutVisitor>(FnSig { header, decl, span }: &mut FnSig, vis: &mut T) {
+ vis.visit_fn_header(header);
+ vis.visit_fn_decl(decl);
+ vis.visit_span(span);
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_mac_args<T: MutVisitor>(args: &mut MacArgs, vis: &mut T) {
+ match args {
+ MacArgs::Empty => {}
+ MacArgs::Delimited(dspan, _delim, tokens) => {
+ visit_delim_span(dspan, vis);
+ visit_tts(tokens, vis);
+ }
+ MacArgs::Eq(eq_span, MacArgsEq::Ast(expr)) => {
+ vis.visit_span(eq_span);
+ vis.visit_expr(expr);
+ }
+ MacArgs::Eq(_, MacArgsEq::Hir(lit)) => {
+ unreachable!("in literal form when visiting mac args eq: {:?}", lit)
+ }
+ }
+}
+
+pub fn visit_delim_span<T: MutVisitor>(dspan: &mut DelimSpan, vis: &mut T) {
+ vis.visit_span(&mut dspan.open);
+ vis.visit_span(&mut dspan.close);
+}
+
+pub fn noop_flat_map_pat_field<T: MutVisitor>(
+ mut fp: PatField,
+ vis: &mut T,
+) -> SmallVec<[PatField; 1]> {
+ let PatField { attrs, id, ident, is_placeholder: _, is_shorthand: _, pat, span } = &mut fp;
+ vis.visit_id(id);
+ vis.visit_ident(ident);
+ vis.visit_pat(pat);
+ vis.visit_span(span);
+ visit_thin_attrs(attrs, vis);
+ smallvec![fp]
+}
+
+pub fn noop_visit_use_tree<T: MutVisitor>(use_tree: &mut UseTree, vis: &mut T) {
+ let UseTree { prefix, kind, span } = use_tree;
+ vis.visit_path(prefix);
+ match kind {
+ UseTreeKind::Simple(rename, id1, id2) => {
+ visit_opt(rename, |rename| vis.visit_ident(rename));
+ vis.visit_id(id1);
+ vis.visit_id(id2);
+ }
+ UseTreeKind::Nested(items) => {
+ for (tree, id) in items {
+ vis.visit_use_tree(tree);
+ vis.visit_id(id);
+ }
+ }
+ UseTreeKind::Glob => {}
+ }
+ vis.visit_span(span);
+}
+
+pub fn noop_flat_map_arm<T: MutVisitor>(mut arm: Arm, vis: &mut T) -> SmallVec<[Arm; 1]> {
+ let Arm { attrs, pat, guard, body, span, id, is_placeholder: _ } = &mut arm;
+ visit_thin_attrs(attrs, vis);
+ vis.visit_id(id);
+ vis.visit_pat(pat);
+ visit_opt(guard, |guard| vis.visit_expr(guard));
+ vis.visit_expr(body);
+ vis.visit_span(span);
+ smallvec![arm]
+}
+
+pub fn noop_visit_constraint<T: MutVisitor>(
+ AssocConstraint { id, ident, gen_args, kind, span }: &mut AssocConstraint,
+ vis: &mut T,
+) {
+ vis.visit_id(id);
+ vis.visit_ident(ident);
+ if let Some(ref mut gen_args) = gen_args {
+ vis.visit_generic_args(gen_args);
+ }
+ match kind {
+ AssocConstraintKind::Equality { ref mut term } => match term {
+ Term::Ty(ty) => vis.visit_ty(ty),
+ Term::Const(c) => vis.visit_anon_const(c),
+ },
+ AssocConstraintKind::Bound { ref mut bounds } => visit_bounds(bounds, vis),
+ }
+ vis.visit_span(span);
+}
+
+pub fn noop_visit_ty<T: MutVisitor>(ty: &mut P<Ty>, vis: &mut T) {
+ let Ty { id, kind, span, tokens } = ty.deref_mut();
+ vis.visit_id(id);
+ match kind {
+ TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err | TyKind::Never | TyKind::CVarArgs => {}
+ TyKind::Slice(ty) => vis.visit_ty(ty),
+ TyKind::Ptr(mt) => vis.visit_mt(mt),
+ TyKind::Rptr(lt, mt) => {
+ visit_opt(lt, |lt| noop_visit_lifetime(lt, vis));
+ vis.visit_mt(mt);
+ }
+ TyKind::BareFn(bft) => {
+ let BareFnTy { unsafety, ext: _, generic_params, decl, decl_span } = bft.deref_mut();
+ visit_unsafety(unsafety, vis);
+ generic_params.flat_map_in_place(|param| vis.flat_map_generic_param(param));
+ vis.visit_fn_decl(decl);
+ vis.visit_span(decl_span);
+ }
+ TyKind::Tup(tys) => visit_vec(tys, |ty| vis.visit_ty(ty)),
+ TyKind::Paren(ty) => vis.visit_ty(ty),
+ TyKind::Path(qself, path) => {
+ vis.visit_qself(qself);
+ vis.visit_path(path);
+ }
+ TyKind::Array(ty, length) => {
+ vis.visit_ty(ty);
+ vis.visit_anon_const(length);
+ }
+ TyKind::Typeof(expr) => vis.visit_anon_const(expr),
+ TyKind::TraitObject(bounds, _syntax) => {
+ visit_vec(bounds, |bound| vis.visit_param_bound(bound))
+ }
+ TyKind::ImplTrait(id, bounds) => {
+ vis.visit_id(id);
+ visit_vec(bounds, |bound| vis.visit_param_bound(bound));
+ }
+ TyKind::MacCall(mac) => vis.visit_mac_call(mac),
+ }
+ vis.visit_span(span);
+ visit_lazy_tts(tokens, vis);
+}
+
+pub fn noop_visit_foreign_mod<T: MutVisitor>(foreign_mod: &mut ForeignMod, vis: &mut T) {
+ let ForeignMod { unsafety, abi: _, items } = foreign_mod;
+ visit_unsafety(unsafety, vis);
+ items.flat_map_in_place(|item| vis.flat_map_foreign_item(item));
+}
+
+pub fn noop_flat_map_variant<T: MutVisitor>(
+ mut variant: Variant,
+ visitor: &mut T,
+) -> SmallVec<[Variant; 1]> {
+ let Variant { ident, vis, attrs, id, data, disr_expr, span, is_placeholder: _ } = &mut variant;
+ visitor.visit_ident(ident);
+ visitor.visit_vis(vis);
+ visit_thin_attrs(attrs, visitor);
+ visitor.visit_id(id);
+ visitor.visit_variant_data(data);
+ visit_opt(disr_expr, |disr_expr| visitor.visit_anon_const(disr_expr));
+ visitor.visit_span(span);
+ smallvec![variant]
+}
+
+pub fn noop_visit_ident<T: MutVisitor>(Ident { name: _, span }: &mut Ident, vis: &mut T) {
+ vis.visit_span(span);
+}
+
+pub fn noop_visit_path<T: MutVisitor>(Path { segments, span, tokens }: &mut Path, vis: &mut T) {
+ vis.visit_span(span);
+ for PathSegment { ident, id, args } in segments {
+ vis.visit_ident(ident);
+ vis.visit_id(id);
+ visit_opt(args, |args| vis.visit_generic_args(args));
+ }
+ visit_lazy_tts(tokens, vis);
+}
+
+pub fn noop_visit_qself<T: MutVisitor>(qself: &mut Option<QSelf>, vis: &mut T) {
+ visit_opt(qself, |QSelf { ty, path_span, position: _ }| {
+ vis.visit_ty(ty);
+ vis.visit_span(path_span);
+ })
+}
+
+pub fn noop_visit_generic_args<T: MutVisitor>(generic_args: &mut GenericArgs, vis: &mut T) {
+ match generic_args {
+ GenericArgs::AngleBracketed(data) => vis.visit_angle_bracketed_parameter_data(data),
+ GenericArgs::Parenthesized(data) => vis.visit_parenthesized_parameter_data(data),
+ }
+}
+
+pub fn noop_visit_generic_arg<T: MutVisitor>(arg: &mut GenericArg, vis: &mut T) {
+ match arg {
+ GenericArg::Lifetime(lt) => vis.visit_lifetime(lt),
+ GenericArg::Type(ty) => vis.visit_ty(ty),
+ GenericArg::Const(ct) => vis.visit_anon_const(ct),
+ }
+}
+
+pub fn noop_visit_angle_bracketed_parameter_data<T: MutVisitor>(
+ data: &mut AngleBracketedArgs,
+ vis: &mut T,
+) {
+ let AngleBracketedArgs { args, span } = data;
+ visit_vec(args, |arg| match arg {
+ AngleBracketedArg::Arg(arg) => vis.visit_generic_arg(arg),
+ AngleBracketedArg::Constraint(constraint) => vis.visit_constraint(constraint),
+ });
+ vis.visit_span(span);
+}
+
+pub fn noop_visit_parenthesized_parameter_data<T: MutVisitor>(
+ args: &mut ParenthesizedArgs,
+ vis: &mut T,
+) {
+ let ParenthesizedArgs { inputs, output, span, .. } = args;
+ visit_vec(inputs, |input| vis.visit_ty(input));
+ noop_visit_fn_ret_ty(output, vis);
+ vis.visit_span(span);
+}
+
+pub fn noop_visit_local<T: MutVisitor>(local: &mut P<Local>, vis: &mut T) {
+ let Local { id, pat, ty, kind, span, attrs, tokens } = local.deref_mut();
+ vis.visit_id(id);
+ vis.visit_pat(pat);
+ visit_opt(ty, |ty| vis.visit_ty(ty));
+ match kind {
+ LocalKind::Decl => {}
+ LocalKind::Init(init) => {
+ vis.visit_expr(init);
+ }
+ LocalKind::InitElse(init, els) => {
+ vis.visit_expr(init);
+ vis.visit_block(els);
+ }
+ }
+ vis.visit_span(span);
+ visit_thin_attrs(attrs, vis);
+ visit_lazy_tts(tokens, vis);
+}
+
+pub fn noop_visit_attribute<T: MutVisitor>(attr: &mut Attribute, vis: &mut T) {
+ let Attribute { kind, id: _, style: _, span } = attr;
+ match kind {
+ AttrKind::Normal(AttrItem { path, args, tokens }, attr_tokens) => {
+ vis.visit_path(path);
+ visit_mac_args(args, vis);
+ visit_lazy_tts(tokens, vis);
+ visit_lazy_tts(attr_tokens, vis);
+ }
+ AttrKind::DocComment(..) => {}
+ }
+ vis.visit_span(span);
+}
+
+pub fn noop_visit_mac<T: MutVisitor>(mac: &mut MacCall, vis: &mut T) {
+ let MacCall { path, args, prior_type_ascription: _ } = mac;
+ vis.visit_path(path);
+ visit_mac_args(args, vis);
+}
+
+pub fn noop_visit_macro_def<T: MutVisitor>(macro_def: &mut MacroDef, vis: &mut T) {
+ let MacroDef { body, macro_rules: _ } = macro_def;
+ visit_mac_args(body, vis);
+}
+
+pub fn noop_visit_meta_list_item<T: MutVisitor>(li: &mut NestedMetaItem, vis: &mut T) {
+ match li {
+ NestedMetaItem::MetaItem(mi) => vis.visit_meta_item(mi),
+ NestedMetaItem::Literal(_lit) => {}
+ }
+}
+
+pub fn noop_visit_meta_item<T: MutVisitor>(mi: &mut MetaItem, vis: &mut T) {
+ let MetaItem { path: _, kind, span } = mi;
+ match kind {
+ MetaItemKind::Word => {}
+ MetaItemKind::List(mis) => visit_vec(mis, |mi| vis.visit_meta_list_item(mi)),
+ MetaItemKind::NameValue(_s) => {}
+ }
+ vis.visit_span(span);
+}
+
+pub fn noop_flat_map_param<T: MutVisitor>(mut param: Param, vis: &mut T) -> SmallVec<[Param; 1]> {
+ let Param { attrs, id, pat, span, ty, is_placeholder: _ } = &mut param;
+ vis.visit_id(id);
+ visit_thin_attrs(attrs, vis);
+ vis.visit_pat(pat);
+ vis.visit_span(span);
+ vis.visit_ty(ty);
+ smallvec![param]
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_attr_annotated_tt<T: MutVisitor>(tt: &mut AttrAnnotatedTokenTree, vis: &mut T) {
+ match tt {
+ AttrAnnotatedTokenTree::Token(token) => {
+ visit_token(token, vis);
+ }
+ AttrAnnotatedTokenTree::Delimited(DelimSpan { open, close }, _delim, tts) => {
+ vis.visit_span(open);
+ vis.visit_span(close);
+ visit_attr_annotated_tts(tts, vis);
+ }
+ AttrAnnotatedTokenTree::Attributes(data) => {
+ for attr in &mut *data.attrs {
+ match &mut attr.kind {
+ AttrKind::Normal(_, attr_tokens) => {
+ visit_lazy_tts(attr_tokens, vis);
+ }
+ AttrKind::DocComment(..) => {
+ vis.visit_span(&mut attr.span);
+ }
+ }
+ }
+ visit_lazy_tts_opt_mut(Some(&mut data.tokens), vis);
+ }
+ }
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_tt<T: MutVisitor>(tt: &mut TokenTree, vis: &mut T) {
+ match tt {
+ TokenTree::Token(token, _) => {
+ visit_token(token, vis);
+ }
+ TokenTree::Delimited(DelimSpan { open, close }, _delim, tts) => {
+ vis.visit_span(open);
+ vis.visit_span(close);
+ visit_tts(tts, vis);
+ }
+ }
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_tts<T: MutVisitor>(TokenStream(tts): &mut TokenStream, vis: &mut T) {
+ if T::VISIT_TOKENS && !tts.is_empty() {
+ let tts = Lrc::make_mut(tts);
+ visit_vec(tts, |tree| visit_tt(tree, vis));
+ }
+}
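+
+// Note: `VISIT_TOKENS` is an associated const on the `MutVisitor` bound that
+// lets a visitor opt in to descending into token streams; when it is `false`
+// the stream is skipped entirely, and `Lrc::make_mut` above clones the shared
+// vector only if another reference to it exists.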
+
+pub fn visit_attr_annotated_tts<T: MutVisitor>(
+ AttrAnnotatedTokenStream(tts): &mut AttrAnnotatedTokenStream,
+ vis: &mut T,
+) {
+ if T::VISIT_TOKENS && !tts.is_empty() {
+ let tts = Lrc::make_mut(tts);
+ visit_vec(tts, |(tree, _is_joint)| visit_attr_annotated_tt(tree, vis));
+ }
+}
+
+pub fn visit_lazy_tts_opt_mut<T: MutVisitor>(lazy_tts: Option<&mut LazyTokenStream>, vis: &mut T) {
+ if T::VISIT_TOKENS {
+ if let Some(lazy_tts) = lazy_tts {
+ let mut tts = lazy_tts.create_token_stream();
+ visit_attr_annotated_tts(&mut tts, vis);
+ *lazy_tts = LazyTokenStream::new(tts);
+ }
+ }
+}
+
+pub fn visit_lazy_tts<T: MutVisitor>(lazy_tts: &mut Option<LazyTokenStream>, vis: &mut T) {
+ visit_lazy_tts_opt_mut(lazy_tts.as_mut(), vis);
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+// Applies the ident visitor if the token is an ident; applies other visits to interpolated nodes.
+// In practice the ident part is not actually used by specific visitors right now,
+// but there's a test below checking that it works.
+pub fn visit_token<T: MutVisitor>(t: &mut Token, vis: &mut T) {
+ let Token { kind, span } = t;
+ match kind {
+ token::Ident(name, _) | token::Lifetime(name) => {
+ let mut ident = Ident::new(*name, *span);
+ vis.visit_ident(&mut ident);
+ *name = ident.name;
+ *span = ident.span;
+ return; // Avoid visiting the span for the second time.
+ }
+ token::Interpolated(nt) => {
+ let mut nt = Lrc::make_mut(nt);
+ visit_nonterminal(&mut nt, vis);
+ }
+ _ => {}
+ }
+ vis.visit_span(span);
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+/// Applies the visitor to elements of interpolated nodes.
+//
+// N.B., this can occur only when applying a visitor to partially expanded
+// code, where parsed pieces have gotten implanted into *other* macro
+// invocations. This is relevant for macro hygiene, but possibly not elsewhere.
+//
+// One problem here occurs because the types for flat_map_item, flat_map_stmt,
+// etc., allow the visitor to return *multiple* items; this is a problem for the
+// nodes here, because they insist on having exactly one piece. One solution
+// would be to mangle the MutVisitor trait to include one-to-many and
+// one-to-one versions of these entry points, but that would probably confuse a
+// lot of people and help very few. Instead, I'm just going to put in dynamic
+// checks. I think the performance impact of this will be pretty much
+// nonexistent. The danger is that someone will apply a `MutVisitor` to a
+// partially expanded node, and will be confused by the fact that their
+// `flat_map_item` or `flat_map_stmt` isn't getting called on `NtItem` or `NtStmt`
+// nodes. Hopefully they'll wind up reading this comment, and doing something
+// appropriate.
+//
+// BTW, design choice: I considered just changing the type of, e.g., `NtItem` to
+// contain multiple items, but decided against it when I looked at
+// `parse_item_or_view_item` and tried to figure out what I would do with
+// multiple items there....
+pub fn visit_nonterminal<T: MutVisitor>(nt: &mut token::Nonterminal, vis: &mut T) {
+ match nt {
+ token::NtItem(item) => visit_clobber(item, |item| {
+ // This is probably okay, because the only visitors likely to
+ // peek inside interpolated nodes will be renamings/markings,
+ // which map single items to single items.
+ vis.flat_map_item(item).expect_one("expected visitor to produce exactly one item")
+ }),
+ token::NtBlock(block) => vis.visit_block(block),
+ token::NtStmt(stmt) => visit_clobber(stmt, |stmt| {
+ // See reasoning above.
+ stmt.map(|stmt| {
+ vis.flat_map_stmt(stmt).expect_one("expected visitor to produce exactly one item")
+ })
+ }),
+ token::NtPat(pat) => vis.visit_pat(pat),
+ token::NtExpr(expr) => vis.visit_expr(expr),
+ token::NtTy(ty) => vis.visit_ty(ty),
+ token::NtIdent(ident, _is_raw) => vis.visit_ident(ident),
+ token::NtLifetime(ident) => vis.visit_ident(ident),
+ token::NtLiteral(expr) => vis.visit_expr(expr),
+ token::NtMeta(item) => {
+ let AttrItem { path, args, tokens } = item.deref_mut();
+ vis.visit_path(path);
+ visit_mac_args(args, vis);
+ visit_lazy_tts(tokens, vis);
+ }
+ token::NtPath(path) => vis.visit_path(path),
+ token::NtVis(visib) => vis.visit_vis(visib),
+ }
+}
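+
+// A concrete consequence of the dynamic check above (sketch): a visitor whose
+// `flat_map_item` expands one item into two works on ordinary module items,
+// but panics with "expected visitor to produce exactly one item" as soon as it
+// reaches an interpolated `NtItem`, because the token can hold only one node.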
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_defaultness<T: MutVisitor>(defaultness: &mut Defaultness, vis: &mut T) {
+ match defaultness {
+ Defaultness::Default(span) => vis.visit_span(span),
+ Defaultness::Final => {}
+ }
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_unsafety<T: MutVisitor>(unsafety: &mut Unsafe, vis: &mut T) {
+ match unsafety {
+ Unsafe::Yes(span) => vis.visit_span(span),
+ Unsafe::No => {}
+ }
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_polarity<T: MutVisitor>(polarity: &mut ImplPolarity, vis: &mut T) {
+ match polarity {
+ ImplPolarity::Positive => {}
+ ImplPolarity::Negative(span) => vis.visit_span(span),
+ }
+}
+
+// No `noop_` prefix because there isn't a corresponding method in `MutVisitor`.
+pub fn visit_constness<T: MutVisitor>(constness: &mut Const, vis: &mut T) {
+ match constness {
+ Const::Yes(span) => vis.visit_span(span),
+ Const::No => {}
+ }
+}
+
+pub fn noop_visit_closure_binder<T: MutVisitor>(binder: &mut ClosureBinder, vis: &mut T) {
+ match binder {
+ ClosureBinder::NotPresent => {}
+ ClosureBinder::For { span: _, generic_params } => {
+ let mut vec = std::mem::take(generic_params).into_vec();
+ vec.flat_map_in_place(|param| vis.flat_map_generic_param(param));
+ *generic_params = P::from_vec(vec);
+ }
+ }
+}
+
+pub fn noop_visit_asyncness<T: MutVisitor>(asyncness: &mut Async, vis: &mut T) {
+ match asyncness {
+ Async::Yes { span: _, closure_id, return_impl_trait_id } => {
+ vis.visit_id(closure_id);
+ vis.visit_id(return_impl_trait_id);
+ }
+ Async::No => {}
+ }
+}
+
+pub fn noop_visit_fn_decl<T: MutVisitor>(decl: &mut P<FnDecl>, vis: &mut T) {
+ let FnDecl { inputs, output } = decl.deref_mut();
+ inputs.flat_map_in_place(|param| vis.flat_map_param(param));
+ noop_visit_fn_ret_ty(output, vis);
+}
+
+pub fn noop_visit_fn_ret_ty<T: MutVisitor>(fn_ret_ty: &mut FnRetTy, vis: &mut T) {
+ match fn_ret_ty {
+ FnRetTy::Default(span) => vis.visit_span(span),
+ FnRetTy::Ty(ty) => vis.visit_ty(ty),
+ }
+}
+
+pub fn noop_visit_param_bound<T: MutVisitor>(pb: &mut GenericBound, vis: &mut T) {
+ match pb {
+ GenericBound::Trait(ty, _modifier) => vis.visit_poly_trait_ref(ty),
+ GenericBound::Outlives(lifetime) => noop_visit_lifetime(lifetime, vis),
+ }
+}
+
+pub fn noop_flat_map_generic_param<T: MutVisitor>(
+ mut param: GenericParam,
+ vis: &mut T,
+) -> SmallVec<[GenericParam; 1]> {
+ let GenericParam { id, ident, attrs, bounds, kind, colon_span, is_placeholder: _ } = &mut param;
+ vis.visit_id(id);
+ vis.visit_ident(ident);
+ if let Some(ref mut colon_span) = colon_span {
+ vis.visit_span(colon_span);
+ }
+ visit_thin_attrs(attrs, vis);
+ visit_vec(bounds, |bound| noop_visit_param_bound(bound, vis));
+ match kind {
+ GenericParamKind::Lifetime => {}
+ GenericParamKind::Type { default } => {
+ visit_opt(default, |default| vis.visit_ty(default));
+ }
+ GenericParamKind::Const { ty, kw_span: _, default } => {
+ vis.visit_ty(ty);
+ visit_opt(default, |default| vis.visit_anon_const(default));
+ }
+ }
+ smallvec![param]
+}
+
+pub fn noop_visit_label<T: MutVisitor>(Label { ident }: &mut Label, vis: &mut T) {
+ vis.visit_ident(ident);
+}
+
+fn noop_visit_lifetime<T: MutVisitor>(Lifetime { id, ident }: &mut Lifetime, vis: &mut T) {
+ vis.visit_id(id);
+ vis.visit_ident(ident);
+}
+
+pub fn noop_visit_generics<T: MutVisitor>(generics: &mut Generics, vis: &mut T) {
+ let Generics { params, where_clause, span } = generics;
+ params.flat_map_in_place(|param| vis.flat_map_generic_param(param));
+ vis.visit_where_clause(where_clause);
+ vis.visit_span(span);
+}
+
+pub fn noop_visit_where_clause<T: MutVisitor>(wc: &mut WhereClause, vis: &mut T) {
+ let WhereClause { has_where_token: _, predicates, span } = wc;
+ visit_vec(predicates, |predicate| vis.visit_where_predicate(predicate));
+ vis.visit_span(span);
+}
+
+pub fn noop_visit_where_predicate<T: MutVisitor>(pred: &mut WherePredicate, vis: &mut T) {
+ match pred {
+ WherePredicate::BoundPredicate(bp) => {
+ let WhereBoundPredicate { span, bound_generic_params, bounded_ty, bounds } = bp;
+ vis.visit_span(span);
+ bound_generic_params.flat_map_in_place(|param| vis.flat_map_generic_param(param));
+ vis.visit_ty(bounded_ty);
+ visit_vec(bounds, |bound| vis.visit_param_bound(bound));
+ }
+ WherePredicate::RegionPredicate(rp) => {
+ let WhereRegionPredicate { span, lifetime, bounds } = rp;
+ vis.visit_span(span);
+ noop_visit_lifetime(lifetime, vis);
+ visit_vec(bounds, |bound| noop_visit_param_bound(bound, vis));
+ }
+ WherePredicate::EqPredicate(ep) => {
+ let WhereEqPredicate { id, span, lhs_ty, rhs_ty } = ep;
+ vis.visit_id(id);
+ vis.visit_span(span);
+ vis.visit_ty(lhs_ty);
+ vis.visit_ty(rhs_ty);
+ }
+ }
+}
+
+pub fn noop_visit_variant_data<T: MutVisitor>(vdata: &mut VariantData, vis: &mut T) {
+ match vdata {
+ VariantData::Struct(fields, ..) => {
+ fields.flat_map_in_place(|field| vis.flat_map_field_def(field));
+ }
+ VariantData::Tuple(fields, id) => {
+ fields.flat_map_in_place(|field| vis.flat_map_field_def(field));
+ vis.visit_id(id);
+ }
+ VariantData::Unit(id) => vis.visit_id(id),
+ }
+}
+
+pub fn noop_visit_trait_ref<T: MutVisitor>(TraitRef { path, ref_id }: &mut TraitRef, vis: &mut T) {
+ vis.visit_path(path);
+ vis.visit_id(ref_id);
+}
+
+pub fn noop_visit_poly_trait_ref<T: MutVisitor>(p: &mut PolyTraitRef, vis: &mut T) {
+ let PolyTraitRef { bound_generic_params, trait_ref, span } = p;
+ bound_generic_params.flat_map_in_place(|param| vis.flat_map_generic_param(param));
+ vis.visit_trait_ref(trait_ref);
+ vis.visit_span(span);
+}
+
+pub fn noop_flat_map_field_def<T: MutVisitor>(
+ mut fd: FieldDef,
+ visitor: &mut T,
+) -> SmallVec<[FieldDef; 1]> {
+ let FieldDef { span, ident, vis, id, ty, attrs, is_placeholder: _ } = &mut fd;
+ visitor.visit_span(span);
+ visit_opt(ident, |ident| visitor.visit_ident(ident));
+ visitor.visit_vis(vis);
+ visitor.visit_id(id);
+ visitor.visit_ty(ty);
+ visit_thin_attrs(attrs, visitor);
+ smallvec![fd]
+}
+
+pub fn noop_flat_map_expr_field<T: MutVisitor>(
+ mut f: ExprField,
+ vis: &mut T,
+) -> SmallVec<[ExprField; 1]> {
+ let ExprField { ident, expr, span, is_shorthand: _, attrs, id, is_placeholder: _ } = &mut f;
+ vis.visit_ident(ident);
+ vis.visit_expr(expr);
+ vis.visit_id(id);
+ vis.visit_span(span);
+ visit_thin_attrs(attrs, vis);
+ smallvec![f]
+}
+
+pub fn noop_visit_mt<T: MutVisitor>(MutTy { ty, mutbl: _ }: &mut MutTy, vis: &mut T) {
+ vis.visit_ty(ty);
+}
+
+pub fn noop_visit_block<T: MutVisitor>(block: &mut P<Block>, vis: &mut T) {
+ let Block { id, stmts, rules: _, span, tokens, could_be_bare_literal: _ } = block.deref_mut();
+ vis.visit_id(id);
+ stmts.flat_map_in_place(|stmt| vis.flat_map_stmt(stmt));
+ vis.visit_span(span);
+ visit_lazy_tts(tokens, vis);
+}
+
+pub fn noop_visit_item_kind<T: MutVisitor>(kind: &mut ItemKind, vis: &mut T) {
+ match kind {
+ ItemKind::ExternCrate(_orig_name) => {}
+ ItemKind::Use(use_tree) => vis.visit_use_tree(use_tree),
+ ItemKind::Static(ty, _, expr) => {
+ vis.visit_ty(ty);
+ visit_opt(expr, |expr| vis.visit_expr(expr));
+ }
+ ItemKind::Const(defaultness, ty, expr) => {
+ visit_defaultness(defaultness, vis);
+ vis.visit_ty(ty);
+ visit_opt(expr, |expr| vis.visit_expr(expr));
+ }
+ ItemKind::Fn(box Fn { defaultness, generics, sig, body }) => {
+ visit_defaultness(defaultness, vis);
+ visit_fn_sig(sig, vis);
+ vis.visit_generics(generics);
+ visit_opt(body, |body| vis.visit_block(body));
+ }
+ ItemKind::Mod(unsafety, mod_kind) => {
+ visit_unsafety(unsafety, vis);
+ match mod_kind {
+ ModKind::Loaded(items, _inline, ModSpans { inner_span, inject_use_span }) => {
+ vis.visit_span(inner_span);
+ vis.visit_span(inject_use_span);
+ items.flat_map_in_place(|item| vis.flat_map_item(item));
+ }
+ ModKind::Unloaded => {}
+ }
+ }
+ ItemKind::ForeignMod(nm) => vis.visit_foreign_mod(nm),
+ ItemKind::GlobalAsm(asm) => vis.visit_inline_asm(asm),
+ ItemKind::TyAlias(box TyAlias {
+ defaultness, generics, where_clauses, bounds, ty, ..
+ }) => {
+ visit_defaultness(defaultness, vis);
+ vis.visit_generics(generics);
+ vis.visit_span(&mut where_clauses.0.1);
+ vis.visit_span(&mut where_clauses.1.1);
+ visit_bounds(bounds, vis);
+ visit_opt(ty, |ty| vis.visit_ty(ty));
+ }
+ ItemKind::Enum(EnumDef { variants }, generics) => {
+ variants.flat_map_in_place(|variant| vis.flat_map_variant(variant));
+ vis.visit_generics(generics);
+ }
+ ItemKind::Struct(variant_data, generics) | ItemKind::Union(variant_data, generics) => {
+ vis.visit_variant_data(variant_data);
+ vis.visit_generics(generics);
+ }
+ ItemKind::Impl(box Impl {
+ defaultness,
+ unsafety,
+ generics,
+ constness,
+ polarity,
+ of_trait,
+ self_ty,
+ items,
+ }) => {
+ visit_defaultness(defaultness, vis);
+ visit_unsafety(unsafety, vis);
+ vis.visit_generics(generics);
+ visit_constness(constness, vis);
+ visit_polarity(polarity, vis);
+ visit_opt(of_trait, |trait_ref| vis.visit_trait_ref(trait_ref));
+ vis.visit_ty(self_ty);
+ items.flat_map_in_place(|item| vis.flat_map_impl_item(item));
+ }
+ ItemKind::Trait(box Trait { unsafety, is_auto: _, generics, bounds, items }) => {
+ visit_unsafety(unsafety, vis);
+ vis.visit_generics(generics);
+ visit_bounds(bounds, vis);
+ items.flat_map_in_place(|item| vis.flat_map_trait_item(item));
+ }
+ ItemKind::TraitAlias(generics, bounds) => {
+ vis.visit_generics(generics);
+ visit_bounds(bounds, vis);
+ }
+ ItemKind::MacCall(m) => vis.visit_mac_call(m),
+ ItemKind::MacroDef(def) => vis.visit_macro_def(def),
+ }
+}
+
+pub fn noop_flat_map_assoc_item<T: MutVisitor>(
+ mut item: P<AssocItem>,
+ visitor: &mut T,
+) -> SmallVec<[P<AssocItem>; 1]> {
+ let Item { id, ident, vis, attrs, kind, span, tokens } = item.deref_mut();
+ visitor.visit_id(id);
+ visitor.visit_ident(ident);
+ visitor.visit_vis(vis);
+ visit_attrs(attrs, visitor);
+ match kind {
+ AssocItemKind::Const(defaultness, ty, expr) => {
+ visit_defaultness(defaultness, visitor);
+ visitor.visit_ty(ty);
+ visit_opt(expr, |expr| visitor.visit_expr(expr));
+ }
+ AssocItemKind::Fn(box Fn { defaultness, generics, sig, body }) => {
+ visit_defaultness(defaultness, visitor);
+ visitor.visit_generics(generics);
+ visit_fn_sig(sig, visitor);
+ visit_opt(body, |body| visitor.visit_block(body));
+ }
+ AssocItemKind::TyAlias(box TyAlias {
+ defaultness,
+ generics,
+ where_clauses,
+ bounds,
+ ty,
+ ..
+ }) => {
+ visit_defaultness(defaultness, visitor);
+ visitor.visit_generics(generics);
+ visitor.visit_span(&mut where_clauses.0.1);
+ visitor.visit_span(&mut where_clauses.1.1);
+ visit_bounds(bounds, visitor);
+ visit_opt(ty, |ty| visitor.visit_ty(ty));
+ }
+ AssocItemKind::MacCall(mac) => visitor.visit_mac_call(mac),
+ }
+ visitor.visit_span(span);
+ visit_lazy_tts(tokens, visitor);
+ smallvec![item]
+}
+
+pub fn noop_visit_fn_header<T: MutVisitor>(header: &mut FnHeader, vis: &mut T) {
+ let FnHeader { unsafety, asyncness, constness, ext: _ } = header;
+ visit_constness(constness, vis);
+ vis.visit_asyncness(asyncness);
+ visit_unsafety(unsafety, vis);
+}
+
+pub fn noop_visit_crate<T: MutVisitor>(krate: &mut Crate, vis: &mut T) {
+ let Crate { attrs, items, spans, id, is_placeholder: _ } = krate;
+ vis.visit_id(id);
+ visit_attrs(attrs, vis);
+ items.flat_map_in_place(|item| vis.flat_map_item(item));
+ let ModSpans { inner_span, inject_use_span } = spans;
+ vis.visit_span(inner_span);
+ vis.visit_span(inject_use_span);
+}
+
+// Mutates one item into possibly many items.
+pub fn noop_flat_map_item<T: MutVisitor>(
+ mut item: P<Item>,
+ visitor: &mut T,
+) -> SmallVec<[P<Item>; 1]> {
+ let Item { ident, attrs, id, kind, vis, span, tokens } = item.deref_mut();
+ visitor.visit_ident(ident);
+ visit_attrs(attrs, visitor);
+ visitor.visit_id(id);
+ visitor.visit_item_kind(kind);
+ visitor.visit_vis(vis);
+ visitor.visit_span(span);
+ visit_lazy_tts(tokens, visitor);
+
+ smallvec![item]
+}
+
+pub fn noop_flat_map_foreign_item<T: MutVisitor>(
+ mut item: P<ForeignItem>,
+ visitor: &mut T,
+) -> SmallVec<[P<ForeignItem>; 1]> {
+ let Item { ident, attrs, id, kind, vis, span, tokens } = item.deref_mut();
+ visitor.visit_id(id);
+ visitor.visit_ident(ident);
+ visitor.visit_vis(vis);
+ visit_attrs(attrs, visitor);
+ match kind {
+ ForeignItemKind::Static(ty, _, expr) => {
+ visitor.visit_ty(ty);
+ visit_opt(expr, |expr| visitor.visit_expr(expr));
+ }
+ ForeignItemKind::Fn(box Fn { defaultness, generics, sig, body }) => {
+ visit_defaultness(defaultness, visitor);
+ visitor.visit_generics(generics);
+ visit_fn_sig(sig, visitor);
+ visit_opt(body, |body| visitor.visit_block(body));
+ }
+ ForeignItemKind::TyAlias(box TyAlias {
+ defaultness,
+ generics,
+ where_clauses,
+ bounds,
+ ty,
+ ..
+ }) => {
+ visit_defaultness(defaultness, visitor);
+ visitor.visit_generics(generics);
+ visitor.visit_span(&mut where_clauses.0.1);
+ visitor.visit_span(&mut where_clauses.1.1);
+ visit_bounds(bounds, visitor);
+ visit_opt(ty, |ty| visitor.visit_ty(ty));
+ }
+ ForeignItemKind::MacCall(mac) => visitor.visit_mac_call(mac),
+ }
+ visitor.visit_span(span);
+ visit_lazy_tts(tokens, visitor);
+ smallvec![item]
+}
+
+pub fn noop_visit_pat<T: MutVisitor>(pat: &mut P<Pat>, vis: &mut T) {
+ let Pat { id, kind, span, tokens } = pat.deref_mut();
+ vis.visit_id(id);
+ match kind {
+ PatKind::Wild | PatKind::Rest => {}
+ PatKind::Ident(_binding_mode, ident, sub) => {
+ vis.visit_ident(ident);
+ visit_opt(sub, |sub| vis.visit_pat(sub));
+ }
+ PatKind::Lit(e) => vis.visit_expr(e),
+ PatKind::TupleStruct(qself, path, elems) => {
+ vis.visit_qself(qself);
+ vis.visit_path(path);
+ visit_vec(elems, |elem| vis.visit_pat(elem));
+ }
+ PatKind::Path(qself, path) => {
+ vis.visit_qself(qself);
+ vis.visit_path(path);
+ }
+ PatKind::Struct(qself, path, fields, _etc) => {
+ vis.visit_qself(qself);
+ vis.visit_path(path);
+ fields.flat_map_in_place(|field| vis.flat_map_pat_field(field));
+ }
+ PatKind::Box(inner) => vis.visit_pat(inner),
+ PatKind::Ref(inner, _mutbl) => vis.visit_pat(inner),
+ PatKind::Range(e1, e2, Spanned { span: _, node: _ }) => {
+ visit_opt(e1, |e| vis.visit_expr(e));
+ visit_opt(e2, |e| vis.visit_expr(e));
+ vis.visit_span(span);
+ }
+ PatKind::Tuple(elems) | PatKind::Slice(elems) | PatKind::Or(elems) => {
+ visit_vec(elems, |elem| vis.visit_pat(elem))
+ }
+ PatKind::Paren(inner) => vis.visit_pat(inner),
+ PatKind::MacCall(mac) => vis.visit_mac_call(mac),
+ }
+ vis.visit_span(span);
+ visit_lazy_tts(tokens, vis);
+}
+
+pub fn noop_visit_anon_const<T: MutVisitor>(AnonConst { id, value }: &mut AnonConst, vis: &mut T) {
+ vis.visit_id(id);
+ vis.visit_expr(value);
+}
+
+pub fn noop_visit_inline_asm<T: MutVisitor>(asm: &mut InlineAsm, vis: &mut T) {
+ for (op, _) in &mut asm.operands {
+ match op {
+ InlineAsmOperand::In { expr, .. }
+ | InlineAsmOperand::Out { expr: Some(expr), .. }
+ | InlineAsmOperand::InOut { expr, .. } => vis.visit_expr(expr),
+ InlineAsmOperand::Out { expr: None, .. } => {}
+ InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ vis.visit_expr(in_expr);
+ if let Some(out_expr) = out_expr {
+ vis.visit_expr(out_expr);
+ }
+ }
+ InlineAsmOperand::Const { anon_const } => vis.visit_anon_const(anon_const),
+ InlineAsmOperand::Sym { sym } => vis.visit_inline_asm_sym(sym),
+ }
+ }
+}
+
+pub fn noop_visit_inline_asm_sym<T: MutVisitor>(
+ InlineAsmSym { id, qself, path }: &mut InlineAsmSym,
+ vis: &mut T,
+) {
+ vis.visit_id(id);
+ vis.visit_qself(qself);
+ vis.visit_path(path);
+}
+
+pub fn noop_visit_expr<T: MutVisitor>(
+ Expr { kind, id, span, attrs, tokens }: &mut Expr,
+ vis: &mut T,
+) {
+ match kind {
+ ExprKind::Box(expr) => vis.visit_expr(expr),
+ ExprKind::Array(exprs) => visit_exprs(exprs, vis),
+ ExprKind::ConstBlock(anon_const) => {
+ vis.visit_anon_const(anon_const);
+ }
+ ExprKind::Repeat(expr, count) => {
+ vis.visit_expr(expr);
+ vis.visit_anon_const(count);
+ }
+ ExprKind::Tup(exprs) => visit_exprs(exprs, vis),
+ ExprKind::Call(f, args) => {
+ vis.visit_expr(f);
+ visit_exprs(args, vis);
+ }
+ ExprKind::MethodCall(PathSegment { ident, id, args }, exprs, span) => {
+ vis.visit_ident(ident);
+ vis.visit_id(id);
+ visit_opt(args, |args| vis.visit_generic_args(args));
+ visit_exprs(exprs, vis);
+ vis.visit_span(span);
+ }
+ ExprKind::Binary(_binop, lhs, rhs) => {
+ vis.visit_expr(lhs);
+ vis.visit_expr(rhs);
+ }
+ ExprKind::Unary(_unop, ohs) => vis.visit_expr(ohs),
+ ExprKind::Cast(expr, ty) => {
+ vis.visit_expr(expr);
+ vis.visit_ty(ty);
+ }
+ ExprKind::Type(expr, ty) => {
+ vis.visit_expr(expr);
+ vis.visit_ty(ty);
+ }
+ ExprKind::AddrOf(_, _, ohs) => vis.visit_expr(ohs),
+ ExprKind::Let(pat, scrutinee, _) => {
+ vis.visit_pat(pat);
+ vis.visit_expr(scrutinee);
+ }
+ ExprKind::If(cond, tr, fl) => {
+ vis.visit_expr(cond);
+ vis.visit_block(tr);
+ visit_opt(fl, |fl| vis.visit_expr(fl));
+ }
+ ExprKind::While(cond, body, label) => {
+ vis.visit_expr(cond);
+ vis.visit_block(body);
+ visit_opt(label, |label| vis.visit_label(label));
+ }
+ ExprKind::ForLoop(pat, iter, body, label) => {
+ vis.visit_pat(pat);
+ vis.visit_expr(iter);
+ vis.visit_block(body);
+ visit_opt(label, |label| vis.visit_label(label));
+ }
+ ExprKind::Loop(body, label) => {
+ vis.visit_block(body);
+ visit_opt(label, |label| vis.visit_label(label));
+ }
+ ExprKind::Match(expr, arms) => {
+ vis.visit_expr(expr);
+ arms.flat_map_in_place(|arm| vis.flat_map_arm(arm));
+ }
+ ExprKind::Closure(binder, _capture_by, asyncness, _movability, decl, body, span) => {
+ vis.visit_closure_binder(binder);
+ vis.visit_asyncness(asyncness);
+ vis.visit_fn_decl(decl);
+ vis.visit_expr(body);
+ vis.visit_span(span);
+ }
+ ExprKind::Block(blk, label) => {
+ vis.visit_block(blk);
+ visit_opt(label, |label| vis.visit_label(label));
+ }
+ ExprKind::Async(_capture_by, node_id, body) => {
+ vis.visit_id(node_id);
+ vis.visit_block(body);
+ }
+ ExprKind::Await(expr) => vis.visit_expr(expr),
+ ExprKind::Assign(el, er, _) => {
+ vis.visit_expr(el);
+ vis.visit_expr(er);
+ }
+ ExprKind::AssignOp(_op, el, er) => {
+ vis.visit_expr(el);
+ vis.visit_expr(er);
+ }
+ ExprKind::Field(el, ident) => {
+ vis.visit_expr(el);
+ vis.visit_ident(ident);
+ }
+ ExprKind::Index(el, er) => {
+ vis.visit_expr(el);
+ vis.visit_expr(er);
+ }
+ ExprKind::Range(e1, e2, _lim) => {
+ visit_opt(e1, |e1| vis.visit_expr(e1));
+ visit_opt(e2, |e2| vis.visit_expr(e2));
+ }
+ ExprKind::Underscore => {}
+ ExprKind::Path(qself, path) => {
+ vis.visit_qself(qself);
+ vis.visit_path(path);
+ }
+ ExprKind::Break(label, expr) => {
+ visit_opt(label, |label| vis.visit_label(label));
+ visit_opt(expr, |expr| vis.visit_expr(expr));
+ }
+ ExprKind::Continue(label) => {
+ visit_opt(label, |label| vis.visit_label(label));
+ }
+ ExprKind::Ret(expr) => {
+ visit_opt(expr, |expr| vis.visit_expr(expr));
+ }
+ ExprKind::Yeet(expr) => {
+ visit_opt(expr, |expr| vis.visit_expr(expr));
+ }
+ ExprKind::InlineAsm(asm) => vis.visit_inline_asm(asm),
+ ExprKind::MacCall(mac) => vis.visit_mac_call(mac),
+ ExprKind::Struct(se) => {
+ let StructExpr { qself, path, fields, rest } = se.deref_mut();
+ vis.visit_qself(qself);
+ vis.visit_path(path);
+ fields.flat_map_in_place(|field| vis.flat_map_expr_field(field));
+ match rest {
+ StructRest::Base(expr) => vis.visit_expr(expr),
+ StructRest::Rest(_span) => {}
+ StructRest::None => {}
+ }
+ }
+ ExprKind::Paren(expr) => {
+ vis.visit_expr(expr);
+ }
+ ExprKind::Yield(expr) => {
+ visit_opt(expr, |expr| vis.visit_expr(expr));
+ }
+ ExprKind::Try(expr) => vis.visit_expr(expr),
+ ExprKind::TryBlock(body) => vis.visit_block(body),
+ ExprKind::Lit(_) | ExprKind::Err => {}
+ }
+ vis.visit_id(id);
+ vis.visit_span(span);
+ visit_thin_attrs(attrs, vis);
+ visit_lazy_tts(tokens, vis);
+}
+
+pub fn noop_filter_map_expr<T: MutVisitor>(mut e: P<Expr>, vis: &mut T) -> Option<P<Expr>> {
+ Some({
+ vis.visit_expr(&mut e);
+ e
+ })
+}
+
+pub fn noop_flat_map_stmt<T: MutVisitor>(
+ Stmt { kind, mut span, mut id }: Stmt,
+ vis: &mut T,
+) -> SmallVec<[Stmt; 1]> {
+ vis.visit_id(&mut id);
+ vis.visit_span(&mut span);
+ let stmts: SmallVec<_> = noop_flat_map_stmt_kind(kind, vis)
+ .into_iter()
+ .map(|kind| Stmt { id, kind, span })
+ .collect();
+ if stmts.len() > 1 {
+ panic!(
+ "cloning statement `NodeId`s is prohibited by default, \
+ the visitor should implement custom statement visiting"
+ );
+ }
+ stmts
+}
+
+pub fn noop_flat_map_stmt_kind<T: MutVisitor>(
+ kind: StmtKind,
+ vis: &mut T,
+) -> SmallVec<[StmtKind; 1]> {
+ match kind {
+ StmtKind::Local(mut local) => smallvec![StmtKind::Local({
+ vis.visit_local(&mut local);
+ local
+ })],
+ StmtKind::Item(item) => vis.flat_map_item(item).into_iter().map(StmtKind::Item).collect(),
+ StmtKind::Expr(expr) => vis.filter_map_expr(expr).into_iter().map(StmtKind::Expr).collect(),
+ StmtKind::Semi(expr) => vis.filter_map_expr(expr).into_iter().map(StmtKind::Semi).collect(),
+ StmtKind::Empty => smallvec![StmtKind::Empty],
+ StmtKind::MacCall(mut mac) => {
+ let MacCallStmt { mac: mac_, style: _, attrs, tokens } = mac.deref_mut();
+ vis.visit_mac_call(mac_);
+ visit_thin_attrs(attrs, vis);
+ visit_lazy_tts(tokens, vis);
+ smallvec![StmtKind::MacCall(mac)]
+ }
+ }
+}
+
+pub fn noop_visit_vis<T: MutVisitor>(visibility: &mut Visibility, vis: &mut T) {
+ match &mut visibility.kind {
+ VisibilityKind::Public | VisibilityKind::Inherited => {}
+ VisibilityKind::Restricted { path, id } => {
+ vis.visit_path(path);
+ vis.visit_id(id);
+ }
+ }
+ vis.visit_span(&mut visibility.span);
+}
+
+/// Some value for the AST node that is valid but possibly meaningless.
+pub trait DummyAstNode {
+ fn dummy() -> Self;
+}
+
+impl<T> DummyAstNode for Option<T> {
+ fn dummy() -> Self {
+ Default::default()
+ }
+}
+
+impl<T: DummyAstNode + 'static> DummyAstNode for P<T> {
+ fn dummy() -> Self {
+ P(DummyAstNode::dummy())
+ }
+}
+
+impl<T> DummyAstNode for ThinVec<T> {
+ fn dummy() -> Self {
+ Default::default()
+ }
+}
+
+impl DummyAstNode for Item {
+ fn dummy() -> Self {
+ Item {
+ attrs: Default::default(),
+ id: DUMMY_NODE_ID,
+ span: Default::default(),
+ vis: Visibility {
+ kind: VisibilityKind::Public,
+ span: Default::default(),
+ tokens: Default::default(),
+ },
+ ident: Ident::empty(),
+ kind: ItemKind::ExternCrate(None),
+ tokens: Default::default(),
+ }
+ }
+}
+
+impl DummyAstNode for Expr {
+ fn dummy() -> Self {
+ Expr {
+ id: DUMMY_NODE_ID,
+ kind: ExprKind::Err,
+ span: Default::default(),
+ attrs: Default::default(),
+ tokens: Default::default(),
+ }
+ }
+}
+
+impl DummyAstNode for Ty {
+ fn dummy() -> Self {
+ Ty {
+ id: DUMMY_NODE_ID,
+ kind: TyKind::Err,
+ span: Default::default(),
+ tokens: Default::default(),
+ }
+ }
+}
+
+impl DummyAstNode for Pat {
+ fn dummy() -> Self {
+ Pat {
+ id: DUMMY_NODE_ID,
+ kind: PatKind::Wild,
+ span: Default::default(),
+ tokens: Default::default(),
+ }
+ }
+}
+
+impl DummyAstNode for Stmt {
+ fn dummy() -> Self {
+ Stmt { id: DUMMY_NODE_ID, kind: StmtKind::Empty, span: Default::default() }
+ }
+}
+
+impl DummyAstNode for Block {
+ fn dummy() -> Self {
+ Block {
+ stmts: Default::default(),
+ id: DUMMY_NODE_ID,
+ rules: BlockCheckMode::Default,
+ span: Default::default(),
+ tokens: Default::default(),
+ could_be_bare_literal: Default::default(),
+ }
+ }
+}
+
+impl DummyAstNode for Crate {
+ fn dummy() -> Self {
+ Crate {
+ attrs: Default::default(),
+ items: Default::default(),
+ spans: Default::default(),
+ id: DUMMY_NODE_ID,
+ is_placeholder: Default::default(),
+ }
+ }
+}
diff --git a/compiler/rustc_ast/src/node_id.rs b/compiler/rustc_ast/src/node_id.rs
new file mode 100644
index 000000000..7f928cb57
--- /dev/null
+++ b/compiler/rustc_ast/src/node_id.rs
@@ -0,0 +1,40 @@
+use rustc_span::LocalExpnId;
+use std::fmt;
+
+rustc_index::newtype_index! {
+ /// Identifies an AST node.
+ ///
+ /// This identifies top-level definitions, expressions, and everything in between.
+ /// This is later turned into [`DefId`] and `HirId` for the HIR.
+ ///
+ /// [`DefId`]: rustc_span::def_id::DefId
+ pub struct NodeId {
+ DEBUG_FORMAT = "NodeId({})"
+ }
+}
+
+rustc_data_structures::define_id_collections!(NodeMap, NodeSet, NodeId);
+
+/// The [`NodeId`] used to represent the root of the crate.
+pub const CRATE_NODE_ID: NodeId = NodeId::from_u32(0);
+
+/// When parsing and at the beginning of doing expansions, we initially give all AST nodes
+/// this dummy AST [`NodeId`]. Then, during a later phase of expansion, we renumber them
+/// to have small, positive IDs.
+pub const DUMMY_NODE_ID: NodeId = NodeId::MAX;
+
+impl NodeId {
+ pub fn placeholder_from_expn_id(expn_id: LocalExpnId) -> Self {
+ NodeId::from_u32(expn_id.as_u32())
+ }
+
+ pub fn placeholder_to_expn_id(self) -> LocalExpnId {
+ LocalExpnId::from_u32(self.as_u32())
+ }
+}
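+
+// Sketch: the two placeholder methods are inverses by construction, since both
+// are plain `u32` round-trips:
+//
+// let expn = LocalExpnId::from_u32(3);
+// assert_eq!(NodeId::placeholder_from_expn_id(expn).placeholder_to_expn_id(), expn);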
+
+impl fmt::Display for NodeId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.as_u32(), f)
+ }
+}
diff --git a/compiler/rustc_ast/src/ptr.rs b/compiler/rustc_ast/src/ptr.rs
new file mode 100644
index 000000000..30481eddf
--- /dev/null
+++ b/compiler/rustc_ast/src/ptr.rs
@@ -0,0 +1,212 @@
+//! The AST pointer.
+//!
+//! Provides `P<T>`, a frozen owned smart pointer.
+//!
+//! # Motivations and benefits
+//!
+//! * **Identity**: sharing AST nodes is problematic for the various analysis
+//! passes (e.g., one may be able to bypass the borrow checker with a shared
+//! `ExprKind::AddrOf` node taking a mutable borrow).
+//!
+//! * **Immutability**: `P<T>` disallows mutating its inner `T`, unlike `Box<T>`
+//! (unless it contains an `Unsafe` interior, but that may be denied later).
+//! This mainly prevents mistakes, but also enforces a kind of "purity".
+//!
+//! * **Efficiency**: folding can reuse allocation space for `P<T>` and `Vec<T>`,
+//! the latter even when the input and output types differ (as it would be the
+//! case with arenas or a GADT AST using type parameters to toggle features).
+//!
+//! * **Maintainability**: `P<T>` provides a fixed interface - `Deref`,
+//! `and_then` and `map` - which can remain fully functional even if the
+//! implementation changes (using a special thread-local heap, for example).
+//! Moreover, a switch to, e.g., `P<'a, T>` would be easy and mostly automated.
+
+use std::fmt::{self, Debug, Display};
+use std::iter::FromIterator;
+use std::ops::{Deref, DerefMut};
+use std::{slice, vec};
+
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+/// An owned smart pointer.
+pub struct P<T: ?Sized> {
+ ptr: Box<T>,
+}
+
+/// Construct a `P<T>` from a `T` value.
+#[allow(non_snake_case)]
+pub fn P<T: 'static>(value: T) -> P<T> {
+ P { ptr: Box::new(value) }
+}
+
+impl<T: 'static> P<T> {
+ /// Move out of the pointer.
+ /// Intended for chaining transformations not covered by `map`.
+ pub fn and_then<U, F>(self, f: F) -> U
+ where
+ F: FnOnce(T) -> U,
+ {
+ f(*self.ptr)
+ }
+
+ /// Equivalent to `and_then(|x| x)`.
+ pub fn into_inner(self) -> T {
+ *self.ptr
+ }
+
+ /// Produce a new `P<T>` from `self` without reallocating.
+ pub fn map<F>(mut self, f: F) -> P<T>
+ where
+ F: FnOnce(T) -> T,
+ {
+ let x = f(*self.ptr);
+ *self.ptr = x;
+
+ self
+ }
+
+ /// Optionally produce a new `P<T>` from `self` without reallocating.
+ pub fn filter_map<F>(mut self, f: F) -> Option<P<T>>
+ where
+ F: FnOnce(T) -> Option<T>,
+ {
+ *self.ptr = f(*self.ptr)?;
+ Some(self)
+ }
+}
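+
+// A minimal usage sketch (assumed caller code, not part of this module):
+//
+// let p: P<i32> = P(1);
+// let p = p.map(|x| x + 1);             // transforms in place, reusing the box
+// assert_eq!(p.and_then(|x| x * 2), 4); // moves the value out of the pointer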
+
+impl<T: ?Sized> Deref for P<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.ptr
+ }
+}
+
+impl<T: ?Sized> DerefMut for P<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut self.ptr
+ }
+}
+
+impl<T: 'static + Clone> Clone for P<T> {
+ fn clone(&self) -> P<T> {
+ P((**self).clone())
+ }
+}
+
+impl<T: ?Sized + Debug> Debug for P<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Debug::fmt(&self.ptr, f)
+ }
+}
+
+impl<T: Display> Display for P<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Display::fmt(&**self, f)
+ }
+}
+
+impl<T> fmt::Pointer for P<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&self.ptr, f)
+ }
+}
+
+impl<D: Decoder, T: 'static + Decodable<D>> Decodable<D> for P<T> {
+ fn decode(d: &mut D) -> P<T> {
+ P(Decodable::decode(d))
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for P<T> {
+ fn encode(&self, s: &mut S) {
+ (**self).encode(s);
+ }
+}
+
+impl<T> P<[T]> {
+ pub const fn new() -> P<[T]> {
+ P { ptr: Box::default() }
+ }
+
+ #[inline(never)]
+ pub fn from_vec(v: Vec<T>) -> P<[T]> {
+ P { ptr: v.into_boxed_slice() }
+ }
+
+ #[inline(never)]
+ pub fn into_vec(self) -> Vec<T> {
+ self.ptr.into_vec()
+ }
+}
+
+impl<T> Default for P<[T]> {
+ /// Creates an empty `P<[T]>`.
+ fn default() -> P<[T]> {
+ P::new()
+ }
+}
+
+impl<T: Clone> Clone for P<[T]> {
+ fn clone(&self) -> P<[T]> {
+ P::from_vec(self.to_vec())
+ }
+}
+
+impl<T> From<Vec<T>> for P<[T]> {
+ fn from(v: Vec<T>) -> Self {
+ P::from_vec(v)
+ }
+}
+
+impl<T> Into<Vec<T>> for P<[T]> {
+ fn into(self) -> Vec<T> {
+ self.into_vec()
+ }
+}
+
+impl<T> FromIterator<T> for P<[T]> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> P<[T]> {
+ P::from_vec(iter.into_iter().collect())
+ }
+}
+
+impl<T> IntoIterator for P<[T]> {
+ type Item = T;
+ type IntoIter = vec::IntoIter<T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.into_vec().into_iter()
+ }
+}
+
+impl<'a, T> IntoIterator for &'a P<[T]> {
+ type Item = &'a T;
+ type IntoIter = slice::Iter<'a, T>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.ptr.into_iter()
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for P<[T]> {
+ fn encode(&self, s: &mut S) {
+ Encodable::encode(&**self, s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for P<[T]> {
+ fn decode(d: &mut D) -> P<[T]> {
+ P::from_vec(Decodable::decode(d))
+ }
+}
+
+impl<CTX, T> HashStable<CTX> for P<T>
+where
+ T: ?Sized + HashStable<CTX>,
+{
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ (**self).hash_stable(hcx, hasher);
+ }
+}
diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs
new file mode 100644
index 000000000..85d9687c6
--- /dev/null
+++ b/compiler/rustc_ast/src/token.rs
@@ -0,0 +1,851 @@
+pub use BinOpToken::*;
+pub use LitKind::*;
+pub use Nonterminal::*;
+pub use TokenKind::*;
+
+use crate::ast;
+use crate::ptr::P;
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::Lrc;
+use rustc_macros::HashStable_Generic;
+use rustc_span::symbol::{kw, sym};
+use rustc_span::symbol::{Ident, Symbol};
+use rustc_span::{self, edition::Edition, Span, DUMMY_SP};
+use std::borrow::Cow;
+use std::{fmt, mem};
+
+#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum CommentKind {
+ Line,
+ Block,
+}
+
+#[derive(Clone, PartialEq, Encodable, Decodable, Hash, Debug, Copy)]
+#[derive(HashStable_Generic)]
+pub enum BinOpToken {
+ Plus,
+ Minus,
+ Star,
+ Slash,
+ Percent,
+ Caret,
+ And,
+ Or,
+ Shl,
+ Shr,
+}
+
+/// Describes how a sequence of token trees is delimited.
+/// Cannot use `proc_macro::Delimiter` directly because this
+/// structure should implement some additional traits.
+/// The `None` variant is also renamed to `Invisible` to be
+/// less confusing and better convey the semantics.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[derive(Encodable, Decodable, Hash, HashStable_Generic)]
+pub enum Delimiter {
+ /// `( ... )`
+ Parenthesis,
+ /// `{ ... }`
+ Brace,
+ /// `[ ... ]`
+ Bracket,
+ /// `Ø ... Ø`
+ /// An invisible delimiter that may, for example, appear around tokens coming from a
+ /// "macro variable" `$var`. It is important to preserve operator priorities in cases like
+ /// `$var * 3` where `$var` is `1 + 2`.
+ /// Invisible delimiters might not survive roundtrip of a token stream through a string.
+ Invisible,
+}
+
+#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum LitKind {
+ Bool, // AST only, must never appear in a `Token`
+ Byte,
+ Char,
+ Integer,
+ Float,
+ Str,
+ StrRaw(u8), // raw string delimited by `n` hash symbols
+ ByteStr,
+ ByteStrRaw(u8), // raw byte string delimited by `n` hash symbols
+ Err,
+}
+
+/// A literal token.
+#[derive(Clone, Copy, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub struct Lit {
+ pub kind: LitKind,
+ pub symbol: Symbol,
+ pub suffix: Option<Symbol>,
+}
+
+impl fmt::Display for Lit {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let Lit { kind, symbol, suffix } = *self;
+ match kind {
+ Byte => write!(f, "b'{}'", symbol)?,
+ Char => write!(f, "'{}'", symbol)?,
+ Str => write!(f, "\"{}\"", symbol)?,
+ StrRaw(n) => write!(
+ f,
+ "r{delim}\"{string}\"{delim}",
+ delim = "#".repeat(n as usize),
+ string = symbol
+ )?,
+ ByteStr => write!(f, "b\"{}\"", symbol)?,
+ ByteStrRaw(n) => write!(
+ f,
+ "br{delim}\"{string}\"{delim}",
+ delim = "#".repeat(n as usize),
+ string = symbol
+ )?,
+ Integer | Float | Bool | Err => write!(f, "{}", symbol)?,
+ }
+
+ if let Some(suffix) = suffix {
+ write!(f, "{}", suffix)?;
+ }
+
+ Ok(())
+ }
+}
+
+impl LitKind {
+ /// An English article for the literal token kind.
+ pub fn article(self) -> &'static str {
+ match self {
+ Integer | Err => "an",
+ _ => "a",
+ }
+ }
+
+ pub fn descr(self) -> &'static str {
+ match self {
+ Bool => panic!("literal token contains `Lit::Bool`"),
+ Byte => "byte",
+ Char => "char",
+ Integer => "integer",
+ Float => "float",
+ Str | StrRaw(..) => "string",
+ ByteStr | ByteStrRaw(..) => "byte string",
+ Err => "error",
+ }
+ }
+
+ pub(crate) fn may_have_suffix(self) -> bool {
+ matches!(self, Integer | Float | Err)
+ }
+}
+
+impl Lit {
+ pub fn new(kind: LitKind, symbol: Symbol, suffix: Option<Symbol>) -> Lit {
+ Lit { kind, symbol, suffix }
+ }
+}
+
+pub fn ident_can_begin_expr(name: Symbol, span: Span, is_raw: bool) -> bool {
+ let ident_token = Token::new(Ident(name, is_raw), span);
+
+ !ident_token.is_reserved_ident()
+ || ident_token.is_path_segment_keyword()
+ || [
+ kw::Async,
+ kw::Do,
+ kw::Box,
+ kw::Break,
+ kw::Const,
+ kw::Continue,
+ kw::False,
+ kw::For,
+ kw::If,
+ kw::Let,
+ kw::Loop,
+ kw::Match,
+ kw::Move,
+ kw::Return,
+ kw::True,
+ kw::Try,
+ kw::Unsafe,
+ kw::While,
+ kw::Yield,
+ kw::Static,
+ ]
+ .contains(&name)
+}
+
+fn ident_can_begin_type(name: Symbol, span: Span, is_raw: bool) -> bool {
+ let ident_token = Token::new(Ident(name, is_raw), span);
+
+ !ident_token.is_reserved_ident()
+ || ident_token.is_path_segment_keyword()
+ || [kw::Underscore, kw::For, kw::Impl, kw::Fn, kw::Unsafe, kw::Extern, kw::Typeof, kw::Dyn]
+ .contains(&name)
+}
+
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum TokenKind {
+ /* Expression-operator symbols. */
+ Eq,
+ Lt,
+ Le,
+ EqEq,
+ Ne,
+ Ge,
+ Gt,
+ AndAnd,
+ OrOr,
+ Not,
+ Tilde,
+ BinOp(BinOpToken),
+ BinOpEq(BinOpToken),
+
+ /* Structural symbols */
+ At,
+ Dot,
+ DotDot,
+ DotDotDot,
+ DotDotEq,
+ Comma,
+ Semi,
+ Colon,
+ ModSep,
+ RArrow,
+ LArrow,
+ FatArrow,
+ Pound,
+ Dollar,
+ Question,
+ /// Used by proc macros for representing lifetimes; not generated by the lexer right now.
+ SingleQuote,
+ /// An opening delimiter (e.g., `{`).
+ OpenDelim(Delimiter),
+ /// A closing delimiter (e.g., `}`).
+ CloseDelim(Delimiter),
+
+ /* Literals */
+ Literal(Lit),
+
+ /// Identifier token.
+ /// Do not forget about `NtIdent` when you want to match on identifiers.
+ /// It's recommended to use `Token::{ident, uninterpolate, uninterpolated_span}` to
+ /// treat regular and interpolated identifiers in the same way.
+ Ident(Symbol, /* is_raw */ bool),
+ /// Lifetime identifier token.
+ /// Do not forget about `NtLifetime` when you want to match on lifetime identifiers.
+ /// It's recommended to use `Token::{lifetime, uninterpolate, uninterpolated_span}` to
+ /// treat regular and interpolated lifetime identifiers in the same way.
+ Lifetime(Symbol),
+
+ /// An embedded AST node, as produced by a macro. This only exists for
+ /// historical reasons. We'd like to get rid of it, for multiple reasons.
+ /// - It's conceptually very strange. Saying a token can contain an AST
+ /// node is like saying, in natural language, that a word can contain a
+ /// sentence.
+ /// - It requires special handling in a bunch of places in the parser.
+ /// - It prevents `Token` from implementing `Copy`.
+ /// It adds complexity and likely slows things down. Please don't add new
+ /// occurrences of this token kind!
+ Interpolated(Lrc<Nonterminal>),
+
+ /// A doc comment token.
+ /// `Symbol` is the doc comment's data excluding its "quotes" (`///`, `/**`, etc.),
+ /// similarly to symbols in string literal tokens.
+ DocComment(CommentKind, ast::AttrStyle, Symbol),
+
+ Eof,
+}
+
+// `TokenKind` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(TokenKind, 16);
+
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub struct Token {
+ pub kind: TokenKind,
+ pub span: Span,
+}
+
+impl TokenKind {
+ pub fn lit(kind: LitKind, symbol: Symbol, suffix: Option<Symbol>) -> TokenKind {
+ Literal(Lit::new(kind, symbol, suffix))
+ }
+
+ // An approximation to proc-macro-style single-character operators used by the rustc parser.
+ // If the operator token can be broken into two tokens, the first of which is single-character,
+ // then this function performs that operation; otherwise it returns `None`.
+ pub fn break_two_token_op(&self) -> Option<(TokenKind, TokenKind)> {
+ Some(match *self {
+ Le => (Lt, Eq),
+ EqEq => (Eq, Eq),
+ Ne => (Not, Eq),
+ Ge => (Gt, Eq),
+ AndAnd => (BinOp(And), BinOp(And)),
+ OrOr => (BinOp(Or), BinOp(Or)),
+ BinOp(Shl) => (Lt, Lt),
+ BinOp(Shr) => (Gt, Gt),
+ BinOpEq(Plus) => (BinOp(Plus), Eq),
+ BinOpEq(Minus) => (BinOp(Minus), Eq),
+ BinOpEq(Star) => (BinOp(Star), Eq),
+ BinOpEq(Slash) => (BinOp(Slash), Eq),
+ BinOpEq(Percent) => (BinOp(Percent), Eq),
+ BinOpEq(Caret) => (BinOp(Caret), Eq),
+ BinOpEq(And) => (BinOp(And), Eq),
+ BinOpEq(Or) => (BinOp(Or), Eq),
+ BinOpEq(Shl) => (Lt, Le),
+ BinOpEq(Shr) => (Gt, Ge),
+ DotDot => (Dot, Dot),
+ DotDotDot => (Dot, DotDot),
+ ModSep => (Colon, Colon),
+ RArrow => (BinOp(Minus), Gt),
+ LArrow => (Lt, BinOp(Minus)),
+ FatArrow => (Eq, Gt),
+ _ => return None,
+ })
+ }
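+
+ // For example: `Le.break_two_token_op() == Some((Lt, Eq))`,
+ // `BinOp(Shl).break_two_token_op() == Some((Lt, Lt))`, while already
+ // single-character tokens such as `Eq` yield `None`.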
+
+ /// Returns tokens that are likely to be typed accidentally instead of the current token.
+ /// Enables better error recovery when the wrong token is found.
+ pub fn similar_tokens(&self) -> Option<Vec<TokenKind>> {
+ match *self {
+ Comma => Some(vec![Dot, Lt, Semi]),
+ Semi => Some(vec![Colon, Comma]),
+ FatArrow => Some(vec![Eq, RArrow]),
+ _ => None,
+ }
+ }
+
+ pub fn should_end_const_arg(&self) -> bool {
+ matches!(self, Gt | Ge | BinOp(Shr) | BinOpEq(Shr))
+ }
+}
+
+impl Token {
+ pub fn new(kind: TokenKind, span: Span) -> Self {
+ Token { kind, span }
+ }
+
+ /// Some token that will be thrown away later.
+ pub fn dummy() -> Self {
+ Token::new(TokenKind::Question, DUMMY_SP)
+ }
+
+ /// Recovers a `Token` from an `Ident`. This creates a raw identifier if necessary.
+ pub fn from_ast_ident(ident: Ident) -> Self {
+ Token::new(Ident(ident.name, ident.is_raw_guess()), ident.span)
+ }
+
+ /// Return this token by value and leave a dummy token in its place.
+ pub fn take(&mut self) -> Self {
+ mem::replace(self, Token::dummy())
+ }
+
+ /// For interpolated tokens, returns a span of the fragment to which the interpolated
+ /// token refers. For all other tokens this is just a regular span.
+ /// It is particularly important to use this for identifiers and lifetimes
+ /// for which spans affect name resolution and edition checks.
+ /// Note that keywords are also identifiers, so they should use this
+ /// if they keep spans or perform edition checks.
+ pub fn uninterpolated_span(&self) -> Span {
+ match &self.kind {
+ Interpolated(nt) => nt.span(),
+ _ => self.span,
+ }
+ }
+
+ pub fn is_op(&self) -> bool {
+ !matches!(
+ self.kind,
+ OpenDelim(..)
+ | CloseDelim(..)
+ | Literal(..)
+ | DocComment(..)
+ | Ident(..)
+ | Lifetime(..)
+ | Interpolated(..)
+ | Eof
+ )
+ }
+
+ pub fn is_like_plus(&self) -> bool {
+ matches!(self.kind, BinOp(Plus) | BinOpEq(Plus))
+ }
+
+ /// Returns `true` if the token can appear at the start of an expression.
+ pub fn can_begin_expr(&self) -> bool {
+ match self.uninterpolate().kind {
+ Ident(name, is_raw) =>
+ ident_can_begin_expr(name, self.span, is_raw), // value name or keyword
+ OpenDelim(..) | // tuple, array or block
+ Literal(..) | // literal
+ Not | // operator not
+ BinOp(Minus) | // unary minus
+ BinOp(Star) | // dereference
+ BinOp(Or) | OrOr | // closure
+ BinOp(And) | // reference
+ AndAnd | // double reference
+ // DotDotDot is no longer supported, but we need some way to display the error
+ DotDot | DotDotDot | DotDotEq | // range notation
+ Lt | BinOp(Shl) | // associated path
+ ModSep | // global path
+ Lifetime(..) | // labeled loop
+ Pound => true, // expression attributes
+ Interpolated(ref nt) => matches!(**nt, NtLiteral(..) |
+ NtExpr(..) |
+ NtBlock(..) |
+ NtPath(..)),
+ _ => false,
+ }
+ }
+
+ /// Returns `true` if the token can appear at the start of a type.
+ pub fn can_begin_type(&self) -> bool {
+ match self.uninterpolate().kind {
+ Ident(name, is_raw) =>
+ ident_can_begin_type(name, self.span, is_raw), // type name or keyword
+ OpenDelim(Delimiter::Parenthesis) | // tuple
+ OpenDelim(Delimiter::Bracket) | // array
+ Not | // never
+ BinOp(Star) | // raw pointer
+ BinOp(And) | // reference
+ AndAnd | // double reference
+ Question | // maybe bound in trait object
+ Lifetime(..) | // lifetime bound in trait object
+ Lt | BinOp(Shl) | // associated path
+ ModSep => true, // global path
+ Interpolated(ref nt) => matches!(**nt, NtTy(..) | NtPath(..)),
+ _ => false,
+ }
+ }
+
+ /// Returns `true` if the token can appear at the start of a const param.
+ pub fn can_begin_const_arg(&self) -> bool {
+ match self.kind {
+ OpenDelim(Delimiter::Brace) => true,
+ Interpolated(ref nt) => matches!(**nt, NtExpr(..) | NtBlock(..) | NtLiteral(..)),
+ _ => self.can_begin_literal_maybe_minus(),
+ }
+ }
+
+ /// Returns `true` if the token can appear at the start of a generic bound.
+ pub fn can_begin_bound(&self) -> bool {
+ self.is_path_start()
+ || self.is_lifetime()
+ || self.is_keyword(kw::For)
+ || self == &Question
+ || self == &OpenDelim(Delimiter::Parenthesis)
+ }
+
+ /// Returns `true` if the token is any literal.
+ pub fn is_lit(&self) -> bool {
+ matches!(self.kind, Literal(..))
+ }
+
+ /// Returns `true` if the token is any literal, a minus (which can prefix a literal,
+ /// for example `-42`), or one of the boolean idents `true`/`false`.
+ ///
+ /// In other words, would this token be a valid start of `parse_literal_maybe_minus`?
+ ///
+ /// Keep this in sync with `Lit::from_token`, excluding unary negation.
+ pub fn can_begin_literal_maybe_minus(&self) -> bool {
+ match self.uninterpolate().kind {
+ Literal(..) | BinOp(Minus) => true,
+ Ident(name, false) if name.is_bool_lit() => true,
+ Interpolated(ref nt) => match &**nt {
+ NtLiteral(_) => true,
+ NtExpr(e) => match &e.kind {
+ ast::ExprKind::Lit(_) => true,
+ ast::ExprKind::Unary(ast::UnOp::Neg, e) => {
+ matches!(&e.kind, ast::ExprKind::Lit(_))
+ }
+ _ => false,
+ },
+ _ => false,
+ },
+ _ => false,
+ }
+ }
+
+ // A convenience function for matching on identifiers during parsing.
+ // Turns an interpolated identifier (`$i:ident`) or lifetime (`$l:lifetime`) token
+ // into the regular identifier or lifetime token it refers to;
+ // otherwise returns the original token.
+ pub fn uninterpolate(&self) -> Cow<'_, Token> {
+ match &self.kind {
+ Interpolated(nt) => match **nt {
+ NtIdent(ident, is_raw) => {
+ Cow::Owned(Token::new(Ident(ident.name, is_raw), ident.span))
+ }
+ NtLifetime(ident) => Cow::Owned(Token::new(Lifetime(ident.name), ident.span)),
+ _ => Cow::Borrowed(self),
+ },
+ _ => Cow::Borrowed(self),
+ }
+ }
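+
+ // Sketch (illustrative only): for an identifier captured by a macro as
+ // `$i:ident`, the parser sees an `Interpolated(NtIdent(..))` token, and
+ // `uninterpolate` turns it back into a plain one:
+ //
+ // let tok = Token::new(Interpolated(Lrc::new(NtIdent(ident, false))), span);
+ // assert!(matches!(tok.uninterpolate().kind, Ident(..)));
+ //
+ // where `ident` and `span` stand for the captured identifier and its span.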
+
+ /// Returns an identifier if this token is an identifier.
+ #[inline]
+ pub fn ident(&self) -> Option<(Ident, /* is_raw */ bool)> {
+ // We avoid using `Token::uninterpolate` here because it's slow.
+ match &self.kind {
+ &Ident(name, is_raw) => Some((Ident::new(name, self.span), is_raw)),
+ Interpolated(nt) => match **nt {
+ NtIdent(ident, is_raw) => Some((ident, is_raw)),
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+
+ /// Returns a lifetime identifier if this token is a lifetime.
+ #[inline]
+ pub fn lifetime(&self) -> Option<Ident> {
+ // We avoid using `Token::uninterpolate` here because it's slow.
+ match &self.kind {
+ &Lifetime(name) => Some(Ident::new(name, self.span)),
+ Interpolated(nt) => match **nt {
+ NtLifetime(ident) => Some(ident),
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+
+ /// Returns `true` if the token is an identifier.
+ pub fn is_ident(&self) -> bool {
+ self.ident().is_some()
+ }
+
+ /// Returns `true` if the token is a lifetime.
+ pub fn is_lifetime(&self) -> bool {
+ self.lifetime().is_some()
+ }
+
+ /// Returns `true` if the token is an identifier whose name is the given
+ /// string slice.
+ pub fn is_ident_named(&self, name: Symbol) -> bool {
+ self.ident().map_or(false, |(ident, _)| ident.name == name)
+ }
+
+ /// Returns `true` if the token is an interpolated path.
+ fn is_path(&self) -> bool {
+ if let Interpolated(ref nt) = self.kind && let NtPath(..) = **nt {
+ return true;
+ }
+ false
+ }
+
+ /// Would `maybe_whole_expr` in `parser.rs` return `Ok(..)`?
+ /// That is, is this a pre-parsed expression dropped into the token stream
+ /// (which happens while parsing the result of macro expansion)?
+ pub fn is_whole_expr(&self) -> bool {
+ if let Interpolated(ref nt) = self.kind
+ && let NtExpr(_) | NtLiteral(_) | NtPath(_) | NtBlock(_) = **nt
+ {
+ return true;
+ }
+
+ false
+ }
+
+ // Is the token an interpolated block (`$b:block`)?
+ pub fn is_whole_block(&self) -> bool {
+ if let Interpolated(ref nt) = self.kind && let NtBlock(..) = **nt {
+ return true;
+ }
+ false
+ }
+
+ /// Returns `true` if the token is either the `mut` or `const` keyword.
+ pub fn is_mutability(&self) -> bool {
+ self.is_keyword(kw::Mut) || self.is_keyword(kw::Const)
+ }
+
+ pub fn is_qpath_start(&self) -> bool {
+ self == &Lt || self == &BinOp(Shl)
+ }
+
+ pub fn is_path_start(&self) -> bool {
+ self == &ModSep
+ || self.is_qpath_start()
+ || self.is_path()
+ || self.is_path_segment_keyword()
+ || self.is_ident() && !self.is_reserved_ident()
+ }
+
+ /// Returns `true` if the token is a given keyword, `kw`.
+ pub fn is_keyword(&self, kw: Symbol) -> bool {
+ self.is_non_raw_ident_where(|id| id.name == kw)
+ }
+
+ pub fn is_path_segment_keyword(&self) -> bool {
+ self.is_non_raw_ident_where(Ident::is_path_segment_keyword)
+ }
+
+ // Returns true for reserved identifiers used internally for elided lifetimes,
+ // unnamed method parameters, crate root module, error recovery etc.
+ pub fn is_special_ident(&self) -> bool {
+ self.is_non_raw_ident_where(Ident::is_special)
+ }
+
+ /// Returns `true` if the token is a keyword used in the language.
+ pub fn is_used_keyword(&self) -> bool {
+ self.is_non_raw_ident_where(Ident::is_used_keyword)
+ }
+
+ /// Returns `true` if the token is a keyword reserved for possible future use.
+ pub fn is_unused_keyword(&self) -> bool {
+ self.is_non_raw_ident_where(Ident::is_unused_keyword)
+ }
+
+ /// Returns `true` if the token is either a special identifier or a keyword.
+ pub fn is_reserved_ident(&self) -> bool {
+ self.is_non_raw_ident_where(Ident::is_reserved)
+ }
+
+ /// Returns `true` if the token is the identifier `true` or `false`.
+ pub fn is_bool_lit(&self) -> bool {
+ self.is_non_raw_ident_where(|id| id.name.is_bool_lit())
+ }
+
+ pub fn is_numeric_lit(&self) -> bool {
+ matches!(
+ self.kind,
+ Literal(Lit { kind: LitKind::Integer, .. }) | Literal(Lit { kind: LitKind::Float, .. })
+ )
+ }
+
+ /// Returns `true` if the token is a non-raw identifier for which `pred` holds.
+ pub fn is_non_raw_ident_where(&self, pred: impl FnOnce(Ident) -> bool) -> bool {
+ match self.ident() {
+ Some((id, false)) => pred(id),
+ _ => false,
+ }
+ }
+
+ pub fn glue(&self, joint: &Token) -> Option<Token> {
+ let kind = match self.kind {
+ Eq => match joint.kind {
+ Eq => EqEq,
+ Gt => FatArrow,
+ _ => return None,
+ },
+ Lt => match joint.kind {
+ Eq => Le,
+ Lt => BinOp(Shl),
+ Le => BinOpEq(Shl),
+ BinOp(Minus) => LArrow,
+ _ => return None,
+ },
+ Gt => match joint.kind {
+ Eq => Ge,
+ Gt => BinOp(Shr),
+ Ge => BinOpEq(Shr),
+ _ => return None,
+ },
+ Not => match joint.kind {
+ Eq => Ne,
+ _ => return None,
+ },
+ BinOp(op) => match joint.kind {
+ Eq => BinOpEq(op),
+ BinOp(And) if op == And => AndAnd,
+ BinOp(Or) if op == Or => OrOr,
+ Gt if op == Minus => RArrow,
+ _ => return None,
+ },
+ Dot => match joint.kind {
+ Dot => DotDot,
+ DotDot => DotDotDot,
+ _ => return None,
+ },
+ DotDot => match joint.kind {
+ Dot => DotDotDot,
+ Eq => DotDotEq,
+ _ => return None,
+ },
+ Colon => match joint.kind {
+ Colon => ModSep,
+ _ => return None,
+ },
+ SingleQuote => match joint.kind {
+ Ident(name, false) => Lifetime(Symbol::intern(&format!("'{}", name))),
+ _ => return None,
+ },
+
+ Le | EqEq | Ne | Ge | AndAnd | OrOr | Tilde | BinOpEq(..) | At | DotDotDot
+ | DotDotEq | Comma | Semi | ModSep | RArrow | LArrow | FatArrow | Pound | Dollar
+ | Question | OpenDelim(..) | CloseDelim(..) | Literal(..) | Ident(..)
+ | Lifetime(..) | Interpolated(..) | DocComment(..) | Eof => return None,
+ };
+
+ Some(Token::new(kind, self.span.to(joint.span)))
+ }
+}
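+
+// Illustrative examples of `Token::glue` (not part of the upstream source),
+// following the match table above, where `sp` stands for some span:
+//
+// let lt = Token::new(Lt, sp);
+// lt.glue(&Token::new(Lt, sp)); // Some(`<<`): Lt + Lt => BinOp(Shl)
+// lt.glue(&Token::new(Eq, sp)); // Some(`<=`): Lt + Eq => Le
+// lt.glue(&Token::new(Comma, sp)); // None: `<` and `,` do not glue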
+
+impl PartialEq<TokenKind> for Token {
+ fn eq(&self, rhs: &TokenKind) -> bool {
+ self.kind == *rhs
+ }
+}
+
+#[derive(Clone, Encodable, Decodable)]
+/// For interpolation during macro expansion.
+pub enum Nonterminal {
+ NtItem(P<ast::Item>),
+ NtBlock(P<ast::Block>),
+ NtStmt(P<ast::Stmt>),
+ NtPat(P<ast::Pat>),
+ NtExpr(P<ast::Expr>),
+ NtTy(P<ast::Ty>),
+ NtIdent(Ident, /* is_raw */ bool),
+ NtLifetime(Ident),
+ NtLiteral(P<ast::Expr>),
+ /// Stuff inside brackets for attributes
+ NtMeta(P<ast::AttrItem>),
+ NtPath(P<ast::Path>),
+ NtVis(P<ast::Visibility>),
+}
+
+// `Nonterminal` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(Nonterminal, 16);
+
+#[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable)]
+pub enum NonterminalKind {
+ Item,
+ Block,
+ Stmt,
+ PatParam {
+ /// Keep track of whether the user wrote `:pat_param` explicitly, or wrote `:pat`
+ /// and we inferred `PatParam` from the edition of the span. This is used for
+ /// diagnostics.
+ },
+ PatWithOr,
+ Expr,
+ Ty,
+ Ident,
+ Lifetime,
+ Literal,
+ Meta,
+ Path,
+ Vis,
+ TT,
+}
+
+impl NonterminalKind {
+ /// The `edition` closure is used to get the edition for the given symbol. Doing
+ /// `span.edition()` is expensive, so we do it lazily.
+ pub fn from_symbol(
+ symbol: Symbol,
+ edition: impl FnOnce() -> Edition,
+ ) -> Option<NonterminalKind> {
+ Some(match symbol {
+ sym::item => NonterminalKind::Item,
+ sym::block => NonterminalKind::Block,
+ sym::stmt => NonterminalKind::Stmt,
+ sym::pat => match edition() {
+ Edition::Edition2015 | Edition::Edition2018 => {
+ NonterminalKind::PatParam { inferred: true }
+ }
+ Edition::Edition2021 | Edition::Edition2024 => NonterminalKind::PatWithOr,
+ },
+ sym::pat_param => NonterminalKind::PatParam { inferred: false },
+ sym::expr => NonterminalKind::Expr,
+ sym::ty => NonterminalKind::Ty,
+ sym::ident => NonterminalKind::Ident,
+ sym::lifetime => NonterminalKind::Lifetime,
+ sym::literal => NonterminalKind::Literal,
+ sym::meta => NonterminalKind::Meta,
+ sym::path => NonterminalKind::Path,
+ sym::vis => NonterminalKind::Vis,
+ sym::tt => NonterminalKind::TT,
+ _ => return None,
+ })
+ }
+ fn symbol(self) -> Symbol {
+ match self {
+ NonterminalKind::Item => sym::item,
+ NonterminalKind::Block => sym::block,
+ NonterminalKind::Stmt => sym::stmt,
+ NonterminalKind::PatParam { inferred: false } => sym::pat_param,
+ NonterminalKind::PatParam { inferred: true } | NonterminalKind::PatWithOr => sym::pat,
+ NonterminalKind::Expr => sym::expr,
+ NonterminalKind::Ty => sym::ty,
+ NonterminalKind::Ident => sym::ident,
+ NonterminalKind::Lifetime => sym::lifetime,
+ NonterminalKind::Literal => sym::literal,
+ NonterminalKind::Meta => sym::meta,
+ NonterminalKind::Path => sym::path,
+ NonterminalKind::Vis => sym::vis,
+ NonterminalKind::TT => sym::tt,
+ }
+ }
+}
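+
+// Illustrative consequence of `from_symbol` above: a matcher written as
+//
+// macro_rules! m { ($p:pat) => {} }
+//
+// accepts `m!(Some(_) | None)` only from edition 2021 onward (`PatWithOr`);
+// in editions 2015/2018, `$p:pat` behaves like `$p:pat_param` and rejects a
+// top-level `|`.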
+
+impl fmt::Display for NonterminalKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.symbol())
+ }
+}
+
+impl Nonterminal {
+ pub fn span(&self) -> Span {
+ match self {
+ NtItem(item) => item.span,
+ NtBlock(block) => block.span,
+ NtStmt(stmt) => stmt.span,
+ NtPat(pat) => pat.span,
+ NtExpr(expr) | NtLiteral(expr) => expr.span,
+ NtTy(ty) => ty.span,
+ NtIdent(ident, _) | NtLifetime(ident) => ident.span,
+ NtMeta(attr_item) => attr_item.span(),
+ NtPath(path) => path.span,
+ NtVis(vis) => vis.span,
+ }
+ }
+}
+
+impl PartialEq for Nonterminal {
+ fn eq(&self, rhs: &Self) -> bool {
+ match (self, rhs) {
+ (NtIdent(ident_lhs, is_raw_lhs), NtIdent(ident_rhs, is_raw_rhs)) => {
+ ident_lhs == ident_rhs && is_raw_lhs == is_raw_rhs
+ }
+ (NtLifetime(ident_lhs), NtLifetime(ident_rhs)) => ident_lhs == ident_rhs,
+ // FIXME: Assume that all "complex" nonterminal are not equal, we can't compare them
+ // correctly based on data from AST. This will prevent them from matching each other
+ // in macros. The comparison will become possible only when each nonterminal has an
+ // attached token stream from which it was parsed.
+ _ => false,
+ }
+ }
+}
+
+impl fmt::Debug for Nonterminal {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ NtItem(..) => f.pad("NtItem(..)"),
+ NtBlock(..) => f.pad("NtBlock(..)"),
+ NtStmt(..) => f.pad("NtStmt(..)"),
+ NtPat(..) => f.pad("NtPat(..)"),
+ NtExpr(..) => f.pad("NtExpr(..)"),
+ NtTy(..) => f.pad("NtTy(..)"),
+ NtIdent(..) => f.pad("NtIdent(..)"),
+ NtLiteral(..) => f.pad("NtLiteral(..)"),
+ NtMeta(..) => f.pad("NtMeta(..)"),
+ NtPath(..) => f.pad("NtPath(..)"),
+ NtVis(..) => f.pad("NtVis(..)"),
+ NtLifetime(..) => f.pad("NtLifetime(..)"),
+ }
+ }
+}
+
+impl<CTX> HashStable<CTX> for Nonterminal
+where
+ CTX: crate::HashStableContext,
+{
+ fn hash_stable(&self, _hcx: &mut CTX, _hasher: &mut StableHasher) {
+ panic!("interpolated tokens should not be present in the HIR")
+ }
+}
diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs
new file mode 100644
index 000000000..9e4a22e1f
--- /dev/null
+++ b/compiler/rustc_ast/src/tokenstream.rs
@@ -0,0 +1,681 @@
+//! # Token Streams
+//!
+//! `TokenStream`s represent syntactic objects before they are converted into ASTs.
+//! A `TokenStream` is, roughly speaking, a sequence of [`TokenTree`]s,
+//! which are themselves a single [`Token`] or a `Delimited` subsequence of tokens.
+//!
+//! ## Ownership
+//!
+//! `TokenStream`s are persistent data structures constructed as ropes with reference
+//! counted-children. In general, this means that calling an operation on a `TokenStream`
+//! (such as `slice`) produces an entirely new `TokenStream` from the borrowed reference to
+//! the original. This essentially coerces `TokenStream`s into "views" of their subparts,
+//! and a borrowed `TokenStream` is sufficient to build an owned `TokenStream` without taking
+//! ownership of the original.
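+//!
+//! For example (illustrative), cloning a `TokenStream` is cheap: it only bumps
+//! the reference count on the shared `Lrc<Vec<TokenTree>>` rather than copying
+//! the token trees themselves.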
+
+use crate::ast::StmtKind;
+use crate::ast_traits::{HasAttrs, HasSpan, HasTokens};
+use crate::token::{self, Delimiter, Nonterminal, Token, TokenKind};
+use crate::AttrVec;
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{self, Lrc};
+use rustc_macros::HashStable_Generic;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_span::{Span, DUMMY_SP};
+use smallvec::{smallvec, SmallVec};
+
+use std::{fmt, iter};
+
+/// When the main Rust parser encounters a syntax-extension invocation, it
+/// parses the arguments to the invocation as a token tree. This is a very
+/// loose structure, such that all sorts of different AST fragments can
+/// be passed to syntax extensions using a uniform type.
+///
+/// If the syntax extension is an MBE macro, it will attempt to match its
+/// LHS token tree against the provided token tree, and if it finds a
+/// match, will transcribe the RHS token tree, splicing in any captured
+/// `macro_parser::matched_nonterminals` into the `SubstNt`s it finds.
+///
+/// The RHS of an MBE macro is the only place `SubstNt`s are substituted.
+/// Nothing special happens to misnamed or misplaced `SubstNt`s.
+#[derive(Debug, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
+pub enum TokenTree {
+ /// A single token.
+ Token(Token, Spacing),
+ /// A delimited sequence of token trees.
+ Delimited(DelimSpan, Delimiter, TokenStream),
+}
+
+// This type is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(TokenTree, 32);
+
+// Ensure all fields of `TokenTree` are `Send` and `Sync`.
+#[cfg(parallel_compiler)]
+fn _dummy()
+where
+ Token: Send + Sync,
+ DelimSpan: Send + Sync,
+ Delimiter: Send + Sync,
+ TokenStream: Send + Sync,
+{
+}
+
+impl TokenTree {
+ /// Checks if this `TokenTree` is equal to the other, regardless of span information.
+ pub fn eq_unspanned(&self, other: &TokenTree) -> bool {
+ match (self, other) {
+ (TokenTree::Token(token, _), TokenTree::Token(token2, _)) => token.kind == token2.kind,
+ (TokenTree::Delimited(_, delim, tts), TokenTree::Delimited(_, delim2, tts2)) => {
+ delim == delim2 && tts.eq_unspanned(&tts2)
+ }
+ _ => false,
+ }
+ }
+
+ /// Retrieves the `TokenTree`'s span.
+ pub fn span(&self) -> Span {
+ match self {
+ TokenTree::Token(token, _) => token.span,
+ TokenTree::Delimited(sp, ..) => sp.entire(),
+ }
+ }
+
+ /// Modify the `TokenTree`'s span in-place.
+ pub fn set_span(&mut self, span: Span) {
+ match self {
+ TokenTree::Token(token, _) => token.span = span,
+ TokenTree::Delimited(dspan, ..) => *dspan = DelimSpan::from_single(span),
+ }
+ }
+
+ // Create a `TokenTree::Token` with alone spacing.
+ pub fn token_alone(kind: TokenKind, span: Span) -> TokenTree {
+ TokenTree::Token(Token::new(kind, span), Spacing::Alone)
+ }
+
+ // Create a `TokenTree::Token` with joint spacing.
+ pub fn token_joint(kind: TokenKind, span: Span) -> TokenTree {
+ TokenTree::Token(Token::new(kind, span), Spacing::Joint)
+ }
+
+ pub fn uninterpolate(self) -> TokenTree {
+ match self {
+ TokenTree::Token(token, spacing) => {
+ TokenTree::Token(token.uninterpolate().into_owned(), spacing)
+ }
+ tt => tt,
+ }
+ }
+}
+
+impl<CTX> HashStable<CTX> for TokenStream
+where
+ CTX: crate::HashStableContext,
+{
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ for sub_tt in self.trees() {
+ sub_tt.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+pub trait CreateTokenStream: sync::Send + sync::Sync {
+ fn create_token_stream(&self) -> AttrAnnotatedTokenStream;
+}
+
+impl CreateTokenStream for AttrAnnotatedTokenStream {
+ fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
+ self.clone()
+ }
+}
+
+/// A lazy version of [`TokenStream`], which defers creation
+/// of an actual `TokenStream` until it is needed.
+/// `Box` is here only to reduce the structure size.
+#[derive(Clone)]
+pub struct LazyTokenStream(Lrc<Box<dyn CreateTokenStream>>);
+
+impl LazyTokenStream {
+ pub fn new(inner: impl CreateTokenStream + 'static) -> LazyTokenStream {
+ LazyTokenStream(Lrc::new(Box::new(inner)))
+ }
+
+ pub fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
+ self.0.create_token_stream()
+ }
+}
+
+impl fmt::Debug for LazyTokenStream {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "LazyTokenStream({:?})", self.create_token_stream())
+ }
+}
+
+impl<S: Encoder> Encodable<S> for LazyTokenStream {
+ fn encode(&self, s: &mut S) {
+ // Used by AST json printing.
+ Encodable::encode(&self.create_token_stream(), s);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for LazyTokenStream {
+ fn decode(_d: &mut D) -> Self {
+ panic!("Attempted to decode LazyTokenStream");
+ }
+}
+
+impl<CTX> HashStable<CTX> for LazyTokenStream {
+ fn hash_stable(&self, _hcx: &mut CTX, _hasher: &mut StableHasher) {
+ panic!("Attempted to compute stable hash for LazyTokenStream");
+ }
+}
+
+/// An `AttrAnnotatedTokenStream` is similar to a `TokenStream`, but with extra
+/// information about the tokens for attribute targets. This is used
+/// during expansion to perform early cfg-expansion, and to process attributes
+/// during proc-macro invocations.
+#[derive(Clone, Debug, Default, Encodable, Decodable)]
+pub struct AttrAnnotatedTokenStream(pub Lrc<Vec<(AttrAnnotatedTokenTree, Spacing)>>);
+
+/// Like `TokenTree`, but for `AttrAnnotatedTokenStream`
+#[derive(Clone, Debug, Encodable, Decodable)]
+pub enum AttrAnnotatedTokenTree {
+ Token(Token),
+ Delimited(DelimSpan, Delimiter, AttrAnnotatedTokenStream),
+ /// Stores the attributes for an attribute target,
+ /// along with the tokens for that attribute target.
+ /// See `AttributesData` for more information
+ Attributes(AttributesData),
+}
+
+impl AttrAnnotatedTokenStream {
+ pub fn new(tokens: Vec<(AttrAnnotatedTokenTree, Spacing)>) -> AttrAnnotatedTokenStream {
+ AttrAnnotatedTokenStream(Lrc::new(tokens))
+ }
+
+ /// Converts this `AttrAnnotatedTokenStream` to a plain `TokenStream`.
+ /// During conversion, `AttrAnnotatedTokenTree::Attributes` get 'flattened'
+ /// back to a `TokenStream` of the form `outer_attr attr_target`.
+ /// If there are inner attributes, they are inserted into the proper
+ /// place in the attribute target tokens.
+ pub fn to_tokenstream(&self) -> TokenStream {
+ let trees: Vec<_> = self
+ .0
+ .iter()
+ .flat_map(|tree| match &tree.0 {
+ AttrAnnotatedTokenTree::Token(inner) => {
+ smallvec![TokenTree::Token(inner.clone(), tree.1)].into_iter()
+ }
+ AttrAnnotatedTokenTree::Delimited(span, delim, stream) => {
+ smallvec![TokenTree::Delimited(*span, *delim, stream.to_tokenstream()),]
+ .into_iter()
+ }
+ AttrAnnotatedTokenTree::Attributes(data) => {
+ let mut outer_attrs = Vec::new();
+ let mut inner_attrs = Vec::new();
+ for attr in &data.attrs {
+ match attr.style {
+ crate::AttrStyle::Outer => {
+ outer_attrs.push(attr);
+ }
+ crate::AttrStyle::Inner => {
+ inner_attrs.push(attr);
+ }
+ }
+ }
+
+ let mut target_tokens: Vec<_> = data
+ .tokens
+ .create_token_stream()
+ .to_tokenstream()
+ .0
+ .iter()
+ .cloned()
+ .collect();
+ if !inner_attrs.is_empty() {
+ let mut found = false;
+ // Check the last two trees (to account for a trailing semi)
+ for tree in target_tokens.iter_mut().rev().take(2) {
+ if let TokenTree::Delimited(span, delim, delim_tokens) = tree {
+ // Inner attributes are only supported on extern blocks, functions, impls,
+ // and modules. All of these have their inner attributes placed at
+ // the beginning of the rightmost outermost braced group:
+ // e.g. fn foo() { #![my_attr] }
+ //
+ // Therefore, we can insert them back into the right location
+ // without needing to do any extra position tracking.
+ //
+ // Note: Outline modules are an exception - they can
+ // have attributes like `#![my_attr]` at the start of a file.
+ // Support for custom attributes in this position is not
+ // properly implemented - we always synthesize fake tokens,
+ // so we never reach this code.
+
+ let mut builder = TokenStreamBuilder::new();
+ for inner_attr in inner_attrs {
+ builder.push(inner_attr.tokens().to_tokenstream());
+ }
+ builder.push(delim_tokens.clone());
+ *tree = TokenTree::Delimited(*span, *delim, builder.build());
+ found = true;
+ break;
+ }
+ }
+
+ assert!(
+ found,
+ "Failed to find trailing delimited group in: {:?}",
+ target_tokens
+ );
+ }
+ let mut flat: SmallVec<[_; 1]> = SmallVec::new();
+ for attr in outer_attrs {
+ // FIXME: Make this more efficient
+ flat.extend(attr.tokens().to_tokenstream().0.clone().iter().cloned());
+ }
+ flat.extend(target_tokens);
+ flat.into_iter()
+ }
+ })
+ .collect();
+ TokenStream::new(trees)
+ }
+}
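+
+// Rough sketch of the flattening above (illustrative): an `Attributes` node
+// holding `#[outer] struct S {}` is emitted as the plain token sequence
+//
+// # [ outer ] struct S { }
+//
+// while inner attributes are spliced back into the rightmost braced group of
+// the target tokens rather than being prepended.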
+
+/// Stores the tokens for an attribute target, along
+/// with its attributes.
+///
+/// This is constructed during parsing when we need to capture
+/// tokens.
+///
+/// For example, `#[cfg(FALSE)] struct Foo {}` would
+/// have an `attrs` field containing the `#[cfg(FALSE)]` attr,
+/// and a `tokens` field storing the (unparsed) tokens `struct Foo {}`
+#[derive(Clone, Debug, Encodable, Decodable)]
+pub struct AttributesData {
+ /// Attributes, both outer and inner.
+ /// These are stored in the original order that they were parsed in.
+ pub attrs: AttrVec,
+ /// The underlying tokens for the attribute target that `attrs`
+ /// are applied to
+ pub tokens: LazyTokenStream,
+}
+
+/// A `TokenStream` is an abstract sequence of tokens, organized into [`TokenTree`]s.
+///
+/// The goal is for procedural macros to work with `TokenStream`s and `TokenTree`s
+/// instead of a representation of the abstract syntax tree.
+/// Today's `TokenTree`s can still contain AST via `token::Interpolated` for
+/// backwards compatibility.
+#[derive(Clone, Debug, Default, Encodable, Decodable)]
+pub struct TokenStream(pub(crate) Lrc<Vec<TokenTree>>);
+
+// `TokenStream` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(TokenStream, 8);
+
+#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
+pub enum Spacing {
+ Alone,
+ Joint,
+}
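+
+// For example (illustrative): in `a += b` the `+` token is `Joint` with the
+// following `=`, while in `a + = b` it is `Alone`. `TokenStreamBuilder::build`
+// below relies on this, gluing only `Joint` tokens back together.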
+
+impl TokenStream {
+ /// Given a `TokenStream` containing two arguments that lack a separating comma,
+ /// returns a new `TokenStream` with the comma inserted, plus the span at which to
+ /// suggest it, for use in diagnostics.
+ pub fn add_comma(&self) -> Option<(TokenStream, Span)> {
+ // Used to suggest if a user writes `foo!(a b);`
+ let mut suggestion = None;
+ let mut iter = self.0.iter().enumerate().peekable();
+ while let Some((pos, ts)) = iter.next() {
+ if let Some((_, next)) = iter.peek() {
+ let sp = match (&ts, &next) {
+ (_, TokenTree::Token(Token { kind: token::Comma, .. }, _)) => continue,
+ (
+ TokenTree::Token(token_left, Spacing::Alone),
+ TokenTree::Token(token_right, _),
+ ) if ((token_left.is_ident() && !token_left.is_reserved_ident())
+ || token_left.is_lit())
+ && ((token_right.is_ident() && !token_right.is_reserved_ident())
+ || token_right.is_lit()) =>
+ {
+ token_left.span
+ }
+ (TokenTree::Delimited(sp, ..), _) => sp.entire(),
+ _ => continue,
+ };
+ let sp = sp.shrink_to_hi();
+ let comma = TokenTree::token_alone(token::Comma, sp);
+ suggestion = Some((pos, comma, sp));
+ }
+ }
+ if let Some((pos, comma, sp)) = suggestion {
+ let mut new_stream = Vec::with_capacity(self.0.len() + 1);
+ let parts = self.0.split_at(pos + 1);
+ new_stream.extend_from_slice(parts.0);
+ new_stream.push(comma);
+ new_stream.extend_from_slice(parts.1);
+ return Some((TokenStream::new(new_stream), sp));
+ }
+ None
+ }
+}
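+
+// Illustrative use of `add_comma` (not part of the upstream source): for the
+// malformed invocation `foo!(a b)`, a diagnostic can be built with
+//
+// if let Some((fixed, sp)) = stream.add_comma() {
+//     // `fixed` prints as `a, b`; `sp` is where to suggest inserting the `,`
+// }
+//
+// where `stream` stands for the parsed `foo!` argument tokens.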
+
+impl From<(AttrAnnotatedTokenTree, Spacing)> for AttrAnnotatedTokenStream {
+ fn from((tree, spacing): (AttrAnnotatedTokenTree, Spacing)) -> AttrAnnotatedTokenStream {
+ AttrAnnotatedTokenStream::new(vec![(tree, spacing)])
+ }
+}
+
+impl iter::FromIterator<TokenTree> for TokenStream {
+ fn from_iter<I: IntoIterator<Item = TokenTree>>(iter: I) -> Self {
+ TokenStream::new(iter.into_iter().collect::<Vec<TokenTree>>())
+ }
+}
+
+impl Eq for TokenStream {}
+
+impl PartialEq<TokenStream> for TokenStream {
+ fn eq(&self, other: &TokenStream) -> bool {
+ self.trees().eq(other.trees())
+ }
+}
+
+impl TokenStream {
+ pub fn new(streams: Vec<TokenTree>) -> TokenStream {
+ TokenStream(Lrc::new(streams))
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ pub fn len(&self) -> usize {
+ self.0.len()
+ }
+
+ pub fn trees(&self) -> CursorRef<'_> {
+ CursorRef::new(self)
+ }
+
+ pub fn into_trees(self) -> Cursor {
+ Cursor::new(self)
+ }
+
+ /// Compares two `TokenStream`s, checking equality without regarding span information.
+ pub fn eq_unspanned(&self, other: &TokenStream) -> bool {
+ let mut t1 = self.trees();
+ let mut t2 = other.trees();
+ for (t1, t2) in iter::zip(&mut t1, &mut t2) {
+ if !t1.eq_unspanned(&t2) {
+ return false;
+ }
+ }
+ t1.next().is_none() && t2.next().is_none()
+ }
+
+ pub fn map_enumerated<F: FnMut(usize, &TokenTree) -> TokenTree>(self, mut f: F) -> TokenStream {
+ TokenStream(Lrc::new(self.0.iter().enumerate().map(|(i, tree)| f(i, tree)).collect()))
+ }
+
+ fn opt_from_ast(node: &(impl HasAttrs + HasTokens)) -> Option<TokenStream> {
+ let tokens = node.tokens()?;
+ let attrs = node.attrs();
+ let attr_annotated = if attrs.is_empty() {
+ tokens.create_token_stream()
+ } else {
+ let attr_data = AttributesData { attrs: attrs.to_vec().into(), tokens: tokens.clone() };
+ AttrAnnotatedTokenStream::new(vec![(
+ AttrAnnotatedTokenTree::Attributes(attr_data),
+ Spacing::Alone,
+ )])
+ };
+ Some(attr_annotated.to_tokenstream())
+ }
+
+ // Create a token stream containing a single token with alone spacing.
+ pub fn token_alone(kind: TokenKind, span: Span) -> TokenStream {
+ TokenStream::new(vec![TokenTree::token_alone(kind, span)])
+ }
+
+ // Create a token stream containing a single token with joint spacing.
+ pub fn token_joint(kind: TokenKind, span: Span) -> TokenStream {
+ TokenStream::new(vec![TokenTree::token_joint(kind, span)])
+ }
+
+ // Create a token stream containing a single `Delimited`.
+ pub fn delimited(span: DelimSpan, delim: Delimiter, tts: TokenStream) -> TokenStream {
+ TokenStream::new(vec![TokenTree::Delimited(span, delim, tts)])
+ }
+
+ pub fn from_ast(node: &(impl HasAttrs + HasSpan + HasTokens + fmt::Debug)) -> TokenStream {
+ TokenStream::opt_from_ast(node)
+ .unwrap_or_else(|| panic!("missing tokens for node at {:?}: {:?}", node.span(), node))
+ }
+
+ pub fn from_nonterminal_ast(nt: &Nonterminal) -> TokenStream {
+ match nt {
+ Nonterminal::NtIdent(ident, is_raw) => {
+ TokenStream::token_alone(token::Ident(ident.name, *is_raw), ident.span)
+ }
+ Nonterminal::NtLifetime(ident) => {
+ TokenStream::token_alone(token::Lifetime(ident.name), ident.span)
+ }
+ Nonterminal::NtItem(item) => TokenStream::from_ast(item),
+ Nonterminal::NtBlock(block) => TokenStream::from_ast(block),
+ Nonterminal::NtStmt(stmt) if let StmtKind::Empty = stmt.kind => {
+ // FIXME: Properly collect tokens for empty statements.
+ TokenStream::token_alone(token::Semi, stmt.span)
+ }
+ Nonterminal::NtStmt(stmt) => TokenStream::from_ast(stmt),
+ Nonterminal::NtPat(pat) => TokenStream::from_ast(pat),
+ Nonterminal::NtTy(ty) => TokenStream::from_ast(ty),
+ Nonterminal::NtMeta(attr) => TokenStream::from_ast(attr),
+ Nonterminal::NtPath(path) => TokenStream::from_ast(path),
+ Nonterminal::NtVis(vis) => TokenStream::from_ast(vis),
+ Nonterminal::NtExpr(expr) | Nonterminal::NtLiteral(expr) => TokenStream::from_ast(expr),
+ }
+ }
+
+ fn flatten_token(token: &Token, spacing: Spacing) -> TokenTree {
+ match &token.kind {
+ token::Interpolated(nt) if let token::NtIdent(ident, is_raw) = **nt => {
+ TokenTree::Token(Token::new(token::Ident(ident.name, is_raw), ident.span), spacing)
+ }
+ token::Interpolated(nt) => TokenTree::Delimited(
+ DelimSpan::from_single(token.span),
+ Delimiter::Invisible,
+ TokenStream::from_nonterminal_ast(&nt).flattened(),
+ ),
+ _ => TokenTree::Token(token.clone(), spacing),
+ }
+ }
+
+ fn flatten_token_tree(tree: &TokenTree) -> TokenTree {
+ match tree {
+ TokenTree::Token(token, spacing) => TokenStream::flatten_token(token, *spacing),
+ TokenTree::Delimited(span, delim, tts) => {
+ TokenTree::Delimited(*span, *delim, tts.flattened())
+ }
+ }
+ }
+
+ #[must_use]
+ pub fn flattened(&self) -> TokenStream {
+ fn can_skip(stream: &TokenStream) -> bool {
+ stream.trees().all(|tree| match tree {
+ TokenTree::Token(token, _) => !matches!(token.kind, token::Interpolated(_)),
+ TokenTree::Delimited(_, _, inner) => can_skip(inner),
+ })
+ }
+
+ if can_skip(self) {
+ return self.clone();
+ }
+
+ self.trees().map(|tree| TokenStream::flatten_token_tree(tree)).collect()
+ }
+}
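+
+// Sketch of `flattened` (illustrative): a stream containing an interpolated
+// expression token, e.g. `Interpolated(NtExpr(..))` for `1 + 1`, becomes an
+// invisible-delimited group around the plain tokens `1 + 1`, while interpolated
+// identifiers and lifetimes are replaced by ordinary `Ident`/`Lifetime` tokens
+// (see `flatten_token` above).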
+
+// 99.5%+ of the time we have 1 or 2 elements in this vector.
+#[derive(Clone)]
+pub struct TokenStreamBuilder(SmallVec<[TokenStream; 2]>);
+
+impl TokenStreamBuilder {
+ pub fn new() -> TokenStreamBuilder {
+ TokenStreamBuilder(SmallVec::new())
+ }
+
+ pub fn push(&mut self, stream: TokenStream) {
+ self.0.push(stream);
+ }
+
+ pub fn build(self) -> TokenStream {
+ let mut streams = self.0;
+ match streams.len() {
+ 0 => TokenStream::default(),
+ 1 => streams.pop().unwrap(),
+ _ => {
+ // We will extend the first stream in `streams` with the
+ // elements from the subsequent streams. This requires using
+ // `make_mut()` on the first stream, and in practice this
+ // doesn't cause cloning 99.9% of the time.
+ //
+ // One very common use case is when `streams` has two elements,
+ // where the first stream has any number of elements within
+ // (often 1, but sometimes many more) and the second stream has
+ // a single element within.
+
+ // Determine how much the first stream will be extended.
+ // Needed to avoid quadratic blow up from on-the-fly
+ // reallocations (#57735).
+ let num_appends = streams.iter().skip(1).map(|ts| ts.len()).sum();
+
+ // Get the first stream, which will become the result stream.
+ // There are at least two streams here, so `unwrap` cannot fail.
+ let mut iter = streams.drain(..);
+ let mut res_stream_lrc = iter.next().unwrap().0;
+
+ // Append the subsequent elements to the result stream, after
+ // reserving space for them.
+ let res_vec_mut = Lrc::make_mut(&mut res_stream_lrc);
+ res_vec_mut.reserve(num_appends);
+ for stream in iter {
+ let stream_iter = stream.0.iter().cloned();
+
+ // If (a) `res_vec_mut` is not empty and the last tree
+ // within it is a token tree marked with `Joint`, and (b)
+ // `stream` is not empty and the first tree within it is a
+ // token tree, and (c) the two tokens can be glued
+ // together...
+ if let Some(TokenTree::Token(last_tok, Spacing::Joint)) = res_vec_mut.last()
+ && let Some(TokenTree::Token(tok, spacing)) = stream.0.first()
+ && let Some(glued_tok) = last_tok.glue(&tok)
+ {
+ // ...then overwrite the last token tree in
+ // `res_vec_mut` with the glued token, and skip the
+ // first token tree from `stream`.
+ *res_vec_mut.last_mut().unwrap() = TokenTree::Token(glued_tok, *spacing);
+ res_vec_mut.extend(stream_iter.skip(1));
+ } else {
+ // Append all of `stream`.
+ res_vec_mut.extend(stream_iter);
+ }
+ }
+
+ TokenStream(res_stream_lrc)
+ }
+ }
+ }
+}
+
+/// By-reference iterator over a [`TokenStream`].
+#[derive(Clone)]
+pub struct CursorRef<'t> {
+ stream: &'t TokenStream,
+ index: usize,
+}
+
+impl<'t> CursorRef<'t> {
+ fn new(stream: &'t TokenStream) -> Self {
+ CursorRef { stream, index: 0 }
+ }
+
+ pub fn look_ahead(&self, n: usize) -> Option<&TokenTree> {
+ self.stream.0.get(self.index + n)
+ }
+}
+
+impl<'t> Iterator for CursorRef<'t> {
+ type Item = &'t TokenTree;
+
+ fn next(&mut self) -> Option<&'t TokenTree> {
+ self.stream.0.get(self.index).map(|tree| {
+ self.index += 1;
+ tree
+ })
+ }
+}
+
+/// Owning by-value iterator over a [`TokenStream`].
+// FIXME: Many uses of this can be replaced with by-reference iterator to avoid clones.
+#[derive(Clone)]
+pub struct Cursor {
+ pub stream: TokenStream,
+ index: usize,
+}
+
+impl Iterator for Cursor {
+ type Item = TokenTree;
+
+ fn next(&mut self) -> Option<TokenTree> {
+ self.stream.0.get(self.index).map(|tree| {
+ self.index += 1;
+ tree.clone()
+ })
+ }
+}
+
+impl Cursor {
+ fn new(stream: TokenStream) -> Self {
+ Cursor { stream, index: 0 }
+ }
+
+ #[inline]
+ pub fn next_ref(&mut self) -> Option<&TokenTree> {
+ self.stream.0.get(self.index).map(|tree| {
+ self.index += 1;
+ tree
+ })
+ }
+
+ pub fn look_ahead(&self, n: usize) -> Option<&TokenTree> {
+ self.stream.0.get(self.index + n)
+ }
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Encodable, Decodable, HashStable_Generic)]
+pub struct DelimSpan {
+ pub open: Span,
+ pub close: Span,
+}
+
+impl DelimSpan {
+ pub fn from_single(sp: Span) -> Self {
+ DelimSpan { open: sp, close: sp }
+ }
+
+ pub fn from_pair(open: Span, close: Span) -> Self {
+ DelimSpan { open, close }
+ }
+
+ pub fn dummy() -> Self {
+ Self::from_single(DUMMY_SP)
+ }
+
+ pub fn entire(self) -> Span {
+ self.open.with_hi(self.close.hi())
+ }
+}
diff --git a/compiler/rustc_ast/src/util/classify.rs b/compiler/rustc_ast/src/util/classify.rs
new file mode 100644
index 000000000..6ea3db6d3
--- /dev/null
+++ b/compiler/rustc_ast/src/util/classify.rs
@@ -0,0 +1,52 @@
+//! Routines the parser uses to classify AST nodes
+
+// Predicates on exprs and stmts that the pretty-printer and parser use
+
+use crate::ast;
+
+/// Does this expression require a semicolon to be treated
+/// as a statement? The negation of this (can this expression
+/// be used as a statement without a semicolon?) is used
+/// as an early bail-out in the parser so that, for instance,
+/// if true {...} else {...}
+/// |x| 5
+/// isn't parsed as (if true {...} else {...} | x) | 5
+pub fn expr_requires_semi_to_be_stmt(e: &ast::Expr) -> bool {
+ !matches!(
+ e.kind,
+ ast::ExprKind::If(..)
+ | ast::ExprKind::Match(..)
+ | ast::ExprKind::Block(..)
+ | ast::ExprKind::While(..)
+ | ast::ExprKind::Loop(..)
+ | ast::ExprKind::ForLoop(..)
+ | ast::ExprKind::TryBlock(..)
+ )
+}
+
+/// If an expression ends with `}`, returns the innermost expression ending in the `}`.
+pub fn expr_trailing_brace(mut expr: &ast::Expr) -> Option<&ast::Expr> {
+ use ast::ExprKind::*;
+
+ loop {
+ match &expr.kind {
+ AddrOf(_, _, e)
+ | Assign(_, e, _)
+ | AssignOp(_, _, e)
+ | Binary(_, _, e)
+ | Box(e)
+ | Break(_, Some(e))
+ | Closure(.., e, _)
+ | Let(_, e, _)
+ | Range(_, Some(e), _)
+ | Ret(Some(e))
+ | Unary(_, e)
+ | Yield(Some(e)) => {
+ expr = e;
+ }
+ Async(..) | Block(..) | ForLoop(..) | If(..) | Loop(..) | Match(..) | Struct(..)
+ | TryBlock(..) | While(..) => break Some(expr),
+ _ => break None,
+ }
+ }
+}
diff --git a/compiler/rustc_ast/src/util/comments.rs b/compiler/rustc_ast/src/util/comments.rs
new file mode 100644
index 000000000..c96474ccb
--- /dev/null
+++ b/compiler/rustc_ast/src/util/comments.rs
@@ -0,0 +1,255 @@
+use crate::token::CommentKind;
+use rustc_span::source_map::SourceMap;
+use rustc_span::{BytePos, CharPos, FileName, Pos, Symbol};
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+pub enum CommentStyle {
+ /// No code on either side of each line of the comment
+ Isolated,
+ /// Code exists to the left of the comment
+ Trailing,
+ /// Code before /* foo */ and after the comment
+ Mixed,
+ /// Just a manual blank line "\n\n", for layout
+ BlankLine,
+}
+
+#[derive(Clone)]
+pub struct Comment {
+ pub style: CommentStyle,
+ pub lines: Vec<String>,
+ pub pos: BytePos,
+}
+
+/// A fast conservative estimate on whether the string can contain documentation links.
+/// A pair of square brackets `[]` must exist in the string, but we only search for the
+/// opening bracket because brackets always go in pairs in practice.
+#[inline]
+pub fn may_have_doc_links(s: &str) -> bool {
+ s.contains('[')
+}
+
+/// Makes a doc string more presentable to users.
+/// Used by rustdoc and perhaps other tools, but not by rustc.
+pub fn beautify_doc_string(data: Symbol, kind: CommentKind) -> Symbol {
+ fn get_vertical_trim(lines: &[&str]) -> Option<(usize, usize)> {
+ let mut i = 0;
+ let mut j = lines.len();
+ // first line of all-stars should be omitted
+ if !lines.is_empty() && lines[0].chars().all(|c| c == '*') {
+ i += 1;
+ }
+
+ // like the first, a last line of all stars should be omitted
+ if j > i && !lines[j - 1].is_empty() && lines[j - 1].chars().all(|c| c == '*') {
+ j -= 1;
+ }
+
+ if i != 0 || j != lines.len() { Some((i, j)) } else { None }
+ }
+
+ fn get_horizontal_trim<'a>(lines: &'a [&str], kind: CommentKind) -> Option<String> {
+ let mut i = usize::MAX;
+ let mut first = true;
+
+ // In case we have doc comments like `/**` or `/*!`, we want to remove stars if they are
+ // present. However, we first need to strip the empty lines so they don't get in the middle
+ // when we try to compute the "horizontal trim".
+ let lines = if kind == CommentKind::Block {
+ // Skip the first line unless it already starts with `*`.
+ let mut i = lines
+ .get(0)
+ .map(|l| if l.trim_start().starts_with('*') { 0 } else { 1 })
+ .unwrap_or(0);
+ let mut j = lines.len();
+
+ while i < j && lines[i].trim().is_empty() {
+ i += 1;
+ }
+ while j > i && lines[j - 1].trim().is_empty() {
+ j -= 1;
+ }
+ &lines[i..j]
+ } else {
+ lines
+ };
+
+ for line in lines {
+ for (j, c) in line.chars().enumerate() {
+ if j > i || !"* \t".contains(c) {
+ return None;
+ }
+ if c == '*' {
+ if first {
+ i = j;
+ first = false;
+ } else if i != j {
+ return None;
+ }
+ break;
+ }
+ }
+ if i >= line.len() {
+ return None;
+ }
+ }
+ if lines.is_empty() { None } else { Some(lines[0][..i].into()) }
+ }
+
+ let data_s = data.as_str();
+ if data_s.contains('\n') {
+ let mut lines = data_s.lines().collect::<Vec<&str>>();
+ let mut changes = false;
+ let lines = if let Some((i, j)) = get_vertical_trim(&lines) {
+ changes = true;
+ // remove whitespace-only lines from the start/end of lines
+ &mut lines[i..j]
+ } else {
+ &mut lines
+ };
+ if let Some(horizontal) = get_horizontal_trim(&lines, kind) {
+ changes = true;
+ // remove a "[ \t]*\*" block from each line, if possible
+ for line in lines.iter_mut() {
+ if let Some(tmp) = line.strip_prefix(&horizontal) {
+ *line = tmp;
+ if kind == CommentKind::Block
+ && (*line == "*" || line.starts_with("* ") || line.starts_with("**"))
+ {
+ *line = &line[1..];
+ }
+ }
+ }
+ }
+ if changes {
+ return Symbol::intern(&lines.join("\n"));
+ }
+ }
+ data
+}
+
+/// Returns `None` if the first `col` chars of `s` contain a non-whitespace char.
+/// Otherwise returns `Some(k)` where `k` is the byte offset just past that leading
+/// whitespace. Note that `k` may be equal to `s.len()`, i.e. one past the end of `s`.
+fn all_whitespace(s: &str, col: CharPos) -> Option<usize> {
+ let mut idx = 0;
+ for (i, ch) in s.char_indices().take(col.to_usize()) {
+ if !ch.is_whitespace() {
+ return None;
+ }
+ idx = i + ch.len_utf8();
+ }
+ Some(idx)
+}
+
+fn trim_whitespace_prefix(s: &str, col: CharPos) -> &str {
+ let len = s.len();
+ match all_whitespace(&s, col) {
+ Some(col) => {
+ if col < len {
+ &s[col..]
+ } else {
+ ""
+ }
+ }
+ None => s,
+ }
+}
+
+fn split_block_comment_into_lines(text: &str, col: CharPos) -> Vec<String> {
+ let mut res: Vec<String> = vec![];
+ let mut lines = text.lines();
+ // just push the first line
+ res.extend(lines.next().map(|it| it.to_string()));
+ // for other lines, strip common whitespace prefix
+ for line in lines {
+ res.push(trim_whitespace_prefix(line, col).to_string())
+ }
+ res
+}
+
+// it appears this function is called only from pprust... that's
+// probably not a good thing.
+pub fn gather_comments(sm: &SourceMap, path: FileName, src: String) -> Vec<Comment> {
+ let sm = SourceMap::new(sm.path_mapping().clone());
+ let source_file = sm.new_source_file(path, src);
+ let text = (*source_file.src.as_ref().unwrap()).clone();
+
+ let text: &str = text.as_str();
+ let start_bpos = source_file.start_pos;
+ let mut pos = 0;
+ let mut comments: Vec<Comment> = Vec::new();
+ let mut code_to_the_left = false;
+
+ if let Some(shebang_len) = rustc_lexer::strip_shebang(text) {
+ comments.push(Comment {
+ style: CommentStyle::Isolated,
+ lines: vec![text[..shebang_len].to_string()],
+ pos: start_bpos,
+ });
+ pos += shebang_len;
+ }
+
+ for token in rustc_lexer::tokenize(&text[pos..]) {
+ let token_text = &text[pos..pos + token.len as usize];
+ match token.kind {
+ rustc_lexer::TokenKind::Whitespace => {
+ if let Some(mut idx) = token_text.find('\n') {
+ code_to_the_left = false;
+ while let Some(next_newline) = &token_text[idx + 1..].find('\n') {
+ idx += 1 + next_newline;
+ comments.push(Comment {
+ style: CommentStyle::BlankLine,
+ lines: vec![],
+ pos: start_bpos + BytePos((pos + idx) as u32),
+ });
+ }
+ }
+ }
+ rustc_lexer::TokenKind::BlockComment { doc_style, .. } => {
+ if doc_style.is_none() {
+ let code_to_the_right = !matches!(
+ text[pos + token.len as usize..].chars().next(),
+ Some('\r' | '\n')
+ );
+ let style = match (code_to_the_left, code_to_the_right) {
+ (_, true) => CommentStyle::Mixed,
+ (false, false) => CommentStyle::Isolated,
+ (true, false) => CommentStyle::Trailing,
+ };
+
+ // Count the number of chars since the start of the line by rescanning.
+ let pos_in_file = start_bpos + BytePos(pos as u32);
+ let line_begin_in_file = source_file.line_begin_pos(pos_in_file);
+ let line_begin_pos = (line_begin_in_file - start_bpos).to_usize();
+ let col = CharPos(text[line_begin_pos..pos].chars().count());
+
+ let lines = split_block_comment_into_lines(token_text, col);
+ comments.push(Comment { style, lines, pos: pos_in_file })
+ }
+ }
+ rustc_lexer::TokenKind::LineComment { doc_style } => {
+ if doc_style.is_none() {
+ comments.push(Comment {
+ style: if code_to_the_left {
+ CommentStyle::Trailing
+ } else {
+ CommentStyle::Isolated
+ },
+ lines: vec![token_text.to_string()],
+ pos: start_bpos + BytePos(pos as u32),
+ })
+ }
+ }
+ _ => {
+ code_to_the_left = true;
+ }
+ }
+ pos += token.len as usize;
+ }
+
+ comments
+}
diff --git a/compiler/rustc_ast/src/util/comments/tests.rs b/compiler/rustc_ast/src/util/comments/tests.rs
new file mode 100644
index 000000000..11d50603a
--- /dev/null
+++ b/compiler/rustc_ast/src/util/comments/tests.rs
@@ -0,0 +1,61 @@
+use super::*;
+use rustc_span::create_default_session_globals_then;
+
+#[test]
+fn test_block_doc_comment_1() {
+ create_default_session_globals_then(|| {
+ let comment = "\n * Test \n ** Test\n * Test\n";
+ let stripped = beautify_doc_string(Symbol::intern(comment), CommentKind::Block);
+ assert_eq!(stripped.as_str(), " Test \n* Test\n Test");
+ })
+}
+
+#[test]
+fn test_block_doc_comment_2() {
+ create_default_session_globals_then(|| {
+ let comment = "\n * Test\n * Test\n";
+ let stripped = beautify_doc_string(Symbol::intern(comment), CommentKind::Block);
+ assert_eq!(stripped.as_str(), " Test\n Test");
+ })
+}
+
+#[test]
+fn test_block_doc_comment_3() {
+ create_default_session_globals_then(|| {
+ let comment = "\n let a: *i32;\n *a = 5;\n";
+ let stripped = beautify_doc_string(Symbol::intern(comment), CommentKind::Block);
+ assert_eq!(stripped.as_str(), "let a: *i32;\n*a = 5;");
+ })
+}
+
+#[test]
+fn test_line_doc_comment() {
+ create_default_session_globals_then(|| {
+ let stripped = beautify_doc_string(Symbol::intern(" test"), CommentKind::Line);
+ assert_eq!(stripped.as_str(), " test");
+ let stripped = beautify_doc_string(Symbol::intern("! test"), CommentKind::Line);
+ assert_eq!(stripped.as_str(), "! test");
+ let stripped = beautify_doc_string(Symbol::intern("test"), CommentKind::Line);
+ assert_eq!(stripped.as_str(), "test");
+ let stripped = beautify_doc_string(Symbol::intern("!test"), CommentKind::Line);
+ assert_eq!(stripped.as_str(), "!test");
+ })
+}
+
+#[test]
+fn test_doc_blocks() {
+ create_default_session_globals_then(|| {
+ let stripped =
+ beautify_doc_string(Symbol::intern(" # Returns\n *\n "), CommentKind::Block);
+ assert_eq!(stripped.as_str(), " # Returns\n\n");
+
+ let stripped = beautify_doc_string(
+ Symbol::intern("\n * # Returns\n *\n "),
+ CommentKind::Block,
+ );
+ assert_eq!(stripped.as_str(), " # Returns\n\n");
+
+ let stripped = beautify_doc_string(Symbol::intern("\n * a\n "), CommentKind::Block);
+ assert_eq!(stripped.as_str(), " a\n");
+ })
+}
diff --git a/compiler/rustc_ast/src/util/literal.rs b/compiler/rustc_ast/src/util/literal.rs
new file mode 100644
index 000000000..9c18f55c0
--- /dev/null
+++ b/compiler/rustc_ast/src/util/literal.rs
@@ -0,0 +1,336 @@
+//! Code related to parsing literals.
+
+use crate::ast::{self, Lit, LitKind};
+use crate::token::{self, Token};
+
+use rustc_lexer::unescape::{unescape_byte, unescape_char};
+use rustc_lexer::unescape::{unescape_byte_literal, unescape_literal, Mode};
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::Span;
+
+use std::ascii;
+use tracing::debug;
+
+pub enum LitError {
+ NotLiteral,
+ LexerError,
+ InvalidSuffix,
+ InvalidIntSuffix,
+ InvalidFloatSuffix,
+ NonDecimalFloat(u32),
+ IntTooLarge,
+}
+
+impl LitKind {
+ /// Converts a literal token into a semantic literal.
+ pub fn from_lit_token(lit: token::Lit) -> Result<LitKind, LitError> {
+ let token::Lit { kind, symbol, suffix } = lit;
+ if suffix.is_some() && !kind.may_have_suffix() {
+ return Err(LitError::InvalidSuffix);
+ }
+
+ Ok(match kind {
+ token::Bool => {
+ assert!(symbol.is_bool_lit());
+ LitKind::Bool(symbol == kw::True)
+ }
+ token::Byte => {
+ return unescape_byte(symbol.as_str())
+ .map(LitKind::Byte)
+ .map_err(|_| LitError::LexerError);
+ }
+ token::Char => {
+ return unescape_char(symbol.as_str())
+ .map(LitKind::Char)
+ .map_err(|_| LitError::LexerError);
+ }
+
+ // There are some valid suffixes for integer and float literals,
+ // so all the handling is done internally.
+ token::Integer => return integer_lit(symbol, suffix),
+ token::Float => return float_lit(symbol, suffix),
+
+ token::Str => {
+ // If there are no characters requiring special treatment we can
+ // reuse the symbol from the token. Otherwise, we must generate a
+ // new symbol because the string in the LitKind is different to the
+ // string in the token.
+ let s = symbol.as_str();
+ let symbol = if s.contains(&['\\', '\r']) {
+ let mut buf = String::with_capacity(s.len());
+ let mut error = Ok(());
+ // Force-inlining here is aggressive but the closure is
+ // called on every char in the string, so it can be
+ // hot in programs with many long strings.
+ unescape_literal(
+ &s,
+ Mode::Str,
+ &mut #[inline(always)]
+ |_, unescaped_char| match unescaped_char {
+ Ok(c) => buf.push(c),
+ Err(err) => {
+ if err.is_fatal() {
+ error = Err(LitError::LexerError);
+ }
+ }
+ },
+ );
+ error?;
+ Symbol::intern(&buf)
+ } else {
+ symbol
+ };
+ LitKind::Str(symbol, ast::StrStyle::Cooked)
+ }
+ token::StrRaw(n) => {
+ // Ditto.
+ let s = symbol.as_str();
+ let symbol =
+ if s.contains('\r') {
+ let mut buf = String::with_capacity(s.len());
+ let mut error = Ok(());
+ unescape_literal(&s, Mode::RawStr, &mut |_, unescaped_char| {
+ match unescaped_char {
+ Ok(c) => buf.push(c),
+ Err(err) => {
+ if err.is_fatal() {
+ error = Err(LitError::LexerError);
+ }
+ }
+ }
+ });
+ error?;
+ Symbol::intern(&buf)
+ } else {
+ symbol
+ };
+ LitKind::Str(symbol, ast::StrStyle::Raw(n))
+ }
+ token::ByteStr => {
+ let s = symbol.as_str();
+ let mut buf = Vec::with_capacity(s.len());
+ let mut error = Ok(());
+ unescape_byte_literal(&s, Mode::ByteStr, &mut |_, unescaped_byte| {
+ match unescaped_byte {
+ Ok(c) => buf.push(c),
+ Err(err) => {
+ if err.is_fatal() {
+ error = Err(LitError::LexerError);
+ }
+ }
+ }
+ });
+ error?;
+ LitKind::ByteStr(buf.into())
+ }
+ token::ByteStrRaw(_) => {
+ let s = symbol.as_str();
+ let bytes = if s.contains('\r') {
+ let mut buf = Vec::with_capacity(s.len());
+ let mut error = Ok(());
+ unescape_byte_literal(&s, Mode::RawByteStr, &mut |_, unescaped_byte| {
+ match unescaped_byte {
+ Ok(c) => buf.push(c),
+ Err(err) => {
+ if err.is_fatal() {
+ error = Err(LitError::LexerError);
+ }
+ }
+ }
+ });
+ error?;
+ buf
+ } else {
+ symbol.to_string().into_bytes()
+ };
+
+ LitKind::ByteStr(bytes.into())
+ }
+ token::Err => LitKind::Err(symbol),
+ })
+ }
+
+ /// Attempts to recover a token from a semantic literal.
+ /// This function is used when the original token doesn't exist (e.g. the literal is created
+ /// by an AST-based macro) or is unavailable (e.g. from HIR pretty-printing).
+ pub fn to_lit_token(&self) -> token::Lit {
+ let (kind, symbol, suffix) = match *self {
+ LitKind::Str(symbol, ast::StrStyle::Cooked) => {
+ // Don't re-intern unless the escaped string is different.
+ let s = symbol.as_str();
+ let escaped = s.escape_default().to_string();
+ let symbol = if s == escaped { symbol } else { Symbol::intern(&escaped) };
+ (token::Str, symbol, None)
+ }
+ LitKind::Str(symbol, ast::StrStyle::Raw(n)) => (token::StrRaw(n), symbol, None),
+ LitKind::ByteStr(ref bytes) => {
+ let string = bytes
+ .iter()
+ .cloned()
+ .flat_map(ascii::escape_default)
+ .map(Into::<char>::into)
+ .collect::<String>();
+ (token::ByteStr, Symbol::intern(&string), None)
+ }
+ LitKind::Byte(byte) => {
+ let string: String = ascii::escape_default(byte).map(Into::<char>::into).collect();
+ (token::Byte, Symbol::intern(&string), None)
+ }
+ LitKind::Char(ch) => {
+ let string: String = ch.escape_default().map(Into::<char>::into).collect();
+ (token::Char, Symbol::intern(&string), None)
+ }
+ LitKind::Int(n, ty) => {
+ let suffix = match ty {
+ ast::LitIntType::Unsigned(ty) => Some(ty.name()),
+ ast::LitIntType::Signed(ty) => Some(ty.name()),
+ ast::LitIntType::Unsuffixed => None,
+ };
+ (token::Integer, sym::integer(n), suffix)
+ }
+ LitKind::Float(symbol, ty) => {
+ let suffix = match ty {
+ ast::LitFloatType::Suffixed(ty) => Some(ty.name()),
+ ast::LitFloatType::Unsuffixed => None,
+ };
+ (token::Float, symbol, suffix)
+ }
+ LitKind::Bool(value) => {
+ let symbol = if value { kw::True } else { kw::False };
+ (token::Bool, symbol, None)
+ }
+ LitKind::Err(symbol) => (token::Err, symbol, None),
+ };
+
+ token::Lit::new(kind, symbol, suffix)
+ }
+}
+
+impl Lit {
+ /// Converts a literal token into an AST literal.
+ pub fn from_lit_token(token: token::Lit, span: Span) -> Result<Lit, LitError> {
+ Ok(Lit { token, kind: LitKind::from_lit_token(token)?, span })
+ }
+
+ /// Converts an arbitrary token into an AST literal.
+ ///
+ /// Keep this in sync with `Token::can_begin_literal_maybe_minus`, excluding unary negation.
+ pub fn from_token(token: &Token) -> Result<Lit, LitError> {
+ let lit = match token.uninterpolate().kind {
+ token::Ident(name, false) if name.is_bool_lit() => {
+ token::Lit::new(token::Bool, name, None)
+ }
+ token::Literal(lit) => lit,
+ token::Interpolated(ref nt) => {
+ if let token::NtExpr(expr) | token::NtLiteral(expr) = &**nt
+ && let ast::ExprKind::Lit(lit) = &expr.kind
+ {
+ return Ok(lit.clone());
+ }
+ return Err(LitError::NotLiteral);
+ }
+ _ => return Err(LitError::NotLiteral),
+ };
+
+ Lit::from_lit_token(lit, token.span)
+ }
+
+ /// Attempts to recover an AST literal from a semantic literal.
+ /// This function is used when the original token doesn't exist (e.g. the literal is created
+ /// by an AST-based macro) or is unavailable (e.g. from HIR pretty-printing).
+ pub fn from_lit_kind(kind: LitKind, span: Span) -> Lit {
+ Lit { token: kind.to_lit_token(), kind, span }
+ }
+
+ /// Losslessly convert an AST literal into a token.
+ pub fn to_token(&self) -> Token {
+ let kind = match self.token.kind {
+ token::Bool => token::Ident(self.token.symbol, false),
+ _ => token::Literal(self.token),
+ };
+ Token::new(kind, self.span)
+ }
+}
+
+fn strip_underscores(symbol: Symbol) -> Symbol {
+ // Do not allocate a new string unless necessary.
+ let s = symbol.as_str();
+ if s.contains('_') {
+ let mut s = s.to_string();
+ s.retain(|c| c != '_');
+ return Symbol::intern(&s);
+ }
+ symbol
+}
+
+fn filtered_float_lit(
+ symbol: Symbol,
+ suffix: Option<Symbol>,
+ base: u32,
+) -> Result<LitKind, LitError> {
+ debug!("filtered_float_lit: {:?}, {:?}, {:?}", symbol, suffix, base);
+ if base != 10 {
+ return Err(LitError::NonDecimalFloat(base));
+ }
+ Ok(match suffix {
+ Some(suf) => LitKind::Float(
+ symbol,
+ ast::LitFloatType::Suffixed(match suf {
+ sym::f32 => ast::FloatTy::F32,
+ sym::f64 => ast::FloatTy::F64,
+ _ => return Err(LitError::InvalidFloatSuffix),
+ }),
+ ),
+ None => LitKind::Float(symbol, ast::LitFloatType::Unsuffixed),
+ })
+}
+
+fn float_lit(symbol: Symbol, suffix: Option<Symbol>) -> Result<LitKind, LitError> {
+ debug!("float_lit: {:?}, {:?}", symbol, suffix);
+ filtered_float_lit(strip_underscores(symbol), suffix, 10)
+}
+
+fn integer_lit(symbol: Symbol, suffix: Option<Symbol>) -> Result<LitKind, LitError> {
+ debug!("integer_lit: {:?}, {:?}", symbol, suffix);
+ let symbol = strip_underscores(symbol);
+ let s = symbol.as_str();
+
+ let base = match s.as_bytes() {
+ [b'0', b'x', ..] => 16,
+ [b'0', b'o', ..] => 8,
+ [b'0', b'b', ..] => 2,
+ _ => 10,
+ };
+
+ let ty = match suffix {
+ Some(suf) => match suf {
+ sym::isize => ast::LitIntType::Signed(ast::IntTy::Isize),
+ sym::i8 => ast::LitIntType::Signed(ast::IntTy::I8),
+ sym::i16 => ast::LitIntType::Signed(ast::IntTy::I16),
+ sym::i32 => ast::LitIntType::Signed(ast::IntTy::I32),
+ sym::i64 => ast::LitIntType::Signed(ast::IntTy::I64),
+ sym::i128 => ast::LitIntType::Signed(ast::IntTy::I128),
+ sym::usize => ast::LitIntType::Unsigned(ast::UintTy::Usize),
+ sym::u8 => ast::LitIntType::Unsigned(ast::UintTy::U8),
+ sym::u16 => ast::LitIntType::Unsigned(ast::UintTy::U16),
+ sym::u32 => ast::LitIntType::Unsigned(ast::UintTy::U32),
+ sym::u64 => ast::LitIntType::Unsigned(ast::UintTy::U64),
+ sym::u128 => ast::LitIntType::Unsigned(ast::UintTy::U128),
+ // `1f64` and `2f32` etc. are valid float literals, and
+ // `fxxx` looks more like an invalid float literal than invalid integer literal.
+ _ if suf.as_str().starts_with('f') => return filtered_float_lit(symbol, suffix, base),
+ _ => return Err(LitError::InvalidIntSuffix),
+ },
+ _ => ast::LitIntType::Unsuffixed,
+ };
+
+ let s = &s[if base != 10 { 2 } else { 0 }..];
+ u128::from_str_radix(s, base).map(|i| LitKind::Int(i, ty)).map_err(|_| {
+ // Small bases are lexed as if they were base 10, e.g., the string
+ // might be `0b10201`. This will cause the conversion above to fail,
+ // but these kinds of errors are already reported by the lexer.
+ let from_lexer =
+ base < 10 && s.chars().any(|c| c.to_digit(10).map_or(false, |d| d >= base));
+ if from_lexer { LitError::LexerError } else { LitError::IntTooLarge }
+ })
+}
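+
+// Illustrative walk-through of `integer_lit` (not part of the upstream source):
+// for the token literal `1_000u32` the lexer hands us symbol `1_000` and suffix
+// `u32`; `strip_underscores` yields `1000`, the base is detected as 10, and the
+// result is `LitKind::Int(1000, LitIntType::Unsigned(UintTy::U32))`.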
diff --git a/compiler/rustc_ast/src/util/parser.rs b/compiler/rustc_ast/src/util/parser.rs
new file mode 100644
index 000000000..74b7fe9e2
--- /dev/null
+++ b/compiler/rustc_ast/src/util/parser.rs
@@ -0,0 +1,406 @@
+use crate::ast::{self, BinOpKind};
+use crate::token::{self, BinOpToken, Token};
+use rustc_span::symbol::kw;
+
+/// Associative operator with precedence.
+///
+/// This is the enum which specifies operator precedence and fixity to the parser.
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum AssocOp {
+ /// `+`
+ Add,
+ /// `-`
+ Subtract,
+ /// `*`
+ Multiply,
+ /// `/`
+ Divide,
+ /// `%`
+ Modulus,
+ /// `&&`
+ LAnd,
+ /// `||`
+ LOr,
+ /// `^`
+ BitXor,
+ /// `&`
+ BitAnd,
+ /// `|`
+ BitOr,
+ /// `<<`
+ ShiftLeft,
+ /// `>>`
+ ShiftRight,
+ /// `==`
+ Equal,
+ /// `<`
+ Less,
+ /// `<=`
+ LessEqual,
+ /// `!=`
+ NotEqual,
+ /// `>`
+ Greater,
+ /// `>=`
+ GreaterEqual,
+ /// `=`
+ Assign,
+ /// `?=` where `?` is one of the `BinOpToken`s
+ AssignOp(BinOpToken),
+ /// `as`
+ As,
+ /// `..` range
+ DotDot,
+ /// `..=` range
+ DotDotEq,
+ /// `:`
+ Colon,
+}
+
+#[derive(PartialEq, Debug)]
+pub enum Fixity {
+ /// The operator is left-associative
+ Left,
+ /// The operator is right-associative
+ Right,
+ /// The operator is not associative
+ None,
+}
+
+impl AssocOp {
+ /// Creates a new AssocOp from a token.
+ pub fn from_token(t: &Token) -> Option<AssocOp> {
+ use AssocOp::*;
+ match t.kind {
+ token::BinOpEq(k) => Some(AssignOp(k)),
+ token::Eq => Some(Assign),
+ token::BinOp(BinOpToken::Star) => Some(Multiply),
+ token::BinOp(BinOpToken::Slash) => Some(Divide),
+ token::BinOp(BinOpToken::Percent) => Some(Modulus),
+ token::BinOp(BinOpToken::Plus) => Some(Add),
+ token::BinOp(BinOpToken::Minus) => Some(Subtract),
+ token::BinOp(BinOpToken::Shl) => Some(ShiftLeft),
+ token::BinOp(BinOpToken::Shr) => Some(ShiftRight),
+ token::BinOp(BinOpToken::And) => Some(BitAnd),
+ token::BinOp(BinOpToken::Caret) => Some(BitXor),
+ token::BinOp(BinOpToken::Or) => Some(BitOr),
+ token::Lt => Some(Less),
+ token::Le => Some(LessEqual),
+ token::Ge => Some(GreaterEqual),
+ token::Gt => Some(Greater),
+ token::EqEq => Some(Equal),
+ token::Ne => Some(NotEqual),
+ token::AndAnd => Some(LAnd),
+ token::OrOr => Some(LOr),
+ token::DotDot => Some(DotDot),
+ token::DotDotEq => Some(DotDotEq),
+ // DotDotDot is no longer supported, but we need some way to display the error
+ token::DotDotDot => Some(DotDotEq),
+ token::Colon => Some(Colon),
+ // `<-` should probably be `< -`
+ token::LArrow => Some(Less),
+ _ if t.is_keyword(kw::As) => Some(As),
+ _ => None,
+ }
+ }
+
+ /// Creates a new AssocOp from ast::BinOpKind.
+ pub fn from_ast_binop(op: BinOpKind) -> Self {
+ use AssocOp::*;
+ match op {
+ BinOpKind::Lt => Less,
+ BinOpKind::Gt => Greater,
+ BinOpKind::Le => LessEqual,
+ BinOpKind::Ge => GreaterEqual,
+ BinOpKind::Eq => Equal,
+ BinOpKind::Ne => NotEqual,
+ BinOpKind::Mul => Multiply,
+ BinOpKind::Div => Divide,
+ BinOpKind::Rem => Modulus,
+ BinOpKind::Add => Add,
+ BinOpKind::Sub => Subtract,
+ BinOpKind::Shl => ShiftLeft,
+ BinOpKind::Shr => ShiftRight,
+ BinOpKind::BitAnd => BitAnd,
+ BinOpKind::BitXor => BitXor,
+ BinOpKind::BitOr => BitOr,
+ BinOpKind::And => LAnd,
+ BinOpKind::Or => LOr,
+ }
+ }
+
+ /// Gets the precedence of this operator
+ pub fn precedence(&self) -> usize {
+ use AssocOp::*;
+ match *self {
+ As | Colon => 14,
+ Multiply | Divide | Modulus => 13,
+ Add | Subtract => 12,
+ ShiftLeft | ShiftRight => 11,
+ BitAnd => 10,
+ BitXor => 9,
+ BitOr => 8,
+ Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual => 7,
+ LAnd => 6,
+ LOr => 5,
+ DotDot | DotDotEq => 4,
+ Assign | AssignOp(_) => 2,
+ }
+ }
+
+ /// Gets the fixity of this operator
+ pub fn fixity(&self) -> Fixity {
+ use AssocOp::*;
+ // NOTE: it is a bug to have operators with the same precedence but different fixities!
+ match *self {
+ Assign | AssignOp(_) => Fixity::Right,
+ As | Multiply | Divide | Modulus | Add | Subtract | ShiftLeft | ShiftRight | BitAnd
+ | BitXor | BitOr | Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual
+ | LAnd | LOr | Colon => Fixity::Left,
+ DotDot | DotDotEq => Fixity::None,
+ }
+ }
+
+ pub fn is_comparison(&self) -> bool {
+ use AssocOp::*;
+ match *self {
+ Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual => true,
+ Assign | AssignOp(_) | As | Multiply | Divide | Modulus | Add | Subtract
+ | ShiftLeft | ShiftRight | BitAnd | BitXor | BitOr | LAnd | LOr | DotDot | DotDotEq
+ | Colon => false,
+ }
+ }
+
+ pub fn is_assign_like(&self) -> bool {
+ use AssocOp::*;
+ match *self {
+ Assign | AssignOp(_) => true,
+ Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual | As | Multiply
+ | Divide | Modulus | Add | Subtract | ShiftLeft | ShiftRight | BitAnd | BitXor
+ | BitOr | LAnd | LOr | DotDot | DotDotEq | Colon => false,
+ }
+ }
+
+ pub fn to_ast_binop(&self) -> Option<BinOpKind> {
+ use AssocOp::*;
+ match *self {
+ Less => Some(BinOpKind::Lt),
+ Greater => Some(BinOpKind::Gt),
+ LessEqual => Some(BinOpKind::Le),
+ GreaterEqual => Some(BinOpKind::Ge),
+ Equal => Some(BinOpKind::Eq),
+ NotEqual => Some(BinOpKind::Ne),
+ Multiply => Some(BinOpKind::Mul),
+ Divide => Some(BinOpKind::Div),
+ Modulus => Some(BinOpKind::Rem),
+ Add => Some(BinOpKind::Add),
+ Subtract => Some(BinOpKind::Sub),
+ ShiftLeft => Some(BinOpKind::Shl),
+ ShiftRight => Some(BinOpKind::Shr),
+ BitAnd => Some(BinOpKind::BitAnd),
+ BitXor => Some(BinOpKind::BitXor),
+ BitOr => Some(BinOpKind::BitOr),
+ LAnd => Some(BinOpKind::And),
+ LOr => Some(BinOpKind::Or),
+ Assign | AssignOp(_) | As | DotDot | DotDotEq | Colon => None,
+ }
+ }
+
+ /// This operator could be used to follow a block unambiguously.
+ ///
+ /// This is used for error recovery at the moment, providing a suggestion to wrap blocks with
+ /// parentheses while having a high degree of confidence in the correctness of the suggestion.
+ pub fn can_continue_expr_unambiguously(&self) -> bool {
+ use AssocOp::*;
+ matches!(
+ self,
+ BitXor | // `{ 42 } ^ 3`
+ Assign | // `{ 42 } = { 42 }`
+ Divide | // `{ 42 } / 42`
+ Modulus | // `{ 42 } % 2`
+ ShiftRight | // `{ 42 } >> 2`
+ LessEqual | // `{ 42 } <= 3`
+ Greater | // `{ 42 } > 3`
+ GreaterEqual | // `{ 42 } >= 3`
+ AssignOp(_) | // `{ 42 } +=`
+ As | // `{ 42 } as usize`
+ // Equal | // `{ 42 } == { 42 }` Accepting these here would regress incorrect
+ // NotEqual | // `{ 42 } != { 42 }` struct literals parser recovery.
+ Colon, // `{ 42 }: usize`
+ )
+ }
+}
+
+pub const PREC_CLOSURE: i8 = -40;
+pub const PREC_JUMP: i8 = -30;
+pub const PREC_RANGE: i8 = -10;
+// The range 2..=14 is reserved for AssocOp binary operator precedences.
+pub const PREC_PREFIX: i8 = 50;
+pub const PREC_POSTFIX: i8 = 60;
+pub const PREC_PAREN: i8 = 99;
+pub const PREC_FORCE_PAREN: i8 = 100;
+
+#[derive(Debug, Clone, Copy)]
+pub enum ExprPrecedence {
+ Closure,
+ Break,
+ Continue,
+ Ret,
+ Yield,
+ Yeet,
+
+ Range,
+
+ Binary(BinOpKind),
+
+ Cast,
+ Type,
+
+ Assign,
+ AssignOp,
+
+ Box,
+ AddrOf,
+ Let,
+ Unary,
+
+ Call,
+ MethodCall,
+ Field,
+ Index,
+ Try,
+ InlineAsm,
+ Mac,
+
+ Array,
+ Repeat,
+ Tup,
+ Lit,
+ Path,
+ Paren,
+ If,
+ While,
+ ForLoop,
+ Loop,
+ Match,
+ ConstBlock,
+ Block,
+ TryBlock,
+ Struct,
+ Async,
+ Await,
+ Err,
+}
+
+impl ExprPrecedence {
+ pub fn order(self) -> i8 {
+ match self {
+ ExprPrecedence::Closure => PREC_CLOSURE,
+
+ ExprPrecedence::Break |
+ ExprPrecedence::Continue |
+ ExprPrecedence::Ret |
+ ExprPrecedence::Yield |
+ ExprPrecedence::Yeet => PREC_JUMP,
+
+ // `Range` claims to have higher precedence than `Assign`, but `x .. x = x` fails to
+ // parse, instead of parsing as `(x .. x) = x`. Giving `Range` a lower precedence
+ // ensures that `pprust` will add parentheses in the right places to get the desired
+ // parse.
+ ExprPrecedence::Range => PREC_RANGE,
+
+ // Binop-like expr kinds, handled by `AssocOp`.
+ ExprPrecedence::Binary(op) => AssocOp::from_ast_binop(op).precedence() as i8,
+ ExprPrecedence::Cast => AssocOp::As.precedence() as i8,
+ ExprPrecedence::Type => AssocOp::Colon.precedence() as i8,
+
+ ExprPrecedence::Assign |
+ ExprPrecedence::AssignOp => AssocOp::Assign.precedence() as i8,
+
+ // Unary, prefix
+ ExprPrecedence::Box |
+ ExprPrecedence::AddrOf |
+ // Here `let pats = expr` has `let pats =` as a "unary" prefix of `expr`.
+ // However, this is not exactly right. When `let _ = a` is the LHS of a binop we
+ // need parens sometimes. E.g. we can print `(let _ = a) && b` as `let _ = a && b`
+ // but we need to print `(let _ = a) < b` as-is with parens.
+ ExprPrecedence::Let |
+ ExprPrecedence::Unary => PREC_PREFIX,
+
+ // Unary, postfix
+ ExprPrecedence::Await |
+ ExprPrecedence::Call |
+ ExprPrecedence::MethodCall |
+ ExprPrecedence::Field |
+ ExprPrecedence::Index |
+ ExprPrecedence::Try |
+ ExprPrecedence::InlineAsm |
+ ExprPrecedence::Mac => PREC_POSTFIX,
+
+ // Never need parens
+ ExprPrecedence::Array |
+ ExprPrecedence::Repeat |
+ ExprPrecedence::Tup |
+ ExprPrecedence::Lit |
+ ExprPrecedence::Path |
+ ExprPrecedence::Paren |
+ ExprPrecedence::If |
+ ExprPrecedence::While |
+ ExprPrecedence::ForLoop |
+ ExprPrecedence::Loop |
+ ExprPrecedence::Match |
+ ExprPrecedence::ConstBlock |
+ ExprPrecedence::Block |
+ ExprPrecedence::TryBlock |
+ ExprPrecedence::Async |
+ ExprPrecedence::Struct |
+ ExprPrecedence::Err => PREC_PAREN,
+ }
+ }
+}
+
+/// In `let p = e`, operators with precedence `<=` this one require parentheses in `e`.
+pub fn prec_let_scrutinee_needs_par() -> usize {
+ AssocOp::LAnd.precedence()
+}
+
+/// Suppose we have `let _ = e` and the `order` of `e`.
+/// Is the `order` such that `e` in `let _ = e` needs parentheses when it is on the RHS?
+///
+/// Conversely, suppose that we have `(let _ = a) OP b` and `order` is that of `OP`.
+/// Can we print this as `let _ = a OP b`?
+pub fn needs_par_as_let_scrutinee(order: i8) -> bool {
+ order <= prec_let_scrutinee_needs_par() as i8
+}
+
+/// Expressions that syntactically contain an "exterior" struct literal, i.e., one not surrounded
+/// by any parens or other delimiters, e.g., `X { y: 1 }`, `X { y: 1 }.method()`, `foo == X { y: 1 }`
+/// and `X { y: 1 } == foo` all do, but `(X { y: 1 }) == foo` does not.
+pub fn contains_exterior_struct_lit(value: &ast::Expr) -> bool {
+ match value.kind {
+ ast::ExprKind::Struct(..) => true,
+
+ ast::ExprKind::Assign(ref lhs, ref rhs, _)
+ | ast::ExprKind::AssignOp(_, ref lhs, ref rhs)
+ | ast::ExprKind::Binary(_, ref lhs, ref rhs) => {
+ // X { y: 1 } + X { y: 2 }
+ contains_exterior_struct_lit(&lhs) || contains_exterior_struct_lit(&rhs)
+ }
+ ast::ExprKind::Await(ref x)
+ | ast::ExprKind::Unary(_, ref x)
+ | ast::ExprKind::Cast(ref x, _)
+ | ast::ExprKind::Type(ref x, _)
+ | ast::ExprKind::Field(ref x, _)
+ | ast::ExprKind::Index(ref x, _) => {
+ // &X { y: 1 }, X { y: 1 }.y
+ contains_exterior_struct_lit(&x)
+ }
+
+ ast::ExprKind::MethodCall(.., ref exprs, _) => {
+ // X { y: 1 }.bar(...)
+ contains_exterior_struct_lit(&exprs[0])
+ }
+
+ _ => false,
+ }
+}
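
The precedence and fixity tables above are exactly what a precedence-climbing
parser consumes. The following is a minimal self-contained sketch of that loop
over a toy one-digit grammar (hypothetical, not rustc's real parser):

    #[derive(Debug)]
    enum Expr {
        Num(i64),
        Bin(char, Box<Expr>, Box<Expr>),
    }

    fn prec(op: char) -> u8 {
        match op {
            '*' | '/' => 13,
            '+' | '-' => 12,
            _ => 0,
        }
    }

    fn parse_expr(tokens: &mut std::iter::Peekable<std::str::Chars<'_>>, min_prec: u8) -> Expr {
        // Parse a primary expression: a single digit, for simplicity.
        let mut lhs = Expr::Num(tokens.next().unwrap().to_digit(10).unwrap() as i64);
        // Keep consuming operators whose precedence is at least `min_prec`.
        while let Some(&op) = tokens.peek() {
            let p = prec(op);
            if p < min_prec {
                break;
            }
            tokens.next();
            // Left-associative: the right operand must bind strictly tighter.
            let rhs = parse_expr(tokens, p + 1);
            lhs = Expr::Bin(op, Box::new(lhs), Box::new(rhs));
        }
        lhs
    }

    fn main() {
        let mut it = "1+2*3".chars().peekable();
        // Bin('+', Num(1), Bin('*', Num(2), Num(3)))
        println!("{:?}", parse_expr(&mut it, 0));
    }

Recursing with `p + 1` is what makes `Fixity::Left` operators group to the left;
a right-associative operator such as `Assign` would recurse with `p` instead.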
diff --git a/compiler/rustc_ast/src/util/unicode.rs b/compiler/rustc_ast/src/util/unicode.rs
new file mode 100644
index 000000000..f009f7b30
--- /dev/null
+++ b/compiler/rustc_ast/src/util/unicode.rs
@@ -0,0 +1,35 @@
+pub const TEXT_FLOW_CONTROL_CHARS: &[char] = &[
+ '\u{202A}', '\u{202B}', '\u{202D}', '\u{202E}', '\u{2066}', '\u{2067}', '\u{2068}', '\u{202C}',
+ '\u{2069}',
+];
+
+#[inline]
+pub fn contains_text_flow_control_chars(s: &str) -> bool {
+ // Char - UTF-8
+ // U+202A - E2 80 AA
+ // U+202B - E2 80 AB
+ // U+202C - E2 80 AC
+ // U+202D - E2 80 AD
+ // U+202E - E2 80 AE
+ // U+2066 - E2 81 A6
+ // U+2067 - E2 81 A7
+ // U+2068 - E2 81 A8
+ // U+2069 - E2 81 A9
+ let mut bytes = s.as_bytes();
+ loop {
+ match core::slice::memchr::memchr(0xE2, &bytes) {
+ Some(idx) => {
+ // The bytes are valid UTF-8, so 0xE2 must be followed by two more bytes.
+ let ch = &bytes[idx..idx + 3];
+ match ch {
+ [_, 0x80, 0xAA..=0xAE] | [_, 0x81, 0xA6..=0xA9] => break true,
+ _ => {}
+ }
+ bytes = &bytes[idx + 3..];
+ }
+ None => {
+ break false;
+ }
+ }
+ }
+}
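
Because `core::slice::memchr` is an internal API, the fast path above is hard to
experiment with in isolation. A hedged, obviously-correct equivalent using only
stable `std` (same character set, same result, just slower):

    const TEXT_FLOW_CONTROL_CHARS: &[char] = &[
        '\u{202A}', '\u{202B}', '\u{202D}', '\u{202E}', '\u{2066}', '\u{2067}', '\u{2068}',
        '\u{202C}', '\u{2069}',
    ];

    fn contains_text_flow_control_chars_slow(s: &str) -> bool {
        s.chars().any(|c| TEXT_FLOW_CONTROL_CHARS.contains(&c))
    }

    fn main() {
        assert!(contains_text_flow_control_chars_slow("a\u{202E}b"));
        assert!(!contains_text_flow_control_chars_slow("plain ascii"));
    }

The byte-level scan exists purely as an optimization: all nine characters encode
as three UTF-8 bytes starting with 0xE2, so `memchr` can skip ASCII input in bulk.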
diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs
new file mode 100644
index 000000000..d9594b323
--- /dev/null
+++ b/compiler/rustc_ast/src/visit.rs
@@ -0,0 +1,959 @@
+//! AST walker. Each overridden visit method has full control over what
+//! happens with its node: it can do its own traversal of the node's children,
+//! call `visit::walk_*` to apply the default traversal algorithm, or prevent
+//! deeper traversal by doing nothing.
+//!
+//! Note: it is an important invariant that the default visitor walks the body
+//! of a function in "execution order" (more concretely, reverse post-order
+//! with respect to the CFG implied by the AST), meaning that if AST node A may
+//! execute before AST node B, then A is visited first. The borrow checker in
+//! particular relies on this property.
+//!
+//! Note: walking an AST before macro expansion is probably a bad idea. For
+//! instance, a walker looking for item names in a module will miss all of
+//! those that are created by the expansion of a macro.
+
+use crate::ast::*;
+
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum AssocCtxt {
+ Trait,
+ Impl,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum FnCtxt {
+ Free,
+ Foreign,
+ Assoc(AssocCtxt),
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum BoundKind {
+ /// Trait bounds in generics bounds and type/trait alias.
+ /// E.g., `<T: Bound>`, `type A: Bound`, or `where T: Bound`.
+ Bound,
+
+ /// Trait bounds in `impl` type.
+ /// E.g., `type Foo = impl Bound1 + Bound2 + Bound3`.
+ Impl,
+
+ /// Trait bounds in trait object type.
+ /// E.g., `dyn Bound1 + Bound2 + Bound3`.
+ TraitObject,
+
+ /// Super traits of a trait.
+ /// E.g., `trait A: B`
+ SuperTraits,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum FnKind<'a> {
+ /// E.g., `fn foo()`, `fn foo(&self)`, or `extern "Abi" fn foo()`.
+ Fn(FnCtxt, Ident, &'a FnSig, &'a Visibility, &'a Generics, Option<&'a Block>),
+
+ /// E.g., `|x, y| body`.
+ Closure(&'a ClosureBinder, &'a FnDecl, &'a Expr),
+}
+
+impl<'a> FnKind<'a> {
+ pub fn header(&self) -> Option<&'a FnHeader> {
+ match *self {
+ FnKind::Fn(_, _, sig, _, _, _) => Some(&sig.header),
+ FnKind::Closure(_, _, _) => None,
+ }
+ }
+
+ pub fn ident(&self) -> Option<&Ident> {
+ match self {
+ FnKind::Fn(_, ident, ..) => Some(ident),
+ _ => None,
+ }
+ }
+
+ pub fn decl(&self) -> &'a FnDecl {
+ match self {
+ FnKind::Fn(_, _, sig, _, _, _) => &sig.decl,
+ FnKind::Closure(_, decl, _) => decl,
+ }
+ }
+
+ pub fn ctxt(&self) -> Option<FnCtxt> {
+ match self {
+ FnKind::Fn(ctxt, ..) => Some(*ctxt),
+ FnKind::Closure(..) => None,
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum LifetimeCtxt {
+ /// Appears in a reference type.
+ Rptr,
+ /// Appears as a bound on a type or another lifetime.
+ Bound,
+ /// Appears as a generic argument.
+ GenericArg,
+}
+
+/// Each method of the `Visitor` trait is a hook to be potentially
+/// overridden. Each method's default implementation recursively visits
+/// the substructure of the input via the corresponding `walk` method;
+/// e.g., the `visit_item` method by default calls `visit::walk_item`.
+///
+/// If you want to ensure that your code handles every variant
+/// explicitly, you need to override each method. (And you also need
+/// to monitor future changes to `Visitor` in case a new method with a
+/// new default implementation gets introduced.)
+pub trait Visitor<'ast>: Sized {
+ fn visit_ident(&mut self, _ident: Ident) {}
+ fn visit_foreign_item(&mut self, i: &'ast ForeignItem) {
+ walk_foreign_item(self, i)
+ }
+ fn visit_item(&mut self, i: &'ast Item) {
+ walk_item(self, i)
+ }
+ fn visit_local(&mut self, l: &'ast Local) {
+ walk_local(self, l)
+ }
+ fn visit_block(&mut self, b: &'ast Block) {
+ walk_block(self, b)
+ }
+ fn visit_stmt(&mut self, s: &'ast Stmt) {
+ walk_stmt(self, s)
+ }
+ fn visit_param(&mut self, param: &'ast Param) {
+ walk_param(self, param)
+ }
+ fn visit_arm(&mut self, a: &'ast Arm) {
+ walk_arm(self, a)
+ }
+ fn visit_pat(&mut self, p: &'ast Pat) {
+ walk_pat(self, p)
+ }
+ fn visit_anon_const(&mut self, c: &'ast AnonConst) {
+ walk_anon_const(self, c)
+ }
+ fn visit_expr(&mut self, ex: &'ast Expr) {
+ walk_expr(self, ex)
+ }
+ fn visit_expr_post(&mut self, _ex: &'ast Expr) {}
+ fn visit_ty(&mut self, t: &'ast Ty) {
+ walk_ty(self, t)
+ }
+ fn visit_generic_param(&mut self, param: &'ast GenericParam) {
+ walk_generic_param(self, param)
+ }
+ fn visit_generics(&mut self, g: &'ast Generics) {
+ walk_generics(self, g)
+ }
+ fn visit_closure_binder(&mut self, b: &'ast ClosureBinder) {
+ walk_closure_binder(self, b)
+ }
+ fn visit_where_predicate(&mut self, p: &'ast WherePredicate) {
+ walk_where_predicate(self, p)
+ }
+ fn visit_fn(&mut self, fk: FnKind<'ast>, s: Span, _: NodeId) {
+ walk_fn(self, fk, s)
+ }
+ fn visit_assoc_item(&mut self, i: &'ast AssocItem, ctxt: AssocCtxt) {
+ walk_assoc_item(self, i, ctxt)
+ }
+ fn visit_trait_ref(&mut self, t: &'ast TraitRef) {
+ walk_trait_ref(self, t)
+ }
+ fn visit_param_bound(&mut self, bounds: &'ast GenericBound, _ctxt: BoundKind) {
+ walk_param_bound(self, bounds)
+ }
+ fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef, m: &'ast TraitBoundModifier) {
+ walk_poly_trait_ref(self, t, m)
+ }
+ fn visit_variant_data(&mut self, s: &'ast VariantData) {
+ walk_struct_def(self, s)
+ }
+ fn visit_field_def(&mut self, s: &'ast FieldDef) {
+ walk_field_def(self, s)
+ }
+ fn visit_enum_def(
+ &mut self,
+ enum_definition: &'ast EnumDef,
+ generics: &'ast Generics,
+ item_id: NodeId,
+ _: Span,
+ ) {
+ walk_enum_def(self, enum_definition, generics, item_id)
+ }
+ fn visit_variant(&mut self, v: &'ast Variant) {
+ walk_variant(self, v)
+ }
+ fn visit_label(&mut self, label: &'ast Label) {
+ walk_label(self, label)
+ }
+ fn visit_lifetime(&mut self, lifetime: &'ast Lifetime, _: LifetimeCtxt) {
+ walk_lifetime(self, lifetime)
+ }
+ fn visit_mac_call(&mut self, mac: &'ast MacCall) {
+ walk_mac(self, mac)
+ }
+ fn visit_mac_def(&mut self, _mac: &'ast MacroDef, _id: NodeId) {
+ // Nothing to do
+ }
+ fn visit_path(&mut self, path: &'ast Path, _id: NodeId) {
+ walk_path(self, path)
+ }
+ fn visit_use_tree(&mut self, use_tree: &'ast UseTree, id: NodeId, _nested: bool) {
+ walk_use_tree(self, use_tree, id)
+ }
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) {
+ walk_path_segment(self, path_span, path_segment)
+ }
+ fn visit_generic_args(&mut self, path_span: Span, generic_args: &'ast GenericArgs) {
+ walk_generic_args(self, path_span, generic_args)
+ }
+ fn visit_generic_arg(&mut self, generic_arg: &'ast GenericArg) {
+ walk_generic_arg(self, generic_arg)
+ }
+ fn visit_assoc_constraint(&mut self, constraint: &'ast AssocConstraint) {
+ walk_assoc_constraint(self, constraint)
+ }
+ fn visit_attribute(&mut self, attr: &'ast Attribute) {
+ walk_attribute(self, attr)
+ }
+ fn visit_vis(&mut self, vis: &'ast Visibility) {
+ walk_vis(self, vis)
+ }
+ fn visit_fn_ret_ty(&mut self, ret_ty: &'ast FnRetTy) {
+ walk_fn_ret_ty(self, ret_ty)
+ }
+ fn visit_fn_header(&mut self, _header: &'ast FnHeader) {
+ // Nothing to do
+ }
+ fn visit_expr_field(&mut self, f: &'ast ExprField) {
+ walk_expr_field(self, f)
+ }
+ fn visit_pat_field(&mut self, fp: &'ast PatField) {
+ walk_pat_field(self, fp)
+ }
+ fn visit_crate(&mut self, krate: &'ast Crate) {
+ walk_crate(self, krate)
+ }
+ fn visit_inline_asm(&mut self, asm: &'ast InlineAsm) {
+ walk_inline_asm(self, asm)
+ }
+ fn visit_inline_asm_sym(&mut self, sym: &'ast InlineAsmSym) {
+ walk_inline_asm_sym(self, sym)
+ }
+}
+
+#[macro_export]
+macro_rules! walk_list {
+ ($visitor: expr, $method: ident, $list: expr) => {
+ for elem in $list {
+ $visitor.$method(elem)
+ }
+ };
+ ($visitor: expr, $method: ident, $list: expr, $($extra_args: expr),*) => {
+ for elem in $list {
+ $visitor.$method(elem, $($extra_args,)*)
+ }
+ }
+}
+
+pub fn walk_crate<'a, V: Visitor<'a>>(visitor: &mut V, krate: &'a Crate) {
+ walk_list!(visitor, visit_item, &krate.items);
+ walk_list!(visitor, visit_attribute, &krate.attrs);
+}
+
+pub fn walk_local<'a, V: Visitor<'a>>(visitor: &mut V, local: &'a Local) {
+ for attr in local.attrs.iter() {
+ visitor.visit_attribute(attr);
+ }
+ visitor.visit_pat(&local.pat);
+ walk_list!(visitor, visit_ty, &local.ty);
+ if let Some((init, els)) = local.kind.init_else_opt() {
+ visitor.visit_expr(init);
+ walk_list!(visitor, visit_block, els);
+ }
+}
+
+pub fn walk_label<'a, V: Visitor<'a>>(visitor: &mut V, label: &'a Label) {
+ visitor.visit_ident(label.ident);
+}
+
+pub fn walk_lifetime<'a, V: Visitor<'a>>(visitor: &mut V, lifetime: &'a Lifetime) {
+ visitor.visit_ident(lifetime.ident);
+}
+
+pub fn walk_poly_trait_ref<'a, V>(
+ visitor: &mut V,
+ trait_ref: &'a PolyTraitRef,
+ _: &TraitBoundModifier,
+) where
+ V: Visitor<'a>,
+{
+ walk_list!(visitor, visit_generic_param, &trait_ref.bound_generic_params);
+ visitor.visit_trait_ref(&trait_ref.trait_ref);
+}
+
+pub fn walk_trait_ref<'a, V: Visitor<'a>>(visitor: &mut V, trait_ref: &'a TraitRef) {
+ visitor.visit_path(&trait_ref.path, trait_ref.ref_id)
+}
+
+pub fn walk_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a Item) {
+ visitor.visit_vis(&item.vis);
+ visitor.visit_ident(item.ident);
+ match item.kind {
+ ItemKind::ExternCrate(_) => {}
+ ItemKind::Use(ref use_tree) => visitor.visit_use_tree(use_tree, item.id, false),
+ ItemKind::Static(ref typ, _, ref expr) | ItemKind::Const(_, ref typ, ref expr) => {
+ visitor.visit_ty(typ);
+ walk_list!(visitor, visit_expr, expr);
+ }
+ ItemKind::Fn(box Fn { defaultness: _, ref generics, ref sig, ref body }) => {
+ let kind =
+ FnKind::Fn(FnCtxt::Free, item.ident, sig, &item.vis, generics, body.as_deref());
+ visitor.visit_fn(kind, item.span, item.id)
+ }
+ ItemKind::Mod(_unsafety, ref mod_kind) => match mod_kind {
+ ModKind::Loaded(items, _inline, _inner_span) => {
+ walk_list!(visitor, visit_item, items)
+ }
+ ModKind::Unloaded => {}
+ },
+ ItemKind::ForeignMod(ref foreign_module) => {
+ walk_list!(visitor, visit_foreign_item, &foreign_module.items);
+ }
+ ItemKind::GlobalAsm(ref asm) => visitor.visit_inline_asm(asm),
+ ItemKind::TyAlias(box TyAlias { ref generics, ref bounds, ref ty, .. }) => {
+ visitor.visit_generics(generics);
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::Bound);
+ walk_list!(visitor, visit_ty, ty);
+ }
+ ItemKind::Enum(ref enum_definition, ref generics) => {
+ visitor.visit_generics(generics);
+ visitor.visit_enum_def(enum_definition, generics, item.id, item.span)
+ }
+ ItemKind::Impl(box Impl {
+ defaultness: _,
+ unsafety: _,
+ ref generics,
+ constness: _,
+ polarity: _,
+ ref of_trait,
+ ref self_ty,
+ ref items,
+ }) => {
+ visitor.visit_generics(generics);
+ walk_list!(visitor, visit_trait_ref, of_trait);
+ visitor.visit_ty(self_ty);
+ walk_list!(visitor, visit_assoc_item, items, AssocCtxt::Impl);
+ }
+ ItemKind::Struct(ref struct_definition, ref generics)
+ | ItemKind::Union(ref struct_definition, ref generics) => {
+ visitor.visit_generics(generics);
+ visitor.visit_variant_data(struct_definition);
+ }
+ ItemKind::Trait(box Trait {
+ unsafety: _,
+ is_auto: _,
+ ref generics,
+ ref bounds,
+ ref items,
+ }) => {
+ visitor.visit_generics(generics);
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::SuperTraits);
+ walk_list!(visitor, visit_assoc_item, items, AssocCtxt::Trait);
+ }
+ ItemKind::TraitAlias(ref generics, ref bounds) => {
+ visitor.visit_generics(generics);
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::Bound);
+ }
+ ItemKind::MacCall(ref mac) => visitor.visit_mac_call(mac),
+ ItemKind::MacroDef(ref ts) => visitor.visit_mac_def(ts, item.id),
+ }
+ walk_list!(visitor, visit_attribute, &item.attrs);
+}
+
+pub fn walk_enum_def<'a, V: Visitor<'a>>(
+ visitor: &mut V,
+ enum_definition: &'a EnumDef,
+ _: &'a Generics,
+ _: NodeId,
+) {
+ walk_list!(visitor, visit_variant, &enum_definition.variants);
+}
+
+pub fn walk_variant<'a, V: Visitor<'a>>(visitor: &mut V, variant: &'a Variant) {
+ visitor.visit_ident(variant.ident);
+ visitor.visit_vis(&variant.vis);
+ visitor.visit_variant_data(&variant.data);
+ walk_list!(visitor, visit_anon_const, &variant.disr_expr);
+ walk_list!(visitor, visit_attribute, &variant.attrs);
+}
+
+pub fn walk_expr_field<'a, V: Visitor<'a>>(visitor: &mut V, f: &'a ExprField) {
+ visitor.visit_expr(&f.expr);
+ visitor.visit_ident(f.ident);
+ walk_list!(visitor, visit_attribute, f.attrs.iter());
+}
+
+pub fn walk_pat_field<'a, V: Visitor<'a>>(visitor: &mut V, fp: &'a PatField) {
+ visitor.visit_ident(fp.ident);
+ visitor.visit_pat(&fp.pat);
+ walk_list!(visitor, visit_attribute, fp.attrs.iter());
+}
+
+pub fn walk_ty<'a, V: Visitor<'a>>(visitor: &mut V, typ: &'a Ty) {
+ match typ.kind {
+ TyKind::Slice(ref ty) | TyKind::Paren(ref ty) => visitor.visit_ty(ty),
+ TyKind::Ptr(ref mutable_type) => visitor.visit_ty(&mutable_type.ty),
+ TyKind::Rptr(ref opt_lifetime, ref mutable_type) => {
+ walk_list!(visitor, visit_lifetime, opt_lifetime, LifetimeCtxt::Rptr);
+ visitor.visit_ty(&mutable_type.ty)
+ }
+ TyKind::Tup(ref tuple_element_types) => {
+ walk_list!(visitor, visit_ty, tuple_element_types);
+ }
+ TyKind::BareFn(ref function_declaration) => {
+ walk_list!(visitor, visit_generic_param, &function_declaration.generic_params);
+ walk_fn_decl(visitor, &function_declaration.decl);
+ }
+ TyKind::Path(ref maybe_qself, ref path) => {
+ if let Some(ref qself) = *maybe_qself {
+ visitor.visit_ty(&qself.ty);
+ }
+ visitor.visit_path(path, typ.id);
+ }
+ TyKind::Array(ref ty, ref length) => {
+ visitor.visit_ty(ty);
+ visitor.visit_anon_const(length)
+ }
+ TyKind::TraitObject(ref bounds, ..) => {
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::TraitObject);
+ }
+ TyKind::ImplTrait(_, ref bounds) => {
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::Impl);
+ }
+ TyKind::Typeof(ref expression) => visitor.visit_anon_const(expression),
+ TyKind::Infer | TyKind::ImplicitSelf | TyKind::Err => {}
+ TyKind::MacCall(ref mac) => visitor.visit_mac_call(mac),
+ TyKind::Never | TyKind::CVarArgs => {}
+ }
+}
+
+pub fn walk_path<'a, V: Visitor<'a>>(visitor: &mut V, path: &'a Path) {
+ for segment in &path.segments {
+ visitor.visit_path_segment(path.span, segment);
+ }
+}
+
+pub fn walk_use_tree<'a, V: Visitor<'a>>(visitor: &mut V, use_tree: &'a UseTree, id: NodeId) {
+ visitor.visit_path(&use_tree.prefix, id);
+ match use_tree.kind {
+ UseTreeKind::Simple(rename, ..) => {
+ // The extra IDs are handled during HIR lowering.
+ if let Some(rename) = rename {
+ visitor.visit_ident(rename);
+ }
+ }
+ UseTreeKind::Glob => {}
+ UseTreeKind::Nested(ref use_trees) => {
+ for &(ref nested_tree, nested_id) in use_trees {
+ visitor.visit_use_tree(nested_tree, nested_id, true);
+ }
+ }
+ }
+}
+
+pub fn walk_path_segment<'a, V: Visitor<'a>>(
+ visitor: &mut V,
+ path_span: Span,
+ segment: &'a PathSegment,
+) {
+ visitor.visit_ident(segment.ident);
+ if let Some(ref args) = segment.args {
+ visitor.visit_generic_args(path_span, args);
+ }
+}
+
+pub fn walk_generic_args<'a, V>(visitor: &mut V, _path_span: Span, generic_args: &'a GenericArgs)
+where
+ V: Visitor<'a>,
+{
+ match *generic_args {
+ GenericArgs::AngleBracketed(ref data) => {
+ for arg in &data.args {
+ match arg {
+ AngleBracketedArg::Arg(a) => visitor.visit_generic_arg(a),
+ AngleBracketedArg::Constraint(c) => visitor.visit_assoc_constraint(c),
+ }
+ }
+ }
+ GenericArgs::Parenthesized(ref data) => {
+ walk_list!(visitor, visit_ty, &data.inputs);
+ walk_fn_ret_ty(visitor, &data.output);
+ }
+ }
+}
+
+pub fn walk_generic_arg<'a, V>(visitor: &mut V, generic_arg: &'a GenericArg)
+where
+ V: Visitor<'a>,
+{
+ match generic_arg {
+ GenericArg::Lifetime(lt) => visitor.visit_lifetime(lt, LifetimeCtxt::GenericArg),
+ GenericArg::Type(ty) => visitor.visit_ty(ty),
+ GenericArg::Const(ct) => visitor.visit_anon_const(ct),
+ }
+}
+
+pub fn walk_assoc_constraint<'a, V: Visitor<'a>>(visitor: &mut V, constraint: &'a AssocConstraint) {
+ visitor.visit_ident(constraint.ident);
+ if let Some(ref gen_args) = constraint.gen_args {
+ visitor.visit_generic_args(gen_args.span(), gen_args);
+ }
+ match constraint.kind {
+ AssocConstraintKind::Equality { ref term } => match term {
+ Term::Ty(ty) => visitor.visit_ty(ty),
+ Term::Const(c) => visitor.visit_anon_const(c),
+ },
+ AssocConstraintKind::Bound { ref bounds } => {
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::Bound);
+ }
+ }
+}
+
+pub fn walk_pat<'a, V: Visitor<'a>>(visitor: &mut V, pattern: &'a Pat) {
+ match pattern.kind {
+ PatKind::TupleStruct(ref opt_qself, ref path, ref elems) => {
+ if let Some(ref qself) = *opt_qself {
+ visitor.visit_ty(&qself.ty);
+ }
+ visitor.visit_path(path, pattern.id);
+ walk_list!(visitor, visit_pat, elems);
+ }
+ PatKind::Path(ref opt_qself, ref path) => {
+ if let Some(ref qself) = *opt_qself {
+ visitor.visit_ty(&qself.ty);
+ }
+ visitor.visit_path(path, pattern.id)
+ }
+ PatKind::Struct(ref opt_qself, ref path, ref fields, _) => {
+ if let Some(ref qself) = *opt_qself {
+ visitor.visit_ty(&qself.ty);
+ }
+ visitor.visit_path(path, pattern.id);
+ walk_list!(visitor, visit_pat_field, fields);
+ }
+ PatKind::Box(ref subpattern)
+ | PatKind::Ref(ref subpattern, _)
+ | PatKind::Paren(ref subpattern) => visitor.visit_pat(subpattern),
+ PatKind::Ident(_, ident, ref optional_subpattern) => {
+ visitor.visit_ident(ident);
+ walk_list!(visitor, visit_pat, optional_subpattern);
+ }
+ PatKind::Lit(ref expression) => visitor.visit_expr(expression),
+ PatKind::Range(ref lower_bound, ref upper_bound, _) => {
+ walk_list!(visitor, visit_expr, lower_bound);
+ walk_list!(visitor, visit_expr, upper_bound);
+ }
+ PatKind::Wild | PatKind::Rest => {}
+ PatKind::Tuple(ref elems) | PatKind::Slice(ref elems) | PatKind::Or(ref elems) => {
+ walk_list!(visitor, visit_pat, elems);
+ }
+ PatKind::MacCall(ref mac) => visitor.visit_mac_call(mac),
+ }
+}
+
+pub fn walk_foreign_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a ForeignItem) {
+ let Item { id, span, ident, ref vis, ref attrs, ref kind, tokens: _ } = *item;
+ visitor.visit_vis(vis);
+ visitor.visit_ident(ident);
+ walk_list!(visitor, visit_attribute, attrs);
+ match kind {
+ ForeignItemKind::Static(ty, _, expr) => {
+ visitor.visit_ty(ty);
+ walk_list!(visitor, visit_expr, expr);
+ }
+ ForeignItemKind::Fn(box Fn { defaultness: _, ref generics, ref sig, ref body }) => {
+ let kind = FnKind::Fn(FnCtxt::Foreign, ident, sig, vis, generics, body.as_deref());
+ visitor.visit_fn(kind, span, id);
+ }
+ ForeignItemKind::TyAlias(box TyAlias { generics, bounds, ty, .. }) => {
+ visitor.visit_generics(generics);
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::Bound);
+ walk_list!(visitor, visit_ty, ty);
+ }
+ ForeignItemKind::MacCall(mac) => {
+ visitor.visit_mac_call(mac);
+ }
+ }
+}
+
+pub fn walk_param_bound<'a, V: Visitor<'a>>(visitor: &mut V, bound: &'a GenericBound) {
+ match *bound {
+ GenericBound::Trait(ref typ, ref modifier) => visitor.visit_poly_trait_ref(typ, modifier),
+ GenericBound::Outlives(ref lifetime) => {
+ visitor.visit_lifetime(lifetime, LifetimeCtxt::Bound)
+ }
+ }
+}
+
+pub fn walk_generic_param<'a, V: Visitor<'a>>(visitor: &mut V, param: &'a GenericParam) {
+ visitor.visit_ident(param.ident);
+ walk_list!(visitor, visit_attribute, param.attrs.iter());
+ walk_list!(visitor, visit_param_bound, &param.bounds, BoundKind::Bound);
+ match param.kind {
+ GenericParamKind::Lifetime => (),
+ GenericParamKind::Type { ref default } => walk_list!(visitor, visit_ty, default),
+ GenericParamKind::Const { ref ty, ref default, .. } => {
+ visitor.visit_ty(ty);
+ if let Some(default) = default {
+ visitor.visit_anon_const(default);
+ }
+ }
+ }
+}
+
+pub fn walk_generics<'a, V: Visitor<'a>>(visitor: &mut V, generics: &'a Generics) {
+ walk_list!(visitor, visit_generic_param, &generics.params);
+ walk_list!(visitor, visit_where_predicate, &generics.where_clause.predicates);
+}
+
+pub fn walk_closure_binder<'a, V: Visitor<'a>>(visitor: &mut V, binder: &'a ClosureBinder) {
+ match binder {
+ ClosureBinder::NotPresent => {}
+ ClosureBinder::For { generic_params, span: _ } => {
+ walk_list!(visitor, visit_generic_param, generic_params)
+ }
+ }
+}
+
+pub fn walk_where_predicate<'a, V: Visitor<'a>>(visitor: &mut V, predicate: &'a WherePredicate) {
+ match *predicate {
+ WherePredicate::BoundPredicate(WhereBoundPredicate {
+ ref bounded_ty,
+ ref bounds,
+ ref bound_generic_params,
+ ..
+ }) => {
+ visitor.visit_ty(bounded_ty);
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::Bound);
+ walk_list!(visitor, visit_generic_param, bound_generic_params);
+ }
+ WherePredicate::RegionPredicate(WhereRegionPredicate {
+ ref lifetime, ref bounds, ..
+ }) => {
+ visitor.visit_lifetime(lifetime, LifetimeCtxt::Bound);
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::Bound);
+ }
+ WherePredicate::EqPredicate(WhereEqPredicate { ref lhs_ty, ref rhs_ty, .. }) => {
+ visitor.visit_ty(lhs_ty);
+ visitor.visit_ty(rhs_ty);
+ }
+ }
+}
+
+pub fn walk_fn_ret_ty<'a, V: Visitor<'a>>(visitor: &mut V, ret_ty: &'a FnRetTy) {
+ if let FnRetTy::Ty(ref output_ty) = *ret_ty {
+ visitor.visit_ty(output_ty)
+ }
+}
+
+pub fn walk_fn_decl<'a, V: Visitor<'a>>(visitor: &mut V, function_declaration: &'a FnDecl) {
+ for param in &function_declaration.inputs {
+ visitor.visit_param(param);
+ }
+ visitor.visit_fn_ret_ty(&function_declaration.output);
+}
+
+pub fn walk_fn<'a, V: Visitor<'a>>(visitor: &mut V, kind: FnKind<'a>, _span: Span) {
+ match kind {
+ FnKind::Fn(_, _, sig, _, generics, body) => {
+ visitor.visit_generics(generics);
+ visitor.visit_fn_header(&sig.header);
+ walk_fn_decl(visitor, &sig.decl);
+ walk_list!(visitor, visit_block, body);
+ }
+ FnKind::Closure(binder, decl, body) => {
+ visitor.visit_closure_binder(binder);
+ walk_fn_decl(visitor, decl);
+ visitor.visit_expr(body);
+ }
+ }
+}
+
+pub fn walk_assoc_item<'a, V: Visitor<'a>>(visitor: &mut V, item: &'a AssocItem, ctxt: AssocCtxt) {
+ let Item { id, span, ident, ref vis, ref attrs, ref kind, tokens: _ } = *item;
+ visitor.visit_vis(vis);
+ visitor.visit_ident(ident);
+ walk_list!(visitor, visit_attribute, attrs);
+ match kind {
+ AssocItemKind::Const(_, ty, expr) => {
+ visitor.visit_ty(ty);
+ walk_list!(visitor, visit_expr, expr);
+ }
+ AssocItemKind::Fn(box Fn { defaultness: _, ref generics, ref sig, ref body }) => {
+ let kind = FnKind::Fn(FnCtxt::Assoc(ctxt), ident, sig, vis, generics, body.as_deref());
+ visitor.visit_fn(kind, span, id);
+ }
+ AssocItemKind::TyAlias(box TyAlias { generics, bounds, ty, .. }) => {
+ visitor.visit_generics(generics);
+ walk_list!(visitor, visit_param_bound, bounds, BoundKind::Bound);
+ walk_list!(visitor, visit_ty, ty);
+ }
+ AssocItemKind::MacCall(mac) => {
+ visitor.visit_mac_call(mac);
+ }
+ }
+}
+
+pub fn walk_struct_def<'a, V: Visitor<'a>>(visitor: &mut V, struct_definition: &'a VariantData) {
+ walk_list!(visitor, visit_field_def, struct_definition.fields());
+}
+
+pub fn walk_field_def<'a, V: Visitor<'a>>(visitor: &mut V, field: &'a FieldDef) {
+ visitor.visit_vis(&field.vis);
+ if let Some(ident) = field.ident {
+ visitor.visit_ident(ident);
+ }
+ visitor.visit_ty(&field.ty);
+ walk_list!(visitor, visit_attribute, &field.attrs);
+}
+
+pub fn walk_block<'a, V: Visitor<'a>>(visitor: &mut V, block: &'a Block) {
+ walk_list!(visitor, visit_stmt, &block.stmts);
+}
+
+pub fn walk_stmt<'a, V: Visitor<'a>>(visitor: &mut V, statement: &'a Stmt) {
+ match statement.kind {
+ StmtKind::Local(ref local) => visitor.visit_local(local),
+ StmtKind::Item(ref item) => visitor.visit_item(item),
+ StmtKind::Expr(ref expr) | StmtKind::Semi(ref expr) => visitor.visit_expr(expr),
+ StmtKind::Empty => {}
+ StmtKind::MacCall(ref mac) => {
+ let MacCallStmt { ref mac, style: _, ref attrs, tokens: _ } = **mac;
+ visitor.visit_mac_call(mac);
+ for attr in attrs.iter() {
+ visitor.visit_attribute(attr);
+ }
+ }
+ }
+}
+
+pub fn walk_mac<'a, V: Visitor<'a>>(visitor: &mut V, mac: &'a MacCall) {
+ visitor.visit_path(&mac.path, DUMMY_NODE_ID);
+}
+
+pub fn walk_anon_const<'a, V: Visitor<'a>>(visitor: &mut V, constant: &'a AnonConst) {
+ visitor.visit_expr(&constant.value);
+}
+
+pub fn walk_inline_asm<'a, V: Visitor<'a>>(visitor: &mut V, asm: &'a InlineAsm) {
+ for (op, _) in &asm.operands {
+ match op {
+ InlineAsmOperand::In { expr, .. }
+ | InlineAsmOperand::Out { expr: Some(expr), .. }
+ | InlineAsmOperand::InOut { expr, .. } => visitor.visit_expr(expr),
+ InlineAsmOperand::Out { expr: None, .. } => {}
+ InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ visitor.visit_expr(in_expr);
+ if let Some(out_expr) = out_expr {
+ visitor.visit_expr(out_expr);
+ }
+ }
+ InlineAsmOperand::Const { anon_const, .. } => visitor.visit_anon_const(anon_const),
+ InlineAsmOperand::Sym { sym } => visitor.visit_inline_asm_sym(sym),
+ }
+ }
+}
+
+pub fn walk_inline_asm_sym<'a, V: Visitor<'a>>(visitor: &mut V, sym: &'a InlineAsmSym) {
+ if let Some(ref qself) = sym.qself {
+ visitor.visit_ty(&qself.ty);
+ }
+ visitor.visit_path(&sym.path, sym.id);
+}
+
+pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) {
+ walk_list!(visitor, visit_attribute, expression.attrs.iter());
+
+ match expression.kind {
+ ExprKind::Box(ref subexpression) => visitor.visit_expr(subexpression),
+ ExprKind::Array(ref subexpressions) => {
+ walk_list!(visitor, visit_expr, subexpressions);
+ }
+ ExprKind::ConstBlock(ref anon_const) => visitor.visit_anon_const(anon_const),
+ ExprKind::Repeat(ref element, ref count) => {
+ visitor.visit_expr(element);
+ visitor.visit_anon_const(count)
+ }
+ ExprKind::Struct(ref se) => {
+ if let Some(ref qself) = se.qself {
+ visitor.visit_ty(&qself.ty);
+ }
+ visitor.visit_path(&se.path, expression.id);
+ walk_list!(visitor, visit_expr_field, &se.fields);
+ match &se.rest {
+ StructRest::Base(expr) => visitor.visit_expr(expr),
+ StructRest::Rest(_span) => {}
+ StructRest::None => {}
+ }
+ }
+ ExprKind::Tup(ref subexpressions) => {
+ walk_list!(visitor, visit_expr, subexpressions);
+ }
+ ExprKind::Call(ref callee_expression, ref arguments) => {
+ visitor.visit_expr(callee_expression);
+ walk_list!(visitor, visit_expr, arguments);
+ }
+ ExprKind::MethodCall(ref segment, ref arguments, _span) => {
+ visitor.visit_path_segment(expression.span, segment);
+ walk_list!(visitor, visit_expr, arguments);
+ }
+ ExprKind::Binary(_, ref left_expression, ref right_expression) => {
+ visitor.visit_expr(left_expression);
+ visitor.visit_expr(right_expression)
+ }
+ ExprKind::AddrOf(_, _, ref subexpression) | ExprKind::Unary(_, ref subexpression) => {
+ visitor.visit_expr(subexpression)
+ }
+ ExprKind::Cast(ref subexpression, ref typ) | ExprKind::Type(ref subexpression, ref typ) => {
+ visitor.visit_expr(subexpression);
+ visitor.visit_ty(typ)
+ }
+ ExprKind::Let(ref pat, ref expr, _) => {
+ visitor.visit_pat(pat);
+ visitor.visit_expr(expr);
+ }
+ ExprKind::If(ref head_expression, ref if_block, ref optional_else) => {
+ visitor.visit_expr(head_expression);
+ visitor.visit_block(if_block);
+ walk_list!(visitor, visit_expr, optional_else);
+ }
+ ExprKind::While(ref subexpression, ref block, ref opt_label) => {
+ walk_list!(visitor, visit_label, opt_label);
+ visitor.visit_expr(subexpression);
+ visitor.visit_block(block);
+ }
+ ExprKind::ForLoop(ref pattern, ref subexpression, ref block, ref opt_label) => {
+ walk_list!(visitor, visit_label, opt_label);
+ visitor.visit_pat(pattern);
+ visitor.visit_expr(subexpression);
+ visitor.visit_block(block);
+ }
+ ExprKind::Loop(ref block, ref opt_label) => {
+ walk_list!(visitor, visit_label, opt_label);
+ visitor.visit_block(block);
+ }
+ ExprKind::Match(ref subexpression, ref arms) => {
+ visitor.visit_expr(subexpression);
+ walk_list!(visitor, visit_arm, arms);
+ }
+ ExprKind::Closure(ref binder, _, _, _, ref decl, ref body, _decl_span) => {
+ visitor.visit_fn(FnKind::Closure(binder, decl, body), expression.span, expression.id)
+ }
+ ExprKind::Block(ref block, ref opt_label) => {
+ walk_list!(visitor, visit_label, opt_label);
+ visitor.visit_block(block);
+ }
+ ExprKind::Async(_, _, ref body) => {
+ visitor.visit_block(body);
+ }
+ ExprKind::Await(ref expr) => visitor.visit_expr(expr),
+ ExprKind::Assign(ref lhs, ref rhs, _) => {
+ visitor.visit_expr(lhs);
+ visitor.visit_expr(rhs);
+ }
+ ExprKind::AssignOp(_, ref left_expression, ref right_expression) => {
+ visitor.visit_expr(left_expression);
+ visitor.visit_expr(right_expression);
+ }
+ ExprKind::Field(ref subexpression, ident) => {
+ visitor.visit_expr(subexpression);
+ visitor.visit_ident(ident);
+ }
+ ExprKind::Index(ref main_expression, ref index_expression) => {
+ visitor.visit_expr(main_expression);
+ visitor.visit_expr(index_expression)
+ }
+ ExprKind::Range(ref start, ref end, _) => {
+ walk_list!(visitor, visit_expr, start);
+ walk_list!(visitor, visit_expr, end);
+ }
+ ExprKind::Underscore => {}
+ ExprKind::Path(ref maybe_qself, ref path) => {
+ if let Some(ref qself) = *maybe_qself {
+ visitor.visit_ty(&qself.ty);
+ }
+ visitor.visit_path(path, expression.id)
+ }
+ ExprKind::Break(ref opt_label, ref opt_expr) => {
+ walk_list!(visitor, visit_label, opt_label);
+ walk_list!(visitor, visit_expr, opt_expr);
+ }
+ ExprKind::Continue(ref opt_label) => {
+ walk_list!(visitor, visit_label, opt_label);
+ }
+ ExprKind::Ret(ref optional_expression) => {
+ walk_list!(visitor, visit_expr, optional_expression);
+ }
+ ExprKind::Yeet(ref optional_expression) => {
+ walk_list!(visitor, visit_expr, optional_expression);
+ }
+ ExprKind::MacCall(ref mac) => visitor.visit_mac_call(mac),
+ ExprKind::Paren(ref subexpression) => visitor.visit_expr(subexpression),
+ ExprKind::InlineAsm(ref asm) => visitor.visit_inline_asm(asm),
+ ExprKind::Yield(ref optional_expression) => {
+ walk_list!(visitor, visit_expr, optional_expression);
+ }
+ ExprKind::Try(ref subexpression) => visitor.visit_expr(subexpression),
+ ExprKind::TryBlock(ref body) => visitor.visit_block(body),
+ ExprKind::Lit(_) | ExprKind::Err => {}
+ }
+
+ visitor.visit_expr_post(expression)
+}
+
+pub fn walk_param<'a, V: Visitor<'a>>(visitor: &mut V, param: &'a Param) {
+ walk_list!(visitor, visit_attribute, param.attrs.iter());
+ visitor.visit_pat(&param.pat);
+ visitor.visit_ty(&param.ty);
+}
+
+pub fn walk_arm<'a, V: Visitor<'a>>(visitor: &mut V, arm: &'a Arm) {
+ visitor.visit_pat(&arm.pat);
+ walk_list!(visitor, visit_expr, &arm.guard);
+ visitor.visit_expr(&arm.body);
+ walk_list!(visitor, visit_attribute, &arm.attrs);
+}
+
+pub fn walk_vis<'a, V: Visitor<'a>>(visitor: &mut V, vis: &'a Visibility) {
+ if let VisibilityKind::Restricted { ref path, id } = vis.kind {
+ visitor.visit_path(path, id);
+ }
+}
+
+pub fn walk_attribute<'a, V: Visitor<'a>>(visitor: &mut V, attr: &'a Attribute) {
+ match attr.kind {
+ AttrKind::Normal(ref item, ref _tokens) => walk_mac_args(visitor, &item.args),
+ AttrKind::DocComment(..) => {}
+ }
+}
+
+pub fn walk_mac_args<'a, V: Visitor<'a>>(visitor: &mut V, args: &'a MacArgs) {
+ match args {
+ MacArgs::Empty => {}
+ MacArgs::Delimited(_dspan, _delim, _tokens) => {}
+ MacArgs::Eq(_eq_span, MacArgsEq::Ast(expr)) => visitor.visit_expr(expr),
+ MacArgs::Eq(_, MacArgsEq::Hir(lit)) => {
+ unreachable!("in literal form when walking mac args eq: {:?}", lit)
+ }
+ }
+}
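
To see the shape of this pattern without pulling in rustc crates, here is a toy,
self-contained version (hypothetical types, not the rustc API) of the
visitor/walker split used throughout the module above:

    enum Expr {
        Lit(i64),
        Add(Box<Expr>, Box<Expr>),
    }

    trait Visitor: Sized {
        fn visit_expr(&mut self, e: &Expr) {
            walk_expr(self, e) // default: just recurse into children
        }
    }

    fn walk_expr<V: Visitor>(v: &mut V, e: &Expr) {
        if let Expr::Add(lhs, rhs) = e {
            v.visit_expr(lhs);
            v.visit_expr(rhs);
        }
    }

    // An override that inspects nodes but keeps the default traversal.
    struct LitCounter(usize);

    impl Visitor for LitCounter {
        fn visit_expr(&mut self, e: &Expr) {
            if let Expr::Lit(_) = e {
                self.0 += 1;
            }
            walk_expr(self, e); // continue into children
        }
    }

    fn main() {
        let e = Expr::Add(Box::new(Expr::Lit(1)), Box::new(Expr::Lit(2)));
        let mut c = LitCounter(0);
        c.visit_expr(&e);
        assert_eq!(c.0, 2);
    }

Calling `walk_expr` at the end of the override is what keeps the traversal going;
omitting it prunes the subtree, exactly as the module docs describe.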
diff --git a/compiler/rustc_ast_lowering/Cargo.toml b/compiler/rustc_ast_lowering/Cargo.toml
new file mode 100644
index 000000000..39ba62ef2
--- /dev/null
+++ b/compiler/rustc_ast_lowering/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "rustc_ast_lowering"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_arena = { path = "../rustc_arena" }
+tracing = "0.1"
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_target = { path = "../rustc_target" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_index = { path = "../rustc_index" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_span = { path = "../rustc_span" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_session = { path = "../rustc_session" }
+rustc_ast = { path = "../rustc_ast" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_ast_lowering/src/asm.rs b/compiler/rustc_ast_lowering/src/asm.rs
new file mode 100644
index 000000000..4166b4fc2
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/asm.rs
@@ -0,0 +1,485 @@
+use crate::{ImplTraitContext, ImplTraitPosition, ParamMode, ResolverAstLoweringExt};
+
+use super::LoweringContext;
+
+use rustc_ast::ptr::P;
+use rustc_ast::*;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::definitions::DefPathData;
+use rustc_session::parse::feature_err;
+use rustc_span::{sym, Span};
+use rustc_target::asm;
+use std::collections::hash_map::Entry;
+use std::fmt::Write;
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ pub(crate) fn lower_inline_asm(
+ &mut self,
+ sp: Span,
+ asm: &InlineAsm,
+ ) -> &'hir hir::InlineAsm<'hir> {
+ // Rustdoc needs to support asm! from foreign architectures: don't try
+ // lowering the register constraints in this case.
+ let asm_arch =
+ if self.tcx.sess.opts.actually_rustdoc { None } else { self.tcx.sess.asm_arch };
+ if asm_arch.is_none() && !self.tcx.sess.opts.actually_rustdoc {
+ struct_span_err!(
+ self.tcx.sess,
+ sp,
+ E0472,
+ "inline assembly is unsupported on this target"
+ )
+ .emit();
+ }
+ if let Some(asm_arch) = asm_arch {
+ // Inline assembly is currently only stable for these architectures.
+ let is_stable = matches!(
+ asm_arch,
+ asm::InlineAsmArch::X86
+ | asm::InlineAsmArch::X86_64
+ | asm::InlineAsmArch::Arm
+ | asm::InlineAsmArch::AArch64
+ | asm::InlineAsmArch::RiscV32
+ | asm::InlineAsmArch::RiscV64
+ );
+ if !is_stable && !self.tcx.features().asm_experimental_arch {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::asm_experimental_arch,
+ sp,
+ "inline assembly is not stable yet on this architecture",
+ )
+ .emit();
+ }
+ }
+ if asm.options.contains(InlineAsmOptions::ATT_SYNTAX)
+ && !matches!(asm_arch, Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64))
+ && !self.tcx.sess.opts.actually_rustdoc
+ {
+ self.tcx
+ .sess
+ .struct_span_err(sp, "the `att_syntax` option is only supported on x86")
+ .emit();
+ }
+ if asm.options.contains(InlineAsmOptions::MAY_UNWIND) && !self.tcx.features().asm_unwind {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::asm_unwind,
+ sp,
+ "the `may_unwind` option is unstable",
+ )
+ .emit();
+ }
+
+ let mut clobber_abis = FxHashMap::default();
+ if let Some(asm_arch) = asm_arch {
+ for (abi_name, abi_span) in &asm.clobber_abis {
+ match asm::InlineAsmClobberAbi::parse(asm_arch, &self.tcx.sess.target, *abi_name) {
+ Ok(abi) => {
+ // If the abi was already in the list, emit an error
+ match clobber_abis.get(&abi) {
+ Some((prev_name, prev_sp)) => {
+ let mut err = self.tcx.sess.struct_span_err(
+ *abi_span,
+ &format!("`{}` ABI specified multiple times", prev_name),
+ );
+ err.span_label(*prev_sp, "previously specified here");
+
+ // Multiple different ABI names may actually refer to the same ABI. If the
+ // two spellings differ, tell the user that they resolve to the same ABI on
+ // the current target.
+ let source_map = self.tcx.sess.source_map();
+ if source_map.span_to_snippet(*prev_sp)
+ != source_map.span_to_snippet(*abi_span)
+ {
+ err.note("these ABIs are equivalent on the current target");
+ }
+
+ err.emit();
+ }
+ None => {
+ clobber_abis.insert(abi, (abi_name, *abi_span));
+ }
+ }
+ }
+ Err(&[]) => {
+ self.tcx
+ .sess
+ .struct_span_err(
+ *abi_span,
+ "`clobber_abi` is not supported on this target",
+ )
+ .emit();
+ }
+ Err(supported_abis) => {
+ let mut err = self
+ .tcx
+ .sess
+ .struct_span_err(*abi_span, "invalid ABI for `clobber_abi`");
+ let mut abis = format!("`{}`", supported_abis[0]);
+ for m in &supported_abis[1..] {
+ let _ = write!(abis, ", `{}`", m);
+ }
+ err.note(&format!(
+ "the following ABIs are supported on this target: {}",
+ abis
+ ));
+ err.emit();
+ }
+ }
+ }
+ }
+
+ // Lower operands to HIR. We use dummy register classes if an error
+ // occurs during lowering because we still need to be able to produce a
+ // valid HIR.
+ let sess = self.tcx.sess;
+ let mut operands: Vec<_> = asm
+ .operands
+ .iter()
+ .map(|(op, op_sp)| {
+ let lower_reg = |reg| match reg {
+ InlineAsmRegOrRegClass::Reg(s) => {
+ asm::InlineAsmRegOrRegClass::Reg(if let Some(asm_arch) = asm_arch {
+ asm::InlineAsmReg::parse(asm_arch, s).unwrap_or_else(|e| {
+ let msg = format!("invalid register `{}`: {}", s, e);
+ sess.struct_span_err(*op_sp, &msg).emit();
+ asm::InlineAsmReg::Err
+ })
+ } else {
+ asm::InlineAsmReg::Err
+ })
+ }
+ InlineAsmRegOrRegClass::RegClass(s) => {
+ asm::InlineAsmRegOrRegClass::RegClass(if let Some(asm_arch) = asm_arch {
+ asm::InlineAsmRegClass::parse(asm_arch, s).unwrap_or_else(|e| {
+ let msg = format!("invalid register class `{}`: {}", s, e);
+ sess.struct_span_err(*op_sp, &msg).emit();
+ asm::InlineAsmRegClass::Err
+ })
+ } else {
+ asm::InlineAsmRegClass::Err
+ })
+ }
+ };
+
+ let op = match *op {
+ InlineAsmOperand::In { reg, ref expr } => hir::InlineAsmOperand::In {
+ reg: lower_reg(reg),
+ expr: self.lower_expr_mut(expr),
+ },
+ InlineAsmOperand::Out { reg, late, ref expr } => hir::InlineAsmOperand::Out {
+ reg: lower_reg(reg),
+ late,
+ expr: expr.as_ref().map(|expr| self.lower_expr_mut(expr)),
+ },
+ InlineAsmOperand::InOut { reg, late, ref expr } => {
+ hir::InlineAsmOperand::InOut {
+ reg: lower_reg(reg),
+ late,
+ expr: self.lower_expr_mut(expr),
+ }
+ }
+ InlineAsmOperand::SplitInOut { reg, late, ref in_expr, ref out_expr } => {
+ hir::InlineAsmOperand::SplitInOut {
+ reg: lower_reg(reg),
+ late,
+ in_expr: self.lower_expr_mut(in_expr),
+ out_expr: out_expr.as_ref().map(|expr| self.lower_expr_mut(expr)),
+ }
+ }
+ InlineAsmOperand::Const { ref anon_const } => {
+ if !self.tcx.features().asm_const {
+ feature_err(
+ &sess.parse_sess,
+ sym::asm_const,
+ *op_sp,
+ "const operands for inline assembly are unstable",
+ )
+ .emit();
+ }
+ hir::InlineAsmOperand::Const {
+ anon_const: self.lower_anon_const(anon_const),
+ }
+ }
+ InlineAsmOperand::Sym { ref sym } => {
+ if !self.tcx.features().asm_sym {
+ feature_err(
+ &sess.parse_sess,
+ sym::asm_sym,
+ *op_sp,
+ "sym operands for inline assembly are unstable",
+ )
+ .emit();
+ }
+
+ let static_def_id = self
+ .resolver
+ .get_partial_res(sym.id)
+ .filter(|res| res.unresolved_segments() == 0)
+ .and_then(|res| {
+ if let Res::Def(DefKind::Static(_), def_id) = res.base_res() {
+ Some(def_id)
+ } else {
+ None
+ }
+ });
+
+ if let Some(def_id) = static_def_id {
+ let path = self.lower_qpath(
+ sym.id,
+ &sym.qself,
+ &sym.path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ hir::InlineAsmOperand::SymStatic { path, def_id }
+ } else {
+ // Replace the InlineAsmSym AST node with an
+ // Expr that reuses the sym's node id.
+ let expr = Expr {
+ id: sym.id,
+ kind: ExprKind::Path(sym.qself.clone(), sym.path.clone()),
+ span: *op_sp,
+ attrs: AttrVec::new(),
+ tokens: None,
+ };
+
+ // Wrap the expression in an AnonConst.
+ let parent_def_id = self.current_hir_id_owner;
+ let node_id = self.next_node_id();
+ self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+ let anon_const = AnonConst { id: node_id, value: P(expr) };
+ hir::InlineAsmOperand::SymFn {
+ anon_const: self.lower_anon_const(&anon_const),
+ }
+ }
+ }
+ };
+ (op, self.lower_span(*op_sp))
+ })
+ .collect();
+
+ // Validate template modifiers against the register classes for the operands
+ for p in &asm.template {
+ if let InlineAsmTemplatePiece::Placeholder {
+ operand_idx,
+ modifier: Some(modifier),
+ span: placeholder_span,
+ } = *p
+ {
+ let op_sp = asm.operands[operand_idx].1;
+ match &operands[operand_idx].0 {
+ hir::InlineAsmOperand::In { reg, .. }
+ | hir::InlineAsmOperand::Out { reg, .. }
+ | hir::InlineAsmOperand::InOut { reg, .. }
+ | hir::InlineAsmOperand::SplitInOut { reg, .. } => {
+ let class = reg.reg_class();
+ if class == asm::InlineAsmRegClass::Err {
+ continue;
+ }
+ let valid_modifiers = class.valid_modifiers(asm_arch.unwrap());
+ if !valid_modifiers.contains(&modifier) {
+ let mut err = sess.struct_span_err(
+ placeholder_span,
+ "invalid asm template modifier for this register class",
+ );
+ err.span_label(placeholder_span, "template modifier");
+ err.span_label(op_sp, "argument");
+ if !valid_modifiers.is_empty() {
+ let mut mods = format!("`{}`", valid_modifiers[0]);
+ for m in &valid_modifiers[1..] {
+ let _ = write!(mods, ", `{}`", m);
+ }
+ err.note(&format!(
+ "the `{}` register class supports \
+ the following template modifiers: {}",
+ class.name(),
+ mods
+ ));
+ } else {
+ err.note(&format!(
+ "the `{}` register class does not support template modifiers",
+ class.name()
+ ));
+ }
+ err.emit();
+ }
+ }
+ hir::InlineAsmOperand::Const { .. } => {
+ let mut err = sess.struct_span_err(
+ placeholder_span,
+ "asm template modifiers are not allowed for `const` arguments",
+ );
+ err.span_label(placeholder_span, "template modifier");
+ err.span_label(op_sp, "argument");
+ err.emit();
+ }
+ hir::InlineAsmOperand::SymFn { .. }
+ | hir::InlineAsmOperand::SymStatic { .. } => {
+ let mut err = sess.struct_span_err(
+ placeholder_span,
+ "asm template modifiers are not allowed for `sym` arguments",
+ );
+ err.span_label(placeholder_span, "template modifier");
+ err.span_label(op_sp, "argument");
+ err.emit();
+ }
+ }
+ }
+ }
+
+ let mut used_input_regs = FxHashMap::default();
+ let mut used_output_regs = FxHashMap::default();
+
+ for (idx, &(ref op, op_sp)) in operands.iter().enumerate() {
+ if let Some(reg) = op.reg() {
+ let reg_class = reg.reg_class();
+ if reg_class == asm::InlineAsmRegClass::Err {
+ continue;
+ }
+
+ // Some register classes can only be used as clobbers. This
+ // means that we disallow passing a value in/out of the asm and
+ // require that the operand name an explicit register, not a
+ // register class.
+ if reg_class.is_clobber_only(asm_arch.unwrap()) && !op.is_clobber() {
+ let msg = format!(
+ "register class `{}` can only be used as a clobber, \
+ not as an input or output",
+ reg_class.name()
+ );
+ sess.struct_span_err(op_sp, &msg).emit();
+ continue;
+ }
+
+ // Check for conflicts between explicit register operands.
+ if let asm::InlineAsmRegOrRegClass::Reg(reg) = reg {
+ let (input, output) = match op {
+ hir::InlineAsmOperand::In { .. } => (true, false),
+
+ // Late outputs do not conflict with inputs, but normal outputs do.
+ hir::InlineAsmOperand::Out { late, .. } => (!late, true),
+
+ hir::InlineAsmOperand::InOut { .. }
+ | hir::InlineAsmOperand::SplitInOut { .. } => (true, true),
+
+ hir::InlineAsmOperand::Const { .. }
+ | hir::InlineAsmOperand::SymFn { .. }
+ | hir::InlineAsmOperand::SymStatic { .. } => {
+ unreachable!()
+ }
+ };
+
+ // Flag to output the error only once per operand
+ let mut skip = false;
+ reg.overlapping_regs(|r| {
+ let mut check = |used_regs: &mut FxHashMap<asm::InlineAsmReg, usize>,
+ input| {
+ match used_regs.entry(r) {
+ Entry::Occupied(o) => {
+ if skip {
+ return;
+ }
+ skip = true;
+
+ let idx2 = *o.get();
+ let &(ref op2, op_sp2) = &operands[idx2];
+ let Some(asm::InlineAsmRegOrRegClass::Reg(reg2)) = op2.reg() else {
+ unreachable!();
+ };
+
+ let msg = format!(
+ "register `{}` conflicts with register `{}`",
+ reg.name(),
+ reg2.name()
+ );
+ let mut err = sess.struct_span_err(op_sp, &msg);
+ err.span_label(op_sp, &format!("register `{}`", reg.name()));
+ err.span_label(op_sp2, &format!("register `{}`", reg2.name()));
+
+ match (op, op2) {
+ (
+ hir::InlineAsmOperand::In { .. },
+ hir::InlineAsmOperand::Out { late, .. },
+ )
+ | (
+ hir::InlineAsmOperand::Out { late, .. },
+ hir::InlineAsmOperand::In { .. },
+ ) => {
+ assert!(!*late);
+ let out_op_sp = if input { op_sp2 } else { op_sp };
+ let msg = "use `lateout` instead of \
+ `out` to avoid conflict";
+ err.span_help(out_op_sp, msg);
+ }
+ _ => {}
+ }
+
+ err.emit();
+ }
+ Entry::Vacant(v) => {
+ if r == reg {
+ v.insert(idx);
+ }
+ }
+ }
+ };
+ if input {
+ check(&mut used_input_regs, true);
+ }
+ if output {
+ check(&mut used_output_regs, false);
+ }
+ });
+ }
+ }
+ }
+
+ // If a clobber_abi is specified, add the necessary clobbers to the
+ // operands list.
+ let mut clobbered = FxHashSet::default();
+ for (abi, (_, abi_span)) in clobber_abis {
+ for &clobber in abi.clobbered_regs() {
+ // Don't emit a clobber for a register already clobbered
+ if clobbered.contains(&clobber) {
+ continue;
+ }
+
+ let mut output_used = false;
+ clobber.overlapping_regs(|reg| {
+ if used_output_regs.contains_key(&reg) {
+ output_used = true;
+ }
+ });
+
+ if !output_used {
+ operands.push((
+ hir::InlineAsmOperand::Out {
+ reg: asm::InlineAsmRegOrRegClass::Reg(clobber),
+ late: true,
+ expr: None,
+ },
+ self.lower_span(abi_span),
+ ));
+ clobbered.insert(clobber);
+ }
+ }
+ }
+
+ let operands = self.arena.alloc_from_iter(operands);
+ let template = self.arena.alloc_from_iter(asm.template.iter().cloned());
+ let template_strs = self.arena.alloc_from_iter(
+ asm.template_strs
+ .iter()
+ .map(|(sym, snippet, span)| (*sym, *snippet, self.lower_span(*span))),
+ );
+ let line_spans =
+ self.arena.alloc_from_iter(asm.line_spans.iter().map(|span| self.lower_span(*span)));
+ let hir_asm =
+ hir::InlineAsm { template, template_strs, operands, options: asm.options, line_spans };
+ self.arena.alloc(hir_asm)
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/block.rs b/compiler/rustc_ast_lowering/src/block.rs
new file mode 100644
index 000000000..7cbfe143b
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/block.rs
@@ -0,0 +1,122 @@
+use crate::{ImplTraitContext, ImplTraitPosition, LoweringContext};
+use rustc_ast::{Block, BlockCheckMode, Local, LocalKind, Stmt, StmtKind};
+use rustc_hir as hir;
+use rustc_session::parse::feature_err;
+use rustc_span::sym;
+
+use smallvec::SmallVec;
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ pub(super) fn lower_block(
+ &mut self,
+ b: &Block,
+ targeted_by_break: bool,
+ ) -> &'hir hir::Block<'hir> {
+ self.arena.alloc(self.lower_block_noalloc(b, targeted_by_break))
+ }
+
+ pub(super) fn lower_block_noalloc(
+ &mut self,
+ b: &Block,
+ targeted_by_break: bool,
+ ) -> hir::Block<'hir> {
+ let (stmts, expr) = self.lower_stmts(&b.stmts);
+ let rules = self.lower_block_check_mode(&b.rules);
+ let hir_id = self.lower_node_id(b.id);
+ hir::Block { hir_id, stmts, expr, rules, span: self.lower_span(b.span), targeted_by_break }
+ }
+
+ fn lower_stmts(
+ &mut self,
+ mut ast_stmts: &[Stmt],
+ ) -> (&'hir [hir::Stmt<'hir>], Option<&'hir hir::Expr<'hir>>) {
+ let mut stmts = SmallVec::<[hir::Stmt<'hir>; 8]>::new();
+ let mut expr = None;
+ while let [s, tail @ ..] = ast_stmts {
+ match s.kind {
+ StmtKind::Local(ref local) => {
+ let hir_id = self.lower_node_id(s.id);
+ let local = self.lower_local(local);
+ self.alias_attrs(hir_id, local.hir_id);
+ let kind = hir::StmtKind::Local(local);
+ let span = self.lower_span(s.span);
+ stmts.push(hir::Stmt { hir_id, kind, span });
+ }
+ StmtKind::Item(ref it) => {
+ stmts.extend(self.lower_item_ref(it).into_iter().enumerate().map(
+ |(i, item_id)| {
+ let hir_id = match i {
+ 0 => self.lower_node_id(s.id),
+ _ => self.next_id(),
+ };
+ let kind = hir::StmtKind::Item(item_id);
+ let span = self.lower_span(s.span);
+ hir::Stmt { hir_id, kind, span }
+ },
+ ));
+ }
+ StmtKind::Expr(ref e) => {
+ let e = self.lower_expr(e);
+ if tail.is_empty() {
+ expr = Some(e);
+ } else {
+ let hir_id = self.lower_node_id(s.id);
+ self.alias_attrs(hir_id, e.hir_id);
+ let kind = hir::StmtKind::Expr(e);
+ let span = self.lower_span(s.span);
+ stmts.push(hir::Stmt { hir_id, kind, span });
+ }
+ }
+ StmtKind::Semi(ref e) => {
+ let e = self.lower_expr(e);
+ let hir_id = self.lower_node_id(s.id);
+ self.alias_attrs(hir_id, e.hir_id);
+ let kind = hir::StmtKind::Semi(e);
+ let span = self.lower_span(s.span);
+ stmts.push(hir::Stmt { hir_id, kind, span });
+ }
+ StmtKind::Empty => {}
+ StmtKind::MacCall(..) => panic!("shouldn't exist here"),
+ }
+ ast_stmts = &ast_stmts[1..];
+ }
+ (self.arena.alloc_from_iter(stmts), expr)
+ }
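+
+    // Illustrative note on `lower_stmts` above, not part of upstream rustc:
+    // for a block such as
+    //     { let x = 1; x }
+    // the `let` becomes a `hir::StmtKind::Local` pushed onto `stmts`, while
+    // the trailing `x` is a `StmtKind::Expr` with an empty tail and so ends
+    // up as the block's `expr` rather than as a statement.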
+
+ fn lower_local(&mut self, l: &Local) -> &'hir hir::Local<'hir> {
+ let ty = l
+ .ty
+ .as_ref()
+ .map(|t| self.lower_ty(t, ImplTraitContext::Disallowed(ImplTraitPosition::Variable)));
+ let init = l.kind.init().map(|init| self.lower_expr(init));
+ let hir_id = self.lower_node_id(l.id);
+ let pat = self.lower_pat(&l.pat);
+ let els = if let LocalKind::InitElse(_, els) = &l.kind {
+ if !self.tcx.features().let_else {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::let_else,
+ l.span,
+ "`let...else` statements are unstable",
+ )
+ .emit();
+ }
+ Some(self.lower_block(els, false))
+ } else {
+ None
+ };
+ let span = self.lower_span(l.span);
+ let source = hir::LocalSource::Normal;
+ self.lower_attrs(hir_id, &l.attrs);
+ self.arena.alloc(hir::Local { hir_id, ty, pat, init, els, span, source })
+ }
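+
+    // Illustrative note on `lower_local` above, not part of upstream rustc:
+    // on this version a statement such as
+    //     let Some(x) = opt else { return; };
+    // takes the `LocalKind::InitElse` branch and emits the `let_else`
+    // feature-gate error unless the feature is enabled; the else block is
+    // still lowered so compilation can recover and proceed.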
+
+ fn lower_block_check_mode(&mut self, b: &BlockCheckMode) -> hir::BlockCheckMode {
+ match *b {
+ BlockCheckMode::Default => hir::BlockCheckMode::DefaultBlock,
+ BlockCheckMode::Unsafe(u) => {
+ hir::BlockCheckMode::UnsafeBlock(self.lower_unsafe_source(u))
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs
new file mode 100644
index 000000000..fb6715ff1
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/expr.rs
@@ -0,0 +1,1914 @@
+use super::ResolverAstLoweringExt;
+use super::{ImplTraitContext, LoweringContext, ParamMode, ParenthesizedGenericArgs};
+use crate::{FnDeclKind, ImplTraitPosition};
+
+use rustc_ast::attr;
+use rustc_ast::ptr::P as AstP;
+use rustc_ast::*;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::definitions::DefPathData;
+use rustc_span::source_map::{respan, DesugaringKind, Span, Spanned};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::DUMMY_SP;
+
+impl<'hir> LoweringContext<'_, 'hir> {
+ fn lower_exprs(&mut self, exprs: &[AstP<Expr>]) -> &'hir [hir::Expr<'hir>] {
+ self.arena.alloc_from_iter(exprs.iter().map(|x| self.lower_expr_mut(x)))
+ }
+
+ pub(super) fn lower_expr(&mut self, e: &Expr) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.lower_expr_mut(e))
+ }
+
+ pub(super) fn lower_expr_mut(&mut self, e: &Expr) -> hir::Expr<'hir> {
+ ensure_sufficient_stack(|| {
+ let kind = match e.kind {
+ ExprKind::Box(ref inner) => hir::ExprKind::Box(self.lower_expr(inner)),
+ ExprKind::Array(ref exprs) => hir::ExprKind::Array(self.lower_exprs(exprs)),
+ ExprKind::ConstBlock(ref anon_const) => {
+ let anon_const = self.lower_anon_const(anon_const);
+ hir::ExprKind::ConstBlock(anon_const)
+ }
+ ExprKind::Repeat(ref expr, ref count) => {
+ let expr = self.lower_expr(expr);
+ let count = self.lower_array_length(count);
+ hir::ExprKind::Repeat(expr, count)
+ }
+ ExprKind::Tup(ref elts) => hir::ExprKind::Tup(self.lower_exprs(elts)),
+ ExprKind::Call(ref f, ref args) => {
+ if e.attrs.get(0).map_or(false, |a| a.has_name(sym::rustc_box)) {
+ if let [inner] = &args[..] && e.attrs.len() == 1 {
+ let kind = hir::ExprKind::Box(self.lower_expr(&inner));
+ let hir_id = self.lower_node_id(e.id);
+ return hir::Expr { hir_id, kind, span: self.lower_span(e.span) };
+ } else {
+ self.tcx.sess
+ .struct_span_err(
+ e.span,
+ "#[rustc_box] requires precisely one argument \
+ and no other attributes are allowed",
+ )
+ .emit();
+ hir::ExprKind::Err
+ }
+ } else if let Some(legacy_args) = self.resolver.legacy_const_generic_args(f) {
+ self.lower_legacy_const_generics((**f).clone(), args.clone(), &legacy_args)
+ } else {
+ let f = self.lower_expr(f);
+ hir::ExprKind::Call(f, self.lower_exprs(args))
+ }
+ }
+ ExprKind::MethodCall(ref seg, ref args, span) => {
+ let hir_seg = self.arena.alloc(self.lower_path_segment(
+ e.span,
+ seg,
+ ParamMode::Optional,
+ ParenthesizedGenericArgs::Err,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ ));
+ let args = self.lower_exprs(args);
+ hir::ExprKind::MethodCall(hir_seg, args, self.lower_span(span))
+ }
+ ExprKind::Binary(binop, ref lhs, ref rhs) => {
+ let binop = self.lower_binop(binop);
+ let lhs = self.lower_expr(lhs);
+ let rhs = self.lower_expr(rhs);
+ hir::ExprKind::Binary(binop, lhs, rhs)
+ }
+ ExprKind::Unary(op, ref ohs) => {
+ let op = self.lower_unop(op);
+ let ohs = self.lower_expr(ohs);
+ hir::ExprKind::Unary(op, ohs)
+ }
+ ExprKind::Lit(ref l) => {
+ hir::ExprKind::Lit(respan(self.lower_span(l.span), l.kind.clone()))
+ }
+ ExprKind::Cast(ref expr, ref ty) => {
+ let expr = self.lower_expr(expr);
+ let ty =
+ self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ hir::ExprKind::Cast(expr, ty)
+ }
+ ExprKind::Type(ref expr, ref ty) => {
+ let expr = self.lower_expr(expr);
+ let ty =
+ self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ hir::ExprKind::Type(expr, ty)
+ }
+ ExprKind::AddrOf(k, m, ref ohs) => {
+ let ohs = self.lower_expr(ohs);
+ hir::ExprKind::AddrOf(k, m, ohs)
+ }
+ ExprKind::Let(ref pat, ref scrutinee, span) => {
+ hir::ExprKind::Let(self.arena.alloc(hir::Let {
+ hir_id: self.next_id(),
+ span: self.lower_span(span),
+ pat: self.lower_pat(pat),
+ ty: None,
+ init: self.lower_expr(scrutinee),
+ }))
+ }
+ ExprKind::If(ref cond, ref then, ref else_opt) => {
+ self.lower_expr_if(cond, then, else_opt.as_deref())
+ }
+ ExprKind::While(ref cond, ref body, opt_label) => {
+ self.with_loop_scope(e.id, |this| {
+ let span =
+ this.mark_span_with_reason(DesugaringKind::WhileLoop, e.span, None);
+ this.lower_expr_while_in_loop_scope(span, cond, body, opt_label)
+ })
+ }
+ ExprKind::Loop(ref body, opt_label) => self.with_loop_scope(e.id, |this| {
+ hir::ExprKind::Loop(
+ this.lower_block(body, false),
+ this.lower_label(opt_label),
+ hir::LoopSource::Loop,
+ DUMMY_SP,
+ )
+ }),
+ ExprKind::TryBlock(ref body) => self.lower_expr_try_block(body),
+ ExprKind::Match(ref expr, ref arms) => hir::ExprKind::Match(
+ self.lower_expr(expr),
+ self.arena.alloc_from_iter(arms.iter().map(|x| self.lower_arm(x))),
+ hir::MatchSource::Normal,
+ ),
+ ExprKind::Async(capture_clause, closure_node_id, ref block) => self
+ .make_async_expr(
+ capture_clause,
+ closure_node_id,
+ None,
+ block.span,
+ hir::AsyncGeneratorKind::Block,
+ |this| this.with_new_scopes(|this| this.lower_block_expr(block)),
+ ),
+ ExprKind::Await(ref expr) => {
+ let span = if expr.span.hi() < e.span.hi() {
+ expr.span.shrink_to_hi().with_hi(e.span.hi())
+ } else {
+ // this is a recovered `await expr`
+ e.span
+ };
+ self.lower_expr_await(span, expr)
+ }
+ ExprKind::Closure(
+ ref binder,
+ capture_clause,
+ asyncness,
+ movability,
+ ref decl,
+ ref body,
+ fn_decl_span,
+ ) => {
+ if let Async::Yes { closure_id, .. } = asyncness {
+ self.lower_expr_async_closure(
+ binder,
+ capture_clause,
+ e.id,
+ closure_id,
+ decl,
+ body,
+ fn_decl_span,
+ )
+ } else {
+ self.lower_expr_closure(
+ binder,
+ capture_clause,
+ e.id,
+ movability,
+ decl,
+ body,
+ fn_decl_span,
+ )
+ }
+ }
+ ExprKind::Block(ref blk, opt_label) => {
+ let opt_label = self.lower_label(opt_label);
+ hir::ExprKind::Block(self.lower_block(blk, opt_label.is_some()), opt_label)
+ }
+ ExprKind::Assign(ref el, ref er, span) => {
+ self.lower_expr_assign(el, er, span, e.span)
+ }
+ ExprKind::AssignOp(op, ref el, ref er) => hir::ExprKind::AssignOp(
+ self.lower_binop(op),
+ self.lower_expr(el),
+ self.lower_expr(er),
+ ),
+ ExprKind::Field(ref el, ident) => {
+ hir::ExprKind::Field(self.lower_expr(el), self.lower_ident(ident))
+ }
+ ExprKind::Index(ref el, ref er) => {
+ hir::ExprKind::Index(self.lower_expr(el), self.lower_expr(er))
+ }
+ ExprKind::Range(Some(ref e1), Some(ref e2), RangeLimits::Closed) => {
+ self.lower_expr_range_closed(e.span, e1, e2)
+ }
+ ExprKind::Range(ref e1, ref e2, lims) => {
+ self.lower_expr_range(e.span, e1.as_deref(), e2.as_deref(), lims)
+ }
+ ExprKind::Underscore => {
+ self.tcx
+ .sess.struct_span_err(
+ e.span,
+ "in expressions, `_` can only be used on the left-hand side of an assignment",
+ )
+ .span_label(e.span, "`_` not allowed here")
+ .emit();
+ hir::ExprKind::Err
+ }
+ ExprKind::Path(ref qself, ref path) => {
+ let qpath = self.lower_qpath(
+ e.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ hir::ExprKind::Path(qpath)
+ }
+ ExprKind::Break(opt_label, ref opt_expr) => {
+ let opt_expr = opt_expr.as_ref().map(|x| self.lower_expr(x));
+ hir::ExprKind::Break(self.lower_jump_destination(e.id, opt_label), opt_expr)
+ }
+ ExprKind::Continue(opt_label) => {
+ hir::ExprKind::Continue(self.lower_jump_destination(e.id, opt_label))
+ }
+ ExprKind::Ret(ref e) => {
+ let e = e.as_ref().map(|x| self.lower_expr(x));
+ hir::ExprKind::Ret(e)
+ }
+ ExprKind::Yeet(ref sub_expr) => self.lower_expr_yeet(e.span, sub_expr.as_deref()),
+ ExprKind::InlineAsm(ref asm) => {
+ hir::ExprKind::InlineAsm(self.lower_inline_asm(e.span, asm))
+ }
+ ExprKind::Struct(ref se) => {
+ let rest = match &se.rest {
+ StructRest::Base(e) => Some(self.lower_expr(e)),
+ StructRest::Rest(sp) => {
+ self.tcx
+ .sess
+ .struct_span_err(*sp, "base expression required after `..`")
+ .span_label(*sp, "add a base expression here")
+ .emit();
+ Some(&*self.arena.alloc(self.expr_err(*sp)))
+ }
+ StructRest::None => None,
+ };
+ hir::ExprKind::Struct(
+ self.arena.alloc(self.lower_qpath(
+ e.id,
+ &se.qself,
+ &se.path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ )),
+ self.arena
+ .alloc_from_iter(se.fields.iter().map(|x| self.lower_expr_field(x))),
+ rest,
+ )
+ }
+ ExprKind::Yield(ref opt_expr) => self.lower_expr_yield(e.span, opt_expr.as_deref()),
+ ExprKind::Err => hir::ExprKind::Err,
+ ExprKind::Try(ref sub_expr) => self.lower_expr_try(e.span, sub_expr),
+ ExprKind::Paren(ref ex) => {
+ let mut ex = self.lower_expr_mut(ex);
+ // Include parens in span, but only if it is a super-span.
+ if e.span.contains(ex.span) {
+ ex.span = self.lower_span(e.span);
+ }
+ // Merge attributes into the inner expression.
+ if !e.attrs.is_empty() {
+ let old_attrs =
+ self.attrs.get(&ex.hir_id.local_id).map(|la| *la).unwrap_or(&[]);
+ self.attrs.insert(
+ ex.hir_id.local_id,
+ &*self.arena.alloc_from_iter(
+ e.attrs
+ .iter()
+ .map(|a| self.lower_attr(a))
+ .chain(old_attrs.iter().cloned()),
+ ),
+ );
+ }
+ return ex;
+ }
+
+ // Desugar `ExprForLoop`
+ // from: `[opt_ident]: for <pat> in <head> <body>`
+ ExprKind::ForLoop(ref pat, ref head, ref body, opt_label) => {
+ return self.lower_expr_for(e, pat, head, body, opt_label);
+ }
+ ExprKind::MacCall(_) => panic!("{:?} shouldn't exist here", e.span),
+ };
+
+ let hir_id = self.lower_node_id(e.id);
+ self.lower_attrs(hir_id, &e.attrs);
+ hir::Expr { hir_id, kind, span: self.lower_span(e.span) }
+ })
+ }
+
+ fn lower_unop(&mut self, u: UnOp) -> hir::UnOp {
+ match u {
+ UnOp::Deref => hir::UnOp::Deref,
+ UnOp::Not => hir::UnOp::Not,
+ UnOp::Neg => hir::UnOp::Neg,
+ }
+ }
+
+ fn lower_binop(&mut self, b: BinOp) -> hir::BinOp {
+ Spanned {
+ node: match b.node {
+ BinOpKind::Add => hir::BinOpKind::Add,
+ BinOpKind::Sub => hir::BinOpKind::Sub,
+ BinOpKind::Mul => hir::BinOpKind::Mul,
+ BinOpKind::Div => hir::BinOpKind::Div,
+ BinOpKind::Rem => hir::BinOpKind::Rem,
+ BinOpKind::And => hir::BinOpKind::And,
+ BinOpKind::Or => hir::BinOpKind::Or,
+ BinOpKind::BitXor => hir::BinOpKind::BitXor,
+ BinOpKind::BitAnd => hir::BinOpKind::BitAnd,
+ BinOpKind::BitOr => hir::BinOpKind::BitOr,
+ BinOpKind::Shl => hir::BinOpKind::Shl,
+ BinOpKind::Shr => hir::BinOpKind::Shr,
+ BinOpKind::Eq => hir::BinOpKind::Eq,
+ BinOpKind::Lt => hir::BinOpKind::Lt,
+ BinOpKind::Le => hir::BinOpKind::Le,
+ BinOpKind::Ne => hir::BinOpKind::Ne,
+ BinOpKind::Ge => hir::BinOpKind::Ge,
+ BinOpKind::Gt => hir::BinOpKind::Gt,
+ },
+ span: self.lower_span(b.span),
+ }
+ }
+
+ fn lower_legacy_const_generics(
+ &mut self,
+ mut f: Expr,
+ args: Vec<AstP<Expr>>,
+ legacy_args_idx: &[usize],
+ ) -> hir::ExprKind<'hir> {
+ let ExprKind::Path(None, ref mut path) = f.kind else {
+ unreachable!();
+ };
+
+ // Split the arguments into const generics and normal arguments
+ let mut real_args = vec![];
+ let mut generic_args = vec![];
+ for (idx, arg) in args.into_iter().enumerate() {
+ if legacy_args_idx.contains(&idx) {
+ let parent_def_id = self.current_hir_id_owner;
+ let node_id = self.next_node_id();
+
+ // Add a definition for the in-band const def.
+ self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+
+ let anon_const = AnonConst { id: node_id, value: arg };
+ generic_args.push(AngleBracketedArg::Arg(GenericArg::Const(anon_const)));
+ } else {
+ real_args.push(arg);
+ }
+ }
+
+ // Add generic args to the last element of the path.
+ let last_segment = path.segments.last_mut().unwrap();
+ assert!(last_segment.args.is_none());
+ last_segment.args = Some(AstP(GenericArgs::AngleBracketed(AngleBracketedArgs {
+ span: DUMMY_SP,
+ args: generic_args,
+ })));
+
+ // Now lower everything as normal.
+ let f = self.lower_expr(&f);
+ hir::ExprKind::Call(f, self.lower_exprs(&real_args))
+ }
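+
+    // Illustrative note on `lower_legacy_const_generics` above, not part of
+    // upstream rustc: this supports stdarch intrinsics declared like
+    //     #[rustc_legacy_const_generics(2)]
+    //     fn _mm_shuffle_ps<const MASK: i32>(a: __m128, b: __m128) -> __m128;
+    // so that a call written positionally, `_mm_shuffle_ps(a, b, 4)`, gets
+    // its third argument moved into an anonymous const generic argument.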
+
+ fn lower_expr_if(
+ &mut self,
+ cond: &Expr,
+ then: &Block,
+ else_opt: Option<&Expr>,
+ ) -> hir::ExprKind<'hir> {
+ let lowered_cond = self.lower_expr(cond);
+ let new_cond = self.manage_let_cond(lowered_cond);
+ let then_expr = self.lower_block_expr(then);
+ if let Some(rslt) = else_opt {
+ hir::ExprKind::If(new_cond, self.arena.alloc(then_expr), Some(self.lower_expr(rslt)))
+ } else {
+ hir::ExprKind::If(new_cond, self.arena.alloc(then_expr), None)
+ }
+ }
+
+    // If `cond` contains a `let` expression, return it unchanged. Otherwise,
+    // wrap `cond` in a terminating scope (`DropTemps`) so its temporaries
+    // are dropped before the consuming `if`/`while` body runs.
+ fn manage_let_cond(&mut self, cond: &'hir hir::Expr<'hir>) -> &'hir hir::Expr<'hir> {
+ fn has_let_expr<'hir>(expr: &'hir hir::Expr<'hir>) -> bool {
+ match expr.kind {
+ hir::ExprKind::Binary(_, lhs, rhs) => has_let_expr(lhs) || has_let_expr(rhs),
+ hir::ExprKind::Let(..) => true,
+ _ => false,
+ }
+ }
+ if has_let_expr(cond) {
+ cond
+ } else {
+ let reason = DesugaringKind::CondTemporary;
+ let span_block = self.mark_span_with_reason(reason, cond.span, None);
+ self.expr_drop_temps(span_block, cond, AttrVec::new())
+ }
+ }
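+
+    // Illustrative note on `manage_let_cond` above, not part of upstream
+    // rustc: for `if foo() { .. }` the condition becomes `DropTemps(foo())`,
+    // so temporaries created while evaluating `foo()` are dropped before
+    // the `then` block runs, whereas `if let Some(x) = e { .. }` is left
+    // untouched because the `let` must remain visible to bind `x`.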
+
+ // We desugar: `'label: while $cond $body` into:
+ //
+ // ```
+ // 'label: loop {
+ // if { let _t = $cond; _t } {
+ // $body
+ // }
+ // else {
+ // break;
+ // }
+ // }
+ // ```
+ //
+ // Wrap in a construct equivalent to `{ let _t = $cond; _t }`
+ // to preserve drop semantics since `while $cond { ... }` does not
+ // let temporaries live outside of `cond`.
+ fn lower_expr_while_in_loop_scope(
+ &mut self,
+ span: Span,
+ cond: &Expr,
+ body: &Block,
+ opt_label: Option<Label>,
+ ) -> hir::ExprKind<'hir> {
+ let lowered_cond = self.with_loop_condition_scope(|t| t.lower_expr(cond));
+ let new_cond = self.manage_let_cond(lowered_cond);
+ let then = self.lower_block_expr(body);
+ let expr_break = self.expr_break(span, ThinVec::new());
+ let stmt_break = self.stmt_expr(span, expr_break);
+ let else_blk = self.block_all(span, arena_vec![self; stmt_break], None);
+ let else_expr = self.arena.alloc(self.expr_block(else_blk, ThinVec::new()));
+ let if_kind = hir::ExprKind::If(new_cond, self.arena.alloc(then), Some(else_expr));
+ let if_expr = self.expr(span, if_kind, ThinVec::new());
+ let block = self.block_expr(self.arena.alloc(if_expr));
+ let span = self.lower_span(span.with_hi(cond.span.hi()));
+ let opt_label = self.lower_label(opt_label);
+ hir::ExprKind::Loop(block, opt_label, hir::LoopSource::While, span)
+ }
+
+ /// Desugar `try { <stmts>; <expr> }` into `{ <stmts>; ::std::ops::Try::from_output(<expr>) }`,
+ /// `try { <stmts>; }` into `{ <stmts>; ::std::ops::Try::from_output(()) }`
+ /// and save the block id to use it as a break target for desugaring of the `?` operator.
+ fn lower_expr_try_block(&mut self, body: &Block) -> hir::ExprKind<'hir> {
+ self.with_catch_scope(body.id, |this| {
+ let mut block = this.lower_block_noalloc(body, true);
+
+ // Final expression of the block (if present) or `()` with span at the end of block
+ let (try_span, tail_expr) = if let Some(expr) = block.expr.take() {
+ (
+ this.mark_span_with_reason(
+ DesugaringKind::TryBlock,
+ expr.span,
+ this.allow_try_trait.clone(),
+ ),
+ expr,
+ )
+ } else {
+ let try_span = this.mark_span_with_reason(
+ DesugaringKind::TryBlock,
+ this.tcx.sess.source_map().end_point(body.span),
+ this.allow_try_trait.clone(),
+ );
+
+ (try_span, this.expr_unit(try_span))
+ };
+
+ let ok_wrapped_span =
+ this.mark_span_with_reason(DesugaringKind::TryBlock, tail_expr.span, None);
+
+ // `::std::ops::Try::from_output($tail_expr)`
+ block.expr = Some(this.wrap_in_try_constructor(
+ hir::LangItem::TryTraitFromOutput,
+ try_span,
+ tail_expr,
+ ok_wrapped_span,
+ ));
+
+ hir::ExprKind::Block(this.arena.alloc(block), None)
+ })
+ }
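+
+    // Illustrative note on `lower_expr_try_block` above, not part of
+    // upstream rustc: a block such as
+    //     try { opt?; 1 }
+    // lowers to roughly
+    //     'tgt: { opt?; Try::from_output(1) }
+    // where the `?` desugaring in `lower_expr_try` breaks to the block id
+    // saved by `with_catch_scope` on the residual path.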
+
+ fn wrap_in_try_constructor(
+ &mut self,
+ lang_item: hir::LangItem,
+ method_span: Span,
+ expr: &'hir hir::Expr<'hir>,
+ overall_span: Span,
+ ) -> &'hir hir::Expr<'hir> {
+ let constructor = self.arena.alloc(self.expr_lang_item_path(
+ method_span,
+ lang_item,
+ ThinVec::new(),
+ None,
+ ));
+ self.expr_call(overall_span, constructor, std::slice::from_ref(expr))
+ }
+
+ fn lower_arm(&mut self, arm: &Arm) -> hir::Arm<'hir> {
+ let pat = self.lower_pat(&arm.pat);
+ let guard = arm.guard.as_ref().map(|cond| {
+ if let ExprKind::Let(ref pat, ref scrutinee, span) = cond.kind {
+ hir::Guard::IfLet(self.arena.alloc(hir::Let {
+ hir_id: self.next_id(),
+ span: self.lower_span(span),
+ pat: self.lower_pat(pat),
+ ty: None,
+ init: self.lower_expr(scrutinee),
+ }))
+ } else {
+ hir::Guard::If(self.lower_expr(cond))
+ }
+ });
+ let hir_id = self.next_id();
+ self.lower_attrs(hir_id, &arm.attrs);
+ hir::Arm {
+ hir_id,
+ pat,
+ guard,
+ body: self.lower_expr(&arm.body),
+ span: self.lower_span(arm.span),
+ }
+ }
+
+ /// Lower an `async` construct to a generator that is then wrapped so it implements `Future`.
+ ///
+ /// This results in:
+ ///
+ /// ```text
+ /// std::future::from_generator(static move? |_task_context| -> <ret_ty> {
+ /// <body>
+ /// })
+ /// ```
+ pub(super) fn make_async_expr(
+ &mut self,
+ capture_clause: CaptureBy,
+ closure_node_id: NodeId,
+ ret_ty: Option<AstP<Ty>>,
+ span: Span,
+ async_gen_kind: hir::AsyncGeneratorKind,
+ body: impl FnOnce(&mut Self) -> hir::Expr<'hir>,
+ ) -> hir::ExprKind<'hir> {
+ let output = match ret_ty {
+ Some(ty) => hir::FnRetTy::Return(
+ self.lower_ty(&ty, ImplTraitContext::Disallowed(ImplTraitPosition::AsyncBlock)),
+ ),
+ None => hir::FnRetTy::DefaultReturn(self.lower_span(span)),
+ };
+
+ // Resume argument type. We let the compiler infer this to simplify the lowering. It is
+ // fully constrained by `future::from_generator`.
+ let input_ty = hir::Ty {
+ hir_id: self.next_id(),
+ kind: hir::TyKind::Infer,
+ span: self.lower_span(span),
+ };
+
+ // The closure/generator `FnDecl` takes a single (resume) argument of type `input_ty`.
+ let fn_decl = self.arena.alloc(hir::FnDecl {
+ inputs: arena_vec![self; input_ty],
+ output,
+ c_variadic: false,
+ implicit_self: hir::ImplicitSelfKind::None,
+ });
+
+ // Lower the argument pattern/ident. The ident is used again in the `.await` lowering.
+ let (pat, task_context_hid) = self.pat_ident_binding_mode(
+ span,
+ Ident::with_dummy_span(sym::_task_context),
+ hir::BindingAnnotation::Mutable,
+ );
+ let param = hir::Param {
+ hir_id: self.next_id(),
+ pat,
+ ty_span: self.lower_span(span),
+ span: self.lower_span(span),
+ };
+ let params = arena_vec![self; param];
+
+ let body = self.lower_body(move |this| {
+ this.generator_kind = Some(hir::GeneratorKind::Async(async_gen_kind));
+
+ let old_ctx = this.task_context;
+ this.task_context = Some(task_context_hid);
+ let res = body(this);
+ this.task_context = old_ctx;
+ (params, res)
+ });
+
+ // `static |_task_context| -> <ret_ty> { body }`:
+ let generator_kind = {
+ let c = self.arena.alloc(hir::Closure {
+ binder: hir::ClosureBinder::Default,
+ capture_clause,
+ bound_generic_params: &[],
+ fn_decl,
+ body,
+ fn_decl_span: self.lower_span(span),
+ movability: Some(hir::Movability::Static),
+ });
+
+ hir::ExprKind::Closure(c)
+ };
+ let generator = hir::Expr {
+ hir_id: self.lower_node_id(closure_node_id),
+ kind: generator_kind,
+ span: self.lower_span(span),
+ };
+
+ // `future::from_generator`:
+ let unstable_span =
+ self.mark_span_with_reason(DesugaringKind::Async, span, self.allow_gen_future.clone());
+ let gen_future = self.expr_lang_item_path(
+ unstable_span,
+ hir::LangItem::FromGenerator,
+ ThinVec::new(),
+ None,
+ );
+
+ // `future::from_generator(generator)`:
+ hir::ExprKind::Call(self.arena.alloc(gen_future), arena_vec![self; generator])
+ }
+
+ /// Desugar `<expr>.await` into:
+ /// ```ignore (pseudo-rust)
+ /// match ::std::future::IntoFuture::into_future(<expr>) {
+ /// mut __awaitee => loop {
+ /// match unsafe { ::std::future::Future::poll(
+ /// <::std::pin::Pin>::new_unchecked(&mut __awaitee),
+ /// ::std::future::get_context(task_context),
+ /// ) } {
+ /// ::std::task::Poll::Ready(result) => break result,
+ /// ::std::task::Poll::Pending => {}
+ /// }
+ /// task_context = yield ();
+ /// }
+ /// }
+ /// ```
+ fn lower_expr_await(&mut self, dot_await_span: Span, expr: &Expr) -> hir::ExprKind<'hir> {
+ let full_span = expr.span.to(dot_await_span);
+ match self.generator_kind {
+ Some(hir::GeneratorKind::Async(_)) => {}
+ Some(hir::GeneratorKind::Gen) | None => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ dot_await_span,
+ E0728,
+ "`await` is only allowed inside `async` functions and blocks"
+ );
+ err.span_label(dot_await_span, "only allowed inside `async` functions and blocks");
+ if let Some(item_sp) = self.current_item {
+ err.span_label(item_sp, "this is not `async`");
+ }
+ err.emit();
+ }
+ }
+ let span = self.mark_span_with_reason(DesugaringKind::Await, dot_await_span, None);
+ let gen_future_span = self.mark_span_with_reason(
+ DesugaringKind::Await,
+ full_span,
+ self.allow_gen_future.clone(),
+ );
+ let expr = self.lower_expr_mut(expr);
+ let expr_hir_id = expr.hir_id;
+
+ // Note that the name of this binding must not be changed to something else because
+ // debuggers and debugger extensions expect it to be called `__awaitee`. They use
+        // this name to identify what is being awaited by a suspended async function.
+ let awaitee_ident = Ident::with_dummy_span(sym::__awaitee);
+ let (awaitee_pat, awaitee_pat_hid) =
+ self.pat_ident_binding_mode(span, awaitee_ident, hir::BindingAnnotation::Mutable);
+
+ let task_context_ident = Ident::with_dummy_span(sym::_task_context);
+
+ // unsafe {
+ // ::std::future::Future::poll(
+ // ::std::pin::Pin::new_unchecked(&mut __awaitee),
+ // ::std::future::get_context(task_context),
+ // )
+ // }
+ let poll_expr = {
+ let awaitee = self.expr_ident(span, awaitee_ident, awaitee_pat_hid);
+ let ref_mut_awaitee = self.expr_mut_addr_of(span, awaitee);
+ let task_context = if let Some(task_context_hid) = self.task_context {
+ self.expr_ident_mut(span, task_context_ident, task_context_hid)
+ } else {
+ // Use of `await` outside of an async context, we cannot use `task_context` here.
+ self.expr_err(span)
+ };
+ let new_unchecked = self.expr_call_lang_item_fn_mut(
+ span,
+ hir::LangItem::PinNewUnchecked,
+ arena_vec![self; ref_mut_awaitee],
+ Some(expr_hir_id),
+ );
+ let get_context = self.expr_call_lang_item_fn_mut(
+ gen_future_span,
+ hir::LangItem::GetContext,
+ arena_vec![self; task_context],
+ Some(expr_hir_id),
+ );
+ let call = self.expr_call_lang_item_fn(
+ span,
+ hir::LangItem::FuturePoll,
+ arena_vec![self; new_unchecked, get_context],
+ Some(expr_hir_id),
+ );
+ self.arena.alloc(self.expr_unsafe(call))
+ };
+
+ // `::std::task::Poll::Ready(result) => break result`
+ let loop_node_id = self.next_node_id();
+ let loop_hir_id = self.lower_node_id(loop_node_id);
+ let ready_arm = {
+ let x_ident = Ident::with_dummy_span(sym::result);
+ let (x_pat, x_pat_hid) = self.pat_ident(gen_future_span, x_ident);
+ let x_expr = self.expr_ident(gen_future_span, x_ident, x_pat_hid);
+ let ready_field = self.single_pat_field(gen_future_span, x_pat);
+ let ready_pat = self.pat_lang_item_variant(
+ span,
+ hir::LangItem::PollReady,
+ ready_field,
+ Some(expr_hir_id),
+ );
+ let break_x = self.with_loop_scope(loop_node_id, move |this| {
+ let expr_break =
+ hir::ExprKind::Break(this.lower_loop_destination(None), Some(x_expr));
+ this.arena.alloc(this.expr(gen_future_span, expr_break, ThinVec::new()))
+ });
+ self.arm(ready_pat, break_x)
+ };
+
+ // `::std::task::Poll::Pending => {}`
+ let pending_arm = {
+ let pending_pat = self.pat_lang_item_variant(
+ span,
+ hir::LangItem::PollPending,
+ &[],
+ Some(expr_hir_id),
+ );
+ let empty_block = self.expr_block_empty(span);
+ self.arm(pending_pat, empty_block)
+ };
+
+ let inner_match_stmt = {
+ let match_expr = self.expr_match(
+ span,
+ poll_expr,
+ arena_vec![self; ready_arm, pending_arm],
+ hir::MatchSource::AwaitDesugar,
+ );
+ self.stmt_expr(span, match_expr)
+ };
+
+ // task_context = yield ();
+ let yield_stmt = {
+ let unit = self.expr_unit(span);
+ let yield_expr = self.expr(
+ span,
+ hir::ExprKind::Yield(unit, hir::YieldSource::Await { expr: Some(expr_hir_id) }),
+ ThinVec::new(),
+ );
+ let yield_expr = self.arena.alloc(yield_expr);
+
+ if let Some(task_context_hid) = self.task_context {
+ let lhs = self.expr_ident(span, task_context_ident, task_context_hid);
+ let assign = self.expr(
+ span,
+ hir::ExprKind::Assign(lhs, yield_expr, self.lower_span(span)),
+ AttrVec::new(),
+ );
+ self.stmt_expr(span, assign)
+ } else {
+ // Use of `await` outside of an async context. Return `yield_expr` so that we can
+ // proceed with type checking.
+ self.stmt(span, hir::StmtKind::Semi(yield_expr))
+ }
+ };
+
+ let loop_block = self.block_all(span, arena_vec![self; inner_match_stmt, yield_stmt], None);
+
+ // loop { .. }
+ let loop_expr = self.arena.alloc(hir::Expr {
+ hir_id: loop_hir_id,
+ kind: hir::ExprKind::Loop(
+ loop_block,
+ None,
+ hir::LoopSource::Loop,
+ self.lower_span(span),
+ ),
+ span: self.lower_span(span),
+ });
+
+ // mut __awaitee => loop { ... }
+ let awaitee_arm = self.arm(awaitee_pat, loop_expr);
+
+ // `match ::std::future::IntoFuture::into_future(<expr>) { ... }`
+ let into_future_span = self.mark_span_with_reason(
+ DesugaringKind::Await,
+ dot_await_span,
+ self.allow_into_future.clone(),
+ );
+ let into_future_expr = self.expr_call_lang_item_fn(
+ into_future_span,
+ hir::LangItem::IntoFutureIntoFuture,
+ arena_vec![self; expr],
+ Some(expr_hir_id),
+ );
+
+ // match <into_future_expr> {
+ // mut __awaitee => loop { .. }
+ // }
+ hir::ExprKind::Match(
+ into_future_expr,
+ arena_vec![self; awaitee_arm],
+ hir::MatchSource::AwaitDesugar,
+ )
+ }
+
+ fn lower_expr_closure(
+ &mut self,
+ binder: &ClosureBinder,
+ capture_clause: CaptureBy,
+ closure_id: NodeId,
+ movability: Movability,
+ decl: &FnDecl,
+ body: &Expr,
+ fn_decl_span: Span,
+ ) -> hir::ExprKind<'hir> {
+ let (binder_clause, generic_params) = self.lower_closure_binder(binder);
+
+ let (body_id, generator_option) = self.with_new_scopes(move |this| {
+ let prev = this.current_item;
+ this.current_item = Some(fn_decl_span);
+ let mut generator_kind = None;
+ let body_id = this.lower_fn_body(decl, |this| {
+ let e = this.lower_expr_mut(body);
+ generator_kind = this.generator_kind;
+ e
+ });
+ let generator_option =
+ this.generator_movability_for_fn(&decl, fn_decl_span, generator_kind, movability);
+ this.current_item = prev;
+ (body_id, generator_option)
+ });
+
+ let bound_generic_params = self.lower_lifetime_binder(closure_id, generic_params);
+ // Lower outside new scope to preserve `is_in_loop_condition`.
+ let fn_decl = self.lower_fn_decl(decl, None, FnDeclKind::Closure, None);
+
+ let c = self.arena.alloc(hir::Closure {
+ binder: binder_clause,
+ capture_clause,
+ bound_generic_params,
+ fn_decl,
+ body: body_id,
+ fn_decl_span: self.lower_span(fn_decl_span),
+ movability: generator_option,
+ });
+
+ hir::ExprKind::Closure(c)
+ }
+
+ fn generator_movability_for_fn(
+ &mut self,
+ decl: &FnDecl,
+ fn_decl_span: Span,
+ generator_kind: Option<hir::GeneratorKind>,
+ movability: Movability,
+ ) -> Option<hir::Movability> {
+ match generator_kind {
+ Some(hir::GeneratorKind::Gen) => {
+ if decl.inputs.len() > 1 {
+ struct_span_err!(
+ self.tcx.sess,
+ fn_decl_span,
+ E0628,
+ "too many parameters for a generator (expected 0 or 1 parameters)"
+ )
+ .emit();
+ }
+ Some(movability)
+ }
+ Some(hir::GeneratorKind::Async(_)) => {
+ panic!("non-`async` closure body turned `async` during lowering");
+ }
+ None => {
+ if movability == Movability::Static {
+ struct_span_err!(
+ self.tcx.sess,
+ fn_decl_span,
+ E0697,
+ "closures cannot be static"
+ )
+ .emit();
+ }
+ None
+ }
+ }
+ }
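+
+    // Illustrative note on `generator_movability_for_fn` above, not part of
+    // upstream rustc: `static || { yield 1; }` contains a yield, so it is a
+    // generator and keeps the requested movability, while `static || 1`
+    // never yields, is an ordinary closure, and is rejected with E0697
+    // ("closures cannot be static").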
+
+ fn lower_closure_binder<'c>(
+ &mut self,
+ binder: &'c ClosureBinder,
+ ) -> (hir::ClosureBinder, &'c [GenericParam]) {
+ let (binder, params) = match binder {
+ ClosureBinder::NotPresent => (hir::ClosureBinder::Default, &[][..]),
+ &ClosureBinder::For { span, ref generic_params } => {
+ let span = self.lower_span(span);
+ (hir::ClosureBinder::For { span }, &**generic_params)
+ }
+ };
+
+ (binder, params)
+ }
+
+ fn lower_expr_async_closure(
+ &mut self,
+ binder: &ClosureBinder,
+ capture_clause: CaptureBy,
+ closure_id: NodeId,
+ inner_closure_id: NodeId,
+ decl: &FnDecl,
+ body: &Expr,
+ fn_decl_span: Span,
+ ) -> hir::ExprKind<'hir> {
+ if let &ClosureBinder::For { span, .. } = binder {
+ self.tcx.sess.span_err(
+ span,
+ "`for<...>` binders on `async` closures are not currently supported",
+ );
+ }
+
+ let (binder_clause, generic_params) = self.lower_closure_binder(binder);
+
+ let outer_decl =
+ FnDecl { inputs: decl.inputs.clone(), output: FnRetTy::Default(fn_decl_span) };
+
+ let body = self.with_new_scopes(|this| {
+ // FIXME(cramertj): allow `async` non-`move` closures with arguments.
+ if capture_clause == CaptureBy::Ref && !decl.inputs.is_empty() {
+ struct_span_err!(
+ this.tcx.sess,
+ fn_decl_span,
+ E0708,
+ "`async` non-`move` closures with parameters are not currently supported",
+ )
+ .help(
+ "consider using `let` statements to manually capture \
+ variables by reference before entering an `async move` closure",
+ )
+ .emit();
+ }
+
+ // Transform `async |x: u8| -> X { ... }` into
+ // `|x: u8| future_from_generator(|| -> X { ... })`.
+ let body_id = this.lower_fn_body(&outer_decl, |this| {
+ let async_ret_ty =
+ if let FnRetTy::Ty(ty) = &decl.output { Some(ty.clone()) } else { None };
+ let async_body = this.make_async_expr(
+ capture_clause,
+ inner_closure_id,
+ async_ret_ty,
+ body.span,
+ hir::AsyncGeneratorKind::Closure,
+ |this| this.with_new_scopes(|this| this.lower_expr_mut(body)),
+ );
+ this.expr(fn_decl_span, async_body, ThinVec::new())
+ });
+ body_id
+ });
+
+ let bound_generic_params = self.lower_lifetime_binder(closure_id, generic_params);
+
+ // We need to lower the declaration outside the new scope, because we
+ // have to conserve the state of being inside a loop condition for the
+ // closure argument types.
+ let fn_decl = self.lower_fn_decl(&outer_decl, None, FnDeclKind::Closure, None);
+
+ let c = self.arena.alloc(hir::Closure {
+ binder: binder_clause,
+ capture_clause,
+ bound_generic_params,
+ fn_decl,
+ body,
+ fn_decl_span: self.lower_span(fn_decl_span),
+ movability: None,
+ });
+ hir::ExprKind::Closure(c)
+ }
+
+ /// Destructure the LHS of complex assignments.
+ /// For instance, lower `(a, b) = t` to `{ let (lhs1, lhs2) = t; a = lhs1; b = lhs2; }`.
+ fn lower_expr_assign(
+ &mut self,
+ lhs: &Expr,
+ rhs: &Expr,
+ eq_sign_span: Span,
+ whole_span: Span,
+ ) -> hir::ExprKind<'hir> {
+ // Return early in case of an ordinary assignment.
+ fn is_ordinary(lower_ctx: &mut LoweringContext<'_, '_>, lhs: &Expr) -> bool {
+ match &lhs.kind {
+ ExprKind::Array(..)
+ | ExprKind::Struct(..)
+ | ExprKind::Tup(..)
+ | ExprKind::Underscore => false,
+ // Check for tuple struct constructor.
+ ExprKind::Call(callee, ..) => lower_ctx.extract_tuple_struct_path(callee).is_none(),
+ ExprKind::Paren(e) => {
+ match e.kind {
+ // We special-case `(..)` for consistency with patterns.
+ ExprKind::Range(None, None, RangeLimits::HalfOpen) => false,
+ _ => is_ordinary(lower_ctx, e),
+ }
+ }
+ _ => true,
+ }
+ }
+ if is_ordinary(self, lhs) {
+ return hir::ExprKind::Assign(
+ self.lower_expr(lhs),
+ self.lower_expr(rhs),
+ self.lower_span(eq_sign_span),
+ );
+ }
+
+ let mut assignments = vec![];
+
+ // The LHS becomes a pattern: `(lhs1, lhs2)`.
+ let pat = self.destructure_assign(lhs, eq_sign_span, &mut assignments);
+ let rhs = self.lower_expr(rhs);
+
+ // Introduce a `let` for destructuring: `let (lhs1, lhs2) = t`.
+ let destructure_let = self.stmt_let_pat(
+ None,
+ whole_span,
+ Some(rhs),
+ pat,
+ hir::LocalSource::AssignDesugar(self.lower_span(eq_sign_span)),
+ );
+
+ // `a = lhs1; b = lhs2;`.
+ let stmts = self
+ .arena
+ .alloc_from_iter(std::iter::once(destructure_let).chain(assignments.into_iter()));
+
+ // Wrap everything in a block.
+ hir::ExprKind::Block(&self.block_all(whole_span, stmts, None), None)
+ }
+
+ /// If the given expression is a path to a tuple struct, returns that path.
+ /// It is not a complete check, but just tries to reject most paths early
+ /// if they are not tuple structs.
+ /// Type checking will take care of the full validation later.
+ fn extract_tuple_struct_path<'a>(
+ &mut self,
+ expr: &'a Expr,
+ ) -> Option<(&'a Option<QSelf>, &'a Path)> {
+ if let ExprKind::Path(qself, path) = &expr.kind {
+ // Does the path resolve to something disallowed in a tuple struct/variant pattern?
+ if let Some(partial_res) = self.resolver.get_partial_res(expr.id) {
+ if partial_res.unresolved_segments() == 0
+ && !partial_res.base_res().expected_in_tuple_struct_pat()
+ {
+ return None;
+ }
+ }
+ return Some((qself, path));
+ }
+ None
+ }
+
+ /// If the given expression is a path to a unit struct, returns that path.
+ /// It is not a complete check, but just tries to reject most paths early
+ /// if they are not unit structs.
+ /// Type checking will take care of the full validation later.
+ fn extract_unit_struct_path<'a>(
+ &mut self,
+ expr: &'a Expr,
+ ) -> Option<(&'a Option<QSelf>, &'a Path)> {
+ if let ExprKind::Path(qself, path) = &expr.kind {
+ // Does the path resolve to something disallowed in a unit struct/variant pattern?
+ if let Some(partial_res) = self.resolver.get_partial_res(expr.id) {
+ if partial_res.unresolved_segments() == 0
+ && !partial_res.base_res().expected_in_unit_struct_pat()
+ {
+ return None;
+ }
+ }
+ return Some((qself, path));
+ }
+ None
+ }
+
+ /// Convert the LHS of a destructuring assignment to a pattern.
+ /// Each sub-assignment is recorded in `assignments`.
+ fn destructure_assign(
+ &mut self,
+ lhs: &Expr,
+ eq_sign_span: Span,
+ assignments: &mut Vec<hir::Stmt<'hir>>,
+ ) -> &'hir hir::Pat<'hir> {
+ self.arena.alloc(self.destructure_assign_mut(lhs, eq_sign_span, assignments))
+ }
+
+ fn destructure_assign_mut(
+ &mut self,
+ lhs: &Expr,
+ eq_sign_span: Span,
+ assignments: &mut Vec<hir::Stmt<'hir>>,
+ ) -> hir::Pat<'hir> {
+ match &lhs.kind {
+ // Underscore pattern.
+ ExprKind::Underscore => {
+ return self.pat_without_dbm(lhs.span, hir::PatKind::Wild);
+ }
+ // Slice patterns.
+ ExprKind::Array(elements) => {
+ let (pats, rest) =
+ self.destructure_sequence(elements, "slice", eq_sign_span, assignments);
+ let slice_pat = if let Some((i, span)) = rest {
+ let (before, after) = pats.split_at(i);
+ hir::PatKind::Slice(
+ before,
+ Some(self.arena.alloc(self.pat_without_dbm(span, hir::PatKind::Wild))),
+ after,
+ )
+ } else {
+ hir::PatKind::Slice(pats, None, &[])
+ };
+ return self.pat_without_dbm(lhs.span, slice_pat);
+ }
+ // Tuple structs.
+ ExprKind::Call(callee, args) => {
+ if let Some((qself, path)) = self.extract_tuple_struct_path(callee) {
+ let (pats, rest) = self.destructure_sequence(
+ args,
+ "tuple struct or variant",
+ eq_sign_span,
+ assignments,
+ );
+ let qpath = self.lower_qpath(
+ callee.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ // Destructure like a tuple struct.
+ let tuple_struct_pat =
+ hir::PatKind::TupleStruct(qpath, pats, rest.map(|r| r.0));
+ return self.pat_without_dbm(lhs.span, tuple_struct_pat);
+ }
+ }
+ // Unit structs and enum variants.
+ ExprKind::Path(..) => {
+ if let Some((qself, path)) = self.extract_unit_struct_path(lhs) {
+ let qpath = self.lower_qpath(
+ lhs.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ // Destructure like a unit struct.
+ let unit_struct_pat = hir::PatKind::Path(qpath);
+ return self.pat_without_dbm(lhs.span, unit_struct_pat);
+ }
+ }
+ // Structs.
+ ExprKind::Struct(se) => {
+ let field_pats = self.arena.alloc_from_iter(se.fields.iter().map(|f| {
+ let pat = self.destructure_assign(&f.expr, eq_sign_span, assignments);
+ hir::PatField {
+ hir_id: self.next_id(),
+ ident: self.lower_ident(f.ident),
+ pat,
+ is_shorthand: f.is_shorthand,
+ span: self.lower_span(f.span),
+ }
+ }));
+ let qpath = self.lower_qpath(
+ lhs.id,
+ &se.qself,
+ &se.path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ let fields_omitted = match &se.rest {
+ StructRest::Base(e) => {
+ self.tcx
+ .sess
+ .struct_span_err(
+ e.span,
+ "functional record updates are not allowed in destructuring \
+ assignments",
+ )
+ .span_suggestion(
+ e.span,
+ "consider removing the trailing pattern",
+ "",
+ rustc_errors::Applicability::MachineApplicable,
+ )
+ .emit();
+ true
+ }
+ StructRest::Rest(_) => true,
+ StructRest::None => false,
+ };
+ let struct_pat = hir::PatKind::Struct(qpath, field_pats, fields_omitted);
+ return self.pat_without_dbm(lhs.span, struct_pat);
+ }
+ // Tuples.
+ ExprKind::Tup(elements) => {
+ let (pats, rest) =
+ self.destructure_sequence(elements, "tuple", eq_sign_span, assignments);
+ let tuple_pat = hir::PatKind::Tuple(pats, rest.map(|r| r.0));
+ return self.pat_without_dbm(lhs.span, tuple_pat);
+ }
+ ExprKind::Paren(e) => {
+ // We special-case `(..)` for consistency with patterns.
+ if let ExprKind::Range(None, None, RangeLimits::HalfOpen) = e.kind {
+ let tuple_pat = hir::PatKind::Tuple(&[], Some(0));
+ return self.pat_without_dbm(lhs.span, tuple_pat);
+ } else {
+ return self.destructure_assign_mut(e, eq_sign_span, assignments);
+ }
+ }
+ _ => {}
+ }
+ // Treat all other cases as normal lvalue.
+ let ident = Ident::new(sym::lhs, self.lower_span(lhs.span));
+ let (pat, binding) = self.pat_ident_mut(lhs.span, ident);
+ let ident = self.expr_ident(lhs.span, ident, binding);
+ let assign =
+ hir::ExprKind::Assign(self.lower_expr(lhs), ident, self.lower_span(eq_sign_span));
+ let expr = self.expr(lhs.span, assign, ThinVec::new());
+ assignments.push(self.stmt_expr(lhs.span, expr));
+ pat
+ }
+
+ /// Destructure a sequence of expressions occurring on the LHS of an assignment.
+ /// Such a sequence occurs in a tuple (struct)/slice.
+ /// Return a sequence of corresponding patterns, and the index and the span of `..` if it
+ /// exists.
+ /// Each sub-assignment is recorded in `assignments`.
+ fn destructure_sequence(
+ &mut self,
+ elements: &[AstP<Expr>],
+ ctx: &str,
+ eq_sign_span: Span,
+ assignments: &mut Vec<hir::Stmt<'hir>>,
+ ) -> (&'hir [hir::Pat<'hir>], Option<(usize, Span)>) {
+ let mut rest = None;
+ let elements =
+ self.arena.alloc_from_iter(elements.iter().enumerate().filter_map(|(i, e)| {
+ // Check for `..` pattern.
+ if let ExprKind::Range(None, None, RangeLimits::HalfOpen) = e.kind {
+ if let Some((_, prev_span)) = rest {
+ self.ban_extra_rest_pat(e.span, prev_span, ctx);
+ } else {
+ rest = Some((i, e.span));
+ }
+ None
+ } else {
+ Some(self.destructure_assign_mut(e, eq_sign_span, assignments))
+ }
+ }));
+ (elements, rest)
+ }
+
+ /// Desugar `<start>..=<end>` into `std::ops::RangeInclusive::new(<start>, <end>)`.
+ fn lower_expr_range_closed(&mut self, span: Span, e1: &Expr, e2: &Expr) -> hir::ExprKind<'hir> {
+ let e1 = self.lower_expr_mut(e1);
+ let e2 = self.lower_expr_mut(e2);
+ let fn_path =
+ hir::QPath::LangItem(hir::LangItem::RangeInclusiveNew, self.lower_span(span), None);
+ let fn_expr =
+ self.arena.alloc(self.expr(span, hir::ExprKind::Path(fn_path), ThinVec::new()));
+ hir::ExprKind::Call(fn_expr, arena_vec![self; e1, e2])
+ }
+
+ fn lower_expr_range(
+ &mut self,
+ span: Span,
+ e1: Option<&Expr>,
+ e2: Option<&Expr>,
+ lims: RangeLimits,
+ ) -> hir::ExprKind<'hir> {
+ use rustc_ast::RangeLimits::*;
+
+ let lang_item = match (e1, e2, lims) {
+ (None, None, HalfOpen) => hir::LangItem::RangeFull,
+ (Some(..), None, HalfOpen) => hir::LangItem::RangeFrom,
+ (None, Some(..), HalfOpen) => hir::LangItem::RangeTo,
+ (Some(..), Some(..), HalfOpen) => hir::LangItem::Range,
+ (None, Some(..), Closed) => hir::LangItem::RangeToInclusive,
+ (Some(..), Some(..), Closed) => unreachable!(),
+ (_, None, Closed) => self.diagnostic().span_fatal(span, "inclusive range with no end"),
+ };
+
+ let fields = self.arena.alloc_from_iter(
+ e1.iter().map(|e| (sym::start, e)).chain(e2.iter().map(|e| (sym::end, e))).map(
+ |(s, e)| {
+ let expr = self.lower_expr(&e);
+ let ident = Ident::new(s, self.lower_span(e.span));
+ self.expr_field(ident, expr, e.span)
+ },
+ ),
+ );
+
+ hir::ExprKind::Struct(
+ self.arena.alloc(hir::QPath::LangItem(lang_item, self.lower_span(span), None)),
+ fields,
+ None,
+ )
+ }
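+
+    // Illustrative note on `lower_expr_range` above, not part of upstream
+    // rustc: `2..n` becomes the lang-item struct literal
+    //     Range { start: 2, end: n }
+    // `..` becomes `RangeFull` with no fields, and `a..=b` is handled
+    // separately by `lower_expr_range_closed` as `RangeInclusive::new(a, b)`.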
+
+ fn lower_label(&self, opt_label: Option<Label>) -> Option<Label> {
+ let label = opt_label?;
+ Some(Label { ident: self.lower_ident(label.ident) })
+ }
+
+ fn lower_loop_destination(&mut self, destination: Option<(NodeId, Label)>) -> hir::Destination {
+ let target_id = match destination {
+ Some((id, _)) => {
+ if let Some(loop_id) = self.resolver.get_label_res(id) {
+ Ok(self.lower_node_id(loop_id))
+ } else {
+ Err(hir::LoopIdError::UnresolvedLabel)
+ }
+ }
+ None => self
+ .loop_scope
+ .map(|id| Ok(self.lower_node_id(id)))
+ .unwrap_or(Err(hir::LoopIdError::OutsideLoopScope)),
+ };
+ let label = self.lower_label(destination.map(|(_, label)| label));
+ hir::Destination { label, target_id }
+ }
+
+ fn lower_jump_destination(&mut self, id: NodeId, opt_label: Option<Label>) -> hir::Destination {
+ if self.is_in_loop_condition && opt_label.is_none() {
+ hir::Destination {
+ label: None,
+ target_id: Err(hir::LoopIdError::UnlabeledCfInWhileCondition),
+ }
+ } else {
+ self.lower_loop_destination(opt_label.map(|label| (id, label)))
+ }
+ }
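+
+    // Illustrative note on `lower_jump_destination` above, not part of
+    // upstream rustc: an unlabeled `break` in a `while` condition, e.g.
+    //     while break {}
+    // gets the `UnlabeledCfInWhileCondition` error destination, whereas a
+    // labeled `break 'a` resolves through `get_label_res` as usual.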
+
+ fn with_catch_scope<T>(&mut self, catch_id: NodeId, f: impl FnOnce(&mut Self) -> T) -> T {
+ let old_scope = self.catch_scope.replace(catch_id);
+ let result = f(self);
+ self.catch_scope = old_scope;
+ result
+ }
+
+ fn with_loop_scope<T>(&mut self, loop_id: NodeId, f: impl FnOnce(&mut Self) -> T) -> T {
+ // We're no longer in the base loop's condition; we're in another loop.
+ let was_in_loop_condition = self.is_in_loop_condition;
+ self.is_in_loop_condition = false;
+
+ let old_scope = self.loop_scope.replace(loop_id);
+ let result = f(self);
+ self.loop_scope = old_scope;
+
+ self.is_in_loop_condition = was_in_loop_condition;
+
+ result
+ }
+
+ fn with_loop_condition_scope<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
+ let was_in_loop_condition = self.is_in_loop_condition;
+ self.is_in_loop_condition = true;
+
+ let result = f(self);
+
+ self.is_in_loop_condition = was_in_loop_condition;
+
+ result
+ }
+
+ fn lower_expr_field(&mut self, f: &ExprField) -> hir::ExprField<'hir> {
+ hir::ExprField {
+ hir_id: self.next_id(),
+ ident: self.lower_ident(f.ident),
+ expr: self.lower_expr(&f.expr),
+ span: self.lower_span(f.span),
+ is_shorthand: f.is_shorthand,
+ }
+ }
+
+ fn lower_expr_yield(&mut self, span: Span, opt_expr: Option<&Expr>) -> hir::ExprKind<'hir> {
+ match self.generator_kind {
+ Some(hir::GeneratorKind::Gen) => {}
+ Some(hir::GeneratorKind::Async(_)) => {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0727,
+ "`async` generators are not yet supported"
+ )
+ .emit();
+ }
+ None => self.generator_kind = Some(hir::GeneratorKind::Gen),
+ }
+
+ let expr =
+ opt_expr.as_ref().map(|x| self.lower_expr(x)).unwrap_or_else(|| self.expr_unit(span));
+
+ hir::ExprKind::Yield(expr, hir::YieldSource::Yield)
+ }
+
+ /// Desugar `ExprForLoop` from: `[opt_ident]: for <pat> in <head> <body>` into:
+ /// ```ignore (pseudo-rust)
+ /// {
+ /// let result = match IntoIterator::into_iter(<head>) {
+ /// mut iter => {
+ /// [opt_ident]: loop {
+ /// match Iterator::next(&mut iter) {
+ /// None => break,
+ /// Some(<pat>) => <body>,
+ /// };
+ /// }
+ /// }
+ /// };
+ /// result
+ /// }
+ /// ```
+ fn lower_expr_for(
+ &mut self,
+ e: &Expr,
+ pat: &Pat,
+ head: &Expr,
+ body: &Block,
+ opt_label: Option<Label>,
+ ) -> hir::Expr<'hir> {
+ let head = self.lower_expr_mut(head);
+ let pat = self.lower_pat(pat);
+ let for_span =
+ self.mark_span_with_reason(DesugaringKind::ForLoop, self.lower_span(e.span), None);
+ let head_span = self.mark_span_with_reason(DesugaringKind::ForLoop, head.span, None);
+ let pat_span = self.mark_span_with_reason(DesugaringKind::ForLoop, pat.span, None);
+
+ // `None => break`
+ let none_arm = {
+ let break_expr =
+ self.with_loop_scope(e.id, |this| this.expr_break_alloc(for_span, ThinVec::new()));
+ let pat = self.pat_none(for_span);
+ self.arm(pat, break_expr)
+ };
+
+ // Some(<pat>) => <body>,
+ let some_arm = {
+ let some_pat = self.pat_some(pat_span, pat);
+ let body_block = self.with_loop_scope(e.id, |this| this.lower_block(body, false));
+ let body_expr = self.arena.alloc(self.expr_block(body_block, ThinVec::new()));
+ self.arm(some_pat, body_expr)
+ };
+
+ // `mut iter`
+ let iter = Ident::with_dummy_span(sym::iter);
+ let (iter_pat, iter_pat_nid) =
+ self.pat_ident_binding_mode(head_span, iter, hir::BindingAnnotation::Mutable);
+
+ // `match Iterator::next(&mut iter) { ... }`
+ let match_expr = {
+ let iter = self.expr_ident(head_span, iter, iter_pat_nid);
+ let ref_mut_iter = self.expr_mut_addr_of(head_span, iter);
+ let next_expr = self.expr_call_lang_item_fn(
+ head_span,
+ hir::LangItem::IteratorNext,
+ arena_vec![self; ref_mut_iter],
+ None,
+ );
+ let arms = arena_vec![self; none_arm, some_arm];
+
+ self.expr_match(head_span, next_expr, arms, hir::MatchSource::ForLoopDesugar)
+ };
+ let match_stmt = self.stmt_expr(for_span, match_expr);
+
+ let loop_block = self.block_all(for_span, arena_vec![self; match_stmt], None);
+
+ // `[opt_ident]: loop { ... }`
+ let kind = hir::ExprKind::Loop(
+ loop_block,
+ self.lower_label(opt_label),
+ hir::LoopSource::ForLoop,
+ self.lower_span(for_span.with_hi(head.span.hi())),
+ );
+ let loop_expr =
+ self.arena.alloc(hir::Expr { hir_id: self.lower_node_id(e.id), kind, span: for_span });
+
+ // `mut iter => { ... }`
+ let iter_arm = self.arm(iter_pat, loop_expr);
+
+ // `match ::std::iter::IntoIterator::into_iter(<head>) { ... }`
+ let into_iter_expr = {
+ self.expr_call_lang_item_fn(
+ head_span,
+ hir::LangItem::IntoIterIntoIter,
+ arena_vec![self; head],
+ None,
+ )
+ };
+
+ let match_expr = self.arena.alloc(self.expr_match(
+ for_span,
+ into_iter_expr,
+ arena_vec![self; iter_arm],
+ hir::MatchSource::ForLoopDesugar,
+ ));
+
+ let attrs: Vec<_> = e.attrs.iter().map(|a| self.lower_attr(a)).collect();
+
+ // This is effectively `{ let _result = ...; _result }`.
+ // The construct was introduced in #21984 and is necessary to make sure that
+ // temporaries in the `head` expression are dropped and do not leak to the
+ // surrounding scope of the `match` since the `match` is not a terminating scope.
+ //
+ // Also, add the attributes to the outer returned expr node.
+ self.expr_drop_temps_mut(for_span, match_expr, attrs.into())
+ }
+
+ /// Desugar `ExprKind::Try` from: `<expr>?` into:
+ /// ```ignore (pseudo-rust)
+ /// match Try::branch(<expr>) {
+    /// ControlFlow::Continue(val) => #[allow(unreachable_code)] val,
+ /// ControlFlow::Break(residual) =>
+ /// #[allow(unreachable_code)]
+ /// // If there is an enclosing `try {...}`:
+ /// break 'catch_target Try::from_residual(residual),
+ /// // Otherwise:
+ /// return Try::from_residual(residual),
+ /// }
+ /// ```
+ fn lower_expr_try(&mut self, span: Span, sub_expr: &Expr) -> hir::ExprKind<'hir> {
+ let unstable_span = self.mark_span_with_reason(
+ DesugaringKind::QuestionMark,
+ span,
+ self.allow_try_trait.clone(),
+ );
+ let try_span = self.tcx.sess.source_map().end_point(span);
+ let try_span = self.mark_span_with_reason(
+ DesugaringKind::QuestionMark,
+ try_span,
+ self.allow_try_trait.clone(),
+ );
+
+ // `Try::branch(<expr>)`
+ let scrutinee = {
+ // expand <expr>
+ let sub_expr = self.lower_expr_mut(sub_expr);
+
+ self.expr_call_lang_item_fn(
+ unstable_span,
+ hir::LangItem::TryTraitBranch,
+ arena_vec![self; sub_expr],
+ None,
+ )
+ };
+
+ // `#[allow(unreachable_code)]`
+ let attr = {
+ // `allow(unreachable_code)`
+ let allow = {
+ let allow_ident = Ident::new(sym::allow, self.lower_span(span));
+ let uc_ident = Ident::new(sym::unreachable_code, self.lower_span(span));
+ let uc_nested = attr::mk_nested_word_item(uc_ident);
+ attr::mk_list_item(allow_ident, vec![uc_nested])
+ };
+ attr::mk_attr_outer(allow)
+ };
+ let attrs = vec![attr];
+
+ // `ControlFlow::Continue(val) => #[allow(unreachable_code)] val,`
+ let continue_arm = {
+ let val_ident = Ident::with_dummy_span(sym::val);
+ let (val_pat, val_pat_nid) = self.pat_ident(span, val_ident);
+ let val_expr = self.arena.alloc(self.expr_ident_with_attrs(
+ span,
+ val_ident,
+ val_pat_nid,
+ ThinVec::from(attrs.clone()),
+ ));
+ let continue_pat = self.pat_cf_continue(unstable_span, val_pat);
+ self.arm(continue_pat, val_expr)
+ };
+
+ // `ControlFlow::Break(residual) =>
+ // #[allow(unreachable_code)]
+ // return Try::from_residual(residual),`
+ let break_arm = {
+ let residual_ident = Ident::with_dummy_span(sym::residual);
+ let (residual_local, residual_local_nid) = self.pat_ident(try_span, residual_ident);
+ let residual_expr = self.expr_ident_mut(try_span, residual_ident, residual_local_nid);
+ let from_residual_expr = self.wrap_in_try_constructor(
+ hir::LangItem::TryTraitFromResidual,
+ try_span,
+ self.arena.alloc(residual_expr),
+ unstable_span,
+ );
+ let thin_attrs = ThinVec::from(attrs);
+ let ret_expr = if let Some(catch_node) = self.catch_scope {
+ let target_id = Ok(self.lower_node_id(catch_node));
+ self.arena.alloc(self.expr(
+ try_span,
+ hir::ExprKind::Break(
+ hir::Destination { label: None, target_id },
+ Some(from_residual_expr),
+ ),
+ thin_attrs,
+ ))
+ } else {
+ self.arena.alloc(self.expr(
+ try_span,
+ hir::ExprKind::Ret(Some(from_residual_expr)),
+ thin_attrs,
+ ))
+ };
+
+ let break_pat = self.pat_cf_break(try_span, residual_local);
+ self.arm(break_pat, ret_expr)
+ };
+
+ hir::ExprKind::Match(
+ scrutinee,
+ arena_vec![self; break_arm, continue_arm],
+ hir::MatchSource::TryDesugar,
+ )
+ }
+
+ /// Desugar `ExprKind::Yeet` from: `do yeet <expr>` into:
+    /// ```ignore (pseudo-rust)
+ /// // If there is an enclosing `try {...}`:
+ /// break 'catch_target FromResidual::from_residual(Yeet(residual)),
+ /// // Otherwise:
+ /// return FromResidual::from_residual(Yeet(residual)),
+ /// ```
+ /// But to simplify this, there's a `from_yeet` lang item function which
+ /// handles the combined `FromResidual::from_residual(Yeet(residual))`.
+ fn lower_expr_yeet(&mut self, span: Span, sub_expr: Option<&Expr>) -> hir::ExprKind<'hir> {
+ // The expression (if present) or `()` otherwise.
+ let (yeeted_span, yeeted_expr) = if let Some(sub_expr) = sub_expr {
+ (sub_expr.span, self.lower_expr(sub_expr))
+ } else {
+ (self.mark_span_with_reason(DesugaringKind::YeetExpr, span, None), self.expr_unit(span))
+ };
+
+ let unstable_span = self.mark_span_with_reason(
+ DesugaringKind::YeetExpr,
+ span,
+ self.allow_try_trait.clone(),
+ );
+
+ let from_yeet_expr = self.wrap_in_try_constructor(
+ hir::LangItem::TryTraitFromYeet,
+ unstable_span,
+ yeeted_expr,
+ yeeted_span,
+ );
+
+ if let Some(catch_node) = self.catch_scope {
+ let target_id = Ok(self.lower_node_id(catch_node));
+ hir::ExprKind::Break(hir::Destination { label: None, target_id }, Some(from_yeet_expr))
+ } else {
+ hir::ExprKind::Ret(Some(from_yeet_expr))
+ }
+ }
+
+ // =========================================================================
+ // Helper methods for building HIR.
+ // =========================================================================
+
+ /// Wrap the given `expr` in a terminating scope using `hir::ExprKind::DropTemps`.
+ ///
+ /// In terms of drop order, it has the same effect as wrapping `expr` in
+ /// `{ let _t = $expr; _t }` but should provide better compile-time performance.
+ ///
+ /// The drop order can be important in e.g. `if expr { .. }`.
+ pub(super) fn expr_drop_temps(
+ &mut self,
+ span: Span,
+ expr: &'hir hir::Expr<'hir>,
+ attrs: AttrVec,
+ ) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr_drop_temps_mut(span, expr, attrs))
+ }
+
+ pub(super) fn expr_drop_temps_mut(
+ &mut self,
+ span: Span,
+ expr: &'hir hir::Expr<'hir>,
+ attrs: AttrVec,
+ ) -> hir::Expr<'hir> {
+ self.expr(span, hir::ExprKind::DropTemps(expr), attrs)
+ }
+
+ fn expr_match(
+ &mut self,
+ span: Span,
+ arg: &'hir hir::Expr<'hir>,
+ arms: &'hir [hir::Arm<'hir>],
+ source: hir::MatchSource,
+ ) -> hir::Expr<'hir> {
+ self.expr(span, hir::ExprKind::Match(arg, arms, source), ThinVec::new())
+ }
+
+ fn expr_break(&mut self, span: Span, attrs: AttrVec) -> hir::Expr<'hir> {
+ let expr_break = hir::ExprKind::Break(self.lower_loop_destination(None), None);
+ self.expr(span, expr_break, attrs)
+ }
+
+ fn expr_break_alloc(&mut self, span: Span, attrs: AttrVec) -> &'hir hir::Expr<'hir> {
+ let expr_break = self.expr_break(span, attrs);
+ self.arena.alloc(expr_break)
+ }
+
+ fn expr_mut_addr_of(&mut self, span: Span, e: &'hir hir::Expr<'hir>) -> hir::Expr<'hir> {
+ self.expr(
+ span,
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, hir::Mutability::Mut, e),
+ ThinVec::new(),
+ )
+ }
+
+ fn expr_unit(&mut self, sp: Span) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr(sp, hir::ExprKind::Tup(&[]), ThinVec::new()))
+ }
+
+ fn expr_call_mut(
+ &mut self,
+ span: Span,
+ e: &'hir hir::Expr<'hir>,
+ args: &'hir [hir::Expr<'hir>],
+ ) -> hir::Expr<'hir> {
+ self.expr(span, hir::ExprKind::Call(e, args), ThinVec::new())
+ }
+
+ fn expr_call(
+ &mut self,
+ span: Span,
+ e: &'hir hir::Expr<'hir>,
+ args: &'hir [hir::Expr<'hir>],
+ ) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr_call_mut(span, e, args))
+ }
+
+ fn expr_call_lang_item_fn_mut(
+ &mut self,
+ span: Span,
+ lang_item: hir::LangItem,
+ args: &'hir [hir::Expr<'hir>],
+ hir_id: Option<hir::HirId>,
+ ) -> hir::Expr<'hir> {
+ let path =
+ self.arena.alloc(self.expr_lang_item_path(span, lang_item, ThinVec::new(), hir_id));
+ self.expr_call_mut(span, path, args)
+ }
+
+ fn expr_call_lang_item_fn(
+ &mut self,
+ span: Span,
+ lang_item: hir::LangItem,
+ args: &'hir [hir::Expr<'hir>],
+ hir_id: Option<hir::HirId>,
+ ) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr_call_lang_item_fn_mut(span, lang_item, args, hir_id))
+ }
+
+ fn expr_lang_item_path(
+ &mut self,
+ span: Span,
+ lang_item: hir::LangItem,
+ attrs: AttrVec,
+ hir_id: Option<hir::HirId>,
+ ) -> hir::Expr<'hir> {
+ self.expr(
+ span,
+ hir::ExprKind::Path(hir::QPath::LangItem(lang_item, self.lower_span(span), hir_id)),
+ attrs,
+ )
+ }
+
+ pub(super) fn expr_ident(
+ &mut self,
+ sp: Span,
+ ident: Ident,
+ binding: hir::HirId,
+ ) -> &'hir hir::Expr<'hir> {
+ self.arena.alloc(self.expr_ident_mut(sp, ident, binding))
+ }
+
+ pub(super) fn expr_ident_mut(
+ &mut self,
+ sp: Span,
+ ident: Ident,
+ binding: hir::HirId,
+ ) -> hir::Expr<'hir> {
+ self.expr_ident_with_attrs(sp, ident, binding, ThinVec::new())
+ }
+
+ fn expr_ident_with_attrs(
+ &mut self,
+ span: Span,
+ ident: Ident,
+ binding: hir::HirId,
+ attrs: AttrVec,
+ ) -> hir::Expr<'hir> {
+ let expr_path = hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ self.arena.alloc(hir::Path {
+ span: self.lower_span(span),
+ res: Res::Local(binding),
+ segments: arena_vec![self; hir::PathSegment::from_ident(ident)],
+ }),
+ ));
+
+ self.expr(span, expr_path, attrs)
+ }
+
+ fn expr_unsafe(&mut self, expr: &'hir hir::Expr<'hir>) -> hir::Expr<'hir> {
+ let hir_id = self.next_id();
+ let span = expr.span;
+ self.expr(
+ span,
+ hir::ExprKind::Block(
+ self.arena.alloc(hir::Block {
+ stmts: &[],
+ expr: Some(expr),
+ hir_id,
+ rules: hir::BlockCheckMode::UnsafeBlock(hir::UnsafeSource::CompilerGenerated),
+ span: self.lower_span(span),
+ targeted_by_break: false,
+ }),
+ None,
+ ),
+ ThinVec::new(),
+ )
+ }
+
+ fn expr_block_empty(&mut self, span: Span) -> &'hir hir::Expr<'hir> {
+ let blk = self.block_all(span, &[], None);
+ let expr = self.expr_block(blk, ThinVec::new());
+ self.arena.alloc(expr)
+ }
+
+ pub(super) fn expr_block(
+ &mut self,
+ b: &'hir hir::Block<'hir>,
+ attrs: AttrVec,
+ ) -> hir::Expr<'hir> {
+ self.expr(b.span, hir::ExprKind::Block(b, None), attrs)
+ }
+
+ pub(super) fn expr(
+ &mut self,
+ span: Span,
+ kind: hir::ExprKind<'hir>,
+ attrs: AttrVec,
+ ) -> hir::Expr<'hir> {
+ let hir_id = self.next_id();
+ self.lower_attrs(hir_id, &attrs);
+ hir::Expr { hir_id, kind, span: self.lower_span(span) }
+ }
+
+ fn expr_field(
+ &mut self,
+ ident: Ident,
+ expr: &'hir hir::Expr<'hir>,
+ span: Span,
+ ) -> hir::ExprField<'hir> {
+ hir::ExprField {
+ hir_id: self.next_id(),
+ ident,
+ span: self.lower_span(span),
+ expr,
+ is_shorthand: false,
+ }
+ }
+
+ fn arm(&mut self, pat: &'hir hir::Pat<'hir>, expr: &'hir hir::Expr<'hir>) -> hir::Arm<'hir> {
+ hir::Arm {
+ hir_id: self.next_id(),
+ pat,
+ guard: None,
+ span: self.lower_span(expr.span),
+ body: expr,
+ }
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/index.rs b/compiler/rustc_ast_lowering/src/index.rs
new file mode 100644
index 000000000..d5af74d47
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/index.rs
@@ -0,0 +1,346 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::definitions;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::*;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::span_bug;
+use rustc_session::Session;
+use rustc_span::source_map::SourceMap;
+use rustc_span::{Span, DUMMY_SP};
+
+use tracing::debug;
+
+/// A visitor that walks over the HIR and collects `Node`s into a HIR map.
+pub(super) struct NodeCollector<'a, 'hir> {
+ /// Source map
+ source_map: &'a SourceMap,
+ bodies: &'a SortedMap<ItemLocalId, &'hir Body<'hir>>,
+
+ /// Outputs
+ nodes: IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>,
+ parenting: FxHashMap<LocalDefId, ItemLocalId>,
+
+ /// The parent of this node
+ parent_node: hir::ItemLocalId,
+
+ owner: LocalDefId,
+
+ definitions: &'a definitions::Definitions,
+}
+
+#[tracing::instrument(level = "debug", skip(sess, definitions, bodies))]
+pub(super) fn index_hir<'hir>(
+ sess: &Session,
+ definitions: &definitions::Definitions,
+ item: hir::OwnerNode<'hir>,
+ bodies: &SortedMap<ItemLocalId, &'hir Body<'hir>>,
+) -> (IndexVec<ItemLocalId, Option<ParentedNode<'hir>>>, FxHashMap<LocalDefId, ItemLocalId>) {
+ let mut nodes = IndexVec::new();
+ // This node's parent should never be accessed: the owner's parent is computed by the
+ // hir_owner_parent query. Make it invalid (`ItemLocalId::INVALID`, which is
+ // `ItemLocalId::MAX`) to force an ICE whenever it is used.
+ nodes.push(Some(ParentedNode { parent: ItemLocalId::INVALID, node: item.into() }));
+ let mut collector = NodeCollector {
+ source_map: sess.source_map(),
+ definitions,
+ owner: item.def_id(),
+ parent_node: ItemLocalId::new(0),
+ nodes,
+ bodies,
+ parenting: FxHashMap::default(),
+ };
+
+ match item {
+ OwnerNode::Crate(citem) => {
+ collector.visit_mod(&citem, citem.spans.inner_span, hir::CRATE_HIR_ID)
+ }
+ OwnerNode::Item(item) => collector.visit_item(item),
+ OwnerNode::TraitItem(item) => collector.visit_trait_item(item),
+ OwnerNode::ImplItem(item) => collector.visit_impl_item(item),
+ OwnerNode::ForeignItem(item) => collector.visit_foreign_item(item),
+ };
+
+ (collector.nodes, collector.parenting)
+}
+
+impl<'a, 'hir> NodeCollector<'a, 'hir> {
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn insert(&mut self, span: Span, hir_id: HirId, node: Node<'hir>) {
+ debug_assert_eq!(self.owner, hir_id.owner);
+ debug_assert_ne!(hir_id.local_id.as_u32(), 0);
+
+ // Make sure that the DepNode of some node coincides with the HirId
+ // owner of that node.
+ if cfg!(debug_assertions) {
+ if hir_id.owner != self.owner {
+ span_bug!(
+ span,
+ "inconsistent DepNode at `{:?}` for `{:?}`: \
+ current_dep_node_owner={} ({:?}), hir_id.owner={} ({:?})",
+ self.source_map.span_to_diagnostic_string(span),
+ node,
+ self.definitions.def_path(self.owner).to_string_no_crate_verbose(),
+ self.owner,
+ self.definitions.def_path(hir_id.owner).to_string_no_crate_verbose(),
+ hir_id.owner,
+ )
+ }
+ }
+
+ self.nodes.insert(hir_id.local_id, ParentedNode { parent: self.parent_node, node });
+ }
+
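+ /// Runs `f` with `parent_node_id` as the current parent node, then restores
+ /// the previous parent (a simple save/restore around the closure).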
+ fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_node_id: HirId, f: F) {
+ debug_assert_eq!(parent_node_id.owner, self.owner);
+ let parent_node = self.parent_node;
+ self.parent_node = parent_node_id.local_id;
+ f(self);
+ self.parent_node = parent_node;
+ }
+
+ fn insert_nested(&mut self, item: LocalDefId) {
+ self.parenting.insert(item, self.parent_node);
+ }
+}
+
+impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> {
+ /// Because we want to track parent items and so forth, enable
+ /// deep walking so that we walk nested items in the context of
+ /// their outer items.
+
+ fn visit_nested_item(&mut self, item: ItemId) {
+ debug!("visit_nested_item: {:?}", item);
+ self.insert_nested(item.def_id);
+ }
+
+ fn visit_nested_trait_item(&mut self, item_id: TraitItemId) {
+ self.insert_nested(item_id.def_id);
+ }
+
+ fn visit_nested_impl_item(&mut self, item_id: ImplItemId) {
+ self.insert_nested(item_id.def_id);
+ }
+
+ fn visit_nested_foreign_item(&mut self, foreign_id: ForeignItemId) {
+ self.insert_nested(foreign_id.def_id);
+ }
+
+ fn visit_nested_body(&mut self, id: BodyId) {
+ debug_assert_eq!(id.hir_id.owner, self.owner);
+ let body = self.bodies[&id.hir_id.local_id];
+ self.visit_body(body);
+ }
+
+ fn visit_param(&mut self, param: &'hir Param<'hir>) {
+ let node = Node::Param(param);
+ self.insert(param.pat.span, param.hir_id, node);
+ self.with_parent(param.hir_id, |this| {
+ intravisit::walk_param(this, param);
+ });
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_item(&mut self, i: &'hir Item<'hir>) {
+ debug_assert_eq!(i.def_id, self.owner);
+ self.with_parent(i.hir_id(), |this| {
+ if let ItemKind::Struct(ref struct_def, _) = i.kind {
+ // If this is a tuple or unit-like struct, register the constructor.
+ if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
+ this.insert(i.span, ctor_hir_id, Node::Ctor(struct_def));
+ }
+ }
+ intravisit::walk_item(this, i);
+ });
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_foreign_item(&mut self, fi: &'hir ForeignItem<'hir>) {
+ debug_assert_eq!(fi.def_id, self.owner);
+ self.with_parent(fi.hir_id(), |this| {
+ intravisit::walk_foreign_item(this, fi);
+ });
+ }
+
+ fn visit_generic_param(&mut self, param: &'hir GenericParam<'hir>) {
+ self.insert(param.span, param.hir_id, Node::GenericParam(param));
+ intravisit::walk_generic_param(self, param);
+ }
+
+ fn visit_const_param_default(&mut self, param: HirId, ct: &'hir AnonConst) {
+ self.with_parent(param, |this| {
+ intravisit::walk_const_param_default(this, ct);
+ })
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_trait_item(&mut self, ti: &'hir TraitItem<'hir>) {
+ debug_assert_eq!(ti.def_id, self.owner);
+ self.with_parent(ti.hir_id(), |this| {
+ intravisit::walk_trait_item(this, ti);
+ });
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_impl_item(&mut self, ii: &'hir ImplItem<'hir>) {
+ debug_assert_eq!(ii.def_id, self.owner);
+ self.with_parent(ii.hir_id(), |this| {
+ intravisit::walk_impl_item(this, ii);
+ });
+ }
+
+ fn visit_pat(&mut self, pat: &'hir Pat<'hir>) {
+ self.insert(pat.span, pat.hir_id, Node::Pat(pat));
+
+ self.with_parent(pat.hir_id, |this| {
+ intravisit::walk_pat(this, pat);
+ });
+ }
+
+ fn visit_arm(&mut self, arm: &'hir Arm<'hir>) {
+ let node = Node::Arm(arm);
+
+ self.insert(arm.span, arm.hir_id, node);
+
+ self.with_parent(arm.hir_id, |this| {
+ intravisit::walk_arm(this, arm);
+ });
+ }
+
+ fn visit_anon_const(&mut self, constant: &'hir AnonConst) {
+ self.insert(DUMMY_SP, constant.hir_id, Node::AnonConst(constant));
+
+ self.with_parent(constant.hir_id, |this| {
+ intravisit::walk_anon_const(this, constant);
+ });
+ }
+
+ fn visit_expr(&mut self, expr: &'hir Expr<'hir>) {
+ self.insert(expr.span, expr.hir_id, Node::Expr(expr));
+
+ self.with_parent(expr.hir_id, |this| {
+ intravisit::walk_expr(this, expr);
+ });
+ }
+
+ fn visit_stmt(&mut self, stmt: &'hir Stmt<'hir>) {
+ self.insert(stmt.span, stmt.hir_id, Node::Stmt(stmt));
+
+ self.with_parent(stmt.hir_id, |this| {
+ intravisit::walk_stmt(this, stmt);
+ });
+ }
+
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'hir PathSegment<'hir>) {
+ if let Some(hir_id) = path_segment.hir_id {
+ self.insert(path_span, hir_id, Node::PathSegment(path_segment));
+ }
+ intravisit::walk_path_segment(self, path_span, path_segment);
+ }
+
+ fn visit_ty(&mut self, ty: &'hir Ty<'hir>) {
+ self.insert(ty.span, ty.hir_id, Node::Ty(ty));
+
+ self.with_parent(ty.hir_id, |this| {
+ intravisit::walk_ty(this, ty);
+ });
+ }
+
+ fn visit_infer(&mut self, inf: &'hir InferArg) {
+ self.insert(inf.span, inf.hir_id, Node::Infer(inf));
+
+ self.with_parent(inf.hir_id, |this| {
+ intravisit::walk_inf(this, inf);
+ });
+ }
+
+ fn visit_trait_ref(&mut self, tr: &'hir TraitRef<'hir>) {
+ self.insert(tr.path.span, tr.hir_ref_id, Node::TraitRef(tr));
+
+ self.with_parent(tr.hir_ref_id, |this| {
+ intravisit::walk_trait_ref(this, tr);
+ });
+ }
+
+ fn visit_fn(
+ &mut self,
+ fk: intravisit::FnKind<'hir>,
+ fd: &'hir FnDecl<'hir>,
+ b: BodyId,
+ s: Span,
+ id: HirId,
+ ) {
+ assert_eq!(self.owner, id.owner);
+ assert_eq!(self.parent_node, id.local_id);
+ intravisit::walk_fn(self, fk, fd, b, s, id);
+ }
+
+ fn visit_block(&mut self, block: &'hir Block<'hir>) {
+ self.insert(block.span, block.hir_id, Node::Block(block));
+ self.with_parent(block.hir_id, |this| {
+ intravisit::walk_block(this, block);
+ });
+ }
+
+ fn visit_local(&mut self, l: &'hir Local<'hir>) {
+ self.insert(l.span, l.hir_id, Node::Local(l));
+ self.with_parent(l.hir_id, |this| {
+ intravisit::walk_local(this, l);
+ })
+ }
+
+ fn visit_lifetime(&mut self, lifetime: &'hir Lifetime) {
+ self.insert(lifetime.span, lifetime.hir_id, Node::Lifetime(lifetime));
+ }
+
+ fn visit_variant(&mut self, v: &'hir Variant<'hir>, g: &'hir Generics<'hir>, item_id: HirId) {
+ self.insert(v.span, v.id, Node::Variant(v));
+ self.with_parent(v.id, |this| {
+ // Register the constructor of this variant.
+ if let Some(ctor_hir_id) = v.data.ctor_hir_id() {
+ this.insert(v.span, ctor_hir_id, Node::Ctor(&v.data));
+ }
+ intravisit::walk_variant(this, v, g, item_id);
+ });
+ }
+
+ fn visit_field_def(&mut self, field: &'hir FieldDef<'hir>) {
+ self.insert(field.span, field.hir_id, Node::Field(field));
+ self.with_parent(field.hir_id, |this| {
+ intravisit::walk_field_def(this, field);
+ });
+ }
+
+ fn visit_assoc_type_binding(&mut self, type_binding: &'hir TypeBinding<'hir>) {
+ self.insert(type_binding.span, type_binding.hir_id, Node::TypeBinding(type_binding));
+ self.with_parent(type_binding.hir_id, |this| {
+ intravisit::walk_assoc_type_binding(this, type_binding)
+ })
+ }
+
+ fn visit_trait_item_ref(&mut self, ii: &'hir TraitItemRef) {
+ // Do not visit the duplicate information in TraitItemRef. We want to
+ // map the actual nodes, not the duplicate ones in the *Ref.
+ let TraitItemRef { id, ident: _, kind: _, span: _ } = *ii;
+
+ self.visit_nested_trait_item(id);
+ }
+
+ fn visit_impl_item_ref(&mut self, ii: &'hir ImplItemRef) {
+ // Do not visit the duplicate information in ImplItemRef. We want to
+ // map the actual nodes, not the duplicate ones in the *Ref.
+ let ImplItemRef { id, ident: _, kind: _, span: _, trait_item_def_id: _ } = *ii;
+
+ self.visit_nested_impl_item(id);
+ }
+
+ fn visit_foreign_item_ref(&mut self, fi: &'hir ForeignItemRef) {
+ // Do not visit the duplicate information in ForeignItemRef. We want to
+ // map the actual nodes, not the duplicate ones in the *Ref.
+ let ForeignItemRef { id, ident: _, span: _ } = *fi;
+
+ self.visit_nested_foreign_item(id);
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs
new file mode 100644
index 000000000..ee4c0036f
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/item.rs
@@ -0,0 +1,1513 @@
+use super::ResolverAstLoweringExt;
+use super::{AstOwner, ImplTraitContext, ImplTraitPosition};
+use super::{FnDeclKind, LoweringContext, ParamMode};
+
+use rustc_ast::ptr::P;
+use rustc_ast::visit::AssocCtxt;
+use rustc_ast::*;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
+use rustc_hir::PredicateOrigin;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::ty::{DefIdTree, ResolverAstLowering, TyCtxt};
+use rustc_span::source_map::DesugaringKind;
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::Span;
+use rustc_target::spec::abi;
+use smallvec::{smallvec, SmallVec};
+
+use std::iter;
+
+pub(super) struct ItemLowerer<'a, 'hir> {
+ pub(super) tcx: TyCtxt<'hir>,
+ pub(super) resolver: &'a mut ResolverAstLowering,
+ pub(super) ast_index: &'a IndexVec<LocalDefId, AstOwner<'a>>,
+ pub(super) owners: &'a mut IndexVec<LocalDefId, hir::MaybeOwner<&'hir hir::OwnerInfo<'hir>>>,
+}
+
+/// A type alias *may* have two where clauses. To give the best diagnostics, we set the span
+/// to the preferred where clause, if it exists; otherwise, we set it to the other where
+/// clause, if that one exists.
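+///
+/// For example (a sketch), an alias written as `type A<T> where T: Clone = T where T: Copy;`
+/// would carry one where clause before the `=` and one after it; callers pick the
+/// preferred position via `prefer_first`.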
+fn add_ty_alias_where_clause(
+ generics: &mut ast::Generics,
+ mut where_clauses: (TyAliasWhereClause, TyAliasWhereClause),
+ prefer_first: bool,
+) {
+ if !prefer_first {
+ where_clauses = (where_clauses.1, where_clauses.0);
+ }
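+ // A `TyAliasWhereClause` is a `(bool, Span)` pair: whether a `where` token was
+ // actually written, and the clause's span. Prefer the clause that has a `where`
+ // token, falling back to the other one.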
+ if where_clauses.0.0 || !where_clauses.1.0 {
+ generics.where_clause.has_where_token = where_clauses.0.0;
+ generics.where_clause.span = where_clauses.0.1;
+ } else {
+ generics.where_clause.has_where_token = where_clauses.1.0;
+ generics.where_clause.span = where_clauses.1.1;
+ }
+}
+
+impl<'a, 'hir> ItemLowerer<'a, 'hir> {
+ fn with_lctx(
+ &mut self,
+ owner: NodeId,
+ f: impl FnOnce(&mut LoweringContext<'_, 'hir>) -> hir::OwnerNode<'hir>,
+ ) {
+ let mut lctx = LoweringContext {
+ // Pseudo-globals.
+ tcx: self.tcx,
+ resolver: self.resolver,
+ arena: self.tcx.hir_arena,
+
+ // HirId handling.
+ bodies: Vec::new(),
+ attrs: SortedMap::default(),
+ children: FxHashMap::default(),
+ current_hir_id_owner: CRATE_DEF_ID,
+ item_local_id_counter: hir::ItemLocalId::new(0),
+ node_id_to_local_id: Default::default(),
+ local_id_to_def_id: SortedMap::new(),
+ trait_map: Default::default(),
+
+ // Lowering state.
+ catch_scope: None,
+ loop_scope: None,
+ is_in_loop_condition: false,
+ is_in_trait_impl: false,
+ is_in_dyn_type: false,
+ generator_kind: None,
+ task_context: None,
+ current_item: None,
+ impl_trait_defs: Vec::new(),
+ impl_trait_bounds: Vec::new(),
+ allow_try_trait: Some([sym::try_trait_v2, sym::yeet_desugar_details][..].into()),
+ allow_gen_future: Some([sym::gen_future][..].into()),
+ allow_into_future: Some([sym::into_future][..].into()),
+ };
+ lctx.with_hir_id_owner(owner, |lctx| f(lctx));
+
+ for (def_id, info) in lctx.children {
+ self.owners.ensure_contains_elem(def_id, || hir::MaybeOwner::Phantom);
+ debug_assert!(matches!(self.owners[def_id], hir::MaybeOwner::Phantom));
+ self.owners[def_id] = info;
+ }
+ }
+
+ pub(super) fn lower_node(
+ &mut self,
+ def_id: LocalDefId,
+ ) -> hir::MaybeOwner<&'hir hir::OwnerInfo<'hir>> {
+ self.owners.ensure_contains_elem(def_id, || hir::MaybeOwner::Phantom);
+ if let hir::MaybeOwner::Phantom = self.owners[def_id] {
+ let node = self.ast_index[def_id];
+ match node {
+ AstOwner::NonOwner => {}
+ AstOwner::Crate(c) => self.lower_crate(c),
+ AstOwner::Item(item) => self.lower_item(item),
+ AstOwner::AssocItem(item, ctxt) => self.lower_assoc_item(item, ctxt),
+ AstOwner::ForeignItem(item) => self.lower_foreign_item(item),
+ }
+ }
+
+ self.owners[def_id]
+ }
+
+ #[instrument(level = "debug", skip(self, c))]
+ fn lower_crate(&mut self, c: &Crate) {
+ debug_assert_eq!(self.resolver.node_id_to_def_id[&CRATE_NODE_ID], CRATE_DEF_ID);
+ self.with_lctx(CRATE_NODE_ID, |lctx| {
+ let module = lctx.lower_mod(&c.items, &c.spans);
+ lctx.lower_attrs(hir::CRATE_HIR_ID, &c.attrs);
+ hir::OwnerNode::Crate(lctx.arena.alloc(module))
+ })
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn lower_item(&mut self, item: &Item) {
+ self.with_lctx(item.id, |lctx| hir::OwnerNode::Item(lctx.lower_item(item)))
+ }
+
+ fn lower_assoc_item(&mut self, item: &AssocItem, ctxt: AssocCtxt) {
+ let def_id = self.resolver.node_id_to_def_id[&item.id];
+
+ let parent_id = self.tcx.local_parent(def_id);
+ let parent_hir = self.lower_node(parent_id).unwrap();
+ self.with_lctx(item.id, |lctx| {
+ // Evaluate with the lifetimes in `params` in-scope.
+ // This is used to track which lifetimes have already been defined,
+ // and which need to be replicated when lowering an async fn.
+ match parent_hir.node().expect_item().kind {
+ hir::ItemKind::Impl(hir::Impl { ref of_trait, .. }) => {
+ lctx.is_in_trait_impl = of_trait.is_some();
+ }
+ _ => {}
+ };
+
+ match ctxt {
+ AssocCtxt::Trait => hir::OwnerNode::TraitItem(lctx.lower_trait_item(item)),
+ AssocCtxt::Impl => hir::OwnerNode::ImplItem(lctx.lower_impl_item(item)),
+ }
+ })
+ }
+
+ fn lower_foreign_item(&mut self, item: &ForeignItem) {
+ self.with_lctx(item.id, |lctx| hir::OwnerNode::ForeignItem(lctx.lower_foreign_item(item)))
+ }
+}
+
+impl<'hir> LoweringContext<'_, 'hir> {
+ pub(super) fn lower_mod(&mut self, items: &[P<Item>], spans: &ModSpans) -> hir::Mod<'hir> {
+ hir::Mod {
+ spans: hir::ModSpans {
+ inner_span: self.lower_span(spans.inner_span),
+ inject_use_span: self.lower_span(spans.inject_use_span),
+ },
+ item_ids: self.arena.alloc_from_iter(items.iter().flat_map(|x| self.lower_item_ref(x))),
+ }
+ }
+
+ pub(super) fn lower_item_ref(&mut self, i: &Item) -> SmallVec<[hir::ItemId; 1]> {
+ let mut node_ids = smallvec![hir::ItemId { def_id: self.local_def_id(i.id) }];
+ if let ItemKind::Use(ref use_tree) = &i.kind {
+ self.lower_item_id_use_tree(use_tree, i.id, &mut node_ids);
+ }
+ node_ids
+ }
+
+ fn lower_item_id_use_tree(
+ &mut self,
+ tree: &UseTree,
+ base_id: NodeId,
+ vec: &mut SmallVec<[hir::ItemId; 1]>,
+ ) {
+ match tree.kind {
+ UseTreeKind::Nested(ref nested_vec) => {
+ for &(ref nested, id) in nested_vec {
+ vec.push(hir::ItemId { def_id: self.local_def_id(id) });
+ self.lower_item_id_use_tree(nested, id, vec);
+ }
+ }
+ UseTreeKind::Glob => {}
+ UseTreeKind::Simple(_, id1, id2) => {
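+ // The first resolution from `expect_full_res_from_use` belongs to the
+ // `use` item itself; any remaining per-namespace resolutions are paired
+ // with the extra node ids `id1` and `id2`.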
+ for (_, &id) in
+ iter::zip(self.expect_full_res_from_use(base_id).skip(1), &[id1, id2])
+ {
+ vec.push(hir::ItemId { def_id: self.local_def_id(id) });
+ }
+ }
+ }
+ }
+
+ fn lower_item(&mut self, i: &Item) -> &'hir hir::Item<'hir> {
+ let mut ident = i.ident;
+ let vis_span = self.lower_span(i.vis.span);
+ let hir_id = self.lower_node_id(i.id);
+ let attrs = self.lower_attrs(hir_id, &i.attrs);
+ let kind = self.lower_item_kind(i.span, i.id, hir_id, &mut ident, attrs, vis_span, &i.kind);
+ let item = hir::Item {
+ def_id: hir_id.expect_owner(),
+ ident: self.lower_ident(ident),
+ kind,
+ vis_span,
+ span: self.lower_span(i.span),
+ };
+ self.arena.alloc(item)
+ }
+
+ fn lower_item_kind(
+ &mut self,
+ span: Span,
+ id: NodeId,
+ hir_id: hir::HirId,
+ ident: &mut Ident,
+ attrs: Option<&'hir [Attribute]>,
+ vis_span: Span,
+ i: &ItemKind,
+ ) -> hir::ItemKind<'hir> {
+ match *i {
+ ItemKind::ExternCrate(orig_name) => hir::ItemKind::ExternCrate(orig_name),
+ ItemKind::Use(ref use_tree) => {
+ // Start with an empty prefix.
+ let prefix = Path { segments: vec![], span: use_tree.span, tokens: None };
+
+ self.lower_use_tree(use_tree, &prefix, id, vis_span, ident, attrs)
+ }
+ ItemKind::Static(ref t, m, ref e) => {
+ let (ty, body_id) = self.lower_const_item(t, span, e.as_deref());
+ hir::ItemKind::Static(ty, m, body_id)
+ }
+ ItemKind::Const(_, ref t, ref e) => {
+ let (ty, body_id) = self.lower_const_item(t, span, e.as_deref());
+ hir::ItemKind::Const(ty, body_id)
+ }
+ ItemKind::Fn(box Fn {
+ sig: FnSig { ref decl, header, span: fn_sig_span },
+ ref generics,
+ ref body,
+ ..
+ }) => {
+ self.with_new_scopes(|this| {
+ this.current_item = Some(ident.span);
+
+ // Note: we don't need to change the return type from `T` to
+ // `impl Future<Output = T>` here because lower_body
+ // only cares about the input argument patterns in the function
+ // declaration (decl), not the return types.
+ let asyncness = header.asyncness;
+ let body_id =
+ this.lower_maybe_async_body(span, &decl, asyncness, body.as_deref());
+
+ let itctx = ImplTraitContext::Universal;
+ let (generics, decl) = this.lower_generics(generics, id, itctx, |this| {
+ let ret_id = asyncness.opt_return_id();
+ this.lower_fn_decl(&decl, Some(id), FnDeclKind::Fn, ret_id)
+ });
+ let sig = hir::FnSig {
+ decl,
+ header: this.lower_fn_header(header),
+ span: this.lower_span(fn_sig_span),
+ };
+ hir::ItemKind::Fn(sig, generics, body_id)
+ })
+ }
+ ItemKind::Mod(_, ref mod_kind) => match mod_kind {
+ ModKind::Loaded(items, _, spans) => {
+ hir::ItemKind::Mod(self.lower_mod(items, spans))
+ }
+ ModKind::Unloaded => panic!("`mod` items should have been loaded by now"),
+ },
+ ItemKind::ForeignMod(ref fm) => hir::ItemKind::ForeignMod {
+ abi: fm.abi.map_or(abi::Abi::FALLBACK, |abi| self.lower_abi(abi)),
+ items: self
+ .arena
+ .alloc_from_iter(fm.items.iter().map(|x| self.lower_foreign_item_ref(x))),
+ },
+ ItemKind::GlobalAsm(ref asm) => {
+ hir::ItemKind::GlobalAsm(self.lower_inline_asm(span, asm))
+ }
+ ItemKind::TyAlias(box TyAlias {
+ ref generics,
+ where_clauses,
+ ty: Some(ref ty),
+ ..
+ }) => {
+ // We lower
+ //
+ // type Foo = impl Trait
+ //
+ // to
+ //
+ // type Foo = Foo1
+ // opaque type Foo1: Trait
+ let mut generics = generics.clone();
+ add_ty_alias_where_clause(&mut generics, where_clauses, true);
+ let (generics, ty) = self.lower_generics(
+ &generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.lower_ty(ty, ImplTraitContext::TypeAliasesOpaqueTy),
+ );
+ hir::ItemKind::TyAlias(ty, generics)
+ }
+ ItemKind::TyAlias(box TyAlias {
+ ref generics, ref where_clauses, ty: None, ..
+ }) => {
+ let mut generics = generics.clone();
+ add_ty_alias_where_clause(&mut generics, *where_clauses, true);
+ let (generics, ty) = self.lower_generics(
+ &generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.arena.alloc(this.ty(span, hir::TyKind::Err)),
+ );
+ hir::ItemKind::TyAlias(ty, generics)
+ }
+ ItemKind::Enum(ref enum_definition, ref generics) => {
+ let (generics, variants) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ this.arena.alloc_from_iter(
+ enum_definition.variants.iter().map(|x| this.lower_variant(x)),
+ )
+ },
+ );
+ hir::ItemKind::Enum(hir::EnumDef { variants }, generics)
+ }
+ ItemKind::Struct(ref struct_def, ref generics) => {
+ let (generics, struct_def) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.lower_variant_data(hir_id, struct_def),
+ );
+ hir::ItemKind::Struct(struct_def, generics)
+ }
+ ItemKind::Union(ref vdata, ref generics) => {
+ let (generics, vdata) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| this.lower_variant_data(hir_id, vdata),
+ );
+ hir::ItemKind::Union(vdata, generics)
+ }
+ ItemKind::Impl(box Impl {
+ unsafety,
+ polarity,
+ defaultness,
+ constness,
+ generics: ref ast_generics,
+ of_trait: ref trait_ref,
+ self_ty: ref ty,
+ items: ref impl_items,
+ }) => {
+ // Lower the "impl header" first. This ordering is important
+ // for in-band lifetimes! Consider `'a` here:
+ //
+ // impl Foo<'a> for u32 {
+ // fn method(&'a self) { .. }
+ // }
+ //
+ // Because we start by lowering the `Foo<'a> for u32`
+ // part, we will add `'a` to the list of generics on
+ // the impl. When we then encounter it later in the
+ // method, it will not be considered an in-band
+ // lifetime to be added, but rather a reference to a
+ // parent lifetime.
+ let itctx = ImplTraitContext::Universal;
+ let (generics, (trait_ref, lowered_ty)) =
+ self.lower_generics(ast_generics, id, itctx, |this| {
+ let trait_ref = trait_ref.as_ref().map(|trait_ref| {
+ this.lower_trait_ref(
+ trait_ref,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Trait),
+ )
+ });
+
+ let lowered_ty = this
+ .lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+
+ (trait_ref, lowered_ty)
+ });
+
+ let new_impl_items = self
+ .arena
+ .alloc_from_iter(impl_items.iter().map(|item| self.lower_impl_item_ref(item)));
+
+ // `defaultness.has_value()` is never called for an `impl`, so we pass `true`
+ // here to avoid an assertion failure inside the `lower_defaultness` function.
+ let has_val = true;
+ let (defaultness, defaultness_span) = self.lower_defaultness(defaultness, has_val);
+ let polarity = match polarity {
+ ImplPolarity::Positive => ImplPolarity::Positive,
+ ImplPolarity::Negative(s) => ImplPolarity::Negative(self.lower_span(s)),
+ };
+ hir::ItemKind::Impl(self.arena.alloc(hir::Impl {
+ unsafety: self.lower_unsafety(unsafety),
+ polarity,
+ defaultness,
+ defaultness_span,
+ constness: self.lower_constness(constness),
+ generics,
+ of_trait: trait_ref,
+ self_ty: lowered_ty,
+ items: new_impl_items,
+ }))
+ }
+ ItemKind::Trait(box Trait {
+ is_auto,
+ unsafety,
+ ref generics,
+ ref bounds,
+ ref items,
+ }) => {
+ let (generics, (unsafety, items, bounds)) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ let bounds = this.lower_param_bounds(
+ bounds,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ );
+ let items = this.arena.alloc_from_iter(
+ items.iter().map(|item| this.lower_trait_item_ref(item)),
+ );
+ let unsafety = this.lower_unsafety(unsafety);
+ (unsafety, items, bounds)
+ },
+ );
+ hir::ItemKind::Trait(is_auto, unsafety, generics, bounds, items)
+ }
+ ItemKind::TraitAlias(ref generics, ref bounds) => {
+ let (generics, bounds) = self.lower_generics(
+ generics,
+ id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ this.lower_param_bounds(
+ bounds,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ )
+ },
+ );
+ hir::ItemKind::TraitAlias(generics, bounds)
+ }
+ ItemKind::MacroDef(MacroDef { ref body, macro_rules }) => {
+ let body = P(self.lower_mac_args(body));
+ let macro_kind = self.resolver.decl_macro_kind(self.local_def_id(id));
+ hir::ItemKind::Macro(ast::MacroDef { body, macro_rules }, macro_kind)
+ }
+ ItemKind::MacCall(..) => {
+ panic!("`TyMac` should have been expanded by now")
+ }
+ }
+ }
+
+ fn lower_const_item(
+ &mut self,
+ ty: &Ty,
+ span: Span,
+ body: Option<&Expr>,
+ ) -> (&'hir hir::Ty<'hir>, hir::BodyId) {
+ let ty = self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ (ty, self.lower_const_body(span, body))
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn lower_use_tree(
+ &mut self,
+ tree: &UseTree,
+ prefix: &Path,
+ id: NodeId,
+ vis_span: Span,
+ ident: &mut Ident,
+ attrs: Option<&'hir [Attribute]>,
+ ) -> hir::ItemKind<'hir> {
+ let path = &tree.prefix;
+ let segments = prefix.segments.iter().chain(path.segments.iter()).cloned().collect();
+
+ match tree.kind {
+ UseTreeKind::Simple(rename, id1, id2) => {
+ *ident = tree.ident();
+
+ // First, apply the prefix to the path.
+ let mut path = Path { segments, span: path.span, tokens: None };
+
+ // Correctly resolve `self` imports.
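+ // (E.g. `use foo::bar::self;` imports `bar`: the trailing `self` segment
+ // is dropped and, absent a rename, `bar` becomes the binding's ident.)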
+ if path.segments.len() > 1
+ && path.segments.last().unwrap().ident.name == kw::SelfLower
+ {
+ let _ = path.segments.pop();
+ if rename.is_none() {
+ *ident = path.segments.last().unwrap().ident;
+ }
+ }
+
+ let mut resolutions = self.expect_full_res_from_use(id).fuse();
+ // We want to return *something* from this function, so hold onto the first item
+ // for later.
+ let ret_res = self.lower_res(resolutions.next().unwrap_or(Res::Err));
+
+ // Here, we loop over the namespaces that exist for the definition
+ // being imported. We only handle the type and value namespaces because we
+ // won't be dealing with macros in the rest of the compiler.
+ // Essentially, a single `use` which imports two names is desugared into
+ // two imports.
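+ // For example, `use std::num::Wrapping;` resolves in both the type and
+ // value namespaces (the struct and its tuple constructor), so it is
+ // desugared into two `use` items, one per namespace.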
+ for new_node_id in [id1, id2] {
+ let new_id = self.local_def_id(new_node_id);
+ let Some(res) = resolutions.next() else {
+ // Associate an HirId to both ids even if there is no resolution.
+ let _old = self.children.insert(
+ new_id,
+ hir::MaybeOwner::NonOwner(hir::HirId::make_owner(new_id)),
+ );
+ debug_assert!(_old.is_none());
+ continue;
+ };
+ let ident = *ident;
+ let mut path = path.clone();
+ for seg in &mut path.segments {
+ seg.id = self.next_node_id();
+ }
+ let span = path.span;
+
+ self.with_hir_id_owner(new_node_id, |this| {
+ let res = this.lower_res(res);
+ let path = this.lower_path_extra(res, &path, ParamMode::Explicit);
+ let kind = hir::ItemKind::Use(path, hir::UseKind::Single);
+ if let Some(attrs) = attrs {
+ this.attrs.insert(hir::ItemLocalId::new(0), attrs);
+ }
+
+ let item = hir::Item {
+ def_id: new_id,
+ ident: this.lower_ident(ident),
+ kind,
+ vis_span,
+ span: this.lower_span(span),
+ };
+ hir::OwnerNode::Item(this.arena.alloc(item))
+ });
+ }
+
+ let path = self.lower_path_extra(ret_res, &path, ParamMode::Explicit);
+ hir::ItemKind::Use(path, hir::UseKind::Single)
+ }
+ UseTreeKind::Glob => {
+ let path = self.lower_path(
+ id,
+ &Path { segments, span: path.span, tokens: None },
+ ParamMode::Explicit,
+ );
+ hir::ItemKind::Use(path, hir::UseKind::Glob)
+ }
+ UseTreeKind::Nested(ref trees) => {
+ // Nested imports are desugared into simple imports.
+ // So, if we start with
+ //
+ // ```
+ // pub(x) use foo::{a, b};
+ // ```
+ //
+ // we will create three items:
+ //
+ // ```
+ // pub(x) use foo::a;
+ // pub(x) use foo::b;
+ // pub(x) use foo::{}; // <-- this is called the `ListStem`
+ // ```
+ //
+ // The first two are produced by recursively invoking
+ // `lower_use_tree` (and indeed there may be things
+ // like `use foo::{a::{b, c}}` and so forth). They
+ // wind up being directly added to
+ // `self.items`. However, the structure of this
+ // function also requires us to return one item, and
+ // for that we return the `{}` import (called the
+ // `ListStem`).
+
+ let prefix = Path { segments, span: prefix.span.to(path.span), tokens: None };
+
+ // Add all the nested `PathListItem`s to the HIR.
+ for &(ref use_tree, id) in trees {
+ let new_hir_id = self.local_def_id(id);
+
+ let mut prefix = prefix.clone();
+
+ // Give the segments new node-ids since they are being cloned.
+ for seg in &mut prefix.segments {
+ seg.id = self.next_node_id();
+ }
+
+ // Each `use` import is an item and thus owns the names in
+ // its path. Up to this point the nested import has been the
+ // current owner; since we want each desugared import to
+ // own its own names, we have to adjust the owner before
+ // lowering the rest of the import.
+ self.with_hir_id_owner(id, |this| {
+ let mut ident = *ident;
+
+ let kind =
+ this.lower_use_tree(use_tree, &prefix, id, vis_span, &mut ident, attrs);
+ if let Some(attrs) = attrs {
+ this.attrs.insert(hir::ItemLocalId::new(0), attrs);
+ }
+
+ let item = hir::Item {
+ def_id: new_hir_id,
+ ident: this.lower_ident(ident),
+ kind,
+ vis_span,
+ span: this.lower_span(use_tree.span),
+ };
+ hir::OwnerNode::Item(this.arena.alloc(item))
+ });
+ }
+
+ let res = self.expect_full_res_from_use(id).next().unwrap_or(Res::Err);
+ let res = self.lower_res(res);
+ let path = self.lower_path_extra(res, &prefix, ParamMode::Explicit);
+ hir::ItemKind::Use(path, hir::UseKind::ListStem)
+ }
+ }
+ }
+
+ fn lower_foreign_item(&mut self, i: &ForeignItem) -> &'hir hir::ForeignItem<'hir> {
+ let hir_id = self.lower_node_id(i.id);
+ let def_id = hir_id.expect_owner();
+ self.lower_attrs(hir_id, &i.attrs);
+ let item = hir::ForeignItem {
+ def_id,
+ ident: self.lower_ident(i.ident),
+ kind: match i.kind {
+ ForeignItemKind::Fn(box Fn { ref sig, ref generics, .. }) => {
+ let fdec = &sig.decl;
+ let itctx = ImplTraitContext::Universal;
+ let (generics, (fn_dec, fn_args)) =
+ self.lower_generics(generics, i.id, itctx, |this| {
+ (
+ // Disallow `impl Trait` in foreign items.
+ this.lower_fn_decl(fdec, None, FnDeclKind::ExternFn, None),
+ this.lower_fn_params_to_names(fdec),
+ )
+ });
+
+ hir::ForeignItemKind::Fn(fn_dec, fn_args, generics)
+ }
+ ForeignItemKind::Static(ref t, m, _) => {
+ let ty =
+ self.lower_ty(t, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ hir::ForeignItemKind::Static(ty, m)
+ }
+ ForeignItemKind::TyAlias(..) => hir::ForeignItemKind::Type,
+ ForeignItemKind::MacCall(_) => panic!("macro shouldn't exist here"),
+ },
+ vis_span: self.lower_span(i.vis.span),
+ span: self.lower_span(i.span),
+ };
+ self.arena.alloc(item)
+ }
+
+ fn lower_foreign_item_ref(&mut self, i: &ForeignItem) -> hir::ForeignItemRef {
+ hir::ForeignItemRef {
+ id: hir::ForeignItemId { def_id: self.local_def_id(i.id) },
+ ident: self.lower_ident(i.ident),
+ span: self.lower_span(i.span),
+ }
+ }
+
+ fn lower_variant(&mut self, v: &Variant) -> hir::Variant<'hir> {
+ let id = self.lower_node_id(v.id);
+ self.lower_attrs(id, &v.attrs);
+ hir::Variant {
+ id,
+ data: self.lower_variant_data(id, &v.data),
+ disr_expr: v.disr_expr.as_ref().map(|e| self.lower_anon_const(e)),
+ ident: self.lower_ident(v.ident),
+ span: self.lower_span(v.span),
+ }
+ }
+
+ fn lower_variant_data(
+ &mut self,
+ parent_id: hir::HirId,
+ vdata: &VariantData,
+ ) -> hir::VariantData<'hir> {
+ match *vdata {
+ VariantData::Struct(ref fields, recovered) => hir::VariantData::Struct(
+ self.arena
+ .alloc_from_iter(fields.iter().enumerate().map(|f| self.lower_field_def(f))),
+ recovered,
+ ),
+ VariantData::Tuple(ref fields, id) => {
+ let ctor_id = self.lower_node_id(id);
+ self.alias_attrs(ctor_id, parent_id);
+ hir::VariantData::Tuple(
+ self.arena.alloc_from_iter(
+ fields.iter().enumerate().map(|f| self.lower_field_def(f)),
+ ),
+ ctor_id,
+ )
+ }
+ VariantData::Unit(id) => {
+ let ctor_id = self.lower_node_id(id);
+ self.alias_attrs(ctor_id, parent_id);
+ hir::VariantData::Unit(ctor_id)
+ }
+ }
+ }
+
+ fn lower_field_def(&mut self, (index, f): (usize, &FieldDef)) -> hir::FieldDef<'hir> {
+ let ty = if let TyKind::Path(ref qself, ref path) = f.ty.kind {
+ let t = self.lower_path_ty(
+ &f.ty,
+ qself,
+ path,
+ ParamMode::ExplicitNamed, // no `'_` in declarations (Issue #61124)
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ self.arena.alloc(t)
+ } else {
+ self.lower_ty(&f.ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type))
+ };
+ let hir_id = self.lower_node_id(f.id);
+ self.lower_attrs(hir_id, &f.attrs);
+ hir::FieldDef {
+ span: self.lower_span(f.span),
+ hir_id,
+ ident: match f.ident {
+ Some(ident) => self.lower_ident(ident),
+ // FIXME(jseyfried): positional field hygiene.
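+ // E.g. the fields of `struct S(u8, u16);` are named `0` and `1` here.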
+ None => Ident::new(sym::integer(index), self.lower_span(f.span)),
+ },
+ vis_span: self.lower_span(f.vis.span),
+ ty,
+ }
+ }
+
+ fn lower_trait_item(&mut self, i: &AssocItem) -> &'hir hir::TraitItem<'hir> {
+ let hir_id = self.lower_node_id(i.id);
+ let trait_item_def_id = hir_id.expect_owner();
+
+ let (generics, kind, has_default) = match i.kind {
+ AssocItemKind::Const(_, ref ty, ref default) => {
+ let ty = self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ let body = default.as_ref().map(|x| self.lower_const_body(i.span, Some(x)));
+ (hir::Generics::empty(), hir::TraitItemKind::Const(ty, body), body.is_some())
+ }
+ AssocItemKind::Fn(box Fn { ref sig, ref generics, body: None, .. }) => {
+ let names = self.lower_fn_params_to_names(&sig.decl);
+ let (generics, sig) =
+ self.lower_method_sig(generics, sig, i.id, FnDeclKind::Trait, None);
+ (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Required(names)), false)
+ }
+ AssocItemKind::Fn(box Fn { ref sig, ref generics, body: Some(ref body), .. }) => {
+ let asyncness = sig.header.asyncness;
+ let body_id =
+ self.lower_maybe_async_body(i.span, &sig.decl, asyncness, Some(&body));
+ let (generics, sig) = self.lower_method_sig(
+ generics,
+ sig,
+ i.id,
+ FnDeclKind::Trait,
+ asyncness.opt_return_id(),
+ );
+ (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Provided(body_id)), true)
+ }
+ AssocItemKind::TyAlias(box TyAlias {
+ ref generics,
+ where_clauses,
+ ref bounds,
+ ref ty,
+ ..
+ }) => {
+ let mut generics = generics.clone();
+ add_ty_alias_where_clause(&mut generics, where_clauses, false);
+ let (generics, kind) = self.lower_generics(
+ &generics,
+ i.id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| {
+ let ty = ty.as_ref().map(|x| {
+ this.lower_ty(x, ImplTraitContext::Disallowed(ImplTraitPosition::Type))
+ });
+ hir::TraitItemKind::Type(
+ this.lower_param_bounds(
+ bounds,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ ),
+ ty,
+ )
+ },
+ );
+ (generics, kind, ty.is_some())
+ }
+ AssocItemKind::MacCall(..) => panic!("macro item shouldn't exist at this point"),
+ };
+
+ self.lower_attrs(hir_id, &i.attrs);
+ let item = hir::TraitItem {
+ def_id: trait_item_def_id,
+ ident: self.lower_ident(i.ident),
+ generics,
+ kind,
+ span: self.lower_span(i.span),
+ defaultness: hir::Defaultness::Default { has_value: has_default },
+ };
+ self.arena.alloc(item)
+ }
+
+ fn lower_trait_item_ref(&mut self, i: &AssocItem) -> hir::TraitItemRef {
+ let kind = match &i.kind {
+ AssocItemKind::Const(..) => hir::AssocItemKind::Const,
+ AssocItemKind::TyAlias(..) => hir::AssocItemKind::Type,
+ AssocItemKind::Fn(box Fn { sig, .. }) => {
+ hir::AssocItemKind::Fn { has_self: sig.decl.has_self() }
+ }
+ AssocItemKind::MacCall(..) => unimplemented!(),
+ };
+ let id = hir::TraitItemId { def_id: self.local_def_id(i.id) };
+ hir::TraitItemRef {
+ id,
+ ident: self.lower_ident(i.ident),
+ span: self.lower_span(i.span),
+ kind,
+ }
+ }
+
+ /// Construct `ExprKind::Err` for the given `span`.
+ pub(crate) fn expr_err(&mut self, span: Span) -> hir::Expr<'hir> {
+ self.expr(span, hir::ExprKind::Err, AttrVec::new())
+ }
+
+ fn lower_impl_item(&mut self, i: &AssocItem) -> &'hir hir::ImplItem<'hir> {
+ // Since `default impl` is not yet implemented, this is always true in impls.
+ let has_value = true;
+ let (defaultness, _) = self.lower_defaultness(i.kind.defaultness(), has_value);
+
+ let (generics, kind) = match &i.kind {
+ AssocItemKind::Const(_, ty, expr) => {
+ let ty = self.lower_ty(ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ (
+ hir::Generics::empty(),
+ hir::ImplItemKind::Const(ty, self.lower_const_body(i.span, expr.as_deref())),
+ )
+ }
+ AssocItemKind::Fn(box Fn { sig, generics, body, .. }) => {
+ self.current_item = Some(i.span);
+ let asyncness = sig.header.asyncness;
+ let body_id =
+ self.lower_maybe_async_body(i.span, &sig.decl, asyncness, body.as_deref());
+ let (generics, sig) = self.lower_method_sig(
+ generics,
+ sig,
+ i.id,
+ if self.is_in_trait_impl { FnDeclKind::Impl } else { FnDeclKind::Inherent },
+ asyncness.opt_return_id(),
+ );
+
+ (generics, hir::ImplItemKind::Fn(sig, body_id))
+ }
+ AssocItemKind::TyAlias(box TyAlias { generics, where_clauses, ty, .. }) => {
+ let mut generics = generics.clone();
+ add_ty_alias_where_clause(&mut generics, *where_clauses, false);
+ self.lower_generics(
+ &generics,
+ i.id,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Generic),
+ |this| match ty {
+ None => {
+ let ty = this.arena.alloc(this.ty(i.span, hir::TyKind::Err));
+ hir::ImplItemKind::TyAlias(ty)
+ }
+ Some(ty) => {
+ let ty = this.lower_ty(ty, ImplTraitContext::TypeAliasesOpaqueTy);
+ hir::ImplItemKind::TyAlias(ty)
+ }
+ },
+ )
+ }
+ AssocItemKind::MacCall(..) => panic!("`TyMac` should have been expanded by now"),
+ };
+
+ let hir_id = self.lower_node_id(i.id);
+ self.lower_attrs(hir_id, &i.attrs);
+ let item = hir::ImplItem {
+ def_id: hir_id.expect_owner(),
+ ident: self.lower_ident(i.ident),
+ generics,
+ kind,
+ vis_span: self.lower_span(i.vis.span),
+ span: self.lower_span(i.span),
+ defaultness,
+ };
+ self.arena.alloc(item)
+ }
+
+ fn lower_impl_item_ref(&mut self, i: &AssocItem) -> hir::ImplItemRef {
+ hir::ImplItemRef {
+ id: hir::ImplItemId { def_id: self.local_def_id(i.id) },
+ ident: self.lower_ident(i.ident),
+ span: self.lower_span(i.span),
+ kind: match &i.kind {
+ AssocItemKind::Const(..) => hir::AssocItemKind::Const,
+ AssocItemKind::TyAlias(..) => hir::AssocItemKind::Type,
+ AssocItemKind::Fn(box Fn { sig, .. }) => {
+ hir::AssocItemKind::Fn { has_self: sig.decl.has_self() }
+ }
+ AssocItemKind::MacCall(..) => unimplemented!(),
+ },
+ trait_item_def_id: self.resolver.get_partial_res(i.id).map(|r| r.base_res().def_id()),
+ }
+ }
+
+ fn lower_defaultness(
+ &self,
+ d: Defaultness,
+ has_value: bool,
+ ) -> (hir::Defaultness, Option<Span>) {
+ match d {
+ Defaultness::Default(sp) => {
+ (hir::Defaultness::Default { has_value }, Some(self.lower_span(sp)))
+ }
+ Defaultness::Final => {
+ assert!(has_value);
+ (hir::Defaultness::Final, None)
+ }
+ }
+ }
+
+ fn record_body(
+ &mut self,
+ params: &'hir [hir::Param<'hir>],
+ value: hir::Expr<'hir>,
+ ) -> hir::BodyId {
+ let body = hir::Body { generator_kind: self.generator_kind, params, value };
+ let id = body.id();
+ debug_assert_eq!(id.hir_id.owner, self.current_hir_id_owner);
+ self.bodies.push((id.hir_id.local_id, self.arena.alloc(body)));
+ id
+ }
+
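+ /// Lowers a body, taking and later restoring the generator kind and task
+ /// context so that state from an enclosing body does not leak into this one
+ /// (or vice versa).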
+ pub(super) fn lower_body(
+ &mut self,
+ f: impl FnOnce(&mut Self) -> (&'hir [hir::Param<'hir>], hir::Expr<'hir>),
+ ) -> hir::BodyId {
+ let prev_gen_kind = self.generator_kind.take();
+ let task_context = self.task_context.take();
+ let (parameters, result) = f(self);
+ let body_id = self.record_body(parameters, result);
+ self.task_context = task_context;
+ self.generator_kind = prev_gen_kind;
+ body_id
+ }
+
+ fn lower_param(&mut self, param: &Param) -> hir::Param<'hir> {
+ let hir_id = self.lower_node_id(param.id);
+ self.lower_attrs(hir_id, &param.attrs);
+ hir::Param {
+ hir_id,
+ pat: self.lower_pat(&param.pat),
+ ty_span: self.lower_span(param.ty.span),
+ span: self.lower_span(param.span),
+ }
+ }
+
+ pub(super) fn lower_fn_body(
+ &mut self,
+ decl: &FnDecl,
+ body: impl FnOnce(&mut Self) -> hir::Expr<'hir>,
+ ) -> hir::BodyId {
+ self.lower_body(|this| {
+ (
+ this.arena.alloc_from_iter(decl.inputs.iter().map(|x| this.lower_param(x))),
+ body(this),
+ )
+ })
+ }
+
+ fn lower_fn_body_block(
+ &mut self,
+ span: Span,
+ decl: &FnDecl,
+ body: Option<&Block>,
+ ) -> hir::BodyId {
+ self.lower_fn_body(decl, |this| this.lower_block_expr_opt(span, body))
+ }
+
+ fn lower_block_expr_opt(&mut self, span: Span, block: Option<&Block>) -> hir::Expr<'hir> {
+ match block {
+ Some(block) => self.lower_block_expr(block),
+ None => self.expr_err(span),
+ }
+ }
+
+ pub(super) fn lower_const_body(&mut self, span: Span, expr: Option<&Expr>) -> hir::BodyId {
+ self.lower_body(|this| {
+ (
+ &[],
+ match expr {
+ Some(expr) => this.lower_expr_mut(expr),
+ None => this.expr_err(span),
+ },
+ )
+ })
+ }
+
+ fn lower_maybe_async_body(
+ &mut self,
+ span: Span,
+ decl: &FnDecl,
+ asyncness: Async,
+ body: Option<&Block>,
+ ) -> hir::BodyId {
+ let closure_id = match asyncness {
+ Async::Yes { closure_id, .. } => closure_id,
+ Async::No => return self.lower_fn_body_block(span, decl, body),
+ };
+
+ self.lower_body(|this| {
+ let mut parameters: Vec<hir::Param<'_>> = Vec::new();
+ let mut statements: Vec<hir::Stmt<'_>> = Vec::new();
+
+ // Async function parameters are lowered into the closure body so that they are
+ // captured and so that the drop order matches that of the equivalent non-async functions.
+ //
+ // from:
+ //
+ // async fn foo(<pattern>: <ty>, <pattern>: <ty>, <pattern>: <ty>) {
+ // <body>
+ // }
+ //
+ // into:
+ //
+ // fn foo(__arg0: <ty>, __arg1: <ty>, __arg2: <ty>) {
+ // async move {
+ // let __arg2 = __arg2;
+ // let <pattern> = __arg2;
+ // let __arg1 = __arg1;
+ // let <pattern> = __arg1;
+ // let __arg0 = __arg0;
+ // let <pattern> = __arg0;
+ // drop-temps { <body> } // see comments later in fn for details
+ // }
+ // }
+ //
+ // If `<pattern>` is a simple ident, then it is lowered to a single
+ // `let <pattern> = <pattern>;` statement as an optimization.
+ //
+ // Note that the body is embedded in `drop-temps`; an
+ // equivalent desugaring would be `return { <body>
+ // };`. The key point is that we wish to drop all the
+ // let-bound variables and temporaries created in the body
+ // (and its tail expression!) before we drop the
+ // parameters (cf. rust-lang/rust#64512).
+ for (index, parameter) in decl.inputs.iter().enumerate() {
+ let parameter = this.lower_param(parameter);
+ let span = parameter.pat.span;
+
+ // Check if this is a binding pattern; if so, we can optimize and avoid adding a
+ // `let <pat> = __argN;` statement. In this case, we do not rename the parameter.
+ let (ident, is_simple_parameter) = match parameter.pat.kind {
+ hir::PatKind::Binding(
+ hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
+ _,
+ ident,
+ _,
+ ) => (ident, true),
+ // For `ref mut` or wildcard arguments, we can't reuse the binding, but
+ // we can keep the same name for the parameter.
+ // This lets rustdoc render it correctly in documentation.
+ hir::PatKind::Binding(_, _, ident, _) => (ident, false),
+ hir::PatKind::Wild => {
+ (Ident::with_dummy_span(rustc_span::symbol::kw::Underscore), false)
+ }
+ _ => {
+ // Replace the ident for bindings that aren't simple.
+ let name = format!("__arg{}", index);
+ let ident = Ident::from_str(&name);
+
+ (ident, false)
+ }
+ };
+
+ let desugared_span = this.mark_span_with_reason(DesugaringKind::Async, span, None);
+
+ // Construct a parameter representing `__argN: <ty>` to replace the parameter of the
+ // async function.
+ //
+ // If this is the simple case, this parameter will end up being the same as the
+ // original parameter, but with a different pattern id.
+ let stmt_attrs = this.attrs.get(&parameter.hir_id.local_id).copied();
+ let (new_parameter_pat, new_parameter_id) = this.pat_ident(desugared_span, ident);
+ let new_parameter = hir::Param {
+ hir_id: parameter.hir_id,
+ pat: new_parameter_pat,
+ ty_span: this.lower_span(parameter.ty_span),
+ span: this.lower_span(parameter.span),
+ };
+
+ if is_simple_parameter {
+ // If this is the simple case, then we only insert one statement that is
+ // `let <pat> = <pat>;`. We re-use the original argument's pattern so that
+ // `HirId`s are densely assigned.
+ let expr = this.expr_ident(desugared_span, ident, new_parameter_id);
+ let stmt = this.stmt_let_pat(
+ stmt_attrs,
+ desugared_span,
+ Some(expr),
+ parameter.pat,
+ hir::LocalSource::AsyncFn,
+ );
+ statements.push(stmt);
+ } else {
+ // If this is not the simple case, then we construct two statements:
+ //
+ // ```
+ // let __argN = __argN;
+ // let <pat> = __argN;
+ // ```
+ //
+ // The first statement moves the parameter into the closure and thus ensures
+ // that the drop order is correct.
+ //
+ // The second statement creates the bindings that the user wrote.
+
+ // Construct the `let mut __argN = __argN;` statement. It must be a mut binding
+ // because the user may have specified a `ref mut` binding in the next
+ // statement.
+ let (move_pat, move_id) = this.pat_ident_binding_mode(
+ desugared_span,
+ ident,
+ hir::BindingAnnotation::Mutable,
+ );
+ let move_expr = this.expr_ident(desugared_span, ident, new_parameter_id);
+ let move_stmt = this.stmt_let_pat(
+ None,
+ desugared_span,
+ Some(move_expr),
+ move_pat,
+ hir::LocalSource::AsyncFn,
+ );
+
+ // Construct the `let <pat> = __argN;` statement. We re-use the original
+ // parameter's pattern so that `HirId`s are densely assigned.
+ let pattern_expr = this.expr_ident(desugared_span, ident, move_id);
+ let pattern_stmt = this.stmt_let_pat(
+ stmt_attrs,
+ desugared_span,
+ Some(pattern_expr),
+ parameter.pat,
+ hir::LocalSource::AsyncFn,
+ );
+
+ statements.push(move_stmt);
+ statements.push(pattern_stmt);
+ };
+
+ parameters.push(new_parameter);
+ }
+
+ let body_span = body.map_or(span, |b| b.span);
+ let async_expr = this.make_async_expr(
+ CaptureBy::Value,
+ closure_id,
+ None,
+ body_span,
+ hir::AsyncGeneratorKind::Fn,
+ |this| {
+ // Create a block from the user's function body:
+ let user_body = this.lower_block_expr_opt(body_span, body);
+
+ // Transform into `drop-temps { <user-body> }`, an expression:
+ let desugared_span =
+ this.mark_span_with_reason(DesugaringKind::Async, user_body.span, None);
+ let user_body = this.expr_drop_temps(
+ desugared_span,
+ this.arena.alloc(user_body),
+ AttrVec::new(),
+ );
+
+ // As noted above, create the final block like
+ //
+ // ```
+ // {
+ // let $param_pattern = $raw_param;
+ // ...
+ // drop-temps { <user-body> }
+ // }
+ // ```
+ let body = this.block_all(
+ desugared_span,
+ this.arena.alloc_from_iter(statements),
+ Some(user_body),
+ );
+
+ this.expr_block(body, AttrVec::new())
+ },
+ );
+
+ (
+ this.arena.alloc_from_iter(parameters),
+ this.expr(body_span, async_expr, AttrVec::new()),
+ )
+ })
+ }
+
+ fn lower_method_sig(
+ &mut self,
+ generics: &Generics,
+ sig: &FnSig,
+ id: NodeId,
+ kind: FnDeclKind,
+ is_async: Option<NodeId>,
+ ) -> (&'hir hir::Generics<'hir>, hir::FnSig<'hir>) {
+ let header = self.lower_fn_header(sig.header);
+ let itctx = ImplTraitContext::Universal;
+ let (generics, decl) = self.lower_generics(generics, id, itctx, |this| {
+ this.lower_fn_decl(&sig.decl, Some(id), kind, is_async)
+ });
+ (generics, hir::FnSig { header, decl, span: self.lower_span(sig.span) })
+ }
+
+ fn lower_fn_header(&mut self, h: FnHeader) -> hir::FnHeader {
+ hir::FnHeader {
+ unsafety: self.lower_unsafety(h.unsafety),
+ asyncness: self.lower_asyncness(h.asyncness),
+ constness: self.lower_constness(h.constness),
+ abi: self.lower_extern(h.ext),
+ }
+ }
+
+ pub(super) fn lower_abi(&mut self, abi: StrLit) -> abi::Abi {
+ abi::lookup(abi.symbol_unescaped.as_str()).unwrap_or_else(|| {
+ self.error_on_invalid_abi(abi);
+ abi::Abi::Rust
+ })
+ }
+
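+ /// Note that an implicit `extern` (no ABI string) lowers to `abi::Abi::FALLBACK`,
+ /// i.e. the `"C"` ABI.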
+ pub(super) fn lower_extern(&mut self, ext: Extern) -> abi::Abi {
+ match ext {
+ Extern::None => abi::Abi::Rust,
+ Extern::Implicit(_) => abi::Abi::FALLBACK,
+ Extern::Explicit(abi, _) => self.lower_abi(abi),
+ }
+ }
+
+ fn error_on_invalid_abi(&self, abi: StrLit) {
+ struct_span_err!(self.tcx.sess, abi.span, E0703, "invalid ABI: found `{}`", abi.symbol)
+ .span_label(abi.span, "invalid ABI")
+ .help(&format!("valid ABIs: {}", abi::all_names().join(", ")))
+ .emit();
+ }
+
+ fn lower_asyncness(&mut self, a: Async) -> hir::IsAsync {
+ match a {
+ Async::Yes { .. } => hir::IsAsync::Async,
+ Async::No => hir::IsAsync::NotAsync,
+ }
+ }
+
+ fn lower_constness(&mut self, c: Const) -> hir::Constness {
+ match c {
+ Const::Yes(_) => hir::Constness::Const,
+ Const::No => hir::Constness::NotConst,
+ }
+ }
+
+ pub(super) fn lower_unsafety(&mut self, u: Unsafe) -> hir::Unsafety {
+ match u {
+ Unsafe::Yes(_) => hir::Unsafety::Unsafe,
+ Unsafe::No => hir::Unsafety::Normal,
+ }
+ }
+
+ /// Returns the lowered `generics` as `hir::Generics`, paired with the result of
+ /// evaluating `f`; any impl trait definitions and bounds collected while running
+ /// `f` are folded into the lowered generics.
+ #[instrument(level = "debug", skip(self, f))]
+ fn lower_generics<T>(
+ &mut self,
+ generics: &Generics,
+ parent_node_id: NodeId,
+ itctx: ImplTraitContext,
+ f: impl FnOnce(&mut Self) -> T,
+ ) -> (&'hir hir::Generics<'hir>, T) {
+ debug_assert!(self.impl_trait_defs.is_empty());
+ debug_assert!(self.impl_trait_bounds.is_empty());
+
+ // Error if `?Trait` bounds in where clauses don't refer directly to type parameters.
+ // Note: we used to clone these bounds directly onto the type parameter (and avoid lowering
+ // these into hir when we lower the where clauses), but this makes it quite difficult to
+ // keep track of the Span info. Now, `add_implicitly_sized` in `AstConv` checks both param bounds and
+ // where clauses for `?Sized`.
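+ //
+ // For example, `where T: ?Sized` is fine when `T` is a declared type
+ // parameter, but something like `where Box<T>: ?Sized` is rejected by the
+ // check below.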
+ for pred in &generics.where_clause.predicates {
+ let WherePredicate::BoundPredicate(ref bound_pred) = *pred else {
+ continue;
+ };
+ let compute_is_param = || {
+ // Check if the where clause type is a plain type parameter.
+ match self
+ .resolver
+ .get_partial_res(bound_pred.bounded_ty.id)
+ .map(|d| (d.base_res(), d.unresolved_segments()))
+ {
+ Some((Res::Def(DefKind::TyParam, def_id), 0))
+ if bound_pred.bound_generic_params.is_empty() =>
+ {
+ generics
+ .params
+ .iter()
+ .any(|p| def_id == self.local_def_id(p.id).to_def_id())
+ }
+ // Either the `bounded_ty` is not a plain type parameter, or
+ // it's not found in the generic type parameters list.
+ _ => false,
+ }
+ };
+ // We only need to compute this once per `WherePredicate`, but don't
+ // need to compute this at all unless there is a Maybe bound.
+ let mut is_param: Option<bool> = None;
+ for bound in &bound_pred.bounds {
+ if !matches!(*bound, GenericBound::Trait(_, TraitBoundModifier::Maybe)) {
+ continue;
+ }
+ let is_param = *is_param.get_or_insert_with(compute_is_param);
+ if !is_param {
+ self.diagnostic().span_err(
+ bound.span(),
+ "`?Trait` bounds are only permitted at the \
+ point where a type parameter is declared",
+ );
+ }
+ }
+ }
+
+ let mut predicates: SmallVec<[hir::WherePredicate<'hir>; 4]> = SmallVec::new();
+ predicates.extend(generics.params.iter().filter_map(|param| {
+ self.lower_generic_bound_predicate(
+ param.ident,
+ param.id,
+ &param.kind,
+ &param.bounds,
+ itctx,
+ PredicateOrigin::GenericParam,
+ )
+ }));
+ predicates.extend(
+ generics
+ .where_clause
+ .predicates
+ .iter()
+ .map(|predicate| self.lower_where_predicate(predicate)),
+ );
+
+ let mut params: SmallVec<[hir::GenericParam<'hir>; 4]> =
+ self.lower_generic_params_mut(&generics.params).collect();
+
+ // Introduce extra lifetimes if late resolution tells us to.
+ let extra_lifetimes = self.resolver.take_extra_lifetime_params(parent_node_id);
+ params.extend(extra_lifetimes.into_iter().filter_map(|(ident, node_id, res)| {
+ self.lifetime_res_to_generic_param(ident, node_id, res)
+ }));
+
+ let has_where_clause_predicates = !generics.where_clause.predicates.is_empty();
+ let where_clause_span = self.lower_span(generics.where_clause.span);
+ let span = self.lower_span(generics.span);
+ let res = f(self);
+
+ let impl_trait_defs = std::mem::take(&mut self.impl_trait_defs);
+ params.extend(impl_trait_defs.into_iter());
+
+ let impl_trait_bounds = std::mem::take(&mut self.impl_trait_bounds);
+ predicates.extend(impl_trait_bounds.into_iter());
+
+ let lowered_generics = self.arena.alloc(hir::Generics {
+ params: self.arena.alloc_from_iter(params),
+ predicates: self.arena.alloc_from_iter(predicates),
+ has_where_clause_predicates,
+ where_clause_span,
+ span,
+ });
+
+ (lowered_generics, res)
+ }
+
+ pub(super) fn lower_generic_bound_predicate(
+ &mut self,
+ ident: Ident,
+ id: NodeId,
+ kind: &GenericParamKind,
+ bounds: &[GenericBound],
+ itctx: ImplTraitContext,
+ origin: PredicateOrigin,
+ ) -> Option<hir::WherePredicate<'hir>> {
+ // Do not create a clause if we do not have anything inside it.
+ if bounds.is_empty() {
+ return None;
+ }
+
+ let bounds = self.lower_param_bounds(bounds, itctx);
+
+ let ident = self.lower_ident(ident);
+ let param_span = ident.span;
+ let span = bounds
+ .iter()
+ .fold(Some(param_span.shrink_to_hi()), |span: Option<Span>, bound| {
+ let bound_span = bound.span();
+ // We include bounds that come from a `#[derive(_)]` but point at the user's code,
+ // as we use this method to get a span appropriate for suggestions.
+ if !bound_span.can_be_used_for_suggestions() {
+ None
+ } else if let Some(span) = span {
+ Some(span.to(bound_span))
+ } else {
+ Some(bound_span)
+ }
+ })
+ .unwrap_or(param_span.shrink_to_hi());
+ match kind {
+ GenericParamKind::Const { .. } => None,
+ GenericParamKind::Type { .. } => {
+ let def_id = self.local_def_id(id).to_def_id();
+ let ty_path = self.arena.alloc(hir::Path {
+ span: param_span,
+ res: Res::Def(DefKind::TyParam, def_id),
+ segments: self.arena.alloc_from_iter([hir::PathSegment::from_ident(ident)]),
+ });
+ let ty_id = self.next_id();
+ let bounded_ty =
+ self.ty_path(ty_id, param_span, hir::QPath::Resolved(None, ty_path));
+ Some(hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ bounded_ty: self.arena.alloc(bounded_ty),
+ bounds,
+ span,
+ bound_generic_params: &[],
+ origin,
+ }))
+ }
+ GenericParamKind::Lifetime => {
+ let ident_span = self.lower_span(ident.span);
+ let ident = self.lower_ident(ident);
+ let lt_id = self.next_node_id();
+ let lifetime = self.new_named_lifetime(id, lt_id, ident_span, ident);
+ Some(hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+ lifetime,
+ span,
+ bounds,
+ in_where_clause: false,
+ }))
+ }
+ }
+ }
+
+ fn lower_where_predicate(&mut self, pred: &WherePredicate) -> hir::WherePredicate<'hir> {
+ match *pred {
+ WherePredicate::BoundPredicate(WhereBoundPredicate {
+ ref bound_generic_params,
+ ref bounded_ty,
+ ref bounds,
+ span,
+ }) => hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ bound_generic_params: self.lower_generic_params(bound_generic_params),
+ bounded_ty: self
+ .lower_ty(bounded_ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
+ bounds: self.arena.alloc_from_iter(bounds.iter().map(|bound| {
+ self.lower_param_bound(
+ bound,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ )
+ })),
+ span: self.lower_span(span),
+ origin: PredicateOrigin::WhereClause,
+ }),
+ WherePredicate::RegionPredicate(WhereRegionPredicate {
+ ref lifetime,
+ ref bounds,
+ span,
+ }) => hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+ span: self.lower_span(span),
+ lifetime: self.lower_lifetime(lifetime),
+ bounds: self.lower_param_bounds(
+ bounds,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Bound),
+ ),
+ in_where_clause: true,
+ }),
+ WherePredicate::EqPredicate(WhereEqPredicate { id, ref lhs_ty, ref rhs_ty, span }) => {
+ hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
+ hir_id: self.lower_node_id(id),
+ lhs_ty: self
+ .lower_ty(lhs_ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
+ rhs_ty: self
+ .lower_ty(rhs_ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type)),
+ span: self.lower_span(span),
+ })
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs
new file mode 100644
index 000000000..224dc3c23
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/lib.rs
@@ -0,0 +1,2501 @@
+//! Lowers the AST to the HIR.
+//!
+//! Since the AST and HIR are fairly similar, this is mostly a simple procedure,
+//! much like a fold. Where lowering involves a bit more work things get more
+//! interesting and there are some invariants you should know about. These mostly
+//! concern spans and IDs.
+//!
+//! Spans are assigned to AST nodes during parsing and then are modified during
+//! expansion to indicate the origin of a node and the process it went through
+//! being expanded. IDs are assigned to AST nodes just before lowering.
+//!
+//! For the simpler lowering steps, IDs and spans should be preserved. Unlike
+//! expansion we do not preserve the process of lowering in the spans, so spans
+//! should not be modified here. When creating a new node (as opposed to
+//! "folding" an existing one), create a new ID using `next_id()`.
+//!
+//! You must ensure that IDs are unique. That means that you should only use the
+//! ID from an AST node in a single HIR node (you can assume that AST node-IDs
+//! are unique). Every new node must have a unique ID. Avoid cloning HIR nodes.
+//! If you do, you must then set the new node's ID to a fresh one.
+//!
+//! Spans are used for error messages and for tools to map semantics back to
+//! source code. It is therefore not as important to be strict with spans as it is
+//! with IDs (you can't break the compiler by getting a span wrong). Obviously, a
+//! HIR node can only have a single span. But multiple nodes can have the same
+//! span and spans don't need to be kept in order, etc. Where code is preserved
+//! by lowering, it should have the same span as in the AST. Where HIR nodes are
+//! new it is probably best to give a span for the whole AST node being lowered.
+//! All nodes should have real spans; don't use dummy spans. Tools are likely to
+//! get confused if the spans from leaf AST nodes occur in multiple places
+//! in the HIR, especially for multiple identifiers.
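+//!
+//! As an illustrative sketch of these rules (not from the original source): lowering
+//! a range expression `a..b` into a `Range { start: a, end: b }` struct literal keeps
+//! the IDs and spans of `a` and `b`, while the synthesized struct expression and its
+//! path get fresh IDs from `next_id()` and the span of the whole `a..b` node.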
+
+#![feature(box_patterns)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(never_type)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate tracing;
+
+use rustc_ast::visit;
+use rustc_ast::{self as ast, *};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{struct_span_err, Applicability, Handler};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, LifetimeRes, Namespace, PartialRes, PerNS, Res};
+use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
+use rustc_hir::definitions::DefPathData;
+use rustc_hir::{ConstArg, GenericArg, ItemLocalId, ParamName, TraitCandidate};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::span_bug;
+use rustc_middle::ty::{ResolverAstLowering, TyCtxt};
+use rustc_session::parse::feature_err;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::source_map::DesugaringKind;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+
+use smallvec::SmallVec;
+use std::collections::hash_map::Entry;
+
+macro_rules! arena_vec {
+ ($this:expr; $($x:expr),*) => (
+ $this.arena.alloc_from_iter([$($x),*])
+ );
+}
+
+mod asm;
+mod block;
+mod expr;
+mod index;
+mod item;
+mod lifetime_collector;
+mod pat;
+mod path;
+
+struct LoweringContext<'a, 'hir> {
+ tcx: TyCtxt<'hir>,
+ resolver: &'a mut ResolverAstLowering,
+
+ /// Used to allocate HIR nodes.
+ arena: &'hir hir::Arena<'hir>,
+
+ /// Bodies inside the owner being lowered.
+ bodies: Vec<(hir::ItemLocalId, &'hir hir::Body<'hir>)>,
+ /// Attributes inside the owner being lowered.
+ attrs: SortedMap<hir::ItemLocalId, &'hir [Attribute]>,
+    /// Items created while lowering the current owner are collected here.
+ children: FxHashMap<LocalDefId, hir::MaybeOwner<&'hir hir::OwnerInfo<'hir>>>,
+
+ generator_kind: Option<hir::GeneratorKind>,
+
+ /// When inside an `async` context, this is the `HirId` of the
+ /// `task_context` local bound to the resume argument of the generator.
+ task_context: Option<hir::HirId>,
+
+ /// Used to get the current `fn`'s def span to point to when using `await`
+ /// outside of an `async fn`.
+ current_item: Option<Span>,
+
+ catch_scope: Option<NodeId>,
+ loop_scope: Option<NodeId>,
+ is_in_loop_condition: bool,
+ is_in_trait_impl: bool,
+ is_in_dyn_type: bool,
+
+ current_hir_id_owner: LocalDefId,
+ item_local_id_counter: hir::ItemLocalId,
+ local_id_to_def_id: SortedMap<ItemLocalId, LocalDefId>,
+ trait_map: FxHashMap<ItemLocalId, Box<[TraitCandidate]>>,
+
+ impl_trait_defs: Vec<hir::GenericParam<'hir>>,
+ impl_trait_bounds: Vec<hir::WherePredicate<'hir>>,
+
+ /// NodeIds that are lowered inside the current HIR owner.
+ node_id_to_local_id: FxHashMap<NodeId, hir::ItemLocalId>,
+
+ allow_try_trait: Option<Lrc<[Symbol]>>,
+ allow_gen_future: Option<Lrc<[Symbol]>>,
+ allow_into_future: Option<Lrc<[Symbol]>>,
+}
+
+trait ResolverAstLoweringExt {
+ fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>>;
+ fn get_partial_res(&self, id: NodeId) -> Option<PartialRes>;
+ fn get_import_res(&self, id: NodeId) -> PerNS<Option<Res<NodeId>>>;
+ fn get_label_res(&self, id: NodeId) -> Option<NodeId>;
+ fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes>;
+ fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)>;
+ fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind;
+    /// Records the mapping from the `from` local def id to the `to` local def id in the
+    /// `generics_def_id_map` field.
+ fn record_def_id_remap(&mut self, from: LocalDefId, to: LocalDefId);
+    /// Gets the previously recorded `to` local def id for the given `from` local def id,
+    /// looked up in the `generics_def_id_map` field.
+ fn get_remapped_def_id(&self, local_def_id: LocalDefId) -> LocalDefId;
+}
+
+impl ResolverAstLoweringExt for ResolverAstLowering {
+ fn legacy_const_generic_args(&self, expr: &Expr) -> Option<Vec<usize>> {
+ if let ExprKind::Path(None, path) = &expr.kind {
+ // Don't perform legacy const generics rewriting if the path already
+ // has generic arguments.
+ if path.segments.last().unwrap().args.is_some() {
+ return None;
+ }
+
+ let partial_res = self.partial_res_map.get(&expr.id)?;
+ if partial_res.unresolved_segments() != 0 {
+ return None;
+ }
+
+ if let Res::Def(DefKind::Fn, def_id) = partial_res.base_res() {
+ // We only support cross-crate argument rewriting. Uses
+ // within the same crate should be updated to use the new
+ // const generics style.
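+                //
+                // Illustrative sketch (names hypothetical): for a cross-crate function
+                // annotated with `#[rustc_legacy_const_generics(1)]`, a call written as
+                // `foo(a, 3, b)` is rewritten so that `3` becomes the const generic
+                // argument, as in `foo::<3>(a, b)`.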
+ if def_id.is_local() {
+ return None;
+ }
+
+ if let Some(v) = self.legacy_const_generic_args.get(&def_id) {
+ return v.clone();
+ }
+ }
+ }
+
+ None
+ }
+
+ /// Obtains resolution for a `NodeId` with a single resolution.
+ fn get_partial_res(&self, id: NodeId) -> Option<PartialRes> {
+ self.partial_res_map.get(&id).copied()
+ }
+
+ /// Obtains per-namespace resolutions for `use` statement with the given `NodeId`.
+ fn get_import_res(&self, id: NodeId) -> PerNS<Option<Res<NodeId>>> {
+ self.import_res_map.get(&id).copied().unwrap_or_default()
+ }
+
+ /// Obtains resolution for a label with the given `NodeId`.
+ fn get_label_res(&self, id: NodeId) -> Option<NodeId> {
+ self.label_res_map.get(&id).copied()
+ }
+
+ /// Obtains resolution for a lifetime with the given `NodeId`.
+ fn get_lifetime_res(&self, id: NodeId) -> Option<LifetimeRes> {
+ self.lifetimes_res_map.get(&id).copied()
+ }
+
+ /// Obtain the list of lifetimes parameters to add to an item.
+ ///
+ /// Extra lifetime parameters should only be added in places that can appear
+ /// as a `binder` in `LifetimeRes`.
+ ///
+ /// The extra lifetimes that appear from the parenthesized `Fn`-trait desugaring
+ /// should appear at the enclosing `PolyTraitRef`.
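+    ///
+    /// As an illustrative example: in `dyn Fn(&u8)`, the elided lifetime is desugared
+    /// to an extra parameter on the enclosing binder, as if `dyn for<'a> Fn(&'a u8)`
+    /// had been written.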
+ fn take_extra_lifetime_params(&mut self, id: NodeId) -> Vec<(Ident, NodeId, LifetimeRes)> {
+ self.extra_lifetime_params_map.remove(&id).unwrap_or_default()
+ }
+
+ fn decl_macro_kind(&self, def_id: LocalDefId) -> MacroKind {
+ self.builtin_macro_kinds.get(&def_id).copied().unwrap_or(MacroKind::Bang)
+ }
+
+ /// Push a remapping into the top-most map.
+ /// Panics if no map has been pushed.
+    /// Remapping is used when lowering `-> impl Trait` return
+    /// types to create the resulting opaque type.
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn record_def_id_remap(&mut self, from: LocalDefId, to: LocalDefId) {
+ self.generics_def_id_map.last_mut().expect("no map pushed").insert(from, to);
+ }
+
+ fn get_remapped_def_id(&self, mut local_def_id: LocalDefId) -> LocalDefId {
+ // `generics_def_id_map` is a stack of mappings. As we go deeper in impl traits nesting we
+ // push new mappings so we need to try first the latest mappings, hence `iter().rev()`.
+ //
+ // Consider:
+ //
+ // `fn test<'a, 'b>() -> impl Trait<&'a u8, Ty = impl Sized + 'b> {}`
+ //
+        // We would end up with a `generics_def_id_map` like:
+ //
+ // `[[fn#'b -> impl_trait#'b], [fn#'b -> impl_sized#'b]]`
+ //
+        // For the opaque type generated for `impl Sized + 'b`, we want the result to be
+        // `impl_sized#'b`, so iterating forward is the wrong thing to do.
+ for map in self.generics_def_id_map.iter().rev() {
+ if let Some(r) = map.get(&local_def_id) {
+ debug!("def_id_remapper: remapping from `{local_def_id:?}` to `{r:?}`");
+ local_def_id = *r;
+ } else {
+ debug!("def_id_remapper: no remapping for `{local_def_id:?}` found in map");
+ }
+ }
+
+ local_def_id
+ }
+}
+
+/// Context of `impl Trait` in code, which determines whether it is allowed in an HIR subtree,
+/// and if so, what meaning it has.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum ImplTraitContext {
+ /// Treat `impl Trait` as shorthand for a new universal generic parameter.
+ /// Example: `fn foo(x: impl Debug)`, where `impl Debug` is conceptually
+ /// equivalent to a fresh universal parameter like `fn foo<T: Debug>(x: T)`.
+ ///
+ /// Newly generated parameters should be inserted into the given `Vec`.
+ Universal,
+
+ /// Treat `impl Trait` as shorthand for a new opaque type.
+ /// Example: `fn foo() -> impl Debug`, where `impl Debug` is conceptually
+ /// equivalent to a new opaque type like `type T = impl Debug; fn foo() -> T`.
+ ///
+ ReturnPositionOpaqueTy {
+        /// Origin: either `OpaqueTyOrigin::FnReturn` or `OpaqueTyOrigin::AsyncFn`.
+ origin: hir::OpaqueTyOrigin,
+ },
+    /// `impl Trait` in type aliases, e.g. `type Foo = impl Debug;`.
+ TypeAliasesOpaqueTy,
+ /// `impl Trait` is not accepted in this position.
+ Disallowed(ImplTraitPosition),
+}
+
+/// Position in which `impl Trait` is disallowed.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum ImplTraitPosition {
+ Path,
+ Variable,
+ Type,
+ Trait,
+ AsyncBlock,
+ Bound,
+ Generic,
+ ExternFnParam,
+ ClosureParam,
+ PointerParam,
+ FnTraitParam,
+ TraitParam,
+ ImplParam,
+ ExternFnReturn,
+ ClosureReturn,
+ PointerReturn,
+ FnTraitReturn,
+ TraitReturn,
+ ImplReturn,
+}
+
+impl std::fmt::Display for ImplTraitPosition {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let name = match self {
+ ImplTraitPosition::Path => "path",
+ ImplTraitPosition::Variable => "variable binding",
+ ImplTraitPosition::Type => "type",
+ ImplTraitPosition::Trait => "trait",
+ ImplTraitPosition::AsyncBlock => "async block",
+ ImplTraitPosition::Bound => "bound",
+ ImplTraitPosition::Generic => "generic",
+ ImplTraitPosition::ExternFnParam => "`extern fn` param",
+ ImplTraitPosition::ClosureParam => "closure param",
+ ImplTraitPosition::PointerParam => "`fn` pointer param",
+ ImplTraitPosition::FnTraitParam => "`Fn` trait param",
+ ImplTraitPosition::TraitParam => "trait method param",
+ ImplTraitPosition::ImplParam => "`impl` method param",
+ ImplTraitPosition::ExternFnReturn => "`extern fn` return",
+ ImplTraitPosition::ClosureReturn => "closure return",
+ ImplTraitPosition::PointerReturn => "`fn` pointer return",
+ ImplTraitPosition::FnTraitReturn => "`Fn` trait return",
+ ImplTraitPosition::TraitReturn => "trait method return",
+ ImplTraitPosition::ImplReturn => "`impl` method return",
+ };
+
+ write!(f, "{}", name)
+ }
+}
+
+#[derive(Debug)]
+enum FnDeclKind {
+ Fn,
+ Inherent,
+ ExternFn,
+ Closure,
+ Pointer,
+ Trait,
+ Impl,
+}
+
+impl FnDeclKind {
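+    /// Whether `-> impl Trait` is allowed for this kind of declaration.
+    /// (Illustrative: a free `fn f() -> impl Debug` or an inherent method is accepted
+    /// here, while a trait method return type takes the `Disallowed` path instead.)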
+ fn impl_trait_return_allowed(&self) -> bool {
+ match self {
+ FnDeclKind::Fn | FnDeclKind::Inherent => true,
+ _ => false,
+ }
+ }
+}
+
+#[derive(Copy, Clone)]
+enum AstOwner<'a> {
+ NonOwner,
+ Crate(&'a ast::Crate),
+ Item(&'a ast::Item),
+ AssocItem(&'a ast::AssocItem, visit::AssocCtxt),
+ ForeignItem(&'a ast::ForeignItem),
+}
+
+fn index_crate<'a>(
+ node_id_to_def_id: &FxHashMap<NodeId, LocalDefId>,
+ krate: &'a Crate,
+) -> IndexVec<LocalDefId, AstOwner<'a>> {
+ let mut indexer = Indexer { node_id_to_def_id, index: IndexVec::new() };
+ indexer.index.ensure_contains_elem(CRATE_DEF_ID, || AstOwner::NonOwner);
+ indexer.index[CRATE_DEF_ID] = AstOwner::Crate(krate);
+ visit::walk_crate(&mut indexer, krate);
+ return indexer.index;
+
+ struct Indexer<'s, 'a> {
+ node_id_to_def_id: &'s FxHashMap<NodeId, LocalDefId>,
+ index: IndexVec<LocalDefId, AstOwner<'a>>,
+ }
+
+ impl<'a> visit::Visitor<'a> for Indexer<'_, 'a> {
+ fn visit_attribute(&mut self, _: &'a Attribute) {
+ // We do not want to lower expressions that appear in attributes,
+ // as they are not accessible to the rest of the HIR.
+ }
+
+ fn visit_item(&mut self, item: &'a ast::Item) {
+ let def_id = self.node_id_to_def_id[&item.id];
+ self.index.ensure_contains_elem(def_id, || AstOwner::NonOwner);
+ self.index[def_id] = AstOwner::Item(item);
+ visit::walk_item(self, item)
+ }
+
+ fn visit_assoc_item(&mut self, item: &'a ast::AssocItem, ctxt: visit::AssocCtxt) {
+ let def_id = self.node_id_to_def_id[&item.id];
+ self.index.ensure_contains_elem(def_id, || AstOwner::NonOwner);
+ self.index[def_id] = AstOwner::AssocItem(item, ctxt);
+ visit::walk_assoc_item(self, item, ctxt);
+ }
+
+ fn visit_foreign_item(&mut self, item: &'a ast::ForeignItem) {
+ let def_id = self.node_id_to_def_id[&item.id];
+ self.index.ensure_contains_elem(def_id, || AstOwner::NonOwner);
+ self.index[def_id] = AstOwner::ForeignItem(item);
+ visit::walk_foreign_item(self, item);
+ }
+ }
+}
+
+/// Computes the hash for the HIR of the full crate.
+/// This hash will then be part of the `crate_hash`, which is stored in the metadata.
+fn compute_hir_hash(
+ tcx: TyCtxt<'_>,
+ owners: &IndexVec<LocalDefId, hir::MaybeOwner<&hir::OwnerInfo<'_>>>,
+) -> Fingerprint {
+ let mut hir_body_nodes: Vec<_> = owners
+ .iter_enumerated()
+ .filter_map(|(def_id, info)| {
+ let info = info.as_owner()?;
+ let def_path_hash = tcx.hir().def_path_hash(def_id);
+ Some((def_path_hash, info))
+ })
+ .collect();
+ hir_body_nodes.sort_unstable_by_key(|bn| bn.0);
+
+ tcx.with_stable_hashing_context(|mut hcx| {
+ let mut stable_hasher = StableHasher::new();
+ hir_body_nodes.hash_stable(&mut hcx, &mut stable_hasher);
+ stable_hasher.finish()
+ })
+}
+
+pub fn lower_to_hir<'hir>(tcx: TyCtxt<'hir>, (): ()) -> hir::Crate<'hir> {
+ let sess = tcx.sess;
+ let krate = tcx.untracked_crate.steal();
+ let mut resolver = tcx.resolver_for_lowering(()).steal();
+
+ let ast_index = index_crate(&resolver.node_id_to_def_id, &krate);
+ let mut owners = IndexVec::from_fn_n(
+ |_| hir::MaybeOwner::Phantom,
+ tcx.definitions_untracked().def_index_count(),
+ );
+
+ for def_id in ast_index.indices() {
+ item::ItemLowerer {
+ tcx,
+ resolver: &mut resolver,
+ ast_index: &ast_index,
+ owners: &mut owners,
+ }
+ .lower_node(def_id);
+ }
+
+ // Drop AST to free memory
+ std::mem::drop(ast_index);
+ sess.time("drop_ast", || std::mem::drop(krate));
+
+ // Discard hygiene data, which isn't required after lowering to HIR.
+ if !sess.opts.unstable_opts.keep_hygiene_data {
+ rustc_span::hygiene::clear_syntax_context_map();
+ }
+
+ let hir_hash = compute_hir_hash(tcx, &owners);
+ hir::Crate { owners, hir_hash }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+enum ParamMode {
+ /// Any path in a type context.
+ Explicit,
+ /// Path in a type definition, where the anonymous lifetime `'_` is not allowed.
+ ExplicitNamed,
+ /// The `module::Type` in `module::Type::method` in an expression.
+ Optional,
+}
+
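+/// Whether parenthesized generic arguments, as in the illustrative
+/// `Fn(u8) -> u8` sugar, are acceptable in the position being lowered.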
+enum ParenthesizedGenericArgs {
+ Ok,
+ Err,
+}
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ fn create_def(
+ &mut self,
+ parent: LocalDefId,
+ node_id: ast::NodeId,
+ data: DefPathData,
+ ) -> LocalDefId {
+ debug_assert_ne!(node_id, ast::DUMMY_NODE_ID);
+ assert!(
+ self.opt_local_def_id(node_id).is_none(),
+ "adding a def'n for node-id {:?} and data {:?} but a previous def'n exists: {:?}",
+ node_id,
+ data,
+ self.tcx.hir().def_key(self.local_def_id(node_id)),
+ );
+
+ let def_id = self.tcx.create_def(parent, data);
+
+ debug!("create_def: def_id_to_node_id[{:?}] <-> {:?}", def_id, node_id);
+ self.resolver.node_id_to_def_id.insert(node_id, def_id);
+
+ def_id
+ }
+
+ fn next_node_id(&mut self) -> NodeId {
+ let start = self.resolver.next_node_id;
+ let next = start.as_u32().checked_add(1).expect("input too large; ran out of NodeIds");
+ self.resolver.next_node_id = ast::NodeId::from_u32(next);
+ start
+ }
+
+ /// Given the id of some node in the AST, finds the `LocalDefId` associated with it by the name
+ /// resolver (if any), after applying any remapping from `get_remapped_def_id`.
+ ///
+ /// For example, in a function like `fn foo<'a>(x: &'a u32)`,
+ /// invoking with the id from the `ast::Lifetime` node found inside
+ /// the `&'a u32` type would return the `LocalDefId` of the
+ /// `'a` parameter declared on `foo`.
+ ///
+ /// This function also applies remapping from `get_remapped_def_id`.
+ /// These are used when synthesizing opaque types from `-> impl Trait` return types and so forth.
+ /// For example, in a function like `fn foo<'a>() -> impl Debug + 'a`,
+ /// we would create an opaque type `type FooReturn<'a1> = impl Debug + 'a1`.
+ /// When lowering the `Debug + 'a` bounds, we add a remapping to map `'a` to `'a1`.
+ fn opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
+ self.resolver
+ .node_id_to_def_id
+ .get(&node)
+ .map(|local_def_id| self.resolver.get_remapped_def_id(*local_def_id))
+ }
+
+ fn local_def_id(&self, node: NodeId) -> LocalDefId {
+ self.opt_local_def_id(node).unwrap_or_else(|| panic!("no entry for node id: `{:?}`", node))
+ }
+
+ /// Freshen the `LoweringContext` and ready it to lower a nested item.
+ /// The lowered item is registered into `self.children`.
+ ///
+ /// This function sets up `HirId` lowering infrastructure,
+ /// and stashes the shared mutable state to avoid pollution by the closure.
+ #[instrument(level = "debug", skip(self, f))]
+ fn with_hir_id_owner(
+ &mut self,
+ owner: NodeId,
+ f: impl FnOnce(&mut Self) -> hir::OwnerNode<'hir>,
+ ) {
+ let def_id = self.local_def_id(owner);
+
+ let current_attrs = std::mem::take(&mut self.attrs);
+ let current_bodies = std::mem::take(&mut self.bodies);
+ let current_node_ids = std::mem::take(&mut self.node_id_to_local_id);
+ let current_id_to_def_id = std::mem::take(&mut self.local_id_to_def_id);
+ let current_trait_map = std::mem::take(&mut self.trait_map);
+ let current_owner = std::mem::replace(&mut self.current_hir_id_owner, def_id);
+ let current_local_counter =
+ std::mem::replace(&mut self.item_local_id_counter, hir::ItemLocalId::new(1));
+ let current_impl_trait_defs = std::mem::take(&mut self.impl_trait_defs);
+ let current_impl_trait_bounds = std::mem::take(&mut self.impl_trait_bounds);
+
+        // Do not reset `next_node_id` and `node_id_to_def_id`:
+        // we want `f` to be able to refer to the `LocalDefId`s that the caller created,
+        // and the caller to refer to some of the subdefinitions' nodes' `LocalDefId`s.
+
+ // Always allocate the first `HirId` for the owner itself.
+ let _old = self.node_id_to_local_id.insert(owner, hir::ItemLocalId::new(0));
+ debug_assert_eq!(_old, None);
+
+ let item = f(self);
+ debug_assert_eq!(def_id, item.def_id());
+ // `f` should have consumed all the elements in these vectors when constructing `item`.
+ debug_assert!(self.impl_trait_defs.is_empty());
+ debug_assert!(self.impl_trait_bounds.is_empty());
+ let info = self.make_owner_info(item);
+
+ self.attrs = current_attrs;
+ self.bodies = current_bodies;
+ self.node_id_to_local_id = current_node_ids;
+ self.local_id_to_def_id = current_id_to_def_id;
+ self.trait_map = current_trait_map;
+ self.current_hir_id_owner = current_owner;
+ self.item_local_id_counter = current_local_counter;
+ self.impl_trait_defs = current_impl_trait_defs;
+ self.impl_trait_bounds = current_impl_trait_bounds;
+
+ let _old = self.children.insert(def_id, hir::MaybeOwner::Owner(info));
+ debug_assert!(_old.is_none())
+ }
+
+ /// Installs the remapping `remap` in scope while `f` is being executed.
+ /// This causes references to the `LocalDefId` keys to be changed to
+ /// refer to the values instead.
+ ///
+ /// The remapping is used when one piece of AST expands to multiple
+ /// pieces of HIR. For example, the function `fn foo<'a>(...) -> impl Debug + 'a`,
+ /// expands to both a function definition (`foo`) and a TAIT for the return value,
+ /// both of which have a lifetime parameter `'a`. The remapping allows us to
+ /// rewrite the `'a` in the return value to refer to the
+ /// `'a` declared on the TAIT, instead of the function.
+ fn with_remapping<R>(
+ &mut self,
+ remap: FxHashMap<LocalDefId, LocalDefId>,
+ f: impl FnOnce(&mut Self) -> R,
+ ) -> R {
+ self.resolver.generics_def_id_map.push(remap);
+ let res = f(self);
+ self.resolver.generics_def_id_map.pop();
+ res
+ }
+
+ fn make_owner_info(&mut self, node: hir::OwnerNode<'hir>) -> &'hir hir::OwnerInfo<'hir> {
+ let attrs = std::mem::take(&mut self.attrs);
+ let mut bodies = std::mem::take(&mut self.bodies);
+ let local_id_to_def_id = std::mem::take(&mut self.local_id_to_def_id);
+ let trait_map = std::mem::take(&mut self.trait_map);
+
+ #[cfg(debug_assertions)]
+ for (id, attrs) in attrs.iter() {
+ // Verify that we do not store empty slices in the map.
+ if attrs.is_empty() {
+ panic!("Stored empty attributes for {:?}", id);
+ }
+ }
+
+ bodies.sort_by_key(|(k, _)| *k);
+ let bodies = SortedMap::from_presorted_elements(bodies);
+ let (hash_including_bodies, hash_without_bodies) = self.hash_owner(node, &bodies);
+ let (nodes, parenting) =
+ index::index_hir(self.tcx.sess, &*self.tcx.definitions_untracked(), node, &bodies);
+ let nodes = hir::OwnerNodes {
+ hash_including_bodies,
+ hash_without_bodies,
+ nodes,
+ bodies,
+ local_id_to_def_id,
+ };
+ let attrs = {
+ let hash = self.tcx.with_stable_hashing_context(|mut hcx| {
+ let mut stable_hasher = StableHasher::new();
+ attrs.hash_stable(&mut hcx, &mut stable_hasher);
+ stable_hasher.finish()
+ });
+ hir::AttributeMap { map: attrs, hash }
+ };
+
+ self.arena.alloc(hir::OwnerInfo { nodes, parenting, attrs, trait_map })
+ }
+
+    /// Hashes the HIR node twice: once deeply and once shallowly. This makes it possible to
+    /// differentiate queries that depend on the full HIR tree from those that only depend on
+    /// the item signature.
+ fn hash_owner(
+ &mut self,
+ node: hir::OwnerNode<'hir>,
+ bodies: &SortedMap<hir::ItemLocalId, &'hir hir::Body<'hir>>,
+ ) -> (Fingerprint, Fingerprint) {
+ self.tcx.with_stable_hashing_context(|mut hcx| {
+ let mut stable_hasher = StableHasher::new();
+ hcx.with_hir_bodies(true, node.def_id(), bodies, |hcx| {
+ node.hash_stable(hcx, &mut stable_hasher)
+ });
+ let hash_including_bodies = stable_hasher.finish();
+ let mut stable_hasher = StableHasher::new();
+ hcx.with_hir_bodies(false, node.def_id(), bodies, |hcx| {
+ node.hash_stable(hcx, &mut stable_hasher)
+ });
+ let hash_without_bodies = stable_hasher.finish();
+ (hash_including_bodies, hash_without_bodies)
+ })
+ }
+
+ /// This method allocates a new `HirId` for the given `NodeId` and stores it in
+ /// the `LoweringContext`'s `NodeId => HirId` map.
+ /// Take care not to call this method if the resulting `HirId` is then not
+ /// actually used in the HIR, as that would trigger an assertion in the
+ /// `HirIdValidator` later on, which makes sure that all `NodeId`s got mapped
+ /// properly. Calling the method twice with the same `NodeId` is fine though.
+ fn lower_node_id(&mut self, ast_node_id: NodeId) -> hir::HirId {
+ assert_ne!(ast_node_id, DUMMY_NODE_ID);
+
+ match self.node_id_to_local_id.entry(ast_node_id) {
+ Entry::Occupied(o) => {
+ hir::HirId { owner: self.current_hir_id_owner, local_id: *o.get() }
+ }
+ Entry::Vacant(v) => {
+ // Generate a new `HirId`.
+ let owner = self.current_hir_id_owner;
+ let local_id = self.item_local_id_counter;
+ let hir_id = hir::HirId { owner, local_id };
+
+ v.insert(local_id);
+ self.item_local_id_counter.increment_by(1);
+
+ assert_ne!(local_id, hir::ItemLocalId::new(0));
+ if let Some(def_id) = self.opt_local_def_id(ast_node_id) {
+                // Do not override a `MaybeOwner::Owner` that may already be here.
+ self.children.entry(def_id).or_insert(hir::MaybeOwner::NonOwner(hir_id));
+ self.local_id_to_def_id.insert(local_id, def_id);
+ }
+
+ if let Some(traits) = self.resolver.trait_map.remove(&ast_node_id) {
+ self.trait_map.insert(hir_id.local_id, traits.into_boxed_slice());
+ }
+
+ hir_id
+ }
+ }
+ }
+
+ /// Generate a new `HirId` without a backing `NodeId`.
+ fn next_id(&mut self) -> hir::HirId {
+ let owner = self.current_hir_id_owner;
+ let local_id = self.item_local_id_counter;
+ assert_ne!(local_id, hir::ItemLocalId::new(0));
+ self.item_local_id_counter.increment_by(1);
+ hir::HirId { owner, local_id }
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn lower_res(&mut self, res: Res<NodeId>) -> Res {
+ let res: Result<Res, ()> = res.apply_id(|id| {
+ let owner = self.current_hir_id_owner;
+ let local_id = self.node_id_to_local_id.get(&id).copied().ok_or(())?;
+ Ok(hir::HirId { owner, local_id })
+ });
+ trace!(?res);
+
+ // We may fail to find a HirId when the Res points to a Local from an enclosing HIR owner.
+ // This can happen when trying to lower the return type `x` in erroneous code like
+ // async fn foo(x: u8) -> x {}
+ // In that case, `x` is lowered as a function parameter, and the return type is lowered as
+ // an opaque type as a synthesized HIR owner.
+ res.unwrap_or(Res::Err)
+ }
+
+ fn expect_full_res(&mut self, id: NodeId) -> Res<NodeId> {
+ self.resolver.get_partial_res(id).map_or(Res::Err, |pr| {
+ if pr.unresolved_segments() != 0 {
+ panic!("path not fully resolved: {:?}", pr);
+ }
+ pr.base_res()
+ })
+ }
+
+ fn expect_full_res_from_use(&mut self, id: NodeId) -> impl Iterator<Item = Res<NodeId>> {
+ self.resolver.get_import_res(id).present_items()
+ }
+
+ fn diagnostic(&self) -> &Handler {
+ self.tcx.sess.diagnostic()
+ }
+
+ /// Reuses the span but adds information like the kind of the desugaring and features that are
+ /// allowed inside this span.
+ fn mark_span_with_reason(
+ &self,
+ reason: DesugaringKind,
+ span: Span,
+ allow_internal_unstable: Option<Lrc<[Symbol]>>,
+ ) -> Span {
+ self.tcx.with_stable_hashing_context(|hcx| {
+ span.mark_with_reason(allow_internal_unstable, reason, self.tcx.sess.edition(), hcx)
+ })
+ }
+
+ /// Intercept all spans entering HIR.
+ /// Mark a span as relative to the current owning item.
+ fn lower_span(&self, span: Span) -> Span {
+ if self.tcx.sess.opts.unstable_opts.incremental_relative_spans {
+ span.with_parent(Some(self.current_hir_id_owner))
+ } else {
+ // Do not make spans relative when not using incremental compilation.
+ span
+ }
+ }
+
+ fn lower_ident(&self, ident: Ident) -> Ident {
+ Ident::new(ident.name, self.lower_span(ident.span))
+ }
+
+ /// Converts a lifetime into a new generic parameter.
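+    /// (Illustrative: a named lifetime resolved as `Param` becomes an explicit
+    /// parameter, while an elided one resolved as `Fresh` becomes a new, elided
+    /// parameter with a freshly created `LocalDefId`.)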
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lifetime_res_to_generic_param(
+ &mut self,
+ ident: Ident,
+ node_id: NodeId,
+ res: LifetimeRes,
+ ) -> Option<hir::GenericParam<'hir>> {
+ let (name, kind) = match res {
+ LifetimeRes::Param { .. } => {
+ (hir::ParamName::Plain(ident), hir::LifetimeParamKind::Explicit)
+ }
+ LifetimeRes::Fresh { param, .. } => {
+ // Late resolution delegates to us the creation of the `LocalDefId`.
+ let _def_id = self.create_def(
+ self.current_hir_id_owner,
+ param,
+ DefPathData::LifetimeNs(kw::UnderscoreLifetime),
+ );
+ debug!(?_def_id);
+
+ (hir::ParamName::Fresh, hir::LifetimeParamKind::Elided)
+ }
+ LifetimeRes::Static | LifetimeRes::Error => return None,
+ res => panic!(
+ "Unexpected lifetime resolution {:?} for {:?} at {:?}",
+ res, ident, ident.span
+ ),
+ };
+ let hir_id = self.lower_node_id(node_id);
+ Some(hir::GenericParam {
+ hir_id,
+ name,
+ span: self.lower_span(ident.span),
+ pure_wrt_drop: false,
+ kind: hir::GenericParamKind::Lifetime { kind },
+ colon_span: None,
+ })
+ }
+
+ /// Lowers a lifetime binder that defines `generic_params`, returning the corresponding HIR
+ /// nodes. The returned list includes any "extra" lifetime parameters that were added by the
+ /// name resolver owing to lifetime elision; this also populates the resolver's node-id->def-id
+    /// map, so that later calls to `opt_local_def_id` that refer to these extra lifetime
+ /// parameters will be successful.
+ #[tracing::instrument(level = "debug", skip(self))]
+ #[inline]
+ fn lower_lifetime_binder(
+ &mut self,
+ binder: NodeId,
+ generic_params: &[GenericParam],
+ ) -> &'hir [hir::GenericParam<'hir>] {
+ let mut generic_params: Vec<_> = self.lower_generic_params_mut(generic_params).collect();
+ let extra_lifetimes = self.resolver.take_extra_lifetime_params(binder);
+ debug!(?extra_lifetimes);
+ generic_params.extend(extra_lifetimes.into_iter().filter_map(|(ident, node_id, res)| {
+ self.lifetime_res_to_generic_param(ident, node_id, res)
+ }));
+ let generic_params = self.arena.alloc_from_iter(generic_params);
+ debug!(?generic_params);
+
+ generic_params
+ }
+
+ fn with_dyn_type_scope<T>(&mut self, in_scope: bool, f: impl FnOnce(&mut Self) -> T) -> T {
+ let was_in_dyn_type = self.is_in_dyn_type;
+ self.is_in_dyn_type = in_scope;
+
+ let result = f(self);
+
+ self.is_in_dyn_type = was_in_dyn_type;
+
+ result
+ }
+
+ fn with_new_scopes<T>(&mut self, f: impl FnOnce(&mut Self) -> T) -> T {
+ let was_in_loop_condition = self.is_in_loop_condition;
+ self.is_in_loop_condition = false;
+
+ let catch_scope = self.catch_scope.take();
+ let loop_scope = self.loop_scope.take();
+ let ret = f(self);
+ self.catch_scope = catch_scope;
+ self.loop_scope = loop_scope;
+
+ self.is_in_loop_condition = was_in_loop_condition;
+
+ ret
+ }
+
+ fn lower_attrs(&mut self, id: hir::HirId, attrs: &[Attribute]) -> Option<&'hir [Attribute]> {
+ if attrs.is_empty() {
+ None
+ } else {
+ debug_assert_eq!(id.owner, self.current_hir_id_owner);
+ let ret = self.arena.alloc_from_iter(attrs.iter().map(|a| self.lower_attr(a)));
+ debug_assert!(!ret.is_empty());
+ self.attrs.insert(id.local_id, ret);
+ Some(ret)
+ }
+ }
+
+ fn lower_attr(&self, attr: &Attribute) -> Attribute {
+ // Note that we explicitly do not walk the path. Since we don't really
+        // lower attributes (we use the AST version), there is nowhere to keep
+        // the `HirId`s. We don't actually need an HIR version of attributes anyway.
+ // Tokens are also not needed after macro expansion and parsing.
+ let kind = match attr.kind {
+ AttrKind::Normal(ref item, _) => AttrKind::Normal(
+ AttrItem {
+ path: item.path.clone(),
+ args: self.lower_mac_args(&item.args),
+ tokens: None,
+ },
+ None,
+ ),
+ AttrKind::DocComment(comment_kind, data) => AttrKind::DocComment(comment_kind, data),
+ };
+
+ Attribute { kind, id: attr.id, style: attr.style, span: self.lower_span(attr.span) }
+ }
+
+ fn alias_attrs(&mut self, id: hir::HirId, target_id: hir::HirId) {
+ debug_assert_eq!(id.owner, self.current_hir_id_owner);
+ debug_assert_eq!(target_id.owner, self.current_hir_id_owner);
+ if let Some(&a) = self.attrs.get(&target_id.local_id) {
+ debug_assert!(!a.is_empty());
+ self.attrs.insert(id.local_id, a);
+ }
+ }
+
+ fn lower_mac_args(&self, args: &MacArgs) -> MacArgs {
+ match *args {
+ MacArgs::Empty => MacArgs::Empty,
+ MacArgs::Delimited(dspan, delim, ref tokens) => {
+ // This is either a non-key-value attribute, or a `macro_rules!` body.
+                // We either do not have any nonterminals present (in the case of an
+                // attribute), or we have tokens available for all nonterminals (in the
+                // case of a nested `macro_rules!`), e.g.:
+ //
+ // ```rust
+ // macro_rules! outer {
+ // ($e:expr) => {
+ // macro_rules! inner {
+ // () => { $e }
+ // }
+ // }
+ // }
+ // ```
+ //
+ // In both cases, we don't want to synthesize any tokens
+ MacArgs::Delimited(dspan, delim, tokens.flattened())
+ }
+ // This is an inert key-value attribute - it will never be visible to macros
+ // after it gets lowered to HIR. Therefore, we can extract literals to handle
+ // nonterminals in `#[doc]` (e.g. `#[doc = $e]`).
+ MacArgs::Eq(eq_span, MacArgsEq::Ast(ref expr)) => {
+ // In valid code the value always ends up as a single literal. Otherwise, a dummy
+ // literal suffices because the error is handled elsewhere.
+ let lit = if let ExprKind::Lit(lit) = &expr.kind {
+ lit.clone()
+ } else {
+ Lit {
+ token: token::Lit::new(token::LitKind::Err, kw::Empty, None),
+ kind: LitKind::Err(kw::Empty),
+ span: DUMMY_SP,
+ }
+ };
+ MacArgs::Eq(eq_span, MacArgsEq::Hir(lit))
+ }
+ MacArgs::Eq(_, MacArgsEq::Hir(ref lit)) => {
+ unreachable!("in literal form when lowering mac args eq: {:?}", lit)
+ }
+ }
+ }
+
+ /// Given an associated type constraint like one of these:
+ ///
+ /// ```ignore (illustrative)
+ /// T: Iterator<Item: Debug>
+ /// ^^^^^^^^^^^
+ /// T: Iterator<Item = Debug>
+ /// ^^^^^^^^^^^^
+ /// ```
+ ///
+ /// returns a `hir::TypeBinding` representing `Item`.
+ #[instrument(level = "debug", skip(self))]
+ fn lower_assoc_ty_constraint(
+ &mut self,
+ constraint: &AssocConstraint,
+ itctx: ImplTraitContext,
+ ) -> hir::TypeBinding<'hir> {
+ debug!("lower_assoc_ty_constraint(constraint={:?}, itctx={:?})", constraint, itctx);
+        // Lower the generic arguments of the identifier in the constraint.
+ let gen_args = if let Some(ref gen_args) = constraint.gen_args {
+ let gen_args_ctor = match gen_args {
+ GenericArgs::AngleBracketed(ref data) => {
+ self.lower_angle_bracketed_parameter_data(data, ParamMode::Explicit, itctx).0
+ }
+ GenericArgs::Parenthesized(ref data) => {
+ self.emit_bad_parenthesized_trait_in_assoc_ty(data);
+ self.lower_angle_bracketed_parameter_data(
+ &data.as_angle_bracketed_args(),
+ ParamMode::Explicit,
+ itctx,
+ )
+ .0
+ }
+ };
+ gen_args_ctor.into_generic_args(self)
+ } else {
+ self.arena.alloc(hir::GenericArgs::none())
+ };
+
+ let kind = match constraint.kind {
+ AssocConstraintKind::Equality { ref term } => {
+ let term = match term {
+ Term::Ty(ref ty) => self.lower_ty(ty, itctx).into(),
+ Term::Const(ref c) => self.lower_anon_const(c).into(),
+ };
+ hir::TypeBindingKind::Equality { term }
+ }
+ AssocConstraintKind::Bound { ref bounds } => {
+ // Piggy-back on the `impl Trait` context to figure out the correct behavior.
+ let (desugar_to_impl_trait, itctx) = match itctx {
+ // We are in the return position:
+ //
+ // fn foo() -> impl Iterator<Item: Debug>
+ //
+ // so desugar to
+ //
+ // fn foo() -> impl Iterator<Item = impl Debug>
+ ImplTraitContext::ReturnPositionOpaqueTy { .. }
+ | ImplTraitContext::TypeAliasesOpaqueTy { .. } => (true, itctx),
+
+ // We are in the argument position, but within a dyn type:
+ //
+ // fn foo(x: dyn Iterator<Item: Debug>)
+ //
+ // so desugar to
+ //
+ // fn foo(x: dyn Iterator<Item = impl Debug>)
+ ImplTraitContext::Universal if self.is_in_dyn_type => (true, itctx),
+
+ // In `type Foo = dyn Iterator<Item: Debug>` we desugar to
+ // `type Foo = dyn Iterator<Item = impl Debug>` but we have to override the
+ // "impl trait context" to permit `impl Debug` in this position (it desugars
+ // then to an opaque type).
+ //
+ // FIXME: this is only needed until `impl Trait` is allowed in type aliases.
+ ImplTraitContext::Disallowed(_) if self.is_in_dyn_type => {
+ (true, ImplTraitContext::TypeAliasesOpaqueTy)
+ }
+
+ // We are in the parameter position, but not within a dyn type:
+ //
+ // fn foo(x: impl Iterator<Item: Debug>)
+ //
+ // so we leave it as is and this gets expanded in astconv to a bound like
+ // `<T as Iterator>::Item: Debug` where `T` is the type parameter for the
+ // `impl Iterator`.
+ _ => (false, itctx),
+ };
+
+ if desugar_to_impl_trait {
+ // Desugar `AssocTy: Bounds` into `AssocTy = impl Bounds`. We do this by
+ // constructing the HIR for `impl bounds...` and then lowering that.
+
+ let parent_def_id = self.current_hir_id_owner;
+ let impl_trait_node_id = self.next_node_id();
+ self.create_def(parent_def_id, impl_trait_node_id, DefPathData::ImplTrait);
+
+ self.with_dyn_type_scope(false, |this| {
+ let node_id = this.next_node_id();
+ let ty = this.lower_ty(
+ &Ty {
+ id: node_id,
+ kind: TyKind::ImplTrait(impl_trait_node_id, bounds.clone()),
+ span: this.lower_span(constraint.span),
+ tokens: None,
+ },
+ itctx,
+ );
+
+ hir::TypeBindingKind::Equality { term: ty.into() }
+ })
+ } else {
+                    // Desugar `AssocTy: Bounds` into a type binding, which
+                    // later desugars into a trait predicate.
+ let bounds = self.lower_param_bounds(bounds, itctx);
+
+ hir::TypeBindingKind::Constraint { bounds }
+ }
+ }
+ };
+
+ hir::TypeBinding {
+ hir_id: self.lower_node_id(constraint.id),
+ ident: self.lower_ident(constraint.ident),
+ gen_args,
+ kind,
+ span: self.lower_span(constraint.span),
+ }
+ }
+
+ fn emit_bad_parenthesized_trait_in_assoc_ty(&self, data: &ParenthesizedArgs) {
+ let mut err = self.tcx.sess.struct_span_err(
+ data.span,
+ "parenthesized generic arguments cannot be used in associated type constraints",
+ );
+ // Suggest removing empty parentheses: "Trait()" -> "Trait"
+ if data.inputs.is_empty() {
+ let parentheses_span =
+ data.inputs_span.shrink_to_lo().to(data.inputs_span.shrink_to_hi());
+ err.multipart_suggestion(
+ "remove these parentheses",
+ vec![(parentheses_span, String::new())],
+ Applicability::MaybeIncorrect,
+ );
+ }
+        // Suggest replacing the parentheses with angle brackets: `Trait(params...)` to `Trait<params...>`
+ else {
+ // Start of parameters to the 1st argument
+ let open_param = data.inputs_span.shrink_to_lo().to(data
+ .inputs
+ .first()
+ .unwrap()
+ .span
+ .shrink_to_lo());
+ // End of last argument to end of parameters
+ let close_param =
+ data.inputs.last().unwrap().span.shrink_to_hi().to(data.inputs_span.shrink_to_hi());
+ err.multipart_suggestion(
+                "use angle brackets instead",
+ vec![(open_param, String::from("<")), (close_param, String::from(">"))],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn lower_generic_arg(
+ &mut self,
+ arg: &ast::GenericArg,
+ itctx: ImplTraitContext,
+ ) -> hir::GenericArg<'hir> {
+ match arg {
+ ast::GenericArg::Lifetime(lt) => GenericArg::Lifetime(self.lower_lifetime(&lt)),
+ ast::GenericArg::Type(ty) => {
+ match ty.kind {
+ TyKind::Infer if self.tcx.features().generic_arg_infer => {
+ return GenericArg::Infer(hir::InferArg {
+ hir_id: self.lower_node_id(ty.id),
+ span: self.lower_span(ty.span),
+ });
+ }
+ // We parse const arguments as path types as we cannot distinguish them during
+ // parsing. We try to resolve that ambiguity by attempting resolution in both the
+ // type and value namespaces. If we resolved the path in the value namespace, we
+ // transform it into a generic const argument.
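+                    //
+                    // Illustrative sketch (names hypothetical): given
+                    // `fn f<const N: usize>()`, a call `f::<M>()` parses `M` as a path
+                    // type; if `M` resolves in the value namespace (say to
+                    // `const M: usize = 3;`), it is lowered as a const argument instead.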
+ TyKind::Path(ref qself, ref path) => {
+ if let Some(partial_res) = self.resolver.get_partial_res(ty.id) {
+ let res = partial_res.base_res();
+ if !res.matches_ns(Namespace::TypeNS) {
+ debug!(
+ "lower_generic_arg: Lowering type argument as const argument: {:?}",
+ ty,
+ );
+
+                            // Construct an `AnonConst` where the expression is the "ty"'s path.
+
+ let parent_def_id = self.current_hir_id_owner;
+ let node_id = self.next_node_id();
+
+ // Add a definition for the in-band const def.
+ self.create_def(parent_def_id, node_id, DefPathData::AnonConst);
+
+ let span = self.lower_span(ty.span);
+ let path_expr = Expr {
+ id: ty.id,
+ kind: ExprKind::Path(qself.clone(), path.clone()),
+ span,
+ attrs: AttrVec::new(),
+ tokens: None,
+ };
+
+ let ct = self.with_new_scopes(|this| hir::AnonConst {
+ hir_id: this.lower_node_id(node_id),
+ body: this.lower_const_body(path_expr.span, Some(&path_expr)),
+ });
+ return GenericArg::Const(ConstArg { value: ct, span });
+ }
+ }
+ }
+ _ => {}
+ }
+ GenericArg::Type(self.lower_ty_direct(&ty, itctx))
+ }
+ ast::GenericArg::Const(ct) => GenericArg::Const(ConstArg {
+ value: self.lower_anon_const(&ct),
+ span: self.lower_span(ct.value.span),
+ }),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn lower_ty(&mut self, t: &Ty, itctx: ImplTraitContext) -> &'hir hir::Ty<'hir> {
+ self.arena.alloc(self.lower_ty_direct(t, itctx))
+ }
+
+ fn lower_path_ty(
+ &mut self,
+ t: &Ty,
+ qself: &Option<QSelf>,
+ path: &Path,
+ param_mode: ParamMode,
+ itctx: ImplTraitContext,
+ ) -> hir::Ty<'hir> {
+ // Check whether we should interpret this as a bare trait object.
+ // This check mirrors the one in late resolution. We only introduce this special case in
+        // the rare occurrence that we need to lower `Fresh` anonymous lifetimes.
+ // The other cases when a qpath should be opportunistically made a trait object are handled
+ // by `ty_path`.
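+        //
+        // Illustrative: a bare trait path such as the `SomeTrait` in
+        // `fn f(x: &SomeTrait)`, whose elided reference lifetime resolves to a
+        // `Fresh` anonymous lifetime, is lowered here as a trait object with an
+        // elided lifetime bound.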
+ if qself.is_none()
+ && let Some(partial_res) = self.resolver.get_partial_res(t.id)
+ && partial_res.unresolved_segments() == 0
+ && let Res::Def(DefKind::Trait | DefKind::TraitAlias, _) = partial_res.base_res()
+ {
+ let (bounds, lifetime_bound) = self.with_dyn_type_scope(true, |this| {
+ let bound = this.lower_poly_trait_ref(
+ &PolyTraitRef {
+ bound_generic_params: vec![],
+ trait_ref: TraitRef { path: path.clone(), ref_id: t.id },
+ span: t.span
+ },
+ itctx,
+ );
+ let bounds = this.arena.alloc_from_iter([bound]);
+ let lifetime_bound = this.elided_dyn_bound(t.span);
+ (bounds, lifetime_bound)
+ });
+ let kind = hir::TyKind::TraitObject(bounds, lifetime_bound, TraitObjectSyntax::None);
+ return hir::Ty { kind, span: self.lower_span(t.span), hir_id: self.next_id() };
+ }
+
+ let id = self.lower_node_id(t.id);
+ let qpath = self.lower_qpath(t.id, qself, path, param_mode, itctx);
+ self.ty_path(id, t.span, qpath)
+ }
+
+ fn ty(&mut self, span: Span, kind: hir::TyKind<'hir>) -> hir::Ty<'hir> {
+ hir::Ty { hir_id: self.next_id(), kind, span: self.lower_span(span) }
+ }
+
+ fn ty_tup(&mut self, span: Span, tys: &'hir [hir::Ty<'hir>]) -> hir::Ty<'hir> {
+ self.ty(span, hir::TyKind::Tup(tys))
+ }
+
+ fn lower_ty_direct(&mut self, t: &Ty, itctx: ImplTraitContext) -> hir::Ty<'hir> {
+ let kind = match t.kind {
+ TyKind::Infer => hir::TyKind::Infer,
+ TyKind::Err => hir::TyKind::Err,
+ TyKind::Slice(ref ty) => hir::TyKind::Slice(self.lower_ty(ty, itctx)),
+ TyKind::Ptr(ref mt) => hir::TyKind::Ptr(self.lower_mt(mt, itctx)),
+ TyKind::Rptr(ref region, ref mt) => {
+ let region = region.unwrap_or_else(|| {
+ let id = if let Some(LifetimeRes::ElidedAnchor { start, end }) =
+ self.resolver.get_lifetime_res(t.id)
+ {
+ debug_assert_eq!(start.plus(1), end);
+ start
+ } else {
+ self.next_node_id()
+ };
+ let span = self.tcx.sess.source_map().next_point(t.span.shrink_to_lo());
+ Lifetime { ident: Ident::new(kw::UnderscoreLifetime, span), id }
+ });
+ let lifetime = self.lower_lifetime(&region);
+ hir::TyKind::Rptr(lifetime, self.lower_mt(mt, itctx))
+ }
+ TyKind::BareFn(ref f) => {
+ let generic_params = self.lower_lifetime_binder(t.id, &f.generic_params);
+ hir::TyKind::BareFn(self.arena.alloc(hir::BareFnTy {
+ generic_params,
+ unsafety: self.lower_unsafety(f.unsafety),
+ abi: self.lower_extern(f.ext),
+ decl: self.lower_fn_decl(&f.decl, None, FnDeclKind::Pointer, None),
+ param_names: self.lower_fn_params_to_names(&f.decl),
+ }))
+ }
+ TyKind::Never => hir::TyKind::Never,
+ TyKind::Tup(ref tys) => hir::TyKind::Tup(
+ self.arena.alloc_from_iter(tys.iter().map(|ty| self.lower_ty_direct(ty, itctx))),
+ ),
+ TyKind::Paren(ref ty) => {
+ return self.lower_ty_direct(ty, itctx);
+ }
+ TyKind::Path(ref qself, ref path) => {
+ return self.lower_path_ty(t, qself, path, ParamMode::Explicit, itctx);
+ }
+ TyKind::ImplicitSelf => {
+ let res = self.expect_full_res(t.id);
+ let res = self.lower_res(res);
+ hir::TyKind::Path(hir::QPath::Resolved(
+ None,
+ self.arena.alloc(hir::Path {
+ res,
+ segments: arena_vec![self; hir::PathSegment::from_ident(
+ Ident::with_dummy_span(kw::SelfUpper)
+ )],
+ span: self.lower_span(t.span),
+ }),
+ ))
+ }
+ TyKind::Array(ref ty, ref length) => {
+ hir::TyKind::Array(self.lower_ty(ty, itctx), self.lower_array_length(length))
+ }
+ TyKind::Typeof(ref expr) => hir::TyKind::Typeof(self.lower_anon_const(expr)),
+ TyKind::TraitObject(ref bounds, kind) => {
+ let mut lifetime_bound = None;
+ let (bounds, lifetime_bound) = self.with_dyn_type_scope(true, |this| {
+ let bounds =
+ this.arena.alloc_from_iter(bounds.iter().filter_map(
+ |bound| match *bound {
+ GenericBound::Trait(
+ ref ty,
+ TraitBoundModifier::None | TraitBoundModifier::MaybeConst,
+ ) => Some(this.lower_poly_trait_ref(ty, itctx)),
+ // `~const ?Bound` will cause an error during AST validation
+ // anyways, so treat it like `?Bound` as compilation proceeds.
+ GenericBound::Trait(
+ _,
+ TraitBoundModifier::Maybe | TraitBoundModifier::MaybeConstMaybe,
+ ) => None,
+ GenericBound::Outlives(ref lifetime) => {
+ if lifetime_bound.is_none() {
+ lifetime_bound = Some(this.lower_lifetime(lifetime));
+ }
+ None
+ }
+ },
+ ));
+ let lifetime_bound =
+ lifetime_bound.unwrap_or_else(|| this.elided_dyn_bound(t.span));
+ (bounds, lifetime_bound)
+ });
+ hir::TyKind::TraitObject(bounds, lifetime_bound, kind)
+ }
+ TyKind::ImplTrait(def_node_id, ref bounds) => {
+ let span = t.span;
+ match itctx {
+ ImplTraitContext::ReturnPositionOpaqueTy { origin } => {
+ self.lower_opaque_impl_trait(span, origin, def_node_id, bounds, itctx)
+ }
+ ImplTraitContext::TypeAliasesOpaqueTy => {
+ let nested_itctx = ImplTraitContext::TypeAliasesOpaqueTy;
+ self.lower_opaque_impl_trait(
+ span,
+ hir::OpaqueTyOrigin::TyAlias,
+ def_node_id,
+ bounds,
+ nested_itctx,
+ )
+ }
+ ImplTraitContext::Universal => {
+ let span = t.span;
+ let ident = Ident::from_str_and_span(&pprust::ty_to_string(t), span);
+ let (param, bounds, path) =
+ self.lower_generic_and_bounds(def_node_id, span, ident, bounds);
+ self.impl_trait_defs.push(param);
+ if let Some(bounds) = bounds {
+ self.impl_trait_bounds.push(bounds);
+ }
+ path
+ }
+ ImplTraitContext::Disallowed(position) => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ t.span,
+ E0562,
+ "`impl Trait` only allowed in function and inherent method return types, not in {}",
+ position
+ );
+ err.emit();
+ hir::TyKind::Err
+ }
+ }
+ }
+ TyKind::MacCall(_) => panic!("`TyKind::MacCall` should have been expanded by now"),
+ TyKind::CVarArgs => {
+ self.tcx.sess.delay_span_bug(
+ t.span,
+ "`TyKind::CVarArgs` should have been handled elsewhere",
+ );
+ hir::TyKind::Err
+ }
+ };
+
+ hir::Ty { kind, span: self.lower_span(t.span), hir_id: self.lower_node_id(t.id) }
+ }
+
+ /// Lowers a `ReturnPositionOpaqueTy` (`-> impl Trait`) or a `TypeAliasesOpaqueTy` (`type F =
+ /// impl Trait`): this creates the associated Opaque Type (TAIT) definition and then returns a
+ /// HIR type that references the TAIT.
+ ///
+ /// Given a function definition like:
+ ///
+    /// ```ignore (illustrative)
+ /// fn test<'a, T: Debug>(x: &'a T) -> impl Debug + 'a {
+ /// x
+ /// }
+ /// ```
+ ///
+ /// we will create a TAIT definition in the HIR like
+ ///
+    /// ```ignore (illustrative)
+ /// type TestReturn<'a, T, 'x> = impl Debug + 'x
+ /// ```
+ ///
+ /// and return a type like `TestReturn<'static, T, 'a>`, so that the function looks like:
+ ///
+    /// ```ignore (illustrative)
+ /// fn test<'a, T: Debug>(x: &'a T) -> TestReturn<'static, T, 'a>
+ /// ```
+ ///
+ /// Note the subtlety around type parameters! The new TAIT, `TestReturn`, inherits all the
+ /// type parameters from the function `test` (this is implemented in the query layer, they aren't
+ /// added explicitly in the HIR). But this includes all the lifetimes, and we only want to
+ /// capture the lifetimes that are referenced in the bounds. Therefore, we add *extra* lifetime parameters
+ /// for the lifetimes that get captured (`'x`, in our example above) and reference those.
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lower_opaque_impl_trait(
+ &mut self,
+ span: Span,
+ origin: hir::OpaqueTyOrigin,
+ opaque_ty_node_id: NodeId,
+ bounds: &GenericBounds,
+ itctx: ImplTraitContext,
+ ) -> hir::TyKind<'hir> {
+ // Make sure we know that some funky desugaring has been going on here.
+        // This is a first: code in other places, like `for`-loop desugaring,
+        // explicitly states that we don't want to track the desugaring there.
+ // Not tracking it makes lints in rustc and clippy very fragile, as
+ // frequently opened issues show.
+ let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::OpaqueTy, span, None);
+
+ let opaque_ty_def_id = self.local_def_id(opaque_ty_node_id);
+ debug!(?opaque_ty_def_id);
+
+ // Contains the new lifetime definitions created for the TAIT (if any).
+ let mut collected_lifetimes = Vec::new();
+
+ // If this came from a TAIT (as opposed to a function that returns an RPIT), we only want
+ // to capture the lifetimes that appear in the bounds. So visit the bounds to find out
+ // exactly which ones those are.
+ let lifetimes_to_remap = if origin == hir::OpaqueTyOrigin::TyAlias {
+ // in a TAIT like `type Foo<'a> = impl Foo<'a>`, we don't keep all the lifetime parameters
+ Vec::new()
+ } else {
+ // in fn return position, like the `fn test<'a>() -> impl Debug + 'a` example,
+ // we only keep the lifetimes that appear in the `impl Debug` itself:
+ lifetime_collector::lifetimes_in_bounds(&self.resolver, bounds)
+ };
+ debug!(?lifetimes_to_remap);
+
+ self.with_hir_id_owner(opaque_ty_node_id, |lctx| {
+ let mut new_remapping = FxHashMap::default();
+
+ // If this opaque type is only capturing a subset of the lifetimes (those that appear
+ // in bounds), then create the new lifetime parameters required and create a mapping
+ // from the old `'a` (on the function) to the new `'a` (on the opaque type).
+ collected_lifetimes = lctx.create_lifetime_defs(
+ opaque_ty_def_id,
+ &lifetimes_to_remap,
+ &mut new_remapping,
+ );
+ debug!(?collected_lifetimes);
+ debug!(?new_remapping);
+
+ // Install the remapping from old to new (if any):
+ lctx.with_remapping(new_remapping, |lctx| {
+                // This creates HIR lifetime definitions as `hir::GenericParam`; in the given
+                // example `type TestReturn<'a, T, 'x> = impl Debug + 'x`, it creates a collection
+                // containing `&['x]`.
+ let lifetime_defs = lctx.arena.alloc_from_iter(collected_lifetimes.iter().map(
+ |&(new_node_id, lifetime)| {
+ let hir_id = lctx.lower_node_id(new_node_id);
+ debug_assert_ne!(lctx.opt_local_def_id(new_node_id), None);
+
+ let (name, kind) = if lifetime.ident.name == kw::UnderscoreLifetime {
+ (hir::ParamName::Fresh, hir::LifetimeParamKind::Elided)
+ } else {
+ (
+ hir::ParamName::Plain(lifetime.ident),
+ hir::LifetimeParamKind::Explicit,
+ )
+ };
+
+ hir::GenericParam {
+ hir_id,
+ name,
+ span: lifetime.ident.span,
+ pure_wrt_drop: false,
+ kind: hir::GenericParamKind::Lifetime { kind },
+ colon_span: None,
+ }
+ },
+ ));
+ debug!(?lifetime_defs);
+
+ // Then when we lower the param bounds, references to 'a are remapped to 'a1, so we
+ // get back Debug + 'a1, which is suitable for use on the TAIT.
+ let hir_bounds = lctx.lower_param_bounds(bounds, itctx);
+ debug!(?hir_bounds);
+
+ let opaque_ty_item = hir::OpaqueTy {
+ generics: self.arena.alloc(hir::Generics {
+ params: lifetime_defs,
+ predicates: &[],
+ has_where_clause_predicates: false,
+ where_clause_span: lctx.lower_span(span),
+ span: lctx.lower_span(span),
+ }),
+ bounds: hir_bounds,
+ origin,
+ };
+ debug!(?opaque_ty_item);
+
+ lctx.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span)
+ })
+ });
+
+        // This creates HIR lifetime arguments as `hir::GenericArg`; in the given example
+        // `type TestReturn<'a, T, 'x> = impl Debug + 'x`, it creates a collection containing `&['x]`.
+ let lifetimes =
+ self.arena.alloc_from_iter(collected_lifetimes.into_iter().map(|(_, lifetime)| {
+ let id = self.next_node_id();
+ let span = lifetime.ident.span;
+
+ let ident = if lifetime.ident.name == kw::UnderscoreLifetime {
+ Ident::with_dummy_span(kw::UnderscoreLifetime)
+ } else {
+ lifetime.ident
+ };
+
+ let l = self.new_named_lifetime(lifetime.id, id, span, ident);
+ hir::GenericArg::Lifetime(l)
+ }));
+ debug!(?lifetimes);
+
+ // `impl Trait` now just becomes `Foo<'a, 'b, ..>`.
+ hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, lifetimes)
+ }
+
+    /// Registers a new opaque type with the proper `NodeId`s and
+    /// returns the lowered `OwnerNode` for the opaque type.
+ fn generate_opaque_type(
+ &mut self,
+ opaque_ty_id: LocalDefId,
+ opaque_ty_item: hir::OpaqueTy<'hir>,
+ span: Span,
+ opaque_ty_span: Span,
+ ) -> hir::OwnerNode<'hir> {
+ let opaque_ty_item_kind = hir::ItemKind::OpaqueTy(opaque_ty_item);
+        // Generate a `type Foo = impl Trait;` declaration.
+ trace!("registering opaque type with id {:#?}", opaque_ty_id);
+ let opaque_ty_item = hir::Item {
+ def_id: opaque_ty_id,
+ ident: Ident::empty(),
+ kind: opaque_ty_item_kind,
+ vis_span: self.lower_span(span.shrink_to_lo()),
+ span: self.lower_span(opaque_ty_span),
+ };
+ hir::OwnerNode::Item(self.arena.alloc(opaque_ty_item))
+ }
+
+    /// Given a `parent_def_id`, a list of `lifetimes_in_bounds`, and a `remapping` hash to be
+    /// filled, this function creates new definitions for the `Param` and `Fresh` lifetimes,
+    /// records each old-to-new pair in `remapping`, and returns a list of lifetimes to be
+    /// lowered afterwards.
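+    ///
+    /// For a hypothetical `fn f<'a>() -> impl Debug + 'a`, the `'a` in the bounds resolves to
+    /// the function's lifetime parameter, so a fresh `'a` definition is created under
+    /// `parent_def_id`, the pair `old 'a -> new 'a` is inserted into `remapping`, and
+    /// `(new_node_id, 'a)` is returned for later lowering.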
+ fn create_lifetime_defs(
+ &mut self,
+ parent_def_id: LocalDefId,
+ lifetimes_in_bounds: &[Lifetime],
+ remapping: &mut FxHashMap<LocalDefId, LocalDefId>,
+ ) -> Vec<(NodeId, Lifetime)> {
+ let mut result = Vec::new();
+
+ for lifetime in lifetimes_in_bounds {
+ let res = self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error);
+ debug!(?res);
+
+ match res {
+ LifetimeRes::Param { param: old_def_id, binder: _ } => {
+ if remapping.get(&old_def_id).is_none() {
+ let node_id = self.next_node_id();
+
+ let new_def_id = self.create_def(
+ parent_def_id,
+ node_id,
+ DefPathData::LifetimeNs(lifetime.ident.name),
+ );
+ remapping.insert(old_def_id, new_def_id);
+
+ result.push((node_id, *lifetime));
+ }
+ }
+
+ LifetimeRes::Fresh { param, binder: _ } => {
+ debug_assert_eq!(lifetime.ident.name, kw::UnderscoreLifetime);
+ if let Some(old_def_id) = self.opt_local_def_id(param) && remapping.get(&old_def_id).is_none() {
+ let node_id = self.next_node_id();
+
+ let new_def_id = self.create_def(
+ parent_def_id,
+ node_id,
+ DefPathData::LifetimeNs(kw::UnderscoreLifetime),
+ );
+ remapping.insert(old_def_id, new_def_id);
+
+ result.push((node_id, *lifetime));
+ }
+ }
+
+ LifetimeRes::Static | LifetimeRes::Error => {}
+
+ res => {
+ let bug_msg = format!(
+ "Unexpected lifetime resolution {:?} for {:?} at {:?}",
+ res, lifetime.ident, lifetime.ident.span
+ );
+ span_bug!(lifetime.ident.span, "{}", bug_msg);
+ }
+ }
+ }
+
+ result
+ }
+
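+    /// E.g., a hypothetical `fn f(x: u32, (a, b): (u8, u8), ...)` yields the names `x` and an
+    /// empty ident: patterns that are not plain identifiers get `kw::Empty` as their name, and
+    /// the trailing C-variadic `...` is skipped.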
+ fn lower_fn_params_to_names(&mut self, decl: &FnDecl) -> &'hir [Ident] {
+ // Skip the `...` (`CVarArgs`) trailing arguments from the AST,
+ // as they are not explicit in HIR/Ty function signatures.
+ // (instead, the `c_variadic` flag is set to `true`)
+ let mut inputs = &decl.inputs[..];
+ if decl.c_variadic() {
+ inputs = &inputs[..inputs.len() - 1];
+ }
+ self.arena.alloc_from_iter(inputs.iter().map(|param| match param.pat.kind {
+ PatKind::Ident(_, ident, _) => self.lower_ident(ident),
+ _ => Ident::new(kw::Empty, self.lower_span(param.pat.span)),
+ }))
+ }
+
+    // Lowers a function declaration.
+    //
+    // `decl`: the unlowered (AST) function declaration.
+    // `fn_node_id`: if `Some`, impl Trait arguments are lowered into generic parameters on the
+    //      `DefId` of the given node, otherwise impl Trait is disallowed. Must be `Some` if
+    //      `make_ret_async` is also `Some`.
+    // `kind`: the kind of function declaration being lowered. Among other things, this
+    //      determines whether `impl Trait` can be used in return position, guarding against
+    //      trait declarations and implementations where `impl Trait` is disallowed.
+    // `make_ret_async`: if `Some`, converts `-> T` into `-> impl Future<Output = T>` in the
+    //      return type. This is used for `async fn` declarations. The `NodeId` is the ID of the
+    //      return type `impl Trait` item.
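+    //
+    // For instance (an illustrative sketch, not actual compiler input), an
+    // `async fn f(x: &u8) -> u8` has its declaration lowered as if it were
+    // `fn f(x: &u8) -> impl Future<Output = u8>`, with the opaque type registered under the
+    // `NodeId` passed as `make_ret_async`.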
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lower_fn_decl(
+ &mut self,
+ decl: &FnDecl,
+ fn_node_id: Option<NodeId>,
+ kind: FnDeclKind,
+ make_ret_async: Option<NodeId>,
+ ) -> &'hir hir::FnDecl<'hir> {
+ let c_variadic = decl.c_variadic();
+
+ // Skip the `...` (`CVarArgs`) trailing arguments from the AST,
+ // as they are not explicit in HIR/Ty function signatures.
+ // (instead, the `c_variadic` flag is set to `true`)
+ let mut inputs = &decl.inputs[..];
+ if c_variadic {
+ inputs = &inputs[..inputs.len() - 1];
+ }
+ let inputs = self.arena.alloc_from_iter(inputs.iter().map(|param| {
+ if fn_node_id.is_some() {
+ self.lower_ty_direct(&param.ty, ImplTraitContext::Universal)
+ } else {
+ self.lower_ty_direct(
+ &param.ty,
+ ImplTraitContext::Disallowed(match kind {
+ FnDeclKind::Fn | FnDeclKind::Inherent => {
+ unreachable!("fn should allow in-band lifetimes")
+ }
+ FnDeclKind::ExternFn => ImplTraitPosition::ExternFnParam,
+ FnDeclKind::Closure => ImplTraitPosition::ClosureParam,
+ FnDeclKind::Pointer => ImplTraitPosition::PointerParam,
+ FnDeclKind::Trait => ImplTraitPosition::TraitParam,
+ FnDeclKind::Impl => ImplTraitPosition::ImplParam,
+ }),
+ )
+ }
+ }));
+
+ let output = if let Some(ret_id) = make_ret_async {
+ self.lower_async_fn_ret_ty(
+ &decl.output,
+ fn_node_id.expect("`make_ret_async` but no `fn_def_id`"),
+ ret_id,
+ )
+ } else {
+ match decl.output {
+ FnRetTy::Ty(ref ty) => {
+ let context = match fn_node_id {
+ Some(fn_node_id) if kind.impl_trait_return_allowed() => {
+ let fn_def_id = self.local_def_id(fn_node_id);
+ ImplTraitContext::ReturnPositionOpaqueTy {
+ origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
+ }
+ }
+ _ => ImplTraitContext::Disallowed(match kind {
+ FnDeclKind::Fn | FnDeclKind::Inherent => {
+ unreachable!("fn should allow in-band lifetimes")
+ }
+ FnDeclKind::ExternFn => ImplTraitPosition::ExternFnReturn,
+ FnDeclKind::Closure => ImplTraitPosition::ClosureReturn,
+ FnDeclKind::Pointer => ImplTraitPosition::PointerReturn,
+ FnDeclKind::Trait => ImplTraitPosition::TraitReturn,
+ FnDeclKind::Impl => ImplTraitPosition::ImplReturn,
+ }),
+ };
+ hir::FnRetTy::Return(self.lower_ty(ty, context))
+ }
+ FnRetTy::Default(span) => hir::FnRetTy::DefaultReturn(self.lower_span(span)),
+ }
+ };
+
+ self.arena.alloc(hir::FnDecl {
+ inputs,
+ output,
+ c_variadic,
+ implicit_self: decl.inputs.get(0).map_or(hir::ImplicitSelfKind::None, |arg| {
+ use BindingMode::{ByRef, ByValue};
+ let is_mutable_pat = matches!(
+ arg.pat.kind,
+ PatKind::Ident(ByValue(Mutability::Mut) | ByRef(Mutability::Mut), ..)
+ );
+
+ match arg.ty.kind {
+ TyKind::ImplicitSelf if is_mutable_pat => hir::ImplicitSelfKind::Mut,
+ TyKind::ImplicitSelf => hir::ImplicitSelfKind::Imm,
+ // Given we are only considering `ImplicitSelf` types, we needn't consider
+ // the case where we have a mutable pattern to a reference as that would
+ // no longer be an `ImplicitSelf`.
+ TyKind::Rptr(_, ref mt)
+ if mt.ty.kind.is_implicit_self() && mt.mutbl == ast::Mutability::Mut =>
+ {
+ hir::ImplicitSelfKind::MutRef
+ }
+ TyKind::Rptr(_, ref mt) if mt.ty.kind.is_implicit_self() => {
+ hir::ImplicitSelfKind::ImmRef
+ }
+ _ => hir::ImplicitSelfKind::None,
+ }
+ }),
+ })
+ }
+
+ // Transforms `-> T` for `async fn` into `-> OpaqueTy { .. }`
+ // combined with the following definition of `OpaqueTy`:
+ //
+ // type OpaqueTy<generics_from_parent_fn> = impl Future<Output = T>;
+ //
+ // `output`: unlowered output type (`T` in `-> T`)
+ // `fn_def_id`: `DefId` of the parent function (used to create child impl trait definition)
+ // `opaque_ty_node_id`: `NodeId` of the opaque `impl Trait` type that should be created
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lower_async_fn_ret_ty(
+ &mut self,
+ output: &FnRetTy,
+ fn_node_id: NodeId,
+ opaque_ty_node_id: NodeId,
+ ) -> hir::FnRetTy<'hir> {
+ let span = output.span();
+
+ let opaque_ty_span = self.mark_span_with_reason(DesugaringKind::Async, span, None);
+
+ let opaque_ty_def_id = self.local_def_id(opaque_ty_node_id);
+ let fn_def_id = self.local_def_id(fn_node_id);
+
+ // When we create the opaque type for this async fn, it is going to have
+ // to capture all the lifetimes involved in the signature (including in the
+ // return type). This is done by introducing lifetime parameters for:
+ //
+ // - all the explicitly declared lifetimes from the impl and function itself;
+ // - all the elided lifetimes in the fn arguments;
+ // - all the elided lifetimes in the return type.
+ //
+ // So for example in this snippet:
+ //
+ // ```rust
+ // impl<'a> Foo<'a> {
+ // async fn bar<'b>(&self, x: &'b Vec<f64>, y: &str) -> &u32 {
+ // // ^ '0 ^ '1 ^ '2
+ // // elided lifetimes used below
+ // }
+ // }
+ // ```
+ //
+ // we would create an opaque type like:
+ //
+ // ```
+ // type Bar<'a, 'b, '0, '1, '2> = impl Future<Output = &'2 u32>;
+ // ```
+ //
+ // and we would then desugar `bar` to the equivalent of:
+ //
+ // ```rust
+ // impl<'a> Foo<'a> {
+ // fn bar<'b, '0, '1>(&'0 self, x: &'b Vec<f64>, y: &'1 str) -> Bar<'a, 'b, '0, '1, '_>
+ // }
+ // ```
+ //
+ // Note that the final parameter to `Bar` is `'_`, not `'2` --
+ // this is because the elided lifetimes from the return type
+ // should be figured out using the ordinary elision rules, and
+ // this desugaring achieves that.
+
+ // Calculate all the lifetimes that should be captured
+ // by the opaque type. This should include all in-scope
+ // lifetime parameters, including those defined in-band.
+
+        // Contains the new lifetime definitions created for the opaque type (if any) that is
+        // generated for the return type.
+ let mut collected_lifetimes = Vec::new();
+ let mut new_remapping = FxHashMap::default();
+
+ let extra_lifetime_params = self.resolver.take_extra_lifetime_params(opaque_ty_node_id);
+ debug!(?extra_lifetime_params);
+ for (ident, outer_node_id, outer_res) in extra_lifetime_params {
+ let outer_def_id = self.local_def_id(outer_node_id);
+ let inner_node_id = self.next_node_id();
+
+            // Add a definition for the in-scope lifetime def.
+ let inner_def_id = self.create_def(
+ opaque_ty_def_id,
+ inner_node_id,
+ DefPathData::LifetimeNs(ident.name),
+ );
+ new_remapping.insert(outer_def_id, inner_def_id);
+
+ let inner_res = match outer_res {
+ // Input lifetime like `'a`:
+ LifetimeRes::Param { param, .. } => {
+ LifetimeRes::Param { param, binder: fn_node_id }
+ }
+ // Input lifetime like `'1`:
+ LifetimeRes::Fresh { param, .. } => {
+ LifetimeRes::Fresh { param, binder: fn_node_id }
+ }
+ LifetimeRes::Static | LifetimeRes::Error => continue,
+ res => {
+ panic!(
+ "Unexpected lifetime resolution {:?} for {:?} at {:?}",
+ res, ident, ident.span
+ )
+ }
+ };
+
+ let lifetime = Lifetime { id: outer_node_id, ident };
+ collected_lifetimes.push((inner_node_id, lifetime, Some(inner_res)));
+ }
+
+ debug!(?collected_lifetimes);
+
+        // We only want to capture the lifetimes that appear in the bounds, so visit the bounds
+        // to find out exactly which ones those are. In fn return position, as in the
+        // `fn test<'a>() -> impl Debug + 'a` example, we only keep the lifetimes that appear
+        // in the `impl Debug` itself:
+ let lifetimes_to_remap = lifetime_collector::lifetimes_in_ret_ty(&self.resolver, output);
+ debug!(?lifetimes_to_remap);
+
+ self.with_hir_id_owner(opaque_ty_node_id, |this| {
+ // If this opaque type is only capturing a subset of the lifetimes (those that appear
+ // in bounds), then create the new lifetime parameters required and create a mapping
+ // from the old `'a` (on the function) to the new `'a` (on the opaque type).
+ collected_lifetimes.extend(
+ this.create_lifetime_defs(
+ opaque_ty_def_id,
+ &lifetimes_to_remap,
+ &mut new_remapping,
+ )
+ .into_iter()
+ .map(|(new_node_id, lifetime)| (new_node_id, lifetime, None)),
+ );
+ debug!(?collected_lifetimes);
+ debug!(?new_remapping);
+
+ // Install the remapping from old to new (if any):
+ this.with_remapping(new_remapping, |this| {
+ // We have to be careful to get elision right here. The
+ // idea is that we create a lifetime parameter for each
+ // lifetime in the return type. So, given a return type
+ // like `async fn foo(..) -> &[&u32]`, we lower to `impl
+ // Future<Output = &'1 [ &'2 u32 ]>`.
+ //
+ // Then, we will create `fn foo(..) -> Foo<'_, '_>`, and
+ // hence the elision takes place at the fn site.
+ let future_bound =
+ this.lower_async_fn_output_type_to_future_bound(output, fn_def_id, span);
+
+ let generic_params = this.arena.alloc_from_iter(collected_lifetimes.iter().map(
+ |&(new_node_id, lifetime, _)| {
+ let hir_id = this.lower_node_id(new_node_id);
+ debug_assert_ne!(this.opt_local_def_id(new_node_id), None);
+
+ let (name, kind) = if lifetime.ident.name == kw::UnderscoreLifetime {
+ (hir::ParamName::Fresh, hir::LifetimeParamKind::Elided)
+ } else {
+ (
+ hir::ParamName::Plain(lifetime.ident),
+ hir::LifetimeParamKind::Explicit,
+ )
+ };
+
+ hir::GenericParam {
+ hir_id,
+ name,
+ span: lifetime.ident.span,
+ pure_wrt_drop: false,
+ kind: hir::GenericParamKind::Lifetime { kind },
+ colon_span: None,
+ }
+ },
+ ));
+ debug!("lower_async_fn_ret_ty: generic_params={:#?}", generic_params);
+
+ let opaque_ty_item = hir::OpaqueTy {
+ generics: this.arena.alloc(hir::Generics {
+ params: generic_params,
+ predicates: &[],
+ has_where_clause_predicates: false,
+ where_clause_span: this.lower_span(span),
+ span: this.lower_span(span),
+ }),
+ bounds: arena_vec![this; future_bound],
+ origin: hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
+ };
+
+ trace!("exist ty from async fn def id: {:#?}", opaque_ty_def_id);
+ this.generate_opaque_type(opaque_ty_def_id, opaque_ty_item, span, opaque_ty_span)
+ })
+ });
+
+ // As documented above, we need to create the lifetime
+ // arguments to our opaque type. Continuing with our example,
+ // we're creating the type arguments for the return type:
+ //
+ // ```
+ // Bar<'a, 'b, '0, '1, '_>
+ // ```
+ //
+ // For the "input" lifetime parameters, we wish to create
+ // references to the parameters themselves, including the
+ // "implicit" ones created from parameter types (`'a`, `'b`,
+ // '`0`, `'1`).
+ //
+ // For the "output" lifetime parameters, we just want to
+ // generate `'_`.
+ let generic_args = self.arena.alloc_from_iter(collected_lifetimes.into_iter().map(
+ |(_, lifetime, res)| {
+ let id = self.next_node_id();
+ let span = lifetime.ident.span;
+
+ let ident = if lifetime.ident.name == kw::UnderscoreLifetime {
+ Ident::with_dummy_span(kw::UnderscoreLifetime)
+ } else {
+ lifetime.ident
+ };
+
+ let res = res.unwrap_or(
+ self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error),
+ );
+ let l = self.new_named_lifetime_with_res(id, span, ident, res);
+ hir::GenericArg::Lifetime(l)
+ },
+ ));
+
+ // Create the `Foo<...>` reference itself. Note that the `type
+ // Foo = impl Trait` is, internally, created as a child of the
+ // async fn, so the *type parameters* are inherited. It's
+ // only the lifetime parameters that we must supply.
+ let opaque_ty_ref =
+ hir::TyKind::OpaqueDef(hir::ItemId { def_id: opaque_ty_def_id }, generic_args);
+ let opaque_ty = self.ty(opaque_ty_span, opaque_ty_ref);
+ hir::FnRetTy::Return(self.arena.alloc(opaque_ty))
+ }
+
+ /// Transforms `-> T` into `Future<Output = T>`.
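+    ///
+    /// E.g., `-> u32` yields the bound `Future<Output = u32>`, and a defaulted return type
+    /// (no `->` at all) yields `Future<Output = ()>`.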
+ fn lower_async_fn_output_type_to_future_bound(
+ &mut self,
+ output: &FnRetTy,
+ fn_def_id: LocalDefId,
+ span: Span,
+ ) -> hir::GenericBound<'hir> {
+ // Compute the `T` in `Future<Output = T>` from the return type.
+ let output_ty = match output {
+ FnRetTy::Ty(ty) => {
+ // Not `OpaqueTyOrigin::AsyncFn`: that's only used for the
+ // `impl Future` opaque type that `async fn` implicitly
+ // generates.
+ let context = ImplTraitContext::ReturnPositionOpaqueTy {
+ origin: hir::OpaqueTyOrigin::FnReturn(fn_def_id),
+ };
+ self.lower_ty(ty, context)
+ }
+ FnRetTy::Default(ret_ty_span) => self.arena.alloc(self.ty_tup(*ret_ty_span, &[])),
+ };
+
+ // "<Output = T>"
+ let future_args = self.arena.alloc(hir::GenericArgs {
+ args: &[],
+ bindings: arena_vec![self; self.output_ty_binding(span, output_ty)],
+ parenthesized: false,
+ span_ext: DUMMY_SP,
+ });
+
+ hir::GenericBound::LangItemTrait(
+ // ::std::future::Future<future_params>
+ hir::LangItem::Future,
+ self.lower_span(span),
+ self.next_id(),
+ future_args,
+ )
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn lower_param_bound(
+ &mut self,
+ tpb: &GenericBound,
+ itctx: ImplTraitContext,
+ ) -> hir::GenericBound<'hir> {
+ match tpb {
+ GenericBound::Trait(p, modifier) => hir::GenericBound::Trait(
+ self.lower_poly_trait_ref(p, itctx),
+ self.lower_trait_bound_modifier(*modifier),
+ ),
+ GenericBound::Outlives(lifetime) => {
+ hir::GenericBound::Outlives(self.lower_lifetime(lifetime))
+ }
+ }
+ }
+
+ fn lower_lifetime(&mut self, l: &Lifetime) -> hir::Lifetime {
+ let span = self.lower_span(l.ident.span);
+ let ident = self.lower_ident(l.ident);
+ self.new_named_lifetime(l.id, l.id, span, ident)
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn new_named_lifetime_with_res(
+ &mut self,
+ id: NodeId,
+ span: Span,
+ ident: Ident,
+ res: LifetimeRes,
+ ) -> hir::Lifetime {
+ let name = match res {
+ LifetimeRes::Param { param, .. } => {
+ let p_name = ParamName::Plain(ident);
+ let param = self.resolver.get_remapped_def_id(param);
+
+ hir::LifetimeName::Param(param, p_name)
+ }
+ LifetimeRes::Fresh { param, .. } => {
+ debug_assert_eq!(ident.name, kw::UnderscoreLifetime);
+ let param = self.local_def_id(param);
+
+ hir::LifetimeName::Param(param, ParamName::Fresh)
+ }
+ LifetimeRes::Infer => hir::LifetimeName::Infer,
+ LifetimeRes::Static => hir::LifetimeName::Static,
+ LifetimeRes::Error => hir::LifetimeName::Error,
+ res => panic!("Unexpected lifetime resolution {:?} for {:?} at {:?}", res, ident, span),
+ };
+
+ debug!(?name);
+ hir::Lifetime { hir_id: self.lower_node_id(id), span: self.lower_span(span), name }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn new_named_lifetime(
+ &mut self,
+ id: NodeId,
+ new_id: NodeId,
+ span: Span,
+ ident: Ident,
+ ) -> hir::Lifetime {
+ let res = self.resolver.get_lifetime_res(id).unwrap_or(LifetimeRes::Error);
+ self.new_named_lifetime_with_res(new_id, span, ident, res)
+ }
+
+ fn lower_generic_params_mut<'s>(
+ &'s mut self,
+ params: &'s [GenericParam],
+ ) -> impl Iterator<Item = hir::GenericParam<'hir>> + Captures<'a> + Captures<'s> {
+ params.iter().map(move |param| self.lower_generic_param(param))
+ }
+
+ fn lower_generic_params(&mut self, params: &[GenericParam]) -> &'hir [hir::GenericParam<'hir>] {
+ self.arena.alloc_from_iter(self.lower_generic_params_mut(params))
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn lower_generic_param(&mut self, param: &GenericParam) -> hir::GenericParam<'hir> {
+ let (name, kind) = self.lower_generic_param_kind(param);
+
+ let hir_id = self.lower_node_id(param.id);
+ self.lower_attrs(hir_id, &param.attrs);
+ hir::GenericParam {
+ hir_id,
+ name,
+ span: self.lower_span(param.span()),
+ pure_wrt_drop: self.tcx.sess.contains_name(&param.attrs, sym::may_dangle),
+ kind,
+ colon_span: param.colon_span.map(|s| self.lower_span(s)),
+ }
+ }
+
+ fn lower_generic_param_kind(
+ &mut self,
+ param: &GenericParam,
+ ) -> (hir::ParamName, hir::GenericParamKind<'hir>) {
+ match param.kind {
+ GenericParamKind::Lifetime => {
+ // AST resolution emitted an error on those parameters, so we lower them using
+ // `ParamName::Error`.
+ let param_name =
+ if let Some(LifetimeRes::Error) = self.resolver.get_lifetime_res(param.id) {
+ ParamName::Error
+ } else {
+ let ident = self.lower_ident(param.ident);
+ ParamName::Plain(ident)
+ };
+ let kind =
+ hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Explicit };
+
+ (param_name, kind)
+ }
+ GenericParamKind::Type { ref default, .. } => {
+ let kind = hir::GenericParamKind::Type {
+ default: default.as_ref().map(|x| {
+ self.lower_ty(x, ImplTraitContext::Disallowed(ImplTraitPosition::Type))
+ }),
+ synthetic: false,
+ };
+
+ (hir::ParamName::Plain(self.lower_ident(param.ident)), kind)
+ }
+ GenericParamKind::Const { ref ty, kw_span: _, ref default } => {
+ let ty = self.lower_ty(&ty, ImplTraitContext::Disallowed(ImplTraitPosition::Type));
+ let default = default.as_ref().map(|def| self.lower_anon_const(def));
+ (
+ hir::ParamName::Plain(self.lower_ident(param.ident)),
+ hir::GenericParamKind::Const { ty, default },
+ )
+ }
+ }
+ }
+
+ fn lower_trait_ref(&mut self, p: &TraitRef, itctx: ImplTraitContext) -> hir::TraitRef<'hir> {
+ let path = match self.lower_qpath(p.ref_id, &None, &p.path, ParamMode::Explicit, itctx) {
+ hir::QPath::Resolved(None, path) => path,
+ qpath => panic!("lower_trait_ref: unexpected QPath `{:?}`", qpath),
+ };
+ hir::TraitRef { path, hir_ref_id: self.lower_node_id(p.ref_id) }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn lower_poly_trait_ref(
+ &mut self,
+ p: &PolyTraitRef,
+ itctx: ImplTraitContext,
+ ) -> hir::PolyTraitRef<'hir> {
+ let bound_generic_params =
+ self.lower_lifetime_binder(p.trait_ref.ref_id, &p.bound_generic_params);
+ let trait_ref = self.lower_trait_ref(&p.trait_ref, itctx);
+ hir::PolyTraitRef { bound_generic_params, trait_ref, span: self.lower_span(p.span) }
+ }
+
+ fn lower_mt(&mut self, mt: &MutTy, itctx: ImplTraitContext) -> hir::MutTy<'hir> {
+ hir::MutTy { ty: self.lower_ty(&mt.ty, itctx), mutbl: mt.mutbl }
+ }
+
+ fn lower_param_bounds(
+ &mut self,
+ bounds: &[GenericBound],
+ itctx: ImplTraitContext,
+ ) -> hir::GenericBounds<'hir> {
+ self.arena.alloc_from_iter(self.lower_param_bounds_mut(bounds, itctx))
+ }
+
+ fn lower_param_bounds_mut<'s>(
+ &'s mut self,
+ bounds: &'s [GenericBound],
+ itctx: ImplTraitContext,
+ ) -> impl Iterator<Item = hir::GenericBound<'hir>> + Captures<'s> + Captures<'a> {
+ bounds.iter().map(move |bound| self.lower_param_bound(bound, itctx))
+ }
+
+ fn lower_generic_and_bounds(
+ &mut self,
+ node_id: NodeId,
+ span: Span,
+ ident: Ident,
+ bounds: &[GenericBound],
+ ) -> (hir::GenericParam<'hir>, Option<hir::WherePredicate<'hir>>, hir::TyKind<'hir>) {
+ // Add a definition for the in-band `Param`.
+ let def_id = self.local_def_id(node_id);
+
+ // Set the name to `impl Bound1 + Bound2`.
+ let param = hir::GenericParam {
+ hir_id: self.lower_node_id(node_id),
+ name: ParamName::Plain(self.lower_ident(ident)),
+ pure_wrt_drop: false,
+ span: self.lower_span(span),
+ kind: hir::GenericParamKind::Type { default: None, synthetic: true },
+ colon_span: None,
+ };
+
+ let preds = self.lower_generic_bound_predicate(
+ ident,
+ node_id,
+ &GenericParamKind::Type { default: None },
+ bounds,
+ ImplTraitContext::Universal,
+ hir::PredicateOrigin::ImplTrait,
+ );
+
+ let ty = hir::TyKind::Path(hir::QPath::Resolved(
+ None,
+ self.arena.alloc(hir::Path {
+ span: self.lower_span(span),
+ res: Res::Def(DefKind::TyParam, def_id.to_def_id()),
+ segments: arena_vec![self; hir::PathSegment::from_ident(self.lower_ident(ident))],
+ }),
+ ));
+
+ (param, preds, ty)
+ }
+
+ /// Lowers a block directly to an expression, presuming that it
+ /// has no attributes and is not targeted by a `break`.
+ fn lower_block_expr(&mut self, b: &Block) -> hir::Expr<'hir> {
+ let block = self.lower_block(b, false);
+ self.expr_block(block, AttrVec::new())
+ }
+
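+    /// E.g., in `let x: [u8; _] = [0; 4];`, the `_` lowers to `hir::ArrayLen::Infer` only when
+    /// the `generic_arg_infer` feature is enabled; otherwise a feature error is emitted and the
+    /// `_` is lowered as an anonymous const body.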
+ fn lower_array_length(&mut self, c: &AnonConst) -> hir::ArrayLen {
+ match c.value.kind {
+ ExprKind::Underscore => {
+ if self.tcx.features().generic_arg_infer {
+ hir::ArrayLen::Infer(self.lower_node_id(c.id), c.value.span)
+ } else {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::generic_arg_infer,
+ c.value.span,
+ "using `_` for array lengths is unstable",
+ )
+ .emit();
+ hir::ArrayLen::Body(self.lower_anon_const(c))
+ }
+ }
+ _ => hir::ArrayLen::Body(self.lower_anon_const(c)),
+ }
+ }
+
+ fn lower_anon_const(&mut self, c: &AnonConst) -> hir::AnonConst {
+ self.with_new_scopes(|this| hir::AnonConst {
+ hir_id: this.lower_node_id(c.id),
+ body: this.lower_const_body(c.value.span, Some(&c.value)),
+ })
+ }
+
+ fn lower_unsafe_source(&mut self, u: UnsafeSource) -> hir::UnsafeSource {
+ match u {
+ CompilerGenerated => hir::UnsafeSource::CompilerGenerated,
+ UserProvided => hir::UnsafeSource::UserProvided,
+ }
+ }
+
+ fn lower_trait_bound_modifier(&mut self, f: TraitBoundModifier) -> hir::TraitBoundModifier {
+ match f {
+ TraitBoundModifier::None => hir::TraitBoundModifier::None,
+ TraitBoundModifier::MaybeConst => hir::TraitBoundModifier::MaybeConst,
+
+ // `MaybeConstMaybe` will cause an error during AST validation, but we need to pick a
+ // placeholder for compilation to proceed.
+ TraitBoundModifier::MaybeConstMaybe | TraitBoundModifier::Maybe => {
+ hir::TraitBoundModifier::Maybe
+ }
+ }
+ }
+
+ // Helper methods for building HIR.
+
+ fn stmt(&mut self, span: Span, kind: hir::StmtKind<'hir>) -> hir::Stmt<'hir> {
+ hir::Stmt { span: self.lower_span(span), kind, hir_id: self.next_id() }
+ }
+
+ fn stmt_expr(&mut self, span: Span, expr: hir::Expr<'hir>) -> hir::Stmt<'hir> {
+ self.stmt(span, hir::StmtKind::Expr(self.arena.alloc(expr)))
+ }
+
+ fn stmt_let_pat(
+ &mut self,
+ attrs: Option<&'hir [Attribute]>,
+ span: Span,
+ init: Option<&'hir hir::Expr<'hir>>,
+ pat: &'hir hir::Pat<'hir>,
+ source: hir::LocalSource,
+ ) -> hir::Stmt<'hir> {
+ let hir_id = self.next_id();
+ if let Some(a) = attrs {
+ debug_assert!(!a.is_empty());
+ self.attrs.insert(hir_id.local_id, a);
+ }
+ let local = hir::Local {
+ hir_id,
+ init,
+ pat,
+ els: None,
+ source,
+ span: self.lower_span(span),
+ ty: None,
+ };
+ self.stmt(span, hir::StmtKind::Local(self.arena.alloc(local)))
+ }
+
+ fn block_expr(&mut self, expr: &'hir hir::Expr<'hir>) -> &'hir hir::Block<'hir> {
+ self.block_all(expr.span, &[], Some(expr))
+ }
+
+ fn block_all(
+ &mut self,
+ span: Span,
+ stmts: &'hir [hir::Stmt<'hir>],
+ expr: Option<&'hir hir::Expr<'hir>>,
+ ) -> &'hir hir::Block<'hir> {
+ let blk = hir::Block {
+ stmts,
+ expr,
+ hir_id: self.next_id(),
+ rules: hir::BlockCheckMode::DefaultBlock,
+ span: self.lower_span(span),
+ targeted_by_break: false,
+ };
+ self.arena.alloc(blk)
+ }
+
+ fn pat_cf_continue(&mut self, span: Span, pat: &'hir hir::Pat<'hir>) -> &'hir hir::Pat<'hir> {
+ let field = self.single_pat_field(span, pat);
+ self.pat_lang_item_variant(span, hir::LangItem::ControlFlowContinue, field, None)
+ }
+
+ fn pat_cf_break(&mut self, span: Span, pat: &'hir hir::Pat<'hir>) -> &'hir hir::Pat<'hir> {
+ let field = self.single_pat_field(span, pat);
+ self.pat_lang_item_variant(span, hir::LangItem::ControlFlowBreak, field, None)
+ }
+
+ fn pat_some(&mut self, span: Span, pat: &'hir hir::Pat<'hir>) -> &'hir hir::Pat<'hir> {
+ let field = self.single_pat_field(span, pat);
+ self.pat_lang_item_variant(span, hir::LangItem::OptionSome, field, None)
+ }
+
+ fn pat_none(&mut self, span: Span) -> &'hir hir::Pat<'hir> {
+ self.pat_lang_item_variant(span, hir::LangItem::OptionNone, &[], None)
+ }
+
+ fn single_pat_field(
+ &mut self,
+ span: Span,
+ pat: &'hir hir::Pat<'hir>,
+ ) -> &'hir [hir::PatField<'hir>] {
+ let field = hir::PatField {
+ hir_id: self.next_id(),
+ ident: Ident::new(sym::integer(0), self.lower_span(span)),
+ is_shorthand: false,
+ pat,
+ span: self.lower_span(span),
+ };
+ arena_vec![self; field]
+ }
+
+ fn pat_lang_item_variant(
+ &mut self,
+ span: Span,
+ lang_item: hir::LangItem,
+ fields: &'hir [hir::PatField<'hir>],
+ hir_id: Option<hir::HirId>,
+ ) -> &'hir hir::Pat<'hir> {
+ let qpath = hir::QPath::LangItem(lang_item, self.lower_span(span), hir_id);
+ self.pat(span, hir::PatKind::Struct(qpath, fields, false))
+ }
+
+ fn pat_ident(&mut self, span: Span, ident: Ident) -> (&'hir hir::Pat<'hir>, hir::HirId) {
+ self.pat_ident_binding_mode(span, ident, hir::BindingAnnotation::Unannotated)
+ }
+
+ fn pat_ident_mut(&mut self, span: Span, ident: Ident) -> (hir::Pat<'hir>, hir::HirId) {
+ self.pat_ident_binding_mode_mut(span, ident, hir::BindingAnnotation::Unannotated)
+ }
+
+ fn pat_ident_binding_mode(
+ &mut self,
+ span: Span,
+ ident: Ident,
+ bm: hir::BindingAnnotation,
+ ) -> (&'hir hir::Pat<'hir>, hir::HirId) {
+ let (pat, hir_id) = self.pat_ident_binding_mode_mut(span, ident, bm);
+ (self.arena.alloc(pat), hir_id)
+ }
+
+ fn pat_ident_binding_mode_mut(
+ &mut self,
+ span: Span,
+ ident: Ident,
+ bm: hir::BindingAnnotation,
+ ) -> (hir::Pat<'hir>, hir::HirId) {
+ let hir_id = self.next_id();
+
+ (
+ hir::Pat {
+ hir_id,
+ kind: hir::PatKind::Binding(bm, hir_id, self.lower_ident(ident), None),
+ span: self.lower_span(span),
+ default_binding_modes: true,
+ },
+ hir_id,
+ )
+ }
+
+ fn pat(&mut self, span: Span, kind: hir::PatKind<'hir>) -> &'hir hir::Pat<'hir> {
+ self.arena.alloc(hir::Pat {
+ hir_id: self.next_id(),
+ kind,
+ span: self.lower_span(span),
+ default_binding_modes: true,
+ })
+ }
+
+ fn pat_without_dbm(&mut self, span: Span, kind: hir::PatKind<'hir>) -> hir::Pat<'hir> {
+ hir::Pat {
+ hir_id: self.next_id(),
+ kind,
+ span: self.lower_span(span),
+ default_binding_modes: false,
+ }
+ }
+
+ fn ty_path(
+ &mut self,
+ mut hir_id: hir::HirId,
+ span: Span,
+ qpath: hir::QPath<'hir>,
+ ) -> hir::Ty<'hir> {
+ let kind = match qpath {
+ hir::QPath::Resolved(None, path) => {
+ // Turn trait object paths into `TyKind::TraitObject` instead.
+ match path.res {
+ Res::Def(DefKind::Trait | DefKind::TraitAlias, _) => {
+ let principal = hir::PolyTraitRef {
+ bound_generic_params: &[],
+ trait_ref: hir::TraitRef { path, hir_ref_id: hir_id },
+ span: self.lower_span(span),
+ };
+
+ // The original ID is taken by the `PolyTraitRef`,
+ // so the `Ty` itself needs a different one.
+ hir_id = self.next_id();
+ hir::TyKind::TraitObject(
+ arena_vec![self; principal],
+ self.elided_dyn_bound(span),
+ TraitObjectSyntax::None,
+ )
+ }
+ _ => hir::TyKind::Path(hir::QPath::Resolved(None, path)),
+ }
+ }
+ _ => hir::TyKind::Path(qpath),
+ };
+
+ hir::Ty { hir_id, kind, span: self.lower_span(span) }
+ }
+
+ /// Invoked to create the lifetime argument(s) for an elided trait object
+ /// bound, like the bound in `Box<dyn Debug>`. This method is not invoked
+ /// when the bound is written, even if it is written with `'_` like in
+ /// `Box<dyn Debug + '_>`. In those cases, `lower_lifetime` is invoked.
+ fn elided_dyn_bound(&mut self, span: Span) -> hir::Lifetime {
+ let r = hir::Lifetime {
+ hir_id: self.next_id(),
+ span: self.lower_span(span),
+ name: hir::LifetimeName::ImplicitObjectLifetimeDefault,
+ };
+ debug!("elided_dyn_bound: r={:?}", r);
+ r
+ }
+}
+
+/// Helper struct for delayed construction of GenericArgs.
+struct GenericArgsCtor<'hir> {
+ args: SmallVec<[hir::GenericArg<'hir>; 4]>,
+ bindings: &'hir [hir::TypeBinding<'hir>],
+ parenthesized: bool,
+ span: Span,
+}
+
+impl<'hir> GenericArgsCtor<'hir> {
+ fn is_empty(&self) -> bool {
+ self.args.is_empty() && self.bindings.is_empty() && !self.parenthesized
+ }
+
+ fn into_generic_args(self, this: &LoweringContext<'_, 'hir>) -> &'hir hir::GenericArgs<'hir> {
+ let ga = hir::GenericArgs {
+ args: this.arena.alloc_from_iter(self.args),
+ bindings: self.bindings,
+ parenthesized: self.parenthesized,
+ span_ext: this.lower_span(self.span),
+ };
+ this.arena.alloc(ga)
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/lifetime_collector.rs b/compiler/rustc_ast_lowering/src/lifetime_collector.rs
new file mode 100644
index 000000000..81006e00f
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/lifetime_collector.rs
@@ -0,0 +1,115 @@
+use super::ResolverAstLoweringExt;
+use rustc_ast::visit::{self, BoundKind, LifetimeCtxt, Visitor};
+use rustc_ast::{
+ FnRetTy, GenericBounds, Lifetime, NodeId, PathSegment, PolyTraitRef, TraitBoundModifier, Ty,
+ TyKind,
+};
+use rustc_hir::def::LifetimeRes;
+use rustc_middle::span_bug;
+use rustc_middle::ty::ResolverAstLowering;
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::Span;
+
+struct LifetimeCollectVisitor<'ast> {
+ resolver: &'ast ResolverAstLowering,
+ current_binders: Vec<NodeId>,
+ collected_lifetimes: Vec<Lifetime>,
+}
+
+impl<'ast> LifetimeCollectVisitor<'ast> {
+ fn new(resolver: &'ast ResolverAstLowering) -> Self {
+ Self { resolver, current_binders: Vec::new(), collected_lifetimes: Vec::new() }
+ }
+
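+    /// Records a use of `lifetime` unless it is bound by one of the binders currently in
+    /// scope (e.g., in a hypothetical `for<'a> Fn(&'a u8)` bound, the inner `'a` is skipped
+    /// because its binder is on `current_binders`).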
+ fn record_lifetime_use(&mut self, lifetime: Lifetime) {
+ match self.resolver.get_lifetime_res(lifetime.id).unwrap_or(LifetimeRes::Error) {
+ LifetimeRes::Param { binder, .. } | LifetimeRes::Fresh { binder, .. } => {
+ if !self.current_binders.contains(&binder) {
+ if !self.collected_lifetimes.contains(&lifetime) {
+ self.collected_lifetimes.push(lifetime);
+ }
+ }
+ }
+ LifetimeRes::Static | LifetimeRes::Error => {
+ if !self.collected_lifetimes.contains(&lifetime) {
+ self.collected_lifetimes.push(lifetime);
+ }
+ }
+ LifetimeRes::Infer => {}
+ res => {
+ let bug_msg = format!(
+ "Unexpected lifetime resolution {:?} for {:?} at {:?}",
+ res, lifetime.ident, lifetime.ident.span
+ );
+ span_bug!(lifetime.ident.span, "{}", bug_msg);
+ }
+ }
+ }
+
+    /// This collects lifetimes that are elided, for nodes like `Foo<T>` where there are no
+    /// explicit lifetime nodes. It is equivalent to having "pseudo" nodes introduced for each
+    /// of the node ids in the range `start..end`.
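+    ///
+    /// E.g., a path segment `Foo<T>` resolved with two elided lifetimes behaves as if
+    /// `Foo<'_, '_, T>` had been written: one `'_` use is recorded for each id in `start..end`.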
+ fn record_elided_anchor(&mut self, node_id: NodeId, span: Span) {
+ if let Some(LifetimeRes::ElidedAnchor { start, end }) =
+ self.resolver.get_lifetime_res(node_id)
+ {
+ for i in start..end {
+ let lifetime = Lifetime { id: i, ident: Ident::new(kw::UnderscoreLifetime, span) };
+ self.record_lifetime_use(lifetime);
+ }
+ }
+ }
+}
+
+impl<'ast> Visitor<'ast> for LifetimeCollectVisitor<'ast> {
+ fn visit_lifetime(&mut self, lifetime: &'ast Lifetime, _: LifetimeCtxt) {
+ self.record_lifetime_use(*lifetime);
+ }
+
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) {
+ self.record_elided_anchor(path_segment.id, path_span);
+ visit::walk_path_segment(self, path_span, path_segment);
+ }
+
+ fn visit_poly_trait_ref(&mut self, t: &'ast PolyTraitRef, m: &'ast TraitBoundModifier) {
+ self.current_binders.push(t.trait_ref.ref_id);
+
+ visit::walk_poly_trait_ref(self, t, m);
+
+ self.current_binders.pop();
+ }
+
+ fn visit_ty(&mut self, t: &'ast Ty) {
+ match t.kind {
+ TyKind::BareFn(_) => {
+ self.current_binders.push(t.id);
+ visit::walk_ty(self, t);
+ self.current_binders.pop();
+ }
+ TyKind::Rptr(None, _) => {
+ self.record_elided_anchor(t.id, t.span);
+ visit::walk_ty(self, t);
+ }
+ _ => {
+ visit::walk_ty(self, t);
+ }
+ }
+ }
+}
+
+pub fn lifetimes_in_ret_ty(resolver: &ResolverAstLowering, ret_ty: &FnRetTy) -> Vec<Lifetime> {
+ let mut visitor = LifetimeCollectVisitor::new(resolver);
+ visitor.visit_fn_ret_ty(ret_ty);
+ visitor.collected_lifetimes
+}
+
+pub fn lifetimes_in_bounds(
+ resolver: &ResolverAstLowering,
+ bounds: &GenericBounds,
+) -> Vec<Lifetime> {
+ let mut visitor = LifetimeCollectVisitor::new(resolver);
+ for bound in bounds {
+ visitor.visit_param_bound(bound, BoundKind::Bound);
+ }
+ visitor.collected_lifetimes
+}
diff --git a/compiler/rustc_ast_lowering/src/pat.rs b/compiler/rustc_ast_lowering/src/pat.rs
new file mode 100644
index 000000000..bd2e76e55
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/pat.rs
@@ -0,0 +1,350 @@
+use super::ResolverAstLoweringExt;
+use super::{ImplTraitContext, LoweringContext, ParamMode};
+use crate::ImplTraitPosition;
+
+use rustc_ast::ptr::P;
+use rustc_ast::*;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_span::symbol::Ident;
+use rustc_span::{source_map::Spanned, Span};
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ pub(crate) fn lower_pat(&mut self, pattern: &Pat) -> &'hir hir::Pat<'hir> {
+ self.arena.alloc(self.lower_pat_mut(pattern))
+ }
+
+ pub(crate) fn lower_pat_mut(&mut self, mut pattern: &Pat) -> hir::Pat<'hir> {
+ ensure_sufficient_stack(|| {
+ // loop here to avoid recursion
+ let node = loop {
+ match pattern.kind {
+ PatKind::Wild => break hir::PatKind::Wild,
+ PatKind::Ident(ref binding_mode, ident, ref sub) => {
+ let lower_sub = |this: &mut Self| sub.as_ref().map(|s| this.lower_pat(&*s));
+ break self.lower_pat_ident(pattern, binding_mode, ident, lower_sub);
+ }
+ PatKind::Lit(ref e) => {
+ break hir::PatKind::Lit(self.lower_expr_within_pat(e, false));
+ }
+ PatKind::TupleStruct(ref qself, ref path, ref pats) => {
+ let qpath = self.lower_qpath(
+ pattern.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple struct");
+ break hir::PatKind::TupleStruct(qpath, pats, ddpos);
+ }
+ PatKind::Or(ref pats) => {
+ break hir::PatKind::Or(
+ self.arena.alloc_from_iter(pats.iter().map(|x| self.lower_pat_mut(x))),
+ );
+ }
+ PatKind::Path(ref qself, ref path) => {
+ let qpath = self.lower_qpath(
+ pattern.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+ break hir::PatKind::Path(qpath);
+ }
+ PatKind::Struct(ref qself, ref path, ref fields, etc) => {
+ let qpath = self.lower_qpath(
+ pattern.id,
+ qself,
+ path,
+ ParamMode::Optional,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ );
+
+ let fs = self.arena.alloc_from_iter(fields.iter().map(|f| hir::PatField {
+ hir_id: self.next_id(),
+ ident: self.lower_ident(f.ident),
+ pat: self.lower_pat(&f.pat),
+ is_shorthand: f.is_shorthand,
+ span: self.lower_span(f.span),
+ }));
+ break hir::PatKind::Struct(qpath, fs, etc);
+ }
+ PatKind::Tuple(ref pats) => {
+ let (pats, ddpos) = self.lower_pat_tuple(pats, "tuple");
+ break hir::PatKind::Tuple(pats, ddpos);
+ }
+ PatKind::Box(ref inner) => {
+ break hir::PatKind::Box(self.lower_pat(inner));
+ }
+ PatKind::Ref(ref inner, mutbl) => {
+ break hir::PatKind::Ref(self.lower_pat(inner), mutbl);
+ }
+ PatKind::Range(ref e1, ref e2, Spanned { node: ref end, .. }) => {
+ break hir::PatKind::Range(
+ e1.as_deref().map(|e| self.lower_expr_within_pat(e, true)),
+ e2.as_deref().map(|e| self.lower_expr_within_pat(e, true)),
+ self.lower_range_end(end, e2.is_some()),
+ );
+ }
+ PatKind::Slice(ref pats) => break self.lower_pat_slice(pats),
+ PatKind::Rest => {
+ // If we reach here the `..` pattern is not semantically allowed.
+ break self.ban_illegal_rest_pat(pattern.span);
+ }
+ // return inner to be processed in next loop
+ PatKind::Paren(ref inner) => pattern = inner,
+ PatKind::MacCall(_) => panic!("{:?} shouldn't exist here", pattern.span),
+ }
+ };
+
+ self.pat_with_node_id_of(pattern, node)
+ })
+ }
+
+ fn lower_pat_tuple(
+ &mut self,
+ pats: &[P<Pat>],
+ ctx: &str,
+ ) -> (&'hir [hir::Pat<'hir>], Option<usize>) {
+ let mut elems = Vec::with_capacity(pats.len());
+ let mut rest = None;
+
+ let mut iter = pats.iter().enumerate();
+ for (idx, pat) in iter.by_ref() {
+ // Interpret the first `..` pattern as a sub-tuple pattern.
+ // Note that unlike for slice patterns,
+ // where `xs @ ..` is a legal sub-slice pattern,
+ // it is not a legal sub-tuple pattern.
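+            // E.g., `(a, .., b)` records the rest at index 1 and ultimately lowers to a
+            // tuple pattern with `ddpos = Some(1)` (a sketch of the shape).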
+ match pat.kind {
+ // Found a sub-tuple rest pattern
+ PatKind::Rest => {
+ rest = Some((idx, pat.span));
+ break;
+ }
+ // Found a sub-tuple pattern `$binding_mode $ident @ ..`.
+                // This is not allowed as a sub-tuple pattern.
+ PatKind::Ident(ref _bm, ident, Some(ref sub)) if sub.is_rest() => {
+ let sp = pat.span;
+ self.diagnostic()
+ .struct_span_err(
+ sp,
+ &format!("`{} @` is not allowed in a {}", ident.name, ctx),
+ )
+ .span_label(sp, "this is only allowed in slice patterns")
+ .help("remove this and bind each tuple field independently")
+ .span_suggestion_verbose(
+ sp,
+ &format!("if you don't need to use the contents of {}, discard the tuple's remaining fields", ident),
+ "..",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+ _ => {}
+ }
+
+ // It was not a sub-tuple pattern so lower it normally.
+ elems.push(self.lower_pat_mut(pat));
+ }
+
+ for (_, pat) in iter {
+ // There was a previous sub-tuple pattern; make sure we don't allow more...
+ if pat.is_rest() {
+ // ...but there was one again, so error.
+ self.ban_extra_rest_pat(pat.span, rest.unwrap().1, ctx);
+ } else {
+ elems.push(self.lower_pat_mut(pat));
+ }
+ }
+
+ (self.arena.alloc_from_iter(elems), rest.map(|(ddpos, _)| ddpos))
+ }
+
+    /// Lower a slice pattern of form `[pat_0, ..., pat_n]` into
+    /// `hir::PatKind::Slice(before, slice, after)`.
+    ///
+    /// When encountering `($binding_mode $ident @)? ..` (`slice`),
+    /// this is interpreted as a sub-slice pattern semantically.
+    /// All patterns that follow it go into `after`; a second sub-slice pattern among them is
+    /// rejected with an error.
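+    ///
+    /// E.g., `[a, b, rest @ .., c]` is lowered with `before = [a, b]`,
+    /// `slice = Some(rest @ _)`, and `after = [c]` (a sketch of the shape, not the exact HIR).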
+ fn lower_pat_slice(&mut self, pats: &[P<Pat>]) -> hir::PatKind<'hir> {
+ let mut before = Vec::new();
+ let mut after = Vec::new();
+ let mut slice = None;
+ let mut prev_rest_span = None;
+
+ // Lowers `$bm $ident @ ..` to `$bm $ident @ _`.
+ let lower_rest_sub = |this: &mut Self, pat, bm, ident, sub| {
+ let lower_sub = |this: &mut Self| Some(this.pat_wild_with_node_id_of(sub));
+ let node = this.lower_pat_ident(pat, bm, ident, lower_sub);
+ this.pat_with_node_id_of(pat, node)
+ };
+
+ let mut iter = pats.iter();
+ // Lower all the patterns until the first occurrence of a sub-slice pattern.
+ for pat in iter.by_ref() {
+ match pat.kind {
+ // Found a sub-slice pattern `..`. Record, lower it to `_`, and stop here.
+ PatKind::Rest => {
+ prev_rest_span = Some(pat.span);
+ slice = Some(self.pat_wild_with_node_id_of(pat));
+ break;
+ }
+ // Found a sub-slice pattern `$binding_mode $ident @ ..`.
+ // Record, lower it to `$binding_mode $ident @ _`, and stop here.
+ PatKind::Ident(ref bm, ident, Some(ref sub)) if sub.is_rest() => {
+ prev_rest_span = Some(sub.span);
+ slice = Some(self.arena.alloc(lower_rest_sub(self, pat, bm, ident, sub)));
+ break;
+ }
+ // It was not a subslice pattern so lower it normally.
+ _ => before.push(self.lower_pat_mut(pat)),
+ }
+ }
+
+ // Lower all the patterns after the first sub-slice pattern.
+ for pat in iter {
+ // There was a previous subslice pattern; make sure we don't allow more.
+ let rest_span = match pat.kind {
+ PatKind::Rest => Some(pat.span),
+ PatKind::Ident(ref bm, ident, Some(ref sub)) if sub.is_rest() => {
+ // #69103: Lower into `binding @ _` as above to avoid ICEs.
+ after.push(lower_rest_sub(self, pat, bm, ident, sub));
+ Some(sub.span)
+ }
+ _ => None,
+ };
+ if let Some(rest_span) = rest_span {
+ // We have e.g., `[a, .., b, ..]`. That's no good, error!
+ self.ban_extra_rest_pat(rest_span, prev_rest_span.unwrap(), "slice");
+ } else {
+ // Lower the pattern normally.
+ after.push(self.lower_pat_mut(pat));
+ }
+ }
+
+ hir::PatKind::Slice(
+ self.arena.alloc_from_iter(before),
+ slice,
+ self.arena.alloc_from_iter(after),
+ )
+ }
+
+ fn lower_pat_ident(
+ &mut self,
+ p: &Pat,
+ binding_mode: &BindingMode,
+ ident: Ident,
+ lower_sub: impl FnOnce(&mut Self) -> Option<&'hir hir::Pat<'hir>>,
+ ) -> hir::PatKind<'hir> {
+ match self.resolver.get_partial_res(p.id).map(|d| d.base_res()) {
+ // `None` can occur in body-less function signatures
+ res @ (None | Some(Res::Local(_))) => {
+ let canonical_id = match res {
+ Some(Res::Local(id)) => id,
+ _ => p.id,
+ };
+
+ hir::PatKind::Binding(
+ self.lower_binding_mode(binding_mode),
+ self.lower_node_id(canonical_id),
+ self.lower_ident(ident),
+ lower_sub(self),
+ )
+ }
+ Some(res) => hir::PatKind::Path(hir::QPath::Resolved(
+ None,
+ self.arena.alloc(hir::Path {
+ span: self.lower_span(ident.span),
+ res: self.lower_res(res),
+ segments: arena_vec![self; hir::PathSegment::from_ident(self.lower_ident(ident))],
+ }),
+ )),
+ }
+ }
+
+ fn lower_binding_mode(&mut self, b: &BindingMode) -> hir::BindingAnnotation {
+ match *b {
+ BindingMode::ByValue(Mutability::Not) => hir::BindingAnnotation::Unannotated,
+ BindingMode::ByRef(Mutability::Not) => hir::BindingAnnotation::Ref,
+ BindingMode::ByValue(Mutability::Mut) => hir::BindingAnnotation::Mutable,
+ BindingMode::ByRef(Mutability::Mut) => hir::BindingAnnotation::RefMut,
+ }
+ }
+
+ fn pat_wild_with_node_id_of(&mut self, p: &Pat) -> &'hir hir::Pat<'hir> {
+ self.arena.alloc(self.pat_with_node_id_of(p, hir::PatKind::Wild))
+ }
+
+ /// Construct a `Pat` with the `HirId` of `p.id` lowered.
+ fn pat_with_node_id_of(&mut self, p: &Pat, kind: hir::PatKind<'hir>) -> hir::Pat<'hir> {
+ hir::Pat {
+ hir_id: self.lower_node_id(p.id),
+ kind,
+ span: self.lower_span(p.span),
+ default_binding_modes: true,
+ }
+ }
+
+ /// Emit a friendly error for extra `..` patterns in a tuple/tuple struct/slice pattern.
+ pub(crate) fn ban_extra_rest_pat(&self, sp: Span, prev_sp: Span, ctx: &str) {
+ self.diagnostic()
+ .struct_span_err(sp, &format!("`..` can only be used once per {} pattern", ctx))
+ .span_label(sp, &format!("can only be used once per {} pattern", ctx))
+ .span_label(prev_sp, "previously used here")
+ .emit();
+ }
+
+ /// Used to ban the `..` pattern in places it shouldn't be semantically.
+ fn ban_illegal_rest_pat(&self, sp: Span) -> hir::PatKind<'hir> {
+ self.diagnostic()
+ .struct_span_err(sp, "`..` patterns are not allowed here")
+ .note("only allowed in tuple, tuple struct, and slice patterns")
+ .emit();
+
+ // We're not in a list context so `..` can be reasonably treated
+ // as `_` because it should always be valid and roughly matches the
+ // intent of `..` (notice that the rest of a single slot is that slot).
+ hir::PatKind::Wild
+ }
+
+ fn lower_range_end(&mut self, e: &RangeEnd, has_end: bool) -> hir::RangeEnd {
+ match *e {
+ RangeEnd::Excluded if has_end => hir::RangeEnd::Excluded,
+ // No end; so `X..` behaves like `RangeFrom`.
+ RangeEnd::Excluded | RangeEnd::Included(_) => hir::RangeEnd::Included,
+ }
+ }
+
+ /// Matches `'-' lit | lit (cf. parser::Parser::parse_literal_maybe_minus)`,
+ /// or paths for ranges.
+ //
+ // FIXME: do we want to allow `expr -> pattern` conversion to create path expressions?
+ // That means making this work:
+ //
+ // ```rust,ignore (FIXME)
+ // struct S;
+ // macro_rules! m {
+ // ($a:expr) => {
+ // let $a = S;
+ // }
+ // }
+ // m!(S);
+ // ```
+ fn lower_expr_within_pat(&mut self, expr: &Expr, allow_paths: bool) -> &'hir hir::Expr<'hir> {
+ match expr.kind {
+ ExprKind::Lit(..) | ExprKind::ConstBlock(..) | ExprKind::Err => {}
+ ExprKind::Path(..) if allow_paths => {}
+ ExprKind::Unary(UnOp::Neg, ref inner) if matches!(inner.kind, ExprKind::Lit(_)) => {}
+ _ => {
+ self.diagnostic()
+ .span_err(expr.span, "arbitrary expressions aren't allowed in patterns");
+ return self.arena.alloc(self.expr_err(expr.span));
+ }
+ }
+ self.lower_expr(expr)
+ }
+}
diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs
new file mode 100644
index 000000000..393be3b45
--- /dev/null
+++ b/compiler/rustc_ast_lowering/src/path.rs
@@ -0,0 +1,406 @@
+use crate::ImplTraitPosition;
+
+use super::ResolverAstLoweringExt;
+use super::{GenericArgsCtor, LifetimeRes, ParenthesizedGenericArgs};
+use super::{ImplTraitContext, LoweringContext, ParamMode};
+
+use rustc_ast::{self as ast, *};
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, PartialRes, Res};
+use rustc_hir::GenericArg;
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::{BytePos, Span, DUMMY_SP};
+
+use smallvec::smallvec;
+use tracing::debug;
+
+impl<'a, 'hir> LoweringContext<'a, 'hir> {
+ #[instrument(level = "trace", skip(self))]
+ pub(crate) fn lower_qpath(
+ &mut self,
+ id: NodeId,
+ qself: &Option<QSelf>,
+ p: &Path,
+ param_mode: ParamMode,
+ itctx: ImplTraitContext,
+ ) -> hir::QPath<'hir> {
+ let qself_position = qself.as_ref().map(|q| q.position);
+ let qself = qself.as_ref().map(|q| self.lower_ty(&q.ty, itctx));
+
+ let partial_res =
+ self.resolver.get_partial_res(id).unwrap_or_else(|| PartialRes::new(Res::Err));
+
+ let path_span_lo = p.span.shrink_to_lo();
+ let proj_start = p.segments.len() - partial_res.unresolved_segments();
+ let path = self.arena.alloc(hir::Path {
+ res: self.lower_res(partial_res.base_res()),
+ segments: self.arena.alloc_from_iter(p.segments[..proj_start].iter().enumerate().map(
+ |(i, segment)| {
+ let param_mode = match (qself_position, param_mode) {
+ (Some(j), ParamMode::Optional) if i < j => {
+ // This segment is part of the trait path in a
+ // qualified path - one of `a`, `b` or `Trait`
+ // in `<X as a::b::Trait>::T::U::method`.
+ ParamMode::Explicit
+ }
+ _ => param_mode,
+ };
+
+ let parenthesized_generic_args = match partial_res.base_res() {
+ // `a::b::Trait(Args)`
+ Res::Def(DefKind::Trait, _) if i + 1 == proj_start => {
+ ParenthesizedGenericArgs::Ok
+ }
+ // `a::b::Trait(Args)::TraitItem`
+ Res::Def(DefKind::AssocFn, _)
+ | Res::Def(DefKind::AssocConst, _)
+ | Res::Def(DefKind::AssocTy, _)
+ if i + 2 == proj_start =>
+ {
+ ParenthesizedGenericArgs::Ok
+ }
+ // Avoid duplicated errors.
+ Res::Err => ParenthesizedGenericArgs::Ok,
+ // An error
+ _ => ParenthesizedGenericArgs::Err,
+ };
+
+ self.lower_path_segment(
+ p.span,
+ segment,
+ param_mode,
+ parenthesized_generic_args,
+ itctx,
+ )
+ },
+ )),
+ span: self.lower_span(
+ p.segments[..proj_start]
+ .last()
+ .map_or(path_span_lo, |segment| path_span_lo.to(segment.span())),
+ ),
+ });
+
+ // Simple case, either no projections, or only fully-qualified.
+ // E.g., `std::mem::size_of` or `<I as Iterator>::Item`.
+ if partial_res.unresolved_segments() == 0 {
+ return hir::QPath::Resolved(qself, path);
+ }
+
+ // Create the innermost type that we're projecting from.
+ let mut ty = if path.segments.is_empty() {
+            // If the base path is empty, that means there exists a
+            // syntactical `Self`, e.g., `&i32` in `<&i32>::clone`.
+ qself.expect("missing QSelf for <T>::...")
+ } else {
+ // Otherwise, the base path is an implicit `Self` type path,
+ // e.g., `Vec` in `Vec::new` or `<I as Iterator>::Item` in
+ // `<I as Iterator>::Item::default`.
+ let new_id = self.next_id();
+ self.arena.alloc(self.ty_path(new_id, path.span, hir::QPath::Resolved(qself, path)))
+ };
+
+ // Anything after the base path are associated "extensions",
+ // out of which all but the last one are associated types,
+ // e.g., for `std::vec::Vec::<T>::IntoIter::Item::clone`:
+ // * base path is `std::vec::Vec<T>`
+ // * "extensions" are `IntoIter`, `Item` and `clone`
+ // * type nodes are:
+ // 1. `std::vec::Vec<T>` (created above)
+ // 2. `<std::vec::Vec<T>>::IntoIter`
+ // 3. `<<std::vec::Vec<T>>::IntoIter>::Item`
+ // * final path is `<<<std::vec::Vec<T>>::IntoIter>::Item>::clone`
+ for (i, segment) in p.segments.iter().enumerate().skip(proj_start) {
+ let hir_segment = self.arena.alloc(self.lower_path_segment(
+ p.span,
+ segment,
+ param_mode,
+ ParenthesizedGenericArgs::Err,
+ itctx,
+ ));
+ let qpath = hir::QPath::TypeRelative(ty, hir_segment);
+
+ // It's finished, return the extension of the right node type.
+ if i == p.segments.len() - 1 {
+ return qpath;
+ }
+
+ // Wrap the associated extension in another type node.
+ let new_id = self.next_id();
+ ty = self.arena.alloc(self.ty_path(new_id, path_span_lo.to(segment.span()), qpath));
+ }
+
+ // We should've returned in the for loop above.
+
+ self.diagnostic().span_bug(
+ p.span,
+ &format!(
+ "lower_qpath: no final extension segment in {}..{}",
+ proj_start,
+ p.segments.len()
+ ),
+ );
+ }
+
+ pub(crate) fn lower_path_extra(
+ &mut self,
+ res: Res,
+ p: &Path,
+ param_mode: ParamMode,
+ ) -> &'hir hir::Path<'hir> {
+ self.arena.alloc(hir::Path {
+ res,
+ segments: self.arena.alloc_from_iter(p.segments.iter().map(|segment| {
+ self.lower_path_segment(
+ p.span,
+ segment,
+ param_mode,
+ ParenthesizedGenericArgs::Err,
+ ImplTraitContext::Disallowed(ImplTraitPosition::Path),
+ )
+ })),
+ span: self.lower_span(p.span),
+ })
+ }
+
+ pub(crate) fn lower_path(
+ &mut self,
+ id: NodeId,
+ p: &Path,
+ param_mode: ParamMode,
+ ) -> &'hir hir::Path<'hir> {
+ let res = self.expect_full_res(id);
+ let res = self.lower_res(res);
+ self.lower_path_extra(res, p, param_mode)
+ }
+
+ pub(crate) fn lower_path_segment(
+ &mut self,
+ path_span: Span,
+ segment: &PathSegment,
+ param_mode: ParamMode,
+ parenthesized_generic_args: ParenthesizedGenericArgs,
+ itctx: ImplTraitContext,
+ ) -> hir::PathSegment<'hir> {
+ debug!("path_span: {:?}, lower_path_segment(segment: {:?})", path_span, segment,);
+ let (mut generic_args, infer_args) = if let Some(ref generic_args) = segment.args {
+ let msg = "parenthesized type parameters may only be used with a `Fn` trait";
+ match **generic_args {
+ GenericArgs::AngleBracketed(ref data) => {
+ self.lower_angle_bracketed_parameter_data(data, param_mode, itctx)
+ }
+ GenericArgs::Parenthesized(ref data) => match parenthesized_generic_args {
+ ParenthesizedGenericArgs::Ok => self.lower_parenthesized_parameter_data(data),
+ ParenthesizedGenericArgs::Err => {
+ let mut err = struct_span_err!(self.tcx.sess, data.span, E0214, "{}", msg);
+ err.span_label(data.span, "only `Fn` traits may use parentheses");
+                        // Suggest replacing the parentheses with angle brackets:
+                        // `Trait(params...)` to `Trait<params...>`.
+ if !data.inputs.is_empty() {
+                            // From the start of the span to the first character of the first argument.
+ let open_param = data.inputs_span.shrink_to_lo().to(data
+ .inputs
+ .first()
+ .unwrap()
+ .span
+ .shrink_to_lo());
+                            // From the last character of the last argument to the end of the span.
+ let close_param = data
+ .inputs
+ .last()
+ .unwrap()
+ .span
+ .shrink_to_hi()
+ .to(data.inputs_span.shrink_to_hi());
+ err.multipart_suggestion(
+                                "use angle brackets instead",
+ vec![
+ (open_param, String::from("<")),
+ (close_param, String::from(">")),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ (
+ self.lower_angle_bracketed_parameter_data(
+ &data.as_angle_bracketed_args(),
+ param_mode,
+ itctx,
+ )
+ .0,
+ false,
+ )
+ }
+ },
+ }
+ } else {
+ (
+ GenericArgsCtor {
+ args: Default::default(),
+ bindings: &[],
+ parenthesized: false,
+ span: path_span.shrink_to_hi(),
+ },
+ param_mode == ParamMode::Optional,
+ )
+ };
+
+ let has_lifetimes =
+ generic_args.args.iter().any(|arg| matches!(arg, GenericArg::Lifetime(_)));
+ if !generic_args.parenthesized && !has_lifetimes {
+ self.maybe_insert_elided_lifetimes_in_path(
+ path_span,
+ segment.id,
+ segment.ident.span,
+ &mut generic_args,
+ );
+ }
+
+ let res = self.expect_full_res(segment.id);
+ let id = self.lower_node_id(segment.id);
+ debug!(
+ "lower_path_segment: ident={:?} original-id={:?} new-id={:?}",
+ segment.ident, segment.id, id,
+ );
+
+ hir::PathSegment {
+ ident: self.lower_ident(segment.ident),
+ hir_id: Some(id),
+ res: Some(self.lower_res(res)),
+ infer_args,
+ args: if generic_args.is_empty() && generic_args.span.is_empty() {
+ None
+ } else {
+ Some(generic_args.into_generic_args(self))
+ },
+ }
+ }
+
+ fn maybe_insert_elided_lifetimes_in_path(
+ &mut self,
+ path_span: Span,
+ segment_id: NodeId,
+ segment_ident_span: Span,
+ generic_args: &mut GenericArgsCtor<'hir>,
+ ) {
+ let (start, end) = match self.resolver.get_lifetime_res(segment_id) {
+ Some(LifetimeRes::ElidedAnchor { start, end }) => (start, end),
+ None => return,
+ Some(_) => panic!(),
+ };
+ let expected_lifetimes = end.as_usize() - start.as_usize();
+ debug!(expected_lifetimes);
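+        // E.g., for a hypothetical `struct Ref<'a, T>(&'a T);` used as `Ref<T>`, a single
+        // elided `'_` argument is inserted up front, as if `Ref<'_, T>` had been written.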
+
+ // Note: these spans are used for diagnostics when they can't be inferred.
+ // See rustc_resolve::late::lifetimes::LifetimeContext::add_missing_lifetime_specifiers_label
+ let elided_lifetime_span = if generic_args.span.is_empty() {
+ // If there are no brackets, use the identifier span.
+ // HACK: we use find_ancestor_inside to properly suggest elided spans in paths
+ // originating from macros, since the segment's span might be from a macro arg.
+ segment_ident_span.find_ancestor_inside(path_span).unwrap_or(path_span)
+ } else if generic_args.is_empty() {
+            // If there are brackets, but no generic arguments, then use the opening bracket.
+ generic_args.span.with_hi(generic_args.span.lo() + BytePos(1))
+ } else {
+ // Else use an empty span right after the opening bracket.
+ generic_args.span.with_lo(generic_args.span.lo() + BytePos(1)).shrink_to_lo()
+ };
+
+ generic_args.args.insert_many(
+ 0,
+ (start.as_u32()..end.as_u32()).map(|i| {
+ let id = NodeId::from_u32(i);
+ let l = self.lower_lifetime(&Lifetime {
+ id,
+ ident: Ident::new(kw::UnderscoreLifetime, elided_lifetime_span),
+ });
+ GenericArg::Lifetime(l)
+ }),
+ );
+ }
+
+ pub(crate) fn lower_angle_bracketed_parameter_data(
+ &mut self,
+ data: &AngleBracketedArgs,
+ param_mode: ParamMode,
+ itctx: ImplTraitContext,
+ ) -> (GenericArgsCtor<'hir>, bool) {
+ let has_non_lt_args = data.args.iter().any(|arg| match arg {
+ AngleBracketedArg::Arg(ast::GenericArg::Lifetime(_))
+ | AngleBracketedArg::Constraint(_) => false,
+ AngleBracketedArg::Arg(ast::GenericArg::Type(_) | ast::GenericArg::Const(_)) => true,
+ });
+ let args = data
+ .args
+ .iter()
+ .filter_map(|arg| match arg {
+ AngleBracketedArg::Arg(arg) => Some(self.lower_generic_arg(arg, itctx)),
+ AngleBracketedArg::Constraint(_) => None,
+ })
+ .collect();
+ let bindings = self.arena.alloc_from_iter(data.args.iter().filter_map(|arg| match arg {
+ AngleBracketedArg::Constraint(c) => Some(self.lower_assoc_ty_constraint(c, itctx)),
+ AngleBracketedArg::Arg(_) => None,
+ }));
+ let ctor = GenericArgsCtor { args, bindings, parenthesized: false, span: data.span };
+ (ctor, !has_non_lt_args && param_mode == ParamMode::Optional)
+ }
+
+ fn lower_parenthesized_parameter_data(
+ &mut self,
+ data: &ParenthesizedArgs,
+ ) -> (GenericArgsCtor<'hir>, bool) {
+ // Switch to `PassThrough` mode for anonymous lifetimes; this
+ // means that we permit things like `&Ref<T>`, where `Ref` has
+ // a hidden lifetime parameter. This is needed for backwards
+ // compatibility, even in contexts like an impl header where
+ // we generally don't permit such things (see #51008).
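+        // Conceptually (illustrative), `Fn(u32) -> bool` is lowered as if it
+        // were written `Fn<(u32,), Output = bool>`: the inputs become a single
+        // tuple type argument and the return type becomes an `Output` binding.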
+ let ParenthesizedArgs { span, inputs, inputs_span, output } = data;
+ let inputs = self.arena.alloc_from_iter(inputs.iter().map(|ty| {
+ self.lower_ty_direct(ty, ImplTraitContext::Disallowed(ImplTraitPosition::FnTraitParam))
+ }));
+ let output_ty = match output {
+ FnRetTy::Ty(ty) => {
+ self.lower_ty(&ty, ImplTraitContext::Disallowed(ImplTraitPosition::FnTraitReturn))
+ }
+ FnRetTy::Default(_) => self.arena.alloc(self.ty_tup(*span, &[])),
+ };
+ let args = smallvec![GenericArg::Type(self.ty_tup(*inputs_span, inputs))];
+ let binding = self.output_ty_binding(output_ty.span, output_ty);
+ (
+ GenericArgsCtor {
+ args,
+ bindings: arena_vec![self; binding],
+ parenthesized: true,
+ span: data.inputs_span,
+ },
+ false,
+ )
+ }
+
+ /// An associated type binding `Output = $ty`.
+ pub(crate) fn output_ty_binding(
+ &mut self,
+ span: Span,
+ ty: &'hir hir::Ty<'hir>,
+ ) -> hir::TypeBinding<'hir> {
+ let ident = Ident::with_dummy_span(hir::FN_OUTPUT_NAME);
+ let kind = hir::TypeBindingKind::Equality { term: ty.into() };
+ let args = arena_vec![self;];
+ let bindings = arena_vec![self;];
+ let gen_args = self.arena.alloc(hir::GenericArgs {
+ args,
+ bindings,
+ parenthesized: false,
+ span_ext: DUMMY_SP,
+ });
+ hir::TypeBinding {
+ hir_id: self.next_id(),
+ gen_args,
+ span: self.lower_span(span),
+ ident,
+ kind,
+ }
+ }
+}
diff --git a/compiler/rustc_ast_passes/Cargo.toml b/compiler/rustc_ast_passes/Cargo.toml
new file mode 100644
index 000000000..22742b2ad
--- /dev/null
+++ b/compiler/rustc_ast_passes/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "rustc_ast_passes"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+itertools = "0.10.1"
+tracing = "0.1"
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_parse = { path = "../rustc_parse" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+rustc_ast = { path = "../rustc_ast" }
diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs
new file mode 100644
index 000000000..2d9d0073f
--- /dev/null
+++ b/compiler/rustc_ast_passes/src/ast_validation.rs
@@ -0,0 +1,1909 @@
+// Validate AST before lowering it to HIR.
+//
+// This pass is supposed to catch things that fit into AST data structures,
+// but are not permitted by the language. It runs after expansion, when the AST
+// is frozen, so it can check for erroneous constructions produced by syntax
+// extensions. This pass is supposed to perform only simple checks not requiring
+// name resolution, type checking, or any other kind of complex analysis.
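+//
+// An illustrative example: `unsafe mod m {}` parses into an AST item
+// (`ItemKind::Mod` carries its `Unsafe` qualifier), but modules cannot be
+// declared unsafe, so the error is reported by this pass (see `visit_item`
+// below) rather than by the parser.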
+
+use itertools::{Either, Itertools};
+use rustc_ast::ptr::P;
+use rustc_ast::visit::{self, AssocCtxt, BoundKind, FnCtxt, FnKind, Visitor};
+use rustc_ast::walk_list;
+use rustc_ast::*;
+use rustc_ast_pretty::pprust::{self, State};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{
+ error_code, pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed,
+};
+use rustc_parse::validate_attr;
+use rustc_session::lint::builtin::{
+ DEPRECATED_WHERE_CLAUSE_LOCATION, MISSING_ABI, PATTERNS_IN_FNS_WITHOUT_BODY,
+};
+use rustc_session::lint::{BuiltinLintDiagnostics, LintBuffer};
+use rustc_session::Session;
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::Span;
+use rustc_target::spec::abi;
+use std::mem;
+use std::ops::{Deref, DerefMut};
+
+const MORE_EXTERN: &str =
+ "for more information, visit https://doc.rust-lang.org/std/keyword.extern.html";
+
+/// Is `self` allowed semantically as the first parameter in an `FnDecl`?
+enum SelfSemantic {
+ Yes,
+ No,
+}
+
+struct AstValidator<'a> {
+ session: &'a Session,
+
+    /// The `extern { ... }` item we are currently inside, if any.
+ extern_mod: Option<&'a Item>,
+
+ /// Are we inside a trait impl?
+ in_trait_impl: bool,
+
+ in_const_trait_impl: bool,
+
+ has_proc_macro_decls: bool,
+
+ /// Used to ban nested `impl Trait`, e.g., `impl Into<impl Debug>`.
+ /// Nested `impl Trait` _is_ allowed in associated type position,
+ /// e.g., `impl Iterator<Item = impl Debug>`.
+ outer_impl_trait: Option<Span>,
+
+ is_tilde_const_allowed: bool,
+
+ /// Used to ban `impl Trait` in path projections like `<impl Iterator>::Item`
+ /// or `Foo::Bar<impl Trait>`
+ is_impl_trait_banned: bool,
+
+ /// Used to ban associated type bounds (i.e., `Type<AssocType: Bounds>`) in
+ /// certain positions.
+ is_assoc_ty_bound_banned: bool,
+
+ /// See [ForbiddenLetReason]
+ forbidden_let_reason: Option<ForbiddenLetReason>,
+
+ lint_buffer: &'a mut LintBuffer,
+}
+
+impl<'a> AstValidator<'a> {
+ fn with_in_trait_impl(
+ &mut self,
+ is_in: bool,
+ constness: Option<Const>,
+ f: impl FnOnce(&mut Self),
+ ) {
+ let old = mem::replace(&mut self.in_trait_impl, is_in);
+ let old_const =
+ mem::replace(&mut self.in_const_trait_impl, matches!(constness, Some(Const::Yes(_))));
+ f(self);
+ self.in_trait_impl = old;
+ self.in_const_trait_impl = old_const;
+ }
+
+ fn with_banned_impl_trait(&mut self, f: impl FnOnce(&mut Self)) {
+ let old = mem::replace(&mut self.is_impl_trait_banned, true);
+ f(self);
+ self.is_impl_trait_banned = old;
+ }
+
+ fn with_tilde_const(&mut self, allowed: bool, f: impl FnOnce(&mut Self)) {
+ let old = mem::replace(&mut self.is_tilde_const_allowed, allowed);
+ f(self);
+ self.is_tilde_const_allowed = old;
+ }
+
+ fn with_tilde_const_allowed(&mut self, f: impl FnOnce(&mut Self)) {
+ self.with_tilde_const(true, f)
+ }
+
+ fn with_banned_tilde_const(&mut self, f: impl FnOnce(&mut Self)) {
+ self.with_tilde_const(false, f)
+ }
+
+ fn with_let_management(
+ &mut self,
+ forbidden_let_reason: Option<ForbiddenLetReason>,
+ f: impl FnOnce(&mut Self, Option<ForbiddenLetReason>),
+ ) {
+ let old = mem::replace(&mut self.forbidden_let_reason, forbidden_let_reason);
+ f(self, old);
+ self.forbidden_let_reason = old;
+ }
+
+ /// Emits an error banning the `let` expression provided in the given location.
+ fn ban_let_expr(&self, expr: &'a Expr, forbidden_let_reason: ForbiddenLetReason) {
+ let sess = &self.session;
+ if sess.opts.unstable_features.is_nightly_build() {
+ let err = "`let` expressions are not supported here";
+ let mut diag = sess.struct_span_err(expr.span, err);
+ diag.note("only supported directly in conditions of `if` and `while` expressions");
+ match forbidden_let_reason {
+ ForbiddenLetReason::GenericForbidden => {}
+ ForbiddenLetReason::NotSupportedOr(span) => {
+ diag.span_note(
+ span,
+ "`||` operators are not supported in let chain expressions",
+ );
+ }
+ ForbiddenLetReason::NotSupportedParentheses(span) => {
+ diag.span_note(
+ span,
+ "`let`s wrapped in parentheses are not supported in a context with let \
+ chains",
+ );
+ }
+ }
+ diag.emit();
+ } else {
+ sess.struct_span_err(expr.span, "expected expression, found statement (`let`)")
+ .note("variable declaration using `let` is a statement")
+ .emit();
+ }
+ }
+
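+    /// For a type alias in an impl whose `where` clause appears before the `=`
+    /// (e.g., illustratively, `type Assoc<T> where T: Copy = T;`), buffer the
+    /// `DEPRECATED_WHERE_CLAUSE_LOCATION` lint suggesting the predicates be
+    /// moved after the type.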
+ fn check_gat_where(
+ &mut self,
+ id: NodeId,
+ before_predicates: &[WherePredicate],
+ where_clauses: (ast::TyAliasWhereClause, ast::TyAliasWhereClause),
+ ) {
+ if !before_predicates.is_empty() {
+ let mut state = State::new();
+ if !where_clauses.1.0 {
+ state.space();
+ state.word_space("where");
+ } else {
+ state.word_space(",");
+ }
+ let mut first = true;
+ for p in before_predicates.iter() {
+ if !first {
+ state.word_space(",");
+ }
+ first = false;
+ state.print_where_predicate(p);
+ }
+ let suggestion = state.s.eof();
+ self.lint_buffer.buffer_lint_with_diagnostic(
+ DEPRECATED_WHERE_CLAUSE_LOCATION,
+ id,
+ where_clauses.0.1,
+ "where clause not allowed here",
+ BuiltinLintDiagnostics::DeprecatedWhereclauseLocation(
+ where_clauses.1.1.shrink_to_hi(),
+ suggestion,
+ ),
+ );
+ }
+ }
+
+ fn with_banned_assoc_ty_bound(&mut self, f: impl FnOnce(&mut Self)) {
+ let old = mem::replace(&mut self.is_assoc_ty_bound_banned, true);
+ f(self);
+ self.is_assoc_ty_bound_banned = old;
+ }
+
+ fn with_impl_trait(&mut self, outer: Option<Span>, f: impl FnOnce(&mut Self)) {
+ let old = mem::replace(&mut self.outer_impl_trait, outer);
+ if outer.is_some() {
+ self.with_banned_tilde_const(f);
+ } else {
+ f(self);
+ }
+ self.outer_impl_trait = old;
+ }
+
+ fn visit_assoc_constraint_from_generic_args(&mut self, constraint: &'a AssocConstraint) {
+ match constraint.kind {
+ AssocConstraintKind::Equality { .. } => {}
+ AssocConstraintKind::Bound { .. } => {
+ if self.is_assoc_ty_bound_banned {
+ self.err_handler().span_err(
+ constraint.span,
+ "associated type bounds are not allowed within structs, enums, or unions",
+ );
+ }
+ }
+ }
+ self.visit_assoc_constraint(constraint);
+ }
+
+ // Mirrors `visit::walk_ty`, but tracks relevant state.
+ fn walk_ty(&mut self, t: &'a Ty) {
+ match t.kind {
+ TyKind::ImplTrait(..) => {
+ self.with_impl_trait(Some(t.span), |this| visit::walk_ty(this, t))
+ }
+ TyKind::TraitObject(..) => self.with_banned_tilde_const(|this| visit::walk_ty(this, t)),
+ TyKind::Path(ref qself, ref path) => {
+ // We allow these:
+ // - `Option<impl Trait>`
+ // - `option::Option<impl Trait>`
+ // - `option::Option<T>::Foo<impl Trait>
+ //
+ // But not these:
+ // - `<impl Trait>::Foo`
+ // - `option::Option<impl Trait>::Foo`.
+ //
+ // To implement this, we disallow `impl Trait` from `qself`
+ // (for cases like `<impl Trait>::Foo>`)
+ // but we allow `impl Trait` in `GenericArgs`
+ // iff there are no more PathSegments.
+ if let Some(ref qself) = *qself {
+ // `impl Trait` in `qself` is always illegal
+ self.with_banned_impl_trait(|this| this.visit_ty(&qself.ty));
+ }
+
+ // Note that there should be a call to visit_path here,
+ // so if any logic is added to process `Path`s a call to it should be
+ // added both in visit_path and here. This code mirrors visit::walk_path.
+ for (i, segment) in path.segments.iter().enumerate() {
+ // Allow `impl Trait` iff we're on the final path segment
+ if i == path.segments.len() - 1 {
+ self.visit_path_segment(path.span, segment);
+ } else {
+ self.with_banned_impl_trait(|this| {
+ this.visit_path_segment(path.span, segment)
+ });
+ }
+ }
+ }
+ _ => visit::walk_ty(self, t),
+ }
+ }
+
+ fn visit_struct_field_def(&mut self, field: &'a FieldDef) {
+ if let Some(ident) = field.ident {
+ if ident.name == kw::Underscore {
+ self.visit_vis(&field.vis);
+ self.visit_ident(ident);
+ self.visit_ty_common(&field.ty);
+ self.walk_ty(&field.ty);
+ walk_list!(self, visit_attribute, &field.attrs);
+ return;
+ }
+ }
+ self.visit_field_def(field);
+ }
+
+ fn err_handler(&self) -> &rustc_errors::Handler {
+ &self.session.diagnostic()
+ }
+
+ fn check_lifetime(&self, ident: Ident) {
+ let valid_names = [kw::UnderscoreLifetime, kw::StaticLifetime, kw::Empty];
+ if !valid_names.contains(&ident.name) && ident.without_first_quote().is_reserved() {
+ self.err_handler().span_err(ident.span, "lifetimes cannot use keyword names");
+ }
+ }
+
+ fn check_label(&self, ident: Ident) {
+ if ident.without_first_quote().is_reserved() {
+ self.err_handler()
+ .span_err(ident.span, &format!("invalid label name `{}`", ident.name));
+ }
+ }
+
+ fn invalid_visibility(&self, vis: &Visibility, note: Option<&str>) {
+ if let VisibilityKind::Inherited = vis.kind {
+ return;
+ }
+
+ let mut err =
+ struct_span_err!(self.session, vis.span, E0449, "unnecessary visibility qualifier");
+ if vis.kind.is_pub() {
+ err.span_label(vis.span, "`pub` not permitted here because it's implied");
+ }
+ if let Some(note) = note {
+ err.note(note);
+ }
+ err.emit();
+ }
+
+ fn check_decl_no_pat(decl: &FnDecl, mut report_err: impl FnMut(Span, Option<Ident>, bool)) {
+ for Param { pat, .. } in &decl.inputs {
+ match pat.kind {
+ PatKind::Ident(BindingMode::ByValue(Mutability::Not), _, None) | PatKind::Wild => {}
+ PatKind::Ident(BindingMode::ByValue(Mutability::Mut), ident, None) => {
+ report_err(pat.span, Some(ident), true)
+ }
+ _ => report_err(pat.span, None, false),
+ }
+ }
+ }
+
+ fn check_trait_fn_not_async(&self, fn_span: Span, asyncness: Async) {
+ if let Async::Yes { span, .. } = asyncness {
+ struct_span_err!(
+ self.session,
+ fn_span,
+ E0706,
+ "functions in traits cannot be declared `async`"
+ )
+ .span_label(span, "`async` because of this")
+ .note("`async` trait functions are not currently supported")
+ .note("consider using the `async-trait` crate: https://crates.io/crates/async-trait")
+ .emit();
+ }
+ }
+
+ fn check_trait_fn_not_const(&self, constness: Const) {
+ if let Const::Yes(span) = constness {
+ struct_span_err!(
+ self.session,
+ span,
+ E0379,
+ "functions in traits cannot be declared const"
+ )
+ .span_label(span, "functions in traits cannot be const")
+ .emit();
+ }
+ }
+
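+    /// Late-bound quantifiers may introduce only lifetime parameters, and those
+    /// lifetimes may not carry bounds: e.g., illustratively, `for<'a, T> ...`
+    /// and `for<'a: 'b> ...` are both rejected here.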
+ fn check_late_bound_lifetime_defs(&self, params: &[GenericParam]) {
+        // Check that only lifetime parameters are present and that those
+        // lifetime parameters have no bounds.
+ let non_lt_param_spans: Vec<_> = params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ if !param.bounds.is_empty() {
+ let spans: Vec<_> = param.bounds.iter().map(|b| b.span()).collect();
+ self.err_handler()
+ .span_err(spans, "lifetime bounds cannot be used in this context");
+ }
+ None
+ }
+ _ => Some(param.ident.span),
+ })
+ .collect();
+ if !non_lt_param_spans.is_empty() {
+ self.err_handler().span_err(
+ non_lt_param_spans,
+ "only lifetime parameters can be used in this context",
+ );
+ }
+ }
+
+ fn check_fn_decl(&self, fn_decl: &FnDecl, self_semantic: SelfSemantic) {
+ self.check_decl_num_args(fn_decl);
+ self.check_decl_cvaradic_pos(fn_decl);
+ self.check_decl_attrs(fn_decl);
+ self.check_decl_self_param(fn_decl, self_semantic);
+ }
+
+    /// Emits a fatal error if a function declaration has more than `u16::MAX` arguments.
+    /// The error is fatal to prevent further errors during typechecking.
+ fn check_decl_num_args(&self, fn_decl: &FnDecl) {
+ let max_num_args: usize = u16::MAX.into();
+ if fn_decl.inputs.len() > max_num_args {
+ let Param { span, .. } = fn_decl.inputs[0];
+ self.err_handler().span_fatal(
+ span,
+ &format!("function can not have more than {} arguments", max_num_args),
+ );
+ }
+ }
+
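+    /// `...` may only appear as the last parameter and never on its own:
+    /// e.g., illustratively, both `fn f(...)` and `fn f(..., x: u8)` are rejected.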
+ fn check_decl_cvaradic_pos(&self, fn_decl: &FnDecl) {
+ match &*fn_decl.inputs {
+ [Param { ty, span, .. }] => {
+ if let TyKind::CVarArgs = ty.kind {
+ self.err_handler().span_err(
+ *span,
+ "C-variadic function must be declared with at least one named argument",
+ );
+ }
+ }
+ [ps @ .., _] => {
+ for Param { ty, span, .. } in ps {
+ if let TyKind::CVarArgs = ty.kind {
+ self.err_handler().span_err(
+ *span,
+ "`...` must be the last argument of a C-variadic function",
+ );
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ fn check_decl_attrs(&self, fn_decl: &FnDecl) {
+ fn_decl
+ .inputs
+ .iter()
+ .flat_map(|i| i.attrs.as_ref())
+ .filter(|attr| {
+ let arr = [
+ sym::allow,
+ sym::cfg,
+ sym::cfg_attr,
+ sym::deny,
+ sym::expect,
+ sym::forbid,
+ sym::warn,
+ ];
+ !arr.contains(&attr.name_or_empty()) && rustc_attr::is_builtin_attr(attr)
+ })
+ .for_each(|attr| {
+ if attr.is_doc_comment() {
+ self.err_handler()
+ .struct_span_err(
+ attr.span,
+ "documentation comments cannot be applied to function parameters",
+ )
+ .span_label(attr.span, "doc comments are not allowed here")
+ .emit();
+ } else {
+ self.err_handler().span_err(
+ attr.span,
+ "allow, cfg, cfg_attr, deny, expect, \
+ forbid, and warn are the only allowed built-in attributes in function parameters",
+ );
+ }
+ });
+ }
+
+ fn check_decl_self_param(&self, fn_decl: &FnDecl, self_semantic: SelfSemantic) {
+ if let (SelfSemantic::No, [param, ..]) = (self_semantic, &*fn_decl.inputs) {
+ if param.is_self() {
+ self.err_handler()
+ .struct_span_err(
+ param.span,
+ "`self` parameter is only allowed in associated functions",
+ )
+ .span_label(param.span, "not semantically valid as function parameter")
+ .note("associated functions are those in `impl` or `trait` definitions")
+ .emit();
+ }
+ }
+ }
+
+ fn check_defaultness(&self, span: Span, defaultness: Defaultness) {
+ if let Defaultness::Default(def_span) = defaultness {
+ let span = self.session.source_map().guess_head_span(span);
+ self.err_handler()
+ .struct_span_err(span, "`default` is only allowed on items in trait impls")
+ .span_label(def_span, "`default` because of this")
+ .emit();
+ }
+ }
+
+ fn error_item_without_body(&self, sp: Span, ctx: &str, msg: &str, sugg: &str) {
+ self.error_item_without_body_with_help(sp, ctx, msg, sugg, |_| ());
+ }
+
+ fn error_item_without_body_with_help(
+ &self,
+ sp: Span,
+ ctx: &str,
+ msg: &str,
+ sugg: &str,
+ help: impl FnOnce(&mut DiagnosticBuilder<'_, ErrorGuaranteed>),
+ ) {
+ let source_map = self.session.source_map();
+ let end = source_map.end_point(sp);
+ let replace_span = if source_map.span_to_snippet(end).map(|s| s == ";").unwrap_or(false) {
+ end
+ } else {
+ sp.shrink_to_hi()
+ };
+ let mut err = self.err_handler().struct_span_err(sp, msg);
+ err.span_suggestion(
+ replace_span,
+ &format!("provide a definition for the {}", ctx),
+ sugg,
+ Applicability::HasPlaceholders,
+ );
+ help(&mut err);
+ err.emit();
+ }
+
+ fn check_impl_item_provided<T>(&self, sp: Span, body: &Option<T>, ctx: &str, sugg: &str) {
+ if body.is_none() {
+ let msg = format!("associated {} in `impl` without body", ctx);
+ self.error_item_without_body(sp, ctx, &msg, sugg);
+ }
+ }
+
+ fn check_type_no_bounds(&self, bounds: &[GenericBound], ctx: &str) {
+ let span = match bounds {
+ [] => return,
+ [b0] => b0.span(),
+ [b0, .., bl] => b0.span().to(bl.span()),
+ };
+ self.err_handler()
+ .struct_span_err(span, &format!("bounds on `type`s in {} have no effect", ctx))
+ .emit();
+ }
+
+ fn check_foreign_ty_genericless(&self, generics: &Generics, where_span: Span) {
+ let cannot_have = |span, descr, remove_descr| {
+ self.err_handler()
+ .struct_span_err(
+ span,
+ &format!("`type`s inside `extern` blocks cannot have {}", descr),
+ )
+ .span_suggestion(
+ span,
+ &format!("remove the {}", remove_descr),
+ "",
+ Applicability::MaybeIncorrect,
+ )
+ .span_label(self.current_extern_span(), "`extern` block begins here")
+ .note(MORE_EXTERN)
+ .emit();
+ };
+
+ if !generics.params.is_empty() {
+ cannot_have(generics.span, "generic parameters", "generic parameters");
+ }
+
+ if !generics.where_clause.predicates.is_empty() {
+ cannot_have(where_span, "`where` clauses", "`where` clause");
+ }
+ }
+
+ fn check_foreign_kind_bodyless(&self, ident: Ident, kind: &str, body: Option<Span>) {
+ let Some(body) = body else {
+ return;
+ };
+ self.err_handler()
+ .struct_span_err(ident.span, &format!("incorrect `{}` inside `extern` block", kind))
+ .span_label(ident.span, "cannot have a body")
+ .span_label(body, "the invalid body")
+ .span_label(
+ self.current_extern_span(),
+ format!(
+ "`extern` blocks define existing foreign {0}s and {0}s \
+ inside of them cannot have a body",
+ kind
+ ),
+ )
+ .note(MORE_EXTERN)
+ .emit();
+ }
+
+ /// An `fn` in `extern { ... }` cannot have a body `{ ... }`.
+ fn check_foreign_fn_bodyless(&self, ident: Ident, body: Option<&Block>) {
+ let Some(body) = body else {
+ return;
+ };
+ self.err_handler()
+ .struct_span_err(ident.span, "incorrect function inside `extern` block")
+ .span_label(ident.span, "cannot have a body")
+ .span_suggestion(
+ body.span,
+ "remove the invalid body",
+ ";",
+ Applicability::MaybeIncorrect,
+ )
+ .help(
+ "you might have meant to write a function accessible through FFI, \
+ which can be done by writing `extern fn` outside of the `extern` block",
+ )
+ .span_label(
+ self.current_extern_span(),
+ "`extern` blocks define existing foreign functions and functions \
+ inside of them cannot have a body",
+ )
+ .note(MORE_EXTERN)
+ .emit();
+ }
+
+ fn current_extern_span(&self) -> Span {
+ self.session.source_map().guess_head_span(self.extern_mod.unwrap().span)
+ }
+
+ /// An `fn` in `extern { ... }` cannot have qualifiers, e.g. `async fn`.
+ fn check_foreign_fn_headerless(&self, ident: Ident, span: Span, header: FnHeader) {
+ if header.has_qualifiers() {
+ self.err_handler()
+ .struct_span_err(ident.span, "functions in `extern` blocks cannot have qualifiers")
+ .span_label(self.current_extern_span(), "in this `extern` block")
+ .span_suggestion_verbose(
+ span.until(ident.span.shrink_to_lo()),
+ "remove the qualifiers",
+ "fn ",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+ }
+
+    /// An item in `extern { ... }` cannot use a non-ASCII identifier.
+ fn check_foreign_item_ascii_only(&self, ident: Ident) {
+ if !ident.as_str().is_ascii() {
+ let n = 83942;
+ self.err_handler()
+ .struct_span_err(
+ ident.span,
+ "items in `extern` blocks cannot use non-ascii identifiers",
+ )
+ .span_label(self.current_extern_span(), "in this `extern` block")
+ .note(&format!(
+ "this limitation may be lifted in the future; see issue #{} <https://github.com/rust-lang/rust/issues/{}> for more information",
+ n, n,
+ ))
+ .emit();
+ }
+ }
+
+    /// Reject the C-variadic type `...` unless the function is foreign,
+    /// or free and semantically `unsafe extern "C"`.
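+    ///
+    /// For example (illustrative), `unsafe extern "C" fn f(x: u8, ...) {}` is
+    /// accepted as a free function, while a plain `fn f(x: u8, ...) {}` is not.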
+ fn check_c_variadic_type(&self, fk: FnKind<'a>) {
+ match (fk.ctxt(), fk.header()) {
+ (Some(FnCtxt::Foreign), _) => return,
+ (Some(FnCtxt::Free), Some(header)) => match header.ext {
+ Extern::Explicit(StrLit { symbol_unescaped: sym::C, .. }, _)
+ | Extern::Implicit(_)
+ if matches!(header.unsafety, Unsafe::Yes(_)) =>
+ {
+ return;
+ }
+ _ => {}
+ },
+ _ => {}
+ };
+
+ for Param { ty, span, .. } in &fk.decl().inputs {
+ if let TyKind::CVarArgs = ty.kind {
+ self.err_handler()
+ .struct_span_err(
+ *span,
+ "only foreign or `unsafe extern \"C\"` functions may be C-variadic",
+ )
+ .emit();
+ }
+ }
+ }
+
+ fn check_item_named(&self, ident: Ident, kind: &str) {
+ if ident.name != kw::Underscore {
+ return;
+ }
+ self.err_handler()
+ .struct_span_err(ident.span, &format!("`{}` items in this context need a name", kind))
+ .span_label(ident.span, format!("`_` is not a valid name for this `{}` item", kind))
+ .emit();
+ }
+
+ fn check_nomangle_item_asciionly(&self, ident: Ident, item_span: Span) {
+ if ident.name.as_str().is_ascii() {
+ return;
+ }
+ let head_span = self.session.source_map().guess_head_span(item_span);
+ struct_span_err!(
+ self.session,
+ head_span,
+ E0754,
+ "`#[no_mangle]` requires ASCII identifier"
+ )
+ .emit();
+ }
+
+ fn check_mod_file_item_asciionly(&self, ident: Ident) {
+ if ident.name.as_str().is_ascii() {
+ return;
+ }
+ struct_span_err!(
+ self.session,
+ ident.span,
+ E0754,
+ "trying to load file for module `{}` with non-ascii identifier name",
+ ident.name
+ )
+ .help("consider using `#[path]` attribute to specify filesystem path")
+ .emit();
+ }
+
+ fn deny_generic_params(&self, generics: &Generics, ident_span: Span) {
+ if !generics.params.is_empty() {
+ struct_span_err!(
+ self.session,
+ generics.span,
+ E0567,
+ "auto traits cannot have generic parameters"
+ )
+ .span_label(ident_span, "auto trait cannot have generic parameters")
+ .span_suggestion(
+ generics.span,
+ "remove the parameters",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ }
+
+ fn emit_e0568(&self, span: Span, ident_span: Span) {
+ struct_span_err!(
+ self.session,
+ span,
+ E0568,
+ "auto traits cannot have super traits or lifetime bounds"
+ )
+ .span_label(ident_span, "auto trait cannot have super traits or lifetime bounds")
+ .span_suggestion(
+ span,
+ "remove the super traits or lifetime bounds",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+ fn deny_super_traits(&self, bounds: &GenericBounds, ident_span: Span) {
+ if let [.., last] = &bounds[..] {
+ let span = ident_span.shrink_to_hi().to(last.span());
+ self.emit_e0568(span, ident_span);
+ }
+ }
+
+ fn deny_where_clause(&self, where_clause: &WhereClause, ident_span: Span) {
+ if !where_clause.predicates.is_empty() {
+ self.emit_e0568(where_clause.span, ident_span);
+ }
+ }
+
+ fn deny_items(&self, trait_items: &[P<AssocItem>], ident_span: Span) {
+ if !trait_items.is_empty() {
+ let spans: Vec<_> = trait_items.iter().map(|i| i.ident.span).collect();
+ let total_span = trait_items.first().unwrap().span.to(trait_items.last().unwrap().span);
+ struct_span_err!(
+ self.session,
+ spans,
+ E0380,
+ "auto traits cannot have associated items"
+ )
+ .span_suggestion(
+ total_span,
+ "remove these associated items",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .span_label(ident_span, "auto trait cannot have associated items")
+ .emit();
+ }
+ }
+
+ fn correct_generic_order_suggestion(&self, data: &AngleBracketedArgs) -> String {
+ // Lifetimes always come first.
+ let lt_sugg = data.args.iter().filter_map(|arg| match arg {
+ AngleBracketedArg::Arg(lt @ GenericArg::Lifetime(_)) => {
+ Some(pprust::to_string(|s| s.print_generic_arg(lt)))
+ }
+ _ => None,
+ });
+ let args_sugg = data.args.iter().filter_map(|a| match a {
+ AngleBracketedArg::Arg(GenericArg::Lifetime(_)) | AngleBracketedArg::Constraint(_) => {
+ None
+ }
+ AngleBracketedArg::Arg(arg) => Some(pprust::to_string(|s| s.print_generic_arg(arg))),
+ });
+ // Constraints always come last.
+ let constraint_sugg = data.args.iter().filter_map(|a| match a {
+ AngleBracketedArg::Arg(_) => None,
+ AngleBracketedArg::Constraint(c) => {
+ Some(pprust::to_string(|s| s.print_assoc_constraint(c)))
+ }
+ });
+ format!(
+ "<{}>",
+ lt_sugg.chain(args_sugg).chain(constraint_sugg).collect::<Vec<String>>().join(", ")
+ )
+ }
+
+    /// Enforce that generic arguments come before associated-type constraints in
+    /// the `<...>` of a path segment.
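+    ///
+    /// For example (illustrative), `Foo<Bar = (), usize>` is rejected with a
+    /// suggestion to reorder it as `Foo<usize, Bar = ()>`.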
+ fn check_generic_args_before_constraints(&self, data: &AngleBracketedArgs) {
+ // Early exit in case it's partitioned as it should be.
+ if data.args.iter().is_partitioned(|arg| matches!(arg, AngleBracketedArg::Arg(_))) {
+ return;
+ }
+        // Find all generic arguments coming after the first constraint...
+ let (constraint_spans, arg_spans): (Vec<Span>, Vec<Span>) =
+ data.args.iter().partition_map(|arg| match arg {
+ AngleBracketedArg::Constraint(c) => Either::Left(c.span),
+ AngleBracketedArg::Arg(a) => Either::Right(a.span()),
+ });
+ let args_len = arg_spans.len();
+ let constraint_len = constraint_spans.len();
+ // ...and then error:
+ self.err_handler()
+ .struct_span_err(
+ arg_spans.clone(),
+ "generic arguments must come before the first constraint",
+ )
+ .span_label(constraint_spans[0], &format!("constraint{}", pluralize!(constraint_len)))
+ .span_label(
+ *arg_spans.iter().last().unwrap(),
+ &format!("generic argument{}", pluralize!(args_len)),
+ )
+ .span_labels(constraint_spans, "")
+ .span_labels(arg_spans, "")
+ .span_suggestion_verbose(
+ data.span,
+ &format!(
+ "move the constraint{} after the generic argument{}",
+ pluralize!(constraint_len),
+ pluralize!(args_len)
+ ),
+ self.correct_generic_order_suggestion(&data),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+ fn visit_ty_common(&mut self, ty: &'a Ty) {
+ match ty.kind {
+ TyKind::BareFn(ref bfty) => {
+ self.check_fn_decl(&bfty.decl, SelfSemantic::No);
+ Self::check_decl_no_pat(&bfty.decl, |span, _, _| {
+ struct_span_err!(
+ self.session,
+ span,
+ E0561,
+ "patterns aren't allowed in function pointer types"
+ )
+ .emit();
+ });
+ self.check_late_bound_lifetime_defs(&bfty.generic_params);
+ if let Extern::Implicit(_) = bfty.ext {
+ let sig_span = self.session.source_map().next_point(ty.span.shrink_to_lo());
+ self.maybe_lint_missing_abi(sig_span, ty.id);
+ }
+ }
+ TyKind::TraitObject(ref bounds, ..) => {
+ let mut any_lifetime_bounds = false;
+ for bound in bounds {
+ if let GenericBound::Outlives(ref lifetime) = *bound {
+ if any_lifetime_bounds {
+ struct_span_err!(
+ self.session,
+ lifetime.ident.span,
+ E0226,
+ "only a single explicit lifetime bound is permitted"
+ )
+ .emit();
+ break;
+ }
+ any_lifetime_bounds = true;
+ }
+ }
+ }
+ TyKind::ImplTrait(_, ref bounds) => {
+ if self.is_impl_trait_banned {
+ struct_span_err!(
+ self.session,
+ ty.span,
+ E0667,
+ "`impl Trait` is not allowed in path parameters"
+ )
+ .emit();
+ }
+
+ if let Some(outer_impl_trait_sp) = self.outer_impl_trait {
+ struct_span_err!(
+ self.session,
+ ty.span,
+ E0666,
+ "nested `impl Trait` is not allowed"
+ )
+ .span_label(outer_impl_trait_sp, "outer `impl Trait`")
+ .span_label(ty.span, "nested `impl Trait` here")
+ .emit();
+ }
+
+ if !bounds.iter().any(|b| matches!(b, GenericBound::Trait(..))) {
+ self.err_handler().span_err(ty.span, "at least one trait must be specified");
+ }
+ }
+ _ => {}
+ }
+ }
+
+ fn maybe_lint_missing_abi(&mut self, span: Span, id: NodeId) {
+ // FIXME(davidtwco): This is a hack to detect macros which produce spans of the
+ // call site which do not have a macro backtrace. See #61963.
+ let is_macro_callsite = self
+ .session
+ .source_map()
+ .span_to_snippet(span)
+ .map(|snippet| snippet.starts_with("#["))
+ .unwrap_or(true);
+ if !is_macro_callsite {
+ self.lint_buffer.buffer_lint_with_diagnostic(
+ MISSING_ABI,
+ id,
+ span,
+ "extern declarations without an explicit ABI are deprecated",
+ BuiltinLintDiagnostics::MissingAbi(span, abi::Abi::FALLBACK),
+ )
+ }
+ }
+}
+
+/// Checks that generic parameters are in the correct order,
+/// which is lifetimes, then types and then consts. (`<'a, T, const N: usize>`)
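+/// For example (illustrative), `<T, 'a, const N: usize>` is rejected with a
+/// suggestion to reorder it as `<'a, T, const N: usize>`.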
+fn validate_generic_param_order(
+ handler: &rustc_errors::Handler,
+ generics: &[GenericParam],
+ span: Span,
+) {
+ let mut max_param: Option<ParamKindOrd> = None;
+ let mut out_of_order = FxHashMap::default();
+ let mut param_idents = Vec::with_capacity(generics.len());
+
+ for (idx, param) in generics.iter().enumerate() {
+ let ident = param.ident;
+ let (kind, bounds, span) = (&param.kind, &param.bounds, ident.span);
+ let (ord_kind, ident) = match &param.kind {
+ GenericParamKind::Lifetime => (ParamKindOrd::Lifetime, ident.to_string()),
+ GenericParamKind::Type { default: _ } => (ParamKindOrd::Type, ident.to_string()),
+ GenericParamKind::Const { ref ty, kw_span: _, default: _ } => {
+ let ty = pprust::ty_to_string(ty);
+ (ParamKindOrd::Const, format!("const {}: {}", ident, ty))
+ }
+ };
+ param_idents.push((kind, ord_kind, bounds, idx, ident));
+ match max_param {
+ Some(max_param) if max_param > ord_kind => {
+ let entry = out_of_order.entry(ord_kind).or_insert((max_param, vec![]));
+ entry.1.push(span);
+ }
+ Some(_) | None => max_param = Some(ord_kind),
+ };
+ }
+
+ if !out_of_order.is_empty() {
+ let mut ordered_params = "<".to_string();
+ param_idents.sort_by_key(|&(_, po, _, i, _)| (po, i));
+ let mut first = true;
+ for (kind, _, bounds, _, ident) in param_idents {
+ if !first {
+ ordered_params += ", ";
+ }
+ ordered_params += &ident;
+
+ if !bounds.is_empty() {
+ ordered_params += ": ";
+ ordered_params += &pprust::bounds_to_string(&bounds);
+ }
+
+ match kind {
+ GenericParamKind::Type { default: Some(default) } => {
+ ordered_params += " = ";
+ ordered_params += &pprust::ty_to_string(default);
+ }
+ GenericParamKind::Type { default: None } => (),
+ GenericParamKind::Lifetime => (),
+ GenericParamKind::Const { ty: _, kw_span: _, default: Some(default) } => {
+ ordered_params += " = ";
+ ordered_params += &pprust::expr_to_string(&*default.value);
+ }
+ GenericParamKind::Const { ty: _, kw_span: _, default: None } => (),
+ }
+ first = false;
+ }
+
+ ordered_params += ">";
+
+ for (param_ord, (max_param, spans)) in &out_of_order {
+ let mut err = handler.struct_span_err(
+ spans.clone(),
+ &format!(
+ "{} parameters must be declared prior to {} parameters",
+ param_ord, max_param,
+ ),
+ );
+ err.span_suggestion(
+ span,
+ "reorder the parameters: lifetimes, then consts and types",
+ &ordered_params,
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ }
+ }
+}
+
+impl<'a> Visitor<'a> for AstValidator<'a> {
+ fn visit_attribute(&mut self, attr: &Attribute) {
+ validate_attr::check_meta(&self.session.parse_sess, attr);
+ }
+
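+    // `let` expressions are only allowed directly in `if`/`while` conditions,
+    // through `&&` chains, and in match guards; everywhere else they are
+    // rejected via `ban_let_expr` with an appropriate `ForbiddenLetReason`.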
+ fn visit_expr(&mut self, expr: &'a Expr) {
+ self.with_let_management(Some(ForbiddenLetReason::GenericForbidden), |this, forbidden_let_reason| {
+ match &expr.kind {
+ ExprKind::Binary(Spanned { node: BinOpKind::Or, span }, lhs, rhs) => {
+ let local_reason = Some(ForbiddenLetReason::NotSupportedOr(*span));
+ this.with_let_management(local_reason, |this, _| this.visit_expr(lhs));
+ this.with_let_management(local_reason, |this, _| this.visit_expr(rhs));
+ }
+ ExprKind::If(cond, then, opt_else) => {
+ this.visit_block(then);
+ walk_list!(this, visit_expr, opt_else);
+ this.with_let_management(None, |this, _| this.visit_expr(cond));
+ return;
+ }
+ ExprKind::Let(..) if let Some(elem) = forbidden_let_reason => {
+ this.ban_let_expr(expr, elem);
+ },
+ ExprKind::Match(scrutinee, arms) => {
+ this.visit_expr(scrutinee);
+ for arm in arms {
+ this.visit_expr(&arm.body);
+ this.visit_pat(&arm.pat);
+ walk_list!(this, visit_attribute, &arm.attrs);
+ if let Some(guard) = &arm.guard && let ExprKind::Let(_, guard_expr, _) = &guard.kind {
+ this.with_let_management(None, |this, _| {
+ this.visit_expr(guard_expr)
+ });
+ return;
+ }
+ }
+ }
+ ExprKind::Paren(local_expr) => {
+ fn has_let_expr(expr: &Expr) -> bool {
+ match expr.kind {
+ ExprKind::Binary(_, ref lhs, ref rhs) => has_let_expr(lhs) || has_let_expr(rhs),
+ ExprKind::Let(..) => true,
+ _ => false,
+ }
+ }
+ let local_reason = if has_let_expr(local_expr) {
+ Some(ForbiddenLetReason::NotSupportedParentheses(local_expr.span))
+ }
+ else {
+ forbidden_let_reason
+ };
+ this.with_let_management(local_reason, |this, _| this.visit_expr(local_expr));
+ }
+ ExprKind::Binary(Spanned { node: BinOpKind::And, .. }, ..) => {
+ this.with_let_management(forbidden_let_reason, |this, _| visit::walk_expr(this, expr));
+ return;
+ }
+ ExprKind::While(cond, then, opt_label) => {
+ walk_list!(this, visit_label, opt_label);
+ this.visit_block(then);
+ this.with_let_management(None, |this, _| this.visit_expr(cond));
+ return;
+ }
+ _ => visit::walk_expr(this, expr),
+ }
+ });
+ }
+
+ fn visit_ty(&mut self, ty: &'a Ty) {
+ self.visit_ty_common(ty);
+ self.walk_ty(ty)
+ }
+
+ fn visit_label(&mut self, label: &'a Label) {
+ self.check_label(label.ident);
+ visit::walk_label(self, label);
+ }
+
+ fn visit_lifetime(&mut self, lifetime: &'a Lifetime, _: visit::LifetimeCtxt) {
+ self.check_lifetime(lifetime.ident);
+ visit::walk_lifetime(self, lifetime);
+ }
+
+ fn visit_field_def(&mut self, s: &'a FieldDef) {
+ visit::walk_field_def(self, s)
+ }
+
+ fn visit_item(&mut self, item: &'a Item) {
+ if item.attrs.iter().any(|attr| self.session.is_proc_macro_attr(attr)) {
+ self.has_proc_macro_decls = true;
+ }
+
+ if self.session.contains_name(&item.attrs, sym::no_mangle) {
+ self.check_nomangle_item_asciionly(item.ident, item.span);
+ }
+
+ match item.kind {
+ ItemKind::Impl(box Impl {
+ unsafety,
+ polarity,
+ defaultness: _,
+ constness,
+ ref generics,
+ of_trait: Some(ref t),
+ ref self_ty,
+ ref items,
+ }) => {
+ self.with_in_trait_impl(true, Some(constness), |this| {
+ this.invalid_visibility(&item.vis, None);
+ if let TyKind::Err = self_ty.kind {
+ this.err_handler()
+ .struct_span_err(
+ item.span,
+ "`impl Trait for .. {}` is an obsolete syntax",
+ )
+ .help("use `auto trait Trait {}` instead")
+ .emit();
+ }
+ if let (Unsafe::Yes(span), ImplPolarity::Negative(sp)) = (unsafety, polarity) {
+ struct_span_err!(
+ this.session,
+ sp.to(t.path.span),
+ E0198,
+ "negative impls cannot be unsafe"
+ )
+ .span_label(sp, "negative because of this")
+ .span_label(span, "unsafe because of this")
+ .emit();
+ }
+
+ this.visit_vis(&item.vis);
+ this.visit_ident(item.ident);
+ if let Const::Yes(_) = constness {
+ this.with_tilde_const_allowed(|this| this.visit_generics(generics));
+ } else {
+ this.visit_generics(generics);
+ }
+ this.visit_trait_ref(t);
+ this.visit_ty(self_ty);
+
+ walk_list!(this, visit_assoc_item, items, AssocCtxt::Impl);
+ });
+ return; // Avoid visiting again.
+ }
+ ItemKind::Impl(box Impl {
+ unsafety,
+ polarity,
+ defaultness,
+ constness,
+ generics: _,
+ of_trait: None,
+ ref self_ty,
+ items: _,
+ }) => {
+ let error = |annotation_span, annotation| {
+ let mut err = self.err_handler().struct_span_err(
+ self_ty.span,
+ &format!("inherent impls cannot be {}", annotation),
+ );
+ err.span_label(annotation_span, &format!("{} because of this", annotation));
+ err.span_label(self_ty.span, "inherent impl for this type");
+ err
+ };
+
+ self.invalid_visibility(
+ &item.vis,
+ Some("place qualifiers on individual impl items instead"),
+ );
+ if let Unsafe::Yes(span) = unsafety {
+ error(span, "unsafe").code(error_code!(E0197)).emit();
+ }
+ if let ImplPolarity::Negative(span) = polarity {
+ error(span, "negative").emit();
+ }
+ if let Defaultness::Default(def_span) = defaultness {
+ error(def_span, "`default`")
+ .note("only trait implementations may be annotated with `default`")
+ .emit();
+ }
+ if let Const::Yes(span) = constness {
+ error(span, "`const`")
+ .note("only trait implementations may be annotated with `const`")
+ .emit();
+ }
+ }
+ ItemKind::Fn(box Fn { defaultness, ref sig, ref generics, ref body }) => {
+ self.check_defaultness(item.span, defaultness);
+
+ if body.is_none() {
+ let msg = "free function without a body";
+ let ext = sig.header.ext;
+
+ let f = |e: &mut DiagnosticBuilder<'_, _>| {
+ if let Extern::Implicit(start_span) | Extern::Explicit(_, start_span) = &ext
+ {
+ let start_suggestion = if let Extern::Explicit(abi, _) = ext {
+ format!("extern \"{}\" {{", abi.symbol_unescaped)
+ } else {
+ "extern {".to_owned()
+ };
+
+ let end_suggestion = " }".to_owned();
+ let end_span = item.span.shrink_to_hi();
+
+ e
+ .multipart_suggestion(
+ "if you meant to declare an externally defined function, use an `extern` block",
+ vec![(*start_span, start_suggestion), (end_span, end_suggestion)],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ };
+
+ self.error_item_without_body_with_help(
+ item.span,
+ "function",
+ msg,
+ " { <body> }",
+ f,
+ );
+ }
+
+ self.visit_vis(&item.vis);
+ self.visit_ident(item.ident);
+ let kind =
+ FnKind::Fn(FnCtxt::Free, item.ident, sig, &item.vis, generics, body.as_deref());
+ self.visit_fn(kind, item.span, item.id);
+ walk_list!(self, visit_attribute, &item.attrs);
+ return; // Avoid visiting again.
+ }
+ ItemKind::ForeignMod(ForeignMod { abi, unsafety, .. }) => {
+ let old_item = mem::replace(&mut self.extern_mod, Some(item));
+ self.invalid_visibility(
+ &item.vis,
+ Some("place qualifiers on individual foreign items instead"),
+ );
+ if let Unsafe::Yes(span) = unsafety {
+ self.err_handler().span_err(span, "extern block cannot be declared unsafe");
+ }
+ if abi.is_none() {
+ self.maybe_lint_missing_abi(item.span, item.id);
+ }
+ visit::walk_item(self, item);
+ self.extern_mod = old_item;
+ return; // Avoid visiting again.
+ }
+ ItemKind::Enum(ref def, _) => {
+ for variant in &def.variants {
+ self.invalid_visibility(&variant.vis, None);
+ for field in variant.data.fields() {
+ self.invalid_visibility(&field.vis, None);
+ }
+ }
+ }
+ ItemKind::Trait(box Trait { is_auto, ref generics, ref bounds, ref items, .. }) => {
+ if is_auto == IsAuto::Yes {
+                    // Auto traits cannot have generic parameters, supertraits,
+                    // where clauses, or associated items.
+ self.deny_generic_params(generics, item.ident.span);
+ self.deny_super_traits(bounds, item.ident.span);
+ self.deny_where_clause(&generics.where_clause, item.ident.span);
+ self.deny_items(items, item.ident.span);
+ }
+
+ // Equivalent of `visit::walk_item` for `ItemKind::Trait` that inserts a bound
+ // context for the supertraits.
+ self.visit_vis(&item.vis);
+ self.visit_ident(item.ident);
+ self.visit_generics(generics);
+ self.with_tilde_const_allowed(|this| {
+ walk_list!(this, visit_param_bound, bounds, BoundKind::SuperTraits)
+ });
+ walk_list!(self, visit_assoc_item, items, AssocCtxt::Trait);
+ walk_list!(self, visit_attribute, &item.attrs);
+ return;
+ }
+ ItemKind::Mod(unsafety, ref mod_kind) => {
+ if let Unsafe::Yes(span) = unsafety {
+ self.err_handler().span_err(span, "module cannot be declared unsafe");
+ }
+ // Ensure that `path` attributes on modules are recorded as used (cf. issue #35584).
+ if !matches!(mod_kind, ModKind::Loaded(_, Inline::Yes, _))
+ && !self.session.contains_name(&item.attrs, sym::path)
+ {
+ self.check_mod_file_item_asciionly(item.ident);
+ }
+ }
+ ItemKind::Struct(ref vdata, ref generics) => match vdata {
+                // Duplicating the `Visitor` logic allows catching all cases
+                // of `Anonymous(Struct, Union)` outside of a field struct or union.
+                //
+                // Inside `visit_ty` the validator catches every `Anonymous(Struct, Union)`
+                // it encounters, whereas `ItemKind::Struct` and `ItemKind::Union` use
+                // `visit_ty_common`, which doesn't contain that specific check.
+ VariantData::Struct(ref fields, ..) => {
+ self.visit_vis(&item.vis);
+ self.visit_ident(item.ident);
+ self.visit_generics(generics);
+ self.with_banned_assoc_ty_bound(|this| {
+ walk_list!(this, visit_struct_field_def, fields);
+ });
+ walk_list!(self, visit_attribute, &item.attrs);
+ return;
+ }
+ _ => {}
+ },
+ ItemKind::Union(ref vdata, ref generics) => {
+ if vdata.fields().is_empty() {
+ self.err_handler().span_err(item.span, "unions cannot have zero fields");
+ }
+ match vdata {
+ VariantData::Struct(ref fields, ..) => {
+ self.visit_vis(&item.vis);
+ self.visit_ident(item.ident);
+ self.visit_generics(generics);
+ self.with_banned_assoc_ty_bound(|this| {
+ walk_list!(this, visit_struct_field_def, fields);
+ });
+ walk_list!(self, visit_attribute, &item.attrs);
+ return;
+ }
+ _ => {}
+ }
+ }
+ ItemKind::Const(def, .., None) => {
+ self.check_defaultness(item.span, def);
+ let msg = "free constant item without body";
+ self.error_item_without_body(item.span, "constant", msg, " = <expr>;");
+ }
+ ItemKind::Static(.., None) => {
+ let msg = "free static item without body";
+ self.error_item_without_body(item.span, "static", msg, " = <expr>;");
+ }
+ ItemKind::TyAlias(box TyAlias {
+ defaultness,
+ where_clauses,
+ ref bounds,
+ ref ty,
+ ..
+ }) => {
+ self.check_defaultness(item.span, defaultness);
+ if ty.is_none() {
+ let msg = "free type alias without body";
+ self.error_item_without_body(item.span, "type", msg, " = <type>;");
+ }
+ self.check_type_no_bounds(bounds, "this context");
+ if where_clauses.1.0 {
+ let mut err = self.err_handler().struct_span_err(
+ where_clauses.1.1,
+ "where clauses are not allowed after the type for type aliases",
+ );
+ err.note(
+ "see issue #89122 <https://github.com/rust-lang/rust/issues/89122> for more information",
+ );
+ err.emit();
+ }
+ }
+ _ => {}
+ }
+
+ visit::walk_item(self, item);
+ }
+
+ fn visit_foreign_item(&mut self, fi: &'a ForeignItem) {
+ match &fi.kind {
+ ForeignItemKind::Fn(box Fn { defaultness, sig, body, .. }) => {
+ self.check_defaultness(fi.span, *defaultness);
+ self.check_foreign_fn_bodyless(fi.ident, body.as_deref());
+ self.check_foreign_fn_headerless(fi.ident, fi.span, sig.header);
+ self.check_foreign_item_ascii_only(fi.ident);
+ }
+ ForeignItemKind::TyAlias(box TyAlias {
+ defaultness,
+ generics,
+ where_clauses,
+ bounds,
+ ty,
+ ..
+ }) => {
+ self.check_defaultness(fi.span, *defaultness);
+ self.check_foreign_kind_bodyless(fi.ident, "type", ty.as_ref().map(|b| b.span));
+ self.check_type_no_bounds(bounds, "`extern` blocks");
+ self.check_foreign_ty_genericless(generics, where_clauses.0.1);
+ self.check_foreign_item_ascii_only(fi.ident);
+ }
+ ForeignItemKind::Static(_, _, body) => {
+ self.check_foreign_kind_bodyless(fi.ident, "static", body.as_ref().map(|b| b.span));
+ self.check_foreign_item_ascii_only(fi.ident);
+ }
+ ForeignItemKind::MacCall(..) => {}
+ }
+
+ visit::walk_foreign_item(self, fi)
+ }
+
+ // Mirrors `visit::walk_generic_args`, but tracks relevant state.
+ fn visit_generic_args(&mut self, _: Span, generic_args: &'a GenericArgs) {
+ match *generic_args {
+ GenericArgs::AngleBracketed(ref data) => {
+ self.check_generic_args_before_constraints(data);
+
+ for arg in &data.args {
+ match arg {
+ AngleBracketedArg::Arg(arg) => self.visit_generic_arg(arg),
+ // Type bindings such as `Item = impl Debug` in `Iterator<Item = Debug>`
+ // are allowed to contain nested `impl Trait`.
+ AngleBracketedArg::Constraint(constraint) => {
+ self.with_impl_trait(None, |this| {
+ this.visit_assoc_constraint_from_generic_args(constraint);
+ });
+ }
+ }
+ }
+ }
+ GenericArgs::Parenthesized(ref data) => {
+ walk_list!(self, visit_ty, &data.inputs);
+ if let FnRetTy::Ty(ty) = &data.output {
+ // `-> Foo` syntax is essentially an associated type binding,
+ // so it is also allowed to contain nested `impl Trait`.
+ self.with_impl_trait(None, |this| this.visit_ty(ty));
+ }
+ }
+ }
+ }
+
+ fn visit_generics(&mut self, generics: &'a Generics) {
+ let mut prev_param_default = None;
+ for param in &generics.params {
+ match param.kind {
+ GenericParamKind::Lifetime => (),
+ GenericParamKind::Type { default: Some(_), .. }
+ | GenericParamKind::Const { default: Some(_), .. } => {
+ prev_param_default = Some(param.ident.span);
+ }
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
+ if let Some(span) = prev_param_default {
+ let mut err = self.err_handler().struct_span_err(
+ span,
+ "generic parameters with a default must be trailing",
+ );
+ err.emit();
+ break;
+ }
+ }
+ }
+ }
+
+ validate_generic_param_order(self.err_handler(), &generics.params, generics.span);
+
+ for predicate in &generics.where_clause.predicates {
+ if let WherePredicate::EqPredicate(ref predicate) = *predicate {
+ deny_equality_constraints(self, predicate, generics);
+ }
+ }
+ walk_list!(self, visit_generic_param, &generics.params);
+ for predicate in &generics.where_clause.predicates {
+ match predicate {
+ WherePredicate::BoundPredicate(bound_pred) => {
+                    // A bound predicate, e.g. `for<'c> Foo: Send + Clone + 'c`.
+ self.check_late_bound_lifetime_defs(&bound_pred.bound_generic_params);
+
+ // This is slightly complicated. Our representation for poly-trait-refs contains a single
+ // binder and thus we only allow a single level of quantification. However,
+ // the syntax of Rust permits quantification in two places in where clauses,
+ // e.g., `T: for <'a> Foo<'a>` and `for <'a, 'b> &'b T: Foo<'a>`. If both are
+ // defined, then error.
+ if !bound_pred.bound_generic_params.is_empty() {
+ for bound in &bound_pred.bounds {
+ match bound {
+ GenericBound::Trait(t, _) => {
+ if !t.bound_generic_params.is_empty() {
+ struct_span_err!(
+ self.err_handler(),
+ t.span,
+ E0316,
+ "nested quantification of lifetimes"
+ )
+ .emit();
+ }
+ }
+ GenericBound::Outlives(_) => {}
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ self.visit_where_predicate(predicate);
+ }
+ }
+
+ fn visit_generic_param(&mut self, param: &'a GenericParam) {
+ if let GenericParamKind::Lifetime { .. } = param.kind {
+ self.check_lifetime(param.ident);
+ }
+ visit::walk_generic_param(self, param);
+ }
+
+ fn visit_param_bound(&mut self, bound: &'a GenericBound, ctxt: BoundKind) {
+ if let GenericBound::Trait(ref poly, modify) = *bound {
+ match (ctxt, modify) {
+ (BoundKind::SuperTraits, TraitBoundModifier::Maybe) => {
+ let mut err = self
+ .err_handler()
+ .struct_span_err(poly.span, "`?Trait` is not permitted in supertraits");
+ let path_str = pprust::path_to_string(&poly.trait_ref.path);
+ err.note(&format!("traits are `?{}` by default", path_str));
+ err.emit();
+ }
+ (BoundKind::TraitObject, TraitBoundModifier::Maybe) => {
+ let mut err = self.err_handler().struct_span_err(
+ poly.span,
+ "`?Trait` is not permitted in trait object types",
+ );
+ err.emit();
+ }
+ (_, TraitBoundModifier::MaybeConst) => {
+ if !self.is_tilde_const_allowed {
+ self.err_handler()
+ .struct_span_err(bound.span(), "`~const` is not allowed here")
+ .note("only allowed on bounds on traits' associated types and functions, const fns, const impls and its associated functions")
+ .emit();
+ }
+ }
+ (_, TraitBoundModifier::MaybeConstMaybe) => {
+ self.err_handler()
+ .span_err(bound.span(), "`~const` and `?` are mutually exclusive");
+ }
+ _ => {}
+ }
+ }
+
+ visit::walk_param_bound(self, bound)
+ }
+
+ fn visit_poly_trait_ref(&mut self, t: &'a PolyTraitRef, m: &'a TraitBoundModifier) {
+ self.check_late_bound_lifetime_defs(&t.bound_generic_params);
+ visit::walk_poly_trait_ref(self, t, m);
+ }
+
+ fn visit_variant_data(&mut self, s: &'a VariantData) {
+ self.with_banned_assoc_ty_bound(|this| visit::walk_struct_def(this, s))
+ }
+
+ fn visit_enum_def(
+ &mut self,
+ enum_definition: &'a EnumDef,
+ generics: &'a Generics,
+ item_id: NodeId,
+ _: Span,
+ ) {
+ self.with_banned_assoc_ty_bound(|this| {
+ visit::walk_enum_def(this, enum_definition, generics, item_id)
+ })
+ }
+
+ fn visit_fn(&mut self, fk: FnKind<'a>, span: Span, id: NodeId) {
+ // Only associated `fn`s can have `self` parameters.
+ let self_semantic = match fk.ctxt() {
+ Some(FnCtxt::Assoc(_)) => SelfSemantic::Yes,
+ _ => SelfSemantic::No,
+ };
+ self.check_fn_decl(fk.decl(), self_semantic);
+
+ self.check_c_variadic_type(fk);
+
+ // Functions cannot both be `const async`
+ if let Some(FnHeader {
+ constness: Const::Yes(cspan),
+ asyncness: Async::Yes { span: aspan, .. },
+ ..
+ }) = fk.header()
+ {
+ self.err_handler()
+ .struct_span_err(
+ vec![*cspan, *aspan],
+ "functions cannot be both `const` and `async`",
+ )
+ .span_label(*cspan, "`const` because of this")
+ .span_label(*aspan, "`async` because of this")
+ .span_label(span, "") // Point at the fn header.
+ .emit();
+ }
+
+ if let FnKind::Closure(ClosureBinder::For { generic_params, .. }, ..) = fk {
+ self.check_late_bound_lifetime_defs(generic_params);
+ }
+
+ if let FnKind::Fn(
+ _,
+ _,
+ FnSig { span: sig_span, header: FnHeader { ext: Extern::Implicit(_), .. }, .. },
+ _,
+ _,
+ _,
+ ) = fk
+ {
+ self.maybe_lint_missing_abi(*sig_span, id);
+ }
+
+ // Functions without bodies cannot have patterns.
+ if let FnKind::Fn(ctxt, _, sig, _, _, None) = fk {
+ Self::check_decl_no_pat(&sig.decl, |span, ident, mut_ident| {
+ let (code, msg, label) = match ctxt {
+ FnCtxt::Foreign => (
+ error_code!(E0130),
+ "patterns aren't allowed in foreign function declarations",
+ "pattern not allowed in foreign function",
+ ),
+ _ => (
+ error_code!(E0642),
+ "patterns aren't allowed in functions without bodies",
+ "pattern not allowed in function without body",
+ ),
+ };
+ if mut_ident && matches!(ctxt, FnCtxt::Assoc(_)) {
+ if let Some(ident) = ident {
+ let diag = BuiltinLintDiagnostics::PatternsInFnsWithoutBody(span, ident);
+ self.lint_buffer.buffer_lint_with_diagnostic(
+ PATTERNS_IN_FNS_WITHOUT_BODY,
+ id,
+ span,
+ msg,
+ diag,
+ )
+ }
+ } else {
+ self.err_handler()
+ .struct_span_err(span, msg)
+ .span_label(span, label)
+ .code(code)
+ .emit();
+ }
+ });
+ }
+
+ let tilde_const_allowed =
+ matches!(fk.header(), Some(FnHeader { constness: Const::Yes(_), .. }))
+ || matches!(fk.ctxt(), Some(FnCtxt::Assoc(_)));
+
+ self.with_tilde_const(tilde_const_allowed, |this| visit::walk_fn(this, fk, span));
+ }
+
+ fn visit_assoc_item(&mut self, item: &'a AssocItem, ctxt: AssocCtxt) {
+ if self.session.contains_name(&item.attrs, sym::no_mangle) {
+ self.check_nomangle_item_asciionly(item.ident, item.span);
+ }
+
+ if ctxt == AssocCtxt::Trait || !self.in_trait_impl {
+ self.check_defaultness(item.span, item.kind.defaultness());
+ }
+
+ if ctxt == AssocCtxt::Impl {
+ match &item.kind {
+ AssocItemKind::Const(_, _, body) => {
+ self.check_impl_item_provided(item.span, body, "constant", " = <expr>;");
+ }
+ AssocItemKind::Fn(box Fn { body, .. }) => {
+ self.check_impl_item_provided(item.span, body, "function", " { <body> }");
+ }
+ AssocItemKind::TyAlias(box TyAlias {
+ generics,
+ where_clauses,
+ where_predicates_split,
+ bounds,
+ ty,
+ ..
+ }) => {
+ self.check_impl_item_provided(item.span, ty, "type", " = <type>;");
+ self.check_type_no_bounds(bounds, "`impl`s");
+ if ty.is_some() {
+ self.check_gat_where(
+ item.id,
+ generics.where_clause.predicates.split_at(*where_predicates_split).0,
+ *where_clauses,
+ );
+ }
+ }
+ _ => {}
+ }
+ }
+
+ if ctxt == AssocCtxt::Trait || self.in_trait_impl {
+ self.invalid_visibility(&item.vis, None);
+ if let AssocItemKind::Fn(box Fn { sig, .. }) = &item.kind {
+ self.check_trait_fn_not_const(sig.header.constness);
+ self.check_trait_fn_not_async(item.span, sig.header.asyncness);
+ }
+ }
+
+ if let AssocItemKind::Const(..) = item.kind {
+ self.check_item_named(item.ident, "const");
+ }
+
+ match item.kind {
+ AssocItemKind::TyAlias(box TyAlias { ref generics, ref bounds, ref ty, .. })
+ if ctxt == AssocCtxt::Trait =>
+ {
+ self.visit_vis(&item.vis);
+ self.visit_ident(item.ident);
+ walk_list!(self, visit_attribute, &item.attrs);
+ self.with_tilde_const_allowed(|this| {
+ this.visit_generics(generics);
+ walk_list!(this, visit_param_bound, bounds, BoundKind::Bound);
+ });
+ walk_list!(self, visit_ty, ty);
+ }
+ AssocItemKind::Fn(box Fn { ref sig, ref generics, ref body, .. })
+ if self.in_const_trait_impl
+ || ctxt == AssocCtxt::Trait
+ || matches!(sig.header.constness, Const::Yes(_)) =>
+ {
+ self.visit_vis(&item.vis);
+ self.visit_ident(item.ident);
+ let kind = FnKind::Fn(
+ FnCtxt::Assoc(ctxt),
+ item.ident,
+ sig,
+ &item.vis,
+ generics,
+ body.as_deref(),
+ );
+ self.visit_fn(kind, item.span, item.id);
+ }
+ _ => self
+ .with_in_trait_impl(false, None, |this| visit::walk_assoc_item(this, item, ctxt)),
+ }
+ }
+}
+
+/// When encountering an equality constraint in a `where` clause, emit an error. If the code seems
+/// like it's setting an associated type, provide an appropriate suggestion.
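+///
+/// For example (illustrative), `fn f<A: Foo>() where A::Bar = u8 {}` is
+/// rejected with a suggestion to write `A: Foo<Bar = u8>` instead.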
+fn deny_equality_constraints(
+ this: &mut AstValidator<'_>,
+ predicate: &WhereEqPredicate,
+ generics: &Generics,
+) {
+ let mut err = this.err_handler().struct_span_err(
+ predicate.span,
+ "equality constraints are not yet supported in `where` clauses",
+ );
+ err.span_label(predicate.span, "not supported");
+
+ // Given `<A as Foo>::Bar = RhsTy`, suggest `A: Foo<Bar = RhsTy>`.
+ if let TyKind::Path(Some(qself), full_path) = &predicate.lhs_ty.kind {
+ if let TyKind::Path(None, path) = &qself.ty.kind {
+ match &path.segments[..] {
+ [PathSegment { ident, args: None, .. }] => {
+ for param in &generics.params {
+ if param.ident == *ident {
+ let param = ident;
+ match &full_path.segments[qself.position..] {
+ [PathSegment { ident, args, .. }] => {
+ // Make a new `Path` from `foo::Bar` to `Foo<Bar = RhsTy>`.
+ let mut assoc_path = full_path.clone();
+ // Remove `Bar` from `Foo::Bar`.
+ assoc_path.segments.pop();
+ let len = assoc_path.segments.len() - 1;
+ let gen_args = args.as_ref().map(|p| (**p).clone());
+ // Build `<Bar = RhsTy>`.
+ let arg = AngleBracketedArg::Constraint(AssocConstraint {
+ id: rustc_ast::node_id::DUMMY_NODE_ID,
+ ident: *ident,
+ gen_args,
+ kind: AssocConstraintKind::Equality {
+ term: predicate.rhs_ty.clone().into(),
+ },
+ span: ident.span,
+ });
+ // Add `<Bar = RhsTy>` to `Foo`.
+ match &mut assoc_path.segments[len].args {
+ Some(args) => match args.deref_mut() {
+ GenericArgs::Parenthesized(_) => continue,
+ GenericArgs::AngleBracketed(args) => {
+ args.args.push(arg);
+ }
+ },
+ empty_args => {
+ *empty_args = AngleBracketedArgs {
+ span: ident.span,
+ args: vec![arg],
+ }
+ .into();
+ }
+ }
+ err.span_suggestion_verbose(
+ predicate.span,
+ &format!(
+ "if `{}` is an associated type you're trying to set, \
+ use the associated type binding syntax",
+ ident
+ ),
+ format!(
+ "{}: {}",
+ param,
+ pprust::path_to_string(&assoc_path)
+ ),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ };
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+ // Given `A: Foo, A::Bar = RhsTy`, suggest `A: Foo<Bar = RhsTy>`.
+ if let TyKind::Path(None, full_path) = &predicate.lhs_ty.kind {
+ if let [potential_param, potential_assoc] = &full_path.segments[..] {
+ for param in &generics.params {
+ if param.ident == potential_param.ident {
+ for bound in &param.bounds {
+ if let ast::GenericBound::Trait(trait_ref, TraitBoundModifier::None) = bound
+ {
+ if let [trait_segment] = &trait_ref.trait_ref.path.segments[..] {
+ let assoc = pprust::path_to_string(&ast::Path::from_ident(
+ potential_assoc.ident,
+ ));
+ let ty = pprust::ty_to_string(&predicate.rhs_ty);
+ let (args, span) = match &trait_segment.args {
+ Some(args) => match args.deref() {
+ ast::GenericArgs::AngleBracketed(args) => {
+ let Some(arg) = args.args.last() else {
+ continue;
+ };
+ (
+ format!(", {} = {}", assoc, ty),
+ arg.span().shrink_to_hi(),
+ )
+ }
+ _ => continue,
+ },
+ None => (
+ format!("<{} = {}>", assoc, ty),
+ trait_segment.span().shrink_to_hi(),
+ ),
+ };
+ err.multipart_suggestion(
+ &format!(
+ "if `{}::{}` is an associated type you're trying to set, \
+ use the associated type binding syntax",
+ trait_segment.ident, potential_assoc.ident,
+ ),
+ vec![(span, args), (predicate.span, String::new())],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ err.note(
+ "see issue #20041 <https://github.com/rust-lang/rust/issues/20041> for more information",
+ );
+ err.emit();
+}
+
+pub fn check_crate(session: &Session, krate: &Crate, lints: &mut LintBuffer) -> bool {
+ let mut validator = AstValidator {
+ session,
+ extern_mod: None,
+ in_trait_impl: false,
+ in_const_trait_impl: false,
+ has_proc_macro_decls: false,
+ outer_impl_trait: None,
+ is_tilde_const_allowed: false,
+ is_impl_trait_banned: false,
+ is_assoc_ty_bound_banned: false,
+ forbidden_let_reason: Some(ForbiddenLetReason::GenericForbidden),
+ lint_buffer: lints,
+ };
+ visit::walk_crate(&mut validator, krate);
+
+ validator.has_proc_macro_decls
+}
+
+/// Used to forbid `let` expressions in certain syntactic locations.
+#[derive(Clone, Copy)]
+enum ForbiddenLetReason {
+ /// `let` is not valid and the source environment is not important
+ GenericForbidden,
+ /// A let chain with the `||` operator
+ NotSupportedOr(Span),
+ /// A let chain with invalid parentheses
+ ///
+    /// For example, `let 1 = 1 && (expr && expr)` is allowed
+ /// but `(let 1 = 1 && (let 1 = 1 && (let 1 = 1))) && let a = 1` is not
+ NotSupportedParentheses(Span),
+}
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
new file mode 100644
index 000000000..789eca1f0
--- /dev/null
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -0,0 +1,901 @@
+use rustc_ast as ast;
+use rustc_ast::visit::{self, AssocCtxt, FnCtxt, FnKind, Visitor};
+use rustc_ast::{AssocConstraint, AssocConstraintKind, NodeId};
+use rustc_ast::{PatKind, RangeEnd, VariantData};
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_feature::{AttributeGate, BuiltinAttribute, BUILTIN_ATTRIBUTE_MAP};
+use rustc_feature::{Features, GateIssue};
+use rustc_session::parse::{feature_err, feature_err_issue};
+use rustc_session::Session;
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+use tracing::debug;
+
+macro_rules! gate_feature_fn {
+ ($visitor: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr, $help: expr) => {{
+ let (visitor, has_feature, span, name, explain, help) =
+ (&*$visitor, $has_feature, $span, $name, $explain, $help);
+ let has_feature: bool = has_feature(visitor.features);
+ debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
+ if !has_feature && !span.allows_unstable($name) {
+ feature_err_issue(&visitor.sess.parse_sess, name, span, GateIssue::Language, explain)
+ .help(help)
+ .emit();
+ }
+ }};
+ ($visitor: expr, $has_feature: expr, $span: expr, $name: expr, $explain: expr) => {{
+ let (visitor, has_feature, span, name, explain) =
+ (&*$visitor, $has_feature, $span, $name, $explain);
+ let has_feature: bool = has_feature(visitor.features);
+ debug!("gate_feature(feature = {:?}, span = {:?}); has? {}", name, span, has_feature);
+ if !has_feature && !span.allows_unstable($name) {
+ feature_err_issue(&visitor.sess.parse_sess, name, span, GateIssue::Language, explain)
+ .emit();
+ }
+ }};
+}
+
+macro_rules! gate_feature_post {
+ ($visitor: expr, $feature: ident, $span: expr, $explain: expr, $help: expr) => {
+ gate_feature_fn!($visitor, |x: &Features| x.$feature, $span, sym::$feature, $explain, $help)
+ };
+ ($visitor: expr, $feature: ident, $span: expr, $explain: expr) => {
+ gate_feature_fn!($visitor, |x: &Features| x.$feature, $span, sym::$feature, $explain)
+ };
+}
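+
+// Illustrative call shape (a sketch, with `my_feature` standing in for any
+// gated feature flag):
+//
+//     gate_feature_post!(&self, my_feature, span, "`my_feature` is experimental");
+//
+// This checks `visitor.features.my_feature` and, unless the feature is enabled
+// or the span `allows_unstable`, emits a stability error for `sym::my_feature`.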
+
+pub fn check_attribute(attr: &ast::Attribute, sess: &Session, features: &Features) {
+ PostExpansionVisitor { sess, features }.visit_attribute(attr)
+}
+
+struct PostExpansionVisitor<'a> {
+ sess: &'a Session,
+
+ // `sess` contains a `Features`, but this might not be that one.
+ features: &'a Features,
+}
+
+impl<'a> PostExpansionVisitor<'a> {
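+    /// Illustrative inputs gated here (a sketch of the cases handled below,
+    /// not new behavior):
+    ///
+    /// ```ignore (illustrative)
+    /// extern "rust-call" fn f(args: ()) {} // needs `unboxed_closures`
+    /// const extern "C-unwind" fn g() {}    // needs `const_extern_fn` and `c_unwind`
+    /// ```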
+ fn check_abi(&self, abi: ast::StrLit, constness: ast::Const) {
+ let ast::StrLit { symbol_unescaped, span, .. } = abi;
+
+ if let ast::Const::Yes(_) = constness {
+ match symbol_unescaped {
+ // Stable
+ sym::Rust | sym::C => {}
+ abi => gate_feature_post!(
+ &self,
+ const_extern_fn,
+ span,
+ &format!("`{}` as a `const fn` ABI is unstable", abi)
+ ),
+ }
+ }
+
+ match symbol_unescaped.as_str() {
+ // Stable
+ "Rust" | "C" | "cdecl" | "stdcall" | "fastcall" | "aapcs" | "win64" | "sysv64"
+ | "system" => {}
+ "rust-intrinsic" => {
+ gate_feature_post!(&self, intrinsics, span, "intrinsics are subject to change");
+ }
+ "platform-intrinsic" => {
+ gate_feature_post!(
+ &self,
+ platform_intrinsics,
+ span,
+ "platform intrinsics are experimental and possibly buggy"
+ );
+ }
+ "vectorcall" => {
+ gate_feature_post!(
+ &self,
+ abi_vectorcall,
+ span,
+ "vectorcall is experimental and subject to change"
+ );
+ }
+ "thiscall" => {
+ gate_feature_post!(
+ &self,
+ abi_thiscall,
+ span,
+ "thiscall is experimental and subject to change"
+ );
+ }
+ "rust-call" => {
+ gate_feature_post!(
+ &self,
+ unboxed_closures,
+ span,
+ "rust-call ABI is subject to change"
+ );
+ }
+ "rust-cold" => {
+ gate_feature_post!(
+ &self,
+ rust_cold_cc,
+ span,
+ "rust-cold is experimental and subject to change"
+ );
+ }
+ "ptx-kernel" => {
+ gate_feature_post!(
+ &self,
+ abi_ptx,
+ span,
+ "PTX ABIs are experimental and subject to change"
+ );
+ }
+ "unadjusted" => {
+ gate_feature_post!(
+ &self,
+ abi_unadjusted,
+ span,
+ "unadjusted ABI is an implementation detail and perma-unstable"
+ );
+ }
+ "msp430-interrupt" => {
+ gate_feature_post!(
+ &self,
+ abi_msp430_interrupt,
+ span,
+ "msp430-interrupt ABI is experimental and subject to change"
+ );
+ }
+ "x86-interrupt" => {
+ gate_feature_post!(
+ &self,
+ abi_x86_interrupt,
+ span,
+ "x86-interrupt ABI is experimental and subject to change"
+ );
+ }
+ "amdgpu-kernel" => {
+ gate_feature_post!(
+ &self,
+ abi_amdgpu_kernel,
+ span,
+ "amdgpu-kernel ABI is experimental and subject to change"
+ );
+ }
+ "avr-interrupt" | "avr-non-blocking-interrupt" => {
+ gate_feature_post!(
+ &self,
+ abi_avr_interrupt,
+ span,
+ "avr-interrupt and avr-non-blocking-interrupt ABIs are experimental and subject to change"
+ );
+ }
+ "efiapi" => {
+ gate_feature_post!(
+ &self,
+ abi_efiapi,
+ span,
+ "efiapi ABI is experimental and subject to change"
+ );
+ }
+ "C-cmse-nonsecure-call" => {
+ gate_feature_post!(
+ &self,
+ abi_c_cmse_nonsecure_call,
+ span,
+ "C-cmse-nonsecure-call ABI is experimental and subject to change"
+ );
+ }
+ "C-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "C-unwind ABI is experimental and subject to change"
+ );
+ }
+ "stdcall-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "stdcall-unwind ABI is experimental and subject to change"
+ );
+ }
+ "system-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "system-unwind ABI is experimental and subject to change"
+ );
+ }
+ "thiscall-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "thiscall-unwind ABI is experimental and subject to change"
+ );
+ }
+ "cdecl-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "cdecl-unwind ABI is experimental and subject to change"
+ );
+ }
+ "fastcall-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "fastcall-unwind ABI is experimental and subject to change"
+ );
+ }
+ "vectorcall-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "vectorcall-unwind ABI is experimental and subject to change"
+ );
+ }
+ "aapcs-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "aapcs-unwind ABI is experimental and subject to change"
+ );
+ }
+ "win64-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "win64-unwind ABI is experimental and subject to change"
+ );
+ }
+ "sysv64-unwind" => {
+ gate_feature_post!(
+ &self,
+ c_unwind,
+ span,
+ "sysv64-unwind ABI is experimental and subject to change"
+ );
+ }
+ "wasm" => {
+ gate_feature_post!(
+ &self,
+ wasm_abi,
+ span,
+ "wasm ABI is experimental and subject to change"
+ );
+ }
+ abi => {
+ if self.sess.opts.pretty.map_or(true, |ppm| ppm.needs_hir()) {
+ self.sess.parse_sess.span_diagnostic.delay_span_bug(
+ span,
+ &format!("unrecognized ABI not caught in lowering: {}", abi),
+ );
+ }
+ }
+ }
+ }
+
+ fn check_extern(&self, ext: ast::Extern, constness: ast::Const) {
+ if let ast::Extern::Explicit(abi, _) = ext {
+ self.check_abi(abi, constness);
+ }
+ }
+
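+    /// Illustrative of the shape rejected without `arbitrary_enum_discriminant`:
+    /// a custom discriminant alongside tuple or struct variants.
+    ///
+    /// ```ignore (illustrative)
+    /// enum E {
+    ///     Unit = 3,   // disallowed custom discriminant
+    ///     Tuple(u32), // tuple variant defined here
+    /// }
+    /// ```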
+ fn maybe_report_invalid_custom_discriminants(&self, variants: &[ast::Variant]) {
+ let has_fields = variants.iter().any(|variant| match variant.data {
+ VariantData::Tuple(..) | VariantData::Struct(..) => true,
+ VariantData::Unit(..) => false,
+ });
+
+ let discriminant_spans = variants
+ .iter()
+ .filter(|variant| match variant.data {
+ VariantData::Tuple(..) | VariantData::Struct(..) => false,
+ VariantData::Unit(..) => true,
+ })
+ .filter_map(|variant| variant.disr_expr.as_ref().map(|c| c.value.span))
+ .collect::<Vec<_>>();
+
+ if !discriminant_spans.is_empty() && has_fields {
+ let mut err = feature_err(
+ &self.sess.parse_sess,
+ sym::arbitrary_enum_discriminant,
+ discriminant_spans.clone(),
+ "custom discriminant values are not allowed in enums with tuple or struct variants",
+ );
+ for sp in discriminant_spans {
+ err.span_label(sp, "disallowed custom discriminant");
+ }
+ for variant in variants.iter() {
+ match &variant.data {
+ VariantData::Struct(..) => {
+ err.span_label(variant.span, "struct variant defined here");
+ }
+ VariantData::Tuple(..) => {
+ err.span_label(variant.span, "tuple variant defined here");
+ }
+ VariantData::Unit(..) => {}
+ }
+ }
+ err.emit();
+ }
+ }
+
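+    /// Both shapes in this illustrative sketch are gated behind
+    /// `generic_associated_types`:
+    ///
+    /// ```ignore (illustrative)
+    /// trait T {
+    ///     type Assoc<'a>;               // generic params on an associated type
+    ///     type Plain where Self: Sized; // where clause on an associated type
+    /// }
+    /// ```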
+ fn check_gat(&self, generics: &ast::Generics, span: Span) {
+ if !generics.params.is_empty() {
+ gate_feature_post!(
+ &self,
+ generic_associated_types,
+ span,
+ "generic associated types are unstable"
+ );
+ }
+ if !generics.where_clause.predicates.is_empty() {
+ gate_feature_post!(
+ &self,
+ generic_associated_types,
+ span,
+ "where clauses on associated types are unstable"
+ );
+ }
+ }
+
+ /// Feature gate `impl Trait` inside `type Alias = $type_expr;`.
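+    ///
+    /// ```ignore (illustrative)
+    /// type Alias = impl Sized; // gated by `type_alias_impl_trait`
+    /// ```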
+ fn check_impl_trait(&self, ty: &ast::Ty) {
+ struct ImplTraitVisitor<'a> {
+ vis: &'a PostExpansionVisitor<'a>,
+ }
+ impl Visitor<'_> for ImplTraitVisitor<'_> {
+ fn visit_ty(&mut self, ty: &ast::Ty) {
+ if let ast::TyKind::ImplTrait(..) = ty.kind {
+ gate_feature_post!(
+ &self.vis,
+ type_alias_impl_trait,
+ ty.span,
+ "`impl Trait` in type aliases is unstable"
+ );
+ }
+ visit::walk_ty(self, ty);
+ }
+ }
+ ImplTraitVisitor { vis: self }.visit_ty(ty);
+ }
+}
+
+impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
+ fn visit_attribute(&mut self, attr: &ast::Attribute) {
+ let attr_info = attr.ident().and_then(|ident| BUILTIN_ATTRIBUTE_MAP.get(&ident.name));
+ // Check feature gates for built-in attributes.
+ if let Some(BuiltinAttribute {
+ gate: AttributeGate::Gated(_, name, descr, has_feature),
+ ..
+ }) = attr_info
+ {
+ gate_feature_fn!(self, has_feature, attr.span, *name, descr);
+ }
+ // Check unstable flavors of the `#[doc]` attribute.
+ if attr.has_name(sym::doc) {
+ for nested_meta in attr.meta_item_list().unwrap_or_default() {
+ macro_rules! gate_doc { ($($name:ident => $feature:ident)*) => {
+ $(if nested_meta.has_name(sym::$name) {
+ let msg = concat!("`#[doc(", stringify!($name), ")]` is experimental");
+ gate_feature_post!(self, $feature, attr.span, msg);
+ })*
+ }}
+
+ gate_doc!(
+ cfg => doc_cfg
+ cfg_hide => doc_cfg_hide
+ masked => doc_masked
+ notable_trait => doc_notable_trait
+ );
+
+ if nested_meta.has_name(sym::keyword) {
+ let msg = "`#[doc(keyword)]` is meant for internal use only";
+ gate_feature_post!(self, rustdoc_internals, attr.span, msg);
+ }
+
+ if nested_meta.has_name(sym::fake_variadic) {
+ let msg = "`#[doc(fake_variadic)]` is meant for internal use only";
+ gate_feature_post!(self, rustdoc_internals, attr.span, msg);
+ }
+ }
+ }
+
+ // Emit errors for non-staged-api crates.
+ if !self.features.staged_api {
+ if attr.has_name(sym::unstable)
+ || attr.has_name(sym::stable)
+ || attr.has_name(sym::rustc_const_unstable)
+ || attr.has_name(sym::rustc_const_stable)
+ {
+ struct_span_err!(
+ self.sess,
+ attr.span,
+ E0734,
+ "stability attributes may not be used outside of the standard library",
+ )
+ .emit();
+ }
+ }
+ }
+
+ fn visit_item(&mut self, i: &'a ast::Item) {
+ match i.kind {
+ ast::ItemKind::ForeignMod(ref foreign_module) => {
+ if let Some(abi) = foreign_module.abi {
+ self.check_abi(abi, ast::Const::No);
+ }
+ }
+
+ ast::ItemKind::Fn(..) => {
+ if self.sess.contains_name(&i.attrs, sym::start) {
+ gate_feature_post!(
+ &self,
+ start,
+ i.span,
+ "`#[start]` functions are experimental \
+ and their signature may change \
+ over time"
+ );
+ }
+ }
+
+ ast::ItemKind::Struct(..) => {
+ for attr in self.sess.filter_by_name(&i.attrs, sym::repr) {
+                    for item in attr.meta_item_list().unwrap_or_default() {
+ if item.has_name(sym::simd) {
+ gate_feature_post!(
+ &self,
+ repr_simd,
+ attr.span,
+ "SIMD types are experimental and possibly buggy"
+ );
+ }
+ }
+ }
+ }
+
+ ast::ItemKind::Enum(ast::EnumDef { ref variants, .. }, ..) => {
+ for variant in variants {
+ match (&variant.data, &variant.disr_expr) {
+ (ast::VariantData::Unit(..), _) => {}
+ (_, Some(disr_expr)) => gate_feature_post!(
+ &self,
+ arbitrary_enum_discriminant,
+ disr_expr.value.span,
+ "discriminants on non-unit variants are experimental"
+ ),
+ _ => {}
+ }
+ }
+
+ let has_feature = self.features.arbitrary_enum_discriminant;
+ if !has_feature && !i.span.allows_unstable(sym::arbitrary_enum_discriminant) {
+ self.maybe_report_invalid_custom_discriminants(&variants);
+ }
+ }
+
+ ast::ItemKind::Impl(box ast::Impl { polarity, defaultness, ref of_trait, .. }) => {
+ if let ast::ImplPolarity::Negative(span) = polarity {
+ gate_feature_post!(
+ &self,
+ negative_impls,
+ span.to(of_trait.as_ref().map_or(span, |t| t.path.span)),
+ "negative trait bounds are not yet fully implemented; \
+ use marker types for now"
+ );
+ }
+
+ if let ast::Defaultness::Default(_) = defaultness {
+ gate_feature_post!(&self, specialization, i.span, "specialization is unstable");
+ }
+ }
+
+ ast::ItemKind::Trait(box ast::Trait { is_auto: ast::IsAuto::Yes, .. }) => {
+ gate_feature_post!(
+ &self,
+ auto_traits,
+ i.span,
+ "auto traits are experimental and possibly buggy"
+ );
+ }
+
+ ast::ItemKind::TraitAlias(..) => {
+ gate_feature_post!(&self, trait_alias, i.span, "trait aliases are experimental");
+ }
+
+ ast::ItemKind::MacroDef(ast::MacroDef { macro_rules: false, .. }) => {
+ let msg = "`macro` is experimental";
+ gate_feature_post!(&self, decl_macro, i.span, msg);
+ }
+
+ ast::ItemKind::TyAlias(box ast::TyAlias { ty: Some(ref ty), .. }) => {
+ self.check_impl_trait(&ty)
+ }
+
+ _ => {}
+ }
+
+ visit::walk_item(self, i);
+ }
+
+ fn visit_foreign_item(&mut self, i: &'a ast::ForeignItem) {
+ match i.kind {
+ ast::ForeignItemKind::Fn(..) | ast::ForeignItemKind::Static(..) => {
+ let link_name = self.sess.first_attr_value_str_by_name(&i.attrs, sym::link_name);
+ let links_to_llvm =
+ link_name.map_or(false, |val| val.as_str().starts_with("llvm."));
+ if links_to_llvm {
+ gate_feature_post!(
+ &self,
+ link_llvm_intrinsics,
+ i.span,
+ "linking to LLVM intrinsics is experimental"
+ );
+ }
+ }
+ ast::ForeignItemKind::TyAlias(..) => {
+ gate_feature_post!(&self, extern_types, i.span, "extern types are experimental");
+ }
+ ast::ForeignItemKind::MacCall(..) => {}
+ }
+
+ visit::walk_foreign_item(self, i)
+ }
+
+ fn visit_ty(&mut self, ty: &'a ast::Ty) {
+ match ty.kind {
+ ast::TyKind::BareFn(ref bare_fn_ty) => {
+ // Function pointers cannot be `const`
+ self.check_extern(bare_fn_ty.ext, ast::Const::No);
+ }
+ ast::TyKind::Never => {
+ gate_feature_post!(&self, never_type, ty.span, "the `!` type is experimental");
+ }
+ _ => {}
+ }
+ visit::walk_ty(self, ty)
+ }
+
+ fn visit_fn_ret_ty(&mut self, ret_ty: &'a ast::FnRetTy) {
+ if let ast::FnRetTy::Ty(ref output_ty) = *ret_ty {
+ if let ast::TyKind::Never = output_ty.kind {
+ // Do nothing.
+ } else {
+ self.visit_ty(output_ty)
+ }
+ }
+ }
+
+ fn visit_stmt(&mut self, stmt: &'a ast::Stmt) {
+ if let ast::StmtKind::Semi(expr) = &stmt.kind
+ && let ast::ExprKind::Assign(lhs, _, _) = &expr.kind
+ && let ast::ExprKind::Type(..) = lhs.kind
+ && self.sess.parse_sess.span_diagnostic.err_count() == 0
+ && !self.features.type_ascription
+ && !lhs.span.allows_unstable(sym::type_ascription)
+ {
+ // When we encounter a statement of the form `foo: Ty = val;`, this will emit a type
+            // ascription error, but the likely intention was to write a `let` statement (#78907).
+ feature_err_issue(
+ &self.sess.parse_sess,
+ sym::type_ascription,
+ lhs.span,
+ GateIssue::Language,
+ "type ascription is experimental",
+ ).span_suggestion_verbose(
+ lhs.span.shrink_to_lo(),
+ "you might have meant to introduce a new binding",
+ "let ".to_string(),
+ Applicability::MachineApplicable,
+ ).emit();
+ }
+ visit::walk_stmt(self, stmt);
+ }
+
+ fn visit_expr(&mut self, e: &'a ast::Expr) {
+ match e.kind {
+ ast::ExprKind::Box(_) => {
+ gate_feature_post!(
+ &self,
+ box_syntax,
+ e.span,
+ "box expression syntax is experimental; you can call `Box::new` instead"
+ );
+ }
+ ast::ExprKind::Type(..) => {
+ // To avoid noise about type ascription in common syntax errors, only emit if it
+ // is the *only* error.
+ if self.sess.parse_sess.span_diagnostic.err_count() == 0 {
+ gate_feature_post!(
+ &self,
+ type_ascription,
+ e.span,
+ "type ascription is experimental"
+ );
+ }
+ }
+ ast::ExprKind::TryBlock(_) => {
+ gate_feature_post!(&self, try_blocks, e.span, "`try` expression is experimental");
+ }
+ ast::ExprKind::Block(_, Some(label)) => {
+ gate_feature_post!(
+ &self,
+ label_break_value,
+ label.ident.span,
+ "labels on blocks are unstable"
+ );
+ }
+ _ => {}
+ }
+ visit::walk_expr(self, e)
+ }
+
+ fn visit_pat(&mut self, pattern: &'a ast::Pat) {
+ match &pattern.kind {
+ PatKind::Slice(pats) => {
+ for pat in pats {
+ let inner_pat = match &pat.kind {
+ PatKind::Ident(.., Some(pat)) => pat,
+ _ => pat,
+ };
+ if let PatKind::Range(Some(_), None, Spanned { .. }) = inner_pat.kind {
+ gate_feature_post!(
+ &self,
+ half_open_range_patterns,
+ pat.span,
+ "`X..` patterns in slices are experimental"
+ );
+ }
+ }
+ }
+ PatKind::Box(..) => {
+ gate_feature_post!(
+ &self,
+ box_patterns,
+ pattern.span,
+ "box pattern syntax is experimental"
+ );
+ }
+ PatKind::Range(_, Some(_), Spanned { node: RangeEnd::Excluded, .. }) => {
+ gate_feature_post!(
+ &self,
+ exclusive_range_pattern,
+ pattern.span,
+ "exclusive range pattern syntax is experimental"
+ );
+ }
+ _ => {}
+ }
+ visit::walk_pat(self, pattern)
+ }
+
+ fn visit_fn(&mut self, fn_kind: FnKind<'a>, span: Span, _: NodeId) {
+ if let Some(header) = fn_kind.header() {
+            // Stability of const fn methods is covered in `visit_assoc_item` below.
+ self.check_extern(header.ext, header.constness);
+ }
+
+ if fn_kind.ctxt() != Some(FnCtxt::Foreign) && fn_kind.decl().c_variadic() {
+ gate_feature_post!(&self, c_variadic, span, "C-variadic functions are unstable");
+ }
+
+ visit::walk_fn(self, fn_kind, span)
+ }
+
+ fn visit_assoc_constraint(&mut self, constraint: &'a AssocConstraint) {
+ if let AssocConstraintKind::Bound { .. } = constraint.kind {
+ gate_feature_post!(
+ &self,
+ associated_type_bounds,
+ constraint.span,
+ "associated type bounds are unstable"
+ )
+ }
+ visit::walk_assoc_constraint(self, constraint)
+ }
+
+ fn visit_assoc_item(&mut self, i: &'a ast::AssocItem, ctxt: AssocCtxt) {
+ let is_fn = match i.kind {
+ ast::AssocItemKind::Fn(_) => true,
+ ast::AssocItemKind::TyAlias(box ast::TyAlias { ref generics, ref ty, .. }) => {
+ if let (Some(_), AssocCtxt::Trait) = (ty, ctxt) {
+ gate_feature_post!(
+ &self,
+ associated_type_defaults,
+ i.span,
+ "associated type defaults are unstable"
+ );
+ }
+ if let Some(ty) = ty {
+ self.check_impl_trait(ty);
+ }
+ self.check_gat(generics, i.span);
+ false
+ }
+ _ => false,
+ };
+ if let ast::Defaultness::Default(_) = i.kind.defaultness() {
+ // Limit `min_specialization` to only specializing functions.
+ gate_feature_fn!(
+ &self,
+ |x: &Features| x.specialization || (is_fn && x.min_specialization),
+ i.span,
+ sym::specialization,
+ "specialization is unstable"
+ );
+ }
+ visit::walk_assoc_item(self, i, ctxt)
+ }
+}
+
+pub fn check_crate(krate: &ast::Crate, sess: &Session) {
+ maybe_stage_features(sess, krate);
+ check_incompatible_features(sess);
+ let mut visitor = PostExpansionVisitor { sess, features: &sess.features_untracked() };
+
+ let spans = sess.parse_sess.gated_spans.spans.borrow();
+ macro_rules! gate_all {
+ ($gate:ident, $msg:literal, $help:literal) => {
+ if let Some(spans) = spans.get(&sym::$gate) {
+ for span in spans {
+ gate_feature_post!(&visitor, $gate, *span, $msg, $help);
+ }
+ }
+ };
+ ($gate:ident, $msg:literal) => {
+ if let Some(spans) = spans.get(&sym::$gate) {
+ for span in spans {
+ gate_feature_post!(&visitor, $gate, *span, $msg);
+ }
+ }
+ };
+ }
+ gate_all!(
+ if_let_guard,
+ "`if let` guards are experimental",
+ "you can write `if matches!(<expr>, <pattern>)` instead of `if let <pattern> = <expr>`"
+ );
+ gate_all!(let_chains, "`let` expressions in this position are unstable");
+ gate_all!(
+ async_closure,
+ "async closures are unstable",
+ "to use an async block, remove the `||`: `async {`"
+ );
+ gate_all!(
+ closure_lifetime_binder,
+ "`for<...>` binders for closures are experimental",
+ "consider removing `for<...>`"
+ );
+ gate_all!(more_qualified_paths, "usage of qualified paths in this context is experimental");
+ gate_all!(generators, "yield syntax is experimental");
+ gate_all!(raw_ref_op, "raw address of syntax is experimental");
+ gate_all!(const_trait_impl, "const trait impls are experimental");
+ gate_all!(half_open_range_patterns, "half-open range patterns are unstable");
+ gate_all!(inline_const, "inline-const is experimental");
+ gate_all!(inline_const_pat, "inline-const in pattern position is experimental");
+ gate_all!(associated_const_equality, "associated const equality is incomplete");
+ gate_all!(yeet_expr, "`do yeet` expression is experimental");
+
+ // All uses of `gate_all!` below this point were added in #65742,
+    // and subsequently disabled (with the non-early gating re-added).
+ macro_rules! gate_all {
+ ($gate:ident, $msg:literal) => {
+ // FIXME(eddyb) do something more useful than always
+ // disabling these uses of early feature-gatings.
+ if false {
+ for span in spans.get(&sym::$gate).unwrap_or(&vec![]) {
+ gate_feature_post!(&visitor, $gate, *span, $msg);
+ }
+ }
+ };
+ }
+
+ gate_all!(trait_alias, "trait aliases are experimental");
+ gate_all!(associated_type_bounds, "associated type bounds are unstable");
+ gate_all!(decl_macro, "`macro` is experimental");
+ gate_all!(box_patterns, "box pattern syntax is experimental");
+ gate_all!(exclusive_range_pattern, "exclusive range pattern syntax is experimental");
+ gate_all!(try_blocks, "`try` blocks are unstable");
+ gate_all!(label_break_value, "labels on blocks are unstable");
+ gate_all!(box_syntax, "box expression syntax is experimental; you can call `Box::new` instead");
+ // To avoid noise about type ascription in common syntax errors,
+ // only emit if it is the *only* error. (Also check it last.)
+ if sess.parse_sess.span_diagnostic.err_count() == 0 {
+ gate_all!(type_ascription, "type ascription is experimental");
+ }
+
+ visit::walk_crate(&mut visitor, krate);
+}
+
+fn maybe_stage_features(sess: &Session, krate: &ast::Crate) {
+    // Checks whether `#![feature]` has been used to enable any lang feature.
+    // It does not perform the same check for lib features unless at least one
+    // lang feature has also been declared.
+ if !sess.opts.unstable_features.is_nightly_build() {
+ let lang_features = &sess.features_untracked().declared_lang_features;
+        if lang_features.is_empty() {
+ return;
+ }
+ for attr in krate.attrs.iter().filter(|attr| attr.has_name(sym::feature)) {
+ let mut err = struct_span_err!(
+ sess.parse_sess.span_diagnostic,
+ attr.span,
+ E0554,
+ "`#![feature]` may not be used on the {} release channel",
+ option_env!("CFG_RELEASE_CHANNEL").unwrap_or("(unknown)")
+ );
+ let mut all_stable = true;
+ for ident in
+ attr.meta_item_list().into_iter().flatten().flat_map(|nested| nested.ident())
+ {
+ let name = ident.name;
+ let stable_since = lang_features
+ .iter()
+ .flat_map(|&(feature, _, since)| if feature == name { since } else { None })
+ .next();
+ if let Some(since) = stable_since {
+ err.help(&format!(
+ "the feature `{}` has been stable since {} and no longer requires \
+ an attribute to enable",
+ name, since
+ ));
+ } else {
+ all_stable = false;
+ }
+ }
+ if all_stable {
+ err.span_suggestion(
+ attr.span,
+ "remove the attribute",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ }
+ }
+}
+
+fn check_incompatible_features(sess: &Session) {
+ let features = sess.features_untracked();
+
+ let declared_features = features
+ .declared_lang_features
+ .iter()
+ .copied()
+ .map(|(name, span, _)| (name, span))
+ .chain(features.declared_lib_features.iter().copied());
+
+ for (f1, f2) in rustc_feature::INCOMPATIBLE_FEATURES
+ .iter()
+ .filter(|&&(f1, f2)| features.enabled(f1) && features.enabled(f2))
+ {
+ if let Some((f1_name, f1_span)) = declared_features.clone().find(|(name, _)| name == f1) {
+ if let Some((f2_name, f2_span)) = declared_features.clone().find(|(name, _)| name == f2)
+ {
+ let spans = vec![f1_span, f2_span];
+ sess.struct_span_err(
+ spans.clone(),
+ &format!(
+ "features `{}` and `{}` are incompatible, using them at the same time \
+ is not allowed",
+ f1_name, f2_name
+ ),
+ )
+ .help("remove one of these features")
+ .emit();
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_ast_passes/src/lib.rs b/compiler/rustc_ast_passes/src/lib.rs
new file mode 100644
index 000000000..9d52c3288
--- /dev/null
+++ b/compiler/rustc_ast_passes/src/lib.rs
@@ -0,0 +1,18 @@
+//! The `rustc_ast_passes` crate contains passes which validate the AST (the
+//! `syntax` data structures) parsed by `rustc_parse`. After the passes in this
+//! crate run, the AST is lowered by `rustc_ast_lowering`.
+//!
+//! The crate also contains other misc AST visitors, e.g. `node_count` and `show_span`.
+
+#![allow(rustc::potential_query_instability)]
+#![feature(box_patterns)]
+#![feature(if_let_guard)]
+#![feature(iter_is_partitioned)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![recursion_limit = "256"]
+
+pub mod ast_validation;
+pub mod feature_gate;
+pub mod node_count;
+pub mod show_span;
diff --git a/compiler/rustc_ast_passes/src/node_count.rs b/compiler/rustc_ast_passes/src/node_count.rs
new file mode 100644
index 000000000..9c7369c83
--- /dev/null
+++ b/compiler/rustc_ast_passes/src/node_count.rs
@@ -0,0 +1,135 @@
+// Simply gives a rough count of the nodes in an AST.
+
+use rustc_ast::visit::*;
+use rustc_ast::*;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+pub struct NodeCounter {
+ pub count: usize,
+}
+
+impl NodeCounter {
+ pub fn new() -> NodeCounter {
+ NodeCounter { count: 0 }
+ }
+}
+
+impl<'ast> Visitor<'ast> for NodeCounter {
+ fn visit_ident(&mut self, _ident: Ident) {
+ self.count += 1;
+ }
+ fn visit_foreign_item(&mut self, i: &ForeignItem) {
+ self.count += 1;
+ walk_foreign_item(self, i)
+ }
+ fn visit_item(&mut self, i: &Item) {
+ self.count += 1;
+ walk_item(self, i)
+ }
+ fn visit_local(&mut self, l: &Local) {
+ self.count += 1;
+ walk_local(self, l)
+ }
+ fn visit_block(&mut self, b: &Block) {
+ self.count += 1;
+ walk_block(self, b)
+ }
+ fn visit_stmt(&mut self, s: &Stmt) {
+ self.count += 1;
+ walk_stmt(self, s)
+ }
+ fn visit_arm(&mut self, a: &Arm) {
+ self.count += 1;
+ walk_arm(self, a)
+ }
+ fn visit_pat(&mut self, p: &Pat) {
+ self.count += 1;
+ walk_pat(self, p)
+ }
+ fn visit_expr(&mut self, ex: &Expr) {
+ self.count += 1;
+ walk_expr(self, ex)
+ }
+ fn visit_ty(&mut self, t: &Ty) {
+ self.count += 1;
+ walk_ty(self, t)
+ }
+ fn visit_generic_param(&mut self, param: &GenericParam) {
+ self.count += 1;
+ walk_generic_param(self, param)
+ }
+ fn visit_generics(&mut self, g: &Generics) {
+ self.count += 1;
+ walk_generics(self, g)
+ }
+ fn visit_fn(&mut self, fk: visit::FnKind<'_>, s: Span, _: NodeId) {
+ self.count += 1;
+ walk_fn(self, fk, s)
+ }
+ fn visit_assoc_item(&mut self, ti: &AssocItem, ctxt: AssocCtxt) {
+ self.count += 1;
+ walk_assoc_item(self, ti, ctxt);
+ }
+ fn visit_trait_ref(&mut self, t: &TraitRef) {
+ self.count += 1;
+ walk_trait_ref(self, t)
+ }
+ fn visit_param_bound(&mut self, bounds: &GenericBound, _ctxt: BoundKind) {
+ self.count += 1;
+ walk_param_bound(self, bounds)
+ }
+ fn visit_poly_trait_ref(&mut self, t: &PolyTraitRef, m: &TraitBoundModifier) {
+ self.count += 1;
+ walk_poly_trait_ref(self, t, m)
+ }
+ fn visit_variant_data(&mut self, s: &VariantData) {
+ self.count += 1;
+ walk_struct_def(self, s)
+ }
+ fn visit_field_def(&mut self, s: &FieldDef) {
+ self.count += 1;
+ walk_field_def(self, s)
+ }
+ fn visit_enum_def(
+ &mut self,
+ enum_definition: &EnumDef,
+ generics: &Generics,
+ item_id: NodeId,
+ _: Span,
+ ) {
+ self.count += 1;
+ walk_enum_def(self, enum_definition, generics, item_id)
+ }
+ fn visit_variant(&mut self, v: &Variant) {
+ self.count += 1;
+ walk_variant(self, v)
+ }
+ fn visit_lifetime(&mut self, lifetime: &Lifetime, _: visit::LifetimeCtxt) {
+ self.count += 1;
+ walk_lifetime(self, lifetime)
+ }
+ fn visit_mac_call(&mut self, mac: &MacCall) {
+ self.count += 1;
+ walk_mac(self, mac)
+ }
+ fn visit_path(&mut self, path: &Path, _id: NodeId) {
+ self.count += 1;
+ walk_path(self, path)
+ }
+ fn visit_use_tree(&mut self, use_tree: &UseTree, id: NodeId, _nested: bool) {
+ self.count += 1;
+ walk_use_tree(self, use_tree, id)
+ }
+ fn visit_generic_args(&mut self, path_span: Span, generic_args: &GenericArgs) {
+ self.count += 1;
+ walk_generic_args(self, path_span, generic_args)
+ }
+ fn visit_assoc_constraint(&mut self, constraint: &AssocConstraint) {
+ self.count += 1;
+ walk_assoc_constraint(self, constraint)
+ }
+ fn visit_attribute(&mut self, _attr: &Attribute) {
+ self.count += 1;
+ }
+}
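+
+// A hedged usage sketch (nothing in this file invokes the counter; callers
+// construct it and walk a crate themselves):
+//
+//     let mut counter = NodeCounter::new();
+//     walk_crate(&mut counter, &krate);
+//     eprintln!("the AST has roughly {} nodes", counter.count);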
diff --git a/compiler/rustc_ast_passes/src/show_span.rs b/compiler/rustc_ast_passes/src/show_span.rs
new file mode 100644
index 000000000..27637e311
--- /dev/null
+++ b/compiler/rustc_ast_passes/src/show_span.rs
@@ -0,0 +1,65 @@
+//! Span debugger
+//!
+//! This module shows spans for all expressions in the crate
+//! to help with compiler debugging.
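+//!
+//! Illustrative invocation (in practice this is wired up to the unstable
+//! `-Z show-span` flag; the call below is a sketch, not the real plumbing):
+//!
+//! ```ignore (illustrative)
+//! show_span::run(sess.diagnostic(), "expr", &krate);
+//! ```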
+
+use std::str::FromStr;
+
+use rustc_ast as ast;
+use rustc_ast::visit;
+use rustc_ast::visit::Visitor;
+
+enum Mode {
+ Expression,
+ Pattern,
+ Type,
+}
+
+impl FromStr for Mode {
+ type Err = ();
+ fn from_str(s: &str) -> Result<Mode, ()> {
+ let mode = match s {
+ "expr" => Mode::Expression,
+ "pat" => Mode::Pattern,
+ "ty" => Mode::Type,
+ _ => return Err(()),
+ };
+ Ok(mode)
+ }
+}
+
+struct ShowSpanVisitor<'a> {
+ span_diagnostic: &'a rustc_errors::Handler,
+ mode: Mode,
+}
+
+impl<'a> Visitor<'a> for ShowSpanVisitor<'a> {
+ fn visit_expr(&mut self, e: &'a ast::Expr) {
+ if let Mode::Expression = self.mode {
+ self.span_diagnostic.span_warn(e.span, "expression");
+ }
+ visit::walk_expr(self, e);
+ }
+
+ fn visit_pat(&mut self, p: &'a ast::Pat) {
+ if let Mode::Pattern = self.mode {
+ self.span_diagnostic.span_warn(p.span, "pattern");
+ }
+ visit::walk_pat(self, p);
+ }
+
+ fn visit_ty(&mut self, t: &'a ast::Ty) {
+ if let Mode::Type = self.mode {
+ self.span_diagnostic.span_warn(t.span, "type");
+ }
+ visit::walk_ty(self, t);
+ }
+}
+
+pub fn run(span_diagnostic: &rustc_errors::Handler, mode: &str, krate: &ast::Crate) {
+ let Ok(mode) = mode.parse() else {
+ return;
+ };
+ let mut v = ShowSpanVisitor { span_diagnostic, mode };
+ visit::walk_crate(&mut v, krate);
+}
diff --git a/compiler/rustc_ast_pretty/Cargo.toml b/compiler/rustc_ast_pretty/Cargo.toml
new file mode 100644
index 000000000..5ad8714e9
--- /dev/null
+++ b/compiler/rustc_ast_pretty/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "rustc_ast_pretty"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_span = { path = "../rustc_span" }
+rustc_ast = { path = "../rustc_ast" }
diff --git a/compiler/rustc_ast_pretty/src/helpers.rs b/compiler/rustc_ast_pretty/src/helpers.rs
new file mode 100644
index 000000000..5ec71cddf
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/helpers.rs
@@ -0,0 +1,48 @@
+use crate::pp::Printer;
+use std::borrow::Cow;
+
+impl Printer {
+ pub fn word_space<W: Into<Cow<'static, str>>>(&mut self, w: W) {
+ self.word(w);
+ self.space();
+ }
+
+ pub fn popen(&mut self) {
+ self.word("(");
+ }
+
+ pub fn pclose(&mut self) {
+ self.word(")");
+ }
+
+ pub fn hardbreak_if_not_bol(&mut self) {
+ if !self.is_beginning_of_line() {
+ self.hardbreak()
+ }
+ }
+
+ pub fn space_if_not_bol(&mut self) {
+ if !self.is_beginning_of_line() {
+ self.space();
+ }
+ }
+
+ pub fn nbsp(&mut self) {
+ self.word(" ")
+ }
+
+ pub fn word_nbsp<S: Into<Cow<'static, str>>>(&mut self, w: S) {
+ self.word(w);
+ self.nbsp()
+ }
+
+ // Synthesizes a comment that was not textually present in the original
+ // source file.
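+    // For example (illustrative), `synth_comment("ast-id 7")` renders as
+    // `/* ast-id 7 */` when it fits on one line.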
+ pub fn synth_comment(&mut self, text: impl Into<Cow<'static, str>>) {
+ self.word("/*");
+ self.space();
+ self.word(text);
+ self.space();
+ self.word("*/")
+ }
+}
diff --git a/compiler/rustc_ast_pretty/src/lib.rs b/compiler/rustc_ast_pretty/src/lib.rs
new file mode 100644
index 000000000..79178830b
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/lib.rs
@@ -0,0 +1,8 @@
+#![feature(associated_type_bounds)]
+#![feature(box_patterns)]
+#![feature(with_negative_coherence)]
+#![recursion_limit = "256"]
+
+mod helpers;
+pub mod pp;
+pub mod pprust;
diff --git a/compiler/rustc_ast_pretty/src/pp.rs b/compiler/rustc_ast_pretty/src/pp.rs
new file mode 100644
index 000000000..c93022308
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pp.rs
@@ -0,0 +1,451 @@
+//! This pretty-printer is a direct reimplementation of Philip Karlton's
+//! Mesa pretty-printer, as described in the appendix to
+//! Derek C. Oppen, "Pretty Printing" (1979),
+//! Stanford Computer Science Department STAN-CS-79-770,
+//! <http://i.stanford.edu/pub/cstr/reports/cs/tr/79/770/CS-TR-79-770.pdf>.
+//!
+//! The algorithm's aim is to break a stream into as few lines as possible
+//! while respecting the indentation-consistency requirements of the enclosing
+//! block, and avoiding breaking at silly places on block boundaries, for
+//! example, between "x" and ")" in "x)".
+//!
+//! I am implementing this algorithm because it comes with 20 pages of
+//! documentation explaining its theory, and because it addresses the set of
+//! concerns I've seen other pretty-printers fall down on. Weirdly. Even though
+//! it's 32 years old. What can I say?
+//!
+//! Despite some redundancies and quirks in the way it's implemented in that
+//! paper, I've opted to keep the implementation here as similar as I can,
+//! changing only what was blatantly wrong, a typo, or sufficiently
+//! non-idiomatic rust that it really stuck out.
+//!
+//! In particular you'll see a certain amount of churn related to INTEGER vs.
+//! CARDINAL in the Mesa implementation. Mesa apparently interconverts the two
+//! somewhat readily? In any case, I've used usize for indices-in-buffers and
+//! ints for character-sizes-and-indentation-offsets. This respects the need
+//! for ints to "go negative" while carrying a pending-calculation balance, and
+//! helps differentiate all the numbers flying around internally (slightly).
+//!
+//! I also inverted the indentation arithmetic used in the print stack, since
+//! the Mesa implementation (somewhat randomly) stores the offset on the print
+//! stack in terms of margin-col rather than col itself. I store col.
+//!
+//! I also implemented a small change in the String token, in that I store an
+//! explicit length for the string. For most tokens this is just the length of
+//! the accompanying string. But it's necessary to permit it to differ, for
+//! encoding things that are supposed to "go on their own line" -- certain
+//! classes of comment and blank-line -- where relying on adjacent
+//! hardbreak-like Break tokens with long blankness indication doesn't actually
+//! work. To see why, consider when there is a "thing that should be on its own
+//! line" between two long blocks, say functions. If you put a hardbreak after
+//! each function (or before each) and the breaking algorithm decides to break
+//! there anyways (because the functions themselves are long) you wind up with
+//! extra blank lines. If you don't put hardbreaks you can wind up with the
+//! "thing which should be on its own line" not getting its own line in the
+//! rare case of "really small functions" or such. This re-occurs with comments
+//! and explicit blank lines. So in those cases we use a string with a payload
+//! we want isolated to a line and an explicit length that's huge, surrounded
+//! by two zero-length breaks. The algorithm will try its best to fit it on a
+//! line (which it can't) and so naturally place the content on its own line to
+//! avoid combining it with other lines and making matters even worse.
+//!
+//! # Explanation
+//!
+//! In case you do not have the paper, here is an explanation of what's going
+//! on.
+//!
+//! There is a stream of input tokens flowing through this printer.
+//!
+//! The printer buffers up to 3N tokens inside itself, where N is linewidth.
+//! Yes, linewidth is chars and tokens are multi-char, but in the worst
+//! case every token worth buffering is 1 char long, so it's ok.
+//!
+//! Tokens are String, Break, and Begin/End to delimit blocks.
+//!
+//! Begin tokens can carry an offset, saying "how far to indent when you break
+//! inside here", as well as a flag indicating "consistent" or "inconsistent"
+//! breaking. Consistent breaking means that after the first break, no attempt
+//! will be made to flow subsequent breaks together onto lines. Inconsistent
+//! is the opposite. Inconsistent breaking example would be, say:
+//!
+//! ```ignore (illustrative)
+//! foo(hello, there, good, friends)
+//! ```
+//!
+//! breaking inconsistently to become
+//!
+//! ```ignore (illustrative)
+//! foo(hello, there,
+//! good, friends);
+//! ```
+//!
+//! whereas a consistent breaking would yield:
+//!
+//! ```ignore (illustrative)
+//! foo(hello,
+//! there,
+//! good,
+//! friends);
+//! ```
+//!
+//! That is, in the consistent-break blocks we value vertical alignment
+//! more than the ability to cram stuff onto a line. But in all cases if it
+//! can make a block a one-liner, it'll do so.
+//!
+//! Carrying on with high-level logic:
+//!
+//! The buffered tokens go through a ring-buffer, 'tokens'. The 'left' and
+//! 'right' indices denote the active portion of the ring buffer as well as
+//! describing hypothetical points-in-the-infinite-stream at most 3N tokens
+//! apart (i.e., "not wrapped to ring-buffer boundaries"). The paper will switch
+//! between using 'left' and 'right' terms to denote the wrapped-to-ring-buffer
+//! and point-in-infinite-stream senses freely.
+//!
+//! There is a parallel ring buffer, `size`, that holds the calculated size of
+//! each token. Why calculated? Because for Begin/End pairs, the "size"
+//! includes everything between the pair. That is, the "size" of Begin is
+//! actually the sum of the sizes of everything between Begin and the paired
+//! End that follows. Since that is arbitrarily far in the future, `size` is
+//! being rewritten regularly while the printer runs; in fact most of the
+//! machinery is here to work out `size` entries on the fly (and give up when
+//! they're so obviously over-long that "infinity" is a good enough
+//! approximation for purposes of line breaking).
+//!
+//! The "input side" of the printer is managed as an abstract process called
+//! SCAN, which uses `scan_stack`, to manage calculating `size`. SCAN is, in
+//! other words, the process of calculating 'size' entries.
+//!
+//! The "output side" of the printer is managed by an abstract process called
+//! PRINT, which uses `print_stack`, `margin` and `space` to figure out what to
+//! do with each token/size pair it consumes as it goes. It's trying to consume
+//! the entire buffered window, but can't output anything until the size is >=
+//! 0 (sizes are set to negative while they're pending calculation).
+//!
+//! So SCAN takes input and buffers tokens and pending calculations, while
+//! PRINT gobbles up completed calculations and tokens from the buffer. The
+//! theory is that the two can never get more than 3N tokens apart, because
+//! once there's "obviously" too much data to fit on a line, in a size
+//! calculation, SCAN will write "infinity" to the size and let PRINT consume
+//! it.
+//!
+//! In this implementation (following the paper, again) the SCAN process is the
+//! methods called `Printer::scan_*`, and the 'PRINT' process is the
+//! method called `Printer::print`.
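+//!
+//! As a minimal, illustrative driving sequence (real callers live in
+//! `pprust`; `cbox`, `word`, `space`, `end`, and `eof` are the wrappers
+//! defined in this module's `convenience` submodule):
+//!
+//! ```ignore (illustrative)
+//! let mut p = Printer::new();
+//! p.cbox(4);          // Begin token: consistent breaks, block indent 4
+//! p.word("foo(hello,");
+//! p.space();          // Break token of width 1
+//! p.word("friends)");
+//! p.end();            // End token
+//! let out = p.eof();  // flush; yields "foo(hello, friends)" if it fits
+//! ```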
+
+mod convenience;
+mod ring;
+
+use ring::RingBuffer;
+use std::borrow::Cow;
+use std::cmp;
+use std::collections::VecDeque;
+use std::iter;
+
+/// How to break. Described in more detail in the module docs.
+#[derive(Clone, Copy, PartialEq)]
+pub enum Breaks {
+ Consistent,
+ Inconsistent,
+}
+
+#[derive(Clone, Copy, PartialEq)]
+enum IndentStyle {
+ /// Vertically aligned under whatever column this block begins at.
+ ///
+ /// fn demo(arg1: usize,
+ /// arg2: usize) {}
+ Visual,
+ /// Indented relative to the indentation level of the previous line.
+ ///
+ /// fn demo(
+ /// arg1: usize,
+ /// arg2: usize,
+ /// ) {}
+ Block { offset: isize },
+}
+
+#[derive(Clone, Copy, Default, PartialEq)]
+pub struct BreakToken {
+ offset: isize,
+ blank_space: isize,
+ pre_break: Option<char>,
+}
+
+#[derive(Clone, Copy, PartialEq)]
+pub struct BeginToken {
+ indent: IndentStyle,
+ breaks: Breaks,
+}
+
+#[derive(Clone, PartialEq)]
+pub enum Token {
+ // In practice a string token contains either a `&'static str` or a
+ // `String`. `Cow` is overkill for this because we never modify the data,
+ // but it's more convenient than rolling our own more specialized type.
+ String(Cow<'static, str>),
+ Break(BreakToken),
+ Begin(BeginToken),
+ End,
+}
+
+#[derive(Copy, Clone)]
+enum PrintFrame {
+ Fits,
+ Broken { indent: usize, breaks: Breaks },
+}
+
+const SIZE_INFINITY: isize = 0xffff;
+
+/// Target line width.
+const MARGIN: isize = 78;
+/// Every line is allowed at least this much space, even if highly indented.
+const MIN_SPACE: isize = 60;
+
+pub struct Printer {
+ out: String,
+ /// Number of spaces left on line
+ space: isize,
+ /// Ring-buffer of tokens and calculated sizes
+ buf: RingBuffer<BufEntry>,
+ /// Running size of stream "...left"
+ left_total: isize,
+ /// Running size of stream "...right"
+ right_total: isize,
+ /// Pseudo-stack, really a ring too. Holds the
+ /// primary-ring-buffers index of the Begin that started the
+ /// current block, possibly with the most recent Break after that
+ /// Begin (if there is any) on top of it. Stuff is flushed off the
+ /// bottom as it becomes irrelevant due to the primary ring-buffer
+ /// advancing.
+ scan_stack: VecDeque<usize>,
+ /// Stack of blocks-in-progress being flushed by print
+ print_stack: Vec<PrintFrame>,
+ /// Level of indentation of current line
+ indent: usize,
+ /// Buffered indentation to avoid writing trailing whitespace
+ pending_indentation: isize,
+ /// The token most recently popped from the left boundary of the
+ /// ring-buffer for printing
+ last_printed: Option<Token>,
+}
+
+#[derive(Clone)]
+struct BufEntry {
+ token: Token,
+ size: isize,
+}
+
+impl Printer {
+ pub fn new() -> Self {
+ Printer {
+ out: String::new(),
+ space: MARGIN,
+ buf: RingBuffer::new(),
+ left_total: 0,
+ right_total: 0,
+ scan_stack: VecDeque::new(),
+ print_stack: Vec::new(),
+ indent: 0,
+ pending_indentation: 0,
+ last_printed: None,
+ }
+ }
+
+ pub fn last_token(&self) -> Option<&Token> {
+ self.last_token_still_buffered().or_else(|| self.last_printed.as_ref())
+ }
+
+ pub fn last_token_still_buffered(&self) -> Option<&Token> {
+ self.buf.last().map(|last| &last.token)
+ }
+
+ /// Be very careful with this!
+ pub fn replace_last_token_still_buffered(&mut self, token: Token) {
+ self.buf.last_mut().unwrap().token = token;
+ }
+
+ fn scan_eof(&mut self) {
+ if !self.scan_stack.is_empty() {
+ self.check_stack(0);
+ self.advance_left();
+ }
+ }
+
+ fn scan_begin(&mut self, token: BeginToken) {
+ if self.scan_stack.is_empty() {
+ self.left_total = 1;
+ self.right_total = 1;
+ self.buf.clear();
+ }
+ let right = self.buf.push(BufEntry { token: Token::Begin(token), size: -self.right_total });
+ self.scan_stack.push_back(right);
+ }
+
+ fn scan_end(&mut self) {
+ if self.scan_stack.is_empty() {
+ self.print_end();
+ } else {
+ let right = self.buf.push(BufEntry { token: Token::End, size: -1 });
+ self.scan_stack.push_back(right);
+ }
+ }
+
+ fn scan_break(&mut self, token: BreakToken) {
+ if self.scan_stack.is_empty() {
+ self.left_total = 1;
+ self.right_total = 1;
+ self.buf.clear();
+ } else {
+ self.check_stack(0);
+ }
+ let right = self.buf.push(BufEntry { token: Token::Break(token), size: -self.right_total });
+ self.scan_stack.push_back(right);
+ self.right_total += token.blank_space;
+ }
+
+ fn scan_string(&mut self, string: Cow<'static, str>) {
+ if self.scan_stack.is_empty() {
+ self.print_string(&string);
+ } else {
+ let len = string.len() as isize;
+ self.buf.push(BufEntry { token: Token::String(string), size: len });
+ self.right_total += len;
+ self.check_stream();
+ }
+ }
+
+ pub fn offset(&mut self, offset: isize) {
+ if let Some(BufEntry { token: Token::Break(token), .. }) = &mut self.buf.last_mut() {
+ token.offset += offset;
+ }
+ }
+
+ fn check_stream(&mut self) {
+ while self.right_total - self.left_total > self.space {
+ if *self.scan_stack.front().unwrap() == self.buf.index_of_first() {
+ self.scan_stack.pop_front().unwrap();
+ self.buf.first_mut().unwrap().size = SIZE_INFINITY;
+ }
+ self.advance_left();
+ if self.buf.is_empty() {
+ break;
+ }
+ }
+ }
+
+ fn advance_left(&mut self) {
+ while self.buf.first().unwrap().size >= 0 {
+ let left = self.buf.pop_first().unwrap();
+
+ match &left.token {
+ Token::String(string) => {
+ self.left_total += string.len() as isize;
+ self.print_string(string);
+ }
+ Token::Break(token) => {
+ self.left_total += token.blank_space;
+ self.print_break(*token, left.size);
+ }
+ Token::Begin(token) => self.print_begin(*token, left.size),
+ Token::End => self.print_end(),
+ }
+
+ self.last_printed = Some(left.token);
+
+ if self.buf.is_empty() {
+ break;
+ }
+ }
+ }
+
+ fn check_stack(&mut self, mut depth: usize) {
+ while let Some(&index) = self.scan_stack.back() {
+ let mut entry = &mut self.buf[index];
+ match entry.token {
+ Token::Begin(_) => {
+ if depth == 0 {
+ break;
+ }
+ self.scan_stack.pop_back().unwrap();
+ entry.size += self.right_total;
+ depth -= 1;
+ }
+ Token::End => {
+ // paper says + not =, but that makes no sense.
+ self.scan_stack.pop_back().unwrap();
+ entry.size = 1;
+ depth += 1;
+ }
+ _ => {
+ self.scan_stack.pop_back().unwrap();
+ entry.size += self.right_total;
+ if depth == 0 {
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ fn get_top(&self) -> PrintFrame {
+ *self
+ .print_stack
+ .last()
+ .unwrap_or(&PrintFrame::Broken { indent: 0, breaks: Breaks::Inconsistent })
+ }
+
+ fn print_begin(&mut self, token: BeginToken, size: isize) {
+ if size > self.space {
+ self.print_stack.push(PrintFrame::Broken { indent: self.indent, breaks: token.breaks });
+ self.indent = match token.indent {
+ IndentStyle::Block { offset } => {
+ usize::try_from(self.indent as isize + offset).unwrap()
+ }
+ IndentStyle::Visual => (MARGIN - self.space) as usize,
+ };
+ } else {
+ self.print_stack.push(PrintFrame::Fits);
+ }
+ }
+
+ fn print_end(&mut self) {
+ if let PrintFrame::Broken { indent, .. } = self.print_stack.pop().unwrap() {
+ self.indent = indent;
+ }
+ }
+
+ fn print_break(&mut self, token: BreakToken, size: isize) {
+ let fits = match self.get_top() {
+ PrintFrame::Fits => true,
+ PrintFrame::Broken { breaks: Breaks::Consistent, .. } => false,
+ PrintFrame::Broken { breaks: Breaks::Inconsistent, .. } => size <= self.space,
+ };
+ if fits {
+ self.pending_indentation += token.blank_space;
+ self.space -= token.blank_space;
+ } else {
+ if let Some(pre_break) = token.pre_break {
+ self.out.push(pre_break);
+ }
+ self.out.push('\n');
+ let indent = self.indent as isize + token.offset;
+ self.pending_indentation = indent;
+ self.space = cmp::max(MARGIN - indent, MIN_SPACE);
+ }
+ }
+
+ fn print_string(&mut self, string: &str) {
+ // Write the pending indent. A more concise way of doing this would be:
+ //
+ // write!(self.out, "{: >n$}", "", n = self.pending_indentation as usize)?;
+ //
+ // But that is significantly slower. This code is sufficiently hot, and indents can get
+ // sufficiently large, that the difference is significant on some workloads.
+ self.out.reserve(self.pending_indentation as usize);
+ self.out.extend(iter::repeat(' ').take(self.pending_indentation as usize));
+ self.pending_indentation = 0;
+
+ self.out.push_str(string);
+ self.space -= string.len() as isize;
+ }
+}
diff --git a/compiler/rustc_ast_pretty/src/pp/convenience.rs b/compiler/rustc_ast_pretty/src/pp/convenience.rs
new file mode 100644
index 000000000..93310dd45
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pp/convenience.rs
@@ -0,0 +1,94 @@
+use crate::pp::{BeginToken, BreakToken, Breaks, IndentStyle, Printer, Token, SIZE_INFINITY};
+use std::borrow::Cow;
+
+impl Printer {
+ /// "raw box"
+ pub fn rbox(&mut self, indent: isize, breaks: Breaks) {
+ self.scan_begin(BeginToken { indent: IndentStyle::Block { offset: indent }, breaks })
+ }
+
+ /// Inconsistent breaking box
+ pub fn ibox(&mut self, indent: isize) {
+ self.rbox(indent, Breaks::Inconsistent)
+ }
+
+ /// Consistent breaking box
+ pub fn cbox(&mut self, indent: isize) {
+ self.rbox(indent, Breaks::Consistent)
+ }
+
+ pub fn visual_align(&mut self) {
+ self.scan_begin(BeginToken { indent: IndentStyle::Visual, breaks: Breaks::Consistent });
+ }
+
+ pub fn break_offset(&mut self, n: usize, off: isize) {
+ self.scan_break(BreakToken {
+ offset: off,
+ blank_space: n as isize,
+ ..BreakToken::default()
+ });
+ }
+
+ pub fn end(&mut self) {
+ self.scan_end()
+ }
+
+ pub fn eof(mut self) -> String {
+ self.scan_eof();
+ self.out
+ }
+
+ pub fn word<S: Into<Cow<'static, str>>>(&mut self, wrd: S) {
+ let string = wrd.into();
+ self.scan_string(string)
+ }
+
+ fn spaces(&mut self, n: usize) {
+ self.break_offset(n, 0)
+ }
+
+ pub fn zerobreak(&mut self) {
+ self.spaces(0)
+ }
+
+ pub fn space(&mut self) {
+ self.spaces(1)
+ }
+
+ pub fn hardbreak(&mut self) {
+ self.spaces(SIZE_INFINITY as usize)
+ }
+
+ pub fn is_beginning_of_line(&self) -> bool {
+ match self.last_token() {
+ Some(last_token) => last_token.is_hardbreak_tok(),
+ None => true,
+ }
+ }
+
+ pub fn hardbreak_tok_offset(off: isize) -> Token {
+ Token::Break(BreakToken {
+ offset: off,
+ blank_space: SIZE_INFINITY,
+ ..BreakToken::default()
+ })
+ }
+
+ pub fn trailing_comma(&mut self) {
+ self.scan_break(BreakToken { pre_break: Some(','), ..BreakToken::default() });
+ }
+
+ pub fn trailing_comma_or_space(&mut self) {
+ self.scan_break(BreakToken {
+ blank_space: 1,
+ pre_break: Some(','),
+ ..BreakToken::default()
+ });
+ }
+}
+
+impl Token {
+ pub fn is_hardbreak_tok(&self) -> bool {
+ *self == Printer::hardbreak_tok_offset(0)
+ }
+}
diff --git a/compiler/rustc_ast_pretty/src/pp/ring.rs b/compiler/rustc_ast_pretty/src/pp/ring.rs
new file mode 100644
index 000000000..8187394fe
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pp/ring.rs
@@ -0,0 +1,77 @@
+use std::collections::VecDeque;
+use std::ops::{Index, IndexMut};
+
+/// A view onto a finite range of an infinitely long sequence of T.
+///
+/// The Ts are indexed 0..infinity. A RingBuffer begins as a view of elements
+/// 0..0 (i.e. nothing). The user of the RingBuffer advances its left and right
+/// position independently, although only in the positive direction, and only
+/// with left <= right at all times.
+///
+/// Holding a RingBuffer whose view is elements left..right gives the ability to
+/// use Index and IndexMut to access elements i in the infinitely long queue for
+/// which left <= i < right.
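+///
+/// An illustrative sketch of the indexing contract:
+///
+/// ```ignore (illustrative)
+/// let mut ring = RingBuffer::new();
+/// ring.push('a');           // returns abstract index 0
+/// let b = ring.push('b');   // returns abstract index 1
+/// ring.pop_first();         // the view advances from 0..2 to 1..2
+/// assert_eq!(ring[b], 'b'); // elements stay addressable by abstract index
+/// ```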
+pub struct RingBuffer<T> {
+ data: VecDeque<T>,
+ // Abstract index of data[0] in the infinitely sized queue.
+ offset: usize,
+}
+
+impl<T> RingBuffer<T> {
+ pub fn new() -> Self {
+ RingBuffer { data: VecDeque::new(), offset: 0 }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.data.is_empty()
+ }
+
+ pub fn push(&mut self, value: T) -> usize {
+ let index = self.offset + self.data.len();
+ self.data.push_back(value);
+ index
+ }
+
+ pub fn clear(&mut self) {
+ self.data.clear();
+ }
+
+ pub fn index_of_first(&self) -> usize {
+ self.offset
+ }
+
+ pub fn first(&self) -> Option<&T> {
+ self.data.front()
+ }
+
+ pub fn first_mut(&mut self) -> Option<&mut T> {
+ self.data.front_mut()
+ }
+
+ pub fn pop_first(&mut self) -> Option<T> {
+ let first = self.data.pop_front()?;
+ self.offset += 1;
+ Some(first)
+ }
+
+ pub fn last(&self) -> Option<&T> {
+ self.data.back()
+ }
+
+ pub fn last_mut(&mut self) -> Option<&mut T> {
+ self.data.back_mut()
+ }
+}
+
+impl<T> Index<usize> for RingBuffer<T> {
+ type Output = T;
+ fn index(&self, index: usize) -> &Self::Output {
+ &self.data[index.checked_sub(self.offset).unwrap()]
+ }
+}
+
+impl<T> IndexMut<usize> for RingBuffer<T> {
+ fn index_mut(&mut self, index: usize) -> &mut Self::Output {
+ &mut self.data[index.checked_sub(self.offset).unwrap()]
+ }
+}
diff --git a/compiler/rustc_ast_pretty/src/pprust/mod.rs b/compiler/rustc_ast_pretty/src/pprust/mod.rs
new file mode 100644
index 000000000..ac9e7d06c
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pprust/mod.rs
@@ -0,0 +1,86 @@
+#[cfg(test)]
+mod tests;
+
+pub mod state;
+pub use state::{print_crate, AnnNode, Comments, PpAnn, PrintState, State};
+
+use rustc_ast as ast;
+use rustc_ast::token::{Nonterminal, Token, TokenKind};
+use rustc_ast::tokenstream::{TokenStream, TokenTree};
+
+use std::borrow::Cow;
+
+pub fn nonterminal_to_string(nt: &Nonterminal) -> String {
+ State::new().nonterminal_to_string(nt)
+}
+
+/// Print the token kind precisely, without converting `$crate` into its respective crate name.
+pub fn token_kind_to_string(tok: &TokenKind) -> Cow<'static, str> {
+ State::new().token_kind_to_string(tok)
+}
+
+/// Print the token precisely, without converting `$crate` into its respective crate name.
+pub fn token_to_string(token: &Token) -> Cow<'static, str> {
+ State::new().token_to_string(token)
+}
+
+pub fn ty_to_string(ty: &ast::Ty) -> String {
+ State::new().ty_to_string(ty)
+}
+
+pub fn bounds_to_string(bounds: &[ast::GenericBound]) -> String {
+ State::new().bounds_to_string(bounds)
+}
+
+pub fn pat_to_string(pat: &ast::Pat) -> String {
+ State::new().pat_to_string(pat)
+}
+
+pub fn expr_to_string(e: &ast::Expr) -> String {
+ State::new().expr_to_string(e)
+}
+
+pub fn tt_to_string(tt: &TokenTree) -> String {
+ State::new().tt_to_string(tt)
+}
+
+pub fn tts_to_string(tokens: &TokenStream) -> String {
+ State::new().tts_to_string(tokens)
+}
+
+pub fn item_to_string(i: &ast::Item) -> String {
+ State::new().item_to_string(i)
+}
+
+pub fn path_to_string(p: &ast::Path) -> String {
+ State::new().path_to_string(p)
+}
+
+pub fn path_segment_to_string(p: &ast::PathSegment) -> String {
+ State::new().path_segment_to_string(p)
+}
+
+pub fn vis_to_string(v: &ast::Visibility) -> String {
+ State::new().vis_to_string(v)
+}
+
+pub fn meta_list_item_to_string(li: &ast::NestedMetaItem) -> String {
+ State::new().meta_list_item_to_string(li)
+}
+
+pub fn attribute_to_string(attr: &ast::Attribute) -> String {
+ State::new().attribute_to_string(attr)
+}
+
+pub fn to_string(f: impl FnOnce(&mut State<'_>)) -> String {
+ State::to_string(f)
+}
+
+pub fn crate_to_string_for_macros(krate: &ast::Crate) -> String {
+ State::to_string(|s| {
+ s.print_inner_attributes(&krate.attrs);
+ for item in &krate.items {
+ s.print_item(item);
+ }
+ })
+}
diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs
new file mode 100644
index 000000000..5eb7bf634
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pprust/state.rs
@@ -0,0 +1,1770 @@
+mod delimited;
+mod expr;
+mod item;
+
+use crate::pp::Breaks::{Consistent, Inconsistent};
+use crate::pp::{self, Breaks};
+
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, BinOpToken, CommentKind, Delimiter, Nonterminal, Token, TokenKind};
+use rustc_ast::tokenstream::{TokenStream, TokenTree};
+use rustc_ast::util::classify;
+use rustc_ast::util::comments::{gather_comments, Comment, CommentStyle};
+use rustc_ast::util::parser;
+use rustc_ast::{self as ast, BlockCheckMode, PatKind, RangeEnd, RangeSyntax};
+use rustc_ast::{attr, Term};
+use rustc_ast::{GenericArg, MacArgs, MacArgsEq};
+use rustc_ast::{GenericBound, SelfKind, TraitBoundModifier};
+use rustc_ast::{InlineAsmOperand, InlineAsmRegOrRegClass};
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_span::edition::Edition;
+use rustc_span::source_map::{SourceMap, Spanned};
+use rustc_span::symbol::{kw, sym, Ident, IdentPrinter, Symbol};
+use rustc_span::{BytePos, FileName, Span};
+
+use std::borrow::Cow;
+
+pub use self::delimited::IterDelimited;
+
+pub enum MacHeader<'a> {
+ Path(&'a ast::Path),
+ Keyword(&'static str),
+}
+
+pub enum AnnNode<'a> {
+ Ident(&'a Ident),
+ Name(&'a Symbol),
+ Block(&'a ast::Block),
+ Item(&'a ast::Item),
+ SubItem(ast::NodeId),
+ Expr(&'a ast::Expr),
+ Pat(&'a ast::Pat),
+ Crate(&'a ast::Crate),
+}
+
+pub trait PpAnn {
+ fn pre(&self, _state: &mut State<'_>, _node: AnnNode<'_>) {}
+ fn post(&self, _state: &mut State<'_>, _node: AnnNode<'_>) {}
+}
+
+#[derive(Copy, Clone)]
+pub struct NoAnn;
+
+impl PpAnn for NoAnn {}
+
+pub struct Comments<'a> {
+ sm: &'a SourceMap,
+ comments: Vec<Comment>,
+ current: usize,
+}
+
+impl<'a> Comments<'a> {
+ pub fn new(sm: &'a SourceMap, filename: FileName, input: String) -> Comments<'a> {
+ let comments = gather_comments(sm, filename, input);
+ Comments { sm, comments, current: 0 }
+ }
+
+ pub fn next(&self) -> Option<Comment> {
+ self.comments.get(self.current).cloned()
+ }
+
+ pub fn trailing_comment(
+ &self,
+ span: rustc_span::Span,
+ next_pos: Option<BytePos>,
+ ) -> Option<Comment> {
+ if let Some(cmnt) = self.next() {
+ if cmnt.style != CommentStyle::Trailing {
+ return None;
+ }
+ let span_line = self.sm.lookup_char_pos(span.hi());
+ let comment_line = self.sm.lookup_char_pos(cmnt.pos);
+ let next = next_pos.unwrap_or_else(|| cmnt.pos + BytePos(1));
+ if span.hi() < cmnt.pos && cmnt.pos < next && span_line.line == comment_line.line {
+ return Some(cmnt);
+ }
+ }
+
+ None
+ }
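+
+ // Illustrative example: for `let x = 1; // note`, the `// note` comment
+ // sits after `span.hi()`, before `next_pos`, and on the same source line,
+ // so it is returned; a comment on the following line fails the
+ // `span_line.line == comment_line.line` check and is not.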
+}
+
+pub struct State<'a> {
+ pub s: pp::Printer,
+ comments: Option<Comments<'a>>,
+ ann: &'a (dyn PpAnn + 'a),
+}
+
+pub(crate) const INDENT_UNIT: isize = 4;
+
+/// Requires you to pass an input filename and the source text so that
+/// it can scan the input for comments to copy forward.
+pub fn print_crate<'a>(
+ sm: &'a SourceMap,
+ krate: &ast::Crate,
+ filename: FileName,
+ input: String,
+ ann: &'a dyn PpAnn,
+ is_expanded: bool,
+ edition: Edition,
+) -> String {
+ let mut s =
+ State { s: pp::Printer::new(), comments: Some(Comments::new(sm, filename, input)), ann };
+
+ if is_expanded && !krate.attrs.iter().any(|attr| attr.has_name(sym::no_core)) {
+ // We need to print `#![no_std]` (and its feature gate) so that
+ // compiling pretty-printed source won't inject libstd again.
+ // However, we don't want these attributes in the AST because
+ // of the feature gate, so we fake them up here.
+
+ // `#![feature(prelude_import)]`
+ let pi_nested = attr::mk_nested_word_item(Ident::with_dummy_span(sym::prelude_import));
+ let list = attr::mk_list_item(Ident::with_dummy_span(sym::feature), vec![pi_nested]);
+ let fake_attr = attr::mk_attr_inner(list);
+ s.print_attribute(&fake_attr);
+
+ // Currently, in Rust 2018 we don't have `extern crate std;` at the crate
+ // root, so this is not needed, and actually breaks things.
+ if edition == Edition::Edition2015 {
+ // `#![no_std]`
+ let no_std_meta = attr::mk_word_item(Ident::with_dummy_span(sym::no_std));
+ let fake_attr = attr::mk_attr_inner(no_std_meta);
+ s.print_attribute(&fake_attr);
+ }
+ }
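+
+ // Illustrative example: for an expanded 2015-edition crate without
+ // `#![no_core]`, the printed output therefore begins with
+ // `#![feature(prelude_import)]` followed by `#![no_std]`.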
+
+ s.print_inner_attributes(&krate.attrs);
+ for item in &krate.items {
+ s.print_item(item);
+ }
+ s.print_remaining_comments();
+ s.ann.post(&mut s, AnnNode::Crate(krate));
+ s.s.eof()
+}
+
+/// This makes printed token streams look slightly nicer,
+/// and also addresses some specific regressions described in #63896 and #73345.
+fn tt_prepend_space(tt: &TokenTree, prev: &TokenTree) -> bool {
+ if let TokenTree::Token(token, _) = prev {
+ if matches!(token.kind, token::Dot | token::Dollar) {
+ return false;
+ }
+ if let token::DocComment(comment_kind, ..) = token.kind {
+ return comment_kind != CommentKind::Line;
+ }
+ }
+ match tt {
+ TokenTree::Token(token, _) => !matches!(token.kind, token::Comma | token::Not | token::Dot),
+ TokenTree::Delimited(_, Delimiter::Parenthesis, _) => {
+ !matches!(prev, TokenTree::Token(Token { kind: token::Ident(..), .. }, _))
+ }
+ TokenTree::Delimited(_, Delimiter::Bracket, _) => {
+ !matches!(prev, TokenTree::Token(Token { kind: token::Pound, .. }, _))
+ }
+ TokenTree::Delimited(..) => true,
+ }
+}
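+
+// Illustrative consequences of these rules: no space is inserted after `.`
+// or `$`, before `,`, `!`, or `.`, between an identifier and `(`, or between
+// `#` and `[`; so a stream prints as `foo.bar(x, y)` and `#[attr]` rather
+// than `foo . bar ( x , y )`.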
+
+fn binop_to_string(op: BinOpToken) -> &'static str {
+ match op {
+ token::Plus => "+",
+ token::Minus => "-",
+ token::Star => "*",
+ token::Slash => "/",
+ token::Percent => "%",
+ token::Caret => "^",
+ token::And => "&",
+ token::Or => "|",
+ token::Shl => "<<",
+ token::Shr => ">>",
+ }
+}
+
+fn doc_comment_to_string(
+ comment_kind: CommentKind,
+ attr_style: ast::AttrStyle,
+ data: Symbol,
+) -> String {
+ match (comment_kind, attr_style) {
+ (CommentKind::Line, ast::AttrStyle::Outer) => format!("///{}", data),
+ (CommentKind::Line, ast::AttrStyle::Inner) => format!("//!{}", data),
+ (CommentKind::Block, ast::AttrStyle::Outer) => format!("/**{}*/", data),
+ (CommentKind::Block, ast::AttrStyle::Inner) => format!("/*!{}*/", data),
+ }
+}
+
+pub fn literal_to_string(lit: token::Lit) -> String {
+ let token::Lit { kind, symbol, suffix } = lit;
+ let mut out = match kind {
+ token::Byte => format!("b'{}'", symbol),
+ token::Char => format!("'{}'", symbol),
+ token::Str => format!("\"{}\"", symbol),
+ token::StrRaw(n) => {
+ format!("r{delim}\"{string}\"{delim}", delim = "#".repeat(n as usize), string = symbol)
+ }
+ token::ByteStr => format!("b\"{}\"", symbol),
+ token::ByteStrRaw(n) => {
+ format!("br{delim}\"{string}\"{delim}", delim = "#".repeat(n as usize), string = symbol)
+ }
+ token::Integer | token::Float | token::Bool | token::Err => symbol.to_string(),
+ };
+
+ if let Some(suffix) = suffix {
+ out.push_str(suffix.as_str())
+ }
+
+ out
+}
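+
+// For example: `token::StrRaw(2)` with symbol `hi` prints as `r##"hi"##`,
+// and a suffixed literal such as `1u8` round-trips because the suffix is
+// appended verbatim.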
+
+impl std::ops::Deref for State<'_> {
+ type Target = pp::Printer;
+ fn deref(&self) -> &Self::Target {
+ &self.s
+ }
+}
+
+impl std::ops::DerefMut for State<'_> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.s
+ }
+}
+
+pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::DerefMut {
+ fn comments(&mut self) -> &mut Option<Comments<'a>>;
+ fn print_ident(&mut self, ident: Ident);
+ fn print_generic_args(&mut self, args: &ast::GenericArgs, colons_before_params: bool);
+
+ fn strsep<T, F>(
+ &mut self,
+ sep: &'static str,
+ space_before: bool,
+ b: Breaks,
+ elts: &[T],
+ mut op: F,
+ ) where
+ F: FnMut(&mut Self, &T),
+ {
+ self.rbox(0, b);
+ if let Some((first, rest)) = elts.split_first() {
+ op(self, first);
+ for elt in rest {
+ if space_before {
+ self.space();
+ }
+ self.word_space(sep);
+ op(self, elt);
+ }
+ }
+ self.end();
+ }
+
+ fn commasep<T, F>(&mut self, b: Breaks, elts: &[T], op: F)
+ where
+ F: FnMut(&mut Self, &T),
+ {
+ self.strsep(",", false, b, elts, op)
+ }
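+
+ // Illustrative example: `commasep` over `["a", "b", "c"]`, with `op`
+ // printing each element, emits `a, b, c`; `word_space` leaves a legal
+ // break point after each comma.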
+
+ fn maybe_print_comment(&mut self, pos: BytePos) -> bool {
+ let mut has_comment = false;
+ while let Some(ref cmnt) = self.next_comment() {
+ if cmnt.pos < pos {
+ has_comment = true;
+ self.print_comment(cmnt);
+ } else {
+ break;
+ }
+ }
+ has_comment
+ }
+
+ fn print_comment(&mut self, cmnt: &Comment) {
+ match cmnt.style {
+ CommentStyle::Mixed => {
+ if !self.is_beginning_of_line() {
+ self.zerobreak();
+ }
+ if let Some((last, lines)) = cmnt.lines.split_last() {
+ self.ibox(0);
+
+ for line in lines {
+ self.word(line.clone());
+ self.hardbreak()
+ }
+
+ self.word(last.clone());
+ self.space();
+
+ self.end();
+ }
+ self.zerobreak()
+ }
+ CommentStyle::Isolated => {
+ self.hardbreak_if_not_bol();
+ for line in &cmnt.lines {
+ // Don't print empty lines because they will end up as trailing
+ // whitespace.
+ if !line.is_empty() {
+ self.word(line.clone());
+ }
+ self.hardbreak();
+ }
+ }
+ CommentStyle::Trailing => {
+ if !self.is_beginning_of_line() {
+ self.word(" ");
+ }
+ if cmnt.lines.len() == 1 {
+ self.word(cmnt.lines[0].clone());
+ self.hardbreak()
+ } else {
+ self.visual_align();
+ for line in &cmnt.lines {
+ if !line.is_empty() {
+ self.word(line.clone());
+ }
+ self.hardbreak();
+ }
+ self.end();
+ }
+ }
+ CommentStyle::BlankLine => {
+ // We need to do at least one, possibly two hardbreaks.
+ let twice = match self.last_token() {
+ Some(pp::Token::String(s)) => ";" == s,
+ Some(pp::Token::Begin(_)) => true,
+ Some(pp::Token::End) => true,
+ _ => false,
+ };
+ if twice {
+ self.hardbreak();
+ }
+ self.hardbreak();
+ }
+ }
+ if let Some(cmnts) = self.comments() {
+ cmnts.current += 1;
+ }
+ }
+
+ fn next_comment(&mut self) -> Option<Comment> {
+ self.comments().as_mut().and_then(|c| c.next())
+ }
+
+ fn maybe_print_trailing_comment(&mut self, span: rustc_span::Span, next_pos: Option<BytePos>) {
+ if let Some(cmnts) = self.comments() {
+ if let Some(cmnt) = cmnts.trailing_comment(span, next_pos) {
+ self.print_comment(&cmnt);
+ }
+ }
+ }
+
+ fn print_remaining_comments(&mut self) {
+ // If there aren't any remaining comments, then we need to manually
+ // make sure there is a line break at the end.
+ if self.next_comment().is_none() {
+ self.hardbreak();
+ }
+ while let Some(ref cmnt) = self.next_comment() {
+ self.print_comment(cmnt)
+ }
+ }
+
+ fn print_literal(&mut self, lit: &ast::Lit) {
+ self.maybe_print_comment(lit.span.lo());
+ self.word(lit.token.to_string())
+ }
+
+ fn print_string(&mut self, st: &str, style: ast::StrStyle) {
+ let st = match style {
+ ast::StrStyle::Cooked => format!("\"{}\"", st.escape_debug()),
+ ast::StrStyle::Raw(n) => {
+ format!("r{delim}\"{string}\"{delim}", delim = "#".repeat(n as usize), string = st)
+ }
+ };
+ self.word(st)
+ }
+
+ fn print_symbol(&mut self, sym: Symbol, style: ast::StrStyle) {
+ self.print_string(sym.as_str(), style);
+ }
+
+ fn print_inner_attributes(&mut self, attrs: &[ast::Attribute]) -> bool {
+ self.print_either_attributes(attrs, ast::AttrStyle::Inner, false, true)
+ }
+
+ fn print_inner_attributes_no_trailing_hardbreak(&mut self, attrs: &[ast::Attribute]) -> bool {
+ self.print_either_attributes(attrs, ast::AttrStyle::Inner, false, false)
+ }
+
+ fn print_outer_attributes(&mut self, attrs: &[ast::Attribute]) -> bool {
+ self.print_either_attributes(attrs, ast::AttrStyle::Outer, false, true)
+ }
+
+ fn print_inner_attributes_inline(&mut self, attrs: &[ast::Attribute]) -> bool {
+ self.print_either_attributes(attrs, ast::AttrStyle::Inner, true, true)
+ }
+
+ fn print_outer_attributes_inline(&mut self, attrs: &[ast::Attribute]) -> bool {
+ self.print_either_attributes(attrs, ast::AttrStyle::Outer, true, true)
+ }
+
+ fn print_either_attributes(
+ &mut self,
+ attrs: &[ast::Attribute],
+ kind: ast::AttrStyle,
+ is_inline: bool,
+ trailing_hardbreak: bool,
+ ) -> bool {
+ let mut printed = false;
+ for attr in attrs {
+ if attr.style == kind {
+ self.print_attribute_inline(attr, is_inline);
+ if is_inline {
+ self.nbsp();
+ }
+ printed = true;
+ }
+ }
+ if printed && trailing_hardbreak && !is_inline {
+ self.hardbreak_if_not_bol();
+ }
+ printed
+ }
+
+ fn print_attribute(&mut self, attr: &ast::Attribute) {
+ self.print_attribute_inline(attr, false)
+ }
+
+ fn print_attribute_inline(&mut self, attr: &ast::Attribute, is_inline: bool) {
+ if !is_inline {
+ self.hardbreak_if_not_bol();
+ }
+ self.maybe_print_comment(attr.span.lo());
+ match attr.kind {
+ ast::AttrKind::Normal(ref item, _) => {
+ match attr.style {
+ ast::AttrStyle::Inner => self.word("#!["),
+ ast::AttrStyle::Outer => self.word("#["),
+ }
+ self.print_attr_item(&item, attr.span);
+ self.word("]");
+ }
+ ast::AttrKind::DocComment(comment_kind, data) => {
+ self.word(doc_comment_to_string(comment_kind, attr.style, data));
+ self.hardbreak()
+ }
+ }
+ }
+
+ fn print_attr_item(&mut self, item: &ast::AttrItem, span: Span) {
+ self.ibox(0);
+ match &item.args {
+ MacArgs::Delimited(_, delim, tokens) => self.print_mac_common(
+ Some(MacHeader::Path(&item.path)),
+ false,
+ None,
+ Some(delim.to_token()),
+ tokens,
+ true,
+ span,
+ ),
+ MacArgs::Empty => {
+ self.print_path(&item.path, false, 0);
+ }
+ MacArgs::Eq(_, MacArgsEq::Ast(expr)) => {
+ self.print_path(&item.path, false, 0);
+ self.space();
+ self.word_space("=");
+ let token_str = self.expr_to_string(expr);
+ self.word(token_str);
+ }
+ MacArgs::Eq(_, MacArgsEq::Hir(lit)) => {
+ self.print_path(&item.path, false, 0);
+ self.space();
+ self.word_space("=");
+ let token_str = self.literal_to_string(lit);
+ self.word(token_str);
+ }
+ }
+ self.end();
+ }
+
+ fn print_meta_list_item(&mut self, item: &ast::NestedMetaItem) {
+ match item {
+ ast::NestedMetaItem::MetaItem(ref mi) => self.print_meta_item(mi),
+ ast::NestedMetaItem::Literal(ref lit) => self.print_literal(lit),
+ }
+ }
+
+ fn print_meta_item(&mut self, item: &ast::MetaItem) {
+ self.ibox(INDENT_UNIT);
+ match item.kind {
+ ast::MetaItemKind::Word => self.print_path(&item.path, false, 0),
+ ast::MetaItemKind::NameValue(ref value) => {
+ self.print_path(&item.path, false, 0);
+ self.space();
+ self.word_space("=");
+ self.print_literal(value);
+ }
+ ast::MetaItemKind::List(ref items) => {
+ self.print_path(&item.path, false, 0);
+ self.popen();
+ self.commasep(Consistent, &items, |s, i| s.print_meta_list_item(i));
+ self.pclose();
+ }
+ }
+ self.end();
+ }
+
+ /// This doesn't deserve to be called "pretty" printing, but it should be
+ /// meaning-preserving. A quick hack that might help would be to look at the
+ /// spans embedded in the TTs to decide where to put spaces and newlines.
+ /// But it'd be better to parse these according to the grammar of the
+ /// appropriate macro, transcribe back into the grammar we just parsed from,
+ /// and then pretty-print the resulting AST nodes (so, e.g., we print
+ /// expression arguments as expressions). It can be done! I think.
+ fn print_tt(&mut self, tt: &TokenTree, convert_dollar_crate: bool) {
+ match tt {
+ TokenTree::Token(token, _) => {
+ let token_str = self.token_to_string_ext(&token, convert_dollar_crate);
+ self.word(token_str);
+ if let token::DocComment(..) = token.kind {
+ self.hardbreak()
+ }
+ }
+ TokenTree::Delimited(dspan, delim, tts) => {
+ self.print_mac_common(
+ None,
+ false,
+ None,
+ Some(*delim),
+ tts,
+ convert_dollar_crate,
+ dspan.entire(),
+ );
+ }
+ }
+ }
+
+ fn print_tts(&mut self, tts: &TokenStream, convert_dollar_crate: bool) {
+ let mut iter = tts.trees().peekable();
+ while let Some(tt) = iter.next() {
+ self.print_tt(tt, convert_dollar_crate);
+ if let Some(next) = iter.peek() {
+ if tt_prepend_space(next, tt) {
+ self.space();
+ }
+ }
+ }
+ }
+
+ fn print_mac_common(
+ &mut self,
+ header: Option<MacHeader<'_>>,
+ has_bang: bool,
+ ident: Option<Ident>,
+ delim: Option<Delimiter>,
+ tts: &TokenStream,
+ convert_dollar_crate: bool,
+ span: Span,
+ ) {
+ if delim == Some(Delimiter::Brace) {
+ self.cbox(INDENT_UNIT);
+ }
+ match header {
+ Some(MacHeader::Path(path)) => self.print_path(path, false, 0),
+ Some(MacHeader::Keyword(kw)) => self.word(kw),
+ None => {}
+ }
+ if has_bang {
+ self.word("!");
+ }
+ if let Some(ident) = ident {
+ self.nbsp();
+ self.print_ident(ident);
+ }
+ match delim {
+ Some(Delimiter::Brace) => {
+ if header.is_some() || has_bang || ident.is_some() {
+ self.nbsp();
+ }
+ self.word("{");
+ if !tts.is_empty() {
+ self.space();
+ }
+ self.ibox(0);
+ self.print_tts(tts, convert_dollar_crate);
+ self.end();
+ let empty = tts.is_empty();
+ self.bclose(span, empty);
+ }
+ Some(delim) => {
+ let token_str = self.token_kind_to_string(&token::OpenDelim(delim));
+ self.word(token_str);
+ self.ibox(0);
+ self.print_tts(tts, convert_dollar_crate);
+ self.end();
+ let token_str = self.token_kind_to_string(&token::CloseDelim(delim));
+ self.word(token_str);
+ }
+ None => {
+ self.ibox(0);
+ self.print_tts(tts, convert_dollar_crate);
+ self.end();
+ }
+ }
+ }
+
+ fn print_mac_def(
+ &mut self,
+ macro_def: &ast::MacroDef,
+ ident: &Ident,
+ sp: Span,
+ print_visibility: impl FnOnce(&mut Self),
+ ) {
+ let (kw, has_bang) = if macro_def.macro_rules {
+ ("macro_rules", true)
+ } else {
+ print_visibility(self);
+ ("macro", false)
+ };
+ self.print_mac_common(
+ Some(MacHeader::Keyword(kw)),
+ has_bang,
+ Some(*ident),
+ macro_def.body.delim(),
+ &macro_def.body.inner_tokens(),
+ true,
+ sp,
+ );
+ if macro_def.body.need_semicolon() {
+ self.word(";");
+ }
+ }
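+
+ // Illustrative example: a `macro_rules` definition prints as
+ // `macro_rules! name { ... }` (with a trailing `;` when the body's
+ // delimiter requires one), while a 2.0 `macro` definition prints its
+ // visibility first and omits the `!`.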
+
+ fn print_path(&mut self, path: &ast::Path, colons_before_params: bool, depth: usize) {
+ self.maybe_print_comment(path.span.lo());
+
+ for (i, segment) in path.segments[..path.segments.len() - depth].iter().enumerate() {
+ if i > 0 {
+ self.word("::")
+ }
+ self.print_path_segment(segment, colons_before_params);
+ }
+ }
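+
+ // For example: with `depth == 1`, the path `a::b::c` prints as `a::b`;
+ // `print_qpath` relies on this to print only the trait portion of a
+ // qualified path.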
+
+ fn print_path_segment(&mut self, segment: &ast::PathSegment, colons_before_params: bool) {
+ if segment.ident.name != kw::PathRoot {
+ self.print_ident(segment.ident);
+ if let Some(ref args) = segment.args {
+ self.print_generic_args(args, colons_before_params);
+ }
+ }
+ }
+
+ fn head<S: Into<Cow<'static, str>>>(&mut self, w: S) {
+ let w = w.into();
+ // Outer-box is consistent.
+ self.cbox(INDENT_UNIT);
+ // Head-box is inconsistent.
+ self.ibox(0);
+ // Keyword that starts the head.
+ if !w.is_empty() {
+ self.word_nbsp(w);
+ }
+ }
+
+ fn bopen(&mut self) {
+ self.word("{");
+ self.end(); // Close the head-box.
+ }
+
+ fn bclose_maybe_open(&mut self, span: rustc_span::Span, empty: bool, close_box: bool) {
+ let has_comment = self.maybe_print_comment(span.hi());
+ if !empty || has_comment {
+ self.break_offset_if_not_bol(1, -(INDENT_UNIT as isize));
+ }
+ self.word("}");
+ if close_box {
+ self.end(); // Close the outer-box.
+ }
+ }
+
+ fn bclose(&mut self, span: rustc_span::Span, empty: bool) {
+ let close_box = true;
+ self.bclose_maybe_open(span, empty, close_box)
+ }
+
+ fn break_offset_if_not_bol(&mut self, n: usize, off: isize) {
+ if !self.is_beginning_of_line() {
+ self.break_offset(n, off)
+ } else if off != 0 {
+ if let Some(last_token) = self.last_token_still_buffered() {
+ if last_token.is_hardbreak_tok() {
+ // We do something pretty sketchy here: tuck the nonzero
+ // offset-adjustment we were going to deposit along with the
+ // break into the previous hardbreak.
+ self.replace_last_token_still_buffered(pp::Printer::hardbreak_tok_offset(off));
+ }
+ }
+ }
+ }
+
+ fn nonterminal_to_string(&self, nt: &Nonterminal) -> String {
+ match *nt {
+ token::NtExpr(ref e) => self.expr_to_string(e),
+ token::NtMeta(ref e) => self.attr_item_to_string(e),
+ token::NtTy(ref e) => self.ty_to_string(e),
+ token::NtPath(ref e) => self.path_to_string(e),
+ token::NtItem(ref e) => self.item_to_string(e),
+ token::NtBlock(ref e) => self.block_to_string(e),
+ token::NtStmt(ref e) => self.stmt_to_string(e),
+ token::NtPat(ref e) => self.pat_to_string(e),
+ token::NtIdent(e, is_raw) => IdentPrinter::for_ast_ident(e, is_raw).to_string(),
+ token::NtLifetime(e) => e.to_string(),
+ token::NtLiteral(ref e) => self.expr_to_string(e),
+ token::NtVis(ref e) => self.vis_to_string(e),
+ }
+ }
+
+ /// Print the token kind precisely, without converting `$crate` into its respective crate name.
+ fn token_kind_to_string(&self, tok: &TokenKind) -> Cow<'static, str> {
+ self.token_kind_to_string_ext(tok, None)
+ }
+
+ fn token_kind_to_string_ext(
+ &self,
+ tok: &TokenKind,
+ convert_dollar_crate: Option<Span>,
+ ) -> Cow<'static, str> {
+ match *tok {
+ token::Eq => "=".into(),
+ token::Lt => "<".into(),
+ token::Le => "<=".into(),
+ token::EqEq => "==".into(),
+ token::Ne => "!=".into(),
+ token::Ge => ">=".into(),
+ token::Gt => ">".into(),
+ token::Not => "!".into(),
+ token::Tilde => "~".into(),
+ token::OrOr => "||".into(),
+ token::AndAnd => "&&".into(),
+ token::BinOp(op) => binop_to_string(op).into(),
+ token::BinOpEq(op) => format!("{}=", binop_to_string(op)).into(),
+
+ /* Structural symbols */
+ token::At => "@".into(),
+ token::Dot => ".".into(),
+ token::DotDot => "..".into(),
+ token::DotDotDot => "...".into(),
+ token::DotDotEq => "..=".into(),
+ token::Comma => ",".into(),
+ token::Semi => ";".into(),
+ token::Colon => ":".into(),
+ token::ModSep => "::".into(),
+ token::RArrow => "->".into(),
+ token::LArrow => "<-".into(),
+ token::FatArrow => "=>".into(),
+ token::OpenDelim(Delimiter::Parenthesis) => "(".into(),
+ token::CloseDelim(Delimiter::Parenthesis) => ")".into(),
+ token::OpenDelim(Delimiter::Bracket) => "[".into(),
+ token::CloseDelim(Delimiter::Bracket) => "]".into(),
+ token::OpenDelim(Delimiter::Brace) => "{".into(),
+ token::CloseDelim(Delimiter::Brace) => "}".into(),
+ token::OpenDelim(Delimiter::Invisible) | token::CloseDelim(Delimiter::Invisible) => {
+ "".into()
+ }
+ token::Pound => "#".into(),
+ token::Dollar => "$".into(),
+ token::Question => "?".into(),
+ token::SingleQuote => "'".into(),
+
+ /* Literals */
+ token::Literal(lit) => literal_to_string(lit).into(),
+
+ /* Name components */
+ token::Ident(s, is_raw) => {
+ IdentPrinter::new(s, is_raw, convert_dollar_crate).to_string().into()
+ }
+ token::Lifetime(s) => s.to_string().into(),
+
+ /* Other */
+ token::DocComment(comment_kind, attr_style, data) => {
+ doc_comment_to_string(comment_kind, attr_style, data).into()
+ }
+ token::Eof => "<eof>".into(),
+
+ token::Interpolated(ref nt) => self.nonterminal_to_string(nt).into(),
+ }
+ }
+
+ /// Print the token precisely, without converting `$crate` into its respective crate name.
+ fn token_to_string(&self, token: &Token) -> Cow<'static, str> {
+ self.token_to_string_ext(token, false)
+ }
+
+ fn token_to_string_ext(&self, token: &Token, convert_dollar_crate: bool) -> Cow<'static, str> {
+ let convert_dollar_crate = convert_dollar_crate.then_some(token.span);
+ self.token_kind_to_string_ext(&token.kind, convert_dollar_crate)
+ }
+
+ fn ty_to_string(&self, ty: &ast::Ty) -> String {
+ Self::to_string(|s| s.print_type(ty))
+ }
+
+ fn bounds_to_string(&self, bounds: &[ast::GenericBound]) -> String {
+ Self::to_string(|s| s.print_type_bounds(bounds))
+ }
+
+ fn pat_to_string(&self, pat: &ast::Pat) -> String {
+ Self::to_string(|s| s.print_pat(pat))
+ }
+
+ fn expr_to_string(&self, e: &ast::Expr) -> String {
+ Self::to_string(|s| s.print_expr(e))
+ }
+
+ fn literal_to_string(&self, lit: &ast::Lit) -> String {
+ Self::to_string(|s| s.print_literal(lit))
+ }
+
+ fn tt_to_string(&self, tt: &TokenTree) -> String {
+ Self::to_string(|s| s.print_tt(tt, false))
+ }
+
+ fn tts_to_string(&self, tokens: &TokenStream) -> String {
+ Self::to_string(|s| s.print_tts(tokens, false))
+ }
+
+ fn stmt_to_string(&self, stmt: &ast::Stmt) -> String {
+ Self::to_string(|s| s.print_stmt(stmt))
+ }
+
+ fn item_to_string(&self, i: &ast::Item) -> String {
+ Self::to_string(|s| s.print_item(i))
+ }
+
+ fn assoc_item_to_string(&self, i: &ast::AssocItem) -> String {
+ Self::to_string(|s| s.print_assoc_item(i))
+ }
+
+ fn foreign_item_to_string(&self, i: &ast::ForeignItem) -> String {
+ Self::to_string(|s| s.print_foreign_item(i))
+ }
+
+ fn generic_params_to_string(&self, generic_params: &[ast::GenericParam]) -> String {
+ Self::to_string(|s| s.print_generic_params(generic_params))
+ }
+
+ fn path_to_string(&self, p: &ast::Path) -> String {
+ Self::to_string(|s| s.print_path(p, false, 0))
+ }
+
+ fn path_segment_to_string(&self, p: &ast::PathSegment) -> String {
+ Self::to_string(|s| s.print_path_segment(p, false))
+ }
+
+ fn vis_to_string(&self, v: &ast::Visibility) -> String {
+ Self::to_string(|s| s.print_visibility(v))
+ }
+
+ fn block_to_string(&self, blk: &ast::Block) -> String {
+ Self::to_string(|s| {
+ // Containing cbox, will be closed by `print_block` at `}`.
+ s.cbox(INDENT_UNIT);
+ // Head-ibox, will be closed by `print_block` after `{`.
+ s.ibox(0);
+ s.print_block(blk)
+ })
+ }
+
+ fn meta_list_item_to_string(&self, li: &ast::NestedMetaItem) -> String {
+ Self::to_string(|s| s.print_meta_list_item(li))
+ }
+
+ fn attr_item_to_string(&self, ai: &ast::AttrItem) -> String {
+ Self::to_string(|s| s.print_attr_item(ai, ai.path.span))
+ }
+
+ fn attribute_to_string(&self, attr: &ast::Attribute) -> String {
+ Self::to_string(|s| s.print_attribute(attr))
+ }
+
+ fn param_to_string(&self, arg: &ast::Param) -> String {
+ Self::to_string(|s| s.print_param(arg, false))
+ }
+
+ fn to_string(f: impl FnOnce(&mut State<'_>)) -> String {
+ let mut printer = State::new();
+ f(&mut printer);
+ printer.s.eof()
+ }
+}
+
+impl<'a> PrintState<'a> for State<'a> {
+ fn comments(&mut self) -> &mut Option<Comments<'a>> {
+ &mut self.comments
+ }
+
+ fn print_ident(&mut self, ident: Ident) {
+ self.word(IdentPrinter::for_ast_ident(ident, ident.is_raw_guess()).to_string());
+ self.ann.post(self, AnnNode::Ident(&ident))
+ }
+
+ fn print_generic_args(&mut self, args: &ast::GenericArgs, colons_before_params: bool) {
+ if colons_before_params {
+ self.word("::")
+ }
+
+ match *args {
+ ast::GenericArgs::AngleBracketed(ref data) => {
+ self.word("<");
+ self.commasep(Inconsistent, &data.args, |s, arg| match arg {
+ ast::AngleBracketedArg::Arg(a) => s.print_generic_arg(a),
+ ast::AngleBracketedArg::Constraint(c) => s.print_assoc_constraint(c),
+ });
+ self.word(">")
+ }
+
+ ast::GenericArgs::Parenthesized(ref data) => {
+ self.word("(");
+ self.commasep(Inconsistent, &data.inputs, |s, ty| s.print_type(ty));
+ self.word(")");
+ self.print_fn_ret_ty(&data.output);
+ }
+ }
+ }
+}
+
+impl<'a> State<'a> {
+ pub fn new() -> State<'a> {
+ State { s: pp::Printer::new(), comments: None, ann: &NoAnn }
+ }
+
+ pub(crate) fn commasep_cmnt<T, F, G>(
+ &mut self,
+ b: Breaks,
+ elts: &[T],
+ mut op: F,
+ mut get_span: G,
+ ) where
+ F: FnMut(&mut State<'_>, &T),
+ G: FnMut(&T) -> rustc_span::Span,
+ {
+ self.rbox(0, b);
+ let len = elts.len();
+ let mut i = 0;
+ for elt in elts {
+ self.maybe_print_comment(get_span(elt).hi());
+ op(self, elt);
+ i += 1;
+ if i < len {
+ self.word(",");
+ self.maybe_print_trailing_comment(get_span(elt), Some(get_span(&elts[i]).hi()));
+ self.space_if_not_bol();
+ }
+ }
+ self.end();
+ }
+
+ pub(crate) fn commasep_exprs(&mut self, b: Breaks, exprs: &[P<ast::Expr>]) {
+ self.commasep_cmnt(b, exprs, |s, e| s.print_expr(e), |e| e.span)
+ }
+
+ pub fn print_opt_lifetime(&mut self, lifetime: &Option<ast::Lifetime>) {
+ if let Some(lt) = *lifetime {
+ self.print_lifetime(lt);
+ self.nbsp();
+ }
+ }
+
+ pub fn print_assoc_constraint(&mut self, constraint: &ast::AssocConstraint) {
+ self.print_ident(constraint.ident);
+ constraint.gen_args.as_ref().map(|args| self.print_generic_args(args, false));
+ self.space();
+ match &constraint.kind {
+ ast::AssocConstraintKind::Equality { term } => {
+ self.word_space("=");
+ match term {
+ Term::Ty(ty) => self.print_type(ty),
+ Term::Const(c) => self.print_expr_anon_const(c, &[]),
+ }
+ }
+ ast::AssocConstraintKind::Bound { bounds } => {
+ if !bounds.is_empty() {
+ self.word_nbsp(":");
+ self.print_type_bounds(&bounds);
+ }
+ }
+ }
+ }
+
+ pub fn print_generic_arg(&mut self, generic_arg: &GenericArg) {
+ match generic_arg {
+ GenericArg::Lifetime(lt) => self.print_lifetime(*lt),
+ GenericArg::Type(ty) => self.print_type(ty),
+ GenericArg::Const(ct) => self.print_expr(&ct.value),
+ }
+ }
+
+ pub fn print_type(&mut self, ty: &ast::Ty) {
+ self.maybe_print_comment(ty.span.lo());
+ self.ibox(0);
+ match ty.kind {
+ ast::TyKind::Slice(ref ty) => {
+ self.word("[");
+ self.print_type(ty);
+ self.word("]");
+ }
+ ast::TyKind::Ptr(ref mt) => {
+ self.word("*");
+ self.print_mt(mt, true);
+ }
+ ast::TyKind::Rptr(ref lifetime, ref mt) => {
+ self.word("&");
+ self.print_opt_lifetime(lifetime);
+ self.print_mt(mt, false);
+ }
+ ast::TyKind::Never => {
+ self.word("!");
+ }
+ ast::TyKind::Tup(ref elts) => {
+ self.popen();
+ self.commasep(Inconsistent, &elts, |s, ty| s.print_type(ty));
+ if elts.len() == 1 {
+ self.word(",");
+ }
+ self.pclose();
+ }
+ ast::TyKind::Paren(ref typ) => {
+ self.popen();
+ self.print_type(typ);
+ self.pclose();
+ }
+ ast::TyKind::BareFn(ref f) => {
+ self.print_ty_fn(f.ext, f.unsafety, &f.decl, None, &f.generic_params);
+ }
+ ast::TyKind::Path(None, ref path) => {
+ self.print_path(path, false, 0);
+ }
+ ast::TyKind::Path(Some(ref qself), ref path) => self.print_qpath(path, qself, false),
+ ast::TyKind::TraitObject(ref bounds, syntax) => {
+ if syntax == ast::TraitObjectSyntax::Dyn {
+ self.word_nbsp("dyn");
+ }
+ self.print_type_bounds(bounds);
+ }
+ ast::TyKind::ImplTrait(_, ref bounds) => {
+ self.word_nbsp("impl");
+ self.print_type_bounds(bounds);
+ }
+ ast::TyKind::Array(ref ty, ref length) => {
+ self.word("[");
+ self.print_type(ty);
+ self.word("; ");
+ self.print_expr(&length.value);
+ self.word("]");
+ }
+ ast::TyKind::Typeof(ref e) => {
+ self.word("typeof(");
+ self.print_expr(&e.value);
+ self.word(")");
+ }
+ ast::TyKind::Infer => {
+ self.word("_");
+ }
+ ast::TyKind::Err => {
+ self.popen();
+ self.word("/*ERROR*/");
+ self.pclose();
+ }
+ ast::TyKind::ImplicitSelf => {
+ self.word("Self");
+ }
+ ast::TyKind::MacCall(ref m) => {
+ self.print_mac(m);
+ }
+ ast::TyKind::CVarArgs => {
+ self.word("...");
+ }
+ }
+ self.end();
+ }
+
+ fn print_trait_ref(&mut self, t: &ast::TraitRef) {
+ self.print_path(&t.path, false, 0)
+ }
+
+ fn print_formal_generic_params(&mut self, generic_params: &[ast::GenericParam]) {
+ if !generic_params.is_empty() {
+ self.word("for");
+ self.print_generic_params(generic_params);
+ self.nbsp();
+ }
+ }
+
+ fn print_poly_trait_ref(&mut self, t: &ast::PolyTraitRef) {
+ self.print_formal_generic_params(&t.bound_generic_params);
+ self.print_trait_ref(&t.trait_ref)
+ }
+
+ pub(crate) fn print_stmt(&mut self, st: &ast::Stmt) {
+ self.maybe_print_comment(st.span.lo());
+ match st.kind {
+ ast::StmtKind::Local(ref loc) => {
+ self.print_outer_attributes(&loc.attrs);
+ self.space_if_not_bol();
+ self.ibox(INDENT_UNIT);
+ self.word_nbsp("let");
+
+ self.ibox(INDENT_UNIT);
+ self.print_local_decl(loc);
+ self.end();
+ if let Some((init, els)) = loc.kind.init_else_opt() {
+ self.nbsp();
+ self.word_space("=");
+ self.print_expr(init);
+ if let Some(els) = els {
+ self.cbox(INDENT_UNIT);
+ self.ibox(INDENT_UNIT);
+ self.word(" else ");
+ self.print_block(els);
+ }
+ }
+ self.word(";");
+ self.end(); // `let` ibox
+ }
+ ast::StmtKind::Item(ref item) => self.print_item(item),
+ ast::StmtKind::Expr(ref expr) => {
+ self.space_if_not_bol();
+ self.print_expr_outer_attr_style(expr, false);
+ if classify::expr_requires_semi_to_be_stmt(expr) {
+ self.word(";");
+ }
+ }
+ ast::StmtKind::Semi(ref expr) => {
+ self.space_if_not_bol();
+ self.print_expr_outer_attr_style(expr, false);
+ self.word(";");
+ }
+ ast::StmtKind::Empty => {
+ self.space_if_not_bol();
+ self.word(";");
+ }
+ ast::StmtKind::MacCall(ref mac) => {
+ self.space_if_not_bol();
+ self.print_outer_attributes(&mac.attrs);
+ self.print_mac(&mac.mac);
+ if mac.style == ast::MacStmtStyle::Semicolon {
+ self.word(";");
+ }
+ }
+ }
+ self.maybe_print_trailing_comment(st.span, None)
+ }
+
+ pub(crate) fn print_block(&mut self, blk: &ast::Block) {
+ self.print_block_with_attrs(blk, &[])
+ }
+
+ pub(crate) fn print_block_unclosed_indent(&mut self, blk: &ast::Block) {
+ self.print_block_maybe_unclosed(blk, &[], false)
+ }
+
+ pub(crate) fn print_block_with_attrs(&mut self, blk: &ast::Block, attrs: &[ast::Attribute]) {
+ self.print_block_maybe_unclosed(blk, attrs, true)
+ }
+
+ pub(crate) fn print_block_maybe_unclosed(
+ &mut self,
+ blk: &ast::Block,
+ attrs: &[ast::Attribute],
+ close_box: bool,
+ ) {
+ match blk.rules {
+ BlockCheckMode::Unsafe(..) => self.word_space("unsafe"),
+ BlockCheckMode::Default => (),
+ }
+ self.maybe_print_comment(blk.span.lo());
+ self.ann.pre(self, AnnNode::Block(blk));
+ self.bopen();
+
+ let has_attrs = self.print_inner_attributes(attrs);
+
+ for (i, st) in blk.stmts.iter().enumerate() {
+ match st.kind {
+ ast::StmtKind::Expr(ref expr) if i == blk.stmts.len() - 1 => {
+ self.maybe_print_comment(st.span.lo());
+ self.space_if_not_bol();
+ self.print_expr_outer_attr_style(expr, false);
+ self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi()));
+ }
+ _ => self.print_stmt(st),
+ }
+ }
+
+ let empty = !has_attrs && blk.stmts.is_empty();
+ self.bclose_maybe_open(blk.span, empty, close_box);
+ self.ann.post(self, AnnNode::Block(blk))
+ }
+
+ /// Print a `let pat = expr` expression.
+ pub(crate) fn print_let(&mut self, pat: &ast::Pat, expr: &ast::Expr) {
+ self.word("let ");
+ self.print_pat(pat);
+ self.space();
+ self.word_space("=");
+ let npals = || parser::needs_par_as_let_scrutinee(expr.precedence().order());
+ self.print_expr_cond_paren(expr, Self::cond_needs_par(expr) || npals())
+ }
+
+ pub(crate) fn print_mac(&mut self, m: &ast::MacCall) {
+ self.print_mac_common(
+ Some(MacHeader::Path(&m.path)),
+ true,
+ None,
+ m.args.delim(),
+ &m.args.inner_tokens(),
+ true,
+ m.span(),
+ );
+ }
+
+ fn print_inline_asm(&mut self, asm: &ast::InlineAsm) {
+ enum AsmArg<'a> {
+ Template(String),
+ Operand(&'a InlineAsmOperand),
+ ClobberAbi(Symbol),
+ Options(InlineAsmOptions),
+ }
+
+ let mut args = vec![AsmArg::Template(InlineAsmTemplatePiece::to_string(&asm.template))];
+ args.extend(asm.operands.iter().map(|(o, _)| AsmArg::Operand(o)));
+ for (abi, _) in &asm.clobber_abis {
+ args.push(AsmArg::ClobberAbi(*abi));
+ }
+ if !asm.options.is_empty() {
+ args.push(AsmArg::Options(asm.options));
+ }
+
+ self.popen();
+ self.commasep(Consistent, &args, |s, arg| match arg {
+ AsmArg::Template(template) => s.print_string(&template, ast::StrStyle::Cooked),
+ AsmArg::Operand(op) => {
+ let print_reg_or_class = |s: &mut Self, r: &InlineAsmRegOrRegClass| match r {
+ InlineAsmRegOrRegClass::Reg(r) => s.print_symbol(*r, ast::StrStyle::Cooked),
+ InlineAsmRegOrRegClass::RegClass(r) => s.word(r.to_string()),
+ };
+ match op {
+ InlineAsmOperand::In { reg, expr } => {
+ s.word("in");
+ s.popen();
+ print_reg_or_class(s, reg);
+ s.pclose();
+ s.space();
+ s.print_expr(expr);
+ }
+ InlineAsmOperand::Out { reg, late, expr } => {
+ s.word(if *late { "lateout" } else { "out" });
+ s.popen();
+ print_reg_or_class(s, reg);
+ s.pclose();
+ s.space();
+ match expr {
+ Some(expr) => s.print_expr(expr),
+ None => s.word("_"),
+ }
+ }
+ InlineAsmOperand::InOut { reg, late, expr } => {
+ s.word(if *late { "inlateout" } else { "inout" });
+ s.popen();
+ print_reg_or_class(s, reg);
+ s.pclose();
+ s.space();
+ s.print_expr(expr);
+ }
+ InlineAsmOperand::SplitInOut { reg, late, in_expr, out_expr } => {
+ s.word(if *late { "inlateout" } else { "inout" });
+ s.popen();
+ print_reg_or_class(s, reg);
+ s.pclose();
+ s.space();
+ s.print_expr(in_expr);
+ s.space();
+ s.word_space("=>");
+ match out_expr {
+ Some(out_expr) => s.print_expr(out_expr),
+ None => s.word("_"),
+ }
+ }
+ InlineAsmOperand::Const { anon_const } => {
+ s.word("const");
+ s.space();
+ s.print_expr(&anon_const.value);
+ }
+ InlineAsmOperand::Sym { sym } => {
+ s.word("sym");
+ s.space();
+ if let Some(qself) = &sym.qself {
+ s.print_qpath(&sym.path, qself, true);
+ } else {
+ s.print_path(&sym.path, true, 0);
+ }
+ }
+ }
+ }
+ AsmArg::ClobberAbi(abi) => {
+ s.word("clobber_abi");
+ s.popen();
+ s.print_symbol(*abi, ast::StrStyle::Cooked);
+ s.pclose();
+ }
+ AsmArg::Options(opts) => {
+ s.word("options");
+ s.popen();
+ let mut options = vec![];
+ if opts.contains(InlineAsmOptions::PURE) {
+ options.push("pure");
+ }
+ if opts.contains(InlineAsmOptions::NOMEM) {
+ options.push("nomem");
+ }
+ if opts.contains(InlineAsmOptions::READONLY) {
+ options.push("readonly");
+ }
+ if opts.contains(InlineAsmOptions::PRESERVES_FLAGS) {
+ options.push("preserves_flags");
+ }
+ if opts.contains(InlineAsmOptions::NORETURN) {
+ options.push("noreturn");
+ }
+ if opts.contains(InlineAsmOptions::NOSTACK) {
+ options.push("nostack");
+ }
+ if opts.contains(InlineAsmOptions::ATT_SYNTAX) {
+ options.push("att_syntax");
+ }
+ if opts.contains(InlineAsmOptions::RAW) {
+ options.push("raw");
+ }
+ if opts.contains(InlineAsmOptions::MAY_UNWIND) {
+ options.push("may_unwind");
+ }
+ s.commasep(Inconsistent, &options, |s, &opt| {
+ s.word(opt);
+ });
+ s.pclose();
+ }
+ });
+ self.pclose();
+ }
+
+ pub(crate) fn print_local_decl(&mut self, loc: &ast::Local) {
+ self.print_pat(&loc.pat);
+ if let Some(ref ty) = loc.ty {
+ self.word_space(":");
+ self.print_type(ty);
+ }
+ }
+
+ pub(crate) fn print_name(&mut self, name: Symbol) {
+ self.word(name.to_string());
+ self.ann.post(self, AnnNode::Name(&name))
+ }
+
+ fn print_qpath(&mut self, path: &ast::Path, qself: &ast::QSelf, colons_before_params: bool) {
+ self.word("<");
+ self.print_type(&qself.ty);
+ if qself.position > 0 {
+ self.space();
+ self.word_space("as");
+ let depth = path.segments.len() - qself.position;
+ self.print_path(path, false, depth);
+ }
+ self.word(">");
+ for item_segment in &path.segments[qself.position..] {
+ self.word("::");
+ self.print_ident(item_segment.ident);
+ if let Some(ref args) = item_segment.args {
+ self.print_generic_args(args, colons_before_params)
+ }
+ }
+ }
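+
+ // Illustrative example: for `<Vec<u8> as IntoIterator>::Item`, the path is
+ // `IntoIterator::Item` with `qself.position == 1`, so `IntoIterator` is
+ // printed inside the angle brackets (via `print_path` with `depth == 1`)
+ // and `::Item` is appended after the closing `>`.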
+
+ pub(crate) fn print_pat(&mut self, pat: &ast::Pat) {
+ self.maybe_print_comment(pat.span.lo());
+ self.ann.pre(self, AnnNode::Pat(pat));
+ // `Pat` isn't normalized, but the beauty of it
+ // is that it doesn't matter.
+ match pat.kind {
+ PatKind::Wild => self.word("_"),
+ PatKind::Ident(binding_mode, ident, ref sub) => {
+ match binding_mode {
+ ast::BindingMode::ByRef(mutbl) => {
+ self.word_nbsp("ref");
+ self.print_mutability(mutbl, false);
+ }
+ ast::BindingMode::ByValue(ast::Mutability::Not) => {}
+ ast::BindingMode::ByValue(ast::Mutability::Mut) => {
+ self.word_nbsp("mut");
+ }
+ }
+ self.print_ident(ident);
+ if let Some(ref p) = *sub {
+ self.space();
+ self.word_space("@");
+ self.print_pat(p);
+ }
+ }
+ PatKind::TupleStruct(ref qself, ref path, ref elts) => {
+ if let Some(qself) = qself {
+ self.print_qpath(path, qself, true);
+ } else {
+ self.print_path(path, true, 0);
+ }
+ self.popen();
+ self.commasep(Inconsistent, &elts, |s, p| s.print_pat(p));
+ self.pclose();
+ }
+ PatKind::Or(ref pats) => {
+ self.strsep("|", true, Inconsistent, &pats, |s, p| s.print_pat(p));
+ }
+ PatKind::Path(None, ref path) => {
+ self.print_path(path, true, 0);
+ }
+ PatKind::Path(Some(ref qself), ref path) => {
+ self.print_qpath(path, qself, false);
+ }
+ PatKind::Struct(ref qself, ref path, ref fields, etc) => {
+ if let Some(qself) = qself {
+ self.print_qpath(path, qself, true);
+ } else {
+ self.print_path(path, true, 0);
+ }
+ self.nbsp();
+ self.word("{");
+ let empty = fields.is_empty() && !etc;
+ if !empty {
+ self.space();
+ }
+ self.commasep_cmnt(
+ Consistent,
+ &fields,
+ |s, f| {
+ s.cbox(INDENT_UNIT);
+ if !f.is_shorthand {
+ s.print_ident(f.ident);
+ s.word_nbsp(":");
+ }
+ s.print_pat(&f.pat);
+ s.end();
+ },
+ |f| f.pat.span,
+ );
+ if etc {
+ if !fields.is_empty() {
+ self.word_space(",");
+ }
+ self.word("..");
+ }
+ if !empty {
+ self.space();
+ }
+ self.word("}");
+ }
+ PatKind::Tuple(ref elts) => {
+ self.popen();
+ self.commasep(Inconsistent, &elts, |s, p| s.print_pat(p));
+ if elts.len() == 1 {
+ self.word(",");
+ }
+ self.pclose();
+ }
+ PatKind::Box(ref inner) => {
+ self.word("box ");
+ self.print_pat(inner);
+ }
+ PatKind::Ref(ref inner, mutbl) => {
+ self.word("&");
+ if mutbl == ast::Mutability::Mut {
+ self.word("mut ");
+ }
+ if let PatKind::Ident(ast::BindingMode::ByValue(ast::Mutability::Mut), ..) =
+ inner.kind
+ {
+ self.popen();
+ self.print_pat(inner);
+ self.pclose();
+ } else {
+ self.print_pat(inner);
+ }
+ }
+ PatKind::Lit(ref e) => self.print_expr(&**e),
+ PatKind::Range(ref begin, ref end, Spanned { node: ref end_kind, .. }) => {
+ if let Some(e) = begin {
+ self.print_expr(e);
+ }
+ match *end_kind {
+ RangeEnd::Included(RangeSyntax::DotDotDot) => self.word("..."),
+ RangeEnd::Included(RangeSyntax::DotDotEq) => self.word("..="),
+ RangeEnd::Excluded => self.word(".."),
+ }
+ if let Some(e) = end {
+ self.print_expr(e);
+ }
+ }
+ PatKind::Slice(ref elts) => {
+ self.word("[");
+ self.commasep(Inconsistent, &elts, |s, p| s.print_pat(p));
+ self.word("]");
+ }
+ PatKind::Rest => self.word(".."),
+ PatKind::Paren(ref inner) => {
+ self.popen();
+ self.print_pat(inner);
+ self.pclose();
+ }
+ PatKind::MacCall(ref m) => self.print_mac(m),
+ }
+ self.ann.post(self, AnnNode::Pat(pat))
+ }
+
+ fn print_explicit_self(&mut self, explicit_self: &ast::ExplicitSelf) {
+ match explicit_self.node {
+ SelfKind::Value(m) => {
+ self.print_mutability(m, false);
+ self.word("self")
+ }
+ SelfKind::Region(ref lt, m) => {
+ self.word("&");
+ self.print_opt_lifetime(lt);
+ self.print_mutability(m, false);
+ self.word("self")
+ }
+ SelfKind::Explicit(ref typ, m) => {
+ self.print_mutability(m, false);
+ self.word("self");
+ self.word_space(":");
+ self.print_type(typ)
+ }
+ }
+ }
+
+ pub(crate) fn print_asyncness(&mut self, asyncness: ast::Async) {
+ if asyncness.is_async() {
+ self.word_nbsp("async");
+ }
+ }
+
+ pub fn print_type_bounds(&mut self, bounds: &[ast::GenericBound]) {
+ let mut first = true;
+ for bound in bounds {
+ if first {
+ first = false;
+ } else {
+ self.nbsp();
+ self.word_space("+");
+ }
+
+ match bound {
+ GenericBound::Trait(tref, modifier) => {
+ if modifier == &TraitBoundModifier::Maybe {
+ self.word("?");
+ }
+ self.print_poly_trait_ref(tref);
+ }
+ GenericBound::Outlives(lt) => self.print_lifetime(*lt),
+ }
+ }
+ }
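+
+ // For example: the bounds `[Clone, ?Sized, 'a]` print as
+ // `Clone + ?Sized + 'a`, with the `?` emitted for the `Maybe` modifier.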
+
+ pub(crate) fn print_lifetime(&mut self, lifetime: ast::Lifetime) {
+ self.print_name(lifetime.ident.name)
+ }
+
+ pub(crate) fn print_lifetime_bounds(&mut self, bounds: &ast::GenericBounds) {
+ for (i, bound) in bounds.iter().enumerate() {
+ if i != 0 {
+ self.word(" + ");
+ }
+ match bound {
+ ast::GenericBound::Outlives(lt) => self.print_lifetime(*lt),
+ _ => panic!(),
+ }
+ }
+ }
+
+ pub(crate) fn print_generic_params(&mut self, generic_params: &[ast::GenericParam]) {
+ if generic_params.is_empty() {
+ return;
+ }
+
+ self.word("<");
+
+ self.commasep(Inconsistent, &generic_params, |s, param| {
+ s.print_outer_attributes_inline(&param.attrs);
+
+ match param.kind {
+ ast::GenericParamKind::Lifetime => {
+ let lt = ast::Lifetime { id: param.id, ident: param.ident };
+ s.print_lifetime(lt);
+ if !param.bounds.is_empty() {
+ s.word_nbsp(":");
+ s.print_lifetime_bounds(&param.bounds)
+ }
+ }
+ ast::GenericParamKind::Type { ref default } => {
+ s.print_ident(param.ident);
+ if !param.bounds.is_empty() {
+ s.word_nbsp(":");
+ s.print_type_bounds(&param.bounds);
+ }
+ if let Some(ref default) = default {
+ s.space();
+ s.word_space("=");
+ s.print_type(default)
+ }
+ }
+ ast::GenericParamKind::Const { ref ty, kw_span: _, ref default } => {
+ s.word_space("const");
+ s.print_ident(param.ident);
+ s.space();
+ s.word_space(":");
+ s.print_type(ty);
+ if !param.bounds.is_empty() {
+ s.word_nbsp(":");
+ s.print_type_bounds(&param.bounds);
+ }
+ if let Some(ref default) = default {
+ s.space();
+ s.word_space("=");
+ s.print_expr(&default.value);
+ }
+ }
+ }
+ });
+
+ self.word(">");
+ }
+
+ pub fn print_mutability(&mut self, mutbl: ast::Mutability, print_const: bool) {
+ match mutbl {
+ ast::Mutability::Mut => self.word_nbsp("mut"),
+ ast::Mutability::Not => {
+ if print_const {
+ self.word_nbsp("const");
+ }
+ }
+ }
+ }
+
+ pub(crate) fn print_mt(&mut self, mt: &ast::MutTy, print_const: bool) {
+ self.print_mutability(mt.mutbl, print_const);
+ self.print_type(&mt.ty)
+ }
+
+ pub(crate) fn print_param(&mut self, input: &ast::Param, is_closure: bool) {
+ self.ibox(INDENT_UNIT);
+
+ self.print_outer_attributes_inline(&input.attrs);
+
+ match input.ty.kind {
+ ast::TyKind::Infer if is_closure => self.print_pat(&input.pat),
+ _ => {
+ if let Some(eself) = input.to_self() {
+ self.print_explicit_self(&eself);
+ } else {
+ let invalid = if let PatKind::Ident(_, ident, _) = input.pat.kind {
+ ident.name == kw::Empty
+ } else {
+ false
+ };
+ if !invalid {
+ self.print_pat(&input.pat);
+ self.word(":");
+ self.space();
+ }
+ self.print_type(&input.ty);
+ }
+ }
+ }
+ self.end();
+ }
+
+ pub(crate) fn print_fn_ret_ty(&mut self, fn_ret_ty: &ast::FnRetTy) {
+ if let ast::FnRetTy::Ty(ty) = fn_ret_ty {
+ self.space_if_not_bol();
+ self.ibox(INDENT_UNIT);
+ self.word_space("->");
+ self.print_type(ty);
+ self.end();
+ self.maybe_print_comment(ty.span.lo());
+ }
+ }
+
+ pub(crate) fn print_ty_fn(
+ &mut self,
+ ext: ast::Extern,
+ unsafety: ast::Unsafe,
+ decl: &ast::FnDecl,
+ name: Option<Ident>,
+ generic_params: &[ast::GenericParam],
+ ) {
+ self.ibox(INDENT_UNIT);
+ self.print_formal_generic_params(generic_params);
+ let generics = ast::Generics {
+ params: Vec::new(),
+ where_clause: ast::WhereClause {
+ has_where_token: false,
+ predicates: Vec::new(),
+ span: rustc_span::DUMMY_SP,
+ },
+ span: rustc_span::DUMMY_SP,
+ };
+ let header = ast::FnHeader { unsafety, ext, ..ast::FnHeader::default() };
+ self.print_fn(decl, header, name, &generics);
+ self.end();
+ }
+
+ pub(crate) fn print_fn_header_info(&mut self, header: ast::FnHeader) {
+ self.print_constness(header.constness);
+ self.print_asyncness(header.asyncness);
+ self.print_unsafety(header.unsafety);
+
+ match header.ext {
+ ast::Extern::None => {}
+ ast::Extern::Implicit(_) => {
+ self.word_nbsp("extern");
+ }
+ ast::Extern::Explicit(abi, _) => {
+ self.word_nbsp("extern");
+ self.print_literal(&abi.as_lit());
+ self.nbsp();
+ }
+ }
+
+ self.word("fn")
+ }
+
+ pub(crate) fn print_unsafety(&mut self, s: ast::Unsafe) {
+ match s {
+ ast::Unsafe::No => {}
+ ast::Unsafe::Yes(_) => self.word_nbsp("unsafe"),
+ }
+ }
+
+ pub(crate) fn print_constness(&mut self, s: ast::Const) {
+ match s {
+ ast::Const::No => {}
+ ast::Const::Yes(_) => self.word_nbsp("const"),
+ }
+ }
+
+ pub(crate) fn print_is_auto(&mut self, s: ast::IsAuto) {
+ match s {
+ ast::IsAuto::Yes => self.word_nbsp("auto"),
+ ast::IsAuto::No => {}
+ }
+ }
+}
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/delimited.rs b/compiler/rustc_ast_pretty/src/pprust/state/delimited.rs
new file mode 100644
index 000000000..fe0640baa
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pprust/state/delimited.rs
@@ -0,0 +1,41 @@
+use std::iter::Peekable;
+use std::mem;
+use std::ops::Deref;
+
+pub struct Delimited<I: Iterator> {
+ is_first: bool,
+ iter: Peekable<I>,
+}
+
+pub trait IterDelimited: Iterator + Sized {
+ fn delimited(self) -> Delimited<Self> {
+ Delimited { is_first: true, iter: self.peekable() }
+ }
+}
+
+impl<I: Iterator> IterDelimited for I {}
+
+pub struct IteratorItem<T> {
+ value: T,
+ pub is_first: bool,
+ pub is_last: bool,
+}
+
+impl<I: Iterator> Iterator for Delimited<I> {
+ type Item = IteratorItem<I::Item>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let value = self.iter.next()?;
+ let is_first = mem::replace(&mut self.is_first, false);
+ let is_last = self.iter.peek().is_none();
+ Some(IteratorItem { value, is_first, is_last })
+ }
+}
+
+impl<T> Deref for IteratorItem<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &self.value
+ }
+}
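+
+// Usage sketch: iterating `["a", "b"].iter().delimited()` yields items with
+// (`is_first`, `is_last`) of (true, false) then (false, true), while `Deref`
+// forwards to the underlying value; see `print_expr_struct` for a real use.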
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
new file mode 100644
index 000000000..ead38caee
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs
@@ -0,0 +1,621 @@
+use crate::pp::Breaks::Inconsistent;
+use crate::pprust::state::{AnnNode, IterDelimited, PrintState, State, INDENT_UNIT};
+
+use rustc_ast::ptr::P;
+use rustc_ast::util::parser::{self, AssocOp, Fixity};
+use rustc_ast::{self as ast, BlockCheckMode};
+
+impl<'a> State<'a> {
+ fn print_else(&mut self, els: Option<&ast::Expr>) {
+ if let Some(_else) = els {
+ match _else.kind {
+ // Another `else if` block.
+ ast::ExprKind::If(ref i, ref then, ref e) => {
+ self.cbox(INDENT_UNIT - 1);
+ self.ibox(0);
+ self.word(" else if ");
+ self.print_expr_as_cond(i);
+ self.space();
+ self.print_block(then);
+ self.print_else(e.as_deref())
+ }
+ // Final `else` block.
+ ast::ExprKind::Block(ref b, _) => {
+ self.cbox(INDENT_UNIT - 1);
+ self.ibox(0);
+ self.word(" else ");
+ self.print_block(b)
+ }
+ // Constraints would be great here!
+ _ => {
+ panic!("print_if saw if with weird alternative");
+ }
+ }
+ }
+ }
+
+ fn print_if(&mut self, test: &ast::Expr, blk: &ast::Block, elseopt: Option<&ast::Expr>) {
+ self.head("if");
+ self.print_expr_as_cond(test);
+ self.space();
+ self.print_block(blk);
+ self.print_else(elseopt)
+ }
+
+ fn print_call_post(&mut self, args: &[P<ast::Expr>]) {
+ self.popen();
+ self.commasep_exprs(Inconsistent, args);
+ self.pclose()
+ }
+
+ fn print_expr_maybe_paren(&mut self, expr: &ast::Expr, prec: i8) {
+ self.print_expr_cond_paren(expr, expr.precedence().order() < prec)
+ }
+
+ /// Prints an expr using syntax that's acceptable in a condition position, such as the `cond` in
+ /// `if cond { ... }`.
+ fn print_expr_as_cond(&mut self, expr: &ast::Expr) {
+ self.print_expr_cond_paren(expr, Self::cond_needs_par(expr))
+ }
+
+ // Does `expr` need parentheses when printed in a condition position?
+ //
+ // These cases need parens due to the parse error observed in #26461: `if return {}`
+ // parses as the erroneous construct `if (return {})`, not `if (return) {}`.
+ pub(super) fn cond_needs_par(expr: &ast::Expr) -> bool {
+ match expr.kind {
+ ast::ExprKind::Break(..)
+ | ast::ExprKind::Closure(..)
+ | ast::ExprKind::Ret(..)
+ | ast::ExprKind::Yeet(..) => true,
+ _ => parser::contains_exterior_struct_lit(expr),
+ }
+ }
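+
+ // Illustrative example: a bare `return` condition printed without parens
+ // would yield `if return {}`, which re-parses as `if (return {})`; the
+ // forced parens produce `if (return) {}` instead.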
+
+ /// Prints `expr` or `(expr)` when `needs_par` holds.
+ pub(super) fn print_expr_cond_paren(&mut self, expr: &ast::Expr, needs_par: bool) {
+ if needs_par {
+ self.popen();
+ }
+ self.print_expr(expr);
+ if needs_par {
+ self.pclose();
+ }
+ }
+
+ fn print_expr_vec(&mut self, exprs: &[P<ast::Expr>]) {
+ self.ibox(INDENT_UNIT);
+ self.word("[");
+ self.commasep_exprs(Inconsistent, exprs);
+ self.word("]");
+ self.end();
+ }
+
+ pub(super) fn print_expr_anon_const(
+ &mut self,
+ expr: &ast::AnonConst,
+ attrs: &[ast::Attribute],
+ ) {
+ self.ibox(INDENT_UNIT);
+ self.word("const");
+ self.nbsp();
+ if let ast::ExprKind::Block(block, None) = &expr.value.kind {
+ self.cbox(0);
+ self.ibox(0);
+ self.print_block_with_attrs(block, attrs);
+ } else {
+ self.print_expr(&expr.value);
+ }
+ self.end();
+ }
+
+ fn print_expr_repeat(&mut self, element: &ast::Expr, count: &ast::AnonConst) {
+ self.ibox(INDENT_UNIT);
+ self.word("[");
+ self.print_expr(element);
+ self.word_space(";");
+ self.print_expr(&count.value);
+ self.word("]");
+ self.end();
+ }
+
+ fn print_expr_struct(
+ &mut self,
+ qself: &Option<ast::QSelf>,
+ path: &ast::Path,
+ fields: &[ast::ExprField],
+ rest: &ast::StructRest,
+ ) {
+ if let Some(qself) = qself {
+ self.print_qpath(path, qself, true);
+ } else {
+ self.print_path(path, true, 0);
+ }
+ self.nbsp();
+ self.word("{");
+ let has_rest = match rest {
+ ast::StructRest::Base(_) | ast::StructRest::Rest(_) => true,
+ ast::StructRest::None => false,
+ };
+ if fields.is_empty() && !has_rest {
+ self.word("}");
+ return;
+ }
+ self.cbox(0);
+ for field in fields.iter().delimited() {
+ self.maybe_print_comment(field.span.hi());
+ self.print_outer_attributes(&field.attrs);
+ if field.is_first {
+ self.space_if_not_bol();
+ }
+ if !field.is_shorthand {
+ self.print_ident(field.ident);
+ self.word_nbsp(":");
+ }
+ self.print_expr(&field.expr);
+ if !field.is_last || has_rest {
+ self.word_space(",");
+ } else {
+ self.trailing_comma_or_space();
+ }
+ }
+ if has_rest {
+ if fields.is_empty() {
+ self.space();
+ }
+ self.word("..");
+ if let ast::StructRest::Base(expr) = rest {
+ self.print_expr(expr);
+ }
+ self.space();
+ }
+ self.offset(-INDENT_UNIT);
+ self.end();
+ self.word("}");
+ }
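+
+ // Illustrative example: `S { a: 1, ..base }` prints a `,` after the last
+ // field because `has_rest` holds, then `..` followed by the base
+ // expression.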
+
+ fn print_expr_tup(&mut self, exprs: &[P<ast::Expr>]) {
+ self.popen();
+ self.commasep_exprs(Inconsistent, exprs);
+ if exprs.len() == 1 {
+ self.word(",");
+ }
+ self.pclose()
+ }
+
+ fn print_expr_call(&mut self, func: &ast::Expr, args: &[P<ast::Expr>]) {
+ let prec = match func.kind {
+ ast::ExprKind::Field(..) => parser::PREC_FORCE_PAREN,
+ _ => parser::PREC_POSTFIX,
+ };
+
+ self.print_expr_maybe_paren(func, prec);
+ self.print_call_post(args)
+ }
+
+ fn print_expr_method_call(&mut self, segment: &ast::PathSegment, args: &[P<ast::Expr>]) {
+ let base_args = &args[1..];
+ self.print_expr_maybe_paren(&args[0], parser::PREC_POSTFIX);
+ self.word(".");
+ self.print_ident(segment.ident);
+ if let Some(ref args) = segment.args {
+ self.print_generic_args(args, true);
+ }
+ self.print_call_post(base_args)
+ }
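+
+ // For example: with `args = [receiver, a, b]` and segment `method`, this
+ // prints `receiver.method(a, b)`; the receiver is `args[0]` and the rest
+ // become the argument list.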
+
+ fn print_expr_binary(&mut self, op: ast::BinOp, lhs: &ast::Expr, rhs: &ast::Expr) {
+ let assoc_op = AssocOp::from_ast_binop(op.node);
+ let prec = assoc_op.precedence() as i8;
+ let fixity = assoc_op.fixity();
+
+ let (left_prec, right_prec) = match fixity {
+ Fixity::Left => (prec, prec + 1),
+ Fixity::Right => (prec + 1, prec),
+ Fixity::None => (prec + 1, prec + 1),
+ };
+
+ let left_prec = match (&lhs.kind, op.node) {
+ // These cases need parens: `x as i32 < y` has the parser thinking that `i32 < y` is
+ // the beginning of a path type. It starts trying to parse `x as (i32 < y ...` instead
+ // of `(x as i32) < ...`. We need to convince it _not_ to do that.
+ (&ast::ExprKind::Cast { .. }, ast::BinOpKind::Lt | ast::BinOpKind::Shl) => {
+ parser::PREC_FORCE_PAREN
+ }
+ // We are given `(let _ = a) OP b`.
+ //
+ // - When `OP <= LAnd` we should print `let _ = a OP b` to avoid redundant parens
+ // as the parser will interpret this as `(let _ = a) OP b`.
+ //
+ // - Otherwise, e.g. when we have `(let a = b) < c` in AST,
+ // parens are required since the parser would interpret `let a = b < c` as
+ // `let a = (b < c)`. To achieve this, we force parens.
+ (&ast::ExprKind::Let { .. }, _) if !parser::needs_par_as_let_scrutinee(prec) => {
+ parser::PREC_FORCE_PAREN
+ }
+ _ => left_prec,
+ };
+
+ self.print_expr_maybe_paren(lhs, left_prec);
+ self.space();
+ self.word_space(op.node.to_string());
+ self.print_expr_maybe_paren(rhs, right_prec)
+ }
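+
+ // Worked example: `-` is left-associative, so `Fixity::Left` gives the
+ // right operand a strictly higher required precedence. `(a - b) - c` thus
+ // prints as `a - b - c`, while `a - (b - c)` keeps its parens because
+ // `b - c` has `prec < right_prec`; likewise the cast case above prints
+ // `(x as i32) < y` rather than `x as i32 < y`.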
+
+ fn print_expr_unary(&mut self, op: ast::UnOp, expr: &ast::Expr) {
+ self.word(ast::UnOp::to_string(op));
+ self.print_expr_maybe_paren(expr, parser::PREC_PREFIX)
+ }
+
+ fn print_expr_addr_of(
+ &mut self,
+ kind: ast::BorrowKind,
+ mutability: ast::Mutability,
+ expr: &ast::Expr,
+ ) {
+ self.word("&");
+ match kind {
+ ast::BorrowKind::Ref => self.print_mutability(mutability, false),
+ ast::BorrowKind::Raw => {
+ self.word_nbsp("raw");
+ self.print_mutability(mutability, true);
+ }
+ }
+ self.print_expr_maybe_paren(expr, parser::PREC_PREFIX)
+ }
+
+ pub fn print_expr(&mut self, expr: &ast::Expr) {
+ self.print_expr_outer_attr_style(expr, true)
+ }
+
+ pub(super) fn print_expr_outer_attr_style(&mut self, expr: &ast::Expr, is_inline: bool) {
+ self.maybe_print_comment(expr.span.lo());
+
+ let attrs = &expr.attrs;
+ if is_inline {
+ self.print_outer_attributes_inline(attrs);
+ } else {
+ self.print_outer_attributes(attrs);
+ }
+
+ self.ibox(INDENT_UNIT);
+ self.ann.pre(self, AnnNode::Expr(expr));
+ match expr.kind {
+ ast::ExprKind::Box(ref expr) => {
+ self.word_space("box");
+ self.print_expr_maybe_paren(expr, parser::PREC_PREFIX);
+ }
+ ast::ExprKind::Array(ref exprs) => {
+ self.print_expr_vec(exprs);
+ }
+ ast::ExprKind::ConstBlock(ref anon_const) => {
+ self.print_expr_anon_const(anon_const, attrs);
+ }
+ ast::ExprKind::Repeat(ref element, ref count) => {
+ self.print_expr_repeat(element, count);
+ }
+ ast::ExprKind::Struct(ref se) => {
+ self.print_expr_struct(&se.qself, &se.path, &se.fields, &se.rest);
+ }
+ ast::ExprKind::Tup(ref exprs) => {
+ self.print_expr_tup(exprs);
+ }
+ ast::ExprKind::Call(ref func, ref args) => {
+ self.print_expr_call(func, &args);
+ }
+ ast::ExprKind::MethodCall(ref segment, ref args, _) => {
+ self.print_expr_method_call(segment, &args);
+ }
+ ast::ExprKind::Binary(op, ref lhs, ref rhs) => {
+ self.print_expr_binary(op, lhs, rhs);
+ }
+ ast::ExprKind::Unary(op, ref expr) => {
+ self.print_expr_unary(op, expr);
+ }
+ ast::ExprKind::AddrOf(k, m, ref expr) => {
+ self.print_expr_addr_of(k, m, expr);
+ }
+ ast::ExprKind::Lit(ref lit) => {
+ self.print_literal(lit);
+ }
+ ast::ExprKind::Cast(ref expr, ref ty) => {
+ let prec = AssocOp::As.precedence() as i8;
+ self.print_expr_maybe_paren(expr, prec);
+ self.space();
+ self.word_space("as");
+ self.print_type(ty);
+ }
+ ast::ExprKind::Type(ref expr, ref ty) => {
+ let prec = AssocOp::Colon.precedence() as i8;
+ self.print_expr_maybe_paren(expr, prec);
+ self.word_space(":");
+ self.print_type(ty);
+ }
+ ast::ExprKind::Let(ref pat, ref scrutinee, _) => {
+ self.print_let(pat, scrutinee);
+ }
+ ast::ExprKind::If(ref test, ref blk, ref elseopt) => {
+ self.print_if(test, blk, elseopt.as_deref())
+ }
+ ast::ExprKind::While(ref test, ref blk, opt_label) => {
+ if let Some(label) = opt_label {
+ self.print_ident(label.ident);
+ self.word_space(":");
+ }
+ self.cbox(0);
+ self.ibox(0);
+ self.word_nbsp("while");
+ self.print_expr_as_cond(test);
+ self.space();
+ self.print_block_with_attrs(blk, attrs);
+ }
+ ast::ExprKind::ForLoop(ref pat, ref iter, ref blk, opt_label) => {
+ if let Some(label) = opt_label {
+ self.print_ident(label.ident);
+ self.word_space(":");
+ }
+ self.cbox(0);
+ self.ibox(0);
+ self.word_nbsp("for");
+ self.print_pat(pat);
+ self.space();
+ self.word_space("in");
+ self.print_expr_as_cond(iter);
+ self.space();
+ self.print_block_with_attrs(blk, attrs);
+ }
+ ast::ExprKind::Loop(ref blk, opt_label) => {
+ if let Some(label) = opt_label {
+ self.print_ident(label.ident);
+ self.word_space(":");
+ }
+ self.cbox(0);
+ self.ibox(0);
+ self.word_nbsp("loop");
+ self.print_block_with_attrs(blk, attrs);
+ }
+ ast::ExprKind::Match(ref expr, ref arms) => {
+ self.cbox(0);
+ self.ibox(0);
+ self.word_nbsp("match");
+ self.print_expr_as_cond(expr);
+ self.space();
+ self.bopen();
+ self.print_inner_attributes_no_trailing_hardbreak(attrs);
+ for arm in arms {
+ self.print_arm(arm);
+ }
+ let empty = attrs.is_empty() && arms.is_empty();
+ self.bclose(expr.span, empty);
+ }
+ ast::ExprKind::Closure(
+ ref binder,
+ capture_clause,
+ asyncness,
+ movability,
+ ref decl,
+ ref body,
+ _,
+ ) => {
+ self.print_closure_binder(binder);
+ self.print_movability(movability);
+ self.print_asyncness(asyncness);
+ self.print_capture_clause(capture_clause);
+
+ self.print_fn_params_and_ret(decl, true);
+ self.space();
+ self.print_expr(body);
+ self.end(); // need to close a box
+
+                // A box will be closed by `print_expr`, but we didn't want an
+                // overall wrapper, so we closed the corresponding opening.
+                // Create an empty box to satisfy the close.
+ self.ibox(0);
+ }
+ ast::ExprKind::Block(ref blk, opt_label) => {
+ if let Some(label) = opt_label {
+ self.print_ident(label.ident);
+ self.word_space(":");
+ }
+ // containing cbox, will be closed by print-block at }
+ self.cbox(0);
+ // head-box, will be closed by print-block after {
+ self.ibox(0);
+ self.print_block_with_attrs(blk, attrs);
+ }
+ ast::ExprKind::Async(capture_clause, _, ref blk) => {
+ self.word_nbsp("async");
+ self.print_capture_clause(capture_clause);
+ // cbox/ibox in analogy to the `ExprKind::Block` arm above
+ self.cbox(0);
+ self.ibox(0);
+ self.print_block_with_attrs(blk, attrs);
+ }
+ ast::ExprKind::Await(ref expr) => {
+ self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX);
+ self.word(".await");
+ }
+ ast::ExprKind::Assign(ref lhs, ref rhs, _) => {
+ let prec = AssocOp::Assign.precedence() as i8;
+ self.print_expr_maybe_paren(lhs, prec + 1);
+ self.space();
+ self.word_space("=");
+ self.print_expr_maybe_paren(rhs, prec);
+ }
+ ast::ExprKind::AssignOp(op, ref lhs, ref rhs) => {
+ let prec = AssocOp::Assign.precedence() as i8;
+ self.print_expr_maybe_paren(lhs, prec + 1);
+ self.space();
+ self.word(op.node.to_string());
+ self.word_space("=");
+ self.print_expr_maybe_paren(rhs, prec);
+ }
+ ast::ExprKind::Field(ref expr, ident) => {
+ self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX);
+ self.word(".");
+ self.print_ident(ident);
+ }
+ ast::ExprKind::Index(ref expr, ref index) => {
+ self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX);
+ self.word("[");
+ self.print_expr(index);
+ self.word("]");
+ }
+ ast::ExprKind::Range(ref start, ref end, limits) => {
+ // Special case for `Range`. `AssocOp` claims that `Range` has higher precedence
+ // than `Assign`, but `x .. x = x` gives a parse error instead of `x .. (x = x)`.
+ // Here we use a fake precedence value so that any child with lower precedence than
+ // a "normal" binop gets parenthesized. (`LOr` is the lowest-precedence binop.)
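+                // For illustration, an `Assign` child such as `x = 1` prints
+                // parenthesized, as `(x = 1)..`, since `Assign` binds more
+                // loosely than `LOr`.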
+ let fake_prec = AssocOp::LOr.precedence() as i8;
+ if let Some(ref e) = *start {
+ self.print_expr_maybe_paren(e, fake_prec);
+ }
+ if limits == ast::RangeLimits::HalfOpen {
+ self.word("..");
+ } else {
+ self.word("..=");
+ }
+ if let Some(ref e) = *end {
+ self.print_expr_maybe_paren(e, fake_prec);
+ }
+ }
+ ast::ExprKind::Underscore => self.word("_"),
+ ast::ExprKind::Path(None, ref path) => self.print_path(path, true, 0),
+ ast::ExprKind::Path(Some(ref qself), ref path) => self.print_qpath(path, qself, true),
+ ast::ExprKind::Break(opt_label, ref opt_expr) => {
+ self.word("break");
+ if let Some(label) = opt_label {
+ self.space();
+ self.print_ident(label.ident);
+ }
+ if let Some(ref expr) = *opt_expr {
+ self.space();
+ self.print_expr_maybe_paren(expr, parser::PREC_JUMP);
+ }
+ }
+ ast::ExprKind::Continue(opt_label) => {
+ self.word("continue");
+ if let Some(label) = opt_label {
+ self.space();
+ self.print_ident(label.ident);
+ }
+ }
+ ast::ExprKind::Ret(ref result) => {
+ self.word("return");
+ if let Some(ref expr) = *result {
+ self.word(" ");
+ self.print_expr_maybe_paren(expr, parser::PREC_JUMP);
+ }
+ }
+ ast::ExprKind::Yeet(ref result) => {
+ self.word("do");
+ self.word(" ");
+ self.word("yeet");
+ if let Some(ref expr) = *result {
+ self.word(" ");
+ self.print_expr_maybe_paren(expr, parser::PREC_JUMP);
+ }
+ }
+ ast::ExprKind::InlineAsm(ref a) => {
+ self.word("asm!");
+ self.print_inline_asm(a);
+ }
+ ast::ExprKind::MacCall(ref m) => self.print_mac(m),
+ ast::ExprKind::Paren(ref e) => {
+ self.popen();
+ self.print_expr(e);
+ self.pclose();
+ }
+ ast::ExprKind::Yield(ref e) => {
+ self.word("yield");
+
+ if let Some(ref expr) = *e {
+ self.space();
+ self.print_expr_maybe_paren(expr, parser::PREC_JUMP);
+ }
+ }
+ ast::ExprKind::Try(ref e) => {
+ self.print_expr_maybe_paren(e, parser::PREC_POSTFIX);
+ self.word("?")
+ }
+ ast::ExprKind::TryBlock(ref blk) => {
+ self.cbox(0);
+ self.ibox(0);
+ self.word_nbsp("try");
+ self.print_block_with_attrs(blk, attrs)
+ }
+ ast::ExprKind::Err => {
+ self.popen();
+ self.word("/*ERROR*/");
+ self.pclose()
+ }
+ }
+ self.ann.post(self, AnnNode::Expr(expr));
+ self.end();
+ }
+
+ fn print_arm(&mut self, arm: &ast::Arm) {
+        // Note: it is unclear why this check is necessary, but here it is.
+ if arm.attrs.is_empty() {
+ self.space();
+ }
+ self.cbox(INDENT_UNIT);
+ self.ibox(0);
+ self.maybe_print_comment(arm.pat.span.lo());
+ self.print_outer_attributes(&arm.attrs);
+ self.print_pat(&arm.pat);
+ self.space();
+ if let Some(ref e) = arm.guard {
+ self.word_space("if");
+ self.print_expr(e);
+ self.space();
+ }
+ self.word_space("=>");
+
+ match arm.body.kind {
+ ast::ExprKind::Block(ref blk, opt_label) => {
+ if let Some(label) = opt_label {
+ self.print_ident(label.ident);
+ self.word_space(":");
+ }
+
+ // The block will close the pattern's ibox.
+ self.print_block_unclosed_indent(blk);
+
+ // If it is a user-provided unsafe block, print a comma after it.
+ if let BlockCheckMode::Unsafe(ast::UserProvided) = blk.rules {
+ self.word(",");
+ }
+ }
+ _ => {
+ self.end(); // Close the ibox for the pattern.
+ self.print_expr(&arm.body);
+ self.word(",");
+ }
+ }
+ self.end(); // Close enclosing cbox.
+ }
+
+ fn print_closure_binder(&mut self, binder: &ast::ClosureBinder) {
+ match binder {
+ ast::ClosureBinder::NotPresent => {}
+ ast::ClosureBinder::For { generic_params, .. } => {
+ self.print_formal_generic_params(&generic_params)
+ }
+ }
+ }
+
+ fn print_movability(&mut self, movability: ast::Movability) {
+ match movability {
+ ast::Movability::Static => self.word_space("static"),
+ ast::Movability::Movable => {}
+ }
+ }
+
+ fn print_capture_clause(&mut self, capture_clause: ast::CaptureBy) {
+ match capture_clause {
+ ast::CaptureBy::Value => self.word_space("move"),
+ ast::CaptureBy::Ref => {}
+ }
+ }
+}
diff --git a/compiler/rustc_ast_pretty/src/pprust/state/item.rs b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
new file mode 100644
index 000000000..f1caf22f3
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pprust/state/item.rs
@@ -0,0 +1,708 @@
+use crate::pp::Breaks::Inconsistent;
+use crate::pprust::state::delimited::IterDelimited;
+use crate::pprust::state::{AnnNode, PrintState, State, INDENT_UNIT};
+
+use rustc_ast as ast;
+use rustc_ast::GenericBound;
+use rustc_ast::ModKind;
+use rustc_span::symbol::Ident;
+
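+/// Prefixes `s` with the rendered visibility; for illustration, a `pub`
+/// visibility turns `"struct"` into `"pub struct"`.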
+fn visibility_qualified(vis: &ast::Visibility, s: &str) -> String {
+ format!("{}{}", State::to_string(|s| s.print_visibility(vis)), s)
+}
+
+impl<'a> State<'a> {
+ fn print_foreign_mod(&mut self, nmod: &ast::ForeignMod, attrs: &[ast::Attribute]) {
+ self.print_inner_attributes(attrs);
+ for item in &nmod.items {
+ self.print_foreign_item(item);
+ }
+ }
+
+ pub(crate) fn print_foreign_item(&mut self, item: &ast::ForeignItem) {
+ let ast::Item { id, span, ident, ref attrs, ref kind, ref vis, tokens: _ } = *item;
+ self.ann.pre(self, AnnNode::SubItem(id));
+ self.hardbreak_if_not_bol();
+ self.maybe_print_comment(span.lo());
+ self.print_outer_attributes(attrs);
+ match kind {
+ ast::ForeignItemKind::Fn(box ast::Fn { defaultness, sig, generics, body }) => {
+ self.print_fn_full(sig, ident, generics, vis, *defaultness, body.as_deref(), attrs);
+ }
+ ast::ForeignItemKind::Static(ty, mutbl, body) => {
+ let def = ast::Defaultness::Final;
+ self.print_item_const(ident, Some(*mutbl), ty, body.as_deref(), vis, def);
+ }
+ ast::ForeignItemKind::TyAlias(box ast::TyAlias {
+ defaultness,
+ generics,
+ where_clauses,
+ where_predicates_split,
+ bounds,
+ ty,
+ }) => {
+ self.print_associated_type(
+ ident,
+ generics,
+ *where_clauses,
+ *where_predicates_split,
+ bounds,
+ ty.as_deref(),
+ vis,
+ *defaultness,
+ );
+ }
+ ast::ForeignItemKind::MacCall(m) => {
+ self.print_mac(m);
+ if m.args.need_semicolon() {
+ self.word(";");
+ }
+ }
+ }
+ self.ann.post(self, AnnNode::SubItem(id))
+ }
+
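+    /// Prints a free or associated `const`/`static` item; for illustration, a
+    /// mutable static with a body renders as `static mut NAME: Ty = expr;`.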
+ fn print_item_const(
+ &mut self,
+ ident: Ident,
+ mutbl: Option<ast::Mutability>,
+ ty: &ast::Ty,
+ body: Option<&ast::Expr>,
+ vis: &ast::Visibility,
+ defaultness: ast::Defaultness,
+ ) {
+ self.head("");
+ self.print_visibility(vis);
+ self.print_defaultness(defaultness);
+ let leading = match mutbl {
+ None => "const",
+ Some(ast::Mutability::Not) => "static",
+ Some(ast::Mutability::Mut) => "static mut",
+ };
+ self.word_space(leading);
+ self.print_ident(ident);
+ self.word_space(":");
+ self.print_type(ty);
+ if body.is_some() {
+ self.space();
+ }
+ self.end(); // end the head-ibox
+ if let Some(body) = body {
+ self.word_space("=");
+ self.print_expr(body);
+ }
+ self.word(";");
+ self.end(); // end the outer cbox
+ }
+
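+    /// Prints a (possibly associated) type alias. `where_predicates_split`
+    /// divides `generics.where_clause.predicates` into the predicates printed
+    /// before the `=` and those printed after it, mirroring where they
+    /// appeared in the source.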
+ fn print_associated_type(
+ &mut self,
+ ident: Ident,
+ generics: &ast::Generics,
+ where_clauses: (ast::TyAliasWhereClause, ast::TyAliasWhereClause),
+ where_predicates_split: usize,
+ bounds: &ast::GenericBounds,
+ ty: Option<&ast::Ty>,
+ vis: &ast::Visibility,
+ defaultness: ast::Defaultness,
+ ) {
+ let (before_predicates, after_predicates) =
+ generics.where_clause.predicates.split_at(where_predicates_split);
+ self.head("");
+ self.print_visibility(vis);
+ self.print_defaultness(defaultness);
+ self.word_space("type");
+ self.print_ident(ident);
+ self.print_generic_params(&generics.params);
+ if !bounds.is_empty() {
+ self.word_nbsp(":");
+ self.print_type_bounds(bounds);
+ }
+ self.print_where_clause_parts(where_clauses.0.0, before_predicates);
+ if let Some(ty) = ty {
+ self.space();
+ self.word_space("=");
+ self.print_type(ty);
+ }
+ self.print_where_clause_parts(where_clauses.1.0, after_predicates);
+ self.word(";");
+ self.end(); // end inner head-block
+ self.end(); // end outer head-block
+ }
+
+ /// Pretty-prints an item.
+ pub(crate) fn print_item(&mut self, item: &ast::Item) {
+ self.hardbreak_if_not_bol();
+ self.maybe_print_comment(item.span.lo());
+ self.print_outer_attributes(&item.attrs);
+ self.ann.pre(self, AnnNode::Item(item));
+ match item.kind {
+ ast::ItemKind::ExternCrate(orig_name) => {
+ self.head(visibility_qualified(&item.vis, "extern crate"));
+ if let Some(orig_name) = orig_name {
+ self.print_name(orig_name);
+ self.space();
+ self.word("as");
+ self.space();
+ }
+ self.print_ident(item.ident);
+ self.word(";");
+ self.end(); // end inner head-block
+ self.end(); // end outer head-block
+ }
+ ast::ItemKind::Use(ref tree) => {
+ self.print_visibility(&item.vis);
+ self.word_nbsp("use");
+ self.print_use_tree(tree);
+ self.word(";");
+ }
+ ast::ItemKind::Static(ref ty, mutbl, ref body) => {
+ let def = ast::Defaultness::Final;
+ self.print_item_const(item.ident, Some(mutbl), ty, body.as_deref(), &item.vis, def);
+ }
+ ast::ItemKind::Const(def, ref ty, ref body) => {
+ self.print_item_const(item.ident, None, ty, body.as_deref(), &item.vis, def);
+ }
+ ast::ItemKind::Fn(box ast::Fn { defaultness, ref sig, ref generics, ref body }) => {
+ let body = body.as_deref();
+ self.print_fn_full(
+ sig,
+ item.ident,
+ generics,
+ &item.vis,
+ defaultness,
+ body,
+ &item.attrs,
+ );
+ }
+ ast::ItemKind::Mod(unsafety, ref mod_kind) => {
+ self.head(Self::to_string(|s| {
+ s.print_visibility(&item.vis);
+ s.print_unsafety(unsafety);
+ s.word("mod");
+ }));
+ self.print_ident(item.ident);
+
+ match mod_kind {
+ ModKind::Loaded(items, ..) => {
+ self.nbsp();
+ self.bopen();
+ self.print_inner_attributes(&item.attrs);
+ for item in items {
+ self.print_item(item);
+ }
+ let empty = item.attrs.is_empty() && items.is_empty();
+ self.bclose(item.span, empty);
+ }
+ ModKind::Unloaded => {
+ self.word(";");
+ self.end(); // end inner head-block
+ self.end(); // end outer head-block
+ }
+ }
+ }
+ ast::ItemKind::ForeignMod(ref nmod) => {
+ self.head(Self::to_string(|s| {
+ s.print_unsafety(nmod.unsafety);
+ s.word("extern");
+ }));
+ if let Some(abi) = nmod.abi {
+ self.print_literal(&abi.as_lit());
+ self.nbsp();
+ }
+ self.bopen();
+ self.print_foreign_mod(nmod, &item.attrs);
+ let empty = item.attrs.is_empty() && nmod.items.is_empty();
+ self.bclose(item.span, empty);
+ }
+ ast::ItemKind::GlobalAsm(ref asm) => {
+ self.head(visibility_qualified(&item.vis, "global_asm!"));
+ self.print_inline_asm(asm);
+ self.end();
+ }
+ ast::ItemKind::TyAlias(box ast::TyAlias {
+ defaultness,
+ ref generics,
+ where_clauses,
+ where_predicates_split,
+ ref bounds,
+ ref ty,
+ }) => {
+ let ty = ty.as_deref();
+ self.print_associated_type(
+ item.ident,
+ generics,
+ where_clauses,
+ where_predicates_split,
+ bounds,
+ ty,
+ &item.vis,
+ defaultness,
+ );
+ }
+ ast::ItemKind::Enum(ref enum_definition, ref params) => {
+ self.print_enum_def(enum_definition, params, item.ident, item.span, &item.vis);
+ }
+ ast::ItemKind::Struct(ref struct_def, ref generics) => {
+ self.head(visibility_qualified(&item.vis, "struct"));
+ self.print_struct(struct_def, generics, item.ident, item.span, true);
+ }
+ ast::ItemKind::Union(ref struct_def, ref generics) => {
+ self.head(visibility_qualified(&item.vis, "union"));
+ self.print_struct(struct_def, generics, item.ident, item.span, true);
+ }
+ ast::ItemKind::Impl(box ast::Impl {
+ unsafety,
+ polarity,
+ defaultness,
+ constness,
+ ref generics,
+ ref of_trait,
+ ref self_ty,
+ ref items,
+ }) => {
+ self.head("");
+ self.print_visibility(&item.vis);
+ self.print_defaultness(defaultness);
+ self.print_unsafety(unsafety);
+ self.word("impl");
+
+ if generics.params.is_empty() {
+ self.nbsp();
+ } else {
+ self.print_generic_params(&generics.params);
+ self.space();
+ }
+
+ self.print_constness(constness);
+
+ if let ast::ImplPolarity::Negative(_) = polarity {
+ self.word("!");
+ }
+
+ if let Some(ref t) = *of_trait {
+ self.print_trait_ref(t);
+ self.space();
+ self.word_space("for");
+ }
+
+ self.print_type(self_ty);
+ self.print_where_clause(&generics.where_clause);
+
+ self.space();
+ self.bopen();
+ self.print_inner_attributes(&item.attrs);
+ for impl_item in items {
+ self.print_assoc_item(impl_item);
+ }
+ let empty = item.attrs.is_empty() && items.is_empty();
+ self.bclose(item.span, empty);
+ }
+ ast::ItemKind::Trait(box ast::Trait {
+ is_auto,
+ unsafety,
+ ref generics,
+ ref bounds,
+ ref items,
+ ..
+ }) => {
+ self.head("");
+ self.print_visibility(&item.vis);
+ self.print_unsafety(unsafety);
+ self.print_is_auto(is_auto);
+ self.word_nbsp("trait");
+ self.print_ident(item.ident);
+ self.print_generic_params(&generics.params);
+ let mut real_bounds = Vec::with_capacity(bounds.len());
+ for b in bounds.iter() {
+ if let GenericBound::Trait(ref ptr, ast::TraitBoundModifier::Maybe) = *b {
+ self.space();
+ self.word_space("for ?");
+ self.print_trait_ref(&ptr.trait_ref);
+ } else {
+ real_bounds.push(b.clone());
+ }
+ }
+ if !real_bounds.is_empty() {
+ self.word_nbsp(":");
+ self.print_type_bounds(&real_bounds);
+ }
+ self.print_where_clause(&generics.where_clause);
+ self.word(" ");
+ self.bopen();
+ self.print_inner_attributes(&item.attrs);
+ for trait_item in items {
+ self.print_assoc_item(trait_item);
+ }
+ let empty = item.attrs.is_empty() && items.is_empty();
+ self.bclose(item.span, empty);
+ }
+ ast::ItemKind::TraitAlias(ref generics, ref bounds) => {
+ self.head(visibility_qualified(&item.vis, "trait"));
+ self.print_ident(item.ident);
+ self.print_generic_params(&generics.params);
+ let mut real_bounds = Vec::with_capacity(bounds.len());
+                // FIXME(durka) this seems to be quite outdated syntax
+ for b in bounds.iter() {
+ if let GenericBound::Trait(ref ptr, ast::TraitBoundModifier::Maybe) = *b {
+ self.space();
+ self.word_space("for ?");
+ self.print_trait_ref(&ptr.trait_ref);
+ } else {
+ real_bounds.push(b.clone());
+ }
+ }
+ self.nbsp();
+ if !real_bounds.is_empty() {
+ self.word_nbsp("=");
+ self.print_type_bounds(&real_bounds);
+ }
+ self.print_where_clause(&generics.where_clause);
+ self.word(";");
+ self.end(); // end inner head-block
+ self.end(); // end outer head-block
+ }
+ ast::ItemKind::MacCall(ref mac) => {
+ self.print_mac(mac);
+ if mac.args.need_semicolon() {
+ self.word(";");
+ }
+ }
+ ast::ItemKind::MacroDef(ref macro_def) => {
+ self.print_mac_def(macro_def, &item.ident, item.span, |state| {
+ state.print_visibility(&item.vis)
+ });
+ }
+ }
+ self.ann.post(self, AnnNode::Item(item))
+ }
+
+ fn print_enum_def(
+ &mut self,
+ enum_definition: &ast::EnumDef,
+ generics: &ast::Generics,
+ ident: Ident,
+ span: rustc_span::Span,
+ visibility: &ast::Visibility,
+ ) {
+ self.head(visibility_qualified(visibility, "enum"));
+ self.print_ident(ident);
+ self.print_generic_params(&generics.params);
+ self.print_where_clause(&generics.where_clause);
+ self.space();
+ self.print_variants(&enum_definition.variants, span)
+ }
+
+ fn print_variants(&mut self, variants: &[ast::Variant], span: rustc_span::Span) {
+ self.bopen();
+ for v in variants {
+ self.space_if_not_bol();
+ self.maybe_print_comment(v.span.lo());
+ self.print_outer_attributes(&v.attrs);
+ self.ibox(0);
+ self.print_variant(v);
+ self.word(",");
+ self.end();
+ self.maybe_print_trailing_comment(v.span, None);
+ }
+ let empty = variants.is_empty();
+ self.bclose(span, empty)
+ }
+
+ pub(crate) fn print_visibility(&mut self, vis: &ast::Visibility) {
+ match vis.kind {
+ ast::VisibilityKind::Public => self.word_nbsp("pub"),
+ ast::VisibilityKind::Restricted { ref path, .. } => {
+ let path = Self::to_string(|s| s.print_path(path, false, 0));
+ if path == "crate" || path == "self" || path == "super" {
+ self.word_nbsp(format!("pub({})", path))
+ } else {
+ self.word_nbsp(format!("pub(in {})", path))
+ }
+ }
+ ast::VisibilityKind::Inherited => {}
+ }
+ }
+
+ fn print_defaultness(&mut self, defaultness: ast::Defaultness) {
+ if let ast::Defaultness::Default(_) = defaultness {
+ self.word_nbsp("default");
+ }
+ }
+
+ fn print_record_struct_body(&mut self, fields: &[ast::FieldDef], span: rustc_span::Span) {
+ self.nbsp();
+ self.bopen();
+
+ let empty = fields.is_empty();
+ if !empty {
+ self.hardbreak_if_not_bol();
+
+ for field in fields {
+ self.hardbreak_if_not_bol();
+ self.maybe_print_comment(field.span.lo());
+ self.print_outer_attributes(&field.attrs);
+ self.print_visibility(&field.vis);
+ self.print_ident(field.ident.unwrap());
+ self.word_nbsp(":");
+ self.print_type(&field.ty);
+ self.word(",");
+ }
+ }
+
+ self.bclose(span, empty);
+ }
+
+ fn print_struct(
+ &mut self,
+ struct_def: &ast::VariantData,
+ generics: &ast::Generics,
+ ident: Ident,
+ span: rustc_span::Span,
+ print_finalizer: bool,
+ ) {
+ self.print_ident(ident);
+ self.print_generic_params(&generics.params);
+ match struct_def {
+ ast::VariantData::Tuple(..) | ast::VariantData::Unit(..) => {
+ if let ast::VariantData::Tuple(..) = struct_def {
+ self.popen();
+ self.commasep(Inconsistent, struct_def.fields(), |s, field| {
+ s.maybe_print_comment(field.span.lo());
+ s.print_outer_attributes(&field.attrs);
+ s.print_visibility(&field.vis);
+ s.print_type(&field.ty)
+ });
+ self.pclose();
+ }
+ self.print_where_clause(&generics.where_clause);
+ if print_finalizer {
+ self.word(";");
+ }
+ self.end();
+ self.end(); // Close the outer-box.
+ }
+ ast::VariantData::Struct(ref fields, ..) => {
+ self.print_where_clause(&generics.where_clause);
+ self.print_record_struct_body(fields, span);
+ }
+ }
+ }
+
+ pub(crate) fn print_variant(&mut self, v: &ast::Variant) {
+ self.head("");
+ self.print_visibility(&v.vis);
+ let generics = ast::Generics::default();
+ self.print_struct(&v.data, &generics, v.ident, v.span, false);
+ if let Some(ref d) = v.disr_expr {
+ self.space();
+ self.word_space("=");
+ self.print_expr(&d.value)
+ }
+ }
+
+ pub(crate) fn print_assoc_item(&mut self, item: &ast::AssocItem) {
+ let ast::Item { id, span, ident, ref attrs, ref kind, ref vis, tokens: _ } = *item;
+ self.ann.pre(self, AnnNode::SubItem(id));
+ self.hardbreak_if_not_bol();
+ self.maybe_print_comment(span.lo());
+ self.print_outer_attributes(attrs);
+ match kind {
+ ast::AssocItemKind::Fn(box ast::Fn { defaultness, sig, generics, body }) => {
+ self.print_fn_full(sig, ident, generics, vis, *defaultness, body.as_deref(), attrs);
+ }
+ ast::AssocItemKind::Const(def, ty, body) => {
+ self.print_item_const(ident, None, ty, body.as_deref(), vis, *def);
+ }
+ ast::AssocItemKind::TyAlias(box ast::TyAlias {
+ defaultness,
+ generics,
+ where_clauses,
+ where_predicates_split,
+ bounds,
+ ty,
+ }) => {
+ self.print_associated_type(
+ ident,
+ generics,
+ *where_clauses,
+ *where_predicates_split,
+ bounds,
+ ty.as_deref(),
+ vis,
+ *defaultness,
+ );
+ }
+ ast::AssocItemKind::MacCall(m) => {
+ self.print_mac(m);
+ if m.args.need_semicolon() {
+ self.word(";");
+ }
+ }
+ }
+ self.ann.post(self, AnnNode::SubItem(id))
+ }
+
+ fn print_fn_full(
+ &mut self,
+ sig: &ast::FnSig,
+ name: Ident,
+ generics: &ast::Generics,
+ vis: &ast::Visibility,
+ defaultness: ast::Defaultness,
+ body: Option<&ast::Block>,
+ attrs: &[ast::Attribute],
+ ) {
+ if body.is_some() {
+ self.head("");
+ }
+ self.print_visibility(vis);
+ self.print_defaultness(defaultness);
+ self.print_fn(&sig.decl, sig.header, Some(name), generics);
+ if let Some(body) = body {
+ self.nbsp();
+ self.print_block_with_attrs(body, attrs);
+ } else {
+ self.word(";");
+ }
+ }
+
+ pub(crate) fn print_fn(
+ &mut self,
+ decl: &ast::FnDecl,
+ header: ast::FnHeader,
+ name: Option<Ident>,
+ generics: &ast::Generics,
+ ) {
+ self.print_fn_header_info(header);
+ if let Some(name) = name {
+ self.nbsp();
+ self.print_ident(name);
+ }
+ self.print_generic_params(&generics.params);
+ self.print_fn_params_and_ret(decl, false);
+ self.print_where_clause(&generics.where_clause)
+ }
+
+ pub(crate) fn print_fn_params_and_ret(&mut self, decl: &ast::FnDecl, is_closure: bool) {
+ let (open, close) = if is_closure { ("|", "|") } else { ("(", ")") };
+ self.word(open);
+ self.commasep(Inconsistent, &decl.inputs, |s, param| s.print_param(param, is_closure));
+ self.word(close);
+ self.print_fn_ret_ty(&decl.output)
+ }
+
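+    /// Prints the `where` clause, e.g. `where T: Clone, U: Default`. A bare
+    /// `where` with no predicates is preserved whenever the source had the
+    /// `where` token.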
+ fn print_where_clause(&mut self, where_clause: &ast::WhereClause) {
+ self.print_where_clause_parts(where_clause.has_where_token, &where_clause.predicates);
+ }
+
+ pub(crate) fn print_where_clause_parts(
+ &mut self,
+ has_where_token: bool,
+ predicates: &[ast::WherePredicate],
+ ) {
+ if predicates.is_empty() && !has_where_token {
+ return;
+ }
+
+ self.space();
+ self.word_space("where");
+
+ for (i, predicate) in predicates.iter().enumerate() {
+ if i != 0 {
+ self.word_space(",");
+ }
+
+ self.print_where_predicate(predicate);
+ }
+ }
+
+ pub fn print_where_predicate(&mut self, predicate: &ast::WherePredicate) {
+ match predicate {
+ ast::WherePredicate::BoundPredicate(ast::WhereBoundPredicate {
+ bound_generic_params,
+ bounded_ty,
+ bounds,
+ ..
+ }) => {
+ self.print_formal_generic_params(bound_generic_params);
+ self.print_type(bounded_ty);
+ self.word(":");
+ if !bounds.is_empty() {
+ self.nbsp();
+ self.print_type_bounds(bounds);
+ }
+ }
+ ast::WherePredicate::RegionPredicate(ast::WhereRegionPredicate {
+ lifetime,
+ bounds,
+ ..
+ }) => {
+ self.print_lifetime(*lifetime);
+ self.word(":");
+ if !bounds.is_empty() {
+ self.nbsp();
+ self.print_lifetime_bounds(bounds);
+ }
+ }
+ ast::WherePredicate::EqPredicate(ast::WhereEqPredicate { lhs_ty, rhs_ty, .. }) => {
+ self.print_type(lhs_ty);
+ self.space();
+ self.word_space("=");
+ self.print_type(rhs_ty);
+ }
+ }
+ }
+
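+    /// Prints a `use` tree; for illustration, a nested group prints as
+    /// `std::{io, fmt}`, while a single-element group collapses to `std::io`.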
+ fn print_use_tree(&mut self, tree: &ast::UseTree) {
+ match tree.kind {
+ ast::UseTreeKind::Simple(rename, ..) => {
+ self.print_path(&tree.prefix, false, 0);
+ if let Some(rename) = rename {
+ self.nbsp();
+ self.word_nbsp("as");
+ self.print_ident(rename);
+ }
+ }
+ ast::UseTreeKind::Glob => {
+ if !tree.prefix.segments.is_empty() {
+ self.print_path(&tree.prefix, false, 0);
+ self.word("::");
+ }
+ self.word("*");
+ }
+ ast::UseTreeKind::Nested(ref items) => {
+ if !tree.prefix.segments.is_empty() {
+ self.print_path(&tree.prefix, false, 0);
+ self.word("::");
+ }
+ if items.is_empty() {
+ self.word("{}");
+ } else if items.len() == 1 {
+ self.print_use_tree(&items[0].0);
+ } else {
+ self.cbox(INDENT_UNIT);
+ self.word("{");
+ self.zerobreak();
+ self.ibox(0);
+ for use_tree in items.iter().delimited() {
+ self.print_use_tree(&use_tree.0);
+ if !use_tree.is_last {
+ self.word(",");
+ if let ast::UseTreeKind::Nested(_) = use_tree.0.kind {
+ self.hardbreak();
+ } else {
+ self.space();
+ }
+ }
+ }
+ self.end();
+ self.trailing_comma();
+ self.offset(-INDENT_UNIT);
+ self.word("}");
+ self.end();
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_ast_pretty/src/pprust/tests.rs b/compiler/rustc_ast_pretty/src/pprust/tests.rs
new file mode 100644
index 000000000..6c8d42f33
--- /dev/null
+++ b/compiler/rustc_ast_pretty/src/pprust/tests.rs
@@ -0,0 +1,63 @@
+use super::*;
+
+use rustc_ast as ast;
+use rustc_span::create_default_session_globals_then;
+use rustc_span::symbol::Ident;
+
+fn fun_to_string(
+ decl: &ast::FnDecl,
+ header: ast::FnHeader,
+ name: Ident,
+ generics: &ast::Generics,
+) -> String {
+ to_string(|s| {
+ s.head("");
+ s.print_fn(decl, header, Some(name), generics);
+ s.end(); // Close the head box.
+ s.end(); // Close the outer box.
+ })
+}
+
+fn variant_to_string(var: &ast::Variant) -> String {
+ to_string(|s| s.print_variant(var))
+}
+
+#[test]
+fn test_fun_to_string() {
+ create_default_session_globals_then(|| {
+ let abba_ident = Ident::from_str("abba");
+
+ let decl =
+ ast::FnDecl { inputs: Vec::new(), output: ast::FnRetTy::Default(rustc_span::DUMMY_SP) };
+ let generics = ast::Generics::default();
+ assert_eq!(
+ fun_to_string(&decl, ast::FnHeader::default(), abba_ident, &generics),
+ "fn abba()"
+ );
+ })
+}
+
+#[test]
+fn test_variant_to_string() {
+ create_default_session_globals_then(|| {
+ let ident = Ident::from_str("principal_skinner");
+
+ let var = ast::Variant {
+ ident,
+ vis: ast::Visibility {
+ span: rustc_span::DUMMY_SP,
+ kind: ast::VisibilityKind::Inherited,
+ tokens: None,
+ },
+ attrs: ast::AttrVec::new(),
+ id: ast::DUMMY_NODE_ID,
+ data: ast::VariantData::Unit(ast::DUMMY_NODE_ID),
+ disr_expr: None,
+ span: rustc_span::DUMMY_SP,
+ is_placeholder: false,
+ };
+
+ let varstr = variant_to_string(&var);
+ assert_eq!(varstr, "principal_skinner");
+ })
+}
diff --git a/compiler/rustc_attr/Cargo.toml b/compiler/rustc_attr/Cargo.toml
new file mode 100644
index 000000000..ba310a686
--- /dev/null
+++ b/compiler/rustc_attr/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "rustc_attr"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_span = { path = "../rustc_span" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_lexer = { path = "../rustc_lexer" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_session = { path = "../rustc_session" }
+rustc_ast = { path = "../rustc_ast" }
diff --git a/compiler/rustc_attr/src/builtin.rs b/compiler/rustc_attr/src/builtin.rs
new file mode 100644
index 000000000..10a9cfb62
--- /dev/null
+++ b/compiler/rustc_attr/src/builtin.rs
@@ -0,0 +1,1274 @@
+//! Parsing and validation of builtin attributes
+
+use rustc_ast as ast;
+use rustc_ast::{Attribute, Lit, LitKind, MetaItem, MetaItemKind, NestedMetaItem, NodeId};
+use rustc_ast_pretty::pprust;
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_feature::{find_gated_cfg, is_builtin_attr_name, Features, GatedCfg};
+use rustc_macros::HashStable_Generic;
+use rustc_session::lint::builtin::UNEXPECTED_CFGS;
+use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_session::parse::{feature_err, ParseSess};
+use rustc_session::Session;
+use rustc_span::hygiene::Transparency;
+use rustc_span::{symbol::sym, symbol::Symbol, Span};
+use std::num::NonZeroU32;
+
+pub fn is_builtin_attr(attr: &Attribute) -> bool {
+ attr.is_doc_comment() || attr.ident().filter(|ident| is_builtin_attr_name(ident.name)).is_some()
+}
+
+enum AttrError {
+ MultipleItem(String),
+ UnknownMetaItem(String, &'static [&'static str]),
+ MissingSince,
+ NonIdentFeature,
+ MissingFeature,
+ MultipleStabilityLevels,
+ UnsupportedLiteral(&'static str, /* is_bytestr */ bool),
+}
+
+fn handle_errors(sess: &ParseSess, span: Span, error: AttrError) {
+ let diag = &sess.span_diagnostic;
+ match error {
+ AttrError::MultipleItem(item) => {
+ struct_span_err!(diag, span, E0538, "multiple '{}' items", item).emit();
+ }
+ AttrError::UnknownMetaItem(item, expected) => {
+ let expected = expected.iter().map(|name| format!("`{}`", name)).collect::<Vec<_>>();
+ struct_span_err!(diag, span, E0541, "unknown meta item '{}'", item)
+ .span_label(span, format!("expected one of {}", expected.join(", ")))
+ .emit();
+ }
+ AttrError::MissingSince => {
+ struct_span_err!(diag, span, E0542, "missing 'since'").emit();
+ }
+ AttrError::NonIdentFeature => {
+ struct_span_err!(diag, span, E0546, "'feature' is not an identifier").emit();
+ }
+ AttrError::MissingFeature => {
+ struct_span_err!(diag, span, E0546, "missing 'feature'").emit();
+ }
+ AttrError::MultipleStabilityLevels => {
+ struct_span_err!(diag, span, E0544, "multiple stability levels").emit();
+ }
+ AttrError::UnsupportedLiteral(msg, is_bytestr) => {
+ let mut err = struct_span_err!(diag, span, E0565, "{}", msg);
+ if is_bytestr {
+ if let Ok(lint_str) = sess.source_map().span_to_snippet(span) {
+ err.span_suggestion(
+ span,
+ "consider removing the prefix",
+ &lint_str[1..],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ err.emit();
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum InlineAttr {
+ None,
+ Hint,
+ Always,
+ Never,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug, PartialEq, Eq, HashStable_Generic)]
+pub enum InstructionSetAttr {
+ ArmA32,
+ ArmT32,
+}
+
+#[derive(Clone, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum OptimizeAttr {
+ None,
+ Speed,
+ Size,
+}
+
+/// Represents the following attributes:
+///
+/// - `#[stable]`
+/// - `#[unstable]`
+#[derive(Encodable, Decodable, Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(HashStable_Generic)]
+pub struct Stability {
+ pub level: StabilityLevel,
+ pub feature: Symbol,
+}
+
+impl Stability {
+ pub fn is_unstable(&self) -> bool {
+ self.level.is_unstable()
+ }
+
+ pub fn is_stable(&self) -> bool {
+ self.level.is_stable()
+ }
+}
+
+/// Represents the `#[rustc_const_unstable]` and `#[rustc_const_stable]` attributes.
+#[derive(Encodable, Decodable, Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(HashStable_Generic)]
+pub struct ConstStability {
+ pub level: StabilityLevel,
+ pub feature: Symbol,
+    /// Whether the function has a `#[rustc_promotable]` attribute.
+ pub promotable: bool,
+}
+
+impl ConstStability {
+ pub fn is_const_unstable(&self) -> bool {
+ self.level.is_unstable()
+ }
+
+ pub fn is_const_stable(&self) -> bool {
+ self.level.is_stable()
+ }
+}
+
+/// The available stability levels.
+#[derive(Encodable, Decodable, PartialEq, Copy, Clone, Debug, Eq, Hash)]
+#[derive(HashStable_Generic)]
+pub enum StabilityLevel {
+ /// `#[unstable]`
+ Unstable {
+ /// Reason for the current stability level.
+ reason: UnstableReason,
+ /// Relevant `rust-lang/rust` issue.
+ issue: Option<NonZeroU32>,
+ is_soft: bool,
+ /// If part of a feature is stabilized and a new feature is added for the remaining parts,
+ /// then the `implied_by` attribute is used to indicate which now-stable feature previously
+        /// contained the item.
+ ///
+ /// ```pseudo-Rust
+ /// #[unstable(feature = "foo", issue = "...")]
+ /// fn foo() {}
+ /// #[unstable(feature = "foo", issue = "...")]
+ /// fn foobar() {}
+ /// ```
+ ///
+ /// ...becomes...
+ ///
+ /// ```pseudo-Rust
+ /// #[stable(feature = "foo", since = "1.XX.X")]
+ /// fn foo() {}
+ /// #[unstable(feature = "foobar", issue = "...", implied_by = "foo")]
+ /// fn foobar() {}
+ /// ```
+ implied_by: Option<Symbol>,
+ },
+ /// `#[stable]`
+ Stable {
+ /// Rust release which stabilized this feature.
+ since: Symbol,
+ /// Is this item allowed to be referred to on stable, despite being contained in unstable
+ /// modules?
+ allowed_through_unstable_modules: bool,
+ },
+}
+
+impl StabilityLevel {
+ pub fn is_unstable(&self) -> bool {
+ matches!(self, StabilityLevel::Unstable { .. })
+ }
+ pub fn is_stable(&self) -> bool {
+ matches!(self, StabilityLevel::Stable { .. })
+ }
+}
+
+#[derive(Encodable, Decodable, PartialEq, Copy, Clone, Debug, Eq, Hash)]
+#[derive(HashStable_Generic)]
+pub enum UnstableReason {
+ None,
+ Default,
+ Some(Symbol),
+}
+
+impl UnstableReason {
+ fn from_opt_reason(reason: Option<Symbol>) -> Self {
+        // `UnstableReason::Default` is only constructed manually; it is never
+        // parsed from a `reason` string.
+ match reason {
+ Some(r) => Self::Some(r),
+ None => Self::None,
+ }
+ }
+
+ pub fn to_opt_reason(&self) -> Option<Symbol> {
+ match self {
+ Self::None => None,
+ Self::Default => Some(sym::unstable_location_reason_default),
+ Self::Some(r) => Some(*r),
+ }
+ }
+}
+
+/// Collects stability info from all stability attributes in `attrs`.
+/// Each component of the returned pair is `None` if the corresponding kind of
+/// stability attribute is not found.
+pub fn find_stability(
+ sess: &Session,
+ attrs: &[Attribute],
+ item_sp: Span,
+) -> (Option<(Stability, Span)>, Option<(ConstStability, Span)>) {
+ find_stability_generic(sess, attrs.iter(), item_sp)
+}
+
+fn find_stability_generic<'a, I>(
+ sess: &Session,
+ attrs_iter: I,
+ item_sp: Span,
+) -> (Option<(Stability, Span)>, Option<(ConstStability, Span)>)
+where
+ I: Iterator<Item = &'a Attribute>,
+{
+ use StabilityLevel::*;
+
+ let mut stab: Option<(Stability, Span)> = None;
+ let mut const_stab: Option<(ConstStability, Span)> = None;
+ let mut promotable = false;
+ let mut allowed_through_unstable_modules = false;
+
+ let diagnostic = &sess.parse_sess.span_diagnostic;
+
+ 'outer: for attr in attrs_iter {
+ if ![
+ sym::rustc_const_unstable,
+ sym::rustc_const_stable,
+ sym::unstable,
+ sym::stable,
+ sym::rustc_promotable,
+ sym::rustc_allowed_through_unstable_modules,
+ ]
+ .iter()
+ .any(|&s| attr.has_name(s))
+ {
+ continue; // not a stability level
+ }
+
+ let meta = attr.meta();
+
+ if attr.has_name(sym::rustc_promotable) {
+ promotable = true;
+ } else if attr.has_name(sym::rustc_allowed_through_unstable_modules) {
+ allowed_through_unstable_modules = true;
+ }
+ // attributes with data
+ else if let Some(MetaItem { kind: MetaItemKind::List(ref metas), .. }) = meta {
+ let meta = meta.as_ref().unwrap();
+ let get = |meta: &MetaItem, item: &mut Option<Symbol>| {
+ if item.is_some() {
+ handle_errors(
+ &sess.parse_sess,
+ meta.span,
+ AttrError::MultipleItem(pprust::path_to_string(&meta.path)),
+ );
+ return false;
+ }
+ if let Some(v) = meta.value_str() {
+ *item = Some(v);
+ true
+ } else {
+ struct_span_err!(diagnostic, meta.span, E0539, "incorrect meta item").emit();
+ false
+ }
+ };
+
+ let meta_name = meta.name_or_empty();
+ match meta_name {
+ sym::rustc_const_unstable | sym::unstable => {
+ if meta_name == sym::unstable && stab.is_some() {
+ handle_errors(
+ &sess.parse_sess,
+ attr.span,
+ AttrError::MultipleStabilityLevels,
+ );
+ break;
+ } else if meta_name == sym::rustc_const_unstable && const_stab.is_some() {
+ handle_errors(
+ &sess.parse_sess,
+ attr.span,
+ AttrError::MultipleStabilityLevels,
+ );
+ break;
+ }
+
+ let mut feature = None;
+ let mut reason = None;
+ let mut issue = None;
+ let mut issue_num = None;
+ let mut is_soft = false;
+ let mut implied_by = None;
+ for meta in metas {
+ let Some(mi) = meta.meta_item() else {
+ handle_errors(
+ &sess.parse_sess,
+ meta.span(),
+ AttrError::UnsupportedLiteral("unsupported literal", false),
+ );
+ continue 'outer;
+ };
+ match mi.name_or_empty() {
+ sym::feature => {
+ if !get(mi, &mut feature) {
+ continue 'outer;
+ }
+ }
+ sym::reason => {
+ if !get(mi, &mut reason) {
+ continue 'outer;
+ }
+ }
+ sym::issue => {
+ if !get(mi, &mut issue) {
+ continue 'outer;
+ }
+
+ // These unwraps are safe because `get` ensures the meta item
+ // is a name/value pair string literal.
+ issue_num = match issue.unwrap().as_str() {
+ "none" => None,
+ issue => {
+ let emit_diag = |msg: &str| {
+ struct_span_err!(
+ diagnostic,
+ mi.span,
+ E0545,
+ "`issue` must be a non-zero numeric string \
+ or \"none\"",
+ )
+ .span_label(mi.name_value_literal_span().unwrap(), msg)
+ .emit();
+ };
+ match issue.parse() {
+ Ok(0) => {
+ emit_diag(
+ "`issue` must not be \"0\", \
+ use \"none\" instead",
+ );
+ continue 'outer;
+ }
+ Ok(num) => NonZeroU32::new(num),
+ Err(err) => {
+ emit_diag(&err.to_string());
+ continue 'outer;
+ }
+ }
+ }
+ };
+ }
+ sym::soft => {
+ if !mi.is_word() {
+ let msg = "`soft` should not have any arguments";
+ sess.parse_sess.span_diagnostic.span_err(mi.span, msg);
+ }
+ is_soft = true;
+ }
+ sym::implied_by => {
+ if !get(mi, &mut implied_by) {
+ continue 'outer;
+ }
+ }
+ _ => {
+ handle_errors(
+ &sess.parse_sess,
+ meta.span(),
+ AttrError::UnknownMetaItem(
+ pprust::path_to_string(&mi.path),
+                                        &["feature", "reason", "issue", "soft", "implied_by"],
+ ),
+ );
+ continue 'outer;
+ }
+ }
+ }
+
+ match (feature, reason, issue) {
+ (Some(feature), reason, Some(_)) => {
+ if !rustc_lexer::is_ident(feature.as_str()) {
+ handle_errors(
+ &sess.parse_sess,
+ attr.span,
+ AttrError::NonIdentFeature,
+ );
+ continue;
+ }
+ let level = Unstable {
+ reason: UnstableReason::from_opt_reason(reason),
+ issue: issue_num,
+ is_soft,
+ implied_by,
+ };
+ if sym::unstable == meta_name {
+ stab = Some((Stability { level, feature }, attr.span));
+ } else {
+ const_stab = Some((
+ ConstStability { level, feature, promotable: false },
+ attr.span,
+ ));
+ }
+ }
+ (None, _, _) => {
+ handle_errors(&sess.parse_sess, attr.span, AttrError::MissingFeature);
+ continue;
+ }
+ _ => {
+ struct_span_err!(diagnostic, attr.span, E0547, "missing 'issue'")
+ .emit();
+ continue;
+ }
+ }
+ }
+ sym::rustc_const_stable | sym::stable => {
+ if meta_name == sym::stable && stab.is_some() {
+ handle_errors(
+ &sess.parse_sess,
+ attr.span,
+ AttrError::MultipleStabilityLevels,
+ );
+ break;
+ } else if meta_name == sym::rustc_const_stable && const_stab.is_some() {
+ handle_errors(
+ &sess.parse_sess,
+ attr.span,
+ AttrError::MultipleStabilityLevels,
+ );
+ break;
+ }
+
+ let mut feature = None;
+ let mut since = None;
+ for meta in metas {
+ match meta {
+ NestedMetaItem::MetaItem(mi) => match mi.name_or_empty() {
+ sym::feature => {
+ if !get(mi, &mut feature) {
+ continue 'outer;
+ }
+ }
+ sym::since => {
+ if !get(mi, &mut since) {
+ continue 'outer;
+ }
+ }
+ _ => {
+ handle_errors(
+ &sess.parse_sess,
+ meta.span(),
+ AttrError::UnknownMetaItem(
+ pprust::path_to_string(&mi.path),
+ &["feature", "since"],
+ ),
+ );
+ continue 'outer;
+ }
+ },
+ NestedMetaItem::Literal(lit) => {
+ handle_errors(
+ &sess.parse_sess,
+ lit.span,
+ AttrError::UnsupportedLiteral("unsupported literal", false),
+ );
+ continue 'outer;
+ }
+ }
+ }
+
+ match (feature, since) {
+ (Some(feature), Some(since)) => {
+ let level = Stable { since, allowed_through_unstable_modules: false };
+ if sym::stable == meta_name {
+ stab = Some((Stability { level, feature }, attr.span));
+ } else {
+ const_stab = Some((
+ ConstStability { level, feature, promotable: false },
+ attr.span,
+ ));
+ }
+ }
+ (None, _) => {
+ handle_errors(&sess.parse_sess, attr.span, AttrError::MissingFeature);
+ continue;
+ }
+ _ => {
+ handle_errors(&sess.parse_sess, attr.span, AttrError::MissingSince);
+ continue;
+ }
+ }
+ }
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ // Merge the const-unstable info into the stability info
+ if promotable {
+ if let Some((ref mut stab, _)) = const_stab {
+ stab.promotable = promotable;
+ } else {
+ struct_span_err!(
+ diagnostic,
+ item_sp,
+ E0717,
+ "`rustc_promotable` attribute must be paired with either a `rustc_const_unstable` \
+ or a `rustc_const_stable` attribute"
+ )
+ .emit();
+ }
+ }
+
+ if allowed_through_unstable_modules {
+ if let Some((
+ Stability {
+ level: StabilityLevel::Stable { ref mut allowed_through_unstable_modules, .. },
+ ..
+ },
+ _,
+ )) = stab
+ {
+ *allowed_through_unstable_modules = true;
+ } else {
+ struct_span_err!(
+ diagnostic,
+ item_sp,
+ E0789,
+ "`rustc_allowed_through_unstable_modules` attribute must be paired with a `stable` attribute"
+ )
+ .emit();
+ }
+ }
+
+ (stab, const_stab)
+}
+
+pub fn find_crate_name(sess: &Session, attrs: &[Attribute]) -> Option<Symbol> {
+ sess.first_attr_value_str_by_name(attrs, sym::crate_name)
+}
+
+#[derive(Clone, Debug)]
+pub struct Condition {
+ pub name: Symbol,
+ pub name_span: Span,
+ pub value: Option<Symbol>,
+ pub value_span: Option<Span>,
+ pub span: Span,
+}
+
+/// Tests if a cfg-pattern matches the cfg set
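+/// (for example, whether `feature = "foo"` holds for the current session),
+/// buffering `unexpected_cfgs` lints for condition names or values outside
+/// the checked configuration sets.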
+pub fn cfg_matches(
+ cfg: &ast::MetaItem,
+ sess: &ParseSess,
+ lint_node_id: NodeId,
+ features: Option<&Features>,
+) -> bool {
+ eval_condition(cfg, sess, features, &mut |cfg| {
+ try_gate_cfg(cfg.name, cfg.span, sess, features);
+ if let Some(names_valid) = &sess.check_config.names_valid {
+ if !names_valid.contains(&cfg.name) {
+ sess.buffer_lint_with_diagnostic(
+ UNEXPECTED_CFGS,
+ cfg.span,
+ lint_node_id,
+ "unexpected `cfg` condition name",
+ BuiltinLintDiagnostics::UnexpectedCfg((cfg.name, cfg.name_span), None),
+ );
+ }
+ }
+ if let Some(value) = cfg.value {
+ if let Some(values) = &sess.check_config.values_valid.get(&cfg.name) {
+ if !values.contains(&value) {
+ sess.buffer_lint_with_diagnostic(
+ UNEXPECTED_CFGS,
+ cfg.span,
+ lint_node_id,
+ "unexpected `cfg` condition value",
+ BuiltinLintDiagnostics::UnexpectedCfg(
+ (cfg.name, cfg.name_span),
+ cfg.value_span.map(|vs| (value, vs)),
+ ),
+ );
+ }
+ }
+ }
+ sess.config.contains(&(cfg.name, cfg.value))
+ })
+}
+
+fn try_gate_cfg(name: Symbol, span: Span, sess: &ParseSess, features: Option<&Features>) {
+ let gate = find_gated_cfg(|sym| sym == name);
+ if let (Some(feats), Some(gated_cfg)) = (features, gate) {
+ gate_cfg(&gated_cfg, span, sess, feats);
+ }
+}
+
+fn gate_cfg(gated_cfg: &GatedCfg, cfg_span: Span, sess: &ParseSess, features: &Features) {
+ let (cfg, feature, has_feature) = gated_cfg;
+ if !has_feature(features) && !cfg_span.allows_unstable(*feature) {
+ let explain = format!("`cfg({})` is experimental and subject to change", cfg);
+ feature_err(sess, *feature, cfg_span, &explain).emit();
+ }
+}
+
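+/// A parsed `major.minor.patch` version. The derived `Ord` compares fields in
+/// declaration order, so versions order lexicographically by
+/// `(major, minor, patch)`.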
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+struct Version {
+ major: u16,
+ minor: u16,
+ patch: u16,
+}
+
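+/// Parses a version string such as `"1.64.0"`. A missing patch component
+/// defaults to `0`, so (for illustration) `parse_version("1.64", false)`
+/// yields `Some(Version { major: 1, minor: 64, patch: 0 })`. When
+/// `allow_appendix` is set, a `-` suffix such as `"1.64.0-beta.1"` is
+/// tolerated; otherwise it makes the parse fail.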
+fn parse_version(s: &str, allow_appendix: bool) -> Option<Version> {
+ let mut components = s.split('-');
+ let d = components.next()?;
+ if !allow_appendix && components.next().is_some() {
+ return None;
+ }
+ let mut digits = d.splitn(3, '.');
+ let major = digits.next()?.parse().ok()?;
+ let minor = digits.next()?.parse().ok()?;
+ let patch = digits.next().unwrap_or("0").parse().ok()?;
+ Some(Version { major, minor, patch })
+}
+
+/// Evaluate a cfg-like condition (with `any` and `all`), using `eval` to
+/// evaluate individual items.
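+///
+/// For illustration, `all(unix, target_pointer_width = "64")` invokes `eval`
+/// once per leaf predicate and combines the results with `&`.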
+pub fn eval_condition(
+ cfg: &ast::MetaItem,
+ sess: &ParseSess,
+ features: Option<&Features>,
+ eval: &mut impl FnMut(Condition) -> bool,
+) -> bool {
+ match cfg.kind {
+ ast::MetaItemKind::List(ref mis) if cfg.name_or_empty() == sym::version => {
+ try_gate_cfg(sym::version, cfg.span, sess, features);
+ let (min_version, span) = match &mis[..] {
+ [NestedMetaItem::Literal(Lit { kind: LitKind::Str(sym, ..), span, .. })] => {
+ (sym, span)
+ }
+ [
+ NestedMetaItem::Literal(Lit { span, .. })
+ | NestedMetaItem::MetaItem(MetaItem { span, .. }),
+ ] => {
+ sess.span_diagnostic
+ .struct_span_err(*span, "expected a version literal")
+ .emit();
+ return false;
+ }
+ [..] => {
+ sess.span_diagnostic
+ .struct_span_err(cfg.span, "expected single version literal")
+ .emit();
+ return false;
+ }
+ };
+ let Some(min_version) = parse_version(min_version.as_str(), false) else {
+ sess.span_diagnostic
+ .struct_span_warn(
+ *span,
+ "unknown version literal format, assuming it refers to a future version",
+ )
+ .emit();
+ return false;
+ };
+ let rustc_version = parse_version(env!("CFG_RELEASE"), true).unwrap();
+
+ // See https://github.com/rust-lang/rust/issues/64796#issuecomment-640851454 for details
+ if sess.assume_incomplete_release {
+ rustc_version > min_version
+ } else {
+ rustc_version >= min_version
+ }
+ }
+ ast::MetaItemKind::List(ref mis) => {
+ for mi in mis.iter() {
+ if !mi.is_meta_item() {
+ handle_errors(
+ sess,
+ mi.span(),
+ AttrError::UnsupportedLiteral("unsupported literal", false),
+ );
+ return false;
+ }
+ }
+
+ // The unwraps below may look dangerous, but we've already asserted
+ // that they won't fail with the loop above.
+ match cfg.name_or_empty() {
+ sym::any => mis
+ .iter()
+                    // We don't use `any()` here, because we want to evaluate every cfg
+                    // condition, as `eval_condition` can (and does) perform extra checks.
+ .fold(false, |res, mi| {
+ res | eval_condition(mi.meta_item().unwrap(), sess, features, eval)
+ }),
+ sym::all => mis
+ .iter()
+                    // We don't use `all()` here, because we want to evaluate every cfg
+                    // condition, as `eval_condition` can (and does) perform extra checks.
+ .fold(true, |res, mi| {
+ res & eval_condition(mi.meta_item().unwrap(), sess, features, eval)
+ }),
+ sym::not => {
+ if mis.len() != 1 {
+ struct_span_err!(
+ sess.span_diagnostic,
+ cfg.span,
+ E0536,
+ "expected 1 cfg-pattern"
+ )
+ .emit();
+ return false;
+ }
+
+ !eval_condition(mis[0].meta_item().unwrap(), sess, features, eval)
+ }
+ sym::target => {
+ if let Some(features) = features && !features.cfg_target_compact {
+ feature_err(
+ sess,
+ sym::cfg_target_compact,
+ cfg.span,
+ &"compact `cfg(target(..))` is experimental and subject to change"
+ ).emit();
+ }
+
+ mis.iter().fold(true, |res, mi| {
+ let mut mi = mi.meta_item().unwrap().clone();
+ if let [seg, ..] = &mut mi.path.segments[..] {
+ seg.ident.name = Symbol::intern(&format!("target_{}", seg.ident.name));
+ }
+
+ res & eval_condition(&mi, sess, features, eval)
+ })
+ }
+ _ => {
+ struct_span_err!(
+ sess.span_diagnostic,
+ cfg.span,
+ E0537,
+ "invalid predicate `{}`",
+ pprust::path_to_string(&cfg.path)
+ )
+ .emit();
+ false
+ }
+ }
+ }
+ ast::MetaItemKind::Word | MetaItemKind::NameValue(..) if cfg.path.segments.len() != 1 => {
+ sess.span_diagnostic
+ .span_err(cfg.path.span, "`cfg` predicate key must be an identifier");
+ true
+ }
+ MetaItemKind::NameValue(ref lit) if !lit.kind.is_str() => {
+ handle_errors(
+ sess,
+ lit.span,
+ AttrError::UnsupportedLiteral(
+ "literal in `cfg` predicate value must be a string",
+ lit.kind.is_bytestr(),
+ ),
+ );
+ true
+ }
+ ast::MetaItemKind::Word | ast::MetaItemKind::NameValue(..) => {
+ let ident = cfg.ident().expect("multi-segment cfg predicate");
+ eval(Condition {
+ name: ident.name,
+ name_span: ident.span,
+ value: cfg.value_str(),
+ value_span: cfg.name_value_literal_span(),
+ span: cfg.span,
+ })
+ }
+ }
+}
+
+#[derive(Copy, Debug, Encodable, Decodable, Clone, HashStable_Generic)]
+pub struct Deprecation {
+ pub since: Option<Symbol>,
+    /// The note giving the reason for the deprecation.
+ pub note: Option<Symbol>,
+ /// A text snippet used to completely replace any use of the deprecated item in an expression.
+ ///
+ /// This is currently unstable.
+ pub suggestion: Option<Symbol>,
+
+ /// Whether to treat the since attribute as being a Rust version identifier
+ /// (rather than an opaque string).
+ pub is_since_rustc_version: bool,
+}
+
+/// Finds the deprecation attribute. `None` if none exists.
+pub fn find_deprecation(sess: &Session, attrs: &[Attribute]) -> Option<(Deprecation, Span)> {
+ find_deprecation_generic(sess, attrs.iter())
+}
+
+fn find_deprecation_generic<'a, I>(sess: &Session, attrs_iter: I) -> Option<(Deprecation, Span)>
+where
+ I: Iterator<Item = &'a Attribute>,
+{
+ let mut depr: Option<(Deprecation, Span)> = None;
+ let diagnostic = &sess.parse_sess.span_diagnostic;
+ let is_rustc = sess.features_untracked().staged_api;
+
+ 'outer: for attr in attrs_iter {
+ if !attr.has_name(sym::deprecated) {
+ continue;
+ }
+
+ let Some(meta) = attr.meta() else {
+ continue;
+ };
+ let mut since = None;
+ let mut note = None;
+ let mut suggestion = None;
+ match &meta.kind {
+ MetaItemKind::Word => {}
+ MetaItemKind::NameValue(..) => note = meta.value_str(),
+ MetaItemKind::List(list) => {
+ let get = |meta: &MetaItem, item: &mut Option<Symbol>| {
+ if item.is_some() {
+ handle_errors(
+ &sess.parse_sess,
+ meta.span,
+ AttrError::MultipleItem(pprust::path_to_string(&meta.path)),
+ );
+ return false;
+ }
+ if let Some(v) = meta.value_str() {
+ *item = Some(v);
+ true
+ } else {
+ if let Some(lit) = meta.name_value_literal() {
+ handle_errors(
+ &sess.parse_sess,
+ lit.span,
+ AttrError::UnsupportedLiteral(
+ "literal in `deprecated` \
+ value must be a string",
+ lit.kind.is_bytestr(),
+ ),
+ );
+ } else {
+ struct_span_err!(diagnostic, meta.span, E0551, "incorrect meta item")
+ .emit();
+ }
+
+ false
+ }
+ };
+
+ for meta in list {
+ match meta {
+ NestedMetaItem::MetaItem(mi) => match mi.name_or_empty() {
+ sym::since => {
+ if !get(mi, &mut since) {
+ continue 'outer;
+ }
+ }
+ sym::note => {
+ if !get(mi, &mut note) {
+ continue 'outer;
+ }
+ }
+ sym::suggestion => {
+ if !sess.features_untracked().deprecated_suggestion {
+ let mut diag = sess.struct_span_err(
+ mi.span,
+ "suggestions on deprecated items are unstable",
+ );
+ if sess.is_nightly_build() {
+ diag.help("add `#![feature(deprecated_suggestion)]` to the crate root");
+ }
+ diag.note("see #94785 for more details").emit();
+ }
+
+ if !get(mi, &mut suggestion) {
+ continue 'outer;
+ }
+ }
+ _ => {
+ handle_errors(
+ &sess.parse_sess,
+ meta.span(),
+ AttrError::UnknownMetaItem(
+ pprust::path_to_string(&mi.path),
+ if sess.features_untracked().deprecated_suggestion {
+ &["since", "note", "suggestion"]
+ } else {
+ &["since", "note"]
+ },
+ ),
+ );
+ continue 'outer;
+ }
+ },
+ NestedMetaItem::Literal(lit) => {
+ handle_errors(
+ &sess.parse_sess,
+ lit.span,
+ AttrError::UnsupportedLiteral(
+ "item in `deprecated` must be a key/value pair",
+ false,
+ ),
+ );
+ continue 'outer;
+ }
+ }
+ }
+ }
+ }
+
+ if is_rustc {
+ if since.is_none() {
+ handle_errors(&sess.parse_sess, attr.span, AttrError::MissingSince);
+ continue;
+ }
+
+ if note.is_none() {
+ struct_span_err!(diagnostic, attr.span, E0543, "missing 'note'").emit();
+ continue;
+ }
+ }
+
+ depr = Some((
+ Deprecation { since, note, suggestion, is_since_rustc_version: is_rustc },
+ attr.span,
+ ));
+ }
+
+ depr
+}
+
+#[derive(PartialEq, Debug, Encodable, Decodable, Copy, Clone)]
+pub enum ReprAttr {
+ ReprInt(IntType),
+ ReprC,
+ ReprPacked(u32),
+ ReprSimd,
+ ReprTransparent,
+ ReprAlign(u32),
+}
+
+#[derive(Eq, PartialEq, Debug, Copy, Clone)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+pub enum IntType {
+ SignedInt(ast::IntTy),
+ UnsignedInt(ast::UintTy),
+}
+
+impl IntType {
+ #[inline]
+ pub fn is_signed(self) -> bool {
+ use IntType::*;
+
+ match self {
+ SignedInt(..) => true,
+ UnsignedInt(..) => false,
+ }
+ }
+}
+
+/// Parse `#[repr(...)]` forms.
+///
+/// Valid repr contents: any of the primitive integral type names (see
+/// `int_type_of_word`, below) to specify enum discriminant type; `C`, to use
+/// the same discriminant size that the corresponding C enum would (or the C
+/// structure layout); `packed`, to remove padding; and `transparent`, to
+/// delegate representation concerns to the only non-ZST field.
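+///
+/// For illustration, `#[repr(C, packed(2))]` yields `[ReprC, ReprPacked(2)]`.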
+pub fn find_repr_attrs(sess: &Session, attr: &Attribute) -> Vec<ReprAttr> {
+ if attr.has_name(sym::repr) { parse_repr_attr(sess, attr) } else { Vec::new() }
+}
+
+pub fn parse_repr_attr(sess: &Session, attr: &Attribute) -> Vec<ReprAttr> {
+ assert!(attr.has_name(sym::repr), "expected `#[repr(..)]`, found: {:?}", attr);
+ use ReprAttr::*;
+ let mut acc = Vec::new();
+ let diagnostic = &sess.parse_sess.span_diagnostic;
+
+ if let Some(items) = attr.meta_item_list() {
+ for item in items {
+ let mut recognised = false;
+ if item.is_word() {
+ let hint = match item.name_or_empty() {
+ sym::C => Some(ReprC),
+ sym::packed => Some(ReprPacked(1)),
+ sym::simd => Some(ReprSimd),
+ sym::transparent => Some(ReprTransparent),
+ sym::align => {
+ let mut err = struct_span_err!(
+ diagnostic,
+ item.span(),
+ E0589,
+ "invalid `repr(align)` attribute: `align` needs an argument"
+ );
+ err.span_suggestion(
+ item.span(),
+ "supply an argument here",
+ "align(...)",
+ Applicability::HasPlaceholders,
+ );
+ err.emit();
+ recognised = true;
+ None
+ }
+ name => int_type_of_word(name).map(ReprInt),
+ };
+
+ if let Some(h) = hint {
+ recognised = true;
+ acc.push(h);
+ }
+ } else if let Some((name, value)) = item.name_value_literal() {
+ let mut literal_error = None;
+ if name == sym::align {
+ recognised = true;
+ match parse_alignment(&value.kind) {
+ Ok(literal) => acc.push(ReprAlign(literal)),
+ Err(message) => literal_error = Some(message),
+ };
+ } else if name == sym::packed {
+ recognised = true;
+ match parse_alignment(&value.kind) {
+ Ok(literal) => acc.push(ReprPacked(literal)),
+ Err(message) => literal_error = Some(message),
+ };
+ } else if matches!(name, sym::C | sym::simd | sym::transparent)
+ || int_type_of_word(name).is_some()
+ {
+ recognised = true;
+ struct_span_err!(
+ diagnostic,
+ item.span(),
+ E0552,
+ "invalid representation hint: `{}` does not take a parenthesized argument list",
+ name.to_ident_string(),
+ ).emit();
+ }
+ if let Some(literal_error) = literal_error {
+ struct_span_err!(
+ diagnostic,
+ item.span(),
+ E0589,
+ "invalid `repr({})` attribute: {}",
+ name.to_ident_string(),
+ literal_error
+ )
+ .emit();
+ }
+ } else if let Some(meta_item) = item.meta_item() {
+ if let MetaItemKind::NameValue(ref value) = meta_item.kind {
+ if meta_item.has_name(sym::align) || meta_item.has_name(sym::packed) {
+ let name = meta_item.name_or_empty().to_ident_string();
+ recognised = true;
+ let mut err = struct_span_err!(
+ diagnostic,
+ item.span(),
+ E0693,
+ "incorrect `repr({})` attribute format",
+ name,
+ );
+ match value.kind {
+ ast::LitKind::Int(int, ast::LitIntType::Unsuffixed) => {
+ err.span_suggestion(
+ item.span(),
+ "use parentheses instead",
+ format!("{}({})", name, int),
+ Applicability::MachineApplicable,
+ );
+ }
+ ast::LitKind::Str(s, _) => {
+ err.span_suggestion(
+ item.span(),
+ "use parentheses instead",
+ format!("{}({})", name, s),
+ Applicability::MachineApplicable,
+ );
+ }
+ _ => {}
+ }
+ err.emit();
+ } else {
+ if matches!(
+ meta_item.name_or_empty(),
+ sym::C | sym::simd | sym::transparent
+ ) || int_type_of_word(meta_item.name_or_empty()).is_some()
+ {
+ recognised = true;
+ struct_span_err!(
+ diagnostic,
+ meta_item.span,
+ E0552,
+ "invalid representation hint: `{}` does not take a value",
+ meta_item.name_or_empty().to_ident_string(),
+ )
+ .emit();
+ }
+ }
+ } else if let MetaItemKind::List(_) = meta_item.kind {
+ if meta_item.has_name(sym::align) {
+ recognised = true;
+ struct_span_err!(
+ diagnostic,
+ meta_item.span,
+ E0693,
+ "incorrect `repr(align)` attribute format: \
+ `align` takes exactly one argument in parentheses"
+ )
+ .emit();
+ } else if meta_item.has_name(sym::packed) {
+ recognised = true;
+ struct_span_err!(
+ diagnostic,
+ meta_item.span,
+ E0552,
+ "incorrect `repr(packed)` attribute format: \
+ `packed` takes exactly one parenthesized argument, \
+ or no parentheses at all"
+ )
+ .emit();
+ } else if matches!(
+ meta_item.name_or_empty(),
+ sym::C | sym::simd | sym::transparent
+ ) || int_type_of_word(meta_item.name_or_empty()).is_some()
+ {
+ recognised = true;
+ struct_span_err!(
+ diagnostic,
+ meta_item.span,
+ E0552,
+ "invalid representation hint: `{}` does not take a parenthesized argument list",
+ meta_item.name_or_empty().to_ident_string(),
+ ).emit();
+ }
+ }
+ }
+ if !recognised {
+                // Not a hint we recognize. This will be caught and reported by
+ // the `check_mod_attrs` pass, but this pass doesn't always run
+ // (e.g. if we only pretty-print the source), so we have to gate
+ // the `delay_span_bug` call as follows:
+ if sess.opts.pretty.map_or(true, |pp| pp.needs_analysis()) {
+ diagnostic.delay_span_bug(item.span(), "unrecognized representation hint");
+ }
+ }
+ }
+ }
+ acc
+}
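+
+// Accepted `repr` forms and their parses (illustrative summary of the code
+// above; argument values are examples):
+//
+//     #[repr(C)]           -> ReprC
+//     #[repr(packed)]      -> ReprPacked(1)
+//     #[repr(packed(2))]   -> ReprPacked(2)
+//     #[repr(align(8))]    -> ReprAlign(8)
+//     #[repr(simd)]        -> ReprSimd
+//     #[repr(transparent)] -> ReprTransparent
+//     #[repr(u8)]          -> ReprInt(UnsignedInt(ast::UintTy::U8))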
+
+fn int_type_of_word(s: Symbol) -> Option<IntType> {
+ use IntType::*;
+
+ match s {
+ sym::i8 => Some(SignedInt(ast::IntTy::I8)),
+ sym::u8 => Some(UnsignedInt(ast::UintTy::U8)),
+ sym::i16 => Some(SignedInt(ast::IntTy::I16)),
+ sym::u16 => Some(UnsignedInt(ast::UintTy::U16)),
+ sym::i32 => Some(SignedInt(ast::IntTy::I32)),
+ sym::u32 => Some(UnsignedInt(ast::UintTy::U32)),
+ sym::i64 => Some(SignedInt(ast::IntTy::I64)),
+ sym::u64 => Some(UnsignedInt(ast::UintTy::U64)),
+ sym::i128 => Some(SignedInt(ast::IntTy::I128)),
+ sym::u128 => Some(UnsignedInt(ast::UintTy::U128)),
+ sym::isize => Some(SignedInt(ast::IntTy::Isize)),
+ sym::usize => Some(UnsignedInt(ast::UintTy::Usize)),
+ _ => None,
+ }
+}
+
+pub enum TransparencyError {
+ UnknownTransparency(Symbol, Span),
+ MultipleTransparencyAttrs(Span, Span),
+}
+
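+/// Resolves a macro's hygiene transparency from any
+/// `#[rustc_macro_transparency = "..."]` attribute in `attrs`.
+///
+/// Illustrative behavior:
+///
+/// - `#[rustc_macro_transparency = "opaque"]` -> `(Transparency::Opaque, None)`
+/// - no such attribute -> `SemiTransparent` for `macro_rules!` macros,
+///   `Opaque` otherwise
+/// - two such attributes -> the first one wins, paired with
+///   `Some(TransparencyError::MultipleTransparencyAttrs(..))`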
+pub fn find_transparency(
+ attrs: &[Attribute],
+ macro_rules: bool,
+) -> (Transparency, Option<TransparencyError>) {
+ let mut transparency = None;
+ let mut error = None;
+ for attr in attrs {
+ if attr.has_name(sym::rustc_macro_transparency) {
+ if let Some((_, old_span)) = transparency {
+ error = Some(TransparencyError::MultipleTransparencyAttrs(old_span, attr.span));
+ break;
+ } else if let Some(value) = attr.value_str() {
+ transparency = Some((
+ match value {
+ sym::transparent => Transparency::Transparent,
+ sym::semitransparent => Transparency::SemiTransparent,
+ sym::opaque => Transparency::Opaque,
+ _ => {
+ error = Some(TransparencyError::UnknownTransparency(value, attr.span));
+ continue;
+ }
+ },
+ attr.span,
+ ));
+ }
+ }
+ }
+ let fallback = if macro_rules { Transparency::SemiTransparent } else { Transparency::Opaque };
+ (transparency.map_or(fallback, |t| t.0), error)
+}
+
+pub fn allow_internal_unstable<'a>(
+ sess: &'a Session,
+ attrs: &'a [Attribute],
+) -> impl Iterator<Item = Symbol> + 'a {
+ allow_unstable(sess, attrs, sym::allow_internal_unstable)
+}
+
+pub fn rustc_allow_const_fn_unstable<'a>(
+ sess: &'a Session,
+ attrs: &'a [Attribute],
+) -> impl Iterator<Item = Symbol> + 'a {
+ allow_unstable(sess, attrs, sym::rustc_allow_const_fn_unstable)
+}
+
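+/// Shared implementation of the two helpers above: iterates the
+/// `#[<symbol>(...)]` attributes and yields the listed feature names. For
+/// example (illustrative), `#[allow_internal_unstable(core_intrinsics,
+/// rustc_attrs)]` yields the symbols `core_intrinsics` and `rustc_attrs`;
+/// malformed attributes produce errors and are skipped.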
+fn allow_unstable<'a>(
+ sess: &'a Session,
+ attrs: &'a [Attribute],
+ symbol: Symbol,
+) -> impl Iterator<Item = Symbol> + 'a {
+ let attrs = sess.filter_by_name(attrs, symbol);
+ let list = attrs
+ .filter_map(move |attr| {
+ attr.meta_item_list().or_else(|| {
+ sess.diagnostic().span_err(
+ attr.span,
+ &format!("`{}` expects a list of feature names", symbol.to_ident_string()),
+ );
+ None
+ })
+ })
+ .flatten();
+
+ list.into_iter().filter_map(move |it| {
+ let name = it.ident().map(|ident| ident.name);
+ if name.is_none() {
+ sess.diagnostic().span_err(
+ it.span(),
+ &format!("`{}` expects feature names", symbol.to_ident_string()),
+ );
+ }
+ name
+ })
+}
+
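+/// Parses the integer literal in `#[repr(align(N))]` / `#[repr(packed(N))]`.
+/// Illustrative results:
+///
+/// - unsuffixed `8` -> `Ok(8)`
+/// - unsuffixed `3` -> `Err("not a power of two")`
+/// - unsuffixed `1 << 30` -> `Err("larger than 2^29")`
+/// - suffixed `8u32` -> `Err("not an unsuffixed integer")`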
+pub fn parse_alignment(node: &ast::LitKind) -> Result<u32, &'static str> {
+ if let ast::LitKind::Int(literal, ast::LitIntType::Unsuffixed) = node {
+ if literal.is_power_of_two() {
+ // rustc_middle::ty::layout::Align restricts align to <= 2^29
+ if *literal <= 1 << 29 { Ok(*literal as u32) } else { Err("larger than 2^29") }
+ } else {
+ Err("not a power of two")
+ }
+ } else {
+ Err("not an unsuffixed integer")
+ }
+}
diff --git a/compiler/rustc_attr/src/lib.rs b/compiler/rustc_attr/src/lib.rs
new file mode 100644
index 000000000..c3f9f0cf3
--- /dev/null
+++ b/compiler/rustc_attr/src/lib.rs
@@ -0,0 +1,22 @@
+//! Functions and types dealing with attributes and meta items.
+//!
+//! FIXME(Centril): For the time being, much of the logic is still in `rustc_ast::attr`.
+//! The goal is to move the definition of `MetaItem` and things that don't need to be in `syntax`
+//! to this crate.
+
+#![feature(let_chains)]
+#![feature(let_else)]
+
+#[macro_use]
+extern crate rustc_macros;
+
+mod builtin;
+
+pub use builtin::*;
+pub use IntType::*;
+pub use ReprAttr::*;
+pub use StabilityLevel::*;
+
+pub use rustc_ast::attr::*;
+
+pub(crate) use rustc_ast::HashStableContext;
diff --git a/compiler/rustc_borrowck/Cargo.toml b/compiler/rustc_borrowck/Cargo.toml
new file mode 100644
index 000000000..fbf628e86
--- /dev/null
+++ b/compiler/rustc_borrowck/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "rustc_borrowck"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+either = "1.5.0"
+itertools = "0.10.1"
+tracing = "0.1"
+polonius-engine = "0.13.0"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_lexer = { path = "../rustc_lexer" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_const_eval = { path = "../rustc_const_eval" }
+rustc_mir_dataflow = { path = "../rustc_mir_dataflow" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
+rustc_target = { path = "../rustc_target" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_traits = { path = "../rustc_traits" }
+rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_borrowck/src/borrow_set.rs b/compiler/rustc_borrowck/src/borrow_set.rs
new file mode 100644
index 000000000..41279588e
--- /dev/null
+++ b/compiler/rustc_borrowck/src/borrow_set.rs
@@ -0,0 +1,345 @@
+use crate::nll::ToRegionVid;
+use crate::path_utils::allow_two_phase_borrow;
+use crate::place_ext::PlaceExt;
+use crate::BorrowIndex;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::traversal;
+use rustc_middle::mir::visit::{MutatingUseContext, NonUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{self, Body, Local, Location};
+use rustc_middle::ty::{RegionVid, TyCtxt};
+use rustc_mir_dataflow::move_paths::MoveData;
+use std::fmt;
+use std::ops::Index;
+
+pub struct BorrowSet<'tcx> {
+ /// The fundamental map relating bitvector indexes to the borrows
+ /// in the MIR. Each borrow is also uniquely identified in the MIR
+ /// by the `Location` of the assignment statement in which it
+ /// appears on the right hand side. Thus the location is the map
+ /// key, and its position in the map corresponds to `BorrowIndex`.
+ pub location_map: FxIndexMap<Location, BorrowData<'tcx>>,
+
+ /// Locations which activate borrows.
+ /// NOTE: a given location may activate more than one borrow in the future
+ /// when more general two-phase borrow support is introduced, but for now we
+ /// only need to store one borrow index.
+ pub activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
+
+ /// Map from local to all the borrows on that local.
+ pub local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
+
+ pub(crate) locals_state_at_exit: LocalsStateAtExit,
+}
+
+impl<'tcx> Index<BorrowIndex> for BorrowSet<'tcx> {
+ type Output = BorrowData<'tcx>;
+
+ fn index(&self, index: BorrowIndex) -> &BorrowData<'tcx> {
+ &self.location_map[index.as_usize()]
+ }
+}
+
+/// Location where a two-phase borrow is activated, if a borrow
+/// is in fact a two-phase borrow.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum TwoPhaseActivation {
+ NotTwoPhase,
+ NotActivated,
+ ActivatedAt(Location),
+}
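+
+// Illustrative lifecycle (sketched MIR, not verbatim syntax):
+//
+//     _2 = &two_phase _1;  // reservation; activation_location = NotActivated
+//     ...
+//     use(_2);             // first real use; becomes ActivatedAt(this location)
+//
+// Borrows that are not two-phase stay `NotTwoPhase` throughout.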
+
+#[derive(Debug, Clone)]
+pub struct BorrowData<'tcx> {
+ /// Location where the borrow reservation starts.
+ /// In many cases, this will be equal to the activation location but not always.
+ pub reserve_location: Location,
+ /// Location where the borrow is activated.
+ pub activation_location: TwoPhaseActivation,
+ /// What kind of borrow this is
+ pub kind: mir::BorrowKind,
+ /// The region for which this borrow is live
+ pub region: RegionVid,
+ /// Place from which we are borrowing
+ pub borrowed_place: mir::Place<'tcx>,
+ /// Place to which the borrow was stored
+ pub assigned_place: mir::Place<'tcx>,
+}
+
+impl<'tcx> fmt::Display for BorrowData<'tcx> {
+ fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let kind = match self.kind {
+ mir::BorrowKind::Shared => "",
+ mir::BorrowKind::Shallow => "shallow ",
+ mir::BorrowKind::Unique => "uniq ",
+ mir::BorrowKind::Mut { .. } => "mut ",
+ };
+ write!(w, "&{:?} {}{:?}", self.region, kind, self.borrowed_place)
+ }
+}
+
+pub enum LocalsStateAtExit {
+ AllAreInvalidated,
+ SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> },
+}
+
+impl LocalsStateAtExit {
+ fn build<'tcx>(
+ locals_are_invalidated_at_exit: bool,
+ body: &Body<'tcx>,
+ move_data: &MoveData<'tcx>,
+ ) -> Self {
+ struct HasStorageDead(BitSet<Local>);
+
+ impl<'tcx> Visitor<'tcx> for HasStorageDead {
+ fn visit_local(&mut self, local: Local, ctx: PlaceContext, _: Location) {
+ if ctx == PlaceContext::NonUse(NonUseContext::StorageDead) {
+ self.0.insert(local);
+ }
+ }
+ }
+
+ if locals_are_invalidated_at_exit {
+ LocalsStateAtExit::AllAreInvalidated
+ } else {
+ let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len()));
+ has_storage_dead.visit_body(&body);
+ let mut has_storage_dead_or_moved = has_storage_dead.0;
+ for move_out in &move_data.moves {
+ if let Some(index) = move_data.base_local(move_out.path) {
+ has_storage_dead_or_moved.insert(index);
+ }
+ }
+ LocalsStateAtExit::SomeAreInvalidated { has_storage_dead_or_moved }
+ }
+ }
+}
+
+impl<'tcx> BorrowSet<'tcx> {
+ pub fn build(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ locals_are_invalidated_at_exit: bool,
+ move_data: &MoveData<'tcx>,
+ ) -> Self {
+ let mut visitor = GatherBorrows {
+ tcx,
+ body: &body,
+ location_map: Default::default(),
+ activation_map: Default::default(),
+ local_map: Default::default(),
+ pending_activations: Default::default(),
+ locals_state_at_exit: LocalsStateAtExit::build(
+ locals_are_invalidated_at_exit,
+ body,
+ move_data,
+ ),
+ };
+
+ for (block, block_data) in traversal::preorder(&body) {
+ visitor.visit_basic_block_data(block, block_data);
+ }
+
+ BorrowSet {
+ location_map: visitor.location_map,
+ activation_map: visitor.activation_map,
+ local_map: visitor.local_map,
+ locals_state_at_exit: visitor.locals_state_at_exit,
+ }
+ }
+
+ pub(crate) fn activations_at_location(&self, location: Location) -> &[BorrowIndex] {
+ self.activation_map.get(&location).map_or(&[], |activations| &activations[..])
+ }
+
+ pub(crate) fn len(&self) -> usize {
+ self.location_map.len()
+ }
+
+ pub(crate) fn indices(&self) -> impl Iterator<Item = BorrowIndex> {
+ BorrowIndex::from_usize(0)..BorrowIndex::from_usize(self.len())
+ }
+
+ pub(crate) fn iter_enumerated(&self) -> impl Iterator<Item = (BorrowIndex, &BorrowData<'tcx>)> {
+ self.indices().zip(self.location_map.values())
+ }
+
+ pub(crate) fn get_index_of(&self, location: &Location) -> Option<BorrowIndex> {
+ self.location_map.get_index_of(location).map(BorrowIndex::from)
+ }
+}
+
+struct GatherBorrows<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ location_map: FxIndexMap<Location, BorrowData<'tcx>>,
+ activation_map: FxHashMap<Location, Vec<BorrowIndex>>,
+ local_map: FxHashMap<mir::Local, FxHashSet<BorrowIndex>>,
+
+ /// When we encounter a 2-phase borrow statement, it will always
+ /// be assigning into a temporary TEMP:
+ ///
+ /// TEMP = &foo
+ ///
+ /// We add TEMP into this map with `b`, where `b` is the index of
+ /// the borrow. When we find a later use of this activation, we
+    /// remove it from the map.
+ pending_activations: FxHashMap<mir::Local, BorrowIndex>,
+
+ locals_state_at_exit: LocalsStateAtExit,
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for GatherBorrows<'a, 'tcx> {
+ fn visit_assign(
+ &mut self,
+ assigned_place: &mir::Place<'tcx>,
+ rvalue: &mir::Rvalue<'tcx>,
+ location: mir::Location,
+ ) {
+ if let mir::Rvalue::Ref(region, kind, ref borrowed_place) = *rvalue {
+ if borrowed_place.ignore_borrow(self.tcx, self.body, &self.locals_state_at_exit) {
+ debug!("ignoring_borrow of {:?}", borrowed_place);
+ return;
+ }
+
+ let region = region.to_region_vid();
+
+ let borrow = BorrowData {
+ kind,
+ region,
+ reserve_location: location,
+ activation_location: TwoPhaseActivation::NotTwoPhase,
+ borrowed_place: *borrowed_place,
+ assigned_place: *assigned_place,
+ };
+ let (idx, _) = self.location_map.insert_full(location, borrow);
+ let idx = BorrowIndex::from(idx);
+
+ self.insert_as_pending_if_two_phase(location, assigned_place, kind, idx);
+
+ self.local_map.entry(borrowed_place.local).or_default().insert(idx);
+ }
+
+ self.super_assign(assigned_place, rvalue, location)
+ }
+
+ fn visit_local(&mut self, temp: Local, context: PlaceContext, location: Location) {
+ if !context.is_use() {
+ return;
+ }
+
+        // We found a use of some temporary TMP;
+        // check whether we (earlier) saw a 2-phase borrow like
+ //
+ // TMP = &mut place
+ if let Some(&borrow_index) = self.pending_activations.get(&temp) {
+ let borrow_data = &mut self.location_map[borrow_index.as_usize()];
+
+ // Watch out: the use of TMP in the borrow itself
+ // doesn't count as an activation. =)
+ if borrow_data.reserve_location == location
+ && context == PlaceContext::MutatingUse(MutatingUseContext::Store)
+ {
+ return;
+ }
+
+ if let TwoPhaseActivation::ActivatedAt(other_location) = borrow_data.activation_location
+ {
+ span_bug!(
+ self.body.source_info(location).span,
+ "found two uses for 2-phase borrow temporary {:?}: \
+ {:?} and {:?}",
+ temp,
+ location,
+ other_location,
+ );
+ }
+
+ // Otherwise, this is the unique later use that we expect.
+ // Double check: This borrow is indeed a two-phase borrow (that is,
+ // we are 'transitioning' from `NotActivated` to `ActivatedAt`) and
+ // we've not found any other activations (checked above).
+ assert_eq!(
+ borrow_data.activation_location,
+ TwoPhaseActivation::NotActivated,
+ "never found an activation for this borrow!",
+ );
+ self.activation_map.entry(location).or_default().push(borrow_index);
+
+ borrow_data.activation_location = TwoPhaseActivation::ActivatedAt(location);
+ }
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: mir::Location) {
+ if let mir::Rvalue::Ref(region, kind, ref place) = *rvalue {
+            // Double-check that we already registered a `BorrowData` for this borrow.
+
+ let borrow_data = &self.location_map[&location];
+ assert_eq!(borrow_data.reserve_location, location);
+ assert_eq!(borrow_data.kind, kind);
+ assert_eq!(borrow_data.region, region.to_region_vid());
+ assert_eq!(borrow_data.borrowed_place, *place);
+ }
+
+ self.super_rvalue(rvalue, location)
+ }
+}
+
+impl<'a, 'tcx> GatherBorrows<'a, 'tcx> {
+ /// If this is a two-phase borrow, then we will record it
+ /// as "pending" until we find the activating use.
+ fn insert_as_pending_if_two_phase(
+ &mut self,
+ start_location: Location,
+ assigned_place: &mir::Place<'tcx>,
+ kind: mir::BorrowKind,
+ borrow_index: BorrowIndex,
+ ) {
+ debug!(
+ "Borrows::insert_as_pending_if_two_phase({:?}, {:?}, {:?})",
+ start_location, assigned_place, borrow_index,
+ );
+
+ if !allow_two_phase_borrow(kind) {
+ debug!(" -> {:?}", start_location);
+ return;
+ }
+
+ // When we encounter a 2-phase borrow statement, it will always
+ // be assigning into a temporary TEMP:
+ //
+ // TEMP = &foo
+ //
+ // so extract `temp`.
+ let Some(temp) = assigned_place.as_local() else {
+ span_bug!(
+ self.body.source_info(start_location).span,
+ "expected 2-phase borrow to assign to a local, not `{:?}`",
+ assigned_place,
+ );
+ };
+
+ // Consider the borrow not activated to start. When we find an activation, we'll update
+ // this field.
+ {
+ let borrow_data = &mut self.location_map[borrow_index.as_usize()];
+ borrow_data.activation_location = TwoPhaseActivation::NotActivated;
+ }
+
+ // Insert `temp` into the list of pending activations. From
+ // now on, we'll be on the lookout for a use of it. Note that
+ // we are guaranteed that this use will come after the
+ // assignment.
+ let old_value = self.pending_activations.insert(temp, borrow_index);
+ if let Some(old_index) = old_value {
+ span_bug!(
+ self.body.source_info(start_location).span,
+ "found already pending activation for temp: {:?} \
+ at borrow_index: {:?} with associated data {:?}",
+ temp,
+ old_index,
+ self.location_map[old_index.as_usize()]
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/borrowck_errors.rs b/compiler/rustc_borrowck/src/borrowck_errors.rs
new file mode 100644
index 000000000..08ea00d71
--- /dev/null
+++ b/compiler/rustc_borrowck/src/borrowck_errors.rs
@@ -0,0 +1,486 @@
+use rustc_errors::{
+ struct_span_err, DiagnosticBuilder, DiagnosticId, DiagnosticMessage, ErrorGuaranteed, MultiSpan,
+};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::Span;
+
+impl<'cx, 'tcx> crate::MirBorrowckCtxt<'cx, 'tcx> {
+ pub(crate) fn cannot_move_when_borrowed(
+ &self,
+ span: Span,
+ desc: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ struct_span_err!(self, span, E0505, "cannot move out of {} because it is borrowed", desc,)
+ }
+
+ pub(crate) fn cannot_use_when_mutably_borrowed(
+ &self,
+ span: Span,
+ desc: &str,
+ borrow_span: Span,
+ borrow_desc: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ span,
+ E0503,
+ "cannot use {} because it was mutably borrowed",
+ desc,
+ );
+
+ err.span_label(borrow_span, format!("borrow of {} occurs here", borrow_desc));
+ err.span_label(span, format!("use of borrowed {}", borrow_desc));
+ err
+ }
+
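+    /// Rendered shape of the resulting E0499 (illustrative, abbreviated):
+    ///
+    /// ```text
+    /// error[E0499]: cannot borrow `x` as mutable more than once at a time
+    ///    |
+    ///    | let a = &mut x;
+    ///    |         ------ first mutable borrow occurs here
+    ///    | let b = &mut x;
+    ///    |         ^^^^^^ second mutable borrow occurs here
+    /// ```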
+ pub(crate) fn cannot_mutably_borrow_multiply(
+ &self,
+ new_loan_span: Span,
+ desc: &str,
+ opt_via: &str,
+ old_loan_span: Span,
+ old_opt_via: &str,
+ old_load_end_span: Option<Span>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let via =
+ |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {})", msg) };
+ let mut err = struct_span_err!(
+ self,
+ new_loan_span,
+ E0499,
+ "cannot borrow {}{} as mutable more than once at a time",
+ desc,
+ via(opt_via),
+ );
+ if old_loan_span == new_loan_span {
+ // Both borrows are happening in the same place
+ // Meaning the borrow is occurring in a loop
+ err.span_label(
+ new_loan_span,
+ format!(
+ "{}{} was mutably borrowed here in the previous iteration of the loop{}",
+ desc,
+ via(opt_via),
+ opt_via,
+ ),
+ );
+ if let Some(old_load_end_span) = old_load_end_span {
+ err.span_label(old_load_end_span, "mutable borrow ends here");
+ }
+ } else {
+ err.span_label(
+ old_loan_span,
+ format!("first mutable borrow occurs here{}", via(old_opt_via)),
+ );
+ err.span_label(
+ new_loan_span,
+ format!("second mutable borrow occurs here{}", via(opt_via)),
+ );
+ if let Some(old_load_end_span) = old_load_end_span {
+ err.span_label(old_load_end_span, "first borrow ends here");
+ }
+ }
+ err
+ }
+
+ pub(crate) fn cannot_uniquely_borrow_by_two_closures(
+ &self,
+ new_loan_span: Span,
+ desc: &str,
+ old_loan_span: Span,
+ old_load_end_span: Option<Span>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ new_loan_span,
+ E0524,
+ "two closures require unique access to {} at the same time",
+ desc,
+ );
+ if old_loan_span == new_loan_span {
+ err.span_label(
+ old_loan_span,
+ "closures are constructed here in different iterations of loop",
+ );
+ } else {
+ err.span_label(old_loan_span, "first closure is constructed here");
+ err.span_label(new_loan_span, "second closure is constructed here");
+ }
+ if let Some(old_load_end_span) = old_load_end_span {
+ err.span_label(old_load_end_span, "borrow from first closure ends here");
+ }
+ err
+ }
+
+ pub(crate) fn cannot_uniquely_borrow_by_one_closure(
+ &self,
+ new_loan_span: Span,
+ container_name: &str,
+ desc_new: &str,
+ opt_via: &str,
+ old_loan_span: Span,
+ noun_old: &str,
+ old_opt_via: &str,
+ previous_end_span: Option<Span>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ new_loan_span,
+ E0500,
+ "closure requires unique access to {} but {} is already borrowed{}",
+ desc_new,
+ noun_old,
+ old_opt_via,
+ );
+ err.span_label(
+ new_loan_span,
+ format!("{} construction occurs here{}", container_name, opt_via),
+ );
+ err.span_label(old_loan_span, format!("borrow occurs here{}", old_opt_via));
+ if let Some(previous_end_span) = previous_end_span {
+ err.span_label(previous_end_span, "borrow ends here");
+ }
+ err
+ }
+
+ pub(crate) fn cannot_reborrow_already_uniquely_borrowed(
+ &self,
+ new_loan_span: Span,
+ container_name: &str,
+ desc_new: &str,
+ opt_via: &str,
+ kind_new: &str,
+ old_loan_span: Span,
+ old_opt_via: &str,
+ previous_end_span: Option<Span>,
+ second_borrow_desc: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ new_loan_span,
+ E0501,
+ "cannot borrow {}{} as {} because previous closure requires unique access",
+ desc_new,
+ opt_via,
+ kind_new,
+ );
+ err.span_label(
+ new_loan_span,
+ format!("{}borrow occurs here{}", second_borrow_desc, opt_via),
+ );
+ err.span_label(
+ old_loan_span,
+ format!("{} construction occurs here{}", container_name, old_opt_via),
+ );
+ if let Some(previous_end_span) = previous_end_span {
+ err.span_label(previous_end_span, "borrow from closure ends here");
+ }
+ err
+ }
+
+ pub(crate) fn cannot_reborrow_already_borrowed(
+ &self,
+ span: Span,
+ desc_new: &str,
+ msg_new: &str,
+ kind_new: &str,
+ old_span: Span,
+ noun_old: &str,
+ kind_old: &str,
+ msg_old: &str,
+ old_load_end_span: Option<Span>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let via =
+ |msg: &str| if msg.is_empty() { "".to_string() } else { format!(" (via {})", msg) };
+ let mut err = struct_span_err!(
+ self,
+ span,
+ E0502,
+ "cannot borrow {}{} as {} because {} is also borrowed as {}{}",
+ desc_new,
+ via(msg_new),
+ kind_new,
+ noun_old,
+ kind_old,
+ via(msg_old),
+ );
+
+ if msg_new == "" {
+ // If `msg_new` is empty, then this isn't a borrow of a union field.
+ err.span_label(span, format!("{} borrow occurs here", kind_new));
+ err.span_label(old_span, format!("{} borrow occurs here", kind_old));
+ } else {
+            // If `msg_new` isn't empty, then this is a borrow of a union field.
+ err.span_label(
+ span,
+ format!(
+ "{} borrow of {} -- which overlaps with {} -- occurs here",
+ kind_new, msg_new, msg_old,
+ ),
+ );
+ err.span_label(old_span, format!("{} borrow occurs here{}", kind_old, via(msg_old)));
+ }
+
+ if let Some(old_load_end_span) = old_load_end_span {
+ err.span_label(old_load_end_span, format!("{} borrow ends here", kind_old));
+ }
+ err
+ }
+
+ pub(crate) fn cannot_assign_to_borrowed(
+ &self,
+ span: Span,
+ borrow_span: Span,
+ desc: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ span,
+ E0506,
+ "cannot assign to {} because it is borrowed",
+ desc,
+ );
+
+ err.span_label(borrow_span, format!("borrow of {} occurs here", desc));
+ err.span_label(span, format!("assignment to borrowed {} occurs here", desc));
+ err
+ }
+
+ pub(crate) fn cannot_reassign_immutable(
+ &self,
+ span: Span,
+ desc: &str,
+ is_arg: bool,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let msg = if is_arg { "to immutable argument" } else { "twice to immutable variable" };
+ struct_span_err!(self, span, E0384, "cannot assign {} {}", msg, desc)
+ }
+
+ pub(crate) fn cannot_assign(
+ &self,
+ span: Span,
+ desc: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ struct_span_err!(self, span, E0594, "cannot assign to {}", desc)
+ }
+
+ pub(crate) fn cannot_move_out_of(
+ &self,
+ move_from_span: Span,
+ move_from_desc: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ struct_span_err!(self, move_from_span, E0507, "cannot move out of {}", move_from_desc,)
+ }
+
+ /// Signal an error due to an attempt to move out of the interior
+    /// of an array or slice. `is_index` is `None` when the error origin
+    /// didn't capture whether or not there was an indexing operation.
+ pub(crate) fn cannot_move_out_of_interior_noncopy(
+ &self,
+ move_from_span: Span,
+ ty: Ty<'_>,
+ is_index: Option<bool>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let type_name = match (&ty.kind(), is_index) {
+ (&ty::Array(_, _), Some(true)) | (&ty::Array(_, _), None) => "array",
+ (&ty::Slice(_), _) => "slice",
+ _ => span_bug!(move_from_span, "this path should not cause illegal move"),
+ };
+ let mut err = struct_span_err!(
+ self,
+ move_from_span,
+ E0508,
+ "cannot move out of type `{}`, a non-copy {}",
+ ty,
+ type_name,
+ );
+ err.span_label(move_from_span, "cannot move out of here");
+ err
+ }
+
+ pub(crate) fn cannot_move_out_of_interior_of_drop(
+ &self,
+ move_from_span: Span,
+ container_ty: Ty<'_>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ move_from_span,
+ E0509,
+ "cannot move out of type `{}`, which implements the `Drop` trait",
+ container_ty,
+ );
+ err.span_label(move_from_span, "cannot move out of here");
+ err
+ }
+
+ pub(crate) fn cannot_act_on_moved_value(
+ &self,
+ use_span: Span,
+ verb: &str,
+ optional_adverb_for_moved: &str,
+ moved_path: Option<String>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let moved_path = moved_path.map(|mp| format!(": `{}`", mp)).unwrap_or_default();
+
+ struct_span_err!(
+ self,
+ use_span,
+ E0382,
+ "{} of {}moved value{}",
+ verb,
+ optional_adverb_for_moved,
+ moved_path,
+ )
+ }
+
+ pub(crate) fn cannot_borrow_path_as_mutable_because(
+ &self,
+ span: Span,
+ path: &str,
+ reason: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ struct_span_err!(self, span, E0596, "cannot borrow {} as mutable{}", path, reason,)
+ }
+
+ pub(crate) fn cannot_mutate_in_immutable_section(
+ &self,
+ mutate_span: Span,
+ immutable_span: Span,
+ immutable_place: &str,
+ immutable_section: &str,
+ action: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ mutate_span,
+ E0510,
+ "cannot {} {} in {}",
+ action,
+ immutable_place,
+ immutable_section,
+ );
+ err.span_label(mutate_span, format!("cannot {}", action));
+ err.span_label(immutable_span, format!("value is immutable in {}", immutable_section));
+ err
+ }
+
+ pub(crate) fn cannot_borrow_across_generator_yield(
+ &self,
+ span: Span,
+ yield_span: Span,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ span,
+ E0626,
+ "borrow may still be in use when generator yields",
+ );
+ err.span_label(yield_span, "possible yield occurs here");
+ err
+ }
+
+ pub(crate) fn cannot_borrow_across_destructor(
+ &self,
+ borrow_span: Span,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ struct_span_err!(
+ self,
+ borrow_span,
+ E0713,
+ "borrow may still be in use when destructor runs",
+ )
+ }
+
+ pub(crate) fn path_does_not_live_long_enough(
+ &self,
+ span: Span,
+ path: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ struct_span_err!(self, span, E0597, "{} does not live long enough", path,)
+ }
+
+ pub(crate) fn cannot_return_reference_to_local(
+ &self,
+ span: Span,
+ return_kind: &str,
+ reference_desc: &str,
+ path_desc: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ span,
+ E0515,
+ "cannot {RETURN} {REFERENCE} {LOCAL}",
+ RETURN = return_kind,
+ REFERENCE = reference_desc,
+ LOCAL = path_desc,
+ );
+
+ err.span_label(
+ span,
+ format!("{}s a {} data owned by the current function", return_kind, reference_desc),
+ );
+
+ err
+ }
+
+ pub(crate) fn cannot_capture_in_long_lived_closure(
+ &self,
+ closure_span: Span,
+ closure_kind: &str,
+ borrowed_path: &str,
+ capture_span: Span,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self,
+ closure_span,
+ E0373,
+ "{} may outlive the current function, but it borrows {}, which is owned by the current \
+ function",
+ closure_kind,
+ borrowed_path,
+ );
+ err.span_label(capture_span, format!("{} is borrowed here", borrowed_path))
+ .span_label(closure_span, format!("may outlive borrowed value {}", borrowed_path));
+ err
+ }
+
+ pub(crate) fn thread_local_value_does_not_live_long_enough(
+ &self,
+ span: Span,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ struct_span_err!(self, span, E0712, "thread-local variable borrowed past end of function",)
+ }
+
+ pub(crate) fn temporary_value_borrowed_for_too_long(
+ &self,
+ span: Span,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ struct_span_err!(self, span, E0716, "temporary value dropped while borrowed",)
+ }
+
+ #[rustc_lint_diagnostics]
+ pub(crate) fn struct_span_err_with_code<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ self.infcx.tcx.sess.struct_span_err_with_code(sp, msg, code)
+ }
+}
+
+pub(crate) fn borrowed_data_escapes_closure<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ escape_span: Span,
+ escapes_from: &str,
+) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ struct_span_err!(
+ tcx.sess,
+ escape_span,
+ E0521,
+ "borrowed data escapes outside of {}",
+ escapes_from,
+ )
+}
diff --git a/compiler/rustc_borrowck/src/constraint_generation.rs b/compiler/rustc_borrowck/src/constraint_generation.rs
new file mode 100644
index 000000000..5e9cec5c3
--- /dev/null
+++ b/compiler/rustc_borrowck/src/constraint_generation.rs
@@ -0,0 +1,250 @@
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::mir::visit::TyContext;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{
+ BasicBlock, BasicBlockData, Body, Local, Location, Place, PlaceRef, ProjectionElem, Rvalue,
+ SourceInfo, Statement, StatementKind, Terminator, TerminatorKind, UserTypeProjection,
+};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, RegionVid, Ty};
+
+use crate::{
+ borrow_set::BorrowSet, facts::AllFacts, location::LocationTable, nll::ToRegionVid,
+ places_conflict, region_infer::values::LivenessValues,
+};
+
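+/// Entry point of this module: walks every basic block with the
+/// `ConstraintGeneration` visitor below, adding liveness constraints for the
+/// regions appearing in types, substs and regions and, when Polonius facts
+/// are being collected (`all_facts.is_some()`), recording CFG edges and
+/// killed loans.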
+pub(super) fn generate_constraints<'cx, 'tcx>(
+ infcx: &InferCtxt<'cx, 'tcx>,
+ liveness_constraints: &mut LivenessValues<RegionVid>,
+ all_facts: &mut Option<AllFacts>,
+ location_table: &LocationTable,
+ body: &Body<'tcx>,
+ borrow_set: &BorrowSet<'tcx>,
+) {
+ let mut cg = ConstraintGeneration {
+ borrow_set,
+ infcx,
+ liveness_constraints,
+ location_table,
+ all_facts,
+ body,
+ };
+
+ for (bb, data) in body.basic_blocks().iter_enumerated() {
+ cg.visit_basic_block_data(bb, data);
+ }
+}
+
+/// 'cg = the duration of the constraint generation process itself.
+struct ConstraintGeneration<'cg, 'cx, 'tcx> {
+ infcx: &'cg InferCtxt<'cx, 'tcx>,
+ all_facts: &'cg mut Option<AllFacts>,
+ location_table: &'cg LocationTable,
+ liveness_constraints: &'cg mut LivenessValues<RegionVid>,
+ borrow_set: &'cg BorrowSet<'tcx>,
+ body: &'cg Body<'tcx>,
+}
+
+impl<'cg, 'cx, 'tcx> Visitor<'tcx> for ConstraintGeneration<'cg, 'cx, 'tcx> {
+ fn visit_basic_block_data(&mut self, bb: BasicBlock, data: &BasicBlockData<'tcx>) {
+ self.super_basic_block_data(bb, data);
+ }
+
+ /// We sometimes have `substs` within an rvalue, or within a
+ /// call. Make them live at the location where they appear.
+ fn visit_substs(&mut self, substs: &SubstsRef<'tcx>, location: Location) {
+ self.add_regular_live_constraint(*substs, location);
+ self.super_substs(substs);
+ }
+
+ /// We sometimes have `region` within an rvalue, or within a
+ /// call. Make them live at the location where they appear.
+ fn visit_region(&mut self, region: ty::Region<'tcx>, location: Location) {
+ self.add_regular_live_constraint(region, location);
+ self.super_region(region);
+ }
+
+ /// We sometimes have `ty` within an rvalue, or within a
+ /// call. Make them live at the location where they appear.
+ fn visit_ty(&mut self, ty: Ty<'tcx>, ty_context: TyContext) {
+ match ty_context {
+ TyContext::ReturnTy(SourceInfo { span, .. })
+ | TyContext::YieldTy(SourceInfo { span, .. })
+ | TyContext::UserTy(span)
+ | TyContext::LocalDecl { source_info: SourceInfo { span, .. }, .. } => {
+ span_bug!(span, "should not be visiting outside of the CFG: {:?}", ty_context);
+ }
+ TyContext::Location(location) => {
+ self.add_regular_live_constraint(ty, location);
+ }
+ }
+
+ self.super_ty(ty);
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ if let Some(all_facts) = self.all_facts {
+ let _prof_timer = self.infcx.tcx.prof.generic_activity("polonius_fact_generation");
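+            // Each statement at location L contributes two Polonius CFG edges:
+            // Start(L) -> Mid(L), then Mid(L) -> Start(L + 1) within the block.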
+ all_facts.cfg_edge.push((
+ self.location_table.start_index(location),
+ self.location_table.mid_index(location),
+ ));
+
+ all_facts.cfg_edge.push((
+ self.location_table.mid_index(location),
+ self.location_table.start_index(location.successor_within_block()),
+ ));
+
+ // If there are borrows on this now dead local, we need to record them as `killed`.
+ if let StatementKind::StorageDead(local) = statement.kind {
+ record_killed_borrows_for_local(
+ all_facts,
+ self.borrow_set,
+ self.location_table,
+ local,
+ location,
+ );
+ }
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
+ // When we see `X = ...`, then kill borrows of
+ // `(*X).foo` and so forth.
+ self.record_killed_borrows_for_place(*place, location);
+
+ self.super_assign(place, rvalue, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ if let Some(all_facts) = self.all_facts {
+ let _prof_timer = self.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+ all_facts.cfg_edge.push((
+ self.location_table.start_index(location),
+ self.location_table.mid_index(location),
+ ));
+
+ let successor_blocks = terminator.successors();
+ all_facts.cfg_edge.reserve(successor_blocks.size_hint().0);
+ for successor_block in successor_blocks {
+ all_facts.cfg_edge.push((
+ self.location_table.mid_index(location),
+ self.location_table.start_index(successor_block.start_location()),
+ ));
+ }
+ }
+
+ // A `Call` terminator's return value can be a local which has borrows,
+ // so we need to record those as `killed` as well.
+ if let TerminatorKind::Call { destination, .. } = terminator.kind {
+ self.record_killed_borrows_for_place(destination, location);
+ }
+
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_ascribe_user_ty(
+ &mut self,
+ _place: &Place<'tcx>,
+ _variance: ty::Variance,
+ _user_ty: &UserTypeProjection,
+ _location: Location,
+ ) {
+ }
+}
+
+impl<'cg, 'cx, 'tcx> ConstraintGeneration<'cg, 'cx, 'tcx> {
+ /// Some variable with type `live_ty` is "regular live" at
+ /// `location` -- i.e., it may be used later. This means that all
+ /// regions appearing in the type `live_ty` must be live at
+ /// `location`.
+ fn add_regular_live_constraint<T>(&mut self, live_ty: T, location: Location)
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ debug!("add_regular_live_constraint(live_ty={:?}, location={:?})", live_ty, location);
+
+ self.infcx.tcx.for_each_free_region(&live_ty, |live_region| {
+ let vid = live_region.to_region_vid();
+ self.liveness_constraints.add_element(vid, location);
+ });
+ }
+
+ /// When recording facts for Polonius, records the borrows on the specified place
+ /// as `killed`. For example, when assigning to a local, or on a call's return destination.
+ fn record_killed_borrows_for_place(&mut self, place: Place<'tcx>, location: Location) {
+ if let Some(all_facts) = self.all_facts {
+ let _prof_timer = self.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+
+ // Depending on the `Place` we're killing:
+ // - if it's a local, or a single deref of a local,
+ // we kill all the borrows on the local.
+ // - if it's a deeper projection, we have to filter which
+ // of the borrows are killed: the ones whose `borrowed_place`
+ // conflicts with the `place`.
+ match place.as_ref() {
+ PlaceRef { local, projection: &[] }
+ | PlaceRef { local, projection: &[ProjectionElem::Deref] } => {
+ debug!(
+ "Recording `killed` facts for borrows of local={:?} at location={:?}",
+ local, location
+ );
+
+ record_killed_borrows_for_local(
+ all_facts,
+ self.borrow_set,
+ self.location_table,
+ local,
+ location,
+ );
+ }
+
+ PlaceRef { local, projection: &[.., _] } => {
+ // Kill conflicting borrows of the innermost local.
+ debug!(
+ "Recording `killed` facts for borrows of \
+ innermost projected local={:?} at location={:?}",
+ local, location
+ );
+
+ if let Some(borrow_indices) = self.borrow_set.local_map.get(&local) {
+ for &borrow_index in borrow_indices {
+ let places_conflict = places_conflict::places_conflict(
+ self.infcx.tcx,
+ self.body,
+ self.borrow_set[borrow_index].borrowed_place,
+ place,
+ places_conflict::PlaceConflictBias::NoOverlap,
+ );
+
+ if places_conflict {
+ let location_index = self.location_table.mid_index(location);
+ all_facts.loan_killed_at.push((borrow_index, location_index));
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+/// When recording facts for Polonius, records the borrows on the specified local as `killed`.
+fn record_killed_borrows_for_local(
+ all_facts: &mut AllFacts,
+ borrow_set: &BorrowSet<'_>,
+ location_table: &LocationTable,
+ local: Local,
+ location: Location,
+) {
+ if let Some(borrow_indices) = borrow_set.local_map.get(&local) {
+ all_facts.loan_killed_at.reserve(borrow_indices.len());
+ for &borrow_index in borrow_indices {
+ let location_index = location_table.mid_index(location);
+ all_facts.loan_killed_at.push((borrow_index, location_index));
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/constraints/graph.rs b/compiler/rustc_borrowck/src/constraints/graph.rs
new file mode 100644
index 000000000..609fbc2bc
--- /dev/null
+++ b/compiler/rustc_borrowck/src/constraints/graph.rs
@@ -0,0 +1,235 @@
+use rustc_data_structures::graph;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::{RegionVid, VarianceDiagInfo};
+use rustc_span::DUMMY_SP;
+
+use crate::{
+ constraints::OutlivesConstraintIndex,
+ constraints::{OutlivesConstraint, OutlivesConstraintSet},
+ type_check::Locations,
+};
+
+/// The constraint graph organizes the constraints by their end-points.
+/// It can be used to view a `R1: R2` constraint as either an edge `R1
+/// -> R2` or `R2 -> R1` depending on the direction type `D`.
+pub(crate) struct ConstraintGraph<D: ConstraintGraphDirecton> {
+ _direction: D,
+ first_constraints: IndexVec<RegionVid, Option<OutlivesConstraintIndex>>,
+ next_constraints: IndexVec<OutlivesConstraintIndex, Option<OutlivesConstraintIndex>>,
+}
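+
+// Storage sketch (illustrative): given constraints `c0: R1 -> R2`,
+// `c1: R1 -> R3`, `c2: R2 -> R1` under the `Normal` direction, `new` builds
+//
+//     first_constraints[R1] = Some(c0), next_constraints[c0] = Some(c1)
+//     first_constraints[R2] = Some(c2)
+//
+// i.e. each region heads an intrusive linked list of the constraints that
+// start at it.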
+
+pub(crate) type NormalConstraintGraph = ConstraintGraph<Normal>;
+
+pub(crate) type ReverseConstraintGraph = ConstraintGraph<Reverse>;
+
+/// Marker trait that controls whether a `R1: R2` constraint
+/// represents an edge `R1 -> R2` or `R2 -> R1`.
+pub(crate) trait ConstraintGraphDirecton: Copy + 'static {
+ fn start_region(c: &OutlivesConstraint<'_>) -> RegionVid;
+ fn end_region(c: &OutlivesConstraint<'_>) -> RegionVid;
+ fn is_normal() -> bool;
+}
+
+/// In normal mode, a `R1: R2` constraint results in an edge `R1 ->
+/// R2`. This is what we use when constructing the SCCs for
+/// inference. This is because we compute the value of R1 by union'ing
+/// all the things that it relies on.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct Normal;
+
+impl ConstraintGraphDirecton for Normal {
+ fn start_region(c: &OutlivesConstraint<'_>) -> RegionVid {
+ c.sup
+ }
+
+ fn end_region(c: &OutlivesConstraint<'_>) -> RegionVid {
+ c.sub
+ }
+
+ fn is_normal() -> bool {
+ true
+ }
+}
+
+/// In reverse mode, a `R1: R2` constraint results in an edge `R2 ->
+/// R1`. We use this for optimizing liveness computation, because then
+/// we wish to iterate from a region (e.g., R2) to all the regions
+/// that will outlive it (e.g., R1).
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct Reverse;
+
+impl ConstraintGraphDirecton for Reverse {
+ fn start_region(c: &OutlivesConstraint<'_>) -> RegionVid {
+ c.sub
+ }
+
+ fn end_region(c: &OutlivesConstraint<'_>) -> RegionVid {
+ c.sup
+ }
+
+ fn is_normal() -> bool {
+ false
+ }
+}
+
+impl<D: ConstraintGraphDirecton> ConstraintGraph<D> {
+ /// Creates a "dependency graph" where each region constraint `R1:
+ /// R2` is treated as an edge `R1 -> R2`. We use this graph to
+ /// construct SCCs for region inference but also for error
+ /// reporting.
+ pub(crate) fn new(
+ direction: D,
+ set: &OutlivesConstraintSet<'_>,
+ num_region_vars: usize,
+ ) -> Self {
+ let mut first_constraints = IndexVec::from_elem_n(None, num_region_vars);
+ let mut next_constraints = IndexVec::from_elem(None, &set.outlives);
+
+ for (idx, constraint) in set.outlives.iter_enumerated().rev() {
+ let head = &mut first_constraints[D::start_region(constraint)];
+ let next = &mut next_constraints[idx];
+ debug_assert!(next.is_none());
+ *next = *head;
+ *head = Some(idx);
+ }
+
+ Self { _direction: direction, first_constraints, next_constraints }
+ }
+
+ /// Given the constraint set from which this graph was built
+ /// creates a region graph so that you can iterate over *regions*
+ /// and not constraints.
+ pub(crate) fn region_graph<'rg, 'tcx>(
+ &'rg self,
+ set: &'rg OutlivesConstraintSet<'tcx>,
+ static_region: RegionVid,
+ ) -> RegionGraph<'rg, 'tcx, D> {
+ RegionGraph::new(set, self, static_region)
+ }
+
+ /// Given a region `R`, iterate over all constraints `R: R1`.
+ pub(crate) fn outgoing_edges<'a, 'tcx>(
+ &'a self,
+ region_sup: RegionVid,
+ constraints: &'a OutlivesConstraintSet<'tcx>,
+ static_region: RegionVid,
+ ) -> Edges<'a, 'tcx, D> {
+        // If this is the `'static` region and the graph's direction is normal,
+        // set up the `Edges` iterator to return all regions (#53178): `'static`
+        // outlives every region, so these edges are synthesized by the iterator
+        // rather than stored in the constraint set.
+ if region_sup == static_region && D::is_normal() {
+ Edges {
+ graph: self,
+ constraints,
+ pointer: None,
+ next_static_idx: Some(0),
+ static_region,
+ }
+ } else {
+            // Otherwise, just set up the iterator as normal.
+ let first = self.first_constraints[region_sup];
+ Edges { graph: self, constraints, pointer: first, next_static_idx: None, static_region }
+ }
+ }
+}
+
+pub(crate) struct Edges<'s, 'tcx, D: ConstraintGraphDirecton> {
+ graph: &'s ConstraintGraph<D>,
+ constraints: &'s OutlivesConstraintSet<'tcx>,
+ pointer: Option<OutlivesConstraintIndex>,
+ next_static_idx: Option<usize>,
+ static_region: RegionVid,
+}
+
+impl<'s, 'tcx, D: ConstraintGraphDirecton> Iterator for Edges<'s, 'tcx, D> {
+ type Item = OutlivesConstraint<'tcx>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if let Some(p) = self.pointer {
+ self.pointer = self.graph.next_constraints[p];
+
+ Some(self.constraints[p].clone())
+ } else if let Some(next_static_idx) = self.next_static_idx {
+ self.next_static_idx = if next_static_idx == (self.graph.first_constraints.len() - 1) {
+ None
+ } else {
+ Some(next_static_idx + 1)
+ };
+
+ Some(OutlivesConstraint {
+ sup: self.static_region,
+ sub: next_static_idx.into(),
+ locations: Locations::All(DUMMY_SP),
+ span: DUMMY_SP,
+ category: ConstraintCategory::Internal,
+ variance_info: VarianceDiagInfo::default(),
+ })
+ } else {
+ None
+ }
+ }
+}
+
+/// This struct brings together a constraint set and a (normal, not
+/// reverse) constraint graph. It implements the graph traits and is
+/// used for the SCC computation.
+pub(crate) struct RegionGraph<'s, 'tcx, D: ConstraintGraphDirecton> {
+ set: &'s OutlivesConstraintSet<'tcx>,
+ constraint_graph: &'s ConstraintGraph<D>,
+ static_region: RegionVid,
+}
+
+impl<'s, 'tcx, D: ConstraintGraphDirecton> RegionGraph<'s, 'tcx, D> {
+ /// Creates a "dependency graph" where each region constraint `R1:
+ /// R2` is treated as an edge `R1 -> R2`. We use this graph to
+ /// construct SCCs for region inference but also for error
+ /// reporting.
+ pub(crate) fn new(
+ set: &'s OutlivesConstraintSet<'tcx>,
+ constraint_graph: &'s ConstraintGraph<D>,
+ static_region: RegionVid,
+ ) -> Self {
+ Self { set, constraint_graph, static_region }
+ }
+
+ /// Given a region `R`, iterate over all regions `R1` such that
+ /// there exists a constraint `R: R1`.
+ pub(crate) fn outgoing_regions(&self, region_sup: RegionVid) -> Successors<'s, 'tcx, D> {
+ Successors {
+ edges: self.constraint_graph.outgoing_edges(region_sup, self.set, self.static_region),
+ }
+ }
+}
+
+pub(crate) struct Successors<'s, 'tcx, D: ConstraintGraphDirecton> {
+ edges: Edges<'s, 'tcx, D>,
+}
+
+impl<'s, 'tcx, D: ConstraintGraphDirecton> Iterator for Successors<'s, 'tcx, D> {
+ type Item = RegionVid;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.edges.next().map(|c| D::end_region(&c))
+ }
+}
+
+impl<'s, 'tcx, D: ConstraintGraphDirecton> graph::DirectedGraph for RegionGraph<'s, 'tcx, D> {
+ type Node = RegionVid;
+}
+
+impl<'s, 'tcx, D: ConstraintGraphDirecton> graph::WithNumNodes for RegionGraph<'s, 'tcx, D> {
+ fn num_nodes(&self) -> usize {
+ self.constraint_graph.first_constraints.len()
+ }
+}
+
+impl<'s, 'tcx, D: ConstraintGraphDirecton> graph::WithSuccessors for RegionGraph<'s, 'tcx, D> {
+ fn successors(&self, node: Self::Node) -> <Self as graph::GraphSuccessors<'_>>::Iter {
+ self.outgoing_regions(node)
+ }
+}
+
+impl<'s, 'tcx, D: ConstraintGraphDirecton> graph::GraphSuccessors<'_> for RegionGraph<'s, 'tcx, D> {
+ type Item = RegionVid;
+ type Iter = Successors<'s, 'tcx, D>;
+}
diff --git a/compiler/rustc_borrowck/src/constraints/mod.rs b/compiler/rustc_borrowck/src/constraints/mod.rs
new file mode 100644
index 000000000..a504d0c91
--- /dev/null
+++ b/compiler/rustc_borrowck/src/constraints/mod.rs
@@ -0,0 +1,124 @@
+use rustc_data_structures::graph::scc::Sccs;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::{RegionVid, VarianceDiagInfo};
+use rustc_span::Span;
+use std::fmt;
+use std::ops::Index;
+
+use crate::type_check::Locations;
+
+pub(crate) mod graph;
+
+/// A set of NLL region constraints. These include "outlives"
+/// constraints of the form `R1: R2`. Each constraint is identified by
+/// a unique `OutlivesConstraintIndex` and you can index into the set
+/// (`constraint_set[i]`) to access the constraint details.
+#[derive(Clone, Default)]
+pub(crate) struct OutlivesConstraintSet<'tcx> {
+ outlives: IndexVec<OutlivesConstraintIndex, OutlivesConstraint<'tcx>>,
+}
+
+impl<'tcx> OutlivesConstraintSet<'tcx> {
+ pub(crate) fn push(&mut self, constraint: OutlivesConstraint<'tcx>) {
+ debug!(
+ "OutlivesConstraintSet::push({:?}: {:?} @ {:?}",
+ constraint.sup, constraint.sub, constraint.locations
+ );
+ if constraint.sup == constraint.sub {
+ // 'a: 'a is pretty uninteresting
+ return;
+ }
+ self.outlives.push(constraint);
+ }
+
+ /// Constructs a "normal" graph from the constraint set; the graph makes it
+ /// easy to find the constraints affecting a particular region.
+ ///
+ /// N.B., this graph contains a "frozen" view of the current
+ /// constraints. Any new constraints added to the `OutlivesConstraintSet`
+ /// after the graph is built will not be present in the graph.
+ pub(crate) fn graph(&self, num_region_vars: usize) -> graph::NormalConstraintGraph {
+ graph::ConstraintGraph::new(graph::Normal, self, num_region_vars)
+ }
+
+    /// Like `graph`, but constructs a reverse graph where `R1: R2`
+ /// represents an edge `R2 -> R1`.
+ pub(crate) fn reverse_graph(&self, num_region_vars: usize) -> graph::ReverseConstraintGraph {
+ graph::ConstraintGraph::new(graph::Reverse, self, num_region_vars)
+ }
+
+ /// Computes cycles (SCCs) in the graph of regions. In particular,
+ /// find all regions R1, R2 such that R1: R2 and R2: R1 and group
+ /// them into an SCC, and find the relationships between SCCs.
+ pub(crate) fn compute_sccs(
+ &self,
+ constraint_graph: &graph::NormalConstraintGraph,
+ static_region: RegionVid,
+ ) -> Sccs<RegionVid, ConstraintSccIndex> {
+ let region_graph = &constraint_graph.region_graph(self, static_region);
+ Sccs::new(region_graph)
+ }
+
+ pub(crate) fn outlives(&self) -> &IndexVec<OutlivesConstraintIndex, OutlivesConstraint<'tcx>> {
+ &self.outlives
+ }
+}
+
+impl<'tcx> Index<OutlivesConstraintIndex> for OutlivesConstraintSet<'tcx> {
+ type Output = OutlivesConstraint<'tcx>;
+
+ fn index(&self, i: OutlivesConstraintIndex) -> &Self::Output {
+ &self.outlives[i]
+ }
+}
+
+#[derive(Clone, PartialEq, Eq)]
+pub struct OutlivesConstraint<'tcx> {
+ // NB. The ordering here is not significant for correctness, but
+ // it is for convenience. Before we dump the constraints in the
+ // debugging logs, we sort them, and we'd like the "super region"
+ // to be first, etc. (In particular, span should remain last.)
+ /// The region SUP must outlive SUB...
+ pub sup: RegionVid,
+
+ /// Region that must be outlived.
+ pub sub: RegionVid,
+
+ /// Where did this constraint arise?
+ pub locations: Locations,
+
+ /// The `Span` associated with the creation of this constraint.
+ /// This should be used in preference to obtaining the span from
+ /// `locations`, since the `locations` may give a poor span
+ /// in some cases (e.g. converting a constraint from a promoted).
+ pub span: Span,
+
+ /// What caused this constraint?
+ pub category: ConstraintCategory<'tcx>,
+
+ /// Variance diagnostic information
+ pub variance_info: VarianceDiagInfo<'tcx>,
+}
+
+impl<'tcx> fmt::Debug for OutlivesConstraint<'tcx> {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ formatter,
+ "({:?}: {:?}) due to {:?} ({:?})",
+ self.sup, self.sub, self.locations, self.variance_info
+ )
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct OutlivesConstraintIndex {
+ DEBUG_FORMAT = "OutlivesConstraintIndex({})"
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct ConstraintSccIndex {
+ DEBUG_FORMAT = "ConstraintSccIndex({})"
+ }
+}
diff --git a/compiler/rustc_borrowck/src/consumers.rs b/compiler/rustc_borrowck/src/consumers.rs
new file mode 100644
index 000000000..efc17a173
--- /dev/null
+++ b/compiler/rustc_borrowck/src/consumers.rs
@@ -0,0 +1,39 @@
+//! This file provides an API for compiler consumers.
+
+use rustc_hir::def_id::LocalDefId;
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::{DefiningAnchor, TyCtxtInferExt};
+use rustc_middle::mir::Body;
+use rustc_middle::ty::{self, TyCtxt};
+
+pub use super::{
+ facts::{AllFacts as PoloniusInput, RustcFacts},
+ location::{LocationTable, RichLocation},
+ nll::PoloniusOutput,
+ BodyWithBorrowckFacts,
+};
+
+/// This function computes Polonius facts for the given body. It makes a copy of
+/// the body because it needs to regenerate the region identifiers. This function
+/// should never be invoked during a typical compilation session due to performance
+/// issues with Polonius.
+///
+/// Note:
+/// * This function will panic if the required body was already stolen. This
+/// can, for example, happen when requesting a body of a `const` function
+/// because they are evaluated during typechecking. The panic can be avoided
+/// by overriding the `mir_borrowck` query. You can find a complete example
+/// that shows how to do this at `src/test/run-make/obtain-borrowck/`.
+///
+/// * Polonius is highly unstable, so expect regular changes in its signature or other details.
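+///
+/// A minimal usage sketch (assumes a `TyCtxt` obtained through
+/// `rustc_interface` callbacks and a `LocalDefId` for the item of interest;
+/// see `BodyWithBorrowckFacts` for the available fields):
+///
+/// ```ignore (requires rustc internals)
+/// let with_facts = consumers::get_body_with_borrowck_facts(
+///     tcx,
+///     ty::WithOptConstParam::unknown(def_id),
+/// );
+/// eprintln!("CFG edges recorded: {}", with_facts.input_facts.cfg_edge.len());
+/// ```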
+pub fn get_body_with_borrowck_facts<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> BodyWithBorrowckFacts<'tcx> {
+ let (input_body, promoted) = tcx.mir_promoted(def);
+ tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bind(def.did)).enter(|infcx| {
+ let input_body: &Body<'_> = &input_body.borrow();
+ let promoted: &IndexVec<_, _> = &promoted.borrow();
+ *super::do_mir_borrowck(&infcx, input_body, promoted, true).1.unwrap()
+ })
+}
diff --git a/compiler/rustc_borrowck/src/dataflow.rs b/compiler/rustc_borrowck/src/dataflow.rs
new file mode 100644
index 000000000..97d5a8d15
--- /dev/null
+++ b/compiler/rustc_borrowck/src/dataflow.rs
@@ -0,0 +1,438 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::{self, BasicBlock, Body, Location, Place};
+use rustc_middle::ty::RegionVid;
+use rustc_middle::ty::TyCtxt;
+use rustc_mir_dataflow::impls::{EverInitializedPlaces, MaybeUninitializedPlaces};
+use rustc_mir_dataflow::ResultsVisitable;
+use rustc_mir_dataflow::{self, fmt::DebugWithContext, CallReturnPlaces, GenKill};
+use rustc_mir_dataflow::{Analysis, Direction, Results};
+use std::fmt;
+
+use crate::{
+ places_conflict, BorrowSet, PlaceConflictBias, PlaceExt, RegionInferenceContext, ToRegionVid,
+};
+
+/// A tuple with named fields that can hold either the results or the transient state of the
+/// dataflow analyses used by the borrow checker.
+#[derive(Debug)]
+pub struct BorrowckAnalyses<B, U, E> {
+ pub borrows: B,
+ pub uninits: U,
+ pub ever_inits: E,
+}
+
+/// The results of the dataflow analyses used by the borrow checker.
+pub type BorrowckResults<'mir, 'tcx> = BorrowckAnalyses<
+ Results<'tcx, Borrows<'mir, 'tcx>>,
+ Results<'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
+ Results<'tcx, EverInitializedPlaces<'mir, 'tcx>>,
+>;
+
+/// The transient state of the dataflow analyses used by the borrow checker.
+pub type BorrowckFlowState<'mir, 'tcx> =
+ <BorrowckResults<'mir, 'tcx> as ResultsVisitable<'tcx>>::FlowState;
+
+macro_rules! impl_visitable {
+ ( $(
+ $T:ident { $( $field:ident : $A:ident ),* $(,)? }
+ )* ) => { $(
+ impl<'tcx, $($A),*, D: Direction> ResultsVisitable<'tcx> for $T<$( Results<'tcx, $A> ),*>
+ where
+ $( $A: Analysis<'tcx, Direction = D>, )*
+ {
+ type Direction = D;
+ type FlowState = $T<$( $A::Domain ),*>;
+
+ fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState {
+ $T {
+ $( $field: self.$field.analysis.bottom_value(body) ),*
+ }
+ }
+
+ fn reset_to_block_entry(
+ &self,
+ state: &mut Self::FlowState,
+ block: BasicBlock,
+ ) {
+ $( state.$field.clone_from(&self.$field.entry_set_for_block(block)); )*
+ }
+
+ fn reconstruct_before_statement_effect(
+ &self,
+ state: &mut Self::FlowState,
+ stmt: &mir::Statement<'tcx>,
+ loc: Location,
+ ) {
+ $( self.$field.analysis
+ .apply_before_statement_effect(&mut state.$field, stmt, loc); )*
+ }
+
+ fn reconstruct_statement_effect(
+ &self,
+ state: &mut Self::FlowState,
+ stmt: &mir::Statement<'tcx>,
+ loc: Location,
+ ) {
+ $( self.$field.analysis
+ .apply_statement_effect(&mut state.$field, stmt, loc); )*
+ }
+
+ fn reconstruct_before_terminator_effect(
+ &self,
+ state: &mut Self::FlowState,
+ term: &mir::Terminator<'tcx>,
+ loc: Location,
+ ) {
+ $( self.$field.analysis
+ .apply_before_terminator_effect(&mut state.$field, term, loc); )*
+ }
+
+ fn reconstruct_terminator_effect(
+ &self,
+ state: &mut Self::FlowState,
+ term: &mir::Terminator<'tcx>,
+ loc: Location,
+ ) {
+ $( self.$field.analysis
+ .apply_terminator_effect(&mut state.$field, term, loc); )*
+ }
+ }
+ )* }
+}
+
+impl_visitable! {
+ BorrowckAnalyses { borrows: B, uninits: U, ever_inits: E }
+}
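+
+// Expansion sketch (illustrative): the invocation above generates roughly
+//
+//     impl<'tcx, B, U, E, D: Direction> ResultsVisitable<'tcx>
+//         for BorrowckAnalyses<Results<'tcx, B>, Results<'tcx, U>, Results<'tcx, E>>
+//     where B: Analysis<'tcx, Direction = D>, /* likewise for U and E */
+//     {
+//         type FlowState = BorrowckAnalyses<B::Domain, U::Domain, E::Domain>;
+//         // ...with each reconstruct_* method forwarding to `borrows`,
+//         // `uninits` and `ever_inits` in turn.
+//     }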
+
+rustc_index::newtype_index! {
+ pub struct BorrowIndex {
+ DEBUG_FORMAT = "bw{}"
+ }
+}
+
+/// `Borrows` stores the data used in the analyses that track the flow
+/// of borrows.
+///
+/// It uniquely identifies every borrow (`Rvalue::Ref`) by a
+/// `BorrowIndex`, and maps each such index to a `BorrowData`
+/// describing the borrow. These indexes are used for representing the
+/// borrows in compact bitvectors.
+pub struct Borrows<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+
+ borrow_set: &'a BorrowSet<'tcx>,
+ borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
+}
+
+struct StackEntry {
+ bb: mir::BasicBlock,
+ lo: usize,
+ hi: usize,
+}
+
+struct OutOfScopePrecomputer<'a, 'tcx> {
+ visited: BitSet<mir::BasicBlock>,
+ visit_stack: Vec<StackEntry>,
+ body: &'a Body<'tcx>,
+ regioncx: &'a RegionInferenceContext<'tcx>,
+ borrows_out_of_scope_at_location: FxHashMap<Location, Vec<BorrowIndex>>,
+}
+
+impl<'a, 'tcx> OutOfScopePrecomputer<'a, 'tcx> {
+ fn new(body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>) -> Self {
+ OutOfScopePrecomputer {
+ visited: BitSet::new_empty(body.basic_blocks().len()),
+ visit_stack: vec![],
+ body,
+ regioncx,
+ borrows_out_of_scope_at_location: FxHashMap::default(),
+ }
+ }
+}
+
+impl<'tcx> OutOfScopePrecomputer<'_, 'tcx> {
+ fn precompute_borrows_out_of_scope(
+ &mut self,
+ borrow_index: BorrowIndex,
+ borrow_region: RegionVid,
+ location: Location,
+ ) {
+ // We visit one BB at a time. The complication is that we may start in the
+ // middle of the first BB visited (the one containing `location`), in which
+ // case we may have to later on process the first part of that BB if there
+ // is a path back to its start.
+
+ // For visited BBs, we record the index of the first statement processed.
+ // (In fully processed BBs this index is 0.) Note also that we add BBs to
+ // `visited` once they are added to `visit_stack`, before they are actually
+ // processed, because this avoids the need to look them up again on
+ // completion.
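+ //
+ // Illustrative walk (over an assumed CFG, not taken from any real test):
+ //
+ //     bb0: s0; s1; goto -> bb1;     // borrow reserved at bb0[1]
+ //     bb1: s0; goto -> bb0;         // back edge
+ //
+ // We first push `{ bb: bb0, lo: 1, hi: 2 }`, then `bb1` in full; when the
+ // back edge rediscovers `bb0`, only its unprocessed prefix
+ // `{ bb: bb0, lo: 0, hi: 0 }` is pushed, so no statement is visited twice.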
+ self.visited.insert(location.block);
+
+ let mut first_lo = location.statement_index;
+ let first_hi = self.body[location.block].statements.len();
+
+ self.visit_stack.push(StackEntry { bb: location.block, lo: first_lo, hi: first_hi });
+
+ while let Some(StackEntry { bb, lo, hi }) = self.visit_stack.pop() {
+ // If we process the first part of the first basic block (i.e. we encounter that block
+ // for the second time), we no longer have to visit its successors.
+ let mut finished_early = bb == location.block && hi != first_hi;
+ for i in lo..=hi {
+ let location = Location { block: bb, statement_index: i };
+ // If the region does not contain a point at this location, then add the borrow
+ // to the kill list and skip the successor locations.
+ if !self.regioncx.region_contains(borrow_region, location) {
+ debug!("borrow {:?} gets killed at {:?}", borrow_index, location);
+ self.borrows_out_of_scope_at_location
+ .entry(location)
+ .or_default()
+ .push(borrow_index);
+ finished_early = true;
+ break;
+ }
+ }
+
+ if !finished_early {
+ // Add successor BBs to the work list, if necessary.
+ let bb_data = &self.body[bb];
+ debug_assert!(hi == bb_data.statements.len());
+ for succ_bb in bb_data.terminator().successors() {
+ if !self.visited.insert(succ_bb) {
+ if succ_bb == location.block && first_lo > 0 {
+ // `succ_bb` has been seen before. If it wasn't
+ // fully processed, add its first part to `visit_stack`
+ // for processing.
+ self.visit_stack.push(StackEntry {
+ bb: succ_bb,
+ lo: 0,
+ hi: first_lo - 1,
+ });
+
+ // And update this entry with 0, to represent the
+ // whole BB being processed.
+ first_lo = 0;
+ }
+ } else {
+ // `succ_bb` hasn't been seen before. Add it to
+ // `visit_stack` for processing.
+ self.visit_stack.push(StackEntry {
+ bb: succ_bb,
+ lo: 0,
+ hi: self.body[succ_bb].statements.len(),
+ });
+ }
+ }
+ }
+ }
+
+ self.visited.clear();
+ }
+}
+
+impl<'a, 'tcx> Borrows<'a, 'tcx> {
+ pub(crate) fn new(
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ nonlexical_regioncx: &'a RegionInferenceContext<'tcx>,
+ borrow_set: &'a BorrowSet<'tcx>,
+ ) -> Self {
+ let mut prec = OutOfScopePrecomputer::new(body, nonlexical_regioncx);
+ for (borrow_index, borrow_data) in borrow_set.iter_enumerated() {
+ let borrow_region = borrow_data.region.to_region_vid();
+ let location = borrow_data.reserve_location;
+
+ prec.precompute_borrows_out_of_scope(borrow_index, borrow_region, location);
+ }
+
+ Borrows {
+ tcx,
+ body,
+ borrow_set,
+ borrows_out_of_scope_at_location: prec.borrows_out_of_scope_at_location,
+ }
+ }
+
+ pub fn location(&self, idx: BorrowIndex) -> &Location {
+ &self.borrow_set[idx].reserve_location
+ }
+
+ /// Add all borrows to the kill set, if those borrows are out of scope at `location`;
+ /// that is, they have gone out of their nonlexical scope.
+ fn kill_loans_out_of_scope_at_location(
+ &self,
+ trans: &mut impl GenKill<BorrowIndex>,
+ location: Location,
+ ) {
+ // NOTE: The state associated with a given `location`
+ // reflects the dataflow on entry to the statement.
+ // Iterate over each of the borrows that we've precomputed
+ // to have gone out of scope at this location and kill them.
+ //
+ // We are careful always to call this function *before* we
+ // set up the gen-bits for the statement or
+ // terminator. That way, if the effect of the statement or
+ // terminator *does* introduce a new loan of the same
+ // region, then setting that gen-bit will override any
+ // potential kill introduced here.
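+ //
+ // Illustrative ordering (a sketch, not real compiler output): if loan
+ // `bw3` both goes out of scope at `loc` and is issued again by the
+ // statement at `loc`, the effects run as
+ //
+ //     before_statement_effect(loc): kill(bw3)
+ //     statement_effect(loc):        gen(bw3)   // gen wins: bw3 stays live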
+ if let Some(indices) = self.borrows_out_of_scope_at_location.get(&location) {
+ trans.kill_all(indices.iter().copied());
+ }
+ }
+
+ /// Kill any borrows that conflict with `place`.
+ fn kill_borrows_on_place(&self, trans: &mut impl GenKill<BorrowIndex>, place: Place<'tcx>) {
+ debug!("kill_borrows_on_place: place={:?}", place);
+
+ let other_borrows_of_local = self
+ .borrow_set
+ .local_map
+ .get(&place.local)
+ .into_iter()
+ .flat_map(|bs| bs.iter())
+ .copied();
+
+ // If the borrowed place is a local with no projections, all other borrows of this
+ // local must conflict. This is purely an optimization so we don't have to call
+ // `places_conflict` for every borrow.
+ if place.projection.is_empty() {
+ if !self.body.local_decls[place.local].is_ref_to_static() {
+ trans.kill_all(other_borrows_of_local);
+ }
+ return;
+ }
+
+ // By passing `PlaceConflictBias::NoOverlap`, we conservatively assume that any given
+ // pair of array indices are unequal, so that when `places_conflict` returns true, we
+ // can be sure that the two places being compared definitely denote the same set of
+ // locations.
+ let definitely_conflicting_borrows = other_borrows_of_local.filter(|&i| {
+ places_conflict(
+ self.tcx,
+ self.body,
+ self.borrow_set[i].borrowed_place,
+ place,
+ PlaceConflictBias::NoOverlap,
+ )
+ });
+
+ trans.kill_all(definitely_conflicting_borrows);
+ }
+}
+
+impl<'tcx> rustc_mir_dataflow::AnalysisDomain<'tcx> for Borrows<'_, 'tcx> {
+ type Domain = BitSet<BorrowIndex>;
+
+ const NAME: &'static str = "borrows";
+
+ fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = nothing is reserved or activated yet.
+ BitSet::new_empty(self.borrow_set.len() * 2)
+ }
+
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
+ // No borrows have been taken prior to
+ // function execution, so this method has no effect.
+ }
+}
+
+impl<'tcx> rustc_mir_dataflow::GenKillAnalysis<'tcx> for Borrows<'_, 'tcx> {
+ type Idx = BorrowIndex;
+
+ fn before_statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ self.kill_loans_out_of_scope_at_location(trans, location);
+ }
+
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ stmt: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ match stmt.kind {
+ mir::StatementKind::Assign(box (lhs, ref rhs)) => {
+ if let mir::Rvalue::Ref(_, _, place) = *rhs {
+ if place.ignore_borrow(
+ self.tcx,
+ self.body,
+ &self.borrow_set.locals_state_at_exit,
+ ) {
+ return;
+ }
+ let index = self.borrow_set.get_index_of(&location).unwrap_or_else(|| {
+ panic!("could not find BorrowIndex for location {:?}", location);
+ });
+
+ trans.gen(index);
+ }
+
+ // Make sure there are no remaining borrows for variables
+ // that are overwritten by this assignment.
+ self.kill_borrows_on_place(trans, lhs);
+ }
+
+ mir::StatementKind::StorageDead(local) => {
+ // Make sure there are no remaining borrows for locals that
+ // have gone out of scope.
+ self.kill_borrows_on_place(trans, Place::from(local));
+ }
+
+ mir::StatementKind::FakeRead(..)
+ | mir::StatementKind::SetDiscriminant { .. }
+ | mir::StatementKind::Deinit(..)
+ | mir::StatementKind::StorageLive(..)
+ | mir::StatementKind::Retag { .. }
+ | mir::StatementKind::AscribeUserType(..)
+ | mir::StatementKind::Coverage(..)
+ | mir::StatementKind::CopyNonOverlapping(..)
+ | mir::StatementKind::Nop => {}
+ }
+ }
+
+ fn before_terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.kill_loans_out_of_scope_at_location(trans, location);
+ }
+
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ terminator: &mir::Terminator<'tcx>,
+ _location: Location,
+ ) {
+ if let mir::TerminatorKind::InlineAsm { operands, .. } = &terminator.kind {
+ for op in operands {
+ if let mir::InlineAsmOperand::Out { place: Some(place), .. }
+ | mir::InlineAsmOperand::InOut { out_place: Some(place), .. } = *op
+ {
+ self.kill_borrows_on_place(trans, place);
+ }
+ }
+ }
+ }
+
+ fn call_return_effect(
+ &self,
+ _trans: &mut impl GenKill<Self::Idx>,
+ _block: mir::BasicBlock,
+ _return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ }
+}
+
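+// Usage sketch (an assumption based on the `rustc_mir_dataflow` engine API;
+// the actual driver lives elsewhere in this crate):
+//
+//     let results = Borrows::new(tcx, body, regioncx, borrow_set)
+//         .into_engine(tcx, body)
+//         .pass_name("borrowck")
+//         .iterate_to_fixpoint();
+//
+// yielding per-location `BitSet<BorrowIndex>` states that feed the
+// `BorrowckResults` bundle defined above.
+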
+impl DebugWithContext<Borrows<'_, '_>> for BorrowIndex {
+ fn fmt_with(&self, ctxt: &Borrows<'_, '_>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", ctxt.location(*self))
+ }
+}
diff --git a/compiler/rustc_borrowck/src/def_use.rs b/compiler/rustc_borrowck/src/def_use.rs
new file mode 100644
index 000000000..a5c0d7742
--- /dev/null
+++ b/compiler/rustc_borrowck/src/def_use.rs
@@ -0,0 +1,80 @@
+use rustc_middle::mir::visit::{
+ MutatingUseContext, NonMutatingUseContext, NonUseContext, PlaceContext,
+};
+
+#[derive(Eq, PartialEq, Clone)]
+pub enum DefUse {
+ Def,
+ Use,
+ Drop,
+}
+
+pub fn categorize(context: PlaceContext) -> Option<DefUse> {
+ match context {
+ ///////////////////////////////////////////////////////////////////////////
+ // DEFS
+
+ PlaceContext::MutatingUse(MutatingUseContext::Store) |
+
+ // We let Call define the result in both the success and
+ // unwind cases. This is not really correct; however, it
+ // does not seem to be observable due to the way that we
+ // generate MIR. To do things properly, we would apply
+ // the def in call only to the input from the success
+ // path and not the unwind path. -nmatsakis
+ PlaceContext::MutatingUse(MutatingUseContext::Call) |
+ PlaceContext::MutatingUse(MutatingUseContext::AsmOutput) |
+ PlaceContext::MutatingUse(MutatingUseContext::Yield) |
+
+ // Storage live and storage dead aren't proper defs, but we can ignore
+ // values that come before them.
+ PlaceContext::NonUse(NonUseContext::StorageLive) |
+ PlaceContext::NonUse(NonUseContext::StorageDead) => Some(DefUse::Def),
+
+ ///////////////////////////////////////////////////////////////////////////
+ // REGULAR USES
+ //
+ // These are uses that occur *outside* of a drop. For the
+ // purposes of NLL, these are special in that **all** the
+ // lifetimes appearing in the variable must be live for each regular use.
+
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) |
+ PlaceContext::MutatingUse(MutatingUseContext::Projection) |
+
+ // Borrows only consider their local used at the point of the borrow.
+ // This won't affect the results since we use this analysis for generators
+ // and we only care about the result at suspension points. Borrows cannot
+ // cross suspension points so this behavior is unproblematic.
+ PlaceContext::MutatingUse(MutatingUseContext::Borrow) |
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow) |
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow) |
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow) |
+
+ PlaceContext::MutatingUse(MutatingUseContext::AddressOf) |
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf) |
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect) |
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) |
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) |
+ PlaceContext::NonUse(NonUseContext::AscribeUserTy) |
+ PlaceContext::MutatingUse(MutatingUseContext::Retag) =>
+ Some(DefUse::Use),
+
+ ///////////////////////////////////////////////////////////////////////////
+ // DROP USES
+ //
+ // These are uses that occur in a DROP (a MIR drop, not a
+ // call to `std::mem::drop()`). For the purposes of NLL,
+ // uses in drop are special because `#[may_dangle]`
+ // attributes can affect whether lifetimes must be live.
+
+ PlaceContext::MutatingUse(MutatingUseContext::Drop) =>
+ Some(DefUse::Drop),
+
+ // Debug info is neither def nor use.
+ PlaceContext::NonUse(NonUseContext::VarDebugInfo) => None,
+
+ PlaceContext::MutatingUse(MutatingUseContext::Deinit | MutatingUseContext::SetDiscriminant) => {
+ bug!("These statements are not allowed in this MIR phase")
+ }
+ }
+}
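+
+// Illustrative call (a sketch, not part of this file): a MIR visitor can
+// classify each place context it encounters, e.g.
+//
+//     use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext};
+//
+//     let ctx = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+//     assert!(matches!(categorize(ctx), Some(DefUse::Use)));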
diff --git a/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs
new file mode 100644
index 000000000..1ef2b0ae9
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs
@@ -0,0 +1,494 @@
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
+use rustc_infer::infer::canonical::Canonical;
+use rustc_infer::infer::error_reporting::nice_region_error::NiceRegionError;
+use rustc_infer::infer::region_constraints::Constraint;
+use rustc_infer::infer::region_constraints::RegionConstraintData;
+use rustc_infer::infer::RegionVariableOrigin;
+use rustc_infer::infer::{InferCtxt, RegionResolutionError, SubregionOrigin, TyCtxtInferExt as _};
+use rustc_infer::traits::{Normalized, ObligationCause, TraitEngine, TraitEngineExt};
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::RegionVid;
+use rustc_middle::ty::UniverseIndex;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc_span::Span;
+use rustc_trait_selection::traits::query::type_op;
+use rustc_trait_selection::traits::{SelectionContext, TraitEngineExt as _};
+use rustc_traits::{type_op_ascribe_user_type_with_span, type_op_prove_predicate_with_cause};
+
+use std::fmt;
+use std::rc::Rc;
+
+use crate::region_infer::values::RegionElement;
+use crate::session_diagnostics::HigherRankedErrorCause;
+use crate::session_diagnostics::HigherRankedLifetimeError;
+use crate::session_diagnostics::HigherRankedSubtypeError;
+use crate::MirBorrowckCtxt;
+
+#[derive(Clone)]
+pub(crate) struct UniverseInfo<'tcx>(UniverseInfoInner<'tcx>);
+
+/// What operation a universe was created for.
+#[derive(Clone)]
+enum UniverseInfoInner<'tcx> {
+ /// Relating two types which have binders.
+ RelateTys { expected: Ty<'tcx>, found: Ty<'tcx> },
+ /// Created from performing a `TypeOp`.
+ TypeOp(Rc<dyn TypeOpInfo<'tcx> + 'tcx>),
+ /// Any other reason.
+ Other,
+}
+
+impl<'tcx> UniverseInfo<'tcx> {
+ pub(crate) fn other() -> UniverseInfo<'tcx> {
+ UniverseInfo(UniverseInfoInner::Other)
+ }
+
+ pub(crate) fn relate(expected: Ty<'tcx>, found: Ty<'tcx>) -> UniverseInfo<'tcx> {
+ UniverseInfo(UniverseInfoInner::RelateTys { expected, found })
+ }
+
+ pub(crate) fn report_error(
+ &self,
+ mbcx: &mut MirBorrowckCtxt<'_, 'tcx>,
+ placeholder: ty::PlaceholderRegion,
+ error_element: RegionElement,
+ cause: ObligationCause<'tcx>,
+ ) {
+ match self.0 {
+ UniverseInfoInner::RelateTys { expected, found } => {
+ let err = mbcx.infcx.report_mismatched_types(
+ &cause,
+ expected,
+ found,
+ TypeError::RegionsPlaceholderMismatch,
+ );
+ mbcx.buffer_error(err);
+ }
+ UniverseInfoInner::TypeOp(ref type_op_info) => {
+ type_op_info.report_error(mbcx, placeholder, error_element, cause);
+ }
+ UniverseInfoInner::Other => {
+ // FIXME: This error message isn't great, but it doesn't show
+ // up in the existing UI tests. Consider investigating this
+ // some more.
+ mbcx.buffer_error(
+ mbcx.infcx.tcx.sess.create_err(HigherRankedSubtypeError { span: cause.span }),
+ );
+ }
+ }
+ }
+}
+
+pub(crate) trait ToUniverseInfo<'tcx> {
+ fn to_universe_info(self, base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx>;
+}
+
+impl<'tcx> ToUniverseInfo<'tcx> for crate::type_check::InstantiateOpaqueType<'tcx> {
+ fn to_universe_info(self, base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
+ UniverseInfo(UniverseInfoInner::TypeOp(Rc::new(crate::type_check::InstantiateOpaqueType {
+ base_universe: Some(base_universe),
+ ..self
+ })))
+ }
+}
+
+impl<'tcx> ToUniverseInfo<'tcx>
+ for Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::prove_predicate::ProvePredicate<'tcx>>>
+{
+ fn to_universe_info(self, base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
+ UniverseInfo(UniverseInfoInner::TypeOp(Rc::new(PredicateQuery {
+ canonical_query: self,
+ base_universe,
+ })))
+ }
+}
+
+impl<'tcx, T: Copy + fmt::Display + TypeFoldable<'tcx> + 'tcx> ToUniverseInfo<'tcx>
+ for Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Normalize<T>>>
+{
+ fn to_universe_info(self, base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
+ UniverseInfo(UniverseInfoInner::TypeOp(Rc::new(NormalizeQuery {
+ canonical_query: self,
+ base_universe,
+ })))
+ }
+}
+
+impl<'tcx> ToUniverseInfo<'tcx>
+ for Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::AscribeUserType<'tcx>>>
+{
+ fn to_universe_info(self, base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
+ UniverseInfo(UniverseInfoInner::TypeOp(Rc::new(AscribeUserTypeQuery {
+ canonical_query: self,
+ base_universe,
+ })))
+ }
+}
+
+impl<'tcx, F, G> ToUniverseInfo<'tcx> for Canonical<'tcx, type_op::custom::CustomTypeOp<F, G>> {
+ fn to_universe_info(self, _base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
+ // We can't rerun custom type ops.
+ UniverseInfo::other()
+ }
+}
+
+impl<'tcx> ToUniverseInfo<'tcx> for ! {
+ fn to_universe_info(self, _base_universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
+ self
+ }
+}
+
+#[allow(unused_lifetimes)]
+trait TypeOpInfo<'tcx> {
+ /// Returns an error to be reported if rerunning the type op fails to
+ /// recover the error's cause.
+ fn fallback_error(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+
+ fn base_universe(&self) -> ty::UniverseIndex;
+
+ fn nice_error(
+ &self,
+ mbcx: &mut MirBorrowckCtxt<'_, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ placeholder_region: ty::Region<'tcx>,
+ error_region: Option<ty::Region<'tcx>>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>>;
+
+ fn report_error(
+ &self,
+ mbcx: &mut MirBorrowckCtxt<'_, 'tcx>,
+ placeholder: ty::PlaceholderRegion,
+ error_element: RegionElement,
+ cause: ObligationCause<'tcx>,
+ ) {
+ let tcx = mbcx.infcx.tcx;
+ let base_universe = self.base_universe();
+
+ let Some(adjusted_universe) =
+ placeholder.universe.as_u32().checked_sub(base_universe.as_u32())
+ else {
+ mbcx.buffer_error(self.fallback_error(tcx, cause.span));
+ return;
+ };
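+
+ // Illustrative arithmetic (a sketch): if the type op started in base
+ // universe U3 and the placeholder lives in U5, the rerun query sees the
+ // placeholder in universe U5 - U3 = U2; a placeholder *below* the base
+ // universe cannot have come from this op, hence the fallback above.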
+
+ let placeholder_region = tcx.mk_region(ty::RePlaceholder(ty::Placeholder {
+ name: placeholder.name,
+ universe: adjusted_universe.into(),
+ }));
+
+ let error_region =
+ if let RegionElement::PlaceholderRegion(error_placeholder) = error_element {
+ let adjusted_universe =
+ error_placeholder.universe.as_u32().checked_sub(base_universe.as_u32());
+ adjusted_universe.map(|adjusted| {
+ tcx.mk_region(ty::RePlaceholder(ty::Placeholder {
+ name: error_placeholder.name,
+ universe: adjusted.into(),
+ }))
+ })
+ } else {
+ None
+ };
+
+ debug!(?placeholder_region);
+
+ let span = cause.span;
+ let nice_error = self.nice_error(mbcx, cause, placeholder_region, error_region);
+
+ if let Some(nice_error) = nice_error {
+ mbcx.buffer_error(nice_error);
+ } else {
+ mbcx.buffer_error(self.fallback_error(tcx, span));
+ }
+ }
+}
+
+struct PredicateQuery<'tcx> {
+ canonical_query:
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::prove_predicate::ProvePredicate<'tcx>>>,
+ base_universe: ty::UniverseIndex,
+}
+
+impl<'tcx> TypeOpInfo<'tcx> for PredicateQuery<'tcx> {
+ fn fallback_error(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ tcx.sess.create_err(HigherRankedLifetimeError {
+ cause: Some(HigherRankedErrorCause::CouldNotProve {
+ predicate: self.canonical_query.value.value.predicate.to_string(),
+ }),
+ span,
+ })
+ }
+
+ fn base_universe(&self) -> ty::UniverseIndex {
+ self.base_universe
+ }
+
+ fn nice_error(
+ &self,
+ mbcx: &mut MirBorrowckCtxt<'_, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ placeholder_region: ty::Region<'tcx>,
+ error_region: Option<ty::Region<'tcx>>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ mbcx.infcx.tcx.infer_ctxt().enter_with_canonical(
+ cause.span,
+ &self.canonical_query,
+ |ref infcx, key, _| {
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+ type_op_prove_predicate_with_cause(infcx, &mut *fulfill_cx, key, cause);
+ try_extract_error_from_fulfill_cx(
+ fulfill_cx,
+ infcx,
+ placeholder_region,
+ error_region,
+ )
+ },
+ )
+ }
+}
+
+struct NormalizeQuery<'tcx, T> {
+ canonical_query: Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Normalize<T>>>,
+ base_universe: ty::UniverseIndex,
+}
+
+impl<'tcx, T> TypeOpInfo<'tcx> for NormalizeQuery<'tcx, T>
+where
+ T: Copy + fmt::Display + TypeFoldable<'tcx> + 'tcx,
+{
+ fn fallback_error(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ tcx.sess.create_err(HigherRankedLifetimeError {
+ cause: Some(HigherRankedErrorCause::CouldNotNormalize {
+ value: self.canonical_query.value.value.value.to_string(),
+ }),
+ span,
+ })
+ }
+
+ fn base_universe(&self) -> ty::UniverseIndex {
+ self.base_universe
+ }
+
+ fn nice_error(
+ &self,
+ mbcx: &mut MirBorrowckCtxt<'_, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ placeholder_region: ty::Region<'tcx>,
+ error_region: Option<ty::Region<'tcx>>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ mbcx.infcx.tcx.infer_ctxt().enter_with_canonical(
+ cause.span,
+ &self.canonical_query,
+ |ref infcx, key, _| {
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+
+ let mut selcx = SelectionContext::new(infcx);
+
+ // FIXME(lqd): Unify and de-duplicate the following with the actual
+ // `rustc_traits::type_op::type_op_normalize` query to allow the span we need in the
+ // `ObligationCause`. The normalization results are currently different between
+ // `AtExt::normalize` used in the query and `normalize` called below: the former fails
+ // to normalize the `nll/relate_tys/impl-fn-ignore-binder-via-bottom.rs` test. Check
+ // after #85499 lands to see if its fixes have erased this difference.
+ let (param_env, value) = key.into_parts();
+ let Normalized { value: _, obligations } = rustc_trait_selection::traits::normalize(
+ &mut selcx,
+ param_env,
+ cause,
+ value.value,
+ );
+ fulfill_cx.register_predicate_obligations(infcx, obligations);
+
+ try_extract_error_from_fulfill_cx(
+ fulfill_cx,
+ infcx,
+ placeholder_region,
+ error_region,
+ )
+ },
+ )
+ }
+}
+
+struct AscribeUserTypeQuery<'tcx> {
+ canonical_query: Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::AscribeUserType<'tcx>>>,
+ base_universe: ty::UniverseIndex,
+}
+
+impl<'tcx> TypeOpInfo<'tcx> for AscribeUserTypeQuery<'tcx> {
+ fn fallback_error(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ // FIXME: This error message isn't great, but it doesn't show up in the existing UI tests,
+ // and is only the fallback when the nice error fails. Consider improving this some more.
+ tcx.sess.create_err(HigherRankedLifetimeError { cause: None, span })
+ }
+
+ fn base_universe(&self) -> ty::UniverseIndex {
+ self.base_universe
+ }
+
+ fn nice_error(
+ &self,
+ mbcx: &mut MirBorrowckCtxt<'_, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ placeholder_region: ty::Region<'tcx>,
+ error_region: Option<ty::Region<'tcx>>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ mbcx.infcx.tcx.infer_ctxt().enter_with_canonical(
+ cause.span,
+ &self.canonical_query,
+ |ref infcx, key, _| {
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+ type_op_ascribe_user_type_with_span(infcx, &mut *fulfill_cx, key, Some(cause.span))
+ .ok()?;
+ try_extract_error_from_fulfill_cx(
+ fulfill_cx,
+ infcx,
+ placeholder_region,
+ error_region,
+ )
+ },
+ )
+ }
+}
+
+impl<'tcx> TypeOpInfo<'tcx> for crate::type_check::InstantiateOpaqueType<'tcx> {
+ fn fallback_error(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ // FIXME: This error message isn't great, but it doesn't show up in the existing UI tests,
+ // and is only the fallback when the nice error fails. Consider improving this some more.
+ tcx.sess.create_err(HigherRankedLifetimeError { cause: None, span })
+ }
+
+ fn base_universe(&self) -> ty::UniverseIndex {
+ self.base_universe.unwrap()
+ }
+
+ fn nice_error(
+ &self,
+ mbcx: &mut MirBorrowckCtxt<'_, 'tcx>,
+ _cause: ObligationCause<'tcx>,
+ placeholder_region: ty::Region<'tcx>,
+ error_region: Option<ty::Region<'tcx>>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ try_extract_error_from_region_constraints(
+ mbcx.infcx,
+ placeholder_region,
+ error_region,
+ self.region_constraints.as_ref().unwrap(),
+ // We're using the original `InferCtxt` that we
+ // started MIR borrowchecking with, so the region
+ // constraints have already been taken. Use the data from
+ // our `mbcx` instead.
+ |vid| mbcx.regioncx.var_infos[vid].origin,
+ |vid| mbcx.regioncx.var_infos[vid].universe,
+ )
+ }
+}
+
+#[instrument(skip(fulfill_cx, infcx), level = "debug")]
+fn try_extract_error_from_fulfill_cx<'tcx>(
+ mut fulfill_cx: Box<dyn TraitEngine<'tcx> + 'tcx>,
+ infcx: &InferCtxt<'_, 'tcx>,
+ placeholder_region: ty::Region<'tcx>,
+ error_region: Option<ty::Region<'tcx>>,
+) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ // We generally shouldn't have errors here because the query was
+ // already run, but there's no point using `delay_span_bug`
+ // when we're going to emit an error here anyway.
+ let _errors = fulfill_cx.select_all_or_error(infcx);
+ let region_constraints = infcx.with_region_constraints(|r| r.clone());
+ try_extract_error_from_region_constraints(
+ infcx,
+ placeholder_region,
+ error_region,
+ &region_constraints,
+ |vid| infcx.region_var_origin(vid),
+ |vid| infcx.universe_of_region(infcx.tcx.mk_region(ty::ReVar(vid))),
+ )
+}
+
+fn try_extract_error_from_region_constraints<'tcx>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ placeholder_region: ty::Region<'tcx>,
+ error_region: Option<ty::Region<'tcx>>,
+ region_constraints: &RegionConstraintData<'tcx>,
+ mut region_var_origin: impl FnMut(RegionVid) -> RegionVariableOrigin,
+ mut universe_of_region: impl FnMut(RegionVid) -> UniverseIndex,
+) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ let (sub_region, cause) =
+ region_constraints.constraints.iter().find_map(|(constraint, cause)| {
+ match *constraint {
+ Constraint::RegSubReg(sub, sup) if sup == placeholder_region && sup != sub => {
+ Some((sub, cause.clone()))
+ }
+ // FIXME: Should this check the universe of the var?
+ Constraint::VarSubReg(vid, sup) if sup == placeholder_region => {
+ Some((infcx.tcx.mk_region(ty::ReVar(vid)), cause.clone()))
+ }
+ _ => None,
+ }
+ })?;
+
+ debug!(?sub_region, "cause = {:#?}", cause);
+ let nice_error = match (error_region, *sub_region) {
+ (Some(error_region), ty::ReVar(vid)) => NiceRegionError::new(
+ infcx,
+ RegionResolutionError::SubSupConflict(
+ vid,
+ region_var_origin(vid),
+ cause.clone(),
+ error_region,
+ cause.clone(),
+ placeholder_region,
+ vec![],
+ ),
+ ),
+ (Some(error_region), _) => NiceRegionError::new(
+ infcx,
+ RegionResolutionError::ConcreteFailure(cause.clone(), error_region, placeholder_region),
+ ),
+ // Note universe here is wrong...
+ (None, ty::ReVar(vid)) => NiceRegionError::new(
+ infcx,
+ RegionResolutionError::UpperBoundUniverseConflict(
+ vid,
+ region_var_origin(vid),
+ universe_of_region(vid),
+ cause.clone(),
+ placeholder_region,
+ ),
+ ),
+ (None, _) => NiceRegionError::new(
+ infcx,
+ RegionResolutionError::ConcreteFailure(cause.clone(), sub_region, placeholder_region),
+ ),
+ };
+ nice_error.try_report_from_nll().or_else(|| {
+ if let SubregionOrigin::Subtype(trace) = cause {
+ Some(
+ infcx.report_and_explain_type_error(*trace, &TypeError::RegionsPlaceholderMismatch),
+ )
+ } else {
+ None
+ }
+ })
+}
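+
+// Sketch of the scan above (illustrative): we walk the recorded region
+// constraints and take the first one whose `sup` side is exactly the
+// placeholder region, e.g. a `VarSubReg(vid, placeholder)`, then build the
+// most specific `NiceRegionError` we can (sub/sup conflict, universe
+// conflict, or concrete failure) from its `sub` side and the recorded cause.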
diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
new file mode 100644
index 000000000..8bc8964bb
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs
@@ -0,0 +1,2773 @@
+use either::Either;
+use rustc_const_eval::util::CallKind;
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{
+ struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::intravisit::{walk_block, walk_expr, Visitor};
+use rustc_hir::{AsyncGeneratorKind, GeneratorKind};
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::ObligationCause;
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::mir::{
+ self, AggregateKind, BindingForm, BorrowKind, ClearCrossCrate, ConstraintCategory,
+ FakeReadCause, LocalDecl, LocalInfo, LocalKind, Location, Operand, Place, PlaceRef,
+ ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, VarBindingForm,
+};
+use rustc_middle::ty::{self, subst::Subst, suggest_constraining_type_params, PredicateKind, Ty};
+use rustc_mir_dataflow::move_paths::{InitKind, MoveOutIndex, MovePathIndex};
+use rustc_span::def_id::LocalDefId;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::symbol::sym;
+use rustc_span::{BytePos, Span, Symbol};
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::TraitEngineExt as _;
+
+use crate::borrow_set::TwoPhaseActivation;
+use crate::borrowck_errors;
+
+use crate::diagnostics::conflict_errors::StorageDeadOrDrop::LocalStorageDead;
+use crate::diagnostics::find_all_local_uses;
+use crate::{
+ borrow_set::BorrowData, diagnostics::Instance, prefixes::IsPrefixOf,
+ InitializationRequiringAction, MirBorrowckCtxt, PrefixSet, WriteKind,
+};
+
+use super::{
+ explain_borrow::{BorrowExplanation, LaterUseKind},
+ DescribePlaceOpt, RegionName, RegionNameSource, UseSpans,
+};
+
+#[derive(Debug)]
+struct MoveSite {
+ /// Index of the "move out" that we found. The `MoveData` can
+ /// then tell us where the move occurred.
+ moi: MoveOutIndex,
+
+ /// `true` if we traversed a back edge while walking from the point
+ /// of error to the move site.
+ traversed_back_edge: bool,
+}
+
+/// Which case a StorageDeadOrDrop is for.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum StorageDeadOrDrop<'tcx> {
+ LocalStorageDead,
+ BoxedStorageDead,
+ Destructor(Ty<'tcx>),
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+ pub(crate) fn report_use_of_moved_or_uninitialized(
+ &mut self,
+ location: Location,
+ desired_action: InitializationRequiringAction,
+ (moved_place, used_place, span): (PlaceRef<'tcx>, PlaceRef<'tcx>, Span),
+ mpi: MovePathIndex,
+ ) {
+ debug!(
+ "report_use_of_moved_or_uninitialized: location={:?} desired_action={:?} \
+ moved_place={:?} used_place={:?} span={:?} mpi={:?}",
+ location, desired_action, moved_place, used_place, span, mpi
+ );
+
+ let use_spans =
+ self.move_spans(moved_place, location).or_else(|| self.borrow_spans(span, location));
+ let span = use_spans.args_or_use();
+
+ let (move_site_vec, maybe_reinitialized_locations) = self.get_moved_indexes(location, mpi);
+ debug!(
+ "report_use_of_moved_or_uninitialized: move_site_vec={:?} use_spans={:?}",
+ move_site_vec, use_spans
+ );
+ let move_out_indices: Vec<_> =
+ move_site_vec.iter().map(|move_site| move_site.moi).collect();
+
+ if move_out_indices.is_empty() {
+ let root_place = PlaceRef { projection: &[], ..used_place };
+
+ if !self.uninitialized_error_reported.insert(root_place) {
+ debug!(
+ "report_use_of_moved_or_uninitialized place: error about {:?} suppressed",
+ root_place
+ );
+ return;
+ }
+
+ let err = self.report_use_of_uninitialized(
+ mpi,
+ used_place,
+ moved_place,
+ desired_action,
+ span,
+ use_spans,
+ );
+ self.buffer_error(err);
+ } else {
+ if let Some((reported_place, _)) = self.has_move_error(&move_out_indices) {
+ if self.prefixes(*reported_place, PrefixSet::All).any(|p| p == used_place) {
+ debug!(
+ "report_use_of_moved_or_uninitialized place: error suppressed mois={:?}",
+ move_out_indices
+ );
+ return;
+ }
+ }
+
+ let is_partial_move = move_site_vec.iter().any(|move_site| {
+ let move_out = self.move_data.moves[(*move_site).moi];
+ let moved_place = &self.move_data.move_paths[move_out.path].place;
+ // `*(_1)` where `_1` is a `Box` is actually a move out.
+ let is_box_move = moved_place.as_ref().projection == [ProjectionElem::Deref]
+ && self.body.local_decls[moved_place.local].ty.is_box();
+
+ !is_box_move
+ && used_place != moved_place.as_ref()
+ && used_place.is_prefix_of(moved_place.as_ref())
+ });
+
+ let partial_str = if is_partial_move { "partial " } else { "" };
+ let partially_str = if is_partial_move { "partially " } else { "" };
+
+ let mut err = self.cannot_act_on_moved_value(
+ span,
+ desired_action.as_noun(),
+ partially_str,
+ self.describe_place_with_options(
+ moved_place,
+ DescribePlaceOpt { including_downcast: true, including_tuple_field: true },
+ ),
+ );
+
+ let reinit_spans = maybe_reinitialized_locations
+ .iter()
+ .take(3)
+ .map(|loc| {
+ self.move_spans(self.move_data.move_paths[mpi].place.as_ref(), *loc)
+ .args_or_use()
+ })
+ .collect::<Vec<Span>>();
+
+ let reinits = maybe_reinitialized_locations.len();
+ if reinits == 1 {
+ err.span_label(reinit_spans[0], "this reinitialization might get skipped");
+ } else if reinits > 1 {
+ err.span_note(
+ MultiSpan::from_spans(reinit_spans),
+ &if reinits <= 3 {
+ format!("these {} reinitializations might get skipped", reinits)
+ } else {
+ format!(
+ "these 3 reinitializations and {} other{} might get skipped",
+ reinits - 3,
+ if reinits == 4 { "" } else { "s" }
+ )
+ },
+ );
+ }
+
+ self.add_moved_or_invoked_closure_note(location, used_place, &mut err);
+
+ let mut is_loop_move = false;
+ let mut in_pattern = false;
+
+ for move_site in &move_site_vec {
+ let move_out = self.move_data.moves[(*move_site).moi];
+ let moved_place = &self.move_data.move_paths[move_out.path].place;
+
+ let move_spans = self.move_spans(moved_place.as_ref(), move_out.source);
+ let move_span = move_spans.args_or_use();
+
+ let move_msg = if move_spans.for_closure() { " into closure" } else { "" };
+
+ let loop_message = if location == move_out.source || move_site.traversed_back_edge {
+ ", in previous iteration of loop"
+ } else {
+ ""
+ };
+
+ if location == move_out.source {
+ is_loop_move = true;
+ }
+
+ self.explain_captures(
+ &mut err,
+ span,
+ move_span,
+ move_spans,
+ *moved_place,
+ Some(used_place),
+ partially_str,
+ loop_message,
+ move_msg,
+ is_loop_move,
+ maybe_reinitialized_locations.is_empty(),
+ );
+
+ if let (UseSpans::PatUse(span), []) =
+ (move_spans, &maybe_reinitialized_locations[..])
+ {
+ if maybe_reinitialized_locations.is_empty() {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ &format!(
+ "borrow this field in the pattern to avoid moving {}",
+ self.describe_place(moved_place.as_ref())
+ .map(|n| format!("`{}`", n))
+ .unwrap_or_else(|| "the value".to_string())
+ ),
+ "ref ",
+ Applicability::MachineApplicable,
+ );
+ in_pattern = true;
+ }
+ }
+ }
+
+ use_spans.var_span_label_path_only(
+ &mut err,
+ format!("{} occurs due to use{}", desired_action.as_noun(), use_spans.describe()),
+ );
+
+ if !is_loop_move {
+ err.span_label(
+ span,
+ format!(
+ "value {} here after {}move",
+ desired_action.as_verb_in_past_tense(),
+ partial_str
+ ),
+ );
+ }
+
+ let ty = used_place.ty(self.body, self.infcx.tcx).ty;
+ let needs_note = match ty.kind() {
+ ty::Closure(id, _) => {
+ let tables = self.infcx.tcx.typeck(id.expect_local());
+ let hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(id.expect_local());
+
+ tables.closure_kind_origins().get(hir_id).is_none()
+ }
+ _ => true,
+ };
+
+ let mpi = self.move_data.moves[move_out_indices[0]].path;
+ let place = &self.move_data.move_paths[mpi].place;
+ let ty = place.ty(self.body, self.infcx.tcx).ty;
+
+ // If we're in a pattern, we do nothing in favor of the previous suggestion (#80913).
+ if is_loop_move && !in_pattern {
+ if let ty::Ref(_, _, hir::Mutability::Mut) = ty.kind() {
+ // We have a `&mut` ref, we need to reborrow on each iteration (#62112).
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ &format!(
+ "consider creating a fresh reborrow of {} here",
+ self.describe_place(moved_place)
+ .map(|n| format!("`{}`", n))
+ .unwrap_or_else(|| "the mutable reference".to_string()),
+ ),
+ "&mut *",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
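+
+ // Illustrative user code (an assumption) for the reborrow suggestion above:
+ //
+ //     fn use_it(v: &mut Vec<u32>) {}
+ //     fn repeat(v: &mut Vec<u32>) {
+ //         for _ in 0..2 {
+ //             use_it(v); // `v` moved in the previous iteration;
+ //                        // suggest `use_it(&mut *v)` to reborrow
+ //         }
+ //     }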
+
+ let opt_name = self.describe_place_with_options(
+ place.as_ref(),
+ DescribePlaceOpt { including_downcast: true, including_tuple_field: true },
+ );
+ let note_msg = match opt_name {
+ Some(ref name) => format!("`{}`", name),
+ None => "value".to_owned(),
+ };
+ if self.suggest_borrow_fn_like(&mut err, ty, &move_site_vec, &note_msg) {
+ // Suppress the next suggestion, since we don't want to put more bounds onto
+ // something that already has `Fn`-like bounds (or is a closure): we can't
+ // restrict it any further anyway.
+ } else {
+ self.suggest_adding_copy_bounds(&mut err, ty, span);
+ }
+
+ if needs_note {
+ let span = if let Some(local) = place.as_local() {
+ Some(self.body.local_decls[local].source_info.span)
+ } else {
+ None
+ };
+ self.note_type_does_not_implement_copy(&mut err, &note_msg, ty, span, partial_str);
+ }
+
+ if let UseSpans::FnSelfUse {
+ kind: CallKind::DerefCoercion { deref_target, deref_target_ty, .. },
+ ..
+ } = use_spans
+ {
+ err.note(&format!(
+ "{} occurs due to deref coercion to `{}`",
+ desired_action.as_noun(),
+ deref_target_ty
+ ));
+
+ // Check first whether the source is accessible (issue #87060)
+ if self.infcx.tcx.sess.source_map().is_span_accessible(deref_target) {
+ err.span_note(deref_target, "deref defined here");
+ }
+ }
+
+ self.buffer_move_error(move_out_indices, (used_place, err));
+ }
+ }
+
+ fn report_use_of_uninitialized(
+ &self,
+ mpi: MovePathIndex,
+ used_place: PlaceRef<'tcx>,
+ moved_place: PlaceRef<'tcx>,
+ desired_action: InitializationRequiringAction,
+ span: Span,
+ use_spans: UseSpans<'tcx>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ // We need all statements in the body where the binding was assigned to, so that we can
+ // later find all the branching code paths where the binding *wasn't* assigned to.
+ let inits = &self.move_data.init_path_map[mpi];
+ let move_path = &self.move_data.move_paths[mpi];
+ let decl_span = self.body.local_decls[move_path.place.local].source_info.span;
+ let mut spans = vec![];
+ for init_idx in inits {
+ let init = &self.move_data.inits[*init_idx];
+ let span = init.span(&self.body);
+ if !span.is_dummy() {
+ spans.push(span);
+ }
+ }
+
+ let (name, desc) = match self.describe_place_with_options(
+ moved_place,
+ DescribePlaceOpt { including_downcast: true, including_tuple_field: true },
+ ) {
+ Some(name) => (format!("`{name}`"), format!("`{name}` ")),
+ None => ("the variable".to_string(), String::new()),
+ };
+ let path = match self.describe_place_with_options(
+ used_place,
+ DescribePlaceOpt { including_downcast: true, including_tuple_field: true },
+ ) {
+ Some(name) => format!("`{name}`"),
+ None => "value".to_string(),
+ };
+
+ // We use the statements where the binding was initialized, and inspect the HIR to look
+ // for the branching codepaths that aren't covered, to point at them.
+ let map = self.infcx.tcx.hir();
+ let body_id = map.body_owned_by(self.mir_def_id());
+ let body = map.body(body_id);
+
+ let mut visitor = ConditionVisitor { spans: &spans, name: &name, errors: vec![] };
+ visitor.visit_body(&body);
+
+ let isnt_initialized = if let InitializationRequiringAction::PartialAssignment
+ | InitializationRequiringAction::Assignment = desired_action
+ {
+ // The same error is emitted for bindings that are *sometimes* initialized and the ones
+ // that are *partially* initialized by assigning to a field of an uninitialized
+ // binding. We differentiate between them for more accurate wording here.
+ "isn't fully initialized"
+ } else if spans
+ .iter()
+ .filter(|i| {
+ // We filter these to avoid misleading wording in cases like the following,
+ // where `x` has an `init`, but it is in the same place we're looking at:
+ // ```
+ // let x;
+ // x += 1;
+ // ```
+ !i.contains(span)
+ // We filter these to avoid an incorrect main message on `match-cfg-fake-edges.rs`
+ && !visitor
+ .errors
+ .iter()
+ .map(|(sp, _)| *sp)
+ .any(|sp| span < sp && !sp.contains(span))
+ })
+ .count()
+ == 0
+ {
+ "isn't initialized"
+ } else {
+ "is possibly-uninitialized"
+ };
+
+ let used = desired_action.as_general_verb_in_past_tense();
+ let mut err =
+ struct_span_err!(self, span, E0381, "{used} binding {desc}{isnt_initialized}");
+ use_spans.var_span_label_path_only(
+ &mut err,
+ format!("{} occurs due to use{}", desired_action.as_noun(), use_spans.describe()),
+ );
+
+ if let InitializationRequiringAction::PartialAssignment
+ | InitializationRequiringAction::Assignment = desired_action
+ {
+ err.help(
+ "partial initialization isn't supported, fully initialize the binding with a \
+ default value and mutate it, or use `std::mem::MaybeUninit`",
+ );
+ }
+ err.span_label(span, format!("{path} {used} here but it {isnt_initialized}"));
+
+ let mut shown = false;
+ for (sp, label) in visitor.errors {
+ if sp < span && !sp.overlaps(span) {
+ // When we have a case like `match-cfg-fake-edges.rs`, we don't want to mention
+ // match arms coming after the primary span because they aren't relevant:
+ // ```
+ // let x;
+ // match y {
+ // _ if { x = 2; true } => {}
+ // _ if {
+ // x; //~ ERROR
+ // false
+ // } => {}
+ // _ => {} // We don't want to point to this.
+ // };
+ // ```
+ err.span_label(sp, &label);
+ shown = true;
+ }
+ }
+ if !shown {
+ for sp in &spans {
+ if *sp < span && !sp.overlaps(span) {
+ err.span_label(*sp, "binding initialized here in some conditions");
+ }
+ }
+ }
+ err.span_label(decl_span, "binding declared here but left uninitialized");
+ err
+ }
+
+ fn suggest_borrow_fn_like(
+ &self,
+ err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ty: Ty<'tcx>,
+ move_sites: &[MoveSite],
+ value_name: &str,
+ ) -> bool {
+ let tcx = self.infcx.tcx;
+
+ // Find out if the predicates show that the type is a Fn or FnMut
+ let find_fn_kind_from_did =
+ |predicates: ty::EarlyBinder<&[(ty::Predicate<'tcx>, Span)]>, substs| {
+ predicates.0.iter().find_map(|(pred, _)| {
+ let pred = if let Some(substs) = substs {
+ predicates.rebind(*pred).subst(tcx, substs).kind().skip_binder()
+ } else {
+ pred.kind().skip_binder()
+ };
+ if let ty::PredicateKind::Trait(pred) = pred && pred.self_ty() == ty {
+ if Some(pred.def_id()) == tcx.lang_items().fn_trait() {
+ return Some(hir::Mutability::Not);
+ } else if Some(pred.def_id()) == tcx.lang_items().fn_mut_trait() {
+ return Some(hir::Mutability::Mut);
+ }
+ }
+ None
+ })
+ };
+
+ // If the type is opaque/param/closure, and it is Fn or FnMut, let's suggest (mutably)
+ // borrowing the type, since `&mut F: FnMut` iff `F: FnMut` and similarly for `Fn`.
+ // These types seem opaque enough that they could be substituted with their
+ // borrowed variants in a function body when we see a move error.
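+ //
+ // Illustrative trigger (an assumption): in
+ //
+ //     fn spawn_twice<F: Fn()>(f: F) { go(f); go(f); }
+ //
+ // the second `go(f)` uses a moved value; since `F: Fn`, we suggest borrowing
+ // at the move sites, `go(&f)`, marked `MaybeIncorrect` because `go` may not
+ // accept a reference.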
+ let borrow_level = match ty.kind() {
+ ty::Param(_) => find_fn_kind_from_did(
+ tcx.bound_explicit_predicates_of(self.mir_def_id().to_def_id())
+ .map_bound(|p| p.predicates),
+ None,
+ ),
+ ty::Opaque(did, substs) => {
+ find_fn_kind_from_did(tcx.bound_explicit_item_bounds(*did), Some(*substs))
+ }
+ ty::Closure(_, substs) => match substs.as_closure().kind() {
+ ty::ClosureKind::Fn => Some(hir::Mutability::Not),
+ ty::ClosureKind::FnMut => Some(hir::Mutability::Mut),
+ _ => None,
+ },
+ _ => None,
+ };
+
+ let Some(borrow_level) = borrow_level else { return false; };
+ let sugg = move_sites
+ .iter()
+ .map(|move_site| {
+ let move_out = self.move_data.moves[(*move_site).moi];
+ let moved_place = &self.move_data.move_paths[move_out.path].place;
+ let move_spans = self.move_spans(moved_place.as_ref(), move_out.source);
+ let move_span = move_spans.args_or_use();
+ let suggestion = if borrow_level == hir::Mutability::Mut {
+ "&mut ".to_string()
+ } else {
+ "&".to_string()
+ };
+ (move_span.shrink_to_lo(), suggestion)
+ })
+ .collect();
+ err.multipart_suggestion_verbose(
+ &format!(
+ "consider {}borrowing {value_name}",
+ if borrow_level == hir::Mutability::Mut { "mutably " } else { "" }
+ ),
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ true
+ }
+
+ fn suggest_adding_copy_bounds(
+ &self,
+ err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ty: Ty<'tcx>,
+ span: Span,
+ ) {
+ let tcx = self.infcx.tcx;
+ let generics = tcx.generics_of(self.mir_def_id());
+
+ let Some(hir_generics) = tcx
+ .typeck_root_def_id(self.mir_def_id().to_def_id())
+ .as_local()
+ .and_then(|def_id| tcx.hir().get_generics(def_id))
+ else { return; };
+ // Try to find predicates on *generic params* that would allow copying `ty`
+ let predicates: Result<Vec<_>, _> = tcx.infer_ctxt().enter(|infcx| {
+ let mut fulfill_cx = <dyn rustc_infer::traits::TraitEngine<'_>>::new(infcx.tcx);
+
+ let copy_did = infcx.tcx.lang_items().copy_trait().unwrap();
+ let cause = ObligationCause::new(
+ span,
+ self.mir_hir_id(),
+ rustc_infer::traits::ObligationCauseCode::MiscObligation,
+ );
+ fulfill_cx.register_bound(
+ &infcx,
+ self.param_env,
+ // Erase any region vids from the type, which may not be resolved
+ infcx.tcx.erase_regions(ty),
+ copy_did,
+ cause,
+ );
+ // Select all, including ambiguous predicates
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+
+ // Only emit the suggestion if all required predicates are on generic params.
+ errors
+ .into_iter()
+ .map(|err| match err.obligation.predicate.kind().skip_binder() {
+ PredicateKind::Trait(predicate) => match predicate.self_ty().kind() {
+ ty::Param(param_ty) => Ok((
+ generics.type_param(param_ty, tcx),
+ predicate.trait_ref.print_only_trait_path().to_string(),
+ )),
+ _ => Err(()),
+ },
+ _ => Err(()),
+ })
+ .collect()
+ });
+
+ if let Ok(predicates) = predicates {
+ suggest_constraining_type_params(
+ tcx,
+ hir_generics,
+ err,
+ predicates
+ .iter()
+ .map(|(param, constraint)| (param.name.as_str(), &**constraint, None)),
+ );
+ }
+ }
+
+ pub(crate) fn report_move_out_while_borrowed(
+ &mut self,
+ location: Location,
+ (place, span): (Place<'tcx>, Span),
+ borrow: &BorrowData<'tcx>,
+ ) {
+ debug!(
+ "report_move_out_while_borrowed: location={:?} place={:?} span={:?} borrow={:?}",
+ location, place, span, borrow
+ );
+ let value_msg = self.describe_any_place(place.as_ref());
+ let borrow_msg = self.describe_any_place(borrow.borrowed_place.as_ref());
+
+ let borrow_spans = self.retrieve_borrow_spans(borrow);
+ let borrow_span = borrow_spans.args_or_use();
+
+ let move_spans = self.move_spans(place.as_ref(), location);
+ let span = move_spans.args_or_use();
+
+ let mut err =
+ self.cannot_move_when_borrowed(span, &self.describe_any_place(place.as_ref()));
+ err.span_label(borrow_span, format!("borrow of {} occurs here", borrow_msg));
+ err.span_label(span, format!("move out of {} occurs here", value_msg));
+
+ borrow_spans.var_span_label_path_only(
+ &mut err,
+ format!("borrow occurs due to use{}", borrow_spans.describe()),
+ );
+
+ move_spans.var_span_label(
+ &mut err,
+ format!("move occurs due to use{}", move_spans.describe()),
+ "moved",
+ );
+
+ self.explain_why_borrow_contains_point(location, borrow, None)
+ .add_explanation_to_diagnostic(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &mut err,
+ "",
+ Some(borrow_span),
+ None,
+ );
+ self.buffer_error(err);
+ }
+
+ pub(crate) fn report_use_while_mutably_borrowed(
+ &mut self,
+ location: Location,
+ (place, _span): (Place<'tcx>, Span),
+ borrow: &BorrowData<'tcx>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let borrow_spans = self.retrieve_borrow_spans(borrow);
+ let borrow_span = borrow_spans.args_or_use();
+
+ // Conflicting borrows are reported separately, so only check for move
+ // captures.
+ let use_spans = self.move_spans(place.as_ref(), location);
+ let span = use_spans.var_or_use();
+
+ // If the attempted use is in a closure, then we do not care about the path span of the
+ // place we are currently trying to use. We call `var_span_label` on `borrow_spans` to
+ // annotate whether the existing borrow was in a closure.
+ let mut err = self.cannot_use_when_mutably_borrowed(
+ span,
+ &self.describe_any_place(place.as_ref()),
+ borrow_span,
+ &self.describe_any_place(borrow.borrowed_place.as_ref()),
+ );
+
+ borrow_spans.var_span_label(
+ &mut err,
+ {
+ let place = &borrow.borrowed_place;
+ let desc_place = self.describe_any_place(place.as_ref());
+ format!("borrow occurs due to use of {}{}", desc_place, borrow_spans.describe())
+ },
+ "mutable",
+ );
+
+ self.explain_why_borrow_contains_point(location, borrow, None)
+ .add_explanation_to_diagnostic(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &mut err,
+ "",
+ None,
+ None,
+ );
+ err
+ }
+
+ pub(crate) fn report_conflicting_borrow(
+ &mut self,
+ location: Location,
+ (place, span): (Place<'tcx>, Span),
+ gen_borrow_kind: BorrowKind,
+ issued_borrow: &BorrowData<'tcx>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let issued_spans = self.retrieve_borrow_spans(issued_borrow);
+ let issued_span = issued_spans.args_or_use();
+
+ let borrow_spans = self.borrow_spans(span, location);
+ let span = borrow_spans.args_or_use();
+
+ let container_name = if issued_spans.for_generator() || borrow_spans.for_generator() {
+ "generator"
+ } else {
+ "closure"
+ };
+
+ let (desc_place, msg_place, msg_borrow, union_type_name) =
+ self.describe_place_for_conflicting_borrow(place, issued_borrow.borrowed_place);
+
+ let explanation = self.explain_why_borrow_contains_point(location, issued_borrow, None);
+ let second_borrow_desc = if explanation.is_explained() { "second " } else { "" };
+
+ // FIXME: supply non-"" `opt_via` when appropriate
+ let first_borrow_desc;
+ let mut err = match (gen_borrow_kind, issued_borrow.kind) {
+ (BorrowKind::Shared, BorrowKind::Mut { .. }) => {
+ first_borrow_desc = "mutable ";
+ self.cannot_reborrow_already_borrowed(
+ span,
+ &desc_place,
+ &msg_place,
+ "immutable",
+ issued_span,
+ "it",
+ "mutable",
+ &msg_borrow,
+ None,
+ )
+ }
+ (BorrowKind::Mut { .. }, BorrowKind::Shared) => {
+ first_borrow_desc = "immutable ";
+ self.cannot_reborrow_already_borrowed(
+ span,
+ &desc_place,
+ &msg_place,
+ "mutable",
+ issued_span,
+ "it",
+ "immutable",
+ &msg_borrow,
+ None,
+ )
+ }
+
+ (BorrowKind::Mut { .. }, BorrowKind::Mut { .. }) => {
+ first_borrow_desc = "first ";
+ let mut err = self.cannot_mutably_borrow_multiply(
+ span,
+ &desc_place,
+ &msg_place,
+ issued_span,
+ &msg_borrow,
+ None,
+ );
+ self.suggest_split_at_mut_if_applicable(
+ &mut err,
+ place,
+ issued_borrow.borrowed_place,
+ );
+ err
+ }
+
+ (BorrowKind::Unique, BorrowKind::Unique) => {
+ first_borrow_desc = "first ";
+ self.cannot_uniquely_borrow_by_two_closures(span, &desc_place, issued_span, None)
+ }
+
+ (BorrowKind::Mut { .. } | BorrowKind::Unique, BorrowKind::Shallow) => {
+ if let Some(immutable_section_description) =
+ self.classify_immutable_section(issued_borrow.assigned_place)
+ {
+ let mut err = self.cannot_mutate_in_immutable_section(
+ span,
+ issued_span,
+ &desc_place,
+ immutable_section_description,
+ "mutably borrow",
+ );
+ borrow_spans.var_span_label(
+ &mut err,
+ format!(
+ "borrow occurs due to use of {}{}",
+ desc_place,
+ borrow_spans.describe(),
+ ),
+ "immutable",
+ );
+
+ return err;
+ } else {
+ first_borrow_desc = "immutable ";
+ self.cannot_reborrow_already_borrowed(
+ span,
+ &desc_place,
+ &msg_place,
+ "mutable",
+ issued_span,
+ "it",
+ "immutable",
+ &msg_borrow,
+ None,
+ )
+ }
+ }
+
+ (BorrowKind::Unique, _) => {
+ first_borrow_desc = "first ";
+ self.cannot_uniquely_borrow_by_one_closure(
+ span,
+ container_name,
+ &desc_place,
+ "",
+ issued_span,
+ "it",
+ "",
+ None,
+ )
+ }
+
+ (BorrowKind::Shared, BorrowKind::Unique) => {
+ first_borrow_desc = "first ";
+ self.cannot_reborrow_already_uniquely_borrowed(
+ span,
+ container_name,
+ &desc_place,
+ "",
+ "immutable",
+ issued_span,
+ "",
+ None,
+ second_borrow_desc,
+ )
+ }
+
+ (BorrowKind::Mut { .. }, BorrowKind::Unique) => {
+ first_borrow_desc = "first ";
+ self.cannot_reborrow_already_uniquely_borrowed(
+ span,
+ container_name,
+ &desc_place,
+ "",
+ "mutable",
+ issued_span,
+ "",
+ None,
+ second_borrow_desc,
+ )
+ }
+
+ (BorrowKind::Shared, BorrowKind::Shared | BorrowKind::Shallow)
+ | (
+ BorrowKind::Shallow,
+ BorrowKind::Mut { .. }
+ | BorrowKind::Unique
+ | BorrowKind::Shared
+ | BorrowKind::Shallow,
+ ) => unreachable!(),
+ };
+
+ if issued_spans == borrow_spans {
+ borrow_spans.var_span_label(
+ &mut err,
+ format!("borrows occur due to use of {}{}", desc_place, borrow_spans.describe(),),
+ gen_borrow_kind.describe_mutability(),
+ );
+ } else {
+ let borrow_place = &issued_borrow.borrowed_place;
+ let borrow_place_desc = self.describe_any_place(borrow_place.as_ref());
+ issued_spans.var_span_label(
+ &mut err,
+ format!(
+ "first borrow occurs due to use of {}{}",
+ borrow_place_desc,
+ issued_spans.describe(),
+ ),
+ issued_borrow.kind.describe_mutability(),
+ );
+
+ borrow_spans.var_span_label(
+ &mut err,
+ format!(
+ "second borrow occurs due to use of {}{}",
+ desc_place,
+ borrow_spans.describe(),
+ ),
+ gen_borrow_kind.describe_mutability(),
+ );
+ }
+
+ if union_type_name != "" {
+ err.note(&format!(
+ "{} is a field of the union `{}`, so it overlaps the field {}",
+ msg_place, union_type_name, msg_borrow,
+ ));
+ }
+
+ explanation.add_explanation_to_diagnostic(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &mut err,
+ first_borrow_desc,
+ None,
+ Some((issued_span, span)),
+ );
+
+ self.suggest_using_local_if_applicable(&mut err, location, issued_borrow, explanation);
+
+ err
+ }
+
+ #[instrument(level = "debug", skip(self, err))]
+ fn suggest_using_local_if_applicable(
+ &self,
+ err: &mut Diagnostic,
+ location: Location,
+ issued_borrow: &BorrowData<'tcx>,
+ explanation: BorrowExplanation<'tcx>,
+ ) {
+ let used_in_call = matches!(
+ explanation,
+ BorrowExplanation::UsedLater(LaterUseKind::Call | LaterUseKind::Other, _call_span, _)
+ );
+ if !used_in_call {
+ debug!("not later used in call");
+ return;
+ }
+
+ let use_span =
+ if let BorrowExplanation::UsedLater(LaterUseKind::Other, use_span, _) = explanation {
+ Some(use_span)
+ } else {
+ None
+ };
+
+ let outer_call_loc =
+ if let TwoPhaseActivation::ActivatedAt(loc) = issued_borrow.activation_location {
+ loc
+ } else {
+ issued_borrow.reserve_location
+ };
+ let outer_call_stmt = self.body.stmt_at(outer_call_loc);
+
+ let inner_param_location = location;
+ let Some(inner_param_stmt) = self.body.stmt_at(inner_param_location).left() else {
+ debug!("`inner_param_location` {:?} is not for a statement", inner_param_location);
+ return;
+ };
+ let Some(&inner_param) = inner_param_stmt.kind.as_assign().map(|(p, _)| p) else {
+ debug!(
+ "`inner_param_location` {:?} is not for an assignment: {:?}",
+ inner_param_location, inner_param_stmt
+ );
+ return;
+ };
+ let inner_param_uses = find_all_local_uses::find(self.body, inner_param.local);
+ let Some((inner_call_loc, inner_call_term)) = inner_param_uses.into_iter().find_map(|loc| {
+ let Either::Right(term) = self.body.stmt_at(loc) else {
+ debug!("{:?} is a statement, so it can't be a call", loc);
+ return None;
+ };
+ let TerminatorKind::Call { args, .. } = &term.kind else {
+ debug!("not a call: {:?}", term);
+ return None;
+ };
+ debug!("checking call args for uses of inner_param: {:?}", args);
+ if args.contains(&Operand::Move(inner_param)) {
+ Some((loc, term))
+ } else {
+ None
+ }
+ }) else {
+ debug!("no uses of inner_param found as a by-move call arg");
+ return;
+ };
+ debug!("===> outer_call_loc = {:?}, inner_call_loc = {:?}", outer_call_loc, inner_call_loc);
+
+ let inner_call_span = inner_call_term.source_info.span;
+ let outer_call_span = match use_span {
+ Some(span) => span,
+ None => outer_call_stmt.either(|s| s.source_info, |t| t.source_info).span,
+ };
+ if outer_call_span == inner_call_span || !outer_call_span.contains(inner_call_span) {
+ // FIXME: This stops the suggestion in some cases where it should be emitted.
+ // Fix the spans for those cases so it's emitted correctly.
+ debug!(
+ "outer span {:?} does not strictly contain inner span {:?}",
+ outer_call_span, inner_call_span
+ );
+ return;
+ }
+ err.span_help(
+ inner_call_span,
+ &format!(
+ "try adding a local storing this{}...",
+ if use_span.is_some() { "" } else { " argument" }
+ ),
+ );
+ err.span_help(
+ outer_call_span,
+ &format!(
+ "...and then using that local {}",
+ if use_span.is_some() { "here" } else { "as the argument to this call" }
+ ),
+ );
+ }
+
+ fn suggest_split_at_mut_if_applicable(
+ &self,
+ err: &mut Diagnostic,
+ place: Place<'tcx>,
+ borrowed_place: Place<'tcx>,
+ ) {
+ if let ([ProjectionElem::Index(_)], [ProjectionElem::Index(_)]) =
+ (&place.projection[..], &borrowed_place.projection[..])
+ {
+ err.help(
+ "consider using `.split_at_mut(position)` or similar method to obtain \
+ two mutable non-overlapping sub-slices",
+ );
+ }
+ }
+
+    /// Returns the description of the root place for a conflicting borrow, the full
+    /// descriptions of the places that caused the conflict, and, when a union is involved,
+    /// the name of the union's type.
+    ///
+    /// In the simplest case, where there are no unions involved, if a mutable borrow of `x` is
+    /// attempted while a shared borrow is live, then this function will return:
+    /// ```
+    /// ("x", "", "", "")
+    /// # ;
+    /// ```
+    /// In the simple union case, if a mutable borrow of a union field `x.z` is attempted while
+    /// a shared borrow of another field `x.y` is live, then this function will return
+    /// (with `U` naming the union's type):
+    /// ```
+    /// ("x", "x.z", "x.y", "U")
+    /// # ;
+    /// ```
+    /// In the more complex union case, where the union is a field of a struct, if a mutable
+    /// borrow of a union field in a struct `x.u.z` is attempted while a shared borrow of
+    /// another field `x.u.y` is live, then this function will return:
+    /// ```
+    /// ("x.u", "x.u.z", "x.u.y", "U")
+    /// # ;
+    /// ```
+ /// This is used when creating error messages like below:
+ ///
+ /// ```text
+ /// cannot borrow `a.u` (via `a.u.z.c`) as immutable because it is also borrowed as
+ /// mutable (via `a.u.s.b`) [E0502]
+ /// ```
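+    ///
+    /// For example (purely illustrative), a program producing such an error:
+    ///
+    /// ```compile_fail,E0502
+    /// union U { x: u32, y: u32 }
+    /// let mut u = U { x: 0 };
+    /// let a = unsafe { &mut u.x };
+    /// let b = unsafe { &u.y };
+    /// *a = 1;
+    /// let _ = b;
+    /// ```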
+ pub(crate) fn describe_place_for_conflicting_borrow(
+ &self,
+ first_borrowed_place: Place<'tcx>,
+ second_borrowed_place: Place<'tcx>,
+ ) -> (String, String, String, String) {
+ // Define a small closure that we can use to check if the type of a place
+ // is a union.
+ let union_ty = |place_base| {
+ // Need to use fn call syntax `PlaceRef::ty` to determine the type of `place_base`;
+ // using a type annotation in the closure argument instead leads to a lifetime error.
+ let ty = PlaceRef::ty(&place_base, self.body, self.infcx.tcx).ty;
+ ty.ty_adt_def().filter(|adt| adt.is_union()).map(|_| ty)
+ };
+
+ // Start with an empty tuple, so we can use the functions on `Option` to reduce some
+ // code duplication (particularly around returning an empty description in the failure
+ // case).
+ Some(())
+ .filter(|_| {
+ // If we have a conflicting borrow of the same place, then we don't want to add
+ // an extraneous "via x.y" to our diagnostics, so filter out this case.
+ first_borrowed_place != second_borrowed_place
+ })
+ .and_then(|_| {
+ // We're going to want to traverse the first borrowed place to see if we can find
+ // field access to a union. If we find that, then we will keep the place of the
+ // union being accessed and the field that was being accessed so we can check the
+ // second borrowed place for the same union and an access to a different field.
+ for (place_base, elem) in first_borrowed_place.iter_projections().rev() {
+ match elem {
+ ProjectionElem::Field(field, _) if union_ty(place_base).is_some() => {
+ return Some((place_base, field));
+ }
+ _ => {}
+ }
+ }
+ None
+ })
+ .and_then(|(target_base, target_field)| {
+ // With the place of a union and a field access into it, we traverse the second
+ // borrowed place and look for an access to a different field of the same union.
+ for (place_base, elem) in second_borrowed_place.iter_projections().rev() {
+ if let ProjectionElem::Field(field, _) = elem {
+ if let Some(union_ty) = union_ty(place_base) {
+ if field != target_field && place_base == target_base {
+ return Some((
+ self.describe_any_place(place_base),
+ self.describe_any_place(first_borrowed_place.as_ref()),
+ self.describe_any_place(second_borrowed_place.as_ref()),
+ union_ty.to_string(),
+ ));
+ }
+ }
+ }
+ }
+ None
+ })
+ .unwrap_or_else(|| {
+ // If we didn't find a field access into a union, or both places match, then
+ // only return the description of the first place.
+ (
+ self.describe_any_place(first_borrowed_place.as_ref()),
+ "".to_string(),
+ "".to_string(),
+ "".to_string(),
+ )
+ })
+ }
+
+    /// Reports that a StorageDeadOrDrop of `place` conflicts with `borrow`.
+    ///
+    /// This means that some data referenced by `borrow` needs to live
+    /// past the point where the StorageDeadOrDrop of `place` occurs.
+    /// This is usually interpreted as meaning that `place` has too
+    /// short a lifetime. (But sometimes it is more useful to report
+    /// it as a more direct conflict between the execution of a
+    /// `Drop::drop` and an aliasing borrow.)
+ pub(crate) fn report_borrowed_value_does_not_live_long_enough(
+ &mut self,
+ location: Location,
+ borrow: &BorrowData<'tcx>,
+ place_span: (Place<'tcx>, Span),
+ kind: Option<WriteKind>,
+ ) {
+ debug!(
+ "report_borrowed_value_does_not_live_long_enough(\
+ {:?}, {:?}, {:?}, {:?}\
+ )",
+ location, borrow, place_span, kind
+ );
+
+ let drop_span = place_span.1;
+ let root_place =
+ self.prefixes(borrow.borrowed_place.as_ref(), PrefixSet::All).last().unwrap();
+
+ let borrow_spans = self.retrieve_borrow_spans(borrow);
+ let borrow_span = borrow_spans.var_or_use_path_span();
+
+ assert!(root_place.projection.is_empty());
+ let proper_span = self.body.local_decls[root_place.local].source_info.span;
+
+ let root_place_projection = self.infcx.tcx.intern_place_elems(root_place.projection);
+
+ if self.access_place_error_reported.contains(&(
+ Place { local: root_place.local, projection: root_place_projection },
+ borrow_span,
+ )) {
+ debug!(
+ "suppressing access_place error when borrow doesn't live long enough for {:?}",
+ borrow_span
+ );
+ return;
+ }
+
+ self.access_place_error_reported.insert((
+ Place { local: root_place.local, projection: root_place_projection },
+ borrow_span,
+ ));
+
+ let borrowed_local = borrow.borrowed_place.local;
+ if self.body.local_decls[borrowed_local].is_ref_to_thread_local() {
+ let err =
+ self.report_thread_local_value_does_not_live_long_enough(drop_span, borrow_span);
+ self.buffer_error(err);
+ return;
+ }
+
+ if let StorageDeadOrDrop::Destructor(dropped_ty) =
+ self.classify_drop_access_kind(borrow.borrowed_place.as_ref())
+ {
+ // If a borrow of path `B` conflicts with drop of `D` (and
+ // we're not in the uninteresting case where `B` is a
+ // prefix of `D`), then report this as a more interesting
+ // destructor conflict.
+ if !borrow.borrowed_place.as_ref().is_prefix_of(place_span.0.as_ref()) {
+ self.report_borrow_conflicts_with_destructor(
+ location, borrow, place_span, kind, dropped_ty,
+ );
+ return;
+ }
+ }
+
+ let place_desc = self.describe_place(borrow.borrowed_place.as_ref());
+
+ let kind_place = kind.filter(|_| place_desc.is_some()).map(|k| (k, place_span.0));
+ let explanation = self.explain_why_borrow_contains_point(location, &borrow, kind_place);
+
+ debug!(
+ "report_borrowed_value_does_not_live_long_enough(place_desc: {:?}, explanation: {:?})",
+ place_desc, explanation
+ );
+ let err = match (place_desc, explanation) {
+ // If the outlives constraint comes from inside the closure,
+ // for example:
+ //
+ // let x = 0;
+ // let y = &x;
+ // Box::new(|| y) as Box<Fn() -> &'static i32>
+ //
+ // then just use the normal error. The closure isn't escaping
+ // and `move` will not help here.
+ (
+ Some(ref name),
+ BorrowExplanation::MustBeValidFor {
+ category:
+ category @ (ConstraintCategory::Return(_)
+ | ConstraintCategory::CallArgument(_)
+ | ConstraintCategory::OpaqueType),
+ from_closure: false,
+ ref region_name,
+ span,
+ ..
+ },
+            ) if borrow_spans.for_generator() || borrow_spans.for_closure() => self
+ .report_escaping_closure_capture(
+ borrow_spans,
+ borrow_span,
+ region_name,
+ category,
+ span,
+ &format!("`{}`", name),
+ ),
+ (
+ ref name,
+ BorrowExplanation::MustBeValidFor {
+ category: ConstraintCategory::Assignment,
+ from_closure: false,
+ region_name:
+ RegionName {
+ source: RegionNameSource::AnonRegionFromUpvar(upvar_span, upvar_name),
+ ..
+ },
+ span,
+ ..
+ },
+ ) => self.report_escaping_data(borrow_span, name, upvar_span, upvar_name, span),
+ (Some(name), explanation) => self.report_local_value_does_not_live_long_enough(
+ location,
+ &name,
+ &borrow,
+ drop_span,
+ borrow_spans,
+ explanation,
+ ),
+ (None, explanation) => self.report_temporary_value_does_not_live_long_enough(
+ location,
+ &borrow,
+ drop_span,
+ borrow_spans,
+ proper_span,
+ explanation,
+ ),
+ };
+
+ self.buffer_error(err);
+ }
+
+ fn report_local_value_does_not_live_long_enough(
+ &mut self,
+ location: Location,
+ name: &str,
+ borrow: &BorrowData<'tcx>,
+ drop_span: Span,
+ borrow_spans: UseSpans<'tcx>,
+ explanation: BorrowExplanation<'tcx>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ debug!(
+ "report_local_value_does_not_live_long_enough(\
+ {:?}, {:?}, {:?}, {:?}, {:?}\
+ )",
+ location, name, borrow, drop_span, borrow_spans
+ );
+
+ let borrow_span = borrow_spans.var_or_use_path_span();
+ if let BorrowExplanation::MustBeValidFor {
+ category,
+ span,
+ ref opt_place_desc,
+ from_closure: false,
+ ..
+ } = explanation
+ {
+ if let Some(diag) = self.try_report_cannot_return_reference_to_local(
+ borrow,
+ borrow_span,
+ span,
+ category,
+ opt_place_desc.as_ref(),
+ ) {
+ return diag;
+ }
+ }
+
+ let mut err = self.path_does_not_live_long_enough(borrow_span, &format!("`{}`", name));
+
+ if let Some(annotation) = self.annotate_argument_and_return_for_borrow(borrow) {
+ let region_name = annotation.emit(self, &mut err);
+
+ err.span_label(
+ borrow_span,
+ format!("`{}` would have to be valid for `{}`...", name, region_name),
+ );
+
+ let fn_hir_id = self.mir_hir_id();
+ err.span_label(
+ drop_span,
+ format!(
+ "...but `{}` will be dropped here, when the {} returns",
+ name,
+ self.infcx
+ .tcx
+ .hir()
+ .opt_name(fn_hir_id)
+ .map(|name| format!("function `{}`", name))
+ .unwrap_or_else(|| {
+ match &self
+ .infcx
+ .tcx
+ .typeck(self.mir_def_id())
+ .node_type(fn_hir_id)
+ .kind()
+ {
+ ty::Closure(..) => "enclosing closure",
+ ty::Generator(..) => "enclosing generator",
+ kind => bug!("expected closure or generator, found {:?}", kind),
+ }
+ .to_string()
+ })
+ ),
+ );
+
+ err.note(
+ "functions cannot return a borrow to data owned within the function's scope, \
+ functions can only return borrows to data passed as arguments",
+ );
+ err.note(
+ "to learn more, visit <https://doc.rust-lang.org/book/ch04-02-\
+ references-and-borrowing.html#dangling-references>",
+ );
+
+            if !matches!(explanation, BorrowExplanation::MustBeValidFor { .. }) {
+ explanation.add_explanation_to_diagnostic(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &mut err,
+ "",
+ None,
+ None,
+ );
+ }
+ } else {
+ err.span_label(borrow_span, "borrowed value does not live long enough");
+ err.span_label(drop_span, format!("`{}` dropped here while still borrowed", name));
+
+ let within = if borrow_spans.for_generator() { " by generator" } else { "" };
+
+ borrow_spans.args_span_label(&mut err, format!("value captured here{}", within));
+
+ explanation.add_explanation_to_diagnostic(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &mut err,
+ "",
+ None,
+ None,
+ );
+ }
+
+ err
+ }
+
+ fn report_borrow_conflicts_with_destructor(
+ &mut self,
+ location: Location,
+ borrow: &BorrowData<'tcx>,
+ (place, drop_span): (Place<'tcx>, Span),
+ kind: Option<WriteKind>,
+ dropped_ty: Ty<'tcx>,
+ ) {
+ debug!(
+ "report_borrow_conflicts_with_destructor(\
+ {:?}, {:?}, ({:?}, {:?}), {:?}\
+ )",
+ location, borrow, place, drop_span, kind,
+ );
+
+ let borrow_spans = self.retrieve_borrow_spans(borrow);
+ let borrow_span = borrow_spans.var_or_use();
+
+ let mut err = self.cannot_borrow_across_destructor(borrow_span);
+
+ let what_was_dropped = match self.describe_place(place.as_ref()) {
+ Some(name) => format!("`{}`", name),
+ None => String::from("temporary value"),
+ };
+
+ let label = match self.describe_place(borrow.borrowed_place.as_ref()) {
+ Some(borrowed) => format!(
+ "here, drop of {D} needs exclusive access to `{B}`, \
+ because the type `{T}` implements the `Drop` trait",
+ D = what_was_dropped,
+ T = dropped_ty,
+ B = borrowed
+ ),
+ None => format!(
+ "here is drop of {D}; whose type `{T}` implements the `Drop` trait",
+ D = what_was_dropped,
+ T = dropped_ty
+ ),
+ };
+ err.span_label(drop_span, label);
+
+ // Only give this note and suggestion if they could be relevant.
+ let explanation =
+ self.explain_why_borrow_contains_point(location, borrow, kind.map(|k| (k, place)));
+ match explanation {
+ BorrowExplanation::UsedLater { .. }
+ | BorrowExplanation::UsedLaterWhenDropped { .. } => {
+ err.note("consider using a `let` binding to create a longer lived value");
+ }
+ _ => {}
+ }
+
+ explanation.add_explanation_to_diagnostic(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &mut err,
+ "",
+ None,
+ None,
+ );
+
+ self.buffer_error(err);
+ }
+
+ fn report_thread_local_value_does_not_live_long_enough(
+ &mut self,
+ drop_span: Span,
+ borrow_span: Span,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ debug!(
+ "report_thread_local_value_does_not_live_long_enough(\
+ {:?}, {:?}\
+ )",
+ drop_span, borrow_span
+ );
+
+ let mut err = self.thread_local_value_does_not_live_long_enough(borrow_span);
+
+ err.span_label(
+ borrow_span,
+ "thread-local variables cannot be borrowed beyond the end of the function",
+ );
+ err.span_label(drop_span, "end of enclosing function is here");
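+
+        // A (hypothetical) nightly program that reaches this path:
+        //
+        // ```
+        // #![feature(thread_local)]
+        // #[thread_local]
+        // static X: u32 = 0;
+        //
+        // fn f() -> &'static u32 {
+        //     &X // error: thread-local variable borrowed past end of function
+        // }
+        // ```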
+
+ err
+ }
+
+ fn report_temporary_value_does_not_live_long_enough(
+ &mut self,
+ location: Location,
+ borrow: &BorrowData<'tcx>,
+ drop_span: Span,
+ borrow_spans: UseSpans<'tcx>,
+ proper_span: Span,
+ explanation: BorrowExplanation<'tcx>,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ debug!(
+ "report_temporary_value_does_not_live_long_enough(\
+ {:?}, {:?}, {:?}, {:?}\
+ )",
+ location, borrow, drop_span, proper_span
+ );
+
+ if let BorrowExplanation::MustBeValidFor { category, span, from_closure: false, .. } =
+ explanation
+ {
+ if let Some(diag) = self.try_report_cannot_return_reference_to_local(
+ borrow,
+ proper_span,
+ span,
+ category,
+ None,
+ ) {
+ return diag;
+ }
+ }
+
+ let mut err = self.temporary_value_borrowed_for_too_long(proper_span);
+ err.span_label(proper_span, "creates a temporary which is freed while still in use");
+ err.span_label(drop_span, "temporary value is freed at the end of this statement");
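+
+        // e.g. (illustrative):
+        //
+        // ```
+        // let v = vec![1, 2].as_slice(); // creates a `Vec` temporary, freed here
+        // println!("{:?}", v);           // ...while `v` still borrows it
+        // ```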
+
+ match explanation {
+ BorrowExplanation::UsedLater(..)
+ | BorrowExplanation::UsedLaterInLoop(..)
+ | BorrowExplanation::UsedLaterWhenDropped { .. } => {
+ // Only give this note and suggestion if it could be relevant.
+ let sm = self.infcx.tcx.sess.source_map();
+ let mut suggested = false;
+ let msg = "consider using a `let` binding to create a longer lived value";
+
+                /// We check that there's a single level of block nesting to ensure the
+                /// suggestions are always correct. If there isn't, we only provide a free-form
+                /// message to avoid misleading users in cases like
+                /// `src/test/ui/nll/borrowed-temporary-error.rs`.
+                /// We could expand the analysis to suggest hoisting all of the relevant parts of
+                /// the users' code to make the code compile, but that could be too much.
+ struct NestedStatementVisitor {
+ span: Span,
+ current: usize,
+ found: usize,
+ }
+
+ impl<'tcx> Visitor<'tcx> for NestedStatementVisitor {
+ fn visit_block(&mut self, block: &hir::Block<'tcx>) {
+ self.current += 1;
+ walk_block(self, block);
+ self.current -= 1;
+ }
+ fn visit_expr(&mut self, expr: &hir::Expr<'tcx>) {
+ if self.span == expr.span {
+ self.found = self.current;
+ }
+ walk_expr(self, expr);
+ }
+ }
+ let source_info = self.body.source_info(location);
+ if let Some(scope) = self.body.source_scopes.get(source_info.scope)
+ && let ClearCrossCrate::Set(scope_data) = &scope.local_data
+ && let Some(node) = self.infcx.tcx.hir().find(scope_data.lint_root)
+ && let Some(id) = node.body_id()
+ && let hir::ExprKind::Block(block, _) = self.infcx.tcx.hir().body(id).value.kind
+ {
+ for stmt in block.stmts {
+ let mut visitor = NestedStatementVisitor {
+ span: proper_span,
+ current: 0,
+ found: 0,
+ };
+ visitor.visit_stmt(stmt);
+ if visitor.found == 0
+ && stmt.span.contains(proper_span)
+ && let Some(p) = sm.span_to_margin(stmt.span)
+ && let Ok(s) = sm.span_to_snippet(proper_span)
+ {
+ let addition = format!("let binding = {};\n{}", s, " ".repeat(p));
+ err.multipart_suggestion_verbose(
+ msg,
+ vec![
+ (stmt.span.shrink_to_lo(), addition),
+ (proper_span, "binding".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ suggested = true;
+ break;
+ }
+ }
+ }
+ if !suggested {
+ err.note(msg);
+ }
+ }
+ _ => {}
+ }
+ explanation.add_explanation_to_diagnostic(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &mut err,
+ "",
+ None,
+ None,
+ );
+
+ let within = if borrow_spans.for_generator() { " by generator" } else { "" };
+
+ borrow_spans.args_span_label(&mut err, format!("value captured here{}", within));
+
+ err
+ }
+
+ fn try_report_cannot_return_reference_to_local(
+ &self,
+ borrow: &BorrowData<'tcx>,
+ borrow_span: Span,
+ return_span: Span,
+ category: ConstraintCategory<'tcx>,
+ opt_place_desc: Option<&String>,
+ ) -> Option<DiagnosticBuilder<'cx, ErrorGuaranteed>> {
+ let return_kind = match category {
+ ConstraintCategory::Return(_) => "return",
+ ConstraintCategory::Yield => "yield",
+ _ => return None,
+ };
+
+ // FIXME use a better heuristic than Spans
+ let reference_desc = if return_span == self.body.source_info(borrow.reserve_location).span {
+ "reference to"
+ } else {
+ "value referencing"
+ };
+
+ let (place_desc, note) = if let Some(place_desc) = opt_place_desc {
+ let local_kind = if let Some(local) = borrow.borrowed_place.as_local() {
+ match self.body.local_kind(local) {
+ LocalKind::ReturnPointer | LocalKind::Temp => {
+ bug!("temporary or return pointer with a name")
+ }
+ LocalKind::Var => "local variable ",
+ LocalKind::Arg
+ if !self.upvars.is_empty() && local == ty::CAPTURE_STRUCT_LOCAL =>
+ {
+ "variable captured by `move` "
+ }
+ LocalKind::Arg => "function parameter ",
+ }
+ } else {
+ "local data "
+ };
+ (
+ format!("{}`{}`", local_kind, place_desc),
+ format!("`{}` is borrowed here", place_desc),
+ )
+ } else {
+ let root_place =
+ self.prefixes(borrow.borrowed_place.as_ref(), PrefixSet::All).last().unwrap();
+ let local = root_place.local;
+ match self.body.local_kind(local) {
+ LocalKind::ReturnPointer | LocalKind::Temp => {
+ ("temporary value".to_string(), "temporary value created here".to_string())
+ }
+ LocalKind::Arg => (
+ "function parameter".to_string(),
+ "function parameter borrowed here".to_string(),
+ ),
+ LocalKind::Var => {
+ ("local binding".to_string(), "local binding introduced here".to_string())
+ }
+ }
+ };
+
+ let mut err = self.cannot_return_reference_to_local(
+ return_span,
+ return_kind,
+ reference_desc,
+ &place_desc,
+ );
+
+ if return_span != borrow_span {
+ err.span_label(borrow_span, note);
+
+ let tcx = self.infcx.tcx;
+ let ty_params = ty::List::empty();
+
+ let return_ty = self.regioncx.universal_regions().unnormalized_output_ty;
+ let return_ty = tcx.erase_regions(return_ty);
+
+            // (regions were erased above to avoid panics in the trait check below)
+ if let Some(iter_trait) = tcx.get_diagnostic_item(sym::Iterator)
+ && self
+ .infcx
+ .type_implements_trait(iter_trait, return_ty, ty_params, self.param_env)
+ .must_apply_modulo_regions()
+ {
+ err.span_suggestion_hidden(
+ return_span.shrink_to_hi(),
+ "use `.collect()` to allocate the iterator",
+ ".collect::<Vec<_>>()",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ Some(err)
+ }
+
+ fn report_escaping_closure_capture(
+ &mut self,
+ use_span: UseSpans<'tcx>,
+ var_span: Span,
+ fr_name: &RegionName,
+ category: ConstraintCategory<'tcx>,
+ constraint_span: Span,
+ captured_var: &str,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let tcx = self.infcx.tcx;
+ let args_span = use_span.args_or_use();
+
+ let (sugg_span, suggestion) = match tcx.sess.source_map().span_to_snippet(args_span) {
+ Ok(string) => {
+ if string.starts_with("async ") {
+ let pos = args_span.lo() + BytePos(6);
+ (args_span.with_lo(pos).with_hi(pos), "move ")
+ } else if string.starts_with("async|") {
+ let pos = args_span.lo() + BytePos(5);
+ (args_span.with_lo(pos).with_hi(pos), " move")
+ } else {
+ (args_span.shrink_to_lo(), "move ")
+ }
+ }
+ Err(_) => (args_span, "move |<args>| <body>"),
+ };
+ let kind = match use_span.generator_kind() {
+ Some(generator_kind) => match generator_kind {
+ GeneratorKind::Async(async_kind) => match async_kind {
+ AsyncGeneratorKind::Block => "async block",
+ AsyncGeneratorKind::Closure => "async closure",
+ _ => bug!("async block/closure expected, but async function found."),
+ },
+ GeneratorKind::Gen => "generator",
+ },
+ None => "closure",
+ };
+
+ let mut err =
+ self.cannot_capture_in_long_lived_closure(args_span, kind, captured_var, var_span);
+ err.span_suggestion_verbose(
+ sugg_span,
+ &format!(
+ "to force the {} to take ownership of {} (and any \
+ other referenced variables), use the `move` keyword",
+ kind, captured_var
+ ),
+ suggestion,
+ Applicability::MachineApplicable,
+ );
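+
+        // e.g. (illustrative) the suggested fix for a closure that outlives `data`:
+        //
+        // ```
+        // let data = vec![1, 2, 3];
+        // std::thread::spawn(move || println!("{:?}", data));
+        // ```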
+
+ match category {
+ ConstraintCategory::Return(_) | ConstraintCategory::OpaqueType => {
+ let msg = format!("{} is returned here", kind);
+ err.span_note(constraint_span, &msg);
+ }
+ ConstraintCategory::CallArgument(_) => {
+ fr_name.highlight_region_name(&mut err);
+ if matches!(use_span.generator_kind(), Some(GeneratorKind::Async(_))) {
+ err.note(
+ "async blocks are not executed immediately and must either take a \
+ reference or ownership of outside variables they use",
+ );
+ } else {
+ let msg = format!("function requires argument type to outlive `{}`", fr_name);
+ err.span_note(constraint_span, &msg);
+ }
+ }
+ _ => bug!(
+ "report_escaping_closure_capture called with unexpected constraint \
+ category: `{:?}`",
+ category
+ ),
+ }
+
+ err
+ }
+
+ fn report_escaping_data(
+ &mut self,
+ borrow_span: Span,
+ name: &Option<String>,
+ upvar_span: Span,
+ upvar_name: Symbol,
+ escape_span: Span,
+ ) -> DiagnosticBuilder<'cx, ErrorGuaranteed> {
+ let tcx = self.infcx.tcx;
+
+ let (_, escapes_from) = tcx.article_and_description(self.mir_def_id().to_def_id());
+
+ let mut err =
+ borrowck_errors::borrowed_data_escapes_closure(tcx, escape_span, escapes_from);
+
+ err.span_label(
+ upvar_span,
+ format!("`{}` declared here, outside of the {} body", upvar_name, escapes_from),
+ );
+
+ err.span_label(borrow_span, format!("borrow is only valid in the {} body", escapes_from));
+
+ if let Some(name) = name {
+ err.span_label(
+ escape_span,
+ format!("reference to `{}` escapes the {} body here", name, escapes_from),
+ );
+ } else {
+ err.span_label(
+ escape_span,
+ format!("reference escapes the {} body here", escapes_from),
+ );
+ }
+
+ err
+ }
+
+ fn get_moved_indexes(
+ &mut self,
+ location: Location,
+ mpi: MovePathIndex,
+ ) -> (Vec<MoveSite>, Vec<Location>) {
+ fn predecessor_locations<'tcx, 'a>(
+ body: &'a mir::Body<'tcx>,
+ location: Location,
+ ) -> impl Iterator<Item = Location> + Captures<'tcx> + 'a {
+ if location.statement_index == 0 {
+ let predecessors = body.basic_blocks.predecessors()[location.block].to_vec();
+ Either::Left(predecessors.into_iter().map(move |bb| body.terminator_loc(bb)))
+ } else {
+ Either::Right(std::iter::once(Location {
+ statement_index: location.statement_index - 1,
+ ..location
+ }))
+ }
+ }
+
+ let mut mpis = vec![mpi];
+ let move_paths = &self.move_data.move_paths;
+ mpis.extend(move_paths[mpi].parents(move_paths).map(|(mpi, _)| mpi));
+
+ let mut stack = Vec::new();
+ let mut back_edge_stack = Vec::new();
+
+ predecessor_locations(self.body, location).for_each(|predecessor| {
+ if location.dominates(predecessor, &self.dominators) {
+ back_edge_stack.push(predecessor)
+ } else {
+ stack.push(predecessor);
+ }
+ });
+
+ let mut reached_start = false;
+
+        // Check if the mpi is initialized as an argument.
+ let mut is_argument = false;
+ for arg in self.body.args_iter() {
+ let path = self.move_data.rev_lookup.find_local(arg);
+ if mpis.contains(&path) {
+ is_argument = true;
+ }
+ }
+
+ let mut visited = FxHashSet::default();
+ let mut move_locations = FxHashSet::default();
+ let mut reinits = vec![];
+ let mut result = vec![];
+
+ let mut dfs_iter = |result: &mut Vec<MoveSite>, location: Location, is_back_edge: bool| {
+ debug!(
+ "report_use_of_moved_or_uninitialized: (current_location={:?}, back_edge={})",
+ location, is_back_edge
+ );
+
+ if !visited.insert(location) {
+ return true;
+ }
+
+ // check for moves
+ let stmt_kind =
+ self.body[location.block].statements.get(location.statement_index).map(|s| &s.kind);
+ if let Some(StatementKind::StorageDead(..)) = stmt_kind {
+ // this analysis only tries to find moves explicitly
+ // written by the user, so we ignore the move-outs
+ // created by `StorageDead` and at the beginning
+ // of a function.
+ } else {
+                // If we have found a use of a.b.c that was in error, then we want to look for
+                // moves not only of a.b.c but also of a.b and a.
+ //
+ // Note that the moves data already includes "parent" paths, so we don't have to
+ // worry about the other case: that is, if there is a move of a.b.c, it is already
+ // marked as a move of a.b and a as well, so we will generate the correct errors
+ // there.
+ for moi in &self.move_data.loc_map[location] {
+ debug!("report_use_of_moved_or_uninitialized: moi={:?}", moi);
+ let path = self.move_data.moves[*moi].path;
+ if mpis.contains(&path) {
+ debug!(
+ "report_use_of_moved_or_uninitialized: found {:?}",
+ move_paths[path].place
+ );
+ result.push(MoveSite { moi: *moi, traversed_back_edge: is_back_edge });
+ move_locations.insert(location);
+
+ // Strictly speaking, we could continue our DFS here. There may be
+ // other moves that can reach the point of error. But it is kind of
+ // confusing to highlight them.
+ //
+ // Example:
+ //
+ // ```
+ // let a = vec![];
+ // let b = a;
+ // let c = a;
+ // drop(a); // <-- current point of error
+ // ```
+ //
+ // Because we stop the DFS here, we only highlight `let c = a`,
+ // and not `let b = a`. We will of course also report an error at
+ // `let c = a` which highlights `let b = a` as the move.
+ return true;
+ }
+ }
+ }
+
+ // check for inits
+ let mut any_match = false;
+ for ii in &self.move_data.init_loc_map[location] {
+ let init = self.move_data.inits[*ii];
+ match init.kind {
+ InitKind::Deep | InitKind::NonPanicPathOnly => {
+ if mpis.contains(&init.path) {
+ any_match = true;
+ }
+ }
+ InitKind::Shallow => {
+ if mpi == init.path {
+ any_match = true;
+ }
+ }
+ }
+ }
+ if any_match {
+ reinits.push(location);
+ return true;
+ }
+            false
+ };
+
+ while let Some(location) = stack.pop() {
+ if dfs_iter(&mut result, location, false) {
+ continue;
+ }
+
+ let mut has_predecessor = false;
+ predecessor_locations(self.body, location).for_each(|predecessor| {
+ if location.dominates(predecessor, &self.dominators) {
+ back_edge_stack.push(predecessor)
+ } else {
+ stack.push(predecessor);
+ }
+ has_predecessor = true;
+ });
+
+ if !has_predecessor {
+ reached_start = true;
+ }
+ }
+ if (is_argument || !reached_start) && result.is_empty() {
+            // Process back edges (moves in future loop iterations) only if
+            // the move path is definitely initialized upon loop entry,
+            // to avoid spurious "in previous iteration" errors.
+            //
+            // During DFS, if there's a path from the error back to the start
+            // of the function with no intervening init or move, then the
+            // move path may be uninitialized at loop entry.
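+
+            // e.g. (illustrative) the loop case that is reported:
+            //
+            // ```
+            // let x = String::new();
+            // loop {
+            //     drop(x); // error: use of moved value, moved in previous iteration
+            // }
+            // ```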
+ while let Some(location) = back_edge_stack.pop() {
+ if dfs_iter(&mut result, location, true) {
+ continue;
+ }
+
+ predecessor_locations(self.body, location)
+ .for_each(|predecessor| back_edge_stack.push(predecessor));
+ }
+ }
+
+ // Check if we can reach these reinits from a move location.
+ let reinits_reachable = reinits
+ .into_iter()
+ .filter(|reinit| {
+ let mut visited = FxHashSet::default();
+ let mut stack = vec![*reinit];
+ while let Some(location) = stack.pop() {
+ if !visited.insert(location) {
+ continue;
+ }
+ if move_locations.contains(&location) {
+ return true;
+ }
+ stack.extend(predecessor_locations(self.body, location));
+ }
+ false
+ })
+ .collect::<Vec<Location>>();
+ (result, reinits_reachable)
+ }
+
+ pub(crate) fn report_illegal_mutation_of_borrowed(
+ &mut self,
+ location: Location,
+ (place, span): (Place<'tcx>, Span),
+ loan: &BorrowData<'tcx>,
+ ) {
+ let loan_spans = self.retrieve_borrow_spans(loan);
+ let loan_span = loan_spans.args_or_use();
+
+ let descr_place = self.describe_any_place(place.as_ref());
+ if loan.kind == BorrowKind::Shallow {
+ if let Some(section) = self.classify_immutable_section(loan.assigned_place) {
+ let mut err = self.cannot_mutate_in_immutable_section(
+ span,
+ loan_span,
+ &descr_place,
+ section,
+ "assign",
+ );
+ loan_spans.var_span_label(
+ &mut err,
+ format!("borrow occurs due to use{}", loan_spans.describe()),
+ loan.kind.describe_mutability(),
+ );
+
+ self.buffer_error(err);
+
+ return;
+ }
+ }
+
+ let mut err = self.cannot_assign_to_borrowed(span, loan_span, &descr_place);
+
+ loan_spans.var_span_label(
+ &mut err,
+ format!("borrow occurs due to use{}", loan_spans.describe()),
+ loan.kind.describe_mutability(),
+ );
+
+ self.explain_why_borrow_contains_point(location, loan, None).add_explanation_to_diagnostic(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &mut err,
+ "",
+ None,
+ None,
+ );
+
+ self.explain_deref_coercion(loan, &mut err);
+
+ self.buffer_error(err);
+ }
+
+ fn explain_deref_coercion(&mut self, loan: &BorrowData<'tcx>, err: &mut Diagnostic) {
+ let tcx = self.infcx.tcx;
+ if let (
+ Some(Terminator { kind: TerminatorKind::Call { from_hir_call: false, .. }, .. }),
+ Some((method_did, method_substs)),
+ ) = (
+ &self.body[loan.reserve_location.block].terminator,
+ rustc_const_eval::util::find_self_call(
+ tcx,
+ self.body,
+ loan.assigned_place.local,
+ loan.reserve_location.block,
+ ),
+ ) {
+ if tcx.is_diagnostic_item(sym::deref_method, method_did) {
+ let deref_target =
+ tcx.get_diagnostic_item(sym::deref_target).and_then(|deref_target| {
+ Instance::resolve(tcx, self.param_env, deref_target, method_substs)
+ .transpose()
+ });
+ if let Some(Ok(instance)) = deref_target {
+ let deref_target_ty = instance.ty(tcx, self.param_env);
+ err.note(&format!(
+ "borrow occurs due to deref coercion to `{}`",
+ deref_target_ty
+ ));
+ err.span_note(tcx.def_span(instance.def_id()), "deref defined here");
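+                    // A case this explains (illustrative):
+                    //
+                    // ```
+                    // struct W(String);
+                    // impl std::ops::Deref for W {
+                    //     type Target = String;
+                    //     fn deref(&self) -> &String { &self.0 }
+                    // }
+                    // let mut w = W(String::new());
+                    // let s: &String = &w; // deref coercion calls `W::deref(&w)`
+                    // w.0.push('a');       // error: `w` is still borrowed via `s`
+                    // let _ = s;
+                    // ```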
+ }
+ }
+ }
+ }
+
+ /// Reports an illegal reassignment; for example, an assignment to
+ /// (part of) a non-`mut` local that occurs potentially after that
+ /// local has already been initialized. `place` is the path being
+ /// assigned; `err_place` is a place providing a reason why
+ /// `place` is not mutable (e.g., the non-`mut` local `x` in an
+ /// assignment to `x.f`).
+ pub(crate) fn report_illegal_reassignment(
+ &mut self,
+ _location: Location,
+ (place, span): (Place<'tcx>, Span),
+ assigned_span: Span,
+ err_place: Place<'tcx>,
+ ) {
+ let (from_arg, local_decl, local_name) = match err_place.as_local() {
+ Some(local) => (
+ self.body.local_kind(local) == LocalKind::Arg,
+ Some(&self.body.local_decls[local]),
+ self.local_names[local],
+ ),
+ None => (false, None, None),
+ };
+
+ // If root local is initialized immediately (everything apart from let
+ // PATTERN;) then make the error refer to that local, rather than the
+ // place being assigned later.
+ let (place_description, assigned_span) = match local_decl {
+ Some(LocalDecl {
+ local_info:
+ Some(box LocalInfo::User(
+ ClearCrossCrate::Clear
+ | ClearCrossCrate::Set(BindingForm::Var(VarBindingForm {
+ opt_match_place: None,
+ ..
+ })),
+ ))
+ | Some(box LocalInfo::StaticRef { .. })
+ | None,
+ ..
+ })
+ | None => (self.describe_any_place(place.as_ref()), assigned_span),
+ Some(decl) => (self.describe_any_place(err_place.as_ref()), decl.source_info.span),
+ };
+
+ let mut err = self.cannot_reassign_immutable(span, &place_description, from_arg);
+ let msg = if from_arg {
+ "cannot assign to immutable argument"
+ } else {
+ "cannot assign twice to immutable variable"
+ };
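+
+        // e.g. (illustrative):
+        //
+        // ```
+        // let x = 5;
+        // x = 6; // error: cannot assign twice to immutable variable `x`
+        // ```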
+ if span != assigned_span && !from_arg {
+ err.span_label(assigned_span, format!("first assignment to {}", place_description));
+ }
+ if let Some(decl) = local_decl
+ && let Some(name) = local_name
+ && decl.can_be_made_mutable()
+ {
+ err.span_suggestion(
+ decl.source_info.span,
+ "consider making this binding mutable",
+ format!("mut {}", name),
+ Applicability::MachineApplicable,
+ );
+ }
+ err.span_label(span, msg);
+ self.buffer_error(err);
+ }
+
+ fn classify_drop_access_kind(&self, place: PlaceRef<'tcx>) -> StorageDeadOrDrop<'tcx> {
+ let tcx = self.infcx.tcx;
+ let (kind, _place_ty) = place.projection.iter().fold(
+ (LocalStorageDead, PlaceTy::from_ty(self.body.local_decls[place.local].ty)),
+ |(kind, place_ty), &elem| {
+ (
+ match elem {
+ ProjectionElem::Deref => match kind {
+ StorageDeadOrDrop::LocalStorageDead
+ | StorageDeadOrDrop::BoxedStorageDead => {
+ assert!(
+ place_ty.ty.is_box(),
+ "Drop of value behind a reference or raw pointer"
+ );
+ StorageDeadOrDrop::BoxedStorageDead
+ }
+ StorageDeadOrDrop::Destructor(_) => kind,
+ },
+ ProjectionElem::Field(..) | ProjectionElem::Downcast(..) => {
+ match place_ty.ty.kind() {
+ ty::Adt(def, _) if def.has_dtor(tcx) => {
+ // Report the outermost adt with a destructor
+ match kind {
+ StorageDeadOrDrop::Destructor(_) => kind,
+ StorageDeadOrDrop::LocalStorageDead
+ | StorageDeadOrDrop::BoxedStorageDead => {
+ StorageDeadOrDrop::Destructor(place_ty.ty)
+ }
+ }
+ }
+ _ => kind,
+ }
+ }
+ ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Index(_) => kind,
+ },
+ place_ty.projection_ty(tcx, elem),
+ )
+ },
+ );
+ kind
+ }
+
+ /// Describe the reason for the fake borrow that was assigned to `place`.
+ fn classify_immutable_section(&self, place: Place<'tcx>) -> Option<&'static str> {
+ use rustc_middle::mir::visit::Visitor;
+ struct FakeReadCauseFinder<'tcx> {
+ place: Place<'tcx>,
+ cause: Option<FakeReadCause>,
+ }
+ impl<'tcx> Visitor<'tcx> for FakeReadCauseFinder<'tcx> {
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, _: Location) {
+ match statement {
+ Statement { kind: StatementKind::FakeRead(box (cause, place)), .. }
+ if *place == self.place =>
+ {
+ self.cause = Some(*cause);
+ }
+ _ => (),
+ }
+ }
+ }
+ let mut visitor = FakeReadCauseFinder { place, cause: None };
+ visitor.visit_body(&self.body);
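+        // e.g. (illustrative) a fake-read cause this recognizes, a match guard mutation:
+        //
+        // ```
+        // match x {
+        //     Some(_) if { x = None; true } => {}
+        //     _ => {}
+        // }
+        // ```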
+ match visitor.cause {
+ Some(FakeReadCause::ForMatchGuard) => Some("match guard"),
+ Some(FakeReadCause::ForIndex) => Some("indexing expression"),
+ _ => None,
+ }
+ }
+
+    /// Annotate the argument and return type of a function or closure with a (synthesized)
+    /// lifetime for a borrow of a local value that does not live long enough.
+ fn annotate_argument_and_return_for_borrow(
+ &self,
+ borrow: &BorrowData<'tcx>,
+ ) -> Option<AnnotatedBorrowFnSignature<'tcx>> {
+ // Define a fallback for when we can't match a closure.
+ let fallback = || {
+ let is_closure = self.infcx.tcx.is_closure(self.mir_def_id().to_def_id());
+ if is_closure {
+ None
+ } else {
+ let ty = self.infcx.tcx.type_of(self.mir_def_id());
+ match ty.kind() {
+ ty::FnDef(_, _) | ty::FnPtr(_) => self.annotate_fn_sig(
+ self.mir_def_id(),
+ self.infcx.tcx.fn_sig(self.mir_def_id()),
+ ),
+ _ => None,
+ }
+ }
+ };
+
+ // In order to determine whether we need to annotate, we need to check whether the reserve
+ // place was an assignment into a temporary.
+ //
+ // If it was, we check whether or not that temporary is eventually assigned into the return
+ // place. If it was, we can add annotations about the function's return type and arguments
+ // and it'll make sense.
+ let location = borrow.reserve_location;
+ debug!("annotate_argument_and_return_for_borrow: location={:?}", location);
+ if let Some(&Statement { kind: StatementKind::Assign(box (ref reservation, _)), .. }) =
+ &self.body[location.block].statements.get(location.statement_index)
+ {
+ debug!("annotate_argument_and_return_for_borrow: reservation={:?}", reservation);
+ // Check that the initial assignment of the reserve location is into a temporary.
+ let mut target = match reservation.as_local() {
+ Some(local) if self.body.local_kind(local) == LocalKind::Temp => local,
+ _ => return None,
+ };
+
+ // Next, look through the rest of the block, checking if we are assigning the
+ // `target` (that is, the place that contains our borrow) to anything.
+ let mut annotated_closure = None;
+ for stmt in &self.body[location.block].statements[location.statement_index + 1..] {
+ debug!(
+ "annotate_argument_and_return_for_borrow: target={:?} stmt={:?}",
+ target, stmt
+ );
+ if let StatementKind::Assign(box (place, rvalue)) = &stmt.kind {
+ if let Some(assigned_to) = place.as_local() {
+ debug!(
+ "annotate_argument_and_return_for_borrow: assigned_to={:?} \
+ rvalue={:?}",
+ assigned_to, rvalue
+ );
+ // Check if our `target` was captured by a closure.
+ if let Rvalue::Aggregate(
+ box AggregateKind::Closure(def_id, substs),
+ ref operands,
+ ) = *rvalue
+ {
+ for operand in operands {
+ let (Operand::Copy(assigned_from) | Operand::Move(assigned_from)) = operand else {
+ continue;
+ };
+ debug!(
+ "annotate_argument_and_return_for_borrow: assigned_from={:?}",
+ assigned_from
+ );
+
+ // Find the local from the operand.
+ let Some(assigned_from_local) = assigned_from.local_or_deref_local() else {
+ continue;
+ };
+
+ if assigned_from_local != target {
+ continue;
+ }
+
+ // If a closure captured our `target` and then assigned
+ // into a place then we should annotate the closure in
+ // case it ends up being assigned into the return place.
+ annotated_closure =
+ self.annotate_fn_sig(def_id, substs.as_closure().sig());
+ debug!(
+ "annotate_argument_and_return_for_borrow: \
+ annotated_closure={:?} assigned_from_local={:?} \
+ assigned_to={:?}",
+ annotated_closure, assigned_from_local, assigned_to
+ );
+
+ if assigned_to == mir::RETURN_PLACE {
+ // If it was assigned directly into the return place, then
+ // return now.
+ return annotated_closure;
+ } else {
+ // Otherwise, update the target.
+ target = assigned_to;
+ }
+ }
+
+ // If none of our closure's operands matched, then skip to the next
+ // statement.
+ continue;
+ }
+
+ // Otherwise, look at other types of assignment.
+ let assigned_from = match rvalue {
+ Rvalue::Ref(_, _, assigned_from) => assigned_from,
+ Rvalue::Use(operand) => match operand {
+ Operand::Copy(assigned_from) | Operand::Move(assigned_from) => {
+ assigned_from
+ }
+ _ => continue,
+ },
+ _ => continue,
+ };
+ debug!(
+ "annotate_argument_and_return_for_borrow: \
+ assigned_from={:?}",
+ assigned_from,
+ );
+
+ // Find the local from the rvalue.
+ let Some(assigned_from_local) = assigned_from.local_or_deref_local() else { continue };
+ debug!(
+ "annotate_argument_and_return_for_borrow: \
+ assigned_from_local={:?}",
+ assigned_from_local,
+ );
+
+ // Check if our local matches the target - if so, we've assigned our
+ // borrow to a new place.
+ if assigned_from_local != target {
+ continue;
+ }
+
+ // If we assigned our `target` into a new place, then we should
+ // check if it was the return place.
+ debug!(
+ "annotate_argument_and_return_for_borrow: \
+ assigned_from_local={:?} assigned_to={:?}",
+ assigned_from_local, assigned_to
+ );
+ if assigned_to == mir::RETURN_PLACE {
+ // If it was then return the annotated closure if there was one,
+ // else, annotate this function.
+ return annotated_closure.or_else(fallback);
+ }
+
+ // If we didn't assign into the return place, then we just update
+ // the target.
+ target = assigned_to;
+ }
+ }
+ }
+
+ // Check the terminator if we didn't find anything in the statements.
+ let terminator = &self.body[location.block].terminator();
+ debug!(
+ "annotate_argument_and_return_for_borrow: target={:?} terminator={:?}",
+ target, terminator
+ );
+ if let TerminatorKind::Call { destination, target: Some(_), args, .. } =
+ &terminator.kind
+ {
+ if let Some(assigned_to) = destination.as_local() {
+ debug!(
+ "annotate_argument_and_return_for_borrow: assigned_to={:?} args={:?}",
+ assigned_to, args
+ );
+ for operand in args {
+ let (Operand::Copy(assigned_from) | Operand::Move(assigned_from)) = operand else {
+ continue;
+ };
+ debug!(
+ "annotate_argument_and_return_for_borrow: assigned_from={:?}",
+ assigned_from,
+ );
+
+ if let Some(assigned_from_local) = assigned_from.local_or_deref_local() {
+ debug!(
+ "annotate_argument_and_return_for_borrow: assigned_from_local={:?}",
+ assigned_from_local,
+ );
+
+ if assigned_to == mir::RETURN_PLACE && assigned_from_local == target {
+ return annotated_closure.or_else(fallback);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // If we haven't found an assignment into the return place, then we need not add
+ // any annotations.
+ debug!("annotate_argument_and_return_for_borrow: none found");
+ None
+ }
+
+ /// Annotate the first argument and return type of a function signature if they are
+ /// references.
+ fn annotate_fn_sig(
+ &self,
+ did: LocalDefId,
+ sig: ty::PolyFnSig<'tcx>,
+ ) -> Option<AnnotatedBorrowFnSignature<'tcx>> {
+ debug!("annotate_fn_sig: did={:?} sig={:?}", did, sig);
+ let is_closure = self.infcx.tcx.is_closure(did.to_def_id());
+ let fn_hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(did);
+ let fn_decl = self.infcx.tcx.hir().fn_decl_by_hir_id(fn_hir_id)?;
+
+ // We need to work out which arguments to highlight. We do this by looking
+ // at the return type, where there are three cases:
+ //
+ // 1. If there are named arguments, then we should highlight the return type and
+ // highlight any of the arguments that are also references with that lifetime.
+ // If there are no arguments that have the same lifetime as the return type,
+ // then don't highlight anything.
+ // 2. The return type is a reference with an anonymous lifetime. If this is
+ // the case, then we can take advantage of (and teach) the lifetime elision
+ // rules.
+ //
+ // We know that an error is being reported. So the arguments and return type
+        // must satisfy the elision rules. Therefore, if there is a single argument, the
+        // return type and the first (and only) argument have the same lifetime; since the
+        // borrow isn't meeting that lifetime, we can highlight the argument and the
+        // return type.
+ //
+ // If there are multiple arguments then the first argument must be self (else
+ // it would not satisfy the elision rules), so we can highlight self and the
+ // return type.
+ // 3. The return type is not a reference. In this case, we don't highlight
+ // anything.
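+        //
+        // Illustrative signatures for the three cases:
+        //
+        // ```
+        // fn case1<'a>(x: &'a u32, y: &u32) -> &'a u32 { x } // named lifetime
+        // fn case2(x: &u32) -> &u32 { x }                    // elided lifetime
+        // fn case3(x: &u32) -> u32 { *x }                    // not a reference
+        // ```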
+ let return_ty = sig.output();
+ match return_ty.skip_binder().kind() {
+ ty::Ref(return_region, _, _) if return_region.has_name() && !is_closure => {
+ // This is case 1 from above, return type is a named reference so we need to
+ // search for relevant arguments.
+ let mut arguments = Vec::new();
+ for (index, argument) in sig.inputs().skip_binder().iter().enumerate() {
+ if let ty::Ref(argument_region, _, _) = argument.kind() {
+ if argument_region == return_region {
+ // Need to use the `rustc_middle::ty` types to compare against the
+ // `return_region`. Then use the `rustc_hir` type to get only
+ // the lifetime span.
+ if let hir::TyKind::Rptr(lifetime, _) = &fn_decl.inputs[index].kind {
+ // With access to the lifetime, we can get
+ // the span of it.
+ arguments.push((*argument, lifetime.span));
+ } else {
+ bug!("ty type is a ref but hir type is not");
+ }
+ }
+ }
+ }
+
+ // We need to have arguments. This shouldn't happen, but it's worth checking.
+ if arguments.is_empty() {
+ return None;
+ }
+
+ // We use a mix of the HIR and the Ty types to get information
+ // as the HIR doesn't have full types for closure arguments.
+ let return_ty = sig.output().skip_binder();
+ let mut return_span = fn_decl.output.span();
+ if let hir::FnRetTy::Return(ty) = &fn_decl.output {
+ if let hir::TyKind::Rptr(lifetime, _) = ty.kind {
+ return_span = lifetime.span;
+ }
+ }
+
+ Some(AnnotatedBorrowFnSignature::NamedFunction {
+ arguments,
+ return_ty,
+ return_span,
+ })
+ }
+ ty::Ref(_, _, _) if is_closure => {
+                // This is case 2 from above, but only for closures: the return type is an
+                // anonymous reference, so we select the first argument.
+ let argument_span = fn_decl.inputs.first()?.span;
+ let argument_ty = sig.inputs().skip_binder().first()?;
+
+ // Closure arguments are wrapped in a tuple, so we need to get the first
+ // from that.
+ if let ty::Tuple(elems) = argument_ty.kind() {
+ let &argument_ty = elems.first()?;
+ if let ty::Ref(_, _, _) = argument_ty.kind() {
+ return Some(AnnotatedBorrowFnSignature::Closure {
+ argument_ty,
+ argument_span,
+ });
+ }
+ }
+
+ None
+ }
+ ty::Ref(_, _, _) => {
+ // This is also case 2 from above but for functions, return type is still an
+ // anonymous reference so we select the first argument.
+ let argument_span = fn_decl.inputs.first()?.span;
+ let argument_ty = *sig.inputs().skip_binder().first()?;
+
+ let return_span = fn_decl.output.span();
+ let return_ty = sig.output().skip_binder();
+
+ // We expect the first argument to be a reference.
+ match argument_ty.kind() {
+ ty::Ref(_, _, _) => {}
+ _ => return None,
+ }
+
+ Some(AnnotatedBorrowFnSignature::AnonymousFunction {
+ argument_ty,
+ argument_span,
+ return_ty,
+ return_span,
+ })
+ }
+ _ => {
+ // This is case 3 from above, return type is not a reference so don't highlight
+ // anything.
+ None
+ }
+ }
+ }
+}
+
+#[derive(Debug)]
+enum AnnotatedBorrowFnSignature<'tcx> {
+ NamedFunction {
+ arguments: Vec<(Ty<'tcx>, Span)>,
+ return_ty: Ty<'tcx>,
+ return_span: Span,
+ },
+ AnonymousFunction {
+ argument_ty: Ty<'tcx>,
+ argument_span: Span,
+ return_ty: Ty<'tcx>,
+ return_span: Span,
+ },
+ Closure {
+ argument_ty: Ty<'tcx>,
+ argument_span: Span,
+ },
+}
+
+impl<'tcx> AnnotatedBorrowFnSignature<'tcx> {
+    /// Annotate the provided diagnostic with information about the borrow from the fn
+    /// signature that helps explain the error, and return the name of the region involved.
+ pub(crate) fn emit(&self, cx: &mut MirBorrowckCtxt<'_, 'tcx>, diag: &mut Diagnostic) -> String {
+ match self {
+ &AnnotatedBorrowFnSignature::Closure { argument_ty, argument_span } => {
+ diag.span_label(
+ argument_span,
+ format!("has type `{}`", cx.get_name_for_ty(argument_ty, 0)),
+ );
+
+ cx.get_region_name_for_ty(argument_ty, 0)
+ }
+ &AnnotatedBorrowFnSignature::AnonymousFunction {
+ argument_ty,
+ argument_span,
+ return_ty,
+ return_span,
+ } => {
+ let argument_ty_name = cx.get_name_for_ty(argument_ty, 0);
+ diag.span_label(argument_span, format!("has type `{}`", argument_ty_name));
+
+ let return_ty_name = cx.get_name_for_ty(return_ty, 0);
+ let types_equal = return_ty_name == argument_ty_name;
+ diag.span_label(
+ return_span,
+ format!(
+ "{}has type `{}`",
+ if types_equal { "also " } else { "" },
+ return_ty_name,
+ ),
+ );
+
+ diag.note(
+ "argument and return type have the same lifetime due to lifetime elision rules",
+ );
+ diag.note(
+ "to learn more, visit <https://doc.rust-lang.org/book/ch10-03-\
+ lifetime-syntax.html#lifetime-elision>",
+ );
+
+ cx.get_region_name_for_ty(return_ty, 0)
+ }
+ AnnotatedBorrowFnSignature::NamedFunction { arguments, return_ty, return_span } => {
+ // Region of return type and arguments checked to be the same earlier.
+ let region_name = cx.get_region_name_for_ty(*return_ty, 0);
+ for (_, argument_span) in arguments {
+ diag.span_label(*argument_span, format!("has lifetime `{}`", region_name));
+ }
+
+ diag.span_label(*return_span, format!("also has lifetime `{}`", region_name,));
+
+ diag.help(&format!(
+ "use data from the highlighted arguments which match the `{}` lifetime of \
+ the return type",
+ region_name,
+ ));
+
+ region_name
+ }
+ }
+ }
+}
+
+/// Detect whether one of the provided spans is a statement nested within the top-most visited expr
+struct ReferencedStatementsVisitor<'a>(&'a [Span], bool);
+
+impl<'a, 'v> Visitor<'v> for ReferencedStatementsVisitor<'a> {
+ fn visit_stmt(&mut self, s: &'v hir::Stmt<'v>) {
+ match s.kind {
+ hir::StmtKind::Semi(expr) if self.0.contains(&expr.span) => {
+ self.1 = true;
+ }
+ _ => {}
+ }
+ }
+}
+
+/// Given a set of spans representing statements initializing the relevant binding, visit all of
+/// the function's expressions looking for branching code paths that *do not* initialize the
+/// binding.
+struct ConditionVisitor<'b> {
+ spans: &'b [Span],
+ name: &'b str,
+ errors: Vec<(Span, String)>,
+}
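+
+// For example (illustrative), with a binding only initialized in the `then` branch:
+//
+// ```
+// let x;
+// if c {
+//     x = 1;
+// } // note: "an `else` arm might be missing here, initializing `x`"
+// println!("{}", x);
+// ```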
+
+impl<'b, 'v> Visitor<'v> for ConditionVisitor<'b> {
+ fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) {
+ match ex.kind {
+ hir::ExprKind::If(cond, body, None) => {
+ // `if` expressions with no `else` that initialize the binding might be missing an
+ // `else` arm.
+ let mut v = ReferencedStatementsVisitor(self.spans, false);
+ v.visit_expr(body);
+ if v.1 {
+ self.errors.push((
+ cond.span,
+ format!(
+ "if this `if` condition is `false`, {} is not initialized",
+ self.name,
+ ),
+ ));
+ self.errors.push((
+ ex.span.shrink_to_hi(),
+ format!("an `else` arm might be missing here, initializing {}", self.name),
+ ));
+ }
+ }
+ hir::ExprKind::If(cond, body, Some(other)) => {
+ // `if` expressions where the binding is only initialized in one of the two arms
+ // might be missing a binding initialization.
+ let mut a = ReferencedStatementsVisitor(self.spans, false);
+ a.visit_expr(body);
+ let mut b = ReferencedStatementsVisitor(self.spans, false);
+ b.visit_expr(other);
+ match (a.1, b.1) {
+ (true, true) | (false, false) => {}
+ (true, false) => {
+ if other.span.is_desugaring(DesugaringKind::WhileLoop) {
+ self.errors.push((
+ cond.span,
+ format!(
+ "if this condition isn't met and the `while` loop runs 0 \
+ times, {} is not initialized",
+ self.name
+ ),
+ ));
+ } else {
+ self.errors.push((
+ body.span.shrink_to_hi().until(other.span),
+ format!(
+ "if the `if` condition is `false` and this `else` arm is \
+ executed, {} is not initialized",
+ self.name
+ ),
+ ));
+ }
+ }
+ (false, true) => {
+ self.errors.push((
+ cond.span,
+ format!(
+ "if this condition is `true`, {} is not initialized",
+ self.name
+ ),
+ ));
+ }
+ }
+ }
+ hir::ExprKind::Match(e, arms, loop_desugar) => {
+ // If the binding is initialized in one of the match arms, then the other match
+ // arms might be missing an initialization.
+ let results: Vec<bool> = arms
+ .iter()
+ .map(|arm| {
+ let mut v = ReferencedStatementsVisitor(self.spans, false);
+ v.visit_arm(arm);
+ v.1
+ })
+ .collect();
+ if results.iter().any(|x| *x) && !results.iter().all(|x| *x) {
+ for (arm, seen) in arms.iter().zip(results) {
+ if !seen {
+ if loop_desugar == hir::MatchSource::ForLoopDesugar {
+ self.errors.push((
+ e.span,
+ format!(
+ "if the `for` loop runs 0 times, {} is not initialized",
+ self.name
+ ),
+ ));
+ } else if let Some(guard) = &arm.guard {
+ self.errors.push((
+ arm.pat.span.to(guard.body().span),
+ format!(
+ "if this pattern and condition are matched, {} is not \
+ initialized",
+ self.name
+ ),
+ ));
+ } else {
+ self.errors.push((
+ arm.pat.span,
+ format!(
+ "if this pattern is matched, {} is not initialized",
+ self.name
+ ),
+ ));
+ }
+ }
+ }
+ }
+ }
+            // FIXME: should we also account for binops, particularly `&&` and `||`? `try` should
+            // also be accounted for. For now it is fine: if we don't find *any* relevant
+            // branching code paths, we point at the places where the binding *is* initialized for
+            // *some* context.
+ _ => {}
+ }
+ walk_expr(self, ex);
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
new file mode 100644
index 000000000..72aee0267
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs
@@ -0,0 +1,744 @@
+//! Print diagnostics to explain why values are borrowed.
+
+use std::collections::VecDeque;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::NllRegionVariableOrigin;
+use rustc_middle::mir::{
+ Body, CastKind, ConstraintCategory, FakeReadCause, Local, Location, Operand, Place, Rvalue,
+ Statement, StatementKind, TerminatorKind,
+};
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::{self, RegionVid, TyCtxt};
+use rustc_span::symbol::{kw, Symbol};
+use rustc_span::{sym, DesugaringKind, Span};
+
+use crate::region_infer::BlameConstraint;
+use crate::{
+ borrow_set::BorrowData, nll::ConstraintDescription, region_infer::Cause, MirBorrowckCtxt,
+ WriteKind,
+};
+
+use super::{find_use, RegionName, UseSpans};
+
+#[derive(Debug)]
+pub(crate) enum BorrowExplanation<'tcx> {
+ UsedLater(LaterUseKind, Span, Option<Span>),
+ UsedLaterInLoop(LaterUseKind, Span, Option<Span>),
+ UsedLaterWhenDropped {
+ drop_loc: Location,
+ dropped_local: Local,
+ should_note_order: bool,
+ },
+ MustBeValidFor {
+ category: ConstraintCategory<'tcx>,
+ from_closure: bool,
+ span: Span,
+ region_name: RegionName,
+ opt_place_desc: Option<String>,
+ },
+ Unexplained,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub(crate) enum LaterUseKind {
+ TraitCapture,
+ ClosureCapture,
+ Call,
+ FakeLetRead,
+ Other,
+}
+
+impl<'tcx> BorrowExplanation<'tcx> {
+ pub(crate) fn is_explained(&self) -> bool {
+ !matches!(self, BorrowExplanation::Unexplained)
+ }
+ pub(crate) fn add_explanation_to_diagnostic(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ local_names: &IndexVec<Local, Option<Symbol>>,
+ err: &mut Diagnostic,
+ borrow_desc: &str,
+ borrow_span: Option<Span>,
+ multiple_borrow_span: Option<(Span, Span)>,
+ ) {
+ match *self {
+ BorrowExplanation::UsedLater(later_use_kind, var_or_use_span, path_span) => {
+ let message = match later_use_kind {
+ LaterUseKind::TraitCapture => "captured here by trait object",
+ LaterUseKind::ClosureCapture => "captured here by closure",
+ LaterUseKind::Call => "used by call",
+ LaterUseKind::FakeLetRead => "stored here",
+ LaterUseKind::Other => "used here",
+ };
+ // We can use `var_or_use_span` if either `path_span` is not present, or both spans are the same
+ if path_span.map(|path_span| path_span == var_or_use_span).unwrap_or(true) {
+ if borrow_span.map(|sp| !sp.overlaps(var_or_use_span)).unwrap_or(true) {
+ err.span_label(
+ var_or_use_span,
+ format!("{}borrow later {}", borrow_desc, message),
+ );
+ }
+ } else {
+ // path_span must be `Some` as otherwise the if condition is true
+ let path_span = path_span.unwrap();
+ // path_span is only present in the case of closure capture
+ assert!(matches!(later_use_kind, LaterUseKind::ClosureCapture));
+ if !borrow_span.map_or(false, |sp| sp.overlaps(var_or_use_span)) {
+ let path_label = "used here by closure";
+ let capture_kind_label = message;
+ err.span_label(
+ var_or_use_span,
+ format!("{}borrow later {}", borrow_desc, capture_kind_label),
+ );
+ err.span_label(path_span, path_label);
+ }
+ }
+ }
+ BorrowExplanation::UsedLaterInLoop(later_use_kind, var_or_use_span, path_span) => {
+ let message = match later_use_kind {
+ LaterUseKind::TraitCapture => {
+ "borrow captured here by trait object, in later iteration of loop"
+ }
+ LaterUseKind::ClosureCapture => {
+ "borrow captured here by closure, in later iteration of loop"
+ }
+ LaterUseKind::Call => "borrow used by call, in later iteration of loop",
+ LaterUseKind::FakeLetRead => "borrow later stored here",
+ LaterUseKind::Other => "borrow used here, in later iteration of loop",
+ };
+ // We can use `var_or_use_span` if either `path_span` is not present, or both spans are the same
+ if path_span.map(|path_span| path_span == var_or_use_span).unwrap_or(true) {
+ err.span_label(var_or_use_span, format!("{}{}", borrow_desc, message));
+ } else {
+ // path_span must be `Some` as otherwise the if condition is true
+ let path_span = path_span.unwrap();
+ // path_span is only present in the case of closure capture
+ assert!(matches!(later_use_kind, LaterUseKind::ClosureCapture));
+ if borrow_span.map(|sp| !sp.overlaps(var_or_use_span)).unwrap_or(true) {
+ let path_label = "used here by closure";
+ let capture_kind_label = message;
+ err.span_label(
+ var_or_use_span,
+ format!("{}borrow later {}", borrow_desc, capture_kind_label),
+ );
+ err.span_label(path_span, path_label);
+ }
+ }
+ }
+ BorrowExplanation::UsedLaterWhenDropped {
+ drop_loc,
+ dropped_local,
+ should_note_order,
+ } => {
+ let local_decl = &body.local_decls[dropped_local];
+ let mut ty = local_decl.ty;
+ if local_decl.source_info.span.desugaring_kind() == Some(DesugaringKind::ForLoop) {
+ if let ty::Adt(adt, substs) = local_decl.ty.kind() {
+ if tcx.is_diagnostic_item(sym::Option, adt.did()) {
+ // in for loop desugaring, only look at the `Some(..)` inner type
+ ty = substs.type_at(0);
+ }
+ }
+ }
+ let (dtor_desc, type_desc) = match ty.kind() {
+ // If type is an ADT that implements Drop, then
+ // simplify output by reporting just the ADT name.
+ ty::Adt(adt, _substs) if adt.has_dtor(tcx) && !adt.is_box() => {
+ ("`Drop` code", format!("type `{}`", tcx.def_path_str(adt.did())))
+ }
+
+ // Otherwise, just report the whole type (and use
+ // the intentionally fuzzy phrase "destructor")
+ ty::Closure(..) => ("destructor", "closure".to_owned()),
+ ty::Generator(..) => ("destructor", "generator".to_owned()),
+
+ _ => ("destructor", format!("type `{}`", local_decl.ty)),
+ };
+
+ match local_names[dropped_local] {
+ Some(local_name) if !local_decl.from_compiler_desugaring() => {
+ let message = format!(
+ "{B}borrow might be used here, when `{LOC}` is dropped \
+ and runs the {DTOR} for {TYPE}",
+ B = borrow_desc,
+ LOC = local_name,
+ TYPE = type_desc,
+ DTOR = dtor_desc
+ );
+ err.span_label(body.source_info(drop_loc).span, message);
+
+ if should_note_order {
+ err.note(
+ "values in a scope are dropped \
+ in the opposite order they are defined",
+ );
+ }
+ }
+ _ => {
+ err.span_label(
+ local_decl.source_info.span,
+ format!(
+ "a temporary with access to the {B}borrow \
+ is created here ...",
+ B = borrow_desc
+ ),
+ );
+ let message = format!(
+ "... and the {B}borrow might be used here, \
+ when that temporary is dropped \
+ and runs the {DTOR} for {TYPE}",
+ B = borrow_desc,
+ TYPE = type_desc,
+ DTOR = dtor_desc
+ );
+ err.span_label(body.source_info(drop_loc).span, message);
+
+ if let Some(info) = &local_decl.is_block_tail {
+ if info.tail_result_is_ignored {
+ // #85581: If the first mutable borrow's scope contains
+ // the second borrow, this suggestion isn't helpful.
+ if !multiple_borrow_span
+ .map(|(old, new)| {
+ old.to(info.span.shrink_to_hi()).contains(new)
+ })
+ .unwrap_or(false)
+ {
+ err.span_suggestion_verbose(
+ info.span.shrink_to_hi(),
+ "consider adding semicolon after the expression so its \
+ temporaries are dropped sooner, before the local variables \
+ declared by the block are dropped",
+ ";",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ } else {
+ err.note(
+ "the temporary is part of an expression at the end of a \
+ block;\nconsider forcing this temporary to be dropped sooner, \
+ before the block's local variables are dropped",
+ );
+ err.multipart_suggestion(
+ "for example, you could save the expression's value in a new \
+ local variable `x` and then make `x` be the expression at the \
+ end of the block",
+ vec![
+ (info.span.shrink_to_lo(), "let x = ".to_string()),
+ (info.span.shrink_to_hi(), "; x".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ };
+ }
+ }
+ }
+ }
+ BorrowExplanation::MustBeValidFor {
+ category,
+ span,
+ ref region_name,
+ ref opt_place_desc,
+ from_closure: _,
+ } => {
+ region_name.highlight_region_name(err);
+
+ if let Some(desc) = opt_place_desc {
+ err.span_label(
+ span,
+ format!(
+ "{}requires that `{}` is borrowed for `{}`",
+ category.description(),
+ desc,
+ region_name,
+ ),
+ );
+ } else {
+ err.span_label(
+ span,
+ format!(
+ "{}requires that {}borrow lasts for `{}`",
+ category.description(),
+ borrow_desc,
+ region_name,
+ ),
+ );
+ };
+
+ self.add_lifetime_bound_suggestion_to_diagnostic(err, &category, span, region_name);
+ }
+ _ => {}
+ }
+ }
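+ /// For an opaque type that must outlive a named region, the suggestion this
+ /// emits looks roughly like the following (an illustrative sketch, not
+ /// verbatim compiler output):
+ ///
+ /// ```text
+ /// help: you can add a bound to the opaque type to make it last less than
+ ///       `'static` and match `'a`
+ ///    |
+ /// LL | fn f<'a>(x: &'a u32) -> impl Sized + 'a
+ ///    |                                    ++++
+ /// ```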
+ pub(crate) fn add_lifetime_bound_suggestion_to_diagnostic(
+ &self,
+ err: &mut Diagnostic,
+ category: &ConstraintCategory<'tcx>,
+ span: Span,
+ region_name: &RegionName,
+ ) {
+ if let ConstraintCategory::OpaqueType = category {
+ let suggestable_name =
+ if region_name.was_named() { region_name.name } else { kw::UnderscoreLifetime };
+
+ let msg = format!(
+ "you can add a bound to the {}to make it last less than `'static` and match `{}`",
+ category.description(),
+ region_name,
+ );
+
+ err.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ &msg,
+ format!(" + {}", suggestable_name),
+ Applicability::Unspecified,
+ );
+ }
+ }
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+ fn free_region_constraint_info(
+ &self,
+ borrow_region: RegionVid,
+ outlived_region: RegionVid,
+ ) -> (ConstraintCategory<'tcx>, bool, Span, Option<RegionName>) {
+ let BlameConstraint { category, from_closure, cause, variance_info: _ } =
+ self.regioncx.best_blame_constraint(
+ &self.body,
+ borrow_region,
+ NllRegionVariableOrigin::FreeRegion,
+ |r| self.regioncx.provides_universal_region(r, borrow_region, outlived_region),
+ );
+
+ let outlived_fr_name = self.give_region_a_name(outlived_region);
+
+ (category, from_closure, cause.span, outlived_fr_name)
+ }
+
+ /// Returns a structured explanation for *why* the borrow contains the
+ /// point from `location`. This is key for the "3-point errors"
+ /// [described in the NLL RFC][d].
+ ///
+ /// # Parameters
+ ///
+ /// - `borrow`: the borrow in question
+ /// - `location`: where the borrow occurs
+ /// - `kind_place`: if `Some`, this describes the statement that triggered the error.
+ /// - first half is the kind of write, if any, being performed
+ /// - second half is the place being accessed
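+ ///
+ /// As a rough illustration (not verbatim compiler output), the three points
+ /// are the borrow, the conflicting access, and the later use that keeps the
+ /// borrow live:
+ ///
+ /// ```text
+ /// let r = &x;        // (1) borrow of `x` occurs here
+ /// x += 1;            // (2) conflicting access to `x`
+ /// println!("{r}");   // (3) borrow later used here
+ /// ```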
+ ///
+ /// [d]: https://rust-lang.github.io/rfcs/2094-nll.html#leveraging-intuition-framing-errors-in-terms-of-points
+ pub(crate) fn explain_why_borrow_contains_point(
+ &self,
+ location: Location,
+ borrow: &BorrowData<'tcx>,
+ kind_place: Option<(WriteKind, Place<'tcx>)>,
+ ) -> BorrowExplanation<'tcx> {
+ debug!(
+ "explain_why_borrow_contains_point(location={:?}, borrow={:?}, kind_place={:?})",
+ location, borrow, kind_place
+ );
+
+ let regioncx = &self.regioncx;
+ let body: &Body<'_> = &self.body;
+ let tcx = self.infcx.tcx;
+
+ let borrow_region_vid = borrow.region;
+ debug!("explain_why_borrow_contains_point: borrow_region_vid={:?}", borrow_region_vid);
+
+ let region_sub = self.regioncx.find_sub_region_live_at(borrow_region_vid, location);
+ debug!("explain_why_borrow_contains_point: region_sub={:?}", region_sub);
+
+ match find_use::find(body, regioncx, tcx, region_sub, location) {
+ Some(Cause::LiveVar(local, location)) => {
+ let span = body.source_info(location).span;
+ let spans = self
+ .move_spans(Place::from(local).as_ref(), location)
+ .or_else(|| self.borrow_spans(span, location));
+
+ let borrow_location = location;
+ if self.is_use_in_later_iteration_of_loop(borrow_location, location) {
+ let later_use = self.later_use_kind(borrow, spans, location);
+ BorrowExplanation::UsedLaterInLoop(later_use.0, later_use.1, later_use.2)
+ } else {
+ // Check if the location represents a `FakeRead`, and adapt the error
+ // message to the `FakeReadCause` it is from: in particular,
+ // the ones inserted in optimized `let var = <expr>` patterns.
+ let later_use = self.later_use_kind(borrow, spans, location);
+ BorrowExplanation::UsedLater(later_use.0, later_use.1, later_use.2)
+ }
+ }
+
+ Some(Cause::DropVar(local, location)) => {
+ let mut should_note_order = false;
+ if self.local_names[local].is_some()
+ && let Some((WriteKind::StorageDeadOrDrop, place)) = kind_place
+ && let Some(borrowed_local) = place.as_local()
+ && self.local_names[borrowed_local].is_some() && local != borrowed_local
+ {
+ should_note_order = true;
+ }
+
+ BorrowExplanation::UsedLaterWhenDropped {
+ drop_loc: location,
+ dropped_local: local,
+ should_note_order,
+ }
+ }
+
+ None => {
+ if let Some(region) = self.to_error_region_vid(borrow_region_vid) {
+ let (category, from_closure, span, region_name) =
+ self.free_region_constraint_info(borrow_region_vid, region);
+ if let Some(region_name) = region_name {
+ let opt_place_desc = self.describe_place(borrow.borrowed_place.as_ref());
+ BorrowExplanation::MustBeValidFor {
+ category,
+ from_closure,
+ span,
+ region_name,
+ opt_place_desc,
+ }
+ } else {
+ debug!(
+ "explain_why_borrow_contains_point: \
+ Could not generate a region name"
+ );
+ BorrowExplanation::Unexplained
+ }
+ } else {
+ debug!(
+ "explain_why_borrow_contains_point: \
+ Could not generate an error region vid"
+ );
+ BorrowExplanation::Unexplained
+ }
+ }
+ }
+ }
+
+ /// Returns `true` if `borrow_location` can reach `use_location` by going through a loop and
+ /// `use_location` is also inside of that loop.
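+ ///
+ /// This drives diagnostics of the following flavor (an illustrative sketch,
+ /// not verbatim compiler output):
+ ///
+ /// ```text
+ /// error[E0499]: cannot borrow `v` as mutable more than once at a time
+ ///    |
+ /// LL |     let r = &mut v;
+ ///    |             ^^^^^^ `v` was mutably borrowed here in the previous
+ ///    |                    iteration of the loop
+ /// ```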
+ fn is_use_in_later_iteration_of_loop(
+ &self,
+ borrow_location: Location,
+ use_location: Location,
+ ) -> bool {
+ let back_edge = self.reach_through_backedge(borrow_location, use_location);
+ back_edge.map_or(false, |back_edge| self.can_reach_head_of_loop(use_location, back_edge))
+ }
+
+ /// Returns the outermost back edge if the `from` location can reach the `to` location by
+ /// passing through that back edge.
+ fn reach_through_backedge(&self, from: Location, to: Location) -> Option<Location> {
+ let mut visited_locations = FxHashSet::default();
+ let mut pending_locations = VecDeque::new();
+ visited_locations.insert(from);
+ pending_locations.push_back(from);
+ debug!("reach_through_backedge: from={:?} to={:?}", from, to,);
+
+ let mut outmost_back_edge = None;
+ while let Some(location) = pending_locations.pop_front() {
+ debug!(
+ "reach_through_backedge: location={:?} outmost_back_edge={:?}
+ pending_locations={:?} visited_locations={:?}",
+ location, outmost_back_edge, pending_locations, visited_locations
+ );
+
+ if location == to && outmost_back_edge.is_some() {
+ // We've managed to reach the use location
+ debug!("reach_through_backedge: found!");
+ return outmost_back_edge;
+ }
+
+ let block = &self.body.basic_blocks()[location.block];
+
+ if location.statement_index < block.statements.len() {
+ let successor = location.successor_within_block();
+ if visited_locations.insert(successor) {
+ pending_locations.push_back(successor);
+ }
+ } else {
+ pending_locations.extend(
+ block
+ .terminator()
+ .successors()
+ .map(|bb| Location { statement_index: 0, block: bb })
+ .filter(|s| visited_locations.insert(*s))
+ .map(|s| {
+ if self.is_back_edge(location, s) {
+ match outmost_back_edge {
+ None => {
+ outmost_back_edge = Some(location);
+ }
+
+ Some(back_edge)
+ if location.dominates(back_edge, &self.dominators) =>
+ {
+ outmost_back_edge = Some(location);
+ }
+
+ Some(_) => {}
+ }
+ }
+
+ s
+ }),
+ );
+ }
+ }
+
+ None
+ }
+
+ /// Returns `true` if the `from` location can reach the `loop_head` location, with `loop_head`
+ /// dominating all the intermediate nodes.
+ fn can_reach_head_of_loop(&self, from: Location, loop_head: Location) -> bool {
+ self.find_loop_head_dfs(from, loop_head, &mut FxHashSet::default())
+ }
+
+ fn find_loop_head_dfs(
+ &self,
+ from: Location,
+ loop_head: Location,
+ visited_locations: &mut FxHashSet<Location>,
+ ) -> bool {
+ visited_locations.insert(from);
+
+ if from == loop_head {
+ return true;
+ }
+
+ if loop_head.dominates(from, &self.dominators) {
+ let block = &self.body.basic_blocks()[from.block];
+
+ if from.statement_index < block.statements.len() {
+ let successor = from.successor_within_block();
+
+ if !visited_locations.contains(&successor)
+ && self.find_loop_head_dfs(successor, loop_head, visited_locations)
+ {
+ return true;
+ }
+ } else {
+ for bb in block.terminator().successors() {
+ let successor = Location { statement_index: 0, block: bb };
+
+ if !visited_locations.contains(&successor)
+ && self.find_loop_head_dfs(successor, loop_head, visited_locations)
+ {
+ return true;
+ }
+ }
+ }
+ }
+
+ false
+ }
+
+ /// True if an edge `source -> target` is a backedge -- in other words, if the target
+ /// dominates the source.
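+ ///
+ /// A minimal sketch: in the CFG below, the edge `bb2 -> bb1` is a back edge,
+ /// because `bb1` dominates `bb2`.
+ ///
+ /// ```text
+ /// bb0 -> bb1 -> bb2
+ ///         ^------'
+ /// ```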
+ fn is_back_edge(&self, source: Location, target: Location) -> bool {
+ target.dominates(source, &self.dominators)
+ }
+
+ /// Determines how the borrow was later used.
+ /// The first span returned points to the location of the conflicting use.
+ /// The second span, which is `Some` only in the case of closures, points
+ /// to the use of the path.
+ fn later_use_kind(
+ &self,
+ borrow: &BorrowData<'tcx>,
+ use_spans: UseSpans<'tcx>,
+ location: Location,
+ ) -> (LaterUseKind, Span, Option<Span>) {
+ match use_spans {
+ UseSpans::ClosureUse { capture_kind_span, path_span, .. } => {
+ // Used in a closure.
+ (LaterUseKind::ClosureCapture, capture_kind_span, Some(path_span))
+ }
+ UseSpans::PatUse(span)
+ | UseSpans::OtherUse(span)
+ | UseSpans::FnSelfUse { var_span: span, .. } => {
+ let block = &self.body.basic_blocks()[location.block];
+
+ let kind = if let Some(&Statement {
+ kind: StatementKind::FakeRead(box (FakeReadCause::ForLet(_), _)),
+ ..
+ }) = block.statements.get(location.statement_index)
+ {
+ LaterUseKind::FakeLetRead
+ } else if self.was_captured_by_trait_object(borrow) {
+ LaterUseKind::TraitCapture
+ } else if location.statement_index == block.statements.len() {
+ if let TerminatorKind::Call { ref func, from_hir_call: true, .. } =
+ block.terminator().kind
+ {
+ // Just point to the function, to reduce the chance of overlapping spans.
+ let function_span = match func {
+ Operand::Constant(c) => c.span,
+ Operand::Copy(place) | Operand::Move(place) => {
+ if let Some(l) = place.as_local() {
+ let local_decl = &self.body.local_decls[l];
+ if self.local_names[l].is_none() {
+ local_decl.source_info.span
+ } else {
+ span
+ }
+ } else {
+ span
+ }
+ }
+ };
+ return (LaterUseKind::Call, function_span, None);
+ } else {
+ LaterUseKind::Other
+ }
+ } else {
+ LaterUseKind::Other
+ };
+
+ (kind, span, None)
+ }
+ }
+ }
+
+ /// Checks if a borrowed value was captured by a trait object. We do this by
+ /// looking forward in the MIR from the reserve location and checking if we see
+ /// an unsized cast to a trait object on our data.
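+ ///
+ /// An illustrative sketch of the pattern being detected:
+ ///
+ /// ```text
+ /// let x = String::new();
+ /// let b = &x;                               // borrow of `x`
+ /// let _obj: &dyn std::fmt::Display = b;     // unsized cast to a trait object
+ /// ```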
+ fn was_captured_by_trait_object(&self, borrow: &BorrowData<'tcx>) -> bool {
+ // Start at the reserve location, find the place that we want to see cast to a trait object.
+ let location = borrow.reserve_location;
+ let block = &self.body[location.block];
+ let stmt = block.statements.get(location.statement_index);
+ debug!("was_captured_by_trait_object: location={:?} stmt={:?}", location, stmt);
+
+ // We make a `queue` vector that has the locations we want to visit. As of writing, this
+ // will only ever have one item at any given time, but by using a vector, we can pop from
+ // it, which simplifies the termination logic.
+ let mut queue = vec![location];
+ let mut target = if let Some(&Statement {
+ kind: StatementKind::Assign(box (ref place, _)),
+ ..
+ }) = stmt
+ {
+ if let Some(local) = place.as_local() {
+ local
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ };
+
+ debug!("was_captured_by_trait: target={:?} queue={:?}", target, queue);
+ while let Some(current_location) = queue.pop() {
+ debug!("was_captured_by_trait: target={:?}", target);
+ let block = &self.body[current_location.block];
+ // We need to check the current location to find out if it is a terminator.
+ let is_terminator = current_location.statement_index == block.statements.len();
+ if !is_terminator {
+ let stmt = &block.statements[current_location.statement_index];
+ debug!("was_captured_by_trait_object: stmt={:?}", stmt);
+
+ // The only kind of statement that we care about is assignments...
+ if let StatementKind::Assign(box (place, rvalue)) = &stmt.kind {
+ let Some(into) = place.local_or_deref_local() else {
+ // Continue at the next location.
+ queue.push(current_location.successor_within_block());
+ continue;
+ };
+
+ match rvalue {
+ // If we see a use, we should check whether it is our data, and if so
+ // update the place that we're looking for to that new place.
+ Rvalue::Use(operand) => match operand {
+ Operand::Copy(place) | Operand::Move(place) => {
+ if let Some(from) = place.as_local() {
+ if from == target {
+ target = into;
+ }
+ }
+ }
+ _ => {}
+ },
+ // If we see an unsized cast, then if it is our data we should check
+ // whether it is being cast to a trait object.
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), operand, ty) => {
+ match operand {
+ Operand::Copy(place) | Operand::Move(place) => {
+ if let Some(from) = place.as_local() {
+ if from == target {
+ debug!("was_captured_by_trait_object: ty={:?}", ty);
+ // Check the type for a trait object.
+ return match ty.kind() {
+ // `&dyn Trait`
+ ty::Ref(_, ty, _) if ty.is_trait() => true,
+ // `Box<dyn Trait>`
+ _ if ty.is_box() && ty.boxed_ty().is_trait() => {
+ true
+ }
+ // `dyn Trait`
+ _ if ty.is_trait() => true,
+ // Anything else.
+ _ => false,
+ };
+ }
+ }
+ return false;
+ }
+ _ => return false,
+ }
+ }
+ _ => {}
+ }
+ }
+
+ // Continue at the next location.
+ queue.push(current_location.successor_within_block());
+ } else {
+ // The only thing we need to do for terminators is progress to the next block.
+ let terminator = block.terminator();
+ debug!("was_captured_by_trait_object: terminator={:?}", terminator);
+
+ if let TerminatorKind::Call { destination, target: Some(block), args, .. } =
+ &terminator.kind
+ {
+ if let Some(dest) = destination.as_local() {
+ debug!(
+ "was_captured_by_trait_object: target={:?} dest={:?} args={:?}",
+ target, dest, args
+ );
+ // Check if one of the arguments to this function is the target place.
+ let found_target = args.iter().any(|arg| {
+ if let Operand::Move(place) = arg {
+ if let Some(potential) = place.as_local() {
+ potential == target
+ } else {
+ false
+ }
+ } else {
+ false
+ }
+ });
+
+ // If it is, follow this to the next block and update the target.
+ if found_target {
+ target = dest;
+ queue.push(block.start_location());
+ }
+ }
+ }
+ }
+
+ debug!("was_captured_by_trait: queue={:?}", queue);
+ }
+
+ // We didn't find anything and ran out of locations to check.
+ false
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/find_all_local_uses.rs b/compiler/rustc_borrowck/src/diagnostics/find_all_local_uses.rs
new file mode 100644
index 000000000..b3edc35dc
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/find_all_local_uses.rs
@@ -0,0 +1,26 @@
+use std::collections::BTreeSet;
+
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location};
+
+/// Find all uses of (including assignments to) a [`Local`].
+///
+/// Uses `BTreeSet` so output is deterministic.
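+ ///
+ /// A hypothetical call site (purely illustrative):
+ ///
+ /// ```text
+ /// let uses: BTreeSet<Location> = find_all_local_uses::find(body, local);
+ /// ```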
+pub(super) fn find<'tcx>(body: &Body<'tcx>, local: Local) -> BTreeSet<Location> {
+ let mut visitor = AllLocalUsesVisitor { for_local: local, uses: BTreeSet::default() };
+ visitor.visit_body(body);
+ visitor.uses
+}
+
+struct AllLocalUsesVisitor {
+ for_local: Local,
+ uses: BTreeSet<Location>,
+}
+
+impl<'tcx> Visitor<'tcx> for AllLocalUsesVisitor {
+ fn visit_local(&mut self, local: Local, _context: PlaceContext, location: Location) {
+ if local == self.for_local {
+ self.uses.insert(location);
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/find_use.rs b/compiler/rustc_borrowck/src/diagnostics/find_use.rs
new file mode 100644
index 000000000..b5a3081e5
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/find_use.rs
@@ -0,0 +1,128 @@
+use std::collections::VecDeque;
+use std::rc::Rc;
+
+use crate::{
+ def_use::{self, DefUse},
+ nll::ToRegionVid,
+ region_infer::{Cause, RegionInferenceContext},
+};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::mir::visit::{MirVisitable, PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location};
+use rustc_middle::ty::{RegionVid, TyCtxt};
+
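+ /// Walks the MIR forward from `start_point`, restricted to points still
+ /// contained in `region_vid`, and returns the first live use
+ /// (`Cause::LiveVar`) or drop (`Cause::DropVar`) of a local whose type
+ /// mentions `region_vid`, if any.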
+pub(crate) fn find<'tcx>(
+ body: &Body<'tcx>,
+ regioncx: &Rc<RegionInferenceContext<'tcx>>,
+ tcx: TyCtxt<'tcx>,
+ region_vid: RegionVid,
+ start_point: Location,
+) -> Option<Cause> {
+ let mut uf = UseFinder { body, regioncx, tcx, region_vid, start_point };
+
+ uf.find()
+}
+
+struct UseFinder<'cx, 'tcx> {
+ body: &'cx Body<'tcx>,
+ regioncx: &'cx Rc<RegionInferenceContext<'tcx>>,
+ tcx: TyCtxt<'tcx>,
+ region_vid: RegionVid,
+ start_point: Location,
+}
+
+impl<'cx, 'tcx> UseFinder<'cx, 'tcx> {
+ fn find(&mut self) -> Option<Cause> {
+ let mut queue = VecDeque::new();
+ let mut visited = FxHashSet::default();
+
+ queue.push_back(self.start_point);
+ while let Some(p) = queue.pop_front() {
+ if !self.regioncx.region_contains(self.region_vid, p) {
+ continue;
+ }
+
+ if !visited.insert(p) {
+ continue;
+ }
+
+ let block_data = &self.body[p.block];
+
+ match self.def_use(p, block_data.visitable(p.statement_index)) {
+ Some(DefUseResult::Def) => {}
+
+ Some(DefUseResult::UseLive { local }) => {
+ return Some(Cause::LiveVar(local, p));
+ }
+
+ Some(DefUseResult::UseDrop { local }) => {
+ return Some(Cause::DropVar(local, p));
+ }
+
+ None => {
+ if p.statement_index < block_data.statements.len() {
+ queue.push_back(p.successor_within_block());
+ } else {
+ queue.extend(
+ block_data
+ .terminator()
+ .successors()
+ .filter(|&bb| Some(&Some(bb)) != block_data.terminator().unwind())
+ .map(|bb| Location { statement_index: 0, block: bb }),
+ );
+ }
+ }
+ }
+ }
+
+ None
+ }
+
+ fn def_use(&self, location: Location, thing: &dyn MirVisitable<'tcx>) -> Option<DefUseResult> {
+ let mut visitor = DefUseVisitor {
+ body: self.body,
+ tcx: self.tcx,
+ region_vid: self.region_vid,
+ def_use_result: None,
+ };
+
+ thing.apply(location, &mut visitor);
+
+ visitor.def_use_result
+ }
+}
+
+struct DefUseVisitor<'cx, 'tcx> {
+ body: &'cx Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ region_vid: RegionVid,
+ def_use_result: Option<DefUseResult>,
+}
+
+enum DefUseResult {
+ Def,
+ UseLive { local: Local },
+ UseDrop { local: Local },
+}
+
+impl<'cx, 'tcx> Visitor<'tcx> for DefUseVisitor<'cx, 'tcx> {
+ fn visit_local(&mut self, local: Local, context: PlaceContext, _: Location) {
+ let local_ty = self.body.local_decls[local].ty;
+
+ let mut found_it = false;
+ self.tcx.for_each_free_region(&local_ty, |r| {
+ if r.to_region_vid() == self.region_vid {
+ found_it = true;
+ }
+ });
+
+ if found_it {
+ self.def_use_result = match def_use::categorize(context) {
+ Some(DefUse::Def) => Some(DefUseResult::Def),
+ Some(DefUse::Use) => Some(DefUseResult::UseLive { local }),
+ Some(DefUse::Drop) => Some(DefUseResult::UseDrop { local }),
+ None => None,
+ };
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs
new file mode 100644
index 000000000..098e8de94
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs
@@ -0,0 +1,1127 @@
+//! Borrow checker diagnostics.
+
+use itertools::Itertools;
+use rustc_const_eval::util::{call_kind, CallDesugaringKind};
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, Namespace};
+use rustc_hir::GeneratorKind;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::mir::{
+ AggregateKind, Constant, FakeReadCause, Field, Local, LocalInfo, LocalKind, Location, Operand,
+ Place, PlaceRef, ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind,
+};
+use rustc_middle::ty::print::Print;
+use rustc_middle::ty::{self, DefIdTree, Instance, Ty, TyCtxt};
+use rustc_mir_dataflow::move_paths::{InitLocation, LookupResult};
+use rustc_span::def_id::LocalDefId;
+use rustc_span::{symbol::sym, Span, Symbol, DUMMY_SP};
+use rustc_target::abi::VariantIdx;
+use rustc_trait_selection::traits::type_known_to_meet_bound_modulo_regions;
+
+use super::borrow_set::BorrowData;
+use super::MirBorrowckCtxt;
+
+mod find_all_local_uses;
+mod find_use;
+mod outlives_suggestion;
+mod region_name;
+mod var_name;
+
+mod bound_region_errors;
+mod conflict_errors;
+mod explain_borrow;
+mod move_errors;
+mod mutability_errors;
+mod region_errors;
+
+pub(crate) use bound_region_errors::{ToUniverseInfo, UniverseInfo};
+pub(crate) use mutability_errors::AccessKind;
+pub(crate) use outlives_suggestion::OutlivesSuggestionBuilder;
+pub(crate) use region_errors::{ErrorConstraintInfo, RegionErrorKind, RegionErrors};
+pub(crate) use region_name::{RegionName, RegionNameSource};
+pub(crate) use rustc_const_eval::util::CallKind;
+
+pub(super) struct DescribePlaceOpt {
+ pub including_downcast: bool,
+
+ /// Enable/disable printing of tuple fields.
+ /// For example, for the tuple field `x.0`: if `true`, print `x.0`; otherwise just `x`.
+ pub including_tuple_field: bool,
+}
+
+pub(super) struct IncludingTupleField(pub(super) bool);
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+ /// Adds a suggestion when a closure is invoked twice with a moved variable or when a closure
+ /// is moved after being invoked.
+ ///
+ /// ```text
+ /// note: closure cannot be invoked more than once because it moves the variable `dict` out of
+ /// its environment
+ /// --> $DIR/issue-42065.rs:16:29
+ /// |
+ /// LL | for (key, value) in dict {
+ /// | ^^^^
+ /// ```
+ pub(super) fn add_moved_or_invoked_closure_note(
+ &self,
+ location: Location,
+ place: PlaceRef<'tcx>,
+ diag: &mut Diagnostic,
+ ) {
+ debug!("add_moved_or_invoked_closure_note: location={:?} place={:?}", location, place);
+ let mut target = place.local_or_deref_local();
+ for stmt in &self.body[location.block].statements[location.statement_index..] {
+ debug!("add_moved_or_invoked_closure_note: stmt={:?} target={:?}", stmt, target);
+ if let StatementKind::Assign(box (into, Rvalue::Use(from))) = &stmt.kind {
+ debug!("add_fnonce_closure_note: into={:?} from={:?}", into, from);
+ match from {
+ Operand::Copy(ref place) | Operand::Move(ref place)
+ if target == place.local_or_deref_local() =>
+ {
+ target = into.local_or_deref_local()
+ }
+ _ => {}
+ }
+ }
+ }
+
+ // Check if we are attempting to call a closure after it has been invoked.
+ let terminator = self.body[location.block].terminator();
+ debug!("add_moved_or_invoked_closure_note: terminator={:?}", terminator);
+ if let TerminatorKind::Call {
+ func: Operand::Constant(box Constant { literal, .. }),
+ args,
+ ..
+ } = &terminator.kind
+ {
+ if let ty::FnDef(id, _) = *literal.ty().kind() {
+ debug!("add_moved_or_invoked_closure_note: id={:?}", id);
+ if Some(self.infcx.tcx.parent(id)) == self.infcx.tcx.lang_items().fn_once_trait() {
+ let closure = match args.first() {
+ Some(Operand::Copy(ref place)) | Some(Operand::Move(ref place))
+ if target == place.local_or_deref_local() =>
+ {
+ place.local_or_deref_local().unwrap()
+ }
+ _ => return,
+ };
+
+ debug!("add_moved_or_invoked_closure_note: closure={:?}", closure);
+ if let ty::Closure(did, _) = self.body.local_decls[closure].ty.kind() {
+ let did = did.expect_local();
+ let hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(did);
+
+ if let Some((span, hir_place)) =
+ self.infcx.tcx.typeck(did).closure_kind_origins().get(hir_id)
+ {
+ diag.span_note(
+ *span,
+ &format!(
+ "closure cannot be invoked more than once because it moves the \
+ variable `{}` out of its environment",
+ ty::place_to_string_for_capture(self.infcx.tcx, hir_place)
+ ),
+ );
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ // Check if we are just moving a closure after it has been invoked.
+ if let Some(target) = target {
+ if let ty::Closure(did, _) = self.body.local_decls[target].ty.kind() {
+ let did = did.expect_local();
+ let hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(did);
+
+ if let Some((span, hir_place)) =
+ self.infcx.tcx.typeck(did).closure_kind_origins().get(hir_id)
+ {
+ diag.span_note(
+ *span,
+ &format!(
+ "closure cannot be moved more than once as it is not `Copy` due to \
+ moving the variable `{}` out of its environment",
+ ty::place_to_string_for_capture(self.infcx.tcx, hir_place)
+ ),
+ );
+ }
+ }
+ }
+ }
+
+ /// End-user visible description of `place` if one can be found.
+ /// If the place is a temporary, for instance, `"value"` will be returned.
+ pub(super) fn describe_any_place(&self, place_ref: PlaceRef<'tcx>) -> String {
+ match self.describe_place(place_ref) {
+ Some(mut descr) => {
+ // Surround descr with `backticks`.
+ descr.reserve(2);
+ descr.insert(0, '`');
+ descr.push('`');
+ descr
+ }
+ None => "value".to_string(),
+ }
+ }
+
+ /// End-user visible description of `place` if one can be found.
+ /// If the place is a temporary, for instance, `None` will be returned.
+ pub(super) fn describe_place(&self, place_ref: PlaceRef<'tcx>) -> Option<String> {
+ self.describe_place_with_options(
+ place_ref,
+ DescribePlaceOpt { including_downcast: false, including_tuple_field: true },
+ )
+ }
+
+ /// End-user visible description of `place` if one can be found. If the place is a temporary,
+ /// for instance, `None` will be returned.
+ /// The `including_downcast` option makes the function return `None` if any projection
+ /// element is a `Downcast` and `including_downcast` is `true`.
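+ ///
+ /// For instance (illustrative values), for a place denoting field `0` of a
+ /// tuple-struct binding `foo`, this can return `Some("foo.0")`, or just
+ /// `Some("foo")` when `including_tuple_field` is `false`.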
+ pub(super) fn describe_place_with_options(
+ &self,
+ place: PlaceRef<'tcx>,
+ opt: DescribePlaceOpt,
+ ) -> Option<String> {
+ let local = place.local;
+ let mut autoderef_index = None;
+ let mut buf = String::new();
+ let mut ok = self.append_local_to_string(local, &mut buf);
+
+ for (index, elem) in place.projection.into_iter().enumerate() {
+ match elem {
+ ProjectionElem::Deref => {
+ if index == 0 {
+ if self.body.local_decls[local].is_ref_for_guard() {
+ continue;
+ }
+ if let Some(box LocalInfo::StaticRef { def_id, .. }) =
+ &self.body.local_decls[local].local_info
+ {
+ buf.push_str(self.infcx.tcx.item_name(*def_id).as_str());
+ ok = Ok(());
+ continue;
+ }
+ }
+ if let Some(field) = self.is_upvar_field_projection(PlaceRef {
+ local,
+ projection: place.projection.split_at(index + 1).0,
+ }) {
+ let var_index = field.index();
+ buf = self.upvars[var_index].place.to_string(self.infcx.tcx);
+ ok = Ok(());
+ if !self.upvars[var_index].by_ref {
+ buf.insert(0, '*');
+ }
+ } else {
+ if autoderef_index.is_none() {
+ autoderef_index =
+ match place.projection.into_iter().rev().find_position(|elem| {
+ !matches!(
+ elem,
+ ProjectionElem::Deref | ProjectionElem::Downcast(..)
+ )
+ }) {
+ Some((index, _)) => Some(place.projection.len() - index),
+ None => Some(0),
+ };
+ }
+ if index >= autoderef_index.unwrap() {
+ buf.insert(0, '*');
+ }
+ }
+ }
+ ProjectionElem::Downcast(..) if opt.including_downcast => return None,
+ ProjectionElem::Downcast(..) => (),
+ ProjectionElem::Field(field, _ty) => {
+ // FIXME(project-rfc_2229#36): print capture precisely here.
+ if let Some(field) = self.is_upvar_field_projection(PlaceRef {
+ local,
+ projection: place.projection.split_at(index + 1).0,
+ }) {
+ buf = self.upvars[field.index()].place.to_string(self.infcx.tcx);
+ ok = Ok(());
+ } else {
+ let field_name = self.describe_field(
+ PlaceRef { local, projection: place.projection.split_at(index).0 },
+ *field,
+ IncludingTupleField(opt.including_tuple_field),
+ );
+ if let Some(field_name_str) = field_name {
+ buf.push('.');
+ buf.push_str(&field_name_str);
+ }
+ }
+ }
+ ProjectionElem::Index(index) => {
+ buf.push('[');
+ if self.append_local_to_string(*index, &mut buf).is_err() {
+ buf.push('_');
+ }
+ buf.push(']');
+ }
+ ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {
+ // Since it isn't possible to borrow an element at a particular index and
+ // then use another one while the borrow is held, don't output index details,
+ // to avoid confusing the end-user.
+ buf.push_str("[..]");
+ }
+ }
+ }
+ ok.ok().map(|_| buf)
+ }
+
+ fn describe_name(&self, place: PlaceRef<'tcx>) -> Option<Symbol> {
+ for elem in place.projection.into_iter() {
+ match elem {
+ ProjectionElem::Downcast(Some(name), _) => {
+ return Some(*name);
+ }
+ _ => {}
+ }
+ }
+ None
+ }
+
+ /// Appends end-user visible description of the `local` place to `buf`. If `local` doesn't have
+ /// a name, or its name was generated by the compiler, then `Err` is returned
+ fn append_local_to_string(&self, local: Local, buf: &mut String) -> Result<(), ()> {
+ let decl = &self.body.local_decls[local];
+ match self.local_names[local] {
+ Some(name) if !decl.from_compiler_desugaring() => {
+ buf.push_str(name.as_str());
+ Ok(())
+ }
+ _ => Err(()),
+ }
+ }
+
+ /// End-user visible description of the `field`-th field of `place`
+ fn describe_field(
+ &self,
+ place: PlaceRef<'tcx>,
+ field: Field,
+ including_tuple_field: IncludingTupleField,
+ ) -> Option<String> {
+ let place_ty = match place {
+ PlaceRef { local, projection: [] } => PlaceTy::from_ty(self.body.local_decls[local].ty),
+ PlaceRef { local, projection: [proj_base @ .., elem] } => match elem {
+ ProjectionElem::Deref
+ | ProjectionElem::Index(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => {
+ PlaceRef { local, projection: proj_base }.ty(self.body, self.infcx.tcx)
+ }
+ ProjectionElem::Downcast(..) => place.ty(self.body, self.infcx.tcx),
+ ProjectionElem::Field(_, field_type) => PlaceTy::from_ty(*field_type),
+ },
+ };
+ self.describe_field_from_ty(
+ place_ty.ty,
+ field,
+ place_ty.variant_index,
+ including_tuple_field,
+ )
+ }
+
+ /// End-user visible description of the `field`-th field of `ty`
+ fn describe_field_from_ty(
+ &self,
+ ty: Ty<'_>,
+ field: Field,
+ variant_index: Option<VariantIdx>,
+ including_tuple_field: IncludingTupleField,
+ ) -> Option<String> {
+ if ty.is_box() {
+ // If the type is a box, the field is described from the boxed type
+ self.describe_field_from_ty(ty.boxed_ty(), field, variant_index, including_tuple_field)
+ } else {
+ match *ty.kind() {
+ ty::Adt(def, _) => {
+ let variant = if let Some(idx) = variant_index {
+ assert!(def.is_enum());
+ &def.variant(idx)
+ } else {
+ def.non_enum_variant()
+ };
+ if !including_tuple_field.0 && variant.ctor_kind == CtorKind::Fn {
+ return None;
+ }
+ Some(variant.fields[field.index()].name.to_string())
+ }
+ ty::Tuple(_) => Some(field.index().to_string()),
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+ self.describe_field_from_ty(ty, field, variant_index, including_tuple_field)
+ }
+ ty::Array(ty, _) | ty::Slice(ty) => {
+ self.describe_field_from_ty(ty, field, variant_index, including_tuple_field)
+ }
+ ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+ // We won't be borrowck'ing here if the closure came from another crate,
+ // so it's safe to call `expect_local`.
+ //
+ // We know the field exists so it's safe to call operator[] and `unwrap` here.
+ let def_id = def_id.expect_local();
+ let var_id = self
+ .infcx
+ .tcx
+ .typeck(def_id)
+ .closure_min_captures_flattened(def_id)
+ .nth(field.index())
+ .unwrap()
+ .get_root_variable();
+
+ Some(self.infcx.tcx.hir().name(var_id).to_string())
+ }
+ _ => {
+ // Might need revising when the fields-in-traits RFC is implemented
+ // (https://github.com/rust-lang/rfcs/pull/1546)
+ bug!("End-user description not implemented for field access on `{:?}`", ty);
+ }
+ }
+ }
+ }
+
+ /// Add a note that a type does not implement `Copy`
+ pub(super) fn note_type_does_not_implement_copy(
+ &self,
+ err: &mut Diagnostic,
+ place_desc: &str,
+ ty: Ty<'tcx>,
+ span: Option<Span>,
+ move_prefix: &str,
+ ) {
+ let message = format!(
+ "{}move occurs because {} has type `{}`, which does not implement the `Copy` trait",
+ move_prefix, place_desc, ty,
+ );
+ if let Some(span) = span {
+ err.span_label(span, message);
+ } else {
+ err.note(&message);
+ }
+ }
+
+ pub(super) fn borrowed_content_source(
+ &self,
+ deref_base: PlaceRef<'tcx>,
+ ) -> BorrowedContentSource<'tcx> {
+ let tcx = self.infcx.tcx;
+
+ // Look up the provided place and work out the move path index for it;
+ // we'll use this to check whether it was originally from an overloaded
+ // operator.
+ match self.move_data.rev_lookup.find(deref_base) {
+ LookupResult::Exact(mpi) | LookupResult::Parent(Some(mpi)) => {
+ debug!("borrowed_content_source: mpi={:?}", mpi);
+
+ for i in &self.move_data.init_path_map[mpi] {
+ let init = &self.move_data.inits[*i];
+ debug!("borrowed_content_source: init={:?}", init);
+ // We're only interested in statements that initialized a value, not the
+ // initializations from arguments.
+ let InitLocation::Statement(loc) = init.location else { continue };
+
+ let bbd = &self.body[loc.block];
+ let is_terminator = bbd.statements.len() == loc.statement_index;
+ debug!(
+ "borrowed_content_source: loc={:?} is_terminator={:?}",
+ loc, is_terminator,
+ );
+ if !is_terminator {
+ continue;
+ } else if let Some(Terminator {
+ kind: TerminatorKind::Call { ref func, from_hir_call: false, .. },
+ ..
+ }) = bbd.terminator
+ {
+ if let Some(source) =
+ BorrowedContentSource::from_call(func.ty(self.body, tcx), tcx)
+ {
+ return source;
+ }
+ }
+ }
+ }
+ // Base is a `static`, so it won't be from an overloaded operator
+ _ => (),
+ };
+
+ // If we didn't find an overloaded deref or index, then assume it's a
+ // built in deref and check the type of the base.
+ let base_ty = deref_base.ty(self.body, tcx).ty;
+ if base_ty.is_unsafe_ptr() {
+ BorrowedContentSource::DerefRawPointer
+ } else if base_ty.is_mutable_ptr() {
+ BorrowedContentSource::DerefMutableRef
+ } else {
+ BorrowedContentSource::DerefSharedRef
+ }
+ }
+
+ /// Returns the name of the provided `Ty` (which must be a reference) with a synthesized
+ /// lifetime name where required.
+ pub(super) fn get_name_for_ty(&self, ty: Ty<'tcx>, counter: usize) -> String {
+ let mut printer = ty::print::FmtPrinter::new(self.infcx.tcx, Namespace::TypeNS);
+
+ // We need to add synthesized lifetimes where appropriate. We do
+ // this by hooking into the pretty printer and telling it to label the
+ // lifetimes without names with the value `'0`.
+ if let ty::Ref(region, ..) = ty.kind() {
+ match **region {
+ ty::ReLateBound(_, ty::BoundRegion { kind: br, .. })
+ | ty::RePlaceholder(ty::PlaceholderRegion { name: br, .. }) => {
+ printer.region_highlight_mode.highlighting_bound_region(br, counter)
+ }
+ _ => {}
+ }
+ }
+
+ ty.print(printer).unwrap().into_buffer()
+ }
+
+ /// Returns the name of the provided `Ty` (that must be a reference)'s region with a
+ /// synthesized lifetime name where required.
+ pub(super) fn get_region_name_for_ty(&self, ty: Ty<'tcx>, counter: usize) -> String {
+ let mut printer = ty::print::FmtPrinter::new(self.infcx.tcx, Namespace::TypeNS);
+
+ let region = if let ty::Ref(region, ..) = ty.kind() {
+ match **region {
+ ty::ReLateBound(_, ty::BoundRegion { kind: br, .. })
+ | ty::RePlaceholder(ty::PlaceholderRegion { name: br, .. }) => {
+ printer.region_highlight_mode.highlighting_bound_region(br, counter)
+ }
+ _ => {}
+ }
+ region
+ } else {
+ bug!("ty for annotation of borrow region is not a reference");
+ };
+
+ region.print(printer).unwrap().into_buffer()
+ }
+}
+
+ /// The span(s) associated with a use of a place.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub(super) enum UseSpans<'tcx> {
+ /// The access is caused by capturing a variable for a closure.
+ ClosureUse {
+ /// This is true if the captured variable was from a generator.
+ generator_kind: Option<GeneratorKind>,
+ /// The span of the args of the closure, including the `move` keyword if
+ /// it's present.
+ args_span: Span,
+ /// The span of the use resulting in the capture kind.
+ /// See `ty::CaptureInfo` for more details.
+ capture_kind_span: Span,
+ /// The span of the use resulting in the captured path.
+ /// See `ty::CaptureInfo` for more details.
+ path_span: Span,
+ },
+ /// The access is caused by using a variable as the receiver of a method
+ /// that takes `self`.
+ FnSelfUse {
+ /// The span of the variable being moved
+ var_span: Span,
+ /// The span of the method call on the variable
+ fn_call_span: Span,
+ /// The definition span of the method being called
+ fn_span: Span,
+ kind: CallKind<'tcx>,
+ },
+ /// This access is caused by a `match` or `if let` pattern.
+ PatUse(Span),
+ /// This access has a single span associated to it: common case.
+ OtherUse(Span),
+}
+
+impl UseSpans<'_> {
+ pub(super) fn args_or_use(self) -> Span {
+ match self {
+ UseSpans::ClosureUse { args_span: span, .. }
+ | UseSpans::PatUse(span)
+ | UseSpans::OtherUse(span) => span,
+ UseSpans::FnSelfUse { fn_call_span, kind: CallKind::DerefCoercion { .. }, .. } => {
+ fn_call_span
+ }
+ UseSpans::FnSelfUse { var_span, .. } => var_span,
+ }
+ }
+
+ /// Returns the span of `self`; in the case of a `ClosureUse`, returns the `path_span`.
+ pub(super) fn var_or_use_path_span(self) -> Span {
+ match self {
+ UseSpans::ClosureUse { path_span: span, .. }
+ | UseSpans::PatUse(span)
+ | UseSpans::OtherUse(span) => span,
+ UseSpans::FnSelfUse { fn_call_span, kind: CallKind::DerefCoercion { .. }, .. } => {
+ fn_call_span
+ }
+ UseSpans::FnSelfUse { var_span, .. } => var_span,
+ }
+ }
+
+ /// Returns the span of `self`; in the case of a `ClosureUse`, returns the `capture_kind_span`.
+ pub(super) fn var_or_use(self) -> Span {
+ match self {
+ UseSpans::ClosureUse { capture_kind_span: span, .. }
+ | UseSpans::PatUse(span)
+ | UseSpans::OtherUse(span) => span,
+ UseSpans::FnSelfUse { fn_call_span, kind: CallKind::DerefCoercion { .. }, .. } => {
+ fn_call_span
+ }
+ UseSpans::FnSelfUse { var_span, .. } => var_span,
+ }
+ }
+
+ pub(super) fn generator_kind(self) -> Option<GeneratorKind> {
+ match self {
+ UseSpans::ClosureUse { generator_kind, .. } => generator_kind,
+ _ => None,
+ }
+ }
+
+ // Add a span label to the arguments of the closure, if it exists.
+ pub(super) fn args_span_label(self, err: &mut Diagnostic, message: impl Into<String>) {
+ if let UseSpans::ClosureUse { args_span, .. } = self {
+ err.span_label(args_span, message);
+ }
+ }
+
+ // Add a span label to the use of the captured variable, if it exists;
+ // this only adds a label to the `path_span`.
+ pub(super) fn var_span_label_path_only(self, err: &mut Diagnostic, message: impl Into<String>) {
+ if let UseSpans::ClosureUse { path_span, .. } = self {
+ err.span_label(path_span, message);
+ }
+ }
+
+ // Add a span label to the use of the captured variable, if it exists.
+ pub(super) fn var_span_label(
+ self,
+ err: &mut Diagnostic,
+ message: impl Into<String>,
+ kind_desc: impl Into<String>,
+ ) {
+ if let UseSpans::ClosureUse { capture_kind_span, path_span, .. } = self {
+ if capture_kind_span == path_span {
+ err.span_label(capture_kind_span, message);
+ } else {
+ let capture_kind_label =
+ format!("capture is {} because of use here", kind_desc.into());
+ let path_label = message;
+ err.span_label(capture_kind_span, capture_kind_label);
+ err.span_label(path_span, path_label);
+ }
+ }
+ }
+
+ /// Returns `false` if this place is not used in a closure.
+ pub(super) fn for_closure(&self) -> bool {
+ match *self {
+ UseSpans::ClosureUse { generator_kind, .. } => generator_kind.is_none(),
+ _ => false,
+ }
+ }
+
+ /// Returns `false` if this place is not used in a generator.
+ pub(super) fn for_generator(&self) -> bool {
+ match *self {
+ UseSpans::ClosureUse { generator_kind, .. } => generator_kind.is_some(),
+ _ => false,
+ }
+ }
+
+ /// Describe the span associated with a use of a place.
+ pub(super) fn describe(&self) -> &str {
+ match *self {
+ UseSpans::ClosureUse { generator_kind, .. } => {
+ if generator_kind.is_some() {
+ " in generator"
+ } else {
+ " in closure"
+ }
+ }
+ _ => "",
+ }
+ }
+
+ pub(super) fn or_else<F>(self, if_other: F) -> Self
+ where
+ F: FnOnce() -> Self,
+ {
+ match self {
+ closure @ UseSpans::ClosureUse { .. } => closure,
+ UseSpans::PatUse(_) | UseSpans::OtherUse(_) => if_other(),
+ fn_self @ UseSpans::FnSelfUse { .. } => fn_self,
+ }
+ }
+}
+
+pub(super) enum BorrowedContentSource<'tcx> {
+ DerefRawPointer,
+ DerefMutableRef,
+ DerefSharedRef,
+ OverloadedDeref(Ty<'tcx>),
+ OverloadedIndex(Ty<'tcx>),
+}
+
+impl<'tcx> BorrowedContentSource<'tcx> {
+ pub(super) fn describe_for_unnamed_place(&self, tcx: TyCtxt<'_>) -> String {
+ match *self {
+ BorrowedContentSource::DerefRawPointer => "a raw pointer".to_string(),
+ BorrowedContentSource::DerefSharedRef => "a shared reference".to_string(),
+ BorrowedContentSource::DerefMutableRef => "a mutable reference".to_string(),
+ BorrowedContentSource::OverloadedDeref(ty) => ty
+ .ty_adt_def()
+ .and_then(|adt| match tcx.get_diagnostic_name(adt.did())? {
+ name @ (sym::Rc | sym::Arc) => Some(format!("an `{}`", name)),
+ _ => None,
+ })
+ .unwrap_or_else(|| format!("dereference of `{}`", ty)),
+ BorrowedContentSource::OverloadedIndex(ty) => format!("index of `{}`", ty),
+ }
+ }
+
+ pub(super) fn describe_for_named_place(&self) -> Option<&'static str> {
+ match *self {
+ BorrowedContentSource::DerefRawPointer => Some("raw pointer"),
+ BorrowedContentSource::DerefSharedRef => Some("shared reference"),
+ BorrowedContentSource::DerefMutableRef => Some("mutable reference"),
+ // Overloaded deref and index operators should be evaluated into a
+ // temporary. So we don't need a description here.
+ BorrowedContentSource::OverloadedDeref(_)
+ | BorrowedContentSource::OverloadedIndex(_) => None,
+ }
+ }
+
+ pub(super) fn describe_for_immutable_place(&self, tcx: TyCtxt<'_>) -> String {
+ match *self {
+ BorrowedContentSource::DerefRawPointer => "a `*const` pointer".to_string(),
+ BorrowedContentSource::DerefSharedRef => "a `&` reference".to_string(),
+ BorrowedContentSource::DerefMutableRef => {
+ bug!("describe_for_immutable_place: DerefMutableRef isn't immutable")
+ }
+ BorrowedContentSource::OverloadedDeref(ty) => ty
+ .ty_adt_def()
+ .and_then(|adt| match tcx.get_diagnostic_name(adt.did())? {
+ name @ (sym::Rc | sym::Arc) => Some(format!("an `{}`", name)),
+ _ => None,
+ })
+ .unwrap_or_else(|| format!("dereference of `{}`", ty)),
+ BorrowedContentSource::OverloadedIndex(ty) => format!("an index of `{}`", ty),
+ }
+ }
+
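+ /// If `func` is a call to an overloaded `Deref`/`DerefMut` or
+ /// `Index`/`IndexMut` method, returns the corresponding overloaded content
+ /// source; otherwise returns `None`.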
+ fn from_call(func: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Option<Self> {
+ match *func.kind() {
+ ty::FnDef(def_id, substs) => {
+ let trait_id = tcx.trait_of_item(def_id)?;
+
+ let lang_items = tcx.lang_items();
+ if Some(trait_id) == lang_items.deref_trait()
+ || Some(trait_id) == lang_items.deref_mut_trait()
+ {
+ Some(BorrowedContentSource::OverloadedDeref(substs.type_at(0)))
+ } else if Some(trait_id) == lang_items.index_trait()
+ || Some(trait_id) == lang_items.index_mut_trait()
+ {
+ Some(BorrowedContentSource::OverloadedIndex(substs.type_at(0)))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ }
+ }
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+ /// Finds the spans associated with a move or copy of `moved_place` at `location`.
+ pub(super) fn move_spans(
+ &self,
+ moved_place: PlaceRef<'tcx>, // Could also be an upvar.
+ location: Location,
+ ) -> UseSpans<'tcx> {
+ use self::UseSpans::*;
+
+ let Some(stmt) = self.body[location.block].statements.get(location.statement_index) else {
+ return OtherUse(self.body.source_info(location).span);
+ };
+
+ debug!("move_spans: moved_place={:?} location={:?} stmt={:?}", moved_place, location, stmt);
+ if let StatementKind::Assign(box (_, Rvalue::Aggregate(ref kind, ref places))) = stmt.kind {
+ match **kind {
+ AggregateKind::Closure(def_id, _) | AggregateKind::Generator(def_id, _, _) => {
+ debug!("move_spans: def_id={:?} places={:?}", def_id, places);
+ if let Some((args_span, generator_kind, capture_kind_span, path_span)) =
+ self.closure_span(def_id, moved_place, places)
+ {
+ return ClosureUse {
+ generator_kind,
+ args_span,
+ capture_kind_span,
+ path_span,
+ };
+ }
+ }
+ _ => {}
+ }
+ }
+
+ // A StatementKind::FakeRead only contains a def_id if it was introduced as a result
+ // of pattern matching within a closure.
+ if let StatementKind::FakeRead(box (cause, ref place)) = stmt.kind {
+ match cause {
+ FakeReadCause::ForMatchedPlace(Some(closure_def_id))
+ | FakeReadCause::ForLet(Some(closure_def_id)) => {
+ debug!("move_spans: def_id={:?} place={:?}", closure_def_id, place);
+ let places = &[Operand::Move(*place)];
+ if let Some((args_span, generator_kind, capture_kind_span, path_span)) =
+ self.closure_span(closure_def_id, moved_place, places)
+ {
+ return ClosureUse {
+ generator_kind,
+ args_span,
+ capture_kind_span,
+ path_span,
+ };
+ }
+ }
+ _ => {}
+ }
+ }
+
+ let normal_ret =
+ if moved_place.projection.iter().any(|p| matches!(p, ProjectionElem::Downcast(..))) {
+ PatUse(stmt.source_info.span)
+ } else {
+ OtherUse(stmt.source_info.span)
+ };
+
+ // We are trying to find MIR of the form:
+ // ```
+ // _temp = _moved_val;
+ // ...
+ // FnSelfCall(_temp, ...)
+ // ```
+ //
+ // where `_moved_val` is the place we generated the move error for,
+ // `_temp` is some other local, and `FnSelfCall` is a function
+ // that has a `self` parameter.
+
+ let target_temp = match stmt.kind {
+ StatementKind::Assign(box (temp, _)) if temp.as_local().is_some() => {
+ temp.as_local().unwrap()
+ }
+ _ => return normal_ret,
+ };
+
+ debug!("move_spans: target_temp = {:?}", target_temp);
+
+ if let Some(Terminator {
+ kind: TerminatorKind::Call { fn_span, from_hir_call, .. }, ..
+ }) = &self.body[location.block].terminator
+ {
+ let Some((method_did, method_substs)) =
+ rustc_const_eval::util::find_self_call(
+ self.infcx.tcx,
+ &self.body,
+ target_temp,
+ location.block,
+ )
+ else {
+ return normal_ret;
+ };
+
+ let kind = call_kind(
+ self.infcx.tcx,
+ self.param_env,
+ method_did,
+ method_substs,
+ *fn_span,
+ *from_hir_call,
+ Some(self.infcx.tcx.fn_arg_names(method_did)[0]),
+ );
+
+ return FnSelfUse {
+ var_span: stmt.source_info.span,
+ fn_call_span: *fn_span,
+ fn_span: self.infcx.tcx.def_span(method_did),
+ kind,
+ };
+ }
+ normal_ret
+ }
+
+ /// Finds the span of the arguments of a closure (within `use_span`)
+ /// and its usage of the local assigned at `location`.
+ /// This is done by searching in statements succeeding `location`
+ /// and originating from `use_span`.
+ pub(super) fn borrow_spans(&self, use_span: Span, location: Location) -> UseSpans<'tcx> {
+ use self::UseSpans::*;
+ debug!("borrow_spans: use_span={:?} location={:?}", use_span, location);
+
+ let target = match self.body[location.block].statements.get(location.statement_index) {
+ Some(&Statement { kind: StatementKind::Assign(box (ref place, _)), .. }) => {
+ if let Some(local) = place.as_local() {
+ local
+ } else {
+ return OtherUse(use_span);
+ }
+ }
+ _ => return OtherUse(use_span),
+ };
+
+ if self.body.local_kind(target) != LocalKind::Temp {
+ // operands are always temporaries.
+ return OtherUse(use_span);
+ }
+
+ for stmt in &self.body[location.block].statements[location.statement_index + 1..] {
+ if let StatementKind::Assign(box (_, Rvalue::Aggregate(ref kind, ref places))) =
+ stmt.kind
+ {
+ let (&def_id, is_generator) = match kind {
+ box AggregateKind::Closure(def_id, _) => (def_id, false),
+ box AggregateKind::Generator(def_id, _, _) => (def_id, true),
+ _ => continue,
+ };
+
+ debug!(
+ "borrow_spans: def_id={:?} is_generator={:?} places={:?}",
+ def_id, is_generator, places
+ );
+ if let Some((args_span, generator_kind, capture_kind_span, path_span)) =
+ self.closure_span(def_id, Place::from(target).as_ref(), places)
+ {
+ return ClosureUse { generator_kind, args_span, capture_kind_span, path_span };
+ } else {
+ return OtherUse(use_span);
+ }
+ }
+
+ if use_span != stmt.source_info.span {
+ break;
+ }
+ }
+
+ OtherUse(use_span)
+ }
+
+ /// Finds the spans of a captured place within a closure or generator.
+ /// The first span is the location of the use resulting in the capture kind of the capture.
+ /// The second span is the location of the use resulting in the captured path of the capture.
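+ ///
+ /// An illustrative sketch (names are made up) of the spans involved:
+ ///
+ /// ```text
+ /// let c = move |i: usize| s.len() + i;
+ /// //      ^^^^^^^^^^^^^^^ args_span (including the `move` keyword)
+ /// //                      the two spans returned for `s` point into the
+ /// //                      closure body, at the uses that determine how
+ /// //                      `s` is captured
+ /// ```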
+ fn closure_span(
+ &self,
+ def_id: LocalDefId,
+ target_place: PlaceRef<'tcx>,
+ places: &[Operand<'tcx>],
+ ) -> Option<(Span, Option<GeneratorKind>, Span, Span)> {
+ debug!(
+ "closure_span: def_id={:?} target_place={:?} places={:?}",
+ def_id, target_place, places
+ );
+ let hir_id = self.infcx.tcx.hir().local_def_id_to_hir_id(def_id);
+ let expr = &self.infcx.tcx.hir().expect_expr(hir_id).kind;
+ debug!("closure_span: hir_id={:?} expr={:?}", hir_id, expr);
+ if let hir::ExprKind::Closure(&hir::Closure { body, fn_decl_span, .. }) = expr {
+ for (captured_place, place) in
+ self.infcx.tcx.typeck(def_id).closure_min_captures_flattened(def_id).zip(places)
+ {
+ match place {
+ Operand::Copy(place) | Operand::Move(place)
+ if target_place == place.as_ref() =>
+ {
+ debug!("closure_span: found captured local {:?}", place);
+ let body = self.infcx.tcx.hir().body(body);
+ let generator_kind = body.generator_kind();
+
+ return Some((
+ fn_decl_span,
+ generator_kind,
+ captured_place.get_capture_kind_span(self.infcx.tcx),
+ captured_place.get_path_span(self.infcx.tcx),
+ ));
+ }
+ _ => {}
+ }
+ }
+ }
+ None
+ }
+
+ /// Helper to retrieve the span(s) of a given borrow from the current MIR
+ /// representation
+ pub(super) fn retrieve_borrow_spans(&self, borrow: &BorrowData<'_>) -> UseSpans<'tcx> {
+ let span = self.body.source_info(borrow.reserve_location).span;
+ self.borrow_spans(span, borrow.reserve_location)
+ }
+
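+ /// Adds labels and notes to `err` explaining how and why `moved_place` was
+ /// moved, e.g. (an illustrative sketch, not verbatim output):
+ ///
+ /// ```text
+ /// note: this value implements `FnOnce`, which causes it to be moved
+ ///       when called
+ /// ```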
+ fn explain_captures(
+ &mut self,
+ err: &mut Diagnostic,
+ span: Span,
+ move_span: Span,
+ move_spans: UseSpans<'tcx>,
+ moved_place: Place<'tcx>,
+ used_place: Option<PlaceRef<'tcx>>,
+ partially_str: &str,
+ loop_message: &str,
+ move_msg: &str,
+ is_loop_move: bool,
+ maybe_reinitialized_locations_is_empty: bool,
+ ) {
+ if let UseSpans::FnSelfUse { var_span, fn_call_span, fn_span, kind } = move_spans {
+ let place_name = self
+ .describe_place(moved_place.as_ref())
+ .map(|n| format!("`{}`", n))
+ .unwrap_or_else(|| "value".to_owned());
+ match kind {
+ CallKind::FnCall { fn_trait_id, .. }
+ if Some(fn_trait_id) == self.infcx.tcx.lang_items().fn_once_trait() =>
+ {
+ err.span_label(
+ fn_call_span,
+ &format!(
+ "{} {}moved due to this call{}",
+ place_name, partially_str, loop_message
+ ),
+ );
+ err.span_note(
+ var_span,
+ "this value implements `FnOnce`, which causes it to be moved when called",
+ );
+ }
+ CallKind::Operator { self_arg, .. } => {
+ let self_arg = self_arg.unwrap();
+ err.span_label(
+ fn_call_span,
+ &format!(
+ "{} {}moved due to usage in operator{}",
+ place_name, partially_str, loop_message
+ ),
+ );
+ if self.fn_self_span_reported.insert(fn_span) {
+ err.span_note(
+ // Check whether the source is accessible
+ if self.infcx.tcx.sess.source_map().is_span_accessible(self_arg.span) {
+ self_arg.span
+ } else {
+ fn_call_span
+ },
+ "calling this operator moves the left-hand side",
+ );
+ }
+ }
+ CallKind::Normal { self_arg, desugaring, is_option_or_result } => {
+ let self_arg = self_arg.unwrap();
+ if let Some((CallDesugaringKind::ForLoopIntoIter, _)) = desugaring {
+ let ty = moved_place.ty(self.body, self.infcx.tcx).ty;
+ let suggest = match self.infcx.tcx.get_diagnostic_item(sym::IntoIterator) {
+ Some(def_id) => self.infcx.tcx.infer_ctxt().enter(|infcx| {
+ type_known_to_meet_bound_modulo_regions(
+ &infcx,
+ self.param_env,
+ infcx.tcx.mk_imm_ref(
+ infcx.tcx.lifetimes.re_erased,
+ infcx.tcx.erase_regions(ty),
+ ),
+ def_id,
+ DUMMY_SP,
+ )
+ }),
+ _ => false,
+ };
+ if suggest {
+ err.span_suggestion_verbose(
+ move_span.shrink_to_lo(),
+ &format!(
+ "consider iterating over a slice of the `{}`'s content to \
+ avoid moving into the `for` loop",
+ ty,
+ ),
+ "&",
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ err.span_label(
+ fn_call_span,
+ &format!(
+ "{} {}moved due to this implicit call to `.into_iter()`{}",
+ place_name, partially_str, loop_message
+ ),
+ );
+ // If we have a `&mut` ref, we need to reborrow.
+ if let Some(ty::Ref(_, _, hir::Mutability::Mut)) = used_place
+ .map(|used_place| used_place.ty(self.body, self.infcx.tcx).ty.kind())
+ {
+ // If we are in a loop this will be suggested later.
+ if !is_loop_move {
+ err.span_suggestion_verbose(
+ move_span.shrink_to_lo(),
+ &format!(
+ "consider creating a fresh reborrow of {} here",
+ self.describe_place(moved_place.as_ref())
+ .map(|n| format!("`{}`", n))
+ .unwrap_or_else(|| "the mutable reference".to_string()),
+ ),
+ "&mut *",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ } else {
+ err.span_label(
+ fn_call_span,
+ &format!(
+ "{} {}moved due to this method call{}",
+ place_name, partially_str, loop_message
+ ),
+ );
+ }
+ if is_option_or_result && maybe_reinitialized_locations_is_empty {
+ err.span_suggestion_verbose(
+ fn_call_span.shrink_to_lo(),
+ "consider calling `.as_ref()` to borrow the type's contents",
+ "as_ref().",
+ Applicability::MachineApplicable,
+ );
+ }
+ // Avoid pointing to the same function in multiple different
+ // error messages.
+ if span != DUMMY_SP && self.fn_self_span_reported.insert(self_arg.span) {
+ err.span_note(
+ self_arg.span,
+ &format!("this function takes ownership of the receiver `self`, which moves {}", place_name)
+ );
+ }
+ }
+ // Other desugarings take `&self`, which cannot cause a move
+ _ => {}
+ }
+ } else {
+ if move_span != span || !loop_message.is_empty() {
+ err.span_label(
+ move_span,
+ format!("value {}moved{} here{}", partially_str, move_msg, loop_message),
+ );
+ }
+ // If the move error occurs due to a loop, don't show
+ // another message for the same span
+ if loop_message.is_empty() {
+ move_spans.var_span_label(
+ err,
+ format!("variable {}moved due to use{}", partially_str, move_spans.describe()),
+ "moved",
+ );
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/move_errors.rs b/compiler/rustc_borrowck/src/diagnostics/move_errors.rs
new file mode 100644
index 000000000..cb3cd479a
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/move_errors.rs
@@ -0,0 +1,529 @@
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_middle::mir::*;
+use rustc_middle::ty;
+use rustc_mir_dataflow::move_paths::{
+ IllegalMoveOrigin, IllegalMoveOriginKind, LookupResult, MoveError, MovePathIndex,
+};
+use rustc_span::Span;
+
+use crate::diagnostics::{DescribePlaceOpt, UseSpans};
+use crate::prefixes::PrefixSet;
+use crate::MirBorrowckCtxt;
+
+// Often when desugaring a pattern match we may have many individual moves in
+// MIR that are all part of one operation from the user's point-of-view. For
+// example:
+//
+// let (x, y) = foo()
+//
+// would move x from the 0 field of some temporary, and y from the 1 field. We
+// group such errors together for cleaner error reporting.
+//
+// Errors are kept separate if they are from places with different parent move
+// paths. For example, this generates two errors:
+//
+// let (&x, &y) = (&String::new(), &String::new());
+#[derive(Debug)]
+enum GroupedMoveError<'tcx> {
+ // Place expression can't be moved from,
+ // e.g., match x[0] { s => (), } where x: &[String]
+ MovesFromPlace {
+ original_path: Place<'tcx>,
+ span: Span,
+ move_from: Place<'tcx>,
+ kind: IllegalMoveOriginKind<'tcx>,
+ binds_to: Vec<Local>,
+ },
+ // Part of a value expression can't be moved from,
+ // e.g., match &String::new() { &x => (), }
+ MovesFromValue {
+ original_path: Place<'tcx>,
+ span: Span,
+ move_from: MovePathIndex,
+ kind: IllegalMoveOriginKind<'tcx>,
+ binds_to: Vec<Local>,
+ },
+ // Everything that isn't from pattern matching.
+ OtherIllegalMove {
+ original_path: Place<'tcx>,
+ use_spans: UseSpans<'tcx>,
+ kind: IllegalMoveOriginKind<'tcx>,
+ },
+}
+
+impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
+ pub(crate) fn report_move_errors(&mut self, move_errors: Vec<(Place<'tcx>, MoveError<'tcx>)>) {
+ let grouped_errors = self.group_move_errors(move_errors);
+ for error in grouped_errors {
+ self.report(error);
+ }
+ }
+
+ fn group_move_errors(
+ &self,
+ errors: Vec<(Place<'tcx>, MoveError<'tcx>)>,
+ ) -> Vec<GroupedMoveError<'tcx>> {
+ let mut grouped_errors = Vec::new();
+ for (original_path, error) in errors {
+ self.append_to_grouped_errors(&mut grouped_errors, original_path, error);
+ }
+ grouped_errors
+ }
+
+ fn append_to_grouped_errors(
+ &self,
+ grouped_errors: &mut Vec<GroupedMoveError<'tcx>>,
+ original_path: Place<'tcx>,
+ error: MoveError<'tcx>,
+ ) {
+ match error {
+ MoveError::UnionMove { .. } => {
+ unimplemented!("don't know how to report union move errors yet.")
+ }
+ MoveError::IllegalMove { cannot_move_out_of: IllegalMoveOrigin { location, kind } } => {
+ // Note: the only time we assign a place that isn't a temporary
+ // to a user variable is when initializing it.
+ // If that ever stops being the case, then the ever-initialized
+ // flow could be used.
+ if let Some(StatementKind::Assign(box (
+ place,
+ Rvalue::Use(Operand::Move(move_from)),
+ ))) = self.body.basic_blocks()[location.block]
+ .statements
+ .get(location.statement_index)
+ .map(|stmt| &stmt.kind)
+ {
+ if let Some(local) = place.as_local() {
+ let local_decl = &self.body.local_decls[local];
+ // opt_match_place is the place being matched on, and match_span
+ // is the span of the expression being matched on:
+ // match *x.y { ... } match_place is Some(*x.y)
+ // ^^^^ match_span is the span of *x.y
+ //
+ // opt_match_place is None for let [mut] x = ... statements,
+ // whether or not the right-hand side is a place expression
+ if let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ VarBindingForm {
+ opt_match_place: Some((opt_match_place, match_span)),
+ binding_mode: _,
+ opt_ty_info: _,
+ pat_span: _,
+ },
+ )))) = local_decl.local_info
+ {
+ let stmt_source_info = self.body.source_info(location);
+ self.append_binding_error(
+ grouped_errors,
+ kind,
+ original_path,
+ *move_from,
+ local,
+ opt_match_place,
+ match_span,
+ stmt_source_info.span,
+ );
+ return;
+ }
+ }
+ }
+
+ let move_spans = self.move_spans(original_path.as_ref(), location);
+ grouped_errors.push(GroupedMoveError::OtherIllegalMove {
+ use_spans: move_spans,
+ original_path,
+ kind,
+ });
+ }
+ }
+ }
+
+ fn append_binding_error(
+ &self,
+ grouped_errors: &mut Vec<GroupedMoveError<'tcx>>,
+ kind: IllegalMoveOriginKind<'tcx>,
+ original_path: Place<'tcx>,
+ move_from: Place<'tcx>,
+ bind_to: Local,
+ match_place: Option<Place<'tcx>>,
+ match_span: Span,
+ statement_span: Span,
+ ) {
+ debug!("append_binding_error(match_place={:?}, match_span={:?})", match_place, match_span);
+
+ let from_simple_let = match_place.is_none();
+ let match_place = match_place.unwrap_or(move_from);
+
+ match self.move_data.rev_lookup.find(match_place.as_ref()) {
+ // Error with the match place
+ LookupResult::Parent(_) => {
+ for ge in &mut *grouped_errors {
+ if let GroupedMoveError::MovesFromPlace { span, binds_to, .. } = ge
+ && match_span == *span
+ {
+ debug!("appending local({:?}) to list", bind_to);
+ if !binds_to.is_empty() {
+ binds_to.push(bind_to);
+ }
+ return;
+ }
+ }
+ debug!("found a new move error location");
+
+ // Don't need to point to x in let x = ... .
+ let (binds_to, span) = if from_simple_let {
+ (vec![], statement_span)
+ } else {
+ (vec![bind_to], match_span)
+ };
+ grouped_errors.push(GroupedMoveError::MovesFromPlace {
+ span,
+ move_from,
+ original_path,
+ kind,
+ binds_to,
+ });
+ }
+ // Error with the pattern
+ LookupResult::Exact(_) => {
+ let LookupResult::Parent(Some(mpi)) = self.move_data.rev_lookup.find(move_from.as_ref()) else {
+ // move_from should be a projection from match_place.
+ unreachable!("Probably not unreachable...");
+ };
+ for ge in &mut *grouped_errors {
+ if let GroupedMoveError::MovesFromValue {
+ span,
+ move_from: other_mpi,
+ binds_to,
+ ..
+ } = ge
+ {
+ if match_span == *span && mpi == *other_mpi {
+ debug!("appending local({:?}) to list", bind_to);
+ binds_to.push(bind_to);
+ return;
+ }
+ }
+ }
+ debug!("found a new move error location");
+ grouped_errors.push(GroupedMoveError::MovesFromValue {
+ span: match_span,
+ move_from: mpi,
+ original_path,
+ kind,
+ binds_to: vec![bind_to],
+ });
+ }
+ };
+ }
+
+ fn report(&mut self, error: GroupedMoveError<'tcx>) {
+ let (mut err, err_span) = {
+ let (span, use_spans, original_path, kind) = match error {
+ GroupedMoveError::MovesFromPlace { span, original_path, ref kind, .. }
+ | GroupedMoveError::MovesFromValue { span, original_path, ref kind, .. } => {
+ (span, None, original_path, kind)
+ }
+ GroupedMoveError::OtherIllegalMove { use_spans, original_path, ref kind } => {
+ (use_spans.args_or_use(), Some(use_spans), original_path, kind)
+ }
+ };
+ debug!(
+ "report: original_path={:?} span={:?}, kind={:?} \
+ original_path.is_upvar_field_projection={:?}",
+ original_path,
+ span,
+ kind,
+ self.is_upvar_field_projection(original_path.as_ref())
+ );
+ (
+ match kind {
+ &IllegalMoveOriginKind::BorrowedContent { target_place } => self
+ .report_cannot_move_from_borrowed_content(
+ original_path,
+ target_place,
+ span,
+ use_spans,
+ ),
+ &IllegalMoveOriginKind::InteriorOfTypeWithDestructor { container_ty: ty } => {
+ self.cannot_move_out_of_interior_of_drop(span, ty)
+ }
+ &IllegalMoveOriginKind::InteriorOfSliceOrArray { ty, is_index } => {
+ self.cannot_move_out_of_interior_noncopy(span, ty, Some(is_index))
+ }
+ },
+ span,
+ )
+ };
+
+ self.add_move_hints(error, &mut err, err_span);
+ self.buffer_error(err);
+ }
+
+ fn report_cannot_move_from_static(
+ &mut self,
+ place: Place<'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let description = if place.projection.len() == 1 {
+ format!("static item {}", self.describe_any_place(place.as_ref()))
+ } else {
+ let base_static = PlaceRef { local: place.local, projection: &[ProjectionElem::Deref] };
+
+ format!(
+ "{} as {} is a static item",
+ self.describe_any_place(place.as_ref()),
+ self.describe_any_place(base_static),
+ )
+ };
+
+ self.cannot_move_out_of(span, &description)
+ }
+
+ fn report_cannot_move_from_borrowed_content(
+ &mut self,
+ move_place: Place<'tcx>,
+ deref_target_place: Place<'tcx>,
+ span: Span,
+ use_spans: Option<UseSpans<'tcx>>,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ // Inspect the type of the content behind the
+ // borrow to provide feedback about why this
+ // was a move rather than a copy.
+ let ty = deref_target_place.ty(self.body, self.infcx.tcx).ty;
+ let upvar_field = self
+ .prefixes(move_place.as_ref(), PrefixSet::All)
+ .find_map(|p| self.is_upvar_field_projection(p));
+
+ let deref_base = match deref_target_place.projection.as_ref() {
+ [proj_base @ .., ProjectionElem::Deref] => {
+ PlaceRef { local: deref_target_place.local, projection: &proj_base }
+ }
+ _ => bug!("deref_target_place is not a deref projection"),
+ };
+
+ if let PlaceRef { local, projection: [] } = deref_base {
+ let decl = &self.body.local_decls[local];
+ if decl.is_ref_for_guard() {
+ let mut err = self.cannot_move_out_of(
+ span,
+ &format!("`{}` in pattern guard", self.local_names[local].unwrap()),
+ );
+ err.note(
+ "variables bound in patterns cannot be moved from \
+ until after the end of the pattern guard",
+ );
+ return err;
+ } else if decl.is_ref_to_static() {
+ return self.report_cannot_move_from_static(move_place, span);
+ }
+ }
+
+ debug!("report: ty={:?}", ty);
+ let mut err = match ty.kind() {
+ ty::Array(..) | ty::Slice(..) => {
+ self.cannot_move_out_of_interior_noncopy(span, ty, None)
+ }
+ ty::Closure(def_id, closure_substs)
+ if def_id.as_local() == Some(self.mir_def_id()) && upvar_field.is_some() =>
+ {
+ let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ let closure_kind = match closure_kind_ty.to_opt_closure_kind() {
+ Some(kind @ (ty::ClosureKind::Fn | ty::ClosureKind::FnMut)) => kind,
+ Some(ty::ClosureKind::FnOnce) => {
+ bug!("closure kind does not match first argument type")
+ }
+ None => bug!("closure kind not inferred by borrowck"),
+ };
+ let capture_description =
+ format!("captured variable in an `{closure_kind}` closure");
+
+ let upvar = &self.upvars[upvar_field.unwrap().index()];
+ let upvar_hir_id = upvar.place.get_root_variable();
+ let upvar_name = upvar.place.to_string(self.infcx.tcx);
+ let upvar_span = self.infcx.tcx.hir().span(upvar_hir_id);
+
+ let place_name = self.describe_any_place(move_place.as_ref());
+
+ let place_description =
+ if self.is_upvar_field_projection(move_place.as_ref()).is_some() {
+ format!("{place_name}, a {capture_description}")
+ } else {
+ format!("{place_name}, as `{upvar_name}` is a {capture_description}")
+ };
+
+ debug!(
+ "report: closure_kind_ty={:?} closure_kind={:?} place_description={:?}",
+ closure_kind_ty, closure_kind, place_description,
+ );
+
+ let mut diag = self.cannot_move_out_of(span, &place_description);
+
+ diag.span_label(upvar_span, "captured outer variable");
+ diag.span_label(
+ self.body.span,
+ format!("captured by this `{closure_kind}` closure"),
+ );
+
+ diag
+ }
+ _ => {
+ let source = self.borrowed_content_source(deref_base);
+ let move_place_ref = move_place.as_ref();
+ match (
+ self.describe_place_with_options(
+ move_place_ref,
+ DescribePlaceOpt {
+ including_downcast: false,
+ including_tuple_field: false,
+ },
+ ),
+ self.describe_name(move_place_ref),
+ source.describe_for_named_place(),
+ ) {
+ (Some(place_desc), Some(name), Some(source_desc)) => self.cannot_move_out_of(
+ span,
+ &format!("`{place_desc}` as enum variant `{name}` which is behind a {source_desc}"),
+ ),
+ (Some(place_desc), Some(name), None) => self.cannot_move_out_of(
+ span,
+ &format!("`{place_desc}` as enum variant `{name}`"),
+ ),
+ (Some(place_desc), _, Some(source_desc)) => self.cannot_move_out_of(
+ span,
+ &format!("`{place_desc}` which is behind a {source_desc}"),
+ ),
+ (_, _, _) => self.cannot_move_out_of(
+ span,
+ &source.describe_for_unnamed_place(self.infcx.tcx),
+ ),
+ }
+ }
+ };
+ if let Some(use_spans) = use_spans {
+ self.explain_captures(
+ &mut err, span, span, use_spans, move_place, None, "", "", "", false, true,
+ );
+ }
+ err
+ }
+
+ fn add_move_hints(&self, error: GroupedMoveError<'tcx>, err: &mut Diagnostic, span: Span) {
+ match error {
+ GroupedMoveError::MovesFromPlace { mut binds_to, move_from, .. } => {
+ if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
+ err.span_suggestion(
+ span,
+ "consider borrowing here",
+ format!("&{snippet}"),
+ Applicability::Unspecified,
+ );
+ }
+
+ if binds_to.is_empty() {
+ let place_ty = move_from.ty(self.body, self.infcx.tcx).ty;
+ let place_desc = match self.describe_place(move_from.as_ref()) {
+ Some(desc) => format!("`{desc}`"),
+ None => "value".to_string(),
+ };
+
+ self.note_type_does_not_implement_copy(
+ err,
+ &place_desc,
+ place_ty,
+ Some(span),
+ "",
+ );
+ } else {
+ binds_to.sort();
+ binds_to.dedup();
+
+ self.add_move_error_details(err, &binds_to);
+ }
+ }
+ GroupedMoveError::MovesFromValue { mut binds_to, .. } => {
+ binds_to.sort();
+ binds_to.dedup();
+ self.add_move_error_suggestions(err, &binds_to);
+ self.add_move_error_details(err, &binds_to);
+ }
+ // No binding. Nothing to suggest.
+ GroupedMoveError::OtherIllegalMove { ref original_path, use_spans, .. } => {
+ let span = use_spans.var_or_use();
+ let place_ty = original_path.ty(self.body, self.infcx.tcx).ty;
+ let place_desc = match self.describe_place(original_path.as_ref()) {
+ Some(desc) => format!("`{desc}`"),
+ None => "value".to_string(),
+ };
+ self.note_type_does_not_implement_copy(err, &place_desc, place_ty, Some(span), "");
+
+ use_spans.args_span_label(err, format!("move out of {place_desc} occurs here"));
+ }
+ }
+ }
+
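+ /// Suggests removing a `&` or `&mut` from pattern bindings that would otherwise
+ /// move out of a borrow (e.g., a sketch: `let (&x, &y) = ...` becomes
+ /// `let (x, y) = ...`).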
+ fn add_move_error_suggestions(&self, err: &mut Diagnostic, binds_to: &[Local]) {
+ let mut suggestions: Vec<(Span, &str, String)> = Vec::new();
+ for local in binds_to {
+ let bind_to = &self.body.local_decls[*local];
+ if let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ VarBindingForm { pat_span, .. },
+ )))) = bind_to.local_info
+ {
+ if let Ok(pat_snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(pat_span)
+ {
+ if let Some(stripped) = pat_snippet.strip_prefix('&') {
+ let pat_snippet = stripped.trim_start();
+ let (suggestion, to_remove) = if pat_snippet.starts_with("mut")
+ && pat_snippet["mut".len()..].starts_with(rustc_lexer::is_whitespace)
+ {
+ (pat_snippet["mut".len()..].trim_start(), "&mut")
+ } else {
+ (pat_snippet, "&")
+ };
+ suggestions.push((pat_span, to_remove, suggestion.to_owned()));
+ }
+ }
+ }
+ }
+ suggestions.sort_unstable_by_key(|&(span, _, _)| span);
+ suggestions.dedup_by_key(|&mut (span, _, _)| span);
+ for (span, to_remove, suggestion) in suggestions {
+ err.span_suggestion(
+ span,
+ &format!("consider removing the `{to_remove}`"),
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+
+ fn add_move_error_details(&self, err: &mut Diagnostic, binds_to: &[Local]) {
+ for (j, local) in binds_to.iter().enumerate() {
+ let bind_to = &self.body.local_decls[*local];
+ let binding_span = bind_to.source_info.span;
+
+ if j == 0 {
+ err.span_label(binding_span, "data moved here");
+ } else {
+ err.span_label(binding_span, "...and here");
+ }
+
+ if binds_to.len() == 1 {
+ self.note_type_does_not_implement_copy(
+ err,
+ &format!("`{}`", self.local_names[*local].unwrap()),
+ bind_to.ty,
+ Some(binding_span),
+ "",
+ );
+ }
+ }
+
+ if binds_to.len() > 1 {
+ err.note(
+ "move occurs because these variables have types that \
+ don't implement the `Copy` trait",
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
new file mode 100644
index 000000000..0ad4abbce
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs
@@ -0,0 +1,1115 @@
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_middle::hir::map::Map;
+use rustc_middle::mir::{Mutability, Place, PlaceRef, ProjectionElem};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::{
+ hir::place::PlaceBase,
+ mir::{
+ self, BindingForm, ClearCrossCrate, ImplicitSelfKind, Local, LocalDecl, LocalInfo,
+ LocalKind, Location,
+ },
+};
+use rustc_span::source_map::DesugaringKind;
+use rustc_span::symbol::{kw, Symbol};
+use rustc_span::{BytePos, Span};
+
+use crate::diagnostics::BorrowedContentSource;
+use crate::MirBorrowckCtxt;
+use rustc_const_eval::util::collect_writes::FindAssignments;
+use rustc_errors::{Applicability, Diagnostic};
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum AccessKind {
+ MutableBorrow,
+ Mutate,
+}
+
+impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
+ pub(crate) fn report_mutability_error(
+ &mut self,
+ access_place: Place<'tcx>,
+ span: Span,
+ the_place_err: PlaceRef<'tcx>,
+ error_access: AccessKind,
+ location: Location,
+ ) {
+ debug!(
+ "report_mutability_error(\
+ access_place={:?}, span={:?}, the_place_err={:?}, error_access={:?}, location={:?},\
+ )",
+ access_place, span, the_place_err, error_access, location,
+ );
+
+ let mut err;
+ let item_msg;
+ let reason;
+ let mut opt_source = None;
+ let access_place_desc = self.describe_any_place(access_place.as_ref());
+ debug!("report_mutability_error: access_place_desc={:?}", access_place_desc);
+
+ match the_place_err {
+ PlaceRef { local, projection: [] } => {
+ item_msg = access_place_desc;
+ if access_place.as_local().is_some() {
+ reason = ", as it is not declared as mutable".to_string();
+ } else {
+ let name = self.local_names[local].expect("immutable unnamed local");
+ reason = format!(", as `{name}` is not declared as mutable");
+ }
+ }
+
+ PlaceRef {
+ local,
+ projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
+ } => {
+ debug_assert!(is_closure_or_generator(
+ Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty
+ ));
+
+ let imm_borrow_derefed = self.upvars[upvar_index.index()]
+ .place
+ .place
+ .deref_tys()
+ .any(|ty| matches!(ty.kind(), ty::Ref(.., hir::Mutability::Not)));
+
+ // If the place is immutable then:
+ //
+ // - Either we deref an immutable ref to get to our final place.
+ // - We don't capture derefs of raw ptrs.
+ // - Or the final place is immutable because the root variable of the capture
+ // isn't marked `mut`, and we should suggest that to the user.
+ if imm_borrow_derefed {
+ // If we deref an immutable ref then the suggestion here doesn't help.
+ return;
+ } else {
+ item_msg = access_place_desc;
+ if self.is_upvar_field_projection(access_place.as_ref()).is_some() {
+ reason = ", as it is not declared as mutable".to_string();
+ } else {
+ let name = self.upvars[upvar_index.index()].place.to_string(self.infcx.tcx);
+ reason = format!(", as `{name}` is not declared as mutable");
+ }
+ }
+ }
+
+ PlaceRef { local, projection: [ProjectionElem::Deref] }
+ if self.body.local_decls[local].is_ref_for_guard() =>
+ {
+ item_msg = access_place_desc;
+ reason = ", as it is immutable for the pattern guard".to_string();
+ }
+ PlaceRef { local, projection: [ProjectionElem::Deref] }
+ if self.body.local_decls[local].is_ref_to_static() =>
+ {
+ if access_place.projection.len() == 1 {
+ item_msg = format!("immutable static item {access_place_desc}");
+ reason = String::new();
+ } else {
+ item_msg = access_place_desc;
+ let local_info = &self.body.local_decls[local].local_info;
+ if let Some(box LocalInfo::StaticRef { def_id, .. }) = *local_info {
+ let static_name = &self.infcx.tcx.item_name(def_id);
+ reason = format!(", as `{static_name}` is an immutable static item");
+ } else {
+ bug!("is_ref_to_static return true, but not ref to static?");
+ }
+ }
+ }
+ PlaceRef { local: _, projection: [proj_base @ .., ProjectionElem::Deref] } => {
+ if the_place_err.local == ty::CAPTURE_STRUCT_LOCAL
+ && proj_base.is_empty()
+ && !self.upvars.is_empty()
+ {
+ item_msg = access_place_desc;
+ debug_assert!(
+ self.body.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty.is_region_ptr()
+ );
+ debug_assert!(is_closure_or_generator(
+ Place::ty_from(
+ the_place_err.local,
+ the_place_err.projection,
+ self.body,
+ self.infcx.tcx
+ )
+ .ty
+ ));
+
+ reason = if self.is_upvar_field_projection(access_place.as_ref()).is_some() {
+ ", as it is a captured variable in a `Fn` closure".to_string()
+ } else {
+ ", as `Fn` closures cannot mutate their captured variables".to_string()
+ }
+ } else {
+ let source = self.borrowed_content_source(PlaceRef {
+ local: the_place_err.local,
+ projection: proj_base,
+ });
+ let pointer_type = source.describe_for_immutable_place(self.infcx.tcx);
+ opt_source = Some(source);
+ if let Some(desc) = self.describe_place(access_place.as_ref()) {
+ item_msg = format!("`{desc}`");
+ reason = match error_access {
+ AccessKind::Mutate => format!(", which is behind {pointer_type}"),
+ AccessKind::MutableBorrow => {
+ format!(", as it is behind {pointer_type}")
+ }
+ }
+ } else {
+ item_msg = format!("data in {pointer_type}");
+ reason = String::new();
+ }
+ }
+ }
+
+ PlaceRef {
+ local: _,
+ projection:
+ [
+ ..,
+ ProjectionElem::Index(_)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Downcast(..),
+ ],
+ } => bug!("Unexpected immutable place."),
+ }
+
+ debug!("report_mutability_error: item_msg={:?}, reason={:?}", item_msg, reason);
+
+ // `act` and `acted_on` are strings that let us abstract over
+ // the verbs used in some diagnostic messages.
+ let act;
+ let acted_on;
+
+ let span = match error_access {
+ AccessKind::Mutate => {
+ err = self.cannot_assign(span, &(item_msg + &reason));
+ act = "assign";
+ acted_on = "written";
+ span
+ }
+ AccessKind::MutableBorrow => {
+ act = "borrow as mutable";
+ acted_on = "borrowed as mutable";
+
+ let borrow_spans = self.borrow_spans(span, location);
+ let borrow_span = borrow_spans.args_or_use();
+ err = self.cannot_borrow_path_as_mutable_because(borrow_span, &item_msg, &reason);
+ borrow_spans.var_span_label(
+ &mut err,
+ format!(
+ "mutable borrow occurs due to use of {} in closure",
+ self.describe_any_place(access_place.as_ref()),
+ ),
+ "mutable",
+ );
+ borrow_span
+ }
+ };
+
+ debug!("report_mutability_error: act={:?}, acted_on={:?}", act, acted_on);
+
+ match the_place_err {
+ // Suggest making an existing shared borrow in a struct definition a mutable borrow.
+ //
+ // This is applicable when we have a deref of a field access to a deref of a local -
+ // something like `*((*_1).0)`. The local that we get will be a reference to the
+ // struct we've got a field access of (it must be a reference since there's a deref
+ // after the field access).
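+ //
+ // e.g. (a sketch) given `struct S<'a> { f: &'a String }` and a parameter
+ // `s: &mut S<'_>`, writing through `*s.f` suggests changing the field to
+ // `f: &'a mut String`.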
+ PlaceRef {
+ local,
+ projection:
+ &[
+ ref proj_base @ ..,
+ ProjectionElem::Deref,
+ ProjectionElem::Field(field, _),
+ ProjectionElem::Deref,
+ ],
+ } => {
+ err.span_label(span, format!("cannot {ACT}", ACT = act));
+
+ if let Some(span) = get_mut_span_in_struct_field(
+ self.infcx.tcx,
+ Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty,
+ field,
+ ) {
+ err.span_suggestion_verbose(
+ span,
+ "consider changing this to be mutable",
+ " mut ",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ // Suggest removing a `&mut` from the use of a mutable reference.
+ PlaceRef { local, projection: [] }
+ if self
+ .body
+ .local_decls
+ .get(local)
+ .map(|l| mut_borrow_of_mutable_ref(l, self.local_names[local]))
+ .unwrap_or(false) =>
+ {
+ let decl = &self.body.local_decls[local];
+ err.span_label(span, format!("cannot {ACT}", ACT = act));
+ if let Some(mir::Statement {
+ source_info,
+ kind:
+ mir::StatementKind::Assign(box (
+ _,
+ mir::Rvalue::Ref(
+ _,
+ mir::BorrowKind::Mut { allow_two_phase_borrow: false },
+ _,
+ ),
+ )),
+ ..
+ }) = &self.body[location.block].statements.get(location.statement_index)
+ {
+ match decl.local_info {
+ Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ mir::VarBindingForm {
+ binding_mode: ty::BindingMode::BindByValue(Mutability::Not),
+ opt_ty_info: Some(sp),
+ opt_match_place: _,
+ pat_span: _,
+ },
+ )))) => {
+ err.span_note(sp, "the binding is already a mutable borrow");
+ }
+ _ => {
+ err.span_note(
+ decl.source_info.span,
+ "the binding is already a mutable borrow",
+ );
+ }
+ }
+ if let Ok(snippet) =
+ self.infcx.tcx.sess.source_map().span_to_snippet(source_info.span)
+ {
+ if snippet.starts_with("&mut ") {
+ // We don't have access to the HIR to get accurate spans, but we can
+ // give a best-effort structured suggestion.
+ err.span_suggestion_verbose(
+ source_info.span.with_hi(source_info.span.lo() + BytePos(5)),
+ "try removing `&mut` here",
+ "",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ // This can occur with things like `(&mut self).foo()`.
+ err.span_help(source_info.span, "try removing `&mut` here");
+ }
+ } else {
+ err.span_help(source_info.span, "try removing `&mut` here");
+ }
+ } else if decl.mutability == Mutability::Not
+ && !matches!(
+ decl.local_info,
+ Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::ImplicitSelf(
+ ImplicitSelfKind::MutRef
+ ))))
+ )
+ {
+ err.span_suggestion_verbose(
+ decl.source_info.span.shrink_to_lo(),
+ "consider making the binding mutable",
+ "mut ",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+
+ // We want to suggest users use `let mut` for local (user
+ // variable) mutations...
+ PlaceRef { local, projection: [] }
+ if self.body.local_decls[local].can_be_made_mutable() =>
+ {
+ // ... but it doesn't make sense to suggest it on
+ // variables that are `ref x`, `ref mut x`, `&self`,
+ // or `&mut self` (such variables are simply not
+ // mutable).
+ let local_decl = &self.body.local_decls[local];
+ assert_eq!(local_decl.mutability, Mutability::Not);
+
+ err.span_label(span, format!("cannot {ACT}", ACT = act));
+ err.span_suggestion(
+ local_decl.source_info.span,
+ "consider changing this to be mutable",
+ format!("mut {}", self.local_names[local].unwrap()),
+ Applicability::MachineApplicable,
+ );
+ let tcx = self.infcx.tcx;
+ if let ty::Closure(id, _) = *the_place_err.ty(self.body, tcx).ty.kind() {
+ self.show_mutating_upvar(tcx, id.expect_local(), the_place_err, &mut err);
+ }
+ }
+
+ // Also suggest adding mut for upvars
+ PlaceRef {
+ local,
+ projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
+ } => {
+ debug_assert!(is_closure_or_generator(
+ Place::ty_from(local, proj_base, self.body, self.infcx.tcx).ty
+ ));
+
+ let captured_place = &self.upvars[upvar_index.index()].place;
+
+ err.span_label(span, format!("cannot {ACT}", ACT = act));
+
+ let upvar_hir_id = captured_place.get_root_variable();
+
+ if let Some(Node::Pat(pat)) = self.infcx.tcx.hir().find(upvar_hir_id)
+ && let hir::PatKind::Binding(
+ hir::BindingAnnotation::Unannotated,
+ _,
+ upvar_ident,
+ _,
+ ) = pat.kind
+ {
+ err.span_suggestion(
+ upvar_ident.span,
+ "consider changing this to be mutable",
+ format!("mut {}", upvar_ident.name),
+ Applicability::MachineApplicable,
+ );
+ }
+
+ let tcx = self.infcx.tcx;
+ if let ty::Ref(_, ty, Mutability::Mut) = the_place_err.ty(self.body, tcx).ty.kind()
+ && let ty::Closure(id, _) = *ty.kind()
+ {
+ self.show_mutating_upvar(tcx, id.expect_local(), the_place_err, &mut err);
+ }
+ }
+
+ // A complete hack to approximate the old AST-borrowck
+ // diagnostic: if the span starts with a mutable borrow of
+ // a local variable, just suggest that the user remove it.
+ PlaceRef { local: _, projection: [] }
+ if {
+ if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
+ snippet.starts_with("&mut ")
+ } else {
+ false
+ }
+ } =>
+ {
+ err.span_label(span, format!("cannot {ACT}", ACT = act));
+ err.span_suggestion(
+ span,
+ "try removing `&mut` here",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ PlaceRef { local, projection: [ProjectionElem::Deref] }
+ if self.body.local_decls[local].is_ref_for_guard() =>
+ {
+ err.span_label(span, format!("cannot {ACT}", ACT = act));
+ err.note(
+ "variables bound in patterns are immutable until the end of the pattern guard",
+ );
+ }
+
+ // We want to point out when a `&` can be readily replaced
+ // with an `&mut`.
+ //
+ // FIXME: can this case be generalized to work for an
+ // arbitrary base for the projection?
+ PlaceRef { local, projection: [ProjectionElem::Deref] }
+ if self.body.local_decls[local].is_user_variable() =>
+ {
+ let local_decl = &self.body.local_decls[local];
+
+ let (pointer_sigil, pointer_desc) = if local_decl.ty.is_region_ptr() {
+ ("&", "reference")
+ } else {
+ ("*const", "pointer")
+ };
+
+ match self.local_names[local] {
+ Some(name) if !local_decl.from_compiler_desugaring() => {
+ let label = match local_decl.local_info.as_deref().unwrap() {
+ LocalInfo::User(ClearCrossCrate::Set(
+ mir::BindingForm::ImplicitSelf(_),
+ )) => {
+ let (span, suggestion) =
+ suggest_ampmut_self(self.infcx.tcx, local_decl);
+ Some((true, span, suggestion))
+ }
+
+ LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::Var(
+ mir::VarBindingForm {
+ binding_mode: ty::BindingMode::BindByValue(_),
+ opt_ty_info,
+ ..
+ },
+ ))) => {
+ // check if the RHS is from desugaring
+ let opt_assignment_rhs_span =
+ self.body.find_assignments(local).first().map(|&location| {
+ if let Some(mir::Statement {
+ source_info: _,
+ kind:
+ mir::StatementKind::Assign(box (
+ _,
+ mir::Rvalue::Use(mir::Operand::Copy(place)),
+ )),
+ }) = self.body[location.block]
+ .statements
+ .get(location.statement_index)
+ {
+ self.body.local_decls[place.local].source_info.span
+ } else {
+ self.body.source_info(location).span
+ }
+ });
+ match opt_assignment_rhs_span.and_then(|s| s.desugaring_kind()) {
+ // on `for` loops, the RHS points to the iterator part
+ Some(DesugaringKind::ForLoop) => {
+ self.suggest_similar_mut_method_for_for_loop(&mut err);
+ err.span_label(opt_assignment_rhs_span.unwrap(), format!(
+ "this iterator yields `{pointer_sigil}` {pointer_desc}s",
+ ));
+ None
+ }
+ // don't create labels for compiler-generated spans
+ Some(_) => None,
+ None => {
+ let label = if name != kw::SelfLower {
+ suggest_ampmut(
+ self.infcx.tcx,
+ local_decl,
+ opt_assignment_rhs_span,
+ *opt_ty_info,
+ )
+ } else {
+ match local_decl.local_info.as_deref() {
+ Some(LocalInfo::User(ClearCrossCrate::Set(
+ mir::BindingForm::Var(mir::VarBindingForm {
+ opt_ty_info: None,
+ ..
+ }),
+ ))) => {
+ let (span, sugg) = suggest_ampmut_self(
+ self.infcx.tcx,
+ local_decl,
+ );
+ (true, span, sugg)
+ }
+ // explicit self (e.g. `self: &'a Self`)
+ _ => suggest_ampmut(
+ self.infcx.tcx,
+ local_decl,
+ opt_assignment_rhs_span,
+ *opt_ty_info,
+ ),
+ }
+ };
+ Some(label)
+ }
+ }
+ }
+
+ LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::Var(
+ mir::VarBindingForm {
+ binding_mode: ty::BindingMode::BindByReference(_),
+ ..
+ },
+ ))) => {
+ let pattern_span = local_decl.source_info.span;
+ suggest_ref_mut(self.infcx.tcx, pattern_span)
+ .map(|replacement| (true, pattern_span, replacement))
+ }
+
+ LocalInfo::User(ClearCrossCrate::Clear) => {
+ bug!("saw cleared local state")
+ }
+
+ _ => unreachable!(),
+ };
+
+ match label {
+ Some((true, err_help_span, suggested_code)) => {
+ let (is_trait_sig, local_trait) = self.is_error_in_trait(local);
+ if !is_trait_sig {
+ err.span_suggestion(
+ err_help_span,
+ &format!(
+ "consider changing this to be a mutable {pointer_desc}"
+ ),
+ suggested_code,
+ Applicability::MachineApplicable,
+ );
+ } else if let Some(x) = local_trait {
+ err.span_suggestion(
+ x,
+ &format!(
+ "consider changing that to be a mutable {pointer_desc}"
+ ),
+ suggested_code,
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ Some((false, err_label_span, message)) => {
+ err.span_label(
+ err_label_span,
+ &format!(
+ "consider changing this binding's type to be: `{message}`"
+ ),
+ );
+ }
+ None => {}
+ }
+ err.span_label(
+ span,
+ format!(
+ "`{NAME}` is a `{SIGIL}` {DESC}, \
+ so the data it refers to cannot be {ACTED_ON}",
+ NAME = name,
+ SIGIL = pointer_sigil,
+ DESC = pointer_desc,
+ ACTED_ON = acted_on
+ ),
+ );
+ }
+ _ => {
+ err.span_label(
+ span,
+ format!(
+ "cannot {ACT} through `{SIGIL}` {DESC}",
+ ACT = act,
+ SIGIL = pointer_sigil,
+ DESC = pointer_desc
+ ),
+ );
+ }
+ }
+ }
+
+ PlaceRef { local, projection: [ProjectionElem::Deref] }
+ if local == ty::CAPTURE_STRUCT_LOCAL && !self.upvars.is_empty() =>
+ {
+ self.expected_fn_found_fn_mut_call(&mut err, span, act);
+ }
+
+ PlaceRef { local: _, projection: [.., ProjectionElem::Deref] } => {
+ err.span_label(span, format!("cannot {ACT}", ACT = act));
+
+ match opt_source {
+ Some(BorrowedContentSource::OverloadedDeref(ty)) => {
+ err.help(&format!(
+ "trait `DerefMut` is required to modify through a dereference, \
+ but it is not implemented for `{ty}`",
+ ));
+ }
+ Some(BorrowedContentSource::OverloadedIndex(ty)) => {
+ err.help(&format!(
+ "trait `IndexMut` is required to modify indexed content, \
+ but it is not implemented for `{ty}`",
+ ));
+ }
+ _ => (),
+ }
+ }
+
+ _ => {
+ err.span_label(span, format!("cannot {ACT}", ACT = act));
+ }
+ }
+
+ self.buffer_error(err);
+ }
+
+ /// A user cannot make the signature of a trait method mutable without changing
+ /// the trait, so we check whether this error belongs to a trait and, if so, move
+ /// the suggestion to the trait, or disable it if the trait is out of scope of this crate.
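+ /// (E.g., a sketch: for `trait T { fn f(&self, x: &i32); }`, an impl body
+ /// that mutates `*x` needs `x: &mut i32` in the trait definition itself.)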
+ fn is_error_in_trait(&self, local: Local) -> (bool, Option<Span>) {
+ if self.body.local_kind(local) != LocalKind::Arg {
+ return (false, None);
+ }
+ let hir_map = self.infcx.tcx.hir();
+ let my_def = self.body.source.def_id();
+ let my_hir = hir_map.local_def_id_to_hir_id(my_def.as_local().unwrap());
+ let Some(td) =
+ self.infcx.tcx.impl_of_method(my_def).and_then(|x| self.infcx.tcx.trait_id_of_impl(x))
+ else {
+ return (false, None);
+ };
+ (
+ true,
+ td.as_local().and_then(|tld| match hir_map.find_by_def_id(tld) {
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(_, _, _, _, items),
+ ..
+ })) => {
+ let mut f_in_trait_opt = None;
+ for hir::TraitItemRef { id: fi, kind: k, .. } in *items {
+ let hi = fi.hir_id();
+ if !matches!(k, hir::AssocItemKind::Fn { .. }) {
+ continue;
+ }
+ if hir_map.name(hi) != hir_map.name(my_hir) {
+ continue;
+ }
+ f_in_trait_opt = Some(hi);
+ break;
+ }
+ f_in_trait_opt.and_then(|f_in_trait| match hir_map.find(f_in_trait) {
+ Some(Node::TraitItem(hir::TraitItem {
+ kind:
+ hir::TraitItemKind::Fn(
+ hir::FnSig { decl: hir::FnDecl { inputs, .. }, .. },
+ _,
+ ),
+ ..
+ })) => {
+ let hir::Ty { span, .. } = inputs[local.index() - 1];
+ Some(span)
+ }
+ _ => None,
+ })
+ }
+ _ => None,
+ }),
+ )
+ }
+
+ // Point to the span of the upvar that makes the closure call require a mutable borrow.
+ fn show_mutating_upvar(
+ &self,
+ tcx: TyCtxt<'_>,
+ closure_local_def_id: hir::def_id::LocalDefId,
+ the_place_err: PlaceRef<'tcx>,
+ err: &mut Diagnostic,
+ ) {
+ let tables = tcx.typeck(closure_local_def_id);
+ let closure_hir_id = tcx.hir().local_def_id_to_hir_id(closure_local_def_id);
+ if let Some((span, closure_kind_origin)) =
+ &tables.closure_kind_origins().get(closure_hir_id)
+ {
+ let reason = if let PlaceBase::Upvar(upvar_id) = closure_kind_origin.base {
+ let upvar = ty::place_to_string_for_capture(tcx, closure_kind_origin);
+ let root_hir_id = upvar_id.var_path.hir_id;
+ // We have an origin for this closure kind starting at this root variable, so it's safe to unwrap here.
+ let captured_places =
+ tables.closure_min_captures[&closure_local_def_id].get(&root_hir_id).unwrap();
+
+ let origin_projection = closure_kind_origin
+ .projections
+ .iter()
+ .map(|proj| proj.kind)
+ .collect::<Vec<_>>();
+ let mut capture_reason = String::new();
+ for captured_place in captured_places {
+ let captured_place_kinds = captured_place
+ .place
+ .projections
+ .iter()
+ .map(|proj| proj.kind)
+ .collect::<Vec<_>>();
+ if rustc_middle::ty::is_ancestor_or_same_capture(
+ &captured_place_kinds,
+ &origin_projection,
+ ) {
+ match captured_place.info.capture_kind {
+ ty::UpvarCapture::ByRef(
+ ty::BorrowKind::MutBorrow | ty::BorrowKind::UniqueImmBorrow,
+ ) => {
+ capture_reason = format!("mutable borrow of `{upvar}`");
+ }
+ ty::UpvarCapture::ByValue => {
+ capture_reason = format!("possible mutation of `{upvar}`");
+ }
+ _ => bug!("upvar `{upvar}` borrowed, but not mutably"),
+ }
+ break;
+ }
+ }
+ if capture_reason.is_empty() {
+ bug!("upvar `{upvar}` borrowed, but cannot find reason");
+ }
+ capture_reason
+ } else {
+ bug!("not an upvar")
+ };
+ err.span_label(
+ *span,
+ format!(
+ "calling `{}` requires mutable binding due to {}",
+ self.describe_place(the_place_err).unwrap(),
+ reason
+ ),
+ );
+ }
+ }
+
+ // Attempt to search for similar mutable associated items to suggest.
+ // In the future, attempt this on all paths, but initially only on the RHS of a `for` loop.
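+ // (E.g., a sketch: for `for x in v.iter() { *x += 1 }` we can suggest
+ // `v.iter_mut()`, found by matching associated items that extend the
+ // original method name.)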
+ fn suggest_similar_mut_method_for_for_loop(&self, err: &mut Diagnostic) {
+ use hir::{
+ BodyId, Expr,
+ ExprKind::{Block, Call, DropTemps, Match, MethodCall},
+ HirId, ImplItem, ImplItemKind, Item, ItemKind,
+ };
+
+ fn maybe_body_id_of_fn(hir_map: Map<'_>, id: HirId) -> Option<BodyId> {
+ match hir_map.find(id) {
+ Some(Node::Item(Item { kind: ItemKind::Fn(_, _, body_id), .. }))
+ | Some(Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(_, body_id), .. })) => {
+ Some(*body_id)
+ }
+ _ => None,
+ }
+ }
+ let hir_map = self.infcx.tcx.hir();
+ let mir_body_hir_id = self.mir_hir_id();
+ if let Some(fn_body_id) = maybe_body_id_of_fn(hir_map, mir_body_hir_id) {
+ if let Block(
+ hir::Block {
+ expr:
+ Some(Expr {
+ kind:
+ DropTemps(Expr {
+ kind:
+ Match(
+ Expr {
+ kind:
+ Call(
+ _,
+ [
+ Expr {
+ kind:
+ MethodCall(
+ path_segment,
+ _args,
+ span,
+ ),
+ hir_id,
+ ..
+ },
+ ..,
+ ],
+ ),
+ ..
+ },
+ ..,
+ ),
+ ..
+ }),
+ ..
+ }),
+ ..
+ },
+ _,
+ ) = hir_map.body(fn_body_id).value.kind
+ {
+ let opt_suggestions = path_segment
+ .hir_id
+ .map(|path_hir_id| self.infcx.tcx.typeck(path_hir_id.owner))
+ .and_then(|typeck| typeck.type_dependent_def_id(*hir_id))
+ .and_then(|def_id| self.infcx.tcx.impl_of_method(def_id))
+ .map(|def_id| self.infcx.tcx.associated_items(def_id))
+ .map(|assoc_items| {
+ assoc_items
+ .in_definition_order()
+ .map(|assoc_item_def| assoc_item_def.ident(self.infcx.tcx))
+ .filter(|&ident| {
+ let original_method_ident = path_segment.ident;
+ original_method_ident != ident
+ && ident
+ .as_str()
+ .starts_with(&original_method_ident.name.to_string())
+ })
+ .map(|ident| format!("{ident}()"))
+ .peekable()
+ });
+
+ if let Some(mut suggestions) = opt_suggestions
+ && suggestions.peek().is_some()
+ {
+ err.span_suggestions(
+ *span,
+ "use mutable method",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ };
+ }
+
+ /// Targeted error when encountering an `FnMut` closure where an `Fn` closure was expected.
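+ /// (E.g., a sketch: passing `|| count += 1` where `fn apply(f: impl Fn())`
+ /// is expected; we point at where to change `Fn` to `FnMut`.)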
+ fn expected_fn_found_fn_mut_call(&self, err: &mut Diagnostic, sp: Span, act: &str) {
+ err.span_label(sp, format!("cannot {act}"));
+
+ let hir = self.infcx.tcx.hir();
+ let closure_id = self.mir_hir_id();
+ let fn_call_id = hir.get_parent_node(closure_id);
+ let node = hir.get(fn_call_id);
+ let def_id = hir.enclosing_body_owner(fn_call_id);
+ let mut look_at_return = true;
+ // If we can detect the expression to be an `fn` call where the closure was an argument,
+ // we point at the `fn` definition argument...
+ if let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Call(func, args), .. }) = node {
+ let arg_pos = args
+ .iter()
+ .enumerate()
+ .filter(|(_, arg)| arg.hir_id == closure_id)
+ .map(|(pos, _)| pos)
+ .next();
+ let tables = self.infcx.tcx.typeck(def_id);
+ if let Some(ty::FnDef(def_id, _)) =
+ tables.node_type_opt(func.hir_id).as_ref().map(|ty| ty.kind())
+ {
+ let arg = match hir.get_if_local(*def_id) {
+ Some(
+ hir::Node::Item(hir::Item {
+ ident, kind: hir::ItemKind::Fn(sig, ..), ..
+ })
+ | hir::Node::TraitItem(hir::TraitItem {
+ ident,
+ kind: hir::TraitItemKind::Fn(sig, _),
+ ..
+ })
+ | hir::Node::ImplItem(hir::ImplItem {
+ ident,
+ kind: hir::ImplItemKind::Fn(sig, _),
+ ..
+ }),
+ ) => Some(
+ arg_pos
+ .and_then(|pos| {
+ sig.decl.inputs.get(
+ pos + if sig.decl.implicit_self.has_implicit_self() {
+ 1
+ } else {
+ 0
+ },
+ )
+ })
+ .map(|arg| arg.span)
+ .unwrap_or(ident.span),
+ ),
+ _ => None,
+ };
+ if let Some(span) = arg {
+ err.span_label(span, "change this to accept `FnMut` instead of `Fn`");
+ err.span_label(func.span, "expects `Fn` instead of `FnMut`");
+ err.span_label(self.body.span, "in this closure");
+ look_at_return = false;
+ }
+ }
+ }
+
+ if look_at_return && hir.get_return_block(closure_id).is_some() {
+ // ...otherwise we are probably in the tail expression of the function, point at the
+ // return type.
+ match hir.get_by_def_id(hir.get_parent_item(fn_call_id)) {
+ hir::Node::Item(hir::Item { ident, kind: hir::ItemKind::Fn(sig, ..), .. })
+ | hir::Node::TraitItem(hir::TraitItem {
+ ident,
+ kind: hir::TraitItemKind::Fn(sig, _),
+ ..
+ })
+ | hir::Node::ImplItem(hir::ImplItem {
+ ident,
+ kind: hir::ImplItemKind::Fn(sig, _),
+ ..
+ }) => {
+ err.span_label(ident.span, "");
+ err.span_label(
+ sig.decl.output.span(),
+ "change this to return `FnMut` instead of `Fn`",
+ );
+ err.span_label(self.body.span, "in this closure");
+ }
+ _ => {}
+ }
+ }
+ }
+}
+
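+/// Returns whether we are mutably borrowing a binding that is itself already a
+/// mutable reference (e.g., a sketch: `let r: &mut i32 = ...; let rr = &mut r;`),
+/// in which case the caller suggests removing the redundant `&mut`.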
+fn mut_borrow_of_mutable_ref(local_decl: &LocalDecl<'_>, local_name: Option<Symbol>) -> bool {
+ debug!("local_info: {:?}, ty.kind(): {:?}", local_decl.local_info, local_decl.ty.kind());
+
+ match local_decl.local_info.as_deref() {
+ // Check if mutably borrowing a mutable reference.
+ Some(LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::Var(
+ mir::VarBindingForm {
+ binding_mode: ty::BindingMode::BindByValue(Mutability::Not), ..
+ },
+ )))) => matches!(local_decl.ty.kind(), ty::Ref(_, _, hir::Mutability::Mut)),
+ Some(LocalInfo::User(ClearCrossCrate::Set(mir::BindingForm::ImplicitSelf(kind)))) => {
+ // Check if the user variable is a `&mut self` and we can therefore
+ // suggest removing the `&mut`.
+ //
+ // Deliberately fall into this case for all implicit self types,
+ // so that we don't fall into the next case with them.
+ *kind == mir::ImplicitSelfKind::MutRef
+ }
+ _ if Some(kw::SelfLower) == local_name => {
+ // Otherwise, check if the name is the `self` keyword - in which case
+ // we have an explicit self. Do the same thing in this case and check
+ // for a `self: &mut Self` to suggest removing the `&mut`.
+ matches!(local_decl.ty.kind(), ty::Ref(_, _, hir::Mutability::Mut))
+ }
+ _ => false,
+ }
+}
+
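+/// Builds the `&mut self` replacement for an `&self` receiver, preserving an
+/// explicit lifetime (e.g., a sketch: `&'a self` becomes `&'a mut self`).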
+fn suggest_ampmut_self<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ local_decl: &mir::LocalDecl<'tcx>,
+) -> (Span, String) {
+ let sp = local_decl.source_info.span;
+ (
+ sp,
+ match tcx.sess.source_map().span_to_snippet(sp) {
+ Ok(snippet) => {
+ let lt_pos = snippet.find('\'');
+ if let Some(lt_pos) = lt_pos {
+ format!("&{}mut self", &snippet[lt_pos..snippet.len() - 4])
+ } else {
+ "&mut self".to_string()
+ }
+ }
+ _ => "&mut self".to_string(),
+ },
+ )
+}
+
+// When we want to suggest a user change a local variable to be a `&mut`, there
+// are three potential "obvious" things to highlight:
+//
+// let ident [: Type] [= RightHandSideExpression];
+// ^^^^^ ^^^^ ^^^^^^^^^^^^^^^^^^^^^^^
+// (1.) (2.) (3.)
+//
+// We can always fall back on highlighting the first. But chances are good that
+// the user experience will be better if we highlight one of the others if possible;
+// for example, if the RHS is present and the Type is not, then the type is going to
+// be inferred *from* the RHS, which means we should highlight that (and suggest
+// that they borrow the RHS mutably).
+//
+// This implementation attempts to emulate AST-borrowck prioritization
+// by trying (3.), then (2.) and finally falling back on (1.).
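+//
+// For example (a sketch, not from the compiler's test suite):
+//
+// let x: &i32 = &y; // (3.) suggest `&mut y` on the RHS
+// let x: &i32 = f(); // (2.) suggest changing the type to `&mut i32`
+// let x = f(); // (1.) fall back to the binding's own span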
+fn suggest_ampmut<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ local_decl: &mir::LocalDecl<'tcx>,
+ opt_assignment_rhs_span: Option<Span>,
+ opt_ty_info: Option<Span>,
+) -> (bool, Span, String) {
+ if let Some(assignment_rhs_span) = opt_assignment_rhs_span
+ && let Ok(src) = tcx.sess.source_map().span_to_snippet(assignment_rhs_span)
+ {
+ let is_mutbl = |ty: &str| -> bool {
+ if let Some(rest) = ty.strip_prefix("mut") {
+ match rest.chars().next() {
+ // e.g. `&mut x`
+ Some(c) if c.is_whitespace() => true,
+ // e.g. `&mut(x)`
+ Some('(') => true,
+ // e.g. `&mut{x}`
+ Some('{') => true,
+ // e.g. `&mutablevar`
+ _ => false,
+ }
+ } else {
+ false
+ }
+ };
+ if let (true, Some(ws_pos)) = (src.starts_with("&'"), src.find(char::is_whitespace)) {
+ let lt_name = &src[1..ws_pos];
+ let ty = src[ws_pos..].trim_start();
+ if !is_mutbl(ty) {
+ return (true, assignment_rhs_span, format!("&{lt_name} mut {ty}"));
+ }
+ } else if let Some(stripped) = src.strip_prefix('&') {
+ let stripped = stripped.trim_start();
+ if !is_mutbl(stripped) {
+ return (true, assignment_rhs_span, format!("&mut {stripped}"));
+ }
+ }
+ }
+
+ let (suggestability, highlight_span) = match opt_ty_info {
+ // if this is a variable binding with an explicit type,
+ // try to highlight that for the suggestion.
+ Some(ty_span) => (true, ty_span),
+
+ // otherwise, just highlight the span associated with
+ // the (MIR) LocalDecl.
+ None => (false, local_decl.source_info.span),
+ };
+
+ if let Ok(src) = tcx.sess.source_map().span_to_snippet(highlight_span)
+ && let (true, Some(ws_pos)) = (src.starts_with("&'"), src.find(char::is_whitespace))
+ {
+ let lt_name = &src[1..ws_pos];
+ let ty = &src[ws_pos..];
+ return (true, highlight_span, format!("&{} mut{}", lt_name, ty));
+ }
+
+ let ty_mut = local_decl.ty.builtin_deref(true).unwrap();
+ assert_eq!(ty_mut.mutbl, hir::Mutability::Not);
+ (
+ suggestability,
+ highlight_span,
+ if local_decl.ty.is_region_ptr() {
+ format!("&mut {}", ty_mut.ty)
+ } else {
+ format!("*mut {}", ty_mut.ty)
+ },
+ )
+}
+
+fn is_closure_or_generator(ty: Ty<'_>) -> bool {
+ ty.is_closure() || ty.is_generator()
+}
+
+/// Given a field that needs to be mutable, returns a span where the " mut " could go.
+/// This function expects the local to be a reference to a struct in order to produce a span.
+///
+/// ```text
+/// LL | s: &'a String
+/// | ^^^ returns a span taking up the space here
+/// ```
+fn get_mut_span_in_struct_field<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ field: mir::Field,
+) -> Option<Span> {
+ // Expect our local to be a reference to a struct of some kind.
+ if let ty::Ref(_, ty, _) = ty.kind()
+ && let ty::Adt(def, _) = ty.kind()
+ && let field = def.all_fields().nth(field.index())?
+ // Use the HIR types to construct the diagnostic message.
+ && let node = tcx.hir().find_by_def_id(field.did.as_local()?)?
+ // Now we're dealing with the actual struct that we're going to suggest a change to,
+ // we can expect a field that is an immutable reference to a type.
+ && let hir::Node::Field(field) = node
+ && let hir::TyKind::Rptr(lt, hir::MutTy { mutbl: hir::Mutability::Not, ty }) = field.ty.kind
+ {
+ return Some(lt.span.between(ty.span));
+ }
+
+ None
+}
+
+/// If possible, suggest replacing `ref` with `ref mut`.
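+/// (E.g., a sketch: the pattern `ref x` becomes `ref mut x`.)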
+fn suggest_ref_mut(tcx: TyCtxt<'_>, binding_span: Span) -> Option<String> {
+ let hi_src = tcx.sess.source_map().span_to_snippet(binding_span).ok()?;
+ if hi_src.starts_with("ref") && hi_src["ref".len()..].starts_with(rustc_lexer::is_whitespace) {
+ let replacement = format!("ref mut{}", &hi_src["ref".len()..]);
+ Some(replacement)
+ } else {
+ None
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/outlives_suggestion.rs b/compiler/rustc_borrowck/src/diagnostics/outlives_suggestion.rs
new file mode 100644
index 000000000..d359d7efb
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/outlives_suggestion.rs
@@ -0,0 +1,261 @@
+//! Contains utilities for generating suggestions for borrowck errors related to unsatisfied
+//! outlives constraints.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::Diagnostic;
+use rustc_middle::ty::RegionVid;
+use smallvec::SmallVec;
+use std::collections::BTreeMap;
+use tracing::debug;
+
+use crate::MirBorrowckCtxt;
+
+use super::{ErrorConstraintInfo, RegionName, RegionNameSource};
+
+/// The different things we could suggest.
+enum SuggestedConstraint {
+ /// Outlives(a, [b, c, d, ...]) => 'a: 'b + 'c + 'd + ...
+ Outlives(RegionName, SmallVec<[RegionName; 2]>),
+
+ /// 'a = 'b
+ Equal(RegionName, RegionName),
+
+ /// 'a: 'static, i.e., 'a = 'static, and the user should just use 'static
+ Static(RegionName),
+}
+
+/// Collects information about outlives constraints that needed to be added for a given MIR node
+/// corresponding to a function definition.
+///
+/// Adds a help note suggesting adding a where clause with the needed constraints.
+#[derive(Default)]
+pub struct OutlivesSuggestionBuilder {
+ /// The list of outlives constraints that need to be added. Specifically, we map each free
+ /// region to all other regions that it must outlive. I will use the shorthand `fr:
+ /// outlived_frs`. Not all of these regions will already have names necessarily. Some could be
+ /// implicit free regions that we inferred. These will need to be given names in the final
+ /// suggestion message.
+ constraints_to_add: BTreeMap<RegionVid, Vec<RegionVid>>,
+}
+
+impl OutlivesSuggestionBuilder {
+ /// Returns `true` iff the `RegionNameSource` is a valid source for an outlives
+ /// suggestion.
+ //
+ // FIXME: Currently, we only report suggestions if the `RegionNameSource` is an early-bound
+ // region or a named region, avoiding using regions with synthetic names altogether. This
+ // allows us to avoid giving impossible suggestions (e.g. adding bounds to closure args).
+ // We can probably be less conservative, since some inferred free regions are nameable (e.g.,
+ // the user can explicitly name them). To do this, we would allow some regions whose names
+ // come from `MatchedAdtAndSegment`, being careful to filter out bad suggestions, such as
+ // naming the `'self` lifetime in methods, etc.
+ fn region_name_is_suggestable(name: &RegionName) -> bool {
+ match name.source {
+ RegionNameSource::NamedEarlyBoundRegion(..)
+ | RegionNameSource::NamedFreeRegion(..)
+ | RegionNameSource::Static => true,
+
+ // Don't give suggestions for upvars, closure return types, or other unnameable
+ // regions.
+ RegionNameSource::SynthesizedFreeEnvRegion(..)
+ | RegionNameSource::AnonRegionFromArgument(..)
+ | RegionNameSource::AnonRegionFromUpvar(..)
+ | RegionNameSource::AnonRegionFromOutput(..)
+ | RegionNameSource::AnonRegionFromYieldTy(..)
+ | RegionNameSource::AnonRegionFromAsyncFn(..)
+ | RegionNameSource::AnonRegionFromImplSignature(..) => {
+ debug!("Region {:?} is NOT suggestable", name);
+ false
+ }
+ }
+ }
+
+ /// Returns a name for the region if it is suggestable. See `region_name_is_suggestable`.
+ fn region_vid_to_name(
+ &self,
+ mbcx: &MirBorrowckCtxt<'_, '_>,
+ region: RegionVid,
+ ) -> Option<RegionName> {
+ mbcx.give_region_a_name(region).filter(Self::region_name_is_suggestable)
+ }
+
+ /// Compiles a list of all suggestions to be printed in the final big suggestion.
+ fn compile_all_suggestions(
+ &self,
+ mbcx: &MirBorrowckCtxt<'_, '_>,
+ ) -> SmallVec<[SuggestedConstraint; 2]> {
+ let mut suggested = SmallVec::new();
+
+ // Keep track of variables that we have already suggested unifying so that we don't print
+ // out silly duplicate messages.
+ let mut unified_already = FxHashSet::default();
+
+ for (fr, outlived) in &self.constraints_to_add {
+ let Some(fr_name) = self.region_vid_to_name(mbcx, *fr) else {
+ continue;
+ };
+
+ let outlived = outlived
+ .iter()
+ // if there is a `None`, we will just omit that constraint
+ .filter_map(|fr| self.region_vid_to_name(mbcx, *fr).map(|rname| (fr, rname)))
+ .collect::<Vec<_>>();
+
+ // No suggestable outlived lifetimes.
+ if outlived.is_empty() {
+ continue;
+ }
+
+ // There are three types of suggestions we can make:
+ // 1) Suggest a bound: 'a: 'b
+ // 2) Suggest replacing 'a with 'static. If any of `outlived` is `'static`, then we
+ // should just replace 'a with 'static.
+ // 3) Suggest unifying 'a with 'b if we have both 'a: 'b and 'b: 'a
+
+ if outlived
+ .iter()
+ .any(|(_, outlived_name)| matches!(outlived_name.source, RegionNameSource::Static))
+ {
+ suggested.push(SuggestedConstraint::Static(fr_name));
+ } else {
+ // We want to isolate all lifetimes that should be unified and print out
+ // separate messages for them.
+
+ let (unified, other): (Vec<_>, Vec<_>) = outlived.into_iter().partition(
+ // Do we have both 'fr: 'r and 'r: 'fr?
+ |(r, _)| {
+ self.constraints_to_add
+ .get(r)
+ .map(|r_outlived| r_outlived.as_slice().contains(fr))
+ .unwrap_or(false)
+ },
+ );
+
+ for (r, bound) in unified.into_iter() {
+ if !unified_already.contains(fr) {
+ suggested.push(SuggestedConstraint::Equal(fr_name.clone(), bound));
+ unified_already.insert(r);
+ }
+ }
+
+ if !other.is_empty() {
+ let other =
+ other.iter().map(|(_, rname)| rname.clone()).collect::<SmallVec<_>>();
+ suggested.push(SuggestedConstraint::Outlives(fr_name, other))
+ }
+ }
+ }
+
+ suggested
+ }
+
+ /// Add the outlives constraint `fr: outlived_fr` to the set of constraints we need to suggest.
+ pub(crate) fn collect_constraint(&mut self, fr: RegionVid, outlived_fr: RegionVid) {
+ debug!("Collected {:?}: {:?}", fr, outlived_fr);
+
+ // Add to set of constraints for final help note.
+ self.constraints_to_add.entry(fr).or_default().push(outlived_fr);
+ }
+
+ /// Emit an intermediate note on the given `Diagnostic` if the involved regions are
+ /// suggestable.
+ pub(crate) fn intermediate_suggestion(
+ &mut self,
+ mbcx: &MirBorrowckCtxt<'_, '_>,
+ errci: &ErrorConstraintInfo<'_>,
+ diag: &mut Diagnostic,
+ ) {
+ // Emit an intermediate note.
+ let fr_name = self.region_vid_to_name(mbcx, errci.fr);
+ let outlived_fr_name = self.region_vid_to_name(mbcx, errci.outlived_fr);
+
+ if let (Some(fr_name), Some(outlived_fr_name)) = (fr_name, outlived_fr_name)
+ && !matches!(outlived_fr_name.source, RegionNameSource::Static)
+ {
+ diag.help(&format!(
+ "consider adding the following bound: `{fr_name}: {outlived_fr_name}`",
+ ));
+ }
+ }
+
+ /// If there is a suggestion to emit, add a diagnostic to the buffer. This is the final
+ /// suggestion including all collected constraints.
+ pub(crate) fn add_suggestion(&self, mbcx: &mut MirBorrowckCtxt<'_, '_>) {
+ // No constraints to add? Done.
+ if self.constraints_to_add.is_empty() {
+ debug!("No constraints to suggest.");
+ return;
+ }
+
+ // If there is only one constraint to suggest, then we already suggested it in the
+ // intermediate suggestion above.
+ if self.constraints_to_add.len() == 1
+ && self.constraints_to_add.values().next().unwrap().len() == 1
+ {
+ debug!("Only 1 suggestion. Skipping.");
+ return;
+ }
+
+ // Get all suggestable constraints.
+ let suggested = self.compile_all_suggestions(mbcx);
+
+ // If there are no suggestable constraints...
+ if suggested.is_empty() {
+ debug!("Only 1 suggestable constraint. Skipping.");
+ return;
+ }
+
+ // If there is exactly one suggestable constraint, then just suggest it. Otherwise, emit a
+ // single diagnostic that lists all of them.
+ let mut diag = if suggested.len() == 1 {
+ mbcx.infcx.tcx.sess.diagnostic().struct_help(&match suggested.last().unwrap() {
+ SuggestedConstraint::Outlives(a, bs) => {
+ let bs: SmallVec<[String; 2]> = bs.iter().map(|r| format!("{}", r)).collect();
+ format!("add bound `{}: {}`", a, bs.join(" + "))
+ }
+
+ SuggestedConstraint::Equal(a, b) => {
+ format!("`{}` and `{}` must be the same: replace one with the other", a, b)
+ }
+ SuggestedConstraint::Static(a) => format!("replace `{}` with `'static`", a),
+ })
+ } else {
+ // Create a new diagnostic.
+ let mut diag = mbcx
+ .infcx
+ .tcx
+ .sess
+ .diagnostic()
+ .struct_help("the following changes may resolve your lifetime errors");
+
+ // Add suggestions.
+ for constraint in suggested {
+ match constraint {
+ SuggestedConstraint::Outlives(a, bs) => {
+ let bs: SmallVec<[String; 2]> =
+ bs.iter().map(|r| format!("{}", r)).collect();
+ diag.help(&format!("add bound `{}: {}`", a, bs.join(" + ")));
+ }
+ SuggestedConstraint::Equal(a, b) => {
+ diag.help(&format!(
+ "`{}` and `{}` must be the same: replace one with the other",
+ a, b
+ ));
+ }
+ SuggestedConstraint::Static(a) => {
+ diag.help(&format!("replace `{}` with `'static`", a));
+ }
+ }
+ }
+
+ diag
+ };
+
+ // We want this message to appear after other messages on the mir def.
+ let mir_span = mbcx.body.span;
+ diag.sort_span = mir_span.shrink_to_hi();
+
+ // Buffer the diagnostic
+ mbcx.buffer_non_error_diag(diag);
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
new file mode 100644
index 000000000..176090c3b
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs
@@ -0,0 +1,904 @@
+//! Error reporting machinery for lifetime errors.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, MultiSpan};
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::{self as hir, Item, ItemKind, Node};
+use rustc_infer::infer::{
+ error_reporting::nice_region_error::{
+ self, find_anon_type, find_param_with_region, suggest_adding_lifetime_params,
+ HirTraitObjectVisitor, NiceRegionError, TraitObjectVisitor,
+ },
+ error_reporting::unexpected_hidden_region_diagnostic,
+ NllRegionVariableOrigin, RelateParamBound,
+};
+use rustc_middle::hir::place::PlaceBase;
+use rustc_middle::mir::{ConstraintCategory, ReturnConstraint};
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::Region;
+use rustc_middle::ty::TypeVisitor;
+use rustc_middle::ty::{self, RegionVid, Ty};
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::Span;
+
+use crate::borrowck_errors;
+use crate::session_diagnostics::GenericDoesNotLiveLongEnough;
+
+use super::{OutlivesSuggestionBuilder, RegionName};
+use crate::region_infer::BlameConstraint;
+use crate::{
+ nll::ConstraintDescription,
+ region_infer::{values::RegionElement, TypeTest},
+ universal_regions::DefiningTy,
+ MirBorrowckCtxt,
+};
+
+impl<'tcx> ConstraintDescription for ConstraintCategory<'tcx> {
+ fn description(&self) -> &'static str {
+ // Must end with a space. Allows for empty names to be provided.
+ match self {
+ ConstraintCategory::Assignment => "assignment ",
+ ConstraintCategory::Return(_) => "returning this value ",
+ ConstraintCategory::Yield => "yielding this value ",
+ ConstraintCategory::UseAsConst => "using this value as a constant ",
+ ConstraintCategory::UseAsStatic => "using this value as a static ",
+ ConstraintCategory::Cast => "cast ",
+ ConstraintCategory::CallArgument(_) => "argument ",
+ ConstraintCategory::TypeAnnotation => "type annotation ",
+ ConstraintCategory::ClosureBounds => "closure body ",
+ ConstraintCategory::SizedBound => "proving this value is `Sized` ",
+ ConstraintCategory::CopyBound => "copying this value ",
+ ConstraintCategory::OpaqueType => "opaque type ",
+ ConstraintCategory::ClosureUpvar(_) => "closure capture ",
+ ConstraintCategory::Usage => "this usage ",
+ ConstraintCategory::Predicate(_)
+ | ConstraintCategory::Boring
+ | ConstraintCategory::BoringNoLocation
+ | ConstraintCategory::Internal => "",
+ }
+ }
+}
+
+/// A collection of errors encountered during region inference. This is needed to efficiently
+/// report errors after borrow checking.
+///
+/// Usually we expect this to either be empty or contain a small number of items, so we can avoid
+/// allocation most of the time.
+pub(crate) type RegionErrors<'tcx> = Vec<RegionErrorKind<'tcx>>;
+
+#[derive(Clone, Debug)]
+pub(crate) enum RegionErrorKind<'tcx> {
+ /// A generic bound failure for a type test (`T: 'a`).
+ TypeTestError { type_test: TypeTest<'tcx> },
+
+ /// An unexpected hidden region for an opaque type.
+ UnexpectedHiddenRegion {
+ /// The span for the member constraint.
+ span: Span,
+ /// The hidden type.
+ hidden_ty: Ty<'tcx>,
+ /// The opaque type.
+ key: ty::OpaqueTypeKey<'tcx>,
+ /// The unexpected region.
+ member_region: ty::Region<'tcx>,
+ },
+
+ /// Higher-ranked subtyping error.
+ BoundUniversalRegionError {
+ /// The placeholder free region.
+ longer_fr: RegionVid,
+ /// The region element that erroneously must be outlived by `longer_fr`.
+ error_element: RegionElement,
+ /// The placeholder region.
+ placeholder: ty::PlaceholderRegion,
+ },
+
+ /// Any other lifetime error.
+ RegionError {
+ /// The origin of the region.
+ fr_origin: NllRegionVariableOrigin,
+ /// The region that should outlive `shorter_fr`.
+ longer_fr: RegionVid,
+ /// The region that should be shorter, but we can't prove it.
+ shorter_fr: RegionVid,
+ /// Indicates whether this is a reported error. We currently only report the first error
+ /// encountered and leave the rest unreported so as not to overwhelm the user.
+ is_reported: bool,
+ },
+}
+
+/// Information about the various region constraints involved in a borrow checker error.
+#[derive(Clone, Debug)]
+pub struct ErrorConstraintInfo<'tcx> {
+ // fr: outlived_fr
+ pub(super) fr: RegionVid,
+ pub(super) fr_is_local: bool,
+ pub(super) outlived_fr: RegionVid,
+ pub(super) outlived_fr_is_local: bool,
+
+ // Category and span for best blame constraint
+ pub(super) category: ConstraintCategory<'tcx>,
+ pub(super) span: Span,
+}
+
+impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
+ /// Converts a region inference variable into a `ty::Region` that
+ /// we can use for error reporting. If `r` is universally bound,
+ /// then we use the name that we have on record for it. If `r` is
+ /// existentially bound, then we check its inferred value and try
+ /// to find a good name from that. Returns `None` if we can't find
+ /// one (e.g., this is just some random part of the CFG).
+ pub(super) fn to_error_region(&self, r: RegionVid) -> Option<ty::Region<'tcx>> {
+ self.to_error_region_vid(r).and_then(|r| self.regioncx.region_definition(r).external_name)
+ }
+
+ /// Returns the `RegionVid` corresponding to the region returned by
+ /// `to_error_region`.
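+    ///
+    /// For an existentially bound region this walks to an approximate
+    /// universal upper bound, so the result is a nameable stand-in rather
+    /// than the region's exact value.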
+ pub(super) fn to_error_region_vid(&self, r: RegionVid) -> Option<RegionVid> {
+ if self.regioncx.universal_regions().is_universal_region(r) {
+ Some(r)
+ } else {
+ // We just want something nameable, even if it's not
+ // actually an upper bound.
+ let upper_bound = self.regioncx.approx_universal_upper_bound(r);
+
+ if self.regioncx.upper_bound_in_region_scc(r, upper_bound) {
+ self.to_error_region_vid(upper_bound)
+ } else {
+ None
+ }
+ }
+ }
+
+ /// Returns `true` if a closure is inferred to be an `FnMut` closure.
+ fn is_closure_fn_mut(&self, fr: RegionVid) -> bool {
+ if let Some(ty::ReFree(free_region)) = self.to_error_region(fr).as_deref()
+ && let ty::BoundRegionKind::BrEnv = free_region.bound_region
+ && let DefiningTy::Closure(_, substs) = self.regioncx.universal_regions().defining_ty
+ {
+ return substs.as_closure().kind() == ty::ClosureKind::FnMut;
+ }
+
+ false
+ }
+
+ /// Produces nice borrowck error diagnostics for all the errors collected in `nll_errors`.
+ pub(crate) fn report_region_errors(&mut self, nll_errors: RegionErrors<'tcx>) {
+ // Iterate through all the errors, producing a diagnostic for each one. The diagnostics are
+ // buffered in the `MirBorrowckCtxt`.
+
+ let mut outlives_suggestion = OutlivesSuggestionBuilder::default();
+
+ for nll_error in nll_errors.into_iter() {
+ match nll_error {
+ RegionErrorKind::TypeTestError { type_test } => {
+                    // Try to convert the lower-bound region into something named that we can print for the user.
+ let lower_bound_region = self.to_error_region(type_test.lower_bound);
+
+ let type_test_span = type_test.locations.span(&self.body);
+
+ if let Some(lower_bound_region) = lower_bound_region {
+ let generic_ty = type_test.generic_kind.to_ty(self.infcx.tcx);
+ let origin = RelateParamBound(type_test_span, generic_ty, None);
+ self.buffer_error(self.infcx.construct_generic_bound_failure(
+ self.body.source.def_id().expect_local(),
+ type_test_span,
+ Some(origin),
+ type_test.generic_kind,
+ lower_bound_region,
+ ));
+ } else {
+ // FIXME. We should handle this case better. It
+ // indicates that we have e.g., some region variable
+ // whose value is like `'a+'b` where `'a` and `'b` are
+ // distinct unrelated universal regions that are not
+ // known to outlive one another. It'd be nice to have
+ // some examples where this arises to decide how best
+ // to report it; we could probably handle it by
+ // iterating over the universal regions and reporting
+ // an error that multiple bounds are required.
+ self.buffer_error(self.infcx.tcx.sess.create_err(
+ GenericDoesNotLiveLongEnough {
+ kind: type_test.generic_kind.to_string(),
+ span: type_test_span,
+ },
+ ));
+ }
+ }
+
+ RegionErrorKind::UnexpectedHiddenRegion { span, hidden_ty, key, member_region } => {
+ let named_ty = self.regioncx.name_regions(self.infcx.tcx, hidden_ty);
+ let named_key = self.regioncx.name_regions(self.infcx.tcx, key);
+ let named_region = self.regioncx.name_regions(self.infcx.tcx, member_region);
+ self.buffer_error(unexpected_hidden_region_diagnostic(
+ self.infcx.tcx,
+ span,
+ named_ty,
+ named_region,
+ named_key,
+ ));
+ }
+
+ RegionErrorKind::BoundUniversalRegionError {
+ longer_fr,
+ placeholder,
+ error_element,
+ } => {
+ let error_vid = self.regioncx.region_from_element(longer_fr, &error_element);
+
+ // Find the code to blame for the fact that `longer_fr` outlives `error_fr`.
+ let (_, cause) = self.regioncx.find_outlives_blame_span(
+ &self.body,
+ longer_fr,
+ NllRegionVariableOrigin::Placeholder(placeholder),
+ error_vid,
+ );
+
+ let universe = placeholder.universe;
+ let universe_info = self.regioncx.universe_info(universe);
+
+ universe_info.report_error(self, placeholder, error_element, cause);
+ }
+
+ RegionErrorKind::RegionError { fr_origin, longer_fr, shorter_fr, is_reported } => {
+ if is_reported {
+ self.report_region_error(
+ longer_fr,
+ fr_origin,
+ shorter_fr,
+ &mut outlives_suggestion,
+ );
+ } else {
+ // We only report the first error, so as not to overwhelm the user. See
+                        // `RegionErrorKind` docs.
+ //
+ // FIXME: currently we do nothing with these, but perhaps we can do better?
+ // FIXME: try collecting these constraints on the outlives suggestion
+ // builder. Does it make the suggestions any better?
+ debug!(
+ "Unreported region error: can't prove that {:?}: {:?}",
+ longer_fr, shorter_fr
+ );
+ }
+ }
+ }
+ }
+
+        // Emit one outlives suggestion for each MIR def we borrowck
+ outlives_suggestion.add_suggestion(self);
+ }
+
+ fn get_impl_ident_and_self_ty_from_trait(
+ &self,
+ def_id: DefId,
+ trait_objects: &FxHashSet<DefId>,
+ ) -> Option<(Ident, &'tcx hir::Ty<'tcx>)> {
+ let tcx = self.infcx.tcx;
+ match tcx.hir().get_if_local(def_id) {
+ Some(Node::ImplItem(impl_item)) => {
+ match tcx.hir().find_by_def_id(tcx.hir().get_parent_item(impl_item.hir_id())) {
+ Some(Node::Item(Item {
+ kind: ItemKind::Impl(hir::Impl { self_ty, .. }),
+ ..
+ })) => Some((impl_item.ident, self_ty)),
+ _ => None,
+ }
+ }
+ Some(Node::TraitItem(trait_item)) => {
+ let trait_did = tcx.hir().get_parent_item(trait_item.hir_id());
+ match tcx.hir().find_by_def_id(trait_did) {
+ Some(Node::Item(Item { kind: ItemKind::Trait(..), .. })) => {
+ // The method being called is defined in the `trait`, but the `'static`
+ // obligation comes from the `impl`. Find that `impl` so that we can point
+ // at it in the suggestion.
+ let trait_did = trait_did.to_def_id();
+ match tcx
+ .hir()
+ .trait_impls(trait_did)
+ .iter()
+ .filter_map(|&impl_did| {
+ match tcx.hir().get_if_local(impl_did.to_def_id()) {
+ Some(Node::Item(Item {
+ kind: ItemKind::Impl(hir::Impl { self_ty, .. }),
+ ..
+ })) if trait_objects.iter().all(|did| {
+ // FIXME: we should check `self_ty` against the receiver
+ // type in the `UnifyReceiver` context, but for now, use
+ // this imperfect proxy. This will fail if there are
+ // multiple `impl`s for the same trait like
+ // `impl Foo for Box<dyn Bar>` and `impl Foo for dyn Bar`.
+ // In that case, only the first one will get suggestions.
+ let mut traits = vec![];
+ let mut hir_v = HirTraitObjectVisitor(&mut traits, *did);
+ hir_v.visit_ty(self_ty);
+ !traits.is_empty()
+ }) =>
+ {
+ Some(self_ty)
+ }
+ _ => None,
+ }
+ })
+ .next()
+ {
+ Some(self_ty) => Some((trait_item.ident, self_ty)),
+ _ => None,
+ }
+ }
+ _ => None,
+ }
+ }
+ _ => None,
+ }
+ }
+
+ /// Report an error because the universal region `fr` was required to outlive
+ /// `outlived_fr` but it is not known to do so. For example:
+ ///
+ /// ```compile_fail,E0312
+ /// fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x }
+ /// ```
+ ///
+    /// Here we would be invoked with `fr = 'a` and `outlived_fr = 'b`.
+ pub(crate) fn report_region_error(
+ &mut self,
+ fr: RegionVid,
+ fr_origin: NllRegionVariableOrigin,
+ outlived_fr: RegionVid,
+ outlives_suggestion: &mut OutlivesSuggestionBuilder,
+ ) {
+ debug!("report_region_error(fr={:?}, outlived_fr={:?})", fr, outlived_fr);
+
+ let BlameConstraint { category, cause, variance_info, from_closure: _ } =
+ self.regioncx.best_blame_constraint(&self.body, fr, fr_origin, |r| {
+ self.regioncx.provides_universal_region(r, fr, outlived_fr)
+ });
+
+ debug!("report_region_error: category={:?} {:?} {:?}", category, cause, variance_info);
+
+ // Check if we can use one of the "nice region errors".
+ if let (Some(f), Some(o)) = (self.to_error_region(fr), self.to_error_region(outlived_fr)) {
+ let nice = NiceRegionError::new_from_span(self.infcx, cause.span, o, f);
+ if let Some(diag) = nice.try_report_from_nll() {
+ self.buffer_error(diag);
+ return;
+ }
+ }
+
+ let (fr_is_local, outlived_fr_is_local): (bool, bool) = (
+ self.regioncx.universal_regions().is_local_free_region(fr),
+ self.regioncx.universal_regions().is_local_free_region(outlived_fr),
+ );
+
+ debug!(
+ "report_region_error: fr_is_local={:?} outlived_fr_is_local={:?} category={:?}",
+ fr_is_local, outlived_fr_is_local, category
+ );
+
+ let errci = ErrorConstraintInfo {
+ fr,
+ outlived_fr,
+ fr_is_local,
+ outlived_fr_is_local,
+ category,
+ span: cause.span,
+ };
+
+ let mut diag = match (category, fr_is_local, outlived_fr_is_local) {
+ (ConstraintCategory::Return(kind), true, false) if self.is_closure_fn_mut(fr) => {
+ self.report_fnmut_error(&errci, kind)
+ }
+ (ConstraintCategory::Assignment, true, false)
+ | (ConstraintCategory::CallArgument(_), true, false) => {
+ let mut db = self.report_escaping_data_error(&errci);
+
+ outlives_suggestion.intermediate_suggestion(self, &errci, &mut db);
+ outlives_suggestion.collect_constraint(fr, outlived_fr);
+
+ db
+ }
+ _ => {
+ let mut db = self.report_general_error(&errci);
+
+ outlives_suggestion.intermediate_suggestion(self, &errci, &mut db);
+ outlives_suggestion.collect_constraint(fr, outlived_fr);
+
+ db
+ }
+ };
+
+ match variance_info {
+ ty::VarianceDiagInfo::None => {}
+ ty::VarianceDiagInfo::Invariant { ty, param_index } => {
+ let (desc, note) = match ty.kind() {
+ ty::RawPtr(ty_mut) => {
+ assert_eq!(ty_mut.mutbl, rustc_hir::Mutability::Mut);
+ (
+ format!("a mutable pointer to `{}`", ty_mut.ty),
+ "mutable pointers are invariant over their type parameter".to_string(),
+ )
+ }
+ ty::Ref(_, inner_ty, mutbl) => {
+ assert_eq!(*mutbl, rustc_hir::Mutability::Mut);
+ (
+ format!("a mutable reference to `{inner_ty}`"),
+ "mutable references are invariant over their type parameter"
+ .to_string(),
+ )
+ }
+ ty::Adt(adt, substs) => {
+ let generic_arg = substs[param_index as usize];
+ let identity_substs =
+ InternalSubsts::identity_for_item(self.infcx.tcx, adt.did());
+ let base_ty = self.infcx.tcx.mk_adt(*adt, identity_substs);
+ let base_generic_arg = identity_substs[param_index as usize];
+ let adt_desc = adt.descr();
+
+ let desc = format!(
+ "the type `{ty}`, which makes the generic argument `{generic_arg}` invariant"
+ );
+ let note = format!(
+ "the {adt_desc} `{base_ty}` is invariant over the parameter `{base_generic_arg}`"
+ );
+ (desc, note)
+ }
+ ty::FnDef(def_id, _) => {
+ let name = self.infcx.tcx.item_name(*def_id);
+ let identity_substs =
+ InternalSubsts::identity_for_item(self.infcx.tcx, *def_id);
+ let desc = format!("a function pointer to `{name}`");
+ let note = format!(
+ "the function `{name}` is invariant over the parameter `{}`",
+ identity_substs[param_index as usize]
+ );
+ (desc, note)
+ }
+ _ => panic!("Unexpected type {:?}", ty),
+ };
+ diag.note(&format!("requirement occurs because of {desc}",));
+ diag.note(&note);
+ diag.help("see <https://doc.rust-lang.org/nomicon/subtyping.html> for more information about variance");
+ }
+ }
+
+ self.buffer_error(diag);
+ }
+
+ /// Report a specialized error when `FnMut` closures return a reference to a captured variable.
+ /// This function expects `fr` to be local and `outlived_fr` to not be local.
+ ///
+ /// ```text
+ /// error: captured variable cannot escape `FnMut` closure body
+ /// --> $DIR/issue-53040.rs:15:8
+ /// |
+ /// LL | || &mut v;
+ /// | -- ^^^^^^ creates a reference to a captured variable which escapes the closure body
+ /// | |
+ /// | inferred to be a `FnMut` closure
+ /// |
+ /// = note: `FnMut` closures only have access to their captured variables while they are
+ /// executing...
+ /// = note: ...therefore, returned references to captured variables will escape the closure
+ /// ```
+ fn report_fnmut_error(
+ &self,
+ errci: &ErrorConstraintInfo<'tcx>,
+ kind: ReturnConstraint,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let ErrorConstraintInfo { outlived_fr, span, .. } = errci;
+
+ let mut diag = self
+ .infcx
+ .tcx
+ .sess
+ .struct_span_err(*span, "captured variable cannot escape `FnMut` closure body");
+
+ let mut output_ty = self.regioncx.universal_regions().unnormalized_output_ty;
+ if let ty::Opaque(def_id, _) = *output_ty.kind() {
+ output_ty = self.infcx.tcx.type_of(def_id)
+ };
+
+ debug!("report_fnmut_error: output_ty={:?}", output_ty);
+
+ let message = match output_ty.kind() {
+ ty::Closure(_, _) => {
+ "returns a closure that contains a reference to a captured variable, which then \
+ escapes the closure body"
+ }
+ ty::Adt(def, _) if self.infcx.tcx.is_diagnostic_item(sym::gen_future, def.did()) => {
+ "returns an `async` block that contains a reference to a captured variable, which then \
+ escapes the closure body"
+ }
+ _ => "returns a reference to a captured variable which escapes the closure body",
+ };
+
+ diag.span_label(*span, message);
+
+ if let ReturnConstraint::ClosureUpvar(upvar_field) = kind {
+ let def_id = match self.regioncx.universal_regions().defining_ty {
+ DefiningTy::Closure(def_id, _) => def_id,
+ ty => bug!("unexpected DefiningTy {:?}", ty),
+ };
+
+ let captured_place = &self.upvars[upvar_field.index()].place;
+ let defined_hir = match captured_place.place.base {
+ PlaceBase::Local(hirid) => Some(hirid),
+ PlaceBase::Upvar(upvar) => Some(upvar.var_path.hir_id),
+ _ => None,
+ };
+
+ if let Some(def_hir) = defined_hir {
+ let upvars_map = self.infcx.tcx.upvars_mentioned(def_id).unwrap();
+ let upvar_def_span = self.infcx.tcx.hir().span(def_hir);
+ let upvar_span = upvars_map.get(&def_hir).unwrap().span;
+ diag.span_label(upvar_def_span, "variable defined here");
+ diag.span_label(upvar_span, "variable captured here");
+ }
+ }
+
+ if let Some(fr_span) = self.give_region_a_name(*outlived_fr).unwrap().span() {
+ diag.span_label(fr_span, "inferred to be a `FnMut` closure");
+ }
+
+ diag.note(
+ "`FnMut` closures only have access to their captured variables while they are \
+ executing...",
+ );
+ diag.note("...therefore, they cannot allow references to captured variables to escape");
+
+ diag
+ }
+
+ /// Reports an error specifically for when data is escaping a closure.
+ ///
+ /// ```text
+ /// error: borrowed data escapes outside of function
+ /// --> $DIR/lifetime-bound-will-change-warning.rs:44:5
+ /// |
+ /// LL | fn test2<'a>(x: &'a Box<Fn()+'a>) {
+ /// | - `x` is a reference that is only valid in the function body
+ /// LL | // but ref_obj will not, so warn.
+ /// LL | ref_obj(x)
+ /// | ^^^^^^^^^^ `x` escapes the function body here
+ /// ```
+ fn report_escaping_data_error(
+ &self,
+ errci: &ErrorConstraintInfo<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let ErrorConstraintInfo { span, category, .. } = errci;
+
+ let fr_name_and_span = self.regioncx.get_var_name_and_span_for_region(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &self.upvars,
+ errci.fr,
+ );
+ let outlived_fr_name_and_span = self.regioncx.get_var_name_and_span_for_region(
+ self.infcx.tcx,
+ &self.body,
+ &self.local_names,
+ &self.upvars,
+ errci.outlived_fr,
+ );
+
+ let (_, escapes_from) = self
+ .infcx
+ .tcx
+ .article_and_description(self.regioncx.universal_regions().defining_ty.def_id());
+
+ // Revert to the normal error in these cases.
+ // Assignments aren't "escapes" in function items.
+ if (fr_name_and_span.is_none() && outlived_fr_name_and_span.is_none())
+ || (*category == ConstraintCategory::Assignment
+ && self.regioncx.universal_regions().defining_ty.is_fn_def())
+ || self.regioncx.universal_regions().defining_ty.is_const()
+ {
+ return self.report_general_error(&ErrorConstraintInfo {
+ fr_is_local: true,
+ outlived_fr_is_local: false,
+ ..*errci
+ });
+ }
+
+ let mut diag =
+ borrowck_errors::borrowed_data_escapes_closure(self.infcx.tcx, *span, escapes_from);
+
+ if let Some((Some(outlived_fr_name), outlived_fr_span)) = outlived_fr_name_and_span {
+ diag.span_label(
+ outlived_fr_span,
+ format!("`{outlived_fr_name}` declared here, outside of the {escapes_from} body",),
+ );
+ }
+
+ if let Some((Some(fr_name), fr_span)) = fr_name_and_span {
+ diag.span_label(
+ fr_span,
+ format!(
+ "`{fr_name}` is a reference that is only valid in the {escapes_from} body",
+ ),
+ );
+
+ diag.span_label(*span, format!("`{fr_name}` escapes the {escapes_from} body here"));
+ }
+
+ // Only show an extra note if we can find an 'error region' for both of the region
+ // variables. This avoids showing a noisy note that just mentions 'synthetic' regions
+ // that don't help the user understand the error.
+ match (self.to_error_region(errci.fr), self.to_error_region(errci.outlived_fr)) {
+ (Some(f), Some(o)) => {
+ self.maybe_suggest_constrain_dyn_trait_impl(&mut diag, f, o, category);
+
+ let fr_region_name = self.give_region_a_name(errci.fr).unwrap();
+ fr_region_name.highlight_region_name(&mut diag);
+ let outlived_fr_region_name = self.give_region_a_name(errci.outlived_fr).unwrap();
+ outlived_fr_region_name.highlight_region_name(&mut diag);
+
+ diag.span_label(
+ *span,
+ format!(
+ "{}requires that `{}` must outlive `{}`",
+ category.description(),
+ fr_region_name,
+ outlived_fr_region_name,
+ ),
+ );
+ }
+ _ => {}
+ }
+
+ diag
+ }
+
+ /// Reports a region inference error for the general case with named/synthesized lifetimes to
+ /// explain what is happening.
+ ///
+ /// ```text
+ /// error: unsatisfied lifetime constraints
+ /// --> $DIR/regions-creating-enums3.rs:17:5
+ /// |
+ /// LL | fn mk_add_bad1<'a,'b>(x: &'a ast<'a>, y: &'b ast<'b>) -> ast<'a> {
+ /// | -- -- lifetime `'b` defined here
+ /// | |
+ /// | lifetime `'a` defined here
+ /// LL | ast::add(x, y)
+ /// | ^^^^^^^^^^^^^^ function was supposed to return data with lifetime `'a` but it
+ /// | is returning data with lifetime `'b`
+ /// ```
+ fn report_general_error(
+ &self,
+ errci: &ErrorConstraintInfo<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let ErrorConstraintInfo {
+ fr,
+ fr_is_local,
+ outlived_fr,
+ outlived_fr_is_local,
+ span,
+ category,
+ ..
+ } = errci;
+
+ let mut diag =
+ self.infcx.tcx.sess.struct_span_err(*span, "lifetime may not live long enough");
+
+ let (_, mir_def_name) =
+ self.infcx.tcx.article_and_description(self.mir_def_id().to_def_id());
+
+ let fr_name = self.give_region_a_name(*fr).unwrap();
+ fr_name.highlight_region_name(&mut diag);
+ let outlived_fr_name = self.give_region_a_name(*outlived_fr).unwrap();
+ outlived_fr_name.highlight_region_name(&mut diag);
+
+ match (category, outlived_fr_is_local, fr_is_local) {
+ (ConstraintCategory::Return(_), true, _) => {
+ diag.span_label(
+ *span,
+ format!(
+ "{mir_def_name} was supposed to return data with lifetime `{outlived_fr_name}` but it is returning \
+ data with lifetime `{fr_name}`",
+ ),
+ );
+ }
+ _ => {
+ diag.span_label(
+ *span,
+ format!(
+ "{}requires that `{}` must outlive `{}`",
+ category.description(),
+ fr_name,
+ outlived_fr_name,
+ ),
+ );
+ }
+ }
+
+ self.add_static_impl_trait_suggestion(&mut diag, *fr, fr_name, *outlived_fr);
+ self.suggest_adding_lifetime_params(&mut diag, *fr, *outlived_fr);
+
+ diag
+ }
+
+ /// Adds a suggestion to errors where an `impl Trait` is returned.
+ ///
+ /// ```text
+ /// help: to allow this `impl Trait` to capture borrowed data with lifetime `'1`, add `'_` as
+ /// a constraint
+ /// |
+ /// LL | fn iter_values_anon(&self) -> impl Iterator<Item=u32> + 'a {
+ /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ /// ```
+ fn add_static_impl_trait_suggestion(
+ &self,
+ diag: &mut Diagnostic,
+ fr: RegionVid,
+ // We need to pass `fr_name` - computing it again will label it twice.
+ fr_name: RegionName,
+ outlived_fr: RegionVid,
+ ) {
+ if let (Some(f), Some(outlived_f)) =
+ (self.to_error_region(fr), self.to_error_region(outlived_fr))
+ {
+ if *outlived_f != ty::ReStatic {
+ return;
+ }
+
+ let fn_returns = self
+ .infcx
+ .tcx
+ .is_suitable_region(f)
+ .map(|r| self.infcx.tcx.return_type_impl_or_dyn_traits(r.def_id))
+ .unwrap_or_default();
+
+ if fn_returns.is_empty() {
+ return;
+ }
+
+ let param = if let Some(param) = find_param_with_region(self.infcx.tcx, f, outlived_f) {
+ param
+ } else {
+ return;
+ };
+
+ let lifetime = if f.has_name() { fr_name.name } else { kw::UnderscoreLifetime };
+
+ let arg = match param.param.pat.simple_ident() {
+ Some(simple_ident) => format!("argument `{}`", simple_ident),
+ None => "the argument".to_string(),
+ };
+ let captures = format!("captures data from {}", arg);
+
+ return nice_region_error::suggest_new_region_bound(
+ self.infcx.tcx,
+ diag,
+ fn_returns,
+ lifetime.to_string(),
+ Some(arg),
+ captures,
+ Some((param.param_ty_span, param.param_ty.to_string())),
+ );
+ }
+ }
+
+ fn maybe_suggest_constrain_dyn_trait_impl(
+ &self,
+ diag: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ f: Region<'tcx>,
+ o: Region<'tcx>,
+ category: &ConstraintCategory<'tcx>,
+ ) {
+ if !o.is_static() {
+ return;
+ }
+
+ let tcx = self.infcx.tcx;
+
+ let instance = if let ConstraintCategory::CallArgument(Some(func_ty)) = category {
+ let (fn_did, substs) = match func_ty.kind() {
+ ty::FnDef(fn_did, substs) => (fn_did, substs),
+ _ => return,
+ };
+ debug!(?fn_did, ?substs);
+
+ // Only suggest this on function calls, not closures
+ let ty = tcx.type_of(fn_did);
+ debug!("ty: {:?}, ty.kind: {:?}", ty, ty.kind());
+ if let ty::Closure(_, _) = ty.kind() {
+ return;
+ }
+
+ if let Ok(Some(instance)) = ty::Instance::resolve(
+ tcx,
+ self.param_env,
+ *fn_did,
+ self.infcx.resolve_vars_if_possible(substs),
+ ) {
+ instance
+ } else {
+ return;
+ }
+ } else {
+ return;
+ };
+
+ let param = match find_param_with_region(tcx, f, o) {
+ Some(param) => param,
+ None => return,
+ };
+ debug!(?param);
+
+ let mut visitor = TraitObjectVisitor(FxHashSet::default());
+ visitor.visit_ty(param.param_ty);
+
+ let Some((ident, self_ty)) =
+ self.get_impl_ident_and_self_ty_from_trait(instance.def_id(), &visitor.0) else {return};
+
+ self.suggest_constrain_dyn_trait_in_impl(diag, &visitor.0, ident, self_ty);
+ }
+
+ #[instrument(skip(self, err), level = "debug")]
+ fn suggest_constrain_dyn_trait_in_impl(
+ &self,
+ err: &mut Diagnostic,
+ found_dids: &FxHashSet<DefId>,
+ ident: Ident,
+ self_ty: &hir::Ty<'_>,
+ ) -> bool {
+ debug!("err: {:#?}", err);
+ let mut suggested = false;
+ for found_did in found_dids {
+ let mut traits = vec![];
+ let mut hir_v = HirTraitObjectVisitor(&mut traits, *found_did);
+ hir_v.visit_ty(&self_ty);
+ debug!("trait spans found: {:?}", traits);
+ for span in &traits {
+ let mut multi_span: MultiSpan = vec![*span].into();
+ multi_span
+ .push_span_label(*span, "this has an implicit `'static` lifetime requirement");
+ multi_span.push_span_label(
+ ident.span,
+                    "calling this method introduces the `impl`'s `'static` requirement",
+ );
+ err.span_note(multi_span, "the used `impl` has a `'static` requirement");
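+                // A sketch of the suggestion's effect: for an
+                // `impl Foo for Box<dyn Bar>`, appending " + '_" after the
+                // trait object's span yields `Box<dyn Bar + '_>`, relaxing
+                // the implicit `'static` bound that `dyn Bar` otherwise
+                // carries in that position.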
+ err.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ "consider relaxing the implicit `'static` requirement",
+ " + '_",
+ Applicability::MaybeIncorrect,
+ );
+ suggested = true;
+ }
+ }
+ suggested
+ }
+
+ fn suggest_adding_lifetime_params(
+ &self,
+ diag: &mut Diagnostic,
+ sub: RegionVid,
+ sup: RegionVid,
+ ) {
+ let (Some(sub), Some(sup)) = (self.to_error_region(sub), self.to_error_region(sup)) else {
+ return
+ };
+
+ let Some((ty_sub, _)) = self
+ .infcx
+ .tcx
+ .is_suitable_region(sub)
+ .and_then(|anon_reg| find_anon_type(self.infcx.tcx, sub, &anon_reg.boundregion)) else {
+ return
+ };
+
+ let Some((ty_sup, _)) = self
+ .infcx
+ .tcx
+ .is_suitable_region(sup)
+ .and_then(|anon_reg| find_anon_type(self.infcx.tcx, sup, &anon_reg.boundregion)) else {
+ return
+ };
+
+ suggest_adding_lifetime_params(self.infcx.tcx, sub, ty_sup, ty_sub, diag);
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_name.rs b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
new file mode 100644
index 000000000..a87e8bd5b
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/region_name.rs
@@ -0,0 +1,896 @@
+use std::fmt::{self, Display};
+use std::iter;
+
+use rustc_errors::Diagnostic;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_middle::ty::print::RegionHighlightMode;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{self, DefIdTree, RegionVid, Ty};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+
+use crate::{nll::ToRegionVid, universal_regions::DefiningTy, MirBorrowckCtxt};
+
+/// A name for a particular region used in emitting diagnostics. This name could be a generated
+/// name like `'1`, a name used by the user like `'a`, or a name like `'static`.
+#[derive(Debug, Clone)]
+pub(crate) struct RegionName {
+ /// The name of the region (interned).
+ pub(crate) name: Symbol,
+ /// Where the region comes from.
+ pub(crate) source: RegionNameSource,
+}
+
+/// Denotes the source of a region that is named by a `RegionName`. For example, a free region that
+/// was named by the user would get `NamedFreeRegion` and `'static` lifetime would get `Static`.
+/// This helps to print the right kinds of diagnostics.
+#[derive(Debug, Clone)]
+pub(crate) enum RegionNameSource {
+ /// A bound (not free) region that was substituted at the def site (not an HRTB).
+ NamedEarlyBoundRegion(Span),
+ /// A free region that the user has a name (`'a`) for.
+ NamedFreeRegion(Span),
+ /// The `'static` region.
+ Static,
+ /// The free region corresponding to the environment of a closure.
+ SynthesizedFreeEnvRegion(Span, &'static str),
+ /// The region corresponding to an argument.
+ AnonRegionFromArgument(RegionNameHighlight),
+ /// The region corresponding to a closure upvar.
+ AnonRegionFromUpvar(Span, Symbol),
+ /// The region corresponding to the return type of a closure.
+ AnonRegionFromOutput(RegionNameHighlight, &'static str),
+ /// The region from a type yielded by a generator.
+ AnonRegionFromYieldTy(Span, String),
+ /// An anonymous region from an async fn.
+ AnonRegionFromAsyncFn(Span),
+    /// An anonymous region from an impl self type or trait.
+ AnonRegionFromImplSignature(Span, &'static str),
+}
+
+/// Describes what to highlight to explain to the user that we're giving an anonymous region a
+/// synthesized name, and how to highlight it.
+#[derive(Debug, Clone)]
+pub(crate) enum RegionNameHighlight {
+ /// The anonymous region corresponds to a reference that was found by traversing the type in the HIR.
+ MatchedHirTy(Span),
+ /// The anonymous region corresponds to a `'_` in the generics list of a struct/enum/union.
+ MatchedAdtAndSegment(Span),
+ /// The anonymous region corresponds to a region where the type annotation is completely missing
+    /// from the code, e.g. in a closure's arguments `|x| { ... }`, where `x` is a reference.
+ CannotMatchHirTy(Span, String),
+ /// The anonymous region corresponds to a region where the type annotation is completely missing
+ /// from the code, and *even if* we print out the full name of the type, the region name won't
+ /// be included. This currently occurs for opaque types like `impl Future`.
+ Occluded(Span, String),
+}
+
+impl RegionName {
+ pub(crate) fn was_named(&self) -> bool {
+ match self.source {
+ RegionNameSource::NamedEarlyBoundRegion(..)
+ | RegionNameSource::NamedFreeRegion(..)
+ | RegionNameSource::Static => true,
+ RegionNameSource::SynthesizedFreeEnvRegion(..)
+ | RegionNameSource::AnonRegionFromArgument(..)
+ | RegionNameSource::AnonRegionFromUpvar(..)
+ | RegionNameSource::AnonRegionFromOutput(..)
+ | RegionNameSource::AnonRegionFromYieldTy(..)
+ | RegionNameSource::AnonRegionFromAsyncFn(..)
+ | RegionNameSource::AnonRegionFromImplSignature(..) => false,
+ }
+ }
+
+ pub(crate) fn span(&self) -> Option<Span> {
+ match self.source {
+ RegionNameSource::Static => None,
+ RegionNameSource::NamedEarlyBoundRegion(span)
+ | RegionNameSource::NamedFreeRegion(span)
+ | RegionNameSource::SynthesizedFreeEnvRegion(span, _)
+ | RegionNameSource::AnonRegionFromUpvar(span, _)
+ | RegionNameSource::AnonRegionFromYieldTy(span, _)
+ | RegionNameSource::AnonRegionFromAsyncFn(span)
+ | RegionNameSource::AnonRegionFromImplSignature(span, _) => Some(span),
+ RegionNameSource::AnonRegionFromArgument(ref highlight)
+ | RegionNameSource::AnonRegionFromOutput(ref highlight, _) => match *highlight {
+ RegionNameHighlight::MatchedHirTy(span)
+ | RegionNameHighlight::MatchedAdtAndSegment(span)
+ | RegionNameHighlight::CannotMatchHirTy(span, _)
+ | RegionNameHighlight::Occluded(span, _) => Some(span),
+ },
+ }
+ }
+
+ pub(crate) fn highlight_region_name(&self, diag: &mut Diagnostic) {
+ match &self.source {
+ RegionNameSource::NamedFreeRegion(span)
+ | RegionNameSource::NamedEarlyBoundRegion(span) => {
+ diag.span_label(*span, format!("lifetime `{self}` defined here"));
+ }
+ RegionNameSource::SynthesizedFreeEnvRegion(span, note) => {
+ diag.span_label(*span, format!("lifetime `{self}` represents this closure's body"));
+ diag.note(*note);
+ }
+ RegionNameSource::AnonRegionFromArgument(RegionNameHighlight::CannotMatchHirTy(
+ span,
+ type_name,
+ )) => {
+ diag.span_label(*span, format!("has type `{type_name}`"));
+ }
+ RegionNameSource::AnonRegionFromArgument(RegionNameHighlight::MatchedHirTy(span))
+ | RegionNameSource::AnonRegionFromOutput(RegionNameHighlight::MatchedHirTy(span), _)
+ | RegionNameSource::AnonRegionFromAsyncFn(span) => {
+ diag.span_label(
+ *span,
+ format!("let's call the lifetime of this reference `{self}`"),
+ );
+ }
+ RegionNameSource::AnonRegionFromArgument(
+ RegionNameHighlight::MatchedAdtAndSegment(span),
+ )
+ | RegionNameSource::AnonRegionFromOutput(
+ RegionNameHighlight::MatchedAdtAndSegment(span),
+ _,
+ ) => {
+ diag.span_label(*span, format!("let's call this `{self}`"));
+ }
+ RegionNameSource::AnonRegionFromArgument(RegionNameHighlight::Occluded(
+ span,
+ type_name,
+ )) => {
+ diag.span_label(
+ *span,
+ format!("lifetime `{self}` appears in the type {type_name}"),
+ );
+ }
+ RegionNameSource::AnonRegionFromOutput(
+ RegionNameHighlight::Occluded(span, type_name),
+ mir_description,
+ ) => {
+ diag.span_label(
+ *span,
+ format!(
+ "return type{mir_description} `{type_name}` contains a lifetime `{self}`"
+ ),
+ );
+ }
+ RegionNameSource::AnonRegionFromUpvar(span, upvar_name) => {
+ diag.span_label(
+ *span,
+ format!("lifetime `{self}` appears in the type of `{upvar_name}`"),
+ );
+ }
+ RegionNameSource::AnonRegionFromOutput(
+ RegionNameHighlight::CannotMatchHirTy(span, type_name),
+ mir_description,
+ ) => {
+ diag.span_label(*span, format!("return type{mir_description} is {type_name}"));
+ }
+ RegionNameSource::AnonRegionFromYieldTy(span, type_name) => {
+ diag.span_label(*span, format!("yield type is {type_name}"));
+ }
+ RegionNameSource::AnonRegionFromImplSignature(span, location) => {
+ diag.span_label(
+ *span,
+ format!("lifetime `{self}` appears in the `impl`'s {location}"),
+ );
+ }
+ RegionNameSource::Static => {}
+ }
+ }
+}
+
+impl Display for RegionName {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.name)
+ }
+}
+
+impl<'tcx> MirBorrowckCtxt<'_, 'tcx> {
+ pub(crate) fn mir_def_id(&self) -> hir::def_id::LocalDefId {
+ self.body.source.def_id().expect_local()
+ }
+
+ pub(crate) fn mir_hir_id(&self) -> hir::HirId {
+ self.infcx.tcx.hir().local_def_id_to_hir_id(self.mir_def_id())
+ }
+
+ /// Generate a synthetic region named `'N`, where `N` is the next value of the counter. Then,
+ /// increment the counter.
+ ///
+ /// This is _not_ idempotent. Call `give_region_a_name` when possible.
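+    ///
+    /// A sketch of the behavior: two consecutive calls hand out two distinct
+    /// names (e.g. `'1`, then `'2`), so naming the same region twice must go
+    /// through the memoized `give_region_a_name` instead.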
+ fn synthesize_region_name(&self) -> Symbol {
+ let c = self.next_region_name.replace_with(|counter| *counter + 1);
+ Symbol::intern(&format!("'{:?}", c))
+ }
+
+ /// Maps from an internal MIR region vid to something that we can
+ /// report to the user. In some cases, the region vids will map
+ /// directly to lifetimes that the user has a name for (e.g.,
+ /// `'static`). But frequently they will not, in which case we
+ /// have to find some way to identify the lifetime to the user. To
+ /// that end, this function takes a "diagnostic" so that it can
+ /// create auxiliary notes as needed.
+ ///
+ /// The names are memoized, so this is both cheap to recompute and idempotent.
+ ///
+ /// Example (function arguments):
+ ///
+ /// Suppose we are trying to give a name to the lifetime of the
+ /// reference `x`:
+ ///
+ /// ```ignore (pseudo-rust)
+ /// fn foo(x: &u32) { .. }
+ /// ```
+ ///
+ /// This function would create a label like this:
+ ///
+ /// ```text
+ /// | fn foo(x: &u32) { .. }
+ /// ------- fully elaborated type of `x` is `&'1 u32`
+ /// ```
+ ///
+ /// and then return the name `'1` for us to use.
+ pub(crate) fn give_region_a_name(&self, fr: RegionVid) -> Option<RegionName> {
+ debug!(
+ "give_region_a_name(fr={:?}, counter={:?})",
+ fr,
+ self.next_region_name.try_borrow().unwrap()
+ );
+
+ assert!(self.regioncx.universal_regions().is_universal_region(fr));
+
+ if let Some(value) = self.region_names.try_borrow_mut().unwrap().get(&fr) {
+ return Some(value.clone());
+ }
+
+ let value = self
+ .give_name_from_error_region(fr)
+ .or_else(|| self.give_name_if_anonymous_region_appears_in_arguments(fr))
+ .or_else(|| self.give_name_if_anonymous_region_appears_in_upvars(fr))
+ .or_else(|| self.give_name_if_anonymous_region_appears_in_output(fr))
+ .or_else(|| self.give_name_if_anonymous_region_appears_in_yield_ty(fr))
+ .or_else(|| self.give_name_if_anonymous_region_appears_in_impl_signature(fr));
+
+ if let Some(ref value) = value {
+ self.region_names.try_borrow_mut().unwrap().insert(fr, value.clone());
+ }
+
+ debug!("give_region_a_name: gave name {:?}", value);
+ value
+ }
+
+ /// Checks for the case where `fr` maps to something that the
+ /// *user* has a name for. In that case, we'll be able to map
+    /// `fr` to a `Region<'tcx>`, and that region will be one of the
+ /// named variants.
+ #[tracing::instrument(level = "trace", skip(self))]
+ fn give_name_from_error_region(&self, fr: RegionVid) -> Option<RegionName> {
+ let error_region = self.to_error_region(fr)?;
+
+ let tcx = self.infcx.tcx;
+
+ debug!("give_region_a_name: error_region = {:?}", error_region);
+ match *error_region {
+ ty::ReEarlyBound(ebr) => {
+ if ebr.has_name() {
+ let span = tcx.hir().span_if_local(ebr.def_id).unwrap_or(DUMMY_SP);
+ Some(RegionName {
+ name: ebr.name,
+ source: RegionNameSource::NamedEarlyBoundRegion(span),
+ })
+ } else {
+ None
+ }
+ }
+
+ ty::ReStatic => {
+ Some(RegionName { name: kw::StaticLifetime, source: RegionNameSource::Static })
+ }
+
+ ty::ReFree(free_region) => match free_region.bound_region {
+ ty::BoundRegionKind::BrNamed(region_def_id, name) => {
+ // Get the span to point to, even if we don't use the name.
+ let span = tcx.hir().span_if_local(region_def_id).unwrap_or(DUMMY_SP);
+ debug!(
+ "bound region named: {:?}, is_named: {:?}",
+ name,
+ free_region.bound_region.is_named()
+ );
+
+ if free_region.bound_region.is_named() {
+ // A named region that is actually named.
+ Some(RegionName { name, source: RegionNameSource::NamedFreeRegion(span) })
+ } else if let hir::IsAsync::Async = tcx.asyncness(self.mir_hir_id().owner) {
+ // If we spuriously thought that the region is named, we should let the
+ // system generate a true name for error messages. Currently this can
+ // happen if we have an elided name in an async fn for example: the
+ // compiler will generate a region named `'_`, but reporting such a name is
+ // not actually useful, so we synthesize a name for it instead.
+ let name = self.synthesize_region_name();
+ Some(RegionName {
+ name,
+ source: RegionNameSource::AnonRegionFromAsyncFn(span),
+ })
+ } else {
+ None
+ }
+ }
+
+ ty::BoundRegionKind::BrEnv => {
+ let def_ty = self.regioncx.universal_regions().defining_ty;
+
+ let DefiningTy::Closure(_, substs) = def_ty else {
+ // Can't have BrEnv in functions, constants or generators.
+ bug!("BrEnv outside of closure.");
+ };
+ let hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. })
+ = tcx.hir().expect_expr(self.mir_hir_id()).kind
+ else {
+ bug!("Closure is not defined by a closure expr");
+ };
+ let region_name = self.synthesize_region_name();
+
+ let closure_kind_ty = substs.as_closure().kind_ty();
+ let note = match closure_kind_ty.to_opt_closure_kind() {
+ Some(ty::ClosureKind::Fn) => {
+ "closure implements `Fn`, so references to captured variables \
+ can't escape the closure"
+ }
+ Some(ty::ClosureKind::FnMut) => {
+ "closure implements `FnMut`, so references to captured variables \
+ can't escape the closure"
+ }
+ Some(ty::ClosureKind::FnOnce) => {
+ bug!("BrEnv in a `FnOnce` closure");
+ }
+ None => bug!("Closure kind not inferred in borrow check"),
+ };
+
+ Some(RegionName {
+ name: region_name,
+ source: RegionNameSource::SynthesizedFreeEnvRegion(fn_decl_span, note),
+ })
+ }
+
+ ty::BoundRegionKind::BrAnon(_) => None,
+ },
+
+ ty::ReLateBound(..)
+ | ty::ReVar(..)
+ | ty::RePlaceholder(..)
+ | ty::ReEmpty(_)
+ | ty::ReErased => None,
+ }
+ }
+
+    /// Finds an argument that contains `fr` and labels it with a fully
+ /// elaborated type, returning something like `'1`. Result looks
+ /// like:
+ ///
+ /// ```text
+ /// | fn foo(x: &u32) { .. }
+ /// ------- fully elaborated type of `x` is `&'1 u32`
+ /// ```
+ #[tracing::instrument(level = "trace", skip(self))]
+ fn give_name_if_anonymous_region_appears_in_arguments(
+ &self,
+ fr: RegionVid,
+ ) -> Option<RegionName> {
+ let implicit_inputs = self.regioncx.universal_regions().defining_ty.implicit_inputs();
+ let argument_index = self.regioncx.get_argument_index_for_region(self.infcx.tcx, fr)?;
+
+ let arg_ty = self.regioncx.universal_regions().unnormalized_input_tys
+ [implicit_inputs + argument_index];
+ let (_, span) = self.regioncx.get_argument_name_and_span_for_region(
+ &self.body,
+ &self.local_names,
+ argument_index,
+ );
+
+ let highlight = self
+ .get_argument_hir_ty_for_highlighting(argument_index)
+ .and_then(|arg_hir_ty| self.highlight_if_we_can_match_hir_ty(fr, arg_ty, arg_hir_ty))
+ .unwrap_or_else(|| {
+ // `highlight_if_we_cannot_match_hir_ty` needs to know the number we will give to
+ // the anonymous region. If it succeeds, the `synthesize_region_name` call below
+ // will increment the counter, "reserving" the number we just used.
+ let counter = *self.next_region_name.try_borrow().unwrap();
+ self.highlight_if_we_cannot_match_hir_ty(fr, arg_ty, span, counter)
+ });
+
+ Some(RegionName {
+ name: self.synthesize_region_name(),
+ source: RegionNameSource::AnonRegionFromArgument(highlight),
+ })
+ }
+
+ fn get_argument_hir_ty_for_highlighting(
+ &self,
+ argument_index: usize,
+ ) -> Option<&hir::Ty<'tcx>> {
+ let fn_decl = self.infcx.tcx.hir().fn_decl_by_hir_id(self.mir_hir_id())?;
+ let argument_hir_ty: &hir::Ty<'_> = fn_decl.inputs.get(argument_index)?;
+ match argument_hir_ty.kind {
+ // This indicates a variable with no type annotation, like
+ // `|x|`... in that case, we can't highlight the type but
+ // must highlight the variable.
+ // NOTE(eddyb) this is handled in/by the sole caller
+ // (`give_name_if_anonymous_region_appears_in_arguments`).
+ hir::TyKind::Infer => None,
+
+ _ => Some(argument_hir_ty),
+ }
+ }
+
+ /// Attempts to highlight the specific part of a type in an argument
+ /// that has no type annotation.
+ /// For example, we might produce an annotation like this:
+ ///
+ /// ```text
+ /// | foo(|a, b| b)
+ /// | - -
+ /// | | |
+ /// | | has type `&'1 u32`
+ /// | has type `&'2 u32`
+ /// ```
+ fn highlight_if_we_cannot_match_hir_ty(
+ &self,
+ needle_fr: RegionVid,
+ ty: Ty<'tcx>,
+ span: Span,
+ counter: usize,
+ ) -> RegionNameHighlight {
+ let mut highlight = RegionHighlightMode::new(self.infcx.tcx);
+ highlight.highlighting_region_vid(needle_fr, counter);
+ let type_name =
+ self.infcx.extract_inference_diagnostics_data(ty.into(), Some(highlight)).name;
+
+ debug!(
+ "highlight_if_we_cannot_match_hir_ty: type_name={:?} needle_fr={:?}",
+ type_name, needle_fr
+ );
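+        // A sketch of the two outcomes: for `x: &u32` the elaborated name is
+        // `&'1 u32`, which contains `'1`, so we can point at the type
+        // directly; for an opaque type the printed name may be just
+        // `impl Future`, with no `'1` anywhere, so we fall back to
+        // describing the whole type.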
+ if type_name.contains(&format!("'{counter}")) {
+ // Only add a label if we can confirm that a region was labelled.
+ RegionNameHighlight::CannotMatchHirTy(span, type_name)
+ } else {
+ RegionNameHighlight::Occluded(span, type_name)
+ }
+ }
+
+ /// Attempts to highlight the specific part of a type annotation
+ /// that contains the anonymous reference we want to give a name
+ /// to. For example, we might produce an annotation like this:
+ ///
+ /// ```text
+ /// | fn a<T>(items: &[T]) -> Box<dyn Iterator<Item = &T>> {
+ /// | - let's call the lifetime of this reference `'1`
+ /// ```
+ ///
+    /// The way this works is that we match up `ty`, which is
+ /// a `Ty<'tcx>` (the internal form of the type) with
+ /// `hir_ty`, a `hir::Ty` (the syntax of the type
+ /// annotation). We are descending through the types stepwise,
+ /// looking in to find the region `needle_fr` in the internal
+ /// type. Once we find that, we can use the span of the `hir::Ty`
+ /// to add the highlight.
+ ///
+ /// This is a somewhat imperfect process, so along the way we also
+ /// keep track of the **closest** type we've found. If we fail to
+ /// find the exact `&` or `'_` to highlight, then we may fall back
+ /// to highlighting that closest type instead.
+ fn highlight_if_we_can_match_hir_ty(
+ &self,
+ needle_fr: RegionVid,
+ ty: Ty<'tcx>,
+ hir_ty: &hir::Ty<'_>,
+ ) -> Option<RegionNameHighlight> {
+ let search_stack: &mut Vec<(Ty<'tcx>, &hir::Ty<'_>)> = &mut vec![(ty, hir_ty)];
+
+ while let Some((ty, hir_ty)) = search_stack.pop() {
+ match (ty.kind(), &hir_ty.kind) {
+ // Check if the `ty` is `&'X ..` where `'X`
+ // is the region we are looking for -- if so, and we have a `&T`
+ // on the RHS, then we want to highlight the `&` like so:
+ //
+ // &
+ // - let's call the lifetime of this reference `'1`
+ (
+ ty::Ref(region, referent_ty, _),
+ hir::TyKind::Rptr(_lifetime, referent_hir_ty),
+ ) => {
+ if region.to_region_vid() == needle_fr {
+ // Just grab the first character, the `&`.
+ let source_map = self.infcx.tcx.sess.source_map();
+ let ampersand_span = source_map.start_point(hir_ty.span);
+
+ return Some(RegionNameHighlight::MatchedHirTy(ampersand_span));
+ }
+
+ // Otherwise, let's descend into the referent types.
+ search_stack.push((*referent_ty, &referent_hir_ty.ty));
+ }
+
+ // Match up something like `Foo<'1>`
+ (
+ ty::Adt(_adt_def, substs),
+ hir::TyKind::Path(hir::QPath::Resolved(None, path)),
+ ) => {
+ match path.res {
+ // Type parameters of the type alias have no reason to
+ // be the same as those of the ADT.
+ // FIXME: We should be able to do something similar to
+ // match_adt_and_segment in this case.
+ Res::Def(DefKind::TyAlias, _) => (),
+ _ => {
+ if let Some(last_segment) = path.segments.last() {
+ if let Some(highlight) = self.match_adt_and_segment(
+ substs,
+ needle_fr,
+ last_segment,
+ search_stack,
+ ) {
+ return Some(highlight);
+ }
+ }
+ }
+ }
+ }
+
+ // The following cases don't have lifetimes, so we
+ // just worry about trying to match up the rustc type
+ // with the HIR types:
+ (&ty::Tuple(elem_tys), hir::TyKind::Tup(elem_hir_tys)) => {
+ search_stack.extend(iter::zip(elem_tys, *elem_hir_tys));
+ }
+
+ (ty::Slice(elem_ty), hir::TyKind::Slice(elem_hir_ty))
+ | (ty::Array(elem_ty, _), hir::TyKind::Array(elem_hir_ty, _)) => {
+ search_stack.push((*elem_ty, elem_hir_ty));
+ }
+
+ (ty::RawPtr(mut_ty), hir::TyKind::Ptr(mut_hir_ty)) => {
+ search_stack.push((mut_ty.ty, &mut_hir_ty.ty));
+ }
+
+ _ => {
+ // FIXME there are other cases that we could trace
+ }
+ }
+ }
+
+ None
+ }
+
+ /// We've found an enum/struct/union type with the substitutions
+ /// `substs` and -- in the HIR -- a path type with the final
+ /// segment `last_segment`. Try to find a `'_` to highlight in
+ /// the generic args (or, if not, to produce new zipped pairs of
+ /// types+hir to search through).
+ fn match_adt_and_segment<'hir>(
+ &self,
+ substs: SubstsRef<'tcx>,
+ needle_fr: RegionVid,
+ last_segment: &'hir hir::PathSegment<'hir>,
+ search_stack: &mut Vec<(Ty<'tcx>, &'hir hir::Ty<'hir>)>,
+ ) -> Option<RegionNameHighlight> {
+ // Did the user give explicit arguments? (e.g., `Foo<..>`)
+ let args = last_segment.args.as_ref()?;
+ let lifetime =
+ self.try_match_adt_and_generic_args(substs, needle_fr, args, search_stack)?;
+ match lifetime.name {
+ hir::LifetimeName::Param(_, hir::ParamName::Plain(_) | hir::ParamName::Error)
+ | hir::LifetimeName::Error
+ | hir::LifetimeName::Static => {
+ let lifetime_span = lifetime.span;
+ Some(RegionNameHighlight::MatchedAdtAndSegment(lifetime_span))
+ }
+
+ hir::LifetimeName::Param(_, hir::ParamName::Fresh)
+ | hir::LifetimeName::ImplicitObjectLifetimeDefault
+ | hir::LifetimeName::Infer => {
+ // In this case, the user left off the lifetime; so
+ // they wrote something like:
+ //
+ // ```
+ // x: Foo<T>
+ // ```
+ //
+ // where the fully elaborated form is `Foo<'_, '1,
+ // T>`. We don't consider this a match; instead we let
+ // the "fully elaborated" type fallback above handle
+ // it.
+ None
+ }
+ }
+ }
+
+ /// We've found an enum/struct/union type with the substitutions
+ /// `substs` and -- in the HIR -- a path with the generic
+ /// arguments `args`. If `needle_fr` appears in the args, return
+ /// the `hir::Lifetime` that corresponds to it. If not, push onto
+ /// `search_stack` the types+hir to search through.
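+    ///
+    /// A sketch: for `x: Foo<'_, u32>` with substs `['1, u32]`, zipping pairs
+    /// the written `'_` with `'1` (returned if `'1` is the needle) and pushes
+    /// the `u32` type/HIR pair onto `search_stack` for further matching.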
+ fn try_match_adt_and_generic_args<'hir>(
+ &self,
+ substs: SubstsRef<'tcx>,
+ needle_fr: RegionVid,
+ args: &'hir hir::GenericArgs<'hir>,
+ search_stack: &mut Vec<(Ty<'tcx>, &'hir hir::Ty<'hir>)>,
+ ) -> Option<&'hir hir::Lifetime> {
+ for (kind, hir_arg) in iter::zip(substs, args.args) {
+ match (kind.unpack(), hir_arg) {
+ (GenericArgKind::Lifetime(r), hir::GenericArg::Lifetime(lt)) => {
+ if r.to_region_vid() == needle_fr {
+ return Some(lt);
+ }
+ }
+
+ (GenericArgKind::Type(ty), hir::GenericArg::Type(hir_ty)) => {
+ search_stack.push((ty, hir_ty));
+ }
+
+ (GenericArgKind::Const(_ct), hir::GenericArg::Const(_hir_ct)) => {
+ // Lifetimes cannot be found in consts, so we don't need
+ // to search anything here.
+ }
+
+ (
+ GenericArgKind::Lifetime(_)
+ | GenericArgKind::Type(_)
+ | GenericArgKind::Const(_),
+ _,
+ ) => {
+ // HIR lowering sometimes doesn't catch this in erroneous
+ // programs, so we need to use delay_span_bug here. See #82126.
+ self.infcx.tcx.sess.delay_span_bug(
+ hir_arg.span(),
+ &format!("unmatched subst and hir arg: found {:?} vs {:?}", kind, hir_arg),
+ );
+ }
+ }
+ }
+
+ None
+ }
+
+    /// Finds a closure upvar that contains `fr` and labels it with a
+ /// fully elaborated type, returning something like `'1`. Result
+ /// looks like:
+ ///
+ /// ```text
+ /// | let x = Some(&22);
+ /// - fully elaborated type of `x` is `Option<&'1 u32>`
+ /// ```
+ #[tracing::instrument(level = "trace", skip(self))]
+ fn give_name_if_anonymous_region_appears_in_upvars(&self, fr: RegionVid) -> Option<RegionName> {
+ let upvar_index = self.regioncx.get_upvar_index_for_region(self.infcx.tcx, fr)?;
+ let (upvar_name, upvar_span) = self.regioncx.get_upvar_name_and_span_for_region(
+ self.infcx.tcx,
+ &self.upvars,
+ upvar_index,
+ );
+ let region_name = self.synthesize_region_name();
+
+ Some(RegionName {
+ name: region_name,
+ source: RegionNameSource::AnonRegionFromUpvar(upvar_span, upvar_name),
+ })
+ }
+
+    /// Checks for an anonymous region appearing in the (closure) return type.
+    /// The defining type must be a closure since, in a free fn, such a region
+    /// would have to either also appear in an argument (if using elision)
+    /// or be early bound (named, not just in an argument).
+ #[tracing::instrument(level = "trace", skip(self))]
+ fn give_name_if_anonymous_region_appears_in_output(&self, fr: RegionVid) -> Option<RegionName> {
+ let tcx = self.infcx.tcx;
+ let hir = tcx.hir();
+
+ let return_ty = self.regioncx.universal_regions().unnormalized_output_ty;
+ debug!("give_name_if_anonymous_region_appears_in_output: return_ty = {:?}", return_ty);
+ if !tcx.any_free_region_meets(&return_ty, |r| r.to_region_vid() == fr) {
+ return None;
+ }
+
+ let mir_hir_id = self.mir_hir_id();
+
+ let (return_span, mir_description, hir_ty) = match hir.get(mir_hir_id) {
+ hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { fn_decl, body, fn_decl_span, .. }),
+ ..
+ }) => {
+ let (mut span, mut hir_ty) = match fn_decl.output {
+ hir::FnRetTy::DefaultReturn(_) => {
+ (tcx.sess.source_map().end_point(fn_decl_span), None)
+ }
+ hir::FnRetTy::Return(hir_ty) => (fn_decl.output.span(), Some(hir_ty)),
+ };
+ let mir_description = match hir.body(body).generator_kind {
+ Some(hir::GeneratorKind::Async(gen)) => match gen {
+ hir::AsyncGeneratorKind::Block => " of async block",
+ hir::AsyncGeneratorKind::Closure => " of async closure",
+ hir::AsyncGeneratorKind::Fn => {
+ let parent_item = hir.get_by_def_id(hir.get_parent_item(mir_hir_id));
+ let output = &parent_item
+ .fn_decl()
+ .expect("generator lowered from async fn should be in fn")
+ .output;
+ span = output.span();
+ if let hir::FnRetTy::Return(ret) = output {
+ hir_ty = Some(self.get_future_inner_return_ty(*ret));
+ }
+ " of async function"
+ }
+ },
+ Some(hir::GeneratorKind::Gen) => " of generator",
+ None => " of closure",
+ };
+ (span, mir_description, hir_ty)
+ }
+ node => match node.fn_decl() {
+ Some(fn_decl) => {
+ let hir_ty = match fn_decl.output {
+ hir::FnRetTy::DefaultReturn(_) => None,
+ hir::FnRetTy::Return(ty) => Some(ty),
+ };
+ (fn_decl.output.span(), "", hir_ty)
+ }
+ None => (self.body.span, "", None),
+ },
+ };
+
+ let highlight = hir_ty
+ .and_then(|hir_ty| self.highlight_if_we_can_match_hir_ty(fr, return_ty, hir_ty))
+ .unwrap_or_else(|| {
+ // `highlight_if_we_cannot_match_hir_ty` needs to know the number we will give to
+ // the anonymous region. If it succeeds, the `synthesize_region_name` call below
+ // will increment the counter, "reserving" the number we just used.
+ let counter = *self.next_region_name.try_borrow().unwrap();
+ self.highlight_if_we_cannot_match_hir_ty(fr, return_ty, return_span, counter)
+ });
+
+ Some(RegionName {
+ name: self.synthesize_region_name(),
+ source: RegionNameSource::AnonRegionFromOutput(highlight, mir_description),
+ })
+ }
+
+ /// From the [`hir::Ty`] of an async function's lowered return type,
+ /// retrieve the `hir::Ty` representing the type the user originally wrote.
+ ///
+ /// e.g. given the function:
+ ///
+ /// ```
+ /// async fn foo() -> i32 { 2 }
+ /// ```
+ ///
+    /// This function, given the lowered return type of `foo`, an [`OpaqueDef`] that implements `Future<Output=i32>`,
+ /// returns the `i32`.
+ ///
+ /// [`OpaqueDef`]: hir::TyKind::OpaqueDef
+ fn get_future_inner_return_ty(&self, hir_ty: &'tcx hir::Ty<'tcx>) -> &'tcx hir::Ty<'tcx> {
+ let hir = self.infcx.tcx.hir();
+
+ let hir::TyKind::OpaqueDef(id, _) = hir_ty.kind else {
+ span_bug!(
+ hir_ty.span,
+ "lowered return type of async fn is not OpaqueDef: {:?}",
+ hir_ty
+ );
+ };
+ let opaque_ty = hir.item(id);
+ if let hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ bounds:
+ [
+ hir::GenericBound::LangItemTrait(
+ hir::LangItem::Future,
+ _,
+ _,
+ hir::GenericArgs {
+ bindings:
+ [
+ hir::TypeBinding {
+ ident: Ident { name: sym::Output, .. },
+ kind:
+ hir::TypeBindingKind::Equality { term: hir::Term::Ty(ty) },
+ ..
+ },
+ ],
+ ..
+ },
+ ),
+ ],
+ ..
+ }) = opaque_ty.kind
+ {
+ ty
+ } else {
+ span_bug!(
+ hir_ty.span,
+ "bounds from lowered return type of async fn did not match expected format: {:?}",
+ opaque_ty
+ );
+ }
+ }
+
+ #[tracing::instrument(level = "trace", skip(self))]
+ fn give_name_if_anonymous_region_appears_in_yield_ty(
+ &self,
+ fr: RegionVid,
+ ) -> Option<RegionName> {
+ // Note: generators from `async fn` yield `()`, so we don't have to
+ // worry about them here.
+ let yield_ty = self.regioncx.universal_regions().yield_ty?;
+ debug!("give_name_if_anonymous_region_appears_in_yield_ty: yield_ty = {:?}", yield_ty);
+
+ let tcx = self.infcx.tcx;
+
+ if !tcx.any_free_region_meets(&yield_ty, |r| r.to_region_vid() == fr) {
+ return None;
+ }
+
+ let mut highlight = RegionHighlightMode::new(tcx);
+ highlight.highlighting_region_vid(fr, *self.next_region_name.try_borrow().unwrap());
+ let type_name =
+ self.infcx.extract_inference_diagnostics_data(yield_ty.into(), Some(highlight)).name;
+
+ let yield_span = match tcx.hir().get(self.mir_hir_id()) {
+ hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. }),
+ ..
+ }) => tcx.sess.source_map().end_point(fn_decl_span),
+ _ => self.body.span,
+ };
+
+ debug!(
+ "give_name_if_anonymous_region_appears_in_yield_ty: \
+ type_name = {:?}, yield_span = {:?}",
+ yield_span, type_name,
+ );
+
+ Some(RegionName {
+ name: self.synthesize_region_name(),
+ source: RegionNameSource::AnonRegionFromYieldTy(yield_span, type_name),
+ })
+ }
+
+ fn give_name_if_anonymous_region_appears_in_impl_signature(
+ &self,
+ fr: RegionVid,
+ ) -> Option<RegionName> {
+ let ty::ReEarlyBound(region) = *self.to_error_region(fr)? else {
+ return None;
+ };
+ if region.has_name() {
+ return None;
+ };
+
+ let tcx = self.infcx.tcx;
+ let body_parent_did = tcx.opt_parent(self.mir_def_id().to_def_id())?;
+ if tcx.parent(region.def_id) != body_parent_did
+ || tcx.def_kind(body_parent_did) != DefKind::Impl
+ {
+ return None;
+ }
+
+ let mut found = false;
+ tcx.fold_regions(tcx.type_of(body_parent_did), |r: ty::Region<'tcx>, _| {
+ if *r == ty::ReEarlyBound(region) {
+ found = true;
+ }
+ r
+ });
+
+ Some(RegionName {
+ name: self.synthesize_region_name(),
+ source: RegionNameSource::AnonRegionFromImplSignature(
+ tcx.def_span(region.def_id),
+ // FIXME(compiler-errors): Does this ever actually show up
+ // anywhere other than the self type? I couldn't create an
+ // example of a `'_` in the impl's trait being referenceable.
+ if found { "self type" } else { "header" },
+ ),
+ })
+ }
+}
diff --git a/compiler/rustc_borrowck/src/diagnostics/var_name.rs b/compiler/rustc_borrowck/src/diagnostics/var_name.rs
new file mode 100644
index 000000000..9ba29f04b
--- /dev/null
+++ b/compiler/rustc_borrowck/src/diagnostics/var_name.rs
@@ -0,0 +1,133 @@
+use crate::Upvar;
+use crate::{nll::ToRegionVid, region_infer::RegionInferenceContext};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::{Body, Local};
+use rustc_middle::ty::{RegionVid, TyCtxt};
+use rustc_span::source_map::Span;
+use rustc_span::symbol::Symbol;
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+ pub(crate) fn get_var_name_and_span_for_region(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ local_names: &IndexVec<Local, Option<Symbol>>,
+ upvars: &[Upvar<'tcx>],
+ fr: RegionVid,
+ ) -> Option<(Option<Symbol>, Span)> {
+ debug!("get_var_name_and_span_for_region(fr={:?})", fr);
+ assert!(self.universal_regions().is_universal_region(fr));
+
+ debug!("get_var_name_and_span_for_region: attempting upvar");
+ self.get_upvar_index_for_region(tcx, fr)
+ .map(|index| {
+ // FIXME(project-rfc-2229#8): Use place span for diagnostics
+ let (name, span) = self.get_upvar_name_and_span_for_region(tcx, upvars, index);
+ (Some(name), span)
+ })
+ .or_else(|| {
+ debug!("get_var_name_and_span_for_region: attempting argument");
+ self.get_argument_index_for_region(tcx, fr).map(|index| {
+ self.get_argument_name_and_span_for_region(body, local_names, index)
+ })
+ })
+ }
+
+    /// Searches the upvars (if any) to find one that references `fr`. Returns its index.
+ pub(crate) fn get_upvar_index_for_region(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ fr: RegionVid,
+ ) -> Option<usize> {
+ let upvar_index =
+ self.universal_regions().defining_ty.upvar_tys().position(|upvar_ty| {
+ debug!("get_upvar_index_for_region: upvar_ty={:?}", upvar_ty);
+ tcx.any_free_region_meets(&upvar_ty, |r| {
+ let r = r.to_region_vid();
+ debug!("get_upvar_index_for_region: r={:?} fr={:?}", r, fr);
+ r == fr
+ })
+ })?;
+
+ let upvar_ty = self.universal_regions().defining_ty.upvar_tys().nth(upvar_index);
+
+ debug!(
+ "get_upvar_index_for_region: found {:?} in upvar {} which has type {:?}",
+ fr, upvar_index, upvar_ty,
+ );
+
+ Some(upvar_index)
+ }
+
+ /// Given the index of an upvar, finds its name and the span from where it was
+ /// declared.
+ pub(crate) fn get_upvar_name_and_span_for_region(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ upvars: &[Upvar<'tcx>],
+ upvar_index: usize,
+ ) -> (Symbol, Span) {
+ let upvar_hir_id = upvars[upvar_index].place.get_root_variable();
+ debug!("get_upvar_name_and_span_for_region: upvar_hir_id={:?}", upvar_hir_id);
+
+ let upvar_name = tcx.hir().name(upvar_hir_id);
+ let upvar_span = tcx.hir().span(upvar_hir_id);
+ debug!(
+ "get_upvar_name_and_span_for_region: upvar_name={:?} upvar_span={:?}",
+ upvar_name, upvar_span
+ );
+
+ (upvar_name, upvar_span)
+ }
+
+    /// Searches the argument types for one that references `fr` (which should be a free region).
+    /// Returns `Some(_)` with the index of the input if one is found.
+    ///
+    /// N.B., in the case of a closure, the index is into the signature as seen by the
+    /// user - in particular, index 0 is not the implicit self parameter.
+ pub(crate) fn get_argument_index_for_region(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ fr: RegionVid,
+ ) -> Option<usize> {
+ let implicit_inputs = self.universal_regions().defining_ty.implicit_inputs();
+ let argument_index =
+ self.universal_regions().unnormalized_input_tys.iter().skip(implicit_inputs).position(
+ |arg_ty| {
+ debug!("get_argument_index_for_region: arg_ty = {:?}", arg_ty);
+ tcx.any_free_region_meets(arg_ty, |r| r.to_region_vid() == fr)
+ },
+ )?;
+
+ debug!(
+ "get_argument_index_for_region: found {:?} in argument {} which has type {:?}",
+ fr,
+ argument_index,
+ self.universal_regions().unnormalized_input_tys[argument_index],
+ );
+
+ Some(argument_index)
+ }
+
+ /// Given the index of an argument, finds its name (if any) and the span from where it was
+ /// declared.
+ pub(crate) fn get_argument_name_and_span_for_region(
+ &self,
+ body: &Body<'tcx>,
+ local_names: &IndexVec<Local, Option<Symbol>>,
+ argument_index: usize,
+ ) -> (Option<Symbol>, Span) {
+ let implicit_inputs = self.universal_regions().defining_ty.implicit_inputs();
+ let argument_local = Local::new(implicit_inputs + argument_index + 1);
+ debug!("get_argument_name_and_span_for_region: argument_local={:?}", argument_local);
+
+ let argument_name = local_names[argument_local];
+ let argument_span = body.local_decls[argument_local].source_info.span;
+ debug!(
+ "get_argument_name_and_span_for_region: argument_name={:?} argument_span={:?}",
+ argument_name, argument_span
+ );
+
+ (argument_name, argument_span)
+ }
+}
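The `+ 1` above encodes the MIR local-numbering convention: local `_0` is the return place, any implicit inputs (e.g. a closure's environment) come next, and user-visible arguments follow. A standalone sketch of just that arithmetic (the helper is hypothetical):

```rust
// Mirrors `Local::new(implicit_inputs + argument_index + 1)` above:
// local 0 is the return place, then implicit inputs, then user arguments.
fn argument_local(implicit_inputs: usize, argument_index: usize) -> usize {
    implicit_inputs + argument_index + 1
}

fn main() {
    // Plain fn (no implicit inputs): user argument 0 lives in MIR local _1.
    assert_eq!(argument_local(0, 0), 1);
    // Closure with one implicit input: user argument 0 lives in _2.
    assert_eq!(argument_local(1, 0), 2);
}
```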
diff --git a/compiler/rustc_borrowck/src/facts.rs b/compiler/rustc_borrowck/src/facts.rs
new file mode 100644
index 000000000..22134d5a7
--- /dev/null
+++ b/compiler/rustc_borrowck/src/facts.rs
@@ -0,0 +1,212 @@
+use crate::location::{LocationIndex, LocationTable};
+use crate::BorrowIndex;
+use polonius_engine::AllFacts as PoloniusFacts;
+use polonius_engine::Atom;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::Local;
+use rustc_middle::ty::{RegionVid, TyCtxt};
+use rustc_mir_dataflow::move_paths::MovePathIndex;
+use std::error::Error;
+use std::fmt::Debug;
+use std::fs::{self, File};
+use std::io::{BufWriter, Write};
+use std::path::Path;
+
+#[derive(Copy, Clone, Debug)]
+pub struct RustcFacts;
+
+impl polonius_engine::FactTypes for RustcFacts {
+ type Origin = RegionVid;
+ type Loan = BorrowIndex;
+ type Point = LocationIndex;
+ type Variable = Local;
+ type Path = MovePathIndex;
+}
+
+pub type AllFacts = PoloniusFacts<RustcFacts>;
+
+pub(crate) trait AllFactsExt {
+ /// Returns `true` if there is a need to gather `AllFacts` given the
+ /// current `-Z` flags.
+ fn enabled(tcx: TyCtxt<'_>) -> bool;
+
+ fn write_to_dir(
+ &self,
+ dir: impl AsRef<Path>,
+ location_table: &LocationTable,
+ ) -> Result<(), Box<dyn Error>>;
+}
+
+impl AllFactsExt for AllFacts {
+    /// Returns `true` if either `-Z nll-facts` or `-Z polonius` is enabled.
+ fn enabled(tcx: TyCtxt<'_>) -> bool {
+ tcx.sess.opts.unstable_opts.nll_facts || tcx.sess.opts.unstable_opts.polonius
+ }
+
+ fn write_to_dir(
+ &self,
+ dir: impl AsRef<Path>,
+ location_table: &LocationTable,
+ ) -> Result<(), Box<dyn Error>> {
+ let dir: &Path = dir.as_ref();
+ fs::create_dir_all(dir)?;
+ let wr = FactWriter { location_table, dir };
+ macro_rules! write_facts_to_path {
+ ($wr:ident . write_facts_to_path($this:ident . [
+ $($field:ident,)*
+ ])) => {
+ $(
+ $wr.write_facts_to_path(
+ &$this.$field,
+ &format!("{}.facts", stringify!($field))
+ )?;
+ )*
+ }
+ }
+ write_facts_to_path! {
+ wr.write_facts_to_path(self.[
+ loan_issued_at,
+ universal_region,
+ cfg_edge,
+ loan_killed_at,
+ subset_base,
+ loan_invalidated_at,
+ var_used_at,
+ var_defined_at,
+ var_dropped_at,
+ use_of_var_derefs_origin,
+ drop_of_var_derefs_origin,
+ child_path,
+ path_is_var,
+ path_assigned_at_base,
+ path_moved_at_base,
+ path_accessed_at_base,
+ known_placeholder_subset,
+ placeholder,
+ ])
+ }
+ Ok(())
+ }
+}
+
+impl Atom for BorrowIndex {
+ fn index(self) -> usize {
+ Idx::index(self)
+ }
+}
+
+impl Atom for LocationIndex {
+ fn index(self) -> usize {
+ Idx::index(self)
+ }
+}
+
+struct FactWriter<'w> {
+ location_table: &'w LocationTable,
+ dir: &'w Path,
+}
+
+impl<'w> FactWriter<'w> {
+ fn write_facts_to_path<T>(&self, rows: &[T], file_name: &str) -> Result<(), Box<dyn Error>>
+ where
+ T: FactRow,
+ {
+ let file = &self.dir.join(file_name);
+ let mut file = BufWriter::new(File::create(file)?);
+ for row in rows {
+ row.write(&mut file, self.location_table)?;
+ }
+ Ok(())
+ }
+}
+
+trait FactRow {
+ fn write(
+ &self,
+ out: &mut dyn Write,
+ location_table: &LocationTable,
+ ) -> Result<(), Box<dyn Error>>;
+}
+
+impl FactRow for RegionVid {
+ fn write(
+ &self,
+ out: &mut dyn Write,
+ location_table: &LocationTable,
+ ) -> Result<(), Box<dyn Error>> {
+ write_row(out, location_table, &[self])
+ }
+}
+
+impl<A, B> FactRow for (A, B)
+where
+ A: FactCell,
+ B: FactCell,
+{
+ fn write(
+ &self,
+ out: &mut dyn Write,
+ location_table: &LocationTable,
+ ) -> Result<(), Box<dyn Error>> {
+ write_row(out, location_table, &[&self.0, &self.1])
+ }
+}
+
+impl<A, B, C> FactRow for (A, B, C)
+where
+ A: FactCell,
+ B: FactCell,
+ C: FactCell,
+{
+ fn write(
+ &self,
+ out: &mut dyn Write,
+ location_table: &LocationTable,
+ ) -> Result<(), Box<dyn Error>> {
+ write_row(out, location_table, &[&self.0, &self.1, &self.2])
+ }
+}
+
+impl<A, B, C, D> FactRow for (A, B, C, D)
+where
+ A: FactCell,
+ B: FactCell,
+ C: FactCell,
+ D: FactCell,
+{
+ fn write(
+ &self,
+ out: &mut dyn Write,
+ location_table: &LocationTable,
+ ) -> Result<(), Box<dyn Error>> {
+ write_row(out, location_table, &[&self.0, &self.1, &self.2, &self.3])
+ }
+}
+
+fn write_row(
+ out: &mut dyn Write,
+ location_table: &LocationTable,
+ columns: &[&dyn FactCell],
+) -> Result<(), Box<dyn Error>> {
+ for (index, c) in columns.iter().enumerate() {
+ let tail = if index == columns.len() - 1 { "\n" } else { "\t" };
+ write!(out, "{:?}{}", c.to_string(location_table), tail)?;
+ }
+ Ok(())
+}
+
+trait FactCell {
+ fn to_string(&self, location_table: &LocationTable) -> String;
+}
+
+impl<A: Debug> FactCell for A {
+ default fn to_string(&self, _location_table: &LocationTable) -> String {
+ format!("{:?}", self)
+ }
+}
+
+impl FactCell for LocationIndex {
+ fn to_string(&self, location_table: &LocationTable) -> String {
+ format!("{:?}", location_table.to_location(*self))
+ }
+}
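Each `.facts` file is therefore a TSV of `Debug`-rendered atoms, one fact per line; note that `write_row` applies `{:?}` to the already-rendered `String`, which is what leaves quotes around each column in the output. A standalone sketch of the row format, with made-up values:

```rust
use std::fmt::Debug;
use std::io::{self, Write};

// Mirrors `write_row` above: Debug-render each column, separate columns
// with tabs, and end the row with a newline.
fn write_row(out: &mut dyn Write, columns: &[&dyn Debug]) -> io::Result<()> {
    for (index, c) in columns.iter().enumerate() {
        let tail = if index == columns.len() - 1 { "\n" } else { "\t" };
        write!(out, "{:?}{}", c, tail)?;
    }
    Ok(())
}

fn main() -> io::Result<()> {
    let mut buf = Vec::new();
    // e.g. an (Origin, Loan, Point) triple as it might land in a .facts file
    write_row(&mut buf, &[&"'_#5r", &"bw3", &"Mid(bb0[2])"])?;
    assert_eq!(buf, b"\"'_#5r\"\t\"bw3\"\t\"Mid(bb0[2])\"\n");
    io::stdout().write_all(&buf)
}
```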
diff --git a/compiler/rustc_borrowck/src/invalidation.rs b/compiler/rustc_borrowck/src/invalidation.rs
new file mode 100644
index 000000000..ec521b1cf
--- /dev/null
+++ b/compiler/rustc_borrowck/src/invalidation.rs
@@ -0,0 +1,442 @@
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{BasicBlock, Body, Location, Place, Rvalue};
+use rustc_middle::mir::{BorrowKind, Mutability, Operand};
+use rustc_middle::mir::{InlineAsmOperand, Terminator, TerminatorKind};
+use rustc_middle::mir::{Statement, StatementKind};
+use rustc_middle::ty::TyCtxt;
+
+use crate::{
+ borrow_set::BorrowSet, facts::AllFacts, location::LocationTable, path_utils::*, AccessDepth,
+ Activation, ArtificialField, BorrowIndex, Deep, LocalMutationIsAllowed, Read, ReadKind,
+ ReadOrWrite, Reservation, Shallow, Write, WriteKind,
+};
+
+pub(super) fn generate_invalidates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ all_facts: &mut Option<AllFacts>,
+ location_table: &LocationTable,
+ body: &Body<'tcx>,
+ borrow_set: &BorrowSet<'tcx>,
+) {
+ if all_facts.is_none() {
+ // Nothing to do if we don't have any facts
+ return;
+ }
+
+ if let Some(all_facts) = all_facts {
+ let _prof_timer = tcx.prof.generic_activity("polonius_fact_generation");
+ let dominators = body.basic_blocks.dominators();
+ let mut ig = InvalidationGenerator {
+ all_facts,
+ borrow_set,
+ tcx,
+ location_table,
+ body: &body,
+ dominators,
+ };
+ ig.visit_body(body);
+ }
+}
+
+struct InvalidationGenerator<'cx, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ all_facts: &'cx mut AllFacts,
+ location_table: &'cx LocationTable,
+ body: &'cx Body<'tcx>,
+ dominators: Dominators<BasicBlock>,
+ borrow_set: &'cx BorrowSet<'tcx>,
+}
+
+/// Visits the whole MIR and generates `loan_invalidated_at` facts.
+/// Most of the code implementing this was stolen from `borrow_check/mod.rs`.
+impl<'cx, 'tcx> Visitor<'tcx> for InvalidationGenerator<'cx, 'tcx> {
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ self.check_activations(location);
+
+ match &statement.kind {
+ StatementKind::Assign(box (lhs, rhs)) => {
+ self.consume_rvalue(location, rhs);
+
+ self.mutate_place(location, *lhs, Shallow(None));
+ }
+ StatementKind::FakeRead(box (_, _)) => {
+ // Only relevant for initialized/liveness/safety checks.
+ }
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ self.consume_operand(location, src);
+ self.consume_operand(location, dst);
+ self.consume_operand(location, count);
+ }
+ StatementKind::Nop
+ | StatementKind::Coverage(..)
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::StorageLive(..) => {
+                // `Nop`, `Coverage`, `AscribeUserType`, `Retag`, and `StorageLive`
+                // are irrelevant to borrow check.
+ }
+ StatementKind::StorageDead(local) => {
+ self.access_place(
+ location,
+ Place::from(*local),
+ (Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
+ LocalMutationIsAllowed::Yes,
+ );
+ }
+ StatementKind::Deinit(..) | StatementKind::SetDiscriminant { .. } => {
+ bug!("Statement not allowed in this MIR phase")
+ }
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ self.check_activations(location);
+
+ match &terminator.kind {
+ TerminatorKind::SwitchInt { ref discr, switch_ty: _, targets: _ } => {
+ self.consume_operand(location, discr);
+ }
+ TerminatorKind::Drop { place: drop_place, target: _, unwind: _ } => {
+ self.access_place(
+ location,
+ *drop_place,
+ (AccessDepth::Drop, Write(WriteKind::StorageDeadOrDrop)),
+ LocalMutationIsAllowed::Yes,
+ );
+ }
+ TerminatorKind::DropAndReplace {
+ place: drop_place,
+ value: ref new_value,
+ target: _,
+ unwind: _,
+ } => {
+ self.mutate_place(location, *drop_place, Deep);
+ self.consume_operand(location, new_value);
+ }
+ TerminatorKind::Call {
+ ref func,
+ ref args,
+ destination,
+ target: _,
+ cleanup: _,
+ from_hir_call: _,
+ fn_span: _,
+ } => {
+ self.consume_operand(location, func);
+ for arg in args {
+ self.consume_operand(location, arg);
+ }
+ self.mutate_place(location, *destination, Deep);
+ }
+ TerminatorKind::Assert { ref cond, expected: _, ref msg, target: _, cleanup: _ } => {
+ self.consume_operand(location, cond);
+ use rustc_middle::mir::AssertKind;
+ if let AssertKind::BoundsCheck { ref len, ref index } = *msg {
+ self.consume_operand(location, len);
+ self.consume_operand(location, index);
+ }
+ }
+ TerminatorKind::Yield { ref value, resume, resume_arg, drop: _ } => {
+ self.consume_operand(location, value);
+
+ // Invalidate all borrows of local places
+ let borrow_set = self.borrow_set;
+ let resume = self.location_table.start_index(resume.start_location());
+ for (i, data) in borrow_set.iter_enumerated() {
+ if borrow_of_local_data(data.borrowed_place) {
+ self.all_facts.loan_invalidated_at.push((resume, i));
+ }
+ }
+
+ self.mutate_place(location, *resume_arg, Deep);
+ }
+ TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
+ // Invalidate all borrows of local places
+ let borrow_set = self.borrow_set;
+ let start = self.location_table.start_index(location);
+ for (i, data) in borrow_set.iter_enumerated() {
+ if borrow_of_local_data(data.borrowed_place) {
+ self.all_facts.loan_invalidated_at.push((start, i));
+ }
+ }
+ }
+ TerminatorKind::InlineAsm {
+ template: _,
+ ref operands,
+ options: _,
+ line_spans: _,
+ destination: _,
+ cleanup: _,
+ } => {
+ for op in operands {
+ match *op {
+ InlineAsmOperand::In { reg: _, ref value } => {
+ self.consume_operand(location, value);
+ }
+ InlineAsmOperand::Out { reg: _, late: _, place, .. } => {
+ if let Some(place) = place {
+ self.mutate_place(location, place, Shallow(None));
+ }
+ }
+ InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
+ self.consume_operand(location, in_value);
+ if let Some(out_place) = out_place {
+ self.mutate_place(location, out_place, Shallow(None));
+ }
+ }
+ InlineAsmOperand::Const { value: _ }
+ | InlineAsmOperand::SymFn { value: _ }
+ | InlineAsmOperand::SymStatic { def_id: _ } => {}
+ }
+ }
+ }
+ TerminatorKind::Goto { target: _ }
+ | TerminatorKind::Abort
+ | TerminatorKind::Unreachable
+ | TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
+ | TerminatorKind::FalseUnwind { real_target: _, unwind: _ } => {
+ // no data used, thus irrelevant to borrowck
+ }
+ }
+
+ self.super_terminator(terminator, location);
+ }
+}
+
+impl<'cx, 'tcx> InvalidationGenerator<'cx, 'tcx> {
+ /// Simulates mutation of a place.
+ fn mutate_place(&mut self, location: Location, place: Place<'tcx>, kind: AccessDepth) {
+ self.access_place(
+ location,
+ place,
+ (kind, Write(WriteKind::Mutate)),
+ LocalMutationIsAllowed::ExceptUpvars,
+ );
+ }
+
+ /// Simulates consumption of an operand.
+ fn consume_operand(&mut self, location: Location, operand: &Operand<'tcx>) {
+ match *operand {
+ Operand::Copy(place) => {
+ self.access_place(
+ location,
+ place,
+ (Deep, Read(ReadKind::Copy)),
+ LocalMutationIsAllowed::No,
+ );
+ }
+ Operand::Move(place) => {
+ self.access_place(
+ location,
+ place,
+ (Deep, Write(WriteKind::Move)),
+ LocalMutationIsAllowed::Yes,
+ );
+ }
+ Operand::Constant(_) => {}
+ }
+ }
+
+    /// Simulates consumption of an rvalue.
+ fn consume_rvalue(&mut self, location: Location, rvalue: &Rvalue<'tcx>) {
+ match *rvalue {
+ Rvalue::Ref(_ /*rgn*/, bk, place) => {
+ let access_kind = match bk {
+ BorrowKind::Shallow => {
+ (Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
+ }
+ BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
+ BorrowKind::Unique | BorrowKind::Mut { .. } => {
+ let wk = WriteKind::MutableBorrow(bk);
+ if allow_two_phase_borrow(bk) {
+ (Deep, Reservation(wk))
+ } else {
+ (Deep, Write(wk))
+ }
+ }
+ };
+
+ self.access_place(location, place, access_kind, LocalMutationIsAllowed::No);
+ }
+
+ Rvalue::AddressOf(mutability, place) => {
+ let access_kind = match mutability {
+ Mutability::Mut => (
+ Deep,
+ Write(WriteKind::MutableBorrow(BorrowKind::Mut {
+ allow_two_phase_borrow: false,
+ })),
+ ),
+ Mutability::Not => (Deep, Read(ReadKind::Borrow(BorrowKind::Shared))),
+ };
+
+ self.access_place(location, place, access_kind, LocalMutationIsAllowed::No);
+ }
+
+ Rvalue::ThreadLocalRef(_) => {}
+
+ Rvalue::Use(ref operand)
+ | Rvalue::Repeat(ref operand, _)
+ | Rvalue::UnaryOp(_ /*un_op*/, ref operand)
+ | Rvalue::Cast(_ /*cast_kind*/, ref operand, _ /*ty*/)
+ | Rvalue::ShallowInitBox(ref operand, _ /*ty*/) => {
+ self.consume_operand(location, operand)
+ }
+ Rvalue::CopyForDeref(ref place) => {
+ let op = &Operand::Copy(*place);
+ self.consume_operand(location, op);
+ }
+
+ Rvalue::Len(place) | Rvalue::Discriminant(place) => {
+ let af = match *rvalue {
+ Rvalue::Len(..) => Some(ArtificialField::ArrayLength),
+ Rvalue::Discriminant(..) => None,
+ _ => unreachable!(),
+ };
+ self.access_place(
+ location,
+ place,
+ (Shallow(af), Read(ReadKind::Copy)),
+ LocalMutationIsAllowed::No,
+ );
+ }
+
+ Rvalue::BinaryOp(_bin_op, box (ref operand1, ref operand2))
+ | Rvalue::CheckedBinaryOp(_bin_op, box (ref operand1, ref operand2)) => {
+ self.consume_operand(location, operand1);
+ self.consume_operand(location, operand2);
+ }
+
+ Rvalue::NullaryOp(_op, _ty) => {}
+
+ Rvalue::Aggregate(_, ref operands) => {
+ for operand in operands {
+ self.consume_operand(location, operand);
+ }
+ }
+ }
+ }
+
+ /// Simulates an access to a place.
+ fn access_place(
+ &mut self,
+ location: Location,
+ place: Place<'tcx>,
+ kind: (AccessDepth, ReadOrWrite),
+ _is_local_mutation_allowed: LocalMutationIsAllowed,
+ ) {
+ let (sd, rw) = kind;
+ // note: not doing check_access_permissions checks because they don't generate invalidates
+ self.check_access_for_conflict(location, place, sd, rw);
+ }
+
+ fn check_access_for_conflict(
+ &mut self,
+ location: Location,
+ place: Place<'tcx>,
+ sd: AccessDepth,
+ rw: ReadOrWrite,
+ ) {
+ debug!(
+ "invalidation::check_access_for_conflict(location={:?}, place={:?}, sd={:?}, \
+ rw={:?})",
+ location, place, sd, rw,
+ );
+ let tcx = self.tcx;
+ let body = self.body;
+ let borrow_set = self.borrow_set;
+ let indices = self.borrow_set.indices();
+ each_borrow_involving_path(
+ self,
+ tcx,
+ body,
+ location,
+ (sd, place),
+ borrow_set,
+ indices,
+ |this, borrow_index, borrow| {
+ match (rw, borrow.kind) {
+ // Obviously an activation is compatible with its own
+ // reservation (or even prior activating uses of same
+ // borrow); so don't check if they interfere.
+ //
+                // NOTE: *reservations* do conflict with themselves,
+                // so we aren't injecting unsoundness with this check.
+ (Activation(_, activating), _) if activating == borrow_index => {
+ // Activating a borrow doesn't generate any invalidations, since we
+ // have already taken the reservation
+ }
+
+ (Read(_), BorrowKind::Shallow | BorrowKind::Shared)
+ | (
+ Read(ReadKind::Borrow(BorrowKind::Shallow)),
+ BorrowKind::Unique | BorrowKind::Mut { .. },
+ ) => {
+ // Reads don't invalidate shared or shallow borrows
+ }
+
+ (Read(_), BorrowKind::Unique | BorrowKind::Mut { .. }) => {
+ // Reading from mere reservations of mutable-borrows is OK.
+ if !is_active(&this.dominators, borrow, location) {
+ // If the borrow isn't active yet, reads don't invalidate it
+ assert!(allow_two_phase_borrow(borrow.kind));
+ return Control::Continue;
+ }
+
+ // Unique and mutable borrows are invalidated by reads from any
+ // involved path
+ this.emit_loan_invalidated_at(borrow_index, location);
+ }
+
+ (Reservation(_) | Activation(_, _) | Write(_), _) => {
+                        // Unique and mutable borrows are invalidated by writes.
+                        // Reservations count as writes since we need to check
+                        // that activating the borrow will be OK.
+ // FIXME(bob_twinkles) is this actually the right thing to do?
+ this.emit_loan_invalidated_at(borrow_index, location);
+ }
+ }
+ Control::Continue
+ },
+ );
+ }
+
+ /// Generates a new `loan_invalidated_at(L, B)` fact.
+ fn emit_loan_invalidated_at(&mut self, b: BorrowIndex, l: Location) {
+ let lidx = self.location_table.start_index(l);
+ self.all_facts.loan_invalidated_at.push((lidx, b));
+ }
+
+ fn check_activations(&mut self, location: Location) {
+ // Two-phase borrow support: For each activation that is newly
+ // generated at this statement, check if it interferes with
+ // another borrow.
+ for &borrow_index in self.borrow_set.activations_at_location(location) {
+ let borrow = &self.borrow_set[borrow_index];
+
+ // only mutable borrows should be 2-phase
+ assert!(match borrow.kind {
+ BorrowKind::Shared | BorrowKind::Shallow => false,
+ BorrowKind::Unique | BorrowKind::Mut { .. } => true,
+ });
+
+ self.access_place(
+ location,
+ borrow.borrowed_place,
+ (Deep, Activation(WriteKind::MutableBorrow(borrow.kind), borrow_index)),
+ LocalMutationIsAllowed::No,
+ );
+
+ // We do not need to call `check_if_path_or_subpath_is_moved`
+ // again, as we already called it when we made the
+ // initial reservation.
+ }
+ }
+}
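These `loan_invalidated_at` facts only become errors downstream, when the invalidation point intersects a loan that is still live. A surface-level illustration (this compiles cleanly, because NLL ends the loan at its last use):

```rust
fn main() {
    let mut x = 1;
    let r = &x;       // a loan of `x` is issued here
    println!("{r}");  // last use: the loan is dead after this point (NLL)
    x = 2;            // a write that generates a loan_invalidated_at fact,
                      // but the loan is no longer live, so no error results
    println!("{x}");
}
```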
diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs
new file mode 100644
index 000000000..3d8b07382
--- /dev/null
+++ b/compiler/rustc_borrowck/src/lib.rs
@@ -0,0 +1,2380 @@
+//! This query borrow-checks the MIR to (further) ensure it is not broken.
+
+#![allow(rustc::potential_query_instability)]
+#![feature(box_patterns)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(rustc_attrs)]
+#![feature(stmt_expr_attributes)]
+#![feature(trusted_step)]
+#![feature(try_blocks)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate rustc_middle;
+#[macro_use]
+extern crate tracing;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_index::bit_set::ChunkedBitSet;
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::{DefiningAnchor, InferCtxt, TyCtxtInferExt};
+use rustc_middle::mir::{
+ traversal, Body, ClearCrossCrate, Local, Location, Mutability, Operand, Place, PlaceElem,
+ PlaceRef, VarDebugInfoContents,
+};
+use rustc_middle::mir::{AggregateKind, BasicBlock, BorrowCheckResult, BorrowKind};
+use rustc_middle::mir::{Field, ProjectionElem, Promoted, Rvalue, Statement, StatementKind};
+use rustc_middle::mir::{InlineAsmOperand, Terminator, TerminatorKind};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, CapturedPlace, ParamEnv, RegionVid, TyCtxt};
+use rustc_session::lint::builtin::UNUSED_MUT;
+use rustc_span::{Span, Symbol};
+
+use either::Either;
+use smallvec::SmallVec;
+use std::cell::RefCell;
+use std::collections::BTreeMap;
+use std::rc::Rc;
+
+use rustc_mir_dataflow::impls::{
+ EverInitializedPlaces, MaybeInitializedPlaces, MaybeUninitializedPlaces,
+};
+use rustc_mir_dataflow::move_paths::{InitIndex, MoveOutIndex, MovePathIndex};
+use rustc_mir_dataflow::move_paths::{InitLocation, LookupResult, MoveData, MoveError};
+use rustc_mir_dataflow::Analysis;
+use rustc_mir_dataflow::MoveDataParamEnv;
+
+use self::diagnostics::{AccessKind, RegionName};
+use self::location::LocationTable;
+use self::prefixes::PrefixSet;
+use facts::AllFacts;
+
+use self::path_utils::*;
+
+pub mod borrow_set;
+mod borrowck_errors;
+mod constraint_generation;
+mod constraints;
+mod dataflow;
+mod def_use;
+mod diagnostics;
+mod facts;
+mod invalidation;
+mod location;
+mod member_constraints;
+mod nll;
+mod path_utils;
+mod place_ext;
+mod places_conflict;
+mod prefixes;
+mod region_infer;
+mod renumber;
+mod session_diagnostics;
+mod type_check;
+mod universal_regions;
+mod used_muts;
+
+// A public API provided for consumers of the Rust compiler.
+pub mod consumers;
+
+use borrow_set::{BorrowData, BorrowSet};
+use dataflow::{BorrowIndex, BorrowckFlowState as Flows, BorrowckResults, Borrows};
+use nll::{PoloniusOutput, ToRegionVid};
+use place_ext::PlaceExt;
+use places_conflict::{places_conflict, PlaceConflictBias};
+use region_infer::RegionInferenceContext;
+
+// FIXME(eddyb) perhaps move this somewhere more centrally.
+#[derive(Debug)]
+struct Upvar<'tcx> {
+ place: CapturedPlace<'tcx>,
+
+ /// If true, the capture is behind a reference.
+ by_ref: bool,
+}
+
+/// Associate some local constants with the `'tcx` lifetime
+struct TyCtxtConsts<'tcx>(TyCtxt<'tcx>);
+impl<'tcx> TyCtxtConsts<'tcx> {
+ const DEREF_PROJECTION: &'tcx [PlaceElem<'tcx>; 1] = &[ProjectionElem::Deref];
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers {
+ mir_borrowck: |tcx, did| {
+ if let Some(def) = ty::WithOptConstParam::try_lookup(did, tcx) {
+ tcx.mir_borrowck_const_arg(def)
+ } else {
+ mir_borrowck(tcx, ty::WithOptConstParam::unknown(did))
+ }
+ },
+ mir_borrowck_const_arg: |tcx, (did, param_did)| {
+ mir_borrowck(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
+ },
+ ..*providers
+ };
+}
+
+fn mir_borrowck<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx BorrowCheckResult<'tcx> {
+ let (input_body, promoted) = tcx.mir_promoted(def);
+ debug!("run query mir_borrowck: {}", tcx.def_path_str(def.did.to_def_id()));
+ let hir_owner = tcx.hir().local_def_id_to_hir_id(def.did).owner;
+
+ let opt_closure_req = tcx
+ .infer_ctxt()
+ .with_opaque_type_inference(DefiningAnchor::Bind(hir_owner))
+ .enter(|infcx| {
+ let input_body: &Body<'_> = &input_body.borrow();
+ let promoted: &IndexVec<_, _> = &promoted.borrow();
+ do_mir_borrowck(&infcx, input_body, promoted, false).0
+ });
+ debug!("mir_borrowck done");
+
+ tcx.arena.alloc(opt_closure_req)
+}
+
+/// Performs the actual borrow checking.
+///
+/// If `return_body_with_facts` is true, also returns the body with
+/// non-erased region ids on which the borrow checking was performed,
+/// together with the Polonius facts.
+#[instrument(skip(infcx, input_body, input_promoted), fields(id=?input_body.source.with_opt_param().as_local().unwrap()), level = "debug")]
+fn do_mir_borrowck<'a, 'tcx>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ input_body: &Body<'tcx>,
+ input_promoted: &IndexVec<Promoted, Body<'tcx>>,
+ return_body_with_facts: bool,
+) -> (BorrowCheckResult<'tcx>, Option<Box<BodyWithBorrowckFacts<'tcx>>>) {
+ let def = input_body.source.with_opt_param().as_local().unwrap();
+
+ debug!(?def);
+
+ let tcx = infcx.tcx;
+ let param_env = tcx.param_env(def.did);
+
+ let mut local_names = IndexVec::from_elem(None, &input_body.local_decls);
+ for var_debug_info in &input_body.var_debug_info {
+ if let VarDebugInfoContents::Place(place) = var_debug_info.value {
+ if let Some(local) = place.as_local() {
+ if let Some(prev_name) = local_names[local] && var_debug_info.name != prev_name {
+ span_bug!(
+ var_debug_info.source_info.span,
+ "local {:?} has many names (`{}` vs `{}`)",
+ local,
+ prev_name,
+ var_debug_info.name
+ );
+ }
+ local_names[local] = Some(var_debug_info.name);
+ }
+ }
+ }
+
+ let mut errors = error::BorrowckErrors::new();
+
+ // Gather the upvars of a closure, if any.
+ let tables = tcx.typeck_opt_const_arg(def);
+ if let Some(ErrorGuaranteed { .. }) = tables.tainted_by_errors {
+ infcx.set_tainted_by_errors();
+ errors.set_tainted_by_errors();
+ }
+ let upvars: Vec<_> = tables
+ .closure_min_captures_flattened(def.did)
+ .map(|captured_place| {
+ let capture = captured_place.info.capture_kind;
+ let by_ref = match capture {
+ ty::UpvarCapture::ByValue => false,
+ ty::UpvarCapture::ByRef(..) => true,
+ };
+ Upvar { place: captured_place.clone(), by_ref }
+ })
+ .collect();
+
+ // Replace all regions with fresh inference variables. This
+ // requires first making our own copy of the MIR. This copy will
+ // be modified (in place) to contain non-lexical lifetimes. It
+ // will have a lifetime tied to the inference context.
+ let mut body_owned = input_body.clone();
+ let mut promoted = input_promoted.clone();
+ let free_regions =
+ nll::replace_regions_in_mir(infcx, param_env, &mut body_owned, &mut promoted);
+ let body = &body_owned; // no further changes
+
+ let location_table_owned = LocationTable::new(body);
+ let location_table = &location_table_owned;
+
+ let (move_data, move_errors): (MoveData<'tcx>, Vec<(Place<'tcx>, MoveError<'tcx>)>) =
+ match MoveData::gather_moves(&body, tcx, param_env) {
+ Ok((_, move_data)) => (move_data, Vec::new()),
+ Err((move_data, move_errors)) => (move_data, move_errors),
+ };
+ let promoted_errors = promoted
+ .iter_enumerated()
+ .map(|(idx, body)| (idx, MoveData::gather_moves(&body, tcx, param_env)));
+
+ let mdpe = MoveDataParamEnv { move_data, param_env };
+
+ let mut flow_inits = MaybeInitializedPlaces::new(tcx, &body, &mdpe)
+ .into_engine(tcx, &body)
+ .pass_name("borrowck")
+ .iterate_to_fixpoint()
+ .into_results_cursor(&body);
+
+ let locals_are_invalidated_at_exit = tcx.hir().body_owner_kind(def.did).is_fn_or_closure();
+ let borrow_set =
+ Rc::new(BorrowSet::build(tcx, body, locals_are_invalidated_at_exit, &mdpe.move_data));
+
+ let use_polonius = return_body_with_facts || infcx.tcx.sess.opts.unstable_opts.polonius;
+
+ // Compute non-lexical lifetimes.
+ let nll::NllOutput {
+ regioncx,
+ opaque_type_values,
+ polonius_input,
+ polonius_output,
+ opt_closure_req,
+ nll_errors,
+ } = nll::compute_regions(
+ infcx,
+ free_regions,
+ body,
+ &promoted,
+ location_table,
+ param_env,
+ &mut flow_inits,
+ &mdpe.move_data,
+ &borrow_set,
+ &upvars,
+ use_polonius,
+ );
+
+    // Dump MIR results into a file, if that is enabled. This lets us
+    // write unit tests and also helps with debugging.
+ nll::dump_mir_results(infcx, &body, &regioncx, &opt_closure_req);
+
+ // We also have a `#[rustc_regions]` annotation that causes us to dump
+ // information.
+ nll::dump_annotation(
+ infcx,
+ &body,
+ &regioncx,
+ &opt_closure_req,
+ &opaque_type_values,
+ &mut errors,
+ );
+
+ // The various `flow_*` structures can be large. We drop `flow_inits` here
+ // so it doesn't overlap with the others below. This reduces peak memory
+ // usage significantly on some benchmarks.
+ drop(flow_inits);
+
+ let regioncx = Rc::new(regioncx);
+
+ let flow_borrows = Borrows::new(tcx, body, &regioncx, &borrow_set)
+ .into_engine(tcx, body)
+ .pass_name("borrowck")
+ .iterate_to_fixpoint();
+ let flow_uninits = MaybeUninitializedPlaces::new(tcx, body, &mdpe)
+ .into_engine(tcx, body)
+ .pass_name("borrowck")
+ .iterate_to_fixpoint();
+ let flow_ever_inits = EverInitializedPlaces::new(tcx, body, &mdpe)
+ .into_engine(tcx, body)
+ .pass_name("borrowck")
+ .iterate_to_fixpoint();
+
+ let movable_generator =
+ // The first argument is the generator type passed by value
+ if let Some(local) = body.local_decls.raw.get(1)
+ // Get the interior types and substs which typeck computed
+ && let ty::Generator(_, _, hir::Movability::Static) = local.ty.kind()
+ {
+ false
+ } else {
+ true
+ };
+
+ for (idx, move_data_results) in promoted_errors {
+ let promoted_body = &promoted[idx];
+
+ if let Err((move_data, move_errors)) = move_data_results {
+ let mut promoted_mbcx = MirBorrowckCtxt {
+ infcx,
+ param_env,
+ body: promoted_body,
+ move_data: &move_data,
+ location_table, // no need to create a real one for the promoted, it is not used
+ movable_generator,
+ fn_self_span_reported: Default::default(),
+ locals_are_invalidated_at_exit,
+ access_place_error_reported: Default::default(),
+ reservation_error_reported: Default::default(),
+ uninitialized_error_reported: Default::default(),
+ regioncx: regioncx.clone(),
+ used_mut: Default::default(),
+ used_mut_upvars: SmallVec::new(),
+ borrow_set: Rc::clone(&borrow_set),
+ dominators: Dominators::dummy(), // not used
+ upvars: Vec::new(),
+ local_names: IndexVec::from_elem(None, &promoted_body.local_decls),
+ region_names: RefCell::default(),
+ next_region_name: RefCell::new(1),
+ polonius_output: None,
+ errors,
+ };
+ promoted_mbcx.report_move_errors(move_errors);
+ errors = promoted_mbcx.errors;
+ };
+ }
+
+ let dominators = body.basic_blocks.dominators();
+
+ let mut mbcx = MirBorrowckCtxt {
+ infcx,
+ param_env,
+ body,
+ move_data: &mdpe.move_data,
+ location_table,
+ movable_generator,
+ locals_are_invalidated_at_exit,
+ fn_self_span_reported: Default::default(),
+ access_place_error_reported: Default::default(),
+ reservation_error_reported: Default::default(),
+ uninitialized_error_reported: Default::default(),
+ regioncx: Rc::clone(&regioncx),
+ used_mut: Default::default(),
+ used_mut_upvars: SmallVec::new(),
+ borrow_set: Rc::clone(&borrow_set),
+ dominators,
+ upvars,
+ local_names,
+ region_names: RefCell::default(),
+ next_region_name: RefCell::new(1),
+ polonius_output,
+ errors,
+ };
+
+ // Compute and report region errors, if any.
+ mbcx.report_region_errors(nll_errors);
+
+ let results = BorrowckResults {
+ ever_inits: flow_ever_inits,
+ uninits: flow_uninits,
+ borrows: flow_borrows,
+ };
+
+ mbcx.report_move_errors(move_errors);
+
+ rustc_mir_dataflow::visit_results(
+ body,
+ traversal::reverse_postorder(body).map(|(bb, _)| bb),
+ &results,
+ &mut mbcx,
+ );
+
+    // For each mutable variable that is used but was not declared by the user,
+    // check if it was assigned from a user-declared local. If so, put that
+    // local into the used_mut set.
+ // Note that this set is expected to be small - only upvars from closures
+ // would have a chance of erroneously adding non-user-defined mutable vars
+ // to the set.
+ let temporary_used_locals: FxHashSet<Local> = mbcx
+ .used_mut
+ .iter()
+ .filter(|&local| !mbcx.body.local_decls[*local].is_user_variable())
+ .cloned()
+ .collect();
+ // For the remaining unused locals that are marked as mutable, we avoid linting any that
+    // were never initialized. These locals may have been removed as unreachable code, or will be
+    // linted as unused variables.
+ let unused_mut_locals =
+ mbcx.body.mut_vars_iter().filter(|local| !mbcx.used_mut.contains(local)).collect();
+ mbcx.gather_used_muts(temporary_used_locals, unused_mut_locals);
+
+ debug!("mbcx.used_mut: {:?}", mbcx.used_mut);
+ let used_mut = std::mem::take(&mut mbcx.used_mut);
+ for local in mbcx.body.mut_vars_and_args_iter().filter(|local| !used_mut.contains(local)) {
+ let local_decl = &mbcx.body.local_decls[local];
+ let lint_root = match &mbcx.body.source_scopes[local_decl.source_info.scope].local_data {
+ ClearCrossCrate::Set(data) => data.lint_root,
+ _ => continue,
+ };
+
+ // Skip over locals that begin with an underscore or have no name
+ match mbcx.local_names[local] {
+ Some(name) => {
+ if name.as_str().starts_with('_') {
+ continue;
+ }
+ }
+ None => continue,
+ }
+
+ let span = local_decl.source_info.span;
+ if span.desugaring_kind().is_some() {
+ // If the `mut` arises as part of a desugaring, we should ignore it.
+ continue;
+ }
+
+ tcx.struct_span_lint_hir(UNUSED_MUT, lint_root, span, |lint| {
+ let mut_span = tcx.sess.source_map().span_until_non_whitespace(span);
+ lint.build("variable does not need to be mutable")
+ .span_suggestion_short(
+ mut_span,
+ "remove this `mut`",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ })
+ }
+
+ let tainted_by_errors = mbcx.emit_errors();
+
+ let result = BorrowCheckResult {
+ concrete_opaque_types: opaque_type_values,
+ closure_requirements: opt_closure_req,
+ used_mut_upvars: mbcx.used_mut_upvars,
+ tainted_by_errors,
+ };
+
+ let body_with_facts = if return_body_with_facts {
+ let output_facts = mbcx.polonius_output.expect("Polonius output was not computed");
+ Some(Box::new(BodyWithBorrowckFacts {
+ body: body_owned,
+ input_facts: *polonius_input.expect("Polonius input facts were not generated"),
+ output_facts,
+ location_table: location_table_owned,
+ }))
+ } else {
+ None
+ };
+
+ debug!("do_mir_borrowck: result = {:#?}", result);
+
+ (result, body_with_facts)
+}
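A minimal program exercising the `UNUSED_MUT` path above, including the leading-underscore exemption in the loop:

```rust
fn main() {
    let mut a = 5;    // never mutated: "variable does not need to be mutable"
    let mut _b = 7;   // leading underscore: skipped by the check above
    println!("{a} {_b}");
}
```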
+
+/// A `Body` with information computed by the borrow checker. This struct is
+/// intended for use by consumers of the compiler.
+///
+/// We need to include the MIR body here because the region identifiers must
+/// match the ones in the Polonius facts.
+pub struct BodyWithBorrowckFacts<'tcx> {
+ /// A mir body that contains region identifiers.
+ pub body: Body<'tcx>,
+ /// Polonius input facts.
+ pub input_facts: AllFacts,
+ /// Polonius output facts.
+ pub output_facts: Rc<self::nll::PoloniusOutput>,
+    /// The table that maps Polonius points to locations in the body.
+ pub location_table: LocationTable,
+}
+
+struct MirBorrowckCtxt<'cx, 'tcx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ param_env: ParamEnv<'tcx>,
+ body: &'cx Body<'tcx>,
+ move_data: &'cx MoveData<'tcx>,
+
+ /// Map from MIR `Location` to `LocationIndex`; created
+ /// when MIR borrowck begins.
+ location_table: &'cx LocationTable,
+
+ movable_generator: bool,
+    /// This keeps track of whether local variables are freed when the function
+ /// exits even without a `StorageDead`, which appears to be the case for
+ /// constants.
+ ///
+ /// I'm not sure this is the right approach - @eddyb could you try and
+ /// figure this out?
+ locals_are_invalidated_at_exit: bool,
+    /// This field keeps track of when borrow errors are reported in the `access_place`
+    /// function so that there is no duplicate reporting. It cannot also serve for the
+    /// conflicting borrow errors handled by the `reservation_error_reported` field, as
+    /// the inclusion of the `Span` (while required to mute some errors) stops the muting
+    /// of the reservation errors.
+ access_place_error_reported: FxHashSet<(Place<'tcx>, Span)>,
+ /// This field keeps track of when borrow conflict errors are reported
+ /// for reservations, so that we don't report seemingly duplicate
+ /// errors for corresponding activations.
+ //
+ // FIXME: ideally this would be a set of `BorrowIndex`, not `Place`s,
+ // but it is currently inconvenient to track down the `BorrowIndex`
+ // at the time we detect and report a reservation error.
+ reservation_error_reported: FxHashSet<Place<'tcx>>,
+    /// This field keeps track of the `Span`s that we have
+ /// used to report extra information for `FnSelfUse`, to avoid
+ /// unnecessarily verbose errors.
+ fn_self_span_reported: FxHashSet<Span>,
+ /// This field keeps track of errors reported in the checking of uninitialized variables,
+ /// so that we don't report seemingly duplicate errors.
+ uninitialized_error_reported: FxHashSet<PlaceRef<'tcx>>,
+ /// This field keeps track of all the local variables that are declared mut and are mutated.
+    /// Used for the warning issued for an unused mutable local variable.
+ used_mut: FxHashSet<Local>,
+ /// If the function we're checking is a closure, then we'll need to report back the list of
+ /// mutable upvars that have been used. This field keeps track of them.
+ used_mut_upvars: SmallVec<[Field; 8]>,
+ /// Region inference context. This contains the results from region inference and lets us e.g.
+ /// find out which CFG points are contained in each borrow region.
+ regioncx: Rc<RegionInferenceContext<'tcx>>,
+
+ /// The set of borrows extracted from the MIR
+ borrow_set: Rc<BorrowSet<'tcx>>,
+
+ /// Dominators for MIR
+ dominators: Dominators<BasicBlock>,
+
+ /// Information about upvars not necessarily preserved in types or MIR
+ upvars: Vec<Upvar<'tcx>>,
+
+ /// Names of local (user) variables (extracted from `var_debug_info`).
+ local_names: IndexVec<Local, Option<Symbol>>,
+
+ /// Record the region names generated for each region in the given
+ /// MIR def so that we can reuse them later in help/error messages.
+ region_names: RefCell<FxHashMap<RegionVid, RegionName>>,
+
+ /// The counter for generating new region names.
+ next_region_name: RefCell<usize>,
+
+ /// Results of Polonius analysis.
+ polonius_output: Option<Rc<PoloniusOutput>>,
+
+ errors: error::BorrowckErrors<'tcx>,
+}
+
+// Check that:
+// 1. assignments are always made to mutable locations (FIXME: does that still really go here?)
+// 2. loans made in overlapping scopes do not conflict
+// 3. assignments do not affect things loaned out as immutable
+// 4. moves do not affect things loaned out in any way
+impl<'cx, 'tcx> rustc_mir_dataflow::ResultsVisitor<'cx, 'tcx> for MirBorrowckCtxt<'cx, 'tcx> {
+ type FlowState = Flows<'cx, 'tcx>;
+
+ fn visit_statement_before_primary_effect(
+ &mut self,
+ flow_state: &Flows<'cx, 'tcx>,
+ stmt: &'cx Statement<'tcx>,
+ location: Location,
+ ) {
+ debug!("MirBorrowckCtxt::process_statement({:?}, {:?}): {:?}", location, stmt, flow_state);
+ let span = stmt.source_info.span;
+
+ self.check_activations(location, span, flow_state);
+
+ match &stmt.kind {
+ StatementKind::Assign(box (lhs, ref rhs)) => {
+ self.consume_rvalue(location, (rhs, span), flow_state);
+
+ self.mutate_place(location, (*lhs, span), Shallow(None), flow_state);
+ }
+ StatementKind::FakeRead(box (_, ref place)) => {
+ // Read for match doesn't access any memory and is used to
+ // assert that a place is safe and live. So we don't have to
+ // do any checks here.
+ //
+ // FIXME: Remove check that the place is initialized. This is
+ // needed for now because matches don't have never patterns yet.
+ // So this is the only place we prevent
+ // let x: !;
+ // match x {};
+ // from compiling.
+ self.check_if_path_or_subpath_is_moved(
+ location,
+ InitializationRequiringAction::Use,
+ (place.as_ref(), span),
+ flow_state,
+ );
+ }
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ ..
+ }) => {
+ span_bug!(
+ span,
+ "Unexpected CopyNonOverlapping, should only appear after lower_intrinsics",
+ )
+ }
+ StatementKind::Nop
+ | StatementKind::Coverage(..)
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::StorageLive(..) => {
+                // `Nop`, `Coverage`, `AscribeUserType`, `Retag`, and `StorageLive`
+                // are irrelevant to borrow check.
+ }
+ StatementKind::StorageDead(local) => {
+ self.access_place(
+ location,
+ (Place::from(*local), span),
+ (Shallow(None), Write(WriteKind::StorageDeadOrDrop)),
+ LocalMutationIsAllowed::Yes,
+ flow_state,
+ );
+ }
+ StatementKind::Deinit(..) | StatementKind::SetDiscriminant { .. } => {
+ bug!("Statement not allowed in this MIR phase")
+ }
+ }
+ }
+
+ fn visit_terminator_before_primary_effect(
+ &mut self,
+ flow_state: &Flows<'cx, 'tcx>,
+ term: &'cx Terminator<'tcx>,
+ loc: Location,
+ ) {
+ debug!("MirBorrowckCtxt::process_terminator({:?}, {:?}): {:?}", loc, term, flow_state);
+ let span = term.source_info.span;
+
+ self.check_activations(loc, span, flow_state);
+
+ match term.kind {
+ TerminatorKind::SwitchInt { ref discr, switch_ty: _, targets: _ } => {
+ self.consume_operand(loc, (discr, span), flow_state);
+ }
+ TerminatorKind::Drop { place, target: _, unwind: _ } => {
+ debug!(
+ "visit_terminator_drop \
+ loc: {:?} term: {:?} place: {:?} span: {:?}",
+ loc, term, place, span
+ );
+
+ self.access_place(
+ loc,
+ (place, span),
+ (AccessDepth::Drop, Write(WriteKind::StorageDeadOrDrop)),
+ LocalMutationIsAllowed::Yes,
+ flow_state,
+ );
+ }
+ TerminatorKind::DropAndReplace {
+ place: drop_place,
+ value: ref new_value,
+ target: _,
+ unwind: _,
+ } => {
+ self.mutate_place(loc, (drop_place, span), Deep, flow_state);
+ self.consume_operand(loc, (new_value, span), flow_state);
+ }
+ TerminatorKind::Call {
+ ref func,
+ ref args,
+ destination,
+ target: _,
+ cleanup: _,
+ from_hir_call: _,
+ fn_span: _,
+ } => {
+ self.consume_operand(loc, (func, span), flow_state);
+ for arg in args {
+ self.consume_operand(loc, (arg, span), flow_state);
+ }
+ self.mutate_place(loc, (destination, span), Deep, flow_state);
+ }
+ TerminatorKind::Assert { ref cond, expected: _, ref msg, target: _, cleanup: _ } => {
+ self.consume_operand(loc, (cond, span), flow_state);
+ use rustc_middle::mir::AssertKind;
+ if let AssertKind::BoundsCheck { ref len, ref index } = *msg {
+ self.consume_operand(loc, (len, span), flow_state);
+ self.consume_operand(loc, (index, span), flow_state);
+ }
+ }
+
+ TerminatorKind::Yield { ref value, resume: _, resume_arg, drop: _ } => {
+ self.consume_operand(loc, (value, span), flow_state);
+ self.mutate_place(loc, (resume_arg, span), Deep, flow_state);
+ }
+
+ TerminatorKind::InlineAsm {
+ template: _,
+ ref operands,
+ options: _,
+ line_spans: _,
+ destination: _,
+ cleanup: _,
+ } => {
+ for op in operands {
+ match *op {
+ InlineAsmOperand::In { reg: _, ref value } => {
+ self.consume_operand(loc, (value, span), flow_state);
+ }
+ InlineAsmOperand::Out { reg: _, late: _, place, .. } => {
+ if let Some(place) = place {
+ self.mutate_place(loc, (place, span), Shallow(None), flow_state);
+ }
+ }
+ InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
+ self.consume_operand(loc, (in_value, span), flow_state);
+ if let Some(out_place) = out_place {
+ self.mutate_place(
+ loc,
+ (out_place, span),
+ Shallow(None),
+ flow_state,
+ );
+ }
+ }
+ InlineAsmOperand::Const { value: _ }
+ | InlineAsmOperand::SymFn { value: _ }
+ | InlineAsmOperand::SymStatic { def_id: _ } => {}
+ }
+ }
+ }
+
+ TerminatorKind::Goto { target: _ }
+ | TerminatorKind::Abort
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Resume
+ | TerminatorKind::Return
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
+ | TerminatorKind::FalseUnwind { real_target: _, unwind: _ } => {
+ // no data used, thus irrelevant to borrowck
+ }
+ }
+ }
+
+ fn visit_terminator_after_primary_effect(
+ &mut self,
+ flow_state: &Flows<'cx, 'tcx>,
+ term: &'cx Terminator<'tcx>,
+ loc: Location,
+ ) {
+ let span = term.source_info.span;
+
+ match term.kind {
+ TerminatorKind::Yield { value: _, resume: _, resume_arg: _, drop: _ } => {
+ if self.movable_generator {
+ // Look for any active borrows to locals
+ let borrow_set = self.borrow_set.clone();
+ for i in flow_state.borrows.iter() {
+ let borrow = &borrow_set[i];
+ self.check_for_local_borrow(borrow, span);
+ }
+ }
+ }
+
+ TerminatorKind::Resume | TerminatorKind::Return | TerminatorKind::GeneratorDrop => {
+ // Returning from the function implicitly kills storage for all locals and statics.
+ // Often, the storage will already have been killed by an explicit
+ // StorageDead, but we don't always emit those (notably on unwind paths),
+ // so this "extra check" serves as a kind of backup.
+ let borrow_set = self.borrow_set.clone();
+ for i in flow_state.borrows.iter() {
+ let borrow = &borrow_set[i];
+ self.check_for_invalidation_at_exit(loc, borrow, span);
+ }
+ }
+
+ TerminatorKind::Abort
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::FalseEdge { real_target: _, imaginary_target: _ }
+ | TerminatorKind::FalseUnwind { real_target: _, unwind: _ }
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Unreachable
+ | TerminatorKind::InlineAsm { .. } => {}
+ }
+ }
+}
+
+use self::AccessDepth::{Deep, Shallow};
+use self::ReadOrWrite::{Activation, Read, Reservation, Write};
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum ArtificialField {
+ ArrayLength,
+ ShallowBorrow,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum AccessDepth {
+ /// From the RFC: "A *shallow* access means that the immediate
+ /// fields reached at P are accessed, but references or pointers
+ /// found within are not dereferenced. Right now, the only access
+ /// that is shallow is an assignment like `x = ...;`, which would
+ /// be a *shallow write* of `x`."
+ Shallow(Option<ArtificialField>),
+
+ /// From the RFC: "A *deep* access means that all data reachable
+    /// through the given place may be invalidated or accessed by
+ /// this action."
+ Deep,
+
+ /// Access is Deep only when there is a Drop implementation that
+ /// can reach the data behind the reference.
+ Drop,
+}
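A surface-level illustration of the shallow/deep distinction (assumed example, not from this patch): an assignment to `p` is a shallow write, yet it still conflicts with borrows of `p`'s immediate fields while they are live:

```rust
fn main() {
    let mut p = (1, 2);
    let r = &p.0;     // borrow of an immediate field of `p`
    println!("{r}");  // last use of the borrow
    // `p = ...` is a *shallow* write: it touches only the immediate fields.
    // It would conflict with `r` (error E0506) if `r` were still live here;
    // once the borrow is dead, the assignment is accepted.
    p = (3, 4);
    println!("{:?}", p);
}
```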
+
+/// Kind of access to a value: read or write
+/// (For informational purposes only)
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum ReadOrWrite {
+ /// From the RFC: "A *read* means that the existing data may be
+ /// read, but will not be changed."
+ Read(ReadKind),
+
+ /// From the RFC: "A *write* means that the data may be mutated to
+ /// new values or otherwise invalidated (for example, it could be
+    /// de-initialized, as in a move operation)."
+ Write(WriteKind),
+
+ /// For two-phase borrows, we distinguish a reservation (which is treated
+ /// like a Read) from an activation (which is treated like a write), and
+ /// each of those is furthermore distinguished from Reads/Writes above.
+ Reservation(WriteKind),
+ Activation(WriteKind, BorrowIndex),
+}
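The classic two-phase-borrow example that the `Reservation`/`Activation` split exists to support:

```rust
fn main() {
    let mut v = vec![1, 2, 3];
    // `&mut v` for the `push` call is first only *reserved* (treated like a
    // read); the argument `v.len()` is then read, which is compatible with a
    // mere reservation; the mutable borrow is *activated* (treated like a
    // write) only at the call itself.
    v.push(v.len());
    assert_eq!(v, [1, 2, 3, 3]);
}
```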
+
+/// Kind of read access to a value
+/// (For informational purposes only)
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum ReadKind {
+ Borrow(BorrowKind),
+ Copy,
+}
+
+/// Kind of write access to a value
+/// (For informational purposes only)
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum WriteKind {
+ StorageDeadOrDrop,
+ MutableBorrow(BorrowKind),
+ Mutate,
+ Move,
+}
+
+/// When checking permissions for a place access, this flag is used to indicate that an immutable
+/// local place can be mutated.
+//
+// FIXME: @nikomatsakis suggested that this flag could be removed with the following modifications:
+// - Merge `check_access_permissions()` and `check_if_reassignment_to_immutable_state()`.
+// - Split `is_mutable()` into `is_assignable()` (can be directly assigned) and
+// `is_declared_mutable()`.
+// - Take flow state into consideration in `is_assignable()` for local variables.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum LocalMutationIsAllowed {
+ Yes,
+ /// We want use of immutable upvars to cause a "write to immutable upvar"
+    /// error, not a "reassignment" error.
+ ExceptUpvars,
+ No,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum InitializationRequiringAction {
+ Borrow,
+ MatchOn,
+ Use,
+ Assignment,
+ PartialAssignment,
+}
+
+struct RootPlace<'tcx> {
+ place_local: Local,
+ place_projection: &'tcx [PlaceElem<'tcx>],
+ is_local_mutation_allowed: LocalMutationIsAllowed,
+}
+
+impl InitializationRequiringAction {
+ fn as_noun(self) -> &'static str {
+ match self {
+ InitializationRequiringAction::Borrow => "borrow",
+ InitializationRequiringAction::MatchOn => "use", // no good noun
+ InitializationRequiringAction::Use => "use",
+ InitializationRequiringAction::Assignment => "assign",
+ InitializationRequiringAction::PartialAssignment => "assign to part",
+ }
+ }
+
+ fn as_verb_in_past_tense(self) -> &'static str {
+ match self {
+ InitializationRequiringAction::Borrow => "borrowed",
+ InitializationRequiringAction::MatchOn => "matched on",
+ InitializationRequiringAction::Use => "used",
+ InitializationRequiringAction::Assignment => "assigned",
+ InitializationRequiringAction::PartialAssignment => "partially assigned",
+ }
+ }
+
+ fn as_general_verb_in_past_tense(self) -> &'static str {
+ match self {
+ InitializationRequiringAction::Borrow
+ | InitializationRequiringAction::MatchOn
+ | InitializationRequiringAction::Use => "used",
+ InitializationRequiringAction::Assignment => "assigned",
+ InitializationRequiringAction::PartialAssignment => "partially assigned",
+ }
+ }
+}
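These strings surface in initialization diagnostics; for instance (an assumed illustration), borrowing a moved value reports E0382 with the `Borrow` noun:

```rust
fn main() {
    let s = String::from("hi");
    let moved = s; // `s` is moved here
    // Uncommenting the next line gives error[E0382]
    // "borrow of moved value: `s`", with "borrow" supplied by
    // `InitializationRequiringAction::Borrow.as_noun()` above.
    // let r = &s;
    println!("{moved}");
}
```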
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+ fn body(&self) -> &'cx Body<'tcx> {
+ self.body
+ }
+
+ /// Checks an access to the given place to see if it is allowed. Examines the set of borrows
+ /// that are in scope, as well as which paths have been initialized, to ensure that (a) the
+ /// place is initialized and (b) it is not borrowed in some way that would prevent this
+ /// access.
+ ///
+    /// Reports an error if the access is not allowed.
+ fn access_place(
+ &mut self,
+ location: Location,
+ place_span: (Place<'tcx>, Span),
+ kind: (AccessDepth, ReadOrWrite),
+ is_local_mutation_allowed: LocalMutationIsAllowed,
+ flow_state: &Flows<'cx, 'tcx>,
+ ) {
+ let (sd, rw) = kind;
+
+ if let Activation(_, borrow_index) = rw {
+ if self.reservation_error_reported.contains(&place_span.0) {
+ debug!(
+ "skipping access_place for activation of invalid reservation \
+ place: {:?} borrow_index: {:?}",
+ place_span.0, borrow_index
+ );
+ return;
+ }
+ }
+
+        // Check is_empty() first because it's the common case; that way
+        // we avoid the clone() call.
+ if !self.access_place_error_reported.is_empty()
+ && self.access_place_error_reported.contains(&(place_span.0, place_span.1))
+ {
+ debug!(
+ "access_place: suppressing error place_span=`{:?}` kind=`{:?}`",
+ place_span, kind
+ );
+ return;
+ }
+
+ let mutability_error = self.check_access_permissions(
+ place_span,
+ rw,
+ is_local_mutation_allowed,
+ flow_state,
+ location,
+ );
+ let conflict_error =
+ self.check_access_for_conflict(location, place_span, sd, rw, flow_state);
+
+ if conflict_error || mutability_error {
+ debug!("access_place: logging error place_span=`{:?}` kind=`{:?}`", place_span, kind);
+ self.access_place_error_reported.insert((place_span.0, place_span.1));
+ }
+ }
+
+ fn check_access_for_conflict(
+ &mut self,
+ location: Location,
+ place_span: (Place<'tcx>, Span),
+ sd: AccessDepth,
+ rw: ReadOrWrite,
+ flow_state: &Flows<'cx, 'tcx>,
+ ) -> bool {
+ debug!(
+ "check_access_for_conflict(location={:?}, place_span={:?}, sd={:?}, rw={:?})",
+ location, place_span, sd, rw,
+ );
+
+ let mut error_reported = false;
+ let tcx = self.infcx.tcx;
+ let body = self.body;
+ let borrow_set = self.borrow_set.clone();
+
+ // Use polonius output if it has been enabled.
+ let polonius_output = self.polonius_output.clone();
+ let borrows_in_scope = if let Some(polonius) = &polonius_output {
+ let location = self.location_table.start_index(location);
+ Either::Left(polonius.errors_at(location).iter().copied())
+ } else {
+ Either::Right(flow_state.borrows.iter())
+ };
+
+ each_borrow_involving_path(
+ self,
+ tcx,
+ body,
+ location,
+ (sd, place_span.0),
+ &borrow_set,
+ borrows_in_scope,
+ |this, borrow_index, borrow| match (rw, borrow.kind) {
+ // Obviously an activation is compatible with its own
+ // reservation (or even prior activating uses of same
+ // borrow); so don't check if they interfere.
+ //
+                // NOTE: *reservations* do conflict with themselves,
+                // so we aren't injecting unsoundness with this check.
+ (Activation(_, activating), _) if activating == borrow_index => {
+ debug!(
+ "check_access_for_conflict place_span: {:?} sd: {:?} rw: {:?} \
+ skipping {:?} b/c activation of same borrow_index",
+ place_span,
+ sd,
+ rw,
+ (borrow_index, borrow),
+ );
+ Control::Continue
+ }
+
+ (Read(_), BorrowKind::Shared | BorrowKind::Shallow)
+ | (
+ Read(ReadKind::Borrow(BorrowKind::Shallow)),
+ BorrowKind::Unique | BorrowKind::Mut { .. },
+ ) => Control::Continue,
+
+ (Reservation(_), BorrowKind::Shallow | BorrowKind::Shared) => {
+ // This used to be a future compatibility warning (to be
+ // disallowed on NLL). See rust-lang/rust#56254
+ Control::Continue
+ }
+
+ (Write(WriteKind::Move), BorrowKind::Shallow) => {
+ // Handled by initialization checks.
+ Control::Continue
+ }
+
+ (Read(kind), BorrowKind::Unique | BorrowKind::Mut { .. }) => {
+ // Reading from mere reservations of mutable-borrows is OK.
+ if !is_active(&this.dominators, borrow, location) {
+ assert!(allow_two_phase_borrow(borrow.kind));
+ return Control::Continue;
+ }
+
+ error_reported = true;
+ match kind {
+ ReadKind::Copy => {
+ let err = this
+ .report_use_while_mutably_borrowed(location, place_span, borrow);
+ this.buffer_error(err);
+ }
+ ReadKind::Borrow(bk) => {
+ let err =
+ this.report_conflicting_borrow(location, place_span, bk, borrow);
+ this.buffer_error(err);
+ }
+ }
+ Control::Break
+ }
+
+ (Reservation(kind) | Activation(kind, _) | Write(kind), _) => {
+ match rw {
+ Reservation(..) => {
+ debug!(
+ "recording invalid reservation of \
+ place: {:?}",
+ place_span.0
+ );
+ this.reservation_error_reported.insert(place_span.0);
+ }
+ Activation(_, activating) => {
+ debug!(
+ "observing check_place for activation of \
+ borrow_index: {:?}",
+ activating
+ );
+ }
+ Read(..) | Write(..) => {}
+ }
+
+ error_reported = true;
+ match kind {
+ WriteKind::MutableBorrow(bk) => {
+ let err =
+ this.report_conflicting_borrow(location, place_span, bk, borrow);
+ this.buffer_error(err);
+ }
+ WriteKind::StorageDeadOrDrop => this
+ .report_borrowed_value_does_not_live_long_enough(
+ location,
+ borrow,
+ place_span,
+ Some(kind),
+ ),
+ WriteKind::Mutate => {
+ this.report_illegal_mutation_of_borrowed(location, place_span, borrow)
+ }
+ WriteKind::Move => {
+ this.report_move_out_while_borrowed(location, place_span, borrow)
+ }
+ }
+ Control::Break
+ }
+ },
+ );
+
+ error_reported
+ }
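+
+    // Editor's sketch (not compiler code) of the two-phase-borrow handling
+    // above. For
+    //
+    //     let mut v: Vec<usize> = vec![1];
+    //     v.push(v.len());
+    //
+    // the `&mut v` for `push` is *reserved* before the argument `v.len()`
+    // is evaluated and only *activated* at the call itself; the shared
+    // read in `v.len()` therefore sees a mere reservation, which the
+    // `Read` arm above accepts via the `is_active` check.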
+
+ fn mutate_place(
+ &mut self,
+ location: Location,
+ place_span: (Place<'tcx>, Span),
+ kind: AccessDepth,
+ flow_state: &Flows<'cx, 'tcx>,
+ ) {
+ // Write of P[i] or *P requires P init'd.
+ self.check_if_assigned_path_is_moved(location, place_span, flow_state);
+
+ // Special case: you can assign an immutable local variable
+ // (e.g., `x = ...`) so long as it has never been initialized
+ // before (at this point in the flow).
+ if let Some(local) = place_span.0.as_local() {
+ if let Mutability::Not = self.body.local_decls[local].mutability {
+ // check for reassignments to immutable local variables
+ self.check_if_reassignment_to_immutable_state(
+ location, local, place_span, flow_state,
+ );
+ return;
+ }
+ }
+
+ // Otherwise, use the normal access permission rules.
+ self.access_place(
+ location,
+ place_span,
+ (kind, Write(WriteKind::Mutate)),
+ LocalMutationIsAllowed::No,
+ flow_state,
+ );
+ }
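+
+    // Editor's illustration of the immutable-local special case above:
+    //
+    //     let x;   // immutable local, deferred initialization
+    //     x = 1;   // OK: a first assignment counts as initialization
+    //     x = 2;   // ERROR: reassignment to immutable `x`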
+
+ fn consume_rvalue(
+ &mut self,
+ location: Location,
+ (rvalue, span): (&'cx Rvalue<'tcx>, Span),
+ flow_state: &Flows<'cx, 'tcx>,
+ ) {
+ match *rvalue {
+ Rvalue::Ref(_ /*rgn*/, bk, place) => {
+ let access_kind = match bk {
+ BorrowKind::Shallow => {
+ (Shallow(Some(ArtificialField::ShallowBorrow)), Read(ReadKind::Borrow(bk)))
+ }
+ BorrowKind::Shared => (Deep, Read(ReadKind::Borrow(bk))),
+ BorrowKind::Unique | BorrowKind::Mut { .. } => {
+ let wk = WriteKind::MutableBorrow(bk);
+ if allow_two_phase_borrow(bk) {
+ (Deep, Reservation(wk))
+ } else {
+ (Deep, Write(wk))
+ }
+ }
+ };
+
+ self.access_place(
+ location,
+ (place, span),
+ access_kind,
+ LocalMutationIsAllowed::No,
+ flow_state,
+ );
+
+ let action = if bk == BorrowKind::Shallow {
+ InitializationRequiringAction::MatchOn
+ } else {
+ InitializationRequiringAction::Borrow
+ };
+
+ self.check_if_path_or_subpath_is_moved(
+ location,
+ action,
+ (place.as_ref(), span),
+ flow_state,
+ );
+ }
+
+ Rvalue::AddressOf(mutability, place) => {
+ let access_kind = match mutability {
+ Mutability::Mut => (
+ Deep,
+ Write(WriteKind::MutableBorrow(BorrowKind::Mut {
+ allow_two_phase_borrow: false,
+ })),
+ ),
+ Mutability::Not => (Deep, Read(ReadKind::Borrow(BorrowKind::Shared))),
+ };
+
+ self.access_place(
+ location,
+ (place, span),
+ access_kind,
+ LocalMutationIsAllowed::No,
+ flow_state,
+ );
+
+ self.check_if_path_or_subpath_is_moved(
+ location,
+ InitializationRequiringAction::Borrow,
+ (place.as_ref(), span),
+ flow_state,
+ );
+ }
+
+ Rvalue::ThreadLocalRef(_) => {}
+
+ Rvalue::Use(ref operand)
+ | Rvalue::Repeat(ref operand, _)
+ | Rvalue::UnaryOp(_ /*un_op*/, ref operand)
+ | Rvalue::Cast(_ /*cast_kind*/, ref operand, _ /*ty*/)
+ | Rvalue::ShallowInitBox(ref operand, _ /*ty*/) => {
+ self.consume_operand(location, (operand, span), flow_state)
+ }
+ Rvalue::CopyForDeref(place) => {
+ self.access_place(
+ location,
+ (place, span),
+ (Deep, Read(ReadKind::Copy)),
+ LocalMutationIsAllowed::No,
+ flow_state,
+ );
+
+ // Finally, check if path was already moved.
+ self.check_if_path_or_subpath_is_moved(
+ location,
+ InitializationRequiringAction::Use,
+ (place.as_ref(), span),
+ flow_state,
+ );
+ }
+
+ Rvalue::Len(place) | Rvalue::Discriminant(place) => {
+ let af = match *rvalue {
+ Rvalue::Len(..) => Some(ArtificialField::ArrayLength),
+ Rvalue::Discriminant(..) => None,
+ _ => unreachable!(),
+ };
+ self.access_place(
+ location,
+ (place, span),
+ (Shallow(af), Read(ReadKind::Copy)),
+ LocalMutationIsAllowed::No,
+ flow_state,
+ );
+ self.check_if_path_or_subpath_is_moved(
+ location,
+ InitializationRequiringAction::Use,
+ (place.as_ref(), span),
+ flow_state,
+ );
+ }
+
+ Rvalue::BinaryOp(_bin_op, box (ref operand1, ref operand2))
+ | Rvalue::CheckedBinaryOp(_bin_op, box (ref operand1, ref operand2)) => {
+ self.consume_operand(location, (operand1, span), flow_state);
+ self.consume_operand(location, (operand2, span), flow_state);
+ }
+
+ Rvalue::NullaryOp(_op, _ty) => {
+ // nullary ops take no dynamic input; no borrowck effect.
+ }
+
+ Rvalue::Aggregate(ref aggregate_kind, ref operands) => {
+ // We need to report back the list of mutable upvars that were
+ // moved into the closure and subsequently used by the closure,
+ // in order to populate our used_mut set.
+ match **aggregate_kind {
+ AggregateKind::Closure(def_id, _) | AggregateKind::Generator(def_id, _, _) => {
+ let BorrowCheckResult { used_mut_upvars, .. } =
+ self.infcx.tcx.mir_borrowck(def_id);
+ debug!("{:?} used_mut_upvars={:?}", def_id, used_mut_upvars);
+ for field in used_mut_upvars {
+ self.propagate_closure_used_mut_upvar(&operands[field.index()]);
+ }
+ }
+ AggregateKind::Adt(..)
+ | AggregateKind::Array(..)
+ | AggregateKind::Tuple { .. } => (),
+ }
+
+ for operand in operands {
+ self.consume_operand(location, (operand, span), flow_state);
+ }
+ }
+ }
+ }
+
+ fn propagate_closure_used_mut_upvar(&mut self, operand: &Operand<'tcx>) {
+ let propagate_closure_used_mut_place = |this: &mut Self, place: Place<'tcx>| {
+ // We have three possibilities here:
+ // a. We are modifying something through a mut-ref
+ // b. We are modifying something that is local to our parent
+            // c. The current body is a nested closure, and we are modifying a
+            //    path starting from a Place captured by our parent closure.
+
+            // Handle (c): the path being modified is exactly the path
+            // captured by our parent closure.
+ if let Some(field) = this.is_upvar_field_projection(place.as_ref()) {
+ this.used_mut_upvars.push(field);
+ return;
+ }
+
+ for (place_ref, proj) in place.iter_projections().rev() {
+ // Handle (a)
+ if proj == ProjectionElem::Deref {
+ match place_ref.ty(this.body(), this.infcx.tcx).ty.kind() {
+ // We aren't modifying a variable directly
+ ty::Ref(_, _, hir::Mutability::Mut) => return,
+
+ _ => {}
+ }
+ }
+
+ // Handle (c)
+ if let Some(field) = this.is_upvar_field_projection(place_ref) {
+ this.used_mut_upvars.push(field);
+ return;
+ }
+ }
+
+ // Handle(b)
+ this.used_mut.insert(place.local);
+ };
+
+ // This relies on the current way that by-value
+ // captures of a closure are copied/moved directly
+ // when generating MIR.
+ match *operand {
+ Operand::Move(place) | Operand::Copy(place) => {
+ match place.as_local() {
+ Some(local) if !self.body.local_decls[local].is_user_variable() => {
+ if self.body.local_decls[local].ty.is_mutable_ptr() {
+ // The variable will be marked as mutable by the borrow.
+ return;
+ }
+ // This is an edge case where we have a `move` closure
+ // inside a non-move closure, and the inner closure
+ // contains a mutation:
+ //
+ // let mut i = 0;
+ // || { move || { i += 1; }; };
+ //
+ // In this case our usual strategy of assuming that the
+ // variable will be captured by mutable reference is
+ // wrong, since `i` can be copied into the inner
+ // closure from a shared reference.
+ //
+ // As such we have to search for the local that this
+ // capture comes from and mark it as being used as mut.
+
+ let temp_mpi = self.move_data.rev_lookup.find_local(local);
+ let init = if let [init_index] = *self.move_data.init_path_map[temp_mpi] {
+ &self.move_data.inits[init_index]
+ } else {
+ bug!("temporary should be initialized exactly once")
+ };
+
+ let InitLocation::Statement(loc) = init.location else {
+ bug!("temporary initialized in arguments")
+ };
+
+ let body = self.body;
+ let bbd = &body[loc.block];
+ let stmt = &bbd.statements[loc.statement_index];
+ debug!("temporary assigned in: stmt={:?}", stmt);
+
+ if let StatementKind::Assign(box (_, Rvalue::Ref(_, _, source))) = stmt.kind
+ {
+ propagate_closure_used_mut_place(self, source);
+ } else {
+ bug!(
+ "closures should only capture user variables \
+ or references to user variables"
+ );
+ }
+ }
+ _ => propagate_closure_used_mut_place(self, place),
+ }
+ }
+ Operand::Constant(..) => {}
+ }
+ }
+
+ fn consume_operand(
+ &mut self,
+ location: Location,
+ (operand, span): (&'cx Operand<'tcx>, Span),
+ flow_state: &Flows<'cx, 'tcx>,
+ ) {
+ match *operand {
+ Operand::Copy(place) => {
+ // copy of place: check if this is "copy of frozen path"
+ // (FIXME: see check_loans.rs)
+ self.access_place(
+ location,
+ (place, span),
+ (Deep, Read(ReadKind::Copy)),
+ LocalMutationIsAllowed::No,
+ flow_state,
+ );
+
+ // Finally, check if path was already moved.
+ self.check_if_path_or_subpath_is_moved(
+ location,
+ InitializationRequiringAction::Use,
+ (place.as_ref(), span),
+ flow_state,
+ );
+ }
+ Operand::Move(place) => {
+ // move of place: check if this is move of already borrowed path
+ self.access_place(
+ location,
+ (place, span),
+ (Deep, Write(WriteKind::Move)),
+ LocalMutationIsAllowed::Yes,
+ flow_state,
+ );
+
+ // Finally, check if path was already moved.
+ self.check_if_path_or_subpath_is_moved(
+ location,
+ InitializationRequiringAction::Use,
+ (place.as_ref(), span),
+ flow_state,
+ );
+ }
+ Operand::Constant(_) => {}
+ }
+ }
+
+ /// Checks whether a borrow of this place is invalidated when the function
+    /// exits.
+ fn check_for_invalidation_at_exit(
+ &mut self,
+ location: Location,
+ borrow: &BorrowData<'tcx>,
+ span: Span,
+ ) {
+ debug!("check_for_invalidation_at_exit({:?})", borrow);
+ let place = borrow.borrowed_place;
+ let mut root_place = PlaceRef { local: place.local, projection: &[] };
+
+ // FIXME(nll-rfc#40): do more precise destructor tracking here. For now
+ // we just know that all locals are dropped at function exit (otherwise
+ // we'll have a memory leak) and assume that all statics have a destructor.
+ //
+ // FIXME: allow thread-locals to borrow other thread locals?
+
+ let (might_be_alive, will_be_dropped) =
+ if self.body.local_decls[root_place.local].is_ref_to_thread_local() {
+ // Thread-locals might be dropped after the function exits
+ // We have to dereference the outer reference because
+ // borrows don't conflict behind shared references.
+ root_place.projection = TyCtxtConsts::DEREF_PROJECTION;
+ (true, true)
+ } else {
+ (false, self.locals_are_invalidated_at_exit)
+ };
+
+ if !will_be_dropped {
+ debug!("place_is_invalidated_at_exit({:?}) - won't be dropped", place);
+ return;
+ }
+
+ let sd = if might_be_alive { Deep } else { Shallow(None) };
+
+ if places_conflict::borrow_conflicts_with_place(
+ self.infcx.tcx,
+ &self.body,
+ place,
+ borrow.kind,
+ root_place,
+ sd,
+ places_conflict::PlaceConflictBias::Overlap,
+ ) {
+ debug!("check_for_invalidation_at_exit({:?}): INVALID", place);
+ // FIXME: should be talking about the region lifetime instead
+ // of just a span here.
+ let span = self.infcx.tcx.sess.source_map().end_point(span);
+ self.report_borrowed_value_does_not_live_long_enough(
+ location,
+ borrow,
+ (place, span),
+ None,
+ )
+ }
+ }
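+
+    // Editor's sketch of the situation this guards against: a borrow of
+    // function-local data whose region is still live at the `Return`
+    // terminator, e.g. (illustrative only)
+    //
+    //     fn f() -> &'static u32 {
+    //         let x = 0;
+    //         &x // borrow of `x` is invalidated when `f` exits
+    //     }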
+
+ /// Reports an error if this is a borrow of local data.
+    /// This is called for all `Yield` expressions on movable generators.
+ fn check_for_local_borrow(&mut self, borrow: &BorrowData<'tcx>, yield_span: Span) {
+ debug!("check_for_local_borrow({:?})", borrow);
+
+ if borrow_of_local_data(borrow.borrowed_place) {
+ let err = self.cannot_borrow_across_generator_yield(
+ self.retrieve_borrow_spans(borrow).var_or_use(),
+ yield_span,
+ );
+
+ self.buffer_error(err);
+ }
+ }
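+
+    // Editor's illustration (nightly, `#![feature(generators)]`): in a
+    // movable generator such as
+    //
+    //     let g = || { let x = 0; let r = &x; yield; drop(r); };
+    //
+    // holding the borrow `r` of local data across the `yield` is rejected
+    // here, since the generator could be moved between resumptions, which
+    // would invalidate `r`.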
+
+ fn check_activations(&mut self, location: Location, span: Span, flow_state: &Flows<'cx, 'tcx>) {
+ // Two-phase borrow support: For each activation that is newly
+ // generated at this statement, check if it interferes with
+ // another borrow.
+ let borrow_set = self.borrow_set.clone();
+ for &borrow_index in borrow_set.activations_at_location(location) {
+ let borrow = &borrow_set[borrow_index];
+
+ // only mutable borrows should be 2-phase
+ assert!(match borrow.kind {
+ BorrowKind::Shared | BorrowKind::Shallow => false,
+ BorrowKind::Unique | BorrowKind::Mut { .. } => true,
+ });
+
+ self.access_place(
+ location,
+ (borrow.borrowed_place, span),
+ (Deep, Activation(WriteKind::MutableBorrow(borrow.kind), borrow_index)),
+ LocalMutationIsAllowed::No,
+ flow_state,
+ );
+ // We do not need to call `check_if_path_or_subpath_is_moved`
+ // again, as we already called it when we made the
+ // initial reservation.
+ }
+ }
+
+ fn check_if_reassignment_to_immutable_state(
+ &mut self,
+ location: Location,
+ local: Local,
+ place_span: (Place<'tcx>, Span),
+ flow_state: &Flows<'cx, 'tcx>,
+ ) {
+ debug!("check_if_reassignment_to_immutable_state({:?})", local);
+
+ // Check if any of the initializations of `local` have happened yet:
+ if let Some(init_index) = self.is_local_ever_initialized(local, flow_state) {
+ // And, if so, report an error.
+ let init = &self.move_data.inits[init_index];
+ let span = init.span(&self.body);
+ self.report_illegal_reassignment(location, place_span, span, place_span.0);
+ }
+ }
+
+ fn check_if_full_path_is_moved(
+ &mut self,
+ location: Location,
+ desired_action: InitializationRequiringAction,
+ place_span: (PlaceRef<'tcx>, Span),
+ flow_state: &Flows<'cx, 'tcx>,
+ ) {
+ let maybe_uninits = &flow_state.uninits;
+
+ // Bad scenarios:
+ //
+ // 1. Move of `a.b.c`, use of `a.b.c`
+ // 2. Move of `a.b.c`, use of `a.b.c.d` (without first reinitializing `a.b.c.d`)
+ // 3. Uninitialized `(a.b.c: &_)`, use of `*a.b.c`; note that with
+ // partial initialization support, one might have `a.x`
+ // initialized but not `a.b`.
+ //
+ // OK scenarios:
+ //
+ // 4. Move of `a.b.c`, use of `a.b.d`
+ // 5. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
+ // 6. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
+ // must have been initialized for the use to be sound.
+ // 7. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`
+
+ // The dataflow tracks shallow prefixes distinctly (that is,
+ // field-accesses on P distinctly from P itself), in order to
+ // track substructure initialization separately from the whole
+ // structure.
+ //
+ // E.g., when looking at (*a.b.c).d, if the closest prefix for
+ // which we have a MovePath is `a.b`, then that means that the
+ // initialization state of `a.b` is all we need to inspect to
+ // know if `a.b.c` is valid (and from that we infer that the
+ // dereference and `.d` access is also valid, since we assume
+ // `a.b.c` is assigned a reference to an initialized and
+ // well-formed record structure.)
+
+ // Therefore, if we seek out the *closest* prefix for which we
+ // have a MovePath, that should capture the initialization
+ // state for the place scenario.
+ //
+ // This code covers scenarios 1, 2, and 3.
+
+ debug!("check_if_full_path_is_moved place: {:?}", place_span.0);
+ let (prefix, mpi) = self.move_path_closest_to(place_span.0);
+ if maybe_uninits.contains(mpi) {
+ self.report_use_of_moved_or_uninitialized(
+ location,
+ desired_action,
+ (prefix, place_span.0, place_span.1),
+ mpi,
+ );
+ } // Only query longest prefix with a MovePath, not further
+ // ancestors; dataflow recurs on children when parents
+ // move (to support partial (re)inits).
+ //
+ // (I.e., querying parents breaks scenario 7; but may want
+ // to do such a query based on partial-init feature-gate.)
+ }
+
+ /// Subslices correspond to multiple move paths, so we iterate through the
+    /// elements of the base array. For each element we check:
+    ///
+    /// * Does this element overlap with our slice?
+    /// * Is any part of it uninitialized?
+ fn check_if_subslice_element_is_moved(
+ &mut self,
+ location: Location,
+ desired_action: InitializationRequiringAction,
+ place_span: (PlaceRef<'tcx>, Span),
+ maybe_uninits: &ChunkedBitSet<MovePathIndex>,
+ from: u64,
+ to: u64,
+ ) {
+ if let Some(mpi) = self.move_path_for_place(place_span.0) {
+ let move_paths = &self.move_data.move_paths;
+
+ let root_path = &move_paths[mpi];
+ for (child_mpi, child_move_path) in root_path.children(move_paths) {
+ let last_proj = child_move_path.place.projection.last().unwrap();
+ if let ProjectionElem::ConstantIndex { offset, from_end, .. } = last_proj {
+ debug_assert!(!from_end, "Array constant indexing shouldn't be `from_end`.");
+
+ if (from..to).contains(offset) {
+ let uninit_child =
+ self.move_data.find_in_move_path_or_its_descendants(child_mpi, |mpi| {
+ maybe_uninits.contains(mpi)
+ });
+
+ if let Some(uninit_child) = uninit_child {
+ self.report_use_of_moved_or_uninitialized(
+ location,
+ desired_action,
+ (place_span.0, place_span.0, place_span.1),
+ uninit_child,
+ );
+ return; // don't bother finding other problems.
+ }
+ }
+ }
+ }
+ }
+ }
+
+ fn check_if_path_or_subpath_is_moved(
+ &mut self,
+ location: Location,
+ desired_action: InitializationRequiringAction,
+ place_span: (PlaceRef<'tcx>, Span),
+ flow_state: &Flows<'cx, 'tcx>,
+ ) {
+ let maybe_uninits = &flow_state.uninits;
+
+ // Bad scenarios:
+ //
+        // 1. Move of `a.b.c`, use of `a` or `a.b`
+ // 2. All bad scenarios from `check_if_full_path_is_moved`
+ //
+ // OK scenarios:
+ //
+ // 3. Move of `a.b.c`, use of `a.b.d`
+ // 4. Uninitialized `a.x`, initialized `a.b`, use of `a.b`
+ // 5. Copied `(a.b: &_)`, use of `*(a.b).c`; note that `a.b`
+ // must have been initialized for the use to be sound.
+ // 6. Move of `a.b.c` then reinit of `a.b.c.d`, use of `a.b.c.d`
+
+ self.check_if_full_path_is_moved(location, desired_action, place_span, flow_state);
+
+ if let Some((place_base, ProjectionElem::Subslice { from, to, from_end: false })) =
+ place_span.0.last_projection()
+ {
+ let place_ty = place_base.ty(self.body(), self.infcx.tcx);
+ if let ty::Array(..) = place_ty.ty.kind() {
+ self.check_if_subslice_element_is_moved(
+ location,
+ desired_action,
+ (place_base, place_span.1),
+ maybe_uninits,
+ from,
+ to,
+ );
+ return;
+ }
+ }
+
+        // A move of any shallow suffix of `place` also interferes
+        // with an attempt to use `place`. This is scenario 1 above.
+        //
+        // (This is distinct from the scenarios handled by
+        // `check_if_full_path_is_moved`, because `place` does not
+        // interfere with suffixes of its prefixes; e.g., a move of
+        // `a.b.c` does not interfere with a use of `a.b.d`, which is
+        // the OK scenario 3 above.)
+
+ debug!("check_if_path_or_subpath_is_moved place: {:?}", place_span.0);
+ if let Some(mpi) = self.move_path_for_place(place_span.0) {
+ let uninit_mpi = self
+ .move_data
+ .find_in_move_path_or_its_descendants(mpi, |mpi| maybe_uninits.contains(mpi));
+
+ if let Some(uninit_mpi) = uninit_mpi {
+ self.report_use_of_moved_or_uninitialized(
+ location,
+ desired_action,
+ (place_span.0, place_span.0, place_span.1),
+ uninit_mpi,
+ );
+ return; // don't bother finding other problems.
+ }
+ }
+ }
+
+ /// Currently MoveData does not store entries for all places in
+    /// the input MIR. For example, it will currently filter out
+ /// places that are Copy; thus we do not track places of shared
+ /// reference type. This routine will walk up a place along its
+ /// prefixes, searching for a foundational place that *is*
+ /// tracked in the MoveData.
+ ///
+    /// The search always succeeds in current MIR, since every place is
+    /// based on some `Local` and every `Local` has a move path; failing
+    /// to find one is therefore a panic.
+ fn move_path_closest_to(&mut self, place: PlaceRef<'tcx>) -> (PlaceRef<'tcx>, MovePathIndex) {
+ match self.move_data.rev_lookup.find(place) {
+ LookupResult::Parent(Some(mpi)) | LookupResult::Exact(mpi) => {
+ (self.move_data.move_paths[mpi].place.as_ref(), mpi)
+ }
+ LookupResult::Parent(None) => panic!("should have move path for every Local"),
+ }
+ }
+
+ fn move_path_for_place(&mut self, place: PlaceRef<'tcx>) -> Option<MovePathIndex> {
+        // If this returns `None`, then there is no move path corresponding
+ // to a direct owner of `place` (which means there is nothing
+ // that borrowck tracks for its analysis).
+
+ match self.move_data.rev_lookup.find(place) {
+ LookupResult::Parent(_) => None,
+ LookupResult::Exact(mpi) => Some(mpi),
+ }
+ }
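+
+    // Editor's illustration of the two lookups above: if only `a` and `a.b`
+    // have move paths, then `move_path_closest_to(a.b.c)` returns the path
+    // for the prefix `a.b`, while `move_path_for_place(a.b.c)` returns
+    // `None` because there is no exact entry for `a.b.c`.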
+
+ fn check_if_assigned_path_is_moved(
+ &mut self,
+ location: Location,
+ (place, span): (Place<'tcx>, Span),
+ flow_state: &Flows<'cx, 'tcx>,
+ ) {
+ debug!("check_if_assigned_path_is_moved place: {:?}", place);
+
+ // None case => assigning to `x` does not require `x` be initialized.
+ for (place_base, elem) in place.iter_projections().rev() {
+ match elem {
+                // assigning to P[i] requires P to be valid.
+                ProjectionElem::Index(_/*operand*/) |
+                ProjectionElem::ConstantIndex { .. } |
+                ProjectionElem::Downcast(_/*adt_def*/, _/*variant_idx*/) =>
+ // assigning to (P->variant) is okay if assigning to `P` is okay
+ //
+ // FIXME: is this true even if P is an adt with a dtor?
+ { }
+
+ // assigning to (*P) requires P to be initialized
+ ProjectionElem::Deref => {
+ self.check_if_full_path_is_moved(
+ location, InitializationRequiringAction::Use,
+ (place_base, span), flow_state);
+ // (base initialized; no need to
+ // recur further)
+ break;
+ }
+
+ ProjectionElem::Subslice { .. } => {
+ panic!("we don't allow assignments to subslices, location: {:?}",
+ location);
+ }
+
+ ProjectionElem::Field(..) => {
+ // if type of `P` has a dtor, then
+ // assigning to `P.f` requires `P` itself
+ // be already initialized
+ let tcx = self.infcx.tcx;
+ let base_ty = place_base.ty(self.body(), tcx).ty;
+ match base_ty.kind() {
+ ty::Adt(def, _) if def.has_dtor(tcx) => {
+ self.check_if_path_or_subpath_is_moved(
+ location, InitializationRequiringAction::Assignment,
+ (place_base, span), flow_state);
+
+ // (base initialized; no need to
+ // recur further)
+ break;
+ }
+
+                    // Once `let s; s.x = V; read(s.x);`
+ // is allowed, remove this match arm.
+ ty::Adt(..) | ty::Tuple(..) => {
+ check_parent_of_field(self, location, place_base, span, flow_state);
+
+ // rust-lang/rust#21232, #54499, #54986: during period where we reject
+ // partial initialization, do not complain about unnecessary `mut` on
+ // an attempt to do a partial initialization.
+ self.used_mut.insert(place.local);
+ }
+
+ _ => {}
+ }
+ }
+ }
+ }
+
+ fn check_parent_of_field<'cx, 'tcx>(
+ this: &mut MirBorrowckCtxt<'cx, 'tcx>,
+ location: Location,
+ base: PlaceRef<'tcx>,
+ span: Span,
+ flow_state: &Flows<'cx, 'tcx>,
+ ) {
+ // rust-lang/rust#21232: Until Rust allows reads from the
+ // initialized parts of partially initialized structs, we
+ // will, starting with the 2018 edition, reject attempts
+ // to write to structs that are not fully initialized.
+ //
+ // In other words, *until* we allow this:
+ //
+ // 1. `let mut s; s.x = Val; read(s.x);`
+ //
+ // we will for now disallow this:
+ //
+ // 2. `let mut s; s.x = Val;`
+ //
+ // and also this:
+ //
+ // 3. `let mut s = ...; drop(s); s.x=Val;`
+ //
+ // This does not use check_if_path_or_subpath_is_moved,
+ // because we want to *allow* reinitializations of fields:
+ // e.g., want to allow
+ //
+ // `let mut s = ...; drop(s.x); s.x=Val;`
+ //
+ // This does not use check_if_full_path_is_moved on
+ // `base`, because that would report an error about the
+ // `base` as a whole, but in this scenario we *really*
+ // want to report an error about the actual thing that was
+ // moved, which may be some prefix of `base`.
+
+ // Shallow so that we'll stop at any dereference; we'll
+ // report errors about issues with such bases elsewhere.
+ let maybe_uninits = &flow_state.uninits;
+
+ // Find the shortest uninitialized prefix you can reach
+ // without going over a Deref.
+ let mut shortest_uninit_seen = None;
+ for prefix in this.prefixes(base, PrefixSet::Shallow) {
+ let Some(mpi) = this.move_path_for_place(prefix) else { continue };
+
+ if maybe_uninits.contains(mpi) {
+ debug!(
+ "check_parent_of_field updating shortest_uninit_seen from {:?} to {:?}",
+ shortest_uninit_seen,
+ Some((prefix, mpi))
+ );
+ shortest_uninit_seen = Some((prefix, mpi));
+ } else {
+ debug!("check_parent_of_field {:?} is definitely initialized", (prefix, mpi));
+ }
+ }
+
+ if let Some((prefix, mpi)) = shortest_uninit_seen {
+ // Check for a reassignment into an uninitialized field of a union (for example,
+ // after a move out). In this case, do not report an error here. There is an
+ // exception, if this is the first assignment into the union (that is, there is
+ // no move out from an earlier location) then this is an attempt at initialization
+ // of the union - we should error in that case.
+ let tcx = this.infcx.tcx;
+ if base.ty(this.body(), tcx).ty.is_union() {
+ if this.move_data.path_map[mpi].iter().any(|moi| {
+ this.move_data.moves[*moi].source.is_predecessor_of(location, this.body)
+ }) {
+ return;
+ }
+ }
+
+ this.report_use_of_moved_or_uninitialized(
+ location,
+ InitializationRequiringAction::PartialAssignment,
+ (prefix, base, span),
+ mpi,
+ );
+ }
+ }
+ }
+
+    /// Checks the permissions for the given place and read or write kind.
+ ///
+ /// Returns `true` if an error is reported.
+ fn check_access_permissions(
+ &mut self,
+ (place, span): (Place<'tcx>, Span),
+ kind: ReadOrWrite,
+ is_local_mutation_allowed: LocalMutationIsAllowed,
+ flow_state: &Flows<'cx, 'tcx>,
+ location: Location,
+ ) -> bool {
+ debug!(
+ "check_access_permissions({:?}, {:?}, is_local_mutation_allowed: {:?})",
+ place, kind, is_local_mutation_allowed
+ );
+
+ let error_access;
+ let the_place_err;
+
+ match kind {
+ Reservation(WriteKind::MutableBorrow(
+ borrow_kind @ (BorrowKind::Unique | BorrowKind::Mut { .. }),
+ ))
+ | Write(WriteKind::MutableBorrow(
+ borrow_kind @ (BorrowKind::Unique | BorrowKind::Mut { .. }),
+ )) => {
+ let is_local_mutation_allowed = match borrow_kind {
+ BorrowKind::Unique => LocalMutationIsAllowed::Yes,
+ BorrowKind::Mut { .. } => is_local_mutation_allowed,
+ BorrowKind::Shared | BorrowKind::Shallow => unreachable!(),
+ };
+ match self.is_mutable(place.as_ref(), is_local_mutation_allowed) {
+ Ok(root_place) => {
+ self.add_used_mut(root_place, flow_state);
+ return false;
+ }
+ Err(place_err) => {
+ error_access = AccessKind::MutableBorrow;
+ the_place_err = place_err;
+ }
+ }
+ }
+ Reservation(WriteKind::Mutate) | Write(WriteKind::Mutate) => {
+ match self.is_mutable(place.as_ref(), is_local_mutation_allowed) {
+ Ok(root_place) => {
+ self.add_used_mut(root_place, flow_state);
+ return false;
+ }
+ Err(place_err) => {
+ error_access = AccessKind::Mutate;
+ the_place_err = place_err;
+ }
+ }
+ }
+
+ Reservation(
+ WriteKind::Move
+ | WriteKind::StorageDeadOrDrop
+ | WriteKind::MutableBorrow(BorrowKind::Shared)
+ | WriteKind::MutableBorrow(BorrowKind::Shallow),
+ )
+ | Write(
+ WriteKind::Move
+ | WriteKind::StorageDeadOrDrop
+ | WriteKind::MutableBorrow(BorrowKind::Shared)
+ | WriteKind::MutableBorrow(BorrowKind::Shallow),
+ ) => {
+ if self.is_mutable(place.as_ref(), is_local_mutation_allowed).is_err()
+                    && self.has_buffered_errors()
+ {
+ // rust-lang/rust#46908: In pure NLL mode this code path should be
+ // unreachable, but we use `delay_span_bug` because we can hit this when
+ // dereferencing a non-Copy raw pointer *and* have `-Ztreat-err-as-bug`
+ // enabled. We don't want to ICE for that case, as other errors will have
+ // been emitted (#52262).
+ self.infcx.tcx.sess.delay_span_bug(
+ span,
+ &format!(
+ "Accessing `{:?}` with the kind `{:?}` shouldn't be possible",
+ place, kind,
+ ),
+ );
+ }
+ return false;
+ }
+ Activation(..) => {
+                // Permission checks were already done at the Reservation point.
+ return false;
+ }
+ Read(
+ ReadKind::Borrow(
+ BorrowKind::Unique
+ | BorrowKind::Mut { .. }
+ | BorrowKind::Shared
+ | BorrowKind::Shallow,
+ )
+ | ReadKind::Copy,
+ ) => {
+ // Access authorized
+ return false;
+ }
+ }
+
+ // rust-lang/rust#21232, #54986: during period where we reject
+ // partial initialization, do not complain about mutability
+ // errors except for actual mutation (as opposed to an attempt
+ // to do a partial initialization).
+ let previously_initialized =
+ self.is_local_ever_initialized(place.local, flow_state).is_some();
+
+ // at this point, we have set up the error reporting state.
+ if previously_initialized {
+ self.report_mutability_error(place, span, the_place_err, error_access, location);
+ true
+ } else {
+ false
+ }
+ }
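+
+    // Editor's illustration: `let x = 0; let r = &mut x;` reaches the
+    // `MutableBorrow` arm above and reports that `x` is not declared
+    // mutable, while a plain reassignment `x = 1;` is instead caught by
+    // `check_if_reassignment_to_immutable_state`.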
+
+ fn is_local_ever_initialized(
+ &self,
+ local: Local,
+ flow_state: &Flows<'cx, 'tcx>,
+ ) -> Option<InitIndex> {
+ let mpi = self.move_data.rev_lookup.find_local(local);
+ let ii = &self.move_data.init_path_map[mpi];
+ for &index in ii {
+ if flow_state.ever_inits.contains(index) {
+ return Some(index);
+ }
+ }
+ None
+ }
+
+ /// Adds the place into the used mutable variables set
+ fn add_used_mut(&mut self, root_place: RootPlace<'tcx>, flow_state: &Flows<'cx, 'tcx>) {
+ match root_place {
+ RootPlace { place_local: local, place_projection: [], is_local_mutation_allowed } => {
+ // If the local may have been initialized, and it is now currently being
+ // mutated, then it is justified to be annotated with the `mut`
+ // keyword, since the mutation may be a possible reassignment.
+ if is_local_mutation_allowed != LocalMutationIsAllowed::Yes
+ && self.is_local_ever_initialized(local, flow_state).is_some()
+ {
+ self.used_mut.insert(local);
+ }
+ }
+ RootPlace {
+ place_local: _,
+ place_projection: _,
+ is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
+ } => {}
+ RootPlace {
+ place_local,
+ place_projection: place_projection @ [.., _],
+ is_local_mutation_allowed: _,
+ } => {
+ if let Some(field) = self.is_upvar_field_projection(PlaceRef {
+ local: place_local,
+ projection: place_projection,
+ }) {
+ self.used_mut_upvars.push(field);
+ }
+ }
+ }
+ }
+
+ /// Whether this value can be written or borrowed mutably.
+ /// Returns the root place if the place passed in is a projection.
+ fn is_mutable(
+ &self,
+ place: PlaceRef<'tcx>,
+ is_local_mutation_allowed: LocalMutationIsAllowed,
+ ) -> Result<RootPlace<'tcx>, PlaceRef<'tcx>> {
+ debug!("is_mutable: place={:?}, is_local...={:?}", place, is_local_mutation_allowed);
+ match place.last_projection() {
+ None => {
+ let local = &self.body.local_decls[place.local];
+ match local.mutability {
+ Mutability::Not => match is_local_mutation_allowed {
+ LocalMutationIsAllowed::Yes => Ok(RootPlace {
+ place_local: place.local,
+ place_projection: place.projection,
+ is_local_mutation_allowed: LocalMutationIsAllowed::Yes,
+ }),
+ LocalMutationIsAllowed::ExceptUpvars => Ok(RootPlace {
+ place_local: place.local,
+ place_projection: place.projection,
+ is_local_mutation_allowed: LocalMutationIsAllowed::ExceptUpvars,
+ }),
+ LocalMutationIsAllowed::No => Err(place),
+ },
+ Mutability::Mut => Ok(RootPlace {
+ place_local: place.local,
+ place_projection: place.projection,
+ is_local_mutation_allowed,
+ }),
+ }
+ }
+ Some((place_base, elem)) => {
+ match elem {
+ ProjectionElem::Deref => {
+ let base_ty = place_base.ty(self.body(), self.infcx.tcx).ty;
+
+ // Check the kind of deref to decide
+ match base_ty.kind() {
+ ty::Ref(_, _, mutbl) => {
+ match mutbl {
+ // Shared borrowed data is never mutable
+ hir::Mutability::Not => Err(place),
+ // Mutably borrowed data is mutable, but only if we have a
+ // unique path to the `&mut`
+ hir::Mutability::Mut => {
+ let mode = match self.is_upvar_field_projection(place) {
+ Some(field) if self.upvars[field.index()].by_ref => {
+ is_local_mutation_allowed
+ }
+ _ => LocalMutationIsAllowed::Yes,
+ };
+
+ self.is_mutable(place_base, mode)
+ }
+ }
+ }
+ ty::RawPtr(tnm) => {
+ match tnm.mutbl {
+ // `*const` raw pointers are not mutable
+ hir::Mutability::Not => Err(place),
+ // `*mut` raw pointers are always mutable, regardless of
+                                // context. Users must uphold safety themselves.
+ hir::Mutability::Mut => Ok(RootPlace {
+ place_local: place.local,
+ place_projection: place.projection,
+ is_local_mutation_allowed,
+ }),
+ }
+ }
+ // `Box<T>` owns its content, so mutable if its location is mutable
+ _ if base_ty.is_box() => {
+ self.is_mutable(place_base, is_local_mutation_allowed)
+ }
+                        // Deref should only occur for references, pointers, or boxes.
+ _ => bug!("Deref of unexpected type: {:?}", base_ty),
+ }
+ }
+ // All other projections are owned by their base path, so mutable if
+ // base path is mutable
+ ProjectionElem::Field(..)
+ | ProjectionElem::Index(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Downcast(..) => {
+ let upvar_field_projection = self.is_upvar_field_projection(place);
+ if let Some(field) = upvar_field_projection {
+ let upvar = &self.upvars[field.index()];
+ debug!(
+ "is_mutable: upvar.mutability={:?} local_mutation_is_allowed={:?} \
+ place={:?}, place_base={:?}",
+ upvar, is_local_mutation_allowed, place, place_base
+ );
+ match (upvar.place.mutability, is_local_mutation_allowed) {
+ (
+ Mutability::Not,
+ LocalMutationIsAllowed::No
+ | LocalMutationIsAllowed::ExceptUpvars,
+ ) => Err(place),
+ (Mutability::Not, LocalMutationIsAllowed::Yes)
+ | (Mutability::Mut, _) => {
+ // Subtle: this is an upvar
+ // reference, so it looks like
+ // `self.foo` -- we want to double
+ // check that the location `*self`
+ // is mutable (i.e., this is not a
+ // `Fn` closure). But if that
+ // check succeeds, we want to
+ // *blame* the mutability on
+ // `place` (that is,
+ // `self.foo`). This is used to
+ // propagate the info about
+ // whether mutability declarations
+ // are used outwards, so that we register
+ // the outer variable as mutable. Otherwise a
+ // test like this fails to record the `mut`
+ // as needed:
+ //
+ // ```
+ // fn foo<F: FnOnce()>(_f: F) { }
+ // fn main() {
+ // let var = Vec::new();
+ // foo(move || {
+ // var.push(1);
+ // });
+ // }
+ // ```
+ let _ =
+ self.is_mutable(place_base, is_local_mutation_allowed)?;
+ Ok(RootPlace {
+ place_local: place.local,
+ place_projection: place.projection,
+ is_local_mutation_allowed,
+ })
+ }
+ }
+ } else {
+ self.is_mutable(place_base, is_local_mutation_allowed)
+ }
+ }
+ }
+ }
+ }
+ }
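+
+    // Editor's illustration: for `let x = 10; let p = &x;`, a write through
+    // `*p` peels the `Deref`, finds the shared `&i32`, and returns
+    // `Err(place)`; a write through a `*mut i32` instead hits the raw
+    // pointer arm and is `Ok` regardless of context (unsafe code must
+    // uphold mutability itself).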
+
+ /// If `place` is a field projection, and the field is being projected from a closure type,
+ /// then returns the index of the field being projected. Note that this closure will always
+ /// be `self` in the current MIR, because that is the only time we directly access the fields
+ /// of a closure type.
+ fn is_upvar_field_projection(&self, place_ref: PlaceRef<'tcx>) -> Option<Field> {
+ path_utils::is_upvar_field_projection(self.infcx.tcx, &self.upvars, place_ref, self.body())
+ }
+}
+
+mod error {
+ use rustc_errors::ErrorGuaranteed;
+
+ use super::*;
+
+ pub struct BorrowckErrors<'tcx> {
+ /// This field keeps track of move errors that are to be reported for given move indices.
+ ///
+ /// There are situations where many errors can be reported for a single move out (see #53807)
+ /// and we want only the best of those errors.
+ ///
+        /// The `report_use_of_moved_or_uninitialized` function checks this map and replaces the
+        /// stored diagnostic (if there is one) if the `Place` of the error being reported is a
+        /// prefix of the `Place` of the previously stored diagnostic. This happens instead of
+        /// buffering the error. Once
+ /// all move errors have been reported, any diagnostics in this map are added to the buffer
+ /// to be emitted.
+ ///
+ /// `BTreeMap` is used to preserve the order of insertions when iterating. This is necessary
+ /// when errors in the map are being re-added to the error buffer so that errors with the
+ /// same primary span come out in a consistent order.
+ buffered_move_errors:
+ BTreeMap<Vec<MoveOutIndex>, (PlaceRef<'tcx>, DiagnosticBuilder<'tcx, ErrorGuaranteed>)>,
+        /// Buffer of diagnostics to be reported.
+ buffered: Vec<Diagnostic>,
+ /// Set to Some if we emit an error during borrowck
+ tainted_by_errors: Option<ErrorGuaranteed>,
+ }
+
+ impl BorrowckErrors<'_> {
+ pub fn new() -> Self {
+ BorrowckErrors {
+ buffered_move_errors: BTreeMap::new(),
+ buffered: Default::default(),
+ tainted_by_errors: None,
+ }
+ }
+
+ // FIXME(eddyb) this is a suboptimal API because `tainted_by_errors` is
+ // set before any emission actually happens (weakening the guarantee).
+ pub fn buffer_error(&mut self, t: DiagnosticBuilder<'_, ErrorGuaranteed>) {
+ self.tainted_by_errors = Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
+ t.buffer(&mut self.buffered);
+ }
+
+ pub fn buffer_non_error_diag(&mut self, t: DiagnosticBuilder<'_, ()>) {
+ t.buffer(&mut self.buffered);
+ }
+
+ pub fn set_tainted_by_errors(&mut self) {
+ self.tainted_by_errors = Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
+ }
+ }
+
+ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+ pub fn buffer_error(&mut self, t: DiagnosticBuilder<'_, ErrorGuaranteed>) {
+ self.errors.buffer_error(t);
+ }
+
+ pub fn buffer_non_error_diag(&mut self, t: DiagnosticBuilder<'_, ()>) {
+ self.errors.buffer_non_error_diag(t);
+ }
+
+ pub fn buffer_move_error(
+ &mut self,
+ move_out_indices: Vec<MoveOutIndex>,
+ place_and_err: (PlaceRef<'tcx>, DiagnosticBuilder<'tcx, ErrorGuaranteed>),
+ ) -> bool {
+ if let Some((_, diag)) =
+ self.errors.buffered_move_errors.insert(move_out_indices, place_and_err)
+ {
+ // Cancel the old diagnostic so we don't ICE
+ diag.cancel();
+ false
+ } else {
+ true
+ }
+ }
+
+ pub fn emit_errors(&mut self) -> Option<ErrorGuaranteed> {
+ // Buffer any move errors that we collected and de-duplicated.
+ for (_, (_, diag)) in std::mem::take(&mut self.errors.buffered_move_errors) {
+ // We have already set tainted for this error, so just buffer it.
+ diag.buffer(&mut self.errors.buffered);
+ }
+
+ if !self.errors.buffered.is_empty() {
+ self.errors.buffered.sort_by_key(|diag| diag.sort_span);
+
+ for mut diag in self.errors.buffered.drain(..) {
+ self.infcx.tcx.sess.diagnostic().emit_diagnostic(&mut diag);
+ }
+ }
+
+ self.errors.tainted_by_errors
+ }
+
+        pub fn has_buffered_errors(&self) -> bool {
+            // `true` iff any diagnostics are currently buffered.
+            !self.errors.buffered.is_empty()
+        }
+
+ pub fn has_move_error(
+ &self,
+ move_out_indices: &[MoveOutIndex],
+ ) -> Option<&(PlaceRef<'tcx>, DiagnosticBuilder<'cx, ErrorGuaranteed>)> {
+ self.errors.buffered_move_errors.get(move_out_indices)
+ }
+ }
+}
+
+/// The degree of overlap between 2 places for borrow-checking.
+enum Overlap {
+ /// The places might partially overlap - in this case, we give
+ /// up and say that they might conflict. This occurs when
+ /// different fields of a union are borrowed. For example,
+ /// if `u` is a union, we have no way of telling how disjoint
+    /// `u.a.x` and `u.b.y` are.
+ Arbitrary,
+ /// The places have the same type, and are either completely disjoint
+ /// or equal - i.e., they can't "partially" overlap as can occur with
+ /// unions. This is the "base case" on which we recur for extensions
+ /// of the place.
+ EqualOrDisjoint,
+ /// The places are disjoint, so we know all extensions of them
+ /// will also be disjoint.
+ Disjoint,
+}
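+
+// Editor's illustration: given `union U { a: (u8, u8), b: u16 }` and `u: U`,
+// the pair `u.a.0` / `u.b` must be treated as `Overlap::Arbitrary`, whereas
+// `u.a.0` / `u.a.1` pass through `EqualOrDisjoint` at the shared `a` field
+// and come out `Disjoint` at the differing tuple fields.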
diff --git a/compiler/rustc_borrowck/src/location.rs b/compiler/rustc_borrowck/src/location.rs
new file mode 100644
index 000000000..70a311694
--- /dev/null
+++ b/compiler/rustc_borrowck/src/location.rs
@@ -0,0 +1,107 @@
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::{BasicBlock, Body, Location};
+
+/// Maps from a MIR Location, which identifies a particular
+/// statement within a basic block, to a "rich location", which
+/// identifies a position at a finer granularity. In particular, we distinguish
+/// the *start* of a statement and the *mid-point*. The mid-point is
+/// the point *just* before the statement takes effect; in particular,
+/// for an assignment `A = B`, it is the point where B is about to be
+/// written into A. This mid-point is a kind of hack to work around
+/// our inability to track the position information at sufficient
+/// granularity through outlives relations; however, the rich location
+/// table serves another purpose: it compresses locations from
+/// multiple words into a single u32.
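+///
+/// An illustrative numbering (editor's sketch) for a block `bb0` with two
+/// statements and a terminator `T`:
+///
+/// ```text
+/// start(bb0[0]) = 0   mid(bb0[0]) = 1
+/// start(bb0[1]) = 2   mid(bb0[1]) = 3
+/// start(bb0[T]) = 4   mid(bb0[T]) = 5
+/// ```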
+pub struct LocationTable {
+ num_points: usize,
+ statements_before_block: IndexVec<BasicBlock, usize>,
+}
+
+rustc_index::newtype_index! {
+ pub struct LocationIndex {
+ DEBUG_FORMAT = "LocationIndex({})"
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum RichLocation {
+ Start(Location),
+ Mid(Location),
+}
+
+impl LocationTable {
+ pub(crate) fn new(body: &Body<'_>) -> Self {
+ let mut num_points = 0;
+ let statements_before_block = body
+ .basic_blocks()
+ .iter()
+ .map(|block_data| {
+ let v = num_points;
+ num_points += (block_data.statements.len() + 1) * 2;
+ v
+ })
+ .collect();
+
+ debug!("LocationTable(statements_before_block={:#?})", statements_before_block);
+ debug!("LocationTable: num_points={:#?}", num_points);
+
+ Self { num_points, statements_before_block }
+ }
+
+ pub fn all_points(&self) -> impl Iterator<Item = LocationIndex> {
+ (0..self.num_points).map(LocationIndex::new)
+ }
+
+ pub fn start_index(&self, location: Location) -> LocationIndex {
+ let Location { block, statement_index } = location;
+ let start_index = self.statements_before_block[block];
+ LocationIndex::new(start_index + statement_index * 2)
+ }
+
+ pub fn mid_index(&self, location: Location) -> LocationIndex {
+ let Location { block, statement_index } = location;
+ let start_index = self.statements_before_block[block];
+ LocationIndex::new(start_index + statement_index * 2 + 1)
+ }
+
+ pub fn to_location(&self, index: LocationIndex) -> RichLocation {
+ let point_index = index.index();
+
+ // Find the basic block. We have a vector with the
+ // starting index of the statement in each block. Imagine
+ // we have statement #22, and we have a vector like:
+ //
+ // [0, 10, 20]
+ //
+ // In that case, this represents point_index 2 of
+ // basic block BB2. We know this because BB0 accounts for
+        // 0..10, BB1 accounts for 10..20, and BB2 accounts for
+ // 20...
+ //
+ // To compute this, we could do a binary search, but
+ // because I am lazy we instead iterate through to find
+ // the last point where the "first index" (0, 10, or 20)
+ // was less than the statement index (22). In our case, this will
+ // be (BB2, 20).
+ let (block, &first_index) = self
+ .statements_before_block
+ .iter_enumerated()
+ .filter(|(_, first_index)| **first_index <= point_index)
+ .last()
+ .unwrap();
+
+ let statement_index = (point_index - first_index) / 2;
+ if index.is_start() {
+ RichLocation::Start(Location { block, statement_index })
+ } else {
+ RichLocation::Mid(Location { block, statement_index })
+ }
+ }
+}
+
+impl LocationIndex {
+ fn is_start(self) -> bool {
+ // even indices are start points; odd indices are mid points
+ (self.index() % 2) == 0
+ }
+}
diff --git a/compiler/rustc_borrowck/src/member_constraints.rs b/compiler/rustc_borrowck/src/member_constraints.rs
new file mode 100644
index 000000000..43253a2aa
--- /dev/null
+++ b/compiler/rustc_borrowck/src/member_constraints.rs
@@ -0,0 +1,230 @@
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::IndexVec;
+use rustc_middle::infer::MemberConstraint;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::Span;
+use std::hash::Hash;
+use std::ops::Index;
+
+/// Compactly stores a set of `R0 member of [R1...Rn]` constraints,
+/// indexed by the region `R0`.
+pub(crate) struct MemberConstraintSet<'tcx, R>
+where
+ R: Copy + Eq,
+{
+ /// Stores the first "member" constraint for a given `R0`. This is an
+ /// index into the `constraints` vector below.
+ first_constraints: FxHashMap<R, NllMemberConstraintIndex>,
+
+ /// Stores the data about each `R0 member of [R1..Rn]` constraint.
+ /// These are organized into a linked list, so each constraint
+ /// contains the index of the next constraint with the same `R0`.
+ constraints: IndexVec<NllMemberConstraintIndex, NllMemberConstraint<'tcx>>,
+
+ /// Stores the `R1..Rn` regions for *all* sets. For any given
+ /// constraint, we keep two indices so that we can pull out a
+ /// slice.
+ choice_regions: Vec<ty::RegionVid>,
+}
+
+/// Represents a `R0 member of [R1..Rn]` constraint
+pub(crate) struct NllMemberConstraint<'tcx> {
+ next_constraint: Option<NllMemberConstraintIndex>,
+
+ /// The span where the hidden type was instantiated.
+ pub(crate) definition_span: Span,
+
+ /// The hidden type in which `R0` appears. (Used in error reporting.)
+ pub(crate) hidden_ty: Ty<'tcx>,
+
+ pub(crate) key: ty::OpaqueTypeKey<'tcx>,
+
+ /// The region `R0`.
+ pub(crate) member_region_vid: ty::RegionVid,
+
+ /// Index of `R1` in `choice_regions` vector from `MemberConstraintSet`.
+ start_index: usize,
+
+ /// Index of `Rn` in `choice_regions` vector from `MemberConstraintSet`.
+ end_index: usize,
+}
+
+rustc_index::newtype_index! {
+ pub(crate) struct NllMemberConstraintIndex {
+ DEBUG_FORMAT = "MemberConstraintIndex({})"
+ }
+}
+
+impl Default for MemberConstraintSet<'_, ty::RegionVid> {
+ fn default() -> Self {
+ Self {
+ first_constraints: Default::default(),
+ constraints: Default::default(),
+ choice_regions: Default::default(),
+ }
+ }
+}
+
+impl<'tcx> MemberConstraintSet<'tcx, ty::RegionVid> {
+ /// Pushes a member constraint into the set.
+ ///
+ /// The input member constraint `m_c` is in the form produced by
+ /// the `rustc_middle::infer` code.
+ ///
+ /// The `to_region_vid` callback fn is used to convert the regions
+ /// within into `RegionVid` format -- it typically consults the
+ /// `UniversalRegions` data structure that is known to the caller
+ /// (but which this code is unaware of).
+ pub(crate) fn push_constraint(
+ &mut self,
+ m_c: &MemberConstraint<'tcx>,
+ mut to_region_vid: impl FnMut(ty::Region<'tcx>) -> ty::RegionVid,
+ ) {
+ debug!("push_constraint(m_c={:?})", m_c);
+ let member_region_vid: ty::RegionVid = to_region_vid(m_c.member_region);
+ let next_constraint = self.first_constraints.get(&member_region_vid).cloned();
+ let start_index = self.choice_regions.len();
+ let end_index = start_index + m_c.choice_regions.len();
+ debug!("push_constraint: member_region_vid={:?}", member_region_vid);
+ let constraint_index = self.constraints.push(NllMemberConstraint {
+ next_constraint,
+ member_region_vid,
+ definition_span: m_c.definition_span,
+ hidden_ty: m_c.hidden_ty,
+ key: m_c.key,
+ start_index,
+ end_index,
+ });
+ self.first_constraints.insert(member_region_vid, constraint_index);
+ self.choice_regions.extend(m_c.choice_regions.iter().map(|&r| to_region_vid(r)));
+ }
+}
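+
+// Editor's sketch of the resulting storage: pushing `R0 member of [R1, R2]`
+// and then `R0 member of [R3]` yields
+//
+//     first_constraints: { R0 -> c1 }
+//     constraints:       c0 { next: None,     start: 0, end: 2 }
+//                        c1 { next: Some(c0), start: 2, end: 3 }
+//     choice_regions:    [R1, R2, R3]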
+
+impl<'tcx, R1> MemberConstraintSet<'tcx, R1>
+where
+ R1: Copy + Hash + Eq,
+{
+ /// Remap the "member region" key using `map_fn`, producing a new
+ /// member constraint set. This is used in the NLL code to map from
+    /// the original `RegionVid` to an SCC index. In some cases, we
+    /// may have multiple `R1` values mapping to the same `R2` key -- that
+    /// is OK; the two sets will be merged.
+ pub(crate) fn into_mapped<R2>(
+ self,
+ mut map_fn: impl FnMut(R1) -> R2,
+ ) -> MemberConstraintSet<'tcx, R2>
+ where
+ R2: Copy + Hash + Eq,
+ {
+ // We can re-use most of the original data, just tweaking the
+ // linked list links a bit.
+ //
+ // For example if we had two keys `Ra` and `Rb` that both now
+ // wind up mapped to the same key `S`, we would append the
+ // linked list for `Ra` onto the end of the linked list for
+ // `Rb` (or vice versa) -- this basically just requires
+ // rewriting the final link from one list to point at the other
+ // other (see `append_list`).
+
+ let MemberConstraintSet { first_constraints, mut constraints, choice_regions } = self;
+
+ let mut first_constraints2 = FxHashMap::default();
+ first_constraints2.reserve(first_constraints.len());
+
+ for (r1, start1) in first_constraints {
+ let r2 = map_fn(r1);
+ if let Some(&start2) = first_constraints2.get(&r2) {
+ append_list(&mut constraints, start1, start2);
+ }
+ first_constraints2.insert(r2, start1);
+ }
+
+ MemberConstraintSet { first_constraints: first_constraints2, constraints, choice_regions }
+ }
+}
+
+impl<'tcx, R> MemberConstraintSet<'tcx, R>
+where
+ R: Copy + Hash + Eq,
+{
+ pub(crate) fn all_indices(
+ &self,
+ ) -> impl Iterator<Item = NllMemberConstraintIndex> + Captures<'tcx> + '_ {
+ self.constraints.indices()
+ }
+
+ /// Iterate down the constraint indices associated with a given
+    /// member region. You can then use `choice_regions` and other
+ /// methods to access data.
+ pub(crate) fn indices(
+ &self,
+ member_region_vid: R,
+ ) -> impl Iterator<Item = NllMemberConstraintIndex> + Captures<'tcx> + '_ {
+ let mut next = self.first_constraints.get(&member_region_vid).cloned();
+ std::iter::from_fn(move || -> Option<NllMemberConstraintIndex> {
+ if let Some(current) = next {
+ next = self.constraints[current].next_constraint;
+ Some(current)
+ } else {
+ None
+ }
+ })
+ }
+
+ /// Returns the "choice regions" for a given member
+ /// constraint. This is the `R1..Rn` from a constraint like:
+ ///
+ /// ```text
+ /// R0 member of [R1..Rn]
+ /// ```
+ pub(crate) fn choice_regions(&self, pci: NllMemberConstraintIndex) -> &[ty::RegionVid] {
+ let NllMemberConstraint { start_index, end_index, .. } = &self.constraints[pci];
+ &self.choice_regions[*start_index..*end_index]
+ }
+}
+
+impl<'tcx, R> Index<NllMemberConstraintIndex> for MemberConstraintSet<'tcx, R>
+where
+ R: Copy + Eq,
+{
+ type Output = NllMemberConstraint<'tcx>;
+
+ fn index(&self, i: NllMemberConstraintIndex) -> &NllMemberConstraint<'tcx> {
+ &self.constraints[i]
+ }
+}
+
+/// Given a linked list starting at `source_list` and another linked
+/// list starting at `target_list`, modify `target_list` so that it is
+/// followed by `source_list`.
+///
+/// Before:
+///
+/// ```text
+/// target_list: A -> B -> C -> (None)
+/// source_list: D -> E -> F -> (None)
+/// ```
+///
+/// After:
+///
+/// ```text
+/// target_list: A -> B -> C -> D -> E -> F -> (None)
+/// ```
+fn append_list(
+ constraints: &mut IndexVec<NllMemberConstraintIndex, NllMemberConstraint<'_>>,
+ target_list: NllMemberConstraintIndex,
+ source_list: NllMemberConstraintIndex,
+) {
+ let mut p = target_list;
+ loop {
+        let r = &mut constraints[p];
+ match r.next_constraint {
+ Some(q) => p = q,
+ None => {
+ r.next_constraint = Some(source_list);
+ return;
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs
new file mode 100644
index 000000000..0961203d7
--- /dev/null
+++ b/compiler/rustc_borrowck/src/nll.rs
@@ -0,0 +1,462 @@
+//! The entry point of the NLL borrow checker.
+
+use rustc_data_structures::vec_map::VecMap;
+use rustc_hir::def_id::LocalDefId;
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::mir::{create_dump_file, dump_enabled, dump_mir, PassWhere};
+use rustc_middle::mir::{
+ BasicBlock, Body, ClosureOutlivesSubject, ClosureRegionRequirements, LocalKind, Location,
+ Promoted,
+};
+use rustc_middle::ty::{self, OpaqueHiddenType, Region, RegionVid};
+use rustc_span::symbol::sym;
+use std::env;
+use std::fmt::Debug;
+use std::io;
+use std::path::PathBuf;
+use std::rc::Rc;
+use std::str::FromStr;
+
+use polonius_engine::{Algorithm, Output};
+
+use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
+use rustc_mir_dataflow::move_paths::{InitKind, InitLocation, MoveData};
+use rustc_mir_dataflow::ResultsCursor;
+
+use crate::{
+ borrow_set::BorrowSet,
+ constraint_generation,
+ diagnostics::RegionErrors,
+ facts::{AllFacts, AllFactsExt, RustcFacts},
+ invalidation,
+ location::LocationTable,
+ region_infer::{values::RegionValueElements, RegionInferenceContext},
+ renumber,
+ type_check::{self, MirTypeckRegionConstraints, MirTypeckResults},
+ universal_regions::UniversalRegions,
+ Upvar,
+};
+
+pub type PoloniusOutput = Output<RustcFacts>;
+
+/// The output of `nll::compute_regions`. This includes the computed `RegionInferenceContext`, any
+/// closure requirements to propagate, and any generated errors.
+pub(crate) struct NllOutput<'tcx> {
+ pub regioncx: RegionInferenceContext<'tcx>,
+ pub opaque_type_values: VecMap<LocalDefId, OpaqueHiddenType<'tcx>>,
+ pub polonius_input: Option<Box<AllFacts>>,
+ pub polonius_output: Option<Rc<PoloniusOutput>>,
+ pub opt_closure_req: Option<ClosureRegionRequirements<'tcx>>,
+ pub nll_errors: RegionErrors<'tcx>,
+}
+
+/// Rewrites the regions in the MIR to use NLL variables, also scraping out the set of universal
+/// regions (e.g., region parameters) declared on the function. That set will need to be given to
+/// `compute_regions`.
+#[instrument(skip(infcx, param_env, body, promoted), level = "debug")]
+pub(crate) fn replace_regions_in_mir<'cx, 'tcx>(
+ infcx: &InferCtxt<'cx, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body: &mut Body<'tcx>,
+ promoted: &mut IndexVec<Promoted, Body<'tcx>>,
+) -> UniversalRegions<'tcx> {
+ let def = body.source.with_opt_param().as_local().unwrap();
+
+ debug!(?def);
+
+ // Compute named region information. This also renumbers the inputs/outputs.
+ let universal_regions = UniversalRegions::new(infcx, def, param_env);
+
+ // Replace all remaining regions with fresh inference variables.
+ renumber::renumber_mir(infcx, body, promoted);
+
+ dump_mir(infcx.tcx, None, "renumber", &0, body, |_, _| Ok(()));
+
+ universal_regions
+}
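+
+// Editor's illustration: for `fn f<'a>(x: &'a u32) -> &'a u32`, the named
+// lifetime `'a` becomes one of the universal regions returned here, while
+// every region appearing in the body's MIR is replaced with a fresh region
+// inference variable by `renumber_mir`.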
+
+// This function populates an `AllFacts` instance with the base facts about
+// `MovePath`s that are needed for the move analysis.
+fn populate_polonius_move_facts(
+ all_facts: &mut AllFacts,
+ move_data: &MoveData<'_>,
+ location_table: &LocationTable,
+ body: &Body<'_>,
+) {
+ all_facts
+ .path_is_var
+ .extend(move_data.rev_lookup.iter_locals_enumerated().map(|(l, r)| (r, l)));
+
+ for (child, move_path) in move_data.move_paths.iter_enumerated() {
+ if let Some(parent) = move_path.parent {
+ all_facts.child_path.push((child, parent));
+ }
+ }
+
+ let fn_entry_start = location_table
+ .start_index(Location { block: BasicBlock::from_u32(0u32), statement_index: 0 });
+
+ // initialized_at
+ for init in move_data.inits.iter() {
+ match init.location {
+ InitLocation::Statement(location) => {
+ let block_data = &body[location.block];
+ let is_terminator = location.statement_index == block_data.statements.len();
+
+ if is_terminator && init.kind == InitKind::NonPanicPathOnly {
+ // We are at the terminator of an init that has a panic path,
+ // and where the init should not happen on panic
+
+ for successor in block_data.terminator().successors() {
+ if body[successor].is_cleanup {
+ continue;
+ }
+
+ // The initialization happened in (or rather, when arriving at)
+ // the successors, but not in the unwind block.
+ let first_statement = Location { block: successor, statement_index: 0 };
+ all_facts
+ .path_assigned_at_base
+ .push((init.path, location_table.start_index(first_statement)));
+ }
+ } else {
+ // In all other cases, the initialization just happens at the
+ // midpoint, like any other effect.
+ all_facts
+ .path_assigned_at_base
+ .push((init.path, location_table.mid_index(location)));
+ }
+ }
+ // Arguments are initialized on function entry
+ InitLocation::Argument(local) => {
+ assert!(body.local_kind(local) == LocalKind::Arg);
+ all_facts.path_assigned_at_base.push((init.path, fn_entry_start));
+ }
+ }
+ }
+
+ for (local, path) in move_data.rev_lookup.iter_locals_enumerated() {
+ if body.local_kind(local) != LocalKind::Arg {
+ // Non-arguments start out deinitialised; we simulate this with an
+ // initial move:
+ all_facts.path_moved_at_base.push((path, fn_entry_start));
+ }
+ }
+
+ // moved_out_at
+ // deinitialisation is assumed to always happen!
+ all_facts
+ .path_moved_at_base
+ .extend(move_data.moves.iter().map(|mo| (mo.path, location_table.mid_index(mo.source))));
+}
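+
+// Editor's illustration of the `NonPanicPathOnly` case above: for
+// `let x = f();`, the call terminator initializes `x` only if `f` returns
+// normally, so the `path_assigned_at_base` fact is emitted at the start of
+// the success block (and not in the cleanup block), rather than at the
+// call's mid-point.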
+
+/// Computes the (non-lexical) regions from the input MIR.
+///
+/// This may result in errors being reported.
+pub(crate) fn compute_regions<'cx, 'tcx>(
+ infcx: &InferCtxt<'cx, 'tcx>,
+ universal_regions: UniversalRegions<'tcx>,
+ body: &Body<'tcx>,
+ promoted: &IndexVec<Promoted, Body<'tcx>>,
+ location_table: &LocationTable,
+ param_env: ty::ParamEnv<'tcx>,
+ flow_inits: &mut ResultsCursor<'cx, 'tcx, MaybeInitializedPlaces<'cx, 'tcx>>,
+ move_data: &MoveData<'tcx>,
+ borrow_set: &BorrowSet<'tcx>,
+ upvars: &[Upvar<'tcx>],
+ use_polonius: bool,
+) -> NllOutput<'tcx> {
+ let mut all_facts =
+ (use_polonius || AllFacts::enabled(infcx.tcx)).then_some(AllFacts::default());
+
+ let universal_regions = Rc::new(universal_regions);
+
+ let elements = &Rc::new(RegionValueElements::new(&body));
+
+ // Run the MIR type-checker.
+ let MirTypeckResults { constraints, universal_region_relations, opaque_type_values } =
+ type_check::type_check(
+ infcx,
+ param_env,
+ body,
+ promoted,
+ &universal_regions,
+ location_table,
+ borrow_set,
+ &mut all_facts,
+ flow_inits,
+ move_data,
+ elements,
+ upvars,
+ use_polonius,
+ );
+
+ if let Some(all_facts) = &mut all_facts {
+ let _prof_timer = infcx.tcx.prof.generic_activity("polonius_fact_generation");
+ all_facts.universal_region.extend(universal_regions.universal_regions());
+ populate_polonius_move_facts(all_facts, move_data, location_table, &body);
+
+ // Emit universal regions facts, and their relations, for Polonius.
+ //
+ // 1: universal regions are modeled in Polonius as a pair:
+ // - the universal region vid itself.
+ //   - a "placeholder loan" associated with this universal region. Since these loans don't
+ //     exist in the `borrow_set`, their `BorrowIndex`es are synthesized by adding the
+ //     universal region index to the existing number of loans, as if they followed them
+ //     in the set.
+ //
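+ // As a hypothetical illustration of the arithmetic below: with
+ // `borrow_count == 2` (loans 0 and 1 already in the `borrow_set`) and two
+ // universal regions `'?0` and `'?1`, the synthesized placeholder loan
+ // indices would be 2 and 3 respectively.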
+ let borrow_count = borrow_set.len();
+ debug!(
+ "compute_regions: polonius placeholders, num_universals={}, borrow_count={}",
+ universal_regions.len(),
+ borrow_count
+ );
+
+ for universal_region in universal_regions.universal_regions() {
+ let universal_region_idx = universal_region.index();
+ let placeholder_loan_idx = borrow_count + universal_region_idx;
+ all_facts.placeholder.push((universal_region, placeholder_loan_idx.into()));
+ }
+
+ // 2: the universal region relations `outlives` constraints are emitted as
+ // `known_placeholder_subset` facts.
+ for (fr1, fr2) in universal_region_relations.known_outlives() {
+ if fr1 != fr2 {
+ debug!(
+ "compute_regions: emitting polonius `known_placeholder_subset` \
+ fr1={:?}, fr2={:?}",
+ fr1, fr2
+ );
+ all_facts.known_placeholder_subset.push((fr1, fr2));
+ }
+ }
+ }
+
+ // Create the region inference context, taking ownership of the
+ // region inference data that was contained in `infcx`, and the
+ // base constraints generated by the type-check.
+ let var_origins = infcx.take_region_var_origins();
+ let MirTypeckRegionConstraints {
+ placeholder_indices,
+ placeholder_index_to_region: _,
+ mut liveness_constraints,
+ outlives_constraints,
+ member_constraints,
+ closure_bounds_mapping,
+ universe_causes,
+ type_tests,
+ } = constraints;
+ let placeholder_indices = Rc::new(placeholder_indices);
+
+ constraint_generation::generate_constraints(
+ infcx,
+ &mut liveness_constraints,
+ &mut all_facts,
+ location_table,
+ &body,
+ borrow_set,
+ );
+
+ let mut regioncx = RegionInferenceContext::new(
+ var_origins,
+ universal_regions,
+ placeholder_indices,
+ universal_region_relations,
+ outlives_constraints,
+ member_constraints,
+ closure_bounds_mapping,
+ universe_causes,
+ type_tests,
+ liveness_constraints,
+ elements,
+ );
+
+ // Generate various additional constraints.
+ invalidation::generate_invalidates(infcx.tcx, &mut all_facts, location_table, body, borrow_set);
+
+ let def_id = body.source.def_id();
+
+ // Dump facts if requested.
+ let polonius_output = all_facts.as_ref().and_then(|all_facts| {
+ if infcx.tcx.sess.opts.unstable_opts.nll_facts {
+ let def_path = infcx.tcx.def_path(def_id);
+ let dir_path = PathBuf::from(&infcx.tcx.sess.opts.unstable_opts.nll_facts_dir)
+ .join(def_path.to_filename_friendly_no_crate());
+ all_facts.write_to_dir(dir_path, location_table).unwrap();
+ }
+
+ if use_polonius {
+ let algorithm =
+ env::var("POLONIUS_ALGORITHM").unwrap_or_else(|_| String::from("Hybrid"));
+ let algorithm = Algorithm::from_str(&algorithm).unwrap();
+ debug!("compute_regions: using polonius algorithm {:?}", algorithm);
+ let _prof_timer = infcx.tcx.prof.generic_activity("polonius_analysis");
+ Some(Rc::new(Output::compute(&all_facts, algorithm, false)))
+ } else {
+ None
+ }
+ });
+
+ // Solve the region constraints.
+ let (closure_region_requirements, nll_errors) =
+ regioncx.solve(infcx, param_env, &body, polonius_output.clone());
+
+ if !nll_errors.is_empty() {
+ // Suppress unhelpful extra errors in `infer_opaque_types`.
+ infcx.set_tainted_by_errors();
+ }
+
+ let remapped_opaque_tys = regioncx.infer_opaque_types(&infcx, opaque_type_values);
+
+ NllOutput {
+ regioncx,
+ opaque_type_values: remapped_opaque_tys,
+ polonius_input: all_facts.map(Box::new),
+ polonius_output,
+ opt_closure_req: closure_region_requirements,
+ nll_errors,
+ }
+}
+
+pub(super) fn dump_mir_results<'a, 'tcx>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ body: &Body<'tcx>,
+ regioncx: &RegionInferenceContext<'tcx>,
+ closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
+) {
+ if !dump_enabled(infcx.tcx, "nll", body.source.def_id()) {
+ return;
+ }
+
+ dump_mir(infcx.tcx, None, "nll", &0, body, |pass_where, out| {
+ match pass_where {
+ // Before the CFG, dump out the values for each region variable.
+ PassWhere::BeforeCFG => {
+ regioncx.dump_mir(infcx.tcx, out)?;
+ writeln!(out, "|")?;
+
+ if let Some(closure_region_requirements) = closure_region_requirements {
+ writeln!(out, "| Free Region Constraints")?;
+ for_each_region_constraint(closure_region_requirements, &mut |msg| {
+ writeln!(out, "| {}", msg)
+ })?;
+ writeln!(out, "|")?;
+ }
+ }
+
+ PassWhere::BeforeLocation(_) => {}
+
+ PassWhere::AfterTerminator(_) => {}
+
+ PassWhere::BeforeBlock(_) | PassWhere::AfterLocation(_) | PassWhere::AfterCFG => {}
+ }
+ Ok(())
+ });
+
+ // Also dump the inference graph constraints as a graphviz file.
+ let _: io::Result<()> = try {
+ let mut file =
+ create_dump_file(infcx.tcx, "regioncx.all.dot", None, "nll", &0, body.source)?;
+ regioncx.dump_graphviz_raw_constraints(&mut file)?;
+ };
+
+ // Also dump the strongly-connected-component (SCC) constraint graph as a graphviz file.
+ let _: io::Result<()> = try {
+ let mut file =
+ create_dump_file(infcx.tcx, "regioncx.scc.dot", None, "nll", &0, body.source)?;
+ regioncx.dump_graphviz_scc_constraints(&mut file)?;
+ };
+}
+
+pub(super) fn dump_annotation<'a, 'tcx>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ body: &Body<'tcx>,
+ regioncx: &RegionInferenceContext<'tcx>,
+ closure_region_requirements: &Option<ClosureRegionRequirements<'_>>,
+ opaque_type_values: &VecMap<LocalDefId, OpaqueHiddenType<'tcx>>,
+ errors: &mut crate::error::BorrowckErrors<'tcx>,
+) {
+ let tcx = infcx.tcx;
+ let base_def_id = tcx.typeck_root_def_id(body.source.def_id());
+ if !tcx.has_attr(base_def_id, sym::rustc_regions) {
+ return;
+ }
+
+ // When the enclosing function is tagged with `#[rustc_regions]`,
+ // we dump out various bits of state as warnings. This is useful
+ // for verifying that the compiler is behaving as expected. These
+ // warnings focus on the closure region requirements -- for
+ // viewing the intraprocedural state, the -Zdump-mir output is
+ // better.
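+ //
+ // For example (hypothetical case): a plain function tagged with
+ // `#[rustc_regions]` is reported below as having "no external
+ // requirements", while a closure that imposes outlives obligations on
+ // its creator is reported with "external requirements".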
+
+ let mut err = if let Some(closure_region_requirements) = closure_region_requirements {
+ let mut err = tcx.sess.diagnostic().span_note_diag(body.span, "external requirements");
+
+ regioncx.annotate(tcx, &mut err);
+
+ err.note(&format!(
+ "number of external vids: {}",
+ closure_region_requirements.num_external_vids
+ ));
+
+ // Dump the region constraints we are imposing *between* those
+ // newly created variables.
+ for_each_region_constraint(closure_region_requirements, &mut |msg| {
+ err.note(msg);
+ Ok(())
+ })
+ .unwrap();
+
+ err
+ } else {
+ let mut err = tcx.sess.diagnostic().span_note_diag(body.span, "no external requirements");
+ regioncx.annotate(tcx, &mut err);
+
+ err
+ };
+
+ if !opaque_type_values.is_empty() {
+ err.note(&format!("Inferred opaque type values:\n{:#?}", opaque_type_values));
+ }
+
+ errors.buffer_non_error_diag(err);
+}
+
+fn for_each_region_constraint(
+ closure_region_requirements: &ClosureRegionRequirements<'_>,
+ with_msg: &mut dyn FnMut(&str) -> io::Result<()>,
+) -> io::Result<()> {
+ for req in &closure_region_requirements.outlives_requirements {
+ let subject: &dyn Debug = match &req.subject {
+ ClosureOutlivesSubject::Region(subject) => subject,
+ ClosureOutlivesSubject::Ty(ty) => ty,
+ };
+ with_msg(&format!("where {:?}: {:?}", subject, req.outlived_free_region,))?;
+ }
+ Ok(())
+}
+
+/// Right now, we piggyback on the `ReVar` to store our NLL inference
+/// regions. These are indexed with `RegionVid`. This method will
+/// assert that the region is a `ReVar` and extract its internal index.
+/// This is reasonable because in our MIR we replace all universal regions
+/// with inference variables.
+pub trait ToRegionVid {
+ fn to_region_vid(self) -> RegionVid;
+}
+
+impl<'tcx> ToRegionVid for Region<'tcx> {
+ fn to_region_vid(self) -> RegionVid {
+ if let ty::ReVar(vid) = *self { vid } else { bug!("region is not an ReVar: {:?}", self) }
+ }
+}
+
+impl ToRegionVid for RegionVid {
+ fn to_region_vid(self) -> RegionVid {
+ self
+ }
+}
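+
+// A minimal usage sketch (illustrative only): both calls below yield the
+// same `RegionVid`, assuming `r` is an NLL inference region (a `ReVar`):
+//
+//     let vid = r.to_region_vid();    // r: Region<'tcx>, must be a ReVar
+//     let same = vid.to_region_vid(); // identity on RegionVid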
+
+pub(crate) trait ConstraintDescription {
+ fn description(&self) -> &'static str;
+}
diff --git a/compiler/rustc_borrowck/src/path_utils.rs b/compiler/rustc_borrowck/src/path_utils.rs
new file mode 100644
index 000000000..b2c8dfc82
--- /dev/null
+++ b/compiler/rustc_borrowck/src/path_utils.rs
@@ -0,0 +1,171 @@
+use crate::borrow_set::{BorrowData, BorrowSet, TwoPhaseActivation};
+use crate::places_conflict;
+use crate::AccessDepth;
+use crate::BorrowIndex;
+use crate::Upvar;
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_middle::mir::BorrowKind;
+use rustc_middle::mir::{BasicBlock, Body, Field, Location, Place, PlaceRef, ProjectionElem};
+use rustc_middle::ty::TyCtxt;
+
+/// Returns `true` if the borrow represented by `kind` is
+/// allowed to be split into separate Reservation and
+/// Activation phases.
+pub(super) fn allow_two_phase_borrow(kind: BorrowKind) -> bool {
+ kind.allows_two_phase_borrow()
+}
+
+/// Control for the path borrow checking code
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub(super) enum Control {
+ Continue,
+ Break,
+}
+
+/// Encapsulates the idea of iterating over every borrow that involves a particular path
+pub(super) fn each_borrow_involving_path<'tcx, F, I, S>(
+ s: &mut S,
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ _location: Location,
+ access_place: (AccessDepth, Place<'tcx>),
+ borrow_set: &BorrowSet<'tcx>,
+ candidates: I,
+ mut op: F,
+) where
+ F: FnMut(&mut S, BorrowIndex, &BorrowData<'tcx>) -> Control,
+ I: Iterator<Item = BorrowIndex>,
+{
+ let (access, place) = access_place;
+
+ // FIXME: analogous code in check_loans first maps `place` to
+ // its base_path.
+
+ // Check for a loan restricting path P being used. Accounts for
+ // borrows of P, P.a.b, etc.
+ for i in candidates {
+ let borrowed = &borrow_set[i];
+
+ if places_conflict::borrow_conflicts_with_place(
+ tcx,
+ body,
+ borrowed.borrowed_place,
+ borrowed.kind,
+ place.as_ref(),
+ access,
+ places_conflict::PlaceConflictBias::Overlap,
+ ) {
+ debug!(
+ "each_borrow_involving_path: {:?} @ {:?} vs. {:?}/{:?}",
+ i, borrowed, place, access
+ );
+ let ctrl = op(s, i, borrowed);
+ if ctrl == Control::Break {
+ return;
+ }
+ }
+ }
+}
+
+pub(super) fn is_active<'tcx>(
+ dominators: &Dominators<BasicBlock>,
+ borrow_data: &BorrowData<'tcx>,
+ location: Location,
+) -> bool {
+ debug!("is_active(borrow_data={:?}, location={:?})", borrow_data, location);
+
+ let activation_location = match borrow_data.activation_location {
+ // If this is not a 2-phase borrow, it is always active.
+ TwoPhaseActivation::NotTwoPhase => return true,
+ // And if the unique 2-phase use is not an activation, then it is *never* active.
+ TwoPhaseActivation::NotActivated => return false,
+ // Otherwise, we derive info from the activation point `loc`:
+ TwoPhaseActivation::ActivatedAt(loc) => loc,
+ };
+
+ // Otherwise, it is active for every location *except* in between
+ // the reservation and the activation:
+ //
+ // X
+ // /
+ // R <--+ Except for this
+ // / \ | diamond
+ // \ / |
+ // A <------+
+ // |
+ // Z
+ //
+ // Note that we assume that:
+ // - the reservation R dominates the activation A
+ // - the activation A post-dominates the reservation R (ignoring unwinding edges).
+ //
+ // This means that there can't be an edge that leaves A and
+ // comes back into that diamond unless it passes through R.
+ //
+ // Suboptimal: In some cases, this code walks the dominator
+ // tree twice when it only has to be walked once. I am
+ // lazy. -nmatsakis
+
+ // If dominated by the activation A, then it is active. The
+ // activation occurs upon entering the point A, so this is
+ // also true if location == activation_location.
+ if activation_location.dominates(location, dominators) {
+ return true;
+ }
+
+ // The reservation starts *on exiting* the reservation block,
+ // so check if the location is dominated by R.successor. If so,
+ // this point falls in between the reservation and the activation.
+ let reserve_location = borrow_data.reserve_location.successor_within_block();
+ if reserve_location.dominates(location, dominators) {
+ false
+ } else {
+ // Otherwise, this point is outside the diamond, so
+ // consider the borrow active. This could happen for
+ // example if the borrow remains active around a loop (in
+ // which case it would be active also for the point R,
+ // which would generate an error).
+ true
+ }
+}
+
+/// Determines if a given borrow is borrowing local data.
+/// This is called for all Yield expressions on movable generators.
+pub(super) fn borrow_of_local_data(place: Place<'_>) -> bool {
+ // A reborrow of already-borrowed data is ignored;
+ // any errors will be caught on the initial borrow.
+ !place.is_indirect()
+}
+
+/// If `place` is a field projection, and the field is being projected from a closure type,
+/// then returns the index of the field being projected. Note that this closure will always
+/// be `self` in the current MIR, because that is the only time we directly access the fields
+/// of a closure type.
+pub(crate) fn is_upvar_field_projection<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ upvars: &[Upvar<'tcx>],
+ place_ref: PlaceRef<'tcx>,
+ body: &Body<'tcx>,
+) -> Option<Field> {
+ let mut place_ref = place_ref;
+ let mut by_ref = false;
+
+ if let Some((place_base, ProjectionElem::Deref)) = place_ref.last_projection() {
+ place_ref = place_base;
+ by_ref = true;
+ }
+
+ match place_ref.last_projection() {
+ Some((place_base, ProjectionElem::Field(field, _ty))) => {
+ let base_ty = place_base.ty(body, tcx).ty;
+ if (base_ty.is_closure() || base_ty.is_generator())
+ && (!by_ref || upvars[field.index()].by_ref)
+ {
+ Some(field)
+ } else {
+ None
+ }
+ }
+ _ => None,
+ }
+}
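+
+// Illustrative walk-through (hypothetical upvar): for a by-ref capture of
+// upvar 0, a place like `(*(_1.0))` first has its `Deref` stripped (setting
+// `by_ref`), leaving the field projection `_1.0` out of the closure
+// environment `_1`, so `Some(field 0)` is returned.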
diff --git a/compiler/rustc_borrowck/src/place_ext.rs b/compiler/rustc_borrowck/src/place_ext.rs
new file mode 100644
index 000000000..93d202e49
--- /dev/null
+++ b/compiler/rustc_borrowck/src/place_ext.rs
@@ -0,0 +1,81 @@
+use crate::borrow_set::LocalsStateAtExit;
+use rustc_hir as hir;
+use rustc_middle::mir::ProjectionElem;
+use rustc_middle::mir::{Body, Mutability, Place};
+use rustc_middle::ty::{self, TyCtxt};
+
+/// Extension methods for the `Place` type.
+pub(crate) trait PlaceExt<'tcx> {
+ /// Returns `true` if we can safely ignore borrows of this place.
+ /// This is true whenever there is no action that the user can do
+ /// to the place `self` that would invalidate the borrow. This is true
+ /// for borrows of raw pointer dereferents as well as shared references.
+ fn ignore_borrow(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ locals_state_at_exit: &LocalsStateAtExit,
+ ) -> bool;
+}
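+
+// Illustrative cases (hypothetical locals): with `p: *const T` and `r: &T`,
+// borrows of `*p` and `*r` can be ignored -- nothing the user can do to `p`
+// or `r` would invalidate them -- whereas a borrow of a mutable local must
+// be tracked.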
+
+impl<'tcx> PlaceExt<'tcx> for Place<'tcx> {
+ fn ignore_borrow(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ locals_state_at_exit: &LocalsStateAtExit,
+ ) -> bool {
+ // If a local variable is immutable, then we only need to track borrows to guard
+ // against two kinds of errors:
+ // * The variable being dropped while still borrowed (e.g., because the fn returns
+ // a reference to a local variable)
+ // * The variable being moved while still borrowed
+ //
+ // In particular, the variable cannot be mutated -- the "access checks" will fail --
+ // so we don't have to worry about mutation while borrowed.
+ if let LocalsStateAtExit::SomeAreInvalidated { has_storage_dead_or_moved } =
+ locals_state_at_exit
+ {
+ let ignore = !has_storage_dead_or_moved.contains(self.local)
+ && body.local_decls[self.local].mutability == Mutability::Not;
+ debug!("ignore_borrow: local {:?} => {:?}", self.local, ignore);
+ if ignore {
+ return true;
+ }
+ }
+
+ for (i, elem) in self.projection.iter().enumerate() {
+ let proj_base = &self.projection[..i];
+
+ if elem == ProjectionElem::Deref {
+ let ty = Place::ty_from(self.local, proj_base, body, tcx).ty;
+ match ty.kind() {
+ ty::Ref(_, _, hir::Mutability::Not) if i == 0 => {
+ // For references to thread-local statics, we do need
+ // to track the borrow.
+ if body.local_decls[self.local].is_ref_to_thread_local() {
+ continue;
+ }
+ return true;
+ }
+ ty::RawPtr(..) | ty::Ref(_, _, hir::Mutability::Not) => {
+ // For both derefs of raw pointers and `&T`
+ // references, the original path is `Copy` and
+ // therefore not significant. In particular,
+ // there is nothing the user can do to the
+ // original path that would invalidate the
+ // newly created reference -- and if there
+ // were, then the user could have copied the
+ // original path into a new variable and
+ // borrowed *that* one, leaving the original
+ // path unborrowed.
+ return true;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ false
+ }
+}
diff --git a/compiler/rustc_borrowck/src/places_conflict.rs b/compiler/rustc_borrowck/src/places_conflict.rs
new file mode 100644
index 000000000..97335fd0d
--- /dev/null
+++ b/compiler/rustc_borrowck/src/places_conflict.rs
@@ -0,0 +1,537 @@
+use crate::ArtificialField;
+use crate::Overlap;
+use crate::{AccessDepth, Deep, Shallow};
+use rustc_hir as hir;
+use rustc_middle::mir::{Body, BorrowKind, Local, Place, PlaceElem, PlaceRef, ProjectionElem};
+use rustc_middle::ty::{self, TyCtxt};
+use std::cmp::max;
+use std::iter;
+
+/// When checking if a place conflicts with another place, this enum is used to influence decisions
+/// where a place might be equal to or disjoint from another place, such as whether `a[i] == a[j]`.
+/// `PlaceConflictBias::Overlap` would bias toward assuming that `i` might equal `j` and that these
+/// places overlap. `PlaceConflictBias::NoOverlap` assumes that for the purposes of the predicate
+/// being run in the calling context, the conservative choice is to assume the compared indices
+/// are disjoint (and therefore, do not overlap).
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum PlaceConflictBias {
+ Overlap,
+ NoOverlap,
+}
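+
+// An illustrative sketch with hypothetical places `place_a_i` and
+// `place_a_j` denoting `a[i]` and `a[j]`: a caller that must not miss a
+// conflict passes `PlaceConflictBias::Overlap` (treating `i == j` as
+// possible), while one that must not invent a conflict passes
+// `PlaceConflictBias::NoOverlap`:
+//
+//     let conflict = places_conflict(tcx, body, place_a_i, place_a_j,
+//                                    PlaceConflictBias::Overlap);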
+
+/// Helper function for checking if places conflict with a mutable borrow and deep access depth.
+/// This is used to check for places conflicting outside of the borrow checking code (such as in
+/// dataflow).
+pub(crate) fn places_conflict<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ borrow_place: Place<'tcx>,
+ access_place: Place<'tcx>,
+ bias: PlaceConflictBias,
+) -> bool {
+ borrow_conflicts_with_place(
+ tcx,
+ body,
+ borrow_place,
+ BorrowKind::Mut { allow_two_phase_borrow: true },
+ access_place.as_ref(),
+ AccessDepth::Deep,
+ bias,
+ )
+}
+
+/// Checks whether the `borrow_place` conflicts with the `access_place` given a borrow kind and
+/// access depth. The `bias` parameter is used to determine how the unknowable (comparing runtime
+/// array indices, for example) should be interpreted - this depends on what the caller wants in
+/// order to make the conservative choice and preserve soundness.
+pub(super) fn borrow_conflicts_with_place<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ borrow_place: Place<'tcx>,
+ borrow_kind: BorrowKind,
+ access_place: PlaceRef<'tcx>,
+ access: AccessDepth,
+ bias: PlaceConflictBias,
+) -> bool {
+ debug!(
+ "borrow_conflicts_with_place({:?}, {:?}, {:?}, {:?})",
+ borrow_place, access_place, access, bias,
+ );
+
+ // This Local/Local case is handled by the more general code below, but
+ // it's so common that it's a speed win to check for it first.
+ if let Some(l1) = borrow_place.as_local() && let Some(l2) = access_place.as_local() {
+ return l1 == l2;
+ }
+
+ place_components_conflict(tcx, body, borrow_place, borrow_kind, access_place, access, bias)
+}
+
+fn place_components_conflict<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ borrow_place: Place<'tcx>,
+ borrow_kind: BorrowKind,
+ access_place: PlaceRef<'tcx>,
+ access: AccessDepth,
+ bias: PlaceConflictBias,
+) -> bool {
+ // The borrowck rules for proving disjointness are applied from the "root" of the
+ // borrow forwards, iterating over "similar" projections in lockstep until
+ // we can prove overlap one way or another. Essentially, we treat `Overlap` as
+ // a monoid and report a conflict if the product ends up not being `Disjoint`.
+ //
+ // At each step, if we didn't run out of borrow or place, we know that our elements
+ // have the same type, and that they only overlap if they are identical.
+ //
+ // For example, if we are comparing these:
+ // BORROW: (*x1[2].y).z.a
+ // ACCESS: (*x1[i].y).w.b
+ //
+ // Then our steps are:
+ // x1 | x1 -- places are the same
+ // x1[2] | x1[i] -- equal or disjoint (disjoint if indexes differ)
+ // x1[2].y | x1[i].y -- equal or disjoint
+ // *x1[2].y | *x1[i].y -- equal or disjoint
+ // (*x1[2].y).z | (*x1[i].y).w -- we are disjoint and don't need to check more!
+ //
+ // Because `zip` does potentially bad things to the iterator inside, this loop
+ // also handles the case where the access might be a *prefix* of the borrow, e.g.
+ //
+ // BORROW: (*x1[2].y).z.a
+ // ACCESS: x1[i].y
+ //
+ // Then our steps are:
+ // x1 | x1 -- places are the same
+ // x1[2] | x1[i] -- equal or disjoint (disjoint if indexes differ)
+ // x1[2].y | x1[i].y -- equal or disjoint
+ //
+ // -- here we run out of access - the borrow can access a part of it. If this
+ // is a full deep access, then we *know* the borrow conflicts with it. However,
+ // if the access is shallow, then we can proceed:
+ //
+ // x1[2].y | (*x1[i].y) -- a deref! the access can't get past this, so we
+ // are disjoint
+ //
+ // Our invariant is that, at each step of the iteration:
+ // - If we didn't run out of access to match, our borrow and access are comparable
+ // and either equal or disjoint.
+ // - If we did run out of access, the borrow can access a part of it.
+
+ let borrow_local = borrow_place.local;
+ let access_local = access_place.local;
+
+ match place_base_conflict(borrow_local, access_local) {
+ Overlap::Arbitrary => {
+ bug!("Two base can't return Arbitrary");
+ }
+ Overlap::EqualOrDisjoint => {
+ // This is the recursive case - proceed to the next element.
+ }
+ Overlap::Disjoint => {
+ // We have proven the borrow disjoint - further
+ // projections will remain disjoint.
+ debug!("borrow_conflicts_with_place: disjoint");
+ return false;
+ }
+ }
+
+ // loop invariant: borrow_c is always either equal to access_c or disjoint from it.
+ for (i, (borrow_c, &access_c)) in
+ iter::zip(borrow_place.projection, access_place.projection).enumerate()
+ {
+ debug!("borrow_conflicts_with_place: borrow_c = {:?}", borrow_c);
+ let borrow_proj_base = &borrow_place.projection[..i];
+
+ debug!("borrow_conflicts_with_place: access_c = {:?}", access_c);
+
+ // Borrow and access path both have more components.
+ //
+ // Examples:
+ //
+ // - borrow of `a.(...)`, access to `a.(...)`
+ // - borrow of `a.(...)`, access to `b.(...)`
+ //
+ // Here we only see the components we have checked so
+ // far (in our examples, just the first component). We
+ // check whether the components being borrowed vs
+ // accessed are disjoint (as in the second example,
+ // but not the first).
+ match place_projection_conflict(
+ tcx,
+ body,
+ borrow_local,
+ borrow_proj_base,
+ borrow_c,
+ access_c,
+ bias,
+ ) {
+ Overlap::Arbitrary => {
+ // We have encountered different fields of potentially
+ // the same union - the borrow now partially overlaps.
+ //
+ // There is no *easy* way of comparing the fields
+ // further on, because they might have different types
+ // (e.g., borrows of `u.a.0` and `u.b.y` where `.0` and
+ // `.y` come from different structs).
+ //
+ // We could try to do some things here - e.g., count
+ // dereferences - but that's probably not a good
+ // idea, at least for now, so just give up and
+ // report a conflict. This is unsafe code anyway so
+ // the user could always use raw pointers.
+ debug!("borrow_conflicts_with_place: arbitrary -> conflict");
+ return true;
+ }
+ Overlap::EqualOrDisjoint => {
+ // This is the recursive case - proceed to the next element.
+ }
+ Overlap::Disjoint => {
+ // We have proven the borrow disjoint - further
+ // projections will remain disjoint.
+ debug!("borrow_conflicts_with_place: disjoint");
+ return false;
+ }
+ }
+ }
+
+ if borrow_place.projection.len() > access_place.projection.len() {
+ for (i, elem) in borrow_place.projection[access_place.projection.len()..].iter().enumerate()
+ {
+ // Borrow path is longer than the access path. Examples:
+ //
+ // - borrow of `a.b.c`, access to `a.b`
+ //
+ // Here, we know that the borrow can access a part of
+ // our place. This is a conflict if that is a part that
+ // our access cares about.
+
+ let proj_base = &borrow_place.projection[..access_place.projection.len() + i];
+ let base_ty = Place::ty_from(borrow_local, proj_base, body, tcx).ty;
+
+ match (elem, &base_ty.kind(), access) {
+ (_, _, Shallow(Some(ArtificialField::ArrayLength)))
+ | (_, _, Shallow(Some(ArtificialField::ShallowBorrow))) => {
+ // The array length is like additional fields on the
+ // type; it does not overlap any existing data there.
+ // Furthermore, it cannot actually be a prefix of any
+ // borrowed place (at least in MIR as it currently stands).
+ //
+ // e.g., a (mutable) borrow of `a[5]` while we read the
+ // array length of `a`.
+ debug!("borrow_conflicts_with_place: implicit field");
+ return false;
+ }
+
+ (ProjectionElem::Deref, _, Shallow(None)) => {
+ // e.g., a borrow of `*x.y` while we shallowly access `x.y` or some
+ // prefix thereof - the shallow access can't touch anything behind
+ // the pointer.
+ debug!("borrow_conflicts_with_place: shallow access behind ptr");
+ return false;
+ }
+ (ProjectionElem::Deref, ty::Ref(_, _, hir::Mutability::Not), _) => {
+ // Shouldn't be tracked
+ bug!("Tracking borrow behind shared reference.");
+ }
+ (ProjectionElem::Deref, ty::Ref(_, _, hir::Mutability::Mut), AccessDepth::Drop) => {
+ // Values behind a mutable reference are not accessed, either by dropping a
+ // value or by StorageDead.
+ debug!("borrow_conflicts_with_place: drop access behind ptr");
+ return false;
+ }
+
+ (ProjectionElem::Field { .. }, ty::Adt(def, _), AccessDepth::Drop) => {
+ // Drop can read/write arbitrary projections, so places
+ // conflict regardless of further projections.
+ if def.has_dtor(tcx) {
+ return true;
+ }
+ }
+
+ (ProjectionElem::Deref, _, Deep)
+ | (ProjectionElem::Deref, _, AccessDepth::Drop)
+ | (ProjectionElem::Field { .. }, _, _)
+ | (ProjectionElem::Index { .. }, _, _)
+ | (ProjectionElem::ConstantIndex { .. }, _, _)
+ | (ProjectionElem::Subslice { .. }, _, _)
+ | (ProjectionElem::Downcast { .. }, _, _) => {
+ // Recursive case. This can still be disjoint on a
+ // further iteration if this a shallow access and
+ // there's a deref later on, e.g., a borrow
+ // of `*x.y` while accessing `x`.
+ }
+ }
+ }
+ }
+
+ // Borrow path ran out but access path may not
+ // have. Examples:
+ //
+ // - borrow of `a.b`, access to `a.b.c`
+ // - borrow of `a.b`, access to `a.b`
+ //
+ // In the first example, where we didn't run out of
+ // access, the borrow can access all of our place, so we
+ // have a conflict.
+ //
+ // In the second example, where we did, we still know
+ // that the borrow can access a *part* of our place that
+ // our access cares about, so we still have a conflict.
+ if borrow_kind == BorrowKind::Shallow
+ && borrow_place.projection.len() < access_place.projection.len()
+ {
+ debug!("borrow_conflicts_with_place: shallow borrow");
+ false
+ } else {
+ debug!("borrow_conflicts_with_place: full borrow, CONFLICT");
+ true
+ }
+}
+
+// Given the base locals `l1` and `l2` of two places, return the overlap
+// situation between them: equal locals are `EqualOrDisjoint`, different
+// locals are `Disjoint`.
+fn place_base_conflict(l1: Local, l2: Local) -> Overlap {
+ if l1 == l2 {
+ // the same local - base case, equal
+ debug!("place_element_conflict: DISJOINT-OR-EQ-LOCAL");
+ Overlap::EqualOrDisjoint
+ } else {
+ // different locals - base case, disjoint
+ debug!("place_element_conflict: DISJOINT-LOCAL");
+ Overlap::Disjoint
+ }
+}
+
+// Given that the bases of `elem1` and `elem2` are always either equal
+// or disjoint (and have the same type!), return the overlap situation
+// between `elem1` and `elem2`.
+fn place_projection_conflict<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ pi1_local: Local,
+ pi1_proj_base: &[PlaceElem<'tcx>],
+ pi1_elem: PlaceElem<'tcx>,
+ pi2_elem: PlaceElem<'tcx>,
+ bias: PlaceConflictBias,
+) -> Overlap {
+ match (pi1_elem, pi2_elem) {
+ (ProjectionElem::Deref, ProjectionElem::Deref) => {
+ // derefs (e.g., `*x` vs. `*x`) - recur.
+ debug!("place_element_conflict: DISJOINT-OR-EQ-DEREF");
+ Overlap::EqualOrDisjoint
+ }
+ (ProjectionElem::Field(f1, _), ProjectionElem::Field(f2, _)) => {
+ if f1 == f2 {
+ // same field (e.g., `a.y` vs. `a.y`) - recur.
+ debug!("place_element_conflict: DISJOINT-OR-EQ-FIELD");
+ Overlap::EqualOrDisjoint
+ } else {
+ let ty = Place::ty_from(pi1_local, pi1_proj_base, body, tcx).ty;
+ if ty.is_union() {
+ // Different fields of a union, we are basically stuck.
+ debug!("place_element_conflict: STUCK-UNION");
+ Overlap::Arbitrary
+ } else {
+ // Different fields of a struct (`a.x` vs. `a.y`). Disjoint!
+ debug!("place_element_conflict: DISJOINT-FIELD");
+ Overlap::Disjoint
+ }
+ }
+ }
+ (ProjectionElem::Downcast(_, v1), ProjectionElem::Downcast(_, v2)) => {
+ // different variants are treated as having disjoint fields,
+ // even if they occupy the same "space", because it's
+ // impossible for 2 variants of the same enum to exist
+ // (and therefore, to be borrowed) at the same time.
+ //
+ // Note that this is different from unions - we *do* allow
+ // this code to compile:
+ //
+ // ```
+ // fn foo(x: &mut Result<i32, i32>) {
+ // let mut v = None;
+ // if let Ok(ref mut a) = *x {
+ // v = Some(a);
+ // }
+ // // here, you would *think* that the
+ // // *entirety* of `x` would be borrowed,
+ // // but in fact only the `Ok` variant is,
+ // // so the `Err` variant is *entirely free*:
+ // if let Err(ref mut a) = *x {
+ // v = Some(a);
+ // }
+ // drop(v);
+ // }
+ // ```
+ if v1 == v2 {
+ debug!("place_element_conflict: DISJOINT-OR-EQ-FIELD");
+ Overlap::EqualOrDisjoint
+ } else {
+ debug!("place_element_conflict: DISJOINT-FIELD");
+ Overlap::Disjoint
+ }
+ }
+ (
+ ProjectionElem::Index(..),
+ ProjectionElem::Index(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. },
+ )
+ | (
+ ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. },
+ ProjectionElem::Index(..),
+ ) => {
+ // Array indexes (`a[0]` vs. `a[i]`). These can either be disjoint
+ // (if the indexes differ) or equal (if they are the same).
+ match bias {
+ PlaceConflictBias::Overlap => {
+ // If we are biased towards overlapping, then this is the recursive
+ // case that gives "equal *or* disjoint" its meaning.
+ debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-INDEX");
+ Overlap::EqualOrDisjoint
+ }
+ PlaceConflictBias::NoOverlap => {
+ // If we are biased towards no overlapping, then this is disjoint.
+ debug!("place_element_conflict: DISJOINT-ARRAY-INDEX");
+ Overlap::Disjoint
+ }
+ }
+ }
+ (
+ ProjectionElem::ConstantIndex { offset: o1, min_length: _, from_end: false },
+ ProjectionElem::ConstantIndex { offset: o2, min_length: _, from_end: false },
+ )
+ | (
+ ProjectionElem::ConstantIndex { offset: o1, min_length: _, from_end: true },
+ ProjectionElem::ConstantIndex { offset: o2, min_length: _, from_end: true },
+ ) => {
+ if o1 == o2 {
+ debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-CONSTANT-INDEX");
+ Overlap::EqualOrDisjoint
+ } else {
+ debug!("place_element_conflict: DISJOINT-ARRAY-CONSTANT-INDEX");
+ Overlap::Disjoint
+ }
+ }
+ (
+ ProjectionElem::ConstantIndex {
+ offset: offset_from_begin,
+ min_length: min_length1,
+ from_end: false,
+ },
+ ProjectionElem::ConstantIndex {
+ offset: offset_from_end,
+ min_length: min_length2,
+ from_end: true,
+ },
+ )
+ | (
+ ProjectionElem::ConstantIndex {
+ offset: offset_from_end,
+ min_length: min_length1,
+ from_end: true,
+ },
+ ProjectionElem::ConstantIndex {
+ offset: offset_from_begin,
+ min_length: min_length2,
+ from_end: false,
+ },
+ ) => {
+ // Both patterns matched, so the array length must be at least the greater of the two.
+ let min_length = max(min_length1, min_length2);
+ // `offset_from_end` can be in range `[1..min_length]`, 1 indicates the last
+ // element (like -1 in Python) and `min_length` the first.
+ // Therefore, `min_length - offset_from_end` gives the minimal possible
+ // offset from the beginning
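+ // Worked example (hypothetical values): with `min_length = 5` and
+ // `offset_from_end = 2`, the from-end element sits at index >= 3, so
+ // `offset_from_begin` values of 3 or more may coincide with it, while
+ // 0, 1 and 2 are provably disjoint.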
+ if offset_from_begin >= min_length - offset_from_end {
+ debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-CONSTANT-INDEX-FE");
+ Overlap::EqualOrDisjoint
+ } else {
+ debug!("place_element_conflict: DISJOINT-ARRAY-CONSTANT-INDEX-FE");
+ Overlap::Disjoint
+ }
+ }
+ (
+ ProjectionElem::ConstantIndex { offset, min_length: _, from_end: false },
+ ProjectionElem::Subslice { from, to, from_end: false },
+ )
+ | (
+ ProjectionElem::Subslice { from, to, from_end: false },
+ ProjectionElem::ConstantIndex { offset, min_length: _, from_end: false },
+ ) => {
+ if (from..to).contains(&offset) {
+ debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-CONSTANT-INDEX-SUBSLICE");
+ Overlap::EqualOrDisjoint
+ } else {
+ debug!("place_element_conflict: DISJOINT-ARRAY-CONSTANT-INDEX-SUBSLICE");
+ Overlap::Disjoint
+ }
+ }
+ (
+ ProjectionElem::ConstantIndex { offset, min_length: _, from_end: false },
+ ProjectionElem::Subslice { from, .. },
+ )
+ | (
+ ProjectionElem::Subslice { from, .. },
+ ProjectionElem::ConstantIndex { offset, min_length: _, from_end: false },
+ ) => {
+ if offset >= from {
+ debug!("place_element_conflict: DISJOINT-OR-EQ-SLICE-CONSTANT-INDEX-SUBSLICE");
+ Overlap::EqualOrDisjoint
+ } else {
+ debug!("place_element_conflict: DISJOINT-SLICE-CONSTANT-INDEX-SUBSLICE");
+ Overlap::Disjoint
+ }
+ }
+ (
+ ProjectionElem::ConstantIndex { offset, min_length: _, from_end: true },
+ ProjectionElem::Subslice { to, from_end: true, .. },
+ )
+ | (
+ ProjectionElem::Subslice { to, from_end: true, .. },
+ ProjectionElem::ConstantIndex { offset, min_length: _, from_end: true },
+ ) => {
+ if offset > to {
+ debug!(
+ "place_element_conflict: \
+ DISJOINT-OR-EQ-SLICE-CONSTANT-INDEX-SUBSLICE-FE"
+ );
+ Overlap::EqualOrDisjoint
+ } else {
+ debug!("place_element_conflict: DISJOINT-SLICE-CONSTANT-INDEX-SUBSLICE-FE");
+ Overlap::Disjoint
+ }
+ }
+ (
+ ProjectionElem::Subslice { from: f1, to: t1, from_end: false },
+ ProjectionElem::Subslice { from: f2, to: t2, from_end: false },
+ ) => {
+ if f2 >= t1 || f1 >= t2 {
+ debug!("place_element_conflict: DISJOINT-ARRAY-SUBSLICES");
+ Overlap::Disjoint
+ } else {
+ debug!("place_element_conflict: DISJOINT-OR-EQ-ARRAY-SUBSLICES");
+ Overlap::EqualOrDisjoint
+ }
+ }
+ (ProjectionElem::Subslice { .. }, ProjectionElem::Subslice { .. }) => {
+ debug!("place_element_conflict: DISJOINT-OR-EQ-SLICE-SUBSLICES");
+ Overlap::EqualOrDisjoint
+ }
+ (
+ ProjectionElem::Deref
+ | ProjectionElem::Field(..)
+ | ProjectionElem::Index(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Downcast(..),
+ _,
+ ) => bug!(
+ "mismatched projections in place_element_conflict: {:?} and {:?}",
+ pi1_elem,
+ pi2_elem
+ ),
+ }
+}
diff --git a/compiler/rustc_borrowck/src/prefixes.rs b/compiler/rustc_borrowck/src/prefixes.rs
new file mode 100644
index 000000000..bdf2becb7
--- /dev/null
+++ b/compiler/rustc_borrowck/src/prefixes.rs
@@ -0,0 +1,145 @@
+//! From the NLL RFC: "The deep [aka 'supporting'] prefixes for a
+//! place are formed by stripping away fields and derefs, except that
+//! we stop when we reach the deref of a shared reference. [...] "
+//!
+//! "Shallow prefixes are found by stripping away fields, but stop at
+//! any dereference. So: writing a path like `a` is illegal if `a.b`
+//! is borrowed. But: writing `a` is legal if `*a` is borrowed,
+//! whether or not `a` is a shared or mutable reference. [...] "
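+//!
+//! For example (illustrative, matching the iterator below): the supporting
+//! prefixes of `(*a.b).c` where `a.b: &mut T` are `(*a.b).c`, `*a.b`, `a.b`
+//! and `a`; if instead `a.b: &T`, the walk stops after yielding `*a.b`,
+//! since the referent of a shared reference cannot be invalidated through
+//! `a`.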
+
+use super::MirBorrowckCtxt;
+
+use rustc_hir as hir;
+use rustc_middle::mir::{Body, PlaceRef, ProjectionElem};
+use rustc_middle::ty::{self, TyCtxt};
+
+pub trait IsPrefixOf<'tcx> {
+ fn is_prefix_of(&self, other: PlaceRef<'tcx>) -> bool;
+}
+
+impl<'tcx> IsPrefixOf<'tcx> for PlaceRef<'tcx> {
+ fn is_prefix_of(&self, other: PlaceRef<'tcx>) -> bool {
+ self.local == other.local
+ && self.projection.len() <= other.projection.len()
+ && self.projection == &other.projection[..self.projection.len()]
+ }
+}
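+
+// For instance, `a.b` is a prefix of both `a.b` and `a.b.c` (same local,
+// leading projections equal), but not of `a.c`.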
+
+pub(super) struct Prefixes<'cx, 'tcx> {
+ body: &'cx Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ kind: PrefixSet,
+ next: Option<PlaceRef<'tcx>>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub(super) enum PrefixSet {
+ /// Doesn't stop until it returns the base case (a Local or
+ /// Static prefix).
+ All,
+ /// Stops at any dereference.
+ Shallow,
+ /// Stops at the deref of a shared reference.
+ Supporting,
+}
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+ /// Returns an iterator over the prefixes of `place`
+ /// (inclusive) from longest to shortest, potentially
+ /// terminating the iteration early based on `kind`.
+ pub(super) fn prefixes(
+ &self,
+ place_ref: PlaceRef<'tcx>,
+ kind: PrefixSet,
+ ) -> Prefixes<'cx, 'tcx> {
+ Prefixes { next: Some(place_ref), kind, body: self.body, tcx: self.infcx.tcx }
+ }
+}
+
+impl<'cx, 'tcx> Iterator for Prefixes<'cx, 'tcx> {
+ type Item = PlaceRef<'tcx>;
+ fn next(&mut self) -> Option<Self::Item> {
+ let mut cursor = self.next?;
+
+ // Post-processing `place`: Enqueue any remaining
+ // work. Also, `place` may not be a prefix itself, but
+ // may hold one further down (e.g., we never return
+ // downcasts here, but may return a base of a downcast).
+
+ 'cursor: loop {
+ match cursor.last_projection() {
+ None => {
+ self.next = None;
+ return Some(cursor);
+ }
+ Some((cursor_base, elem)) => {
+ match elem {
+ ProjectionElem::Field(_ /*field*/, _ /*ty*/) => {
+ // FIXME: add union handling
+ self.next = Some(cursor_base);
+ return Some(cursor);
+ }
+ ProjectionElem::Downcast(..)
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Index(_) => {
+ cursor = cursor_base;
+ continue 'cursor;
+ }
+ ProjectionElem::Deref => {
+ // (handled below)
+ }
+ }
+
+ assert_eq!(elem, ProjectionElem::Deref);
+
+ match self.kind {
+ PrefixSet::Shallow => {
+ // Shallow prefixes are found by stripping away
+ // fields, but stop at *any* dereference.
+ // So we can just stop the traversal now.
+ self.next = None;
+ return Some(cursor);
+ }
+ PrefixSet::All => {
+ // All prefixes: just blindly enqueue the base
+ // of the projection.
+ self.next = Some(cursor_base);
+ return Some(cursor);
+ }
+ PrefixSet::Supporting => {
+ // Fall through!
+ }
+ }
+
+ assert_eq!(self.kind, PrefixSet::Supporting);
+ // Supporting prefixes: strip away fields and
+ // derefs, except we stop at the deref of a shared
+ // reference.
+
+ let ty = cursor_base.ty(self.body, self.tcx).ty;
+ match ty.kind() {
+ ty::RawPtr(_) | ty::Ref(_ /*rgn*/, _ /*ty*/, hir::Mutability::Not) => {
+ // don't continue traversing over derefs of raw pointers or shared
+ // borrows.
+ self.next = None;
+ return Some(cursor);
+ }
+
+ ty::Ref(_ /*rgn*/, _ /*ty*/, hir::Mutability::Mut) => {
+ self.next = Some(cursor_base);
+ return Some(cursor);
+ }
+
+ ty::Adt(..) if ty.is_box() => {
+ self.next = Some(cursor_base);
+ return Some(cursor);
+ }
+
+ _ => panic!("unknown type fed to Projection Deref."),
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/region_infer/dump_mir.rs b/compiler/rustc_borrowck/src/region_infer/dump_mir.rs
new file mode 100644
index 000000000..fe5193102
--- /dev/null
+++ b/compiler/rustc_borrowck/src/region_infer/dump_mir.rs
@@ -0,0 +1,93 @@
+//! As part of generating the regions, if you enable `-Zdump-mir=nll`,
+//! we will generate an annotated copy of the MIR that includes the
+//! state of region inference. This code handles emitting the region
+//! context internal state.
+
+use super::{OutlivesConstraint, RegionInferenceContext};
+use crate::type_check::Locations;
+use rustc_infer::infer::NllRegionVariableOrigin;
+use rustc_middle::ty::TyCtxt;
+use std::io::{self, Write};
+
+// Room for "'_#NNNNr" before things get misaligned.
+// Easy enough to fix if this ever doesn't seem like
+// enough.
+const REGION_WIDTH: usize = 8;
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+ /// Write out our state into the `.mir` files.
+ pub(crate) fn dump_mir(&self, tcx: TyCtxt<'tcx>, out: &mut dyn Write) -> io::Result<()> {
+ writeln!(out, "| Free Region Mapping")?;
+
+ for region in self.regions() {
+ if let NllRegionVariableOrigin::FreeRegion = self.definitions[region].origin {
+ let classification = self.universal_regions.region_classification(region).unwrap();
+ let outlived_by = self.universal_region_relations.regions_outlived_by(region);
+ writeln!(
+ out,
+ "| {r:rw$?} | {c:cw$?} | {ob:?}",
+ r = region,
+ rw = REGION_WIDTH,
+ c = classification,
+ cw = 8, // "External" at most
+ ob = outlived_by
+ )?;
+ }
+ }
+
+ writeln!(out, "|")?;
+ writeln!(out, "| Inferred Region Values")?;
+ for region in self.regions() {
+ writeln!(
+ out,
+ "| {r:rw$?} | {ui:4?} | {v}",
+ r = region,
+ rw = REGION_WIDTH,
+ ui = self.region_universe(region),
+ v = self.region_value_str(region),
+ )?;
+ }
+
+ writeln!(out, "|")?;
+ writeln!(out, "| Inference Constraints")?;
+ self.for_each_constraint(tcx, &mut |msg| writeln!(out, "| {}", msg))?;
+
+ Ok(())
+ }
+
+ /// Debugging aid: Invokes the `with_msg` callback repeatedly with
+ /// our internal region constraints. These are dumped into the
+ /// -Zdump-mir file so that we can figure out why the region
+ /// inference resulted in the values that it did when debugging.
+ fn for_each_constraint(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ with_msg: &mut dyn FnMut(&str) -> io::Result<()>,
+ ) -> io::Result<()> {
+ for region in self.definitions.indices() {
+ let value = self.liveness_constraints.region_value_str(region);
+ if value != "{}" {
+ with_msg(&format!("{:?} live at {}", region, value))?;
+ }
+ }
+
+ let mut constraints: Vec<_> = self.constraints.outlives().iter().collect();
+ constraints.sort_by_key(|c| (c.sup, c.sub));
+ for constraint in &constraints {
+ let OutlivesConstraint { sup, sub, locations, category, span, variance_info: _ } =
+ constraint;
+ let (name, arg) = match locations {
+ Locations::All(span) => {
+ ("All", tcx.sess.source_map().span_to_embeddable_string(*span))
+ }
+ Locations::Single(loc) => ("Single", format!("{:?}", loc)),
+ };
+ with_msg(&format!(
+ "{:?}: {:?} due to {:?} at {}({}) ({:?}",
+ sup, sub, category, name, arg, span
+ ))?;
+ }
+
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_borrowck/src/region_infer/graphviz.rs b/compiler/rustc_borrowck/src/region_infer/graphviz.rs
new file mode 100644
index 000000000..f31ccd74c
--- /dev/null
+++ b/compiler/rustc_borrowck/src/region_infer/graphviz.rs
@@ -0,0 +1,140 @@
+//! This module provides linkage between `RegionInferenceContext` and
+//! `rustc_graphviz` traits, specialized to attaching borrowck analysis
+//! data to rendered labels.
+
+use std::borrow::Cow;
+use std::io::{self, Write};
+
+use super::*;
+use crate::constraints::OutlivesConstraint;
+use rustc_graphviz as dot;
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+ /// Write out the region constraint graph.
+ pub(crate) fn dump_graphviz_raw_constraints(&self, mut w: &mut dyn Write) -> io::Result<()> {
+ dot::render(&RawConstraints { regioncx: self }, &mut w)
+ }
+
+ /// Write out the region constraint graph.
+ pub(crate) fn dump_graphviz_scc_constraints(&self, mut w: &mut dyn Write) -> io::Result<()> {
+ let mut nodes_per_scc: IndexVec<ConstraintSccIndex, _> =
+ self.constraint_sccs.all_sccs().map(|_| Vec::new()).collect();
+
+ for region in self.definitions.indices() {
+ let scc = self.constraint_sccs.scc(region);
+ nodes_per_scc[scc].push(region);
+ }
+
+ dot::render(&SccConstraints { regioncx: self, nodes_per_scc }, &mut w)
+ }
+}
+
+struct RawConstraints<'a, 'tcx> {
+ regioncx: &'a RegionInferenceContext<'tcx>,
+}
+
+impl<'a, 'this, 'tcx> dot::Labeller<'this> for RawConstraints<'a, 'tcx> {
+ type Node = RegionVid;
+ type Edge = OutlivesConstraint<'tcx>;
+
+ fn graph_id(&'this self) -> dot::Id<'this> {
+ dot::Id::new("RegionInferenceContext").unwrap()
+ }
+ fn node_id(&'this self, n: &RegionVid) -> dot::Id<'this> {
+ dot::Id::new(format!("r{}", n.index())).unwrap()
+ }
+ fn node_shape(&'this self, _node: &RegionVid) -> Option<dot::LabelText<'this>> {
+ Some(dot::LabelText::LabelStr(Cow::Borrowed("box")))
+ }
+ fn node_label(&'this self, n: &RegionVid) -> dot::LabelText<'this> {
+ dot::LabelText::LabelStr(format!("{:?}", n).into())
+ }
+ fn edge_label(&'this self, e: &OutlivesConstraint<'tcx>) -> dot::LabelText<'this> {
+ dot::LabelText::LabelStr(format!("{:?}", e.locations).into())
+ }
+}
+
+impl<'a, 'this, 'tcx> dot::GraphWalk<'this> for RawConstraints<'a, 'tcx> {
+ type Node = RegionVid;
+ type Edge = OutlivesConstraint<'tcx>;
+
+ fn nodes(&'this self) -> dot::Nodes<'this, RegionVid> {
+ let vids: Vec<RegionVid> = self.regioncx.definitions.indices().collect();
+ vids.into()
+ }
+ fn edges(&'this self) -> dot::Edges<'this, OutlivesConstraint<'tcx>> {
+ (&self.regioncx.constraints.outlives().raw[..]).into()
+ }
+
+ // Render `a: b` as `a -> b`, indicating the flow
+ // of data during inference.
+
+ fn source(&'this self, edge: &OutlivesConstraint<'tcx>) -> RegionVid {
+ edge.sup
+ }
+
+ fn target(&'this self, edge: &OutlivesConstraint<'tcx>) -> RegionVid {
+ edge.sub
+ }
+}
+
+struct SccConstraints<'a, 'tcx> {
+ regioncx: &'a RegionInferenceContext<'tcx>,
+ nodes_per_scc: IndexVec<ConstraintSccIndex, Vec<RegionVid>>,
+}
+
+impl<'a, 'this, 'tcx> dot::Labeller<'this> for SccConstraints<'a, 'tcx> {
+ type Node = ConstraintSccIndex;
+ type Edge = (ConstraintSccIndex, ConstraintSccIndex);
+
+ fn graph_id(&'this self) -> dot::Id<'this> {
+ dot::Id::new("RegionInferenceContext".to_string()).unwrap()
+ }
+ fn node_id(&'this self, n: &ConstraintSccIndex) -> dot::Id<'this> {
+ dot::Id::new(format!("r{}", n.index())).unwrap()
+ }
+ fn node_shape(&'this self, _node: &ConstraintSccIndex) -> Option<dot::LabelText<'this>> {
+ Some(dot::LabelText::LabelStr(Cow::Borrowed("box")))
+ }
+ fn node_label(&'this self, n: &ConstraintSccIndex) -> dot::LabelText<'this> {
+ let nodes = &self.nodes_per_scc[*n];
+ dot::LabelText::LabelStr(format!("{:?} = {:?}", n, nodes).into())
+ }
+}
+
+impl<'a, 'this, 'tcx> dot::GraphWalk<'this> for SccConstraints<'a, 'tcx> {
+ type Node = ConstraintSccIndex;
+ type Edge = (ConstraintSccIndex, ConstraintSccIndex);
+
+ fn nodes(&'this self) -> dot::Nodes<'this, ConstraintSccIndex> {
+ let vids: Vec<ConstraintSccIndex> = self.regioncx.constraint_sccs.all_sccs().collect();
+ vids.into()
+ }
+ fn edges(&'this self) -> dot::Edges<'this, (ConstraintSccIndex, ConstraintSccIndex)> {
+ let edges: Vec<_> = self
+ .regioncx
+ .constraint_sccs
+ .all_sccs()
+ .flat_map(|scc_a| {
+ self.regioncx
+ .constraint_sccs
+ .successors(scc_a)
+ .iter()
+ .map(move |&scc_b| (scc_a, scc_b))
+ })
+ .collect();
+
+ edges.into()
+ }
+
+ // Render `a: b` as `a -> b`, indicating the flow
+ // of data during inference.
+
+ fn source(&'this self, edge: &(ConstraintSccIndex, ConstraintSccIndex)) -> ConstraintSccIndex {
+ edge.0
+ }
+
+ fn target(&'this self, edge: &(ConstraintSccIndex, ConstraintSccIndex)) -> ConstraintSccIndex {
+ edge.1
+ }
+}
diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs
new file mode 100644
index 000000000..2894c6d29
--- /dev/null
+++ b/compiler/rustc_borrowck/src/region_infer/mod.rs
@@ -0,0 +1,2365 @@
+use std::collections::VecDeque;
+use std::rc::Rc;
+
+use rustc_data_structures::binary_search_util;
+use rustc_data_structures::frozen::Frozen;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::graph::scc::Sccs;
+use rustc_errors::Diagnostic;
+use rustc_hir::def_id::{DefId, CRATE_DEF_ID};
+use rustc_hir::CRATE_HIR_ID;
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::canonical::QueryOutlivesConstraint;
+use rustc_infer::infer::outlives::test_type_match;
+use rustc_infer::infer::region_constraints::{GenericKind, VarInfos, VerifyBound, VerifyIfEq};
+use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin, RegionVariableOrigin};
+use rustc_middle::mir::{
+ Body, ClosureOutlivesRequirement, ClosureOutlivesSubject, ClosureRegionRequirements,
+ ConstraintCategory, Local, Location, ReturnConstraint,
+};
+use rustc_middle::traits::ObligationCause;
+use rustc_middle::traits::ObligationCauseCode;
+use rustc_middle::ty::{
+ self, subst::SubstsRef, RegionVid, Ty, TyCtxt, TypeFoldable, TypeVisitable,
+};
+use rustc_span::Span;
+
+use crate::{
+ constraints::{
+ graph::NormalConstraintGraph, ConstraintSccIndex, OutlivesConstraint, OutlivesConstraintSet,
+ },
+ diagnostics::{RegionErrorKind, RegionErrors, UniverseInfo},
+ member_constraints::{MemberConstraintSet, NllMemberConstraintIndex},
+ nll::{PoloniusOutput, ToRegionVid},
+ region_infer::reverse_sccs::ReverseSccGraph,
+ region_infer::values::{
+ LivenessValues, PlaceholderIndices, RegionElement, RegionValueElements, RegionValues,
+ ToElementIndex,
+ },
+ type_check::{free_region_relations::UniversalRegionRelations, Locations},
+ universal_regions::UniversalRegions,
+};
+
+mod dump_mir;
+mod graphviz;
+mod opaque_types;
+mod reverse_sccs;
+
+pub mod values;
+
+pub struct RegionInferenceContext<'tcx> {
+ pub var_infos: VarInfos,
+
+ /// Contains the definition for every region variable. Region
+ /// variables are identified by their index (`RegionVid`). The
+ /// definition contains information about where the region came
+ /// from as well as its final inferred value.
+ definitions: IndexVec<RegionVid, RegionDefinition<'tcx>>,
+
+ /// The liveness constraints added to each region. For most
+ /// regions, these start out empty and steadily grow, though for
+ /// each universally quantified region R they start out containing
+ /// the entire CFG and `end(R)`.
+ liveness_constraints: LivenessValues<RegionVid>,
+
+ /// The outlives constraints computed by the type-check.
+ constraints: Frozen<OutlivesConstraintSet<'tcx>>,
+
+ /// The constraint-set, but in graph form, making it easy to traverse
+ /// the constraints adjacent to a particular region. Used to construct
+ /// the SCC (see `constraint_sccs`) and for error reporting.
+ constraint_graph: Frozen<NormalConstraintGraph>,
+
+ /// The SCC computed from `constraints` and the constraint
+ /// graph. We have an edge from SCC A to SCC B if `A: B`. Used to
+ /// compute the values of each region.
+ constraint_sccs: Rc<Sccs<RegionVid, ConstraintSccIndex>>,
+
+ /// Reverse of the SCC constraint graph -- i.e., an edge `A -> B` exists if
+ /// `B: A`. This is used to compute the universal regions that are required
+ /// to outlive a given SCC. Computed lazily.
+ rev_scc_graph: Option<Rc<ReverseSccGraph>>,
+
+ /// The "R0 member of [R1..Rn]" constraints, indexed by SCC.
+ member_constraints: Rc<MemberConstraintSet<'tcx, ConstraintSccIndex>>,
+
+ /// Records the member constraints that we applied to each scc.
+ /// This is useful for error reporting. Once constraint
+ /// propagation is done, this vector is sorted according to
+ /// `member_region_scc`.
+ member_constraints_applied: Vec<AppliedMemberConstraint>,
+
+ /// Map closure bounds to a `Span` that should be used for error reporting.
+ closure_bounds_mapping:
+ FxHashMap<Location, FxHashMap<(RegionVid, RegionVid), (ConstraintCategory<'tcx>, Span)>>,
+
+ /// Map universe indexes to information on why we created it.
+ universe_causes: FxHashMap<ty::UniverseIndex, UniverseInfo<'tcx>>,
+
+ /// Contains the minimum universe of any variable within the same
+ /// SCC. We will ensure that no SCC contains values that are not
+ /// visible from this index.
+ scc_universes: IndexVec<ConstraintSccIndex, ty::UniverseIndex>,
+
+ /// Contains a "representative" from each SCC. This will be the
+ /// minimal RegionVid belonging to that universe. It is used as a
+ /// kind of hacky way to manage checking outlives relationships,
+ /// since we can 'canonicalize' each region to the representative
+ /// of its SCC and be sure that -- if they have the same repr --
+ /// they *must* be equal (though not having the same repr does not
+ /// mean they are unequal).
+ scc_representatives: IndexVec<ConstraintSccIndex, ty::RegionVid>,
+
+ /// The final inferred values of the region variables; we compute
+ /// one value per SCC. To get the value for any given *region*,
+ /// you first find which scc it is a part of.
+ scc_values: RegionValues<ConstraintSccIndex>,
+
+ /// Type constraints that we check after solving.
+ type_tests: Vec<TypeTest<'tcx>>,
+
+ /// Information about the universally quantified regions in scope
+ /// on this function.
+ universal_regions: Rc<UniversalRegions<'tcx>>,
+
+ /// Information about how the universally quantified regions in
+ /// scope on this function relate to one another.
+ universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
+}
+
+/// Each time that `apply_member_constraint` is successful, it appends
+/// one of these structs to the `member_constraints_applied` field.
+/// This is used in error reporting to trace out what happened.
+///
+/// The way that `apply_member_constraint` works is that it effectively
+/// adds a new lower bound to the SCC it is analyzing: so you wind up
+/// with `'R: 'O` where `'R` is the pick-region and `'O` is the
+/// minimal viable option.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
+pub(crate) struct AppliedMemberConstraint {
+ /// The SCC that was affected. (The "member region".)
+ ///
+ /// The vector of `AppliedMemberConstraint` elements is kept sorted
+ /// by this field.
+ pub(crate) member_region_scc: ConstraintSccIndex,
+
+ /// The "best option" that `apply_member_constraint` found -- this was
+ /// added as an "ad-hoc" lower-bound to `member_region_scc`.
+ pub(crate) min_choice: ty::RegionVid,
+
+ /// The "member constraint index" -- we can find out details about
+ /// the constraint from
+ /// `set.member_constraints[member_constraint_index]`.
+ pub(crate) member_constraint_index: NllMemberConstraintIndex,
+}
+
+pub(crate) struct RegionDefinition<'tcx> {
+ /// What kind of variable is this -- a free region? existential
+ /// variable? etc. (See the `NllRegionVariableOrigin` for more
+ /// info.)
+ pub(crate) origin: NllRegionVariableOrigin,
+
+ /// Which universe is this region variable defined in? This is
+ /// most often `ty::UniverseIndex::ROOT`, but when we encounter
+ /// forall-quantifiers like `for<'a> { 'a = 'b }`, we would create
+ /// the variable for `'a` in a fresh universe that extends ROOT.
+ pub(crate) universe: ty::UniverseIndex,
+
+ /// If this is 'static or an early-bound region, then this is
+ /// `Some(X)` where `X` is the name of the region.
+ pub(crate) external_name: Option<ty::Region<'tcx>>,
+}
+
+/// N.B., the variants in `Cause` are intentionally ordered. Lower
+/// values are preferred when it comes to error messages. Do not
+/// reorder them willy-nilly.
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
+pub(crate) enum Cause {
+ /// point inserted because Local was live at the given Location
+ LiveVar(Local, Location),
+
+ /// point inserted because Local was dropped at the given Location
+ DropVar(Local, Location),
+}
+
+/// A "type test" corresponds to an outlives constraint between a type
+/// and a lifetime, like `T: 'x` or `<T as Foo>::Bar: 'x`. They are
+/// translated from the `Verify` region constraints in the ordinary
+/// inference context.
+///
+/// These sorts of constraints are handled differently than ordinary
+/// constraints, at least at present. During type checking, the
+/// `InferCtxt::process_registered_region_obligations` method will
+/// attempt to convert a type test like `T: 'x` into an ordinary
+/// outlives constraint when possible (for example, `&'a T: 'b` will
+/// be converted into `'a: 'b` and registered as a `Constraint`).
+///
+/// In some cases, however, there are outlives relationships that are
+/// not converted into a region constraint, but rather into one of
+/// these "type tests". The distinction is that a type test does not
+/// influence the inference result, but instead just examines the
+/// values that we ultimately inferred for each region variable and
+/// checks that they meet certain extra criteria. If not, an error
+/// can be issued.
+///
+/// One reason for this is that these type tests typically boil down
+/// to a check like `'a: 'x` where `'a` is a universally quantified
+/// region -- and therefore not one whose value is really meant to be
+/// *inferred*, precisely (this is not always the case: one can have a
+/// type test like `<Foo as Trait<'?0>>::Bar: 'x`, where `'?0` is an
+/// inference variable). Another reason is that these type tests can
+/// involve *disjunction* -- that is, they can be satisfied in more
+/// than one way.
+///
+/// For more information about this translation, see
+/// `InferCtxt::process_registered_region_obligations` and
+/// `InferCtxt::type_must_outlive` in `rustc_infer::infer::InferCtxt`.
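+///
+/// As a small illustrative example (not tied to any particular compiler
+/// output), a function like:
+/// ```ignore (illustrative)
+/// fn foo<'a, T: 'a>(x: &'a T) { /* ... */ }
+/// ```
+/// can give rise to a type test `T: 'a`, whose `verify_bound` is then
+/// discharged by the `T: 'a` where-clause in the environment.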
+#[derive(Clone, Debug)]
+pub struct TypeTest<'tcx> {
+ /// The type `T` that must outlive the region.
+ pub generic_kind: GenericKind<'tcx>,
+
+ /// The region `'x` that the type must outlive.
+ pub lower_bound: RegionVid,
+
+ /// Where did this constraint arise and why?
+ pub locations: Locations,
+
+ /// A test which, if met by the region `'x`, proves that this type
+ /// constraint is satisfied.
+ pub verify_bound: VerifyBound<'tcx>,
+}
+
+/// When we have an unmet lifetime constraint, we try to propagate it outward (e.g. to a closure
+/// environment). If we can't, it is an error.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+enum RegionRelationCheckResult {
+ Ok,
+ Propagated,
+ Error,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+enum Trace<'tcx> {
+ StartRegion,
+ FromOutlivesConstraint(OutlivesConstraint<'tcx>),
+ NotVisited,
+}
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+ /// Creates a new region inference context with a total of
+ /// `num_region_variables` valid inference variables; the first N
+ /// of those will be constant regions representing the free
+ /// regions defined in `universal_regions`.
+ ///
+ /// The `outlives_constraints` and `type_tests` are an initial set
+ /// of constraints produced by the MIR type check.
+ pub(crate) fn new(
+ var_infos: VarInfos,
+ universal_regions: Rc<UniversalRegions<'tcx>>,
+ placeholder_indices: Rc<PlaceholderIndices>,
+ universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
+ outlives_constraints: OutlivesConstraintSet<'tcx>,
+ member_constraints_in: MemberConstraintSet<'tcx, RegionVid>,
+ closure_bounds_mapping: FxHashMap<
+ Location,
+ FxHashMap<(RegionVid, RegionVid), (ConstraintCategory<'tcx>, Span)>,
+ >,
+ universe_causes: FxHashMap<ty::UniverseIndex, UniverseInfo<'tcx>>,
+ type_tests: Vec<TypeTest<'tcx>>,
+ liveness_constraints: LivenessValues<RegionVid>,
+ elements: &Rc<RegionValueElements>,
+ ) -> Self {
+ // Create a RegionDefinition for each inference variable.
+ let definitions: IndexVec<_, _> = var_infos
+ .iter()
+ .map(|info| RegionDefinition::new(info.universe, info.origin))
+ .collect();
+
+ let constraints = Frozen::freeze(outlives_constraints);
+ let constraint_graph = Frozen::freeze(constraints.graph(definitions.len()));
+ let fr_static = universal_regions.fr_static;
+ let constraint_sccs = Rc::new(constraints.compute_sccs(&constraint_graph, fr_static));
+
+ let mut scc_values =
+ RegionValues::new(elements, universal_regions.len(), &placeholder_indices);
+
+ for region in liveness_constraints.rows() {
+ let scc = constraint_sccs.scc(region);
+ scc_values.merge_liveness(scc, region, &liveness_constraints);
+ }
+
+ let scc_universes = Self::compute_scc_universes(&constraint_sccs, &definitions);
+
+ let scc_representatives = Self::compute_scc_representatives(&constraint_sccs, &definitions);
+
+ let member_constraints =
+ Rc::new(member_constraints_in.into_mapped(|r| constraint_sccs.scc(r)));
+
+ let mut result = Self {
+ var_infos,
+ definitions,
+ liveness_constraints,
+ constraints,
+ constraint_graph,
+ constraint_sccs,
+ rev_scc_graph: None,
+ member_constraints,
+ member_constraints_applied: Vec::new(),
+ closure_bounds_mapping,
+ universe_causes,
+ scc_universes,
+ scc_representatives,
+ scc_values,
+ type_tests,
+ universal_regions,
+ universal_region_relations,
+ };
+
+ result.init_free_and_bound_regions();
+
+ result
+ }
+
+ /// Each SCC is the combination of many region variables which
+ /// have been equated. Therefore, we can associate a universe with
+ /// each SCC which is the minimum of all the universes of its
+ /// constituent regions -- this is because whatever value the SCC
+ /// takes on must be a value that each of the regions within the
+ /// SCC could have as well. This implies that the SCC must have
+ /// the minimum, or narrowest, universe.
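+ ///
+ /// A minimal sketch of the rule (illustrative only):
+ /// ```ignore (illustrative)
+ /// // scc = { R0 in U0, R1 in U1 }
+ /// // => scc_universes[scc] == min(U0, U1) == U0
+ /// ```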
+ fn compute_scc_universes(
+ constraint_sccs: &Sccs<RegionVid, ConstraintSccIndex>,
+ definitions: &IndexVec<RegionVid, RegionDefinition<'tcx>>,
+ ) -> IndexVec<ConstraintSccIndex, ty::UniverseIndex> {
+ let num_sccs = constraint_sccs.num_sccs();
+ let mut scc_universes = IndexVec::from_elem_n(ty::UniverseIndex::MAX, num_sccs);
+
+ debug!("compute_scc_universes()");
+
+ // For each region R in universe U, ensure that the universe for the SCC
+ // that contains R is "no bigger" than U. This effectively sets the universe
+ // for each SCC to be the minimum of the regions within.
+ for (region_vid, region_definition) in definitions.iter_enumerated() {
+ let scc = constraint_sccs.scc(region_vid);
+ let scc_universe = &mut scc_universes[scc];
+ let scc_min = std::cmp::min(region_definition.universe, *scc_universe);
+ if scc_min != *scc_universe {
+ *scc_universe = scc_min;
+ debug!(
+ "compute_scc_universes: lowered universe of {scc:?} to {scc_min:?} \
+ because it contains {region_vid:?} in {region_universe:?}",
+ scc = scc,
+ scc_min = scc_min,
+ region_vid = region_vid,
+ region_universe = region_definition.universe,
+ );
+ }
+ }
+
+ // Walk each SCC `A` and `B` such that `A: B`
+ // and ensure that universe(A) can see universe(B).
+ //
+ // This serves to enforce the 'empty/placeholder' hierarchy
+ // (described in more detail on `RegionKind`):
+ //
+ // ```
+ // static -----+
+ // | |
+ // empty(U0) placeholder(U1)
+ // | /
+ // empty(U1)
+ // ```
+ //
+ // In particular, imagine we have variables R0 in U0 and R1
+ // created in U1, and constraints like this;
+ //
+ // ```
+ // R1: !1 // R1 outlives the placeholder in U1
+ // R1: R0 // R1 outlives R0
+ // ```
+ //
+ // Here, we wish for R1 to be `'static`, because it
+ // cannot outlive `placeholder(U1)` and `empty(U0)` any other way.
+ //
+ // Thanks to this loop, what happens is that the `R1: R0`
+ // constraint lowers the universe of `R1` to `U0`, which in turn
+ // means that the `R1: !1` constraint will (later) cause
+ // `R1` to become `'static`.
+ for scc_a in constraint_sccs.all_sccs() {
+ for &scc_b in constraint_sccs.successors(scc_a) {
+ let scc_universe_a = scc_universes[scc_a];
+ let scc_universe_b = scc_universes[scc_b];
+ let scc_universe_min = std::cmp::min(scc_universe_a, scc_universe_b);
+ if scc_universe_a != scc_universe_min {
+ scc_universes[scc_a] = scc_universe_min;
+
+ debug!(
+ "compute_scc_universes: lowered universe of {scc_a:?} to {scc_universe_min:?} \
+ because {scc_a:?}: {scc_b:?} and {scc_b:?} is in universe {scc_universe_b:?}",
+ scc_a = scc_a,
+ scc_b = scc_b,
+ scc_universe_min = scc_universe_min,
+ scc_universe_b = scc_universe_b
+ );
+ }
+ }
+ }
+
+ debug!("compute_scc_universes: scc_universe = {:#?}", scc_universes);
+
+ scc_universes
+ }
+
+ /// For each SCC, we compute a unique `RegionVid` (in fact, the
+ /// minimal one that belongs to the SCC). See
+ /// `scc_representatives` field of `RegionInferenceContext` for
+ /// more details.
+ fn compute_scc_representatives(
+ constraints_scc: &Sccs<RegionVid, ConstraintSccIndex>,
+ definitions: &IndexVec<RegionVid, RegionDefinition<'tcx>>,
+ ) -> IndexVec<ConstraintSccIndex, ty::RegionVid> {
+ let num_sccs = constraints_scc.num_sccs();
+ let next_region_vid = definitions.next_index();
+ let mut scc_representatives = IndexVec::from_elem_n(next_region_vid, num_sccs);
+
+ for region_vid in definitions.indices() {
+ let scc = constraints_scc.scc(region_vid);
+ let prev_min = scc_representatives[scc];
+ scc_representatives[scc] = region_vid.min(prev_min);
+ }
+
+ scc_representatives
+ }
+
+ /// Initializes the region variables for each universally
+ /// quantified region (lifetime parameter). The first N variables
+ /// always correspond to the regions appearing in the function
+ /// signature (both named and anonymous) and where-clauses. This
+ /// function iterates over those regions and initializes them with
+ /// minimum values.
+ ///
+ /// For example:
+ /// ```
+ /// fn foo<'a, 'b>( /* ... */ ) where 'a: 'b { /* ... */ }
+ /// ```
+ /// would initialize two variables like so:
+ /// ```ignore (illustrative)
+ /// R0 = { CFG, R0 } // 'a
+ /// R1 = { CFG, R0, R1 } // 'b
+ /// ```
+ /// Here, R0 represents `'a`, and it contains (a) the entire CFG
+ /// and (b) any universally quantified regions that it outlives,
+ /// which in this case is just itself. R1 (`'b`) in contrast also
+ /// outlives `'a` and hence contains R0 and R1.
+ fn init_free_and_bound_regions(&mut self) {
+ // Update the names (if any)
+ for (external_name, variable) in self.universal_regions.named_universal_regions() {
+ debug!(
+ "init_universal_regions: region {:?} has external name {:?}",
+ variable, external_name
+ );
+ self.definitions[variable].external_name = Some(external_name);
+ }
+
+ for variable in self.definitions.indices() {
+ let scc = self.constraint_sccs.scc(variable);
+
+ match self.definitions[variable].origin {
+ NllRegionVariableOrigin::FreeRegion => {
+ // For each free, universally quantified region X:
+
+ // Add all nodes in the CFG to liveness constraints
+ self.liveness_constraints.add_all_points(variable);
+ self.scc_values.add_all_points(scc);
+
+ // Add `end(X)` into the set for X.
+ self.scc_values.add_element(scc, variable);
+ }
+
+ NllRegionVariableOrigin::Placeholder(placeholder) => {
+ // Each placeholder region is only visible from
+ // its universe `ui` and its extensions. So we
+ // can't just add it into `scc` unless the
+ // universe of the scc can name this region.
+ let scc_universe = self.scc_universes[scc];
+ if scc_universe.can_name(placeholder.universe) {
+ self.scc_values.add_element(scc, placeholder);
+ } else {
+ debug!(
+ "init_free_and_bound_regions: placeholder {:?} is \
+ not compatible with universe {:?} of its SCC {:?}",
+ placeholder, scc_universe, scc,
+ );
+ self.add_incompatible_universe(scc);
+ }
+ }
+
+ NllRegionVariableOrigin::Existential { .. } => {
+ // For existential regions, nothing to do.
+ }
+ }
+ }
+ }
+
+ /// Returns an iterator over all the region indices.
+ pub fn regions(&self) -> impl Iterator<Item = RegionVid> + 'tcx {
+ self.definitions.indices()
+ }
+
+ /// Given a universal region in scope on the MIR, returns the
+ /// corresponding index.
+ ///
+ /// (Panics if `r` is not a registered universal region.)
+ pub fn to_region_vid(&self, r: ty::Region<'tcx>) -> RegionVid {
+ self.universal_regions.to_region_vid(r)
+ }
+
+ /// Adds annotations for `#[rustc_regions]`; see `UniversalRegions::annotate`.
+ pub(crate) fn annotate(&self, tcx: TyCtxt<'tcx>, err: &mut Diagnostic) {
+ self.universal_regions.annotate(tcx, err)
+ }
+
+ /// Returns `true` if the region `r` contains the point `p`.
+ ///
+ /// Panics if called before `solve()` executes.
+ pub(crate) fn region_contains(&self, r: impl ToRegionVid, p: impl ToElementIndex) -> bool {
+ let scc = self.constraint_sccs.scc(r.to_region_vid());
+ self.scc_values.contains(scc, p)
+ }
+
+ /// Returns access to the value of `r` for debugging purposes.
+ pub(crate) fn region_value_str(&self, r: RegionVid) -> String {
+ let scc = self.constraint_sccs.scc(r.to_region_vid());
+ self.scc_values.region_value_str(scc)
+ }
+
+ /// Returns the universe index of the region `r`.
+ pub(crate) fn region_universe(&self, r: RegionVid) -> ty::UniverseIndex {
+ let scc = self.constraint_sccs.scc(r.to_region_vid());
+ self.scc_universes[scc]
+ }
+
+ /// Once region solving has completed, this function will return
+ /// the member constraints that were applied to the value of a given
+ /// region `r`. See `AppliedMemberConstraint`.
+ pub(crate) fn applied_member_constraints(
+ &self,
+ r: impl ToRegionVid,
+ ) -> &[AppliedMemberConstraint] {
+ let scc = self.constraint_sccs.scc(r.to_region_vid());
+ binary_search_util::binary_search_slice(
+ &self.member_constraints_applied,
+ |applied| applied.member_region_scc,
+ &scc,
+ )
+ }
+
+ /// Performs region inference and report errors if we see any
+ /// unsatisfiable constraints. If this is a closure, returns the
+ /// region requirements to propagate to our creator, if any.
+ #[instrument(skip(self, infcx, body, polonius_output), level = "debug")]
+ pub(super) fn solve(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body: &Body<'tcx>,
+ polonius_output: Option<Rc<PoloniusOutput>>,
+ ) -> (Option<ClosureRegionRequirements<'tcx>>, RegionErrors<'tcx>) {
+ let mir_def_id = body.source.def_id();
+ self.propagate_constraints(body);
+
+ let mut errors_buffer = RegionErrors::new();
+
+ // If this is a closure, we can propagate unsatisfied
+ // `outlives_requirements` to our creator, so create a vector
+ // to store those. Otherwise, we'll pass in `None` to the
+ // functions below, which will trigger them to report errors
+ // eagerly.
+ let mut outlives_requirements = infcx.tcx.is_typeck_child(mir_def_id).then(Vec::new);
+
+ self.check_type_tests(
+ infcx,
+ param_env,
+ body,
+ outlives_requirements.as_mut(),
+ &mut errors_buffer,
+ );
+
+ // In Polonius mode, the errors about missing universal region relations are in the output
+ // and need to be emitted or propagated. Otherwise, we need to check whether the
+ // constraints were too strong, and if so, emit or propagate those errors.
+ if infcx.tcx.sess.opts.unstable_opts.polonius {
+ self.check_polonius_subset_errors(
+ body,
+ outlives_requirements.as_mut(),
+ &mut errors_buffer,
+ polonius_output.expect("Polonius output is unavailable despite `-Z polonius`"),
+ );
+ } else {
+ self.check_universal_regions(body, outlives_requirements.as_mut(), &mut errors_buffer);
+ }
+
+ if errors_buffer.is_empty() {
+ self.check_member_constraints(infcx, &mut errors_buffer);
+ }
+
+ let outlives_requirements = outlives_requirements.unwrap_or_default();
+
+ if outlives_requirements.is_empty() {
+ (None, errors_buffer)
+ } else {
+ let num_external_vids = self.universal_regions.num_global_and_external_regions();
+ (
+ Some(ClosureRegionRequirements { num_external_vids, outlives_requirements }),
+ errors_buffer,
+ )
+ }
+ }
+
+ /// Propagate the region constraints: this will grow the values
+ /// for each region variable until all the constraints are
+ /// satisfied. Note that some values may grow **too** large to be
+ /// feasible, but we check this later.
+ #[instrument(skip(self, _body), level = "debug")]
+ fn propagate_constraints(&mut self, _body: &Body<'tcx>) {
+ debug!("constraints={:#?}", {
+ let mut constraints: Vec<_> = self.constraints.outlives().iter().collect();
+ constraints.sort_by_key(|c| (c.sup, c.sub));
+ constraints
+ .into_iter()
+ .map(|c| (c, self.constraint_sccs.scc(c.sup), self.constraint_sccs.scc(c.sub)))
+ .collect::<Vec<_>>()
+ });
+
+ // To propagate constraints, we walk the DAG induced by the
+ // SCC. For each SCC, we visit its successors and compute
+ // their values, then we union all those values to get our
+ // own.
+ let constraint_sccs = self.constraint_sccs.clone();
+ for scc in constraint_sccs.all_sccs() {
+ self.compute_value_for_scc(scc);
+ }
+
+ // Sort the applied member constraints so we can binary search
+ // through them later.
+ self.member_constraints_applied.sort_by_key(|applied| applied.member_region_scc);
+ }
+
+ /// Computes the value of the SCC `scc_a`, which has not yet been
+ /// computed, by unioning the values of its successors.
+ /// Assumes that all successors have been computed already
+ /// (which is assured by iterating over SCCs in dependency order).
+ #[instrument(skip(self), level = "debug")]
+ fn compute_value_for_scc(&mut self, scc_a: ConstraintSccIndex) {
+ let constraint_sccs = self.constraint_sccs.clone();
+
+ // Walk each SCC `B` such that `A: B`...
+ for &scc_b in constraint_sccs.successors(scc_a) {
+ debug!(?scc_b);
+
+ // ...and add elements from `B` into `A`. One complication
+ // arises because of universes: If `B` contains something
+ // that `A` cannot name, then `A` can only contain `B` if
+ // it outlives static.
+ if self.universe_compatible(scc_b, scc_a) {
+ // `A` can name everything that is in `B`, so just
+ // merge the bits.
+ self.scc_values.add_region(scc_a, scc_b);
+ } else {
+ self.add_incompatible_universe(scc_a);
+ }
+ }
+
+ // Now take member constraints into account.
+ let member_constraints = self.member_constraints.clone();
+ for m_c_i in member_constraints.indices(scc_a) {
+ self.apply_member_constraint(scc_a, m_c_i, member_constraints.choice_regions(m_c_i));
+ }
+
+ debug!(value = ?self.scc_values.region_value_str(scc_a));
+ }
+
+ /// Invoked for each `R0 member of [R1..Rn]` constraint.
+ ///
+ /// `scc` is the SCC containing R0, and `choice_regions` are the
+ /// `R1..Rn` regions -- they are always known to be universal
+ /// regions (and if that's not true, we just don't attempt to
+ /// enforce the constraint).
+ ///
+ /// The current value of `scc` at the time the method is invoked
+ /// is considered a *lower bound*. If possible, we will modify
+ /// the constraint to set it equal to one of the option regions.
+ /// If we make any changes, this returns `true`; otherwise, `false`.
+ #[instrument(skip(self, member_constraint_index), level = "debug")]
+ fn apply_member_constraint(
+ &mut self,
+ scc: ConstraintSccIndex,
+ member_constraint_index: NllMemberConstraintIndex,
+ choice_regions: &[ty::RegionVid],
+ ) -> bool {
+ // Create a mutable vector of the options. We'll try to winnow
+ // them down.
+ let mut choice_regions: Vec<ty::RegionVid> = choice_regions.to_vec();
+
+ // Convert to the SCC representative: sometimes we have inference
+ // variables in the member constraint that wind up equated with
+ // universal regions. The SCC representative is the lowest-numbered
+ // region in the corresponding SCC, so it will be the universal region
+ // if one exists.
+ for c_r in &mut choice_regions {
+ let scc = self.constraint_sccs.scc(*c_r);
+ *c_r = self.scc_representatives[scc];
+ }
+
+ // The 'member region' in a member constraint is part of the
+ // hidden type, which must be in the root universe. Therefore,
+ // it cannot have any placeholders in its value.
+ assert!(self.scc_universes[scc] == ty::UniverseIndex::ROOT);
+ debug_assert!(
+ self.scc_values.placeholders_contained_in(scc).next().is_none(),
+ "scc {:?} in a member constraint has placeholder value: {:?}",
+ scc,
+ self.scc_values.region_value_str(scc),
+ );
+
+ // The existing value for `scc` is a lower-bound. This will
+ // consist of some set `{P} + {LB}` of points `{P}` and
+ // lower-bound free regions `{LB}`. As each choice region `O`
+ // is a free region, it will outlive the points. But we can
+ // only consider the option `O` if `O: LB`.
+ choice_regions.retain(|&o_r| {
+ self.scc_values
+ .universal_regions_outlived_by(scc)
+ .all(|lb| self.universal_region_relations.outlives(o_r, lb))
+ });
+ debug!(?choice_regions, "after lb");
+
+ // Now find all the *upper bounds* -- that is, each UB is a
+ // free region that must outlive the member region `R0` (`UB:
+ // R0`). Therefore, we need only keep an option `O` if `UB: O`
+ // for all UB.
+ let rev_scc_graph = self.reverse_scc_graph();
+ let universal_region_relations = &self.universal_region_relations;
+ for ub in rev_scc_graph.upper_bounds(scc) {
+ debug!(?ub);
+ choice_regions.retain(|&o_r| universal_region_relations.outlives(ub, o_r));
+ }
+ debug!(?choice_regions, "after ub");
+
+ // If we ruled everything out, we're done.
+ if choice_regions.is_empty() {
+ return false;
+ }
+
+ // Otherwise, we need to find the minimum remaining choice, if
+ // any, and take that.
+ debug!("choice_regions remaining are {:#?}", choice_regions);
+ let min = |r1: ty::RegionVid, r2: ty::RegionVid| -> Option<ty::RegionVid> {
+ let r1_outlives_r2 = self.universal_region_relations.outlives(r1, r2);
+ let r2_outlives_r1 = self.universal_region_relations.outlives(r2, r1);
+ match (r1_outlives_r2, r2_outlives_r1) {
+ (true, true) => Some(r1.min(r2)),
+ (true, false) => Some(r2),
+ (false, true) => Some(r1),
+ (false, false) => None,
+ }
+ };
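+
+ // For example, if `'a: 'b` but not vice versa, then `min('a, 'b) == 'b`
+ // (the outlived region). If neither outlives the other, the two options
+ // are incomparable, and the loop below bails out.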
+ let mut min_choice = choice_regions[0];
+ for &other_option in &choice_regions[1..] {
+ debug!(?min_choice, ?other_option,);
+ match min(min_choice, other_option) {
+ Some(m) => min_choice = m,
+ None => {
+ debug!(?min_choice, ?other_option, "incomparable; no min choice",);
+ return false;
+ }
+ }
+ }
+
+ let min_choice_scc = self.constraint_sccs.scc(min_choice);
+ debug!(?min_choice, ?min_choice_scc);
+ if self.scc_values.add_region(scc, min_choice_scc) {
+ self.member_constraints_applied.push(AppliedMemberConstraint {
+ member_region_scc: scc,
+ min_choice,
+ member_constraint_index,
+ });
+
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Returns `true` if all the elements in the value of `scc_b` are nameable
+ /// in `scc_a`. Used during constraint propagation, and only once
+ /// the value of `scc_b` has been computed.
+ fn universe_compatible(&self, scc_b: ConstraintSccIndex, scc_a: ConstraintSccIndex) -> bool {
+ let universe_a = self.scc_universes[scc_a];
+
+ // Quick check: if scc_b's declared universe is a subset of
+ // scc_a's declared universe (typically, both are ROOT), then
+ // it cannot contain any problematic universe elements.
+ if universe_a.can_name(self.scc_universes[scc_b]) {
+ return true;
+ }
+
+ // Otherwise, we have to iterate over the universe elements in
+ // B's value, and check whether all of them are nameable
+ // from universe_a
+ self.scc_values.placeholders_contained_in(scc_b).all(|p| universe_a.can_name(p.universe))
+ }
+
+ /// Extend `scc` so that it can outlive some placeholder region
+ /// from a universe it can't name; at present, the only way for
+ /// this to be true is if `scc` outlives `'static`. This is
+ /// actually stricter than necessary: ideally, we'd support bounds
+ /// like `for<'a: 'b>` that might then allow us to approximate
+ /// `'a` with `'b` and not `'static`. But it will have to do for
+ /// now.
+ fn add_incompatible_universe(&mut self, scc: ConstraintSccIndex) {
+ debug!("add_incompatible_universe(scc={:?})", scc);
+
+ let fr_static = self.universal_regions.fr_static;
+ self.scc_values.add_all_points(scc);
+ self.scc_values.add_element(scc, fr_static);
+ }
+
+ /// Once regions have been propagated, this method is used to see
+ /// whether the "type tests" produced by typeck were satisfied;
+ /// type tests encode type-outlives relationships like `T:
+ /// 'a`. See `TypeTest` for more details.
+ fn check_type_tests(
+ &self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body: &Body<'tcx>,
+ mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+ errors_buffer: &mut RegionErrors<'tcx>,
+ ) {
+ let tcx = infcx.tcx;
+
+ // Sometimes we register equivalent type-tests that would
+ // result in basically the exact same error being reported to
+ // the user. Avoid that.
+ let mut deduplicate_errors = FxHashSet::default();
+
+ for type_test in &self.type_tests {
+ debug!("check_type_test: {:?}", type_test);
+
+ let generic_ty = type_test.generic_kind.to_ty(tcx);
+ if self.eval_verify_bound(
+ infcx,
+ param_env,
+ body,
+ generic_ty,
+ type_test.lower_bound,
+ &type_test.verify_bound,
+ ) {
+ continue;
+ }
+
+ if let Some(propagated_outlives_requirements) = &mut propagated_outlives_requirements {
+ if self.try_promote_type_test(
+ infcx,
+ param_env,
+ body,
+ type_test,
+ propagated_outlives_requirements,
+ ) {
+ continue;
+ }
+ }
+
+ // Type-test failed. Report the error.
+ let erased_generic_kind = infcx.tcx.erase_regions(type_test.generic_kind);
+
+ // Skip duplicate-ish errors.
+ if deduplicate_errors.insert((
+ erased_generic_kind,
+ type_test.lower_bound,
+ type_test.locations,
+ )) {
+ debug!(
+ "check_type_test: reporting error for erased_generic_kind={:?}, \
+ lower_bound_region={:?}, \
+ type_test.locations={:?}",
+ erased_generic_kind, type_test.lower_bound, type_test.locations,
+ );
+
+ errors_buffer.push(RegionErrorKind::TypeTestError { type_test: type_test.clone() });
+ }
+ }
+ }
+
+ /// Invoked when we have some type-test (e.g., `T: 'X`) that we cannot
+ /// prove to be satisfied. If this is a closure, we will attempt to
+ /// "promote" this type-test into our `ClosureRegionRequirements` and
+ /// hence pass it up to our creator. To do this, we have to phrase the
+ /// type-test in terms of external free regions, as local free
+ /// regions are not nameable by the closure's creator.
+ ///
+ /// Promotion works as follows: we first check that the type `T`
+ /// contains only regions that the creator knows about. If this is
+ /// true, then -- as a consequence -- we know that all regions in
+ /// the type `T` are free regions that outlive the closure body. If
+ /// false, then promotion fails.
+ ///
+ /// Once we've promoted T, we have to "promote" `'X` to some region
+ /// that is "external" to the closure. Generally speaking, a region
+ /// may be the union of some points in the closure body as well as
+ /// various free lifetimes. We can ignore the points in the closure
+ /// body: if the type T can be expressed in terms of external regions,
+ /// we know it outlives the points in the closure body. That
+ /// just leaves the free regions.
+ ///
+ /// The idea then is to lower the `T: 'X` constraint into multiple
+ /// bounds -- e.g., if `'X` is the union of two free lifetimes,
+ /// `'1` and `'2`, then we would create `T: '1` and `T: '2`.
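+ ///
+ /// A sketch of that last step (illustrative only):
+ /// ```ignore (illustrative)
+ /// // T: 'X, with 'X = union('1, '2)
+ /// // => push ClosureOutlivesRequirement { subject: T, outlived_free_region: '1, .. }
+ /// //    push ClosureOutlivesRequirement { subject: T, outlived_free_region: '2, .. }
+ /// ```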
+ #[instrument(level = "debug", skip(self, infcx, propagated_outlives_requirements))]
+ fn try_promote_type_test(
+ &self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body: &Body<'tcx>,
+ type_test: &TypeTest<'tcx>,
+ propagated_outlives_requirements: &mut Vec<ClosureOutlivesRequirement<'tcx>>,
+ ) -> bool {
+ let tcx = infcx.tcx;
+
+ let TypeTest { generic_kind, lower_bound, locations, verify_bound: _ } = type_test;
+
+ let generic_ty = generic_kind.to_ty(tcx);
+ let Some(subject) = self.try_promote_type_test_subject(infcx, generic_ty) else {
+ return false;
+ };
+
+ debug!("subject = {:?}", subject);
+
+ let r_scc = self.constraint_sccs.scc(*lower_bound);
+
+ debug!(
+ "lower_bound = {:?} r_scc={:?} universe={:?}",
+ lower_bound, r_scc, self.scc_universes[r_scc]
+ );
+
+ // If the type test requires that `T: 'a` where `'a` is a
+ // placeholder from another universe, that effectively requires
+ // `T: 'static`, so we have to propagate that requirement.
+ //
+ // It doesn't matter *what* universe because the promoted `T` will
+ // always be in the root universe.
+ if let Some(p) = self.scc_values.placeholders_contained_in(r_scc).next() {
+ debug!("encountered placeholder in higher universe: {:?}, requiring 'static", p);
+ let static_r = self.universal_regions.fr_static;
+ propagated_outlives_requirements.push(ClosureOutlivesRequirement {
+ subject,
+ outlived_free_region: static_r,
+ blame_span: locations.span(body),
+ category: ConstraintCategory::Boring,
+ });
+
+ // We can return here -- the code below might push additional
+ // constraints, but they would all be weaker than this one.
+ return true;
+ }
+
+ // For each region outlived by `lower_bound`, find a non-local
+ // universal region (it may be the same region) and add it to
+ // `ClosureOutlivesRequirement`.
+ for ur in self.scc_values.universal_regions_outlived_by(r_scc) {
+ debug!("universal_region_outlived_by ur={:?}", ur);
+ // Check whether we can already prove that the "subject" outlives `ur`.
+ // If so, we don't have to propagate this requirement to our caller.
+ //
+ // To continue the example from the function, if we are trying to promote
+ // a requirement that `T: 'X`, and we know that `'X = '1 + '2` (i.e., the union
+ // of `'1` and `'2`), then in this loop `ur` will be `'1` (and `'2`). So here
+ // we check whether `T: '1` is something we *can* prove. If so, no need
+ // to propagate that requirement.
+ //
+ // This is needed because -- particularly in the case
+ // where `ur` is a local bound -- we are sometimes in a
+ // position to prove things that our caller cannot. See
+ // #53570 for an example.
+ if self.eval_verify_bound(
+ infcx,
+ param_env,
+ body,
+ generic_ty,
+ ur,
+ &type_test.verify_bound,
+ ) {
+ continue;
+ }
+
+ let non_local_ub = self.universal_region_relations.non_local_upper_bounds(ur);
+ debug!("try_promote_type_test: non_local_ub={:?}", non_local_ub);
+
+ // This is slightly too conservative. To show `T: '1`, given `'2: '1`
+ // and `'3: '1`, we only need to prove that `T: '2` *or* `T: '3`, but to
+ // avoid potential non-determinism we approximate this by requiring
+ // both `T: '2` and `T: '3`.
+ for upper_bound in non_local_ub {
+ debug_assert!(self.universal_regions.is_universal_region(upper_bound));
+ debug_assert!(!self.universal_regions.is_local_free_region(upper_bound));
+
+ let requirement = ClosureOutlivesRequirement {
+ subject,
+ outlived_free_region: upper_bound,
+ blame_span: locations.span(body),
+ category: ConstraintCategory::Boring,
+ };
+ debug!("try_promote_type_test: pushing {:#?}", requirement);
+ propagated_outlives_requirements.push(requirement);
+ }
+ }
+ true
+ }
+
+ /// When we promote a type test `T: 'r`, we have to convert the
+ /// type `T` into something we can store in a query result (so
+ /// something allocated for `'tcx`). This is problematic if `ty`
+ /// contains regions. During the course of NLL region checking, we
+ /// will have replaced all of those regions with fresh inference
+ /// variables. To create a test subject, we want to replace those
+ /// inference variables with some region from the closure
+ /// signature -- this is not always possible, so this is a
+ /// fallible process. Presuming we do find a suitable region, we
+ /// will use its *external name*, which will be a `RegionKind`
+ /// variant that can be used in query responses such as
+ /// `ReEarlyBound`.
+ #[instrument(level = "debug", skip(self, infcx))]
+ fn try_promote_type_test_subject(
+ &self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Option<ClosureOutlivesSubject<'tcx>> {
+ let tcx = infcx.tcx;
+
+ let ty = tcx.fold_regions(ty, |r, _depth| {
+ let region_vid = self.to_region_vid(r);
+
+ // The challenge is this. We have some region variable `r`
+ // whose value is a set of CFG points and universal
+ // regions. We want to find if that set is *equivalent* to
+ // any of the named regions found in the closure.
+ //
+ // To do so, we compute the
+ // `non_local_universal_upper_bound`. This will be a
+ // non-local, universal region that is greater than `r`.
+ // However, it might not be *contained* within `r`, so
+ // then we further check whether this bound is contained
+ // in `r`. If so, we can say that `r` is equivalent to the
+ // bound.
+ //
+ // Let's work through a few examples. For these, imagine
+ // that we have 3 non-local regions (I'll denote them as
+ // `'static`, `'a`, and `'b`, though of course in the code
+ // they would be represented with indices) where:
+ //
+ // - `'static: 'a`
+ // - `'static: 'b`
+ //
+ // First, let's assume that `r` is some existential
+ // variable with an inferred value `{'a, 'static}` (plus
+ // some CFG nodes). In this case, the non-local upper
+ // bound is `'static`, since that outlives `'a`. `'static`
+ // is also a member of `r` and hence we consider `r`
+ // equivalent to `'static` (and replace it with
+ // `'static`).
+ //
+ // Now let's consider the inferred value `{'a, 'b}`. This
+ // means `r` is effectively `'a | 'b`. I'm not sure if
+ // this can come about, actually, but assuming it did, we
+ // would get a non-local upper bound of `'static`. Since
+ // `'static` is not contained in `r`, we would fail to
+ // find an equivalent.
+ let upper_bound = self.non_local_universal_upper_bound(region_vid);
+ if self.region_contains(region_vid, upper_bound) {
+ self.definitions[upper_bound].external_name.unwrap_or(r)
+ } else {
+ // In the case of a failure, use a `ReVar` result. This will
+ // cause the `needs_infer` check later on to succeed, and we will
+ // return `None`.
+ r
+ }
+ });
+
+ debug!("try_promote_type_test_subject: folded ty = {:?}", ty);
+
+ // `needs_infer` will only be true if we failed to promote some region.
+ if ty.needs_infer() {
+ return None;
+ }
+
+ Some(ClosureOutlivesSubject::Ty(ty))
+ }
+
+ /// Given some universal or existential region `r`, finds a
+ /// non-local, universal region `r+` that outlives `r` at entry to (and
+ /// exit from) the closure. In the worst case, this will be
+ /// `'static`.
+ ///
+ /// This is used for two purposes. First, if we are propagating
+ /// some requirement `T: r` outward, we can use this method to enlarge `r`
+ /// to something we can encode for our creator (which only knows
+ /// about non-local, universal regions). It is also used when
+ /// encoding `T` as part of `try_promote_type_test_subject` (see
+ /// that fn for details).
+ ///
+ /// This is based on the result `'y` of `universal_upper_bound`,
+ /// except that it further takes the non-local upper
+ /// bound of `'y`, so that the final result is non-local.
+ fn non_local_universal_upper_bound(&self, r: RegionVid) -> RegionVid {
+ debug!("non_local_universal_upper_bound(r={:?}={})", r, self.region_value_str(r));
+
+ let lub = self.universal_upper_bound(r);
+
+ // Grow further to get smallest universal region known to
+ // creator.
+ let non_local_lub = self.universal_region_relations.non_local_upper_bound(lub);
+
+ debug!("non_local_universal_upper_bound: non_local_lub={:?}", non_local_lub);
+
+ non_local_lub
+ }
+
+ /// Returns a universally quantified region that outlives the
+ /// value of `r` (`r` may be existentially or universally
+ /// quantified).
+ ///
+ /// Since `r` is (potentially) an existential region, it has some
+ /// value which may include (a) any number of points in the CFG
+ /// and (b) any number of `end('x)` elements of universally
+ /// quantified regions. To convert this into a single universal
+ /// region we do as follows:
+ ///
+ /// - Ignore the CFG points in `'r`. All universally quantified regions
+ /// include the CFG anyhow.
+ /// - For each `end('x)` element in `'r`, compute the mutual LUB, yielding
+ /// a result `'y`.
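+ ///
+ /// For example (an illustrative sketch):
+ /// ```ignore (illustrative)
+ /// // 'r = { CFG points, end('a), end('b) }
+ /// // => lub = postdom_upper_bound(postdom_upper_bound('fn_body, 'a), 'b)
+ /// // (in the worst case, this is 'static)
+ /// ```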
+ #[instrument(skip(self), level = "debug")]
+ pub(crate) fn universal_upper_bound(&self, r: RegionVid) -> RegionVid {
+ debug!(r = %self.region_value_str(r));
+
+ // Find the smallest universal region that contains all other
+ // universal regions within `region`.
+ let mut lub = self.universal_regions.fr_fn_body;
+ let r_scc = self.constraint_sccs.scc(r);
+ for ur in self.scc_values.universal_regions_outlived_by(r_scc) {
+ lub = self.universal_region_relations.postdom_upper_bound(lub, ur);
+ }
+
+ debug!(?lub);
+
+ lub
+ }
+
+ /// Like `universal_upper_bound`, but returns an approximation more suitable
+ /// for diagnostics. If `r` contains multiple disjoint universal regions
+ /// (e.g. `'a` and `'b` in `fn foo<'a, 'b>() { /* ... */ }`), we pick the lower-numbered region.
+ /// This corresponds to picking named regions over unnamed regions
+ /// (e.g. picking early-bound regions over a closure late-bound region).
+ ///
+ /// This means that the returned value may not be a true upper bound, since
+ /// only 'static is known to outlive disjoint universal regions.
+ /// Therefore, this method should only be used in diagnostic code,
+ /// where displaying *some* named universal region is better than
+ /// falling back to 'static.
+ pub(crate) fn approx_universal_upper_bound(&self, r: RegionVid) -> RegionVid {
+ debug!("approx_universal_upper_bound(r={:?}={})", r, self.region_value_str(r));
+
+ // Find the smallest universal region that contains all other
+ // universal regions within `region`.
+ let mut lub = self.universal_regions.fr_fn_body;
+ let r_scc = self.constraint_sccs.scc(r);
+ let static_r = self.universal_regions.fr_static;
+ for ur in self.scc_values.universal_regions_outlived_by(r_scc) {
+ let new_lub = self.universal_region_relations.postdom_upper_bound(lub, ur);
+ debug!("approx_universal_upper_bound: ur={:?} lub={:?} new_lub={:?}", ur, lub, new_lub);
+ // The upper bound of two non-static regions is static: this
+ // means we know nothing about the relationship between these
+ // two regions. Pick a 'better' one to use when constructing
+ // a diagnostic.
+ if ur != static_r && lub != static_r && new_lub == static_r {
+ // Prefer the region with an `external_name` -- this
+ // indicates that the region is early-bound, so working with
+ // it can produce a nicer error.
+ if self.region_definition(ur).external_name.is_some() {
+ lub = ur;
+ } else if self.region_definition(lub).external_name.is_some() {
+ // Leave lub unchanged
+ } else {
+ // If we get here, we don't have any reason to prefer
+ // one region over the other. Just pick the
+ // one with the lower index for now.
+ lub = std::cmp::min(ur, lub);
+ }
+ } else {
+ lub = new_lub;
+ }
+ }
+
+ debug!("approx_universal_upper_bound: r={:?} lub={:?}", r, lub);
+
+ lub
+ }
+
+ /// Tests whether the given `verify_bound` holds for the region
+ /// `lower_bound`.
+ fn eval_verify_bound(
+ &self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body: &Body<'tcx>,
+ generic_ty: Ty<'tcx>,
+ lower_bound: RegionVid,
+ verify_bound: &VerifyBound<'tcx>,
+ ) -> bool {
+ debug!("eval_verify_bound(lower_bound={:?}, verify_bound={:?})", lower_bound, verify_bound);
+
+ match verify_bound {
+ VerifyBound::IfEq(verify_if_eq_b) => {
+ self.eval_if_eq(infcx, param_env, generic_ty, lower_bound, *verify_if_eq_b)
+ }
+
+ VerifyBound::IsEmpty => {
+ let lower_bound_scc = self.constraint_sccs.scc(lower_bound);
+ self.scc_values.elements_contained_in(lower_bound_scc).next().is_none()
+ }
+
+ VerifyBound::OutlivedBy(r) => {
+ let r_vid = self.to_region_vid(*r);
+ self.eval_outlives(r_vid, lower_bound)
+ }
+
+ VerifyBound::AnyBound(verify_bounds) => verify_bounds.iter().any(|verify_bound| {
+ self.eval_verify_bound(
+ infcx,
+ param_env,
+ body,
+ generic_ty,
+ lower_bound,
+ verify_bound,
+ )
+ }),
+
+ VerifyBound::AllBounds(verify_bounds) => verify_bounds.iter().all(|verify_bound| {
+ self.eval_verify_bound(
+ infcx,
+ param_env,
+ body,
+ generic_ty,
+ lower_bound,
+ verify_bound,
+ )
+ }),
+ }
+ }
+
+ fn eval_if_eq(
+ &self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ generic_ty: Ty<'tcx>,
+ lower_bound: RegionVid,
+ verify_if_eq_b: ty::Binder<'tcx, VerifyIfEq<'tcx>>,
+ ) -> bool {
+ let generic_ty = self.normalize_to_scc_representatives(infcx.tcx, generic_ty);
+ let verify_if_eq_b = self.normalize_to_scc_representatives(infcx.tcx, verify_if_eq_b);
+ match test_type_match::extract_verify_if_eq(
+ infcx.tcx,
+ param_env,
+ &verify_if_eq_b,
+ generic_ty,
+ ) {
+ Some(r) => {
+ let r_vid = self.to_region_vid(r);
+ self.eval_outlives(r_vid, lower_bound)
+ }
+ None => false,
+ }
+ }
+
+ /// This is a conservative normalization procedure. It takes every
+ /// free region in `value` and replaces it with the
+ /// "representative" of its SCC (see `scc_representatives` field).
+ /// We are guaranteed that if two values normalize to the same
+ /// thing, then they are equal; this is a conservative check in
+ /// that they could still be equal even if they normalize to
+ /// different results. (For example, there might be two regions
+ /// with the same value that are not in the same SCC).
+ ///
+ /// N.B., this is not an ideal approach and I would like to revisit
+ /// it. However, it works pretty well in practice. In particular,
+ /// this is needed to deal with projection outlives bounds like
+ ///
+ /// ```text
+ /// <T as Foo<'0>>::Item: '1
+ /// ```
+ ///
+ /// In particular, this routine winds up being important when
+ /// there are bounds like `where <T as Foo<'a>>::Item: 'b` in the
+ /// environment. In this case, if we can show that `'0 == 'a`,
+ /// and that `'b: '1`, then we know that the clause is
+ /// satisfied. In such cases, particularly due to limitations of
+ /// the trait solver =), we usually wind up with a where-clause like
+ /// `T: Foo<'a>` in scope, which thus forces `'0 == 'a` to be added as
+ /// a constraint, and thus ensures that they are in the same SCC.
+ ///
+ /// So why can't we do a more correct routine? Well, we could
+ /// *almost* use the `relate_tys` code, but the way it is
+ /// currently set up, it creates inference variables to deal with
+ /// higher-ranked things and so forth, and right now the inference
+ /// context is not permitted to make more inference variables. So
+ /// we use this kind of hacky solution.
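+ ///
+ /// A sketch (illustrative only): if `'0` and `'a` wound up in the same
+ /// SCC with a (hypothetical) representative `R3`, then both sides
+ /// normalize alike:
+ /// ```ignore (illustrative)
+ /// // <T as Foo<'0>>::Item ~~> <T as Foo<R3>>::Item
+ /// // <T as Foo<'a>>::Item ~~> <T as Foo<R3>>::Item
+ /// ```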
+ fn normalize_to_scc_representatives<T>(&self, tcx: TyCtxt<'tcx>, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ tcx.fold_regions(value, |r, _db| {
+ let vid = self.to_region_vid(r);
+ let scc = self.constraint_sccs.scc(vid);
+ let repr = self.scc_representatives[scc];
+ tcx.mk_region(ty::ReVar(repr))
+ })
+ }
+
+ // Evaluates whether `r1 == r2` (i.e., each outlives the other).
+ fn eval_equal(&self, r1: RegionVid, r2: RegionVid) -> bool {
+ self.eval_outlives(r1, r2) && self.eval_outlives(r2, r1)
+ }
+
+ // Evaluate whether `sup_region: sub_region`.
+ #[instrument(skip(self), level = "debug")]
+ fn eval_outlives(&self, sup_region: RegionVid, sub_region: RegionVid) -> bool {
+ debug!(
+ "eval_outlives: sup_region's value = {:?} universal={:?}",
+ self.region_value_str(sup_region),
+ self.universal_regions.is_universal_region(sup_region),
+ );
+ debug!(
+ "eval_outlives: sub_region's value = {:?} universal={:?}",
+ self.region_value_str(sub_region),
+ self.universal_regions.is_universal_region(sub_region),
+ );
+
+ let sub_region_scc = self.constraint_sccs.scc(sub_region);
+ let sup_region_scc = self.constraint_sccs.scc(sup_region);
+
+ // If we are checking that `'sup: 'sub`, and `'sub` contains
+ // some placeholder that `'sup` cannot name, then this is only
+ // true if `'sup` outlives static.
+ if !self.universe_compatible(sub_region_scc, sup_region_scc) {
+ debug!(
+ "eval_outlives: sub universe `{sub_region_scc:?}` is not nameable \
+ by super `{sup_region_scc:?}`, promoting to static",
+ );
+
+ return self.eval_outlives(sup_region, self.universal_regions.fr_static);
+ }
+
+ // Both the `sub_region` and `sup_region` consist of the union
+ // of some number of universal regions (along with the union
+ // of various points in the CFG; ignore those points for
+ // now). Therefore, the sup-region outlives the sub-region if,
+ // for each universal region R1 in the sub-region, there
+ // exists some region R2 in the sup-region that outlives R1.
+ let universal_outlives =
+ self.scc_values.universal_regions_outlived_by(sub_region_scc).all(|r1| {
+ self.scc_values
+ .universal_regions_outlived_by(sup_region_scc)
+ .any(|r2| self.universal_region_relations.outlives(r2, r1))
+ });
+
+ if !universal_outlives {
+ debug!(
+ "eval_outlives: returning false because sub region contains a universal region not present in super"
+ );
+ return false;
+ }
+
+ // Now we have to compare all the points in the sub region and make
+ // sure they exist in the sup region.
+
+ if self.universal_regions.is_universal_region(sup_region) {
+ // Micro-opt: universal regions contain all points.
+ debug!(
+ "eval_outlives: returning true because super is universal and hence contains all points"
+ );
+ return true;
+ }
+
+ let result = self.scc_values.contains_points(sup_region_scc, sub_region_scc);
+ debug!("returning {} because of comparison between points in sup/sub", result);
+ result
+ }
+
+ /// Once regions have been propagated, this method is used to see
+ /// whether any of the constraints were too strong. In particular,
+ /// we want to check for a case where a universally quantified
+ /// region exceeded its bounds. Consider:
+ /// ```compile_fail,E0312
+ /// fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x }
+ /// ```
+ /// In this case, returning `x` requires `&'a u32 <: &'b u32`
+ /// and hence we establish (transitively) a constraint that
+ /// `'a: 'b`. The `propagate_constraints` code above will
+ /// therefore add `end('a)` into the region for `'b` -- but we
+ /// have no evidence that `'b` outlives `'a`, so we want to report
+ /// an error.
+ ///
+ /// If `propagated_outlives_requirements` is `Some`, then we will
+ /// push unsatisfied obligations into there. Otherwise, we'll
+ /// report them as errors.
+ fn check_universal_regions(
+ &self,
+ body: &Body<'tcx>,
+ mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+ errors_buffer: &mut RegionErrors<'tcx>,
+ ) {
+ for (fr, fr_definition) in self.definitions.iter_enumerated() {
+ match fr_definition.origin {
+ NllRegionVariableOrigin::FreeRegion => {
+ // Go through each of the universal regions `fr` and check that
+ // they did not grow too large, accumulating any requirements
+ // for our caller into the `outlives_requirements` vector.
+ self.check_universal_region(
+ body,
+ fr,
+ &mut propagated_outlives_requirements,
+ errors_buffer,
+ );
+ }
+
+ NllRegionVariableOrigin::Placeholder(placeholder) => {
+ self.check_bound_universal_region(fr, placeholder, errors_buffer);
+ }
+
+ NllRegionVariableOrigin::Existential { .. } => {
+ // nothing to check here
+ }
+ }
+ }
+ }
+
+ /// Checks if Polonius has found any unexpected free region relations.
+ ///
+ /// In Polonius terms, a "subset error" (or "illegal subset relation error") is the equivalent
+ /// of NLL's "checking if any region constraints were too strong": a placeholder origin `'a`
+ /// was unexpectedly found to be a subset of another placeholder origin `'b`, and means in NLL
+ /// terms that the "longer free region" `'a` outlived the "shorter free region" `'b`.
+ ///
+ /// More details can be found in this blog post by Niko:
+ /// <https://smallcultfollowing.com/babysteps/blog/2019/01/17/polonius-and-region-errors/>
+ ///
+ /// In the canonical example
+ /// ```compile_fail,E0312
+ /// fn foo<'a, 'b>(x: &'a u32) -> &'b u32 { x }
+ /// ```
+ /// returning `x` requires `&'a u32 <: &'b u32` and hence we establish (transitively) a
+ /// constraint that `'a: 'b`. It is an error that we have no evidence that this
+ /// constraint holds.
+ ///
+ /// If `propagated_outlives_requirements` is `Some`, then we will
+ /// push unsatisfied obligations into there. Otherwise, we'll
+ /// report them as errors.
+ fn check_polonius_subset_errors(
+ &self,
+ body: &Body<'tcx>,
+ mut propagated_outlives_requirements: Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+ errors_buffer: &mut RegionErrors<'tcx>,
+ polonius_output: Rc<PoloniusOutput>,
+ ) {
+ debug!(
+ "check_polonius_subset_errors: {} subset_errors",
+ polonius_output.subset_errors.len()
+ );
+
+ // Similarly to `check_universal_regions`: a free region relation, which was not explicitly
+ // declared ("known") was found by Polonius, so emit an error, or propagate the
+ // requirements for our caller into the `propagated_outlives_requirements` vector.
+ //
+ // Polonius doesn't model regions ("origins") as CFG-subsets or durations, but the
+ // `longer_fr` and `shorter_fr` terminology will still be used here, for consistency with
+ // the rest of the NLL infrastructure. The "subset origin" is the "longer free region",
+ // and the "superset origin" is the outlived "shorter free region".
+ //
+ // Note: Polonius will produce a subset error at every point where the unexpected
+ // `longer_fr`'s "placeholder loan" is contained in the `shorter_fr`. This can be helpful
+ // for diagnostics in the future, e.g. to point more precisely at the key locations
+ // requiring this constraint to hold. However, the error and diagnostics code downstream
+ // expects that these errors are not duplicated (and that they are in a certain order).
+ // Otherwise, diagnostics messages such as the ones giving names like `'1` to elided or
+ // anonymous lifetimes for example, could give these names differently, while others like
+ // the outlives suggestions or the debug output from `#[rustc_regions]` would be
+ // duplicated. The polonius subset errors are deduplicated here, while keeping the
+ // CFG-location ordering.
+ let mut subset_errors: Vec<_> = polonius_output
+ .subset_errors
+ .iter()
+ .flat_map(|(_location, subset_errors)| subset_errors.iter())
+ .collect();
+ subset_errors.sort();
+ subset_errors.dedup();
+
+ for (longer_fr, shorter_fr) in subset_errors.into_iter() {
+ debug!(
+ "check_polonius_subset_errors: subset_error longer_fr={:?},\
+ shorter_fr={:?}",
+ longer_fr, shorter_fr
+ );
+
+ let propagated = self.try_propagate_universal_region_error(
+ *longer_fr,
+ *shorter_fr,
+ body,
+ &mut propagated_outlives_requirements,
+ );
+ if propagated == RegionRelationCheckResult::Error {
+ errors_buffer.push(RegionErrorKind::RegionError {
+ longer_fr: *longer_fr,
+ shorter_fr: *shorter_fr,
+ fr_origin: NllRegionVariableOrigin::FreeRegion,
+ is_reported: true,
+ });
+ }
+ }
+
+ // Handle the placeholder errors as usual, until the chalk-rustc-polonius triumvirate has
+ // a more complete picture on how to separate this responsibility.
+ for (fr, fr_definition) in self.definitions.iter_enumerated() {
+ match fr_definition.origin {
+ NllRegionVariableOrigin::FreeRegion => {
+ // handled by polonius above
+ }
+
+ NllRegionVariableOrigin::Placeholder(placeholder) => {
+ self.check_bound_universal_region(fr, placeholder, errors_buffer);
+ }
+
+ NllRegionVariableOrigin::Existential { .. } => {
+ // nothing to check here
+ }
+ }
+ }
+ }
+
+ /// Checks the final value for the free region `fr` to see if it
+ /// grew too large. In particular, examine what `end(X)` points
+ /// wound up in `fr`'s final value; for each `end(X)` where `X !=
+ /// fr`, we want to check that `fr: X`. If not, that's either an
+ /// error, or something we have to propagate to our creator.
+ ///
+ /// Things that are to be propagated are accumulated into the
+ /// `outlives_requirements` vector.
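+ ///
+ /// A sketch of the check (illustrative only):
+ /// ```ignore (illustrative)
+ /// // value('b) = { CFG points, end('a), end('b) }
+ /// // => for end('a): check that `'b: 'a` is known; else error or propagate
+ /// ```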
+ #[instrument(
+ skip(self, body, propagated_outlives_requirements, errors_buffer),
+ level = "debug"
+ )]
+ fn check_universal_region(
+ &self,
+ body: &Body<'tcx>,
+ longer_fr: RegionVid,
+ propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+ errors_buffer: &mut RegionErrors<'tcx>,
+ ) {
+ let longer_fr_scc = self.constraint_sccs.scc(longer_fr);
+
+ // Because this free region must be in the ROOT universe, we
+ // know it cannot contain any placeholders from bound universes.
+ assert!(self.scc_universes[longer_fr_scc] == ty::UniverseIndex::ROOT);
+ debug_assert!(self.scc_values.placeholders_contained_in(longer_fr_scc).next().is_none());
+
+ // Only check all of the relations for the main representative of each
+ // SCC, otherwise just check that we outlive said representative. This
+ // reduces the number of redundant relations propagated out of
+ // closures.
+ // Note that the representative will be a universal region if there is
+ // one in this SCC, so we will always check the representative here.
+ let representative = self.scc_representatives[longer_fr_scc];
+ if representative != longer_fr {
+ if let RegionRelationCheckResult::Error = self.check_universal_region_relation(
+ longer_fr,
+ representative,
+ body,
+ propagated_outlives_requirements,
+ ) {
+ errors_buffer.push(RegionErrorKind::RegionError {
+ longer_fr,
+ shorter_fr: representative,
+ fr_origin: NllRegionVariableOrigin::FreeRegion,
+ is_reported: true,
+ });
+ }
+ return;
+ }
+
+ // Find every region `o` such that `fr: o`
+ // (because `fr` includes `end(o)`).
+ let mut error_reported = false;
+ for shorter_fr in self.scc_values.universal_regions_outlived_by(longer_fr_scc) {
+ if let RegionRelationCheckResult::Error = self.check_universal_region_relation(
+ longer_fr,
+ shorter_fr,
+ body,
+ propagated_outlives_requirements,
+ ) {
+ // We only report the first region error. Subsequent errors are hidden so as
+ // not to overwhelm the user, but we do record them so as to potentially print
+ // better diagnostics elsewhere...
+ errors_buffer.push(RegionErrorKind::RegionError {
+ longer_fr,
+ shorter_fr,
+ fr_origin: NllRegionVariableOrigin::FreeRegion,
+ is_reported: !error_reported,
+ });
+
+ error_reported = true;
+ }
+ }
+ }
+
+ /// Checks that we can prove that `longer_fr: shorter_fr`. If we can't we attempt to propagate
+ /// the constraint outward (e.g. to a closure environment), but if that fails, there is an
+ /// error.
+ fn check_universal_region_relation(
+ &self,
+ longer_fr: RegionVid,
+ shorter_fr: RegionVid,
+ body: &Body<'tcx>,
+ propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+ ) -> RegionRelationCheckResult {
+ // If it is known that `longer_fr: shorter_fr`, carry on.
+ if self.universal_region_relations.outlives(longer_fr, shorter_fr) {
+ RegionRelationCheckResult::Ok
+ } else {
+ // If we are not in a context where we can propagate errors, or we
+ // could not shrink `fr` to something smaller, then just report an
+ // error.
+ //
+ // Note: in this case, we use the unapproximated regions to report the
+ // error. This gives better error messages in some cases.
+ self.try_propagate_universal_region_error(
+ longer_fr,
+ shorter_fr,
+ body,
+ propagated_outlives_requirements,
+ )
+ }
+ }
+
+ /// Attempt to propagate a region error (e.g. `'a: 'b`) that is not met to a closure's
+ /// creator. If we cannot, then the caller should report an error to the user.
+ fn try_propagate_universal_region_error(
+ &self,
+ longer_fr: RegionVid,
+ shorter_fr: RegionVid,
+ body: &Body<'tcx>,
+ propagated_outlives_requirements: &mut Option<&mut Vec<ClosureOutlivesRequirement<'tcx>>>,
+ ) -> RegionRelationCheckResult {
+ if let Some(propagated_outlives_requirements) = propagated_outlives_requirements {
+ // Shrink `longer_fr` until we find a non-local region (if we do).
+ // We'll call it `fr-` -- it's ever so slightly smaller than
+ // `longer_fr`.
+ if let Some(fr_minus) = self.universal_region_relations.non_local_lower_bound(longer_fr)
+ {
+ debug!("try_propagate_universal_region_error: fr_minus={:?}", fr_minus);
+
+ let blame_span_category = self.find_outlives_blame_span(
+ body,
+ longer_fr,
+ NllRegionVariableOrigin::FreeRegion,
+ shorter_fr,
+ );
+
+ // Grow `shorter_fr` until we find some non-local regions. (We
+ // always will.) We'll call them `shorter_fr+` -- they're ever
+ // so slightly larger than `shorter_fr`.
+ let shorter_fr_plus =
+ self.universal_region_relations.non_local_upper_bounds(shorter_fr);
+ debug!(
+ "try_propagate_universal_region_error: shorter_fr_plus={:?}",
+ shorter_fr_plus
+ );
+ for fr in shorter_fr_plus {
+ // Push the constraint `fr-: shorter_fr+`
+ propagated_outlives_requirements.push(ClosureOutlivesRequirement {
+ subject: ClosureOutlivesSubject::Region(fr_minus),
+ outlived_free_region: fr,
+ blame_span: blame_span_category.1.span,
+ category: blame_span_category.0,
+ });
+ }
+ return RegionRelationCheckResult::Propagated;
+ }
+ }
+
+ RegionRelationCheckResult::Error
+ }
+
+ fn check_bound_universal_region(
+ &self,
+ longer_fr: RegionVid,
+ placeholder: ty::PlaceholderRegion,
+ errors_buffer: &mut RegionErrors<'tcx>,
+ ) {
+ debug!("check_bound_universal_region(fr={:?}, placeholder={:?})", longer_fr, placeholder,);
+
+ let longer_fr_scc = self.constraint_sccs.scc(longer_fr);
+ debug!("check_bound_universal_region: longer_fr_scc={:?}", longer_fr_scc,);
+
+ // If we have some bound universal region `'a`, then the only
+ // element it can contain is itself -- we don't know anything
+ // else about it!
+ let Some(error_element) = ({
+ self.scc_values.elements_contained_in(longer_fr_scc).find(|element| match element {
+ RegionElement::Location(_) => true,
+ RegionElement::RootUniversalRegion(_) => true,
+ RegionElement::PlaceholderRegion(placeholder1) => placeholder != *placeholder1,
+ })
+ }) else {
+ return;
+ };
+ debug!("check_bound_universal_region: error_element = {:?}", error_element);
+
+ // Find the region that introduced this `error_element`.
+ errors_buffer.push(RegionErrorKind::BoundUniversalRegionError {
+ longer_fr,
+ error_element,
+ placeholder,
+ });
+ }
+
+ fn check_member_constraints(
+ &self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ errors_buffer: &mut RegionErrors<'tcx>,
+ ) {
+ let member_constraints = self.member_constraints.clone();
+ for m_c_i in member_constraints.all_indices() {
+ debug!("check_member_constraint(m_c_i={:?})", m_c_i);
+ let m_c = &member_constraints[m_c_i];
+ let member_region_vid = m_c.member_region_vid;
+ debug!(
+ "check_member_constraint: member_region_vid={:?} with value {}",
+ member_region_vid,
+ self.region_value_str(member_region_vid),
+ );
+ let choice_regions = member_constraints.choice_regions(m_c_i);
+ debug!("check_member_constraint: choice_regions={:?}", choice_regions);
+
+ // Did the member region wind up equal to any of the option regions?
+ if let Some(o) =
+ choice_regions.iter().find(|&&o_r| self.eval_equal(o_r, m_c.member_region_vid))
+ {
+ debug!("check_member_constraint: evaluated as equal to {:?}", o);
+ continue;
+ }
+
+ // If not, report an error.
+ let member_region = infcx.tcx.mk_region(ty::ReVar(member_region_vid));
+ errors_buffer.push(RegionErrorKind::UnexpectedHiddenRegion {
+ span: m_c.definition_span,
+ hidden_ty: m_c.hidden_ty,
+ key: m_c.key,
+ member_region,
+ });
+ }
+ }
+
+ /// We have a constraint `fr1: fr2` that is not satisfied, where
+ /// `fr2` represents some universal region. Here, `r` is some
+ /// region where we know that `fr1: r` and this function has the
+ /// job of determining whether `r` is "to blame" for the fact that
+ /// `fr1: fr2` is required.
+ ///
+ /// This is true under two conditions:
+ ///
+ /// - `r == fr2`
+    /// - `fr2` is `'static` and `r` is some placeholder in a universe
+    ///   that cannot be named by `fr1`; in that case, we will require
+    ///   that `fr1: 'static` because it is the only way for `fr1: r` to
+    ///   be satisfied. (See `add_incompatible_universe`.)
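+    ///
+    /// Illustrative sketch: given a chain `fr1: 'x` and `'x: fr2` with
+    /// `fr2 != 'static`, passing `r = fr2` returns `true`, while passing
+    /// the intermediate `'x` returns `false`.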
+ pub(crate) fn provides_universal_region(
+ &self,
+ r: RegionVid,
+ fr1: RegionVid,
+ fr2: RegionVid,
+ ) -> bool {
+ debug!("provides_universal_region(r={:?}, fr1={:?}, fr2={:?})", r, fr1, fr2);
+ let result = {
+ r == fr2 || {
+ fr2 == self.universal_regions.fr_static && self.cannot_name_placeholder(fr1, r)
+ }
+ };
+ debug!("provides_universal_region: result = {:?}", result);
+ result
+ }
+
+ /// If `r2` represents a placeholder region, then this returns
+ /// `true` if `r1` cannot name that placeholder in its
+ /// value; otherwise, returns `false`.
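+    ///
+    /// For example (illustrative), if `r2` is a placeholder in universe
+    /// `U1` while `r1` lives in the root universe `U0`, then `U0` cannot
+    /// name `U1` and this returns `true`.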
+ pub(crate) fn cannot_name_placeholder(&self, r1: RegionVid, r2: RegionVid) -> bool {
+ debug!("cannot_name_value_of(r1={:?}, r2={:?})", r1, r2);
+
+ match self.definitions[r2].origin {
+ NllRegionVariableOrigin::Placeholder(placeholder) => {
+ let universe1 = self.definitions[r1].universe;
+ debug!(
+ "cannot_name_value_of: universe1={:?} placeholder={:?}",
+ universe1, placeholder
+ );
+ universe1.cannot_name(placeholder.universe)
+ }
+
+ NllRegionVariableOrigin::FreeRegion | NllRegionVariableOrigin::Existential { .. } => {
+ false
+ }
+ }
+ }
+
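+    /// Maps a `ClosureBounds` constraint back to the category and span
+    /// recorded when the closure's extra requirements were instantiated,
+    /// falling back to the constraint itself if no mapping exists.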
+ pub(crate) fn retrieve_closure_constraint_info(
+ &self,
+ _body: &Body<'tcx>,
+ constraint: &OutlivesConstraint<'tcx>,
+ ) -> BlameConstraint<'tcx> {
+ let loc = match constraint.locations {
+ Locations::All(span) => {
+ return BlameConstraint {
+ category: constraint.category,
+ from_closure: false,
+ cause: ObligationCause::dummy_with_span(span),
+ variance_info: constraint.variance_info,
+ };
+ }
+ Locations::Single(loc) => loc,
+ };
+
+ let opt_span_category =
+ self.closure_bounds_mapping[&loc].get(&(constraint.sup, constraint.sub));
+ opt_span_category
+ .map(|&(category, span)| BlameConstraint {
+ category,
+ from_closure: true,
+ cause: ObligationCause::dummy_with_span(span),
+ variance_info: constraint.variance_info,
+ })
+ .unwrap_or(BlameConstraint {
+ category: constraint.category,
+ from_closure: false,
+ cause: ObligationCause::dummy_with_span(constraint.span),
+ variance_info: constraint.variance_info,
+ })
+ }
+
+ /// Finds a good `ObligationCause` to blame for the fact that `fr1` outlives `fr2`.
+ pub(crate) fn find_outlives_blame_span(
+ &self,
+ body: &Body<'tcx>,
+ fr1: RegionVid,
+ fr1_origin: NllRegionVariableOrigin,
+ fr2: RegionVid,
+ ) -> (ConstraintCategory<'tcx>, ObligationCause<'tcx>) {
+ let BlameConstraint { category, cause, .. } =
+ self.best_blame_constraint(body, fr1, fr1_origin, |r| {
+ self.provides_universal_region(r, fr1, fr2)
+ });
+ (category, cause)
+ }
+
+    /// Walks the graph of constraints (where `'a: 'b` is considered
+    /// an edge `'a -> 'b`) breadth-first from `from_region`, stopping
+    /// at the first region that satisfies `target_test`. The path is
+    /// stored as a series of `OutlivesConstraint` values -- in other
+    /// words, a list of *edges*.
+    ///
+    /// Returns: the constraints along the shortest such path (fewest
+    /// constraints) as well as the region `R` that passed the target
+    /// test, or `None` if no matching region is reachable.
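+    ///
+    /// Illustrative example: given constraints `'0: '1` and `'1: '2`,
+    /// searching from `'0` with a target test matching `'2` yields
+    /// `Some((['0: '1, '1: '2], '2))`.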
+ pub(crate) fn find_constraint_paths_between_regions(
+ &self,
+ from_region: RegionVid,
+ target_test: impl Fn(RegionVid) -> bool,
+ ) -> Option<(Vec<OutlivesConstraint<'tcx>>, RegionVid)> {
+ let mut context = IndexVec::from_elem(Trace::NotVisited, &self.definitions);
+ context[from_region] = Trace::StartRegion;
+
+ // Use a deque so that we do a breadth-first search. We will
+ // stop at the first match, which ought to be the shortest
+ // path (fewest constraints).
+ let mut deque = VecDeque::new();
+ deque.push_back(from_region);
+
+ while let Some(r) = deque.pop_front() {
+ debug!(
+ "find_constraint_paths_between_regions: from_region={:?} r={:?} value={}",
+ from_region,
+ r,
+ self.region_value_str(r),
+ );
+
+ // Check if we reached the region we were looking for. If so,
+ // we can reconstruct the path that led to it and return it.
+ if target_test(r) {
+ let mut result = vec![];
+ let mut p = r;
+ loop {
+ match context[p].clone() {
+ Trace::NotVisited => {
+ bug!("found unvisited region {:?} on path to {:?}", p, r)
+ }
+
+ Trace::FromOutlivesConstraint(c) => {
+ p = c.sup;
+ result.push(c);
+ }
+
+ Trace::StartRegion => {
+ result.reverse();
+ return Some((result, r));
+ }
+ }
+ }
+ }
+
+ // Otherwise, walk over the outgoing constraints and
+ // enqueue any regions we find, keeping track of how we
+ // reached them.
+
+ // A constraint like `'r: 'x` can come from our constraint
+ // graph.
+ let fr_static = self.universal_regions.fr_static;
+ let outgoing_edges_from_graph =
+ self.constraint_graph.outgoing_edges(r, &self.constraints, fr_static);
+
+ // Always inline this closure because it can be hot.
+ let mut handle_constraint = #[inline(always)]
+ |constraint: OutlivesConstraint<'tcx>| {
+ debug_assert_eq!(constraint.sup, r);
+ let sub_region = constraint.sub;
+ if let Trace::NotVisited = context[sub_region] {
+ context[sub_region] = Trace::FromOutlivesConstraint(constraint);
+ deque.push_back(sub_region);
+ }
+ };
+
+ // This loop can be hot.
+ for constraint in outgoing_edges_from_graph {
+ handle_constraint(constraint);
+ }
+
+ // Member constraints can also give rise to `'r: 'x` edges that
+ // were not part of the graph initially, so watch out for those.
+ // (But they are extremely rare; this loop is very cold.)
+ for constraint in self.applied_member_constraints(r) {
+ let p_c = &self.member_constraints[constraint.member_constraint_index];
+ let constraint = OutlivesConstraint {
+ sup: r,
+ sub: constraint.min_choice,
+ locations: Locations::All(p_c.definition_span),
+ span: p_c.definition_span,
+ category: ConstraintCategory::OpaqueType,
+ variance_info: ty::VarianceDiagInfo::default(),
+ };
+ handle_constraint(constraint);
+ }
+ }
+
+ None
+ }
+
+ /// Finds some region R such that `fr1: R` and `R` is live at `elem`.
+ #[instrument(skip(self), level = "trace")]
+ pub(crate) fn find_sub_region_live_at(&self, fr1: RegionVid, elem: Location) -> RegionVid {
+ trace!(scc = ?self.constraint_sccs.scc(fr1));
+ trace!(universe = ?self.scc_universes[self.constraint_sccs.scc(fr1)]);
+ self.find_constraint_paths_between_regions(fr1, |r| {
+ // First look for some `r` such that `fr1: r` and `r` is live at `elem`
+ trace!(?r, liveness_constraints=?self.liveness_constraints.region_value_str(r));
+ self.liveness_constraints.contains(r, elem)
+ })
+ .or_else(|| {
+ // If we fail to find that, we may find some `r` such that
+ // `fr1: r` and `r` is a placeholder from some universe
+ // `fr1` cannot name. This would force `fr1` to be
+ // `'static`.
+ self.find_constraint_paths_between_regions(fr1, |r| {
+ self.cannot_name_placeholder(fr1, r)
+ })
+ })
+ .or_else(|| {
+ // If we fail to find THAT, it may be that `fr1` is a
+ // placeholder that cannot "fit" into its SCC. In that
+ // case, there should be some `r` where `fr1: r` and `fr1` is a
+ // placeholder that `r` cannot name. We can blame that
+ // edge.
+ //
+ // Remember that if `R1: R2`, then the universe of R1
+ // must be able to name the universe of R2, because R2 will
+            // be at least `'empty(Universe(R2))`, and `R1` must be
+            // at least as large as that.
+ self.find_constraint_paths_between_regions(fr1, |r| {
+ self.cannot_name_placeholder(r, fr1)
+ })
+ })
+ .map(|(_path, r)| r)
+ .unwrap()
+ }
+
+ /// Get the region outlived by `longer_fr` and live at `element`.
+ pub(crate) fn region_from_element(
+ &self,
+ longer_fr: RegionVid,
+ element: &RegionElement,
+ ) -> RegionVid {
+ match *element {
+ RegionElement::Location(l) => self.find_sub_region_live_at(longer_fr, l),
+ RegionElement::RootUniversalRegion(r) => r,
+ RegionElement::PlaceholderRegion(error_placeholder) => self
+ .definitions
+ .iter_enumerated()
+ .find_map(|(r, definition)| match definition.origin {
+ NllRegionVariableOrigin::Placeholder(p) if p == error_placeholder => Some(r),
+ _ => None,
+ })
+ .unwrap(),
+ }
+ }
+
+ /// Get the region definition of `r`.
+ pub(crate) fn region_definition(&self, r: RegionVid) -> &RegionDefinition<'tcx> {
+ &self.definitions[r]
+ }
+
+ /// Check if the SCC of `r` contains `upper`.
+ pub(crate) fn upper_bound_in_region_scc(&self, r: RegionVid, upper: RegionVid) -> bool {
+ let r_scc = self.constraint_sccs.scc(r);
+ self.scc_values.contains(r_scc, upper)
+ }
+
+ pub(crate) fn universal_regions(&self) -> &UniversalRegions<'tcx> {
+ self.universal_regions.as_ref()
+ }
+
+ /// Tries to find the best constraint to blame for the fact that
+ /// `R: from_region`, where `R` is some region that meets
+ /// `target_test`. This works by following the constraint graph,
+ /// creating a constraint path that forces `R` to outlive
+ /// `from_region`, and then finding the best choices within that
+ /// path to blame.
+ pub(crate) fn best_blame_constraint(
+ &self,
+ body: &Body<'tcx>,
+ from_region: RegionVid,
+ from_region_origin: NllRegionVariableOrigin,
+ target_test: impl Fn(RegionVid) -> bool,
+ ) -> BlameConstraint<'tcx> {
+ debug!(
+ "best_blame_constraint(from_region={:?}, from_region_origin={:?})",
+ from_region, from_region_origin
+ );
+
+ // Find all paths
+ let (path, target_region) =
+ self.find_constraint_paths_between_regions(from_region, target_test).unwrap();
+ debug!(
+ "best_blame_constraint: path={:#?}",
+ path.iter()
+ .map(|c| format!(
+ "{:?} ({:?}: {:?})",
+ c,
+ self.constraint_sccs.scc(c.sup),
+ self.constraint_sccs.scc(c.sub),
+ ))
+ .collect::<Vec<_>>()
+ );
+
+ // We try to avoid reporting a `ConstraintCategory::Predicate` as our best constraint.
+ // Instead, we use it to produce an improved `ObligationCauseCode`.
+ // FIXME - determine what we should do if we encounter multiple `ConstraintCategory::Predicate`
+ // constraints. Currently, we just pick the first one.
+ let cause_code = path
+ .iter()
+ .find_map(|constraint| {
+ if let ConstraintCategory::Predicate(predicate_span) = constraint.category {
+ // We currently do not store the `DefId` in the `ConstraintCategory`
+                    // for performance reasons. The error reporting code used by NLL only
+ // uses the span, so this doesn't cause any problems at the moment.
+ Some(ObligationCauseCode::BindingObligation(
+ CRATE_DEF_ID.to_def_id(),
+ predicate_span,
+ ))
+ } else {
+ None
+ }
+ })
+ .unwrap_or_else(|| ObligationCauseCode::MiscObligation);
+
+ // Classify each of the constraints along the path.
+ let mut categorized_path: Vec<BlameConstraint<'tcx>> = path
+ .iter()
+ .map(|constraint| {
+ if constraint.category == ConstraintCategory::ClosureBounds {
+ self.retrieve_closure_constraint_info(body, &constraint)
+ } else {
+ BlameConstraint {
+ category: constraint.category,
+ from_closure: false,
+ cause: ObligationCause::new(
+ constraint.span,
+ CRATE_HIR_ID,
+ cause_code.clone(),
+ ),
+ variance_info: constraint.variance_info,
+ }
+ }
+ })
+ .collect();
+ debug!("best_blame_constraint: categorized_path={:#?}", categorized_path);
+
+ // To find the best span to cite, we first try to look for the
+ // final constraint that is interesting and where the `sup` is
+ // not unified with the ultimate target region. The reason
+ // for this is that we have a chain of constraints that lead
+ // from the source to the target region, something like:
+ //
+ // '0: '1 ('0 is the source)
+ // '1: '2
+ // '2: '3
+ // '3: '4
+ // '4: '5
+ // '5: '6 ('6 is the target)
+ //
+ // Some of those regions are unified with `'6` (in the same
+ // SCC). We want to screen those out. After that point, the
+ // "closest" constraint we have to the end is going to be the
+ // most likely to be the point where the value escapes -- but
+ // we still want to screen for an "interesting" point to
+ // highlight (e.g., a call site or something).
+ let target_scc = self.constraint_sccs.scc(target_region);
+ let mut range = 0..path.len();
+
+ // As noted above, when reporting an error, there is typically a chain of constraints
+ // leading from some "source" region which must outlive some "target" region.
+ // In most cases, we prefer to "blame" the constraints closer to the target --
+ // but there is one exception. When constraints arise from higher-ranked subtyping,
+ // we generally prefer to blame the source value,
+ // as the "target" in this case tends to be some type annotation that the user gave.
+ // Therefore, if we find that the region origin is some instantiation
+ // of a higher-ranked region, we start our search from the "source" point
+ // rather than the "target", and we also tweak a few other things.
+ //
+ // An example might be this bit of Rust code:
+ //
+ // ```rust
+ // let x: fn(&'static ()) = |_| {};
+ // let y: for<'a> fn(&'a ()) = x;
+ // ```
+ //
+ // In MIR, this will be converted into a combination of assignments and type ascriptions.
+ // In particular, the 'static is imposed through a type ascription:
+ //
+ // ```rust
+ // x = ...;
+        // AscribeUserType(x, fn(&'static ()))
+ // y = x;
+ // ```
+ //
+ // We wind up ultimately with constraints like
+ //
+ // ```rust
+ // !a: 'temp1 // from the `y = x` statement
+ // 'temp1: 'temp2
+ // 'temp2: 'static // from the AscribeUserType
+ // ```
+ //
+ // and here we prefer to blame the source (the y = x statement).
+ let blame_source = match from_region_origin {
+ NllRegionVariableOrigin::FreeRegion
+ | NllRegionVariableOrigin::Existential { from_forall: false } => true,
+ NllRegionVariableOrigin::Placeholder(_)
+ | NllRegionVariableOrigin::Existential { from_forall: true } => false,
+ };
+
+ let find_region = |i: &usize| {
+ let constraint = &path[*i];
+
+ let constraint_sup_scc = self.constraint_sccs.scc(constraint.sup);
+
+ if blame_source {
+ match categorized_path[*i].category {
+ ConstraintCategory::OpaqueType
+ | ConstraintCategory::Boring
+ | ConstraintCategory::BoringNoLocation
+ | ConstraintCategory::Internal
+ | ConstraintCategory::Predicate(_) => false,
+ ConstraintCategory::TypeAnnotation
+ | ConstraintCategory::Return(_)
+ | ConstraintCategory::Yield => true,
+ _ => constraint_sup_scc != target_scc,
+ }
+ } else {
+ !matches!(
+ categorized_path[*i].category,
+ ConstraintCategory::OpaqueType
+ | ConstraintCategory::Boring
+ | ConstraintCategory::BoringNoLocation
+ | ConstraintCategory::Internal
+ | ConstraintCategory::Predicate(_)
+ )
+ }
+ };
+
+ let best_choice =
+ if blame_source { range.rev().find(find_region) } else { range.find(find_region) };
+
+ debug!(
+ "best_blame_constraint: best_choice={:?} blame_source={}",
+ best_choice, blame_source
+ );
+
+ if let Some(i) = best_choice {
+ if let Some(next) = categorized_path.get(i + 1) {
+ if matches!(categorized_path[i].category, ConstraintCategory::Return(_))
+ && next.category == ConstraintCategory::OpaqueType
+ {
+ // The return expression is being influenced by the return type being
+ // impl Trait, point at the return type and not the return expr.
+ return next.clone();
+ }
+ }
+
+ if categorized_path[i].category == ConstraintCategory::Return(ReturnConstraint::Normal)
+ {
+ let field = categorized_path.iter().find_map(|p| {
+ if let ConstraintCategory::ClosureUpvar(f) = p.category {
+ Some(f)
+ } else {
+ None
+ }
+ });
+
+ if let Some(field) = field {
+ categorized_path[i].category =
+ ConstraintCategory::Return(ReturnConstraint::ClosureUpvar(field));
+ }
+ }
+
+ return categorized_path[i].clone();
+ }
+
+        // If that search fails, that is... unusual. Maybe everything
+ // is in the same SCC or something. In that case, find what
+ // appears to be the most interesting point to report to the
+ // user via an even more ad-hoc guess.
+ categorized_path.sort_by(|p0, p1| p0.category.cmp(&p1.category));
+ debug!("best_blame_constraint: sorted_path={:#?}", categorized_path);
+
+ categorized_path.remove(0)
+ }
+
+ pub(crate) fn universe_info(&self, universe: ty::UniverseIndex) -> UniverseInfo<'tcx> {
+ self.universe_causes[&universe].clone()
+ }
+}
+
+impl<'tcx> RegionDefinition<'tcx> {
+ fn new(universe: ty::UniverseIndex, rv_origin: RegionVariableOrigin) -> Self {
+ // Create a new region definition. Note that, for free
+ // regions, the `external_name` field gets updated later in
+ // `init_universal_regions`.
+
+ let origin = match rv_origin {
+ RegionVariableOrigin::Nll(origin) => origin,
+ _ => NllRegionVariableOrigin::Existential { from_forall: false },
+ };
+
+ Self { origin, universe, external_name: None }
+ }
+}
+
+pub trait ClosureRegionRequirementsExt<'tcx> {
+ fn apply_requirements(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ closure_def_id: DefId,
+ closure_substs: SubstsRef<'tcx>,
+ ) -> Vec<QueryOutlivesConstraint<'tcx>>;
+}
+
+impl<'tcx> ClosureRegionRequirementsExt<'tcx> for ClosureRegionRequirements<'tcx> {
+ /// Given an instance T of the closure type, this method
+ /// instantiates the "extra" requirements that we computed for the
+ /// closure into the inference context. This has the effect of
+ /// adding new outlives obligations to existing variables.
+ ///
+ /// As described on `ClosureRegionRequirements`, the extra
+ /// requirements are expressed in terms of regionvids that index
+ /// into the free regions that appear on the closure type. So, to
+ /// do this, we first copy those regions out from the type T into
+ /// a vector. Then we can just index into that vector to extract
+ /// out the corresponding region from T and apply the
+ /// requirements.
+ fn apply_requirements(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ closure_def_id: DefId,
+ closure_substs: SubstsRef<'tcx>,
+ ) -> Vec<QueryOutlivesConstraint<'tcx>> {
+ debug!(
+ "apply_requirements(closure_def_id={:?}, closure_substs={:?})",
+ closure_def_id, closure_substs
+ );
+
+ // Extract the values of the free regions in `closure_substs`
+ // into a vector. These are the regions that we will be
+ // relating to one another.
+ let closure_mapping = &UniversalRegions::closure_mapping(
+ tcx,
+ closure_substs,
+ self.num_external_vids,
+ tcx.typeck_root_def_id(closure_def_id),
+ );
+ debug!("apply_requirements: closure_mapping={:?}", closure_mapping);
+
+ // Create the predicates.
+ self.outlives_requirements
+ .iter()
+ .map(|outlives_requirement| {
+ let outlived_region = closure_mapping[outlives_requirement.outlived_free_region];
+
+ match outlives_requirement.subject {
+ ClosureOutlivesSubject::Region(region) => {
+ let region = closure_mapping[region];
+ debug!(
+ "apply_requirements: region={:?} \
+ outlived_region={:?} \
+ outlives_requirement={:?}",
+ region, outlived_region, outlives_requirement,
+ );
+ ty::Binder::dummy(ty::OutlivesPredicate(region.into(), outlived_region))
+ }
+
+ ClosureOutlivesSubject::Ty(ty) => {
+ debug!(
+ "apply_requirements: ty={:?} \
+ outlived_region={:?} \
+ outlives_requirement={:?}",
+ ty, outlived_region, outlives_requirement,
+ );
+ ty::Binder::dummy(ty::OutlivesPredicate(ty.into(), outlived_region))
+ }
+ }
+ })
+ .collect()
+ }
+}
+
+#[derive(Clone, Debug)]
+pub struct BlameConstraint<'tcx> {
+ pub category: ConstraintCategory<'tcx>,
+ pub from_closure: bool,
+ pub cause: ObligationCause<'tcx>,
+ pub variance_info: ty::VarianceDiagInfo<'tcx>,
+}
diff --git a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
new file mode 100644
index 000000000..d6712b6a4
--- /dev/null
+++ b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs
@@ -0,0 +1,662 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::vec_map::VecMap;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::OpaqueTyOrigin;
+use rustc_infer::infer::error_reporting::unexpected_hidden_region_diagnostic;
+use rustc_infer::infer::TyCtxtInferExt as _;
+use rustc_infer::infer::{DefiningAnchor, InferCtxt};
+use rustc_infer::traits::{Obligation, ObligationCause, TraitEngine};
+use rustc_middle::ty::fold::{TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts};
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{
+ self, OpaqueHiddenType, OpaqueTypeKey, ToPredicate, Ty, TyCtxt, TypeFoldable,
+};
+use rustc_span::Span;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::TraitEngineExt as _;
+
+use super::RegionInferenceContext;
+
+impl<'tcx> RegionInferenceContext<'tcx> {
+ /// Resolve any opaque types that were encountered while borrow checking
+ /// this item. This is then used to get the type in the `type_of` query.
+ ///
+ /// For example consider `fn f<'a>(x: &'a i32) -> impl Sized + 'a { x }`.
+ /// This is lowered to give HIR something like
+ ///
+ /// type f<'a>::_Return<'_a> = impl Sized + '_a;
+ /// fn f<'a>(x: &'a i32) -> f<'static>::_Return<'a> { x }
+ ///
+ /// When checking the return type record the type from the return and the
+ /// type used in the return value. In this case they might be `_Return<'1>`
+ /// and `&'2 i32` respectively.
+ ///
+    /// Once we get to this method, we have completed region inference and want to
+ /// call `infer_opaque_definition_from_instantiation` to get the inferred
+ /// type of `_Return<'_a>`. `infer_opaque_definition_from_instantiation`
+ /// compares lifetimes directly, so we need to map the inference variables
+ /// back to concrete lifetimes: `'static`, `ReEarlyBound` or `ReFree`.
+ ///
+ /// First we map all the lifetimes in the concrete type to an equal
+ /// universal region that occurs in the concrete type's substs, in this case
+    /// this would result in `&'1 i32`. We only consider regions in the
+    /// substs in case there is an equal region that does not occur in
+    /// the substs. For example, this should be allowed:
+ /// `fn f<'a: 'b, 'b: 'a>(x: *mut &'b i32) -> impl Sized + 'a { x }`
+ ///
+ /// Then we map the regions in both the type and the subst to their
+ /// `external_name` giving `concrete_type = &'a i32`,
+ /// `substs = ['static, 'a]`. This will then allow
+ /// `infer_opaque_definition_from_instantiation` to determine that
+ /// `_Return<'_a> = &'_a i32`.
+ ///
+ /// There's a slight complication around closures. Given
+ /// `fn f<'a: 'a>() { || {} }` the closure's type is something like
+ /// `f::<'a>::{{closure}}`. The region parameter from f is essentially
+ /// ignored by type checking so ends up being inferred to an empty region.
+ /// Calling `universal_upper_bound` for such a region gives `fr_fn_body`,
+ /// which has no `external_name` in which case we use `'empty` as the
+ /// region to pass to `infer_opaque_definition_from_instantiation`.
+ #[instrument(level = "debug", skip(self, infcx))]
+ pub(crate) fn infer_opaque_types(
+ &self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ opaque_ty_decls: VecMap<OpaqueTypeKey<'tcx>, (OpaqueHiddenType<'tcx>, OpaqueTyOrigin)>,
+ ) -> VecMap<LocalDefId, OpaqueHiddenType<'tcx>> {
+ let mut result: VecMap<LocalDefId, OpaqueHiddenType<'tcx>> = VecMap::new();
+ for (opaque_type_key, (concrete_type, origin)) in opaque_ty_decls {
+ let substs = opaque_type_key.substs;
+ debug!(?concrete_type, ?substs);
+
+ let mut subst_regions = vec![self.universal_regions.fr_static];
+ let universal_substs = infcx.tcx.fold_regions(substs, |region, _| {
+ if let ty::RePlaceholder(..) = region.kind() {
+                    // Higher-kinded regions don't need remapping; they don't refer to anything outside of the substs.
+ return region;
+ }
+ let vid = self.to_region_vid(region);
+ trace!(?vid);
+ let scc = self.constraint_sccs.scc(vid);
+ trace!(?scc);
+ match self.scc_values.universal_regions_outlived_by(scc).find_map(|lb| {
+ self.eval_equal(vid, lb).then_some(self.definitions[lb].external_name?)
+ }) {
+ Some(region) => {
+ let vid = self.universal_regions.to_region_vid(region);
+ subst_regions.push(vid);
+ region
+ }
+ None => {
+ subst_regions.push(vid);
+ infcx.tcx.sess.delay_span_bug(
+ concrete_type.span,
+ "opaque type with non-universal region substs",
+ );
+ infcx.tcx.lifetimes.re_static
+ }
+ }
+ });
+
+ subst_regions.sort();
+ subst_regions.dedup();
+
+ let universal_concrete_type =
+ infcx.tcx.fold_regions(concrete_type, |region, _| match *region {
+ ty::ReVar(vid) => subst_regions
+ .iter()
+ .find(|ur_vid| self.eval_equal(vid, **ur_vid))
+ .and_then(|ur_vid| self.definitions[*ur_vid].external_name)
+ .unwrap_or(infcx.tcx.lifetimes.re_root_empty),
+ _ => region,
+ });
+
+ debug!(?universal_concrete_type, ?universal_substs);
+
+ let opaque_type_key =
+ OpaqueTypeKey { def_id: opaque_type_key.def_id, substs: universal_substs };
+ let ty = infcx.infer_opaque_definition_from_instantiation(
+ opaque_type_key,
+ universal_concrete_type,
+ origin,
+ );
+ // Sometimes two opaque types are the same only after we remap the generic parameters
+ // back to the opaque type definition. E.g. we may have `OpaqueType<X, Y>` mapped to `(X, Y)`
+ // and `OpaqueType<Y, X>` mapped to `(Y, X)`, and those are the same, but we only know that
+ // once we convert the generic parameters to those of the opaque type.
+ if let Some(prev) = result.get_mut(&opaque_type_key.def_id) {
+ if prev.ty != ty {
+ if !ty.references_error() {
+ prev.report_mismatch(
+ &OpaqueHiddenType { ty, span: concrete_type.span },
+ infcx.tcx,
+ );
+ }
+ prev.ty = infcx.tcx.ty_error();
+ }
+ // Pick a better span if there is one.
+ // FIXME(oli-obk): collect multiple spans for better diagnostics down the road.
+ prev.span = prev.span.substitute_dummy(concrete_type.span);
+ } else {
+ result.insert(
+ opaque_type_key.def_id,
+ OpaqueHiddenType { ty, span: concrete_type.span },
+ );
+ }
+ }
+ result
+ }
+
+ /// Map the regions in the type to named regions. This is similar to what
+ /// `infer_opaque_types` does, but can infer any universal region, not only
+ /// ones from the substs for the opaque type. It also doesn't double check
+ /// that the regions produced are in fact equal to the named region they are
+ /// replaced with. This is fine because this function is only to improve the
+ /// region names in error messages.
+ pub(crate) fn name_regions<T>(&self, tcx: TyCtxt<'tcx>, ty: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ tcx.fold_regions(ty, |region, _| match *region {
+ ty::ReVar(vid) => {
+ // Find something that we can name
+ let upper_bound = self.approx_universal_upper_bound(vid);
+ let upper_bound = &self.definitions[upper_bound];
+ match upper_bound.external_name {
+ Some(reg) => reg,
+ None => {
+ // Nothing exact found, so we pick the first one that we find.
+ let scc = self.constraint_sccs.scc(vid);
+ for vid in self.rev_scc_graph.as_ref().unwrap().upper_bounds(scc) {
+ match self.definitions[vid].external_name {
+ None => {}
+ Some(region) if region.is_static() => {}
+ Some(region) => return region,
+ }
+ }
+ region
+ }
+ }
+ }
+ _ => region,
+ })
+ }
+}
+
+pub trait InferCtxtExt<'tcx> {
+ fn infer_opaque_definition_from_instantiation(
+ &self,
+ opaque_type_key: OpaqueTypeKey<'tcx>,
+ instantiated_ty: OpaqueHiddenType<'tcx>,
+ origin: OpaqueTyOrigin,
+ ) -> Ty<'tcx>;
+}
+
+impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
+ /// Given the fully resolved, instantiated type for an opaque
+ /// type, i.e., the value of an inference variable like C1 or C2
+ /// (*), computes the "definition type" for an opaque type
+ /// definition -- that is, the inferred value of `Foo1<'x>` or
+ /// `Foo2<'x>` that we would conceptually use in its definition:
+ /// ```ignore (illustrative)
+ /// type Foo1<'x> = impl Bar<'x> = AAA; // <-- this type AAA
+ /// type Foo2<'x> = impl Bar<'x> = BBB; // <-- or this type BBB
+ /// fn foo<'a, 'b>(..) -> (Foo1<'a>, Foo2<'b>) { .. }
+ /// ```
+ /// Note that these values are defined in terms of a distinct set of
+ /// generic parameters (`'x` instead of `'a`) from C1 or C2. The main
+ /// purpose of this function is to do that translation.
+ ///
+ /// (*) C1 and C2 were introduced in the comments on
+ /// `register_member_constraints`. Read that comment for more context.
+ ///
+ /// # Parameters
+ ///
+ /// - `def_id`, the `impl Trait` type
+ /// - `substs`, the substs used to instantiate this opaque type
+ /// - `instantiated_ty`, the inferred type C1 -- fully resolved, lifted version of
+ /// `opaque_defn.concrete_ty`
+ #[instrument(level = "debug", skip(self))]
+ fn infer_opaque_definition_from_instantiation(
+ &self,
+ opaque_type_key: OpaqueTypeKey<'tcx>,
+ instantiated_ty: OpaqueHiddenType<'tcx>,
+ origin: OpaqueTyOrigin,
+ ) -> Ty<'tcx> {
+ if self.is_tainted_by_errors() {
+ return self.tcx.ty_error();
+ }
+
+ let OpaqueTypeKey { def_id, substs } = opaque_type_key;
+
+ // Use substs to build up a reverse map from regions to their
+        // identity mappings. This is necessary because `impl
+        // Trait` lifetimes are computed by replacing existing
+ // lifetimes with 'static and remapping only those used in the
+ // `impl Trait` return type, resulting in the parameters
+ // shifting.
+ let id_substs = InternalSubsts::identity_for_item(self.tcx, def_id.to_def_id());
+ debug!(?id_substs);
+ let map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>> =
+ substs.iter().enumerate().map(|(index, subst)| (subst, id_substs[index])).collect();
+ debug!("map = {:#?}", map);
+
+ // Convert the type from the function into a type valid outside
+ // the function, by replacing invalid regions with 'static,
+ // after producing an error for each of them.
+ let definition_ty = instantiated_ty.ty.fold_with(&mut ReverseMapper::new(
+ self.tcx,
+ opaque_type_key,
+ map,
+ instantiated_ty.ty,
+ instantiated_ty.span,
+ ));
+ debug!(?definition_ty);
+
+ if !check_opaque_type_parameter_valid(
+ self.tcx,
+ opaque_type_key,
+ origin,
+ instantiated_ty.span,
+ ) {
+ return self.tcx.ty_error();
+ }
+
+ // Only check this for TAIT. RPIT already supports `src/test/ui/impl-trait/nested-return-type2.rs`
+ // on stable and we'd break that.
+ if let OpaqueTyOrigin::TyAlias = origin {
+ // This logic duplicates most of `check_opaque_meets_bounds`.
+ // FIXME(oli-obk): Also do region checks here and then consider removing `check_opaque_meets_bounds` entirely.
+ let param_env = self.tcx.param_env(def_id);
+ let body_id = self.tcx.local_def_id_to_hir_id(def_id);
+            // HACK: This bubble is required for this test to pass:
+ // type-alias-impl-trait/issue-67844-nested-opaque.rs
+ self.tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bubble).enter(
+ move |infcx| {
+ // Require the hidden type to be well-formed with only the generics of the opaque type.
+ // Defining use functions may have more bounds than the opaque type, which is ok, as long as the
+ // hidden type is well formed even without those bounds.
+ let predicate =
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(definition_ty.into()))
+ .to_predicate(infcx.tcx);
+ let mut fulfillment_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+
+ // Require that the hidden type actually fulfills all the bounds of the opaque type, even without
+ // the bounds that the function supplies.
+ match infcx.register_hidden_type(
+ OpaqueTypeKey { def_id, substs: id_substs },
+ ObligationCause::misc(instantiated_ty.span, body_id),
+ param_env,
+ definition_ty,
+ origin,
+ ) {
+ Ok(infer_ok) => {
+ for obligation in infer_ok.obligations {
+ fulfillment_cx.register_predicate_obligation(&infcx, obligation);
+ }
+ }
+ Err(err) => {
+ infcx
+ .report_mismatched_types(
+ &ObligationCause::misc(instantiated_ty.span, body_id),
+ self.tcx.mk_opaque(def_id.to_def_id(), id_substs),
+ definition_ty,
+ err,
+ )
+ .emit();
+ }
+ }
+
+ fulfillment_cx.register_predicate_obligation(
+ &infcx,
+ Obligation::misc(instantiated_ty.span, body_id, param_env, predicate),
+ );
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = fulfillment_cx.select_all_or_error(&infcx);
+
+                // This is still required for many tests (roughly half of
+                // those in ui/type-alias-impl-trait) to pass.
+ let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+
+ if errors.is_empty() {
+ definition_ty
+ } else {
+ infcx.report_fulfillment_errors(&errors, None, false);
+ self.tcx.ty_error()
+ }
+ },
+ )
+ } else {
+ definition_ty
+ }
+ }
+}
+
+fn check_opaque_type_parameter_valid(
+ tcx: TyCtxt<'_>,
+ opaque_type_key: OpaqueTypeKey<'_>,
+ origin: OpaqueTyOrigin,
+ span: Span,
+) -> bool {
+ match origin {
+ // No need to check return position impl trait (RPIT)
+ // because for type and const parameters they are correct
+ // by construction: we convert
+ //
+ // fn foo<P0..Pn>() -> impl Trait
+ //
+ // into
+ //
+ // type Foo<P0...Pn>
+ // fn foo<P0..Pn>() -> Foo<P0...Pn>.
+ //
+ // For lifetime parameters we convert
+ //
+ // fn foo<'l0..'ln>() -> impl Trait<'l0..'lm>
+ //
+ // into
+ //
+ // type foo::<'p0..'pn>::Foo<'q0..'qm>
+ // fn foo<l0..'ln>() -> foo::<'static..'static>::Foo<'l0..'lm>.
+ //
+ // which would error here on all of the `'static` args.
+ OpaqueTyOrigin::FnReturn(..) | OpaqueTyOrigin::AsyncFn(..) => return true,
+ // Check these
+ OpaqueTyOrigin::TyAlias => {}
+ }
+ let opaque_generics = tcx.generics_of(opaque_type_key.def_id);
+ let mut seen_params: FxHashMap<_, Vec<_>> = FxHashMap::default();
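+    // Each generic parameter must be used exactly once; e.g. (illustrative)
+    // a use like `Foo<'x, 'x>` of `type Foo<'a, 'b> = impl Sized;` is
+    // rejected by the duplicate check at the end of this function.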
+ for (i, arg) in opaque_type_key.substs.iter().enumerate() {
+ let arg_is_param = match arg.unpack() {
+ GenericArgKind::Type(ty) => matches!(ty.kind(), ty::Param(_)),
+ GenericArgKind::Lifetime(lt) if lt.is_static() => {
+ tcx.sess
+ .struct_span_err(span, "non-defining opaque type use in defining scope")
+ .span_label(
+ tcx.def_span(opaque_generics.param_at(i, tcx).def_id),
+ "cannot use static lifetime; use a bound lifetime \
+ instead or remove the lifetime parameter from the \
+ opaque type",
+ )
+ .emit();
+ return false;
+ }
+ GenericArgKind::Lifetime(lt) => {
+ matches!(*lt, ty::ReEarlyBound(_) | ty::ReFree(_))
+ }
+ GenericArgKind::Const(ct) => matches!(ct.kind(), ty::ConstKind::Param(_)),
+ };
+
+ if arg_is_param {
+ seen_params.entry(arg).or_default().push(i);
+ } else {
+ // Prevent `fn foo() -> Foo<u32>` from being defining.
+ let opaque_param = opaque_generics.param_at(i, tcx);
+ tcx.sess
+ .struct_span_err(span, "non-defining opaque type use in defining scope")
+ .span_note(
+ tcx.def_span(opaque_param.def_id),
+ &format!(
+ "used non-generic {} `{}` for generic parameter",
+ opaque_param.kind.descr(),
+ arg,
+ ),
+ )
+ .emit();
+ return false;
+ }
+ }
+
+ for (_, indices) in seen_params {
+ if indices.len() > 1 {
+ let descr = opaque_generics.param_at(indices[0], tcx).kind.descr();
+ let spans: Vec<_> = indices
+ .into_iter()
+ .map(|i| tcx.def_span(opaque_generics.param_at(i, tcx).def_id))
+ .collect();
+ tcx.sess
+ .struct_span_err(span, "non-defining opaque type use in defining scope")
+ .span_note(spans, &format!("{} used multiple times", descr))
+ .emit();
+ return false;
+ }
+ }
+ true
+}
+
+struct ReverseMapper<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ key: ty::OpaqueTypeKey<'tcx>,
+ map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>,
+ map_missing_regions_to_empty: bool,
+
+ /// initially `Some`, set to `None` once error has been reported
+ hidden_ty: Option<Ty<'tcx>>,
+
+ /// Span of function being checked.
+ span: Span,
+}
+
+impl<'tcx> ReverseMapper<'tcx> {
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ key: ty::OpaqueTypeKey<'tcx>,
+ map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>,
+ hidden_ty: Ty<'tcx>,
+ span: Span,
+ ) -> Self {
+ Self {
+ tcx,
+ key,
+ map,
+ map_missing_regions_to_empty: false,
+ hidden_ty: Some(hidden_ty),
+ span,
+ }
+ }
+
+ fn fold_kind_mapping_missing_regions_to_empty(
+ &mut self,
+ kind: GenericArg<'tcx>,
+ ) -> GenericArg<'tcx> {
+ assert!(!self.map_missing_regions_to_empty);
+ self.map_missing_regions_to_empty = true;
+ let kind = kind.fold_with(self);
+ self.map_missing_regions_to_empty = false;
+ kind
+ }
+
+ fn fold_kind_normally(&mut self, kind: GenericArg<'tcx>) -> GenericArg<'tcx> {
+ assert!(!self.map_missing_regions_to_empty);
+ kind.fold_with(self)
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for ReverseMapper<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ // Ignore bound regions and `'static` regions that appear in the
+ // type, we only need to remap regions that reference lifetimes
+ // from the function declaration.
+ // This would ignore `'r` in a type like `for<'r> fn(&'r u32)`.
+ ty::ReLateBound(..) | ty::ReStatic => return r,
+
+ // If regions have been erased (by writeback), don't try to unerase
+ // them.
+ ty::ReErased => return r,
+
+ // The regions that we expect from borrow checking.
+ ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReEmpty(ty::UniverseIndex::ROOT) => {}
+
+ ty::ReEmpty(_) | ty::RePlaceholder(_) | ty::ReVar(_) => {
+ // All of the regions in the type should either have been
+ // erased by writeback, or mapped back to named regions by
+ // borrow checking.
+ bug!("unexpected region kind in opaque type: {:?}", r);
+ }
+ }
+
+ let generics = self.tcx().generics_of(self.key.def_id);
+ match self.map.get(&r.into()).map(|k| k.unpack()) {
+ Some(GenericArgKind::Lifetime(r1)) => r1,
+ Some(u) => panic!("region mapped to unexpected kind: {:?}", u),
+ None if self.map_missing_regions_to_empty => self.tcx.lifetimes.re_root_empty,
+ None if generics.parent.is_some() => {
+ if let Some(hidden_ty) = self.hidden_ty.take() {
+ unexpected_hidden_region_diagnostic(
+ self.tcx,
+ self.tcx.def_span(self.key.def_id),
+ hidden_ty,
+ r,
+ self.key,
+ )
+ .emit();
+ }
+ self.tcx.lifetimes.re_root_empty
+ }
+ None => {
+ self.tcx
+ .sess
+ .struct_span_err(self.span, "non-defining opaque type use in defining scope")
+ .span_label(
+ self.span,
+ format!(
+ "lifetime `{}` is part of concrete type but not used in \
+ parameter list of the `impl Trait` type alias",
+ r
+ ),
+ )
+ .emit();
+
+ self.tcx().lifetimes.re_static
+ }
+ }
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match *ty.kind() {
+ ty::Closure(def_id, substs) => {
+ // I am a horrible monster and I pray for death. When
+ // we encounter a closure here, it is always a closure
+ // from within the function that we are currently
+ // type-checking -- one that is now being encapsulated
+ // in an opaque type. Ideally, we would
+ // go through the types/lifetimes that it references
+ // and treat them just like we would any other type,
+ // which means we would error out if we find any
+ // reference to a type/region that is not in the
+ // "reverse map".
+ //
+ // **However,** in the case of closures, there is a
+ // somewhat subtle (read: hacky) consideration. The
+ // problem is that our closure types currently include
+ // all the lifetime parameters declared on the
+ // enclosing function, even if they are unused by the
+ // closure itself. We can't readily filter them out,
+ // so here we replace those values with `'empty`. This
+ // can't really make a difference to the rest of the
+ // compiler; those regions are ignored for the
+ // outlives relation, and hence don't affect trait
+ // selection or auto traits, and they are erased
+ // during codegen.
+
+ let generics = self.tcx.generics_of(def_id);
+ let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| {
+ if index < generics.parent_count {
+ // Accommodate missing regions in the parent kinds...
+ self.fold_kind_mapping_missing_regions_to_empty(kind)
+ } else {
+ // ...but not elsewhere.
+ self.fold_kind_normally(kind)
+ }
+ }));
+
+ self.tcx.mk_closure(def_id, substs)
+ }
+
+ ty::Generator(def_id, substs, movability) => {
+ let generics = self.tcx.generics_of(def_id);
+ let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| {
+ if index < generics.parent_count {
+ // Accommodate missing regions in the parent kinds...
+ self.fold_kind_mapping_missing_regions_to_empty(kind)
+ } else {
+ // ...but not elsewhere.
+ self.fold_kind_normally(kind)
+ }
+ }));
+
+ self.tcx.mk_generator(def_id, substs, movability)
+ }
+
+ ty::Param(param) => {
+ // Look it up in the substitution list.
+ match self.map.get(&ty.into()).map(|k| k.unpack()) {
+ // Found it in the substitution list; replace with the parameter from the
+ // opaque type.
+ Some(GenericArgKind::Type(t1)) => t1,
+ Some(u) => panic!("type mapped to unexpected kind: {:?}", u),
+ None => {
+ debug!(?param, ?self.map);
+ self.tcx
+ .sess
+ .struct_span_err(
+ self.span,
+ &format!(
+ "type parameter `{}` is part of concrete type but not \
+ used in parameter list for the `impl Trait` type alias",
+ ty
+ ),
+ )
+ .emit();
+
+ self.tcx().ty_error()
+ }
+ }
+ }
+
+ _ => ty.super_fold_with(self),
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ trace!("checking const {:?}", ct);
+ // Find a const parameter
+ match ct.kind() {
+ ty::ConstKind::Param(..) => {
+ // Look it up in the substitution list.
+ match self.map.get(&ct.into()).map(|k| k.unpack()) {
+ // Found it in the substitution list, replace with the parameter from the
+ // opaque type.
+ Some(GenericArgKind::Const(c1)) => c1,
+ Some(u) => panic!("const mapped to unexpected kind: {:?}", u),
+ None => {
+ self.tcx
+ .sess
+ .struct_span_err(
+ self.span,
+ &format!(
+ "const parameter `{}` is part of concrete type but not \
+ used in parameter list for the `impl Trait` type alias",
+ ct
+ ),
+ )
+ .emit();
+
+ self.tcx().const_error(ct.ty())
+ }
+ }
+ }
+
+ _ => ct,
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/region_infer/reverse_sccs.rs b/compiler/rustc_borrowck/src/region_infer/reverse_sccs.rs
new file mode 100644
index 000000000..1e6798eee
--- /dev/null
+++ b/compiler/rustc_borrowck/src/region_infer/reverse_sccs.rs
@@ -0,0 +1,68 @@
+use crate::constraints::ConstraintSccIndex;
+use crate::RegionInferenceContext;
+use itertools::Itertools;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::graph::vec_graph::VecGraph;
+use rustc_data_structures::graph::WithSuccessors;
+use rustc_middle::ty::RegionVid;
+use std::ops::Range;
+use std::rc::Rc;
+
+pub(crate) struct ReverseSccGraph {
+ graph: VecGraph<ConstraintSccIndex>,
+ /// For each SCC, the range of `universal_regions` that use that SCC as
+ /// their value.
+ scc_regions: FxHashMap<ConstraintSccIndex, Range<usize>>,
+    /// All of the universal regions, grouped by SCC so that `scc_regions`
+    /// can index into this vector.
+ universal_regions: Vec<RegionVid>,
+}
+
+impl ReverseSccGraph {
+ /// Find all universal regions that are required to outlive the given SCC.
+ pub(super) fn upper_bounds<'a>(
+ &'a self,
+ scc0: ConstraintSccIndex,
+ ) -> impl Iterator<Item = RegionVid> + 'a {
+ let mut duplicates = FxHashSet::default();
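+        // `graph` is the *reverse* of the SCC constraint graph, so a
+        // depth-first search from `scc0` visits the SCCs required to
+        // outlive `scc0`; each is then expanded to its universal regions.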
+ self.graph
+ .depth_first_search(scc0)
+ .flat_map(move |scc1| {
+ self.scc_regions
+ .get(&scc1)
+ .map_or(&[][..], |range| &self.universal_regions[range.clone()])
+ })
+ .copied()
+ .filter(move |r| duplicates.insert(*r))
+ }
+}
+
+impl RegionInferenceContext<'_> {
+ /// Compute and return the reverse SCC-based constraint graph (lazily).
+ pub(super) fn reverse_scc_graph(&mut self) -> Rc<ReverseSccGraph> {
+ if let Some(g) = &self.rev_scc_graph {
+ return g.clone();
+ }
+
+ let graph = self.constraint_sccs.reverse();
+ let mut paired_scc_regions = self
+ .universal_regions
+ .universal_regions()
+ .map(|region| (self.constraint_sccs.scc(region), region))
+ .collect_vec();
+ paired_scc_regions.sort();
+ let universal_regions = paired_scc_regions.iter().map(|&(_, region)| region).collect();
+
+ let mut scc_regions = FxHashMap::default();
+ let mut start = 0;
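+        // The pairs were sorted by SCC above, so each SCC's regions form a
+        // contiguous run; e.g. (illustrative) [(scc0, r1), (scc0, r2),
+        // (scc1, r3)] yields scc_regions = {scc0: 0..2, scc1: 2..3}.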
+ for (scc, group) in &paired_scc_regions.into_iter().group_by(|(scc, _)| *scc) {
+ let group_size = group.count();
+ scc_regions.insert(scc, start..start + group_size);
+ start += group_size;
+ }
+
+ let rev_graph = Rc::new(ReverseSccGraph { graph, scc_regions, universal_regions });
+ self.rev_scc_graph = Some(rev_graph.clone());
+ rev_graph
+ }
+}
diff --git a/compiler/rustc_borrowck/src/region_infer/values.rs b/compiler/rustc_borrowck/src/region_infer/values.rs
new file mode 100644
index 000000000..c81ef10f7
--- /dev/null
+++ b/compiler/rustc_borrowck/src/region_infer/values.rs
@@ -0,0 +1,488 @@
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_index::bit_set::SparseBitMatrix;
+use rustc_index::interval::IntervalSet;
+use rustc_index::interval::SparseIntervalMatrix;
+use rustc_index::vec::Idx;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::{BasicBlock, Body, Location};
+use rustc_middle::ty::{self, RegionVid};
+use std::fmt::Debug;
+use std::rc::Rc;
+
+/// Maps between a `Location` and a `PointIndex` (and vice versa).
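+///
+/// Each block with `n` statements contributes `n + 1` points, one per
+/// statement plus one for the terminator. For example (illustrative), if
+/// `bb0` has two statements, its points are `PointIndex` 0..=2 and `bb1`
+/// begins at `PointIndex` 3.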
+pub(crate) struct RegionValueElements {
+    /// For each basic block, the number of points in all preceding
+    /// blocks; i.e., the `PointIndex` at which this block's points begin.
+ statements_before_block: IndexVec<BasicBlock, usize>,
+
+ /// Map backward from each point to the basic block that it
+ /// belongs to.
+ basic_blocks: IndexVec<PointIndex, BasicBlock>,
+
+ num_points: usize,
+}
+
+impl RegionValueElements {
+ pub(crate) fn new(body: &Body<'_>) -> Self {
+ let mut num_points = 0;
+ let statements_before_block: IndexVec<BasicBlock, usize> = body
+ .basic_blocks()
+ .iter()
+ .map(|block_data| {
+ let v = num_points;
+ num_points += block_data.statements.len() + 1;
+ v
+ })
+ .collect();
+ debug!("RegionValueElements: statements_before_block={:#?}", statements_before_block);
+ debug!("RegionValueElements: num_points={:#?}", num_points);
+
+ let mut basic_blocks = IndexVec::with_capacity(num_points);
+ for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
+ basic_blocks.extend((0..=bb_data.statements.len()).map(|_| bb));
+ }
+
+ Self { statements_before_block, basic_blocks, num_points }
+ }
+
+ /// Total number of point indices
+ pub(crate) fn num_points(&self) -> usize {
+ self.num_points
+ }
+
+ /// Converts a `Location` into a `PointIndex`. O(1).
+ pub(crate) fn point_from_location(&self, location: Location) -> PointIndex {
+ let Location { block, statement_index } = location;
+ let start_index = self.statements_before_block[block];
+ PointIndex::new(start_index + statement_index)
+ }
+
+    /// Returns the `PointIndex` of the first point in the given block. O(1).
+ pub(crate) fn entry_point(&self, block: BasicBlock) -> PointIndex {
+ let start_index = self.statements_before_block[block];
+ PointIndex::new(start_index)
+ }
+
+ /// Return the PointIndex for the block start of this index.
+ pub(crate) fn to_block_start(&self, index: PointIndex) -> PointIndex {
+ PointIndex::new(self.statements_before_block[self.basic_blocks[index]])
+ }
+
+ /// Converts a `PointIndex` back to a location. O(1).
+ pub(crate) fn to_location(&self, index: PointIndex) -> Location {
+ assert!(index.index() < self.num_points);
+ let block = self.basic_blocks[index];
+ let start_index = self.statements_before_block[block];
+ let statement_index = index.index() - start_index;
+ Location { block, statement_index }
+ }
+
+ /// Sometimes we get point-indices back from bitsets that may be
+ /// out of range (because they round up to the nearest 2^N number
+ /// of bits). Use this function to filter such points out if you
+ /// like.
+ pub(crate) fn point_in_range(&self, index: PointIndex) -> bool {
+ index.index() < self.num_points
+ }
+}
+
+rustc_index::newtype_index! {
+ /// A single integer representing a `Location` in the MIR control-flow
+ /// graph. Constructed efficiently from `RegionValueElements`.
+ pub struct PointIndex { DEBUG_FORMAT = "PointIndex({})" }
+}
+
+rustc_index::newtype_index! {
+ /// A single integer representing a `ty::Placeholder`.
+ pub struct PlaceholderIndex { DEBUG_FORMAT = "PlaceholderIndex({})" }
+}
+
+/// An individual element in a region value -- the value of a
+/// particular region variable consists of a set of these elements.
+#[derive(Debug, Clone)]
+pub(crate) enum RegionElement {
+ /// A point in the control-flow graph.
+ Location(Location),
+
+ /// A universally quantified region from the root universe (e.g.,
+ /// a lifetime parameter).
+ RootUniversalRegion(RegionVid),
+
+ /// A placeholder (e.g., instantiated from a `for<'a> fn(&'a u32)`
+ /// type).
+ PlaceholderRegion(ty::PlaceholderRegion),
+}
+
+/// When we initially compute liveness, we use an interval matrix storing
+/// liveness ranges for each region-vid.
+pub(crate) struct LivenessValues<N: Idx> {
+ elements: Rc<RegionValueElements>,
+ points: SparseIntervalMatrix<N, PointIndex>,
+}
+
+impl<N: Idx> LivenessValues<N> {
+ /// Creates a new set of "region values" that tracks causal information.
+ /// Each of the regions in num_region_variables will be initialized with an
+ /// empty set of points and no causal information.
+ pub(crate) fn new(elements: Rc<RegionValueElements>) -> Self {
+ Self { points: SparseIntervalMatrix::new(elements.num_points), elements }
+ }
+
+ /// Iterate through each region that has a value in this set.
+ pub(crate) fn rows(&self) -> impl Iterator<Item = N> {
+ self.points.rows()
+ }
+
+ /// Adds the given element to the value for the given region. Returns whether
+ /// the element is newly added (i.e., was not already present).
+ pub(crate) fn add_element(&mut self, row: N, location: Location) -> bool {
+ debug!("LivenessValues::add(r={:?}, location={:?})", row, location);
+ let index = self.elements.point_from_location(location);
+ self.points.insert(row, index)
+ }
+
+ /// Adds all the elements in the given bit array into the given
+ /// region. Returns whether any of them are newly added.
+ pub(crate) fn add_elements(&mut self, row: N, locations: &IntervalSet<PointIndex>) -> bool {
+ debug!("LivenessValues::add_elements(row={:?}, locations={:?})", row, locations);
+ self.points.union_row(row, locations)
+ }
+
+ /// Adds all the control-flow points to the values for `r`.
+ pub(crate) fn add_all_points(&mut self, row: N) {
+ self.points.insert_all_into_row(row);
+ }
+
+ /// Returns `true` if the region `r` contains the given element.
+ pub(crate) fn contains(&self, row: N, location: Location) -> bool {
+ let index = self.elements.point_from_location(location);
+ self.points.row(row).map_or(false, |r| r.contains(index))
+ }
+
+ /// Returns an iterator of all the elements contained by the region `r`
+ pub(crate) fn get_elements(&self, row: N) -> impl Iterator<Item = Location> + '_ {
+ self.points
+ .row(row)
+ .into_iter()
+ .flat_map(|set| set.iter())
+ .take_while(move |&p| self.elements.point_in_range(p))
+ .map(move |p| self.elements.to_location(p))
+ }
+
+ /// Returns a "pretty" string value of the region. Meant for debugging.
+ pub(crate) fn region_value_str(&self, r: N) -> String {
+ region_value_str(self.get_elements(r).map(RegionElement::Location))
+ }
+}
+
+/// Maps from `ty::PlaceholderRegion` values that are used in the rest of
+/// rustc to the internal `PlaceholderIndex` values that are used in
+/// NLL.
+#[derive(Default)]
+pub(crate) struct PlaceholderIndices {
+ indices: FxIndexSet<ty::PlaceholderRegion>,
+}
+
+impl PlaceholderIndices {
+ pub(crate) fn insert(&mut self, placeholder: ty::PlaceholderRegion) -> PlaceholderIndex {
+ let (index, _) = self.indices.insert_full(placeholder);
+ index.into()
+ }
+
+ pub(crate) fn lookup_index(&self, placeholder: ty::PlaceholderRegion) -> PlaceholderIndex {
+ self.indices.get_index_of(&placeholder).unwrap().into()
+ }
+
+ pub(crate) fn lookup_placeholder(
+ &self,
+ placeholder: PlaceholderIndex,
+ ) -> ty::PlaceholderRegion {
+ self.indices[placeholder.index()]
+ }
+
+ pub(crate) fn len(&self) -> usize {
+ self.indices.len()
+ }
+}
+
+/// Stores the full values for a set of regions (in contrast to
+/// `LivenessValues`, which only stores those points in the CFG where a
+/// region is live). The full value for a region may contain points in
+/// the CFG, but also free regions as well as bound universe
+/// placeholders.
+///
+/// Example:
+///
+/// ```text
+/// fn foo(x: &'a u32) -> &'a u32 {
+/// let y: &'0 u32 = x; // let's call this `'0`
+/// y
+/// }
+/// ```
+///
+/// Here, the variable `'0` would contain the free region `'a`,
+/// because (since it is returned) it must live for at least `'a`. But
+/// it would also contain various points from within the function.
+#[derive(Clone)]
+pub(crate) struct RegionValues<N: Idx> {
+ elements: Rc<RegionValueElements>,
+ placeholder_indices: Rc<PlaceholderIndices>,
+ points: SparseIntervalMatrix<N, PointIndex>,
+ free_regions: SparseBitMatrix<N, RegionVid>,
+
+ /// Placeholders represent bound regions -- so something like `'a`
+    /// in `for<'a> fn(&'a u32)`.
+ placeholders: SparseBitMatrix<N, PlaceholderIndex>,
+}
+
+impl<N: Idx> RegionValues<N> {
+ /// Creates a new set of "region values" that tracks causal information.
+ /// Each of the regions in num_region_variables will be initialized with an
+ /// empty set of points and no causal information.
+ pub(crate) fn new(
+ elements: &Rc<RegionValueElements>,
+ num_universal_regions: usize,
+ placeholder_indices: &Rc<PlaceholderIndices>,
+ ) -> Self {
+ let num_placeholders = placeholder_indices.len();
+ Self {
+ elements: elements.clone(),
+ points: SparseIntervalMatrix::new(elements.num_points),
+ placeholder_indices: placeholder_indices.clone(),
+ free_regions: SparseBitMatrix::new(num_universal_regions),
+ placeholders: SparseBitMatrix::new(num_placeholders),
+ }
+ }
+
+ /// Adds the given element to the value for the given region. Returns whether
+ /// the element is newly added (i.e., was not already present).
+ pub(crate) fn add_element(&mut self, r: N, elem: impl ToElementIndex) -> bool {
+ debug!("add(r={:?}, elem={:?})", r, elem);
+ elem.add_to_row(self, r)
+ }
+
+ /// Adds all the control-flow points to the values for `r`.
+ pub(crate) fn add_all_points(&mut self, r: N) {
+ self.points.insert_all_into_row(r);
+ }
+
+ /// Adds all elements in `r_from` to `r_to` (because e.g., `r_to:
+ /// r_from`).
+ pub(crate) fn add_region(&mut self, r_to: N, r_from: N) -> bool {
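+        // Note: non-short-circuiting `|` ensures all three unions run
+        // even when an earlier one already reported a change.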
+ self.points.union_rows(r_from, r_to)
+ | self.free_regions.union_rows(r_from, r_to)
+ | self.placeholders.union_rows(r_from, r_to)
+ }
+
+ /// Returns `true` if the region `r` contains the given element.
+ pub(crate) fn contains(&self, r: N, elem: impl ToElementIndex) -> bool {
+ elem.contained_in_row(self, r)
+ }
+
+ /// `self[to] |= values[from]`, essentially: that is, take all the
+ /// elements for the region `from` from `values` and add them to
+ /// the region `to` in `self`.
+ pub(crate) fn merge_liveness<M: Idx>(&mut self, to: N, from: M, values: &LivenessValues<M>) {
+ if let Some(set) = values.points.row(from) {
+ self.points.union_row(to, set);
+ }
+ }
+
+ /// Returns `true` if `sup_region` contains all the CFG points that
+ /// `sub_region` contains. Ignores universal regions.
+ pub(crate) fn contains_points(&self, sup_region: N, sub_region: N) -> bool {
+ if let Some(sub_row) = self.points.row(sub_region) {
+ if let Some(sup_row) = self.points.row(sup_region) {
+ sup_row.superset(sub_row)
+ } else {
+ // sup row is empty, so sub row must be empty
+ sub_row.is_empty()
+ }
+ } else {
+ // sub row is empty, always true
+ true
+ }
+ }
+
+ /// Returns the locations contained within a given region `r`.
+ pub(crate) fn locations_outlived_by<'a>(&'a self, r: N) -> impl Iterator<Item = Location> + 'a {
+ self.points.row(r).into_iter().flat_map(move |set| {
+ set.iter()
+ .take_while(move |&p| self.elements.point_in_range(p))
+ .map(move |p| self.elements.to_location(p))
+ })
+ }
+
+ /// Returns just the universal regions that are contained in a given region's value.
+ pub(crate) fn universal_regions_outlived_by<'a>(
+ &'a self,
+ r: N,
+ ) -> impl Iterator<Item = RegionVid> + 'a {
+ self.free_regions.row(r).into_iter().flat_map(|set| set.iter())
+ }
+
+    /// Returns the placeholder regions contained in a given region's value.
+ pub(crate) fn placeholders_contained_in<'a>(
+ &'a self,
+ r: N,
+ ) -> impl Iterator<Item = ty::PlaceholderRegion> + 'a {
+ self.placeholders
+ .row(r)
+ .into_iter()
+ .flat_map(|set| set.iter())
+ .map(move |p| self.placeholder_indices.lookup_placeholder(p))
+ }
+
+ /// Returns all the elements contained in a given region's value.
+ pub(crate) fn elements_contained_in<'a>(
+ &'a self,
+ r: N,
+ ) -> impl Iterator<Item = RegionElement> + 'a {
+ let points_iter = self.locations_outlived_by(r).map(RegionElement::Location);
+
+ let free_regions_iter =
+ self.universal_regions_outlived_by(r).map(RegionElement::RootUniversalRegion);
+
+ let placeholder_universes_iter =
+ self.placeholders_contained_in(r).map(RegionElement::PlaceholderRegion);
+
+ points_iter.chain(free_regions_iter).chain(placeholder_universes_iter)
+ }
+
+ /// Returns a "pretty" string value of the region. Meant for debugging.
+ pub(crate) fn region_value_str(&self, r: N) -> String {
+ region_value_str(self.elements_contained_in(r))
+ }
+}
+
+pub(crate) trait ToElementIndex: Debug + Copy {
+ fn add_to_row<N: Idx>(self, values: &mut RegionValues<N>, row: N) -> bool;
+
+ fn contained_in_row<N: Idx>(self, values: &RegionValues<N>, row: N) -> bool;
+}
+
+impl ToElementIndex for Location {
+ fn add_to_row<N: Idx>(self, values: &mut RegionValues<N>, row: N) -> bool {
+ let index = values.elements.point_from_location(self);
+ values.points.insert(row, index)
+ }
+
+ fn contained_in_row<N: Idx>(self, values: &RegionValues<N>, row: N) -> bool {
+ let index = values.elements.point_from_location(self);
+ values.points.contains(row, index)
+ }
+}
+
+impl ToElementIndex for RegionVid {
+ fn add_to_row<N: Idx>(self, values: &mut RegionValues<N>, row: N) -> bool {
+ values.free_regions.insert(row, self)
+ }
+
+ fn contained_in_row<N: Idx>(self, values: &RegionValues<N>, row: N) -> bool {
+ values.free_regions.contains(row, self)
+ }
+}
+
+impl ToElementIndex for ty::PlaceholderRegion {
+ fn add_to_row<N: Idx>(self, values: &mut RegionValues<N>, row: N) -> bool {
+ let index = values.placeholder_indices.lookup_index(self);
+ values.placeholders.insert(row, index)
+ }
+
+ fn contained_in_row<N: Idx>(self, values: &RegionValues<N>, row: N) -> bool {
+ let index = values.placeholder_indices.lookup_index(self);
+ values.placeholders.contains(row, index)
+ }
+}
+
+pub(crate) fn location_set_str(
+ elements: &RegionValueElements,
+ points: impl IntoIterator<Item = PointIndex>,
+) -> String {
+ region_value_str(
+ points
+ .into_iter()
+ .take_while(|&p| elements.point_in_range(p))
+ .map(|p| elements.to_location(p))
+ .map(RegionElement::Location),
+ )
+}
+
+fn region_value_str(elements: impl IntoIterator<Item = RegionElement>) -> String {
+ let mut result = String::new();
+ result.push('{');
+
+    // Set to `Some((l1, l2))` when we have observed all the locations
+    // from `l1..=l2` (inclusive) but not yet printed them. This range
+    // gets extended if we then see `l3` where `l3` is the successor
+    // of `l2`.
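+    //
+    // For example (illustrative): feeding in bb0[0], bb0[1], bb0[2] and then
+    // the free region '_#2r produces the string "{bb0[0..=2], '_#2r}".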
+ let mut open_location: Option<(Location, Location)> = None;
+
+ let mut sep = "";
+ let mut push_sep = |s: &mut String| {
+ s.push_str(sep);
+ sep = ", ";
+ };
+
+ for element in elements {
+ match element {
+ RegionElement::Location(l) => {
+ if let Some((location1, location2)) = open_location {
+ if location2.block == l.block
+ && location2.statement_index == l.statement_index - 1
+ {
+ open_location = Some((location1, l));
+ continue;
+ }
+
+ push_sep(&mut result);
+ push_location_range(&mut result, location1, location2);
+ }
+
+ open_location = Some((l, l));
+ }
+
+ RegionElement::RootUniversalRegion(fr) => {
+ if let Some((location1, location2)) = open_location {
+ push_sep(&mut result);
+ push_location_range(&mut result, location1, location2);
+ open_location = None;
+ }
+
+ push_sep(&mut result);
+ result.push_str(&format!("{:?}", fr));
+ }
+
+ RegionElement::PlaceholderRegion(placeholder) => {
+ if let Some((location1, location2)) = open_location {
+ push_sep(&mut result);
+ push_location_range(&mut result, location1, location2);
+ open_location = None;
+ }
+
+ push_sep(&mut result);
+ result.push_str(&format!("{:?}", placeholder));
+ }
+ }
+ }
+
+ if let Some((location1, location2)) = open_location {
+ push_sep(&mut result);
+ push_location_range(&mut result, location1, location2);
+ }
+
+ result.push('}');
+
+ return result;
+
+ fn push_location_range(str: &mut String, location1: Location, location2: Location) {
+ if location1 == location2 {
+ str.push_str(&format!("{:?}", location1));
+ } else {
+ assert_eq!(location1.block, location2.block);
+ str.push_str(&format!(
+ "{:?}[{}..={}]",
+ location1.block, location1.statement_index, location2.statement_index
+ ));
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/renumber.rs b/compiler/rustc_borrowck/src/renumber.rs
new file mode 100644
index 000000000..7a8ce621c
--- /dev/null
+++ b/compiler/rustc_borrowck/src/renumber.rs
@@ -0,0 +1,83 @@
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin};
+use rustc_middle::mir::visit::{MutVisitor, TyContext};
+use rustc_middle::mir::{Body, Location, Promoted};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
+
+/// Replaces all free regions appearing in the MIR with fresh
+/// inference variables.
+#[instrument(skip(infcx, body, promoted), level = "debug")]
+pub fn renumber_mir<'tcx>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ body: &mut Body<'tcx>,
+ promoted: &mut IndexVec<Promoted, Body<'tcx>>,
+) {
+ debug!(?body.arg_count);
+
+ let mut visitor = NllVisitor { infcx };
+
+ for body in promoted.iter_mut() {
+ visitor.visit_body(body);
+ }
+
+ visitor.visit_body(body);
+}
+
+/// Replaces all regions appearing in `value` with fresh inference
+/// variables.
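+///
+/// For example (illustrative): renumbering the type `&'static u32` yields
+/// `&'_#3r u32` for some freshly created existential region variable.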
+#[instrument(skip(infcx), level = "debug")]
+pub fn renumber_regions<'tcx, T>(infcx: &InferCtxt<'_, 'tcx>, value: T) -> T
+where
+ T: TypeFoldable<'tcx>,
+{
+ infcx.tcx.fold_regions(value, |_region, _depth| {
+ let origin = NllRegionVariableOrigin::Existential { from_forall: false };
+ infcx.next_nll_region_var(origin)
+ })
+}
+
+struct NllVisitor<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> NllVisitor<'a, 'tcx> {
+ fn renumber_regions<T>(&mut self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ renumber_regions(self.infcx, value)
+ }
+}
+
+impl<'a, 'tcx> MutVisitor<'tcx> for NllVisitor<'a, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn visit_ty(&mut self, ty: &mut Ty<'tcx>, ty_context: TyContext) {
+ *ty = self.renumber_regions(*ty);
+
+ debug!(?ty);
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn visit_substs(&mut self, substs: &mut SubstsRef<'tcx>, location: Location) {
+ *substs = self.renumber_regions(*substs);
+
+ debug!(?substs);
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn visit_region(&mut self, region: &mut ty::Region<'tcx>, location: Location) {
+ let old_region = *region;
+ *region = self.renumber_regions(old_region);
+
+ debug!(?region);
+ }
+
+ fn visit_const(&mut self, constant: &mut ty::Const<'tcx>, _location: Location) {
+ *constant = self.renumber_regions(*constant);
+ }
+}
diff --git a/compiler/rustc_borrowck/src/session_diagnostics.rs b/compiler/rustc_borrowck/src/session_diagnostics.rs
new file mode 100644
index 000000000..895723d44
--- /dev/null
+++ b/compiler/rustc_borrowck/src/session_diagnostics.rs
@@ -0,0 +1,44 @@
+use rustc_macros::{SessionDiagnostic, SessionSubdiagnostic};
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+
+#[derive(SessionDiagnostic)]
+#[error(borrowck::move_unsized, code = "E0161")]
+pub(crate) struct MoveUnsized<'tcx> {
+ pub ty: Ty<'tcx>,
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
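+
+// Illustrative usage (an assumption, not part of this file): diagnostics
+// like the above are typically emitted via
+// `sess.emit_err(MoveUnsized { ty, span })`.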
+
+#[derive(SessionDiagnostic)]
+#[error(borrowck::higher_ranked_lifetime_error)]
+pub(crate) struct HigherRankedLifetimeError {
+ #[subdiagnostic]
+ pub cause: Option<HigherRankedErrorCause>,
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionSubdiagnostic)]
+pub(crate) enum HigherRankedErrorCause {
+ #[note(borrowck::could_not_prove)]
+ CouldNotProve { predicate: String },
+ #[note(borrowck::could_not_normalize)]
+ CouldNotNormalize { value: String },
+}
+
+#[derive(SessionDiagnostic)]
+#[error(borrowck::higher_ranked_subtype_error)]
+pub(crate) struct HigherRankedSubtypeError {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(borrowck::generic_does_not_live_long_enough)]
+pub(crate) struct GenericDoesNotLiveLongEnough {
+ pub kind: String,
+ #[primary_span]
+ pub span: Span,
+}
diff --git a/compiler/rustc_borrowck/src/type_check/canonical.rs b/compiler/rustc_borrowck/src/type_check/canonical.rs
new file mode 100644
index 000000000..6cfe5efb6
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/canonical.rs
@@ -0,0 +1,171 @@
+use std::fmt;
+
+use rustc_infer::infer::canonical::Canonical;
+use rustc_infer::traits::query::NoSolution;
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::{self, ToPredicate, TypeFoldable};
+use rustc_span::def_id::DefId;
+use rustc_span::Span;
+use rustc_trait_selection::traits::query::type_op::{self, TypeOpOutput};
+use rustc_trait_selection::traits::query::Fallible;
+
+use crate::diagnostics::{ToUniverseInfo, UniverseInfo};
+
+use super::{Locations, NormalizeLocation, TypeChecker};
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+ /// Given some operation `op` that manipulates types, proves
+ /// predicates, or otherwise uses the inference context, executes
+ /// `op` and then executes all the further obligations that `op`
+ /// returns. This will yield a set of outlives constraints amongst
+ /// regions which are extracted and stored as having occurred at
+ /// `locations`.
+ ///
+ /// **Any `rustc_infer::infer` operations that might generate region
+ /// constraints should occur within this method so that those
+ /// constraints can be properly localized!**
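+    ///
+    /// If executing `op` creates new universes (e.g., by instantiating
+    /// higher-ranked binders), a cause is recorded for each new universe so
+    /// that later region errors can refer back to it.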
+ #[instrument(skip(self, category, op), level = "trace")]
+ pub(super) fn fully_perform_op<R, Op>(
+ &mut self,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ op: Op,
+ ) -> Fallible<R>
+ where
+ Op: type_op::TypeOp<'tcx, Output = R>,
+ Op::ErrorInfo: ToUniverseInfo<'tcx>,
+ {
+ let old_universe = self.infcx.universe();
+
+ let TypeOpOutput { output, constraints, error_info } = op.fully_perform(self.infcx)?;
+
+ if let Some(data) = constraints {
+ self.push_region_constraints(locations, category, data);
+ }
+
+ let universe = self.infcx.universe();
+
+ if old_universe != universe {
+ let universe_info = match error_info {
+ Some(error_info) => error_info.to_universe_info(old_universe),
+ None => UniverseInfo::other(),
+ };
+ for u in old_universe..universe {
+ self.borrowck_context
+ .constraints
+ .universe_causes
+ .insert(u + 1, universe_info.clone());
+ }
+ }
+
+ Ok(output)
+ }
+
+ pub(super) fn instantiate_canonical_with_fresh_inference_vars<T>(
+ &mut self,
+ span: Span,
+ canonical: &Canonical<'tcx, T>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let (instantiated, _) =
+ self.infcx.instantiate_canonical_with_fresh_inference_vars(span, canonical);
+
+ for u in 0..canonical.max_universe.as_u32() {
+ let info = UniverseInfo::other();
+ self.borrowck_context
+ .constraints
+ .universe_causes
+ .insert(ty::UniverseIndex::from_u32(u), info);
+ }
+
+ instantiated
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub(super) fn prove_trait_ref(
+ &mut self,
+ trait_ref: ty::TraitRef<'tcx>,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ ) {
+ self.prove_predicates(
+ Some(ty::Binder::dummy(ty::PredicateKind::Trait(ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Positive,
+ }))),
+ locations,
+ category,
+ );
+ }
+
+ pub(super) fn normalize_and_prove_instantiated_predicates(
+ &mut self,
+ // Keep this parameter for now, in case we start using
+ // it in `ConstraintCategory` at some point.
+ _def_id: DefId,
+ instantiated_predicates: ty::InstantiatedPredicates<'tcx>,
+ locations: Locations,
+ ) {
+ for (predicate, span) in instantiated_predicates
+ .predicates
+ .into_iter()
+ .zip(instantiated_predicates.spans.into_iter())
+ {
+ debug!(?predicate);
+ let predicate = self.normalize(predicate, locations);
+ self.prove_predicate(predicate, locations, ConstraintCategory::Predicate(span));
+ }
+ }
+
+ pub(super) fn prove_predicates(
+ &mut self,
+ predicates: impl IntoIterator<Item = impl ToPredicate<'tcx>>,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ ) {
+ for predicate in predicates {
+ let predicate = predicate.to_predicate(self.tcx());
+ debug!("prove_predicates(predicate={:?}, locations={:?})", predicate, locations,);
+
+ self.prove_predicate(predicate, locations, category);
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub(super) fn prove_predicate(
+ &mut self,
+ predicate: ty::Predicate<'tcx>,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ ) {
+ let param_env = self.param_env;
+ self.fully_perform_op(
+ locations,
+ category,
+ param_env.and(type_op::prove_predicate::ProvePredicate::new(predicate)),
+ )
+ .unwrap_or_else(|NoSolution| {
+ span_mirbug!(self, NoSolution, "could not prove {:?}", predicate);
+ })
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub(super) fn normalize<T>(&mut self, value: T, location: impl NormalizeLocation) -> T
+ where
+ T: type_op::normalize::Normalizable<'tcx> + fmt::Display + Copy + 'tcx,
+ {
+ let param_env = self.param_env;
+ self.fully_perform_op(
+ location.to_locations(),
+ ConstraintCategory::Boring,
+ param_env.and(type_op::normalize::Normalize::new(value)),
+ )
+ .unwrap_or_else(|NoSolution| {
+ span_mirbug!(self, NoSolution, "failed to normalize `{:?}`", value);
+ value
+ })
+ }
+}
diff --git a/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs b/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs
new file mode 100644
index 000000000..167960918
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs
@@ -0,0 +1,204 @@
+use rustc_infer::infer::canonical::QueryOutlivesConstraint;
+use rustc_infer::infer::canonical::QueryRegionConstraints;
+use rustc_infer::infer::outlives::env::RegionBoundPairs;
+use rustc_infer::infer::outlives::obligations::{TypeOutlives, TypeOutlivesDelegate};
+use rustc_infer::infer::region_constraints::{GenericKind, VerifyBound};
+use rustc_infer::infer::{self, InferCtxt, SubregionOrigin};
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::TypeVisitable;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::{Span, DUMMY_SP};
+
+use crate::{
+ constraints::OutlivesConstraint,
+ nll::ToRegionVid,
+ region_infer::TypeTest,
+ type_check::{Locations, MirTypeckRegionConstraints},
+ universal_regions::UniversalRegions,
+};
+
+pub(crate) struct ConstraintConversion<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ tcx: TyCtxt<'tcx>,
+ universal_regions: &'a UniversalRegions<'tcx>,
+    /// Each region-bound pair (RBP) `GK: 'a` is assumed to be true. These encode
+ /// relationships like `T: 'a` that are added via implicit bounds
+ /// or the `param_env`.
+ ///
+ /// Each region here is guaranteed to be a key in the `indices`
+ /// map. We use the "original" regions (i.e., the keys from the
+ /// map, and not the values) because the code in
+ /// `process_registered_region_obligations` has some special-cased
+ /// logic expecting to see (e.g.) `ReStatic`, and if we supplied
+ /// our special inference variable there, we would mess that up.
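+    ///
+    /// For example (illustrative): a where-clause `T: 'a` ends up stored
+    /// here as the pair `(GenericKind::Param(T), 'a)`.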
+ region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+ implicit_region_bound: ty::Region<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ locations: Locations,
+ span: Span,
+ category: ConstraintCategory<'tcx>,
+ constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
+}
+
+impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> {
+ pub(crate) fn new(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ universal_regions: &'a UniversalRegions<'tcx>,
+ region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+ implicit_region_bound: ty::Region<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ locations: Locations,
+ span: Span,
+ category: ConstraintCategory<'tcx>,
+ constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
+ ) -> Self {
+ Self {
+ infcx,
+ tcx: infcx.tcx,
+ universal_regions,
+ region_bound_pairs,
+ implicit_region_bound,
+ param_env,
+ locations,
+ span,
+ category,
+ constraints,
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub(super) fn convert_all(&mut self, query_constraints: &QueryRegionConstraints<'tcx>) {
+ let QueryRegionConstraints { outlives, member_constraints } = query_constraints;
+
+        // Annoying: to invoke `self.to_region_vid`, we need access to
+        // `self.constraints`, but we also want to be mutating
+        // `self.constraints.member_constraints`. For now, just swap out the
+        // value we want and replace at the end.
+ let mut tmp = std::mem::take(&mut self.constraints.member_constraints);
+ for member_constraint in member_constraints {
+ tmp.push_constraint(member_constraint, |r| self.to_region_vid(r));
+ }
+ self.constraints.member_constraints = tmp;
+
+ for query_constraint in outlives {
+ self.convert(query_constraint);
+ }
+ }
+
+ pub(super) fn convert(&mut self, query_constraint: &QueryOutlivesConstraint<'tcx>) {
+ debug!("generate: constraints at: {:#?}", self.locations);
+
+ // Extract out various useful fields we'll need below.
+ let ConstraintConversion {
+ tcx, region_bound_pairs, implicit_region_bound, param_env, ..
+ } = *self;
+
+ // At the moment, we never generate any "higher-ranked"
+ // region constraints like `for<'a> 'a: 'b`. At some point
+ // when we move to universes, we will, and this assertion
+ // will start to fail.
+ let ty::OutlivesPredicate(k1, r2) = query_constraint.no_bound_vars().unwrap_or_else(|| {
+ bug!("query_constraint {:?} contained bound vars", query_constraint,);
+ });
+
+ match k1.unpack() {
+ GenericArgKind::Lifetime(r1) => {
+ let r1_vid = self.to_region_vid(r1);
+ let r2_vid = self.to_region_vid(r2);
+ self.add_outlives(r1_vid, r2_vid);
+ }
+
+ GenericArgKind::Type(mut t1) => {
+                // We don't actually use this origin for anything; the
+                // `TypeOutlives` code simply needs one to be supplied.
+ let origin = infer::RelateParamBound(DUMMY_SP, t1, None);
+
+ // Placeholder regions need to be converted now because it may
+ // create new region variables, which can't be done later when
+ // verifying these bounds.
+ if t1.has_placeholders() {
+ t1 = tcx.fold_regions(t1, |r, _| match *r {
+ ty::RePlaceholder(placeholder) => {
+ self.constraints.placeholder_region(self.infcx, placeholder)
+ }
+ _ => r,
+ });
+ }
+
+ TypeOutlives::new(
+ &mut *self,
+ tcx,
+ region_bound_pairs,
+ Some(implicit_region_bound),
+ param_env,
+ )
+ .type_must_outlive(origin, t1, r2);
+ }
+
+ GenericArgKind::Const(_) => {
+ // Consts cannot outlive one another, so we
+ // don't need to handle any relations here.
+ }
+ }
+ }
+
+ fn verify_to_type_test(
+ &mut self,
+ generic_kind: GenericKind<'tcx>,
+ region: ty::Region<'tcx>,
+ verify_bound: VerifyBound<'tcx>,
+ ) -> TypeTest<'tcx> {
+ let lower_bound = self.to_region_vid(region);
+
+ TypeTest { generic_kind, lower_bound, locations: self.locations, verify_bound }
+ }
+
+ fn to_region_vid(&mut self, r: ty::Region<'tcx>) -> ty::RegionVid {
+ if let ty::RePlaceholder(placeholder) = *r {
+ self.constraints.placeholder_region(self.infcx, placeholder).to_region_vid()
+ } else {
+ self.universal_regions.to_region_vid(r)
+ }
+ }
+
+ fn add_outlives(&mut self, sup: ty::RegionVid, sub: ty::RegionVid) {
+ self.constraints.outlives_constraints.push(OutlivesConstraint {
+ locations: self.locations,
+ category: self.category,
+ span: self.span,
+ sub,
+ sup,
+ variance_info: ty::VarianceDiagInfo::default(),
+ });
+ }
+
+ fn add_type_test(&mut self, type_test: TypeTest<'tcx>) {
+ debug!("add_type_test(type_test={:?})", type_test);
+ self.constraints.type_tests.push(type_test);
+ }
+}
+
+impl<'a, 'b, 'tcx> TypeOutlivesDelegate<'tcx> for &'a mut ConstraintConversion<'b, 'tcx> {
+ fn push_sub_region_constraint(
+ &mut self,
+ _origin: SubregionOrigin<'tcx>,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) {
+ let b = self.to_region_vid(b);
+ let a = self.to_region_vid(a);
+ self.add_outlives(b, a);
+ }
+
+ fn push_verify(
+ &mut self,
+ _origin: SubregionOrigin<'tcx>,
+ kind: GenericKind<'tcx>,
+ a: ty::Region<'tcx>,
+ bound: VerifyBound<'tcx>,
+ ) {
+ let type_test = self.verify_to_type_test(kind, a, bound);
+ self.add_type_test(type_test);
+ }
+}
diff --git a/compiler/rustc_borrowck/src/type_check/free_region_relations.rs b/compiler/rustc_borrowck/src/type_check/free_region_relations.rs
new file mode 100644
index 000000000..cc0318ede
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/free_region_relations.rs
@@ -0,0 +1,374 @@
+use rustc_data_structures::frozen::Frozen;
+use rustc_data_structures::transitive_relation::TransitiveRelation;
+use rustc_infer::infer::canonical::QueryRegionConstraints;
+use rustc_infer::infer::outlives;
+use rustc_infer::infer::outlives::env::RegionBoundPairs;
+use rustc_infer::infer::region_constraints::GenericKind;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::traits::query::OutlivesBound;
+use rustc_middle::ty::{self, RegionVid, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_trait_selection::traits::query::type_op::{self, TypeOp};
+use std::rc::Rc;
+use type_op::TypeOpOutput;
+
+use crate::{
+ type_check::constraint_conversion,
+ type_check::{Locations, MirTypeckRegionConstraints},
+ universal_regions::UniversalRegions,
+};
+
+#[derive(Debug)]
+pub(crate) struct UniversalRegionRelations<'tcx> {
+ universal_regions: Rc<UniversalRegions<'tcx>>,
+
+ /// Stores the outlives relations that are known to hold from the
+ /// implied bounds, in-scope where-clauses, and that sort of
+ /// thing.
+ outlives: TransitiveRelation<RegionVid>,
+
+ /// This is the `<=` relation; that is, if `a: b`, then `b <= a`,
+ /// and we store that here. This is useful when figuring out how
+ /// to express some local region in terms of external regions our
+ /// caller will understand.
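+    ///
+    /// For example (illustrative): recording `'static: 'a` adds the edge
+    /// `('static, 'a)` to `outlives` and the edge `('a, 'static)` here.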
+ inverse_outlives: TransitiveRelation<RegionVid>,
+}
+
+/// As part of computing the free region relations, we also have to
+/// normalize the input-output types, which we then need later. So we
+/// return those here. The vector consists of the input types followed by
+/// the output type as its last element.
+type NormalizedInputsAndOutput<'tcx> = Vec<Ty<'tcx>>;
+
+pub(crate) struct CreateResult<'tcx> {
+ pub(crate) universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
+ pub(crate) region_bound_pairs: RegionBoundPairs<'tcx>,
+ pub(crate) normalized_inputs_and_output: NormalizedInputsAndOutput<'tcx>,
+}
+
+pub(crate) fn create<'tcx>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ implicit_region_bound: ty::Region<'tcx>,
+ universal_regions: &Rc<UniversalRegions<'tcx>>,
+ constraints: &mut MirTypeckRegionConstraints<'tcx>,
+) -> CreateResult<'tcx> {
+ UniversalRegionRelationsBuilder {
+ infcx,
+ param_env,
+ implicit_region_bound,
+ constraints,
+ universal_regions: universal_regions.clone(),
+ region_bound_pairs: Default::default(),
+ relations: UniversalRegionRelations {
+ universal_regions: universal_regions.clone(),
+ outlives: Default::default(),
+ inverse_outlives: Default::default(),
+ },
+ }
+ .create()
+}
+
+impl UniversalRegionRelations<'_> {
+ /// Records in the `outlives_relation` (and
+ /// `inverse_outlives_relation`) that `fr_a: fr_b`. Invoked by the
+ /// builder below.
+ fn relate_universal_regions(&mut self, fr_a: RegionVid, fr_b: RegionVid) {
+ debug!("relate_universal_regions: fr_a={:?} outlives fr_b={:?}", fr_a, fr_b);
+ self.outlives.add(fr_a, fr_b);
+ self.inverse_outlives.add(fr_b, fr_a);
+ }
+
+ /// Given two universal regions, returns the postdominating
+ /// upper-bound (effectively the least upper bound).
+ ///
+ /// (See `TransitiveRelation::postdom_upper_bound` for details on
+ /// the postdominating upper bound in general.)
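+    ///
+    /// For example (illustrative): for two unrelated regions `'a` and `'b`
+    /// with no other region known to outlive both, this falls back to
+    /// `'static`.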
+ pub(crate) fn postdom_upper_bound(&self, fr1: RegionVid, fr2: RegionVid) -> RegionVid {
+ assert!(self.universal_regions.is_universal_region(fr1));
+ assert!(self.universal_regions.is_universal_region(fr2));
+ self.inverse_outlives
+ .postdom_upper_bound(fr1, fr2)
+ .unwrap_or(self.universal_regions.fr_static)
+ }
+
+ /// Finds an "upper bound" for `fr` that is not local. In other
+ /// words, returns the smallest (*) known region `fr1` that (a)
+ /// outlives `fr` and (b) is not local.
+ ///
+ /// (*) If there are multiple competing choices, we return all of them.
+    pub(crate) fn non_local_upper_bounds(&self, fr: RegionVid) -> Vec<RegionVid> {
+        debug!("non_local_upper_bounds(fr={:?})", fr);
+ let res = self.non_local_bounds(&self.inverse_outlives, fr);
+ assert!(!res.is_empty(), "can't find an upper bound!?");
+ res
+ }
+
+ /// Returns the "postdominating" bound of the set of
+ /// `non_local_upper_bounds` for the given region.
+ pub(crate) fn non_local_upper_bound(&self, fr: RegionVid) -> RegionVid {
+ let upper_bounds = self.non_local_upper_bounds(fr);
+
+        // In case we find more than one, reduce to one for
+        // convenience. This prevents us from generating more
+        // complex constraints, but it can cause spurious errors.
+ let post_dom = self.inverse_outlives.mutual_immediate_postdominator(upper_bounds);
+
+ debug!("non_local_bound: post_dom={:?}", post_dom);
+
+ post_dom
+ .and_then(|post_dom| {
+ // If the mutual immediate postdom is not local, then
+ // there is no non-local result we can return.
+ if !self.universal_regions.is_local_free_region(post_dom) {
+ Some(post_dom)
+ } else {
+ None
+ }
+ })
+ .unwrap_or(self.universal_regions.fr_static)
+ }
+
+ /// Finds a "lower bound" for `fr` that is not local. In other
+ /// words, returns the largest (*) known region `fr1` that (a) is
+ /// outlived by `fr` and (b) is not local.
+ ///
+ /// (*) If there are multiple competing choices, we pick the "postdominating"
+ /// one. See `TransitiveRelation::postdom_upper_bound` for details.
+ pub(crate) fn non_local_lower_bound(&self, fr: RegionVid) -> Option<RegionVid> {
+ debug!("non_local_lower_bound(fr={:?})", fr);
+ let lower_bounds = self.non_local_bounds(&self.outlives, fr);
+
+        // In case we find more than one, reduce to one for
+        // convenience. This prevents us from generating more
+        // complex constraints, but it can cause spurious errors.
+ let post_dom = self.outlives.mutual_immediate_postdominator(lower_bounds);
+
+ debug!("non_local_bound: post_dom={:?}", post_dom);
+
+ post_dom.and_then(|post_dom| {
+ // If the mutual immediate postdom is not local, then
+ // there is no non-local result we can return.
+ if !self.universal_regions.is_local_free_region(post_dom) {
+ Some(post_dom)
+ } else {
+ None
+ }
+ })
+ }
+
+    /// Helper for `non_local_upper_bounds` and `non_local_lower_bound`.
+    /// Repeatedly expands a region into its parents in `relation` until we
+    /// find regions that are not local, returning all of them.
+ fn non_local_bounds<'a>(
+ &self,
+ relation: &'a TransitiveRelation<RegionVid>,
+ fr0: RegionVid,
+ ) -> Vec<RegionVid> {
+ // This method assumes that `fr0` is one of the universally
+ // quantified region variables.
+ assert!(self.universal_regions.is_universal_region(fr0));
+
+ let mut external_parents = vec![];
+ let mut queue = vec![fr0];
+
+ // Keep expanding `fr` into its parents until we reach
+ // non-local regions.
+ while let Some(fr) = queue.pop() {
+ if !self.universal_regions.is_local_free_region(fr) {
+ external_parents.push(fr);
+ continue;
+ }
+
+ queue.extend(relation.parents(fr));
+ }
+
+ debug!("non_local_bound: external_parents={:?}", external_parents);
+
+ external_parents
+ }
+
+    /// Returns `true` if `fr1` is known to outlive `fr2`.
+ ///
+ /// This will only ever be true for universally quantified regions.
+ pub(crate) fn outlives(&self, fr1: RegionVid, fr2: RegionVid) -> bool {
+ self.outlives.contains(fr1, fr2)
+ }
+
+ /// Returns a vector of free regions `x` such that `fr1: x` is
+ /// known to hold.
+ pub(crate) fn regions_outlived_by(&self, fr1: RegionVid) -> Vec<RegionVid> {
+ self.outlives.reachable_from(fr1)
+ }
+
+ /// Returns the _non-transitive_ set of known `outlives` constraints between free regions.
+ pub(crate) fn known_outlives(&self) -> impl Iterator<Item = (RegionVid, RegionVid)> + '_ {
+ self.outlives.base_edges()
+ }
+}
+
+struct UniversalRegionRelationsBuilder<'this, 'tcx> {
+ infcx: &'this InferCtxt<'this, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ universal_regions: Rc<UniversalRegions<'tcx>>,
+ implicit_region_bound: ty::Region<'tcx>,
+ constraints: &'this mut MirTypeckRegionConstraints<'tcx>,
+
+ // outputs:
+ relations: UniversalRegionRelations<'tcx>,
+ region_bound_pairs: RegionBoundPairs<'tcx>,
+}
+
+impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> {
+ pub(crate) fn create(mut self) -> CreateResult<'tcx> {
+ let unnormalized_input_output_tys = self
+ .universal_regions
+ .unnormalized_input_tys
+ .iter()
+ .cloned()
+ .chain(Some(self.universal_regions.unnormalized_output_ty));
+
+ // For each of the input/output types:
+ // - Normalize the type. This will create some region
+ // constraints, which we buffer up because we are
+ // not ready to process them yet.
+ // - Then compute the implied bounds. This will adjust
+ // the `region_bound_pairs` and so forth.
+ // - After this is done, we'll process the constraints, once
+ // the `relations` is built.
+ let mut normalized_inputs_and_output =
+ Vec::with_capacity(self.universal_regions.unnormalized_input_tys.len() + 1);
+ let constraint_sets: Vec<_> = unnormalized_input_output_tys
+ .flat_map(|ty| {
+ debug!("build: input_or_output={:?}", ty);
+ // We only add implied bounds for the normalized type as the unnormalized
+ // type may not actually get checked by the caller.
+ //
+ // Can otherwise be unsound, see #91068.
+ let TypeOpOutput { output: norm_ty, constraints: constraints1, .. } = self
+ .param_env
+ .and(type_op::normalize::Normalize::new(ty))
+ .fully_perform(self.infcx)
+ .unwrap_or_else(|_| {
+ self.infcx
+ .tcx
+ .sess
+ .delay_span_bug(DUMMY_SP, &format!("failed to normalize {:?}", ty));
+ TypeOpOutput {
+ output: self.infcx.tcx.ty_error(),
+ constraints: None,
+ error_info: None,
+ }
+ });
+ // Note: we need this in examples like
+ // ```
+ // trait Foo {
+            //     type Bar;
+            //     fn foo(&self) -> &Self::Bar;
+            // }
+            // impl Foo for () {
+            //     type Bar = ();
+            //     fn foo(&self) -> &() {}
+            // }
+            // ```
+            // Both `&Self::Bar` and `&()` are WF.
+ let constraints_implied = self.add_implied_bounds(norm_ty);
+ normalized_inputs_and_output.push(norm_ty);
+ constraints1.into_iter().chain(constraints_implied)
+ })
+ .collect();
+
+ // Insert the facts we know from the predicates. Why? Why not.
+ let param_env = self.param_env;
+ self.add_outlives_bounds(outlives::explicit_outlives_bounds(param_env));
+
+ // Finally:
+ // - outlives is reflexive, so `'r: 'r` for every region `'r`
+ // - `'static: 'r` for every region `'r`
+ // - `'r: 'fn_body` for every (other) universally quantified
+ // region `'r`, all of which are provided by our caller
+ let fr_static = self.universal_regions.fr_static;
+ let fr_fn_body = self.universal_regions.fr_fn_body;
+ for fr in self.universal_regions.universal_regions() {
+ debug!("build: relating free region {:?} to itself and to 'static", fr);
+ self.relations.relate_universal_regions(fr, fr);
+ self.relations.relate_universal_regions(fr_static, fr);
+ self.relations.relate_universal_regions(fr, fr_fn_body);
+ }
+
+ for data in &constraint_sets {
+ constraint_conversion::ConstraintConversion::new(
+ self.infcx,
+ &self.universal_regions,
+ &self.region_bound_pairs,
+ self.implicit_region_bound,
+ self.param_env,
+ Locations::All(DUMMY_SP),
+ DUMMY_SP,
+ ConstraintCategory::Internal,
+ &mut self.constraints,
+ )
+ .convert_all(data);
+ }
+
+ CreateResult {
+ universal_region_relations: Frozen::freeze(self.relations),
+ region_bound_pairs: self.region_bound_pairs,
+ normalized_inputs_and_output,
+ }
+ }
+
+    /// Computes and registers the implied bounds for `ty`, which should
+    /// represent either the normalized return type of the MIR or one of its
+    /// normalized argument types. Returns any region constraints produced
+    /// while computing those bounds, so they can be converted later.
+ #[instrument(level = "debug", skip(self))]
+ fn add_implied_bounds(&mut self, ty: Ty<'tcx>) -> Option<&'tcx QueryRegionConstraints<'tcx>> {
+ let TypeOpOutput { output: bounds, constraints, .. } = self
+ .param_env
+ .and(type_op::implied_outlives_bounds::ImpliedOutlivesBounds { ty })
+ .fully_perform(self.infcx)
+ .unwrap_or_else(|_| bug!("failed to compute implied bounds {:?}", ty));
+ self.add_outlives_bounds(bounds);
+ constraints
+ }
+
+ /// Registers the `OutlivesBound` items from `outlives_bounds` in
+ /// the outlives relation as well as the region-bound pairs
+ /// listing.
+ fn add_outlives_bounds<I>(&mut self, outlives_bounds: I)
+ where
+ I: IntoIterator<Item = OutlivesBound<'tcx>>,
+ {
+ for outlives_bound in outlives_bounds {
+ debug!("add_outlives_bounds(bound={:?})", outlives_bound);
+
+ match outlives_bound {
+ OutlivesBound::RegionSubRegion(r1, r2) => {
+ // `where Type:` is lowered to `where Type: 'empty` so that
+ // we check `Type` is well formed, but there's no use for
+ // this bound here.
+ if r1.is_empty() {
+ return;
+ }
+
+ // The bound says that `r1 <= r2`; we store `r2: r1`.
+ let r1 = self.universal_regions.to_region_vid(r1);
+ let r2 = self.universal_regions.to_region_vid(r2);
+ self.relations.relate_universal_regions(r2, r1);
+ }
+
+ OutlivesBound::RegionSubParam(r_a, param_b) => {
+ self.region_bound_pairs
+ .insert(ty::OutlivesPredicate(GenericKind::Param(param_b), r_a));
+ }
+
+ OutlivesBound::RegionSubProjection(r_a, projection_b) => {
+ self.region_bound_pairs
+ .insert(ty::OutlivesPredicate(GenericKind::Projection(projection_b), r_a));
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/type_check/input_output.rs b/compiler/rustc_borrowck/src/type_check/input_output.rs
new file mode 100644
index 000000000..4431a2e8e
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/input_output.rs
@@ -0,0 +1,245 @@
+//! This module contains code to equate the input/output types appearing
+//! in the MIR with the expected input/output types from the function
+//! signature. This requires a bit of processing, as the expected types
+//! are supplied to us before normalization and may contain opaque
+//! `impl Trait` instances. In contrast, the input/output types found in
+//! the MIR (specifically, in the special local variables for the
+//! `RETURN_PLACE` and the MIR arguments) are always fully normalized (and
+//! contain revealed `impl Trait` values).
+
+use crate::type_check::constraint_conversion::ConstraintConversion;
+use rustc_index::vec::Idx;
+use rustc_infer::infer::LateBoundRegionConversionTime;
+use rustc_middle::mir::*;
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+use rustc_span::DUMMY_SP;
+use rustc_trait_selection::traits::query::type_op::{self, TypeOp};
+use rustc_trait_selection::traits::query::Fallible;
+use type_op::TypeOpOutput;
+
+use crate::universal_regions::UniversalRegions;
+
+use super::{Locations, TypeChecker};
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+ #[instrument(skip(self, body, universal_regions), level = "debug")]
+ pub(super) fn equate_inputs_and_outputs(
+ &mut self,
+ body: &Body<'tcx>,
+ universal_regions: &UniversalRegions<'tcx>,
+ normalized_inputs_and_output: &[Ty<'tcx>],
+ ) {
+ let (&normalized_output_ty, normalized_input_tys) =
+ normalized_inputs_and_output.split_last().unwrap();
+
+ debug!(?normalized_output_ty);
+ debug!(?normalized_input_tys);
+
+ let mir_def_id = body.source.def_id().expect_local();
+
+ // If the user explicitly annotated the input types, extract
+ // those.
+ //
+ // e.g., `|x: FxHashMap<_, &'static u32>| ...`
+        let user_provided_sig = if !self.tcx().is_closure(mir_def_id.to_def_id()) {
+            None
+        } else {
+            let typeck_results = self.tcx().typeck(mir_def_id);
+            typeck_results.user_provided_sigs.get(&mir_def_id.to_def_id()).map(
+                |user_provided_poly_sig| {
+                    // Instantiate the canonicalized variables from
+                    // user-provided signature (e.g., the `_` in the code
+                    // above) with fresh variables.
+                    let poly_sig = self.instantiate_canonical_with_fresh_inference_vars(
+                        body.span,
+                        &user_provided_poly_sig,
+                    );
+
+                    // Replace the bound items in the fn sig with fresh
+                    // variables, so that they represent the view from
+                    // "inside" the closure.
+                    self.infcx.replace_bound_vars_with_fresh_vars(
+                        body.span,
+                        LateBoundRegionConversionTime::FnCall,
+                        poly_sig,
+                    )
+                },
+            )
+        };
+
+ debug!(?normalized_input_tys, ?body.local_decls);
+
+ // Equate expected input tys with those in the MIR.
+ for (argument_index, &normalized_input_ty) in normalized_input_tys.iter().enumerate() {
+ if argument_index + 1 >= body.local_decls.len() {
+ self.tcx()
+ .sess
+ .delay_span_bug(body.span, "found more normalized_input_ty than local_decls");
+ break;
+ }
+
+ // In MIR, argument N is stored in local N+1.
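+            // (Local 0 is always the return place `_0`; the arguments follow
+            // as `_1..=_n`.)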
+ let local = Local::new(argument_index + 1);
+
+ let mir_input_ty = body.local_decls[local].ty;
+
+ let mir_input_span = body.local_decls[local].source_info.span;
+ self.equate_normalized_input_or_output(
+ normalized_input_ty,
+ mir_input_ty,
+ mir_input_span,
+ );
+ }
+
+ if let Some(user_provided_sig) = user_provided_sig {
+ for (argument_index, &user_provided_input_ty) in
+ user_provided_sig.inputs().iter().enumerate()
+ {
+                // In MIR, closures begin with an implicit `self` argument, so
+                // argument N is stored in local N+2.
+ let local = Local::new(argument_index + 2);
+ let mir_input_ty = body.local_decls[local].ty;
+ let mir_input_span = body.local_decls[local].source_info.span;
+
+ // If the user explicitly annotated the input types, enforce those.
+ let user_provided_input_ty =
+ self.normalize(user_provided_input_ty, Locations::All(mir_input_span));
+
+ self.equate_normalized_input_or_output(
+ user_provided_input_ty,
+ mir_input_ty,
+ mir_input_span,
+ );
+ }
+ }
+
+ debug!(
+ "equate_inputs_and_outputs: body.yield_ty {:?}, universal_regions.yield_ty {:?}",
+ body.yield_ty(),
+ universal_regions.yield_ty
+ );
+
+        // We will not have a `universal_regions.yield_ty` if we yield (by
+        // accident) outside of a generator and return an `impl Trait`, so
+        // emit a `delay_span_bug` because we don't want to panic in an
+        // assert here if we've already got errors.
+ if body.yield_ty().is_some() != universal_regions.yield_ty.is_some() {
+ self.tcx().sess.delay_span_bug(
+ body.span,
+ &format!(
+ "Expected body to have yield_ty ({:?}) iff we have a UR yield_ty ({:?})",
+ body.yield_ty(),
+ universal_regions.yield_ty,
+ ),
+ );
+ }
+
+ if let (Some(mir_yield_ty), Some(ur_yield_ty)) =
+ (body.yield_ty(), universal_regions.yield_ty)
+ {
+ let yield_span = body.local_decls[RETURN_PLACE].source_info.span;
+ self.equate_normalized_input_or_output(ur_yield_ty, mir_yield_ty, yield_span);
+ }
+
+ // Return types are a bit more complex. They may contain opaque `impl Trait` types.
+ let mir_output_ty = body.local_decls[RETURN_PLACE].ty;
+ let output_span = body.local_decls[RETURN_PLACE].source_info.span;
+ if let Err(terr) = self.eq_types(
+ normalized_output_ty,
+ mir_output_ty,
+ Locations::All(output_span),
+ ConstraintCategory::BoringNoLocation,
+ ) {
+ span_mirbug!(
+ self,
+ Location::START,
+ "equate_inputs_and_outputs: `{:?}=={:?}` failed with `{:?}`",
+ normalized_output_ty,
+ mir_output_ty,
+ terr
+ );
+ };
+
+ // If the user explicitly annotated the output types, enforce those.
+ // Note that this only happens for closures.
+ if let Some(user_provided_sig) = user_provided_sig {
+ let user_provided_output_ty = user_provided_sig.output();
+ let user_provided_output_ty =
+ self.normalize(user_provided_output_ty, Locations::All(output_span));
+ if let Err(err) = self.eq_types(
+ user_provided_output_ty,
+ mir_output_ty,
+ Locations::All(output_span),
+ ConstraintCategory::BoringNoLocation,
+ ) {
+ span_mirbug!(
+ self,
+ Location::START,
+ "equate_inputs_and_outputs: `{:?}=={:?}` failed with `{:?}`",
+ mir_output_ty,
+ user_provided_output_ty,
+ err
+ );
+ }
+ }
+ }
+
+ #[instrument(skip(self, span), level = "debug")]
+ fn equate_normalized_input_or_output(&mut self, a: Ty<'tcx>, b: Ty<'tcx>, span: Span) {
+ if let Err(_) =
+ self.eq_types(a, b, Locations::All(span), ConstraintCategory::BoringNoLocation)
+ {
+ // FIXME(jackh726): This is a hack. It's somewhat like
+ // `rustc_traits::normalize_after_erasing_regions`. Ideally, we'd
+ // like to normalize *before* inserting into `local_decls`, but
+ // doing so ends up causing some other trouble.
+ let b = match self.normalize_and_add_constraints(b) {
+ Ok(n) => n,
+ Err(_) => {
+ debug!("equate_inputs_and_outputs: NoSolution");
+ b
+ }
+ };
+
+ // Note: if we have to introduce new placeholders during normalization above, then we won't have
+ // added those universes to the universe info, which we would want in `relate_tys`.
+ if let Err(terr) =
+ self.eq_types(a, b, Locations::All(span), ConstraintCategory::BoringNoLocation)
+ {
+ span_mirbug!(
+ self,
+ Location::START,
+ "equate_normalized_input_or_output: `{:?}=={:?}` failed with `{:?}`",
+ a,
+ b,
+ terr
+ );
+ }
+ }
+ }
+
+ pub(crate) fn normalize_and_add_constraints(&mut self, t: Ty<'tcx>) -> Fallible<Ty<'tcx>> {
+ let TypeOpOutput { output: norm_ty, constraints, .. } =
+ self.param_env.and(type_op::normalize::Normalize::new(t)).fully_perform(self.infcx)?;
+
+ debug!("{:?} normalized to {:?}", t, norm_ty);
+
+ for data in constraints {
+ ConstraintConversion::new(
+ self.infcx,
+ &self.borrowck_context.universal_regions,
+ &self.region_bound_pairs,
+ self.implicit_region_bound,
+ self.param_env,
+ Locations::All(DUMMY_SP),
+ DUMMY_SP,
+ ConstraintCategory::Internal,
+ &mut self.borrowck_context.constraints,
+ )
+ .convert_all(&*data);
+ }
+
+ Ok(norm_ty)
+ }
+}
diff --git a/compiler/rustc_borrowck/src/type_check/liveness/local_use_map.rs b/compiler/rustc_borrowck/src/type_check/liveness/local_use_map.rs
new file mode 100644
index 000000000..fda2cee43
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/liveness/local_use_map.rs
@@ -0,0 +1,170 @@
+use rustc_data_structures::vec_linked_list as vll;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location};
+
+use crate::def_use::{self, DefUse};
+use crate::region_infer::values::{PointIndex, RegionValueElements};
+
+/// A map that cross references each local with the locations where it
+/// is defined (assigned), used, or dropped. Used during liveness
+/// computation.
+///
+/// We keep track only of the `Local`s that the liveness analysis will be
+/// run on later; this means that our internal `IndexVec`s will only be
+/// sparsely populated.
+/// In the time-memory trade-off between keeping compact vectors with new
+/// indexes (and needing to continuously map the `Local` index to its compact
+/// counterpart) and having `IndexVec`s that we only use a fraction of, time
+/// (and code simplicity) was favored. The rationale is that we only keep
+/// a small number of `IndexVec`s throughout the entire analysis while, in
+/// contrast, we're accessing each `Local` *many* times.
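+///
+/// For example (illustrative): for the MIR `x = y; x = z`, the list headed
+/// by `first_def_at[x]` has two entries, with the appearance for the second
+/// assignment first, since new appearances are prepended as the body is
+/// visited in order.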
+pub(crate) struct LocalUseMap {
+ /// Head of a linked list of **definitions** of each variable --
+ /// definition in this context means assignment, e.g., `x` is
+ /// defined in `x = y` but not `y`; that first def is the head of
+ /// a linked list that lets you enumerate all places the variable
+ /// is assigned.
+ first_def_at: IndexVec<Local, Option<AppearanceIndex>>,
+
+ /// Head of a linked list of **uses** of each variable -- use in
+ /// this context means that the existing value of the variable is
+ /// read or modified. e.g., `y` is used in `x = y` but not `x`.
+ /// Note that `DROP(x)` terminators are excluded from this list.
+ first_use_at: IndexVec<Local, Option<AppearanceIndex>>,
+
+ /// Head of a linked list of **drops** of each variable -- these
+ /// are a special category of uses corresponding to the drop that
+ /// we add for each local variable.
+ first_drop_at: IndexVec<Local, Option<AppearanceIndex>>,
+
+ appearances: IndexVec<AppearanceIndex, Appearance>,
+}
+
+struct Appearance {
+ point_index: PointIndex,
+ next: Option<AppearanceIndex>,
+}
+
+rustc_index::newtype_index! {
+ pub struct AppearanceIndex { .. }
+}
+
+impl vll::LinkElem for Appearance {
+ type LinkIndex = AppearanceIndex;
+
+ fn next(elem: &Self) -> Option<AppearanceIndex> {
+ elem.next
+ }
+}
+
+impl LocalUseMap {
+ pub(crate) fn build(
+ live_locals: &[Local],
+ elements: &RegionValueElements,
+ body: &Body<'_>,
+ ) -> Self {
+ let nones = IndexVec::from_elem_n(None, body.local_decls.len());
+ let mut local_use_map = LocalUseMap {
+ first_def_at: nones.clone(),
+ first_use_at: nones.clone(),
+ first_drop_at: nones,
+ appearances: IndexVec::new(),
+ };
+
+ if live_locals.is_empty() {
+ return local_use_map;
+ }
+
+ let mut locals_with_use_data: IndexVec<Local, bool> =
+ IndexVec::from_elem_n(false, body.local_decls.len());
+ live_locals.iter().for_each(|&local| locals_with_use_data[local] = true);
+
+ LocalUseMapBuild { local_use_map: &mut local_use_map, elements, locals_with_use_data }
+ .visit_body(&body);
+
+ local_use_map
+ }
+
+ pub(crate) fn defs(&self, local: Local) -> impl Iterator<Item = PointIndex> + '_ {
+ vll::iter(self.first_def_at[local], &self.appearances)
+ .map(move |aa| self.appearances[aa].point_index)
+ }
+
+ pub(crate) fn uses(&self, local: Local) -> impl Iterator<Item = PointIndex> + '_ {
+ vll::iter(self.first_use_at[local], &self.appearances)
+ .map(move |aa| self.appearances[aa].point_index)
+ }
+
+ pub(crate) fn drops(&self, local: Local) -> impl Iterator<Item = PointIndex> + '_ {
+ vll::iter(self.first_drop_at[local], &self.appearances)
+ .map(move |aa| self.appearances[aa].point_index)
+ }
+}
+
+struct LocalUseMapBuild<'me> {
+ local_use_map: &'me mut LocalUseMap,
+ elements: &'me RegionValueElements,
+
+    // Vector used in `visit_local` to signal which `Local`s we need
+    // def/use/drop information on; constructed from `live_locals` (which
+    // contains the variables we'll do the liveness analysis for).
+ // This vector serves optimization purposes only: we could have
+ // obtained the same information from `live_locals` but we want to
+ // avoid repeatedly calling `Vec::contains()` (see `LocalUseMap` for
+ // the rationale on the time-memory trade-off we're favoring here).
+ locals_with_use_data: IndexVec<Local, bool>,
+}
+
+impl LocalUseMapBuild<'_> {
+ fn insert_def(&mut self, local: Local, location: Location) {
+ Self::insert(
+ self.elements,
+ &mut self.local_use_map.first_def_at[local],
+ &mut self.local_use_map.appearances,
+ location,
+ );
+ }
+
+ fn insert_use(&mut self, local: Local, location: Location) {
+ Self::insert(
+ self.elements,
+ &mut self.local_use_map.first_use_at[local],
+ &mut self.local_use_map.appearances,
+ location,
+ );
+ }
+
+ fn insert_drop(&mut self, local: Local, location: Location) {
+ Self::insert(
+ self.elements,
+ &mut self.local_use_map.first_drop_at[local],
+ &mut self.local_use_map.appearances,
+ location,
+ );
+ }
+
+ fn insert(
+ elements: &RegionValueElements,
+ first_appearance: &mut Option<AppearanceIndex>,
+ appearances: &mut IndexVec<AppearanceIndex, Appearance>,
+ location: Location,
+ ) {
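+        // Prepend to the per-local linked list: the new appearance links to
+        // the old head and then becomes the head itself.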
+ let point_index = elements.point_from_location(location);
+ let appearance_index =
+ appearances.push(Appearance { point_index, next: *first_appearance });
+ *first_appearance = Some(appearance_index);
+ }
+}
+
+impl Visitor<'_> for LocalUseMapBuild<'_> {
+ fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
+ if self.locals_with_use_data[local] {
+ match def_use::categorize(context) {
+ Some(DefUse::Def) => self.insert_def(local, location),
+ Some(DefUse::Use) => self.insert_use(local, location),
+ Some(DefUse::Drop) => self.insert_drop(local, location),
+ _ => (),
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/type_check/liveness/mod.rs b/compiler/rustc_borrowck/src/type_check/liveness/mod.rs
new file mode 100644
index 000000000..d5c401ae1
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/liveness/mod.rs
@@ -0,0 +1,139 @@
+use itertools::{Either, Itertools};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::mir::{Body, Local};
+use rustc_middle::ty::{RegionVid, TyCtxt};
+use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
+use rustc_mir_dataflow::move_paths::MoveData;
+use rustc_mir_dataflow::ResultsCursor;
+use std::rc::Rc;
+
+use crate::{
+ constraints::OutlivesConstraintSet,
+ facts::{AllFacts, AllFactsExt},
+ location::LocationTable,
+ nll::ToRegionVid,
+ region_infer::values::RegionValueElements,
+ universal_regions::UniversalRegions,
+};
+
+use super::TypeChecker;
+
+mod local_use_map;
+mod polonius;
+mod trace;
+
+/// Combines liveness analysis with initialization analysis to
+/// determine which variables are live at which points, both due to
+/// ordinary uses and drops. The resulting liveness facts, which say which
+/// types must be live at which points in the CFG, are added directly to the
+/// type checker's liveness constraints.
+///
+/// N.B., this computation requires normalization; therefore, it must be
+/// performed as part of MIR type checking, while the inference context is
+/// still available.
+pub(super) fn generate<'mir, 'tcx>(
+ typeck: &mut TypeChecker<'_, 'tcx>,
+ body: &Body<'tcx>,
+ elements: &Rc<RegionValueElements>,
+ flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
+ move_data: &MoveData<'tcx>,
+ location_table: &LocationTable,
+ use_polonius: bool,
+) {
+ debug!("liveness::generate");
+
+ let free_regions = regions_that_outlive_free_regions(
+ typeck.infcx.num_region_vars(),
+ &typeck.borrowck_context.universal_regions,
+ &typeck.borrowck_context.constraints.outlives_constraints,
+ );
+ let (relevant_live_locals, boring_locals) =
+ compute_relevant_live_locals(typeck.tcx(), &free_regions, &body);
+ let facts_enabled = use_polonius || AllFacts::enabled(typeck.tcx());
+
+ let polonius_drop_used = if facts_enabled {
+ let mut drop_used = Vec::new();
+ polonius::populate_access_facts(typeck, body, location_table, move_data, &mut drop_used);
+ Some(drop_used)
+ } else {
+ None
+ };
+
+ trace::trace(
+ typeck,
+ body,
+ elements,
+ flow_inits,
+ move_data,
+ relevant_live_locals,
+ boring_locals,
+ polonius_drop_used,
+ );
+}
+
+// The purpose of `compute_relevant_live_locals` is to define the subset of `Local`
+// variables for which we need to do a liveness computation. We only need
+// to compute whether a variable `X` is live if that variable contains
+// some region `R` in its type where `R` is not known to outlive a free
+// region (i.e., where `R` may be valid for just a subset of the fn body).
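+//
+// For example (illustrative): a local of type `&'static u32` mentions only a
+// region known to outlive the free regions and is "boring", while a local of
+// type `&'_#5r u32`, where `'_#5r` is not known to outlive a free region,
+// needs the liveness computation.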
+fn compute_relevant_live_locals<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ free_regions: &FxHashSet<RegionVid>,
+ body: &Body<'tcx>,
+) -> (Vec<Local>, Vec<Local>) {
+ let (boring_locals, relevant_live_locals): (Vec<_>, Vec<_>) =
+ body.local_decls.iter_enumerated().partition_map(|(local, local_decl)| {
+ if tcx.all_free_regions_meet(&local_decl.ty, |r| {
+ free_regions.contains(&r.to_region_vid())
+ }) {
+ Either::Left(local)
+ } else {
+ Either::Right(local)
+ }
+ });
+
+ debug!("{} total variables", body.local_decls.len());
+ debug!("{} variables need liveness", relevant_live_locals.len());
+ debug!("{} regions outlive free regions", free_regions.len());
+
+ (relevant_live_locals, boring_locals)
+}
+
+/// Computes all regions that are (currently) known to outlive free
+/// regions. For these regions, we do not need to compute
+/// liveness, since the outlives constraints will ensure that they
+/// are live over the whole fn body anyhow.
+fn regions_that_outlive_free_regions<'tcx>(
+ num_region_vars: usize,
+ universal_regions: &UniversalRegions<'tcx>,
+ constraint_set: &OutlivesConstraintSet<'tcx>,
+) -> FxHashSet<RegionVid> {
+ // Build a graph of the outlives constraints thus far. This is
+ // a reverse graph, so for each constraint `R1: R2` we have an
+ // edge `R2 -> R1`. Therefore, if we find all regions
+ // reachable from each free region, we will have all the
+ // regions that are forced to outlive some free region.
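+    //
+    // For example (illustrative): the constraint `R1: 'a` yields the reverse
+    // edge `'a -> R1`, so the DFS started from the free region `'a` reaches
+    // `R1` and records that it outlives a free region.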
+ let rev_constraint_graph = constraint_set.reverse_graph(num_region_vars);
+ let fr_static = universal_regions.fr_static;
+ let rev_region_graph = rev_constraint_graph.region_graph(constraint_set, fr_static);
+
+ // Stack for the depth-first search. Start out with all the free regions.
+ let mut stack: Vec<_> = universal_regions.universal_regions().collect();
+
+ // Set of all free regions, plus anything that outlives them. Initially
+ // just contains the free regions.
+ let mut outlives_free_region: FxHashSet<_> = stack.iter().cloned().collect();
+
+    // Do the DFS -- for each thing in the stack, find all things
+    // that outlive it and add them to the set. Anything not already in
+    // the set is also pushed onto the stack for later processing.
+ while let Some(sub_region) = stack.pop() {
+ stack.extend(
+ rev_region_graph
+ .outgoing_regions(sub_region)
+ .filter(|&r| outlives_free_region.insert(r)),
+ );
+ }
+
+ // Return the final set of things we visited.
+ outlives_free_region
+}
diff --git a/compiler/rustc_borrowck/src/type_check/liveness/polonius.rs b/compiler/rustc_borrowck/src/type_check/liveness/polonius.rs
new file mode 100644
index 000000000..bc76a465e
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/liveness/polonius.rs
@@ -0,0 +1,140 @@
+use crate::def_use::{self, DefUse};
+use crate::location::{LocationIndex, LocationTable};
+use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{Body, Local, Location, Place};
+use rustc_middle::ty::subst::GenericArg;
+use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
+
+use super::TypeChecker;
+
+type VarPointRelation = Vec<(Local, LocationIndex)>;
+type PathPointRelation = Vec<(MovePathIndex, LocationIndex)>;
+
+struct UseFactsExtractor<'me, 'tcx> {
+ var_defined_at: &'me mut VarPointRelation,
+ var_used_at: &'me mut VarPointRelation,
+ location_table: &'me LocationTable,
+ var_dropped_at: &'me mut VarPointRelation,
+ move_data: &'me MoveData<'tcx>,
+ path_accessed_at_base: &'me mut PathPointRelation,
+}
+
+// A visitor that walks through the MIR and extracts point-wise facts.
+impl UseFactsExtractor<'_, '_> {
+ fn location_to_index(&self, location: Location) -> LocationIndex {
+ self.location_table.mid_index(location)
+ }
+
+ fn insert_def(&mut self, local: Local, location: Location) {
+ debug!("UseFactsExtractor::insert_def()");
+ self.var_defined_at.push((local, self.location_to_index(location)));
+ }
+
+ fn insert_use(&mut self, local: Local, location: Location) {
+ debug!("UseFactsExtractor::insert_use()");
+ self.var_used_at.push((local, self.location_to_index(location)));
+ }
+
+ fn insert_drop_use(&mut self, local: Local, location: Location) {
+ debug!("UseFactsExtractor::insert_drop_use()");
+ self.var_dropped_at.push((local, self.location_to_index(location)));
+ }
+
+ fn insert_path_access(&mut self, path: MovePathIndex, location: Location) {
+ debug!("UseFactsExtractor::insert_path_access({:?}, {:?})", path, location);
+ self.path_accessed_at_base.push((path, self.location_to_index(location)));
+ }
+
+ fn place_to_mpi(&self, place: &Place<'_>) -> Option<MovePathIndex> {
+ match self.move_data.rev_lookup.find(place.as_ref()) {
+ LookupResult::Exact(mpi) => Some(mpi),
+ LookupResult::Parent(mmpi) => mmpi,
+ }
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for UseFactsExtractor<'a, 'tcx> {
+ fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
+ match def_use::categorize(context) {
+ Some(DefUse::Def) => self.insert_def(local, location),
+ Some(DefUse::Use) => self.insert_use(local, location),
+ Some(DefUse::Drop) => self.insert_drop_use(local, location),
+ _ => (),
+ }
+ }
+
+ fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
+ self.super_place(place, context, location);
+ match context {
+ PlaceContext::NonMutatingUse(_) => {
+ if let Some(mpi) = self.place_to_mpi(place) {
+ self.insert_path_access(mpi, location);
+ }
+ }
+
+ PlaceContext::MutatingUse(MutatingUseContext::Borrow) => {
+ if let Some(mpi) = self.place_to_mpi(place) {
+ self.insert_path_access(mpi, location);
+ }
+ }
+ _ => (),
+ }
+ }
+}
+
+pub(super) fn populate_access_facts<'a, 'tcx>(
+ typeck: &mut TypeChecker<'a, 'tcx>,
+ body: &Body<'tcx>,
+ location_table: &LocationTable,
+ move_data: &MoveData<'tcx>,
+ dropped_at: &mut Vec<(Local, Location)>,
+) {
+ debug!("populate_access_facts()");
+
+ if let Some(facts) = typeck.borrowck_context.all_facts.as_mut() {
+ let mut extractor = UseFactsExtractor {
+ var_defined_at: &mut facts.var_defined_at,
+ var_used_at: &mut facts.var_used_at,
+ var_dropped_at: &mut facts.var_dropped_at,
+ path_accessed_at_base: &mut facts.path_accessed_at_base,
+ location_table,
+ move_data,
+ };
+ extractor.visit_body(&body);
+
+ facts.var_dropped_at.extend(
+ dropped_at.iter().map(|&(local, location)| (local, location_table.mid_index(location))),
+ );
+
+ for (local, local_decl) in body.local_decls.iter_enumerated() {
+ debug!(
+ "add use_of_var_derefs_origin facts - local={:?}, type={:?}",
+ local, local_decl.ty
+ );
+ let _prof_timer = typeck.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+ let universal_regions = &typeck.borrowck_context.universal_regions;
+ typeck.infcx.tcx.for_each_free_region(&local_decl.ty, |region| {
+ let region_vid = universal_regions.to_region_vid(region);
+ facts.use_of_var_derefs_origin.push((local, region_vid));
+ });
+ }
+ }
+}
+
+// For every potentially drop()-touched region `region` in `local`'s type
+// (`kind`), emit a Polonius `drop_of_var_derefs_origin(local, origin)` fact.
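+//
+// For example, given a local `x: W<&'a u32>` where `W`'s destructor may read
+// the reference, this emits the fact `drop_of_var_derefs_origin(x, 'a)`.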
+pub(super) fn add_drop_of_var_derefs_origin<'tcx>(
+ typeck: &mut TypeChecker<'_, 'tcx>,
+ local: Local,
+ kind: &GenericArg<'tcx>,
+) {
+ debug!("add_drop_of_var_derefs_origin(local={:?}, kind={:?}", local, kind);
+ if let Some(facts) = typeck.borrowck_context.all_facts.as_mut() {
+ let _prof_timer = typeck.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+ let universal_regions = &typeck.borrowck_context.universal_regions;
+ typeck.infcx.tcx.for_each_free_region(kind, |drop_live_region| {
+ let region_vid = universal_regions.to_region_vid(drop_live_region);
+ facts.drop_of_var_derefs_origin.push((local, region_vid));
+ });
+ }
+}
diff --git a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs
new file mode 100644
index 000000000..42b577175
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs
@@ -0,0 +1,578 @@
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_index::bit_set::HybridBitSet;
+use rustc_index::interval::IntervalSet;
+use rustc_infer::infer::canonical::QueryRegionConstraints;
+use rustc_middle::mir::{BasicBlock, Body, ConstraintCategory, Local, Location};
+use rustc_middle::ty::{Ty, TypeVisitable};
+use rustc_trait_selection::traits::query::dropck_outlives::DropckOutlivesResult;
+use rustc_trait_selection::traits::query::type_op::outlives::DropckOutlives;
+use rustc_trait_selection::traits::query::type_op::{TypeOp, TypeOpOutput};
+use std::rc::Rc;
+
+use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
+use rustc_mir_dataflow::move_paths::{HasMoveData, MoveData, MovePathIndex};
+use rustc_mir_dataflow::ResultsCursor;
+
+use crate::{
+ region_infer::values::{self, PointIndex, RegionValueElements},
+ type_check::liveness::local_use_map::LocalUseMap,
+ type_check::liveness::polonius,
+ type_check::NormalizeLocation,
+ type_check::TypeChecker,
+};
+
+/// This is the heart of the liveness computation. For each variable X
+/// that requires a liveness computation, it walks over all the uses
+/// of X and does a reverse depth-first search ("trace") through the
+/// MIR. This search stops when we find a definition of that variable.
+/// The points visited in this search form the USE-LIVE set for the variable;
+/// each of those points is added to all the regions that appear in the
+/// variable's type.
+///
+/// We then also walk through each *drop* of those variables and do
+/// another search, stopping when we reach a use or definition. This
+/// is the DROP-LIVE set of points. Each of the points in the
+/// DROP-LIVE set is added to the liveness sets for regions found in the
+/// `dropck_outlives` result of the variable's type (in particular,
+/// this respects `#[may_dangle]` annotations).
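+///
+/// For example, in a simplified single-block MIR sketch:
+///
+/// ```text
+/// BB0 {
+///     X = ...      // definition: backward searches stop here
+///     use(X);      // X is USE-LIVE from the definition up to this use
+///     ...          // X is DROP-LIVE here: no later full use remains, ...
+///     DROP(X)      // ... but this drop may still run X's destructor
+/// }
+/// ```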
+pub(super) fn trace<'mir, 'tcx>(
+ typeck: &mut TypeChecker<'_, 'tcx>,
+ body: &Body<'tcx>,
+ elements: &Rc<RegionValueElements>,
+ flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
+ move_data: &MoveData<'tcx>,
+ relevant_live_locals: Vec<Local>,
+ boring_locals: Vec<Local>,
+ polonius_drop_used: Option<Vec<(Local, Location)>>,
+) {
+ debug!("trace()");
+
+ let local_use_map = &LocalUseMap::build(&relevant_live_locals, elements, body);
+
+ let cx = LivenessContext {
+ typeck,
+ body,
+ flow_inits,
+ elements,
+ local_use_map,
+ move_data,
+ drop_data: FxHashMap::default(),
+ };
+
+ let mut results = LivenessResults::new(cx);
+
+ if let Some(drop_used) = polonius_drop_used {
+ results.add_extra_drop_facts(drop_used, relevant_live_locals.iter().copied().collect())
+ }
+
+ results.compute_for_all_locals(relevant_live_locals);
+
+ results.dropck_boring_locals(boring_locals);
+}
+
+/// Contextual state for the type-liveness generator.
+struct LivenessContext<'me, 'typeck, 'flow, 'tcx> {
+ /// Current type-checker, giving us our inference context etc.
+ typeck: &'me mut TypeChecker<'typeck, 'tcx>,
+
+ /// Defines the `PointIndex` mapping
+ elements: &'me RegionValueElements,
+
+ /// MIR we are analyzing.
+ body: &'me Body<'tcx>,
+
+ /// Mapping to/from the various indices used for initialization tracking.
+ move_data: &'me MoveData<'tcx>,
+
+ /// Cache for the results of `dropck_outlives` query.
+ drop_data: FxHashMap<Ty<'tcx>, DropData<'tcx>>,
+
+ /// Results of dataflow tracking which variables (and paths) have been
+ /// initialized.
+ flow_inits: &'me mut ResultsCursor<'flow, 'tcx, MaybeInitializedPlaces<'flow, 'tcx>>,
+
+ /// Index indicating where each variable is assigned, used, or
+ /// dropped.
+ local_use_map: &'me LocalUseMap,
+}
+
+struct DropData<'tcx> {
+ dropck_result: DropckOutlivesResult<'tcx>,
+ region_constraint_data: Option<&'tcx QueryRegionConstraints<'tcx>>,
+}
+
+struct LivenessResults<'me, 'typeck, 'flow, 'tcx> {
+ cx: LivenessContext<'me, 'typeck, 'flow, 'tcx>,
+
+ /// Set of points that define the current local.
+ defs: HybridBitSet<PointIndex>,
+
+ /// Points where the current variable is "use live" -- meaning
+ /// that there is a future "full use" that may use its value.
+ use_live_at: IntervalSet<PointIndex>,
+
+ /// Points where the current variable is "drop live" -- meaning
+ /// that there is no future "full use" that may use its value, but
+ /// there is a future drop.
+ drop_live_at: IntervalSet<PointIndex>,
+
+ /// Locations where drops may occur.
+ drop_locations: Vec<Location>,
+
+ /// Stack used when doing (reverse) DFS.
+ stack: Vec<PointIndex>,
+}
+
+impl<'me, 'typeck, 'flow, 'tcx> LivenessResults<'me, 'typeck, 'flow, 'tcx> {
+ fn new(cx: LivenessContext<'me, 'typeck, 'flow, 'tcx>) -> Self {
+ let num_points = cx.elements.num_points();
+ LivenessResults {
+ cx,
+ defs: HybridBitSet::new_empty(num_points),
+ use_live_at: IntervalSet::new(num_points),
+ drop_live_at: IntervalSet::new(num_points),
+ drop_locations: vec![],
+ stack: vec![],
+ }
+ }
+
+ fn compute_for_all_locals(&mut self, relevant_live_locals: Vec<Local>) {
+ for local in relevant_live_locals {
+ self.reset_local_state();
+ self.add_defs_for(local);
+ self.compute_use_live_points_for(local);
+ self.compute_drop_live_points_for(local);
+
+ let local_ty = self.cx.body.local_decls[local].ty;
+
+ if !self.use_live_at.is_empty() {
+ self.cx.add_use_live_facts_for(local_ty, &self.use_live_at);
+ }
+
+ if !self.drop_live_at.is_empty() {
+ self.cx.add_drop_live_facts_for(
+ local,
+ local_ty,
+ &self.drop_locations,
+ &self.drop_live_at,
+ );
+ }
+ }
+ }
+
+ // Runs dropck for locals whose liveness isn't relevant. This is
+ // necessary to eagerly detect unbound recursion during drop glue computation.
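+    // (For example, a polymorphically recursive type such as
+    // `struct S<T>(Option<Box<S<(T, T)>>>)` would make `dropck_outlives`
+    // recurse without bound; `report_overflows` below turns that into an
+    // error rather than an endless computation.)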
+ fn dropck_boring_locals(&mut self, boring_locals: Vec<Local>) {
+ for local in boring_locals {
+ let local_ty = self.cx.body.local_decls[local].ty;
+ let drop_data = self.cx.drop_data.entry(local_ty).or_insert_with({
+ let typeck = &mut self.cx.typeck;
+ move || LivenessContext::compute_drop_data(typeck, local_ty)
+ });
+
+ drop_data.dropck_result.report_overflows(
+ self.cx.typeck.infcx.tcx,
+ self.cx.body.local_decls[local].source_info.span,
+ local_ty,
+ );
+ }
+ }
+
+ /// Add extra drop facts needed for Polonius.
+ ///
+ /// Add facts for all locals with free regions, since regions may outlive
+ /// the function body only at certain nodes in the CFG.
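+    ///
+    /// (Locals already in `relevant_live_locals` are skipped here, since the
+    /// main liveness trace computes their drop-liveness precisely.)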
+ fn add_extra_drop_facts(
+ &mut self,
+ drop_used: Vec<(Local, Location)>,
+ relevant_live_locals: FxHashSet<Local>,
+ ) {
+ let locations = IntervalSet::new(self.cx.elements.num_points());
+
+ for (local, location) in drop_used {
+ if !relevant_live_locals.contains(&local) {
+ let local_ty = self.cx.body.local_decls[local].ty;
+ if local_ty.has_free_regions() {
+ self.cx.add_drop_live_facts_for(local, local_ty, &[location], &locations);
+ }
+ }
+ }
+ }
+
+ /// Clear the value of fields that are "per local variable".
+ fn reset_local_state(&mut self) {
+ self.defs.clear();
+ self.use_live_at.clear();
+ self.drop_live_at.clear();
+ self.drop_locations.clear();
+ assert!(self.stack.is_empty());
+ }
+
+ /// Adds the definitions of `local` into `self.defs`.
+ fn add_defs_for(&mut self, local: Local) {
+ for def in self.cx.local_use_map.defs(local) {
+ debug!("- defined at {:?}", def);
+ self.defs.insert(def);
+ }
+ }
+
+ /// Computes all points where local is "use live" -- meaning its
+ /// current value may be used later (except by a drop). This is
+ /// done by walking backwards from each use of `local` until we
+ /// find a `def` of local.
+ ///
+ /// Requires `add_defs_for(local)` to have been executed.
+ fn compute_use_live_points_for(&mut self, local: Local) {
+ debug!("compute_use_live_points_for(local={:?})", local);
+
+ self.stack.extend(self.cx.local_use_map.uses(local));
+ while let Some(p) = self.stack.pop() {
+ // We are live in this block from the closest to us of:
+ //
+ // * Inclusively, the block start
+ // * Exclusively, the previous definition (if it's in this block)
+ // * Exclusively, the previous live_at setting (an optimization)
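+            //
+            // For example, if the block holds points `[s0, s1, s2, s3]`, the
+            // local is defined at `s1`, and `p` is a use at `s3`, then only
+            // `s2..=s3` is marked live and the walk stops; with no previous
+            // def or live point in the block, `s0..=s3` is marked live and
+            // the search continues into the predecessor blocks.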
+ let block_start = self.cx.elements.to_block_start(p);
+ let previous_defs = self.defs.last_set_in(block_start..=p);
+ let previous_live_at = self.use_live_at.last_set_in(block_start..=p);
+
+ let exclusive_start = match (previous_defs, previous_live_at) {
+ (Some(a), Some(b)) => Some(std::cmp::max(a, b)),
+ (Some(a), None) | (None, Some(a)) => Some(a),
+ (None, None) => None,
+ };
+
+ if let Some(exclusive) = exclusive_start {
+ self.use_live_at.insert_range(exclusive + 1..=p);
+
+ // If we have a bound after the start of the block, we should
+ // not add the predecessors for this block.
+ continue;
+ } else {
+ // Add all the elements of this block.
+ self.use_live_at.insert_range(block_start..=p);
+
+ // Then add the predecessors for this block, which are the
+ // terminators of predecessor basic blocks. Push those onto the
+ // stack so that the next iteration(s) will process them.
+
+ let block = self.cx.elements.to_location(block_start).block;
+ self.stack.extend(
+ self.cx.body.basic_blocks.predecessors()[block]
+ .iter()
+ .map(|&pred_bb| self.cx.body.terminator_loc(pred_bb))
+ .map(|pred_loc| self.cx.elements.point_from_location(pred_loc)),
+ );
+ }
+ }
+ }
+
+ /// Computes all points where local is "drop live" -- meaning its
+ /// current value may be dropped later (but not used). This is
+ /// done by iterating over the drops of `local` where `local` (or
+ /// some subpart of `local`) is initialized. For each such drop,
+ /// we walk backwards until we find a point where `local` is
+ /// either defined or use-live.
+ ///
+ /// Requires `compute_use_live_points_for` and `add_defs_for` to
+ /// have been executed.
+ fn compute_drop_live_points_for(&mut self, local: Local) {
+ debug!("compute_drop_live_points_for(local={:?})", local);
+
+ let mpi = self.cx.move_data.rev_lookup.find_local(local);
+ debug!("compute_drop_live_points_for: mpi = {:?}", mpi);
+
+ // Find the drops where `local` is initialized.
+ for drop_point in self.cx.local_use_map.drops(local) {
+ let location = self.cx.elements.to_location(drop_point);
+            debug_assert_eq!(self.cx.body.terminator_loc(location.block), location);
+
+ if self.cx.initialized_at_terminator(location.block, mpi) {
+ if self.drop_live_at.insert(drop_point) {
+ self.drop_locations.push(location);
+ self.stack.push(drop_point);
+ }
+ }
+ }
+
+ debug!("compute_drop_live_points_for: drop_locations={:?}", self.drop_locations);
+
+ // Reverse DFS. But for drops, we do it a bit differently.
+ // The stack only ever stores *terminators of blocks*. Within
+ // a block, we walk back the statements in an inner loop.
+ while let Some(term_point) = self.stack.pop() {
+ self.compute_drop_live_points_for_block(mpi, term_point);
+ }
+ }
+
+ /// Executes one iteration of the drop-live analysis loop.
+ ///
+ /// The parameter `mpi` is the `MovePathIndex` of the local variable
+ /// we are currently analyzing.
+ ///
+ /// The point `term_point` represents some terminator in the MIR,
+ /// where the local `mpi` is drop-live on entry to that terminator.
+ ///
+ /// This method adds all drop-live points within the block and --
+ /// where applicable -- pushes the terminators of preceding blocks
+ /// onto `self.stack`.
+ fn compute_drop_live_points_for_block(&mut self, mpi: MovePathIndex, term_point: PointIndex) {
+ debug!(
+ "compute_drop_live_points_for_block(mpi={:?}, term_point={:?})",
+ self.cx.move_data.move_paths[mpi].place,
+ self.cx.elements.to_location(term_point),
+ );
+
+ // We are only invoked with terminators where `mpi` is
+ // drop-live on entry.
+ debug_assert!(self.drop_live_at.contains(term_point));
+
+        // Scan backwards through the statements in the
+        // block. One of them may be either a definition or a
+        // use-live point.
+ let term_location = self.cx.elements.to_location(term_point);
+        debug_assert_eq!(self.cx.body.terminator_loc(term_location.block), term_location);
+ let block = term_location.block;
+ let entry_point = self.cx.elements.entry_point(term_location.block);
+ for p in (entry_point..term_point).rev() {
+ debug!("compute_drop_live_points_for_block: p = {:?}", self.cx.elements.to_location(p));
+
+ if self.defs.contains(p) {
+ debug!("compute_drop_live_points_for_block: def site");
+ return;
+ }
+
+ if self.use_live_at.contains(p) {
+ debug!("compute_drop_live_points_for_block: use-live at {:?}", p);
+ return;
+ }
+
+ if !self.drop_live_at.insert(p) {
+ debug!("compute_drop_live_points_for_block: already drop-live");
+ return;
+ }
+ }
+
+ let body = self.cx.body;
+ for &pred_block in body.basic_blocks.predecessors()[block].iter() {
+ debug!("compute_drop_live_points_for_block: pred_block = {:?}", pred_block,);
+
+ // Check whether the variable is (at least partially)
+ // initialized at the exit of this predecessor. If so, we
+ // want to enqueue it on our list. If not, go check the
+ // next block.
+ //
+ // Note that we only need to check whether `live_local`
+ // became de-initialized at basic block boundaries. If it
+ // were to become de-initialized within the block, that
+ // would have been a "use-live" transition in the earlier
+ // loop, and we'd have returned already.
+ //
+ // NB. It's possible that the pred-block ends in a call
+ // which stores to the variable; in that case, the
+ // variable may be uninitialized "at exit" because this
+ // call only considers the *unconditional effects* of the
+ // terminator. *But*, in that case, the terminator is also
+ // a *definition* of the variable, in which case we want
+ // to stop the search anyhow. (But see Note 1 below.)
+ if !self.cx.initialized_at_exit(pred_block, mpi) {
+ debug!("compute_drop_live_points_for_block: not initialized");
+ continue;
+ }
+
+ let pred_term_loc = self.cx.body.terminator_loc(pred_block);
+ let pred_term_point = self.cx.elements.point_from_location(pred_term_loc);
+
+ // If the terminator of this predecessor either *assigns*
+ // our value or is a "normal use", then stop.
+ if self.defs.contains(pred_term_point) {
+ debug!("compute_drop_live_points_for_block: defined at {:?}", pred_term_loc);
+ continue;
+ }
+
+ if self.use_live_at.contains(pred_term_point) {
+ debug!("compute_drop_live_points_for_block: use-live at {:?}", pred_term_loc);
+ continue;
+ }
+
+ // Otherwise, we are drop-live on entry to the terminator,
+ // so walk it.
+ if self.drop_live_at.insert(pred_term_point) {
+ debug!("compute_drop_live_points_for_block: pushed to stack");
+ self.stack.push(pred_term_point);
+ }
+ }
+
+ // Note 1. There is a weird scenario that you might imagine
+ // being problematic here, but which actually cannot happen.
+ // The problem would be if we had a variable that *is* initialized
+ // (but dead) on entry to the terminator, and where the current value
+ // will be dropped in the case of unwind. In that case, we ought to
+ // consider `X` to be drop-live in between the last use and call.
+ // Here is the example:
+ //
+ // ```
+ // BB0 {
+ // X = ...
+ // use(X); // last use
+ // ... // <-- X ought to be drop-live here
+ // X = call() goto BB1 unwind BB2
+ // }
+ //
+ // BB1 {
+ // DROP(X)
+ // }
+ //
+ // BB2 {
+ // DROP(X)
+ // }
+ // ```
+ //
+ // However, the current code would, when walking back from BB2,
+ // simply stop and never explore BB0. This seems bad! But it turns
+ // out this code is flawed anyway -- note that the existing value of
+ // `X` would leak in the case where unwinding did *not* occur.
+ //
+ // What we *actually* generate is a store to a temporary
+ // for the call (`TMP = call()...`) and then a
+ // `DropAndReplace` to swap that with `X`
+ // (`DropAndReplace` has very particular semantics).
+ }
+}
+
+impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> {
+ /// Returns `true` if the local variable (or some part of it) is initialized at the current
+ /// cursor position. Callers should call one of the `seek` methods immediately before to point
+ /// the cursor to the desired location.
+ fn initialized_at_curr_loc(&self, mpi: MovePathIndex) -> bool {
+ let state = self.flow_inits.get();
+ if state.contains(mpi) {
+ return true;
+ }
+
+ let move_paths = &self.flow_inits.analysis().move_data().move_paths;
+ move_paths[mpi].find_descendant(&move_paths, |mpi| state.contains(mpi)).is_some()
+ }
+
+ /// Returns `true` if the local variable (or some part of it) is initialized in
+ /// the terminator of `block`. We need to check this to determine if a
+ /// DROP of some local variable will have an effect -- note that
+ /// drops, as they may unwind, are always terminators.
+ fn initialized_at_terminator(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool {
+ self.flow_inits.seek_before_primary_effect(self.body.terminator_loc(block));
+ self.initialized_at_curr_loc(mpi)
+ }
+
+ /// Returns `true` if the path `mpi` (or some part of it) is initialized at
+ /// the exit of `block`.
+ ///
+ /// **Warning:** Does not account for the result of `Call`
+ /// instructions.
+ fn initialized_at_exit(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool {
+ self.flow_inits.seek_after_primary_effect(self.body.terminator_loc(block));
+ self.initialized_at_curr_loc(mpi)
+ }
+
+ /// Stores the result that all regions in `value` are live for the
+ /// points `live_at`.
+ fn add_use_live_facts_for(
+ &mut self,
+ value: impl TypeVisitable<'tcx>,
+ live_at: &IntervalSet<PointIndex>,
+ ) {
+ debug!("add_use_live_facts_for(value={:?})", value);
+
+ Self::make_all_regions_live(self.elements, &mut self.typeck, value, live_at)
+ }
+
+    /// Some variable with type `dropped_ty` is "drop live" at each of the
+    /// given `drop_locations` -- i.e., it may be dropped later. This means
+    /// that *some* of the regions in its type must be live at those points. The
+ /// precise set will depend on the dropck constraints, and in
+ /// particular this takes `#[may_dangle]` into account.
+ fn add_drop_live_facts_for(
+ &mut self,
+ dropped_local: Local,
+ dropped_ty: Ty<'tcx>,
+ drop_locations: &[Location],
+ live_at: &IntervalSet<PointIndex>,
+ ) {
+ debug!(
+ "add_drop_live_constraint(\
+ dropped_local={:?}, \
+ dropped_ty={:?}, \
+ drop_locations={:?}, \
+ live_at={:?})",
+ dropped_local,
+ dropped_ty,
+ drop_locations,
+ values::location_set_str(self.elements, live_at.iter()),
+ );
+
+ let drop_data = self.drop_data.entry(dropped_ty).or_insert_with({
+ let typeck = &mut self.typeck;
+ move || Self::compute_drop_data(typeck, dropped_ty)
+ });
+
+ if let Some(data) = &drop_data.region_constraint_data {
+ for &drop_location in drop_locations {
+ self.typeck.push_region_constraints(
+ drop_location.to_locations(),
+ ConstraintCategory::Boring,
+ data,
+ );
+ }
+ }
+
+ drop_data.dropck_result.report_overflows(
+ self.typeck.infcx.tcx,
+ self.body.source_info(*drop_locations.first().unwrap()).span,
+ dropped_ty,
+ );
+
+ // All things in the `outlives` array may be touched by
+ // the destructor and must be live at this point.
+ for &kind in &drop_data.dropck_result.kinds {
+ Self::make_all_regions_live(self.elements, &mut self.typeck, kind, live_at);
+
+ polonius::add_drop_of_var_derefs_origin(&mut self.typeck, dropped_local, &kind);
+ }
+ }
+
+ fn make_all_regions_live(
+ elements: &RegionValueElements,
+ typeck: &mut TypeChecker<'_, 'tcx>,
+ value: impl TypeVisitable<'tcx>,
+ live_at: &IntervalSet<PointIndex>,
+ ) {
+ debug!("make_all_regions_live(value={:?})", value);
+ debug!(
+ "make_all_regions_live: live_at={}",
+ values::location_set_str(elements, live_at.iter()),
+ );
+
+ let tcx = typeck.tcx();
+ tcx.for_each_free_region(&value, |live_region| {
+ let live_region_vid =
+ typeck.borrowck_context.universal_regions.to_region_vid(live_region);
+ typeck
+ .borrowck_context
+ .constraints
+ .liveness_constraints
+ .add_elements(live_region_vid, live_at);
+ });
+ }
+
+ fn compute_drop_data(
+ typeck: &mut TypeChecker<'_, 'tcx>,
+ dropped_ty: Ty<'tcx>,
+ ) -> DropData<'tcx> {
+ debug!("compute_drop_data(dropped_ty={:?})", dropped_ty,);
+
+ let param_env = typeck.param_env;
+ let TypeOpOutput { output, constraints, .. } =
+ param_env.and(DropckOutlives::new(dropped_ty)).fully_perform(typeck.infcx).unwrap();
+
+ DropData { dropck_result: output, region_constraint_data: constraints }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs
new file mode 100644
index 000000000..d32b1edcd
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/mod.rs
@@ -0,0 +1,2721 @@
+//! This pass type-checks the MIR to ensure it is not broken.
+
+use std::rc::Rc;
+use std::{fmt, iter, mem};
+
+use either::Either;
+
+use hir::OpaqueTyOrigin;
+use rustc_data_structures::frozen::Frozen;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::vec_map::VecMap;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_infer::infer::canonical::QueryRegionConstraints;
+use rustc_infer::infer::outlives::env::RegionBoundPairs;
+use rustc_infer::infer::region_constraints::RegionConstraintData;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::{
+ InferCtxt, InferOk, LateBoundRegion, LateBoundRegionConversionTime, NllRegionVariableOrigin,
+};
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::AssertKind;
+use rustc_middle::mir::*;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef, UserSubsts};
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{
+ self, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, OpaqueHiddenType,
+ OpaqueTypeKey, RegionVid, ToPredicate, Ty, TyCtxt, UserType, UserTypeAnnotationIndex,
+};
+use rustc_span::def_id::CRATE_DEF_ID;
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi::VariantIdx;
+use rustc_trait_selection::traits::query::type_op;
+use rustc_trait_selection::traits::query::type_op::custom::scrape_region_constraints;
+use rustc_trait_selection::traits::query::type_op::custom::CustomTypeOp;
+use rustc_trait_selection::traits::query::type_op::{TypeOp, TypeOpOutput};
+use rustc_trait_selection::traits::query::Fallible;
+use rustc_trait_selection::traits::PredicateObligation;
+
+use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
+use rustc_mir_dataflow::move_paths::MoveData;
+use rustc_mir_dataflow::ResultsCursor;
+
+use crate::session_diagnostics::MoveUnsized;
+use crate::{
+ borrow_set::BorrowSet,
+ constraints::{OutlivesConstraint, OutlivesConstraintSet},
+ diagnostics::UniverseInfo,
+ facts::AllFacts,
+ location::LocationTable,
+ member_constraints::MemberConstraintSet,
+ nll::ToRegionVid,
+ path_utils,
+ region_infer::values::{
+ LivenessValues, PlaceholderIndex, PlaceholderIndices, RegionValueElements,
+ },
+ region_infer::{ClosureRegionRequirementsExt, TypeTest},
+ type_check::free_region_relations::{CreateResult, UniversalRegionRelations},
+ universal_regions::{DefiningTy, UniversalRegions},
+ Upvar,
+};
+
+macro_rules! span_mirbug {
+ ($context:expr, $elem:expr, $($message:tt)*) => ({
+ $crate::type_check::mirbug(
+ $context.tcx(),
+ $context.last_span,
+ &format!(
+ "broken MIR in {:?} ({:?}): {}",
+ $context.body().source.def_id(),
+ $elem,
+ format_args!($($message)*),
+ ),
+ )
+ })
+}
+
+macro_rules! span_mirbug_and_err {
+ ($context:expr, $elem:expr, $($message:tt)*) => ({
+ {
+ span_mirbug!($context, $elem, $($message)*);
+ $context.error()
+ }
+ })
+}
+
+mod canonical;
+mod constraint_conversion;
+pub mod free_region_relations;
+mod input_output;
+pub(crate) mod liveness;
+mod relate_tys;
+
+/// Type checks the given `mir` in the context of the inference
+/// context `infcx`. Returns any region constraints that have yet to
+/// be proven. This result includes liveness constraints that
+/// ensure that regions appearing in the types of all local variables
+/// are live at all points where that local variable may later be
+/// used.
+///
+/// This phase of type-check ought to be infallible -- this is because
+/// the original, HIR-based type-check succeeded. So if any errors
+/// occur here, we will get a `bug!` reported.
+///
+/// # Parameters
+///
+/// - `infcx` -- inference context to use
+/// - `param_env` -- parameter environment to use for trait solving
+/// - `body` -- MIR body to type-check
+/// - `promoted` -- map of promoted constants within `body`
+/// - `universal_regions` -- the universal regions from `body`'s function signature
+/// - `location_table` -- MIR location map of `body`
+/// - `borrow_set` -- information about borrows occurring in `body`
+/// - `all_facts` -- when using Polonius, this is the generated set of Polonius facts
+/// - `flow_inits` -- results of a maybe-init dataflow analysis
+/// - `move_data` -- move-data constructed when performing the maybe-init dataflow analysis
+/// - `elements` -- MIR region map
+pub(crate) fn type_check<'mir, 'tcx>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body: &Body<'tcx>,
+ promoted: &IndexVec<Promoted, Body<'tcx>>,
+ universal_regions: &Rc<UniversalRegions<'tcx>>,
+ location_table: &LocationTable,
+ borrow_set: &BorrowSet<'tcx>,
+ all_facts: &mut Option<AllFacts>,
+ flow_inits: &mut ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
+ move_data: &MoveData<'tcx>,
+ elements: &Rc<RegionValueElements>,
+ upvars: &[Upvar<'tcx>],
+ use_polonius: bool,
+) -> MirTypeckResults<'tcx> {
+ let implicit_region_bound = infcx.tcx.mk_region(ty::ReVar(universal_regions.fr_fn_body));
+ let mut universe_causes = FxHashMap::default();
+ universe_causes.insert(ty::UniverseIndex::from_u32(0), UniverseInfo::other());
+ let mut constraints = MirTypeckRegionConstraints {
+ placeholder_indices: PlaceholderIndices::default(),
+ placeholder_index_to_region: IndexVec::default(),
+ liveness_constraints: LivenessValues::new(elements.clone()),
+ outlives_constraints: OutlivesConstraintSet::default(),
+ member_constraints: MemberConstraintSet::default(),
+ closure_bounds_mapping: Default::default(),
+ type_tests: Vec::default(),
+ universe_causes,
+ };
+
+ let CreateResult {
+ universal_region_relations,
+ region_bound_pairs,
+ normalized_inputs_and_output,
+ } = free_region_relations::create(
+ infcx,
+ param_env,
+ implicit_region_bound,
+ universal_regions,
+ &mut constraints,
+ );
+
+ debug!(?normalized_inputs_and_output);
+
+ for u in ty::UniverseIndex::ROOT..infcx.universe() {
+ let info = UniverseInfo::other();
+ constraints.universe_causes.insert(u, info);
+ }
+
+ let mut borrowck_context = BorrowCheckContext {
+ universal_regions,
+ location_table,
+ borrow_set,
+ all_facts,
+ constraints: &mut constraints,
+ upvars,
+ };
+
+ let opaque_type_values = type_check_internal(
+ infcx,
+ param_env,
+ body,
+ promoted,
+ &region_bound_pairs,
+ implicit_region_bound,
+ &mut borrowck_context,
+ |mut cx| {
+ debug!("inside extra closure of type_check_internal");
+ cx.equate_inputs_and_outputs(&body, universal_regions, &normalized_inputs_and_output);
+ liveness::generate(
+ &mut cx,
+ body,
+ elements,
+ flow_inits,
+ move_data,
+ location_table,
+ use_polonius,
+ );
+
+ translate_outlives_facts(&mut cx);
+ let opaque_type_values =
+ infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+
+ opaque_type_values
+ .into_iter()
+ .map(|(opaque_type_key, decl)| {
+ cx.fully_perform_op(
+ Locations::All(body.span),
+ ConstraintCategory::OpaqueType,
+ CustomTypeOp::new(
+ |infcx| {
+ infcx.register_member_constraints(
+ param_env,
+ opaque_type_key,
+ decl.hidden_type.ty,
+ decl.hidden_type.span,
+ );
+ Ok(InferOk { value: (), obligations: vec![] })
+ },
+ || "opaque_type_map".to_string(),
+ ),
+ )
+ .unwrap();
+ let mut hidden_type = infcx.resolve_vars_if_possible(decl.hidden_type);
+ trace!(
+ "finalized opaque type {:?} to {:#?}",
+ opaque_type_key,
+ hidden_type.ty.kind()
+ );
+ if hidden_type.has_infer_types_or_consts() {
+ infcx.tcx.sess.delay_span_bug(
+ decl.hidden_type.span,
+ &format!("could not resolve {:#?}", hidden_type.ty.kind()),
+ );
+ hidden_type.ty = infcx.tcx.ty_error();
+ }
+
+ (opaque_type_key, (hidden_type, decl.origin))
+ })
+ .collect()
+ },
+ );
+
+ MirTypeckResults { constraints, universal_region_relations, opaque_type_values }
+}
+
+#[instrument(
+ skip(infcx, body, promoted, region_bound_pairs, borrowck_context, extra),
+ level = "debug"
+)]
+fn type_check_internal<'a, 'tcx, R>(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body: &'a Body<'tcx>,
+ promoted: &'a IndexVec<Promoted, Body<'tcx>>,
+ region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+ implicit_region_bound: ty::Region<'tcx>,
+ borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
+ extra: impl FnOnce(TypeChecker<'a, 'tcx>) -> R,
+) -> R {
+ debug!("body: {:#?}", body);
+ let mut checker = TypeChecker::new(
+ infcx,
+ body,
+ param_env,
+ region_bound_pairs,
+ implicit_region_bound,
+ borrowck_context,
+ );
+ let errors_reported = {
+ let mut verifier = TypeVerifier::new(&mut checker, promoted);
+ verifier.visit_body(&body);
+ verifier.errors_reported
+ };
+
+ if !errors_reported {
+ // if verifier failed, don't do further checks to avoid ICEs
+ checker.typeck_mir(body);
+ }
+
+ extra(checker)
+}
+
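+// Translates each NLL outlives constraint `sup: sub` into Polonius
+// `subset_base` facts: one fact at the constraint's own mid-point when it has
+// a single location, or one fact per point in the CFG for `Locations::All`.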
+fn translate_outlives_facts(typeck: &mut TypeChecker<'_, '_>) {
+ let cx = &mut typeck.borrowck_context;
+ if let Some(facts) = cx.all_facts {
+ let _prof_timer = typeck.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+ let location_table = cx.location_table;
+ facts.subset_base.extend(cx.constraints.outlives_constraints.outlives().iter().flat_map(
+ |constraint: &OutlivesConstraint<'_>| {
+ if let Some(from_location) = constraint.locations.from_location() {
+ Either::Left(iter::once((
+ constraint.sup,
+ constraint.sub,
+ location_table.mid_index(from_location),
+ )))
+ } else {
+ Either::Right(
+ location_table
+ .all_points()
+ .map(move |location| (constraint.sup, constraint.sub, location)),
+ )
+ }
+ },
+ ));
+ }
+}
+
+#[track_caller]
+fn mirbug(tcx: TyCtxt<'_>, span: Span, msg: &str) {
+ // We sometimes see MIR failures (notably predicate failures) due to
+ // the fact that we check rvalue sized predicates here. So use `delay_span_bug`
+ // to avoid reporting bugs in those cases.
+ tcx.sess.diagnostic().delay_span_bug(span, msg);
+}
+
+enum FieldAccessError {
+ OutOfRange { field_count: usize },
+}
+
+/// Verifies that MIR types are sane, so that further checks do not crash.
+///
+/// The sanitize_XYZ methods here take an MIR object and compute its
+/// type, calling `span_mirbug` and returning an error type if there
+/// is a problem.
+struct TypeVerifier<'a, 'b, 'tcx> {
+ cx: &'a mut TypeChecker<'b, 'tcx>,
+ promoted: &'b IndexVec<Promoted, Body<'tcx>>,
+ last_span: Span,
+ errors_reported: bool,
+}
+
+impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> {
+ fn visit_span(&mut self, span: Span) {
+ if !span.is_dummy() {
+ self.last_span = span;
+ }
+ }
+
+ fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
+ self.sanitize_place(place, location, context);
+ }
+
+ fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+ self.super_constant(constant, location);
+ let ty = self.sanitize_type(constant, constant.literal.ty());
+
+ self.cx.infcx.tcx.for_each_free_region(&ty, |live_region| {
+ let live_region_vid =
+ self.cx.borrowck_context.universal_regions.to_region_vid(live_region);
+ self.cx
+ .borrowck_context
+ .constraints
+ .liveness_constraints
+ .add_element(live_region_vid, location);
+ });
+
+ // HACK(compiler-errors): Constants that are gathered into Body.required_consts
+ // have their locations erased...
+ let locations = if location != Location::START {
+ location.to_locations()
+ } else {
+ Locations::All(constant.span)
+ };
+
+ if let Some(annotation_index) = constant.user_ty {
+ if let Err(terr) = self.cx.relate_type_and_user_type(
+ constant.literal.ty(),
+ ty::Variance::Invariant,
+ &UserTypeProjection { base: annotation_index, projs: vec![] },
+ locations,
+ ConstraintCategory::Boring,
+ ) {
+ let annotation = &self.cx.user_type_annotations[annotation_index];
+ span_mirbug!(
+ self,
+ constant,
+ "bad constant user type {:?} vs {:?}: {:?}",
+ annotation,
+ constant.literal.ty(),
+ terr,
+ );
+ }
+ } else {
+ let tcx = self.tcx();
+ let maybe_uneval = match constant.literal {
+ ConstantKind::Ty(ct) => match ct.kind() {
+ ty::ConstKind::Unevaluated(uv) => Some(uv),
+ _ => None,
+ },
+ _ => None,
+ };
+ if let Some(uv) = maybe_uneval {
+ if let Some(promoted) = uv.promoted {
+ let check_err = |verifier: &mut TypeVerifier<'a, 'b, 'tcx>,
+ promoted: &Body<'tcx>,
+ ty,
+ san_ty| {
+ if let Err(terr) =
+ verifier.cx.eq_types(ty, san_ty, locations, ConstraintCategory::Boring)
+ {
+ span_mirbug!(
+ verifier,
+ promoted,
+ "bad promoted type ({:?}: {:?}): {:?}",
+ ty,
+ san_ty,
+ terr
+ );
+ };
+ };
+
+ if !self.errors_reported {
+ let promoted_body = &self.promoted[promoted];
+ self.sanitize_promoted(promoted_body, location);
+
+ let promoted_ty = promoted_body.return_ty();
+ check_err(self, promoted_body, ty, promoted_ty);
+ }
+ } else {
+ if let Err(terr) = self.cx.fully_perform_op(
+ locations,
+ ConstraintCategory::Boring,
+ self.cx.param_env.and(type_op::ascribe_user_type::AscribeUserType::new(
+ constant.literal.ty(),
+ uv.def.did,
+ UserSubsts { substs: uv.substs, user_self_ty: None },
+ )),
+ ) {
+ span_mirbug!(
+ self,
+ constant,
+ "bad constant type {:?} ({:?})",
+ constant,
+ terr
+ );
+ }
+ }
+ } else if let Some(static_def_id) = constant.check_static_ptr(tcx) {
+ let unnormalized_ty = tcx.type_of(static_def_id);
+ let normalized_ty = self.cx.normalize(unnormalized_ty, locations);
+ let literal_ty = constant.literal.ty().builtin_deref(true).unwrap().ty;
+
+ if let Err(terr) = self.cx.eq_types(
+ literal_ty,
+ normalized_ty,
+ locations,
+ ConstraintCategory::Boring,
+ ) {
+ span_mirbug!(self, constant, "bad static type {:?} ({:?})", constant, terr);
+ }
+ }
+
+ if let ty::FnDef(def_id, substs) = *constant.literal.ty().kind() {
+ let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, substs);
+ self.cx.normalize_and_prove_instantiated_predicates(
+ def_id,
+ instantiated_predicates,
+ locations,
+ );
+ }
+ }
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+ let rval_ty = rvalue.ty(self.body(), self.tcx());
+ self.sanitize_type(rvalue, rval_ty);
+ }
+
+ fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
+ self.super_local_decl(local, local_decl);
+ self.sanitize_type(local_decl, local_decl.ty);
+
+ if let Some(user_ty) = &local_decl.user_ty {
+ for (user_ty, span) in user_ty.projections_and_spans() {
+ let ty = if !local_decl.is_nonref_binding() {
+ // If we have a binding of the form `let ref x: T = ..`
+ // then remove the outermost reference so we can check the
+ // type annotation for the remaining type.
+ if let ty::Ref(_, rty, _) = local_decl.ty.kind() {
+ *rty
+ } else {
+ bug!("{:?} with ref binding has wrong type {}", local, local_decl.ty);
+ }
+ } else {
+ local_decl.ty
+ };
+
+ if let Err(terr) = self.cx.relate_type_and_user_type(
+ ty,
+ ty::Variance::Invariant,
+ user_ty,
+ Locations::All(*span),
+ ConstraintCategory::TypeAnnotation,
+ ) {
+ span_mirbug!(
+ self,
+ local,
+ "bad user type on variable {:?}: {:?} != {:?} ({:?})",
+ local,
+ local_decl.ty,
+ local_decl.user_ty,
+ terr,
+ );
+ }
+ }
+ }
+ }
+
+ fn visit_body(&mut self, body: &Body<'tcx>) {
+ self.sanitize_type(&"return type", body.return_ty());
+ for local_decl in &body.local_decls {
+ self.sanitize_type(local_decl, local_decl.ty);
+ }
+ if self.errors_reported {
+ return;
+ }
+ self.super_body(body);
+ }
+}
+
+impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> {
+ fn new(
+ cx: &'a mut TypeChecker<'b, 'tcx>,
+ promoted: &'b IndexVec<Promoted, Body<'tcx>>,
+ ) -> Self {
+ TypeVerifier { promoted, last_span: cx.body.span, cx, errors_reported: false }
+ }
+
+ fn body(&self) -> &Body<'tcx> {
+ self.cx.body
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.cx.infcx.tcx
+ }
+
+ fn sanitize_type(&mut self, parent: &dyn fmt::Debug, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if ty.has_escaping_bound_vars() || ty.references_error() {
+ span_mirbug_and_err!(self, parent, "bad type {:?}", ty)
+ } else {
+ ty
+ }
+ }
+
+ /// Checks that the types internal to the `place` match up with
+ /// what would be expected.
+ fn sanitize_place(
+ &mut self,
+ place: &Place<'tcx>,
+ location: Location,
+ context: PlaceContext,
+ ) -> PlaceTy<'tcx> {
+ debug!("sanitize_place: {:?}", place);
+
+ let mut place_ty = PlaceTy::from_ty(self.body().local_decls[place.local].ty);
+
+ for elem in place.projection.iter() {
+ if place_ty.variant_index.is_none() {
+ if place_ty.ty.references_error() {
+ assert!(self.errors_reported);
+ return PlaceTy::from_ty(self.tcx().ty_error());
+ }
+ }
+ place_ty = self.sanitize_projection(place_ty, elem, place, location);
+ }
+
+ if let PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) = context {
+ let tcx = self.tcx();
+ let trait_ref = ty::TraitRef {
+ def_id: tcx.require_lang_item(LangItem::Copy, Some(self.last_span)),
+ substs: tcx.mk_substs_trait(place_ty.ty, &[]),
+ };
+
+ // To have a `Copy` operand, the type `T` of the
+ // value must be `Copy`. Note that we prove that `T: Copy`,
+ // rather than using the `is_copy_modulo_regions`
+ // test. This is important because
+ // `is_copy_modulo_regions` ignores the resulting region
+ // obligations and assumes they pass. This can result in
+ // bounds from `Copy` impls being unsoundly ignored (e.g.,
+ // #29149). Note that we decide to use `Copy` before knowing
+ // whether the bounds fully apply: in effect, the rule is
+ // that if a value of some type could implement `Copy`, then
+ // it must.
+ self.cx.prove_trait_ref(
+ trait_ref,
+ location.to_locations(),
+ ConstraintCategory::CopyBound,
+ );
+ }
+
+ place_ty
+ }
+
+ fn sanitize_promoted(&mut self, promoted_body: &'b Body<'tcx>, location: Location) {
+ // Determine the constraints from the promoted MIR by running the type
+ // checker on the promoted MIR, then transfer the constraints back to
+ // the main MIR, changing the locations to the provided location.
+
+ let parent_body = mem::replace(&mut self.cx.body, promoted_body);
+
+ // Use new sets of constraints and closure bounds so that we can
+ // modify their locations.
+ let all_facts = &mut None;
+ let mut constraints = Default::default();
+ let mut closure_bounds = Default::default();
+ let mut liveness_constraints =
+ LivenessValues::new(Rc::new(RegionValueElements::new(&promoted_body)));
+ // Don't try to add borrow_region facts for the promoted MIR
+
+ let mut swap_constraints = |this: &mut Self| {
+ mem::swap(this.cx.borrowck_context.all_facts, all_facts);
+ mem::swap(
+ &mut this.cx.borrowck_context.constraints.outlives_constraints,
+ &mut constraints,
+ );
+ mem::swap(
+ &mut this.cx.borrowck_context.constraints.closure_bounds_mapping,
+ &mut closure_bounds,
+ );
+ mem::swap(
+ &mut this.cx.borrowck_context.constraints.liveness_constraints,
+ &mut liveness_constraints,
+ );
+ };
+
+ swap_constraints(self);
+
+ self.visit_body(&promoted_body);
+
+ if !self.errors_reported {
+ // if verifier failed, don't do further checks to avoid ICEs
+ self.cx.typeck_mir(promoted_body);
+ }
+
+ self.cx.body = parent_body;
+ // Merge the outlives constraints back in, at the given location.
+ swap_constraints(self);
+
+ let locations = location.to_locations();
+ for constraint in constraints.outlives().iter() {
+ let mut constraint = constraint.clone();
+ constraint.locations = locations;
+ if let ConstraintCategory::Return(_)
+ | ConstraintCategory::UseAsConst
+ | ConstraintCategory::UseAsStatic = constraint.category
+ {
+ // "Returning" from a promoted is an assignment to a
+ // temporary from the user's point of view.
+ constraint.category = ConstraintCategory::Boring;
+ }
+ self.cx.borrowck_context.constraints.outlives_constraints.push(constraint)
+ }
+ for region in liveness_constraints.rows() {
+ // If the region is live at at least one location in the promoted MIR,
+ // then add a liveness constraint to the main MIR for this region
+ // at the location provided as an argument to this method
+ if liveness_constraints.get_elements(region).next().is_some() {
+ self.cx
+ .borrowck_context
+ .constraints
+ .liveness_constraints
+ .add_element(region, location);
+ }
+ }
+
+ if !closure_bounds.is_empty() {
+ let combined_bounds_mapping =
+ closure_bounds.into_iter().flat_map(|(_, value)| value).collect();
+ let existing = self
+ .cx
+ .borrowck_context
+ .constraints
+ .closure_bounds_mapping
+ .insert(location, combined_bounds_mapping);
+ assert!(existing.is_none(), "Multiple promoteds/closures at the same location.");
+ }
+ }
+
+ fn sanitize_projection(
+ &mut self,
+ base: PlaceTy<'tcx>,
+ pi: PlaceElem<'tcx>,
+ place: &Place<'tcx>,
+ location: Location,
+ ) -> PlaceTy<'tcx> {
+ debug!("sanitize_projection: {:?} {:?} {:?}", base, pi, place);
+ let tcx = self.tcx();
+ let base_ty = base.ty;
+ match pi {
+ ProjectionElem::Deref => {
+ let deref_ty = base_ty.builtin_deref(true);
+ PlaceTy::from_ty(deref_ty.map(|t| t.ty).unwrap_or_else(|| {
+ span_mirbug_and_err!(self, place, "deref of non-pointer {:?}", base_ty)
+ }))
+ }
+ ProjectionElem::Index(i) => {
+ let index_ty = Place::from(i).ty(self.body(), tcx).ty;
+ if index_ty != tcx.types.usize {
+ PlaceTy::from_ty(span_mirbug_and_err!(self, i, "index by non-usize {:?}", i))
+ } else {
+ PlaceTy::from_ty(base_ty.builtin_index().unwrap_or_else(|| {
+ span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty)
+ }))
+ }
+ }
+ ProjectionElem::ConstantIndex { .. } => {
+ // consider verifying in-bounds
+ PlaceTy::from_ty(base_ty.builtin_index().unwrap_or_else(|| {
+ span_mirbug_and_err!(self, place, "index of non-array {:?}", base_ty)
+ }))
+ }
+ ProjectionElem::Subslice { from, to, from_end } => {
+ PlaceTy::from_ty(match base_ty.kind() {
+ ty::Array(inner, _) => {
+ assert!(!from_end, "array subslices should not use from_end");
+ tcx.mk_array(*inner, to - from)
+ }
+ ty::Slice(..) => {
+ assert!(from_end, "slice subslices should use from_end");
+ base_ty
+ }
+ _ => span_mirbug_and_err!(self, place, "slice of non-array {:?}", base_ty),
+ })
+ }
+ ProjectionElem::Downcast(maybe_name, index) => match base_ty.kind() {
+ ty::Adt(adt_def, _substs) if adt_def.is_enum() => {
+ if index.as_usize() >= adt_def.variants().len() {
+ PlaceTy::from_ty(span_mirbug_and_err!(
+ self,
+ place,
+ "cast to variant #{:?} but enum only has {:?}",
+ index,
+ adt_def.variants().len()
+ ))
+ } else {
+ PlaceTy { ty: base_ty, variant_index: Some(index) }
+ }
+ }
+ // We do not need to handle generators here, because this runs
+ // before the generator transform stage.
+ _ => {
+ let ty = if let Some(name) = maybe_name {
+ span_mirbug_and_err!(
+ self,
+ place,
+ "can't downcast {:?} as {:?}",
+ base_ty,
+ name
+ )
+ } else {
+ span_mirbug_and_err!(self, place, "can't downcast {:?}", base_ty)
+ };
+ PlaceTy::from_ty(ty)
+ }
+ },
+ ProjectionElem::Field(field, fty) => {
+ let fty = self.sanitize_type(place, fty);
+ let fty = self.cx.normalize(fty, location);
+ match self.field_ty(place, base, field, location) {
+ Ok(ty) => {
+ let ty = self.cx.normalize(ty, location);
+ if let Err(terr) = self.cx.eq_types(
+ ty,
+ fty,
+ location.to_locations(),
+ ConstraintCategory::Boring,
+ ) {
+ span_mirbug!(
+ self,
+ place,
+ "bad field access ({:?}: {:?}): {:?}",
+ ty,
+ fty,
+ terr
+ );
+ }
+ }
+ Err(FieldAccessError::OutOfRange { field_count }) => span_mirbug!(
+ self,
+ place,
+ "accessed field #{} but variant only has {}",
+ field.index(),
+ field_count
+ ),
+ }
+ PlaceTy::from_ty(fty)
+ }
+ }
+ }
+
+ fn error(&mut self) -> Ty<'tcx> {
+ self.errors_reported = true;
+ self.tcx().ty_error()
+ }
+
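+    /// Returns the type of field number `field` of the variant, closure,
+    /// generator, or tuple described by `base_ty`, or an `OutOfRange` error
+    /// if there are not that many fields.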
+ fn field_ty(
+ &mut self,
+ parent: &dyn fmt::Debug,
+ base_ty: PlaceTy<'tcx>,
+ field: Field,
+ location: Location,
+ ) -> Result<Ty<'tcx>, FieldAccessError> {
+ let tcx = self.tcx();
+
+ let (variant, substs) = match base_ty {
+ PlaceTy { ty, variant_index: Some(variant_index) } => match *ty.kind() {
+ ty::Adt(adt_def, substs) => (adt_def.variant(variant_index), substs),
+ ty::Generator(def_id, substs, _) => {
+ let mut variants = substs.as_generator().state_tys(def_id, tcx);
+ let Some(mut variant) = variants.nth(variant_index.into()) else {
+ bug!(
+ "variant_index of generator out of range: {:?}/{:?}",
+ variant_index,
+ substs.as_generator().state_tys(def_id, tcx).count()
+ );
+ };
+ return match variant.nth(field.index()) {
+ Some(ty) => Ok(ty),
+ None => Err(FieldAccessError::OutOfRange { field_count: variant.count() }),
+ };
+ }
+ _ => bug!("can't have downcast of non-adt non-generator type"),
+ },
+ PlaceTy { ty, variant_index: None } => match *ty.kind() {
+ ty::Adt(adt_def, substs) if !adt_def.is_enum() => {
+ (adt_def.variant(VariantIdx::new(0)), substs)
+ }
+ ty::Closure(_, substs) => {
+ return match substs
+ .as_closure()
+ .tupled_upvars_ty()
+ .tuple_fields()
+ .get(field.index())
+ {
+ Some(&ty) => Ok(ty),
+ None => Err(FieldAccessError::OutOfRange {
+ field_count: substs.as_closure().upvar_tys().count(),
+ }),
+ };
+ }
+ ty::Generator(_, substs, _) => {
+ // Only prefix fields (upvars and current state) are
+ // accessible without a variant index.
+ return match substs.as_generator().prefix_tys().nth(field.index()) {
+ Some(ty) => Ok(ty),
+ None => Err(FieldAccessError::OutOfRange {
+ field_count: substs.as_generator().prefix_tys().count(),
+ }),
+ };
+ }
+ ty::Tuple(tys) => {
+ return match tys.get(field.index()) {
+ Some(&ty) => Ok(ty),
+ None => Err(FieldAccessError::OutOfRange { field_count: tys.len() }),
+ };
+ }
+ _ => {
+ return Ok(span_mirbug_and_err!(
+ self,
+ parent,
+ "can't project out of {:?}",
+ base_ty
+ ));
+ }
+ },
+ };
+
+ if let Some(field) = variant.fields.get(field.index()) {
+ Ok(self.cx.normalize(field.ty(tcx, substs), location))
+ } else {
+ Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() })
+ }
+ }
+}
+
+/// The MIR type checker. Visits the MIR and enforces all the
+/// constraints needed for it to be valid and well-typed. Along the
+/// way, it accrues region constraints -- these can later be used by
+/// NLL region checking.
+struct TypeChecker<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ last_span: Span,
+ body: &'a Body<'tcx>,
+ /// User type annotations are shared between the main MIR and the MIR of
+ /// all of the promoted items.
+ user_type_annotations: &'a CanonicalUserTypeAnnotations<'tcx>,
+ region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+ implicit_region_bound: ty::Region<'tcx>,
+ reported_errors: FxHashSet<(Ty<'tcx>, Span)>,
+ borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
+}
+
+struct BorrowCheckContext<'a, 'tcx> {
+ pub(crate) universal_regions: &'a UniversalRegions<'tcx>,
+ location_table: &'a LocationTable,
+ all_facts: &'a mut Option<AllFacts>,
+ borrow_set: &'a BorrowSet<'tcx>,
+ pub(crate) constraints: &'a mut MirTypeckRegionConstraints<'tcx>,
+ upvars: &'a [Upvar<'tcx>],
+}
+
+pub(crate) struct MirTypeckResults<'tcx> {
+ pub(crate) constraints: MirTypeckRegionConstraints<'tcx>,
+ pub(crate) universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>,
+ pub(crate) opaque_type_values:
+ VecMap<OpaqueTypeKey<'tcx>, (OpaqueHiddenType<'tcx>, OpaqueTyOrigin)>,
+}
+
+/// A collection of region constraints that must be satisfied for the
+/// program to be considered well-typed.
+pub(crate) struct MirTypeckRegionConstraints<'tcx> {
+ /// Maps from a `ty::Placeholder` to the corresponding
+ /// `PlaceholderIndex` bit that we will use for it.
+ ///
+ /// To keep everything in sync, do not insert this set
+ /// directly. Instead, use the `placeholder_region` helper.
+ pub(crate) placeholder_indices: PlaceholderIndices,
+
+ /// Each time we add a placeholder to `placeholder_indices`, we
+    /// also create a corresponding "representative" region vid that
+    /// wraps it. This vector tracks those. This way, when we
+ /// convert the same `ty::RePlaceholder(p)` twice, we can map to
+ /// the same underlying `RegionVid`.
+ pub(crate) placeholder_index_to_region: IndexVec<PlaceholderIndex, ty::Region<'tcx>>,
+
+ /// In general, the type-checker is not responsible for enforcing
+ /// liveness constraints; this job falls to the region inferencer,
+ /// which performs a liveness analysis. However, in some limited
+ /// cases, the MIR type-checker creates temporary regions that do
+ /// not otherwise appear in the MIR -- in particular, the
+ /// late-bound regions that it instantiates at call-sites -- and
+ /// hence it must report on their liveness constraints.
+ pub(crate) liveness_constraints: LivenessValues<RegionVid>,
+
+ pub(crate) outlives_constraints: OutlivesConstraintSet<'tcx>,
+
+ pub(crate) member_constraints: MemberConstraintSet<'tcx, RegionVid>,
+
+ pub(crate) closure_bounds_mapping:
+ FxHashMap<Location, FxHashMap<(RegionVid, RegionVid), (ConstraintCategory<'tcx>, Span)>>,
+
+ pub(crate) universe_causes: FxHashMap<ty::UniverseIndex, UniverseInfo<'tcx>>,
+
+ pub(crate) type_tests: Vec<TypeTest<'tcx>>,
+}
+
+impl<'tcx> MirTypeckRegionConstraints<'tcx> {
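+    /// Returns the representative `ty::Region` for `placeholder`, creating a
+    /// fresh NLL region variable in the placeholder's universe the first time
+    /// a given placeholder is encountered.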
+ fn placeholder_region(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ placeholder: ty::PlaceholderRegion,
+ ) -> ty::Region<'tcx> {
+ let placeholder_index = self.placeholder_indices.insert(placeholder);
+ match self.placeholder_index_to_region.get(placeholder_index) {
+ Some(&v) => v,
+ None => {
+ let origin = NllRegionVariableOrigin::Placeholder(placeholder);
+ let region = infcx.next_nll_region_var_in_universe(origin, placeholder.universe);
+ self.placeholder_index_to_region.push(region);
+ region
+ }
+ }
+ }
+}
+
+/// The `Locations` type summarizes *where* region constraints are
+/// required to hold. Normally, this is at a particular point which
+/// created the obligation, but for constraints that the user gave, we
+/// want the constraint to hold at all points.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub enum Locations {
+ /// Indicates that a type constraint should always be true. This
+ /// is particularly important in the new borrowck analysis for
+ /// things like the type of the return slot. Consider this
+ /// example:
+ ///
+ /// ```compile_fail,E0515
+ /// fn foo<'a>(x: &'a u32) -> &'a u32 {
+ /// let y = 22;
+ /// return &y; // error
+ /// }
+ /// ```
+ ///
+ /// Here, we wind up with the signature from the return type being
+ /// something like `&'1 u32` where `'1` is a universal region. But
+ /// the type of the return slot `_0` is something like `&'2 u32`
+ /// where `'2` is an existential region variable. The type checker
+ /// requires that `&'2 u32 = &'1 u32` -- but at what point? In the
+ /// older NLL analysis, we required this only at the entry point
+ /// to the function. By the nature of the constraints, this wound
+ /// up propagating to all points reachable from start (because
+ /// `'1` -- as a universal region -- is live everywhere). In the
+ /// newer analysis, though, this doesn't work: `_0` is considered
+ /// dead at the start (it has no usable value) and hence this type
+ /// equality is basically a no-op. Then, later on, when we do `_0
+ /// = &'3 y`, that region `'3` never winds up related to the
+ /// universal region `'1` and hence no error occurs. Therefore, we
+ /// use Locations::All instead, which ensures that the `'1` and
+    /// `'2` are equated everywhere. We also use this for other
+ /// user-given type annotations; e.g., if the user wrote `let mut
+ /// x: &'static u32 = ...`, we would ensure that all values
+ /// assigned to `x` are of `'static` lifetime.
+ ///
+ /// The span points to the place the constraint arose. For example,
+ /// it points to the type in a user-given type annotation. If
+ /// there's no sensible span then it's DUMMY_SP.
+ All(Span),
+
+ /// An outlives constraint that only has to hold at a single location,
+ /// usually it represents a point where references flow from one spot to
+ /// another (e.g., `x = y`)
+ Single(Location),
+}
+
+impl Locations {
+ pub fn from_location(&self) -> Option<Location> {
+ match self {
+ Locations::All(_) => None,
+ Locations::Single(from_location) => Some(*from_location),
+ }
+ }
+
+ /// Gets a span representing the location.
+ pub fn span(&self, body: &Body<'_>) -> Span {
+ match self {
+ Locations::All(span) => *span,
+ Locations::Single(l) => body.source_info(*l).span,
+ }
+ }
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+ fn new(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ body: &'a Body<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ region_bound_pairs: &'a RegionBoundPairs<'tcx>,
+ implicit_region_bound: ty::Region<'tcx>,
+ borrowck_context: &'a mut BorrowCheckContext<'a, 'tcx>,
+ ) -> Self {
+ let mut checker = Self {
+ infcx,
+ last_span: DUMMY_SP,
+ body,
+ user_type_annotations: &body.user_type_annotations,
+ param_env,
+ region_bound_pairs,
+ implicit_region_bound,
+ borrowck_context,
+ reported_errors: Default::default(),
+ };
+ checker.check_user_type_annotations();
+ checker
+ }
+
+ fn body(&self) -> &Body<'tcx> {
+ self.body
+ }
+
+ fn unsized_feature_enabled(&self) -> bool {
+ let features = self.tcx().features();
+ features.unsized_locals || features.unsized_fn_params
+ }
+
+ /// Equate the inferred type and the annotated type for user type annotations
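+    /// (e.g., given `let x: Vec<&'static u32> = ...;`, the annotated type
+    /// `Vec<&'static u32>` is equated with the type inferred for `x`).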
+ #[instrument(skip(self), level = "debug")]
+ fn check_user_type_annotations(&mut self) {
+ debug!(?self.user_type_annotations);
+ for user_annotation in self.user_type_annotations {
+ let CanonicalUserTypeAnnotation { span, ref user_ty, inferred_ty } = *user_annotation;
+ let inferred_ty = self.normalize(inferred_ty, Locations::All(span));
+ let annotation = self.instantiate_canonical_with_fresh_inference_vars(span, user_ty);
+ match annotation {
+ UserType::Ty(mut ty) => {
+ ty = self.normalize(ty, Locations::All(span));
+
+ if let Err(terr) = self.eq_types(
+ ty,
+ inferred_ty,
+ Locations::All(span),
+ ConstraintCategory::BoringNoLocation,
+ ) {
+ span_mirbug!(
+ self,
+ user_annotation,
+ "bad user type ({:?} = {:?}): {:?}",
+ ty,
+ inferred_ty,
+ terr
+ );
+ }
+
+ self.prove_predicate(
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(inferred_ty.into()))
+ .to_predicate(self.tcx()),
+ Locations::All(span),
+ ConstraintCategory::TypeAnnotation,
+ );
+ }
+ UserType::TypeOf(def_id, user_substs) => {
+ if let Err(terr) = self.fully_perform_op(
+ Locations::All(span),
+ ConstraintCategory::BoringNoLocation,
+ self.param_env.and(type_op::ascribe_user_type::AscribeUserType::new(
+ inferred_ty,
+ def_id,
+ user_substs,
+ )),
+ ) {
+ span_mirbug!(
+ self,
+ user_annotation,
+ "bad user type AscribeUserType({:?}, {:?} {:?}, type_of={:?}): {:?}",
+ inferred_ty,
+ def_id,
+ user_substs,
+ self.tcx().type_of(def_id),
+ terr,
+ );
+ }
+ }
+ }
+ }
+ }
+
+ #[instrument(skip(self, data), level = "debug")]
+ fn push_region_constraints(
+ &mut self,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ data: &QueryRegionConstraints<'tcx>,
+ ) {
+ debug!("constraints generated: {:#?}", data);
+
+ constraint_conversion::ConstraintConversion::new(
+ self.infcx,
+ self.borrowck_context.universal_regions,
+ self.region_bound_pairs,
+ self.implicit_region_bound,
+ self.param_env,
+ locations,
+ locations.span(self.body),
+ category,
+ &mut self.borrowck_context.constraints,
+ )
+ .convert_all(data);
+ }
+
+ /// Try to relate `sub <: sup`
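+    /// (e.g., when checking an assignment `place = rvalue`, the rvalue's
+    /// type must be a subtype of the place's type).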
+ fn sub_types(
+ &mut self,
+ sub: Ty<'tcx>,
+ sup: Ty<'tcx>,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ ) -> Fallible<()> {
+ // Use this order of parameters because the sup type is usually the
+ // "expected" type in diagnostics.
+ self.relate_types(sup, ty::Variance::Contravariant, sub, locations, category)
+ }
+
+ #[instrument(skip(self, category), level = "debug")]
+ fn eq_types(
+ &mut self,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ ) -> Fallible<()> {
+ self.relate_types(expected, ty::Variance::Invariant, found, locations, category)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn relate_type_and_user_type(
+ &mut self,
+ a: Ty<'tcx>,
+ v: ty::Variance,
+ user_ty: &UserTypeProjection,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ ) -> Fallible<()> {
+ let annotated_type = self.user_type_annotations[user_ty.base].inferred_ty;
+ let mut curr_projected_ty = PlaceTy::from_ty(annotated_type);
+
+ let tcx = self.infcx.tcx;
+
+ for proj in &user_ty.projs {
+ let projected_ty = curr_projected_ty.projection_ty_core(
+ tcx,
+ self.param_env,
+ proj,
+ |this, field, ()| {
+ let ty = this.field_ty(tcx, field);
+ self.normalize(ty, locations)
+ },
+ );
+ curr_projected_ty = projected_ty;
+ }
+ debug!(
+ "user_ty base: {:?} freshened: {:?} projs: {:?} yields: {:?}",
+ user_ty.base, annotated_type, user_ty.projs, curr_projected_ty
+ );
+
+ let ty = curr_projected_ty.ty;
+ self.relate_types(ty, v.xform(ty::Variance::Contravariant), a, locations, category)?;
+
+ Ok(())
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ #[instrument(skip(self, body, location), level = "debug")]
+ fn check_stmt(&mut self, body: &Body<'tcx>, stmt: &Statement<'tcx>, location: Location) {
+ let tcx = self.tcx();
+ debug!("stmt kind: {:?}", stmt.kind);
+ match stmt.kind {
+ StatementKind::Assign(box (ref place, ref rv)) => {
+ // Assignments to temporaries are not "interesting";
+ // they are not caused by the user, but rather artifacts
+ // of lowering. Assignments to other sorts of places *are* interesting
+ // though.
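+ // (Illustrative: in `let y = f(a) + 1;`, the result of `f(a)` lands
+ // in a compiler-introduced temporary first; only the final assignment
+ // to `y` is user-visible.)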
+ let category = match place.as_local() {
+ Some(RETURN_PLACE) => {
+ let defining_ty = &self.borrowck_context.universal_regions.defining_ty;
+ if defining_ty.is_const() {
+ if tcx.is_static(defining_ty.def_id()) {
+ ConstraintCategory::UseAsStatic
+ } else {
+ ConstraintCategory::UseAsConst
+ }
+ } else {
+ ConstraintCategory::Return(ReturnConstraint::Normal)
+ }
+ }
+ Some(l)
+ if matches!(
+ body.local_decls[l].local_info,
+ Some(box LocalInfo::AggregateTemp)
+ ) =>
+ {
+ ConstraintCategory::Usage
+ }
+ Some(l) if !body.local_decls[l].is_user_variable() => {
+ ConstraintCategory::Boring
+ }
+ _ => ConstraintCategory::Assignment,
+ };
+ debug!(
+ "assignment category: {:?} {:?}",
+ category,
+ place.as_local().map(|l| &body.local_decls[l])
+ );
+
+ let place_ty = place.ty(body, tcx).ty;
+ debug!(?place_ty);
+ let place_ty = self.normalize(place_ty, location);
+ debug!("place_ty normalized: {:?}", place_ty);
+ let rv_ty = rv.ty(body, tcx);
+ debug!(?rv_ty);
+ let rv_ty = self.normalize(rv_ty, location);
+ debug!("normalized rv_ty: {:?}", rv_ty);
+ if let Err(terr) =
+ self.sub_types(rv_ty, place_ty, location.to_locations(), category)
+ {
+ span_mirbug!(
+ self,
+ stmt,
+ "bad assignment ({:?} = {:?}): {:?}",
+ place_ty,
+ rv_ty,
+ terr
+ );
+ }
+
+ if let Some(annotation_index) = self.rvalue_user_ty(rv) {
+ if let Err(terr) = self.relate_type_and_user_type(
+ rv_ty,
+ ty::Variance::Invariant,
+ &UserTypeProjection { base: annotation_index, projs: vec![] },
+ location.to_locations(),
+ ConstraintCategory::Boring,
+ ) {
+ let annotation = &self.user_type_annotations[annotation_index];
+ span_mirbug!(
+ self,
+ stmt,
+ "bad user type on rvalue ({:?} = {:?}): {:?}",
+ annotation,
+ rv_ty,
+ terr
+ );
+ }
+ }
+
+ self.check_rvalue(body, rv, location);
+ if !self.unsized_feature_enabled() {
+ let trait_ref = ty::TraitRef {
+ def_id: tcx.require_lang_item(LangItem::Sized, Some(self.last_span)),
+ substs: tcx.mk_substs_trait(place_ty, &[]),
+ };
+ self.prove_trait_ref(
+ trait_ref,
+ location.to_locations(),
+ ConstraintCategory::SizedBound,
+ );
+ }
+ }
+ StatementKind::AscribeUserType(box (ref place, ref projection), variance) => {
+ let place_ty = place.ty(body, tcx).ty;
+ if let Err(terr) = self.relate_type_and_user_type(
+ place_ty,
+ variance,
+ projection,
+ Locations::All(stmt.source_info.span),
+ ConstraintCategory::TypeAnnotation,
+ ) {
+ let annotation = &self.user_type_annotations[projection.base];
+ span_mirbug!(
+ self,
+ stmt,
+ "bad type assert ({:?} <: {:?} with projections {:?}): {:?}",
+ place_ty,
+ annotation,
+ projection.projs,
+ terr
+ );
+ }
+ }
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ ..
+ }) => span_bug!(
+ stmt.source_info.span,
+ "Unexpected StatementKind::CopyNonOverlapping, should only appear after lowering_intrinsics",
+ ),
+ StatementKind::FakeRead(..)
+ | StatementKind::StorageLive(..)
+ | StatementKind::StorageDead(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::Coverage(..)
+ | StatementKind::Nop => {}
+ StatementKind::Deinit(..) | StatementKind::SetDiscriminant { .. } => {
+ bug!("Statement not allowed in this MIR phase")
+ }
+ }
+ }
+
+ #[instrument(skip(self, body, term_location), level = "debug")]
+ fn check_terminator(
+ &mut self,
+ body: &Body<'tcx>,
+ term: &Terminator<'tcx>,
+ term_location: Location,
+ ) {
+ let tcx = self.tcx();
+ debug!("terminator kind: {:?}", term.kind);
+ match term.kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::InlineAsm { .. } => {
+ // no checks needed for these
+ }
+
+ TerminatorKind::DropAndReplace { ref place, ref value, target: _, unwind: _ } => {
+ let place_ty = place.ty(body, tcx).ty;
+ let rv_ty = value.ty(body, tcx);
+
+ let locations = term_location.to_locations();
+ if let Err(terr) =
+ self.sub_types(rv_ty, place_ty, locations, ConstraintCategory::Assignment)
+ {
+ span_mirbug!(
+ self,
+ term,
+ "bad DropAndReplace ({:?} = {:?}): {:?}",
+ place_ty,
+ rv_ty,
+ terr
+ );
+ }
+ }
+ TerminatorKind::SwitchInt { ref discr, switch_ty, .. } => {
+ self.check_operand(discr, term_location);
+
+ let discr_ty = discr.ty(body, tcx);
+ if let Err(terr) = self.sub_types(
+ discr_ty,
+ switch_ty,
+ term_location.to_locations(),
+ ConstraintCategory::Assignment,
+ ) {
+ span_mirbug!(
+ self,
+ term,
+ "bad SwitchInt ({:?} on {:?}): {:?}",
+ switch_ty,
+ discr_ty,
+ terr
+ );
+ }
+ if !switch_ty.is_integral() && !switch_ty.is_char() && !switch_ty.is_bool() {
+ span_mirbug!(self, term, "bad SwitchInt discr ty {:?}", switch_ty);
+ }
+ // FIXME: check the values
+ }
+ TerminatorKind::Call {
+ ref func,
+ ref args,
+ ref destination,
+ from_hir_call,
+ target,
+ ..
+ } => {
+ self.check_operand(func, term_location);
+ for arg in args {
+ self.check_operand(arg, term_location);
+ }
+
+ let func_ty = func.ty(body, tcx);
+ debug!("func_ty.kind: {:?}", func_ty.kind());
+
+ let sig = match func_ty.kind() {
+ ty::FnDef(..) | ty::FnPtr(_) => func_ty.fn_sig(tcx),
+ _ => {
+ span_mirbug!(self, term, "call to non-function {:?}", func_ty);
+ return;
+ }
+ };
+ let (sig, map) = tcx.replace_late_bound_regions(sig, |br| {
+ self.infcx.next_region_var(LateBoundRegion(
+ term.source_info.span,
+ br.kind,
+ LateBoundRegionConversionTime::FnCall,
+ ))
+ });
+ debug!(?sig);
+ let sig = self.normalize(sig, term_location);
+ self.check_call_dest(body, term, &sig, *destination, target, term_location);
+
+ self.prove_predicates(
+ sig.inputs_and_output
+ .iter()
+ .map(|ty| ty::Binder::dummy(ty::PredicateKind::WellFormed(ty.into()))),
+ term_location.to_locations(),
+ ConstraintCategory::Boring,
+ );
+
+ // The ordinary liveness rules will ensure that all
+ // regions in the type of the callee are live here. We
+ // then further constrain the late-bound regions that
+ // were instantiated at the call site to be live as
+ // well. The result is that all the input (and
+ // output) types in the signature must be live, since
+ // all the inputs that fed into it were live.
+ for &late_bound_region in map.values() {
+ let region_vid =
+ self.borrowck_context.universal_regions.to_region_vid(late_bound_region);
+ self.borrowck_context
+ .constraints
+ .liveness_constraints
+ .add_element(region_vid, term_location);
+ }
+
+ self.check_call_inputs(body, term, &sig, args, term_location, from_hir_call);
+ }
+ TerminatorKind::Assert { ref cond, ref msg, .. } => {
+ self.check_operand(cond, term_location);
+
+ let cond_ty = cond.ty(body, tcx);
+ if cond_ty != tcx.types.bool {
+ span_mirbug!(self, term, "bad Assert ({:?}, not bool", cond_ty);
+ }
+
+ if let AssertKind::BoundsCheck { ref len, ref index } = *msg {
+ if len.ty(body, tcx) != tcx.types.usize {
+ span_mirbug!(self, len, "bounds-check length non-usize {:?}", len)
+ }
+ if index.ty(body, tcx) != tcx.types.usize {
+ span_mirbug!(self, index, "bounds-check index non-usize {:?}", index)
+ }
+ }
+ }
+ TerminatorKind::Yield { ref value, .. } => {
+ self.check_operand(value, term_location);
+
+ let value_ty = value.ty(body, tcx);
+ match body.yield_ty() {
+ None => span_mirbug!(self, term, "yield in non-generator"),
+ Some(ty) => {
+ if let Err(terr) = self.sub_types(
+ value_ty,
+ ty,
+ term_location.to_locations(),
+ ConstraintCategory::Yield,
+ ) {
+ span_mirbug!(
+ self,
+ term,
+ "type of yield value is {:?}, but the yield type is {:?}: {:?}",
+ value_ty,
+ ty,
+ terr
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ fn check_call_dest(
+ &mut self,
+ body: &Body<'tcx>,
+ term: &Terminator<'tcx>,
+ sig: &ty::FnSig<'tcx>,
+ destination: Place<'tcx>,
+ target: Option<BasicBlock>,
+ term_location: Location,
+ ) {
+ let tcx = self.tcx();
+ match target {
+ Some(_) => {
+ let dest_ty = destination.ty(body, tcx).ty;
+ let dest_ty = self.normalize(dest_ty, term_location);
+ let category = match destination.as_local() {
+ Some(RETURN_PLACE) => {
+ if let BorrowCheckContext {
+ universal_regions:
+ UniversalRegions {
+ defining_ty:
+ DefiningTy::Const(def_id, _)
+ | DefiningTy::InlineConst(def_id, _),
+ ..
+ },
+ ..
+ } = self.borrowck_context
+ {
+ if tcx.is_static(*def_id) {
+ ConstraintCategory::UseAsStatic
+ } else {
+ ConstraintCategory::UseAsConst
+ }
+ } else {
+ ConstraintCategory::Return(ReturnConstraint::Normal)
+ }
+ }
+ Some(l) if !body.local_decls[l].is_user_variable() => {
+ ConstraintCategory::Boring
+ }
+ _ => ConstraintCategory::Assignment,
+ };
+
+ let locations = term_location.to_locations();
+
+ if let Err(terr) = self.sub_types(sig.output(), dest_ty, locations, category) {
+ span_mirbug!(
+ self,
+ term,
+ "call dest mismatch ({:?} <- {:?}): {:?}",
+ dest_ty,
+ sig.output(),
+ terr
+ );
+ }
+
+ // When neither `unsized_fn_params` nor `unsized_locals` is enabled,
+ // this check is done in `check_local`.
+ if self.unsized_feature_enabled() {
+ let span = term.source_info.span;
+ self.ensure_place_sized(dest_ty, span);
+ }
+ }
+ None => {
+ if !self
+ .tcx()
+ .conservative_is_privately_uninhabited(self.param_env.and(sig.output()))
+ {
+ span_mirbug!(self, term, "call to converging function {:?} w/o dest", sig);
+ }
+ }
+ }
+ }
+
+ fn check_call_inputs(
+ &mut self,
+ body: &Body<'tcx>,
+ term: &Terminator<'tcx>,
+ sig: &ty::FnSig<'tcx>,
+ args: &[Operand<'tcx>],
+ term_location: Location,
+ from_hir_call: bool,
+ ) {
+ debug!("check_call_inputs({:?}, {:?})", sig, args);
+ if args.len() < sig.inputs().len() || (args.len() > sig.inputs().len() && !sig.c_variadic) {
+ span_mirbug!(self, term, "call to {:?} with wrong # of args", sig);
+ }
+
+ let func_ty = if let TerminatorKind::Call { func, .. } = &term.kind {
+ Some(func.ty(body, self.infcx.tcx))
+ } else {
+ None
+ };
+ debug!(?func_ty);
+
+ for (n, (fn_arg, op_arg)) in iter::zip(sig.inputs(), args).enumerate() {
+ let op_arg_ty = op_arg.ty(body, self.tcx());
+
+ let op_arg_ty = self.normalize(op_arg_ty, term_location);
+ let category = if from_hir_call {
+ ConstraintCategory::CallArgument(func_ty)
+ } else {
+ ConstraintCategory::Boring
+ };
+ if let Err(terr) =
+ self.sub_types(op_arg_ty, *fn_arg, term_location.to_locations(), category)
+ {
+ span_mirbug!(
+ self,
+ term,
+ "bad arg #{:?} ({:?} <- {:?}): {:?}",
+ n,
+ fn_arg,
+ op_arg_ty,
+ terr
+ );
+ }
+ }
+ }
+
+ fn check_iscleanup(&mut self, body: &Body<'tcx>, block_data: &BasicBlockData<'tcx>) {
+ let is_cleanup = block_data.is_cleanup;
+ self.last_span = block_data.terminator().source_info.span;
+ match block_data.terminator().kind {
+ TerminatorKind::Goto { target } => {
+ self.assert_iscleanup(body, block_data, target, is_cleanup)
+ }
+ TerminatorKind::SwitchInt { ref targets, .. } => {
+ for target in targets.all_targets() {
+ self.assert_iscleanup(body, block_data, *target, is_cleanup);
+ }
+ }
+ TerminatorKind::Resume => {
+ if !is_cleanup {
+ span_mirbug!(self, block_data, "resume on non-cleanup block!")
+ }
+ }
+ TerminatorKind::Abort => {
+ if !is_cleanup {
+ span_mirbug!(self, block_data, "abort on non-cleanup block!")
+ }
+ }
+ TerminatorKind::Return => {
+ if is_cleanup {
+ span_mirbug!(self, block_data, "return on cleanup block")
+ }
+ }
+ TerminatorKind::GeneratorDrop { .. } => {
+ if is_cleanup {
+ span_mirbug!(self, block_data, "generator_drop in cleanup block")
+ }
+ }
+ TerminatorKind::Yield { resume, drop, .. } => {
+ if is_cleanup {
+ span_mirbug!(self, block_data, "yield in cleanup block")
+ }
+ self.assert_iscleanup(body, block_data, resume, is_cleanup);
+ if let Some(drop) = drop {
+ self.assert_iscleanup(body, block_data, drop, is_cleanup);
+ }
+ }
+ TerminatorKind::Unreachable => {}
+ TerminatorKind::Drop { target, unwind, .. }
+ | TerminatorKind::DropAndReplace { target, unwind, .. }
+ | TerminatorKind::Assert { target, cleanup: unwind, .. } => {
+ self.assert_iscleanup(body, block_data, target, is_cleanup);
+ if let Some(unwind) = unwind {
+ if is_cleanup {
+ span_mirbug!(self, block_data, "unwind on cleanup block")
+ }
+ self.assert_iscleanup(body, block_data, unwind, true);
+ }
+ }
+ TerminatorKind::Call { ref target, cleanup, .. } => {
+ if let &Some(target) = target {
+ self.assert_iscleanup(body, block_data, target, is_cleanup);
+ }
+ if let Some(cleanup) = cleanup {
+ if is_cleanup {
+ span_mirbug!(self, block_data, "cleanup on cleanup block")
+ }
+ self.assert_iscleanup(body, block_data, cleanup, true);
+ }
+ }
+ TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+ self.assert_iscleanup(body, block_data, real_target, is_cleanup);
+ self.assert_iscleanup(body, block_data, imaginary_target, is_cleanup);
+ }
+ TerminatorKind::FalseUnwind { real_target, unwind } => {
+ self.assert_iscleanup(body, block_data, real_target, is_cleanup);
+ if let Some(unwind) = unwind {
+ if is_cleanup {
+ span_mirbug!(self, block_data, "cleanup in cleanup block via false unwind");
+ }
+ self.assert_iscleanup(body, block_data, unwind, true);
+ }
+ }
+ TerminatorKind::InlineAsm { destination, cleanup, .. } => {
+ if let Some(target) = destination {
+ self.assert_iscleanup(body, block_data, target, is_cleanup);
+ }
+ if let Some(cleanup) = cleanup {
+ if is_cleanup {
+ span_mirbug!(self, block_data, "cleanup on cleanup block")
+ }
+ self.assert_iscleanup(body, block_data, cleanup, true);
+ }
+ }
+ }
+ }
+
+ fn assert_iscleanup(
+ &mut self,
+ body: &Body<'tcx>,
+ ctxt: &dyn fmt::Debug,
+ bb: BasicBlock,
+ iscleanuppad: bool,
+ ) {
+ if body[bb].is_cleanup != iscleanuppad {
+ span_mirbug!(self, ctxt, "cleanuppad mismatch: {:?} should be {:?}", bb, iscleanuppad);
+ }
+ }
+
+ fn check_local(&mut self, body: &Body<'tcx>, local: Local, local_decl: &LocalDecl<'tcx>) {
+ match body.local_kind(local) {
+ LocalKind::ReturnPointer | LocalKind::Arg => {
+ // Return values of normal functions are required to be
+ // sized by typeck, but return values of ADT constructors are
+ // not, because we don't include a `Self: Sized` bound on them.
+ //
+ // Unbound parts of arguments were never required to be Sized
+ // - maybe we should make that a warning.
+ return;
+ }
+ LocalKind::Var | LocalKind::Temp => {}
+ }
+
+ // When `unsized_fn_params` or `unsized_locals` is enabled, only function calls
+ // and nullary ops are checked in `check_call_dest`.
+ if !self.unsized_feature_enabled() {
+ let span = local_decl.source_info.span;
+ let ty = local_decl.ty;
+ self.ensure_place_sized(ty, span);
+ }
+ }
+
+ fn ensure_place_sized(&mut self, ty: Ty<'tcx>, span: Span) {
+ let tcx = self.tcx();
+
+ // Erase the regions from `ty` to get a global type. The
+ // `Sized` bound in no way depends on precise regions, so this
+ // shouldn't affect `is_sized`.
+ let erased_ty = tcx.erase_regions(ty);
+ if !erased_ty.is_sized(tcx.at(span), self.param_env) {
+ // In current MIR construction, all non-control-flow rvalue
+ // expressions evaluate through `as_temp` or `into` a return
+ // slot or local, so to find all unsized rvalues it is enough
+ // to check all temps, return slots and locals.
+ if self.reported_errors.replace((ty, span)).is_none() {
+ // While this is located in `nll::typeck`, this error is not
+ // an NLL error; it's a required check to prevent creation
+ // of unsized rvalues in a call expression.
+ self.tcx().sess.emit_err(MoveUnsized { ty, span });
+ }
+ }
+ }
+
+ fn aggregate_field_ty(
+ &mut self,
+ ak: &AggregateKind<'tcx>,
+ field_index: usize,
+ location: Location,
+ ) -> Result<Ty<'tcx>, FieldAccessError> {
+ let tcx = self.tcx();
+
+ match *ak {
+ AggregateKind::Adt(adt_did, variant_index, substs, _, active_field_index) => {
+ let def = tcx.adt_def(adt_did);
+ let variant = &def.variant(variant_index);
+ let adj_field_index = active_field_index.unwrap_or(field_index);
+ if let Some(field) = variant.fields.get(adj_field_index) {
+ Ok(self.normalize(field.ty(tcx, substs), location))
+ } else {
+ Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() })
+ }
+ }
+ AggregateKind::Closure(_, substs) => {
+ match substs.as_closure().upvar_tys().nth(field_index) {
+ Some(ty) => Ok(ty),
+ None => Err(FieldAccessError::OutOfRange {
+ field_count: substs.as_closure().upvar_tys().count(),
+ }),
+ }
+ }
+ AggregateKind::Generator(_, substs, _) => {
+ // It doesn't make sense to look at a field beyond the prefix;
+ // these require a variant index, and are not initialized in
+ // aggregate rvalues.
+ match substs.as_generator().prefix_tys().nth(field_index) {
+ Some(ty) => Ok(ty),
+ None => Err(FieldAccessError::OutOfRange {
+ field_count: substs.as_generator().prefix_tys().count(),
+ }),
+ }
+ }
+ AggregateKind::Array(ty) => Ok(ty),
+ AggregateKind::Tuple => {
+ unreachable!("This should have been covered in check_rvalues");
+ }
+ }
+ }
+
+ fn check_operand(&mut self, op: &Operand<'tcx>, location: Location) {
+ if let Operand::Constant(constant) = op {
+ let maybe_uneval = match constant.literal {
+ ConstantKind::Ty(ct) => match ct.kind() {
+ ty::ConstKind::Unevaluated(uv) => Some(uv),
+ _ => None,
+ },
+ _ => None,
+ };
+ if let Some(uv) = maybe_uneval {
+ if uv.promoted.is_none() {
+ let tcx = self.tcx();
+ let def_id = uv.def.def_id_for_type_of();
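+ // Inline `const { .. }` blocks are handled like closures here: any
+ // region requirements they propagate must be proven at this use site.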
+ if tcx.def_kind(def_id) == DefKind::InlineConst {
+ let def_id = def_id.expect_local();
+ let predicates =
+ self.prove_closure_bounds(tcx, def_id, uv.substs, location);
+ self.normalize_and_prove_instantiated_predicates(
+ def_id.to_def_id(),
+ predicates,
+ location.to_locations(),
+ );
+ }
+ }
+ }
+ }
+ }
+
+ #[instrument(skip(self, body), level = "debug")]
+ fn check_rvalue(&mut self, body: &Body<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) {
+ let tcx = self.tcx();
+
+ match rvalue {
+ Rvalue::Aggregate(ak, ops) => {
+ for op in ops {
+ self.check_operand(op, location);
+ }
+ self.check_aggregate_rvalue(&body, rvalue, ak, ops, location)
+ }
+
+ Rvalue::Repeat(operand, len) => {
+ self.check_operand(operand, location);
+
+ // If the length cannot be evaluated we must assume that the length can be larger
+ // than 1.
+ // If the length is larger than 1, the repeat expression will need to copy the
+ // element, so we require the `Copy` trait.
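+ // (Illustrative: for `[x; 2]` with a moved `x`, the second element
+ // must be a copy of the first, so `x`'s type must be `Copy`; `[x; 0]`
+ // and `[x; 1]` never copy and need no such bound.)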
+ if len.try_eval_usize(tcx, self.param_env).map_or(true, |len| len > 1) {
+ match operand {
+ Operand::Copy(..) | Operand::Constant(..) => {
+ // These are always okay: direct use of a const, or a value that can evidently be copied.
+ }
+ Operand::Move(place) => {
+ // Make sure that repeated elements implement `Copy`.
+ let span = body.source_info(location).span;
+ let ty = place.ty(body, tcx).ty;
+ let trait_ref = ty::TraitRef::new(
+ tcx.require_lang_item(LangItem::Copy, Some(span)),
+ tcx.mk_substs_trait(ty, &[]),
+ );
+
+ self.prove_trait_ref(
+ trait_ref,
+ Locations::Single(location),
+ ConstraintCategory::CopyBound,
+ );
+ }
+ }
+ }
+ }
+
+ &Rvalue::NullaryOp(_, ty) => {
+ let trait_ref = ty::TraitRef {
+ def_id: tcx.require_lang_item(LangItem::Sized, Some(self.last_span)),
+ substs: tcx.mk_substs_trait(ty, &[]),
+ };
+
+ self.prove_trait_ref(
+ trait_ref,
+ location.to_locations(),
+ ConstraintCategory::SizedBound,
+ );
+ }
+
+ Rvalue::ShallowInitBox(operand, ty) => {
+ self.check_operand(operand, location);
+
+ let trait_ref = ty::TraitRef {
+ def_id: tcx.require_lang_item(LangItem::Sized, Some(self.last_span)),
+ substs: tcx.mk_substs_trait(*ty, &[]),
+ };
+
+ self.prove_trait_ref(
+ trait_ref,
+ location.to_locations(),
+ ConstraintCategory::SizedBound,
+ );
+ }
+
+ Rvalue::Cast(cast_kind, op, ty) => {
+ self.check_operand(op, location);
+
+ match cast_kind {
+ CastKind::Pointer(PointerCast::ReifyFnPointer) => {
+ let fn_sig = op.ty(body, tcx).fn_sig(tcx);
+
+ // The type that we see in the fcx is like
+ // `foo::<'a, 'b>`, where `foo` is the path to a
+ // function definition. When we extract the
+ // signature, it comes from the `fn_sig` query,
+ // and hence may contain unnormalized results.
+ let fn_sig = self.normalize(fn_sig, location);
+
+ let ty_fn_ptr_from = tcx.mk_fn_ptr(fn_sig);
+
+ if let Err(terr) = self.eq_types(
+ *ty,
+ ty_fn_ptr_from,
+ location.to_locations(),
+ ConstraintCategory::Cast,
+ ) {
+ span_mirbug!(
+ self,
+ rvalue,
+ "equating {:?} with {:?} yields {:?}",
+ ty_fn_ptr_from,
+ ty,
+ terr
+ );
+ }
+ }
+
+ CastKind::Pointer(PointerCast::ClosureFnPointer(unsafety)) => {
+ let sig = match op.ty(body, tcx).kind() {
+ ty::Closure(_, substs) => substs.as_closure().sig(),
+ _ => bug!(),
+ };
+ let ty_fn_ptr_from = tcx.mk_fn_ptr(tcx.signature_unclosure(sig, *unsafety));
+
+ if let Err(terr) = self.eq_types(
+ *ty,
+ ty_fn_ptr_from,
+ location.to_locations(),
+ ConstraintCategory::Cast,
+ ) {
+ span_mirbug!(
+ self,
+ rvalue,
+ "equating {:?} with {:?} yields {:?}",
+ ty_fn_ptr_from,
+ ty,
+ terr
+ );
+ }
+ }
+
+ CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
+ let fn_sig = op.ty(body, tcx).fn_sig(tcx);
+
+ // The type that we see in the fcx is like
+ // `foo::<'a, 'b>`, where `foo` is the path to a
+ // function definition. When we extract the
+ // signature, it comes from the `fn_sig` query,
+ // and hence may contain unnormalized results.
+ let fn_sig = self.normalize(fn_sig, location);
+
+ let ty_fn_ptr_from = tcx.safe_to_unsafe_fn_ty(fn_sig);
+
+ if let Err(terr) = self.eq_types(
+ *ty,
+ ty_fn_ptr_from,
+ location.to_locations(),
+ ConstraintCategory::Cast,
+ ) {
+ span_mirbug!(
+ self,
+ rvalue,
+ "equating {:?} with {:?} yields {:?}",
+ ty_fn_ptr_from,
+ ty,
+ terr
+ );
+ }
+ }
+
+ CastKind::Pointer(PointerCast::Unsize) => {
+ let &ty = ty;
+ let trait_ref = ty::TraitRef {
+ def_id: tcx
+ .require_lang_item(LangItem::CoerceUnsized, Some(self.last_span)),
+ substs: tcx.mk_substs_trait(op.ty(body, tcx), &[ty.into()]),
+ };
+
+ self.prove_trait_ref(
+ trait_ref,
+ location.to_locations(),
+ ConstraintCategory::Cast,
+ );
+ }
+
+ CastKind::Pointer(PointerCast::MutToConstPointer) => {
+ let ty::RawPtr(ty::TypeAndMut {
+ ty: ty_from,
+ mutbl: hir::Mutability::Mut,
+ }) = op.ty(body, tcx).kind() else {
+ span_mirbug!(
+ self,
+ rvalue,
+ "unexpected base type for cast {:?}",
+ ty,
+ );
+ return;
+ };
+ let ty::RawPtr(ty::TypeAndMut {
+ ty: ty_to,
+ mutbl: hir::Mutability::Not,
+ }) = ty.kind() else {
+ span_mirbug!(
+ self,
+ rvalue,
+ "unexpected target type for cast {:?}",
+ ty,
+ );
+ return;
+ };
+ if let Err(terr) = self.sub_types(
+ *ty_from,
+ *ty_to,
+ location.to_locations(),
+ ConstraintCategory::Cast,
+ ) {
+ span_mirbug!(
+ self,
+ rvalue,
+ "relating {:?} with {:?} yields {:?}",
+ ty_from,
+ ty_to,
+ terr
+ );
+ }
+ }
+
+ CastKind::Pointer(PointerCast::ArrayToPointer) => {
+ let ty_from = op.ty(body, tcx);
+
+ let opt_ty_elem_mut = match ty_from.kind() {
+ ty::RawPtr(ty::TypeAndMut { mutbl: array_mut, ty: array_ty }) => {
+ match array_ty.kind() {
+ ty::Array(ty_elem, _) => Some((ty_elem, *array_mut)),
+ _ => None,
+ }
+ }
+ _ => None,
+ };
+
+ let Some((ty_elem, ty_mut)) = opt_ty_elem_mut else {
+ span_mirbug!(
+ self,
+ rvalue,
+ "ArrayToPointer cast from unexpected type {:?}",
+ ty_from,
+ );
+ return;
+ };
+
+ let (ty_to, ty_to_mut) = match ty.kind() {
+ ty::RawPtr(ty::TypeAndMut { mutbl: ty_to_mut, ty: ty_to }) => {
+ (ty_to, *ty_to_mut)
+ }
+ _ => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "ArrayToPointer cast to unexpected type {:?}",
+ ty,
+ );
+ return;
+ }
+ };
+
+ if ty_to_mut == Mutability::Mut && ty_mut == Mutability::Not {
+ span_mirbug!(
+ self,
+ rvalue,
+ "ArrayToPointer cast from const {:?} to mut {:?}",
+ ty,
+ ty_to
+ );
+ return;
+ }
+
+ if let Err(terr) = self.sub_types(
+ *ty_elem,
+ *ty_to,
+ location.to_locations(),
+ ConstraintCategory::Cast,
+ ) {
+ span_mirbug!(
+ self,
+ rvalue,
+ "relating {:?} with {:?} yields {:?}",
+ ty_elem,
+ ty_to,
+ terr
+ )
+ }
+ }
+
+ CastKind::PointerExposeAddress => {
+ let ty_from = op.ty(body, tcx);
+ let cast_ty_from = CastTy::from_ty(ty_from);
+ let cast_ty_to = CastTy::from_ty(*ty);
+ match (cast_ty_from, cast_ty_to) {
+ (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Int(_))) => (),
+ _ => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "Invalid PointerExposeAddress cast {:?} -> {:?}",
+ ty_from,
+ ty
+ )
+ }
+ }
+ }
+
+ CastKind::PointerFromExposedAddress => {
+ let ty_from = op.ty(body, tcx);
+ let cast_ty_from = CastTy::from_ty(ty_from);
+ let cast_ty_to = CastTy::from_ty(*ty);
+ match (cast_ty_from, cast_ty_to) {
+ (Some(CastTy::Int(_)), Some(CastTy::Ptr(_))) => (),
+ _ => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "Invalid PointerFromExposedAddress cast {:?} -> {:?}",
+ ty_from,
+ ty
+ )
+ }
+ }
+ }
+
+ CastKind::Misc => {
+ let ty_from = op.ty(body, tcx);
+ let cast_ty_from = CastTy::from_ty(ty_from);
+ let cast_ty_to = CastTy::from_ty(*ty);
+ // Misc casts are either between floats and ints, or from one ptr type to another.
+ match (cast_ty_from, cast_ty_to) {
+ (
+ Some(CastTy::Int(_) | CastTy::Float),
+ Some(CastTy::Int(_) | CastTy::Float),
+ )
+ | (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Ptr(_))) => (),
+ _ => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "Invalid Misc cast {:?} -> {:?}",
+ ty_from,
+ ty,
+ )
+ }
+ }
+ }
+ }
+ }
+
+ Rvalue::Ref(region, _borrow_kind, borrowed_place) => {
+ self.add_reborrow_constraint(&body, location, *region, borrowed_place);
+ }
+
+ Rvalue::BinaryOp(
+ BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge,
+ box (left, right),
+ ) => {
+ self.check_operand(left, location);
+ self.check_operand(right, location);
+
+ let ty_left = left.ty(body, tcx);
+ match ty_left.kind() {
+ // Types with regions are comparable if they have a common super-type.
+ ty::RawPtr(_) | ty::FnPtr(_) => {
+ let ty_right = right.ty(body, tcx);
+ let common_ty = self.infcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: body.source_info(location).span,
+ });
+ self.sub_types(
+ ty_left,
+ common_ty,
+ location.to_locations(),
+ ConstraintCategory::Boring,
+ )
+ .unwrap_or_else(|err| {
+ bug!("Could not equate type variable with {:?}: {:?}", ty_left, err)
+ });
+ if let Err(terr) = self.sub_types(
+ ty_right,
+ common_ty,
+ location.to_locations(),
+ ConstraintCategory::Boring,
+ ) {
+ span_mirbug!(
+ self,
+ rvalue,
+ "unexpected comparison types {:?} and {:?} yields {:?}",
+ ty_left,
+ ty_right,
+ terr
+ )
+ }
+ }
+ // For types with no regions we can just check that
+ // both operands have the same type.
+ ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char | ty::Float(_)
+ if ty_left == right.ty(body, tcx) => {}
+ // Other types are compared by trait methods, not by
+ // `Rvalue::BinaryOp`.
+ _ => span_mirbug!(
+ self,
+ rvalue,
+ "unexpected comparison types {:?} and {:?}",
+ ty_left,
+ right.ty(body, tcx)
+ ),
+ }
+ }
+
+ Rvalue::Use(operand) | Rvalue::UnaryOp(_, operand) => {
+ self.check_operand(operand, location);
+ }
+ Rvalue::CopyForDeref(place) => {
+ let op = &Operand::Copy(*place);
+ self.check_operand(op, location);
+ }
+
+ Rvalue::BinaryOp(_, box (left, right))
+ | Rvalue::CheckedBinaryOp(_, box (left, right)) => {
+ self.check_operand(left, location);
+ self.check_operand(right, location);
+ }
+
+ Rvalue::AddressOf(..)
+ | Rvalue::ThreadLocalRef(..)
+ | Rvalue::Len(..)
+ | Rvalue::Discriminant(..) => {}
+ }
+ }
+
+ /// If this rvalue supports a user-given type annotation, then
+ /// extract and return it. This represents the final type of the
+ /// rvalue and will be unified with the inferred type.
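+ ///
+ /// (Illustrative: an expression such as `Foo::<u32> { field }` lowers to an
+ /// `Rvalue::Aggregate` whose `AggregateKind::Adt` carries the user-given
+ /// `Foo<u32>` annotation; most other rvalues carry no annotation.)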
+ fn rvalue_user_ty(&self, rvalue: &Rvalue<'tcx>) -> Option<UserTypeAnnotationIndex> {
+ match rvalue {
+ Rvalue::Use(_)
+ | Rvalue::ThreadLocalRef(_)
+ | Rvalue::Repeat(..)
+ | Rvalue::Ref(..)
+ | Rvalue::AddressOf(..)
+ | Rvalue::Len(..)
+ | Rvalue::Cast(..)
+ | Rvalue::ShallowInitBox(..)
+ | Rvalue::BinaryOp(..)
+ | Rvalue::CheckedBinaryOp(..)
+ | Rvalue::NullaryOp(..)
+ | Rvalue::CopyForDeref(..)
+ | Rvalue::UnaryOp(..)
+ | Rvalue::Discriminant(..) => None,
+
+ Rvalue::Aggregate(aggregate, _) => match **aggregate {
+ AggregateKind::Adt(_, _, _, user_ty, _) => user_ty,
+ AggregateKind::Array(_) => None,
+ AggregateKind::Tuple => None,
+ AggregateKind::Closure(_, _) => None,
+ AggregateKind::Generator(_, _, _) => None,
+ },
+ }
+ }
+
+ fn check_aggregate_rvalue(
+ &mut self,
+ body: &Body<'tcx>,
+ rvalue: &Rvalue<'tcx>,
+ aggregate_kind: &AggregateKind<'tcx>,
+ operands: &[Operand<'tcx>],
+ location: Location,
+ ) {
+ let tcx = self.tcx();
+
+ self.prove_aggregate_predicates(aggregate_kind, location);
+
+ if *aggregate_kind == AggregateKind::Tuple {
+ // Tuple rvalue field types are always the operand types; nothing to check here.
+ return;
+ }
+
+ for (i, operand) in operands.iter().enumerate() {
+ let field_ty = match self.aggregate_field_ty(aggregate_kind, i, location) {
+ Ok(field_ty) => field_ty,
+ Err(FieldAccessError::OutOfRange { field_count }) => {
+ span_mirbug!(
+ self,
+ rvalue,
+ "accessed field #{} but variant only has {}",
+ i,
+ field_count
+ );
+ continue;
+ }
+ };
+ let operand_ty = operand.ty(body, tcx);
+ let operand_ty = self.normalize(operand_ty, location);
+
+ if let Err(terr) = self.sub_types(
+ operand_ty,
+ field_ty,
+ location.to_locations(),
+ ConstraintCategory::Boring,
+ ) {
+ span_mirbug!(
+ self,
+ rvalue,
+ "{:?} is not a subtype of {:?}: {:?}",
+ operand_ty,
+ field_ty,
+ terr
+ );
+ }
+ }
+ }
+
+ /// Adds the constraints that arise from a borrow expression `&'a P` at the location `L`.
+ ///
+ /// # Parameters
+ ///
+ /// - `location`: the location `L` where the borrow expression occurs
+ /// - `borrow_region`: the region `'a` associated with the borrow
+ /// - `borrowed_place`: the place `P` being borrowed
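+ ///
+ /// For example (an illustrative sketch), given `p: &'b mut Foo`, the
+ /// reborrow
+ ///
+ /// ```ignore (illustrative)
+ /// let q: &'a mut Foo = &mut *p;
+ /// ```
+ ///
+ /// adds the relationship `'b: 'a`, since the reborrow must not outlive
+ /// the reference it goes through.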
+ fn add_reborrow_constraint(
+ &mut self,
+ body: &Body<'tcx>,
+ location: Location,
+ borrow_region: ty::Region<'tcx>,
+ borrowed_place: &Place<'tcx>,
+ ) {
+ // These constraints are only meaningful during borrowck:
+ let BorrowCheckContext { borrow_set, location_table, all_facts, constraints, .. } =
+ self.borrowck_context;
+
+ // In Polonius mode, we also push a `loan_issued_at` fact
+ // linking the loan to the region (in some cases, though,
+ // there is no loan associated with this borrow expression --
+ // that occurs when we are borrowing an unsafe place, for
+ // example).
+ if let Some(all_facts) = all_facts {
+ let _prof_timer = self.infcx.tcx.prof.generic_activity("polonius_fact_generation");
+ if let Some(borrow_index) = borrow_set.get_index_of(&location) {
+ let region_vid = borrow_region.to_region_vid();
+ all_facts.loan_issued_at.push((
+ region_vid,
+ borrow_index,
+ location_table.mid_index(location),
+ ));
+ }
+ }
+
+ // If we are reborrowing the referent of another reference, we
+ // need to add outlives relationships. In a case like `&mut
+ // *p`, where `p` has type `&'b mut Foo`, for example, we
+ // need to ensure that `'b: 'a`.
+
+ debug!(
+ "add_reborrow_constraint({:?}, {:?}, {:?})",
+ location, borrow_region, borrowed_place
+ );
+
+ let mut cursor = borrowed_place.projection.as_ref();
+ let tcx = self.infcx.tcx;
+ let field = path_utils::is_upvar_field_projection(
+ tcx,
+ &self.borrowck_context.upvars,
+ borrowed_place.as_ref(),
+ body,
+ );
+ let category = if let Some(field) = field {
+ ConstraintCategory::ClosureUpvar(field)
+ } else {
+ ConstraintCategory::Boring
+ };
+
+ while let [proj_base @ .., elem] = cursor {
+ cursor = proj_base;
+
+ debug!("add_reborrow_constraint - iteration {:?}", elem);
+
+ match elem {
+ ProjectionElem::Deref => {
+ let base_ty = Place::ty_from(borrowed_place.local, proj_base, body, tcx).ty;
+
+ debug!("add_reborrow_constraint - base_ty = {:?}", base_ty);
+ match base_ty.kind() {
+ ty::Ref(ref_region, _, mutbl) => {
+ constraints.outlives_constraints.push(OutlivesConstraint {
+ sup: ref_region.to_region_vid(),
+ sub: borrow_region.to_region_vid(),
+ locations: location.to_locations(),
+ span: location.to_locations().span(body),
+ category,
+ variance_info: ty::VarianceDiagInfo::default(),
+ });
+
+ match mutbl {
+ hir::Mutability::Not => {
+ // Immutable reference. We don't need the base
+ // to be valid for the entire lifetime of
+ // the borrow.
+ break;
+ }
+ hir::Mutability::Mut => {
+ // Mutable reference. We *do* need the base
+ // to be valid, because after the base becomes
+ // invalid, someone else can use our mutable deref.
+
+ // This is in order to make the following function
+ // illegal:
+ // ```
+ // fn unsafe_deref<'a, 'b>(x: &'a &'b mut T) -> &'b mut T {
+ // &mut *x
+ // }
+ // ```
+ //
+ // As otherwise you could clone `&mut T` using the
+ // following function:
+ // ```
+ // fn bad(x: &mut T) -> (&mut T, &mut T) {
+ // let my_clone = unsafe_deref(&'a x);
+ // ENDREGION 'a;
+ // (my_clone, x)
+ // }
+ // ```
+ }
+ }
+ }
+ ty::RawPtr(..) => {
+ // deref of raw pointer, guaranteed to be valid
+ break;
+ }
+ ty::Adt(def, _) if def.is_box() => {
+ // deref of `Box`, need the base to be valid - propagate
+ }
+ _ => bug!("unexpected deref ty {:?} in {:?}", base_ty, borrowed_place),
+ }
+ }
+ ProjectionElem::Field(..)
+ | ProjectionElem::Downcast(..)
+ | ProjectionElem::Index(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => {
+ // other field access
+ }
+ }
+ }
+ }
+
+ fn prove_aggregate_predicates(
+ &mut self,
+ aggregate_kind: &AggregateKind<'tcx>,
+ location: Location,
+ ) {
+ let tcx = self.tcx();
+
+ debug!(
+ "prove_aggregate_predicates(aggregate_kind={:?}, location={:?})",
+ aggregate_kind, location
+ );
+
+ let (def_id, instantiated_predicates) = match *aggregate_kind {
+ AggregateKind::Adt(adt_did, _, substs, _, _) => {
+ (adt_did, tcx.predicates_of(adt_did).instantiate(tcx, substs))
+ }
+
+ // For closures, we have some **extra requirements** we
+ // have to check. In particular, in their upvars and
+ // signatures, closures often reference various regions
+ // from the surrounding function -- we call those the
+ // closure's free regions. When we borrow-check (and hence
+ // region-check) closures, we may find that the closure
+ // requires certain relationships between those free
+ // regions. However, because those free regions refer to
+ // portions of the CFG of their caller, the closure is not
+ // in a position to verify those relationships. In that
+ // case, the requirements get "propagated" to us, and so
+ // we have to solve them here where we instantiate the
+ // closure.
+ //
+ // Despite the opacity of the previous paragraph, this is
+ // actually relatively easy to understand in terms of the
+ // desugaring. A closure gets desugared to a struct, and
+ // these extra requirements are basically like where
+ // clauses on the struct.
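+ //
+ // An illustrative sketch (hypothetical caller, not from this crate):
+ // ```
+ // fn caller<'a, 'b>(x: &'a u32, y: &'b u32) {
+ //     let c = move || (x, y); // suppose checking `c` needs `'a: 'b`
+ // }
+ // ```
+ // The closure body alone cannot prove `'a: 'b`, so the requirement
+ // is propagated outward and proven here, where `caller` instantiates
+ // the closure.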
+ AggregateKind::Closure(def_id, substs)
+ | AggregateKind::Generator(def_id, substs, _) => {
+ (def_id.to_def_id(), self.prove_closure_bounds(tcx, def_id, substs, location))
+ }
+
+ AggregateKind::Array(_) | AggregateKind::Tuple => {
+ (CRATE_DEF_ID.to_def_id(), ty::InstantiatedPredicates::empty())
+ }
+ };
+
+ self.normalize_and_prove_instantiated_predicates(
+ def_id,
+ instantiated_predicates,
+ location.to_locations(),
+ );
+ }
+
+ fn prove_closure_bounds(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ substs: SubstsRef<'tcx>,
+ location: Location,
+ ) -> ty::InstantiatedPredicates<'tcx> {
+ if let Some(ref closure_region_requirements) = tcx.mir_borrowck(def_id).closure_requirements
+ {
+ let closure_constraints = QueryRegionConstraints {
+ outlives: closure_region_requirements.apply_requirements(
+ tcx,
+ def_id.to_def_id(),
+ substs,
+ ),
+
+ // Presently, closures never propagate member
+ // constraints to their parents -- they are enforced
+ // locally. This is largely a non-issue as member
+ // constraints only come from `-> impl Trait` and
+ // friends which don't appear (thus far...) in
+ // closures.
+ member_constraints: vec![],
+ };
+
+ let bounds_mapping = closure_constraints
+ .outlives
+ .iter()
+ .enumerate()
+ .filter_map(|(idx, constraint)| {
+ let ty::OutlivesPredicate(k1, r2) =
+ constraint.no_bound_vars().unwrap_or_else(|| {
+ bug!("query_constraint {:?} contained bound vars", constraint,);
+ });
+
+ match k1.unpack() {
+ GenericArgKind::Lifetime(r1) => {
+ // constraint is r1: r2
+ let r1_vid = self.borrowck_context.universal_regions.to_region_vid(r1);
+ let r2_vid = self.borrowck_context.universal_regions.to_region_vid(r2);
+ let outlives_requirements =
+ &closure_region_requirements.outlives_requirements[idx];
+ Some((
+ (r1_vid, r2_vid),
+ (outlives_requirements.category, outlives_requirements.blame_span),
+ ))
+ }
+ GenericArgKind::Type(_) | GenericArgKind::Const(_) => None,
+ }
+ })
+ .collect();
+
+ let existing = self
+ .borrowck_context
+ .constraints
+ .closure_bounds_mapping
+ .insert(location, bounds_mapping);
+ assert!(existing.is_none(), "Multiple closures at the same location.");
+
+ self.push_region_constraints(
+ location.to_locations(),
+ ConstraintCategory::ClosureBounds,
+ &closure_constraints,
+ );
+ }
+
+ // Now equate closure substs to regions inherited from `typeck_root_def_id`. Fixes #98589.
+ let typeck_root_def_id = tcx.typeck_root_def_id(self.body.source.def_id());
+ let typeck_root_substs = ty::InternalSubsts::identity_for_item(tcx, typeck_root_def_id);
+
+ let parent_substs = match tcx.def_kind(def_id) {
+ DefKind::Closure => substs.as_closure().parent_substs(),
+ DefKind::Generator => substs.as_generator().parent_substs(),
+ DefKind::InlineConst => substs.as_inline_const().parent_substs(),
+ other => bug!("unexpected item {:?}", other),
+ };
+ let parent_substs = tcx.mk_substs(parent_substs.iter());
+
+ assert_eq!(typeck_root_substs.len(), parent_substs.len());
+ if let Err(_) = self.eq_substs(
+ typeck_root_substs,
+ parent_substs,
+ location.to_locations(),
+ ConstraintCategory::BoringNoLocation,
+ ) {
+ span_mirbug!(
+ self,
+ def_id,
+ "could not relate closure to parent {:?} != {:?}",
+ typeck_root_substs,
+ parent_substs
+ );
+ }
+
+ tcx.predicates_of(def_id).instantiate(tcx, substs)
+ }
+
+ #[instrument(skip(self, body), level = "debug")]
+ fn typeck_mir(&mut self, body: &Body<'tcx>) {
+ self.last_span = body.span;
+ debug!(?body.span);
+
+ for (local, local_decl) in body.local_decls.iter_enumerated() {
+ self.check_local(&body, local, local_decl);
+ }
+
+ for (block, block_data) in body.basic_blocks().iter_enumerated() {
+ let mut location = Location { block, statement_index: 0 };
+ for stmt in &block_data.statements {
+ if !stmt.source_info.span.is_dummy() {
+ self.last_span = stmt.source_info.span;
+ }
+ self.check_stmt(body, stmt, location);
+ location.statement_index += 1;
+ }
+
+ self.check_terminator(&body, block_data.terminator(), location);
+ self.check_iscleanup(&body, block_data);
+ }
+ }
+}
+
+trait NormalizeLocation: fmt::Debug + Copy {
+ fn to_locations(self) -> Locations;
+}
+
+impl NormalizeLocation for Locations {
+ fn to_locations(self) -> Locations {
+ self
+ }
+}
+
+impl NormalizeLocation for Location {
+ fn to_locations(self) -> Locations {
+ Locations::Single(self)
+ }
+}
+
+/// Runs `infcx.instantiate_opaque_types`. Unlike other `TypeOp`s,
+/// this is not canonicalized - it directly affects the main `InferCtxt`
+/// that we use during MIR borrowchecking.
+#[derive(Debug)]
+pub(super) struct InstantiateOpaqueType<'tcx> {
+ pub base_universe: Option<ty::UniverseIndex>,
+ pub region_constraints: Option<RegionConstraintData<'tcx>>,
+ pub obligations: Vec<PredicateObligation<'tcx>>,
+}
+
+impl<'tcx> TypeOp<'tcx> for InstantiateOpaqueType<'tcx> {
+ type Output = ();
+ /// We use this type itself to store the information used
+ /// when reporting errors. Since this is not a query, we don't
+ /// re-run anything during error reporting - we just use the information
+ /// we saved to help extract an error from the already-existing region
+ /// constraints in our `InferCtxt`.
+ type ErrorInfo = InstantiateOpaqueType<'tcx>;
+
+ fn fully_perform(mut self, infcx: &InferCtxt<'_, 'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>> {
+ let (mut output, region_constraints) = scrape_region_constraints(infcx, || {
+ Ok(InferOk { value: (), obligations: self.obligations.clone() })
+ })?;
+ self.region_constraints = Some(region_constraints);
+ output.error_info = Some(self);
+ Ok(output)
+ }
+}
diff --git a/compiler/rustc_borrowck/src/type_check/relate_tys.rs b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
new file mode 100644
index 000000000..c97a6a1a6
--- /dev/null
+++ b/compiler/rustc_borrowck/src/type_check/relate_tys.rs
@@ -0,0 +1,187 @@
+use rustc_infer::infer::nll_relate::{NormalizationStrategy, TypeRelating, TypeRelatingDelegate};
+use rustc_infer::infer::NllRegionVariableOrigin;
+use rustc_infer::traits::ObligationCause;
+use rustc_middle::mir::ConstraintCategory;
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::relate::TypeRelation;
+use rustc_middle::ty::{self, Const, Ty};
+use rustc_span::Span;
+use rustc_trait_selection::traits::query::Fallible;
+
+use crate::constraints::OutlivesConstraint;
+use crate::diagnostics::UniverseInfo;
+use crate::type_check::{InstantiateOpaqueType, Locations, TypeChecker};
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+ /// Adds sufficient constraints to ensure that `a R b` where `R` depends on `v`:
+ ///
+ /// - "Covariant" `a <: b`
+ /// - "Invariant" `a == b`
+ /// - "Contravariant" `a :> b`
+ ///
+ /// N.B., the type `a` is permitted to have unresolved inference
+ /// variables, but not the type `b`.
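+ ///
+ /// (Illustrative: relating `&'a u32 <: &'b u32` pushes the outlives
+ /// constraint `'a: 'b`, since a reference's lifetime may be shortened
+ /// but never lengthened.)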
+ #[instrument(skip(self), level = "debug")]
+ pub(super) fn relate_types(
+ &mut self,
+ a: Ty<'tcx>,
+ v: ty::Variance,
+ b: Ty<'tcx>,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ ) -> Fallible<()> {
+ TypeRelating::new(
+ self.infcx,
+ NllTypeRelatingDelegate::new(self, locations, category, UniverseInfo::relate(a, b)),
+ v,
+ )
+ .relate(a, b)?;
+ Ok(())
+ }
+
+ /// Add sufficient constraints to ensure `a == b`. See also [Self::relate_types].
+ pub(super) fn eq_substs(
+ &mut self,
+ a: ty::SubstsRef<'tcx>,
+ b: ty::SubstsRef<'tcx>,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ ) -> Fallible<()> {
+ TypeRelating::new(
+ self.infcx,
+ NllTypeRelatingDelegate::new(self, locations, category, UniverseInfo::other()),
+ ty::Variance::Invariant,
+ )
+ .relate(a, b)?;
+ Ok(())
+ }
+}
+
+struct NllTypeRelatingDelegate<'me, 'bccx, 'tcx> {
+ type_checker: &'me mut TypeChecker<'bccx, 'tcx>,
+
+ /// Where (and why) is this relation taking place?
+ locations: Locations,
+
+ /// What category do we assign the resulting `'a: 'b` relationships?
+ category: ConstraintCategory<'tcx>,
+
+ /// Information so that error reporting knows what types we are relating
+ /// when reporting a bound region error.
+ universe_info: UniverseInfo<'tcx>,
+}
+
+impl<'me, 'bccx, 'tcx> NllTypeRelatingDelegate<'me, 'bccx, 'tcx> {
+ fn new(
+ type_checker: &'me mut TypeChecker<'bccx, 'tcx>,
+ locations: Locations,
+ category: ConstraintCategory<'tcx>,
+ universe_info: UniverseInfo<'tcx>,
+ ) -> Self {
+ Self { type_checker, locations, category, universe_info }
+ }
+}
+
+impl<'tcx> TypeRelatingDelegate<'tcx> for NllTypeRelatingDelegate<'_, '_, 'tcx> {
+ fn span(&self) -> Span {
+ self.locations.span(self.type_checker.body)
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.type_checker.param_env
+ }
+
+ fn create_next_universe(&mut self) -> ty::UniverseIndex {
+ let universe = self.type_checker.infcx.create_next_universe();
+ self.type_checker
+ .borrowck_context
+ .constraints
+ .universe_causes
+ .insert(universe, self.universe_info.clone());
+ universe
+ }
+
+ fn next_existential_region_var(&mut self, from_forall: bool) -> ty::Region<'tcx> {
+ let origin = NllRegionVariableOrigin::Existential { from_forall };
+ self.type_checker.infcx.next_nll_region_var(origin)
+ }
+
+ fn next_placeholder_region(&mut self, placeholder: ty::PlaceholderRegion) -> ty::Region<'tcx> {
+ self.type_checker
+ .borrowck_context
+ .constraints
+ .placeholder_region(self.type_checker.infcx, placeholder)
+ }
+
+ fn generalize_existential(&mut self, universe: ty::UniverseIndex) -> ty::Region<'tcx> {
+ self.type_checker.infcx.next_nll_region_var_in_universe(
+ NllRegionVariableOrigin::Existential { from_forall: false },
+ universe,
+ )
+ }
+
+ fn push_outlives(
+ &mut self,
+ sup: ty::Region<'tcx>,
+ sub: ty::Region<'tcx>,
+ info: ty::VarianceDiagInfo<'tcx>,
+ ) {
+ let sub = self.type_checker.borrowck_context.universal_regions.to_region_vid(sub);
+ let sup = self.type_checker.borrowck_context.universal_regions.to_region_vid(sup);
+ self.type_checker.borrowck_context.constraints.outlives_constraints.push(
+ OutlivesConstraint {
+ sup,
+ sub,
+ locations: self.locations,
+ span: self.locations.span(self.type_checker.body),
+ category: self.category,
+ variance_info: info,
+ },
+ );
+ }
+
+ // We don't have to worry about the equality of consts during borrow checking
+ // as consts always have a static lifetime.
+ // FIXME(oli-obk): is this really true? We can at least have HKL
+ // (higher-kinded lifetimes), and with inline consts we may have further
+ // lifetimes that may be unsound to treat as 'static.
+ fn const_equate(&mut self, _a: Const<'tcx>, _b: Const<'tcx>) {}
+
+ fn normalization() -> NormalizationStrategy {
+ NormalizationStrategy::Eager
+ }
+
+ fn forbid_inference_vars() -> bool {
+ true
+ }
+
+ fn register_opaque_type(
+ &mut self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ a_is_expected: bool,
+ ) -> Result<(), TypeError<'tcx>> {
+ let param_env = self.param_env();
+ let span = self.span();
+ let def_id = self.type_checker.body.source.def_id().expect_local();
+ let body_id = self.type_checker.tcx().hir().local_def_id_to_hir_id(def_id);
+ let cause = ObligationCause::misc(span, body_id);
+ self.type_checker
+ .fully_perform_op(
+ self.locations,
+ self.category,
+ InstantiateOpaqueType {
+ obligations: self
+ .type_checker
+ .infcx
+ .handle_opaque_type(a, b, a_is_expected, &cause, param_env)?
+ .obligations,
+ // These fields are filled in during execution of the operation
+ base_universe: None,
+ region_constraints: None,
+ },
+ )
+ .unwrap();
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs
new file mode 100644
index 000000000..2a7713bc4
--- /dev/null
+++ b/compiler/rustc_borrowck/src/universal_regions.rs
@@ -0,0 +1,841 @@
+//! Code to extract the universally quantified regions declared on a
+//! function and the relationships between them. For example:
+//!
+//! ```
+//! fn foo<'a, 'b, 'c: 'b>() { }
+//! ```
+//!
+//! here we would return a map assigning each of `{'a, 'b, 'c}`
+//! to an index, as well as the `FreeRegionMap` which can compute
+//! relationships between them.
+//!
+//! The code in this file doesn't *do anything* with those results; it
+//! just returns them for other code to use.
+
+use either::Either;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::Diagnostic;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{BodyOwnerKind, HirId};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
+use rustc_middle::ty::{self, InlineConstSubsts, InlineConstSubstsParts, RegionVid, Ty, TyCtxt};
+use std::iter;
+
+use crate::nll::ToRegionVid;
+
+#[derive(Debug)]
+pub struct UniversalRegions<'tcx> {
+ indices: UniversalRegionIndices<'tcx>,
+
+ /// The vid assigned to `'static`
+ pub fr_static: RegionVid,
+
+ /// A special region vid created to represent the current MIR fn
+ /// body. It will outlive the entire CFG but it will not outlive
+ /// any other universal regions.
+ pub fr_fn_body: RegionVid,
+
+ /// We create region variables such that they are ordered by their
+ /// `RegionClassification`. The first block are globals, then
+ /// externals, then locals. So, things from:
+ /// - `FIRST_GLOBAL_INDEX..first_extern_index` are global,
+ /// - `first_extern_index..first_local_index` are external,
+ /// - `first_local_index..num_universals` are local.
+ first_extern_index: usize,
+
+ /// See `first_extern_index`.
+ first_local_index: usize,
+
+ /// The total number of universal region variables instantiated.
+ num_universals: usize,
+
+ /// A special region variable created for the `'empty(U0)` region.
+ /// Note that this is **not** a "universal" region, as it doesn't
+ /// represent a universally bound placeholder or any such thing.
+ /// But we do create it here in this type because it's a useful region
+ /// to have around in a few limited cases.
+ pub root_empty: RegionVid,
+
+ /// The "defining" type for this function, with all universal
+ /// regions instantiated. For a closure or generator, this is the
+ /// closure type, but for a top-level function it's the `FnDef`.
+ pub defining_ty: DefiningTy<'tcx>,
+
+ /// The return type of this function, with all regions replaced by
+ /// their universal `RegionVid` equivalents.
+ ///
+ /// N.B., associated types in this type have not been normalized,
+ /// as the name suggests. =)
+ pub unnormalized_output_ty: Ty<'tcx>,
+
+ /// The fully liberated input types of this function, with all
+ /// regions replaced by their universal `RegionVid` equivalents.
+ ///
+ /// N.B., associated types in these types have not been normalized,
+ /// as the name suggests. =)
+ pub unnormalized_input_tys: &'tcx [Ty<'tcx>],
+
+ pub yield_ty: Option<Ty<'tcx>>,
+}
+
+/// The "defining type" for this MIR. The key feature of the "defining
+/// type" is that it contains the information needed to derive all the
+/// universal regions that are in scope as well as the types of the
+/// inputs/output from the MIR. In general, early-bound universal
+/// regions appear free in the defining type and late-bound regions
+/// appear bound in the signature.
+#[derive(Copy, Clone, Debug)]
+pub enum DefiningTy<'tcx> {
+ /// The MIR is a closure. The signature is found via
+ /// `ClosureSubsts::closure_sig_ty`.
+ Closure(DefId, SubstsRef<'tcx>),
+
+ /// The MIR is a generator. The signature is that generators take
+ /// no parameters and return the result of
+ /// `ClosureSubsts::generator_return_ty`.
+ Generator(DefId, SubstsRef<'tcx>, hir::Movability),
+
+ /// The MIR is a fn item with the given `DefId` and substs. The signature
+ /// of the function can be bound then with the `fn_sig` query.
+ FnDef(DefId, SubstsRef<'tcx>),
+
+ /// The MIR represents some form of constant. The signature then
+ /// is that it has no inputs and a single return value, which is
+ /// the value of the constant.
+ Const(DefId, SubstsRef<'tcx>),
+
+ /// The MIR represents an inline const. The signature has no inputs and a
+ /// single return value found via `InlineConstSubsts::ty`.
+ InlineConst(DefId, SubstsRef<'tcx>),
+}
+
+impl<'tcx> DefiningTy<'tcx> {
+ /// Returns a list of all the upvar types for this MIR. If this is
+ /// not a closure or generator, there are no upvars, and hence it
+ /// will be an empty list. The order of types in this list will
+ /// match up with the upvar order in the HIR, typesystem, and MIR.
+ pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ match self {
+ DefiningTy::Closure(_, substs) => Either::Left(substs.as_closure().upvar_tys()),
+ DefiningTy::Generator(_, substs, _) => {
+ Either::Right(Either::Left(substs.as_generator().upvar_tys()))
+ }
+ DefiningTy::FnDef(..) | DefiningTy::Const(..) | DefiningTy::InlineConst(..) => {
+ Either::Right(Either::Right(iter::empty()))
+ }
+ }
+ }
+
+ /// Number of implicit inputs -- notably the "environment"
+ /// parameter for closures -- that appear in MIR but not in the
+ /// user's code.
+ pub fn implicit_inputs(self) -> usize {
+ match self {
+ DefiningTy::Closure(..) | DefiningTy::Generator(..) => 1,
+ DefiningTy::FnDef(..) | DefiningTy::Const(..) | DefiningTy::InlineConst(..) => 0,
+ }
+ }
+
+ pub fn is_fn_def(&self) -> bool {
+ matches!(*self, DefiningTy::FnDef(..))
+ }
+
+ pub fn is_const(&self) -> bool {
+ matches!(*self, DefiningTy::Const(..) | DefiningTy::InlineConst(..))
+ }
+
+ pub fn def_id(&self) -> DefId {
+ match *self {
+ DefiningTy::Closure(def_id, ..)
+ | DefiningTy::Generator(def_id, ..)
+ | DefiningTy::FnDef(def_id, ..)
+ | DefiningTy::Const(def_id, ..)
+ | DefiningTy::InlineConst(def_id, ..) => def_id,
+ }
+ }
+}
+
+#[derive(Debug)]
+struct UniversalRegionIndices<'tcx> {
+ /// For those regions that may appear in the parameter environment
+ /// ('static and early-bound regions), we maintain a map from the
+ /// `ty::Region` to the internal `RegionVid` we are using. This is
+ /// used because trait matching and type-checking will feed us
+ /// region constraints that reference those regions and we need to
+ /// be able to map them to our internal `RegionVid`. This is
+ /// basically equivalent to an `InternalSubsts`, except that it also
+ /// contains an entry for `ReStatic` -- it might be nice to just
+ /// use a substs, and then handle `ReStatic` another way.
+ indices: FxHashMap<ty::Region<'tcx>, RegionVid>,
+}
+
+#[derive(Debug, PartialEq)]
+pub enum RegionClassification {
+ /// A **global** region is one that can be named from
+ /// anywhere. There is only one, `'static`.
+ Global,
+
+ /// An **external** region is only relevant for
+ /// closures, generators, and inline consts. In that
+ /// case, it refers to regions that are free in the type
+ /// -- basically, something bound in the surrounding context.
+ ///
+ /// Consider this example:
+ ///
+ /// ```ignore (pseudo-rust)
+ /// fn foo<'a, 'b>(a: &'a u32, b: &'b u32, c: &'static u32) {
+ /// let closure = for<'x> |x: &'x u32| { .. };
+ /// // ^^^^^^^ pretend this were legal syntax
+ /// // for declaring a late-bound region in
+ /// // a closure signature
+ /// }
+ /// ```
+ ///
+ /// Here, the lifetimes `'a` and `'b` would be **external** to the
+ /// closure.
+ ///
+ /// If we are not analyzing a closure/generator/inline-const,
+ /// there are no external lifetimes.
+ External,
+
+ /// A **local** lifetime is one about which we know the full set
+ /// of relevant constraints (that is, relationships to other named
+ /// regions). For a closure, this includes any region bound in
+ /// the closure's signature. For a fn item, this includes all
+ /// regions other than global ones.
+ ///
+ /// Continuing with the example from `External`, if we were
+ /// analyzing the closure, then `'x` would be local (and `'a` and
+ /// `'b` are external). If we are analyzing the function item
+ /// `foo`, then `'a` and `'b` are local (and `'x` is not in
+ /// scope).
+ Local,
+}
+
+const FIRST_GLOBAL_INDEX: usize = 0;
+
+impl<'tcx> UniversalRegions<'tcx> {
+ /// Creates a new and fully initialized `UniversalRegions` that
+ /// contains indices for all the free regions found in the given
+ /// MIR -- that is, all the regions that appear in the function's
+ /// signature. This will also compute the relationships that are
+ /// known between those regions.
+ pub fn new(
+ infcx: &InferCtxt<'_, 'tcx>,
+ mir_def: ty::WithOptConstParam<LocalDefId>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ let tcx = infcx.tcx;
+ let mir_hir_id = tcx.hir().local_def_id_to_hir_id(mir_def.did);
+ UniversalRegionsBuilder { infcx, mir_def, mir_hir_id, param_env }.build()
+ }
+
+ /// Given a reference to a closure type, extracts all the values
+ /// from its free regions and returns a vector with them. This is
+ /// used when the closure's creator checks that the
+ /// `ClosureRegionRequirements` are met. The requirements from
+ /// `ClosureRegionRequirements` are expressed in terms of
+ /// `RegionVid` entries that map into the returned vector `V`: so
+ /// if the `ClosureRegionRequirements` contains something like
+ /// `'1: '2`, then the caller would impose the constraint that
+ /// `V[1]: V[2]`.
+ pub fn closure_mapping(
+ tcx: TyCtxt<'tcx>,
+ closure_substs: SubstsRef<'tcx>,
+ expected_num_vars: usize,
+ typeck_root_def_id: DefId,
+ ) -> IndexVec<RegionVid, ty::Region<'tcx>> {
+ let mut region_mapping = IndexVec::with_capacity(expected_num_vars);
+ region_mapping.push(tcx.lifetimes.re_static);
+ tcx.for_each_free_region(&closure_substs, |fr| {
+ region_mapping.push(fr);
+ });
+
+ for_each_late_bound_region_defined_on(tcx, typeck_root_def_id, |r| {
+ region_mapping.push(r);
+ });
+
+ assert_eq!(
+ region_mapping.len(),
+ expected_num_vars,
+ "index vec had unexpected number of variables"
+ );
+
+ region_mapping
+ }
+
+ /// Returns `true` if `r` is a member of this set of universal regions.
+ pub fn is_universal_region(&self, r: RegionVid) -> bool {
+ (FIRST_GLOBAL_INDEX..self.num_universals).contains(&r.index())
+ }
+
+ /// Classifies `r` as a universal region, returning `None` if this
+ /// is not a member of this set of universal regions.
+ pub fn region_classification(&self, r: RegionVid) -> Option<RegionClassification> {
+ let index = r.index();
+ if (FIRST_GLOBAL_INDEX..self.first_extern_index).contains(&index) {
+ Some(RegionClassification::Global)
+ } else if (self.first_extern_index..self.first_local_index).contains(&index) {
+ Some(RegionClassification::External)
+ } else if (self.first_local_index..self.num_universals).contains(&index) {
+ Some(RegionClassification::Local)
+ } else {
+ None
+ }
+ }
+
+ /// Returns an iterator over all the RegionVids corresponding to
+ /// universally quantified free regions.
+ pub fn universal_regions(&self) -> impl Iterator<Item = RegionVid> {
+ (FIRST_GLOBAL_INDEX..self.num_universals).map(RegionVid::new)
+ }
+
+    /// Returns `true` if `r` is classified as a local region.
+ pub fn is_local_free_region(&self, r: RegionVid) -> bool {
+ self.region_classification(r) == Some(RegionClassification::Local)
+ }
+
+ /// Returns the number of universal regions created in any category.
+ pub fn len(&self) -> usize {
+ self.num_universals
+ }
+
+ /// Returns the number of global plus external universal regions.
+ /// For closures, these are the regions that appear free in the
+ /// closure type (versus those bound in the closure
+ /// signature). They are therefore the regions between which the
+ /// closure may impose constraints that its creator must verify.
+ pub fn num_global_and_external_regions(&self) -> usize {
+ self.first_local_index
+ }
+
+ /// Gets an iterator over all the early-bound regions that have names.
+ pub fn named_universal_regions<'s>(
+ &'s self,
+ ) -> impl Iterator<Item = (ty::Region<'tcx>, ty::RegionVid)> + 's {
+ self.indices.indices.iter().map(|(&r, &v)| (r, v))
+ }
+
+ /// See `UniversalRegionIndices::to_region_vid`.
+ pub fn to_region_vid(&self, r: ty::Region<'tcx>) -> RegionVid {
+ if let ty::ReEmpty(ty::UniverseIndex::ROOT) = *r {
+ self.root_empty
+ } else {
+ self.indices.to_region_vid(r)
+ }
+ }
+
+ /// As part of the NLL unit tests, you can annotate a function with
+ /// `#[rustc_regions]`, and we will emit information about the region
+ /// inference context and -- in particular -- the external constraints
+ /// that this region imposes on others. The methods in this file
+ /// handle the part about dumping the inference context internal
+ /// state.
+ pub(crate) fn annotate(&self, tcx: TyCtxt<'tcx>, err: &mut Diagnostic) {
+ match self.defining_ty {
+ DefiningTy::Closure(def_id, substs) => {
+ err.note(&format!(
+ "defining type: {} with closure substs {:#?}",
+ tcx.def_path_str_with_substs(def_id, substs),
+ &substs[tcx.generics_of(def_id).parent_count..],
+ ));
+
+ // FIXME: It'd be nice to print the late-bound regions
+ // here, but unfortunately these wind up stored into
+ // tests, and the resulting print-outs include def-ids
+ // and other things that are not stable across tests!
+ // So we just include the region-vid. Annoying.
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id);
+ for_each_late_bound_region_defined_on(tcx, typeck_root_def_id, |r| {
+ err.note(&format!("late-bound region is {:?}", self.to_region_vid(r),));
+ });
+ }
+ DefiningTy::Generator(def_id, substs, _) => {
+ err.note(&format!(
+ "defining type: {} with generator substs {:#?}",
+ tcx.def_path_str_with_substs(def_id, substs),
+ &substs[tcx.generics_of(def_id).parent_count..],
+ ));
+
+ // FIXME: As above, we'd like to print out the region
+ // `r` but doing so is not stable across architectures
+ // and so forth.
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id);
+ for_each_late_bound_region_defined_on(tcx, typeck_root_def_id, |r| {
+ err.note(&format!("late-bound region is {:?}", self.to_region_vid(r),));
+ });
+ }
+ DefiningTy::FnDef(def_id, substs) => {
+ err.note(&format!(
+ "defining type: {}",
+ tcx.def_path_str_with_substs(def_id, substs),
+ ));
+ }
+ DefiningTy::Const(def_id, substs) => {
+ err.note(&format!(
+ "defining constant type: {}",
+ tcx.def_path_str_with_substs(def_id, substs),
+ ));
+ }
+ DefiningTy::InlineConst(def_id, substs) => {
+ err.note(&format!(
+ "defining inline constant type: {}",
+ tcx.def_path_str_with_substs(def_id, substs),
+ ));
+ }
+ }
+ }
+}
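+
+// As a sketch of how `annotate` surfaces in practice (hypothetical test, not
+// from this file): an NLL test may be written as
+//
+//   #[rustc_regions]
+//   fn foo<'a>(x: &'a u32) { /* ... */ }
+//
+// and compiling it emits notes along the lines of
+// `defining type: foo::<'_#1r>` via the `DefiningTy::FnDef` arm above.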
+
+struct UniversalRegionsBuilder<'cx, 'tcx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ mir_def: ty::WithOptConstParam<LocalDefId>,
+ mir_hir_id: HirId,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+const FR: NllRegionVariableOrigin = NllRegionVariableOrigin::FreeRegion;
+
+impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> {
+ fn build(self) -> UniversalRegions<'tcx> {
+ debug!("build(mir_def={:?})", self.mir_def);
+
+ let param_env = self.param_env;
+ debug!("build: param_env={:?}", param_env);
+
+ assert_eq!(FIRST_GLOBAL_INDEX, self.infcx.num_region_vars());
+
+ // Create the "global" region that is always free in all contexts: 'static.
+ let fr_static = self.infcx.next_nll_region_var(FR).to_region_vid();
+
+ // We've now added all the global regions. The next ones we
+ // add will be external.
+ let first_extern_index = self.infcx.num_region_vars();
+
+ let defining_ty = self.defining_ty();
+ debug!("build: defining_ty={:?}", defining_ty);
+
+ let mut indices = self.compute_indices(fr_static, defining_ty);
+ debug!("build: indices={:?}", indices);
+
+ let typeck_root_def_id = self.infcx.tcx.typeck_root_def_id(self.mir_def.did.to_def_id());
+
+        // If this is a 'root' body (not a closure/generator/inline const), then
+        // there are no extern regions, so the local regions start at the same
+        // position as the (empty) sub-list of extern regions.
+ let first_local_index = if self.mir_def.did.to_def_id() == typeck_root_def_id {
+ first_extern_index
+ } else {
+ // If this is a closure, generator, or inline-const, then the late-bound regions from the enclosing
+ // function are actually external regions to us. For example, here, 'a is not local
+ // to the closure c (although it is local to the fn foo):
+ // fn foo<'a>() {
+ // let c = || { let x: &'a u32 = ...; }
+ // }
+ self.infcx
+ .replace_late_bound_regions_with_nll_infer_vars(self.mir_def.did, &mut indices);
+ // Any regions created during the execution of `defining_ty` or during the above
+            // late-bound region replacement are all considered 'extern' regions.
+ self.infcx.num_region_vars()
+ };
+
+ // "Liberate" the late-bound regions. These correspond to
+ // "local" free regions.
+
+ let bound_inputs_and_output = self.compute_inputs_and_output(&indices, defining_ty);
+
+ let inputs_and_output = self.infcx.replace_bound_regions_with_nll_infer_vars(
+ FR,
+ self.mir_def.did,
+ bound_inputs_and_output,
+ &mut indices,
+ );
+ // Converse of above, if this is a function then the late-bound regions declared on its
+ // signature are local to the fn.
+ if self.mir_def.did.to_def_id() == typeck_root_def_id {
+ self.infcx
+ .replace_late_bound_regions_with_nll_infer_vars(self.mir_def.did, &mut indices);
+ }
+
+ let (unnormalized_output_ty, mut unnormalized_input_tys) =
+ inputs_and_output.split_last().unwrap();
+
+ // C-variadic fns also have a `VaList` input that's not listed in the signature
+ // (as it's created inside the body itself, not passed in from outside).
+ if let DefiningTy::FnDef(def_id, _) = defining_ty {
+ if self.infcx.tcx.fn_sig(def_id).c_variadic() {
+ let va_list_did = self.infcx.tcx.require_lang_item(
+ LangItem::VaList,
+ Some(self.infcx.tcx.def_span(self.mir_def.did)),
+ );
+ let region = self
+ .infcx
+ .tcx
+ .mk_region(ty::ReVar(self.infcx.next_nll_region_var(FR).to_region_vid()));
+ let va_list_ty = self
+ .infcx
+ .tcx
+ .bound_type_of(va_list_did)
+ .subst(self.infcx.tcx, &[region.into()]);
+
+ unnormalized_input_tys = self.infcx.tcx.mk_type_list(
+ unnormalized_input_tys.iter().copied().chain(iter::once(va_list_ty)),
+ );
+ }
+ }
+
+ let fr_fn_body = self.infcx.next_nll_region_var(FR).to_region_vid();
+ let num_universals = self.infcx.num_region_vars();
+
+ debug!("build: global regions = {}..{}", FIRST_GLOBAL_INDEX, first_extern_index);
+ debug!("build: extern regions = {}..{}", first_extern_index, first_local_index);
+ debug!("build: local regions = {}..{}", first_local_index, num_universals);
+
+ let yield_ty = match defining_ty {
+ DefiningTy::Generator(_, substs, _) => Some(substs.as_generator().yield_ty()),
+ _ => None,
+ };
+
+ let root_empty = self
+ .infcx
+ .next_nll_region_var(NllRegionVariableOrigin::Existential { from_forall: true })
+ .to_region_vid();
+
+ UniversalRegions {
+ indices,
+ fr_static,
+ fr_fn_body,
+ root_empty,
+ first_extern_index,
+ first_local_index,
+ num_universals,
+ defining_ty,
+ unnormalized_output_ty: *unnormalized_output_ty,
+ unnormalized_input_tys,
+ yield_ty,
+ }
+ }
+
+ /// Returns the "defining type" of the current MIR;
+ /// see `DefiningTy` for details.
+ fn defining_ty(&self) -> DefiningTy<'tcx> {
+ let tcx = self.infcx.tcx;
+ let typeck_root_def_id = tcx.typeck_root_def_id(self.mir_def.did.to_def_id());
+
+ match tcx.hir().body_owner_kind(self.mir_def.did) {
+ BodyOwnerKind::Closure | BodyOwnerKind::Fn => {
+ let defining_ty = if self.mir_def.did.to_def_id() == typeck_root_def_id {
+ tcx.type_of(typeck_root_def_id)
+ } else {
+ let tables = tcx.typeck(self.mir_def.did);
+ tables.node_type(self.mir_hir_id)
+ };
+
+ debug!("defining_ty (pre-replacement): {:?}", defining_ty);
+
+ let defining_ty =
+ self.infcx.replace_free_regions_with_nll_infer_vars(FR, defining_ty);
+
+ match *defining_ty.kind() {
+ ty::Closure(def_id, substs) => DefiningTy::Closure(def_id, substs),
+ ty::Generator(def_id, substs, movability) => {
+ DefiningTy::Generator(def_id, substs, movability)
+ }
+ ty::FnDef(def_id, substs) => DefiningTy::FnDef(def_id, substs),
+ _ => span_bug!(
+ tcx.def_span(self.mir_def.did),
+ "expected defining type for `{:?}`: `{:?}`",
+ self.mir_def.did,
+ defining_ty
+ ),
+ }
+ }
+
+ BodyOwnerKind::Const | BodyOwnerKind::Static(..) => {
+ let identity_substs = InternalSubsts::identity_for_item(tcx, typeck_root_def_id);
+ if self.mir_def.did.to_def_id() == typeck_root_def_id {
+ let substs =
+ self.infcx.replace_free_regions_with_nll_infer_vars(FR, identity_substs);
+ DefiningTy::Const(self.mir_def.did.to_def_id(), substs)
+ } else {
+ let ty = tcx.typeck(self.mir_def.did).node_type(self.mir_hir_id);
+ let substs = InlineConstSubsts::new(
+ tcx,
+ InlineConstSubstsParts { parent_substs: identity_substs, ty },
+ )
+ .substs;
+ let substs = self.infcx.replace_free_regions_with_nll_infer_vars(FR, substs);
+ DefiningTy::InlineConst(self.mir_def.did.to_def_id(), substs)
+ }
+ }
+ }
+ }
+
+ /// Builds a hashmap that maps from the universal regions that are
+ /// in scope (as a `ty::Region<'tcx>`) to their indices (as a
+ /// `RegionVid`). The map returned by this function contains only
+ /// the early-bound regions.
+ fn compute_indices(
+ &self,
+ fr_static: RegionVid,
+ defining_ty: DefiningTy<'tcx>,
+ ) -> UniversalRegionIndices<'tcx> {
+ let tcx = self.infcx.tcx;
+ let typeck_root_def_id = tcx.typeck_root_def_id(self.mir_def.did.to_def_id());
+ let identity_substs = InternalSubsts::identity_for_item(tcx, typeck_root_def_id);
+ let fr_substs = match defining_ty {
+ DefiningTy::Closure(_, ref substs)
+ | DefiningTy::Generator(_, ref substs, _)
+ | DefiningTy::InlineConst(_, ref substs) => {
+ // In the case of closures, we rely on the fact that
+ // the first N elements in the ClosureSubsts are
+ // inherited from the `typeck_root_def_id`.
+ // Therefore, when we zip together (below) with
+ // `identity_substs`, we will get only those regions
+ // that correspond to early-bound regions declared on
+ // the `typeck_root_def_id`.
+ assert!(substs.len() >= identity_substs.len());
+ assert_eq!(substs.regions().count(), identity_substs.regions().count());
+ substs
+ }
+
+ DefiningTy::FnDef(_, substs) | DefiningTy::Const(_, substs) => substs,
+ };
+
+ let global_mapping = iter::once((tcx.lifetimes.re_static, fr_static));
+ let subst_mapping =
+ iter::zip(identity_substs.regions(), fr_substs.regions().map(|r| r.to_region_vid()));
+
+ UniversalRegionIndices { indices: global_mapping.chain(subst_mapping).collect() }
+ }
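+
+    // For instance (illustrative): given `fn foo<'a, 'b>(..)`, `identity_substs`
+    // yields the regions `['a, 'b]` while `fr_substs` carries the fresh NLL
+    // variables created for them, so the map comes out roughly as
+    // `{'static -> '?0, 'a -> '?1, 'b -> '?2}`.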
+
+ fn compute_inputs_and_output(
+ &self,
+ indices: &UniversalRegionIndices<'tcx>,
+ defining_ty: DefiningTy<'tcx>,
+ ) -> ty::Binder<'tcx, &'tcx ty::List<Ty<'tcx>>> {
+ let tcx = self.infcx.tcx;
+ match defining_ty {
+ DefiningTy::Closure(def_id, substs) => {
+ assert_eq!(self.mir_def.did.to_def_id(), def_id);
+ let closure_sig = substs.as_closure().sig();
+ let inputs_and_output = closure_sig.inputs_and_output();
+ let bound_vars = tcx.mk_bound_variable_kinds(
+ inputs_and_output
+ .bound_vars()
+ .iter()
+ .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
+ );
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind: ty::BrEnv,
+ };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let closure_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
+
+ // The "inputs" of the closure in the
+ // signature appear as a tuple. The MIR side
+ // flattens this tuple.
+ let (&output, tuplized_inputs) =
+ inputs_and_output.skip_binder().split_last().unwrap();
+ assert_eq!(tuplized_inputs.len(), 1, "multiple closure inputs");
+ let &ty::Tuple(inputs) = tuplized_inputs[0].kind() else {
+ bug!("closure inputs not a tuple: {:?}", tuplized_inputs[0]);
+ };
+
+ ty::Binder::bind_with_vars(
+ tcx.mk_type_list(
+ iter::once(closure_ty).chain(inputs).chain(iter::once(output)),
+ ),
+ bound_vars,
+ )
+ }
+
+ DefiningTy::Generator(def_id, substs, movability) => {
+ assert_eq!(self.mir_def.did.to_def_id(), def_id);
+ let resume_ty = substs.as_generator().resume_ty();
+ let output = substs.as_generator().return_ty();
+ let generator_ty = tcx.mk_generator(def_id, substs, movability);
+ let inputs_and_output =
+ self.infcx.tcx.intern_type_list(&[generator_ty, resume_ty, output]);
+ ty::Binder::dummy(inputs_and_output)
+ }
+
+ DefiningTy::FnDef(def_id, _) => {
+ let sig = tcx.fn_sig(def_id);
+ let sig = indices.fold_to_region_vids(tcx, sig);
+ sig.inputs_and_output()
+ }
+
+ DefiningTy::Const(def_id, _) => {
+ // For a constant body, there are no inputs, and one
+ // "output" (the type of the constant).
+ assert_eq!(self.mir_def.did.to_def_id(), def_id);
+ let ty = tcx.type_of(self.mir_def.def_id_for_type_of());
+ let ty = indices.fold_to_region_vids(tcx, ty);
+ ty::Binder::dummy(tcx.intern_type_list(&[ty]))
+ }
+
+ DefiningTy::InlineConst(def_id, substs) => {
+ assert_eq!(self.mir_def.did.to_def_id(), def_id);
+ let ty = substs.as_inline_const().ty();
+ ty::Binder::dummy(tcx.intern_type_list(&[ty]))
+ }
+ }
+ }
+}
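+
+// To illustrate the closure arm above: for a closure `|x: u32, y: u32| -> bool`,
+// the signature's inputs appear as the single tuple `(u32, u32)`, and
+// `compute_inputs_and_output` flattens that into the list
+// `[closure_env_ty, u32, u32, bool]` -- environment type first, output last.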
+
+trait InferCtxtExt<'tcx> {
+ fn replace_free_regions_with_nll_infer_vars<T>(
+ &self,
+ origin: NllRegionVariableOrigin,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>;
+
+ fn replace_bound_regions_with_nll_infer_vars<T>(
+ &self,
+ origin: NllRegionVariableOrigin,
+ all_outlive_scope: LocalDefId,
+ value: ty::Binder<'tcx, T>,
+ indices: &mut UniversalRegionIndices<'tcx>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>;
+
+ fn replace_late_bound_regions_with_nll_infer_vars(
+ &self,
+ mir_def_id: LocalDefId,
+ indices: &mut UniversalRegionIndices<'tcx>,
+ );
+}
+
+impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
+ fn replace_free_regions_with_nll_infer_vars<T>(
+ &self,
+ origin: NllRegionVariableOrigin,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.tcx.fold_regions(value, |_region, _depth| self.next_nll_region_var(origin))
+ }
+
+ #[instrument(level = "debug", skip(self, indices))]
+ fn replace_bound_regions_with_nll_infer_vars<T>(
+ &self,
+ origin: NllRegionVariableOrigin,
+ all_outlive_scope: LocalDefId,
+ value: ty::Binder<'tcx, T>,
+ indices: &mut UniversalRegionIndices<'tcx>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let (value, _map) = self.tcx.replace_late_bound_regions(value, |br| {
+ debug!(?br);
+ let liberated_region = self.tcx.mk_region(ty::ReFree(ty::FreeRegion {
+ scope: all_outlive_scope.to_def_id(),
+ bound_region: br.kind,
+ }));
+ let region_vid = self.next_nll_region_var(origin);
+ indices.insert_late_bound_region(liberated_region, region_vid.to_region_vid());
+ debug!(?liberated_region, ?region_vid);
+ region_vid
+ });
+ value
+ }
+
+    /// Finds late-bound regions that do not appear in the parameter listing and adds them to the
+    /// `indices` map. Typically, we identify late-bound regions as we process the inputs and
+    /// outputs of the closure/function. However, sometimes there are late-bound regions which do
+    /// not appear in the fn parameters but which are nonetheless in scope. The simplest case of
+    /// this is an unused lifetime on a function, like `fn foo<'a>() { }` (see e.g., #51351). Even
+    /// though such a region is unused in the signature, users can still reference it in the body
+    /// (e.g., `let x: &'a u32 = &22;`), so we need to create entries for these regions and store
+    /// them in the `indices` map. This code iterates over the complete set of late-bound regions
+    /// and checks for any that we have not yet seen, adding them to that map.
+ #[instrument(skip(self, indices))]
+ fn replace_late_bound_regions_with_nll_infer_vars(
+ &self,
+ mir_def_id: LocalDefId,
+ indices: &mut UniversalRegionIndices<'tcx>,
+ ) {
+ debug!("replace_late_bound_regions_with_nll_infer_vars(mir_def_id={:?})", mir_def_id);
+ let typeck_root_def_id = self.tcx.typeck_root_def_id(mir_def_id.to_def_id());
+ for_each_late_bound_region_defined_on(self.tcx, typeck_root_def_id, |r| {
+ debug!("replace_late_bound_regions_with_nll_infer_vars: r={:?}", r);
+ if !indices.indices.contains_key(&r) {
+ let region_vid = self.next_nll_region_var(FR);
+ debug!(?region_vid);
+ indices.insert_late_bound_region(r, region_vid.to_region_vid());
+ }
+ });
+ }
+}
+
+impl<'tcx> UniversalRegionIndices<'tcx> {
+ /// Initially, the `UniversalRegionIndices` map contains only the
+    /// early-bound regions in scope. Once that is all set up, we come
+ /// in later and instantiate the late-bound regions, and then we
+ /// insert the `ReFree` version of those into the map as
+ /// well. These are used for error reporting.
+ fn insert_late_bound_region(&mut self, r: ty::Region<'tcx>, vid: ty::RegionVid) {
+ debug!("insert_late_bound_region({:?}, {:?})", r, vid);
+ self.indices.insert(r, vid);
+ }
+
+ /// Converts `r` into a local inference variable: `r` can either
+    /// be a `ReVar` (i.e., already a reference to an inference
+ /// variable) or it can be `'static` or some early-bound
+ /// region. This is useful when taking the results from
+ /// type-checking and trait-matching, which may sometimes
+ /// reference those regions from the `ParamEnv`. It is also used
+ /// during initialization. Relies on the `indices` map having been
+ /// fully initialized.
+ pub fn to_region_vid(&self, r: ty::Region<'tcx>) -> RegionVid {
+ if let ty::ReVar(..) = *r {
+ r.to_region_vid()
+ } else {
+ *self
+ .indices
+ .get(&r)
+ .unwrap_or_else(|| bug!("cannot convert `{:?}` to a region vid", r))
+ }
+ }
+
+ /// Replaces all free regions in `value` with region vids, as
+ /// returned by `to_region_vid`.
+ pub fn fold_to_region_vids<T>(&self, tcx: TyCtxt<'tcx>, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ tcx.fold_regions(value, |region, _| tcx.mk_region(ty::ReVar(self.to_region_vid(region))))
+ }
+}
+
+/// Iterates over the late-bound regions defined on fn_def_id and
+/// invokes `f` with the liberated form of each one.
+fn for_each_late_bound_region_defined_on<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ fn_def_id: DefId,
+ mut f: impl FnMut(ty::Region<'tcx>),
+) {
+ if let Some(late_bounds) = tcx.is_late_bound_map(fn_def_id.expect_local()) {
+ for &region_def_id in late_bounds.iter() {
+ let name = tcx.item_name(region_def_id.to_def_id());
+ let liberated_region = tcx.mk_region(ty::ReFree(ty::FreeRegion {
+ scope: fn_def_id,
+ bound_region: ty::BoundRegionKind::BrNamed(region_def_id.to_def_id(), name),
+ }));
+ f(liberated_region);
+ }
+ }
+}
diff --git a/compiler/rustc_borrowck/src/used_muts.rs b/compiler/rustc_borrowck/src/used_muts.rs
new file mode 100644
index 000000000..8833753b1
--- /dev/null
+++ b/compiler/rustc_borrowck/src/used_muts.rs
@@ -0,0 +1,110 @@
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{
+ Local, Location, Place, Statement, StatementKind, Terminator, TerminatorKind,
+};
+
+use crate::MirBorrowckCtxt;
+
+impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
+    /// Walks the MIR, adding to the `used_mut` set the locals that will be ignored for the
+    /// purposes of the `unused_mut` lint.
+ ///
+    /// `temporary_used_locals` should contain locals that were found, during borrow checking, to
+    /// be temporary, mutable and used. This function looks for assignments into these locals from
+ /// user-declared locals and adds those user-defined locals to the `used_mut` set. This can
+ /// occur due to a rare case involving upvars in closures.
+ ///
+ /// `never_initialized_mut_locals` should contain the set of user-declared mutable locals
+ /// (not arguments) that have not already been marked as being used.
+ /// This function then looks for assignments from statements or the terminator into the locals
+ /// from this set and removes them from the set. This leaves only those locals that have not
+ /// been assigned to - this set is used as a proxy for locals that were not initialized due to
+ /// unreachable code. These locals are then considered "used" to silence the lint for them.
+ /// See #55344 for context.
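+    ///
+    /// For example (an illustrative sketch):
+    ///
+    /// ```ignore (pseudo-rust)
+    /// fn diverges() -> ! { loop {} }
+    ///
+    /// let mut x: u32;
+    /// diverges();
+    /// x = 1; // unreachable, so the assignment never appears in the MIR;
+    ///        // `x` stays in `never_initialized_mut_locals` and is marked
+    ///        // used, avoiding a bogus `unused_mut` warning.
+    /// ```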
+ pub(crate) fn gather_used_muts(
+ &mut self,
+ temporary_used_locals: FxHashSet<Local>,
+ mut never_initialized_mut_locals: FxHashSet<Local>,
+ ) {
+ {
+ let mut visitor = GatherUsedMutsVisitor {
+ temporary_used_locals,
+ never_initialized_mut_locals: &mut never_initialized_mut_locals,
+ mbcx: self,
+ };
+ visitor.visit_body(&visitor.mbcx.body);
+ }
+
+        // Take the union of the existing `used_mut` set with those variables we've found were
+ // never initialized.
+ debug!("gather_used_muts: never_initialized_mut_locals={:?}", never_initialized_mut_locals);
+ self.used_mut = self.used_mut.union(&never_initialized_mut_locals).cloned().collect();
+ }
+}
+
+/// MIR visitor for collecting used mutable variables.
+/// The 'visit lifetime represents the duration of the MIR walk.
+struct GatherUsedMutsVisitor<'visit, 'cx, 'tcx> {
+ temporary_used_locals: FxHashSet<Local>,
+ never_initialized_mut_locals: &'visit mut FxHashSet<Local>,
+ mbcx: &'visit mut MirBorrowckCtxt<'cx, 'tcx>,
+}
+
+impl GatherUsedMutsVisitor<'_, '_, '_> {
+ fn remove_never_initialized_mut_locals(&mut self, into: Place<'_>) {
+ // Remove any locals that we found were initialized from the
+ // `never_initialized_mut_locals` set. At the end, the only remaining locals will
+ // be those that were never initialized - we will consider those as being used as
+        // they will either have been removed by unreachable-code optimizations or linted
+ // as unused variables.
+ self.never_initialized_mut_locals.remove(&into.local);
+ }
+}
+
+impl<'visit, 'cx, 'tcx> Visitor<'tcx> for GatherUsedMutsVisitor<'visit, 'cx, 'tcx> {
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ debug!("visit_terminator: terminator={:?}", terminator);
+ match &terminator.kind {
+ TerminatorKind::Call { destination, .. } => {
+ self.remove_never_initialized_mut_locals(*destination);
+ }
+ TerminatorKind::DropAndReplace { place, .. } => {
+ self.remove_never_initialized_mut_locals(*place);
+ }
+ _ => {}
+ }
+
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ if let StatementKind::Assign(box (into, _)) = &statement.kind {
+ debug!(
+ "visit_statement: statement={:?} local={:?} \
+ never_initialized_mut_locals={:?}",
+ statement, into.local, self.never_initialized_mut_locals
+ );
+ self.remove_never_initialized_mut_locals(*into);
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_local(&mut self, local: Local, place_context: PlaceContext, location: Location) {
+ if place_context.is_place_assignment() && self.temporary_used_locals.contains(&local) {
+ // Propagate the Local assigned at this Location as a used mutable local variable
+ for moi in &self.mbcx.move_data.loc_map[location] {
+ let mpi = &self.mbcx.move_data.moves[*moi].path;
+ let path = &self.mbcx.move_data.move_paths[*mpi];
+ debug!(
+ "assignment of {:?} to {:?}, adding {:?} to used mutable set",
+ path.place, local, path.place
+ );
+ if let Some(user_local) = path.place.as_local() {
+ self.mbcx.used_mut.insert(user_local);
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_builtin_macros/Cargo.toml b/compiler/rustc_builtin_macros/Cargo.toml
new file mode 100644
index 000000000..8d8e9d9b5
--- /dev/null
+++ b/compiler/rustc_builtin_macros/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "rustc_builtin_macros"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_parse_format = { path = "../rustc_parse_format" }
+tracing = "0.1"
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_lexer = { path = "../rustc_lexer" }
+rustc_lint_defs = { path = "../rustc_lint_defs" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_parse = { path = "../rustc_parse" }
+rustc_target = { path = "../rustc_target" }
+rustc_session = { path = "../rustc_session" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_ast = { path = "../rustc_ast" }
+rustc_expand = { path = "../rustc_expand" }
+rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_builtin_macros/src/asm.rs b/compiler/rustc_builtin_macros/src/asm.rs
new file mode 100644
index 000000000..1a0ea8f41
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/asm.rs
@@ -0,0 +1,875 @@
+use rustc_ast as ast;
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter};
+use rustc_ast::tokenstream::TokenStream;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{Applicability, PResult};
+use rustc_expand::base::{self, *};
+use rustc_parse::parser::Parser;
+use rustc_parse_format as parse;
+use rustc_session::lint;
+use rustc_session::parse::ParseSess;
+use rustc_span::symbol::Ident;
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::{InnerSpan, Span};
+use rustc_target::asm::InlineAsmArch;
+use smallvec::smallvec;
+
+pub struct AsmArgs {
+ pub templates: Vec<P<ast::Expr>>,
+ pub operands: Vec<(ast::InlineAsmOperand, Span)>,
+ named_args: FxHashMap<Symbol, usize>,
+ reg_args: FxHashSet<usize>,
+ pub clobber_abis: Vec<(Symbol, Span)>,
+ options: ast::InlineAsmOptions,
+ pub options_spans: Vec<Span>,
+}
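+
+// As a rough illustration of how an invocation maps onto `AsmArgs`
+// (hypothetical operands, x86-64 register names):
+//
+//   asm!("add {0}, {x}",   // -> templates
+//       inout(reg) a,      // -> operands[0] (positional)
+//       x = in(reg) b,     // -> operands[1], named_args["x"]
+//       out("rax") c,      // -> operands[2], reg_args (explicit register)
+//       clobber_abi("C"),  // -> clobber_abis
+//       options(nostack)); // -> options + options_spans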
+
+fn parse_args<'a>(
+ ecx: &mut ExtCtxt<'a>,
+ sp: Span,
+ tts: TokenStream,
+ is_global_asm: bool,
+) -> PResult<'a, AsmArgs> {
+ let mut p = ecx.new_parser_from_tts(tts);
+ let sess = &ecx.sess.parse_sess;
+ parse_asm_args(&mut p, sess, sp, is_global_asm)
+}
+
+// Primarily public for rustfmt consumption.
+// Internal consumers should continue to leverage `expand_asm`/`expand_global_asm`.
+pub fn parse_asm_args<'a>(
+ p: &mut Parser<'a>,
+ sess: &'a ParseSess,
+ sp: Span,
+ is_global_asm: bool,
+) -> PResult<'a, AsmArgs> {
+ let diag = &sess.span_diagnostic;
+
+ if p.token == token::Eof {
+ return Err(diag.struct_span_err(sp, "requires at least a template string argument"));
+ }
+
+ let first_template = p.parse_expr()?;
+ let mut args = AsmArgs {
+ templates: vec![first_template],
+ operands: vec![],
+ named_args: FxHashMap::default(),
+ reg_args: FxHashSet::default(),
+ clobber_abis: Vec::new(),
+ options: ast::InlineAsmOptions::empty(),
+ options_spans: vec![],
+ };
+
+ let mut allow_templates = true;
+ while p.token != token::Eof {
+ if !p.eat(&token::Comma) {
+ if allow_templates {
+ // After a template string, we always expect *only* a comma...
+ let mut err = diag.struct_span_err(p.token.span, "expected token: `,`");
+ err.span_label(p.token.span, "expected `,`");
+ p.maybe_annotate_with_ascription(&mut err, false);
+ return Err(err);
+ } else {
+ // ...after that delegate to `expect` to also include the other expected tokens.
+ return Err(p.expect(&token::Comma).err().unwrap());
+ }
+ }
+ if p.token == token::Eof {
+ break;
+ } // accept trailing commas
+
+ // Parse clobber_abi
+ if p.eat_keyword(sym::clobber_abi) {
+ parse_clobber_abi(p, &mut args)?;
+ allow_templates = false;
+ continue;
+ }
+
+ // Parse options
+ if p.eat_keyword(sym::options) {
+ parse_options(p, &mut args, is_global_asm)?;
+ allow_templates = false;
+ continue;
+ }
+
+ let span_start = p.token.span;
+
+ // Parse operand names
+ let name = if p.token.is_ident() && p.look_ahead(1, |t| *t == token::Eq) {
+ let (ident, _) = p.token.ident().unwrap();
+ p.bump();
+ p.expect(&token::Eq)?;
+ allow_templates = false;
+ Some(ident.name)
+ } else {
+ None
+ };
+
+ let mut explicit_reg = false;
+ let op = if !is_global_asm && p.eat_keyword(kw::In) {
+ let reg = parse_reg(p, &mut explicit_reg)?;
+ if p.eat_keyword(kw::Underscore) {
+ let err = diag.struct_span_err(p.token.span, "_ cannot be used for input operands");
+ return Err(err);
+ }
+ let expr = p.parse_expr()?;
+ ast::InlineAsmOperand::In { reg, expr }
+ } else if !is_global_asm && p.eat_keyword(sym::out) {
+ let reg = parse_reg(p, &mut explicit_reg)?;
+ let expr = if p.eat_keyword(kw::Underscore) { None } else { Some(p.parse_expr()?) };
+ ast::InlineAsmOperand::Out { reg, expr, late: false }
+ } else if !is_global_asm && p.eat_keyword(sym::lateout) {
+ let reg = parse_reg(p, &mut explicit_reg)?;
+ let expr = if p.eat_keyword(kw::Underscore) { None } else { Some(p.parse_expr()?) };
+ ast::InlineAsmOperand::Out { reg, expr, late: true }
+ } else if !is_global_asm && p.eat_keyword(sym::inout) {
+ let reg = parse_reg(p, &mut explicit_reg)?;
+ if p.eat_keyword(kw::Underscore) {
+ let err = diag.struct_span_err(p.token.span, "_ cannot be used for input operands");
+ return Err(err);
+ }
+ let expr = p.parse_expr()?;
+ if p.eat(&token::FatArrow) {
+ let out_expr =
+ if p.eat_keyword(kw::Underscore) { None } else { Some(p.parse_expr()?) };
+ ast::InlineAsmOperand::SplitInOut { reg, in_expr: expr, out_expr, late: false }
+ } else {
+ ast::InlineAsmOperand::InOut { reg, expr, late: false }
+ }
+ } else if !is_global_asm && p.eat_keyword(sym::inlateout) {
+ let reg = parse_reg(p, &mut explicit_reg)?;
+ if p.eat_keyword(kw::Underscore) {
+ let err = diag.struct_span_err(p.token.span, "_ cannot be used for input operands");
+ return Err(err);
+ }
+ let expr = p.parse_expr()?;
+ if p.eat(&token::FatArrow) {
+ let out_expr =
+ if p.eat_keyword(kw::Underscore) { None } else { Some(p.parse_expr()?) };
+ ast::InlineAsmOperand::SplitInOut { reg, in_expr: expr, out_expr, late: true }
+ } else {
+ ast::InlineAsmOperand::InOut { reg, expr, late: true }
+ }
+ } else if p.eat_keyword(kw::Const) {
+ let anon_const = p.parse_anon_const_expr()?;
+ ast::InlineAsmOperand::Const { anon_const }
+ } else if p.eat_keyword(sym::sym) {
+ let expr = p.parse_expr()?;
+ let ast::ExprKind::Path(qself, path) = &expr.kind else {
+ let err = diag
+ .struct_span_err(expr.span, "expected a path for argument to `sym`");
+ return Err(err);
+ };
+ let sym = ast::InlineAsmSym {
+ id: ast::DUMMY_NODE_ID,
+ qself: qself.clone(),
+ path: path.clone(),
+ };
+ ast::InlineAsmOperand::Sym { sym }
+ } else if allow_templates {
+ let template = p.parse_expr()?;
+ // If it can't possibly expand to a string, provide diagnostics here to include other
+ // things it could have been.
+ match template.kind {
+ ast::ExprKind::Lit(ast::Lit { kind: ast::LitKind::Str(..), .. }) => {}
+ ast::ExprKind::MacCall(..) => {}
+ _ => {
+ let errstr = if is_global_asm {
+ "expected operand, options, or additional template string"
+ } else {
+ "expected operand, clobber_abi, options, or additional template string"
+ };
+ let mut err = diag.struct_span_err(template.span, errstr);
+ err.span_label(template.span, errstr);
+ return Err(err);
+ }
+ }
+ args.templates.push(template);
+ continue;
+ } else {
+ return p.unexpected();
+ };
+
+ allow_templates = false;
+ let span = span_start.to(p.prev_token.span);
+ let slot = args.operands.len();
+ args.operands.push((op, span));
+
+ // Validate the order of named, positional & explicit register operands and
+ // clobber_abi/options. We do this at the end once we have the full span
+ // of the argument available.
+ if !args.options_spans.is_empty() {
+ diag.struct_span_err(span, "arguments are not allowed after options")
+ .span_labels(args.options_spans.clone(), "previous options")
+ .span_label(span, "argument")
+ .emit();
+ } else if let Some((_, abi_span)) = args.clobber_abis.last() {
+ diag.struct_span_err(span, "arguments are not allowed after clobber_abi")
+ .span_label(*abi_span, "clobber_abi")
+ .span_label(span, "argument")
+ .emit();
+ }
+ if explicit_reg {
+ if name.is_some() {
+ diag.struct_span_err(span, "explicit register arguments cannot have names").emit();
+ }
+ args.reg_args.insert(slot);
+ } else if let Some(name) = name {
+ if let Some(&prev) = args.named_args.get(&name) {
+ diag.struct_span_err(span, &format!("duplicate argument named `{}`", name))
+ .span_label(args.operands[prev].1, "previously here")
+ .span_label(span, "duplicate argument")
+ .emit();
+ continue;
+ }
+ if !args.reg_args.is_empty() {
+ let mut err = diag.struct_span_err(
+ span,
+ "named arguments cannot follow explicit register arguments",
+ );
+ err.span_label(span, "named argument");
+ for pos in &args.reg_args {
+ err.span_label(args.operands[*pos].1, "explicit register argument");
+ }
+ err.emit();
+ }
+ args.named_args.insert(name, slot);
+ } else {
+ if !args.named_args.is_empty() || !args.reg_args.is_empty() {
+ let mut err = diag.struct_span_err(
+ span,
+ "positional arguments cannot follow named arguments \
+ or explicit register arguments",
+ );
+ err.span_label(span, "positional argument");
+ for pos in args.named_args.values() {
+ err.span_label(args.operands[*pos].1, "named argument");
+ }
+ for pos in &args.reg_args {
+ err.span_label(args.operands[*pos].1, "explicit register argument");
+ }
+ err.emit();
+ }
+ }
+ }
+
+ if args.options.contains(ast::InlineAsmOptions::NOMEM)
+ && args.options.contains(ast::InlineAsmOptions::READONLY)
+ {
+ let spans = args.options_spans.clone();
+ diag.struct_span_err(spans, "the `nomem` and `readonly` options are mutually exclusive")
+ .emit();
+ }
+ if args.options.contains(ast::InlineAsmOptions::PURE)
+ && args.options.contains(ast::InlineAsmOptions::NORETURN)
+ {
+ let spans = args.options_spans.clone();
+ diag.struct_span_err(spans, "the `pure` and `noreturn` options are mutually exclusive")
+ .emit();
+ }
+ if args.options.contains(ast::InlineAsmOptions::PURE)
+ && !args.options.intersects(ast::InlineAsmOptions::NOMEM | ast::InlineAsmOptions::READONLY)
+ {
+ let spans = args.options_spans.clone();
+ diag.struct_span_err(
+ spans,
+ "the `pure` option must be combined with either `nomem` or `readonly`",
+ )
+ .emit();
+ }
+
+ let mut have_real_output = false;
+ let mut outputs_sp = vec![];
+ let mut regclass_outputs = vec![];
+ for (op, op_sp) in &args.operands {
+ match op {
+ ast::InlineAsmOperand::Out { reg, expr, .. }
+ | ast::InlineAsmOperand::SplitInOut { reg, out_expr: expr, .. } => {
+ outputs_sp.push(*op_sp);
+ have_real_output |= expr.is_some();
+ if let ast::InlineAsmRegOrRegClass::RegClass(_) = reg {
+ regclass_outputs.push(*op_sp);
+ }
+ }
+ ast::InlineAsmOperand::InOut { reg, .. } => {
+ outputs_sp.push(*op_sp);
+ have_real_output = true;
+ if let ast::InlineAsmRegOrRegClass::RegClass(_) = reg {
+ regclass_outputs.push(*op_sp);
+ }
+ }
+ _ => {}
+ }
+ }
+ if args.options.contains(ast::InlineAsmOptions::PURE) && !have_real_output {
+ diag.struct_span_err(
+ args.options_spans.clone(),
+ "asm with the `pure` option must have at least one output",
+ )
+ .emit();
+ }
+ if args.options.contains(ast::InlineAsmOptions::NORETURN) && !outputs_sp.is_empty() {
+ let err = diag
+ .struct_span_err(outputs_sp, "asm outputs are not allowed with the `noreturn` option");
+
+ // Bail out now since this is likely to confuse MIR
+ return Err(err);
+ }
+
+ if args.clobber_abis.len() > 0 {
+ if is_global_asm {
+ let err = diag.struct_span_err(
+ args.clobber_abis.iter().map(|(_, span)| *span).collect::<Vec<Span>>(),
+ "`clobber_abi` cannot be used with `global_asm!`",
+ );
+
+ // Bail out now since this is likely to confuse later stages
+ return Err(err);
+ }
+ if !regclass_outputs.is_empty() {
+ diag.struct_span_err(
+ regclass_outputs.clone(),
+ "asm with `clobber_abi` must specify explicit registers for outputs",
+ )
+ .span_labels(
+ args.clobber_abis.iter().map(|(_, span)| *span).collect::<Vec<Span>>(),
+ "clobber_abi",
+ )
+ .span_labels(regclass_outputs, "generic outputs")
+ .emit();
+ }
+ }
+
+ Ok(args)
+}
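+
+// Net effect of the ordering checks above: operands must appear as
+// positional, then named, then explicit-register, and `clobber_abi`/`options`
+// must come after all operands.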
+
+/// Report a duplicate option error.
+///
+/// This function must be called immediately after the option token is parsed.
+/// Otherwise, the suggestion will be incorrect.
+fn err_duplicate_option<'a>(p: &mut Parser<'a>, symbol: Symbol, span: Span) {
+ let mut err = p
+ .sess
+ .span_diagnostic
+ .struct_span_err(span, &format!("the `{}` option was already provided", symbol));
+ err.span_label(span, "this option was already provided");
+
+ // Tool-only output
+ let mut full_span = span;
+ if p.token.kind == token::Comma {
+ full_span = full_span.to(p.token.span);
+ }
+ err.tool_only_span_suggestion(
+ full_span,
+ "remove this option",
+ "",
+ Applicability::MachineApplicable,
+ );
+
+ err.emit();
+}
+
+/// Try to set the provided option in the provided `AsmArgs`.
+/// If it is already set, report a duplicate option error.
+///
+/// This function must be called immediately after the option token is parsed.
+/// Otherwise, the error will not point to the correct spot.
+fn try_set_option<'a>(
+ p: &mut Parser<'a>,
+ args: &mut AsmArgs,
+ symbol: Symbol,
+ option: ast::InlineAsmOptions,
+) {
+ if !args.options.contains(option) {
+ args.options |= option;
+ } else {
+ err_duplicate_option(p, symbol, p.prev_token.span);
+ }
+}
+
+fn parse_options<'a>(
+ p: &mut Parser<'a>,
+ args: &mut AsmArgs,
+ is_global_asm: bool,
+) -> PResult<'a, ()> {
+ let span_start = p.prev_token.span;
+
+ p.expect(&token::OpenDelim(Delimiter::Parenthesis))?;
+
+ while !p.eat(&token::CloseDelim(Delimiter::Parenthesis)) {
+ if !is_global_asm && p.eat_keyword(sym::pure) {
+ try_set_option(p, args, sym::pure, ast::InlineAsmOptions::PURE);
+ } else if !is_global_asm && p.eat_keyword(sym::nomem) {
+ try_set_option(p, args, sym::nomem, ast::InlineAsmOptions::NOMEM);
+ } else if !is_global_asm && p.eat_keyword(sym::readonly) {
+ try_set_option(p, args, sym::readonly, ast::InlineAsmOptions::READONLY);
+ } else if !is_global_asm && p.eat_keyword(sym::preserves_flags) {
+ try_set_option(p, args, sym::preserves_flags, ast::InlineAsmOptions::PRESERVES_FLAGS);
+ } else if !is_global_asm && p.eat_keyword(sym::noreturn) {
+ try_set_option(p, args, sym::noreturn, ast::InlineAsmOptions::NORETURN);
+ } else if !is_global_asm && p.eat_keyword(sym::nostack) {
+ try_set_option(p, args, sym::nostack, ast::InlineAsmOptions::NOSTACK);
+ } else if !is_global_asm && p.eat_keyword(sym::may_unwind) {
+            try_set_option(p, args, sym::may_unwind, ast::InlineAsmOptions::MAY_UNWIND);
+ } else if p.eat_keyword(sym::att_syntax) {
+ try_set_option(p, args, sym::att_syntax, ast::InlineAsmOptions::ATT_SYNTAX);
+ } else if p.eat_keyword(kw::Raw) {
+ try_set_option(p, args, kw::Raw, ast::InlineAsmOptions::RAW);
+ } else {
+ return p.unexpected();
+ }
+
+ // Allow trailing commas
+ if p.eat(&token::CloseDelim(Delimiter::Parenthesis)) {
+ break;
+ }
+ p.expect(&token::Comma)?;
+ }
+
+ let new_span = span_start.to(p.prev_token.span);
+ args.options_spans.push(new_span);
+
+ Ok(())
+}
+
+fn parse_clobber_abi<'a>(p: &mut Parser<'a>, args: &mut AsmArgs) -> PResult<'a, ()> {
+ let span_start = p.prev_token.span;
+
+ p.expect(&token::OpenDelim(Delimiter::Parenthesis))?;
+
+ if p.eat(&token::CloseDelim(Delimiter::Parenthesis)) {
+ let err = p.sess.span_diagnostic.struct_span_err(
+ p.token.span,
+ "at least one abi must be provided as an argument to `clobber_abi`",
+ );
+ return Err(err);
+ }
+
+ let mut new_abis = Vec::new();
+ loop {
+ match p.parse_str_lit() {
+ Ok(str_lit) => {
+ new_abis.push((str_lit.symbol_unescaped, str_lit.span));
+ }
+ Err(opt_lit) => {
+ // If the non-string literal is a closing paren then it's the end of the list and is fine
+ if p.eat(&token::CloseDelim(Delimiter::Parenthesis)) {
+ break;
+ }
+ let span = opt_lit.map_or(p.token.span, |lit| lit.span);
+ let mut err =
+ p.sess.span_diagnostic.struct_span_err(span, "expected string literal");
+ err.span_label(span, "not a string literal");
+ return Err(err);
+ }
+ };
+
+ // Allow trailing commas
+ if p.eat(&token::CloseDelim(Delimiter::Parenthesis)) {
+ break;
+ }
+ p.expect(&token::Comma)?;
+ }
+
+ let full_span = span_start.to(p.prev_token.span);
+
+ if !args.options_spans.is_empty() {
+ let mut err = p
+ .sess
+ .span_diagnostic
+ .struct_span_err(full_span, "clobber_abi is not allowed after options");
+ err.span_labels(args.options_spans.clone(), "options");
+ return Err(err);
+ }
+
+ match &new_abis[..] {
+ // should have errored above during parsing
+ [] => unreachable!(),
+ [(abi, _span)] => args.clobber_abis.push((*abi, full_span)),
+ [abis @ ..] => {
+ for (abi, span) in abis {
+ args.clobber_abis.push((*abi, *span));
+ }
+ }
+ }
+
+ Ok(())
+}
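+
+// E.g. (hypothetical input): `clobber_abi("C")` records the one ABI with the
+// span of the whole `clobber_abi(...)` argument, while `clobber_abi("C",
+// "system")` records each ABI with its own string literal's span, per the
+// `match &new_abis[..]` above.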
+
+fn parse_reg<'a>(
+ p: &mut Parser<'a>,
+ explicit_reg: &mut bool,
+) -> PResult<'a, ast::InlineAsmRegOrRegClass> {
+ p.expect(&token::OpenDelim(Delimiter::Parenthesis))?;
+ let result = match p.token.uninterpolate().kind {
+ token::Ident(name, false) => ast::InlineAsmRegOrRegClass::RegClass(name),
+ token::Literal(token::Lit { kind: token::LitKind::Str, symbol, suffix: _ }) => {
+ *explicit_reg = true;
+ ast::InlineAsmRegOrRegClass::Reg(symbol)
+ }
+ _ => {
+ return Err(
+ p.struct_span_err(p.token.span, "expected register class or explicit register")
+ );
+ }
+ };
+ p.bump();
+ p.expect(&token::CloseDelim(Delimiter::Parenthesis))?;
+ Ok(result)
+}
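+
+// E.g.: `in(reg) x` parses the bare identifier `reg` as a register class
+// (`RegClass`), while `in("eax") x` parses the string literal as an explicit
+// register (`Reg`) and sets `explicit_reg`.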
+
+fn expand_preparsed_asm(ecx: &mut ExtCtxt<'_>, args: AsmArgs) -> Option<ast::InlineAsm> {
+ let mut template = vec![];
+ // Register operands are implicitly used since they are not allowed to be
+ // referenced in the template string.
+ let mut used = vec![false; args.operands.len()];
+ for pos in &args.reg_args {
+ used[*pos] = true;
+ }
+ let named_pos: FxHashMap<usize, Symbol> =
+ args.named_args.iter().map(|(&sym, &idx)| (idx, sym)).collect();
+ let mut line_spans = Vec::with_capacity(args.templates.len());
+ let mut curarg = 0;
+
+ let mut template_strs = Vec::with_capacity(args.templates.len());
+
+ for (i, template_expr) in args.templates.into_iter().enumerate() {
+ if i != 0 {
+ template.push(ast::InlineAsmTemplatePiece::String("\n".to_string()));
+ }
+
+ let msg = "asm template must be a string literal";
+ let template_sp = template_expr.span;
+ let (template_str, template_style, template_span) =
+ match expr_to_spanned_string(ecx, template_expr, msg) {
+ Ok(template_part) => template_part,
+ Err(err) => {
+ if let Some((mut err, _)) = err {
+ err.emit();
+ }
+ return None;
+ }
+ };
+
+ let str_style = match template_style {
+ ast::StrStyle::Cooked => None,
+ ast::StrStyle::Raw(raw) => Some(raw as usize),
+ };
+
+ let template_snippet = ecx.source_map().span_to_snippet(template_sp).ok();
+ template_strs.push((
+ template_str,
+ template_snippet.as_ref().map(|s| Symbol::intern(s)),
+ template_sp,
+ ));
+ let template_str = template_str.as_str();
+
+ if let Some(InlineAsmArch::X86 | InlineAsmArch::X86_64) = ecx.sess.asm_arch {
+ let find_span = |needle: &str| -> Span {
+ if let Some(snippet) = &template_snippet {
+ if let Some(pos) = snippet.find(needle) {
+ let end = pos
+ + snippet[pos..]
+ .find(|c| matches!(c, '\n' | ';' | '\\' | '"'))
+ .unwrap_or(snippet[pos..].len() - 1);
+ let inner = InnerSpan::new(pos, end);
+ return template_sp.from_inner(inner);
+ }
+ }
+ template_sp
+ };
+
+ if template_str.contains(".intel_syntax") {
+ ecx.parse_sess().buffer_lint(
+ lint::builtin::BAD_ASM_STYLE,
+ find_span(".intel_syntax"),
+ ecx.current_expansion.lint_node_id,
+ "avoid using `.intel_syntax`, Intel syntax is the default",
+ );
+ }
+ if template_str.contains(".att_syntax") {
+ ecx.parse_sess().buffer_lint(
+ lint::builtin::BAD_ASM_STYLE,
+ find_span(".att_syntax"),
+ ecx.current_expansion.lint_node_id,
+ "avoid using `.att_syntax`, prefer using `options(att_syntax)` instead",
+ );
+ }
+ }
+
+ // Don't treat raw asm as a format string.
+ if args.options.contains(ast::InlineAsmOptions::RAW) {
+ template.push(ast::InlineAsmTemplatePiece::String(template_str.to_string()));
+ let template_num_lines = 1 + template_str.matches('\n').count();
+ line_spans.extend(std::iter::repeat(template_sp).take(template_num_lines));
+ continue;
+ }
+
+ let mut parser = parse::Parser::new(
+ template_str,
+ str_style,
+ template_snippet,
+ false,
+ parse::ParseMode::InlineAsm,
+ );
+ parser.curarg = curarg;
+
+ let mut unverified_pieces = Vec::new();
+ while let Some(piece) = parser.next() {
+ if !parser.errors.is_empty() {
+ break;
+ } else {
+ unverified_pieces.push(piece);
+ }
+ }
+
+ if !parser.errors.is_empty() {
+ let err = parser.errors.remove(0);
+ let err_sp = template_span.from_inner(InnerSpan::new(err.span.start, err.span.end));
+ let msg = &format!("invalid asm template string: {}", err.description);
+ let mut e = ecx.struct_span_err(err_sp, msg);
+ e.span_label(err_sp, err.label + " in asm template string");
+ if let Some(note) = err.note {
+ e.note(&note);
+ }
+ if let Some((label, span)) = err.secondary_label {
+ let err_sp = template_span.from_inner(InnerSpan::new(span.start, span.end));
+ e.span_label(err_sp, label);
+ }
+ e.emit();
+ return None;
+ }
+
+ curarg = parser.curarg;
+
+ let mut arg_spans = parser
+ .arg_places
+ .iter()
+ .map(|span| template_span.from_inner(InnerSpan::new(span.start, span.end)));
+ for piece in unverified_pieces {
+ match piece {
+ parse::Piece::String(s) => {
+ template.push(ast::InlineAsmTemplatePiece::String(s.to_string()))
+ }
+ parse::Piece::NextArgument(arg) => {
+ let span = arg_spans.next().unwrap_or(template_sp);
+
+ let operand_idx = match arg.position {
+ parse::ArgumentIs(idx) | parse::ArgumentImplicitlyIs(idx) => {
+ if idx >= args.operands.len()
+ || named_pos.contains_key(&idx)
+ || args.reg_args.contains(&idx)
+ {
+ let msg = format!("invalid reference to argument at index {}", idx);
+ let mut err = ecx.struct_span_err(span, &msg);
+ err.span_label(span, "from here");
+
+ let positional_args = args.operands.len()
+ - args.named_args.len()
+ - args.reg_args.len();
+ let positional = if positional_args != args.operands.len() {
+ "positional "
+ } else {
+ ""
+ };
+ let msg = match positional_args {
+ 0 => format!("no {}arguments were given", positional),
+ 1 => format!("there is 1 {}argument", positional),
+ x => format!("there are {} {}arguments", x, positional),
+ };
+ err.note(&msg);
+
+ if named_pos.contains_key(&idx) {
+ err.span_label(args.operands[idx].1, "named argument");
+ err.span_note(
+ args.operands[idx].1,
+ "named arguments cannot be referenced by position",
+ );
+ } else if args.reg_args.contains(&idx) {
+ err.span_label(
+ args.operands[idx].1,
+ "explicit register argument",
+ );
+ err.span_note(
+ args.operands[idx].1,
+ "explicit register arguments cannot be used in the asm template",
+ );
+ }
+ err.emit();
+ None
+ } else {
+ Some(idx)
+ }
+ }
+ parse::ArgumentNamed(name) => {
+ match args.named_args.get(&Symbol::intern(name)) {
+ Some(&idx) => Some(idx),
+ None => {
+ let msg = format!("there is no argument named `{}`", name);
+ let span = arg.position_span;
+ ecx.struct_span_err(
+ template_span
+ .from_inner(InnerSpan::new(span.start, span.end)),
+ &msg,
+ )
+ .emit();
+ None
+ }
+ }
+ }
+ };
+
+ let mut chars = arg.format.ty.chars();
+ let mut modifier = chars.next();
+ if chars.next().is_some() {
+ let span = arg
+ .format
+ .ty_span
+ .map(|sp| template_sp.from_inner(InnerSpan::new(sp.start, sp.end)))
+ .unwrap_or(template_sp);
+ ecx.struct_span_err(
+ span,
+ "asm template modifier must be a single character",
+ )
+ .emit();
+ modifier = None;
+ }
+
+ if let Some(operand_idx) = operand_idx {
+ used[operand_idx] = true;
+ template.push(ast::InlineAsmTemplatePiece::Placeholder {
+ operand_idx,
+ modifier,
+ span,
+ });
+ }
+ }
+ }
+ }
+
+ if parser.line_spans.is_empty() {
+ let template_num_lines = 1 + template_str.matches('\n').count();
+ line_spans.extend(std::iter::repeat(template_sp).take(template_num_lines));
+ } else {
+ line_spans.extend(
+ parser
+ .line_spans
+ .iter()
+ .map(|span| template_span.from_inner(InnerSpan::new(span.start, span.end))),
+ );
+ };
+ }
+
+ let mut unused_operands = vec![];
+ let mut help_str = String::new();
+ for (idx, used) in used.into_iter().enumerate() {
+ if !used {
+ let msg = if let Some(sym) = named_pos.get(&idx) {
+ help_str.push_str(&format!(" {{{}}}", sym));
+ "named argument never used"
+ } else {
+ help_str.push_str(&format!(" {{{}}}", idx));
+ "argument never used"
+ };
+ unused_operands.push((args.operands[idx].1, msg));
+ }
+ }
+ match unused_operands.len() {
+ 0 => {}
+ 1 => {
+ let (sp, msg) = unused_operands.into_iter().next().unwrap();
+ let mut err = ecx.struct_span_err(sp, msg);
+ err.span_label(sp, msg);
+ err.help(&format!(
+ "if this argument is intentionally unused, \
+ consider using it in an asm comment: `\"/*{} */\"`",
+ help_str
+ ));
+ err.emit();
+ }
+ _ => {
+ let mut err = ecx.struct_span_err(
+ unused_operands.iter().map(|&(sp, _)| sp).collect::<Vec<Span>>(),
+ "multiple unused asm arguments",
+ );
+ for (sp, msg) in unused_operands {
+ err.span_label(sp, msg);
+ }
+ err.help(&format!(
+ "if these arguments are intentionally unused, \
+ consider using them in an asm comment: `\"/*{} */\"`",
+ help_str
+ ));
+ err.emit();
+ }
+ }
+
+ Some(ast::InlineAsm {
+ template,
+ template_strs: template_strs.into_boxed_slice(),
+ operands: args.operands,
+ clobber_abis: args.clobber_abis,
+ options: args.options,
+ line_spans,
+ })
+}
+
+pub(super) fn expand_asm<'cx>(
+ ecx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ match parse_args(ecx, sp, tts, false) {
+ Ok(args) => {
+ let expr = if let Some(inline_asm) = expand_preparsed_asm(ecx, args) {
+ P(ast::Expr {
+ id: ast::DUMMY_NODE_ID,
+ kind: ast::ExprKind::InlineAsm(P(inline_asm)),
+ span: sp,
+ attrs: ast::AttrVec::new(),
+ tokens: None,
+ })
+ } else {
+ DummyResult::raw_expr(sp, true)
+ };
+ MacEager::expr(expr)
+ }
+ Err(mut err) => {
+ err.emit();
+ DummyResult::any(sp)
+ }
+ }
+}
+
+pub(super) fn expand_global_asm<'cx>(
+ ecx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ match parse_args(ecx, sp, tts, true) {
+ Ok(args) => {
+ if let Some(inline_asm) = expand_preparsed_asm(ecx, args) {
+ MacEager::items(smallvec![P(ast::Item {
+ ident: Ident::empty(),
+ attrs: Vec::new(),
+ id: ast::DUMMY_NODE_ID,
+ kind: ast::ItemKind::GlobalAsm(Box::new(inline_asm)),
+ vis: ast::Visibility {
+ span: sp.shrink_to_lo(),
+ kind: ast::VisibilityKind::Inherited,
+ tokens: None,
+ },
+ span: ecx.with_def_site_ctxt(sp),
+ tokens: None,
+ })])
+ } else {
+ DummyResult::any(sp)
+ }
+ }
+ Err(mut err) => {
+ err.emit();
+ DummyResult::any(sp)
+ }
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/assert.rs b/compiler/rustc_builtin_macros/src/assert.rs
new file mode 100644
index 000000000..925c36edb
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/assert.rs
@@ -0,0 +1,178 @@
+mod context;
+
+use crate::edition_panic::use_panic_2021;
+use rustc_ast::ptr::P;
+use rustc_ast::token;
+use rustc_ast::tokenstream::{DelimSpan, TokenStream};
+use rustc_ast::{Expr, ExprKind, MacArgs, MacCall, MacDelimiter, Path, PathSegment, UnOp};
+use rustc_ast_pretty::pprust;
+use rustc_errors::{Applicability, PResult};
+use rustc_expand::base::{DummyResult, ExtCtxt, MacEager, MacResult};
+use rustc_parse::parser::Parser;
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+
+pub fn expand_assert<'cx>(
+ cx: &'cx mut ExtCtxt<'_>,
+ span: Span,
+ tts: TokenStream,
+) -> Box<dyn MacResult + 'cx> {
+ let Assert { cond_expr, custom_message } = match parse_assert(cx, span, tts) {
+ Ok(assert) => assert,
+ Err(mut err) => {
+ err.emit();
+ return DummyResult::any(span);
+ }
+ };
+
+ // `core::panic` and `std::panic` are different macros, so we use call-site
+ // context to pick up whichever is currently in scope.
+ let call_site_span = cx.with_call_site_ctxt(span);
+
+ let panic_path = || {
+ if use_panic_2021(span) {
+ // On edition 2021, we always call `$crate::panic::panic_2021!()`.
+ Path {
+ span: call_site_span,
+ segments: cx
+ .std_path(&[sym::panic, sym::panic_2021])
+ .into_iter()
+ .map(|ident| PathSegment::from_ident(ident))
+ .collect(),
+ tokens: None,
+ }
+ } else {
+ // Before edition 2021, we call `panic!()` unqualified,
+ // such that it calls either `std::panic!()` or `core::panic!()`.
+ Path::from_ident(Ident::new(sym::panic, call_site_span))
+ }
+ };
+
+    // Simply uses the user-provided message instead of generating custom outputs
+ let expr = if let Some(tokens) = custom_message {
+ let then = cx.expr(
+ call_site_span,
+ ExprKind::MacCall(MacCall {
+ path: panic_path(),
+ args: P(MacArgs::Delimited(
+ DelimSpan::from_single(call_site_span),
+ MacDelimiter::Parenthesis,
+ tokens,
+ )),
+ prior_type_ascription: None,
+ }),
+ );
+ expr_if_not(cx, call_site_span, cond_expr, then, None)
+ }
+ // If `generic_assert` is enabled, generates rich captured outputs
+ //
+ // FIXME(c410-f3r) See https://github.com/rust-lang/rust/issues/96949
+ else if let Some(features) = cx.ecfg.features && features.generic_assert {
+ context::Context::new(cx, call_site_span).build(cond_expr, panic_path())
+ }
+ // If `generic_assert` is not enabled, only outputs a literal "assertion failed: ..."
+ // string
+ else {
+ // Pass our own message directly to $crate::panicking::panic(),
+ // because it might contain `{` and `}` that should always be
+ // passed literally.
+ let then = cx.expr_call_global(
+ call_site_span,
+ cx.std_path(&[sym::panicking, sym::panic]),
+ vec![cx.expr_str(
+ DUMMY_SP,
+ Symbol::intern(&format!(
+ "assertion failed: {}",
+ pprust::expr_to_string(&cond_expr).escape_debug()
+ )),
+ )],
+ );
+ expr_if_not(cx, call_site_span, cond_expr, then, None)
+ };
+
+ MacEager::expr(expr)
+}
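+
+// Sketch of the default expansion (no custom message, `generic_assert`
+// disabled): `assert!(x == 1)` becomes, roughly,
+//
+//   if !(x == 1) {
+//       ::std::panicking::panic("assertion failed: x == 1")
+//   }
+//
+// while `assert!(x == 1, "msg {}", y)` forwards the tokens to `panic!`:
+//
+//   if !(x == 1) { panic!("msg {}", y) }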
+
+struct Assert {
+ cond_expr: P<Expr>,
+ custom_message: Option<TokenStream>,
+}
+
+// if !{ ... } { ... } else { ... }
+fn expr_if_not(
+ cx: &ExtCtxt<'_>,
+ span: Span,
+ cond: P<Expr>,
+ then: P<Expr>,
+ els: Option<P<Expr>>,
+) -> P<Expr> {
+ cx.expr_if(span, cx.expr(span, ExprKind::Unary(UnOp::Not, cond)), then, els)
+}
+
+fn parse_assert<'a>(cx: &mut ExtCtxt<'a>, sp: Span, stream: TokenStream) -> PResult<'a, Assert> {
+ let mut parser = cx.new_parser_from_tts(stream);
+
+ if parser.token == token::Eof {
+ let mut err = cx.struct_span_err(sp, "macro requires a boolean expression as an argument");
+ err.span_label(sp, "boolean expression required");
+ return Err(err);
+ }
+
+ let cond_expr = parser.parse_expr()?;
+
+ // Some crates use the `assert!` macro in the following form (note extra semicolon):
+ //
+ // assert!(
+ // my_function();
+ // );
+ //
+ // Emit an error about semicolon and suggest removing it.
+ if parser.token == token::Semi {
+ let mut err = cx.struct_span_err(sp, "macro requires an expression as an argument");
+ err.span_suggestion(
+ parser.token.span,
+ "try removing semicolon",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+
+ parser.bump();
+ }
+
+ // Some crates use the `assert!` macro in the following form (note missing comma before
+ // message):
+ //
+ // assert!(true "error message");
+ //
+ // Emit an error and suggest inserting a comma.
+ let custom_message =
+ if let token::Literal(token::Lit { kind: token::Str, .. }) = parser.token.kind {
+ let mut err = cx.struct_span_err(parser.token.span, "unexpected string literal");
+ let comma_span = parser.prev_token.span.shrink_to_hi();
+ err.span_suggestion_short(
+ comma_span,
+ "try adding a comma",
+ ", ",
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+
+ parse_custom_message(&mut parser)
+ } else if parser.eat(&token::Comma) {
+ parse_custom_message(&mut parser)
+ } else {
+ None
+ };
+
+ if parser.token != token::Eof {
+ return parser.unexpected();
+ }
+
+ Ok(Assert { cond_expr, custom_message })
+}
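+
+// Illustrative accepted forms (not part of the upstream source):
+// `assert!(cond)` and `assert!(cond, "msg {}", arg)`; everything after the
+// first comma is kept as an opaque token stream and forwarded unchanged to
+// the panic macro.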
+
+fn parse_custom_message(parser: &mut Parser<'_>) -> Option<TokenStream> {
+ let ts = parser.parse_tokens();
+ if !ts.is_empty() { Some(ts) } else { None }
+}
diff --git a/compiler/rustc_builtin_macros/src/assert/context.rs b/compiler/rustc_builtin_macros/src/assert/context.rs
new file mode 100644
index 000000000..dcea883a5
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/assert/context.rs
@@ -0,0 +1,453 @@
+use rustc_ast::{
+ attr,
+ ptr::P,
+ token,
+ tokenstream::{DelimSpan, TokenStream, TokenTree},
+ BinOpKind, BorrowKind, Expr, ExprKind, ItemKind, MacArgs, MacCall, MacDelimiter, Mutability,
+ Path, PathSegment, Stmt, StructRest, UnOp, UseTree, UseTreeKind, DUMMY_NODE_ID,
+};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_expand::base::ExtCtxt;
+use rustc_span::{
+ symbol::{sym, Ident, Symbol},
+ Span,
+};
+
+pub(super) struct Context<'cx, 'a> {
+ // An optimization.
+ //
+ // Elements that aren't consumed (PartialEq, PartialOrd, ...) can be copied **after** the
+ // `assert!` expression fails rather than copied on-the-fly.
+ best_case_captures: Vec<Stmt>,
+ // Top-level `let captureN = Capture::new()` statements
+ capture_decls: Vec<Capture>,
+ cx: &'cx ExtCtxt<'a>,
+ // Formatting string used for debugging
+ fmt_string: String,
+    // Whether the current expression being visited consumes itself. Used to construct
+ // `best_case_captures`.
+ is_consumed: bool,
+ // Top-level `let __local_bindN = &expr` statements
+ local_bind_decls: Vec<Stmt>,
+ // Used to avoid capturing duplicated paths
+ //
+ // ```rust
+ // let a = 1i32;
+ // assert!(add(a, a) == 3);
+ // ```
+ paths: FxHashSet<Ident>,
+ span: Span,
+}
+
+impl<'cx, 'a> Context<'cx, 'a> {
+ pub(super) fn new(cx: &'cx ExtCtxt<'a>, span: Span) -> Self {
+ Self {
+ best_case_captures: <_>::default(),
+ capture_decls: <_>::default(),
+ cx,
+ fmt_string: <_>::default(),
+ is_consumed: true,
+ local_bind_decls: <_>::default(),
+ paths: <_>::default(),
+ span,
+ }
+ }
+
+ /// Builds the whole `assert!` expression. For example, `let elem = 1; assert!(elem == 1);` expands to:
+ ///
+ /// ```rust
+ /// let elem = 1;
+ /// {
+ /// #[allow(unused_imports)]
+ /// use ::core::asserting::{TryCaptureGeneric, TryCapturePrintable};
+ /// let mut __capture0 = ::core::asserting::Capture::new();
+ /// let __local_bind0 = &elem;
+ /// if !(
+ /// *{
+ /// (&::core::asserting::Wrapper(__local_bind0)).try_capture(&mut __capture0);
+ /// __local_bind0
+ /// } == 1
+ /// ) {
+ /// panic!("Assertion failed: elem == 1\nWith captures:\n elem = {}", __capture0)
+ /// }
+ /// }
+ /// ```
+ pub(super) fn build(mut self, mut cond_expr: P<Expr>, panic_path: Path) -> P<Expr> {
+ let expr_str = pprust::expr_to_string(&cond_expr);
+ self.manage_cond_expr(&mut cond_expr);
+ let initial_imports = self.build_initial_imports();
+ let panic = self.build_panic(&expr_str, panic_path);
+ let cond_expr_with_unlikely = self.build_unlikely(cond_expr);
+
+ let Self { best_case_captures, capture_decls, cx, local_bind_decls, span, .. } = self;
+
+ let mut assert_then_stmts = Vec::with_capacity(2);
+ assert_then_stmts.extend(best_case_captures);
+ assert_then_stmts.push(self.cx.stmt_expr(panic));
+ let assert_then = self.cx.block(span, assert_then_stmts);
+
+ let mut stmts = Vec::with_capacity(4);
+ stmts.push(initial_imports);
+ stmts.extend(capture_decls.into_iter().map(|c| c.decl));
+ stmts.extend(local_bind_decls);
+ stmts.push(
+ cx.stmt_expr(cx.expr(span, ExprKind::If(cond_expr_with_unlikely, assert_then, None))),
+ );
+ cx.expr_block(cx.block(span, stmts))
+ }
+
+ /// Initial **trait** imports
+ ///
+ /// use ::core::asserting::{ ... };
+ fn build_initial_imports(&self) -> Stmt {
+ let nested_tree = |this: &Self, sym| {
+ (
+ UseTree {
+ prefix: this.cx.path(this.span, vec![Ident::with_dummy_span(sym)]),
+ kind: UseTreeKind::Simple(None, DUMMY_NODE_ID, DUMMY_NODE_ID),
+ span: this.span,
+ },
+ DUMMY_NODE_ID,
+ )
+ };
+ self.cx.stmt_item(
+ self.span,
+ self.cx.item(
+ self.span,
+ Ident::empty(),
+ vec![self.cx.attribute(attr::mk_list_item(
+ Ident::new(sym::allow, self.span),
+ vec![attr::mk_nested_word_item(Ident::new(sym::unused_imports, self.span))],
+ ))],
+ ItemKind::Use(UseTree {
+ prefix: self.cx.path(self.span, self.cx.std_path(&[sym::asserting])),
+ kind: UseTreeKind::Nested(vec![
+ nested_tree(self, sym::TryCaptureGeneric),
+ nested_tree(self, sym::TryCapturePrintable),
+ ]),
+ span: self.span,
+ }),
+ ),
+ )
+ }
+
+ /// Takes the conditional expression of `assert!` and then wraps it inside `unlikely`
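+    ///
+    /// For example (illustrative sketch), `a == b` becomes roughly
+    /// `::core::intrinsics::unlikely(!(a == b))`.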
+ fn build_unlikely(&self, cond_expr: P<Expr>) -> P<Expr> {
+ let unlikely_path = self.cx.std_path(&[sym::intrinsics, sym::unlikely]);
+ self.cx.expr_call(
+ self.span,
+ self.cx.expr_path(self.cx.path(self.span, unlikely_path)),
+ vec![self.cx.expr(self.span, ExprKind::Unary(UnOp::Not, cond_expr))],
+ )
+ }
+
+ /// The necessary custom `panic!(...)` expression.
+ ///
+ /// panic!(
+ /// "Assertion failed: ... \n With expansion: ...",
+ /// __capture0,
+ /// ...
+ /// );
+ fn build_panic(&self, expr_str: &str, panic_path: Path) -> P<Expr> {
+ let escaped_expr_str = escape_to_fmt(expr_str);
+ let initial = [
+ TokenTree::token_alone(
+ token::Literal(token::Lit {
+ kind: token::LitKind::Str,
+ symbol: Symbol::intern(&if self.fmt_string.is_empty() {
+ format!("Assertion failed: {escaped_expr_str}")
+ } else {
+ format!(
+ "Assertion failed: {escaped_expr_str}\nWith captures:\n{}",
+ &self.fmt_string
+ )
+ }),
+ suffix: None,
+ }),
+ self.span,
+ ),
+ TokenTree::token_alone(token::Comma, self.span),
+ ];
+ let captures = self.capture_decls.iter().flat_map(|cap| {
+ [
+ TokenTree::token_alone(token::Ident(cap.ident.name, false), cap.ident.span),
+ TokenTree::token_alone(token::Comma, self.span),
+ ]
+ });
+ self.cx.expr(
+ self.span,
+ ExprKind::MacCall(MacCall {
+ path: panic_path,
+ args: P(MacArgs::Delimited(
+ DelimSpan::from_single(self.span),
+ MacDelimiter::Parenthesis,
+ initial.into_iter().chain(captures).collect::<TokenStream>(),
+ )),
+ prior_type_ascription: None,
+ }),
+ )
+ }
+
+    /// Recursive function called until `cond_expr` and `fmt_string` are fully modified.
+ ///
+ /// See [Self::manage_initial_capture] and [Self::manage_try_capture]
+ fn manage_cond_expr(&mut self, expr: &mut P<Expr>) {
+ match (*expr).kind {
+ ExprKind::AddrOf(_, mutability, ref mut local_expr) => {
+ self.with_is_consumed_management(
+ matches!(mutability, Mutability::Mut),
+ |this| this.manage_cond_expr(local_expr)
+ );
+ }
+ ExprKind::Array(ref mut local_exprs) => {
+ for local_expr in local_exprs {
+ self.manage_cond_expr(local_expr);
+ }
+ }
+ ExprKind::Binary(ref op, ref mut lhs, ref mut rhs) => {
+ self.with_is_consumed_management(
+ matches!(
+ op.node,
+ BinOpKind::Add
+ | BinOpKind::And
+ | BinOpKind::BitAnd
+ | BinOpKind::BitOr
+ | BinOpKind::BitXor
+ | BinOpKind::Div
+ | BinOpKind::Mul
+ | BinOpKind::Or
+ | BinOpKind::Rem
+ | BinOpKind::Shl
+ | BinOpKind::Shr
+ | BinOpKind::Sub
+ ),
+ |this| {
+ this.manage_cond_expr(lhs);
+ this.manage_cond_expr(rhs);
+ }
+ );
+ }
+ ExprKind::Call(_, ref mut local_exprs) => {
+ for local_expr in local_exprs {
+ self.manage_cond_expr(local_expr);
+ }
+ }
+ ExprKind::Cast(ref mut local_expr, _) => {
+ self.manage_cond_expr(local_expr);
+ }
+ ExprKind::Index(ref mut prefix, ref mut suffix) => {
+ self.manage_cond_expr(prefix);
+ self.manage_cond_expr(suffix);
+ }
+ ExprKind::MethodCall(_, ref mut local_exprs, _) => {
+ for local_expr in local_exprs.iter_mut().skip(1) {
+ self.manage_cond_expr(local_expr);
+ }
+ }
+ ExprKind::Path(_, Path { ref segments, .. }) if let &[ref path_segment] = &segments[..] => {
+ let path_ident = path_segment.ident;
+ self.manage_initial_capture(expr, path_ident);
+ }
+ ExprKind::Paren(ref mut local_expr) => {
+ self.manage_cond_expr(local_expr);
+ }
+ ExprKind::Range(ref mut prefix, ref mut suffix, _) => {
+ if let Some(ref mut elem) = prefix {
+ self.manage_cond_expr(elem);
+ }
+ if let Some(ref mut elem) = suffix {
+ self.manage_cond_expr(elem);
+ }
+ }
+ ExprKind::Repeat(ref mut local_expr, ref mut elem) => {
+ self.manage_cond_expr(local_expr);
+ self.manage_cond_expr(&mut elem.value);
+ }
+ ExprKind::Struct(ref mut elem) => {
+ for field in &mut elem.fields {
+ self.manage_cond_expr(&mut field.expr);
+ }
+ if let StructRest::Base(ref mut local_expr) = elem.rest {
+ self.manage_cond_expr(local_expr);
+ }
+ }
+ ExprKind::Tup(ref mut local_exprs) => {
+ for local_expr in local_exprs {
+ self.manage_cond_expr(local_expr);
+ }
+ }
+ ExprKind::Unary(un_op, ref mut local_expr) => {
+ self.with_is_consumed_management(
+ matches!(un_op, UnOp::Neg | UnOp::Not),
+ |this| this.manage_cond_expr(local_expr)
+ );
+ }
+            // Expressions that are not worth capturing or cannot be captured.
+ //
+ // Full list instead of `_` to catch possible future inclusions and to
+ // sync with the `rfc-2011-nicer-assert-messages/all-expr-kinds.rs` test.
+ ExprKind::Assign(_, _, _)
+ | ExprKind::AssignOp(_, _, _)
+ | ExprKind::Async(_, _, _)
+ | ExprKind::Await(_)
+ | ExprKind::Block(_, _)
+ | ExprKind::Box(_)
+ | ExprKind::Break(_, _)
+ | ExprKind::Closure(_, _, _, _, _, _, _)
+ | ExprKind::ConstBlock(_)
+ | ExprKind::Continue(_)
+ | ExprKind::Err
+ | ExprKind::Field(_, _)
+ | ExprKind::ForLoop(_, _, _, _)
+ | ExprKind::If(_, _, _)
+ | ExprKind::InlineAsm(_)
+ | ExprKind::Let(_, _, _)
+ | ExprKind::Lit(_)
+ | ExprKind::Loop(_, _)
+ | ExprKind::MacCall(_)
+ | ExprKind::Match(_, _)
+ | ExprKind::Path(_, _)
+ | ExprKind::Ret(_)
+ | ExprKind::Try(_)
+ | ExprKind::TryBlock(_)
+ | ExprKind::Type(_, _)
+ | ExprKind::Underscore
+ | ExprKind::While(_, _, _)
+ | ExprKind::Yeet(_)
+ | ExprKind::Yield(_) => {}
+ }
+ }
+
+ /// Pushes the top-level declarations and modifies `expr` to try capturing variables.
+ ///
+ /// `fmt_str`, the formatting string used for debugging, is constructed to show possible
+ /// captured variables.
+ fn manage_initial_capture(&mut self, expr: &mut P<Expr>, path_ident: Ident) {
+ if self.paths.contains(&path_ident) {
+ return;
+ } else {
+ self.fmt_string.push_str(" ");
+ self.fmt_string.push_str(path_ident.as_str());
+ self.fmt_string.push_str(" = {:?}\n");
+ let _ = self.paths.insert(path_ident);
+ }
+ let curr_capture_idx = self.capture_decls.len();
+ let capture_string = format!("__capture{curr_capture_idx}");
+ let ident = Ident::new(Symbol::intern(&capture_string), self.span);
+ let init_std_path = self.cx.std_path(&[sym::asserting, sym::Capture, sym::new]);
+ let init = self.cx.expr_call(
+ self.span,
+ self.cx.expr_path(self.cx.path(self.span, init_std_path)),
+ vec![],
+ );
+ let capture = Capture { decl: self.cx.stmt_let(self.span, true, ident, init), ident };
+ self.capture_decls.push(capture);
+ self.manage_try_capture(ident, curr_capture_idx, expr);
+ }
+
+ /// Tries to copy `__local_bindN` into `__captureN`.
+ ///
+ /// *{
+ /// (&Wrapper(__local_bindN)).try_capture(&mut __captureN);
+ /// __local_bindN
+ /// }
+ fn manage_try_capture(&mut self, capture: Ident, curr_capture_idx: usize, expr: &mut P<Expr>) {
+ let local_bind_string = format!("__local_bind{curr_capture_idx}");
+ let local_bind = Ident::new(Symbol::intern(&local_bind_string), self.span);
+ self.local_bind_decls.push(self.cx.stmt_let(
+ self.span,
+ false,
+ local_bind,
+ self.cx.expr_addr_of(self.span, expr.clone()),
+ ));
+ let wrapper = self.cx.expr_call(
+ self.span,
+ self.cx.expr_path(
+ self.cx.path(self.span, self.cx.std_path(&[sym::asserting, sym::Wrapper])),
+ ),
+ vec![self.cx.expr_path(Path::from_ident(local_bind))],
+ );
+ let try_capture_call = self
+ .cx
+ .stmt_expr(expr_method_call(
+ self.cx,
+ PathSegment {
+ args: None,
+ id: DUMMY_NODE_ID,
+ ident: Ident::new(sym::try_capture, self.span),
+ },
+ vec![
+ expr_paren(self.cx, self.span, self.cx.expr_addr_of(self.span, wrapper)),
+ expr_addr_of_mut(
+ self.cx,
+ self.span,
+ self.cx.expr_path(Path::from_ident(capture)),
+ ),
+ ],
+ self.span,
+ ))
+ .add_trailing_semicolon();
+ let local_bind_path = self.cx.expr_path(Path::from_ident(local_bind));
+ let rslt = if self.is_consumed {
+ let ret = self.cx.stmt_expr(local_bind_path);
+ self.cx.expr_block(self.cx.block(self.span, vec![try_capture_call, ret]))
+ } else {
+ self.best_case_captures.push(try_capture_call);
+ local_bind_path
+ };
+ *expr = self.cx.expr_deref(self.span, rslt);
+ }
+
+ // Calls `f` with the internal `is_consumed` set to `curr_is_consumed` and then
+ // sets the internal `is_consumed` back to its original value.
+ fn with_is_consumed_management(&mut self, curr_is_consumed: bool, f: impl FnOnce(&mut Self)) {
+ let prev_is_consumed = self.is_consumed;
+ self.is_consumed = curr_is_consumed;
+ f(self);
+ self.is_consumed = prev_is_consumed;
+ }
+}
+
+/// Information about a captured element.
+#[derive(Debug)]
+struct Capture {
+ // Generated indexed `Capture` statement.
+ //
+ // `let __capture{} = Capture::new();`
+ decl: Stmt,
+ // The name of the generated indexed `Capture` variable.
+ //
+ // `__capture{}`
+ ident: Ident,
+}
+
+/// Escapes a string for use as a formatting string: `escape_debug` handles
+/// quotes, backslashes and control characters, while `{` and `}` are doubled
+/// so that `format!` treats them literally.
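+///
+/// For example (illustrative): `escape_to_fmt(r#"s == "a {b}""#)` yields
+/// `s == \"a {{b}}\"`.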
+fn escape_to_fmt(s: &str) -> String {
+ let mut rslt = String::with_capacity(s.len());
+ for c in s.chars() {
+ rslt.extend(c.escape_debug());
+ match c {
+ '{' | '}' => rslt.push(c),
+ _ => {}
+ }
+ }
+ rslt
+}
+
+fn expr_addr_of_mut(cx: &ExtCtxt<'_>, sp: Span, e: P<Expr>) -> P<Expr> {
+ cx.expr(sp, ExprKind::AddrOf(BorrowKind::Ref, Mutability::Mut, e))
+}
+
+fn expr_method_call(
+ cx: &ExtCtxt<'_>,
+ path: PathSegment,
+ args: Vec<P<Expr>>,
+ span: Span,
+) -> P<Expr> {
+ cx.expr(span, ExprKind::MethodCall(path, args, span))
+}
+
+fn expr_paren(cx: &ExtCtxt<'_>, sp: Span, e: P<Expr>) -> P<Expr> {
+ cx.expr(sp, ExprKind::Paren(e))
+}
diff --git a/compiler/rustc_builtin_macros/src/cfg.rs b/compiler/rustc_builtin_macros/src/cfg.rs
new file mode 100644
index 000000000..aa355150b
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/cfg.rs
@@ -0,0 +1,69 @@
+//! The compiler code necessary to support the cfg! extension, which expands to
+//! a literal `true` or `false` based on whether the given cfg matches the
+//! current compilation environment.
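+//!
+//! For example (illustrative):
+//!
+//! ```rust
+//! if cfg!(debug_assertions) {
+//!     eprintln!("debug build");
+//! }
+//! ```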
+
+use rustc_ast as ast;
+use rustc_ast::token;
+use rustc_ast::tokenstream::TokenStream;
+use rustc_attr as attr;
+use rustc_errors::PResult;
+use rustc_expand::base::{self, *};
+use rustc_macros::SessionDiagnostic;
+use rustc_span::Span;
+
+pub fn expand_cfg(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let sp = cx.with_def_site_ctxt(sp);
+
+ match parse_cfg(cx, sp, tts) {
+ Ok(cfg) => {
+ let matches_cfg = attr::cfg_matches(
+ &cfg,
+ &cx.sess.parse_sess,
+ cx.current_expansion.lint_node_id,
+ cx.ecfg.features,
+ );
+ MacEager::expr(cx.expr_bool(sp, matches_cfg))
+ }
+ Err(mut err) => {
+ err.emit();
+ DummyResult::any(sp)
+ }
+ }
+}
+
+#[derive(SessionDiagnostic)]
+#[error(builtin_macros::requires_cfg_pattern)]
+struct RequiresCfgPattern {
+ #[primary_span]
+ #[label]
+ span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(builtin_macros::expected_one_cfg_pattern)]
+struct OneCfgPattern {
+ #[primary_span]
+ span: Span,
+}
+
+fn parse_cfg<'a>(cx: &mut ExtCtxt<'a>, span: Span, tts: TokenStream) -> PResult<'a, ast::MetaItem> {
+ let mut p = cx.new_parser_from_tts(tts);
+
+ if p.token == token::Eof {
+ return Err(cx.create_err(RequiresCfgPattern { span }));
+ }
+
+ let cfg = p.parse_meta_item()?;
+
+ let _ = p.eat(&token::Comma);
+
+ if !p.eat(&token::Eof) {
+ return Err(cx.create_err(OneCfgPattern { span }));
+ }
+
+ Ok(cfg)
+}
diff --git a/compiler/rustc_builtin_macros/src/cfg_accessible.rs b/compiler/rustc_builtin_macros/src/cfg_accessible.rs
new file mode 100644
index 000000000..cb5359dd1
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/cfg_accessible.rs
@@ -0,0 +1,61 @@
+//! Implementation of the `#[cfg_accessible(path)]` attribute macro.
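+//!
+//! For example (illustrative): an item annotated with
+//! `#[cfg_accessible(core::mem::size_of)]` is kept if the path resolves,
+//! removed if it does not, and retried if resolution is indeterminate.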
+
+use rustc_ast as ast;
+use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, Indeterminate, MultiItemModifier};
+use rustc_feature::AttributeTemplate;
+use rustc_parse::validate_attr;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+pub(crate) struct Expander;
+
+fn validate_input<'a>(ecx: &mut ExtCtxt<'_>, mi: &'a ast::MetaItem) -> Option<&'a ast::Path> {
+ match mi.meta_item_list() {
+ None => {}
+ Some([]) => ecx.span_err(mi.span, "`cfg_accessible` path is not specified"),
+ Some([_, .., l]) => ecx.span_err(l.span(), "multiple `cfg_accessible` paths are specified"),
+ Some([nmi]) => match nmi.meta_item() {
+ None => ecx.span_err(nmi.span(), "`cfg_accessible` path cannot be a literal"),
+ Some(mi) => {
+ if !mi.is_word() {
+ ecx.span_err(mi.span, "`cfg_accessible` path cannot accept arguments");
+ }
+ return Some(&mi.path);
+ }
+ },
+ }
+ None
+}
+
+impl MultiItemModifier for Expander {
+ fn expand(
+ &self,
+ ecx: &mut ExtCtxt<'_>,
+ span: Span,
+ meta_item: &ast::MetaItem,
+ item: Annotatable,
+ ) -> ExpandResult<Vec<Annotatable>, Annotatable> {
+ let template = AttributeTemplate { list: Some("path"), ..Default::default() };
+ let attr = &ecx.attribute(meta_item.clone());
+ validate_attr::check_builtin_attribute(
+ &ecx.sess.parse_sess,
+ attr,
+ sym::cfg_accessible,
+ template,
+ );
+
+ let Some(path) = validate_input(ecx, meta_item) else {
+ return ExpandResult::Ready(Vec::new());
+ };
+
+ match ecx.resolver.cfg_accessible(ecx.current_expansion.id, path) {
+ Ok(true) => ExpandResult::Ready(vec![item]),
+ Ok(false) => ExpandResult::Ready(Vec::new()),
+ Err(Indeterminate) if ecx.force_mode => {
+ ecx.span_err(span, "cannot determine whether the path is accessible or not");
+ ExpandResult::Ready(vec![item])
+ }
+ Err(Indeterminate) => ExpandResult::Retry(item),
+ }
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/cfg_eval.rs b/compiler/rustc_builtin_macros/src/cfg_eval.rs
new file mode 100644
index 000000000..89b2c3292
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/cfg_eval.rs
@@ -0,0 +1,269 @@
+use crate::util::{check_builtin_macro_attribute, warn_on_duplicate_attribute};
+
+use rustc_ast as ast;
+use rustc_ast::mut_visit::MutVisitor;
+use rustc_ast::ptr::P;
+use rustc_ast::visit::Visitor;
+use rustc_ast::NodeId;
+use rustc_ast::{mut_visit, visit};
+use rustc_ast::{Attribute, HasAttrs, HasTokens};
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_expand::config::StripUnconfigured;
+use rustc_expand::configure;
+use rustc_feature::Features;
+use rustc_parse::parser::{ForceCollect, Parser};
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use smallvec::SmallVec;
+
+pub(crate) fn expand(
+ ecx: &mut ExtCtxt<'_>,
+ _span: Span,
+ meta_item: &ast::MetaItem,
+ annotatable: Annotatable,
+) -> Vec<Annotatable> {
+ check_builtin_macro_attribute(ecx, meta_item, sym::cfg_eval);
+ warn_on_duplicate_attribute(&ecx, &annotatable, sym::cfg_eval);
+ vec![cfg_eval(ecx.sess, ecx.ecfg.features, annotatable, ecx.current_expansion.lint_node_id)]
+}
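+
+// Illustrative input (not part of the upstream source): given
+//
+//     #[cfg_eval]
+//     struct S {
+//         #[cfg(FALSE)] skipped: u8,
+//         kept: u8,
+//     }
+//
+// the `#[cfg(FALSE)]` field is stripped eagerly, before any later macro
+// expansion sees the item.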
+
+pub(crate) fn cfg_eval(
+ sess: &Session,
+ features: Option<&Features>,
+ annotatable: Annotatable,
+ lint_node_id: NodeId,
+) -> Annotatable {
+ CfgEval { cfg: &mut StripUnconfigured { sess, features, config_tokens: true, lint_node_id } }
+ .configure_annotatable(annotatable)
+ // Since the item itself has already been configured by the `InvocationCollector`,
+    // we know that the fold result vector will contain exactly one element.
+ .unwrap()
+}
+
+struct CfgEval<'a, 'b> {
+ cfg: &'a mut StripUnconfigured<'b>,
+}
+
+fn flat_map_annotatable(
+ vis: &mut impl MutVisitor,
+ annotatable: Annotatable,
+) -> Option<Annotatable> {
+ match annotatable {
+ Annotatable::Item(item) => vis.flat_map_item(item).pop().map(Annotatable::Item),
+ Annotatable::TraitItem(item) => {
+ vis.flat_map_trait_item(item).pop().map(Annotatable::TraitItem)
+ }
+ Annotatable::ImplItem(item) => {
+ vis.flat_map_impl_item(item).pop().map(Annotatable::ImplItem)
+ }
+ Annotatable::ForeignItem(item) => {
+ vis.flat_map_foreign_item(item).pop().map(Annotatable::ForeignItem)
+ }
+ Annotatable::Stmt(stmt) => {
+ vis.flat_map_stmt(stmt.into_inner()).pop().map(P).map(Annotatable::Stmt)
+ }
+ Annotatable::Expr(mut expr) => {
+ vis.visit_expr(&mut expr);
+ Some(Annotatable::Expr(expr))
+ }
+ Annotatable::Arm(arm) => vis.flat_map_arm(arm).pop().map(Annotatable::Arm),
+ Annotatable::ExprField(field) => {
+ vis.flat_map_expr_field(field).pop().map(Annotatable::ExprField)
+ }
+ Annotatable::PatField(fp) => vis.flat_map_pat_field(fp).pop().map(Annotatable::PatField),
+ Annotatable::GenericParam(param) => {
+ vis.flat_map_generic_param(param).pop().map(Annotatable::GenericParam)
+ }
+ Annotatable::Param(param) => vis.flat_map_param(param).pop().map(Annotatable::Param),
+ Annotatable::FieldDef(sf) => vis.flat_map_field_def(sf).pop().map(Annotatable::FieldDef),
+ Annotatable::Variant(v) => vis.flat_map_variant(v).pop().map(Annotatable::Variant),
+ Annotatable::Crate(mut krate) => {
+ vis.visit_crate(&mut krate);
+ Some(Annotatable::Crate(krate))
+ }
+ }
+}
+
+struct CfgFinder {
+ has_cfg_or_cfg_attr: bool,
+}
+
+impl CfgFinder {
+ fn has_cfg_or_cfg_attr(annotatable: &Annotatable) -> bool {
+ let mut finder = CfgFinder { has_cfg_or_cfg_attr: false };
+ match annotatable {
+ Annotatable::Item(item) => finder.visit_item(&item),
+ Annotatable::TraitItem(item) => finder.visit_assoc_item(&item, visit::AssocCtxt::Trait),
+ Annotatable::ImplItem(item) => finder.visit_assoc_item(&item, visit::AssocCtxt::Impl),
+ Annotatable::ForeignItem(item) => finder.visit_foreign_item(&item),
+ Annotatable::Stmt(stmt) => finder.visit_stmt(&stmt),
+ Annotatable::Expr(expr) => finder.visit_expr(&expr),
+ Annotatable::Arm(arm) => finder.visit_arm(&arm),
+ Annotatable::ExprField(field) => finder.visit_expr_field(&field),
+ Annotatable::PatField(field) => finder.visit_pat_field(&field),
+ Annotatable::GenericParam(param) => finder.visit_generic_param(&param),
+ Annotatable::Param(param) => finder.visit_param(&param),
+ Annotatable::FieldDef(field) => finder.visit_field_def(&field),
+ Annotatable::Variant(variant) => finder.visit_variant(&variant),
+ Annotatable::Crate(krate) => finder.visit_crate(krate),
+ };
+ finder.has_cfg_or_cfg_attr
+ }
+}
+
+impl<'ast> visit::Visitor<'ast> for CfgFinder {
+ fn visit_attribute(&mut self, attr: &'ast Attribute) {
+ // We want short-circuiting behavior, so don't use the '|=' operator.
+ self.has_cfg_or_cfg_attr = self.has_cfg_or_cfg_attr
+ || attr
+ .ident()
+ .map_or(false, |ident| ident.name == sym::cfg || ident.name == sym::cfg_attr);
+ }
+}
+
+impl CfgEval<'_, '_> {
+ fn configure<T: HasAttrs + HasTokens>(&mut self, node: T) -> Option<T> {
+ self.cfg.configure(node)
+ }
+
+ fn configure_annotatable(&mut self, mut annotatable: Annotatable) -> Option<Annotatable> {
+ // Tokenizing and re-parsing the `Annotatable` can have a significant
+ // performance impact, so try to avoid it if possible
+ if !CfgFinder::has_cfg_or_cfg_attr(&annotatable) {
+ return Some(annotatable);
+ }
+
+ // The majority of parsed attribute targets will never need to have early cfg-expansion
+ // run (e.g. they are not part of a `#[derive]` or `#[cfg_eval]` macro input).
+ // Therefore, we normally do not capture the necessary information about `#[cfg]`
+ // and `#[cfg_attr]` attributes during parsing.
+ //
+ // Therefore, when we actually *do* run early cfg-expansion, we need to tokenize
+ // and re-parse the attribute target, this time capturing information about
+ // the location of `#[cfg]` and `#[cfg_attr]` in the token stream. The tokenization
+        // process is lossless, so it is invisible to proc-macros.
+
+ let parse_annotatable_with: fn(&mut Parser<'_>) -> _ = match annotatable {
+ Annotatable::Item(_) => {
+ |parser| Annotatable::Item(parser.parse_item(ForceCollect::Yes).unwrap().unwrap())
+ }
+ Annotatable::TraitItem(_) => |parser| {
+ Annotatable::TraitItem(
+ parser.parse_trait_item(ForceCollect::Yes).unwrap().unwrap().unwrap(),
+ )
+ },
+ Annotatable::ImplItem(_) => |parser| {
+ Annotatable::ImplItem(
+ parser.parse_impl_item(ForceCollect::Yes).unwrap().unwrap().unwrap(),
+ )
+ },
+ Annotatable::ForeignItem(_) => |parser| {
+ Annotatable::ForeignItem(
+ parser.parse_foreign_item(ForceCollect::Yes).unwrap().unwrap().unwrap(),
+ )
+ },
+ Annotatable::Stmt(_) => |parser| {
+ Annotatable::Stmt(P(parser.parse_stmt(ForceCollect::Yes).unwrap().unwrap()))
+ },
+ Annotatable::Expr(_) => {
+ |parser| Annotatable::Expr(parser.parse_expr_force_collect().unwrap())
+ }
+ _ => unreachable!(),
+ };
+
+ // 'Flatten' all nonterminals (i.e. `TokenKind::Interpolated`)
+ // to `None`-delimited groups containing the corresponding tokens. This
+ // is normally delayed until the proc-macro server actually needs to
+ // provide a `TokenKind::Interpolated` to a proc-macro. We do this earlier,
+ // so that we can handle cases like:
+ //
+ // ```rust
+ // #[cfg_eval] #[cfg] $item
+        // ```
+ //
+ // where `$item` is `#[cfg_attr] struct Foo {}`. We want to make
+ // sure to evaluate *all* `#[cfg]` and `#[cfg_attr]` attributes - the simplest
+ // way to do this is to do a single parse of a stream without any nonterminals.
+ let orig_tokens = annotatable.to_tokens().flattened();
+
+ // Re-parse the tokens, setting the `capture_cfg` flag to save extra information
+ // to the captured `AttrAnnotatedTokenStream` (specifically, we capture
+ // `AttrAnnotatedTokenTree::AttributesData` for all occurrences of `#[cfg]` and `#[cfg_attr]`)
+ let mut parser =
+ rustc_parse::stream_to_parser(&self.cfg.sess.parse_sess, orig_tokens, None);
+ parser.capture_cfg = true;
+ annotatable = parse_annotatable_with(&mut parser);
+
+ // Now that we have our re-parsed `AttrAnnotatedTokenStream`, recursively configuring
+        // our attribute target will correctly configure the tokens as well.
+ flat_map_annotatable(self, annotatable)
+ }
+}
+
+impl MutVisitor for CfgEval<'_, '_> {
+ fn visit_expr(&mut self, expr: &mut P<ast::Expr>) {
+ self.cfg.configure_expr(expr);
+ mut_visit::noop_visit_expr(expr, self);
+ }
+
+ fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
+ let mut expr = configure!(self, expr);
+ mut_visit::noop_visit_expr(&mut expr, self);
+ Some(expr)
+ }
+
+ fn flat_map_generic_param(
+ &mut self,
+ param: ast::GenericParam,
+ ) -> SmallVec<[ast::GenericParam; 1]> {
+ mut_visit::noop_flat_map_generic_param(configure!(self, param), self)
+ }
+
+ fn flat_map_stmt(&mut self, stmt: ast::Stmt) -> SmallVec<[ast::Stmt; 1]> {
+ mut_visit::noop_flat_map_stmt(configure!(self, stmt), self)
+ }
+
+ fn flat_map_item(&mut self, item: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
+ mut_visit::noop_flat_map_item(configure!(self, item), self)
+ }
+
+ fn flat_map_impl_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
+ mut_visit::noop_flat_map_assoc_item(configure!(self, item), self)
+ }
+
+ fn flat_map_trait_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
+ mut_visit::noop_flat_map_assoc_item(configure!(self, item), self)
+ }
+
+ fn flat_map_foreign_item(
+ &mut self,
+ foreign_item: P<ast::ForeignItem>,
+ ) -> SmallVec<[P<ast::ForeignItem>; 1]> {
+ mut_visit::noop_flat_map_foreign_item(configure!(self, foreign_item), self)
+ }
+
+ fn flat_map_arm(&mut self, arm: ast::Arm) -> SmallVec<[ast::Arm; 1]> {
+ mut_visit::noop_flat_map_arm(configure!(self, arm), self)
+ }
+
+ fn flat_map_expr_field(&mut self, field: ast::ExprField) -> SmallVec<[ast::ExprField; 1]> {
+ mut_visit::noop_flat_map_expr_field(configure!(self, field), self)
+ }
+
+ fn flat_map_pat_field(&mut self, fp: ast::PatField) -> SmallVec<[ast::PatField; 1]> {
+ mut_visit::noop_flat_map_pat_field(configure!(self, fp), self)
+ }
+
+ fn flat_map_param(&mut self, p: ast::Param) -> SmallVec<[ast::Param; 1]> {
+ mut_visit::noop_flat_map_param(configure!(self, p), self)
+ }
+
+ fn flat_map_field_def(&mut self, sf: ast::FieldDef) -> SmallVec<[ast::FieldDef; 1]> {
+ mut_visit::noop_flat_map_field_def(configure!(self, sf), self)
+ }
+
+ fn flat_map_variant(&mut self, variant: ast::Variant) -> SmallVec<[ast::Variant; 1]> {
+ mut_visit::noop_flat_map_variant(configure!(self, variant), self)
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/cmdline_attrs.rs b/compiler/rustc_builtin_macros/src/cmdline_attrs.rs
new file mode 100644
index 000000000..747e48ece
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/cmdline_attrs.rs
@@ -0,0 +1,35 @@
+//! Attributes injected into the crate root from the command line using `-Z crate-attr`.
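+//!
+//! For example (illustrative): `-Z crate-attr=feature(rustc_attrs)` behaves
+//! as if `#![feature(rustc_attrs)]` had been written at the top of the
+//! crate root.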
+
+use rustc_ast::attr::mk_attr;
+use rustc_ast::token;
+use rustc_ast::{self as ast, AttrItem, AttrStyle};
+use rustc_session::parse::ParseSess;
+use rustc_span::FileName;
+
+pub fn inject(mut krate: ast::Crate, parse_sess: &ParseSess, attrs: &[String]) -> ast::Crate {
+ for raw_attr in attrs {
+ let mut parser = rustc_parse::new_parser_from_source_str(
+ parse_sess,
+ FileName::cli_crate_attr_source_code(&raw_attr),
+ raw_attr.clone(),
+ );
+
+ let start_span = parser.token.span;
+ let AttrItem { path, args, tokens: _ } = match parser.parse_attr_item(false) {
+ Ok(ai) => ai,
+ Err(mut err) => {
+ err.emit();
+ continue;
+ }
+ };
+ let end_span = parser.token.span;
+ if parser.token != token::Eof {
+ parse_sess.span_diagnostic.span_err(start_span.to(end_span), "invalid crate attribute");
+ continue;
+ }
+
+ krate.attrs.push(mk_attr(AttrStyle::Inner, path, args, start_span.to(end_span)));
+ }
+
+ krate
+}
diff --git a/compiler/rustc_builtin_macros/src/compile_error.rs b/compiler/rustc_builtin_macros/src/compile_error.rs
new file mode 100644
index 000000000..72397aa25
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/compile_error.rs
@@ -0,0 +1,19 @@
+// The compiler code necessary to support the compile_error! extension.
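+//
+// For example (illustrative): `compile_error!("unsupported target")` reports
+// "unsupported target" at the call site and expands to a dummy result, so
+// compilation fails.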
+
+use rustc_ast::tokenstream::TokenStream;
+use rustc_expand::base::{self, *};
+use rustc_span::Span;
+
+pub fn expand_compile_error<'cx>(
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ let Some(var) = get_single_str_from_tts(cx, sp, tts, "compile_error!") else {
+ return DummyResult::any(sp);
+ };
+
+ cx.span_err(sp, var.as_str());
+
+ DummyResult::any(sp)
+}
diff --git a/compiler/rustc_builtin_macros/src/concat.rs b/compiler/rustc_builtin_macros/src/concat.rs
new file mode 100644
index 000000000..a23dd1d12
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/concat.rs
@@ -0,0 +1,64 @@
+use rustc_ast as ast;
+use rustc_ast::tokenstream::TokenStream;
+use rustc_expand::base::{self, DummyResult};
+use rustc_span::symbol::Symbol;
+
+use std::string::String;
+
+pub fn expand_concat(
+ cx: &mut base::ExtCtxt<'_>,
+ sp: rustc_span::Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let Some(es) = base::get_exprs_from_tts(cx, sp, tts) else {
+ return DummyResult::any(sp);
+ };
+ let mut accumulator = String::new();
+ let mut missing_literal = vec![];
+ let mut has_errors = false;
+ for e in es {
+ match e.kind {
+ ast::ExprKind::Lit(ref lit) => match lit.kind {
+ ast::LitKind::Str(ref s, _) | ast::LitKind::Float(ref s, _) => {
+ accumulator.push_str(s.as_str());
+ }
+ ast::LitKind::Char(c) => {
+ accumulator.push(c);
+ }
+ ast::LitKind::Int(
+ i,
+ ast::LitIntType::Unsigned(_)
+ | ast::LitIntType::Signed(_)
+ | ast::LitIntType::Unsuffixed,
+ ) => {
+ accumulator.push_str(&i.to_string());
+ }
+ ast::LitKind::Bool(b) => {
+ accumulator.push_str(&b.to_string());
+ }
+ ast::LitKind::Byte(..) | ast::LitKind::ByteStr(..) => {
+ cx.span_err(e.span, "cannot concatenate a byte string literal");
+ }
+ ast::LitKind::Err(_) => {
+ has_errors = true;
+ }
+ },
+ ast::ExprKind::Err => {
+ has_errors = true;
+ }
+ _ => {
+ missing_literal.push(e.span);
+ }
+ }
+ }
+ if !missing_literal.is_empty() {
+ let mut err = cx.struct_span_err(missing_literal, "expected a literal");
+ err.note("only literals (like `\"foo\"`, `42` and `3.14`) can be passed to `concat!()`");
+ err.emit();
+ return DummyResult::any(sp);
+ } else if has_errors {
+ return DummyResult::any(sp);
+ }
+ let sp = cx.with_def_site_ctxt(sp);
+ base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&accumulator)))
+}
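+
+// Illustrative examples (not part of the upstream source):
+// `concat!("a", 1, true)` expands to the string literal "a1true", while a
+// byte string such as `b"x"` is rejected with "cannot concatenate a byte
+// string literal".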
diff --git a/compiler/rustc_builtin_macros/src/concat_bytes.rs b/compiler/rustc_builtin_macros/src/concat_bytes.rs
new file mode 100644
index 000000000..a1afec410
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/concat_bytes.rs
@@ -0,0 +1,189 @@
+use rustc_ast as ast;
+use rustc_ast::{ptr::P, tokenstream::TokenStream};
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::Applicability;
+use rustc_expand::base::{self, DummyResult};
+
+/// Emits errors for literal expressions that are invalid inside and outside of an array.
+fn invalid_type_err(cx: &mut base::ExtCtxt<'_>, expr: &P<rustc_ast::Expr>, is_nested: bool) {
+ let ast::ExprKind::Lit(lit) = &expr.kind else {
+ unreachable!();
+ };
+ match lit.kind {
+ ast::LitKind::Char(_) => {
+ let mut err = cx.struct_span_err(expr.span, "cannot concatenate character literals");
+ if let Ok(snippet) = cx.sess.source_map().span_to_snippet(expr.span) {
+ err.span_suggestion(
+ expr.span,
+ "try using a byte character",
+ format!("b{}", snippet),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ }
+ ast::LitKind::Str(_, _) => {
+ let mut err = cx.struct_span_err(expr.span, "cannot concatenate string literals");
+            // The suggestion would be invalid if we are nested.
+ if !is_nested {
+ if let Ok(snippet) = cx.sess.source_map().span_to_snippet(expr.span) {
+ err.span_suggestion(
+ expr.span,
+ "try using a byte string",
+ format!("b{}", snippet),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ err.emit();
+ }
+ ast::LitKind::Float(_, _) => {
+ cx.span_err(expr.span, "cannot concatenate float literals");
+ }
+ ast::LitKind::Bool(_) => {
+ cx.span_err(expr.span, "cannot concatenate boolean literals");
+ }
+ ast::LitKind::Err(_) => {}
+ ast::LitKind::Int(_, _) if !is_nested => {
+ let mut err = cx.struct_span_err(expr.span, "cannot concatenate numeric literals");
+ if let Ok(snippet) = cx.sess.source_map().span_to_snippet(expr.span) {
+ err.span_suggestion(
+ expr.span,
+ "try wrapping the number in an array",
+ format!("[{}]", snippet),
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ }
+ ast::LitKind::Int(
+ val,
+ ast::LitIntType::Unsuffixed | ast::LitIntType::Unsigned(ast::UintTy::U8),
+ ) => {
+ assert!(val > u8::MAX.into()); // must be an error
+ cx.span_err(expr.span, "numeric literal is out of bounds");
+ }
+ ast::LitKind::Int(_, _) => {
+ cx.span_err(expr.span, "numeric literal is not a `u8`");
+ }
+ _ => unreachable!(),
+ }
+}
+
+fn handle_array_element(
+ cx: &mut base::ExtCtxt<'_>,
+ has_errors: &mut bool,
+ missing_literals: &mut Vec<rustc_span::Span>,
+ expr: &P<rustc_ast::Expr>,
+) -> Option<u8> {
+ match expr.kind {
+ ast::ExprKind::Array(_) | ast::ExprKind::Repeat(_, _) => {
+ if !*has_errors {
+ cx.span_err(expr.span, "cannot concatenate doubly nested array");
+ }
+ *has_errors = true;
+ None
+ }
+ ast::ExprKind::Lit(ref lit) => match lit.kind {
+ ast::LitKind::Int(
+ val,
+ ast::LitIntType::Unsuffixed | ast::LitIntType::Unsigned(ast::UintTy::U8),
+ ) if val <= u8::MAX.into() => Some(val as u8),
+
+ ast::LitKind::Byte(val) => Some(val),
+ ast::LitKind::ByteStr(_) => {
+ if !*has_errors {
+ cx.struct_span_err(expr.span, "cannot concatenate doubly nested array")
+ .note("byte strings are treated as arrays of bytes")
+ .help("try flattening the array")
+ .emit();
+ }
+ *has_errors = true;
+ None
+ }
+ _ => {
+ if !*has_errors {
+ invalid_type_err(cx, expr, true);
+ }
+ *has_errors = true;
+ None
+ }
+ },
+ _ => {
+ missing_literals.push(expr.span);
+ None
+ }
+ }
+}
+
+pub fn expand_concat_bytes(
+ cx: &mut base::ExtCtxt<'_>,
+ sp: rustc_span::Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let Some(es) = base::get_exprs_from_tts(cx, sp, tts) else {
+ return DummyResult::any(sp);
+ };
+ let mut accumulator = Vec::new();
+ let mut missing_literals = vec![];
+ let mut has_errors = false;
+ for e in es {
+ match e.kind {
+ ast::ExprKind::Array(ref exprs) => {
+ for expr in exprs {
+ if let Some(elem) =
+ handle_array_element(cx, &mut has_errors, &mut missing_literals, expr)
+ {
+ accumulator.push(elem);
+ }
+ }
+ }
+ ast::ExprKind::Repeat(ref expr, ref count) => {
+ if let ast::ExprKind::Lit(ast::Lit {
+ kind: ast::LitKind::Int(count_val, _), ..
+ }) = count.value.kind
+ {
+ if let Some(elem) =
+ handle_array_element(cx, &mut has_errors, &mut missing_literals, expr)
+ {
+ for _ in 0..count_val {
+ accumulator.push(elem);
+ }
+ }
+ } else {
+ cx.span_err(count.value.span, "repeat count is not a positive number");
+ }
+ }
+ ast::ExprKind::Lit(ref lit) => match lit.kind {
+ ast::LitKind::Byte(val) => {
+ accumulator.push(val);
+ }
+ ast::LitKind::ByteStr(ref bytes) => {
+ accumulator.extend_from_slice(&bytes);
+ }
+ _ => {
+ if !has_errors {
+ invalid_type_err(cx, &e, false);
+ }
+ has_errors = true;
+ }
+ },
+ ast::ExprKind::Err => {
+ has_errors = true;
+ }
+ _ => {
+ missing_literals.push(e.span);
+ }
+ }
+ }
+ if !missing_literals.is_empty() {
+ let mut err = cx.struct_span_err(missing_literals.clone(), "expected a byte literal");
+ err.note("only byte literals (like `b\"foo\"`, `b's'`, and `[3, 4, 5]`) can be passed to `concat_bytes!()`");
+ err.emit();
+ return base::MacEager::expr(DummyResult::raw_expr(sp, true));
+ } else if has_errors {
+ return base::MacEager::expr(DummyResult::raw_expr(sp, true));
+ }
+ let sp = cx.with_def_site_ctxt(sp);
+ base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(Lrc::from(accumulator))))
+}
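+
+// Illustrative example (not part of the upstream source):
+// `concat_bytes!(b'A', b"BC", [68, 69])` expands to the byte string literal
+// `b"ABCDE"`.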
diff --git a/compiler/rustc_builtin_macros/src/concat_idents.rs b/compiler/rustc_builtin_macros/src/concat_idents.rs
new file mode 100644
index 000000000..297c604e0
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/concat_idents.rs
@@ -0,0 +1,70 @@
+use rustc_ast as ast;
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Token};
+use rustc_ast::tokenstream::{TokenStream, TokenTree};
+use rustc_expand::base::{self, *};
+use rustc_span::symbol::{Ident, Symbol};
+use rustc_span::Span;
+
+pub fn expand_concat_idents<'cx>(
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ if tts.is_empty() {
+ cx.span_err(sp, "concat_idents! takes 1 or more arguments");
+ return DummyResult::any(sp);
+ }
+
+ let mut res_str = String::new();
+ for (i, e) in tts.into_trees().enumerate() {
+ if i & 1 == 1 {
+ match e {
+ TokenTree::Token(Token { kind: token::Comma, .. }, _) => {}
+ _ => {
+ cx.span_err(sp, "concat_idents! expecting comma");
+ return DummyResult::any(sp);
+ }
+ }
+ } else {
+ if let TokenTree::Token(token, _) = e {
+ if let Some((ident, _)) = token.ident() {
+ res_str.push_str(ident.name.as_str());
+ continue;
+ }
+ }
+
+ cx.span_err(sp, "concat_idents! requires ident args");
+ return DummyResult::any(sp);
+ }
+ }
+
+ let ident = Ident::new(Symbol::intern(&res_str), cx.with_call_site_ctxt(sp));
+
+ struct ConcatIdentsResult {
+ ident: Ident,
+ }
+
+ impl base::MacResult for ConcatIdentsResult {
+ fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> {
+ Some(P(ast::Expr {
+ id: ast::DUMMY_NODE_ID,
+ kind: ast::ExprKind::Path(None, ast::Path::from_ident(self.ident)),
+ span: self.ident.span,
+ attrs: ast::AttrVec::new(),
+ tokens: None,
+ }))
+ }
+
+ fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> {
+ Some(P(ast::Ty {
+ id: ast::DUMMY_NODE_ID,
+ kind: ast::TyKind::Path(None, ast::Path::from_ident(self.ident)),
+ span: self.ident.span,
+ tokens: None,
+ }))
+ }
+ }
+
+ Box::new(ConcatIdentsResult { ident })
+}
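+
+// Illustrative example (not part of the upstream source):
+// `concat_idents!(foo, bar)` yields the identifier `foobar`, which can be
+// used in expression or type position but cannot introduce a new binding.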
diff --git a/compiler/rustc_builtin_macros/src/derive.rs b/compiler/rustc_builtin_macros/src/derive.rs
new file mode 100644
index 000000000..d3de10ca4
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/derive.rs
@@ -0,0 +1,158 @@
+use crate::cfg_eval::cfg_eval;
+
+use rustc_ast as ast;
+use rustc_ast::{attr, token, GenericParamKind, ItemKind, MetaItemKind, NestedMetaItem, StmtKind};
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, Indeterminate, MultiItemModifier};
+use rustc_feature::AttributeTemplate;
+use rustc_parse::validate_attr;
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+
+pub(crate) struct Expander;
+
+impl MultiItemModifier for Expander {
+ fn expand(
+ &self,
+ ecx: &mut ExtCtxt<'_>,
+ span: Span,
+ meta_item: &ast::MetaItem,
+ item: Annotatable,
+ ) -> ExpandResult<Vec<Annotatable>, Annotatable> {
+ let sess = ecx.sess;
+ if report_bad_target(sess, &item, span) {
+ // We don't want to pass inappropriate targets to derive macros to avoid
+            // follow-up errors; all other errors below are recoverable.
+ return ExpandResult::Ready(vec![item]);
+ }
+
+ let (sess, features) = (ecx.sess, ecx.ecfg.features);
+ let result =
+ ecx.resolver.resolve_derives(ecx.current_expansion.id, ecx.force_mode, &|| {
+ let template =
+ AttributeTemplate { list: Some("Trait1, Trait2, ..."), ..Default::default() };
+ let attr = attr::mk_attr_outer(meta_item.clone());
+ validate_attr::check_builtin_attribute(
+ &sess.parse_sess,
+ &attr,
+ sym::derive,
+ template,
+ );
+
+ let mut resolutions: Vec<_> = attr
+ .meta_item_list()
+ .unwrap_or_default()
+ .into_iter()
+ .filter_map(|nested_meta| match nested_meta {
+ NestedMetaItem::MetaItem(meta) => Some(meta),
+ NestedMetaItem::Literal(lit) => {
+ // Reject `#[derive("Debug")]`.
+ report_unexpected_literal(sess, &lit);
+ None
+ }
+ })
+ .map(|meta| {
+ // Reject `#[derive(Debug = "value", Debug(abc))]`, but recover the paths.
+ report_path_args(sess, &meta);
+ meta.path
+ })
+ .map(|path| (path, dummy_annotatable(), None))
+ .collect();
+
+ // Do not configure or clone items unless necessary.
+ match &mut resolutions[..] {
+ [] => {}
+ [(_, first_item, _), others @ ..] => {
+ *first_item = cfg_eval(
+ sess,
+ features,
+ item.clone(),
+ ecx.current_expansion.lint_node_id,
+ );
+ for (_, item, _) in others {
+ *item = first_item.clone();
+ }
+ }
+ }
+
+ resolutions
+ });
+
+ match result {
+ Ok(()) => ExpandResult::Ready(vec![item]),
+ Err(Indeterminate) => ExpandResult::Retry(item),
+ }
+ }
+}
+
+// The cheapest `Annotatable` to construct.
+fn dummy_annotatable() -> Annotatable {
+ Annotatable::GenericParam(ast::GenericParam {
+ id: ast::DUMMY_NODE_ID,
+ ident: Ident::empty(),
+ attrs: Default::default(),
+ bounds: Default::default(),
+ is_placeholder: false,
+ kind: GenericParamKind::Lifetime,
+ colon_span: None,
+ })
+}
+
+fn report_bad_target(sess: &Session, item: &Annotatable, span: Span) -> bool {
+ let item_kind = match item {
+ Annotatable::Item(item) => Some(&item.kind),
+ Annotatable::Stmt(stmt) => match &stmt.kind {
+ StmtKind::Item(item) => Some(&item.kind),
+ _ => None,
+ },
+ _ => None,
+ };
+
+ let bad_target =
+ !matches!(item_kind, Some(ItemKind::Struct(..) | ItemKind::Enum(..) | ItemKind::Union(..)));
+ if bad_target {
+ struct_span_err!(
+ sess,
+ span,
+ E0774,
+ "`derive` may only be applied to `struct`s, `enum`s and `union`s",
+ )
+ .span_label(span, "not applicable here")
+ .span_label(item.span(), "not a `struct`, `enum` or `union`")
+ .emit();
+ }
+ bad_target
+}
+
+fn report_unexpected_literal(sess: &Session, lit: &ast::Lit) {
+ let help_msg = match lit.token.kind {
+ token::Str if rustc_lexer::is_ident(lit.token.symbol.as_str()) => {
+ format!("try using `#[derive({})]`", lit.token.symbol)
+ }
+ _ => "for example, write `#[derive(Debug)]` for `Debug`".to_string(),
+ };
+ struct_span_err!(sess, lit.span, E0777, "expected path to a trait, found literal",)
+ .span_label(lit.span, "not a trait")
+ .help(&help_msg)
+ .emit();
+}
+
+fn report_path_args(sess: &Session, meta: &ast::MetaItem) {
+ let report_error = |title, action| {
+ let span = meta.span.with_lo(meta.path.span.hi());
+ sess.struct_span_err(span, title)
+ .span_suggestion(span, action, "", Applicability::MachineApplicable)
+ .emit();
+ };
+ match meta.kind {
+ MetaItemKind::Word => {}
+ MetaItemKind::List(..) => report_error(
+ "traits in `#[derive(...)]` don't accept arguments",
+ "remove the arguments",
+ ),
+ MetaItemKind::NameValue(..) => {
+ report_error("traits in `#[derive(...)]` don't accept values", "remove the value")
+ }
+ }
+}
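+
+// Illustrative rejected forms (not part of the upstream source):
+// `#[derive(Debug = "x")]` and `#[derive(Debug(x))]` each get an error with
+// a machine-applicable suggestion to delete the extra tokens, while the
+// `Debug` path itself is still recovered and resolved.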
diff --git a/compiler/rustc_builtin_macros/src/deriving/bounds.rs b/compiler/rustc_builtin_macros/src/deriving/bounds.rs
new file mode 100644
index 000000000..5ef68c6ae
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/bounds.rs
@@ -0,0 +1,28 @@
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::path_std;
+
+use rustc_ast::MetaItem;
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::Span;
+
+pub fn expand_deriving_copy(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ let trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path: path_std!(marker::Copy),
+ additional_bounds: Vec::new(),
+ generics: Bounds::empty(),
+ supports_unions: true,
+ methods: Vec::new(),
+ associated_types: Vec::new(),
+ };
+
+ trait_def.expand(cx, mitem, item, push);
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/clone.rs b/compiler/rustc_builtin_macros/src/deriving/clone.rs
new file mode 100644
index 000000000..7755ff779
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/clone.rs
@@ -0,0 +1,212 @@
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::path_std;
+
+use rustc_ast::{self as ast, Generics, ItemKind, MetaItem, VariantData};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::Span;
+
+pub fn expand_deriving_clone(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ // The simple form is `fn clone(&self) -> Self { *self }`, possibly with
+ // some additional `AssertParamIsClone` assertions.
+ //
+ // We can use the simple form if either of the following are true.
+ // - The type derives Copy and there are no generic parameters. (If we
+ // used the simple form with generics, we'd have to bound the generics
+ // with Clone + Copy, and then there'd be no Clone impl at all if the
+ // user fills in something that is Clone but not Copy. After
+ // specialization we can remove this no-generics limitation.)
+ // - The item is a union. (Unions with generic parameters still can derive
+ // Clone because they require Copy for deriving, Clone alone is not
+ // enough. Whether Clone is implemented for fields is irrelevant so we
+ // don't assert it.)
+ let bounds;
+ let substructure;
+ let is_simple;
+ match *item {
+ Annotatable::Item(ref annitem) => match annitem.kind {
+ ItemKind::Struct(_, Generics { ref params, .. })
+ | ItemKind::Enum(_, Generics { ref params, .. }) => {
+ let container_id = cx.current_expansion.id.expn_data().parent.expect_local();
+ let has_derive_copy = cx.resolver.has_derive_copy(container_id);
+ if has_derive_copy
+ && !params
+ .iter()
+ .any(|param| matches!(param.kind, ast::GenericParamKind::Type { .. }))
+ {
+ bounds = vec![];
+ is_simple = true;
+ substructure = combine_substructure(Box::new(|c, s, sub| {
+ cs_clone_simple("Clone", c, s, sub, false)
+ }));
+ } else {
+ bounds = vec![];
+ is_simple = false;
+ substructure =
+ combine_substructure(Box::new(|c, s, sub| cs_clone("Clone", c, s, sub)));
+ }
+ }
+ ItemKind::Union(..) => {
+ bounds = vec![Path(path_std!(marker::Copy))];
+ is_simple = true;
+ substructure = combine_substructure(Box::new(|c, s, sub| {
+ cs_clone_simple("Clone", c, s, sub, true)
+ }));
+ }
+ _ => cx.span_bug(span, "`#[derive(Clone)]` on wrong item kind"),
+ },
+
+ _ => cx.span_bug(span, "`#[derive(Clone)]` on trait item or impl item"),
+ }
+
+ let inline = cx.meta_word(span, sym::inline);
+ let attrs = vec![cx.attribute(inline)];
+ let trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path: path_std!(clone::Clone),
+ additional_bounds: bounds,
+ generics: Bounds::empty(),
+ supports_unions: true,
+ methods: vec![MethodDef {
+ name: sym::clone,
+ generics: Bounds::empty(),
+ explicit_self: true,
+ nonself_args: Vec::new(),
+ ret_ty: Self_,
+ attributes: attrs,
+ unify_fieldless_variants: false,
+ combine_substructure: substructure,
+ }],
+ associated_types: Vec::new(),
+ };
+
+ trait_def.expand_ext(cx, mitem, item, push, is_simple)
+}
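+
+// Illustrative sketch (not part of the upstream source): for a non-generic
+// `#[derive(Copy, Clone)] struct P { x: u8 }`, the simple form emits roughly:
+//
+//     fn clone(&self) -> P {
+//         let _: ::core::clone::AssertParamIsClone<u8>;
+//         *self
+//     }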
+
+fn cs_clone_simple(
+ name: &str,
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ substr: &Substructure<'_>,
+ is_union: bool,
+) -> BlockOrExpr {
+ let mut stmts = Vec::new();
+ let mut seen_type_names = FxHashSet::default();
+ let mut process_variant = |variant: &VariantData| {
+ for field in variant.fields() {
+ // This basic redundancy checking only prevents duplication of
+ // assertions like `AssertParamIsClone<Foo>` where the type is a
+ // simple name. That's enough to get a lot of cases, though.
+ if let Some(name) = field.ty.kind.is_simple_path() && !seen_type_names.insert(name) {
+ // Already produced an assertion for this type.
+ } else {
+ // let _: AssertParamIsClone<FieldTy>;
+ super::assert_ty_bounds(
+ cx,
+ &mut stmts,
+ field.ty.clone(),
+ field.span,
+ &[sym::clone, sym::AssertParamIsClone],
+ );
+ }
+ }
+ };
+
+ if is_union {
+ // Just a single assertion for unions, that the union impls `Copy`.
+ // let _: AssertParamIsCopy<Self>;
+ let self_ty = cx.ty_path(cx.path_ident(trait_span, Ident::with_dummy_span(kw::SelfUpper)));
+ super::assert_ty_bounds(
+ cx,
+ &mut stmts,
+ self_ty,
+ trait_span,
+ &[sym::clone, sym::AssertParamIsCopy],
+ );
+ } else {
+ match *substr.fields {
+ StaticStruct(vdata, ..) => {
+ process_variant(vdata);
+ }
+ StaticEnum(enum_def, ..) => {
+ for variant in &enum_def.variants {
+ process_variant(&variant.data);
+ }
+ }
+ _ => cx.span_bug(
+ trait_span,
+ &format!("unexpected substructure in simple `derive({})`", name),
+ ),
+ }
+ }
+ BlockOrExpr::new_mixed(stmts, Some(cx.expr_deref(trait_span, cx.expr_self(trait_span))))
+}
+
+fn cs_clone(
+ name: &str,
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ substr: &Substructure<'_>,
+) -> BlockOrExpr {
+ let ctor_path;
+ let all_fields;
+ let fn_path = cx.std_path(&[sym::clone, sym::Clone, sym::clone]);
+ let subcall = |cx: &mut ExtCtxt<'_>, field: &FieldInfo| {
+ let args = vec![field.self_expr.clone()];
+ cx.expr_call_global(field.span, fn_path.clone(), args)
+ };
+
+ let vdata;
+ match *substr.fields {
+ Struct(vdata_, ref af) => {
+ ctor_path = cx.path(trait_span, vec![substr.type_ident]);
+ all_fields = af;
+ vdata = vdata_;
+ }
+ EnumMatching(.., variant, ref af) => {
+ ctor_path = cx.path(trait_span, vec![substr.type_ident, variant.ident]);
+ all_fields = af;
+ vdata = &variant.data;
+ }
+ EnumTag(..) => cx.span_bug(trait_span, &format!("enum tags in `derive({})`", name,)),
+ StaticEnum(..) | StaticStruct(..) => {
+ cx.span_bug(trait_span, &format!("associated function in `derive({})`", name))
+ }
+ }
+
+ let expr = match *vdata {
+ VariantData::Struct(..) => {
+ let fields = all_fields
+ .iter()
+ .map(|field| {
+ let Some(ident) = field.name else {
+ cx.span_bug(
+ trait_span,
+ &format!("unnamed field in normal struct in `derive({})`", name,),
+ );
+ };
+ let call = subcall(cx, field);
+ cx.field_imm(field.span, ident, call)
+ })
+ .collect::<Vec<_>>();
+
+ cx.expr_struct(trait_span, ctor_path, fields)
+ }
+ VariantData::Tuple(..) => {
+ let subcalls = all_fields.iter().map(|f| subcall(cx, f)).collect();
+ let path = cx.expr_path(ctor_path);
+ cx.expr_call(trait_span, path, subcalls)
+ }
+ VariantData::Unit(..) => cx.expr_path(ctor_path),
+ };
+ BlockOrExpr::new_expr(expr)
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
new file mode 100644
index 000000000..4e798bf6a
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/eq.rs
@@ -0,0 +1,90 @@
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::path_std;
+
+use rustc_ast::{self as ast, MetaItem};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+
+pub fn expand_deriving_eq(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ let span = cx.with_def_site_ctxt(span);
+ let inline = cx.meta_word(span, sym::inline);
+ let hidden = rustc_ast::attr::mk_nested_word_item(Ident::new(sym::hidden, span));
+ let doc = rustc_ast::attr::mk_list_item(Ident::new(sym::doc, span), vec![hidden]);
+ let no_coverage = cx.meta_word(span, sym::no_coverage);
+ let attrs = vec![cx.attribute(inline), cx.attribute(doc), cx.attribute(no_coverage)];
+ let trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path: path_std!(cmp::Eq),
+ additional_bounds: Vec::new(),
+ generics: Bounds::empty(),
+ supports_unions: true,
+ methods: vec![MethodDef {
+ name: sym::assert_receiver_is_total_eq,
+ generics: Bounds::empty(),
+ explicit_self: true,
+ nonself_args: vec![],
+ ret_ty: Unit,
+ attributes: attrs,
+ unify_fieldless_variants: true,
+ combine_substructure: combine_substructure(Box::new(|a, b, c| {
+ cs_total_eq_assert(a, b, c)
+ })),
+ }],
+ associated_types: Vec::new(),
+ };
+
+ super::inject_impl_of_structural_trait(cx, span, item, path_std!(marker::StructuralEq), push);
+
+ trait_def.expand_ext(cx, mitem, item, push, true)
+}
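+
+// Illustrative sketch (not part of the upstream source): `#[derive(Eq)]
+// struct S(u32);` emits a hidden method roughly like:
+//
+//     fn assert_receiver_is_total_eq(&self) {
+//         let _: ::core::cmp::AssertParamIsEq<u32>;
+//     }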
+
+fn cs_total_eq_assert(
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ substr: &Substructure<'_>,
+) -> BlockOrExpr {
+ let mut stmts = Vec::new();
+ let mut seen_type_names = FxHashSet::default();
+ let mut process_variant = |variant: &ast::VariantData| {
+ for field in variant.fields() {
+ // This basic redundancy checking only prevents duplication of
+ // assertions like `AssertParamIsEq<Foo>` where the type is a
+ // simple name. That's enough to get a lot of cases, though.
+ if let Some(name) = field.ty.kind.is_simple_path() && !seen_type_names.insert(name) {
+ // Already produced an assertion for this type.
+ } else {
+ // let _: AssertParamIsEq<FieldTy>;
+ super::assert_ty_bounds(
+ cx,
+ &mut stmts,
+ field.ty.clone(),
+ field.span,
+ &[sym::cmp, sym::AssertParamIsEq],
+ );
+ }
+ }
+ };
+
+ match *substr.fields {
+ StaticStruct(vdata, ..) => {
+ process_variant(vdata);
+ }
+ StaticEnum(enum_def, ..) => {
+ for variant in &enum_def.variants {
+ process_variant(&variant.data);
+ }
+ }
+ _ => cx.span_bug(trait_span, "unexpected substructure in `derive(Eq)`"),
+ }
+ BlockOrExpr::new_stmts(stmts)
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
new file mode 100644
index 000000000..1612be862
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/ord.rs
@@ -0,0 +1,79 @@
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::path_std;
+
+use rustc_ast::MetaItem;
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+
+pub fn expand_deriving_ord(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ let inline = cx.meta_word(span, sym::inline);
+ let attrs = vec![cx.attribute(inline)];
+ let trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path: path_std!(cmp::Ord),
+ additional_bounds: Vec::new(),
+ generics: Bounds::empty(),
+ supports_unions: false,
+ methods: vec![MethodDef {
+ name: sym::cmp,
+ generics: Bounds::empty(),
+ explicit_self: true,
+ nonself_args: vec![(self_ref(), sym::other)],
+ ret_ty: Path(path_std!(cmp::Ordering)),
+ attributes: attrs,
+ unify_fieldless_variants: true,
+ combine_substructure: combine_substructure(Box::new(|a, b, c| cs_cmp(a, b, c))),
+ }],
+ associated_types: Vec::new(),
+ };
+
+ trait_def.expand(cx, mitem, item, push)
+}
+
+pub fn cs_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> BlockOrExpr {
+ let test_id = Ident::new(sym::cmp, span);
+ let equal_path = cx.path_global(span, cx.std_path(&[sym::cmp, sym::Ordering, sym::Equal]));
+ let cmp_path = cx.std_path(&[sym::cmp, sym::Ord, sym::cmp]);
+
+ // Builds:
+ //
+ // match ::core::cmp::Ord::cmp(&self.x, &other.x) {
+ // ::std::cmp::Ordering::Equal =>
+ // ::core::cmp::Ord::cmp(&self.y, &other.y),
+ // cmp => cmp,
+ // }
+ let expr = cs_fold(
+        // foldr nests the matches correctly, leaving the first field
+        // as the outermost one, and the last as the innermost.
+ false,
+ cx,
+ span,
+ substr,
+ |cx, fold| match fold {
+ CsFold::Single(field) => {
+ let [other_expr] = &field.other_selflike_exprs[..] else {
+ cx.span_bug(field.span, "not exactly 2 arguments in `derive(Ord)`");
+ };
+ let args = vec![field.self_expr.clone(), other_expr.clone()];
+ cx.expr_call_global(field.span, cmp_path.clone(), args)
+ }
+ CsFold::Combine(span, expr1, expr2) => {
+ let eq_arm = cx.arm(span, cx.pat_path(span, equal_path.clone()), expr1);
+ let neq_arm =
+ cx.arm(span, cx.pat_ident(span, test_id), cx.expr_ident(span, test_id));
+ cx.expr_match(span, expr2, vec![eq_arm, neq_arm])
+ }
+ CsFold::Fieldless => cx.expr_path(equal_path.clone()),
+ },
+ );
+ BlockOrExpr::new_expr(expr)
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
new file mode 100644
index 000000000..0141b3377
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_eq.rs
@@ -0,0 +1,110 @@
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::{path_local, path_std};
+
+use rustc_ast::ptr::P;
+use rustc_ast::{BinOpKind, BorrowKind, Expr, ExprKind, MetaItem, Mutability};
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+pub fn expand_deriving_partial_eq(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ fn cs_op(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ substr: &Substructure<'_>,
+ op: BinOpKind,
+ combiner: BinOpKind,
+ base: bool,
+ ) -> BlockOrExpr {
+ let expr = cs_fold(
+ true, // use foldl
+ cx,
+ span,
+ substr,
+ |cx, fold| match fold {
+ CsFold::Single(field) => {
+ let [other_expr] = &field.other_selflike_exprs[..] else {
+ cx.span_bug(field.span, "not exactly 2 arguments in `derive(PartialEq)`");
+ };
+
+ // We received `&T` arguments. Convert them to `T` by
+ // stripping `&` or adding `*`. This isn't necessary for
+ // type checking, but it results in much better error
+ // messages if something goes wrong.
+ let convert = |expr: &P<Expr>| {
+ if let ExprKind::AddrOf(BorrowKind::Ref, Mutability::Not, inner) =
+ &expr.kind
+ {
+ inner.clone()
+ } else {
+ cx.expr_deref(field.span, expr.clone())
+ }
+ };
+ cx.expr_binary(field.span, op, convert(&field.self_expr), convert(other_expr))
+ }
+ CsFold::Combine(span, expr1, expr2) => cx.expr_binary(span, combiner, expr1, expr2),
+ CsFold::Fieldless => cx.expr_bool(span, base),
+ },
+ );
+ BlockOrExpr::new_expr(expr)
+ }
+
+ fn cs_eq(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> BlockOrExpr {
+ cs_op(cx, span, substr, BinOpKind::Eq, BinOpKind::And, true)
+ }
+ fn cs_ne(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> BlockOrExpr {
+ cs_op(cx, span, substr, BinOpKind::Ne, BinOpKind::Or, false)
+ }
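+
+    // A sketch of what these build for `struct Point { x: i32, y: i32 }`:
+    // `cs_eq` produces roughly `self.x == other.x && self.y == other.y`,
+    // and `cs_ne` produces roughly `self.x != other.x || self.y != other.y`.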
+
+ macro_rules! md {
+ ($name:expr, $f:ident) => {{
+ let inline = cx.meta_word(span, sym::inline);
+ let attrs = vec![cx.attribute(inline)];
+ MethodDef {
+ name: $name,
+ generics: Bounds::empty(),
+ explicit_self: true,
+ nonself_args: vec![(self_ref(), sym::other)],
+ ret_ty: Path(path_local!(bool)),
+ attributes: attrs,
+ unify_fieldless_variants: true,
+ combine_substructure: combine_substructure(Box::new(|a, b, c| $f(a, b, c))),
+ }
+ }};
+ }
+
+ super::inject_impl_of_structural_trait(
+ cx,
+ span,
+ item,
+ path_std!(marker::StructuralPartialEq),
+ push,
+ );
+
+    // Avoid defining `ne` if we can: C-like enums, enums without any
+    // fields, and structs without fields can safely define only `eq`.
+ let mut methods = vec![md!(sym::eq, cs_eq)];
+ if !is_type_without_fields(item) {
+ methods.push(md!(sym::ne, cs_ne));
+ }
+
+ let trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path: path_std!(cmp::PartialEq),
+ additional_bounds: Vec::new(),
+ generics: Bounds::empty(),
+ supports_unions: false,
+ methods,
+ associated_types: Vec::new(),
+ };
+ trait_def.expand(cx, mitem, item, push)
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
new file mode 100644
index 000000000..2ebb01cc8
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/cmp/partial_ord.rs
@@ -0,0 +1,88 @@
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::{path_std, pathvec_std};
+
+use rustc_ast::MetaItem;
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+
+pub fn expand_deriving_partial_ord(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ let ordering_ty = Path(path_std!(cmp::Ordering));
+ let ret_ty =
+ Path(Path::new_(pathvec_std!(option::Option), vec![Box::new(ordering_ty)], PathKind::Std));
+
+ let inline = cx.meta_word(span, sym::inline);
+ let attrs = vec![cx.attribute(inline)];
+
+ let partial_cmp_def = MethodDef {
+ name: sym::partial_cmp,
+ generics: Bounds::empty(),
+ explicit_self: true,
+ nonself_args: vec![(self_ref(), sym::other)],
+ ret_ty,
+ attributes: attrs,
+ unify_fieldless_variants: true,
+ combine_substructure: combine_substructure(Box::new(|cx, span, substr| {
+ cs_partial_cmp(cx, span, substr)
+ })),
+ };
+
+ let trait_def = TraitDef {
+ span,
+ attributes: vec![],
+ path: path_std!(cmp::PartialOrd),
+ additional_bounds: vec![],
+ generics: Bounds::empty(),
+ supports_unions: false,
+ methods: vec![partial_cmp_def],
+ associated_types: Vec::new(),
+ };
+ trait_def.expand(cx, mitem, item, push)
+}
+
+pub fn cs_partial_cmp(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> BlockOrExpr {
+ let test_id = Ident::new(sym::cmp, span);
+ let equal_path = cx.path_global(span, cx.std_path(&[sym::cmp, sym::Ordering, sym::Equal]));
+ let partial_cmp_path = cx.std_path(&[sym::cmp, sym::PartialOrd, sym::partial_cmp]);
+
+ // Builds:
+ //
+ // match ::core::cmp::PartialOrd::partial_cmp(&self.x, &other.x) {
+ // ::core::option::Option::Some(::core::cmp::Ordering::Equal) =>
+ // ::core::cmp::PartialOrd::partial_cmp(&self.y, &other.y),
+ // cmp => cmp,
+ // }
+ let expr = cs_fold(
+        // foldr nests the matches correctly, leaving the first field
+        // as the outermost one, and the last as the innermost.
+ false,
+ cx,
+ span,
+ substr,
+ |cx, fold| match fold {
+ CsFold::Single(field) => {
+ let [other_expr] = &field.other_selflike_exprs[..] else {
+                    cx.span_bug(field.span, "not exactly 2 arguments in `derive(PartialOrd)`");
+ };
+ let args = vec![field.self_expr.clone(), other_expr.clone()];
+ cx.expr_call_global(field.span, partial_cmp_path.clone(), args)
+ }
+ CsFold::Combine(span, expr1, expr2) => {
+ let eq_arm =
+ cx.arm(span, cx.pat_some(span, cx.pat_path(span, equal_path.clone())), expr1);
+ let neq_arm =
+ cx.arm(span, cx.pat_ident(span, test_id), cx.expr_ident(span, test_id));
+ cx.expr_match(span, expr2, vec![eq_arm, neq_arm])
+ }
+ CsFold::Fieldless => cx.expr_some(span, cx.expr_path(equal_path.clone())),
+ },
+ );
+ BlockOrExpr::new_expr(expr)
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/debug.rs b/compiler/rustc_builtin_macros/src/deriving/debug.rs
new file mode 100644
index 000000000..ceef893e8
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/debug.rs
@@ -0,0 +1,181 @@
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::path_std;
+
+use rustc_ast::{self as ast, MetaItem};
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::Span;
+
+pub fn expand_deriving_debug(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ // &mut ::std::fmt::Formatter
+ let fmtr = Ref(Box::new(Path(path_std!(fmt::Formatter))), ast::Mutability::Mut);
+
+ let trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path: path_std!(fmt::Debug),
+ additional_bounds: Vec::new(),
+ generics: Bounds::empty(),
+ supports_unions: false,
+ methods: vec![MethodDef {
+ name: sym::fmt,
+ generics: Bounds::empty(),
+ explicit_self: true,
+ nonself_args: vec![(fmtr, sym::f)],
+ ret_ty: Path(path_std!(fmt::Result)),
+ attributes: Vec::new(),
+ unify_fieldless_variants: false,
+ combine_substructure: combine_substructure(Box::new(|a, b, c| {
+ show_substructure(a, b, c)
+ })),
+ }],
+ associated_types: Vec::new(),
+ };
+ trait_def.expand(cx, mitem, item, push)
+}
+
+fn show_substructure(cx: &mut ExtCtxt<'_>, span: Span, substr: &Substructure<'_>) -> BlockOrExpr {
+ let (ident, vdata, fields) = match substr.fields {
+ Struct(vdata, fields) => (substr.type_ident, *vdata, fields),
+ EnumMatching(_, _, v, fields) => (v.ident, &v.data, fields),
+ EnumTag(..) | StaticStruct(..) | StaticEnum(..) => {
+ cx.span_bug(span, "nonsensical .fields in `#[derive(Debug)]`")
+ }
+ };
+
+ // We want to make sure we have the ctxt set so that we can use unstable methods
+ let span = cx.with_def_site_ctxt(span);
+ let name = cx.expr_lit(span, ast::LitKind::Str(ident.name, ast::StrStyle::Cooked));
+ let fmt = substr.nonselflike_args[0].clone();
+
+    // Structs and tuples are similar enough that we use the same code for both,
+ // with some extra pieces for structs due to the field names.
+ let (is_struct, args_per_field) = match vdata {
+ ast::VariantData::Unit(..) => {
+ // Special fast path for unit variants.
+ assert!(fields.is_empty());
+ (false, 0)
+ }
+ ast::VariantData::Tuple(..) => (false, 1),
+ ast::VariantData::Struct(..) => (true, 2),
+ };
+
+ // The number of fields that can be handled without an array.
+ const CUTOFF: usize = 5;
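+
+    // A sketch (not verbatim output) of the `fields.len() <= CUTOFF` case
+    // for `struct Point { x: i32, y: i32 }`:
+    //
+    //     ::core::fmt::Formatter::debug_struct_field2_finish(
+    //         f, "Point", "x", &&self.x, "y", &&self.y)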
+
+ if fields.is_empty() {
+ // Special case for no fields.
+ let fn_path_write_str = cx.std_path(&[sym::fmt, sym::Formatter, sym::write_str]);
+ let expr = cx.expr_call_global(span, fn_path_write_str, vec![fmt, name]);
+ BlockOrExpr::new_expr(expr)
+ } else if fields.len() <= CUTOFF {
+ // Few enough fields that we can use a specific-length method.
+ let debug = if is_struct {
+ format!("debug_struct_field{}_finish", fields.len())
+ } else {
+ format!("debug_tuple_field{}_finish", fields.len())
+ };
+ let fn_path_debug = cx.std_path(&[sym::fmt, sym::Formatter, Symbol::intern(&debug)]);
+
+ let mut args = Vec::with_capacity(2 + fields.len() * args_per_field);
+ args.extend([fmt, name]);
+ for i in 0..fields.len() {
+ let field = &fields[i];
+ if is_struct {
+ let name = cx.expr_lit(
+ field.span,
+ ast::LitKind::Str(field.name.unwrap().name, ast::StrStyle::Cooked),
+ );
+ args.push(name);
+ }
+ // Use an extra indirection to make sure this works for unsized types.
+ let field = cx.expr_addr_of(field.span, field.self_expr.clone());
+ args.push(field);
+ }
+ let expr = cx.expr_call_global(span, fn_path_debug, args);
+ BlockOrExpr::new_expr(expr)
+ } else {
+ // Enough fields that we must use the any-length method.
+ let mut name_exprs = Vec::with_capacity(fields.len());
+ let mut value_exprs = Vec::with_capacity(fields.len());
+
+ for field in fields {
+ if is_struct {
+ name_exprs.push(cx.expr_lit(
+ field.span,
+ ast::LitKind::Str(field.name.unwrap().name, ast::StrStyle::Cooked),
+ ));
+ }
+
+ // Use an extra indirection to make sure this works for unsized types.
+ let field = cx.expr_addr_of(field.span, field.self_expr.clone());
+ value_exprs.push(field);
+ }
+
+ // `let names: &'static _ = &["field1", "field2"];`
+ let names_let = if is_struct {
+ let lt_static = Some(cx.lifetime_static(span));
+ let ty_static_ref =
+ cx.ty_rptr(span, cx.ty_infer(span), lt_static, ast::Mutability::Not);
+ Some(cx.stmt_let_ty(
+ span,
+ false,
+ Ident::new(sym::names, span),
+ Some(ty_static_ref),
+ cx.expr_array_ref(span, name_exprs),
+ ))
+ } else {
+ None
+ };
+
+ // `let values: &[&dyn Debug] = &[&&self.field1, &&self.field2];`
+ let path_debug = cx.path_global(span, cx.std_path(&[sym::fmt, sym::Debug]));
+ let ty_dyn_debug = cx.ty(
+ span,
+ ast::TyKind::TraitObject(vec![cx.trait_bound(path_debug)], ast::TraitObjectSyntax::Dyn),
+ );
+ let ty_slice = cx.ty(
+ span,
+ ast::TyKind::Slice(cx.ty_rptr(span, ty_dyn_debug, None, ast::Mutability::Not)),
+ );
+ let values_let = cx.stmt_let_ty(
+ span,
+ false,
+ Ident::new(sym::values, span),
+ Some(cx.ty_rptr(span, ty_slice, None, ast::Mutability::Not)),
+ cx.expr_array_ref(span, value_exprs),
+ );
+
+ // `fmt::Formatter::debug_struct_fields_finish(fmt, name, names, values)` or
+ // `fmt::Formatter::debug_tuple_fields_finish(fmt, name, values)`
+ let sym_debug = if is_struct {
+ sym::debug_struct_fields_finish
+ } else {
+ sym::debug_tuple_fields_finish
+ };
+ let fn_path_debug_internal = cx.std_path(&[sym::fmt, sym::Formatter, sym_debug]);
+
+ let mut args = Vec::with_capacity(4);
+ args.push(fmt);
+ args.push(name);
+ if is_struct {
+ args.push(cx.expr_ident(span, Ident::new(sym::names, span)));
+ }
+ args.push(cx.expr_ident(span, Ident::new(sym::values, span)));
+ let expr = cx.expr_call_global(span, fn_path_debug_internal, args);
+
+ let mut stmts = Vec::with_capacity(3);
+ if is_struct {
+ stmts.push(names_let.unwrap());
+ }
+ stmts.push(values_let);
+ BlockOrExpr::new_mixed(stmts, Some(expr))
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/decodable.rs b/compiler/rustc_builtin_macros/src/deriving/decodable.rs
new file mode 100644
index 000000000..d688143a2
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/decodable.rs
@@ -0,0 +1,224 @@
+//! The compiler code necessary for `#[derive(RustcDecodable)]`. See encodable.rs for more.
+
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::pathvec_std;
+
+use rustc_ast::ptr::P;
+use rustc_ast::{self as ast, Expr, MetaItem, Mutability};
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::Span;
+
+pub fn expand_deriving_rustc_decodable(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ let krate = sym::rustc_serialize;
+ let typaram = sym::__D;
+
+ let trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path: Path::new_(vec![krate, sym::Decodable], vec![], PathKind::Global),
+ additional_bounds: Vec::new(),
+ generics: Bounds::empty(),
+ supports_unions: false,
+ methods: vec![MethodDef {
+ name: sym::decode,
+ generics: Bounds {
+ bounds: vec![(
+ typaram,
+ vec![Path::new_(vec![krate, sym::Decoder], vec![], PathKind::Global)],
+ )],
+ },
+ explicit_self: false,
+ nonself_args: vec![(
+ Ref(Box::new(Path(Path::new_local(typaram))), Mutability::Mut),
+ sym::d,
+ )],
+ ret_ty: Path(Path::new_(
+ pathvec_std!(result::Result),
+ vec![
+ Box::new(Self_),
+ Box::new(Path(Path::new_(vec![typaram, sym::Error], vec![], PathKind::Local))),
+ ],
+ PathKind::Std,
+ )),
+ attributes: Vec::new(),
+ unify_fieldless_variants: false,
+ combine_substructure: combine_substructure(Box::new(|a, b, c| {
+ decodable_substructure(a, b, c, krate)
+ })),
+ }],
+ associated_types: Vec::new(),
+ };
+
+ trait_def.expand(cx, mitem, item, push)
+}
+
+fn decodable_substructure(
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ substr: &Substructure<'_>,
+ krate: Symbol,
+) -> BlockOrExpr {
+ let decoder = substr.nonselflike_args[0].clone();
+ let recurse = vec![
+ Ident::new(krate, trait_span),
+ Ident::new(sym::Decodable, trait_span),
+ Ident::new(sym::decode, trait_span),
+ ];
+ let exprdecode = cx.expr_path(cx.path_global(trait_span, recurse));
+ // throw an underscore in front to suppress unused variable warnings
+ let blkarg = Ident::new(sym::_d, trait_span);
+ let blkdecoder = cx.expr_ident(trait_span, blkarg);
+
+ let expr = match *substr.fields {
+ StaticStruct(_, ref summary) => {
+ let nfields = match *summary {
+ Unnamed(ref fields, _) => fields.len(),
+ Named(ref fields) => fields.len(),
+ };
+ let fn_read_struct_field_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_struct_field]);
+
+ let path = cx.path_ident(trait_span, substr.type_ident);
+ let result =
+ decode_static_fields(cx, trait_span, path, summary, |cx, span, name, field| {
+ cx.expr_try(
+ span,
+ cx.expr_call_global(
+ span,
+ fn_read_struct_field_path.clone(),
+ vec![
+ blkdecoder.clone(),
+ cx.expr_str(span, name),
+ cx.expr_usize(span, field),
+ exprdecode.clone(),
+ ],
+ ),
+ )
+ });
+ let result = cx.expr_ok(trait_span, result);
+ let fn_read_struct_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_struct]);
+
+ cx.expr_call_global(
+ trait_span,
+ fn_read_struct_path,
+ vec![
+ decoder,
+ cx.expr_str(trait_span, substr.type_ident.name),
+ cx.expr_usize(trait_span, nfields),
+ cx.lambda1(trait_span, result, blkarg),
+ ],
+ )
+ }
+ StaticEnum(_, ref fields) => {
+ let variant = Ident::new(sym::i, trait_span);
+
+ let mut arms = Vec::with_capacity(fields.len() + 1);
+ let mut variants = Vec::with_capacity(fields.len());
+
+ let fn_read_enum_variant_arg_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_enum_variant_arg]);
+
+ for (i, &(ident, v_span, ref parts)) in fields.iter().enumerate() {
+ variants.push(cx.expr_str(v_span, ident.name));
+
+ let path = cx.path(trait_span, vec![substr.type_ident, ident]);
+ let decoded =
+ decode_static_fields(cx, v_span, path, parts, |cx, span, _, field| {
+ let idx = cx.expr_usize(span, field);
+ cx.expr_try(
+ span,
+ cx.expr_call_global(
+ span,
+ fn_read_enum_variant_arg_path.clone(),
+ vec![blkdecoder.clone(), idx, exprdecode.clone()],
+ ),
+ )
+ });
+
+ arms.push(cx.arm(v_span, cx.pat_lit(v_span, cx.expr_usize(v_span, i)), decoded));
+ }
+
+ arms.push(cx.arm_unreachable(trait_span));
+
+ let result = cx.expr_ok(
+ trait_span,
+ cx.expr_match(trait_span, cx.expr_ident(trait_span, variant), arms),
+ );
+ let lambda = cx.lambda(trait_span, vec![blkarg, variant], result);
+ let variant_array_ref = cx.expr_array_ref(trait_span, variants);
+ let fn_read_enum_variant_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_enum_variant]);
+ let result = cx.expr_call_global(
+ trait_span,
+ fn_read_enum_variant_path,
+ vec![blkdecoder, variant_array_ref, lambda],
+ );
+ let fn_read_enum_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Decoder, sym::read_enum]);
+
+ cx.expr_call_global(
+ trait_span,
+ fn_read_enum_path,
+ vec![
+ decoder,
+ cx.expr_str(trait_span, substr.type_ident.name),
+ cx.lambda1(trait_span, result, blkarg),
+ ],
+ )
+ }
+ _ => cx.bug("expected StaticEnum or StaticStruct in derive(Decodable)"),
+ };
+ BlockOrExpr::new_expr(expr)
+}
+
+/// Creates a decoder for a single enum variant/struct:
+/// - `outer_pat_path` is the path to this enum variant/struct
+/// - `getarg` should retrieve the field at the given `usize` index, using
+///   the given `Symbol` name.
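+///
+/// For example (a sketch), a tuple variant `C0(i32)` decodes to roughly
+/// `C0(getarg(cx, span, "_field0", 0))`, matching the `_field{i}` names
+/// generated in the `Unnamed` arm below.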
+fn decode_static_fields<F>(
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ outer_pat_path: ast::Path,
+ fields: &StaticFields,
+ mut getarg: F,
+) -> P<Expr>
+where
+ F: FnMut(&mut ExtCtxt<'_>, Span, Symbol, usize) -> P<Expr>,
+{
+ match *fields {
+ Unnamed(ref fields, is_tuple) => {
+ let path_expr = cx.expr_path(outer_pat_path);
+ if !is_tuple {
+ path_expr
+ } else {
+ let fields = fields
+ .iter()
+ .enumerate()
+ .map(|(i, &span)| getarg(cx, span, Symbol::intern(&format!("_field{}", i)), i))
+ .collect();
+
+ cx.expr_call(trait_span, path_expr, fields)
+ }
+ }
+ Named(ref fields) => {
+ // use the field's span to get nicer error messages.
+ let fields = fields
+ .iter()
+ .enumerate()
+ .map(|(i, &(ident, span))| {
+ let arg = getarg(cx, span, ident.name, i);
+ cx.field_imm(span, ident, arg)
+ })
+ .collect();
+ cx.expr_struct(trait_span, outer_pat_path, fields)
+ }
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/default.rs b/compiler/rustc_builtin_macros/src/deriving/default.rs
new file mode 100644
index 000000000..517769091
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/default.rs
@@ -0,0 +1,267 @@
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+
+use rustc_ast as ast;
+use rustc_ast::walk_list;
+use rustc_ast::EnumDef;
+use rustc_ast::VariantData;
+use rustc_errors::Applicability;
+use rustc_expand::base::{Annotatable, DummyResult, ExtCtxt};
+use rustc_span::symbol::Ident;
+use rustc_span::symbol::{kw, sym};
+use rustc_span::Span;
+use smallvec::SmallVec;
+
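+// A sketch of the expansion (not verbatim compiler output): for
+// `#[derive(Default)] enum Pet { #[default] Dog, Cat }` this generates
+// roughly `impl Default for Pet { fn default() -> Self { Self::Dog } }`;
+// for a struct, every field is filled in with `Default::default()`.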
+pub fn expand_deriving_default(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &ast::MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ item.visit_with(&mut DetectNonVariantDefaultAttr { cx });
+
+ let inline = cx.meta_word(span, sym::inline);
+ let attrs = vec![cx.attribute(inline)];
+ let trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path: Path::new(vec![kw::Default, sym::Default]),
+ additional_bounds: Vec::new(),
+ generics: Bounds::empty(),
+ supports_unions: false,
+ methods: vec![MethodDef {
+ name: kw::Default,
+ generics: Bounds::empty(),
+ explicit_self: false,
+ nonself_args: Vec::new(),
+ ret_ty: Self_,
+ attributes: attrs,
+ unify_fieldless_variants: false,
+ combine_substructure: combine_substructure(Box::new(|cx, trait_span, substr| {
+ match substr.fields {
+ StaticStruct(_, fields) => {
+ default_struct_substructure(cx, trait_span, substr, fields)
+ }
+ StaticEnum(enum_def, _) => default_enum_substructure(cx, trait_span, enum_def),
+                    _ => cx.span_bug(trait_span, "unexpected substructure in `derive(Default)`"),
+ }
+ })),
+ }],
+ associated_types: Vec::new(),
+ };
+ trait_def.expand(cx, mitem, item, push)
+}
+
+fn default_struct_substructure(
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ substr: &Substructure<'_>,
+ summary: &StaticFields,
+) -> BlockOrExpr {
+ // Note that `kw::Default` is "default" and `sym::Default` is "Default"!
+ let default_ident = cx.std_path(&[kw::Default, sym::Default, kw::Default]);
+ let default_call = |span| cx.expr_call_global(span, default_ident.clone(), Vec::new());
+
+ let expr = match summary {
+ Unnamed(ref fields, is_tuple) => {
+ if !is_tuple {
+ cx.expr_ident(trait_span, substr.type_ident)
+ } else {
+ let exprs = fields.iter().map(|sp| default_call(*sp)).collect();
+ cx.expr_call_ident(trait_span, substr.type_ident, exprs)
+ }
+ }
+ Named(ref fields) => {
+ let default_fields = fields
+ .iter()
+ .map(|&(ident, span)| cx.field_imm(span, ident, default_call(span)))
+ .collect();
+ cx.expr_struct_ident(trait_span, substr.type_ident, default_fields)
+ }
+ };
+ BlockOrExpr::new_expr(expr)
+}
+
+fn default_enum_substructure(
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ enum_def: &EnumDef,
+) -> BlockOrExpr {
+ let expr = if let Ok(default_variant) = extract_default_variant(cx, enum_def, trait_span)
+ && let Ok(_) = validate_default_attribute(cx, default_variant)
+ {
+ // We now know there is exactly one unit variant with exactly one `#[default]` attribute.
+ cx.expr_path(cx.path(
+ default_variant.span,
+ vec![Ident::new(kw::SelfUpper, default_variant.span), default_variant.ident],
+ ))
+ } else {
+ DummyResult::raw_expr(trait_span, true)
+ };
+ BlockOrExpr::new_expr(expr)
+}
+
+fn extract_default_variant<'a>(
+ cx: &mut ExtCtxt<'_>,
+ enum_def: &'a EnumDef,
+ trait_span: Span,
+) -> Result<&'a rustc_ast::Variant, ()> {
+ let default_variants: SmallVec<[_; 1]> = enum_def
+ .variants
+ .iter()
+ .filter(|variant| cx.sess.contains_name(&variant.attrs, kw::Default))
+ .collect();
+
+ let variant = match default_variants.as_slice() {
+ [variant] => variant,
+ [] => {
+ let possible_defaults = enum_def
+ .variants
+ .iter()
+ .filter(|variant| matches!(variant.data, VariantData::Unit(..)))
+ .filter(|variant| !cx.sess.contains_name(&variant.attrs, sym::non_exhaustive));
+
+ let mut diag = cx.struct_span_err(trait_span, "no default declared");
+ diag.help("make a unit variant default by placing `#[default]` above it");
+ for variant in possible_defaults {
+ // Suggest making each unit variant default.
+ diag.tool_only_span_suggestion(
+ variant.span,
+ &format!("make `{}` default", variant.ident),
+ format!("#[default] {}", variant.ident),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ diag.emit();
+
+ return Err(());
+ }
+ [first, rest @ ..] => {
+ let mut diag = cx.struct_span_err(trait_span, "multiple declared defaults");
+ diag.span_label(first.span, "first default");
+ diag.span_labels(rest.iter().map(|variant| variant.span), "additional default");
+ diag.note("only one variant can be default");
+ for variant in &default_variants {
+ // Suggest making each variant already tagged default.
+ let suggestion = default_variants
+ .iter()
+ .filter_map(|v| {
+ if v.ident == variant.ident {
+ None
+ } else {
+ Some((cx.sess.find_by_name(&v.attrs, kw::Default)?.span, String::new()))
+ }
+ })
+ .collect();
+
+ diag.tool_only_multipart_suggestion(
+ &format!("make `{}` default", variant.ident),
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ diag.emit();
+
+ return Err(());
+ }
+ };
+
+ if !matches!(variant.data, VariantData::Unit(..)) {
+ cx.struct_span_err(
+ variant.ident.span,
+ "the `#[default]` attribute may only be used on unit enum variants",
+ )
+ .help("consider a manual implementation of `Default`")
+ .emit();
+
+ return Err(());
+ }
+
+ if let Some(non_exhaustive_attr) = cx.sess.find_by_name(&variant.attrs, sym::non_exhaustive) {
+ cx.struct_span_err(variant.ident.span, "default variant must be exhaustive")
+ .span_label(non_exhaustive_attr.span, "declared `#[non_exhaustive]` here")
+ .help("consider a manual implementation of `Default`")
+ .emit();
+
+ return Err(());
+ }
+
+ Ok(variant)
+}
+
+fn validate_default_attribute(
+ cx: &mut ExtCtxt<'_>,
+ default_variant: &rustc_ast::Variant,
+) -> Result<(), ()> {
+ let attrs: SmallVec<[_; 1]> =
+ cx.sess.filter_by_name(&default_variant.attrs, kw::Default).collect();
+
+ let attr = match attrs.as_slice() {
+ [attr] => attr,
+ [] => cx.bug(
+ "this method must only be called with a variant that has a `#[default]` attribute",
+ ),
+ [first, rest @ ..] => {
+ let suggestion_text =
+ if rest.len() == 1 { "try removing this" } else { "try removing these" };
+
+ cx.struct_span_err(default_variant.ident.span, "multiple `#[default]` attributes")
+ .note("only one `#[default]` attribute is needed")
+ .span_label(first.span, "`#[default]` used here")
+ .span_label(rest[0].span, "`#[default]` used again here")
+ .span_help(rest.iter().map(|attr| attr.span).collect::<Vec<_>>(), suggestion_text)
+ // This would otherwise display the empty replacement, hence the otherwise
+ // repetitive `.span_help` call above.
+ .tool_only_multipart_suggestion(
+ suggestion_text,
+ rest.iter().map(|attr| (attr.span, String::new())).collect(),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ return Err(());
+ }
+ };
+ if !attr.is_word() {
+ cx.struct_span_err(attr.span, "`#[default]` attribute does not accept a value")
+ .span_suggestion_hidden(
+ attr.span,
+ "try using `#[default]`",
+ "#[default]",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+
+ return Err(());
+ }
+ Ok(())
+}
+
+struct DetectNonVariantDefaultAttr<'a, 'b> {
+ cx: &'a ExtCtxt<'b>,
+}
+
+impl<'a, 'b> rustc_ast::visit::Visitor<'a> for DetectNonVariantDefaultAttr<'a, 'b> {
+ fn visit_attribute(&mut self, attr: &'a rustc_ast::Attribute) {
+ if attr.has_name(kw::Default) {
+ self.cx
+ .struct_span_err(
+ attr.span,
+ "the `#[default]` attribute may only be used on unit enum variants",
+ )
+ .emit();
+ }
+
+ rustc_ast::visit::walk_attribute(self, attr);
+ }
+ fn visit_variant(&mut self, v: &'a rustc_ast::Variant) {
+ self.visit_ident(v.ident);
+ self.visit_vis(&v.vis);
+ self.visit_variant_data(&v.data);
+ walk_list!(self, visit_anon_const, &v.disr_expr);
+ for attr in &v.attrs {
+ rustc_ast::visit::walk_attribute(self, attr);
+ }
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/encodable.rs b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
new file mode 100644
index 000000000..70167cac6
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/encodable.rs
@@ -0,0 +1,295 @@
+//! The compiler code necessary to implement the `#[derive(RustcEncodable)]`
+//! (and `RustcDecodable`, in `decodable.rs`) extension. The idea here is that
+//! type-defining items may be tagged with
+//! `#[derive(RustcEncodable, RustcDecodable)]`.
+//!
+//! For example, a type like:
+//!
+//! ```ignore (old code)
+//! #[derive(RustcEncodable, RustcDecodable)]
+//! struct Node { id: usize }
+//! ```
+//!
+//! would generate two implementations like:
+//!
+//! ```ignore (old code)
+//! # struct Node { id: usize }
+//! impl<S: Encoder<E>, E> Encodable<S, E> for Node {
+//! fn encode(&self, s: &mut S) -> Result<(), E> {
+//! s.emit_struct("Node", 1, |this| {
+//! this.emit_struct_field("id", 0, |this| {
+//! Encodable::encode(&self.id, this)
+//! /* this.emit_usize(self.id) can also be used */
+//! })
+//! })
+//! }
+//! }
+//!
+//! impl<D: Decoder<E>, E> Decodable<D, E> for Node {
+//! fn decode(d: &mut D) -> Result<Node, E> {
+//! d.read_struct("Node", 1, |this| {
+//! match this.read_struct_field("id", 0, |this| Decodable::decode(this)) {
+//! Ok(id) => Ok(Node { id: id }),
+//! Err(e) => Err(e),
+//! }
+//! })
+//! }
+//! }
+//! ```
+//!
+//! Other interesting scenarios are when the item has type parameters or
+//! references other non-built-in types. A type definition like:
+//!
+//! ```ignore (old code)
+//! # #[derive(RustcEncodable, RustcDecodable)]
+//! # struct Span;
+//! #[derive(RustcEncodable, RustcDecodable)]
+//! struct Spanned<T> { node: T, span: Span }
+//! ```
+//!
+//! would yield functions like:
+//!
+//! ```ignore (old code)
+//! # #[derive(RustcEncodable, RustcDecodable)]
+//! # struct Span;
+//! # struct Spanned<T> { node: T, span: Span }
+//! impl<
+//! S: Encoder<E>,
+//! E,
+//! T: Encodable<S, E>
+//! > Encodable<S, E> for Spanned<T> {
+//! fn encode(&self, s: &mut S) -> Result<(), E> {
+//! s.emit_struct("Spanned", 2, |this| {
+//! this.emit_struct_field("node", 0, |this| self.node.encode(this))
+//! .unwrap();
+//! this.emit_struct_field("span", 1, |this| self.span.encode(this))
+//! })
+//! }
+//! }
+//!
+//! impl<
+//! D: Decoder<E>,
+//! E,
+//! T: Decodable<D, E>
+//! > Decodable<D, E> for Spanned<T> {
+//! fn decode(d: &mut D) -> Result<Spanned<T>, E> {
+//! d.read_struct("Spanned", 2, |this| {
+//! Ok(Spanned {
+//! node: this.read_struct_field("node", 0, |this| Decodable::decode(this))
+//! .unwrap(),
+//! span: this.read_struct_field("span", 1, |this| Decodable::decode(this))
+//! .unwrap(),
+//! })
+//! })
+//! }
+//! }
+//! ```
+
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::pathvec_std;
+
+use rustc_ast::{ExprKind, MetaItem, Mutability};
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::Span;
+
+pub fn expand_deriving_rustc_encodable(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ let krate = sym::rustc_serialize;
+ let typaram = sym::__S;
+
+ let trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path: Path::new_(vec![krate, sym::Encodable], vec![], PathKind::Global),
+ additional_bounds: Vec::new(),
+ generics: Bounds::empty(),
+ supports_unions: false,
+ methods: vec![MethodDef {
+ name: sym::encode,
+ generics: Bounds {
+ bounds: vec![(
+ typaram,
+ vec![Path::new_(vec![krate, sym::Encoder], vec![], PathKind::Global)],
+ )],
+ },
+ explicit_self: true,
+ nonself_args: vec![(
+ Ref(Box::new(Path(Path::new_local(typaram))), Mutability::Mut),
+ sym::s,
+ )],
+ ret_ty: Path(Path::new_(
+ pathvec_std!(result::Result),
+ vec![
+ Box::new(Unit),
+ Box::new(Path(Path::new_(vec![typaram, sym::Error], vec![], PathKind::Local))),
+ ],
+ PathKind::Std,
+ )),
+ attributes: Vec::new(),
+ unify_fieldless_variants: false,
+ combine_substructure: combine_substructure(Box::new(|a, b, c| {
+ encodable_substructure(a, b, c, krate)
+ })),
+ }],
+ associated_types: Vec::new(),
+ };
+
+ trait_def.expand(cx, mitem, item, push)
+}
+
+fn encodable_substructure(
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ substr: &Substructure<'_>,
+ krate: Symbol,
+) -> BlockOrExpr {
+ let encoder = substr.nonselflike_args[0].clone();
+ // throw an underscore in front to suppress unused variable warnings
+ let blkarg = Ident::new(sym::_e, trait_span);
+ let blkencoder = cx.expr_ident(trait_span, blkarg);
+ let fn_path = cx.expr_path(cx.path_global(
+ trait_span,
+ vec![
+ Ident::new(krate, trait_span),
+ Ident::new(sym::Encodable, trait_span),
+ Ident::new(sym::encode, trait_span),
+ ],
+ ));
+
+ match *substr.fields {
+ Struct(_, ref fields) => {
+ let fn_emit_struct_field_path =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_struct_field]);
+ let mut stmts = Vec::new();
+ for (i, &FieldInfo { name, ref self_expr, span, .. }) in fields.iter().enumerate() {
+ let name = match name {
+ Some(id) => id.name,
+ None => Symbol::intern(&format!("_field{}", i)),
+ };
+ let self_ref = cx.expr_addr_of(span, self_expr.clone());
+ let enc = cx.expr_call(span, fn_path.clone(), vec![self_ref, blkencoder.clone()]);
+ let lambda = cx.lambda1(span, enc, blkarg);
+ let call = cx.expr_call_global(
+ span,
+ fn_emit_struct_field_path.clone(),
+ vec![
+ blkencoder.clone(),
+ cx.expr_str(span, name),
+ cx.expr_usize(span, i),
+ lambda,
+ ],
+ );
+
+ // last call doesn't need a try!
+ let last = fields.len() - 1;
+ let call = if i != last {
+ cx.expr_try(span, call)
+ } else {
+ cx.expr(span, ExprKind::Ret(Some(call)))
+ };
+
+ let stmt = cx.stmt_expr(call);
+ stmts.push(stmt);
+ }
+
+            // unit structs have no fields and need to return Ok(())
+ let blk = if stmts.is_empty() {
+ let ok = cx.expr_ok(trait_span, cx.expr_tuple(trait_span, vec![]));
+ cx.lambda1(trait_span, ok, blkarg)
+ } else {
+ cx.lambda_stmts_1(trait_span, stmts, blkarg)
+ };
+
+ let fn_emit_struct_path =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_struct]);
+
+ let expr = cx.expr_call_global(
+ trait_span,
+ fn_emit_struct_path,
+ vec![
+ encoder,
+ cx.expr_str(trait_span, substr.type_ident.name),
+ cx.expr_usize(trait_span, fields.len()),
+ blk,
+ ],
+ );
+ BlockOrExpr::new_expr(expr)
+ }
+
+ EnumMatching(idx, _, variant, ref fields) => {
+            // We're not generating an AST that the borrow checker expects,
+            // so we need to generate a unique local variable to take the
+            // mutable loan out on; otherwise we get conflicts that don't
+            // actually exist.
+ let me = cx.stmt_let(trait_span, false, blkarg, encoder);
+ let encoder = cx.expr_ident(trait_span, blkarg);
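+
+            // A sketch (not verbatim output) of the generated shape for a
+            // variant `C0(i32)` of an enum `C`, at index 0 with one field:
+            //
+            //     Encoder::emit_enum(_e, "C", |_e| {
+            //         Encoder::emit_enum_variant(_e, "C0", 0, 1, |_e| {
+            //             return Encoder::emit_enum_variant_arg(
+            //                 _e, 0, |_e| Encodable::encode(&<field>, _e));
+            //         })
+            //     })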
+
+ let fn_emit_enum_variant_arg_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_enum_variant_arg]);
+
+ let mut stmts = Vec::new();
+ if !fields.is_empty() {
+ let last = fields.len() - 1;
+ for (i, &FieldInfo { ref self_expr, span, .. }) in fields.iter().enumerate() {
+ let self_ref = cx.expr_addr_of(span, self_expr.clone());
+ let enc =
+ cx.expr_call(span, fn_path.clone(), vec![self_ref, blkencoder.clone()]);
+ let lambda = cx.lambda1(span, enc, blkarg);
+
+ let call = cx.expr_call_global(
+ span,
+ fn_emit_enum_variant_arg_path.clone(),
+ vec![blkencoder.clone(), cx.expr_usize(span, i), lambda],
+ );
+ let call = if i != last {
+ cx.expr_try(span, call)
+ } else {
+ cx.expr(span, ExprKind::Ret(Some(call)))
+ };
+ stmts.push(cx.stmt_expr(call));
+ }
+ } else {
+ let ok = cx.expr_ok(trait_span, cx.expr_tuple(trait_span, vec![]));
+ let ret_ok = cx.expr(trait_span, ExprKind::Ret(Some(ok)));
+ stmts.push(cx.stmt_expr(ret_ok));
+ }
+
+ let blk = cx.lambda_stmts_1(trait_span, stmts, blkarg);
+ let name = cx.expr_str(trait_span, variant.ident.name);
+
+ let fn_emit_enum_variant_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_enum_variant]);
+
+ let call = cx.expr_call_global(
+ trait_span,
+ fn_emit_enum_variant_path,
+ vec![
+ blkencoder,
+ name,
+ cx.expr_usize(trait_span, idx),
+ cx.expr_usize(trait_span, fields.len()),
+ blk,
+ ],
+ );
+
+ let blk = cx.lambda1(trait_span, call, blkarg);
+ let fn_emit_enum_path: Vec<_> =
+ cx.def_site_path(&[sym::rustc_serialize, sym::Encoder, sym::emit_enum]);
+ let expr = cx.expr_call_global(
+ trait_span,
+ fn_emit_enum_path,
+ vec![encoder, cx.expr_str(trait_span, substr.type_ident.name), blk],
+ );
+ BlockOrExpr::new_mixed(vec![me], Some(expr))
+ }
+
+ _ => cx.bug("expected Struct or EnumMatching in derive(Encodable)"),
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
new file mode 100644
index 000000000..735017aa5
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs
@@ -0,0 +1,1655 @@
+//! Some code that abstracts away much of the boilerplate of writing
+//! `derive` instances for traits. Among other things it manages getting
+//! access to the fields of the 4 different sorts of structs and enum
+//! variants, as well as creating the method and impl ast instances.
+//!
+//! Supported features (fairly exhaustive):
+//!
+//! - Methods taking any number of parameters of any type, and returning
+//! any type, other than vectors, bottom and closures.
+//! - Generating `impl`s for types with type parameters and lifetimes
+//! (e.g., `Option<T>`), the parameters are automatically given the
+//! current trait as a bound. (This includes separate type parameters
+//! and lifetimes for methods.)
+//! - Additional bounds on the type parameters (`TraitDef.additional_bounds`)
+//!
+//! The most important thing for implementors is the `Substructure` and
+//! `SubstructureFields` objects. The latter groups 5 possibilities of the
+//! arguments:
+//!
+//! - `Struct`, when `Self` is a struct (including tuple structs, e.g.,
+//! `struct T(i32, char)`).
+//! - `EnumMatching`, when `Self` is an enum and all the arguments are the
+//! same variant of the enum (e.g., `Some(1)`, `Some(3)` and `Some(4)`)
+//! - `EnumTag` when `Self` is an enum, for comparing the enum tags.
+//! - `StaticEnum` and `StaticStruct` for static methods, where the type
+//! being derived upon is either an enum or struct respectively. (Any
+//! argument with type Self is just grouped among the non-self
+//! arguments.)
+//!
+//! In the first two cases, the values from the corresponding fields in
+//! all the arguments are grouped together.
+//!
+//! The non-static cases have `Option<ident>` in several places associated
+//! with field `expr`s. This represents the name of the field it is
+//! associated with. It is only not `None` when the associated field has
+//! an identifier in the source code. For example, the `x`s in the
+//! following snippet
+//!
+//! ```rust
+//! # #![allow(dead_code)]
+//! struct A { x : i32 }
+//!
+//! struct B(i32);
+//!
+//! enum C {
+//! C0(i32),
+//! C1 { x: i32 }
+//! }
+//! ```
+//!
+//! The `i32`s in `B` and `C0` don't have an identifier, so the
+//! `Option<ident>`s would be `None` for them.
+//!
+//! In the static cases, the structure is summarized, either into just the
+//! spans of the fields or a list of spans and the field idents (for tuple
+//! structs and record structs, respectively), or a list of these, for
+//! enums (one for each variant). For empty struct and empty enum
+//! variants, it is represented as a count of 0.
+//!
+//! # "`cs`" functions
+//!
+//! The `cs_...` functions ("combine substructure") are designed to
+//! make life easier by providing some pre-made recipes for common
+//! cases; mostly calling the function being derived on all the
+//! arguments and then combining the results back together in some way (or
+//! letting the user choose that). They are not meant to be the only
+//! way to handle the structures that this code creates.
+//!
+//! # Examples
+//!
+//! The following simplified `PartialEq` is used for in-code examples:
+//!
+//! ```rust
+//! trait PartialEq {
+//! fn eq(&self, other: &Self) -> bool;
+//! }
+//! impl PartialEq for i32 {
+//! fn eq(&self, other: &i32) -> bool {
+//! *self == *other
+//! }
+//! }
+//! ```
+//!
+//! Some examples of the values of `SubstructureFields` follow, using the
+//! above `PartialEq`, `A`, `B` and `C`.
+//!
+//! ## Structs
+//!
+//! When generating the `expr` for the `A` impl, the `SubstructureFields` is
+//!
+//! ```{.text}
+//! Struct(vec![FieldInfo {
+//!     span: <span of x>,
+//!     name: Some(<ident of x>),
+//!     self_: <expr for &self.x>,
+//!     other: vec![<expr for &other.x>],
+//! }])
+//! ```
+//!
+//! For the `B` impl, called with `B(a)` and `B(b)`,
+//!
+//! ```{.text}
+//! Struct(vec![FieldInfo {
+//! span: <span of `i32`>,
+//! name: None,
+//! self_: <expr for &a>
+//! other: vec![<expr for &b>]
+//! }])
+//! ```
+//!
+//! ## Enums
+//!
+//! When generating the `expr` for a call with `self == C0(a)` and `other
+//! == C0(b)`, the SubstructureFields is
+//!
+//! ```{.text}
+//! EnumMatching(0, <ast::Variant for C0>,
+//! vec![FieldInfo {
+//! span: <span of i32>
+//! name: None,
+//! self_: <expr for &a>,
+//! other: vec![<expr for &b>]
+//! }])
+//! ```
+//!
+//! For `C1 {x}` and `C1 {x}`,
+//!
+//! ```{.text}
+//! EnumMatching(1, <ast::Variant for C1>,
+//! vec![FieldInfo {
+//! span: <span of x>
+//! name: Some(<ident of x>),
+//! self_: <expr for &self.x>,
+//! other: vec![<expr for &other.x>]
+//! }])
+//! ```
+//!
+//! For the tags,
+//!
+//! ```{.text}
+//! EnumTag(
+//! &[<ident of self tag>, <ident of other tag>], <expr to combine with>)
+//! ```
+//! Note that this setup doesn't allow for the brute-force "match every variant
+//! against every other variant" approach, which is bad because it produces a
+//! quadratic amount of code (see #15375).
+//!
+//! ## Static
+//!
+//! A static method on the types above would result in,
+//!
+//! ```{.text}
+//! StaticStruct(<ast::VariantData of A>, Named(vec![(<ident of x>, <span of x>)]))
+//!
+//! StaticStruct(<ast::VariantData of B>, Unnamed(vec![<span of x>]))
+//!
+//! StaticEnum(<ast::EnumDef of C>,
+//! vec![(<ident of C0>, <span of C0>, Unnamed(vec![<span of i32>])),
+//! (<ident of C1>, <span of C1>, Named(vec![(<ident of x>, <span of x>)]))])
+//! ```
+
+pub use StaticFields::*;
+pub use SubstructureFields::*;
+
+use std::cell::RefCell;
+use std::iter;
+use std::vec;
+
+use rustc_ast::ptr::P;
+use rustc_ast::{self as ast, EnumDef, Expr, Generics, PatKind};
+use rustc_ast::{GenericArg, GenericParamKind, VariantData};
+use rustc_attr as attr;
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::Span;
+
+use ty::{Bounds, Path, Ref, Self_, Ty};
+
+use crate::deriving;
+
+pub mod ty;
+
+pub struct TraitDef<'a> {
+ /// The span for the current #[derive(Foo)] header.
+ pub span: Span,
+
+ pub attributes: Vec<ast::Attribute>,
+
+ /// Path of the trait, including any type parameters
+ pub path: Path,
+
+ /// Additional bounds required of any type parameters of the type,
+ /// other than the current trait
+ pub additional_bounds: Vec<Ty>,
+
+ /// Any extra lifetimes and/or bounds, e.g., `D: serialize::Decoder`
+ pub generics: Bounds,
+
+ /// Can this trait be derived for unions?
+ pub supports_unions: bool,
+
+ pub methods: Vec<MethodDef<'a>>,
+
+ pub associated_types: Vec<(Ident, Ty)>,
+}
+
+pub struct MethodDef<'a> {
+ /// name of the method
+ pub name: Symbol,
+ /// List of generics, e.g., `R: rand::Rng`
+ pub generics: Bounds,
+
+    /// Is there a `&self` argument? If not, it is a static function.
+ pub explicit_self: bool,
+
+ /// Arguments other than the self argument.
+ pub nonself_args: Vec<(Ty, Symbol)>,
+
+    /// Return type.
+ pub ret_ty: Ty,
+
+ pub attributes: Vec<ast::Attribute>,
+
+ /// Can we combine fieldless variants for enums into a single match arm?
+ /// If true, indicates that the trait operation uses the enum tag in some
+ /// way.
+ pub unify_fieldless_variants: bool,
+
+ pub combine_substructure: RefCell<CombineSubstructureFunc<'a>>,
+}
+
+/// All the data about the data structure/method being derived upon.
+pub struct Substructure<'a> {
+ /// ident of self
+ pub type_ident: Ident,
+ /// Verbatim access to any non-selflike arguments, i.e. arguments that
+ /// don't have type `&Self`.
+ pub nonselflike_args: &'a [P<Expr>],
+ pub fields: &'a SubstructureFields<'a>,
+}
+
+/// Summary of the relevant parts of a struct/enum field.
+pub struct FieldInfo {
+ pub span: Span,
+ /// None for tuple structs/normal enum variants, Some for normal
+ /// structs/struct enum variants.
+ pub name: Option<Ident>,
+ /// The expression corresponding to this field of `self`
+ /// (specifically, a reference to it).
+ pub self_expr: P<Expr>,
+ /// The expressions corresponding to references to this field in
+ /// the other selflike arguments.
+ pub other_selflike_exprs: Vec<P<Expr>>,
+}
+
+/// Fields for a static method
+pub enum StaticFields {
+ /// Tuple and unit structs/enum variants like this.
+ Unnamed(Vec<Span>, bool /*is tuple*/),
+ /// Normal structs/struct variants.
+ Named(Vec<(Ident, Span)>),
+}
+
+/// A summary of the possible sets of fields.
+pub enum SubstructureFields<'a> {
+    /// A non-static method where `Self` is a struct.
+ Struct(&'a ast::VariantData, Vec<FieldInfo>),
+
+ /// Matching variants of the enum: variant index, variant count, ast::Variant,
+ /// fields: the field name is only non-`None` in the case of a struct
+ /// variant.
+ EnumMatching(usize, usize, &'a ast::Variant, Vec<FieldInfo>),
+
+ /// The tag of an enum. The first field is a `FieldInfo` for the tags, as
+ /// if they were fields. The second field is the expression to combine the
+ /// tag expression with; it will be `None` if no match is necessary.
+ EnumTag(FieldInfo, Option<P<Expr>>),
+
+ /// A static method where `Self` is a struct.
+ StaticStruct(&'a ast::VariantData, StaticFields),
+
+ /// A static method where `Self` is an enum.
+ StaticEnum(&'a ast::EnumDef, Vec<(Ident, Span, StaticFields)>),
+}
+
+/// Combine the values of all the fields together. The last argument is
+/// all the fields of all the structures.
+pub type CombineSubstructureFunc<'a> =
+ Box<dyn FnMut(&mut ExtCtxt<'_>, Span, &Substructure<'_>) -> BlockOrExpr + 'a>;
+
+pub fn combine_substructure(
+ f: CombineSubstructureFunc<'_>,
+) -> RefCell<CombineSubstructureFunc<'_>> {
+ RefCell::new(f)
+}
+
+struct TypeParameter {
+ bound_generic_params: Vec<ast::GenericParam>,
+ ty: P<ast::Ty>,
+}
+
+// The code snippets built up for derived code are sometimes used as blocks
+// (e.g. in a function body) and sometimes used as expressions (e.g. in a match
+// arm). This structure avoids committing to either form until necessary,
+// avoiding the insertion of any unnecessary blocks.
+//
+// The statements come before the expression.
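+//
+// For example, `BlockOrExpr(vec![], Some(expr))` renders as just `expr`
+// via `into_expr`, while `BlockOrExpr(stmts, Some(expr))` with non-empty
+// `stmts` renders as the block `{ stmts; expr }`.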
+pub struct BlockOrExpr(Vec<ast::Stmt>, Option<P<Expr>>);
+
+impl BlockOrExpr {
+ pub fn new_stmts(stmts: Vec<ast::Stmt>) -> BlockOrExpr {
+ BlockOrExpr(stmts, None)
+ }
+
+ pub fn new_expr(expr: P<Expr>) -> BlockOrExpr {
+ BlockOrExpr(vec![], Some(expr))
+ }
+
+ pub fn new_mixed(stmts: Vec<ast::Stmt>, expr: Option<P<Expr>>) -> BlockOrExpr {
+ BlockOrExpr(stmts, expr)
+ }
+
+ // Converts it into a block.
+ fn into_block(mut self, cx: &ExtCtxt<'_>, span: Span) -> P<ast::Block> {
+ if let Some(expr) = self.1 {
+ self.0.push(cx.stmt_expr(expr));
+ }
+ cx.block(span, self.0)
+ }
+
+ // Converts it into an expression.
+ fn into_expr(self, cx: &ExtCtxt<'_>, span: Span) -> P<Expr> {
+ if self.0.is_empty() {
+ match self.1 {
+ None => cx.expr_block(cx.block(span, vec![])),
+ Some(expr) => expr,
+ }
+ } else if self.0.len() == 1
+ && let ast::StmtKind::Expr(expr) = &self.0[0].kind
+ && self.1.is_none()
+ {
+ // There's only a single statement expression. Pull it out.
+ expr.clone()
+ } else {
+ // Multiple statements and/or expressions.
+ cx.expr_block(self.into_block(cx, span))
+ }
+ }
+}
+
+/// This function helps to extract all the type parameters referenced from a
+/// type. For a type parameter `<T>`, it looks for either a `TyPath` that
+/// is not global and starts with `T`, or a `TyQPath`.
+/// It also includes bound generic params from the input type.
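+///
+/// For example, for a field of type `T::Item` with `ty_param_names == [T]`,
+/// the whole `T::Item` path is collected, so the derived impl can add a
+/// `T::Item: DerivedTrait` bound to its where-clause.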
+fn find_type_parameters(
+ ty: &ast::Ty,
+ ty_param_names: &[Symbol],
+ cx: &ExtCtxt<'_>,
+) -> Vec<TypeParameter> {
+ use rustc_ast::visit;
+
+ struct Visitor<'a, 'b> {
+ cx: &'a ExtCtxt<'b>,
+ ty_param_names: &'a [Symbol],
+ bound_generic_params_stack: Vec<ast::GenericParam>,
+ type_params: Vec<TypeParameter>,
+ }
+
+ impl<'a, 'b> visit::Visitor<'a> for Visitor<'a, 'b> {
+ fn visit_ty(&mut self, ty: &'a ast::Ty) {
+ if let ast::TyKind::Path(_, ref path) = ty.kind {
+ if let Some(segment) = path.segments.first() {
+ if self.ty_param_names.contains(&segment.ident.name) {
+ self.type_params.push(TypeParameter {
+ bound_generic_params: self.bound_generic_params_stack.clone(),
+ ty: P(ty.clone()),
+ });
+ }
+ }
+ }
+
+ visit::walk_ty(self, ty)
+ }
+
+ // Place bound generic params on a stack, to extract them when a type is encountered.
+ fn visit_poly_trait_ref(
+ &mut self,
+ trait_ref: &'a ast::PolyTraitRef,
+ modifier: &'a ast::TraitBoundModifier,
+ ) {
+ let stack_len = self.bound_generic_params_stack.len();
+ self.bound_generic_params_stack
+ .extend(trait_ref.bound_generic_params.clone().into_iter());
+
+ visit::walk_poly_trait_ref(self, trait_ref, modifier);
+
+ self.bound_generic_params_stack.truncate(stack_len);
+ }
+
+ fn visit_mac_call(&mut self, mac: &ast::MacCall) {
+ self.cx.span_err(mac.span(), "`derive` cannot be used on items with type macros");
+ }
+ }
+
+ let mut visitor = Visitor {
+ cx,
+ ty_param_names,
+ bound_generic_params_stack: Vec::new(),
+ type_params: Vec::new(),
+ };
+ visit::Visitor::visit_ty(&mut visitor, ty);
+
+ visitor.type_params
+}
+
+impl<'a> TraitDef<'a> {
+ pub fn expand(
+ self,
+ cx: &mut ExtCtxt<'_>,
+ mitem: &ast::MetaItem,
+ item: &'a Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+ ) {
+ self.expand_ext(cx, mitem, item, push, false);
+ }
+
+ pub fn expand_ext(
+ self,
+ cx: &mut ExtCtxt<'_>,
+ mitem: &ast::MetaItem,
+ item: &'a Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+ from_scratch: bool,
+ ) {
+ match *item {
+ Annotatable::Item(ref item) => {
+ let is_packed = item.attrs.iter().any(|attr| {
+ for r in attr::find_repr_attrs(&cx.sess, attr) {
+ if let attr::ReprPacked(_) = r {
+ return true;
+ }
+ }
+ false
+ });
+ let has_no_type_params = match item.kind {
+ ast::ItemKind::Struct(_, ref generics)
+ | ast::ItemKind::Enum(_, ref generics)
+ | ast::ItemKind::Union(_, ref generics) => !generics
+ .params
+ .iter()
+ .any(|param| matches!(param.kind, ast::GenericParamKind::Type { .. })),
+ _ => unreachable!(),
+ };
+ let container_id = cx.current_expansion.id.expn_data().parent.expect_local();
+ let always_copy = has_no_type_params && cx.resolver.has_derive_copy(container_id);
+
+ let newitem = match item.kind {
+ ast::ItemKind::Struct(ref struct_def, ref generics) => self.expand_struct_def(
+ cx,
+ &struct_def,
+ item.ident,
+ generics,
+ from_scratch,
+ is_packed,
+ always_copy,
+ ),
+ ast::ItemKind::Enum(ref enum_def, ref generics) => {
+ // We ignore `is_packed`/`always_copy` here, because
+ // `repr(packed)` enums cause an error later on.
+ //
+ // This can only cause further compilation errors
+ // downstream in blatantly illegal code, so it
+ // is fine.
+ self.expand_enum_def(cx, enum_def, item.ident, generics, from_scratch)
+ }
+ ast::ItemKind::Union(ref struct_def, ref generics) => {
+ if self.supports_unions {
+ self.expand_struct_def(
+ cx,
+ &struct_def,
+ item.ident,
+ generics,
+ from_scratch,
+ is_packed,
+ always_copy,
+ )
+ } else {
+ cx.span_err(mitem.span, "this trait cannot be derived for unions");
+ return;
+ }
+ }
+ _ => unreachable!(),
+ };
+ // Keep the lint attributes of the previous item to control how the
+ // generated implementations are linted
+ let mut attrs = newitem.attrs.clone();
+ attrs.extend(
+ item.attrs
+ .iter()
+ .filter(|a| {
+ [
+ sym::allow,
+ sym::warn,
+ sym::deny,
+ sym::forbid,
+ sym::stable,
+ sym::unstable,
+ ]
+ .contains(&a.name_or_empty())
+ })
+ .cloned(),
+ );
+ push(Annotatable::Item(P(ast::Item { attrs, ..(*newitem).clone() })))
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ /// Given that we are deriving a trait `DerivedTrait` for a type like:
+ ///
+ /// ```ignore (only-for-syntax-highlight)
+ /// struct Struct<'a, ..., 'z, A, B: DeclaredTrait, C, ..., Z> where C: WhereTrait {
+ /// a: A,
+ /// b: B::Item,
+ /// b1: <B as DeclaredTrait>::Item,
+ /// c1: <C as WhereTrait>::Item,
+ /// c2: Option<<C as WhereTrait>::Item>,
+ /// ...
+ /// }
+ /// ```
+ ///
+ /// create an impl like:
+ ///
+ /// ```ignore (only-for-syntax-highlight)
+ /// impl<'a, ..., 'z, A, B: DeclaredTrait, C, ... Z> where
+ /// C: WhereTrait,
+ /// A: DerivedTrait + B1 + ... + BN,
+ /// B: DerivedTrait + B1 + ... + BN,
+ /// C: DerivedTrait + B1 + ... + BN,
+ /// B::Item: DerivedTrait + B1 + ... + BN,
+ /// <C as WhereTrait>::Item: DerivedTrait + B1 + ... + BN,
+ /// ...
+ /// {
+ /// ...
+ /// }
+ /// ```
+ ///
+    /// where B1, ..., BN are the bounds given by `bounds_paths`. Z is a phantom type, and
+ /// therefore does not get bound by the derived trait.
+ fn create_derived_impl(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ type_ident: Ident,
+ generics: &Generics,
+ field_tys: Vec<P<ast::Ty>>,
+ methods: Vec<P<ast::AssocItem>>,
+ ) -> P<ast::Item> {
+ let trait_path = self.path.to_path(cx, self.span, type_ident, generics);
+
+ // Transform associated types from `deriving::ty::Ty` into `ast::AssocItem`
+ let associated_types = self.associated_types.iter().map(|&(ident, ref type_def)| {
+ P(ast::AssocItem {
+ id: ast::DUMMY_NODE_ID,
+ span: self.span,
+ ident,
+ vis: ast::Visibility {
+ span: self.span.shrink_to_lo(),
+ kind: ast::VisibilityKind::Inherited,
+ tokens: None,
+ },
+ attrs: Vec::new(),
+ kind: ast::AssocItemKind::TyAlias(Box::new(ast::TyAlias {
+ defaultness: ast::Defaultness::Final,
+ generics: Generics::default(),
+ where_clauses: (
+ ast::TyAliasWhereClause::default(),
+ ast::TyAliasWhereClause::default(),
+ ),
+ where_predicates_split: 0,
+ bounds: Vec::new(),
+ ty: Some(type_def.to_ty(cx, self.span, type_ident, generics)),
+ })),
+ tokens: None,
+ })
+ });
+
+ let Generics { mut params, mut where_clause, .. } =
+ self.generics.to_generics(cx, self.span, type_ident, generics);
+ where_clause.span = generics.where_clause.span;
+ let ctxt = self.span.ctxt();
+ let span = generics.span.with_ctxt(ctxt);
+
+ // Create the generic parameters
+ params.extend(generics.params.iter().map(|param| match &param.kind {
+ GenericParamKind::Lifetime { .. } => param.clone(),
+ GenericParamKind::Type { .. } => {
+ // I don't think this can be moved out of the loop, since
+ // a GenericBound requires an ast id
+ let bounds: Vec<_> =
+ // extra restrictions on the generics parameters to the
+ // type being derived upon
+ self.additional_bounds.iter().map(|p| {
+ cx.trait_bound(p.to_path(cx, self.span, type_ident, generics))
+ }).chain(
+ // require the current trait
+ iter::once(cx.trait_bound(trait_path.clone()))
+ ).chain(
+ // also add in any bounds from the declaration
+ param.bounds.iter().cloned()
+ ).collect();
+
+ cx.typaram(param.ident.span.with_ctxt(ctxt), param.ident, vec![], bounds, None)
+ }
+ GenericParamKind::Const { ty, kw_span, .. } => {
+ let const_nodefault_kind = GenericParamKind::Const {
+ ty: ty.clone(),
+ kw_span: kw_span.with_ctxt(ctxt),
+
+ // We can't have default values inside impl block
+ default: None,
+ };
+ let mut param_clone = param.clone();
+ param_clone.kind = const_nodefault_kind;
+ param_clone
+ }
+ }));
+
+ // and similarly for where clauses
+ where_clause.predicates.extend(generics.where_clause.predicates.iter().map(|clause| {
+ match clause {
+ ast::WherePredicate::BoundPredicate(wb) => {
+ let span = wb.span.with_ctxt(ctxt);
+ ast::WherePredicate::BoundPredicate(ast::WhereBoundPredicate {
+ span,
+ ..wb.clone()
+ })
+ }
+ ast::WherePredicate::RegionPredicate(wr) => {
+ let span = wr.span.with_ctxt(ctxt);
+ ast::WherePredicate::RegionPredicate(ast::WhereRegionPredicate {
+ span,
+ ..wr.clone()
+ })
+ }
+ ast::WherePredicate::EqPredicate(we) => {
+ let span = we.span.with_ctxt(ctxt);
+ ast::WherePredicate::EqPredicate(ast::WhereEqPredicate {
+ id: ast::DUMMY_NODE_ID,
+ span,
+ ..we.clone()
+ })
+ }
+ }
+ }));
+
+ {
+ // Extra scope required here so ty_params goes out of scope before params is moved
+
+ let mut ty_params = params
+ .iter()
+ .filter(|param| matches!(param.kind, ast::GenericParamKind::Type { .. }))
+ .peekable();
+
+ if ty_params.peek().is_some() {
+ let ty_param_names: Vec<Symbol> =
+ ty_params.map(|ty_param| ty_param.ident.name).collect();
+
+ for field_ty in field_tys {
+ let field_ty_params = find_type_parameters(&field_ty, &ty_param_names, cx);
+
+ for field_ty_param in field_ty_params {
+ // if we have already handled this type, skip it
+ if let ast::TyKind::Path(_, ref p) = field_ty_param.ty.kind {
+ if p.segments.len() == 1
+ && ty_param_names.contains(&p.segments[0].ident.name)
+ {
+ continue;
+ };
+ }
+ let mut bounds: Vec<_> = self
+ .additional_bounds
+ .iter()
+ .map(|p| cx.trait_bound(p.to_path(cx, self.span, type_ident, generics)))
+ .collect();
+
+ // require the current trait
+ bounds.push(cx.trait_bound(trait_path.clone()));
+
+ let predicate = ast::WhereBoundPredicate {
+ span: self.span,
+ bound_generic_params: field_ty_param.bound_generic_params,
+ bounded_ty: field_ty_param.ty,
+ bounds,
+ };
+
+ let predicate = ast::WherePredicate::BoundPredicate(predicate);
+ where_clause.predicates.push(predicate);
+ }
+ }
+ }
+ }
+
+ let trait_generics = Generics { params, where_clause, span };
+
+ // Create the reference to the trait.
+ let trait_ref = cx.trait_ref(trait_path);
+
+ let self_params: Vec<_> = generics
+ .params
+ .iter()
+ .map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ GenericArg::Lifetime(cx.lifetime(param.ident.span.with_ctxt(ctxt), param.ident))
+ }
+ GenericParamKind::Type { .. } => {
+ GenericArg::Type(cx.ty_ident(param.ident.span.with_ctxt(ctxt), param.ident))
+ }
+ GenericParamKind::Const { .. } => {
+ GenericArg::Const(cx.const_ident(param.ident.span.with_ctxt(ctxt), param.ident))
+ }
+ })
+ .collect();
+
+ // Create the type of `self`.
+ let path = cx.path_all(self.span, false, vec![type_ident], self_params);
+ let self_type = cx.ty_path(path);
+
+ let attr = cx.attribute(cx.meta_word(self.span, sym::automatically_derived));
+ let opt_trait_ref = Some(trait_ref);
+
+ let mut a = vec![attr];
+ a.extend(self.attributes.iter().cloned());
+
+ cx.item(
+ self.span,
+ Ident::empty(),
+ a,
+ ast::ItemKind::Impl(Box::new(ast::Impl {
+ unsafety: ast::Unsafe::No,
+ polarity: ast::ImplPolarity::Positive,
+ defaultness: ast::Defaultness::Final,
+ constness: ast::Const::No,
+ generics: trait_generics,
+ of_trait: opt_trait_ref,
+ self_ty: self_type,
+ items: methods.into_iter().chain(associated_types).collect(),
+ })),
+ )
+ }
+
+ fn expand_struct_def(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ struct_def: &'a VariantData,
+ type_ident: Ident,
+ generics: &Generics,
+ from_scratch: bool,
+ is_packed: bool,
+ always_copy: bool,
+ ) -> P<ast::Item> {
+ let field_tys: Vec<P<ast::Ty>> =
+ struct_def.fields().iter().map(|field| field.ty.clone()).collect();
+
+ let methods = self
+ .methods
+ .iter()
+ .map(|method_def| {
+ let (explicit_self, selflike_args, nonselflike_args, nonself_arg_tys) =
+ method_def.extract_arg_details(cx, self, type_ident, generics);
+
+ let body = if from_scratch || method_def.is_static() {
+ method_def.expand_static_struct_method_body(
+ cx,
+ self,
+ struct_def,
+ type_ident,
+ &nonselflike_args,
+ )
+ } else {
+ method_def.expand_struct_method_body(
+ cx,
+ self,
+ struct_def,
+ type_ident,
+ &selflike_args,
+ &nonselflike_args,
+ is_packed,
+ always_copy,
+ )
+ };
+
+ method_def.create_method(
+ cx,
+ self,
+ type_ident,
+ generics,
+ explicit_self,
+ nonself_arg_tys,
+ body,
+ )
+ })
+ .collect();
+
+ self.create_derived_impl(cx, type_ident, generics, field_tys, methods)
+ }
+
+ fn expand_enum_def(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ enum_def: &'a EnumDef,
+ type_ident: Ident,
+ generics: &Generics,
+ from_scratch: bool,
+ ) -> P<ast::Item> {
+ let mut field_tys = Vec::new();
+
+ for variant in &enum_def.variants {
+ field_tys.extend(variant.data.fields().iter().map(|field| field.ty.clone()));
+ }
+
+ let methods = self
+ .methods
+ .iter()
+ .map(|method_def| {
+ let (explicit_self, selflike_args, nonselflike_args, nonself_arg_tys) =
+ method_def.extract_arg_details(cx, self, type_ident, generics);
+
+ let body = if from_scratch || method_def.is_static() {
+ method_def.expand_static_enum_method_body(
+ cx,
+ self,
+ enum_def,
+ type_ident,
+ &nonselflike_args,
+ )
+ } else {
+ method_def.expand_enum_method_body(
+ cx,
+ self,
+ enum_def,
+ type_ident,
+ selflike_args,
+ &nonselflike_args,
+ )
+ };
+
+ method_def.create_method(
+ cx,
+ self,
+ type_ident,
+ generics,
+ explicit_self,
+ nonself_arg_tys,
+ body,
+ )
+ })
+ .collect();
+
+ self.create_derived_impl(cx, type_ident, generics, field_tys, methods)
+ }
+}
+
+impl<'a> MethodDef<'a> {
+ fn call_substructure_method(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ trait_: &TraitDef<'_>,
+ type_ident: Ident,
+ nonselflike_args: &[P<Expr>],
+ fields: &SubstructureFields<'_>,
+ ) -> BlockOrExpr {
+ let span = trait_.span;
+ let substructure = Substructure { type_ident, nonselflike_args, fields };
+ let mut f = self.combine_substructure.borrow_mut();
+ let f: &mut CombineSubstructureFunc<'_> = &mut *f;
+ f(cx, span, &substructure)
+ }
+
+ fn get_ret_ty(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ trait_: &TraitDef<'_>,
+ generics: &Generics,
+ type_ident: Ident,
+ ) -> P<ast::Ty> {
+ self.ret_ty.to_ty(cx, trait_.span, type_ident, generics)
+ }
+
+ fn is_static(&self) -> bool {
+ !self.explicit_self
+ }
+
+ // The return value includes:
+ // - explicit_self: The `&self` arg, if present.
+ // - selflike_args: Expressions for `&self` (if present) and also any other
+ // args with the same type (e.g. the `other` arg in `PartialEq::eq`).
+ // - nonselflike_args: Expressions for all the remaining args.
+ // - nonself_arg_tys: Additional information about all the args other than
+ // `&self`.
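+ //
+ // For example (illustrative), for `PartialEq::eq(&self, other: &Self)`:
+ // - explicit_self is `Some(&self)`
+ // - selflike_args is `[self, other]`
+ // - nonselflike_args is `[]`
+ // - nonself_arg_tys is `[(other, &Self)]`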
+ fn extract_arg_details(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ trait_: &TraitDef<'_>,
+ type_ident: Ident,
+ generics: &Generics,
+ ) -> (Option<ast::ExplicitSelf>, Vec<P<Expr>>, Vec<P<Expr>>, Vec<(Ident, P<ast::Ty>)>) {
+ let mut selflike_args = Vec::new();
+ let mut nonselflike_args = Vec::new();
+ let mut nonself_arg_tys = Vec::new();
+ let span = trait_.span;
+
+ let explicit_self = if self.explicit_self {
+ let (self_expr, explicit_self) = ty::get_explicit_self(cx, span);
+ selflike_args.push(self_expr);
+ Some(explicit_self)
+ } else {
+ None
+ };
+
+ for (ty, name) in self.nonself_args.iter() {
+ let ast_ty = ty.to_ty(cx, span, type_ident, generics);
+ let ident = Ident::new(*name, span);
+ nonself_arg_tys.push((ident, ast_ty));
+
+ let arg_expr = cx.expr_ident(span, ident);
+
+ match ty {
+ // Selflike (`&Self`) arguments only occur in non-static methods.
+ Ref(box Self_, _) if !self.is_static() => selflike_args.push(arg_expr),
+ Self_ => cx.span_bug(span, "`Self` in non-return position"),
+ _ => nonselflike_args.push(arg_expr),
+ }
+ }
+
+ (explicit_self, selflike_args, nonselflike_args, nonself_arg_tys)
+ }
+
+ fn create_method(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ trait_: &TraitDef<'_>,
+ type_ident: Ident,
+ generics: &Generics,
+ explicit_self: Option<ast::ExplicitSelf>,
+ nonself_arg_tys: Vec<(Ident, P<ast::Ty>)>,
+ body: BlockOrExpr,
+ ) -> P<ast::AssocItem> {
+ let span = trait_.span;
+ // Create the generics that aren't for `Self`.
+ let fn_generics = self.generics.to_generics(cx, span, type_ident, generics);
+
+ let args = {
+ let self_arg = explicit_self.map(|explicit_self| {
+ let ident = Ident::with_dummy_span(kw::SelfLower).with_span_pos(span);
+ ast::Param::from_self(ast::AttrVec::default(), explicit_self, ident)
+ });
+ let nonself_args =
+ nonself_arg_tys.into_iter().map(|(name, ty)| cx.param(span, name, ty));
+ self_arg.into_iter().chain(nonself_args).collect()
+ };
+
+ let ret_type = self.get_ret_ty(cx, trait_, generics, type_ident);
+
+ let method_ident = Ident::new(self.name, span);
+ let fn_decl = cx.fn_decl(args, ast::FnRetTy::Ty(ret_type));
+ let body_block = body.into_block(cx, span);
+
+ let trait_lo_sp = span.shrink_to_lo();
+
+ let sig = ast::FnSig { header: ast::FnHeader::default(), decl: fn_decl, span };
+ let defaultness = ast::Defaultness::Final;
+
+ // Create the method.
+ P(ast::AssocItem {
+ id: ast::DUMMY_NODE_ID,
+ attrs: self.attributes.clone(),
+ span,
+ vis: ast::Visibility {
+ span: trait_lo_sp,
+ kind: ast::VisibilityKind::Inherited,
+ tokens: None,
+ },
+ ident: method_ident,
+ kind: ast::AssocItemKind::Fn(Box::new(ast::Fn {
+ defaultness,
+ sig,
+ generics: fn_generics,
+ body: Some(body_block),
+ })),
+ tokens: None,
+ })
+ }
+
+ /// The normal case uses field access.
+ /// ```
+ /// #[derive(PartialEq)]
+ /// # struct Dummy;
+ /// struct A { x: u8, y: u8 }
+ ///
+ /// // equivalent to:
+ /// impl PartialEq for A {
+ /// fn eq(&self, other: &A) -> bool {
+ /// self.x == other.x && self.y == other.y
+ /// }
+ /// }
+ /// ```
+ /// But if the struct is `repr(packed)`, we can't use something like
+ /// `&self.x` because that might cause an unaligned ref. So for any trait
+ /// method that takes a reference, if the struct impls `Copy` then we use a
+ /// local block to force a copy:
+ /// ```
+ /// # struct A { x: u8, y: u8 }
+ /// impl PartialEq for A {
+ /// fn eq(&self, other: &A) -> bool {
+ /// // Desugars to `{ self.x }.eq(&{ other.x }) && ...`
+ /// { self.x } == { other.x } && { self.y } == { other.y }
+ /// }
+ /// }
+ /// impl Hash for A {
+ /// fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) -> () {
+ /// ::core::hash::Hash::hash(&{ self.x }, state);
+ /// ::core::hash::Hash::hash(&{ self.y }, state)
+ /// }
+ /// }
+ /// ```
+ /// If the struct doesn't impl `Copy`, we use let-destructuring with `ref`:
+ /// ```
+ /// # struct A { x: u8, y: u8 }
+ /// impl PartialEq for A {
+ /// fn eq(&self, other: &A) -> bool {
+ /// let Self { x: ref __self_0_0, y: ref __self_0_1 } = *self;
+ /// let Self { x: ref __self_1_0, y: ref __self_1_1 } = *other;
+ /// *__self_0_0 == *__self_1_0 && *__self_0_1 == *__self_1_1
+ /// }
+ /// }
+ /// ```
+ /// This latter case only works if the fields match the alignment required
+ /// by the `packed(N)` attribute. (We'll get errors later on if not.)
+ fn expand_struct_method_body<'b>(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ trait_: &TraitDef<'b>,
+ struct_def: &'b VariantData,
+ type_ident: Ident,
+ selflike_args: &[P<Expr>],
+ nonselflike_args: &[P<Expr>],
+ is_packed: bool,
+ always_copy: bool,
+ ) -> BlockOrExpr {
+ let span = trait_.span;
+ assert!(selflike_args.len() == 1 || selflike_args.len() == 2);
+
+ let mk_body = |cx, selflike_fields| {
+ self.call_substructure_method(
+ cx,
+ trait_,
+ type_ident,
+ nonselflike_args,
+ &Struct(struct_def, selflike_fields),
+ )
+ };
+
+ if !is_packed {
+ let selflike_fields =
+ trait_.create_struct_field_access_fields(cx, selflike_args, struct_def, false);
+ mk_body(cx, selflike_fields)
+ } else if always_copy {
+ let selflike_fields =
+ trait_.create_struct_field_access_fields(cx, selflike_args, struct_def, true);
+ mk_body(cx, selflike_fields)
+ } else {
+ // Packed but not copy. Need to use ref patterns.
+ let prefixes: Vec<_> =
+ (0..selflike_args.len()).map(|i| format!("__self_{}", i)).collect();
+ let addr_of = always_copy;
+ let selflike_fields =
+ trait_.create_struct_pattern_fields(cx, struct_def, &prefixes, addr_of);
+ let mut body = mk_body(cx, selflike_fields);
+
+ let struct_path = cx.path(span, vec![Ident::new(kw::SelfUpper, type_ident.span)]);
+ let use_ref_pat = is_packed && !always_copy;
+ let patterns =
+ trait_.create_struct_patterns(cx, struct_path, struct_def, &prefixes, use_ref_pat);
+
+ // Do the let-destructuring.
+ let mut stmts: Vec<_> = iter::zip(selflike_args, patterns)
+ .map(|(selflike_arg_expr, pat)| {
+ let selflike_arg_expr = cx.expr_deref(span, selflike_arg_expr.clone());
+ cx.stmt_let_pat(span, pat, selflike_arg_expr)
+ })
+ .collect();
+ stmts.extend(std::mem::take(&mut body.0));
+ BlockOrExpr(stmts, body.1)
+ }
+ }
+
+ fn expand_static_struct_method_body(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ trait_: &TraitDef<'_>,
+ struct_def: &VariantData,
+ type_ident: Ident,
+ nonselflike_args: &[P<Expr>],
+ ) -> BlockOrExpr {
+ let summary = trait_.summarise_struct(cx, struct_def);
+
+ self.call_substructure_method(
+ cx,
+ trait_,
+ type_ident,
+ nonselflike_args,
+ &StaticStruct(struct_def, summary),
+ )
+ }
+
+ /// ```
+ /// #[derive(PartialEq)]
+ /// # struct Dummy;
+ /// enum A {
+ /// A1,
+ /// A2(i32)
+ /// }
+ /// ```
+ /// is equivalent to:
+ /// ```
+ /// impl ::core::cmp::PartialEq for A {
+ /// #[inline]
+ /// fn eq(&self, other: &A) -> bool {
+ /// let __self_tag = ::core::intrinsics::discriminant_value(self);
+ /// let __arg1_tag = ::core::intrinsics::discriminant_value(other);
+ /// __self_tag == __arg1_tag &&
+ /// match (self, other) {
+ /// (A::A2(__self_0), A::A2(__arg1_0)) =>
+ /// *__self_0 == *__arg1_0,
+ /// _ => true,
+ /// }
+ /// }
+ /// }
+ /// ```
+ /// Creates a tag check combined with a match for a tuple of all
+ /// `selflike_args`, with an arm for each variant with fields, possibly an
+ /// arm for each fieldless variant (if `unify_fieldless_variants` is
+ /// false), and possibly a default arm.
+ fn expand_enum_method_body<'b>(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ trait_: &TraitDef<'b>,
+ enum_def: &'b EnumDef,
+ type_ident: Ident,
+ selflike_args: Vec<P<Expr>>,
+ nonselflike_args: &[P<Expr>],
+ ) -> BlockOrExpr {
+ let span = trait_.span;
+ let variants = &enum_def.variants;
+
+ // Traits that unify fieldless variants always use the tag(s).
+ let uses_tags = self.unify_fieldless_variants;
+
+ // There is no sensible code to be generated for *any* deriving on a
+ // zero-variant enum. So we just generate a failing expression.
+ if variants.is_empty() {
+ return BlockOrExpr(vec![], Some(deriving::call_unreachable(cx, span)));
+ }
+
+ let prefixes = iter::once("__self".to_string())
+ .chain(
+ selflike_args
+ .iter()
+ .enumerate()
+ .skip(1)
+ .map(|(arg_count, _selflike_arg)| format!("__arg{}", arg_count)),
+ )
+ .collect::<Vec<String>>();
+
+ // Build a series of let statements mapping each selflike_arg
+ // to its discriminant value.
+ //
+ // e.g. for `PartialEq::eq` builds two statements:
+ // ```
+ // let __self_tag = ::core::intrinsics::discriminant_value(self);
+ // let __arg1_tag = ::core::intrinsics::discriminant_value(other);
+ // ```
+ let get_tag_pieces = |cx: &ExtCtxt<'_>| {
+ let tag_idents: Vec<_> = prefixes
+ .iter()
+ .map(|name| Ident::from_str_and_span(&format!("{}_tag", name), span))
+ .collect();
+
+ let mut tag_exprs: Vec<_> = tag_idents
+ .iter()
+ .map(|&ident| cx.expr_addr_of(span, cx.expr_ident(span, ident)))
+ .collect();
+
+ let self_expr = tag_exprs.remove(0);
+ let other_selflike_exprs = tag_exprs;
+ let tag_field = FieldInfo { span, name: None, self_expr, other_selflike_exprs };
+
+ let tag_let_stmts: Vec<_> = iter::zip(&tag_idents, &selflike_args)
+ .map(|(&ident, selflike_arg)| {
+ let variant_value = deriving::call_intrinsic(
+ cx,
+ span,
+ sym::discriminant_value,
+ vec![selflike_arg.clone()],
+ );
+ cx.stmt_let(span, false, ident, variant_value)
+ })
+ .collect();
+
+ (tag_field, tag_let_stmts)
+ };
+
+ // There are some special cases involving fieldless enums where no
+ // match is necessary.
+ let all_fieldless = variants.iter().all(|v| v.data.fields().is_empty());
+ if all_fieldless {
+ if uses_tags && variants.len() > 1 {
+ // If the type is fieldless and the trait uses the tag and
+ // there are multiple variants, we need just an operation on
+ // the tag(s).
+ let (tag_field, mut tag_let_stmts) = get_tag_pieces(cx);
+ let mut tag_check = self.call_substructure_method(
+ cx,
+ trait_,
+ type_ident,
+ nonselflike_args,
+ &EnumTag(tag_field, None),
+ );
+ tag_let_stmts.append(&mut tag_check.0);
+ return BlockOrExpr(tag_let_stmts, tag_check.1);
+ }
+
+ if variants.len() == 1 {
+ // If there is a single variant, we don't need an operation on
+ // the tag(s). Just use the most degenerate result.
+ return self.call_substructure_method(
+ cx,
+ trait_,
+ type_ident,
+ nonselflike_args,
+ &EnumMatching(0, 1, &variants[0], Vec::new()),
+ );
+ };
+ }
+
+ // These arms are of the form:
+ // (Variant1, Variant1, ...) => Body1
+ // (Variant2, Variant2, ...) => Body2
+ // ...
+ // where each tuple has length = selflike_args.len()
+ let mut match_arms: Vec<ast::Arm> = variants
+ .iter()
+ .enumerate()
+ .filter(|&(_, v)| !(self.unify_fieldless_variants && v.data.fields().is_empty()))
+ .map(|(index, variant)| {
+ // A single arm has form (&VariantK, &VariantK, ...) => BodyK
+ // (see "Final wrinkle" note below for why.)
+
+ let addr_of = false; // because enums can't be repr(packed)
+ let fields =
+ trait_.create_struct_pattern_fields(cx, &variant.data, &prefixes, addr_of);
+
+ let sp = variant.span.with_ctxt(trait_.span.ctxt());
+ let variant_path = cx.path(sp, vec![type_ident, variant.ident]);
+ let use_ref_pat = false; // because enums can't be repr(packed)
+ let mut subpats: Vec<_> = trait_.create_struct_patterns(
+ cx,
+ variant_path,
+ &variant.data,
+ &prefixes,
+ use_ref_pat,
+ );
+
+ // `(VariantK, VariantK, ...)` or just `VariantK`.
+ let single_pat = if subpats.len() == 1 {
+ subpats.pop().unwrap()
+ } else {
+ cx.pat_tuple(span, subpats)
+ };
+
+ // For the BodyK, we need to delegate to our caller,
+ // passing it an EnumMatching to indicate which case
+ // we are in.
+ //
+ // Now, for some given VariantK, we have built up
+ // expressions for referencing every field of every
+ // Self arg, assuming all are instances of VariantK.
+ // Build up code associated with such a case.
+ let substructure = EnumMatching(index, variants.len(), variant, fields);
+ let arm_expr = self
+ .call_substructure_method(
+ cx,
+ trait_,
+ type_ident,
+ nonselflike_args,
+ &substructure,
+ )
+ .into_expr(cx, span);
+
+ cx.arm(span, single_pat, arm_expr)
+ })
+ .collect();
+
+ // Add a default arm to the match, if necessary.
+ let first_fieldless = variants.iter().find(|v| v.data.fields().is_empty());
+ let default = match first_fieldless {
+ Some(v) if self.unify_fieldless_variants => {
+ // We need a default case that handles all the fieldless
+ // variants. The index and actual variant aren't meaningful in
+ // this case, so just use dummy values.
+ Some(
+ self.call_substructure_method(
+ cx,
+ trait_,
+ type_ident,
+ nonselflike_args,
+ &EnumMatching(0, variants.len(), v, Vec::new()),
+ )
+ .into_expr(cx, span),
+ )
+ }
+ _ if variants.len() > 1 && selflike_args.len() > 1 => {
+ // Because we know that all the arguments will match if we reach
+ // the match expression, we add the unreachable intrinsic as the
+ // default arm's result, which should help LLVM optimize it.
+ Some(deriving::call_unreachable(cx, span))
+ }
+ _ => None,
+ };
+ if let Some(arm) = default {
+ match_arms.push(cx.arm(span, cx.pat_wild(span), arm));
+ }
+
+ // Create a match expression with one arm per discriminant plus
+ // possibly a default arm, e.g.:
+ // match (self, other) {
+ // (Variant1, Variant1, ...) => Body1
+ // (Variant2, Variant2, ...) => Body2,
+ // ...
+ // _ => ::core::intrinsics::unreachable()
+ // }
+ let get_match_expr = |mut selflike_args: Vec<P<Expr>>| {
+ let match_arg = if selflike_args.len() == 1 {
+ selflike_args.pop().unwrap()
+ } else {
+ cx.expr(span, ast::ExprKind::Tup(selflike_args))
+ };
+ cx.expr_match(span, match_arg, match_arms)
+ };
+
+ // If the trait uses the tag and there are multiple variants, we need
+ // to add a tag check operation before the match. Otherwise, the match
+ // is enough.
+ if uses_tags && variants.len() > 1 {
+ let (tag_field, mut tag_let_stmts) = get_tag_pieces(cx);
+
+ // Combine a tag check with the match.
+ let mut tag_check_plus_match = self.call_substructure_method(
+ cx,
+ trait_,
+ type_ident,
+ nonselflike_args,
+ &EnumTag(tag_field, Some(get_match_expr(selflike_args))),
+ );
+ tag_let_stmts.append(&mut tag_check_plus_match.0);
+ BlockOrExpr(tag_let_stmts, tag_check_plus_match.1)
+ } else {
+ BlockOrExpr(vec![], Some(get_match_expr(selflike_args)))
+ }
+ }
+
+ fn expand_static_enum_method_body(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ trait_: &TraitDef<'_>,
+ enum_def: &EnumDef,
+ type_ident: Ident,
+ nonselflike_args: &[P<Expr>],
+ ) -> BlockOrExpr {
+ let summary = enum_def
+ .variants
+ .iter()
+ .map(|v| {
+ let sp = v.span.with_ctxt(trait_.span.ctxt());
+ let summary = trait_.summarise_struct(cx, &v.data);
+ (v.ident, sp, summary)
+ })
+ .collect();
+ self.call_substructure_method(
+ cx,
+ trait_,
+ type_ident,
+ nonselflike_args,
+ &StaticEnum(enum_def, summary),
+ )
+ }
+}
+
+// General helper methods.
+impl<'a> TraitDef<'a> {
+ fn summarise_struct(&self, cx: &mut ExtCtxt<'_>, struct_def: &VariantData) -> StaticFields {
+ let mut named_idents = Vec::new();
+ let mut just_spans = Vec::new();
+ for field in struct_def.fields() {
+ let sp = field.span.with_ctxt(self.span.ctxt());
+ match field.ident {
+ Some(ident) => named_idents.push((ident, sp)),
+ _ => just_spans.push(sp),
+ }
+ }
+
+ let is_tuple = matches!(struct_def, ast::VariantData::Tuple(..));
+ match (just_spans.is_empty(), named_idents.is_empty()) {
+ (false, false) => {
+ cx.span_bug(self.span, "a struct with named and unnamed fields in generic `derive`")
+ }
+ // named fields
+ (_, false) => Named(named_idents),
+ // unnamed fields
+ (false, _) => Unnamed(just_spans, is_tuple),
+ // empty
+ _ => Named(Vec::new()),
+ }
+ }
+
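+ /// Creates one pattern per selflike arg. For example (illustrative, using
+ /// the `__self_N` prefixes built in `expand_struct_method_body`), a braced
+ /// struct with fields `x` and `y` gets patterns like
+ /// `Self { x: __self_0_0, y: __self_0_1 }`, with `ref` bindings when
+ /// `use_ref_pat` is set.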
+ fn create_struct_patterns(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ struct_path: ast::Path,
+ struct_def: &'a VariantData,
+ prefixes: &[String],
+ use_ref_pat: bool,
+ ) -> Vec<P<ast::Pat>> {
+ prefixes
+ .iter()
+ .map(|prefix| {
+ let pieces_iter =
+ struct_def.fields().iter().enumerate().map(|(i, struct_field)| {
+ let sp = struct_field.span.with_ctxt(self.span.ctxt());
+ let binding_mode = if use_ref_pat {
+ ast::BindingMode::ByRef(ast::Mutability::Not)
+ } else {
+ ast::BindingMode::ByValue(ast::Mutability::Not)
+ };
+ let ident = self.mk_pattern_ident(prefix, i);
+ let path = ident.with_span_pos(sp);
+ (
+ sp,
+ struct_field.ident,
+ cx.pat(path.span, PatKind::Ident(binding_mode, path, None)),
+ )
+ });
+
+ let struct_path = struct_path.clone();
+ match *struct_def {
+ VariantData::Struct(..) => {
+ let field_pats = pieces_iter
+ .map(|(sp, ident, pat)| {
+ if ident.is_none() {
+ cx.span_bug(
+ sp,
+ "a braced struct with unnamed fields in `derive`",
+ );
+ }
+ ast::PatField {
+ ident: ident.unwrap(),
+ is_shorthand: false,
+ attrs: ast::AttrVec::new(),
+ id: ast::DUMMY_NODE_ID,
+ span: pat.span.with_ctxt(self.span.ctxt()),
+ pat,
+ is_placeholder: false,
+ }
+ })
+ .collect();
+ cx.pat_struct(self.span, struct_path, field_pats)
+ }
+ VariantData::Tuple(..) => {
+ let subpats = pieces_iter.map(|(_, _, subpat)| subpat).collect();
+ cx.pat_tuple_struct(self.span, struct_path, subpats)
+ }
+ VariantData::Unit(..) => cx.pat_path(self.span, struct_path),
+ }
+ })
+ .collect()
+ }
+
+ fn create_fields<F>(&self, struct_def: &'a VariantData, mk_exprs: F) -> Vec<FieldInfo>
+ where
+ F: Fn(usize, &ast::FieldDef, Span) -> Vec<P<ast::Expr>>,
+ {
+ struct_def
+ .fields()
+ .iter()
+ .enumerate()
+ .map(|(i, struct_field)| {
+ // For this field, get an expr for each selflike_arg. E.g. for
+ // `PartialEq::eq`, one for each of `&self` and `other`.
+ let sp = struct_field.span.with_ctxt(self.span.ctxt());
+ let mut exprs: Vec<_> = mk_exprs(i, struct_field, sp);
+ let self_expr = exprs.remove(0);
+ let other_selflike_exprs = exprs;
+ FieldInfo {
+ span: sp.with_ctxt(self.span.ctxt()),
+ name: struct_field.ident,
+ self_expr,
+ other_selflike_exprs,
+ }
+ })
+ .collect()
+ }
+
+ fn mk_pattern_ident(&self, prefix: &str, i: usize) -> Ident {
+ Ident::from_str_and_span(&format!("{}_{}", prefix, i), self.span)
+ }
+
+ fn create_struct_pattern_fields(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ struct_def: &'a VariantData,
+ prefixes: &[String],
+ addr_of: bool,
+ ) -> Vec<FieldInfo> {
+ self.create_fields(struct_def, |i, _struct_field, sp| {
+ prefixes
+ .iter()
+ .map(|prefix| {
+ let ident = self.mk_pattern_ident(prefix, i);
+ let expr = cx.expr_path(cx.path_ident(sp, ident));
+ if addr_of { cx.expr_addr_of(sp, expr) } else { expr }
+ })
+ .collect()
+ })
+ }
+
+ fn create_struct_field_access_fields(
+ &self,
+ cx: &mut ExtCtxt<'_>,
+ selflike_args: &[P<Expr>],
+ struct_def: &'a VariantData,
+ copy: bool,
+ ) -> Vec<FieldInfo> {
+ self.create_fields(struct_def, |i, struct_field, sp| {
+ selflike_args
+ .iter()
+ .map(|selflike_arg| {
+ // Note: we must use `struct_field.span` rather than `sp` in the
+ // `unwrap_or_else` case; otherwise the hygiene is wrong and we get
+ // "field `0` of struct `Point` is private" errors on tuple
+ // structs.
+ let mut field_expr = cx.expr(
+ sp,
+ ast::ExprKind::Field(
+ selflike_arg.clone(),
+ struct_field.ident.unwrap_or_else(|| {
+ Ident::from_str_and_span(&i.to_string(), struct_field.span)
+ }),
+ ),
+ );
+ if copy {
+ field_expr = cx.expr_block(
+ cx.block(struct_field.span, vec![cx.stmt_expr(field_expr)]),
+ );
+ }
+ cx.expr_addr_of(sp, field_expr)
+ })
+ .collect()
+ })
+ }
+}
+
+/// The function passed to `cs_fold` is called repeatedly with a value of this
+/// type. It describes one part of the code generation. The result is always an
+/// expression.
+pub enum CsFold<'a> {
+ /// The basic case: a field expression for one or more selflike args. E.g.
+ /// for `PartialEq::eq` this is something like `self.x == other.x`.
+ Single(&'a FieldInfo),
+
+ /// The combination of two field expressions. E.g. for `PartialEq::eq` this
+ /// is something like `<field1 equality> && <field2 equality>`.
+ Combine(Span, P<Expr>, P<Expr>),
+
+ /// The fallback case for a struct or enum variant with no fields.
+ Fieldless,
+}
+
+/// Folds over fields, combining the expressions for each field in a sequence.
+/// Statics may not be folded over.
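+///
+/// A sketch of a typical caller (illustrative; `field_eq_expr` is a
+/// hypothetical helper, not part of this module):
+/// ```ignore (illustrative)
+/// let expr = cs_fold(true, cx, span, substructure, |cx, fold| match fold {
+///     CsFold::Single(field) => field_eq_expr(cx, field),
+///     CsFold::Combine(span, lhs, rhs) => cx.expr_binary(span, BinOpKind::And, lhs, rhs),
+///     CsFold::Fieldless => cx.expr_bool(span, true),
+/// });
+/// ```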
+pub fn cs_fold<F>(
+ use_foldl: bool,
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ substructure: &Substructure<'_>,
+ mut f: F,
+) -> P<Expr>
+where
+ F: FnMut(&mut ExtCtxt<'_>, CsFold<'_>) -> P<Expr>,
+{
+ match substructure.fields {
+ EnumMatching(.., all_fields) | Struct(_, all_fields) => {
+ if all_fields.is_empty() {
+ return f(cx, CsFold::Fieldless);
+ }
+
+ let (base_field, rest) = if use_foldl {
+ all_fields.split_first().unwrap()
+ } else {
+ all_fields.split_last().unwrap()
+ };
+
+ let base_expr = f(cx, CsFold::Single(base_field));
+
+ let op = |old, field: &FieldInfo| {
+ let new = f(cx, CsFold::Single(field));
+ f(cx, CsFold::Combine(field.span, old, new))
+ };
+
+ if use_foldl {
+ rest.iter().fold(base_expr, op)
+ } else {
+ rest.iter().rfold(base_expr, op)
+ }
+ }
+ EnumTag(tag_field, match_expr) => {
+ let tag_check_expr = f(cx, CsFold::Single(tag_field));
+ if let Some(match_expr) = match_expr {
+ if use_foldl {
+ f(cx, CsFold::Combine(trait_span, tag_check_expr, match_expr.clone()))
+ } else {
+ f(cx, CsFold::Combine(trait_span, match_expr.clone(), tag_check_expr))
+ }
+ } else {
+ tag_check_expr
+ }
+ }
+ StaticEnum(..) | StaticStruct(..) => cx.span_bug(trait_span, "static function in `derive`"),
+ }
+}
+
+/// Returns `true` if the type has no value fields
+/// (for an enum, no variant has any fields)
+pub fn is_type_without_fields(item: &Annotatable) -> bool {
+ if let Annotatable::Item(ref item) = *item {
+ match item.kind {
+ ast::ItemKind::Enum(ref enum_def, _) => {
+ enum_def.variants.iter().all(|v| v.data.fields().is_empty())
+ }
+ ast::ItemKind::Struct(ref variant_data, _) => variant_data.fields().is_empty(),
+ _ => false,
+ }
+ } else {
+ false
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs b/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs
new file mode 100644
index 000000000..4d46f7cd4
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/generic/ty.rs
@@ -0,0 +1,203 @@
+//! A mini version of ast::Ty, which is easier to use, and features an explicit `Self` type to use
+//! when specifying impls to be derived.
+
+pub use Ty::*;
+
+use rustc_ast::ptr::P;
+use rustc_ast::{self as ast, Expr, GenericArg, GenericParamKind, Generics, SelfKind};
+use rustc_expand::base::ExtCtxt;
+use rustc_span::source_map::{respan, DUMMY_SP};
+use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_span::Span;
+
+/// A path, e.g., `::std::option::Option::<i32>` (global). Has support
+/// for type parameters.
+#[derive(Clone)]
+pub struct Path {
+ path: Vec<Symbol>,
+ params: Vec<Box<Ty>>,
+ kind: PathKind,
+}
+
+#[derive(Clone)]
+pub enum PathKind {
+ Local,
+ Global,
+ Std,
+}
+
+impl Path {
+ pub fn new(path: Vec<Symbol>) -> Path {
+ Path::new_(path, Vec::new(), PathKind::Std)
+ }
+ pub fn new_local(path: Symbol) -> Path {
+ Path::new_(vec![path], Vec::new(), PathKind::Local)
+ }
+ pub fn new_(path: Vec<Symbol>, params: Vec<Box<Ty>>, kind: PathKind) -> Path {
+ Path { path, params, kind }
+ }
+
+ pub fn to_ty(
+ &self,
+ cx: &ExtCtxt<'_>,
+ span: Span,
+ self_ty: Ident,
+ self_generics: &Generics,
+ ) -> P<ast::Ty> {
+ cx.ty_path(self.to_path(cx, span, self_ty, self_generics))
+ }
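+ /// For example (illustrative), a `PathKind::Std` path over
+ /// `[sym::cmp, sym::PartialEq]` resolves to `$crate::cmp::PartialEq`, with
+ /// `$crate` pointing at `core` or `std` via the macro's def-site context.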
+ pub fn to_path(
+ &self,
+ cx: &ExtCtxt<'_>,
+ span: Span,
+ self_ty: Ident,
+ self_generics: &Generics,
+ ) -> ast::Path {
+ let mut idents = self.path.iter().map(|s| Ident::new(*s, span)).collect();
+ let tys = self.params.iter().map(|t| t.to_ty(cx, span, self_ty, self_generics));
+ let params = tys.map(GenericArg::Type).collect();
+
+ match self.kind {
+ PathKind::Global => cx.path_all(span, true, idents, params),
+ PathKind::Local => cx.path_all(span, false, idents, params),
+ PathKind::Std => {
+ let def_site = cx.with_def_site_ctxt(DUMMY_SP);
+ idents.insert(0, Ident::new(kw::DollarCrate, def_site));
+ cx.path_all(span, false, idents, params)
+ }
+ }
+ }
+}
+
+/// A type. Supports pointers, Self, and literals.
+#[derive(Clone)]
+pub enum Ty {
+ Self_,
+ /// A reference.
+ Ref(Box<Ty>, ast::Mutability),
+ /// `mod::mod::Type<[lifetime], [Params...]>`, including a plain type
+ /// parameter, and things like `i32`
+ Path(Path),
+ /// For `()` return types.
+ Unit,
+}
+
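+/// Shorthand for `&Self`, i.e. an immutable reference to the type the
+/// trait is being derived for.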
+pub fn self_ref() -> Ty {
+ Ref(Box::new(Self_), ast::Mutability::Not)
+}
+
+impl Ty {
+ pub fn to_ty(
+ &self,
+ cx: &ExtCtxt<'_>,
+ span: Span,
+ self_ty: Ident,
+ self_generics: &Generics,
+ ) -> P<ast::Ty> {
+ match self {
+ Ref(ty, mutbl) => {
+ let raw_ty = ty.to_ty(cx, span, self_ty, self_generics);
+ cx.ty_rptr(span, raw_ty, None, *mutbl)
+ }
+ Path(p) => p.to_ty(cx, span, self_ty, self_generics),
+ Self_ => cx.ty_path(self.to_path(cx, span, self_ty, self_generics)),
+ Unit => {
+ let ty = ast::TyKind::Tup(vec![]);
+ cx.ty(span, ty)
+ }
+ }
+ }
+
+ pub fn to_path(
+ &self,
+ cx: &ExtCtxt<'_>,
+ span: Span,
+ self_ty: Ident,
+ generics: &Generics,
+ ) -> ast::Path {
+ match *self {
+ Self_ => {
+ let params: Vec<_> = generics
+ .params
+ .iter()
+ .map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ GenericArg::Lifetime(ast::Lifetime { id: param.id, ident: param.ident })
+ }
+ GenericParamKind::Type { .. } => {
+ GenericArg::Type(cx.ty_ident(span, param.ident))
+ }
+ GenericParamKind::Const { .. } => {
+ GenericArg::Const(cx.const_ident(span, param.ident))
+ }
+ })
+ .collect();
+
+ cx.path_all(span, false, vec![self_ty], params)
+ }
+ Path(ref p) => p.to_path(cx, span, self_ty, generics),
+ Ref(..) => cx.span_bug(span, "ref in a path in generic `derive`"),
+ Unit => cx.span_bug(span, "unit in a path in generic `derive`"),
+ }
+ }
+}
+
+fn mk_ty_param(
+ cx: &ExtCtxt<'_>,
+ span: Span,
+ name: Symbol,
+ attrs: &[ast::Attribute],
+ bounds: &[Path],
+ self_ident: Ident,
+ self_generics: &Generics,
+) -> ast::GenericParam {
+ let bounds = bounds
+ .iter()
+ .map(|b| {
+ let path = b.to_path(cx, span, self_ident, self_generics);
+ cx.trait_bound(path)
+ })
+ .collect();
+ cx.typaram(span, Ident::new(name, span), attrs.to_owned(), bounds, None)
+}
+
+/// Bounds on type parameters.
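+///
+/// For example (illustrative; this exact shape appears in the `Hash`
+/// derive), `bounds: vec![(sym::__H, vec![path_std!(hash::Hasher)])]`
+/// describes a single type parameter `__H: ::core::hash::Hasher`.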
+#[derive(Clone)]
+pub struct Bounds {
+ pub bounds: Vec<(Symbol, Vec<Path>)>,
+}
+
+impl Bounds {
+ pub fn empty() -> Bounds {
+ Bounds { bounds: Vec::new() }
+ }
+ pub fn to_generics(
+ &self,
+ cx: &ExtCtxt<'_>,
+ span: Span,
+ self_ty: Ident,
+ self_generics: &Generics,
+ ) -> Generics {
+ let params = self
+ .bounds
+ .iter()
+ .map(|t| {
+ let (name, ref bounds) = *t;
+ mk_ty_param(cx, span, name, &[], &bounds, self_ty, self_generics)
+ })
+ .collect();
+
+ Generics {
+ params,
+ where_clause: ast::WhereClause { has_where_token: false, predicates: Vec::new(), span },
+ span,
+ }
+ }
+}
+
+pub fn get_explicit_self(cx: &ExtCtxt<'_>, span: Span) -> (P<Expr>, ast::ExplicitSelf) {
+ // This constructs a fresh `self` path.
+ let self_path = cx.expr_self(span);
+ let self_ty = respan(span, SelfKind::Region(None, ast::Mutability::Not));
+ (self_path, self_ty)
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/hash.rs b/compiler/rustc_builtin_macros/src/deriving/hash.rs
new file mode 100644
index 000000000..32ae3d344
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/hash.rs
@@ -0,0 +1,80 @@
+use crate::deriving::generic::ty::*;
+use crate::deriving::generic::*;
+use crate::deriving::{path_std, pathvec_std};
+
+use rustc_ast::{MetaItem, Mutability};
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
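+/// Expands `#[derive(Hash)]`. As a rough sketch (the normal, non-packed
+/// case), a struct with fields `x` and `y` gets an impl like:
+/// ```ignore (illustrative)
+/// impl ::core::hash::Hash for A {
+///     fn hash<__H: ::core::hash::Hasher>(&self, state: &mut __H) {
+///         ::core::hash::Hash::hash(&self.x, state);
+///         ::core::hash::Hash::hash(&self.y, state)
+///     }
+/// }
+/// ```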
+pub fn expand_deriving_hash(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ mitem: &MetaItem,
+ item: &Annotatable,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ let path = Path::new_(pathvec_std!(hash::Hash), vec![], PathKind::Std);
+
+ let typaram = sym::__H;
+
+ let arg = Path::new_local(typaram);
+ let hash_trait_def = TraitDef {
+ span,
+ attributes: Vec::new(),
+ path,
+ additional_bounds: Vec::new(),
+ generics: Bounds::empty(),
+ supports_unions: false,
+ methods: vec![MethodDef {
+ name: sym::hash,
+ generics: Bounds { bounds: vec![(typaram, vec![path_std!(hash::Hasher)])] },
+ explicit_self: true,
+ nonself_args: vec![(Ref(Box::new(Path(arg)), Mutability::Mut), sym::state)],
+ ret_ty: Unit,
+ attributes: vec![],
+ unify_fieldless_variants: true,
+ combine_substructure: combine_substructure(Box::new(|a, b, c| {
+ hash_substructure(a, b, c)
+ })),
+ }],
+ associated_types: Vec::new(),
+ };
+
+ hash_trait_def.expand(cx, mitem, item, push);
+}
+
+fn hash_substructure(
+ cx: &mut ExtCtxt<'_>,
+ trait_span: Span,
+ substr: &Substructure<'_>,
+) -> BlockOrExpr {
+ let [state_expr] = substr.nonselflike_args else {
+ cx.span_bug(trait_span, "incorrect number of arguments in `derive(Hash)`");
+ };
+ let call_hash = |span, expr| {
+ let hash_path = {
+ let strs = cx.std_path(&[sym::hash, sym::Hash, sym::hash]);
+
+ cx.expr_path(cx.path_global(span, strs))
+ };
+ let expr = cx.expr_call(span, hash_path, vec![expr, state_expr.clone()]);
+ cx.stmt_expr(expr)
+ };
+
+ let (stmts, match_expr) = match substr.fields {
+ Struct(_, fields) | EnumMatching(.., fields) => {
+ let stmts =
+ fields.iter().map(|field| call_hash(field.span, field.self_expr.clone())).collect();
+ (stmts, None)
+ }
+ EnumTag(tag_field, match_expr) => {
+ assert!(tag_field.other_selflike_exprs.is_empty());
+ let stmts = vec![call_hash(tag_field.span, tag_field.self_expr.clone())];
+ (stmts, match_expr.clone())
+ }
+ _ => cx.span_bug(trait_span, "impossible substructure in `derive(Hash)`"),
+ };
+
+ BlockOrExpr::new_mixed(stmts, match_expr)
+}
diff --git a/compiler/rustc_builtin_macros/src/deriving/mod.rs b/compiler/rustc_builtin_macros/src/deriving/mod.rs
new file mode 100644
index 000000000..c1ca089da
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/deriving/mod.rs
@@ -0,0 +1,208 @@
+//! The compiler code necessary to implement the `#[derive]` extensions.
+
+use rustc_ast as ast;
+use rustc_ast::ptr::P;
+use rustc_ast::{GenericArg, Impl, ItemKind, MetaItem};
+use rustc_expand::base::{Annotatable, ExpandResult, ExtCtxt, MultiItemModifier};
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::Span;
+
+macro path_local($x:ident) {
+ generic::ty::Path::new_local(sym::$x)
+}
+
+macro pathvec_std($($rest:ident)::+) {{
+ vec![ $( sym::$rest ),+ ]
+}}
+
+macro path_std($($x:tt)*) {
+ generic::ty::Path::new( pathvec_std!( $($x)* ) )
+}
+
+pub mod bounds;
+pub mod clone;
+pub mod debug;
+pub mod decodable;
+pub mod default;
+pub mod encodable;
+pub mod hash;
+
+#[path = "cmp/eq.rs"]
+pub mod eq;
+#[path = "cmp/ord.rs"]
+pub mod ord;
+#[path = "cmp/partial_eq.rs"]
+pub mod partial_eq;
+#[path = "cmp/partial_ord.rs"]
+pub mod partial_ord;
+
+pub mod generic;
+
+pub(crate) struct BuiltinDerive(
+ pub(crate) fn(&mut ExtCtxt<'_>, Span, &MetaItem, &Annotatable, &mut dyn FnMut(Annotatable)),
+);
+
+impl MultiItemModifier for BuiltinDerive {
+ fn expand(
+ &self,
+ ecx: &mut ExtCtxt<'_>,
+ span: Span,
+ meta_item: &MetaItem,
+ item: Annotatable,
+ ) -> ExpandResult<Vec<Annotatable>, Annotatable> {
+ // FIXME: Built-in derives often forget to give spans contexts,
+ // so we are doing it here in a centralized way.
+ let span = ecx.with_def_site_ctxt(span);
+ let mut items = Vec::new();
+ match item {
+ Annotatable::Stmt(stmt) => {
+ if let ast::StmtKind::Item(item) = stmt.into_inner().kind {
+ (self.0)(ecx, span, meta_item, &Annotatable::Item(item), &mut |a| {
+ // Cannot use `ecx.stmt_item` here, because we need to pass `ecx`
+ // to the function.
+ items.push(Annotatable::Stmt(P(ast::Stmt {
+ id: ast::DUMMY_NODE_ID,
+ kind: ast::StmtKind::Item(a.expect_item()),
+ span,
+ })));
+ });
+ } else {
+ unreachable!("should have already errored on non-item statement")
+ }
+ }
+ _ => {
+ (self.0)(ecx, span, meta_item, &item, &mut |a| items.push(a));
+ }
+ }
+ ExpandResult::Ready(items)
+ }
+}
+
+/// Constructs an expression that calls an intrinsic
+fn call_intrinsic(
+ cx: &ExtCtxt<'_>,
+ span: Span,
+ intrinsic: Symbol,
+ args: Vec<P<ast::Expr>>,
+) -> P<ast::Expr> {
+ let span = cx.with_def_site_ctxt(span);
+ let path = cx.std_path(&[sym::intrinsics, intrinsic]);
+ cx.expr_call_global(span, path, args)
+}
+
+/// Constructs an expression that calls the `unreachable` intrinsic.
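+/// The call is wrapped in a compiler-generated `unsafe` block, i.e. roughly
+/// `unsafe { ::core::intrinsics::unreachable() }`.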
+fn call_unreachable(cx: &ExtCtxt<'_>, span: Span) -> P<ast::Expr> {
+ let span = cx.with_def_site_ctxt(span);
+ let path = cx.std_path(&[sym::intrinsics, sym::unreachable]);
+ let call = cx.expr_call_global(span, path, vec![]);
+
+ cx.expr_block(P(ast::Block {
+ stmts: vec![cx.stmt_expr(call)],
+ id: ast::DUMMY_NODE_ID,
+ rules: ast::BlockCheckMode::Unsafe(ast::CompilerGenerated),
+ span,
+ tokens: None,
+ could_be_bare_literal: false,
+ }))
+}
+
+// Injects `impl<...> Structural for ItemType<...> { }`. In particular,
+// does *not* add `where T: Structural` for parameters `T` in `...`.
+// (That's the main reason we cannot use TraitDef here.)
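+//
+// For example (illustrative), `struct S<T>(T);` gets
+// `impl<T> Structural for S<T> { }`, with no bound on `T`.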
+fn inject_impl_of_structural_trait(
+ cx: &mut ExtCtxt<'_>,
+ span: Span,
+ item: &Annotatable,
+ structural_path: generic::ty::Path,
+ push: &mut dyn FnMut(Annotatable),
+) {
+ let Annotatable::Item(ref item) = *item else {
+ unreachable!();
+ };
+
+ let generics = match item.kind {
+ ItemKind::Struct(_, ref generics) | ItemKind::Enum(_, ref generics) => generics,
+ // Do not inject `impl Structural for Union`. (`PartialEq` does not
+ // support unions, so we will see an error downstream.)
+ ItemKind::Union(..) => return,
+ _ => unreachable!(),
+ };
+
+ // Create the generic param list for where clauses and impl headers
+ let mut generics = generics.clone();
+
+ // Create the type of `self`.
+ //
+ // In addition, remove defaults from generic params (impls cannot have them).
+ let self_params: Vec<_> = generics
+ .params
+ .iter_mut()
+ .map(|param| match &mut param.kind {
+ ast::GenericParamKind::Lifetime => {
+ ast::GenericArg::Lifetime(cx.lifetime(span, param.ident))
+ }
+ ast::GenericParamKind::Type { default } => {
+ *default = None;
+ ast::GenericArg::Type(cx.ty_ident(span, param.ident))
+ }
+ ast::GenericParamKind::Const { ty: _, kw_span: _, default } => {
+ *default = None;
+ ast::GenericArg::Const(cx.const_ident(span, param.ident))
+ }
+ })
+ .collect();
+
+ let type_ident = item.ident;
+
+ let trait_ref = cx.trait_ref(structural_path.to_path(cx, span, type_ident, &generics));
+ let self_type = cx.ty_path(cx.path_all(span, false, vec![type_ident], self_params));
+
+ // It would be nice to also encode constraint `where Self: Eq` (by adding it
+ // onto `generics` cloned above). Unfortunately, that strategy runs afoul of
+ // rust-lang/rust#48214. So we perform that additional check in the compiler
+ // itself, instead of encoding it here.
+
+ // Keep the lint and stability attributes of the original item, to control
+ // how the generated implementation is linted.
+ let mut attrs = Vec::new();
+ attrs.extend(
+ item.attrs
+ .iter()
+ .filter(|a| {
+ [sym::allow, sym::warn, sym::deny, sym::forbid, sym::stable, sym::unstable]
+ .contains(&a.name_or_empty())
+ })
+ .cloned(),
+ );
+
+ let newitem = cx.item(
+ span,
+ Ident::empty(),
+ attrs,
+ ItemKind::Impl(Box::new(Impl {
+ unsafety: ast::Unsafe::No,
+ polarity: ast::ImplPolarity::Positive,
+ defaultness: ast::Defaultness::Final,
+ constness: ast::Const::No,
+ generics,
+ of_trait: Some(trait_ref),
+ self_ty: self_type,
+ items: Vec::new(),
+ })),
+ );
+
+ push(Annotatable::Item(newitem));
+}
+
+fn assert_ty_bounds(
+ cx: &mut ExtCtxt<'_>,
+ stmts: &mut Vec<ast::Stmt>,
+ ty: P<ast::Ty>,
+ span: Span,
+ assert_path: &[Symbol],
+) {
+ // Generate statement `let _: assert_path<ty>;`.
+ let span = cx.with_def_site_ctxt(span);
+ let assert_path = cx.path_all(span, true, cx.std_path(assert_path), vec![GenericArg::Type(ty)]);
+ stmts.push(cx.stmt_let_type_only(span, cx.ty_path(assert_path)));
+}
diff --git a/compiler/rustc_builtin_macros/src/edition_panic.rs b/compiler/rustc_builtin_macros/src/edition_panic.rs
new file mode 100644
index 000000000..ea0e768a5
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/edition_panic.rs
@@ -0,0 +1,86 @@
+use rustc_ast::ptr::P;
+use rustc_ast::tokenstream::{DelimSpan, TokenStream};
+use rustc_ast::*;
+use rustc_expand::base::*;
+use rustc_span::edition::Edition;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+// This expands to either
+// - `$crate::panic::panic_2015!(...)` or
+// - `$crate::panic::panic_2021!(...)`
+// depending on the edition.
+//
+// This is used for both std::panic!() and core::panic!().
+//
+// `$crate` will refer to either the `std` or `core` crate depending on which
+// one we're expanding from.
+pub fn expand_panic<'cx>(
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn MacResult + 'cx> {
+ let mac = if use_panic_2021(sp) { sym::panic_2021 } else { sym::panic_2015 };
+ expand(mac, cx, sp, tts)
+}
+
+// This expands to either
+// - `$crate::panic::unreachable_2015!(...)` or
+// - `$crate::panic::unreachable_2021!(...)`
+// depending on the edition.
+pub fn expand_unreachable<'cx>(
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn MacResult + 'cx> {
+ let mac = if use_panic_2021(sp) { sym::unreachable_2021 } else { sym::unreachable_2015 };
+ expand(mac, cx, sp, tts)
+}
+
+fn expand<'cx>(
+ mac: rustc_span::Symbol,
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn MacResult + 'cx> {
+ let sp = cx.with_call_site_ctxt(sp);
+
+ MacEager::expr(
+ cx.expr(
+ sp,
+ ExprKind::MacCall(MacCall {
+ path: Path {
+ span: sp,
+ segments: cx
+ .std_path(&[sym::panic, mac])
+ .into_iter()
+ .map(|ident| PathSegment::from_ident(ident))
+ .collect(),
+ tokens: None,
+ },
+ args: P(MacArgs::Delimited(
+ DelimSpan::from_single(sp),
+ MacDelimiter::Parenthesis,
+ tts,
+ )),
+ prior_type_ascription: None,
+ }),
+ ),
+ )
+}
+
+pub fn use_panic_2021(mut span: Span) -> bool {
+ // To determine the edition, we check the first span up the expansion
+ // stack that does not have #[allow_internal_unstable(edition_panic)].
+ // (To avoid using the edition of e.g. the assert!() or debug_assert!() definition.)
+ loop {
+ let expn = span.ctxt().outer_expn_data();
+ if let Some(features) = expn.allow_internal_unstable {
+ if features.iter().any(|&f| f == sym::edition_panic) {
+ span = expn.call_site;
+ continue;
+ }
+ }
+ break expn.edition >= Edition::Edition2021;
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/env.rs b/compiler/rustc_builtin_macros/src/env.rs
new file mode 100644
index 000000000..b8828fa67
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/env.rs
@@ -0,0 +1,91 @@
+// The compiler code necessary to support the `env!` extension. Eventually
+// this should all get sucked into the compiler syntax extension plugin
+// interface.
+
+use rustc_ast::tokenstream::TokenStream;
+use rustc_ast::{self as ast, GenericArg};
+use rustc_expand::base::{self, *};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::Span;
+
+use std::env;
+
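+/// Expands `option_env!("VAR")` into `Some("value")` when `VAR` is set in
+/// the compiler's environment at expansion time, and into
+/// `None::<&'static str>` otherwise (a summary of the code below).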
+pub fn expand_option_env<'cx>(
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ let Some(var) = get_single_str_from_tts(cx, sp, tts, "option_env!") else {
+ return DummyResult::any(sp);
+ };
+
+ let sp = cx.with_def_site_ctxt(sp);
+ let value = env::var(var.as_str()).ok().as_deref().map(Symbol::intern);
+ cx.sess.parse_sess.env_depinfo.borrow_mut().insert((var, value));
+ let e = match value {
+ None => {
+ let lt = cx.lifetime(sp, Ident::new(kw::StaticLifetime, sp));
+ cx.expr_path(cx.path_all(
+ sp,
+ true,
+ cx.std_path(&[sym::option, sym::Option, sym::None]),
+ vec![GenericArg::Type(cx.ty_rptr(
+ sp,
+ cx.ty_ident(sp, Ident::new(sym::str, sp)),
+ Some(lt),
+ ast::Mutability::Not,
+ ))],
+ ))
+ }
+ Some(value) => cx.expr_call_global(
+ sp,
+ cx.std_path(&[sym::option, sym::Option, sym::Some]),
+ vec![cx.expr_str(sp, value)],
+ ),
+ };
+ MacEager::expr(e)
+}
+
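+/// Expands `env!("VAR")` (or `env!("VAR", "custom error message")`) into a
+/// string literal holding the variable's value, or emits a compile-time
+/// error when the variable is undefined (a summary of the code below).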
+pub fn expand_env<'cx>(
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ let mut exprs = match get_exprs_from_tts(cx, sp, tts) {
+ Some(ref exprs) if exprs.is_empty() => {
+ cx.span_err(sp, "env! takes 1 or 2 arguments");
+ return DummyResult::any(sp);
+ }
+ None => return DummyResult::any(sp),
+ Some(exprs) => exprs.into_iter(),
+ };
+
+ let Some((var, _style)) = expr_to_string(cx, exprs.next().unwrap(), "expected string literal") else {
+ return DummyResult::any(sp);
+ };
+ let msg = match exprs.next() {
+ None => Symbol::intern(&format!("environment variable `{}` not defined", var)),
+ Some(second) => match expr_to_string(cx, second, "expected string literal") {
+ None => return DummyResult::any(sp),
+ Some((s, _style)) => s,
+ },
+ };
+
+ if exprs.next().is_some() {
+ cx.span_err(sp, "env! takes 1 or 2 arguments");
+ return DummyResult::any(sp);
+ }
+
+ let sp = cx.with_def_site_ctxt(sp);
+ let value = env::var(var.as_str()).ok().as_deref().map(Symbol::intern);
+ cx.sess.parse_sess.env_depinfo.borrow_mut().insert((var, value));
+ let e = match value {
+ None => {
+ cx.span_err(sp, msg.as_str());
+ return DummyResult::any(sp);
+ }
+ Some(value) => cx.expr_str(sp, value),
+ };
+ MacEager::expr(e)
+}
diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs
new file mode 100644
index 000000000..9eb96ec76
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/format.rs
@@ -0,0 +1,1573 @@
+use ArgumentType::*;
+use Position::*;
+
+use rustc_ast as ast;
+use rustc_ast::ptr::P;
+use rustc_ast::tokenstream::TokenStream;
+use rustc_ast::visit::{self, Visitor};
+use rustc_ast::{token, BlockCheckMode, UnsafeSource};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{pluralize, Applicability, MultiSpan, PResult};
+use rustc_expand::base::{self, *};
+use rustc_parse_format as parse;
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::{BytePos, InnerSpan, Span};
+use smallvec::SmallVec;
+
+use rustc_lint_defs::builtin::NAMED_ARGUMENTS_USED_POSITIONALLY;
+use rustc_lint_defs::{BufferedEarlyLint, BuiltinLintDiagnostics, LintId};
+use rustc_parse_format::Count;
+use std::borrow::Cow;
+use std::collections::hash_map::Entry;
+
+#[derive(PartialEq)]
+enum ArgumentType {
+ Placeholder(&'static str),
+ Count,
+}
+
+enum Position {
+ Exact(usize),
+ Capture(usize),
+ Named(Symbol, InnerSpan),
+}
+
+/// Indicates how a positional named argument (i.e. a named argument which is used by
+/// position instead of by name) is used in a format string.
+/// * `Arg` is the actual argument to print
+/// * `Width` is the width format argument
+/// * `Precision` is the precision format argument
+///
+/// Example: `{Arg:Width$.Precision$}`
+#[derive(Debug, Eq, PartialEq)]
+enum PositionalNamedArgType {
+ Arg,
+ Width,
+ Precision,
+}
+
+/// Contains information necessary to create a lint for a positional named argument
+#[derive(Debug)]
+struct PositionalNamedArg {
+ ty: PositionalNamedArgType,
+ /// The index of the piece using this argument (multiple pieces can use the same argument)
+ cur_piece: usize,
+ /// The `InnerSpan` in the format string to be replaced with the named argument.
+ /// This will be `None` when the position is implicit.
+ inner_span_to_replace: Option<rustc_parse_format::InnerSpan>,
+ /// The name to use instead of the position
+ replacement: Symbol,
+ /// The span for the positional named argument (so the lint can point a message to it)
+ positional_named_arg_span: Span,
+ has_formatting: bool,
+}
+
+impl PositionalNamedArg {
+ /// Determines:
+ /// 1) span to be replaced with the name of the named argument and
+ /// 2) span to be underlined for error messages
+ fn get_positional_arg_spans(&self, cx: &Context<'_, '_>) -> (Option<Span>, Option<Span>) {
+ if let Some(inner_span) = &self.inner_span_to_replace {
+ let span =
+ cx.fmtsp.from_inner(InnerSpan { start: inner_span.start, end: inner_span.end });
+ (Some(span), Some(span))
+ } else if self.ty == PositionalNamedArgType::Arg {
+ // In the case of a named argument whose position is implicit, if the argument *has*
+ // formatting, there will not be a span to replace. Instead, we insert the name after
+ // the `{`, which will be the first character of arg_span. If the argument does *not*
+ // have formatting, there may or may not be a span to replace. This is because
+ // whitespace is allowed in arguments without formatting (such as `format!("{ }", 1);`)
+ // but is not allowed in arguments with formatting (an error will be generated in cases
+ // like `format!("{ :1.1}", 1.0f32);`).
+ // For the message span, if there is formatting, we want to use the opening `{` and the
+ // next character, which will be the `:` indicating the start of formatting. If there is
+ // not any formatting, we want to underline the entire span.
+ cx.arg_spans.get(self.cur_piece).map_or((None, None), |arg_span| {
+ if self.has_formatting {
+ (
+ Some(arg_span.with_lo(arg_span.lo() + BytePos(1)).shrink_to_lo()),
+ Some(arg_span.with_hi(arg_span.lo() + BytePos(2))),
+ )
+ } else {
+ let replace_start = arg_span.lo() + BytePos(1);
+ let replace_end = arg_span.hi() - BytePos(1);
+ let to_replace = arg_span.with_lo(replace_start).with_hi(replace_end);
+ (Some(to_replace), Some(*arg_span))
+ }
+ })
+ } else {
+ (None, None)
+ }
+ }
+}
+
+/// Encapsulates all the named arguments that have been used positionally
+#[derive(Debug)]
+struct PositionalNamedArgsLint {
+ positional_named_args: Vec<PositionalNamedArg>,
+}
+
+impl PositionalNamedArgsLint {
+ /// For a given positional argument, check if the index is for a named argument.
+ ///
+ /// Since positional arguments are required to come before named arguments, if the positional
+ /// index is greater than or equal to the start of named arguments, we know it's a named
+ /// argument used positionally.
+ ///
+ /// Example:
+ /// println!("{} {} {2}", 0, a=1, b=2);
+ ///
+ /// In this case, the first piece (`{}`) would be ArgumentImplicitlyIs with an index of 0. The
+ /// total number of arguments is 3 and the number of named arguments is 2, so the start of named
+ /// arguments is index 1. Therefore, the index of 0 is okay.
+ ///
+ /// The second piece (`{}`) would be ArgumentImplicitlyIs with an index of 1, which is the start
+ /// of named arguments, and so we should add a lint to use the named argument `a`.
+ ///
+ /// The third piece (`{2}`) would be ArgumentIs with an index of 2, which is greater than the
+ /// start of named arguments, and so we should add a lint to use the named argument `b`.
+ ///
+ /// This same check also works for width and precision formatting when either or both are
+ /// CountIsParam, which contains an index into the arguments.
+ fn maybe_add_positional_named_arg(
+ &mut self,
+ current_positional_arg: usize,
+ total_args_length: usize,
+ format_argument_index: usize,
+ ty: PositionalNamedArgType,
+ cur_piece: usize,
+ inner_span_to_replace: Option<rustc_parse_format::InnerSpan>,
+ names: &FxHashMap<Symbol, (usize, Span)>,
+ has_formatting: bool,
+ ) {
+ let start_of_named_args = total_args_length - names.len();
+ if current_positional_arg >= start_of_named_args {
+ self.maybe_push(
+ format_argument_index,
+ ty,
+ cur_piece,
+ inner_span_to_replace,
+ names,
+ has_formatting,
+ )
+ }
+ }
+
+ /// Try constructing a PositionalNamedArg struct and pushing it into the vec of positional
+ /// named arguments. If a named arg associated with `format_argument_index` cannot be found,
+ /// a new item will not be added as the lint cannot be emitted in this case.
+ fn maybe_push(
+ &mut self,
+ format_argument_index: usize,
+ ty: PositionalNamedArgType,
+ cur_piece: usize,
+ inner_span_to_replace: Option<rustc_parse_format::InnerSpan>,
+ names: &FxHashMap<Symbol, (usize, Span)>,
+ has_formatting: bool,
+ ) {
+ let named_arg = names
+ .iter()
+ .find(|&(_, &(index, _))| index == format_argument_index)
+ .map(|found| found.clone());
+
+ if let Some((&replacement, &(_, positional_named_arg_span))) = named_arg {
+ // In FormatSpec, `precision_span` starts at the leading `.`, which we want to keep in
+ // the lint suggestion, so increment `start` by 1 when `PositionalArgumentType` is
+ // `Precision`.
+ let inner_span_to_replace = if ty == PositionalNamedArgType::Precision {
+ inner_span_to_replace
+ .map(|is| rustc_parse_format::InnerSpan { start: is.start + 1, end: is.end })
+ } else {
+ inner_span_to_replace
+ };
+ self.positional_named_args.push(PositionalNamedArg {
+ ty,
+ cur_piece,
+ inner_span_to_replace,
+ replacement,
+ positional_named_arg_span,
+ has_formatting,
+ });
+ }
+ }
+}
+
+struct Context<'a, 'b> {
+ ecx: &'a mut ExtCtxt<'b>,
+ /// The macro's call site. References to unstable formatting internals must
+ /// use this span to pass the stability checker.
+ macsp: Span,
+ /// The span of the format string literal.
+ fmtsp: Span,
+
+ /// List of parsed argument expressions.
+ /// Named expressions are resolved early, and are appended to the end of
+ /// argument expressions.
+ ///
+ /// Example showing the various data structures in motion:
+ ///
+ /// * Original: `"{foo:o} {:o} {foo:x} {0:x} {1:o} {:x} {1:x} {0:o}"`
+ /// * Implicit argument resolution: `"{foo:o} {0:o} {foo:x} {0:x} {1:o} {1:x} {1:x} {0:o}"`
+ /// * Name resolution: `"{2:o} {0:o} {2:x} {0:x} {1:o} {1:x} {1:x} {0:o}"`
+ /// * `arg_types` (in JSON): `[[0, 1, 0], [0, 1, 1], [0, 1]]`
+ /// * `arg_unique_types` (in simplified JSON): `[["o", "x"], ["o", "x"], ["o", "x"]]`
+ /// * `names` (in JSON): `{"foo": 2}`
+ args: Vec<P<ast::Expr>>,
+ /// The number of arguments that were added by implicit capturing.
+ num_captured_args: usize,
+ /// Placeholder slot numbers indexed by argument.
+ arg_types: Vec<Vec<usize>>,
+ /// Unique format specs seen for each argument.
+ arg_unique_types: Vec<Vec<ArgumentType>>,
+ /// Map from named arguments to their resolved indices.
+ names: FxHashMap<Symbol, (usize, Span)>,
+
+ /// The latest consecutive literal strings, or empty if there weren't any.
+ literal: String,
+
+ /// Collection of the compiled `rt::Argument` structures
+ pieces: Vec<P<ast::Expr>>,
+ /// Collection of string literals
+ str_pieces: Vec<P<ast::Expr>>,
+ /// Stays `true` if all formatting parameters are default (as in "{}{}").
+ all_pieces_simple: bool,
+
+ /// Mapping between positional argument references and indices into the
+ /// final generated static argument array. We record the starting indices
+ /// corresponding to each positional argument, and number of references
+ /// consumed so far for each argument, to facilitate correct `Position`
+ /// mapping in `build_piece`. In effect this can be seen as a "flattened"
+ /// version of `arg_unique_types`.
+ ///
+ /// Again with the example described above in docstring for `args`:
+ ///
+ /// * `arg_index_map` (in JSON): `[[0, 1, 0], [2, 3, 3], [4, 5]]`
+ arg_index_map: Vec<Vec<usize>>,
+
+ /// Starting offset of count argument slots.
+ count_args_index_offset: usize,
+
+ /// Count argument slots and tracking data structures.
+ /// Count arguments are separately tracked for de-duplication in case
+ /// multiple references are made to one argument. For example, in this
+ /// format string:
+ ///
+ /// * Original: `"{:.*} {:.foo$} {1:.*} {:.0$}"`
+ /// * Implicit argument resolution: `"{1:.0$} {2:.foo$} {1:.3$} {4:.0$}"`
+ /// * Name resolution: `"{1:.0$} {2:.5$} {1:.3$} {4:.0$}"`
+ /// * `count_positions` (in JSON): `{0: 0, 5: 1, 3: 2}`
+ /// * `count_args`: `vec![0, 5, 3]`
+ count_args: Vec<usize>,
+ /// Relative slot numbers for count arguments.
+ count_positions: FxHashMap<usize, usize>,
+ /// Number of count slots assigned.
+ count_positions_count: usize,
+
+ /// Current position of the implicit positional arg pointer, as if it
+ /// still existed in this phase of processing.
+ /// Used only for `all_pieces_simple` tracking in `build_piece`.
+ curarg: usize,
+ /// Current piece being evaluated, used for error reporting.
+ curpiece: usize,
+ /// Keep track of invalid references to positional arguments.
+ invalid_refs: Vec<(usize, usize)>,
+ /// Spans of all the formatting arguments, in order.
+ arg_spans: Vec<Span>,
+ /// All the formatting arguments that have formatting flags set, in order for diagnostics.
+ arg_with_formatting: Vec<parse::FormatSpec<'a>>,
+
+ /// Whether this format string came from a string literal, as opposed to a macro.
+ is_literal: bool,
+ unused_names_lint: PositionalNamedArgsLint,
+}
+
+pub struct FormatArg {
+ expr: P<ast::Expr>,
+ named: bool,
+}
+
+/// Parses the arguments from the given list of tokens, returning the
+/// diagnostic if there's a parse error, so that we can continue checking
+/// other `format!` expressions.
+///
+/// If parsing succeeds, the return value is:
+///
+/// ```text
+/// Some((fmtstr, parsed arguments, index map for named arguments))
+/// ```
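+///
+/// For example (illustrative), `format_args!("{} {name}", 1, name = 2)`
+/// parses into the format string expression, the argument expressions
+/// `[1, 2]`, and the name map `{name: 1}` (spans omitted).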
+fn parse_args<'a>(
+ ecx: &mut ExtCtxt<'a>,
+ sp: Span,
+ tts: TokenStream,
+) -> PResult<'a, (P<ast::Expr>, Vec<FormatArg>, FxHashMap<Symbol, (usize, Span)>)> {
+ let mut args = Vec::<FormatArg>::new();
+ let mut names = FxHashMap::<Symbol, (usize, Span)>::default();
+
+ let mut p = ecx.new_parser_from_tts(tts);
+
+ if p.token == token::Eof {
+ return Err(ecx.struct_span_err(sp, "requires at least a format string argument"));
+ }
+
+ let first_token = &p.token;
+ let fmtstr = match first_token.kind {
+ token::TokenKind::Literal(token::Lit {
+ kind: token::LitKind::Str | token::LitKind::StrRaw(_),
+ ..
+ }) => {
+ // If the first token is a string literal, then a format expression
+ // is constructed from it.
+ //
+ // This allows us to properly handle cases when the first comma
+ // after the format string is mistakenly replaced with some other
+ // operator, which would cause the expression parser to consume
+ // too many tokens.
+ p.parse_literal_maybe_minus()?
+ }
+ _ => {
+ // Otherwise, we fall back to the expression parser.
+ p.parse_expr()?
+ }
+ };
+
+ let mut first = true;
+ let mut named = false;
+
+ while p.token != token::Eof {
+ if !p.eat(&token::Comma) {
+ if first {
+ p.clear_expected_tokens();
+ }
+
+ match p.expect(&token::Comma) {
+ Err(mut err) => {
+ match token::TokenKind::Comma.similar_tokens() {
+ Some(tks) if tks.contains(&p.token.kind) => {
+ // If a similar token is found, then it may be a typo. We
+ // treat it as a comma and continue parsing.
+ err.emit();
+ p.bump();
+ }
+ // Otherwise stop the parsing and return the error.
+ _ => return Err(err),
+ }
+ }
+ Ok(recovered) => {
+ assert!(recovered);
+ }
+ }
+ }
+ first = false;
+ if p.token == token::Eof {
+ break;
+ } // accept trailing commas
+ match p.token.ident() {
+ Some((ident, _)) if p.look_ahead(1, |t| *t == token::Eq) => {
+ named = true;
+ p.bump();
+ p.expect(&token::Eq)?;
+ let e = p.parse_expr()?;
+ if let Some((prev, _)) = names.get(&ident.name) {
+ ecx.struct_span_err(e.span, &format!("duplicate argument named `{}`", ident))
+ .span_label(args[*prev].expr.span, "previously here")
+ .span_label(e.span, "duplicate argument")
+ .emit();
+ continue;
+ }
+
+ // Resolve names into slots early.
+ // Since all the positional args have already been seen at this
+ // point if the input is valid, we can simply append to the
+ // positional args and remember the names.
+ let slot = args.len();
+ names.insert(ident.name, (slot, ident.span));
+ args.push(FormatArg { expr: e, named: true });
+ }
+ _ => {
+ let e = p.parse_expr()?;
+ if named {
+ let mut err = ecx.struct_span_err(
+ e.span,
+ "positional arguments cannot follow named arguments",
+ );
+ err.span_label(e.span, "positional arguments must be before named arguments");
+ for pos in names.values() {
+ err.span_label(args[pos.0].expr.span, "named argument");
+ }
+ err.emit();
+ }
+ args.push(FormatArg { expr: e, named: false });
+ }
+ }
+ }
+ Ok((fmtstr, args, names))
+}
+
+impl<'a, 'b> Context<'a, 'b> {
+ /// The number of arguments that were explicitly given.
+ fn num_args(&self) -> usize {
+ self.args.len() - self.num_captured_args
+ }
+
+ fn resolve_name_inplace(&mut self, p: &mut parse::Piece<'_>) {
+ // NOTE: the `unwrap_or` branch is needed in case of invalid format
+ // arguments, e.g., `format_args!("{foo}")`.
+ let lookup =
+ |s: &str| self.names.get(&Symbol::intern(s)).unwrap_or(&(0, Span::default())).0;
+
+ match *p {
+ parse::String(_) => {}
+ parse::NextArgument(ref mut arg) => {
+ if let parse::ArgumentNamed(s) = arg.position {
+ arg.position = parse::ArgumentIs(lookup(s));
+ }
+ if let parse::CountIsName(s, _) = arg.format.width {
+ arg.format.width = parse::CountIsParam(lookup(s));
+ }
+ if let parse::CountIsName(s, _) = arg.format.precision {
+ arg.format.precision = parse::CountIsParam(lookup(s));
+ }
+ }
+ }
+ }
+
+ /// Verifies one piece of a parse string, and remembers it if valid.
+ /// Errors are not emitted as fatal so that we can continue reporting
+ /// errors about this and possibly other format strings.
+ fn verify_piece(&mut self, p: &parse::Piece<'_>) {
+ match *p {
+ parse::String(..) => {}
+ parse::NextArgument(ref arg) => {
+ // Check width/precision first: if they use implicit positional
+ // parameters, it makes more sense to consume those first.
+ self.verify_count(
+ arg.format.width,
+ &arg.format.width_span,
+ PositionalNamedArgType::Width,
+ );
+ self.verify_count(
+ arg.format.precision,
+ &arg.format.precision_span,
+ PositionalNamedArgType::Precision,
+ );
+
+ let has_precision = arg.format.precision != Count::CountImplied;
+ let has_width = arg.format.width != Count::CountImplied;
+
+ // Check the argument second: if it's an implicit positional
+ // parameter, it's written second, so it should be consumed after
+ // the width/precision.
+ let pos = match arg.position {
+ parse::ArgumentIs(i) => {
+ self.unused_names_lint.maybe_add_positional_named_arg(
+ i,
+ self.args.len(),
+ i,
+ PositionalNamedArgType::Arg,
+ self.curpiece,
+ Some(arg.position_span),
+ &self.names,
+ has_precision || has_width,
+ );
+
+ Exact(i)
+ }
+ parse::ArgumentImplicitlyIs(i) => {
+ self.unused_names_lint.maybe_add_positional_named_arg(
+ i,
+ self.args.len(),
+ i,
+ PositionalNamedArgType::Arg,
+ self.curpiece,
+ None,
+ &self.names,
+ has_precision || has_width,
+ );
+ Exact(i)
+ }
+ parse::ArgumentNamed(s) => {
+ let symbol = Symbol::intern(s);
+ let span = arg.position_span;
+ Named(symbol, InnerSpan::new(span.start, span.end))
+ }
+ };
+
+ let ty = Placeholder(match arg.format.ty {
+ "" => "Display",
+ "?" => "Debug",
+ "e" => "LowerExp",
+ "E" => "UpperExp",
+ "o" => "Octal",
+ "p" => "Pointer",
+ "b" => "Binary",
+ "x" => "LowerHex",
+ "X" => "UpperHex",
+ _ => {
+ let fmtsp = self.fmtsp;
+ let sp = arg
+ .format
+ .ty_span
+ .map(|sp| fmtsp.from_inner(InnerSpan::new(sp.start, sp.end)));
+ let mut err = self.ecx.struct_span_err(
+ sp.unwrap_or(fmtsp),
+ &format!("unknown format trait `{}`", arg.format.ty),
+ );
+ err.note(
+ "the only appropriate formatting traits are:\n\
+ - ``, which uses the `Display` trait\n\
+ - `?`, which uses the `Debug` trait\n\
+ - `e`, which uses the `LowerExp` trait\n\
+ - `E`, which uses the `UpperExp` trait\n\
+ - `o`, which uses the `Octal` trait\n\
+ - `p`, which uses the `Pointer` trait\n\
+ - `b`, which uses the `Binary` trait\n\
+ - `x`, which uses the `LowerHex` trait\n\
+ - `X`, which uses the `UpperHex` trait",
+ );
+ if let Some(sp) = sp {
+ for (fmt, name) in &[
+ ("", "Display"),
+ ("?", "Debug"),
+ ("e", "LowerExp"),
+ ("E", "UpperExp"),
+ ("o", "Octal"),
+ ("p", "Pointer"),
+ ("b", "Binary"),
+ ("x", "LowerHex"),
+ ("X", "UpperHex"),
+ ] {
+ // FIXME: rustfix (`run-rustfix`) fails to apply suggestions.
+ // > "Cannot replace slice of data that was already replaced"
+ err.tool_only_span_suggestion(
+ sp,
+ &format!("use the `{}` trait", name),
+ *fmt,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ err.emit();
+ "<invalid>"
+ }
+ });
+ self.verify_arg_type(pos, ty);
+ self.curpiece += 1;
+ }
+ }
+ }
+
+ fn verify_count(
+ &mut self,
+ c: parse::Count<'_>,
+ inner_span: &Option<rustc_parse_format::InnerSpan>,
+ named_arg_type: PositionalNamedArgType,
+ ) {
+ match c {
+ parse::CountImplied | parse::CountIs(..) => {}
+ parse::CountIsParam(i) => {
+ self.unused_names_lint.maybe_add_positional_named_arg(
+ i,
+ self.args.len(),
+ i,
+ named_arg_type,
+ self.curpiece,
+ *inner_span,
+ &self.names,
+ true,
+ );
+ self.verify_arg_type(Exact(i), Count);
+ }
+ parse::CountIsName(s, span) => {
+ self.verify_arg_type(
+ Named(Symbol::intern(s), InnerSpan::new(span.start, span.end)),
+ Count,
+ );
+ }
+ }
+ }
+
+ fn describe_num_args(&self) -> Cow<'_, str> {
+ match self.num_args() {
+ 0 => "no arguments were given".into(),
+ 1 => "there is 1 argument".into(),
+ x => format!("there are {} arguments", x).into(),
+ }
+ }
+
+ /// Handle invalid references to positional arguments. Output different
+ /// errors for the case where all arguments are positional and for when
+ /// there are named arguments or numbered positional arguments in the
+ /// format string.
+ fn report_invalid_references(&self, numbered_position_args: bool) {
+ let mut e;
+ let sp = if !self.arg_spans.is_empty() {
+ // Point at the formatting arguments.
+ MultiSpan::from_spans(self.arg_spans.clone())
+ } else {
+ MultiSpan::from_span(self.fmtsp)
+ };
+ let refs =
+ self.invalid_refs.iter().map(|(r, pos)| (r.to_string(), self.arg_spans.get(*pos)));
+
+ let mut zero_based_note = false;
+
+ let count = self.pieces.len()
+ + self.arg_with_formatting.iter().filter(|fmt| fmt.precision_span.is_some()).count();
+ if self.names.is_empty() && !numbered_position_args && count != self.num_args() {
+ e = self.ecx.struct_span_err(
+ sp,
+ &format!(
+ "{} positional argument{} in format string, but {}",
+ count,
+ pluralize!(count),
+ self.describe_num_args(),
+ ),
+ );
+ for arg in &self.args {
+ // Point at the arguments that will be formatted.
+ e.span_label(arg.span, "");
+ }
+ } else {
+ let (mut refs, spans): (Vec<_>, Vec<_>) = refs.unzip();
+ // Avoid `invalid reference to positional arguments 7 and 7 (there is 1 argument)`
+ // for `println!("{7:7$}", 1);`
+ refs.sort();
+ refs.dedup();
+ let spans: Vec<_> = spans.into_iter().filter_map(|sp| sp.copied()).collect();
+ let sp = if self.arg_spans.is_empty() || spans.is_empty() {
+ MultiSpan::from_span(self.fmtsp)
+ } else {
+ MultiSpan::from_spans(spans)
+ };
+ let arg_list = if refs.len() == 1 {
+ format!("argument {}", refs[0])
+ } else {
+ let reg = refs.pop().unwrap();
+ format!("arguments {head} and {tail}", head = refs.join(", "), tail = reg)
+ };
+
+ e = self.ecx.struct_span_err(
+ sp,
+ &format!(
+ "invalid reference to positional {} ({})",
+ arg_list,
+ self.describe_num_args()
+ ),
+ );
+ zero_based_note = true;
+ };
+
+ for fmt in &self.arg_with_formatting {
+ if let Some(span) = fmt.precision_span {
+ let span = self.fmtsp.from_inner(InnerSpan::new(span.start, span.end));
+ match fmt.precision {
+ parse::CountIsParam(pos) if pos > self.num_args() => {
+ e.span_label(
+ span,
+ &format!(
+ "this precision flag expects an `usize` argument at position {}, \
+ but {}",
+ pos,
+ self.describe_num_args(),
+ ),
+ );
+ zero_based_note = true;
+ }
+ parse::CountIsParam(pos) => {
+ let count = self.pieces.len()
+ + self
+ .arg_with_formatting
+ .iter()
+ .filter(|fmt| fmt.precision_span.is_some())
+ .count();
+ e.span_label(
+ span,
+ &format!(
+ "this precision flag adds an extra required argument at position {}, \
+ which is why there {} expected",
+ pos,
+ if count == 1 {
+ "is 1 argument".to_string()
+ } else {
+ format!("are {} arguments", count)
+ },
+ ),
+ );
+ if let Some(arg) = self.args.get(pos) {
+ e.span_label(
+ arg.span,
+ "this parameter corresponds to the precision flag",
+ );
+ }
+ zero_based_note = true;
+ }
+ _ => {}
+ }
+ }
+ if let Some(span) = fmt.width_span {
+ let span = self.fmtsp.from_inner(InnerSpan::new(span.start, span.end));
+ match fmt.width {
+ parse::CountIsParam(pos) if pos >= self.num_args() => {
+ e.span_label(
+ span,
+ &format!(
+ "this width flag expects an `usize` argument at position {}, \
+ but {}",
+ pos,
+ self.describe_num_args(),
+ ),
+ );
+ zero_based_note = true;
+ }
+ _ => {}
+ }
+ }
+ }
+ if zero_based_note {
+ e.note("positional arguments are zero-based");
+ }
+ if !self.arg_with_formatting.is_empty() {
+ e.note(
+ "for information about formatting flags, visit \
+ https://doc.rust-lang.org/std/fmt/index.html",
+ );
+ }
+
+ e.emit();
+ }
+
+ /// Actually verifies and tracks a given format placeholder
+ /// (a.k.a. argument).
+ fn verify_arg_type(&mut self, arg: Position, ty: ArgumentType) {
+ if let Exact(arg) = arg {
+ if arg >= self.num_args() {
+ self.invalid_refs.push((arg, self.curpiece));
+ return;
+ }
+ }
+
+ match arg {
+ Exact(arg) | Capture(arg) => {
+ match ty {
+ Placeholder(_) => {
+ // record every (position, type) combination only once
+ let seen_ty = &mut self.arg_unique_types[arg];
+ let i = seen_ty.iter().position(|x| *x == ty).unwrap_or_else(|| {
+ let i = seen_ty.len();
+ seen_ty.push(ty);
+ i
+ });
+ self.arg_types[arg].push(i);
+ }
+ Count => {
+ if let Entry::Vacant(e) = self.count_positions.entry(arg) {
+ let i = self.count_positions_count;
+ e.insert(i);
+ self.count_args.push(arg);
+ self.count_positions_count += 1;
+ }
+ }
+ }
+ }
+
+ Named(name, span) => {
+ match self.names.get(&name) {
+ Some(&idx) => {
+ // Treat as positional arg.
+ self.verify_arg_type(Capture(idx.0), ty)
+ }
+ None => {
+ // For the moment, capturing variables from format strings
+ // expanded from macros is disabled (see RFC #2795).
+ if self.is_literal {
+ // Treat this name as a variable to capture from the surrounding scope
+ let idx = self.args.len();
+ self.arg_types.push(Vec::new());
+ self.arg_unique_types.push(Vec::new());
+ // `self.is_literal` is known to be true in this branch.
+ let span = self.fmtsp.from_inner(span);
+ self.num_captured_args += 1;
+ self.args.push(self.ecx.expr_ident(span, Ident::new(name, span)));
+ self.names.insert(name, (idx, span));
+ self.verify_arg_type(Capture(idx), ty)
+ } else {
+ let msg = format!("there is no argument named `{}`", name);
+ // `self.is_literal` is known to be false in this branch.
+ let sp = self.fmtsp;
+ let mut err = self.ecx.struct_span_err(sp, &msg);
+
+ err.note(&format!(
+ "did you intend to capture a variable `{}` from \
+ the surrounding scope?",
+ name
+ ));
+ err.note(
+ "to avoid ambiguity, `format_args!` cannot capture variables \
+ when the format string is expanded from a macro",
+ );
+
+ err.emit();
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /// Builds the mapping between format placeholders and argument objects.
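+ ///
+ /// Continuing the example from the `args` field docs: with `arg_types`
+ /// `[[0, 1, 0], [0, 1, 1], [0, 1]]` and two unique types per argument,
+ /// this produces `arg_index_map` `[[0, 1, 0], [2, 3, 3], [4, 5]]` and a
+ /// `count_args_index_offset` of 6.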
+ fn build_index_map(&mut self) {
+ // NOTE: Keep the ordering the same as `into_expr`'s expansion would do!
+ let args_len = self.args.len();
+ self.arg_index_map.reserve(args_len);
+
+ let mut sofar = 0usize;
+
+ // Map the arguments
+ for i in 0..args_len {
+ let arg_types = &self.arg_types[i];
+ let arg_offsets = arg_types.iter().map(|offset| sofar + *offset).collect::<Vec<_>>();
+ self.arg_index_map.push(arg_offsets);
+ sofar += self.arg_unique_types[i].len();
+ }
+
+ // Record starting index for counts, which appear just after arguments
+ self.count_args_index_offset = sofar;
+ }
+
+ fn rtpath(ecx: &ExtCtxt<'_>, s: Symbol) -> Vec<Ident> {
+ ecx.std_path(&[sym::fmt, sym::rt, sym::v1, s])
+ }
+
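+ /// Builds a `rt::v1::Count` expression for a parsed width or precision
+ /// count, mapping parameter counts to their slots in the argument array.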
+ fn build_count(&self, c: parse::Count<'_>) -> P<ast::Expr> {
+ let sp = self.macsp;
+ let count = |c, arg| {
+ let mut path = Context::rtpath(self.ecx, sym::Count);
+ path.push(Ident::new(c, sp));
+ match arg {
+ Some(arg) => self.ecx.expr_call_global(sp, path, vec![arg]),
+ None => self.ecx.expr_path(self.ecx.path_global(sp, path)),
+ }
+ };
+ match c {
+ parse::CountIs(i) => count(sym::Is, Some(self.ecx.expr_usize(sp, i))),
+ parse::CountIsParam(i) => {
+ // This needs mapping too, as `i` refers to a macro
+ // argument. If `i` is not found in `count_positions`, the
+ // error has already been emitted elsewhere.
+ let i = self.count_positions.get(&i).cloned().unwrap_or(0)
+ + self.count_args_index_offset;
+ count(sym::Param, Some(self.ecx.expr_usize(sp, i)))
+ }
+ parse::CountImplied => count(sym::Implied, None),
+ // should never be the case, names are already resolved
+ parse::CountIsName(..) => panic!("should never happen"),
+ }
+ }
+
+ /// Build a literal expression from the accumulated string literals
+ fn build_literal_string(&mut self) -> P<ast::Expr> {
+ let sp = self.fmtsp;
+ let s = Symbol::intern(&self.literal);
+ self.literal.clear();
+ self.ecx.expr_str(sp, s)
+ }
+
+ /// Builds a static `rt::Argument` from a `parse::Piece`, or appends
+ /// to the `literal` string.
+ fn build_piece(
+ &mut self,
+ piece: &parse::Piece<'a>,
+ arg_index_consumed: &mut Vec<usize>,
+ ) -> Option<P<ast::Expr>> {
+ let sp = self.macsp;
+ match *piece {
+ parse::String(s) => {
+ self.literal.push_str(s);
+ None
+ }
+ parse::NextArgument(ref arg) => {
+ // Build the position
+ let pos = {
+ match arg.position {
+ parse::ArgumentIs(i, ..) | parse::ArgumentImplicitlyIs(i) => {
+ // Map to index in final generated argument array
+ // in case of multiple types specified
+ let arg_idx = match arg_index_consumed.get_mut(i) {
+ None => 0, // error already emitted elsewhere
+ Some(offset) => {
+ let idx_map = &self.arg_index_map[i];
+ // unwrap_or branch: error already emitted elsewhere
+ let arg_idx = *idx_map.get(*offset).unwrap_or(&0);
+ *offset += 1;
+ arg_idx
+ }
+ };
+ self.ecx.expr_usize(sp, arg_idx)
+ }
+
+ // should never be the case, because names are already
+ // resolved.
+ parse::ArgumentNamed(..) => panic!("should never happen"),
+ }
+ };
+
+ let simple_arg = parse::Argument {
+ position: {
+ // We don't have ArgumentNext any more, so we have to
+ // track the current argument ourselves.
+ let i = self.curarg;
+ self.curarg += 1;
+ parse::ArgumentIs(i)
+ },
+ position_span: arg.position_span,
+ format: parse::FormatSpec {
+ fill: arg.format.fill,
+ align: parse::AlignUnknown,
+ flags: 0,
+ precision: parse::CountImplied,
+ precision_span: None,
+ width: parse::CountImplied,
+ width_span: None,
+ ty: arg.format.ty,
+ ty_span: arg.format.ty_span,
+ },
+ };
+
+ let fill = arg.format.fill.unwrap_or(' ');
+
+ let pos_simple = arg.position.index() == simple_arg.position.index();
+
+ if arg.format.precision_span.is_some() || arg.format.width_span.is_some() {
+ self.arg_with_formatting.push(arg.format);
+ }
+ if !pos_simple || arg.format != simple_arg.format || fill != ' ' {
+ self.all_pieces_simple = false;
+ }
+
+ // Build the format
+ let fill = self.ecx.expr_lit(sp, ast::LitKind::Char(fill));
+ let align = |name| {
+ let mut p = Context::rtpath(self.ecx, sym::Alignment);
+ p.push(Ident::new(name, sp));
+ self.ecx.path_global(sp, p)
+ };
+ let align = match arg.format.align {
+ parse::AlignLeft => align(sym::Left),
+ parse::AlignRight => align(sym::Right),
+ parse::AlignCenter => align(sym::Center),
+ parse::AlignUnknown => align(sym::Unknown),
+ };
+ let align = self.ecx.expr_path(align);
+ let flags = self.ecx.expr_u32(sp, arg.format.flags);
+ let prec = self.build_count(arg.format.precision);
+ let width = self.build_count(arg.format.width);
+ let path = self.ecx.path_global(sp, Context::rtpath(self.ecx, sym::FormatSpec));
+ let fmt = self.ecx.expr_struct(
+ sp,
+ path,
+ vec![
+ self.ecx.field_imm(sp, Ident::new(sym::fill, sp), fill),
+ self.ecx.field_imm(sp, Ident::new(sym::align, sp), align),
+ self.ecx.field_imm(sp, Ident::new(sym::flags, sp), flags),
+ self.ecx.field_imm(sp, Ident::new(sym::precision, sp), prec),
+ self.ecx.field_imm(sp, Ident::new(sym::width, sp), width),
+ ],
+ );
+
+ let path = self.ecx.path_global(sp, Context::rtpath(self.ecx, sym::Argument));
+ Some(self.ecx.expr_struct(
+ sp,
+ path,
+ vec![
+ self.ecx.field_imm(sp, Ident::new(sym::position, sp), pos),
+ self.ecx.field_imm(sp, Ident::new(sym::format, sp), fmt),
+ ],
+ ))
+ }
+ }
+ }
+
+ /// Actually builds the expression which the format_args! block will be
+ /// expanded to.
+ fn into_expr(self) -> P<ast::Expr> {
+ let mut original_args = self.args;
+ let mut fmt_args = Vec::with_capacity(
+ self.arg_unique_types.iter().map(|v| v.len()).sum::<usize>() + self.count_args.len(),
+ );
+
+ // First, build up the static array which will become our precompiled
+ // format "string"
+ let pieces = self.ecx.expr_array_ref(self.fmtsp, self.str_pieces);
+
+ // We need to construct a &[ArgumentV1] to pass into the fmt::Arguments
+ // constructor. In general the expressions in this slice might be
+ // permuted from their order in original_args (such as in the case of
+ // "{1} {0}"), or may have multiple entries referring to the same
+ // element of original_args ("{0} {0}").
+ //
+ // The following vector has one item per element of our output slice,
+ // identifying the index of which element of original_args it's passing,
+ // and that argument's type.
+ let mut fmt_arg_index_and_ty = SmallVec::<[(usize, &ArgumentType); 8]>::new();
+ for (i, unique_types) in self.arg_unique_types.iter().enumerate() {
+ fmt_arg_index_and_ty.extend(unique_types.iter().map(|ty| (i, ty)));
+ }
+ fmt_arg_index_and_ty.extend(self.count_args.iter().map(|&i| (i, &Count)));
+
+ // Figure out whether there are permuted or repeated elements. If not,
+ // we can generate simpler code.
+ //
+ // The sequence has no indices out of order or repeated if: for every
+ // adjacent pair of elements, the first one's index is less than the
+ // second one's index.
+ let nicely_ordered =
+ fmt_arg_index_and_ty.array_windows().all(|[(i, _i_ty), (j, _j_ty)]| i < j);
+
+ // We want to emit:
+ //
+ // [ArgumentV1::new(&$arg0, …), ArgumentV1::new(&$arg1, …), …]
+ //
+ // However, it's only legal to do so if $arg0, $arg1, … were written in
+ // exactly that order by the programmer. When arguments are permuted, we
+ // want them evaluated in the order written by the programmer, not in
+ // the order provided to fmt::Arguments. When arguments are repeated, we
+ // want the expression evaluated only once.
+ //
+ // Further, if any arg _after the first one_ contains a yield point such
+ // as `await` or `yield`, the above short form is inconvenient for the
+ // caller because it would keep a temporary of type ArgumentV1 alive
+ // across the yield point. ArgumentV1 can't implement Send since it
+ // holds a type-erased arbitrary type.
+ //
+ // Thus in the not nicely ordered case, and in the yielding case, we
+ // emit the following instead:
+ //
+ // match (&$arg0, &$arg1, …) {
+ // args => [ArgumentV1::new(args.$i, …), ArgumentV1::new(args.$j, …), …]
+ // }
+ //
+ // for the sequence of indices $i, $j, … governed by fmt_arg_index_and_ty.
+ // This more verbose representation ensures that all arguments are
+ // evaluated a single time each, in the order written by the programmer,
+ // and that the surrounding future/generator (if any) is Send whenever
+ // possible.
+ let no_need_for_match =
+ nicely_ordered && !original_args.iter().skip(1).any(|e| may_contain_yield_point(e));
+
+ for (arg_index, arg_ty) in fmt_arg_index_and_ty {
+ let e = &mut original_args[arg_index];
+ let span = e.span;
+ let arg = if no_need_for_match {
+ let expansion_span = e.span.with_ctxt(self.macsp.ctxt());
+ // The indices are strictly ordered so e has not been taken yet.
+ self.ecx.expr_addr_of(expansion_span, P(e.take()))
+ } else {
+ let def_site = self.ecx.with_def_site_ctxt(span);
+ let args_tuple = self.ecx.expr_ident(def_site, Ident::new(sym::args, def_site));
+ let member = Ident::new(sym::integer(arg_index), def_site);
+ self.ecx.expr(def_site, ast::ExprKind::Field(args_tuple, member))
+ };
+ fmt_args.push(Context::format_arg(self.ecx, self.macsp, span, arg_ty, arg));
+ }
+
+ let args_array = self.ecx.expr_array(self.macsp, fmt_args);
+ let args_slice = self.ecx.expr_addr_of(
+ self.macsp,
+ if no_need_for_match {
+ args_array
+ } else {
+ // In the !no_need_for_match case, none of the exprs were moved
+ // away in the previous loop.
+ //
+ // This uses the arg span for `&arg` so that borrowck errors
+ // point to the specific expression passed to the macro (the
+ // span is otherwise unavailable in the MIR used by borrowck).
+ let heads = original_args
+ .into_iter()
+ .map(|e| self.ecx.expr_addr_of(e.span.with_ctxt(self.macsp.ctxt()), e))
+ .collect();
+
+ let pat = self.ecx.pat_ident(self.macsp, Ident::new(sym::args, self.macsp));
+ let arm = self.ecx.arm(self.macsp, pat, args_array);
+ let head = self.ecx.expr(self.macsp, ast::ExprKind::Tup(heads));
+ self.ecx.expr_match(self.macsp, head, vec![arm])
+ },
+ );
+
+ // Now create the fmt::Arguments struct with all our locals we created.
+ let (fn_name, fn_args) = if self.all_pieces_simple {
+ ("new_v1", vec![pieces, args_slice])
+ } else {
+ // Build up the static array which will store our precompiled
+ // nonstandard placeholders, if there are any.
+ let fmt = self.ecx.expr_array_ref(self.macsp, self.pieces);
+
+ let path = self.ecx.std_path(&[sym::fmt, sym::UnsafeArg, sym::new]);
+ let unsafe_arg = self.ecx.expr_call_global(self.macsp, path, Vec::new());
+ let unsafe_expr = self.ecx.expr_block(P(ast::Block {
+ stmts: vec![self.ecx.stmt_expr(unsafe_arg)],
+ id: ast::DUMMY_NODE_ID,
+ rules: BlockCheckMode::Unsafe(UnsafeSource::CompilerGenerated),
+ span: self.macsp,
+ tokens: None,
+ could_be_bare_literal: false,
+ }));
+
+ ("new_v1_formatted", vec![pieces, args_slice, fmt, unsafe_expr])
+ };
+
+ let path = self.ecx.std_path(&[sym::fmt, sym::Arguments, Symbol::intern(fn_name)]);
+ self.ecx.expr_call_global(self.macsp, path, fn_args)
+ }
+
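+ /// Builds the `ArgumentV1` constructor call for a single argument
+ /// expression: one of the `new_*` trait constructors for placeholders, or
+ /// `from_usize` for count arguments.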
+ fn format_arg(
+ ecx: &ExtCtxt<'_>,
+ macsp: Span,
+ mut sp: Span,
+ ty: &ArgumentType,
+ arg: P<ast::Expr>,
+ ) -> P<ast::Expr> {
+ sp = ecx.with_def_site_ctxt(sp);
+ let trait_ = match *ty {
+ Placeholder(trait_) if trait_ == "<invalid>" => return DummyResult::raw_expr(sp, true),
+ Placeholder(trait_) => trait_,
+ Count => {
+ let path = ecx.std_path(&[sym::fmt, sym::ArgumentV1, sym::from_usize]);
+ return ecx.expr_call_global(macsp, path, vec![arg]);
+ }
+ };
+ let new_fn_name = match trait_ {
+ "Display" => "new_display",
+ "Debug" => "new_debug",
+ "LowerExp" => "new_lower_exp",
+ "UpperExp" => "new_upper_exp",
+ "Octal" => "new_octal",
+ "Pointer" => "new_pointer",
+ "Binary" => "new_binary",
+ "LowerHex" => "new_lower_hex",
+ "UpperHex" => "new_upper_hex",
+ _ => unreachable!(),
+ };
+
+ let path = ecx.std_path(&[sym::fmt, sym::ArgumentV1, Symbol::intern(new_fn_name)]);
+ ecx.expr_call_global(sp, path, vec![arg])
+ }
+}
+
+fn expand_format_args_impl<'cx>(
+ ecx: &'cx mut ExtCtxt<'_>,
+ mut sp: Span,
+ tts: TokenStream,
+ nl: bool,
+) -> Box<dyn base::MacResult + 'cx> {
+ sp = ecx.with_def_site_ctxt(sp);
+ match parse_args(ecx, sp, tts) {
+ Ok((efmt, args, names)) => {
+ MacEager::expr(expand_preparsed_format_args(ecx, sp, efmt, args, names, nl))
+ }
+ Err(mut err) => {
+ err.emit();
+ DummyResult::any(sp)
+ }
+ }
+}
+
+pub fn expand_format_args<'cx>(
+ ecx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ expand_format_args_impl(ecx, sp, tts, false)
+}
+
+pub fn expand_format_args_nl<'cx>(
+ ecx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ expand_format_args_impl(ecx, sp, tts, true)
+}
+
+fn create_lints_for_named_arguments_used_positionally(cx: &mut Context<'_, '_>) {
+ for named_arg in &cx.unused_names_lint.positional_named_args {
+ let (position_sp_to_replace, position_sp_for_msg) = named_arg.get_positional_arg_spans(cx);
+
+ let msg = format!("named argument `{}` is not used by name", named_arg.replacement);
+
+ cx.ecx.buffered_early_lint.push(BufferedEarlyLint {
+ span: MultiSpan::from_span(named_arg.positional_named_arg_span),
+ msg: msg.clone(),
+ node_id: ast::CRATE_NODE_ID,
+ lint_id: LintId::of(&NAMED_ARGUMENTS_USED_POSITIONALLY),
+ diagnostic: BuiltinLintDiagnostics::NamedArgumentUsedPositionally {
+ position_sp_to_replace,
+ position_sp_for_msg,
+ named_arg_sp: named_arg.positional_named_arg_span,
+ named_arg_name: named_arg.replacement.to_string(),
+ is_formatting_arg: named_arg.ty != PositionalNamedArgType::Arg,
+ },
+ });
+ }
+}
+
+/// Take the various parts of `format_args!(efmt, args..., name=names...)`
+/// and construct the appropriate formatting expression.
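+///
+/// For instance, `format_args!("{} and {}", a, b)` expands roughly (a sketch,
+/// not the exact tokens) to `Arguments::new_v1(&["", " and "],
+/// &[ArgumentV1::new_display(&a), ArgumentV1::new_display(&b)])`.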
+pub fn expand_preparsed_format_args(
+ ecx: &mut ExtCtxt<'_>,
+ sp: Span,
+ efmt: P<ast::Expr>,
+ args: Vec<FormatArg>,
+ names: FxHashMap<Symbol, (usize, Span)>,
+ append_newline: bool,
+) -> P<ast::Expr> {
+ // NOTE: this verbose way of initializing `Vec<Vec<ArgumentType>>` is because
+ // `ArgumentType` does not derive `Clone`.
+ let arg_types: Vec<_> = (0..args.len()).map(|_| Vec::new()).collect();
+ let arg_unique_types: Vec<_> = (0..args.len()).map(|_| Vec::new()).collect();
+
+ let macsp = ecx.with_def_site_ctxt(ecx.call_site());
+
+ let msg = "format argument must be a string literal";
+ let fmt_sp = efmt.span;
+ let efmt_kind_is_lit: bool = matches!(efmt.kind, ast::ExprKind::Lit(_));
+ let (fmt_str, fmt_style, fmt_span) = match expr_to_spanned_string(ecx, efmt, msg) {
+ Ok(mut fmt) if append_newline => {
+ fmt.0 = Symbol::intern(&format!("{}\n", fmt.0));
+ fmt
+ }
+ Ok(fmt) => fmt,
+ Err(err) => {
+ if let Some((mut err, suggested)) = err {
+ let sugg_fmt = match args.len() {
+ 0 => "{}".to_string(),
+ _ => format!("{}{{}}", "{} ".repeat(args.len())),
+ };
+ if !suggested {
+ err.span_suggestion(
+ fmt_sp.shrink_to_lo(),
+ "you might be missing a string literal to format with",
+ format!("\"{}\", ", sugg_fmt),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+ return DummyResult::raw_expr(sp, true);
+ }
+ };
+
+ let str_style = match fmt_style {
+ ast::StrStyle::Cooked => None,
+ ast::StrStyle::Raw(raw) => Some(raw as usize),
+ };
+
+ let fmt_str = fmt_str.as_str(); // for the suggestions below
+ let fmt_snippet = ecx.source_map().span_to_snippet(fmt_sp).ok();
+ let mut parser = parse::Parser::new(
+ fmt_str,
+ str_style,
+ fmt_snippet,
+ append_newline,
+ parse::ParseMode::Format,
+ );
+
+ let mut unverified_pieces = Vec::new();
+ while let Some(piece) = parser.next() {
+ if !parser.errors.is_empty() {
+ break;
+ }
+ unverified_pieces.push(piece);
+ }
+
+ if !parser.errors.is_empty() {
+ let err = parser.errors.remove(0);
+ let sp = if efmt_kind_is_lit {
+ fmt_span.from_inner(InnerSpan::new(err.span.start, err.span.end))
+ } else {
+ // The format string could be another macro invocation, e.g.:
+ // format!(concat!("abc", "{}"), 4);
+ // However, `err.span` is an inner span relative to the *result* of
+ // the macro invocation, which is why we would get a nonsensical
+ // result calling `fmt_span.from_inner(err.span)` as above, and
+ // might even end up inside a multibyte character (issue #86085).
+ // Therefore, we conservatively report the error for the entire
+ // argument span here.
+ fmt_span
+ };
+ let mut e = ecx.struct_span_err(sp, &format!("invalid format string: {}", err.description));
+ e.span_label(sp, err.label + " in format string");
+ if let Some(note) = err.note {
+ e.note(&note);
+ }
+ if let Some((label, span)) = err.secondary_label {
+ if efmt_kind_is_lit {
+ e.span_label(fmt_span.from_inner(InnerSpan::new(span.start, span.end)), label);
+ }
+ }
+ if err.should_be_replaced_with_positional_argument {
+ let captured_arg_span =
+ fmt_span.from_inner(InnerSpan::new(err.span.start, err.span.end));
+ let positional_args = args.iter().filter(|arg| !arg.named).collect::<Vec<_>>();
+ if let Ok(arg) = ecx.source_map().span_to_snippet(captured_arg_span) {
+ let span = match positional_args.last() {
+ Some(arg) => arg.expr.span,
+ None => fmt_sp,
+ };
+ e.multipart_suggestion_verbose(
+ "consider using a positional formatting argument instead",
+ vec![
+ (captured_arg_span, positional_args.len().to_string()),
+ (span.shrink_to_hi(), format!(", {}", arg)),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ e.emit();
+ return DummyResult::raw_expr(sp, true);
+ }
+
+ let arg_spans = parser
+ .arg_places
+ .iter()
+ .map(|span| fmt_span.from_inner(InnerSpan::new(span.start, span.end)))
+ .collect();
+
+ let named_pos: FxHashSet<usize> = names.values().cloned().map(|(i, _)| i).collect();
+
+ let mut cx = Context {
+ ecx,
+ args: args.into_iter().map(|arg| arg.expr).collect(),
+ num_captured_args: 0,
+ arg_types,
+ arg_unique_types,
+ names,
+ curarg: 0,
+ curpiece: 0,
+ arg_index_map: Vec::new(),
+ count_args: Vec::new(),
+ count_positions: FxHashMap::default(),
+ count_positions_count: 0,
+ count_args_index_offset: 0,
+ literal: String::new(),
+ pieces: Vec::with_capacity(unverified_pieces.len()),
+ str_pieces: Vec::with_capacity(unverified_pieces.len()),
+ all_pieces_simple: true,
+ macsp,
+ fmtsp: fmt_span,
+ invalid_refs: Vec::new(),
+ arg_spans,
+ arg_with_formatting: Vec::new(),
+ is_literal: parser.is_literal,
+ unused_names_lint: PositionalNamedArgsLint { positional_named_args: vec![] },
+ };
+
+ // This needs to happen *after* the Parser has consumed all pieces to create all the spans
+ let pieces = unverified_pieces
+ .into_iter()
+ .map(|mut piece| {
+ cx.verify_piece(&piece);
+ cx.resolve_name_inplace(&mut piece);
+ piece
+ })
+ .collect::<Vec<_>>();
+
+ let numbered_position_args = pieces.iter().any(|arg: &parse::Piece<'_>| match *arg {
+ parse::String(_) => false,
+ parse::NextArgument(arg) => matches!(arg.position, parse::Position::ArgumentIs(..)),
+ });
+
+ cx.build_index_map();
+
+ let mut arg_index_consumed = vec![0usize; cx.arg_index_map.len()];
+
+ for piece in pieces {
+ if let Some(piece) = cx.build_piece(&piece, &mut arg_index_consumed) {
+ let s = cx.build_literal_string();
+ cx.str_pieces.push(s);
+ cx.pieces.push(piece);
+ }
+ }
+
+ if !cx.literal.is_empty() {
+ let s = cx.build_literal_string();
+ cx.str_pieces.push(s);
+ }
+
+ if !cx.invalid_refs.is_empty() {
+ cx.report_invalid_references(numbered_position_args);
+ }
+
+ // Make sure that all arguments were used and all arguments have types.
+ let errs = cx
+ .arg_types
+ .iter()
+ .enumerate()
+ .filter(|(i, ty)| ty.is_empty() && !cx.count_positions.contains_key(&i))
+ .map(|(i, _)| {
+ let msg = if named_pos.contains(&i) {
+ // named argument
+ "named argument never used"
+ } else {
+ // positional argument
+ "argument never used"
+ };
+ (cx.args[i].span, msg)
+ })
+ .collect::<Vec<_>>();
+
+ let errs_len = errs.len();
+ if !errs.is_empty() {
+ let args_used = cx.arg_types.len() - errs_len;
+ let args_unused = errs_len;
+
+ let mut diag = {
+ if let [(sp, msg)] = &errs[..] {
+ let mut diag = cx.ecx.struct_span_err(*sp, *msg);
+ diag.span_label(*sp, *msg);
+ diag
+ } else {
+ let mut diag = cx.ecx.struct_span_err(
+ errs.iter().map(|&(sp, _)| sp).collect::<Vec<Span>>(),
+ "multiple unused formatting arguments",
+ );
+ diag.span_label(cx.fmtsp, "multiple missing formatting specifiers");
+ for (sp, msg) in errs {
+ diag.span_label(sp, msg);
+ }
+ diag
+ }
+ };
+
+ // Used to ensure we only report translations for *one* kind of foreign format.
+ let mut found_foreign = false;
+ // Decide if we want to look for foreign formatting directives.
+ if args_used < args_unused {
+ use super::format_foreign as foreign;
+
+ // The set of foreign substitutions we've explained. This prevents spamming the user
+ // with `%d should be written as {}` over and over again.
+ let mut explained = FxHashSet::default();
+
+ macro_rules! check_foreign {
+ ($kind:ident) => {{
+ let mut show_doc_note = false;
+
+ let mut suggestions = vec![];
+ // Account for the opening quote (`"`), plus `r#` for raw strings.
+ let padding = str_style.map(|i| i + 2).unwrap_or(1);
+ for sub in foreign::$kind::iter_subs(fmt_str, padding) {
+ let (trn, success) = match sub.translate() {
+ Ok(trn) => (trn, true),
+ Err(Some(msg)) => (msg, false),
+
+ // If it has no translation, don't call it out specifically.
+ _ => continue,
+ };
+
+ let pos = sub.position();
+ let sub = String::from(sub.as_str());
+ if explained.contains(&sub) {
+ continue;
+ }
+ explained.insert(sub.clone());
+
+ if !found_foreign {
+ found_foreign = true;
+ show_doc_note = true;
+ }
+
+ if let Some(inner_sp) = pos {
+ let sp = fmt_sp.from_inner(inner_sp);
+
+ if success {
+ suggestions.push((sp, trn));
+ } else {
+ diag.span_note(
+ sp,
+ &format!("format specifiers use curly braces, and {}", trn),
+ );
+ }
+ } else {
+ if success {
+ diag.help(&format!("`{}` should be written as `{}`", sub, trn));
+ } else {
+ diag.note(&format!(
+ "`{}` should use curly braces, and {}",
+ sub, trn
+ ));
+ }
+ }
+ }
+
+ if show_doc_note {
+ diag.note(concat!(
+ stringify!($kind),
+ " formatting not supported; see the documentation for `std::fmt`",
+ ));
+ }
+ if !suggestions.is_empty() {
+ diag.multipart_suggestion(
+ "format specifiers use curly braces",
+ suggestions,
+ Applicability::MachineApplicable,
+ );
+ }
+ }};
+ }
+
+ check_foreign!(printf);
+ if !found_foreign {
+ check_foreign!(shell);
+ }
+ }
+ if !found_foreign && errs_len == 1 {
+ diag.span_label(cx.fmtsp, "formatting specifier missing");
+ }
+
+ diag.emit();
+ } else if cx.invalid_refs.is_empty() && cx.ecx.sess.err_count() == 0 {
+ // Only lint for named arguments used positionally if there are no other
+ // errors, to avoid causing too much noise in the output (e.g. when a named
+ // argument is entirely unused).
+ create_lints_for_named_arguments_used_positionally(&mut cx);
+ }
+
+ cx.into_expr()
+}
+
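+/// Conservatively determines whether an expression may contain a yield point
+/// (`await` or `yield`). Macro calls and attributes also count, since they
+/// could expand to anything.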
+fn may_contain_yield_point(e: &ast::Expr) -> bool {
+ struct MayContainYieldPoint(bool);
+
+ impl Visitor<'_> for MayContainYieldPoint {
+ fn visit_expr(&mut self, e: &ast::Expr) {
+ if let ast::ExprKind::Await(_) | ast::ExprKind::Yield(_) = e.kind {
+ self.0 = true;
+ } else {
+ visit::walk_expr(self, e);
+ }
+ }
+
+ fn visit_mac_call(&mut self, _: &ast::MacCall) {
+ self.0 = true;
+ }
+
+ fn visit_attribute(&mut self, _: &ast::Attribute) {
+ // Conservatively assume this may be a proc macro attribute in
+ // expression position.
+ self.0 = true;
+ }
+
+ fn visit_item(&mut self, _: &ast::Item) {
+ // Do not recurse into nested items.
+ }
+ }
+
+ let mut visitor = MayContainYieldPoint(false);
+ visitor.visit_expr(e);
+ visitor.0
+}
diff --git a/compiler/rustc_builtin_macros/src/format_foreign.rs b/compiler/rustc_builtin_macros/src/format_foreign.rs
new file mode 100644
index 000000000..ecd16736e
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/format_foreign.rs
@@ -0,0 +1,829 @@
+pub(crate) mod printf {
+ use super::strcursor::StrCursor as Cur;
+ use rustc_span::InnerSpan;
+
+ /// Represents a single `printf`-style substitution.
+ #[derive(Clone, PartialEq, Debug)]
+ pub enum Substitution<'a> {
+ /// A formatted output substitution with its internal byte offset.
+ Format(Format<'a>),
+ /// A literal `%%` escape, with its start and end indices.
+ Escape((usize, usize)),
+ }
+
+ impl<'a> Substitution<'a> {
+ pub fn as_str(&self) -> &str {
+ match *self {
+ Substitution::Format(ref fmt) => fmt.span,
+ Substitution::Escape(_) => "%%",
+ }
+ }
+
+ pub fn position(&self) -> Option<InnerSpan> {
+ match *self {
+ Substitution::Format(ref fmt) => Some(fmt.position),
+ Substitution::Escape((start, end)) => Some(InnerSpan::new(start, end)),
+ }
+ }
+
+ pub fn set_position(&mut self, start: usize, end: usize) {
+ match self {
+ Substitution::Format(ref mut fmt) => fmt.position = InnerSpan::new(start, end),
+ Substitution::Escape(ref mut pos) => *pos = (start, end),
+ }
+ }
+
+ /// Translate this substitution into an equivalent Rust formatting directive.
+ ///
+ /// Returns `Err` in cases where the substitution does not have an exact
+ /// equivalent, or where the substitution would be unnecessary.
+ pub fn translate(&self) -> Result<String, Option<String>> {
+ match *self {
+ Substitution::Format(ref fmt) => fmt.translate(),
+ Substitution::Escape(_) => Err(None),
+ }
+ }
+ }
+
+ #[derive(Clone, PartialEq, Debug)]
+ /// A single `printf`-style formatting directive.
+ pub struct Format<'a> {
+ /// The entire original formatting directive.
+ pub span: &'a str,
+ /// The (1-based) parameter to be converted.
+ pub parameter: Option<u16>,
+ /// Formatting flags.
+ pub flags: &'a str,
+ /// Minimum width of the output.
+ pub width: Option<Num>,
+ /// Precision of the conversion.
+ pub precision: Option<Num>,
+ /// Length modifier for the conversion.
+ pub length: Option<&'a str>,
+ /// Type of parameter being converted.
+ pub type_: &'a str,
+ /// Byte offset for the start and end of this formatting directive.
+ pub position: InnerSpan,
+ }
+
+ impl Format<'_> {
+ /// Translate this directive into an equivalent Rust formatting directive.
+ ///
+ /// Returns `Err` in cases where the `printf` directive does not have an exact Rust
+ /// equivalent, rather than guessing.
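+ ///
+ /// For example (illustrative), `%08X` translates to `{:08X}`, while a
+ /// directive with the space flag (e.g. `% d`) has no Rust equivalent and
+ /// returns `Err`.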
+ pub fn translate(&self) -> Result<String, Option<String>> {
+ use std::fmt::Write;
+
+ let (c_alt, c_zero, c_left, c_plus) = {
+ let mut c_alt = false;
+ let mut c_zero = false;
+ let mut c_left = false;
+ let mut c_plus = false;
+ for c in self.flags.chars() {
+ match c {
+ '#' => c_alt = true,
+ '0' => c_zero = true,
+ '-' => c_left = true,
+ '+' => c_plus = true,
+ _ => {
+ return Err(Some(format!(
+ "the flag `{}` is unknown or unsupported",
+ c
+ )));
+ }
+ }
+ }
+ (c_alt, c_zero, c_left, c_plus)
+ };
+
+ // Has a special form in Rust for numbers.
+ let fill = c_zero.then_some("0");
+
+ let align = c_left.then_some("<");
+
+ // Rust doesn't have an equivalent to the `' '` flag.
+ let sign = c_plus.then_some("+");
+
+ // Not *quite* the same, depending on the type...
+ let alt = c_alt;
+
+ let width = match self.width {
+ Some(Num::Next) => {
+ // NOTE: Rust doesn't support this.
+ return Err(Some(
+ "you have to use a positional or named parameter for the width".to_string(),
+ ));
+ }
+ w @ Some(Num::Arg(_)) => w,
+ w @ Some(Num::Num(_)) => w,
+ None => None,
+ };
+
+ let precision = self.precision;
+
+ // NOTE: although length *can* have an effect, we can't duplicate the effect in Rust, so
+ // we just ignore it.
+
+ let (type_, use_zero_fill, is_int) = match self.type_ {
+ "d" | "i" | "u" => (None, true, true),
+ "f" | "F" => (None, false, false),
+ "s" | "c" => (None, false, false),
+ "e" | "E" => (Some(self.type_), true, false),
+ "x" | "X" | "o" => (Some(self.type_), true, true),
+ "p" => (Some(self.type_), false, true),
+ "g" => (Some("e"), true, false),
+ "G" => (Some("E"), true, false),
+ _ => {
+ return Err(Some(format!(
+ "the conversion specifier `{}` is unknown or unsupported",
+ self.type_
+ )));
+ }
+ };
+
+ let (fill, width, precision) = match (is_int, width, precision) {
+ (true, Some(_), Some(_)) => {
+ // Rust can't duplicate this insanity.
+ return Err(Some(
+ "width and precision cannot both be specified for integer conversions"
+ .to_string(),
+ ));
+ }
+ (true, None, Some(p)) => (Some("0"), Some(p), None),
+ (true, w, None) => (fill, w, None),
+ (false, w, p) => (fill, w, p),
+ };
+
+ let align = match (self.type_, width.is_some(), align.is_some()) {
+ ("s", true, false) => Some(">"),
+ _ => align,
+ };
+
+ let (fill, zero_fill) = match (fill, use_zero_fill) {
+ (Some("0"), true) => (None, true),
+ (fill, _) => (fill, false),
+ };
+
+ let alt = match type_ {
+ Some("x" | "X") => alt,
+ _ => false,
+ };
+
+ let has_options = fill.is_some()
+ || align.is_some()
+ || sign.is_some()
+ || alt
+ || zero_fill
+ || width.is_some()
+ || precision.is_some()
+ || type_.is_some();
+
+ // Initialise with a rough guess.
+ let cap = self.span.len() + if has_options { 2 } else { 0 };
+ let mut s = String::with_capacity(cap);
+
+ s.push('{');
+
+ if let Some(arg) = self.parameter {
+ let arg = match arg.checked_sub(1) {
+ Some(a) => a,
+ None => return Err(None),
+ };
+ if write!(s, "{}", arg).is_err() {
+ return Err(None);
+ }
+ }
+
+ if has_options {
+ s.push(':');
+
+ let align = if let Some(fill) = fill {
+ s.push_str(fill);
+ align.or(Some(">"))
+ } else {
+ align
+ };
+
+ if let Some(align) = align {
+ s.push_str(align);
+ }
+
+ if let Some(sign) = sign {
+ s.push_str(sign);
+ }
+
+ if alt {
+ s.push('#');
+ }
+
+ if zero_fill {
+ s.push('0');
+ }
+
+ if let Some(width) = width {
+ if width.translate(&mut s).is_err() {
+ return Err(None);
+ }
+ }
+
+ if let Some(precision) = precision {
+ s.push('.');
+ if precision.translate(&mut s).is_err() {
+ return Err(None);
+ }
+ }
+
+ if let Some(type_) = type_ {
+ s.push_str(type_);
+ }
+ }
+
+ s.push('}');
+ Ok(s)
+ }
+ }
+
+ /// A general number used in a `printf` formatting directive.
+ #[derive(Copy, Clone, PartialEq, Debug)]
+ pub enum Num {
+ // The range of these values is technically bounded by `NL_ARGMAX`... but, at least for GNU
+ // libc, it apparently has no real fixed limit. A `u16` is used here on the basis that it
+ // is *vanishingly* unlikely that *anyone* is going to try formatting something wider, or
+ // with more precision, than 65 thousand positions (the `u16` maximum), which is so wide it
+ // couldn't possibly fit on a screen.
+ /// A specific, fixed value.
+ Num(u16),
+ /// The value is derived from a positional argument.
+ Arg(u16),
+ /// The value is derived from the "next" unconverted argument.
+ Next,
+ }
+
+ impl Num {
+ fn from_str(s: &str, arg: Option<&str>) -> Self {
+ if let Some(arg) = arg {
+ Num::Arg(arg.parse().unwrap_or_else(|_| panic!("invalid format arg `{:?}`", arg)))
+ } else if s == "*" {
+ Num::Next
+ } else {
+ Num::Num(s.parse().unwrap_or_else(|_| panic!("invalid format num `{:?}`", s)))
+ }
+ }
+
+ fn translate(&self, s: &mut String) -> std::fmt::Result {
+ use std::fmt::Write;
+ match *self {
+ Num::Num(n) => write!(s, "{}", n),
+ Num::Arg(n) => {
+ let n = n.checked_sub(1).ok_or(std::fmt::Error)?;
+ write!(s, "{}$", n)
+ }
+ Num::Next => write!(s, "*"),
+ }
+ }
+ }
+
+ /// Returns an iterator over all substitutions in a given string.
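+ ///
+ /// `start_pos` is added to each substitution's reported span, e.g. to
+ /// account for the opening quote (and any `r#` prefix) of the literal.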
+ pub fn iter_subs(s: &str, start_pos: usize) -> Substitutions<'_> {
+ Substitutions { s, pos: start_pos }
+ }
+
+ /// Iterator over substitutions in a string.
+ pub struct Substitutions<'a> {
+ s: &'a str,
+ pos: usize,
+ }
+
+ impl<'a> Iterator for Substitutions<'a> {
+ type Item = Substitution<'a>;
+ fn next(&mut self) -> Option<Self::Item> {
+ let (mut sub, tail) = parse_next_substitution(self.s)?;
+ self.s = tail;
+ if let Some(InnerSpan { start, end }) = sub.position() {
+ sub.set_position(start + self.pos, end + self.pos);
+ self.pos += end;
+ }
+ Some(sub)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // Substitutions are at least 2 characters long.
+ (0, Some(self.s.len() / 2))
+ }
+ }
+
+ enum State {
+ Start,
+ Flags,
+ Width,
+ WidthArg,
+ Prec,
+ PrecInner,
+ Length,
+ Type,
+ }
+
+ /// Parse the next substitution from the input string.
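+ ///
+ /// For example (illustrative), `"%-10s!"` yields the `%-10s` format
+ /// substitution and the `"!"` tail, while `"a %% b"` yields the `%%`
+ /// escape and the `" b"` tail.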
+ pub fn parse_next_substitution(s: &str) -> Option<(Substitution<'_>, &str)> {
+ use self::State::*;
+
+ let at = {
+ let start = s.find('%')?;
+ if let '%' = s[start + 1..].chars().next()? {
+ return Some((Substitution::Escape((start, start + 2)), &s[start + 2..]));
+ }
+
+ Cur::new_at(s, start)
+ };
+
+ // This is meant to be a translation of the following regex:
+ //
+ // ```regex
+ // (?x)
+ // ^ %
+ // (?: (?P<parameter> \d+) \$ )?
+ // (?P<flags> [-+ 0\#']* )
+ // (?P<width> \d+ | \* (?: (?P<widtha> \d+) \$ )? )?
+ // (?: \. (?P<precision> \d+ | \* (?: (?P<precisiona> \d+) \$ )? ) )?
+ // (?P<length>
+ // # Standard
+ // hh | h | ll | l | L | z | j | t
+ //
+ // # Other
+ // | I32 | I64 | I | q
+ // )?
+ // (?P<type> . )
+ // ```
+
+ // Used to establish the full span at the end.
+ let start = at;
+ // The current position within the string.
+ let mut at = at.at_next_cp()?;
+ // `c` is the next codepoint, `next` is a cursor after it.
+ let (mut c, mut next) = at.next_cp()?;
+
+ // Update `at`, `c`, and `next`, exiting if we're out of input.
+ macro_rules! move_to {
+ ($cur:expr) => {{
+ at = $cur;
+ let (c_, next_) = at.next_cp()?;
+ c = c_;
+ next = next_;
+ }};
+ }
+
+ // Constructs a result when parsing fails.
+ //
+ // Note: `move` is used to capture copies of the cursors as they are *now*.
+ let fallback = move || {
+ Some((
+ Substitution::Format(Format {
+ span: start.slice_between(next).unwrap(),
+ parameter: None,
+ flags: "",
+ width: None,
+ precision: None,
+ length: None,
+ type_: at.slice_between(next).unwrap(),
+ position: InnerSpan::new(start.at, next.at),
+ }),
+ next.slice_after(),
+ ))
+ };
+
+ // Next parsing state.
+ let mut state = Start;
+
+ // Sadly, Rust isn't *quite* smart enough to know these *must* be initialised by the end.
+ let mut parameter: Option<u16> = None;
+ let mut flags: &str = "";
+ let mut width: Option<Num> = None;
+ let mut precision: Option<Num> = None;
+ let mut length: Option<&str> = None;
+ let mut type_: &str = "";
+ let end: Cur<'_>;
+
+ if let Start = state {
+ match c {
+ '1'..='9' => {
+ let end = at_next_cp_while(next, char::is_ascii_digit);
+ match end.next_cp() {
+ // Yes, this *is* the parameter.
+ Some(('$', end2)) => {
+ state = Flags;
+ parameter = Some(at.slice_between(end).unwrap().parse().unwrap());
+ move_to!(end2);
+ }
+ // Wait, no, actually, it's the width.
+ Some(_) => {
+ state = Prec;
+ parameter = None;
+ flags = "";
+ width = Some(Num::from_str(at.slice_between(end).unwrap(), None));
+ move_to!(end);
+ }
+ // It's invalid, is what it is.
+ None => return fallback(),
+ }
+ }
+ _ => {
+ state = Flags;
+ parameter = None;
+ move_to!(at);
+ }
+ }
+ }
+
+ if let Flags = state {
+ let end = at_next_cp_while(at, is_flag);
+ state = Width;
+ flags = at.slice_between(end).unwrap();
+ move_to!(end);
+ }
+
+ if let Width = state {
+ match c {
+ '*' => {
+ state = WidthArg;
+ move_to!(next);
+ }
+ '1'..='9' => {
+ let end = at_next_cp_while(next, char::is_ascii_digit);
+ state = Prec;
+ width = Some(Num::from_str(at.slice_between(end).unwrap(), None));
+ move_to!(end);
+ }
+ _ => {
+ state = Prec;
+ width = None;
+ move_to!(at);
+ }
+ }
+ }
+
+ if let WidthArg = state {
+ let end = at_next_cp_while(at, char::is_ascii_digit);
+ match end.next_cp() {
+ Some(('$', end2)) => {
+ state = Prec;
+ width = Some(Num::from_str("", Some(at.slice_between(end).unwrap())));
+ move_to!(end2);
+ }
+ _ => {
+ state = Prec;
+ width = Some(Num::Next);
+ move_to!(end);
+ }
+ }
+ }
+
+ if let Prec = state {
+ match c {
+ '.' => {
+ state = PrecInner;
+ move_to!(next);
+ }
+ _ => {
+ state = Length;
+ precision = None;
+ move_to!(at);
+ }
+ }
+ }
+
+ if let PrecInner = state {
+ match c {
+ '*' => {
+ let end = at_next_cp_while(next, char::is_ascii_digit);
+ match end.next_cp() {
+ Some(('$', end2)) => {
+ state = Length;
+ precision = Some(Num::from_str("*", next.slice_between(end)));
+ move_to!(end2);
+ }
+ _ => {
+ state = Length;
+ precision = Some(Num::Next);
+ move_to!(end);
+ }
+ }
+ }
+ '0'..='9' => {
+ let end = at_next_cp_while(next, char::is_ascii_digit);
+ state = Length;
+ precision = Some(Num::from_str(at.slice_between(end).unwrap(), None));
+ move_to!(end);
+ }
+ _ => return fallback(),
+ }
+ }
+
+ if let Length = state {
+ let c1_next1 = next.next_cp();
+ match (c, c1_next1) {
+ ('h', Some(('h', next1))) | ('l', Some(('l', next1))) => {
+ state = Type;
+ length = Some(at.slice_between(next1).unwrap());
+ move_to!(next1);
+ }
+
+ ('h' | 'l' | 'L' | 'z' | 'j' | 't' | 'q', _) => {
+ state = Type;
+ length = Some(at.slice_between(next).unwrap());
+ move_to!(next);
+ }
+
+ ('I', _) => {
+ let end = next
+ .at_next_cp()
+ .and_then(|end| end.at_next_cp())
+ .map(|end| (next.slice_between(end).unwrap(), end));
+ let end = match end {
+ Some(("32" | "64", end)) => end,
+ _ => next,
+ };
+ state = Type;
+ length = Some(at.slice_between(end).unwrap());
+ move_to!(end);
+ }
+
+ _ => {
+ state = Type;
+ length = None;
+ move_to!(at);
+ }
+ }
+ }
+
+ if let Type = state {
+ drop(c);
+ type_ = at.slice_between(next).unwrap();
+
+ // Don't use `move_to!` here, as we *can* be at the end of the input.
+ at = next;
+ }
+
+ drop(c);
+ drop(next);
+
+ end = at;
+ let position = InnerSpan::new(start.at, end.at);
+
+ let f = Format {
+ span: start.slice_between(end).unwrap(),
+ parameter,
+ flags,
+ width,
+ precision,
+ length,
+ type_,
+ position,
+ };
+ Some((Substitution::Format(f), end.slice_after()))
+ }
+
+ fn at_next_cp_while<F>(mut cur: Cur<'_>, mut pred: F) -> Cur<'_>
+ where
+ F: FnMut(&char) -> bool,
+ {
+ loop {
+ match cur.next_cp() {
+ Some((c, next)) => {
+ if pred(&c) {
+ cur = next;
+ } else {
+ return cur;
+ }
+ }
+ None => return cur,
+ }
+ }
+ }
+
+ fn is_flag(c: &char) -> bool {
+ matches!(c, '0' | '-' | '+' | ' ' | '#' | '\'')
+ }
+
+ #[cfg(test)]
+ mod tests;
+}
+
+pub mod shell {
+ use super::strcursor::StrCursor as Cur;
+ use rustc_span::InnerSpan;
+
+ #[derive(Clone, PartialEq, Debug)]
+ pub enum Substitution<'a> {
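+ /// A reference to an argument by single-digit ordinal, e.g. `$0`, with its span.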
+ Ordinal(u8, (usize, usize)),
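+ /// A reference to an argument by name, e.g. `$foo`, with its span.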
+ Name(&'a str, (usize, usize)),
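+ /// A literal `$$` escape, with its span.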
+ Escape((usize, usize)),
+ }
+
+ impl Substitution<'_> {
+ pub fn as_str(&self) -> String {
+ match self {
+ Substitution::Ordinal(n, _) => format!("${}", n),
+ Substitution::Name(n, _) => format!("${}", n),
+ Substitution::Escape(_) => "$$".into(),
+ }
+ }
+
+ pub fn position(&self) -> Option<InnerSpan> {
+ match self {
+ Substitution::Ordinal(_, pos)
+ | Substitution::Name(_, pos)
+ | Substitution::Escape(pos) => Some(InnerSpan::new(pos.0, pos.1)),
+ }
+ }
+
+ pub fn set_position(&mut self, start: usize, end: usize) {
+ match self {
+ Substitution::Ordinal(_, ref mut pos)
+ | Substitution::Name(_, ref mut pos)
+ | Substitution::Escape(ref mut pos) => *pos = (start, end),
+ }
+ }
+
+ pub fn translate(&self) -> Result<String, Option<String>> {
+ match *self {
+ Substitution::Ordinal(n, _) => Ok(format!("{{{}}}", n)),
+ Substitution::Name(n, _) => Ok(format!("{{{}}}", n)),
+ Substitution::Escape(_) => Err(None),
+ }
+ }
+ }
+
+ /// Returns an iterator over all substitutions in a given string.
+ pub fn iter_subs(s: &str, start_pos: usize) -> Substitutions<'_> {
+ Substitutions { s, pos: start_pos }
+ }
+
+ /// Iterator over substitutions in a string.
+ pub struct Substitutions<'a> {
+ s: &'a str,
+ pos: usize,
+ }
+
+ impl<'a> Iterator for Substitutions<'a> {
+ type Item = Substitution<'a>;
+ fn next(&mut self) -> Option<Self::Item> {
+ let (mut sub, tail) = parse_next_substitution(self.s)?;
+ self.s = tail;
+ if let Some(InnerSpan { start, end }) = sub.position() {
+ sub.set_position(start + self.pos, end + self.pos);
+ self.pos += end;
+ }
+ Some(sub)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.s.len()))
+ }
+ }
+
+ /// Parse the next substitution from the input string.
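+ ///
+ /// For example (illustrative), parsing `"$0 and $name and $$"` repeatedly
+ /// yields an `Ordinal(0, ..)`, then a `Name("name", ..)`, then an
+ /// `Escape(..)` substitution.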
+ pub fn parse_next_substitution(s: &str) -> Option<(Substitution<'_>, &str)> {
+ let at = {
+ let start = s.find('$')?;
+ match s[start + 1..].chars().next()? {
+ '$' => return Some((Substitution::Escape((start, start + 2)), &s[start + 2..])),
+ c @ '0'..='9' => {
+ let n = (c as u8) - b'0';
+ return Some((Substitution::Ordinal(n, (start, start + 2)), &s[start + 2..]));
+ }
+ _ => { /* fall-through */ }
+ }
+
+ Cur::new_at(s, start)
+ };
+
+ let at = at.at_next_cp()?;
+ let (c, inner) = at.next_cp()?;
+
+ if !is_ident_head(c) {
+ None
+ } else {
+ let end = at_next_cp_while(inner, is_ident_tail);
+ let slice = at.slice_between(end).unwrap();
+ let start = at.at - 1;
+ let end_pos = at.at + slice.len();
+ Some((Substitution::Name(slice, (start, end_pos)), end.slice_after()))
+ }
+ }
+
+ fn at_next_cp_while<F>(mut cur: Cur<'_>, mut pred: F) -> Cur<'_>
+ where
+ F: FnMut(char) -> bool,
+ {
+ loop {
+ match cur.next_cp() {
+ Some((c, next)) => {
+ if pred(c) {
+ cur = next;
+ } else {
+ return cur;
+ }
+ }
+ None => return cur,
+ }
+ }
+ }
+
+ fn is_ident_head(c: char) -> bool {
+ c.is_ascii_alphabetic() || c == '_'
+ }
+
+ fn is_ident_tail(c: char) -> bool {
+ c.is_ascii_alphanumeric() || c == '_'
+ }
+
+ #[cfg(test)]
+ mod tests;
+}
+
+mod strcursor {
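+ /// A simple cursor over a string slice: `at` is a byte offset that is
+ /// expected to lie on a UTF-8 character boundary.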
+ pub struct StrCursor<'a> {
+ s: &'a str,
+ pub at: usize,
+ }
+
+ impl<'a> StrCursor<'a> {
+ pub fn new_at(s: &'a str, at: usize) -> StrCursor<'a> {
+ StrCursor { s, at }
+ }
+
+ pub fn at_next_cp(mut self) -> Option<StrCursor<'a>> {
+ if self.try_seek_right_cp() { Some(self) } else { None }
+ }
+
+ pub fn next_cp(mut self) -> Option<(char, StrCursor<'a>)> {
+ let cp = self.cp_after()?;
+ self.seek_right(cp.len_utf8());
+ Some((cp, self))
+ }
+
+ fn slice_before(&self) -> &'a str {
+ &self.s[0..self.at]
+ }
+
+ pub fn slice_after(&self) -> &'a str {
+ &self.s[self.at..]
+ }
+
+ pub fn slice_between(&self, until: StrCursor<'a>) -> Option<&'a str> {
+ if !str_eq_literal(self.s, until.s) {
+ None
+ } else {
+ use std::cmp::{max, min};
+ let beg = min(self.at, until.at);
+ let end = max(self.at, until.at);
+ Some(&self.s[beg..end])
+ }
+ }
+
+ fn cp_after(&self) -> Option<char> {
+ self.slice_after().chars().next()
+ }
+
+ fn try_seek_right_cp(&mut self) -> bool {
+ match self.slice_after().chars().next() {
+ Some(c) => {
+ self.at += c.len_utf8();
+ true
+ }
+ None => false,
+ }
+ }
+
+ fn seek_right(&mut self, bytes: usize) {
+ self.at += bytes;
+ }
+ }
+
+ impl Copy for StrCursor<'_> {}
+
+ impl<'a> Clone for StrCursor<'a> {
+ fn clone(&self) -> StrCursor<'a> {
+ *self
+ }
+ }
+
+ impl std::fmt::Debug for StrCursor<'_> {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(fmt, "StrCursor({:?} | {:?})", self.slice_before(), self.slice_after())
+ }
+ }
+
+ fn str_eq_literal(a: &str, b: &str) -> bool {
+ a.as_bytes().as_ptr() == b.as_bytes().as_ptr() && a.len() == b.len()
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/format_foreign/printf/tests.rs b/compiler/rustc_builtin_macros/src/format_foreign/printf/tests.rs
new file mode 100644
index 000000000..fc7442470
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/format_foreign/printf/tests.rs
@@ -0,0 +1,145 @@
+use super::{iter_subs, parse_next_substitution as pns, Format as F, Num as N, Substitution as S};
+
+macro_rules! assert_eq_pnsat {
+ ($lhs:expr, $rhs:expr) => {
+ assert_eq!(
+ pns($lhs).and_then(|(s, _)| s.translate().ok()),
+ $rhs.map(<String as From<&str>>::from)
+ )
+ };
+}
+
+#[test]
+fn test_escape() {
+ assert_eq!(pns("has no escapes"), None);
+ assert_eq!(pns("has no escapes, either %"), None);
+ assert_eq!(pns("*so* has a %% escape"), Some((S::Escape((11, 13)), " escape")));
+ assert_eq!(pns("%% leading escape"), Some((S::Escape((0, 2)), " leading escape")));
+ assert_eq!(pns("trailing escape %%"), Some((S::Escape((16, 18)), "")));
+}
+
+#[test]
+fn test_parse() {
+ macro_rules! assert_pns_eq_sub {
+ ($in_:expr, {
+ $param:expr, $flags:expr,
+ $width:expr, $prec:expr, $len:expr, $type_:expr,
+ $pos:expr,
+ }) => {
+ assert_eq!(
+ pns(concat!($in_, "!")),
+ Some((
+ S::Format(F {
+ span: $in_,
+ parameter: $param,
+ flags: $flags,
+ width: $width,
+ precision: $prec,
+ length: $len,
+ type_: $type_,
+ position: rustc_span::InnerSpan::new($pos.0, $pos.1),
+ }),
+ "!"
+ ))
+ )
+ };
+ }
+
+ assert_pns_eq_sub!("%!",
+ { None, "", None, None, None, "!", (0, 2), });
+ assert_pns_eq_sub!("%c",
+ { None, "", None, None, None, "c", (0, 2), });
+ assert_pns_eq_sub!("%s",
+ { None, "", None, None, None, "s", (0, 2), });
+ assert_pns_eq_sub!("%06d",
+ { None, "0", Some(N::Num(6)), None, None, "d", (0, 4), });
+ assert_pns_eq_sub!("%4.2f",
+ { None, "", Some(N::Num(4)), Some(N::Num(2)), None, "f", (0, 5), });
+ assert_pns_eq_sub!("%#x",
+ { None, "#", None, None, None, "x", (0, 3), });
+ assert_pns_eq_sub!("%-10s",
+ { None, "-", Some(N::Num(10)), None, None, "s", (0, 5), });
+ assert_pns_eq_sub!("%*s",
+ { None, "", Some(N::Next), None, None, "s", (0, 3), });
+ assert_pns_eq_sub!("%-10.*s",
+ { None, "-", Some(N::Num(10)), Some(N::Next), None, "s", (0, 7), });
+ assert_pns_eq_sub!("%-*.*s",
+ { None, "-", Some(N::Next), Some(N::Next), None, "s", (0, 6), });
+ assert_pns_eq_sub!("%.6i",
+ { None, "", None, Some(N::Num(6)), None, "i", (0, 4), });
+ assert_pns_eq_sub!("%+i",
+ { None, "+", None, None, None, "i", (0, 3), });
+ assert_pns_eq_sub!("%08X",
+ { None, "0", Some(N::Num(8)), None, None, "X", (0, 4), });
+ assert_pns_eq_sub!("%lu",
+ { None, "", None, None, Some("l"), "u", (0, 3), });
+ assert_pns_eq_sub!("%Iu",
+ { None, "", None, None, Some("I"), "u", (0, 3), });
+ assert_pns_eq_sub!("%I32u",
+ { None, "", None, None, Some("I32"), "u", (0, 5), });
+ assert_pns_eq_sub!("%I64u",
+ { None, "", None, None, Some("I64"), "u", (0, 5), });
+ assert_pns_eq_sub!("%'d",
+ { None, "'", None, None, None, "d", (0, 3), });
+ assert_pns_eq_sub!("%10s",
+ { None, "", Some(N::Num(10)), None, None, "s", (0, 4), });
+ assert_pns_eq_sub!("%-10.10s",
+ { None, "-", Some(N::Num(10)), Some(N::Num(10)), None, "s", (0, 8), });
+ assert_pns_eq_sub!("%1$d",
+ { Some(1), "", None, None, None, "d", (0, 4), });
+ assert_pns_eq_sub!("%2$.*3$d",
+ { Some(2), "", None, Some(N::Arg(3)), None, "d", (0, 8), });
+ assert_pns_eq_sub!("%1$*2$.*3$d",
+ { Some(1), "", Some(N::Arg(2)), Some(N::Arg(3)), None, "d", (0, 11), });
+ assert_pns_eq_sub!("%-8ld",
+ { None, "-", Some(N::Num(8)), None, Some("l"), "d", (0, 5), });
+}
+
+#[test]
+fn test_iter() {
+ let s = "The %d'th word %% is: `%.*s` %!\n";
+ let subs: Vec<_> = iter_subs(s, 0).map(|sub| sub.translate().ok()).collect();
+ assert_eq!(
+ subs.iter().map(|ms| ms.as_ref().map(|s| &s[..])).collect::<Vec<_>>(),
+ vec![Some("{}"), None, Some("{:.*}"), None]
+ );
+}
+
+/// Checks that the translations are what we expect.
+#[test]
+fn test_translation() {
+ assert_eq_pnsat!("%c", Some("{}"));
+ assert_eq_pnsat!("%d", Some("{}"));
+ assert_eq_pnsat!("%u", Some("{}"));
+ assert_eq_pnsat!("%x", Some("{:x}"));
+ assert_eq_pnsat!("%X", Some("{:X}"));
+ assert_eq_pnsat!("%e", Some("{:e}"));
+ assert_eq_pnsat!("%E", Some("{:E}"));
+ assert_eq_pnsat!("%f", Some("{}"));
+ assert_eq_pnsat!("%g", Some("{:e}"));
+ assert_eq_pnsat!("%G", Some("{:E}"));
+ assert_eq_pnsat!("%s", Some("{}"));
+ assert_eq_pnsat!("%p", Some("{:p}"));
+
+ assert_eq_pnsat!("%06d", Some("{:06}"));
+ assert_eq_pnsat!("%4.2f", Some("{:4.2}"));
+ assert_eq_pnsat!("%#x", Some("{:#x}"));
+ assert_eq_pnsat!("%-10s", Some("{:<10}"));
+ assert_eq_pnsat!("%*s", None);
+ assert_eq_pnsat!("%-10.*s", Some("{:<10.*}"));
+ assert_eq_pnsat!("%-*.*s", None);
+ assert_eq_pnsat!("%.6i", Some("{:06}"));
+ assert_eq_pnsat!("%+i", Some("{:+}"));
+ assert_eq_pnsat!("%08X", Some("{:08X}"));
+ assert_eq_pnsat!("%lu", Some("{}"));
+ assert_eq_pnsat!("%Iu", Some("{}"));
+ assert_eq_pnsat!("%I32u", Some("{}"));
+ assert_eq_pnsat!("%I64u", Some("{}"));
+ assert_eq_pnsat!("%'d", None);
+ assert_eq_pnsat!("%10s", Some("{:>10}"));
+ assert_eq_pnsat!("%-10.10s", Some("{:<10.10}"));
+ assert_eq_pnsat!("%1$d", Some("{0}"));
+ assert_eq_pnsat!("%2$.*3$d", Some("{1:02$}"));
+ assert_eq_pnsat!("%1$*2$.*3$s", Some("{0:>1$.2$}"));
+ assert_eq_pnsat!("%-8ld", Some("{:<8}"));
+}
diff --git a/compiler/rustc_builtin_macros/src/format_foreign/shell/tests.rs b/compiler/rustc_builtin_macros/src/format_foreign/shell/tests.rs
new file mode 100644
index 000000000..f5f82732f
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/format_foreign/shell/tests.rs
@@ -0,0 +1,56 @@
+use super::{parse_next_substitution as pns, Substitution as S};
+
+macro_rules! assert_eq_pnsat {
+ ($lhs:expr, $rhs:expr) => {
+ assert_eq!(
+ pns($lhs).and_then(|(f, _)| f.translate().ok()),
+ $rhs.map(<String as From<&str>>::from)
+ )
+ };
+}
+
+#[test]
+fn test_escape() {
+ assert_eq!(pns("has no escapes"), None);
+ assert_eq!(pns("has no escapes, either $"), None);
+ assert_eq!(pns("*so* has a $$ escape"), Some((S::Escape((11, 13)), " escape")));
+ assert_eq!(pns("$$ leading escape"), Some((S::Escape((0, 2)), " leading escape")));
+ assert_eq!(pns("trailing escape $$"), Some((S::Escape((16, 18)), "")));
+}
+
+#[test]
+fn test_parse() {
+ macro_rules! assert_pns_eq_sub {
+ ($in_:expr, $kind:ident($arg:expr, $pos:expr)) => {
+ assert_eq!(pns(concat!($in_, "!")), Some((S::$kind($arg.into(), $pos), "!")))
+ };
+ }
+
+ assert_pns_eq_sub!("$0", Ordinal(0, (0, 2)));
+ assert_pns_eq_sub!("$1", Ordinal(1, (0, 2)));
+ assert_pns_eq_sub!("$9", Ordinal(9, (0, 2)));
+ assert_pns_eq_sub!("$N", Name("N", (0, 2)));
+ assert_pns_eq_sub!("$NAME", Name("NAME", (0, 5)));
+}
+
+#[test]
+fn test_iter() {
+ use super::iter_subs;
+ let s = "The $0'th word $$ is: `$WORD` $!\n";
+ let subs: Vec<_> = iter_subs(s, 0).map(|sub| sub.translate().ok()).collect();
+ assert_eq!(
+ subs.iter().map(|ms| ms.as_ref().map(|s| &s[..])).collect::<Vec<_>>(),
+ vec![Some("{0}"), None, Some("{WORD}")]
+ );
+}
+
+#[test]
+fn test_translation() {
+ assert_eq_pnsat!("$0", Some("{0}"));
+ assert_eq_pnsat!("$9", Some("{9}"));
+ assert_eq_pnsat!("$1", Some("{1}"));
+ assert_eq_pnsat!("$10", Some("{1}"));
+ assert_eq_pnsat!("$stuff", Some("{stuff}"));
+ assert_eq_pnsat!("$NAME", Some("{NAME}"));
+ assert_eq_pnsat!("$PREFIX/bin", Some("{PREFIX}"));
+}
diff --git a/compiler/rustc_builtin_macros/src/global_allocator.rs b/compiler/rustc_builtin_macros/src/global_allocator.rs
new file mode 100644
index 000000000..36cfbba45
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/global_allocator.rs
@@ -0,0 +1,194 @@
+use crate::util::check_builtin_macro_attribute;
+
+use rustc_ast::expand::allocator::{
+ AllocatorKind, AllocatorMethod, AllocatorTy, ALLOCATOR_METHODS,
+};
+use rustc_ast::ptr::P;
+use rustc_ast::{self as ast, Attribute, Expr, FnHeader, FnSig, Generics, Param, StmtKind};
+use rustc_ast::{Fn, ItemKind, Mutability, Stmt, Ty, TyKind, Unsafe};
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::Span;
+
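+// As a rough sketch (with illustrative names `A` and `MyAlloc`), expanding
+//
+//     #[global_allocator]
+//     static A: MyAlloc = MyAlloc;
+//
+// keeps the original static and appends an anonymous constant containing one
+// `#[rustc_std_internal_symbol]` shim per entry in `ALLOCATOR_METHODS`,
+// along the lines of (paths abbreviated):
+//
+//     unsafe fn __rust_alloc(arg0: usize, arg1: usize) -> *mut u8 {
+//         GlobalAlloc::alloc(&A, Layout::from_size_align_unchecked(arg0, arg1)) as *mut u8
+//     }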
+pub fn expand(
+ ecx: &mut ExtCtxt<'_>,
+ _span: Span,
+ meta_item: &ast::MetaItem,
+ item: Annotatable,
+) -> Vec<Annotatable> {
+ check_builtin_macro_attribute(ecx, meta_item, sym::global_allocator);
+
+ let orig_item = item.clone();
+ let not_static = || {
+ ecx.sess.parse_sess.span_diagnostic.span_err(item.span(), "allocators must be statics");
+ vec![orig_item.clone()]
+ };
+
+ // Allow using `#[global_allocator]` on an item statement
+ // FIXME - if we get deref patterns, use them to reduce duplication here
+ let (item, is_stmt, ty_span) = match &item {
+ Annotatable::Item(item) => match item.kind {
+ ItemKind::Static(ref ty, ..) => (item, false, ecx.with_def_site_ctxt(ty.span)),
+ _ => return not_static(),
+ },
+ Annotatable::Stmt(stmt) => match &stmt.kind {
+ StmtKind::Item(item_) => match item_.kind {
+ ItemKind::Static(ref ty, ..) => (item_, true, ecx.with_def_site_ctxt(ty.span)),
+ _ => return not_static(),
+ },
+ _ => return not_static(),
+ },
+ _ => return not_static(),
+ };
+
+ // Generate a bunch of new items using the AllocFnFactory
+ let span = ecx.with_def_site_ctxt(item.span);
+ let f =
+ AllocFnFactory { span, ty_span, kind: AllocatorKind::Global, global: item.ident, cx: ecx };
+
+ // Generate item statements for the allocator methods.
+ let stmts = ALLOCATOR_METHODS.iter().map(|method| f.allocator_fn(method)).collect();
+
+ // Generate an anonymous constant serving as a container for the allocator methods.
+ let const_ty = ecx.ty(ty_span, TyKind::Tup(Vec::new()));
+ let const_body = ecx.expr_block(ecx.block(span, stmts));
+ let const_item = ecx.item_const(span, Ident::new(kw::Underscore, span), const_ty, const_body);
+ let const_item = if is_stmt {
+ Annotatable::Stmt(P(ecx.stmt_item(span, const_item)))
+ } else {
+ Annotatable::Item(const_item)
+ };
+
+ // Return the original item and the new methods.
+ vec![orig_item, const_item]
+}
+
+struct AllocFnFactory<'a, 'b> {
+ span: Span,
+ ty_span: Span,
+ kind: AllocatorKind,
+ global: Ident,
+ cx: &'b ExtCtxt<'a>,
+}
+
+impl AllocFnFactory<'_, '_> {
+ fn allocator_fn(&self, method: &AllocatorMethod) -> Stmt {
+ let mut abi_args = Vec::new();
+ let mut i = 0;
+ let mut mk = || {
+ let name = Ident::from_str_and_span(&format!("arg{}", i), self.span);
+ i += 1;
+ name
+ };
+ let args = method.inputs.iter().map(|ty| self.arg_ty(ty, &mut abi_args, &mut mk)).collect();
+ let result = self.call_allocator(method.name, args);
+ let (output_ty, output_expr) = self.ret_ty(&method.output, result);
+ let decl = self.cx.fn_decl(abi_args, ast::FnRetTy::Ty(output_ty));
+ let header = FnHeader { unsafety: Unsafe::Yes(self.span), ..FnHeader::default() };
+ let sig = FnSig { decl, header, span: self.span };
+ let body = Some(self.cx.block_expr(output_expr));
+ let kind = ItemKind::Fn(Box::new(Fn {
+ defaultness: ast::Defaultness::Final,
+ sig,
+ generics: Generics::default(),
+ body,
+ }));
+ let item = self.cx.item(
+ self.span,
+ Ident::from_str_and_span(&self.kind.fn_name(method.name), self.span),
+ self.attrs(),
+ kind,
+ );
+ self.cx.stmt_item(self.ty_span, item)
+ }
+
+ fn call_allocator(&self, method: Symbol, mut args: Vec<P<Expr>>) -> P<Expr> {
+ let method = self.cx.std_path(&[sym::alloc, sym::GlobalAlloc, method]);
+ let method = self.cx.expr_path(self.cx.path(self.ty_span, method));
+ let allocator = self.cx.path_ident(self.ty_span, self.global);
+ let allocator = self.cx.expr_path(allocator);
+ let allocator = self.cx.expr_addr_of(self.ty_span, allocator);
+ args.insert(0, allocator);
+
+ self.cx.expr_call(self.ty_span, method, args)
+ }
+
+ fn attrs(&self) -> Vec<Attribute> {
+ let special = sym::rustc_std_internal_symbol;
+ let special = self.cx.meta_word(self.span, special);
+ vec![self.cx.attribute(special)]
+ }
+
+ fn arg_ty(
+ &self,
+ ty: &AllocatorTy,
+ args: &mut Vec<Param>,
+ ident: &mut dyn FnMut() -> Ident,
+ ) -> P<Expr> {
+ match *ty {
+ AllocatorTy::Layout => {
+ let usize = self.cx.path_ident(self.span, Ident::new(sym::usize, self.span));
+ let ty_usize = self.cx.ty_path(usize);
+ let size = ident();
+ let align = ident();
+ args.push(self.cx.param(self.span, size, ty_usize.clone()));
+ args.push(self.cx.param(self.span, align, ty_usize));
+
+ let layout_new =
+ self.cx.std_path(&[sym::alloc, sym::Layout, sym::from_size_align_unchecked]);
+ let layout_new = self.cx.expr_path(self.cx.path(self.span, layout_new));
+ let size = self.cx.expr_ident(self.span, size);
+ let align = self.cx.expr_ident(self.span, align);
+ let layout = self.cx.expr_call(self.span, layout_new, vec![size, align]);
+ layout
+ }
+
+ AllocatorTy::Ptr => {
+ let ident = ident();
+ args.push(self.cx.param(self.span, ident, self.ptr_u8()));
+ let arg = self.cx.expr_ident(self.span, ident);
+ self.cx.expr_cast(self.span, arg, self.ptr_u8())
+ }
+
+ AllocatorTy::Usize => {
+ let ident = ident();
+ args.push(self.cx.param(self.span, ident, self.usize()));
+ self.cx.expr_ident(self.span, ident)
+ }
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => {
+ panic!("can't convert AllocatorTy to an argument")
+ }
+ }
+ }
+
+ fn ret_ty(&self, ty: &AllocatorTy, expr: P<Expr>) -> (P<Ty>, P<Expr>) {
+ match *ty {
+ AllocatorTy::ResultPtr => {
+ // We're creating:
+ //
+ // #expr as *mut u8
+
+ let expr = self.cx.expr_cast(self.span, expr, self.ptr_u8());
+ (self.ptr_u8(), expr)
+ }
+
+ AllocatorTy::Unit => (self.cx.ty(self.span, TyKind::Tup(Vec::new())), expr),
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("can't convert `AllocatorTy` to an output")
+ }
+ }
+ }
+
+ fn usize(&self) -> P<Ty> {
+ let usize = self.cx.path_ident(self.span, Ident::new(sym::usize, self.span));
+ self.cx.ty_path(usize)
+ }
+
+ fn ptr_u8(&self) -> P<Ty> {
+ let u8 = self.cx.path_ident(self.span, Ident::new(sym::u8, self.span));
+ let ty_u8 = self.cx.ty_path(u8);
+ self.cx.ty_ptr(self.span, ty_u8, Mutability::Mut)
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/lib.rs b/compiler/rustc_builtin_macros/src/lib.rs
new file mode 100644
index 000000000..11565ba72
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/lib.rs
@@ -0,0 +1,119 @@
+//! This crate contains implementations of built-in macros and other code-generating
+//! facilities that inject code into the crate before it is lowered to HIR.
+
+#![allow(rustc::potential_query_instability)]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(array_windows)]
+#![feature(box_patterns)]
+#![feature(decl_macro)]
+#![feature(if_let_guard)]
+#![feature(is_sorted)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(proc_macro_internals)]
+#![feature(proc_macro_quote)]
+#![recursion_limit = "256"]
+
+extern crate proc_macro;
+
+use crate::deriving::*;
+
+use rustc_expand::base::{MacroExpanderFn, ResolverExpand, SyntaxExtensionKind};
+use rustc_expand::proc_macro::BangProcMacro;
+use rustc_span::symbol::sym;
+
+mod assert;
+mod cfg;
+mod cfg_accessible;
+mod cfg_eval;
+mod compile_error;
+mod concat;
+mod concat_bytes;
+mod concat_idents;
+mod derive;
+mod deriving;
+mod edition_panic;
+mod env;
+mod format;
+mod format_foreign;
+mod global_allocator;
+mod log_syntax;
+mod source_util;
+mod test;
+mod trace_macros;
+mod util;
+
+pub mod asm;
+pub mod cmdline_attrs;
+pub mod proc_macro_harness;
+pub mod standard_library_imports;
+pub mod test_harness;
+
+pub fn register_builtin_macros(resolver: &mut dyn ResolverExpand) {
+ let mut register = |name, kind| resolver.register_builtin_macro(name, kind);
+ macro register_bang($($name:ident: $f:expr,)*) {
+ $(register(sym::$name, SyntaxExtensionKind::LegacyBang(Box::new($f as MacroExpanderFn)));)*
+ }
+ macro register_attr($($name:ident: $f:expr,)*) {
+ $(register(sym::$name, SyntaxExtensionKind::LegacyAttr(Box::new($f)));)*
+ }
+ macro register_derive($($name:ident: $f:expr,)*) {
+ $(register(sym::$name, SyntaxExtensionKind::LegacyDerive(Box::new(BuiltinDerive($f))));)*
+ }
+
+ register_bang! {
+ asm: asm::expand_asm,
+ assert: assert::expand_assert,
+ cfg: cfg::expand_cfg,
+ column: source_util::expand_column,
+ compile_error: compile_error::expand_compile_error,
+ concat_bytes: concat_bytes::expand_concat_bytes,
+ concat_idents: concat_idents::expand_concat_idents,
+ concat: concat::expand_concat,
+ env: env::expand_env,
+ file: source_util::expand_file,
+ format_args_nl: format::expand_format_args_nl,
+ format_args: format::expand_format_args,
+ const_format_args: format::expand_format_args,
+ global_asm: asm::expand_global_asm,
+ include_bytes: source_util::expand_include_bytes,
+ include_str: source_util::expand_include_str,
+ include: source_util::expand_include,
+ line: source_util::expand_line,
+ log_syntax: log_syntax::expand_log_syntax,
+ module_path: source_util::expand_mod,
+ option_env: env::expand_option_env,
+ core_panic: edition_panic::expand_panic,
+ std_panic: edition_panic::expand_panic,
+ unreachable: edition_panic::expand_unreachable,
+ stringify: source_util::expand_stringify,
+ trace_macros: trace_macros::expand_trace_macros,
+ }
+
+ register_attr! {
+ bench: test::expand_bench,
+ cfg_accessible: cfg_accessible::Expander,
+ cfg_eval: cfg_eval::expand,
+ derive: derive::Expander,
+ global_allocator: global_allocator::expand,
+ test: test::expand_test,
+ test_case: test::expand_test_case,
+ }
+
+ register_derive! {
+ Clone: clone::expand_deriving_clone,
+ Copy: bounds::expand_deriving_copy,
+ Debug: debug::expand_deriving_debug,
+ Default: default::expand_deriving_default,
+ Eq: eq::expand_deriving_eq,
+ Hash: hash::expand_deriving_hash,
+ Ord: ord::expand_deriving_ord,
+ PartialEq: partial_eq::expand_deriving_partial_eq,
+ PartialOrd: partial_ord::expand_deriving_partial_ord,
+ RustcDecodable: decodable::expand_deriving_rustc_decodable,
+ RustcEncodable: encodable::expand_deriving_rustc_encodable,
+ }
+
+ let client = proc_macro::bridge::client::Client::expand1(proc_macro::quote);
+ register(sym::quote, SyntaxExtensionKind::Bang(Box::new(BangProcMacro { client })));
+}
diff --git a/compiler/rustc_builtin_macros/src/log_syntax.rs b/compiler/rustc_builtin_macros/src/log_syntax.rs
new file mode 100644
index 000000000..ede34a761
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/log_syntax.rs
@@ -0,0 +1,14 @@
+use rustc_ast::tokenstream::TokenStream;
+use rustc_ast_pretty::pprust;
+use rustc_expand::base;
+
+pub fn expand_log_syntax<'cx>(
+ _cx: &'cx mut base::ExtCtxt<'_>,
+ sp: rustc_span::Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ println!("{}", pprust::tts_to_string(&tts));
+
+ // `any_valid` so that `log_syntax` can be invoked as an expression or as an item.
+ base::DummyResult::any_valid(sp)
+}
diff --git a/compiler/rustc_builtin_macros/src/proc_macro_harness.rs b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
new file mode 100644
index 000000000..5cfda3349
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/proc_macro_harness.rs
@@ -0,0 +1,393 @@
+use std::mem;
+
+use rustc_ast::attr;
+use rustc_ast::ptr::P;
+use rustc_ast::visit::{self, Visitor};
+use rustc_ast::{self as ast, NodeId};
+use rustc_ast_pretty::pprust;
+use rustc_expand::base::{parse_macro_name_and_helper_attrs, ExtCtxt, ResolverExpand};
+use rustc_expand::expand::{AstFragment, ExpansionConfig};
+use rustc_session::Session;
+use rustc_span::hygiene::AstPass;
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use smallvec::smallvec;
+
+struct ProcMacroDerive {
+ id: NodeId,
+ trait_name: Symbol,
+ function_name: Ident,
+ span: Span,
+ attrs: Vec<Symbol>,
+}
+
+struct ProcMacroDef {
+ id: NodeId,
+ function_name: Ident,
+ span: Span,
+}
+
+enum ProcMacro {
+ Derive(ProcMacroDerive),
+ Attr(ProcMacroDef),
+ Bang(ProcMacroDef),
+}
+
+struct CollectProcMacros<'a> {
+ sess: &'a Session,
+ macros: Vec<ProcMacro>,
+ in_root: bool,
+ handler: &'a rustc_errors::Handler,
+ source_map: &'a SourceMap,
+ is_proc_macro_crate: bool,
+ is_test_crate: bool,
+}
+
+pub fn inject(
+ sess: &Session,
+ resolver: &mut dyn ResolverExpand,
+ mut krate: ast::Crate,
+ is_proc_macro_crate: bool,
+ has_proc_macro_decls: bool,
+ is_test_crate: bool,
+ handler: &rustc_errors::Handler,
+) -> ast::Crate {
+ let ecfg = ExpansionConfig::default("proc_macro".to_string());
+ let mut cx = ExtCtxt::new(sess, ecfg, resolver, None);
+
+ let mut collect = CollectProcMacros {
+ sess,
+ macros: Vec::new(),
+ in_root: true,
+ handler,
+ source_map: sess.source_map(),
+ is_proc_macro_crate,
+ is_test_crate,
+ };
+
+ if has_proc_macro_decls || is_proc_macro_crate {
+ visit::walk_crate(&mut collect, &krate);
+ }
+ let macros = collect.macros;
+
+ if !is_proc_macro_crate {
+ return krate;
+ }
+
+ if is_test_crate {
+ return krate;
+ }
+
+ let decls = mk_decls(&mut cx, &macros);
+ krate.items.push(decls);
+
+ krate
+}
+
+impl<'a> CollectProcMacros<'a> {
+ fn check_not_pub_in_root(&self, vis: &ast::Visibility, sp: Span) {
+ if self.is_proc_macro_crate && self.in_root && vis.kind.is_pub() {
+ self.handler.span_err(
+ sp,
+ "`proc-macro` crate types currently cannot export any items other \
+ than functions tagged with `#[proc_macro]`, `#[proc_macro_derive]`, \
+ or `#[proc_macro_attribute]`",
+ );
+ }
+ }
+
+ fn collect_custom_derive(&mut self, item: &'a ast::Item, attr: &'a ast::Attribute) {
+ let Some((trait_name, proc_attrs)) = parse_macro_name_and_helper_attrs(self.handler, attr, "derive") else {
+ return;
+ };
+
+ if self.in_root && item.vis.kind.is_pub() {
+ self.macros.push(ProcMacro::Derive(ProcMacroDerive {
+ id: item.id,
+ span: item.span,
+ trait_name,
+ function_name: item.ident,
+ attrs: proc_attrs,
+ }));
+ } else {
+ let msg = if !self.in_root {
+ "functions tagged with `#[proc_macro_derive]` must \
+ currently reside in the root of the crate"
+ } else {
+ "functions tagged with `#[proc_macro_derive]` must be `pub`"
+ };
+ self.handler.span_err(self.source_map.guess_head_span(item.span), msg);
+ }
+ }
+
+ fn collect_attr_proc_macro(&mut self, item: &'a ast::Item) {
+ if self.in_root && item.vis.kind.is_pub() {
+ self.macros.push(ProcMacro::Attr(ProcMacroDef {
+ id: item.id,
+ span: item.span,
+ function_name: item.ident,
+ }));
+ } else {
+ let msg = if !self.in_root {
+ "functions tagged with `#[proc_macro_attribute]` must \
+ currently reside in the root of the crate"
+ } else {
+ "functions tagged with `#[proc_macro_attribute]` must be `pub`"
+ };
+ self.handler.span_err(self.source_map.guess_head_span(item.span), msg);
+ }
+ }
+
+ fn collect_bang_proc_macro(&mut self, item: &'a ast::Item) {
+ if self.in_root && item.vis.kind.is_pub() {
+ self.macros.push(ProcMacro::Bang(ProcMacroDef {
+ id: item.id,
+ span: item.span,
+ function_name: item.ident,
+ }));
+ } else {
+ let msg = if !self.in_root {
+ "functions tagged with `#[proc_macro]` must \
+ currently reside in the root of the crate"
+ } else {
+ "functions tagged with `#[proc_macro]` must be `pub`"
+ };
+ self.handler.span_err(self.source_map.guess_head_span(item.span), msg);
+ }
+ }
+}
+
+impl<'a> Visitor<'a> for CollectProcMacros<'a> {
+ fn visit_item(&mut self, item: &'a ast::Item) {
+ if let ast::ItemKind::MacroDef(..) = item.kind {
+ if self.is_proc_macro_crate && self.sess.contains_name(&item.attrs, sym::macro_export) {
+ let msg =
+ "cannot export macro_rules! macros from a `proc-macro` crate type currently";
+ self.handler.span_err(self.source_map.guess_head_span(item.span), msg);
+ }
+ }
+
+ // First up, make sure we're checking a bare function. If we're not,
+ // then we're just not interested in this item.
+ //
+ // If we find one, try to locate a `#[proc_macro_derive]` attribute on it.
+ let is_fn = matches!(item.kind, ast::ItemKind::Fn(..));
+
+ let mut found_attr: Option<&'a ast::Attribute> = None;
+
+ for attr in &item.attrs {
+ if self.sess.is_proc_macro_attr(&attr) {
+ if let Some(prev_attr) = found_attr {
+ let prev_item = prev_attr.get_normal_item();
+ let item = attr.get_normal_item();
+ let path_str = pprust::path_to_string(&item.path);
+ let msg = if item.path.segments[0].ident.name
+ == prev_item.path.segments[0].ident.name
+ {
+ format!(
+ "only one `#[{}]` attribute is allowed on any given function",
+ path_str,
+ )
+ } else {
+ format!(
+ "`#[{}]` and `#[{}]` attributes cannot both be applied \
+ to the same function",
+ path_str,
+ pprust::path_to_string(&prev_item.path),
+ )
+ };
+
+ self.handler
+ .struct_span_err(attr.span, &msg)
+ .span_label(prev_attr.span, "previous attribute here")
+ .emit();
+
+ return;
+ }
+
+ found_attr = Some(attr);
+ }
+ }
+
+ let Some(attr) = found_attr else {
+ self.check_not_pub_in_root(&item.vis, self.source_map.guess_head_span(item.span));
+ let prev_in_root = mem::replace(&mut self.in_root, false);
+ visit::walk_item(self, item);
+ self.in_root = prev_in_root;
+ return;
+ };
+
+ if !is_fn {
+ let msg = format!(
+ "the `#[{}]` attribute may only be used on bare functions",
+ pprust::path_to_string(&attr.get_normal_item().path),
+ );
+
+ self.handler.span_err(attr.span, &msg);
+ return;
+ }
+
+ if self.is_test_crate {
+ return;
+ }
+
+ if !self.is_proc_macro_crate {
+ let msg = format!(
+ "the `#[{}]` attribute is only usable with crates of the `proc-macro` crate type",
+ pprust::path_to_string(&attr.get_normal_item().path),
+ );
+
+ self.handler.span_err(attr.span, &msg);
+ return;
+ }
+
+ if attr.has_name(sym::proc_macro_derive) {
+ self.collect_custom_derive(item, attr);
+ } else if attr.has_name(sym::proc_macro_attribute) {
+ self.collect_attr_proc_macro(item);
+ } else if attr.has_name(sym::proc_macro) {
+ self.collect_bang_proc_macro(item);
+ };
+
+ let prev_in_root = mem::replace(&mut self.in_root, false);
+ visit::walk_item(self, item);
+ self.in_root = prev_in_root;
+ }
+}
+
+// Creates a new anonymous constant which looks like:
+//
+// const _: () = {
+// extern crate proc_macro;
+//
+// use proc_macro::bridge::client::ProcMacro;
+//
+// #[rustc_proc_macro_decls]
+// #[allow(deprecated)]
+// static DECLS: &[ProcMacro] = &[
+// ProcMacro::custom_derive($name_trait1, &[], ::$name1),
+// ProcMacro::custom_derive($name_trait2, &["attribute_name"], ::$name2),
+// // ...
+// ];
+// };
+fn mk_decls(cx: &mut ExtCtxt<'_>, macros: &[ProcMacro]) -> P<ast::Item> {
+ let expn_id = cx.resolver.expansion_for_ast_pass(
+ DUMMY_SP,
+ AstPass::ProcMacroHarness,
+ &[sym::rustc_attrs, sym::proc_macro_internals],
+ None,
+ );
+ let span = DUMMY_SP.with_def_site_ctxt(expn_id.to_expn_id());
+
+ let proc_macro = Ident::new(sym::proc_macro, span);
+ let krate = cx.item(span, proc_macro, Vec::new(), ast::ItemKind::ExternCrate(None));
+
+ let bridge = Ident::new(sym::bridge, span);
+ let client = Ident::new(sym::client, span);
+ let proc_macro_ty = Ident::new(sym::ProcMacro, span);
+ let custom_derive = Ident::new(sym::custom_derive, span);
+ let attr = Ident::new(sym::attr, span);
+ let bang = Ident::new(sym::bang, span);
+
+ // We add NodeIds to `resolver.proc_macros` in the order that we generate
+ // expressions. The position of each NodeId in the `proc_macros` Vec
+ // corresponds to its position in the static array that will be generated.
+ let decls = macros
+ .iter()
+ .map(|m| {
+ let harness_span = span;
+ let span = match m {
+ ProcMacro::Derive(m) => m.span,
+ ProcMacro::Attr(m) | ProcMacro::Bang(m) => m.span,
+ };
+ let local_path = |cx: &ExtCtxt<'_>, name| cx.expr_path(cx.path(span, vec![name]));
+ let proc_macro_ty_method_path = |cx: &ExtCtxt<'_>, method| {
+ cx.expr_path(cx.path(
+ span.with_ctxt(harness_span.ctxt()),
+ vec![proc_macro, bridge, client, proc_macro_ty, method],
+ ))
+ };
+ match m {
+ ProcMacro::Derive(cd) => {
+ cx.resolver.declare_proc_macro(cd.id);
+ cx.expr_call(
+ span,
+ proc_macro_ty_method_path(cx, custom_derive),
+ vec![
+ cx.expr_str(span, cd.trait_name),
+ cx.expr_array_ref(
+ span,
+ cd.attrs.iter().map(|&s| cx.expr_str(span, s)).collect::<Vec<_>>(),
+ ),
+ local_path(cx, cd.function_name),
+ ],
+ )
+ }
+ ProcMacro::Attr(ca) | ProcMacro::Bang(ca) => {
+ cx.resolver.declare_proc_macro(ca.id);
+ let ident = match m {
+ ProcMacro::Attr(_) => attr,
+ ProcMacro::Bang(_) => bang,
+ ProcMacro::Derive(_) => unreachable!(),
+ };
+
+ cx.expr_call(
+ span,
+ proc_macro_ty_method_path(cx, ident),
+ vec![
+ cx.expr_str(span, ca.function_name.name),
+ local_path(cx, ca.function_name),
+ ],
+ )
+ }
+ }
+ })
+ .collect();
+
+ let decls_static = cx
+ .item_static(
+ span,
+ Ident::new(sym::_DECLS, span),
+ cx.ty_rptr(
+ span,
+ cx.ty(
+ span,
+ ast::TyKind::Slice(
+ cx.ty_path(cx.path(span, vec![proc_macro, bridge, client, proc_macro_ty])),
+ ),
+ ),
+ None,
+ ast::Mutability::Not,
+ ),
+ ast::Mutability::Not,
+ cx.expr_array_ref(span, decls),
+ )
+ .map(|mut i| {
+ let attr = cx.meta_word(span, sym::rustc_proc_macro_decls);
+ i.attrs.push(cx.attribute(attr));
+
+ let deprecated_attr = attr::mk_nested_word_item(Ident::new(sym::deprecated, span));
+ let allow_deprecated_attr =
+ attr::mk_list_item(Ident::new(sym::allow, span), vec![deprecated_attr]);
+ i.attrs.push(cx.attribute(allow_deprecated_attr));
+
+ i
+ });
+
+ let block = cx.expr_block(
+ cx.block(span, vec![cx.stmt_item(span, krate), cx.stmt_item(span, decls_static)]),
+ );
+
+ let anon_constant = cx.item_const(
+ span,
+ Ident::new(kw::Underscore, span),
+ cx.ty(span, ast::TyKind::Tup(Vec::new())),
+ block,
+ );
+
+ // Integrate the new item into existing module structures.
+ let items = AstFragment::Items(smallvec![anon_constant]);
+ cx.monotonic_expander().fully_expand_fragment(items).make_items().pop().unwrap()
+}
diff --git a/compiler/rustc_builtin_macros/src/source_util.rs b/compiler/rustc_builtin_macros/src/source_util.rs
new file mode 100644
index 000000000..8bf3a0799
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/source_util.rs
@@ -0,0 +1,225 @@
+use rustc_ast as ast;
+use rustc_ast::ptr::P;
+use rustc_ast::token;
+use rustc_ast::tokenstream::TokenStream;
+use rustc_ast_pretty::pprust;
+use rustc_expand::base::{self, *};
+use rustc_expand::module::DirOwnership;
+use rustc_parse::parser::{ForceCollect, Parser};
+use rustc_parse::{self, new_parser_from_file};
+use rustc_session::lint::builtin::INCOMPLETE_INCLUDE;
+use rustc_span::symbol::Symbol;
+use rustc_span::{self, Pos, Span};
+
+use smallvec::SmallVec;
+use std::rc::Rc;
+
+// These macros all relate to the file system; they either return
+// the line/column/filename of the expression, or they include
+// a given file into the current one.
+
+/// line!(): expands to the current line number
+pub fn expand_line(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let sp = cx.with_def_site_ctxt(sp);
+ base::check_zero_tts(cx, sp, tts, "line!");
+
+ let topmost = cx.expansion_cause().unwrap_or(sp);
+ let loc = cx.source_map().lookup_char_pos(topmost.lo());
+
+ base::MacEager::expr(cx.expr_u32(topmost, loc.line as u32))
+}
+
+/// column!(): expands to the current column number
+pub fn expand_column(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let sp = cx.with_def_site_ctxt(sp);
+ base::check_zero_tts(cx, sp, tts, "column!");
+
+ let topmost = cx.expansion_cause().unwrap_or(sp);
+ let loc = cx.source_map().lookup_char_pos(topmost.lo());
+
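+ // `loc.col` is zero-based, while `column!()` is one-based, hence the `+ 1`.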
+ base::MacEager::expr(cx.expr_u32(topmost, loc.col.to_usize() as u32 + 1))
+}
+
+/// file!(): expands to the current filename
+/// The source_file (`loc.file`) contains a bunch more information we could spit
+/// out if we wanted.
+pub fn expand_file(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let sp = cx.with_def_site_ctxt(sp);
+ base::check_zero_tts(cx, sp, tts, "file!");
+
+ let topmost = cx.expansion_cause().unwrap_or(sp);
+ let loc = cx.source_map().lookup_char_pos(topmost.lo());
+ base::MacEager::expr(
+ cx.expr_str(topmost, Symbol::intern(&loc.file.name.prefer_remapped().to_string_lossy())),
+ )
+}
+
+pub fn expand_stringify(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let sp = cx.with_def_site_ctxt(sp);
+ let s = pprust::tts_to_string(&tts);
+ base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&s)))
+}
+
+pub fn expand_mod(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let sp = cx.with_def_site_ctxt(sp);
+ base::check_zero_tts(cx, sp, tts, "module_path!");
+ let mod_path = &cx.current_expansion.module.mod_path;
+ let string = mod_path.iter().map(|x| x.to_string()).collect::<Vec<String>>().join("::");
+
+ base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&string)))
+}
+
+/// include!(): parse the given file as an expr or as a sequence of items
+/// This is generally a bad idea because it's going to behave
+/// unhygienically.
+pub fn expand_include<'cx>(
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'cx> {
+ let sp = cx.with_def_site_ctxt(sp);
+ let Some(file) = get_single_str_from_tts(cx, sp, tts, "include!") else {
+ return DummyResult::any(sp);
+ };
+ // The file will be added to the source map by the parser
+ let file = match resolve_path(&cx.sess.parse_sess, file.as_str(), sp) {
+ Ok(f) => f,
+ Err(mut err) => {
+ err.emit();
+ return DummyResult::any(sp);
+ }
+ };
+ let p = new_parser_from_file(cx.parse_sess(), &file, Some(sp));
+
+ // If in the included file we have e.g., `mod bar;`,
+ // then the path of `bar.rs` should be relative to the directory of `file`.
+ // See https://github.com/rust-lang/rust/pull/69838/files#r395217057 for a discussion.
+ // `MacroExpander::fully_expand_fragment` later restores, so "stack discipline" is maintained.
+ let dir_path = file.parent().unwrap_or(&file).to_owned();
+ cx.current_expansion.module = Rc::new(cx.current_expansion.module.with_dir_path(dir_path));
+ cx.current_expansion.dir_ownership = DirOwnership::Owned { relative: None };
+
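+ // `include!` can legitimately expand to either a single expression or a
+ // sequence of items, so parsing is deferred until the caller requests a
+ // specific fragment kind via `make_expr` or `make_items` below.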
+ struct ExpandResult<'a> {
+ p: Parser<'a>,
+ node_id: ast::NodeId,
+ }
+ impl<'a> base::MacResult for ExpandResult<'a> {
+ fn make_expr(mut self: Box<ExpandResult<'a>>) -> Option<P<ast::Expr>> {
+ let r = base::parse_expr(&mut self.p)?;
+ if self.p.token != token::Eof {
+ self.p.sess.buffer_lint(
+ &INCOMPLETE_INCLUDE,
+ self.p.token.span,
+ self.node_id,
+ "include macro expected single expression in source",
+ );
+ }
+ Some(r)
+ }
+
+ fn make_items(mut self: Box<ExpandResult<'a>>) -> Option<SmallVec<[P<ast::Item>; 1]>> {
+ let mut ret = SmallVec::new();
+ loop {
+ match self.p.parse_item(ForceCollect::No) {
+ Err(mut err) => {
+ err.emit();
+ break;
+ }
+ Ok(Some(item)) => ret.push(item),
+ Ok(None) => {
+ if self.p.token != token::Eof {
+ let token = pprust::token_to_string(&self.p.token);
+ let msg = format!("expected item, found `{}`", token);
+ self.p.struct_span_err(self.p.token.span, &msg).emit();
+ }
+
+ break;
+ }
+ }
+ }
+ Some(ret)
+ }
+ }
+
+ Box::new(ExpandResult { p, node_id: cx.current_expansion.lint_node_id })
+}
+
+// include_str!(): read the given file, insert it as a literal string expr
+pub fn expand_include_str(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let sp = cx.with_def_site_ctxt(sp);
+ let Some(file) = get_single_str_from_tts(cx, sp, tts, "include_str!") else {
+ return DummyResult::any(sp);
+ };
+ let file = match resolve_path(&cx.sess.parse_sess, file.as_str(), sp) {
+ Ok(f) => f,
+ Err(mut err) => {
+ err.emit();
+ return DummyResult::any(sp);
+ }
+ };
+ match cx.source_map().load_binary_file(&file) {
+ Ok(bytes) => match std::str::from_utf8(&bytes) {
+ Ok(src) => {
+ let interned_src = Symbol::intern(&src);
+ base::MacEager::expr(cx.expr_str(sp, interned_src))
+ }
+ Err(_) => {
+ cx.span_err(sp, &format!("{} wasn't a utf-8 file", file.display()));
+ DummyResult::any(sp)
+ }
+ },
+ Err(e) => {
+ cx.span_err(sp, &format!("couldn't read {}: {}", file.display(), e));
+ DummyResult::any(sp)
+ }
+ }
+}
+
+pub fn expand_include_bytes(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let sp = cx.with_def_site_ctxt(sp);
+ let Some(file) = get_single_str_from_tts(cx, sp, tts, "include_bytes!") else {
+ return DummyResult::any(sp);
+ };
+ let file = match resolve_path(&cx.sess.parse_sess, file.as_str(), sp) {
+ Ok(f) => f,
+ Err(mut err) => {
+ err.emit();
+ return DummyResult::any(sp);
+ }
+ };
+ match cx.source_map().load_binary_file(&file) {
+ Ok(bytes) => base::MacEager::expr(cx.expr_lit(sp, ast::LitKind::ByteStr(bytes.into()))),
+ Err(e) => {
+ cx.span_err(sp, &format!("couldn't read {}: {}", file.display(), e));
+ DummyResult::any(sp)
+ }
+ }
+}
diff --git a/compiler/rustc_builtin_macros/src/standard_library_imports.rs b/compiler/rustc_builtin_macros/src/standard_library_imports.rs
new file mode 100644
index 000000000..09ad5f9b3
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/standard_library_imports.rs
@@ -0,0 +1,92 @@
+use rustc_ast as ast;
+use rustc_expand::base::{ExtCtxt, ResolverExpand};
+use rustc_expand::expand::ExpansionConfig;
+use rustc_session::Session;
+use rustc_span::edition::Edition::*;
+use rustc_span::hygiene::AstPass;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::DUMMY_SP;
+
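+// Prepends the implicit standard-library items to the crate. As a rough
+// sketch, an ordinary 2021-edition crate gains:
+//
+//     #[prelude_import]
+//     use std::prelude::rust_2021::*;
+//
+//     #[macro_use]
+//     extern crate std;
+//
+// while `#[no_std]` crates pull in `core` (and `compiler_builtins`) instead.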
+pub fn inject(
+ mut krate: ast::Crate,
+ resolver: &mut dyn ResolverExpand,
+ sess: &Session,
+) -> ast::Crate {
+ let edition = sess.parse_sess.edition;
+
+ // the first name in this list is the crate name of the crate with the prelude
+ let names: &[Symbol] = if sess.contains_name(&krate.attrs, sym::no_core) {
+ return krate;
+ } else if sess.contains_name(&krate.attrs, sym::no_std) {
+ if sess.contains_name(&krate.attrs, sym::compiler_builtins) {
+ &[sym::core]
+ } else {
+ &[sym::core, sym::compiler_builtins]
+ }
+ } else {
+ &[sym::std]
+ };
+
+ let expn_id = resolver.expansion_for_ast_pass(
+ DUMMY_SP,
+ AstPass::StdImports,
+ &[sym::prelude_import],
+ None,
+ );
+ let span = DUMMY_SP.with_def_site_ctxt(expn_id.to_expn_id());
+ let call_site = DUMMY_SP.with_call_site_ctxt(expn_id.to_expn_id());
+
+ let ecfg = ExpansionConfig::default("std_lib_injection".to_string());
+ let cx = ExtCtxt::new(sess, ecfg, resolver, None);
+
+ // .rev() to preserve ordering above in combination with insert(0, ...)
+ for &name in names.iter().rev() {
+ let ident = if edition >= Edition2018 {
+ Ident::new(name, span)
+ } else {
+ Ident::new(name, call_site)
+ };
+ krate.items.insert(
+ 0,
+ cx.item(
+ span,
+ ident,
+ vec![cx.attribute(cx.meta_word(span, sym::macro_use))],
+ ast::ItemKind::ExternCrate(None),
+ ),
+ );
+ }
+
+ // The crates have been injected; the assumption is that the first one is
+ // the one with the prelude.
+ let name = names[0];
+
+ let root = (edition == Edition2015).then(|| kw::PathRoot);
+
+ let import_path = root
+ .iter()
+ .chain(&[name, sym::prelude])
+ .chain(&[match edition {
+ Edition2015 => sym::rust_2015,
+ Edition2018 => sym::rust_2018,
+ Edition2021 => sym::rust_2021,
+ Edition2024 => sym::rust_2024,
+ }])
+ .map(|&symbol| Ident::new(symbol, span))
+ .collect();
+
+ let use_item = cx.item(
+ span,
+ Ident::empty(),
+ vec![cx.attribute(cx.meta_word(span, sym::prelude_import))],
+ ast::ItemKind::Use(ast::UseTree {
+ prefix: cx.path(span, import_path),
+ kind: ast::UseTreeKind::Glob,
+ span,
+ }),
+ );
+
+ krate.items.insert(0, use_item);
+
+ krate
+}
diff --git a/compiler/rustc_builtin_macros/src/test.rs b/compiler/rustc_builtin_macros/src/test.rs
new file mode 100644
index 000000000..e20375689
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/test.rs
@@ -0,0 +1,529 @@
+//! The expansion from a test function to the appropriate test struct for libtest.
+//! Ideally, this code would be in libtest but for efficiency and error messages it lives here.
+use crate::util::{check_builtin_macro_attribute, warn_on_duplicate_attribute};
+
+use rustc_ast as ast;
+use rustc_ast::attr;
+use rustc_ast::ptr::P;
+use rustc_ast_pretty::pprust;
+use rustc_errors::Applicability;
+use rustc_expand::base::*;
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::Span;
+
+use std::iter;
+
+// #[test_case] is used by custom test authors to mark tests.
+// When building for test, it needs to make the item public and gensym the name.
+// Otherwise, we'll omit the item. This behavior means that any item annotated
+// with #[test_case] is never addressable.
+//
+// We mark the item with an inert attribute "rustc_test_marker" which the test
+// generation logic will pick up on.
+pub fn expand_test_case(
+ ecx: &mut ExtCtxt<'_>,
+ attr_sp: Span,
+ meta_item: &ast::MetaItem,
+ anno_item: Annotatable,
+) -> Vec<Annotatable> {
+ check_builtin_macro_attribute(ecx, meta_item, sym::test_case);
+ warn_on_duplicate_attribute(&ecx, &anno_item, sym::test_case);
+
+ if !ecx.ecfg.should_test {
+ return vec![];
+ }
+
+ let sp = ecx.with_def_site_ctxt(attr_sp);
+ let mut item = anno_item.expect_item();
+ item = item.map(|mut item| {
+ item.vis = ast::Visibility {
+ span: item.vis.span,
+ kind: ast::VisibilityKind::Public,
+ tokens: None,
+ };
+ item.ident.span = item.ident.span.with_ctxt(sp.ctxt());
+ item.attrs.push(ecx.attribute(ecx.meta_word(sp, sym::rustc_test_marker)));
+ item
+ });
+
+ return vec![Annotatable::Item(item)];
+}
+
+pub fn expand_test(
+ cx: &mut ExtCtxt<'_>,
+ attr_sp: Span,
+ meta_item: &ast::MetaItem,
+ item: Annotatable,
+) -> Vec<Annotatable> {
+ check_builtin_macro_attribute(cx, meta_item, sym::test);
+ warn_on_duplicate_attribute(&cx, &item, sym::test);
+ expand_test_or_bench(cx, attr_sp, item, false)
+}
+
+pub fn expand_bench(
+ cx: &mut ExtCtxt<'_>,
+ attr_sp: Span,
+ meta_item: &ast::MetaItem,
+ item: Annotatable,
+) -> Vec<Annotatable> {
+ check_builtin_macro_attribute(cx, meta_item, sym::bench);
+ warn_on_duplicate_attribute(&cx, &item, sym::bench);
+ expand_test_or_bench(cx, attr_sp, item, true)
+}
+
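+// Shared expansion for `#[test]` and `#[bench]`. As a rough sketch, for an
+// illustrative top-level `#[test] fn foo() {}` this emits, alongside the
+// original function:
+//
+//     extern crate test;
+//
+//     #[cfg(test)]
+//     #[rustc_test_marker]
+//     pub const foo: test::TestDescAndFn = test::TestDescAndFn {
+//         desc: test::TestDesc { name: test::StaticTestName("foo"), /* ... */ },
+//         testfn: test::StaticTestFn(|| test::assert_test_result(foo())),
+//     };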
+pub fn expand_test_or_bench(
+ cx: &mut ExtCtxt<'_>,
+ attr_sp: Span,
+ item: Annotatable,
+ is_bench: bool,
+) -> Vec<Annotatable> {
+ // If we're not in test configuration, remove the annotated item
+ if !cx.ecfg.should_test {
+ return vec![];
+ }
+
+ let (item, is_stmt) = match item {
+ Annotatable::Item(i) => (i, false),
+ Annotatable::Stmt(stmt) if matches!(stmt.kind, ast::StmtKind::Item(_)) => {
+ // FIXME: Use an 'if let' guard once they are implemented
+ if let ast::StmtKind::Item(i) = stmt.into_inner().kind {
+ (i, true)
+ } else {
+ unreachable!()
+ }
+ }
+ other => {
+ cx.struct_span_err(
+ other.span(),
+ "`#[test]` attribute is only allowed on non associated functions",
+ )
+ .emit();
+ return vec![other];
+ }
+ };
+
+ // Note: non-associated fn items are already handled by `expand_test_or_bench`
+ if !matches!(item.kind, ast::ItemKind::Fn(_)) {
+ let diag = &cx.sess.parse_sess.span_diagnostic;
+ let msg = "the `#[test]` attribute may only be used on a non-associated function";
+ let mut err = match item.kind {
+ // These were a warning before #92959 and need to continue being that to avoid breaking
+ // stable user code (#94508).
+ ast::ItemKind::MacCall(_) => diag.struct_span_warn(attr_sp, msg),
+ // `.forget_guarantee()` needed to get these two arms to match types. Because of how
+ // locally close the `.emit()` call is I'm comfortable with it, but if it can be
+ // reworked in the future to not need it, it'd be nice.
+ _ => diag.struct_span_err(attr_sp, msg).forget_guarantee(),
+ };
+ err.span_label(attr_sp, "the `#[test]` macro causes a function to be run as a test and has no effect on non-functions")
+ .span_label(item.span, format!("expected a non-associated function, found {} {}", item.kind.article(), item.kind.descr()))
+ .span_suggestion(attr_sp, "replace with conditional compilation to make the item only exist when tests are being run", "#[cfg(test)]", Applicability::MaybeIncorrect)
+ .emit();
+
+ return vec![Annotatable::Item(item)];
+ }
+
+ // has_*_signature will report any errors in the type, so compilation
+ // will fail. We shouldn't try to expand in this case because the errors
+ // would be spurious.
+ if (!is_bench && !has_test_signature(cx, &item))
+ || (is_bench && !has_bench_signature(cx, &item))
+ {
+ return vec![Annotatable::Item(item)];
+ }
+
+ let (sp, attr_sp) = (cx.with_def_site_ctxt(item.span), cx.with_def_site_ctxt(attr_sp));
+
+ let test_id = Ident::new(sym::test, attr_sp);
+
+ // creates test::$name
+ let test_path = |name| cx.path(sp, vec![test_id, Ident::from_str_and_span(name, sp)]);
+
+ // creates test::ShouldPanic::$name
+ let should_panic_path = |name| {
+ cx.path(
+ sp,
+ vec![
+ test_id,
+ Ident::from_str_and_span("ShouldPanic", sp),
+ Ident::from_str_and_span(name, sp),
+ ],
+ )
+ };
+
+ // creates test::TestType::$name
+ let test_type_path = |name| {
+ cx.path(
+ sp,
+ vec![
+ test_id,
+ Ident::from_str_and_span("TestType", sp),
+ Ident::from_str_and_span(name, sp),
+ ],
+ )
+ };
+
+ // creates $name: $expr
+ let field = |name, expr| cx.field_imm(sp, Ident::from_str_and_span(name, sp), expr);
+
+ let test_fn = if is_bench {
+ // A simple ident for a lambda
+ let b = Ident::from_str_and_span("b", attr_sp);
+
+ cx.expr_call(
+ sp,
+ cx.expr_path(test_path("StaticBenchFn")),
+ vec![
+ // |b| self::test::assert_test_result(
+ cx.lambda1(
+ sp,
+ cx.expr_call(
+ sp,
+ cx.expr_path(test_path("assert_test_result")),
+ vec![
+ // super::$test_fn(b)
+ cx.expr_call(
+ sp,
+ cx.expr_path(cx.path(sp, vec![item.ident])),
+ vec![cx.expr_ident(sp, b)],
+ ),
+ ],
+ ),
+ b,
+ ), // )
+ ],
+ )
+ } else {
+ cx.expr_call(
+ sp,
+ cx.expr_path(test_path("StaticTestFn")),
+ vec![
+ // || {
+ cx.lambda0(
+ sp,
+ // test::assert_test_result(
+ cx.expr_call(
+ sp,
+ cx.expr_path(test_path("assert_test_result")),
+ vec![
+ // $test_fn()
+ cx.expr_call(sp, cx.expr_path(cx.path(sp, vec![item.ident])), vec![]), // )
+ ],
+ ), // }
+ ), // )
+ ],
+ )
+ };
+
+ let mut test_const = cx.item(
+ sp,
+ Ident::new(item.ident.name, sp),
+ vec![
+ // #[cfg(test)]
+ cx.attribute(attr::mk_list_item(
+ Ident::new(sym::cfg, attr_sp),
+ vec![attr::mk_nested_word_item(Ident::new(sym::test, attr_sp))],
+ )),
+ // #[rustc_test_marker]
+ cx.attribute(cx.meta_word(attr_sp, sym::rustc_test_marker)),
+ ],
+ // const $ident: test::TestDescAndFn =
+ ast::ItemKind::Const(
+ ast::Defaultness::Final,
+ cx.ty(sp, ast::TyKind::Path(None, test_path("TestDescAndFn"))),
+ // test::TestDescAndFn {
+ Some(
+ cx.expr_struct(
+ sp,
+ test_path("TestDescAndFn"),
+ vec![
+ // desc: test::TestDesc {
+ field(
+ "desc",
+ cx.expr_struct(
+ sp,
+ test_path("TestDesc"),
+ vec![
+ // name: "path::to::test"
+ field(
+ "name",
+ cx.expr_call(
+ sp,
+ cx.expr_path(test_path("StaticTestName")),
+ vec![cx.expr_str(
+ sp,
+ Symbol::intern(&item_path(
+ // skip the name of the root module
+ &cx.current_expansion.module.mod_path[1..],
+ &item.ident,
+ )),
+ )],
+ ),
+ ),
+ // ignore: true | false
+ field(
+ "ignore",
+ cx.expr_bool(sp, should_ignore(&cx.sess, &item)),
+ ),
+ // ignore_message: Some("...") | None
+ field(
+ "ignore_message",
+ if let Some(msg) = should_ignore_message(cx, &item) {
+ cx.expr_some(sp, cx.expr_str(sp, msg))
+ } else {
+ cx.expr_none(sp)
+ },
+ ),
+ // compile_fail: true | false
+ field("compile_fail", cx.expr_bool(sp, false)),
+ // no_run: true | false
+ field("no_run", cx.expr_bool(sp, false)),
+ // should_panic: ...
+ field(
+ "should_panic",
+ match should_panic(cx, &item) {
+ // test::ShouldPanic::No
+ ShouldPanic::No => {
+ cx.expr_path(should_panic_path("No"))
+ }
+ // test::ShouldPanic::Yes
+ ShouldPanic::Yes(None) => {
+ cx.expr_path(should_panic_path("Yes"))
+ }
+ // test::ShouldPanic::YesWithMessage("...")
+ ShouldPanic::Yes(Some(sym)) => cx.expr_call(
+ sp,
+ cx.expr_path(should_panic_path("YesWithMessage")),
+ vec![cx.expr_str(sp, sym)],
+ ),
+ },
+ ),
+ // test_type: ...
+ field(
+ "test_type",
+ match test_type(cx) {
+ // test::TestType::UnitTest
+ TestType::UnitTest => {
+ cx.expr_path(test_type_path("UnitTest"))
+ }
+ // test::TestType::IntegrationTest
+ TestType::IntegrationTest => {
+ cx.expr_path(test_type_path("IntegrationTest"))
+ }
+ // test::TestPath::Unknown
+ TestType::Unknown => {
+ cx.expr_path(test_type_path("Unknown"))
+ }
+ },
+ ),
+ // },
+ ],
+ ),
+ ),
+ // testfn: test::StaticTestFn(...) | test::StaticBenchFn(...)
+ field("testfn", test_fn), // }
+ ],
+ ), // }
+ ),
+ ),
+ );
+ test_const = test_const.map(|mut tc| {
+ tc.vis.kind = ast::VisibilityKind::Public;
+ tc
+ });
+
+ // extern crate test
+ let test_extern = cx.item(sp, test_id, vec![], ast::ItemKind::ExternCrate(None));
+
+ tracing::debug!("synthetic test item:\n{}\n", pprust::item_to_string(&test_const));
+
+ if is_stmt {
+ vec![
+ // Access to libtest under a hygienic name
+ Annotatable::Stmt(P(cx.stmt_item(sp, test_extern))),
+ // The generated test case
+ Annotatable::Stmt(P(cx.stmt_item(sp, test_const))),
+ // The original item
+ Annotatable::Stmt(P(cx.stmt_item(sp, item))),
+ ]
+ } else {
+ vec![
+ // Access to libtest under a hygienic name
+ Annotatable::Item(test_extern),
+ // The generated test case
+ Annotatable::Item(test_const),
+ // The original item
+ Annotatable::Item(item),
+ ]
+ }
+}
+
+fn item_path(mod_path: &[Ident], item_ident: &Ident) -> String {
+ mod_path
+ .iter()
+ .chain(iter::once(item_ident))
+ .map(|x| x.to_string())
+ .collect::<Vec<String>>()
+ .join("::")
+}
+
+enum ShouldPanic {
+ No,
+ Yes(Option<Symbol>),
+}
+
+fn should_ignore(sess: &Session, i: &ast::Item) -> bool {
+ sess.contains_name(&i.attrs, sym::ignore)
+}
+
+fn should_ignore_message(cx: &ExtCtxt<'_>, i: &ast::Item) -> Option<Symbol> {
+ match cx.sess.find_by_name(&i.attrs, sym::ignore) {
+ Some(attr) => {
+ match attr.meta_item_list() {
+ // Handle #[ignore(bar = "foo")]
+ Some(_) => None,
+ // Handle #[ignore] and #[ignore = "message"]
+ None => attr.value_str(),
+ }
+ }
+ None => None,
+ }
+}
+
+fn should_panic(cx: &ExtCtxt<'_>, i: &ast::Item) -> ShouldPanic {
+ match cx.sess.find_by_name(&i.attrs, sym::should_panic) {
+ Some(attr) => {
+ let sd = &cx.sess.parse_sess.span_diagnostic;
+
+ match attr.meta_item_list() {
+ // Handle #[should_panic(expected = "foo")]
+ Some(list) => {
+ let msg = list
+ .iter()
+ .find(|mi| mi.has_name(sym::expected))
+ .and_then(|mi| mi.meta_item())
+ .and_then(|mi| mi.value_str());
+ if list.len() != 1 || msg.is_none() {
+ sd.struct_span_warn(
+ attr.span,
+ "argument must be of the form: \
+ `expected = \"error message\"`",
+ )
+ .note(
+ "errors in this attribute were erroneously \
+ allowed and will become a hard error in a \
+ future release",
+ )
+ .emit();
+ ShouldPanic::Yes(None)
+ } else {
+ ShouldPanic::Yes(msg)
+ }
+ }
+ // Handle #[should_panic] and #[should_panic = "expected"]
+ None => ShouldPanic::Yes(attr.value_str()),
+ }
+ }
+ None => ShouldPanic::No,
+ }
+}
+
+enum TestType {
+ UnitTest,
+ IntegrationTest,
+ Unknown,
+}
+
+/// Attempts to determine the type of test.
+/// Since doctests are created without macro expansion, the only possible
+/// variants here are `UnitTest`, `IntegrationTest` or `Unknown`.
+fn test_type(cx: &ExtCtxt<'_>) -> TestType {
+ // Root path from context contains the topmost sources directory of the crate.
+ // I.e., for `project` with sources in `src` and tests in `tests` folders
+ // (no matter how many nested folders lie inside),
+ // there will be two different root paths: `/project/src` and `/project/tests`.
+ let crate_path = cx.root_path.as_path();
+
+ if crate_path.ends_with("src") {
+ // `/src` folder contains unit-tests.
+ TestType::UnitTest
+ } else if crate_path.ends_with("tests") {
+ // `/tests` folder contains integration tests.
+ TestType::IntegrationTest
+ } else {
+ // Crate layout doesn't match expected one, test type is unknown.
+ TestType::Unknown
+ }
+}
+
+fn has_test_signature(cx: &ExtCtxt<'_>, i: &ast::Item) -> bool {
+ let has_should_panic_attr = cx.sess.contains_name(&i.attrs, sym::should_panic);
+ let sd = &cx.sess.parse_sess.span_diagnostic;
+ if let ast::ItemKind::Fn(box ast::Fn { ref sig, ref generics, .. }) = i.kind {
+ if let ast::Unsafe::Yes(span) = sig.header.unsafety {
+ sd.struct_span_err(i.span, "unsafe functions cannot be used for tests")
+ .span_label(span, "`unsafe` because of this")
+ .emit();
+ return false;
+ }
+ if let ast::Async::Yes { span, .. } = sig.header.asyncness {
+ sd.struct_span_err(i.span, "async functions cannot be used for tests")
+ .span_label(span, "`async` because of this")
+ .emit();
+ return false;
+ }
+
+ // If the termination trait is active, the compiler will check that the output
+ // type implements the `Termination` trait as `libtest` enforces that.
+ let has_output = match sig.decl.output {
+ ast::FnRetTy::Default(..) => false,
+ ast::FnRetTy::Ty(ref t) if t.kind.is_unit() => false,
+ _ => true,
+ };
+
+ if !sig.decl.inputs.is_empty() {
+ sd.span_err(i.span, "functions used as tests cannot have any arguments");
+ return false;
+ }
+
+ match (has_output, has_should_panic_attr) {
+ (true, true) => {
+ sd.span_err(i.span, "functions using `#[should_panic]` must return `()`");
+ false
+ }
+ (true, false) => {
+ if !generics.params.is_empty() {
+ sd.span_err(i.span, "functions used as tests must have signature fn() -> ()");
+ false
+ } else {
+ true
+ }
+ }
+ (false, _) => true,
+ }
+ } else {
+ // should be unreachable because `is_test_fn_item` should catch all non-fn items
+ false
+ }
+}
+
+fn has_bench_signature(cx: &ExtCtxt<'_>, i: &ast::Item) -> bool {
+ let has_sig = if let ast::ItemKind::Fn(box ast::Fn { ref sig, .. }) = i.kind {
+ // N.B., this is an inadequate check, but we're running well before
+ // resolve, so we can't dig any deeper.
+ sig.decl.inputs.len() == 1
+ } else {
+ false
+ };
+
+ if !has_sig {
+ cx.sess.parse_sess.span_diagnostic.span_err(
+ i.span,
+ "functions used as benches must have \
+ signature `fn(&mut Bencher) -> impl Termination`",
+ );
+ }
+
+ has_sig
+}
diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs
new file mode 100644
index 000000000..0ebe29df9
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/test_harness.rs
@@ -0,0 +1,390 @@
+// Code that generates a test runner to run all the tests in a crate
+
+use rustc_ast as ast;
+use rustc_ast::entry::EntryPointType;
+use rustc_ast::mut_visit::{ExpectOne, *};
+use rustc_ast::ptr::P;
+use rustc_ast::{attr, ModKind};
+use rustc_expand::base::{ExtCtxt, ResolverExpand};
+use rustc_expand::expand::{AstFragment, ExpansionConfig};
+use rustc_feature::Features;
+use rustc_session::Session;
+use rustc_span::hygiene::{AstPass, SyntaxContext, Transparency};
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::spec::PanicStrategy;
+use smallvec::{smallvec, SmallVec};
+use tracing::debug;
+
+use std::{iter, mem};
+
+struct Test {
+ span: Span,
+ ident: Ident,
+}
+
+struct TestCtxt<'a> {
+ ext_cx: ExtCtxt<'a>,
+ panic_strategy: PanicStrategy,
+ def_site: Span,
+ test_cases: Vec<Test>,
+ reexport_test_harness_main: Option<Symbol>,
+ test_runner: Option<ast::Path>,
+}
+
+// Traverse the crate, collecting all the test functions, eliding any
+// existing main functions, and synthesizing a main test harness
+pub fn inject(sess: &Session, resolver: &mut dyn ResolverExpand, krate: &mut ast::Crate) {
+ let span_diagnostic = sess.diagnostic();
+ let panic_strategy = sess.panic_strategy();
+ let platform_panic_strategy = sess.target.panic_strategy;
+
+ // Check for #![reexport_test_harness_main = "some_name"] which gives the
+ // main test function the name `some_name` without hygiene. This needs to be
+ // unconditional, so that the attribute is still marked as used in
+ // non-test builds.
+ let reexport_test_harness_main =
+ sess.first_attr_value_str_by_name(&krate.attrs, sym::reexport_test_harness_main);
+
+ // Do this here so that the test_runner crate attribute gets marked as used
+ // even in non-test builds
+ let test_runner = get_test_runner(sess, span_diagnostic, &krate);
+
+ if sess.opts.test {
+ let panic_strategy = match (panic_strategy, sess.opts.unstable_opts.panic_abort_tests) {
+ (PanicStrategy::Abort, true) => PanicStrategy::Abort,
+ (PanicStrategy::Abort, false) => {
+ if panic_strategy == platform_panic_strategy {
+ // Silently allow compiling with panic=abort on these platforms,
+ // but with old behavior (abort if a test fails).
+ } else {
+ span_diagnostic.err(
+ "building tests with panic=abort is not supported \
+ without `-Zpanic_abort_tests`",
+ );
+ }
+ PanicStrategy::Unwind
+ }
+ (PanicStrategy::Unwind, _) => PanicStrategy::Unwind,
+ };
+ generate_test_harness(
+ sess,
+ resolver,
+ reexport_test_harness_main,
+ krate,
+ &sess.features_untracked(),
+ panic_strategy,
+ test_runner,
+ )
+ }
+}
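+
+// For example, per the match above, a test build with panic=abort is only
+// accepted when the unstable flag is also present (illustrative invocation):
+//
+//     rustc --test -Cpanic=abort -Zpanic_abort_tests tests.rs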
+
+struct TestHarnessGenerator<'a> {
+ cx: TestCtxt<'a>,
+ tests: Vec<Test>,
+}
+
+impl TestHarnessGenerator<'_> {
+ fn add_test_cases(&mut self, node_id: ast::NodeId, span: Span, prev_tests: Vec<Test>) {
+ let mut tests = mem::replace(&mut self.tests, prev_tests);
+
+ if !tests.is_empty() {
+ // Create an identifier that will hygienically resolve the test
+ // case name, even in another module.
+ let expn_id = self.cx.ext_cx.resolver.expansion_for_ast_pass(
+ span,
+ AstPass::TestHarness,
+ &[],
+ Some(node_id),
+ );
+ for test in &mut tests {
+ // See the comment on `mk_main` for why we're using
+ // `apply_mark` directly.
+ test.ident.span =
+ test.ident.span.apply_mark(expn_id.to_expn_id(), Transparency::Opaque);
+ }
+ self.cx.test_cases.extend(tests);
+ }
+ }
+}
+
+impl<'a> MutVisitor for TestHarnessGenerator<'a> {
+ fn visit_crate(&mut self, c: &mut ast::Crate) {
+ let prev_tests = mem::take(&mut self.tests);
+ noop_visit_crate(c, self);
+ self.add_test_cases(ast::CRATE_NODE_ID, c.spans.inner_span, prev_tests);
+
+ // Create a main function to run our tests
+ c.items.push(mk_main(&mut self.cx));
+ }
+
+ fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
+ let mut item = i.into_inner();
+ if is_test_case(&self.cx.ext_cx.sess, &item) {
+ debug!("this is a test item");
+
+ let test = Test { span: item.span, ident: item.ident };
+ self.tests.push(test);
+ }
+
+        // We don't want to recurse into anything other than mods, since
+        // mods or tests inside functions will break things.
+ if let ast::ItemKind::Mod(_, ModKind::Loaded(.., ref spans)) = item.kind {
+ let ast::ModSpans { inner_span: span, inject_use_span: _ } = *spans;
+ let prev_tests = mem::take(&mut self.tests);
+ noop_visit_item_kind(&mut item.kind, self);
+ self.add_test_cases(item.id, span, prev_tests);
+ }
+ smallvec![P(item)]
+ }
+}
+
+// Beware, this is duplicated in `rustc_passes/src/entry.rs` (with
+// `rustc_hir::Item`), so make sure to keep them in sync.
+fn entry_point_type(sess: &Session, item: &ast::Item, depth: usize) -> EntryPointType {
+ match item.kind {
+ ast::ItemKind::Fn(..) => {
+ if sess.contains_name(&item.attrs, sym::start) {
+ EntryPointType::Start
+ } else if sess.contains_name(&item.attrs, sym::rustc_main) {
+ EntryPointType::RustcMainAttr
+ } else if item.ident.name == sym::main {
+ if depth == 0 {
+ // This is a top-level function so can be 'main'
+ EntryPointType::MainNamed
+ } else {
+ EntryPointType::OtherMain
+ }
+ } else {
+ EntryPointType::None
+ }
+ }
+ _ => EntryPointType::None,
+ }
+}
+/// A folder used to remove any entry points (like fn main) because the harness
+/// generator will provide its own
+struct EntryPointCleaner<'a> {
+    sess: &'a Session,
+    // Current depth in the AST
+    depth: usize,
+ def_site: Span,
+}
+
+impl<'a> MutVisitor for EntryPointCleaner<'a> {
+ fn flat_map_item(&mut self, i: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
+ self.depth += 1;
+ let item = noop_flat_map_item(i, self).expect_one("noop did something");
+ self.depth -= 1;
+
+ // Remove any #[rustc_main] or #[start] from the AST so it doesn't
+ // clash with the one we're going to add, but mark it as
+ // #[allow(dead_code)] to avoid printing warnings.
+ let item = match entry_point_type(self.sess, &item, self.depth) {
+ EntryPointType::MainNamed | EntryPointType::RustcMainAttr | EntryPointType::Start => {
+ item.map(|ast::Item { id, ident, attrs, kind, vis, span, tokens }| {
+ let allow_ident = Ident::new(sym::allow, self.def_site);
+ let dc_nested =
+ attr::mk_nested_word_item(Ident::new(sym::dead_code, self.def_site));
+ let allow_dead_code_item = attr::mk_list_item(allow_ident, vec![dc_nested]);
+ let allow_dead_code = attr::mk_attr_outer(allow_dead_code_item);
+ let attrs = attrs
+ .into_iter()
+ .filter(|attr| {
+ !attr.has_name(sym::rustc_main) && !attr.has_name(sym::start)
+ })
+ .chain(iter::once(allow_dead_code))
+ .collect();
+
+ ast::Item { id, ident, attrs, kind, vis, span, tokens }
+ })
+ }
+ EntryPointType::None | EntryPointType::OtherMain => item,
+ };
+
+ smallvec![item]
+ }
+}
+
+/// Crawl over the crate, inserting test reexports and the test main function
+fn generate_test_harness(
+ sess: &Session,
+ resolver: &mut dyn ResolverExpand,
+ reexport_test_harness_main: Option<Symbol>,
+ krate: &mut ast::Crate,
+ features: &Features,
+ panic_strategy: PanicStrategy,
+ test_runner: Option<ast::Path>,
+) {
+ let mut econfig = ExpansionConfig::default("test".to_string());
+ econfig.features = Some(features);
+
+ let ext_cx = ExtCtxt::new(sess, econfig, resolver, None);
+
+ let expn_id = ext_cx.resolver.expansion_for_ast_pass(
+ DUMMY_SP,
+ AstPass::TestHarness,
+ &[sym::test, sym::rustc_attrs],
+ None,
+ );
+ let def_site = DUMMY_SP.with_def_site_ctxt(expn_id.to_expn_id());
+
+ // Remove the entry points
+ let mut cleaner = EntryPointCleaner { sess, depth: 0, def_site };
+ cleaner.visit_crate(krate);
+
+ let cx = TestCtxt {
+ ext_cx,
+ panic_strategy,
+ def_site,
+ test_cases: Vec::new(),
+ reexport_test_harness_main,
+ test_runner,
+ };
+
+ TestHarnessGenerator { cx, tests: Vec::new() }.visit_crate(krate);
+}
+
+/// Creates a function item for use as the main function of a test build.
+/// This function will call the `test_runner` as specified by the crate attribute
+///
+/// By default this expands to
+///
+/// ```ignore (illustrative expansion of the generated harness; not compiled as a doctest)
+/// #[rustc_main]
+/// pub fn main() {
+/// extern crate test;
+/// test::test_main_static(&[
+/// &test_const1,
+/// &test_const2,
+/// &test_const3,
+/// ]);
+/// }
+/// ```
+///
+/// Most of the identifiers have the usual def-site hygiene for the AST pass. The
+/// exception is the `test_const`s. These have a syntax context that has two
+/// opaque marks: one from the expansion of `test` or `test_case`, and one
+/// generated in `TestHarnessGenerator::flat_map_item`. When resolving this
+/// identifier after failing to find a matching identifier in the root module,
+/// we remove the outer mark and try resolving at its def-site, which will
+/// then resolve to `test_const`.
+///
+/// The expansion here can be controlled by two attributes:
+///
+/// [`TestCtxt::reexport_test_harness_main`] provides a different name for the `main`
+/// function and [`TestCtxt::test_runner`] provides a path that replaces
+/// `test::test_main_static`.
+fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> {
+ let sp = cx.def_site;
+ let ecx = &cx.ext_cx;
+ let test_id = Ident::new(sym::test, sp);
+
+ let runner_name = match cx.panic_strategy {
+ PanicStrategy::Unwind => "test_main_static",
+ PanicStrategy::Abort => "test_main_static_abort",
+ };
+
+ // test::test_main_static(...)
+ let mut test_runner = cx
+ .test_runner
+ .clone()
+ .unwrap_or_else(|| ecx.path(sp, vec![test_id, Ident::from_str_and_span(runner_name, sp)]));
+
+ test_runner.span = sp;
+
+ let test_main_path_expr = ecx.expr_path(test_runner);
+ let call_test_main = ecx.expr_call(sp, test_main_path_expr, vec![mk_tests_slice(cx, sp)]);
+ let call_test_main = ecx.stmt_expr(call_test_main);
+
+ // extern crate test
+ let test_extern_stmt =
+ ecx.stmt_item(sp, ecx.item(sp, test_id, vec![], ast::ItemKind::ExternCrate(None)));
+
+ // #[rustc_main]
+ let main_meta = ecx.meta_word(sp, sym::rustc_main);
+ let main_attr = ecx.attribute(main_meta);
+
+ // pub fn main() { ... }
+ let main_ret_ty = ecx.ty(sp, ast::TyKind::Tup(vec![]));
+
+ // If no test runner is provided we need to import the test crate
+ let main_body = if cx.test_runner.is_none() {
+ ecx.block(sp, vec![test_extern_stmt, call_test_main])
+ } else {
+ ecx.block(sp, vec![call_test_main])
+ };
+
+ let decl = ecx.fn_decl(vec![], ast::FnRetTy::Ty(main_ret_ty));
+ let sig = ast::FnSig { decl, header: ast::FnHeader::default(), span: sp };
+ let defaultness = ast::Defaultness::Final;
+ let main = ast::ItemKind::Fn(Box::new(ast::Fn {
+ defaultness,
+ sig,
+ generics: ast::Generics::default(),
+ body: Some(main_body),
+ }));
+
+ // Honor the reexport_test_harness_main attribute
+ let main_id = match cx.reexport_test_harness_main {
+ Some(sym) => Ident::new(sym, sp.with_ctxt(SyntaxContext::root())),
+ None => Ident::new(sym::main, sp),
+ };
+
+ let main = P(ast::Item {
+ ident: main_id,
+ attrs: vec![main_attr],
+ id: ast::DUMMY_NODE_ID,
+ kind: main,
+ vis: ast::Visibility { span: sp, kind: ast::VisibilityKind::Public, tokens: None },
+ span: sp,
+ tokens: None,
+ });
+
+ // Integrate the new item into existing module structures.
+ let main = AstFragment::Items(smallvec![main]);
+ cx.ext_cx.monotonic_expander().fully_expand_fragment(main).make_items().pop().unwrap()
+}
+
+/// Creates a slice containing every test like so:
+/// &[&test1, &test2]
+fn mk_tests_slice(cx: &TestCtxt<'_>, sp: Span) -> P<ast::Expr> {
+ debug!("building test vector from {} tests", cx.test_cases.len());
+ let ecx = &cx.ext_cx;
+
+ ecx.expr_array_ref(
+ sp,
+ cx.test_cases
+ .iter()
+ .map(|test| {
+ ecx.expr_addr_of(test.span, ecx.expr_path(ecx.path(test.span, vec![test.ident])))
+ })
+ .collect(),
+ )
+}
+
+fn is_test_case(sess: &Session, i: &ast::Item) -> bool {
+ sess.contains_name(&i.attrs, sym::rustc_test_marker)
+}
+
+fn get_test_runner(
+ sess: &Session,
+ sd: &rustc_errors::Handler,
+ krate: &ast::Crate,
+) -> Option<ast::Path> {
+ let test_attr = sess.find_by_name(&krate.attrs, sym::test_runner)?;
+ let meta_list = test_attr.meta_item_list()?;
+ let span = test_attr.span;
+ match &*meta_list {
+ [single] => match single.meta_item() {
+ Some(meta_item) if meta_item.is_word() => return Some(meta_item.path.clone()),
+ _ => {
+ sd.struct_span_err(span, "`test_runner` argument must be a path").emit();
+ }
+ },
+ _ => {
+ sd.struct_span_err(span, "`#![test_runner(..)]` accepts exactly 1 argument").emit();
+ }
+ }
+ None
+}
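
The attributes consumed above can be seen end to end with the unstable custom test frameworks feature. A minimal nightly-only sketch (the runner name `my_runner` and the test body are illustrative, not part of this file):

```rust
#![feature(custom_test_frameworks)]
#![test_runner(crate::my_runner)]
#![reexport_test_harness_main = "test_main"]

// `get_test_runner` picks up this path, and `mk_main` calls it in place of
// `test::test_main_static`; every `#[test_case]` item ends up in the slice.
fn my_runner(tests: &[&dyn Fn()]) {
    println!("running {} tests", tests.len());
    for test in tests {
        test();
    }
}

#[test_case]
fn smoke() {
    assert_eq!(2 + 2, 4);
}
```

Built with `--test`, the generated entry point still runs (it carries `#[rustc_main]`), and because of `reexport_test_harness_main` it is also reachable under the unhygienic name `test_main`.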
diff --git a/compiler/rustc_builtin_macros/src/trace_macros.rs b/compiler/rustc_builtin_macros/src/trace_macros.rs
new file mode 100644
index 000000000..cc5ae6894
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/trace_macros.rs
@@ -0,0 +1,29 @@
+use rustc_ast::tokenstream::{TokenStream, TokenTree};
+use rustc_expand::base::{self, ExtCtxt};
+use rustc_span::symbol::kw;
+use rustc_span::Span;
+
+pub fn expand_trace_macros(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tt: TokenStream,
+) -> Box<dyn base::MacResult + 'static> {
+ let mut cursor = tt.into_trees();
+ let mut err = false;
+ let value = match &cursor.next() {
+ Some(TokenTree::Token(token, _)) if token.is_keyword(kw::True) => true,
+ Some(TokenTree::Token(token, _)) if token.is_keyword(kw::False) => false,
+ _ => {
+ err = true;
+ false
+ }
+ };
+ err |= cursor.next().is_some();
+ if err {
+ cx.span_err(sp, "trace_macros! accepts only `true` or `false`")
+ } else {
+ cx.set_trace_macros(value);
+ }
+
+ base::DummyResult::any_valid(sp)
+}
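
For reference, the expander above backs the nightly-only `trace_macros!` macro; a minimal sketch of its use:

```rust
#![feature(trace_macros)]

macro_rules! add {
    ($a:expr, $b:expr) => { $a + $b };
}

fn main() {
    trace_macros!(true); // from here on, macro expansions are logged at compile time
    let _three = add!(1, 2);
    trace_macros!(false); // anything other than a single `true`/`false` is rejected
}
```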
diff --git a/compiler/rustc_builtin_macros/src/util.rs b/compiler/rustc_builtin_macros/src/util.rs
new file mode 100644
index 000000000..527fe50ef
--- /dev/null
+++ b/compiler/rustc_builtin_macros/src/util.rs
@@ -0,0 +1,43 @@
+use rustc_ast::{Attribute, MetaItem};
+use rustc_expand::base::{Annotatable, ExtCtxt};
+use rustc_feature::AttributeTemplate;
+use rustc_lint_defs::builtin::DUPLICATE_MACRO_ATTRIBUTES;
+use rustc_parse::validate_attr;
+use rustc_span::Symbol;
+
+pub fn check_builtin_macro_attribute(ecx: &ExtCtxt<'_>, meta_item: &MetaItem, name: Symbol) {
+ // All the built-in macro attributes are "words" at the moment.
+ let template = AttributeTemplate { word: true, ..Default::default() };
+ let attr = ecx.attribute(meta_item.clone());
+ validate_attr::check_builtin_attribute(&ecx.sess.parse_sess, &attr, name, template);
+}
+
+/// Emit a warning if the item is annotated with the given attribute. This is used to diagnose when
+/// an attribute may have been mistakenly duplicated.
+pub fn warn_on_duplicate_attribute(ecx: &ExtCtxt<'_>, item: &Annotatable, name: Symbol) {
+ let attrs: Option<&[Attribute]> = match item {
+ Annotatable::Item(item) => Some(&item.attrs),
+ Annotatable::TraitItem(item) => Some(&item.attrs),
+ Annotatable::ImplItem(item) => Some(&item.attrs),
+ Annotatable::ForeignItem(item) => Some(&item.attrs),
+ Annotatable::Expr(expr) => Some(&expr.attrs),
+ Annotatable::Arm(arm) => Some(&arm.attrs),
+ Annotatable::ExprField(field) => Some(&field.attrs),
+ Annotatable::PatField(field) => Some(&field.attrs),
+ Annotatable::GenericParam(param) => Some(&param.attrs),
+ Annotatable::Param(param) => Some(&param.attrs),
+ Annotatable::FieldDef(def) => Some(&def.attrs),
+ Annotatable::Variant(variant) => Some(&variant.attrs),
+ _ => None,
+ };
+ if let Some(attrs) = attrs {
+ if let Some(attr) = ecx.sess.find_by_name(attrs, name) {
+ ecx.parse_sess().buffer_lint(
+ DUPLICATE_MACRO_ATTRIBUTES,
+ attr.span,
+ ecx.current_expansion.lint_node_id,
+ "duplicated attribute",
+ );
+ }
+ }
+}
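
A sketch of the situation `warn_on_duplicate_attribute` diagnoses: when the first `#[test]` below expands, it finds the second, identical attribute still attached to the item and buffers the "duplicated attribute" warning.

```rust
#[test]
#[test] // warning: duplicated attribute
fn duplicated() {}
```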
diff --git a/compiler/rustc_codegen_cranelift/.cirrus.yml b/compiler/rustc_codegen_cranelift/.cirrus.yml
new file mode 100644
index 000000000..61da6a249
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.cirrus.yml
@@ -0,0 +1,25 @@
+task:
+ name: freebsd
+ freebsd_instance:
+ image: freebsd-12-1-release-amd64
+ setup_rust_script:
+ - pkg install -y curl git bash
+ - curl https://sh.rustup.rs -sSf --output rustup.sh
+ - sh rustup.sh --default-toolchain none -y --profile=minimal
+ cargo_bin_cache:
+ folder: ~/.cargo/bin
+ target_cache:
+ folder: target
+ prepare_script:
+ - . $HOME/.cargo/env
+ - git config --global user.email "user@example.com"
+ - git config --global user.name "User"
+ - ./y.rs prepare
+ test_script:
+ - . $HOME/.cargo/env
+ - # Enable backtraces for easier debugging
+ - export RUST_BACKTRACE=1
+ - # Reduce amount of benchmark runs as they are slow
+ - export COMPILE_RUNS=2
+ - export RUN_RUNS=2
+ - ./test.sh
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/main.yml b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
new file mode 100644
index 000000000..aa556a21b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/main.yml
@@ -0,0 +1,184 @@
+name: CI
+
+on:
+ - push
+ - pull_request
+
+jobs:
+ rustfmt:
+ runs-on: ubuntu-latest
+ timeout-minutes: 10
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Install rustfmt
+ run: |
+ rustup component add rustfmt
+
+ - name: Rustfmt
+ run: |
+ cargo fmt --check
+
+ build:
+ runs-on: ${{ matrix.os }}
+ timeout-minutes: 60
+
+ strategy:
+ fail-fast: false
+ matrix:
+ include:
+ - os: ubuntu-latest
+ - os: macos-latest
+ # cross-compile from Linux to Windows using mingw
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: x86_64-pc-windows-gnu
+ - os: ubuntu-latest
+ env:
+ TARGET_TRIPLE: aarch64-unknown-linux-gnu
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache cargo installed crates
+ uses: actions/cache@v2
+ with:
+ path: ~/.cargo/bin
+ key: ${{ runner.os }}-cargo-installed-crates
+
+ - name: Cache cargo registry and index
+ uses: actions/cache@v2
+ with:
+ path: |
+ ~/.cargo/registry
+ ~/.cargo/git
+ key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v2
+ with:
+ path: target
+ key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Install MinGW toolchain and wine
+ if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y gcc-mingw-w64-x86-64 wine-stable
+ rustup target add x86_64-pc-windows-gnu
+
+ - name: Install AArch64 toolchain and qemu
+ if: matrix.os == 'ubuntu-latest' && matrix.env.TARGET_TRIPLE == 'aarch64-unknown-linux-gnu'
+ run: |
+ sudo apt-get update
+ sudo apt-get install -y gcc-aarch64-linux-gnu qemu-user
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ ./y.rs prepare
+
+ - name: Build without unstable features
+ env:
+ TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
+ # This is the config rust-lang/rust uses for builds
+ run: ./y.rs build --no-unstable-features
+
+ - name: Build
+ run: ./y.rs build --sysroot none
+
+ - name: Test
+ env:
+ TARGET_TRIPLE: ${{ matrix.env.TARGET_TRIPLE }}
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
+ export COMPILE_RUNS=2
+ export RUN_RUNS=2
+
+ # Enable extra checks
+ export CG_CLIF_ENABLE_VERIFIER=1
+
+ ./test.sh
+
+ - name: Package prebuilt cg_clif
+ run: tar cvfJ cg_clif.tar.xz build
+
+ - name: Upload prebuilt cg_clif
+ if: matrix.env.TARGET_TRIPLE != 'x86_64-pc-windows-gnu'
+ uses: actions/upload-artifact@v2
+ with:
+ name: cg_clif-${{ runner.os }}
+ path: cg_clif.tar.xz
+
+ - name: Upload prebuilt cg_clif (cross compile)
+ if: matrix.env.TARGET_TRIPLE == 'x86_64-pc-windows-gnu'
+ uses: actions/upload-artifact@v2
+ with:
+ name: cg_clif-${{ runner.os }}-cross-x86_64-mingw
+ path: cg_clif.tar.xz
+
+ build_windows:
+ runs-on: windows-latest
+ timeout-minutes: 60
+
+ steps:
+ - uses: actions/checkout@v3
+
+ #- name: Cache cargo installed crates
+ # uses: actions/cache@v2
+ # with:
+ # path: ~/.cargo/bin
+ # key: ${{ runner.os }}-cargo-installed-crates
+
+ #- name: Cache cargo registry and index
+ # uses: actions/cache@v2
+ # with:
+ # path: |
+ # ~/.cargo/registry
+ # ~/.cargo/git
+ # key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+ #- name: Cache cargo target dir
+ # uses: actions/cache@v2
+ # with:
+ # path: target
+ # key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ git config --global core.autocrlf false
+ rustup set default-host x86_64-pc-windows-gnu
+ rustc y.rs -o y.exe -g
+ ./y.exe prepare
+
+ - name: Build
+ #name: Test
+ run: |
+ # Enable backtraces for easier debugging
+ #export RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
+ #export COMPILE_RUNS=2
+ #export RUN_RUNS=2
+
+ # Enable extra checks
+ #export CG_CLIF_ENABLE_VERIFIER=1
+
+ ./y.exe build
+
+ - name: Package prebuilt cg_clif
+      # don't use xz compression, as tar on Windows doesn't support it, and bzip2 hangs
+ run: tar cvf cg_clif.tar build
+
+ - name: Upload prebuilt cg_clif
+ uses: actions/upload-artifact@v2
+ with:
+ name: cg_clif-${{ runner.os }}
+ path: cg_clif.tar
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/nightly-cranelift.yml b/compiler/rustc_codegen_cranelift/.github/workflows/nightly-cranelift.yml
new file mode 100644
index 000000000..0a3e7ca07
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/nightly-cranelift.yml
@@ -0,0 +1,59 @@
+name: Test nightly Cranelift
+
+on:
+ push:
+ schedule:
+ - cron: '17 1 * * *' # At 01:17 UTC every day.
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ timeout-minutes: 60
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache cargo installed crates
+ uses: actions/cache@v2
+ with:
+ path: ~/.cargo/bin
+ key: ubuntu-latest-cargo-installed-crates
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ ./y.rs prepare
+
+ - name: Patch Cranelift
+ run: |
+ sed -i 's/cranelift-codegen = { version = "\w*.\w*.\w*", features = \["unwind", "all-arch"\] }/cranelift-codegen = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git", features = ["unwind", "all-arch"] }/' Cargo.toml
+ sed -i 's/cranelift-frontend = "\w*.\w*.\w*"/cranelift-frontend = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git" }/' Cargo.toml
+ sed -i 's/cranelift-module = "\w*.\w*.\w*"/cranelift-module = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git" }/' Cargo.toml
+ sed -i 's/cranelift-native = "\w*.\w*.\w*"/cranelift-native = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git" }/' Cargo.toml
+ sed -i 's/cranelift-jit = { version = "\w*.\w*.\w*", optional = true }/cranelift-jit = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git", optional = true }/' Cargo.toml
+ sed -i 's/cranelift-object = "\w*.\w*.\w*"/cranelift-object = { git = "https:\/\/github.com\/bytecodealliance\/wasmtime.git" }/' Cargo.toml
+
+ sed -i 's/object = { version = "0.27.0"/object = { version = "0.28.0"/' Cargo.toml
+
+ cat Cargo.toml
+
+ - name: Build without unstable features
+ # This is the config rust-lang/rust uses for builds
+ run: ./y.rs build --no-unstable-features
+
+ - name: Build
+ run: ./y.rs build --sysroot none
+ - name: Test
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
+ export COMPILE_RUNS=2
+ export RUN_RUNS=2
+
+ # Enable extra checks
+ export CG_CLIF_ENABLE_VERIFIER=1
+
+ ./test.sh
diff --git a/compiler/rustc_codegen_cranelift/.github/workflows/rustc.yml b/compiler/rustc_codegen_cranelift/.github/workflows/rustc.yml
new file mode 100644
index 000000000..b8a98b83e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.github/workflows/rustc.yml
@@ -0,0 +1,82 @@
+name: Various rustc tests
+
+on:
+ - push
+
+jobs:
+ bootstrap_rustc:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache cargo installed crates
+ uses: actions/cache@v2
+ with:
+ path: ~/.cargo/bin
+ key: ${{ runner.os }}-cargo-installed-crates
+
+ - name: Cache cargo registry and index
+ uses: actions/cache@v2
+ with:
+ path: |
+ ~/.cargo/registry
+ ~/.cargo/git
+ key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v2
+ with:
+ path: target
+ key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ ./y.rs prepare
+
+ - name: Test
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ ./scripts/test_bootstrap.sh
+ rustc_test_suite:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Cache cargo installed crates
+ uses: actions/cache@v2
+ with:
+ path: ~/.cargo/bin
+ key: ${{ runner.os }}-cargo-installed-crates
+
+ - name: Cache cargo registry and index
+ uses: actions/cache@v2
+ with:
+ path: |
+ ~/.cargo/registry
+ ~/.cargo/git
+ key: ${{ runner.os }}-cargo-registry-and-index-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v2
+ with:
+ path: target
+ key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain', '**/Cargo.lock') }}
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ ./y.rs prepare
+
+ - name: Test
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ ./scripts/test_rustc_tests.sh
diff --git a/compiler/rustc_codegen_cranelift/.vscode/settings.json b/compiler/rustc_codegen_cranelift/.vscode/settings.json
new file mode 100644
index 000000000..d88309e41
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/.vscode/settings.json
@@ -0,0 +1,73 @@
+{
+ // source for rustc_* is not included in the rust-src component; disable the errors about this
+ "rust-analyzer.diagnostics.disabled": ["unresolved-extern-crate", "unresolved-macro-call"],
+ "rust-analyzer.imports.granularity.enforce": true,
+ "rust-analyzer.imports.granularity.group": "module",
+ "rust-analyzer.imports.prefix": "crate",
+ "rust-analyzer.cargo.features": ["unstable-features"],
+ "rust-analyzer.linkedProjects": [
+ "./Cargo.toml",
+ //"./build_sysroot/sysroot_src/src/libstd/Cargo.toml",
+ {
+ "roots": [
+ "./example/mini_core.rs",
+ "./example/mini_core_hello_world.rs",
+ "./example/mod_bench.rs"
+ ],
+ "crates": [
+ {
+ "root_module": "./example/mini_core.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ {
+ "root_module": "./example/mini_core_hello_world.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 0, "name": "mini_core" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./example/mod_bench.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ },
+ {
+ "roots": ["./scripts/filter_profile.rs"],
+ "crates": [
+ {
+ "root_module": "./scripts/filter_profile.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 1, "name": "std" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ },
+ {
+ "roots": ["./y.rs"],
+ "crates": [
+ {
+ "root_module": "./y.rs",
+ "edition": "2018",
+ "deps": [{ "crate": 1, "name": "std" }],
+ "cfg": [],
+ },
+ {
+ "root_module": "./build_sysroot/sysroot_src/library/std/src/lib.rs",
+ "edition": "2018",
+ "deps": [],
+ "cfg": [],
+ },
+ ]
+ }
+ ]
+}
diff --git a/compiler/rustc_codegen_cranelift/Cargo.lock b/compiler/rustc_codegen_cranelift/Cargo.lock
new file mode 100644
index 000000000..532049c85
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/Cargo.lock
@@ -0,0 +1,385 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "ahash"
+version = "0.7.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
+dependencies = [
+ "getrandom",
+ "once_cell",
+ "version_check",
+]
+
+[[package]]
+name = "anyhow"
+version = "1.0.56"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4361135be9122e0870de935d7c439aef945b9f9ddd4199a553b5270b49c82a27"
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "git+https://github.com/bjorn3/rust-ar.git?branch=do_not_remove_cg_clif_ranlib#de9ab0e56bf3a208381d342aa5b60f9ff2891648"
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "byteorder"
+version = "1.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "cranelift-bforest"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "749d0d6022c9038dccf480bdde2a38d435937335bf2bb0f14e815d94517cdce8"
+dependencies = [
+ "cranelift-entity",
+]
+
+[[package]]
+name = "cranelift-codegen"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e94370cc7b37bf652ccd8bb8f09bd900997f7ccf97520edfc75554bb5c4abbea"
+dependencies = [
+ "cranelift-bforest",
+ "cranelift-codegen-meta",
+ "cranelift-codegen-shared",
+ "cranelift-entity",
+ "cranelift-isle",
+ "gimli",
+ "log",
+ "regalloc2",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-codegen-meta"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e0a3cea8fdab90e44018c5b9a1dfd460d8ee265ac354337150222a354628bdb6"
+dependencies = [
+ "cranelift-codegen-shared",
+]
+
+[[package]]
+name = "cranelift-codegen-shared"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5ac72f76f2698598951ab26d8c96eaa854810e693e7dd52523958b5909fde6b2"
+
+[[package]]
+name = "cranelift-entity"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "09eaeacfcd2356fe0e66b295e8f9d59fdd1ac3ace53ba50de14d628ec902f72d"
+
+[[package]]
+name = "cranelift-frontend"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dba69c9980d5ffd62c18a2bde927855fcd7c8dc92f29feaf8636052662cbd99c"
+dependencies = [
+ "cranelift-codegen",
+ "log",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-isle"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d2920dc1e05cac40304456ed3301fde2c09bd6a9b0210bcfa2f101398d628d5b"
+
+[[package]]
+name = "cranelift-jit"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1c3c5ed067f2c81577e431f3039148a9c187b33cc79e0d1731fede27d801ec56"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-entity",
+ "cranelift-module",
+ "cranelift-native",
+ "libc",
+ "log",
+ "region",
+ "target-lexicon",
+ "winapi",
+]
+
+[[package]]
+name = "cranelift-module"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "eee6784303bf9af235237a4885f7417e09a35df896d38ea969a0081064b3ede4"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+]
+
+[[package]]
+name = "cranelift-native"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f04dfa45f9b2a6f587c564d6b63388e00cd6589d2df6ea2758cf79e1a13285e6"
+dependencies = [
+ "cranelift-codegen",
+ "libc",
+ "target-lexicon",
+]
+
+[[package]]
+name = "cranelift-object"
+version = "0.85.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0bf38b2c505db749276793116c0cb30bd096206c7810e471677a453134881881"
+dependencies = [
+ "anyhow",
+ "cranelift-codegen",
+ "cranelift-module",
+ "log",
+ "object",
+ "target-lexicon",
+]
+
+[[package]]
+name = "crc32fast"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "fxhash"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
+dependencies = [
+ "byteorder",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad"
+dependencies = [
+ "cfg-if",
+ "libc",
+ "wasi",
+]
+
+[[package]]
+name = "gimli"
+version = "0.26.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4"
+dependencies = [
+ "indexmap",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.11.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e"
+dependencies = [
+ "ahash",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+
+[[package]]
+name = "indexmap"
+version = "1.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
+dependencies = [
+ "autocfg",
+ "hashbrown 0.12.3",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.126"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
+
+[[package]]
+name = "libloading"
+version = "0.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "351a32417a12d5f7e82c368a66781e307834dae04c6ce0cd4456d52989229883"
+dependencies = [
+ "cfg-if",
+ "winapi",
+]
+
+[[package]]
+name = "log"
+version = "0.4.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "mach"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "memchr"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+
+[[package]]
+name = "object"
+version = "0.28.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424"
+dependencies = [
+ "crc32fast",
+ "hashbrown 0.11.2",
+ "indexmap",
+ "memchr",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9"
+
+[[package]]
+name = "regalloc2"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4a8d23b35d7177df3b9d31ed8a9ab4bf625c668be77a319d4f5efd4a5257701c"
+dependencies = [
+ "fxhash",
+ "log",
+ "slice-group-by",
+ "smallvec",
+]
+
+[[package]]
+name = "region"
+version = "2.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "877e54ea2adcd70d80e9179344c97f93ef0dffd6b03e1f4529e6e83ab2fa9ae0"
+dependencies = [
+ "bitflags",
+ "libc",
+ "mach",
+ "winapi",
+]
+
+[[package]]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "cranelift-codegen",
+ "cranelift-frontend",
+ "cranelift-jit",
+ "cranelift-module",
+ "cranelift-native",
+ "cranelift-object",
+ "gimli",
+ "indexmap",
+ "libloading",
+ "object",
+ "once_cell",
+ "smallvec",
+ "target-lexicon",
+]
+
+[[package]]
+name = "slice-group-by"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec"
+
+[[package]]
+name = "smallvec"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc88c725d61fc6c3132893370cac4a0200e3fedf5da8331c570664b1987f5ca2"
+
+[[package]]
+name = "target-lexicon"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7fa7e55043acb85fca6b3c01485a2eeb6b69c5d21002e273c79e465f43b7ac1"
+
+[[package]]
+name = "version_check"
+version = "0.9.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
+
+[[package]]
+name = "wasi"
+version = "0.10.2+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/compiler/rustc_codegen_cranelift/Cargo.toml b/compiler/rustc_codegen_cranelift/Cargo.toml
new file mode 100644
index 000000000..61e977e3e
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/Cargo.toml
@@ -0,0 +1,45 @@
+[package]
+name = "rustc_codegen_cranelift"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+crate-type = ["dylib"]
+
+[dependencies]
+# These have to be in sync with each other
+cranelift-codegen = { version = "0.85.3", features = ["unwind", "all-arch"] }
+cranelift-frontend = "0.85.3"
+cranelift-module = "0.85.3"
+cranelift-native = "0.85.3"
+cranelift-jit = { version = "0.85.3", optional = true }
+cranelift-object = "0.85.3"
+target-lexicon = "0.12.0"
+gimli = { version = "0.26.0", default-features = false, features = ["write"] }
+object = { version = "0.28.0", default-features = false, features = ["std", "read_core", "write", "archive", "coff", "elf", "macho", "pe"] }
+
+ar = { git = "https://github.com/bjorn3/rust-ar.git", branch = "do_not_remove_cg_clif_ranlib" }
+indexmap = "1.9.1"
+libloading = { version = "0.6.0", optional = true }
+once_cell = "1.10.0"
+smallvec = "1.8.1"
+
+[patch.crates-io]
+# Uncomment to use local checkout of cranelift
+#cranelift-codegen = { path = "../wasmtime/cranelift/codegen" }
+#cranelift-frontend = { path = "../wasmtime/cranelift/frontend" }
+#cranelift-module = { path = "../wasmtime/cranelift/module" }
+#cranelift-native = { path = "../wasmtime/cranelift/native" }
+#cranelift-jit = { path = "../wasmtime/cranelift/jit" }
+#cranelift-object = { path = "../wasmtime/cranelift/object" }
+
+#gimli = { path = "../" }
+
+[features]
+# Features that are not yet ready to be enabled when compiling as part of rustc
+unstable-features = ["jit", "inline_asm"]
+jit = ["cranelift-jit", "libloading"]
+inline_asm = []
+
+[package.metadata.rust-analyzer]
+rustc_private = true
diff --git a/compiler/rustc_codegen_cranelift/LICENSE-APACHE b/compiler/rustc_codegen_cranelift/LICENSE-APACHE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/compiler/rustc_codegen_cranelift/LICENSE-MIT b/compiler/rustc_codegen_cranelift/LICENSE-MIT
new file mode 100644
index 000000000..31aa79387
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/compiler/rustc_codegen_cranelift/Readme.md b/compiler/rustc_codegen_cranelift/Readme.md
new file mode 100644
index 000000000..8a2db5a43
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/Readme.md
@@ -0,0 +1,75 @@
+# Cranelift codegen backend for Rust
+
+The goal of this project is to create an alternative codegen backend for the Rust compiler based on [Cranelift](https://github.com/bytecodealliance/wasmtime/blob/main/cranelift).
+This has the potential to improve compilation times in debug mode.
+If your project doesn't use any of the things listed under "Not yet supported", it should work fine.
+If not, please open an issue.
+
+## Building and testing
+
+```bash
+$ git clone https://github.com/bjorn3/rustc_codegen_cranelift.git
+$ cd rustc_codegen_cranelift
+$ ./y.rs prepare # download and patch sysroot src and install hyperfine for benchmarking
+$ ./y.rs build
+```
+
+To run the test suite replace the last command with:
+
+```bash
+$ ./test.sh
+```
+
+This will implicitly build cg_clif too. Both `y.rs build` and `test.sh` accept a `--debug` argument to
+build in debug mode.
+
+Alternatively, you can download a prebuilt version from [GHA]. It is listed in the artifacts section
+of workflow runs. Unfortunately, due to GHA restrictions, you need to be logged in to access it.
+
+[GHA]: https://github.com/bjorn3/rustc_codegen_cranelift/actions?query=branch%3Amaster+event%3Apush+is%3Asuccess
+
+## Usage
+
+rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
+
+In what follows, `$cg_clif_dir` is the directory you cloned this repo into; it is assumed you have followed the instructions above (`y.rs prepare` and `y.rs build` or `test.sh`).
+
+In the directory with your project (where you can do the usual `cargo build`), run:
+
+```bash
+$ $cg_clif_dir/build/cargo-clif build
+```
+
+This will build your project with rustc_codegen_cranelift instead of the usual LLVM backend.
+
+For additional ways to use rustc_codegen_cranelift like the JIT mode see [usage.md](docs/usage.md).
+
+## Configuration
+
+See the documentation on the `BackendConfig` struct in [config.rs](src/config.rs) for all
+configuration options.
+
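+For example, the test scripts in this repository turn on Cranelift's IR
+verifier through an environment variable (a sketch; see `config.rs` for the
+authoritative list of options and how they are parsed):
+
+```bash
+$ export CG_CLIF_ENABLE_VERIFIER=1  # extra IR checking, slower builds
+$ $cg_clif_dir/build/cargo-clif build
+```
+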
+## Not yet supported
+
+* Inline assembly ([no cranelift support](https://github.com/bytecodealliance/wasmtime/issues/1041))
+  * On Linux there is support for invoking an external assembler for `global_asm!` and `asm!`.
+    `llvm_asm!` will remain unimplemented forever. `asm!` doesn't yet support register classes;
+    you have to specify concrete registers instead.
+* SIMD ([tracked here](https://github.com/bjorn3/rustc_codegen_cranelift/issues/171), some basic things work)
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or
+ http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+ http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you shall be dual licensed as above, without any
+additional terms or conditions.
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
new file mode 100644
index 000000000..7b2cdd273
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.lock
@@ -0,0 +1,333 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "addr2line"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3e61f2b7f93d2c7d2b08263acaa4a363b3e276806c68af6134c44f523bf1aacd"
+dependencies = [
+ "compiler_builtins",
+ "gimli",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "adler"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "alloc"
+version = "0.0.0"
+dependencies = [
+ "compiler_builtins",
+ "core",
+]
+
+[[package]]
+name = "autocfg"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+
+[[package]]
+name = "cc"
+version = "1.0.73"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11"
+
+[[package]]
+name = "cfg-if"
+version = "0.1.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "compiler_builtins"
+version = "0.1.75"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c6e3183e88f659a862835db8f4b67dbeed3d93e44dd4927eef78edb1c149d784"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "core"
+version = "0.0.0"
+
+[[package]]
+name = "dlmalloc"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a6fe28e0bf9357092740362502f5cc7955d8dc125ebda71dec72336c2e15c62e"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "fortanix-sgx-abi"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c56c422ef86062869b2d57ae87270608dc5929969dd130a6e248979cf4fb6ca6"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+ "unicode-width",
+]
+
+[[package]]
+name = "gimli"
+version = "0.25.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f0a01e0497841a3b2db4f8afa483cce65f7e96a3498bd6c541734792aeac8fe7"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hashbrown"
+version = "0.12.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.2.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7668753748e445859e4e373c3d41117235d9feed578392f5a3a73efdc751ca4a"
+dependencies = [
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.126"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836"
+dependencies = [
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "memchr"
+version = "2.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "miniz_oxide"
+version = "0.4.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b"
+dependencies = [
+ "adler",
+ "autocfg",
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "object"
+version = "0.26.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "39f37e50073ccad23b6d09bcb5b263f4e76d3bb6038e4a3c08e52162ffa8abc2"
+dependencies = [
+ "compiler_builtins",
+ "memchr",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "panic_abort"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "panic_unwind"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+ "unwind",
+]
+
+[[package]]
+name = "proc_macro"
+version = "0.0.0"
+dependencies = [
+ "core",
+ "std",
+]
+
+[[package]]
+name = "rustc-demangle"
+version = "0.1.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "rustc-std-workspace-alloc"
+version = "1.99.0"
+dependencies = [
+ "alloc",
+]
+
+[[package]]
+name = "rustc-std-workspace-core"
+version = "1.99.0"
+dependencies = [
+ "core",
+]
+
+[[package]]
+name = "rustc-std-workspace-std"
+version = "1.99.0"
+dependencies = [
+ "std",
+]
+
+[[package]]
+name = "std"
+version = "0.0.0"
+dependencies = [
+ "addr2line",
+ "alloc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "dlmalloc",
+ "fortanix-sgx-abi",
+ "hashbrown",
+ "hermit-abi",
+ "libc",
+ "miniz_oxide",
+ "object",
+ "panic_abort",
+ "panic_unwind",
+ "rustc-demangle",
+ "std_detect",
+ "unwind",
+ "wasi",
+]
+
+[[package]]
+name = "std_detect"
+version = "0.1.5"
+dependencies = [
+ "cfg-if",
+ "compiler_builtins",
+ "libc",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
+
+[[package]]
+name = "sysroot"
+version = "0.0.0"
+dependencies = [
+ "alloc",
+ "compiler_builtins",
+ "core",
+ "std",
+ "test",
+]
+
+[[package]]
+name = "test"
+version = "0.0.0"
+dependencies = [
+ "cfg-if",
+ "core",
+ "getopts",
+ "libc",
+ "panic_abort",
+ "panic_unwind",
+ "proc_macro",
+ "std",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-std",
+]
+
+[[package]]
+name = "unwind"
+version = "0.0.0"
+dependencies = [
+ "cc",
+ "cfg-if",
+ "compiler_builtins",
+ "core",
+ "libc",
+]
+
+[[package]]
+name = "wasi"
+version = "0.11.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
+dependencies = [
+ "compiler_builtins",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-core",
+]
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml
new file mode 100644
index 000000000..d0e5fc4a3
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+name = "sysroot"
+version = "0.0.0"
+
+[dependencies]
+core = { path = "./sysroot_src/library/core" }
+alloc = { path = "./sysroot_src/library/alloc" }
+std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
+test = { path = "./sysroot_src/library/test" }
+
+compiler_builtins = { version = "0.1.39", default-features = false, features = ["no-asm"] }
+
+[patch.crates-io]
+rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
+rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
+rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
+
+[profile.dev]
+lto = "off"
+
+[profile.release]
+debug = true
+incremental = true
+lto = "off"
+
+# Mandatory for correctly compiling compiler-builtins
+[profile.dev.package.compiler_builtins]
+debug-assertions = false
+overflow-checks = false
+codegen-units = 10000
+
+[profile.release.package.compiler_builtins]
+debug-assertions = false
+overflow-checks = false
+codegen-units = 10000
diff --git a/compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs b/compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs
new file mode 100644
index 000000000..0c9ac1ac8
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_sysroot/src/lib.rs
@@ -0,0 +1 @@
+#![no_std]
diff --git a/compiler/rustc_codegen_cranelift/build_system/build_backend.rs b/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
new file mode 100644
index 000000000..48faec8bc
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/build_backend.rs
@@ -0,0 +1,43 @@
+use std::env;
+use std::path::{Path, PathBuf};
+use std::process::Command;
+
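+// Build the cg_clif backend itself with cargo and return the directory containing the produced
+// artifacts (target/<triple>/<channel>).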
+pub(crate) fn build_backend(
+ channel: &str,
+ host_triple: &str,
+ use_unstable_features: bool,
+) -> PathBuf {
+ let mut cmd = Command::new("cargo");
+ cmd.arg("build").arg("--target").arg(host_triple);
+
+ cmd.env("CARGO_BUILD_INCREMENTAL", "true"); // Force incr comp even in release mode
+
+ let mut rustflags = env::var("RUSTFLAGS").unwrap_or_default();
+
+ if env::var("CI").as_ref().map(|val| &**val) == Ok("true") {
+ // Deny warnings on CI
+ rustflags += " -Dwarnings";
+
+ // Disabling incr comp reduces cache size and incr comp doesn't save as much on CI anyway
+ cmd.env("CARGO_BUILD_INCREMENTAL", "false");
+ }
+
+ if use_unstable_features {
+ cmd.arg("--features").arg("unstable-features");
+ }
+
+ match channel {
+ "debug" => {}
+ "release" => {
+ cmd.arg("--release");
+ }
+ _ => unreachable!(),
+ }
+
+ cmd.env("RUSTFLAGS", rustflags);
+
+ eprintln!("[BUILD] rustc_codegen_cranelift");
+ super::utils::spawn_and_wait(cmd);
+
+ Path::new("target").join(host_triple).join(channel)
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
new file mode 100644
index 000000000..16cce83dd
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs
@@ -0,0 +1,219 @@
+use std::fs;
+use std::path::{Path, PathBuf};
+use std::process::{self, Command};
+
+use super::rustc_info::{get_file_name, get_rustc_version};
+use super::utils::{spawn_and_wait, try_hard_link};
+use super::SysrootKind;
+
+pub(crate) fn build_sysroot(
+ channel: &str,
+ sysroot_kind: SysrootKind,
+ target_dir: &Path,
+ cg_clif_build_dir: PathBuf,
+ host_triple: &str,
+ target_triple: &str,
+) {
+ if target_dir.exists() {
+ fs::remove_dir_all(target_dir).unwrap();
+ }
+ fs::create_dir_all(target_dir.join("bin")).unwrap();
+ fs::create_dir_all(target_dir.join("lib")).unwrap();
+
+ // Copy the backend
+ let cg_clif_dylib = get_file_name("rustc_codegen_cranelift", "dylib");
+ let cg_clif_dylib_path = target_dir
+ .join(if cfg!(windows) {
+ // Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the
+ // binaries.
+ "bin"
+ } else {
+ "lib"
+ })
+ .join(&cg_clif_dylib);
+ try_hard_link(cg_clif_build_dir.join(cg_clif_dylib), &cg_clif_dylib_path);
+
+ // Build and copy rustc and cargo wrappers
+ for wrapper in ["rustc-clif", "cargo-clif"] {
+ let mut build_cargo_wrapper_cmd = Command::new("rustc");
+ build_cargo_wrapper_cmd
+ .arg(PathBuf::from("scripts").join(format!("{wrapper}.rs")))
+ .arg("-o")
+ .arg(target_dir.join(wrapper))
+ .arg("-g");
+ spawn_and_wait(build_cargo_wrapper_cmd);
+ }
+
+ let default_sysroot = super::rustc_info::get_default_sysroot();
+
+ let rustlib = target_dir.join("lib").join("rustlib");
+ let host_rustlib_lib = rustlib.join(host_triple).join("lib");
+ let target_rustlib_lib = rustlib.join(target_triple).join("lib");
+ fs::create_dir_all(&host_rustlib_lib).unwrap();
+ fs::create_dir_all(&target_rustlib_lib).unwrap();
+
+ if target_triple == "x86_64-pc-windows-gnu" {
+ if !default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib").exists() {
+ eprintln!(
+ "The x86_64-pc-windows-gnu target needs to be installed first before it is possible \
+ to compile a sysroot for it.",
+ );
+ process::exit(1);
+ }
+ for file in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let file = file.unwrap().path();
+ if file.extension().map_or(true, |ext| ext.to_str().unwrap() != "o") {
+ continue; // only copy object files
+ }
+ try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
+ }
+ }
+
+ match sysroot_kind {
+ SysrootKind::None => {} // Nothing to do
+ SysrootKind::Llvm => {
+ for file in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(host_triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let file = file.unwrap().path();
+ let file_name_str = file.file_name().unwrap().to_str().unwrap();
+ if (file_name_str.contains("rustc_")
+ && !file_name_str.contains("rustc_std_workspace_")
+ && !file_name_str.contains("rustc_demangle"))
+ || file_name_str.contains("chalk")
+ || file_name_str.contains("tracing")
+ || file_name_str.contains("regex")
+ {
+ // These are large crates that are part of the rustc-dev component and are not
+ // necessary to run regular programs.
+ continue;
+ }
+ try_hard_link(&file, host_rustlib_lib.join(file.file_name().unwrap()));
+ }
+
+ if target_triple != host_triple {
+ for file in fs::read_dir(
+ default_sysroot.join("lib").join("rustlib").join(target_triple).join("lib"),
+ )
+ .unwrap()
+ {
+ let file = file.unwrap().path();
+ try_hard_link(&file, target_rustlib_lib.join(file.file_name().unwrap()));
+ }
+ }
+ }
+ SysrootKind::Clif => {
+ build_clif_sysroot_for_triple(
+ channel,
+ target_dir,
+ host_triple,
+ &cg_clif_dylib_path,
+ None,
+ );
+
+ if host_triple != target_triple {
+ // When cross-compiling it is often necessary to manually pick the right linker
+ let linker = if target_triple == "aarch64-unknown-linux-gnu" {
+ Some("aarch64-linux-gnu-gcc")
+ } else {
+ None
+ };
+ build_clif_sysroot_for_triple(
+ channel,
+ target_dir,
+ target_triple,
+ &cg_clif_dylib_path,
+ linker,
+ );
+ }
+
+ // Copy std for the host to the lib dir. This is necessary for the jit mode to find
+ // libstd.
+ for file in fs::read_dir(host_rustlib_lib).unwrap() {
+ let file = file.unwrap().path();
+ let filename = file.file_name().unwrap().to_str().unwrap();
+ if filename.contains("std-") && !filename.contains(".rlib") {
+ try_hard_link(&file, target_dir.join("lib").join(file.file_name().unwrap()));
+ }
+ }
+ }
+ }
+}
+
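+// Build the sysroot crates (core/alloc/std/...) for `triple` using the freshly built cg_clif as
+// the codegen backend, then copy the resulting artifacts into the sysroot under `target_dir`.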
+fn build_clif_sysroot_for_triple(
+ channel: &str,
+ target_dir: &Path,
+ triple: &str,
+ cg_clif_dylib_path: &Path,
+ linker: Option<&str>,
+) {
+ match fs::read_to_string(Path::new("build_sysroot").join("rustc_version")) {
+ Err(e) => {
+ eprintln!("Failed to get rustc version for patched sysroot source: {}", e);
+ eprintln!("Hint: Try `./y.rs prepare` to patch the sysroot source");
+ process::exit(1);
+ }
+ Ok(source_version) => {
+ let rustc_version = get_rustc_version();
+ if source_version != rustc_version {
+ eprintln!("The patched sysroot source is outdated");
+ eprintln!("Source version: {}", source_version.trim());
+ eprintln!("Rustc version: {}", rustc_version.trim());
+ eprintln!("Hint: Try `./y.rs prepare` to update the patched sysroot source");
+ process::exit(1);
+ }
+ }
+ }
+
+ let build_dir = Path::new("build_sysroot").join("target").join(triple).join(channel);
+
+ if !super::config::get_bool("keep_sysroot") {
+        // Clean up the deps dir, but keep build scripts and the incremental cache for faster
+ // recompilation as they are not affected by changes in cg_clif.
+ if build_dir.join("deps").exists() {
+ fs::remove_dir_all(build_dir.join("deps")).unwrap();
+ }
+ }
+
+ // Build sysroot
+ let mut build_cmd = Command::new("cargo");
+ build_cmd.arg("build").arg("--target").arg(triple).current_dir("build_sysroot");
+ let mut rustflags = "-Zforce-unstable-if-unmarked -Cpanic=abort".to_string();
+ rustflags.push_str(&format!(" -Zcodegen-backend={}", cg_clif_dylib_path.to_str().unwrap()));
+ if channel == "release" {
+ build_cmd.arg("--release");
+ rustflags.push_str(" -Zmir-opt-level=3");
+ }
+ if let Some(linker) = linker {
+ use std::fmt::Write;
+ write!(rustflags, " -Clinker={}", linker).unwrap();
+ }
+ build_cmd.env("RUSTFLAGS", rustflags);
+ build_cmd.env("__CARGO_DEFAULT_LIB_METADATA", "cg_clif");
+ spawn_and_wait(build_cmd);
+
+ // Copy all relevant files to the sysroot
+ for entry in
+ fs::read_dir(Path::new("build_sysroot/target").join(triple).join(channel).join("deps"))
+ .unwrap()
+ {
+ let entry = entry.unwrap();
+ if let Some(ext) = entry.path().extension() {
+ if ext == "rmeta" || ext == "d" || ext == "dSYM" || ext == "clif" {
+ continue;
+ }
+ } else {
+ continue;
+ };
+ try_hard_link(
+ entry.path(),
+ target_dir.join("lib").join("rustlib").join(triple).join("lib").join(entry.file_name()),
+ );
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/config.rs b/compiler/rustc_codegen_cranelift/build_system/config.rs
new file mode 100644
index 000000000..ef540cf1f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/config.rs
@@ -0,0 +1,55 @@
+use std::{fs, process};
+
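+// Parse config.txt into (key, value) pairs. Each line is either a bare `key` (a boolean flag)
+// or `key = value`; `#` starts a comment and blank lines are ignored.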
+fn load_config_file() -> Vec<(String, Option<String>)> {
+ fs::read_to_string("config.txt")
+ .unwrap()
+ .lines()
+        .map(|line| line.split_once('#').map_or(line, |(line, _comment)| line))
+ .map(|line| line.trim())
+ .filter(|line| !line.is_empty())
+ .map(|line| {
+ if let Some((key, val)) = line.split_once('=') {
+ (key.trim().to_owned(), Some(val.trim().to_owned()))
+ } else {
+ (line.to_owned(), None)
+ }
+ })
+ .collect()
+}
+
+pub(crate) fn get_bool(name: &str) -> bool {
+ let values = load_config_file()
+ .into_iter()
+ .filter(|(key, _)| key == name)
+ .map(|(_, val)| val)
+ .collect::<Vec<_>>();
+ if values.is_empty() {
+ false
+ } else {
+ if values.iter().any(|val| val.is_some()) {
+ eprintln!("Boolean config `{}` has a value", name);
+ process::exit(1);
+ }
+ true
+ }
+}
+
+pub(crate) fn get_value(name: &str) -> Option<String> {
+ let values = load_config_file()
+ .into_iter()
+ .filter(|(key, _)| key == name)
+ .map(|(_, val)| val)
+ .collect::<Vec<_>>();
+ if values.is_empty() {
+ None
+ } else if values.len() == 1 {
+ if values[0].is_none() {
+ eprintln!("Config `{}` missing value", name);
+ process::exit(1);
+ }
+ values.into_iter().next().unwrap()
+ } else {
+ eprintln!("Config `{}` given multiple values: {:?}", name, values);
+ process::exit(1);
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/mod.rs b/compiler/rustc_codegen_cranelift/build_system/mod.rs
new file mode 100644
index 000000000..b897b7fba
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/mod.rs
@@ -0,0 +1,128 @@
+use std::env;
+use std::path::PathBuf;
+use std::process;
+
+mod build_backend;
+mod build_sysroot;
+mod config;
+mod prepare;
+mod rustc_info;
+mod utils;
+
+fn usage() {
+ eprintln!("Usage:");
+ eprintln!(" ./y.rs prepare");
+ eprintln!(
+ " ./y.rs build [--debug] [--sysroot none|clif|llvm] [--target-dir DIR] [--no-unstable-features]"
+ );
+}
+
+macro_rules! arg_error {
+ ($($err:tt)*) => {{
+ eprintln!($($err)*);
+ usage();
+ std::process::exit(1);
+ }};
+}
+
+enum Command {
+ Build,
+}
+
+#[derive(Copy, Clone)]
+pub(crate) enum SysrootKind {
+ None,
+ Clif,
+ Llvm,
+}
+
+pub fn main() {
+ env::set_var("CG_CLIF_DISPLAY_CG_TIME", "1");
+ env::set_var("CG_CLIF_DISABLE_INCR_CACHE", "1");
+ // The target dir is expected in the default location. Guard against the user changing it.
+ env::set_var("CARGO_TARGET_DIR", "target");
+
+ let mut args = env::args().skip(1);
+ let command = match args.next().as_deref() {
+ Some("prepare") => {
+ if args.next().is_some() {
+ arg_error!("./x.rs prepare doesn't expect arguments");
+ }
+ prepare::prepare();
+ process::exit(0);
+ }
+ Some("build") => Command::Build,
+        Some(flag) if flag.starts_with('-') => arg_error!("Expected command, found flag {}", flag),
+ Some(command) => arg_error!("Unknown command {}", command),
+ None => {
+ usage();
+ process::exit(0);
+ }
+ };
+
+ let mut target_dir = PathBuf::from("build");
+ let mut channel = "release";
+ let mut sysroot_kind = SysrootKind::Clif;
+ let mut use_unstable_features = true;
+ while let Some(arg) = args.next().as_deref() {
+ match arg {
+ "--target-dir" => {
+ target_dir = PathBuf::from(args.next().unwrap_or_else(|| {
+ arg_error!("--target-dir requires argument");
+ }))
+ }
+ "--debug" => channel = "debug",
+ "--sysroot" => {
+ sysroot_kind = match args.next().as_deref() {
+ Some("none") => SysrootKind::None,
+ Some("clif") => SysrootKind::Clif,
+ Some("llvm") => SysrootKind::Llvm,
+ Some(arg) => arg_error!("Unknown sysroot kind {}", arg),
+ None => arg_error!("--sysroot requires argument"),
+ }
+ }
+ "--no-unstable-features" => use_unstable_features = false,
+ flag if flag.starts_with("-") => arg_error!("Unknown flag {}", flag),
+ arg => arg_error!("Unexpected argument {}", arg),
+ }
+ }
+ target_dir = std::env::current_dir().unwrap().join(target_dir);
+
+ let host_triple = if let Ok(host_triple) = std::env::var("HOST_TRIPLE") {
+ host_triple
+ } else if let Some(host_triple) = config::get_value("host") {
+ host_triple
+ } else {
+ rustc_info::get_host_triple()
+ };
+ let target_triple = if let Ok(target_triple) = std::env::var("TARGET_TRIPLE") {
+ if target_triple != "" {
+ target_triple
+ } else {
+            host_triple.clone() // An empty target triple can happen on GitHub Actions (GHA)
+ }
+ } else if let Some(target_triple) = config::get_value("target") {
+ target_triple
+ } else {
+ host_triple.clone()
+ };
+
+ if target_triple.ends_with("-msvc") {
+ eprintln!("The MSVC toolchain is not yet supported by rustc_codegen_cranelift.");
+ eprintln!("Switch to the MinGW toolchain for Windows support.");
+ eprintln!("Hint: You can use `rustup set default-host x86_64-pc-windows-gnu` to");
+ eprintln!("set the global default target to MinGW");
+ process::exit(1);
+ }
+
+ let cg_clif_build_dir =
+ build_backend::build_backend(channel, &host_triple, use_unstable_features);
+ build_sysroot::build_sysroot(
+ channel,
+ sysroot_kind,
+ &target_dir,
+ cg_clif_build_dir,
+ &host_triple,
+ &target_triple,
+ );
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/prepare.rs b/compiler/rustc_codegen_cranelift/build_system/prepare.rs
new file mode 100644
index 000000000..8bb00352d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/prepare.rs
@@ -0,0 +1,174 @@
+use std::env;
+use std::ffi::OsStr;
+use std::ffi::OsString;
+use std::fs;
+use std::path::Path;
+use std::process::Command;
+
+use super::rustc_info::{get_file_name, get_rustc_path, get_rustc_version};
+use super::utils::{copy_dir_recursively, spawn_and_wait};
+
+pub(crate) fn prepare() {
+ prepare_sysroot();
+
+ eprintln!("[INSTALL] hyperfine");
+ Command::new("cargo").arg("install").arg("hyperfine").spawn().unwrap().wait().unwrap();
+
+ clone_repo_shallow_github(
+ "rand",
+ "rust-random",
+ "rand",
+ "0f933f9c7176e53b2a3c7952ded484e1783f0bf1",
+ );
+ apply_patches("rand", Path::new("rand"));
+
+ clone_repo_shallow_github(
+ "regex",
+ "rust-lang",
+ "regex",
+ "341f207c1071f7290e3f228c710817c280c8dca1",
+ );
+
+ clone_repo_shallow_github(
+ "portable-simd",
+ "rust-lang",
+ "portable-simd",
+ "b8d6b6844602f80af79cd96401339ec594d472d8",
+ );
+ apply_patches("portable-simd", Path::new("portable-simd"));
+
+ clone_repo_shallow_github(
+ "simple-raytracer",
+ "ebobby",
+ "simple-raytracer",
+ "804a7a21b9e673a482797aa289a18ed480e4d813",
+ );
+
+ eprintln!("[LLVM BUILD] simple-raytracer");
+ let mut build_cmd = Command::new("cargo");
+ build_cmd.arg("build").env_remove("CARGO_TARGET_DIR").current_dir("simple-raytracer");
+ spawn_and_wait(build_cmd);
+ fs::copy(
+ Path::new("simple-raytracer/target/debug").join(get_file_name("main", "bin")),
+ // FIXME use get_file_name here too once testing is migrated to rust
+ "simple-raytracer/raytracer_cg_llvm",
+ )
+ .unwrap();
+}
+
+fn prepare_sysroot() {
+ let rustc_path = get_rustc_path();
+ let sysroot_src_orig = rustc_path.parent().unwrap().join("../lib/rustlib/src/rust");
+ let sysroot_src = env::current_dir().unwrap().join("build_sysroot").join("sysroot_src");
+
+ assert!(sysroot_src_orig.exists());
+
+ if sysroot_src.exists() {
+ fs::remove_dir_all(&sysroot_src).unwrap();
+ }
+ fs::create_dir_all(sysroot_src.join("library")).unwrap();
+ eprintln!("[COPY] sysroot src");
+ copy_dir_recursively(&sysroot_src_orig.join("library"), &sysroot_src.join("library"));
+
+ let rustc_version = get_rustc_version();
+ fs::write(Path::new("build_sysroot").join("rustc_version"), &rustc_version).unwrap();
+
+ eprintln!("[GIT] init");
+ let mut git_init_cmd = Command::new("git");
+ git_init_cmd.arg("init").arg("-q").current_dir(&sysroot_src);
+ spawn_and_wait(git_init_cmd);
+
+ init_git_repo(&sysroot_src);
+
+ apply_patches("sysroot", &sysroot_src);
+}
+
+#[allow(dead_code)]
+fn clone_repo(target_dir: &str, repo: &str, rev: &str) {
+ eprintln!("[CLONE] {}", repo);
+ // Ignore exit code as the repo may already have been checked out
+ Command::new("git").arg("clone").arg(repo).arg(target_dir).spawn().unwrap().wait().unwrap();
+
+ let mut clean_cmd = Command::new("git");
+ clean_cmd.arg("checkout").arg("--").arg(".").current_dir(target_dir);
+ spawn_and_wait(clean_cmd);
+
+ let mut checkout_cmd = Command::new("git");
+ checkout_cmd.arg("checkout").arg("-q").arg(rev).current_dir(target_dir);
+ spawn_and_wait(checkout_cmd);
+}
+
+fn clone_repo_shallow_github(target_dir: &str, username: &str, repo: &str, rev: &str) {
+ if cfg!(windows) {
+        // Older versions of Windows don't have tar or curl by default. Fall back to using git.
+ clone_repo(target_dir, &format!("https://github.com/{}/{}.git", username, repo), rev);
+ return;
+ }
+
+ let archive_url = format!("https://github.com/{}/{}/archive/{}.tar.gz", username, repo, rev);
+ let archive_file = format!("{}.tar.gz", rev);
+ let archive_dir = format!("{}-{}", repo, rev);
+
+ eprintln!("[DOWNLOAD] {}/{} from {}", username, repo, archive_url);
+
+    // Remove previous results if they exist
+ let _ = std::fs::remove_file(&archive_file);
+ let _ = std::fs::remove_dir_all(&archive_dir);
+ let _ = std::fs::remove_dir_all(target_dir);
+
+    // Download the tar.gz archive
+ let mut download_cmd = Command::new("curl");
+ download_cmd.arg("--location").arg("--output").arg(&archive_file).arg(archive_url);
+ spawn_and_wait(download_cmd);
+
+ // Unpack tar archive
+ let mut unpack_cmd = Command::new("tar");
+ unpack_cmd.arg("xf").arg(&archive_file);
+ spawn_and_wait(unpack_cmd);
+
+ // Rename unpacked dir to the expected name
+ std::fs::rename(archive_dir, target_dir).unwrap();
+
+ init_git_repo(Path::new(target_dir));
+
+ // Cleanup
+ std::fs::remove_file(archive_file).unwrap();
+}
+
+fn init_git_repo(repo_dir: &Path) {
+ let mut git_init_cmd = Command::new("git");
+ git_init_cmd.arg("init").arg("-q").current_dir(repo_dir);
+ spawn_and_wait(git_init_cmd);
+
+ let mut git_add_cmd = Command::new("git");
+ git_add_cmd.arg("add").arg(".").current_dir(repo_dir);
+ spawn_and_wait(git_add_cmd);
+
+ let mut git_commit_cmd = Command::new("git");
+ git_commit_cmd.arg("commit").arg("-m").arg("Initial commit").arg("-q").current_dir(repo_dir);
+ spawn_and_wait(git_commit_cmd);
+}
+
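+// Patch files in `patches/` are named like `NNNN-<crate>-<description>.patch`. Return the ones
+// belonging to `crate_name`, sorted by their number prefix.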
+fn get_patches(crate_name: &str) -> Vec<OsString> {
+ let mut patches: Vec<_> = fs::read_dir("patches")
+ .unwrap()
+ .map(|entry| entry.unwrap().path())
+ .filter(|path| path.extension() == Some(OsStr::new("patch")))
+ .map(|path| path.file_name().unwrap().to_owned())
+ .filter(|file_name| {
+ file_name.to_str().unwrap().split_once("-").unwrap().1.starts_with(crate_name)
+ })
+ .collect();
+ patches.sort();
+ patches
+}
+
+fn apply_patches(crate_name: &str, target_dir: &Path) {
+ for patch in get_patches(crate_name) {
+ eprintln!("[PATCH] {:?} <- {:?}", target_dir.file_name().unwrap(), patch);
+ let patch_arg = env::current_dir().unwrap().join("patches").join(patch);
+ let mut apply_patch_cmd = Command::new("git");
+ apply_patch_cmd.arg("am").arg(patch_arg).arg("-q").current_dir(target_dir);
+ spawn_and_wait(apply_patch_cmd);
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs b/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs
new file mode 100644
index 000000000..9206bb02b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/rustc_info.rs
@@ -0,0 +1,65 @@
+use std::path::{Path, PathBuf};
+use std::process::{Command, Stdio};
+
+pub(crate) fn get_rustc_version() -> String {
+ let version_info =
+ Command::new("rustc").stderr(Stdio::inherit()).args(&["-V"]).output().unwrap().stdout;
+ String::from_utf8(version_info).unwrap()
+}
+
+pub(crate) fn get_host_triple() -> String {
+ let version_info =
+ Command::new("rustc").stderr(Stdio::inherit()).args(&["-vV"]).output().unwrap().stdout;
+ String::from_utf8(version_info)
+ .unwrap()
+ .lines()
+ .find(|line| line.starts_with("host"))
+ .unwrap()
+ .split(":")
+ .nth(1)
+ .unwrap()
+ .trim()
+ .to_owned()
+}
+
+pub(crate) fn get_rustc_path() -> PathBuf {
+ let rustc_path = Command::new("rustup")
+ .stderr(Stdio::inherit())
+ .args(&["which", "rustc"])
+ .output()
+ .unwrap()
+ .stdout;
+ Path::new(String::from_utf8(rustc_path).unwrap().trim()).to_owned()
+}
+
+pub(crate) fn get_default_sysroot() -> PathBuf {
+ let default_sysroot = Command::new("rustc")
+ .stderr(Stdio::inherit())
+ .args(&["--print", "sysroot"])
+ .output()
+ .unwrap()
+ .stdout;
+ Path::new(String::from_utf8(default_sysroot).unwrap().trim()).to_owned()
+}
+
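+// Ask rustc which file name a crate with the given crate name and crate type would get on the
+// current host, e.g. `librustc_codegen_cranelift.so` on Linux vs `rustc_codegen_cranelift.dll`
+// on Windows for a dylib.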
+pub(crate) fn get_file_name(crate_name: &str, crate_type: &str) -> String {
+ let file_name = Command::new("rustc")
+ .stderr(Stdio::inherit())
+ .args(&[
+ "--crate-name",
+ crate_name,
+ "--crate-type",
+ crate_type,
+ "--print",
+ "file-names",
+ "-",
+ ])
+ .output()
+ .unwrap()
+ .stdout;
+ let file_name = String::from_utf8(file_name).unwrap().trim().to_owned();
+ assert!(!file_name.contains('\n'));
+ assert!(file_name.contains(crate_name));
+ file_name
+}
diff --git a/compiler/rustc_codegen_cranelift/build_system/utils.rs b/compiler/rustc_codegen_cranelift/build_system/utils.rs
new file mode 100644
index 000000000..12b5d70fa
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/build_system/utils.rs
@@ -0,0 +1,35 @@
+use std::fs;
+use std::path::Path;
+use std::process::{self, Command};
+
+#[track_caller]
+pub(crate) fn try_hard_link(src: impl AsRef<Path>, dst: impl AsRef<Path>) {
+ let src = src.as_ref();
+ let dst = dst.as_ref();
+    if fs::hard_link(src, dst).is_err() {
+        fs::copy(src, dst).unwrap(); // Fall back to copying if hardlinking failed
+ }
+}
+
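+// Run the command to completion, exiting the whole build process if it fails.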
+#[track_caller]
+pub(crate) fn spawn_and_wait(mut cmd: Command) {
+ if !cmd.spawn().unwrap().wait().unwrap().success() {
+ process::exit(1);
+ }
+}
+
+pub(crate) fn copy_dir_recursively(from: &Path, to: &Path) {
+ for entry in fs::read_dir(from).unwrap() {
+ let entry = entry.unwrap();
+ let filename = entry.file_name();
+ if filename == "." || filename == ".." {
+ continue;
+ }
+ if entry.metadata().unwrap().is_dir() {
+ fs::create_dir(to.join(&filename)).unwrap();
+ copy_dir_recursively(&from.join(&filename), &to.join(&filename));
+ } else {
+ fs::copy(from.join(&filename), to.join(&filename)).unwrap();
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/clean_all.sh b/compiler/rustc_codegen_cranelift/clean_all.sh
new file mode 100755
index 000000000..ea1f8c1e8
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/clean_all.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+set -e
+
+rm -rf build_sysroot/{sysroot_src/,target/,compiler-builtins/,rustc_version}
+rm -rf target/ build/ perf.data{,.old} y.bin
+rm -rf rand/ regex/ simple-raytracer/ portable-simd/
diff --git a/compiler/rustc_codegen_cranelift/config.txt b/compiler/rustc_codegen_cranelift/config.txt
new file mode 100644
index 000000000..b14db27d6
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/config.txt
@@ -0,0 +1,17 @@
+# This file allows configuring the build system.
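+# Options are either boolean flags (a bare `key` on its own line) or
+# key/value pairs written as `key = value`.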
+
+# Which triple to produce a compiler toolchain for.
+#
+# Defaults to the default triple of rustc on the host system.
+#host = x86_64-unknown-linux-gnu
+
+# Which triple to build libraries (core/alloc/std/test/proc_macro) for.
+#
+# Defaults to `host`.
+#target = x86_64-unknown-linux-gnu
+
+# Disables cleaning of the sysroot dir. This will cause old compiled artifacts to be reused when
+# the sysroot source hasn't changed. This is useful when the codegen backend hasn't been modified.
+# This option can be changed while the build system is already running, as long as sysroot
+# building hasn't started yet.
+#keep_sysroot
diff --git a/compiler/rustc_codegen_cranelift/docs/dwarf.md b/compiler/rustc_codegen_cranelift/docs/dwarf.md
new file mode 100644
index 000000000..502b1b036
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/docs/dwarf.md
@@ -0,0 +1,153 @@
+# Line number information
+
+Line number information maps machine code instructions to their source-level locations.
+
+## Encoding
+
+The line number information is stored in the `.debug_line` section for ELF and in the
+`__debug_line` section of the `__DWARF` segment for Mach-O object files. It consists of a header
+followed by the line program: a program for a simple virtual machine whose instructions do things
+like "set the source line for the current machine code instruction" and "advance the current
+machine code address".
+
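+To make the "virtual machine" description concrete, here is a much-simplified sketch in plain
+Rust (an illustration only: the real encoding is byte-packed and has special opcodes that advance
+the address and the line at the same time) of how running a line program produces the
+address-to-line table:
+
+```rust
+/// A toy model of a DWARF line program.
+enum LineOp {
+    SetLine(u32),   // set the source line for subsequent instructions
+    AdvancePc(u64), // advance the current machine code address
+    EmitRow,        // record (address, line) in the line table
+}
+
+fn run(program: &[LineOp]) -> Vec<(u64, u32)> {
+    let (mut addr, mut line, mut rows) = (0u64, 1u32, Vec::new());
+    for op in program {
+        match op {
+            LineOp::SetLine(l) => line = *l,
+            LineOp::AdvancePc(d) => addr += d,
+            LineOp::EmitRow => rows.push((addr, line)),
+        }
+    }
+    rows
+}
+
+fn main() {
+    let rows = run(&[
+        LineOp::SetLine(10),
+        LineOp::EmitRow,  // (0x0, line 10)
+        LineOp::AdvancePc(4),
+        LineOp::SetLine(11),
+        LineOp::EmitRow,  // (0x4, line 11)
+    ]);
+    assert_eq!(rows, vec![(0, 10), (4, 11)]);
+}
+```
+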
+## Tips
+
+You need to set either `DW_AT_low_pc` and `DW_AT_high_pc` **or** `DW_AT_ranges` of a
+`DW_TAG_compilation_unit` to the range of addresses in the compilation unit. After that you need
+to set `DW_AT_stmt_list` to the `.debug_line` section offset of the line program. Otherwise a
+debugger won't find the line number information. On macOS the debuginfo relocations **must** be
+section relative and not symbol relative.
+See [#303 (comment)](https://github.com/bjorn3/rustc_codegen_cranelift/issues/303#issuecomment-457825535)
+for more information.
+
+# Function debuginfo
+
+## Tips
+
+`DW_TAG_subprogram` requires `DW_AT_name`, `DW_AT_low_pc` and `DW_AT_high_pc` **or** `DW_AT_ranges`.
+Otherwise gdb will silently skip it. When `DW_AT_high_pc` is a length instead of an address, the
+DWARF version must be at least 4.
+
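+As a rough illustration, here is a minimal sketch of setting those attributes using the `gimli`
+crate's `write` API (an assumption of this example, with `write` support enabled; the names and
+addresses are placeholders, not what cg_clif actually emits):
+
+```rust
+use gimli::write::{Address, AttributeValue, DwarfUnit};
+use gimli::{Encoding, Format};
+
+fn main() {
+    // DWARF version 4, so DW_AT_high_pc may be a length (udata) instead of an address.
+    let encoding = Encoding { format: Format::Dwarf32, version: 4, address_size: 8 };
+    let mut dwarf = DwarfUnit::new(encoding);
+
+    let name = dwarf.strings.add("main"); // hypothetical function name
+    let root = dwarf.unit.root();
+    let subprogram = dwarf.unit.add(root, gimli::DW_TAG_subprogram);
+    let die = dwarf.unit.get_mut(subprogram);
+    // Without DW_AT_name and a low/high pc pair, gdb silently skips the subprogram.
+    die.set(gimli::DW_AT_name, AttributeValue::StringRef(name));
+    die.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0x1000)));
+    die.set(gimli::DW_AT_high_pc, AttributeValue::Udata(0x20)); // a length, requires DWARF >= 4
+}
+```
+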
+<details>
+<summary>IRC log of #gdb on irc.freenode.org at 2020-04-23</summary>
+
+```
+(13:46:11) bjorn3: i am writing a backend for a compiler that uses DWARF for debuginfo. for some reason gdb seems to completely ignore all DW_TAG_subprogram, while lldb works fine. any idea what the problem could be?
+(13:47:49) bjorn3: this is the output of llvm-dwarfdump: https://gist.github.com/bjorn3/8a34e333c80f13cb048381e94b4a3756
+(13:47:50) osa1: luispm: why is that problem not exists in 'commands'? (the target vs. host)
+(13:52:16) luispm: osa1, commands is a bit more high level. It executes isolated commands. Breakpoint conditions need to be evaluated in the context of a valid expression. That expression may involve variables, symbols etc.
+(13:52:36) luispm: osa1, Oh, i see your point now. Commands is only executed on the host.
+(13:53:18) luispm: osa1, The commands are not tied to the execution context of the debugged program. The breakpoint conditions determine if execution must stop or continue etc.
+(13:55:00) luispm: bjorn3, Likely something GDB thinks is wrong. Does enabling "set debug dwarf*" show anything?
+(13:56:01) bjorn3: luispm: no
+(13:56:12) bjorn3: for more context: https://github.com/bjorn3/rustc_codegen_cranelift/pull/978
+(13:58:16) osa1 left the room (quit: Quit: osa1).
+(13:58:28) bjorn3: luispm: wait, for b m<TAB> it shows nothing, but when stepping into a new function it does
+(13:58:45) bjorn3: it still doesn't show anything for `info args` though
+(13:58:50) bjorn3: No symbol table info available.
+(14:00:50) luispm: bjorn3, Is that expected given the nature of the binary?
+(14:01:17) bjorn3: b main<TAB> may show nothing as I only set DW_AT_linkage_name and not DW_AT_name
+(14:01:24) bjorn3: info args should work though
+(14:03:26) luispm: Sorry, I'm not sure what's up. There may be a genuine bug there.
+(14:03:41) luispm: tromey (not currently in the channel, but maybe later today) may have more input.
+(14:04:08) bjorn3: okay, thanks luispm!
+(14:04:27) luispm: In the worst case, reporting a bug may prompt someone to look into that as well.
+(14:04:48) luispm: Or send an e-mail to the gdb@sourceware.org mailing list.
+(14:05:11) bjorn3: I don't know if it is a bug in gdb, or just me producing (slightly) wrong DWARF
+(14:39:40) irker749: gdb: tom binutils-gdb.git:master * 740480b88af / gdb/ChangeLog gdb/darwin-nat.c gdb/inferior.c gdb/inferior.h: Remove iterate_over_inferiors
+(15:22:45) irker749: gdb: tromey binutils-gdb.git:master * ecc6c6066b5 / gdb/ChangeLog gdb/dwarf2/read.c gdb/unittests/lookup_name_info-selftests.c: Fix Ada crash with .debug_names
+(15:23:13) bjorn3: tromey: ping
+(15:23:29) tromey: bjorn3: hey
+(15:24:16) bjorn3: I am writing a backend for a compiler which uses DWARF for debuginfo. I unfortunately can't get gdb to show arguments. lldb works fine.
+(15:25:13) bjorn3: it just says: No symbol table info available.
+(15:25:21) bjorn3: any idea what it could be?
+(15:25:34) bjorn3: dwarfdump output: https://gist.github.com/bjorn3/8a34e333c80f13cb048381e94b4a3756
+(15:26:48) bjorn3: more context: https://github.com/bjorn3/rustc_codegen_cranelift/pull/978
+(15:28:05) tromey: offhand I don't know, but if you can send me an executable I can look
+(15:28:17) bjorn3: how should I send it?
+(15:29:26) tromey: good question
+(15:29:41) tromey: you could try emailing it to tromey at adacore.com
+(15:29:47) tromey: dunno if that will work or not
+(15:30:26) bjorn3: i will try
+(15:37:27) bjorn3: tromey: i sent an email with the subject "gdb args not showing"
+(15:38:29) tromey: will check now
+(15:38:40) bjorn3: thanks!
+(15:42:51) irker749: gdb: tdevries binutils-gdb.git:master * de82891ce5b / gdb/ChangeLog gdb/block.c gdb/block.h gdb/symtab.c gdb/testsuite/ChangeLog gdb/testsuite/gdb.base/decl-before-def-decl.c gdb/testsuite/gdb.base/decl-before-def-def.c gdb/testsuite/gdb.base/decl-before-def.exp: [gdb/symtab] Prefer def over decl (inter-CU case)
+(15:42:52) irker749: gdb: tdevries binutils-gdb.git:master * 70bc38f5138 / gdb/ChangeLog gdb/symtab.c gdb/testsuite/ChangeLog gdb/testsuite/gdb.base/decl-before-def.exp: [gdb/symtab] Prefer def over decl (inter-CU case, with context)
+(15:43:36) tromey: bjorn3: sorry, got distracted. I have the file now
+(15:45:35) tromey: my first thing when investigating was to enable complaints
+(15:45:37) tromey: so I did
+(15:45:40) tromey: set complaints 1000
+(15:45:42) tromey: then
+(15:45:51) tromey: file -readnow mini_core_hello_world
+(15:46:00) tromey: gdb printed just one style of complaint
+(15:46:07) tromey: During symbol reading: missing name for subprogram DIE at 0x3f7
+(15:46:18) tromey: (which is really pretty good, most compilers manage to generate a bunch)
+(15:46:29) tromey: and then the gdb DWARF reader says
+(15:46:34) tromey: /* Ignore functions with missing or empty names. These are actually
+(15:46:34) tromey: illegal according to the DWARF standard. */
+(15:46:34) tromey: if (name == NULL)
+(15:46:34) tromey: {
+(15:46:37) tromey: complaint (_("missing name for subprogram DIE at %s"),
+(15:46:40) tromey: sect_offset_str (die->sect_off));
+(15:46:47) tromey: I wonder if that comment is correct though
+(15:47:34) tromey: I guess pedantically maybe it is, DWARF 5 3.3.1 says
+(15:47:43) tromey: The subroutine or entry point entry has a DW_AT_name attribute whose value is
+(15:47:43) tromey: a null-terminated string containing the subroutine or entry point name.
+(15:48:14) bjorn3: i tried set complaints, but it returned complaints for system files. i didn't know about file -readnow.
+(15:48:21) tromey: cool
+(15:48:26) bjorn3: i will try adding DW_AT_name
+(15:48:45) tromey: without readnow unfortunately you get less stuff, because for whatever reason gdb has 2 separate DWARF scanners
+(15:49:02) tromey: sort of anyway
+(15:49:43) tromey: this seems kind of pedantic of gdb, like if there's a linkage name but no DW_AT_name, then why bail?
+(15:50:01) tromey: also what about anonymous functions
+(15:50:17) tromey: but anyway this explains the current situation and if you don't mind adding DW_AT_name, then that's probably simplest
+(15:51:47) bjorn3: i added DW_AT_name.
+(15:51:54) bjorn3: now it says cannot get low and high bounds for subprogram DIE at ...
+(15:52:01) tromey: ugh
+(15:52:10) bjorn3: i will add DW_AT_low_pc and DW_AT_high_pc
+(15:52:15) tromey: /* Ignore functions with missing or invalid low and high pc attributes. */
+(15:52:37) tromey: you can also use DW_AT_ranges
+(15:52:55) tromey: if you'd prefer
+(15:53:08) bjorn3: already using DW_AT_ranges for DW_TAG_compilation_unit
+(15:53:19) bjorn3: for individual functions, there are no gaps
+(15:57:07) bjorn3: still the same error with DW_AT_low_pc and DW_AT_high_pc
+(15:57:24) bjorn3: tromey: ^
+(15:58:08) tromey: hmmm
+(15:58:30) bjorn3: should i send the new executable?
+(15:58:31) tromey: send me another executable & I will debug
+(15:58:33) tromey: yep
+(15:59:23) bjorn3: sent as repy of the previous mail
+(16:03:23) tromey: the low PC has DW_FORM_addr, but the high PC has DW_FORM_udata, which seems weird
+(16:03:50) mjw: no
+(16:03:54) tromey: no?
+(16:04:00) mjw: I suggested that for the DWARF standard...
+(16:04:05) mjw: sorry
+(16:04:58) mjw: The idea was that instead of two relocations and two address wide fields, you have one address and a constant offset.
+(16:05:05) tromey: ahh, I see the code now
+(16:05:07) tromey: I forgot about this
+(16:05:18) tromey: if (cu->header.version >= 4 && attr_high->form_is_constant ())
+(16:05:18) tromey: high += low;
+(16:05:36) mjw: that second offset doesn't need a relocation and can often be packed in something small, like an uleb128
+(16:05:51) mjw: using udata might not be ideal though, but is allowed
+(16:05:51) tromey: bjorn3: the problem is that this CU claims to be DWARF 3 but is using a DWARF 4 feature
+(16:05:58) mjw: aha
+(16:05:59) bjorn3: which one?
+(16:06:03) ryoshu: hi
+(16:06:08) tromey: high_pc (udata) 107 (+0x00000000000011b0 <_ZN21mini_core_hello_world5start17hec55b7ca64fc434eE>)
+(16:06:08) tromey:
+(16:06:12) ryoshu: just soft ping, I have a queue of patches :)
+(16:06:22) tromey: using this as a length requires DWARF 4
+(16:06:36) tromey: for gdb at least it's fine to always emit DWARF 4
+(16:06:44) bjorn3: trying dwarf 4 now
+(16:06:48) tromey: I think there are some DWARF 5 features still in the works but DWARF 4 should be solid AFAIK
+(16:07:03) tromey: fini
+(16:07:08) tromey: lol wrong window
+(16:07:56) mjw: Maybe you can accept it for DWARF < 4. But if I remember correctly it might be that people might have been using udata as if it was an address...
+(16:08:13) tromey: yeah, I vaguely recall this as well, though I'd expect there to be a comment
+(16:08:21) mjw: Cannot really remember why it needed version >= 4. Maybe there was no good reason?
+(16:08:32) bjorn3: tromey: it works!!!! thanks for all the help!
+(16:08:41) tromey: my pleasure bjorn3
+```
+
+</details>
diff --git a/compiler/rustc_codegen_cranelift/docs/usage.md b/compiler/rustc_codegen_cranelift/docs/usage.md
new file mode 100644
index 000000000..33f146e7b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/docs/usage.md
@@ -0,0 +1,67 @@
+# Usage
+
+rustc_codegen_cranelift can be used as a near-drop-in replacement for `cargo build` or `cargo run` for existing projects.
+
+These instructions assume that `$cg_clif_dir` is the directory you cloned this repo into and that
+you have already built cg_clif by following the instructions (`y.rs prepare` and `y.rs build`, or `test.sh`).
+
+## Cargo
+
+In the directory with your project (where you can do the usual `cargo build`), run:
+
+```bash
+$ $cg_clif_dir/build/cargo-clif build
+```
+
+This will build your project with rustc_codegen_cranelift instead of the usual LLVM backend.
+
+## Rustc
+
+> You should prefer using the Cargo method.
+
+```bash
+$ $cg_clif_dir/build/rustc-clif my_crate.rs
+```
+
+## JIT mode
+
+> ⚠⚠⚠ The JIT mode is highly experimental. It may be slower than AOT compilation due to the lack of incremental compilation. It may also be hard to set up if you have cargo dependencies. ⚠⚠⚠
+
+In JIT mode, cg_clif will immediately execute your code without creating an executable file.
+
+> This requires all dependencies to be available as dynamic libraries.
+> The JIT mode will probably need cargo integration to make this possible.
+
+```bash
+$ $cg_clif_dir/build/cargo-clif jit
+```
+
+or
+
+```bash
+$ $cg_clif_dir/build/rustc-clif -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic my_crate.rs
+```
+
+There is also an experimental lazy JIT mode. In this mode, functions are only compiled once they
+are first called.
+
+```bash
+$ $cg_clif_dir/build/cargo-clif lazy-jit
+```
+
+## Shell
+
+These are a few shell functions that let you easily run Rust code from the shell using cg_clif as a JIT.
+
+```bash
+function jit_naked() {
+ echo "$@" | $cg_clif_dir/build/rustc-clif - -Zunstable-features -Cllvm-args=mode=jit -Cprefer-dynamic
+}
+
+function jit() {
+ jit_naked "fn main() { $@ }"
+}
+
+function jit_calc() {
+ jit 'println!("0x{:x}", ' $@ ');';
+}
+```
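+
+For example, assuming a successful build, `jit_calc '2 * 21'` compiles and runs a tiny program and prints `0x2a`.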
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_example.rs b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
new file mode 100644
index 000000000..bc1594d82
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/alloc_example.rs
@@ -0,0 +1,38 @@
+#![feature(start, core_intrinsics, alloc_error_handler, box_syntax)]
+#![no_std]
+
+extern crate alloc;
+extern crate alloc_system;
+
+use alloc::boxed::Box;
+
+use alloc_system::System;
+
+#[global_allocator]
+static ALLOC: System = System;
+
+#[cfg_attr(unix, link(name = "c"))]
+#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+extern "C" {
+ fn puts(s: *const u8) -> i32;
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ core::intrinsics::abort();
+}
+
+#[alloc_error_handler]
+fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
+ core::intrinsics::abort();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ let world: Box<&str> = box "Hello World!\0";
+ unsafe {
+ puts(*world as *const str as *const u8);
+ }
+
+ 0
+}
diff --git a/compiler/rustc_codegen_cranelift/example/alloc_system.rs b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
new file mode 100644
index 000000000..cf95c89bc
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/alloc_system.rs
@@ -0,0 +1,130 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![no_std]
+
+pub struct System;
+
+#[cfg(any(windows, unix, target_os = "redox"))]
+mod realloc_fallback {
+ use core::alloc::{GlobalAlloc, Layout};
+ use core::cmp;
+ use core::ptr;
+ impl super::System {
+ pub(crate) unsafe fn realloc_fallback(
+ &self,
+ ptr: *mut u8,
+ old_layout: Layout,
+ new_size: usize,
+ ) -> *mut u8 {
+ // Docs for GlobalAlloc::realloc require this to be valid:
+ let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
+ let new_ptr = GlobalAlloc::alloc(self, new_layout);
+ if !new_ptr.is_null() {
+ let size = cmp::min(old_layout.size(), new_size);
+ ptr::copy_nonoverlapping(ptr, new_ptr, size);
+ GlobalAlloc::dealloc(self, ptr, old_layout);
+ }
+ new_ptr
+ }
+ }
+}
+#[cfg(any(unix, target_os = "redox"))]
+mod platform {
+ use core::alloc::{GlobalAlloc, Layout};
+ use core::ffi::c_void;
+ use core::ptr;
+ use System;
+ extern "C" {
+ fn posix_memalign(memptr: *mut *mut c_void, align: usize, size: usize) -> i32;
+ fn free(p: *mut c_void);
+ }
+ unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ aligned_malloc(&layout)
+ }
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ let ptr = self.alloc(layout.clone());
+ if !ptr.is_null() {
+ ptr::write_bytes(ptr, 0, layout.size());
+ }
+ ptr
+ }
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ free(ptr as *mut c_void)
+ }
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ self.realloc_fallback(ptr, layout, new_size)
+ }
+ }
+ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ let mut out = ptr::null_mut();
+ let ret = posix_memalign(&mut out, layout.align(), layout.size());
+ if ret != 0 { ptr::null_mut() } else { out as *mut u8 }
+ }
+}
+#[cfg(windows)]
+#[allow(nonstandard_style)]
+mod platform {
+ use core::alloc::{GlobalAlloc, Layout};
+ use System;
+ type LPVOID = *mut u8;
+ type HANDLE = LPVOID;
+ type SIZE_T = usize;
+ type DWORD = u32;
+ type BOOL = i32;
+ extern "system" {
+ fn GetProcessHeap() -> HANDLE;
+ fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
+ fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
+ fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
+ fn GetLastError() -> DWORD;
+ }
+ #[repr(C)]
+ struct Header(*mut u8);
+ const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
+ unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
+ &mut *(ptr as *mut Header).offset(-1)
+ }
+ unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
+ let aligned = ptr.add(align - (ptr as usize & (align - 1)));
+ *get_header(aligned) = Header(ptr);
+ aligned
+ }
+ #[inline]
+ unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
+ let size = layout.size() + layout.align();
+ let ptr = HeapAlloc(GetProcessHeap(), flags, size);
+ (if ptr.is_null() { ptr } else { align_ptr(ptr, layout.align()) }) as *mut u8
+ }
+ unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, 0)
+ }
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, HEAP_ZERO_MEMORY)
+ }
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ let header = get_header(ptr);
+ let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
+ debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError());
+ }
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ self.realloc_fallback(ptr, layout, new_size)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
new file mode 100644
index 000000000..d270fec6b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/arbitrary_self_types_pointers_and_wrappers.rs
@@ -0,0 +1,68 @@
+// Adapted from rustc run-pass test suite
+
+#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
+
+use std::{
+ ops::{Deref, CoerceUnsized, DispatchFromDyn},
+ marker::Unsize,
+};
+
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &*self.0
+ }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+trait Trait {
+ // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+ // without unsized_locals), but wrappers around `Self` currently are not.
+ // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+ // fn wrapper(self: Wrapper<Self>) -> i32;
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+ ***self
+ }
+}
+
+fn main() {
+ let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
+ assert_eq!(pw.ptr_wrapper(), 5);
+
+ let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
+ assert_eq!(wp.wrapper_ptr(), 6);
+
+ let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+ assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/dst-field-align.rs b/compiler/rustc_codegen_cranelift/example/dst-field-align.rs
new file mode 100644
index 000000000..6c338e999
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/dst-field-align.rs
@@ -0,0 +1,67 @@
+// run-pass
+#![allow(dead_code)]
+struct Foo<T: ?Sized> {
+ a: u16,
+ b: T
+}
+
+trait Bar {
+ fn get(&self) -> usize;
+}
+
+impl Bar for usize {
+ fn get(&self) -> usize { *self }
+}
+
+struct Baz<T: ?Sized> {
+ a: T
+}
+
+struct HasDrop<T: ?Sized> {
+ ptr: Box<usize>,
+ data: T
+}
+
+fn main() {
+ // Test that zero-offset works properly
+ let b : Baz<usize> = Baz { a: 7 };
+ assert_eq!(b.a.get(), 7);
+ let b : &Baz<dyn Bar> = &b;
+ assert_eq!(b.a.get(), 7);
+
+ // Test that the field is aligned properly
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ assert_eq!(f.b.get(), 11);
+ let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+
+ let f : &Foo<dyn Bar> = &f;
+ let ptr2 : *const u8 = &f.b as *const _ as *const u8;
+ assert_eq!(f.b.get(), 11);
+
+ // The pointers should be the same
+ assert_eq!(ptr1, ptr2);
+
+ // Test that nested DSTs work properly
+ let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+ assert_eq!(f.b.b.get(), 17);
+ let f : &Foo<Foo<dyn Bar>> = &f;
+ assert_eq!(f.b.b.get(), 17);
+
+ // Test that get the pointer via destructuring works
+
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ let f : &Foo<dyn Bar> = &f;
+ let &Foo { a: _, b: ref bar } = f;
+ assert_eq!(bar.get(), 11);
+
+ // Make sure that drop flags don't screw things up
+
+ let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+ ptr: Box::new(0),
+ data: Baz { a: [1,2,3,4] }
+ };
+ assert_eq!([1,2,3,4], d.data.a);
+
+ let d : &HasDrop<Baz<[i32]>> = &d;
+ assert_eq!(&[1,2,3,4], &d.data.a);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/example.rs b/compiler/rustc_codegen_cranelift/example/example.rs
new file mode 100644
index 000000000..d5c122bf6
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/example.rs
@@ -0,0 +1,208 @@
+#![feature(no_core, unboxed_closures)]
+#![no_core]
+#![allow(dead_code)]
+
+extern crate mini_core;
+
+use mini_core::*;
+
+pub fn abc(a: u8) -> u8 {
+ a * 2
+}
+
+pub fn bcd(b: bool, a: u8) -> u8 {
+ if b {
+ a * 2
+ } else {
+ a * 3
+ }
+}
+
+pub fn call() {
+ abc(42);
+}
+
+pub fn indirect_call() {
+ let f: fn() = call;
+ f();
+}
+
+pub enum BoolOption {
+ Some(bool),
+ None,
+}
+
+pub fn option_unwrap_or(o: BoolOption, d: bool) -> bool {
+ match o {
+ BoolOption::Some(b) => b,
+ BoolOption::None => d,
+ }
+}
+
+pub fn ret_42() -> u8 {
+ 42
+}
+
+pub fn return_str() -> &'static str {
+ "hello world"
+}
+
+pub fn promoted_val() -> &'static u8 {
+ &(1 * 2)
+}
+
+pub fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 {
+ abc as *const u8
+}
+
+pub fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool {
+ a == b
+}
+
+pub fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+ (
+ a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+ b as u32,
+ )
+}
+
+pub fn char_cast(c: char) -> u8 {
+ c as u8
+}
+
+pub struct DebugTuple(());
+
+pub fn debug_tuple() -> DebugTuple {
+ DebugTuple(())
+}
+
+pub fn size_of<T>() -> usize {
+ intrinsics::size_of::<T>()
+}
+
+pub fn use_size_of() -> usize {
+ size_of::<u64>()
+}
+
+pub unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) {
+ intrinsics::copy::<u8>(src, dst, 1);
+}
+
+pub unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) {
+ let copy2 = &intrinsics::copy::<u8>;
+ copy2(src, dst, 1);
+}
+
+pub const ABC: u8 = 6 * 7;
+
+pub fn use_const() -> u8 {
+ ABC
+}
+
+pub fn call_closure_3arg() {
+ (|_, _, _| {})(0u8, 42u16, 0u8)
+}
+
+pub fn call_closure_2arg() {
+ (|_, _| {})(0u8, 42u16)
+}
+
+pub struct IsNotEmpty;
+
+impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty {
+ type Output = (u8, u8);
+
+ #[inline]
+ extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) {
+ self.call_mut(arg)
+ }
+}
+
+impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty {
+ #[inline]
+ extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) {
+ (0, 42)
+ }
+}
+
+pub fn call_is_not_empty() {
+ IsNotEmpty.call_once((&(&[0u16] as &[_]),));
+}
+
+pub fn eq_char(a: char, b: char) -> bool {
+ a == b
+}
+
+pub unsafe fn transmute(c: char) -> u32 {
+ intrinsics::transmute(c)
+}
+
+pub unsafe fn deref_str_ptr(s: *const str) -> &'static str {
+ &*s
+}
+
+pub fn use_array(arr: [u8; 3]) -> u8 {
+ arr[1]
+}
+
+pub fn repeat_array() -> [u8; 3] {
+ [0; 3]
+}
+
+pub fn array_as_slice(arr: &[u8; 3]) -> &[u8] {
+ arr
+}
+
+pub unsafe fn use_ctlz_nonzero(a: u16) -> u16 {
+ intrinsics::ctlz_nonzero(a)
+}
+
+pub fn ptr_as_usize(ptr: *const u8) -> usize {
+ ptr as usize
+}
+
+pub fn float_cast(a: f32, b: f64) -> (f64, f32) {
+ (a as f64, b as f32)
+}
+
+pub fn int_to_float(a: u8, b: i32) -> (f64, f32) {
+ (a as f64, b as f32)
+}
+
+pub fn make_array() -> [u8; 3] {
+ [42, 0, 5]
+}
+
+pub fn some_promoted_tuple() -> &'static (&'static str, &'static str) {
+ &("abc", "some")
+}
+
+pub fn index_slice(s: &[u8]) -> u8 {
+ s[2]
+}
+
+pub struct StrWrapper {
+ s: str,
+}
+
+pub fn str_wrapper_get(w: &StrWrapper) -> &str {
+ &w.s
+}
+
+pub fn i16_as_i8(a: i16) -> i8 {
+ a as i8
+}
+
+pub struct Unsized(u8, str);
+
+pub fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 {
+ &u.0
+}
+
+pub fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str {
+ &u.1
+}
+
+pub fn reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 {
+ a.0
+}
diff --git a/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs b/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs
new file mode 100644
index 000000000..b8f901d1b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/float-minmax-pass.rs
@@ -0,0 +1,53 @@
+// Copied from https://github.com/rust-lang/rust/blob/3fe3b89cd57229343eeca753fdd8c63d9b03c65c/src/test/ui/simd/intrinsic/float-minmax-pass.rs
+// run-pass
+// ignore-emscripten
+
+// Test that the simd_f{min,max} intrinsics produce the correct results.
+
+#![feature(repr_simd, platform_intrinsics)]
+#![allow(non_camel_case_types)]
+
+#[repr(simd)]
+#[derive(Copy, Clone, PartialEq, Debug)]
+struct f32x4(pub f32, pub f32, pub f32, pub f32);
+
+extern "platform-intrinsic" {
+ fn simd_fmin<T>(x: T, y: T) -> T;
+ fn simd_fmax<T>(x: T, y: T) -> T;
+}
+
+fn main() {
+ let x = f32x4(1.0, 2.0, 3.0, 4.0);
+ let y = f32x4(2.0, 1.0, 4.0, 3.0);
+
+ #[cfg(not(any(target_arch = "mips", target_arch = "mips64")))]
+ let nan = f32::NAN;
+ // MIPS hardware treats f32::NAN as SNAN. Clear the signaling bit.
+ // See https://github.com/rust-lang/rust/issues/52746.
+ #[cfg(any(target_arch = "mips", target_arch = "mips64"))]
+ let nan = f32::from_bits(f32::NAN.to_bits() - 1);
+
+ let n = f32x4(nan, nan, nan, nan);
+
+ unsafe {
+ let min0 = simd_fmin(x, y);
+ let min1 = simd_fmin(y, x);
+ assert_eq!(min0, min1);
+ let e = f32x4(1.0, 1.0, 3.0, 3.0);
+ assert_eq!(min0, e);
+ let minn = simd_fmin(x, n);
+ assert_eq!(minn, x);
+ let minn = simd_fmin(y, n);
+ assert_eq!(minn, y);
+
+ let max0 = simd_fmax(x, y);
+ let max1 = simd_fmax(y, x);
+ assert_eq!(max0, max1);
+ let e = f32x4(2.0, 2.0, 4.0, 4.0);
+ assert_eq!(max0, e);
+ let maxn = simd_fmax(x, n);
+ assert_eq!(maxn, x);
+ let maxn = simd_fmax(y, n);
+ assert_eq!(maxn, y);
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs b/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs
new file mode 100644
index 000000000..2ecc8b823
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/issue-91827-extern-types.rs
@@ -0,0 +1,59 @@
+// Copied from rustc ui test suite
+
+// run-pass
+//
+// Test that we can handle unsized types with an extern type tail part.
+// Regression test for issue #91827.
+
+#![feature(const_ptr_offset_from)]
+#![feature(extern_types)]
+
+use std::ptr::addr_of;
+
+extern "C" {
+ type Opaque;
+}
+
+unsafe impl Sync for Opaque {}
+
+#[repr(C)]
+pub struct List<T> {
+ len: usize,
+ data: [T; 0],
+ tail: Opaque,
+}
+
+#[repr(C)]
+pub struct ListImpl<T, const N: usize> {
+ len: usize,
+ data: [T; N],
+}
+
+impl<T> List<T> {
+ const fn as_slice(&self) -> &[T] {
+ unsafe { std::slice::from_raw_parts(self.data.as_ptr(), self.len) }
+ }
+}
+
+impl<T, const N: usize> ListImpl<T, N> {
+ const fn as_list(&self) -> &List<T> {
+ unsafe { std::mem::transmute(self) }
+ }
+}
+
+pub static A: ListImpl<u128, 3> = ListImpl {
+ len: 3,
+ data: [5, 6, 7],
+};
+pub static A_REF: &'static List<u128> = A.as_list();
+pub static A_TAIL_OFFSET: isize = tail_offset(A.as_list());
+
+const fn tail_offset<T>(list: &List<T>) -> isize {
+ unsafe { (addr_of!(list.tail) as *const u8).offset_from(list as *const List<T> as *const u8) }
+}
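+
+// For illustration: `tail_offset` measures where the extern-type tail lands
+// after `len` and the (possibly padded) `data` array. The test only relies
+// on const eval and runtime agreeing on that value, not on a specific
+// number, since `Opaque`'s layout is unknown.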
+
+fn main() {
+ assert_eq!(A_REF.as_slice(), &[5, 6, 7]);
+ // Check that interpreter and code generation agree about the position of the tail field.
+ assert_eq!(A_TAIL_OFFSET, tail_offset(A_REF));
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core.rs b/compiler/rustc_codegen_cranelift/example/mini_core.rs
new file mode 100644
index 000000000..8b6042a3d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mini_core.rs
@@ -0,0 +1,657 @@
+#![feature(
+ no_core,
+ lang_items,
+ intrinsics,
+ unboxed_closures,
+ extern_types,
+ decl_macro,
+ rustc_attrs,
+ transparent_unions,
+ auto_traits,
+ thread_local
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "destruct"]
+pub trait Destruct {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {}
+
+// &T -> &U
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+#[lang = "receiver"]
+pub trait Receiver {}
+
+impl<T: ?Sized> Receiver for &T {}
+impl<T: ?Sized> Receiver for &mut T {}
+impl<T: ?Sized> Receiver for Box<T> {}
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for u128 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for f64 {}
+unsafe impl Copy for char {}
+unsafe impl<'a, T: ?Sized> Copy for &'a T {}
+unsafe impl<T: ?Sized> Copy for *const T {}
+unsafe impl<T: ?Sized> Copy for *mut T {}
+unsafe impl<T: Copy> Copy for Option<T> {}
+
+#[lang = "sync"]
+pub unsafe trait Sync {}
+
+unsafe impl Sync for bool {}
+unsafe impl Sync for u8 {}
+unsafe impl Sync for u16 {}
+unsafe impl Sync for u32 {}
+unsafe impl Sync for u64 {}
+unsafe impl Sync for usize {}
+unsafe impl Sync for i8 {}
+unsafe impl Sync for i16 {}
+unsafe impl Sync for i32 {}
+unsafe impl Sync for isize {}
+unsafe impl Sync for char {}
+unsafe impl<'a, T: ?Sized> Sync for &'a T {}
+unsafe impl Sync for [u8; 16] {}
+
+#[lang = "freeze"]
+unsafe auto trait Freeze {}
+
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "not"]
+pub trait Not {
+ type Output;
+
+ fn not(self) -> Self::Output;
+}
+
+impl Not for bool {
+ type Output = bool;
+
+ fn not(self) -> bool {
+ !self
+ }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for usize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "rem"]
+pub trait Rem<RHS = Self> {
+ type Output;
+
+ fn rem(self, rhs: RHS) -> Self::Output;
+}
+
+impl Rem for usize {
+ type Output = Self;
+
+ fn rem(self, rhs: Self) -> Self {
+ self % rhs
+ }
+}
+
+#[lang = "bitor"]
+pub trait BitOr<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn bitor(self, rhs: RHS) -> Self::Output;
+}
+
+impl BitOr for bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ self | rhs
+ }
+}
+
+impl<'a> BitOr<bool> for &'a bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ *self | rhs
+ }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+ fn eq(&self, other: &Rhs) -> bool;
+ fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+ fn eq(&self, other: &u8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u16 {
+ fn eq(&self, other: &u16) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u16) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u32 {
+ fn eq(&self, other: &u32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u64 {
+ fn eq(&self, other: &u64) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u64) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u128 {
+ fn eq(&self, other: &u128) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u128) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for usize {
+ fn eq(&self, other: &usize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &usize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i8 {
+ fn eq(&self, other: &i8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i32 {
+ fn eq(&self, other: &i32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for isize {
+ fn eq(&self, other: &isize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &isize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for char {
+ fn eq(&self, other: &char) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &char) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl<T: ?Sized> PartialEq for *const T {
+ fn eq(&self, other: &*const T) -> bool {
+ *self == *other
+ }
+ fn ne(&self, other: &*const T) -> bool {
+ *self != *other
+ }
+}
+
+impl<T: PartialEq> PartialEq for Option<T> {
+ fn eq(&self, other: &Self) -> bool {
+ match (self, other) {
+ (Some(lhs), Some(rhs)) => *lhs == *rhs,
+ (None, None) => true,
+ _ => false,
+ }
+ }
+
+ fn ne(&self, other: &Self) -> bool {
+ match (self, other) {
+ (Some(lhs), Some(rhs)) => *lhs != *rhs,
+ (None, None) => false,
+ _ => true,
+ }
+ }
+}
+
+#[lang = "shl"]
+pub trait Shl<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn shl(self, rhs: RHS) -> Self::Output;
+}
+
+impl Shl for u128 {
+ type Output = u128;
+
+ fn shl(self, rhs: u128) -> u128 {
+ self << rhs
+ }
+}
+
+#[lang = "neg"]
+pub trait Neg {
+ type Output;
+
+ fn neg(self) -> Self::Output;
+}
+
+impl Neg for i8 {
+ type Output = i8;
+
+ fn neg(self) -> i8 {
+ -self
+ }
+}
+
+impl Neg for i16 {
+ type Output = i16;
+
+ fn neg(self) -> i16 {
+        -self
+ }
+}
+
+impl Neg for isize {
+ type Output = isize;
+
+ fn neg(self) -> isize {
+ -self
+ }
+}
+
+impl Neg for f32 {
+ type Output = f32;
+
+ fn neg(self) -> f32 {
+ -self
+ }
+}
+
+pub enum Option<T> {
+ Some(T),
+ None,
+}
+
+pub use Option::*;
+
+#[lang = "phantom_data"]
+pub struct PhantomData<T: ?Sized>;
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+ #[lang = "fn_once_output"]
+ type Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "panic"]
+#[track_caller]
+pub fn panic(_msg: &'static str) -> ! {
+ unsafe {
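+        // C strings are faked here by embedding a trailing \0 in a str
+        // literal and casting the raw str pointer down to a thin *const i8.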
+ libc::puts("Panicking\n\0" as *const str as *const i8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() -> ! {
+ loop {}
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "deref"]
+pub trait Deref {
+ type Target: ?Sized;
+
+ fn deref(&self) -> &Self::Target;
+}
+
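+// The scalar-valid-range attribute below carves out the null niche: 0 is
+// declared invalid, so Option<NonNull<T>> can reuse it to represent None.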
+#[repr(transparent)]
+#[rustc_layout_scalar_valid_range_start(1)]
+#[rustc_nonnull_optimization_guaranteed]
+pub struct NonNull<T: ?Sized>(pub *const T);
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<NonNull<U>> for NonNull<T> where T: Unsize<U> {}
+
+pub struct Unique<T: ?Sized> {
+ pub pointer: NonNull<T>,
+ pub _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
+#[lang = "owned_box"]
+pub struct Box<T: ?Sized>(Unique<T>, ());
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+impl<T: ?Sized> Drop for Box<T> {
+ fn drop(&mut self) {
+        // Drop is currently performed by the compiler.
+ }
+}
+
+impl<T: ?Sized> Deref for Box<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &**self
+ }
+}
+
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
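+    // The alignment argument is ignored; malloc's guaranteed alignment is
+    // assumed to be enough for everything these examples allocate.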
+ libc::malloc(size)
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized>(ptr: Unique<T>, alloc: ()) {
+ libc::free(ptr.pointer.0 as *mut u8);
+}
+
+#[lang = "drop"]
+pub trait Drop {
+ fn drop(&mut self);
+}
+
+#[lang = "manually_drop"]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+ pub value: T,
+}
+
+#[lang = "maybe_uninit"]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+ pub uninit: (),
+ pub value: ManuallyDrop<T>,
+}
+
+pub mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ pub fn size_of<T>() -> usize;
+ pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn min_align_of<T>() -> usize;
+ pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn transmute<T, U>(e: T) -> U;
+ pub fn ctlz_nonzero<T>(x: T) -> T;
+ pub fn needs_drop<T: ?::Sized>() -> bool;
+ pub fn bitreverse<T>(x: T) -> T;
+ pub fn bswap<T>(x: T) -> T;
+ pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+ }
+}
+
+pub mod libc {
+ #[cfg_attr(unix, link(name = "c"))]
+ #[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+ extern "C" {
+ pub fn puts(s: *const i8) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn malloc(size: usize) -> *mut u8;
+ pub fn free(ptr: *mut u8);
+ pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
+ pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
+ pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+extern {
+ type VaListImpl;
+}
+
+#[lang = "va_list"]
+#[repr(transparent)]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro stringify($($t:tt)*) { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro file() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro line() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro cfg() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro global_asm() { /* compiler built-in */ }
+
+pub static A_STATIC: u8 = 42;
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[no_mangle]
+#[cfg(not(windows))]
+pub fn get_tls() -> u8 {
+ #[thread_local]
+ static A: u8 = 42;
+
+ A
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
new file mode 100644
index 000000000..aa1f239ba
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mini_core_hello_world.rs
@@ -0,0 +1,515 @@
+#![feature(no_core, lang_items, never_type, linkage, extern_types, thread_local, box_syntax)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+macro_rules! assert {
+ ($e:expr) => {
+ if !$e {
+ panic(stringify!(! $e));
+ }
+ };
+}
+
+macro_rules! assert_eq {
+ ($l:expr, $r: expr) => {
+ if $l != $r {
+ panic(stringify!($l != $r));
+ }
+ }
+}
+
+#[lang = "termination"]
+trait Termination {
+ fn report(self) -> i32;
+}
+
+impl Termination for () {
+ fn report(self) -> i32 {
+ unsafe {
+ NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+ assert_eq!(*NUM_REF as i32, 44);
+ }
+ 0
+ }
+}
+
+trait SomeTrait {
+ fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+ fn object_safe(&self) {
+ unsafe {
+ puts(*self as *const str as *const i8);
+ }
+ }
+}
+
+struct NoisyDrop {
+ text: &'static str,
+ inner: NoisyDropInner,
+}
+
+struct NoisyDropUnsized {
+ inner: NoisyDropInner,
+ text: str,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+ fn drop(&mut self) {
+ unsafe {
+ puts(self.text as *const str as *const i8);
+ }
+ }
+}
+
+impl Drop for NoisyDropInner {
+ fn drop(&mut self) {
+ unsafe {
+ puts("Inner got dropped!\0" as *const str as *const i8);
+ }
+ }
+}
+
+impl SomeTrait for NoisyDrop {
+ fn object_safe(&self) {}
+}
+
+enum Ordering {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+}
+
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+ main: fn() -> T,
+ argc: isize,
+ argv: *const *const u8,
+) -> isize {
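+    // Without core's pointer methods, argv is indexed by hand below: the
+    // address of argv[i] is argv + i * size_of::<*const u8>().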
+ if argc == 3 {
+ unsafe { puts(*argv as *const i8); }
+ unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+ unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const i8)); }
+ }
+
+ main().report() as isize
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
+unsafe fn zeroed<T>() -> T {
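+    // A no_core stand-in for mem::zeroed: fill the MaybeUninit payload with
+    // zero bytes, then read the value back out of the union.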
+ let mut uninit = MaybeUninit { uninit: () };
+ intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+ uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+ (0, 0)
+}
+
+fn call_return_u128_pair() {
+ return_u128_pair();
+}
+
+#[repr(C)]
+pub struct bool_11 {
+ field0: bool,
+ field1: bool,
+ field2: bool,
+ field3: bool,
+ field4: bool,
+ field5: bool,
+ field6: bool,
+ field7: bool,
+ field8: bool,
+ field9: bool,
+ field10: bool,
+}
+
+extern "C" fn bool_struct_in_11(arg0: bool_11) {}
+
+#[allow(unreachable_code)] // FIXME false positive
+fn main() {
+ take_unique(Unique {
+ pointer: unsafe { NonNull(1 as *mut ()) },
+ _marker: PhantomData,
+ });
+ take_f32(0.1);
+
+ call_return_u128_pair();
+
+ bool_struct_in_11(bool_11 {
+ field0: true,
+ field1: true,
+ field2: true,
+ field3: true,
+ field4: true,
+ field5: true,
+ field6: true,
+ field7: true,
+ field8: true,
+ field9: true,
+ field10: true,
+ });
+
+ let slice = &[0, 1] as &[i32];
+ let slice_ptr = slice as *const [i32] as *const i32;
+
+ assert_eq!(slice_ptr as usize % 4, 0);
+
+ //return;
+
+ unsafe {
+ printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+ let hello: &[u8] = b"Hello\0" as &[u8; 6];
+ let ptr: *const i8 = hello as *const [u8] as *const i8;
+ puts(ptr);
+
+ let world: Box<&str> = box "World!\0";
+ puts(*world as *const str as *const i8);
+ world as Box<dyn SomeTrait>;
+
+ assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+ assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+ assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+ assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+ assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+ assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+ let chars = &['C', 'h', 'a', 'r', 's'];
+ let chars = chars as &[char];
+ assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+ let a: &dyn SomeTrait = &"abc\0";
+ a.object_safe();
+
+ assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+ assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+ assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+ assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+ assert!(!intrinsics::needs_drop::<u8>());
+ assert!(!intrinsics::needs_drop::<[u8]>());
+ assert!(intrinsics::needs_drop::<NoisyDrop>());
+ assert!(intrinsics::needs_drop::<NoisyDropUnsized>());
+
+ Unique {
+ pointer: NonNull(1 as *mut &str),
+ _marker: PhantomData,
+ } as Unique<dyn SomeTrait>;
+
+ struct MyDst<T: ?Sized>(T);
+
+ intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+ struct Foo {
+ x: u8,
+ y: !,
+ }
+
+ unsafe fn uninitialized<T>() -> T {
+ MaybeUninit { uninit: () }.value.value
+ }
+
+ zeroed::<(u8, u8)>();
+ #[allow(unreachable_code)]
+ {
+ if false {
+ zeroed::<!>();
+ zeroed::<Foo>();
+ uninitialized::<Foo>();
+ }
+ }
+ }
+
+ let _ = box NoisyDrop {
+ text: "Boxed outer got dropped!\0",
+ inner: NoisyDropInner,
+ } as Box<dyn SomeTrait>;
+
+ const FUNC_REF: Option<fn()> = Some(main);
+ match FUNC_REF {
+ Some(_) => {},
+ None => assert!(false),
+ }
+
+ match Ordering::Less {
+ Ordering::Less => {},
+ _ => assert!(false),
+ }
+
+ [NoisyDropInner, NoisyDropInner];
+
+ let x = &[0u32, 42u32] as &[u32];
+ match x {
+ [] => assert_eq!(0u32, 1),
+ [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+ }
+
+ assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+ #[cfg(not(any(jit, windows)))]
+ {
+ extern {
+ #[linkage = "extern_weak"]
+ static ABC: *const u8;
+ }
+
+ {
+ extern {
+ #[linkage = "extern_weak"]
+ static ABC: *const u8;
+ }
+ }
+
+ unsafe { assert_eq!(ABC as usize, 0); }
+ }
+
+ &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
+ let f = 1000.0;
+ assert_eq!(f as u8, 255);
+ let f2 = -1000.0;
+ assert_eq!(f2 as i8, -128);
+ assert_eq!(f2 as u8, 0);
+
+ let amount = 0;
+ assert_eq!(1u128 << amount, 1);
+
+ static ANOTHER_STATIC: &u8 = &A_STATIC;
+ assert_eq!(*ANOTHER_STATIC, 42);
+
+ check_niche_behavior();
+
+ extern "C" {
+ type ExternType;
+ }
+
+ struct ExternTypeWrapper {
+ _a: ExternType,
+ }
+
+ let nullptr = 0 as *const ();
+ let extern_nullptr = nullptr as *const ExternTypeWrapper;
+ extern_nullptr as *const ();
+ let slice_ptr = &[] as *const [u8];
+ slice_ptr as *const u8;
+
+ let repeat = [Some(42); 2];
+ assert_eq!(repeat[0], Some(42));
+ assert_eq!(repeat[1], Some(42));
+
+ from_decimal_string();
+
+ #[cfg(not(any(jit, windows)))]
+ test_tls();
+
+ #[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+ unsafe {
+ global_asm_test();
+ }
+
+ // Both statics have a reference that points to the same anonymous allocation.
+ static REF1: &u8 = &42;
+ static REF2: &u8 = REF1;
+ assert_eq!(*REF1, *REF2);
+
+ extern "C" {
+ type A;
+ }
+
+    fn main() {
+        let x: &A = unsafe { &*(1usize as *const A) };
+
+        assert_eq!(unsafe { intrinsics::size_of_val(x) }, 0);
+        assert_eq!(unsafe { intrinsics::min_align_of_val(x) }, 1);
+    }
+}
+
+#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+extern "C" {
+ fn global_asm_test();
+}
+
+#[cfg(all(not(jit), target_arch = "x86_64", target_os = "linux"))]
+global_asm! {
+ "
+ .global global_asm_test
+ global_asm_test:
+ // comment that would normally be removed by LLVM
+ ret
+ "
+}
+
+#[repr(C)]
+enum c_void {
+ _1,
+ _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+ __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+extern "C" {
+ fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+ fn pthread_create(
+ native: *mut pthread_t,
+ attr: *const pthread_attr_t,
+ f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+ value: *mut c_void
+ ) -> c_int;
+
+ fn pthread_join(
+ native: pthread_t,
+ value: *mut *mut c_void
+ ) -> c_int;
+}
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+ unsafe { TLS = 0; }
+ 0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+ unsafe {
+ let mut attr: pthread_attr_t = zeroed();
+ let mut thread: pthread_t = 0;
+
+ assert_eq!(TLS, 42);
+
+ if pthread_attr_init(&mut attr) != 0 {
+ assert!(false);
+ }
+
+ if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
+ assert!(false);
+ }
+
+ let mut res = 0 as *mut c_void;
+ pthread_join(thread, &mut res);
+
+        // The main thread's TLS must not have been changed by the other thread.
+ assert_eq!(TLS, 42);
+
+ puts("TLS works!\n\0" as *const str as *const i8);
+ }
+}
+
+// Copied ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+ V1 { f: bool },
+ V2 { f: Infallible },
+ V3,
+ V4,
+}
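+
+// For illustration (a plausible layout, not asserted by this test): `E1`
+// fits in one byte because `bool` only uses 0 and 1, leaving 2..=255 free
+// as a niche, e.g. V1 { f: false } = 0, V1 { f: true } = 1, V3 = 2, V4 = 3.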
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+ V1 { f: bool },
+
+ /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+ _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+ _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+ _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+ _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+ _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+ _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+ _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+ _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+ _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+ _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+ _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+ _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+ _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+ _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+ _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+ _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+ _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+ _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+ _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+ _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+ _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+ _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+ _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+ _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+ _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+ _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+ _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+ _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+ _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+ _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+ _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+ V3,
+ V4,
+}
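+
+// For illustration: with the 256 `_XX` variants in between, `V3` and `V4`
+// have variant indices 257 and 258, which cannot be represented in the `u8`
+// niche type, so the discriminant computation has to be widened first.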
+
+fn check_niche_behavior() {
+ if let E1::V2 { .. } = (E1::V1 { f: true }) {
+ intrinsics::abort();
+ }
+
+ if let E2::V1 { .. } = E2::V3::<Infallible> {
+ intrinsics::abort();
+ }
+}
+
+fn from_decimal_string() {
+ loop {
+ let multiplier = 1;
+
+ take_multiplier_ref(&multiplier);
+
+ if multiplier == 1 {
+ break;
+ }
+
+ unreachable();
+ }
+}
+
+fn take_multiplier_ref(_multiplier: &u128) {}
+
+fn unreachable() -> ! {
+ panic("unreachable")
+}
diff --git a/compiler/rustc_codegen_cranelift/example/mod_bench.rs b/compiler/rustc_codegen_cranelift/example/mod_bench.rs
new file mode 100644
index 000000000..e3e8a3c2d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/mod_bench.rs
@@ -0,0 +1,36 @@
+#![feature(start, core_intrinsics, lang_items)]
+#![no_std]
+
+#[cfg_attr(unix, link(name = "c"))]
+#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))]
+extern {}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ core::intrinsics::abort();
+}
+
+#[lang="eh_personality"]
+fn eh_personality(){}
+
+// Required for rustc_codegen_llvm
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+ core::intrinsics::unreachable();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ for i in 2..10_000_000 {
+ black_box((i + 1) % i);
+ }
+
+ 0
+}
+
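+// A hand-rolled stand-in for std::hint::black_box: #[inline(never)] plus a
+// data-dependent abort keep the benchmark loop from being optimized away.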
+#[inline(never)]
+fn black_box(i: u32) {
+ if i != 1 {
+ core::intrinsics::abort();
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/std_example.rs b/compiler/rustc_codegen_cranelift/example/std_example.rs
new file mode 100644
index 000000000..0b5b6cd55
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/std_example.rs
@@ -0,0 +1,356 @@
+#![feature(core_intrinsics, generators, generator_trait, is_sorted, bench_black_box)]
+
+#[cfg(target_arch = "x86_64")]
+use std::arch::x86_64::*;
+use std::hint::black_box;
+use std::io::Write;
+use std::ops::Generator;
+
+fn main() {
+ println!("{:?}", std::env::args().collect::<Vec<_>>());
+
+ let mutex = std::sync::Mutex::new(());
+ let _guard = mutex.lock().unwrap();
+
+ let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
+ let stderr = ::std::io::stderr();
+ let mut stderr = stderr.lock();
+
+ std::thread::spawn(move || {
+ println!("Hello from another thread!");
+ });
+
+ writeln!(stderr, "some {} text", "<unknown>").unwrap();
+
+ let _ = std::process::Command::new("true").env("c", "d").spawn();
+
+ println!("cargo:rustc-link-lib=z");
+
+ static ONCE: std::sync::Once = std::sync::Once::new();
+ ONCE.call_once(|| {});
+
+ let _eq = LoopState::Continue(()) == LoopState::Break(());
+
+ // Make sure ByValPair values with differently sized components are correctly passed
+ map(None::<(u8, Box<Instruction>)>);
+
+ println!("{}", 2.3f32.exp());
+ println!("{}", 2.3f32.exp2());
+ println!("{}", 2.3f32.abs());
+ println!("{}", 2.3f32.sqrt());
+ println!("{}", 2.3f32.floor());
+ println!("{}", 2.3f32.ceil());
+ println!("{}", 2.3f32.min(1.0));
+ println!("{}", 2.3f32.max(1.0));
+ println!("{}", 2.3f32.powi(2));
+ println!("{}", 2.3f32.log2());
+ assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
+ println!("{}", 2.3f32.powf(2.0));
+
+ assert_eq!(i64::MAX.checked_mul(2), None);
+
+ assert_eq!(-128i8, (-128i8).saturating_sub(1));
+ assert_eq!(127i8, 127i8.saturating_sub(-128));
+ assert_eq!(-128i8, (-128i8).saturating_add(-128));
+ assert_eq!(127i8, 127i8.saturating_add(1));
+
+ assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+ assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+ assert_eq!(core::intrinsics::saturating_sub(0, -170141183460469231731687303715884105728i128), 170141183460469231731687303715884105727i128);
+
+ let _d = 0i128.checked_div(2i128);
+ let _d = 0u128.checked_div(2u128);
+ assert_eq!(1u128 + 2, 3);
+
+ assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
+
+ let tmp = 353985398u128;
+ assert_eq!(tmp * 932490u128, 330087843781020u128);
+
+ let tmp = -0x1234_5678_9ABC_DEF0i64;
+ assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
+
+ // Check that all u/i128 <-> float casts work correctly.
+    let hundred_u128 = 100u128;
+    let hundred_i128 = 100i128;
+    let hundred_f32 = 100.0f32;
+    let hundred_f64 = 100.0f64;
+    assert_eq!(hundred_u128 as f32, 100.0);
+    assert_eq!(hundred_u128 as f64, 100.0);
+    assert_eq!(hundred_f32 as u128, 100);
+    assert_eq!(hundred_f64 as u128, 100);
+    assert_eq!(hundred_i128 as f32, 100.0);
+    assert_eq!(hundred_i128 as f64, 100.0);
+    assert_eq!(hundred_f32 as i128, 100);
+    assert_eq!(hundred_f64 as i128, 100);
+ assert_eq!(1u128.rotate_left(2), 4);
+
+ assert_eq!(black_box(f32::NAN) as i128, 0);
+ assert_eq!(black_box(f32::NAN) as u128, 0);
+
+    // Test signed 128-bit comparisons.
+ let max = usize::MAX as i128;
+ if 100i128 < 0i128 || 100i128 > max {
+ panic!();
+ }
+
+ test_checked_mul();
+
+ let _a = 1u32 << 2u8;
+
+ let empty: [i32; 0] = [];
+ assert!(empty.is_sorted());
+
+ println!("{:?}", std::intrinsics::caller_location());
+
+ #[cfg(target_arch = "x86_64")]
+ unsafe {
+ test_simd();
+ }
+
+ Box::pin(move |mut _task_context| {
+ yield ();
+ }).as_mut().resume(0);
+
+ #[derive(Copy, Clone)]
+ enum Nums {
+ NegOne = -1,
+ }
+
+ let kind = Nums::NegOne;
+ assert_eq!(-1i128, kind as i128);
+
+ let options = [1u128];
+ match options[0] {
+ 1 => (),
+ 0 => loop {},
+ v => panic(v),
+ };
+
+ if black_box(false) {
+ // Based on https://github.com/rust-lang/rust/blob/2f320a224e827b400be25966755a621779f797cc/src/test/ui/debuginfo/debuginfo_with_uninhabitable_field_and_unsized.rs
+ let _ = Foo::<dyn Send>::new();
+
+ #[allow(dead_code)]
+ struct Foo<T: ?Sized> {
+ base: Never,
+ value: T,
+ }
+
+ impl<T: ?Sized> Foo<T> {
+ pub fn new() -> Box<Foo<T>> {
+ todo!()
+ }
+ }
+
+ enum Never {}
+ }
+}
+
+fn panic(_: u128) {
+ panic!();
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_simd() {
+ assert!(is_x86_feature_detected!("sse2"));
+
+ let x = _mm_setzero_si128();
+ let y = _mm_set1_epi16(7);
+ let or = _mm_or_si128(x, y);
+ let cmp_eq = _mm_cmpeq_epi8(y, y);
+ let cmp_lt = _mm_cmplt_epi8(y, y);
+
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
+
+ test_mm_slli_si128();
+ test_mm_movemask_epi8();
+ test_mm256_movemask_epi8();
+ test_mm_add_epi8();
+ test_mm_add_pd();
+ test_mm_cvtepi8_epi16();
+ test_mm_cvtsi128_si64();
+
+ test_mm_extract_epi8();
+ test_mm_insert_epi16();
+
+ let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
+ assert_eq!(mask1, 1);
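+    // _mm_movemask_epi8 packs the sign bit of each of the 16 bytes into the
+    // low 16 bits of the result; only byte 0 (0xFF) has its sign bit set
+    // here, hence the mask 0b1.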
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_slli_si128() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 1);
+ let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 15);
+ let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 16);
+ assert_eq_m128i(r, _mm_set1_epi8(0));
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_movemask_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
+ 0b0101, 0b1111_0000u8 as i8, 0, 0,
+ 0, 0, 0b1111_0000u8 as i8, 0b0101,
+ 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
+ );
+ let r = _mm_movemask_epi8(a);
+ assert_eq!(r, 0b10100100_00100101);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "avx2")]
+unsafe fn test_mm256_movemask_epi8() {
+ let a = _mm256_set1_epi8(-1);
+ let r = _mm256_movemask_epi8(a);
+ let e = -1;
+ assert_eq!(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_epi8() {
+ let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ #[rustfmt::skip]
+ let b = _mm_setr_epi8(
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ );
+ let r = _mm_add_epi8(a, b);
+ #[rustfmt::skip]
+ let e = _mm_setr_epi8(
+ 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
+ );
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_pd() {
+ let a = _mm_setr_pd(1.0, 2.0);
+ let b = _mm_setr_pd(5.0, 10.0);
+ let r = _mm_add_pd(a, b);
+ assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
+}
+
+#[cfg(target_arch = "x86_64")]
+fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
+ unsafe {
+ assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
+ }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
+ if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
+ panic!("{:?} != {:?}", a, b);
+ }
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_cvtsi128_si64() {
+ let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
+ assert_eq!(r, 5);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_cvtepi8_epi16() {
+ let a = _mm_set1_epi8(10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(10);
+ assert_eq_m128i(r, e);
+ let a = _mm_set1_epi8(-10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(-10);
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_extract_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ -1, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15
+ );
+ let r1 = _mm_extract_epi8(a, 0);
+ let r2 = _mm_extract_epi8(a, 3);
+ assert_eq!(r1, 0xFF);
+ assert_eq!(r2, 3);
+}
+
+#[cfg(target_arch = "x86_64")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_insert_epi16() {
+ let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+ let r = _mm_insert_epi16(a, 9, 0);
+ let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
+ assert_eq_m128i(r, e);
+}
+
+fn test_checked_mul() {
+ let u: Option<u8> = u8::from_str_radix("1000", 10).ok();
+ assert_eq!(u, None);
+
+ assert_eq!(1u8.checked_mul(255u8), Some(255u8));
+ assert_eq!(255u8.checked_mul(255u8), None);
+ assert_eq!(1i8.checked_mul(127i8), Some(127i8));
+ assert_eq!(127i8.checked_mul(127i8), None);
+ assert_eq!((-1i8).checked_mul(-127i8), Some(127i8));
+ assert_eq!(1i8.checked_mul(-128i8), Some(-128i8));
+ assert_eq!((-128i8).checked_mul(-128i8), None);
+
+ assert_eq!(1u64.checked_mul(u64::MAX), Some(u64::MAX));
+ assert_eq!(u64::MAX.checked_mul(u64::MAX), None);
+ assert_eq!(1i64.checked_mul(i64::MAX), Some(i64::MAX));
+ assert_eq!(i64::MAX.checked_mul(i64::MAX), None);
+ assert_eq!((-1i64).checked_mul(i64::MIN + 1), Some(i64::MAX));
+ assert_eq!(1i64.checked_mul(i64::MIN), Some(i64::MIN));
+ assert_eq!(i64::MIN.checked_mul(i64::MIN), None);
+}
+
+#[derive(PartialEq)]
+enum LoopState {
+ Continue(()),
+ Break(())
+}
+
+pub enum Instruction {
+ Increment,
+ Loop,
+}
+
+fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
+ match a {
+ None => None,
+ Some((_, instr)) => Some(instr),
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs b/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs
new file mode 100644
index 000000000..2cb84786f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/subslice-patterns-const-eval.rs
@@ -0,0 +1,97 @@
+// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs
+
+// Test that array subslice patterns are correctly handled in const evaluation.
+
+// run-pass
+
+#[derive(PartialEq, Debug, Clone)]
+struct N(u8);
+
+#[derive(PartialEq, Debug, Clone)]
+struct Z;
+
+macro_rules! n {
+ ($($e:expr),* $(,)?) => {
+ [$(N($e)),*]
+ }
+}
+
+// This macro has an unused variable so that it can be repeated based on the
+// number of times a repeated variable (`$e` in `z`) occurs.
+macro_rules! zed {
+ ($e:expr) => { Z }
+}
+
+macro_rules! z {
+ ($($e:expr),* $(,)?) => {
+ [$(zed!($e)),*]
+ }
+}
+
+// Compare constant evaluation and runtime evaluation of a given expression.
+macro_rules! compare_evaluation {
+ ($e:expr, $t:ty $(,)?) => {{
+ const CONST_EVAL: $t = $e;
+ const fn const_eval() -> $t { $e }
+ static CONST_EVAL2: $t = const_eval();
+ let runtime_eval = $e;
+ assert_eq!(CONST_EVAL, runtime_eval);
+ assert_eq!(CONST_EVAL2, runtime_eval);
+ }}
+}
+
+// Repeat `$test`, substituting the given macro variables with the given
+// identifiers.
+//
+// For example:
+//
+// repeat! {
+// ($name); X; Y:
+// struct $name;
+// }
+//
+// Expands to:
+//
+// struct X; struct Y;
+//
+// This is used to repeat the tests using both the `N` and `Z`
+// types.
+macro_rules! repeat {
+ (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => {
+ macro_rules! single {
+ ($($dollar $placeholder:ident),*) => { $($test)* }
+ }
+ $(single!($($values),+);)*
+ }
+}
+
+fn main() {
+ repeat! {
+ ($arr $Ty); n, N; z, Z:
+ compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]);
+ compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+ compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+
+ compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]);
+ compare_evaluation!(
+ { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x },
+ &'static [$Ty; 0],
+ );
+ compare_evaluation!(
+ { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x },
+ &'static [$Ty; 0],
+ );
+
+ compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty);
+ compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty);
+ compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty);
+ }
+
+ compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8);
+ compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8);
+ compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8);
+
+ compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8);
+ compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8);
+ compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8);
+}
diff --git a/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs b/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs
new file mode 100644
index 000000000..93bab17e4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/example/track-caller-attribute.rs
@@ -0,0 +1,40 @@
+// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs
+
+// run-pass
+
+use std::panic::Location;
+
+#[track_caller]
+fn tracked() -> &'static Location<'static> {
+ Location::caller()
+}
+
+fn nested_intrinsic() -> &'static Location<'static> {
+ Location::caller()
+}
+
+fn nested_tracked() -> &'static Location<'static> {
+ tracked()
+}
+
+fn main() {
+ let location = Location::caller();
+ assert_eq!(location.file(), file!());
+ assert_eq!(location.line(), 21);
+ assert_eq!(location.column(), 20);
+
+ let tracked = tracked();
+ assert_eq!(tracked.file(), file!());
+ assert_eq!(tracked.line(), 26);
+ assert_eq!(tracked.column(), 19);
+
+ let nested = nested_intrinsic();
+ assert_eq!(nested.file(), file!());
+ assert_eq!(nested.line(), 13);
+ assert_eq!(nested.column(), 5);
+
+ let contained = nested_tracked();
+ assert_eq!(contained.file(), file!());
+ assert_eq!(contained.line(), 17);
+ assert_eq!(contained.column(), 5);
+}
diff --git a/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Disable-unsupported-tests.patch b/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Disable-unsupported-tests.patch
new file mode 100644
index 000000000..54e13b090
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0001-portable-simd-Disable-unsupported-tests.patch
@@ -0,0 +1,107 @@
+From 97c473937382a5b5858d9cce3c947855d23b2dc5 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Thu, 18 Nov 2021 19:28:40 +0100
+Subject: [PATCH] Disable unsupported tests
+
+---
+ crates/core_simd/src/math.rs | 6 ++++++
+ crates/core_simd/src/vector.rs | 2 ++
+ crates/core_simd/tests/masks.rs | 2 ++
+ crates/core_simd/tests/ops_macros.rs | 4 ++++
+ 4 files changed, 14 insertions(+)
+
+diff --git a/crates/core_simd/src/math.rs b/crates/core_simd/src/math.rs
+index 2bae414..2f87499 100644
+--- a/crates/core_simd/src/math.rs
++++ b/crates/core_simd/src/math.rs
+@@ -5,6 +5,7 @@ macro_rules! impl_uint_arith {
+ ($($ty:ty),+) => {
+ $( impl<const LANES: usize> Simd<$ty, LANES> where LaneCount<LANES>: SupportedLaneCount {
+
++ /*
+ /// Lanewise saturating add.
+ ///
+ /// # Examples
+@@ -43,6 +44,7 @@ macro_rules! impl_uint_arith {
+ pub fn saturating_sub(self, second: Self) -> Self {
+ unsafe { simd_saturating_sub(self, second) }
+ }
++ */
+ })+
+ }
+ }
+@@ -51,6 +53,7 @@ macro_rules! impl_int_arith {
+ ($($ty:ty),+) => {
+ $( impl<const LANES: usize> Simd<$ty, LANES> where LaneCount<LANES>: SupportedLaneCount {
+
++ /*
+ /// Lanewise saturating add.
+ ///
+ /// # Examples
+@@ -89,6 +92,7 @@ macro_rules! impl_int_arith {
+ pub fn saturating_sub(self, second: Self) -> Self {
+ unsafe { simd_saturating_sub(self, second) }
+ }
++ */
+
+ /// Lanewise absolute value, implemented in Rust.
+ /// Every lane becomes its absolute value.
+@@ -109,6 +113,7 @@ macro_rules! impl_int_arith {
+ (self^m) - m
+ }
+
++ /*
+ /// Lanewise saturating absolute value, implemented in Rust.
+ /// As abs(), except the MIN value becomes MAX instead of itself.
+ ///
+@@ -151,6 +156,7 @@ macro_rules! impl_int_arith {
+ pub fn saturating_neg(self) -> Self {
+ Self::splat(0).saturating_sub(self)
+ }
++ */
+ })+
+ }
+ }
+diff --git a/crates/core_simd/src/vector.rs b/crates/core_simd/src/vector.rs
+index 7c5ec2b..c8631e8 100644
+--- a/crates/core_simd/src/vector.rs
++++ b/crates/core_simd/src/vector.rs
+@@ -75,6 +75,7 @@ where
+ Self(array)
+ }
+
++ /*
+ /// Reads from potentially discontiguous indices in `slice` to construct a SIMD vector.
+ /// If an index is out-of-bounds, the lane is instead selected from the `or` vector.
+ ///
+@@ -297,6 +298,7 @@ where
+ // Cleared ☢️ *mut T Zone
+ }
+ }
++ */
+ }
+
+ impl<T, const LANES: usize> Copy for Simd<T, LANES>
+diff --git a/crates/core_simd/tests/masks.rs b/crates/core_simd/tests/masks.rs
+index 6a8ecd3..68fcb49 100644
+--- a/crates/core_simd/tests/masks.rs
++++ b/crates/core_simd/tests/masks.rs
+@@ -68,6 +68,7 @@ macro_rules! test_mask_api {
+ assert_eq!(core_simd::Mask::<$type, 8>::from_int(int), mask);
+ }
+
++ /*
+ #[cfg(feature = "generic_const_exprs")]
+ #[test]
+ fn roundtrip_bitmask_conversion() {
+@@ -80,6 +81,7 @@ macro_rules! test_mask_api {
+ assert_eq!(bitmask, [0b01001001, 0b10000011]);
+ assert_eq!(core_simd::Mask::<$type, 16>::from_bitmask(bitmask), mask);
+ }
++ */
+ }
+ }
+ }
+--
+2.26.2.7.g19db9cfb68
+
diff --git a/compiler/rustc_codegen_cranelift/patches/0002-rand-Disable-failing-test.patch b/compiler/rustc_codegen_cranelift/patches/0002-rand-Disable-failing-test.patch
new file mode 100644
index 000000000..ae13ab3b0
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0002-rand-Disable-failing-test.patch
@@ -0,0 +1,24 @@
+From a8fb97120d71252538b6b026695df40d02696bdb Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sat, 15 Aug 2020 20:04:38 +0200
+Subject: [PATCH] [rand] Disable failing test
+
+---
+ src/distributions/uniform.rs | 1 +
+ 1 file changed, 1 insertion(+), 0 deletions(-)
+
+diff --git a/src/distributions/uniform.rs b/src/distributions/uniform.rs
+index 480b859..c80bb6f 100644
+--- a/src/distributions/uniform.rs
++++ b/src/distributions/uniform.rs
+@@ -1314,6 +1314,7 @@ mod tests {
+ not(target_arch = "wasm32"),
+ not(target_arch = "asmjs")
+ ))]
++ #[ignore] // Requires unwinding
+ fn test_float_assertions() {
+ use super::SampleUniform;
+ use std::panic::catch_unwind;
+--
+2.20.1
+
diff --git a/compiler/rustc_codegen_cranelift/patches/0022-sysroot-Disable-not-compiling-tests.patch b/compiler/rustc_codegen_cranelift/patches/0022-sysroot-Disable-not-compiling-tests.patch
new file mode 100644
index 000000000..8d9ee3f25
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0022-sysroot-Disable-not-compiling-tests.patch
@@ -0,0 +1,34 @@
+From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:10:23 +0100
+Subject: [PATCH] [core] Disable not compiling tests
+
+---
+ library/core/tests/Cargo.toml | 8 ++++++++
+ library/core/tests/num/flt2dec/mod.rs | 1 -
+ library/core/tests/num/int_macros.rs | 2 ++
+ library/core/tests/num/uint_macros.rs | 2 ++
+ library/core/tests/ptr.rs | 2 ++
+ library/core/tests/slice.rs | 2 ++
+ 6 files changed, 16 insertions(+), 1 deletion(-)
+ create mode 100644 library/core/tests/Cargo.toml
+
+diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
+new file mode 100644
+index 0000000..46fd999
+--- /dev/null
++++ b/library/core/tests/Cargo.toml
+@@ -0,0 +1,11 @@
++[package]
++name = "core"
++version = "0.0.0"
++edition = "2021"
++
++[lib]
++name = "coretests"
++path = "lib.rs"
++
++[dependencies]
++rand = "0.7"
+--
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch b/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch
new file mode 100644
index 000000000..50ef0bd94
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0023-sysroot-Ignore-failing-tests.patch
@@ -0,0 +1,50 @@
+From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:34:06 +0100
+Subject: [PATCH] [core] Ignore failing tests
+
+---
+ library/core/tests/iter.rs | 4 ++++
+ library/core/tests/num/bignum.rs | 10 ++++++++++
+ library/core/tests/num/mod.rs | 5 +++--
+ library/core/tests/time.rs | 1 +
+ 4 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
+index 4bc44e9..8e3c7a4 100644
+--- a/library/core/tests/array.rs
++++ b/library/core/tests/array.rs
+@@ -242,6 +242,7 @@ fn iterator_drops() {
+ assert_eq!(i.get(), 5);
+ }
+
++/*
+ // This test does not work on targets without panic=unwind support.
+ // To work around this problem, test is marked is should_panic, so it will
+ // be automagically skipped on unsuitable targets, such as
+@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() {
+ assert_eq!(COUNTER.load(Relaxed), 0);
+ panic!("test succeeded")
+ }
++*/
+
+ #[test]
+ fn empty_array_is_always_default() {
+@@ -304,6 +304,7 @@ fn array_map() {
+ assert_eq!(b, [1, 2, 3]);
+ }
+
++/*
+ // See note on above test for why `should_panic` is used.
+ #[test]
+ #[should_panic(expected = "test succeeded")]
+@@ -332,6 +333,7 @@ fn array_map_drop_safety() {
+ assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
+ panic!("test succeeded")
+ }
++*/
+
+ #[test]
+ fn cell_allows_array_cycle() {
+--
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_cranelift/patches/0027-sysroot-128bit-atomic-operations.patch b/compiler/rustc_codegen_cranelift/patches/0027-sysroot-128bit-atomic-operations.patch
new file mode 100644
index 000000000..77f437974
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0027-sysroot-128bit-atomic-operations.patch
@@ -0,0 +1,105 @@
+From ad7ffe71baba46865f2e65266ab025920dfdc20b Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Thu, 18 Feb 2021 18:45:28 +0100
+Subject: [PATCH] Disable 128bit atomic operations
+
+Cranelift doesn't support them yet
+---
+ library/core/src/panic/unwind_safe.rs | 6 -----
+ library/core/src/sync/atomic.rs | 38 ---------------------------
+ library/core/tests/atomic.rs | 4 ---
+ 4 files changed, 4 insertions(+), 50 deletions(-)
+
+diff --git a/library/core/src/panic/unwind_safe.rs b/library/core/src/panic/unwind_safe.rs
+index 092b7cf..158cf71 100644
+--- a/library/core/src/panic/unwind_safe.rs
++++ b/library/core/src/panic/unwind_safe.rs
+@@ -216,9 +216,6 @@ impl RefUnwindSafe for crate::sync::atomic::AtomicI32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for crate::sync::atomic::AtomicI64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "99069")]
+-impl RefUnwindSafe for crate::sync::atomic::AtomicI128 {}
+
+ #[cfg(target_has_atomic_load_store = "ptr")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+@@ -235,9 +232,6 @@ impl RefUnwindSafe for crate::sync::atomic::AtomicU32 {}
+ #[cfg(target_has_atomic_load_store = "64")]
+ #[stable(feature = "integer_atomics_stable", since = "1.34.0")]
+ impl RefUnwindSafe for crate::sync::atomic::AtomicU64 {}
+-#[cfg(target_has_atomic_load_store = "128")]
+-#[unstable(feature = "integer_atomics", issue = "99069")]
+-impl RefUnwindSafe for crate::sync::atomic::AtomicU128 {}
+
+ #[cfg(target_has_atomic_load_store = "8")]
+ #[stable(feature = "unwind_safe_atomic_refs", since = "1.14.0")]
+diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
+index d9de37e..8293fce 100644
+--- a/library/core/src/sync/atomic.rs
++++ b/library/core/src/sync/atomic.rs
+@@ -2234,46 +2234,6 @@ atomic_int! {
+ "AtomicU64::new(0)",
+ u64 AtomicU64 ATOMIC_U64_INIT
+ }
+-#[cfg(target_has_atomic_load_store = "128")]
+-atomic_int! {
+- cfg(target_has_atomic = "128"),
+- cfg(target_has_atomic_equal_alignment = "128"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- cfg_attr(not(test), rustc_diagnostic_item = "AtomicI128"),
+- "i128",
+- "#![feature(integer_atomics)]\n\n",
+- atomic_min, atomic_max,
+- 16,
+- "AtomicI128::new(0)",
+- i128 AtomicI128 ATOMIC_I128_INIT
+-}
+-#[cfg(target_has_atomic_load_store = "128")]
+-atomic_int! {
+- cfg(target_has_atomic = "128"),
+- cfg(target_has_atomic_equal_alignment = "128"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
+- unstable(feature = "integer_atomics", issue = "99069"),
+- cfg_attr(not(test), rustc_diagnostic_item = "AtomicU128"),
+- "u128",
+- "#![feature(integer_atomics)]\n\n",
+- atomic_umin, atomic_umax,
+- 16,
+- "AtomicU128::new(0)",
+- u128 AtomicU128 ATOMIC_U128_INIT
+-}
+
+ macro_rules! atomic_int_ptr_sized {
+ ( $($target_pointer_width:literal $align:literal)* ) => { $(
+diff --git a/library/core/tests/atomic.rs b/library/core/tests/atomic.rs
+index b735957..ea728b6 100644
+--- a/library/core/tests/atomic.rs
++++ b/library/core/tests/atomic.rs
+@@ -185,10 +185,6 @@ fn atomic_alignment() {
+ assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
+ #[cfg(target_has_atomic = "64")]
+ assert_eq!(align_of::<AtomicI64>(), size_of::<AtomicI64>());
+- #[cfg(target_has_atomic = "128")]
+- assert_eq!(align_of::<AtomicU128>(), size_of::<AtomicU128>());
+- #[cfg(target_has_atomic = "128")]
+- assert_eq!(align_of::<AtomicI128>(), size_of::<AtomicI128>());
+ #[cfg(target_has_atomic = "ptr")]
+ assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
+ #[cfg(target_has_atomic = "ptr")]
+--
+2.26.2.7.g19db9cfb68
+
diff --git a/compiler/rustc_codegen_cranelift/patches/0028-sysroot-Disable-long-running-tests.patch b/compiler/rustc_codegen_cranelift/patches/0028-sysroot-Disable-long-running-tests.patch
new file mode 100644
index 000000000..d804a78cc
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/patches/0028-sysroot-Disable-long-running-tests.patch
@@ -0,0 +1,48 @@
+From eb703e627e7a84f1cd8d0d87f0f69da1f0acf765 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Fri, 3 Dec 2021 12:16:30 +0100
+Subject: [PATCH] Disable long running tests
+
+---
+ library/core/tests/slice.rs | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 8402833..84592e0 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -1809,6 +1809,7 @@ fn sort_unstable() {
+ assert!(v == [0xDEADBEEF]);
+ }
+
++/*
+ #[test]
+ #[cfg(not(target_arch = "wasm32"))]
+ #[cfg_attr(miri, ignore)] // Miri is too slow
+@@ -1914,6 +1915,7 @@ fn select_nth_unstable() {
+ v.select_nth_unstable(0);
+ assert!(v == [0xDEADBEEF]);
+ }
++*/
+
+ #[test]
+ #[should_panic(expected = "index 0 greater than length of slice")]
+@@ -2462,6 +2464,7 @@ take_tests! {
+ #[cfg(not(miri))] // unused in Miri
+ const EMPTY_MAX: &'static [()] = &[(); usize::MAX];
+
++/*
+ // can't be a constant due to const mutability rules
+ #[cfg(not(miri))] // unused in Miri
+ macro_rules! empty_max_mut {
+@@ -2485,6 +2488,7 @@ take_tests! {
+ (take_mut_oob_max_range_to_inclusive, (..=usize::MAX), None, empty_max_mut!()),
+ (take_mut_in_bounds_max_range_from, (usize::MAX..), Some(&mut [] as _), empty_max_mut!()),
+ }
++*/
+
+ #[test]
+ fn test_slice_from_ptr_range() {
+--
+2.26.2.7.g19db9cfb68
+
diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain
new file mode 100644
index 000000000..3ab395d89
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/rust-toolchain
@@ -0,0 +1,3 @@
+[toolchain]
+channel = "nightly-2022-07-25"
+components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
diff --git a/compiler/rustc_codegen_cranelift/rustfmt.toml b/compiler/rustc_codegen_cranelift/rustfmt.toml
new file mode 100644
index 000000000..2bd8f7d1b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/rustfmt.toml
@@ -0,0 +1,4 @@
+# Matches rustfmt.toml of rustc
+version = "Two"
+use_small_heuristics = "Max"
+merge_derives = false
diff --git a/compiler/rustc_codegen_cranelift/scripts/Readme.md b/compiler/rustc_codegen_cranelift/scripts/Readme.md
new file mode 100644
index 000000000..83cec9c6f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/Readme.md
@@ -0,0 +1,2 @@
+This directory is for scripts that are either never directly invoked or are not used very often.
+Scripts that are frequently used should be kept at the project root.
diff --git a/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs b/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs
new file mode 100644
index 000000000..9362b47fa
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs
@@ -0,0 +1,70 @@
+use std::env;
+#[cfg(unix)]
+use std::os::unix::process::CommandExt;
+use std::path::PathBuf;
+use std::process::Command;
+
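+// Wrapper binary that runs `cargo` with cg_clif as the codegen backend: it
+// appends `-Cpanic=abort -Zpanic-abort-tests -Zcodegen-backend=<path to the
+// cg_clif dylib>` and `--sysroot <dir of this executable>` to RUSTFLAGS and
+// RUSTDOCFLAGS and then forwards the remaining arguments to `cargo`. The
+// `jit`/`lazy-jit` subcommands additionally pass `-Cprefer-dynamic` and
+// `-Cllvm-args=mode=jit` resp. `mode=jit-lazy` via `cargo rustc`. Example
+// invocations (paths assume the usual build layout):
+//
+//     ./build/cargo-clif build    # like `cargo build`, but using cg_clif
+//     ./build/cargo-clif jit      # compile and run the crate in JIT mode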
+fn main() {
+ let sysroot = PathBuf::from(env::current_exe().unwrap().parent().unwrap());
+
+ let mut rustflags = String::new();
+ rustflags.push_str(" -Cpanic=abort -Zpanic-abort-tests -Zcodegen-backend=");
+ rustflags.push_str(
+ sysroot
+ .join(if cfg!(windows) { "bin" } else { "lib" })
+ .join(
+ env::consts::DLL_PREFIX.to_string()
+ + "rustc_codegen_cranelift"
+ + env::consts::DLL_SUFFIX,
+ )
+ .to_str()
+ .unwrap(),
+ );
+ rustflags.push_str(" --sysroot ");
+ rustflags.push_str(sysroot.to_str().unwrap());
+ env::set_var("RUSTFLAGS", env::var("RUSTFLAGS").unwrap_or(String::new()) + &rustflags);
+ env::set_var("RUSTDOCFLAGS", env::var("RUSTDOCFLAGS").unwrap_or(String::new()) + &rustflags);
+
+ // Ensure that the right toolchain is used
+ env::set_var("RUSTUP_TOOLCHAIN", env!("RUSTUP_TOOLCHAIN"));
+
+ let args: Vec<_> = match env::args().nth(1).as_deref() {
+ Some("jit") => {
+ env::set_var(
+ "RUSTFLAGS",
+ env::var("RUSTFLAGS").unwrap_or(String::new()) + " -Cprefer-dynamic",
+ );
+ IntoIterator::into_iter(["rustc".to_string()])
+ .chain(env::args().skip(2))
+ .chain([
+ "--".to_string(),
+ "-Zunstable-options".to_string(),
+ "-Cllvm-args=mode=jit".to_string(),
+ ])
+ .collect()
+ }
+ Some("lazy-jit") => {
+ env::set_var(
+ "RUSTFLAGS",
+ env::var("RUSTFLAGS").unwrap_or(String::new()) + " -Cprefer-dynamic",
+ );
+ IntoIterator::into_iter(["rustc".to_string()])
+ .chain(env::args().skip(2))
+ .chain([
+ "--".to_string(),
+ "-Zunstable-options".to_string(),
+ "-Cllvm-args=mode=jit-lazy".to_string(),
+ ])
+ .collect()
+ }
+ _ => env::args().skip(1).collect(),
+ };
+
+ #[cfg(unix)]
+ Command::new("cargo").args(args).exec();
+
+ #[cfg(not(unix))]
+ std::process::exit(
+ Command::new("cargo").args(args).spawn().unwrap().wait().unwrap().code().unwrap_or(1),
+ );
+}
diff --git a/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
new file mode 100755
index 000000000..e6f60d1c0
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/filter_profile.rs
@@ -0,0 +1,125 @@
+#!/usr/bin/env bash
+#![forbid(unsafe_code)]/* This line is ignored by bash
+# This block is ignored by rustc
+pushd $(dirname "$0")/../
+RUSTC="$(pwd)/build/rustc-clif"
+popd
+PROFILE=$1 OUTPUT=$2 exec $RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic $0
+#*/
+
+//! This program filters away uninteresting samples and trims uninteresting frames for stackcollapse
+//! profiles.
+//!
+//! Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>
+//!
+//! This file is specially crafted to be both a valid bash script and a valid rust source file. If
+//! executed as a bash script, it will run the rust source using cg_clif in JIT mode.
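+//!
+//! How the polyglot works: rustc skips the `#!/usr/bin/env bash` line as a
+//! shebang, parses `#![forbid(unsafe_code)]` as an inner attribute and then
+//! swallows the whole bash block inside the `/* ... */` comment, while bash
+//! treats every `#`-prefixed line as a comment and never reads past the `exec`
+//! line. A minimal file of the same shape (illustrative, not this script):
+//!
+//! ```text
+//! #!/usr/bin/env bash
+//! #![allow(unused)]/* everything from here to the closing marker is bash-only
+//! exec echo "the bash half runs here"
+//! #*/
+//! fn main() { println!("the rust half runs here"); }
+//! ```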
+
+use std::io::Write;
+
+fn main() -> Result<(), Box<dyn std::error::Error>> {
+ let profile_name = std::env::var("PROFILE").unwrap();
+ let output_name = std::env::var("OUTPUT").unwrap();
+ if profile_name.is_empty() || output_name.is_empty() {
+ println!("Usage: ./filter_profile.rs <profile in stackcollapse format> <output file>");
+ std::process::exit(1);
+ }
+ let profile = std::fs::read_to_string(profile_name)
+ .map_err(|err| format!("Failed to read profile {}", err))?;
+ let mut output = std::fs::OpenOptions::new()
+ .create(true)
+ .write(true)
+ .truncate(true)
+ .open(output_name)?;
+
+ for line in profile.lines() {
+ let mut stack = &line[..line.rfind(" ").unwrap()];
+ let count = &line[line.rfind(" ").unwrap() + 1..];
+
+ // Filter away uninteresting samples
+ if !stack.contains("rustc_codegen_cranelift") {
+ continue;
+ }
+
+ if stack.contains("rustc_monomorphize::partitioning::collect_and_partition_mono_items")
+ || stack.contains("rustc_incremental::assert_dep_graph::assert_dep_graph")
+ || stack.contains("rustc_symbol_mangling::test::report_symbol_names")
+ {
+ continue;
+ }
+
+ // Trim start
+ if let Some(index) = stack.find("rustc_interface::passes::configure_and_expand") {
+ stack = &stack[index..];
+ } else if let Some(index) = stack.find("rustc_interface::passes::analysis") {
+ stack = &stack[index..];
+ } else if let Some(index) = stack.find("rustc_interface::passes::start_codegen") {
+ stack = &stack[index..];
+ } else if let Some(index) = stack.find("rustc_interface::queries::Linker::link") {
+ stack = &stack[index..];
+ }
+
+ if let Some(index) = stack.find("rustc_codegen_cranelift::driver::aot::module_codegen") {
+ stack = &stack[index..];
+ }
+
+ // Trim end
+ const MALLOC: &str = "malloc";
+ if let Some(index) = stack.find(MALLOC) {
+ stack = &stack[..index + MALLOC.len()];
+ }
+
+ const FREE: &str = "free";
+ if let Some(index) = stack.find(FREE) {
+ stack = &stack[..index + FREE.len()];
+ }
+
+ const TYPECK_ITEM_BODIES: &str = "rustc_typeck::check::typeck_item_bodies";
+ if let Some(index) = stack.find(TYPECK_ITEM_BODIES) {
+ stack = &stack[..index + TYPECK_ITEM_BODIES.len()];
+ }
+
+ const COLLECT_AND_PARTITION_MONO_ITEMS: &str =
+ "rustc_monomorphize::partitioning::collect_and_partition_mono_items";
+ if let Some(index) = stack.find(COLLECT_AND_PARTITION_MONO_ITEMS) {
+ stack = &stack[..index + COLLECT_AND_PARTITION_MONO_ITEMS.len()];
+ }
+
+ const ASSERT_DEP_GRAPH: &str = "rustc_incremental::assert_dep_graph::assert_dep_graph";
+ if let Some(index) = stack.find(ASSERT_DEP_GRAPH) {
+ stack = &stack[..index + ASSERT_DEP_GRAPH.len()];
+ }
+
+ const REPORT_SYMBOL_NAMES: &str = "rustc_symbol_mangling::test::report_symbol_names";
+ if let Some(index) = stack.find(REPORT_SYMBOL_NAMES) {
+ stack = &stack[..index + REPORT_SYMBOL_NAMES.len()];
+ }
+
+ const ENCODE_METADATA: &str = "rustc_metadata::rmeta::encoder::encode_metadata";
+ if let Some(index) = stack.find(ENCODE_METADATA) {
+ stack = &stack[..index + ENCODE_METADATA.len()];
+ }
+
+ const SUBST_AND_NORMALIZE_ERASING_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::subst_and_normalize_erasing_regions";
+ if let Some(index) = stack.find(SUBST_AND_NORMALIZE_ERASING_REGIONS) {
+ stack = &stack[..index + SUBST_AND_NORMALIZE_ERASING_REGIONS.len()];
+ }
+
+ const NORMALIZE_ERASING_LATE_BOUND_REGIONS: &str = "rustc_middle::ty::normalize_erasing_regions::<impl rustc_middle::ty::context::TyCtxt>::normalize_erasing_late_bound_regions";
+ if let Some(index) = stack.find(NORMALIZE_ERASING_LATE_BOUND_REGIONS) {
+ stack = &stack[..index + NORMALIZE_ERASING_LATE_BOUND_REGIONS.len()];
+ }
+
+ const INST_BUILD: &str = "<cranelift_frontend::frontend::FuncInstBuilder as cranelift_codegen::ir::builder::InstBuilderBase>::build";
+ if let Some(index) = stack.find(INST_BUILD) {
+ stack = &stack[..index + INST_BUILD.len()];
+ }
+
+ output.write_all(stack.as_bytes())?;
+ output.write_all(&*b" ")?;
+ output.write_all(count.as_bytes())?;
+ output.write_all(&*b"\n")?;
+ }
+
+ Ok(())
+}
diff --git a/compiler/rustc_codegen_cranelift/scripts/rustc-clif.rs b/compiler/rustc_codegen_cranelift/scripts/rustc-clif.rs
new file mode 100644
index 000000000..3abfcd8dd
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/rustc-clif.rs
@@ -0,0 +1,36 @@
+use std::env;
+use std::ffi::OsString;
+#[cfg(unix)]
+use std::os::unix::process::CommandExt;
+use std::path::PathBuf;
+use std::process::Command;
+
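+// Companion to cargo-clif that wraps `rustc` directly: it appends `-Cpanic=abort`,
+// `-Zpanic-abort-tests` and `-Zcodegen-backend=<path to the cg_clif dylib>` to the
+// command line, adds a default `--sysroot` unless one was passed explicitly, and
+// then runs the real `rustc` with the resulting arguments.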
+fn main() {
+ let sysroot = PathBuf::from(env::current_exe().unwrap().parent().unwrap());
+
+ let cg_clif_dylib_path = sysroot.join(if cfg!(windows) { "bin" } else { "lib" }).join(
+ env::consts::DLL_PREFIX.to_string() + "rustc_codegen_cranelift" + env::consts::DLL_SUFFIX,
+ );
+
+ let mut args = std::env::args_os().skip(1).collect::<Vec<_>>();
+ args.push(OsString::from("-Cpanic=abort"));
+ args.push(OsString::from("-Zpanic-abort-tests"));
+ let mut codegen_backend_arg = OsString::from("-Zcodegen-backend=");
+ codegen_backend_arg.push(cg_clif_dylib_path);
+ args.push(codegen_backend_arg);
+ if !args.contains(&OsString::from("--sysroot")) {
+ args.push(OsString::from("--sysroot"));
+ args.push(OsString::from(sysroot.to_str().unwrap()));
+ }
+
+ // Ensure that the right toolchain is used
+ env::set_var("RUSTUP_TOOLCHAIN", env!("RUSTUP_TOOLCHAIN"));
+
+ #[cfg(unix)]
+ Command::new("rustc").args(args).exec();
+
+ #[cfg(not(unix))]
+ std::process::exit(
+ Command::new("rustc").args(args).spawn().unwrap().wait().unwrap().code().unwrap_or(1),
+ );
+}
diff --git a/compiler/rustc_codegen_cranelift/scripts/rustup.sh b/compiler/rustc_codegen_cranelift/scripts/rustup.sh
new file mode 100755
index 000000000..bc4c06ed7
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/rustup.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+
+set -e
+
+case $1 in
+ "prepare")
+ TOOLCHAIN=$(date +%Y-%m-%d)
+
+ echo "=> Installing new nightly"
+ rustup toolchain install --profile minimal "nightly-${TOOLCHAIN}" # Sanity check to see if the nightly exists
+ sed -i "s/\"nightly-.*\"/\"nightly-${TOOLCHAIN}\"/" rust-toolchain
+ rustup component add rustfmt || true
+
+ echo "=> Uninstalling all old nightlies"
+ for nightly in $(rustup toolchain list | grep nightly | grep -v "$TOOLCHAIN" | grep -v nightly-x86_64); do
+ rustup toolchain uninstall "$nightly"
+ done
+
+ ./clean_all.sh
+ ./y.rs prepare
+
+ (cd build_sysroot && cargo update)
+
+ ;;
+ "commit")
+ git add rust-toolchain build_sysroot/Cargo.lock
+ git commit -m "Rustup to $(rustc -V)"
+ ;;
+ "push")
+ cg_clif=$(pwd)
+ pushd ../rust
+ git pull origin master
+ branch=sync_cg_clif-$(date +%Y-%m-%d)
+ git checkout -b "$branch"
+ git subtree pull --prefix=compiler/rustc_codegen_cranelift/ https://github.com/bjorn3/rustc_codegen_cranelift.git master
+ git push -u my "$branch"
+
+ # immediately merge the merge commit into cg_clif to prevent merge conflicts when syncing
+ # from rust-lang/rust later
+ git subtree push --prefix=compiler/rustc_codegen_cranelift/ "$cg_clif" sync_from_rust
+ popd
+ git merge sync_from_rust
+ ;;
+ "pull")
+ cg_clif=$(pwd)
+ pushd ../rust
+ git pull origin master
+ rust_vers="$(git rev-parse HEAD)"
+ git subtree push --prefix=compiler/rustc_codegen_cranelift/ "$cg_clif" sync_from_rust
+ popd
+ git merge sync_from_rust -m "Sync from rust $rust_vers"
+ git branch -d sync_from_rust
+ ;;
+ *)
+ echo "Unknown command '$1'"
+        echo "Usage: ./rustup.sh prepare|commit|push|pull"
+ ;;
+esac
diff --git a/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
new file mode 100644
index 000000000..091bfa1e9
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/setup_rust_fork.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+set -e
+
+./y.rs build --no-unstable-features
+
+echo "[SETUP] Rust fork"
+git clone https://github.com/rust-lang/rust.git || true
+pushd rust
+git fetch
+git checkout -- .
+git checkout "$(rustc -V | cut -d' ' -f3 | tr -d '(')"
+
+git apply - <<EOF
+diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
+index d95b5b7f17f..00b6f0e3635 100644
+--- a/library/alloc/Cargo.toml
++++ b/library/alloc/Cargo.toml
+@@ -8,7 +8,7 @@ edition = "2018"
+
+ [dependencies]
+ core = { path = "../core" }
+-compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
++compiler_builtins = { version = "0.1.66", features = ['rustc-dep-of-std', 'no-asm'] }
+
+ [dev-dependencies]
+ rand = "0.7"
+ rand_xorshift = "0.2"
+diff --git a/src/tools/compiletest/src/runtest.rs b/src/tools/compiletest/src/runtest.rs
+index 8431aa7b818..a3ff7e68ce5 100644
+--- a/src/tools/compiletest/src/runtest.rs
++++ b/src/tools/compiletest/src/runtest.rs
+@@ -3489,12 +3489,7 @@ fn normalize_output(&self, output: &str, custom_rules: &[(String, String)]) -> S
+ let compiler_src_dir = base_dir.join("compiler");
+ normalize_path(&compiler_src_dir, "$(echo '$COMPILER_DIR')");
+
+- if let Some(virtual_rust_source_base_dir) =
+- option_env!("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR").map(PathBuf::from)
+- {
+- normalize_path(&virtual_rust_source_base_dir.join("library"), "$(echo '$SRC_DIR')");
+- normalize_path(&virtual_rust_source_base_dir.join("compiler"), "$(echo '$COMPILER_DIR')");
+- }
++ normalize_path(&Path::new("$(cd ../build_sysroot/sysroot_src/library; pwd)"), "$(echo '$SRC_DIR')");
+
+ // Paths into the build directory
+ let test_build_dir = &self.config.build_base;
+EOF
+
+cat > config.toml <<EOF
+changelog-seen = 2
+
+[llvm]
+ninja = false
+
+[build]
+rustc = "$(pwd)/../build/rustc-clif"
+cargo = "$(rustup which cargo)"
+full-bootstrap = true
+local-rebuild = true
+
+[rust]
+codegen-backends = ["cranelift"]
+deny-warnings = false
+verbose-tests = false
+EOF
+popd
+
+# FIXME remove once inline asm is fully supported
+export RUSTFLAGS="$RUSTFLAGS --cfg=rustix_use_libc"
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh b/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
new file mode 100755
index 000000000..791d45799
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/test_bootstrap.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+set -e
+
+cd "$(dirname "$0")/../"
+
+source ./scripts/setup_rust_fork.sh
+
+echo "[TEST] Bootstrap of rustc"
+pushd rust
+rm -r compiler/rustc_codegen_cranelift/{Cargo.*,src}
+cp ../Cargo.* compiler/rustc_codegen_cranelift/
+cp -r ../src compiler/rustc_codegen_cranelift/src
+
+./x.py build --stage 1 library/std
+popd
diff --git a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
new file mode 100755
index 000000000..944787612
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh
@@ -0,0 +1,121 @@
+#!/usr/bin/env bash
+set -e
+
+cd $(dirname "$0")/../
+
+source ./scripts/setup_rust_fork.sh
+
+echo "[TEST] Test suite of rustc"
+pushd rust
+
+command -v rg >/dev/null 2>&1 || cargo install ripgrep
+
+rm -r src/test/ui/{extern/,unsized-locals/,lto/,linkage*} || true
+for test in $(rg --files-with-matches "lto|// needs-asm-support|// needs-unwind" src/test/{ui,incremental}); do
+ rm $test
+done
+
+for test in $(rg -i --files-with-matches "//(\[\w+\])?~[^\|]*\s*ERR|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
+ rm $test
+done
+
+git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
+
+# missing features
+# ================
+
+# requires stack unwinding
+rm src/test/incremental/change_crate_dep_kind.rs
+rm src/test/incremental/issue-80691-bad-eval-cache.rs # -Cpanic=abort causes abort instead of exit(101)
+
+# requires compiling with -Cpanic=unwind
+rm src/test/ui/test-attrs/test-fn-signature-verification-for-explicit-return-type.rs # "Cannot run dynamic test fn out-of-process"
+rm src/test/ui/async-await/async-fn-size-moved-locals.rs # -Cpanic=abort shrinks some generator by one byte
+rm src/test/ui/async-await/async-fn-size-uninit-locals.rs # same
+rm src/test/ui/generator/size-moved-locals.rs # same
+rm -r src/test/ui/macros/rfc-2011-nicer-assert-messages/
+
+# vendor intrinsics
+rm src/test/ui/sse2.rs # cpuid not supported, so sse2 not detected
+rm src/test/ui/intrinsics/const-eval-select-x86_64.rs # requires x86_64 vendor intrinsics
+rm src/test/ui/simd/array-type.rs # "Index argument for `simd_insert` is not a constant"
+rm src/test/ui/simd/intrinsic/generic-bitmask-pass.rs # simd_bitmask unimplemented
+rm src/test/ui/simd/intrinsic/generic-as.rs # simd_as unimplemented
+rm src/test/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs # simd_saturating_add unimplemented
+rm src/test/ui/simd/intrinsic/float-math-pass.rs # simd_fcos unimplemented
+rm src/test/ui/simd/intrinsic/generic-gather-pass.rs # simd_gather unimplemented
+rm src/test/ui/simd/intrinsic/generic-select-pass.rs # simd_select_bitmask unimplemented
+rm src/test/ui/simd/issue-85915-simd-ptrs.rs # simd_gather unimplemented
+rm src/test/ui/simd/issue-89193.rs # simd_gather unimplemented
+rm src/test/ui/simd/simd-bitmask.rs # simd_bitmask unimplemented
+
+# exotic linkages
+rm src/test/ui/issues/issue-33992.rs # unsupported linkages
+rm src/test/incremental/hashes/function_interfaces.rs # same
+rm src/test/incremental/hashes/statics.rs # same
+
+# variadic arguments
+rm src/test/ui/abi/mir/mir_codegen_calls_variadic.rs # requires float varargs
+rm src/test/ui/abi/variadic-ffi.rs # requires callee side vararg support
+
+# unsized locals
+rm -r src/test/run-pass-valgrind/unsized-locals
+
+# misc unimplemented things
+rm src/test/ui/intrinsics/intrinsic-nearby.rs # unimplemented nearbyintf32 and nearbyintf64 intrinsics
+rm src/test/ui/target-feature/missing-plusminus.rs # error not implemented
+rm src/test/ui/fn/dyn-fn-alignment.rs # wants a 256 byte alignment
+rm -r src/test/run-make/emit-named-files # requires full --emit support
+rm src/test/ui/abi/stack-probes.rs # stack probes not yet implemented
+
+# optimization tests
+# ==================
+rm src/test/ui/codegen/issue-28950.rs # depends on stack size optimizations
+rm src/test/ui/codegen/init-large-type.rs # same
+rm src/test/ui/issues/issue-40883.rs # same
+rm -r src/test/run-make/fmt-write-bloat/ # tests an optimization
+
+# backend specific tests
+# ======================
+rm src/test/incremental/thinlto/cgu_invalidated_when_import_{added,removed}.rs # requires LLVM
+rm src/test/ui/abi/stack-protector.rs # requires stack protector support
+
+# giving different but possibly correct results
+# =============================================
+rm src/test/ui/mir/mir_misc_casts.rs # depends on deduplication of constants
+rm src/test/ui/mir/mir_raw_fat_ptr.rs # same
+rm src/test/ui/consts/issue-33537.rs # same
+
+# doesn't work due to the way the rustc test suite is invoked.
+# should work when using ./x.py test the way it is intended
+# ============================================================
+rm -r src/test/run-make/emit-shared-files # requires the rustdoc executable in build/bin/
+rm -r src/test/run-make/unstable-flag-required # same
+rm -r src/test/run-make/rustdoc-* # same
+rm -r src/test/run-make/issue-88756-default-output # same
+rm -r src/test/run-make/remap-path-prefix-dwarf # requires llvm-dwarfdump
+
+# genuine bugs
+# ============
+rm src/test/ui/allocator/no_std-alloc-error-handler-default.rs # missing rust_oom definition
+
+rm src/test/incremental/spike-neg1.rs # errors out for some reason
+rm src/test/incremental/spike-neg2.rs # same
+rm src/test/ui/issues/issue-74564-if-expr-stack-overflow.rs # gives a stackoverflow before the backend runs
+rm src/test/ui/mir/ssa-analysis-regression-50041.rs # produces ICE
+rm src/test/ui/type-alias-impl-trait/assoc-projection-ice.rs # produces ICE
+
+rm src/test/ui/simd/intrinsic/generic-reduction-pass.rs # simd_reduce_add_unordered doesn't accept an accumulator for integer vectors
+
+# bugs in the test suite
+# ======================
+rm src/test/ui/backtrace.rs # TODO warning
+rm src/test/ui/empty_global_asm.rs # TODO add needs-asm-support
+rm src/test/ui/simple_global_asm.rs # TODO add needs-asm-support
+rm src/test/ui/test-attrs/test-type.rs # TODO panic message on stderr. correct stdout
+# not sure if this is actually a bug in the test suite, but the symbol list shows the function without leading _ for some reason
+rm -r src/test/run-make/native-link-modifier-bundle
+
+echo "[TEST] rustc test suite"
+RUST_TEST_NOCAPTURE=1 COMPILETEST_FORCE_STAGE0=1 ./x.py test --stage 0 src/test/{codegen-units,run-make,run-pass-valgrind,ui,incremental}
+popd
diff --git a/compiler/rustc_codegen_cranelift/scripts/tests.sh b/compiler/rustc_codegen_cranelift/scripts/tests.sh
new file mode 100755
index 000000000..9b5ffa409
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/scripts/tests.sh
@@ -0,0 +1,203 @@
+#!/usr/bin/env bash
+
+set -e
+
+export CG_CLIF_DISPLAY_CG_TIME=1
+export CG_CLIF_DISABLE_INCR_CACHE=1
+
+export HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
+export TARGET_TRIPLE=${TARGET_TRIPLE:-$HOST_TRIPLE}
+
+export RUN_WRAPPER=''
+
+case "$TARGET_TRIPLE" in
+ x86_64*)
+ export JIT_SUPPORTED=1
+ ;;
+ *)
+ export JIT_SUPPORTED=0
+ ;;
+esac
+
+if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
+ export JIT_SUPPORTED=0
+ if [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
+ # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+ export RUSTFLAGS='-Clinker=aarch64-linux-gnu-gcc '$RUSTFLAGS
+ export RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
+ elif [[ "$TARGET_TRIPLE" == "x86_64-pc-windows-gnu" ]]; then
+ # We are cross-compiling for Windows. Run tests in wine.
+ export RUN_WRAPPER='wine'
+ else
+ echo "Unknown non-native platform"
+ fi
+fi
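+
+# Example invocations (triples illustrative): run the native test suites with
+#     ./scripts/tests.sh no_sysroot
+# or cross-test for AArch64 under qemu with
+#     TARGET_TRIPLE=aarch64-unknown-linux-gnu ./scripts/tests.sh base_sysroot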
+
+# FIXME fix `#[linkage = "extern_weak"]` without this
+if [[ "$(uname)" == 'Darwin' ]]; then
+ export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
+fi
+
+MY_RUSTC="$(pwd)/build/rustc-clif $RUSTFLAGS -L crate=target/out --out-dir target/out -Cdebuginfo=2"
+
+function no_sysroot_tests() {
+ echo "[BUILD] mini_core"
+ $MY_RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target "$TARGET_TRIPLE"
+
+ echo "[BUILD] example"
+ $MY_RUSTC example/example.rs --crate-type lib --target "$TARGET_TRIPLE"
+
+ if [[ "$JIT_SUPPORTED" = "1" ]]; then
+ echo "[JIT] mini_core_hello_world"
+ CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+
+ echo "[JIT-lazy] mini_core_hello_world"
+ CG_CLIF_JIT_ARGS="abc bcd" $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/mini_core_hello_world.rs --cfg jit --target "$HOST_TRIPLE"
+ else
+ echo "[JIT] mini_core_hello_world (skipped)"
+ fi
+
+ echo "[AOT] mini_core_hello_world"
+ $MY_RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
+ # (echo "break set -n main"; echo "run"; sleep 1; echo "si -c 10"; sleep 1; echo "frame variable") | lldb -- ./target/out/mini_core_hello_world abc bcd
+}
+
+function base_sysroot_tests() {
+ echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+ $MY_RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
+
+ echo "[AOT] issue_91827_extern_types"
+ $MY_RUSTC example/issue-91827-extern-types.rs --crate-name issue_91827_extern_types --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/issue_91827_extern_types
+
+ echo "[BUILD] alloc_system"
+ $MY_RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
+
+ echo "[AOT] alloc_example"
+ $MY_RUSTC example/alloc_example.rs --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/alloc_example
+
+ if [[ "$JIT_SUPPORTED" = "1" ]]; then
+ echo "[JIT] std_example"
+ $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
+
+ echo "[JIT-lazy] std_example"
+ $MY_RUSTC -Zunstable-options -Cllvm-args=mode=jit-lazy -Cprefer-dynamic example/std_example.rs --target "$HOST_TRIPLE"
+ else
+ echo "[JIT] std_example (skipped)"
+ fi
+
+ echo "[AOT] std_example"
+ $MY_RUSTC example/std_example.rs --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/std_example arg
+
+ echo "[AOT] dst_field_align"
+ $MY_RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/dst_field_align
+
+ echo "[AOT] subslice-patterns-const-eval"
+ $MY_RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
+
+ echo "[AOT] track-caller-attribute"
+ $MY_RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/track-caller-attribute
+
+ echo "[AOT] float-minmax-pass"
+ $MY_RUSTC example/float-minmax-pass.rs --crate-type bin -Cpanic=abort --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/float-minmax-pass
+
+ echo "[AOT] mod_bench"
+ $MY_RUSTC example/mod_bench.rs --crate-type bin --target "$TARGET_TRIPLE"
+ $RUN_WRAPPER ./target/out/mod_bench
+}
+
+function extended_sysroot_tests() {
+ pushd rand
+ ../build/cargo-clif clean
+ if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ echo "[TEST] rust-random/rand"
+ ../build/cargo-clif test --workspace
+ else
+ echo "[AOT] rust-random/rand"
+ ../build/cargo-clif build --workspace --target $TARGET_TRIPLE --tests
+ fi
+ popd
+
+ pushd simple-raytracer
+ if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ echo "[BENCH COMPILE] ebobby/simple-raytracer"
+ hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "../build/cargo-clif clean" \
+ "RUSTFLAGS='' cargo build" \
+ "../build/cargo-clif build"
+
+ echo "[BENCH RUN] ebobby/simple-raytracer"
+ cp ./target/debug/main ./raytracer_cg_clif
+ hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_clif
+ else
+ ../build/cargo-clif clean
+ echo "[BENCH COMPILE] ebobby/simple-raytracer (skipped)"
+ echo "[COMPILE] ebobby/simple-raytracer"
+ ../build/cargo-clif build --target $TARGET_TRIPLE
+ echo "[BENCH RUN] ebobby/simple-raytracer (skipped)"
+ fi
+ popd
+
+ pushd build_sysroot/sysroot_src/library/core/tests
+ echo "[TEST] libcore"
+ ../../../../../build/cargo-clif clean
+ if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ ../../../../../build/cargo-clif test
+ else
+ ../../../../../build/cargo-clif build --target $TARGET_TRIPLE --tests
+ fi
+ popd
+
+ pushd regex
+ echo "[TEST] rust-lang/regex example shootout-regex-dna"
+ ../build/cargo-clif clean
+ export RUSTFLAGS="$RUSTFLAGS --cap-lints warn" # newer aho_corasick versions throw a deprecation warning
+ # Make sure `[codegen mono items] start` doesn't poison the diff
+ ../build/cargo-clif build --example shootout-regex-dna --target $TARGET_TRIPLE
+ if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ cat examples/regexdna-input.txt \
+ | ../build/cargo-clif run --example shootout-regex-dna --target $TARGET_TRIPLE \
+ | grep -v "Spawned thread" > res.txt
+ diff -u res.txt examples/regexdna-output.txt
+ fi
+
+ if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ echo "[TEST] rust-lang/regex tests"
+ ../build/cargo-clif test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
+ else
+ echo "[AOT] rust-lang/regex tests"
+ ../build/cargo-clif build --tests --target $TARGET_TRIPLE
+ fi
+ popd
+
+ pushd portable-simd
+ echo "[TEST] rust-lang/portable-simd"
+ ../build/cargo-clif clean
+ ../build/cargo-clif build --all-targets --target $TARGET_TRIPLE
+ if [[ "$HOST_TRIPLE" = "$TARGET_TRIPLE" ]]; then
+ ../build/cargo-clif test -q
+ fi
+ popd
+}
+
+case "$1" in
+ "no_sysroot")
+ no_sysroot_tests
+ ;;
+ "base_sysroot")
+ base_sysroot_tests
+ ;;
+ "extended_sysroot")
+ extended_sysroot_tests
+ ;;
+ *)
+ echo "unknown test suite"
+ ;;
+esac
diff --git a/compiler/rustc_codegen_cranelift/src/abi/comments.rs b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
new file mode 100644
index 000000000..37d2679c1
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/comments.rs
@@ -0,0 +1,135 @@
+//! Annotate the clif ir with comments describing how arguments are passed into the current function
+//! and where all locals are stored.
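+//!
+//! The output is tabular: a header line followed by one line per argument and
+//! local. An argument annotation from `add_arg_comment` below looks roughly like
+//! `arg   _2    = v0        Direct(ArgAttributes { .. })   i32` (spacing
+//! illustrative; the exact layout comes from the format strings in this file).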
+
+use std::borrow::Cow;
+
+use rustc_middle::mir;
+use rustc_target::abi::call::PassMode;
+
+use cranelift_codegen::entity::EntityRef;
+
+use crate::prelude::*;
+
+pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ if fx.clif_comments.enabled() {
+ fx.add_global_comment(
+ "kind loc.idx param pass mode ty".to_string(),
+ );
+ }
+}
+
+pub(super) fn add_arg_comment<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ kind: &str,
+ local: Option<mir::Local>,
+ local_field: Option<usize>,
+ params: &[Value],
+ arg_abi_mode: PassMode,
+ arg_layout: TyAndLayout<'tcx>,
+) {
+ if !fx.clif_comments.enabled() {
+ return;
+ }
+
+ let local = if let Some(local) = local {
+ Cow::Owned(format!("{:?}", local))
+ } else {
+ Cow::Borrowed("???")
+ };
+ let local_field = if let Some(local_field) = local_field {
+ Cow::Owned(format!(".{}", local_field))
+ } else {
+ Cow::Borrowed("")
+ };
+
+ let params = match params {
+ [] => Cow::Borrowed("-"),
+ [param] => Cow::Owned(format!("= {:?}", param)),
+ [param_a, param_b] => Cow::Owned(format!("= {:?},{:?}", param_a, param_b)),
+ params => Cow::Owned(format!(
+ "= {}",
+ params.iter().map(ToString::to_string).collect::<Vec<_>>().join(",")
+ )),
+ };
+
+ let pass_mode = format!("{:?}", arg_abi_mode);
+ fx.add_global_comment(format!(
+ "{kind:5}{local:>3}{local_field:<5} {params:10} {pass_mode:36} {ty:?}",
+ kind = kind,
+ local = local,
+ local_field = local_field,
+ params = params,
+ pass_mode = pass_mode,
+ ty = arg_layout.ty,
+ ));
+}
+
+pub(super) fn add_locals_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
+ if fx.clif_comments.enabled() {
+ fx.add_global_comment(String::new());
+ fx.add_global_comment(
+ "kind local ty size align (abi,pref)".to_string(),
+ );
+ }
+}
+
+pub(super) fn add_local_place_comments<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: CPlace<'tcx>,
+ local: Local,
+) {
+ if !fx.clif_comments.enabled() {
+ return;
+ }
+ let TyAndLayout { ty, layout } = place.layout();
+ let rustc_target::abi::LayoutS {
+ size,
+ align,
+ abi: _,
+ variants: _,
+ fields: _,
+ largest_niche: _,
+ } = layout.0.0;
+
+ let (kind, extra) = match *place.inner() {
+ CPlaceInner::Var(place_local, var) => {
+ assert_eq!(local, place_local);
+ ("ssa", Cow::Owned(format!(",var={}", var.index())))
+ }
+ CPlaceInner::VarPair(place_local, var1, var2) => {
+ assert_eq!(local, place_local);
+ ("ssa", Cow::Owned(format!(",var=({}, {})", var1.index(), var2.index())))
+ }
+ CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
+ CPlaceInner::Addr(ptr, meta) => {
+ let meta = if let Some(meta) = meta {
+ Cow::Owned(format!(",meta={}", meta))
+ } else {
+ Cow::Borrowed("")
+ };
+ match ptr.debug_base_and_offset() {
+ (crate::pointer::PointerBase::Addr(addr), offset) => {
+ ("reuse", format!("storage={}{}{}", addr, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Stack(stack_slot), offset) => {
+ ("stack", format!("storage={}{}{}", stack_slot, offset, meta).into())
+ }
+ (crate::pointer::PointerBase::Dangling(align), offset) => {
+ ("zst", format!("align={},offset={}", align.bytes(), offset).into())
+ }
+ }
+ }
+ };
+
+ fx.add_global_comment(format!(
+ "{:<5} {:5} {:30} {:4}b {}, {}{}{}",
+ kind,
+ format!("{:?}", local),
+ format!("{:?}", ty),
+ size.bytes(),
+ align.abi.bytes(),
+ align.pref.bytes(),
+ if extra.is_empty() { "" } else { " " },
+ extra,
+ ));
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
new file mode 100644
index 000000000..815450f68
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -0,0 +1,611 @@
+//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
+
+mod comments;
+mod pass_mode;
+mod returning;
+
+use cranelift_module::ModuleError;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_target::abi::call::{Conv, FnAbi};
+use rustc_target::spec::abi::Abi;
+
+use cranelift_codegen::ir::{AbiParam, SigRef};
+
+use self::pass_mode::*;
+use crate::prelude::*;
+
+pub(crate) use self::returning::codegen_return;
+
+fn clif_sig_from_fn_abi<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ default_call_conv: CallConv,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+) -> Signature {
+ let call_conv = match fn_abi.conv {
+ Conv::Rust | Conv::C => default_call_conv,
+ Conv::RustCold => CallConv::Cold,
+ Conv::X86_64SysV => CallConv::SystemV,
+ Conv::X86_64Win64 => CallConv::WindowsFastcall,
+ Conv::ArmAapcs
+ | Conv::CCmseNonSecureCall
+ | Conv::Msp430Intr
+ | Conv::PtxKernel
+ | Conv::X86Fastcall
+ | Conv::X86Intr
+ | Conv::X86Stdcall
+ | Conv::X86ThisCall
+ | Conv::X86VectorCall
+ | Conv::AmdGpuKernel
+ | Conv::AvrInterrupt
+ | Conv::AvrNonBlockingInterrupt => todo!("{:?}", fn_abi.conv),
+ };
+    let inputs = fn_abi.args.iter().flat_map(|arg_abi| arg_abi.get_abi_param(tcx));
+
+ let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
+    // Sometimes the first param is a pointer to the place where the return value needs to be stored.
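+    // For example, a return value passed as `PassMode::Indirect` adds a leading pointer param
+    // tagged `ArgumentPurpose::StructReturn` (see `get_abi_return` in pass_mode.rs).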
+ let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
+
+ Signature { params, returns, call_conv }
+}
+
+pub(crate) fn get_function_sig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ triple: &target_lexicon::Triple,
+ inst: Instance<'tcx>,
+) -> Signature {
+ assert!(!inst.substs.needs_infer());
+ clif_sig_from_fn_abi(
+ tcx,
+ CallConv::triple_default(triple),
+ &RevealAllLayoutCx(tcx).fn_abi_of_instance(inst, ty::List::empty()),
+ )
+}
+
+/// Instance must be monomorphized
+pub(crate) fn import_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ module: &mut dyn Module,
+ inst: Instance<'tcx>,
+) -> FuncId {
+ let name = tcx.symbol_name(inst).name;
+ let sig = get_function_sig(tcx, module.isa().triple(), inst);
+ match module.declare_function(name, Linkage::Import, &sig) {
+ Ok(func_id) => func_id,
+ Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{name}` as function, but it was already declared as static"
+ )),
+ Err(ModuleError::IncompatibleSignature(_, prev_sig, new_sig)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{name}` with signature {new_sig:?}, \
+ but it was already declared with signature {prev_sig:?}"
+ )),
+ Err(err) => Err::<_, _>(err).unwrap(),
+ }
+}
+
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+ /// Instance must be monomorphized
+ pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
+ let func_id = import_function(self.tcx, self.module, inst);
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
+
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", inst));
+ }
+
+ func_ref
+ }
+
+ pub(crate) fn lib_call(
+ &mut self,
+ name: &str,
+ params: Vec<AbiParam>,
+ returns: Vec<AbiParam>,
+ args: &[Value],
+ ) -> &[Value] {
+ let sig = Signature { params, returns, call_conv: self.target_config.default_call_conv };
+ let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", name));
+ }
+ let call_inst = self.bcx.ins().call(func_ref, args);
+ if self.clif_comments.enabled() {
+ self.add_comment(call_inst, format!("easy_call {}", name));
+ }
+ let results = self.bcx.inst_results(call_inst);
+ assert!(results.len() <= 2, "{}", results.len());
+ results
+ }
+
+ pub(crate) fn easy_call(
+ &mut self,
+ name: &str,
+ args: &[CValue<'tcx>],
+ return_ty: Ty<'tcx>,
+ ) -> CValue<'tcx> {
+ let (input_tys, args): (Vec<_>, Vec<_>) = args
+ .iter()
+ .map(|arg| {
+ (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
+ })
+ .unzip();
+ let return_layout = self.layout_of(return_ty);
+ let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
+ tup.iter().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
+ } else {
+ vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
+ };
+ let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
+ match *ret_vals {
+ [] => CValue::by_ref(
+ Pointer::const_addr(self, i64::from(self.pointer_type.bytes())),
+ return_layout,
+ ),
+ [val] => CValue::by_val(val, return_layout),
+ [val, extra] => CValue::by_val_pair(val, extra, return_layout),
+ _ => unreachable!(),
+ }
+ }
+}
+
+/// Make a [`CPlace`] capable of holding value of the specified type.
+fn make_local_place<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ is_ssa: bool,
+) -> CPlace<'tcx> {
+ let place = if is_ssa {
+ if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi {
+ CPlace::new_var_pair(fx, local, layout)
+ } else {
+ CPlace::new_var(fx, local, layout)
+ }
+ } else {
+ CPlace::new_stack_slot(fx, layout)
+ };
+
+ self::comments::add_local_place_comments(fx, place, local);
+
+ place
+}
+
+pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
+ fx.bcx.append_block_params_for_function_params(start_block);
+
+ fx.bcx.switch_to_block(start_block);
+ fx.bcx.ins().nop();
+
+ let ssa_analyzed = crate::analyze::analyze(fx);
+
+ self::comments::add_args_header_comment(fx);
+
+ let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
+ let ret_place =
+ self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
+ assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
+
+    // None means pass_mode == PassMode::Ignore
+ enum ArgKind<'tcx> {
+ Normal(Option<CValue<'tcx>>),
+ Spread(Vec<Option<CValue<'tcx>>>),
+ }
+
+ let fn_abi = fx.fn_abi.take().unwrap();
+
+ // FIXME implement variadics in cranelift
+ if fn_abi.c_variadic {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ "Defining variadic functions is not yet supported by Cranelift",
+ );
+ }
+
+ let mut arg_abis_iter = fn_abi.args.iter();
+
+ let func_params = fx
+ .mir
+ .args_iter()
+ .map(|local| {
+ let arg_ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+
+ // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
+ if Some(local) == fx.mir.spread_arg {
+ // This argument (e.g. the last argument in the "rust-call" ABI)
+ // is a tuple that was spread at the ABI level and now we have
+ // to reconstruct it into a tuple local variable, from multiple
+ // individual function arguments.
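+                // E.g. a closure with signature `FnMut(u8, u16)` receives the ABI-level
+                // parameters `self, u8, u16`, and the loop below packs the trailing `u8`
+                // and `u16` back into a single `(u8, u16)` tuple local (types illustrative).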
+
+ let tupled_arg_tys = match arg_ty.kind() {
+ ty::Tuple(ref tys) => tys,
+ _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
+ };
+
+ let mut params = Vec::new();
+ for (i, _arg_ty) in tupled_arg_tys.iter().enumerate() {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
+ params.push(param);
+ }
+
+ (local, ArgKind::Spread(params), arg_ty)
+ } else {
+ let arg_abi = arg_abis_iter.next().unwrap();
+ let param =
+ cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
+ (local, ArgKind::Normal(param), arg_ty)
+ }
+ })
+ .collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
+
+ assert!(fx.caller_location.is_none());
+ if fx.instance.def.requires_caller_location(fx.tcx) {
+ // Store caller location for `#[track_caller]`.
+ let arg_abi = arg_abis_iter.next().unwrap();
+ fx.caller_location =
+ Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
+ }
+
+ assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
+ fx.fn_abi = Some(fn_abi);
+ assert!(block_params_iter.next().is_none(), "arg_value left behind");
+
+ self::comments::add_locals_header_comment(fx);
+
+ for (local, arg_kind, ty) in func_params {
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ // While this is normally an optimization to prevent an unnecessary copy when an argument is
+ // not mutated by the current function, this is necessary to support unsized arguments.
+ if let ArgKind::Normal(Some(val)) = arg_kind {
+ if let Some((addr, meta)) = val.try_to_ptr() {
+ // Ownership of the value at the backing storage for an argument is passed to the
+ // callee per the ABI, so it is fine to borrow the backing storage of this argument
+ // to prevent a copy.
+
+ let place = if let Some(meta) = meta {
+ CPlace::for_ptr_with_extra(addr, meta, val.layout())
+ } else {
+ CPlace::for_ptr(addr, val.layout())
+ };
+
+ self::comments::add_local_place_comments(fx, place, local);
+
+ assert_eq!(fx.local_map.push(place), local);
+ continue;
+ }
+ }
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+
+ match arg_kind {
+ ArgKind::Normal(param) => {
+ if let Some(param) = param {
+ place.write_cvalue(fx, param);
+ }
+ }
+ ArgKind::Spread(params) => {
+ for (i, param) in params.into_iter().enumerate() {
+ if let Some(param) = param {
+ place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
+ }
+ }
+ }
+ }
+ }
+
+ for local in fx.mir.vars_and_temps_iter() {
+ let ty = fx.monomorphize(fx.mir.local_decls[local].ty);
+ let layout = fx.layout_of(ty);
+
+ let is_ssa = ssa_analyzed[local] == crate::analyze::SsaKind::Ssa;
+
+ let place = make_local_place(fx, local, layout, is_ssa);
+ assert_eq!(fx.local_map.push(place), local);
+ }
+
+ fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
+}
+
+struct CallArgument<'tcx> {
+ value: CValue<'tcx>,
+ is_owned: bool,
+}
+
+// FIXME avoid intermediate `CValue` before calling `adjust_arg_for_abi`
+fn codegen_call_argument_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CallArgument<'tcx> {
+ CallArgument {
+ value: codegen_operand(fx, operand),
+ is_owned: matches!(operand, Operand::Move(_)),
+ }
+}
+
+pub(crate) fn codegen_terminator_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source_info: mir::SourceInfo,
+ func: &Operand<'tcx>,
+ args: &[Operand<'tcx>],
+ destination: Place<'tcx>,
+ target: Option<BasicBlock>,
+) {
+ let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
+ let fn_sig =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+
+ let ret_place = codegen_place(fx, destination);
+
+    // Handle special calls like intrinsics and empty drop glue.
+ let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
+ let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .unwrap()
+ .polymorphize(fx.tcx);
+
+ if fx.tcx.symbol_name(instance).name.starts_with("llvm.") {
+ crate::intrinsics::codegen_llvm_intrinsic_call(
+ fx,
+ &fx.tcx.symbol_name(instance).name,
+ substs,
+ args,
+ ret_place,
+ target,
+ );
+ return;
+ }
+
+ match instance.def {
+ InstanceDef::Intrinsic(_) => {
+ crate::intrinsics::codegen_intrinsic_call(
+ fx,
+ instance,
+ args,
+ ret_place,
+ target,
+ source_info,
+ );
+ return;
+ }
+ InstanceDef::DropGlue(_, None) => {
+ // empty drop glue - a nop.
+ let dest = target.expect("Non terminating drop_in_place_real???");
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ }
+ _ => Some(instance),
+ }
+ } else {
+ None
+ };
+
+ let extra_args = &args[fn_sig.inputs().len()..];
+ let extra_args = fx
+ .tcx
+ .mk_type_list(extra_args.iter().map(|op_arg| fx.monomorphize(op_arg.ty(fx.mir, fx.tcx))));
+ let fn_abi = if let Some(instance) = instance {
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(instance, extra_args)
+ } else {
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_fn_ptr(fn_ty.fn_sig(fx.tcx), extra_args)
+ };
+
+ let is_cold = if fn_sig.abi == Abi::RustCold {
+ true
+ } else {
+ instance
+ .map(|inst| {
+ fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD)
+ })
+ .unwrap_or(false)
+ };
+ if is_cold {
+ fx.bcx.set_cold_block(fx.bcx.current_block().unwrap());
+ if let Some(destination_block) = target {
+ fx.bcx.set_cold_block(fx.get_block(destination_block));
+ }
+ }
+
+ // Unpack arguments tuple for closures
+ let mut args = if fn_sig.abi == Abi::RustCall {
+ assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
+ let self_arg = codegen_call_argument_operand(fx, &args[0]);
+ let pack_arg = codegen_call_argument_operand(fx, &args[1]);
+
+ let tupled_arguments = match pack_arg.value.layout().ty.kind() {
+ ty::Tuple(ref tupled_arguments) => tupled_arguments,
+ _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
+ };
+
+ let mut args = Vec::with_capacity(1 + tupled_arguments.len());
+ args.push(self_arg);
+ for i in 0..tupled_arguments.len() {
+ args.push(CallArgument {
+ value: pack_arg.value.value_field(fx, mir::Field::new(i)),
+ is_owned: pack_arg.is_owned,
+ });
+ }
+ args
+ } else {
+ args.iter().map(|arg| codegen_call_argument_operand(fx, arg)).collect::<Vec<_>>()
+ };
+
+ // Pass the caller location for `#[track_caller]`.
+ if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
+ let caller_location = fx.get_caller_location(source_info);
+ args.push(CallArgument { value: caller_location, is_owned: false });
+ }
+
+ let args = args;
+ assert_eq!(fn_abi.args.len(), args.len());
+
+ enum CallTarget {
+ Direct(FuncRef),
+ Indirect(SigRef, Value),
+ }
+
+ let (func_ref, first_arg_override) = match instance {
+ // Trait object call
+ Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
+ if fx.clif_comments.enabled() {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(
+ nop_inst,
+ format!("virtual call; self arg pass mode: {:?}", &fn_abi.args[0]),
+ );
+ }
+
+ let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0].value, idx);
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
+ (CallTarget::Indirect(sig, method), Some(ptr))
+ }
+
+ // Normal call
+ Some(instance) => {
+ let func_ref = fx.get_function_ref(instance);
+ (CallTarget::Direct(func_ref), None)
+ }
+
+ // Indirect call
+ None => {
+ if fx.clif_comments.enabled() {
+ let nop_inst = fx.bcx.ins().nop();
+ fx.add_comment(nop_inst, "indirect call");
+ }
+
+ let func = codegen_operand(fx, func).load_scalar(fx);
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+
+ (CallTarget::Indirect(sig, func), None)
+ }
+ };
+
+ self::returning::codegen_with_call_return_arg(fx, &fn_abi.ret, ret_place, |fx, return_ptr| {
+ let call_args = return_ptr
+ .into_iter()
+ .chain(first_arg_override.into_iter())
+ .chain(
+ args.into_iter()
+ .enumerate()
+ .skip(if first_arg_override.is_some() { 1 } else { 0 })
+                    .flat_map(|(i, arg)| {
+                        adjust_arg_for_abi(fx, arg.value, &fn_abi.args[i], arg.is_owned)
+                    }),
+ )
+ .collect::<Vec<Value>>();
+
+ let call_inst = match func_ref {
+ CallTarget::Direct(func_ref) => fx.bcx.ins().call(func_ref, &call_args),
+ CallTarget::Indirect(sig, func_ptr) => {
+ fx.bcx.ins().call_indirect(sig, func_ptr, &call_args)
+ }
+ };
+
+ // FIXME find a cleaner way to support varargs
+ if fn_sig.c_variadic {
+ if !matches!(fn_sig.abi, Abi::C { .. }) {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
+ );
+ }
+ let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
+ let abi_params = call_args
+ .into_iter()
+ .map(|arg| {
+ let ty = fx.bcx.func.dfg.value_type(arg);
+ if !ty.is_int() {
+ // FIXME set %al to upperbound on float args once floats are supported
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ &format!("Non int ty {:?} for variadic call", ty),
+ );
+ }
+ AbiParam::new(ty)
+ })
+ .collect::<Vec<AbiParam>>();
+ fx.bcx.func.dfg.signatures[sig_ref].params = abi_params;
+ }
+
+ call_inst
+ });
+
+ if let Some(dest) = target {
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+}
+
+pub(crate) fn codegen_drop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source_info: mir::SourceInfo,
+ drop_place: CPlace<'tcx>,
+) {
+ let ty = drop_place.layout().ty;
+ let drop_instance = Instance::resolve_drop_in_place(fx.tcx, ty).polymorphize(fx.tcx);
+
+ if let ty::InstanceDef::DropGlue(_, None) = drop_instance.def {
+ // we don't actually need to drop anything
+ } else {
+ match ty.kind() {
+ ty::Dynamic(..) => {
+ let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
+ let ptr = ptr.get_addr(fx);
+ let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
+
+ // FIXME(eddyb) perhaps move some of this logic into
+ // `Instance::resolve_drop_in_place`?
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_instance.def_id(), 0),
+ substs: drop_instance.substs,
+ };
+ let fn_abi =
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(virtual_drop, ty::List::empty());
+
+ let sig = clif_sig_from_fn_abi(fx.tcx, fx.target_config.default_call_conv, &fn_abi);
+ let sig = fx.bcx.import_signature(sig);
+ fx.bcx.ins().call_indirect(sig, drop_fn, &[ptr]);
+ }
+ _ => {
+ assert!(!matches!(drop_instance.def, InstanceDef::Virtual(_, _)));
+
+ let fn_abi =
+ RevealAllLayoutCx(fx.tcx).fn_abi_of_instance(drop_instance, ty::List::empty());
+
+ let arg_value = drop_place.place_ref(
+ fx,
+ fx.layout_of(fx.tcx.mk_ref(
+ fx.tcx.lifetimes.re_erased,
+ TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
+ )),
+ );
+ let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0], true);
+
+ let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
+
+ if drop_instance.def.requires_caller_location(fx.tcx) {
+ // Pass the caller location for `#[track_caller]`.
+ let caller_location = fx.get_caller_location(source_info);
+ call_args.extend(
+ adjust_arg_for_abi(fx, caller_location, &fn_abi.args[1], false).into_iter(),
+ );
+ }
+
+ let func_ref = fx.get_function_ref(drop_instance);
+ fx.bcx.ins().call(func_ref, &call_args);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
new file mode 100644
index 000000000..6c10baa53
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs
@@ -0,0 +1,299 @@
+//! Argument passing
+
+use crate::prelude::*;
+use crate::value_and_place::assert_assignable;
+
+use cranelift_codegen::ir::{ArgumentExtension, ArgumentPurpose};
+use rustc_target::abi::call::{
+ ArgAbi, ArgAttributes, ArgExtension as RustcArgExtension, CastTarget, PassMode, Reg, RegKind,
+};
+use smallvec::{smallvec, SmallVec};
+
+pub(super) trait ArgAbiExt<'tcx> {
+ fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]>;
+ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>);
+}
+
+fn reg_to_abi_param(reg: Reg) -> AbiParam {
+ let clif_ty = match (reg.kind, reg.size.bytes()) {
+ (RegKind::Integer, 1) => types::I8,
+ (RegKind::Integer, 2) => types::I16,
+ (RegKind::Integer, 3..=4) => types::I32,
+ (RegKind::Integer, 5..=8) => types::I64,
+ (RegKind::Integer, 9..=16) => types::I128,
+ (RegKind::Float, 4) => types::F32,
+ (RegKind::Float, 8) => types::F64,
+ (RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
+ _ => unreachable!("{:?}", reg),
+ };
+ AbiParam::new(clif_ty)
+}
+
+fn apply_arg_attrs_to_abi_param(mut param: AbiParam, arg_attrs: ArgAttributes) -> AbiParam {
+ match arg_attrs.arg_ext {
+ RustcArgExtension::None => {}
+ RustcArgExtension::Zext => param.extension = ArgumentExtension::Uext,
+ RustcArgExtension::Sext => param.extension = ArgumentExtension::Sext,
+ }
+ param
+}
+
+fn cast_target_to_abi_params(cast: CastTarget) -> SmallVec<[AbiParam; 2]> {
+ let (rest_count, rem_bytes) = if cast.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ } else {
+ (
+ cast.rest.total.bytes() / cast.rest.unit.size.bytes(),
+ cast.rest.total.bytes() % cast.rest.unit.size.bytes(),
+ )
+ };
+
+    // Note: unlike the LLVM equivalent of this code, we don't have separate branches for when
+    // there is no prefix, since a single unit, an array and a heterogeneous struct are not
+    // represented using different types in Cranelift IR; instead a single array of primitive
+    // types is used.
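+    // For example, `rest.unit` being an 8-byte integer and `rest.total` being 20 bytes yields
+    // `rest_count = 2` and `rem_bytes = 4`, i.e. the params `[i64, i64, i32]` after any prefix
+    // registers.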
+
+ // Create list of fields in the main structure
+ let mut args = cast
+ .prefix
+ .iter()
+ .flatten()
+ .map(|&reg| reg_to_abi_param(reg))
+ .chain((0..rest_count).map(|_| reg_to_abi_param(cast.rest.unit)))
+ .collect::<SmallVec<_>>();
+
+ // Append final integer
+ if rem_bytes != 0 {
+ // Only integers can be really split further.
+ assert_eq!(cast.rest.unit.kind, RegKind::Integer);
+ args.push(reg_to_abi_param(Reg {
+ kind: RegKind::Integer,
+ size: Size::from_bytes(rem_bytes),
+ }));
+ }
+
+ args
+}
+
+impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+ fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> {
+ match self.mode {
+ PassMode::Ignore => smallvec![],
+ PassMode::Direct(attrs) => match self.layout.abi {
+ Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param(
+ AbiParam::new(scalar_to_clif_type(tcx, scalar)),
+ attrs
+ )],
+ Abi::Vector { .. } => {
+ let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+ smallvec![AbiParam::new(vector_ty)]
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
+ Abi::ScalarPair(a, b) => {
+ let a = scalar_to_clif_type(tcx, a);
+ let b = scalar_to_clif_type(tcx, b);
+ smallvec![
+ apply_arg_attrs_to_abi_param(AbiParam::new(a), attrs_a),
+ apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
+ ]
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Cast(cast) => cast_target_to_abi_params(cast),
+ PassMode::Indirect { attrs, extra_attrs: None, on_stack } => {
+ if on_stack {
+ // Abi requires aligning struct size to pointer size
+ let size = self.layout.size.align_to(tcx.data_layout.pointer_align.abi);
+ let size = u32::try_from(size.bytes()).unwrap();
+ smallvec![apply_arg_attrs_to_abi_param(
+ AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructArgument(size),),
+ attrs
+ )]
+ } else {
+ smallvec![apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs)]
+ }
+ }
+ PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+ assert!(!on_stack);
+ smallvec![
+ apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), attrs),
+ apply_arg_attrs_to_abi_param(AbiParam::new(pointer_ty(tcx)), extra_attrs),
+ ]
+ }
+ }
+ }
+
+ fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
+ match self.mode {
+ PassMode::Ignore => (None, vec![]),
+ PassMode::Direct(_) => match self.layout.abi {
+ Abi::Scalar(scalar) => {
+ (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
+ }
+ Abi::Vector { .. } => {
+ let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+ (None, vec![AbiParam::new(vector_ty)])
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Pair(_, _) => match self.layout.abi {
+ Abi::ScalarPair(a, b) => {
+ let a = scalar_to_clif_type(tcx, a);
+ let b = scalar_to_clif_type(tcx, b);
+ (None, vec![AbiParam::new(a), AbiParam::new(b)])
+ }
+ _ => unreachable!("{:?}", self.layout.abi),
+ },
+ PassMode::Cast(cast) => (None, cast_target_to_abi_params(cast).into_iter().collect()),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack } => {
+ assert!(!on_stack);
+ (Some(AbiParam::special(pointer_ty(tcx), ArgumentPurpose::StructReturn)), vec![])
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ }
+ }
+}
+
+pub(super) fn to_casted_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ cast: CastTarget,
+) -> SmallVec<[Value; 2]> {
+ let (ptr, meta) = arg.force_stack(fx);
+ assert!(meta.is_none());
+ let mut offset = 0;
+ cast_target_to_abi_params(cast)
+ .into_iter()
+ .map(|param| {
+ let val = ptr.offset_i64(fx, offset).load(fx, param.value_type, MemFlags::new());
+ offset += i64::from(param.value_type.bytes());
+ val
+ })
+ .collect()
+}
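+
+// Illustrative example (not part of the upstream file): for a value cast to `[i64, i32]`, the
+// loop above emits a `load.i64` at `ptr + 0` followed by a `load.i32` at `ptr + 8`; the offset
+// always advances by the byte size of the previous param's value type.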
+
+pub(super) fn from_casted_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ block_params: &[Value],
+ layout: TyAndLayout<'tcx>,
+ cast: CastTarget,
+) -> CValue<'tcx> {
+ let abi_params = cast_target_to_abi_params(cast);
+ let abi_param_size: u32 = abi_params.iter().map(|param| param.value_type.bytes()).sum();
+ let layout_size = u32::try_from(layout.size.bytes()).unwrap();
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+        // specify stack slot alignment.
+        // The stack slot size may be bigger than the layout size, for example for `[u8; 3]`,
+        // which is packed into an `i32`. It may also be smaller, for example when the type is a
+        // wrapper around an integer with a larger alignment than the integer itself.
+ size: (std::cmp::max(abi_param_size, layout_size) + 15) / 16 * 16,
+ });
+ let ptr = Pointer::new(fx.bcx.ins().stack_addr(pointer_ty(fx.tcx), stack_slot, 0));
+ let mut offset = 0;
+ let mut block_params_iter = block_params.iter().copied();
+    for param in abi_params {
+        ptr.offset_i64(fx, offset).store(fx, block_params_iter.next().unwrap(), MemFlags::new());
+        offset += i64::from(param.value_type.bytes());
+    }
+ assert_eq!(block_params_iter.next(), None, "Leftover block param");
+ CValue::by_ref(ptr, layout)
+}
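+
+// Illustrative example (not part of the upstream file): for `[u8; 3]` cast to a single `i32`,
+// `abi_param_size = 4` and `layout_size = 3`, so the slot size is
+// `(max(4, 3) + 15) / 16 * 16 = 16` bytes, large enough for both views of the value.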
+
+/// Get a set of values to be passed as function arguments.
+pub(super) fn adjust_arg_for_abi<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ is_owned: bool,
+) -> SmallVec<[Value; 2]> {
+ assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty, 16);
+ match arg_abi.mode {
+ PassMode::Ignore => smallvec![],
+ PassMode::Direct(_) => smallvec![arg.load_scalar(fx)],
+ PassMode::Pair(_, _) => {
+ let (a, b) = arg.load_scalar_pair(fx);
+ smallvec![a, b]
+ }
+ PassMode::Cast(cast) => to_casted_value(fx, arg, cast),
+ PassMode::Indirect { .. } => {
+ if is_owned {
+ match arg.force_stack(fx) {
+ (ptr, None) => smallvec![ptr.get_addr(fx)],
+ (ptr, Some(meta)) => smallvec![ptr.get_addr(fx), meta],
+ }
+ } else {
+ // Ownership of the value at the backing storage for an argument is passed to the
+ // callee per the ABI, so we must make a copy of the argument unless the argument
+ // local is moved.
+ let place = CPlace::new_stack_slot(fx, arg.layout());
+ place.write_cvalue(fx, arg);
+ smallvec![place.to_ptr().get_addr(fx)]
+ }
+ }
+ }
+}
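+
+// Illustrative example (not part of the upstream file): a large struct passed by value uses
+// `PassMode::Indirect`; if the argument is only borrowed from a local (`is_owned == false`),
+// the code above first copies it into a fresh stack slot, since the callee owns the pointed-to
+// storage per the ABI and may overwrite it.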
+
+/// Create a [`CValue`] containing the value of a function parameter, adding clif function
+/// parameters as necessary.
+pub(super) fn cvalue_for_param<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Option<mir::Local>,
+ local_field: Option<usize>,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ block_params_iter: &mut impl Iterator<Item = Value>,
+) -> Option<CValue<'tcx>> {
+ let block_params = arg_abi
+ .get_abi_param(fx.tcx)
+ .into_iter()
+ .map(|abi_param| {
+ let block_param = block_params_iter.next().unwrap();
+ assert_eq!(fx.bcx.func.dfg.value_type(block_param), abi_param.value_type);
+ block_param
+ })
+ .collect::<SmallVec<[_; 2]>>();
+
+ crate::abi::comments::add_arg_comment(
+ fx,
+ "arg",
+ local,
+ local_field,
+ &block_params,
+ arg_abi.mode,
+ arg_abi.layout,
+ );
+
+ match arg_abi.mode {
+ PassMode::Ignore => None,
+ PassMode::Direct(_) => {
+ assert_eq!(block_params.len(), 1, "{:?}", block_params);
+ Some(CValue::by_val(block_params[0], arg_abi.layout))
+ }
+ PassMode::Pair(_, _) => {
+ assert_eq!(block_params.len(), 2, "{:?}", block_params);
+ Some(CValue::by_val_pair(block_params[0], block_params[1], arg_abi.layout))
+ }
+ PassMode::Cast(cast) => Some(from_casted_value(fx, &block_params, arg_abi.layout, cast)),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ assert_eq!(block_params.len(), 1, "{:?}", block_params);
+ Some(CValue::by_ref(Pointer::new(block_params[0]), arg_abi.layout))
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ assert_eq!(block_params.len(), 2, "{:?}", block_params);
+ Some(CValue::by_ref_unsized(
+ Pointer::new(block_params[0]),
+ block_params[1],
+ arg_abi.layout,
+ ))
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/abi/returning.rs b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
new file mode 100644
index 000000000..ff3bb2dfd
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/abi/returning.rs
@@ -0,0 +1,141 @@
+//! Return value handling
+
+use crate::prelude::*;
+
+use rustc_target::abi::call::{ArgAbi, PassMode};
+use smallvec::{smallvec, SmallVec};
+
+/// Return a place where the return value of the current function can be written to. If necessary,
+/// this adds an extra parameter pointing to where the return value needs to be stored.
+pub(super) fn codegen_return_param<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ssa_analyzed: &rustc_index::vec::IndexVec<Local, crate::analyze::SsaKind>,
+ block_params_iter: &mut impl Iterator<Item = Value>,
+) -> CPlace<'tcx> {
+ let (ret_place, ret_param): (_, SmallVec<[_; 2]>) = match fx.fn_abi.as_ref().unwrap().ret.mode {
+ PassMode::Ignore | PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => {
+ let is_ssa = ssa_analyzed[RETURN_PLACE] == crate::analyze::SsaKind::Ssa;
+ (
+ super::make_local_place(
+ fx,
+ RETURN_PLACE,
+ fx.fn_abi.as_ref().unwrap().ret.layout,
+ is_ssa,
+ ),
+ smallvec![],
+ )
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ let ret_param = block_params_iter.next().unwrap();
+ assert_eq!(fx.bcx.func.dfg.value_type(ret_param), fx.pointer_type);
+ (
+ CPlace::for_ptr(Pointer::new(ret_param), fx.fn_abi.as_ref().unwrap().ret.layout),
+ smallvec![ret_param],
+ )
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ };
+
+ crate::abi::comments::add_arg_comment(
+ fx,
+ "ret",
+ Some(RETURN_PLACE),
+ None,
+ &ret_param,
+ fx.fn_abi.as_ref().unwrap().ret.mode,
+ fx.fn_abi.as_ref().unwrap().ret.layout,
+ );
+
+ ret_place
+}
+
+/// Invokes the closure, passing a value representing the return pointer if one is necessary. When
+/// the closure returns, the call's return value(s), if any, are written to the correct place.
+pub(super) fn codegen_with_call_return_arg<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ret_arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ ret_place: CPlace<'tcx>,
+ f: impl FnOnce(&mut FunctionCx<'_, '_, 'tcx>, Option<Value>) -> Inst,
+) {
+ let (ret_temp_place, return_ptr) = match ret_arg_abi.mode {
+ PassMode::Ignore => (None, None),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ if matches!(ret_place.inner(), CPlaceInner::Addr(_, None)) {
+            // This is an optimization to prevent unnecessary copies of the return value when the
+            // return place is already a memory place as opposed to a register. As it is purely an
+            // optimization, this match arm can be safely removed.
+ (None, Some(ret_place.to_ptr().get_addr(fx)))
+ } else {
+ let place = CPlace::new_stack_slot(fx, ret_arg_abi.layout);
+ (Some(place), Some(place.to_ptr().get_addr(fx)))
+ }
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ PassMode::Direct(_) | PassMode::Pair(_, _) | PassMode::Cast(_) => (None, None),
+ };
+
+ let call_inst = f(fx, return_ptr);
+
+ match ret_arg_abi.mode {
+ PassMode::Ignore => {}
+ PassMode::Direct(_) => {
+ let ret_val = fx.bcx.inst_results(call_inst)[0];
+ ret_place.write_cvalue(fx, CValue::by_val(ret_val, ret_arg_abi.layout));
+ }
+ PassMode::Pair(_, _) => {
+ let ret_val_a = fx.bcx.inst_results(call_inst)[0];
+ let ret_val_b = fx.bcx.inst_results(call_inst)[1];
+ ret_place
+ .write_cvalue(fx, CValue::by_val_pair(ret_val_a, ret_val_b, ret_arg_abi.layout));
+ }
+ PassMode::Cast(cast) => {
+ let results =
+ fx.bcx.inst_results(call_inst).iter().copied().collect::<SmallVec<[Value; 2]>>();
+ let result =
+ super::pass_mode::from_casted_value(fx, &results, ret_place.layout(), cast);
+ ret_place.write_cvalue(fx, result);
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ if let Some(ret_temp_place) = ret_temp_place {
+ // If ret_temp_place is None, it is not necessary to copy the return value.
+ let ret_temp_value = ret_temp_place.to_cvalue(fx);
+ ret_place.write_cvalue(fx, ret_temp_value);
+ }
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ }
+}
+
+/// Codegen a return instruction with the right return value(s) if any.
+pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, '_>) {
+ match fx.fn_abi.as_ref().unwrap().ret.mode {
+ PassMode::Ignore | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ fx.bcx.ins().return_(&[]);
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ unreachable!("unsized return value")
+ }
+ PassMode::Direct(_) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let ret_val = place.to_cvalue(fx).load_scalar(fx);
+ fx.bcx.ins().return_(&[ret_val]);
+ }
+ PassMode::Pair(_, _) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let (ret_val_a, ret_val_b) = place.to_cvalue(fx).load_scalar_pair(fx);
+ fx.bcx.ins().return_(&[ret_val_a, ret_val_b]);
+ }
+ PassMode::Cast(cast) => {
+ let place = fx.get_local_place(RETURN_PLACE);
+ let ret_val = place.to_cvalue(fx);
+ let ret_vals = super::pass_mode::to_casted_value(fx, ret_val, cast);
+ fx.bcx.ins().return_(&ret_vals);
+ }
+ }
+}
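+
+// Illustrative example (not part of the upstream file): for a function returning two scalars,
+// e.g. `fn f() -> (u32, u32)` lowered as `PassMode::Pair`, `codegen_return` loads both scalars
+// of the return place and emits `return v0, v1`, while a `PassMode::Indirect` return writes
+// through the `StructReturn` pointer parameter and emits a bare `return`.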
diff --git a/compiler/rustc_codegen_cranelift/src/allocator.rs b/compiler/rustc_codegen_cranelift/src/allocator.rs
new file mode 100644
index 000000000..6d321c7b2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/allocator.rs
@@ -0,0 +1,147 @@
+//! Allocator shim
+// Adapted from rustc
+
+use crate::prelude::*;
+
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_session::config::OomStrategy;
+
+/// Returns whether an allocator shim was created
+pub(crate) fn codegen(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+) -> bool {
+ let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
+ use rustc_middle::middle::dependency_format::Linkage;
+ list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ if any_dynamic_crate {
+ false
+ } else if let Some(kind) = tcx.allocator_kind(()) {
+ codegen_inner(
+ module,
+ unwind_context,
+ kind,
+ tcx.lang_items().oom().is_some(),
+ tcx.sess.opts.unstable_opts.oom,
+ );
+ true
+ } else {
+ false
+ }
+}
+
+fn codegen_inner(
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+ oom_strategy: OomStrategy,
+) {
+ let usize_ty = module.target_config().pointer_type();
+
+ for method in ALLOCATOR_METHODS {
+ let mut arg_tys = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ arg_tys.push(usize_ty); // size
+ arg_tys.push(usize_ty); // align
+ }
+ AllocatorTy::Ptr => arg_tys.push(usize_ty),
+ AllocatorTy::Usize => arg_tys.push(usize_ty),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(usize_ty),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: arg_tys.iter().cloned().map(AbiParam::new).collect(),
+ returns: output.into_iter().map(AbiParam::new).collect(),
+ };
+
+ let caller_name = format!("__rust_{}", method.name);
+ let callee_name = kind.fn_name(method.name);
+
+ let func_id = module.declare_function(&caller_name, Linkage::Export, &sig).unwrap();
+
+ let callee_func_id = module.declare_function(&callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig.clone());
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = arg_tys
+ .into_iter()
+ .map(|ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ let call_inst = bcx.ins().call(callee_func_ref, &args);
+ let results = bcx.inst_results(call_inst).to_vec(); // Clone to prevent borrow error
+
+ bcx.ins().return_(&results);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ module.define_function(func_id, &mut ctx).unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+ }
+
+ let sig = Signature {
+ call_conv: CallConv::triple_default(module.isa().triple()),
+ params: vec![AbiParam::new(usize_ty), AbiParam::new(usize_ty)],
+ returns: vec![],
+ };
+
+ let callee_name = if has_alloc_error_handler { "__rg_oom" } else { "__rdl_oom" };
+
+ let func_id =
+ module.declare_function("__rust_alloc_error_handler", Linkage::Export, &sig).unwrap();
+
+ let callee_func_id = module.declare_function(callee_name, Linkage::Import, &sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), sig);
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let args = (&[usize_ty, usize_ty])
+ .iter()
+ .map(|&ty| bcx.append_block_param(block, ty))
+ .collect::<Vec<Value>>();
+
+ let callee_func_ref = module.declare_func_in_func(callee_func_id, &mut bcx.func);
+ bcx.ins().call(callee_func_ref, &args);
+
+ bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ module.define_function(func_id, &mut ctx).unwrap();
+ unwind_context.add_function(func_id, &ctx, module.isa());
+
+ let data_id = module.declare_data(OomStrategy::SYMBOL, Linkage::Export, false, false).unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(1);
+ let val = oom_strategy.should_panic();
+ data_ctx.define(Box::new([val]));
+ module.define_data(data_id, &data_ctx).unwrap();
+}
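+
+// Illustrative example (not part of the upstream file): an `AllocatorTy::Layout` input expands
+// to two `usize` params (size and align), so the generated `__rust_alloc(size, align)` shim
+// simply forwards both arguments to the registered allocator (e.g. `__rg_alloc` when a
+// `#[global_allocator]` is defined) and returns its result unchanged.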
diff --git a/compiler/rustc_codegen_cranelift/src/analyze.rs b/compiler/rustc_codegen_cranelift/src/analyze.rs
new file mode 100644
index 000000000..35b89358b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/analyze.rs
@@ -0,0 +1,48 @@
+//! SSA analysis
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::StatementKind::*;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub(crate) enum SsaKind {
+ NotSsa,
+ Ssa,
+}
+
+pub(crate) fn analyze(fx: &FunctionCx<'_, '_, '_>) -> IndexVec<Local, SsaKind> {
+ let mut flag_map = fx
+ .mir
+ .local_decls
+ .iter()
+ .map(|local_decl| {
+ let ty = fx.monomorphize(local_decl.ty);
+ if fx.clif_type(ty).is_some() || fx.clif_pair_type(ty).is_some() {
+ SsaKind::Ssa
+ } else {
+ SsaKind::NotSsa
+ }
+ })
+ .collect::<IndexVec<Local, SsaKind>>();
+
+ for bb in fx.mir.basic_blocks().iter() {
+ for stmt in bb.statements.iter() {
+ match &stmt.kind {
+ Assign(place_and_rval) => match &place_and_rval.1 {
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ not_ssa(&mut flag_map, place.local)
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ }
+ }
+
+ flag_map
+}
+
+fn not_ssa(flag_map: &mut IndexVec<Local, SsaKind>, local: Local) {
+ flag_map[local] = SsaKind::NotSsa;
+}
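+
+// Illustrative example (not part of the upstream file): given MIR like `_2 = &_1`, the analysis
+// above downgrades `_1` to `SsaKind::NotSsa` because its address escapes, while a local that is
+// only used by value (and has a clif scalar or pair type) remains `SsaKind::Ssa`.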
diff --git a/compiler/rustc_codegen_cranelift/src/archive.rs b/compiler/rustc_codegen_cranelift/src/archive.rs
new file mode 100644
index 000000000..b4c790961
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/archive.rs
@@ -0,0 +1,236 @@
+//! Creation of ar archives, such as for the lib and staticlib crate types
+
+use std::collections::BTreeMap;
+use std::fs::File;
+use std::io::{self, Read, Seek};
+use std::path::{Path, PathBuf};
+
+use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
+use rustc_session::Session;
+
+use object::read::archive::ArchiveFile;
+use object::{Object, ObjectSymbol, ReadCache};
+
+#[derive(Debug)]
+enum ArchiveEntry {
+ FromArchive { archive_index: usize, file_range: (u64, u64) },
+ File(PathBuf),
+}
+
+pub(crate) struct ArArchiveBuilderBuilder;
+
+impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
+ Box::new(ArArchiveBuilder {
+ sess,
+ use_gnu_style_archive: sess.target.archive_format == "gnu",
+ // FIXME fix builtin ranlib on macOS
+ no_builtin_ranlib: sess.target.is_like_osx,
+
+ src_archives: vec![],
+ entries: vec![],
+ })
+ }
+
+ fn create_dll_import_lib(
+ &self,
+ _sess: &Session,
+ _lib_name: &str,
+ _dll_imports: &[rustc_session::cstore::DllImport],
+ _tmpdir: &Path,
+ ) -> PathBuf {
+ bug!("creating dll imports is not supported");
+ }
+}
+
+pub(crate) struct ArArchiveBuilder<'a> {
+ sess: &'a Session,
+ use_gnu_style_archive: bool,
+ no_builtin_ranlib: bool,
+
+ src_archives: Vec<File>,
+ // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
+ // the end of an archive for linkers to not get confused.
+ entries: Vec<(Vec<u8>, ArchiveEntry)>,
+}
+
+impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
+ fn add_file(&mut self, file: &Path) {
+ self.entries.push((
+ file.file_name().unwrap().to_str().unwrap().to_string().into_bytes(),
+ ArchiveEntry::File(file.to_owned()),
+ ));
+ }
+
+ fn add_archive(
+ &mut self,
+ archive_path: &Path,
+ mut skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> std::io::Result<()> {
+ let read_cache = ReadCache::new(std::fs::File::open(&archive_path)?);
+ let archive = ArchiveFile::parse(&read_cache).unwrap();
+ let archive_index = self.src_archives.len();
+
+ for entry in archive.members() {
+ let entry = entry.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?;
+ let file_name = String::from_utf8(entry.name().to_vec())
+ .map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err))?;
+ if !skip(&file_name) {
+ self.entries.push((
+ file_name.into_bytes(),
+ ArchiveEntry::FromArchive { archive_index, file_range: entry.file_range() },
+ ));
+ }
+ }
+
+ self.src_archives.push(read_cache.into_inner());
+ Ok(())
+ }
+
+ fn build(mut self: Box<Self>, output: &Path) -> bool {
+ enum BuilderKind {
+ Bsd(ar::Builder<File>),
+ Gnu(ar::GnuBuilder<File>),
+ }
+
+ let sess = self.sess;
+
+ let mut symbol_table = BTreeMap::new();
+
+ let mut entries = Vec::new();
+
+ for (mut entry_name, entry) in self.entries {
+ // FIXME only read the symbol table of the object files to avoid having to keep all
+ // object files in memory at once, or read them twice.
+ let data = match entry {
+ ArchiveEntry::FromArchive { archive_index, file_range } => {
+ // FIXME read symbols from symtab
+ let src_read_cache = &mut self.src_archives[archive_index];
+
+ src_read_cache.seek(io::SeekFrom::Start(file_range.0)).unwrap();
+ let mut data = std::vec::from_elem(0, usize::try_from(file_range.1).unwrap());
+ src_read_cache.read_exact(&mut data).unwrap();
+
+ data
+ }
+ ArchiveEntry::File(file) => std::fs::read(file).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error while reading object file during archive building: {}",
+ err
+ ));
+ }),
+ };
+
+ if !self.no_builtin_ranlib {
+ if symbol_table.contains_key(&entry_name) {
+ // The ar crate can't handle creating a symbol table in case of multiple archive
+ // members with the same name. Work around this by prepending a number until we
+ // get a unique name.
+ for i in 1.. {
+ let new_name = format!("{}_", i)
+ .into_bytes()
+ .into_iter()
+ .chain(entry_name.iter().copied())
+ .collect::<Vec<_>>();
+ if !symbol_table.contains_key(&new_name) {
+ entry_name = new_name;
+ break;
+ }
+ }
+ }
+
+ match object::File::parse(&*data) {
+ Ok(object) => {
+ symbol_table.insert(
+ entry_name.to_vec(),
+ object
+ .symbols()
+ .filter_map(|symbol| {
+ if symbol.is_undefined() || symbol.is_local() {
+ None
+ } else {
+ symbol.name().map(|name| name.as_bytes().to_vec()).ok()
+ }
+ })
+ .collect::<Vec<_>>(),
+ );
+ }
+ Err(err) => {
+ let err = err.to_string();
+ if err == "Unknown file magic" {
+ // Not an object file; skip it.
+ } else {
+ sess.fatal(&format!(
+ "error parsing `{}` during archive creation: {}",
+ String::from_utf8_lossy(&entry_name),
+ err
+ ));
+ }
+ }
+ }
+ }
+
+ entries.push((entry_name, data));
+ }
+
+ let mut builder = if self.use_gnu_style_archive {
+ BuilderKind::Gnu(
+ ar::GnuBuilder::new(
+ File::create(output).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
+ entries.iter().map(|(name, _)| name.clone()).collect(),
+ ar::GnuSymbolTableFormat::Size32,
+ symbol_table,
+ )
+ .unwrap(),
+ )
+ } else {
+ BuilderKind::Bsd(
+ ar::Builder::new(
+ File::create(output).unwrap_or_else(|err| {
+ sess.fatal(&format!(
+ "error opening destination during archive building: {}",
+ err
+ ));
+ }),
+ symbol_table,
+ )
+ .unwrap(),
+ )
+ };
+
+ let any_members = !entries.is_empty();
+
+ // Add all files
+ for (entry_name, data) in entries.into_iter() {
+ let header = ar::Header::new(entry_name, data.len() as u64);
+ match builder {
+ BuilderKind::Bsd(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+ BuilderKind::Gnu(ref mut builder) => builder.append(&header, &mut &*data).unwrap(),
+ }
+ }
+
+ // Finalize archive
+ std::mem::drop(builder);
+
+ if self.no_builtin_ranlib {
+ let ranlib = crate::toolchain::get_toolchain_binary(self.sess, "ranlib");
+
+ // Run ranlib to be able to link the archive
+ let status = std::process::Command::new(ranlib)
+ .arg(output)
+ .status()
+ .expect("Couldn't run ranlib");
+
+ if !status.success() {
+ self.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
+ }
+ }
+
+ any_members
+ }
+}
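+
+// Illustrative example (not part of the upstream file): the uniqueness workaround above renames
+// a second member named `lib.o` to `1_lib.o` (then `2_lib.o`, and so on), so the symbol table
+// can still be built when several archive members share a name.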
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
new file mode 100644
index 000000000..122e103ff
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -0,0 +1,955 @@
+//! Codegen of a single function
+
+use rustc_ast::InlineAsmOptions;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+
+use indexmap::IndexSet;
+
+use crate::constant::ConstantCx;
+use crate::prelude::*;
+use crate::pretty_clif::CommentWriter;
+
+pub(crate) fn codegen_fn<'tcx>(
+ cx: &mut crate::CodegenCx<'tcx>,
+ module: &mut dyn Module,
+ instance: Instance<'tcx>,
+) {
+ let tcx = cx.tcx;
+
+ let _inst_guard =
+ crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));
+ debug_assert!(!instance.substs.needs_infer());
+
+ let mir = tcx.instance_mir(instance.def);
+ let _mir_guard = crate::PrintOnPanic(|| {
+ let mut buf = Vec::new();
+ with_no_trimmed_paths!({
+ rustc_middle::mir::pretty::write_mir_fn(tcx, mir, &mut |_, _| Ok(()), &mut buf)
+ .unwrap();
+ });
+ String::from_utf8_lossy(&buf).into_owned()
+ });
+
+ // Declare function
+ let symbol_name = tcx.symbol_name(instance);
+ let sig = get_function_sig(tcx, module.isa().triple(), instance);
+ let func_id = module.declare_function(symbol_name.name, Linkage::Local, &sig).unwrap();
+
+ cx.cached_context.clear();
+
+ // Make the FunctionBuilder
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut func = std::mem::replace(&mut cx.cached_context.func, Function::new());
+ func.name = ExternalName::user(0, func_id.as_u32());
+ func.signature = sig;
+ func.collect_debug_info();
+
+ let mut bcx = FunctionBuilder::new(&mut func, &mut func_ctx);
+
+ // Predefine blocks
+ let start_block = bcx.create_block();
+ let block_map: IndexVec<BasicBlock, Block> =
+ (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
+
+ // Make FunctionCx
+ let target_config = module.target_config();
+ let pointer_type = target_config.pointer_type();
+ let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
+
+ let mut fx = FunctionCx {
+ cx,
+ module,
+ tcx,
+ target_config,
+ pointer_type,
+ constants_cx: ConstantCx::new(),
+
+ instance,
+ symbol_name,
+ mir,
+ fn_abi: Some(RevealAllLayoutCx(tcx).fn_abi_of_instance(instance, ty::List::empty())),
+
+ bcx,
+ block_map,
+ local_map: IndexVec::with_capacity(mir.local_decls.len()),
+ caller_location: None, // set by `codegen_fn_prelude`
+
+ clif_comments,
+ source_info_set: indexmap::IndexSet::new(),
+ next_ssa_var: 0,
+ };
+
+ let arg_uninhabited = fx
+ .mir
+ .args_iter()
+ .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited());
+
+ if !crate::constant::check_constants(&mut fx) {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ crate::trap::trap_unreachable(&mut fx, "compilation should have been aborted");
+ } else if arg_uninhabited {
+ fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]);
+ fx.bcx.switch_to_block(fx.block_map[START_BLOCK]);
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ } else {
+ tcx.sess.time("codegen clif ir", || {
+ tcx.sess
+ .time("codegen prelude", || crate::abi::codegen_fn_prelude(&mut fx, start_block));
+ codegen_fn_content(&mut fx);
+ });
+ }
+
+    // Recover all necessary data from fx before accessing func, as once func is accessed the
+    // borrow checker no longer allows fx to be used.
+ let instance = fx.instance;
+ let clif_comments = fx.clif_comments;
+ let source_info_set = fx.source_info_set;
+ let local_map = fx.local_map;
+
+ fx.constants_cx.finalize(fx.tcx, &mut *fx.module);
+
+ crate::pretty_clif::write_clif_file(
+ tcx,
+ "unopt",
+ module.isa(),
+ instance,
+ &func,
+ &clif_comments,
+ );
+
+ // Verify function
+ verify_func(tcx, &clif_comments, &func);
+
+ compile_fn(
+ cx,
+ module,
+ instance,
+ symbol_name.name,
+ func_id,
+ func,
+ clif_comments,
+ source_info_set,
+ local_map,
+ );
+}
+
+fn compile_fn<'tcx>(
+ cx: &mut crate::CodegenCx<'tcx>,
+ module: &mut dyn Module,
+ instance: Instance<'tcx>,
+ symbol_name: &str,
+ func_id: FuncId,
+ func: Function,
+ mut clif_comments: CommentWriter,
+ source_info_set: IndexSet<SourceInfo>,
+ local_map: IndexVec<mir::Local, CPlace<'tcx>>,
+) {
+ let tcx = cx.tcx;
+
+ // Store function in context
+ let context = &mut cx.cached_context;
+ context.clear();
+ context.func = func;
+
+ // If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
+ // instruction, which doesn't have an encoding.
+ context.compute_cfg();
+ context.compute_domtree();
+ context.eliminate_unreachable_code(module.isa()).unwrap();
+ context.dce(module.isa()).unwrap();
+ // Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
+ // invalidate it when it would change.
+ context.domtree.clear();
+
+ // Perform rust specific optimizations
+ tcx.sess.time("optimize clif ir", || {
+ crate::optimize::optimize_function(
+ tcx,
+ module.isa(),
+ instance,
+ context,
+ &mut clif_comments,
+ );
+ });
+
+ #[cfg(any())] // This is never true
+ let _clif_guard = {
+ use std::fmt::Write;
+
+ let func_clone = context.func.clone();
+ let clif_comments_clone = clif_comments.clone();
+ let mut clif = String::new();
+ for flag in module.isa().flags().iter() {
+ writeln!(clif, "set {}", flag).unwrap();
+ }
+ write!(clif, "target {}", module.isa().triple().architecture.to_string()).unwrap();
+ for isa_flag in module.isa().isa_flags().iter() {
+ write!(clif, " {}", isa_flag).unwrap();
+ }
+ writeln!(clif, "\n").unwrap();
+ crate::PrintOnPanic(move || {
+ let mut clif = clif.clone();
+ ::cranelift_codegen::write::decorate_function(
+ &mut &clif_comments_clone,
+ &mut clif,
+ &func_clone,
+ )
+ .unwrap();
+ clif
+ })
+ };
+
+ // Define function
+ tcx.sess.time("define function", || {
+ context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
+ module.define_function(func_id, context).unwrap();
+ });
+
+ // Write optimized function to file for debugging
+ crate::pretty_clif::write_clif_file(
+ tcx,
+ "opt",
+ module.isa(),
+ instance,
+ &context.func,
+ &clif_comments,
+ );
+
+ if let Some(disasm) = &context.mach_compile_result.as_ref().unwrap().disasm {
+ crate::pretty_clif::write_ir_file(
+ tcx,
+ || format!("{}.vcode", tcx.symbol_name(instance).name),
+ |file| file.write_all(disasm.as_bytes()),
+ )
+ }
+
+ // Define debuginfo for function
+ let isa = module.isa();
+ let debug_context = &mut cx.debug_context;
+ let unwind_context = &mut cx.unwind_context;
+ tcx.sess.time("generate debug info", || {
+ if let Some(debug_context) = debug_context {
+ debug_context.define_function(
+ instance,
+ func_id,
+ symbol_name,
+ isa,
+ context,
+ &source_info_set,
+ local_map,
+ );
+ }
+ unwind_context.add_function(func_id, &context, isa);
+ });
+}
+
+pub(crate) fn verify_func(
+ tcx: TyCtxt<'_>,
+ writer: &crate::pretty_clif::CommentWriter,
+ func: &Function,
+) {
+ tcx.sess.time("verify clif ir", || {
+ let flags = cranelift_codegen::settings::Flags::new(cranelift_codegen::settings::builder());
+ match cranelift_codegen::verify_function(&func, &flags) {
+ Ok(_) => {}
+ Err(err) => {
+ tcx.sess.err(&format!("{:?}", err));
+ let pretty_error = cranelift_codegen::print_errors::pretty_verifier_error(
+ &func,
+ Some(Box::new(writer)),
+ err,
+ );
+ tcx.sess.fatal(&format!("cranelift verify error:\n{}", pretty_error));
+ }
+ }
+ });
+}
+
+fn codegen_fn_content(fx: &mut FunctionCx<'_, '_, '_>) {
+ for (bb, bb_data) in fx.mir.basic_blocks().iter_enumerated() {
+ let block = fx.get_block(bb);
+ fx.bcx.switch_to_block(block);
+
+ if bb_data.is_cleanup {
+ // Unwinding after panicking is not supported
+ continue;
+
+ // FIXME Once unwinding is supported and Cranelift supports marking blocks as cold, do
+ // so for cleanup blocks.
+ }
+
+ fx.bcx.ins().nop();
+ for stmt in &bb_data.statements {
+ fx.set_debug_loc(stmt.source_info);
+ codegen_stmt(fx, block, stmt);
+ }
+
+ if fx.clif_comments.enabled() {
+ let mut terminator_head = "\n".to_string();
+ with_no_trimmed_paths!({
+ bb_data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ });
+ let inst = fx.bcx.func.layout.last_inst(block).unwrap();
+ fx.add_comment(inst, terminator_head);
+ }
+
+ let source_info = bb_data.terminator().source_info;
+ fx.set_debug_loc(source_info);
+
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { target } => {
+ if let TerminatorKind::Return = fx.mir[*target].terminator().kind {
+ let mut can_immediately_return = true;
+ for stmt in &fx.mir[*target].statements {
+ if let StatementKind::StorageDead(_) = stmt.kind {
+ } else {
+ // FIXME Can sometimes happen, see rust-lang/rust#70531
+ can_immediately_return = false;
+ break;
+ }
+ }
+
+ if can_immediately_return {
+ crate::abi::codegen_return(fx);
+ continue;
+ }
+ }
+
+ let block = fx.get_block(*target);
+ fx.bcx.ins().jump(block, &[]);
+ }
+ TerminatorKind::Return => {
+ crate::abi::codegen_return(fx);
+ }
+ TerminatorKind::Assert { cond, expected, msg, target, cleanup: _ } => {
+ if !fx.tcx.sess.overflow_checks() {
+ if let mir::AssertKind::OverflowNeg(_) = *msg {
+ let target = fx.get_block(*target);
+ fx.bcx.ins().jump(target, &[]);
+ continue;
+ }
+ }
+ let cond = codegen_operand(fx, cond).load_scalar(fx);
+
+ let target = fx.get_block(*target);
+ let failure = fx.bcx.create_block();
+ fx.bcx.set_cold_block(failure);
+
+ if *expected {
+ fx.bcx.ins().brz(cond, failure, &[]);
+ } else {
+ fx.bcx.ins().brnz(cond, failure, &[]);
+ };
+ fx.bcx.ins().jump(target, &[]);
+
+ fx.bcx.switch_to_block(failure);
+ fx.bcx.ins().nop();
+
+ match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = codegen_operand(fx, len).load_scalar(fx);
+ let index = codegen_operand(fx, index).load_scalar(fx);
+ let location = fx.get_caller_location(source_info).load_scalar(fx);
+
+ codegen_panic_inner(
+ fx,
+ rustc_hir::LangItem::PanicBoundsCheck,
+ &[index, len, location],
+ source_info.span,
+ );
+ }
+ _ => {
+ let msg_str = msg.description();
+ codegen_panic(fx, msg_str, source_info);
+ }
+ }
+ }
+
+ TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
+ let discr = codegen_operand(fx, discr).load_scalar(fx);
+
+ let use_bool_opt = switch_ty.kind() == fx.tcx.types.bool.kind()
+ || (targets.iter().count() == 1 && targets.iter().next().unwrap().0 == 0);
+ if use_bool_opt {
+ assert_eq!(targets.iter().count(), 1);
+ let (then_value, then_block) = targets.iter().next().unwrap();
+ let then_block = fx.get_block(then_block);
+ let else_block = fx.get_block(targets.otherwise());
+ let test_zero = match then_value {
+ 0 => true,
+ 1 => false,
+ _ => unreachable!("{:?}", targets),
+ };
+
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ let (discr, is_inverted) =
+ crate::optimize::peephole::maybe_unwrap_bool_not(&mut fx.bcx, discr);
+ let test_zero = if is_inverted { !test_zero } else { test_zero };
+ let discr = crate::optimize::peephole::maybe_unwrap_bint(&mut fx.bcx, discr);
+ if let Some(taken) = crate::optimize::peephole::maybe_known_branch_taken(
+ &fx.bcx, discr, test_zero,
+ ) {
+ if taken {
+ fx.bcx.ins().jump(then_block, &[]);
+ } else {
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ } else {
+ if test_zero {
+ fx.bcx.ins().brz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ } else {
+ fx.bcx.ins().brnz(discr, then_block, &[]);
+ fx.bcx.ins().jump(else_block, &[]);
+ }
+ }
+ } else {
+ let mut switch = ::cranelift_frontend::Switch::new();
+ for (value, block) in targets.iter() {
+ let block = fx.get_block(block);
+ switch.set_entry(value, block);
+ }
+ let otherwise_block = fx.get_block(targets.otherwise());
+ switch.emit(&mut fx.bcx, discr, otherwise_block);
+ }
+ }
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ target,
+ fn_span,
+ cleanup: _,
+ from_hir_call: _,
+ } => {
+ fx.tcx.sess.time("codegen call", || {
+ crate::abi::codegen_terminator_call(
+ fx,
+ mir::SourceInfo { span: *fn_span, ..source_info },
+ func,
+ args,
+ *destination,
+ *target,
+ )
+ });
+ }
+ TerminatorKind::InlineAsm {
+ template,
+ operands,
+ options,
+ destination,
+ line_spans: _,
+ cleanup: _,
+ } => {
+ if options.contains(InlineAsmOptions::MAY_UNWIND) {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "cranelift doesn't support unwinding from inline assembly.",
+ );
+ }
+
+ crate::inline_asm::codegen_inline_asm(
+ fx,
+ source_info.span,
+ template,
+ operands,
+ *options,
+ );
+
+ match *destination {
+ Some(destination) => {
+ let destination_block = fx.get_block(destination);
+ fx.bcx.ins().jump(destination_block, &[]);
+ }
+ None => {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ }
+ }
+ TerminatorKind::Resume | TerminatorKind::Abort => {
+ // FIXME implement unwinding
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ TerminatorKind::Unreachable => {
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+ }
+ TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::GeneratorDrop => {
+ bug!("shouldn't exist at codegen {:?}", bb_data.terminator());
+ }
+ TerminatorKind::Drop { place, target, unwind: _ } => {
+ let drop_place = codegen_place(fx, *place);
+ crate::abi::codegen_drop(fx, source_info, drop_place);
+
+ let target_block = fx.get_block(*target);
+ fx.bcx.ins().jump(target_block, &[]);
+ }
+ };
+ }
+
+ fx.bcx.seal_all_blocks();
+ fx.bcx.finalize();
+}
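+
+// Illustrative example (not part of the upstream file): the bool fast path above turns MIR like
+// `switchInt(_1) -> [0: bb2, otherwise: bb1]` into a single conditional branch (`brz`/`brnz`)
+// plus an unconditional `jump`, instead of materializing a full Cranelift `Switch` table.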
+
+fn codegen_stmt<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ #[allow(unused_variables)] cur_block: Block,
+ stmt: &Statement<'tcx>,
+) {
+ let _print_guard = crate::PrintOnPanic(|| format!("stmt {:?}", stmt));
+
+ fx.set_debug_loc(stmt.source_info);
+
+ #[cfg(any())] // This is never true
+ match &stmt.kind {
+ StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
+ _ => {
+ if fx.clif_comments.enabled() {
+ let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
+ fx.add_comment(inst, format!("{:?}", stmt));
+ }
+ }
+ }
+
+ match &stmt.kind {
+ StatementKind::SetDiscriminant { place, variant_index } => {
+ let place = codegen_place(fx, **place);
+ crate::discriminant::codegen_set_discriminant(fx, place, *variant_index);
+ }
+ StatementKind::Assign(to_place_and_rval) => {
+ let lval = codegen_place(fx, to_place_and_rval.0);
+ let dest_layout = lval.layout();
+ match to_place_and_rval.1 {
+ Rvalue::Use(ref operand) => {
+ let val = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::CopyForDeref(place) => {
+ let cplace = codegen_place(fx, place);
+ let val = cplace.to_cvalue(fx);
+ lval.write_cvalue(fx, val)
+ }
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ let place = codegen_place(fx, place);
+ let ref_ = place.place_ref(fx, lval.layout());
+ lval.write_cvalue(fx, ref_);
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ let val = crate::constant::codegen_tls_ref(fx, def_id, lval.layout());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::BinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ let res = crate::num::codegen_binop(fx, bin_op, lhs, rhs);
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::CheckedBinaryOp(bin_op, ref lhs_rhs) => {
+ let lhs = codegen_operand(fx, &lhs_rhs.0);
+ let rhs = codegen_operand(fx, &lhs_rhs.1);
+
+ let res = if !fx.tcx.sess.overflow_checks() {
+ let val =
+ crate::num::codegen_int_binop(fx, bin_op, lhs, rhs).load_scalar(fx);
+ let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
+ CValue::by_val_pair(val, is_overflow, lval.layout())
+ } else {
+ crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs)
+ };
+
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::UnaryOp(un_op, ref operand) => {
+ let operand = codegen_operand(fx, operand);
+ let layout = operand.layout();
+ let val = operand.load_scalar(fx);
+ let res = match un_op {
+ UnOp::Not => match layout.ty.kind() {
+ ty::Bool => {
+ let res = fx.bcx.ins().icmp_imm(IntCC::Equal, val, 0);
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), layout)
+ }
+ ty::Uint(_) | ty::Int(_) => {
+ CValue::by_val(fx.bcx.ins().bnot(val), layout)
+ }
+ _ => unreachable!("un op Not for {:?}", layout.ty),
+ },
+ UnOp::Neg => match layout.ty.kind() {
+ ty::Int(IntTy::I128) => {
+ // FIXME remove this case once ineg.i128 works
+ let zero =
+ CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ crate::num::codegen_int_binop(fx, BinOp::Sub, zero, operand)
+ }
+ ty::Int(_) => CValue::by_val(fx.bcx.ins().ineg(val), layout),
+ ty::Float(_) => CValue::by_val(fx.bcx.ins().fneg(val), layout),
+ _ => unreachable!("un op Neg for {:?}", layout.ty),
+ },
+ };
+ lval.write_cvalue(fx, res);
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ReifyFnPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let from_ty = fx.monomorphize(operand.ty(&fx.mir.local_decls, fx.tcx));
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ match *from_ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let func_ref = fx.get_function_ref(
+ Instance::resolve_for_fn_ptr(
+ fx.tcx,
+ ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(fx.tcx),
+ );
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
+ }
+ _ => bug!("Trying to ReifyFnPointer on non FnDef {:?}", from_ty),
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::UnsafeFnPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::MutToConstPointer),
+ ref operand,
+ to_ty,
+ )
+ | Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ArrayToPointer),
+ ref operand,
+ to_ty,
+ ) => {
+ let to_layout = fx.layout_of(fx.monomorphize(to_ty));
+ let operand = codegen_operand(fx, operand);
+ lval.write_cvalue(fx, operand.cast_pointer_to(to_layout));
+ }
+ Rvalue::Cast(
+ CastKind::Misc
+ | CastKind::PointerExposeAddress
+ | CastKind::PointerFromExposedAddress,
+ ref operand,
+ to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ let from_ty = operand.layout().ty;
+ let to_ty = fx.monomorphize(to_ty);
+
+ fn is_fat_ptr<'tcx>(fx: &FunctionCx<'_, '_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.builtin_deref(true)
+ .map(|ty::TypeAndMut { ty: pointee_ty, mutbl: _ }| {
+ has_ptr_meta(fx.tcx, pointee_ty)
+ })
+ .unwrap_or(false)
+ }
+
+ if is_fat_ptr(fx, from_ty) {
+ if is_fat_ptr(fx, to_ty) {
+ // fat-ptr -> fat-ptr
+ lval.write_cvalue(fx, operand.cast_pointer_to(dest_layout));
+ } else {
+ // fat-ptr -> thin-ptr
+ let (ptr, _extra) = operand.load_scalar_pair(fx);
+ lval.write_cvalue(fx, CValue::by_val(ptr, dest_layout))
+ }
+ } else {
+ let to_clif_ty = fx.clif_type(to_ty).unwrap();
+ let from = operand.load_scalar(fx);
+
+ let res = clif_int_or_float_cast(
+ fx,
+ from,
+ type_sign(from_ty),
+ to_clif_ty,
+ type_sign(to_ty),
+ );
+ lval.write_cvalue(fx, CValue::by_val(res, dest_layout));
+ }
+ }
+ Rvalue::Cast(
+ CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+ ref operand,
+ _to_ty,
+ ) => {
+ let operand = codegen_operand(fx, operand);
+ match *operand.layout().ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ fx.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .expect("failed to normalize and resolve closure during codegen")
+ .polymorphize(fx.tcx);
+ let func_ref = fx.get_function_ref(instance);
+ let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
+ lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout().ty),
+ }
+ }
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
+ let operand = codegen_operand(fx, operand);
+ operand.unsize_value(fx, lval);
+ }
+ Rvalue::Discriminant(place) => {
+ let place = codegen_place(fx, place);
+ let value = place.to_cvalue(fx);
+ let discr =
+ crate::discriminant::codegen_get_discriminant(fx, value, dest_layout);
+ lval.write_cvalue(fx, discr);
+ }
+ Rvalue::Repeat(ref operand, times) => {
+ let operand = codegen_operand(fx, operand);
+ let times = fx
+ .monomorphize(times)
+ .eval(fx.tcx, ParamEnv::reveal_all())
+ .kind()
+ .try_to_bits(fx.tcx.data_layout.pointer_size)
+ .unwrap();
+ if operand.layout().size.bytes() == 0 {
+                    // Do nothing for ZSTs.
+ } else if fx.clif_type(operand.layout().ty) == Some(types::I8) {
+ let times = fx.bcx.ins().iconst(fx.pointer_type, times as i64);
+ // FIXME use emit_small_memset where possible
+ let addr = lval.to_ptr().get_addr(fx);
+ let val = operand.load_scalar(fx);
+ fx.bcx.call_memset(fx.target_config, addr, val, times);
+ } else {
+ let loop_block = fx.bcx.create_block();
+ let loop_block2 = fx.bcx.create_block();
+ let done_block = fx.bcx.create_block();
+ let index = fx.bcx.append_block_param(loop_block, fx.pointer_type);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ fx.bcx.ins().jump(loop_block, &[zero]);
+
+ fx.bcx.switch_to_block(loop_block);
+ let done = fx.bcx.ins().icmp_imm(IntCC::Equal, index, times as i64);
+ fx.bcx.ins().brnz(done, done_block, &[]);
+ fx.bcx.ins().jump(loop_block2, &[]);
+
+ fx.bcx.switch_to_block(loop_block2);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ let index = fx.bcx.ins().iadd_imm(index, 1);
+ fx.bcx.ins().jump(loop_block, &[index]);
+
+ fx.bcx.switch_to_block(done_block);
+ fx.bcx.ins().nop();
+ }
+ }
+ Rvalue::Len(place) => {
+ let place = codegen_place(fx, place);
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ let len = codegen_array_len(fx, place);
+ lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
+ }
+ Rvalue::ShallowInitBox(ref operand, content_ty) => {
+ let content_ty = fx.monomorphize(content_ty);
+ let box_layout = fx.layout_of(fx.tcx.mk_box(content_ty));
+ let operand = codegen_operand(fx, operand);
+ let operand = operand.load_scalar(fx);
+ lval.write_cvalue(fx, CValue::by_val(operand, box_layout));
+ }
+ Rvalue::NullaryOp(null_op, ty) => {
+ assert!(
+ lval.layout()
+ .ty
+ .is_sized(fx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all())
+ );
+ let layout = fx.layout_of(fx.monomorphize(ty));
+ let val = match null_op {
+ NullOp::SizeOf => layout.size.bytes(),
+ NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ let val = CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), val.into());
+ lval.write_cvalue(fx, val);
+ }
+ Rvalue::Aggregate(ref kind, ref operands) => match kind.as_ref() {
+ AggregateKind::Array(_ty) => {
+ for (i, operand) in operands.iter().enumerate() {
+ let operand = codegen_operand(fx, operand);
+ let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
+ let to = lval.place_index(fx, index);
+ to.write_cvalue(fx, operand);
+ }
+ }
+ _ => unreachable!("shouldn't exist at codegen {:?}", to_place_and_rval.1),
+ },
+ }
+ }
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Deinit(_)
+ | StatementKind::Nop
+ | StatementKind::FakeRead(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..) => {}
+
+ StatementKind::Coverage { .. } => fx.tcx.sess.fatal("-Zcoverage is unimplemented"),
+ StatementKind::CopyNonOverlapping(inner) => {
+ let dst = codegen_operand(fx, &inner.dst);
+ let pointee = dst
+ .layout()
+ .pointee_info_at(fx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let dst = dst.load_scalar(fx);
+ let src = codegen_operand(fx, &inner.src).load_scalar(fx);
+ let count = codegen_operand(fx, &inner.count).load_scalar(fx);
+ let elem_size: u64 = pointee.size.bytes();
+ let bytes =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+ fx.bcx.call_memcpy(fx.target_config, dst, src, bytes);
+ }
+ }
+}
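+
+// Illustrative example (not part of the upstream file): with overflow checks disabled, a MIR
+// `CheckedBinaryOp(Add, a, b)` is lowered by the code above to a plain integer add paired with
+// a constant `iconst.i8 0` overflow flag, skipping the more expensive checked lowering.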
+
+fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx>) -> Value {
+ match *place.layout().ty.kind() {
+ ty::Array(_elem_ty, len) => {
+ let len = fx.monomorphize(len).eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
+ fx.bcx.ins().iconst(fx.pointer_type, len)
+ }
+ ty::Slice(_elem_ty) => {
+ place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
+ }
+ _ => bug!("Rvalue::Len({:?})", place),
+ }
+}
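+
+// Illustrative example (not part of the upstream file): for a place of type `[u8; 4]` this
+// emits `iconst 4` of pointer width, while for a `[u8]` place the length is taken from the
+// slice's pointer metadata and no constant is materialized.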
+
+pub(crate) fn codegen_place<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: Place<'tcx>,
+) -> CPlace<'tcx> {
+ let mut cplace = fx.get_local_place(place.local);
+
+ for elem in place.projection {
+ match elem {
+ PlaceElem::Deref => {
+ cplace = cplace.place_deref(fx);
+ }
+ PlaceElem::Field(field, _ty) => {
+ cplace = cplace.place_field(fx, field);
+ }
+ PlaceElem::Index(local) => {
+ let index = fx.get_local_place(local).to_cvalue(fx).load_scalar(fx);
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::ConstantIndex { offset, min_length: _, from_end } => {
+ let offset: u64 = offset;
+ let index = if !from_end {
+ fx.bcx.ins().iconst(fx.pointer_type, offset as i64)
+ } else {
+ let len = codegen_array_len(fx, cplace);
+ fx.bcx.ins().iadd_imm(len, -(offset as i64))
+ };
+ cplace = cplace.place_index(fx, index);
+ }
+ PlaceElem::Subslice { from, to, from_end } => {
+ // These indices are generated by slice patterns.
+ // slice[from:-to] in Python terms.
+
+ let from: u64 = from;
+ let to: u64 = to;
+
+ match cplace.layout().ty.kind() {
+ ty::Array(elem_ty, _len) => {
+ assert!(!from_end, "array subslices are never `from_end`");
+ let elem_layout = fx.layout_of(*elem_ty);
+ let ptr = cplace.to_ptr();
+ cplace = CPlace::for_ptr(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.layout_of(fx.tcx.mk_array(*elem_ty, to - from)),
+ );
+ }
+ ty::Slice(elem_ty) => {
+ assert!(from_end, "slice subslices should be `from_end`");
+ let elem_layout = fx.layout_of(*elem_ty);
+ let (ptr, len) = cplace.to_ptr_maybe_unsized();
+ let len = len.unwrap();
+ cplace = CPlace::for_ptr_with_extra(
+ ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
+ fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),
+ cplace.layout(),
+ );
+ }
+ _ => unreachable!(),
+ }
+ }
+ PlaceElem::Downcast(_adt_def, variant) => {
+ cplace = cplace.downcast_variant(fx, variant);
+ }
+ }
+ }
+
+ cplace
+}
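+
+// Illustrative example (not part of the upstream file): a place like `(*s)[i].0` is lowered by
+// the loop above as a projection chain starting from the local `s`: `Deref` follows the
+// pointer, `Index(i)` computes the element address, and `Field(0)` offsets to the first field.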
+
+pub(crate) fn codegen_operand<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> CValue<'tcx> {
+ match operand {
+ Operand::Move(place) | Operand::Copy(place) => {
+ let cplace = codegen_place(fx, *place);
+ cplace.to_cvalue(fx)
+ }
+ Operand::Constant(const_) => crate::constant::codegen_constant(fx, const_),
+ }
+}
+
+pub(crate) fn codegen_panic<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ msg_str: &str,
+ source_info: mir::SourceInfo,
+) {
+ let location = fx.get_caller_location(source_info).load_scalar(fx);
+
+ let msg_ptr = fx.anonymous_str(msg_str);
+ let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
+ let args = [msg_ptr, msg_len, location];
+
+ codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, source_info.span);
+}
+
+pub(crate) fn codegen_panic_inner<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lang_item: rustc_hir::LangItem,
+ args: &[Value],
+ span: Span,
+) {
+ let def_id =
+ fx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| fx.tcx.sess.span_fatal(span, &s));
+
+ let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
+ let symbol_name = fx.tcx.symbol_name(instance).name;
+
+ fx.lib_call(
+ &*symbol_name,
+ vec![
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![],
+ args,
+ );
+
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/cast.rs b/compiler/rustc_codegen_cranelift/src/cast.rs
new file mode 100644
index 000000000..bad5d1f08
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/cast.rs
@@ -0,0 +1,164 @@
+//! Various number casting functions
+
+use crate::prelude::*;
+
+pub(crate) fn clif_intcast(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ val: Value,
+ to: Type,
+ signed: bool,
+) -> Value {
+ let from = fx.bcx.func.dfg.value_type(val);
+ match (from, to) {
+ // equal
+ (_, _) if from == to => val,
+
+ // extend
+ (_, _) if to.wider_or_equal(from) => {
+ if signed {
+ fx.bcx.ins().sextend(to, val)
+ } else {
+ fx.bcx.ins().uextend(to, val)
+ }
+ }
+
+ // reduce
+ (_, _) => fx.bcx.ins().ireduce(to, val),
+ }
+}
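+
+// Illustrative example (not part of the upstream file): `clif_intcast(fx, v, types::I64, true)`
+// on an `i32`-typed value emits `sextend.i64`, the unsigned variant emits `uextend.i64`, and
+// narrowing an `i64` value to `types::I32` emits `ireduce.i32` regardless of signedness.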
+
+pub(crate) fn clif_int_or_float_cast(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ from: Value,
+ from_signed: bool,
+ to_ty: Type,
+ to_signed: bool,
+) -> Value {
+ let from_ty = fx.bcx.func.dfg.value_type(from);
+
+ if from_ty.is_int() && to_ty.is_int() {
+ // int-like -> int-like
+ clif_intcast(
+ fx,
+ from,
+ to_ty,
+            // This is correct, as either from_signed == to_signed (in which case this is
+            // trivially correct), or from_clif_ty == to_clif_ty, which means this is a no-op.
+ )
+ } else if from_ty.is_int() && to_ty.is_float() {
+ if from_ty == types::I128 {
+ // _______ss__f_
+ // __float tisf: i128 -> f32
+ // __float tidf: i128 -> f64
+ // __floatuntisf: u128 -> f32
+ // __floatuntidf: u128 -> f64
+
+ let name = format!(
+ "__float{sign}ti{flt}f",
+ sign = if from_signed { "" } else { "un" },
+ flt = match to_ty {
+ types::F32 => "s",
+ types::F64 => "d",
+ _ => unreachable!("{:?}", to_ty),
+ },
+ );
+
+ let from_rust_ty = if from_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+
+ let to_rust_ty = match to_ty {
+ types::F32 => fx.tcx.types.f32,
+ types::F64 => fx.tcx.types.f64,
+ _ => unreachable!(),
+ };
+
+ return fx
+ .easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
+ .load_scalar(fx);
+ }
+
+ // int-like -> float
+ if from_signed {
+ fx.bcx.ins().fcvt_from_sint(to_ty, from)
+ } else {
+ fx.bcx.ins().fcvt_from_uint(to_ty, from)
+ }
+ } else if from_ty.is_float() && to_ty.is_int() {
+ let val = if to_ty == types::I128 {
+ // _____sssf___
+ // __fix sfti: f32 -> i128
+ // __fix dfti: f64 -> i128
+ // __fixunssfti: f32 -> u128
+ // __fixunsdfti: f64 -> u128
+
+ let name = format!(
+ "__fix{sign}{flt}fti",
+ sign = if to_signed { "" } else { "uns" },
+ flt = match from_ty {
+ types::F32 => "s",
+ types::F64 => "d",
+                    _ => unreachable!("{:?}", from_ty),
+ },
+ );
+
+ let from_rust_ty = match from_ty {
+ types::F32 => fx.tcx.types.f32,
+ types::F64 => fx.tcx.types.f64,
+ _ => unreachable!(),
+ };
+
+ let to_rust_ty = if to_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+
+ fx.easy_call(&name, &[CValue::by_val(from, fx.layout_of(from_rust_ty))], to_rust_ty)
+ .load_scalar(fx)
+ } else if to_ty == types::I8 || to_ty == types::I16 {
+ // FIXME implement fcvt_to_*int_sat.i8/i16
+ let val = if to_signed {
+ fx.bcx.ins().fcvt_to_sint_sat(types::I32, from)
+ } else {
+ fx.bcx.ins().fcvt_to_uint_sat(types::I32, from)
+ };
+ let (min, max) = match (to_ty, to_signed) {
+ (types::I8, false) => (0, i64::from(u8::MAX)),
+ (types::I16, false) => (0, i64::from(u16::MAX)),
+ (types::I8, true) => (i64::from(i8::MIN), i64::from(i8::MAX)),
+ (types::I16, true) => (i64::from(i16::MIN), i64::from(i16::MAX)),
+ _ => unreachable!(),
+ };
+ let min_val = fx.bcx.ins().iconst(types::I32, min);
+ let max_val = fx.bcx.ins().iconst(types::I32, max);
+
+ let val = if to_signed {
+ let has_underflow = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, min);
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThan, val, max);
+ let bottom_capped = fx.bcx.ins().select(has_underflow, min_val, val);
+ fx.bcx.ins().select(has_overflow, max_val, bottom_capped)
+ } else {
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, val, max);
+ fx.bcx.ins().select(has_overflow, max_val, val)
+ };
+ fx.bcx.ins().ireduce(to_ty, val)
+ } else if to_signed {
+ fx.bcx.ins().fcvt_to_sint_sat(to_ty, from)
+ } else {
+ fx.bcx.ins().fcvt_to_uint_sat(to_ty, from)
+ };
+
+ if let Some(false) = fx.tcx.sess.opts.unstable_opts.saturating_float_casts {
+ return val;
+ }
+
+ let is_not_nan = fx.bcx.ins().fcmp(FloatCC::Equal, from, from);
+ let zero = fx.bcx.ins().iconst(to_ty, 0);
+ fx.bcx.ins().select(is_not_nan, val, zero)
+ } else if from_ty.is_float() && to_ty.is_float() {
+ // float -> float
+ match (from_ty, to_ty) {
+ (types::F32, types::F64) => fx.bcx.ins().fpromote(types::F64, from),
+ (types::F64, types::F32) => fx.bcx.ins().fdemote(types::F32, from),
+ _ => from,
+ }
+ } else {
+ unreachable!("cast value from {:?} to {:?}", from_ty, to_ty);
+ }
+}
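+
+// Illustrative example (not part of the upstream file): an `f32 as i8` cast takes the
+// `to_ty == types::I8` path above: the value is saturated into an `i32` via `fcvt_to_sint_sat`,
+// clamped to `[-128, 127]` with two compares and selects, reduced to `i8`, and NaN inputs are
+// finally mapped to 0 by the trailing `select` over `fcmp eq(from, from)`.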
diff --git a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
new file mode 100644
index 000000000..638b2d573
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs
@@ -0,0 +1,153 @@
+//! Replaces 128-bit operators with lang item calls where necessary
+
+use cranelift_codegen::ir::ArgumentPurpose;
+
+use crate::prelude::*;
+
+pub(crate) fn maybe_codegen<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ checked: bool,
+ lhs: CValue<'tcx>,
+ rhs: CValue<'tcx>,
+) -> Option<CValue<'tcx>> {
+ if lhs.layout().ty != fx.tcx.types.u128
+ && lhs.layout().ty != fx.tcx.types.i128
+ && rhs.layout().ty != fx.tcx.types.u128
+ && rhs.layout().ty != fx.tcx.types.i128
+ {
+ return None;
+ }
+
+ let is_signed = type_sign(lhs.layout().ty);
+
+ match bin_op {
+ BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => {
+ assert!(!checked);
+ None
+ }
+ BinOp::Add | BinOp::Sub if !checked => None,
+ BinOp::Mul if !checked || is_signed => {
+ if !checked {
+ let val_ty = if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 };
+ if fx.tcx.sess.target.is_like_windows {
+ let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
+ let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+ let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+ assert!(lhs_extra.is_none());
+ assert!(rhs_extra.is_none());
+ let args = [
+ ret_place.to_ptr().get_addr(fx),
+ lhs_ptr.get_addr(fx),
+ rhs_ptr.get_addr(fx),
+ ];
+ fx.lib_call(
+ "__multi3",
+ vec![
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![],
+ &args,
+ );
+ Some(ret_place.to_cvalue(fx))
+ } else {
+ Some(fx.easy_call("__multi3", &[lhs, rhs], val_ty))
+ }
+ } else {
+ let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+ let oflow = CPlace::new_stack_slot(fx, fx.layout_of(fx.tcx.types.i32));
+ let lhs = lhs.load_scalar(fx);
+ let rhs = rhs.load_scalar(fx);
+ let oflow_ptr = oflow.to_ptr().get_addr(fx);
+ let res = fx.lib_call(
+ "__muloti4",
+ vec![
+ AbiParam::new(types::I128),
+ AbiParam::new(types::I128),
+ AbiParam::new(fx.pointer_type),
+ ],
+ vec![AbiParam::new(types::I128)],
+ &[lhs, rhs, oflow_ptr],
+ )[0];
+ let oflow = oflow.to_cvalue(fx).load_scalar(fx);
+ let oflow = fx.bcx.ins().ireduce(types::I8, oflow);
+ Some(CValue::by_val_pair(res, oflow, fx.layout_of(out_ty)))
+ }
+ }
+ BinOp::Add | BinOp::Sub | BinOp::Mul => {
+ assert!(checked);
+ let out_ty = fx.tcx.mk_tup([lhs.layout().ty, fx.tcx.types.bool].iter());
+ let out_place = CPlace::new_stack_slot(fx, fx.layout_of(out_ty));
+ let (param_types, args) = if fx.tcx.sess.target.is_like_windows {
+ let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+ let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+ assert!(lhs_extra.is_none());
+ assert!(rhs_extra.is_none());
+ (
+ vec![
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
+ AbiParam::new(fx.pointer_type),
+ AbiParam::new(fx.pointer_type),
+ ],
+ [out_place.to_ptr().get_addr(fx), lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)],
+ )
+ } else {
+ (
+ vec![
+ AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn),
+ AbiParam::new(types::I128),
+ AbiParam::new(types::I128),
+ ],
+ [out_place.to_ptr().get_addr(fx), lhs.load_scalar(fx), rhs.load_scalar(fx)],
+ )
+ };
+ let name = match (bin_op, is_signed) {
+ (BinOp::Add, false) => "__rust_u128_addo",
+ (BinOp::Add, true) => "__rust_i128_addo",
+ (BinOp::Sub, false) => "__rust_u128_subo",
+ (BinOp::Sub, true) => "__rust_i128_subo",
+ (BinOp::Mul, false) => "__rust_u128_mulo",
+ _ => unreachable!(),
+ };
+ fx.lib_call(name, param_types, vec![], &args);
+ Some(out_place.to_cvalue(fx))
+ }
+ BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
+ BinOp::Div | BinOp::Rem => {
+ assert!(!checked);
+ let name = match (bin_op, is_signed) {
+ (BinOp::Div, false) => "__udivti3",
+ (BinOp::Div, true) => "__divti3",
+ (BinOp::Rem, false) => "__umodti3",
+ (BinOp::Rem, true) => "__modti3",
+ _ => unreachable!(),
+ };
+ if fx.tcx.sess.target.is_like_windows {
+ let (lhs_ptr, lhs_extra) = lhs.force_stack(fx);
+ let (rhs_ptr, rhs_extra) = rhs.force_stack(fx);
+ assert!(lhs_extra.is_none());
+ assert!(rhs_extra.is_none());
+ let args = [lhs_ptr.get_addr(fx), rhs_ptr.get_addr(fx)];
+ let ret = fx.lib_call(
+ name,
+ vec![AbiParam::new(fx.pointer_type), AbiParam::new(fx.pointer_type)],
+ vec![AbiParam::new(types::I64X2)],
+ &args,
+ )[0];
+ // FIXME use bitcast instead of store to get from i64x2 to i128
+ let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
+ ret_place.to_ptr().store(fx, ret, MemFlags::trusted());
+ Some(ret_place.to_cvalue(fx))
+ } else {
+ Some(fx.easy_call(name, &[lhs, rhs], lhs.layout().ty))
+ }
+ }
+ BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
+ assert!(!checked);
+ None
+ }
+ BinOp::Shl | BinOp::Shr => None,
+ }
+}
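+
+ // For reference: the `__rust_*_addo`/`_subo`/`_mulo` helpers called above have
+ // the same semantics as the standard overflowing operations; a minimal model
+ // of `__rust_u128_addo` (a sketch, not the actual implementation):
+ //
+ // fn rust_u128_addo_model(a: u128, b: u128) -> (u128, bool) {
+ //     a.overflowing_add(b) // (wrapping result, overflow flag)
+ // }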
diff --git a/compiler/rustc_codegen_cranelift/src/common.rs b/compiler/rustc_codegen_cranelift/src/common.rs
new file mode 100644
index 000000000..f9dc1b516
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/common.rs
@@ -0,0 +1,475 @@
+use cranelift_codegen::isa::TargetFrontendConfig;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::layout::{
+ FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers,
+};
+use rustc_middle::ty::SymbolName;
+use rustc_target::abi::call::FnAbi;
+use rustc_target::abi::{Integer, Primitive};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::constant::ConstantCx;
+use crate::prelude::*;
+
+pub(crate) fn pointer_ty(tcx: TyCtxt<'_>) -> types::Type {
+ match tcx.data_layout.pointer_size.bits() {
+ 16 => types::I16,
+ 32 => types::I32,
+ 64 => types::I64,
+ bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits),
+ }
+}
+
+pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
+ match scalar.primitive() {
+ Primitive::Int(int, _sign) => match int {
+ Integer::I8 => types::I8,
+ Integer::I16 => types::I16,
+ Integer::I32 => types::I32,
+ Integer::I64 => types::I64,
+ Integer::I128 => types::I128,
+ },
+ Primitive::F32 => types::F32,
+ Primitive::F64 => types::F64,
+ Primitive::Pointer => pointer_ty(tcx),
+ }
+}
+
+fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Type> {
+ Some(match ty.kind() {
+ ty::Bool => types::I8,
+ ty::Uint(size) => match size {
+ UintTy::U8 => types::I8,
+ UintTy::U16 => types::I16,
+ UintTy::U32 => types::I32,
+ UintTy::U64 => types::I64,
+ UintTy::U128 => types::I128,
+ UintTy::Usize => pointer_ty(tcx),
+ },
+ ty::Int(size) => match size {
+ IntTy::I8 => types::I8,
+ IntTy::I16 => types::I16,
+ IntTy::I32 => types::I32,
+ IntTy::I64 => types::I64,
+ IntTy::I128 => types::I128,
+ IntTy::Isize => pointer_ty(tcx),
+ },
+ ty::Char => types::I32,
+ ty::Float(size) => match size {
+ FloatTy::F32 => types::F32,
+ FloatTy::F64 => types::F64,
+ },
+ ty::FnPtr(_) => pointer_ty(tcx),
+ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ if has_ptr_meta(tcx, *pointee_ty) {
+ return None;
+ } else {
+ pointer_ty(tcx)
+ }
+ }
+ ty::Adt(adt_def, _) if adt_def.repr().simd() => {
+ let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
+ {
+ Abi::Vector { element, count } => (element.clone(), *count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
+ _ => return None,
+ }
+ }
+ ty::Param(_) => bug!("ty param {:?}", ty),
+ _ => return None,
+ })
+}
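+
+ // A few concrete mappings as orientation (hypothetical calls; the `None`
+ // cases are handled as pairs or in-memory values elsewhere):
+ //
+ // clif_type_from_ty(tcx, tcx.types.bool)  == Some(types::I8)
+ // clif_type_from_ty(tcx, tcx.types.char)  == Some(types::I32)
+ // clif_type_from_ty(tcx, tcx.types.usize) == Some(pointer_ty(tcx))
+ // clif_type_from_ty(tcx, /* &[u8] */)     == None, it needs a (ptr, len) pair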
+
+fn clif_pair_type_from_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+) -> Option<(types::Type, types::Type)> {
+ Some(match ty.kind() {
+ ty::Tuple(types) if types.len() == 2 => {
+ let a = clif_type_from_ty(tcx, types[0])?;
+ let b = clif_type_from_ty(tcx, types[1])?;
+ if a.is_vector() || b.is_vector() {
+ return None;
+ }
+ (a, b)
+ }
+ ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
+ if has_ptr_meta(tcx, *pointee_ty) {
+ (pointer_ty(tcx), pointer_ty(tcx))
+ } else {
+ return None;
+ }
+ }
+ _ => return None,
+ })
+}
+
+/// Is a pointer to this type a fat ptr?
+pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+ let ptr_ty = tcx.mk_ptr(TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
+ match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
+ Abi::Scalar(_) => false,
+ Abi::ScalarPair(_, _) => true,
+ abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
+ }
+}
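+
+ // Intuition: a pointer is "fat" exactly when its layout is a ScalarPair, i.e.
+ // it carries metadata next to the address. This is observable from plain Rust
+ // (the sizes shown hold on all targets cg_clif supports):
+ //
+ // use std::mem::size_of;
+ // assert_eq!(size_of::<&u8>(), size_of::<usize>());            // thin    -> false
+ // assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());      // +len    -> true
+ // assert_eq!(size_of::<&dyn Send>(), 2 * size_of::<usize>());  // +vtable -> true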
+
+pub(crate) fn codegen_icmp_imm(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ intcc: IntCC,
+ lhs: Value,
+ rhs: i128,
+) -> Value {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ if lhs_ty == types::I128 {
+ // FIXME legalize `icmp_imm.i128` in Cranelift
+
+ let (lhs_lsb, lhs_msb) = fx.bcx.ins().isplit(lhs);
+ let (rhs_lsb, rhs_msb) = (rhs as u128 as u64 as i64, (rhs as u128 >> 64) as u64 as i64);
+
+ match intcc {
+ IntCC::Equal => {
+ let lsb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_lsb, rhs_lsb);
+ let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+ fx.bcx.ins().band(lsb_eq, msb_eq)
+ }
+ IntCC::NotEqual => {
+ let lsb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_lsb, rhs_lsb);
+ let msb_ne = fx.bcx.ins().icmp_imm(IntCC::NotEqual, lhs_msb, rhs_msb);
+ fx.bcx.ins().bor(lsb_ne, msb_ne)
+ }
+ _ => {
+ // if msb_eq {
+ // lsb_cc
+ // } else {
+ // msb_cc
+ // }
+
+ let msb_eq = fx.bcx.ins().icmp_imm(IntCC::Equal, lhs_msb, rhs_msb);
+ let lsb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_lsb, rhs_lsb);
+ let msb_cc = fx.bcx.ins().icmp_imm(intcc, lhs_msb, rhs_msb);
+
+ fx.bcx.ins().select(msb_eq, lsb_cc, msb_cc)
+ }
+ }
+ } else {
+ let rhs = i64::try_from(rhs).expect("codegen_icmp_imm rhs out of range for <128bit int");
+ fx.bcx.ins().icmp_imm(intcc, lhs, rhs)
+ }
+}
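+
+ // The msb/lsb split above mirrors this scalar logic, shown here for an
+ // unsigned compare (a model for illustration only, not called anywhere):
+ //
+ // fn icmp_ult_i128_model(lhs: u128, rhs: u128) -> bool {
+ //     let (lhs_lsb, lhs_msb) = (lhs as u64, (lhs >> 64) as u64);
+ //     let (rhs_lsb, rhs_msb) = (rhs as u64, (rhs >> 64) as u64);
+ //     if lhs_msb == rhs_msb { lhs_lsb < rhs_lsb } else { lhs_msb < rhs_msb }
+ // }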
+
+pub(crate) fn type_min_max_value(
+ bcx: &mut FunctionBuilder<'_>,
+ ty: Type,
+ signed: bool,
+) -> (Value, Value) {
+ assert!(ty.is_int());
+
+ if ty == types::I128 {
+ if signed {
+ let min = i128::MIN as u128;
+ let min_lsb = bcx.ins().iconst(types::I64, min as u64 as i64);
+ let min_msb = bcx.ins().iconst(types::I64, (min >> 64) as u64 as i64);
+ let min = bcx.ins().iconcat(min_lsb, min_msb);
+
+ let max = i128::MAX as u128;
+ let max_lsb = bcx.ins().iconst(types::I64, max as u64 as i64);
+ let max_msb = bcx.ins().iconst(types::I64, (max >> 64) as u64 as i64);
+ let max = bcx.ins().iconcat(max_lsb, max_msb);
+
+ return (min, max);
+ } else {
+ let min_half = bcx.ins().iconst(types::I64, 0);
+ let min = bcx.ins().iconcat(min_half, min_half);
+
+ let max_half = bcx.ins().iconst(types::I64, u64::MAX as i64);
+ let max = bcx.ins().iconcat(max_half, max_half);
+
+ return (min, max);
+ }
+ }
+
+ let min = match (ty, signed) {
+ (types::I8, false) | (types::I16, false) | (types::I32, false) | (types::I64, false) => {
+ 0i64
+ }
+ (types::I8, true) => i64::from(i8::MIN),
+ (types::I16, true) => i64::from(i16::MIN),
+ (types::I32, true) => i64::from(i32::MIN),
+ (types::I64, true) => i64::MIN,
+ _ => unreachable!(),
+ };
+
+ let max = match (ty, signed) {
+ (types::I8, false) => i64::from(u8::MAX),
+ (types::I16, false) => i64::from(u16::MAX),
+ (types::I32, false) => i64::from(u32::MAX),
+ (types::I64, false) => u64::MAX as i64,
+ (types::I8, true) => i64::from(i8::MAX),
+ (types::I16, true) => i64::from(i16::MAX),
+ (types::I32, true) => i64::from(i32::MAX),
+ (types::I64, true) => i64::MAX,
+ _ => unreachable!(),
+ };
+
+ let (min, max) = (bcx.ins().iconst(ty, min), bcx.ins().iconst(ty, max));
+
+ (min, max)
+}
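+
+ // Example of a hypothetical call and the constants it materializes:
+ //
+ // let (min, max) = type_min_max_value(&mut bcx, types::I16, true);
+ // // min = iconst.i16 -32768, max = iconst.i16 32767
+ //
+ // The I128 case above goes through two I64 halves because `iconst`
+ // immediates are at most 64 bits wide.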
+
+pub(crate) fn type_sign(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..) | ty::Char | ty::Uint(..) | ty::Bool => false,
+ ty::Int(..) => true,
+ ty::Float(..) => false, // `signed` is unused for floats
+ _ => panic!("{}", ty),
+ }
+}
+
+pub(crate) struct FunctionCx<'m, 'clif, 'tcx: 'm> {
+ pub(crate) cx: &'clif mut crate::CodegenCx<'tcx>,
+ pub(crate) module: &'m mut dyn Module,
+ pub(crate) tcx: TyCtxt<'tcx>,
+ pub(crate) target_config: TargetFrontendConfig, // Cached from module
+ pub(crate) pointer_type: Type, // Cached from module
+ pub(crate) constants_cx: ConstantCx,
+
+ pub(crate) instance: Instance<'tcx>,
+ pub(crate) symbol_name: SymbolName<'tcx>,
+ pub(crate) mir: &'tcx Body<'tcx>,
+ pub(crate) fn_abi: Option<&'tcx FnAbi<'tcx, Ty<'tcx>>>,
+
+ pub(crate) bcx: FunctionBuilder<'clif>,
+ pub(crate) block_map: IndexVec<BasicBlock, Block>,
+ pub(crate) local_map: IndexVec<Local, CPlace<'tcx>>,
+
+ /// When `#[track_caller]` is used, the implicit caller location is stored in this variable.
+ pub(crate) caller_location: Option<CValue<'tcx>>,
+
+ pub(crate) clif_comments: crate::pretty_clif::CommentWriter,
+ pub(crate) source_info_set: indexmap::IndexSet<SourceInfo>,
+
+ /// This should only be accessed by `CPlace::new_var`.
+ pub(crate) next_ssa_var: u32,
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ RevealAllLayoutCx(self.tcx).handle_layout_err(err, span, ty)
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ RevealAllLayoutCx(self.tcx).handle_fn_abi_err(err, span, fn_abi_request)
+ }
+}
+
+impl<'tcx> layout::HasTyCtxt<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx> rustc_target::abi::HasDataLayout for FunctionCx<'_, '_, 'tcx> {
+ fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'tcx> layout::HasParamEnv<'tcx> for FunctionCx<'_, '_, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'tcx> HasTargetSpec for FunctionCx<'_, '_, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target
+ }
+}
+
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
+ pub(crate) fn monomorphize<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ self.instance.subst_mir_and_normalize_erasing_regions(
+ self.tcx,
+ ty::ParamEnv::reveal_all(),
+ value,
+ )
+ }
+
+ pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
+ clif_type_from_ty(self.tcx, ty)
+ }
+
+ pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
+ clif_pair_type_from_ty(self.tcx, ty)
+ }
+
+ pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
+ *self.block_map.get(bb).unwrap()
+ }
+
+ pub(crate) fn get_local_place(&mut self, local: Local) -> CPlace<'tcx> {
+ *self.local_map.get(local).unwrap_or_else(|| {
+ panic!("Local {:?} doesn't exist", local);
+ })
+ }
+
+ pub(crate) fn set_debug_loc(&mut self, source_info: mir::SourceInfo) {
+ let (index, _) = self.source_info_set.insert_full(source_info);
+ self.bcx.set_srcloc(SourceLoc::new(index as u32));
+ }
+
+ // Note: must be kept in sync with get_caller_location from cg_ssa
+ pub(crate) fn get_caller_location(&mut self, mut source_info: mir::SourceInfo) -> CValue<'tcx> {
+ let span_to_caller_location = |fx: &mut FunctionCx<'_, '_, 'tcx>, span: Span| {
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = fx.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ let const_loc = fx.tcx.const_caller_location((
+ rustc_span::symbol::Symbol::intern(
+ &caller.file.name.prefer_remapped().to_string_lossy(),
+ ),
+ caller.line as u32,
+ caller.col_display as u32 + 1,
+ ));
+ crate::constant::codegen_const_value(fx, const_loc, fx.tcx.caller_location_ty())
+ };
+
+ // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+ // If so, the starting `source_info.span` is in the innermost inlined
+ // function, and will be replaced with outer callsite spans as long
+ // as the inlined functions were `#[track_caller]`.
+ loop {
+ let scope_data = &self.mir.source_scopes[source_info.scope];
+
+ if let Some((callee, callsite_span)) = scope_data.inlined {
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !callee.def.requires_caller_location(self.tcx) {
+ return span_to_caller_location(self, source_info.span);
+ }
+ source_info.span = callsite_span;
+ }
+
+ // Skip past all of the parents with `inlined: None`.
+ match scope_data.inlined_parent_scope {
+ Some(parent) => source_info.scope = parent,
+ None => break,
+ }
+ }
+
+ // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
+ self.caller_location.unwrap_or_else(|| span_to_caller_location(self, source_info.span))
+ }
+
+ pub(crate) fn anonymous_str(&mut self, msg: &str) -> Value {
+ let mut data_ctx = DataContext::new();
+ data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
+ let msg_id = self.module.declare_anonymous_data(false, false).unwrap();
+
+ // Ignore DuplicateDefinition error, as the data will be the same
+ let _ = self.module.define_data(msg_id, &data_ctx);
+
+ let local_msg_id = self.module.declare_data_in_func(msg_id, self.bcx.func);
+ if self.clif_comments.enabled() {
+ self.add_comment(local_msg_id, msg);
+ }
+ self.bcx.ins().global_value(self.pointer_type, local_msg_id)
+ }
+}
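+
+ // The values built by `get_caller_location` are what `#[track_caller]`
+ // functions observe at runtime; at the source level the feature looks like:
+ //
+ // #[track_caller]
+ // fn log_caller() {
+ //     let loc = std::panic::Location::caller();
+ //     println!("{}:{}:{}", loc.file(), loc.line(), loc.column());
+ // }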
+
+pub(crate) struct RevealAllLayoutCx<'tcx>(pub(crate) TyCtxt<'tcx>);
+
+impl<'tcx> LayoutOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ if let layout::LayoutError::SizeOverflow(_) = err {
+ self.0.sess.span_fatal(span, &err.to_string())
+ } else {
+ span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ }
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for RevealAllLayoutCx<'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+ self.0.sess.span_fatal(span, &err.to_string())
+ } else {
+ match fn_abi_request {
+ FnAbiRequest::OfFnPtr { sig, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
+ sig,
+ extra_args,
+ err
+ );
+ }
+ FnAbiRequest::OfInstance { instance, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_instance({}, {:?})` failed: {}",
+ instance,
+ extra_args,
+ err
+ );
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> layout::HasTyCtxt<'tcx> for RevealAllLayoutCx<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.0
+ }
+}
+
+impl<'tcx> rustc_target::abi::HasDataLayout for RevealAllLayoutCx<'tcx> {
+ fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
+ &self.0.data_layout
+ }
+}
+
+impl<'tcx> layout::HasParamEnv<'tcx> for RevealAllLayoutCx<'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'tcx> HasTargetSpec for RevealAllLayoutCx<'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.0.sess.target
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs
new file mode 100644
index 000000000..c6a247cf5
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs
@@ -0,0 +1,43 @@
+macro_rules! builtin_functions {
+ ($register:ident; $(fn $name:ident($($arg_name:ident: $arg_ty:ty),*) -> $ret_ty:ty;)*) => {
+ #[cfg(feature = "jit")]
+ #[allow(improper_ctypes)]
+ extern "C" {
+ $(fn $name($($arg_name: $arg_ty),*) -> $ret_ty;)*
+ }
+
+ #[cfg(feature = "jit")]
+ pub(crate) fn $register(builder: &mut cranelift_jit::JITBuilder) {
+ for (name, val) in [$((stringify!($name), $name as *const u8)),*] {
+ builder.symbol(name, val);
+ }
+ }
+ };
+}
+
+builtin_functions! {
+ register_functions_for_jit;
+
+ // integers
+ fn __multi3(a: i128, b: i128) -> i128;
+ fn __udivti3(n: u128, d: u128) -> u128;
+ fn __divti3(n: i128, d: i128) -> i128;
+ fn __umodti3(n: u128, d: u128) -> u128;
+ fn __modti3(n: i128, d: i128) -> i128;
+ fn __rust_u128_addo(a: u128, b: u128) -> (u128, bool);
+ fn __rust_i128_addo(a: i128, b: i128) -> (i128, bool);
+ fn __rust_u128_subo(a: u128, b: u128) -> (u128, bool);
+ fn __rust_i128_subo(a: i128, b: i128) -> (i128, bool);
+ fn __rust_u128_mulo(a: u128, b: u128) -> (u128, bool);
+ fn __rust_i128_mulo(a: i128, b: i128) -> (i128, bool);
+
+ // floats
+ fn __floattisf(i: i128) -> f32;
+ fn __floattidf(i: i128) -> f64;
+ fn __floatuntisf(i: u128) -> f32;
+ fn __floatuntidf(i: u128) -> f64;
+ fn __fixsfti(f: f32) -> i128;
+ fn __fixdfti(f: f64) -> i128;
+ fn __fixunssfti(f: f32) -> u128;
+ fn __fixunsdfti(f: f64) -> u128;
+}
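+
+ // For a single entry, the invocation above roughly expands to (sketch):
+ //
+ // #[cfg(feature = "jit")]
+ // extern "C" { fn __multi3(a: i128, b: i128) -> i128; }
+ //
+ // #[cfg(feature = "jit")]
+ // pub(crate) fn register_functions_for_jit(builder: &mut cranelift_jit::JITBuilder) {
+ //     for (name, val) in [("__multi3", __multi3 as *const u8) /* , ... */] {
+ //         builder.symbol(name, val);
+ //     }
+ // }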
diff --git a/compiler/rustc_codegen_cranelift/src/config.rs b/compiler/rustc_codegen_cranelift/src/config.rs
new file mode 100644
index 000000000..e59a0cb0a
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/config.rs
@@ -0,0 +1,107 @@
+use std::env;
+use std::str::FromStr;
+
+fn bool_env_var(key: &str) -> bool {
+ env::var(key).as_ref().map(|val| &**val) == Ok("1")
+}
+
+/// The mode to use for compilation.
+#[derive(Copy, Clone, Debug)]
+pub enum CodegenMode {
+ /// AOT compile the crate. This is the default.
+ Aot,
+ /// JIT compile and execute the crate.
+ Jit,
+ /// JIT compile and execute the crate, but compile each function lazily the first time it is called.
+ JitLazy,
+}
+
+impl FromStr for CodegenMode {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "aot" => Ok(CodegenMode::Aot),
+ "jit" => Ok(CodegenMode::Jit),
+ "jit-lazy" => Ok(CodegenMode::JitLazy),
+ _ => Err(format!("Unknown codegen mode `{}`", s)),
+ }
+ }
+}
+
+/// Configuration of cg_clif as passed in through `-Cllvm-args` and various env vars.
+#[derive(Clone, Debug)]
+pub struct BackendConfig {
+ /// Should the crate be AOT compiled or JIT executed.
+ ///
+ /// Defaults to AOT compilation. Can be set using `-Cllvm-args=mode=...`.
+ pub codegen_mode: CodegenMode,
+
+ /// When JIT mode is enabled, pass these arguments to the program.
+ ///
+ /// Defaults to the value of `CG_CLIF_JIT_ARGS`.
+ pub jit_args: Vec<String>,
+
+ /// Display the time it took to perform codegen for a crate.
+ ///
+ /// Defaults to true when the `CG_CLIF_DISPLAY_CG_TIME` env var is set to 1, false otherwise.
+ /// Can be set using `-Cllvm-args=display_cg_time=...`.
+ pub display_cg_time: bool,
+
+ /// Enable the Cranelift IR verifier for all compilation passes. If disabled, the verifier
+ /// will only run once, before the clif IR is passed to Cranelift for compilation.
+ ///
+ /// Defaults to true when the `CG_CLIF_ENABLE_VERIFIER` env var is set to 1 or when cg_clif
+ /// is compiled with debug assertions enabled, false otherwise. Can be set using
+ /// `-Cllvm-args=enable_verifier=...`.
+ pub enable_verifier: bool,
+
+ /// Don't cache object files in the incremental cache. Useful during development of cg_clif
+ /// to make it possible to use incremental mode for all analyses performed by rustc without
+ /// caching object files whose contents would have changed by a change to cg_clif.
+ ///
+ /// Defaults to true when the `CG_CLIF_DISABLE_INCR_CACHE` env var is set to 1, false
+ /// otherwise. Can be set using `-Cllvm-args=disable_incr_cache=...`.
+ pub disable_incr_cache: bool,
+}
+
+impl Default for BackendConfig {
+ fn default() -> Self {
+ BackendConfig {
+ codegen_mode: CodegenMode::Aot,
+ jit_args: {
+ let args = std::env::var("CG_CLIF_JIT_ARGS").unwrap_or_else(|_| String::new());
+ args.split(' ').map(|arg| arg.to_string()).collect()
+ },
+ display_cg_time: bool_env_var("CG_CLIF_DISPLAY_CG_TIME"),
+ enable_verifier: cfg!(debug_assertions) || bool_env_var("CG_CLIF_ENABLE_VERIFIER"),
+ disable_incr_cache: bool_env_var("CG_CLIF_DISABLE_INCR_CACHE"),
+ }
+ }
+}
+
+impl BackendConfig {
+ /// Parse the configuration passed in using `-Cllvm-args`.
+ pub fn from_opts(opts: &[String]) -> Result<Self, String> {
+ fn parse_bool(name: &str, value: &str) -> Result<bool, String> {
+ value.parse().map_err(|_| format!("failed to parse value `{}` for {}", value, name))
+ }
+
+ let mut config = BackendConfig::default();
+ for opt in opts {
+ if let Some((name, value)) = opt.split_once('=') {
+ match name {
+ "mode" => config.codegen_mode = value.parse()?,
+ "display_cg_time" => config.display_cg_time = parse_bool(name, value)?,
+ "enable_verifier" => config.enable_verifier = parse_bool(name, value)?,
+ "disable_incr_cache" => config.disable_incr_cache = parse_bool(name, value)?,
+ _ => return Err(format!("Unknown option `{}`", name)),
+ }
+ } else {
+ return Err(format!("Invalid option `{}`", opt));
+ }
+ }
+
+ Ok(config)
+ }
+}
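+
+ // Example with hypothetical values; these are the strings rustc forwards
+ // from the command line, e.g. `-Cllvm-args=mode=jit-lazy`:
+ //
+ // let config = BackendConfig::from_opts(&[
+ //     "mode=jit-lazy".to_string(),
+ //     "display_cg_time=true".to_string(),
+ // ])?;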
diff --git a/compiler/rustc_codegen_cranelift/src/constant.rs b/compiler/rustc_codegen_cranelift/src/constant.rs
new file mode 100644
index 000000000..7f7fd0e9c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/constant.rs
@@ -0,0 +1,580 @@
+//! Handling of `static`s, `const`s and promoted allocations
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::interpret::{
+ read_target_uint, AllocId, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
+};
+use rustc_middle::ty::ConstKind;
+use rustc_span::DUMMY_SP;
+
+use cranelift_codegen::ir::GlobalValueData;
+use cranelift_module::*;
+
+use crate::prelude::*;
+
+pub(crate) struct ConstantCx {
+ todo: Vec<TodoItem>,
+ done: FxHashSet<DataId>,
+ anon_allocs: FxHashMap<AllocId, DataId>,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum TodoItem {
+ Alloc(AllocId),
+ Static(DefId),
+}
+
+impl ConstantCx {
+ pub(crate) fn new() -> Self {
+ ConstantCx { todo: vec![], done: FxHashSet::default(), anon_allocs: FxHashMap::default() }
+ }
+
+ pub(crate) fn finalize(mut self, tcx: TyCtxt<'_>, module: &mut dyn Module) {
+ //println!("todo {:?}", self.todo);
+ define_all_allocs(tcx, module, &mut self);
+ //println!("done {:?}", self.done);
+ self.done.clear();
+ }
+}
+
+pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
+ let mut all_constants_ok = true;
+ for constant in &fx.mir.required_consts {
+ let const_ = match fx.monomorphize(constant.literal) {
+ ConstantKind::Ty(ct) => ct,
+ ConstantKind::Val(..) => continue,
+ };
+ match const_.kind() {
+ ConstKind::Value(_) => {}
+ ConstKind::Unevaluated(unevaluated) => {
+ if let Err(err) =
+ fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None)
+ {
+ all_constants_ok = false;
+ match err {
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {
+ fx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+ }
+ ErrorHandled::TooGeneric => {
+ span_bug!(
+ constant.span,
+ "codgen encountered polymorphic constant: {:?}",
+ err
+ );
+ }
+ }
+ }
+ }
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ }
+ }
+ all_constants_ok
+}
+
+pub(crate) fn codegen_static(tcx: TyCtxt<'_>, module: &mut dyn Module, def_id: DefId) {
+ let mut constants_cx = ConstantCx::new();
+ constants_cx.todo.push(TodoItem::Static(def_id));
+ constants_cx.finalize(tcx, module);
+}
+
+pub(crate) fn codegen_tls_ref<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ def_id: DefId,
+ layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+ let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("tls {:?}", def_id));
+ }
+ let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
+ CValue::by_val(tls_ptr, layout)
+}
+
+fn codegen_static_ref<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ def_id: DefId,
+ layout: TyAndLayout<'tcx>,
+) -> CPlace<'tcx> {
+ let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", def_id));
+ }
+ let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+ assert!(!layout.is_unsized(), "unsized statics aren't supported");
+ assert!(
+ matches!(
+ fx.bcx.func.global_values[local_data_id],
+ GlobalValueData::Symbol { tls: false, .. }
+ ),
+ "tls static referenced without Rvalue::ThreadLocalRef"
+ );
+ CPlace::for_ptr(crate::pointer::Pointer::new(global_ptr), layout)
+}
+
+pub(crate) fn codegen_constant<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ constant: &Constant<'tcx>,
+) -> CValue<'tcx> {
+ let const_ = match fx.monomorphize(constant.literal) {
+ ConstantKind::Ty(ct) => ct,
+ ConstantKind::Val(val, ty) => return codegen_const_value(fx, val, ty),
+ };
+ let const_val = match const_.kind() {
+ ConstKind::Value(valtree) => fx.tcx.valtree_to_const_val((const_.ty(), valtree)),
+ ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
+ if fx.tcx.is_static(def.did) =>
+ {
+ assert!(substs.is_empty());
+ assert!(promoted.is_none());
+
+ return codegen_static_ref(fx, def.did, fx.layout_of(const_.ty())).to_cvalue(fx);
+ }
+ ConstKind::Unevaluated(unevaluated) => {
+ match fx.tcx.const_eval_resolve(ParamEnv::reveal_all(), unevaluated, None) {
+ Ok(const_val) => const_val,
+ Err(_) => {
+ span_bug!(constant.span, "erroneous constant not captured by required_consts");
+ }
+ }
+ }
+ ConstKind::Param(_)
+ | ConstKind::Infer(_)
+ | ConstKind::Bound(_, _)
+ | ConstKind::Placeholder(_)
+ | ConstKind::Error(_) => unreachable!("{:?}", const_),
+ };
+
+ codegen_const_value(fx, const_val, const_.ty())
+}
+
+pub(crate) fn codegen_const_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ const_val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+) -> CValue<'tcx> {
+ let layout = fx.layout_of(ty);
+ assert!(!layout.is_unsized(), "unsized const value");
+
+ if layout.is_zst() {
+ return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
+ }
+
+ match const_val {
+ ConstValue::ZeroSized => unreachable!(), // we already handled ZSTs above
+ ConstValue::Scalar(x) => match x {
+ Scalar::Int(int) => {
+ if fx.clif_type(layout.ty).is_some() {
+ return CValue::const_val(fx, layout, int);
+ } else {
+ let raw_val = int.to_bits(int.size()).unwrap();
+ let val = match int.size().bytes() {
+ 1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
+ 2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
+ 4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
+ 8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
+ 16 => {
+ let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
+ let msb =
+ fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
+ _ => unreachable!(),
+ };
+
+ let place = CPlace::new_stack_slot(fx, layout);
+ place.to_ptr().store(fx, val, MemFlags::trusted());
+ place.to_cvalue(fx)
+ }
+ }
+ Scalar::Ptr(ptr, _size) => {
+ let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
+ let base_addr = match fx.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ let data_id = data_id_for_alloc_id(
+ &mut fx.constants_cx,
+ fx.module,
+ alloc_id,
+ alloc.inner().mutability,
+ );
+ let local_data_id =
+ fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+ }
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ GlobalAlloc::Function(instance) => {
+ let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
+ let local_func_id =
+ fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+ fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
+ }
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
+ let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
+ // FIXME: factor this common code with the `Memory` arm into a function?
+ let data_id = data_id_for_alloc_id(
+ &mut fx.constants_cx,
+ fx.module,
+ alloc_id,
+ alloc.inner().mutability,
+ );
+ let local_data_id =
+ fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ GlobalAlloc::Static(def_id) => {
+ assert!(fx.tcx.is_static(def_id));
+ let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
+ let local_data_id =
+ fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", def_id));
+ }
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+ }
+ };
+ let val = if offset.bytes() != 0 {
+ fx.bcx.ins().iadd_imm(base_addr, i64::try_from(offset.bytes()).unwrap())
+ } else {
+ base_addr
+ };
+ CValue::by_val(val, layout)
+ }
+ },
+ ConstValue::ByRef { alloc, offset } => CValue::by_ref(
+ pointer_for_allocation(fx, alloc)
+ .offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
+ layout,
+ ),
+ ConstValue::Slice { data, start, end } => {
+ let ptr = pointer_for_allocation(fx, data)
+ .offset_i64(fx, i64::try_from(start).unwrap())
+ .get_addr(fx);
+ let len = fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
+ CValue::by_val_pair(ptr, len, layout)
+ }
+ }
+}
+
+pub(crate) fn pointer_for_allocation<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ alloc: ConstAllocation<'tcx>,
+) -> crate::pointer::Pointer {
+ let alloc_id = fx.tcx.create_memory_alloc(alloc);
+ let data_id = data_id_for_alloc_id(
+ &mut fx.constants_cx,
+ &mut *fx.module,
+ alloc_id,
+ alloc.inner().mutability,
+ );
+
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("{:?}", alloc_id));
+ }
+ let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
+ crate::pointer::Pointer::new(global_ptr)
+}
+
+pub(crate) fn data_id_for_alloc_id(
+ cx: &mut ConstantCx,
+ module: &mut dyn Module,
+ alloc_id: AllocId,
+ mutability: rustc_hir::Mutability,
+) -> DataId {
+ cx.todo.push(TodoItem::Alloc(alloc_id));
+ *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
+ module.declare_anonymous_data(mutability == rustc_hir::Mutability::Mut, false).unwrap()
+ })
+}
+
+fn data_id_for_static(
+ tcx: TyCtxt<'_>,
+ module: &mut dyn Module,
+ def_id: DefId,
+ definition: bool,
+) -> DataId {
+ let rlinkage = tcx.codegen_fn_attrs(def_id).linkage;
+ let linkage = if definition {
+ crate::linkage::get_static_linkage(tcx, def_id)
+ } else if rlinkage == Some(rustc_middle::mir::mono::Linkage::ExternalWeak)
+ || rlinkage == Some(rustc_middle::mir::mono::Linkage::WeakAny)
+ {
+ Linkage::Preemptible
+ } else {
+ Linkage::Import
+ };
+
+ let instance = Instance::mono(tcx, def_id).polymorphize(tcx);
+ let symbol_name = tcx.symbol_name(instance).name;
+ let ty = instance.ty(tcx, ParamEnv::reveal_all());
+ let is_mutable = if tcx.is_mutable_static(def_id) {
+ true
+ } else {
+ !ty.is_freeze(tcx.at(DUMMY_SP), ParamEnv::reveal_all())
+ };
+ let align = tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().align.pref.bytes();
+
+ let attrs = tcx.codegen_fn_attrs(def_id);
+
+ let data_id = match module.declare_data(
+ &*symbol_name,
+ linkage,
+ is_mutable,
+ attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
+ ) {
+ Ok(data_id) => data_id,
+ Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
+ "attempt to declare `{symbol_name}` as static, but it was already declared as function"
+ )),
+ Err(err) => Err::<_, _>(err).unwrap(),
+ };
+
+ if rlinkage.is_some() {
+ // Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
+
+ let ref_name = format!("_rust_extern_with_linkage_{}", symbol_name);
+ let ref_data_id = module.declare_data(&ref_name, Linkage::Local, false, false).unwrap();
+ let mut data_ctx = DataContext::new();
+ data_ctx.set_align(align);
+ let data = module.declare_data_in_data(data_id, &mut data_ctx);
+ data_ctx.define(std::iter::repeat(0).take(pointer_ty(tcx).bytes() as usize).collect());
+ data_ctx.write_data_addr(0, data, 0);
+ match module.define_data(ref_data_id, &data_ctx) {
+ // Every time the static is referenced there will be another definition of this global,
+ // so duplicate definitions are expected and allowed.
+ Err(ModuleError::DuplicateDefinition(_)) => {}
+ res => res.unwrap(),
+ }
+ ref_data_id
+ } else {
+ data_id
+ }
+}
+
+fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut ConstantCx) {
+ while let Some(todo_item) = cx.todo.pop() {
+ let (data_id, alloc, section_name) = match todo_item {
+ TodoItem::Alloc(alloc_id) => {
+ let alloc = match tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => alloc,
+ GlobalAlloc::Function(_) | GlobalAlloc::Static(_) | GlobalAlloc::VTable(..) => {
+ unreachable!()
+ }
+ };
+ let data_id = *cx.anon_allocs.entry(alloc_id).or_insert_with(|| {
+ module
+ .declare_anonymous_data(
+ alloc.inner().mutability == rustc_hir::Mutability::Mut,
+ false,
+ )
+ .unwrap()
+ });
+ (data_id, alloc, None)
+ }
+ TodoItem::Static(def_id) => {
+ //println!("static {:?}", def_id);
+
+ let section_name = tcx.codegen_fn_attrs(def_id).link_section;
+
+ let alloc = tcx.eval_static_initializer(def_id).unwrap();
+
+ let data_id = data_id_for_static(tcx, module, def_id, true);
+ (data_id, alloc, section_name)
+ }
+ };
+
+ //("data_id {}", data_id);
+ if cx.done.contains(&data_id) {
+ continue;
+ }
+
+ let mut data_ctx = DataContext::new();
+ let alloc = alloc.inner();
+ data_ctx.set_align(alloc.align.bytes());
+
+ if let Some(section_name) = section_name {
+ let (segment_name, section_name) = if tcx.sess.target.is_like_osx {
+ let section_name = section_name.as_str();
+ if let Some(names) = section_name.split_once(',') {
+ names
+ } else {
+ tcx.sess.fatal(&format!(
+ "#[link_section = \"{}\"] is not valid for macos target: must be segment and section separated by comma",
+ section_name
+ ));
+ }
+ } else {
+ ("", section_name.as_str())
+ };
+ data_ctx.set_segment_section(segment_name, section_name);
+ }
+
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()).to_vec();
+ data_ctx.define(bytes.into_boxed_slice());
+
+ for &(offset, alloc_id) in alloc.relocations().iter() {
+ let addend = {
+ let endianness = tcx.data_layout.endian;
+ let offset = offset.bytes() as usize;
+ let ptr_size = tcx.data_layout.pointer_size;
+ let bytes = &alloc.inspect_with_uninit_and_ptr_outside_interpreter(
+ offset..offset + ptr_size.bytes() as usize,
+ );
+ read_target_uint(endianness, bytes).unwrap()
+ };
+
+ let reloc_target_alloc = tcx.global_alloc(alloc_id);
+ let data_id = match reloc_target_alloc {
+ GlobalAlloc::Function(instance) => {
+ assert_eq!(addend, 0);
+ let func_id =
+ crate::abi::import_function(tcx, module, instance.polymorphize(tcx));
+ let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
+ data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
+ continue;
+ }
+ GlobalAlloc::Memory(target_alloc) => {
+ data_id_for_alloc_id(cx, module, alloc_id, target_alloc.inner().mutability)
+ }
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc_id = tcx.vtable_allocation((ty, trait_ref));
+ data_id_for_alloc_id(cx, module, alloc_id, Mutability::Not)
+ }
+ GlobalAlloc::Static(def_id) => {
+ if tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
+ {
+ tcx.sess.fatal(&format!(
+ "Allocation {:?} contains reference to TLS value {:?}",
+ alloc, def_id
+ ));
+ }
+
+ // Don't push a `TodoItem::Static` here, as it would cause statics used by
+ // multiple crates to be duplicated between them. It isn't needed anyway, as
+ // `codegen_static` will push it when required.
+ data_id_for_static(tcx, module, def_id, false)
+ }
+ };
+
+ let global_value = module.declare_data_in_data(data_id, &mut data_ctx);
+ data_ctx.write_data_addr(offset.bytes() as u32, global_value, addend as i64);
+ }
+
+ module.define_data(data_id, &data_ctx).unwrap();
+ cx.done.insert(data_id);
+ }
+
+ assert!(cx.todo.is_empty(), "{:?}", cx.todo);
+}
+
+pub(crate) fn mir_operand_get_const_val<'tcx>(
+ fx: &FunctionCx<'_, '_, 'tcx>,
+ operand: &Operand<'tcx>,
+) -> Option<ConstValue<'tcx>> {
+ match operand {
+ Operand::Constant(const_) => match const_.literal {
+ ConstantKind::Ty(const_) => fx
+ .monomorphize(const_)
+ .eval_for_mir(fx.tcx, ParamEnv::reveal_all())
+ .try_to_value(fx.tcx),
+ ConstantKind::Val(val, _) => Some(val),
+ },
+ // FIXME(rust-lang/rust#85105): Casts like `IMM8 as u32` result in the const being stored
+ // inside a temporary before being passed to the intrinsic requiring the const argument.
+ // This code tries to find the single constant-defining assignment of the referenced local.
+ Operand::Copy(place) | Operand::Move(place) => {
+ if !place.projection.is_empty() {
+ return None;
+ }
+ let mut computed_const_val = None;
+ for bb_data in fx.mir.basic_blocks() {
+ for stmt in &bb_data.statements {
+ match &stmt.kind {
+ StatementKind::Assign(local_and_rvalue) if &local_and_rvalue.0 == place => {
+ match &local_and_rvalue.1 {
+ Rvalue::Cast(CastKind::Misc, operand, ty) => {
+ if computed_const_val.is_some() {
+ return None; // local assigned twice
+ }
+ if !matches!(ty.kind(), ty::Uint(_) | ty::Int(_)) {
+ return None;
+ }
+ let const_val = mir_operand_get_const_val(fx, operand)?;
+ if fx.layout_of(*ty).size
+ != const_val.try_to_scalar_int()?.size()
+ {
+ return None;
+ }
+ computed_const_val = Some(const_val);
+ }
+ Rvalue::Use(operand) => {
+ computed_const_val = mir_operand_get_const_val(fx, operand)
+ }
+ _ => return None,
+ }
+ }
+ StatementKind::SetDiscriminant { place: stmt_place, variant_index: _ }
+ if &**stmt_place == place =>
+ {
+ return None;
+ }
+ StatementKind::CopyNonOverlapping(_) => {
+ return None;
+ } // conservative handling
+ StatementKind::Assign(_)
+ | StatementKind::FakeRead(_)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(_)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Retag(_, _)
+ | StatementKind::AscribeUserType(_, _)
+ | StatementKind::Coverage(_)
+ | StatementKind::Nop => {}
+ }
+ }
+ match &bb_data.terminator().kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::Assert { .. } => {}
+ TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => unreachable!(),
+ TerminatorKind::InlineAsm { .. } => return None,
+ TerminatorKind::Call { destination, target: Some(_), .. }
+ if destination == place =>
+ {
+ return None;
+ }
+ TerminatorKind::Call { .. } => {}
+ }
+ }
+ computed_const_val
+ }
+ }
+}
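+
+ // Illustration of the rust-lang/rust#85105 pattern handled above, in MIR-like
+ // pseudocode (hypothetical locals): the const argument reaches the intrinsic
+ // through a temporary, so the walk must prove the temporary has exactly one
+ // constant-defining assignment.
+ //
+ // _2 = const 7_u8 as u32 (Misc); // the one defining cast of `_2`
+ // _1 = some_intrinsic(move _2);  // mir_operand_get_const_val(_2) == Some(7)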
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
new file mode 100644
index 000000000..589910ede
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/emit.rs
@@ -0,0 +1,190 @@
+//! Write the debuginfo into an object file.
+
+use cranelift_object::ObjectProduct;
+use rustc_data_structures::fx::FxHashMap;
+
+use gimli::write::{Address, AttributeValue, EndianVec, Result, Sections, Writer};
+use gimli::{RunTimeEndian, SectionId};
+
+use super::object::WriteDebugInfo;
+use super::DebugContext;
+
+impl DebugContext<'_> {
+ pub(crate) fn emit(&mut self, product: &mut ObjectProduct) {
+ let unit_range_list_id = self.dwarf.unit.ranges.add(self.unit_range_list.clone());
+ let root = self.dwarf.unit.root();
+ let root = self.dwarf.unit.get_mut(root);
+ root.set(gimli::DW_AT_ranges, AttributeValue::RangeListRef(unit_range_list_id));
+
+ let mut sections = Sections::new(WriterRelocate::new(self.endian));
+ self.dwarf.write(&mut sections).unwrap();
+
+ let mut section_map = FxHashMap::default();
+ let _: Result<()> = sections.for_each_mut(|id, section| {
+ if !section.writer.slice().is_empty() {
+ let section_id = product.add_debug_section(id, section.writer.take());
+ section_map.insert(id, section_id);
+ }
+ Ok(())
+ });
+
+ let _: Result<()> = sections.for_each(|id, section| {
+ if let Some(section_id) = section_map.get(&id) {
+ for reloc in &section.relocs {
+ product.add_debug_reloc(&section_map, section_id, reloc);
+ }
+ }
+ Ok(())
+ });
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct DebugReloc {
+ pub(crate) offset: u32,
+ pub(crate) size: u8,
+ pub(crate) name: DebugRelocName,
+ pub(crate) addend: i64,
+ pub(crate) kind: object::RelocationKind,
+}
+
+#[derive(Clone)]
+pub(crate) enum DebugRelocName {
+ Section(SectionId),
+ Symbol(usize),
+}
+
+/// A [`Writer`] that collects all necessary relocations.
+#[derive(Clone)]
+pub(super) struct WriterRelocate {
+ pub(super) relocs: Vec<DebugReloc>,
+ pub(super) writer: EndianVec<RunTimeEndian>,
+}
+
+impl WriterRelocate {
+ pub(super) fn new(endian: RunTimeEndian) -> Self {
+ WriterRelocate { relocs: Vec::new(), writer: EndianVec::new(endian) }
+ }
+
+ /// Apply the collected relocations so that the resulting debuginfo is usable by the JIT.
+ #[cfg(all(feature = "jit", not(windows)))]
+ pub(super) fn relocate_for_jit(mut self, jit_module: &cranelift_jit::JITModule) -> Vec<u8> {
+ for reloc in self.relocs.drain(..) {
+ match reloc.name {
+ super::DebugRelocName::Section(_) => unreachable!(),
+ super::DebugRelocName::Symbol(sym) => {
+ let addr = jit_module.get_finalized_function(
+ cranelift_module::FuncId::from_u32(sym.try_into().unwrap()),
+ );
+ let val = (addr as u64 as i64 + reloc.addend) as u64;
+ self.writer.write_udata_at(reloc.offset as usize, val, reloc.size).unwrap();
+ }
+ }
+ }
+ self.writer.into_vec()
+ }
+}
+
+impl Writer for WriterRelocate {
+ type Endian = RunTimeEndian;
+
+ fn endian(&self) -> Self::Endian {
+ self.writer.endian()
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+
+ fn write(&mut self, bytes: &[u8]) -> Result<()> {
+ self.writer.write(bytes)
+ }
+
+ fn write_at(&mut self, offset: usize, bytes: &[u8]) -> Result<()> {
+ self.writer.write_at(offset, bytes)
+ }
+
+ fn write_address(&mut self, address: Address, size: u8) -> Result<()> {
+ match address {
+ Address::Constant(val) => self.write_udata(val, size),
+ Address::Symbol { symbol, addend } => {
+ let offset = self.len() as u64;
+ self.relocs.push(DebugReloc {
+ offset: offset as u32,
+ size,
+ name: DebugRelocName::Symbol(symbol),
+ addend: addend as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata(0, size)
+ }
+ }
+ }
+
+ fn write_offset(&mut self, val: usize, section: SectionId, size: u8) -> Result<()> {
+ let offset = self.len() as u32;
+ self.relocs.push(DebugReloc {
+ offset,
+ size,
+ name: DebugRelocName::Section(section),
+ addend: val as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata(0, size)
+ }
+
+ fn write_offset_at(
+ &mut self,
+ offset: usize,
+ val: usize,
+ section: SectionId,
+ size: u8,
+ ) -> Result<()> {
+ self.relocs.push(DebugReloc {
+ offset: offset as u32,
+ size,
+ name: DebugRelocName::Section(section),
+ addend: val as i64,
+ kind: object::RelocationKind::Absolute,
+ });
+ self.write_udata_at(offset, 0, size)
+ }
+
+ fn write_eh_pointer(&mut self, address: Address, eh_pe: gimli::DwEhPe, size: u8) -> Result<()> {
+ match address {
+ // Address::Constant arm copied from gimli
+ Address::Constant(val) => {
+ // Indirect doesn't matter here.
+ let val = match eh_pe.application() {
+ gimli::DW_EH_PE_absptr => val,
+ gimli::DW_EH_PE_pcrel => {
+ // FIXME better handling of sign
+ let offset = self.len() as u64;
+ offset.wrapping_sub(val)
+ }
+ _ => {
+ return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe));
+ }
+ };
+ self.write_eh_pointer_data(val, eh_pe.format(), size)
+ }
+ Address::Symbol { symbol, addend } => match eh_pe.application() {
+ gimli::DW_EH_PE_pcrel => {
+ let size = match eh_pe.format() {
+ gimli::DW_EH_PE_sdata4 => 4,
+ _ => return Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+ };
+ self.relocs.push(DebugReloc {
+ offset: self.len() as u32,
+ size,
+ name: DebugRelocName::Symbol(symbol),
+ addend,
+ kind: object::RelocationKind::Relative,
+ });
+ self.write_udata(0, size)
+ }
+ _ => Err(gimli::write::Error::UnsupportedPointerEncoding(eh_pe)),
+ },
+ }
+ }
+}
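+
+ // Sketch of the protocol: every symbol- or section-relative value is written
+ // as a zero placeholder plus a recorded relocation, to be applied later by the
+ // linker (or by `relocate_for_jit` when JITing). Hypothetical usage:
+ //
+ // let mut w = WriterRelocate::new(RunTimeEndian::Little);
+ // w.write_offset(0x10, SectionId::DebugInfo, 4)?; // records one Absolute reloc
+ // assert_eq!(w.relocs.len(), 1); // and wrote four placeholder zero bytes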
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
new file mode 100644
index 000000000..bbcb95913
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -0,0 +1,218 @@
+//! Line info generation (`.debug_line`)
+
+use std::ffi::OsStr;
+use std::path::{Component, Path};
+
+use crate::prelude::*;
+
+use rustc_span::{
+ FileName, Pos, SourceFile, SourceFileAndLine, SourceFileHash, SourceFileHashAlgorithm,
+};
+
+use cranelift_codegen::binemit::CodeOffset;
+use cranelift_codegen::MachSrcLoc;
+
+use gimli::write::{
+ Address, AttributeValue, FileId, FileInfo, LineProgram, LineString, LineStringTable,
+ UnitEntryId,
+};
+
+// OPTIMIZATION: It is cheaper to do this in one pass than using `.parent()` and `.file_name()`.
+fn split_path_dir_and_file(path: &Path) -> (&Path, &OsStr) {
+ let mut iter = path.components();
+ let file_name = match iter.next_back() {
+ Some(Component::Normal(p)) => p,
+ component => {
+ panic!(
+ "Path component {:?} of path {} is an invalid filename",
+ component,
+ path.display()
+ );
+ }
+ };
+ let parent = iter.as_path();
+ (parent, file_name)
+}
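+
+ // e.g., assuming a Unix-style relative path:
+ //
+ // let (dir, file) = split_path_dir_and_file(Path::new("src/lib.rs"));
+ // assert_eq!(dir, Path::new("src"));
+ // assert_eq!(file, OsStr::new("lib.rs"));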
+
+// OPTIMIZATION: Avoid UTF-8 validation on UNIX.
+fn osstr_as_utf8_bytes(path: &OsStr) -> &[u8] {
+ #[cfg(unix)]
+ {
+ use std::os::unix::ffi::OsStrExt;
+ path.as_bytes()
+ }
+ #[cfg(not(unix))]
+ {
+ path.to_str().unwrap().as_bytes()
+ }
+}
+
+pub(crate) const MD5_LEN: usize = 16;
+
+pub(crate) fn make_file_info(hash: SourceFileHash) -> Option<FileInfo> {
+ if hash.kind == SourceFileHashAlgorithm::Md5 {
+ let mut buf = [0u8; MD5_LEN];
+ buf.copy_from_slice(hash.hash_bytes());
+ Some(FileInfo { timestamp: 0, size: 0, md5: buf })
+ } else {
+ None
+ }
+}
+
+fn line_program_add_file(
+ line_program: &mut LineProgram,
+ line_strings: &mut LineStringTable,
+ file: &SourceFile,
+) -> FileId {
+ match &file.name {
+ FileName::Real(path) => {
+ let (dir_path, file_name) = split_path_dir_and_file(path.remapped_path_if_available());
+ let dir_name = osstr_as_utf8_bytes(dir_path.as_os_str());
+ let file_name = osstr_as_utf8_bytes(file_name);
+
+ let dir_id = if !dir_name.is_empty() {
+ let dir_name = LineString::new(dir_name, line_program.encoding(), line_strings);
+ line_program.add_directory(dir_name)
+ } else {
+ line_program.default_directory()
+ };
+ let file_name = LineString::new(file_name, line_program.encoding(), line_strings);
+
+ let info = make_file_info(file.src_hash);
+
+ line_program.file_has_md5 &= info.is_some();
+ line_program.add_file(file_name, dir_id, info)
+ }
+ // FIXME give more appropriate file names
+ filename => {
+ let dir_id = line_program.default_directory();
+ let dummy_file_name = LineString::new(
+ filename.prefer_remapped().to_string().into_bytes(),
+ line_program.encoding(),
+ line_strings,
+ );
+ line_program.add_file(dummy_file_name, dir_id, None)
+ }
+ }
+}
+
+impl<'tcx> DebugContext<'tcx> {
+ pub(super) fn emit_location(&mut self, entry_id: UnitEntryId, span: Span) {
+ let loc = self.tcx.sess.source_map().lookup_char_pos(span.lo());
+
+ let file_id = line_program_add_file(
+ &mut self.dwarf.unit.line_program,
+ &mut self.dwarf.line_strings,
+ &loc.file,
+ );
+
+ let entry = self.dwarf.unit.get_mut(entry_id);
+
+ entry.set(gimli::DW_AT_decl_file, AttributeValue::FileIndex(Some(file_id)));
+ entry.set(gimli::DW_AT_decl_line, AttributeValue::Udata(loc.line as u64));
+ entry.set(gimli::DW_AT_decl_column, AttributeValue::Udata(loc.col.to_usize() as u64));
+ }
+
+ pub(super) fn create_debug_lines(
+ &mut self,
+ symbol: usize,
+ entry_id: UnitEntryId,
+ context: &Context,
+ function_span: Span,
+ source_info_set: &indexmap::IndexSet<SourceInfo>,
+ ) -> CodeOffset {
+ let tcx = self.tcx;
+ let line_program = &mut self.dwarf.unit.line_program;
+
+ let line_strings = &mut self.dwarf.line_strings;
+ let mut last_span = None;
+ let mut last_file = None;
+ let mut create_row_for_span = |line_program: &mut LineProgram, span: Span| {
+ if let Some(last_span) = last_span {
+ if span == last_span {
+ line_program.generate_row();
+ return;
+ }
+ }
+ last_span = Some(span);
+
+ // Based on https://github.com/rust-lang/rust/blob/e369d87b015a84653343032833d65d0545fd3f26/src/librustc_codegen_ssa/mir/mod.rs#L116-L131
+ // In order to have good line-stepping behavior in the debugger, we overwrite
+ // the debug locations of macro expansions with that of the outermost expansion
+ // site (unless the crate is being compiled with `-Z debug-macros`).
+ let span = if !span.from_expansion() || tcx.sess.opts.unstable_opts.debug_macros {
+ span
+ } else {
+ // Walk up the macro expansion chain until we reach a non-expanded span.
+ // We also stop at the function body level because no line stepping can occur
+ // at the level above that.
+ rustc_span::hygiene::walk_chain(span, function_span.ctxt())
+ };
+
+ let (file, line, col) = match tcx.sess.source_map().lookup_line(span.lo()) {
+ Ok(SourceFileAndLine { sf: file, line }) => {
+ let line_pos = file.line_begin_pos(span.lo());
+
+ (
+ file,
+ u64::try_from(line).unwrap() + 1,
+ u64::from((span.lo() - line_pos).to_u32()) + 1,
+ )
+ }
+ Err(file) => (file, 0, 0),
+ };
+
+ // line_program_add_file is very slow.
+ // Optimize for the common case of the current file not being changed.
+ let current_file_changed = if let Some(last_file) = &last_file {
+ // If the allocations are not equal, then the files may still be equal, but that
+ // is not a problem, as this is just an optimization.
+ !rustc_data_structures::sync::Lrc::ptr_eq(last_file, &file)
+ } else {
+ true
+ };
+ if current_file_changed {
+ let file_id = line_program_add_file(line_program, line_strings, &file);
+ line_program.row().file = file_id;
+ last_file = Some(file);
+ }
+
+ line_program.row().line = line;
+ line_program.row().column = col;
+ line_program.generate_row();
+ };
+
+ line_program.begin_sequence(Some(Address::Symbol { symbol, addend: 0 }));
+
+ let mut func_end = 0;
+
+ let mcr = context.mach_compile_result.as_ref().unwrap();
+ for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
+ line_program.row().address_offset = u64::from(start);
+ if !loc.is_default() {
+ let source_info = *source_info_set.get_index(loc.bits() as usize).unwrap();
+ create_row_for_span(line_program, source_info.span);
+ } else {
+ create_row_for_span(line_program, function_span);
+ }
+ func_end = end;
+ }
+
+ line_program.end_sequence(u64::from(func_end));
+
+ let func_end = mcr.buffer.total_size();
+
+ assert_ne!(func_end, 0);
+
+ let entry = self.dwarf.unit.get_mut(entry_id);
+ entry.set(
+ gimli::DW_AT_low_pc,
+ AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+ );
+ entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(func_end)));
+
+ self.emit_location(entry_id, function_span);
+
+ func_end
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
new file mode 100644
index 000000000..693092ba5
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -0,0 +1,357 @@
+//! Handling of everything related to debuginfo.
+
+mod emit;
+mod line_info;
+mod object;
+mod unwind;
+
+use crate::prelude::*;
+
+use rustc_index::vec::IndexVec;
+
+use cranelift_codegen::entity::EntityRef;
+use cranelift_codegen::ir::{Endianness, LabelValueLoc, ValueLabel};
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::ValueLocRange;
+
+use gimli::write::{
+ Address, AttributeValue, DwarfUnit, Expression, LineProgram, LineString, Location,
+ LocationList, Range, RangeList, UnitEntryId,
+};
+use gimli::{Encoding, Format, LineEncoding, RunTimeEndian, X86_64};
+
+pub(crate) use emit::{DebugReloc, DebugRelocName};
+pub(crate) use unwind::UnwindContext;
+
+pub(crate) struct DebugContext<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ endian: RunTimeEndian,
+
+ dwarf: DwarfUnit,
+ unit_range_list: RangeList,
+
+ types: FxHashMap<Ty<'tcx>, UnitEntryId>,
+}
+
+impl<'tcx> DebugContext<'tcx> {
+ pub(crate) fn new(tcx: TyCtxt<'tcx>, isa: &dyn TargetIsa) -> Self {
+ let encoding = Encoding {
+ format: Format::Dwarf32,
+ // FIXME this should be configurable
+ // macOS doesn't seem to support DWARF > 3
+ // version 5 is required for the md5 file hash
+ version: if tcx.sess.target.is_like_osx {
+ 3
+ } else {
+ // FIXME change to version 5 once the gdb and lldb shipping with the latest debian
+ // support it.
+ 4
+ },
+ address_size: isa.frontend_config().pointer_bytes(),
+ };
+
+ let endian = match isa.endianness() {
+ Endianness::Little => RunTimeEndian::Little,
+ Endianness::Big => RunTimeEndian::Big,
+ };
+
+ let mut dwarf = DwarfUnit::new(encoding);
+
+ let producer = format!(
+ "cg_clif (rustc {}, cranelift {})",
+ rustc_interface::util::version_str().unwrap_or("unknown version"),
+ cranelift_codegen::VERSION,
+ );
+ let comp_dir = tcx
+ .sess
+ .opts
+ .working_dir
+ .to_string_lossy(FileNameDisplayPreference::Remapped)
+ .into_owned();
+ let (name, file_info) = match tcx.sess.local_crate_source_file.clone() {
+ Some(path) => {
+ let name = path.to_string_lossy().into_owned();
+ (name, None)
+ }
+ None => (tcx.crate_name(LOCAL_CRATE).to_string(), None),
+ };
+
+ let mut line_program = LineProgram::new(
+ encoding,
+ LineEncoding::default(),
+ LineString::new(comp_dir.as_bytes(), encoding, &mut dwarf.line_strings),
+ LineString::new(name.as_bytes(), encoding, &mut dwarf.line_strings),
+ file_info,
+ );
+ line_program.file_has_md5 = file_info.is_some();
+
+ dwarf.unit.line_program = line_program;
+
+ {
+ let name = dwarf.strings.add(name);
+ let comp_dir = dwarf.strings.add(comp_dir);
+
+ let root = dwarf.unit.root();
+ let root = dwarf.unit.get_mut(root);
+ root.set(gimli::DW_AT_producer, AttributeValue::StringRef(dwarf.strings.add(producer)));
+ root.set(gimli::DW_AT_language, AttributeValue::Language(gimli::DW_LANG_Rust));
+ root.set(gimli::DW_AT_name, AttributeValue::StringRef(name));
+ root.set(gimli::DW_AT_comp_dir, AttributeValue::StringRef(comp_dir));
+ root.set(gimli::DW_AT_low_pc, AttributeValue::Address(Address::Constant(0)));
+ }
+
+ DebugContext {
+ tcx,
+
+ endian,
+
+ dwarf,
+ unit_range_list: RangeList(Vec::new()),
+
+ types: FxHashMap::default(),
+ }
+ }
+
+ fn dwarf_ty(&mut self, ty: Ty<'tcx>) -> UnitEntryId {
+ if let Some(type_id) = self.types.get(&ty) {
+ return *type_id;
+ }
+
+ let new_entry = |dwarf: &mut DwarfUnit, tag| dwarf.unit.add(dwarf.unit.root(), tag);
+
+ let primitive = |dwarf: &mut DwarfUnit, ate| {
+ let type_id = new_entry(dwarf, gimli::DW_TAG_base_type);
+ let type_entry = dwarf.unit.get_mut(type_id);
+ type_entry.set(gimli::DW_AT_encoding, AttributeValue::Encoding(ate));
+ type_id
+ };
+
+ let name = format!("{}", ty);
+ let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
+
+ let type_id = match ty.kind() {
+ ty::Bool => primitive(&mut self.dwarf, gimli::DW_ATE_boolean),
+ ty::Char => primitive(&mut self.dwarf, gimli::DW_ATE_UTF),
+ ty::Uint(_) => primitive(&mut self.dwarf, gimli::DW_ATE_unsigned),
+ ty::Int(_) => primitive(&mut self.dwarf, gimli::DW_ATE_signed),
+ ty::Float(_) => primitive(&mut self.dwarf, gimli::DW_ATE_float),
+ ty::Ref(_, pointee_ty, _mutbl)
+ | ty::RawPtr(ty::TypeAndMut { ty: pointee_ty, mutbl: _mutbl }) => {
+ let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_pointer_type);
+
+ // Ensure that type is inserted before recursing to avoid duplicates
+ self.types.insert(ty, type_id);
+
+ let pointee = self.dwarf_ty(*pointee_ty);
+
+ let type_entry = self.dwarf.unit.get_mut(type_id);
+
+ //type_entry.set(gimli::DW_AT_mutable, AttributeValue::Flag(mutbl == rustc_hir::Mutability::Mut));
+ type_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(pointee));
+
+ type_id
+ }
+ ty::Adt(adt_def, _substs) if adt_def.is_struct() && !layout.is_unsized() => {
+ let type_id = new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type);
+
+ // Ensure that type is inserted before recursing to avoid duplicates
+ self.types.insert(ty, type_id);
+
+ let variant = adt_def.non_enum_variant();
+
+ for (field_idx, field_def) in variant.fields.iter().enumerate() {
+ let field_offset = layout.fields.offset(field_idx);
+ let field_layout = layout.field(
+ &layout::LayoutCx { tcx: self.tcx, param_env: ParamEnv::reveal_all() },
+ field_idx,
+ );
+
+ let field_type = self.dwarf_ty(field_layout.ty);
+
+ let field_id = self.dwarf.unit.add(type_id, gimli::DW_TAG_member);
+ let field_entry = self.dwarf.unit.get_mut(field_id);
+
+ field_entry.set(
+ gimli::DW_AT_name,
+ AttributeValue::String(field_def.name.as_str().to_string().into_bytes()),
+ );
+ field_entry.set(
+ gimli::DW_AT_data_member_location,
+ AttributeValue::Udata(field_offset.bytes()),
+ );
+ field_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(field_type));
+ }
+
+ type_id
+ }
+ _ => new_entry(&mut self.dwarf, gimli::DW_TAG_structure_type),
+ };
+
+ let type_entry = self.dwarf.unit.get_mut(type_id);
+
+ type_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+ type_entry.set(gimli::DW_AT_byte_size, AttributeValue::Udata(layout.size.bytes()));
+
+ self.types.insert(ty, type_id);
+
+ type_id
+ }
+
+ fn define_local(&mut self, scope: UnitEntryId, name: String, ty: Ty<'tcx>) -> UnitEntryId {
+ let dw_ty = self.dwarf_ty(ty);
+
+ let var_id = self.dwarf.unit.add(scope, gimli::DW_TAG_variable);
+ let var_entry = self.dwarf.unit.get_mut(var_id);
+
+ var_entry.set(gimli::DW_AT_name, AttributeValue::String(name.into_bytes()));
+ var_entry.set(gimli::DW_AT_type, AttributeValue::UnitRef(dw_ty));
+
+ var_id
+ }
+
+ pub(crate) fn define_function(
+ &mut self,
+ instance: Instance<'tcx>,
+ func_id: FuncId,
+ name: &str,
+ isa: &dyn TargetIsa,
+ context: &Context,
+ source_info_set: &indexmap::IndexSet<SourceInfo>,
+ local_map: IndexVec<mir::Local, CPlace<'tcx>>,
+ ) {
+ let symbol = func_id.as_u32() as usize;
+ let mir = self.tcx.instance_mir(instance.def);
+
+ // FIXME: add to appropriate scope instead of root
+ let scope = self.dwarf.unit.root();
+
+ let entry_id = self.dwarf.unit.add(scope, gimli::DW_TAG_subprogram);
+ let entry = self.dwarf.unit.get_mut(entry_id);
+ let name_id = self.dwarf.strings.add(name);
+        // GDB requires DW_AT_name; otherwise the DW_TAG_subprogram is skipped.
+ entry.set(gimli::DW_AT_name, AttributeValue::StringRef(name_id));
+ entry.set(gimli::DW_AT_linkage_name, AttributeValue::StringRef(name_id));
+
+ let end = self.create_debug_lines(symbol, entry_id, context, mir.span, source_info_set);
+
+ self.unit_range_list.0.push(Range::StartLength {
+ begin: Address::Symbol { symbol, addend: 0 },
+ length: u64::from(end),
+ });
+
+ let func_entry = self.dwarf.unit.get_mut(entry_id);
+        // GDB requires both DW_AT_low_pc and DW_AT_high_pc; otherwise the DW_TAG_subprogram is skipped.
+ func_entry.set(
+ gimli::DW_AT_low_pc,
+ AttributeValue::Address(Address::Symbol { symbol, addend: 0 }),
+ );
+ // Using Udata for DW_AT_high_pc requires at least DWARF4
+ func_entry.set(gimli::DW_AT_high_pc, AttributeValue::Udata(u64::from(end)));
+
+ // FIXME make it more reliable and implement scopes before re-enabling this.
+ if false {
+ let value_labels_ranges = std::collections::HashMap::new(); // FIXME
+
+ for (local, _local_decl) in mir.local_decls.iter_enumerated() {
+ let ty = self.tcx.subst_and_normalize_erasing_regions(
+ instance.substs,
+ ty::ParamEnv::reveal_all(),
+ mir.local_decls[local].ty,
+ );
+ let var_id = self.define_local(entry_id, format!("{:?}", local), ty);
+
+ let location = place_location(
+ self,
+ isa,
+ symbol,
+ &local_map,
+ &value_labels_ranges,
+ Place { local, projection: ty::List::empty() },
+ );
+
+ let var_entry = self.dwarf.unit.get_mut(var_id);
+ var_entry.set(gimli::DW_AT_location, location);
+ }
+ }
+
+ // FIXME create locals for all entries in mir.var_debug_info
+ }
+}
+
+fn place_location<'tcx>(
+ debug_context: &mut DebugContext<'tcx>,
+ isa: &dyn TargetIsa,
+ symbol: usize,
+ local_map: &IndexVec<mir::Local, CPlace<'tcx>>,
+ #[allow(rustc::default_hash_types)] value_labels_ranges: &std::collections::HashMap<
+ ValueLabel,
+ Vec<ValueLocRange>,
+ >,
+ place: Place<'tcx>,
+) -> AttributeValue {
+ assert!(place.projection.is_empty()); // FIXME implement them
+
+ match local_map[place.local].inner() {
+ CPlaceInner::Var(_local, var) => {
+ let value_label = cranelift_codegen::ir::ValueLabel::new(var.index());
+ if let Some(value_loc_ranges) = value_labels_ranges.get(&value_label) {
+ let loc_list = LocationList(
+ value_loc_ranges
+ .iter()
+ .map(|value_loc_range| Location::StartEnd {
+ begin: Address::Symbol {
+ symbol,
+ addend: i64::from(value_loc_range.start),
+ },
+ end: Address::Symbol { symbol, addend: i64::from(value_loc_range.end) },
+ data: translate_loc(isa, value_loc_range.loc).unwrap(),
+ })
+ .collect(),
+ );
+ let loc_list_id = debug_context.dwarf.unit.locations.add(loc_list);
+
+ AttributeValue::LocationListRef(loc_list_id)
+ } else {
+ // FIXME set value labels for unused locals
+
+ AttributeValue::Exprloc(Expression::new())
+ }
+ }
+ CPlaceInner::VarPair(_, _, _) => {
+ // FIXME implement this
+
+ AttributeValue::Exprloc(Expression::new())
+ }
+ CPlaceInner::VarLane(_, _, _) => {
+ // FIXME implement this
+
+ AttributeValue::Exprloc(Expression::new())
+ }
+ CPlaceInner::Addr(_, _) => {
+ // FIXME implement this (used by arguments and returns)
+
+ AttributeValue::Exprloc(Expression::new())
+
+ // For PointerBase::Stack:
+ //AttributeValue::Exprloc(translate_loc(ValueLoc::Stack(*stack_slot)).unwrap())
+ }
+ }
+}
+
+// Adapted from https://github.com/CraneStation/wasmtime/blob/5a1845b4caf7a5dba8eda1fef05213a532ed4259/crates/debug/src/transform/expression.rs#L59-L137
+fn translate_loc(isa: &dyn TargetIsa, loc: LabelValueLoc) -> Option<Expression> {
+ match loc {
+ LabelValueLoc::Reg(reg) => {
+ let machine_reg = isa.map_regalloc_reg_to_dwarf(reg).unwrap();
+ let mut expr = Expression::new();
+ expr.op_reg(gimli::Register(machine_reg));
+ Some(expr)
+ }
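+        // Stack-pointer-relative locations are encoded against RSP, so this path is
+        // x86_64 specific.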
+ LabelValueLoc::SPOffset(offset) => {
+ let mut expr = Expression::new();
+ expr.op_breg(X86_64::RSP, offset);
+ Some(expr)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs
new file mode 100644
index 000000000..9dc9b2cf9
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/object.rs
@@ -0,0 +1,83 @@
+use rustc_data_structures::fx::FxHashMap;
+
+use cranelift_module::FuncId;
+use cranelift_object::ObjectProduct;
+
+use object::write::{Relocation, StandardSegment};
+use object::{RelocationEncoding, SectionKind};
+
+use gimli::SectionId;
+
+use crate::debuginfo::{DebugReloc, DebugRelocName};
+
+pub(super) trait WriteDebugInfo {
+ type SectionId: Copy;
+
+ fn add_debug_section(&mut self, name: SectionId, data: Vec<u8>) -> Self::SectionId;
+ fn add_debug_reloc(
+ &mut self,
+ section_map: &FxHashMap<SectionId, Self::SectionId>,
+ from: &Self::SectionId,
+ reloc: &DebugReloc,
+ );
+}
+
+impl WriteDebugInfo for ObjectProduct {
+ type SectionId = (object::write::SectionId, object::write::SymbolId);
+
+ fn add_debug_section(
+ &mut self,
+ id: SectionId,
+ data: Vec<u8>,
+ ) -> (object::write::SectionId, object::write::SymbolId) {
+ let name = if self.object.format() == object::BinaryFormat::MachO {
+            id.name().replace('.', "__") // Mach-O expects __debug_info instead of .debug_info
+ } else {
+ id.name().to_string()
+ }
+ .into_bytes();
+
+ let segment = self.object.segment_name(StandardSegment::Debug).to_vec();
+ // FIXME use SHT_X86_64_UNWIND for .eh_frame
+ let section_id = self.object.add_section(
+ segment,
+ name,
+ if id == SectionId::EhFrame { SectionKind::ReadOnlyData } else { SectionKind::Debug },
+ );
+ self.object
+ .section_mut(section_id)
+ .set_data(data, if id == SectionId::EhFrame { 8 } else { 1 });
+ let symbol_id = self.object.section_symbol(section_id);
+ (section_id, symbol_id)
+ }
+
+ fn add_debug_reloc(
+ &mut self,
+ section_map: &FxHashMap<SectionId, Self::SectionId>,
+ from: &Self::SectionId,
+ reloc: &DebugReloc,
+ ) {
+ let (symbol, symbol_offset) = match reloc.name {
+ DebugRelocName::Section(id) => (section_map.get(&id).unwrap().1, 0),
+ DebugRelocName::Symbol(id) => {
+ let symbol_id = self.function_symbol(FuncId::from_u32(id.try_into().unwrap()));
+ self.object
+ .symbol_section_and_offset(symbol_id)
+ .expect("Debug reloc for undef sym???")
+ }
+ };
+ self.object
+ .add_relocation(
+ from.0,
+ Relocation {
+ offset: u64::from(reloc.offset),
+ symbol,
+ kind: reloc.kind,
+ encoding: RelocationEncoding::Generic,
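+                    // `object` expects the relocation size in bits, while `DebugReloc`
+                    // stores it in bytes.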
+ size: reloc.size * 8,
+ addend: i64::try_from(symbol_offset).unwrap() + reloc.addend,
+ },
+ )
+ .unwrap();
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
new file mode 100644
index 000000000..d26392c49
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/unwind.rs
@@ -0,0 +1,136 @@
+//! Unwind info generation (`.eh_frame`)
+
+use crate::prelude::*;
+
+use cranelift_codegen::ir::Endianness;
+use cranelift_codegen::isa::{unwind::UnwindInfo, TargetIsa};
+
+use cranelift_object::ObjectProduct;
+use gimli::write::{Address, CieId, EhFrame, FrameTable, Section};
+use gimli::RunTimeEndian;
+
+use super::object::WriteDebugInfo;
+
+pub(crate) struct UnwindContext {
+ endian: RunTimeEndian,
+ frame_table: FrameTable,
+ cie_id: Option<CieId>,
+}
+
+impl UnwindContext {
+ pub(crate) fn new(isa: &dyn TargetIsa, pic_eh_frame: bool) -> Self {
+ let endian = match isa.endianness() {
+ Endianness::Little => RunTimeEndian::Little,
+ Endianness::Big => RunTimeEndian::Big,
+ };
+ let mut frame_table = FrameTable::default();
+
+ let cie_id = if let Some(mut cie) = isa.create_systemv_cie() {
+ if pic_eh_frame {
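+                // Encode FDE addresses as PC-relative 4-byte signed offsets so that the
+                // resulting .eh_frame is position independent.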
+ cie.fde_address_encoding =
+ gimli::DwEhPe(gimli::DW_EH_PE_pcrel.0 | gimli::DW_EH_PE_sdata4.0);
+ }
+ Some(frame_table.add_cie(cie))
+ } else {
+ None
+ };
+
+ UnwindContext { endian, frame_table, cie_id }
+ }
+
+ pub(crate) fn add_function(&mut self, func_id: FuncId, context: &Context, isa: &dyn TargetIsa) {
+ let unwind_info = if let Some(unwind_info) = context.create_unwind_info(isa).unwrap() {
+ unwind_info
+ } else {
+ return;
+ };
+
+ match unwind_info {
+ UnwindInfo::SystemV(unwind_info) => {
+ self.frame_table.add_fde(
+ self.cie_id.unwrap(),
+ unwind_info
+ .to_fde(Address::Symbol { symbol: func_id.as_u32() as usize, addend: 0 }),
+ );
+ }
+ UnwindInfo::WindowsX64(_) => {
+ // FIXME implement this
+ }
+ unwind_info => unimplemented!("{:?}", unwind_info),
+ }
+ }
+
+ pub(crate) fn emit(self, product: &mut ObjectProduct) {
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian));
+ self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+ if !eh_frame.0.writer.slice().is_empty() {
+ let id = eh_frame.id();
+ let section_id = product.add_debug_section(id, eh_frame.0.writer.into_vec());
+ let mut section_map = FxHashMap::default();
+ section_map.insert(id, section_id);
+
+ for reloc in &eh_frame.0.relocs {
+ product.add_debug_reloc(&section_map, &section_id, reloc);
+ }
+ }
+ }
+
+ #[cfg(all(feature = "jit", windows))]
+ pub(crate) unsafe fn register_jit(self, _jit_module: &cranelift_jit::JITModule) {}
+
+ #[cfg(all(feature = "jit", not(windows)))]
+ pub(crate) unsafe fn register_jit(self, jit_module: &cranelift_jit::JITModule) {
+ use std::mem::ManuallyDrop;
+
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(self.endian));
+ self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
+
+ if eh_frame.0.writer.slice().is_empty() {
+ return;
+ }
+
+ let mut eh_frame = eh_frame.0.relocate_for_jit(jit_module);
+
+ // GCC expects a terminating "empty" length, so write a 0 length at the end of the table.
+ eh_frame.extend(&[0, 0, 0, 0]);
+
+ // FIXME support unregistering unwind tables once cranelift-jit supports deallocating
+ // individual functions
+ let eh_frame = ManuallyDrop::new(eh_frame);
+
+ // =======================================================================
+ // Everything after this line up to the end of the file is loosely based on
+ // https://github.com/bytecodealliance/wasmtime/blob/4471a82b0c540ff48960eca6757ccce5b1b5c3e4/crates/jit/src/unwind/systemv.rs
+ #[cfg(target_os = "macos")]
+ {
+ // On macOS, `__register_frame` takes a pointer to a single FDE
+ let start = eh_frame.as_ptr();
+ let end = start.add(eh_frame.len());
+ let mut current = start;
+
+ // Walk all of the entries in the frame table and register them
+ while current < end {
+ let len = std::ptr::read::<u32>(current as *const u32) as usize;
+
+ // Skip over the CIE
+ if current != start {
+ __register_frame(current);
+ }
+
+ // Move to the next table entry (+4 because the length itself is not inclusive)
+ current = current.add(len + 4);
+ }
+ }
+ #[cfg(not(target_os = "macos"))]
+ {
+ // On other platforms, `__register_frame` will walk the FDEs until an entry of length 0
+ __register_frame(eh_frame.as_ptr());
+ }
+ }
+}
+
+extern "C" {
+ // libunwind import
+ fn __register_frame(fde: *const u8);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs
new file mode 100644
index 000000000..f619bb5ed
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs
@@ -0,0 +1,176 @@
+//! Handling of enum discriminants
+//!
+//! Adapted from <https://github.com/rust-lang/rust/blob/d760df5aea483aae041c9a241e7acacf48f75035/src/librustc_codegen_ssa/mir/place.rs>
+
+use rustc_target::abi::{Int, TagEncoding, Variants};
+
+use crate::prelude::*;
+
+pub(crate) fn codegen_set_discriminant<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ place: CPlace<'tcx>,
+ variant_index: VariantIdx,
+) {
+ let layout = place.layout();
+ if layout.for_variant(fx, variant_index).abi.is_uninhabited() {
+ return;
+ }
+ match layout.variants {
+ Variants::Single { index } => {
+ assert_eq!(index, variant_index);
+ }
+ Variants::Multiple {
+ tag: _,
+ tag_field,
+ tag_encoding: TagEncoding::Direct,
+ variants: _,
+ } => {
+ let ptr = place.place_field(fx, mir::Field::new(tag_field));
+ let to = layout.ty.discriminant_for_variant(fx.tcx, variant_index).unwrap().val;
+ let to = if ptr.layout().abi.is_signed() {
+ ty::ScalarInt::try_from_int(
+ ptr.layout().size.sign_extend(to) as i128,
+ ptr.layout().size,
+ )
+ .unwrap()
+ } else {
+ ty::ScalarInt::try_from_uint(to, ptr.layout().size).unwrap()
+ };
+ let discr = CValue::const_val(fx, ptr.layout(), to);
+ ptr.write_cvalue(fx, discr);
+ }
+ Variants::Multiple {
+ tag: _,
+ tag_field,
+ tag_encoding: TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ variants: _,
+ } => {
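+            // The dataful variant is encoded by the absence of a niche value, so only
+            // the other variants need the tag written.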
+ if variant_index != dataful_variant {
+ let niche = place.place_field(fx, mir::Field::new(tag_field));
+ let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+ let niche_value = ty::ScalarInt::try_from_uint(
+ u128::from(niche_value).wrapping_add(niche_start),
+ niche.layout().size,
+ )
+ .unwrap();
+ let niche_llval = CValue::const_val(fx, niche.layout(), niche_value);
+ niche.write_cvalue(fx, niche_llval);
+ }
+ }
+ }
+}
+
+pub(crate) fn codegen_get_discriminant<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ value: CValue<'tcx>,
+ dest_layout: TyAndLayout<'tcx>,
+) -> CValue<'tcx> {
+ let layout = value.layout();
+
+ if layout.abi == Abi::Uninhabited {
+ let true_ = fx.bcx.ins().iconst(types::I32, 1);
+ fx.bcx.ins().trapnz(true_, TrapCode::UnreachableCodeReached);
+ // Return a dummy value
+ return CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout);
+ }
+
+ let (tag_scalar, tag_field, tag_encoding) = match &layout.variants {
+ Variants::Single { index } => {
+ let discr_val = layout
+ .ty
+ .discriminant_for_variant(fx.tcx, *index)
+ .map_or(u128::from(index.as_u32()), |discr| discr.val);
+ let discr_val = if dest_layout.abi.is_signed() {
+ ty::ScalarInt::try_from_int(
+ dest_layout.size.sign_extend(discr_val) as i128,
+ dest_layout.size,
+ )
+ .unwrap()
+ } else {
+ ty::ScalarInt::try_from_uint(discr_val, dest_layout.size).unwrap()
+ };
+ return CValue::const_val(fx, dest_layout, discr_val);
+ }
+ Variants::Multiple { tag, tag_field, tag_encoding, variants: _ } => {
+ (tag, *tag_field, tag_encoding)
+ }
+ };
+
+ let cast_to = fx.clif_type(dest_layout.ty).unwrap();
+
+ // Read the tag/niche-encoded discriminant from memory.
+ let tag = value.value_field(fx, mir::Field::new(tag_field));
+ let tag = tag.load_scalar(fx);
+
+ // Decode the discriminant (specifically if it's niche-encoded).
+ match *tag_encoding {
+ TagEncoding::Direct => {
+ let signed = match tag_scalar.primitive() {
+ Int(_, signed) => signed,
+ _ => false,
+ };
+ let val = clif_intcast(fx, tag, cast_to, signed);
+ CValue::by_val(val, dest_layout)
+ }
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ // Rebase from niche values to discriminants, and check
+ // whether the result is in range for the niche variants.
+
+ // We first compute the "relative discriminant" (wrt `niche_variants`),
+ // that is, if `n = niche_variants.end() - niche_variants.start()`,
+ // we remap `niche_start..=niche_start + n` (which may wrap around)
+ // to (non-wrap-around) `0..=n`, to be able to check whether the
+ // discriminant corresponds to a niche variant with one comparison.
+ // We also can't go directly to the (variant index) discriminant
+ // and check that it is in the range `niche_variants`, because
+ // that might not fit in the same type, on top of needing an extra
+ // comparison (see also the comment on `let niche_discr`).
+ let relative_discr = if niche_start == 0 {
+ tag
+ } else {
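+                // Materialize `niche_start` as a constant. 128-bit constants must be
+                // built from two 64-bit halves because `iconst` only supports 64 bits.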
+ let niche_start = match fx.bcx.func.dfg.value_type(tag) {
+ types::I128 => {
+ let lsb = fx.bcx.ins().iconst(types::I64, niche_start as u64 as i64);
+ let msb =
+ fx.bcx.ins().iconst(types::I64, (niche_start >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
+ ty => fx.bcx.ins().iconst(ty, niche_start as i64),
+ };
+ fx.bcx.ins().isub(tag, niche_start)
+ };
+ let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+ let is_niche = {
+ codegen_icmp_imm(
+ fx,
+ IntCC::UnsignedLessThanOrEqual,
+ relative_discr,
+ i128::from(relative_max),
+ )
+ };
+
+ // NOTE(eddyb) this addition needs to be performed on the final
+ // type, in case the niche itself can't represent all variant
+ // indices (e.g. `u8` niche with more than `256` variants,
+ // but enough uninhabited variants so that the remaining variants
+ // fit in the niche).
+ // In other words, `niche_variants.end - niche_variants.start`
+ // is representable in the niche, but `niche_variants.end`
+ // might not be, in extreme cases.
+ let niche_discr = {
+ let relative_discr = if relative_max == 0 {
+ // HACK(eddyb) since we have only one niche, we know which
+ // one it is, and we can avoid having a dynamic value here.
+ fx.bcx.ins().iconst(cast_to, 0)
+ } else {
+ clif_intcast(fx, relative_discr, cast_to, false)
+ };
+ fx.bcx.ins().iadd_imm(relative_discr, i64::from(niche_variants.start().as_u32()))
+ };
+
+ let dataful_variant = fx.bcx.ins().iconst(cast_to, i64::from(dataful_variant.as_u32()));
+ let discr = fx.bcx.ins().select(is_niche, niche_discr, dataful_variant);
+ CValue::by_val(discr, dest_layout)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
new file mode 100644
index 000000000..3cd1ef563
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -0,0 +1,436 @@
+//! The AOT driver uses [`cranelift_object`] to write object files suitable for linking into a
+//! standalone executable.
+
+use std::path::PathBuf;
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::back::metadata::create_compressed_metadata_file;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::mir::mono::{CodegenUnit, MonoItem};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{DebugInfo, OutputType};
+use rustc_session::Session;
+
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_object::{ObjectBuilder, ObjectModule};
+
+use crate::{prelude::*, BackendConfig};
+
+struct ModuleCodegenResult(CompiledModule, Option<(WorkProductId, WorkProduct)>);
+
+impl<HCX> HashStable<HCX> for ModuleCodegenResult {
+ fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+fn make_module(sess: &Session, isa: Box<dyn TargetIsa>, name: String) -> ObjectModule {
+ let mut builder =
+ ObjectBuilder::new(isa, name + ".o", cranelift_module::default_libcall_names()).unwrap();
+    // Unlike cg_llvm, cg_clif defaults to disabling -Zfunction-sections. For cg_llvm, binary size
+ // is important, while cg_clif cares more about compilation times. Enabling -Zfunction-sections
+ // can easily double the amount of time necessary to perform linking.
+ builder.per_function_section(sess.opts.unstable_opts.function_sections.unwrap_or(false));
+ ObjectModule::new(builder)
+}
+
+fn emit_module(
+ tcx: TyCtxt<'_>,
+ backend_config: &BackendConfig,
+ name: String,
+ kind: ModuleKind,
+ module: ObjectModule,
+ debug: Option<DebugContext<'_>>,
+ unwind_context: UnwindContext,
+) -> ModuleCodegenResult {
+ let mut product = module.finish();
+
+ if let Some(mut debug) = debug {
+ debug.emit(&mut product);
+ }
+
+ unwind_context.emit(&mut product);
+
+ let tmp_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(&name));
+ let obj = product.object.write().unwrap();
+
+ tcx.sess.prof.artifact_size("object_file", name.clone(), obj.len().try_into().unwrap());
+
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess.fatal(&format!("error writing object file: {}", err));
+ }
+
+ let work_product = if backend_config.disable_incr_cache {
+ None
+ } else {
+ rustc_incremental::copy_cgu_workproduct_to_incr_comp_cache_dir(
+ tcx.sess,
+ &name,
+ &[("o", &tmp_file)],
+ )
+ };
+
+ ModuleCodegenResult(
+ CompiledModule { name, kind, object: Some(tmp_file), dwarf_object: None, bytecode: None },
+ work_product,
+ )
+}
+
+fn reuse_workproduct_for_cgu(
+ tcx: TyCtxt<'_>,
+ cgu: &CodegenUnit<'_>,
+ work_products: &mut FxHashMap<WorkProductId, WorkProduct>,
+) -> CompiledModule {
+ let work_product = cgu.previous_work_product(tcx);
+ let obj_out = tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu.name().as_str()));
+ let source_file = rustc_incremental::in_incr_comp_dir_sess(
+ &tcx.sess,
+ &work_product.saved_files.get("o").expect("no saved object file in work product"),
+ );
+ if let Err(err) = rustc_fs_util::link_or_copy(&source_file, &obj_out) {
+ tcx.sess.err(&format!(
+ "unable to copy {} to {}: {}",
+ source_file.display(),
+ obj_out.display(),
+ err
+ ));
+ }
+
+ work_products.insert(cgu.work_product_id(), work_product);
+
+ CompiledModule {
+ name: cgu.name().to_string(),
+ kind: ModuleKind::Regular,
+ object: Some(obj_out),
+ dwarf_object: None,
+ bytecode: None,
+ }
+}
+
+fn module_codegen(
+ tcx: TyCtxt<'_>,
+ (backend_config, cgu_name): (BackendConfig, rustc_span::Symbol),
+) -> ModuleCodegenResult {
+ let cgu = tcx.codegen_unit(cgu_name);
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+
+ let isa = crate::build_isa(tcx.sess, &backend_config);
+ let mut module = make_module(tcx.sess, isa, cgu_name.as_str().to_string());
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ module.isa(),
+ tcx.sess.opts.debuginfo != DebugInfo::None,
+ cgu_name,
+ );
+ super::predefine_mono_items(tcx, &mut module, &mono_items);
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => {
+ cx.tcx
+ .sess
+ .time("codegen fn", || crate::base::codegen_fn(&mut cx, &mut module, inst));
+ }
+ MonoItem::Static(def_id) => crate::constant::codegen_static(tcx, &mut module, def_id),
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx.hir().item(item_id);
+ if let rustc_hir::ItemKind::GlobalAsm(asm) = item.kind {
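+                    // Wrap the template in syntax directives: switch the assembler to
+                    // the syntax the asm was written in, and back to AT&T afterwards.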
+ if !asm.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ cx.global_asm.push_str("\n.intel_syntax noprefix\n");
+ } else {
+ cx.global_asm.push_str("\n.att_syntax\n");
+ }
+ for piece in asm.template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => cx.global_asm.push_str(s),
+ InlineAsmTemplatePiece::Placeholder { .. } => todo!(),
+ }
+ }
+ cx.global_asm.push_str("\n.att_syntax\n\n");
+ } else {
+ bug!("Expected GlobalAsm found {:?}", item);
+ }
+ }
+ }
+ }
+ crate::main_shim::maybe_create_entry_wrapper(
+ tcx,
+ &mut module,
+ &mut cx.unwind_context,
+ false,
+ cgu.is_primary(),
+ );
+
+ let debug_context = cx.debug_context;
+ let unwind_context = cx.unwind_context;
+ let codegen_result = tcx.sess.time("write object file", || {
+ emit_module(
+ tcx,
+ &backend_config,
+ cgu.name().as_str().to_string(),
+ ModuleKind::Regular,
+ module,
+ debug_context,
+ unwind_context,
+ )
+ });
+
+ codegen_global_asm(tcx, cgu.name().as_str(), &cx.global_asm);
+
+ codegen_result
+}
+
+pub(crate) fn run_aot(
+ tcx: TyCtxt<'_>,
+ backend_config: BackendConfig,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+) -> Box<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)> {
+ let mut work_products = FxHashMap::default();
+
+ let cgus = if tcx.sess.opts.output_types.should_codegen() {
+ tcx.collect_and_partition_mono_items(()).1
+ } else {
+ // If only `--emit metadata` is used, we shouldn't perform any codegen.
+ // Also `tcx.collect_and_partition_mono_items` may panic in that case.
+ &[]
+ };
+
+ if tcx.dep_graph.is_fully_enabled() {
+ for cgu in &*cgus {
+ tcx.ensure().codegen_unit(cgu.name());
+ }
+ }
+
+ let modules = super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+ cgus.iter()
+ .map(|cgu| {
+ let cgu_reuse = determine_cgu_reuse(tcx, cgu);
+ tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
+
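+                // Fall through to codegen unless the object file from the previous
+                // session can be reused as-is (CguReuse::PreLto).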
+ match cgu_reuse {
+ _ if backend_config.disable_incr_cache => {}
+ CguReuse::No => {}
+ CguReuse::PreLto => {
+ return reuse_workproduct_for_cgu(tcx, &*cgu, &mut work_products);
+ }
+ CguReuse::PostLto => unreachable!(),
+ }
+
+ let dep_node = cgu.codegen_dep_node(tcx);
+ let (ModuleCodegenResult(module, work_product), _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
+ (backend_config.clone(), cgu.name()),
+ module_codegen,
+ Some(rustc_middle::dep_graph::hash_result),
+ );
+
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+
+ module
+ })
+ .collect::<Vec<_>>()
+ });
+
+ tcx.sess.abort_if_errors();
+
+ let isa = crate::build_isa(tcx.sess, &backend_config);
+ let mut allocator_module = make_module(tcx.sess, isa, "allocator_shim".to_string());
+ assert_eq!(pointer_ty(tcx), allocator_module.target_config().pointer_type());
+ let mut allocator_unwind_context = UnwindContext::new(allocator_module.isa(), true);
+ let created_alloc_shim =
+ crate::allocator::codegen(tcx, &mut allocator_module, &mut allocator_unwind_context);
+
+ let allocator_module = if created_alloc_shim {
+ let ModuleCodegenResult(module, work_product) = emit_module(
+ tcx,
+ &backend_config,
+ "allocator_shim".to_string(),
+ ModuleKind::Allocator,
+ allocator_module,
+ None,
+ allocator_unwind_context,
+ );
+ if let Some((id, product)) = work_product {
+ work_products.insert(id, product);
+ }
+ Some(module)
+ } else {
+ None
+ };
+
+ let metadata_module = if need_metadata_module {
+ let _timer = tcx.prof.generic_activity("codegen crate metadata");
+ let (metadata_cgu_name, tmp_file) = tcx.sess.time("write compressed metadata", || {
+ use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+ let metadata_cgu_name = cgu_name_builder
+ .build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata"))
+ .as_str()
+ .to_string();
+
+ let tmp_file =
+ tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+
+ let symbol_name = rustc_middle::middle::exported_symbols::metadata_symbol_name(tcx);
+ let obj = create_compressed_metadata_file(tcx.sess, &metadata, &symbol_name);
+
+ if let Err(err) = std::fs::write(&tmp_file, obj) {
+ tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+ }
+
+ (metadata_cgu_name, tmp_file)
+ });
+
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(tmp_file),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ } else {
+ None
+ };
+
+ // FIXME handle `-Ctarget-cpu=native`
+ let target_cpu = match tcx.sess.opts.cg.target_cpu {
+ Some(ref name) => name,
+ None => tcx.sess.target.cpu.as_ref(),
+ }
+ .to_owned();
+
+ Box::new((
+ CodegenResults {
+ modules,
+ allocator_module,
+ metadata_module,
+ metadata,
+ crate_info: CrateInfo::new(tcx, target_cpu),
+ },
+ work_products,
+ ))
+}
+
+fn codegen_global_asm(tcx: TyCtxt<'_>, cgu_name: &str, global_asm: &str) {
+ use std::io::Write;
+ use std::process::{Command, Stdio};
+
+ if global_asm.is_empty() {
+ return;
+ }
+
+ if cfg!(not(feature = "inline_asm"))
+ || tcx.sess.target.is_like_osx
+ || tcx.sess.target.is_like_windows
+ {
+ if global_asm.contains("__rust_probestack") {
+ return;
+ }
+
+ // FIXME fix linker error on macOS
+ if cfg!(not(feature = "inline_asm")) {
+ tcx.sess.fatal(
+ "asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift",
+ );
+ } else {
+ tcx.sess.fatal("asm! and global_asm! are not yet supported on macOS and Windows");
+ }
+ }
+
+ let assembler = crate::toolchain::get_toolchain_binary(tcx.sess, "as");
+ let linker = crate::toolchain::get_toolchain_binary(tcx.sess, "ld");
+
+ // Remove all LLVM style comments
+ let global_asm = global_asm
+ .lines()
+ .map(|line| if let Some(index) = line.find("//") { &line[0..index] } else { line })
+ .collect::<Vec<_>>()
+ .join("\n");
+
+ let output_object_file = tcx.output_filenames(()).temp_path(OutputType::Object, Some(cgu_name));
+
+ // Assemble `global_asm`
+ let global_asm_object_file = add_file_stem_postfix(output_object_file.clone(), ".asm");
+ let mut child = Command::new(assembler)
+ .arg("-o")
+ .arg(&global_asm_object_file)
+ .stdin(Stdio::piped())
+ .spawn()
+ .expect("Failed to spawn `as`.");
+ child.stdin.take().unwrap().write_all(global_asm.as_bytes()).unwrap();
+ let status = child.wait().expect("Failed to wait for `as`.");
+ if !status.success() {
+ tcx.sess.fatal(&format!("Failed to assemble `{}`", global_asm));
+ }
+
+ // Link the global asm and main object file together
+ let main_object_file = add_file_stem_postfix(output_object_file.clone(), ".main");
+ std::fs::rename(&output_object_file, &main_object_file).unwrap();
+ let status = Command::new(linker)
+ .arg("-r") // Create a new object file
+ .arg("-o")
+ .arg(output_object_file)
+ .arg(&main_object_file)
+ .arg(&global_asm_object_file)
+ .status()
+ .unwrap();
+ if !status.success() {
+ tcx.sess.fatal(&format!(
+ "Failed to link `{}` and `{}` together",
+ main_object_file.display(),
+ global_asm_object_file.display(),
+ ));
+ }
+
+ std::fs::remove_file(global_asm_object_file).unwrap();
+ std::fs::remove_file(main_object_file).unwrap();
+}
+
+fn add_file_stem_postfix(mut path: PathBuf, postfix: &str) -> PathBuf {
+ let mut new_filename = path.file_stem().unwrap().to_owned();
+ new_filename.push(postfix);
+ if let Some(extension) = path.extension() {
+ new_filename.push(".");
+ new_filename.push(extension);
+ }
+ path.set_file_name(new_filename);
+ path
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/303d8aff6092709edd4dbd35b1c88e9aa40bf6d8/src/librustc_codegen_ssa/base.rs#L922-L953
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+ if !tcx.dep_graph.is_fully_enabled() {
+ return CguReuse::No;
+ }
+
+ let work_product_id = &cgu.work_product_id();
+ if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+ // We don't have anything cached for this CGU. This can happen
+ // if the CGU did not exist in the previous session.
+ return CguReuse::No;
+ }
+
+    // Try to mark the CGU as green. If we can do so, it means that nothing
+ // affecting the LLVM module has changed and we can re-use a cached version.
+ // If we compile with any kind of LTO, this means we can re-use the bitcode
+ // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+ // know that later). If we are not doing LTO, there is only one optimized
+ // version of each module, so we re-use that.
+ let dep_node = cgu.codegen_dep_node(tcx);
+ assert!(
+ !tcx.dep_graph.dep_node_exists(&dep_node),
+ "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+ cgu.name()
+ );
+
+ if tcx.try_mark_green(&dep_node) { CguReuse::PreLto } else { CguReuse::No }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
new file mode 100644
index 000000000..a56a91000
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -0,0 +1,385 @@
+//! The JIT driver uses [`cranelift_jit`] to JIT execute programs without writing any object
+//! files.
+
+use std::cell::RefCell;
+use std::ffi::CString;
+use std::os::raw::{c_char, c_int};
+use std::sync::{mpsc, Mutex};
+
+use rustc_codegen_ssa::CrateInfo;
+use rustc_middle::mir::mono::MonoItem;
+use rustc_session::Session;
+use rustc_span::Symbol;
+
+use cranelift_jit::{JITBuilder, JITModule};
+
+// FIXME use std::sync::OnceLock once it stabilizes
+use once_cell::sync::OnceCell;
+
+use crate::{prelude::*, BackendConfig};
+use crate::{CodegenCx, CodegenMode};
+
+struct JitState {
+ backend_config: BackendConfig,
+ jit_module: JITModule,
+}
+
+thread_local! {
+ static LAZY_JIT_STATE: RefCell<Option<JitState>> = const { RefCell::new(None) };
+}
+
+/// The Sender owned by the rustc thread
+static GLOBAL_MESSAGE_SENDER: OnceCell<Mutex<mpsc::Sender<UnsafeMessage>>> = OnceCell::new();
+
+/// A message that is sent from the jitted runtime to the rustc thread.
+/// Senders are responsible for upholding `Send` semantics.
+enum UnsafeMessage {
+ /// Request that the specified `Instance` be lazily jitted.
+ ///
+ /// Nothing accessible through `instance_ptr` may be moved or mutated by the sender after
+ /// this message is sent.
+ JitFn {
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+ tx: mpsc::Sender<*const u8>,
+ },
+}
+unsafe impl Send for UnsafeMessage {}
+
+impl UnsafeMessage {
+ /// Send the message.
+ fn send(self) -> Result<(), mpsc::SendError<UnsafeMessage>> {
+ thread_local! {
+ /// The Sender owned by the local thread
+ static LOCAL_MESSAGE_SENDER: mpsc::Sender<UnsafeMessage> =
+ GLOBAL_MESSAGE_SENDER
+ .get().unwrap()
+ .lock().unwrap()
+ .clone();
+ }
+ LOCAL_MESSAGE_SENDER.with(|sender| sender.send(self))
+ }
+}
+
+fn create_jit_module<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ backend_config: &BackendConfig,
+ hotswap: bool,
+) -> (JITModule, CodegenCx<'tcx>) {
+ let crate_info = CrateInfo::new(tcx, "dummy_target_cpu".to_string());
+ let imported_symbols = load_imported_symbols_for_jit(tcx.sess, crate_info);
+
+ let isa = crate::build_isa(tcx.sess, backend_config);
+ let mut jit_builder = JITBuilder::with_isa(isa, cranelift_module::default_libcall_names());
+ jit_builder.hotswap(hotswap);
+ crate::compiler_builtins::register_functions_for_jit(&mut jit_builder);
+ jit_builder.symbols(imported_symbols);
+ jit_builder.symbol("__clif_jit_fn", clif_jit_fn as *const u8);
+ let mut jit_module = JITModule::new(jit_builder);
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config.clone(),
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
+
+ crate::allocator::codegen(tcx, &mut jit_module, &mut cx.unwind_context);
+ crate::main_shim::maybe_create_entry_wrapper(
+ tcx,
+ &mut jit_module,
+ &mut cx.unwind_context,
+ true,
+ true,
+ );
+
+ (jit_module, cx)
+}
+
+pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
+ if !tcx.sess.opts.output_types.should_codegen() {
+ tcx.sess.fatal("JIT mode doesn't work with `cargo check`");
+ }
+
+ if !tcx.sess.crate_types().contains(&rustc_session::config::CrateType::Executable) {
+ tcx.sess.fatal("can't jit non-executable crate");
+ }
+
+ let (mut jit_module, mut cx) = create_jit_module(
+ tcx,
+ &backend_config,
+ matches!(backend_config.codegen_mode, CodegenMode::JitLazy),
+ );
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(());
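+    // Deduplicate mono items that appear in multiple CGUs by collecting them into a
+    // map first; the JIT compiles everything into a single module.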
+    let mono_items = cgus
+        .iter()
+        .flat_map(|cgu| cgu.items_in_deterministic_order(tcx))
+        .collect::<FxHashMap<_, (_, _)>>()
+        .into_iter()
+        .collect::<Vec<(_, (_, _))>>();
+
+ super::time(tcx, backend_config.display_cg_time, "codegen mono items", || {
+ super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
+ for (mono_item, _) in mono_items {
+ match mono_item {
+ MonoItem::Fn(inst) => match backend_config.codegen_mode {
+ CodegenMode::Aot => unreachable!(),
+ CodegenMode::Jit => {
+ cx.tcx.sess.time("codegen fn", || {
+ crate::base::codegen_fn(&mut cx, &mut jit_module, inst)
+ });
+ }
+ CodegenMode::JitLazy => codegen_shim(&mut cx, &mut jit_module, inst),
+ },
+ MonoItem::Static(def_id) => {
+ crate::constant::codegen_static(tcx, &mut jit_module, def_id);
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ let item = tcx.hir().item(item_id);
+ tcx.sess.span_fatal(item.span, "Global asm is not supported in JIT mode");
+ }
+ }
+ }
+ });
+
+ if !cx.global_asm.is_empty() {
+ tcx.sess.fatal("Inline asm is not supported in JIT mode");
+ }
+
+ tcx.sess.abort_if_errors();
+
+ jit_module.finalize_definitions();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+
+ println!(
+ "Rustc codegen cranelift will JIT run the executable, because -Cllvm-args=mode=jit was passed"
+ );
+
+ let args = std::iter::once(&*tcx.crate_name(LOCAL_CRATE).as_str().to_string())
+ .chain(backend_config.jit_args.iter().map(|arg| &**arg))
+ .map(|arg| CString::new(arg).unwrap())
+ .collect::<Vec<_>>();
+
+ let start_sig = Signature {
+ params: vec![
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ AbiParam::new(jit_module.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(jit_module.target_config().pointer_type() /*isize*/)],
+ call_conv: jit_module.target_config().default_call_conv,
+ };
+ let start_func_id = jit_module.declare_function("main", Linkage::Import, &start_sig).unwrap();
+ let finalized_start: *const u8 = jit_module.get_finalized_function(start_func_id);
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ assert!(lazy_jit_state.is_none());
+ *lazy_jit_state = Some(JitState { backend_config, jit_module });
+ });
+
+ let f: extern "C" fn(c_int, *const *const c_char) -> c_int =
+ unsafe { ::std::mem::transmute(finalized_start) };
+
+ let (tx, rx) = mpsc::channel();
+ GLOBAL_MESSAGE_SENDER.set(Mutex::new(tx)).unwrap();
+
+ // Spawn the jitted runtime in a new thread so that this rustc thread can handle messages
+    // (e.g. to lazily JIT further functions as required)
+ std::thread::spawn(move || {
+ let mut argv = args.iter().map(|arg| arg.as_ptr()).collect::<Vec<_>>();
+
+ // Push a null pointer as a terminating argument. This is required by POSIX and
+ // useful as some dynamic linkers use it as a marker to jump over.
+ argv.push(std::ptr::null());
+
+ let ret = f(args.len() as c_int, argv.as_ptr());
+ std::process::exit(ret);
+ });
+
+ // Handle messages
+ loop {
+ match rx.recv().unwrap() {
+ // lazy JIT compilation request - compile requested instance and return pointer to result
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx } => {
+ tx.send(jit_fn(instance_ptr, trampoline_ptr))
+ .expect("jitted runtime hung up before response to lazy JIT request was sent");
+ }
+ }
+ }
+}
+
+extern "C" fn clif_jit_fn(
+ instance_ptr: *const Instance<'static>,
+ trampoline_ptr: *const u8,
+) -> *const u8 {
+ // send the JIT request to the rustc thread, with a channel for the response
+ let (tx, rx) = mpsc::channel();
+ UnsafeMessage::JitFn { instance_ptr, trampoline_ptr, tx }
+ .send()
+ .expect("rustc thread hung up before lazy JIT request was sent");
+
+ // block on JIT compilation result
+ rx.recv().expect("rustc thread hung up before responding to sent lazy JIT request")
+}
+
+fn jit_fn(instance_ptr: *const Instance<'static>, trampoline_ptr: *const u8) -> *const u8 {
+ rustc_middle::ty::tls::with(|tcx| {
+ // lift is used to ensure the correct lifetime for instance.
+ let instance = tcx.lift(unsafe { *instance_ptr }).unwrap();
+
+ LAZY_JIT_STATE.with(|lazy_jit_state| {
+ let mut lazy_jit_state = lazy_jit_state.borrow_mut();
+ let lazy_jit_state = lazy_jit_state.as_mut().unwrap();
+ let jit_module = &mut lazy_jit_state.jit_module;
+ let backend_config = lazy_jit_state.backend_config.clone();
+
+ let name = tcx.symbol_name(instance).name;
+ let sig = crate::abi::get_function_sig(tcx, jit_module.isa().triple(), instance);
+ let func_id = jit_module.declare_function(name, Linkage::Export, &sig).unwrap();
+
+ let current_ptr = jit_module.read_got_entry(func_id);
+
+ // If the function's GOT entry has already been updated to point at something other
+ // than the shim trampoline, don't re-jit but just return the new pointer instead.
+        // This does not need synchronization as this code is executed only by the single rustc
+ // thread.
+ if current_ptr != trampoline_ptr {
+ return current_ptr;
+ }
+
+ jit_module.prepare_for_function_redefine(func_id).unwrap();
+
+ let mut cx = crate::CodegenCx::new(
+ tcx,
+ backend_config,
+ jit_module.isa(),
+ false,
+ Symbol::intern("dummy_cgu_name"),
+ );
+ tcx.sess.time("codegen fn", || crate::base::codegen_fn(&mut cx, jit_module, instance));
+
+ assert!(cx.global_asm.is_empty());
+ jit_module.finalize_definitions();
+ unsafe { cx.unwind_context.register_jit(&jit_module) };
+ jit_module.get_finalized_function(func_id)
+ })
+ })
+}
+
+fn load_imported_symbols_for_jit(
+ sess: &Session,
+ crate_info: CrateInfo,
+) -> Vec<(String, *const u8)> {
+ use rustc_middle::middle::dependency_format::Linkage;
+
+ let mut dylib_paths = Vec::new();
+
+ let data = &crate_info
+ .dependency_formats
+ .iter()
+ .find(|(crate_type, _data)| *crate_type == rustc_session::config::CrateType::Executable)
+ .unwrap()
+ .1;
+ for &cnum in &crate_info.used_crates {
+ let src = &crate_info.used_crate_source[&cnum];
+ match data[cnum.as_usize() - 1] {
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static => {
+ let name = crate_info.crate_name[&cnum];
+ let mut err = sess.struct_err(&format!("Can't load static lib {}", name));
+ err.note("rustc_codegen_cranelift can only load dylibs in JIT mode.");
+ err.emit();
+ }
+ Linkage::Dynamic => {
+ dylib_paths.push(src.dylib.as_ref().unwrap().0.clone());
+ }
+ }
+ }
+
+ let mut imported_symbols = Vec::new();
+ for path in dylib_paths {
+ use object::{Object, ObjectSymbol};
+ let lib = libloading::Library::new(&path).unwrap();
+ let obj = std::fs::read(path).unwrap();
+ let obj = object::File::parse(&*obj).unwrap();
+ imported_symbols.extend(obj.dynamic_symbols().filter_map(|symbol| {
+ let name = symbol.name().unwrap().to_string();
+ if name.is_empty() || !symbol.is_global() || symbol.is_undefined() {
+ return None;
+ }
+ if name.starts_with("rust_metadata_") {
+ // The metadata is part of a section that is not loaded by the dynamic linker in
+                // the case of cg_llvm.
+ return None;
+ }
+ let dlsym_name = if cfg!(target_os = "macos") {
+ // On macOS `dlsym` expects the name without leading `_`.
+ assert!(name.starts_with('_'), "{:?}", name);
+ &name[1..]
+ } else {
+ &name
+ };
+ let symbol: libloading::Symbol<'_, *const u8> =
+ unsafe { lib.get(dlsym_name.as_bytes()) }.unwrap();
+ Some((name, *symbol))
+ }));
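+        // Leak the library handle so it stays loaded for the lifetime of the process
+        // and the symbol addresses collected above remain valid.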
+ std::mem::forget(lib)
+ }
+
+ sess.abort_if_errors();
+
+ imported_symbols
+}
+
+fn codegen_shim<'tcx>(cx: &mut CodegenCx<'tcx>, module: &mut JITModule, inst: Instance<'tcx>) {
+ let tcx = cx.tcx;
+
+ let pointer_type = module.target_config().pointer_type();
+
+ let name = tcx.symbol_name(inst).name;
+ let sig = crate::abi::get_function_sig(tcx, module.isa().triple(), inst);
+ let func_id = module.declare_function(name, Linkage::Export, &sig).unwrap();
+
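+    // Leak the instance so the trampoline can embed a pointer to it that remains
+    // valid for as long as the JIT is running.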
+ let instance_ptr = Box::into_raw(Box::new(inst));
+
+ let jit_fn = module
+ .declare_function(
+ "__clif_jit_fn",
+ Linkage::Import,
+ &Signature {
+ call_conv: module.target_config().default_call_conv,
+ params: vec![AbiParam::new(pointer_type), AbiParam::new(pointer_type)],
+ returns: vec![AbiParam::new(pointer_type)],
+ },
+ )
+ .unwrap();
+
+ cx.cached_context.clear();
+ let trampoline = &mut cx.cached_context.func;
+ trampoline.signature = sig.clone();
+
+ let mut builder_ctx = FunctionBuilderContext::new();
+ let mut trampoline_builder = FunctionBuilder::new(trampoline, &mut builder_ctx);
+
+ let trampoline_fn = module.declare_func_in_func(func_id, trampoline_builder.func);
+ let jit_fn = module.declare_func_in_func(jit_fn, trampoline_builder.func);
+ let sig_ref = trampoline_builder.func.import_signature(sig);
+
+ let entry_block = trampoline_builder.create_block();
+ trampoline_builder.append_block_params_for_function_params(entry_block);
+ let fn_args = trampoline_builder.func.dfg.block_params(entry_block).to_vec();
+
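+    // The trampoline calls __clif_jit_fn with the instance and its own address, then
+    // calls the returned (now compiled) function with the original arguments and
+    // forwards its return values.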
+ trampoline_builder.switch_to_block(entry_block);
+ let instance_ptr = trampoline_builder.ins().iconst(pointer_type, instance_ptr as u64 as i64);
+ let trampoline_ptr = trampoline_builder.ins().func_addr(pointer_type, trampoline_fn);
+ let jitted_fn = trampoline_builder.ins().call(jit_fn, &[instance_ptr, trampoline_ptr]);
+ let jitted_fn = trampoline_builder.func.dfg.inst_results(jitted_fn)[0];
+ let call_inst = trampoline_builder.ins().call_indirect(sig_ref, jitted_fn, &fn_args);
+ let ret_vals = trampoline_builder.func.dfg.inst_results(call_inst).to_vec();
+ trampoline_builder.ins().return_(&ret_vals);
+
+ module.define_function(func_id, &mut cx.cached_context).unwrap();
+}
diff --git a/compiler/rustc_codegen_cranelift/src/driver/mod.rs b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
new file mode 100644
index 000000000..8f5714ecb
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/driver/mod.rs
@@ -0,0 +1,53 @@
+//! Drivers are responsible for calling [`codegen_fn`] or [`codegen_static`] for each mono item and
+//! performing any further actions such as JIT execution or writing object files.
+//!
+//! [`codegen_fn`]: crate::base::codegen_fn
+//! [`codegen_static`]: crate::constant::codegen_static
+
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+
+pub(crate) mod aot;
+#[cfg(feature = "jit")]
+pub(crate) mod jit;
+
+fn predefine_mono_items<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ module: &mut dyn Module,
+ mono_items: &[(MonoItem<'tcx>, (RLinkage, Visibility))],
+) {
+ tcx.sess.time("predefine functions", || {
+ let is_compiler_builtins = tcx.is_compiler_builtins(LOCAL_CRATE);
+ for &(mono_item, (linkage, visibility)) in mono_items {
+ match mono_item {
+ MonoItem::Fn(instance) => {
+ let name = tcx.symbol_name(instance).name;
+ let _inst_guard = crate::PrintOnPanic(|| format!("{:?} {}", instance, name));
+ let sig = get_function_sig(tcx, module.isa().triple(), instance);
+ let linkage = crate::linkage::get_clif_linkage(
+ mono_item,
+ linkage,
+ visibility,
+ is_compiler_builtins,
+ );
+ module.declare_function(name, linkage, &sig).unwrap();
+ }
+ MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
+ }
+ }
+ });
+}
+
+fn time<R>(tcx: TyCtxt<'_>, display: bool, name: &'static str, f: impl FnOnce() -> R) -> R {
+ if display {
+ println!("[{:<30}: {}] start", tcx.crate_name(LOCAL_CRATE), name);
+ let before = std::time::Instant::now();
+ let res = tcx.sess.time(name, f);
+ let after = std::time::Instant::now();
+ println!("[{:<30}: {}] end time: {:?}", tcx.crate_name(LOCAL_CRATE), name, after - before);
+ res
+ } else {
+ tcx.sess.time(name, f)
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/inline_asm.rs b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
new file mode 100644
index 000000000..241de5e36
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/inline_asm.rs
@@ -0,0 +1,677 @@
+//! Codegen of `asm!` invocations.
+
+use crate::prelude::*;
+
+use std::fmt::Write;
+
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_middle::mir::InlineAsmOperand;
+use rustc_span::sym;
+use rustc_target::asm::*;
+
+pub(crate) fn codegen_inline_asm<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ _span: Span,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperand<'tcx>],
+ options: InlineAsmOptions,
+) {
+ // FIXME add .eh_frame unwind info directives
+
+ if !template.is_empty() {
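+        // Special-case a few known templates instead of assembling them: `int $$0x29`
+        // (the Windows fast-fail interrupt) is lowered to a trap, and the
+        // rbx-save/cpuid/rbx-restore pattern is lowered to a Cranelift-level cpuid
+        // implementation.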
+ if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
+ let true_ = fx.bcx.ins().iconst(types::I32, 1);
+ fx.bcx.ins().trapnz(true_, TrapCode::User(1));
+ return;
+ } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
+ && matches!(
+ template[1],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
+ && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
+ && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
+ && matches!(
+ template[6],
+ InlineAsmTemplatePiece::Placeholder {
+ operand_idx: 0,
+ modifier: Some('r'),
+ span: _
+ }
+ )
+ {
+ assert_eq!(operands.len(), 4);
+ let (leaf, eax_place) = match operands[1] {
+ InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
+ assert_eq!(
+ reg,
+ InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
+ );
+ (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place.unwrap()),
+ )
+ }
+ _ => unreachable!(),
+ };
+ let ebx_place = match operands[0] {
+ InlineAsmOperand::Out { reg, late: true, place } => {
+ assert_eq!(
+ reg,
+ InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::reg
+ ))
+ );
+ crate::base::codegen_place(fx, place.unwrap())
+ }
+ _ => unreachable!(),
+ };
+ let (sub_leaf, ecx_place) = match operands[2] {
+ InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
+ assert_eq!(
+ reg,
+ InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
+ );
+ (
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ crate::base::codegen_place(fx, out_place.unwrap()),
+ )
+ }
+ _ => unreachable!(),
+ };
+ let edx_place = match operands[3] {
+ InlineAsmOperand::Out { reg, late: true, place } => {
+ assert_eq!(
+ reg,
+ InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
+ );
+ crate::base::codegen_place(fx, place.unwrap())
+ }
+ _ => unreachable!(),
+ };
+
+ let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
+
+ eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
+ ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
+ ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
+ edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
+ return;
+ } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
+ // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
+ crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
+ } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
+ crate::trap::trap_unimplemented(fx, "Alloca is not supported");
+ }
+ }
+
+ let mut inputs = Vec::new();
+ let mut outputs = Vec::new();
+
+ let mut asm_gen = InlineAssemblyGenerator {
+ tcx: fx.tcx,
+ arch: fx.tcx.sess.asm_arch.unwrap(),
+ enclosing_def_id: fx.instance.def_id(),
+ template,
+ operands,
+ options,
+ registers: Vec::new(),
+ stack_slots_clobber: Vec::new(),
+ stack_slots_input: Vec::new(),
+ stack_slots_output: Vec::new(),
+ stack_slot_size: Size::from_bytes(0),
+ };
+ asm_gen.allocate_registers();
+ asm_gen.allocate_stack_slots();
+
+ let inline_asm_index = fx.cx.inline_asm_index.get();
+ fx.cx.inline_asm_index.set(inline_asm_index + 1);
+ let asm_name = format!(
+ "__inline_asm_{}_n{}",
+ fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
+ inline_asm_index
+ );
+
+ let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
+ fx.cx.global_asm.push_str(&generated_asm);
+
+ for (i, operand) in operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg: _, ref value } => {
+ inputs.push((
+ asm_gen.stack_slots_input[i].unwrap(),
+ crate::base::codegen_operand(fx, value).load_scalar(fx),
+ ));
+ }
+ InlineAsmOperand::Out { reg: _, late: _, place } => {
+ if let Some(place) = place {
+ outputs.push((
+ asm_gen.stack_slots_output[i].unwrap(),
+ crate::base::codegen_place(fx, place),
+ ));
+ }
+ }
+ InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
+ inputs.push((
+ asm_gen.stack_slots_input[i].unwrap(),
+ crate::base::codegen_operand(fx, in_value).load_scalar(fx),
+ ));
+ if let Some(out_place) = out_place {
+ outputs.push((
+ asm_gen.stack_slots_output[i].unwrap(),
+ crate::base::codegen_place(fx, out_place),
+ ));
+ }
+ }
+ InlineAsmOperand::Const { value: _ } => todo!(),
+ InlineAsmOperand::SymFn { value: _ } => todo!(),
+ InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
+ }
+ }
+
+ call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
+}
+
+struct InlineAssemblyGenerator<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ arch: InlineAsmArch,
+ enclosing_def_id: DefId,
+ template: &'a [InlineAsmTemplatePiece],
+ operands: &'a [InlineAsmOperand<'tcx>],
+ options: InlineAsmOptions,
+ registers: Vec<Option<InlineAsmReg>>,
+ stack_slots_clobber: Vec<Option<Size>>,
+ stack_slots_input: Vec<Option<Size>>,
+ stack_slots_output: Vec<Option<Size>>,
+ stack_slot_size: Size,
+}
+
+impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
+ fn allocate_registers(&mut self) {
+ let sess = self.tcx.sess;
+ let map = allocatable_registers(
+ self.arch,
+ sess.relocation_model(),
+ self.tcx.asm_target_features(self.enclosing_def_id),
+ &sess.target,
+ );
+ let mut allocated = FxHashMap::<_, (bool, bool)>::default();
+ let mut regs = vec![None; self.operands.len()];
+
+ // Add explicit registers to the allocated set.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().0 = true;
+ }
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
+ } => {
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().1 = true;
+ }
+ InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
+ | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
+ regs[i] = Some(reg);
+ allocated.insert(reg, (true, true));
+ }
+ _ => (),
+ }
+ }
+
+ // Allocate out/inout/inlateout registers first because they are more constrained.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::RegClass(class),
+ late: false,
+ ..
+ }
+ | InlineAsmOperand::InOut {
+ reg: InlineAsmRegOrRegClass::RegClass(class), ..
+ } => {
+ let mut alloc_reg = None;
+ for &reg in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.contains_key(&r) {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.insert(reg, (true, true));
+ }
+ _ => (),
+ }
+ }
+
+ // Allocate in/lateout.
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
+ let mut alloc_reg = None;
+ for &reg in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.get(&r).copied().unwrap_or_default().0 {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().0 = true;
+ }
+ InlineAsmOperand::Out {
+ reg: InlineAsmRegOrRegClass::RegClass(class),
+ late: true,
+ ..
+ } => {
+ let mut alloc_reg = None;
+ for &reg in &map[&class] {
+ let mut used = false;
+ reg.overlapping_regs(|r| {
+ if allocated.get(&r).copied().unwrap_or_default().1 {
+ used = true;
+ }
+ });
+
+ if !used {
+ alloc_reg = Some(reg);
+ break;
+ }
+ }
+
+ let reg = alloc_reg.expect("cannot allocate registers");
+ regs[i] = Some(reg);
+ allocated.entry(reg).or_default().1 = true;
+ }
+ _ => (),
+ }
+ }
+
+ self.registers = regs;
+ }
+
+ fn allocate_stack_slots(&mut self) {
+ let mut slot_size = Size::from_bytes(0);
+ let mut slots_clobber = vec![None; self.operands.len()];
+ let mut slots_input = vec![None; self.operands.len()];
+ let mut slots_output = vec![None; self.operands.len()];
+
+ let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
+ let reg_size =
+ reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
+ let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
+ let offset = slot_size.align_to(align);
+ *slot_size = offset + reg_size;
+ offset
+ };
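+ // Slot offsets are aligned to the register size, e.g. an 8 byte gpr slot
+ // followed by two 16 byte xmm slots yields offsets 0, 16 and 32 (the first
+ // xmm slot is padded up to its 16 byte alignment).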
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for saving clobbered registers
+ let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
+ .unwrap()
+ .clobbered_regs();
+ for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
+ let mut need_save = true;
+ // If the register overlaps with a register clobbered by the function call,
+ // then we don't need to save it.
+ for r in abi_clobber {
+ r.overlapping_regs(|r| {
+ if r == reg {
+ need_save = false;
+ }
+ });
+
+ if !need_save {
+ break;
+ }
+ }
+
+ if need_save {
+ slots_clobber[i] = Some(new_slot(reg.reg_class()));
+ }
+ }
+
+ // Allocate stack slots for inout
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
+ let slot = new_slot(reg.reg_class());
+ slots_input[i] = Some(slot);
+ slots_output[i] = Some(slot);
+ }
+ _ => (),
+ }
+ }
+
+ let slot_size_before_input = slot_size;
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
+
+ // Allocate stack slots for input
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::In { reg, .. }
+ | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
+ slots_input[i] = Some(new_slot(reg.reg_class()));
+ }
+ _ => (),
+ }
+ }
+
+ // Reset slot_size to its value from before the input slots were allocated, so
+ // that input and output operands can overlap and save some memory.
+ let slot_size_after_input = slot_size;
+ slot_size = slot_size_before_input;
+ let mut new_slot = |x| new_slot_fn(&mut slot_size, x);
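+ // For example, given one `inout` and one `in` followed by one `out` operand
+ // of the same register class: the inout slot is shared between input and
+ // output, the in slot comes after it, and after the reset the out slot
+ // reuses the in slot's offset.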
+
+ // Allocate stack slots for output
+ for (i, operand) in self.operands.iter().enumerate() {
+ match *operand {
+ InlineAsmOperand::Out { reg, place: Some(_), .. } => {
+ slots_output[i] = Some(new_slot(reg.reg_class()));
+ }
+ _ => (),
+ }
+ }
+
+ slot_size = slot_size.max(slot_size_after_input);
+
+ self.stack_slots_clobber = slots_clobber;
+ self.stack_slots_input = slots_input;
+ self.stack_slots_output = slots_output;
+ self.stack_slot_size = slot_size;
+ }
+
+ fn generate_asm_wrapper(&self, asm_name: &str) -> String {
+ let mut generated_asm = String::new();
+ writeln!(generated_asm, ".globl {}", asm_name).unwrap();
+ writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
+ writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
+ writeln!(generated_asm, "{}:", asm_name).unwrap();
+
+ let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
+
+ if is_x86 {
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ }
+ Self::prologue(&mut generated_asm, self.arch);
+
+ // Save clobbered registers
+ if !self.options.contains(InlineAsmOptions::NORETURN) {
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_clobber.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::save_register(&mut generated_asm, self.arch, reg, slot);
+ }
+ }
+
+ // Write input registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_input.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".att_syntax\n");
+ }
+
+ // The actual inline asm
+ for piece in self.template {
+ match piece {
+ InlineAsmTemplatePiece::String(s) => {
+ generated_asm.push_str(s);
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+ if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push('%');
+ }
+ self.registers[*operand_idx]
+ .unwrap()
+ .emit(&mut generated_asm, self.arch, *modifier)
+ .unwrap();
+ }
+ }
+ }
+ generated_asm.push('\n');
+
+ if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
+ generated_asm.push_str(".intel_syntax noprefix\n");
+ }
+
+ if !self.options.contains(InlineAsmOptions::NORETURN) {
+ // Read output registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_output.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::save_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ // Restore clobbered registers
+ for (reg, slot) in self
+ .registers
+ .iter()
+ .zip(self.stack_slots_clobber.iter().copied())
+ .filter_map(|(r, s)| r.zip(s))
+ {
+ Self::restore_register(&mut generated_asm, self.arch, reg, slot);
+ }
+
+ Self::epilogue(&mut generated_asm, self.arch);
+ } else {
+ Self::epilogue_noreturn(&mut generated_asm, self.arch);
+ }
+
+ if is_x86 {
+ generated_asm.push_str(".att_syntax\n");
+ }
+ writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
+ generated_asm.push_str(".text\n");
+ generated_asm.push_str("\n\n");
+
+ generated_asm
+ }
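+
+ // Illustrative sketch of a generated wrapper (assuming x86_64, one clobber
+ // save and one input/output pair; the name, registers and offsets are made
+ // up for the example):
+ //
+ //     .globl __inline_asm_example_n0
+ //     __inline_asm_example_n0:
+ //     .intel_syntax noprefix
+ //         push rbp
+ //         mov rbp,rdi              # rdi points to the scratch slot
+ //         mov [rbp+0x0], rbx       # save clobbered register
+ //         mov rax, [rbp+0x8]       # load input operand
+ //         <user asm template>
+ //         mov [rbp+0x10], rax      # store output operand
+ //         mov rbx, [rbp+0x0]       # restore clobbered register
+ //         pop rbp
+ //         ret
+ //     .att_syntax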
+
+ fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" push ebp\n");
+ generated_asm.push_str(" mov ebp,[esp+8]\n");
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" push rbp\n");
+ generated_asm.push_str(" mov rbp,rdi\n");
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" addi sp, sp, -8\n");
+ generated_asm.push_str(" sw ra, 4(sp)\n");
+ generated_asm.push_str(" sw s0, 0(sp)\n");
+ generated_asm.push_str(" mv s0, a0\n");
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" addi sp, sp, -16\n");
+ generated_asm.push_str(" sd ra, 8(sp)\n");
+ generated_asm.push_str(" sd s0, 0(sp)\n");
+ generated_asm.push_str(" mv s0, a0\n");
+ }
+ _ => unimplemented!("prologue for {:?}", arch),
+ }
+ }
+
+ fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" pop ebp\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" pop rbp\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" lw s0, 0(sp)\n");
+ generated_asm.push_str(" lw ra, 4(sp)\n");
+ generated_asm.push_str(" addi sp, sp, 8\n");
+ generated_asm.push_str(" ret\n");
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ld s0, 0(sp)\n");
+ generated_asm.push_str(" ld ra, 8(sp)\n");
+ generated_asm.push_str(" addi sp, sp, 16\n");
+ generated_asm.push_str(" ret\n");
+ }
+ _ => unimplemented!("epilogue for {:?}", arch),
+ }
+ }
+
+ fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
+ match arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" ud2\n");
+ }
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ebreak\n");
+ }
+ _ => unimplemented!("epilogue_noreturn for {:?}", arch),
+ }
+ }
+
+ fn save_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+ ) {
+ match arch {
+ InlineAsmArch::X86 => {
+ write!(generated_asm, " mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
+ generated_asm.push('\n');
+ }
+ InlineAsmArch::X86_64 => {
+ write!(generated_asm, " mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ generated_asm.push('\n');
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" sw ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" sd ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ _ => unimplemented!("save_register for {:?}", arch),
+ }
+ }
+
+ fn restore_register(
+ generated_asm: &mut String,
+ arch: InlineAsmArch,
+ reg: InlineAsmReg,
+ offset: Size,
+ ) {
+ match arch {
+ InlineAsmArch::X86 => {
+ generated_asm.push_str(" mov ");
+ reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
+ writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::X86_64 => {
+ generated_asm.push_str(" mov ");
+ reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
+ writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV32 => {
+ generated_asm.push_str(" lw ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ InlineAsmArch::RiscV64 => {
+ generated_asm.push_str(" ld ");
+ reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
+ writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
+ }
+ _ => unimplemented!("restore_register for {:?}", arch),
+ }
+ }
+}
+
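+/// Calls the wrapper generated by `generate_asm_wrapper`: all inputs are
+/// spilled into a single scratch stack slot, a pointer to that slot is passed
+/// as the only argument, and outputs are read back from the slot afterwards.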
+fn call_inline_asm<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ asm_name: &str,
+ slot_size: Size,
+ inputs: Vec<(Size, Value)>,
+ outputs: Vec<(Size, CPlace<'tcx>)>,
+) {
+ let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ size: u32::try_from(slot_size.bytes()).unwrap(),
+ });
+ if fx.clif_comments.enabled() {
+ fx.add_comment(stack_slot, "inline asm scratch slot");
+ }
+
+ let inline_asm_func = fx
+ .module
+ .declare_function(
+ asm_name,
+ Linkage::Import,
+ &Signature {
+ call_conv: CallConv::SystemV,
+ params: vec![AbiParam::new(fx.pointer_type)],
+ returns: vec![],
+ },
+ )
+ .unwrap();
+ let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(inline_asm_func, asm_name);
+ }
+
+ for (offset, value) in inputs {
+ fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ }
+
+ let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
+ fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);
+
+ for (offset, place) in outputs {
+ let ty = fx.clif_type(place.layout().ty).unwrap();
+ let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
+ place.write_cvalue(fx, CValue::by_val(value, place.layout()));
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
new file mode 100644
index 000000000..d02dfd93c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/cpuid.rs
@@ -0,0 +1,74 @@
+//! Emulation of a subset of the cpuid x86 instruction.
+
+use crate::prelude::*;
+
+/// Emulates a subset of the cpuid x86 instruction.
+///
+/// This emulates an Intel CPU with SSE and SSE2 support, but no other features.
+pub(crate) fn codegen_cpuid_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ leaf: Value,
+ _sub_leaf: Value,
+) -> (Value, Value, Value, Value) {
+ let leaf_0 = fx.bcx.create_block();
+ let leaf_1 = fx.bcx.create_block();
+ let leaf_7 = fx.bcx.create_block();
+ let leaf_8000_0000 = fx.bcx.create_block();
+ let leaf_8000_0001 = fx.bcx.create_block();
+ let unsupported_leaf = fx.bcx.create_block();
+
+ let dest = fx.bcx.create_block();
+ let eax = fx.bcx.append_block_param(dest, types::I32);
+ let ebx = fx.bcx.append_block_param(dest, types::I32);
+ let ecx = fx.bcx.append_block_param(dest, types::I32);
+ let edx = fx.bcx.append_block_param(dest, types::I32);
+
+ let mut switch = cranelift_frontend::Switch::new();
+ switch.set_entry(0, leaf_0);
+ switch.set_entry(1, leaf_1);
+ switch.set_entry(7, leaf_7);
+ switch.set_entry(0x8000_0000, leaf_8000_0000);
+ switch.set_entry(0x8000_0001, leaf_8000_0001);
+ switch.emit(&mut fx.bcx, leaf, unsupported_leaf);
+
+ fx.bcx.switch_to_block(leaf_0);
+ let max_basic_leaf = fx.bcx.ins().iconst(types::I32, 1);
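+ // The vendor string "GenuineIntel" is returned as "Genu" in ebx, "ineI" in
+ // edx and "ntel" in ecx; vend0/vend1/vend2 are named in register order
+ // (ebx, ecx, edx), which is why vend2 is built before vend1 below.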
+ let vend0 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"Genu")));
+ let vend2 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ineI")));
+ let vend1 = fx.bcx.ins().iconst(types::I32, i64::from(u32::from_le_bytes(*b"ntel")));
+ fx.bcx.ins().jump(dest, &[max_basic_leaf, vend0, vend1, vend2]);
+
+ fx.bcx.switch_to_block(leaf_1);
+ let cpu_signature = fx.bcx.ins().iconst(types::I32, 0);
+ let additional_information = fx.bcx.ins().iconst(types::I32, 0);
+ let ecx_features = fx.bcx.ins().iconst(types::I32, 0);
+ let edx_features = fx.bcx.ins().iconst(types::I32, 1 << 25 /* sse */ | 1 << 26 /* sse2 */);
+ fx.bcx.ins().jump(dest, &[cpu_signature, additional_information, ecx_features, edx_features]);
+
+ fx.bcx.switch_to_block(leaf_7);
+ // This leaf technically has subleaves, but we just return zero for all subleaves.
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ fx.bcx.ins().jump(dest, &[zero, zero, zero, zero]);
+
+ fx.bcx.switch_to_block(leaf_8000_0000);
+ let extended_max_basic_leaf = fx.bcx.ins().iconst(types::I32, 0);
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ fx.bcx.ins().jump(dest, &[extended_max_basic_leaf, zero, zero, zero]);
+
+ fx.bcx.switch_to_block(leaf_8000_0001);
+ let zero = fx.bcx.ins().iconst(types::I32, 0);
+ let proc_info_ecx = fx.bcx.ins().iconst(types::I32, 0);
+ let proc_info_edx = fx.bcx.ins().iconst(types::I32, 0);
+ fx.bcx.ins().jump(dest, &[zero, zero, proc_info_ecx, proc_info_edx]);
+
+ fx.bcx.switch_to_block(unsupported_leaf);
+ crate::trap::trap_unreachable(
+ fx,
+ "__cpuid_count arch intrinsic doesn't yet support specified leaf",
+ );
+
+ fx.bcx.switch_to_block(dest);
+ fx.bcx.ins().nop();
+
+ (eax, ebx, ecx, edx)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
new file mode 100644
index 000000000..869670c8c
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm.rs
@@ -0,0 +1,192 @@
+//! Emulate LLVM intrinsics
+
+use crate::intrinsics::*;
+use crate::prelude::*;
+
+use rustc_middle::ty::subst::SubstsRef;
+
+pub(crate) fn codegen_llvm_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: &str,
+ _substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ target: Option<BasicBlock>,
+) {
+ match intrinsic {
+ // Used by `_mm_movemask_epi8`, `_mm256_movemask_epi8` and `_mm_movemask_pd`
+ "llvm.x86.sse2.pmovmskb.128" | "llvm.x86.avx2.pmovmskb" | "llvm.x86.sse2.movmsk.pd" => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_ty = fx.clif_type(lane_ty).unwrap();
+ assert!(lane_count <= 32);
+
+ let mut res = fx.bcx.ins().iconst(types::I32, 0);
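+ // Lanes are visited in reverse while shifting left, so the sign bit of
+ // lane 0 ends up in bit 0 of the result (bit i == sign of lane i).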
+
+ for lane in (0..lane_count).rev() {
+ let a_lane =
+ a.value_field(fx, mir::Field::new(lane.try_into().unwrap())).load_scalar(fx);
+
+ // cast float to int
+ let a_lane = match lane_ty {
+ types::F32 => fx.bcx.ins().bitcast(types::I32, a_lane),
+ types::F64 => fx.bcx.ins().bitcast(types::I64, a_lane),
+ _ => a_lane,
+ };
+
+ // extract sign bit of an int
+ let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_ty.bits() - 1));
+
+ // shift sign bit into result
+ let a_lane_sign = clif_intcast(fx, a_lane_sign, types::I32, false);
+ res = fx.bcx.ins().ishl_imm(res, 1);
+ res = fx.bcx.ins().bor(res, a_lane_sign);
+ }
+
+ let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
+ ret.write_cvalue(fx, res);
+ }
+ "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+ let (x, y, kind) = match args {
+ [x, y, kind] => (x, y, kind),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let x = codegen_operand(fx, x);
+ let y = codegen_operand(fx, y);
+ let kind = crate::constant::mir_operand_get_const_val(fx, kind)
+ .expect("llvm.x86.sse2.cmp.* kind not const");
+
+ let flt_cc = match kind
+ .try_to_bits(Size::from_bytes(1))
+ .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+ {
+ 0 => FloatCC::Equal,
+ 1 => FloatCC::LessThan,
+ 2 => FloatCC::LessThanOrEqual,
+ 7 => FloatCC::Ordered,
+ 3 => FloatCC::Unordered,
+ 4 => FloatCC::NotEqual,
+ 5 => FloatCC::UnorderedOrGreaterThanOrEqual,
+ 6 => FloatCC::UnorderedOrGreaterThan,
+ kind => unreachable!("kind {:?}", kind),
+ };
+
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+ let res_lane = match lane_ty.kind() {
+ ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_ty),
+ };
+ bool_to_zero_or_max_uint(fx, res_lane_ty, res_lane)
+ });
+ }
+ "llvm.x86.sse2.psrli.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.pslli.d" => {
+ let (a, imm8) = match args {
+ [a, imm8] => (a, imm8),
+ _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+ };
+ let a = codegen_operand(fx, a);
+ let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+ .expect("llvm.x86.sse2.psrli.d imm8 not const");
+
+ simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+ .try_to_bits(Size::from_bytes(4))
+ .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+ {
+ imm8 if imm8 < 32 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+ _ => fx.bcx.ins().iconst(types::I32, 0),
+ });
+ }
+ "llvm.x86.sse2.storeu.dq" => {
+ intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
+ let mem_addr = mem_addr.load_scalar(fx);
+
+ // FIXME correctly handle the potentially unaligned store
+ let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
+ dest.write_cvalue(fx, a);
+ }
+ "llvm.x86.addcarry.64" => {
+ intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
+ let c_in = c_in.load_scalar(fx);
+
+ llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+ }
+ "llvm.x86.subborrow.64" => {
+ intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
+ let b_in = b_in.load_scalar(fx);
+
+ llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+ }
+ _ => {
+ fx.tcx
+ .sess
+ .warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+ crate::trap::trap_unimplemented(fx, intrinsic);
+ }
+ }
+
+ let dest = target.expect("all llvm intrinsics used by stdlib should return");
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
+}
+
+// Other LLVM intrinsics that are not yet implemented (these currently hit the
+// trap fallback above):
+// llvm.x86.avx2.vperm2i128
+// llvm.x86.ssse3.pshuf.b.128
+// llvm.x86.avx2.pshuf.b
+// llvm.x86.avx2.psrli.w
+// llvm.x86.sse2.psrli.w
+
+fn llvm_add_sub<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ ret: CPlace<'tcx>,
+ cb_in: Value,
+ a: CValue<'tcx>,
+ b: CValue<'tcx>,
+) {
+ assert_eq!(
+ a.layout().ty,
+ fx.tcx.types.u64,
+ "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
+ );
+ assert_eq!(
+ b.layout().ty,
+ fx.tcx.types.u64,
+ "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
+ );
+
+ // a op b -> intermediate result c and the first intermediate carry/borrow
+ let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
+ let c = int0.value_field(fx, mir::Field::new(0));
+ let cb0 = int0.value_field(fx, mir::Field::new(1)).load_scalar(fx);
+
+ // c op carry_in -> final result c and the second intermediate carry/borrow
+ let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
+ let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
+ let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+ let (c, cb1) = int1.load_scalar_pair(fx);
+
+ // carry0 | carry1 -> combined carry or borrow out
+ let cb_out = fx.bcx.ins().bor(cb0, cb1);
+
+ let layout = fx.layout_of(fx.tcx.mk_tup([fx.tcx.types.u8, fx.tcx.types.u64].iter()));
+ let val = CValue::by_val_pair(cb_out, c, layout);
+ ret.write_cvalue(fx, val);
+}
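+
+// A minimal standalone sketch (illustration only, not part of the backend) of
+// the two-step carry chain above, expressed with plain integer ops:
+//
+//     fn addcarry_u64_sketch(c_in: u8, a: u64, b: u64) -> (u8, u64) {
+//         let (sum0, carry0) = a.overflowing_add(b);                  // a + b
+//         let (sum1, carry1) = sum0.overflowing_add(u64::from(c_in)); // + carry-in
+//         // `|` suffices here: the two partial carries can never both be set.
+//         (u8::from(carry0 | carry1), sum1)
+//     }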
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
new file mode 100644
index 000000000..b2a83e1d4
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -0,0 +1,1292 @@
+//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
+//! and LLVM intrinsics that have symbol names starting with `llvm.`.
+
+macro_rules! intrinsic_args {
+ ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
+ #[allow(unused_parens)]
+ let ($($arg),*) = if let [$($arg),*] = $args {
+ ($(codegen_operand($fx, $arg)),*)
+ } else {
+ $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
+ };
+ }
+}
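+
+// For illustration, `intrinsic_args!(fx, args => (a, b); intrinsic)` expands
+// to roughly:
+//
+//     #[allow(unused_parens)]
+//     let (a, b) = if let [a, b] = args {
+//         (codegen_operand(fx, a), codegen_operand(fx, b))
+//     } else {
+//         crate::intrinsics::bug_on_incorrect_arg_count(intrinsic);
+//     };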
+
+mod cpuid;
+mod llvm;
+mod simd;
+
+pub(crate) use cpuid::codegen_cpuid_call;
+pub(crate) use llvm::codegen_llvm_intrinsic_call;
+
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::symbol::{kw, sym, Symbol};
+
+use crate::prelude::*;
+use cranelift_codegen::ir::AtomicRmwOp;
+
+fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
+ bug!("wrong number of args for intrinsic {}", intrinsic);
+}
+
+fn report_atomic_type_validation_error<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ span: Span,
+ ty: Ty<'tcx>,
+) {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
+ intrinsic, ty
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+}
+
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+ let (element, count) = match layout.abi {
+ Abi::Vector { element, count } => (element, count),
+ _ => unreachable!(),
+ };
+
+ match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
+ // Cranelift currently only implements icmp for 128bit vectors.
+ Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
+ _ => None,
+ }
+}
+
+fn simd_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
+) {
+ let layout = val.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_pair_for_each_lane<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ x: CValue<'tcx>,
+ y: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+ let ret_lane_layout = fx.layout_of(ret_lane_ty);
+ assert_eq!(lane_count, ret_lane_count);
+
+ for lane_idx in 0..lane_count {
+ let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
+ let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
+
+ let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
+ let res_lane = CValue::by_val(res_lane, ret_lane_layout);
+
+ ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
+ }
+}
+
+fn simd_reduce<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ acc: Option<Value>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert_eq!(lane_layout, ret.layout());
+
+ let (mut res_val, start_lane) =
+ if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
+ for lane_idx in start_lane..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ res_val = f(fx, lane_layout.ty, res_val, lane);
+ }
+ let res = CValue::by_val(res_val, lane_layout);
+ ret.write_cvalue(fx, res);
+}
+
+// FIXME move all uses to `simd_reduce`
+fn simd_reduce_bool<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ val: CValue<'tcx>,
+ ret: CPlace<'tcx>,
+ f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
+) {
+ let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
+ assert!(ret.layout().ty.is_bool());
+
+ let res_val = val.value_lane(fx, 0).load_scalar(fx);
+ let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
+ for lane_idx in 1..lane_count {
+ let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
+ let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
+ res_val = f(fx, res_val, lane);
+ }
+ let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
+ fx.bcx.ins().ireduce(types::I8, res_val)
+ } else {
+ res_val
+ };
+ let res = CValue::by_val(res_val, ret.layout());
+ ret.write_cvalue(fx, res);
+}
+
+fn bool_to_zero_or_max_uint<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ val: Value,
+) -> Value {
+ let ty = fx.clif_type(ty).unwrap();
+
+ let int_ty = match ty {
+ types::F32 => types::I32,
+ types::F64 => types::I64,
+ ty => ty,
+ };
+
+ let val = fx.bcx.ins().bint(int_ty, val);
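+ // `bint` yields 0 or 1; negating maps 1 to -1, i.e. all bits set.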
+ let mut res = fx.bcx.ins().ineg(val);
+
+ if ty.is_float() {
+ res = fx.bcx.ins().bitcast(ty, res);
+ }
+
+ res
+}
+
+pub(crate) fn codegen_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: CPlace<'tcx>,
+ target: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let intrinsic = fx.tcx.item_name(instance.def_id());
+ let substs = instance.substs;
+
+ let target = if let Some(target) = target {
+ target
+ } else {
+ // Insert non-returning intrinsics here
+ match intrinsic {
+ sym::abort => {
+ fx.bcx.ins().trap(TrapCode::User(0));
+ }
+ sym::transmute => {
+ crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
+ }
+ _ => unimplemented!("unsupported instrinsic {}", intrinsic),
+ }
+ return;
+ };
+
+ if intrinsic.as_str().starts_with("simd_") {
+ self::simd::codegen_simd_intrinsic_call(
+ fx,
+ intrinsic,
+ substs,
+ args,
+ destination,
+ source_info.span,
+ );
+ let ret_block = fx.get_block(target);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
+ let ret_block = fx.get_block(target);
+ fx.bcx.ins().jump(ret_block, &[]);
+ } else {
+ codegen_regular_intrinsic_call(
+ fx,
+ instance,
+ intrinsic,
+ substs,
+ args,
+ destination,
+ Some(target),
+ source_info,
+ );
+ }
+}
+
+fn codegen_float_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+) -> bool {
+ let (name, arg_count, ty) = match intrinsic {
+ sym::expf32 => ("expf", 1, fx.tcx.types.f32),
+ sym::expf64 => ("exp", 1, fx.tcx.types.f64),
+ sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
+ sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
+ sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
+ sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
+ sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
+ sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
+ sym::powf32 => ("powf", 2, fx.tcx.types.f32),
+ sym::powf64 => ("pow", 2, fx.tcx.types.f64),
+ sym::logf32 => ("logf", 1, fx.tcx.types.f32),
+ sym::logf64 => ("log", 1, fx.tcx.types.f64),
+ sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
+ sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
+ sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
+ sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
+ sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
+ sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
+ sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
+ sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
+ sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
+ sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
+ sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
+ sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
+ sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
+ sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
+ sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
+ sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
+ sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
+ sym::roundf64 => ("round", 1, fx.tcx.types.f64),
+ sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
+ sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
+ sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
+ sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
+ _ => return false,
+ };
+
+ if args.len() != arg_count {
+ bug!("wrong number of args for intrinsic {:?}", intrinsic);
+ }
+
+ let (a, b, c);
+ let args = match args {
+ [x] => {
+ a = [codegen_operand(fx, x)];
+ &a as &[_]
+ }
+ [x, y] => {
+ b = [codegen_operand(fx, x), codegen_operand(fx, y)];
+ &b
+ }
+ [x, y, z] => {
+ c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
+ &c
+ }
+ _ => unreachable!(),
+ };
+
+ let res = fx.easy_call(name, &args, ty);
+ ret.write_cvalue(fx, res);
+
+ true
+}
+
+fn codegen_regular_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ instance: Instance<'tcx>,
+ intrinsic: Symbol,
+ substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ destination: Option<BasicBlock>,
+ source_info: mir::SourceInfo,
+) {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+
+ match intrinsic {
+ sym::assume => {
+ intrinsic_args!(fx, args => (_a); intrinsic);
+ }
+ sym::likely | sym::unlikely => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ ret.write_cvalue(fx, a);
+ }
+ sym::breakpoint => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ fx.bcx.ins().debugtrap();
+ }
+ sym::copy | sym::copy_nonoverlapping => {
+ intrinsic_args!(fx, args => (src, dst, count); intrinsic);
+ let src = src.load_scalar(fx);
+ let dst = dst.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ if intrinsic == sym::copy_nonoverlapping {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ }
+ sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
+ // NOTE: the volatile variants have src and dst swapped
+ intrinsic_args!(fx, args => (dst, src, count); intrinsic);
+ let dst = dst.load_scalar(fx);
+ let src = src.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let elem_ty = substs.type_at(0);
+ let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
+ assert_eq!(args.len(), 3);
+ let byte_amount =
+ if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };
+
+ // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
+ if intrinsic == sym::volatile_copy_nonoverlapping_memory {
+ // FIXME emit_small_memcpy
+ fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
+ } else {
+ // FIXME emit_small_memmove
+ fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
+ }
+ }
+ sym::size_of_val => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ size
+ } else {
+ fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ }
+ sym::min_align_of_val => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
+ // branch
+ let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+ let (_ptr, info) = ptr.load_scalar_pair(fx);
+ let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
+ align
+ } else {
+ fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+ };
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ }
+
+ sym::vtable_size => {
+ intrinsic_args!(fx, args => (vtable); intrinsic);
+ let vtable = vtable.load_scalar(fx);
+
+ let size = crate::vtable::size_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
+ }
+
+ sym::vtable_align => {
+ intrinsic_args!(fx, args => (vtable); intrinsic);
+ let vtable = vtable.load_scalar(fx);
+
+ let align = crate::vtable::min_align_of_obj(fx, vtable);
+ ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
+ }
+
+ sym::unchecked_add
+ | sym::unchecked_sub
+ | sym::unchecked_mul
+ | sym::unchecked_div
+ | sym::exact_div
+ | sym::unchecked_rem
+ | sym::unchecked_shl
+ | sym::unchecked_shr => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ // FIXME trap on overflow
+ let bin_op = match intrinsic {
+ sym::unchecked_add => BinOp::Add,
+ sym::unchecked_sub => BinOp::Sub,
+ sym::unchecked_mul => BinOp::Mul,
+ sym::unchecked_div | sym::exact_div => BinOp::Div,
+ sym::unchecked_rem => BinOp::Rem,
+ sym::unchecked_shl => BinOp::Shl,
+ sym::unchecked_shr => BinOp::Shr,
+ _ => unreachable!(),
+ };
+ let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ }
+ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ assert_eq!(x.layout().ty, y.layout().ty);
+ let bin_op = match intrinsic {
+ sym::add_with_overflow => BinOp::Add,
+ sym::sub_with_overflow => BinOp::Sub,
+ sym::mul_with_overflow => BinOp::Mul,
+ _ => unreachable!(),
+ };
+
+ let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
+ ret.write_cvalue(fx, res);
+ }
+ sym::saturating_add | sym::saturating_sub => {
+ intrinsic_args!(fx, args => (lhs, rhs); intrinsic);
+
+ assert_eq!(lhs.layout().ty, rhs.layout().ty);
+ let bin_op = match intrinsic {
+ sym::saturating_add => BinOp::Add,
+ sym::saturating_sub => BinOp::Sub,
+ _ => unreachable!(),
+ };
+
+ let signed = type_sign(lhs.layout().ty);
+
+ let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);
+
+ let (val, has_overflow) = checked_res.load_scalar_pair(fx);
+ let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
+
+ let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
+
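+ // On overflow, saturate: unsigned add -> MAX, unsigned sub -> MIN; for
+ // signed ops the direction depends on the sign of `rhs`
+ // (e.g. i8: 127 + 1 -> 127, (-128) - 1 -> -128).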
+ let val = match (intrinsic, signed) {
+ (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
+ (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
+ (sym::saturating_add, true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero =
+ fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ (sym::saturating_sub, true) => {
+ let rhs = rhs.load_scalar(fx);
+ let rhs_ge_zero =
+ fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
+ let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
+ fx.bcx.ins().select(has_overflow, sat_val, val)
+ }
+ _ => unreachable!(),
+ };
+
+ let res = CValue::by_val(val, lhs.layout());
+
+ ret.write_cvalue(fx, res);
+ }
+ sym::rotate_left => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+ let y = y.load_scalar(fx);
+
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotl(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ }
+ sym::rotate_right => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+ let y = y.load_scalar(fx);
+
+ let layout = x.layout();
+ let x = x.load_scalar(fx);
+ let res = fx.bcx.ins().rotr(x, y);
+ ret.write_cvalue(fx, CValue::by_val(res, layout));
+ }
+
+ // The only difference between offset and arith_offset is their UB rules.
+ // Because Cranelift doesn't have a notion of UB, both are codegen'ed the same way.
+ sym::offset | sym::arith_offset => {
+ intrinsic_args!(fx, args => (base, offset); intrinsic);
+ let offset = offset.load_scalar(fx);
+
+ let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(offset, pointee_size as i64)
+ } else {
+ offset
+ };
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
+ }
+
+ sym::transmute => {
+ intrinsic_args!(fx, args => (from); intrinsic);
+
+ ret.write_cvalue_transmute(fx, from);
+ }
+ sym::write_bytes | sym::volatile_set_memory => {
+ intrinsic_args!(fx, args => (dst, val, count); intrinsic);
+ let val = val.load_scalar(fx);
+ let count = count.load_scalar(fx);
+
+ let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let count = if pointee_size != 1 {
+ fx.bcx.ins().imul_imm(count, pointee_size as i64)
+ } else {
+ count
+ };
+ let dst_ptr = dst.load_scalar(fx);
+ // FIXME make the memset actually volatile when switching to emit_small_memset
+ // FIXME use emit_small_memset
+ fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
+ }
+ sym::ctlz | sym::ctlz_nonzero => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ // FIXME trap on `ctlz_nonzero` with zero arg.
+ let res = fx.bcx.ins().clz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::cttz | sym::cttz_nonzero => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ // FIXME trap on `cttz_nonzero` with zero arg.
+ let res = fx.bcx.ins().ctz(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::ctpop => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = fx.bcx.ins().popcnt(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::bitreverse => {
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = fx.bcx.ins().bitrev(val);
+ let res = CValue::by_val(res, arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::bswap => {
+ // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
+ fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
+ match bcx.func.dfg.value_type(v) {
+ types::I8 => v,
+
+ // https://code.woboq.org/gcc/include/bits/byteswap.h.html
+ types::I16 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 8);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
+
+ let tmp2 = bcx.ins().ushr_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
+
+ bcx.ins().bor(n1, n2)
+ }
+ types::I32 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 24);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 8);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
+
+ let tmp3 = bcx.ins().ushr_imm(v, 8);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
+
+ let tmp4 = bcx.ins().ushr_imm(v, 24);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ bcx.ins().bor(or_tmp1, or_tmp2)
+ }
+ types::I64 => {
+ let tmp1 = bcx.ins().ishl_imm(v, 56);
+ let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
+
+ let tmp2 = bcx.ins().ishl_imm(v, 40);
+ let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
+
+ let tmp3 = bcx.ins().ishl_imm(v, 24);
+ let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
+
+ let tmp4 = bcx.ins().ishl_imm(v, 8);
+ let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
+
+ let tmp5 = bcx.ins().ushr_imm(v, 8);
+ let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
+
+ let tmp6 = bcx.ins().ushr_imm(v, 24);
+ let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
+
+ let tmp7 = bcx.ins().ushr_imm(v, 40);
+ let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
+
+ let tmp8 = bcx.ins().ushr_imm(v, 56);
+ let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
+
+ let or_tmp1 = bcx.ins().bor(n1, n2);
+ let or_tmp2 = bcx.ins().bor(n3, n4);
+ let or_tmp3 = bcx.ins().bor(n5, n6);
+ let or_tmp4 = bcx.ins().bor(n7, n8);
+
+ let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
+ let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
+ bcx.ins().bor(or_tmp5, or_tmp6)
+ }
+ types::I128 => {
+ let (lo, hi) = bcx.ins().isplit(v);
+ let lo = swap(bcx, lo);
+ let hi = swap(bcx, hi);
+ bcx.ins().iconcat(hi, lo)
+ }
+ ty => unreachable!("bswap {}", ty),
+ }
+ }
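+ // e.g. byte-swapping the I32 value 0x1234_5678 yields 0x7856_3412.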
+ intrinsic_args!(fx, args => (arg); intrinsic);
+ let val = arg.load_scalar(fx);
+
+ let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
+ ret.write_cvalue(fx, res);
+ }
+ sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let layout = fx.layout_of(substs.type_at(0));
+ if layout.abi.is_uninhabited() {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
+ source_info,
+ )
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!(
+ "attempted to zero-initialize type `{}`, which is invalid",
+ layout.ty
+ ),
+ source_info,
+ );
+ });
+ return;
+ }
+
+ if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
+ with_no_trimmed_paths!({
+ crate::base::codegen_panic(
+ fx,
+ &format!(
+ "attempted to leave type `{}` uninitialized, which is invalid",
+ layout.ty
+ ),
+ source_info,
+ )
+ });
+ return;
+ }
+ }
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+
+ // Cranelift treats loads as volatile by default
+ // FIXME correctly handle unaligned_volatile_load
+ let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
+ let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
+ ret.write_cvalue(fx, val);
+ }
+ sym::volatile_store | sym::unaligned_volatile_store => {
+ intrinsic_args!(fx, args => (ptr, val); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ // Cranelift treats stores as volatile by default
+ // FIXME correctly handle unaligned_volatile_store
+ let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
+ dest.write_cvalue(fx, val);
+ }
+
+ sym::pref_align_of
+ | sym::needs_drop
+ | sym::type_id
+ | sym::type_name
+ | sym::variant_count => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let const_val =
+ fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+ let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+ intrinsic_args!(fx, args => (ptr, base); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+ let base = base.load_scalar(fx);
+ let ty = substs.type_at(0);
+
+ let pointee_size: u64 = fx.layout_of(ty).size.bytes();
+ let diff_bytes = fx.bcx.ins().isub(ptr, base);
+ // FIXME this can be an exact division.
+ let val = if intrinsic == sym::ptr_offset_from_unsigned {
+ let usize_layout = fx.layout_of(fx.tcx.types.usize);
+ // Because diff_bytes ULE isize::MAX, this would be fine as signed,
+ // but unsigned is slightly easier to codegen, so might as well.
+ CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
+ } else {
+ let isize_layout = fx.layout_of(fx.tcx.types.isize);
+ CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
+ };
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_guaranteed_eq => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::ptr_guaranteed_ne => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+
+ let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
+ ret.write_cvalue(fx, val);
+ }
+
+ sym::caller_location => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ let caller_location = fx.get_caller_location(source_info);
+ ret.write_cvalue(fx, caller_location);
+ }
+
+ _ if intrinsic.as_str().starts_with("atomic_fence") => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ fx.bcx.ins().fence();
+ }
+ _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
+ intrinsic_args!(fx, args => (); intrinsic);
+
+ // FIXME use a compiler fence once Cranelift supports it
+ fx.bcx.ins().fence();
+ }
+ _ if intrinsic.as_str().starts_with("atomic_load") => {
+ intrinsic_args!(fx, args => (ptr); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+ let clif_ty = fx.clif_type(ty).unwrap();
+
+ let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
+
+ let val = CValue::by_val(val, fx.layout_of(ty));
+ ret.write_cvalue(fx, val);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_store") => {
+ intrinsic_args!(fx, args => (ptr, val); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let ty = substs.type_at(0);
+ match ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ // FIXME implement 128bit atomics
+ if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
+ // special case for compiler-builtins to avoid having to patch it
+ crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+ return;
+ } else {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, "128bit atomics not yet supported");
+ }
+ }
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
+ return;
+ }
+ }
+
+ let val = val.load_scalar(fx);
+
+ fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xchg") => {
+ intrinsic_args!(fx, args => (ptr, new); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
+ // both atomic_cxchg_* and atomic_cxchgweak_*
+ intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = new.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+
+ let test_old = test_old.load_scalar(fx);
+ let new = new.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
+ let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
+
+ let ret_val =
+ CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
+ ret.write_cvalue(fx, ret_val)
+ }
+
+ _ if intrinsic.as_str().starts_with("atomic_xadd") => {
+ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old =
+ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xsub") => {
+ intrinsic_args!(fx, args => (ptr, amount); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = amount.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let amount = amount.load_scalar(fx);
+
+ let old =
+ fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_and") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_or") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_xor") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_nand") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_max") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_umax") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_min") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
+ _ if intrinsic.as_str().starts_with("atomic_umin") => {
+ intrinsic_args!(fx, args => (ptr, src); intrinsic);
+ let ptr = ptr.load_scalar(fx);
+
+ let layout = src.layout();
+ match layout.ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+ _ => {
+ report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
+ return;
+ }
+ }
+ let ty = fx.clif_type(layout.ty).unwrap();
+
+ let src = src.load_scalar(fx);
+
+ let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
+
+ let old = CValue::by_val(old, layout);
+ ret.write_cvalue(fx, old);
+ }
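+        // Each `atomic_*` read-modify-write arm above lowers to a single clif `atomic_rmw`
+        // instruction with the matching `AtomicRmwOp`. Illustrative example (not part of
+        // this file):
+        //     static FLAGS: AtomicU32 = AtomicU32::new(0b1100);
+        //     FLAGS.fetch_and(0b1010, Ordering::SeqCst); // lowers to AtomicRmwOp::And here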
+
+ sym::minnumf32 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ }
+ sym::minnumf64 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_min(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ }
+ sym::maxnumf32 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
+ ret.write_cvalue(fx, val);
+ }
+ sym::maxnumf64 => {
+ intrinsic_args!(fx, args => (a, b); intrinsic);
+ let a = a.load_scalar(fx);
+ let b = b.load_scalar(fx);
+
+ let val = crate::num::codegen_float_max(fx, a, b);
+ let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
+ ret.write_cvalue(fx, val);
+ }
+
+ kw::Try => {
+ intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
+ let f = f.load_scalar(fx);
+ let data = data.load_scalar(fx);
+ let _catch_fn = catch_fn.load_scalar(fx);
+
+ // FIXME once unwinding is supported, change this to actually catch panics
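+            // The `try` intrinsic is specified as `try(f, data, catch_fn) -> i32`, returning
+            // 0 when `f(data)` returns normally and non-zero when a panic was caught. As
+            // unwinding is not supported yet, `f` is called directly and 0 is returned
+            // unconditionally.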
+ let f_sig = fx.bcx.func.import_signature(Signature {
+ call_conv: fx.target_config.default_call_conv,
+ params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
+ returns: vec![],
+ });
+
+ fx.bcx.ins().call_indirect(f_sig, f, &[data]);
+
+ let layout = ret.layout();
+ let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+ ret.write_cvalue(fx, ret_val);
+ }
+
+ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ let res = crate::num::codegen_float_binop(
+ fx,
+ match intrinsic {
+ sym::fadd_fast => BinOp::Add,
+ sym::fsub_fast => BinOp::Sub,
+ sym::fmul_fast => BinOp::Mul,
+ sym::fdiv_fast => BinOp::Div,
+ sym::frem_fast => BinOp::Rem,
+ _ => unreachable!(),
+ },
+ x,
+ y,
+ );
+ ret.write_cvalue(fx, res);
+ }
+ sym::float_to_int_unchecked => {
+ intrinsic_args!(fx, args => (f); intrinsic);
+ let f = f.load_scalar(fx);
+
+ let res = crate::cast::clif_int_or_float_cast(
+ fx,
+ f,
+ false,
+ fx.clif_type(ret.layout().ty).unwrap(),
+ type_sign(ret.layout().ty),
+ );
+ ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
+ }
+
+ sym::raw_eq => {
+ intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
+ let lhs_ref = lhs_ref.load_scalar(fx);
+ let rhs_ref = rhs_ref.load_scalar(fx);
+
+ let size = fx.layout_of(substs.type_at(0)).layout.size();
+ // FIXME add and use emit_small_memcmp
+ let is_eq_value = if size == Size::ZERO {
+ // No bytes means they're trivially equal
+ fx.bcx.ins().iconst(types::I8, 1)
+ } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
+ // Can't use `trusted` for these loads; they could be unaligned.
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
+ let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
+ let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
+ fx.bcx.ins().bint(types::I8, eq)
+ } else {
+ // Just call `memcmp` (like slices do in core) when the
+ // size is too large or it's not a power-of-two.
+ let signed_bytes = i64::try_from(size.bytes()).unwrap();
+ let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
+ let params = vec![AbiParam::new(fx.pointer_type); 3];
+ let returns = vec![AbiParam::new(types::I32)];
+ let args = &[lhs_ref, rhs_ref, bytes_val];
+ let cmp = fx.lib_call("memcmp", params, returns, args)[0];
+ let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
+ fx.bcx.ins().bint(types::I8, eq)
+ };
+ ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
+ }
+
+ sym::const_allocate => {
+ intrinsic_args!(fx, args => (_size, _align); intrinsic);
+
+            // `const_allocate` is only meaningful during const evaluation; at runtime it
+            // returns a null pointer.
+ let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
+ }
+
+ sym::const_deallocate => {
+ intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
+            // `const_deallocate` is a no-op at runtime.
+ }
+
+ sym::black_box => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ // FIXME implement black_box semantics
+ ret.write_cvalue(fx, a);
+ }
+
+ // FIXME implement variadics in cranelift
+ sym::va_copy | sym::va_arg | sym::va_end => {
+ fx.tcx.sess.span_fatal(
+ source_info.span,
+ "Defining variadic functions is not yet supported by Cranelift",
+ );
+ }
+
+ _ => {
+ fx.tcx
+ .sess
+ .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
+ }
+ }
+
+ let ret_block = fx.get_block(destination.unwrap());
+ fx.bcx.ins().jump(ret_block, &[]);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
new file mode 100644
index 000000000..30e3d1125
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -0,0 +1,659 @@
+//! Codegen `extern "platform-intrinsic"` intrinsics.
+
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_span::Symbol;
+
+use super::*;
+use crate::prelude::*;
+
+fn report_simd_type_validation_error(
+ fx: &mut FunctionCx<'_, '_, '_>,
+ intrinsic: Symbol,
+ span: Span,
+ ty: Ty<'_>,
+) {
+ fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+}
+
+pub(super) fn codegen_simd_intrinsic_call<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ intrinsic: Symbol,
+ _substs: SubstsRef<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ ret: CPlace<'tcx>,
+ span: Span,
+) {
+ match intrinsic {
+ sym::simd_cast => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, ret_lane_ty, lane| {
+ let ret_lane_clif_ty = fx.clif_type(ret_lane_ty).unwrap();
+
+ let from_signed = type_sign(lane_ty);
+ let to_signed = type_sign(ret_lane_ty);
+
+ clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
+ });
+ }
+
+ sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
+ let res_lane = match (lane_ty.kind(), intrinsic) {
+ (ty::Uint(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
+ (ty::Uint(_), sym::simd_ne) => {
+ fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_lt) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_le) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_gt) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane)
+ }
+ (ty::Uint(_), sym::simd_ge) => {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ (ty::Int(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
+ (ty::Int(_), sym::simd_ne) => {
+ fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_lt) => {
+ fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_le) => {
+ fx.bcx.ins().icmp(IntCC::SignedLessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_gt) => {
+ fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane)
+ }
+ (ty::Int(_), sym::simd_ge) => {
+ fx.bcx.ins().icmp(IntCC::SignedGreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ (ty::Float(_), sym::simd_eq) => {
+ fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_ne) => {
+ fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_lt) => {
+ fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_le) => {
+ fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_gt) => {
+ fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane)
+ }
+ (ty::Float(_), sym::simd_ge) => {
+ fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, x_lane, y_lane)
+ }
+
+ _ => unreachable!(),
+ };
+
+ let ty = fx.clif_type(res_lane_ty).unwrap();
+
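+                // SIMD comparisons return all-ones lanes for `true`: `bint` turns the flag
+                // into 0/1 and the negation turns 1 into -1 (all bits set).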
+ let res_lane = fx.bcx.ins().bint(ty, res_lane);
+ fx.bcx.ins().ineg(res_lane)
+ });
+ }
+
+ // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
+ _ if intrinsic.as_str().starts_with("simd_shuffle") => {
+ let (x, y, idx) = match args {
+ [x, y, idx] => (x, y, idx),
+ _ => {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ }
+ };
+ let x = codegen_operand(fx, x);
+ let y = codegen_operand(fx, y);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+            // If there is no suffix, use the index array length. If this intrinsic is the
+            // older "simd_shuffleN" form, simply parse the integer N from the name.
+ let n: u16 = if intrinsic == sym::simd_shuffle {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
+ match idx_ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
+ .try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
+ .unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ .try_into()
+ .unwrap(),
+ _ => {
+ fx.tcx.sess.span_err(
+ span,
+ &format!(
+ "simd_shuffle index must be an array of `u32`, got `{}`",
+ idx_ty,
+ ),
+ );
+ // Prevent verifier error
+ crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
+ return;
+ }
+ }
+ } else {
+ intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
+ };
+
+ assert_eq!(x.layout(), y.layout());
+ let layout = x.layout();
+
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
+
+ assert_eq!(lane_ty, ret_lane_ty);
+ assert_eq!(u64::from(n), ret_lane_count);
+
+ let total_len = lane_count * 2;
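+            // Shuffle indexes select from the virtual concatenation of `x` followed by `y`,
+            // so each index must be in `0..lane_count * 2`.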
+
+ let indexes = {
+ use rustc_middle::mir::interpret::*;
+ let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
+ .expect("simd_shuffle* idx not const");
+
+ let idx_bytes = match idx_const {
+ ConstValue::ByRef { alloc, offset } => {
+ let size = Size::from_bytes(
+ 4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
+ );
+ alloc.inner().get_bytes(fx, alloc_range(offset, size)).unwrap()
+ }
+ _ => unreachable!("{:?}", idx_const),
+ };
+
+ (0..ret_lane_count)
+ .map(|i| {
+ let i = usize::try_from(i).unwrap();
+ let idx = rustc_middle::mir::interpret::read_target_uint(
+ fx.tcx.data_layout.endian,
+ &idx_bytes[4 * i..4 * i + 4],
+ )
+ .expect("read_target_uint");
+ u16::try_from(idx).expect("try_from u32")
+ })
+ .collect::<Vec<u16>>()
+ };
+
+ for &idx in &indexes {
+ assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
+ }
+
+ for (out_idx, in_idx) in indexes.into_iter().enumerate() {
+ let in_lane = if u64::from(in_idx) < lane_count {
+ x.value_lane(fx, in_idx.into())
+ } else {
+ y.value_lane(fx, u64::from(in_idx) - lane_count)
+ };
+ let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
+ out_lane.write_cvalue(fx, in_lane);
+ }
+ }
+
+ sym::simd_insert => {
+ let (base, idx, val) = match args {
+ [base, idx, val] => (base, idx, val),
+ _ => {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ }
+ };
+ let base = codegen_operand(fx, base);
+ let val = codegen_operand(fx, val);
+
+ // FIXME validate
+ let idx_const = if let Some(idx_const) =
+ crate::constant::mir_operand_get_const_val(fx, idx)
+ {
+ idx_const
+ } else {
+ fx.tcx.sess.span_fatal(span, "Index argument for `simd_insert` is not a constant");
+ };
+
+ let idx = idx_const
+            .try_to_bits(Size::from_bytes(4 /* u32 */))
+ .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count),
+ );
+ }
+
+ ret.write_cvalue(fx, base);
+ let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
+ ret_lane.write_cvalue(fx, val);
+ }
+
+ sym::simd_extract => {
+ let (v, idx) = match args {
+ [v, idx] => (v, idx),
+ _ => {
+ bug!("wrong number of args for intrinsic {intrinsic}");
+ }
+ };
+ let v = codegen_operand(fx, v);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ let idx_const = if let Some(idx_const) =
+ crate::constant::mir_operand_get_const_val(fx, idx)
+ {
+ idx_const
+ } else {
+ fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
+ let res = crate::trap::trap_unimplemented_ret_value(
+ fx,
+ ret.layout(),
+ "Index argument for `simd_extract` is not a constant",
+ );
+ ret.write_cvalue(fx, res);
+ return;
+ };
+
+ let idx = idx_const
+            .try_to_bits(Size::from_bytes(4 /* u32 */))
+ .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
+ let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
+ if idx >= lane_count.into() {
+ fx.tcx.sess.span_fatal(
+ fx.mir.span,
+ &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count),
+ );
+ }
+
+ let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
+ ret.write_cvalue(fx, ret_lane);
+ }
+
+ sym::simd_neg => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(
+ fx,
+ a,
+ ret,
+ &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
+ ty::Int(_) => fx.bcx.ins().ineg(lane),
+ ty::Float(_) => fx.bcx.ins().fneg(lane),
+ _ => unreachable!(),
+ },
+ );
+ }
+
+ sym::simd_add
+ | sym::simd_sub
+ | sym::simd_mul
+ | sym::simd_div
+ | sym::simd_rem
+ | sym::simd_shl
+ | sym::simd_shr
+ | sym::simd_and
+ | sym::simd_or
+ | sym::simd_xor => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+ match (lane_ty.kind(), intrinsic) {
+ (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
+
+ (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
+ (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
+ (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
+ (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
+ (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
+
+ (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
+ (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
+ (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
+ (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
+ (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
+ "fmodf",
+ vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[x_lane, y_lane],
+ )[0],
+ (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
+ "fmod",
+ vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[x_lane, y_lane],
+ )[0],
+
+ (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
+ (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
+
+ (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
+ (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
+ (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
+ (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
+ (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
+
+ _ => unreachable!(),
+ }
+ });
+ }
+
+ sym::simd_fma => {
+ intrinsic_args!(fx, args => (a, b, c); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+ assert_eq!(a.layout(), b.layout());
+ assert_eq!(a.layout(), c.layout());
+ assert_eq!(a.layout(), ret.layout());
+
+ let layout = a.layout();
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+
+ for lane in 0..lane_count {
+ let a_lane = a.value_lane(fx, lane);
+ let b_lane = b.value_lane(fx, lane);
+ let c_lane = c.value_lane(fx, lane);
+
+ let res_lane = match lane_ty.kind() {
+ ty::Float(FloatTy::F32) => {
+ fx.easy_call("fmaf", &[a_lane, b_lane, c_lane], lane_ty)
+ }
+ ty::Float(FloatTy::F64) => {
+ fx.easy_call("fma", &[a_lane, b_lane, c_lane], lane_ty)
+ }
+ _ => unreachable!(),
+ };
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ }
+
+ sym::simd_fmin | sym::simd_fmax => {
+ intrinsic_args!(fx, args => (x, y); intrinsic);
+
+ if !x.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
+ return;
+ }
+
+ // FIXME use vector instructions when possible
+ simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
+ match lane_ty.kind() {
+ ty::Float(_) => {}
+ _ => unreachable!("{:?}", lane_ty),
+ }
+ match intrinsic {
+ sym::simd_fmin => crate::num::codegen_float_min(fx, x_lane, y_lane),
+ sym::simd_fmax => crate::num::codegen_float_max(fx, x_lane, y_lane),
+ _ => unreachable!(),
+ }
+ });
+ }
+
+ sym::simd_round => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(
+ fx,
+ a,
+ ret,
+ &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
+ ty::Float(FloatTy::F32) => fx.lib_call(
+ "roundf",
+ vec![AbiParam::new(types::F32)],
+ vec![AbiParam::new(types::F32)],
+ &[lane],
+ )[0],
+ ty::Float(FloatTy::F64) => fx.lib_call(
+ "round",
+ vec![AbiParam::new(types::F64)],
+ vec![AbiParam::new(types::F64)],
+ &[lane],
+ )[0],
+ _ => unreachable!("{:?}", lane_ty),
+ },
+ );
+ }
+
+ sym::simd_fabs | sym::simd_fsqrt | sym::simd_ceil | sym::simd_floor | sym::simd_trunc => {
+ intrinsic_args!(fx, args => (a); intrinsic);
+
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+
+ simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
+ match lane_ty.kind() {
+ ty::Float(_) => {}
+ _ => unreachable!("{:?}", lane_ty),
+ }
+ match intrinsic {
+ sym::simd_fabs => fx.bcx.ins().fabs(lane),
+ sym::simd_fsqrt => fx.bcx.ins().sqrt(lane),
+ sym::simd_ceil => fx.bcx.ins().ceil(lane),
+ sym::simd_floor => fx.bcx.ins().floor(lane),
+ sym::simd_trunc => fx.bcx.ins().trunc(lane),
+ _ => unreachable!(),
+ }
+ });
+ }
+
+ sym::simd_reduce_add_ordered | sym::simd_reduce_add_unordered => {
+ intrinsic_args!(fx, args => (v, acc); intrinsic);
+ let acc = acc.load_scalar(fx);
+
+ // FIXME there must be no acc param for integer vectors
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fadd(a, b)
+ } else {
+ fx.bcx.ins().iadd(a, b)
+ }
+ });
+ }
+
+ sym::simd_reduce_mul_ordered | sym::simd_reduce_mul_unordered => {
+ intrinsic_args!(fx, args => (v, acc); intrinsic);
+ let acc = acc.load_scalar(fx);
+
+ // FIXME there must be no acc param for integer vectors
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
+ if lane_ty.is_floating_point() {
+ fx.bcx.ins().fmul(a, b)
+ } else {
+ fx.bcx.ins().imul(a, b)
+ }
+ });
+ }
+
+ sym::simd_reduce_all => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
+ }
+
+ sym::simd_reduce_any => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
+ }
+
+ sym::simd_reduce_and => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
+ }
+
+ sym::simd_reduce_or => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
+ }
+
+ sym::simd_reduce_xor => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
+ }
+
+ sym::simd_reduce_min => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+ let lt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b),
+ ty::Float(_) => return crate::num::codegen_float_min(fx, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(lt, a, b)
+ });
+ }
+
+ sym::simd_reduce_max => {
+ intrinsic_args!(fx, args => (v); intrinsic);
+
+ if !v.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
+ return;
+ }
+
+ simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
+ let gt = match ty.kind() {
+ ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b),
+ ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b),
+ ty::Float(_) => return crate::num::codegen_float_max(fx, a, b),
+ _ => unreachable!(),
+ };
+ fx.bcx.ins().select(gt, a, b)
+ });
+ }
+
+ sym::simd_select => {
+ intrinsic_args!(fx, args => (m, a, b); intrinsic);
+
+ if !m.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, m.layout().ty);
+ return;
+ }
+ if !a.layout().ty.is_simd() {
+ report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
+ return;
+ }
+ assert_eq!(a.layout(), b.layout());
+
+ let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+
+ for lane in 0..lane_count {
+ let m_lane = m.value_lane(fx, lane).load_scalar(fx);
+ let a_lane = a.value_lane(fx, lane).load_scalar(fx);
+ let b_lane = b.value_lane(fx, lane).load_scalar(fx);
+
+ let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
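+                // A mask lane of zero selects the lane from `b`; any non-zero mask lane
+                // selects the lane from `a`.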
+ let res_lane =
+ CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
+
+ ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
+ }
+ }
+
+        // Not yet implemented (these fall through to the fatal error below):
+        // simd_saturating_*
+        // simd_bitmask
+        // simd_scatter
+        // simd_gather
+ _ => {
+ fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
new file mode 100644
index 000000000..bb0793b1d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -0,0 +1,316 @@
+#![feature(rustc_private)]
+// Note: please avoid adding other feature gates where possible
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+#![warn(unreachable_pub)]
+
+#[macro_use]
+extern crate rustc_middle;
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_fs_util;
+extern crate rustc_hir;
+extern crate rustc_incremental;
+extern crate rustc_index;
+extern crate rustc_interface;
+extern crate rustc_metadata;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_target;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+use std::any::Any;
+use std::cell::Cell;
+
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_codegen_ssa::CodegenResults;
+use rustc_errors::ErrorGuaranteed;
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_session::config::OutputFilenames;
+use rustc_session::Session;
+use rustc_span::Symbol;
+
+use cranelift_codegen::isa::TargetIsa;
+use cranelift_codegen::settings::{self, Configurable};
+
+pub use crate::config::*;
+use crate::prelude::*;
+
+mod abi;
+mod allocator;
+mod analyze;
+mod archive;
+mod base;
+mod cast;
+mod codegen_i128;
+mod common;
+mod compiler_builtins;
+mod config;
+mod constant;
+mod debuginfo;
+mod discriminant;
+mod driver;
+mod inline_asm;
+mod intrinsics;
+mod linkage;
+mod main_shim;
+mod num;
+mod optimize;
+mod pointer;
+mod pretty_clif;
+mod toolchain;
+mod trap;
+mod unsize;
+mod value_and_place;
+mod vtable;
+
+mod prelude {
+ pub(crate) use rustc_span::{FileNameDisplayPreference, Span};
+
+ pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+ pub(crate) use rustc_middle::bug;
+ pub(crate) use rustc_middle::mir::{self, *};
+ pub(crate) use rustc_middle::ty::layout::{self, LayoutOf, TyAndLayout};
+ pub(crate) use rustc_middle::ty::{
+ self, FloatTy, Instance, InstanceDef, IntTy, ParamEnv, Ty, TyCtxt, TypeAndMut,
+ TypeFoldable, TypeVisitable, UintTy,
+ };
+ pub(crate) use rustc_target::abi::{Abi, Scalar, Size, VariantIdx};
+
+ pub(crate) use rustc_data_structures::fx::FxHashMap;
+
+ pub(crate) use rustc_index::vec::Idx;
+
+ pub(crate) use cranelift_codegen::ir::condcodes::{FloatCC, IntCC};
+ pub(crate) use cranelift_codegen::ir::function::Function;
+ pub(crate) use cranelift_codegen::ir::types;
+ pub(crate) use cranelift_codegen::ir::{
+ AbiParam, Block, ExternalName, FuncRef, Inst, InstBuilder, MemFlags, Signature, SourceLoc,
+ StackSlot, StackSlotData, StackSlotKind, TrapCode, Type, Value,
+ };
+ pub(crate) use cranelift_codegen::isa::{self, CallConv};
+ pub(crate) use cranelift_codegen::Context;
+ pub(crate) use cranelift_frontend::{FunctionBuilder, FunctionBuilderContext, Variable};
+ pub(crate) use cranelift_module::{self, DataContext, FuncId, Linkage, Module};
+
+ pub(crate) use crate::abi::*;
+ pub(crate) use crate::base::{codegen_operand, codegen_place};
+ pub(crate) use crate::cast::*;
+ pub(crate) use crate::common::*;
+ pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
+ pub(crate) use crate::pointer::Pointer;
+ pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
+}
+
+struct PrintOnPanic<F: Fn() -> String>(F);
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+ fn drop(&mut self) {
+ if ::std::thread::panicking() {
+ println!("{}", (self.0)());
+ }
+ }
+}
+
+/// The codegen context holds any information shared between the codegen of individual functions
+/// inside a single codegen unit, with the exception of the Cranelift [`Module`](cranelift_module::Module).
+struct CodegenCx<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ global_asm: String,
+ inline_asm_index: Cell<usize>,
+ cached_context: Context,
+ debug_context: Option<DebugContext<'tcx>>,
+ unwind_context: UnwindContext,
+ cgu_name: Symbol,
+}
+
+impl<'tcx> CodegenCx<'tcx> {
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ backend_config: BackendConfig,
+ isa: &dyn TargetIsa,
+ debug_info: bool,
+ cgu_name: Symbol,
+ ) -> Self {
+ assert_eq!(pointer_ty(tcx), isa.pointer_type());
+
+ let unwind_context =
+ UnwindContext::new(isa, matches!(backend_config.codegen_mode, CodegenMode::Aot));
+ let debug_context = if debug_info && !tcx.sess.target.options.is_like_windows {
+ Some(DebugContext::new(tcx, isa))
+ } else {
+ None
+ };
+ CodegenCx {
+ tcx,
+ global_asm: String::new(),
+ inline_asm_index: Cell::new(0),
+ cached_context: Context::new(),
+ debug_context,
+ unwind_context,
+ cgu_name,
+ }
+ }
+}
+
+pub struct CraneliftCodegenBackend {
+ pub config: Option<BackendConfig>,
+}
+
+impl CodegenBackend for CraneliftCodegenBackend {
+ fn init(&self, sess: &Session) {
+ use rustc_session::config::Lto;
+ match sess.lto() {
+ Lto::No | Lto::ThinLocal => {}
+ Lto::Thin | Lto::Fat => sess.warn("LTO is not supported. You may get a linker error."),
+ }
+ }
+
+ fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<rustc_span::Symbol> {
+ vec![]
+ }
+
+ fn print_version(&self) {
+ println!("Cranelift version: {}", cranelift_codegen::VERSION);
+ }
+
+ fn codegen_crate(
+ &self,
+ tcx: TyCtxt<'_>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any> {
+ tcx.sess.abort_if_errors();
+ let config = if let Some(config) = self.config.clone() {
+ config
+ } else {
+ if !tcx.sess.unstable_options() && !tcx.sess.opts.cg.llvm_args.is_empty() {
+ tcx.sess.fatal("`-Z unstable-options` must be passed to allow configuring cg_clif");
+ }
+ BackendConfig::from_opts(&tcx.sess.opts.cg.llvm_args)
+ .unwrap_or_else(|err| tcx.sess.fatal(&err))
+ };
+ match config.codegen_mode {
+ CodegenMode::Aot => driver::aot::run_aot(tcx, config, metadata, need_metadata_module),
+ CodegenMode::Jit | CodegenMode::JitLazy => {
+ #[cfg(feature = "jit")]
+ driver::jit::run_jit(tcx, config);
+
+ #[cfg(not(feature = "jit"))]
+ tcx.sess.fatal("jit support was disabled when compiling rustc_codegen_cranelift");
+ }
+ }
+ }
+
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ _sess: &Session,
+ _outputs: &OutputFilenames,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+ Ok(*ongoing_codegen
+ .downcast::<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>)>()
+ .unwrap())
+ }
+
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorGuaranteed> {
+ use rustc_codegen_ssa::back::link::link_binary;
+
+ link_binary(sess, &crate::archive::ArArchiveBuilderBuilder, &codegen_results, outputs)
+ }
+}
+
+fn target_triple(sess: &Session) -> target_lexicon::Triple {
+ match sess.target.llvm_target.parse() {
+ Ok(triple) => triple,
+ Err(err) => sess.fatal(&format!("target not recognized: {}", err)),
+ }
+}
+
+fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::TargetIsa + 'static> {
+ use target_lexicon::BinaryFormat;
+
+ let target_triple = crate::target_triple(sess);
+
+ let mut flags_builder = settings::builder();
+ flags_builder.enable("is_pic").unwrap();
+ flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
+ let enable_verifier = if backend_config.enable_verifier { "true" } else { "false" };
+ flags_builder.set("enable_verifier", enable_verifier).unwrap();
+ flags_builder.set("regalloc_checker", enable_verifier).unwrap();
+
+ let tls_model = match target_triple.binary_format {
+ BinaryFormat::Elf => "elf_gd",
+ BinaryFormat::Macho => "macho",
+ BinaryFormat::Coff => "coff",
+ _ => "none",
+ };
+ flags_builder.set("tls_model", tls_model).unwrap();
+
+ flags_builder.set("enable_simd", "true").unwrap();
+
+ flags_builder.set("enable_llvm_abi_extensions", "true").unwrap();
+
+ use rustc_session::config::OptLevel;
+ match sess.opts.optimize {
+ OptLevel::No => {
+ flags_builder.set("opt_level", "none").unwrap();
+ }
+ OptLevel::Less | OptLevel::Default => {}
+ OptLevel::Size | OptLevel::SizeMin | OptLevel::Aggressive => {
+ flags_builder.set("opt_level", "speed_and_size").unwrap();
+ }
+ }
+
+ let flags = settings::Flags::new(flags_builder);
+
+ let isa_builder = match sess.opts.cg.target_cpu.as_deref() {
+ Some("native") => {
+ let builder = cranelift_native::builder_with_options(true).unwrap();
+ builder
+ }
+ Some(value) => {
+ let mut builder =
+ cranelift_codegen::isa::lookup(target_triple.clone()).unwrap_or_else(|err| {
+ sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
+ });
+ if let Err(_) = builder.enable(value) {
+ sess.fatal("the specified target cpu isn't currently supported by Cranelift.");
+ }
+ builder
+ }
+ None => {
+ let mut builder =
+ cranelift_codegen::isa::lookup(target_triple.clone()).unwrap_or_else(|err| {
+ sess.fatal(&format!("can't compile for {}: {}", target_triple, err));
+ });
+ if target_triple.architecture == target_lexicon::Architecture::X86_64 {
+ // Don't use "haswell" as the default, as it implies `has_lzcnt`.
+ // macOS CI is still at Ivy Bridge EP, so `lzcnt` is interpreted as `bsr`.
+ builder.enable("nehalem").unwrap();
+ }
+ builder
+ }
+ };
+
+ match isa_builder.finish(flags) {
+ Ok(target_isa) => target_isa,
+ Err(err) => sess.fatal(&format!("failed to build TargetIsa: {}", err)),
+ }
+}
+
+/// This is the entrypoint for a hot-plugged rustc_codegen_cranelift.
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+ Box::new(CraneliftCodegenBackend { config: None })
+}
diff --git a/compiler/rustc_codegen_cranelift/src/linkage.rs b/compiler/rustc_codegen_cranelift/src/linkage.rs
new file mode 100644
index 000000000..ca853aac1
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/linkage.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
+
+use crate::prelude::*;
+
+pub(crate) fn get_clif_linkage(
+ mono_item: MonoItem<'_>,
+ linkage: RLinkage,
+ visibility: Visibility,
+ is_compiler_builtins: bool,
+) -> Linkage {
+ match (linkage, visibility) {
+ (RLinkage::External, Visibility::Default) if is_compiler_builtins => Linkage::Hidden,
+ (RLinkage::External, Visibility::Default) => Linkage::Export,
+ (RLinkage::Internal, Visibility::Default) => Linkage::Local,
+ (RLinkage::External, Visibility::Hidden) => Linkage::Hidden,
+ (RLinkage::WeakAny, Visibility::Default) => Linkage::Preemptible,
+ _ => panic!("{:?} = {:?} {:?}", mono_item, linkage, visibility),
+ }
+}
+
+pub(crate) fn get_static_linkage(tcx: TyCtxt<'_>, def_id: DefId) -> Linkage {
+ let fn_attrs = tcx.codegen_fn_attrs(def_id);
+
+ if let Some(linkage) = fn_attrs.linkage {
+ match linkage {
+ RLinkage::External => Linkage::Export,
+ RLinkage::Internal => Linkage::Local,
+ RLinkage::ExternalWeak | RLinkage::WeakAny => Linkage::Preemptible,
+ _ => panic!("{:?}", linkage),
+ }
+ } else if tcx.is_reachable_non_generic(def_id) {
+ Linkage::Export
+ } else {
+ Linkage::Hidden
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/main_shim.rs b/compiler/rustc_codegen_cranelift/src/main_shim.rs
new file mode 100644
index 000000000..c67b6e98b
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/main_shim.rs
@@ -0,0 +1,161 @@
+use rustc_hir::LangItem;
+use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::AssocKind;
+use rustc_session::config::EntryFnType;
+use rustc_span::symbol::Ident;
+
+use crate::prelude::*;
+
+/// Create the `main` function which will initialize the Rust runtime and call the
+/// user's main function.
+pub(crate) fn maybe_create_entry_wrapper(
+ tcx: TyCtxt<'_>,
+ module: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ is_jit: bool,
+ is_primary_cgu: bool,
+) {
+ let (main_def_id, is_main_fn) = match tcx.entry_fn(()) {
+ Some((def_id, entry_ty)) => (
+ def_id,
+ match entry_ty {
+ EntryFnType::Main => true,
+ EntryFnType::Start => false,
+ },
+ ),
+ None => return,
+ };
+
+ if main_def_id.is_local() {
+ let instance = Instance::mono(tcx, main_def_id).polymorphize(tcx);
+ if !is_jit && module.get_name(&*tcx.symbol_name(instance).name).is_none() {
+ return;
+ }
+ } else if !is_primary_cgu {
+ return;
+ }
+
+ create_entry_fn(tcx, module, unwind_context, main_def_id, is_jit, is_main_fn);
+
+ fn create_entry_fn(
+ tcx: TyCtxt<'_>,
+ m: &mut impl Module,
+ unwind_context: &mut UnwindContext,
+ rust_main_def_id: DefId,
+ ignore_lang_start_wrapper: bool,
+ is_main_fn: bool,
+ ) {
+ let main_ret_ty = tcx.fn_sig(rust_main_def_id).output();
+        // Given that `main()` has no arguments, its return type cannot have late-bound
+        // regions, since late-bound regions must appear in the argument listing.
+ let main_ret_ty = tcx.normalize_erasing_regions(
+ ty::ParamEnv::reveal_all(),
+ main_ret_ty.no_bound_vars().unwrap(),
+ );
+
+ let cmain_sig = Signature {
+ params: vec![
+ AbiParam::new(m.target_config().pointer_type()),
+ AbiParam::new(m.target_config().pointer_type()),
+ ],
+ returns: vec![AbiParam::new(m.target_config().pointer_type() /*isize*/)],
+ call_conv: CallConv::triple_default(m.isa().triple()),
+ };
+
+ let cmain_func_id = m.declare_function("main", Linkage::Export, &cmain_sig).unwrap();
+
+ let instance = Instance::mono(tcx, rust_main_def_id).polymorphize(tcx);
+
+ let main_name = tcx.symbol_name(instance).name;
+ let main_sig = get_function_sig(tcx, m.isa().triple(), instance);
+ let main_func_id = m.declare_function(main_name, Linkage::Import, &main_sig).unwrap();
+
+ let mut ctx = Context::new();
+ ctx.func = Function::with_name_signature(ExternalName::user(0, 0), cmain_sig);
+ {
+ let mut func_ctx = FunctionBuilderContext::new();
+ let mut bcx = FunctionBuilder::new(&mut ctx.func, &mut func_ctx);
+
+ let block = bcx.create_block();
+ bcx.switch_to_block(block);
+ let arg_argc = bcx.append_block_param(block, m.target_config().pointer_type());
+ let arg_argv = bcx.append_block_param(block, m.target_config().pointer_type());
+
+ let main_func_ref = m.declare_func_in_func(main_func_id, &mut bcx.func);
+
+ let result = if is_main_fn && ignore_lang_start_wrapper {
+ // regular main fn, but ignoring #[lang = "start"] as we are running in the jit
+ // FIXME set program arguments somehow
+ let call_inst = bcx.ins().call(main_func_ref, &[]);
+ let call_results = bcx.func.dfg.inst_results(call_inst).to_owned();
+
+ let termination_trait = tcx.require_lang_item(LangItem::Termination, None);
+ let report = tcx
+ .associated_items(termination_trait)
+ .find_by_name_and_kind(
+ tcx,
+ Ident::from_str("report"),
+ AssocKind::Fn,
+ termination_trait,
+ )
+ .unwrap();
+ let report = Instance::resolve(
+ tcx,
+ ParamEnv::reveal_all(),
+ report.def_id,
+ tcx.mk_substs([GenericArg::from(main_ret_ty)].iter()),
+ )
+ .unwrap()
+ .unwrap()
+ .polymorphize(tcx);
+
+ let report_name = tcx.symbol_name(report).name;
+ let report_sig = get_function_sig(tcx, m.isa().triple(), report);
+ let report_func_id =
+ m.declare_function(report_name, Linkage::Import, &report_sig).unwrap();
+ let report_func_ref = m.declare_func_in_func(report_func_id, &mut bcx.func);
+
+ // FIXME do proper abi handling instead of expecting the pass mode to be identical
+ // for returns and arguments.
+ let report_call_inst = bcx.ins().call(report_func_ref, &call_results);
+ let res = bcx.func.dfg.inst_results(report_call_inst)[0];
+ match m.target_config().pointer_type() {
+ types::I32 => res,
+ types::I64 => bcx.ins().sextend(types::I64, res),
+ _ => unimplemented!("16bit systems are not yet supported"),
+ }
+ } else if is_main_fn {
+ let start_def_id = tcx.require_lang_item(LangItem::Start, None);
+ let start_instance = Instance::resolve(
+ tcx,
+ ParamEnv::reveal_all(),
+ start_def_id,
+ tcx.intern_substs(&[main_ret_ty.into()]),
+ )
+ .unwrap()
+ .unwrap()
+ .polymorphize(tcx);
+ let start_func_id = import_function(tcx, m, start_instance);
+
+ let main_val = bcx.ins().func_addr(m.target_config().pointer_type(), main_func_ref);
+
+ let func_ref = m.declare_func_in_func(start_func_id, &mut bcx.func);
+ let call_inst = bcx.ins().call(func_ref, &[main_val, arg_argc, arg_argv]);
+ bcx.inst_results(call_inst)[0]
+ } else {
+ // using user-defined start fn
+ let call_inst = bcx.ins().call(main_func_ref, &[arg_argc, arg_argv]);
+ bcx.inst_results(call_inst)[0]
+ };
+
+ bcx.ins().return_(&[result]);
+ bcx.seal_all_blocks();
+ bcx.finalize();
+ }
+ m.define_function(cmain_func_id, &mut ctx).unwrap();
+ unwind_context.add_function(cmain_func_id, &ctx, m.isa());
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/num.rs b/compiler/rustc_codegen_cranelift/src/num.rs
new file mode 100644
index 000000000..4ce8adb18
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/num.rs
@@ -0,0 +1,440 @@
+//! Various operations on integer and floating-point numbers
+
+use crate::prelude::*;
+
+pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
+ use BinOp::*;
+ use IntCC::*;
+ Some(match bin_op {
+ Eq => Equal,
+ Lt => {
+ if signed {
+ SignedLessThan
+ } else {
+ UnsignedLessThan
+ }
+ }
+ Le => {
+ if signed {
+ SignedLessThanOrEqual
+ } else {
+ UnsignedLessThanOrEqual
+ }
+ }
+ Ne => NotEqual,
+ Ge => {
+ if signed {
+ SignedGreaterThanOrEqual
+ } else {
+ UnsignedGreaterThanOrEqual
+ }
+ }
+ Gt => {
+ if signed {
+ SignedGreaterThan
+ } else {
+ UnsignedGreaterThan
+ }
+ }
+ _ => return None,
+ })
+}
+
+fn codegen_compare_bin_op<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ signed: bool,
+ lhs: Value,
+ rhs: Value,
+) -> CValue<'tcx> {
+ let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
+ let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
+ let val = fx.bcx.ins().bint(types::I8, val);
+ CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ match bin_op {
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ match in_lhs.layout().ty.kind() {
+ ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
+ let signed = type_sign(in_lhs.layout().ty);
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+
+ match in_lhs.layout().ty.kind() {
+ ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+ }
+}
+
+pub(crate) fn codegen_bool_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let res = match bin_op {
+ BinOp::BitXor => b.bxor(lhs, rhs),
+ BinOp::BitAnd => b.band(lhs, rhs),
+ BinOp::BitOr => b.bor(lhs, rhs),
+        // Compare binops are handled by `codegen_binop`.
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ };
+
+ CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
+}
+
+pub(crate) fn codegen_int_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+ assert_eq!(
+ in_lhs.layout().ty,
+ in_rhs.layout().ty,
+ "int binop requires lhs and rhs of same type"
+ );
+ }
+
+ if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
+ return res;
+ }
+
+ let signed = type_sign(in_lhs.layout().ty);
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let val = match bin_op {
+ BinOp::Add => b.iadd(lhs, rhs),
+ BinOp::Sub => b.isub(lhs, rhs),
+ BinOp::Mul => b.imul(lhs, rhs),
+ BinOp::Div => {
+ if signed {
+ b.sdiv(lhs, rhs)
+ } else {
+ b.udiv(lhs, rhs)
+ }
+ }
+ BinOp::Rem => {
+ if signed {
+ b.srem(lhs, rhs)
+ } else {
+ b.urem(lhs, rhs)
+ }
+ }
+ BinOp::BitXor => b.bxor(lhs, rhs),
+ BinOp::BitAnd => b.band(lhs, rhs),
+ BinOp::BitOr => b.bor(lhs, rhs),
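+        // For shifts the amount is masked to the bit width of `lhs` (e.g. shifting an `i32`
+        // by 35 shifts by 3), matching Rust's release-mode shift semantics and keeping the
+        // clif shift amount in range.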
+ BinOp::Shl => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ fx.bcx.ins().ishl(lhs, actual_shift)
+ }
+ BinOp::Shr => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ if signed {
+ fx.bcx.ins().sshr(lhs, actual_shift)
+ } else {
+ fx.bcx.ins().ushr(lhs, actual_shift)
+ }
+ }
+        // Compare binops are handled by `codegen_binop`.
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
+ };
+
+ CValue::by_val(val, in_lhs.layout())
+}
+
+pub(crate) fn codegen_checked_int_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ if bin_op != BinOp::Shl && bin_op != BinOp::Shr {
+ assert_eq!(
+ in_lhs.layout().ty,
+ in_rhs.layout().ty,
+ "checked int binop requires lhs and rhs of same type"
+ );
+ }
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
+ return res;
+ }
+
+ let signed = type_sign(in_lhs.layout().ty);
+
+ let (res, has_overflow) = match bin_op {
+ BinOp::Add => {
+ /*let (val, c_out) = fx.bcx.ins().iadd_cout(lhs, rhs);
+ (val, c_out)*/
+ // FIXME(CraneStation/cranelift#849) legalize iadd_cout for i8 and i16
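+            // An unsigned wrapping add overflowed iff the result is smaller than an operand,
+            // hence the `val < lhs` check. For signed values, adding a non-negative `rhs`
+            // must not decrease the value and adding a negative `rhs` must decrease it; any
+            // disagreement between `rhs < 0` and `val < lhs` (the `bxor` below) means the
+            // add wrapped, e.g. `127i8 + 1` gives `-128 < 127` with a non-negative `rhs`.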
+ let val = fx.bcx.ins().iadd(lhs, rhs);
+ let has_overflow = if !signed {
+ fx.bcx.ins().icmp(IntCC::UnsignedLessThan, val, lhs)
+ } else {
+ let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+ let slt = fx.bcx.ins().icmp(IntCC::SignedLessThan, val, lhs);
+ fx.bcx.ins().bxor(rhs_is_negative, slt)
+ };
+ (val, has_overflow)
+ }
+ BinOp::Sub => {
+ /*let (val, b_out) = fx.bcx.ins().isub_bout(lhs, rhs);
+ (val, b_out)*/
+ // FIXME(CraneStation/cranelift#849) legalize isub_bout for i8 and i16
+ let val = fx.bcx.ins().isub(lhs, rhs);
+ let has_overflow = if !signed {
+ fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, val, lhs)
+ } else {
+ let rhs_is_negative = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, rhs, 0);
+ let sgt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, val, lhs);
+ fx.bcx.ins().bxor(rhs_is_negative, sgt)
+ };
+ (val, has_overflow)
+ }
+ BinOp::Mul => {
+ let ty = fx.bcx.func.dfg.value_type(lhs);
+ match ty {
+ types::I8 | types::I16 | types::I32 if !signed => {
+ let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::UnsignedGreaterThan,
+ val,
+ (1 << ty.bits()) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, has_overflow)
+ }
+ types::I8 | types::I16 | types::I32 if signed => {
+ let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_underflow =
+ fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::SignedGreaterThan,
+ val,
+ (1 << (ty.bits() - 1)) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, fx.bcx.ins().bor(has_underflow, has_overflow))
+ }
+ types::I64 => {
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = if !signed {
+ let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
+ fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
+ } else {
+ // Based on LLVM's instruction sequence for compiling
+ // a.checked_mul(b).is_some() to riscv64gc:
+ // mulh a2, a0, a1
+ // mul a0, a0, a1
+ // srai a0, a0, 63
+ // xor a0, a0, a2
+ // snez a0, a0
+ let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
+ let val_sign = fx.bcx.ins().sshr_imm(val, i64::from(ty.bits() - 1));
+ let xor = fx.bcx.ins().bxor(val_hi, val_sign);
+ fx.bcx.ins().icmp_imm(IntCC::NotEqual, xor, 0)
+ };
+ (val, has_overflow)
+ }
+ types::I128 => {
+ unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
+ }
+ _ => unreachable!("invalid non-integer type {}", ty),
+ }
+ }
+ BinOp::Shl => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let val = fx.bcx.ins().ishl(lhs, masked_shift);
+ let ty = fx.bcx.func.dfg.value_type(val);
+ let max_shift = i64::from(ty.bits()) - 1;
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ (val, has_overflow)
+ }
+ BinOp::Shr => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let val = if !signed {
+ fx.bcx.ins().ushr(lhs, masked_shift)
+ } else {
+ fx.bcx.ins().sshr(lhs, masked_shift)
+ };
+ let ty = fx.bcx.func.dfg.value_type(val);
+ let max_shift = i64::from(ty.bits()) - 1;
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ (val, has_overflow)
+ }
+ _ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
+ };
+
+ let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
+
+ let out_layout = fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()));
+ CValue::by_val_pair(res, has_overflow, out_layout)
+}
+
+pub(crate) fn codegen_float_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ assert_eq!(in_lhs.layout().ty, in_rhs.layout().ty);
+
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ let b = fx.bcx.ins();
+ let res = match bin_op {
+ BinOp::Add => b.fadd(lhs, rhs),
+ BinOp::Sub => b.fsub(lhs, rhs),
+ BinOp::Mul => b.fmul(lhs, rhs),
+ BinOp::Div => b.fdiv(lhs, rhs),
+ BinOp::Rem => {
+ let name = match in_lhs.layout().ty.kind() {
+ ty::Float(FloatTy::F32) => "fmodf",
+ ty::Float(FloatTy::F64) => "fmod",
+ _ => bug!(),
+ };
+ return fx.easy_call(name, &[in_lhs, in_rhs], in_lhs.layout().ty);
+ }
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ let fltcc = match bin_op {
+ BinOp::Eq => FloatCC::Equal,
+ BinOp::Lt => FloatCC::LessThan,
+ BinOp::Le => FloatCC::LessThanOrEqual,
+ BinOp::Ne => FloatCC::NotEqual,
+ BinOp::Ge => FloatCC::GreaterThanOrEqual,
+ BinOp::Gt => FloatCC::GreaterThan,
+ _ => unreachable!(),
+ };
+ let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
+ let val = fx.bcx.ins().bint(types::I8, val);
+ return CValue::by_val(val, fx.layout_of(fx.tcx.types.bool));
+ }
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ };
+
+ CValue::by_val(res, in_lhs.layout())
+}
+
+pub(crate) fn codegen_ptr_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ bin_op: BinOp,
+ in_lhs: CValue<'tcx>,
+ in_rhs: CValue<'tcx>,
+) -> CValue<'tcx> {
+ let is_thin_ptr = in_lhs
+ .layout()
+ .ty
+ .builtin_deref(true)
+ .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
+ .unwrap_or(true);
+
+ if is_thin_ptr {
+ match bin_op {
+ BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
+ let lhs = in_lhs.load_scalar(fx);
+ let rhs = in_rhs.load_scalar(fx);
+
+ codegen_compare_bin_op(fx, bin_op, false, lhs, rhs)
+ }
+ BinOp::Offset => {
+ let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
+ let (base, offset) = (in_lhs, in_rhs.load_scalar(fx));
+ let pointee_size = fx.layout_of(pointee_ty).size.bytes();
+ let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
+ let base_val = base.load_scalar(fx);
+ let res = fx.bcx.ins().iadd(base_val, ptr_diff);
+ CValue::by_val(res, base.layout())
+ }
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
+ }
+ } else {
+ let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
+ let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
+
+ let res = match bin_op {
+ BinOp::Eq => {
+ let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+ let extra_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_extra, rhs_extra);
+ fx.bcx.ins().band(ptr_eq, extra_eq)
+ }
+ BinOp::Ne => {
+ let ptr_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_ptr, rhs_ptr);
+ let extra_ne = fx.bcx.ins().icmp(IntCC::NotEqual, lhs_extra, rhs_extra);
+ fx.bcx.ins().bor(ptr_ne, extra_ne)
+ }
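+            // Wide pointers compare lexicographically: the data pointers are compared first
+            // and the metadata (slice length or vtable address) only breaks ties.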
+ BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => {
+ let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
+
+ let ptr_cmp =
+ fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
+ let extra_cmp = fx.bcx.ins().icmp(
+ bin_op_to_intcc(bin_op, false).unwrap(),
+ lhs_extra,
+ rhs_extra,
+ );
+
+ fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp)
+ }
+ _ => panic!("bin_op {:?} on ptr", bin_op),
+ };
+
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), fx.layout_of(fx.tcx.types.bool))
+ }
+}
+
+// In Rust, floating point min and max don't propagate NaN; in Cranelift they do. For this
+// reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*` and
+// `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. The NaN check compares a float
+// against itself: only NaN is not equal to itself.
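+//
+// For example, `codegen_float_min(NaN, 1.0)` takes the `a_is_nan` branch and yields `1.0`,
+// while `codegen_float_min(1.0, NaN)` falls through to the `a >= b` compare, which is false
+// for a NaN operand, so `a = 1.0` is selected as well; both cases return the non-NaN
+// operand, as `minnumf*` requires.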
+pub(crate) fn codegen_float_min(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_ge_b, b, a);
+ fx.bcx.ins().select(a_is_nan, b, temp)
+}
+
+pub(crate) fn codegen_float_max(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_le_b, b, a);
+ fx.bcx.ins().select(a_is_nan, b, temp)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/mod.rs b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
new file mode 100644
index 000000000..d1f89adb3
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/mod.rs
@@ -0,0 +1,20 @@
+//! Various optimizations specific to cg_clif
+
+use cranelift_codegen::isa::TargetIsa;
+
+use crate::prelude::*;
+
+pub(crate) mod peephole;
+
+pub(crate) fn optimize_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ isa: &dyn TargetIsa,
+ instance: Instance<'tcx>,
+ ctx: &mut Context,
+ clif_comments: &mut crate::pretty_clif::CommentWriter,
+) {
+ // FIXME classify optimizations over opt levels once we have more
+
+ crate::pretty_clif::write_clif_file(tcx, "preopt", isa, instance, &ctx.func, &*clif_comments);
+ crate::base::verify_func(tcx, &*clif_comments, &ctx.func);
+}
diff --git a/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
new file mode 100644
index 000000000..d637b4d89
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/optimize/peephole.rs
@@ -0,0 +1,67 @@
+//! Peephole optimizations that can be performed while creating clif ir.
+
+use cranelift_codegen::ir::{condcodes::IntCC, InstructionData, Opcode, Value, ValueDef};
+use cranelift_frontend::FunctionBuilder;
+
+/// If the given value was produced by a `bint` instruction, return its input; otherwise return
+/// the given value.
+pub(crate) fn maybe_unwrap_bint(bcx: &mut FunctionBuilder<'_>, arg: Value) -> Value {
+ if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ match bcx.func.dfg[arg_inst] {
+ InstructionData::Unary { opcode: Opcode::Bint, arg } => arg,
+ _ => arg,
+ }
+ } else {
+ arg
+ }
+}
+
+/// If the given value was produced by the lowering of `Rvalue::Not`, return the input and `true`;
+/// otherwise return the given value and `false`.
+pub(crate) fn maybe_unwrap_bool_not(bcx: &mut FunctionBuilder<'_>, arg: Value) -> (Value, bool) {
+ if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ match bcx.func.dfg[arg_inst] {
+ // This is the lowering of `Rvalue::Not`
+ InstructionData::IntCompareImm {
+ opcode: Opcode::IcmpImm,
+ cond: IntCC::Equal,
+ arg,
+ imm,
+ } if imm.bits() == 0 => (arg, true),
+ _ => (arg, false),
+ }
+ } else {
+ (arg, false)
+ }
+}
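+
+// Sketch of the pattern matched above, assuming the usual lowering of a bool
+// `Rvalue::Not` to `icmp_imm eq x, 0`: for
+// `let not_x = bcx.ins().icmp_imm(IntCC::Equal, x, 0);`
+// this function maps `not_x` back to `(x, true)`.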
+
+/// Returns whether the branch is statically known to be taken, or `None` if that isn't statically known.
+pub(crate) fn maybe_known_branch_taken(
+ bcx: &FunctionBuilder<'_>,
+ arg: Value,
+ test_zero: bool,
+) -> Option<bool> {
+ let arg_inst = if let ValueDef::Result(arg_inst, 0) = bcx.func.dfg.value_def(arg) {
+ arg_inst
+ } else {
+ return None;
+ };
+
+ match bcx.func.dfg[arg_inst] {
+ InstructionData::UnaryBool { opcode: Opcode::Bconst, imm } => {
+ if test_zero {
+ Some(!imm)
+ } else {
+ Some(imm)
+ }
+ }
+ InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => {
+ if test_zero {
+ Some(imm.bits() == 0)
+ } else {
+ Some(imm.bits() != 0)
+ }
+ }
+ _ => None,
+ }
+}
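+
+// Hypothetical usage sketch: when the condition is a constant, a caller can
+// replace the conditional branch with an unconditional one:
+//
+// match maybe_known_branch_taken(&bcx, cond, false) {
+//     Some(true) => { /* emit a plain `jump` to the target block */ }
+//     Some(false) => { /* the branch is never taken; fall through */ }
+//     None => { /* emit the `brz`/`brnz` as usual */ }
+// }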
diff --git a/compiler/rustc_codegen_cranelift/src/pointer.rs b/compiler/rustc_codegen_cranelift/src/pointer.rs
new file mode 100644
index 000000000..31d827f83
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/pointer.rs
@@ -0,0 +1,134 @@
+//! Defines [`Pointer`], which is used to improve the quality of the generated clif ir for pointer
+//! operations.
+
+use crate::prelude::*;
+
+use rustc_target::abi::Align;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+/// A pointer pointing either to a certain address, to a certain stack slot, or to nothing.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct Pointer {
+ base: PointerBase,
+ offset: Offset32,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum PointerBase {
+ Addr(Value),
+ Stack(StackSlot),
+ Dangling(Align),
+}
+
+impl Pointer {
+ pub(crate) fn new(addr: Value) -> Self {
+ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+ }
+
+ pub(crate) fn stack_slot(stack_slot: StackSlot) -> Self {
+ Pointer { base: PointerBase::Stack(stack_slot), offset: Offset32::new(0) }
+ }
+
+ pub(crate) fn const_addr(fx: &mut FunctionCx<'_, '_, '_>, addr: i64) -> Self {
+ let addr = fx.bcx.ins().iconst(fx.pointer_type, addr);
+ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+ }
+
+ pub(crate) fn dangling(align: Align) -> Self {
+ Pointer { base: PointerBase::Dangling(align), offset: Offset32::new(0) }
+ }
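+
+    // Note: analogous to `core::ptr::NonNull::dangling`, a dangling pointer is
+    // materialized as its alignment (see `get_addr` below), a non-null and
+    // well-aligned address that must only be used for zero-sized accesses.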
+
+ pub(crate) fn debug_base_and_offset(self) -> (PointerBase, Offset32) {
+ (self.base, self.offset)
+ }
+
+ pub(crate) fn get_addr(self, fx: &mut FunctionCx<'_, '_, '_>) -> Value {
+ match self.base {
+ PointerBase::Addr(base_addr) => {
+ let offset: i64 = self.offset.into();
+ if offset == 0 { base_addr } else { fx.bcx.ins().iadd_imm(base_addr, offset) }
+ }
+ PointerBase::Stack(stack_slot) => {
+ fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset)
+ }
+ PointerBase::Dangling(align) => {
+ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+ }
+ }
+ }
+
+ pub(crate) fn offset(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Offset32) -> Self {
+ self.offset_i64(fx, extra_offset.into())
+ }
+
+ pub(crate) fn offset_i64(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: i64) -> Self {
+ if let Some(new_offset) = self.offset.try_add_i64(extra_offset) {
+ Pointer { base: self.base, offset: new_offset }
+ } else {
+ let base_offset: i64 = self.offset.into();
+ if let Some(new_offset) = base_offset.checked_add(extra_offset) {
+ let base_addr = match self.base {
+ PointerBase::Addr(addr) => addr,
+ PointerBase::Stack(stack_slot) => {
+ fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
+ }
+ PointerBase::Dangling(align) => {
+ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap())
+ }
+ };
+ let addr = fx.bcx.ins().iadd_imm(base_addr, new_offset);
+ Pointer { base: PointerBase::Addr(addr), offset: Offset32::new(0) }
+ } else {
+ panic!(
+ "self.offset ({}) + extra_offset ({}) not representable in i64",
+ base_offset, extra_offset
+ );
+ }
+ }
+ }
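+
+    // Note: a sketch of the folding behavior (illustrative): as long as the
+    // combined offset fits in the `Offset32` immediate, no add instruction is
+    // emitted, so `Pointer::new(p).offset_i64(fx, 8).offset_i64(fx, 4)` keeps `p`
+    // as the base and a later `load` simply uses an immediate offset of 12.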
+
+ pub(crate) fn offset_value(self, fx: &mut FunctionCx<'_, '_, '_>, extra_offset: Value) -> Self {
+ match self.base {
+ PointerBase::Addr(addr) => Pointer {
+ base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+ offset: self.offset,
+ },
+ PointerBase::Stack(stack_slot) => {
+ let base_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, self.offset);
+ Pointer {
+ base: PointerBase::Addr(fx.bcx.ins().iadd(base_addr, extra_offset)),
+ offset: Offset32::new(0),
+ }
+ }
+ PointerBase::Dangling(align) => {
+ let addr =
+ fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(align.bytes()).unwrap());
+ Pointer {
+ base: PointerBase::Addr(fx.bcx.ins().iadd(addr, extra_offset)),
+ offset: self.offset,
+ }
+ }
+ }
+ }
+
+ pub(crate) fn load(self, fx: &mut FunctionCx<'_, '_, '_>, ty: Type, flags: MemFlags) -> Value {
+ match self.base {
+ PointerBase::Addr(base_addr) => fx.bcx.ins().load(ty, flags, base_addr, self.offset),
+ PointerBase::Stack(stack_slot) => fx.bcx.ins().stack_load(ty, stack_slot, self.offset),
+ PointerBase::Dangling(_align) => unreachable!(),
+ }
+ }
+
+ pub(crate) fn store(self, fx: &mut FunctionCx<'_, '_, '_>, value: Value, flags: MemFlags) {
+ match self.base {
+ PointerBase::Addr(base_addr) => {
+ fx.bcx.ins().store(flags, value, base_addr, self.offset);
+ }
+ PointerBase::Stack(stack_slot) => {
+ fx.bcx.ins().stack_store(value, stack_slot, self.offset);
+ }
+ PointerBase::Dangling(_align) => unreachable!(),
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/pretty_clif.rs b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
new file mode 100644
index 000000000..1d1ec2168
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/pretty_clif.rs
@@ -0,0 +1,278 @@
+//! This module provides the [CommentWriter], which makes it possible
+//! to add comments to the written cranelift ir.
+//!
+//! # Example
+//!
+//! ```clif
+//! test compile
+//! target x86_64
+//!
+//! function u0:0(i64, i64, i64) system_v {
+//! ; symbol _ZN119_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$u27$a$u20$$RF$$u27$b$u20$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17he85059d5e6a760a0E
+//! ; instance Instance { def: Item(DefId(0/0:29 ~ example[8787]::{{impl}}[0]::call_once[0])), substs: [ReErased, ReErased] }
+//! ; sig ([IsNotEmpty, (&&[u16],)]; c_variadic: false)->(u8, u8)
+//!
+//! ; ssa {_2: NOT_SSA, _4: NOT_SSA, _0: NOT_SSA, _3: (empty), _1: NOT_SSA}
+//! ; msg loc.idx param pass mode ssa flags ty
+//! ; ret _0 = v0 ByRef NOT_SSA (u8, u8)
+//! ; arg _1 = v1 ByRef NOT_SSA IsNotEmpty
+//! ; arg _2.0 = v2 ByVal(types::I64) NOT_SSA &&[u16]
+//!
+//! ss0 = explicit_slot 0 ; _1: IsNotEmpty size=0 align=1,8
+//! ss1 = explicit_slot 8 ; _2: (&&[u16],) size=8 align=8,8
+//! ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
+//! sig0 = (i64, i64, i64) system_v
+//! sig1 = (i64, i64, i64) system_v
+//! fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
+//!
+//! block0(v0: i64, v1: i64, v2: i64):
+//! v3 = stack_addr.i64 ss0
+//! v4 = stack_addr.i64 ss1
+//! store v2, v4
+//! v5 = stack_addr.i64 ss2
+//! jump block1
+//!
+//! block1:
+//! nop
+//! ; _3 = &mut _1
+//! ; _4 = _2
+//! v6 = load.i64 v4
+//! store v6, v5
+//! ;
+//! ; _0 = const mini_core::FnMut::call_mut(move _3, move _4)
+//! v7 = load.i64 v5
+//! call fn0(v0, v3, v7)
+//! jump block2
+//!
+//! block2:
+//! nop
+//! ;
+//! ; return
+//! return
+//! }
+//! ```
+
+use std::fmt;
+use std::io::Write;
+
+use cranelift_codegen::{
+ entity::SecondaryMap,
+ ir::entities::AnyEntity,
+ write::{FuncWriter, PlainWriter},
+};
+
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_session::config::OutputType;
+
+use crate::prelude::*;
+
+#[derive(Clone, Debug)]
+pub(crate) struct CommentWriter {
+ enabled: bool,
+ global_comments: Vec<String>,
+ entity_comments: FxHashMap<AnyEntity, String>,
+}
+
+impl CommentWriter {
+ pub(crate) fn new<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ let enabled = should_write_ir(tcx);
+ let global_comments = if enabled {
+ vec![
+ format!("symbol {}", tcx.symbol_name(instance).name),
+ format!("instance {:?}", instance),
+ format!(
+ "abi {:?}",
+ RevealAllLayoutCx(tcx).fn_abi_of_instance(instance, ty::List::empty())
+ ),
+ String::new(),
+ ]
+ } else {
+ vec![]
+ };
+
+ CommentWriter { enabled, global_comments, entity_comments: FxHashMap::default() }
+ }
+}
+
+impl CommentWriter {
+ pub(crate) fn enabled(&self) -> bool {
+ self.enabled
+ }
+
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ debug_assert!(self.enabled);
+ self.global_comments.push(comment.into());
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ debug_assert!(self.enabled);
+
+ use std::collections::hash_map::Entry;
+ match self.entity_comments.entry(entity.into()) {
+ Entry::Occupied(mut occ) => {
+ occ.get_mut().push('\n');
+ occ.get_mut().push_str(comment.as_ref());
+ }
+ Entry::Vacant(vac) => {
+ vac.insert(comment.into());
+ }
+ }
+ }
+}
+
+impl FuncWriter for &'_ CommentWriter {
+ fn write_preamble(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ ) -> Result<bool, fmt::Error> {
+ for comment in &self.global_comments {
+ if !comment.is_empty() {
+ writeln!(w, "; {}", comment)?;
+ } else {
+ writeln!(w)?;
+ }
+ }
+ if !self.global_comments.is_empty() {
+ writeln!(w)?;
+ }
+
+ self.super_preamble(w, func)
+ }
+
+ fn write_entity_definition(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ _func: &Function,
+ entity: AnyEntity,
+ value: &dyn fmt::Display,
+ ) -> fmt::Result {
+ write!(w, " {} = {}", entity, value)?;
+
+ if let Some(comment) = self.entity_comments.get(&entity) {
+ writeln!(w, " ; {}", comment.replace('\n', "\n; "))
+ } else {
+ writeln!(w)
+ }
+ }
+
+ fn write_block_header(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ block: Block,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_block_header(w, func, block, indent)
+ }
+
+ fn write_instruction(
+ &mut self,
+ w: &mut dyn fmt::Write,
+ func: &Function,
+ aliases: &SecondaryMap<Value, Vec<Value>>,
+ inst: Inst,
+ indent: usize,
+ ) -> fmt::Result {
+ PlainWriter.write_instruction(w, func, aliases, inst, indent)?;
+ if let Some(comment) = self.entity_comments.get(&inst.into()) {
+ writeln!(w, "; {}", comment.replace('\n', "\n; "))?;
+ }
+ Ok(())
+ }
+}
+
+impl FunctionCx<'_, '_, '_> {
+ pub(crate) fn add_global_comment<S: Into<String>>(&mut self, comment: S) {
+ self.clif_comments.add_global_comment(comment);
+ }
+
+ pub(crate) fn add_comment<S: Into<String> + AsRef<str>, E: Into<AnyEntity>>(
+ &mut self,
+ entity: E,
+ comment: S,
+ ) {
+ self.clif_comments.add_comment(entity, comment);
+ }
+}
+
+pub(crate) fn should_write_ir(tcx: TyCtxt<'_>) -> bool {
+ tcx.sess.opts.output_types.contains_key(&OutputType::LlvmAssembly)
+}
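+
+// Note: cg_clif has no dedicated `--emit` flag of its own, so `should_write_ir`
+// keys off `OutputType::LlvmAssembly`; passing `--emit llvm-ir` makes cg_clif
+// dump its Cranelift ir instead.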
+
+pub(crate) fn write_ir_file(
+ tcx: TyCtxt<'_>,
+ name: impl FnOnce() -> String,
+ write: impl FnOnce(&mut dyn Write) -> std::io::Result<()>,
+) {
+ if !should_write_ir(tcx) {
+ return;
+ }
+
+ let clif_output_dir = tcx.output_filenames(()).with_extension("clif");
+
+ match std::fs::create_dir(&clif_output_dir) {
+ Ok(()) => {}
+ Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {}
+ res @ Err(_) => res.unwrap(),
+ }
+
+ let clif_file_name = clif_output_dir.join(name());
+
+ let res = std::fs::File::create(clif_file_name).and_then(|mut file| write(&mut file));
+ if let Err(err) = res {
+ tcx.sess.warn(&format!("error writing ir file: {}", err));
+ }
+}
+
+pub(crate) fn write_clif_file<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ postfix: &str,
+ isa: &dyn cranelift_codegen::isa::TargetIsa,
+ instance: Instance<'tcx>,
+ func: &cranelift_codegen::ir::Function,
+ mut clif_comments: &CommentWriter,
+) {
+ // FIXME work around filename too long errors
+ write_ir_file(
+ tcx,
+ || format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix),
+ |file| {
+ let mut clif = String::new();
+ cranelift_codegen::write::decorate_function(&mut clif_comments, &mut clif, func)
+ .unwrap();
+
+ for flag in isa.flags().iter() {
+ writeln!(file, "set {}", flag)?;
+ }
+ write!(file, "target {}", isa.triple().architecture.to_string())?;
+ for isa_flag in isa.isa_flags().iter() {
+ write!(file, " {}", isa_flag)?;
+ }
+ writeln!(file, "\n")?;
+ writeln!(file)?;
+ file.write_all(clif.as_bytes())?;
+ Ok(())
+ },
+ );
+}
+
+impl fmt::Debug for FunctionCx<'_, '_, '_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ writeln!(f, "{:?}", self.instance.substs)?;
+ writeln!(f, "{:?}", self.local_map)?;
+
+ let mut clif = String::new();
+ ::cranelift_codegen::write::decorate_function(
+ &mut &self.clif_comments,
+ &mut clif,
+ &self.bcx.func,
+ )
+ .unwrap();
+ writeln!(f, "\n{}", clif)
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/toolchain.rs b/compiler/rustc_codegen_cranelift/src/toolchain.rs
new file mode 100644
index 000000000..f86236ef3
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/toolchain.rs
@@ -0,0 +1,31 @@
+//! Locating various executables that are part of a C toolchain.
+
+use std::path::PathBuf;
+
+use rustc_codegen_ssa::back::link::linker_and_flavor;
+use rustc_session::Session;
+
+/// Tries to infer the path of a binary for the target toolchain from the linker name.
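+///
+/// For example (illustrative), a linker named `aarch64-linux-gnu-gcc` together
+/// with `tool = "ar"` resolves to `aarch64-linux-gnu-ar` via the replacements
+/// below.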
+pub(crate) fn get_toolchain_binary(sess: &Session, tool: &str) -> PathBuf {
+ let (mut linker, _linker_flavor) = linker_and_flavor(sess);
+ let linker_file_name = linker
+ .file_name()
+ .and_then(|name| name.to_str())
+ .unwrap_or_else(|| sess.fatal("couldn't extract file name from specified linker"));
+
+ if linker_file_name == "ld.lld" {
+ if tool != "ld" {
+ linker.set_file_name(tool)
+ }
+ } else {
+ let tool_file_name = linker_file_name
+ .replace("ld", tool)
+ .replace("gcc", tool)
+ .replace("clang", tool)
+ .replace("cc", tool);
+
+ linker.set_file_name(tool_file_name)
+ }
+
+ linker
+}
diff --git a/compiler/rustc_codegen_cranelift/src/trap.rs b/compiler/rustc_codegen_cranelift/src/trap.rs
new file mode 100644
index 000000000..923269c4d
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/trap.rs
@@ -0,0 +1,57 @@
+//! Helpers used to print a message and abort in case of certain panics and some detected UB.
+
+use crate::prelude::*;
+
+fn codegen_print(fx: &mut FunctionCx<'_, '_, '_>, msg: &str) {
+ let puts = fx
+ .module
+ .declare_function(
+ "puts",
+ Linkage::Import,
+ &Signature {
+ call_conv: fx.target_config.default_call_conv,
+ params: vec![AbiParam::new(fx.pointer_type)],
+ returns: vec![AbiParam::new(types::I32)],
+ },
+ )
+ .unwrap();
+ let puts = fx.module.declare_func_in_func(puts, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(puts, "puts");
+ }
+
+ let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, fx.symbol_name, msg);
+ let msg_ptr = fx.anonymous_str(&real_msg);
+ fx.bcx.ins().call(puts, &[msg_ptr]);
+}
+
+/// Use this, for example, when a function call should never return. This will fill the current
+/// block, so you can **not** add instructions to it afterwards.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unreachable(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+ codegen_print(fx, msg.as_ref());
+ fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
+}
+
+/// Use this when something is unimplemented, but `libcore` or `libstd` requires it in order to
+/// codegen. Unlike `trap_unreachable`, this will not fill the current block, so you **must** add
+/// instructions to it afterwards.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unimplemented(fx: &mut FunctionCx<'_, '_, '_>, msg: impl AsRef<str>) {
+ codegen_print(fx, msg.as_ref());
+ let true_ = fx.bcx.ins().iconst(types::I32, 1);
+ fx.bcx.ins().trapnz(true_, TrapCode::User(!0));
+}
+
+/// Like `trap_unimplemented` but returns a fake value of the specified type.
+///
+/// Trap code: user65535
+pub(crate) fn trap_unimplemented_ret_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ dest_layout: TyAndLayout<'tcx>,
+ msg: impl AsRef<str>,
+) -> CValue<'tcx> {
+ trap_unimplemented(fx, msg);
+ CValue::by_ref(Pointer::const_addr(fx, 0), dest_layout)
+}
diff --git a/compiler/rustc_codegen_cranelift/src/unsize.rs b/compiler/rustc_codegen_cranelift/src/unsize.rs
new file mode 100644
index 000000000..052ca0a08
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/unsize.rs
@@ -0,0 +1,227 @@
+//! Codegen of the [`PointerCast::Unsize`] operation.
+//!
+//! [`PointerCast::Unsize`]: `rustc_middle::ty::adjustment::PointerCast::Unsize`
+
+use crate::prelude::*;
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/base.rs#L159-L307
+
+/// Retrieve the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit funny. It is intended for use
+/// in an upcast, where the new vtable for an object will be derived
+/// from the old one.
+pub(crate) fn unsized_info<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ old_info: Option<Value>,
+) -> Value {
+ let (source, target) =
+ fx.tcx.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
+ match (&source.kind(), &target.kind()) {
+ (&ty::Array(_, len), &ty::Slice(_)) => fx
+ .bcx
+ .ins()
+ .iconst(fx.pointer_type, len.eval_usize(fx.tcx, ParamEnv::reveal_all()) as i64),
+ (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+ let old_info =
+ old_info.expect("unsized_info: missing old info for trait upcasting coercion");
+ if data_a.principal_def_id() == data_b.principal_def_id() {
+ return old_info;
+ }
+
+ // trait upcasting coercion
+ let vptr_entry_idx =
+ fx.tcx.vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
+
+ if let Some(entry_idx) = vptr_entry_idx {
+ let entry_idx = u32::try_from(entry_idx).unwrap();
+ let entry_offset = entry_idx * fx.pointer_type.bytes();
+ let vptr_ptr = Pointer::new(old_info).offset_i64(fx, entry_offset.into()).load(
+ fx,
+ fx.pointer_type,
+ crate::vtable::vtable_memflags(),
+ );
+ vptr_ptr
+ } else {
+ old_info
+ }
+ }
+ (_, &ty::Dynamic(ref data, ..)) => crate::vtable::get_vtable(fx, source, data.principal()),
+ _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
+ }
+}
+
+/// Coerce `src` to `dst_ty`.
+fn unsize_ptr<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: Value,
+ src_layout: TyAndLayout<'tcx>,
+ dst_layout: TyAndLayout<'tcx>,
+ old_info: Option<Value>,
+) -> (Value, Value) {
+ match (&src_layout.ty.kind(), &dst_layout.ty.kind()) {
+ (&ty::Ref(_, a, _), &ty::Ref(_, b, _))
+ | (&ty::Ref(_, a, _), &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+ | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ (src, unsized_info(fx, *a, *b, old_info))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ let (a, b) = (src_layout.ty.boxed_ty(), dst_layout.ty.boxed_ty());
+ (src, unsized_info(fx, a, b, old_info))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ if src_layout == dst_layout {
+ return (src, old_info.unwrap());
+ }
+
+ let mut result = None;
+ for i in 0..src_layout.fields.count() {
+ let src_f = src_layout.field(fx, i);
+ assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+ assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+ if src_f.is_zst() {
+ continue;
+ }
+ assert_eq!(src_layout.size, src_f.size);
+
+ let dst_f = dst_layout.field(fx, i);
+ assert_ne!(src_f.ty, dst_f.ty);
+ assert_eq!(result, None);
+ result = Some(unsize_ptr(fx, src, src_f, dst_f, old_info));
+ }
+ result.unwrap()
+ }
+ _ => bug!("unsize_ptr: called on bad types"),
+ }
+}
+
+/// Coerce `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty`, and store the result in `dst`.
+pub(crate) fn coerce_unsized_into<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ src: CValue<'tcx>,
+ dst: CPlace<'tcx>,
+) {
+ let src_ty = src.layout().ty;
+ let dst_ty = dst.layout().ty;
+ let mut coerce_ptr = || {
+ let (base, info) =
+ if fx.layout_of(src.layout().ty.builtin_deref(true).unwrap().ty).is_unsized() {
+ let (old_base, old_info) = src.load_scalar_pair(fx);
+ unsize_ptr(fx, old_base, src.layout(), dst.layout(), Some(old_info))
+ } else {
+ let base = src.load_scalar(fx);
+ unsize_ptr(fx, base, src.layout(), dst.layout(), None)
+ };
+ dst.write_cvalue(fx, CValue::by_val_pair(base, info, dst.layout()));
+ };
+ match (&src_ty.kind(), &dst_ty.kind()) {
+ (&ty::Ref(..), &ty::Ref(..))
+ | (&ty::Ref(..), &ty::RawPtr(..))
+ | (&ty::RawPtr(..), &ty::RawPtr(..)) => coerce_ptr(),
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
+ let src_f = src.value_field(fx, mir::Field::new(i));
+ let dst_f = dst.place_field(fx, mir::Field::new(i));
+
+ if dst_f.layout().is_zst() {
+ continue;
+ }
+
+ if src_f.layout().ty == dst_f.layout().ty {
+ dst_f.write_cvalue(fx, src_f);
+ } else {
+ coerce_unsized_into(fx, src_f, dst_f);
+ }
+ }
+ }
+ _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
+ }
+}
+
+// Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
+
+pub(crate) fn size_and_align_of_dst<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ info: Value,
+) -> (Value, Value) {
+ assert!(layout.is_unsized() || layout.abi == Abi::Uninhabited);
+ match layout.ty.kind() {
+ ty::Dynamic(..) => {
+ // load size/align from vtable
+ (crate::vtable::size_of_obj(fx, info), crate::vtable::min_align_of_obj(fx, info))
+ }
+ ty::Slice(_) | ty::Str => {
+ let unit = layout.field(fx, 0);
+ // The info in this case is the length of the str, so the size is that
+ // times the unit size.
+ (
+ fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
+ fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
+ )
+ }
+ _ => {
+ // First get the size of all statically known fields.
+ // Don't use size_of because it also rounds up to alignment, which we
+ // want to avoid, as the unsized field's alignment could be smaller.
+ assert!(!layout.ty.is_simd());
+
+ let i = layout.fields.count() - 1;
+ let sized_size = layout.fields.offset(i).bytes();
+ let sized_align = layout.align.abi.bytes();
+ let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);
+
+ // Recurse to get the size of the dynamically sized field (must be
+ // the last field).
+ let field_layout = layout.field(fx, i);
+ let (unsized_size, mut unsized_align) = size_and_align_of_dst(fx, field_layout, info);
+
+ // FIXME (#26403, #27023): We should be adding padding
+ // to `sized_size` (to accommodate the `unsized_align`
+ // required of the unsized field that follows) before
+ // summing it with `sized_size`. (Note that since #26403
+ // is unfixed, we do not yet add the necessary padding
+ // here. But this is where the add would go.)
+
+ // Return the sum of sizes and max of aligns.
+ let size = fx.bcx.ins().iadd_imm(unsized_size, sized_size as i64);
+
+ // Packed types ignore the alignment of their fields.
+ if let ty::Adt(def, _) = layout.ty.kind() {
+ if def.repr().packed() {
+ unsized_align = sized_align;
+ }
+ }
+
+ // Choose max of two known alignments (combined value must
+ // be aligned according to more restrictive of the two).
+ let cmp = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
+ let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
+
+ // Issue #27023: must add any necessary padding to `size`
+ // (to make it a multiple of `align`) before returning it.
+ //
+ // Namely, the returned size should be, in C notation:
+ //
+ // `size + ((size & (align-1)) ? align : 0)`
+ //
+ // emulated via the semi-standard fast bit trick:
+ //
+ // `(size + (align-1)) & -align`
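+            //
+            // For example, size = 5 and align = 4: (5 + 3) & -4 = 8.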
+ let addend = fx.bcx.ins().iadd_imm(align, -1);
+ let add = fx.bcx.ins().iadd(size, addend);
+ let neg = fx.bcx.ins().ineg(align);
+ let size = fx.bcx.ins().band(add, neg);
+
+ (size, align)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
new file mode 100644
index 000000000..45ae2bd8f
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -0,0 +1,883 @@
+//! Definition of [`CValue`] and [`CPlace`]
+
+use crate::prelude::*;
+
+use cranelift_codegen::ir::immediates::Offset32;
+
+fn codegen_field<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ base: Pointer,
+ extra: Option<Value>,
+ layout: TyAndLayout<'tcx>,
+ field: mir::Field,
+) -> (Pointer, TyAndLayout<'tcx>) {
+ let field_offset = layout.fields.offset(field.index());
+ let field_layout = layout.field(&*fx, field.index());
+
+ let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
+ (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
+ };
+
+ if let Some(extra) = extra {
+ if !field_layout.is_unsized() {
+ return simple(fx);
+ }
+ match field_layout.ty.kind() {
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
+ ty::Adt(def, _) if def.repr().packed() => {
+ assert_eq!(layout.align.abi.bytes(), 1);
+ simple(fx)
+ }
+ _ => {
+                // We have to align the offset for DSTs.
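+                // The instructions below compute
+                // `(unaligned_offset + unsized_align - 1) & -unsized_align`;
+                // e.g. an offset of 5 with a dynamic align of 4 yields 8.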
+ let unaligned_offset = field_offset.bytes();
+ let (_, unsized_align) =
+ crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
+
+ let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
+ let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
+ let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
+ let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+ let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
+ let offset = fx.bcx.ins().band(and_lhs, and_rhs);
+
+ (base.offset_value(fx, offset), field_layout)
+ }
+ }
+ } else {
+ simple(fx)
+ }
+}
+
+fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
+ let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
+ Offset32::new(b_offset.bytes().try_into().unwrap())
+}
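+
+// For example, a fat pointer such as `&[u8]` is a scalar pair of (data ptr, len);
+// on a 64-bit target the data pointer occupies 8 bytes and `usize` is 8-aligned,
+// so `b_offset` is 8.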
+
+/// A read-only value
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
+
+#[derive(Debug, Copy, Clone)]
+enum CValueInner {
+ ByRef(Pointer, Option<Value>),
+ ByVal(Value),
+ ByValPair(Value, Value),
+}
+
+impl<'tcx> CValue<'tcx> {
+ pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, None), layout)
+ }
+
+ pub(crate) fn by_ref_unsized(
+ ptr: Pointer,
+ meta: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
+ }
+
+ pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
+ CValue(CValueInner::ByVal(value), layout)
+ }
+
+ pub(crate) fn by_val_pair(
+ value: Value,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ CValue(CValueInner::ByValPair(value, extra), layout)
+ }
+
+ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+ self.1
+ }
+
+ // FIXME remove
+ pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => (ptr, meta),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
+ let cplace = CPlace::new_stack_slot(fx, layout);
+ cplace.write_cvalue(fx, self);
+ (cplace.to_ptr(), None)
+ }
+ }
+ }
+
+ pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
+ match self.0 {
+ CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
+ CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
+ }
+ }
+
+    /// Load a value whose `layout.abi` is `Abi::Scalar`.
+ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let clif_ty = match layout.abi {
+ Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+ Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+ .by(u16::try_from(count).unwrap())
+ .unwrap(),
+ _ => unreachable!("{:?}", layout.ty),
+ };
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ ptr.load(fx, clif_ty, flags)
+ }
+ CValueInner::ByVal(value) => value,
+ CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
+ CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
+ }
+ }
+
+    /// Load a value pair whose `layout.abi` is `Abi::ScalarPair`.
+ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let (a_scalar, b_scalar) = match layout.abi {
+ Abi::ScalarPair(a, b) => (a, b),
+ _ => unreachable!("load_scalar_pair({:?})", self),
+ };
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
+ let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ let val1 = ptr.load(fx, clif_ty1, flags);
+ let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
+ (val1, val2)
+ }
+ CValueInner::ByRef(_, Some(_)) => {
+ bug!("load_scalar_pair for unsized value not allowed")
+ }
+ CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
+ CValueInner::ByValPair(val1, val2) => (val1, val2),
+ }
+ }
+
+ pub(crate) fn value_field(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ field: mir::Field,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count } => {
+ let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
+ let field = u8::try_from(field.index()).unwrap();
+ assert!(field < count);
+ let lane = fx.bcx.ins().extractlane(val, field);
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(lane, field_layout)
+ }
+ _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(val1, val2) => match layout.abi {
+ Abi::ScalarPair(_, _) => {
+ let val = match field.as_u32() {
+ 0 => val1,
+ 1 => val2,
+ _ => bug!("field should be 0 or 1"),
+ };
+ let field_layout = layout.field(&*fx, usize::from(field));
+ CValue::by_val(val, field_layout)
+ }
+ _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
+ },
+ CValueInner::ByRef(ptr, None) => {
+ let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
+ CValue::by_ref(field_ptr, field_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => todo!(),
+ }
+ }
+
+ /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
+ /// such that you can access individual lanes.
+ pub(crate) fn value_lane(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lane_idx: u64,
+ ) -> CValue<'tcx> {
+ let layout = self.1;
+ assert!(layout.ty.is_simd());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert!(lane_idx < lane_count);
+ match self.0 {
+ CValueInner::ByVal(val) => match layout.abi {
+ Abi::Vector { element: _, count: _ } => {
+ assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
+ let lane_idx = u8::try_from(lane_idx).unwrap();
+ let lane = fx.bcx.ins().extractlane(val, lane_idx);
+ CValue::by_val(lane, lane_layout)
+ }
+ _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
+ },
+ CValueInner::ByValPair(_, _) => unreachable!(),
+ CValueInner::ByRef(ptr, None) => {
+ let field_offset = lane_layout.size * lane_idx;
+ let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+ CValue::by_ref(field_ptr, lane_layout)
+ }
+ CValueInner::ByRef(_, Some(_)) => unreachable!(),
+ }
+ }
+
+ pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
+ crate::unsize::coerce_unsized_into(fx, self, dest);
+ }
+
+ /// If `ty` is signed, `const_val` must already be sign extended.
+ pub(crate) fn const_val(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ const_val: ty::ScalarInt,
+ ) -> CValue<'tcx> {
+ assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
+ use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
+
+ let clif_ty = fx.clif_type(layout.ty).unwrap();
+
+ if let ty::Bool = layout.ty.kind() {
+ assert!(
+ const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
+ "Invalid bool 0x{:032X}",
+ const_val
+ );
+ }
+
+ let val = match layout.ty.kind() {
+ ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+ let const_val = const_val.to_bits(layout.size).unwrap();
+ let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
+ let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
+ fx.bcx.ins().iconcat(lsb, msb)
+ }
+ ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
+ fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
+ }
+ ty::Float(FloatTy::F32) => {
+ fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
+ }
+ ty::Float(FloatTy::F64) => {
+ fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
+ }
+ _ => panic!(
+ "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
+ layout.ty
+ ),
+ };
+
+ CValue::by_val(val, layout)
+ }
+
+ pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
+ assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+ assert_eq!(self.layout().abi, layout.abi);
+ CValue(self.0, layout)
+ }
+}
+
+/// A place where a value can be written to or read from.
+#[derive(Debug, Copy, Clone)]
+pub(crate) struct CPlace<'tcx> {
+ inner: CPlaceInner,
+ layout: TyAndLayout<'tcx>,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub(crate) enum CPlaceInner {
+ Var(Local, Variable),
+ VarPair(Local, Variable, Variable),
+ VarLane(Local, Variable, u8),
+ Addr(Pointer, Option<Value>),
+}
+
+impl<'tcx> CPlace<'tcx> {
+ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ pub(crate) fn inner(&self) -> &CPlaceInner {
+ &self.inner
+ }
+
+ pub(crate) fn new_stack_slot(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ assert!(!layout.is_unsized());
+ if layout.size.bytes() == 0 {
+ return CPlace {
+ inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
+ layout,
+ };
+ }
+
+ if layout.size.bytes() >= u64::from(u32::MAX - 16) {
+ fx.tcx
+ .sess
+ .fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
+ }
+
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
+ });
+ CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
+ }
+
+ pub(crate) fn new_var(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ let var = Variable::with_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+ fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
+ CPlace { inner: CPlaceInner::Var(local, var), layout }
+ }
+
+ pub(crate) fn new_var_pair(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ local: Local,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ let var1 = Variable::with_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+ let var2 = Variable::with_u32(fx.next_ssa_var);
+ fx.next_ssa_var += 1;
+
+ let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
+ fx.bcx.declare_var(var1, ty1);
+ fx.bcx.declare_var(var2, ty2);
+ CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
+ }
+
+ pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
+ CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
+ }
+
+ pub(crate) fn for_ptr_with_extra(
+ ptr: Pointer,
+ extra: Value,
+ layout: TyAndLayout<'tcx>,
+ ) -> CPlace<'tcx> {
+ CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
+ }
+
+ pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
+ let layout = self.layout();
+ match self.inner {
+ CPlaceInner::Var(_local, var) => {
+ let val = fx.bcx.use_var(var);
+ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ CValue::by_val(val, layout)
+ }
+ CPlaceInner::VarPair(_local, var1, var2) => {
+ let val1 = fx.bcx.use_var(var1);
+ //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
+ let val2 = fx.bcx.use_var(var2);
+ //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
+ CValue::by_val_pair(val1, val2, layout)
+ }
+ CPlaceInner::VarLane(_local, var, lane) => {
+ let val = fx.bcx.use_var(var);
+ //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ let val = fx.bcx.ins().extractlane(val, lane);
+ CValue::by_val(val, layout)
+ }
+ CPlaceInner::Addr(ptr, extra) => {
+ if let Some(extra) = extra {
+ CValue::by_ref_unsized(ptr, extra, layout)
+ } else {
+ CValue::by_ref(ptr, layout)
+ }
+ }
+ }
+ }
+
+ pub(crate) fn to_ptr(self) -> Pointer {
+ match self.to_ptr_maybe_unsized() {
+ (ptr, None) => ptr,
+ (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
+ }
+ }
+
+ pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
+ match self.inner {
+ CPlaceInner::Addr(ptr, extra) => (ptr, extra),
+ CPlaceInner::Var(_, _)
+ | CPlaceInner::VarPair(_, _, _)
+ | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
+ }
+ }
+
+ pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
+ assert_assignable(fx, from.layout().ty, self.layout().ty, 16);
+
+ self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
+ }
+
+ pub(crate) fn write_cvalue_transmute(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ from: CValue<'tcx>,
+ ) {
+ self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
+ }
+
+ fn write_cvalue_maybe_transmute(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ from: CValue<'tcx>,
+ method: &'static str,
+ ) {
+ fn transmute_value<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ var: Variable,
+ data: Value,
+ dst_ty: Type,
+ ) {
+ let src_ty = fx.bcx.func.dfg.value_type(data);
+ assert_eq!(
+ src_ty.bytes(),
+ dst_ty.bytes(),
+ "write_cvalue_transmute: {:?} -> {:?}",
+ src_ty,
+ dst_ty,
+ );
+ let data = match (src_ty, dst_ty) {
+ (_, _) if src_ty == dst_ty => data,
+
+ // This is a `write_cvalue_transmute`.
+ (types::I32, types::F32)
+ | (types::F32, types::I32)
+ | (types::I64, types::F64)
+ | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
+ _ if src_ty.is_vector() && dst_ty.is_vector() => {
+ fx.bcx.ins().raw_bitcast(dst_ty, data)
+ }
+ _ if src_ty.is_vector() || dst_ty.is_vector() => {
+ // FIXME do something more efficient for transmutes between vectors and integers.
+ let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+ kind: StackSlotKind::ExplicitSlot,
+ // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+ // specify stack slot alignment.
+ size: (src_ty.bytes() + 15) / 16 * 16,
+ });
+ let ptr = Pointer::stack_slot(stack_slot);
+ ptr.store(fx, data, MemFlags::trusted());
+ ptr.load(fx, dst_ty, MemFlags::trusted())
+ }
+
+ // `CValue`s should never contain SSA-only types, so if you ended
+ // up here having seen an error like `B1 -> I8`, then before
+ // calling `write_cvalue` you need to add a `bint` instruction.
+ _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
+ };
+ //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx.def_var(var, data);
+ }
+
+ assert_eq!(self.layout().size, from.layout().size);
+
+ if fx.clif_comments.enabled() {
+ use cranelift_codegen::cursor::{Cursor, CursorPosition};
+ let cur_block = match fx.bcx.cursor().position() {
+ CursorPosition::After(block) => block,
+ _ => unreachable!(),
+ };
+ fx.add_comment(
+ fx.bcx.func.layout.last_inst(cur_block).unwrap(),
+ format!(
+ "{}: {:?}: {:?} <- {:?}: {:?}",
+ method,
+ self.inner(),
+ self.layout().ty,
+ from.0,
+ from.layout().ty
+ ),
+ );
+ }
+
+ let dst_layout = self.layout();
+ let to_ptr = match self.inner {
+ CPlaceInner::Var(_local, var) => {
+ if let ty::Array(element, len) = dst_layout.ty.kind() {
+ // Can only happen for vector types
+ let len =
+ u16::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
+ let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();
+
+ let data = match from.0 {
+ CValueInner::ByRef(ptr, None) => {
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ ptr.load(fx, vector_ty, flags)
+ }
+ CValueInner::ByVal(_)
+ | CValueInner::ByValPair(_, _)
+ | CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
+ };
+
+ fx.bcx.def_var(var, data);
+ return;
+ }
+ let data = CValue(from.0, dst_layout).load_scalar(fx);
+ let dst_ty = fx.clif_type(self.layout().ty).unwrap();
+ transmute_value(fx, var, data, dst_ty);
+ return;
+ }
+ CPlaceInner::VarPair(_local, var1, var2) => {
+ let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
+ let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
+ transmute_value(fx, var1, data1, dst_ty1);
+ transmute_value(fx, var2, data2, dst_ty2);
+ return;
+ }
+ CPlaceInner::VarLane(_local, var, lane) => {
+ let data = from.load_scalar(fx);
+
+ // First get the old vector
+ let vector = fx.bcx.use_var(var);
+ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+
+ // Next insert the written lane into the vector
+ let vector = fx.bcx.ins().insertlane(vector, data, lane);
+
+ // Finally write the new vector
+ //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+ fx.bcx.def_var(var, vector);
+
+ return;
+ }
+ CPlaceInner::Addr(ptr, None) => {
+ if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
+ return;
+ }
+ ptr
+ }
+ CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
+ };
+
+ let mut flags = MemFlags::new();
+ flags.set_notrap();
+ match from.layout().abi {
+ // FIXME make Abi::Vector work too
+ Abi::Scalar(_) => {
+ let val = from.load_scalar(fx);
+ to_ptr.store(fx, val, flags);
+ return;
+ }
+ Abi::ScalarPair(a_scalar, b_scalar) => {
+ let (value, extra) = from.load_scalar_pair(fx);
+ let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+ to_ptr.store(fx, value, flags);
+ to_ptr.offset(fx, b_offset).store(fx, extra, flags);
+ return;
+ }
+ _ => {}
+ }
+
+ match from.0 {
+ CValueInner::ByVal(val) => {
+ to_ptr.store(fx, val, flags);
+ }
+ CValueInner::ByValPair(_, _) => {
+ bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
+ }
+ CValueInner::ByRef(from_ptr, None) => {
+ let from_addr = from_ptr.get_addr(fx);
+ let to_addr = to_ptr.get_addr(fx);
+ let src_layout = from.1;
+ let size = dst_layout.size.bytes();
+ let src_align = src_layout.align.abi.bytes() as u8;
+ let dst_align = dst_layout.align.abi.bytes() as u8;
+ fx.bcx.emit_small_memory_copy(
+ fx.target_config,
+ to_addr,
+ from_addr,
+ size,
+ dst_align,
+ src_align,
+ true,
+ MemFlags::trusted(),
+ );
+ }
+ CValueInner::ByRef(_, Some(_)) => todo!(),
+ }
+ }
+
+ pub(crate) fn place_field(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ field: mir::Field,
+ ) -> CPlace<'tcx> {
+ let layout = self.layout();
+
+ match self.inner {
+ CPlaceInner::Var(local, var) => match layout.ty.kind() {
+ ty::Array(_, _) => {
+ // Can only happen for vector types
+ return CPlace {
+ inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
+ let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);
+
+ match f0_ty.kind() {
+ ty::Array(_, _) => {
+ assert_eq!(field.as_u32(), 0);
+ return CPlace {
+ inner: CPlaceInner::Var(local, var),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ _ => {
+ return CPlace {
+ inner: CPlaceInner::VarLane(
+ local,
+ var,
+ field.as_u32().try_into().unwrap(),
+ ),
+ layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+ };
+ }
+ }
+ }
+ _ => {}
+ },
+ CPlaceInner::VarPair(local, var1, var2) => {
+ let layout = layout.field(&*fx, field.index());
+
+ match field.as_u32() {
+ 0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
+ 1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
+ _ => unreachable!("field should be 0 or 1"),
+ }
+ }
+ _ => {}
+ }
+
+ let (base, extra) = self.to_ptr_maybe_unsized();
+
+ let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
+ if field_layout.is_unsized() {
+ if let ty::Foreign(_) = field_layout.ty.kind() {
+ assert!(extra.is_none());
+ CPlace::for_ptr(field_ptr, field_layout)
+ } else {
+ CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
+ }
+ } else {
+ CPlace::for_ptr(field_ptr, field_layout)
+ }
+ }
+
+ /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
+ /// such that you can access individual lanes.
+ pub(crate) fn place_lane(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ lane_idx: u64,
+ ) -> CPlace<'tcx> {
+ let layout = self.layout();
+ assert!(layout.ty.is_simd());
+ let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+ let lane_layout = fx.layout_of(lane_ty);
+ assert!(lane_idx < lane_count);
+
+ match self.inner {
+ CPlaceInner::Var(local, var) => {
+ assert!(matches!(layout.abi, Abi::Vector { .. }));
+ CPlace {
+ inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
+ layout: lane_layout,
+ }
+ }
+ CPlaceInner::VarPair(_, _, _) => unreachable!(),
+ CPlaceInner::VarLane(_, _, _) => unreachable!(),
+ CPlaceInner::Addr(ptr, None) => {
+ let field_offset = lane_layout.size * lane_idx;
+ let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+ CPlace::for_ptr(field_ptr, lane_layout)
+ }
+ CPlaceInner::Addr(_, Some(_)) => unreachable!(),
+ }
+ }
+
+ pub(crate) fn place_index(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ index: Value,
+ ) -> CPlace<'tcx> {
+ let (elem_layout, ptr) = match self.layout().ty.kind() {
+ ty::Array(elem_ty, _) => (fx.layout_of(*elem_ty), self.to_ptr()),
+ ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
+ _ => bug!("place_index({:?})", self.layout().ty),
+ };
+
+ let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
+
+ CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
+ }
+
+ pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
+ let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
+ if has_ptr_meta(fx.tcx, inner_layout.ty) {
+ let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
+ CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
+ } else {
+ CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
+ }
+ }
+
+ pub(crate) fn place_ref(
+ self,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ ) -> CValue<'tcx> {
+ if has_ptr_meta(fx.tcx, self.layout().ty) {
+ let (ptr, extra) = self.to_ptr_maybe_unsized();
+ CValue::by_val_pair(
+ ptr.get_addr(fx),
+ extra.expect("unsized type without metadata"),
+ layout,
+ )
+ } else {
+ CValue::by_val(self.to_ptr().get_addr(fx), layout)
+ }
+ }
+
+ pub(crate) fn downcast_variant(
+ self,
+ fx: &FunctionCx<'_, '_, 'tcx>,
+ variant: VariantIdx,
+ ) -> Self {
+ assert!(!self.layout().is_unsized());
+ let layout = self.layout().for_variant(fx, variant);
+ CPlace { inner: self.inner, layout }
+ }
+}
+
+#[track_caller]
+pub(crate) fn assert_assignable<'tcx>(
+ fx: &FunctionCx<'_, '_, 'tcx>,
+ from_ty: Ty<'tcx>,
+ to_ty: Ty<'tcx>,
+ limit: usize,
+) {
+ if limit == 0 {
+        // `assert_assignable` exists solely to catch bugs in cg_clif; it isn't necessary for
+        // soundness. Don't attempt to check deep types, to avoid exponential behavior in
+        // certain cases.
+ return;
+ }
+ match (from_ty.kind(), to_ty.kind()) {
+ (ty::Ref(_, a, _), ty::Ref(_, b, _))
+ | (
+ ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
+ ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
+ ) => {
+ assert_assignable(fx, *a, *b, limit - 1);
+ }
+ (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
+ | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
+ assert_assignable(fx, *a, *b, limit - 1);
+ }
+ (ty::FnPtr(_), ty::FnPtr(_)) => {
+ let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
+ ParamEnv::reveal_all(),
+ from_ty.fn_sig(fx.tcx),
+ );
+ let to_sig = fx
+ .tcx
+ .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
+ assert_eq!(
+ from_sig, to_sig,
+ "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+ from_sig, to_sig, fx,
+ );
+ // fn(&T) -> for<'l> fn(&'l T) is allowed
+ }
+ (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
+ for (from, to) in from_traits.iter().zip(to_traits) {
+ let from =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
+ let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
+ assert_eq!(
+ from, to,
+ "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
+ from_traits, to_traits, fx,
+ );
+ }
+ // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
+ }
+ (&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
+ let mut types_a = types_a.iter();
+ let mut types_b = types_b.iter();
+ loop {
+ match (types_a.next(), types_b.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => return,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ }
+ (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
+ if adt_def_a.did() == adt_def_b.did() =>
+ {
+ let mut types_a = substs_a.types();
+ let mut types_b = substs_b.types();
+ loop {
+ match (types_a.next(), types_b.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => return,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ }
+ (ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
+ (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
+ if def_id_a == def_id_b =>
+ {
+ let mut types_a = substs_a.types();
+ let mut types_b = substs_b.types();
+ loop {
+ match (types_a.next(), types_b.next()) {
+ (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+ (None, None) => return,
+ (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+ }
+ }
+ }
+ (ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
+ // No way to check if it is correct or not with polymorphization enabled
+ }
+ _ => {
+ assert_eq!(
+ from_ty,
+ to_ty,
+ "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
+ from_ty.kind(),
+ to_ty.kind(),
+ fx,
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs
new file mode 100644
index 000000000..36b3725ef
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/src/vtable.rs
@@ -0,0 +1,79 @@
+//! Codegen vtables and vtable accesses.
+//!
+//! See `rustc_codegen_ssa/src/meth.rs` for reference.
+
+use crate::constant::data_id_for_alloc_id;
+use crate::prelude::*;
+
+pub(crate) fn vtable_memflags() -> MemFlags {
+ let mut flags = MemFlags::trusted(); // A vtable access is always aligned and will never trap.
+ flags.set_readonly(); // A vtable is always read-only.
+ flags
+}
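+
+// The loads below rely on the standard vtable layout: the first three
+// `usize`-sized slots are the `drop_in_place` fn, the size and the alignment
+// (the `ty::COMMON_VTABLE_ENTRIES_*` indices used here), with trait methods
+// following after them.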
+
+pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (ty::COMMON_VTABLE_ENTRIES_DROPINPLACE * usize_size) as i32,
+ )
+}
+
+pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (ty::COMMON_VTABLE_ENTRIES_SIZE * usize_size) as i32,
+ )
+}
+
+pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, '_>, vtable: Value) -> Value {
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes() as usize;
+ fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (ty::COMMON_VTABLE_ENTRIES_ALIGN * usize_size) as i32,
+ )
+}
+
+pub(crate) fn get_ptr_and_method_ref<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ arg: CValue<'tcx>,
+ idx: usize,
+) -> (Value, Value) {
+ let (ptr, vtable) = if let Abi::ScalarPair(_, _) = arg.layout().abi {
+ arg.load_scalar_pair(fx)
+ } else {
+ let (ptr, vtable) = arg.try_to_ptr().unwrap();
+ (ptr.get_addr(fx), vtable.unwrap())
+ };
+
+ let usize_size = fx.layout_of(fx.tcx.types.usize).size.bytes();
+ let func_ref = fx.bcx.ins().load(
+ fx.pointer_type,
+ vtable_memflags(),
+ vtable,
+ (idx * usize_size as usize) as i32,
+ );
+ (ptr, func_ref)
+}
+
+pub(crate) fn get_vtable<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
+ ty: Ty<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> Value {
+ let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
+ let data_id =
+ data_id_for_alloc_id(&mut fx.constants_cx, &mut *fx.module, alloc_id, Mutability::Not);
+ let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ if fx.clif_comments.enabled() {
+ fx.add_comment(local_data_id, format!("vtable: {:?}", alloc_id));
+ }
+ fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
+}
diff --git a/compiler/rustc_codegen_cranelift/test.sh b/compiler/rustc_codegen_cranelift/test.sh
new file mode 100755
index 000000000..a10924628
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/test.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+set -e
+
+./y.rs build --sysroot none "$@"
+
+rm -r target/out || true
+
+scripts/tests.sh no_sysroot
+
+./y.rs build "$@"
+
+scripts/tests.sh base_sysroot
+scripts/tests.sh extended_sysroot
diff --git a/compiler/rustc_codegen_cranelift/y.rs b/compiler/rustc_codegen_cranelift/y.rs
new file mode 100755
index 000000000..f177b91c2
--- /dev/null
+++ b/compiler/rustc_codegen_cranelift/y.rs
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#![deny(unsafe_code)] /*This line is ignored by bash
+# This block is ignored by rustc
+set -e
+echo "[BUILD] y.rs" 1>&2
+rustc $0 -o ${0/.rs/.bin} -Cdebuginfo=1
+exec ${0/.rs/.bin} $@
+*/
+
+//! The build system for cg_clif
+//!
+//! # Manual compilation
+//!
+//! If your system doesn't support shell scripts, you can manually compile and run this file
+//! using, for example:
+//!
+//! ```shell
+//! $ rustc y.rs -o y.bin
+//! $ ./y.bin
+//! ```
+//!
+//! # Naming
+//!
+//! The name `y.rs` was chosen to not conflict with rustc's `x.py`.
+
+#[path = "build_system/mod.rs"]
+mod build_system;
+
+fn main() {
+ build_system::main();
+}
diff --git a/compiler/rustc_codegen_gcc/.github/workflows/ci.yml b/compiler/rustc_codegen_gcc/.github/workflows/ci.yml
new file mode 100644
index 000000000..8ebdabe82
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/.github/workflows/ci.yml
@@ -0,0 +1,131 @@
+name: CI
+
+on:
+ - push
+ - pull_request
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ strategy:
+ fail-fast: false
+ matrix:
+ libgccjit_version: ["libgccjit.so", "libgccjit_without_int128.so", "libgccjit12.so"]
+
+ steps:
+ - uses: actions/checkout@v2
+
+ - uses: actions/checkout@v2
+ with:
+ repository: llvm/llvm-project
+ path: llvm
+
+ - name: Install packages
+ run: sudo apt-get install ninja-build ripgrep
+
+ - name: Download artifact
+ uses: dawidd6/action-download-artifact@v2
+ with:
+ workflow: main.yml
+ name: ${{ matrix.libgccjit_version }}
+ path: gcc-build
+ repo: antoyo/gcc
+ search_artifacts: true # Otherwise the action checks only the last job that ran, which won't work since we want multiple artifacts.
+
+ - name: Setup path to libgccjit
+ run: |
+ echo $(readlink -f gcc-build) > gcc_path
+ # NOTE: the filename is still libgccjit.so even when the artifact name is different.
+ ln gcc-build/libgccjit.so gcc-build/libgccjit.so.0
+
+ - name: Set env
+ run: |
+ echo "LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
+ echo "LD_LIBRARY_PATH=$(cat gcc_path)" >> $GITHUB_ENV
+ echo "workspace="$GITHUB_WORKSPACE >> $GITHUB_ENV
+
+ - name: Set RUST_COMPILER_RT_ROOT
+ run: echo "RUST_COMPILER_RT_ROOT="${{ env.workspace }}/llvm/compiler-rt >> $GITHUB_ENV
+
+ # https://github.com/actions/cache/issues/133
+ - name: Fixup owner of ~/.cargo/
+ # Don't remove the trailing /. It is necessary to follow the symlink.
+ run: sudo chown -R $(whoami):$(id -ng) ~/.cargo/
+
+ - name: Cache cargo installed crates
+ uses: actions/cache@v1.1.2
+ with:
+ path: ~/.cargo/bin
+ key: cargo-installed-crates2-ubuntu-latest
+
+ - name: Cache cargo registry
+ uses: actions/cache@v1
+ with:
+ path: ~/.cargo/registry
+ key: ${{ runner.os }}-cargo-registry2-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo index
+ uses: actions/cache@v1
+ with:
+ path: ~/.cargo/git
+ key: ${{ runner.os }}-cargo-index-${{ hashFiles('**/Cargo.lock') }}
+
+ - name: Cache cargo target dir
+ uses: actions/cache@v1.1.2
+ with:
+ path: target
+ key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('rust-toolchain') }}
+
+ - name: Build
+ if: matrix.libgccjit_version != 'libgccjit12.so'
+ run: |
+ ./prepare_build.sh
+ ./build.sh
+ cargo test
+ ./clean_all.sh
+
+ - name: Build
+ if: matrix.libgccjit_version == 'libgccjit12.so'
+ run: |
+ ./prepare_build.sh
+ ./build.sh --no-default-features
+ cargo test --no-default-features
+ ./clean_all.sh
+
+ - name: Prepare dependencies
+ run: |
+ git config --global user.email "user@example.com"
+ git config --global user.name "User"
+ ./prepare.sh
+
+ # Compile is a separate step, as the actions-rs/cargo action supports error annotations
+ - name: Compile
+ uses: actions-rs/cargo@v1.0.3
+ with:
+ command: build
+ args: --release
+
+ - name: Test
+ if: matrix.libgccjit_version != 'libgccjit12.so'
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
+ export COMPILE_RUNS=2
+ export RUN_RUNS=2
+
+ ./test.sh --release
+
+ - name: Test
+ if: matrix.libgccjit_version == 'libgccjit12.so'
+ run: |
+ # Enable backtraces for easier debugging
+ export RUST_BACKTRACE=1
+
+ # Reduce amount of benchmark runs as they are slow
+ export COMPILE_RUNS=2
+ export RUN_RUNS=2
+
+ ./test.sh --release --no-default-features
diff --git a/compiler/rustc_codegen_gcc/.rustfmt.toml b/compiler/rustc_codegen_gcc/.rustfmt.toml
new file mode 100644
index 000000000..c7ad93baf
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/.rustfmt.toml
@@ -0,0 +1 @@
+disable_all_formatting = true
diff --git a/compiler/rustc_codegen_gcc/Cargo.lock b/compiler/rustc_codegen_gcc/Cargo.lock
new file mode 100644
index 000000000..6df210247
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/Cargo.lock
@@ -0,0 +1,330 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "aho-corasick"
+version = "0.7.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f"
+dependencies = [
+ "memchr",
+]
+
+[[package]]
+name = "ar"
+version = "0.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "450575f58f7bee32816abbff470cbc47797397c2a81e0eaced4b98436daf52e1"
+
+[[package]]
+name = "bitflags"
+version = "1.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "fm"
+version = "0.1.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68fda3cff2cce84c19e5dfa5179a4b35d2c0f18b893f108002b8a6a54984acca"
+dependencies = [
+ "regex",
+]
+
+[[package]]
+name = "gccjit"
+version = "1.0.0"
+source = "git+https://github.com/antoyo/gccjit.rs#bdb86fb5092895ff5589726b33250010c64d93f6"
+dependencies = [
+ "gccjit_sys",
+]
+
+[[package]]
+name = "gccjit_sys"
+version = "0.0.1"
+source = "git+https://github.com/antoyo/gccjit.rs#bdb86fb5092895ff5589726b33250010c64d93f6"
+dependencies = [
+ "libc 0.1.12",
+]
+
+[[package]]
+name = "getopts"
+version = "0.2.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
+dependencies = [
+ "unicode-width",
+]
+
+[[package]]
+name = "getrandom"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
+dependencies = [
+ "cfg-if",
+ "libc 0.2.112",
+ "wasi",
+]
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
+dependencies = [
+ "libc 0.2.112",
+]
+
+[[package]]
+name = "lang_tester"
+version = "0.3.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "96bd995a092cac79868250589869b5a5d656b02a02bd74c8ebdc566dc7203090"
+dependencies = [
+ "fm",
+ "getopts",
+ "libc 0.2.112",
+ "num_cpus",
+ "termcolor",
+ "threadpool",
+ "wait-timeout",
+ "walkdir",
+]
+
+[[package]]
+name = "libc"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e32a70cf75e5846d53a673923498228bbec6a8624708a9ea5645f075d6276122"
+
+[[package]]
+name = "libc"
+version = "0.2.112"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125"
+
+[[package]]
+name = "memchr"
+version = "2.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a"
+
+[[package]]
+name = "num_cpus"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
+dependencies = [
+ "hermit-abi",
+ "libc 0.2.112",
+]
+
+[[package]]
+name = "ppv-lite86"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba"
+
+[[package]]
+name = "rand"
+version = "0.8.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
+dependencies = [
+ "libc 0.2.112",
+ "rand_chacha",
+ "rand_core",
+ "rand_hc",
+]
+
+[[package]]
+name = "rand_chacha"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
+dependencies = [
+ "ppv-lite86",
+ "rand_core",
+]
+
+[[package]]
+name = "rand_core"
+version = "0.6.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
+dependencies = [
+ "getrandom",
+]
+
+[[package]]
+name = "rand_hc"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
+dependencies = [
+ "rand_core",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.2.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff"
+dependencies = [
+ "bitflags",
+]
+
+[[package]]
+name = "regex"
+version = "1.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461"
+dependencies = [
+ "aho-corasick",
+ "memchr",
+ "regex-syntax",
+]
+
+[[package]]
+name = "regex-syntax"
+version = "0.6.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
+
+[[package]]
+name = "remove_dir_all"
+version = "0.5.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "rustc_codegen_gcc"
+version = "0.1.0"
+dependencies = [
+ "ar",
+ "gccjit",
+ "lang_tester",
+ "target-lexicon",
+ "tempfile",
+]
+
+[[package]]
+name = "same-file"
+version = "1.0.6"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "target-lexicon"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ab0e7238dcc7b40a7be719a25365910f6807bd864f4cce6b2e6b873658e2b19d"
+
+[[package]]
+name = "tempfile"
+version = "3.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
+dependencies = [
+ "cfg-if",
+ "libc 0.2.112",
+ "rand",
+ "redox_syscall",
+ "remove_dir_all",
+ "winapi",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.1.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "threadpool"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
+dependencies = [
+ "num_cpus",
+]
+
+[[package]]
+name = "unicode-width"
+version = "0.1.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973"
+
+[[package]]
+name = "wait-timeout"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6"
+dependencies = [
+ "libc 0.2.112",
+]
+
+[[package]]
+name = "walkdir"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56"
+dependencies = [
+ "same-file",
+ "winapi",
+ "winapi-util",
+]
+
+[[package]]
+name = "wasi"
+version = "0.10.2+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/compiler/rustc_codegen_gcc/Cargo.toml b/compiler/rustc_codegen_gcc/Cargo.toml
new file mode 100644
index 000000000..211d19a8d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/Cargo.toml
@@ -0,0 +1,54 @@
+[package]
+name = "rustc_codegen_gcc"
+version = "0.1.0"
+authors = ["Antoni Boucher <bouanto@zoho.com>"]
+edition = "2018"
+license = "MIT OR Apache-2.0"
+
+[lib]
+crate-type = ["dylib"]
+
+[[test]]
+name = "lang_tests_debug"
+path = "tests/lang_tests_debug.rs"
+harness = false
+[[test]]
+name = "lang_tests_release"
+path = "tests/lang_tests_release.rs"
+harness = false
+
+[features]
+default = ["master"]
+master = ["gccjit/master"]
+
+[dependencies]
+gccjit = { git = "https://github.com/antoyo/gccjit.rs" }
+
+# Local copy.
+#gccjit = { path = "../gccjit.rs" }
+
+target-lexicon = "0.10.0"
+
+ar = "0.8.0"
+
+[dev-dependencies]
+lang_tester = "0.3.9"
+tempfile = "3.1.0"
+
+[profile.dev]
+# Compiling dependencies with optimizations makes running tests much faster.
+opt-level = 3
+
+[profile.dev.package.rustc_codegen_gcc]
+# Disabling optimizations for cg_gccjit itself makes compilation after a change faster.
+opt-level = 0
+
+# Disable optimizations and debuginfo of build scripts and some of the heavy build deps, as the
+# execution time of build scripts is so fast that optimizing them slows down the total build time.
+[profile.dev.build-override]
+opt-level = 0
+debug = false
+
+[profile.release.build-override]
+opt-level = 0
+debug = false
diff --git a/compiler/rustc_codegen_gcc/LICENSE-APACHE b/compiler/rustc_codegen_gcc/LICENSE-APACHE
new file mode 100644
index 000000000..1b5ec8b78
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/LICENSE-APACHE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/compiler/rustc_codegen_gcc/LICENSE-MIT b/compiler/rustc_codegen_gcc/LICENSE-MIT
new file mode 100644
index 000000000..31aa79387
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/compiler/rustc_codegen_gcc/Readme.md b/compiler/rustc_codegen_gcc/Readme.md
new file mode 100644
index 000000000..fe23a2676
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/Readme.md
@@ -0,0 +1,147 @@
+# WIP libgccjit codegen backend for Rust
+
+This is a GCC codegen for rustc, which means it can be loaded by the existing rustc frontend, but benefits from GCC: more architectures are supported and GCC's optimizations are used.
+
+**Despite its name, libgccjit can be used for ahead-of-time compilation, as it is used here.**
+
+## Motivation
+
+The primary goal of this project is to be able to compile Rust code on platforms unsupported by LLVM.
+A secondary goal is to check whether using the GCC backend provides any run-time speed improvement for programs compiled with rustc.
+
+## Building
+
+**This requires a patched libgccjit in order to work.
+The patches in [this repository](https://github.com/antoyo/libgccjit-patches) need to be applied.
+(Those patches should apply on master, but if they don't, they are known to work when applied on 079c23cfe079f203d5df83fea8e92a60c7d7e878.)
+You can also use my [fork of gcc](https://github.com/antoyo/gcc) which already includes these patches.**
+
+**Put the path to your custom build of libgccjit in the file `gcc_path`.**
+
+```bash
+$ git clone https://github.com/rust-lang/rustc_codegen_gcc.git
+$ cd rustc_codegen_gcc
+$ git clone https://github.com/llvm/llvm-project llvm --depth 1 --single-branch
+$ export RUST_COMPILER_RT_ROOT="$PWD/llvm/compiler-rt"
+$ ./prepare_build.sh # download and patch sysroot src
+$ ./build.sh --release
+```
+
+To run the tests:
+
+```bash
+$ ./prepare.sh # download and patch sysroot src and install hyperfine for benchmarking
+$ ./test.sh --release
+```
+
+## Usage
+
+In the following instructions, `$cg_gccjit_dir` is the directory into which you cloned this repo.
+
+### Cargo
+
+```bash
+$ CHANNEL="release" $cg_gccjit_dir/cargo.sh run
+```
+
+If you compiled cg_gccjit in debug mode (i.e. you didn't pass `--release` to `./test.sh`), use `CHANNEL="debug"` instead, or omit `CHANNEL="release"` entirely.
+
+### Rustc
+
+> You should prefer using the Cargo method.
+
+```bash
+$ rustc +$(cat $cg_gccjit_dir/rust-toolchain) -Cpanic=abort -Zcodegen-backend=$cg_gccjit_dir/target/release/librustc_codegen_gcc.so --sysroot $cg_gccjit_dir/build_sysroot/sysroot my_crate.rs
+```
+
+## Env vars
+
+<dl>
+ <dt>CG_GCCJIT_INCR_CACHE_DISABLED</dt>
+ <dd>Don't cache object files in the incremental cache. Useful during development of cg_gccjit:
+ it lets rustc run all of its analyses in incremental mode without reusing cached object files
+ whose contents would have changed due to a change in cg_gccjit.</dd>
+ <dt>CG_GCCJIT_DISPLAY_CG_TIME</dt>
+ <dd>Display the time it took to perform codegen for a crate</dd>
+</dl>
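+
+For example, to time codegen while hacking on the backend (an illustrative
+invocation; any cargo.sh subcommand works the same way):
+
+```bash
+$ CG_GCCJIT_DISPLAY_CG_TIME=1 CHANNEL="debug" $cg_gccjit_dir/cargo.sh build
+```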
+
+## Debugging
+
+Sometimes, libgccjit will crash and output an error like this:
+
+```
+during RTL pass: expand
+libgccjit.so: error: in expmed_mode_index, at expmed.h:249
+0x7f0da2e61a35 expmed_mode_index
+ ../../../gcc/gcc/expmed.h:249
+0x7f0da2e61aa4 expmed_op_cost_ptr
+ ../../../gcc/gcc/expmed.h:271
+0x7f0da2e620dc sdiv_cost_ptr
+ ../../../gcc/gcc/expmed.h:540
+0x7f0da2e62129 sdiv_cost
+ ../../../gcc/gcc/expmed.h:558
+0x7f0da2e73c12 expand_divmod(int, tree_code, machine_mode, rtx_def*, rtx_def*, rtx_def*, int)
+ ../../../gcc/gcc/expmed.c:4335
+0x7f0da2ea1423 expand_expr_real_2(separate_ops*, rtx_def*, machine_mode, expand_modifier)
+ ../../../gcc/gcc/expr.c:9240
+0x7f0da2cd1a1e expand_gimple_stmt_1
+ ../../../gcc/gcc/cfgexpand.c:3796
+0x7f0da2cd1c30 expand_gimple_stmt
+ ../../../gcc/gcc/cfgexpand.c:3857
+0x7f0da2cd90a9 expand_gimple_basic_block
+ ../../../gcc/gcc/cfgexpand.c:5898
+0x7f0da2cdade8 execute
+ ../../../gcc/gcc/cfgexpand.c:6582
+```
+
+To see the code which causes this error, call the following function:
+
+```c
+gcc_jit_context_dump_to_file(ctxt, "/tmp/output.c", 1 /* update_locations */)
+```
+
+This will create a C-like file and add locations to the IR that point into this C file.
+Then, rerun the program and it will output the location in the second line:
+
+```
+libgccjit.so: /tmp/something.c:61322:0: error: in expmed_mode_index, at expmed.h:249
+```
+
+Or add a breakpoint to `add_error` in gdb and print the line number using:
+
+```
+p loc->m_line
+p loc->m_filename->m_buffer
+```
+
+To print a debug representation of a tree:
+
+```c
+debug_tree(expr);
+```
+
+To get the `rustc` command to run in `gdb`, add the `--verbose` flag to `cargo build`.
+
+### How to use a custom-built rustc
+
+ * Build the stage2 compiler, then register it as a toolchain (`rustup toolchain link debug-current build/x86_64-unknown-linux-gnu/stage2`).
+ * Clean and rebuild the codegen with `debug-current` in the file `rust-toolchain`.
+
+### How to build a cross-compiling libgccjit
+
+#### Building libgccjit
+
+ * Follow these instructions: https://preshing.com/20141119/how-to-build-a-gcc-cross-compiler/ with the following changes:
+ * Configure gcc with `../gcc/configure --enable-host-shared --disable-multilib --enable-languages=c,jit,c++ --disable-bootstrap --enable-checking=release --prefix=/opt/m68k-gcc/ --target=m68k-linux --without-headers`.
+ * Some shells, like fish, don't define the environment variable `$MACHTYPE`.
+ * Add `CFLAGS="-Wno-error=attributes -g -O2"` at the end of the configure command for building glibc (`CFLAGS="-Wno-error=attributes -Wno-error=array-parameter -Wno-error=stringop-overflow -Wno-error=array-bounds -g -O2"` for glibc 2.31, which is useful for Debian).
+
+#### Configuring rustc_codegen_gcc
+
+ * Set `TARGET_TRIPLE="m68k-unknown-linux-gnu"` in config.sh (see the sketch after this list).
+ * Since rustc doesn't support this architecture yet, set it back to `TARGET_TRIPLE="mips-unknown-linux-gnu"` (or another target with the same attributes). Alternatively, create a [target specification file](https://book.avr-rust.com/005.1-the-target-specification-json-file.html) (note that the `arch` specified in this file must be supported by the Rust compiler).
+ * Set `linker='-Clinker=m68k-linux-gcc'`.
+ * Set the path to the cross-compiling libgccjit in `gcc_path`.
+ * Disable the 128-bit integer types if the target doesn't support them by using `let i128_type = context.new_type::<i64>();` in `context.rs` (same for u128_type).
+ * Comment the line: `context.add_command_line_option("-masm=intel");` in src/base.rs.
+ * (might not be necessary) Disable the compilation of libstd.so (and possibly libcore.so?).
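+
+A minimal sketch of what those settings could look like in config.sh (the
+triple and linker come from the list above; names and paths are examples to
+adapt for your setup):
+
+```bash
+# rustc has no m68k target yet, so reuse a triple with the same attributes.
+TARGET_TRIPLE="mips-unknown-linux-gnu"
+linker='-Clinker=m68k-linux-gcc'
+```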
diff --git a/compiler/rustc_codegen_gcc/build.sh b/compiler/rustc_codegen_gcc/build.sh
new file mode 100755
index 000000000..ba0d0d049
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build.sh
@@ -0,0 +1,67 @@
+#!/usr/bin/env bash
+
+#set -x
+set -e
+
+codegen_channel=debug
+sysroot_channel=debug
+
+flags=
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --release)
+ codegen_channel=release
+ shift
+ ;;
+ --release-sysroot)
+ sysroot_channel=release
+ shift
+ ;;
+ --no-default-features)
+ flags="$flags --no-default-features"
+ shift
+ ;;
+ --features)
+ shift
+ flags="$flags --features $1"
+ shift
+ ;;
+ *)
+ echo "Unknown option $1"
+ exit 1
+ ;;
+ esac
+done
+
+if [ -f ./gcc_path ]; then
+ export GCC_PATH=$(cat gcc_path)
+else
+ echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`; see Readme.md for details'
+ exit 1
+fi
+
+export LD_LIBRARY_PATH="$GCC_PATH"
+export LIBRARY_PATH="$GCC_PATH"
+
+if [[ "$codegen_channel" == "release" ]]; then
+ export CHANNEL='release'
+ CARGO_INCREMENTAL=1 cargo rustc --release $flags
+else
+ echo $LD_LIBRARY_PATH
+ export CHANNEL='debug'
+ cargo rustc $flags
+fi
+
+source config.sh
+
+rm -r target/out || true
+mkdir -p target/out/gccjit
+
+echo "[BUILD] sysroot"
+if [[ "$sysroot_channel" == "release" ]]; then
+ time ./build_sysroot/build_sysroot.sh --release
+else
+ time ./build_sysroot/build_sysroot.sh
+fi
+
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml b/compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml
new file mode 100644
index 000000000..cfadf47cc
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build_sysroot/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+authors = ["bjorn3 <bjorn3@users.noreply.github.com>"]
+name = "sysroot"
+version = "0.0.0"
+
+[dependencies]
+core = { path = "./sysroot_src/library/core" }
+compiler_builtins = "0.1"
+alloc = { path = "./sysroot_src/library/alloc" }
+std = { path = "./sysroot_src/library/std", features = ["panic_unwind", "backtrace"] }
+test = { path = "./sysroot_src/library/test" }
+
+[patch.crates-io]
+rustc-std-workspace-core = { path = "./sysroot_src/library/rustc-std-workspace-core" }
+rustc-std-workspace-alloc = { path = "./sysroot_src/library/rustc-std-workspace-alloc" }
+rustc-std-workspace-std = { path = "./sysroot_src/library/rustc-std-workspace-std" }
+
+[profile.release]
+debug = true
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh b/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh
new file mode 100755
index 000000000..f293192a0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build_sysroot/build_sysroot.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+
+# Requires the CHANNEL env var to be set to `debug` or `release`.
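+# For example (a hypothetical direct invocation; build.sh normally sets
+# CHANNEL and runs this script itself):
+#   CHANNEL=release ./build_sysroot/build_sysroot.sh --release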
+
+set -e
+cd $(dirname "$0")
+
+pushd ../ >/dev/null
+source ./config.sh
+popd >/dev/null
+
+# Cleanup for previous run
+# v Clean target dir except for build scripts and incremental cache
+rm -r target/*/{debug,release}/{build,deps,examples,libsysroot*,native} 2>/dev/null || true
+rm Cargo.lock test_target/Cargo.lock 2>/dev/null || true
+rm -r sysroot/ 2>/dev/null || true
+
+# Build libs
+export RUSTFLAGS="$RUSTFLAGS -Z force-unstable-if-unmarked -Cpanic=abort"
+if [[ "$1" == "--release" ]]; then
+ sysroot_channel='release'
+ RUSTFLAGS="$RUSTFLAGS -Zmir-opt-level=3" cargo build --target $TARGET_TRIPLE --release
+else
+ sysroot_channel='debug'
+ cargo build --target $TARGET_TRIPLE --features compiler_builtins/c
+fi
+
+# Copy files to sysroot
+mkdir -p sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
+cp -r target/$TARGET_TRIPLE/$sysroot_channel/deps/* sysroot/lib/rustlib/$TARGET_TRIPLE/lib/
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh b/compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh
new file mode 100755
index 000000000..56768bbf1
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build_sysroot/prepare_sysroot_src.sh
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+set -e
+cd $(dirname "$0")
+
+SRC_DIR=$(dirname $(rustup which rustc))"/../lib/rustlib/src/rust/"
+DST_DIR="sysroot_src"
+
+if [ ! -e $SRC_DIR ]; then
+ echo "Please install rust-src component"
+ exit 1
+fi
+
+rm -rf $DST_DIR
+mkdir -p $DST_DIR/library
+cp -r $SRC_DIR/library $DST_DIR/
+
+pushd $DST_DIR
+echo "[GIT] init"
+git init
+echo "[GIT] add"
+git add .
+echo "[GIT] commit"
+
+# This is needed on systems where nothing is configured.
+# git really needs something here, or it will fail.
+# Even using --author is not enough.
+git config user.email || git config user.email "none@example.com"
+git config user.name || git config user.name "None"
+
+git commit -m "Initial commit" -q
+for file in $(ls ../../patches/ | grep -v patcha); do
+echo "[GIT] apply" $file
+git apply ../../patches/$file
+git add -A
+git commit --no-gpg-sign -m "Patch $file"
+done
+popd
+
+echo "Successfully prepared libcore for building"
diff --git a/compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs b/compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs
new file mode 100644
index 000000000..0c9ac1ac8
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/build_sysroot/src/lib.rs
@@ -0,0 +1 @@
+#![no_std]
diff --git a/compiler/rustc_codegen_gcc/cargo.sh b/compiler/rustc_codegen_gcc/cargo.sh
new file mode 100755
index 000000000..16e49b204
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/cargo.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+if [ -z $CHANNEL ]; then
+export CHANNEL='debug'
+fi
+
+pushd $(dirname "$0") >/dev/null
+source config.sh
+
+# read nightly compiler from rust-toolchain file
+TOOLCHAIN=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/')
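+# e.g. a rust-toolchain file containing `channel = "nightly-2022-06-06"`
+# yields TOOLCHAIN=nightly-2022-06-06 (the date here is only illustrative)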
+
+popd >/dev/null
+
+if [[ $(rustc -V) != $(rustc +${TOOLCHAIN} -V) ]]; then
+ echo "rustc_codegen_gcc is build for $(rustc +${TOOLCHAIN} -V) but the default rustc version is $(rustc -V)."
+ echo "Using $(rustc +${TOOLCHAIN} -V)."
+fi
+
+cmd=$1
+shift
+
+RUSTDOCFLAGS="$RUSTFLAGS" cargo +${TOOLCHAIN} $cmd $@
diff --git a/compiler/rustc_codegen_gcc/clean_all.sh b/compiler/rustc_codegen_gcc/clean_all.sh
new file mode 100755
index 000000000..782bd3e50
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/clean_all.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+set -e
+set -v
+
+rm -rf target/ build_sysroot/{sysroot/,sysroot_src/,target/,Cargo.lock} perf.data{,.old}
+rm -rf regex/ simple-raytracer/
diff --git a/compiler/rustc_codegen_gcc/config.sh b/compiler/rustc_codegen_gcc/config.sh
new file mode 100644
index 000000000..b25e215fb
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/config.sh
@@ -0,0 +1,52 @@
+set -e
+
+export CARGO_INCREMENTAL=0
+
+if [ -f ./gcc_path ]; then
+ export GCC_PATH=$(cat gcc_path)
+else
+ echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`; see Readme.md for details'
+ exit 1
+fi
+
+unamestr=`uname`
+if [[ "$unamestr" == 'Linux' ]]; then
+ dylib_ext='so'
+elif [[ "$unamestr" == 'Darwin' ]]; then
+ dylib_ext='dylib'
+else
+ echo "Unsupported os"
+ exit 1
+fi
+
+HOST_TRIPLE=$(rustc -vV | grep host | cut -d: -f2 | tr -d " ")
+TARGET_TRIPLE=$HOST_TRIPLE
+#TARGET_TRIPLE="m68k-unknown-linux-gnu"
+
+linker=''
+RUN_WRAPPER=''
+if [[ "$HOST_TRIPLE" != "$TARGET_TRIPLE" ]]; then
+ if [[ "$TARGET_TRIPLE" == "m68k-unknown-linux-gnu" ]]; then
+ TARGET_TRIPLE="mips-unknown-linux-gnu"
+ linker='-Clinker=m68k-linux-gcc'
+ elif [[ "$TARGET_TRIPLE" == "aarch64-unknown-linux-gnu" ]]; then
+ # We are cross-compiling for aarch64. Use the correct linker and run tests in qemu.
+ linker='-Clinker=aarch64-linux-gnu-gcc'
+ RUN_WRAPPER='qemu-aarch64 -L /usr/aarch64-linux-gnu'
+ else
+ echo "Unknown non-native platform"
+ fi
+fi
+
+export RUSTFLAGS="$CG_RUSTFLAGS $linker -Cpanic=abort -Csymbol-mangling-version=v0 -Cdebuginfo=2 -Clto=off -Zpanic-abort-tests -Zcodegen-backend=$(pwd)/target/${CHANNEL:-debug}/librustc_codegen_gcc.$dylib_ext --sysroot $(pwd)/build_sysroot/sysroot"
+
+# FIXME(antoyo): remove once the atomic shim is gone
+if [[ `uname` == 'Darwin' ]]; then
+ export RUSTFLAGS="$RUSTFLAGS -Clink-arg=-undefined -Clink-arg=dynamic_lookup"
+fi
+
+RUSTC="rustc $RUSTFLAGS -L crate=target/out --out-dir target/out"
+export RUSTC_LOG=warn # display metadata load errors
+
+export LD_LIBRARY_PATH="$(pwd)/target/out:$(pwd)/build_sysroot/sysroot/lib/rustlib/$TARGET_TRIPLE/lib:$GCC_PATH"
+export DYLD_LIBRARY_PATH=$LD_LIBRARY_PATH
diff --git a/compiler/rustc_codegen_gcc/crate_patches/0002-rand-Disable-failing-test.patch b/compiler/rustc_codegen_gcc/crate_patches/0002-rand-Disable-failing-test.patch
new file mode 100644
index 000000000..449ca5f6e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/crate_patches/0002-rand-Disable-failing-test.patch
@@ -0,0 +1,32 @@
+From a8fb97120d71252538b6b026695df40d02696bdb Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sat, 15 Aug 2020 20:04:38 +0200
+Subject: [PATCH] [rand] Disable failing test
+
+---
+ src/distributions/uniform.rs | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/src/distributions/uniform.rs b/src/distributions/uniform.rs
+index 480b859..c80bb6f 100644
+--- a/src/distributions/uniform.rs
++++ b/src/distributions/uniform.rs
+@@ -1085,7 +1085,7 @@ mod tests {
+ _ => panic!("`UniformDurationMode` was not serialized/deserialized correctly")
+ }
+ }
+-
++
+ #[test]
+ #[cfg(feature = "serde1")]
+ fn test_uniform_serialization() {
+@@ -1314,6 +1314,7 @@ mod tests {
+ not(target_arch = "wasm32"),
+ not(target_arch = "asmjs")
+ ))]
++ #[ignore] // FIXME
+ fn test_float_assertions() {
+ use super::SampleUniform;
+ use std::panic::catch_unwind;
+--
+2.20.1
diff --git a/compiler/rustc_codegen_gcc/example/alloc_example.rs b/compiler/rustc_codegen_gcc/example/alloc_example.rs
new file mode 100644
index 000000000..74ea7ec4e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/alloc_example.rs
@@ -0,0 +1,41 @@
+#![feature(start, box_syntax, core_intrinsics, alloc_error_handler)]
+#![no_std]
+
+extern crate alloc;
+extern crate alloc_system;
+
+use alloc::boxed::Box;
+
+use alloc_system::System;
+
+#[global_allocator]
+static ALLOC: System = System;
+
+#[link(name = "c")]
+extern "C" {
+ fn puts(s: *const u8) -> i32;
+}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ unsafe {
+ core::intrinsics::abort();
+ }
+}
+
+#[alloc_error_handler]
+fn alloc_error_handler(_: alloc::alloc::Layout) -> ! {
+ unsafe {
+ core::intrinsics::abort();
+ }
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ let world: Box<&str> = box "Hello World!\0";
+ unsafe {
+ puts(*world as *const str as *const u8);
+ }
+
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/example/alloc_system.rs b/compiler/rustc_codegen_gcc/example/alloc_system.rs
new file mode 100644
index 000000000..5f66ca67f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/alloc_system.rs
@@ -0,0 +1,212 @@
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![no_std]
+#![feature(allocator_api, rustc_private)]
+#![cfg_attr(any(unix, target_os = "redox"), feature(libc))]
+
+// The minimum alignment guaranteed by the architecture. This value is used to
+// add fast paths for low alignment values.
+#[cfg(all(any(target_arch = "x86",
+ target_arch = "arm",
+ target_arch = "mips",
+ target_arch = "powerpc",
+ target_arch = "powerpc64")))]
+const MIN_ALIGN: usize = 8;
+#[cfg(all(any(target_arch = "x86_64",
+ target_arch = "aarch64",
+ target_arch = "mips64",
+ target_arch = "s390x",
+ target_arch = "sparc64")))]
+const MIN_ALIGN: usize = 16;
+
+pub struct System;
+#[cfg(any(windows, unix, target_os = "redox"))]
+mod realloc_fallback {
+ use core::alloc::{GlobalAlloc, Layout};
+ use core::cmp;
+ use core::ptr;
+ impl super::System {
+ pub(crate) unsafe fn realloc_fallback(&self, ptr: *mut u8, old_layout: Layout,
+ new_size: usize) -> *mut u8 {
+ // Docs for GlobalAlloc::realloc require this to be valid:
+ let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
+ let new_ptr = GlobalAlloc::alloc(self, new_layout);
+ if !new_ptr.is_null() {
+ let size = cmp::min(old_layout.size(), new_size);
+ ptr::copy_nonoverlapping(ptr, new_ptr, size);
+ GlobalAlloc::dealloc(self, ptr, old_layout);
+ }
+ new_ptr
+ }
+ }
+}
+#[cfg(any(unix, target_os = "redox"))]
+mod platform {
+ extern crate libc;
+ use core::ptr;
+ use MIN_ALIGN;
+ use System;
+ use core::alloc::{GlobalAlloc, Layout};
+ unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::malloc(layout.size()) as *mut u8
+ } else {
+ #[cfg(target_os = "macos")]
+ {
+ if layout.align() > (1 << 31) {
+ return ptr::null_mut()
+ }
+ }
+ aligned_malloc(&layout)
+ }
+ }
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= layout.size() {
+ libc::calloc(layout.size(), 1) as *mut u8
+ } else {
+ let ptr = self.alloc(layout.clone());
+ if !ptr.is_null() {
+ ptr::write_bytes(ptr, 0, layout.size());
+ }
+ ptr
+ }
+ }
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
+ libc::free(ptr as *mut libc::c_void)
+ }
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN && layout.align() <= new_size {
+ libc::realloc(ptr as *mut libc::c_void, new_size) as *mut u8
+ } else {
+ self.realloc_fallback(ptr, layout, new_size)
+ }
+ }
+ }
+ #[cfg(any(target_os = "android",
+ target_os = "hermit",
+ target_os = "redox",
+ target_os = "solaris"))]
+ #[inline]
+ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ // On android we currently target API level 9 which unfortunately
+ // doesn't have the `posix_memalign` API used below. Instead we use
+ // `memalign`, but this unfortunately has the property on some systems
+ // where the memory returned cannot be deallocated by `free`!
+ //
+ // Upon closer inspection, however, this appears to work just fine with
+ // Android, so for this platform we should be fine to call `memalign`
+ // (which is present in API level 9). Some helpful references could
+ // possibly be chromium using memalign [1], attempts at documenting that
+ // memalign + free is ok [2] [3], or the current source of chromium
+ // which still uses memalign on android [4].
+ //
+ // [1]: https://codereview.chromium.org/10796020/
+ // [2]: https://code.google.com/p/android/issues/detail?id=35391
+ // [3]: https://bugs.chromium.org/p/chromium/issues/detail?id=138579
+ // [4]: https://chromium.googlesource.com/chromium/src/base/+/master/
+ // /memory/aligned_memory.cc
+ libc::memalign(layout.align(), layout.size()) as *mut u8
+ }
+ #[cfg(not(any(target_os = "android",
+ target_os = "hermit",
+ target_os = "redox",
+ target_os = "solaris")))]
+ #[inline]
+ unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 {
+ let mut out = ptr::null_mut();
+ let ret = libc::posix_memalign(&mut out, layout.align(), layout.size());
+ if ret != 0 {
+ ptr::null_mut()
+ } else {
+ out as *mut u8
+ }
+ }
+}
+#[cfg(windows)]
+#[allow(nonstandard_style)]
+mod platform {
+ use MIN_ALIGN;
+ use System;
+ use core::alloc::{GlobalAlloc, Layout};
+ type LPVOID = *mut u8;
+ type HANDLE = LPVOID;
+ type SIZE_T = usize;
+ type DWORD = u32;
+ type BOOL = i32;
+ extern "system" {
+ fn GetProcessHeap() -> HANDLE;
+ fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID;
+ fn HeapReAlloc(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID, dwBytes: SIZE_T) -> LPVOID;
+ fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL;
+ fn GetLastError() -> DWORD;
+ }
+ #[repr(C)]
+ struct Header(*mut u8);
+ const HEAP_ZERO_MEMORY: DWORD = 0x00000008;
+ unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header {
+ &mut *(ptr as *mut Header).offset(-1)
+ }
+ unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 {
+ let aligned = ptr.add(align - (ptr as usize & (align - 1)));
+ *get_header(aligned) = Header(ptr);
+ aligned
+ }
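+ // Descriptive note on the two helpers above: `allocate_with_flags` below
+ // over-allocates by `layout.align()` bytes; `align_ptr` then rounds the raw
+ // pointer up and stashes the original pointer in the `Header` word directly
+ // before the aligned address, so `dealloc` can recover it via `get_header`.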
+ #[inline]
+ unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 {
+ let ptr = if layout.align() <= MIN_ALIGN {
+ HeapAlloc(GetProcessHeap(), flags, layout.size())
+ } else {
+ let size = layout.size() + layout.align();
+ let ptr = HeapAlloc(GetProcessHeap(), flags, size);
+ if ptr.is_null() {
+ ptr
+ } else {
+ align_ptr(ptr, layout.align())
+ }
+ };
+ ptr as *mut u8
+ }
+ unsafe impl GlobalAlloc for System {
+ #[inline]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, 0)
+ }
+ #[inline]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ allocate_with_flags(layout, HEAP_ZERO_MEMORY)
+ }
+ #[inline]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ if layout.align() <= MIN_ALIGN {
+ let err = HeapFree(GetProcessHeap(), 0, ptr as LPVOID);
+ debug_assert!(err != 0, "Failed to free heap memory: {}",
+ GetLastError());
+ } else {
+ let header = get_header(ptr);
+ let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID);
+ debug_assert!(err != 0, "Failed to free heap memory: {}",
+ GetLastError());
+ }
+ }
+ #[inline]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ if layout.align() <= MIN_ALIGN {
+ HeapReAlloc(GetProcessHeap(), 0, ptr as LPVOID, new_size) as *mut u8
+ } else {
+ self.realloc_fallback(ptr, layout, new_size)
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs b/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs
new file mode 100644
index 000000000..3af0ba09e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/arbitrary_self_types_pointers_and_wrappers.rs
@@ -0,0 +1,69 @@
+// Adapted from rustc run-pass test suite
+
+#![feature(arbitrary_self_types, unsize, coerce_unsized, dispatch_from_dyn)]
+#![feature(rustc_attrs)]
+
+use std::{
+ ops::{Deref, CoerceUnsized, DispatchFromDyn},
+ marker::Unsize,
+};
+
+struct Ptr<T: ?Sized>(Box<T>);
+
+impl<T: ?Sized> Deref for Ptr<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &*self.0
+ }
+}
+
+impl<T: Unsize<U> + ?Sized, U: ?Sized> CoerceUnsized<Ptr<U>> for Ptr<T> {}
+impl<T: Unsize<U> + ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T> {}
+
+struct Wrapper<T: ?Sized>(T);
+
+impl<T: ?Sized> Deref for Wrapper<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T: CoerceUnsized<U>, U> CoerceUnsized<Wrapper<U>> for Wrapper<T> {}
+impl<T: DispatchFromDyn<U>, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T> {}
+
+
+trait Trait {
+ // This method isn't object-safe yet. Unsized by-value `self` is object-safe (but not callable
+ // without unsized_locals), but wrappers around `Self` currently are not.
+ // FIXME (mikeyhew) uncomment this when unsized rvalues object-safety is implemented
+ // fn wrapper(self: Wrapper<Self>) -> i32;
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32;
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32;
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32;
+}
+
+impl Trait for i32 {
+ fn ptr_wrapper(self: Ptr<Wrapper<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr(self: Wrapper<Ptr<Self>>) -> i32 {
+ **self
+ }
+ fn wrapper_ptr_wrapper(self: Wrapper<Ptr<Wrapper<Self>>>) -> i32 {
+ ***self
+ }
+}
+
+fn main() {
+ let pw = Ptr(Box::new(Wrapper(5))) as Ptr<Wrapper<dyn Trait>>;
+ assert_eq!(pw.ptr_wrapper(), 5);
+
+ let wp = Wrapper(Ptr(Box::new(6))) as Wrapper<Ptr<dyn Trait>>;
+ assert_eq!(wp.wrapper_ptr(), 6);
+
+ let wpw = Wrapper(Ptr(Box::new(Wrapper(7)))) as Wrapper<Ptr<Wrapper<dyn Trait>>>;
+ assert_eq!(wpw.wrapper_ptr_wrapper(), 7);
+}
diff --git a/compiler/rustc_codegen_gcc/example/dst-field-align.rs b/compiler/rustc_codegen_gcc/example/dst-field-align.rs
new file mode 100644
index 000000000..6c338e999
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/dst-field-align.rs
@@ -0,0 +1,67 @@
+// run-pass
+#![allow(dead_code)]
+struct Foo<T: ?Sized> {
+ a: u16,
+ b: T
+}
+
+trait Bar {
+ fn get(&self) -> usize;
+}
+
+impl Bar for usize {
+ fn get(&self) -> usize { *self }
+}
+
+struct Baz<T: ?Sized> {
+ a: T
+}
+
+struct HasDrop<T: ?Sized> {
+ ptr: Box<usize>,
+ data: T
+}
+
+fn main() {
+ // Test that zero-offset works properly
+ let b : Baz<usize> = Baz { a: 7 };
+ assert_eq!(b.a.get(), 7);
+ let b : &Baz<dyn Bar> = &b;
+ assert_eq!(b.a.get(), 7);
+
+ // Test that the field is aligned properly
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ assert_eq!(f.b.get(), 11);
+ let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+
+ let f : &Foo<dyn Bar> = &f;
+ let ptr2 : *const u8 = &f.b as *const _ as *const u8;
+ assert_eq!(f.b.get(), 11);
+
+ // The pointers should be the same
+ assert_eq!(ptr1, ptr2);
+
+ // Test that nested DSTs work properly
+ let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+ assert_eq!(f.b.b.get(), 17);
+ let f : &Foo<Foo<dyn Bar>> = &f;
+ assert_eq!(f.b.b.get(), 17);
+
+ // Test that getting the pointer via destructuring works
+
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ let f : &Foo<dyn Bar> = &f;
+ let &Foo { a: _, b: ref bar } = f;
+ assert_eq!(bar.get(), 11);
+
+ // Make sure that drop flags don't screw things up
+
+ let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+ ptr: Box::new(0),
+ data: Baz { a: [1,2,3,4] }
+ };
+ assert_eq!([1,2,3,4], d.data.a);
+
+ let d : &HasDrop<Baz<[i32]>> = &d;
+ assert_eq!(&[1,2,3,4], &d.data.a);
+}
diff --git a/compiler/rustc_codegen_gcc/example/example.rs b/compiler/rustc_codegen_gcc/example/example.rs
new file mode 100644
index 000000000..5878e8548
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/example.rs
@@ -0,0 +1,208 @@
+#![feature(no_core, unboxed_closures)]
+#![no_core]
+#![allow(dead_code)]
+
+extern crate mini_core;
+
+use mini_core::*;
+
+fn abc(a: u8) -> u8 {
+ a * 2
+}
+
+fn bcd(b: bool, a: u8) -> u8 {
+ if b {
+ a * 2
+ } else {
+ a * 3
+ }
+}
+
+fn call() {
+ abc(42);
+}
+
+fn indirect_call() {
+ let f: fn() = call;
+ f();
+}
+
+enum BoolOption {
+ Some(bool),
+ None,
+}
+
+fn option_unwrap_or(o: BoolOption, d: bool) -> bool {
+ match o {
+ BoolOption::Some(b) => b,
+ BoolOption::None => d,
+ }
+}
+
+fn ret_42() -> u8 {
+ 42
+}
+
+fn return_str() -> &'static str {
+ "hello world"
+}
+
+fn promoted_val() -> &'static u8 {
+ &(1 * 2)
+}
+
+fn cast_ref_to_raw_ptr(abc: &u8) -> *const u8 {
+ abc as *const u8
+}
+
+fn cmp_raw_ptr(a: *const u8, b: *const u8) -> bool {
+ a == b
+}
+
+fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+ (
+ a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+ b as u32,
+ )
+}
+
+fn char_cast(c: char) -> u8 {
+ c as u8
+}
+
+pub struct DebugTuple(());
+
+fn debug_tuple() -> DebugTuple {
+ DebugTuple(())
+}
+
+fn size_of<T>() -> usize {
+ intrinsics::size_of::<T>()
+}
+
+fn use_size_of() -> usize {
+ size_of::<u64>()
+}
+
+unsafe fn use_copy_intrinsic(src: *const u8, dst: *mut u8) {
+ intrinsics::copy::<u8>(src, dst, 1);
+}
+
+unsafe fn use_copy_intrinsic_ref(src: *const u8, dst: *mut u8) {
+ let copy2 = &intrinsics::copy::<u8>;
+ copy2(src, dst, 1);
+}
+
+const ABC: u8 = 6 * 7;
+
+fn use_const() -> u8 {
+ ABC
+}
+
+pub fn call_closure_3arg() {
+ (|_, _, _| {})(0u8, 42u16, 0u8)
+}
+
+pub fn call_closure_2arg() {
+ (|_, _| {})(0u8, 42u16)
+}
+
+struct IsNotEmpty;
+
+impl<'a, 'b> FnOnce<(&'a &'b [u16],)> for IsNotEmpty {
+ type Output = (u8, u8);
+
+ #[inline]
+ extern "rust-call" fn call_once(mut self, arg: (&'a &'b [u16],)) -> (u8, u8) {
+ self.call_mut(arg)
+ }
+}
+
+impl<'a, 'b> FnMut<(&'a &'b [u16],)> for IsNotEmpty {
+ #[inline]
+ extern "rust-call" fn call_mut(&mut self, _arg: (&'a &'b [u16],)) -> (u8, u8) {
+ (0, 42)
+ }
+}
+
+pub fn call_is_not_empty() {
+ IsNotEmpty.call_once((&(&[0u16] as &[_]),));
+}
+
+fn eq_char(a: char, b: char) -> bool {
+ a == b
+}
+
+unsafe fn transmute(c: char) -> u32 {
+ intrinsics::transmute(c)
+}
+
+unsafe fn deref_str_ptr(s: *const str) -> &'static str {
+ &*s
+}
+
+fn use_array(arr: [u8; 3]) -> u8 {
+ arr[1]
+}
+
+fn repeat_array() -> [u8; 3] {
+ [0; 3]
+}
+
+fn array_as_slice(arr: &[u8; 3]) -> &[u8] {
+ arr
+}
+
+unsafe fn use_ctlz_nonzero(a: u16) -> u16 {
+ intrinsics::ctlz_nonzero(a)
+}
+
+fn ptr_as_usize(ptr: *const u8) -> usize {
+ ptr as usize
+}
+
+fn float_cast(a: f32, b: f64) -> (f64, f32) {
+ (a as f64, b as f32)
+}
+
+fn int_to_float(a: u8, b: i32) -> (f64, f32) {
+ (a as f64, b as f32)
+}
+
+fn make_array() -> [u8; 3] {
+ [42, 0, 5]
+}
+
+fn some_promoted_tuple() -> &'static (&'static str, &'static str) {
+ &("abc", "some")
+}
+
+fn index_slice(s: &[u8]) -> u8 {
+ s[2]
+}
+
+pub struct StrWrapper {
+ s: str,
+}
+
+fn str_wrapper_get(w: &StrWrapper) -> &str {
+ &w.s
+}
+
+fn i16_as_i8(a: i16) -> i8 {
+ a as i8
+}
+
+struct Unsized(u8, str);
+
+fn get_sized_field_ref_from_unsized_type(u: &Unsized) -> &u8 {
+ &u.0
+}
+
+fn get_unsized_field_ref_from_unsized_type(u: &Unsized) -> &str {
+ &u.1
+}
+
+pub fn reuse_byref_argument_storage(a: (u8, u16, u32)) -> u8 {
+ a.0
+}
diff --git a/compiler/rustc_codegen_gcc/example/mini_core.rs b/compiler/rustc_codegen_gcc/example/mini_core.rs
new file mode 100644
index 000000000..ddcbb0d9f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/mini_core.rs
@@ -0,0 +1,599 @@
+#![feature(
+ no_core, lang_items, intrinsics, unboxed_closures, type_ascription, extern_types,
+ untagged_unions, decl_macro, rustc_attrs, transparent_unions, auto_traits,
+ thread_local
+)]
+#![no_core]
+#![allow(dead_code)]
+
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+ intrinsics::unreachable();
+}
+
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "destruct"]
+pub trait Destruct {}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "dispatch_from_dyn"]
+pub trait DispatchFromDyn<T> {}
+
+// &T -> &U
+// &T -> &U
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<&'a U> for &'a T {}
+// &mut T -> &mut U
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<&'a mut U> for &'a mut T {}
+// *const T -> *const U
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<*const U> for *const T {}
+// *mut T -> *mut U
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<*mut U> for *mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T> {}
+
+#[lang = "receiver"]
+pub trait Receiver {}
+
+impl<T: ?Sized> Receiver for &T {}
+impl<T: ?Sized> Receiver for &mut T {}
+impl<T: ?Sized> Receiver for Box<T> {}
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for f64 {}
+unsafe impl Copy for char {}
+unsafe impl<'a, T: ?Sized> Copy for &'a T {}
+unsafe impl<T: ?Sized> Copy for *const T {}
+unsafe impl<T: ?Sized> Copy for *mut T {}
+
+#[lang = "sync"]
+pub unsafe trait Sync {}
+
+unsafe impl Sync for bool {}
+unsafe impl Sync for u8 {}
+unsafe impl Sync for u16 {}
+unsafe impl Sync for u32 {}
+unsafe impl Sync for u64 {}
+unsafe impl Sync for usize {}
+unsafe impl Sync for i8 {}
+unsafe impl Sync for i16 {}
+unsafe impl Sync for i32 {}
+unsafe impl Sync for isize {}
+unsafe impl Sync for char {}
+unsafe impl<'a, T: ?Sized> Sync for &'a T {}
+unsafe impl Sync for [u8; 16] {}
+
+#[lang = "freeze"]
+unsafe auto trait Freeze {}
+
+unsafe impl<T: ?Sized> Freeze for PhantomData<T> {}
+unsafe impl<T: ?Sized> Freeze for *const T {}
+unsafe impl<T: ?Sized> Freeze for *mut T {}
+unsafe impl<T: ?Sized> Freeze for &T {}
+unsafe impl<T: ?Sized> Freeze for &mut T {}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "not"]
+pub trait Not {
+ type Output;
+
+ fn not(self) -> Self::Output;
+}
+
+impl Not for bool {
+ type Output = bool;
+
+ fn not(self) -> bool {
+ !self
+ }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for usize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+#[lang = "add"]
+pub trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "rem"]
+pub trait Rem<RHS = Self> {
+ type Output;
+
+ fn rem(self, rhs: RHS) -> Self::Output;
+}
+
+impl Rem for usize {
+ type Output = Self;
+
+ fn rem(self, rhs: Self) -> Self {
+ self % rhs
+ }
+}
+
+#[lang = "bitor"]
+pub trait BitOr<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn bitor(self, rhs: RHS) -> Self::Output;
+}
+
+impl BitOr for bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ self | rhs
+ }
+}
+
+impl<'a> BitOr<bool> for &'a bool {
+ type Output = bool;
+
+ fn bitor(self, rhs: bool) -> bool {
+ *self | rhs
+ }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+ fn eq(&self, other: &Rhs) -> bool;
+ fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+ fn eq(&self, other: &u8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u16 {
+ fn eq(&self, other: &u16) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u16) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u32 {
+ fn eq(&self, other: &u32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+
+impl PartialEq for u64 {
+ fn eq(&self, other: &u64) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u64) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for usize {
+ fn eq(&self, other: &usize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &usize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i8 {
+ fn eq(&self, other: &i8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i32 {
+ fn eq(&self, other: &i32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for isize {
+ fn eq(&self, other: &isize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &isize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for char {
+ fn eq(&self, other: &char) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &char) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl<T: ?Sized> PartialEq for *const T {
+ fn eq(&self, other: &*const T) -> bool {
+ *self == *other
+ }
+ fn ne(&self, other: &*const T) -> bool {
+ *self != *other
+ }
+}
+
+#[lang = "neg"]
+pub trait Neg {
+ type Output;
+
+ fn neg(self) -> Self::Output;
+}
+
+impl Neg for i8 {
+ type Output = i8;
+
+ fn neg(self) -> i8 {
+ -self
+ }
+}
+
+impl Neg for i16 {
+ type Output = i16;
+
+ fn neg(self) -> i16 {
+        -self
+ }
+}
+
+impl Neg for isize {
+ type Output = isize;
+
+ fn neg(self) -> isize {
+ -self
+ }
+}
+
+impl Neg for f32 {
+ type Output = f32;
+
+ fn neg(self) -> f32 {
+ -self
+ }
+}
+
+pub enum Option<T> {
+ Some(T),
+ None,
+}
+
+pub use Option::*;
+
+#[lang = "phantom_data"]
+pub struct PhantomData<T: ?Sized>;
+
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+ #[lang = "fn_once_output"]
+ type Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "panic"]
+#[track_caller]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\n\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() -> ! {
+ loop {}
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "deref"]
+pub trait Deref {
+ type Target: ?Sized;
+
+ fn deref(&self) -> &Self::Target;
+}
+
+pub trait Allocator {
+}
+
+pub struct Global;
+
+impl Allocator for Global {}
+
+#[lang = "owned_box"]
+pub struct Box<
+ T: ?Sized,
+ A: Allocator = Global,
+>(*mut T, A);
+
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T> {}
+
+impl<T: ?Sized, A: Allocator> Drop for Box<T, A> {
+ fn drop(&mut self) {
+ // drop is currently performed by compiler.
+ }
+}
+
+impl<T> Deref for Box<T> {
+ type Target = T;
+
+ fn deref(&self) -> &Self::Target {
+ &**self
+ }
+}
+
+#[lang = "exchange_malloc"]
+unsafe fn allocate(size: usize, _align: usize) -> *mut u8 {
+ libc::malloc(size)
+}
+
+#[lang = "box_free"]
+unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: *mut T, alloc: A) {
+ libc::free(ptr as *mut u8);
+}
+
+#[lang = "drop"]
+pub trait Drop {
+ fn drop(&mut self);
+}
+
+#[lang = "manually_drop"]
+#[repr(transparent)]
+pub struct ManuallyDrop<T: ?Sized> {
+ pub value: T,
+}
+
+#[lang = "maybe_uninit"]
+#[repr(transparent)]
+pub union MaybeUninit<T> {
+ pub uninit: (),
+ pub value: ManuallyDrop<T>,
+}
+
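+// `extern "rust-intrinsic"` declarations have no Rust bodies; the backend
+// under test (here libgccjit) must recognize and lower each one itself.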
+pub mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ pub fn size_of<T>() -> usize;
+ pub fn size_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn min_align_of<T>() -> usize;
+ pub fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize;
+ pub fn copy<T>(src: *const T, dst: *mut T, count: usize);
+ pub fn transmute<T, U>(e: T) -> U;
+ pub fn ctlz_nonzero<T>(x: T) -> T;
+ pub fn needs_drop<T: ?::Sized>() -> bool;
+ pub fn bitreverse<T>(x: T) -> T;
+ pub fn bswap<T>(x: T) -> T;
+ pub fn write_bytes<T>(dst: *mut T, val: u8, count: usize);
+ pub fn unreachable() -> !;
+ }
+}
+
+pub mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn malloc(size: usize) -> *mut u8;
+ pub fn free(ptr: *mut u8);
+ pub fn memcpy(dst: *mut u8, src: *const u8, size: usize);
+ pub fn memmove(dst: *mut u8, src: *const u8, size: usize);
+ pub fn strncpy(dst: *mut u8, src: *const u8, size: usize);
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+extern {
+ type VaListImpl;
+}
+
+#[lang = "va_list"]
+#[repr(transparent)]
+pub struct VaList<'a>(&'a mut VaListImpl);
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro stringify($($t:tt)*) { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro file() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro line() { /* compiler built-in */ }
+
+#[rustc_builtin_macro]
+#[rustc_macro_transparency = "semitransparent"]
+pub macro cfg() { /* compiler built-in */ }
+
+pub static A_STATIC: u8 = 42;
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[no_mangle]
+pub fn get_tls() -> u8 {
+ #[thread_local]
+ static A: u8 = 42;
+
+ A
+}
diff --git a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs
new file mode 100644
index 000000000..14fd9eeff
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs
@@ -0,0 +1,431 @@
+// Adapted from https://github.com/sunfishcode/mir2cranelift/blob/master/rust-examples/nocore-hello-world.rs
+
+#![feature(
+ no_core, unboxed_closures, start, lang_items, box_syntax, never_type, linkage,
+ extern_types, thread_local
+)]
+#![no_core]
+#![allow(dead_code, non_camel_case_types)]
+
+extern crate mini_core;
+
+use mini_core::*;
+use mini_core::libc::*;
+
+unsafe extern "C" fn my_puts(s: *const u8) {
+ puts(s);
+}
+
+#[lang = "termination"]
+trait Termination {
+ fn report(self) -> i32;
+}
+
+impl Termination for () {
+ fn report(self) -> i32 {
+ unsafe {
+ NUM = 6 * 7 + 1 + (1u8 == 1u8) as u8; // 44
+ *NUM_REF as i32
+ }
+ }
+}
+
+trait SomeTrait {
+ fn object_safe(&self);
+}
+
+impl SomeTrait for &'static str {
+ fn object_safe(&self) {
+ unsafe {
+ puts(*self as *const str as *const u8);
+ }
+ }
+}
+
+struct NoisyDrop {
+ text: &'static str,
+ inner: NoisyDropInner,
+}
+
+struct NoisyDropUnsized {
+ inner: NoisyDropInner,
+ text: str,
+}
+
+struct NoisyDropInner;
+
+impl Drop for NoisyDrop {
+ fn drop(&mut self) {
+ unsafe {
+ puts(self.text as *const str as *const u8);
+ }
+ }
+}
+
+impl Drop for NoisyDropInner {
+ fn drop(&mut self) {
+ unsafe {
+ puts("Inner got dropped!\0" as *const str as *const u8);
+ }
+ }
+}
+
+impl SomeTrait for NoisyDrop {
+ fn object_safe(&self) {}
+}
+
+enum Ordering {
+ Less = -1,
+ Equal = 0,
+ Greater = 1,
+}
+
+#[lang = "start"]
+fn start<T: Termination + 'static>(
+ main: fn() -> T,
+ argc: isize,
+ argv: *const *const u8,
+) -> isize {
+ if argc == 3 {
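+        // `argv` is a C array of `argc` pointers; since mini_core provides no
+        // pointer `offset`, each element is read at a manual byte offset of
+        // i * size_of::<*const u8>() from `argv`.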
+ unsafe { puts(*argv); }
+ unsafe { puts(*((argv as usize + intrinsics::size_of::<*const u8>()) as *const *const u8)); }
+ unsafe { puts(*((argv as usize + 2 * intrinsics::size_of::<*const u8>()) as *const *const u8)); }
+ }
+
+ main().report();
+ 0
+}
+
+static mut NUM: u8 = 6 * 7;
+static NUM_REF: &'static u8 = unsafe { &NUM };
+
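+// Minimal stand-ins for std's assert macros: on failure they stringify the
+// offending expression and call mini_core's `panic` lang item, which prints a
+// fixed message and aborts (there is no formatting machinery under no_core).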
+macro_rules! assert {
+ ($e:expr) => {
+ if !$e {
+ panic(stringify!(! $e));
+ }
+ };
+}
+
+macro_rules! assert_eq {
+ ($l:expr, $r: expr) => {
+ if $l != $r {
+ panic(stringify!($l != $r));
+ }
+ }
+}
+
+struct Unique<T: ?Sized> {
+ pointer: *const T,
+ _marker: PhantomData<T>,
+}
+
+impl<T: ?Sized, U: ?Sized> CoerceUnsized<Unique<U>> for Unique<T> where T: Unsize<U> {}
+
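+// A stand-in for `mem::zeroed`: `write_bytes(ptr, 0, 1)` zeroes the
+// `size_of::<T>()` bytes of the value held inside the `MaybeUninit` union.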
+unsafe fn zeroed<T>() -> T {
+ let mut uninit = MaybeUninit { uninit: () };
+ intrinsics::write_bytes(&mut uninit.value.value as *mut T, 0, 1);
+ uninit.value.value
+}
+
+fn take_f32(_f: f32) {}
+fn take_unique(_u: Unique<()>) {}
+
+fn return_u128_pair() -> (u128, u128) {
+ (0, 0)
+}
+
+fn call_return_u128_pair() {
+ return_u128_pair();
+}
+
+fn main() {
+ take_unique(Unique {
+ pointer: 0 as *const (),
+ _marker: PhantomData,
+ });
+ take_f32(0.1);
+
+ //call_return_u128_pair();
+
+ let slice = &[0, 1] as &[i32];
+ let slice_ptr = slice as *const [i32] as *const i32;
+
+ assert_eq!(slice_ptr as usize % 4, 0);
+
+ //return;
+
+ unsafe {
+ printf("Hello %s\n\0" as *const str as *const i8, "printf\0" as *const str as *const i8);
+
+ let hello: &[u8] = b"Hello\0" as &[u8; 6];
+ let ptr: *const u8 = hello as *const [u8] as *const u8;
+ puts(ptr);
+
+ let world: Box<&str> = box "World!\0";
+ puts(*world as *const str as *const u8);
+ world as Box<dyn SomeTrait>;
+
+ assert_eq!(intrinsics::bitreverse(0b10101000u8), 0b00010101u8);
+
+ assert_eq!(intrinsics::bswap(0xabu8), 0xabu8);
+ assert_eq!(intrinsics::bswap(0xddccu16), 0xccddu16);
+ assert_eq!(intrinsics::bswap(0xffee_ddccu32), 0xccdd_eeffu32);
+ assert_eq!(intrinsics::bswap(0x1234_5678_ffee_ddccu64), 0xccdd_eeff_7856_3412u64);
+
+ assert_eq!(intrinsics::size_of_val(hello) as u8, 6);
+
+ let chars = &['C', 'h', 'a', 'r', 's'];
+ let chars = chars as &[char];
+ assert_eq!(intrinsics::size_of_val(chars) as u8, 4 * 5);
+
+ let a: &dyn SomeTrait = &"abc\0";
+ a.object_safe();
+
+ assert_eq!(intrinsics::size_of_val(a) as u8, 16);
+ assert_eq!(intrinsics::size_of_val(&0u32) as u8, 4);
+
+ assert_eq!(intrinsics::min_align_of::<u16>() as u8, 2);
+ assert_eq!(intrinsics::min_align_of_val(&a) as u8, intrinsics::min_align_of::<&str>() as u8);
+
+ assert!(!intrinsics::needs_drop::<u8>());
+ assert!(!intrinsics::needs_drop::<[u8]>());
+ assert!(intrinsics::needs_drop::<NoisyDrop>());
+ assert!(intrinsics::needs_drop::<NoisyDropUnsized>());
+
+ Unique {
+ pointer: 0 as *const &str,
+ _marker: PhantomData,
+ } as Unique<dyn SomeTrait>;
+
+ struct MyDst<T: ?Sized>(T);
+
+ intrinsics::size_of_val(&MyDst([0u8; 4]) as &MyDst<[u8]>);
+
+ struct Foo {
+ x: u8,
+ y: !,
+ }
+
+ unsafe fn uninitialized<T>() -> T {
+ MaybeUninit { uninit: () }.value.value
+ }
+
+ zeroed::<(u8, u8)>();
+ #[allow(unreachable_code)]
+ {
+ if false {
+ zeroed::<!>();
+ zeroed::<Foo>();
+ uninitialized::<Foo>();
+ }
+ }
+ }
+
+ let _ = box NoisyDrop {
+ text: "Boxed outer got dropped!\0",
+ inner: NoisyDropInner,
+ } as Box<dyn SomeTrait>;
+
+ const FUNC_REF: Option<fn()> = Some(main);
+ match FUNC_REF {
+ Some(_) => {},
+ None => assert!(false),
+ }
+
+ match Ordering::Less {
+ Ordering::Less => {},
+ _ => assert!(false),
+ }
+
+ [NoisyDropInner, NoisyDropInner];
+
+ let x = &[0u32, 42u32] as &[u32];
+ match x {
+ [] => assert_eq!(0u32, 1),
+ [_, ref y @ ..] => assert_eq!(&x[1] as *const u32 as usize, &y[0] as *const u32 as usize),
+ }
+
+ assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42);
+
+ extern {
+ #[linkage = "weak"]
+ static ABC: *const u8;
+ }
+
+ {
+ extern {
+ #[linkage = "weak"]
+ static ABC: *const u8;
+ }
+ }
+
+ // TODO(antoyo): to make this work, support weak linkage.
+ //unsafe { assert_eq!(ABC as usize, 0); }
+
+ &mut (|| Some(0 as *const ())) as &mut dyn FnMut() -> Option<*const ()>;
+
+ let f = 1000.0;
+ assert_eq!(f as u8, 255);
+ let f2 = -1000.0;
+ assert_eq!(f2 as i8, -128);
+ assert_eq!(f2 as u8, 0);
+
+ static ANOTHER_STATIC: &u8 = &A_STATIC;
+ assert_eq!(*ANOTHER_STATIC, 42);
+
+ check_niche_behavior();
+
+ extern "C" {
+ type ExternType;
+ }
+
+ struct ExternTypeWrapper {
+ _a: ExternType,
+ }
+
+ let nullptr = 0 as *const ();
+ let extern_nullptr = nullptr as *const ExternTypeWrapper;
+ extern_nullptr as *const ();
+ let slice_ptr = &[] as *const [u8];
+ slice_ptr as *const u8;
+
+ #[cfg(not(jit))]
+ test_tls();
+}
+
+#[repr(C)]
+enum c_void {
+ _1,
+ _2,
+}
+
+type c_int = i32;
+type c_ulong = u64;
+
+type pthread_t = c_ulong;
+
+#[repr(C)]
+struct pthread_attr_t {
+ __size: [u64; 7],
+}
+
+#[link(name = "pthread")]
+extern "C" {
+ fn pthread_attr_init(attr: *mut pthread_attr_t) -> c_int;
+
+ fn pthread_create(
+ native: *mut pthread_t,
+ attr: *const pthread_attr_t,
+ f: extern "C" fn(_: *mut c_void) -> *mut c_void,
+ value: *mut c_void
+ ) -> c_int;
+
+ fn pthread_join(
+ native: pthread_t,
+ value: *mut *mut c_void
+ ) -> c_int;
+}
+
+#[thread_local]
+#[cfg(not(jit))]
+static mut TLS: u8 = 42;
+
+#[cfg(not(jit))]
+extern "C" fn mutate_tls(_: *mut c_void) -> *mut c_void {
+ unsafe { TLS = 0; }
+ 0 as *mut c_void
+}
+
+#[cfg(not(jit))]
+fn test_tls() {
+ unsafe {
+ let mut attr: pthread_attr_t = zeroed();
+ let mut thread: pthread_t = 0;
+
+ assert_eq!(TLS, 42);
+
+ if pthread_attr_init(&mut attr) != 0 {
+ assert!(false);
+ }
+
+ if pthread_create(&mut thread, &attr, mutate_tls, 0 as *mut c_void) != 0 {
+ assert!(false);
+ }
+
+ let mut res = 0 as *mut c_void;
+ pthread_join(thread, &mut res);
+
+ // TLS of main thread must not have been changed by the other thread.
+ assert_eq!(TLS, 42);
+
+ puts("TLS works!\n\0" as *const str as *const u8);
+ }
+}
+
+// Copied ui/issues/issue-61696.rs
+
+pub enum Infallible {}
+
+// The check that the `bool` field of `V1` is encoding a "niche variant"
+// (i.e. not `V1`, so `V3` or `V4`) used to be mathematically incorrect,
+// causing valid `V1` values to be interpreted as other variants.
+pub enum E1 {
+ V1 { f: bool },
+ V2 { f: Infallible },
+ V3,
+ V4,
+}
+
+// Computing the discriminant used to be done using the niche type (here `u8`,
+// from the `bool` field of `V1`), overflowing for variants with large enough
+// indices (`V3` and `V4`), causing them to be interpreted as other variants.
+pub enum E2<X> {
+ V1 { f: bool },
+
+ /*_00*/ _01(X), _02(X), _03(X), _04(X), _05(X), _06(X), _07(X),
+ _08(X), _09(X), _0A(X), _0B(X), _0C(X), _0D(X), _0E(X), _0F(X),
+ _10(X), _11(X), _12(X), _13(X), _14(X), _15(X), _16(X), _17(X),
+ _18(X), _19(X), _1A(X), _1B(X), _1C(X), _1D(X), _1E(X), _1F(X),
+ _20(X), _21(X), _22(X), _23(X), _24(X), _25(X), _26(X), _27(X),
+ _28(X), _29(X), _2A(X), _2B(X), _2C(X), _2D(X), _2E(X), _2F(X),
+ _30(X), _31(X), _32(X), _33(X), _34(X), _35(X), _36(X), _37(X),
+ _38(X), _39(X), _3A(X), _3B(X), _3C(X), _3D(X), _3E(X), _3F(X),
+ _40(X), _41(X), _42(X), _43(X), _44(X), _45(X), _46(X), _47(X),
+ _48(X), _49(X), _4A(X), _4B(X), _4C(X), _4D(X), _4E(X), _4F(X),
+ _50(X), _51(X), _52(X), _53(X), _54(X), _55(X), _56(X), _57(X),
+ _58(X), _59(X), _5A(X), _5B(X), _5C(X), _5D(X), _5E(X), _5F(X),
+ _60(X), _61(X), _62(X), _63(X), _64(X), _65(X), _66(X), _67(X),
+ _68(X), _69(X), _6A(X), _6B(X), _6C(X), _6D(X), _6E(X), _6F(X),
+ _70(X), _71(X), _72(X), _73(X), _74(X), _75(X), _76(X), _77(X),
+ _78(X), _79(X), _7A(X), _7B(X), _7C(X), _7D(X), _7E(X), _7F(X),
+ _80(X), _81(X), _82(X), _83(X), _84(X), _85(X), _86(X), _87(X),
+ _88(X), _89(X), _8A(X), _8B(X), _8C(X), _8D(X), _8E(X), _8F(X),
+ _90(X), _91(X), _92(X), _93(X), _94(X), _95(X), _96(X), _97(X),
+ _98(X), _99(X), _9A(X), _9B(X), _9C(X), _9D(X), _9E(X), _9F(X),
+ _A0(X), _A1(X), _A2(X), _A3(X), _A4(X), _A5(X), _A6(X), _A7(X),
+ _A8(X), _A9(X), _AA(X), _AB(X), _AC(X), _AD(X), _AE(X), _AF(X),
+ _B0(X), _B1(X), _B2(X), _B3(X), _B4(X), _B5(X), _B6(X), _B7(X),
+ _B8(X), _B9(X), _BA(X), _BB(X), _BC(X), _BD(X), _BE(X), _BF(X),
+ _C0(X), _C1(X), _C2(X), _C3(X), _C4(X), _C5(X), _C6(X), _C7(X),
+ _C8(X), _C9(X), _CA(X), _CB(X), _CC(X), _CD(X), _CE(X), _CF(X),
+ _D0(X), _D1(X), _D2(X), _D3(X), _D4(X), _D5(X), _D6(X), _D7(X),
+ _D8(X), _D9(X), _DA(X), _DB(X), _DC(X), _DD(X), _DE(X), _DF(X),
+ _E0(X), _E1(X), _E2(X), _E3(X), _E4(X), _E5(X), _E6(X), _E7(X),
+ _E8(X), _E9(X), _EA(X), _EB(X), _EC(X), _ED(X), _EE(X), _EF(X),
+ _F0(X), _F1(X), _F2(X), _F3(X), _F4(X), _F5(X), _F6(X), _F7(X),
+ _F8(X), _F9(X), _FA(X), _FB(X), _FC(X), _FD(X), _FE(X), _FF(X),
+
+ V3,
+ V4,
+}
+
+fn check_niche_behavior() {
+ if let E1::V2 { .. } = (E1::V1 { f: true }) {
+ intrinsics::abort();
+ }
+
+ if let E2::V1 { .. } = E2::V3::<Infallible> {
+ intrinsics::abort();
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/example/mod_bench.rs b/compiler/rustc_codegen_gcc/example/mod_bench.rs
new file mode 100644
index 000000000..2e2b0052d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/mod_bench.rs
@@ -0,0 +1,37 @@
+#![feature(start, box_syntax, core_intrinsics, lang_items)]
+#![no_std]
+
+#[link(name = "c")]
+extern {}
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ unsafe {
+ core::intrinsics::abort();
+ }
+}
+
+#[lang = "eh_personality"]
+fn eh_personality() {}
+
+// Required for rustc_codegen_llvm
+#[no_mangle]
+unsafe extern "C" fn _Unwind_Resume() {
+ core::intrinsics::unreachable();
+}
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ for i in 2..100_000_000 {
+ black_box((i + 1) % i);
+ }
+
+ 0
+}
+
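+// (i + 1) % i == 1 for every i >= 2, so `black_box` below never aborts; its
+// #[inline(never)] keeps the benchmark loop from being optimized away.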
+#[inline(never)]
+fn black_box(i: u32) {
+ if i != 1 {
+ unsafe { core::intrinsics::abort(); }
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/example/std_example.rs b/compiler/rustc_codegen_gcc/example/std_example.rs
new file mode 100644
index 000000000..31069058a
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/std_example.rs
@@ -0,0 +1,286 @@
+#![feature(core_intrinsics, generators, generator_trait, is_sorted)]
+
+use std::arch::x86_64::*;
+use std::io::Write;
+use std::ops::Generator;
+
+extern {
+ pub fn printf(format: *const i8, ...) -> i32;
+}
+
+fn main() {
+ let mutex = std::sync::Mutex::new(());
+ let _guard = mutex.lock().unwrap();
+
+ let _ = ::std::iter::repeat('a' as u8).take(10).collect::<Vec<_>>();
+ let stderr = ::std::io::stderr();
+ let mut stderr = stderr.lock();
+
+ std::thread::spawn(move || {
+ println!("Hello from another thread!");
+ });
+
+ writeln!(stderr, "some {} text", "<unknown>").unwrap();
+
+ let _ = std::process::Command::new("true").env("c", "d").spawn();
+
+ println!("cargo:rustc-link-lib=z");
+
+ static ONCE: std::sync::Once = std::sync::Once::new();
+ ONCE.call_once(|| {});
+
+ let _eq = LoopState::Continue(()) == LoopState::Break(());
+
+ // Make sure ByValPair values with differently sized components are correctly passed
+ map(None::<(u8, Box<Instruction>)>);
+
+ println!("{}", 2.3f32.exp());
+ println!("{}", 2.3f32.exp2());
+ println!("{}", 2.3f32.abs());
+ println!("{}", 2.3f32.sqrt());
+ println!("{}", 2.3f32.floor());
+ println!("{}", 2.3f32.ceil());
+ println!("{}", 2.3f32.min(1.0));
+ println!("{}", 2.3f32.max(1.0));
+ println!("{}", 2.3f32.powi(2));
+ println!("{}", 2.3f32.log2());
+ assert_eq!(2.3f32.copysign(-1.0), -2.3f32);
+ println!("{}", 2.3f32.powf(2.0));
+
+ assert_eq!(-128i8, (-128i8).saturating_sub(1));
+ assert_eq!(127i8, 127i8.saturating_sub(-128));
+ assert_eq!(-128i8, (-128i8).saturating_add(-128));
+ assert_eq!(127i8, 127i8.saturating_add(1));
+
+ assert_eq!(-32768i16, (-32768i16).saturating_add(-32768));
+ assert_eq!(32767i16, 32767i16.saturating_add(1));
+
+ assert_eq!(0b0000000000000000000000000010000010000000000000000000000000000000_0000000000100000000000000000000000001000000000000100000000000000u128.leading_zeros(), 26);
+ assert_eq!(0b0000000000000000000000000010000000000000000000000000000000000000_0000000000000000000000000000000000001000000000000000000010000000u128.trailing_zeros(), 7);
+
+ let _d = 0i128.checked_div(2i128);
+ let _d = 0u128.checked_div(2u128);
+ assert_eq!(1u128 + 2, 3);
+
+ assert_eq!(0b100010000000000000000000000000000u128 >> 10, 0b10001000000000000000000u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 >> 64, 0xFEDCBA98765432u128);
+ assert_eq!(0xFEDCBA987654321123456789ABCDEFu128 as i128 >> 64, 0xFEDCBA98765432i128);
+
+ let tmp = 353985398u128;
+ assert_eq!(tmp * 932490u128, 330087843781020u128);
+
+ let tmp = -0x1234_5678_9ABC_DEF0i64;
+ assert_eq!(tmp as i128, -0x1234_5678_9ABC_DEF0i128);
+
+ // Check that all u/i128 <-> float casts work correctly.
+    let hundred_u128 = 100u128;
+    let hundred_i128 = 100i128;
+    let hundred_f32 = 100.0f32;
+    let hundred_f64 = 100.0f64;
+    assert_eq!(hundred_u128 as f32, 100.0);
+    assert_eq!(hundred_u128 as f64, 100.0);
+    assert_eq!(hundred_f32 as u128, 100);
+    assert_eq!(hundred_f64 as u128, 100);
+    assert_eq!(hundred_i128 as f32, 100.0);
+    assert_eq!(hundred_i128 as f64, 100.0);
+    assert_eq!(hundred_f32 as i128, 100);
+    assert_eq!(hundred_f64 as i128, 100);
+
+ let _a = 1u32 << 2u8;
+
+ let empty: [i32; 0] = [];
+ assert!(empty.is_sorted());
+
+ println!("{:?}", std::intrinsics::caller_location());
+
+ #[cfg(feature="master")]
+ unsafe {
+ test_simd();
+ }
+
+ Box::pin(move |mut _task_context| {
+ yield ();
+ }).as_mut().resume(0);
+
+ println!("End");
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_simd() {
+ let x = _mm_setzero_si128();
+ let y = _mm_set1_epi16(7);
+ let or = _mm_or_si128(x, y);
+ let cmp_eq = _mm_cmpeq_epi8(y, y);
+ let cmp_lt = _mm_cmplt_epi8(y, y);
+
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(or), [7, 7, 7, 7, 7, 7, 7, 7]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_eq), [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff]);
+ assert_eq!(std::mem::transmute::<_, [u16; 8]>(cmp_lt), [0, 0, 0, 0, 0, 0, 0, 0]);
+
+ test_mm_slli_si128();
+ test_mm_movemask_epi8();
+ test_mm256_movemask_epi8();
+ test_mm_add_epi8();
+ test_mm_add_pd();
+ test_mm_cvtepi8_epi16();
+ test_mm_cvtsi128_si64();
+
+ test_mm_extract_epi8();
+ test_mm_insert_epi16();
+
+ let mask1 = _mm_movemask_epi8(dbg!(_mm_setr_epi8(255u8 as i8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)));
+ assert_eq!(mask1, 1);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_slli_si128() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 1);
+ let e = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 15);
+ let e = _mm_setr_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1);
+ assert_eq_m128i(r, e);
+
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+ );
+ let r = _mm_slli_si128(a, 16);
+ assert_eq_m128i(r, _mm_set1_epi8(0));
+}
+
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_movemask_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8, 0b01,
+ 0b0101, 0b1111_0000u8 as i8, 0, 0,
+ 0, 0, 0b1111_0000u8 as i8, 0b0101,
+ 0b01, 0b1000_0000u8 as i8, 0b0, 0b1000_0000u8 as i8,
+ );
+ let r = _mm_movemask_epi8(a);
+ assert_eq!(r, 0b10100100_00100101);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "avx2")]
+unsafe fn test_mm256_movemask_epi8() {
+ let a = _mm256_set1_epi8(-1);
+ let r = _mm256_movemask_epi8(a);
+ let e = -1;
+ assert_eq!(r, e);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_epi8() {
+ let a = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+ #[rustfmt::skip]
+ let b = _mm_setr_epi8(
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ );
+ let r = _mm_add_epi8(a, b);
+ #[rustfmt::skip]
+ let e = _mm_setr_epi8(
+ 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46,
+ );
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_add_pd() {
+ let a = _mm_setr_pd(1.0, 2.0);
+ let b = _mm_setr_pd(5.0, 10.0);
+ let r = _mm_add_pd(a, b);
+ assert_eq_m128d(r, _mm_setr_pd(6.0, 12.0));
+}
+
+#[cfg(feature="master")]
+fn assert_eq_m128i(x: std::arch::x86_64::__m128i, y: std::arch::x86_64::__m128i) {
+ unsafe {
+ assert_eq!(std::mem::transmute::<_, [u8; 16]>(x), std::mem::transmute::<_, [u8; 16]>(y));
+ }
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+pub unsafe fn assert_eq_m128d(a: __m128d, b: __m128d) {
+ if _mm_movemask_pd(_mm_cmpeq_pd(a, b)) != 0b11 {
+ panic!("{:?} != {:?}", a, b);
+ }
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_cvtsi128_si64() {
+ let r = _mm_cvtsi128_si64(std::mem::transmute::<[i64; 2], _>([5, 0]));
+ assert_eq!(r, 5);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_cvtepi8_epi16() {
+ let a = _mm_set1_epi8(10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(10);
+ assert_eq_m128i(r, e);
+ let a = _mm_set1_epi8(-10);
+ let r = _mm_cvtepi8_epi16(a);
+ let e = _mm_set1_epi16(-10);
+ assert_eq_m128i(r, e);
+}
+
+#[cfg(feature="master")]
+#[target_feature(enable = "sse4.1")]
+unsafe fn test_mm_extract_epi8() {
+ #[rustfmt::skip]
+ let a = _mm_setr_epi8(
+ -1, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15
+ );
+ let r1 = _mm_extract_epi8(a, 0);
+ let r2 = _mm_extract_epi8(a, 3);
+ assert_eq!(r1, 0xFF);
+ assert_eq!(r2, 3);
+}
+
+#[cfg(all(feature="master", target_arch = "x86_64"))]
+#[target_feature(enable = "sse2")]
+unsafe fn test_mm_insert_epi16() {
+ let a = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
+ let r = _mm_insert_epi16(a, 9, 0);
+ let e = _mm_setr_epi16(9, 1, 2, 3, 4, 5, 6, 7);
+ assert_eq_m128i(r, e);
+}
+
+#[derive(PartialEq)]
+enum LoopState {
+ Continue(()),
+ Break(())
+}
+
+pub enum Instruction {
+ Increment,
+ Loop,
+}
+
+fn map(a: Option<(u8, Box<Instruction>)>) -> Option<Box<Instruction>> {
+ match a {
+ None => None,
+ Some((_, instr)) => Some(instr),
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs b/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs
new file mode 100644
index 000000000..2cb84786f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/subslice-patterns-const-eval.rs
@@ -0,0 +1,97 @@
+// Based on https://github.com/rust-lang/rust/blob/c5840f9d252c2f5cc16698dbf385a29c5de3ca07/src/test/ui/array-slice-vec/subslice-patterns-const-eval-match.rs
+
+// Test that array subslice patterns are correctly handled in const evaluation.
+
+// run-pass
+
+#[derive(PartialEq, Debug, Clone)]
+struct N(u8);
+
+#[derive(PartialEq, Debug, Clone)]
+struct Z;
+
+macro_rules! n {
+ ($($e:expr),* $(,)?) => {
+ [$(N($e)),*]
+ }
+}
+
+// This macro has an unused variable so that it can be repeated based on the
+// number of times a repeated variable (`$e` in `z`) occurs.
+macro_rules! zed {
+ ($e:expr) => { Z }
+}
+
+macro_rules! z {
+ ($($e:expr),* $(,)?) => {
+ [$(zed!($e)),*]
+ }
+}
+
+// Compare constant evaluation and runtime evaluation of a given expression.
+macro_rules! compare_evaluation {
+ ($e:expr, $t:ty $(,)?) => {{
+ const CONST_EVAL: $t = $e;
+ const fn const_eval() -> $t { $e }
+ static CONST_EVAL2: $t = const_eval();
+ let runtime_eval = $e;
+ assert_eq!(CONST_EVAL, runtime_eval);
+ assert_eq!(CONST_EVAL2, runtime_eval);
+ }}
+}
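+
+// Illustratively, `compare_evaluation!(1 + 1, u8)` expands to roughly:
+//
+//     const CONST_EVAL: u8 = 1 + 1;
+//     const fn const_eval() -> u8 { 1 + 1 }
+//     static CONST_EVAL2: u8 = const_eval();
+//     let runtime_eval = 1 + 1;
+//     assert_eq!(CONST_EVAL, runtime_eval);
+//     assert_eq!(CONST_EVAL2, runtime_eval);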
+
+// Repeat `$test`, substituting the given macro variables with the given
+// identifiers.
+//
+// For example:
+//
+// repeat! {
+// ($name); X; Y:
+// struct $name;
+// }
+//
+// Expands to:
+//
+// struct X; struct Y;
+//
+// This is used to repeat the tests using both the `N` and `Z`
+// types.
+macro_rules! repeat {
+ (($($dollar:tt $placeholder:ident)*); $($($values:ident),+);*: $($test:tt)*) => {
+ macro_rules! single {
+ ($($dollar $placeholder:ident),*) => { $($test)* }
+ }
+ $(single!($($values),+);)*
+ }
+}
+
+fn main() {
+ repeat! {
+ ($arr $Ty); n, N; z, Z:
+ compare_evaluation!({ let [_, x @ .., _] = $arr!(1, 2, 3, 4); x }, [$Ty; 2]);
+ compare_evaluation!({ let [_, ref x @ .., _] = $arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+ compare_evaluation!({ let [_, x @ .., _] = &$arr!(1, 2, 3, 4); x }, &'static [$Ty; 2]);
+
+ compare_evaluation!({ let [_, _, x @ .., _, _] = $arr!(1, 2, 3, 4); x }, [$Ty; 0]);
+ compare_evaluation!(
+ { let [_, _, ref x @ .., _, _] = $arr!(1, 2, 3, 4); x },
+ &'static [$Ty; 0],
+ );
+ compare_evaluation!(
+ { let [_, _, x @ .., _, _] = &$arr!(1, 2, 3, 4); x },
+ &'static [$Ty; 0],
+ );
+
+ compare_evaluation!({ let [_, .., x] = $arr!(1, 2, 3, 4); x }, $Ty);
+ compare_evaluation!({ let [_, .., ref x] = $arr!(1, 2, 3, 4); x }, &'static $Ty);
+ compare_evaluation!({ let [_, _y @ .., x] = &$arr!(1, 2, 3, 4); x }, &'static $Ty);
+ }
+
+ compare_evaluation!({ let [_, .., N(x)] = n!(1, 2, 3, 4); x }, u8);
+ compare_evaluation!({ let [_, .., N(ref x)] = n!(1, 2, 3, 4); x }, &'static u8);
+ compare_evaluation!({ let [_, .., N(x)] = &n!(1, 2, 3, 4); x }, &'static u8);
+
+ compare_evaluation!({ let [N(x), .., _] = n!(1, 2, 3, 4); x }, u8);
+ compare_evaluation!({ let [N(ref x), .., _] = n!(1, 2, 3, 4); x }, &'static u8);
+ compare_evaluation!({ let [N(x), .., _] = &n!(1, 2, 3, 4); x }, &'static u8);
+}
diff --git a/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs b/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs
new file mode 100644
index 000000000..93bab17e4
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/example/track-caller-attribute.rs
@@ -0,0 +1,40 @@
+// Based on https://github.com/anp/rust/blob/175631311716d7dfeceec40d2587cde7142ffa8c/src/test/ui/rfc-2091-track-caller/track-caller-attribute.rs
+
+// run-pass
+
+use std::panic::Location;
+
+#[track_caller]
+fn tracked() -> &'static Location<'static> {
+ Location::caller()
+}
+
+fn nested_intrinsic() -> &'static Location<'static> {
+ Location::caller()
+}
+
+fn nested_tracked() -> &'static Location<'static> {
+ tracked()
+}
+
+fn main() {
+ let location = Location::caller();
+ assert_eq!(location.file(), file!());
+ assert_eq!(location.line(), 21);
+ assert_eq!(location.column(), 20);
+
+ let tracked = tracked();
+ assert_eq!(tracked.file(), file!());
+ assert_eq!(tracked.line(), 26);
+ assert_eq!(tracked.column(), 19);
+
+ let nested = nested_intrinsic();
+ assert_eq!(nested.file(), file!());
+ assert_eq!(nested.line(), 13);
+ assert_eq!(nested.column(), 5);
+
+ let contained = nested_tracked();
+ assert_eq!(contained.file(), file!());
+ assert_eq!(contained.line(), 17);
+ assert_eq!(contained.column(), 5);
+}
diff --git a/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch b/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch
new file mode 100644
index 000000000..301b3f9bd
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch
@@ -0,0 +1,63 @@
+From f6befc4bb51d84f5f1cf35938a168c953d421350 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:10:23 +0100
+Subject: [PATCH] [core] Disable not compiling tests
+
+---
+ library/core/tests/Cargo.toml | 8 ++++++++
+ library/core/tests/num/flt2dec/mod.rs | 1 -
+ library/core/tests/num/int_macros.rs | 2 ++
+ library/core/tests/num/uint_macros.rs | 2 ++
+ library/core/tests/ptr.rs | 2 ++
+ library/core/tests/slice.rs | 2 ++
+ 6 files changed, 16 insertions(+), 1 deletion(-)
+ create mode 100644 library/core/tests/Cargo.toml
+
+diff --git a/library/core/tests/Cargo.toml b/library/core/tests/Cargo.toml
+new file mode 100644
+index 0000000..46fd999
+--- /dev/null
++++ b/library/core/tests/Cargo.toml
+@@ -0,0 +1,8 @@
++[package]
++name = "core"
++version = "0.0.0"
++edition = "2021"
++
++[lib]
++name = "coretests"
++path = "lib.rs"
+diff --git a/library/core/tests/num/flt2dec/mod.rs b/library/core/tests/num/flt2dec/mod.rs
+index a35897e..f0bf645 100644
+--- a/library/core/tests/num/flt2dec/mod.rs
++++ b/library/core/tests/num/flt2dec/mod.rs
+@@ -13,7 +13,6 @@ mod strategy {
+ mod dragon;
+ mod grisu;
+ }
+-mod random;
+
+ pub fn decode_finite<T: DecodableFloat>(v: T) -> Decoded {
+ match decode(v).1 {
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 6609bc3..241b497 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -1209,6 +1209,7 @@ fn brute_force_rotate_test_1() {
+ }
+ }
+
++/*
+ #[test]
+ #[cfg(not(target_arch = "wasm32"))]
+ fn sort_unstable() {
+@@ -1394,6 +1395,7 @@ fn partition_at_index() {
+ v.select_nth_unstable(0);
+ assert!(v == [0xDEADBEEF]);
+ }
++*/
+
+ #[test]
+ #[should_panic(expected = "index 0 greater than length of slice")]
+--
+2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch b/compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch
new file mode 100644
index 000000000..ee5ba449f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/patches/0023-core-Ignore-failing-tests.patch
@@ -0,0 +1,49 @@
+From dd82e95c9de212524e14fc60155de1ae40156dfc Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Sun, 24 Nov 2019 15:34:06 +0100
+Subject: [PATCH] [core] Ignore failing tests
+
+---
+ library/core/tests/iter.rs | 4 ++++
+ library/core/tests/num/bignum.rs | 10 ++++++++++
+ library/core/tests/num/mod.rs | 5 +++--
+ library/core/tests/time.rs | 1 +
+ 4 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/library/core/tests/array.rs b/library/core/tests/array.rs
+index 4bc44e9..8e3c7a4 100644
+--- a/library/core/tests/array.rs
++++ b/library/core/tests/array.rs
+@@ -242,6 +242,7 @@ fn iterator_drops() {
+ assert_eq!(i.get(), 5);
+ }
+
++/*
+ // This test does not work on targets without panic=unwind support.
+ // To work around this problem, test is marked is should_panic, so it will
+ // be automagically skipped on unsuitable targets, such as
+@@ -283,6 +284,7 @@ fn array_default_impl_avoids_leaks_on_panic() {
+ assert_eq!(COUNTER.load(Relaxed), 0);
+ panic!("test succeeded")
+ }
++*/
+
+ #[test]
+ fn empty_array_is_always_default() {
+@@ -304,6 +304,7 @@ fn array_map() {
+ assert_eq!(b, [1, 2, 3]);
+ }
+
++/*
+ // See note on above test for why `should_panic` is used.
+ #[test]
+ #[should_panic(expected = "test succeeded")]
+@@ -332,6 +333,7 @@ fn array_map_drop_safety() {
+ assert_eq!(DROPPED.load(Ordering::SeqCst), num_to_create);
+ panic!("test succeeded")
+ }
++*/
+
+ #[test]
+ fn cell_allows_array_cycle() {
+-- 2.21.0 (Apple Git-122)
diff --git a/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch b/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
new file mode 100644
index 000000000..d5fa1cec0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/patches/0024-core-Disable-portable-simd-test.patch
@@ -0,0 +1,29 @@
+From b1ae000f6da1abd3b8e9b80c40bc11c89b8ae93c Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Thu, 30 Dec 2021 16:54:40 +0100
+Subject: [PATCH] [core] Disable portable-simd test
+
+---
+ library/core/tests/lib.rs | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs
+index 06c7be0..359e2e7 100644
+--- a/library/core/tests/lib.rs
++++ b/library/core/tests/lib.rs
+@@ -75,7 +75,6 @@
+ #![feature(never_type)]
+ #![feature(unwrap_infallible)]
+ #![feature(result_into_ok_or_err)]
+-#![feature(portable_simd)]
+ #![feature(ptr_metadata)]
+ #![feature(once_cell)]
+ #![feature(option_result_contains)]
+@@ -127,7 +126,6 @@ mod pin;
+ mod pin_macro;
+ mod ptr;
+ mod result;
+-mod simd;
+ mod slice;
+ mod str;
+ mod str_lossy;
diff --git a/compiler/rustc_codegen_gcc/patches/0028-core-Disable-long-running-tests.patch b/compiler/rustc_codegen_gcc/patches/0028-core-Disable-long-running-tests.patch
new file mode 100644
index 000000000..dc1beae6d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/patches/0028-core-Disable-long-running-tests.patch
@@ -0,0 +1,32 @@
+From eb703e627e7a84f1cd8d0d87f0f69da1f0acf765 Mon Sep 17 00:00:00 2001
+From: bjorn3 <bjorn3@users.noreply.github.com>
+Date: Fri, 3 Dec 2021 12:16:30 +0100
+Subject: [PATCH] Disable long running tests
+
+---
+ library/core/tests/slice.rs | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/library/core/tests/slice.rs b/library/core/tests/slice.rs
+index 8402833..84592e0 100644
+--- a/library/core/tests/slice.rs
++++ b/library/core/tests/slice.rs
+@@ -2462,6 +2462,7 @@ take_tests! {
+ #[cfg(not(miri))] // unused in Miri
+ const EMPTY_MAX: &'static [()] = &[(); usize::MAX];
+
++/*
+ // can't be a constant due to const mutability rules
+ #[cfg(not(miri))] // unused in Miri
+ macro_rules! empty_max_mut {
+@@ -2485,6 +2486,7 @@ take_tests! {
+ (take_mut_oob_max_range_to_inclusive, (..=usize::MAX), None, empty_max_mut!()),
+ (take_mut_in_bounds_max_range_from, (usize::MAX..), Some(&mut [] as _), empty_max_mut!()),
+ }
++*/
+
+ #[test]
+ fn test_slice_from_ptr_range() {
+--
+2.26.2.7.g19db9cfb68
+
diff --git a/compiler/rustc_codegen_gcc/prepare.sh b/compiler/rustc_codegen_gcc/prepare.sh
new file mode 100755
index 000000000..e98f24c6e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/prepare.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+set -e
+set -v
+
+source prepare_build.sh
+
+cargo install hyperfine || echo "Skipping hyperfine install"
+
+git clone https://github.com/rust-random/rand.git || echo "rust-random/rand has already been cloned"
+pushd rand
+git checkout -- .
+git checkout 0f933f9c7176e53b2a3c7952ded484e1783f0bf1
+git am ../crate_patches/*-rand-*.patch
+popd
+
+git clone https://github.com/rust-lang/regex.git || echo "rust-lang/regex has already been cloned"
+pushd regex
+git checkout -- .
+git checkout 341f207c1071f7290e3f228c710817c280c8dca1
+popd
+
+git clone https://github.com/ebobby/simple-raytracer || echo "ebobby/simple-raytracer has already been cloned"
+pushd simple-raytracer
+git checkout -- .
+git checkout 804a7a21b9e673a482797aa289a18ed480e4d813
+
+# build with cg_llvm for perf comparison
+cargo build
+mv target/debug/main raytracer_cg_llvm
+popd
diff --git a/compiler/rustc_codegen_gcc/prepare_build.sh b/compiler/rustc_codegen_gcc/prepare_build.sh
new file mode 100755
index 000000000..8194360da
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/prepare_build.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+set -e
+set -v
+
+./build_sysroot/prepare_sysroot_src.sh
diff --git a/compiler/rustc_codegen_gcc/rust-toolchain b/compiler/rustc_codegen_gcc/rust-toolchain
new file mode 100644
index 000000000..b20aeb979
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/rust-toolchain
@@ -0,0 +1,3 @@
+[toolchain]
+channel = "nightly-2022-06-06"
+components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
diff --git a/compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch b/compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch
new file mode 100644
index 000000000..59143eac3
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/rustc_patches/compile_test.patch
@@ -0,0 +1,14 @@
+diff --git a/src/tools/compiletest/src/header.rs b/src/tools/compiletest/src/header.rs
+index 887d27fd6dca4..2c2239f2b83d1 100644
+--- a/src/tools/compiletest/src/header.rs
++++ b/src/tools/compiletest/src/header.rs
+@@ -806,8 +806,8 @@ pub fn make_test_description<R: Read>(
+ cfg: Option<&str>,
+ ) -> test::TestDesc {
+ let mut ignore = false;
+ #[cfg(not(bootstrap))]
+- let ignore_message: Option<String> = None;
++ let ignore_message: Option<&str> = None;
+ let mut should_fail = false;
+
+ let rustc_has_profiler_support = env::var_os("RUSTC_PROFILER_SUPPORT").is_some();
diff --git a/compiler/rustc_codegen_gcc/rustup.sh b/compiler/rustc_codegen_gcc/rustup.sh
new file mode 100755
index 000000000..041079bc9
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/rustup.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+set -e
+
+case $1 in
+ "prepare")
+ TOOLCHAIN=$(date +%Y-%m-%d)
+
+ echo "=> Installing new nightly"
+ rustup toolchain install --profile minimal nightly-${TOOLCHAIN} # Sanity check to see if the nightly exists
+ echo nightly-${TOOLCHAIN} > rust-toolchain
+
+ echo "=> Uninstalling all old nightlies"
+ for nightly in $(rustup toolchain list | grep nightly | grep -v $TOOLCHAIN | grep -v nightly-x86_64); do
+ rustup toolchain uninstall $nightly
+ done
+
+ ./clean_all.sh
+ ./prepare.sh
+ ;;
+ "commit")
+ git add rust-toolchain
+ git commit -m "Rustup to $(rustc -V)"
+ ;;
+ *)
+ echo "Unknown command '$1'"
+ echo "Usage: ./rustup.sh prepare|commit"
+ ;;
+esac
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
new file mode 100644
index 000000000..0ed3e1fbe
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -0,0 +1,179 @@
+use gccjit::{ToLValue, ToRValue, Type};
+use rustc_codegen_ssa::traits::{AbiBuilderMethods, BaseTypeMethods};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::bug;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::{CastTarget, FnAbi, PassMode, Reg, RegKind};
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::intrinsic::ArgAbiExt;
+use crate::type_of::LayoutGccExt;
+
+impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn apply_attrs_callsite(&mut self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _callsite: Self::Value) {
+ // TODO(antoyo)
+ }
+
+ fn get_param(&mut self, index: usize) -> Self::Value {
+ let func = self.current_func();
+ let param = func.get_param(index as i32);
+ let on_stack =
+ if let Some(on_stack_param_indices) = self.on_stack_function_params.borrow().get(&func) {
+ on_stack_param_indices.contains(&index)
+ }
+ else {
+ false
+ };
+ if on_stack {
+ param.to_lvalue().get_address(None)
+ }
+ else {
+ param.to_rvalue()
+ }
+ }
+}
+
+impl GccType for CastTarget {
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+ let rest_gcc_unit = self.rest.unit.gcc_type(cx);
+ let (rest_count, rem_bytes) =
+ if self.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ }
+ else {
+ (self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes())
+ };
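+        // For example, a 12-byte cast target with an 8-byte integer unit gives
+        // rest_count = 1 and rem_bytes = 4, lowering to roughly { i64, i32 } below.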
+
+ if self.prefix.iter().all(|x| x.is_none()) {
+ // Simplify to a single unit when there is no prefix and size <= unit size
+ if self.rest.total <= self.rest.unit.size {
+ return rest_gcc_unit;
+ }
+
+ // Simplify to array when all chunks are the same size and type
+ if rem_bytes == 0 {
+ return cx.type_array(rest_gcc_unit, rest_count);
+ }
+ }
+
+ // Create list of fields in the main structure
+ let mut args: Vec<_> = self
+ .prefix
+ .iter()
+ .flat_map(|option_reg| {
+ option_reg.map(|reg| reg.gcc_type(cx))
+ })
+ .chain((0..rest_count).map(|_| rest_gcc_unit))
+ .collect();
+
+ // Append final integer
+ if rem_bytes != 0 {
+ // Only integers can be really split further.
+ assert_eq!(self.rest.unit.kind, RegKind::Integer);
+ args.push(cx.type_ix(rem_bytes * 8));
+ }
+
+ cx.type_struct(&args, false)
+ }
+}
+
+pub trait GccType {
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc>;
+}
+
+impl GccType for Reg {
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
+ match self.kind {
+ RegKind::Integer => cx.type_ix(self.size.bits()),
+ RegKind::Float => {
+ match self.size.bits() {
+ 32 => cx.type_f32(),
+ 64 => cx.type_f64(),
+ _ => bug!("unsupported float: {:?}", self),
+ }
+ },
+ RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()),
+ }
+ }
+}
+
+pub trait FnAbiGccExt<'gcc, 'tcx> {
+ // TODO(antoyo): return a function pointer type instead?
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>);
+ fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+}
+
+impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
+ fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> (Type<'gcc>, Vec<Type<'gcc>>, bool, FxHashSet<usize>) {
+ let mut on_stack_param_indices = FxHashSet::default();
+ let args_capacity: usize = self.args.iter().map(|arg|
+ if arg.pad.is_some() {
+ 1
+ }
+ else {
+ 0
+ } +
+ if let PassMode::Pair(_, _) = arg.mode {
+ 2
+ } else {
+ 1
+ }
+ ).sum();
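+        // A padded argument reserves one extra slot and a scalar pair lowers to
+        // two parameters, hence the capacity computed above; an indirect return
+        // may add one more pointer parameter below.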
+ let mut argument_tys = Vec::with_capacity(
+ if let PassMode::Indirect { .. } = self.ret.mode {
+ 1
+ }
+ else {
+ 0
+ } + args_capacity,
+ );
+
+ let return_ty =
+ match self.ret.mode {
+ PassMode::Ignore => cx.type_void(),
+ PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
+ PassMode::Cast(cast) => cast.gcc_type(cx),
+ PassMode::Indirect { .. } => {
+ argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+ cx.type_void()
+ }
+ };
+
+ for arg in &self.args {
+ // add padding
+ if let Some(ty) = arg.pad {
+ argument_tys.push(ty.gcc_type(cx));
+ }
+
+ let arg_ty = match arg.mode {
+ PassMode::Ignore => continue,
+ PassMode::Direct(_) => arg.layout.immediate_gcc_type(cx),
+ PassMode::Pair(..) => {
+ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 0, true));
+ argument_tys.push(arg.layout.scalar_pair_element_gcc_type(cx, 1, true));
+ continue;
+ }
+ PassMode::Indirect { extra_attrs: Some(_), .. } => {
+ unimplemented!();
+ }
+ PassMode::Cast(cast) => cast.gcc_type(cx),
+ PassMode::Indirect { extra_attrs: None, on_stack: true, .. } => {
+ on_stack_param_indices.insert(argument_tys.len());
+ arg.memory_ty(cx)
+ },
+ PassMode::Indirect { extra_attrs: None, on_stack: false, .. } => cx.type_ptr_to(arg.memory_ty(cx)),
+ };
+ argument_tys.push(arg_ty);
+ }
+
+ (return_ty, argument_tys, self.c_variadic, on_stack_param_indices)
+ }
+
+ fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ let (return_type, params, variadic, on_stack_param_indices) = self.gcc_type(cx);
+ let pointer_type = cx.context.new_function_pointer_type(None, return_type, &params, variadic);
+ cx.on_stack_params.borrow_mut().insert(pointer_type.dyncast_function_ptr_type().expect("function ptr type"), on_stack_param_indices);
+ pointer_type
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
new file mode 100644
index 000000000..58efb81e8
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/allocator.rs
@@ -0,0 +1,123 @@
+use gccjit::{FunctionType, GlobalKind, ToRValue};
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::OomStrategy;
+use rustc_span::symbol::sym;
+
+use crate::GccContext;
+
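+/// Emits the `__rust_*` allocator shims: for each entry in `ALLOCATOR_METHODS`,
+/// an exported wrapper is created that forwards to the allocator selected by
+/// `kind`, plus the `__rust_alloc_error_handler` shim and the OOM-strategy global.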
+pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) {
+ let context = &mods.context;
+ let usize =
+ match tcx.sess.target.pointer_width {
+ 16 => context.new_type::<u16>(),
+ 32 => context.new_type::<u32>(),
+ 64 => context.new_type::<u64>(),
+ tws => bug!("Unsupported target word size for int: {}", tws),
+ };
+ let i8 = context.new_type::<i8>();
+ let i8p = i8.make_pointer();
+ let void = context.new_type::<()>();
+
+ for method in ALLOCATOR_METHODS {
+ let mut types = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ types.push(usize);
+ types.push(usize);
+ }
+ AllocatorTy::Ptr => types.push(i8p),
+ AllocatorTy::Usize => types.push(usize),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(i8p),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+ let name = format!("__rust_{}", method.name);
+
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, name, false);
+
+ if tcx.sess.target.options.default_hidden_visibility {
+ // TODO(antoyo): set visibility.
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ // TODO(antoyo): emit unwind tables.
+ }
+
+ let callee = kind.fn_name(method.name);
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, callee, false);
+ // TODO(antoyo): set visibility.
+
+ let block = func.new_block("entry");
+
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| func.get_param(i as i32).to_rvalue())
+ .collect::<Vec<_>>();
+ let ret = context.new_call(None, callee, &args);
+ //llvm::LLVMSetTailCall(ret, True);
+ if output.is_some() {
+ block.end_with_return(None, ret);
+ }
+ else {
+ block.end_with_void_return(None);
+ }
+
+ // TODO(@Commeownist): Check if we need to emit some extra debugging info in certain circumstances
+ // as described in https://github.com/rust-lang/rust/commit/77a96ed5646f7c3ee8897693decc4626fe380643
+ }
+
+ let types = [usize, usize];
+ let name = "__rust_alloc_error_handler".to_string();
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let func = context.new_function(None, FunctionType::Exported, void, &args, name, false);
+
+ let kind =
+ if has_alloc_error_handler {
+ AllocatorKind::Global
+ }
+ else {
+ AllocatorKind::Default
+ };
+ let callee = kind.fn_name(sym::oom);
+ let args: Vec<_> = types.iter().enumerate()
+ .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+ .collect();
+ let callee = context.new_function(None, FunctionType::Extern, void, &args, callee, false);
+ //llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+ let block = func.new_block("entry");
+
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| func.get_param(i as i32).to_rvalue())
+ .collect::<Vec<_>>();
+ let _ret = context.new_call(None, callee, &args);
+ //llvm::LLVMSetTailCall(ret, True);
+ block.end_with_void_return(None);
+
+ let name = OomStrategy::SYMBOL.to_string();
+ let global = context.new_global(None, GlobalKind::Exported, i8, name);
+ let value = tcx.sess.opts.unstable_opts.oom.should_panic();
+ let value = context.new_rvalue_from_int(i8, value as i32);
+ global.global_set_initializer_rvalue(value);
+}
diff --git a/compiler/rustc_codegen_gcc/src/archive.rs b/compiler/rustc_codegen_gcc/src/archive.rs
new file mode 100644
index 000000000..f863abdcc
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/archive.rs
@@ -0,0 +1,189 @@
+use std::fs::File;
+use std::path::{Path, PathBuf};
+
+use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
+use rustc_session::Session;
+
+use rustc_session::cstore::DllImport;
+
+struct ArchiveConfig<'a> {
+ sess: &'a Session,
+ use_native_ar: bool,
+ use_gnu_style_archive: bool,
+}
+
+#[derive(Debug)]
+enum ArchiveEntry {
+ FromArchive {
+ archive_index: usize,
+ entry_index: usize,
+ },
+ File(PathBuf),
+}
+
+pub struct ArArchiveBuilderBuilder;
+
+impl ArchiveBuilderBuilder for ArArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
+ let config = ArchiveConfig {
+ sess,
+ use_native_ar: false,
+ // FIXME: test for Linux and System V derivatives instead.
+ use_gnu_style_archive: sess.target.options.archive_format == "gnu",
+ };
+
+ Box::new(ArArchiveBuilder {
+ config,
+ src_archives: vec![],
+ entries: vec![],
+ })
+ }
+
+ fn create_dll_import_lib(
+ &self,
+ _sess: &Session,
+ _lib_name: &str,
+ _dll_imports: &[DllImport],
+ _tmpdir: &Path,
+ ) -> PathBuf {
+ unimplemented!();
+ }
+}
+
+pub struct ArArchiveBuilder<'a> {
+ config: ArchiveConfig<'a>,
+ src_archives: Vec<(PathBuf, ar::Archive<File>)>,
+ // Don't use `HashMap` here, as the order is important. `rust.metadata.bin` must always be at
+ // the end of an archive so that linkers don't get confused.
+ entries: Vec<(String, ArchiveEntry)>,
+}
+
+impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
+ fn add_file(&mut self, file: &Path) {
+ self.entries.push((
+ file.file_name().unwrap().to_str().unwrap().to_string(),
+ ArchiveEntry::File(file.to_owned()),
+ ));
+ }
+
+ fn add_archive(
+ &mut self,
+ archive_path: &Path,
+ mut skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> std::io::Result<()> {
+ let mut archive = ar::Archive::new(std::fs::File::open(&archive_path)?);
+ let archive_index = self.src_archives.len();
+
+ let mut i = 0;
+ while let Some(entry) = archive.next_entry() {
+ let entry = entry?;
+ let file_name = String::from_utf8(entry.header().identifier().to_vec())
+ .map_err(|err| std::io::Error::new(std::io::ErrorKind::InvalidData, err))?;
+ if !skip(&file_name) {
+ self.entries
+ .push((file_name, ArchiveEntry::FromArchive { archive_index, entry_index: i }));
+ }
+ i += 1;
+ }
+
+ self.src_archives.push((archive_path.to_owned(), archive));
+ Ok(())
+ }
+
+ fn build(mut self: Box<Self>, output: &Path) -> bool {
+ use std::process::Command;
+
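+ // Helper that shells out to the system `ar`; the call below is roughly equivalent
+ // to running `ar r -c <archive> <file>`.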
+ fn add_file_using_ar(archive: &Path, file: &Path) {
+ Command::new("ar")
+ .arg("r") // add or replace file
+ .arg("-c") // silence created file message
+ .arg(archive)
+ .arg(&file)
+ .status()
+ .unwrap();
+ }
+
+ enum BuilderKind<'a> {
+ Bsd(ar::Builder<File>),
+ Gnu(ar::GnuBuilder<File>),
+ NativeAr(&'a Path),
+ }
+
+ let mut builder = if self.config.use_native_ar {
+ BuilderKind::NativeAr(output)
+ } else if self.config.use_gnu_style_archive {
+ BuilderKind::Gnu(ar::GnuBuilder::new(
+ File::create(output).unwrap(),
+ self.entries
+ .iter()
+ .map(|(name, _)| name.as_bytes().to_vec())
+ .collect(),
+ ))
+ } else {
+ BuilderKind::Bsd(ar::Builder::new(File::create(output).unwrap()))
+ };
+
+ let any_members = !self.entries.is_empty();
+
+ // Add all files
+ for (entry_name, entry) in self.entries.into_iter() {
+ match entry {
+ ArchiveEntry::FromArchive {
+ archive_index,
+ entry_index,
+ } => {
+ let (ref src_archive_path, ref mut src_archive) =
+ self.src_archives[archive_index];
+ let entry = src_archive.jump_to_entry(entry_index).unwrap();
+ let header = entry.header().clone();
+
+ match builder {
+ BuilderKind::Bsd(ref mut builder) => {
+ builder.append(&header, entry).unwrap()
+ }
+ BuilderKind::Gnu(ref mut builder) => {
+ builder.append(&header, entry).unwrap()
+ }
+ BuilderKind::NativeAr(archive_file) => {
+ Command::new("ar")
+ .arg("x")
+ .arg(src_archive_path)
+ .arg(&entry_name)
+ .status()
+ .unwrap();
+ add_file_using_ar(archive_file, Path::new(&entry_name));
+ std::fs::remove_file(entry_name).unwrap();
+ }
+ }
+ }
+ ArchiveEntry::File(file) =>
+ match builder {
+ BuilderKind::Bsd(ref mut builder) => {
+ builder
+ .append_file(entry_name.as_bytes(), &mut File::open(file).expect("file for bsd builder"))
+ .unwrap()
+ },
+ BuilderKind::Gnu(ref mut builder) => {
+ builder
+ .append_file(entry_name.as_bytes(), &mut File::open(&file).expect(&format!("file {:?} for gnu builder", file)))
+ .unwrap()
+ },
+ BuilderKind::NativeAr(archive_file) => add_file_using_ar(archive_file, &file),
+ },
+ }
+ }
+
+ // Finalize archive
+ std::mem::drop(builder);
+
+ // Run ranlib to be able to link the archive
+ let status =
+ std::process::Command::new("ranlib").arg(output).status().expect("Couldn't run ranlib");
+
+ if !status.success() {
+ self.config.sess.fatal(&format!("Ranlib exited with code {:?}", status.code()));
+ }
+
+ any_members
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
new file mode 100644
index 000000000..52fd66af0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -0,0 +1,817 @@
+use gccjit::{LValue, RValue, ToRValue, Type};
+use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+
+use rustc_middle::{bug, ty::Instance};
+use rustc_span::Span;
+use rustc_target::asm::*;
+
+use std::borrow::Cow;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+use crate::callee::get_fn;
+
+
+// Rust asm! and GCC Extended Asm semantics differ substantially.
+//
+// 1. Rust asm! passes all operands in a single list. Each operand itself indicates
+// whether it's "in" or "out". "In" and "out" operands can interleave, and one operand
+// can be both "in" and "out" (`inout(reg)`).
+//
+// GCC asm has two different lists for "in" and "out" operands. In terms of gccjit,
+// this means that all "out" operands must go before "in" operands. "In" and "out" operands
+// cannot interleave.
+//
+// 2. Operand lists in both Rust and GCC are indexed. Index starts from 0. Indexes are important
+// because the asm template refers to operands by index.
+//
+// The mapping from Rust to GCC indexes would be one-to-one if it weren't for...
+//
+// 3. Clobbers. GCC has a separate list of clobbers, and clobbers don't have indexes.
+// Rust, by contrast, expresses clobbers through "out" operands that aren't tied to
+// a variable (`_`), and such "clobbers" do have indexes.
+//
+// 4. Furthermore, GCC Extended Asm does not support explicit register constraints
+// (like `out("eax")`) directly, offering so-called "local register variables"
+// as a workaround. These variables need to be declared and initialized *before*
+// the Extended Asm block but *after* normal local variables
+// (see comment in `codegen_inline_asm` for explanation).
+//
+// With that in mind, let's see how we translate Rust syntax to GCC
+// (from now on, `CC` stands for "constraint code"):
+//
+// * `out(reg_class) var` -> translated to output operand: `"=CC"(var)`
+// * `inout(reg_class) var` -> translated to output operand: `"+CC"(var)`
+// * `in(reg_class) var` -> translated to input operand: `"CC"(var)`
+//
+// * `out(reg_class) _` -> translated to one `=r(tmp)`, where "tmp" is a temporary unused variable
+//
+// * `out("explicit register") _` -> not translated to any operands, register is simply added to clobbers list
+//
+// * `inout(reg_class) in_var => out_var` -> translated to two operands:
+// output: `"=CC"(in_var)`
+// input: `"num"(out_var)` where num is the GCC index
+// of the corresponding output operand
+//
+// * `inout(reg_class) in_var => _` -> same as `inout(reg_class) in_var => tmp`,
+// where "tmp" is a temporary unused variable
+//
+// * `out/in/inout("explicit register") var` -> translated to one or two operands as described above
+// with `"r"(var)` constraint,
+// and one register variable assigned to the desired register.
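+//
+// As a concrete (illustrative) example, `asm!("add {0}, {1}", inout(reg) x, in(reg) y)`
+// ends up with roughly these GCC operand lists:
+//
+//     outputs: "+r"(x)   // GCC index 0
+//     inputs:  "r"(y)    // GCC index 1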
+
+const ATT_SYNTAX_INS: &str = ".att_syntax noprefix\n\t";
+const INTEL_SYNTAX_INS: &str = "\n\t.intel_syntax noprefix";
+
+
+struct AsmOutOperand<'a, 'tcx, 'gcc> {
+ rust_idx: usize,
+ constraint: &'a str,
+ late: bool,
+ readwrite: bool,
+
+ tmp_var: LValue<'gcc>,
+ out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>
+}
+
+struct AsmInOperand<'a, 'tcx> {
+ rust_idx: usize,
+ constraint: Cow<'a, str>,
+ val: RValue<'tcx>
+}
+
+impl AsmOutOperand<'_, '_, '_> {
+ fn to_constraint(&self) -> String {
+ let mut res = String::with_capacity(self.constraint.len() + self.late as usize + 1);
+
+ let sign = if self.readwrite { '+' } else { '=' };
+ res.push(sign);
+ if !self.late {
+ res.push('&');
+ }
+
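+ // e.g. a late-written read-write "r" operand yields "+r", and an early-clobber
+ // write-only one yields "=&r".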
+ res.push_str(&self.constraint);
+ res
+ }
+}
+
+enum ConstraintOrRegister {
+ Constraint(&'static str),
+ Register(&'static str)
+}
+
+
+impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], _instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
+ if options.contains(InlineAsmOptions::MAY_UNWIND) {
+ self.sess()
+ .struct_span_err(span[0], "GCC backend does not support unwinding from inline asm")
+ .emit();
+ return;
+ }
+
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+ let is_x86 = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);
+ let att_dialect = is_x86 && options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+ // GCC index of an output operand equals its position in the array
+ let mut outputs = vec![];
+
+ // GCC index of an input operand equals its position in the array
+ // added to `outputs.len()`
+ let mut inputs = vec![];
+
+ // Clobbers collected from `out("explicit register") _` and `inout("expl_reg") var => _`
+ let mut clobbers = vec![];
+
+ // We're trying to preallocate space for the template
+ let mut constants_len = 0;
+
+ // There are rules we must adhere to if we want GCC to do the right thing:
+ //
+ // * Every local variable that the asm block uses as an output must be declared *before*
+ // the asm block.
+ // * There must be no instructions whatsoever between the register variables and the asm.
+ //
+ // Therefore, the backend must generate the instructions strictly in this order:
+ //
+ // 1. Output variables.
+ // 2. Register variables.
+ // 3. The asm block.
+ //
+ // We must also make sure that no input operands are emitted before output operands.
+ //
+ // This is why we work in passes, first emitting local vars, then local register vars.
+ // Also, we don't emit any asm operands immediately; we save them to
+ // one of the buffers to be emitted later.
+
+ // 1. Normal variables (and saving operands to buffers).
+ for (rust_idx, op) in rust_operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::Out { reg, late, place } => {
+ use ConstraintOrRegister::*;
+
+ let (constraint, ty) = match (reg_to_gcc(reg), place) {
+ (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx, false)),
+ // When `reg` is a class and not an explicit register but the out place is not specified,
+ // we need to create an unused output variable to assign the output to. This var
+ // needs to be of a type that's "compatible" with the register class, but the specific type
+ // doesn't matter.
+ (Constraint(constraint), None) => (constraint, dummy_output_type(self.cx, reg.reg_class())),
+ (Register(_), Some(_)) => {
+ // left for the next pass
+ continue
+ },
+ (Register(reg_name), None) => {
+ // `clobber_abi` can add lots of clobbers that are not supported by the target,
+ // such as AVX-512 registers, so we just ignore unsupported registers
+ let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
+ .any(|&(_, feature)| {
+ if let Some(feature) = feature {
+ self.tcx.sess.target_features.contains(&feature)
+ } else {
+ true // Register class is unconditionally supported
+ }
+ });
+
+ if is_target_supported && !clobbers.contains(&reg_name) {
+ clobbers.push(reg_name);
+ }
+ continue
+ }
+ };
+
+ let tmp_var = self.current_func().new_local(None, ty, "output_register");
+ outputs.push(AsmOutOperand {
+ constraint,
+ rust_idx,
+ late,
+ readwrite: false,
+ tmp_var,
+ out_place: place
+ });
+ }
+
+ InlineAsmOperandRef::In { reg, value } => {
+ if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+ inputs.push(AsmInOperand {
+ constraint: Cow::Borrowed(constraint),
+ rust_idx,
+ val: value.immediate()
+ });
+ }
+ else {
+ // left for the next pass
+ continue
+ }
+ }
+
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+ let constraint = if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+ constraint
+ }
+ else {
+ // left for the next pass
+ continue
+ };
+
+ // Rustc frontend guarantees that input and output types are "compatible",
+ // so we can just use the input var's type for the output variable.
+ //
+ // This decision is also backed by the fact that LLVM needs in and out
+ // values to be of *exactly the same type*, not just "compatible".
+ // I'm not sure if GCC is as picky, but better safe than sorry.
+ let ty = in_value.layout.gcc_type(self.cx, false);
+ let tmp_var = self.current_func().new_local(None, ty, "output_register");
+
+ // If the out_place is None (i.e. `inout(reg) _` syntax was used), we translate
+ // it to one "readwrite (+) output variable", otherwise we translate it to two
+ // "out and tied in" vars as described above.
+ let readwrite = out_place.is_none();
+ outputs.push(AsmOutOperand {
+ constraint,
+ rust_idx,
+ late,
+ readwrite,
+ tmp_var,
+ out_place,
+ });
+
+ if !readwrite {
+ let out_gcc_idx = outputs.len() - 1;
+ let constraint = Cow::Owned(out_gcc_idx.to_string());
+
+ inputs.push(AsmInOperand {
+ constraint,
+ rust_idx,
+ val: in_value.immediate()
+ });
+ }
+ }
+
+ InlineAsmOperandRef::Const { ref string } => {
+ constants_len += string.len() + att_dialect as usize;
+ }
+
+ InlineAsmOperandRef::SymFn { instance } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O)
+ // or byte count suffixes (x86 Windows).
+ constants_len += self.tcx.symbol_name(instance).name.len();
+ }
+ InlineAsmOperandRef::SymStatic { def_id } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O).
+ constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
+ }
+ }
+ }
+
+ // 2. Register variables.
+ for (rust_idx, op) in rust_operands.iter().enumerate() {
+ match *op {
+ // `out("explicit register") var`
+ InlineAsmOperandRef::Out { reg, late, place } => {
+ if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+ let out_place = if let Some(place) = place {
+ place
+ }
+ else {
+ // processed in the previous pass
+ continue
+ };
+
+ let ty = out_place.layout.gcc_type(self.cx, false);
+ let tmp_var = self.current_func().new_local(None, ty, "output_register");
+ tmp_var.set_register_name(reg_name);
+
+ outputs.push(AsmOutOperand {
+ constraint: "r".into(),
+ rust_idx,
+ late,
+ readwrite: false,
+ tmp_var,
+ out_place: Some(out_place)
+ });
+ }
+
+ // processed in the previous pass
+ }
+
+ // `in("explicit register") var`
+ InlineAsmOperandRef::In { reg, value } => {
+ if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+ let ty = value.layout.gcc_type(self.cx, false);
+ let reg_var = self.current_func().new_local(None, ty, "input_register");
+ reg_var.set_register_name(reg_name);
+ self.llbb().add_assignment(None, reg_var, value.immediate());
+
+ inputs.push(AsmInOperand {
+ constraint: "r".into(),
+ rust_idx,
+ val: reg_var.to_rvalue()
+ });
+ }
+
+ // processed in the previous pass
+ }
+
+ // `inout("explicit register") in_var => out_var`
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+ if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
+ // See explanation in the first pass.
+ let ty = in_value.layout.gcc_type(self.cx, false);
+ let tmp_var = self.current_func().new_local(None, ty, "output_register");
+ tmp_var.set_register_name(reg_name);
+
+ outputs.push(AsmOutOperand {
+ constraint: "r".into(),
+ rust_idx,
+ late,
+ readwrite: false,
+ tmp_var,
+ out_place,
+ });
+
+ let constraint = Cow::Owned((outputs.len() - 1).to_string());
+ inputs.push(AsmInOperand {
+ constraint,
+ rust_idx,
+ val: in_value.immediate()
+ });
+ }
+
+ // processed in the previous pass
+ }
+
+ InlineAsmOperandRef::SymFn { instance } => {
+ inputs.push(AsmInOperand {
+ constraint: "X".into(),
+ rust_idx,
+ val: self.cx.rvalue_as_function(get_fn(self.cx, instance))
+ .get_address(None),
+ });
+ }
+
+ InlineAsmOperandRef::SymStatic { def_id } => {
+ inputs.push(AsmInOperand {
+ constraint: "X".into(),
+ rust_idx,
+ val: self.cx.get_static(def_id).get_address(None),
+ });
+ }
+
+ InlineAsmOperandRef::Const { .. } => {
+ // processed in the previous pass
+ }
+ }
+ }
+
+ // 3. Build the template string
+
+ let mut template_str = String::with_capacity(estimate_template_length(template, constants_len, att_dialect));
+ if att_dialect {
+ template_str.push_str(ATT_SYNTAX_INS);
+ }
+
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref string) => {
+ // TODO(@Commeownist): switch to `Iterator::intersperse` once it's stable
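+ // Escape every '%' by doubling it (e.g. "mov %eax, 1" becomes "mov %%eax, 1"),
+ // since GCC would otherwise treat it as the start of an operand reference.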
+ let mut iter = string.split('%');
+ if let Some(s) = iter.next() {
+ template_str.push_str(s);
+ }
+
+ for s in iter {
+ template_str.push_str("%%");
+ template_str.push_str(s);
+ }
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+ let mut push_to_template = |modifier, gcc_idx| {
+ use std::fmt::Write;
+
+ template_str.push('%');
+ if let Some(modifier) = modifier {
+ template_str.push(modifier);
+ }
+ write!(template_str, "{}", gcc_idx).expect("pushing to string failed");
+ };
+
+ match rust_operands[operand_idx] {
+ InlineAsmOperandRef::Out { reg, .. } => {
+ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+ let gcc_index = outputs.iter()
+ .position(|op| operand_idx == op.rust_idx)
+ .expect("wrong rust index");
+ push_to_template(modifier, gcc_index);
+ }
+
+ InlineAsmOperandRef::In { reg, .. } => {
+ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+ let in_gcc_index = inputs.iter()
+ .position(|op| operand_idx == op.rust_idx)
+ .expect("wrong rust index");
+ let gcc_index = in_gcc_index + outputs.len();
+ push_to_template(modifier, gcc_index);
+ }
+
+ InlineAsmOperandRef::InOut { reg, .. } => {
+ let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
+
+ // The input register is tied to the output, so we can just use the index of the output register
+ let gcc_index = outputs.iter()
+ .position(|op| operand_idx == op.rust_idx)
+ .expect("wrong rust index");
+ push_to_template(modifier, gcc_index);
+ }
+
+ InlineAsmOperandRef::SymFn { instance } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O)
+ // or byte count suffixes (x86 Windows).
+ let name = self.tcx.symbol_name(instance).name;
+ template_str.push_str(name);
+ }
+
+ InlineAsmOperandRef::SymStatic { def_id } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O).
+ let instance = Instance::mono(self.tcx, def_id);
+ let name = self.tcx.symbol_name(instance).name;
+ template_str.push_str(name);
+ }
+
+ InlineAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the template
+ if att_dialect {
+ template_str.push('$');
+ }
+ template_str.push_str(string);
+ }
+ }
+ }
+ }
+ }
+
+ if att_dialect {
+ template_str.push_str(INTEL_SYNTAX_INS);
+ }
+
+ // 4. Generate Extended Asm block
+
+ let block = self.llbb();
+ let extended_asm = block.add_extended_asm(None, &template_str);
+
+ for op in &outputs {
+ extended_asm.add_output_operand(None, &op.to_constraint(), op.tmp_var);
+ }
+
+ for op in &inputs {
+ extended_asm.add_input_operand(None, &op.constraint, op.val);
+ }
+
+ for clobber in clobbers.iter() {
+ extended_asm.add_clobber(clobber);
+ }
+
+ if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
+ // TODO(@Commeownist): I'm not 100% sure this one clobber is sufficient
+ // on all architectures. For instance, what about FP stack?
+ extended_asm.add_clobber("cc");
+ }
+ if !options.contains(InlineAsmOptions::NOMEM) {
+ extended_asm.add_clobber("memory");
+ }
+ if !options.contains(InlineAsmOptions::PURE) {
+ extended_asm.set_volatile_flag(true);
+ }
+ if !options.contains(InlineAsmOptions::NOSTACK) {
+ // TODO(@Commeownist): figure out how to align stack
+ }
+ if options.contains(InlineAsmOptions::NORETURN) {
+ let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
+ let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
+ self.call(self.type_void(), builtin_unreachable, &[], None);
+ }
+
+ // Write results to outputs.
+ //
+ // We need to do this because:
+ // 1. Turning `PlaceRef` into `RValue` is error-prone and has nasty edge cases
+ // (especially with current `rustc_backend_ssa` API).
+ // 2. Not every output operand has an `out_place`, and it's required by `add_output_operand`.
+ //
+ // Instead, we generate a temporary output variable for each output operand, and then this loop
+ // generates `out_place = tmp_var;` assignments where the out_place exists.
+ for op in &outputs {
+ if let Some(place) = op.out_place {
+ OperandValue::Immediate(op.tmp_var.to_rvalue()).store(self, place);
+ }
+ }
+
+ }
+}
+
+fn estimate_template_length(template: &[InlineAsmTemplatePiece], constants_len: usize, att_dialect: bool) -> usize {
+ let len: usize = template.iter().map(|piece| {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref string) => {
+ string.len()
+ }
+ InlineAsmTemplatePiece::Placeholder { .. } => {
+ // '%' + 1 char modifier + 1 char index
+ 3
+ }
+ }
+ })
+ .sum();
+
+ // Increase it by 5% to account for possible '%' signs that'll be duplicated.
+ // The number is pulled out of thin air, but it should be a fair enough upper bound.
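+ // For instance, string pieces totalling 200 bytes plus 4 placeholders estimate to
+ // (200 + 4 * 3) * 1.05 ≈ 222 bytes, plus `constants_len` on top.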
+ let mut res = (len as f32 * 1.05) as usize + constants_len;
+
+ if att_dialect {
+ res += INTEL_SYNTAX_INS.len() + ATT_SYNTAX_INS.len();
+ }
+ res
+}
+
+/// Converts a register class to a GCC constraint code.
+fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
+ let constraint = match reg {
+ // For vector registers LLVM wants the register name to match the type size.
+ InlineAsmRegOrRegClass::Reg(reg) => {
+ match reg {
+ InlineAsmReg::X86(_) => {
+ // TODO(antoyo): add support for vector register.
+ // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+ return ConstraintOrRegister::Register(match reg.name() {
+ // Some register names do not map 1:1 from Rust to GCC.
+ "st(0)" => "st",
+
+ name => name,
+ });
+ }
+
+ _ => unimplemented!(),
+ }
+ },
+ InlineAsmRegOrRegClass::RegClass(reg) => match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => unimplemented!(),
+ InlineAsmRegClass::Avr(_) => unimplemented!(),
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::Msp430(_) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+ | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+ unreachable!("clobber-only")
+ },
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => unimplemented!(),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg,
+ ) => unreachable!("clobber-only"),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("GCC backend does not support SPIR-V")
+ }
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+ };
+
+ ConstraintOrRegister::Constraint(constraint)
+}
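+// For example, `in(reg)` on x86 maps to `Constraint("r")`, while an explicit register
+// like `st(0)` becomes `Register("st")`.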
+
+/// Type to use for outputs that are discarded. It doesn't really matter what
+/// the type is, as long as it is valid for the constraint code.
+fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegClass) -> Type<'gcc> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => unimplemented!(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)=> cx.type_i32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Avr(_) => unimplemented!(),
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::Msp430(_) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+ | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+ unreachable!("clobber-only")
+ },
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::mmx_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => cx.type_i16(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::tmm_reg) => unimplemented!(),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ },
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
+
+impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef<'tcx>], options: InlineAsmOptions, _line_spans: &[Span]) {
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+ // Default to Intel syntax on x86
+ let att_dialect = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
+ && options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+ // Build the template string
+ let mut template_str = String::new();
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref string) => {
+ for line in string.lines() {
+ // NOTE: gcc does not allow inline comments, so remove them.
+ let line =
+ if let Some(index) = line.rfind("//") {
+ &line[..index]
+ }
+ else {
+ line
+ };
+ template_str.push_str(line);
+ template_str.push('\n');
+ }
+ },
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+ match operands[operand_idx] {
+ GlobalAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the
+ // template. Note that we don't need to escape %
+ // here unlike normal inline assembly.
+ template_str.push_str(string);
+ }
+
+ GlobalAsmOperandRef::SymFn { instance } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O)
+ // or byte count suffixes (x86 Windows).
+ let name = self.tcx.symbol_name(instance).name;
+ template_str.push_str(name);
+ }
+
+ GlobalAsmOperandRef::SymStatic { def_id } => {
+ // TODO(@Amanieu): Additional mangling is needed on
+ // some targets to add a leading underscore (Mach-O).
+ let instance = Instance::mono(self.tcx, def_id);
+ let name = self.tcx.symbol_name(instance).name;
+ template_str.push_str(name);
+ }
+ }
+ }
+ }
+ }
+
+ let template_str =
+ if att_dialect {
+ format!(".att_syntax\n\t{}\n\t.intel_syntax noprefix", template_str)
+ }
+ else {
+ template_str
+ };
+ // NOTE: it seems like GCC puts the asm in the wrong section, so set it to .text manually.
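+ // For example, `global_asm!(".globl foo")` is emitted roughly as:
+ //
+ //     .pushsection .text
+ //     .globl foo
+ //     .popsection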
+ let template_str = format!(".pushsection .text\n{}\n.popsection", template_str);
+ self.context.add_top_level_asm(None, &template_str);
+ }
+}
+
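+/// Converts a Rust inline asm template modifier to its GCC operand modifier for the
+/// given register class. For example, on x86-64 the Rust modifier `e` (32-bit
+/// subregister, e.g. `eax`) maps to GCC's `k`, and no modifier defaults to `q`
+/// (the full 64-bit register name).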
+fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => unimplemented!(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ unimplemented!()
+ }
+ InlineAsmRegClass::Avr(_) => unimplemented!(),
+ InlineAsmRegClass::Bpf(_) => unimplemented!(),
+ InlineAsmRegClass::Hexagon(_) => unimplemented!(),
+ InlineAsmRegClass::Mips(_) => unimplemented!(),
+ InlineAsmRegClass::Msp430(_) => unimplemented!(),
+ InlineAsmRegClass::Nvptx(_) => unimplemented!(),
+ InlineAsmRegClass::PowerPC(_) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+ | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => unimplemented!(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+ None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') },
+ Some('l') => Some('b'),
+ Some('h') => Some('h'),
+ Some('x') => Some('w'),
+ Some('e') => Some('k'),
+ Some('r') => Some('q'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
+ InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+ (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
+ (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
+ (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
+ (_, Some('x')) => Some('x'),
+ (_, Some('y')) => Some('t'),
+ (_, Some('z')) => Some('g'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg0) => None,
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::tmm_reg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => unimplemented!(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ },
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => unimplemented!(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => unimplemented!(),
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/back/mod.rs b/compiler/rustc_codegen_gcc/src/back/mod.rs
new file mode 100644
index 000000000..d692799d7
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/back/mod.rs
@@ -0,0 +1 @@
+pub mod write;
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
new file mode 100644
index 000000000..efcf18d31
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/back/write.rs
@@ -0,0 +1,83 @@
+use std::{env, fs};
+
+use gccjit::OutputKind;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::back::write::{CodegenContext, EmitObj, ModuleConfig};
+use rustc_errors::Handler;
+use rustc_session::config::OutputType;
+use rustc_span::fatal_error::FatalError;
+use rustc_target::spec::SplitDebuginfo;
+
+use crate::{GccCodegenBackend, GccContext};
+
+pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+ {
+ let context = &module.module_llvm.context;
+
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
+
+ let _bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+ let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
+ if config.bitcode_needed() {
+ // TODO(antoyo)
+ }
+
+ if config.emit_ir {
+ unimplemented!();
+ }
+
+ if config.emit_asm {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
+ let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+ context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
+ }
+
+ match config.emit_obj {
+ EmitObj::ObjectCode(_) => {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name);
+ if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
+ println!("Module {}", module.name);
+ }
+ if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1") || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
+ println!("Dumping reproducer {}", module.name);
+ let _ = fs::create_dir("/tmp/reproducers");
+ // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
+ // transmuting an rvalue to an lvalue.
+ // Segfault is actually in gcc::jit::reproducer::get_identifier_as_lvalue
+ context.dump_reproducer_to_file(&format!("/tmp/reproducers/{}.c", module.name));
+ println!("Dumped reproducer {}", module.name);
+ }
+ if env::var("CG_GCCJIT_DUMP_TO_FILE").as_deref() == Ok("1") {
+ let _ = fs::create_dir("/tmp/gccjit_dumps");
+ let path = &format!("/tmp/gccjit_dumps/{}.c", module.name);
+ context.dump_to_file(path, true);
+ }
+ context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
+ }
+
+ EmitObj::Bitcode => {
+ // TODO(antoyo)
+ }
+
+ EmitObj::None => {}
+ }
+ }
+
+ Ok(module.into_compiled_module(
+ config.emit_obj != EmitObj::None,
+ cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked,
+ config.emit_bc,
+ &cgcx.output_filenames,
+ ))
+}
+
+pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _diag_handler: &Handler, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
+ unimplemented!();
+}
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
new file mode 100644
index 000000000..8f9f6f98f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/base.rs
@@ -0,0 +1,154 @@
+use std::env;
+use std::time::Instant;
+
+use gccjit::{
+ Context,
+ FunctionType,
+ GlobalKind,
+};
+use rustc_middle::dep_graph;
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::mir::mono::Linkage;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::DebugInfoMethods;
+use rustc_session::config::DebugInfo;
+use rustc_span::Symbol;
+
+use crate::GccContext;
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind {
+ match linkage {
+ Linkage::External => GlobalKind::Imported,
+ Linkage::AvailableExternally => GlobalKind::Imported,
+ Linkage::LinkOnceAny => unimplemented!(),
+ Linkage::LinkOnceODR => unimplemented!(),
+ Linkage::WeakAny => unimplemented!(),
+ Linkage::WeakODR => unimplemented!(),
+ Linkage::Appending => unimplemented!(),
+ Linkage::Internal => GlobalKind::Internal,
+ Linkage::Private => GlobalKind::Internal,
+ Linkage::ExternalWeak => GlobalKind::Imported, // TODO(antoyo): should be weak linkage.
+ Linkage::Common => unimplemented!(),
+ }
+}
+
+pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
+ match linkage {
+ Linkage::External => FunctionType::Exported,
+ Linkage::AvailableExternally => FunctionType::Extern,
+ Linkage::LinkOnceAny => unimplemented!(),
+ Linkage::LinkOnceODR => unimplemented!(),
+ Linkage::WeakAny => FunctionType::Exported, // FIXME(antoyo): should be similar to linkonce.
+ Linkage::WeakODR => unimplemented!(),
+ Linkage::Appending => unimplemented!(),
+ Linkage::Internal => FunctionType::Internal,
+ Linkage::Private => FunctionType::Internal,
+ Linkage::ExternalWeak => unimplemented!(),
+ Linkage::Common => unimplemented!(),
+ }
+}
+
+pub fn compile_codegen_unit<'tcx>(tcx: TyCtxt<'tcx>, cgu_name: Symbol, supports_128bit_integers: bool) -> (ModuleCodegen<GccContext>, u64) {
+ let prof_timer = tcx.prof.generic_activity("codegen_module");
+ let start_time = Instant::now();
+
+ let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+ let (module, _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
+ (cgu_name, supports_128bit_integers),
+ module_codegen,
+ Some(dep_graph::hash_result),
+ );
+ let time_to_codegen = start_time.elapsed();
+ drop(prof_timer);
+
+ // We assume that the cost to run GCC on a CGU is proportional to
+ // the time we needed for codegenning it.
+ let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
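+ // i.e. the total elapsed time in nanoseconds.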
+
+ fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, supports_128bit_integers): (Symbol, bool)) -> ModuleCodegen<GccContext> {
+ let cgu = tcx.codegen_unit(cgu_name);
+ // Instantiate monomorphizations without filling out definitions yet...
+ //let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
+ let context = Context::default();
+ // TODO(antoyo): only set on x86 platforms.
+ context.add_command_line_option("-masm=intel");
+ // TODO(antoyo): only add the following cli argument if the feature is supported.
+ context.add_command_line_option("-msse2");
+ context.add_command_line_option("-mavx2");
+ context.add_command_line_option("-msha");
+ context.add_command_line_option("-mpclmul");
+ // FIXME(antoyo): the following causes an illegal instruction on vmovdqu64 in std_example on my CPU.
+ // Only add if the CPU supports it.
+ //context.add_command_line_option("-mavx512f");
+ for arg in &tcx.sess.opts.cg.llvm_args {
+ context.add_command_line_option(arg);
+ }
+ // NOTE: This is needed to compile the file src/intrinsic/archs.rs during a bootstrap of rustc.
+ context.add_command_line_option("-fno-var-tracking-assignments");
+ // NOTE: an optimization (https://github.com/rust-lang/rustc_codegen_gcc/issues/53).
+ context.add_command_line_option("-fno-semantic-interposition");
+ // NOTE: Rust relies on LLVM not doing TBAA (https://github.com/rust-lang/unsafe-code-guidelines/issues/292).
+ context.add_command_line_option("-fno-strict-aliasing");
+
+ if tcx.sess.opts.unstable_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) {
+ context.add_command_line_option("-ffunction-sections");
+ context.add_command_line_option("-fdata-sections");
+ }
+
+ if env::var("CG_GCCJIT_DUMP_CODE").as_deref() == Ok("1") {
+ context.set_dump_code_on_compile(true);
+ }
+ if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
+ context.set_dump_initial_gimple(true);
+ }
+ context.set_debug_info(true);
+ if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
+ context.set_dump_everything(true);
+ }
+ if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
+ context.set_keep_intermediates(true);
+ }
+
+ // TODO(bjorn3): Remove once unwinding is properly implemented
+ context.set_allow_unreachable_blocks(true);
+
+ {
+ let cx = CodegenCx::new(&context, cgu, tcx, supports_128bit_integers);
+
+ let mono_items = cgu.items_in_deterministic_order(tcx);
+ for &(mono_item, (linkage, visibility)) in &mono_items {
+ mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+ }
+
+ // ... and now that we have everything pre-defined, fill out those definitions.
+ for &(mono_item, _) in &mono_items {
+ mono_item.define::<Builder<'_, '_, '_>>(&cx);
+ }
+
+ // If this codegen unit contains the main function, also create the
+ // wrapper here
+ maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx);
+
+ // Finalize debuginfo
+ if cx.sess().opts.debuginfo != DebugInfo::None {
+ cx.debuginfo_finalize();
+ }
+ }
+
+ ModuleCodegen {
+ name: cgu_name.to_string(),
+ module_llvm: GccContext {
+ context
+ },
+ kind: ModuleKind::Regular,
+ }
+ }
+
+ (module, cost)
+}
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
new file mode 100644
index 000000000..4d40dd099
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -0,0 +1,1561 @@
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::convert::TryFrom;
+use std::ops::Deref;
+
+use gccjit::{
+ BinaryOp,
+ Block,
+ ComparisonOp,
+ Context,
+ Function,
+ LValue,
+ RValue,
+ ToRValue,
+ Type,
+ UnaryOp,
+};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+ BackendTypes,
+ BaseTypeMethods,
+ BuilderMethods,
+ ConstMethods,
+ DerivedTypeMethods,
+ LayoutTypeMethods,
+ HasCodegen,
+ OverflowOp,
+ StaticBuilderMethods,
+};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{
+ self,
+ call::FnAbi,
+ Align,
+ HasDataLayout,
+ Size,
+ TargetDataLayout,
+ WrappingRange,
+};
+use rustc_target::spec::{HasTargetSpec, Target};
+
+use crate::common::{SignType, TypeReflection, type_is_pointer};
+use crate::context::CodegenCx;
+use crate::intrinsic::llvm;
+use crate::type_of::LayoutGccExt;
+
+// TODO(antoyo)
+type Funclet = ();
+
+// TODO(antoyo): remove this variable.
+static mut RETURN_VALUE_COUNT: usize = 0;
+
+enum ExtremumOperation {
+ Max,
+ Min,
+}
+
+pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
+ pub cx: &'a CodegenCx<'gcc, 'tcx>,
+ pub block: Block<'gcc>,
+ stack_var_count: Cell<usize>,
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
+ Builder {
+ cx,
+ block,
+ stack_var_count: Cell::new(0),
+ }
+ }
+
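+ // Emits an atomic min/max as a compare-and-swap loop. A rough sketch of the
+ // generated control flow (shown here for `Max`):
+ //
+ //     previous = atomic_load(dst);
+ //     while previous < src && !compare_exchange(dst, &mut previous, src) {}
+ //
+ // i.e. retry until the stored value is already the maximum or the exchange succeeds.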
+ fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+ let size = src.get_type().get_size();
+
+ let func = self.current_func();
+
+ let load_ordering =
+ match order {
+ // TODO(antoyo): does this make sense?
+ AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+ _ => order,
+ };
+ let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
+ let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
+ let return_value = func.new_local(None, previous_value.get_type(), "return_value");
+ self.llbb().add_assignment(None, previous_var, previous_value);
+ self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
+
+ let while_block = func.new_block("while");
+ let after_block = func.new_block("after_while");
+ self.llbb().end_with_jump(None, while_block);
+
+ // NOTE: since jumps were added and compare_exchange doesn't expect this, the current block in the
+ // state needs to be updated.
+ self.switch_to_block(while_block);
+
+ let comparison_operator =
+ match operation {
+ ExtremumOperation::Max => ComparisonOp::LessThan,
+ ExtremumOperation::Min => ComparisonOp::GreaterThan,
+ };
+
+ let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
+ let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
+ let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
+ let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
+
+ while_block.end_with_conditional(None, cond, while_block, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+ // state needs to be updated.
+ self.switch_to_block(after_block);
+
+ return_value.to_rvalue()
+ }
+
+ fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+ let size = src.get_type().get_size();
+ let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
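+ // e.g. a 4-byte operand selects the GCC builtin "__atomic_compare_exchange_4".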
+ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+ let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
+ let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
+
+ let void_ptr_type = self.context.new_type::<*mut ()>();
+ let volatile_void_ptr_type = void_ptr_type.make_volatile();
+ let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+ let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
+
+ // NOTE: not sure why, but we have the wrong type here.
+ let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
+ let src = self.context.new_cast(None, src, int_type);
+ self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
+ }
+
+ pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
+ self.llbb().add_assignment(None, lvalue, value);
+ }
+
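+ // gccjit type-checks call arguments, so any argument whose GCC type doesn't match
+ // the corresponding parameter type (e.g. an `i8*` passed where an `i32*` is
+ // expected) is bitcast to the parameter type first.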
+ fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+ let mut all_args_match = true;
+ let mut param_types = vec![];
+ let param_count = func.get_param_count();
+ for (index, arg) in args.iter().enumerate().take(param_count) {
+ let param = func.get_param(index as i32);
+ let param = param.to_rvalue().get_type();
+ if param != arg.get_type() {
+ all_args_match = false;
+ }
+ param_types.push(param);
+ }
+
+ if all_args_match {
+ return Cow::Borrowed(args);
+ }
+
+ let casted_args: Vec<_> = param_types
+ .into_iter()
+ .zip(args.iter())
+ .enumerate()
+ .map(|(_i, (expected_ty, &actual_val))| {
+ let actual_ty = actual_val.get_type();
+ if expected_ty != actual_ty {
+ self.bitcast(actual_val, expected_ty)
+ }
+ else {
+ actual_val
+ }
+ })
+ .collect();
+
+ Cow::Owned(casted_args)
+ }
+
+ fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+ let mut all_args_match = true;
+ let mut param_types = vec![];
+ let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
+ for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
+ let param = gcc_func.get_param_type(index);
+ if param != arg.get_type() {
+ all_args_match = false;
+ }
+ param_types.push(param);
+ }
+
+ let mut on_stack_param_indices = FxHashSet::default();
+ if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) {
+ on_stack_param_indices = indices.clone();
+ }
+
+ if all_args_match {
+ return Cow::Borrowed(args);
+ }
+
+ let func_name = format!("{:?}", func_ptr);
+
+ let casted_args: Vec<_> = param_types
+ .into_iter()
+ .zip(args.iter())
+ .enumerate()
+ .map(|(index, (expected_ty, &actual_val))| {
+ if llvm::ignore_arg_cast(&func_name, index, args.len()) {
+ return actual_val;
+ }
+
+ let actual_ty = actual_val.get_type();
+ if expected_ty != actual_ty {
+ if !actual_ty.is_vector() && !expected_ty.is_vector() && actual_ty.is_integral() && expected_ty.is_integral() && actual_ty.get_size() != expected_ty.get_size() {
+ self.context.new_cast(None, actual_val, expected_ty)
+ }
+ else if on_stack_param_indices.contains(&index) {
+ actual_val.dereference(None).to_rvalue()
+ }
+ else {
+ assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index);
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ self.bitcast(actual_val, expected_ty)
+ }
+ }
+ else {
+ actual_val
+ }
+ })
+ .collect();
+
+ Cow::Owned(casted_args)
+ }
+
+ fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+ let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
+ let stored_ty = self.cx.val_ty(val);
+ let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
+
+ if dest_ptr_ty == stored_ptr_ty {
+ ptr
+ }
+ else {
+ self.bitcast(ptr, stored_ptr_ty)
+ }
+ }
+
+ pub fn current_func(&self) -> Function<'gcc> {
+ self.block.get_function()
+ }
+
+ fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ // TODO(antoyo): remove when the API supports a different type for functions.
+ let func: Function<'gcc> = self.cx.rvalue_as_function(func);
+ let args = self.check_call("call", func, args);
+
+ // gccjit requires the result of a function call to be used, even when it's otherwise unused.
+ // That's why we assign the result to a local or call add_eval().
+ let return_type = func.get_return_type();
+ let void_type = self.context.new_type::<()>();
+ let current_func = self.block.get_function();
+ if return_type != void_type {
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+ result.to_rvalue()
+ }
+ else {
+ self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
+ // Return dummy value when not having return value.
+ self.context.new_rvalue_from_long(self.isize_type, 0)
+ }
+ }
+
+ fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ let args = self.check_ptr_call("call", func_ptr, args);
+
+ // gccjit requires the result of a function call to be used, even when it's otherwise unused.
+ // That's why we assign the result to a local or call add_eval().
+ let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
+ let return_type = gcc_func.get_return_type();
+ let void_type = self.context.new_type::<()>();
+ let current_func = self.block.get_function();
+
+ if return_type != void_type {
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ let func_name = format!("{:?}", func_ptr);
+ let args = llvm::adjust_intrinsic_arguments(&self, gcc_func, args, &func_name);
+ self.block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ result.to_rvalue()
+ }
+ else {
+ #[cfg(not(feature="master"))]
+ if gcc_func.get_param_count() == 0 {
+ // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
+ self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
+ }
+ else {
+ self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ }
+ #[cfg(feature="master")]
+ self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+ // Return dummy value when not having return value.
+ let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
+ self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+ result.to_rvalue()
+ }
+ }
+
+ pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ // gccjit requires the result of a function call to be used, even when it's otherwise unused.
+ // That's why we assign the result to a local.
+ let return_type = self.context.new_type::<bool>();
+ let current_func = self.block.get_function();
+ // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
+ self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+ result.to_rvalue()
+ }
+}
+
+impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
+ type CodegenCx = CodegenCx<'gcc, 'tcx>;
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.cx.tcx()
+ }
+}
+
+impl HasDataLayout for Builder<'_, '_, '_> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ self.cx.data_layout()
+ }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ self.cx.handle_layout_err(err, span, ty)
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ self.cx.handle_fn_abi_err(err, span, fn_abi_request)
+ }
+}
+
+impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
+ type Target = CodegenCx<'gcc, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ self.cx
+ }
+}
+
+impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
+ type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
+ type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
+ type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
+ type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
+ type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
+
+ type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
+ type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
+ type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
+}
+
+impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
+ Builder::with_cx(cx, block)
+ }
+
+ fn llbb(&self) -> Block<'gcc> {
+ self.block
+ }
+
+ fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
+ let func = cx.rvalue_as_function(func);
+ func.new_block(name)
+ }
+
+ fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
+ let func = self.current_func();
+ func.new_block(name)
+ }
+
+ fn switch_to_block(&mut self, block: Self::BasicBlock) {
+ self.block = block;
+ }
+
+ fn ret_void(&mut self) {
+ self.llbb().end_with_void_return(None)
+ }
+
+ fn ret(&mut self, value: RValue<'gcc>) {
+ let value =
+ if self.structs_as_pointer.borrow().contains(&value) {
+ // NOTE: hack to work around a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ value.dereference(None).to_rvalue()
+ }
+ else {
+ value
+ };
+ self.llbb().end_with_return(None, value);
+ }
+
+ fn br(&mut self, dest: Block<'gcc>) {
+ self.llbb().end_with_jump(None, dest)
+ }
+
+ fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
+ self.llbb().end_with_conditional(None, cond, then_block, else_block)
+ }
+
+ fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
+ let mut gcc_cases = vec![];
+ let typ = self.val_ty(value);
+ for (on_val, dest) in cases {
+ let on_val = self.const_uint_big(typ, on_val);
+ gcc_cases.push(self.context.new_case(on_val, on_val, dest));
+ }
+ self.block.end_with_switch(None, value, default_block, &gcc_cases);
+ }
+
+ fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+ // TODO(bjorn3): Properly implement unwinding.
+ let call_site = self.call(typ, func, args, None);
+ let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
+ self.llbb().end_with_conditional(None, condition, then, catch);
+ call_site
+ }
+
+ fn unreachable(&mut self) {
+ let func = self.context.get_builtin_function("__builtin_unreachable");
+ self.block.add_eval(None, self.context.new_call(None, func, &[]));
+ let return_type = self.block.get_function().get_return_type();
+ let void_type = self.context.new_type::<()>();
+ if return_type == void_type {
+ self.block.end_with_void_return(None)
+ }
+ else {
+ let return_value = self.current_func()
+ .new_local(None, return_type, "unreachableReturn");
+ self.block.end_with_return(None, return_value)
+ }
+ }
+
+ fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_add(a, b)
+ }
+
+ fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a + b
+ }
+
+ fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_sub(a, b)
+ }
+
+ fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a - b
+ }
+
+ fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_mul(a, b)
+ }
+
+ fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_udiv(a, b)
+ }
+
+ fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): poison if not exact.
+ let a_type = a.get_type().to_unsigned(self);
+ let a = self.gcc_int_cast(a, a_type);
+ let b_type = b.get_type().to_unsigned(self);
+ let b = self.gcc_int_cast(b, b_type);
+ a / b
+ }
+
+ fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_sdiv(a, b)
+ }
+
+ fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): poison if not exact.
+ // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
+ // should be the same.
+ let typ = a.get_type().to_signed(self);
+ let b = self.context.new_cast(None, b, typ);
+ a / b
+ }
+
+ fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a / b
+ }
+
+ fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_urem(a, b)
+ }
+
+ fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_srem(a, b)
+ }
+
+ fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ if a.get_type().is_compatible_with(self.cx.float_type) {
+ let fmodf = self.context.get_builtin_function("fmodf");
+ // FIXME(antoyo): this seems to produce the wrong result.
+ return self.context.new_call(None, fmodf, &[a, b]);
+ }
+ assert_eq!(a.get_type().unqualified(), self.cx.double_type);
+
+ let fmod = self.context.get_builtin_function("fmod");
+ return self.context.new_call(None, fmod, &[a, b]);
+ }
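+
+ // NOTE: as a concrete example, frem(7.5, 2.0) on doubles lowers to
+ // fmod(7.5, 2.0) == 1.5; like Rust's `%` on floats, fmod keeps the sign
+ // of the dividend.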
+
+ fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_shl(a, b)
+ }
+
+ fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_lshr(a, b)
+ }
+
+ fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): check whether `>>` behaves as an arithmetic shift here.
+ // It seems to when the value is signed.
+ self.gcc_lshr(a, b)
+ }
+
+ fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_and(a, b)
+ }
+
+ fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.cx.gcc_or(a, b)
+ }
+
+ fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_xor(a, b)
+ }
+
+ fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_neg(a)
+ }
+
+ fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+ self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+ }
+
+ fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_not(a)
+ }
+
+ fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a + b
+ }
+
+ fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_add(a, b)
+ }
+
+ fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a - b
+ }
+
+ fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): should generate poison value?
+ self.gcc_sub(a, b)
+ }
+
+ fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ a * b
+ }
+
+ fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
+ self.gcc_checked_binop(oop, typ, lhs, rhs)
+ }
+
+ fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
+ // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
+ // Ideally, we shouldn't need this check.
+ let aligned_type =
+ if ty == self.cx.u128_type || ty == self.cx.i128_type {
+ ty
+ }
+ else {
+ ty.get_aligned(align.bytes())
+ };
+ // TODO(antoyo): It might be better to return an LValue, but fixing the rustc API is non-trivial.
+ self.stack_var_count.set(self.stack_var_count.get() + 1);
+ self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
+ }
+
+ fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
+ let block = self.llbb();
+ let function = block.get_function();
+ // NOTE: instead of returning the dereference here, we have to assign it to a variable in
+ // the current basic block. Otherwise, it could be used in another basic block, causing a
+ // dereference after a drop, for instance.
+ // TODO(antoyo): handle align of the load instruction.
+ let ptr = self.context.new_cast(None, ptr, pointee_ty.make_pointer());
+ let deref = ptr.dereference(None).to_rvalue();
+ unsafe { RETURN_VALUE_COUNT += 1 };
+ let loaded_value = function.new_local(None, pointee_ty, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
+ block.add_assignment(None, loaded_value, deref);
+ loaded_value.to_rvalue()
+ }
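+
+ // NOTE: a sketch of the problem the local above avoids: if the dereference
+ // were returned directly, another basic block could re-evaluate `*ptr` after
+ // the pointee was dropped, instead of reusing the value loaded here.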
+
+ fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): use ty.
+ let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
+ ptr.dereference(None).to_rvalue()
+ }
+
+ fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
+ // TODO(antoyo): use ty.
+ // TODO(antoyo): handle alignment.
+ let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
+ let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+ let volatile_const_void_ptr_type = self.context.new_type::<()>()
+ .make_const()
+ .make_volatile()
+ .make_pointer();
+ let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+ self.context.new_call(None, atomic_load, &[ptr, ordering])
+ }
+
+ fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
+ assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+
+ if place.layout.is_zst() {
+ return OperandRef::new_zst(self, place.layout);
+ }
+
+ fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
+ let vr = scalar.valid_range(bx);
+ match scalar.primitive() {
+ abi::Int(..) => {
+ if !scalar.is_always_valid(bx) {
+ bx.range_metadata(load, vr);
+ }
+ }
+ abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
+ bx.nonnull_metadata(load);
+ }
+ _ => {}
+ }
+ }
+
+ let val =
+ if let Some(llextra) = place.llextra {
+ OperandValue::Ref(place.llval, Some(llextra), place.align)
+ }
+ else if place.layout.is_gcc_immediate() {
+ let load = self.load(
+ place.layout.gcc_type(self, false),
+ place.llval,
+ place.align,
+ );
+ if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+ scalar_load_metadata(self, load, scalar);
+ }
+ OperandValue::Immediate(self.to_immediate(load, place.layout))
+ }
+ else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+ let b_offset = a.size(self).align_to(b.align(self).abi);
+ let pair_type = place.layout.gcc_type(self, false);
+
+ let mut load = |i, scalar: &abi::Scalar, align| {
+ let llptr = self.struct_gep(pair_type, place.llval, i as u64);
+ let llty = place.layout.scalar_pair_element_gcc_type(self, i, false);
+ let load = self.load(llty, llptr, align);
+ scalar_load_metadata(self, load, scalar);
+ if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
+ };
+
+ OperandValue::Pair(
+ load(0, a, place.align),
+ load(1, b, place.align.restrict_for_offset(b_offset)),
+ )
+ }
+ else {
+ OperandValue::Ref(place.llval, None, place.align)
+ };
+
+ OperandRef { val, layout: place.layout }
+ }
+
+ fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
+ let zero = self.const_usize(0);
+ let count = self.const_usize(count);
+ let start = dest.project_index(&mut self, zero).llval;
+ let end = dest.project_index(&mut self, count).llval;
+
+ let header_bb = self.append_sibling_block("repeat_loop_header");
+ let body_bb = self.append_sibling_block("repeat_loop_body");
+ let next_bb = self.append_sibling_block("repeat_loop_next");
+
+ let ptr_type = start.get_type();
+ let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
+ let current_val = current.to_rvalue();
+ self.assign(current, start);
+
+ self.br(header_bb);
+
+ self.switch_to_block(header_bb);
+ let keep_going = self.icmp(IntPredicate::IntNE, current_val, end);
+ self.cond_br(keep_going, body_bb, next_bb);
+
+ self.switch_to_block(body_bb);
+ let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+ cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
+
+ let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
+ self.llbb().add_assignment(None, current, next);
+ self.br(header_bb);
+
+ self.switch_to_block(next_bb);
+ self
+ }
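+
+ // NOTE: the blocks built above amount to the following loop (sketch):
+ //
+ //     current = start;
+ //     while current != end {         // repeat_loop_header
+ //         *current = cg_elem;        // repeat_loop_body
+ //         current = current.add(1);
+ //     }
+ //     // repeat_loop_next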
+
+ fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
+ // TODO(antoyo)
+ }
+
+ fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
+ self.store_with_flags(val, ptr, align, MemFlags::empty())
+ }
+
+ fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align, _flags: MemFlags) -> RValue<'gcc> {
+ let ptr = self.check_store(val, ptr);
+ let destination = ptr.dereference(None);
+ // NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast
+ // the pointer to a pointer to the aligned type so that the store gets the proper alignment.
+ let destination_type = destination.to_rvalue().get_type().unqualified();
+ let aligned_type = destination_type.get_aligned(align.bytes()).make_pointer();
+ let aligned_destination = self.cx.context.new_bitcast(None, ptr, aligned_type);
+ let aligned_destination = aligned_destination.dereference(None);
+ self.llbb().add_assignment(None, aligned_destination, val);
+ // TODO(antoyo): handle align and flags.
+ // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
+ self.cx.context.new_rvalue_zero(self.type_i32())
+ }
+
+ fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
+ // TODO(antoyo): handle alignment.
+ let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
+ let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+ let volatile_const_void_ptr_type = self.context.new_type::<()>()
+ .make_volatile()
+ .make_pointer();
+ let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+
+ // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
+ // the following cast is required to avoid this error:
+ // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
+ let int_type = atomic_store.get_param(1).to_rvalue().get_type();
+ let value = self.context.new_cast(None, value, int_type);
+ self.llbb()
+ .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
+ }
+
+ fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+ let mut result = ptr;
+ for index in indices {
+ result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
+ }
+ result
+ }
+
+ fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+ // FIXME(antoyo): it would be safer to do the same thing (a loop) as gep.
+ // TODO(antoyo): specify inbounds somehow.
+ match indices.len() {
+ 1 => {
+ self.context.new_array_access(None, ptr, indices[0]).get_address(None)
+ },
+ 2 => {
+ let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0?
+ self.context.new_array_access(None, array, indices[1]).get_address(None)
+ },
+ _ => unimplemented!(),
+ }
+ }
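+
+ // NOTE: in the two-index case above, `inbounds_gep(ptr, &[zero, i])` is
+ // essentially `&(*ptr)[i]`, which is why the first index is expected to be 0
+ // (see the TODO above).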
+
+ fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+ // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
+ assert_eq!(idx as usize as u64, idx);
+ let value = ptr.dereference(None).to_rvalue();
+
+ if value_type.dyncast_array().is_some() {
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ let element = self.context.new_array_access(None, value, index);
+ element.get_address(None)
+ }
+ else if let Some(vector_type) = value_type.dyncast_vector() {
+ let array_type = vector_type.get_element_type().make_pointer();
+ let array = self.bitcast(ptr, array_type);
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ let element = self.context.new_array_access(None, array, index);
+ element.get_address(None)
+ }
+ else if let Some(struct_type) = value_type.is_struct() {
+ ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+
+ /* Casts */
+ fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): check that it indeed truncates the value.
+ self.gcc_int_cast(value, dest_ty)
+ }
+
+ fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): check that it indeed sign extends the value.
+ if dest_ty.dyncast_vector().is_some() {
+ // TODO(antoyo): nothing to do as it is only for LLVM?
+ return value;
+ }
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.gcc_float_to_uint_cast(value, dest_ty)
+ }
+
+ fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.gcc_float_to_int_cast(value, dest_ty)
+ }
+
+ fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.gcc_uint_to_float_cast(value, dest_ty)
+ }
+
+ fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.gcc_int_to_float_cast(value, dest_ty)
+ }
+
+ fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): make sure it truncates.
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.context.new_cast(None, value, dest_ty)
+ }
+
+ fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ let usize_value = self.cx.const_bitcast(value, self.cx.type_isize());
+ self.intcast(usize_value, dest_ty, false)
+ }
+
+ fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ let usize_value = self.intcast(value, self.cx.type_isize(), false);
+ self.cx.const_bitcast(usize_value, dest_ty)
+ }
+
+ fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ self.cx.const_bitcast(value, dest_ty)
+ }
+
+ fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
+ // NOTE: is_signed is for value, not dest_typ.
+ self.gcc_int_cast(value, dest_typ)
+ }
+
+ fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+ let val_type = value.get_type();
+ match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
+ (false, true) => {
+ // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
+ // a pointer, which is not supported by gccjit.
+ return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
+ },
+ (false, false) => {
+ // When they are not pointers, we want a transmute (or reinterpret_cast).
+ self.bitcast(value, dest_ty)
+ },
+ (true, true) => self.cx.context.new_cast(None, value, dest_ty),
+ (true, false) => unimplemented!(),
+ }
+ }
+
+ /* Comparisons */
+ fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+ self.gcc_icmp(op, lhs, rhs)
+ }
+
+ fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
+ self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+ }
+
+ /* Miscellaneous instructions */
+ fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+ assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
+ let size = self.intcast(size, self.type_size_t(), false);
+ let _is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+ let memcpy = self.context.get_builtin_function("memcpy");
+ // TODO(antoyo): handle aligns and is_volatile.
+ self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+ }
+
+ fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+ if flags.contains(MemFlags::NONTEMPORAL) {
+ // HACK(nox): This is inefficient but there is no nontemporal memmove.
+ let val = self.load(src.get_type().get_pointee().expect("get_pointee"), src, src_align);
+ let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
+ self.store_with_flags(val, ptr, dst_align, flags);
+ return;
+ }
+ let size = self.intcast(size, self.type_size_t(), false);
+ let _is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
+
+ let memmove = self.context.get_builtin_function("memmove");
+ // TODO(antoyo): handle is_volatile.
+ self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
+ }
+
+ fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
+ let _is_volatile = flags.contains(MemFlags::VOLATILE);
+ let ptr = self.pointercast(ptr, self.type_i8p());
+ let memset = self.context.get_builtin_function("memset");
+ // TODO(antoyo): handle align and is_volatile.
+ let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
+ let size = self.intcast(size, self.type_size_t(), false);
+ self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
+ }
+
+ fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
+ let func = self.current_func();
+ let variable = func.new_local(None, then_val.get_type(), "selectVar");
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+ self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+ then_block.add_assignment(None, variable, then_val);
+ then_block.end_with_jump(None, after_block);
+
+ if !then_val.get_type().is_compatible_with(else_val.get_type()) {
+ else_val = self.context.new_cast(None, else_val, then_val.get_type());
+ }
+ else_block.add_assignment(None, variable, else_val);
+ else_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+ // state need to be updated.
+ self.switch_to_block(after_block);
+
+ variable.to_rvalue()
+ }
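+
+ // NOTE: since gccjit has no select instruction, the code above materializes
+ // `if cond { then_val } else { else_val }` as a local assigned in two
+ // explicit branches that rejoin in `after_block`.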
+
+ #[allow(dead_code)]
+ fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+ // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
+ assert_eq!(idx as usize as u64, idx);
+ let value_type = aggregate_value.get_type();
+
+ if value_type.dyncast_array().is_some() {
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ let element = self.context.new_array_access(None, aggregate_value, index);
+ element.get_address(None)
+ }
+ else if value_type.dyncast_vector().is_some() {
+ panic!();
+ }
+ else if let Some(pointer_type) = value_type.get_pointee() {
+ if let Some(struct_type) = pointer_type.is_struct() {
+ // NOTE: hack to work around a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+ else if let Some(struct_type) = value_type.is_struct() {
+ aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+
+ fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+ // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
+ assert_eq!(idx as usize as u64, idx);
+ let value_type = aggregate_value.get_type();
+
+ let lvalue =
+ if value_type.dyncast_array().is_some() {
+ let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+ self.context.new_array_access(None, aggregate_value, index)
+ }
+ else if value_type.dyncast_vector().is_some() {
+ panic!();
+ }
+ else if let Some(pointer_type) = value_type.get_pointee() {
+ if let Some(struct_type) = pointer_type.is_struct() {
+ // NOTE: hack to work around a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ }
+ }
+ else {
+ panic!("Unexpected type {:?}", value_type);
+ };
+
+ let lvalue_type = lvalue.to_rvalue().get_type();
+ let value =
+ // NOTE: sometimes, rustc will create a value with the wrong type.
+ if lvalue_type != value.get_type() {
+ self.context.new_cast(None, value, lvalue_type)
+ }
+ else {
+ value
+ };
+
+ self.llbb().add_assignment(None, lvalue, value);
+
+ aggregate_value
+ }
+
+ fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn cleanup_landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>) -> RValue<'gcc> {
+ let field1 = self.context.new_field(None, self.u8_type.make_pointer(), "landing_pad_field_1");
+ let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_2");
+ let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
+ self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
+ .to_rvalue()
+ // TODO(antoyo): Properly implement unwinding.
+ // The code above is only there to make compilation work: rustc_codegen_ssa
+ // now seems to call the unwinding builder methods even with panic=abort.
+ }
+
+ fn resume(&mut self, _exn: RValue<'gcc>) {
+ // TODO(bjorn3): Properly implement unwinding.
+ self.unreachable();
+ }
+
+ fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
+ unimplemented!();
+ }
+
+ fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) {
+ unimplemented!();
+ }
+
+ fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
+ unimplemented!();
+ }
+
+ fn catch_switch(
+ &mut self,
+ _parent: Option<RValue<'gcc>>,
+ _unwind: Option<Block<'gcc>>,
+ _handlers: &[Block<'gcc>],
+ ) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ // Atomic Operations
+ fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+ let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
+ self.llbb().add_assignment(None, expected, cmp);
+ let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
+
+ let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
+ let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
+ let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align.
+
+ let value_type = result.to_rvalue().get_type();
+ if let Some(struct_type) = value_type.is_struct() {
+ self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
+ // NOTE: since `success` contains the call to the intrinsic, it must be stored
+ // before `expected` so that `expected` is stored after the call (which updates it).
+ self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
+ }
+ // TODO(antoyo): handle when value is not a struct.
+
+ result.to_rvalue()
+ }
+
+ fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+ let size = src.get_type().get_size();
+ let name =
+ match op {
+ AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
+ AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
+ AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
+ AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
+ AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
+ AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
+ AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
+ AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+ AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+ AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+ AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+ };
+
+ let atomic_function = self.context.get_builtin_function(name);
+ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+
+ let void_ptr_type = self.context.new_type::<*mut ()>();
+ let volatile_void_ptr_type = void_ptr_type.make_volatile();
+ let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+ // FIXME(antoyo): not sure why, but we have the wrong type here.
+ let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
+ let src = self.context.new_cast(None, src, new_src_type);
+ let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
+ self.context.new_cast(None, res, src.get_type())
+ }
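+
+ // NOTE: for example, an AtomicAdd on a 4-byte operand resolves to the GCC
+ // builtin `__atomic_fetch_add_4(dst, src, order)`, which returns the value
+ // `dst` held before the addition.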
+
+ fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
+ let name =
+ match scope {
+ SynchronizationScope::SingleThread => "__atomic_signal_fence",
+ SynchronizationScope::CrossThread => "__atomic_thread_fence",
+ };
+ let thread_fence = self.context.get_builtin_function(name);
+ let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+ self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
+ }
+
+ fn set_invariant_load(&mut self, load: RValue<'gcc>) {
+ // NOTE: hack to treat a vtable function pointer as a non-global-variable function pointer.
+ self.normal_function_addresses.borrow_mut().insert(load);
+ // TODO(antoyo)
+ }
+
+ fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+ // TODO(antoyo)
+ }
+
+ fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
+ // TODO(antoyo)
+ }
+
+ fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
+ // FIXME(antoyo): remove when having a proper API.
+ let gcc_func = unsafe { std::mem::transmute(func) };
+ if self.functions.borrow().values().any(|value| *value == gcc_func) {
+ self.function_call(func, args, funclet)
+ }
+ else {
+ // If it's not a function that was defined, it's a function pointer.
+ self.function_ptr_call(func, args, funclet)
+ }
+ }
+
+ fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ // FIXME(antoyo): this does not zero-extend.
+ if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
+ // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
+ // Fix the code in codegen_ssa::base::from_immediate.
+ return value;
+ }
+ self.gcc_int_cast(value, dest_typ)
+ }
+
+ fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
+ self.cx
+ }
+
+ fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
+ // FIXME(bjorn3): implement
+ }
+
+ fn set_span(&mut self, _span: Span) {}
+
+ fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+ if self.cx().val_ty(val) == self.cx().type_i1() {
+ self.zext(val, self.cx().type_i8())
+ }
+ else {
+ val
+ }
+ }
+
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
+ if scalar.is_bool() {
+ return self.trunc(val, self.cx().type_i1());
+ }
+ val
+ }
+
+ fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+ None
+ }
+
+ fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
+ None
+ }
+
+ fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
+ unimplemented!();
+ }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ #[cfg(feature="master")]
+ pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
+ let struct_type = mask.get_type().is_struct().expect("mask of struct type");
+
+ // TODO(antoyo): use a recursive unqualified() here.
+ let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type");
+ let element_type = vector_type.get_element_type();
+ let vec_num_units = vector_type.get_num_units();
+
+ let mask_num_units = struct_type.get_field_count();
+ let mut vector_elements = vec![];
+ let mask_element_type =
+ if element_type.is_integral() {
+ element_type
+ }
+ else {
+ #[cfg(feature="master")]
+ {
+ self.cx.type_ix(element_type.get_size() as u64 * 8)
+ }
+ #[cfg(not(feature="master"))]
+ self.int_type
+ };
+ for i in 0..mask_num_units {
+ let field = struct_type.get_field(i as i32);
+ vector_elements.push(self.context.new_cast(None, mask.access_field(None, field).to_rvalue(), mask_element_type));
+ }
+
+ // NOTE: the mask needs to be the same length as the input vectors, so add the missing
+ // elements in the mask if needed.
+ for _ in mask_num_units..vec_num_units {
+ vector_elements.push(self.context.new_rvalue_zero(mask_element_type));
+ }
+
+ let array_type = self.context.new_array_type(None, element_type, vec_num_units as i32);
+ let result_type = self.context.new_vector_type(element_type, mask_num_units as u64);
+ let (v1, v2) =
+ if vec_num_units < mask_num_units {
+ // NOTE: the mask needs to be the same length as the input vectors, so join the 2
+ // vectors and create a dummy second vector.
+ // TODO(antoyo): switch to using new_vector_access.
+ let array = self.context.new_bitcast(None, v1, array_type);
+ let mut elements = vec![];
+ for i in 0..vec_num_units {
+ elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
+ }
+ // TODO(antoyo): switch to using new_vector_access.
+ let array = self.context.new_bitcast(None, v2, array_type);
+ for i in 0..(mask_num_units - vec_num_units) {
+ elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
+ }
+ let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements);
+ let zero = self.context.new_rvalue_zero(element_type);
+ let v2 = self.context.new_rvalue_from_vector(None, result_type, &vec![zero; mask_num_units]);
+ (v1, v2)
+ }
+ else {
+ (v1, v2)
+ };
+
+ let new_mask_num_units = std::cmp::max(mask_num_units, vec_num_units);
+ let mask_type = self.context.new_vector_type(mask_element_type, new_mask_num_units as u64);
+ let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
+ let result = self.context.new_rvalue_vector_perm(None, v1, v2, mask);
+
+ if vec_num_units != mask_num_units {
+ // NOTE: if padding was added, only select the number of elements of the masks to
+ // remove that padding in the result.
+ let mut elements = vec![];
+ // TODO(antoyo): switch to using new_vector_access.
+ let array = self.context.new_bitcast(None, result, array_type);
+ for i in 0..mask_num_units {
+ elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
+ }
+ self.context.new_rvalue_from_vector(None, result_type, &elements)
+ }
+ else {
+ result
+ }
+ }
+
+ #[cfg(not(feature="master"))]
+ pub fn shuffle_vector(&mut self, _v1: RValue<'gcc>, _v2: RValue<'gcc>, _mask: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ #[cfg(feature="master")]
+ pub fn vector_reduce<F>(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc>
+ where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
+ {
+ let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
+ let element_count = vector_type.get_num_units();
+ let mut vector_elements = vec![];
+ for i in 0..element_count {
+ vector_elements.push(i);
+ }
+ let mask_type = self.context.new_vector_type(self.int_type, element_count as u64);
+ let mut shift = 1;
+ let mut res = src;
+ while shift < element_count {
+ let vector_elements: Vec<_> =
+ vector_elements.iter()
+ .map(|i| self.context.new_rvalue_from_int(self.int_type, ((i + shift) % element_count) as i32))
+ .collect();
+ let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
+ let shifted = self.context.new_rvalue_vector_perm(None, res, res, mask);
+ shift *= 2;
+ res = op(res, shifted, &self.context);
+ }
+ self.context.new_vector_access(None, res, self.context.new_rvalue_zero(self.int_type))
+ .to_rvalue()
+ }
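+
+ // NOTE: the loop above is a log2(n) tree reduction via lane rotations.
+ // Sketch for a 4-lane vector [a, b, c, d] with op = +:
+ //
+ //     shift 1: [a, b, c, d] + [b, c, d, a] = [a+b, b+c, c+d, d+a]
+ //     shift 2: previous + its rotation by 2 = [a+b+c+d, ...]
+ //
+ // and the final result is read out of lane 0.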
+
+ #[cfg(not(feature="master"))]
+ pub fn vector_reduce<F>(&mut self, _src: RValue<'gcc>, _op: F) -> RValue<'gcc>
+ where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
+ {
+ unimplemented!();
+ }
+
+ pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> {
+ self.vector_reduce(src, |a, b, context| context.new_binary_op(None, op, a.get_type(), a, b))
+ }
+
+ pub fn vector_reduce_fadd_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ pub fn vector_reduce_fmul_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ // Inspired by Hacker's Delight min implementation.
+ pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+ self.vector_reduce(src, |a, b, context| {
+ let differences_or_zeros = difference_or_zero(a, b, context);
+ context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
+ })
+ }
+
+ // Inspired by Hacker's Delight max implementation.
+ pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+ self.vector_reduce(src, |a, b, context| {
+ let differences_or_zeros = difference_or_zero(a, b, context);
+ context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
+ })
+ }
+
+ pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> {
+ // cond is a vector of integers, not of bools.
+ let cond_type = cond.get_type();
+ let vector_type = cond_type.unqualified().dyncast_vector().expect("vector type");
+ let num_units = vector_type.get_num_units();
+ let element_type = vector_type.get_element_type();
+ let zeros = vec![self.context.new_rvalue_zero(element_type); num_units];
+ let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros);
+
+ let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros);
+ let then_vals = masks & then_val;
+
+ let ones = vec![self.context.new_rvalue_one(element_type); num_units];
+ let ones = self.context.new_rvalue_from_vector(None, cond_type, &ones);
+ let inverted_masks = masks + ones;
+ // NOTE: sometimes, the type of else_val can be different from the type of then_val in
+ // libgccjit (vector of int vs vector of int32_t), but they need to be the same for the
+ // AND operation to work.
+ let else_val = self.context.new_bitcast(None, else_val, then_val.get_type());
+ let else_vals = inverted_masks & else_val;
+
+ then_vals | else_vals
+ }
+}
+
+fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> {
+ let difference = a - b;
+ let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, b, a);
+ difference & masks
+}
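+
+ // NOTE: this is "difference or zero" (doz) from Hacker's Delight. GCC vector
+ // comparisons yield all-ones (-1) per lane where true and 0 where false, so
+ // `difference & masks` keeps `a - b` in the lanes where `b >= a` and zeroes it
+ // elsewhere.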
+
+impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+ fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
+ // Forward to the `get_static` method of `CodegenCx`
+ self.cx().get_static(def_id).get_address(None)
+ }
+}
+
+impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ self.cx.param_env()
+ }
+}
+
+impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.cx.target_spec()
+ }
+}
+
+pub trait ToGccComp {
+ fn to_gcc_comparison(&self) -> ComparisonOp;
+}
+
+impl ToGccComp for IntPredicate {
+ fn to_gcc_comparison(&self) -> ComparisonOp {
+ match *self {
+ IntPredicate::IntEQ => ComparisonOp::Equals,
+ IntPredicate::IntNE => ComparisonOp::NotEquals,
+ IntPredicate::IntUGT => ComparisonOp::GreaterThan,
+ IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
+ IntPredicate::IntULT => ComparisonOp::LessThan,
+ IntPredicate::IntULE => ComparisonOp::LessThanEquals,
+ IntPredicate::IntSGT => ComparisonOp::GreaterThan,
+ IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
+ IntPredicate::IntSLT => ComparisonOp::LessThan,
+ IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
+ }
+ }
+}
+
+impl ToGccComp for RealPredicate {
+ fn to_gcc_comparison(&self) -> ComparisonOp {
+ // TODO(antoyo): check that ordered vs non-ordered is respected.
+ match *self {
+ RealPredicate::RealPredicateFalse => unreachable!(),
+ RealPredicate::RealOEQ => ComparisonOp::Equals,
+ RealPredicate::RealOGT => ComparisonOp::GreaterThan,
+ RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
+ RealPredicate::RealOLT => ComparisonOp::LessThan,
+ RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
+ RealPredicate::RealONE => ComparisonOp::NotEquals,
+ RealPredicate::RealORD => unreachable!(),
+ RealPredicate::RealUNO => unreachable!(),
+ RealPredicate::RealUEQ => ComparisonOp::Equals,
+ RealPredicate::RealUGT => ComparisonOp::GreaterThan,
+ RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
+ RealPredicate::RealULT => ComparisonOp::LessThan,
+ RealPredicate::RealULE => ComparisonOp::LessThanEquals,
+ RealPredicate::RealUNE => ComparisonOp::NotEquals,
+ RealPredicate::RealPredicateTrue => unreachable!(),
+ }
+ }
+}
+
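+ // NOTE: the variants below mirror GCC's __ATOMIC_RELAXED..__ATOMIC_SEQ_CST
+ // built-in memory model constants (0 through 5, in this order), which is why
+ // the enum is #[repr(C)] and can be cast to i32 directly.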
+#[repr(C)]
+#[allow(non_camel_case_types)]
+enum MemOrdering {
+ __ATOMIC_RELAXED,
+ __ATOMIC_CONSUME,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_RELEASE,
+ __ATOMIC_ACQ_REL,
+ __ATOMIC_SEQ_CST,
+}
+
+trait ToGccOrdering {
+ fn to_gcc(self) -> i32;
+}
+
+impl ToGccOrdering for AtomicOrdering {
+ fn to_gcc(self) -> i32 {
+ use MemOrdering::*;
+
+ let ordering =
+ match self {
+ AtomicOrdering::Unordered => __ATOMIC_RELAXED,
+ AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+ AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
+ AtomicOrdering::Release => __ATOMIC_RELEASE,
+ AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
+ AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
+ };
+ ordering as i32
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs
new file mode 100644
index 000000000..c1041125e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/callee.rs
@@ -0,0 +1,77 @@
+use gccjit::{FunctionType, RValue};
+use rustc_codegen_ssa::traits::BaseTypeMethods;
+use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+
+use crate::abi::FnAbiGccExt;
+use crate::context::CodegenCx;
+
+/// Codegens a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `cx`: the crate context
+/// - `instance`: the instance to be instantiated
+pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> {
+ let tcx = cx.tcx();
+
+ assert!(!instance.substs.needs_infer());
+ assert!(!instance.substs.has_escaping_bound_vars());
+
+ if let Some(&func) = cx.function_instances.borrow().get(&instance) {
+ return func;
+ }
+
+ let sym = tcx.symbol_name(instance).name;
+
+ let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+
+ let func =
+ if let Some(func) = cx.get_declared_value(&sym) {
+ // Create a fn pointer with the new signature.
+ let ptrty = fn_abi.ptr_to_gcc_type(cx);
+
+ // This is subtle and surprising, but sometimes we have to bitcast
+ // the resulting fn pointer. The reason has to do with external
+ // functions. If you have two crates that both bind the same C
+ // library, they may not use precisely the same types: for
+ // example, they will probably each declare their own structs,
+ // which are distinct types from LLVM's point of view (nominal
+ // types).
+ //
+ // Now, if those two crates are linked into an application, and
+ // they contain inlined code, you can wind up with a situation
+ // where both of those functions wind up being loaded into this
+ // application simultaneously. In that case, the same function
+ // (from LLVM's point of view) requires two types. But of course
+ // LLVM won't allow one function to have two types.
+ //
+ // What we currently do, therefore, is declare the function with
+ // one of the two types (whichever happens to come first) and then
+ // bitcast as needed when the function is referenced to make sure
+ // it has the type we expect.
+ //
+ // This can occur on either a crate-local or crate-external
+ // reference. It also occurs when testing libcore and in some
+ // other weird situations. Annoying.
+ if cx.val_ty(func) != ptrty {
+ // TODO(antoyo): cast the pointer.
+ func
+ }
+ else {
+ func
+ }
+ }
+ else {
+ cx.linkage.set(FunctionType::Extern);
+ let func = cx.declare_fn(&sym, &fn_abi);
+
+ // TODO(antoyo): set linkage and attributes.
+ func
+ };
+
+ cx.function_instances.borrow_mut().insert(instance, func);
+
+ func
+}
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
new file mode 100644
index 000000000..ccb6cbbc2
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -0,0 +1,479 @@
+use gccjit::LValue;
+use gccjit::{RValue, Type, ToRValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{
+ BaseTypeMethods,
+ ConstMethods,
+ DerivedTypeMethods,
+ MiscMethods,
+ StaticMethods,
+};
+use rustc_middle::mir::Mutability;
+use rustc_middle::ty::layout::{TyAndLayout, LayoutOf};
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_target::abi::{self, HasDataLayout, Pointer, Size};
+
+use crate::consts::const_alloc_to_gcc;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> {
+ bytes_in_context(self, bytes)
+ }
+
+ fn global_string(&self, string: &str) -> LValue<'gcc> {
+ // TODO(antoyo): handle non-null-terminated strings.
+ let string = self.context.new_string_literal(&*string);
+ let sym = self.generate_local_symbol_name("str");
+ let global = self.declare_private_global(&sym, self.val_ty(string));
+ global.global_set_initializer_rvalue(string);
+ global
+ // TODO(antoyo): set linkage.
+ }
+}
+
+pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) -> RValue<'gcc> {
+ let context = &cx.context;
+ let byte_type = context.new_type::<u8>();
+ let typ = context.new_array_type(None, byte_type, bytes.len() as i32);
+ let elements: Vec<_> =
+ bytes.iter()
+ .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32))
+ .collect();
+ context.new_array_constructor(None, typ, &elements)
+}
+
+pub fn type_is_pointer<'gcc>(typ: Type<'gcc>) -> bool {
+ typ.get_pointee().is_some()
+}
+
+impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+ if type_is_pointer(typ) {
+ self.context.new_null(typ)
+ }
+ else {
+ self.const_int(typ, 0)
+ }
+ }
+
+ fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+ let local = self.current_func.borrow().expect("func")
+ .new_local(None, typ, "undefined");
+ if typ.is_struct().is_some() {
+ // NOTE: hack to work around a limitation of the rustc API: see comment on
+ // CodegenCx.structs_as_pointer
+ let pointer = local.get_address(None);
+ self.structs_as_pointer.borrow_mut().insert(pointer);
+ pointer
+ }
+ else {
+ local.to_rvalue()
+ }
+ }
+
+ fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+ self.gcc_int(typ, int)
+ }
+
+ fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+ self.gcc_uint(typ, int)
+ }
+
+ fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+ self.gcc_uint_big(typ, num)
+ }
+
+ fn const_bool(&self, val: bool) -> RValue<'gcc> {
+ self.const_uint(self.type_i1(), val as u64)
+ }
+
+ fn const_i16(&self, i: i16) -> RValue<'gcc> {
+ self.const_int(self.type_i16(), i as i64)
+ }
+
+ fn const_i32(&self, i: i32) -> RValue<'gcc> {
+ self.const_int(self.type_i32(), i as i64)
+ }
+
+ fn const_u32(&self, i: u32) -> RValue<'gcc> {
+ self.const_uint(self.type_u32(), i as u64)
+ }
+
+ fn const_u64(&self, i: u64) -> RValue<'gcc> {
+ self.const_uint(self.type_u64(), i)
+ }
+
+ fn const_usize(&self, i: u64) -> RValue<'gcc> {
+ let bit_size = self.data_layout().pointer_size.bits();
+ if bit_size < 64 {
+ // make sure it doesn't overflow
+ assert!(i < (1 << bit_size));
+ }
+
+ self.const_uint(self.usize_type, i)
+ }
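+
+ // NOTE: for example, on a hypothetical 32-bit target pointer_size.bits() is 32,
+ // so any `i >= 1 << 32` would indicate a miscompile and trip the assert above.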
+
+ fn const_u8(&self, _i: u8) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn const_real(&self, typ: Type<'gcc>, val: f64) -> RValue<'gcc> {
+ self.context.new_rvalue_from_double(typ, val)
+ }
+
+ fn const_str(&self, s: &str) -> (RValue<'gcc>, RValue<'gcc>) {
+ let str_global = *self
+ .const_str_cache
+ .borrow_mut()
+ .raw_entry_mut()
+ .from_key(s)
+ .or_insert_with(|| (s.to_owned(), self.global_string(s)))
+ .1;
+ let len = s.len();
+ let cs = self.const_ptrcast(str_global.get_address(None),
+ self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self, true)),
+ );
+ (cs, self.const_usize(len as u64))
+ }
+
+ fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
+ let fields: Vec<_> = values.iter()
+ .map(|value| value.get_type())
+ .collect();
+ // TODO(antoyo): cache the type? It's anonymous, so probably not.
+ let typ = self.type_struct(&fields, packed);
+ let struct_type = typ.is_struct().expect("struct type");
+ self.context.new_struct_constructor(None, struct_type.as_type(), None, values)
+ }
+
+ fn const_to_opt_uint(&self, _v: RValue<'gcc>) -> Option<u64> {
+ // TODO(antoyo)
+ None
+ }
+
+ fn const_to_opt_u128(&self, _v: RValue<'gcc>, _sign_ext: bool) -> Option<u128> {
+ // TODO(antoyo)
+ None
+ }
+
+ fn zst_to_backend(&self, _ty: Type<'gcc>) -> RValue<'gcc> {
+ self.const_undef(self.type_ix(0))
+ }
+
+ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, ty: Type<'gcc>) -> RValue<'gcc> {
+ let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
+ match cv {
+ Scalar::Int(int) => {
+ let data = int.assert_bits(layout.size(self));
+
+ // FIXME(antoyo): there are some issues with using the u128 code that follows, so hard-code
+ // the paths for floating-point values.
+ if ty == self.float_type {
+ return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
+ }
+ else if ty == self.double_type {
+ return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
+ }
+
+ let value = self.const_uint_big(self.type_ix(bitsize), data);
+ // TODO(bjorn3): assert size is correct
+ self.const_bitcast(value, ty)
+ }
+ Scalar::Ptr(ptr, _size) => {
+ let (alloc_id, offset) = ptr.into_parts();
+ let base_addr =
+ match self.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ let init = const_alloc_to_gcc(self, alloc);
+ let alloc = alloc.inner();
+ let value =
+ match alloc.mutability {
+ Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+ _ => self.static_addr_of(init, alloc.align, None),
+ };
+ if !self.sess().fewer_names() {
+ // TODO(antoyo): set value name.
+ }
+ value
+ },
+ GlobalAlloc::Function(fn_instance) => {
+ self.get_fn_addr(fn_instance)
+ },
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc = self.tcx.global_alloc(self.tcx.vtable_allocation((ty, trait_ref))).unwrap_memory();
+ let init = const_alloc_to_gcc(self, alloc);
+ self.static_addr_of(init, alloc.inner().align, None)
+ }
+ GlobalAlloc::Static(def_id) => {
+ assert!(self.tcx.is_static(def_id));
+ self.get_static(def_id).get_address(None)
+ },
+ };
+ let ptr_type = base_addr.get_type();
+ let base_addr = self.const_bitcast(base_addr, self.usize_type);
+ let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
+ let ptr = self.const_bitcast(base_addr + offset, ptr_type);
+ if layout.primitive() != Pointer {
+ self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
+ }
+ else {
+ self.const_bitcast(ptr, ty)
+ }
+ }
+ }
+ }
+
+ fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value {
+ const_alloc_to_gcc(self, alloc)
+ }
+
+ fn from_const_alloc(&self, layout: TyAndLayout<'tcx>, alloc: ConstAllocation<'tcx>, offset: Size) -> PlaceRef<'tcx, RValue<'gcc>> {
+ assert_eq!(alloc.inner().align, layout.align.abi);
+ let ty = self.type_ptr_to(layout.gcc_type(self, true));
+ let value =
+ if layout.size == Size::ZERO {
+ let value = self.const_usize(alloc.inner().align.bytes());
+ self.context.new_cast(None, value, ty)
+ }
+ else {
+ let init = const_alloc_to_gcc(self, alloc);
+ let base_addr = self.static_addr_of(init, alloc.inner().align, None);
+
+ let array = self.const_bitcast(base_addr, self.type_i8p());
+ let value = self.context.new_array_access(None, array, self.const_usize(offset.bytes())).get_address(None);
+ self.const_bitcast(value, ty)
+ };
+ PlaceRef::new_sized(value, layout)
+ }
+
+ fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
+ self.context.new_cast(None, val, ty)
+ }
+}
+
+pub trait SignType<'gcc, 'tcx> {
+ fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+ fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+}
+
+impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
+ fn is_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.is_i8(cx) || self.is_i16(cx) || self.is_i32(cx) || self.is_i64(cx) || self.is_i128(cx)
+ }
+
+ fn is_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.is_u8(cx) || self.is_u16(cx) || self.is_u32(cx) || self.is_u64(cx) || self.is_u128(cx)
+ }
+
+ fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ if self.is_u8(cx) {
+ cx.i8_type
+ }
+ else if self.is_u16(cx) {
+ cx.i16_type
+ }
+ else if self.is_u32(cx) {
+ cx.i32_type
+ }
+ else if self.is_u64(cx) {
+ cx.i64_type
+ }
+ else if self.is_u128(cx) {
+ cx.i128_type
+ }
+ else if self.is_uchar(cx) {
+ cx.char_type
+ }
+ else if self.is_ushort(cx) {
+ cx.short_type
+ }
+ else if self.is_uint(cx) {
+ cx.int_type
+ }
+ else if self.is_ulong(cx) {
+ cx.long_type
+ }
+ else if self.is_ulonglong(cx) {
+ cx.longlong_type
+ }
+ else {
+ self.clone()
+ }
+ }
+
+ fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ if self.is_i8(cx) {
+ cx.u8_type
+ }
+ else if self.is_i16(cx) {
+ cx.u16_type
+ }
+ else if self.is_i32(cx) {
+ cx.u32_type
+ }
+ else if self.is_i64(cx) {
+ cx.u64_type
+ }
+ else if self.is_i128(cx) {
+ cx.u128_type
+ }
+ else if self.is_char(cx) {
+ cx.uchar_type
+ }
+ else if self.is_short(cx) {
+ cx.ushort_type
+ }
+ else if self.is_int(cx) {
+ cx.uint_type
+ }
+ else if self.is_long(cx) {
+ cx.ulong_type
+ }
+ else if self.is_longlong(cx) {
+ cx.ulonglong_type
+ }
+ else {
+ self.clone()
+ }
+ }
+}
+
+pub trait TypeReflection<'gcc, 'tcx> {
+ fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+ fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+ fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+ fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
+
+ fn is_vector(&self) -> bool;
+}
+
+impl<'gcc, 'tcx> TypeReflection<'gcc, 'tcx> for Type<'gcc> {
+ fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.uchar_type
+ }
+
+ fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.ushort_type
+ }
+
+ fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.uint_type
+ }
+
+ fn is_ulong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.ulong_type
+ }
+
+ fn is_ulonglong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.ulonglong_type
+ }
+
+ fn is_char(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.char_type
+ }
+
+ fn is_short(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.short_type
+ }
+
+ fn is_int(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.int_type
+ }
+
+ fn is_long(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.long_type
+ }
+
+ fn is_longlong(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.longlong_type
+ }
+
+ fn is_i8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i8_type
+ }
+
+ fn is_u8(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u8_type
+ }
+
+ fn is_i16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i16_type
+ }
+
+ fn is_u16(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u16_type
+ }
+
+ fn is_i32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i32_type
+ }
+
+ fn is_u32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u32_type
+ }
+
+ fn is_i64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i64_type
+ }
+
+ fn is_u64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u64_type
+ }
+
+ fn is_i128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.i128_type.unqualified()
+ }
+
+ fn is_u128(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.u128_type.unqualified()
+ }
+
+ fn is_f32(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.context.new_type::<f32>()
+ }
+
+ fn is_f64(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool {
+ self.unqualified() == cx.context.new_type::<f64>()
+ }
+
+ fn is_vector(&self) -> bool {
+ let mut typ = self.clone();
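+ // Peel qualifiers one layer at a time, checking for a vector type at each step, and stop
+ // once unqualified() no longer changes the type.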
+ loop {
+ if typ.dyncast_vector().is_some() {
+ return true;
+ }
+
+ let old_type = typ;
+ typ = typ.unqualified();
+ if old_type == typ {
+ break;
+ }
+ }
+
+ false
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
new file mode 100644
index 000000000..c0b8d2181
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/consts.rs
@@ -0,0 +1,405 @@
+use gccjit::{GlobalKind, LValue, RValue, ToRValue, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_middle::{bug, span_bug};
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint};
+use rustc_span::Span;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange};
+
+use crate::base;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
+ if value.get_type() == self.bool_type.make_pointer() {
+ if let Some(pointee) = typ.get_pointee() {
+ if pointee.dyncast_vector().is_some() {
+ panic!()
+ }
+ }
+ }
+ // NOTE: since bitcast makes a value non-constant, only bitcast when necessary, as some
+ // SIMD builtins require a constant value.
+ self.bitcast_if_needed(value, typ)
+ }
+}
+
+impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
+ fn static_addr_of(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+ // TODO(antoyo): implement a proper rvalue comparison in libgccjit instead of doing the
+ // following:
+ for (value, variable) in &*self.const_globals.borrow() {
+ if format!("{:?}", value) == format!("{:?}", cv) {
+ if let Some(global_variable) = self.global_lvalues.borrow().get(variable) {
+ let alignment = align.bits() as i32;
+ if alignment > global_variable.get_alignment() {
+ global_variable.set_alignment(alignment);
+ }
+ }
+ return *variable;
+ }
+ }
+ let global_value = self.static_addr_of_mut(cv, align, kind);
+ #[cfg(feature = "master")]
+ self.global_lvalues.borrow().get(&global_value)
+ .expect("`static_addr_of_mut` did not add the global to `self.global_lvalues`")
+ .global_set_readonly();
+ self.const_globals.borrow_mut().insert(cv, global_value);
+ global_value
+ }
+
+ fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+ let value =
+ match codegen_static_initializer(&self, def_id) {
+ Ok((value, _)) => value,
+ // Error has already been reported
+ Err(_) => return,
+ };
+
+ let global = self.get_static(def_id);
+
+ // boolean SSA values are i1, but they have to be stored in i8 slots,
+ // otherwise some LLVM optimization passes don't work as expected
+ let val_llty = self.val_ty(value);
+ let value =
+ if val_llty == self.type_i1() {
+ unimplemented!();
+ }
+ else {
+ value
+ };
+
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let gcc_type = self.layout_of(ty).gcc_type(self, true);
+
+ // TODO(antoyo): set alignment.
+
+ let value = self.bitcast_if_needed(value, gcc_type);
+ global.global_set_initializer_rvalue(value);
+
+ // As an optimization, all shared statics which do not have interior
+ // mutability are placed into read-only memory.
+ if !is_mutable {
+ if self.type_is_freeze(ty) {
+ #[cfg(feature = "master")]
+ global.global_set_readonly();
+ }
+ }
+
+ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ // Do not allow LLVM to change the alignment of a TLS on macOS.
+ //
+ // By default a global's alignment can be freely increased.
+ // This allows LLVM to generate more performant instructions
+ // e.g., using load-aligned into a SIMD register.
+ //
+ // However, on macOS 10.10 or below, the dynamic linker does not
+ // respect any alignment given on the TLS (radar 24221680).
+ // This violates the alignment assumption, causing segfaults at runtime.
+ //
+ // This bug is very easy to trigger. In `println!` and `panic!`,
+ // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
+ // which the values would be `mem::replace`d on initialization.
+ // The implementation of `mem::replace` will use SIMD
+ // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+ // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+ // which macOS's dyld disregards, causing crashes
+ // (see issues #51794, #51758, #50867, #48866 and #44056).
+ //
+ // To work around the bug, we trick LLVM into not increasing
+ // the global's alignment by explicitly assigning a section to it
+ // (equivalent to automatically generating a `#[link_section]` attribute).
+ // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+ // of `lib/IR/Globals.cpp` for why this works.
+ //
+ // When the alignment is not increased, the optimized `mem::replace`
+ // will use load-unaligned instructions instead, and thus avoiding the crash.
+ //
+ // We could remove this hack whenever we decide to drop macOS 10.10 support.
+ if self.tcx.sess.target.options.is_like_osx {
+ // The `inspect` method is okay here because we checked relocations, and
+ // because we are doing this access to inspect the final interpreter state
+ // (not as part of the interpreter execution).
+ //
+ // FIXME: This check requires that the (arbitrary) value of undefined bytes
+ // happens to be zero. Instead, we should only check the value of defined bytes
+ // and set all undefined bytes to zero if this allocation is headed for the
+ // BSS.
+ unimplemented!();
+ }
+ }
+
+ // Wasm statics with custom link sections get special treatment as they
+ // go into custom sections of the wasm executable.
+ if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
+ if let Some(_section) = attrs.link_section {
+ unimplemented!();
+ }
+ } else {
+ // TODO(antoyo): set link section.
+ }
+
+ if attrs.flags.contains(CodegenFnAttrFlags::USED) || attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
+ self.add_used_global(global.to_rvalue());
+ }
+ }
+
+ /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+ fn add_used_global(&self, _global: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn add_compiler_used_global(&self, _global: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
+ let global =
+ match kind {
+ Some(kind) if !self.tcx.sess.fewer_names() => {
+ let name = self.generate_local_symbol_name(kind);
+ // TODO(antoyo): check if it's okay that no link_section is set.
+
+ let typ = self.val_ty(cv).get_aligned(align.bytes());
+ let global = self.declare_private_global(&name[..], typ);
+ global
+ }
+ _ => {
+ let typ = self.val_ty(cv).get_aligned(align.bytes());
+ let global = self.declare_unnamed_global(typ);
+ global
+ },
+ };
+ global.global_set_initializer_rvalue(cv);
+ // TODO(antoyo): set unnamed address.
+ let rvalue = global.get_address(None);
+ self.global_lvalues.borrow_mut().insert(rvalue, global);
+ rvalue
+ }
+
+ pub fn get_static(&self, def_id: DefId) -> LValue<'gcc> {
+ let instance = Instance::mono(self.tcx, def_id);
+ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+ if let Some(&global) = self.instances.borrow().get(&instance) {
+ return global;
+ }
+
+ let defined_in_current_codegen_unit =
+ self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
+ assert!(
+ !defined_in_current_codegen_unit,
+ "consts::get_static() should always hit the cache for \
+ statics defined in the same CGU, but did not for `{:?}`",
+ def_id
+ );
+
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let sym = self.tcx.symbol_name(instance).name;
+
+ let global =
+ if let Some(def_id) = def_id.as_local() {
+ let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let llty = self.layout_of(ty).gcc_type(self, true);
+ // FIXME: refactor this to work without accessing the HIR
+ let global = match self.tcx.hir().get(id) {
+ Node::Item(&hir::Item { span, kind: hir::ItemKind::Static(..), .. }) => {
+ if let Some(global) = self.get_declared_value(&sym) {
+ if self.val_ty(global) != self.type_ptr_to(llty) {
+ span_bug!(span, "Conflicting types for static");
+ }
+ }
+
+ let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let global = self.declare_global(
+ &sym,
+ llty,
+ GlobalKind::Exported,
+ is_tls,
+ fn_attrs.link_section,
+ );
+
+ if !self.tcx.is_reachable_non_generic(def_id) {
+ // TODO(antoyo): set visibility.
+ }
+
+ global
+ }
+
+ Node::ForeignItem(&hir::ForeignItem {
+ span,
+ kind: hir::ForeignItemKind::Static(..),
+ ..
+ }) => {
+ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+ check_and_apply_linkage(&self, &fn_attrs, ty, sym, span)
+ }
+
+ item => bug!("get_static: expected static, found {:?}", item),
+ };
+
+ global
+ }
+ else {
+ // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
+ //debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
+
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+ let span = self.tcx.def_span(def_id);
+ let global = check_and_apply_linkage(&self, &attrs, ty, sym, span);
+
+ let needs_dll_storage_attr = false; // TODO(antoyo)
+
+ // If this assertion triggers, there's something wrong with command-line
+ // argument validation.
+ debug_assert!(
+ !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+ && self.tcx.sess.target.options.is_like_msvc
+ && self.tcx.sess.opts.cg.prefer_dynamic)
+ );
+
+ if needs_dll_storage_attr {
+ // This item is external but not foreign, i.e., it originates from an external Rust
+ // crate. Since we don't know whether this crate will be linked dynamically or
+ // statically in the final application, we always mark such symbols as 'dllimport'.
+ // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+ // to make things work.
+ //
+ // However, in some scenarios we defer emission of statics to downstream
+ // crates, so there are cases where a static with an upstream DefId
+ // is actually present in the current crate. We can find out via the
+ // is_codegened_item query.
+ if !self.tcx.is_codegened_item(def_id) {
+ unimplemented!();
+ }
+ }
+ global
+ };
+
+ // TODO(antoyo): set dll storage class.
+
+ self.instances.borrow_mut().insert(instance, global);
+ global
+ }
+}
+
+pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> {
+ let alloc = alloc.inner();
+ let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+ let dl = cx.data_layout();
+ let pointer_size = dl.pointer_size.bytes() as usize;
+
+ let mut next_offset = 0;
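+ // The loop below interleaves runs of plain bytes with relocated pointers. For example, on
+ // a 64-bit target, an allocation laid out as 4 data bytes, then an 8-byte pointer, then 1
+ // data byte becomes three struct fields: the first 4 bytes, the pointer scalar, the last byte.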
+ for &(offset, alloc_id) in alloc.relocations().iter() {
+ let offset = offset.bytes();
+ assert_eq!(offset as usize as u64, offset);
+ let offset = offset as usize;
+ if offset > next_offset {
+ // This `inspect` is okay since we have checked that it is not within a relocation, it
+ // is within the bounds of the allocation, and it doesn't affect interpreter execution
+ // (we inspect the result after interpreter execution). Any undef byte is replaced with
+ // some arbitrary byte value.
+ //
+ // FIXME: relay undef bytes to codegen as undef const bytes
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
+ llvals.push(cx.const_bytes(bytes));
+ }
+ let ptr_offset =
+ read_target_uint(dl.endian,
+ // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+ // affect interpreter execution (we inspect the result after interpreter execution),
+ // and we properly interpret the relocation as a relocation pointer offset.
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+ )
+ .expect("const_alloc_to_llvm: could not read relocation pointer")
+ as u64;
+ llvals.push(cx.scalar_to_backend(
+ InterpScalar::from_pointer(
+ interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
+ &cx.tcx,
+ ),
+ abi::Scalar::Initialized { value: Primitive::Pointer, valid_range: WrappingRange::full(dl.pointer_size) },
+ cx.type_i8p(),
+ ));
+ next_offset = offset + pointer_size;
+ }
+ if alloc.len() >= next_offset {
+ let range = next_offset..alloc.len();
+ // This `inspect` is okay since we have checked that it is after all relocations, it is
+ // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+ // inspect the result after interpreter execution). Any undef byte is replaced with some
+ // arbitrary byte value.
+ //
+ // FIXME: relay undef bytes to codegen as undef const bytes
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+ llvals.push(cx.const_bytes(bytes));
+ }
+
+ cx.const_struct(&llvals, true)
+}
+
+pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> {
+ let alloc = cx.tcx.eval_static_initializer(def_id)?;
+ Ok((const_alloc_to_gcc(cx, alloc), alloc))
+}
+
+fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str, span: Span) -> LValue<'gcc> {
+ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let llty = cx.layout_of(ty).gcc_type(cx, true);
+ if let Some(linkage) = attrs.linkage {
+ // If this is a static with a linkage specified, then we need to handle
+ // it a little specially. The typesystem prevents things like &T and
+ // extern "C" fn() from being non-null, so we can't just declare a
+ // static and call it a day. Some linkages (like weak) will make it such
+ // that the static actually has a null value.
+ let llty2 =
+ if let ty::RawPtr(ref mt) = ty.kind() {
+ cx.layout_of(mt.ty).gcc_type(cx, true)
+ }
+ else {
+ cx.sess().span_fatal(
+ span,
+ "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
+ )
+ };
+ // Declare a symbol `foo` with the desired linkage.
+ let global1 = cx.declare_global_with_linkage(&sym, llty2, base::global_linkage_to_gcc(linkage));
+
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
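+ // For instance, for a weak `static foo: *const T`, this declares a weak symbol `foo` plus
+ // an internal `_rust_extern_with_linkage_foo` initialized with the address of `foo`;
+ // accesses go through the latter, which is null if `foo` was discarded at link time.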
+ let mut real_name = "_rust_extern_with_linkage_".to_string();
+ real_name.push_str(&sym);
+ let global2 = cx.define_global(&real_name, llty, is_tls, attrs.link_section);
+ // TODO(antoyo): set linkage.
+ global2.global_set_initializer_rvalue(global1.get_address(None));
+ // TODO(antoyo): use global_set_initializer() when it will work.
+ global2
+ }
+ else {
+ // Generate an external declaration.
+ // FIXME(nagisa): investigate whether it can be changed into define_global
+
+ // Thread-local statics in some other crate need to *always* be linked
+ // against in a thread-local fashion, so we need to be sure to apply the
+ // thread-local attribute locally if it was present remotely. If we
+ // don't do this then linker errors can be generated where the linker
+ // complains that one object file has a thread-local version of the
+ // symbol and another one doesn't.
+ cx.declare_global(&sym, llty, GlobalKind::Imported, is_tls, attrs.link_section)
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
new file mode 100644
index 000000000..478f6d893
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -0,0 +1,553 @@
+use std::cell::{Cell, RefCell};
+
+use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Struct, Type};
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::traits::{
+ BackendTypes,
+ MiscMethods,
+};
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::span_bug;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
+use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
+use rustc_session::Session;
+use rustc_span::Span;
+use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
+
+use crate::callee::get_fn;
+
+#[derive(Clone)]
+pub struct FuncSig<'gcc> {
+ pub params: Vec<Type<'gcc>>,
+ pub return_type: Type<'gcc>,
+}
+
+pub struct CodegenCx<'gcc, 'tcx> {
+ pub check_overflow: bool,
+ pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+ pub context: &'gcc Context<'gcc>,
+
+ // TODO(bjorn3): Can this field be removed?
+ pub current_func: RefCell<Option<Function<'gcc>>>,
+ pub normal_function_addresses: RefCell<FxHashSet<RValue<'gcc>>>,
+
+ pub functions: RefCell<FxHashMap<String, Function<'gcc>>>,
+ pub intrinsics: RefCell<FxHashMap<String, Function<'gcc>>>,
+
+ pub tls_model: gccjit::TlsModel,
+
+ pub bool_type: Type<'gcc>,
+ pub i8_type: Type<'gcc>,
+ pub i16_type: Type<'gcc>,
+ pub i32_type: Type<'gcc>,
+ pub i64_type: Type<'gcc>,
+ pub i128_type: Type<'gcc>,
+ pub isize_type: Type<'gcc>,
+
+ pub u8_type: Type<'gcc>,
+ pub u16_type: Type<'gcc>,
+ pub u32_type: Type<'gcc>,
+ pub u64_type: Type<'gcc>,
+ pub u128_type: Type<'gcc>,
+ pub usize_type: Type<'gcc>,
+
+ pub char_type: Type<'gcc>,
+ pub uchar_type: Type<'gcc>,
+ pub short_type: Type<'gcc>,
+ pub ushort_type: Type<'gcc>,
+ pub int_type: Type<'gcc>,
+ pub uint_type: Type<'gcc>,
+ pub long_type: Type<'gcc>,
+ pub ulong_type: Type<'gcc>,
+ pub longlong_type: Type<'gcc>,
+ pub ulonglong_type: Type<'gcc>,
+ pub sizet_type: Type<'gcc>,
+
+ pub supports_128bit_integers: bool,
+
+ pub float_type: Type<'gcc>,
+ pub double_type: Type<'gcc>,
+
+ pub linkage: Cell<FunctionType>,
+ pub scalar_types: RefCell<FxHashMap<Ty<'tcx>, Type<'gcc>>>,
+ pub types: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), Type<'gcc>>>,
+ pub tcx: TyCtxt<'tcx>,
+
+ pub struct_types: RefCell<FxHashMap<Vec<Type<'gcc>>, Type<'gcc>>>,
+
+ pub types_with_fields_to_set: RefCell<FxHashMap<Type<'gcc>, (Struct<'gcc>, TyAndLayout<'tcx>)>>,
+
+ /// Cache instances of monomorphic and polymorphic items
+ pub instances: RefCell<FxHashMap<Instance<'tcx>, LValue<'gcc>>>,
+ /// Cache function instances of monomorphic and polymorphic items
+ pub function_instances: RefCell<FxHashMap<Instance<'tcx>, RValue<'gcc>>>,
+ /// Cache generated vtables
+ pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
+
+ // TODO(antoyo): improve the SSA API to not require those.
+ // Mapping from function pointer type to indexes of on stack parameters.
+ pub on_stack_params: RefCell<FxHashMap<FunctionPtrType<'gcc>, FxHashSet<usize>>>,
+ // Mapping from function to indexes of on stack parameters.
+ pub on_stack_function_params: RefCell<FxHashMap<Function<'gcc>, FxHashSet<usize>>>,
+
+ /// Cache of emitted const globals (value -> global)
+ pub const_globals: RefCell<FxHashMap<RValue<'gcc>, RValue<'gcc>>>,
+
+ /// Map from the address of a global variable (rvalue) to the global variable itself (lvalue).
+ /// TODO(antoyo): remove when the rustc API is fixed.
+ pub global_lvalues: RefCell<FxHashMap<RValue<'gcc>, LValue<'gcc>>>,
+
+ /// Cache of constant strings.
+ pub const_str_cache: RefCell<FxHashMap<String, LValue<'gcc>>>,
+
+ /// Cache of globals.
+ pub globals: RefCell<FxHashMap<String, RValue<'gcc>>>,
+
+ /// A counter that is used for generating local symbol names
+ local_gen_sym_counter: Cell<usize>,
+
+ eh_personality: Cell<Option<RValue<'gcc>>>,
+
+ pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+
+ /// NOTE: a hack is used because the rustc API is not well suited to libgccjit: as a result,
+ /// `const_undef()` returns structs as pointers so that they can later be assigned a value.
+ /// This set remembers which of these pointers were returned by that function so that
+ /// they can be dereferenced later.
+ /// FIXME(antoyo): fix the rustc API to avoid having this hack.
+ pub structs_as_pointer: RefCell<FxHashSet<RValue<'gcc>>>,
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self {
+ let check_overflow = tcx.sess.overflow_checks();
+
+ let i8_type = context.new_c_type(CType::Int8t);
+ let i16_type = context.new_c_type(CType::Int16t);
+ let i32_type = context.new_c_type(CType::Int32t);
+ let i64_type = context.new_c_type(CType::Int64t);
+ let u8_type = context.new_c_type(CType::UInt8t);
+ let u16_type = context.new_c_type(CType::UInt16t);
+ let u32_type = context.new_c_type(CType::UInt32t);
+ let u64_type = context.new_c_type(CType::UInt64t);
+
+ let (i128_type, u128_type) =
+ if supports_128bit_integers {
+ let i128_type = context.new_c_type(CType::Int128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+ let u128_type = context.new_c_type(CType::UInt128t).get_aligned(8); // TODO(antoyo): should the alignment be hard-coded?
+ (i128_type, u128_type)
+ }
+ else {
+ let i128_type = context.new_array_type(None, i64_type, 2);
+ let u128_type = context.new_array_type(None, u64_type, 2);
+ (i128_type, u128_type)
+ };
+
+ let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
+
+ let float_type = context.new_type::<f32>();
+ let double_type = context.new_type::<f64>();
+
+ let char_type = context.new_c_type(CType::Char);
+ let uchar_type = context.new_c_type(CType::UChar);
+ let short_type = context.new_c_type(CType::Short);
+ let ushort_type = context.new_c_type(CType::UShort);
+ let int_type = context.new_c_type(CType::Int);
+ let uint_type = context.new_c_type(CType::UInt);
+ let long_type = context.new_c_type(CType::Long);
+ let ulong_type = context.new_c_type(CType::ULong);
+ let longlong_type = context.new_c_type(CType::LongLong);
+ let ulonglong_type = context.new_c_type(CType::ULongLong);
+ let sizet_type = context.new_c_type(CType::SizeT);
+
+ let isize_type = context.new_c_type(CType::LongLong);
+ let usize_type = context.new_c_type(CType::ULongLong);
+ let bool_type = context.new_type::<bool>();
+
+ // TODO(antoyo): only have those assertions on x86_64.
+ assert_eq!(isize_type.get_size(), i64_type.get_size());
+ assert_eq!(usize_type.get_size(), u64_type.get_size());
+
+ let mut functions = FxHashMap::default();
+ let builtins = [
+ "__builtin_unreachable", "abort", "__builtin_expect", "__builtin_add_overflow", "__builtin_mul_overflow",
+ "__builtin_saddll_overflow", /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
+ "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
+ "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
+ "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
+ "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
+ "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
+ "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
+ "__builtin_expect_with_probability",
+ ];
+
+ for builtin in builtins.iter() {
+ functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
+ }
+
+ Self {
+ check_overflow,
+ codegen_unit,
+ context,
+ current_func: RefCell::new(None),
+ normal_function_addresses: Default::default(),
+ functions: RefCell::new(functions),
+ intrinsics: RefCell::new(FxHashMap::default()),
+
+ tls_model,
+
+ bool_type,
+ i8_type,
+ i16_type,
+ i32_type,
+ i64_type,
+ i128_type,
+ isize_type,
+ usize_type,
+ u8_type,
+ u16_type,
+ u32_type,
+ u64_type,
+ u128_type,
+ char_type,
+ uchar_type,
+ short_type,
+ ushort_type,
+ int_type,
+ uint_type,
+ long_type,
+ ulong_type,
+ longlong_type,
+ ulonglong_type,
+ sizet_type,
+
+ supports_128bit_integers,
+
+ float_type,
+ double_type,
+
+ linkage: Cell::new(FunctionType::Internal),
+ instances: Default::default(),
+ function_instances: Default::default(),
+ on_stack_params: Default::default(),
+ on_stack_function_params: Default::default(),
+ vtables: Default::default(),
+ const_globals: Default::default(),
+ global_lvalues: Default::default(),
+ const_str_cache: Default::default(),
+ globals: Default::default(),
+ scalar_types: Default::default(),
+ types: Default::default(),
+ tcx,
+ struct_types: Default::default(),
+ types_with_fields_to_set: Default::default(),
+ local_gen_sym_counter: Cell::new(0),
+ eh_personality: Cell::new(None),
+ pointee_infos: Default::default(),
+ structs_as_pointer: Default::default(),
+ }
+ }
+
+ pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
+ let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
+ debug_assert!(self.functions.borrow().values().any(|value| *value == function),
+ "{:?} ({:?}) is not a function", value, value.get_type());
+ function
+ }
+
+ pub fn is_native_int_type(&self, typ: Type<'gcc>) -> bool {
+ let types = [
+ self.u8_type,
+ self.u16_type,
+ self.u32_type,
+ self.u64_type,
+ self.i8_type,
+ self.i16_type,
+ self.i32_type,
+ self.i64_type,
+ ];
+
+ for native_type in types {
+ if native_type.is_compatible_with(typ) {
+ return true;
+ }
+ }
+
+ self.supports_128bit_integers &&
+ (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+ }
+
+ pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool {
+ !self.supports_128bit_integers &&
+ (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+ }
+
+ pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+ self.is_native_int_type(typ) || typ.is_compatible_with(self.bool_type)
+ }
+
+ pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
+ self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ.is_compatible_with(self.bool_type)
+ }
+
+ pub fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+
+ pub fn bitcast_if_needed(&self, value: RValue<'gcc>, expected_type: Type<'gcc>) -> RValue<'gcc> {
+ if value.get_type() != expected_type {
+ self.context.new_bitcast(None, value, expected_type)
+ }
+ else {
+ value
+ }
+ }
+}
+
+impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
+ type Value = RValue<'gcc>;
+ type Function = RValue<'gcc>;
+
+ type BasicBlock = Block<'gcc>;
+ type Type = Type<'gcc>;
+ type Funclet = (); // TODO(antoyo)
+
+ type DIScope = (); // TODO(antoyo)
+ type DILocation = (); // TODO(antoyo)
+ type DIVariable = (); // TODO(antoyo)
+}
+
+impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
+ &self.vtables
+ }
+
+ fn get_fn(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+ let func = get_fn(self, instance);
+ *self.current_func.borrow_mut() = Some(self.rvalue_as_function(func));
+ func
+ }
+
+ fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
+ let func_name = self.tcx.symbol_name(instance).name;
+
+ let func =
+ if self.intrinsics.borrow().contains_key(func_name) {
+ self.intrinsics.borrow()[func_name].clone()
+ }
+ else {
+ let func = get_fn(self, instance);
+ self.rvalue_as_function(func)
+ };
+ let ptr = func.get_address(None);
+
+ // TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
+ // FIXME(antoyo): the rustc API seems to call get_fn_addr() when not needed (e.g. for FFI).
+
+ self.normal_function_addresses.borrow_mut().insert(ptr);
+
+ ptr
+ }
+
+ fn eh_personality(&self) -> RValue<'gcc> {
+ // The exception handling personality function.
+ //
+ // If our compilation unit has the `eh_personality` lang item somewhere
+ // within it, then we just need to codegen that. Otherwise, we're
+ // building an rlib which will depend on some upstream implementation of
+ // this function, so we just codegen a generic reference to it. We don't
+ // specify any of the types for the function, we just make it a symbol
+ // that LLVM can later use.
+ //
+ // Note that MSVC is a little special here in that we don't use the
+ // `eh_personality` lang item at all. Currently LLVM has support for
+ // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+ // *name of the personality function* to decide what kind of unwind side
+ // tables/landing pads to emit. It looks like Dwarf is used by default,
+ // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+ // an "exception", but for MSVC we want to force SEH. This means that we
+ // can't actually have the personality function be our standard
+ // `rust_eh_personality` function, but rather we wired it up to the
+ // CRT's custom personality function, which forces LLVM to consider
+ // landing pads as "landing pads for SEH".
+ if let Some(llpersonality) = self.eh_personality.get() {
+ return llpersonality;
+ }
+ let tcx = self.tcx;
+ let llfn = match tcx.lang_items().eh_personality() {
+ Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+ ty::Instance::resolve(
+ tcx,
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ tcx.intern_substs(&[]),
+ )
+ .unwrap().unwrap(),
+ ),
+ _ => {
+ let _name = if wants_msvc_seh(self.sess()) {
+ "__CxxFrameHandler3"
+ } else {
+ "rust_eh_personality"
+ };
+ //let func = self.declare_func(name, self.type_i32(), &[], true);
+ // FIXME(antoyo): this hack should not be needed. That will probably be removed when
+ // unwinding support is added.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+ };
+ // TODO(antoyo): apply target cpu attributes.
+ self.eh_personality.set(Some(llfn));
+ llfn
+ }
+
+ fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+
+ fn check_overflow(&self) -> bool {
+ self.check_overflow
+ }
+
+ fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+ self.codegen_unit
+ }
+
+ fn used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
+ unimplemented!();
+ }
+
+ fn set_frame_pointer_type(&self, _llfn: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn apply_target_cpu_attr(&self, _llfn: RValue<'gcc>) {
+ // TODO(antoyo)
+ }
+
+ fn create_used_variable(&self) {
+ unimplemented!();
+ }
+
+ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+ if self.get_declared_value("main").is_none() {
+ Some(self.declare_cfn("main", fn_type))
+ }
+ else {
+ // If the symbol already exists, it is an error: for example, the user wrote
+ // #[no_mangle] extern "C" fn main(..) {..}
+ // instead of #[start]
+ None
+ }
+ }
+
+ fn compiler_used_statics(&self) -> &RefCell<Vec<RValue<'gcc>>> {
+ unimplemented!()
+ }
+
+ fn create_compiler_used_variable(&self) {
+ unimplemented!()
+ }
+}
+
+impl<'gcc, 'tcx> HasTyCtxt<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'gcc, 'tcx> HasDataLayout for CodegenCx<'gcc, 'tcx> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'gcc, 'tcx> HasTargetSpec for CodegenCx<'gcc, 'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target
+ }
+}
+
+impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ if let LayoutError::SizeOverflow(_) = err {
+ self.sess().span_fatal(span, &err.to_string())
+ } else {
+ span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ }
+ }
+}
+
+impl<'gcc, 'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+ self.sess().span_fatal(span, &err.to_string())
+ } else {
+ match fn_abi_request {
+ FnAbiRequest::OfFnPtr { sig, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
+ sig,
+ extra_args,
+ err
+ );
+ }
+ FnAbiRequest::OfInstance { instance, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_instance({}, {:?})` failed: {}",
+ instance,
+ extra_args,
+ err
+ );
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx, 'gcc> HasParamEnv<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ ParamEnv::reveal_all()
+ }
+}
+
+impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
+ /// Generates a new symbol name with the given prefix. This symbol name must
+ /// only be used for definitions with `internal` or `private` linkage.
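+ ///
+ /// For example, the first call with prefix `"global"` is expected to yield `"global.0"`.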
+ pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+ let idx = self.local_gen_sym_counter.get();
+ self.local_gen_sym_counter.set(idx + 1);
+ // Include a '.' character, so there can be no accidental conflicts with
+ // user-defined names.
+ let mut name = String::with_capacity(prefix.len() + 6);
+ name.push_str(prefix);
+ name.push_str(".");
+ base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+ name
+ }
+}
+
+fn to_gcc_tls_mode(tls_model: TlsModel) -> gccjit::TlsModel {
+ match tls_model {
+ TlsModel::GeneralDynamic => gccjit::TlsModel::GlobalDynamic,
+ TlsModel::LocalDynamic => gccjit::TlsModel::LocalDynamic,
+ TlsModel::InitialExec => gccjit::TlsModel::InitialExec,
+ TlsModel::LocalExec => gccjit::TlsModel::LocalExec,
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/coverageinfo.rs b/compiler/rustc_codegen_gcc/src/coverageinfo.rs
new file mode 100644
index 000000000..872fc2472
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/coverageinfo.rs
@@ -0,0 +1,69 @@
+use gccjit::RValue;
+use rustc_codegen_ssa::traits::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::coverage::{
+ CodeRegion,
+ CounterValueReference,
+ ExpressionOperandId,
+ InjectedExpressionId,
+ Op,
+};
+use rustc_middle::ty::Instance;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn set_function_source_hash(
+ &mut self,
+ _instance: Instance<'tcx>,
+ _function_source_hash: u64,
+ ) -> bool {
+ unimplemented!();
+ }
+
+ fn add_coverage_counter(&mut self, _instance: Instance<'tcx>, _id: CounterValueReference, _region: CodeRegion) -> bool {
+ // TODO(antoyo)
+ false
+ }
+
+ fn add_coverage_counter_expression(&mut self, _instance: Instance<'tcx>, _id: InjectedExpressionId, _lhs: ExpressionOperandId, _op: Op, _rhs: ExpressionOperandId, _region: Option<CodeRegion>) -> bool {
+ // TODO(antoyo)
+ false
+ }
+
+ fn add_coverage_unreachable(&mut self, _instance: Instance<'tcx>, _region: CodeRegion) -> bool {
+ // TODO(antoyo)
+ false
+ }
+}
+
+impl<'gcc, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn coverageinfo_finalize(&self) {
+ // TODO(antoyo)
+ }
+
+ fn get_pgo_func_name_var(&self, _instance: Instance<'tcx>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ /// Functions with MIR-based coverage are normally codegenned _only_ if
+ /// called. LLVM coverage tools typically expect every function to be
+ /// defined (even if unused), with at least one call to LLVM intrinsic
+ /// `instrprof.increment`.
+ ///
+ /// Codegen a small function that will never be called, with one counter
+ /// that will never be incremented.
+ ///
+ /// For used/called functions, the coverageinfo was already added to the
+ /// `function_coverage_map` (keyed by function `Instance`) during codegen.
+ /// But in this case, since the unused function was _not_ previously
+ /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
+ /// them. The first `CodeRegion` is used to add a single counter, with the
+ /// same counter ID used in the injected `instrprof.increment` intrinsic
+ /// call. Since the function is never called, all other `CodeRegion`s can be
+ /// added as `unreachable_region`s.
+ fn define_unused_fn(&self, _def_id: DefId) {
+ unimplemented!();
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs
new file mode 100644
index 000000000..266759ed6
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs
@@ -0,0 +1,62 @@
+use gccjit::RValue;
+use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind};
+use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoMethods};
+use rustc_middle::mir;
+use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty};
+use rustc_span::{SourceFile, Span, Symbol};
+use rustc_target::abi::Size;
+use rustc_target::abi::call::FnAbi;
+
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+
+impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_var_addr(&mut self, _dbg_var: Self::DIVariable, _scope_metadata: Self::DIScope, _variable_alloca: Self::Value, _direct_offset: Size, _indirect_offsets: &[Size]) {
+ unimplemented!();
+ }
+
+ fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
+ // TODO(antoyo): insert reference to gdb debug scripts section global.
+ }
+
+ fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {
+ unimplemented!();
+ }
+
+ fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
+ unimplemented!();
+ }
+}
+
+impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn create_vtable_debuginfo(&self, _ty: Ty<'tcx>, _trait_ref: Option<PolyExistentialTraitRef<'tcx>>, _vtable: Self::Value) {
+ // TODO(antoyo)
+ }
+
+ fn create_function_debug_context(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _llfn: RValue<'gcc>, _mir: &mir::Body<'tcx>) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
+ // TODO(antoyo)
+ None
+ }
+
+ fn extend_scope_to_file(&self, _scope_metadata: Self::DIScope, _file: &SourceFile) -> Self::DIScope {
+ unimplemented!();
+ }
+
+ fn debuginfo_finalize(&self) {
+ // TODO(antoyo)
+ }
+
+ fn create_dbg_var(&self, _variable_name: Symbol, _variable_type: Ty<'tcx>, _scope_metadata: Self::DIScope, _variable_kind: VariableKind, _span: Span) -> Self::DIVariable {
+ unimplemented!();
+ }
+
+ fn dbg_scope_fn(&self, _instance: Instance<'tcx>, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>, _maybe_definition_llfn: Option<RValue<'gcc>>) -> Self::DIScope {
+ unimplemented!();
+ }
+
+ fn dbg_loc(&self, _scope: Self::DIScope, _inlined_at: Option<Self::DILocation>, _span: Span) -> Self::DILocation {
+ unimplemented!();
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs
new file mode 100644
index 000000000..a619e2f77
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/declare.rs
@@ -0,0 +1,145 @@
+use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
+use rustc_codegen_ssa::traits::BaseTypeMethods;
+use rustc_middle::ty::Ty;
+use rustc_span::Symbol;
+use rustc_target::abi::call::FnAbi;
+
+use crate::abi::FnAbiGccExt;
+use crate::context::CodegenCx;
+use crate::intrinsic::llvm;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+ if self.globals.borrow().contains_key(name) {
+ let typ = self.globals.borrow()[name].get_type();
+ let global = self.context.new_global(None, GlobalKind::Imported, typ, name);
+ if is_tls {
+ global.set_tls_model(self.tls_model);
+ }
+ if let Some(link_section) = link_section {
+ global.set_link_section(link_section.as_str());
+ }
+ global
+ }
+ else {
+ self.declare_global(name, ty, GlobalKind::Exported, is_tls, link_section)
+ }
+ }
+
+ pub fn declare_unnamed_global(&self, ty: Type<'gcc>) -> LValue<'gcc> {
+ let name = self.generate_local_symbol_name("global");
+ self.context.new_global(None, GlobalKind::Internal, ty, &name)
+ }
+
+ pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> {
+ let global = self.context.new_global(None, linkage, ty, name);
+ let global_address = global.get_address(None);
+ self.globals.borrow_mut().insert(name.to_string(), global_address);
+ global
+ }
+
+ /*pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> RValue<'gcc> {
+ self.linkage.set(FunctionType::Exported);
+ let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic);
+ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }*/
+
+ pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+ let global = self.context.new_global(None, global_kind, ty, name);
+ if is_tls {
+ global.set_tls_model(self.tls_model);
+ }
+ if let Some(link_section) = link_section {
+ global.set_link_section(link_section.as_str());
+ }
+ let global_address = global.get_address(None);
+ self.globals.borrow_mut().insert(name.to_string(), global_address);
+ global
+ }
+
+ pub fn declare_private_global(&self, name: &str, ty: Type<'gcc>) -> LValue<'gcc> {
+ let global = self.context.new_global(None, GlobalKind::Internal, ty, name);
+ let global_address = global.get_address(None);
+ self.globals.borrow_mut().insert(name.to_string(), global_address);
+ global
+ }
+
+ pub fn declare_cfn(&self, name: &str, _fn_type: Type<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): use the fn_type parameter.
+ let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
+ let return_type = self.type_i32();
+ let variadic = false;
+ self.linkage.set(FunctionType::Exported);
+ let func = declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, &[self.type_i32(), const_string], variadic);
+ // NOTE: current_func must also be set here, because get_fn() is not called
+ // for the main function.
+ *self.current_func.borrow_mut() = Some(func);
+ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> RValue<'gcc> {
+ let (return_type, params, variadic, on_stack_param_indices) = fn_abi.gcc_type(self);
+ let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &params, variadic);
+ self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
+ // FIXME(antoyo): this is a wrong cast. That requires changing the compiler API.
+ unsafe { std::mem::transmute(func) }
+ }
+
+ pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+ self.get_or_insert_global(name, ty, is_tls, link_section)
+ }
+
+ pub fn get_declared_value(&self, name: &str) -> Option<RValue<'gcc>> {
+ // TODO(antoyo): use a different field than globals, because this seems to return a function?
+ self.globals.borrow().get(name).cloned()
+ }
+}
+
+/// Declare a function.
+///
+/// If a function with the same name has already been declared, that existing
+/// declaration is returned instead of creating a new one.
+fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
+ if name.starts_with("llvm.") {
+ let intrinsic = llvm::intrinsic(name, cx);
+ cx.intrinsics.borrow_mut().insert(name.to_string(), intrinsic);
+ return intrinsic;
+ }
+ let func =
+ if cx.functions.borrow().contains_key(name) {
+ cx.functions.borrow()[name]
+ }
+ else {
+ let params: Vec<_> = param_types.into_iter().enumerate()
+ .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
+ .collect();
+ let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
+ cx.functions.borrow_mut().insert(name.to_string(), func);
+ func
+ };
+
+ // TODO(antoyo): set function calling convention.
+ // TODO(antoyo): set unnamed address.
+ // TODO(antoyo): set no red zone function attribute.
+ // TODO(antoyo): set attributes for optimisation.
+ // TODO(antoyo): set attributes for non lazy bind.
+
+ // FIXME(antoyo): invalid cast.
+ func
+}
+
+// FIXME(antoyo): this is a hack because libgccjit currently only supports alpha, num and _.
+// Unsupported characters: `$` and `.`.
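+// For example, `mangle_name("llvm.x86.sse2.pause")` returns `"llvm_x86_sse2_pause"`, since
+// each `.` is replaced by `_`.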
+pub fn mangle_name(name: &str) -> String {
+ name.replace(|char: char| {
+ if !char.is_alphanumeric() && char != '_' {
+ debug_assert!("$.".contains(char), "Unsupported char in function name: {}", char);
+ true
+ }
+ else {
+ false
+ }
+ }, "_")
+}
diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs
new file mode 100644
index 000000000..0c5dab004
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/int.rs
@@ -0,0 +1,742 @@
+//! Module to handle integer operations.
+//! This module exists because some integer types are not supported on some gcc platforms, e.g.
+//! 128-bit integers on 32-bit platforms, and thus must be handled manually.
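+//! On such platforms, a 128-bit integer is lowered to a two-element array of the native
+//! 64-bit integer type (see `CodegenCx::new`), and operations on it are either open-coded
+//! below or turned into calls to library helpers such as `__udivti3`.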
+
+use std::convert::TryFrom;
+
+use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
+use rustc_middle::ty::Ty;
+
+use crate::builder::ToGccComp;
+use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx};
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // 128-bit unsigned %: __umodti3
+ self.multiplicative_operation(BinaryOp::Modulo, "mod", false, a, b)
+ }
+
+ pub fn gcc_srem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // 128-bit signed %: __modti3
+ self.multiplicative_operation(BinaryOp::Modulo, "mod", true, a, b)
+ }
+
+ pub fn gcc_not(&self, a: RValue<'gcc>) -> RValue<'gcc> {
+ let typ = a.get_type();
+ if self.is_native_int_type_or_bool(typ) {
+ let operation =
+ if typ.is_bool() {
+ UnaryOp::LogicalNegate
+ }
+ else {
+ UnaryOp::BitwiseNegate
+ };
+ self.cx.context.new_unary_op(None, operation, typ, a)
+ }
+ else {
+ // TODO(antoyo): use __negdi2 and __negti2 instead?
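+ // A non-native integer is stored as an array of two native halves: bitwise-negate
+ // each half separately.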
+ let element_type = typ.dyncast_array().expect("element type");
+ let values = [
+ self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
+ self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
+ ];
+ self.cx.context.new_array_constructor(None, typ, &values)
+ }
+ }
+
+ pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ if self.is_native_int_type(a_type) {
+ self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+ }
+ else {
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a], "__negti2", false);
+ self.context.new_call(None, func, &[a])
+ }
+ }
+
+ pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b)
+ }
+
+ pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ let a_native = self.is_native_int_type(a_type);
+ let b_native = self.is_native_int_type(b_type);
+ if a_native && b_native {
+ // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
+ // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
+ if a_type.is_signed(self) != b_type.is_signed(self) {
+ let b = self.context.new_cast(None, b, a_type);
+ a >> b
+ }
+ else {
+ a >> b
+ }
+ }
+ else if a_native && !b_native {
+ self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
+ }
+ else {
+ // NOTE: we cannot use the lshr builtin because it calls hi() (to get the most
+ // significant half of the number), which itself uses lshr.
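+ // The shift is instead open-coded over the [low, high] halves (modulo the endianness
+ // TODO below), with three cases:
+ // - b & 64 != 0: the result is [high >> (b - 64), sign bits];
+ // - b == 0: the result is the input unchanged;
+ // - otherwise: the result is [(high << (64 - b)) | (low >> b, done as a logical shift), high >> b].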
+
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+
+ let func = self.current_func();
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+ let b0_block = func.new_block("b0");
+ let actual_else_block = func.new_block("actual_else");
+
+ let result = func.new_local(None, a_type, "shiftResult");
+
+ let sixty_four = self.gcc_int(native_int_type, 64);
+ let sixty_three = self.gcc_int(native_int_type, 63);
+ let zero = self.gcc_zero(native_int_type);
+ let b = self.gcc_int_cast(b, native_int_type);
+ let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
+ self.llbb().end_with_conditional(None, condition, then_block, else_block);
+
+ // TODO(antoyo): take endianness into account.
+ let shift_value = self.gcc_sub(b, sixty_four);
+ let high = self.high(a);
+ let sign =
+ if a_type.is_signed(self) {
+ high >> sixty_three
+ }
+ else {
+ zero
+ };
+ let values = [
+ high >> shift_value,
+ sign,
+ ];
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ then_block.add_assignment(None, result, array_value);
+ then_block.end_with_jump(None, after_block);
+
+ let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
+ else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+
+ b0_block.add_assignment(None, result, a);
+ b0_block.end_with_jump(None, after_block);
+
+ let shift_value = self.gcc_sub(sixty_four, b);
+ // NOTE: cast low to its unsigned type in order to perform a logical right shift.
+ let unsigned_type = native_int_type.to_unsigned(&self.cx);
+ let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
+ let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
+ let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
+ let values = [
+ (high << shift_value) | shifted_low,
+ high >> b,
+ ];
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ actual_else_block.add_assignment(None, result, array_value);
+ actual_else_block.end_with_jump(None, after_block);
+
+ // NOTE: since jumps were added in a place rustc does not expect, the current block in the
+ // state needs to be updated.
+ self.switch_to_block(after_block);
+
+ result.to_rvalue()
+ }
+ }
+
+ fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+ if a_type != b_type {
+ if a_type.is_vector() {
+ // Vector types need to be bitcast.
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ b = self.context.new_bitcast(None, b, a.get_type());
+ }
+ else {
+ b = self.context.new_cast(None, b, a.get_type());
+ }
+ }
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else {
+ let signed = a_type.is_compatible_with(self.i128_type);
+ let func_name =
+ match (operation, signed) {
+ (BinaryOp::Plus, true) => "__rust_i128_add",
+ (BinaryOp::Plus, false) => "__rust_u128_add",
+ (BinaryOp::Minus, true) => "__rust_i128_sub",
+ (BinaryOp::Minus, false) => "__rust_u128_sub",
+ _ => unreachable!("unexpected additive operation {:?}", operation),
+ };
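+            // Declare the helper as an extern function and call it; these __rust_*128_* symbols
+            // are expected to be resolved at link time (presumably by compiler-builtins).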
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
+ self.context.new_call(None, func, &[a, b])
+ }
+ }
+
+ pub fn gcc_add(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.additive_operation(BinaryOp::Plus, a, b)
+ }
+
+ pub fn gcc_mul(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.multiplicative_operation(BinaryOp::Mult, "mul", true, a, b)
+ }
+
+ pub fn gcc_sub(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.additive_operation(BinaryOp::Minus, a, b)
+ }
+
+ fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else {
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "u"
+ };
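+            // This builds libgcc-style 128-bit helper names such as __multi3, __divti3 and
+            // __udivti3 ("ti" being the 128-bit "tetra-integer" mode).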
+ let func_name = format!("__{}{}ti3", sign, operation_name);
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
+ self.context.new_call(None, func, &[a, b])
+ }
+ }
+
+ pub fn gcc_sdiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): check if the types are signed?
+ // 128-bit, signed: __divti3
+ // TODO(antoyo): convert the arguments to signed?
+ self.multiplicative_operation(BinaryOp::Divide, "div", true, a, b)
+ }
+
+ pub fn gcc_udiv(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ // 128-bit, unsigned: __udivti3
+ self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b)
+ }
+
+ pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
+ use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
+
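+        // isize/usize are normalized to their fixed-width equivalents for the target's pointer
+        // width so that the builtin lookup below only has to handle concrete widths.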
+ let new_kind =
+ match typ.kind() {
+ Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+ Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+ t @ (Uint(_) | Int(_)) => t.clone(),
+ _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+ };
+
+ // TODO(antoyo): remove duplication with intrinsic?
+ let name =
+ if self.is_native_int_type(lhs.get_type()) {
+ match oop {
+ OverflowOp::Add =>
+ match new_kind {
+ Int(I8) => "__builtin_add_overflow",
+ Int(I16) => "__builtin_add_overflow",
+ Int(I32) => "__builtin_sadd_overflow",
+ Int(I64) => "__builtin_saddll_overflow",
+ Int(I128) => "__builtin_add_overflow",
+
+ Uint(U8) => "__builtin_add_overflow",
+ Uint(U16) => "__builtin_add_overflow",
+ Uint(U32) => "__builtin_uadd_overflow",
+ Uint(U64) => "__builtin_uaddll_overflow",
+ Uint(U128) => "__builtin_add_overflow",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Sub =>
+ match new_kind {
+ Int(I8) => "__builtin_sub_overflow",
+ Int(I16) => "__builtin_sub_overflow",
+ Int(I32) => "__builtin_ssub_overflow",
+ Int(I64) => "__builtin_ssubll_overflow",
+ Int(I128) => "__builtin_sub_overflow",
+
+ Uint(U8) => "__builtin_sub_overflow",
+ Uint(U16) => "__builtin_sub_overflow",
+ Uint(U32) => "__builtin_usub_overflow",
+ Uint(U64) => "__builtin_usubll_overflow",
+ Uint(U128) => "__builtin_sub_overflow",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I8) => "__builtin_mul_overflow",
+ Int(I16) => "__builtin_mul_overflow",
+ Int(I32) => "__builtin_smul_overflow",
+ Int(I64) => "__builtin_smulll_overflow",
+ Int(I128) => "__builtin_mul_overflow",
+
+ Uint(U8) => "__builtin_mul_overflow",
+ Uint(U16) => "__builtin_mul_overflow",
+ Uint(U32) => "__builtin_umul_overflow",
+ Uint(U64) => "__builtin_umulll_overflow",
+ Uint(U128) => "__builtin_mul_overflow",
+
+ _ => unreachable!(),
+ },
+ }
+ }
+ else {
+ match new_kind {
+ Int(I128) | Uint(U128) => {
+ let func_name =
+ match oop {
+ OverflowOp::Add =>
+ match new_kind {
+ Int(I128) => "__rust_i128_addo",
+ Uint(U128) => "__rust_u128_addo",
+ _ => unreachable!(),
+ },
+ OverflowOp::Sub =>
+ match new_kind {
+ Int(I128) => "__rust_i128_subo",
+ Uint(U128) => "__rust_u128_subo",
+ _ => unreachable!(),
+ },
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead?
+ Uint(U128) => "__rust_u128_mulo",
+ _ => unreachable!(),
+ },
+ };
+ let a_type = lhs.get_type();
+ let b_type = rhs.get_type();
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let result_field = self.context.new_field(None, a_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ let result = self.context.new_call(None, func, &[lhs, rhs]);
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ return (int_result, overflow);
+ },
+ _ => {
+ match oop {
+ OverflowOp::Mul =>
+ match new_kind {
+ Int(I32) => "__mulosi4",
+ Int(I64) => "__mulodi4",
+ _ => unreachable!(),
+ },
+ _ => unimplemented!("overflow operation for {:?}", new_kind),
+ }
+ }
+ }
+ };
+
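+        // GCC's __builtin_*_overflow builtins write the wrapped result through a pointer
+        // argument and return a boolean overflow flag, which overflow_call forwards below.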
+ let intrinsic = self.context.get_builtin_function(&name);
+ let res = self.current_func()
+ // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
+ .new_local(None, rhs.get_type(), "binopResult")
+ .get_address(None);
+ let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
+ (res.dereference(None).to_rvalue(), overflow)
+ }
+
+ pub fn gcc_icmp(&self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = lhs.get_type();
+ let b_type = rhs.get_type();
+ if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
+ let signed = a_type.is_compatible_with(self.i128_type);
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "u"
+ };
+ let func_name = format!("__{}cmpti2", sign);
+ let param_a = self.context.new_parameter(None, a_type, "a");
+ let param_b = self.context.new_parameter(None, b_type, "b");
+ let func = self.context.new_function(None, FunctionType::Extern, self.int_type, &[param_a, param_b], func_name, false);
+ let cmp = self.context.new_call(None, func, &[lhs, rhs]);
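+            // __cmpti2/__ucmpti2 return 0, 1 or 2 when a < b, a == b or a > b respectively, so
+            // each remaining predicate maps to a comparison of that result against a limit.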
+ let (op, limit) =
+ match op {
+ IntPredicate::IntEQ => {
+ return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
+ },
+ IntPredicate::IntNE => {
+ return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
+ },
+ IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
+ IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
+ IntPredicate::IntULT => (ComparisonOp::Equals, 0),
+ IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
+ IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
+ IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
+ IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
+ IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
+ };
+ self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
+ }
+ else {
+ let left_type = lhs.get_type();
+ let right_type = rhs.get_type();
+ if left_type != right_type {
+                // NOTE: cast both sides to usize pointers because libgccjit cannot compare function pointers directly.
+ if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
+ lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
+ rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
+ }
+                // NOTE: comparing the debug representations is a hack to avoid casting a vector type to the same vector type.
+ else if format!("{:?}", left_type) != format!("{:?}", right_type) {
+ rhs = self.context.new_cast(None, rhs, left_type);
+ }
+ }
+ self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+ }
+ }
+
+ pub fn gcc_xor(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+ a ^ b
+ }
+ else {
+ let values = [
+ self.low(a) ^ self.low(b),
+ self.high(a) ^ self.high(b),
+ ];
+ self.context.new_array_constructor(None, a_type, &values)
+ }
+ }
+
+ pub fn gcc_shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ let a_native = self.is_native_int_type(a_type);
+ let b_native = self.is_native_int_type(b_type);
+ if a_native && b_native {
+            // FIXME(antoyo): remove the casts when libgccjit can shift a number by a number of a different signedness.
+ if a_type.is_unsigned(self) && b_type.is_signed(self) {
+ let a = self.context.new_cast(None, a, b_type);
+ let result = a << b;
+ self.context.new_cast(None, result, a_type)
+ }
+ else if a_type.is_signed(self) && b_type.is_unsigned(self) {
+ let b = self.context.new_cast(None, b, a_type);
+ a << b
+ }
+ else {
+ a << b
+ }
+ }
+ else if a_native && !b_native {
+ self.gcc_shl(a, self.gcc_int_cast(b, a_type))
+ }
+ else {
+            // NOTE: we cannot use the ashl builtin because it calls widen_hi(), which itself uses ashl.
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+
+ let func = self.current_func();
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+ let b0_block = func.new_block("b0");
+ let actual_else_block = func.new_block("actual_else");
+
+ let result = func.new_local(None, a_type, "shiftResult");
+
+ let b = self.gcc_int_cast(b, native_int_type);
+ let sixty_four = self.gcc_int(native_int_type, 64);
+ let zero = self.gcc_zero(native_int_type);
+ let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
+ self.llbb().end_with_conditional(None, condition, then_block, else_block);
+
+ // TODO(antoyo): take endianness into account.
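+            // Shifting left by 64 or more: the low half becomes zero and the high half receives
+            // the old low half shifted by the remainder (b - 64).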
+ let values = [
+ zero,
+ self.low(a) << (b - sixty_four),
+ ];
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ then_block.add_assignment(None, result, array_value);
+ then_block.end_with_jump(None, after_block);
+
+ let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
+ else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+
+ b0_block.add_assignment(None, result, a);
+ b0_block.end_with_jump(None, after_block);
+
+ // NOTE: cast low to its unsigned type in order to perform a logical right shift.
+ let unsigned_type = native_int_type.to_unsigned(&self.cx);
+ let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
+ let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
+ let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
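+            // Shifting left by fewer than 64 bits: the low half is shifted directly, while the
+            // high half combines its own shifted bits with the bits carried over from the low
+            // half (the logical shift computed above).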
+ let values = [
+ self.low(a) << b,
+ (self.high(a) << b) | high_low,
+ ];
+
+ let array_value = self.context.new_array_constructor(None, a_type, &values);
+ actual_else_block.add_assignment(None, result, array_value);
+ actual_else_block.end_with_jump(None, after_block);
+
+            // NOTE: since jumps were added in a place rustc does not expect, the current block
+            // in the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ result.to_rvalue()
+ }
+ }
+
+ pub fn gcc_bswap(&mut self, mut arg: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+ let arg_type = arg.get_type();
+ if !self.is_native_int_type(arg_type) {
+ let native_int_type = arg_type.dyncast_array().expect("get element type");
+ let lsb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 0)).to_rvalue();
+ let swapped_lsb = self.gcc_bswap(lsb, width / 2);
+ let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
+ let msb = self.context.new_array_access(None, arg, self.context.new_rvalue_from_int(self.int_type, 1)).to_rvalue();
+ let swapped_msb = self.gcc_bswap(msb, width / 2);
+ let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);
+
+            // NOTE: we also need to swap the two elements here, in addition to swapping inside
+            // the elements themselves as done above.
+ return self.context.new_array_constructor(None, arg_type, &[swapped_msb, swapped_lsb]);
+ }
+
+ // TODO(antoyo): check if it's faster to use string literals and a
+ // match instead of format!.
+ let bswap = self.cx.context.get_builtin_function(&format!("__builtin_bswap{}", width));
+ // FIXME(antoyo): this cast should not be necessary. Remove
+ // when having proper sized integer types.
+ let param_type = bswap.get_param(0).to_rvalue().get_type();
+ if param_type != arg_type {
+ arg = self.bitcast(arg, param_type);
+ }
+ self.cx.context.new_call(None, bswap, &[arg])
+ }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
+ if self.is_native_int_type_or_bool(typ) {
+            self.context.new_rvalue_from_long(typ, int)
+ }
+ else {
+ // NOTE: set the sign in high.
+ self.from_low_high(typ, int, -(int.is_negative() as i64))
+ }
+ }
+
+ pub fn gcc_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
+ if self.is_native_int_type_or_bool(typ) {
+            self.context.new_rvalue_from_long(typ, int as i64)
+ }
+ else {
+ self.from_low_high(typ, int as i64, 0)
+ }
+ }
+
+ pub fn gcc_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
+ let low = num as u64;
+ let high = (num >> 64) as u64;
+ if num >> 64 != 0 {
+ // FIXME(antoyo): use a new function new_rvalue_from_unsigned_long()?
+ if self.is_native_int_type(typ) {
+ let low = self.context.new_rvalue_from_long(self.u64_type, low as i64);
+ let high = self.context.new_rvalue_from_long(typ, high as i64);
+
+ let sixty_four = self.context.new_rvalue_from_long(typ, 64);
+ let shift = high << sixty_four;
+ shift | self.context.new_cast(None, low, typ)
+ }
+ else {
+ self.from_low_high(typ, low as i64, high as i64)
+ }
+ }
+ else if typ.is_i128(self) {
+ let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
+ self.gcc_int_cast(num, typ)
+ }
+ else {
+ self.gcc_uint(typ, num as u64)
+ }
+ }
+
+ pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> {
+ if self.is_native_int_type_or_bool(typ) {
+ self.context.new_rvalue_zero(typ)
+ }
+ else {
+ self.from_low_high(typ, 0, 0)
+ }
+ }
+
+ pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 {
+ if self.is_native_int_type_or_bool(typ) {
+ typ.get_size() as u64 * 8
+ }
+ else {
+ // NOTE: the only unsupported types are u128 and i128.
+ 128
+ }
+ }
+
+ fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+ let a_type = a.get_type();
+ let b_type = b.get_type();
+ let a_native = self.is_native_int_type_or_bool(a_type);
+ let b_native = self.is_native_int_type_or_bool(b_type);
+ if a_type.is_vector() && b_type.is_vector() {
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else if a_native && b_native {
+ if a_type != b_type {
+ b = self.context.new_cast(None, b, a_type);
+ }
+ self.context.new_binary_op(None, operation, a_type, a, b)
+ }
+ else {
+            assert!(!a_native && !b_native, "both types should either be native or non-native for a bitwise operation");
+ let native_int_type = a_type.dyncast_array().expect("get element type");
+ let values = [
+ self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)),
+ self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)),
+ ];
+ self.context.new_array_constructor(None, a_type, &values)
+ }
+ }
+
+ pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+ self.bitwise_operation(BinaryOp::BitwiseOr, a, b)
+ }
+
+ // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
+ pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ let value_type = value.get_type();
+ if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) {
+ self.context.new_cast(None, value, dest_typ)
+ }
+ else if self.is_native_int_type_or_bool(dest_typ) {
+ self.context.new_cast(None, self.low(value), dest_typ)
+ }
+ else if self.is_native_int_type_or_bool(value_type) {
+ let dest_element_type = dest_typ.dyncast_array().expect("get element type");
+
+ // NOTE: set the sign of the value.
+ let zero = self.context.new_rvalue_zero(value_type);
+ let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
+ let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
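+            // Negating the 0/1 flag yields 0 or -1 (all ones), i.e. the sign extension that
+            // belongs in the high half.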
+ let values = [
+ self.context.new_cast(None, value, dest_element_type),
+ self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
+ ];
+ self.context.new_array_constructor(None, dest_typ, &values)
+ }
+ else {
+ // Since u128 and i128 are the only types that can be unsupported, we know the type of
+ // value and the destination type have the same size, so a bitcast is fine.
+
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ self.context.new_bitcast(None, value, dest_typ)
+ }
+ }
+
+ fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ let value_type = value.get_type();
+ if self.is_native_int_type_or_bool(value_type) {
+ return self.context.new_cast(None, value, dest_typ);
+ }
+
+ let name_suffix =
+ match self.type_kind(dest_typ) {
+ TypeKind::Float => "tisf",
+ TypeKind::Double => "tidf",
+ kind => panic!("cannot cast a non-native integer to type {:?}", kind),
+ };
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "un"
+ };
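+        // This produces libgcc conversion helpers such as __floattisf, __floatuntisf,
+        // __floattidf and __floatuntidf.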
+ let func_name = format!("__float{}{}", sign, name_suffix);
+ let param = self.context.new_parameter(None, value_type, "n");
+ let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+ self.context.new_call(None, func, &[value])
+ }
+
+ pub fn gcc_int_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.int_to_float_cast(true, value, dest_typ)
+ }
+
+ pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.int_to_float_cast(false, value, dest_typ)
+ }
+
+ fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ let value_type = value.get_type();
+ if self.is_native_int_type_or_bool(dest_typ) {
+ return self.context.new_cast(None, value, dest_typ);
+ }
+
+ let name_suffix =
+ match self.type_kind(value_type) {
+ TypeKind::Float => "sfti",
+ TypeKind::Double => "dfti",
+ kind => panic!("cannot cast a {:?} to non-native integer", kind),
+ };
+ let sign =
+ if signed {
+ ""
+ }
+ else {
+ "uns"
+ };
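+        // This produces libgcc conversion helpers such as __fixsfti, __fixunssfti, __fixdfti
+        // and __fixunsdfti.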
+ let func_name = format!("__fix{}{}", sign, name_suffix);
+ let param = self.context.new_parameter(None, value_type, "n");
+ let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+ self.context.new_call(None, func, &[value])
+ }
+
+ pub fn gcc_float_to_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.float_to_int_cast(true, value, dest_typ)
+ }
+
+ pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+ self.float_to_int_cast(false, value, dest_typ)
+ }
+
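+    // The two-limb layout keeps the least significant half at index 0 and the most significant
+    // half at index 1 (little-endian limb order; see the endianness TODOs above).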
+ fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+ self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 1))
+ .to_rvalue()
+ }
+
+ fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
+ self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, 0))
+ .to_rvalue()
+ }
+
+ fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
+ let native_int_type = typ.dyncast_array().expect("get element type");
+ let values = [
+ self.context.new_rvalue_from_long(native_int_type, low),
+ self.context.new_rvalue_from_long(native_int_type, high),
+ ];
+ self.context.new_array_constructor(None, typ, &values)
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
new file mode 100644
index 000000000..fb6c38fa0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
@@ -0,0 +1,5722 @@
+// File generated by `rustc_codegen_gcc/tools/generate_intrinsics.py`
+// DO NOT EDIT IT!
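+// This table maps LLVM intrinsic names to the corresponding GCC builtin names.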
+match name {
+ // AMDGPU
+ "llvm.AMDGPU.div.fixup.f32" => "__builtin_amdgpu_div_fixup",
+ "llvm.AMDGPU.div.fixup.f64" => "__builtin_amdgpu_div_fixup",
+ "llvm.AMDGPU.div.fixup.v2f64" => "__builtin_amdgpu_div_fixup",
+ "llvm.AMDGPU.div.fixup.v4f32" => "__builtin_amdgpu_div_fixup",
+ "llvm.AMDGPU.div.fmas.f32" => "__builtin_amdgpu_div_fmas",
+ "llvm.AMDGPU.div.fmas.f64" => "__builtin_amdgpu_div_fmas",
+ "llvm.AMDGPU.div.fmas.v2f64" => "__builtin_amdgpu_div_fmas",
+ "llvm.AMDGPU.div.fmas.v4f32" => "__builtin_amdgpu_div_fmas",
+ "llvm.AMDGPU.ldexp.f32" => "__builtin_amdgpu_ldexp",
+ "llvm.AMDGPU.ldexp.f64" => "__builtin_amdgpu_ldexp",
+ "llvm.AMDGPU.ldexp.v2f64" => "__builtin_amdgpu_ldexp",
+ "llvm.AMDGPU.ldexp.v4f32" => "__builtin_amdgpu_ldexp",
+ "llvm.AMDGPU.rcp.f32" => "__builtin_amdgpu_rcp",
+ "llvm.AMDGPU.rcp.f64" => "__builtin_amdgpu_rcp",
+ "llvm.AMDGPU.rcp.v2f64" => "__builtin_amdgpu_rcp",
+ "llvm.AMDGPU.rcp.v4f32" => "__builtin_amdgpu_rcp",
+ "llvm.AMDGPU.rsq.clamped.f32" => "__builtin_amdgpu_rsq_clamped",
+ "llvm.AMDGPU.rsq.clamped.f64" => "__builtin_amdgpu_rsq_clamped",
+ "llvm.AMDGPU.rsq.clamped.v2f64" => "__builtin_amdgpu_rsq_clamped",
+ "llvm.AMDGPU.rsq.clamped.v4f32" => "__builtin_amdgpu_rsq_clamped",
+ "llvm.AMDGPU.rsq.f32" => "__builtin_amdgpu_rsq",
+ "llvm.AMDGPU.rsq.f64" => "__builtin_amdgpu_rsq",
+ "llvm.AMDGPU.rsq.v2f64" => "__builtin_amdgpu_rsq",
+ "llvm.AMDGPU.rsq.v4f32" => "__builtin_amdgpu_rsq",
+ "llvm.AMDGPU.trig.preop.f32" => "__builtin_amdgpu_trig_preop",
+ "llvm.AMDGPU.trig.preop.f64" => "__builtin_amdgpu_trig_preop",
+ "llvm.AMDGPU.trig.preop.v2f64" => "__builtin_amdgpu_trig_preop",
+ "llvm.AMDGPU.trig.preop.v4f32" => "__builtin_amdgpu_trig_preop",
+ // aarch64
+ "llvm.aarch64.dmb" => "__builtin_arm_dmb",
+ "llvm.aarch64.dsb" => "__builtin_arm_dsb",
+ "llvm.aarch64.isb" => "__builtin_arm_isb",
+ "llvm.aarch64.sve.aesd" => "__builtin_sve_svaesd_u8",
+ "llvm.aarch64.sve.aese" => "__builtin_sve_svaese_u8",
+ "llvm.aarch64.sve.aesimc" => "__builtin_sve_svaesimc_u8",
+ "llvm.aarch64.sve.aesmc" => "__builtin_sve_svaesmc_u8",
+ "llvm.aarch64.sve.rax1" => "__builtin_sve_svrax1_u64",
+ "llvm.aarch64.sve.rdffr" => "__builtin_sve_svrdffr",
+ "llvm.aarch64.sve.rdffr.z" => "__builtin_sve_svrdffr_z",
+ "llvm.aarch64.sve.setffr" => "__builtin_sve_svsetffr",
+ "llvm.aarch64.sve.sm4e" => "__builtin_sve_svsm4e_u32",
+ "llvm.aarch64.sve.sm4ekey" => "__builtin_sve_svsm4ekey_u32",
+ "llvm.aarch64.sve.wrffr" => "__builtin_sve_svwrffr",
+ "llvm.aarch64.tcancel" => "__builtin_arm_tcancel",
+ "llvm.aarch64.tcommit" => "__builtin_arm_tcommit",
+ "llvm.aarch64.tstart" => "__builtin_arm_tstart",
+ "llvm.aarch64.ttest" => "__builtin_arm_ttest",
+ // amdgcn
+ "llvm.amdgcn.alignbyte" => "__builtin_amdgcn_alignbyte",
+ "llvm.amdgcn.buffer.wbinvl1" => "__builtin_amdgcn_buffer_wbinvl1",
+ "llvm.amdgcn.buffer.wbinvl1.sc" => "__builtin_amdgcn_buffer_wbinvl1_sc",
+ "llvm.amdgcn.buffer.wbinvl1.vol" => "__builtin_amdgcn_buffer_wbinvl1_vol",
+ "llvm.amdgcn.cubeid" => "__builtin_amdgcn_cubeid",
+ "llvm.amdgcn.cubema" => "__builtin_amdgcn_cubema",
+ "llvm.amdgcn.cubesc" => "__builtin_amdgcn_cubesc",
+ "llvm.amdgcn.cubetc" => "__builtin_amdgcn_cubetc",
+ "llvm.amdgcn.cvt.pk.i16" => "__builtin_amdgcn_cvt_pk_i16",
+ "llvm.amdgcn.cvt.pk.u16" => "__builtin_amdgcn_cvt_pk_u16",
+ "llvm.amdgcn.cvt.pk.u8.f32" => "__builtin_amdgcn_cvt_pk_u8_f32",
+ "llvm.amdgcn.cvt.pknorm.i16" => "__builtin_amdgcn_cvt_pknorm_i16",
+ "llvm.amdgcn.cvt.pknorm.u16" => "__builtin_amdgcn_cvt_pknorm_u16",
+ "llvm.amdgcn.cvt.pkrtz" => "__builtin_amdgcn_cvt_pkrtz",
+ "llvm.amdgcn.dispatch.id" => "__builtin_amdgcn_dispatch_id",
+ "llvm.amdgcn.ds.bpermute" => "__builtin_amdgcn_ds_bpermute",
+ "llvm.amdgcn.ds.fadd.v2bf16" => "__builtin_amdgcn_ds_atomic_fadd_v2bf16",
+ "llvm.amdgcn.ds.gws.barrier" => "__builtin_amdgcn_ds_gws_barrier",
+ "llvm.amdgcn.ds.gws.init" => "__builtin_amdgcn_ds_gws_init",
+ "llvm.amdgcn.ds.gws.sema.br" => "__builtin_amdgcn_ds_gws_sema_br",
+ "llvm.amdgcn.ds.gws.sema.p" => "__builtin_amdgcn_ds_gws_sema_p",
+ "llvm.amdgcn.ds.gws.sema.release.all" => "__builtin_amdgcn_ds_gws_sema_release_all",
+ "llvm.amdgcn.ds.gws.sema.v" => "__builtin_amdgcn_ds_gws_sema_v",
+ "llvm.amdgcn.ds.permute" => "__builtin_amdgcn_ds_permute",
+ "llvm.amdgcn.ds.swizzle" => "__builtin_amdgcn_ds_swizzle",
+ "llvm.amdgcn.endpgm" => "__builtin_amdgcn_endpgm",
+ "llvm.amdgcn.fdot2" => "__builtin_amdgcn_fdot2",
+ "llvm.amdgcn.fmed3" => "__builtin_amdgcn_fmed3",
+ "llvm.amdgcn.fmul.legacy" => "__builtin_amdgcn_fmul_legacy",
+ "llvm.amdgcn.groupstaticsize" => "__builtin_amdgcn_groupstaticsize",
+ "llvm.amdgcn.implicit.buffer.ptr" => "__builtin_amdgcn_implicit_buffer_ptr",
+ "llvm.amdgcn.implicitarg.ptr" => "__builtin_amdgcn_implicitarg_ptr",
+ "llvm.amdgcn.interp.mov" => "__builtin_amdgcn_interp_mov",
+ "llvm.amdgcn.interp.p1" => "__builtin_amdgcn_interp_p1",
+ "llvm.amdgcn.interp.p1.f16" => "__builtin_amdgcn_interp_p1_f16",
+ "llvm.amdgcn.interp.p2" => "__builtin_amdgcn_interp_p2",
+ "llvm.amdgcn.interp.p2.f16" => "__builtin_amdgcn_interp_p2_f16",
+ "llvm.amdgcn.is.private" => "__builtin_amdgcn_is_private",
+ "llvm.amdgcn.is.shared" => "__builtin_amdgcn_is_shared",
+ "llvm.amdgcn.kernarg.segment.ptr" => "__builtin_amdgcn_kernarg_segment_ptr",
+ "llvm.amdgcn.lerp" => "__builtin_amdgcn_lerp",
+ "llvm.amdgcn.mbcnt.hi" => "__builtin_amdgcn_mbcnt_hi",
+ "llvm.amdgcn.mbcnt.lo" => "__builtin_amdgcn_mbcnt_lo",
+ "llvm.amdgcn.mqsad.pk.u16.u8" => "__builtin_amdgcn_mqsad_pk_u16_u8",
+ "llvm.amdgcn.mqsad.u32.u8" => "__builtin_amdgcn_mqsad_u32_u8",
+ "llvm.amdgcn.msad.u8" => "__builtin_amdgcn_msad_u8",
+ "llvm.amdgcn.perm" => "__builtin_amdgcn_perm",
+ "llvm.amdgcn.permlane16" => "__builtin_amdgcn_permlane16",
+ "llvm.amdgcn.permlanex16" => "__builtin_amdgcn_permlanex16",
+ "llvm.amdgcn.qsad.pk.u16.u8" => "__builtin_amdgcn_qsad_pk_u16_u8",
+ "llvm.amdgcn.queue.ptr" => "__builtin_amdgcn_queue_ptr",
+ "llvm.amdgcn.rcp.legacy" => "__builtin_amdgcn_rcp_legacy",
+ "llvm.amdgcn.readfirstlane" => "__builtin_amdgcn_readfirstlane",
+ "llvm.amdgcn.readlane" => "__builtin_amdgcn_readlane",
+ "llvm.amdgcn.rsq.legacy" => "__builtin_amdgcn_rsq_legacy",
+ "llvm.amdgcn.s.barrier" => "__builtin_amdgcn_s_barrier",
+ "llvm.amdgcn.s.dcache.inv" => "__builtin_amdgcn_s_dcache_inv",
+ "llvm.amdgcn.s.dcache.inv.vol" => "__builtin_amdgcn_s_dcache_inv_vol",
+ "llvm.amdgcn.s.dcache.wb" => "__builtin_amdgcn_s_dcache_wb",
+ "llvm.amdgcn.s.dcache.wb.vol" => "__builtin_amdgcn_s_dcache_wb_vol",
+ "llvm.amdgcn.s.decperflevel" => "__builtin_amdgcn_s_decperflevel",
+ "llvm.amdgcn.s.get.waveid.in.workgroup" => "__builtin_amdgcn_s_get_waveid_in_workgroup",
+ "llvm.amdgcn.s.getpc" => "__builtin_amdgcn_s_getpc",
+ "llvm.amdgcn.s.getreg" => "__builtin_amdgcn_s_getreg",
+ "llvm.amdgcn.s.incperflevel" => "__builtin_amdgcn_s_incperflevel",
+ "llvm.amdgcn.s.memrealtime" => "__builtin_amdgcn_s_memrealtime",
+ "llvm.amdgcn.s.memtime" => "__builtin_amdgcn_s_memtime",
+ "llvm.amdgcn.s.sendmsg" => "__builtin_amdgcn_s_sendmsg",
+ "llvm.amdgcn.s.sendmsghalt" => "__builtin_amdgcn_s_sendmsghalt",
+ "llvm.amdgcn.s.setprio" => "__builtin_amdgcn_s_setprio",
+ "llvm.amdgcn.s.setreg" => "__builtin_amdgcn_s_setreg",
+ "llvm.amdgcn.s.sleep" => "__builtin_amdgcn_s_sleep",
+ "llvm.amdgcn.s.waitcnt" => "__builtin_amdgcn_s_waitcnt",
+ "llvm.amdgcn.sad.hi.u8" => "__builtin_amdgcn_sad_hi_u8",
+ "llvm.amdgcn.sad.u16" => "__builtin_amdgcn_sad_u16",
+ "llvm.amdgcn.sad.u8" => "__builtin_amdgcn_sad_u8",
+ "llvm.amdgcn.sched.barrier" => "__builtin_amdgcn_sched_barrier",
+ "llvm.amdgcn.sdot2" => "__builtin_amdgcn_sdot2",
+ "llvm.amdgcn.sdot4" => "__builtin_amdgcn_sdot4",
+ "llvm.amdgcn.sdot8" => "__builtin_amdgcn_sdot8",
+ "llvm.amdgcn.udot2" => "__builtin_amdgcn_udot2",
+ "llvm.amdgcn.udot4" => "__builtin_amdgcn_udot4",
+ "llvm.amdgcn.udot8" => "__builtin_amdgcn_udot8",
+ "llvm.amdgcn.wave.barrier" => "__builtin_amdgcn_wave_barrier",
+ "llvm.amdgcn.wavefrontsize" => "__builtin_amdgcn_wavefrontsize",
+ "llvm.amdgcn.writelane" => "__builtin_amdgcn_writelane",
+ // arm
+ "llvm.arm.cdp" => "__builtin_arm_cdp",
+ "llvm.arm.cdp2" => "__builtin_arm_cdp2",
+ "llvm.arm.cmse.tt" => "__builtin_arm_cmse_TT",
+ "llvm.arm.cmse.tta" => "__builtin_arm_cmse_TTA",
+ "llvm.arm.cmse.ttat" => "__builtin_arm_cmse_TTAT",
+ "llvm.arm.cmse.ttt" => "__builtin_arm_cmse_TTT",
+ "llvm.arm.dmb" => "__builtin_arm_dmb",
+ "llvm.arm.dsb" => "__builtin_arm_dsb",
+ "llvm.arm.get.fpscr" => "__builtin_arm_get_fpscr",
+ "llvm.arm.isb" => "__builtin_arm_isb",
+ "llvm.arm.ldc" => "__builtin_arm_ldc",
+ "llvm.arm.ldc2" => "__builtin_arm_ldc2",
+ "llvm.arm.ldc2l" => "__builtin_arm_ldc2l",
+ "llvm.arm.ldcl" => "__builtin_arm_ldcl",
+ "llvm.arm.mcr" => "__builtin_arm_mcr",
+ "llvm.arm.mcr2" => "__builtin_arm_mcr2",
+ "llvm.arm.mcrr" => "__builtin_arm_mcrr",
+ "llvm.arm.mcrr2" => "__builtin_arm_mcrr2",
+ "llvm.arm.mrc" => "__builtin_arm_mrc",
+ "llvm.arm.mrc2" => "__builtin_arm_mrc2",
+ "llvm.arm.qadd" => "__builtin_arm_qadd",
+ "llvm.arm.qadd16" => "__builtin_arm_qadd16",
+ "llvm.arm.qadd8" => "__builtin_arm_qadd8",
+ "llvm.arm.qasx" => "__builtin_arm_qasx",
+ "llvm.arm.qsax" => "__builtin_arm_qsax",
+ "llvm.arm.qsub" => "__builtin_arm_qsub",
+ "llvm.arm.qsub16" => "__builtin_arm_qsub16",
+ "llvm.arm.qsub8" => "__builtin_arm_qsub8",
+ "llvm.arm.sadd16" => "__builtin_arm_sadd16",
+ "llvm.arm.sadd8" => "__builtin_arm_sadd8",
+ "llvm.arm.sasx" => "__builtin_arm_sasx",
+ "llvm.arm.sel" => "__builtin_arm_sel",
+ "llvm.arm.set.fpscr" => "__builtin_arm_set_fpscr",
+ "llvm.arm.shadd16" => "__builtin_arm_shadd16",
+ "llvm.arm.shadd8" => "__builtin_arm_shadd8",
+ "llvm.arm.shasx" => "__builtin_arm_shasx",
+ "llvm.arm.shsax" => "__builtin_arm_shsax",
+ "llvm.arm.shsub16" => "__builtin_arm_shsub16",
+ "llvm.arm.shsub8" => "__builtin_arm_shsub8",
+ "llvm.arm.smlabb" => "__builtin_arm_smlabb",
+ "llvm.arm.smlabt" => "__builtin_arm_smlabt",
+ "llvm.arm.smlad" => "__builtin_arm_smlad",
+ "llvm.arm.smladx" => "__builtin_arm_smladx",
+ "llvm.arm.smlald" => "__builtin_arm_smlald",
+ "llvm.arm.smlaldx" => "__builtin_arm_smlaldx",
+ "llvm.arm.smlatb" => "__builtin_arm_smlatb",
+ "llvm.arm.smlatt" => "__builtin_arm_smlatt",
+ "llvm.arm.smlawb" => "__builtin_arm_smlawb",
+ "llvm.arm.smlawt" => "__builtin_arm_smlawt",
+ "llvm.arm.smlsd" => "__builtin_arm_smlsd",
+ "llvm.arm.smlsdx" => "__builtin_arm_smlsdx",
+ "llvm.arm.smlsld" => "__builtin_arm_smlsld",
+ "llvm.arm.smlsldx" => "__builtin_arm_smlsldx",
+ "llvm.arm.smuad" => "__builtin_arm_smuad",
+ "llvm.arm.smuadx" => "__builtin_arm_smuadx",
+ "llvm.arm.smulbb" => "__builtin_arm_smulbb",
+ "llvm.arm.smulbt" => "__builtin_arm_smulbt",
+ "llvm.arm.smultb" => "__builtin_arm_smultb",
+ "llvm.arm.smultt" => "__builtin_arm_smultt",
+ "llvm.arm.smulwb" => "__builtin_arm_smulwb",
+ "llvm.arm.smulwt" => "__builtin_arm_smulwt",
+ "llvm.arm.smusd" => "__builtin_arm_smusd",
+ "llvm.arm.smusdx" => "__builtin_arm_smusdx",
+ "llvm.arm.ssat" => "__builtin_arm_ssat",
+ "llvm.arm.ssat16" => "__builtin_arm_ssat16",
+ "llvm.arm.ssax" => "__builtin_arm_ssax",
+ "llvm.arm.ssub16" => "__builtin_arm_ssub16",
+ "llvm.arm.ssub8" => "__builtin_arm_ssub8",
+ "llvm.arm.stc" => "__builtin_arm_stc",
+ "llvm.arm.stc2" => "__builtin_arm_stc2",
+ "llvm.arm.stc2l" => "__builtin_arm_stc2l",
+ "llvm.arm.stcl" => "__builtin_arm_stcl",
+ "llvm.arm.sxtab16" => "__builtin_arm_sxtab16",
+ "llvm.arm.sxtb16" => "__builtin_arm_sxtb16",
+ "llvm.arm.thread.pointer" => "__builtin_thread_pointer",
+ "llvm.arm.uadd16" => "__builtin_arm_uadd16",
+ "llvm.arm.uadd8" => "__builtin_arm_uadd8",
+ "llvm.arm.uasx" => "__builtin_arm_uasx",
+ "llvm.arm.uhadd16" => "__builtin_arm_uhadd16",
+ "llvm.arm.uhadd8" => "__builtin_arm_uhadd8",
+ "llvm.arm.uhasx" => "__builtin_arm_uhasx",
+ "llvm.arm.uhsax" => "__builtin_arm_uhsax",
+ "llvm.arm.uhsub16" => "__builtin_arm_uhsub16",
+ "llvm.arm.uhsub8" => "__builtin_arm_uhsub8",
+ "llvm.arm.uqadd16" => "__builtin_arm_uqadd16",
+ "llvm.arm.uqadd8" => "__builtin_arm_uqadd8",
+ "llvm.arm.uqasx" => "__builtin_arm_uqasx",
+ "llvm.arm.uqsax" => "__builtin_arm_uqsax",
+ "llvm.arm.uqsub16" => "__builtin_arm_uqsub16",
+ "llvm.arm.uqsub8" => "__builtin_arm_uqsub8",
+ "llvm.arm.usad8" => "__builtin_arm_usad8",
+ "llvm.arm.usada8" => "__builtin_arm_usada8",
+ "llvm.arm.usat" => "__builtin_arm_usat",
+ "llvm.arm.usat16" => "__builtin_arm_usat16",
+ "llvm.arm.usax" => "__builtin_arm_usax",
+ "llvm.arm.usub16" => "__builtin_arm_usub16",
+ "llvm.arm.usub8" => "__builtin_arm_usub8",
+ "llvm.arm.uxtab16" => "__builtin_arm_uxtab16",
+ "llvm.arm.uxtb16" => "__builtin_arm_uxtb16",
+ // bpf
+ "llvm.bpf.btf.type.id" => "__builtin_bpf_btf_type_id",
+ "llvm.bpf.compare" => "__builtin_bpf_compare",
+ "llvm.bpf.load.byte" => "__builtin_bpf_load_byte",
+ "llvm.bpf.load.half" => "__builtin_bpf_load_half",
+ "llvm.bpf.load.word" => "__builtin_bpf_load_word",
+ "llvm.bpf.passthrough" => "__builtin_bpf_passthrough",
+ "llvm.bpf.preserve.enum.value" => "__builtin_bpf_preserve_enum_value",
+ "llvm.bpf.preserve.field.info" => "__builtin_bpf_preserve_field_info",
+ "llvm.bpf.preserve.type.info" => "__builtin_bpf_preserve_type_info",
+ "llvm.bpf.pseudo" => "__builtin_bpf_pseudo",
+ // cuda
+ "llvm.cuda.syncthreads" => "__syncthreads",
+ // hexagon
+ "llvm.hexagon.A2.abs" => "__builtin_HEXAGON_A2_abs",
+ "llvm.hexagon.A2.absp" => "__builtin_HEXAGON_A2_absp",
+ "llvm.hexagon.A2.abssat" => "__builtin_HEXAGON_A2_abssat",
+ "llvm.hexagon.A2.add" => "__builtin_HEXAGON_A2_add",
+ "llvm.hexagon.A2.addh.h16.hh" => "__builtin_HEXAGON_A2_addh_h16_hh",
+ "llvm.hexagon.A2.addh.h16.hl" => "__builtin_HEXAGON_A2_addh_h16_hl",
+ "llvm.hexagon.A2.addh.h16.lh" => "__builtin_HEXAGON_A2_addh_h16_lh",
+ "llvm.hexagon.A2.addh.h16.ll" => "__builtin_HEXAGON_A2_addh_h16_ll",
+ "llvm.hexagon.A2.addh.h16.sat.hh" => "__builtin_HEXAGON_A2_addh_h16_sat_hh",
+ "llvm.hexagon.A2.addh.h16.sat.hl" => "__builtin_HEXAGON_A2_addh_h16_sat_hl",
+ "llvm.hexagon.A2.addh.h16.sat.lh" => "__builtin_HEXAGON_A2_addh_h16_sat_lh",
+ "llvm.hexagon.A2.addh.h16.sat.ll" => "__builtin_HEXAGON_A2_addh_h16_sat_ll",
+ "llvm.hexagon.A2.addh.l16.hl" => "__builtin_HEXAGON_A2_addh_l16_hl",
+ "llvm.hexagon.A2.addh.l16.ll" => "__builtin_HEXAGON_A2_addh_l16_ll",
+ "llvm.hexagon.A2.addh.l16.sat.hl" => "__builtin_HEXAGON_A2_addh_l16_sat_hl",
+ "llvm.hexagon.A2.addh.l16.sat.ll" => "__builtin_HEXAGON_A2_addh_l16_sat_ll",
+ "llvm.hexagon.A2.addi" => "__builtin_HEXAGON_A2_addi",
+ "llvm.hexagon.A2.addp" => "__builtin_HEXAGON_A2_addp",
+ "llvm.hexagon.A2.addpsat" => "__builtin_HEXAGON_A2_addpsat",
+ "llvm.hexagon.A2.addsat" => "__builtin_HEXAGON_A2_addsat",
+ "llvm.hexagon.A2.addsp" => "__builtin_HEXAGON_A2_addsp",
+ "llvm.hexagon.A2.and" => "__builtin_HEXAGON_A2_and",
+ "llvm.hexagon.A2.andir" => "__builtin_HEXAGON_A2_andir",
+ "llvm.hexagon.A2.andp" => "__builtin_HEXAGON_A2_andp",
+ "llvm.hexagon.A2.aslh" => "__builtin_HEXAGON_A2_aslh",
+ "llvm.hexagon.A2.asrh" => "__builtin_HEXAGON_A2_asrh",
+ "llvm.hexagon.A2.combine.hh" => "__builtin_HEXAGON_A2_combine_hh",
+ "llvm.hexagon.A2.combine.hl" => "__builtin_HEXAGON_A2_combine_hl",
+ "llvm.hexagon.A2.combine.lh" => "__builtin_HEXAGON_A2_combine_lh",
+ "llvm.hexagon.A2.combine.ll" => "__builtin_HEXAGON_A2_combine_ll",
+ "llvm.hexagon.A2.combineii" => "__builtin_HEXAGON_A2_combineii",
+ "llvm.hexagon.A2.combinew" => "__builtin_HEXAGON_A2_combinew",
+ "llvm.hexagon.A2.max" => "__builtin_HEXAGON_A2_max",
+ "llvm.hexagon.A2.maxp" => "__builtin_HEXAGON_A2_maxp",
+ "llvm.hexagon.A2.maxu" => "__builtin_HEXAGON_A2_maxu",
+ "llvm.hexagon.A2.maxup" => "__builtin_HEXAGON_A2_maxup",
+ "llvm.hexagon.A2.min" => "__builtin_HEXAGON_A2_min",
+ "llvm.hexagon.A2.minp" => "__builtin_HEXAGON_A2_minp",
+ "llvm.hexagon.A2.minu" => "__builtin_HEXAGON_A2_minu",
+ "llvm.hexagon.A2.minup" => "__builtin_HEXAGON_A2_minup",
+ "llvm.hexagon.A2.neg" => "__builtin_HEXAGON_A2_neg",
+ "llvm.hexagon.A2.negp" => "__builtin_HEXAGON_A2_negp",
+ "llvm.hexagon.A2.negsat" => "__builtin_HEXAGON_A2_negsat",
+ "llvm.hexagon.A2.not" => "__builtin_HEXAGON_A2_not",
+ "llvm.hexagon.A2.notp" => "__builtin_HEXAGON_A2_notp",
+ "llvm.hexagon.A2.or" => "__builtin_HEXAGON_A2_or",
+ "llvm.hexagon.A2.orir" => "__builtin_HEXAGON_A2_orir",
+ "llvm.hexagon.A2.orp" => "__builtin_HEXAGON_A2_orp",
+ "llvm.hexagon.A2.roundsat" => "__builtin_HEXAGON_A2_roundsat",
+ "llvm.hexagon.A2.sat" => "__builtin_HEXAGON_A2_sat",
+ "llvm.hexagon.A2.satb" => "__builtin_HEXAGON_A2_satb",
+ "llvm.hexagon.A2.sath" => "__builtin_HEXAGON_A2_sath",
+ "llvm.hexagon.A2.satub" => "__builtin_HEXAGON_A2_satub",
+ "llvm.hexagon.A2.satuh" => "__builtin_HEXAGON_A2_satuh",
+ "llvm.hexagon.A2.sub" => "__builtin_HEXAGON_A2_sub",
+ "llvm.hexagon.A2.subh.h16.hh" => "__builtin_HEXAGON_A2_subh_h16_hh",
+ "llvm.hexagon.A2.subh.h16.hl" => "__builtin_HEXAGON_A2_subh_h16_hl",
+ "llvm.hexagon.A2.subh.h16.lh" => "__builtin_HEXAGON_A2_subh_h16_lh",
+ "llvm.hexagon.A2.subh.h16.ll" => "__builtin_HEXAGON_A2_subh_h16_ll",
+ "llvm.hexagon.A2.subh.h16.sat.hh" => "__builtin_HEXAGON_A2_subh_h16_sat_hh",
+ "llvm.hexagon.A2.subh.h16.sat.hl" => "__builtin_HEXAGON_A2_subh_h16_sat_hl",
+ "llvm.hexagon.A2.subh.h16.sat.lh" => "__builtin_HEXAGON_A2_subh_h16_sat_lh",
+ "llvm.hexagon.A2.subh.h16.sat.ll" => "__builtin_HEXAGON_A2_subh_h16_sat_ll",
+ "llvm.hexagon.A2.subh.l16.hl" => "__builtin_HEXAGON_A2_subh_l16_hl",
+ "llvm.hexagon.A2.subh.l16.ll" => "__builtin_HEXAGON_A2_subh_l16_ll",
+ "llvm.hexagon.A2.subh.l16.sat.hl" => "__builtin_HEXAGON_A2_subh_l16_sat_hl",
+ "llvm.hexagon.A2.subh.l16.sat.ll" => "__builtin_HEXAGON_A2_subh_l16_sat_ll",
+ "llvm.hexagon.A2.subp" => "__builtin_HEXAGON_A2_subp",
+ "llvm.hexagon.A2.subri" => "__builtin_HEXAGON_A2_subri",
+ "llvm.hexagon.A2.subsat" => "__builtin_HEXAGON_A2_subsat",
+ "llvm.hexagon.A2.svaddh" => "__builtin_HEXAGON_A2_svaddh",
+ "llvm.hexagon.A2.svaddhs" => "__builtin_HEXAGON_A2_svaddhs",
+ "llvm.hexagon.A2.svadduhs" => "__builtin_HEXAGON_A2_svadduhs",
+ "llvm.hexagon.A2.svavgh" => "__builtin_HEXAGON_A2_svavgh",
+ "llvm.hexagon.A2.svavghs" => "__builtin_HEXAGON_A2_svavghs",
+ "llvm.hexagon.A2.svnavgh" => "__builtin_HEXAGON_A2_svnavgh",
+ "llvm.hexagon.A2.svsubh" => "__builtin_HEXAGON_A2_svsubh",
+ "llvm.hexagon.A2.svsubhs" => "__builtin_HEXAGON_A2_svsubhs",
+ "llvm.hexagon.A2.svsubuhs" => "__builtin_HEXAGON_A2_svsubuhs",
+ "llvm.hexagon.A2.swiz" => "__builtin_HEXAGON_A2_swiz",
+ "llvm.hexagon.A2.sxtb" => "__builtin_HEXAGON_A2_sxtb",
+ "llvm.hexagon.A2.sxth" => "__builtin_HEXAGON_A2_sxth",
+ "llvm.hexagon.A2.sxtw" => "__builtin_HEXAGON_A2_sxtw",
+ "llvm.hexagon.A2.tfr" => "__builtin_HEXAGON_A2_tfr",
+ "llvm.hexagon.A2.tfrih" => "__builtin_HEXAGON_A2_tfrih",
+ "llvm.hexagon.A2.tfril" => "__builtin_HEXAGON_A2_tfril",
+ "llvm.hexagon.A2.tfrp" => "__builtin_HEXAGON_A2_tfrp",
+ "llvm.hexagon.A2.tfrpi" => "__builtin_HEXAGON_A2_tfrpi",
+ "llvm.hexagon.A2.tfrsi" => "__builtin_HEXAGON_A2_tfrsi",
+ "llvm.hexagon.A2.vabsh" => "__builtin_HEXAGON_A2_vabsh",
+ "llvm.hexagon.A2.vabshsat" => "__builtin_HEXAGON_A2_vabshsat",
+ "llvm.hexagon.A2.vabsw" => "__builtin_HEXAGON_A2_vabsw",
+ "llvm.hexagon.A2.vabswsat" => "__builtin_HEXAGON_A2_vabswsat",
+ "llvm.hexagon.A2.vaddb.map" => "__builtin_HEXAGON_A2_vaddb_map",
+ "llvm.hexagon.A2.vaddh" => "__builtin_HEXAGON_A2_vaddh",
+ "llvm.hexagon.A2.vaddhs" => "__builtin_HEXAGON_A2_vaddhs",
+ "llvm.hexagon.A2.vaddub" => "__builtin_HEXAGON_A2_vaddub",
+ "llvm.hexagon.A2.vaddubs" => "__builtin_HEXAGON_A2_vaddubs",
+ "llvm.hexagon.A2.vadduhs" => "__builtin_HEXAGON_A2_vadduhs",
+ "llvm.hexagon.A2.vaddw" => "__builtin_HEXAGON_A2_vaddw",
+ "llvm.hexagon.A2.vaddws" => "__builtin_HEXAGON_A2_vaddws",
+ "llvm.hexagon.A2.vavgh" => "__builtin_HEXAGON_A2_vavgh",
+ "llvm.hexagon.A2.vavghcr" => "__builtin_HEXAGON_A2_vavghcr",
+ "llvm.hexagon.A2.vavghr" => "__builtin_HEXAGON_A2_vavghr",
+ "llvm.hexagon.A2.vavgub" => "__builtin_HEXAGON_A2_vavgub",
+ "llvm.hexagon.A2.vavgubr" => "__builtin_HEXAGON_A2_vavgubr",
+ "llvm.hexagon.A2.vavguh" => "__builtin_HEXAGON_A2_vavguh",
+ "llvm.hexagon.A2.vavguhr" => "__builtin_HEXAGON_A2_vavguhr",
+ "llvm.hexagon.A2.vavguw" => "__builtin_HEXAGON_A2_vavguw",
+ "llvm.hexagon.A2.vavguwr" => "__builtin_HEXAGON_A2_vavguwr",
+ "llvm.hexagon.A2.vavgw" => "__builtin_HEXAGON_A2_vavgw",
+ "llvm.hexagon.A2.vavgwcr" => "__builtin_HEXAGON_A2_vavgwcr",
+ "llvm.hexagon.A2.vavgwr" => "__builtin_HEXAGON_A2_vavgwr",
+ "llvm.hexagon.A2.vcmpbeq" => "__builtin_HEXAGON_A2_vcmpbeq",
+ "llvm.hexagon.A2.vcmpbgtu" => "__builtin_HEXAGON_A2_vcmpbgtu",
+ "llvm.hexagon.A2.vcmpheq" => "__builtin_HEXAGON_A2_vcmpheq",
+ "llvm.hexagon.A2.vcmphgt" => "__builtin_HEXAGON_A2_vcmphgt",
+ "llvm.hexagon.A2.vcmphgtu" => "__builtin_HEXAGON_A2_vcmphgtu",
+ "llvm.hexagon.A2.vcmpweq" => "__builtin_HEXAGON_A2_vcmpweq",
+ "llvm.hexagon.A2.vcmpwgt" => "__builtin_HEXAGON_A2_vcmpwgt",
+ "llvm.hexagon.A2.vcmpwgtu" => "__builtin_HEXAGON_A2_vcmpwgtu",
+ "llvm.hexagon.A2.vconj" => "__builtin_HEXAGON_A2_vconj",
+ "llvm.hexagon.A2.vmaxb" => "__builtin_HEXAGON_A2_vmaxb",
+ "llvm.hexagon.A2.vmaxh" => "__builtin_HEXAGON_A2_vmaxh",
+ "llvm.hexagon.A2.vmaxub" => "__builtin_HEXAGON_A2_vmaxub",
+ "llvm.hexagon.A2.vmaxuh" => "__builtin_HEXAGON_A2_vmaxuh",
+ "llvm.hexagon.A2.vmaxuw" => "__builtin_HEXAGON_A2_vmaxuw",
+ "llvm.hexagon.A2.vmaxw" => "__builtin_HEXAGON_A2_vmaxw",
+ "llvm.hexagon.A2.vminb" => "__builtin_HEXAGON_A2_vminb",
+ "llvm.hexagon.A2.vminh" => "__builtin_HEXAGON_A2_vminh",
+ "llvm.hexagon.A2.vminub" => "__builtin_HEXAGON_A2_vminub",
+ "llvm.hexagon.A2.vminuh" => "__builtin_HEXAGON_A2_vminuh",
+ "llvm.hexagon.A2.vminuw" => "__builtin_HEXAGON_A2_vminuw",
+ "llvm.hexagon.A2.vminw" => "__builtin_HEXAGON_A2_vminw",
+ "llvm.hexagon.A2.vnavgh" => "__builtin_HEXAGON_A2_vnavgh",
+ "llvm.hexagon.A2.vnavghcr" => "__builtin_HEXAGON_A2_vnavghcr",
+ "llvm.hexagon.A2.vnavghr" => "__builtin_HEXAGON_A2_vnavghr",
+ "llvm.hexagon.A2.vnavgw" => "__builtin_HEXAGON_A2_vnavgw",
+ "llvm.hexagon.A2.vnavgwcr" => "__builtin_HEXAGON_A2_vnavgwcr",
+ "llvm.hexagon.A2.vnavgwr" => "__builtin_HEXAGON_A2_vnavgwr",
+ "llvm.hexagon.A2.vraddub" => "__builtin_HEXAGON_A2_vraddub",
+ "llvm.hexagon.A2.vraddub.acc" => "__builtin_HEXAGON_A2_vraddub_acc",
+ "llvm.hexagon.A2.vrsadub" => "__builtin_HEXAGON_A2_vrsadub",
+ "llvm.hexagon.A2.vrsadub.acc" => "__builtin_HEXAGON_A2_vrsadub_acc",
+ "llvm.hexagon.A2.vsubb.map" => "__builtin_HEXAGON_A2_vsubb_map",
+ "llvm.hexagon.A2.vsubh" => "__builtin_HEXAGON_A2_vsubh",
+ "llvm.hexagon.A2.vsubhs" => "__builtin_HEXAGON_A2_vsubhs",
+ "llvm.hexagon.A2.vsubub" => "__builtin_HEXAGON_A2_vsubub",
+ "llvm.hexagon.A2.vsububs" => "__builtin_HEXAGON_A2_vsububs",
+ "llvm.hexagon.A2.vsubuhs" => "__builtin_HEXAGON_A2_vsubuhs",
+ "llvm.hexagon.A2.vsubw" => "__builtin_HEXAGON_A2_vsubw",
+ "llvm.hexagon.A2.vsubws" => "__builtin_HEXAGON_A2_vsubws",
+ "llvm.hexagon.A2.xor" => "__builtin_HEXAGON_A2_xor",
+ "llvm.hexagon.A2.xorp" => "__builtin_HEXAGON_A2_xorp",
+ "llvm.hexagon.A2.zxtb" => "__builtin_HEXAGON_A2_zxtb",
+ "llvm.hexagon.A2.zxth" => "__builtin_HEXAGON_A2_zxth",
+ "llvm.hexagon.A4.andn" => "__builtin_HEXAGON_A4_andn",
+ "llvm.hexagon.A4.andnp" => "__builtin_HEXAGON_A4_andnp",
+ "llvm.hexagon.A4.bitsplit" => "__builtin_HEXAGON_A4_bitsplit",
+ "llvm.hexagon.A4.bitspliti" => "__builtin_HEXAGON_A4_bitspliti",
+ "llvm.hexagon.A4.boundscheck" => "__builtin_HEXAGON_A4_boundscheck",
+ "llvm.hexagon.A4.cmpbeq" => "__builtin_HEXAGON_A4_cmpbeq",
+ "llvm.hexagon.A4.cmpbeqi" => "__builtin_HEXAGON_A4_cmpbeqi",
+ "llvm.hexagon.A4.cmpbgt" => "__builtin_HEXAGON_A4_cmpbgt",
+ "llvm.hexagon.A4.cmpbgti" => "__builtin_HEXAGON_A4_cmpbgti",
+ "llvm.hexagon.A4.cmpbgtu" => "__builtin_HEXAGON_A4_cmpbgtu",
+ "llvm.hexagon.A4.cmpbgtui" => "__builtin_HEXAGON_A4_cmpbgtui",
+ "llvm.hexagon.A4.cmpheq" => "__builtin_HEXAGON_A4_cmpheq",
+ "llvm.hexagon.A4.cmpheqi" => "__builtin_HEXAGON_A4_cmpheqi",
+ "llvm.hexagon.A4.cmphgt" => "__builtin_HEXAGON_A4_cmphgt",
+ "llvm.hexagon.A4.cmphgti" => "__builtin_HEXAGON_A4_cmphgti",
+ "llvm.hexagon.A4.cmphgtu" => "__builtin_HEXAGON_A4_cmphgtu",
+ "llvm.hexagon.A4.cmphgtui" => "__builtin_HEXAGON_A4_cmphgtui",
+ "llvm.hexagon.A4.combineir" => "__builtin_HEXAGON_A4_combineir",
+ "llvm.hexagon.A4.combineri" => "__builtin_HEXAGON_A4_combineri",
+ "llvm.hexagon.A4.cround.ri" => "__builtin_HEXAGON_A4_cround_ri",
+ "llvm.hexagon.A4.cround.rr" => "__builtin_HEXAGON_A4_cround_rr",
+ "llvm.hexagon.A4.modwrapu" => "__builtin_HEXAGON_A4_modwrapu",
+ "llvm.hexagon.A4.orn" => "__builtin_HEXAGON_A4_orn",
+ "llvm.hexagon.A4.ornp" => "__builtin_HEXAGON_A4_ornp",
+ "llvm.hexagon.A4.rcmpeq" => "__builtin_HEXAGON_A4_rcmpeq",
+ "llvm.hexagon.A4.rcmpeqi" => "__builtin_HEXAGON_A4_rcmpeqi",
+ "llvm.hexagon.A4.rcmpneq" => "__builtin_HEXAGON_A4_rcmpneq",
+ "llvm.hexagon.A4.rcmpneqi" => "__builtin_HEXAGON_A4_rcmpneqi",
+ "llvm.hexagon.A4.round.ri" => "__builtin_HEXAGON_A4_round_ri",
+ "llvm.hexagon.A4.round.ri.sat" => "__builtin_HEXAGON_A4_round_ri_sat",
+ "llvm.hexagon.A4.round.rr" => "__builtin_HEXAGON_A4_round_rr",
+ "llvm.hexagon.A4.round.rr.sat" => "__builtin_HEXAGON_A4_round_rr_sat",
+ "llvm.hexagon.A4.tlbmatch" => "__builtin_HEXAGON_A4_tlbmatch",
+ "llvm.hexagon.A4.vcmpbeq.any" => "__builtin_HEXAGON_A4_vcmpbeq_any",
+ "llvm.hexagon.A4.vcmpbeqi" => "__builtin_HEXAGON_A4_vcmpbeqi",
+ "llvm.hexagon.A4.vcmpbgt" => "__builtin_HEXAGON_A4_vcmpbgt",
+ "llvm.hexagon.A4.vcmpbgti" => "__builtin_HEXAGON_A4_vcmpbgti",
+ "llvm.hexagon.A4.vcmpbgtui" => "__builtin_HEXAGON_A4_vcmpbgtui",
+ "llvm.hexagon.A4.vcmpheqi" => "__builtin_HEXAGON_A4_vcmpheqi",
+ "llvm.hexagon.A4.vcmphgti" => "__builtin_HEXAGON_A4_vcmphgti",
+ "llvm.hexagon.A4.vcmphgtui" => "__builtin_HEXAGON_A4_vcmphgtui",
+ "llvm.hexagon.A4.vcmpweqi" => "__builtin_HEXAGON_A4_vcmpweqi",
+ "llvm.hexagon.A4.vcmpwgti" => "__builtin_HEXAGON_A4_vcmpwgti",
+ "llvm.hexagon.A4.vcmpwgtui" => "__builtin_HEXAGON_A4_vcmpwgtui",
+ "llvm.hexagon.A4.vrmaxh" => "__builtin_HEXAGON_A4_vrmaxh",
+ "llvm.hexagon.A4.vrmaxuh" => "__builtin_HEXAGON_A4_vrmaxuh",
+ "llvm.hexagon.A4.vrmaxuw" => "__builtin_HEXAGON_A4_vrmaxuw",
+ "llvm.hexagon.A4.vrmaxw" => "__builtin_HEXAGON_A4_vrmaxw",
+ "llvm.hexagon.A4.vrminh" => "__builtin_HEXAGON_A4_vrminh",
+ "llvm.hexagon.A4.vrminuh" => "__builtin_HEXAGON_A4_vrminuh",
+ "llvm.hexagon.A4.vrminuw" => "__builtin_HEXAGON_A4_vrminuw",
+ "llvm.hexagon.A4.vrminw" => "__builtin_HEXAGON_A4_vrminw",
+ "llvm.hexagon.A5.vaddhubs" => "__builtin_HEXAGON_A5_vaddhubs",
+ "llvm.hexagon.C2.all8" => "__builtin_HEXAGON_C2_all8",
+ "llvm.hexagon.C2.and" => "__builtin_HEXAGON_C2_and",
+ "llvm.hexagon.C2.andn" => "__builtin_HEXAGON_C2_andn",
+ "llvm.hexagon.C2.any8" => "__builtin_HEXAGON_C2_any8",
+ "llvm.hexagon.C2.bitsclr" => "__builtin_HEXAGON_C2_bitsclr",
+ "llvm.hexagon.C2.bitsclri" => "__builtin_HEXAGON_C2_bitsclri",
+ "llvm.hexagon.C2.bitsset" => "__builtin_HEXAGON_C2_bitsset",
+ "llvm.hexagon.C2.cmpeq" => "__builtin_HEXAGON_C2_cmpeq",
+ "llvm.hexagon.C2.cmpeqi" => "__builtin_HEXAGON_C2_cmpeqi",
+ "llvm.hexagon.C2.cmpeqp" => "__builtin_HEXAGON_C2_cmpeqp",
+ "llvm.hexagon.C2.cmpgei" => "__builtin_HEXAGON_C2_cmpgei",
+ "llvm.hexagon.C2.cmpgeui" => "__builtin_HEXAGON_C2_cmpgeui",
+ "llvm.hexagon.C2.cmpgt" => "__builtin_HEXAGON_C2_cmpgt",
+ "llvm.hexagon.C2.cmpgti" => "__builtin_HEXAGON_C2_cmpgti",
+ "llvm.hexagon.C2.cmpgtp" => "__builtin_HEXAGON_C2_cmpgtp",
+ "llvm.hexagon.C2.cmpgtu" => "__builtin_HEXAGON_C2_cmpgtu",
+ "llvm.hexagon.C2.cmpgtui" => "__builtin_HEXAGON_C2_cmpgtui",
+ "llvm.hexagon.C2.cmpgtup" => "__builtin_HEXAGON_C2_cmpgtup",
+ "llvm.hexagon.C2.cmplt" => "__builtin_HEXAGON_C2_cmplt",
+ "llvm.hexagon.C2.cmpltu" => "__builtin_HEXAGON_C2_cmpltu",
+ "llvm.hexagon.C2.mask" => "__builtin_HEXAGON_C2_mask",
+ "llvm.hexagon.C2.mux" => "__builtin_HEXAGON_C2_mux",
+ "llvm.hexagon.C2.muxii" => "__builtin_HEXAGON_C2_muxii",
+ "llvm.hexagon.C2.muxir" => "__builtin_HEXAGON_C2_muxir",
+ "llvm.hexagon.C2.muxri" => "__builtin_HEXAGON_C2_muxri",
+ "llvm.hexagon.C2.not" => "__builtin_HEXAGON_C2_not",
+ "llvm.hexagon.C2.or" => "__builtin_HEXAGON_C2_or",
+ "llvm.hexagon.C2.orn" => "__builtin_HEXAGON_C2_orn",
+ "llvm.hexagon.C2.pxfer.map" => "__builtin_HEXAGON_C2_pxfer_map",
+ "llvm.hexagon.C2.tfrpr" => "__builtin_HEXAGON_C2_tfrpr",
+ "llvm.hexagon.C2.tfrrp" => "__builtin_HEXAGON_C2_tfrrp",
+ "llvm.hexagon.C2.vitpack" => "__builtin_HEXAGON_C2_vitpack",
+ "llvm.hexagon.C2.vmux" => "__builtin_HEXAGON_C2_vmux",
+ "llvm.hexagon.C2.xor" => "__builtin_HEXAGON_C2_xor",
+ "llvm.hexagon.C4.and.and" => "__builtin_HEXAGON_C4_and_and",
+ "llvm.hexagon.C4.and.andn" => "__builtin_HEXAGON_C4_and_andn",
+ "llvm.hexagon.C4.and.or" => "__builtin_HEXAGON_C4_and_or",
+ "llvm.hexagon.C4.and.orn" => "__builtin_HEXAGON_C4_and_orn",
+ "llvm.hexagon.C4.cmplte" => "__builtin_HEXAGON_C4_cmplte",
+ "llvm.hexagon.C4.cmpltei" => "__builtin_HEXAGON_C4_cmpltei",
+ "llvm.hexagon.C4.cmplteu" => "__builtin_HEXAGON_C4_cmplteu",
+ "llvm.hexagon.C4.cmplteui" => "__builtin_HEXAGON_C4_cmplteui",
+ "llvm.hexagon.C4.cmpneq" => "__builtin_HEXAGON_C4_cmpneq",
+ "llvm.hexagon.C4.cmpneqi" => "__builtin_HEXAGON_C4_cmpneqi",
+ "llvm.hexagon.C4.fastcorner9" => "__builtin_HEXAGON_C4_fastcorner9",
+ "llvm.hexagon.C4.fastcorner9.not" => "__builtin_HEXAGON_C4_fastcorner9_not",
+ "llvm.hexagon.C4.nbitsclr" => "__builtin_HEXAGON_C4_nbitsclr",
+ "llvm.hexagon.C4.nbitsclri" => "__builtin_HEXAGON_C4_nbitsclri",
+ "llvm.hexagon.C4.nbitsset" => "__builtin_HEXAGON_C4_nbitsset",
+ "llvm.hexagon.C4.or.and" => "__builtin_HEXAGON_C4_or_and",
+ "llvm.hexagon.C4.or.andn" => "__builtin_HEXAGON_C4_or_andn",
+ "llvm.hexagon.C4.or.or" => "__builtin_HEXAGON_C4_or_or",
+ "llvm.hexagon.C4.or.orn" => "__builtin_HEXAGON_C4_or_orn",
+ "llvm.hexagon.F2.conv.d2df" => "__builtin_HEXAGON_F2_conv_d2df",
+ "llvm.hexagon.F2.conv.d2sf" => "__builtin_HEXAGON_F2_conv_d2sf",
+ "llvm.hexagon.F2.conv.df2d" => "__builtin_HEXAGON_F2_conv_df2d",
+ "llvm.hexagon.F2.conv.df2d.chop" => "__builtin_HEXAGON_F2_conv_df2d_chop",
+ "llvm.hexagon.F2.conv.df2sf" => "__builtin_HEXAGON_F2_conv_df2sf",
+ "llvm.hexagon.F2.conv.df2ud" => "__builtin_HEXAGON_F2_conv_df2ud",
+ "llvm.hexagon.F2.conv.df2ud.chop" => "__builtin_HEXAGON_F2_conv_df2ud_chop",
+ "llvm.hexagon.F2.conv.df2uw" => "__builtin_HEXAGON_F2_conv_df2uw",
+ "llvm.hexagon.F2.conv.df2uw.chop" => "__builtin_HEXAGON_F2_conv_df2uw_chop",
+ "llvm.hexagon.F2.conv.df2w" => "__builtin_HEXAGON_F2_conv_df2w",
+ "llvm.hexagon.F2.conv.df2w.chop" => "__builtin_HEXAGON_F2_conv_df2w_chop",
+ "llvm.hexagon.F2.conv.sf2d" => "__builtin_HEXAGON_F2_conv_sf2d",
+ "llvm.hexagon.F2.conv.sf2d.chop" => "__builtin_HEXAGON_F2_conv_sf2d_chop",
+ "llvm.hexagon.F2.conv.sf2df" => "__builtin_HEXAGON_F2_conv_sf2df",
+ "llvm.hexagon.F2.conv.sf2ud" => "__builtin_HEXAGON_F2_conv_sf2ud",
+ "llvm.hexagon.F2.conv.sf2ud.chop" => "__builtin_HEXAGON_F2_conv_sf2ud_chop",
+ "llvm.hexagon.F2.conv.sf2uw" => "__builtin_HEXAGON_F2_conv_sf2uw",
+ "llvm.hexagon.F2.conv.sf2uw.chop" => "__builtin_HEXAGON_F2_conv_sf2uw_chop",
+ "llvm.hexagon.F2.conv.sf2w" => "__builtin_HEXAGON_F2_conv_sf2w",
+ "llvm.hexagon.F2.conv.sf2w.chop" => "__builtin_HEXAGON_F2_conv_sf2w_chop",
+ "llvm.hexagon.F2.conv.ud2df" => "__builtin_HEXAGON_F2_conv_ud2df",
+ "llvm.hexagon.F2.conv.ud2sf" => "__builtin_HEXAGON_F2_conv_ud2sf",
+ "llvm.hexagon.F2.conv.uw2df" => "__builtin_HEXAGON_F2_conv_uw2df",
+ "llvm.hexagon.F2.conv.uw2sf" => "__builtin_HEXAGON_F2_conv_uw2sf",
+ "llvm.hexagon.F2.conv.w2df" => "__builtin_HEXAGON_F2_conv_w2df",
+ "llvm.hexagon.F2.conv.w2sf" => "__builtin_HEXAGON_F2_conv_w2sf",
+ "llvm.hexagon.F2.dfadd" => "__builtin_HEXAGON_F2_dfadd",
+ "llvm.hexagon.F2.dfclass" => "__builtin_HEXAGON_F2_dfclass",
+ "llvm.hexagon.F2.dfcmpeq" => "__builtin_HEXAGON_F2_dfcmpeq",
+ "llvm.hexagon.F2.dfcmpge" => "__builtin_HEXAGON_F2_dfcmpge",
+ "llvm.hexagon.F2.dfcmpgt" => "__builtin_HEXAGON_F2_dfcmpgt",
+ "llvm.hexagon.F2.dfcmpuo" => "__builtin_HEXAGON_F2_dfcmpuo",
+ "llvm.hexagon.F2.dffixupd" => "__builtin_HEXAGON_F2_dffixupd",
+ "llvm.hexagon.F2.dffixupn" => "__builtin_HEXAGON_F2_dffixupn",
+ "llvm.hexagon.F2.dffixupr" => "__builtin_HEXAGON_F2_dffixupr",
+ "llvm.hexagon.F2.dffma" => "__builtin_HEXAGON_F2_dffma",
+ "llvm.hexagon.F2.dffma.lib" => "__builtin_HEXAGON_F2_dffma_lib",
+ "llvm.hexagon.F2.dffma.sc" => "__builtin_HEXAGON_F2_dffma_sc",
+ "llvm.hexagon.F2.dffms" => "__builtin_HEXAGON_F2_dffms",
+ "llvm.hexagon.F2.dffms.lib" => "__builtin_HEXAGON_F2_dffms_lib",
+ "llvm.hexagon.F2.dfimm.n" => "__builtin_HEXAGON_F2_dfimm_n",
+ "llvm.hexagon.F2.dfimm.p" => "__builtin_HEXAGON_F2_dfimm_p",
+ "llvm.hexagon.F2.dfmax" => "__builtin_HEXAGON_F2_dfmax",
+ "llvm.hexagon.F2.dfmin" => "__builtin_HEXAGON_F2_dfmin",
+ "llvm.hexagon.F2.dfmpy" => "__builtin_HEXAGON_F2_dfmpy",
+ "llvm.hexagon.F2.dfsub" => "__builtin_HEXAGON_F2_dfsub",
+ "llvm.hexagon.F2.sfadd" => "__builtin_HEXAGON_F2_sfadd",
+ "llvm.hexagon.F2.sfclass" => "__builtin_HEXAGON_F2_sfclass",
+ "llvm.hexagon.F2.sfcmpeq" => "__builtin_HEXAGON_F2_sfcmpeq",
+ "llvm.hexagon.F2.sfcmpge" => "__builtin_HEXAGON_F2_sfcmpge",
+ "llvm.hexagon.F2.sfcmpgt" => "__builtin_HEXAGON_F2_sfcmpgt",
+ "llvm.hexagon.F2.sfcmpuo" => "__builtin_HEXAGON_F2_sfcmpuo",
+ "llvm.hexagon.F2.sffixupd" => "__builtin_HEXAGON_F2_sffixupd",
+ "llvm.hexagon.F2.sffixupn" => "__builtin_HEXAGON_F2_sffixupn",
+ "llvm.hexagon.F2.sffixupr" => "__builtin_HEXAGON_F2_sffixupr",
+ "llvm.hexagon.F2.sffma" => "__builtin_HEXAGON_F2_sffma",
+ "llvm.hexagon.F2.sffma.lib" => "__builtin_HEXAGON_F2_sffma_lib",
+ "llvm.hexagon.F2.sffma.sc" => "__builtin_HEXAGON_F2_sffma_sc",
+ "llvm.hexagon.F2.sffms" => "__builtin_HEXAGON_F2_sffms",
+ "llvm.hexagon.F2.sffms.lib" => "__builtin_HEXAGON_F2_sffms_lib",
+ "llvm.hexagon.F2.sfimm.n" => "__builtin_HEXAGON_F2_sfimm_n",
+ "llvm.hexagon.F2.sfimm.p" => "__builtin_HEXAGON_F2_sfimm_p",
+ "llvm.hexagon.F2.sfmax" => "__builtin_HEXAGON_F2_sfmax",
+ "llvm.hexagon.F2.sfmin" => "__builtin_HEXAGON_F2_sfmin",
+ "llvm.hexagon.F2.sfmpy" => "__builtin_HEXAGON_F2_sfmpy",
+ "llvm.hexagon.F2.sfsub" => "__builtin_HEXAGON_F2_sfsub",
+ "llvm.hexagon.M2.acci" => "__builtin_HEXAGON_M2_acci",
+ "llvm.hexagon.M2.accii" => "__builtin_HEXAGON_M2_accii",
+ "llvm.hexagon.M2.cmaci.s0" => "__builtin_HEXAGON_M2_cmaci_s0",
+ "llvm.hexagon.M2.cmacr.s0" => "__builtin_HEXAGON_M2_cmacr_s0",
+ "llvm.hexagon.M2.cmacs.s0" => "__builtin_HEXAGON_M2_cmacs_s0",
+ "llvm.hexagon.M2.cmacs.s1" => "__builtin_HEXAGON_M2_cmacs_s1",
+ "llvm.hexagon.M2.cmacsc.s0" => "__builtin_HEXAGON_M2_cmacsc_s0",
+ "llvm.hexagon.M2.cmacsc.s1" => "__builtin_HEXAGON_M2_cmacsc_s1",
+ "llvm.hexagon.M2.cmpyi.s0" => "__builtin_HEXAGON_M2_cmpyi_s0",
+ "llvm.hexagon.M2.cmpyr.s0" => "__builtin_HEXAGON_M2_cmpyr_s0",
+ "llvm.hexagon.M2.cmpyrs.s0" => "__builtin_HEXAGON_M2_cmpyrs_s0",
+ "llvm.hexagon.M2.cmpyrs.s1" => "__builtin_HEXAGON_M2_cmpyrs_s1",
+ "llvm.hexagon.M2.cmpyrsc.s0" => "__builtin_HEXAGON_M2_cmpyrsc_s0",
+ "llvm.hexagon.M2.cmpyrsc.s1" => "__builtin_HEXAGON_M2_cmpyrsc_s1",
+ "llvm.hexagon.M2.cmpys.s0" => "__builtin_HEXAGON_M2_cmpys_s0",
+ "llvm.hexagon.M2.cmpys.s1" => "__builtin_HEXAGON_M2_cmpys_s1",
+ "llvm.hexagon.M2.cmpysc.s0" => "__builtin_HEXAGON_M2_cmpysc_s0",
+ "llvm.hexagon.M2.cmpysc.s1" => "__builtin_HEXAGON_M2_cmpysc_s1",
+ "llvm.hexagon.M2.cnacs.s0" => "__builtin_HEXAGON_M2_cnacs_s0",
+ "llvm.hexagon.M2.cnacs.s1" => "__builtin_HEXAGON_M2_cnacs_s1",
+ "llvm.hexagon.M2.cnacsc.s0" => "__builtin_HEXAGON_M2_cnacsc_s0",
+ "llvm.hexagon.M2.cnacsc.s1" => "__builtin_HEXAGON_M2_cnacsc_s1",
+ "llvm.hexagon.M2.dpmpyss.acc.s0" => "__builtin_HEXAGON_M2_dpmpyss_acc_s0",
+ "llvm.hexagon.M2.dpmpyss.nac.s0" => "__builtin_HEXAGON_M2_dpmpyss_nac_s0",
+ "llvm.hexagon.M2.dpmpyss.rnd.s0" => "__builtin_HEXAGON_M2_dpmpyss_rnd_s0",
+ "llvm.hexagon.M2.dpmpyss.s0" => "__builtin_HEXAGON_M2_dpmpyss_s0",
+ "llvm.hexagon.M2.dpmpyuu.acc.s0" => "__builtin_HEXAGON_M2_dpmpyuu_acc_s0",
+ "llvm.hexagon.M2.dpmpyuu.nac.s0" => "__builtin_HEXAGON_M2_dpmpyuu_nac_s0",
+ "llvm.hexagon.M2.dpmpyuu.s0" => "__builtin_HEXAGON_M2_dpmpyuu_s0",
+ "llvm.hexagon.M2.hmmpyh.rs1" => "__builtin_HEXAGON_M2_hmmpyh_rs1",
+ "llvm.hexagon.M2.hmmpyh.s1" => "__builtin_HEXAGON_M2_hmmpyh_s1",
+ "llvm.hexagon.M2.hmmpyl.rs1" => "__builtin_HEXAGON_M2_hmmpyl_rs1",
+ "llvm.hexagon.M2.hmmpyl.s1" => "__builtin_HEXAGON_M2_hmmpyl_s1",
+ "llvm.hexagon.M2.maci" => "__builtin_HEXAGON_M2_maci",
+ "llvm.hexagon.M2.macsin" => "__builtin_HEXAGON_M2_macsin",
+ "llvm.hexagon.M2.macsip" => "__builtin_HEXAGON_M2_macsip",
+ "llvm.hexagon.M2.mmachs.rs0" => "__builtin_HEXAGON_M2_mmachs_rs0",
+ "llvm.hexagon.M2.mmachs.rs1" => "__builtin_HEXAGON_M2_mmachs_rs1",
+ "llvm.hexagon.M2.mmachs.s0" => "__builtin_HEXAGON_M2_mmachs_s0",
+ "llvm.hexagon.M2.mmachs.s1" => "__builtin_HEXAGON_M2_mmachs_s1",
+ "llvm.hexagon.M2.mmacls.rs0" => "__builtin_HEXAGON_M2_mmacls_rs0",
+ "llvm.hexagon.M2.mmacls.rs1" => "__builtin_HEXAGON_M2_mmacls_rs1",
+ "llvm.hexagon.M2.mmacls.s0" => "__builtin_HEXAGON_M2_mmacls_s0",
+ "llvm.hexagon.M2.mmacls.s1" => "__builtin_HEXAGON_M2_mmacls_s1",
+ "llvm.hexagon.M2.mmacuhs.rs0" => "__builtin_HEXAGON_M2_mmacuhs_rs0",
+ "llvm.hexagon.M2.mmacuhs.rs1" => "__builtin_HEXAGON_M2_mmacuhs_rs1",
+ "llvm.hexagon.M2.mmacuhs.s0" => "__builtin_HEXAGON_M2_mmacuhs_s0",
+ "llvm.hexagon.M2.mmacuhs.s1" => "__builtin_HEXAGON_M2_mmacuhs_s1",
+ "llvm.hexagon.M2.mmaculs.rs0" => "__builtin_HEXAGON_M2_mmaculs_rs0",
+ "llvm.hexagon.M2.mmaculs.rs1" => "__builtin_HEXAGON_M2_mmaculs_rs1",
+ "llvm.hexagon.M2.mmaculs.s0" => "__builtin_HEXAGON_M2_mmaculs_s0",
+ "llvm.hexagon.M2.mmaculs.s1" => "__builtin_HEXAGON_M2_mmaculs_s1",
+ "llvm.hexagon.M2.mmpyh.rs0" => "__builtin_HEXAGON_M2_mmpyh_rs0",
+ "llvm.hexagon.M2.mmpyh.rs1" => "__builtin_HEXAGON_M2_mmpyh_rs1",
+ "llvm.hexagon.M2.mmpyh.s0" => "__builtin_HEXAGON_M2_mmpyh_s0",
+ "llvm.hexagon.M2.mmpyh.s1" => "__builtin_HEXAGON_M2_mmpyh_s1",
+ "llvm.hexagon.M2.mmpyl.rs0" => "__builtin_HEXAGON_M2_mmpyl_rs0",
+ "llvm.hexagon.M2.mmpyl.rs1" => "__builtin_HEXAGON_M2_mmpyl_rs1",
+ "llvm.hexagon.M2.mmpyl.s0" => "__builtin_HEXAGON_M2_mmpyl_s0",
+ "llvm.hexagon.M2.mmpyl.s1" => "__builtin_HEXAGON_M2_mmpyl_s1",
+ "llvm.hexagon.M2.mmpyuh.rs0" => "__builtin_HEXAGON_M2_mmpyuh_rs0",
+ "llvm.hexagon.M2.mmpyuh.rs1" => "__builtin_HEXAGON_M2_mmpyuh_rs1",
+ "llvm.hexagon.M2.mmpyuh.s0" => "__builtin_HEXAGON_M2_mmpyuh_s0",
+ "llvm.hexagon.M2.mmpyuh.s1" => "__builtin_HEXAGON_M2_mmpyuh_s1",
+ "llvm.hexagon.M2.mmpyul.rs0" => "__builtin_HEXAGON_M2_mmpyul_rs0",
+ "llvm.hexagon.M2.mmpyul.rs1" => "__builtin_HEXAGON_M2_mmpyul_rs1",
+ "llvm.hexagon.M2.mmpyul.s0" => "__builtin_HEXAGON_M2_mmpyul_s0",
+ "llvm.hexagon.M2.mmpyul.s1" => "__builtin_HEXAGON_M2_mmpyul_s1",
+ "llvm.hexagon.M2.mpy.acc.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_hh_s0",
+ "llvm.hexagon.M2.mpy.acc.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_hh_s1",
+ "llvm.hexagon.M2.mpy.acc.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_hl_s0",
+ "llvm.hexagon.M2.mpy.acc.hl.s1" => "__builtin_HEXAGON_M2_mpy_acc_hl_s1",
+ "llvm.hexagon.M2.mpy.acc.lh.s0" => "__builtin_HEXAGON_M2_mpy_acc_lh_s0",
+ "llvm.hexagon.M2.mpy.acc.lh.s1" => "__builtin_HEXAGON_M2_mpy_acc_lh_s1",
+ "llvm.hexagon.M2.mpy.acc.ll.s0" => "__builtin_HEXAGON_M2_mpy_acc_ll_s0",
+ "llvm.hexagon.M2.mpy.acc.ll.s1" => "__builtin_HEXAGON_M2_mpy_acc_ll_s1",
+ "llvm.hexagon.M2.mpy.acc.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0",
+ "llvm.hexagon.M2.mpy.acc.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1",
+ "llvm.hexagon.M2.mpy.acc.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0",
+ "llvm.hexagon.M2.mpy.acc.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1",
+ "llvm.hexagon.M2.mpy.acc.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0",
+ "llvm.hexagon.M2.mpy.acc.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1",
+ "llvm.hexagon.M2.mpy.acc.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0",
+ "llvm.hexagon.M2.mpy.acc.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1",
+ "llvm.hexagon.M2.mpy.hh.s0" => "__builtin_HEXAGON_M2_mpy_hh_s0",
+ "llvm.hexagon.M2.mpy.hh.s1" => "__builtin_HEXAGON_M2_mpy_hh_s1",
+ "llvm.hexagon.M2.mpy.hl.s0" => "__builtin_HEXAGON_M2_mpy_hl_s0",
+ "llvm.hexagon.M2.mpy.hl.s1" => "__builtin_HEXAGON_M2_mpy_hl_s1",
+ "llvm.hexagon.M2.mpy.lh.s0" => "__builtin_HEXAGON_M2_mpy_lh_s0",
+ "llvm.hexagon.M2.mpy.lh.s1" => "__builtin_HEXAGON_M2_mpy_lh_s1",
+ "llvm.hexagon.M2.mpy.ll.s0" => "__builtin_HEXAGON_M2_mpy_ll_s0",
+ "llvm.hexagon.M2.mpy.ll.s1" => "__builtin_HEXAGON_M2_mpy_ll_s1",
+ "llvm.hexagon.M2.mpy.nac.hh.s0" => "__builtin_HEXAGON_M2_mpy_nac_hh_s0",
+ "llvm.hexagon.M2.mpy.nac.hh.s1" => "__builtin_HEXAGON_M2_mpy_nac_hh_s1",
+ "llvm.hexagon.M2.mpy.nac.hl.s0" => "__builtin_HEXAGON_M2_mpy_nac_hl_s0",
+ "llvm.hexagon.M2.mpy.nac.hl.s1" => "__builtin_HEXAGON_M2_mpy_nac_hl_s1",
+ "llvm.hexagon.M2.mpy.nac.lh.s0" => "__builtin_HEXAGON_M2_mpy_nac_lh_s0",
+ "llvm.hexagon.M2.mpy.nac.lh.s1" => "__builtin_HEXAGON_M2_mpy_nac_lh_s1",
+ "llvm.hexagon.M2.mpy.nac.ll.s0" => "__builtin_HEXAGON_M2_mpy_nac_ll_s0",
+ "llvm.hexagon.M2.mpy.nac.ll.s1" => "__builtin_HEXAGON_M2_mpy_nac_ll_s1",
+ "llvm.hexagon.M2.mpy.nac.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0",
+ "llvm.hexagon.M2.mpy.nac.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1",
+ "llvm.hexagon.M2.mpy.nac.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0",
+ "llvm.hexagon.M2.mpy.nac.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1",
+ "llvm.hexagon.M2.mpy.nac.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0",
+ "llvm.hexagon.M2.mpy.nac.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1",
+ "llvm.hexagon.M2.mpy.nac.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0",
+ "llvm.hexagon.M2.mpy.nac.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1",
+ "llvm.hexagon.M2.mpy.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpy_rnd_hh_s0",
+ "llvm.hexagon.M2.mpy.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpy_rnd_hh_s1",
+ "llvm.hexagon.M2.mpy.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpy_rnd_hl_s0",
+ "llvm.hexagon.M2.mpy.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpy_rnd_hl_s1",
+ "llvm.hexagon.M2.mpy.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpy_rnd_lh_s0",
+ "llvm.hexagon.M2.mpy.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpy_rnd_lh_s1",
+ "llvm.hexagon.M2.mpy.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpy_rnd_ll_s0",
+ "llvm.hexagon.M2.mpy.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpy_rnd_ll_s1",
+ "llvm.hexagon.M2.mpy.sat.hh.s0" => "__builtin_HEXAGON_M2_mpy_sat_hh_s0",
+ "llvm.hexagon.M2.mpy.sat.hh.s1" => "__builtin_HEXAGON_M2_mpy_sat_hh_s1",
+ "llvm.hexagon.M2.mpy.sat.hl.s0" => "__builtin_HEXAGON_M2_mpy_sat_hl_s0",
+ "llvm.hexagon.M2.mpy.sat.hl.s1" => "__builtin_HEXAGON_M2_mpy_sat_hl_s1",
+ "llvm.hexagon.M2.mpy.sat.lh.s0" => "__builtin_HEXAGON_M2_mpy_sat_lh_s0",
+ "llvm.hexagon.M2.mpy.sat.lh.s1" => "__builtin_HEXAGON_M2_mpy_sat_lh_s1",
+ "llvm.hexagon.M2.mpy.sat.ll.s0" => "__builtin_HEXAGON_M2_mpy_sat_ll_s0",
+ "llvm.hexagon.M2.mpy.sat.ll.s1" => "__builtin_HEXAGON_M2_mpy_sat_ll_s1",
+ "llvm.hexagon.M2.mpy.sat.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0",
+ "llvm.hexagon.M2.mpy.sat.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1",
+ "llvm.hexagon.M2.mpy.sat.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0",
+ "llvm.hexagon.M2.mpy.sat.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1",
+ "llvm.hexagon.M2.mpy.sat.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0",
+ "llvm.hexagon.M2.mpy.sat.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1",
+ "llvm.hexagon.M2.mpy.sat.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0",
+ "llvm.hexagon.M2.mpy.sat.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1",
+ "llvm.hexagon.M2.mpy.up" => "__builtin_HEXAGON_M2_mpy_up",
+ "llvm.hexagon.M2.mpy.up.s1" => "__builtin_HEXAGON_M2_mpy_up_s1",
+ "llvm.hexagon.M2.mpy.up.s1.sat" => "__builtin_HEXAGON_M2_mpy_up_s1_sat",
+ "llvm.hexagon.M2.mpyd.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyd_acc_hh_s0",
+ "llvm.hexagon.M2.mpyd.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyd_acc_hh_s1",
+ "llvm.hexagon.M2.mpyd.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyd_acc_hl_s0",
+ "llvm.hexagon.M2.mpyd.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyd_acc_hl_s1",
+ "llvm.hexagon.M2.mpyd.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyd_acc_lh_s0",
+ "llvm.hexagon.M2.mpyd.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyd_acc_lh_s1",
+ "llvm.hexagon.M2.mpyd.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyd_acc_ll_s0",
+ "llvm.hexagon.M2.mpyd.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyd_acc_ll_s1",
+ "llvm.hexagon.M2.mpyd.hh.s0" => "__builtin_HEXAGON_M2_mpyd_hh_s0",
+ "llvm.hexagon.M2.mpyd.hh.s1" => "__builtin_HEXAGON_M2_mpyd_hh_s1",
+ "llvm.hexagon.M2.mpyd.hl.s0" => "__builtin_HEXAGON_M2_mpyd_hl_s0",
+ "llvm.hexagon.M2.mpyd.hl.s1" => "__builtin_HEXAGON_M2_mpyd_hl_s1",
+ "llvm.hexagon.M2.mpyd.lh.s0" => "__builtin_HEXAGON_M2_mpyd_lh_s0",
+ "llvm.hexagon.M2.mpyd.lh.s1" => "__builtin_HEXAGON_M2_mpyd_lh_s1",
+ "llvm.hexagon.M2.mpyd.ll.s0" => "__builtin_HEXAGON_M2_mpyd_ll_s0",
+ "llvm.hexagon.M2.mpyd.ll.s1" => "__builtin_HEXAGON_M2_mpyd_ll_s1",
+ "llvm.hexagon.M2.mpyd.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyd_nac_hh_s0",
+ "llvm.hexagon.M2.mpyd.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyd_nac_hh_s1",
+ "llvm.hexagon.M2.mpyd.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyd_nac_hl_s0",
+ "llvm.hexagon.M2.mpyd.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyd_nac_hl_s1",
+ "llvm.hexagon.M2.mpyd.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyd_nac_lh_s0",
+ "llvm.hexagon.M2.mpyd.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyd_nac_lh_s1",
+ "llvm.hexagon.M2.mpyd.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyd_nac_ll_s0",
+ "llvm.hexagon.M2.mpyd.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyd_nac_ll_s1",
+ "llvm.hexagon.M2.mpyd.rnd.hh.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_hh_s0",
+ "llvm.hexagon.M2.mpyd.rnd.hh.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_hh_s1",
+ "llvm.hexagon.M2.mpyd.rnd.hl.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_hl_s0",
+ "llvm.hexagon.M2.mpyd.rnd.hl.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_hl_s1",
+ "llvm.hexagon.M2.mpyd.rnd.lh.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_lh_s0",
+ "llvm.hexagon.M2.mpyd.rnd.lh.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_lh_s1",
+ "llvm.hexagon.M2.mpyd.rnd.ll.s0" => "__builtin_HEXAGON_M2_mpyd_rnd_ll_s0",
+ "llvm.hexagon.M2.mpyd.rnd.ll.s1" => "__builtin_HEXAGON_M2_mpyd_rnd_ll_s1",
+ "llvm.hexagon.M2.mpyi" => "__builtin_HEXAGON_M2_mpyi",
+ "llvm.hexagon.M2.mpysmi" => "__builtin_HEXAGON_M2_mpysmi",
+ "llvm.hexagon.M2.mpysu.up" => "__builtin_HEXAGON_M2_mpysu_up",
+ "llvm.hexagon.M2.mpyu.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyu_acc_hh_s0",
+ "llvm.hexagon.M2.mpyu.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyu_acc_hh_s1",
+ "llvm.hexagon.M2.mpyu.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyu_acc_hl_s0",
+ "llvm.hexagon.M2.mpyu.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyu_acc_hl_s1",
+ "llvm.hexagon.M2.mpyu.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyu_acc_lh_s0",
+ "llvm.hexagon.M2.mpyu.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyu_acc_lh_s1",
+ "llvm.hexagon.M2.mpyu.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyu_acc_ll_s0",
+ "llvm.hexagon.M2.mpyu.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyu_acc_ll_s1",
+ "llvm.hexagon.M2.mpyu.hh.s0" => "__builtin_HEXAGON_M2_mpyu_hh_s0",
+ "llvm.hexagon.M2.mpyu.hh.s1" => "__builtin_HEXAGON_M2_mpyu_hh_s1",
+ "llvm.hexagon.M2.mpyu.hl.s0" => "__builtin_HEXAGON_M2_mpyu_hl_s0",
+ "llvm.hexagon.M2.mpyu.hl.s1" => "__builtin_HEXAGON_M2_mpyu_hl_s1",
+ "llvm.hexagon.M2.mpyu.lh.s0" => "__builtin_HEXAGON_M2_mpyu_lh_s0",
+ "llvm.hexagon.M2.mpyu.lh.s1" => "__builtin_HEXAGON_M2_mpyu_lh_s1",
+ "llvm.hexagon.M2.mpyu.ll.s0" => "__builtin_HEXAGON_M2_mpyu_ll_s0",
+ "llvm.hexagon.M2.mpyu.ll.s1" => "__builtin_HEXAGON_M2_mpyu_ll_s1",
+ "llvm.hexagon.M2.mpyu.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyu_nac_hh_s0",
+ "llvm.hexagon.M2.mpyu.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyu_nac_hh_s1",
+ "llvm.hexagon.M2.mpyu.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyu_nac_hl_s0",
+ "llvm.hexagon.M2.mpyu.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyu_nac_hl_s1",
+ "llvm.hexagon.M2.mpyu.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyu_nac_lh_s0",
+ "llvm.hexagon.M2.mpyu.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyu_nac_lh_s1",
+ "llvm.hexagon.M2.mpyu.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyu_nac_ll_s0",
+ "llvm.hexagon.M2.mpyu.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyu_nac_ll_s1",
+ "llvm.hexagon.M2.mpyu.up" => "__builtin_HEXAGON_M2_mpyu_up",
+ "llvm.hexagon.M2.mpyud.acc.hh.s0" => "__builtin_HEXAGON_M2_mpyud_acc_hh_s0",
+ "llvm.hexagon.M2.mpyud.acc.hh.s1" => "__builtin_HEXAGON_M2_mpyud_acc_hh_s1",
+ "llvm.hexagon.M2.mpyud.acc.hl.s0" => "__builtin_HEXAGON_M2_mpyud_acc_hl_s0",
+ "llvm.hexagon.M2.mpyud.acc.hl.s1" => "__builtin_HEXAGON_M2_mpyud_acc_hl_s1",
+ "llvm.hexagon.M2.mpyud.acc.lh.s0" => "__builtin_HEXAGON_M2_mpyud_acc_lh_s0",
+ "llvm.hexagon.M2.mpyud.acc.lh.s1" => "__builtin_HEXAGON_M2_mpyud_acc_lh_s1",
+ "llvm.hexagon.M2.mpyud.acc.ll.s0" => "__builtin_HEXAGON_M2_mpyud_acc_ll_s0",
+ "llvm.hexagon.M2.mpyud.acc.ll.s1" => "__builtin_HEXAGON_M2_mpyud_acc_ll_s1",
+ "llvm.hexagon.M2.mpyud.hh.s0" => "__builtin_HEXAGON_M2_mpyud_hh_s0",
+ "llvm.hexagon.M2.mpyud.hh.s1" => "__builtin_HEXAGON_M2_mpyud_hh_s1",
+ "llvm.hexagon.M2.mpyud.hl.s0" => "__builtin_HEXAGON_M2_mpyud_hl_s0",
+ "llvm.hexagon.M2.mpyud.hl.s1" => "__builtin_HEXAGON_M2_mpyud_hl_s1",
+ "llvm.hexagon.M2.mpyud.lh.s0" => "__builtin_HEXAGON_M2_mpyud_lh_s0",
+ "llvm.hexagon.M2.mpyud.lh.s1" => "__builtin_HEXAGON_M2_mpyud_lh_s1",
+ "llvm.hexagon.M2.mpyud.ll.s0" => "__builtin_HEXAGON_M2_mpyud_ll_s0",
+ "llvm.hexagon.M2.mpyud.ll.s1" => "__builtin_HEXAGON_M2_mpyud_ll_s1",
+ "llvm.hexagon.M2.mpyud.nac.hh.s0" => "__builtin_HEXAGON_M2_mpyud_nac_hh_s0",
+ "llvm.hexagon.M2.mpyud.nac.hh.s1" => "__builtin_HEXAGON_M2_mpyud_nac_hh_s1",
+ "llvm.hexagon.M2.mpyud.nac.hl.s0" => "__builtin_HEXAGON_M2_mpyud_nac_hl_s0",
+ "llvm.hexagon.M2.mpyud.nac.hl.s1" => "__builtin_HEXAGON_M2_mpyud_nac_hl_s1",
+ "llvm.hexagon.M2.mpyud.nac.lh.s0" => "__builtin_HEXAGON_M2_mpyud_nac_lh_s0",
+ "llvm.hexagon.M2.mpyud.nac.lh.s1" => "__builtin_HEXAGON_M2_mpyud_nac_lh_s1",
+ "llvm.hexagon.M2.mpyud.nac.ll.s0" => "__builtin_HEXAGON_M2_mpyud_nac_ll_s0",
+ "llvm.hexagon.M2.mpyud.nac.ll.s1" => "__builtin_HEXAGON_M2_mpyud_nac_ll_s1",
+ "llvm.hexagon.M2.mpyui" => "__builtin_HEXAGON_M2_mpyui",
+ "llvm.hexagon.M2.nacci" => "__builtin_HEXAGON_M2_nacci",
+ "llvm.hexagon.M2.naccii" => "__builtin_HEXAGON_M2_naccii",
+ "llvm.hexagon.M2.subacc" => "__builtin_HEXAGON_M2_subacc",
+ "llvm.hexagon.M2.vabsdiffh" => "__builtin_HEXAGON_M2_vabsdiffh",
+ "llvm.hexagon.M2.vabsdiffw" => "__builtin_HEXAGON_M2_vabsdiffw",
+ "llvm.hexagon.M2.vcmac.s0.sat.i" => "__builtin_HEXAGON_M2_vcmac_s0_sat_i",
+ "llvm.hexagon.M2.vcmac.s0.sat.r" => "__builtin_HEXAGON_M2_vcmac_s0_sat_r",
+ "llvm.hexagon.M2.vcmpy.s0.sat.i" => "__builtin_HEXAGON_M2_vcmpy_s0_sat_i",
+ "llvm.hexagon.M2.vcmpy.s0.sat.r" => "__builtin_HEXAGON_M2_vcmpy_s0_sat_r",
+ "llvm.hexagon.M2.vcmpy.s1.sat.i" => "__builtin_HEXAGON_M2_vcmpy_s1_sat_i",
+ "llvm.hexagon.M2.vcmpy.s1.sat.r" => "__builtin_HEXAGON_M2_vcmpy_s1_sat_r",
+ "llvm.hexagon.M2.vdmacs.s0" => "__builtin_HEXAGON_M2_vdmacs_s0",
+ "llvm.hexagon.M2.vdmacs.s1" => "__builtin_HEXAGON_M2_vdmacs_s1",
+ "llvm.hexagon.M2.vdmpyrs.s0" => "__builtin_HEXAGON_M2_vdmpyrs_s0",
+ "llvm.hexagon.M2.vdmpyrs.s1" => "__builtin_HEXAGON_M2_vdmpyrs_s1",
+ "llvm.hexagon.M2.vdmpys.s0" => "__builtin_HEXAGON_M2_vdmpys_s0",
+ "llvm.hexagon.M2.vdmpys.s1" => "__builtin_HEXAGON_M2_vdmpys_s1",
+ "llvm.hexagon.M2.vmac2" => "__builtin_HEXAGON_M2_vmac2",
+ "llvm.hexagon.M2.vmac2es" => "__builtin_HEXAGON_M2_vmac2es",
+ "llvm.hexagon.M2.vmac2es.s0" => "__builtin_HEXAGON_M2_vmac2es_s0",
+ "llvm.hexagon.M2.vmac2es.s1" => "__builtin_HEXAGON_M2_vmac2es_s1",
+ "llvm.hexagon.M2.vmac2s.s0" => "__builtin_HEXAGON_M2_vmac2s_s0",
+ "llvm.hexagon.M2.vmac2s.s1" => "__builtin_HEXAGON_M2_vmac2s_s1",
+ "llvm.hexagon.M2.vmac2su.s0" => "__builtin_HEXAGON_M2_vmac2su_s0",
+ "llvm.hexagon.M2.vmac2su.s1" => "__builtin_HEXAGON_M2_vmac2su_s1",
+ "llvm.hexagon.M2.vmpy2es.s0" => "__builtin_HEXAGON_M2_vmpy2es_s0",
+ "llvm.hexagon.M2.vmpy2es.s1" => "__builtin_HEXAGON_M2_vmpy2es_s1",
+ "llvm.hexagon.M2.vmpy2s.s0" => "__builtin_HEXAGON_M2_vmpy2s_s0",
+ "llvm.hexagon.M2.vmpy2s.s0pack" => "__builtin_HEXAGON_M2_vmpy2s_s0pack",
+ "llvm.hexagon.M2.vmpy2s.s1" => "__builtin_HEXAGON_M2_vmpy2s_s1",
+ "llvm.hexagon.M2.vmpy2s.s1pack" => "__builtin_HEXAGON_M2_vmpy2s_s1pack",
+ "llvm.hexagon.M2.vmpy2su.s0" => "__builtin_HEXAGON_M2_vmpy2su_s0",
+ "llvm.hexagon.M2.vmpy2su.s1" => "__builtin_HEXAGON_M2_vmpy2su_s1",
+ "llvm.hexagon.M2.vraddh" => "__builtin_HEXAGON_M2_vraddh",
+ "llvm.hexagon.M2.vradduh" => "__builtin_HEXAGON_M2_vradduh",
+ "llvm.hexagon.M2.vrcmaci.s0" => "__builtin_HEXAGON_M2_vrcmaci_s0",
+ "llvm.hexagon.M2.vrcmaci.s0c" => "__builtin_HEXAGON_M2_vrcmaci_s0c",
+ "llvm.hexagon.M2.vrcmacr.s0" => "__builtin_HEXAGON_M2_vrcmacr_s0",
+ "llvm.hexagon.M2.vrcmacr.s0c" => "__builtin_HEXAGON_M2_vrcmacr_s0c",
+ "llvm.hexagon.M2.vrcmpyi.s0" => "__builtin_HEXAGON_M2_vrcmpyi_s0",
+ "llvm.hexagon.M2.vrcmpyi.s0c" => "__builtin_HEXAGON_M2_vrcmpyi_s0c",
+ "llvm.hexagon.M2.vrcmpyr.s0" => "__builtin_HEXAGON_M2_vrcmpyr_s0",
+ "llvm.hexagon.M2.vrcmpyr.s0c" => "__builtin_HEXAGON_M2_vrcmpyr_s0c",
+ "llvm.hexagon.M2.vrcmpys.acc.s1" => "__builtin_HEXAGON_M2_vrcmpys_acc_s1",
+ "llvm.hexagon.M2.vrcmpys.s1" => "__builtin_HEXAGON_M2_vrcmpys_s1",
+ "llvm.hexagon.M2.vrcmpys.s1rp" => "__builtin_HEXAGON_M2_vrcmpys_s1rp",
+ "llvm.hexagon.M2.vrmac.s0" => "__builtin_HEXAGON_M2_vrmac_s0",
+ "llvm.hexagon.M2.vrmpy.s0" => "__builtin_HEXAGON_M2_vrmpy_s0",
+ "llvm.hexagon.M2.xor.xacc" => "__builtin_HEXAGON_M2_xor_xacc",
+ "llvm.hexagon.M4.and.and" => "__builtin_HEXAGON_M4_and_and",
+ "llvm.hexagon.M4.and.andn" => "__builtin_HEXAGON_M4_and_andn",
+ "llvm.hexagon.M4.and.or" => "__builtin_HEXAGON_M4_and_or",
+ "llvm.hexagon.M4.and.xor" => "__builtin_HEXAGON_M4_and_xor",
+ "llvm.hexagon.M4.cmpyi.wh" => "__builtin_HEXAGON_M4_cmpyi_wh",
+ "llvm.hexagon.M4.cmpyi.whc" => "__builtin_HEXAGON_M4_cmpyi_whc",
+ "llvm.hexagon.M4.cmpyr.wh" => "__builtin_HEXAGON_M4_cmpyr_wh",
+ "llvm.hexagon.M4.cmpyr.whc" => "__builtin_HEXAGON_M4_cmpyr_whc",
+ "llvm.hexagon.M4.mac.up.s1.sat" => "__builtin_HEXAGON_M4_mac_up_s1_sat",
+ "llvm.hexagon.M4.mpyri.addi" => "__builtin_HEXAGON_M4_mpyri_addi",
+ "llvm.hexagon.M4.mpyri.addr" => "__builtin_HEXAGON_M4_mpyri_addr",
+ "llvm.hexagon.M4.mpyri.addr.u2" => "__builtin_HEXAGON_M4_mpyri_addr_u2",
+ "llvm.hexagon.M4.mpyrr.addi" => "__builtin_HEXAGON_M4_mpyrr_addi",
+ "llvm.hexagon.M4.mpyrr.addr" => "__builtin_HEXAGON_M4_mpyrr_addr",
+ "llvm.hexagon.M4.nac.up.s1.sat" => "__builtin_HEXAGON_M4_nac_up_s1_sat",
+ "llvm.hexagon.M4.or.and" => "__builtin_HEXAGON_M4_or_and",
+ "llvm.hexagon.M4.or.andn" => "__builtin_HEXAGON_M4_or_andn",
+ "llvm.hexagon.M4.or.or" => "__builtin_HEXAGON_M4_or_or",
+ "llvm.hexagon.M4.or.xor" => "__builtin_HEXAGON_M4_or_xor",
+ "llvm.hexagon.M4.pmpyw" => "__builtin_HEXAGON_M4_pmpyw",
+ "llvm.hexagon.M4.pmpyw.acc" => "__builtin_HEXAGON_M4_pmpyw_acc",
+ "llvm.hexagon.M4.vpmpyh" => "__builtin_HEXAGON_M4_vpmpyh",
+ "llvm.hexagon.M4.vpmpyh.acc" => "__builtin_HEXAGON_M4_vpmpyh_acc",
+ "llvm.hexagon.M4.vrmpyeh.acc.s0" => "__builtin_HEXAGON_M4_vrmpyeh_acc_s0",
+ "llvm.hexagon.M4.vrmpyeh.acc.s1" => "__builtin_HEXAGON_M4_vrmpyeh_acc_s1",
+ "llvm.hexagon.M4.vrmpyeh.s0" => "__builtin_HEXAGON_M4_vrmpyeh_s0",
+ "llvm.hexagon.M4.vrmpyeh.s1" => "__builtin_HEXAGON_M4_vrmpyeh_s1",
+ "llvm.hexagon.M4.vrmpyoh.acc.s0" => "__builtin_HEXAGON_M4_vrmpyoh_acc_s0",
+ "llvm.hexagon.M4.vrmpyoh.acc.s1" => "__builtin_HEXAGON_M4_vrmpyoh_acc_s1",
+ "llvm.hexagon.M4.vrmpyoh.s0" => "__builtin_HEXAGON_M4_vrmpyoh_s0",
+ "llvm.hexagon.M4.vrmpyoh.s1" => "__builtin_HEXAGON_M4_vrmpyoh_s1",
+ "llvm.hexagon.M4.xor.and" => "__builtin_HEXAGON_M4_xor_and",
+ "llvm.hexagon.M4.xor.andn" => "__builtin_HEXAGON_M4_xor_andn",
+ "llvm.hexagon.M4.xor.or" => "__builtin_HEXAGON_M4_xor_or",
+ "llvm.hexagon.M4.xor.xacc" => "__builtin_HEXAGON_M4_xor_xacc",
+ "llvm.hexagon.M5.vdmacbsu" => "__builtin_HEXAGON_M5_vdmacbsu",
+ "llvm.hexagon.M5.vdmpybsu" => "__builtin_HEXAGON_M5_vdmpybsu",
+ "llvm.hexagon.M5.vmacbsu" => "__builtin_HEXAGON_M5_vmacbsu",
+ "llvm.hexagon.M5.vmacbuu" => "__builtin_HEXAGON_M5_vmacbuu",
+ "llvm.hexagon.M5.vmpybsu" => "__builtin_HEXAGON_M5_vmpybsu",
+ "llvm.hexagon.M5.vmpybuu" => "__builtin_HEXAGON_M5_vmpybuu",
+ "llvm.hexagon.M5.vrmacbsu" => "__builtin_HEXAGON_M5_vrmacbsu",
+ "llvm.hexagon.M5.vrmacbuu" => "__builtin_HEXAGON_M5_vrmacbuu",
+ "llvm.hexagon.M5.vrmpybsu" => "__builtin_HEXAGON_M5_vrmpybsu",
+ "llvm.hexagon.M5.vrmpybuu" => "__builtin_HEXAGON_M5_vrmpybuu",
+ "llvm.hexagon.M6.vabsdiffb" => "__builtin_HEXAGON_M6_vabsdiffb",
+ "llvm.hexagon.M6.vabsdiffub" => "__builtin_HEXAGON_M6_vabsdiffub",
+ "llvm.hexagon.S2.addasl.rrri" => "__builtin_HEXAGON_S2_addasl_rrri",
+ "llvm.hexagon.S2.asl.i.p" => "__builtin_HEXAGON_S2_asl_i_p",
+ "llvm.hexagon.S2.asl.i.p.acc" => "__builtin_HEXAGON_S2_asl_i_p_acc",
+ "llvm.hexagon.S2.asl.i.p.and" => "__builtin_HEXAGON_S2_asl_i_p_and",
+ "llvm.hexagon.S2.asl.i.p.nac" => "__builtin_HEXAGON_S2_asl_i_p_nac",
+ "llvm.hexagon.S2.asl.i.p.or" => "__builtin_HEXAGON_S2_asl_i_p_or",
+ "llvm.hexagon.S2.asl.i.p.xacc" => "__builtin_HEXAGON_S2_asl_i_p_xacc",
+ "llvm.hexagon.S2.asl.i.r" => "__builtin_HEXAGON_S2_asl_i_r",
+ "llvm.hexagon.S2.asl.i.r.acc" => "__builtin_HEXAGON_S2_asl_i_r_acc",
+ "llvm.hexagon.S2.asl.i.r.and" => "__builtin_HEXAGON_S2_asl_i_r_and",
+ "llvm.hexagon.S2.asl.i.r.nac" => "__builtin_HEXAGON_S2_asl_i_r_nac",
+ "llvm.hexagon.S2.asl.i.r.or" => "__builtin_HEXAGON_S2_asl_i_r_or",
+ "llvm.hexagon.S2.asl.i.r.sat" => "__builtin_HEXAGON_S2_asl_i_r_sat",
+ "llvm.hexagon.S2.asl.i.r.xacc" => "__builtin_HEXAGON_S2_asl_i_r_xacc",
+ "llvm.hexagon.S2.asl.i.vh" => "__builtin_HEXAGON_S2_asl_i_vh",
+ "llvm.hexagon.S2.asl.i.vw" => "__builtin_HEXAGON_S2_asl_i_vw",
+ "llvm.hexagon.S2.asl.r.p" => "__builtin_HEXAGON_S2_asl_r_p",
+ "llvm.hexagon.S2.asl.r.p.acc" => "__builtin_HEXAGON_S2_asl_r_p_acc",
+ "llvm.hexagon.S2.asl.r.p.and" => "__builtin_HEXAGON_S2_asl_r_p_and",
+ "llvm.hexagon.S2.asl.r.p.nac" => "__builtin_HEXAGON_S2_asl_r_p_nac",
+ "llvm.hexagon.S2.asl.r.p.or" => "__builtin_HEXAGON_S2_asl_r_p_or",
+ "llvm.hexagon.S2.asl.r.p.xor" => "__builtin_HEXAGON_S2_asl_r_p_xor",
+ "llvm.hexagon.S2.asl.r.r" => "__builtin_HEXAGON_S2_asl_r_r",
+ "llvm.hexagon.S2.asl.r.r.acc" => "__builtin_HEXAGON_S2_asl_r_r_acc",
+ "llvm.hexagon.S2.asl.r.r.and" => "__builtin_HEXAGON_S2_asl_r_r_and",
+ "llvm.hexagon.S2.asl.r.r.nac" => "__builtin_HEXAGON_S2_asl_r_r_nac",
+ "llvm.hexagon.S2.asl.r.r.or" => "__builtin_HEXAGON_S2_asl_r_r_or",
+ "llvm.hexagon.S2.asl.r.r.sat" => "__builtin_HEXAGON_S2_asl_r_r_sat",
+ "llvm.hexagon.S2.asl.r.vh" => "__builtin_HEXAGON_S2_asl_r_vh",
+ "llvm.hexagon.S2.asl.r.vw" => "__builtin_HEXAGON_S2_asl_r_vw",
+ "llvm.hexagon.S2.asr.i.p" => "__builtin_HEXAGON_S2_asr_i_p",
+ "llvm.hexagon.S2.asr.i.p.acc" => "__builtin_HEXAGON_S2_asr_i_p_acc",
+ "llvm.hexagon.S2.asr.i.p.and" => "__builtin_HEXAGON_S2_asr_i_p_and",
+ "llvm.hexagon.S2.asr.i.p.nac" => "__builtin_HEXAGON_S2_asr_i_p_nac",
+ "llvm.hexagon.S2.asr.i.p.or" => "__builtin_HEXAGON_S2_asr_i_p_or",
+ "llvm.hexagon.S2.asr.i.p.rnd" => "__builtin_HEXAGON_S2_asr_i_p_rnd",
+ "llvm.hexagon.S2.asr.i.p.rnd.goodsyntax" => "__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax",
+ "llvm.hexagon.S2.asr.i.r" => "__builtin_HEXAGON_S2_asr_i_r",
+ "llvm.hexagon.S2.asr.i.r.acc" => "__builtin_HEXAGON_S2_asr_i_r_acc",
+ "llvm.hexagon.S2.asr.i.r.and" => "__builtin_HEXAGON_S2_asr_i_r_and",
+ "llvm.hexagon.S2.asr.i.r.nac" => "__builtin_HEXAGON_S2_asr_i_r_nac",
+ "llvm.hexagon.S2.asr.i.r.or" => "__builtin_HEXAGON_S2_asr_i_r_or",
+ "llvm.hexagon.S2.asr.i.r.rnd" => "__builtin_HEXAGON_S2_asr_i_r_rnd",
+ "llvm.hexagon.S2.asr.i.r.rnd.goodsyntax" => "__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax",
+ "llvm.hexagon.S2.asr.i.svw.trun" => "__builtin_HEXAGON_S2_asr_i_svw_trun",
+ "llvm.hexagon.S2.asr.i.vh" => "__builtin_HEXAGON_S2_asr_i_vh",
+ "llvm.hexagon.S2.asr.i.vw" => "__builtin_HEXAGON_S2_asr_i_vw",
+ "llvm.hexagon.S2.asr.r.p" => "__builtin_HEXAGON_S2_asr_r_p",
+ "llvm.hexagon.S2.asr.r.p.acc" => "__builtin_HEXAGON_S2_asr_r_p_acc",
+ "llvm.hexagon.S2.asr.r.p.and" => "__builtin_HEXAGON_S2_asr_r_p_and",
+ "llvm.hexagon.S2.asr.r.p.nac" => "__builtin_HEXAGON_S2_asr_r_p_nac",
+ "llvm.hexagon.S2.asr.r.p.or" => "__builtin_HEXAGON_S2_asr_r_p_or",
+ "llvm.hexagon.S2.asr.r.p.xor" => "__builtin_HEXAGON_S2_asr_r_p_xor",
+ "llvm.hexagon.S2.asr.r.r" => "__builtin_HEXAGON_S2_asr_r_r",
+ "llvm.hexagon.S2.asr.r.r.acc" => "__builtin_HEXAGON_S2_asr_r_r_acc",
+ "llvm.hexagon.S2.asr.r.r.and" => "__builtin_HEXAGON_S2_asr_r_r_and",
+ "llvm.hexagon.S2.asr.r.r.nac" => "__builtin_HEXAGON_S2_asr_r_r_nac",
+ "llvm.hexagon.S2.asr.r.r.or" => "__builtin_HEXAGON_S2_asr_r_r_or",
+ "llvm.hexagon.S2.asr.r.r.sat" => "__builtin_HEXAGON_S2_asr_r_r_sat",
+ "llvm.hexagon.S2.asr.r.svw.trun" => "__builtin_HEXAGON_S2_asr_r_svw_trun",
+ "llvm.hexagon.S2.asr.r.vh" => "__builtin_HEXAGON_S2_asr_r_vh",
+ "llvm.hexagon.S2.asr.r.vw" => "__builtin_HEXAGON_S2_asr_r_vw",
+ "llvm.hexagon.S2.brev" => "__builtin_HEXAGON_S2_brev",
+ "llvm.hexagon.S2.brevp" => "__builtin_HEXAGON_S2_brevp",
+ "llvm.hexagon.S2.cabacencbin" => "__builtin_HEXAGON_S2_cabacencbin",
+ "llvm.hexagon.S2.cl0" => "__builtin_HEXAGON_S2_cl0",
+ "llvm.hexagon.S2.cl0p" => "__builtin_HEXAGON_S2_cl0p",
+ "llvm.hexagon.S2.cl1" => "__builtin_HEXAGON_S2_cl1",
+ "llvm.hexagon.S2.cl1p" => "__builtin_HEXAGON_S2_cl1p",
+ "llvm.hexagon.S2.clb" => "__builtin_HEXAGON_S2_clb",
+ "llvm.hexagon.S2.clbnorm" => "__builtin_HEXAGON_S2_clbnorm",
+ "llvm.hexagon.S2.clbp" => "__builtin_HEXAGON_S2_clbp",
+ "llvm.hexagon.S2.clrbit.i" => "__builtin_HEXAGON_S2_clrbit_i",
+ "llvm.hexagon.S2.clrbit.r" => "__builtin_HEXAGON_S2_clrbit_r",
+ "llvm.hexagon.S2.ct0" => "__builtin_HEXAGON_S2_ct0",
+ "llvm.hexagon.S2.ct0p" => "__builtin_HEXAGON_S2_ct0p",
+ "llvm.hexagon.S2.ct1" => "__builtin_HEXAGON_S2_ct1",
+ "llvm.hexagon.S2.ct1p" => "__builtin_HEXAGON_S2_ct1p",
+ "llvm.hexagon.S2.deinterleave" => "__builtin_HEXAGON_S2_deinterleave",
+ "llvm.hexagon.S2.extractu" => "__builtin_HEXAGON_S2_extractu",
+ "llvm.hexagon.S2.extractu.rp" => "__builtin_HEXAGON_S2_extractu_rp",
+ "llvm.hexagon.S2.extractup" => "__builtin_HEXAGON_S2_extractup",
+ "llvm.hexagon.S2.extractup.rp" => "__builtin_HEXAGON_S2_extractup_rp",
+ "llvm.hexagon.S2.insert" => "__builtin_HEXAGON_S2_insert",
+ "llvm.hexagon.S2.insert.rp" => "__builtin_HEXAGON_S2_insert_rp",
+ "llvm.hexagon.S2.insertp" => "__builtin_HEXAGON_S2_insertp",
+ "llvm.hexagon.S2.insertp.rp" => "__builtin_HEXAGON_S2_insertp_rp",
+ "llvm.hexagon.S2.interleave" => "__builtin_HEXAGON_S2_interleave",
+ "llvm.hexagon.S2.lfsp" => "__builtin_HEXAGON_S2_lfsp",
+ "llvm.hexagon.S2.lsl.r.p" => "__builtin_HEXAGON_S2_lsl_r_p",
+ "llvm.hexagon.S2.lsl.r.p.acc" => "__builtin_HEXAGON_S2_lsl_r_p_acc",
+ "llvm.hexagon.S2.lsl.r.p.and" => "__builtin_HEXAGON_S2_lsl_r_p_and",
+ "llvm.hexagon.S2.lsl.r.p.nac" => "__builtin_HEXAGON_S2_lsl_r_p_nac",
+ "llvm.hexagon.S2.lsl.r.p.or" => "__builtin_HEXAGON_S2_lsl_r_p_or",
+ "llvm.hexagon.S2.lsl.r.p.xor" => "__builtin_HEXAGON_S2_lsl_r_p_xor",
+ "llvm.hexagon.S2.lsl.r.r" => "__builtin_HEXAGON_S2_lsl_r_r",
+ "llvm.hexagon.S2.lsl.r.r.acc" => "__builtin_HEXAGON_S2_lsl_r_r_acc",
+ "llvm.hexagon.S2.lsl.r.r.and" => "__builtin_HEXAGON_S2_lsl_r_r_and",
+ "llvm.hexagon.S2.lsl.r.r.nac" => "__builtin_HEXAGON_S2_lsl_r_r_nac",
+ "llvm.hexagon.S2.lsl.r.r.or" => "__builtin_HEXAGON_S2_lsl_r_r_or",
+ "llvm.hexagon.S2.lsl.r.vh" => "__builtin_HEXAGON_S2_lsl_r_vh",
+ "llvm.hexagon.S2.lsl.r.vw" => "__builtin_HEXAGON_S2_lsl_r_vw",
+ "llvm.hexagon.S2.lsr.i.p" => "__builtin_HEXAGON_S2_lsr_i_p",
+ "llvm.hexagon.S2.lsr.i.p.acc" => "__builtin_HEXAGON_S2_lsr_i_p_acc",
+ "llvm.hexagon.S2.lsr.i.p.and" => "__builtin_HEXAGON_S2_lsr_i_p_and",
+ "llvm.hexagon.S2.lsr.i.p.nac" => "__builtin_HEXAGON_S2_lsr_i_p_nac",
+ "llvm.hexagon.S2.lsr.i.p.or" => "__builtin_HEXAGON_S2_lsr_i_p_or",
+ "llvm.hexagon.S2.lsr.i.p.xacc" => "__builtin_HEXAGON_S2_lsr_i_p_xacc",
+ "llvm.hexagon.S2.lsr.i.r" => "__builtin_HEXAGON_S2_lsr_i_r",
+ "llvm.hexagon.S2.lsr.i.r.acc" => "__builtin_HEXAGON_S2_lsr_i_r_acc",
+ "llvm.hexagon.S2.lsr.i.r.and" => "__builtin_HEXAGON_S2_lsr_i_r_and",
+ "llvm.hexagon.S2.lsr.i.r.nac" => "__builtin_HEXAGON_S2_lsr_i_r_nac",
+ "llvm.hexagon.S2.lsr.i.r.or" => "__builtin_HEXAGON_S2_lsr_i_r_or",
+ "llvm.hexagon.S2.lsr.i.r.xacc" => "__builtin_HEXAGON_S2_lsr_i_r_xacc",
+ "llvm.hexagon.S2.lsr.i.vh" => "__builtin_HEXAGON_S2_lsr_i_vh",
+ "llvm.hexagon.S2.lsr.i.vw" => "__builtin_HEXAGON_S2_lsr_i_vw",
+ "llvm.hexagon.S2.lsr.r.p" => "__builtin_HEXAGON_S2_lsr_r_p",
+ "llvm.hexagon.S2.lsr.r.p.acc" => "__builtin_HEXAGON_S2_lsr_r_p_acc",
+ "llvm.hexagon.S2.lsr.r.p.and" => "__builtin_HEXAGON_S2_lsr_r_p_and",
+ "llvm.hexagon.S2.lsr.r.p.nac" => "__builtin_HEXAGON_S2_lsr_r_p_nac",
+ "llvm.hexagon.S2.lsr.r.p.or" => "__builtin_HEXAGON_S2_lsr_r_p_or",
+ "llvm.hexagon.S2.lsr.r.p.xor" => "__builtin_HEXAGON_S2_lsr_r_p_xor",
+ "llvm.hexagon.S2.lsr.r.r" => "__builtin_HEXAGON_S2_lsr_r_r",
+ "llvm.hexagon.S2.lsr.r.r.acc" => "__builtin_HEXAGON_S2_lsr_r_r_acc",
+ "llvm.hexagon.S2.lsr.r.r.and" => "__builtin_HEXAGON_S2_lsr_r_r_and",
+ "llvm.hexagon.S2.lsr.r.r.nac" => "__builtin_HEXAGON_S2_lsr_r_r_nac",
+ "llvm.hexagon.S2.lsr.r.r.or" => "__builtin_HEXAGON_S2_lsr_r_r_or",
+ "llvm.hexagon.S2.lsr.r.vh" => "__builtin_HEXAGON_S2_lsr_r_vh",
+ "llvm.hexagon.S2.lsr.r.vw" => "__builtin_HEXAGON_S2_lsr_r_vw",
+ "llvm.hexagon.S2.packhl" => "__builtin_HEXAGON_S2_packhl",
+ "llvm.hexagon.S2.parityp" => "__builtin_HEXAGON_S2_parityp",
+ "llvm.hexagon.S2.setbit.i" => "__builtin_HEXAGON_S2_setbit_i",
+ "llvm.hexagon.S2.setbit.r" => "__builtin_HEXAGON_S2_setbit_r",
+ "llvm.hexagon.S2.shuffeb" => "__builtin_HEXAGON_S2_shuffeb",
+ "llvm.hexagon.S2.shuffeh" => "__builtin_HEXAGON_S2_shuffeh",
+ "llvm.hexagon.S2.shuffob" => "__builtin_HEXAGON_S2_shuffob",
+ "llvm.hexagon.S2.shuffoh" => "__builtin_HEXAGON_S2_shuffoh",
+ "llvm.hexagon.S2.svsathb" => "__builtin_HEXAGON_S2_svsathb",
+ "llvm.hexagon.S2.svsathub" => "__builtin_HEXAGON_S2_svsathub",
+ "llvm.hexagon.S2.tableidxb.goodsyntax" => "__builtin_HEXAGON_S2_tableidxb_goodsyntax",
+ "llvm.hexagon.S2.tableidxd.goodsyntax" => "__builtin_HEXAGON_S2_tableidxd_goodsyntax",
+ "llvm.hexagon.S2.tableidxh.goodsyntax" => "__builtin_HEXAGON_S2_tableidxh_goodsyntax",
+ "llvm.hexagon.S2.tableidxw.goodsyntax" => "__builtin_HEXAGON_S2_tableidxw_goodsyntax",
+ "llvm.hexagon.S2.togglebit.i" => "__builtin_HEXAGON_S2_togglebit_i",
+ "llvm.hexagon.S2.togglebit.r" => "__builtin_HEXAGON_S2_togglebit_r",
+ "llvm.hexagon.S2.tstbit.i" => "__builtin_HEXAGON_S2_tstbit_i",
+ "llvm.hexagon.S2.tstbit.r" => "__builtin_HEXAGON_S2_tstbit_r",
+ "llvm.hexagon.S2.valignib" => "__builtin_HEXAGON_S2_valignib",
+ "llvm.hexagon.S2.valignrb" => "__builtin_HEXAGON_S2_valignrb",
+ "llvm.hexagon.S2.vcnegh" => "__builtin_HEXAGON_S2_vcnegh",
+ "llvm.hexagon.S2.vcrotate" => "__builtin_HEXAGON_S2_vcrotate",
+ "llvm.hexagon.S2.vrcnegh" => "__builtin_HEXAGON_S2_vrcnegh",
+ "llvm.hexagon.S2.vrndpackwh" => "__builtin_HEXAGON_S2_vrndpackwh",
+ "llvm.hexagon.S2.vrndpackwhs" => "__builtin_HEXAGON_S2_vrndpackwhs",
+ "llvm.hexagon.S2.vsathb" => "__builtin_HEXAGON_S2_vsathb",
+ "llvm.hexagon.S2.vsathb.nopack" => "__builtin_HEXAGON_S2_vsathb_nopack",
+ "llvm.hexagon.S2.vsathub" => "__builtin_HEXAGON_S2_vsathub",
+ "llvm.hexagon.S2.vsathub.nopack" => "__builtin_HEXAGON_S2_vsathub_nopack",
+ "llvm.hexagon.S2.vsatwh" => "__builtin_HEXAGON_S2_vsatwh",
+ "llvm.hexagon.S2.vsatwh.nopack" => "__builtin_HEXAGON_S2_vsatwh_nopack",
+ "llvm.hexagon.S2.vsatwuh" => "__builtin_HEXAGON_S2_vsatwuh",
+ "llvm.hexagon.S2.vsatwuh.nopack" => "__builtin_HEXAGON_S2_vsatwuh_nopack",
+ "llvm.hexagon.S2.vsplatrb" => "__builtin_HEXAGON_S2_vsplatrb",
+ "llvm.hexagon.S2.vsplatrh" => "__builtin_HEXAGON_S2_vsplatrh",
+ "llvm.hexagon.S2.vspliceib" => "__builtin_HEXAGON_S2_vspliceib",
+ "llvm.hexagon.S2.vsplicerb" => "__builtin_HEXAGON_S2_vsplicerb",
+ "llvm.hexagon.S2.vsxtbh" => "__builtin_HEXAGON_S2_vsxtbh",
+ "llvm.hexagon.S2.vsxthw" => "__builtin_HEXAGON_S2_vsxthw",
+ "llvm.hexagon.S2.vtrunehb" => "__builtin_HEXAGON_S2_vtrunehb",
+ "llvm.hexagon.S2.vtrunewh" => "__builtin_HEXAGON_S2_vtrunewh",
+ "llvm.hexagon.S2.vtrunohb" => "__builtin_HEXAGON_S2_vtrunohb",
+ "llvm.hexagon.S2.vtrunowh" => "__builtin_HEXAGON_S2_vtrunowh",
+ "llvm.hexagon.S2.vzxtbh" => "__builtin_HEXAGON_S2_vzxtbh",
+ "llvm.hexagon.S2.vzxthw" => "__builtin_HEXAGON_S2_vzxthw",
+ "llvm.hexagon.S4.addaddi" => "__builtin_HEXAGON_S4_addaddi",
+ "llvm.hexagon.S4.addi.asl.ri" => "__builtin_HEXAGON_S4_addi_asl_ri",
+ "llvm.hexagon.S4.addi.lsr.ri" => "__builtin_HEXAGON_S4_addi_lsr_ri",
+ "llvm.hexagon.S4.andi.asl.ri" => "__builtin_HEXAGON_S4_andi_asl_ri",
+ "llvm.hexagon.S4.andi.lsr.ri" => "__builtin_HEXAGON_S4_andi_lsr_ri",
+ "llvm.hexagon.S4.clbaddi" => "__builtin_HEXAGON_S4_clbaddi",
+ "llvm.hexagon.S4.clbpaddi" => "__builtin_HEXAGON_S4_clbpaddi",
+ "llvm.hexagon.S4.clbpnorm" => "__builtin_HEXAGON_S4_clbpnorm",
+ "llvm.hexagon.S4.extract" => "__builtin_HEXAGON_S4_extract",
+ "llvm.hexagon.S4.extract.rp" => "__builtin_HEXAGON_S4_extract_rp",
+ "llvm.hexagon.S4.extractp" => "__builtin_HEXAGON_S4_extractp",
+ "llvm.hexagon.S4.extractp.rp" => "__builtin_HEXAGON_S4_extractp_rp",
+ "llvm.hexagon.S4.lsli" => "__builtin_HEXAGON_S4_lsli",
+ "llvm.hexagon.S4.ntstbit.i" => "__builtin_HEXAGON_S4_ntstbit_i",
+ "llvm.hexagon.S4.ntstbit.r" => "__builtin_HEXAGON_S4_ntstbit_r",
+ "llvm.hexagon.S4.or.andi" => "__builtin_HEXAGON_S4_or_andi",
+ "llvm.hexagon.S4.or.andix" => "__builtin_HEXAGON_S4_or_andix",
+ "llvm.hexagon.S4.or.ori" => "__builtin_HEXAGON_S4_or_ori",
+ "llvm.hexagon.S4.ori.asl.ri" => "__builtin_HEXAGON_S4_ori_asl_ri",
+ "llvm.hexagon.S4.ori.lsr.ri" => "__builtin_HEXAGON_S4_ori_lsr_ri",
+ "llvm.hexagon.S4.parity" => "__builtin_HEXAGON_S4_parity",
+ "llvm.hexagon.S4.subaddi" => "__builtin_HEXAGON_S4_subaddi",
+ "llvm.hexagon.S4.subi.asl.ri" => "__builtin_HEXAGON_S4_subi_asl_ri",
+ "llvm.hexagon.S4.subi.lsr.ri" => "__builtin_HEXAGON_S4_subi_lsr_ri",
+ "llvm.hexagon.S4.vrcrotate" => "__builtin_HEXAGON_S4_vrcrotate",
+ "llvm.hexagon.S4.vrcrotate.acc" => "__builtin_HEXAGON_S4_vrcrotate_acc",
+ "llvm.hexagon.S4.vxaddsubh" => "__builtin_HEXAGON_S4_vxaddsubh",
+ "llvm.hexagon.S4.vxaddsubhr" => "__builtin_HEXAGON_S4_vxaddsubhr",
+ "llvm.hexagon.S4.vxaddsubw" => "__builtin_HEXAGON_S4_vxaddsubw",
+ "llvm.hexagon.S4.vxsubaddh" => "__builtin_HEXAGON_S4_vxsubaddh",
+ "llvm.hexagon.S4.vxsubaddhr" => "__builtin_HEXAGON_S4_vxsubaddhr",
+ "llvm.hexagon.S4.vxsubaddw" => "__builtin_HEXAGON_S4_vxsubaddw",
+ "llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax" => "__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax",
+ "llvm.hexagon.S5.asrhub.sat" => "__builtin_HEXAGON_S5_asrhub_sat",
+ "llvm.hexagon.S5.popcountp" => "__builtin_HEXAGON_S5_popcountp",
+ "llvm.hexagon.S5.vasrhrnd.goodsyntax" => "__builtin_HEXAGON_S5_vasrhrnd_goodsyntax",
+ "llvm.hexagon.S6.rol.i.p" => "__builtin_HEXAGON_S6_rol_i_p",
+ "llvm.hexagon.S6.rol.i.p.acc" => "__builtin_HEXAGON_S6_rol_i_p_acc",
+ "llvm.hexagon.S6.rol.i.p.and" => "__builtin_HEXAGON_S6_rol_i_p_and",
+ "llvm.hexagon.S6.rol.i.p.nac" => "__builtin_HEXAGON_S6_rol_i_p_nac",
+ "llvm.hexagon.S6.rol.i.p.or" => "__builtin_HEXAGON_S6_rol_i_p_or",
+ "llvm.hexagon.S6.rol.i.p.xacc" => "__builtin_HEXAGON_S6_rol_i_p_xacc",
+ "llvm.hexagon.S6.rol.i.r" => "__builtin_HEXAGON_S6_rol_i_r",
+ "llvm.hexagon.S6.rol.i.r.acc" => "__builtin_HEXAGON_S6_rol_i_r_acc",
+ "llvm.hexagon.S6.rol.i.r.and" => "__builtin_HEXAGON_S6_rol_i_r_and",
+ "llvm.hexagon.S6.rol.i.r.nac" => "__builtin_HEXAGON_S6_rol_i_r_nac",
+ "llvm.hexagon.S6.rol.i.r.or" => "__builtin_HEXAGON_S6_rol_i_r_or",
+ "llvm.hexagon.S6.rol.i.r.xacc" => "__builtin_HEXAGON_S6_rol_i_r_xacc",
+ "llvm.hexagon.S6.vsplatrbp" => "__builtin_HEXAGON_S6_vsplatrbp",
+ "llvm.hexagon.S6.vtrunehb.ppp" => "__builtin_HEXAGON_S6_vtrunehb_ppp",
+ "llvm.hexagon.S6.vtrunohb.ppp" => "__builtin_HEXAGON_S6_vtrunohb_ppp",
+ "llvm.hexagon.SI.to.SXTHI.asrh" => "__builtin_SI_to_SXTHI_asrh",
+ "llvm.hexagon.V6.extractw" => "__builtin_HEXAGON_V6_extractw",
+ "llvm.hexagon.V6.extractw.128B" => "__builtin_HEXAGON_V6_extractw_128B",
+ "llvm.hexagon.V6.hi" => "__builtin_HEXAGON_V6_hi",
+ "llvm.hexagon.V6.hi.128B" => "__builtin_HEXAGON_V6_hi_128B",
+ "llvm.hexagon.V6.lo" => "__builtin_HEXAGON_V6_lo",
+ "llvm.hexagon.V6.lo.128B" => "__builtin_HEXAGON_V6_lo_128B",
+ "llvm.hexagon.V6.lvsplatw" => "__builtin_HEXAGON_V6_lvsplatw",
+ "llvm.hexagon.V6.lvsplatw.128B" => "__builtin_HEXAGON_V6_lvsplatw_128B",
+ "llvm.hexagon.V6.vabsdiffh" => "__builtin_HEXAGON_V6_vabsdiffh",
+ "llvm.hexagon.V6.vabsdiffh.128B" => "__builtin_HEXAGON_V6_vabsdiffh_128B",
+ "llvm.hexagon.V6.vabsdiffub" => "__builtin_HEXAGON_V6_vabsdiffub",
+ "llvm.hexagon.V6.vabsdiffub.128B" => "__builtin_HEXAGON_V6_vabsdiffub_128B",
+ "llvm.hexagon.V6.vabsdiffuh" => "__builtin_HEXAGON_V6_vabsdiffuh",
+ "llvm.hexagon.V6.vabsdiffuh.128B" => "__builtin_HEXAGON_V6_vabsdiffuh_128B",
+ "llvm.hexagon.V6.vabsdiffw" => "__builtin_HEXAGON_V6_vabsdiffw",
+ "llvm.hexagon.V6.vabsdiffw.128B" => "__builtin_HEXAGON_V6_vabsdiffw_128B",
+ "llvm.hexagon.V6.vabsh" => "__builtin_HEXAGON_V6_vabsh",
+ "llvm.hexagon.V6.vabsh.128B" => "__builtin_HEXAGON_V6_vabsh_128B",
+ "llvm.hexagon.V6.vabsh.sat" => "__builtin_HEXAGON_V6_vabsh_sat",
+ "llvm.hexagon.V6.vabsh.sat.128B" => "__builtin_HEXAGON_V6_vabsh_sat_128B",
+ "llvm.hexagon.V6.vabsw" => "__builtin_HEXAGON_V6_vabsw",
+ "llvm.hexagon.V6.vabsw.128B" => "__builtin_HEXAGON_V6_vabsw_128B",
+ "llvm.hexagon.V6.vabsw.sat" => "__builtin_HEXAGON_V6_vabsw_sat",
+ "llvm.hexagon.V6.vabsw.sat.128B" => "__builtin_HEXAGON_V6_vabsw_sat_128B",
+ "llvm.hexagon.V6.vaddb" => "__builtin_HEXAGON_V6_vaddb",
+ "llvm.hexagon.V6.vaddb.128B" => "__builtin_HEXAGON_V6_vaddb_128B",
+ "llvm.hexagon.V6.vaddb.dv" => "__builtin_HEXAGON_V6_vaddb_dv",
+ "llvm.hexagon.V6.vaddb.dv.128B" => "__builtin_HEXAGON_V6_vaddb_dv_128B",
+ "llvm.hexagon.V6.vaddh" => "__builtin_HEXAGON_V6_vaddh",
+ "llvm.hexagon.V6.vaddh.128B" => "__builtin_HEXAGON_V6_vaddh_128B",
+ "llvm.hexagon.V6.vaddh.dv" => "__builtin_HEXAGON_V6_vaddh_dv",
+ "llvm.hexagon.V6.vaddh.dv.128B" => "__builtin_HEXAGON_V6_vaddh_dv_128B",
+ "llvm.hexagon.V6.vaddhsat" => "__builtin_HEXAGON_V6_vaddhsat",
+ "llvm.hexagon.V6.vaddhsat.128B" => "__builtin_HEXAGON_V6_vaddhsat_128B",
+ "llvm.hexagon.V6.vaddhsat.dv" => "__builtin_HEXAGON_V6_vaddhsat_dv",
+ "llvm.hexagon.V6.vaddhsat.dv.128B" => "__builtin_HEXAGON_V6_vaddhsat_dv_128B",
+ "llvm.hexagon.V6.vaddhw" => "__builtin_HEXAGON_V6_vaddhw",
+ "llvm.hexagon.V6.vaddhw.128B" => "__builtin_HEXAGON_V6_vaddhw_128B",
+ "llvm.hexagon.V6.vaddubh" => "__builtin_HEXAGON_V6_vaddubh",
+ "llvm.hexagon.V6.vaddubh.128B" => "__builtin_HEXAGON_V6_vaddubh_128B",
+ "llvm.hexagon.V6.vaddubsat" => "__builtin_HEXAGON_V6_vaddubsat",
+ "llvm.hexagon.V6.vaddubsat.128B" => "__builtin_HEXAGON_V6_vaddubsat_128B",
+ "llvm.hexagon.V6.vaddubsat.dv" => "__builtin_HEXAGON_V6_vaddubsat_dv",
+ "llvm.hexagon.V6.vaddubsat.dv.128B" => "__builtin_HEXAGON_V6_vaddubsat_dv_128B",
+ "llvm.hexagon.V6.vadduhsat" => "__builtin_HEXAGON_V6_vadduhsat",
+ "llvm.hexagon.V6.vadduhsat.128B" => "__builtin_HEXAGON_V6_vadduhsat_128B",
+ "llvm.hexagon.V6.vadduhsat.dv" => "__builtin_HEXAGON_V6_vadduhsat_dv",
+ "llvm.hexagon.V6.vadduhsat.dv.128B" => "__builtin_HEXAGON_V6_vadduhsat_dv_128B",
+ "llvm.hexagon.V6.vadduhw" => "__builtin_HEXAGON_V6_vadduhw",
+ "llvm.hexagon.V6.vadduhw.128B" => "__builtin_HEXAGON_V6_vadduhw_128B",
+ "llvm.hexagon.V6.vaddw" => "__builtin_HEXAGON_V6_vaddw",
+ "llvm.hexagon.V6.vaddw.128B" => "__builtin_HEXAGON_V6_vaddw_128B",
+ "llvm.hexagon.V6.vaddw.dv" => "__builtin_HEXAGON_V6_vaddw_dv",
+ "llvm.hexagon.V6.vaddw.dv.128B" => "__builtin_HEXAGON_V6_vaddw_dv_128B",
+ "llvm.hexagon.V6.vaddwsat" => "__builtin_HEXAGON_V6_vaddwsat",
+ "llvm.hexagon.V6.vaddwsat.128B" => "__builtin_HEXAGON_V6_vaddwsat_128B",
+ "llvm.hexagon.V6.vaddwsat.dv" => "__builtin_HEXAGON_V6_vaddwsat_dv",
+ "llvm.hexagon.V6.vaddwsat.dv.128B" => "__builtin_HEXAGON_V6_vaddwsat_dv_128B",
+ "llvm.hexagon.V6.valignb" => "__builtin_HEXAGON_V6_valignb",
+ "llvm.hexagon.V6.valignb.128B" => "__builtin_HEXAGON_V6_valignb_128B",
+ "llvm.hexagon.V6.valignbi" => "__builtin_HEXAGON_V6_valignbi",
+ "llvm.hexagon.V6.valignbi.128B" => "__builtin_HEXAGON_V6_valignbi_128B",
+ "llvm.hexagon.V6.vand" => "__builtin_HEXAGON_V6_vand",
+ "llvm.hexagon.V6.vand.128B" => "__builtin_HEXAGON_V6_vand_128B",
+ "llvm.hexagon.V6.vaslh" => "__builtin_HEXAGON_V6_vaslh",
+ "llvm.hexagon.V6.vaslh.128B" => "__builtin_HEXAGON_V6_vaslh_128B",
+ "llvm.hexagon.V6.vaslhv" => "__builtin_HEXAGON_V6_vaslhv",
+ "llvm.hexagon.V6.vaslhv.128B" => "__builtin_HEXAGON_V6_vaslhv_128B",
+ "llvm.hexagon.V6.vaslw" => "__builtin_HEXAGON_V6_vaslw",
+ "llvm.hexagon.V6.vaslw.128B" => "__builtin_HEXAGON_V6_vaslw_128B",
+ "llvm.hexagon.V6.vaslw.acc" => "__builtin_HEXAGON_V6_vaslw_acc",
+ "llvm.hexagon.V6.vaslw.acc.128B" => "__builtin_HEXAGON_V6_vaslw_acc_128B",
+ "llvm.hexagon.V6.vaslwv" => "__builtin_HEXAGON_V6_vaslwv",
+ "llvm.hexagon.V6.vaslwv.128B" => "__builtin_HEXAGON_V6_vaslwv_128B",
+ "llvm.hexagon.V6.vasrh" => "__builtin_HEXAGON_V6_vasrh",
+ "llvm.hexagon.V6.vasrh.128B" => "__builtin_HEXAGON_V6_vasrh_128B",
+ "llvm.hexagon.V6.vasrhbrndsat" => "__builtin_HEXAGON_V6_vasrhbrndsat",
+ "llvm.hexagon.V6.vasrhbrndsat.128B" => "__builtin_HEXAGON_V6_vasrhbrndsat_128B",
+ "llvm.hexagon.V6.vasrhubrndsat" => "__builtin_HEXAGON_V6_vasrhubrndsat",
+ "llvm.hexagon.V6.vasrhubrndsat.128B" => "__builtin_HEXAGON_V6_vasrhubrndsat_128B",
+ "llvm.hexagon.V6.vasrhubsat" => "__builtin_HEXAGON_V6_vasrhubsat",
+ "llvm.hexagon.V6.vasrhubsat.128B" => "__builtin_HEXAGON_V6_vasrhubsat_128B",
+ "llvm.hexagon.V6.vasrhv" => "__builtin_HEXAGON_V6_vasrhv",
+ "llvm.hexagon.V6.vasrhv.128B" => "__builtin_HEXAGON_V6_vasrhv_128B",
+ "llvm.hexagon.V6.vasrw" => "__builtin_HEXAGON_V6_vasrw",
+ "llvm.hexagon.V6.vasrw.128B" => "__builtin_HEXAGON_V6_vasrw_128B",
+ "llvm.hexagon.V6.vasrw.acc" => "__builtin_HEXAGON_V6_vasrw_acc",
+ "llvm.hexagon.V6.vasrw.acc.128B" => "__builtin_HEXAGON_V6_vasrw_acc_128B",
+ "llvm.hexagon.V6.vasrwh" => "__builtin_HEXAGON_V6_vasrwh",
+ "llvm.hexagon.V6.vasrwh.128B" => "__builtin_HEXAGON_V6_vasrwh_128B",
+ "llvm.hexagon.V6.vasrwhrndsat" => "__builtin_HEXAGON_V6_vasrwhrndsat",
+ "llvm.hexagon.V6.vasrwhrndsat.128B" => "__builtin_HEXAGON_V6_vasrwhrndsat_128B",
+ "llvm.hexagon.V6.vasrwhsat" => "__builtin_HEXAGON_V6_vasrwhsat",
+ "llvm.hexagon.V6.vasrwhsat.128B" => "__builtin_HEXAGON_V6_vasrwhsat_128B",
+ "llvm.hexagon.V6.vasrwuhsat" => "__builtin_HEXAGON_V6_vasrwuhsat",
+ "llvm.hexagon.V6.vasrwuhsat.128B" => "__builtin_HEXAGON_V6_vasrwuhsat_128B",
+ "llvm.hexagon.V6.vasrwv" => "__builtin_HEXAGON_V6_vasrwv",
+ "llvm.hexagon.V6.vasrwv.128B" => "__builtin_HEXAGON_V6_vasrwv_128B",
+ "llvm.hexagon.V6.vassign" => "__builtin_HEXAGON_V6_vassign",
+ "llvm.hexagon.V6.vassign.128B" => "__builtin_HEXAGON_V6_vassign_128B",
+ "llvm.hexagon.V6.vassignp" => "__builtin_HEXAGON_V6_vassignp",
+ "llvm.hexagon.V6.vassignp.128B" => "__builtin_HEXAGON_V6_vassignp_128B",
+ "llvm.hexagon.V6.vavgh" => "__builtin_HEXAGON_V6_vavgh",
+ "llvm.hexagon.V6.vavgh.128B" => "__builtin_HEXAGON_V6_vavgh_128B",
+ "llvm.hexagon.V6.vavghrnd" => "__builtin_HEXAGON_V6_vavghrnd",
+ "llvm.hexagon.V6.vavghrnd.128B" => "__builtin_HEXAGON_V6_vavghrnd_128B",
+ "llvm.hexagon.V6.vavgub" => "__builtin_HEXAGON_V6_vavgub",
+ "llvm.hexagon.V6.vavgub.128B" => "__builtin_HEXAGON_V6_vavgub_128B",
+ "llvm.hexagon.V6.vavgubrnd" => "__builtin_HEXAGON_V6_vavgubrnd",
+ "llvm.hexagon.V6.vavgubrnd.128B" => "__builtin_HEXAGON_V6_vavgubrnd_128B",
+ "llvm.hexagon.V6.vavguh" => "__builtin_HEXAGON_V6_vavguh",
+ "llvm.hexagon.V6.vavguh.128B" => "__builtin_HEXAGON_V6_vavguh_128B",
+ "llvm.hexagon.V6.vavguhrnd" => "__builtin_HEXAGON_V6_vavguhrnd",
+ "llvm.hexagon.V6.vavguhrnd.128B" => "__builtin_HEXAGON_V6_vavguhrnd_128B",
+ "llvm.hexagon.V6.vavgw" => "__builtin_HEXAGON_V6_vavgw",
+ "llvm.hexagon.V6.vavgw.128B" => "__builtin_HEXAGON_V6_vavgw_128B",
+ "llvm.hexagon.V6.vavgwrnd" => "__builtin_HEXAGON_V6_vavgwrnd",
+ "llvm.hexagon.V6.vavgwrnd.128B" => "__builtin_HEXAGON_V6_vavgwrnd_128B",
+ "llvm.hexagon.V6.vcl0h" => "__builtin_HEXAGON_V6_vcl0h",
+ "llvm.hexagon.V6.vcl0h.128B" => "__builtin_HEXAGON_V6_vcl0h_128B",
+ "llvm.hexagon.V6.vcl0w" => "__builtin_HEXAGON_V6_vcl0w",
+ "llvm.hexagon.V6.vcl0w.128B" => "__builtin_HEXAGON_V6_vcl0w_128B",
+ "llvm.hexagon.V6.vcombine" => "__builtin_HEXAGON_V6_vcombine",
+ "llvm.hexagon.V6.vcombine.128B" => "__builtin_HEXAGON_V6_vcombine_128B",
+ "llvm.hexagon.V6.vd0" => "__builtin_HEXAGON_V6_vd0",
+ "llvm.hexagon.V6.vd0.128B" => "__builtin_HEXAGON_V6_vd0_128B",
+ "llvm.hexagon.V6.vdealb" => "__builtin_HEXAGON_V6_vdealb",
+ "llvm.hexagon.V6.vdealb.128B" => "__builtin_HEXAGON_V6_vdealb_128B",
+ "llvm.hexagon.V6.vdealb4w" => "__builtin_HEXAGON_V6_vdealb4w",
+ "llvm.hexagon.V6.vdealb4w.128B" => "__builtin_HEXAGON_V6_vdealb4w_128B",
+ "llvm.hexagon.V6.vdealh" => "__builtin_HEXAGON_V6_vdealh",
+ "llvm.hexagon.V6.vdealh.128B" => "__builtin_HEXAGON_V6_vdealh_128B",
+ "llvm.hexagon.V6.vdealvdd" => "__builtin_HEXAGON_V6_vdealvdd",
+ "llvm.hexagon.V6.vdealvdd.128B" => "__builtin_HEXAGON_V6_vdealvdd_128B",
+ "llvm.hexagon.V6.vdelta" => "__builtin_HEXAGON_V6_vdelta",
+ "llvm.hexagon.V6.vdelta.128B" => "__builtin_HEXAGON_V6_vdelta_128B",
+ "llvm.hexagon.V6.vdmpybus" => "__builtin_HEXAGON_V6_vdmpybus",
+ "llvm.hexagon.V6.vdmpybus.128B" => "__builtin_HEXAGON_V6_vdmpybus_128B",
+ "llvm.hexagon.V6.vdmpybus.acc" => "__builtin_HEXAGON_V6_vdmpybus_acc",
+ "llvm.hexagon.V6.vdmpybus.acc.128B" => "__builtin_HEXAGON_V6_vdmpybus_acc_128B",
+ "llvm.hexagon.V6.vdmpybus.dv" => "__builtin_HEXAGON_V6_vdmpybus_dv",
+ "llvm.hexagon.V6.vdmpybus.dv.128B" => "__builtin_HEXAGON_V6_vdmpybus_dv_128B",
+ "llvm.hexagon.V6.vdmpybus.dv.acc" => "__builtin_HEXAGON_V6_vdmpybus_dv_acc",
+ "llvm.hexagon.V6.vdmpybus.dv.acc.128B" => "__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B",
+ "llvm.hexagon.V6.vdmpyhb" => "__builtin_HEXAGON_V6_vdmpyhb",
+ "llvm.hexagon.V6.vdmpyhb.128B" => "__builtin_HEXAGON_V6_vdmpyhb_128B",
+ "llvm.hexagon.V6.vdmpyhb.acc" => "__builtin_HEXAGON_V6_vdmpyhb_acc",
+ "llvm.hexagon.V6.vdmpyhb.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhb_acc_128B",
+ "llvm.hexagon.V6.vdmpyhb.dv" => "__builtin_HEXAGON_V6_vdmpyhb_dv",
+ "llvm.hexagon.V6.vdmpyhb.dv.128B" => "__builtin_HEXAGON_V6_vdmpyhb_dv_128B",
+ "llvm.hexagon.V6.vdmpyhb.dv.acc" => "__builtin_HEXAGON_V6_vdmpyhb_dv_acc",
+ "llvm.hexagon.V6.vdmpyhb.dv.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B",
+ "llvm.hexagon.V6.vdmpyhisat" => "__builtin_HEXAGON_V6_vdmpyhisat",
+ "llvm.hexagon.V6.vdmpyhisat.128B" => "__builtin_HEXAGON_V6_vdmpyhisat_128B",
+ "llvm.hexagon.V6.vdmpyhisat.acc" => "__builtin_HEXAGON_V6_vdmpyhisat_acc",
+ "llvm.hexagon.V6.vdmpyhisat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhisat_acc_128B",
+ "llvm.hexagon.V6.vdmpyhsat" => "__builtin_HEXAGON_V6_vdmpyhsat",
+ "llvm.hexagon.V6.vdmpyhsat.128B" => "__builtin_HEXAGON_V6_vdmpyhsat_128B",
+ "llvm.hexagon.V6.vdmpyhsat.acc" => "__builtin_HEXAGON_V6_vdmpyhsat_acc",
+ "llvm.hexagon.V6.vdmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsat_acc_128B",
+ "llvm.hexagon.V6.vdmpyhsuisat" => "__builtin_HEXAGON_V6_vdmpyhsuisat",
+ "llvm.hexagon.V6.vdmpyhsuisat.128B" => "__builtin_HEXAGON_V6_vdmpyhsuisat_128B",
+ "llvm.hexagon.V6.vdmpyhsuisat.acc" => "__builtin_HEXAGON_V6_vdmpyhsuisat_acc",
+ "llvm.hexagon.V6.vdmpyhsuisat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B",
+ "llvm.hexagon.V6.vdmpyhsusat" => "__builtin_HEXAGON_V6_vdmpyhsusat",
+ "llvm.hexagon.V6.vdmpyhsusat.128B" => "__builtin_HEXAGON_V6_vdmpyhsusat_128B",
+ "llvm.hexagon.V6.vdmpyhsusat.acc" => "__builtin_HEXAGON_V6_vdmpyhsusat_acc",
+ "llvm.hexagon.V6.vdmpyhsusat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B",
+ "llvm.hexagon.V6.vdmpyhvsat" => "__builtin_HEXAGON_V6_vdmpyhvsat",
+ "llvm.hexagon.V6.vdmpyhvsat.128B" => "__builtin_HEXAGON_V6_vdmpyhvsat_128B",
+ "llvm.hexagon.V6.vdmpyhvsat.acc" => "__builtin_HEXAGON_V6_vdmpyhvsat_acc",
+ "llvm.hexagon.V6.vdmpyhvsat.acc.128B" => "__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B",
+ "llvm.hexagon.V6.vdsaduh" => "__builtin_HEXAGON_V6_vdsaduh",
+ "llvm.hexagon.V6.vdsaduh.128B" => "__builtin_HEXAGON_V6_vdsaduh_128B",
+ "llvm.hexagon.V6.vdsaduh.acc" => "__builtin_HEXAGON_V6_vdsaduh_acc",
+ "llvm.hexagon.V6.vdsaduh.acc.128B" => "__builtin_HEXAGON_V6_vdsaduh_acc_128B",
+ "llvm.hexagon.V6.vinsertwr" => "__builtin_HEXAGON_V6_vinsertwr",
+ "llvm.hexagon.V6.vinsertwr.128B" => "__builtin_HEXAGON_V6_vinsertwr_128B",
+ "llvm.hexagon.V6.vlalignb" => "__builtin_HEXAGON_V6_vlalignb",
+ "llvm.hexagon.V6.vlalignb.128B" => "__builtin_HEXAGON_V6_vlalignb_128B",
+ "llvm.hexagon.V6.vlalignbi" => "__builtin_HEXAGON_V6_vlalignbi",
+ "llvm.hexagon.V6.vlalignbi.128B" => "__builtin_HEXAGON_V6_vlalignbi_128B",
+ "llvm.hexagon.V6.vlsrh" => "__builtin_HEXAGON_V6_vlsrh",
+ "llvm.hexagon.V6.vlsrh.128B" => "__builtin_HEXAGON_V6_vlsrh_128B",
+ "llvm.hexagon.V6.vlsrhv" => "__builtin_HEXAGON_V6_vlsrhv",
+ "llvm.hexagon.V6.vlsrhv.128B" => "__builtin_HEXAGON_V6_vlsrhv_128B",
+ "llvm.hexagon.V6.vlsrw" => "__builtin_HEXAGON_V6_vlsrw",
+ "llvm.hexagon.V6.vlsrw.128B" => "__builtin_HEXAGON_V6_vlsrw_128B",
+ "llvm.hexagon.V6.vlsrwv" => "__builtin_HEXAGON_V6_vlsrwv",
+ "llvm.hexagon.V6.vlsrwv.128B" => "__builtin_HEXAGON_V6_vlsrwv_128B",
+ "llvm.hexagon.V6.vlutb" => "__builtin_HEXAGON_V6_vlutb",
+ "llvm.hexagon.V6.vlutb.128B" => "__builtin_HEXAGON_V6_vlutb_128B",
+ "llvm.hexagon.V6.vlutb.acc" => "__builtin_HEXAGON_V6_vlutb_acc",
+ "llvm.hexagon.V6.vlutb.acc.128B" => "__builtin_HEXAGON_V6_vlutb_acc_128B",
+ "llvm.hexagon.V6.vlutb.dv" => "__builtin_HEXAGON_V6_vlutb_dv",
+ "llvm.hexagon.V6.vlutb.dv.128B" => "__builtin_HEXAGON_V6_vlutb_dv_128B",
+ "llvm.hexagon.V6.vlutb.dv.acc" => "__builtin_HEXAGON_V6_vlutb_dv_acc",
+ "llvm.hexagon.V6.vlutb.dv.acc.128B" => "__builtin_HEXAGON_V6_vlutb_dv_acc_128B",
+ "llvm.hexagon.V6.vlutvvb" => "__builtin_HEXAGON_V6_vlutvvb",
+ "llvm.hexagon.V6.vlutvvb.128B" => "__builtin_HEXAGON_V6_vlutvvb_128B",
+ "llvm.hexagon.V6.vlutvvb.oracc" => "__builtin_HEXAGON_V6_vlutvvb_oracc",
+ "llvm.hexagon.V6.vlutvvb.oracc.128B" => "__builtin_HEXAGON_V6_vlutvvb_oracc_128B",
+ "llvm.hexagon.V6.vlutvwh" => "__builtin_HEXAGON_V6_vlutvwh",
+ "llvm.hexagon.V6.vlutvwh.128B" => "__builtin_HEXAGON_V6_vlutvwh_128B",
+ "llvm.hexagon.V6.vlutvwh.oracc" => "__builtin_HEXAGON_V6_vlutvwh_oracc",
+ "llvm.hexagon.V6.vlutvwh.oracc.128B" => "__builtin_HEXAGON_V6_vlutvwh_oracc_128B",
+ "llvm.hexagon.V6.vmaxh" => "__builtin_HEXAGON_V6_vmaxh",
+ "llvm.hexagon.V6.vmaxh.128B" => "__builtin_HEXAGON_V6_vmaxh_128B",
+ "llvm.hexagon.V6.vmaxub" => "__builtin_HEXAGON_V6_vmaxub",
+ "llvm.hexagon.V6.vmaxub.128B" => "__builtin_HEXAGON_V6_vmaxub_128B",
+ "llvm.hexagon.V6.vmaxuh" => "__builtin_HEXAGON_V6_vmaxuh",
+ "llvm.hexagon.V6.vmaxuh.128B" => "__builtin_HEXAGON_V6_vmaxuh_128B",
+ "llvm.hexagon.V6.vmaxw" => "__builtin_HEXAGON_V6_vmaxw",
+ "llvm.hexagon.V6.vmaxw.128B" => "__builtin_HEXAGON_V6_vmaxw_128B",
+ "llvm.hexagon.V6.vminh" => "__builtin_HEXAGON_V6_vminh",
+ "llvm.hexagon.V6.vminh.128B" => "__builtin_HEXAGON_V6_vminh_128B",
+ "llvm.hexagon.V6.vminub" => "__builtin_HEXAGON_V6_vminub",
+ "llvm.hexagon.V6.vminub.128B" => "__builtin_HEXAGON_V6_vminub_128B",
+ "llvm.hexagon.V6.vminuh" => "__builtin_HEXAGON_V6_vminuh",
+ "llvm.hexagon.V6.vminuh.128B" => "__builtin_HEXAGON_V6_vminuh_128B",
+ "llvm.hexagon.V6.vminw" => "__builtin_HEXAGON_V6_vminw",
+ "llvm.hexagon.V6.vminw.128B" => "__builtin_HEXAGON_V6_vminw_128B",
+ "llvm.hexagon.V6.vmpabus" => "__builtin_HEXAGON_V6_vmpabus",
+ "llvm.hexagon.V6.vmpabus.128B" => "__builtin_HEXAGON_V6_vmpabus_128B",
+ "llvm.hexagon.V6.vmpabus.acc" => "__builtin_HEXAGON_V6_vmpabus_acc",
+ "llvm.hexagon.V6.vmpabus.acc.128B" => "__builtin_HEXAGON_V6_vmpabus_acc_128B",
+ "llvm.hexagon.V6.vmpabusv" => "__builtin_HEXAGON_V6_vmpabusv",
+ "llvm.hexagon.V6.vmpabusv.128B" => "__builtin_HEXAGON_V6_vmpabusv_128B",
+ "llvm.hexagon.V6.vmpabuuv" => "__builtin_HEXAGON_V6_vmpabuuv",
+ "llvm.hexagon.V6.vmpabuuv.128B" => "__builtin_HEXAGON_V6_vmpabuuv_128B",
+ "llvm.hexagon.V6.vmpahb" => "__builtin_HEXAGON_V6_vmpahb",
+ "llvm.hexagon.V6.vmpahb.128B" => "__builtin_HEXAGON_V6_vmpahb_128B",
+ "llvm.hexagon.V6.vmpahb.acc" => "__builtin_HEXAGON_V6_vmpahb_acc",
+ "llvm.hexagon.V6.vmpahb.acc.128B" => "__builtin_HEXAGON_V6_vmpahb_acc_128B",
+ "llvm.hexagon.V6.vmpybus" => "__builtin_HEXAGON_V6_vmpybus",
+ "llvm.hexagon.V6.vmpybus.128B" => "__builtin_HEXAGON_V6_vmpybus_128B",
+ "llvm.hexagon.V6.vmpybus.acc" => "__builtin_HEXAGON_V6_vmpybus_acc",
+ "llvm.hexagon.V6.vmpybus.acc.128B" => "__builtin_HEXAGON_V6_vmpybus_acc_128B",
+ "llvm.hexagon.V6.vmpybusv" => "__builtin_HEXAGON_V6_vmpybusv",
+ "llvm.hexagon.V6.vmpybusv.128B" => "__builtin_HEXAGON_V6_vmpybusv_128B",
+ "llvm.hexagon.V6.vmpybusv.acc" => "__builtin_HEXAGON_V6_vmpybusv_acc",
+ "llvm.hexagon.V6.vmpybusv.acc.128B" => "__builtin_HEXAGON_V6_vmpybusv_acc_128B",
+ "llvm.hexagon.V6.vmpybv" => "__builtin_HEXAGON_V6_vmpybv",
+ "llvm.hexagon.V6.vmpybv.128B" => "__builtin_HEXAGON_V6_vmpybv_128B",
+ "llvm.hexagon.V6.vmpybv.acc" => "__builtin_HEXAGON_V6_vmpybv_acc",
+ "llvm.hexagon.V6.vmpybv.acc.128B" => "__builtin_HEXAGON_V6_vmpybv_acc_128B",
+ "llvm.hexagon.V6.vmpyewuh" => "__builtin_HEXAGON_V6_vmpyewuh",
+ "llvm.hexagon.V6.vmpyewuh.128B" => "__builtin_HEXAGON_V6_vmpyewuh_128B",
+ "llvm.hexagon.V6.vmpyh" => "__builtin_HEXAGON_V6_vmpyh",
+ "llvm.hexagon.V6.vmpyh.128B" => "__builtin_HEXAGON_V6_vmpyh_128B",
+ "llvm.hexagon.V6.vmpyhsat.acc" => "__builtin_HEXAGON_V6_vmpyhsat_acc",
+ "llvm.hexagon.V6.vmpyhsat.acc.128B" => "__builtin_HEXAGON_V6_vmpyhsat_acc_128B",
+ "llvm.hexagon.V6.vmpyhsrs" => "__builtin_HEXAGON_V6_vmpyhsrs",
+ "llvm.hexagon.V6.vmpyhsrs.128B" => "__builtin_HEXAGON_V6_vmpyhsrs_128B",
+ "llvm.hexagon.V6.vmpyhss" => "__builtin_HEXAGON_V6_vmpyhss",
+ "llvm.hexagon.V6.vmpyhss.128B" => "__builtin_HEXAGON_V6_vmpyhss_128B",
+ "llvm.hexagon.V6.vmpyhus" => "__builtin_HEXAGON_V6_vmpyhus",
+ "llvm.hexagon.V6.vmpyhus.128B" => "__builtin_HEXAGON_V6_vmpyhus_128B",
+ "llvm.hexagon.V6.vmpyhus.acc" => "__builtin_HEXAGON_V6_vmpyhus_acc",
+ "llvm.hexagon.V6.vmpyhus.acc.128B" => "__builtin_HEXAGON_V6_vmpyhus_acc_128B",
+ "llvm.hexagon.V6.vmpyhv" => "__builtin_HEXAGON_V6_vmpyhv",
+ "llvm.hexagon.V6.vmpyhv.128B" => "__builtin_HEXAGON_V6_vmpyhv_128B",
+ "llvm.hexagon.V6.vmpyhv.acc" => "__builtin_HEXAGON_V6_vmpyhv_acc",
+ "llvm.hexagon.V6.vmpyhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyhv_acc_128B",
+ "llvm.hexagon.V6.vmpyhvsrs" => "__builtin_HEXAGON_V6_vmpyhvsrs",
+ "llvm.hexagon.V6.vmpyhvsrs.128B" => "__builtin_HEXAGON_V6_vmpyhvsrs_128B",
+ "llvm.hexagon.V6.vmpyieoh" => "__builtin_HEXAGON_V6_vmpyieoh",
+ "llvm.hexagon.V6.vmpyieoh.128B" => "__builtin_HEXAGON_V6_vmpyieoh_128B",
+ "llvm.hexagon.V6.vmpyiewh.acc" => "__builtin_HEXAGON_V6_vmpyiewh_acc",
+ "llvm.hexagon.V6.vmpyiewh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiewh_acc_128B",
+ "llvm.hexagon.V6.vmpyiewuh" => "__builtin_HEXAGON_V6_vmpyiewuh",
+ "llvm.hexagon.V6.vmpyiewuh.128B" => "__builtin_HEXAGON_V6_vmpyiewuh_128B",
+ "llvm.hexagon.V6.vmpyiewuh.acc" => "__builtin_HEXAGON_V6_vmpyiewuh_acc",
+ "llvm.hexagon.V6.vmpyiewuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiewuh_acc_128B",
+ "llvm.hexagon.V6.vmpyih" => "__builtin_HEXAGON_V6_vmpyih",
+ "llvm.hexagon.V6.vmpyih.128B" => "__builtin_HEXAGON_V6_vmpyih_128B",
+ "llvm.hexagon.V6.vmpyih.acc" => "__builtin_HEXAGON_V6_vmpyih_acc",
+ "llvm.hexagon.V6.vmpyih.acc.128B" => "__builtin_HEXAGON_V6_vmpyih_acc_128B",
+ "llvm.hexagon.V6.vmpyihb" => "__builtin_HEXAGON_V6_vmpyihb",
+ "llvm.hexagon.V6.vmpyihb.128B" => "__builtin_HEXAGON_V6_vmpyihb_128B",
+ "llvm.hexagon.V6.vmpyihb.acc" => "__builtin_HEXAGON_V6_vmpyihb_acc",
+ "llvm.hexagon.V6.vmpyihb.acc.128B" => "__builtin_HEXAGON_V6_vmpyihb_acc_128B",
+ "llvm.hexagon.V6.vmpyiowh" => "__builtin_HEXAGON_V6_vmpyiowh",
+ "llvm.hexagon.V6.vmpyiowh.128B" => "__builtin_HEXAGON_V6_vmpyiowh_128B",
+ "llvm.hexagon.V6.vmpyiwb" => "__builtin_HEXAGON_V6_vmpyiwb",
+ "llvm.hexagon.V6.vmpyiwb.128B" => "__builtin_HEXAGON_V6_vmpyiwb_128B",
+ "llvm.hexagon.V6.vmpyiwb.acc" => "__builtin_HEXAGON_V6_vmpyiwb_acc",
+ "llvm.hexagon.V6.vmpyiwb.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwb_acc_128B",
+ "llvm.hexagon.V6.vmpyiwh" => "__builtin_HEXAGON_V6_vmpyiwh",
+ "llvm.hexagon.V6.vmpyiwh.128B" => "__builtin_HEXAGON_V6_vmpyiwh_128B",
+ "llvm.hexagon.V6.vmpyiwh.acc" => "__builtin_HEXAGON_V6_vmpyiwh_acc",
+ "llvm.hexagon.V6.vmpyiwh.acc.128B" => "__builtin_HEXAGON_V6_vmpyiwh_acc_128B",
+ "llvm.hexagon.V6.vmpyowh" => "__builtin_HEXAGON_V6_vmpyowh",
+ "llvm.hexagon.V6.vmpyowh.128B" => "__builtin_HEXAGON_V6_vmpyowh_128B",
+ "llvm.hexagon.V6.vmpyowh.rnd" => "__builtin_HEXAGON_V6_vmpyowh_rnd",
+ "llvm.hexagon.V6.vmpyowh.rnd.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_128B",
+ "llvm.hexagon.V6.vmpyowh.rnd.sacc" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc",
+ "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B" => "__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B",
+ "llvm.hexagon.V6.vmpyowh.sacc" => "__builtin_HEXAGON_V6_vmpyowh_sacc",
+ "llvm.hexagon.V6.vmpyowh.sacc.128B" => "__builtin_HEXAGON_V6_vmpyowh_sacc_128B",
+ "llvm.hexagon.V6.vmpyub" => "__builtin_HEXAGON_V6_vmpyub",
+ "llvm.hexagon.V6.vmpyub.128B" => "__builtin_HEXAGON_V6_vmpyub_128B",
+ "llvm.hexagon.V6.vmpyub.acc" => "__builtin_HEXAGON_V6_vmpyub_acc",
+ "llvm.hexagon.V6.vmpyub.acc.128B" => "__builtin_HEXAGON_V6_vmpyub_acc_128B",
+ "llvm.hexagon.V6.vmpyubv" => "__builtin_HEXAGON_V6_vmpyubv",
+ "llvm.hexagon.V6.vmpyubv.128B" => "__builtin_HEXAGON_V6_vmpyubv_128B",
+ "llvm.hexagon.V6.vmpyubv.acc" => "__builtin_HEXAGON_V6_vmpyubv_acc",
+ "llvm.hexagon.V6.vmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vmpyubv_acc_128B",
+ "llvm.hexagon.V6.vmpyuh" => "__builtin_HEXAGON_V6_vmpyuh",
+ "llvm.hexagon.V6.vmpyuh.128B" => "__builtin_HEXAGON_V6_vmpyuh_128B",
+ "llvm.hexagon.V6.vmpyuh.acc" => "__builtin_HEXAGON_V6_vmpyuh_acc",
+ "llvm.hexagon.V6.vmpyuh.acc.128B" => "__builtin_HEXAGON_V6_vmpyuh_acc_128B",
+ "llvm.hexagon.V6.vmpyuhv" => "__builtin_HEXAGON_V6_vmpyuhv",
+ "llvm.hexagon.V6.vmpyuhv.128B" => "__builtin_HEXAGON_V6_vmpyuhv_128B",
+ "llvm.hexagon.V6.vmpyuhv.acc" => "__builtin_HEXAGON_V6_vmpyuhv_acc",
+ "llvm.hexagon.V6.vmpyuhv.acc.128B" => "__builtin_HEXAGON_V6_vmpyuhv_acc_128B",
+ "llvm.hexagon.V6.vnavgh" => "__builtin_HEXAGON_V6_vnavgh",
+ "llvm.hexagon.V6.vnavgh.128B" => "__builtin_HEXAGON_V6_vnavgh_128B",
+ "llvm.hexagon.V6.vnavgub" => "__builtin_HEXAGON_V6_vnavgub",
+ "llvm.hexagon.V6.vnavgub.128B" => "__builtin_HEXAGON_V6_vnavgub_128B",
+ "llvm.hexagon.V6.vnavgw" => "__builtin_HEXAGON_V6_vnavgw",
+ "llvm.hexagon.V6.vnavgw.128B" => "__builtin_HEXAGON_V6_vnavgw_128B",
+ "llvm.hexagon.V6.vnormamth" => "__builtin_HEXAGON_V6_vnormamth",
+ "llvm.hexagon.V6.vnormamth.128B" => "__builtin_HEXAGON_V6_vnormamth_128B",
+ "llvm.hexagon.V6.vnormamtw" => "__builtin_HEXAGON_V6_vnormamtw",
+ "llvm.hexagon.V6.vnormamtw.128B" => "__builtin_HEXAGON_V6_vnormamtw_128B",
+ "llvm.hexagon.V6.vnot" => "__builtin_HEXAGON_V6_vnot",
+ "llvm.hexagon.V6.vnot.128B" => "__builtin_HEXAGON_V6_vnot_128B",
+ "llvm.hexagon.V6.vor" => "__builtin_HEXAGON_V6_vor",
+ "llvm.hexagon.V6.vor.128B" => "__builtin_HEXAGON_V6_vor_128B",
+ "llvm.hexagon.V6.vpackeb" => "__builtin_HEXAGON_V6_vpackeb",
+ "llvm.hexagon.V6.vpackeb.128B" => "__builtin_HEXAGON_V6_vpackeb_128B",
+ "llvm.hexagon.V6.vpackeh" => "__builtin_HEXAGON_V6_vpackeh",
+ "llvm.hexagon.V6.vpackeh.128B" => "__builtin_HEXAGON_V6_vpackeh_128B",
+ "llvm.hexagon.V6.vpackhb.sat" => "__builtin_HEXAGON_V6_vpackhb_sat",
+ "llvm.hexagon.V6.vpackhb.sat.128B" => "__builtin_HEXAGON_V6_vpackhb_sat_128B",
+ "llvm.hexagon.V6.vpackhub.sat" => "__builtin_HEXAGON_V6_vpackhub_sat",
+ "llvm.hexagon.V6.vpackhub.sat.128B" => "__builtin_HEXAGON_V6_vpackhub_sat_128B",
+ "llvm.hexagon.V6.vpackob" => "__builtin_HEXAGON_V6_vpackob",
+ "llvm.hexagon.V6.vpackob.128B" => "__builtin_HEXAGON_V6_vpackob_128B",
+ "llvm.hexagon.V6.vpackoh" => "__builtin_HEXAGON_V6_vpackoh",
+ "llvm.hexagon.V6.vpackoh.128B" => "__builtin_HEXAGON_V6_vpackoh_128B",
+ "llvm.hexagon.V6.vpackwh.sat" => "__builtin_HEXAGON_V6_vpackwh_sat",
+ "llvm.hexagon.V6.vpackwh.sat.128B" => "__builtin_HEXAGON_V6_vpackwh_sat_128B",
+ "llvm.hexagon.V6.vpackwuh.sat" => "__builtin_HEXAGON_V6_vpackwuh_sat",
+ "llvm.hexagon.V6.vpackwuh.sat.128B" => "__builtin_HEXAGON_V6_vpackwuh_sat_128B",
+ "llvm.hexagon.V6.vpopcounth" => "__builtin_HEXAGON_V6_vpopcounth",
+ "llvm.hexagon.V6.vpopcounth.128B" => "__builtin_HEXAGON_V6_vpopcounth_128B",
+ "llvm.hexagon.V6.vrdelta" => "__builtin_HEXAGON_V6_vrdelta",
+ "llvm.hexagon.V6.vrdelta.128B" => "__builtin_HEXAGON_V6_vrdelta_128B",
+ "llvm.hexagon.V6.vrmpybus" => "__builtin_HEXAGON_V6_vrmpybus",
+ "llvm.hexagon.V6.vrmpybus.128B" => "__builtin_HEXAGON_V6_vrmpybus_128B",
+ "llvm.hexagon.V6.vrmpybus.acc" => "__builtin_HEXAGON_V6_vrmpybus_acc",
+ "llvm.hexagon.V6.vrmpybus.acc.128B" => "__builtin_HEXAGON_V6_vrmpybus_acc_128B",
+ "llvm.hexagon.V6.vrmpybusi" => "__builtin_HEXAGON_V6_vrmpybusi",
+ "llvm.hexagon.V6.vrmpybusi.128B" => "__builtin_HEXAGON_V6_vrmpybusi_128B",
+ "llvm.hexagon.V6.vrmpybusi.acc" => "__builtin_HEXAGON_V6_vrmpybusi_acc",
+ "llvm.hexagon.V6.vrmpybusi.acc.128B" => "__builtin_HEXAGON_V6_vrmpybusi_acc_128B",
+ "llvm.hexagon.V6.vrmpybusv" => "__builtin_HEXAGON_V6_vrmpybusv",
+ "llvm.hexagon.V6.vrmpybusv.128B" => "__builtin_HEXAGON_V6_vrmpybusv_128B",
+ "llvm.hexagon.V6.vrmpybusv.acc" => "__builtin_HEXAGON_V6_vrmpybusv_acc",
+ "llvm.hexagon.V6.vrmpybusv.acc.128B" => "__builtin_HEXAGON_V6_vrmpybusv_acc_128B",
+ "llvm.hexagon.V6.vrmpybv" => "__builtin_HEXAGON_V6_vrmpybv",
+ "llvm.hexagon.V6.vrmpybv.128B" => "__builtin_HEXAGON_V6_vrmpybv_128B",
+ "llvm.hexagon.V6.vrmpybv.acc" => "__builtin_HEXAGON_V6_vrmpybv_acc",
+ "llvm.hexagon.V6.vrmpybv.acc.128B" => "__builtin_HEXAGON_V6_vrmpybv_acc_128B",
+ "llvm.hexagon.V6.vrmpyub" => "__builtin_HEXAGON_V6_vrmpyub",
+ "llvm.hexagon.V6.vrmpyub.128B" => "__builtin_HEXAGON_V6_vrmpyub_128B",
+ "llvm.hexagon.V6.vrmpyub.acc" => "__builtin_HEXAGON_V6_vrmpyub_acc",
+ "llvm.hexagon.V6.vrmpyub.acc.128B" => "__builtin_HEXAGON_V6_vrmpyub_acc_128B",
+ "llvm.hexagon.V6.vrmpyubi" => "__builtin_HEXAGON_V6_vrmpyubi",
+ "llvm.hexagon.V6.vrmpyubi.128B" => "__builtin_HEXAGON_V6_vrmpyubi_128B",
+ "llvm.hexagon.V6.vrmpyubi.acc" => "__builtin_HEXAGON_V6_vrmpyubi_acc",
+ "llvm.hexagon.V6.vrmpyubi.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubi_acc_128B",
+ "llvm.hexagon.V6.vrmpyubv" => "__builtin_HEXAGON_V6_vrmpyubv",
+ "llvm.hexagon.V6.vrmpyubv.128B" => "__builtin_HEXAGON_V6_vrmpyubv_128B",
+ "llvm.hexagon.V6.vrmpyubv.acc" => "__builtin_HEXAGON_V6_vrmpyubv_acc",
+ "llvm.hexagon.V6.vrmpyubv.acc.128B" => "__builtin_HEXAGON_V6_vrmpyubv_acc_128B",
+ "llvm.hexagon.V6.vror" => "__builtin_HEXAGON_V6_vror",
+ "llvm.hexagon.V6.vror.128B" => "__builtin_HEXAGON_V6_vror_128B",
+ "llvm.hexagon.V6.vroundhb" => "__builtin_HEXAGON_V6_vroundhb",
+ "llvm.hexagon.V6.vroundhb.128B" => "__builtin_HEXAGON_V6_vroundhb_128B",
+ "llvm.hexagon.V6.vroundhub" => "__builtin_HEXAGON_V6_vroundhub",
+ "llvm.hexagon.V6.vroundhub.128B" => "__builtin_HEXAGON_V6_vroundhub_128B",
+ "llvm.hexagon.V6.vroundwh" => "__builtin_HEXAGON_V6_vroundwh",
+ "llvm.hexagon.V6.vroundwh.128B" => "__builtin_HEXAGON_V6_vroundwh_128B",
+ "llvm.hexagon.V6.vroundwuh" => "__builtin_HEXAGON_V6_vroundwuh",
+ "llvm.hexagon.V6.vroundwuh.128B" => "__builtin_HEXAGON_V6_vroundwuh_128B",
+ "llvm.hexagon.V6.vrsadubi" => "__builtin_HEXAGON_V6_vrsadubi",
+ "llvm.hexagon.V6.vrsadubi.128B" => "__builtin_HEXAGON_V6_vrsadubi_128B",
+ "llvm.hexagon.V6.vrsadubi.acc" => "__builtin_HEXAGON_V6_vrsadubi_acc",
+ "llvm.hexagon.V6.vrsadubi.acc.128B" => "__builtin_HEXAGON_V6_vrsadubi_acc_128B",
+ "llvm.hexagon.V6.vsathub" => "__builtin_HEXAGON_V6_vsathub",
+ "llvm.hexagon.V6.vsathub.128B" => "__builtin_HEXAGON_V6_vsathub_128B",
+ "llvm.hexagon.V6.vsatwh" => "__builtin_HEXAGON_V6_vsatwh",
+ "llvm.hexagon.V6.vsatwh.128B" => "__builtin_HEXAGON_V6_vsatwh_128B",
+ "llvm.hexagon.V6.vsb" => "__builtin_HEXAGON_V6_vsb",
+ "llvm.hexagon.V6.vsb.128B" => "__builtin_HEXAGON_V6_vsb_128B",
+ "llvm.hexagon.V6.vsh" => "__builtin_HEXAGON_V6_vsh",
+ "llvm.hexagon.V6.vsh.128B" => "__builtin_HEXAGON_V6_vsh_128B",
+ "llvm.hexagon.V6.vshufeh" => "__builtin_HEXAGON_V6_vshufeh",
+ "llvm.hexagon.V6.vshufeh.128B" => "__builtin_HEXAGON_V6_vshufeh_128B",
+ "llvm.hexagon.V6.vshuffb" => "__builtin_HEXAGON_V6_vshuffb",
+ "llvm.hexagon.V6.vshuffb.128B" => "__builtin_HEXAGON_V6_vshuffb_128B",
+ "llvm.hexagon.V6.vshuffeb" => "__builtin_HEXAGON_V6_vshuffeb",
+ "llvm.hexagon.V6.vshuffeb.128B" => "__builtin_HEXAGON_V6_vshuffeb_128B",
+ "llvm.hexagon.V6.vshuffh" => "__builtin_HEXAGON_V6_vshuffh",
+ "llvm.hexagon.V6.vshuffh.128B" => "__builtin_HEXAGON_V6_vshuffh_128B",
+ "llvm.hexagon.V6.vshuffob" => "__builtin_HEXAGON_V6_vshuffob",
+ "llvm.hexagon.V6.vshuffob.128B" => "__builtin_HEXAGON_V6_vshuffob_128B",
+ "llvm.hexagon.V6.vshuffvdd" => "__builtin_HEXAGON_V6_vshuffvdd",
+ "llvm.hexagon.V6.vshuffvdd.128B" => "__builtin_HEXAGON_V6_vshuffvdd_128B",
+ "llvm.hexagon.V6.vshufoeb" => "__builtin_HEXAGON_V6_vshufoeb",
+ "llvm.hexagon.V6.vshufoeb.128B" => "__builtin_HEXAGON_V6_vshufoeb_128B",
+ "llvm.hexagon.V6.vshufoeh" => "__builtin_HEXAGON_V6_vshufoeh",
+ "llvm.hexagon.V6.vshufoeh.128B" => "__builtin_HEXAGON_V6_vshufoeh_128B",
+ "llvm.hexagon.V6.vshufoh" => "__builtin_HEXAGON_V6_vshufoh",
+ "llvm.hexagon.V6.vshufoh.128B" => "__builtin_HEXAGON_V6_vshufoh_128B",
+ "llvm.hexagon.V6.vsubb" => "__builtin_HEXAGON_V6_vsubb",
+ "llvm.hexagon.V6.vsubb.128B" => "__builtin_HEXAGON_V6_vsubb_128B",
+ "llvm.hexagon.V6.vsubb.dv" => "__builtin_HEXAGON_V6_vsubb_dv",
+ "llvm.hexagon.V6.vsubb.dv.128B" => "__builtin_HEXAGON_V6_vsubb_dv_128B",
+ "llvm.hexagon.V6.vsubh" => "__builtin_HEXAGON_V6_vsubh",
+ "llvm.hexagon.V6.vsubh.128B" => "__builtin_HEXAGON_V6_vsubh_128B",
+ "llvm.hexagon.V6.vsubh.dv" => "__builtin_HEXAGON_V6_vsubh_dv",
+ "llvm.hexagon.V6.vsubh.dv.128B" => "__builtin_HEXAGON_V6_vsubh_dv_128B",
+ "llvm.hexagon.V6.vsubhsat" => "__builtin_HEXAGON_V6_vsubhsat",
+ "llvm.hexagon.V6.vsubhsat.128B" => "__builtin_HEXAGON_V6_vsubhsat_128B",
+ "llvm.hexagon.V6.vsubhsat.dv" => "__builtin_HEXAGON_V6_vsubhsat_dv",
+ "llvm.hexagon.V6.vsubhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubhsat_dv_128B",
+ "llvm.hexagon.V6.vsubhw" => "__builtin_HEXAGON_V6_vsubhw",
+ "llvm.hexagon.V6.vsubhw.128B" => "__builtin_HEXAGON_V6_vsubhw_128B",
+ "llvm.hexagon.V6.vsububh" => "__builtin_HEXAGON_V6_vsububh",
+ "llvm.hexagon.V6.vsububh.128B" => "__builtin_HEXAGON_V6_vsububh_128B",
+ "llvm.hexagon.V6.vsububsat" => "__builtin_HEXAGON_V6_vsububsat",
+ "llvm.hexagon.V6.vsububsat.128B" => "__builtin_HEXAGON_V6_vsububsat_128B",
+ "llvm.hexagon.V6.vsububsat.dv" => "__builtin_HEXAGON_V6_vsububsat_dv",
+ "llvm.hexagon.V6.vsububsat.dv.128B" => "__builtin_HEXAGON_V6_vsububsat_dv_128B",
+ "llvm.hexagon.V6.vsubuhsat" => "__builtin_HEXAGON_V6_vsubuhsat",
+ "llvm.hexagon.V6.vsubuhsat.128B" => "__builtin_HEXAGON_V6_vsubuhsat_128B",
+ "llvm.hexagon.V6.vsubuhsat.dv" => "__builtin_HEXAGON_V6_vsubuhsat_dv",
+ "llvm.hexagon.V6.vsubuhsat.dv.128B" => "__builtin_HEXAGON_V6_vsubuhsat_dv_128B",
+ "llvm.hexagon.V6.vsubuhw" => "__builtin_HEXAGON_V6_vsubuhw",
+ "llvm.hexagon.V6.vsubuhw.128B" => "__builtin_HEXAGON_V6_vsubuhw_128B",
+ "llvm.hexagon.V6.vsubw" => "__builtin_HEXAGON_V6_vsubw",
+ "llvm.hexagon.V6.vsubw.128B" => "__builtin_HEXAGON_V6_vsubw_128B",
+ "llvm.hexagon.V6.vsubw.dv" => "__builtin_HEXAGON_V6_vsubw_dv",
+ "llvm.hexagon.V6.vsubw.dv.128B" => "__builtin_HEXAGON_V6_vsubw_dv_128B",
+ "llvm.hexagon.V6.vsubwsat" => "__builtin_HEXAGON_V6_vsubwsat",
+ "llvm.hexagon.V6.vsubwsat.128B" => "__builtin_HEXAGON_V6_vsubwsat_128B",
+ "llvm.hexagon.V6.vsubwsat.dv" => "__builtin_HEXAGON_V6_vsubwsat_dv",
+ "llvm.hexagon.V6.vsubwsat.dv.128B" => "__builtin_HEXAGON_V6_vsubwsat_dv_128B",
+ "llvm.hexagon.V6.vtmpyb" => "__builtin_HEXAGON_V6_vtmpyb",
+ "llvm.hexagon.V6.vtmpyb.128B" => "__builtin_HEXAGON_V6_vtmpyb_128B",
+ "llvm.hexagon.V6.vtmpyb.acc" => "__builtin_HEXAGON_V6_vtmpyb_acc",
+ "llvm.hexagon.V6.vtmpyb.acc.128B" => "__builtin_HEXAGON_V6_vtmpyb_acc_128B",
+ "llvm.hexagon.V6.vtmpybus" => "__builtin_HEXAGON_V6_vtmpybus",
+ "llvm.hexagon.V6.vtmpybus.128B" => "__builtin_HEXAGON_V6_vtmpybus_128B",
+ "llvm.hexagon.V6.vtmpybus.acc" => "__builtin_HEXAGON_V6_vtmpybus_acc",
+ "llvm.hexagon.V6.vtmpybus.acc.128B" => "__builtin_HEXAGON_V6_vtmpybus_acc_128B",
+ "llvm.hexagon.V6.vtmpyhb" => "__builtin_HEXAGON_V6_vtmpyhb",
+ "llvm.hexagon.V6.vtmpyhb.128B" => "__builtin_HEXAGON_V6_vtmpyhb_128B",
+ "llvm.hexagon.V6.vtmpyhb.acc" => "__builtin_HEXAGON_V6_vtmpyhb_acc",
+ "llvm.hexagon.V6.vtmpyhb.acc.128B" => "__builtin_HEXAGON_V6_vtmpyhb_acc_128B",
+ "llvm.hexagon.V6.vunpackb" => "__builtin_HEXAGON_V6_vunpackb",
+ "llvm.hexagon.V6.vunpackb.128B" => "__builtin_HEXAGON_V6_vunpackb_128B",
+ "llvm.hexagon.V6.vunpackh" => "__builtin_HEXAGON_V6_vunpackh",
+ "llvm.hexagon.V6.vunpackh.128B" => "__builtin_HEXAGON_V6_vunpackh_128B",
+ "llvm.hexagon.V6.vunpackob" => "__builtin_HEXAGON_V6_vunpackob",
+ "llvm.hexagon.V6.vunpackob.128B" => "__builtin_HEXAGON_V6_vunpackob_128B",
+ "llvm.hexagon.V6.vunpackoh" => "__builtin_HEXAGON_V6_vunpackoh",
+ "llvm.hexagon.V6.vunpackoh.128B" => "__builtin_HEXAGON_V6_vunpackoh_128B",
+ "llvm.hexagon.V6.vunpackub" => "__builtin_HEXAGON_V6_vunpackub",
+ "llvm.hexagon.V6.vunpackub.128B" => "__builtin_HEXAGON_V6_vunpackub_128B",
+ "llvm.hexagon.V6.vunpackuh" => "__builtin_HEXAGON_V6_vunpackuh",
+ "llvm.hexagon.V6.vunpackuh.128B" => "__builtin_HEXAGON_V6_vunpackuh_128B",
+ "llvm.hexagon.V6.vxor" => "__builtin_HEXAGON_V6_vxor",
+ "llvm.hexagon.V6.vxor.128B" => "__builtin_HEXAGON_V6_vxor_128B",
+ "llvm.hexagon.V6.vzb" => "__builtin_HEXAGON_V6_vzb",
+ "llvm.hexagon.V6.vzb.128B" => "__builtin_HEXAGON_V6_vzb_128B",
+ "llvm.hexagon.V6.vzh" => "__builtin_HEXAGON_V6_vzh",
+ "llvm.hexagon.V6.vzh.128B" => "__builtin_HEXAGON_V6_vzh_128B",
+ "llvm.hexagon.brev.ldb" => "__builtin_brev_ldb",
+ "llvm.hexagon.brev.ldd" => "__builtin_brev_ldd",
+ "llvm.hexagon.brev.ldh" => "__builtin_brev_ldh",
+ "llvm.hexagon.brev.ldub" => "__builtin_brev_ldub",
+ "llvm.hexagon.brev.lduh" => "__builtin_brev_lduh",
+ "llvm.hexagon.brev.ldw" => "__builtin_brev_ldw",
+ "llvm.hexagon.brev.stb" => "__builtin_brev_stb",
+ "llvm.hexagon.brev.std" => "__builtin_brev_std",
+ "llvm.hexagon.brev.sth" => "__builtin_brev_sth",
+ "llvm.hexagon.brev.sthhi" => "__builtin_brev_sthhi",
+ "llvm.hexagon.brev.stw" => "__builtin_brev_stw",
+ "llvm.hexagon.circ.ldb" => "__builtin_circ_ldb",
+ "llvm.hexagon.circ.ldd" => "__builtin_circ_ldd",
+ "llvm.hexagon.circ.ldh" => "__builtin_circ_ldh",
+ "llvm.hexagon.circ.ldub" => "__builtin_circ_ldub",
+ "llvm.hexagon.circ.lduh" => "__builtin_circ_lduh",
+ "llvm.hexagon.circ.ldw" => "__builtin_circ_ldw",
+ "llvm.hexagon.circ.stb" => "__builtin_circ_stb",
+ "llvm.hexagon.circ.std" => "__builtin_circ_std",
+ "llvm.hexagon.circ.sth" => "__builtin_circ_sth",
+ "llvm.hexagon.circ.sthhi" => "__builtin_circ_sthhi",
+ "llvm.hexagon.circ.stw" => "__builtin_circ_stw",
+ "llvm.hexagon.mm256i.vaddw" => "__builtin__mm256i_vaddw",
+ "llvm.hexagon.prefetch" => "__builtin_HEXAGON_prefetch",
+ // mips
+ "llvm.mips.absq.s.ph" => "__builtin_mips_absq_s_ph",
+ "llvm.mips.absq.s.qb" => "__builtin_mips_absq_s_qb",
+ "llvm.mips.absq.s.w" => "__builtin_mips_absq_s_w",
+ "llvm.mips.add.a.b" => "__builtin_msa_add_a_b",
+ "llvm.mips.add.a.d" => "__builtin_msa_add_a_d",
+ "llvm.mips.add.a.h" => "__builtin_msa_add_a_h",
+ "llvm.mips.add.a.w" => "__builtin_msa_add_a_w",
+ "llvm.mips.addq.ph" => "__builtin_mips_addq_ph",
+ "llvm.mips.addq.s.ph" => "__builtin_mips_addq_s_ph",
+ "llvm.mips.addq.s.w" => "__builtin_mips_addq_s_w",
+ "llvm.mips.addqh.ph" => "__builtin_mips_addqh_ph",
+ "llvm.mips.addqh.r.ph" => "__builtin_mips_addqh_r_ph",
+ "llvm.mips.addqh.r.w" => "__builtin_mips_addqh_r_w",
+ "llvm.mips.addqh.w" => "__builtin_mips_addqh_w",
+ "llvm.mips.adds.a.b" => "__builtin_msa_adds_a_b",
+ "llvm.mips.adds.a.d" => "__builtin_msa_adds_a_d",
+ "llvm.mips.adds.a.h" => "__builtin_msa_adds_a_h",
+ "llvm.mips.adds.a.w" => "__builtin_msa_adds_a_w",
+ "llvm.mips.adds.s.b" => "__builtin_msa_adds_s_b",
+ "llvm.mips.adds.s.d" => "__builtin_msa_adds_s_d",
+ "llvm.mips.adds.s.h" => "__builtin_msa_adds_s_h",
+ "llvm.mips.adds.s.w" => "__builtin_msa_adds_s_w",
+ "llvm.mips.adds.u.b" => "__builtin_msa_adds_u_b",
+ "llvm.mips.adds.u.d" => "__builtin_msa_adds_u_d",
+ "llvm.mips.adds.u.h" => "__builtin_msa_adds_u_h",
+ "llvm.mips.adds.u.w" => "__builtin_msa_adds_u_w",
+ "llvm.mips.addsc" => "__builtin_mips_addsc",
+ "llvm.mips.addu.ph" => "__builtin_mips_addu_ph",
+ "llvm.mips.addu.qb" => "__builtin_mips_addu_qb",
+ "llvm.mips.addu.s.ph" => "__builtin_mips_addu_s_ph",
+ "llvm.mips.addu.s.qb" => "__builtin_mips_addu_s_qb",
+ "llvm.mips.adduh.qb" => "__builtin_mips_adduh_qb",
+ "llvm.mips.adduh.r.qb" => "__builtin_mips_adduh_r_qb",
+ "llvm.mips.addv.b" => "__builtin_msa_addv_b",
+ "llvm.mips.addv.d" => "__builtin_msa_addv_d",
+ "llvm.mips.addv.h" => "__builtin_msa_addv_h",
+ "llvm.mips.addv.w" => "__builtin_msa_addv_w",
+ "llvm.mips.addvi.b" => "__builtin_msa_addvi_b",
+ "llvm.mips.addvi.d" => "__builtin_msa_addvi_d",
+ "llvm.mips.addvi.h" => "__builtin_msa_addvi_h",
+ "llvm.mips.addvi.w" => "__builtin_msa_addvi_w",
+ "llvm.mips.addwc" => "__builtin_mips_addwc",
+ "llvm.mips.and.v" => "__builtin_msa_and_v",
+ "llvm.mips.andi.b" => "__builtin_msa_andi_b",
+ "llvm.mips.append" => "__builtin_mips_append",
+ "llvm.mips.asub.s.b" => "__builtin_msa_asub_s_b",
+ "llvm.mips.asub.s.d" => "__builtin_msa_asub_s_d",
+ "llvm.mips.asub.s.h" => "__builtin_msa_asub_s_h",
+ "llvm.mips.asub.s.w" => "__builtin_msa_asub_s_w",
+ "llvm.mips.asub.u.b" => "__builtin_msa_asub_u_b",
+ "llvm.mips.asub.u.d" => "__builtin_msa_asub_u_d",
+ "llvm.mips.asub.u.h" => "__builtin_msa_asub_u_h",
+ "llvm.mips.asub.u.w" => "__builtin_msa_asub_u_w",
+ "llvm.mips.ave.s.b" => "__builtin_msa_ave_s_b",
+ "llvm.mips.ave.s.d" => "__builtin_msa_ave_s_d",
+ "llvm.mips.ave.s.h" => "__builtin_msa_ave_s_h",
+ "llvm.mips.ave.s.w" => "__builtin_msa_ave_s_w",
+ "llvm.mips.ave.u.b" => "__builtin_msa_ave_u_b",
+ "llvm.mips.ave.u.d" => "__builtin_msa_ave_u_d",
+ "llvm.mips.ave.u.h" => "__builtin_msa_ave_u_h",
+ "llvm.mips.ave.u.w" => "__builtin_msa_ave_u_w",
+ "llvm.mips.aver.s.b" => "__builtin_msa_aver_s_b",
+ "llvm.mips.aver.s.d" => "__builtin_msa_aver_s_d",
+ "llvm.mips.aver.s.h" => "__builtin_msa_aver_s_h",
+ "llvm.mips.aver.s.w" => "__builtin_msa_aver_s_w",
+ "llvm.mips.aver.u.b" => "__builtin_msa_aver_u_b",
+ "llvm.mips.aver.u.d" => "__builtin_msa_aver_u_d",
+ "llvm.mips.aver.u.h" => "__builtin_msa_aver_u_h",
+ "llvm.mips.aver.u.w" => "__builtin_msa_aver_u_w",
+ "llvm.mips.balign" => "__builtin_mips_balign",
+ "llvm.mips.bclr.b" => "__builtin_msa_bclr_b",
+ "llvm.mips.bclr.d" => "__builtin_msa_bclr_d",
+ "llvm.mips.bclr.h" => "__builtin_msa_bclr_h",
+ "llvm.mips.bclr.w" => "__builtin_msa_bclr_w",
+ "llvm.mips.bclri.b" => "__builtin_msa_bclri_b",
+ "llvm.mips.bclri.d" => "__builtin_msa_bclri_d",
+ "llvm.mips.bclri.h" => "__builtin_msa_bclri_h",
+ "llvm.mips.bclri.w" => "__builtin_msa_bclri_w",
+ "llvm.mips.binsl.b" => "__builtin_msa_binsl_b",
+ "llvm.mips.binsl.d" => "__builtin_msa_binsl_d",
+ "llvm.mips.binsl.h" => "__builtin_msa_binsl_h",
+ "llvm.mips.binsl.w" => "__builtin_msa_binsl_w",
+ "llvm.mips.binsli.b" => "__builtin_msa_binsli_b",
+ "llvm.mips.binsli.d" => "__builtin_msa_binsli_d",
+ "llvm.mips.binsli.h" => "__builtin_msa_binsli_h",
+ "llvm.mips.binsli.w" => "__builtin_msa_binsli_w",
+ "llvm.mips.binsr.b" => "__builtin_msa_binsr_b",
+ "llvm.mips.binsr.d" => "__builtin_msa_binsr_d",
+ "llvm.mips.binsr.h" => "__builtin_msa_binsr_h",
+ "llvm.mips.binsr.w" => "__builtin_msa_binsr_w",
+ "llvm.mips.binsri.b" => "__builtin_msa_binsri_b",
+ "llvm.mips.binsri.d" => "__builtin_msa_binsri_d",
+ "llvm.mips.binsri.h" => "__builtin_msa_binsri_h",
+ "llvm.mips.binsri.w" => "__builtin_msa_binsri_w",
+ "llvm.mips.bitrev" => "__builtin_mips_bitrev",
+ "llvm.mips.bmnz.v" => "__builtin_msa_bmnz_v",
+ "llvm.mips.bmnzi.b" => "__builtin_msa_bmnzi_b",
+ "llvm.mips.bmz.v" => "__builtin_msa_bmz_v",
+ "llvm.mips.bmzi.b" => "__builtin_msa_bmzi_b",
+ "llvm.mips.bneg.b" => "__builtin_msa_bneg_b",
+ "llvm.mips.bneg.d" => "__builtin_msa_bneg_d",
+ "llvm.mips.bneg.h" => "__builtin_msa_bneg_h",
+ "llvm.mips.bneg.w" => "__builtin_msa_bneg_w",
+ "llvm.mips.bnegi.b" => "__builtin_msa_bnegi_b",
+ "llvm.mips.bnegi.d" => "__builtin_msa_bnegi_d",
+ "llvm.mips.bnegi.h" => "__builtin_msa_bnegi_h",
+ "llvm.mips.bnegi.w" => "__builtin_msa_bnegi_w",
+ "llvm.mips.bnz.b" => "__builtin_msa_bnz_b",
+ "llvm.mips.bnz.d" => "__builtin_msa_bnz_d",
+ "llvm.mips.bnz.h" => "__builtin_msa_bnz_h",
+ "llvm.mips.bnz.v" => "__builtin_msa_bnz_v",
+ "llvm.mips.bnz.w" => "__builtin_msa_bnz_w",
+ "llvm.mips.bposge32" => "__builtin_mips_bposge32",
+ "llvm.mips.bsel.v" => "__builtin_msa_bsel_v",
+ "llvm.mips.bseli.b" => "__builtin_msa_bseli_b",
+ "llvm.mips.bset.b" => "__builtin_msa_bset_b",
+ "llvm.mips.bset.d" => "__builtin_msa_bset_d",
+ "llvm.mips.bset.h" => "__builtin_msa_bset_h",
+ "llvm.mips.bset.w" => "__builtin_msa_bset_w",
+ "llvm.mips.bseti.b" => "__builtin_msa_bseti_b",
+ "llvm.mips.bseti.d" => "__builtin_msa_bseti_d",
+ "llvm.mips.bseti.h" => "__builtin_msa_bseti_h",
+ "llvm.mips.bseti.w" => "__builtin_msa_bseti_w",
+ "llvm.mips.bz.b" => "__builtin_msa_bz_b",
+ "llvm.mips.bz.d" => "__builtin_msa_bz_d",
+ "llvm.mips.bz.h" => "__builtin_msa_bz_h",
+ "llvm.mips.bz.v" => "__builtin_msa_bz_v",
+ "llvm.mips.bz.w" => "__builtin_msa_bz_w",
+ "llvm.mips.ceq.b" => "__builtin_msa_ceq_b",
+ "llvm.mips.ceq.d" => "__builtin_msa_ceq_d",
+ "llvm.mips.ceq.h" => "__builtin_msa_ceq_h",
+ "llvm.mips.ceq.w" => "__builtin_msa_ceq_w",
+ "llvm.mips.ceqi.b" => "__builtin_msa_ceqi_b",
+ "llvm.mips.ceqi.d" => "__builtin_msa_ceqi_d",
+ "llvm.mips.ceqi.h" => "__builtin_msa_ceqi_h",
+ "llvm.mips.ceqi.w" => "__builtin_msa_ceqi_w",
+ "llvm.mips.cfcmsa" => "__builtin_msa_cfcmsa",
+ "llvm.mips.cle.s.b" => "__builtin_msa_cle_s_b",
+ "llvm.mips.cle.s.d" => "__builtin_msa_cle_s_d",
+ "llvm.mips.cle.s.h" => "__builtin_msa_cle_s_h",
+ "llvm.mips.cle.s.w" => "__builtin_msa_cle_s_w",
+ "llvm.mips.cle.u.b" => "__builtin_msa_cle_u_b",
+ "llvm.mips.cle.u.d" => "__builtin_msa_cle_u_d",
+ "llvm.mips.cle.u.h" => "__builtin_msa_cle_u_h",
+ "llvm.mips.cle.u.w" => "__builtin_msa_cle_u_w",
+ "llvm.mips.clei.s.b" => "__builtin_msa_clei_s_b",
+ "llvm.mips.clei.s.d" => "__builtin_msa_clei_s_d",
+ "llvm.mips.clei.s.h" => "__builtin_msa_clei_s_h",
+ "llvm.mips.clei.s.w" => "__builtin_msa_clei_s_w",
+ "llvm.mips.clei.u.b" => "__builtin_msa_clei_u_b",
+ "llvm.mips.clei.u.d" => "__builtin_msa_clei_u_d",
+ "llvm.mips.clei.u.h" => "__builtin_msa_clei_u_h",
+ "llvm.mips.clei.u.w" => "__builtin_msa_clei_u_w",
+ "llvm.mips.clt.s.b" => "__builtin_msa_clt_s_b",
+ "llvm.mips.clt.s.d" => "__builtin_msa_clt_s_d",
+ "llvm.mips.clt.s.h" => "__builtin_msa_clt_s_h",
+ "llvm.mips.clt.s.w" => "__builtin_msa_clt_s_w",
+ "llvm.mips.clt.u.b" => "__builtin_msa_clt_u_b",
+ "llvm.mips.clt.u.d" => "__builtin_msa_clt_u_d",
+ "llvm.mips.clt.u.h" => "__builtin_msa_clt_u_h",
+ "llvm.mips.clt.u.w" => "__builtin_msa_clt_u_w",
+ "llvm.mips.clti.s.b" => "__builtin_msa_clti_s_b",
+ "llvm.mips.clti.s.d" => "__builtin_msa_clti_s_d",
+ "llvm.mips.clti.s.h" => "__builtin_msa_clti_s_h",
+ "llvm.mips.clti.s.w" => "__builtin_msa_clti_s_w",
+ "llvm.mips.clti.u.b" => "__builtin_msa_clti_u_b",
+ "llvm.mips.clti.u.d" => "__builtin_msa_clti_u_d",
+ "llvm.mips.clti.u.h" => "__builtin_msa_clti_u_h",
+ "llvm.mips.clti.u.w" => "__builtin_msa_clti_u_w",
+ "llvm.mips.cmp.eq.ph" => "__builtin_mips_cmp_eq_ph",
+ "llvm.mips.cmp.le.ph" => "__builtin_mips_cmp_le_ph",
+ "llvm.mips.cmp.lt.ph" => "__builtin_mips_cmp_lt_ph",
+ "llvm.mips.cmpgdu.eq.qb" => "__builtin_mips_cmpgdu_eq_qb",
+ "llvm.mips.cmpgdu.le.qb" => "__builtin_mips_cmpgdu_le_qb",
+ "llvm.mips.cmpgdu.lt.qb" => "__builtin_mips_cmpgdu_lt_qb",
+ "llvm.mips.cmpgu.eq.qb" => "__builtin_mips_cmpgu_eq_qb",
+ "llvm.mips.cmpgu.le.qb" => "__builtin_mips_cmpgu_le_qb",
+ "llvm.mips.cmpgu.lt.qb" => "__builtin_mips_cmpgu_lt_qb",
+ "llvm.mips.cmpu.eq.qb" => "__builtin_mips_cmpu_eq_qb",
+ "llvm.mips.cmpu.le.qb" => "__builtin_mips_cmpu_le_qb",
+ "llvm.mips.cmpu.lt.qb" => "__builtin_mips_cmpu_lt_qb",
+ "llvm.mips.copy.s.b" => "__builtin_msa_copy_s_b",
+ "llvm.mips.copy.s.d" => "__builtin_msa_copy_s_d",
+ "llvm.mips.copy.s.h" => "__builtin_msa_copy_s_h",
+ "llvm.mips.copy.s.w" => "__builtin_msa_copy_s_w",
+ "llvm.mips.copy.u.b" => "__builtin_msa_copy_u_b",
+ "llvm.mips.copy.u.d" => "__builtin_msa_copy_u_d",
+ "llvm.mips.copy.u.h" => "__builtin_msa_copy_u_h",
+ "llvm.mips.copy.u.w" => "__builtin_msa_copy_u_w",
+ "llvm.mips.ctcmsa" => "__builtin_msa_ctcmsa",
+ "llvm.mips.div.s.b" => "__builtin_msa_div_s_b",
+ "llvm.mips.div.s.d" => "__builtin_msa_div_s_d",
+ "llvm.mips.div.s.h" => "__builtin_msa_div_s_h",
+ "llvm.mips.div.s.w" => "__builtin_msa_div_s_w",
+ "llvm.mips.div.u.b" => "__builtin_msa_div_u_b",
+ "llvm.mips.div.u.d" => "__builtin_msa_div_u_d",
+ "llvm.mips.div.u.h" => "__builtin_msa_div_u_h",
+ "llvm.mips.div.u.w" => "__builtin_msa_div_u_w",
+ "llvm.mips.dlsa" => "__builtin_mips_dlsa",
+ "llvm.mips.dotp.s.d" => "__builtin_msa_dotp_s_d",
+ "llvm.mips.dotp.s.h" => "__builtin_msa_dotp_s_h",
+ "llvm.mips.dotp.s.w" => "__builtin_msa_dotp_s_w",
+ "llvm.mips.dotp.u.d" => "__builtin_msa_dotp_u_d",
+ "llvm.mips.dotp.u.h" => "__builtin_msa_dotp_u_h",
+ "llvm.mips.dotp.u.w" => "__builtin_msa_dotp_u_w",
+ "llvm.mips.dpa.w.ph" => "__builtin_mips_dpa_w_ph",
+ "llvm.mips.dpadd.s.d" => "__builtin_msa_dpadd_s_d",
+ "llvm.mips.dpadd.s.h" => "__builtin_msa_dpadd_s_h",
+ "llvm.mips.dpadd.s.w" => "__builtin_msa_dpadd_s_w",
+ "llvm.mips.dpadd.u.d" => "__builtin_msa_dpadd_u_d",
+ "llvm.mips.dpadd.u.h" => "__builtin_msa_dpadd_u_h",
+ "llvm.mips.dpadd.u.w" => "__builtin_msa_dpadd_u_w",
+ "llvm.mips.dpaq.s.w.ph" => "__builtin_mips_dpaq_s_w_ph",
+ "llvm.mips.dpaq.sa.l.w" => "__builtin_mips_dpaq_sa_l_w",
+ "llvm.mips.dpaqx.s.w.ph" => "__builtin_mips_dpaqx_s_w_ph",
+ "llvm.mips.dpaqx.sa.w.ph" => "__builtin_mips_dpaqx_sa_w_ph",
+ "llvm.mips.dpau.h.qbl" => "__builtin_mips_dpau_h_qbl",
+ "llvm.mips.dpau.h.qbr" => "__builtin_mips_dpau_h_qbr",
+ "llvm.mips.dpax.w.ph" => "__builtin_mips_dpax_w_ph",
+ "llvm.mips.dps.w.ph" => "__builtin_mips_dps_w_ph",
+ "llvm.mips.dpsq.s.w.ph" => "__builtin_mips_dpsq_s_w_ph",
+ "llvm.mips.dpsq.sa.l.w" => "__builtin_mips_dpsq_sa_l_w",
+ "llvm.mips.dpsqx.s.w.ph" => "__builtin_mips_dpsqx_s_w_ph",
+ "llvm.mips.dpsqx.sa.w.ph" => "__builtin_mips_dpsqx_sa_w_ph",
+ "llvm.mips.dpsu.h.qbl" => "__builtin_mips_dpsu_h_qbl",
+ "llvm.mips.dpsu.h.qbr" => "__builtin_mips_dpsu_h_qbr",
+ "llvm.mips.dpsub.s.d" => "__builtin_msa_dpsub_s_d",
+ "llvm.mips.dpsub.s.h" => "__builtin_msa_dpsub_s_h",
+ "llvm.mips.dpsub.s.w" => "__builtin_msa_dpsub_s_w",
+ "llvm.mips.dpsub.u.d" => "__builtin_msa_dpsub_u_d",
+ "llvm.mips.dpsub.u.h" => "__builtin_msa_dpsub_u_h",
+ "llvm.mips.dpsub.u.w" => "__builtin_msa_dpsub_u_w",
+ "llvm.mips.dpsx.w.ph" => "__builtin_mips_dpsx_w_ph",
+ "llvm.mips.extp" => "__builtin_mips_extp",
+ "llvm.mips.extpdp" => "__builtin_mips_extpdp",
+ "llvm.mips.extr.r.w" => "__builtin_mips_extr_r_w",
+ "llvm.mips.extr.rs.w" => "__builtin_mips_extr_rs_w",
+ "llvm.mips.extr.s.h" => "__builtin_mips_extr_s_h",
+ "llvm.mips.extr.w" => "__builtin_mips_extr_w",
+ "llvm.mips.fadd.d" => "__builtin_msa_fadd_d",
+ "llvm.mips.fadd.w" => "__builtin_msa_fadd_w",
+ "llvm.mips.fcaf.d" => "__builtin_msa_fcaf_d",
+ "llvm.mips.fcaf.w" => "__builtin_msa_fcaf_w",
+ "llvm.mips.fceq.d" => "__builtin_msa_fceq_d",
+ "llvm.mips.fceq.w" => "__builtin_msa_fceq_w",
+ "llvm.mips.fclass.d" => "__builtin_msa_fclass_d",
+ "llvm.mips.fclass.w" => "__builtin_msa_fclass_w",
+ "llvm.mips.fcle.d" => "__builtin_msa_fcle_d",
+ "llvm.mips.fcle.w" => "__builtin_msa_fcle_w",
+ "llvm.mips.fclt.d" => "__builtin_msa_fclt_d",
+ "llvm.mips.fclt.w" => "__builtin_msa_fclt_w",
+ "llvm.mips.fcne.d" => "__builtin_msa_fcne_d",
+ "llvm.mips.fcne.w" => "__builtin_msa_fcne_w",
+ "llvm.mips.fcor.d" => "__builtin_msa_fcor_d",
+ "llvm.mips.fcor.w" => "__builtin_msa_fcor_w",
+ "llvm.mips.fcueq.d" => "__builtin_msa_fcueq_d",
+ "llvm.mips.fcueq.w" => "__builtin_msa_fcueq_w",
+ "llvm.mips.fcule.d" => "__builtin_msa_fcule_d",
+ "llvm.mips.fcule.w" => "__builtin_msa_fcule_w",
+ "llvm.mips.fcult.d" => "__builtin_msa_fcult_d",
+ "llvm.mips.fcult.w" => "__builtin_msa_fcult_w",
+ "llvm.mips.fcun.d" => "__builtin_msa_fcun_d",
+ "llvm.mips.fcun.w" => "__builtin_msa_fcun_w",
+ "llvm.mips.fcune.d" => "__builtin_msa_fcune_d",
+ "llvm.mips.fcune.w" => "__builtin_msa_fcune_w",
+ "llvm.mips.fdiv.d" => "__builtin_msa_fdiv_d",
+ "llvm.mips.fdiv.w" => "__builtin_msa_fdiv_w",
+ "llvm.mips.fexdo.h" => "__builtin_msa_fexdo_h",
+ "llvm.mips.fexdo.w" => "__builtin_msa_fexdo_w",
+ "llvm.mips.fexp2.d" => "__builtin_msa_fexp2_d",
+ "llvm.mips.fexp2.w" => "__builtin_msa_fexp2_w",
+ "llvm.mips.fexupl.d" => "__builtin_msa_fexupl_d",
+ "llvm.mips.fexupl.w" => "__builtin_msa_fexupl_w",
+ "llvm.mips.fexupr.d" => "__builtin_msa_fexupr_d",
+ "llvm.mips.fexupr.w" => "__builtin_msa_fexupr_w",
+ "llvm.mips.ffint.s.d" => "__builtin_msa_ffint_s_d",
+ "llvm.mips.ffint.s.w" => "__builtin_msa_ffint_s_w",
+ "llvm.mips.ffint.u.d" => "__builtin_msa_ffint_u_d",
+ "llvm.mips.ffint.u.w" => "__builtin_msa_ffint_u_w",
+ "llvm.mips.ffql.d" => "__builtin_msa_ffql_d",
+ "llvm.mips.ffql.w" => "__builtin_msa_ffql_w",
+ "llvm.mips.ffqr.d" => "__builtin_msa_ffqr_d",
+ "llvm.mips.ffqr.w" => "__builtin_msa_ffqr_w",
+ "llvm.mips.fill.b" => "__builtin_msa_fill_b",
+ "llvm.mips.fill.d" => "__builtin_msa_fill_d",
+ "llvm.mips.fill.h" => "__builtin_msa_fill_h",
+ "llvm.mips.fill.w" => "__builtin_msa_fill_w",
+ "llvm.mips.flog2.d" => "__builtin_msa_flog2_d",
+ "llvm.mips.flog2.w" => "__builtin_msa_flog2_w",
+ "llvm.mips.fmadd.d" => "__builtin_msa_fmadd_d",
+ "llvm.mips.fmadd.w" => "__builtin_msa_fmadd_w",
+ "llvm.mips.fmax.a.d" => "__builtin_msa_fmax_a_d",
+ "llvm.mips.fmax.a.w" => "__builtin_msa_fmax_a_w",
+ "llvm.mips.fmax.d" => "__builtin_msa_fmax_d",
+ "llvm.mips.fmax.w" => "__builtin_msa_fmax_w",
+ "llvm.mips.fmin.a.d" => "__builtin_msa_fmin_a_d",
+ "llvm.mips.fmin.a.w" => "__builtin_msa_fmin_a_w",
+ "llvm.mips.fmin.d" => "__builtin_msa_fmin_d",
+ "llvm.mips.fmin.w" => "__builtin_msa_fmin_w",
+ "llvm.mips.fmsub.d" => "__builtin_msa_fmsub_d",
+ "llvm.mips.fmsub.w" => "__builtin_msa_fmsub_w",
+ "llvm.mips.fmul.d" => "__builtin_msa_fmul_d",
+ "llvm.mips.fmul.w" => "__builtin_msa_fmul_w",
+ "llvm.mips.frcp.d" => "__builtin_msa_frcp_d",
+ "llvm.mips.frcp.w" => "__builtin_msa_frcp_w",
+ "llvm.mips.frint.d" => "__builtin_msa_frint_d",
+ "llvm.mips.frint.w" => "__builtin_msa_frint_w",
+ "llvm.mips.frsqrt.d" => "__builtin_msa_frsqrt_d",
+ "llvm.mips.frsqrt.w" => "__builtin_msa_frsqrt_w",
+ "llvm.mips.fsaf.d" => "__builtin_msa_fsaf_d",
+ "llvm.mips.fsaf.w" => "__builtin_msa_fsaf_w",
+ "llvm.mips.fseq.d" => "__builtin_msa_fseq_d",
+ "llvm.mips.fseq.w" => "__builtin_msa_fseq_w",
+ "llvm.mips.fsle.d" => "__builtin_msa_fsle_d",
+ "llvm.mips.fsle.w" => "__builtin_msa_fsle_w",
+ "llvm.mips.fslt.d" => "__builtin_msa_fslt_d",
+ "llvm.mips.fslt.w" => "__builtin_msa_fslt_w",
+ "llvm.mips.fsne.d" => "__builtin_msa_fsne_d",
+ "llvm.mips.fsne.w" => "__builtin_msa_fsne_w",
+ "llvm.mips.fsor.d" => "__builtin_msa_fsor_d",
+ "llvm.mips.fsor.w" => "__builtin_msa_fsor_w",
+ "llvm.mips.fsqrt.d" => "__builtin_msa_fsqrt_d",
+ "llvm.mips.fsqrt.w" => "__builtin_msa_fsqrt_w",
+ "llvm.mips.fsub.d" => "__builtin_msa_fsub_d",
+ "llvm.mips.fsub.w" => "__builtin_msa_fsub_w",
+ "llvm.mips.fsueq.d" => "__builtin_msa_fsueq_d",
+ "llvm.mips.fsueq.w" => "__builtin_msa_fsueq_w",
+ "llvm.mips.fsule.d" => "__builtin_msa_fsule_d",
+ "llvm.mips.fsule.w" => "__builtin_msa_fsule_w",
+ "llvm.mips.fsult.d" => "__builtin_msa_fsult_d",
+ "llvm.mips.fsult.w" => "__builtin_msa_fsult_w",
+ "llvm.mips.fsun.d" => "__builtin_msa_fsun_d",
+ "llvm.mips.fsun.w" => "__builtin_msa_fsun_w",
+ "llvm.mips.fsune.d" => "__builtin_msa_fsune_d",
+ "llvm.mips.fsune.w" => "__builtin_msa_fsune_w",
+ "llvm.mips.ftint.s.d" => "__builtin_msa_ftint_s_d",
+ "llvm.mips.ftint.s.w" => "__builtin_msa_ftint_s_w",
+ "llvm.mips.ftint.u.d" => "__builtin_msa_ftint_u_d",
+ "llvm.mips.ftint.u.w" => "__builtin_msa_ftint_u_w",
+ "llvm.mips.ftq.h" => "__builtin_msa_ftq_h",
+ "llvm.mips.ftq.w" => "__builtin_msa_ftq_w",
+ "llvm.mips.ftrunc.s.d" => "__builtin_msa_ftrunc_s_d",
+ "llvm.mips.ftrunc.s.w" => "__builtin_msa_ftrunc_s_w",
+ "llvm.mips.ftrunc.u.d" => "__builtin_msa_ftrunc_u_d",
+ "llvm.mips.ftrunc.u.w" => "__builtin_msa_ftrunc_u_w",
+ "llvm.mips.hadd.s.d" => "__builtin_msa_hadd_s_d",
+ "llvm.mips.hadd.s.h" => "__builtin_msa_hadd_s_h",
+ "llvm.mips.hadd.s.w" => "__builtin_msa_hadd_s_w",
+ "llvm.mips.hadd.u.d" => "__builtin_msa_hadd_u_d",
+ "llvm.mips.hadd.u.h" => "__builtin_msa_hadd_u_h",
+ "llvm.mips.hadd.u.w" => "__builtin_msa_hadd_u_w",
+ "llvm.mips.hsub.s.d" => "__builtin_msa_hsub_s_d",
+ "llvm.mips.hsub.s.h" => "__builtin_msa_hsub_s_h",
+ "llvm.mips.hsub.s.w" => "__builtin_msa_hsub_s_w",
+ "llvm.mips.hsub.u.d" => "__builtin_msa_hsub_u_d",
+ "llvm.mips.hsub.u.h" => "__builtin_msa_hsub_u_h",
+ "llvm.mips.hsub.u.w" => "__builtin_msa_hsub_u_w",
+ "llvm.mips.ilvev.b" => "__builtin_msa_ilvev_b",
+ "llvm.mips.ilvev.d" => "__builtin_msa_ilvev_d",
+ "llvm.mips.ilvev.h" => "__builtin_msa_ilvev_h",
+ "llvm.mips.ilvev.w" => "__builtin_msa_ilvev_w",
+ "llvm.mips.ilvl.b" => "__builtin_msa_ilvl_b",
+ "llvm.mips.ilvl.d" => "__builtin_msa_ilvl_d",
+ "llvm.mips.ilvl.h" => "__builtin_msa_ilvl_h",
+ "llvm.mips.ilvl.w" => "__builtin_msa_ilvl_w",
+ "llvm.mips.ilvod.b" => "__builtin_msa_ilvod_b",
+ "llvm.mips.ilvod.d" => "__builtin_msa_ilvod_d",
+ "llvm.mips.ilvod.h" => "__builtin_msa_ilvod_h",
+ "llvm.mips.ilvod.w" => "__builtin_msa_ilvod_w",
+ "llvm.mips.ilvr.b" => "__builtin_msa_ilvr_b",
+ "llvm.mips.ilvr.d" => "__builtin_msa_ilvr_d",
+ "llvm.mips.ilvr.h" => "__builtin_msa_ilvr_h",
+ "llvm.mips.ilvr.w" => "__builtin_msa_ilvr_w",
+ "llvm.mips.insert.b" => "__builtin_msa_insert_b",
+ "llvm.mips.insert.d" => "__builtin_msa_insert_d",
+ "llvm.mips.insert.h" => "__builtin_msa_insert_h",
+ "llvm.mips.insert.w" => "__builtin_msa_insert_w",
+ "llvm.mips.insv" => "__builtin_mips_insv",
+ "llvm.mips.insve.b" => "__builtin_msa_insve_b",
+ "llvm.mips.insve.d" => "__builtin_msa_insve_d",
+ "llvm.mips.insve.h" => "__builtin_msa_insve_h",
+ "llvm.mips.insve.w" => "__builtin_msa_insve_w",
+ "llvm.mips.lbux" => "__builtin_mips_lbux",
+ "llvm.mips.ld.b" => "__builtin_msa_ld_b",
+ "llvm.mips.ld.d" => "__builtin_msa_ld_d",
+ "llvm.mips.ld.h" => "__builtin_msa_ld_h",
+ "llvm.mips.ld.w" => "__builtin_msa_ld_w",
+ "llvm.mips.ldi.b" => "__builtin_msa_ldi_b",
+ "llvm.mips.ldi.d" => "__builtin_msa_ldi_d",
+ "llvm.mips.ldi.h" => "__builtin_msa_ldi_h",
+ "llvm.mips.ldi.w" => "__builtin_msa_ldi_w",
+ "llvm.mips.ldr.d" => "__builtin_msa_ldr_d",
+ "llvm.mips.ldr.w" => "__builtin_msa_ldr_w",
+ "llvm.mips.lhx" => "__builtin_mips_lhx",
+ "llvm.mips.lsa" => "__builtin_mips_lsa",
+ "llvm.mips.lwx" => "__builtin_mips_lwx",
+ "llvm.mips.madd" => "__builtin_mips_madd",
+ "llvm.mips.madd.q.h" => "__builtin_msa_madd_q_h",
+ "llvm.mips.madd.q.w" => "__builtin_msa_madd_q_w",
+ "llvm.mips.maddr.q.h" => "__builtin_msa_maddr_q_h",
+ "llvm.mips.maddr.q.w" => "__builtin_msa_maddr_q_w",
+ "llvm.mips.maddu" => "__builtin_mips_maddu",
+ "llvm.mips.maddv.b" => "__builtin_msa_maddv_b",
+ "llvm.mips.maddv.d" => "__builtin_msa_maddv_d",
+ "llvm.mips.maddv.h" => "__builtin_msa_maddv_h",
+ "llvm.mips.maddv.w" => "__builtin_msa_maddv_w",
+ "llvm.mips.maq.s.w.phl" => "__builtin_mips_maq_s_w_phl",
+ "llvm.mips.maq.s.w.phr" => "__builtin_mips_maq_s_w_phr",
+ "llvm.mips.maq.sa.w.phl" => "__builtin_mips_maq_sa_w_phl",
+ "llvm.mips.maq.sa.w.phr" => "__builtin_mips_maq_sa_w_phr",
+ "llvm.mips.max.a.b" => "__builtin_msa_max_a_b",
+ "llvm.mips.max.a.d" => "__builtin_msa_max_a_d",
+ "llvm.mips.max.a.h" => "__builtin_msa_max_a_h",
+ "llvm.mips.max.a.w" => "__builtin_msa_max_a_w",
+ "llvm.mips.max.s.b" => "__builtin_msa_max_s_b",
+ "llvm.mips.max.s.d" => "__builtin_msa_max_s_d",
+ "llvm.mips.max.s.h" => "__builtin_msa_max_s_h",
+ "llvm.mips.max.s.w" => "__builtin_msa_max_s_w",
+ "llvm.mips.max.u.b" => "__builtin_msa_max_u_b",
+ "llvm.mips.max.u.d" => "__builtin_msa_max_u_d",
+ "llvm.mips.max.u.h" => "__builtin_msa_max_u_h",
+ "llvm.mips.max.u.w" => "__builtin_msa_max_u_w",
+ "llvm.mips.maxi.s.b" => "__builtin_msa_maxi_s_b",
+ "llvm.mips.maxi.s.d" => "__builtin_msa_maxi_s_d",
+ "llvm.mips.maxi.s.h" => "__builtin_msa_maxi_s_h",
+ "llvm.mips.maxi.s.w" => "__builtin_msa_maxi_s_w",
+ "llvm.mips.maxi.u.b" => "__builtin_msa_maxi_u_b",
+ "llvm.mips.maxi.u.d" => "__builtin_msa_maxi_u_d",
+ "llvm.mips.maxi.u.h" => "__builtin_msa_maxi_u_h",
+ "llvm.mips.maxi.u.w" => "__builtin_msa_maxi_u_w",
+ "llvm.mips.min.a.b" => "__builtin_msa_min_a_b",
+ "llvm.mips.min.a.d" => "__builtin_msa_min_a_d",
+ "llvm.mips.min.a.h" => "__builtin_msa_min_a_h",
+ "llvm.mips.min.a.w" => "__builtin_msa_min_a_w",
+ "llvm.mips.min.s.b" => "__builtin_msa_min_s_b",
+ "llvm.mips.min.s.d" => "__builtin_msa_min_s_d",
+ "llvm.mips.min.s.h" => "__builtin_msa_min_s_h",
+ "llvm.mips.min.s.w" => "__builtin_msa_min_s_w",
+ "llvm.mips.min.u.b" => "__builtin_msa_min_u_b",
+ "llvm.mips.min.u.d" => "__builtin_msa_min_u_d",
+ "llvm.mips.min.u.h" => "__builtin_msa_min_u_h",
+ "llvm.mips.min.u.w" => "__builtin_msa_min_u_w",
+ "llvm.mips.mini.s.b" => "__builtin_msa_mini_s_b",
+ "llvm.mips.mini.s.d" => "__builtin_msa_mini_s_d",
+ "llvm.mips.mini.s.h" => "__builtin_msa_mini_s_h",
+ "llvm.mips.mini.s.w" => "__builtin_msa_mini_s_w",
+ "llvm.mips.mini.u.b" => "__builtin_msa_mini_u_b",
+ "llvm.mips.mini.u.d" => "__builtin_msa_mini_u_d",
+ "llvm.mips.mini.u.h" => "__builtin_msa_mini_u_h",
+ "llvm.mips.mini.u.w" => "__builtin_msa_mini_u_w",
+ "llvm.mips.mod.s.b" => "__builtin_msa_mod_s_b",
+ "llvm.mips.mod.s.d" => "__builtin_msa_mod_s_d",
+ "llvm.mips.mod.s.h" => "__builtin_msa_mod_s_h",
+ "llvm.mips.mod.s.w" => "__builtin_msa_mod_s_w",
+ "llvm.mips.mod.u.b" => "__builtin_msa_mod_u_b",
+ "llvm.mips.mod.u.d" => "__builtin_msa_mod_u_d",
+ "llvm.mips.mod.u.h" => "__builtin_msa_mod_u_h",
+ "llvm.mips.mod.u.w" => "__builtin_msa_mod_u_w",
+ "llvm.mips.modsub" => "__builtin_mips_modsub",
+ "llvm.mips.move.v" => "__builtin_msa_move_v",
+ "llvm.mips.msub" => "__builtin_mips_msub",
+ "llvm.mips.msub.q.h" => "__builtin_msa_msub_q_h",
+ "llvm.mips.msub.q.w" => "__builtin_msa_msub_q_w",
+ "llvm.mips.msubr.q.h" => "__builtin_msa_msubr_q_h",
+ "llvm.mips.msubr.q.w" => "__builtin_msa_msubr_q_w",
+ "llvm.mips.msubu" => "__builtin_mips_msubu",
+ "llvm.mips.msubv.b" => "__builtin_msa_msubv_b",
+ "llvm.mips.msubv.d" => "__builtin_msa_msubv_d",
+ "llvm.mips.msubv.h" => "__builtin_msa_msubv_h",
+ "llvm.mips.msubv.w" => "__builtin_msa_msubv_w",
+ "llvm.mips.mthlip" => "__builtin_mips_mthlip",
+ "llvm.mips.mul.ph" => "__builtin_mips_mul_ph",
+ "llvm.mips.mul.q.h" => "__builtin_msa_mul_q_h",
+ "llvm.mips.mul.q.w" => "__builtin_msa_mul_q_w",
+ "llvm.mips.mul.s.ph" => "__builtin_mips_mul_s_ph",
+ "llvm.mips.muleq.s.w.phl" => "__builtin_mips_muleq_s_w_phl",
+ "llvm.mips.muleq.s.w.phr" => "__builtin_mips_muleq_s_w_phr",
+ "llvm.mips.muleu.s.ph.qbl" => "__builtin_mips_muleu_s_ph_qbl",
+ "llvm.mips.muleu.s.ph.qbr" => "__builtin_mips_muleu_s_ph_qbr",
+ "llvm.mips.mulq.rs.ph" => "__builtin_mips_mulq_rs_ph",
+ "llvm.mips.mulq.rs.w" => "__builtin_mips_mulq_rs_w",
+ "llvm.mips.mulq.s.ph" => "__builtin_mips_mulq_s_ph",
+ "llvm.mips.mulq.s.w" => "__builtin_mips_mulq_s_w",
+ "llvm.mips.mulr.q.h" => "__builtin_msa_mulr_q_h",
+ "llvm.mips.mulr.q.w" => "__builtin_msa_mulr_q_w",
+ "llvm.mips.mulsa.w.ph" => "__builtin_mips_mulsa_w_ph",
+ "llvm.mips.mulsaq.s.w.ph" => "__builtin_mips_mulsaq_s_w_ph",
+ "llvm.mips.mult" => "__builtin_mips_mult",
+ "llvm.mips.multu" => "__builtin_mips_multu",
+ "llvm.mips.mulv.b" => "__builtin_msa_mulv_b",
+ "llvm.mips.mulv.d" => "__builtin_msa_mulv_d",
+ "llvm.mips.mulv.h" => "__builtin_msa_mulv_h",
+ "llvm.mips.mulv.w" => "__builtin_msa_mulv_w",
+ "llvm.mips.nloc.b" => "__builtin_msa_nloc_b",
+ "llvm.mips.nloc.d" => "__builtin_msa_nloc_d",
+ "llvm.mips.nloc.h" => "__builtin_msa_nloc_h",
+ "llvm.mips.nloc.w" => "__builtin_msa_nloc_w",
+ "llvm.mips.nlzc.b" => "__builtin_msa_nlzc_b",
+ "llvm.mips.nlzc.d" => "__builtin_msa_nlzc_d",
+ "llvm.mips.nlzc.h" => "__builtin_msa_nlzc_h",
+ "llvm.mips.nlzc.w" => "__builtin_msa_nlzc_w",
+ "llvm.mips.nor.v" => "__builtin_msa_nor_v",
+ "llvm.mips.nori.b" => "__builtin_msa_nori_b",
+ "llvm.mips.or.v" => "__builtin_msa_or_v",
+ "llvm.mips.ori.b" => "__builtin_msa_ori_b",
+ "llvm.mips.packrl.ph" => "__builtin_mips_packrl_ph",
+ "llvm.mips.pckev.b" => "__builtin_msa_pckev_b",
+ "llvm.mips.pckev.d" => "__builtin_msa_pckev_d",
+ "llvm.mips.pckev.h" => "__builtin_msa_pckev_h",
+ "llvm.mips.pckev.w" => "__builtin_msa_pckev_w",
+ "llvm.mips.pckod.b" => "__builtin_msa_pckod_b",
+ "llvm.mips.pckod.d" => "__builtin_msa_pckod_d",
+ "llvm.mips.pckod.h" => "__builtin_msa_pckod_h",
+ "llvm.mips.pckod.w" => "__builtin_msa_pckod_w",
+ "llvm.mips.pcnt.b" => "__builtin_msa_pcnt_b",
+ "llvm.mips.pcnt.d" => "__builtin_msa_pcnt_d",
+ "llvm.mips.pcnt.h" => "__builtin_msa_pcnt_h",
+ "llvm.mips.pcnt.w" => "__builtin_msa_pcnt_w",
+ "llvm.mips.pick.ph" => "__builtin_mips_pick_ph",
+ "llvm.mips.pick.qb" => "__builtin_mips_pick_qb",
+ "llvm.mips.preceq.w.phl" => "__builtin_mips_preceq_w_phl",
+ "llvm.mips.preceq.w.phr" => "__builtin_mips_preceq_w_phr",
+ "llvm.mips.precequ.ph.qbl" => "__builtin_mips_precequ_ph_qbl",
+ "llvm.mips.precequ.ph.qbla" => "__builtin_mips_precequ_ph_qbla",
+ "llvm.mips.precequ.ph.qbr" => "__builtin_mips_precequ_ph_qbr",
+ "llvm.mips.precequ.ph.qbra" => "__builtin_mips_precequ_ph_qbra",
+ "llvm.mips.preceu.ph.qbl" => "__builtin_mips_preceu_ph_qbl",
+ "llvm.mips.preceu.ph.qbla" => "__builtin_mips_preceu_ph_qbla",
+ "llvm.mips.preceu.ph.qbr" => "__builtin_mips_preceu_ph_qbr",
+ "llvm.mips.preceu.ph.qbra" => "__builtin_mips_preceu_ph_qbra",
+ "llvm.mips.precr.qb.ph" => "__builtin_mips_precr_qb_ph",
+ "llvm.mips.precr.sra.ph.w" => "__builtin_mips_precr_sra_ph_w",
+ "llvm.mips.precr.sra.r.ph.w" => "__builtin_mips_precr_sra_r_ph_w",
+ "llvm.mips.precrq.ph.w" => "__builtin_mips_precrq_ph_w",
+ "llvm.mips.precrq.qb.ph" => "__builtin_mips_precrq_qb_ph",
+ "llvm.mips.precrq.rs.ph.w" => "__builtin_mips_precrq_rs_ph_w",
+ "llvm.mips.precrqu.s.qb.ph" => "__builtin_mips_precrqu_s_qb_ph",
+ "llvm.mips.prepend" => "__builtin_mips_prepend",
+ "llvm.mips.raddu.w.qb" => "__builtin_mips_raddu_w_qb",
+ "llvm.mips.rddsp" => "__builtin_mips_rddsp",
+ "llvm.mips.repl.ph" => "__builtin_mips_repl_ph",
+ "llvm.mips.repl.qb" => "__builtin_mips_repl_qb",
+ "llvm.mips.sat.s.b" => "__builtin_msa_sat_s_b",
+ "llvm.mips.sat.s.d" => "__builtin_msa_sat_s_d",
+ "llvm.mips.sat.s.h" => "__builtin_msa_sat_s_h",
+ "llvm.mips.sat.s.w" => "__builtin_msa_sat_s_w",
+ "llvm.mips.sat.u.b" => "__builtin_msa_sat_u_b",
+ "llvm.mips.sat.u.d" => "__builtin_msa_sat_u_d",
+ "llvm.mips.sat.u.h" => "__builtin_msa_sat_u_h",
+ "llvm.mips.sat.u.w" => "__builtin_msa_sat_u_w",
+ "llvm.mips.shf.b" => "__builtin_msa_shf_b",
+ "llvm.mips.shf.h" => "__builtin_msa_shf_h",
+ "llvm.mips.shf.w" => "__builtin_msa_shf_w",
+ "llvm.mips.shilo" => "__builtin_mips_shilo",
+ "llvm.mips.shll.ph" => "__builtin_mips_shll_ph",
+ "llvm.mips.shll.qb" => "__builtin_mips_shll_qb",
+ "llvm.mips.shll.s.ph" => "__builtin_mips_shll_s_ph",
+ "llvm.mips.shll.s.w" => "__builtin_mips_shll_s_w",
+ "llvm.mips.shra.ph" => "__builtin_mips_shra_ph",
+ "llvm.mips.shra.qb" => "__builtin_mips_shra_qb",
+ "llvm.mips.shra.r.ph" => "__builtin_mips_shra_r_ph",
+ "llvm.mips.shra.r.qb" => "__builtin_mips_shra_r_qb",
+ "llvm.mips.shra.r.w" => "__builtin_mips_shra_r_w",
+ "llvm.mips.shrl.ph" => "__builtin_mips_shrl_ph",
+ "llvm.mips.shrl.qb" => "__builtin_mips_shrl_qb",
+ "llvm.mips.sld.b" => "__builtin_msa_sld_b",
+ "llvm.mips.sld.d" => "__builtin_msa_sld_d",
+ "llvm.mips.sld.h" => "__builtin_msa_sld_h",
+ "llvm.mips.sld.w" => "__builtin_msa_sld_w",
+ "llvm.mips.sldi.b" => "__builtin_msa_sldi_b",
+ "llvm.mips.sldi.d" => "__builtin_msa_sldi_d",
+ "llvm.mips.sldi.h" => "__builtin_msa_sldi_h",
+ "llvm.mips.sldi.w" => "__builtin_msa_sldi_w",
+ "llvm.mips.sll.b" => "__builtin_msa_sll_b",
+ "llvm.mips.sll.d" => "__builtin_msa_sll_d",
+ "llvm.mips.sll.h" => "__builtin_msa_sll_h",
+ "llvm.mips.sll.w" => "__builtin_msa_sll_w",
+ "llvm.mips.slli.b" => "__builtin_msa_slli_b",
+ "llvm.mips.slli.d" => "__builtin_msa_slli_d",
+ "llvm.mips.slli.h" => "__builtin_msa_slli_h",
+ "llvm.mips.slli.w" => "__builtin_msa_slli_w",
+ "llvm.mips.splat.b" => "__builtin_msa_splat_b",
+ "llvm.mips.splat.d" => "__builtin_msa_splat_d",
+ "llvm.mips.splat.h" => "__builtin_msa_splat_h",
+ "llvm.mips.splat.w" => "__builtin_msa_splat_w",
+ "llvm.mips.splati.b" => "__builtin_msa_splati_b",
+ "llvm.mips.splati.d" => "__builtin_msa_splati_d",
+ "llvm.mips.splati.h" => "__builtin_msa_splati_h",
+ "llvm.mips.splati.w" => "__builtin_msa_splati_w",
+ "llvm.mips.sra.b" => "__builtin_msa_sra_b",
+ "llvm.mips.sra.d" => "__builtin_msa_sra_d",
+ "llvm.mips.sra.h" => "__builtin_msa_sra_h",
+ "llvm.mips.sra.w" => "__builtin_msa_sra_w",
+ "llvm.mips.srai.b" => "__builtin_msa_srai_b",
+ "llvm.mips.srai.d" => "__builtin_msa_srai_d",
+ "llvm.mips.srai.h" => "__builtin_msa_srai_h",
+ "llvm.mips.srai.w" => "__builtin_msa_srai_w",
+ "llvm.mips.srar.b" => "__builtin_msa_srar_b",
+ "llvm.mips.srar.d" => "__builtin_msa_srar_d",
+ "llvm.mips.srar.h" => "__builtin_msa_srar_h",
+ "llvm.mips.srar.w" => "__builtin_msa_srar_w",
+ "llvm.mips.srari.b" => "__builtin_msa_srari_b",
+ "llvm.mips.srari.d" => "__builtin_msa_srari_d",
+ "llvm.mips.srari.h" => "__builtin_msa_srari_h",
+ "llvm.mips.srari.w" => "__builtin_msa_srari_w",
+ "llvm.mips.srl.b" => "__builtin_msa_srl_b",
+ "llvm.mips.srl.d" => "__builtin_msa_srl_d",
+ "llvm.mips.srl.h" => "__builtin_msa_srl_h",
+ "llvm.mips.srl.w" => "__builtin_msa_srl_w",
+ "llvm.mips.srli.b" => "__builtin_msa_srli_b",
+ "llvm.mips.srli.d" => "__builtin_msa_srli_d",
+ "llvm.mips.srli.h" => "__builtin_msa_srli_h",
+ "llvm.mips.srli.w" => "__builtin_msa_srli_w",
+ "llvm.mips.srlr.b" => "__builtin_msa_srlr_b",
+ "llvm.mips.srlr.d" => "__builtin_msa_srlr_d",
+ "llvm.mips.srlr.h" => "__builtin_msa_srlr_h",
+ "llvm.mips.srlr.w" => "__builtin_msa_srlr_w",
+ "llvm.mips.srlri.b" => "__builtin_msa_srlri_b",
+ "llvm.mips.srlri.d" => "__builtin_msa_srlri_d",
+ "llvm.mips.srlri.h" => "__builtin_msa_srlri_h",
+ "llvm.mips.srlri.w" => "__builtin_msa_srlri_w",
+ "llvm.mips.st.b" => "__builtin_msa_st_b",
+ "llvm.mips.st.d" => "__builtin_msa_st_d",
+ "llvm.mips.st.h" => "__builtin_msa_st_h",
+ "llvm.mips.st.w" => "__builtin_msa_st_w",
+ "llvm.mips.str.d" => "__builtin_msa_str_d",
+ "llvm.mips.str.w" => "__builtin_msa_str_w",
+ "llvm.mips.subq.ph" => "__builtin_mips_subq_ph",
+ "llvm.mips.subq.s.ph" => "__builtin_mips_subq_s_ph",
+ "llvm.mips.subq.s.w" => "__builtin_mips_subq_s_w",
+ "llvm.mips.subqh.ph" => "__builtin_mips_subqh_ph",
+ "llvm.mips.subqh.r.ph" => "__builtin_mips_subqh_r_ph",
+ "llvm.mips.subqh.r.w" => "__builtin_mips_subqh_r_w",
+ "llvm.mips.subqh.w" => "__builtin_mips_subqh_w",
+ "llvm.mips.subs.s.b" => "__builtin_msa_subs_s_b",
+ "llvm.mips.subs.s.d" => "__builtin_msa_subs_s_d",
+ "llvm.mips.subs.s.h" => "__builtin_msa_subs_s_h",
+ "llvm.mips.subs.s.w" => "__builtin_msa_subs_s_w",
+ "llvm.mips.subs.u.b" => "__builtin_msa_subs_u_b",
+ "llvm.mips.subs.u.d" => "__builtin_msa_subs_u_d",
+ "llvm.mips.subs.u.h" => "__builtin_msa_subs_u_h",
+ "llvm.mips.subs.u.w" => "__builtin_msa_subs_u_w",
+ "llvm.mips.subsus.u.b" => "__builtin_msa_subsus_u_b",
+ "llvm.mips.subsus.u.d" => "__builtin_msa_subsus_u_d",
+ "llvm.mips.subsus.u.h" => "__builtin_msa_subsus_u_h",
+ "llvm.mips.subsus.u.w" => "__builtin_msa_subsus_u_w",
+ "llvm.mips.subsuu.s.b" => "__builtin_msa_subsuu_s_b",
+ "llvm.mips.subsuu.s.d" => "__builtin_msa_subsuu_s_d",
+ "llvm.mips.subsuu.s.h" => "__builtin_msa_subsuu_s_h",
+ "llvm.mips.subsuu.s.w" => "__builtin_msa_subsuu_s_w",
+ "llvm.mips.subu.ph" => "__builtin_mips_subu_ph",
+ "llvm.mips.subu.qb" => "__builtin_mips_subu_qb",
+ "llvm.mips.subu.s.ph" => "__builtin_mips_subu_s_ph",
+ "llvm.mips.subu.s.qb" => "__builtin_mips_subu_s_qb",
+ "llvm.mips.subuh.qb" => "__builtin_mips_subuh_qb",
+ "llvm.mips.subuh.r.qb" => "__builtin_mips_subuh_r_qb",
+ "llvm.mips.subv.b" => "__builtin_msa_subv_b",
+ "llvm.mips.subv.d" => "__builtin_msa_subv_d",
+ "llvm.mips.subv.h" => "__builtin_msa_subv_h",
+ "llvm.mips.subv.w" => "__builtin_msa_subv_w",
+ "llvm.mips.subvi.b" => "__builtin_msa_subvi_b",
+ "llvm.mips.subvi.d" => "__builtin_msa_subvi_d",
+ "llvm.mips.subvi.h" => "__builtin_msa_subvi_h",
+ "llvm.mips.subvi.w" => "__builtin_msa_subvi_w",
+ "llvm.mips.vshf.b" => "__builtin_msa_vshf_b",
+ "llvm.mips.vshf.d" => "__builtin_msa_vshf_d",
+ "llvm.mips.vshf.h" => "__builtin_msa_vshf_h",
+ "llvm.mips.vshf.w" => "__builtin_msa_vshf_w",
+ "llvm.mips.wrdsp" => "__builtin_mips_wrdsp",
+ "llvm.mips.xor.v" => "__builtin_msa_xor_v",
+ "llvm.mips.xori.b" => "__builtin_msa_xori_b",
+ // nvvm
+ "llvm.nvvm.abs.i" => "__nvvm_abs_i",
+ "llvm.nvvm.abs.ll" => "__nvvm_abs_ll",
+ "llvm.nvvm.add.rm.d" => "__nvvm_add_rm_d",
+ "llvm.nvvm.add.rm.f" => "__nvvm_add_rm_f",
+ "llvm.nvvm.add.rm.ftz.f" => "__nvvm_add_rm_ftz_f",
+ "llvm.nvvm.add.rn.d" => "__nvvm_add_rn_d",
+ "llvm.nvvm.add.rn.f" => "__nvvm_add_rn_f",
+ "llvm.nvvm.add.rn.ftz.f" => "__nvvm_add_rn_ftz_f",
+ "llvm.nvvm.add.rp.d" => "__nvvm_add_rp_d",
+ "llvm.nvvm.add.rp.f" => "__nvvm_add_rp_f",
+ "llvm.nvvm.add.rp.ftz.f" => "__nvvm_add_rp_ftz_f",
+ "llvm.nvvm.add.rz.d" => "__nvvm_add_rz_d",
+ "llvm.nvvm.add.rz.f" => "__nvvm_add_rz_f",
+ "llvm.nvvm.add.rz.ftz.f" => "__nvvm_add_rz_ftz_f",
+ "llvm.nvvm.bar.sync" => "__nvvm_bar_sync",
+ "llvm.nvvm.barrier0" => "__nvvm_bar0",
+ // [DUPLICATE]: "llvm.nvvm.barrier0" => "__syncthreads",
+ "llvm.nvvm.barrier0.and" => "__nvvm_bar0_and",
+ "llvm.nvvm.barrier0.or" => "__nvvm_bar0_or",
+ "llvm.nvvm.barrier0.popc" => "__nvvm_bar0_popc",
+ "llvm.nvvm.bitcast.d2ll" => "__nvvm_bitcast_d2ll",
+ "llvm.nvvm.bitcast.f2i" => "__nvvm_bitcast_f2i",
+ "llvm.nvvm.bitcast.i2f" => "__nvvm_bitcast_i2f",
+ "llvm.nvvm.bitcast.ll2d" => "__nvvm_bitcast_ll2d",
+ "llvm.nvvm.brev32" => "__nvvm_brev32",
+ "llvm.nvvm.brev64" => "__nvvm_brev64",
+ "llvm.nvvm.ceil.d" => "__nvvm_ceil_d",
+ "llvm.nvvm.ceil.f" => "__nvvm_ceil_f",
+ "llvm.nvvm.ceil.ftz.f" => "__nvvm_ceil_ftz_f",
+ "llvm.nvvm.clz.i" => "__nvvm_clz_i",
+ "llvm.nvvm.clz.ll" => "__nvvm_clz_ll",
+ "llvm.nvvm.cos.approx.f" => "__nvvm_cos_approx_f",
+ "llvm.nvvm.cos.approx.ftz.f" => "__nvvm_cos_approx_ftz_f",
+ "llvm.nvvm.d2f.rm" => "__nvvm_d2f_rm",
+ "llvm.nvvm.d2f.rm.ftz" => "__nvvm_d2f_rm_ftz",
+ "llvm.nvvm.d2f.rn" => "__nvvm_d2f_rn",
+ "llvm.nvvm.d2f.rn.ftz" => "__nvvm_d2f_rn_ftz",
+ "llvm.nvvm.d2f.rp" => "__nvvm_d2f_rp",
+ "llvm.nvvm.d2f.rp.ftz" => "__nvvm_d2f_rp_ftz",
+ "llvm.nvvm.d2f.rz" => "__nvvm_d2f_rz",
+ "llvm.nvvm.d2f.rz.ftz" => "__nvvm_d2f_rz_ftz",
+ "llvm.nvvm.d2i.hi" => "__nvvm_d2i_hi",
+ "llvm.nvvm.d2i.lo" => "__nvvm_d2i_lo",
+ "llvm.nvvm.d2i.rm" => "__nvvm_d2i_rm",
+ "llvm.nvvm.d2i.rn" => "__nvvm_d2i_rn",
+ "llvm.nvvm.d2i.rp" => "__nvvm_d2i_rp",
+ "llvm.nvvm.d2i.rz" => "__nvvm_d2i_rz",
+ "llvm.nvvm.d2ll.rm" => "__nvvm_d2ll_rm",
+ "llvm.nvvm.d2ll.rn" => "__nvvm_d2ll_rn",
+ "llvm.nvvm.d2ll.rp" => "__nvvm_d2ll_rp",
+ "llvm.nvvm.d2ll.rz" => "__nvvm_d2ll_rz",
+ "llvm.nvvm.d2ui.rm" => "__nvvm_d2ui_rm",
+ "llvm.nvvm.d2ui.rn" => "__nvvm_d2ui_rn",
+ "llvm.nvvm.d2ui.rp" => "__nvvm_d2ui_rp",
+ "llvm.nvvm.d2ui.rz" => "__nvvm_d2ui_rz",
+ "llvm.nvvm.d2ull.rm" => "__nvvm_d2ull_rm",
+ "llvm.nvvm.d2ull.rn" => "__nvvm_d2ull_rn",
+ "llvm.nvvm.d2ull.rp" => "__nvvm_d2ull_rp",
+ "llvm.nvvm.d2ull.rz" => "__nvvm_d2ull_rz",
+ "llvm.nvvm.div.approx.f" => "__nvvm_div_approx_f",
+ "llvm.nvvm.div.approx.ftz.f" => "__nvvm_div_approx_ftz_f",
+ "llvm.nvvm.div.rm.d" => "__nvvm_div_rm_d",
+ "llvm.nvvm.div.rm.f" => "__nvvm_div_rm_f",
+ "llvm.nvvm.div.rm.ftz.f" => "__nvvm_div_rm_ftz_f",
+ "llvm.nvvm.div.rn.d" => "__nvvm_div_rn_d",
+ "llvm.nvvm.div.rn.f" => "__nvvm_div_rn_f",
+ "llvm.nvvm.div.rn.ftz.f" => "__nvvm_div_rn_ftz_f",
+ "llvm.nvvm.div.rp.d" => "__nvvm_div_rp_d",
+ "llvm.nvvm.div.rp.f" => "__nvvm_div_rp_f",
+ "llvm.nvvm.div.rp.ftz.f" => "__nvvm_div_rp_ftz_f",
+ "llvm.nvvm.div.rz.d" => "__nvvm_div_rz_d",
+ "llvm.nvvm.div.rz.f" => "__nvvm_div_rz_f",
+ "llvm.nvvm.div.rz.ftz.f" => "__nvvm_div_rz_ftz_f",
+ "llvm.nvvm.ex2.approx.d" => "__nvvm_ex2_approx_d",
+ "llvm.nvvm.ex2.approx.f" => "__nvvm_ex2_approx_f",
+ "llvm.nvvm.ex2.approx.ftz.f" => "__nvvm_ex2_approx_ftz_f",
+ "llvm.nvvm.f2h.rn" => "__nvvm_f2h_rn",
+ "llvm.nvvm.f2h.rn.ftz" => "__nvvm_f2h_rn_ftz",
+ "llvm.nvvm.f2i.rm" => "__nvvm_f2i_rm",
+ "llvm.nvvm.f2i.rm.ftz" => "__nvvm_f2i_rm_ftz",
+ "llvm.nvvm.f2i.rn" => "__nvvm_f2i_rn",
+ "llvm.nvvm.f2i.rn.ftz" => "__nvvm_f2i_rn_ftz",
+ "llvm.nvvm.f2i.rp" => "__nvvm_f2i_rp",
+ "llvm.nvvm.f2i.rp.ftz" => "__nvvm_f2i_rp_ftz",
+ "llvm.nvvm.f2i.rz" => "__nvvm_f2i_rz",
+ "llvm.nvvm.f2i.rz.ftz" => "__nvvm_f2i_rz_ftz",
+ "llvm.nvvm.f2ll.rm" => "__nvvm_f2ll_rm",
+ "llvm.nvvm.f2ll.rm.ftz" => "__nvvm_f2ll_rm_ftz",
+ "llvm.nvvm.f2ll.rn" => "__nvvm_f2ll_rn",
+ "llvm.nvvm.f2ll.rn.ftz" => "__nvvm_f2ll_rn_ftz",
+ "llvm.nvvm.f2ll.rp" => "__nvvm_f2ll_rp",
+ "llvm.nvvm.f2ll.rp.ftz" => "__nvvm_f2ll_rp_ftz",
+ "llvm.nvvm.f2ll.rz" => "__nvvm_f2ll_rz",
+ "llvm.nvvm.f2ll.rz.ftz" => "__nvvm_f2ll_rz_ftz",
+ "llvm.nvvm.f2ui.rm" => "__nvvm_f2ui_rm",
+ "llvm.nvvm.f2ui.rm.ftz" => "__nvvm_f2ui_rm_ftz",
+ "llvm.nvvm.f2ui.rn" => "__nvvm_f2ui_rn",
+ "llvm.nvvm.f2ui.rn.ftz" => "__nvvm_f2ui_rn_ftz",
+ "llvm.nvvm.f2ui.rp" => "__nvvm_f2ui_rp",
+ "llvm.nvvm.f2ui.rp.ftz" => "__nvvm_f2ui_rp_ftz",
+ "llvm.nvvm.f2ui.rz" => "__nvvm_f2ui_rz",
+ "llvm.nvvm.f2ui.rz.ftz" => "__nvvm_f2ui_rz_ftz",
+ "llvm.nvvm.f2ull.rm" => "__nvvm_f2ull_rm",
+ "llvm.nvvm.f2ull.rm.ftz" => "__nvvm_f2ull_rm_ftz",
+ "llvm.nvvm.f2ull.rn" => "__nvvm_f2ull_rn",
+ "llvm.nvvm.f2ull.rn.ftz" => "__nvvm_f2ull_rn_ftz",
+ "llvm.nvvm.f2ull.rp" => "__nvvm_f2ull_rp",
+ "llvm.nvvm.f2ull.rp.ftz" => "__nvvm_f2ull_rp_ftz",
+ "llvm.nvvm.f2ull.rz" => "__nvvm_f2ull_rz",
+ "llvm.nvvm.f2ull.rz.ftz" => "__nvvm_f2ull_rz_ftz",
+ "llvm.nvvm.fabs.d" => "__nvvm_fabs_d",
+ "llvm.nvvm.fabs.f" => "__nvvm_fabs_f",
+ "llvm.nvvm.fabs.ftz.f" => "__nvvm_fabs_ftz_f",
+ "llvm.nvvm.floor.d" => "__nvvm_floor_d",
+ "llvm.nvvm.floor.f" => "__nvvm_floor_f",
+ "llvm.nvvm.floor.ftz.f" => "__nvvm_floor_ftz_f",
+ "llvm.nvvm.fma.rm.d" => "__nvvm_fma_rm_d",
+ "llvm.nvvm.fma.rm.f" => "__nvvm_fma_rm_f",
+ "llvm.nvvm.fma.rm.ftz.f" => "__nvvm_fma_rm_ftz_f",
+ "llvm.nvvm.fma.rn.d" => "__nvvm_fma_rn_d",
+ "llvm.nvvm.fma.rn.f" => "__nvvm_fma_rn_f",
+ "llvm.nvvm.fma.rn.ftz.f" => "__nvvm_fma_rn_ftz_f",
+ "llvm.nvvm.fma.rp.d" => "__nvvm_fma_rp_d",
+ "llvm.nvvm.fma.rp.f" => "__nvvm_fma_rp_f",
+ "llvm.nvvm.fma.rp.ftz.f" => "__nvvm_fma_rp_ftz_f",
+ "llvm.nvvm.fma.rz.d" => "__nvvm_fma_rz_d",
+ "llvm.nvvm.fma.rz.f" => "__nvvm_fma_rz_f",
+ "llvm.nvvm.fma.rz.ftz.f" => "__nvvm_fma_rz_ftz_f",
+ "llvm.nvvm.fmax.d" => "__nvvm_fmax_d",
+ "llvm.nvvm.fmax.f" => "__nvvm_fmax_f",
+ "llvm.nvvm.fmax.ftz.f" => "__nvvm_fmax_ftz_f",
+ "llvm.nvvm.fmin.d" => "__nvvm_fmin_d",
+ "llvm.nvvm.fmin.f" => "__nvvm_fmin_f",
+ "llvm.nvvm.fmin.ftz.f" => "__nvvm_fmin_ftz_f",
+ "llvm.nvvm.h2f" => "__nvvm_h2f",
+ "llvm.nvvm.i2d.rm" => "__nvvm_i2d_rm",
+ "llvm.nvvm.i2d.rn" => "__nvvm_i2d_rn",
+ "llvm.nvvm.i2d.rp" => "__nvvm_i2d_rp",
+ "llvm.nvvm.i2d.rz" => "__nvvm_i2d_rz",
+ "llvm.nvvm.i2f.rm" => "__nvvm_i2f_rm",
+ "llvm.nvvm.i2f.rn" => "__nvvm_i2f_rn",
+ "llvm.nvvm.i2f.rp" => "__nvvm_i2f_rp",
+ "llvm.nvvm.i2f.rz" => "__nvvm_i2f_rz",
+ "llvm.nvvm.isspacep.const" => "__nvvm_isspacep_const",
+ "llvm.nvvm.isspacep.global" => "__nvvm_isspacep_global",
+ "llvm.nvvm.isspacep.local" => "__nvvm_isspacep_local",
+ "llvm.nvvm.isspacep.shared" => "__nvvm_isspacep_shared",
+ "llvm.nvvm.istypep.sampler" => "__nvvm_istypep_sampler",
+ "llvm.nvvm.istypep.surface" => "__nvvm_istypep_surface",
+ "llvm.nvvm.istypep.texture" => "__nvvm_istypep_texture",
+ "llvm.nvvm.lg2.approx.d" => "__nvvm_lg2_approx_d",
+ "llvm.nvvm.lg2.approx.f" => "__nvvm_lg2_approx_f",
+ "llvm.nvvm.lg2.approx.ftz.f" => "__nvvm_lg2_approx_ftz_f",
+ "llvm.nvvm.ll2d.rm" => "__nvvm_ll2d_rm",
+ "llvm.nvvm.ll2d.rn" => "__nvvm_ll2d_rn",
+ "llvm.nvvm.ll2d.rp" => "__nvvm_ll2d_rp",
+ "llvm.nvvm.ll2d.rz" => "__nvvm_ll2d_rz",
+ "llvm.nvvm.ll2f.rm" => "__nvvm_ll2f_rm",
+ "llvm.nvvm.ll2f.rn" => "__nvvm_ll2f_rn",
+ "llvm.nvvm.ll2f.rp" => "__nvvm_ll2f_rp",
+ "llvm.nvvm.ll2f.rz" => "__nvvm_ll2f_rz",
+ "llvm.nvvm.lohi.i2d" => "__nvvm_lohi_i2d",
+ "llvm.nvvm.max.i" => "__nvvm_max_i",
+ "llvm.nvvm.max.ll" => "__nvvm_max_ll",
+ "llvm.nvvm.max.ui" => "__nvvm_max_ui",
+ "llvm.nvvm.max.ull" => "__nvvm_max_ull",
+ "llvm.nvvm.membar.cta" => "__nvvm_membar_cta",
+ "llvm.nvvm.membar.gl" => "__nvvm_membar_gl",
+ "llvm.nvvm.membar.sys" => "__nvvm_membar_sys",
+ "llvm.nvvm.min.i" => "__nvvm_min_i",
+ "llvm.nvvm.min.ll" => "__nvvm_min_ll",
+ "llvm.nvvm.min.ui" => "__nvvm_min_ui",
+ "llvm.nvvm.min.ull" => "__nvvm_min_ull",
+ "llvm.nvvm.mul.rm.d" => "__nvvm_mul_rm_d",
+ "llvm.nvvm.mul.rm.f" => "__nvvm_mul_rm_f",
+ "llvm.nvvm.mul.rm.ftz.f" => "__nvvm_mul_rm_ftz_f",
+ "llvm.nvvm.mul.rn.d" => "__nvvm_mul_rn_d",
+ "llvm.nvvm.mul.rn.f" => "__nvvm_mul_rn_f",
+ "llvm.nvvm.mul.rn.ftz.f" => "__nvvm_mul_rn_ftz_f",
+ "llvm.nvvm.mul.rp.d" => "__nvvm_mul_rp_d",
+ "llvm.nvvm.mul.rp.f" => "__nvvm_mul_rp_f",
+ "llvm.nvvm.mul.rp.ftz.f" => "__nvvm_mul_rp_ftz_f",
+ "llvm.nvvm.mul.rz.d" => "__nvvm_mul_rz_d",
+ "llvm.nvvm.mul.rz.f" => "__nvvm_mul_rz_f",
+ "llvm.nvvm.mul.rz.ftz.f" => "__nvvm_mul_rz_ftz_f",
+ "llvm.nvvm.mul24.i" => "__nvvm_mul24_i",
+ "llvm.nvvm.mul24.ui" => "__nvvm_mul24_ui",
+ "llvm.nvvm.mulhi.i" => "__nvvm_mulhi_i",
+ "llvm.nvvm.mulhi.ll" => "__nvvm_mulhi_ll",
+ "llvm.nvvm.mulhi.ui" => "__nvvm_mulhi_ui",
+ "llvm.nvvm.mulhi.ull" => "__nvvm_mulhi_ull",
+ "llvm.nvvm.popc.i" => "__nvvm_popc_i",
+ "llvm.nvvm.popc.ll" => "__nvvm_popc_ll",
+ "llvm.nvvm.prmt" => "__nvvm_prmt",
+ "llvm.nvvm.rcp.approx.ftz.d" => "__nvvm_rcp_approx_ftz_d",
+ "llvm.nvvm.rcp.rm.d" => "__nvvm_rcp_rm_d",
+ "llvm.nvvm.rcp.rm.f" => "__nvvm_rcp_rm_f",
+ "llvm.nvvm.rcp.rm.ftz.f" => "__nvvm_rcp_rm_ftz_f",
+ "llvm.nvvm.rcp.rn.d" => "__nvvm_rcp_rn_d",
+ "llvm.nvvm.rcp.rn.f" => "__nvvm_rcp_rn_f",
+ "llvm.nvvm.rcp.rn.ftz.f" => "__nvvm_rcp_rn_ftz_f",
+ "llvm.nvvm.rcp.rp.d" => "__nvvm_rcp_rp_d",
+ "llvm.nvvm.rcp.rp.f" => "__nvvm_rcp_rp_f",
+ "llvm.nvvm.rcp.rp.ftz.f" => "__nvvm_rcp_rp_ftz_f",
+ "llvm.nvvm.rcp.rz.d" => "__nvvm_rcp_rz_d",
+ "llvm.nvvm.rcp.rz.f" => "__nvvm_rcp_rz_f",
+ "llvm.nvvm.rcp.rz.ftz.f" => "__nvvm_rcp_rz_ftz_f",
+ "llvm.nvvm.read.ptx.sreg.clock" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.clock64" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.ctaid.x" => "__nvvm_read_ptx_sreg_ctaid_x",
+ "llvm.nvvm.read.ptx.sreg.ctaid.y" => "__nvvm_read_ptx_sreg_ctaid_y",
+ "llvm.nvvm.read.ptx.sreg.ctaid.z" => "__nvvm_read_ptx_sreg_ctaid_z",
+ "llvm.nvvm.read.ptx.sreg.envreg0" => "__nvvm_read_ptx_sreg_envreg0",
+ "llvm.nvvm.read.ptx.sreg.envreg1" => "__nvvm_read_ptx_sreg_envreg1",
+ "llvm.nvvm.read.ptx.sreg.envreg10" => "__nvvm_read_ptx_sreg_envreg10",
+ "llvm.nvvm.read.ptx.sreg.envreg11" => "__nvvm_read_ptx_sreg_envreg11",
+ "llvm.nvvm.read.ptx.sreg.envreg12" => "__nvvm_read_ptx_sreg_envreg12",
+ "llvm.nvvm.read.ptx.sreg.envreg13" => "__nvvm_read_ptx_sreg_envreg13",
+ "llvm.nvvm.read.ptx.sreg.envreg14" => "__nvvm_read_ptx_sreg_envreg14",
+ "llvm.nvvm.read.ptx.sreg.envreg15" => "__nvvm_read_ptx_sreg_envreg15",
+ "llvm.nvvm.read.ptx.sreg.envreg16" => "__nvvm_read_ptx_sreg_envreg16",
+ "llvm.nvvm.read.ptx.sreg.envreg17" => "__nvvm_read_ptx_sreg_envreg17",
+ "llvm.nvvm.read.ptx.sreg.envreg18" => "__nvvm_read_ptx_sreg_envreg18",
+ "llvm.nvvm.read.ptx.sreg.envreg19" => "__nvvm_read_ptx_sreg_envreg19",
+ "llvm.nvvm.read.ptx.sreg.envreg2" => "__nvvm_read_ptx_sreg_envreg2",
+ "llvm.nvvm.read.ptx.sreg.envreg20" => "__nvvm_read_ptx_sreg_envreg20",
+ "llvm.nvvm.read.ptx.sreg.envreg21" => "__nvvm_read_ptx_sreg_envreg21",
+ "llvm.nvvm.read.ptx.sreg.envreg22" => "__nvvm_read_ptx_sreg_envreg22",
+ "llvm.nvvm.read.ptx.sreg.envreg23" => "__nvvm_read_ptx_sreg_envreg23",
+ "llvm.nvvm.read.ptx.sreg.envreg24" => "__nvvm_read_ptx_sreg_envreg24",
+ "llvm.nvvm.read.ptx.sreg.envreg25" => "__nvvm_read_ptx_sreg_envreg25",
+ "llvm.nvvm.read.ptx.sreg.envreg26" => "__nvvm_read_ptx_sreg_envreg26",
+ "llvm.nvvm.read.ptx.sreg.envreg27" => "__nvvm_read_ptx_sreg_envreg27",
+ "llvm.nvvm.read.ptx.sreg.envreg28" => "__nvvm_read_ptx_sreg_envreg28",
+ "llvm.nvvm.read.ptx.sreg.envreg29" => "__nvvm_read_ptx_sreg_envreg29",
+ "llvm.nvvm.read.ptx.sreg.envreg3" => "__nvvm_read_ptx_sreg_envreg3",
+ "llvm.nvvm.read.ptx.sreg.envreg30" => "__nvvm_read_ptx_sreg_envreg30",
+ "llvm.nvvm.read.ptx.sreg.envreg31" => "__nvvm_read_ptx_sreg_envreg31",
+ "llvm.nvvm.read.ptx.sreg.envreg4" => "__nvvm_read_ptx_sreg_envreg4",
+ "llvm.nvvm.read.ptx.sreg.envreg5" => "__nvvm_read_ptx_sreg_envreg5",
+ "llvm.nvvm.read.ptx.sreg.envreg6" => "__nvvm_read_ptx_sreg_envreg6",
+ "llvm.nvvm.read.ptx.sreg.envreg7" => "__nvvm_read_ptx_sreg_envreg7",
+ "llvm.nvvm.read.ptx.sreg.envreg8" => "__nvvm_read_ptx_sreg_envreg8",
+ "llvm.nvvm.read.ptx.sreg.envreg9" => "__nvvm_read_ptx_sreg_envreg9",
+ "llvm.nvvm.read.ptx.sreg.gridid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.laneid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.eq" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.ge" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.gt" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.le" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.lanemask.lt" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.nctaid.x" => "__nvvm_read_ptx_sreg_nctaid_x",
+ "llvm.nvvm.read.ptx.sreg.nctaid.y" => "__nvvm_read_ptx_sreg_nctaid_y",
+ "llvm.nvvm.read.ptx.sreg.nctaid.z" => "__nvvm_read_ptx_sreg_nctaid_z",
+ "llvm.nvvm.read.ptx.sreg.nsmid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.ntid.x" => "__nvvm_read_ptx_sreg_ntid_x",
+ "llvm.nvvm.read.ptx.sreg.ntid.y" => "__nvvm_read_ptx_sreg_ntid_y",
+ "llvm.nvvm.read.ptx.sreg.ntid.z" => "__nvvm_read_ptx_sreg_ntid_z",
+ "llvm.nvvm.read.ptx.sreg.nwarpid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.pm0" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.pm1" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.pm2" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.pm3" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.smid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.tid.x" => "__nvvm_read_ptx_sreg_tid_x",
+ "llvm.nvvm.read.ptx.sreg.tid.y" => "__nvvm_read_ptx_sreg_tid_y",
+ "llvm.nvvm.read.ptx.sreg.tid.z" => "__nvvm_read_ptx_sreg_tid_z",
+ "llvm.nvvm.read.ptx.sreg.warpid" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_warpsize",
+ // [DUPLICATE]: "llvm.nvvm.read.ptx.sreg.warpsize" => "__nvvm_read_ptx_sreg_",
+ "llvm.nvvm.rotate.b32" => "__nvvm_rotate_b32",
+ "llvm.nvvm.rotate.b64" => "__nvvm_rotate_b64",
+ "llvm.nvvm.rotate.right.b64" => "__nvvm_rotate_right_b64",
+ "llvm.nvvm.round.d" => "__nvvm_round_d",
+ "llvm.nvvm.round.f" => "__nvvm_round_f",
+ "llvm.nvvm.round.ftz.f" => "__nvvm_round_ftz_f",
+ "llvm.nvvm.rsqrt.approx.d" => "__nvvm_rsqrt_approx_d",
+ "llvm.nvvm.rsqrt.approx.f" => "__nvvm_rsqrt_approx_f",
+ "llvm.nvvm.rsqrt.approx.ftz.f" => "__nvvm_rsqrt_approx_ftz_f",
+ "llvm.nvvm.sad.i" => "__nvvm_sad_i",
+ "llvm.nvvm.sad.ui" => "__nvvm_sad_ui",
+ "llvm.nvvm.saturate.d" => "__nvvm_saturate_d",
+ "llvm.nvvm.saturate.f" => "__nvvm_saturate_f",
+ "llvm.nvvm.saturate.ftz.f" => "__nvvm_saturate_ftz_f",
+ "llvm.nvvm.shfl.bfly.f32" => "__nvvm_shfl_bfly_f32",
+ "llvm.nvvm.shfl.bfly.i32" => "__nvvm_shfl_bfly_i32",
+ "llvm.nvvm.shfl.down.f32" => "__nvvm_shfl_down_f32",
+ "llvm.nvvm.shfl.down.i32" => "__nvvm_shfl_down_i32",
+ "llvm.nvvm.shfl.idx.f32" => "__nvvm_shfl_idx_f32",
+ "llvm.nvvm.shfl.idx.i32" => "__nvvm_shfl_idx_i32",
+ "llvm.nvvm.shfl.up.f32" => "__nvvm_shfl_up_f32",
+ "llvm.nvvm.shfl.up.i32" => "__nvvm_shfl_up_i32",
+ "llvm.nvvm.sin.approx.f" => "__nvvm_sin_approx_f",
+ "llvm.nvvm.sin.approx.ftz.f" => "__nvvm_sin_approx_ftz_f",
+ "llvm.nvvm.sqrt.approx.f" => "__nvvm_sqrt_approx_f",
+ "llvm.nvvm.sqrt.approx.ftz.f" => "__nvvm_sqrt_approx_ftz_f",
+ "llvm.nvvm.sqrt.f" => "__nvvm_sqrt_f",
+ "llvm.nvvm.sqrt.rm.d" => "__nvvm_sqrt_rm_d",
+ "llvm.nvvm.sqrt.rm.f" => "__nvvm_sqrt_rm_f",
+ "llvm.nvvm.sqrt.rm.ftz.f" => "__nvvm_sqrt_rm_ftz_f",
+ "llvm.nvvm.sqrt.rn.d" => "__nvvm_sqrt_rn_d",
+ "llvm.nvvm.sqrt.rn.f" => "__nvvm_sqrt_rn_f",
+ "llvm.nvvm.sqrt.rn.ftz.f" => "__nvvm_sqrt_rn_ftz_f",
+ "llvm.nvvm.sqrt.rp.d" => "__nvvm_sqrt_rp_d",
+ "llvm.nvvm.sqrt.rp.f" => "__nvvm_sqrt_rp_f",
+ "llvm.nvvm.sqrt.rp.ftz.f" => "__nvvm_sqrt_rp_ftz_f",
+ "llvm.nvvm.sqrt.rz.d" => "__nvvm_sqrt_rz_d",
+ "llvm.nvvm.sqrt.rz.f" => "__nvvm_sqrt_rz_f",
+ "llvm.nvvm.sqrt.rz.ftz.f" => "__nvvm_sqrt_rz_ftz_f",
+ "llvm.nvvm.suq.array.size" => "__nvvm_suq_array_size",
+ "llvm.nvvm.suq.channel.data.type" => "__nvvm_suq_channel_data_type",
+ "llvm.nvvm.suq.channel.order" => "__nvvm_suq_channel_order",
+ "llvm.nvvm.suq.depth" => "__nvvm_suq_depth",
+ "llvm.nvvm.suq.height" => "__nvvm_suq_height",
+ "llvm.nvvm.suq.width" => "__nvvm_suq_width",
+ "llvm.nvvm.sust.b.1d.array.i16.clamp" => "__nvvm_sust_b_1d_array_i16_clamp",
+ "llvm.nvvm.sust.b.1d.array.i16.trap" => "__nvvm_sust_b_1d_array_i16_trap",
+ "llvm.nvvm.sust.b.1d.array.i16.zero" => "__nvvm_sust_b_1d_array_i16_zero",
+ "llvm.nvvm.sust.b.1d.array.i32.clamp" => "__nvvm_sust_b_1d_array_i32_clamp",
+ "llvm.nvvm.sust.b.1d.array.i32.trap" => "__nvvm_sust_b_1d_array_i32_trap",
+ "llvm.nvvm.sust.b.1d.array.i32.zero" => "__nvvm_sust_b_1d_array_i32_zero",
+ "llvm.nvvm.sust.b.1d.array.i64.clamp" => "__nvvm_sust_b_1d_array_i64_clamp",
+ "llvm.nvvm.sust.b.1d.array.i64.trap" => "__nvvm_sust_b_1d_array_i64_trap",
+ "llvm.nvvm.sust.b.1d.array.i64.zero" => "__nvvm_sust_b_1d_array_i64_zero",
+ "llvm.nvvm.sust.b.1d.array.i8.clamp" => "__nvvm_sust_b_1d_array_i8_clamp",
+ "llvm.nvvm.sust.b.1d.array.i8.trap" => "__nvvm_sust_b_1d_array_i8_trap",
+ "llvm.nvvm.sust.b.1d.array.i8.zero" => "__nvvm_sust_b_1d_array_i8_zero",
+ "llvm.nvvm.sust.b.1d.array.v2i16.clamp" => "__nvvm_sust_b_1d_array_v2i16_clamp",
+ "llvm.nvvm.sust.b.1d.array.v2i16.trap" => "__nvvm_sust_b_1d_array_v2i16_trap",
+ "llvm.nvvm.sust.b.1d.array.v2i16.zero" => "__nvvm_sust_b_1d_array_v2i16_zero",
+ "llvm.nvvm.sust.b.1d.array.v2i32.clamp" => "__nvvm_sust_b_1d_array_v2i32_clamp",
+ "llvm.nvvm.sust.b.1d.array.v2i32.trap" => "__nvvm_sust_b_1d_array_v2i32_trap",
+ "llvm.nvvm.sust.b.1d.array.v2i32.zero" => "__nvvm_sust_b_1d_array_v2i32_zero",
+ "llvm.nvvm.sust.b.1d.array.v2i64.clamp" => "__nvvm_sust_b_1d_array_v2i64_clamp",
+ "llvm.nvvm.sust.b.1d.array.v2i64.trap" => "__nvvm_sust_b_1d_array_v2i64_trap",
+ "llvm.nvvm.sust.b.1d.array.v2i64.zero" => "__nvvm_sust_b_1d_array_v2i64_zero",
+ "llvm.nvvm.sust.b.1d.array.v2i8.clamp" => "__nvvm_sust_b_1d_array_v2i8_clamp",
+ "llvm.nvvm.sust.b.1d.array.v2i8.trap" => "__nvvm_sust_b_1d_array_v2i8_trap",
+ "llvm.nvvm.sust.b.1d.array.v2i8.zero" => "__nvvm_sust_b_1d_array_v2i8_zero",
+ "llvm.nvvm.sust.b.1d.array.v4i16.clamp" => "__nvvm_sust_b_1d_array_v4i16_clamp",
+ "llvm.nvvm.sust.b.1d.array.v4i16.trap" => "__nvvm_sust_b_1d_array_v4i16_trap",
+ "llvm.nvvm.sust.b.1d.array.v4i16.zero" => "__nvvm_sust_b_1d_array_v4i16_zero",
+ "llvm.nvvm.sust.b.1d.array.v4i32.clamp" => "__nvvm_sust_b_1d_array_v4i32_clamp",
+ "llvm.nvvm.sust.b.1d.array.v4i32.trap" => "__nvvm_sust_b_1d_array_v4i32_trap",
+ "llvm.nvvm.sust.b.1d.array.v4i32.zero" => "__nvvm_sust_b_1d_array_v4i32_zero",
+ "llvm.nvvm.sust.b.1d.array.v4i8.clamp" => "__nvvm_sust_b_1d_array_v4i8_clamp",
+ "llvm.nvvm.sust.b.1d.array.v4i8.trap" => "__nvvm_sust_b_1d_array_v4i8_trap",
+ "llvm.nvvm.sust.b.1d.array.v4i8.zero" => "__nvvm_sust_b_1d_array_v4i8_zero",
+ "llvm.nvvm.sust.b.1d.i16.clamp" => "__nvvm_sust_b_1d_i16_clamp",
+ "llvm.nvvm.sust.b.1d.i16.trap" => "__nvvm_sust_b_1d_i16_trap",
+ "llvm.nvvm.sust.b.1d.i16.zero" => "__nvvm_sust_b_1d_i16_zero",
+ "llvm.nvvm.sust.b.1d.i32.clamp" => "__nvvm_sust_b_1d_i32_clamp",
+ "llvm.nvvm.sust.b.1d.i32.trap" => "__nvvm_sust_b_1d_i32_trap",
+ "llvm.nvvm.sust.b.1d.i32.zero" => "__nvvm_sust_b_1d_i32_zero",
+ "llvm.nvvm.sust.b.1d.i64.clamp" => "__nvvm_sust_b_1d_i64_clamp",
+ "llvm.nvvm.sust.b.1d.i64.trap" => "__nvvm_sust_b_1d_i64_trap",
+ "llvm.nvvm.sust.b.1d.i64.zero" => "__nvvm_sust_b_1d_i64_zero",
+ "llvm.nvvm.sust.b.1d.i8.clamp" => "__nvvm_sust_b_1d_i8_clamp",
+ "llvm.nvvm.sust.b.1d.i8.trap" => "__nvvm_sust_b_1d_i8_trap",
+ "llvm.nvvm.sust.b.1d.i8.zero" => "__nvvm_sust_b_1d_i8_zero",
+ "llvm.nvvm.sust.b.1d.v2i16.clamp" => "__nvvm_sust_b_1d_v2i16_clamp",
+ "llvm.nvvm.sust.b.1d.v2i16.trap" => "__nvvm_sust_b_1d_v2i16_trap",
+ "llvm.nvvm.sust.b.1d.v2i16.zero" => "__nvvm_sust_b_1d_v2i16_zero",
+ "llvm.nvvm.sust.b.1d.v2i32.clamp" => "__nvvm_sust_b_1d_v2i32_clamp",
+ "llvm.nvvm.sust.b.1d.v2i32.trap" => "__nvvm_sust_b_1d_v2i32_trap",
+ "llvm.nvvm.sust.b.1d.v2i32.zero" => "__nvvm_sust_b_1d_v2i32_zero",
+ "llvm.nvvm.sust.b.1d.v2i64.clamp" => "__nvvm_sust_b_1d_v2i64_clamp",
+ "llvm.nvvm.sust.b.1d.v2i64.trap" => "__nvvm_sust_b_1d_v2i64_trap",
+ "llvm.nvvm.sust.b.1d.v2i64.zero" => "__nvvm_sust_b_1d_v2i64_zero",
+ "llvm.nvvm.sust.b.1d.v2i8.clamp" => "__nvvm_sust_b_1d_v2i8_clamp",
+ "llvm.nvvm.sust.b.1d.v2i8.trap" => "__nvvm_sust_b_1d_v2i8_trap",
+ "llvm.nvvm.sust.b.1d.v2i8.zero" => "__nvvm_sust_b_1d_v2i8_zero",
+ "llvm.nvvm.sust.b.1d.v4i16.clamp" => "__nvvm_sust_b_1d_v4i16_clamp",
+ "llvm.nvvm.sust.b.1d.v4i16.trap" => "__nvvm_sust_b_1d_v4i16_trap",
+ "llvm.nvvm.sust.b.1d.v4i16.zero" => "__nvvm_sust_b_1d_v4i16_zero",
+ "llvm.nvvm.sust.b.1d.v4i32.clamp" => "__nvvm_sust_b_1d_v4i32_clamp",
+ "llvm.nvvm.sust.b.1d.v4i32.trap" => "__nvvm_sust_b_1d_v4i32_trap",
+ "llvm.nvvm.sust.b.1d.v4i32.zero" => "__nvvm_sust_b_1d_v4i32_zero",
+ "llvm.nvvm.sust.b.1d.v4i8.clamp" => "__nvvm_sust_b_1d_v4i8_clamp",
+ "llvm.nvvm.sust.b.1d.v4i8.trap" => "__nvvm_sust_b_1d_v4i8_trap",
+ "llvm.nvvm.sust.b.1d.v4i8.zero" => "__nvvm_sust_b_1d_v4i8_zero",
+ "llvm.nvvm.sust.b.2d.array.i16.clamp" => "__nvvm_sust_b_2d_array_i16_clamp",
+ "llvm.nvvm.sust.b.2d.array.i16.trap" => "__nvvm_sust_b_2d_array_i16_trap",
+ "llvm.nvvm.sust.b.2d.array.i16.zero" => "__nvvm_sust_b_2d_array_i16_zero",
+ "llvm.nvvm.sust.b.2d.array.i32.clamp" => "__nvvm_sust_b_2d_array_i32_clamp",
+ "llvm.nvvm.sust.b.2d.array.i32.trap" => "__nvvm_sust_b_2d_array_i32_trap",
+ "llvm.nvvm.sust.b.2d.array.i32.zero" => "__nvvm_sust_b_2d_array_i32_zero",
+ "llvm.nvvm.sust.b.2d.array.i64.clamp" => "__nvvm_sust_b_2d_array_i64_clamp",
+ "llvm.nvvm.sust.b.2d.array.i64.trap" => "__nvvm_sust_b_2d_array_i64_trap",
+ "llvm.nvvm.sust.b.2d.array.i64.zero" => "__nvvm_sust_b_2d_array_i64_zero",
+ "llvm.nvvm.sust.b.2d.array.i8.clamp" => "__nvvm_sust_b_2d_array_i8_clamp",
+ "llvm.nvvm.sust.b.2d.array.i8.trap" => "__nvvm_sust_b_2d_array_i8_trap",
+ "llvm.nvvm.sust.b.2d.array.i8.zero" => "__nvvm_sust_b_2d_array_i8_zero",
+ "llvm.nvvm.sust.b.2d.array.v2i16.clamp" => "__nvvm_sust_b_2d_array_v2i16_clamp",
+ "llvm.nvvm.sust.b.2d.array.v2i16.trap" => "__nvvm_sust_b_2d_array_v2i16_trap",
+ "llvm.nvvm.sust.b.2d.array.v2i16.zero" => "__nvvm_sust_b_2d_array_v2i16_zero",
+ "llvm.nvvm.sust.b.2d.array.v2i32.clamp" => "__nvvm_sust_b_2d_array_v2i32_clamp",
+ "llvm.nvvm.sust.b.2d.array.v2i32.trap" => "__nvvm_sust_b_2d_array_v2i32_trap",
+ "llvm.nvvm.sust.b.2d.array.v2i32.zero" => "__nvvm_sust_b_2d_array_v2i32_zero",
+ "llvm.nvvm.sust.b.2d.array.v2i64.clamp" => "__nvvm_sust_b_2d_array_v2i64_clamp",
+ "llvm.nvvm.sust.b.2d.array.v2i64.trap" => "__nvvm_sust_b_2d_array_v2i64_trap",
+ "llvm.nvvm.sust.b.2d.array.v2i64.zero" => "__nvvm_sust_b_2d_array_v2i64_zero",
+ "llvm.nvvm.sust.b.2d.array.v2i8.clamp" => "__nvvm_sust_b_2d_array_v2i8_clamp",
+ "llvm.nvvm.sust.b.2d.array.v2i8.trap" => "__nvvm_sust_b_2d_array_v2i8_trap",
+ "llvm.nvvm.sust.b.2d.array.v2i8.zero" => "__nvvm_sust_b_2d_array_v2i8_zero",
+ "llvm.nvvm.sust.b.2d.array.v4i16.clamp" => "__nvvm_sust_b_2d_array_v4i16_clamp",
+ "llvm.nvvm.sust.b.2d.array.v4i16.trap" => "__nvvm_sust_b_2d_array_v4i16_trap",
+ "llvm.nvvm.sust.b.2d.array.v4i16.zero" => "__nvvm_sust_b_2d_array_v4i16_zero",
+ "llvm.nvvm.sust.b.2d.array.v4i32.clamp" => "__nvvm_sust_b_2d_array_v4i32_clamp",
+ "llvm.nvvm.sust.b.2d.array.v4i32.trap" => "__nvvm_sust_b_2d_array_v4i32_trap",
+ "llvm.nvvm.sust.b.2d.array.v4i32.zero" => "__nvvm_sust_b_2d_array_v4i32_zero",
+ "llvm.nvvm.sust.b.2d.array.v4i8.clamp" => "__nvvm_sust_b_2d_array_v4i8_clamp",
+ "llvm.nvvm.sust.b.2d.array.v4i8.trap" => "__nvvm_sust_b_2d_array_v4i8_trap",
+ "llvm.nvvm.sust.b.2d.array.v4i8.zero" => "__nvvm_sust_b_2d_array_v4i8_zero",
+ "llvm.nvvm.sust.b.2d.i16.clamp" => "__nvvm_sust_b_2d_i16_clamp",
+ "llvm.nvvm.sust.b.2d.i16.trap" => "__nvvm_sust_b_2d_i16_trap",
+ "llvm.nvvm.sust.b.2d.i16.zero" => "__nvvm_sust_b_2d_i16_zero",
+ "llvm.nvvm.sust.b.2d.i32.clamp" => "__nvvm_sust_b_2d_i32_clamp",
+ "llvm.nvvm.sust.b.2d.i32.trap" => "__nvvm_sust_b_2d_i32_trap",
+ "llvm.nvvm.sust.b.2d.i32.zero" => "__nvvm_sust_b_2d_i32_zero",
+ "llvm.nvvm.sust.b.2d.i64.clamp" => "__nvvm_sust_b_2d_i64_clamp",
+ "llvm.nvvm.sust.b.2d.i64.trap" => "__nvvm_sust_b_2d_i64_trap",
+ "llvm.nvvm.sust.b.2d.i64.zero" => "__nvvm_sust_b_2d_i64_zero",
+ "llvm.nvvm.sust.b.2d.i8.clamp" => "__nvvm_sust_b_2d_i8_clamp",
+ "llvm.nvvm.sust.b.2d.i8.trap" => "__nvvm_sust_b_2d_i8_trap",
+ "llvm.nvvm.sust.b.2d.i8.zero" => "__nvvm_sust_b_2d_i8_zero",
+ "llvm.nvvm.sust.b.2d.v2i16.clamp" => "__nvvm_sust_b_2d_v2i16_clamp",
+ "llvm.nvvm.sust.b.2d.v2i16.trap" => "__nvvm_sust_b_2d_v2i16_trap",
+ "llvm.nvvm.sust.b.2d.v2i16.zero" => "__nvvm_sust_b_2d_v2i16_zero",
+ "llvm.nvvm.sust.b.2d.v2i32.clamp" => "__nvvm_sust_b_2d_v2i32_clamp",
+ "llvm.nvvm.sust.b.2d.v2i32.trap" => "__nvvm_sust_b_2d_v2i32_trap",
+ "llvm.nvvm.sust.b.2d.v2i32.zero" => "__nvvm_sust_b_2d_v2i32_zero",
+ "llvm.nvvm.sust.b.2d.v2i64.clamp" => "__nvvm_sust_b_2d_v2i64_clamp",
+ "llvm.nvvm.sust.b.2d.v2i64.trap" => "__nvvm_sust_b_2d_v2i64_trap",
+ "llvm.nvvm.sust.b.2d.v2i64.zero" => "__nvvm_sust_b_2d_v2i64_zero",
+ "llvm.nvvm.sust.b.2d.v2i8.clamp" => "__nvvm_sust_b_2d_v2i8_clamp",
+ "llvm.nvvm.sust.b.2d.v2i8.trap" => "__nvvm_sust_b_2d_v2i8_trap",
+ "llvm.nvvm.sust.b.2d.v2i8.zero" => "__nvvm_sust_b_2d_v2i8_zero",
+ "llvm.nvvm.sust.b.2d.v4i16.clamp" => "__nvvm_sust_b_2d_v4i16_clamp",
+ "llvm.nvvm.sust.b.2d.v4i16.trap" => "__nvvm_sust_b_2d_v4i16_trap",
+ "llvm.nvvm.sust.b.2d.v4i16.zero" => "__nvvm_sust_b_2d_v4i16_zero",
+ "llvm.nvvm.sust.b.2d.v4i32.clamp" => "__nvvm_sust_b_2d_v4i32_clamp",
+ "llvm.nvvm.sust.b.2d.v4i32.trap" => "__nvvm_sust_b_2d_v4i32_trap",
+ "llvm.nvvm.sust.b.2d.v4i32.zero" => "__nvvm_sust_b_2d_v4i32_zero",
+ "llvm.nvvm.sust.b.2d.v4i8.clamp" => "__nvvm_sust_b_2d_v4i8_clamp",
+ "llvm.nvvm.sust.b.2d.v4i8.trap" => "__nvvm_sust_b_2d_v4i8_trap",
+ "llvm.nvvm.sust.b.2d.v4i8.zero" => "__nvvm_sust_b_2d_v4i8_zero",
+ "llvm.nvvm.sust.b.3d.i16.clamp" => "__nvvm_sust_b_3d_i16_clamp",
+ "llvm.nvvm.sust.b.3d.i16.trap" => "__nvvm_sust_b_3d_i16_trap",
+ "llvm.nvvm.sust.b.3d.i16.zero" => "__nvvm_sust_b_3d_i16_zero",
+ "llvm.nvvm.sust.b.3d.i32.clamp" => "__nvvm_sust_b_3d_i32_clamp",
+ "llvm.nvvm.sust.b.3d.i32.trap" => "__nvvm_sust_b_3d_i32_trap",
+ "llvm.nvvm.sust.b.3d.i32.zero" => "__nvvm_sust_b_3d_i32_zero",
+ "llvm.nvvm.sust.b.3d.i64.clamp" => "__nvvm_sust_b_3d_i64_clamp",
+ "llvm.nvvm.sust.b.3d.i64.trap" => "__nvvm_sust_b_3d_i64_trap",
+ "llvm.nvvm.sust.b.3d.i64.zero" => "__nvvm_sust_b_3d_i64_zero",
+ "llvm.nvvm.sust.b.3d.i8.clamp" => "__nvvm_sust_b_3d_i8_clamp",
+ "llvm.nvvm.sust.b.3d.i8.trap" => "__nvvm_sust_b_3d_i8_trap",
+ "llvm.nvvm.sust.b.3d.i8.zero" => "__nvvm_sust_b_3d_i8_zero",
+ "llvm.nvvm.sust.b.3d.v2i16.clamp" => "__nvvm_sust_b_3d_v2i16_clamp",
+ "llvm.nvvm.sust.b.3d.v2i16.trap" => "__nvvm_sust_b_3d_v2i16_trap",
+ "llvm.nvvm.sust.b.3d.v2i16.zero" => "__nvvm_sust_b_3d_v2i16_zero",
+ "llvm.nvvm.sust.b.3d.v2i32.clamp" => "__nvvm_sust_b_3d_v2i32_clamp",
+ "llvm.nvvm.sust.b.3d.v2i32.trap" => "__nvvm_sust_b_3d_v2i32_trap",
+ "llvm.nvvm.sust.b.3d.v2i32.zero" => "__nvvm_sust_b_3d_v2i32_zero",
+ "llvm.nvvm.sust.b.3d.v2i64.clamp" => "__nvvm_sust_b_3d_v2i64_clamp",
+ "llvm.nvvm.sust.b.3d.v2i64.trap" => "__nvvm_sust_b_3d_v2i64_trap",
+ "llvm.nvvm.sust.b.3d.v2i64.zero" => "__nvvm_sust_b_3d_v2i64_zero",
+ "llvm.nvvm.sust.b.3d.v2i8.clamp" => "__nvvm_sust_b_3d_v2i8_clamp",
+ "llvm.nvvm.sust.b.3d.v2i8.trap" => "__nvvm_sust_b_3d_v2i8_trap",
+ "llvm.nvvm.sust.b.3d.v2i8.zero" => "__nvvm_sust_b_3d_v2i8_zero",
+ "llvm.nvvm.sust.b.3d.v4i16.clamp" => "__nvvm_sust_b_3d_v4i16_clamp",
+ "llvm.nvvm.sust.b.3d.v4i16.trap" => "__nvvm_sust_b_3d_v4i16_trap",
+ "llvm.nvvm.sust.b.3d.v4i16.zero" => "__nvvm_sust_b_3d_v4i16_zero",
+ "llvm.nvvm.sust.b.3d.v4i32.clamp" => "__nvvm_sust_b_3d_v4i32_clamp",
+ "llvm.nvvm.sust.b.3d.v4i32.trap" => "__nvvm_sust_b_3d_v4i32_trap",
+ "llvm.nvvm.sust.b.3d.v4i32.zero" => "__nvvm_sust_b_3d_v4i32_zero",
+ "llvm.nvvm.sust.b.3d.v4i8.clamp" => "__nvvm_sust_b_3d_v4i8_clamp",
+ "llvm.nvvm.sust.b.3d.v4i8.trap" => "__nvvm_sust_b_3d_v4i8_trap",
+ "llvm.nvvm.sust.b.3d.v4i8.zero" => "__nvvm_sust_b_3d_v4i8_zero",
+ "llvm.nvvm.sust.p.1d.array.i16.trap" => "__nvvm_sust_p_1d_array_i16_trap",
+ "llvm.nvvm.sust.p.1d.array.i32.trap" => "__nvvm_sust_p_1d_array_i32_trap",
+ "llvm.nvvm.sust.p.1d.array.i8.trap" => "__nvvm_sust_p_1d_array_i8_trap",
+ "llvm.nvvm.sust.p.1d.array.v2i16.trap" => "__nvvm_sust_p_1d_array_v2i16_trap",
+ "llvm.nvvm.sust.p.1d.array.v2i32.trap" => "__nvvm_sust_p_1d_array_v2i32_trap",
+ "llvm.nvvm.sust.p.1d.array.v2i8.trap" => "__nvvm_sust_p_1d_array_v2i8_trap",
+ "llvm.nvvm.sust.p.1d.array.v4i16.trap" => "__nvvm_sust_p_1d_array_v4i16_trap",
+ "llvm.nvvm.sust.p.1d.array.v4i32.trap" => "__nvvm_sust_p_1d_array_v4i32_trap",
+ "llvm.nvvm.sust.p.1d.array.v4i8.trap" => "__nvvm_sust_p_1d_array_v4i8_trap",
+ "llvm.nvvm.sust.p.1d.i16.trap" => "__nvvm_sust_p_1d_i16_trap",
+ "llvm.nvvm.sust.p.1d.i32.trap" => "__nvvm_sust_p_1d_i32_trap",
+ "llvm.nvvm.sust.p.1d.i8.trap" => "__nvvm_sust_p_1d_i8_trap",
+ "llvm.nvvm.sust.p.1d.v2i16.trap" => "__nvvm_sust_p_1d_v2i16_trap",
+ "llvm.nvvm.sust.p.1d.v2i32.trap" => "__nvvm_sust_p_1d_v2i32_trap",
+ "llvm.nvvm.sust.p.1d.v2i8.trap" => "__nvvm_sust_p_1d_v2i8_trap",
+ "llvm.nvvm.sust.p.1d.v4i16.trap" => "__nvvm_sust_p_1d_v4i16_trap",
+ "llvm.nvvm.sust.p.1d.v4i32.trap" => "__nvvm_sust_p_1d_v4i32_trap",
+ "llvm.nvvm.sust.p.1d.v4i8.trap" => "__nvvm_sust_p_1d_v4i8_trap",
+ "llvm.nvvm.sust.p.2d.array.i16.trap" => "__nvvm_sust_p_2d_array_i16_trap",
+ "llvm.nvvm.sust.p.2d.array.i32.trap" => "__nvvm_sust_p_2d_array_i32_trap",
+ "llvm.nvvm.sust.p.2d.array.i8.trap" => "__nvvm_sust_p_2d_array_i8_trap",
+ "llvm.nvvm.sust.p.2d.array.v2i16.trap" => "__nvvm_sust_p_2d_array_v2i16_trap",
+ "llvm.nvvm.sust.p.2d.array.v2i32.trap" => "__nvvm_sust_p_2d_array_v2i32_trap",
+ "llvm.nvvm.sust.p.2d.array.v2i8.trap" => "__nvvm_sust_p_2d_array_v2i8_trap",
+ "llvm.nvvm.sust.p.2d.array.v4i16.trap" => "__nvvm_sust_p_2d_array_v4i16_trap",
+ "llvm.nvvm.sust.p.2d.array.v4i32.trap" => "__nvvm_sust_p_2d_array_v4i32_trap",
+ "llvm.nvvm.sust.p.2d.array.v4i8.trap" => "__nvvm_sust_p_2d_array_v4i8_trap",
+ "llvm.nvvm.sust.p.2d.i16.trap" => "__nvvm_sust_p_2d_i16_trap",
+ "llvm.nvvm.sust.p.2d.i32.trap" => "__nvvm_sust_p_2d_i32_trap",
+ "llvm.nvvm.sust.p.2d.i8.trap" => "__nvvm_sust_p_2d_i8_trap",
+ "llvm.nvvm.sust.p.2d.v2i16.trap" => "__nvvm_sust_p_2d_v2i16_trap",
+ "llvm.nvvm.sust.p.2d.v2i32.trap" => "__nvvm_sust_p_2d_v2i32_trap",
+ "llvm.nvvm.sust.p.2d.v2i8.trap" => "__nvvm_sust_p_2d_v2i8_trap",
+ "llvm.nvvm.sust.p.2d.v4i16.trap" => "__nvvm_sust_p_2d_v4i16_trap",
+ "llvm.nvvm.sust.p.2d.v4i32.trap" => "__nvvm_sust_p_2d_v4i32_trap",
+ "llvm.nvvm.sust.p.2d.v4i8.trap" => "__nvvm_sust_p_2d_v4i8_trap",
+ "llvm.nvvm.sust.p.3d.i16.trap" => "__nvvm_sust_p_3d_i16_trap",
+ "llvm.nvvm.sust.p.3d.i32.trap" => "__nvvm_sust_p_3d_i32_trap",
+ "llvm.nvvm.sust.p.3d.i8.trap" => "__nvvm_sust_p_3d_i8_trap",
+ "llvm.nvvm.sust.p.3d.v2i16.trap" => "__nvvm_sust_p_3d_v2i16_trap",
+ "llvm.nvvm.sust.p.3d.v2i32.trap" => "__nvvm_sust_p_3d_v2i32_trap",
+ "llvm.nvvm.sust.p.3d.v2i8.trap" => "__nvvm_sust_p_3d_v2i8_trap",
+ "llvm.nvvm.sust.p.3d.v4i16.trap" => "__nvvm_sust_p_3d_v4i16_trap",
+ "llvm.nvvm.sust.p.3d.v4i32.trap" => "__nvvm_sust_p_3d_v4i32_trap",
+ "llvm.nvvm.sust.p.3d.v4i8.trap" => "__nvvm_sust_p_3d_v4i8_trap",
+ "llvm.nvvm.swap.lo.hi.b64" => "__nvvm_swap_lo_hi_b64",
+ "llvm.nvvm.trunc.d" => "__nvvm_trunc_d",
+ "llvm.nvvm.trunc.f" => "__nvvm_trunc_f",
+ "llvm.nvvm.trunc.ftz.f" => "__nvvm_trunc_ftz_f",
+ "llvm.nvvm.txq.array.size" => "__nvvm_txq_array_size",
+ "llvm.nvvm.txq.channel.data.type" => "__nvvm_txq_channel_data_type",
+ "llvm.nvvm.txq.channel.order" => "__nvvm_txq_channel_order",
+ "llvm.nvvm.txq.depth" => "__nvvm_txq_depth",
+ "llvm.nvvm.txq.height" => "__nvvm_txq_height",
+ "llvm.nvvm.txq.num.mipmap.levels" => "__nvvm_txq_num_mipmap_levels",
+ "llvm.nvvm.txq.num.samples" => "__nvvm_txq_num_samples",
+ "llvm.nvvm.txq.width" => "__nvvm_txq_width",
+ "llvm.nvvm.ui2d.rm" => "__nvvm_ui2d_rm",
+ "llvm.nvvm.ui2d.rn" => "__nvvm_ui2d_rn",
+ "llvm.nvvm.ui2d.rp" => "__nvvm_ui2d_rp",
+ "llvm.nvvm.ui2d.rz" => "__nvvm_ui2d_rz",
+ "llvm.nvvm.ui2f.rm" => "__nvvm_ui2f_rm",
+ "llvm.nvvm.ui2f.rn" => "__nvvm_ui2f_rn",
+ "llvm.nvvm.ui2f.rp" => "__nvvm_ui2f_rp",
+ "llvm.nvvm.ui2f.rz" => "__nvvm_ui2f_rz",
+ "llvm.nvvm.ull2d.rm" => "__nvvm_ull2d_rm",
+ "llvm.nvvm.ull2d.rn" => "__nvvm_ull2d_rn",
+ "llvm.nvvm.ull2d.rp" => "__nvvm_ull2d_rp",
+ "llvm.nvvm.ull2d.rz" => "__nvvm_ull2d_rz",
+ "llvm.nvvm.ull2f.rm" => "__nvvm_ull2f_rm",
+ "llvm.nvvm.ull2f.rn" => "__nvvm_ull2f_rn",
+ "llvm.nvvm.ull2f.rp" => "__nvvm_ull2f_rp",
+ "llvm.nvvm.ull2f.rz" => "__nvvm_ull2f_rz",
+ // ppc
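+ // PowerPC mappings: AltiVec, VSX, and QPX vector intrinsics plus the HTM
+ // (tbegin/tend/tabort) and scalar ppc intrinsics, each lowered to the
+ // matching GCC __builtin_* name.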
+ "llvm.ppc.addex" => "__builtin_ppc_addex",
+ "llvm.ppc.addf128.round.to.odd" => "__builtin_addf128_round_to_odd",
+ "llvm.ppc.altivec.crypto.vcipher" => "__builtin_altivec_crypto_vcipher",
+ "llvm.ppc.altivec.crypto.vcipherlast" => "__builtin_altivec_crypto_vcipherlast",
+ "llvm.ppc.altivec.crypto.vncipher" => "__builtin_altivec_crypto_vncipher",
+ "llvm.ppc.altivec.crypto.vncipherlast" => "__builtin_altivec_crypto_vncipherlast",
+ "llvm.ppc.altivec.crypto.vpermxor" => "__builtin_altivec_crypto_vpermxor",
+ "llvm.ppc.altivec.crypto.vpermxor.be" => "__builtin_altivec_crypto_vpermxor_be",
+ "llvm.ppc.altivec.crypto.vpmsumb" => "__builtin_altivec_crypto_vpmsumb",
+ "llvm.ppc.altivec.crypto.vpmsumd" => "__builtin_altivec_crypto_vpmsumd",
+ "llvm.ppc.altivec.crypto.vpmsumh" => "__builtin_altivec_crypto_vpmsumh",
+ "llvm.ppc.altivec.crypto.vpmsumw" => "__builtin_altivec_crypto_vpmsumw",
+ "llvm.ppc.altivec.crypto.vsbox" => "__builtin_altivec_crypto_vsbox",
+ "llvm.ppc.altivec.crypto.vshasigmad" => "__builtin_altivec_crypto_vshasigmad",
+ "llvm.ppc.altivec.crypto.vshasigmaw" => "__builtin_altivec_crypto_vshasigmaw",
+ "llvm.ppc.altivec.dss" => "__builtin_altivec_dss",
+ "llvm.ppc.altivec.dssall" => "__builtin_altivec_dssall",
+ "llvm.ppc.altivec.dst" => "__builtin_altivec_dst",
+ "llvm.ppc.altivec.dstst" => "__builtin_altivec_dstst",
+ "llvm.ppc.altivec.dststt" => "__builtin_altivec_dststt",
+ "llvm.ppc.altivec.dstt" => "__builtin_altivec_dstt",
+ "llvm.ppc.altivec.mfvscr" => "__builtin_altivec_mfvscr",
+ "llvm.ppc.altivec.mtvscr" => "__builtin_altivec_mtvscr",
+ "llvm.ppc.altivec.mtvsrbm" => "__builtin_altivec_mtvsrbm",
+ "llvm.ppc.altivec.mtvsrdm" => "__builtin_altivec_mtvsrdm",
+ "llvm.ppc.altivec.mtvsrhm" => "__builtin_altivec_mtvsrhm",
+ "llvm.ppc.altivec.mtvsrqm" => "__builtin_altivec_mtvsrqm",
+ "llvm.ppc.altivec.mtvsrwm" => "__builtin_altivec_mtvsrwm",
+ "llvm.ppc.altivec.vaddcuw" => "__builtin_altivec_vaddcuw",
+ "llvm.ppc.altivec.vaddecuq" => "__builtin_altivec_vaddecuq",
+ "llvm.ppc.altivec.vaddeuqm" => "__builtin_altivec_vaddeuqm",
+ "llvm.ppc.altivec.vaddsbs" => "__builtin_altivec_vaddsbs",
+ "llvm.ppc.altivec.vaddshs" => "__builtin_altivec_vaddshs",
+ "llvm.ppc.altivec.vaddsws" => "__builtin_altivec_vaddsws",
+ "llvm.ppc.altivec.vaddubs" => "__builtin_altivec_vaddubs",
+ "llvm.ppc.altivec.vadduhs" => "__builtin_altivec_vadduhs",
+ "llvm.ppc.altivec.vadduws" => "__builtin_altivec_vadduws",
+ "llvm.ppc.altivec.vavgsb" => "__builtin_altivec_vavgsb",
+ "llvm.ppc.altivec.vavgsh" => "__builtin_altivec_vavgsh",
+ "llvm.ppc.altivec.vavgsw" => "__builtin_altivec_vavgsw",
+ "llvm.ppc.altivec.vavgub" => "__builtin_altivec_vavgub",
+ "llvm.ppc.altivec.vavguh" => "__builtin_altivec_vavguh",
+ "llvm.ppc.altivec.vavguw" => "__builtin_altivec_vavguw",
+ "llvm.ppc.altivec.vbpermd" => "__builtin_altivec_vbpermd",
+ "llvm.ppc.altivec.vbpermq" => "__builtin_altivec_vbpermq",
+ "llvm.ppc.altivec.vcfsx" => "__builtin_altivec_vcfsx",
+ "llvm.ppc.altivec.vcfuged" => "__builtin_altivec_vcfuged",
+ "llvm.ppc.altivec.vcfux" => "__builtin_altivec_vcfux",
+ "llvm.ppc.altivec.vclrlb" => "__builtin_altivec_vclrlb",
+ "llvm.ppc.altivec.vclrrb" => "__builtin_altivec_vclrrb",
+ "llvm.ppc.altivec.vclzdm" => "__builtin_altivec_vclzdm",
+ "llvm.ppc.altivec.vclzlsbb" => "__builtin_altivec_vclzlsbb",
+ "llvm.ppc.altivec.vcmpbfp" => "__builtin_altivec_vcmpbfp",
+ "llvm.ppc.altivec.vcmpbfp.p" => "__builtin_altivec_vcmpbfp_p",
+ "llvm.ppc.altivec.vcmpeqfp" => "__builtin_altivec_vcmpeqfp",
+ "llvm.ppc.altivec.vcmpeqfp.p" => "__builtin_altivec_vcmpeqfp_p",
+ "llvm.ppc.altivec.vcmpequb" => "__builtin_altivec_vcmpequb",
+ "llvm.ppc.altivec.vcmpequb.p" => "__builtin_altivec_vcmpequb_p",
+ "llvm.ppc.altivec.vcmpequd" => "__builtin_altivec_vcmpequd",
+ "llvm.ppc.altivec.vcmpequd.p" => "__builtin_altivec_vcmpequd_p",
+ "llvm.ppc.altivec.vcmpequh" => "__builtin_altivec_vcmpequh",
+ "llvm.ppc.altivec.vcmpequh.p" => "__builtin_altivec_vcmpequh_p",
+ "llvm.ppc.altivec.vcmpequq" => "__builtin_altivec_vcmpequq",
+ "llvm.ppc.altivec.vcmpequq.p" => "__builtin_altivec_vcmpequq_p",
+ "llvm.ppc.altivec.vcmpequw" => "__builtin_altivec_vcmpequw",
+ "llvm.ppc.altivec.vcmpequw.p" => "__builtin_altivec_vcmpequw_p",
+ "llvm.ppc.altivec.vcmpgefp" => "__builtin_altivec_vcmpgefp",
+ "llvm.ppc.altivec.vcmpgefp.p" => "__builtin_altivec_vcmpgefp_p",
+ "llvm.ppc.altivec.vcmpgtfp" => "__builtin_altivec_vcmpgtfp",
+ "llvm.ppc.altivec.vcmpgtfp.p" => "__builtin_altivec_vcmpgtfp_p",
+ "llvm.ppc.altivec.vcmpgtsb" => "__builtin_altivec_vcmpgtsb",
+ "llvm.ppc.altivec.vcmpgtsb.p" => "__builtin_altivec_vcmpgtsb_p",
+ "llvm.ppc.altivec.vcmpgtsd" => "__builtin_altivec_vcmpgtsd",
+ "llvm.ppc.altivec.vcmpgtsd.p" => "__builtin_altivec_vcmpgtsd_p",
+ "llvm.ppc.altivec.vcmpgtsh" => "__builtin_altivec_vcmpgtsh",
+ "llvm.ppc.altivec.vcmpgtsh.p" => "__builtin_altivec_vcmpgtsh_p",
+ "llvm.ppc.altivec.vcmpgtsq" => "__builtin_altivec_vcmpgtsq",
+ "llvm.ppc.altivec.vcmpgtsq.p" => "__builtin_altivec_vcmpgtsq_p",
+ "llvm.ppc.altivec.vcmpgtsw" => "__builtin_altivec_vcmpgtsw",
+ "llvm.ppc.altivec.vcmpgtsw.p" => "__builtin_altivec_vcmpgtsw_p",
+ "llvm.ppc.altivec.vcmpgtub" => "__builtin_altivec_vcmpgtub",
+ "llvm.ppc.altivec.vcmpgtub.p" => "__builtin_altivec_vcmpgtub_p",
+ "llvm.ppc.altivec.vcmpgtud" => "__builtin_altivec_vcmpgtud",
+ "llvm.ppc.altivec.vcmpgtud.p" => "__builtin_altivec_vcmpgtud_p",
+ "llvm.ppc.altivec.vcmpgtuh" => "__builtin_altivec_vcmpgtuh",
+ "llvm.ppc.altivec.vcmpgtuh.p" => "__builtin_altivec_vcmpgtuh_p",
+ "llvm.ppc.altivec.vcmpgtuq" => "__builtin_altivec_vcmpgtuq",
+ "llvm.ppc.altivec.vcmpgtuq.p" => "__builtin_altivec_vcmpgtuq_p",
+ "llvm.ppc.altivec.vcmpgtuw" => "__builtin_altivec_vcmpgtuw",
+ "llvm.ppc.altivec.vcmpgtuw.p" => "__builtin_altivec_vcmpgtuw_p",
+ "llvm.ppc.altivec.vcmpneb" => "__builtin_altivec_vcmpneb",
+ "llvm.ppc.altivec.vcmpneb.p" => "__builtin_altivec_vcmpneb_p",
+ "llvm.ppc.altivec.vcmpneh" => "__builtin_altivec_vcmpneh",
+ "llvm.ppc.altivec.vcmpneh.p" => "__builtin_altivec_vcmpneh_p",
+ "llvm.ppc.altivec.vcmpnew" => "__builtin_altivec_vcmpnew",
+ "llvm.ppc.altivec.vcmpnew.p" => "__builtin_altivec_vcmpnew_p",
+ "llvm.ppc.altivec.vcmpnezb" => "__builtin_altivec_vcmpnezb",
+ "llvm.ppc.altivec.vcmpnezb.p" => "__builtin_altivec_vcmpnezb_p",
+ "llvm.ppc.altivec.vcmpnezh" => "__builtin_altivec_vcmpnezh",
+ "llvm.ppc.altivec.vcmpnezh.p" => "__builtin_altivec_vcmpnezh_p",
+ "llvm.ppc.altivec.vcmpnezw" => "__builtin_altivec_vcmpnezw",
+ "llvm.ppc.altivec.vcmpnezw.p" => "__builtin_altivec_vcmpnezw_p",
+ "llvm.ppc.altivec.vcntmbb" => "__builtin_altivec_vcntmbb",
+ "llvm.ppc.altivec.vcntmbd" => "__builtin_altivec_vcntmbd",
+ "llvm.ppc.altivec.vcntmbh" => "__builtin_altivec_vcntmbh",
+ "llvm.ppc.altivec.vcntmbw" => "__builtin_altivec_vcntmbw",
+ "llvm.ppc.altivec.vctsxs" => "__builtin_altivec_vctsxs",
+ "llvm.ppc.altivec.vctuxs" => "__builtin_altivec_vctuxs",
+ "llvm.ppc.altivec.vctzdm" => "__builtin_altivec_vctzdm",
+ "llvm.ppc.altivec.vctzlsbb" => "__builtin_altivec_vctzlsbb",
+ "llvm.ppc.altivec.vexpandbm" => "__builtin_altivec_vexpandbm",
+ "llvm.ppc.altivec.vexpanddm" => "__builtin_altivec_vexpanddm",
+ "llvm.ppc.altivec.vexpandhm" => "__builtin_altivec_vexpandhm",
+ "llvm.ppc.altivec.vexpandqm" => "__builtin_altivec_vexpandqm",
+ "llvm.ppc.altivec.vexpandwm" => "__builtin_altivec_vexpandwm",
+ "llvm.ppc.altivec.vexptefp" => "__builtin_altivec_vexptefp",
+ "llvm.ppc.altivec.vextddvlx" => "__builtin_altivec_vextddvlx",
+ "llvm.ppc.altivec.vextddvrx" => "__builtin_altivec_vextddvrx",
+ "llvm.ppc.altivec.vextdubvlx" => "__builtin_altivec_vextdubvlx",
+ "llvm.ppc.altivec.vextdubvrx" => "__builtin_altivec_vextdubvrx",
+ "llvm.ppc.altivec.vextduhvlx" => "__builtin_altivec_vextduhvlx",
+ "llvm.ppc.altivec.vextduhvrx" => "__builtin_altivec_vextduhvrx",
+ "llvm.ppc.altivec.vextduwvlx" => "__builtin_altivec_vextduwvlx",
+ "llvm.ppc.altivec.vextduwvrx" => "__builtin_altivec_vextduwvrx",
+ "llvm.ppc.altivec.vextractbm" => "__builtin_altivec_vextractbm",
+ "llvm.ppc.altivec.vextractdm" => "__builtin_altivec_vextractdm",
+ "llvm.ppc.altivec.vextracthm" => "__builtin_altivec_vextracthm",
+ "llvm.ppc.altivec.vextractqm" => "__builtin_altivec_vextractqm",
+ "llvm.ppc.altivec.vextractwm" => "__builtin_altivec_vextractwm",
+ "llvm.ppc.altivec.vextsb2d" => "__builtin_altivec_vextsb2d",
+ "llvm.ppc.altivec.vextsb2w" => "__builtin_altivec_vextsb2w",
+ "llvm.ppc.altivec.vextsd2q" => "__builtin_altivec_vextsd2q",
+ "llvm.ppc.altivec.vextsh2d" => "__builtin_altivec_vextsh2d",
+ "llvm.ppc.altivec.vextsh2w" => "__builtin_altivec_vextsh2w",
+ "llvm.ppc.altivec.vextsw2d" => "__builtin_altivec_vextsw2d",
+ "llvm.ppc.altivec.vgbbd" => "__builtin_altivec_vgbbd",
+ "llvm.ppc.altivec.vgnb" => "__builtin_altivec_vgnb",
+ "llvm.ppc.altivec.vinsblx" => "__builtin_altivec_vinsblx",
+ "llvm.ppc.altivec.vinsbrx" => "__builtin_altivec_vinsbrx",
+ "llvm.ppc.altivec.vinsbvlx" => "__builtin_altivec_vinsbvlx",
+ "llvm.ppc.altivec.vinsbvrx" => "__builtin_altivec_vinsbvrx",
+ "llvm.ppc.altivec.vinsdlx" => "__builtin_altivec_vinsdlx",
+ "llvm.ppc.altivec.vinsdrx" => "__builtin_altivec_vinsdrx",
+ "llvm.ppc.altivec.vinshlx" => "__builtin_altivec_vinshlx",
+ "llvm.ppc.altivec.vinshrx" => "__builtin_altivec_vinshrx",
+ "llvm.ppc.altivec.vinshvlx" => "__builtin_altivec_vinshvlx",
+ "llvm.ppc.altivec.vinshvrx" => "__builtin_altivec_vinshvrx",
+ "llvm.ppc.altivec.vinswlx" => "__builtin_altivec_vinswlx",
+ "llvm.ppc.altivec.vinswrx" => "__builtin_altivec_vinswrx",
+ "llvm.ppc.altivec.vinswvlx" => "__builtin_altivec_vinswvlx",
+ "llvm.ppc.altivec.vinswvrx" => "__builtin_altivec_vinswvrx",
+ "llvm.ppc.altivec.vlogefp" => "__builtin_altivec_vlogefp",
+ "llvm.ppc.altivec.vmaddfp" => "__builtin_altivec_vmaddfp",
+ "llvm.ppc.altivec.vmaxfp" => "__builtin_altivec_vmaxfp",
+ "llvm.ppc.altivec.vmaxsb" => "__builtin_altivec_vmaxsb",
+ "llvm.ppc.altivec.vmaxsd" => "__builtin_altivec_vmaxsd",
+ "llvm.ppc.altivec.vmaxsh" => "__builtin_altivec_vmaxsh",
+ "llvm.ppc.altivec.vmaxsw" => "__builtin_altivec_vmaxsw",
+ "llvm.ppc.altivec.vmaxub" => "__builtin_altivec_vmaxub",
+ "llvm.ppc.altivec.vmaxud" => "__builtin_altivec_vmaxud",
+ "llvm.ppc.altivec.vmaxuh" => "__builtin_altivec_vmaxuh",
+ "llvm.ppc.altivec.vmaxuw" => "__builtin_altivec_vmaxuw",
+ "llvm.ppc.altivec.vmhaddshs" => "__builtin_altivec_vmhaddshs",
+ "llvm.ppc.altivec.vmhraddshs" => "__builtin_altivec_vmhraddshs",
+ "llvm.ppc.altivec.vminfp" => "__builtin_altivec_vminfp",
+ "llvm.ppc.altivec.vminsb" => "__builtin_altivec_vminsb",
+ "llvm.ppc.altivec.vminsd" => "__builtin_altivec_vminsd",
+ "llvm.ppc.altivec.vminsh" => "__builtin_altivec_vminsh",
+ "llvm.ppc.altivec.vminsw" => "__builtin_altivec_vminsw",
+ "llvm.ppc.altivec.vminub" => "__builtin_altivec_vminub",
+ "llvm.ppc.altivec.vminud" => "__builtin_altivec_vminud",
+ "llvm.ppc.altivec.vminuh" => "__builtin_altivec_vminuh",
+ "llvm.ppc.altivec.vminuw" => "__builtin_altivec_vminuw",
+ "llvm.ppc.altivec.vmladduhm" => "__builtin_altivec_vmladduhm",
+ "llvm.ppc.altivec.vmsumcud" => "__builtin_altivec_vmsumcud",
+ "llvm.ppc.altivec.vmsummbm" => "__builtin_altivec_vmsummbm",
+ "llvm.ppc.altivec.vmsumshm" => "__builtin_altivec_vmsumshm",
+ "llvm.ppc.altivec.vmsumshs" => "__builtin_altivec_vmsumshs",
+ "llvm.ppc.altivec.vmsumubm" => "__builtin_altivec_vmsumubm",
+ "llvm.ppc.altivec.vmsumudm" => "__builtin_altivec_vmsumudm",
+ "llvm.ppc.altivec.vmsumuhm" => "__builtin_altivec_vmsumuhm",
+ "llvm.ppc.altivec.vmsumuhs" => "__builtin_altivec_vmsumuhs",
+ "llvm.ppc.altivec.vmulesb" => "__builtin_altivec_vmulesb",
+ "llvm.ppc.altivec.vmulesh" => "__builtin_altivec_vmulesh",
+ "llvm.ppc.altivec.vmulesw" => "__builtin_altivec_vmulesw",
+ "llvm.ppc.altivec.vmuleub" => "__builtin_altivec_vmuleub",
+ "llvm.ppc.altivec.vmuleuh" => "__builtin_altivec_vmuleuh",
+ "llvm.ppc.altivec.vmuleuw" => "__builtin_altivec_vmuleuw",
+ "llvm.ppc.altivec.vmulosb" => "__builtin_altivec_vmulosb",
+ "llvm.ppc.altivec.vmulosh" => "__builtin_altivec_vmulosh",
+ "llvm.ppc.altivec.vmulosw" => "__builtin_altivec_vmulosw",
+ "llvm.ppc.altivec.vmuloub" => "__builtin_altivec_vmuloub",
+ "llvm.ppc.altivec.vmulouh" => "__builtin_altivec_vmulouh",
+ "llvm.ppc.altivec.vmulouw" => "__builtin_altivec_vmulouw",
+ "llvm.ppc.altivec.vnmsubfp" => "__builtin_altivec_vnmsubfp",
+ "llvm.ppc.altivec.vpdepd" => "__builtin_altivec_vpdepd",
+ "llvm.ppc.altivec.vperm" => "__builtin_altivec_vperm_4si",
+ "llvm.ppc.altivec.vpextd" => "__builtin_altivec_vpextd",
+ "llvm.ppc.altivec.vpkpx" => "__builtin_altivec_vpkpx",
+ "llvm.ppc.altivec.vpksdss" => "__builtin_altivec_vpksdss",
+ "llvm.ppc.altivec.vpksdus" => "__builtin_altivec_vpksdus",
+ "llvm.ppc.altivec.vpkshss" => "__builtin_altivec_vpkshss",
+ "llvm.ppc.altivec.vpkshus" => "__builtin_altivec_vpkshus",
+ "llvm.ppc.altivec.vpkswss" => "__builtin_altivec_vpkswss",
+ "llvm.ppc.altivec.vpkswus" => "__builtin_altivec_vpkswus",
+ "llvm.ppc.altivec.vpkudus" => "__builtin_altivec_vpkudus",
+ "llvm.ppc.altivec.vpkuhus" => "__builtin_altivec_vpkuhus",
+ "llvm.ppc.altivec.vpkuwus" => "__builtin_altivec_vpkuwus",
+ "llvm.ppc.altivec.vprtybd" => "__builtin_altivec_vprtybd",
+ "llvm.ppc.altivec.vprtybq" => "__builtin_altivec_vprtybq",
+ "llvm.ppc.altivec.vprtybw" => "__builtin_altivec_vprtybw",
+ "llvm.ppc.altivec.vrefp" => "__builtin_altivec_vrefp",
+ "llvm.ppc.altivec.vrfim" => "__builtin_altivec_vrfim",
+ "llvm.ppc.altivec.vrfin" => "__builtin_altivec_vrfin",
+ "llvm.ppc.altivec.vrfip" => "__builtin_altivec_vrfip",
+ "llvm.ppc.altivec.vrfiz" => "__builtin_altivec_vrfiz",
+ "llvm.ppc.altivec.vrlb" => "__builtin_altivec_vrlb",
+ "llvm.ppc.altivec.vrld" => "__builtin_altivec_vrld",
+ "llvm.ppc.altivec.vrlh" => "__builtin_altivec_vrlh",
+ "llvm.ppc.altivec.vrlw" => "__builtin_altivec_vrlw",
+ "llvm.ppc.altivec.vrsqrtefp" => "__builtin_altivec_vrsqrtefp",
+ "llvm.ppc.altivec.vsel" => "__builtin_altivec_vsel_4si",
+ "llvm.ppc.altivec.vsl" => "__builtin_altivec_vsl",
+ "llvm.ppc.altivec.vslb" => "__builtin_altivec_vslb",
+ "llvm.ppc.altivec.vsldbi" => "__builtin_altivec_vsldbi",
+ "llvm.ppc.altivec.vslh" => "__builtin_altivec_vslh",
+ "llvm.ppc.altivec.vslo" => "__builtin_altivec_vslo",
+ "llvm.ppc.altivec.vslw" => "__builtin_altivec_vslw",
+ "llvm.ppc.altivec.vsr" => "__builtin_altivec_vsr",
+ "llvm.ppc.altivec.vsrab" => "__builtin_altivec_vsrab",
+ "llvm.ppc.altivec.vsrah" => "__builtin_altivec_vsrah",
+ "llvm.ppc.altivec.vsraw" => "__builtin_altivec_vsraw",
+ "llvm.ppc.altivec.vsrb" => "__builtin_altivec_vsrb",
+ "llvm.ppc.altivec.vsrdbi" => "__builtin_altivec_vsrdbi",
+ "llvm.ppc.altivec.vsrh" => "__builtin_altivec_vsrh",
+ "llvm.ppc.altivec.vsro" => "__builtin_altivec_vsro",
+ "llvm.ppc.altivec.vsrw" => "__builtin_altivec_vsrw",
+ "llvm.ppc.altivec.vstribl" => "__builtin_altivec_vstribl",
+ "llvm.ppc.altivec.vstribl.p" => "__builtin_altivec_vstribl_p",
+ "llvm.ppc.altivec.vstribr" => "__builtin_altivec_vstribr",
+ "llvm.ppc.altivec.vstribr.p" => "__builtin_altivec_vstribr_p",
+ "llvm.ppc.altivec.vstrihl" => "__builtin_altivec_vstrihl",
+ "llvm.ppc.altivec.vstrihl.p" => "__builtin_altivec_vstrihl_p",
+ "llvm.ppc.altivec.vstrihr" => "__builtin_altivec_vstrihr",
+ "llvm.ppc.altivec.vstrihr.p" => "__builtin_altivec_vstrihr_p",
+ "llvm.ppc.altivec.vsubcuw" => "__builtin_altivec_vsubcuw",
+ "llvm.ppc.altivec.vsubecuq" => "__builtin_altivec_vsubecuq",
+ "llvm.ppc.altivec.vsubeuqm" => "__builtin_altivec_vsubeuqm",
+ "llvm.ppc.altivec.vsubsbs" => "__builtin_altivec_vsubsbs",
+ "llvm.ppc.altivec.vsubshs" => "__builtin_altivec_vsubshs",
+ "llvm.ppc.altivec.vsubsws" => "__builtin_altivec_vsubsws",
+ "llvm.ppc.altivec.vsububs" => "__builtin_altivec_vsububs",
+ "llvm.ppc.altivec.vsubuhs" => "__builtin_altivec_vsubuhs",
+ "llvm.ppc.altivec.vsubuws" => "__builtin_altivec_vsubuws",
+ "llvm.ppc.altivec.vsum2sws" => "__builtin_altivec_vsum2sws",
+ "llvm.ppc.altivec.vsum4sbs" => "__builtin_altivec_vsum4sbs",
+ "llvm.ppc.altivec.vsum4shs" => "__builtin_altivec_vsum4shs",
+ "llvm.ppc.altivec.vsum4ubs" => "__builtin_altivec_vsum4ubs",
+ "llvm.ppc.altivec.vsumsws" => "__builtin_altivec_vsumsws",
+ "llvm.ppc.altivec.vupkhpx" => "__builtin_altivec_vupkhpx",
+ "llvm.ppc.altivec.vupkhsb" => "__builtin_altivec_vupkhsb",
+ "llvm.ppc.altivec.vupkhsh" => "__builtin_altivec_vupkhsh",
+ "llvm.ppc.altivec.vupkhsw" => "__builtin_altivec_vupkhsw",
+ "llvm.ppc.altivec.vupklpx" => "__builtin_altivec_vupklpx",
+ "llvm.ppc.altivec.vupklsb" => "__builtin_altivec_vupklsb",
+ "llvm.ppc.altivec.vupklsh" => "__builtin_altivec_vupklsh",
+ "llvm.ppc.altivec.vupklsw" => "__builtin_altivec_vupklsw",
+ "llvm.ppc.bcdadd" => "__builtin_ppc_bcdadd",
+ "llvm.ppc.bcdadd.p" => "__builtin_ppc_bcdadd_p",
+ "llvm.ppc.bcdsub" => "__builtin_ppc_bcdsub",
+ "llvm.ppc.bcdsub.p" => "__builtin_ppc_bcdsub_p",
+ "llvm.ppc.bpermd" => "__builtin_bpermd",
+ "llvm.ppc.cfuged" => "__builtin_cfuged",
+ "llvm.ppc.cmpeqb" => "__builtin_ppc_cmpeqb",
+ "llvm.ppc.cmprb" => "__builtin_ppc_cmprb",
+ "llvm.ppc.cntlzdm" => "__builtin_cntlzdm",
+ "llvm.ppc.cnttzdm" => "__builtin_cnttzdm",
+ "llvm.ppc.compare.exp.eq" => "__builtin_ppc_compare_exp_eq",
+ "llvm.ppc.compare.exp.gt" => "__builtin_ppc_compare_exp_gt",
+ "llvm.ppc.compare.exp.lt" => "__builtin_ppc_compare_exp_lt",
+ "llvm.ppc.compare.exp.uo" => "__builtin_ppc_compare_exp_uo",
+ "llvm.ppc.darn" => "__builtin_darn",
+ "llvm.ppc.darn32" => "__builtin_darn_32",
+ "llvm.ppc.darnraw" => "__builtin_darn_raw",
+ "llvm.ppc.dcbf" => "__builtin_dcbf",
+ "llvm.ppc.dcbfl" => "__builtin_ppc_dcbfl",
+ "llvm.ppc.dcbflp" => "__builtin_ppc_dcbflp",
+ "llvm.ppc.dcbst" => "__builtin_ppc_dcbst",
+ "llvm.ppc.dcbt" => "__builtin_ppc_dcbt",
+ "llvm.ppc.dcbtst" => "__builtin_ppc_dcbtst",
+ "llvm.ppc.dcbtstt" => "__builtin_ppc_dcbtstt",
+ "llvm.ppc.dcbtt" => "__builtin_ppc_dcbtt",
+ "llvm.ppc.dcbz" => "__builtin_ppc_dcbz",
+ "llvm.ppc.divde" => "__builtin_divde",
+ "llvm.ppc.divdeu" => "__builtin_divdeu",
+ "llvm.ppc.divf128.round.to.odd" => "__builtin_divf128_round_to_odd",
+ "llvm.ppc.divwe" => "__builtin_divwe",
+ "llvm.ppc.divweu" => "__builtin_divweu",
+ "llvm.ppc.eieio" => "__builtin_ppc_eieio",
+ "llvm.ppc.extract.exp" => "__builtin_ppc_extract_exp",
+ "llvm.ppc.extract.sig" => "__builtin_ppc_extract_sig",
+ "llvm.ppc.fcfid" => "__builtin_ppc_fcfid",
+ "llvm.ppc.fcfud" => "__builtin_ppc_fcfud",
+ "llvm.ppc.fctid" => "__builtin_ppc_fctid",
+ "llvm.ppc.fctidz" => "__builtin_ppc_fctidz",
+ "llvm.ppc.fctiw" => "__builtin_ppc_fctiw",
+ "llvm.ppc.fctiwz" => "__builtin_ppc_fctiwz",
+ "llvm.ppc.fctudz" => "__builtin_ppc_fctudz",
+ "llvm.ppc.fctuwz" => "__builtin_ppc_fctuwz",
+ "llvm.ppc.fmaf128.round.to.odd" => "__builtin_fmaf128_round_to_odd",
+ "llvm.ppc.fmsub" => "__builtin_ppc_fmsub",
+ "llvm.ppc.fmsubs" => "__builtin_ppc_fmsubs",
+ "llvm.ppc.fnmadd" => "__builtin_ppc_fnmadd",
+ "llvm.ppc.fnmadds" => "__builtin_ppc_fnmadds",
+ "llvm.ppc.fre" => "__builtin_ppc_fre",
+ "llvm.ppc.fres" => "__builtin_ppc_fres",
+ "llvm.ppc.frsqrte" => "__builtin_ppc_frsqrte",
+ "llvm.ppc.frsqrtes" => "__builtin_ppc_frsqrtes",
+ "llvm.ppc.fsel" => "__builtin_ppc_fsel",
+ "llvm.ppc.fsels" => "__builtin_ppc_fsels",
+ "llvm.ppc.get.texasr" => "__builtin_get_texasr",
+ "llvm.ppc.get.texasru" => "__builtin_get_texasru",
+ "llvm.ppc.get.tfhar" => "__builtin_get_tfhar",
+ "llvm.ppc.get.tfiar" => "__builtin_get_tfiar",
+ "llvm.ppc.icbt" => "__builtin_ppc_icbt",
+ "llvm.ppc.insert.exp" => "__builtin_ppc_insert_exp",
+ "llvm.ppc.iospace.eieio" => "__builtin_ppc_iospace_eieio",
+ "llvm.ppc.iospace.lwsync" => "__builtin_ppc_iospace_lwsync",
+ "llvm.ppc.iospace.sync" => "__builtin_ppc_iospace_sync",
+ "llvm.ppc.isync" => "__builtin_ppc_isync",
+ "llvm.ppc.load4r" => "__builtin_ppc_load4r",
+ "llvm.ppc.load8r" => "__builtin_ppc_load8r",
+ "llvm.ppc.lwsync" => "__builtin_ppc_lwsync",
+ "llvm.ppc.maddhd" => "__builtin_ppc_maddhd",
+ "llvm.ppc.maddhdu" => "__builtin_ppc_maddhdu",
+ "llvm.ppc.maddld" => "__builtin_ppc_maddld",
+ "llvm.ppc.mfmsr" => "__builtin_ppc_mfmsr",
+ "llvm.ppc.mftbu" => "__builtin_ppc_mftbu",
+ "llvm.ppc.mtfsb0" => "__builtin_ppc_mtfsb0",
+ "llvm.ppc.mtfsb1" => "__builtin_ppc_mtfsb1",
+ "llvm.ppc.mtfsfi" => "__builtin_ppc_mtfsfi",
+ "llvm.ppc.mtmsr" => "__builtin_ppc_mtmsr",
+ "llvm.ppc.mulf128.round.to.odd" => "__builtin_mulf128_round_to_odd",
+ "llvm.ppc.mulhd" => "__builtin_ppc_mulhd",
+ "llvm.ppc.mulhdu" => "__builtin_ppc_mulhdu",
+ "llvm.ppc.mulhw" => "__builtin_ppc_mulhw",
+ "llvm.ppc.mulhwu" => "__builtin_ppc_mulhwu",
+ "llvm.ppc.pack.longdouble" => "__builtin_pack_longdouble",
+ "llvm.ppc.pdepd" => "__builtin_pdepd",
+ "llvm.ppc.pextd" => "__builtin_pextd",
+ "llvm.ppc.qpx.qvfabs" => "__builtin_qpx_qvfabs",
+ "llvm.ppc.qpx.qvfadd" => "__builtin_qpx_qvfadd",
+ "llvm.ppc.qpx.qvfadds" => "__builtin_qpx_qvfadds",
+ "llvm.ppc.qpx.qvfcfid" => "__builtin_qpx_qvfcfid",
+ "llvm.ppc.qpx.qvfcfids" => "__builtin_qpx_qvfcfids",
+ "llvm.ppc.qpx.qvfcfidu" => "__builtin_qpx_qvfcfidu",
+ "llvm.ppc.qpx.qvfcfidus" => "__builtin_qpx_qvfcfidus",
+ "llvm.ppc.qpx.qvfcmpeq" => "__builtin_qpx_qvfcmpeq",
+ "llvm.ppc.qpx.qvfcmpgt" => "__builtin_qpx_qvfcmpgt",
+ "llvm.ppc.qpx.qvfcmplt" => "__builtin_qpx_qvfcmplt",
+ "llvm.ppc.qpx.qvfcpsgn" => "__builtin_qpx_qvfcpsgn",
+ "llvm.ppc.qpx.qvfctid" => "__builtin_qpx_qvfctid",
+ "llvm.ppc.qpx.qvfctidu" => "__builtin_qpx_qvfctidu",
+ "llvm.ppc.qpx.qvfctiduz" => "__builtin_qpx_qvfctiduz",
+ "llvm.ppc.qpx.qvfctidz" => "__builtin_qpx_qvfctidz",
+ "llvm.ppc.qpx.qvfctiw" => "__builtin_qpx_qvfctiw",
+ "llvm.ppc.qpx.qvfctiwu" => "__builtin_qpx_qvfctiwu",
+ "llvm.ppc.qpx.qvfctiwuz" => "__builtin_qpx_qvfctiwuz",
+ "llvm.ppc.qpx.qvfctiwz" => "__builtin_qpx_qvfctiwz",
+ "llvm.ppc.qpx.qvflogical" => "__builtin_qpx_qvflogical",
+ "llvm.ppc.qpx.qvfmadd" => "__builtin_qpx_qvfmadd",
+ "llvm.ppc.qpx.qvfmadds" => "__builtin_qpx_qvfmadds",
+ "llvm.ppc.qpx.qvfmsub" => "__builtin_qpx_qvfmsub",
+ "llvm.ppc.qpx.qvfmsubs" => "__builtin_qpx_qvfmsubs",
+ "llvm.ppc.qpx.qvfmul" => "__builtin_qpx_qvfmul",
+ "llvm.ppc.qpx.qvfmuls" => "__builtin_qpx_qvfmuls",
+ "llvm.ppc.qpx.qvfnabs" => "__builtin_qpx_qvfnabs",
+ "llvm.ppc.qpx.qvfneg" => "__builtin_qpx_qvfneg",
+ "llvm.ppc.qpx.qvfnmadd" => "__builtin_qpx_qvfnmadd",
+ "llvm.ppc.qpx.qvfnmadds" => "__builtin_qpx_qvfnmadds",
+ "llvm.ppc.qpx.qvfnmsub" => "__builtin_qpx_qvfnmsub",
+ "llvm.ppc.qpx.qvfnmsubs" => "__builtin_qpx_qvfnmsubs",
+ "llvm.ppc.qpx.qvfperm" => "__builtin_qpx_qvfperm",
+ "llvm.ppc.qpx.qvfre" => "__builtin_qpx_qvfre",
+ "llvm.ppc.qpx.qvfres" => "__builtin_qpx_qvfres",
+ "llvm.ppc.qpx.qvfrim" => "__builtin_qpx_qvfrim",
+ "llvm.ppc.qpx.qvfrin" => "__builtin_qpx_qvfrin",
+ "llvm.ppc.qpx.qvfrip" => "__builtin_qpx_qvfrip",
+ "llvm.ppc.qpx.qvfriz" => "__builtin_qpx_qvfriz",
+ "llvm.ppc.qpx.qvfrsp" => "__builtin_qpx_qvfrsp",
+ "llvm.ppc.qpx.qvfrsqrte" => "__builtin_qpx_qvfrsqrte",
+ "llvm.ppc.qpx.qvfrsqrtes" => "__builtin_qpx_qvfrsqrtes",
+ "llvm.ppc.qpx.qvfsel" => "__builtin_qpx_qvfsel",
+ "llvm.ppc.qpx.qvfsub" => "__builtin_qpx_qvfsub",
+ "llvm.ppc.qpx.qvfsubs" => "__builtin_qpx_qvfsubs",
+ "llvm.ppc.qpx.qvftstnan" => "__builtin_qpx_qvftstnan",
+ "llvm.ppc.qpx.qvfxmadd" => "__builtin_qpx_qvfxmadd",
+ "llvm.ppc.qpx.qvfxmadds" => "__builtin_qpx_qvfxmadds",
+ "llvm.ppc.qpx.qvfxmul" => "__builtin_qpx_qvfxmul",
+ "llvm.ppc.qpx.qvfxmuls" => "__builtin_qpx_qvfxmuls",
+ "llvm.ppc.qpx.qvfxxcpnmadd" => "__builtin_qpx_qvfxxcpnmadd",
+ "llvm.ppc.qpx.qvfxxcpnmadds" => "__builtin_qpx_qvfxxcpnmadds",
+ "llvm.ppc.qpx.qvfxxmadd" => "__builtin_qpx_qvfxxmadd",
+ "llvm.ppc.qpx.qvfxxmadds" => "__builtin_qpx_qvfxxmadds",
+ "llvm.ppc.qpx.qvfxxnpmadd" => "__builtin_qpx_qvfxxnpmadd",
+ "llvm.ppc.qpx.qvfxxnpmadds" => "__builtin_qpx_qvfxxnpmadds",
+ "llvm.ppc.qpx.qvgpci" => "__builtin_qpx_qvgpci",
+ "llvm.ppc.qpx.qvlfcd" => "__builtin_qpx_qvlfcd",
+ "llvm.ppc.qpx.qvlfcda" => "__builtin_qpx_qvlfcda",
+ "llvm.ppc.qpx.qvlfcs" => "__builtin_qpx_qvlfcs",
+ "llvm.ppc.qpx.qvlfcsa" => "__builtin_qpx_qvlfcsa",
+ "llvm.ppc.qpx.qvlfd" => "__builtin_qpx_qvlfd",
+ "llvm.ppc.qpx.qvlfda" => "__builtin_qpx_qvlfda",
+ "llvm.ppc.qpx.qvlfiwa" => "__builtin_qpx_qvlfiwa",
+ "llvm.ppc.qpx.qvlfiwaa" => "__builtin_qpx_qvlfiwaa",
+ "llvm.ppc.qpx.qvlfiwz" => "__builtin_qpx_qvlfiwz",
+ "llvm.ppc.qpx.qvlfiwza" => "__builtin_qpx_qvlfiwza",
+ "llvm.ppc.qpx.qvlfs" => "__builtin_qpx_qvlfs",
+ "llvm.ppc.qpx.qvlfsa" => "__builtin_qpx_qvlfsa",
+ "llvm.ppc.qpx.qvlpcld" => "__builtin_qpx_qvlpcld",
+ "llvm.ppc.qpx.qvlpcls" => "__builtin_qpx_qvlpcls",
+ "llvm.ppc.qpx.qvlpcrd" => "__builtin_qpx_qvlpcrd",
+ "llvm.ppc.qpx.qvlpcrs" => "__builtin_qpx_qvlpcrs",
+ "llvm.ppc.qpx.qvstfcd" => "__builtin_qpx_qvstfcd",
+ "llvm.ppc.qpx.qvstfcda" => "__builtin_qpx_qvstfcda",
+ "llvm.ppc.qpx.qvstfcs" => "__builtin_qpx_qvstfcs",
+ "llvm.ppc.qpx.qvstfcsa" => "__builtin_qpx_qvstfcsa",
+ "llvm.ppc.qpx.qvstfd" => "__builtin_qpx_qvstfd",
+ "llvm.ppc.qpx.qvstfda" => "__builtin_qpx_qvstfda",
+ "llvm.ppc.qpx.qvstfiw" => "__builtin_qpx_qvstfiw",
+ "llvm.ppc.qpx.qvstfiwa" => "__builtin_qpx_qvstfiwa",
+ "llvm.ppc.qpx.qvstfs" => "__builtin_qpx_qvstfs",
+ "llvm.ppc.qpx.qvstfsa" => "__builtin_qpx_qvstfsa",
+ "llvm.ppc.readflm" => "__builtin_readflm",
+ "llvm.ppc.scalar.extract.expq" => "__builtin_vsx_scalar_extract_expq",
+ "llvm.ppc.scalar.insert.exp.qp" => "__builtin_vsx_scalar_insert_exp_qp",
+ "llvm.ppc.set.texasr" => "__builtin_set_texasr",
+ "llvm.ppc.set.texasru" => "__builtin_set_texasru",
+ "llvm.ppc.set.tfhar" => "__builtin_set_tfhar",
+ "llvm.ppc.set.tfiar" => "__builtin_set_tfiar",
+ "llvm.ppc.setb" => "__builtin_ppc_setb",
+ "llvm.ppc.setflm" => "__builtin_setflm",
+ "llvm.ppc.setrnd" => "__builtin_setrnd",
+ "llvm.ppc.sqrtf128.round.to.odd" => "__builtin_sqrtf128_round_to_odd",
+ "llvm.ppc.stbcx" => "__builtin_ppc_stbcx",
+ "llvm.ppc.stdcx" => "__builtin_ppc_stdcx",
+ "llvm.ppc.stfiw" => "__builtin_ppc_stfiw",
+ "llvm.ppc.store2r" => "__builtin_ppc_store2r",
+ "llvm.ppc.store4r" => "__builtin_ppc_store4r",
+ "llvm.ppc.store8r" => "__builtin_ppc_store8r",
+ "llvm.ppc.stwcx" => "__builtin_ppc_stwcx",
+ "llvm.ppc.subf128.round.to.odd" => "__builtin_subf128_round_to_odd",
+ "llvm.ppc.sync" => "__builtin_ppc_sync",
+ "llvm.ppc.tabort" => "__builtin_tabort",
+ "llvm.ppc.tabortdc" => "__builtin_tabortdc",
+ "llvm.ppc.tabortdci" => "__builtin_tabortdci",
+ "llvm.ppc.tabortwc" => "__builtin_tabortwc",
+ "llvm.ppc.tabortwci" => "__builtin_tabortwci",
+ "llvm.ppc.tbegin" => "__builtin_tbegin",
+ "llvm.ppc.tcheck" => "__builtin_tcheck",
+ "llvm.ppc.tdw" => "__builtin_ppc_tdw",
+ "llvm.ppc.tend" => "__builtin_tend",
+ "llvm.ppc.tendall" => "__builtin_tendall",
+ "llvm.ppc.trap" => "__builtin_ppc_trap",
+ "llvm.ppc.trapd" => "__builtin_ppc_trapd",
+ "llvm.ppc.trechkpt" => "__builtin_trechkpt",
+ "llvm.ppc.treclaim" => "__builtin_treclaim",
+ "llvm.ppc.tresume" => "__builtin_tresume",
+ "llvm.ppc.truncf128.round.to.odd" => "__builtin_truncf128_round_to_odd",
+ "llvm.ppc.tsr" => "__builtin_tsr",
+ "llvm.ppc.tsuspend" => "__builtin_tsuspend",
+ "llvm.ppc.ttest" => "__builtin_ttest",
+ "llvm.ppc.tw" => "__builtin_ppc_tw",
+ "llvm.ppc.unpack.longdouble" => "__builtin_unpack_longdouble",
+ "llvm.ppc.vsx.xsmaxdp" => "__builtin_vsx_xsmaxdp",
+ "llvm.ppc.vsx.xsmindp" => "__builtin_vsx_xsmindp",
+ "llvm.ppc.vsx.xvcmpeqdp" => "__builtin_vsx_xvcmpeqdp",
+ "llvm.ppc.vsx.xvcmpeqdp.p" => "__builtin_vsx_xvcmpeqdp_p",
+ "llvm.ppc.vsx.xvcmpeqsp" => "__builtin_vsx_xvcmpeqsp",
+ "llvm.ppc.vsx.xvcmpeqsp.p" => "__builtin_vsx_xvcmpeqsp_p",
+ "llvm.ppc.vsx.xvcmpgedp" => "__builtin_vsx_xvcmpgedp",
+ "llvm.ppc.vsx.xvcmpgedp.p" => "__builtin_vsx_xvcmpgedp_p",
+ "llvm.ppc.vsx.xvcmpgesp" => "__builtin_vsx_xvcmpgesp",
+ "llvm.ppc.vsx.xvcmpgesp.p" => "__builtin_vsx_xvcmpgesp_p",
+ "llvm.ppc.vsx.xvcmpgtdp" => "__builtin_vsx_xvcmpgtdp",
+ "llvm.ppc.vsx.xvcmpgtdp.p" => "__builtin_vsx_xvcmpgtdp_p",
+ "llvm.ppc.vsx.xvcmpgtsp" => "__builtin_vsx_xvcmpgtsp",
+ "llvm.ppc.vsx.xvcmpgtsp.p" => "__builtin_vsx_xvcmpgtsp_p",
+ "llvm.ppc.vsx.xvdivdp" => "__builtin_vsx_xvdivdp",
+ "llvm.ppc.vsx.xvdivsp" => "__builtin_vsx_xvdivsp",
+ "llvm.ppc.vsx.xvmaxdp" => "__builtin_vsx_xvmaxdp",
+ "llvm.ppc.vsx.xvmaxsp" => "__builtin_vsx_xvmaxsp",
+ "llvm.ppc.vsx.xvmindp" => "__builtin_vsx_xvmindp",
+ "llvm.ppc.vsx.xvminsp" => "__builtin_vsx_xvminsp",
+ "llvm.ppc.vsx.xvredp" => "__builtin_vsx_xvredp",
+ "llvm.ppc.vsx.xvresp" => "__builtin_vsx_xvresp",
+ "llvm.ppc.vsx.xvrsqrtedp" => "__builtin_vsx_xvrsqrtedp",
+ "llvm.ppc.vsx.xvrsqrtesp" => "__builtin_vsx_xvrsqrtesp",
+ "llvm.ppc.vsx.xxblendvb" => "__builtin_vsx_xxblendvb",
+ "llvm.ppc.vsx.xxblendvd" => "__builtin_vsx_xxblendvd",
+ "llvm.ppc.vsx.xxblendvh" => "__builtin_vsx_xxblendvh",
+ "llvm.ppc.vsx.xxblendvw" => "__builtin_vsx_xxblendvw",
+ "llvm.ppc.vsx.xxleqv" => "__builtin_vsx_xxleqv",
+ "llvm.ppc.vsx.xxpermx" => "__builtin_vsx_xxpermx",
+ // ptx
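+ // Older llvm.ptx.* read intrinsics; these parallel the
+ // llvm.nvvm.read.ptx.sreg.* entries above.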
+ "llvm.ptx.bar.sync" => "__builtin_ptx_bar_sync",
+ "llvm.ptx.read.clock" => "__builtin_ptx_read_clock",
+ "llvm.ptx.read.clock64" => "__builtin_ptx_read_clock64",
+ "llvm.ptx.read.gridid" => "__builtin_ptx_read_gridid",
+ "llvm.ptx.read.laneid" => "__builtin_ptx_read_laneid",
+ "llvm.ptx.read.lanemask.eq" => "__builtin_ptx_read_lanemask_eq",
+ "llvm.ptx.read.lanemask.ge" => "__builtin_ptx_read_lanemask_ge",
+ "llvm.ptx.read.lanemask.gt" => "__builtin_ptx_read_lanemask_gt",
+ "llvm.ptx.read.lanemask.le" => "__builtin_ptx_read_lanemask_le",
+ "llvm.ptx.read.lanemask.lt" => "__builtin_ptx_read_lanemask_lt",
+ "llvm.ptx.read.nsmid" => "__builtin_ptx_read_nsmid",
+ "llvm.ptx.read.nwarpid" => "__builtin_ptx_read_nwarpid",
+ "llvm.ptx.read.pm0" => "__builtin_ptx_read_pm0",
+ "llvm.ptx.read.pm1" => "__builtin_ptx_read_pm1",
+ "llvm.ptx.read.pm2" => "__builtin_ptx_read_pm2",
+ "llvm.ptx.read.pm3" => "__builtin_ptx_read_pm3",
+ "llvm.ptx.read.smid" => "__builtin_ptx_read_smid",
+ "llvm.ptx.read.warpid" => "__builtin_ptx_read_warpid",
+ // s390
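+ // SystemZ: vector builtins plus the transactional-execution (tx) helpers.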
+ "llvm.s390.efpc" => "__builtin_s390_efpc",
+ "llvm.s390.etnd" => "__builtin_tx_nesting_depth",
+ "llvm.s390.lcbb" => "__builtin_s390_lcbb",
+ "llvm.s390.ppa.txassist" => "__builtin_tx_assist",
+ "llvm.s390.sfpc" => "__builtin_s390_sfpc",
+ "llvm.s390.tend" => "__builtin_tend",
+ "llvm.s390.vcfn" => "__builtin_s390_vcfn",
+ "llvm.s390.vclfnhs" => "__builtin_s390_vclfnhs",
+ "llvm.s390.vclfnls" => "__builtin_s390_vclfnls",
+ "llvm.s390.vcnf" => "__builtin_s390_vcnf",
+ "llvm.s390.vcrnfs" => "__builtin_s390_vcrnfs",
+ "llvm.s390.vlbb" => "__builtin_s390_vlbb",
+ "llvm.s390.vll" => "__builtin_s390_vll",
+ "llvm.s390.vlrl" => "__builtin_s390_vlrl",
+ "llvm.s390.vmslg" => "__builtin_s390_vmslg",
+ "llvm.s390.vpdi" => "__builtin_s390_vpdi",
+ "llvm.s390.vperm" => "__builtin_s390_vperm",
+ "llvm.s390.vsld" => "__builtin_s390_vsld",
+ "llvm.s390.vsldb" => "__builtin_s390_vsldb",
+ "llvm.s390.vsrd" => "__builtin_s390_vsrd",
+ "llvm.s390.vstl" => "__builtin_s390_vstl",
+ "llvm.s390.vstrl" => "__builtin_s390_vstrl",
+ // ve
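+ // NEC SX-Aurora Vector Engine (VE) intrinsics.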
+ "llvm.ve.vl.extract.vm512l" => "__builtin_ve_vl_extract_vm512l",
+ "llvm.ve.vl.extract.vm512u" => "__builtin_ve_vl_extract_vm512u",
+ "llvm.ve.vl.insert.vm512l" => "__builtin_ve_vl_insert_vm512l",
+ "llvm.ve.vl.insert.vm512u" => "__builtin_ve_vl_insert_vm512u",
+ "llvm.ve.vl.pack.f32a" => "__builtin_ve_vl_pack_f32a",
+ "llvm.ve.vl.pack.f32p" => "__builtin_ve_vl_pack_f32p",
+ // x86
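+ // x86: all targets here are GCC __builtin_ia32_* builtins.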
+ "llvm.x86.3dnow.pavgusb" => "__builtin_ia32_pavgusb",
+ "llvm.x86.3dnow.pf2id" => "__builtin_ia32_pf2id",
+ "llvm.x86.3dnow.pfacc" => "__builtin_ia32_pfacc",
+ "llvm.x86.3dnow.pfadd" => "__builtin_ia32_pfadd",
+ "llvm.x86.3dnow.pfcmpeq" => "__builtin_ia32_pfcmpeq",
+ "llvm.x86.3dnow.pfcmpge" => "__builtin_ia32_pfcmpge",
+ "llvm.x86.3dnow.pfcmpgt" => "__builtin_ia32_pfcmpgt",
+ "llvm.x86.3dnow.pfmax" => "__builtin_ia32_pfmax",
+ "llvm.x86.3dnow.pfmin" => "__builtin_ia32_pfmin",
+ "llvm.x86.3dnow.pfmul" => "__builtin_ia32_pfmul",
+ "llvm.x86.3dnow.pfrcp" => "__builtin_ia32_pfrcp",
+ "llvm.x86.3dnow.pfrcpit1" => "__builtin_ia32_pfrcpit1",
+ "llvm.x86.3dnow.pfrcpit2" => "__builtin_ia32_pfrcpit2",
+ "llvm.x86.3dnow.pfrsqit1" => "__builtin_ia32_pfrsqit1",
+ "llvm.x86.3dnow.pfrsqrt" => "__builtin_ia32_pfrsqrt",
+ "llvm.x86.3dnow.pfsub" => "__builtin_ia32_pfsub",
+ "llvm.x86.3dnow.pfsubr" => "__builtin_ia32_pfsubr",
+ "llvm.x86.3dnow.pi2fd" => "__builtin_ia32_pi2fd",
+ "llvm.x86.3dnow.pmulhrw" => "__builtin_ia32_pmulhrw",
+ "llvm.x86.3dnowa.pf2iw" => "__builtin_ia32_pf2iw",
+ "llvm.x86.3dnowa.pfnacc" => "__builtin_ia32_pfnacc",
+ "llvm.x86.3dnowa.pfpnacc" => "__builtin_ia32_pfpnacc",
+ "llvm.x86.3dnowa.pi2fw" => "__builtin_ia32_pi2fw",
+ "llvm.x86.addcarry.u32" => "__builtin_ia32_addcarry_u32",
+ "llvm.x86.addcarry.u64" => "__builtin_ia32_addcarry_u64",
+ "llvm.x86.addcarryx.u32" => "__builtin_ia32_addcarryx_u32",
+ "llvm.x86.addcarryx.u64" => "__builtin_ia32_addcarryx_u64",
+ "llvm.x86.aesni.aesdec" => "__builtin_ia32_aesdec128",
+ "llvm.x86.aesni.aesdec.256" => "__builtin_ia32_aesdec256",
+ "llvm.x86.aesni.aesdec.512" => "__builtin_ia32_aesdec512",
+ "llvm.x86.aesni.aesdeclast" => "__builtin_ia32_aesdeclast128",
+ "llvm.x86.aesni.aesdeclast.256" => "__builtin_ia32_aesdeclast256",
+ "llvm.x86.aesni.aesdeclast.512" => "__builtin_ia32_aesdeclast512",
+ "llvm.x86.aesni.aesenc" => "__builtin_ia32_aesenc128",
+ "llvm.x86.aesni.aesenc.256" => "__builtin_ia32_aesenc256",
+ "llvm.x86.aesni.aesenc.512" => "__builtin_ia32_aesenc512",
+ "llvm.x86.aesni.aesenclast" => "__builtin_ia32_aesenclast128",
+ "llvm.x86.aesni.aesenclast.256" => "__builtin_ia32_aesenclast256",
+ "llvm.x86.aesni.aesenclast.512" => "__builtin_ia32_aesenclast512",
+ "llvm.x86.aesni.aesimc" => "__builtin_ia32_aesimc128",
+ "llvm.x86.aesni.aeskeygenassist" => "__builtin_ia32_aeskeygenassist128",
+ "llvm.x86.avx.addsub.pd.256" => "__builtin_ia32_addsubpd256",
+ "llvm.x86.avx.addsub.ps.256" => "__builtin_ia32_addsubps256",
+ "llvm.x86.avx.blend.pd.256" => "__builtin_ia32_blendpd256",
+ "llvm.x86.avx.blend.ps.256" => "__builtin_ia32_blendps256",
+ "llvm.x86.avx.blendv.pd.256" => "__builtin_ia32_blendvpd256",
+ "llvm.x86.avx.blendv.ps.256" => "__builtin_ia32_blendvps256",
+ "llvm.x86.avx.cmp.pd.256" => "__builtin_ia32_cmppd256",
+ "llvm.x86.avx.cmp.ps.256" => "__builtin_ia32_cmpps256",
+ "llvm.x86.avx.cvt.pd2.ps.256" => "__builtin_ia32_cvtpd2ps256",
+ "llvm.x86.avx.cvt.pd2dq.256" => "__builtin_ia32_cvtpd2dq256",
+ "llvm.x86.avx.cvt.ps2.pd.256" => "__builtin_ia32_cvtps2pd256",
+ "llvm.x86.avx.cvt.ps2dq.256" => "__builtin_ia32_cvtps2dq256",
+ "llvm.x86.avx.cvtdq2.pd.256" => "__builtin_ia32_cvtdq2pd256",
+ "llvm.x86.avx.cvtdq2.ps.256" => "__builtin_ia32_cvtdq2ps256",
+ "llvm.x86.avx.cvtt.pd2dq.256" => "__builtin_ia32_cvttpd2dq256",
+ "llvm.x86.avx.cvtt.ps2dq.256" => "__builtin_ia32_cvttps2dq256",
+ "llvm.x86.avx.dp.ps.256" => "__builtin_ia32_dpps256",
+ "llvm.x86.avx.hadd.pd.256" => "__builtin_ia32_haddpd256",
+ "llvm.x86.avx.hadd.ps.256" => "__builtin_ia32_haddps256",
+ "llvm.x86.avx.hsub.pd.256" => "__builtin_ia32_hsubpd256",
+ "llvm.x86.avx.hsub.ps.256" => "__builtin_ia32_hsubps256",
+ "llvm.x86.avx.ldu.dq.256" => "__builtin_ia32_lddqu256",
+ "llvm.x86.avx.maskload.pd" => "__builtin_ia32_maskloadpd",
+ "llvm.x86.avx.maskload.pd.256" => "__builtin_ia32_maskloadpd256",
+ "llvm.x86.avx.maskload.ps" => "__builtin_ia32_maskloadps",
+ "llvm.x86.avx.maskload.ps.256" => "__builtin_ia32_maskloadps256",
+ "llvm.x86.avx.maskstore.pd" => "__builtin_ia32_maskstorepd",
+ "llvm.x86.avx.maskstore.pd.256" => "__builtin_ia32_maskstorepd256",
+ "llvm.x86.avx.maskstore.ps" => "__builtin_ia32_maskstoreps",
+ "llvm.x86.avx.maskstore.ps.256" => "__builtin_ia32_maskstoreps256",
+ "llvm.x86.avx.max.pd.256" => "__builtin_ia32_maxpd256",
+ "llvm.x86.avx.max.ps.256" => "__builtin_ia32_maxps256",
+ "llvm.x86.avx.min.pd.256" => "__builtin_ia32_minpd256",
+ "llvm.x86.avx.min.ps.256" => "__builtin_ia32_minps256",
+ "llvm.x86.avx.movmsk.pd.256" => "__builtin_ia32_movmskpd256",
+ "llvm.x86.avx.movmsk.ps.256" => "__builtin_ia32_movmskps256",
+ "llvm.x86.avx.ptestc.256" => "__builtin_ia32_ptestc256",
+ "llvm.x86.avx.ptestnzc.256" => "__builtin_ia32_ptestnzc256",
+ "llvm.x86.avx.ptestz.256" => "__builtin_ia32_ptestz256",
+ "llvm.x86.avx.rcp.ps.256" => "__builtin_ia32_rcpps256",
+ "llvm.x86.avx.round.pd.256" => "__builtin_ia32_roundpd256",
+ "llvm.x86.avx.round.ps.256" => "__builtin_ia32_roundps256",
+ "llvm.x86.avx.rsqrt.ps.256" => "__builtin_ia32_rsqrtps256",
+ "llvm.x86.avx.sqrt.pd.256" => "__builtin_ia32_sqrtpd256",
+ "llvm.x86.avx.sqrt.ps.256" => "__builtin_ia32_sqrtps256",
+ "llvm.x86.avx.storeu.dq.256" => "__builtin_ia32_storedqu256",
+ "llvm.x86.avx.storeu.pd.256" => "__builtin_ia32_storeupd256",
+ "llvm.x86.avx.storeu.ps.256" => "__builtin_ia32_storeups256",
+ "llvm.x86.avx.vbroadcastf128.pd.256" => "__builtin_ia32_vbroadcastf128_pd256",
+ "llvm.x86.avx.vbroadcastf128.ps.256" => "__builtin_ia32_vbroadcastf128_ps256",
+ "llvm.x86.avx.vextractf128.pd.256" => "__builtin_ia32_vextractf128_pd256",
+ "llvm.x86.avx.vextractf128.ps.256" => "__builtin_ia32_vextractf128_ps256",
+ "llvm.x86.avx.vextractf128.si.256" => "__builtin_ia32_vextractf128_si256",
+ "llvm.x86.avx.vinsertf128.pd.256" => "__builtin_ia32_vinsertf128_pd256",
+ "llvm.x86.avx.vinsertf128.ps.256" => "__builtin_ia32_vinsertf128_ps256",
+ "llvm.x86.avx.vinsertf128.si.256" => "__builtin_ia32_vinsertf128_si256",
+ "llvm.x86.avx.vperm2f128.pd.256" => "__builtin_ia32_vperm2f128_pd256",
+ "llvm.x86.avx.vperm2f128.ps.256" => "__builtin_ia32_vperm2f128_ps256",
+ "llvm.x86.avx.vperm2f128.si.256" => "__builtin_ia32_vperm2f128_si256",
+ "llvm.x86.avx.vpermilvar.pd" => "__builtin_ia32_vpermilvarpd",
+ "llvm.x86.avx.vpermilvar.pd.256" => "__builtin_ia32_vpermilvarpd256",
+ "llvm.x86.avx.vpermilvar.ps" => "__builtin_ia32_vpermilvarps",
+ "llvm.x86.avx.vpermilvar.ps.256" => "__builtin_ia32_vpermilvarps256",
+ "llvm.x86.avx.vtestc.pd" => "__builtin_ia32_vtestcpd",
+ "llvm.x86.avx.vtestc.pd.256" => "__builtin_ia32_vtestcpd256",
+ "llvm.x86.avx.vtestc.ps" => "__builtin_ia32_vtestcps",
+ "llvm.x86.avx.vtestc.ps.256" => "__builtin_ia32_vtestcps256",
+ "llvm.x86.avx.vtestnzc.pd" => "__builtin_ia32_vtestnzcpd",
+ "llvm.x86.avx.vtestnzc.pd.256" => "__builtin_ia32_vtestnzcpd256",
+ "llvm.x86.avx.vtestnzc.ps" => "__builtin_ia32_vtestnzcps",
+ "llvm.x86.avx.vtestnzc.ps.256" => "__builtin_ia32_vtestnzcps256",
+ "llvm.x86.avx.vtestz.pd" => "__builtin_ia32_vtestzpd",
+ "llvm.x86.avx.vtestz.pd.256" => "__builtin_ia32_vtestzpd256",
+ "llvm.x86.avx.vtestz.ps" => "__builtin_ia32_vtestzps",
+ "llvm.x86.avx.vtestz.ps.256" => "__builtin_ia32_vtestzps256",
+ "llvm.x86.avx.vzeroall" => "__builtin_ia32_vzeroall",
+ "llvm.x86.avx.vzeroupper" => "__builtin_ia32_vzeroupper",
+ "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gatherd_d",
+ "llvm.x86.avx2.gather.d.d.256" => "__builtin_ia32_gatherd_d256",
+ "llvm.x86.avx2.gather.d.pd" => "__builtin_ia32_gatherd_pd",
+ "llvm.x86.avx2.gather.d.pd.256" => "__builtin_ia32_gatherd_pd256",
+ "llvm.x86.avx2.gather.d.ps" => "__builtin_ia32_gatherd_ps",
+ "llvm.x86.avx2.gather.d.ps.256" => "__builtin_ia32_gatherd_ps256",
+ "llvm.x86.avx2.gather.d.q" => "__builtin_ia32_gatherd_q",
+ "llvm.x86.avx2.gather.d.q.256" => "__builtin_ia32_gatherd_q256",
+ "llvm.x86.avx2.gather.q.d" => "__builtin_ia32_gatherq_d",
+ "llvm.x86.avx2.gather.q.d.256" => "__builtin_ia32_gatherq_d256",
+ "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherq_pd",
+ "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherq_pd256",
+ "llvm.x86.avx2.gather.q.ps" => "__builtin_ia32_gatherq_ps",
+ "llvm.x86.avx2.gather.q.ps.256" => "__builtin_ia32_gatherq_ps256",
+ "llvm.x86.avx2.gather.q.q" => "__builtin_ia32_gatherq_q",
+ "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherq_q256",
+ "llvm.x86.avx2.maskload.d" => "__builtin_ia32_maskloadd",
+ "llvm.x86.avx2.maskload.d.256" => "__builtin_ia32_maskloadd256",
+ "llvm.x86.avx2.maskload.q" => "__builtin_ia32_maskloadq",
+ "llvm.x86.avx2.maskload.q.256" => "__builtin_ia32_maskloadq256",
+ "llvm.x86.avx2.maskstore.d" => "__builtin_ia32_maskstored",
+ "llvm.x86.avx2.maskstore.d.256" => "__builtin_ia32_maskstored256",
+ "llvm.x86.avx2.maskstore.q" => "__builtin_ia32_maskstoreq",
+ "llvm.x86.avx2.maskstore.q.256" => "__builtin_ia32_maskstoreq256",
+ "llvm.x86.avx2.movntdqa" => "__builtin_ia32_movntdqa256",
+ "llvm.x86.avx2.mpsadbw" => "__builtin_ia32_mpsadbw256",
+ "llvm.x86.avx2.pabs.b" => "__builtin_ia32_pabsb256",
+ "llvm.x86.avx2.pabs.d" => "__builtin_ia32_pabsd256",
+ "llvm.x86.avx2.pabs.w" => "__builtin_ia32_pabsw256",
+ "llvm.x86.avx2.packssdw" => "__builtin_ia32_packssdw256",
+ "llvm.x86.avx2.packsswb" => "__builtin_ia32_packsswb256",
+ "llvm.x86.avx2.packusdw" => "__builtin_ia32_packusdw256",
+ "llvm.x86.avx2.packuswb" => "__builtin_ia32_packuswb256",
+ "llvm.x86.avx2.padds.b" => "__builtin_ia32_paddsb256",
+ "llvm.x86.avx2.padds.w" => "__builtin_ia32_paddsw256",
+ "llvm.x86.avx2.paddus.b" => "__builtin_ia32_paddusb256",
+ "llvm.x86.avx2.paddus.w" => "__builtin_ia32_paddusw256",
+ "llvm.x86.avx2.pavg.b" => "__builtin_ia32_pavgb256",
+ "llvm.x86.avx2.pavg.w" => "__builtin_ia32_pavgw256",
+ "llvm.x86.avx2.pblendd.128" => "__builtin_ia32_pblendd128",
+ "llvm.x86.avx2.pblendd.256" => "__builtin_ia32_pblendd256",
+ "llvm.x86.avx2.pblendvb" => "__builtin_ia32_pblendvb256",
+ "llvm.x86.avx2.pblendw" => "__builtin_ia32_pblendw256",
+ "llvm.x86.avx2.pbroadcastb.128" => "__builtin_ia32_pbroadcastb128",
+ "llvm.x86.avx2.pbroadcastb.256" => "__builtin_ia32_pbroadcastb256",
+ "llvm.x86.avx2.pbroadcastd.128" => "__builtin_ia32_pbroadcastd128",
+ "llvm.x86.avx2.pbroadcastd.256" => "__builtin_ia32_pbroadcastd256",
+ "llvm.x86.avx2.pbroadcastq.128" => "__builtin_ia32_pbroadcastq128",
+ "llvm.x86.avx2.pbroadcastq.256" => "__builtin_ia32_pbroadcastq256",
+ "llvm.x86.avx2.pbroadcastw.128" => "__builtin_ia32_pbroadcastw128",
+ "llvm.x86.avx2.pbroadcastw.256" => "__builtin_ia32_pbroadcastw256",
+ "llvm.x86.avx2.permd" => "__builtin_ia32_permvarsi256",
+ "llvm.x86.avx2.permps" => "__builtin_ia32_permvarsf256",
+ "llvm.x86.avx2.phadd.d" => "__builtin_ia32_phaddd256",
+ "llvm.x86.avx2.phadd.sw" => "__builtin_ia32_phaddsw256",
+ "llvm.x86.avx2.phadd.w" => "__builtin_ia32_phaddw256",
+ "llvm.x86.avx2.phsub.d" => "__builtin_ia32_phsubd256",
+ "llvm.x86.avx2.phsub.sw" => "__builtin_ia32_phsubsw256",
+ "llvm.x86.avx2.phsub.w" => "__builtin_ia32_phsubw256",
+ "llvm.x86.avx2.pmadd.ub.sw" => "__builtin_ia32_pmaddubsw256",
+ "llvm.x86.avx2.pmadd.wd" => "__builtin_ia32_pmaddwd256",
+ "llvm.x86.avx2.pmaxs.b" => "__builtin_ia32_pmaxsb256",
+ "llvm.x86.avx2.pmaxs.d" => "__builtin_ia32_pmaxsd256",
+ "llvm.x86.avx2.pmaxs.w" => "__builtin_ia32_pmaxsw256",
+ "llvm.x86.avx2.pmaxu.b" => "__builtin_ia32_pmaxub256",
+ "llvm.x86.avx2.pmaxu.d" => "__builtin_ia32_pmaxud256",
+ "llvm.x86.avx2.pmaxu.w" => "__builtin_ia32_pmaxuw256",
+ "llvm.x86.avx2.pmins.b" => "__builtin_ia32_pminsb256",
+ "llvm.x86.avx2.pmins.d" => "__builtin_ia32_pminsd256",
+ "llvm.x86.avx2.pmins.w" => "__builtin_ia32_pminsw256",
+ "llvm.x86.avx2.pminu.b" => "__builtin_ia32_pminub256",
+ "llvm.x86.avx2.pminu.d" => "__builtin_ia32_pminud256",
+ "llvm.x86.avx2.pminu.w" => "__builtin_ia32_pminuw256",
+ "llvm.x86.avx2.pmovmskb" => "__builtin_ia32_pmovmskb256",
+ "llvm.x86.avx2.pmovsxbd" => "__builtin_ia32_pmovsxbd256",
+ "llvm.x86.avx2.pmovsxbq" => "__builtin_ia32_pmovsxbq256",
+ "llvm.x86.avx2.pmovsxbw" => "__builtin_ia32_pmovsxbw256",
+ "llvm.x86.avx2.pmovsxdq" => "__builtin_ia32_pmovsxdq256",
+ "llvm.x86.avx2.pmovsxwd" => "__builtin_ia32_pmovsxwd256",
+ "llvm.x86.avx2.pmovsxwq" => "__builtin_ia32_pmovsxwq256",
+ "llvm.x86.avx2.pmovzxbd" => "__builtin_ia32_pmovzxbd256",
+ "llvm.x86.avx2.pmovzxbq" => "__builtin_ia32_pmovzxbq256",
+ "llvm.x86.avx2.pmovzxbw" => "__builtin_ia32_pmovzxbw256",
+ "llvm.x86.avx2.pmovzxdq" => "__builtin_ia32_pmovzxdq256",
+ "llvm.x86.avx2.pmovzxwd" => "__builtin_ia32_pmovzxwd256",
+ "llvm.x86.avx2.pmovzxwq" => "__builtin_ia32_pmovzxwq256",
+ "llvm.x86.avx2.pmul.dq" => "__builtin_ia32_pmuldq256",
+ "llvm.x86.avx2.pmul.hr.sw" => "__builtin_ia32_pmulhrsw256",
+ "llvm.x86.avx2.pmulh.w" => "__builtin_ia32_pmulhw256",
+ "llvm.x86.avx2.pmulhu.w" => "__builtin_ia32_pmulhuw256",
+ "llvm.x86.avx2.pmulu.dq" => "__builtin_ia32_pmuludq256",
+ "llvm.x86.avx2.psad.bw" => "__builtin_ia32_psadbw256",
+ "llvm.x86.avx2.pshuf.b" => "__builtin_ia32_pshufb256",
+ "llvm.x86.avx2.psign.b" => "__builtin_ia32_psignb256",
+ "llvm.x86.avx2.psign.d" => "__builtin_ia32_psignd256",
+ "llvm.x86.avx2.psign.w" => "__builtin_ia32_psignw256",
+ "llvm.x86.avx2.psll.d" => "__builtin_ia32_pslld256",
+ "llvm.x86.avx2.psll.dq" => "__builtin_ia32_pslldqi256",
+ "llvm.x86.avx2.psll.dq.bs" => "__builtin_ia32_pslldqi256_byteshift",
+ "llvm.x86.avx2.psll.q" => "__builtin_ia32_psllq256",
+ "llvm.x86.avx2.psll.w" => "__builtin_ia32_psllw256",
+ "llvm.x86.avx2.pslli.d" => "__builtin_ia32_pslldi256",
+ "llvm.x86.avx2.pslli.q" => "__builtin_ia32_psllqi256",
+ "llvm.x86.avx2.pslli.w" => "__builtin_ia32_psllwi256",
+ "llvm.x86.avx2.psllv.d" => "__builtin_ia32_psllv4si",
+ "llvm.x86.avx2.psllv.d.256" => "__builtin_ia32_psllv8si",
+ "llvm.x86.avx2.psllv.q" => "__builtin_ia32_psllv2di",
+ "llvm.x86.avx2.psllv.q.256" => "__builtin_ia32_psllv4di",
+ "llvm.x86.avx2.psra.d" => "__builtin_ia32_psrad256",
+ "llvm.x86.avx2.psra.w" => "__builtin_ia32_psraw256",
+ "llvm.x86.avx2.psrai.d" => "__builtin_ia32_psradi256",
+ "llvm.x86.avx2.psrai.w" => "__builtin_ia32_psrawi256",
+ "llvm.x86.avx2.psrav.d" => "__builtin_ia32_psrav4si",
+ "llvm.x86.avx2.psrav.d.256" => "__builtin_ia32_psrav8si",
+ "llvm.x86.avx2.psrl.d" => "__builtin_ia32_psrld256",
+ "llvm.x86.avx2.psrl.dq" => "__builtin_ia32_psrldqi256",
+ "llvm.x86.avx2.psrl.dq.bs" => "__builtin_ia32_psrldqi256_byteshift",
+ "llvm.x86.avx2.psrl.q" => "__builtin_ia32_psrlq256",
+ "llvm.x86.avx2.psrl.w" => "__builtin_ia32_psrlw256",
+ "llvm.x86.avx2.psrli.d" => "__builtin_ia32_psrldi256",
+ "llvm.x86.avx2.psrli.q" => "__builtin_ia32_psrlqi256",
+ "llvm.x86.avx2.psrli.w" => "__builtin_ia32_psrlwi256",
+ "llvm.x86.avx2.psrlv.d" => "__builtin_ia32_psrlv4si",
+ "llvm.x86.avx2.psrlv.d.256" => "__builtin_ia32_psrlv8si",
+ "llvm.x86.avx2.psrlv.q" => "__builtin_ia32_psrlv2di",
+ "llvm.x86.avx2.psrlv.q.256" => "__builtin_ia32_psrlv4di",
+ "llvm.x86.avx2.psubs.b" => "__builtin_ia32_psubsb256",
+ "llvm.x86.avx2.psubs.w" => "__builtin_ia32_psubsw256",
+ "llvm.x86.avx2.psubus.b" => "__builtin_ia32_psubusb256",
+ "llvm.x86.avx2.psubus.w" => "__builtin_ia32_psubusw256",
+ "llvm.x86.avx2.vbroadcast.sd.pd.256" => "__builtin_ia32_vbroadcastsd_pd256",
+ "llvm.x86.avx2.vbroadcast.ss.ps" => "__builtin_ia32_vbroadcastss_ps",
+ "llvm.x86.avx2.vbroadcast.ss.ps.256" => "__builtin_ia32_vbroadcastss_ps256",
+ "llvm.x86.avx2.vextracti128" => "__builtin_ia32_extract128i256",
+ "llvm.x86.avx2.vinserti128" => "__builtin_ia32_insert128i256",
+ "llvm.x86.avx2.vperm2i128" => "__builtin_ia32_permti256",
+ "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512",
+ "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512",
+ "llvm.x86.avx512.broadcastmb.128" => "__builtin_ia32_broadcastmb128",
+ "llvm.x86.avx512.broadcastmb.256" => "__builtin_ia32_broadcastmb256",
+ "llvm.x86.avx512.broadcastmb.512" => "__builtin_ia32_broadcastmb512",
+ "llvm.x86.avx512.broadcastmw.128" => "__builtin_ia32_broadcastmw128",
+ "llvm.x86.avx512.broadcastmw.256" => "__builtin_ia32_broadcastmw256",
+ "llvm.x86.avx512.broadcastmw.512" => "__builtin_ia32_broadcastmw512",
+ "llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128",
+ "llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256",
+ "llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512",
+ "llvm.x86.avx512.conflict.q.128" => "__builtin_ia32_vpconflictdi_128",
+ "llvm.x86.avx512.conflict.q.256" => "__builtin_ia32_vpconflictdi_256",
+ "llvm.x86.avx512.conflict.q.512" => "__builtin_ia32_vpconflictdi_512",
+ "llvm.x86.avx512.cvtb2mask.128" => "__builtin_ia32_cvtb2mask128",
+ "llvm.x86.avx512.cvtb2mask.256" => "__builtin_ia32_cvtb2mask256",
+ "llvm.x86.avx512.cvtb2mask.512" => "__builtin_ia32_cvtb2mask512",
+ "llvm.x86.avx512.cvtd2mask.128" => "__builtin_ia32_cvtd2mask128",
+ "llvm.x86.avx512.cvtd2mask.256" => "__builtin_ia32_cvtd2mask256",
+ "llvm.x86.avx512.cvtd2mask.512" => "__builtin_ia32_cvtd2mask512",
+ "llvm.x86.avx512.cvtmask2b.128" => "__builtin_ia32_cvtmask2b128",
+ "llvm.x86.avx512.cvtmask2b.256" => "__builtin_ia32_cvtmask2b256",
+ "llvm.x86.avx512.cvtmask2b.512" => "__builtin_ia32_cvtmask2b512",
+ "llvm.x86.avx512.cvtmask2d.128" => "__builtin_ia32_cvtmask2d128",
+ "llvm.x86.avx512.cvtmask2d.256" => "__builtin_ia32_cvtmask2d256",
+ "llvm.x86.avx512.cvtmask2d.512" => "__builtin_ia32_cvtmask2d512",
+ "llvm.x86.avx512.cvtmask2q.128" => "__builtin_ia32_cvtmask2q128",
+ "llvm.x86.avx512.cvtmask2q.256" => "__builtin_ia32_cvtmask2q256",
+ "llvm.x86.avx512.cvtmask2q.512" => "__builtin_ia32_cvtmask2q512",
+ "llvm.x86.avx512.cvtmask2w.128" => "__builtin_ia32_cvtmask2w128",
+ "llvm.x86.avx512.cvtmask2w.256" => "__builtin_ia32_cvtmask2w256",
+ "llvm.x86.avx512.cvtmask2w.512" => "__builtin_ia32_cvtmask2w512",
+ "llvm.x86.avx512.cvtq2mask.128" => "__builtin_ia32_cvtq2mask128",
+ "llvm.x86.avx512.cvtq2mask.256" => "__builtin_ia32_cvtq2mask256",
+ "llvm.x86.avx512.cvtq2mask.512" => "__builtin_ia32_cvtq2mask512",
+ "llvm.x86.avx512.cvtsd2usi" => "__builtin_ia32_cvtsd2usi",
+ "llvm.x86.avx512.cvtsd2usi64" => "__builtin_ia32_cvtsd2usi64",
+ "llvm.x86.avx512.cvtsi2sd32" => "__builtin_ia32_cvtsi2sd32",
+ "llvm.x86.avx512.cvtsi2sd64" => "__builtin_ia32_cvtsi2sd64",
+ "llvm.x86.avx512.cvtsi2ss32" => "__builtin_ia32_cvtsi2ss32",
+ "llvm.x86.avx512.cvtsi2ss64" => "__builtin_ia32_cvtsi2ss64",
+ "llvm.x86.avx512.cvtss2usi" => "__builtin_ia32_cvtss2usi",
+ "llvm.x86.avx512.cvtss2usi64" => "__builtin_ia32_cvtss2usi64",
+ "llvm.x86.avx512.cvttsd2si" => "__builtin_ia32_vcvttsd2si32",
+ "llvm.x86.avx512.cvttsd2si64" => "__builtin_ia32_vcvttsd2si64",
+ "llvm.x86.avx512.cvttsd2usi" => "__builtin_ia32_vcvttsd2usi32",
+ // [DUPLICATE]: "llvm.x86.avx512.cvttsd2usi" => "__builtin_ia32_cvttsd2usi",
+ "llvm.x86.avx512.cvttsd2usi64" => "__builtin_ia32_vcvttsd2usi64",
+ // [DUPLICATE]: "llvm.x86.avx512.cvttsd2usi64" => "__builtin_ia32_cvttsd2usi64",
+ "llvm.x86.avx512.cvttss2si" => "__builtin_ia32_vcvttss2si32",
+ "llvm.x86.avx512.cvttss2si64" => "__builtin_ia32_vcvttss2si64",
+ "llvm.x86.avx512.cvttss2usi" => "__builtin_ia32_vcvttss2usi32",
+ // [DUPLICATE]: "llvm.x86.avx512.cvttss2usi" => "__builtin_ia32_cvttss2usi",
+ "llvm.x86.avx512.cvttss2usi64" => "__builtin_ia32_vcvttss2usi64",
+ // [DUPLICATE]: "llvm.x86.avx512.cvttss2usi64" => "__builtin_ia32_cvttss2usi64",
+ "llvm.x86.avx512.cvtusi2sd" => "__builtin_ia32_cvtusi2sd",
+ // [DUPLICATE]: "llvm.x86.avx512.cvtusi2sd" => "__builtin_ia32_cvtusi2sd32",
+ "llvm.x86.avx512.cvtusi2ss" => "__builtin_ia32_cvtusi2ss32",
+ // [DUPLICATE]: "llvm.x86.avx512.cvtusi2ss" => "__builtin_ia32_cvtusi2ss",
+ "llvm.x86.avx512.cvtusi642sd" => "__builtin_ia32_cvtusi2sd64",
+ // [DUPLICATE]: "llvm.x86.avx512.cvtusi642sd" => "__builtin_ia32_cvtusi642sd",
+ "llvm.x86.avx512.cvtusi642ss" => "__builtin_ia32_cvtusi2ss64",
+ // [DUPLICATE]: "llvm.x86.avx512.cvtusi642ss" => "__builtin_ia32_cvtusi642ss",
+ "llvm.x86.avx512.cvtw2mask.128" => "__builtin_ia32_cvtw2mask128",
+ "llvm.x86.avx512.cvtw2mask.256" => "__builtin_ia32_cvtw2mask256",
+ "llvm.x86.avx512.cvtw2mask.512" => "__builtin_ia32_cvtw2mask512",
+ "llvm.x86.avx512.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128",
+ "llvm.x86.avx512.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256",
+ "llvm.x86.avx512.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512",
+ "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512",
+ "llvm.x86.avx512.div.ps.512" => "__builtin_ia32_divps512",
+ "llvm.x86.avx512.exp2.pd" => "__builtin_ia32_exp2pd_mask",
+ "llvm.x86.avx512.exp2.ps" => "__builtin_ia32_exp2ps_mask",
+ "llvm.x86.avx512.gather.dpd.512" => "__builtin_ia32_gathersiv8df",
+ "llvm.x86.avx512.gather.dpi.512" => "__builtin_ia32_gathersiv16si",
+ "llvm.x86.avx512.gather.dpq.512" => "__builtin_ia32_gathersiv8di",
+ "llvm.x86.avx512.gather.dps.512" => "__builtin_ia32_gathersiv16sf",
+ "llvm.x86.avx512.gather.qpd.512" => "__builtin_ia32_gatherdiv8df",
+ "llvm.x86.avx512.gather.qpi.512" => "__builtin_ia32_gatherdiv16si",
+ "llvm.x86.avx512.gather.qpq.512" => "__builtin_ia32_gatherdiv8di",
+ "llvm.x86.avx512.gather.qps.512" => "__builtin_ia32_gatherdiv16sf",
+ "llvm.x86.avx512.gather3div2.df" => "__builtin_ia32_gather3div2df",
+ "llvm.x86.avx512.gather3div2.di" => "__builtin_ia32_gather3div2di",
+ "llvm.x86.avx512.gather3div4.df" => "__builtin_ia32_gather3div4df",
+ "llvm.x86.avx512.gather3div4.di" => "__builtin_ia32_gather3div4di",
+ "llvm.x86.avx512.gather3div4.sf" => "__builtin_ia32_gather3div4sf",
+ "llvm.x86.avx512.gather3div4.si" => "__builtin_ia32_gather3div4si",
+ "llvm.x86.avx512.gather3div8.sf" => "__builtin_ia32_gather3div8sf",
+ "llvm.x86.avx512.gather3div8.si" => "__builtin_ia32_gather3div8si",
+ "llvm.x86.avx512.gather3siv2.df" => "__builtin_ia32_gather3siv2df",
+ "llvm.x86.avx512.gather3siv2.di" => "__builtin_ia32_gather3siv2di",
+ "llvm.x86.avx512.gather3siv4.df" => "__builtin_ia32_gather3siv4df",
+ "llvm.x86.avx512.gather3siv4.di" => "__builtin_ia32_gather3siv4di",
+ "llvm.x86.avx512.gather3siv4.sf" => "__builtin_ia32_gather3siv4sf",
+ "llvm.x86.avx512.gather3siv4.si" => "__builtin_ia32_gather3siv4si",
+ "llvm.x86.avx512.gather3siv8.sf" => "__builtin_ia32_gather3siv8sf",
+ "llvm.x86.avx512.gather3siv8.si" => "__builtin_ia32_gather3siv8si",
+ "llvm.x86.avx512.gatherpf.dpd.512" => "__builtin_ia32_gatherpfdpd",
+ "llvm.x86.avx512.gatherpf.dps.512" => "__builtin_ia32_gatherpfdps",
+ "llvm.x86.avx512.gatherpf.qpd.512" => "__builtin_ia32_gatherpfqpd",
+ "llvm.x86.avx512.gatherpf.qps.512" => "__builtin_ia32_gatherpfqps",
+ "llvm.x86.avx512.kand.w" => "__builtin_ia32_kandhi",
+ "llvm.x86.avx512.kandn.w" => "__builtin_ia32_kandnhi",
+ "llvm.x86.avx512.knot.w" => "__builtin_ia32_knothi",
+ "llvm.x86.avx512.kor.w" => "__builtin_ia32_korhi",
+ "llvm.x86.avx512.kortestc.w" => "__builtin_ia32_kortestchi",
+ "llvm.x86.avx512.kortestz.w" => "__builtin_ia32_kortestzhi",
+ "llvm.x86.avx512.kunpck.bw" => "__builtin_ia32_kunpckhi",
+ "llvm.x86.avx512.kunpck.dq" => "__builtin_ia32_kunpckdi",
+ "llvm.x86.avx512.kunpck.wd" => "__builtin_ia32_kunpcksi",
+ "llvm.x86.avx512.kxnor.w" => "__builtin_ia32_kxnorhi",
+ "llvm.x86.avx512.kxor.w" => "__builtin_ia32_kxorhi",
+ "llvm.x86.avx512.mask.add.pd.128" => "__builtin_ia32_addpd128_mask",
+ "llvm.x86.avx512.mask.add.pd.256" => "__builtin_ia32_addpd256_mask",
+ "llvm.x86.avx512.mask.add.pd.512" => "__builtin_ia32_addpd512_mask",
+ "llvm.x86.avx512.mask.add.ps.128" => "__builtin_ia32_addps128_mask",
+ "llvm.x86.avx512.mask.add.ps.256" => "__builtin_ia32_addps256_mask",
+ "llvm.x86.avx512.mask.add.ps.512" => "__builtin_ia32_addps512_mask",
+ "llvm.x86.avx512.mask.add.sd.round" => "__builtin_ia32_addsd_round_mask",
+ "llvm.x86.avx512.mask.add.ss.round" => "__builtin_ia32_addss_round_mask",
+ "llvm.x86.avx512.mask.and.pd.128" => "__builtin_ia32_andpd128_mask",
+ "llvm.x86.avx512.mask.and.pd.256" => "__builtin_ia32_andpd256_mask",
+ "llvm.x86.avx512.mask.and.pd.512" => "__builtin_ia32_andpd512_mask",
+ "llvm.x86.avx512.mask.and.ps.128" => "__builtin_ia32_andps128_mask",
+ "llvm.x86.avx512.mask.and.ps.256" => "__builtin_ia32_andps256_mask",
+ "llvm.x86.avx512.mask.and.ps.512" => "__builtin_ia32_andps512_mask",
+ "llvm.x86.avx512.mask.andn.pd.128" => "__builtin_ia32_andnpd128_mask",
+ "llvm.x86.avx512.mask.andn.pd.256" => "__builtin_ia32_andnpd256_mask",
+ "llvm.x86.avx512.mask.andn.pd.512" => "__builtin_ia32_andnpd512_mask",
+ "llvm.x86.avx512.mask.andn.ps.128" => "__builtin_ia32_andnps128_mask",
+ "llvm.x86.avx512.mask.andn.ps.256" => "__builtin_ia32_andnps256_mask",
+ "llvm.x86.avx512.mask.andn.ps.512" => "__builtin_ia32_andnps512_mask",
+ "llvm.x86.avx512.mask.blend.d.512" => "__builtin_ia32_blendmd_512_mask",
+ "llvm.x86.avx512.mask.blend.pd.512" => "__builtin_ia32_blendmpd_512_mask",
+ "llvm.x86.avx512.mask.blend.ps.512" => "__builtin_ia32_blendmps_512_mask",
+ "llvm.x86.avx512.mask.blend.q.512" => "__builtin_ia32_blendmq_512_mask",
+ "llvm.x86.avx512.mask.broadcastf32x2.256" => "__builtin_ia32_broadcastf32x2_256_mask",
+ "llvm.x86.avx512.mask.broadcastf32x2.512" => "__builtin_ia32_broadcastf32x2_512_mask",
+ "llvm.x86.avx512.mask.broadcastf32x4.256" => "__builtin_ia32_broadcastf32x4_256_mask",
+ "llvm.x86.avx512.mask.broadcastf32x4.512" => "__builtin_ia32_broadcastf32x4_512",
+ "llvm.x86.avx512.mask.broadcastf32x8.512" => "__builtin_ia32_broadcastf32x8_512_mask",
+ "llvm.x86.avx512.mask.broadcastf64x2.256" => "__builtin_ia32_broadcastf64x2_256_mask",
+ "llvm.x86.avx512.mask.broadcastf64x2.512" => "__builtin_ia32_broadcastf64x2_512_mask",
+ "llvm.x86.avx512.mask.broadcastf64x4.512" => "__builtin_ia32_broadcastf64x4_512",
+ "llvm.x86.avx512.mask.broadcasti32x2.128" => "__builtin_ia32_broadcasti32x2_128_mask",
+ "llvm.x86.avx512.mask.broadcasti32x2.256" => "__builtin_ia32_broadcasti32x2_256_mask",
+ "llvm.x86.avx512.mask.broadcasti32x2.512" => "__builtin_ia32_broadcasti32x2_512_mask",
+ "llvm.x86.avx512.mask.broadcasti32x4.256" => "__builtin_ia32_broadcasti32x4_256_mask",
+ "llvm.x86.avx512.mask.broadcasti32x4.512" => "__builtin_ia32_broadcasti32x4_512",
+ "llvm.x86.avx512.mask.broadcasti32x8.512" => "__builtin_ia32_broadcasti32x8_512_mask",
+ "llvm.x86.avx512.mask.broadcasti64x2.256" => "__builtin_ia32_broadcasti64x2_256_mask",
+ "llvm.x86.avx512.mask.broadcasti64x2.512" => "__builtin_ia32_broadcasti64x2_512_mask",
+ "llvm.x86.avx512.mask.broadcasti64x4.512" => "__builtin_ia32_broadcasti64x4_512",
+ "llvm.x86.avx512.mask.cmp.pd.128" => "__builtin_ia32_cmppd128_mask",
+ "llvm.x86.avx512.mask.cmp.pd.256" => "__builtin_ia32_cmppd256_mask",
+ "llvm.x86.avx512.mask.cmp.pd.512" => "__builtin_ia32_cmppd512_mask",
+ "llvm.x86.avx512.mask.cmp.ps.128" => "__builtin_ia32_cmpps128_mask",
+ "llvm.x86.avx512.mask.cmp.ps.256" => "__builtin_ia32_cmpps256_mask",
+ "llvm.x86.avx512.mask.cmp.ps.512" => "__builtin_ia32_cmpps512_mask",
+ "llvm.x86.avx512.mask.cmp.sd" => "__builtin_ia32_cmpsd_mask",
+ "llvm.x86.avx512.mask.cmp.ss" => "__builtin_ia32_cmpss_mask",
+ "llvm.x86.avx512.mask.compress.d.128" => "__builtin_ia32_compresssi128_mask",
+ "llvm.x86.avx512.mask.compress.d.256" => "__builtin_ia32_compresssi256_mask",
+ "llvm.x86.avx512.mask.compress.d.512" => "__builtin_ia32_compresssi512_mask",
+ "llvm.x86.avx512.mask.compress.pd.128" => "__builtin_ia32_compressdf128_mask",
+ "llvm.x86.avx512.mask.compress.pd.256" => "__builtin_ia32_compressdf256_mask",
+ "llvm.x86.avx512.mask.compress.pd.512" => "__builtin_ia32_compressdf512_mask",
+ "llvm.x86.avx512.mask.compress.ps.128" => "__builtin_ia32_compresssf128_mask",
+ "llvm.x86.avx512.mask.compress.ps.256" => "__builtin_ia32_compresssf256_mask",
+ "llvm.x86.avx512.mask.compress.ps.512" => "__builtin_ia32_compresssf512_mask",
+ "llvm.x86.avx512.mask.compress.q.128" => "__builtin_ia32_compressdi128_mask",
+ "llvm.x86.avx512.mask.compress.q.256" => "__builtin_ia32_compressdi256_mask",
+ "llvm.x86.avx512.mask.compress.q.512" => "__builtin_ia32_compressdi512_mask",
+ "llvm.x86.avx512.mask.compress.store.d.128" => "__builtin_ia32_compressstoresi128_mask",
+ "llvm.x86.avx512.mask.compress.store.d.256" => "__builtin_ia32_compressstoresi256_mask",
+ "llvm.x86.avx512.mask.compress.store.d.512" => "__builtin_ia32_compressstoresi512_mask",
+ "llvm.x86.avx512.mask.compress.store.pd.128" => "__builtin_ia32_compressstoredf128_mask",
+ "llvm.x86.avx512.mask.compress.store.pd.256" => "__builtin_ia32_compressstoredf256_mask",
+ "llvm.x86.avx512.mask.compress.store.pd.512" => "__builtin_ia32_compressstoredf512_mask",
+ "llvm.x86.avx512.mask.compress.store.ps.128" => "__builtin_ia32_compressstoresf128_mask",
+ "llvm.x86.avx512.mask.compress.store.ps.256" => "__builtin_ia32_compressstoresf256_mask",
+ "llvm.x86.avx512.mask.compress.store.ps.512" => "__builtin_ia32_compressstoresf512_mask",
+ "llvm.x86.avx512.mask.compress.store.q.128" => "__builtin_ia32_compressstoredi128_mask",
+ "llvm.x86.avx512.mask.compress.store.q.256" => "__builtin_ia32_compressstoredi256_mask",
+ "llvm.x86.avx512.mask.compress.store.q.512" => "__builtin_ia32_compressstoredi512_mask",
+ "llvm.x86.avx512.mask.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask",
+ "llvm.x86.avx512.mask.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask",
+ "llvm.x86.avx512.mask.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask",
+ "llvm.x86.avx512.mask.conflict.q.128" => "__builtin_ia32_vpconflictdi_128_mask",
+ "llvm.x86.avx512.mask.conflict.q.256" => "__builtin_ia32_vpconflictdi_256_mask",
+ "llvm.x86.avx512.mask.conflict.q.512" => "__builtin_ia32_vpconflictdi_512_mask",
+ "llvm.x86.avx512.mask.cvtdq2pd.128" => "__builtin_ia32_cvtdq2pd128_mask",
+ "llvm.x86.avx512.mask.cvtdq2pd.256" => "__builtin_ia32_cvtdq2pd256_mask",
+ "llvm.x86.avx512.mask.cvtdq2pd.512" => "__builtin_ia32_cvtdq2pd512_mask",
+ "llvm.x86.avx512.mask.cvtdq2ps.128" => "__builtin_ia32_cvtdq2ps128_mask",
+ "llvm.x86.avx512.mask.cvtdq2ps.256" => "__builtin_ia32_cvtdq2ps256_mask",
+ "llvm.x86.avx512.mask.cvtdq2ps.512" => "__builtin_ia32_cvtdq2ps512_mask",
+ "llvm.x86.avx512.mask.cvtpd2dq.128" => "__builtin_ia32_cvtpd2dq128_mask",
+ "llvm.x86.avx512.mask.cvtpd2dq.256" => "__builtin_ia32_cvtpd2dq256_mask",
+ "llvm.x86.avx512.mask.cvtpd2dq.512" => "__builtin_ia32_cvtpd2dq512_mask",
+ "llvm.x86.avx512.mask.cvtpd2ps" => "__builtin_ia32_cvtpd2ps_mask",
+ "llvm.x86.avx512.mask.cvtpd2ps.256" => "__builtin_ia32_cvtpd2ps256_mask",
+ "llvm.x86.avx512.mask.cvtpd2ps.512" => "__builtin_ia32_cvtpd2ps512_mask",
+ "llvm.x86.avx512.mask.cvtpd2qq.128" => "__builtin_ia32_cvtpd2qq128_mask",
+ "llvm.x86.avx512.mask.cvtpd2qq.256" => "__builtin_ia32_cvtpd2qq256_mask",
+ "llvm.x86.avx512.mask.cvtpd2qq.512" => "__builtin_ia32_cvtpd2qq512_mask",
+ "llvm.x86.avx512.mask.cvtpd2udq.128" => "__builtin_ia32_cvtpd2udq128_mask",
+ "llvm.x86.avx512.mask.cvtpd2udq.256" => "__builtin_ia32_cvtpd2udq256_mask",
+ "llvm.x86.avx512.mask.cvtpd2udq.512" => "__builtin_ia32_cvtpd2udq512_mask",
+ "llvm.x86.avx512.mask.cvtpd2uqq.128" => "__builtin_ia32_cvtpd2uqq128_mask",
+ "llvm.x86.avx512.mask.cvtpd2uqq.256" => "__builtin_ia32_cvtpd2uqq256_mask",
+ "llvm.x86.avx512.mask.cvtpd2uqq.512" => "__builtin_ia32_cvtpd2uqq512_mask",
+ "llvm.x86.avx512.mask.cvtps2dq.128" => "__builtin_ia32_cvtps2dq128_mask",
+ "llvm.x86.avx512.mask.cvtps2dq.256" => "__builtin_ia32_cvtps2dq256_mask",
+ "llvm.x86.avx512.mask.cvtps2dq.512" => "__builtin_ia32_cvtps2dq512_mask",
+ "llvm.x86.avx512.mask.cvtps2pd.128" => "__builtin_ia32_cvtps2pd128_mask",
+ "llvm.x86.avx512.mask.cvtps2pd.256" => "__builtin_ia32_cvtps2pd256_mask",
+ "llvm.x86.avx512.mask.cvtps2pd.512" => "__builtin_ia32_cvtps2pd512_mask",
+ "llvm.x86.avx512.mask.cvtps2qq.128" => "__builtin_ia32_cvtps2qq128_mask",
+ "llvm.x86.avx512.mask.cvtps2qq.256" => "__builtin_ia32_cvtps2qq256_mask",
+ "llvm.x86.avx512.mask.cvtps2qq.512" => "__builtin_ia32_cvtps2qq512_mask",
+ "llvm.x86.avx512.mask.cvtps2udq.128" => "__builtin_ia32_cvtps2udq128_mask",
+ "llvm.x86.avx512.mask.cvtps2udq.256" => "__builtin_ia32_cvtps2udq256_mask",
+ "llvm.x86.avx512.mask.cvtps2udq.512" => "__builtin_ia32_cvtps2udq512_mask",
+ "llvm.x86.avx512.mask.cvtps2uqq.128" => "__builtin_ia32_cvtps2uqq128_mask",
+ "llvm.x86.avx512.mask.cvtps2uqq.256" => "__builtin_ia32_cvtps2uqq256_mask",
+ "llvm.x86.avx512.mask.cvtps2uqq.512" => "__builtin_ia32_cvtps2uqq512_mask",
+ "llvm.x86.avx512.mask.cvtqq2pd.128" => "__builtin_ia32_cvtqq2pd128_mask",
+ "llvm.x86.avx512.mask.cvtqq2pd.256" => "__builtin_ia32_cvtqq2pd256_mask",
+ "llvm.x86.avx512.mask.cvtqq2pd.512" => "__builtin_ia32_cvtqq2pd512_mask",
+ "llvm.x86.avx512.mask.cvtqq2ps.128" => "__builtin_ia32_cvtqq2ps128_mask",
+ "llvm.x86.avx512.mask.cvtqq2ps.256" => "__builtin_ia32_cvtqq2ps256_mask",
+ "llvm.x86.avx512.mask.cvtqq2ps.512" => "__builtin_ia32_cvtqq2ps512_mask",
+ "llvm.x86.avx512.mask.cvtsd2ss.round" => "__builtin_ia32_cvtsd2ss_round_mask",
+ "llvm.x86.avx512.mask.cvtss2sd.round" => "__builtin_ia32_cvtss2sd_round_mask",
+ "llvm.x86.avx512.mask.cvttpd2dq.128" => "__builtin_ia32_cvttpd2dq128_mask",
+ "llvm.x86.avx512.mask.cvttpd2dq.256" => "__builtin_ia32_cvttpd2dq256_mask",
+ "llvm.x86.avx512.mask.cvttpd2dq.512" => "__builtin_ia32_cvttpd2dq512_mask",
+ "llvm.x86.avx512.mask.cvttpd2qq.128" => "__builtin_ia32_cvttpd2qq128_mask",
+ "llvm.x86.avx512.mask.cvttpd2qq.256" => "__builtin_ia32_cvttpd2qq256_mask",
+ "llvm.x86.avx512.mask.cvttpd2qq.512" => "__builtin_ia32_cvttpd2qq512_mask",
+ "llvm.x86.avx512.mask.cvttpd2udq.128" => "__builtin_ia32_cvttpd2udq128_mask",
+ "llvm.x86.avx512.mask.cvttpd2udq.256" => "__builtin_ia32_cvttpd2udq256_mask",
+ "llvm.x86.avx512.mask.cvttpd2udq.512" => "__builtin_ia32_cvttpd2udq512_mask",
+ "llvm.x86.avx512.mask.cvttpd2uqq.128" => "__builtin_ia32_cvttpd2uqq128_mask",
+ "llvm.x86.avx512.mask.cvttpd2uqq.256" => "__builtin_ia32_cvttpd2uqq256_mask",
+ "llvm.x86.avx512.mask.cvttpd2uqq.512" => "__builtin_ia32_cvttpd2uqq512_mask",
+ "llvm.x86.avx512.mask.cvttps2dq.128" => "__builtin_ia32_cvttps2dq128_mask",
+ "llvm.x86.avx512.mask.cvttps2dq.256" => "__builtin_ia32_cvttps2dq256_mask",
+ "llvm.x86.avx512.mask.cvttps2dq.512" => "__builtin_ia32_cvttps2dq512_mask",
+ "llvm.x86.avx512.mask.cvttps2qq.128" => "__builtin_ia32_cvttps2qq128_mask",
+ "llvm.x86.avx512.mask.cvttps2qq.256" => "__builtin_ia32_cvttps2qq256_mask",
+ "llvm.x86.avx512.mask.cvttps2qq.512" => "__builtin_ia32_cvttps2qq512_mask",
+ "llvm.x86.avx512.mask.cvttps2udq.128" => "__builtin_ia32_cvttps2udq128_mask",
+ "llvm.x86.avx512.mask.cvttps2udq.256" => "__builtin_ia32_cvttps2udq256_mask",
+ "llvm.x86.avx512.mask.cvttps2udq.512" => "__builtin_ia32_cvttps2udq512_mask",
+ "llvm.x86.avx512.mask.cvttps2uqq.128" => "__builtin_ia32_cvttps2uqq128_mask",
+ "llvm.x86.avx512.mask.cvttps2uqq.256" => "__builtin_ia32_cvttps2uqq256_mask",
+ "llvm.x86.avx512.mask.cvttps2uqq.512" => "__builtin_ia32_cvttps2uqq512_mask",
+ "llvm.x86.avx512.mask.cvtudq2pd.128" => "__builtin_ia32_cvtudq2pd128_mask",
+ "llvm.x86.avx512.mask.cvtudq2pd.256" => "__builtin_ia32_cvtudq2pd256_mask",
+ "llvm.x86.avx512.mask.cvtudq2pd.512" => "__builtin_ia32_cvtudq2pd512_mask",
+ "llvm.x86.avx512.mask.cvtudq2ps.128" => "__builtin_ia32_cvtudq2ps128_mask",
+ "llvm.x86.avx512.mask.cvtudq2ps.256" => "__builtin_ia32_cvtudq2ps256_mask",
+ "llvm.x86.avx512.mask.cvtudq2ps.512" => "__builtin_ia32_cvtudq2ps512_mask",
+ "llvm.x86.avx512.mask.cvtuqq2pd.128" => "__builtin_ia32_cvtuqq2pd128_mask",
+ "llvm.x86.avx512.mask.cvtuqq2pd.256" => "__builtin_ia32_cvtuqq2pd256_mask",
+ "llvm.x86.avx512.mask.cvtuqq2pd.512" => "__builtin_ia32_cvtuqq2pd512_mask",
+ "llvm.x86.avx512.mask.cvtuqq2ps.128" => "__builtin_ia32_cvtuqq2ps128_mask",
+ "llvm.x86.avx512.mask.cvtuqq2ps.256" => "__builtin_ia32_cvtuqq2ps256_mask",
+ "llvm.x86.avx512.mask.cvtuqq2ps.512" => "__builtin_ia32_cvtuqq2ps512_mask",
+ "llvm.x86.avx512.mask.dbpsadbw.128" => "__builtin_ia32_dbpsadbw128_mask",
+ "llvm.x86.avx512.mask.dbpsadbw.256" => "__builtin_ia32_dbpsadbw256_mask",
+ "llvm.x86.avx512.mask.dbpsadbw.512" => "__builtin_ia32_dbpsadbw512_mask",
+ "llvm.x86.avx512.mask.div.pd.128" => "__builtin_ia32_divpd_mask",
+ "llvm.x86.avx512.mask.div.pd.256" => "__builtin_ia32_divpd256_mask",
+ "llvm.x86.avx512.mask.div.pd.512" => "__builtin_ia32_divpd512_mask",
+ "llvm.x86.avx512.mask.div.ps.128" => "__builtin_ia32_divps_mask",
+ "llvm.x86.avx512.mask.div.ps.256" => "__builtin_ia32_divps256_mask",
+ "llvm.x86.avx512.mask.div.ps.512" => "__builtin_ia32_divps512_mask",
+ "llvm.x86.avx512.mask.div.sd.round" => "__builtin_ia32_divsd_round_mask",
+ "llvm.x86.avx512.mask.div.ss.round" => "__builtin_ia32_divss_round_mask",
+ "llvm.x86.avx512.mask.expand.d.128" => "__builtin_ia32_expandsi128_mask",
+ "llvm.x86.avx512.mask.expand.d.256" => "__builtin_ia32_expandsi256_mask",
+ "llvm.x86.avx512.mask.expand.d.512" => "__builtin_ia32_expandsi512_mask",
+ "llvm.x86.avx512.mask.expand.load.d.128" => "__builtin_ia32_expandloadsi128_mask",
+ "llvm.x86.avx512.mask.expand.load.d.256" => "__builtin_ia32_expandloadsi256_mask",
+ "llvm.x86.avx512.mask.expand.load.d.512" => "__builtin_ia32_expandloadsi512_mask",
+ "llvm.x86.avx512.mask.expand.load.pd.128" => "__builtin_ia32_expandloaddf128_mask",
+ "llvm.x86.avx512.mask.expand.load.pd.256" => "__builtin_ia32_expandloaddf256_mask",
+ "llvm.x86.avx512.mask.expand.load.pd.512" => "__builtin_ia32_expandloaddf512_mask",
+ "llvm.x86.avx512.mask.expand.load.ps.128" => "__builtin_ia32_expandloadsf128_mask",
+ "llvm.x86.avx512.mask.expand.load.ps.256" => "__builtin_ia32_expandloadsf256_mask",
+ "llvm.x86.avx512.mask.expand.load.ps.512" => "__builtin_ia32_expandloadsf512_mask",
+ "llvm.x86.avx512.mask.expand.load.q.128" => "__builtin_ia32_expandloaddi128_mask",
+ "llvm.x86.avx512.mask.expand.load.q.256" => "__builtin_ia32_expandloaddi256_mask",
+ "llvm.x86.avx512.mask.expand.load.q.512" => "__builtin_ia32_expandloaddi512_mask",
+ "llvm.x86.avx512.mask.expand.pd.128" => "__builtin_ia32_expanddf128_mask",
+ "llvm.x86.avx512.mask.expand.pd.256" => "__builtin_ia32_expanddf256_mask",
+ "llvm.x86.avx512.mask.expand.pd.512" => "__builtin_ia32_expanddf512_mask",
+ "llvm.x86.avx512.mask.expand.ps.128" => "__builtin_ia32_expandsf128_mask",
+ "llvm.x86.avx512.mask.expand.ps.256" => "__builtin_ia32_expandsf256_mask",
+ "llvm.x86.avx512.mask.expand.ps.512" => "__builtin_ia32_expandsf512_mask",
+ "llvm.x86.avx512.mask.expand.q.128" => "__builtin_ia32_expanddi128_mask",
+ "llvm.x86.avx512.mask.expand.q.256" => "__builtin_ia32_expanddi256_mask",
+ "llvm.x86.avx512.mask.expand.q.512" => "__builtin_ia32_expanddi512_mask",
+ "llvm.x86.avx512.mask.fixupimm.pd.128" => "__builtin_ia32_fixupimmpd128_mask",
+ "llvm.x86.avx512.mask.fixupimm.pd.256" => "__builtin_ia32_fixupimmpd256_mask",
+ "llvm.x86.avx512.mask.fixupimm.pd.512" => "__builtin_ia32_fixupimmpd512_mask",
+ "llvm.x86.avx512.mask.fixupimm.ps.128" => "__builtin_ia32_fixupimmps128_mask",
+ "llvm.x86.avx512.mask.fixupimm.ps.256" => "__builtin_ia32_fixupimmps256_mask",
+ "llvm.x86.avx512.mask.fixupimm.ps.512" => "__builtin_ia32_fixupimmps512_mask",
+ "llvm.x86.avx512.mask.fixupimm.sd" => "__builtin_ia32_fixupimmsd_mask",
+ "llvm.x86.avx512.mask.fixupimm.ss" => "__builtin_ia32_fixupimmss_mask",
+ "llvm.x86.avx512.mask.fpclass.pd.128" => "__builtin_ia32_fpclasspd128_mask",
+ "llvm.x86.avx512.mask.fpclass.pd.256" => "__builtin_ia32_fpclasspd256_mask",
+ "llvm.x86.avx512.mask.fpclass.pd.512" => "__builtin_ia32_fpclasspd512_mask",
+ "llvm.x86.avx512.mask.fpclass.ps.128" => "__builtin_ia32_fpclassps128_mask",
+ "llvm.x86.avx512.mask.fpclass.ps.256" => "__builtin_ia32_fpclassps256_mask",
+ "llvm.x86.avx512.mask.fpclass.ps.512" => "__builtin_ia32_fpclassps512_mask",
+ "llvm.x86.avx512.mask.fpclass.sd" => "__builtin_ia32_fpclasssd_mask",
+ "llvm.x86.avx512.mask.fpclass.ss" => "__builtin_ia32_fpclassss_mask",
+ "llvm.x86.avx512.mask.getexp.pd.128" => "__builtin_ia32_getexppd128_mask",
+ "llvm.x86.avx512.mask.getexp.pd.256" => "__builtin_ia32_getexppd256_mask",
+ "llvm.x86.avx512.mask.getexp.pd.512" => "__builtin_ia32_getexppd512_mask",
+ "llvm.x86.avx512.mask.getexp.ps.128" => "__builtin_ia32_getexpps128_mask",
+ "llvm.x86.avx512.mask.getexp.ps.256" => "__builtin_ia32_getexpps256_mask",
+ "llvm.x86.avx512.mask.getexp.ps.512" => "__builtin_ia32_getexpps512_mask",
+ "llvm.x86.avx512.mask.getexp.sd" => "__builtin_ia32_getexpsd128_round_mask",
+ "llvm.x86.avx512.mask.getexp.ss" => "__builtin_ia32_getexpss128_round_mask",
+ "llvm.x86.avx512.mask.getmant.pd.128" => "__builtin_ia32_getmantpd128_mask",
+ "llvm.x86.avx512.mask.getmant.pd.256" => "__builtin_ia32_getmantpd256_mask",
+ "llvm.x86.avx512.mask.getmant.pd.512" => "__builtin_ia32_getmantpd512_mask",
+ "llvm.x86.avx512.mask.getmant.ps.128" => "__builtin_ia32_getmantps128_mask",
+ "llvm.x86.avx512.mask.getmant.ps.256" => "__builtin_ia32_getmantps256_mask",
+ "llvm.x86.avx512.mask.getmant.ps.512" => "__builtin_ia32_getmantps512_mask",
+ "llvm.x86.avx512.mask.getmant.sd" => "__builtin_ia32_getmantsd_round_mask",
+ "llvm.x86.avx512.mask.getmant.ss" => "__builtin_ia32_getmantss_round_mask",
+ "llvm.x86.avx512.mask.insertf32x4.256" => "__builtin_ia32_insertf32x4_256_mask",
+ "llvm.x86.avx512.mask.insertf32x4.512" => "__builtin_ia32_insertf32x4_mask",
+ "llvm.x86.avx512.mask.insertf32x8.512" => "__builtin_ia32_insertf32x8_mask",
+ "llvm.x86.avx512.mask.insertf64x2.256" => "__builtin_ia32_insertf64x2_256_mask",
+ "llvm.x86.avx512.mask.insertf64x2.512" => "__builtin_ia32_insertf64x2_512_mask",
+ "llvm.x86.avx512.mask.insertf64x4.512" => "__builtin_ia32_insertf64x4_mask",
+ "llvm.x86.avx512.mask.inserti32x4.256" => "__builtin_ia32_inserti32x4_256_mask",
+ "llvm.x86.avx512.mask.inserti32x4.512" => "__builtin_ia32_inserti32x4_mask",
+ "llvm.x86.avx512.mask.inserti32x8.512" => "__builtin_ia32_inserti32x8_mask",
+ "llvm.x86.avx512.mask.inserti64x2.256" => "__builtin_ia32_inserti64x2_256_mask",
+ "llvm.x86.avx512.mask.inserti64x2.512" => "__builtin_ia32_inserti64x2_512_mask",
+ "llvm.x86.avx512.mask.inserti64x4.512" => "__builtin_ia32_inserti64x4_mask",
+ "llvm.x86.avx512.mask.loadu.d.512" => "__builtin_ia32_loaddqusi512_mask",
+ "llvm.x86.avx512.mask.loadu.pd.512" => "__builtin_ia32_loadupd512_mask",
+ "llvm.x86.avx512.mask.loadu.ps.512" => "__builtin_ia32_loadups512_mask",
+ "llvm.x86.avx512.mask.loadu.q.512" => "__builtin_ia32_loaddqudi512_mask",
+ "llvm.x86.avx512.mask.lzcnt.d.512" => "__builtin_ia32_vplzcntd_512_mask",
+ "llvm.x86.avx512.mask.lzcnt.q.512" => "__builtin_ia32_vplzcntq_512_mask",
+ "llvm.x86.avx512.mask.max.pd.128" => "__builtin_ia32_maxpd_mask",
+ "llvm.x86.avx512.mask.max.pd.256" => "__builtin_ia32_maxpd256_mask",
+ "llvm.x86.avx512.mask.max.pd.512" => "__builtin_ia32_maxpd512_mask",
+ "llvm.x86.avx512.mask.max.ps.128" => "__builtin_ia32_maxps_mask",
+ "llvm.x86.avx512.mask.max.ps.256" => "__builtin_ia32_maxps256_mask",
+ "llvm.x86.avx512.mask.max.ps.512" => "__builtin_ia32_maxps512_mask",
+ "llvm.x86.avx512.mask.max.sd.round" => "__builtin_ia32_maxsd_round_mask",
+ "llvm.x86.avx512.mask.max.ss.round" => "__builtin_ia32_maxss_round_mask",
+ "llvm.x86.avx512.mask.min.pd.128" => "__builtin_ia32_minpd_mask",
+ "llvm.x86.avx512.mask.min.pd.256" => "__builtin_ia32_minpd256_mask",
+ "llvm.x86.avx512.mask.min.pd.512" => "__builtin_ia32_minpd512_mask",
+ "llvm.x86.avx512.mask.min.ps.128" => "__builtin_ia32_minps_mask",
+ "llvm.x86.avx512.mask.min.ps.256" => "__builtin_ia32_minps256_mask",
+ "llvm.x86.avx512.mask.min.ps.512" => "__builtin_ia32_minps512_mask",
+ "llvm.x86.avx512.mask.min.sd.round" => "__builtin_ia32_minsd_round_mask",
+ "llvm.x86.avx512.mask.min.ss.round" => "__builtin_ia32_minss_round_mask",
+ "llvm.x86.avx512.mask.move.sd" => "__builtin_ia32_movsd_mask",
+ "llvm.x86.avx512.mask.move.ss" => "__builtin_ia32_movss_mask",
+ "llvm.x86.avx512.mask.mul.pd.128" => "__builtin_ia32_mulpd_mask",
+ "llvm.x86.avx512.mask.mul.pd.256" => "__builtin_ia32_mulpd256_mask",
+ "llvm.x86.avx512.mask.mul.pd.512" => "__builtin_ia32_mulpd512_mask",
+ "llvm.x86.avx512.mask.mul.ps.128" => "__builtin_ia32_mulps_mask",
+ "llvm.x86.avx512.mask.mul.ps.256" => "__builtin_ia32_mulps256_mask",
+ "llvm.x86.avx512.mask.mul.ps.512" => "__builtin_ia32_mulps512_mask",
+ "llvm.x86.avx512.mask.mul.sd.round" => "__builtin_ia32_mulsd_round_mask",
+ "llvm.x86.avx512.mask.mul.ss.round" => "__builtin_ia32_mulss_round_mask",
+ "llvm.x86.avx512.mask.or.pd.128" => "__builtin_ia32_orpd128_mask",
+ "llvm.x86.avx512.mask.or.pd.256" => "__builtin_ia32_orpd256_mask",
+ "llvm.x86.avx512.mask.or.pd.512" => "__builtin_ia32_orpd512_mask",
+ "llvm.x86.avx512.mask.or.ps.128" => "__builtin_ia32_orps128_mask",
+ "llvm.x86.avx512.mask.or.ps.256" => "__builtin_ia32_orps256_mask",
+ "llvm.x86.avx512.mask.or.ps.512" => "__builtin_ia32_orps512_mask",
+ "llvm.x86.avx512.mask.pabs.b.128" => "__builtin_ia32_pabsb128_mask",
+ "llvm.x86.avx512.mask.pabs.b.256" => "__builtin_ia32_pabsb256_mask",
+ "llvm.x86.avx512.mask.pabs.b.512" => "__builtin_ia32_pabsb512_mask",
+ "llvm.x86.avx512.mask.pabs.d.128" => "__builtin_ia32_pabsd128_mask",
+ "llvm.x86.avx512.mask.pabs.d.256" => "__builtin_ia32_pabsd256_mask",
+ "llvm.x86.avx512.mask.pabs.d.512" => "__builtin_ia32_pabsd512_mask",
+ "llvm.x86.avx512.mask.pabs.q.128" => "__builtin_ia32_pabsq128_mask",
+ "llvm.x86.avx512.mask.pabs.q.256" => "__builtin_ia32_pabsq256_mask",
+ "llvm.x86.avx512.mask.pabs.q.512" => "__builtin_ia32_pabsq512_mask",
+ "llvm.x86.avx512.mask.pabs.w.128" => "__builtin_ia32_pabsw128_mask",
+ "llvm.x86.avx512.mask.pabs.w.256" => "__builtin_ia32_pabsw256_mask",
+ "llvm.x86.avx512.mask.pabs.w.512" => "__builtin_ia32_pabsw512_mask",
+ "llvm.x86.avx512.mask.packssdw.128" => "__builtin_ia32_packssdw128_mask",
+ "llvm.x86.avx512.mask.packssdw.256" => "__builtin_ia32_packssdw256_mask",
+ "llvm.x86.avx512.mask.packssdw.512" => "__builtin_ia32_packssdw512_mask",
+ "llvm.x86.avx512.mask.packsswb.128" => "__builtin_ia32_packsswb128_mask",
+ "llvm.x86.avx512.mask.packsswb.256" => "__builtin_ia32_packsswb256_mask",
+ "llvm.x86.avx512.mask.packsswb.512" => "__builtin_ia32_packsswb512_mask",
+ "llvm.x86.avx512.mask.packusdw.128" => "__builtin_ia32_packusdw128_mask",
+ "llvm.x86.avx512.mask.packusdw.256" => "__builtin_ia32_packusdw256_mask",
+ "llvm.x86.avx512.mask.packusdw.512" => "__builtin_ia32_packusdw512_mask",
+ "llvm.x86.avx512.mask.packuswb.128" => "__builtin_ia32_packuswb128_mask",
+ "llvm.x86.avx512.mask.packuswb.256" => "__builtin_ia32_packuswb256_mask",
+ "llvm.x86.avx512.mask.packuswb.512" => "__builtin_ia32_packuswb512_mask",
+ "llvm.x86.avx512.mask.padd.b.128" => "__builtin_ia32_paddb128_mask",
+ "llvm.x86.avx512.mask.padd.b.256" => "__builtin_ia32_paddb256_mask",
+ "llvm.x86.avx512.mask.padd.b.512" => "__builtin_ia32_paddb512_mask",
+ "llvm.x86.avx512.mask.padd.d.128" => "__builtin_ia32_paddd128_mask",
+ "llvm.x86.avx512.mask.padd.d.256" => "__builtin_ia32_paddd256_mask",
+ "llvm.x86.avx512.mask.padd.d.512" => "__builtin_ia32_paddd512_mask",
+ "llvm.x86.avx512.mask.padd.q.128" => "__builtin_ia32_paddq128_mask",
+ "llvm.x86.avx512.mask.padd.q.256" => "__builtin_ia32_paddq256_mask",
+ "llvm.x86.avx512.mask.padd.q.512" => "__builtin_ia32_paddq512_mask",
+ "llvm.x86.avx512.mask.padd.w.128" => "__builtin_ia32_paddw128_mask",
+ "llvm.x86.avx512.mask.padd.w.256" => "__builtin_ia32_paddw256_mask",
+ "llvm.x86.avx512.mask.padd.w.512" => "__builtin_ia32_paddw512_mask",
+ "llvm.x86.avx512.mask.padds.b.128" => "__builtin_ia32_paddsb128_mask",
+ "llvm.x86.avx512.mask.padds.b.256" => "__builtin_ia32_paddsb256_mask",
+ "llvm.x86.avx512.mask.padds.b.512" => "__builtin_ia32_paddsb512_mask",
+ "llvm.x86.avx512.mask.padds.w.128" => "__builtin_ia32_paddsw128_mask",
+ "llvm.x86.avx512.mask.padds.w.256" => "__builtin_ia32_paddsw256_mask",
+ "llvm.x86.avx512.mask.padds.w.512" => "__builtin_ia32_paddsw512_mask",
+ "llvm.x86.avx512.mask.paddus.b.128" => "__builtin_ia32_paddusb128_mask",
+ "llvm.x86.avx512.mask.paddus.b.256" => "__builtin_ia32_paddusb256_mask",
+ "llvm.x86.avx512.mask.paddus.b.512" => "__builtin_ia32_paddusb512_mask",
+ "llvm.x86.avx512.mask.paddus.w.128" => "__builtin_ia32_paddusw128_mask",
+ "llvm.x86.avx512.mask.paddus.w.256" => "__builtin_ia32_paddusw256_mask",
+ "llvm.x86.avx512.mask.paddus.w.512" => "__builtin_ia32_paddusw512_mask",
+ "llvm.x86.avx512.mask.pand.d.512" => "__builtin_ia32_pandd512_mask",
+ "llvm.x86.avx512.mask.pand.q.512" => "__builtin_ia32_pandq512_mask",
+ "llvm.x86.avx512.mask.pavg.b.128" => "__builtin_ia32_pavgb128_mask",
+ "llvm.x86.avx512.mask.pavg.b.256" => "__builtin_ia32_pavgb256_mask",
+ "llvm.x86.avx512.mask.pavg.b.512" => "__builtin_ia32_pavgb512_mask",
+ "llvm.x86.avx512.mask.pavg.w.128" => "__builtin_ia32_pavgw128_mask",
+ "llvm.x86.avx512.mask.pavg.w.256" => "__builtin_ia32_pavgw256_mask",
+ "llvm.x86.avx512.mask.pavg.w.512" => "__builtin_ia32_pavgw512_mask",
+ "llvm.x86.avx512.mask.pbroadcast.b.gpr.128" => "__builtin_ia32_pbroadcastb128_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.b.gpr.256" => "__builtin_ia32_pbroadcastb256_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.b.gpr.512" => "__builtin_ia32_pbroadcastb512_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.d.gpr.128" => "__builtin_ia32_pbroadcastd128_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.d.gpr.256" => "__builtin_ia32_pbroadcastd256_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.d.gpr.512" => "__builtin_ia32_pbroadcastd512_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.q.gpr.128" => "__builtin_ia32_pbroadcastq128_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.q.gpr.256" => "__builtin_ia32_pbroadcastq256_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.q.gpr.512" => "__builtin_ia32_pbroadcastq512_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.q.mem.512" => "__builtin_ia32_pbroadcastq512_mem_mask",
+ "llvm.x86.avx512.mask.pbroadcast.w.gpr.128" => "__builtin_ia32_pbroadcastw128_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.w.gpr.256" => "__builtin_ia32_pbroadcastw256_gpr_mask",
+ "llvm.x86.avx512.mask.pbroadcast.w.gpr.512" => "__builtin_ia32_pbroadcastw512_gpr_mask",
+ "llvm.x86.avx512.mask.pcmpeq.b.128" => "__builtin_ia32_pcmpeqb128_mask",
+ "llvm.x86.avx512.mask.pcmpeq.b.256" => "__builtin_ia32_pcmpeqb256_mask",
+ "llvm.x86.avx512.mask.pcmpeq.b.512" => "__builtin_ia32_pcmpeqb512_mask",
+ "llvm.x86.avx512.mask.pcmpeq.d.128" => "__builtin_ia32_pcmpeqd128_mask",
+ "llvm.x86.avx512.mask.pcmpeq.d.256" => "__builtin_ia32_pcmpeqd256_mask",
+ "llvm.x86.avx512.mask.pcmpeq.d.512" => "__builtin_ia32_pcmpeqd512_mask",
+ "llvm.x86.avx512.mask.pcmpeq.q.128" => "__builtin_ia32_pcmpeqq128_mask",
+ "llvm.x86.avx512.mask.pcmpeq.q.256" => "__builtin_ia32_pcmpeqq256_mask",
+ "llvm.x86.avx512.mask.pcmpeq.q.512" => "__builtin_ia32_pcmpeqq512_mask",
+ "llvm.x86.avx512.mask.pcmpeq.w.128" => "__builtin_ia32_pcmpeqw128_mask",
+ "llvm.x86.avx512.mask.pcmpeq.w.256" => "__builtin_ia32_pcmpeqw256_mask",
+ "llvm.x86.avx512.mask.pcmpeq.w.512" => "__builtin_ia32_pcmpeqw512_mask",
+ "llvm.x86.avx512.mask.pcmpgt.b.128" => "__builtin_ia32_pcmpgtb128_mask",
+ "llvm.x86.avx512.mask.pcmpgt.b.256" => "__builtin_ia32_pcmpgtb256_mask",
+ "llvm.x86.avx512.mask.pcmpgt.b.512" => "__builtin_ia32_pcmpgtb512_mask",
+ "llvm.x86.avx512.mask.pcmpgt.d.128" => "__builtin_ia32_pcmpgtd128_mask",
+ "llvm.x86.avx512.mask.pcmpgt.d.256" => "__builtin_ia32_pcmpgtd256_mask",
+ "llvm.x86.avx512.mask.pcmpgt.d.512" => "__builtin_ia32_pcmpgtd512_mask",
+ "llvm.x86.avx512.mask.pcmpgt.q.128" => "__builtin_ia32_pcmpgtq128_mask",
+ "llvm.x86.avx512.mask.pcmpgt.q.256" => "__builtin_ia32_pcmpgtq256_mask",
+ "llvm.x86.avx512.mask.pcmpgt.q.512" => "__builtin_ia32_pcmpgtq512_mask",
+ "llvm.x86.avx512.mask.pcmpgt.w.128" => "__builtin_ia32_pcmpgtw128_mask",
+ "llvm.x86.avx512.mask.pcmpgt.w.256" => "__builtin_ia32_pcmpgtw256_mask",
+ "llvm.x86.avx512.mask.pcmpgt.w.512" => "__builtin_ia32_pcmpgtw512_mask",
+ "llvm.x86.avx512.mask.permvar.df.256" => "__builtin_ia32_permvardf256_mask",
+ "llvm.x86.avx512.mask.permvar.df.512" => "__builtin_ia32_permvardf512_mask",
+ "llvm.x86.avx512.mask.permvar.di.256" => "__builtin_ia32_permvardi256_mask",
+ "llvm.x86.avx512.mask.permvar.di.512" => "__builtin_ia32_permvardi512_mask",
+ "llvm.x86.avx512.mask.permvar.hi.128" => "__builtin_ia32_permvarhi128_mask",
+ "llvm.x86.avx512.mask.permvar.hi.256" => "__builtin_ia32_permvarhi256_mask",
+ "llvm.x86.avx512.mask.permvar.hi.512" => "__builtin_ia32_permvarhi512_mask",
+ "llvm.x86.avx512.mask.permvar.qi.128" => "__builtin_ia32_permvarqi128_mask",
+ "llvm.x86.avx512.mask.permvar.qi.256" => "__builtin_ia32_permvarqi256_mask",
+ "llvm.x86.avx512.mask.permvar.qi.512" => "__builtin_ia32_permvarqi512_mask",
+ "llvm.x86.avx512.mask.permvar.sf.256" => "__builtin_ia32_permvarsf256_mask",
+ "llvm.x86.avx512.mask.permvar.sf.512" => "__builtin_ia32_permvarsf512_mask",
+ "llvm.x86.avx512.mask.permvar.si.256" => "__builtin_ia32_permvarsi256_mask",
+ "llvm.x86.avx512.mask.permvar.si.512" => "__builtin_ia32_permvarsi512_mask",
+ "llvm.x86.avx512.mask.pmaddubs.w.128" => "__builtin_ia32_pmaddubsw128_mask",
+ "llvm.x86.avx512.mask.pmaddubs.w.256" => "__builtin_ia32_pmaddubsw256_mask",
+ "llvm.x86.avx512.mask.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512_mask",
+ "llvm.x86.avx512.mask.pmaddw.d.128" => "__builtin_ia32_pmaddwd128_mask",
+ "llvm.x86.avx512.mask.pmaddw.d.256" => "__builtin_ia32_pmaddwd256_mask",
+ "llvm.x86.avx512.mask.pmaddw.d.512" => "__builtin_ia32_pmaddwd512_mask",
+ "llvm.x86.avx512.mask.pmaxs.b.128" => "__builtin_ia32_pmaxsb128_mask",
+ "llvm.x86.avx512.mask.pmaxs.b.256" => "__builtin_ia32_pmaxsb256_mask",
+ "llvm.x86.avx512.mask.pmaxs.b.512" => "__builtin_ia32_pmaxsb512_mask",
+ "llvm.x86.avx512.mask.pmaxs.d.128" => "__builtin_ia32_pmaxsd128_mask",
+ "llvm.x86.avx512.mask.pmaxs.d.256" => "__builtin_ia32_pmaxsd256_mask",
+ "llvm.x86.avx512.mask.pmaxs.d.512" => "__builtin_ia32_pmaxsd512_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.128" => "__builtin_ia32_pmaxsq128_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.256" => "__builtin_ia32_pmaxsq256_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.512" => "__builtin_ia32_pmaxsq512_mask",
+ "llvm.x86.avx512.mask.pmaxs.w.128" => "__builtin_ia32_pmaxsw128_mask",
+ "llvm.x86.avx512.mask.pmaxs.w.256" => "__builtin_ia32_pmaxsw256_mask",
+ "llvm.x86.avx512.mask.pmaxs.w.512" => "__builtin_ia32_pmaxsw512_mask",
+ "llvm.x86.avx512.mask.pmaxu.b.128" => "__builtin_ia32_pmaxub128_mask",
+ "llvm.x86.avx512.mask.pmaxu.b.256" => "__builtin_ia32_pmaxub256_mask",
+ "llvm.x86.avx512.mask.pmaxu.b.512" => "__builtin_ia32_pmaxub512_mask",
+ "llvm.x86.avx512.mask.pmaxu.d.128" => "__builtin_ia32_pmaxud128_mask",
+ "llvm.x86.avx512.mask.pmaxu.d.256" => "__builtin_ia32_pmaxud256_mask",
+ "llvm.x86.avx512.mask.pmaxu.d.512" => "__builtin_ia32_pmaxud512_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.128" => "__builtin_ia32_pmaxuq128_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.256" => "__builtin_ia32_pmaxuq256_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.512" => "__builtin_ia32_pmaxuq512_mask",
+ "llvm.x86.avx512.mask.pmaxu.w.128" => "__builtin_ia32_pmaxuw128_mask",
+ "llvm.x86.avx512.mask.pmaxu.w.256" => "__builtin_ia32_pmaxuw256_mask",
+ "llvm.x86.avx512.mask.pmaxu.w.512" => "__builtin_ia32_pmaxuw512_mask",
+ "llvm.x86.avx512.mask.pmins.b.128" => "__builtin_ia32_pminsb128_mask",
+ "llvm.x86.avx512.mask.pmins.b.256" => "__builtin_ia32_pminsb256_mask",
+ "llvm.x86.avx512.mask.pmins.b.512" => "__builtin_ia32_pminsb512_mask",
+ "llvm.x86.avx512.mask.pmins.d.128" => "__builtin_ia32_pminsd128_mask",
+ "llvm.x86.avx512.mask.pmins.d.256" => "__builtin_ia32_pminsd256_mask",
+ "llvm.x86.avx512.mask.pmins.d.512" => "__builtin_ia32_pminsd512_mask",
+ "llvm.x86.avx512.mask.pmins.q.128" => "__builtin_ia32_pminsq128_mask",
+ "llvm.x86.avx512.mask.pmins.q.256" => "__builtin_ia32_pminsq256_mask",
+ "llvm.x86.avx512.mask.pmins.q.512" => "__builtin_ia32_pminsq512_mask",
+ "llvm.x86.avx512.mask.pmins.w.128" => "__builtin_ia32_pminsw128_mask",
+ "llvm.x86.avx512.mask.pmins.w.256" => "__builtin_ia32_pminsw256_mask",
+ "llvm.x86.avx512.mask.pmins.w.512" => "__builtin_ia32_pminsw512_mask",
+ "llvm.x86.avx512.mask.pminu.b.128" => "__builtin_ia32_pminub128_mask",
+ "llvm.x86.avx512.mask.pminu.b.256" => "__builtin_ia32_pminub256_mask",
+ "llvm.x86.avx512.mask.pminu.b.512" => "__builtin_ia32_pminub512_mask",
+ "llvm.x86.avx512.mask.pminu.d.128" => "__builtin_ia32_pminud128_mask",
+ "llvm.x86.avx512.mask.pminu.d.256" => "__builtin_ia32_pminud256_mask",
+ "llvm.x86.avx512.mask.pminu.d.512" => "__builtin_ia32_pminud512_mask",
+ "llvm.x86.avx512.mask.pminu.q.128" => "__builtin_ia32_pminuq128_mask",
+ "llvm.x86.avx512.mask.pminu.q.256" => "__builtin_ia32_pminuq256_mask",
+ "llvm.x86.avx512.mask.pminu.q.512" => "__builtin_ia32_pminuq512_mask",
+ "llvm.x86.avx512.mask.pminu.w.128" => "__builtin_ia32_pminuw128_mask",
+ "llvm.x86.avx512.mask.pminu.w.256" => "__builtin_ia32_pminuw256_mask",
+ "llvm.x86.avx512.mask.pminu.w.512" => "__builtin_ia32_pminuw512_mask",
+ "llvm.x86.avx512.mask.pmov.db.128" => "__builtin_ia32_pmovdb128_mask",
+ "llvm.x86.avx512.mask.pmov.db.256" => "__builtin_ia32_pmovdb256_mask",
+ "llvm.x86.avx512.mask.pmov.db.512" => "__builtin_ia32_pmovdb512_mask",
+ "llvm.x86.avx512.mask.pmov.db.mem.128" => "__builtin_ia32_pmovdb128mem_mask",
+ "llvm.x86.avx512.mask.pmov.db.mem.256" => "__builtin_ia32_pmovdb256mem_mask",
+ "llvm.x86.avx512.mask.pmov.db.mem.512" => "__builtin_ia32_pmovdb512mem_mask",
+ "llvm.x86.avx512.mask.pmov.dw.128" => "__builtin_ia32_pmovdw128_mask",
+ "llvm.x86.avx512.mask.pmov.dw.256" => "__builtin_ia32_pmovdw256_mask",
+ "llvm.x86.avx512.mask.pmov.dw.512" => "__builtin_ia32_pmovdw512_mask",
+ "llvm.x86.avx512.mask.pmov.dw.mem.128" => "__builtin_ia32_pmovdw128mem_mask",
+ "llvm.x86.avx512.mask.pmov.dw.mem.256" => "__builtin_ia32_pmovdw256mem_mask",
+ "llvm.x86.avx512.mask.pmov.dw.mem.512" => "__builtin_ia32_pmovdw512mem_mask",
+ "llvm.x86.avx512.mask.pmov.qb.128" => "__builtin_ia32_pmovqb128_mask",
+ "llvm.x86.avx512.mask.pmov.qb.256" => "__builtin_ia32_pmovqb256_mask",
+ "llvm.x86.avx512.mask.pmov.qb.512" => "__builtin_ia32_pmovqb512_mask",
+ "llvm.x86.avx512.mask.pmov.qb.mem.128" => "__builtin_ia32_pmovqb128mem_mask",
+ "llvm.x86.avx512.mask.pmov.qb.mem.256" => "__builtin_ia32_pmovqb256mem_mask",
+ "llvm.x86.avx512.mask.pmov.qb.mem.512" => "__builtin_ia32_pmovqb512mem_mask",
+ "llvm.x86.avx512.mask.pmov.qd.128" => "__builtin_ia32_pmovqd128_mask",
+ "llvm.x86.avx512.mask.pmov.qd.256" => "__builtin_ia32_pmovqd256_mask",
+ "llvm.x86.avx512.mask.pmov.qd.512" => "__builtin_ia32_pmovqd512_mask",
+ "llvm.x86.avx512.mask.pmov.qd.mem.128" => "__builtin_ia32_pmovqd128mem_mask",
+ "llvm.x86.avx512.mask.pmov.qd.mem.256" => "__builtin_ia32_pmovqd256mem_mask",
+ "llvm.x86.avx512.mask.pmov.qd.mem.512" => "__builtin_ia32_pmovqd512mem_mask",
+ "llvm.x86.avx512.mask.pmov.qw.128" => "__builtin_ia32_pmovqw128_mask",
+ "llvm.x86.avx512.mask.pmov.qw.256" => "__builtin_ia32_pmovqw256_mask",
+ "llvm.x86.avx512.mask.pmov.qw.512" => "__builtin_ia32_pmovqw512_mask",
+ "llvm.x86.avx512.mask.pmov.qw.mem.128" => "__builtin_ia32_pmovqw128mem_mask",
+ "llvm.x86.avx512.mask.pmov.qw.mem.256" => "__builtin_ia32_pmovqw256mem_mask",
+ "llvm.x86.avx512.mask.pmov.qw.mem.512" => "__builtin_ia32_pmovqw512mem_mask",
+ "llvm.x86.avx512.mask.pmov.wb.128" => "__builtin_ia32_pmovwb128_mask",
+ "llvm.x86.avx512.mask.pmov.wb.256" => "__builtin_ia32_pmovwb256_mask",
+ "llvm.x86.avx512.mask.pmov.wb.512" => "__builtin_ia32_pmovwb512_mask",
+ "llvm.x86.avx512.mask.pmov.wb.mem.128" => "__builtin_ia32_pmovwb128mem_mask",
+ "llvm.x86.avx512.mask.pmov.wb.mem.256" => "__builtin_ia32_pmovwb256mem_mask",
+ "llvm.x86.avx512.mask.pmov.wb.mem.512" => "__builtin_ia32_pmovwb512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.db.128" => "__builtin_ia32_pmovsdb128_mask",
+ "llvm.x86.avx512.mask.pmovs.db.256" => "__builtin_ia32_pmovsdb256_mask",
+ "llvm.x86.avx512.mask.pmovs.db.512" => "__builtin_ia32_pmovsdb512_mask",
+ "llvm.x86.avx512.mask.pmovs.db.mem.128" => "__builtin_ia32_pmovsdb128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.db.mem.256" => "__builtin_ia32_pmovsdb256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.db.mem.512" => "__builtin_ia32_pmovsdb512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.128" => "__builtin_ia32_pmovsdw128_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.256" => "__builtin_ia32_pmovsdw256_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.512" => "__builtin_ia32_pmovsdw512_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.mem.128" => "__builtin_ia32_pmovsdw128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.mem.256" => "__builtin_ia32_pmovsdw256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.dw.mem.512" => "__builtin_ia32_pmovsdw512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.128" => "__builtin_ia32_pmovsqb128_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.256" => "__builtin_ia32_pmovsqb256_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.512" => "__builtin_ia32_pmovsqb512_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.mem.128" => "__builtin_ia32_pmovsqb128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.mem.256" => "__builtin_ia32_pmovsqb256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qb.mem.512" => "__builtin_ia32_pmovsqb512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.128" => "__builtin_ia32_pmovsqd128_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.256" => "__builtin_ia32_pmovsqd256_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.512" => "__builtin_ia32_pmovsqd512_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.mem.128" => "__builtin_ia32_pmovsqd128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.mem.256" => "__builtin_ia32_pmovsqd256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qd.mem.512" => "__builtin_ia32_pmovsqd512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.128" => "__builtin_ia32_pmovsqw128_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.256" => "__builtin_ia32_pmovsqw256_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.512" => "__builtin_ia32_pmovsqw512_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.mem.128" => "__builtin_ia32_pmovsqw128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.mem.256" => "__builtin_ia32_pmovsqw256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.qw.mem.512" => "__builtin_ia32_pmovsqw512mem_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.128" => "__builtin_ia32_pmovswb128_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.256" => "__builtin_ia32_pmovswb256_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.512" => "__builtin_ia32_pmovswb512_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.mem.128" => "__builtin_ia32_pmovswb128mem_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.mem.256" => "__builtin_ia32_pmovswb256mem_mask",
+ "llvm.x86.avx512.mask.pmovs.wb.mem.512" => "__builtin_ia32_pmovswb512mem_mask",
+ "llvm.x86.avx512.mask.pmovsxb.d.128" => "__builtin_ia32_pmovsxbd128_mask",
+ "llvm.x86.avx512.mask.pmovsxb.d.256" => "__builtin_ia32_pmovsxbd256_mask",
+ "llvm.x86.avx512.mask.pmovsxb.d.512" => "__builtin_ia32_pmovsxbd512_mask",
+ "llvm.x86.avx512.mask.pmovsxb.q.128" => "__builtin_ia32_pmovsxbq128_mask",
+ "llvm.x86.avx512.mask.pmovsxb.q.256" => "__builtin_ia32_pmovsxbq256_mask",
+ "llvm.x86.avx512.mask.pmovsxb.q.512" => "__builtin_ia32_pmovsxbq512_mask",
+ "llvm.x86.avx512.mask.pmovsxb.w.128" => "__builtin_ia32_pmovsxbw128_mask",
+ "llvm.x86.avx512.mask.pmovsxb.w.256" => "__builtin_ia32_pmovsxbw256_mask",
+ "llvm.x86.avx512.mask.pmovsxb.w.512" => "__builtin_ia32_pmovsxbw512_mask",
+ "llvm.x86.avx512.mask.pmovsxd.q.128" => "__builtin_ia32_pmovsxdq128_mask",
+ "llvm.x86.avx512.mask.pmovsxd.q.256" => "__builtin_ia32_pmovsxdq256_mask",
+ "llvm.x86.avx512.mask.pmovsxd.q.512" => "__builtin_ia32_pmovsxdq512_mask",
+ "llvm.x86.avx512.mask.pmovsxw.d.128" => "__builtin_ia32_pmovsxwd128_mask",
+ "llvm.x86.avx512.mask.pmovsxw.d.256" => "__builtin_ia32_pmovsxwd256_mask",
+ "llvm.x86.avx512.mask.pmovsxw.d.512" => "__builtin_ia32_pmovsxwd512_mask",
+ "llvm.x86.avx512.mask.pmovsxw.q.128" => "__builtin_ia32_pmovsxwq128_mask",
+ "llvm.x86.avx512.mask.pmovsxw.q.256" => "__builtin_ia32_pmovsxwq256_mask",
+ "llvm.x86.avx512.mask.pmovsxw.q.512" => "__builtin_ia32_pmovsxwq512_mask",
+ "llvm.x86.avx512.mask.pmovus.db.128" => "__builtin_ia32_pmovusdb128_mask",
+ "llvm.x86.avx512.mask.pmovus.db.256" => "__builtin_ia32_pmovusdb256_mask",
+ "llvm.x86.avx512.mask.pmovus.db.512" => "__builtin_ia32_pmovusdb512_mask",
+ "llvm.x86.avx512.mask.pmovus.db.mem.128" => "__builtin_ia32_pmovusdb128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.db.mem.256" => "__builtin_ia32_pmovusdb256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.db.mem.512" => "__builtin_ia32_pmovusdb512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.128" => "__builtin_ia32_pmovusdw128_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.256" => "__builtin_ia32_pmovusdw256_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.512" => "__builtin_ia32_pmovusdw512_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.mem.128" => "__builtin_ia32_pmovusdw128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.mem.256" => "__builtin_ia32_pmovusdw256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.dw.mem.512" => "__builtin_ia32_pmovusdw512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.128" => "__builtin_ia32_pmovusqb128_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.256" => "__builtin_ia32_pmovusqb256_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.512" => "__builtin_ia32_pmovusqb512_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.mem.128" => "__builtin_ia32_pmovusqb128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.mem.256" => "__builtin_ia32_pmovusqb256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qb.mem.512" => "__builtin_ia32_pmovusqb512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.128" => "__builtin_ia32_pmovusqd128_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.256" => "__builtin_ia32_pmovusqd256_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.512" => "__builtin_ia32_pmovusqd512_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.mem.128" => "__builtin_ia32_pmovusqd128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.mem.256" => "__builtin_ia32_pmovusqd256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qd.mem.512" => "__builtin_ia32_pmovusqd512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.128" => "__builtin_ia32_pmovusqw128_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.256" => "__builtin_ia32_pmovusqw256_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.512" => "__builtin_ia32_pmovusqw512_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.mem.128" => "__builtin_ia32_pmovusqw128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.mem.256" => "__builtin_ia32_pmovusqw256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.qw.mem.512" => "__builtin_ia32_pmovusqw512mem_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.128" => "__builtin_ia32_pmovuswb128_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.256" => "__builtin_ia32_pmovuswb256_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.512" => "__builtin_ia32_pmovuswb512_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.mem.128" => "__builtin_ia32_pmovuswb128mem_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.mem.256" => "__builtin_ia32_pmovuswb256mem_mask",
+ "llvm.x86.avx512.mask.pmovus.wb.mem.512" => "__builtin_ia32_pmovuswb512mem_mask",
+ "llvm.x86.avx512.mask.pmovzxb.d.128" => "__builtin_ia32_pmovzxbd128_mask",
+ "llvm.x86.avx512.mask.pmovzxb.d.256" => "__builtin_ia32_pmovzxbd256_mask",
+ "llvm.x86.avx512.mask.pmovzxb.d.512" => "__builtin_ia32_pmovzxbd512_mask",
+ "llvm.x86.avx512.mask.pmovzxb.q.128" => "__builtin_ia32_pmovzxbq128_mask",
+ "llvm.x86.avx512.mask.pmovzxb.q.256" => "__builtin_ia32_pmovzxbq256_mask",
+ "llvm.x86.avx512.mask.pmovzxb.q.512" => "__builtin_ia32_pmovzxbq512_mask",
+ "llvm.x86.avx512.mask.pmovzxb.w.128" => "__builtin_ia32_pmovzxbw128_mask",
+ "llvm.x86.avx512.mask.pmovzxb.w.256" => "__builtin_ia32_pmovzxbw256_mask",
+ "llvm.x86.avx512.mask.pmovzxb.w.512" => "__builtin_ia32_pmovzxbw512_mask",
+ "llvm.x86.avx512.mask.pmovzxd.q.128" => "__builtin_ia32_pmovzxdq128_mask",
+ "llvm.x86.avx512.mask.pmovzxd.q.256" => "__builtin_ia32_pmovzxdq256_mask",
+ "llvm.x86.avx512.mask.pmovzxd.q.512" => "__builtin_ia32_pmovzxdq512_mask",
+ "llvm.x86.avx512.mask.pmovzxw.d.128" => "__builtin_ia32_pmovzxwd128_mask",
+ "llvm.x86.avx512.mask.pmovzxw.d.256" => "__builtin_ia32_pmovzxwd256_mask",
+ "llvm.x86.avx512.mask.pmovzxw.d.512" => "__builtin_ia32_pmovzxwd512_mask",
+ "llvm.x86.avx512.mask.pmovzxw.q.128" => "__builtin_ia32_pmovzxwq128_mask",
+ "llvm.x86.avx512.mask.pmovzxw.q.256" => "__builtin_ia32_pmovzxwq256_mask",
+ "llvm.x86.avx512.mask.pmovzxw.q.512" => "__builtin_ia32_pmovzxwq512_mask",
+ "llvm.x86.avx512.mask.pmul.dq.128" => "__builtin_ia32_pmuldq128_mask",
+ "llvm.x86.avx512.mask.pmul.dq.256" => "__builtin_ia32_pmuldq256_mask",
+ "llvm.x86.avx512.mask.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask",
+ "llvm.x86.avx512.mask.pmul.hr.sw.128" => "__builtin_ia32_pmulhrsw128_mask",
+ "llvm.x86.avx512.mask.pmul.hr.sw.256" => "__builtin_ia32_pmulhrsw256_mask",
+ "llvm.x86.avx512.mask.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512_mask",
+ "llvm.x86.avx512.mask.pmulh.w.128" => "__builtin_ia32_pmulhw128_mask",
+ "llvm.x86.avx512.mask.pmulh.w.256" => "__builtin_ia32_pmulhw256_mask",
+ "llvm.x86.avx512.mask.pmulh.w.512" => "__builtin_ia32_pmulhw512_mask",
+ "llvm.x86.avx512.mask.pmulhu.w.128" => "__builtin_ia32_pmulhuw128_mask",
+ "llvm.x86.avx512.mask.pmulhu.w.256" => "__builtin_ia32_pmulhuw256_mask",
+ "llvm.x86.avx512.mask.pmulhu.w.512" => "__builtin_ia32_pmulhuw512_mask",
+ "llvm.x86.avx512.mask.pmull.d.128" => "__builtin_ia32_pmulld128_mask",
+ "llvm.x86.avx512.mask.pmull.d.256" => "__builtin_ia32_pmulld256_mask",
+ "llvm.x86.avx512.mask.pmull.d.512" => "__builtin_ia32_pmulld512_mask",
+ "llvm.x86.avx512.mask.pmull.q.128" => "__builtin_ia32_pmullq128_mask",
+ "llvm.x86.avx512.mask.pmull.q.256" => "__builtin_ia32_pmullq256_mask",
+ "llvm.x86.avx512.mask.pmull.q.512" => "__builtin_ia32_pmullq512_mask",
+ "llvm.x86.avx512.mask.pmull.w.128" => "__builtin_ia32_pmullw128_mask",
+ "llvm.x86.avx512.mask.pmull.w.256" => "__builtin_ia32_pmullw256_mask",
+ "llvm.x86.avx512.mask.pmull.w.512" => "__builtin_ia32_pmullw512_mask",
+ "llvm.x86.avx512.mask.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128_mask",
+ "llvm.x86.avx512.mask.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256_mask",
+ "llvm.x86.avx512.mask.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512_mask",
+ "llvm.x86.avx512.mask.pmulu.dq.128" => "__builtin_ia32_pmuludq128_mask",
+ "llvm.x86.avx512.mask.pmulu.dq.256" => "__builtin_ia32_pmuludq256_mask",
+ "llvm.x86.avx512.mask.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask",
+ "llvm.x86.avx512.mask.prol.d.128" => "__builtin_ia32_prold128_mask",
+ "llvm.x86.avx512.mask.prol.d.256" => "__builtin_ia32_prold256_mask",
+ "llvm.x86.avx512.mask.prol.d.512" => "__builtin_ia32_prold512_mask",
+ "llvm.x86.avx512.mask.prol.q.128" => "__builtin_ia32_prolq128_mask",
+ "llvm.x86.avx512.mask.prol.q.256" => "__builtin_ia32_prolq256_mask",
+ "llvm.x86.avx512.mask.prol.q.512" => "__builtin_ia32_prolq512_mask",
+ "llvm.x86.avx512.mask.prolv.d.128" => "__builtin_ia32_prolvd128_mask",
+ "llvm.x86.avx512.mask.prolv.d.256" => "__builtin_ia32_prolvd256_mask",
+ "llvm.x86.avx512.mask.prolv.d.512" => "__builtin_ia32_prolvd512_mask",
+ "llvm.x86.avx512.mask.prolv.q.128" => "__builtin_ia32_prolvq128_mask",
+ "llvm.x86.avx512.mask.prolv.q.256" => "__builtin_ia32_prolvq256_mask",
+ "llvm.x86.avx512.mask.prolv.q.512" => "__builtin_ia32_prolvq512_mask",
+ "llvm.x86.avx512.mask.pror.d.128" => "__builtin_ia32_prord128_mask",
+ "llvm.x86.avx512.mask.pror.d.256" => "__builtin_ia32_prord256_mask",
+ "llvm.x86.avx512.mask.pror.d.512" => "__builtin_ia32_prord512_mask",
+ "llvm.x86.avx512.mask.pror.q.128" => "__builtin_ia32_prorq128_mask",
+ "llvm.x86.avx512.mask.pror.q.256" => "__builtin_ia32_prorq256_mask",
+ "llvm.x86.avx512.mask.pror.q.512" => "__builtin_ia32_prorq512_mask",
+ "llvm.x86.avx512.mask.prorv.d.128" => "__builtin_ia32_prorvd128_mask",
+ "llvm.x86.avx512.mask.prorv.d.256" => "__builtin_ia32_prorvd256_mask",
+ "llvm.x86.avx512.mask.prorv.d.512" => "__builtin_ia32_prorvd512_mask",
+ "llvm.x86.avx512.mask.prorv.q.128" => "__builtin_ia32_prorvq128_mask",
+ "llvm.x86.avx512.mask.prorv.q.256" => "__builtin_ia32_prorvq256_mask",
+ "llvm.x86.avx512.mask.prorv.q.512" => "__builtin_ia32_prorvq512_mask",
+ "llvm.x86.avx512.mask.pshuf.b.128" => "__builtin_ia32_pshufb128_mask",
+ "llvm.x86.avx512.mask.pshuf.b.256" => "__builtin_ia32_pshufb256_mask",
+ "llvm.x86.avx512.mask.pshuf.b.512" => "__builtin_ia32_pshufb512_mask",
+ "llvm.x86.avx512.mask.psll.d" => "__builtin_ia32_pslld512_mask",
+ "llvm.x86.avx512.mask.psll.d.128" => "__builtin_ia32_pslld128_mask",
+ "llvm.x86.avx512.mask.psll.d.256" => "__builtin_ia32_pslld256_mask",
+ "llvm.x86.avx512.mask.psll.di.128" => "__builtin_ia32_pslldi128_mask",
+ "llvm.x86.avx512.mask.psll.di.256" => "__builtin_ia32_pslldi256_mask",
+ "llvm.x86.avx512.mask.psll.di.512" => "__builtin_ia32_pslldi512_mask",
+ "llvm.x86.avx512.mask.psll.q" => "__builtin_ia32_psllq512_mask",
+ "llvm.x86.avx512.mask.psll.q.128" => "__builtin_ia32_psllq128_mask",
+ "llvm.x86.avx512.mask.psll.q.256" => "__builtin_ia32_psllq256_mask",
+ "llvm.x86.avx512.mask.psll.qi.128" => "__builtin_ia32_psllqi128_mask",
+ "llvm.x86.avx512.mask.psll.qi.256" => "__builtin_ia32_psllqi256_mask",
+ "llvm.x86.avx512.mask.psll.qi.512" => "__builtin_ia32_psllqi512_mask",
+ "llvm.x86.avx512.mask.psll.w.128" => "__builtin_ia32_psllw128_mask",
+ "llvm.x86.avx512.mask.psll.w.256" => "__builtin_ia32_psllw256_mask",
+ "llvm.x86.avx512.mask.psll.w.512" => "__builtin_ia32_psllw512_mask",
+ "llvm.x86.avx512.mask.psll.wi.128" => "__builtin_ia32_psllwi128_mask",
+ "llvm.x86.avx512.mask.psll.wi.256" => "__builtin_ia32_psllwi256_mask",
+ "llvm.x86.avx512.mask.psll.wi.512" => "__builtin_ia32_psllwi512_mask",
+ "llvm.x86.avx512.mask.psllv.d" => "__builtin_ia32_psllv16si_mask",
+ "llvm.x86.avx512.mask.psllv.q" => "__builtin_ia32_psllv8di_mask",
+ "llvm.x86.avx512.mask.psllv16.hi" => "__builtin_ia32_psllv16hi_mask",
+ "llvm.x86.avx512.mask.psllv2.di" => "__builtin_ia32_psllv2di_mask",
+ "llvm.x86.avx512.mask.psllv32hi" => "__builtin_ia32_psllv32hi_mask",
+ "llvm.x86.avx512.mask.psllv4.di" => "__builtin_ia32_psllv4di_mask",
+ "llvm.x86.avx512.mask.psllv4.si" => "__builtin_ia32_psllv4si_mask",
+ "llvm.x86.avx512.mask.psllv8.hi" => "__builtin_ia32_psllv8hi_mask",
+ "llvm.x86.avx512.mask.psllv8.si" => "__builtin_ia32_psllv8si_mask",
+ "llvm.x86.avx512.mask.psra.d" => "__builtin_ia32_psrad512_mask",
+ "llvm.x86.avx512.mask.psra.d.128" => "__builtin_ia32_psrad128_mask",
+ "llvm.x86.avx512.mask.psra.d.256" => "__builtin_ia32_psrad256_mask",
+ "llvm.x86.avx512.mask.psra.di.128" => "__builtin_ia32_psradi128_mask",
+ "llvm.x86.avx512.mask.psra.di.256" => "__builtin_ia32_psradi256_mask",
+ "llvm.x86.avx512.mask.psra.di.512" => "__builtin_ia32_psradi512_mask",
+ "llvm.x86.avx512.mask.psra.q" => "__builtin_ia32_psraq512_mask",
+ "llvm.x86.avx512.mask.psra.q.128" => "__builtin_ia32_psraq128_mask",
+ "llvm.x86.avx512.mask.psra.q.256" => "__builtin_ia32_psraq256_mask",
+ "llvm.x86.avx512.mask.psra.qi.128" => "__builtin_ia32_psraqi128_mask",
+ "llvm.x86.avx512.mask.psra.qi.256" => "__builtin_ia32_psraqi256_mask",
+ "llvm.x86.avx512.mask.psra.qi.512" => "__builtin_ia32_psraqi512_mask",
+ "llvm.x86.avx512.mask.psra.w.128" => "__builtin_ia32_psraw128_mask",
+ "llvm.x86.avx512.mask.psra.w.256" => "__builtin_ia32_psraw256_mask",
+ "llvm.x86.avx512.mask.psra.w.512" => "__builtin_ia32_psraw512_mask",
+ "llvm.x86.avx512.mask.psra.wi.128" => "__builtin_ia32_psrawi128_mask",
+ "llvm.x86.avx512.mask.psra.wi.256" => "__builtin_ia32_psrawi256_mask",
+ "llvm.x86.avx512.mask.psra.wi.512" => "__builtin_ia32_psrawi512_mask",
+ "llvm.x86.avx512.mask.psrav.d" => "__builtin_ia32_psrav16si_mask",
+ "llvm.x86.avx512.mask.psrav.q" => "__builtin_ia32_psrav8di_mask",
+ "llvm.x86.avx512.mask.psrav.q.128" => "__builtin_ia32_psravq128_mask",
+ "llvm.x86.avx512.mask.psrav.q.256" => "__builtin_ia32_psravq256_mask",
+ "llvm.x86.avx512.mask.psrav16.hi" => "__builtin_ia32_psrav16hi_mask",
+ "llvm.x86.avx512.mask.psrav32.hi" => "__builtin_ia32_psrav32hi_mask",
+ "llvm.x86.avx512.mask.psrav4.si" => "__builtin_ia32_psrav4si_mask",
+ "llvm.x86.avx512.mask.psrav8.hi" => "__builtin_ia32_psrav8hi_mask",
+ "llvm.x86.avx512.mask.psrav8.si" => "__builtin_ia32_psrav8si_mask",
+ "llvm.x86.avx512.mask.psrl.d" => "__builtin_ia32_psrld512_mask",
+ "llvm.x86.avx512.mask.psrl.d.128" => "__builtin_ia32_psrld128_mask",
+ "llvm.x86.avx512.mask.psrl.d.256" => "__builtin_ia32_psrld256_mask",
+ "llvm.x86.avx512.mask.psrl.di.128" => "__builtin_ia32_psrldi128_mask",
+ "llvm.x86.avx512.mask.psrl.di.256" => "__builtin_ia32_psrldi256_mask",
+ "llvm.x86.avx512.mask.psrl.di.512" => "__builtin_ia32_psrldi512_mask",
+ "llvm.x86.avx512.mask.psrl.q" => "__builtin_ia32_psrlq512_mask",
+ "llvm.x86.avx512.mask.psrl.q.128" => "__builtin_ia32_psrlq128_mask",
+ "llvm.x86.avx512.mask.psrl.q.256" => "__builtin_ia32_psrlq256_mask",
+ "llvm.x86.avx512.mask.psrl.qi.128" => "__builtin_ia32_psrlqi128_mask",
+ "llvm.x86.avx512.mask.psrl.qi.256" => "__builtin_ia32_psrlqi256_mask",
+ "llvm.x86.avx512.mask.psrl.qi.512" => "__builtin_ia32_psrlqi512_mask",
+ "llvm.x86.avx512.mask.psrl.w.128" => "__builtin_ia32_psrlw128_mask",
+ "llvm.x86.avx512.mask.psrl.w.256" => "__builtin_ia32_psrlw256_mask",
+ "llvm.x86.avx512.mask.psrl.w.512" => "__builtin_ia32_psrlw512_mask",
+ "llvm.x86.avx512.mask.psrl.wi.128" => "__builtin_ia32_psrlwi128_mask",
+ "llvm.x86.avx512.mask.psrl.wi.256" => "__builtin_ia32_psrlwi256_mask",
+ "llvm.x86.avx512.mask.psrl.wi.512" => "__builtin_ia32_psrlwi512_mask",
+ "llvm.x86.avx512.mask.psrlv.d" => "__builtin_ia32_psrlv16si_mask",
+ "llvm.x86.avx512.mask.psrlv.q" => "__builtin_ia32_psrlv8di_mask",
+ "llvm.x86.avx512.mask.psrlv16.hi" => "__builtin_ia32_psrlv16hi_mask",
+ "llvm.x86.avx512.mask.psrlv2.di" => "__builtin_ia32_psrlv2di_mask",
+ "llvm.x86.avx512.mask.psrlv32hi" => "__builtin_ia32_psrlv32hi_mask",
+ "llvm.x86.avx512.mask.psrlv4.di" => "__builtin_ia32_psrlv4di_mask",
+ "llvm.x86.avx512.mask.psrlv4.si" => "__builtin_ia32_psrlv4si_mask",
+ "llvm.x86.avx512.mask.psrlv8.hi" => "__builtin_ia32_psrlv8hi_mask",
+ "llvm.x86.avx512.mask.psrlv8.si" => "__builtin_ia32_psrlv8si_mask",
+ "llvm.x86.avx512.mask.psub.b.128" => "__builtin_ia32_psubb128_mask",
+ "llvm.x86.avx512.mask.psub.b.256" => "__builtin_ia32_psubb256_mask",
+ "llvm.x86.avx512.mask.psub.b.512" => "__builtin_ia32_psubb512_mask",
+ "llvm.x86.avx512.mask.psub.d.128" => "__builtin_ia32_psubd128_mask",
+ "llvm.x86.avx512.mask.psub.d.256" => "__builtin_ia32_psubd256_mask",
+ "llvm.x86.avx512.mask.psub.d.512" => "__builtin_ia32_psubd512_mask",
+ "llvm.x86.avx512.mask.psub.q.128" => "__builtin_ia32_psubq128_mask",
+ "llvm.x86.avx512.mask.psub.q.256" => "__builtin_ia32_psubq256_mask",
+ "llvm.x86.avx512.mask.psub.q.512" => "__builtin_ia32_psubq512_mask",
+ "llvm.x86.avx512.mask.psub.w.128" => "__builtin_ia32_psubw128_mask",
+ "llvm.x86.avx512.mask.psub.w.256" => "__builtin_ia32_psubw256_mask",
+ "llvm.x86.avx512.mask.psub.w.512" => "__builtin_ia32_psubw512_mask",
+ "llvm.x86.avx512.mask.psubs.b.128" => "__builtin_ia32_psubsb128_mask",
+ "llvm.x86.avx512.mask.psubs.b.256" => "__builtin_ia32_psubsb256_mask",
+ "llvm.x86.avx512.mask.psubs.b.512" => "__builtin_ia32_psubsb512_mask",
+ "llvm.x86.avx512.mask.psubs.w.128" => "__builtin_ia32_psubsw128_mask",
+ "llvm.x86.avx512.mask.psubs.w.256" => "__builtin_ia32_psubsw256_mask",
+ "llvm.x86.avx512.mask.psubs.w.512" => "__builtin_ia32_psubsw512_mask",
+ "llvm.x86.avx512.mask.psubus.b.128" => "__builtin_ia32_psubusb128_mask",
+ "llvm.x86.avx512.mask.psubus.b.256" => "__builtin_ia32_psubusb256_mask",
+ "llvm.x86.avx512.mask.psubus.b.512" => "__builtin_ia32_psubusb512_mask",
+ "llvm.x86.avx512.mask.psubus.w.128" => "__builtin_ia32_psubusw128_mask",
+ "llvm.x86.avx512.mask.psubus.w.256" => "__builtin_ia32_psubusw256_mask",
+ "llvm.x86.avx512.mask.psubus.w.512" => "__builtin_ia32_psubusw512_mask",
+ "llvm.x86.avx512.mask.pternlog.d.128" => "__builtin_ia32_pternlogd128_mask",
+ "llvm.x86.avx512.mask.pternlog.d.256" => "__builtin_ia32_pternlogd256_mask",
+ "llvm.x86.avx512.mask.pternlog.d.512" => "__builtin_ia32_pternlogd512_mask",
+ "llvm.x86.avx512.mask.pternlog.q.128" => "__builtin_ia32_pternlogq128_mask",
+ "llvm.x86.avx512.mask.pternlog.q.256" => "__builtin_ia32_pternlogq256_mask",
+ "llvm.x86.avx512.mask.pternlog.q.512" => "__builtin_ia32_pternlogq512_mask",
+ "llvm.x86.avx512.mask.ptestm.d.512" => "__builtin_ia32_ptestmd512",
+ "llvm.x86.avx512.mask.ptestm.q.512" => "__builtin_ia32_ptestmq512",
+ "llvm.x86.avx512.mask.range.pd.128" => "__builtin_ia32_rangepd128_mask",
+ "llvm.x86.avx512.mask.range.pd.256" => "__builtin_ia32_rangepd256_mask",
+ "llvm.x86.avx512.mask.range.pd.512" => "__builtin_ia32_rangepd512_mask",
+ "llvm.x86.avx512.mask.range.ps.128" => "__builtin_ia32_rangeps128_mask",
+ "llvm.x86.avx512.mask.range.ps.256" => "__builtin_ia32_rangeps256_mask",
+ "llvm.x86.avx512.mask.range.ps.512" => "__builtin_ia32_rangeps512_mask",
+ "llvm.x86.avx512.mask.range.sd" => "__builtin_ia32_rangesd128_round_mask",
+ "llvm.x86.avx512.mask.range.ss" => "__builtin_ia32_rangess128_round_mask",
+ "llvm.x86.avx512.mask.reduce.pd.128" => "__builtin_ia32_reducepd128_mask",
+ "llvm.x86.avx512.mask.reduce.pd.256" => "__builtin_ia32_reducepd256_mask",
+ "llvm.x86.avx512.mask.reduce.pd.512" => "__builtin_ia32_reducepd512_mask",
+ "llvm.x86.avx512.mask.reduce.ps.128" => "__builtin_ia32_reduceps128_mask",
+ "llvm.x86.avx512.mask.reduce.ps.256" => "__builtin_ia32_reduceps256_mask",
+ "llvm.x86.avx512.mask.reduce.ps.512" => "__builtin_ia32_reduceps512_mask",
+ "llvm.x86.avx512.mask.reduce.sd" => "__builtin_ia32_reducesd_mask",
+ "llvm.x86.avx512.mask.reduce.ss" => "__builtin_ia32_reducess_mask",
+ "llvm.x86.avx512.mask.rndscale.pd.128" => "__builtin_ia32_rndscalepd_128_mask",
+ "llvm.x86.avx512.mask.rndscale.pd.256" => "__builtin_ia32_rndscalepd_256_mask",
+ "llvm.x86.avx512.mask.rndscale.pd.512" => "__builtin_ia32_rndscalepd_mask",
+ "llvm.x86.avx512.mask.rndscale.ps.128" => "__builtin_ia32_rndscaleps_128_mask",
+ "llvm.x86.avx512.mask.rndscale.ps.256" => "__builtin_ia32_rndscaleps_256_mask",
+ "llvm.x86.avx512.mask.rndscale.ps.512" => "__builtin_ia32_rndscaleps_mask",
+ "llvm.x86.avx512.mask.rndscale.sd" => "__builtin_ia32_rndscalesd_round_mask",
+ "llvm.x86.avx512.mask.rndscale.ss" => "__builtin_ia32_rndscaless_round_mask",
+ "llvm.x86.avx512.mask.scalef.pd.128" => "__builtin_ia32_scalefpd128_mask",
+ "llvm.x86.avx512.mask.scalef.pd.256" => "__builtin_ia32_scalefpd256_mask",
+ "llvm.x86.avx512.mask.scalef.pd.512" => "__builtin_ia32_scalefpd512_mask",
+ "llvm.x86.avx512.mask.scalef.ps.128" => "__builtin_ia32_scalefps128_mask",
+ "llvm.x86.avx512.mask.scalef.ps.256" => "__builtin_ia32_scalefps256_mask",
+ "llvm.x86.avx512.mask.scalef.ps.512" => "__builtin_ia32_scalefps512_mask",
+ "llvm.x86.avx512.mask.scalef.sd" => "__builtin_ia32_scalefsd_round_mask",
+ "llvm.x86.avx512.mask.scalef.ss" => "__builtin_ia32_scalefss_round_mask",
+ "llvm.x86.avx512.mask.shuf.f32x4" => "__builtin_ia32_shuf_f32x4_mask",
+ "llvm.x86.avx512.mask.shuf.f32x4.256" => "__builtin_ia32_shuf_f32x4_256_mask",
+ "llvm.x86.avx512.mask.shuf.f64x2" => "__builtin_ia32_shuf_f64x2_mask",
+ "llvm.x86.avx512.mask.shuf.f64x2.256" => "__builtin_ia32_shuf_f64x2_256_mask",
+ "llvm.x86.avx512.mask.shuf.i32x4" => "__builtin_ia32_shuf_i32x4_mask",
+ "llvm.x86.avx512.mask.shuf.i32x4.256" => "__builtin_ia32_shuf_i32x4_256_mask",
+ "llvm.x86.avx512.mask.shuf.i64x2" => "__builtin_ia32_shuf_i64x2_mask",
+ "llvm.x86.avx512.mask.shuf.i64x2.256" => "__builtin_ia32_shuf_i64x2_256_mask",
+ "llvm.x86.avx512.mask.shuf.pd.128" => "__builtin_ia32_shufpd128_mask",
+ "llvm.x86.avx512.mask.shuf.pd.256" => "__builtin_ia32_shufpd256_mask",
+ "llvm.x86.avx512.mask.shuf.pd.512" => "__builtin_ia32_shufpd512_mask",
+ "llvm.x86.avx512.mask.shuf.ps.128" => "__builtin_ia32_shufps128_mask",
+ "llvm.x86.avx512.mask.shuf.ps.256" => "__builtin_ia32_shufps256_mask",
+ "llvm.x86.avx512.mask.shuf.ps.512" => "__builtin_ia32_shufps512_mask",
+ "llvm.x86.avx512.mask.sqrt.pd.128" => "__builtin_ia32_sqrtpd128_mask",
+ "llvm.x86.avx512.mask.sqrt.pd.256" => "__builtin_ia32_sqrtpd256_mask",
+ "llvm.x86.avx512.mask.sqrt.pd.512" => "__builtin_ia32_sqrtpd512_mask",
+ "llvm.x86.avx512.mask.sqrt.ps.128" => "__builtin_ia32_sqrtps128_mask",
+ "llvm.x86.avx512.mask.sqrt.ps.256" => "__builtin_ia32_sqrtps256_mask",
+ "llvm.x86.avx512.mask.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask",
+ "llvm.x86.avx512.mask.sqrt.sd" => "__builtin_ia32_sqrtsd_round_mask",
+ "llvm.x86.avx512.mask.sqrt.ss" => "__builtin_ia32_sqrtss_round_mask",
+ "llvm.x86.avx512.mask.store.ss" => "__builtin_ia32_storess_mask",
+ "llvm.x86.avx512.mask.storeu.d.512" => "__builtin_ia32_storedqusi512_mask",
+ "llvm.x86.avx512.mask.storeu.pd.512" => "__builtin_ia32_storeupd512_mask",
+ "llvm.x86.avx512.mask.storeu.ps.512" => "__builtin_ia32_storeups512_mask",
+ "llvm.x86.avx512.mask.storeu.q.512" => "__builtin_ia32_storedqudi512_mask",
+ "llvm.x86.avx512.mask.sub.pd.128" => "__builtin_ia32_subpd128_mask",
+ "llvm.x86.avx512.mask.sub.pd.256" => "__builtin_ia32_subpd256_mask",
+ "llvm.x86.avx512.mask.sub.pd.512" => "__builtin_ia32_subpd512_mask",
+ "llvm.x86.avx512.mask.sub.ps.128" => "__builtin_ia32_subps128_mask",
+ "llvm.x86.avx512.mask.sub.ps.256" => "__builtin_ia32_subps256_mask",
+ "llvm.x86.avx512.mask.sub.ps.512" => "__builtin_ia32_subps512_mask",
+ "llvm.x86.avx512.mask.sub.sd.round" => "__builtin_ia32_subsd_round_mask",
+ "llvm.x86.avx512.mask.sub.ss.round" => "__builtin_ia32_subss_round_mask",
+ "llvm.x86.avx512.mask.valign.d.128" => "__builtin_ia32_alignd128_mask",
+ "llvm.x86.avx512.mask.valign.d.256" => "__builtin_ia32_alignd256_mask",
+ "llvm.x86.avx512.mask.valign.d.512" => "__builtin_ia32_alignd512_mask",
+ "llvm.x86.avx512.mask.valign.q.128" => "__builtin_ia32_alignq128_mask",
+ "llvm.x86.avx512.mask.valign.q.256" => "__builtin_ia32_alignq256_mask",
+ "llvm.x86.avx512.mask.valign.q.512" => "__builtin_ia32_alignq512_mask",
+ "llvm.x86.avx512.mask.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps_mask",
+ "llvm.x86.avx512.mask.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256_mask",
+ "llvm.x86.avx512.mask.vcvtph2ps.512" => "__builtin_ia32_vcvtph2ps512_mask",
+ "llvm.x86.avx512.mask.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph_mask",
+ "llvm.x86.avx512.mask.vcvtps2ph.256" => "__builtin_ia32_vcvtps2ph256_mask",
+ "llvm.x86.avx512.mask.vcvtps2ph.512" => "__builtin_ia32_vcvtps2ph512_mask",
+ "llvm.x86.avx512.mask.vextractf32x4.256" => "__builtin_ia32_extractf32x4_256_mask",
+ "llvm.x86.avx512.mask.vextractf32x4.512" => "__builtin_ia32_extractf32x4_mask",
+ "llvm.x86.avx512.mask.vextractf32x8.512" => "__builtin_ia32_extractf32x8_mask",
+ "llvm.x86.avx512.mask.vextractf64x2.256" => "__builtin_ia32_extractf64x2_256_mask",
+ "llvm.x86.avx512.mask.vextractf64x2.512" => "__builtin_ia32_extractf64x2_512_mask",
+ "llvm.x86.avx512.mask.vextractf64x4.512" => "__builtin_ia32_extractf64x4_mask",
+ "llvm.x86.avx512.mask.vextracti32x4.256" => "__builtin_ia32_extracti32x4_256_mask",
+ "llvm.x86.avx512.mask.vextracti32x4.512" => "__builtin_ia32_extracti32x4_mask",
+ "llvm.x86.avx512.mask.vextracti32x8.512" => "__builtin_ia32_extracti32x8_mask",
+ "llvm.x86.avx512.mask.vextracti64x2.256" => "__builtin_ia32_extracti64x2_256_mask",
+ "llvm.x86.avx512.mask.vextracti64x2.512" => "__builtin_ia32_extracti64x2_512_mask",
+ "llvm.x86.avx512.mask.vextracti64x4.512" => "__builtin_ia32_extracti64x4_mask",
+ "llvm.x86.avx512.mask.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_mask",
+ "llvm.x86.avx512.mask.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_mask",
+ "llvm.x86.avx512.mask.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask",
+ "llvm.x86.avx512.mask.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_mask",
+ "llvm.x86.avx512.mask.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_mask",
+ "llvm.x86.avx512.mask.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask",
+ "llvm.x86.avx512.mask.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_mask",
+ "llvm.x86.avx512.mask.vfmadd.ss" => "__builtin_ia32_vfmaddss3_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_mask",
+ "llvm.x86.avx512.mask.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask",
+ "llvm.x86.avx512.mask.vfnmadd.pd.128" => "__builtin_ia32_vfnmaddpd128_mask",
+ "llvm.x86.avx512.mask.vfnmadd.pd.256" => "__builtin_ia32_vfnmaddpd256_mask",
+ "llvm.x86.avx512.mask.vfnmadd.pd.512" => "__builtin_ia32_vfnmaddpd512_mask",
+ "llvm.x86.avx512.mask.vfnmadd.ps.128" => "__builtin_ia32_vfnmaddps128_mask",
+ "llvm.x86.avx512.mask.vfnmadd.ps.256" => "__builtin_ia32_vfnmaddps256_mask",
+ "llvm.x86.avx512.mask.vfnmadd.ps.512" => "__builtin_ia32_vfnmaddps512_mask",
+ "llvm.x86.avx512.mask.vfnmsub.pd.128" => "__builtin_ia32_vfnmsubpd128_mask",
+ "llvm.x86.avx512.mask.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256_mask",
+ "llvm.x86.avx512.mask.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask",
+ "llvm.x86.avx512.mask.vfnmsub.ps.128" => "__builtin_ia32_vfnmsubps128_mask",
+ "llvm.x86.avx512.mask.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256_mask",
+ "llvm.x86.avx512.mask.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.hi.128" => "__builtin_ia32_vpermi2varhi128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.hi.256" => "__builtin_ia32_vpermi2varhi256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.hi.512" => "__builtin_ia32_vpermi2varhi512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512_mask",
+ "llvm.x86.avx512.mask.vpermi2var.qi.128" => "__builtin_ia32_vpermi2varqi128_mask",
+ "llvm.x86.avx512.mask.vpermi2var.qi.256" => "__builtin_ia32_vpermi2varqi256_mask",
+ "llvm.x86.avx512.mask.vpermi2var.qi.512" => "__builtin_ia32_vpermi2varqi512_mask",
+ "llvm.x86.avx512.mask.vpermilvar.pd.128" => "__builtin_ia32_vpermilvarpd_mask",
+ "llvm.x86.avx512.mask.vpermilvar.pd.256" => "__builtin_ia32_vpermilvarpd256_mask",
+ "llvm.x86.avx512.mask.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512_mask",
+ "llvm.x86.avx512.mask.vpermilvar.ps.128" => "__builtin_ia32_vpermilvarps_mask",
+ "llvm.x86.avx512.mask.vpermilvar.ps.256" => "__builtin_ia32_vpermilvarps256_mask",
+ "llvm.x86.avx512.mask.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512_mask",
+ "llvm.x86.avx512.mask.vpermt.d.512" => "__builtin_ia32_vpermt2vard512_mask",
+ "llvm.x86.avx512.mask.vpermt.pd.512" => "__builtin_ia32_vpermt2varpd512_mask",
+ "llvm.x86.avx512.mask.vpermt.ps.512" => "__builtin_ia32_vpermt2varps512_mask",
+ "llvm.x86.avx512.mask.vpermt.q.512" => "__builtin_ia32_vpermt2varq512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.d.128" => "__builtin_ia32_vpermt2vard128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.d.256" => "__builtin_ia32_vpermt2vard256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.d.512" => "__builtin_ia32_vpermt2vard512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.hi.128" => "__builtin_ia32_vpermt2varhi128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.hi.256" => "__builtin_ia32_vpermt2varhi256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.hi.512" => "__builtin_ia32_vpermt2varhi512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.pd.128" => "__builtin_ia32_vpermt2varpd128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.pd.256" => "__builtin_ia32_vpermt2varpd256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.pd.512" => "__builtin_ia32_vpermt2varpd512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.ps.128" => "__builtin_ia32_vpermt2varps128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.ps.256" => "__builtin_ia32_vpermt2varps256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.ps.512" => "__builtin_ia32_vpermt2varps512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.q.128" => "__builtin_ia32_vpermt2varq128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.q.256" => "__builtin_ia32_vpermt2varq256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.q.512" => "__builtin_ia32_vpermt2varq512_mask",
+ "llvm.x86.avx512.mask.vpermt2var.qi.128" => "__builtin_ia32_vpermt2varqi128_mask",
+ "llvm.x86.avx512.mask.vpermt2var.qi.256" => "__builtin_ia32_vpermt2varqi256_mask",
+ "llvm.x86.avx512.mask.vpermt2var.qi.512" => "__builtin_ia32_vpermt2varqi512_mask",
+ "llvm.x86.avx512.mask.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_mask",
+ "llvm.x86.avx512.mask.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_mask",
+ "llvm.x86.avx512.mask.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_mask",
+ "llvm.x86.avx512.mask.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128_mask",
+ "llvm.x86.avx512.mask.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_mask",
+ "llvm.x86.avx512.mask.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_mask",
+ "llvm.x86.avx512.mask.xor.pd.128" => "__builtin_ia32_xorpd128_mask",
+ "llvm.x86.avx512.mask.xor.pd.256" => "__builtin_ia32_xorpd256_mask",
+ "llvm.x86.avx512.mask.xor.pd.512" => "__builtin_ia32_xorpd512_mask",
+ "llvm.x86.avx512.mask.xor.ps.128" => "__builtin_ia32_xorps128_mask",
+ "llvm.x86.avx512.mask.xor.ps.256" => "__builtin_ia32_xorps256_mask",
+ "llvm.x86.avx512.mask.xor.ps.512" => "__builtin_ia32_xorps512_mask",
+ "llvm.x86.avx512.mask3.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_mask3",
+ "llvm.x86.avx512.mask3.vfmadd.ss" => "__builtin_ia32_vfmaddss3_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_mask3",
+ "llvm.x86.avx512.mask3.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.pd.128" => "__builtin_ia32_vfmsubpd128_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.pd.256" => "__builtin_ia32_vfmsubpd256_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.pd.512" => "__builtin_ia32_vfmsubpd512_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.ps.128" => "__builtin_ia32_vfmsubps128_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.ps.256" => "__builtin_ia32_vfmsubps256_mask3",
+ "llvm.x86.avx512.mask3.vfmsub.ps.512" => "__builtin_ia32_vfmsubps512_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.pd.128" => "__builtin_ia32_vfmsubaddpd128_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.pd.256" => "__builtin_ia32_vfmsubaddpd256_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.pd.512" => "__builtin_ia32_vfmsubaddpd512_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.ps.128" => "__builtin_ia32_vfmsubaddps128_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.ps.256" => "__builtin_ia32_vfmsubaddps256_mask3",
+ "llvm.x86.avx512.mask3.vfmsubadd.ps.512" => "__builtin_ia32_vfmsubaddps512_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.pd.128" => "__builtin_ia32_vfnmsubpd128_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.ps.128" => "__builtin_ia32_vfnmsubps128_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256_mask3",
+ "llvm.x86.avx512.mask3.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask3",
+ "llvm.x86.avx512.maskz.fixupimm.pd.128" => "__builtin_ia32_fixupimmpd128_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.pd.256" => "__builtin_ia32_fixupimmpd256_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.pd.512" => "__builtin_ia32_fixupimmpd512_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.ps.128" => "__builtin_ia32_fixupimmps128_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.ps.256" => "__builtin_ia32_fixupimmps256_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.ps.512" => "__builtin_ia32_fixupimmps512_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.sd" => "__builtin_ia32_fixupimmsd_maskz",
+ "llvm.x86.avx512.maskz.fixupimm.ss" => "__builtin_ia32_fixupimmss_maskz",
+ "llvm.x86.avx512.maskz.pternlog.d.128" => "__builtin_ia32_pternlogd128_maskz",
+ "llvm.x86.avx512.maskz.pternlog.d.256" => "__builtin_ia32_pternlogd256_maskz",
+ "llvm.x86.avx512.maskz.pternlog.d.512" => "__builtin_ia32_pternlogd512_maskz",
+ "llvm.x86.avx512.maskz.pternlog.q.128" => "__builtin_ia32_pternlogq128_maskz",
+ "llvm.x86.avx512.maskz.pternlog.q.256" => "__builtin_ia32_pternlogq256_maskz",
+ "llvm.x86.avx512.maskz.pternlog.q.512" => "__builtin_ia32_pternlogq512_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.pd.128" => "__builtin_ia32_vfmaddpd128_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.ps.128" => "__builtin_ia32_vfmaddps128_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.sd" => "__builtin_ia32_vfmaddsd3_maskz",
+ "llvm.x86.avx512.maskz.vfmadd.ss" => "__builtin_ia32_vfmaddss3_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.pd.128" => "__builtin_ia32_vfmaddsubpd128_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.ps.128" => "__builtin_ia32_vfmaddsubps128_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256_maskz",
+ "llvm.x86.avx512.maskz.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.d.128" => "__builtin_ia32_vpermt2vard128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.d.256" => "__builtin_ia32_vpermt2vard256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.d.512" => "__builtin_ia32_vpermt2vard512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.hi.128" => "__builtin_ia32_vpermt2varhi128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.hi.256" => "__builtin_ia32_vpermt2varhi256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.hi.512" => "__builtin_ia32_vpermt2varhi512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.pd.128" => "__builtin_ia32_vpermt2varpd128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.pd.256" => "__builtin_ia32_vpermt2varpd256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.pd.512" => "__builtin_ia32_vpermt2varpd512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.ps.128" => "__builtin_ia32_vpermt2varps128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.ps.256" => "__builtin_ia32_vpermt2varps256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.ps.512" => "__builtin_ia32_vpermt2varps512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.q.128" => "__builtin_ia32_vpermt2varq128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.q.256" => "__builtin_ia32_vpermt2varq256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.q.512" => "__builtin_ia32_vpermt2varq512_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.qi.128" => "__builtin_ia32_vpermt2varqi128_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.qi.256" => "__builtin_ia32_vpermt2varqi256_maskz",
+ "llvm.x86.avx512.maskz.vpermt2var.qi.512" => "__builtin_ia32_vpermt2varqi512_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256_maskz",
+ "llvm.x86.avx512.maskz.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512_maskz",
+ "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512",
+ "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512",
+ "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512",
+ "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512",
+ "llvm.x86.avx512.movntdqa" => "__builtin_ia32_movntdqa512",
+ "llvm.x86.avx512.mul.pd.512" => "__builtin_ia32_mulpd512",
+ "llvm.x86.avx512.mul.ps.512" => "__builtin_ia32_mulps512",
+ "llvm.x86.avx512.packssdw.512" => "__builtin_ia32_packssdw512",
+ "llvm.x86.avx512.packsswb.512" => "__builtin_ia32_packsswb512",
+ "llvm.x86.avx512.packusdw.512" => "__builtin_ia32_packusdw512",
+ "llvm.x86.avx512.packuswb.512" => "__builtin_ia32_packuswb512",
+ "llvm.x86.avx512.pavg.b.512" => "__builtin_ia32_pavgb512",
+ "llvm.x86.avx512.pavg.w.512" => "__builtin_ia32_pavgw512",
+ "llvm.x86.avx512.pbroadcastd.512" => "__builtin_ia32_pbroadcastd512",
+ "llvm.x86.avx512.pbroadcastq.512" => "__builtin_ia32_pbroadcastq512",
+ "llvm.x86.avx512.permvar.df.256" => "__builtin_ia32_permvardf256",
+ "llvm.x86.avx512.permvar.df.512" => "__builtin_ia32_permvardf512",
+ "llvm.x86.avx512.permvar.di.256" => "__builtin_ia32_permvardi256",
+ "llvm.x86.avx512.permvar.di.512" => "__builtin_ia32_permvardi512",
+ "llvm.x86.avx512.permvar.hi.128" => "__builtin_ia32_permvarhi128",
+ "llvm.x86.avx512.permvar.hi.256" => "__builtin_ia32_permvarhi256",
+ "llvm.x86.avx512.permvar.hi.512" => "__builtin_ia32_permvarhi512",
+ "llvm.x86.avx512.permvar.qi.128" => "__builtin_ia32_permvarqi128",
+ "llvm.x86.avx512.permvar.qi.256" => "__builtin_ia32_permvarqi256",
+ "llvm.x86.avx512.permvar.qi.512" => "__builtin_ia32_permvarqi512",
+ "llvm.x86.avx512.permvar.sf.512" => "__builtin_ia32_permvarsf512",
+ "llvm.x86.avx512.permvar.si.512" => "__builtin_ia32_permvarsi512",
+ "llvm.x86.avx512.pmaddubs.w.512" => "__builtin_ia32_pmaddubsw512",
+ "llvm.x86.avx512.pmaddw.d.512" => "__builtin_ia32_pmaddwd512",
+ "llvm.x86.avx512.pmovzxbd" => "__builtin_ia32_pmovzxbd512",
+ "llvm.x86.avx512.pmovzxbq" => "__builtin_ia32_pmovzxbq512",
+ "llvm.x86.avx512.pmovzxdq" => "__builtin_ia32_pmovzxdq512",
+ "llvm.x86.avx512.pmovzxwd" => "__builtin_ia32_pmovzxwd512",
+ "llvm.x86.avx512.pmovzxwq" => "__builtin_ia32_pmovzxwq512",
+ "llvm.x86.avx512.pmul.hr.sw.512" => "__builtin_ia32_pmulhrsw512",
+ "llvm.x86.avx512.pmulh.w.512" => "__builtin_ia32_pmulhw512",
+ "llvm.x86.avx512.pmulhu.w.512" => "__builtin_ia32_pmulhuw512",
+ "llvm.x86.avx512.pmultishift.qb.128" => "__builtin_ia32_vpmultishiftqb128",
+ "llvm.x86.avx512.pmultishift.qb.256" => "__builtin_ia32_vpmultishiftqb256",
+ "llvm.x86.avx512.pmultishift.qb.512" => "__builtin_ia32_vpmultishiftqb512",
+ "llvm.x86.avx512.psad.bw.512" => "__builtin_ia32_psadbw512",
+ "llvm.x86.avx512.pshuf.b.512" => "__builtin_ia32_pshufb512",
+ "llvm.x86.avx512.psll.d.512" => "__builtin_ia32_pslld512",
+ "llvm.x86.avx512.psll.dq" => "__builtin_ia32_pslldqi512",
+ "llvm.x86.avx512.psll.dq.bs" => "__builtin_ia32_pslldqi512_byteshift",
+ "llvm.x86.avx512.psll.q.512" => "__builtin_ia32_psllq512",
+ "llvm.x86.avx512.psll.w.512" => "__builtin_ia32_psllw512",
+ "llvm.x86.avx512.pslli.d.512" => "__builtin_ia32_pslldi512",
+ "llvm.x86.avx512.pslli.q.512" => "__builtin_ia32_psllqi512",
+ "llvm.x86.avx512.pslli.w.512" => "__builtin_ia32_psllwi512",
+ "llvm.x86.avx512.psllv.d.512" => "__builtin_ia32_psllv16si",
+ "llvm.x86.avx512.psllv.q.512" => "__builtin_ia32_psllv8di",
+ "llvm.x86.avx512.psllv.w.128" => "__builtin_ia32_psllv8hi",
+ "llvm.x86.avx512.psllv.w.256" => "__builtin_ia32_psllv16hi",
+ "llvm.x86.avx512.psllv.w.512" => "__builtin_ia32_psllv32hi",
+ "llvm.x86.avx512.psra.d.512" => "__builtin_ia32_psrad512",
+ "llvm.x86.avx512.psra.q.128" => "__builtin_ia32_psraq128",
+ "llvm.x86.avx512.psra.q.256" => "__builtin_ia32_psraq256",
+ "llvm.x86.avx512.psra.q.512" => "__builtin_ia32_psraq512",
+ "llvm.x86.avx512.psra.w.512" => "__builtin_ia32_psraw512",
+ "llvm.x86.avx512.psrai.d.512" => "__builtin_ia32_psradi512",
+ "llvm.x86.avx512.psrai.q.128" => "__builtin_ia32_psraqi128",
+ "llvm.x86.avx512.psrai.q.256" => "__builtin_ia32_psraqi256",
+ "llvm.x86.avx512.psrai.q.512" => "__builtin_ia32_psraqi512",
+ "llvm.x86.avx512.psrai.w.512" => "__builtin_ia32_psrawi512",
+ "llvm.x86.avx512.psrav.d.512" => "__builtin_ia32_psrav16si",
+ "llvm.x86.avx512.psrav.q.128" => "__builtin_ia32_psravq128",
+ "llvm.x86.avx512.psrav.q.256" => "__builtin_ia32_psravq256",
+ "llvm.x86.avx512.psrav.q.512" => "__builtin_ia32_psrav8di",
+ "llvm.x86.avx512.psrav.w.128" => "__builtin_ia32_psrav8hi",
+ "llvm.x86.avx512.psrav.w.256" => "__builtin_ia32_psrav16hi",
+ "llvm.x86.avx512.psrav.w.512" => "__builtin_ia32_psrav32hi",
+ "llvm.x86.avx512.psrl.d.512" => "__builtin_ia32_psrld512",
+ "llvm.x86.avx512.psrl.dq" => "__builtin_ia32_psrldqi512",
+ "llvm.x86.avx512.psrl.dq.bs" => "__builtin_ia32_psrldqi512_byteshift",
+ "llvm.x86.avx512.psrl.q.512" => "__builtin_ia32_psrlq512",
+ "llvm.x86.avx512.psrl.w.512" => "__builtin_ia32_psrlw512",
+ "llvm.x86.avx512.psrli.d.512" => "__builtin_ia32_psrldi512",
+ "llvm.x86.avx512.psrli.q.512" => "__builtin_ia32_psrlqi512",
+ "llvm.x86.avx512.psrli.w.512" => "__builtin_ia32_psrlwi512",
+ "llvm.x86.avx512.psrlv.d.512" => "__builtin_ia32_psrlv16si",
+ "llvm.x86.avx512.psrlv.q.512" => "__builtin_ia32_psrlv8di",
+ "llvm.x86.avx512.psrlv.w.128" => "__builtin_ia32_psrlv8hi",
+ "llvm.x86.avx512.psrlv.w.256" => "__builtin_ia32_psrlv16hi",
+ "llvm.x86.avx512.psrlv.w.512" => "__builtin_ia32_psrlv32hi",
+ "llvm.x86.avx512.pternlog.d.128" => "__builtin_ia32_pternlogd128",
+ "llvm.x86.avx512.pternlog.d.256" => "__builtin_ia32_pternlogd256",
+ "llvm.x86.avx512.pternlog.d.512" => "__builtin_ia32_pternlogd512",
+ "llvm.x86.avx512.pternlog.q.128" => "__builtin_ia32_pternlogq128",
+ "llvm.x86.avx512.pternlog.q.256" => "__builtin_ia32_pternlogq256",
+ "llvm.x86.avx512.pternlog.q.512" => "__builtin_ia32_pternlogq512",
+ "llvm.x86.avx512.ptestm.b.128" => "__builtin_ia32_ptestmb128",
+ "llvm.x86.avx512.ptestm.b.256" => "__builtin_ia32_ptestmb256",
+ "llvm.x86.avx512.ptestm.b.512" => "__builtin_ia32_ptestmb512",
+ "llvm.x86.avx512.ptestm.d.128" => "__builtin_ia32_ptestmd128",
+ "llvm.x86.avx512.ptestm.d.256" => "__builtin_ia32_ptestmd256",
+ "llvm.x86.avx512.ptestm.d.512" => "__builtin_ia32_ptestmd512",
+ "llvm.x86.avx512.ptestm.q.128" => "__builtin_ia32_ptestmq128",
+ "llvm.x86.avx512.ptestm.q.256" => "__builtin_ia32_ptestmq256",
+ "llvm.x86.avx512.ptestm.q.512" => "__builtin_ia32_ptestmq512",
+ "llvm.x86.avx512.ptestm.w.128" => "__builtin_ia32_ptestmw128",
+ "llvm.x86.avx512.ptestm.w.256" => "__builtin_ia32_ptestmw256",
+ "llvm.x86.avx512.ptestm.w.512" => "__builtin_ia32_ptestmw512",
+ "llvm.x86.avx512.ptestnm.b.128" => "__builtin_ia32_ptestnmb128",
+ "llvm.x86.avx512.ptestnm.b.256" => "__builtin_ia32_ptestnmb256",
+ "llvm.x86.avx512.ptestnm.b.512" => "__builtin_ia32_ptestnmb512",
+ "llvm.x86.avx512.ptestnm.d.128" => "__builtin_ia32_ptestnmd128",
+ "llvm.x86.avx512.ptestnm.d.256" => "__builtin_ia32_ptestnmd256",
+ "llvm.x86.avx512.ptestnm.d.512" => "__builtin_ia32_ptestnmd512",
+ "llvm.x86.avx512.ptestnm.q.128" => "__builtin_ia32_ptestnmq128",
+ "llvm.x86.avx512.ptestnm.q.256" => "__builtin_ia32_ptestnmq256",
+ "llvm.x86.avx512.ptestnm.q.512" => "__builtin_ia32_ptestnmq512",
+ "llvm.x86.avx512.ptestnm.w.128" => "__builtin_ia32_ptestnmw128",
+ "llvm.x86.avx512.ptestnm.w.256" => "__builtin_ia32_ptestnmw256",
+ "llvm.x86.avx512.ptestnm.w.512" => "__builtin_ia32_ptestnmw512",
+ "llvm.x86.avx512.rcp14.pd.128" => "__builtin_ia32_rcp14pd128_mask",
+ "llvm.x86.avx512.rcp14.pd.256" => "__builtin_ia32_rcp14pd256_mask",
+ "llvm.x86.avx512.rcp14.pd.512" => "__builtin_ia32_rcp14pd512_mask",
+ "llvm.x86.avx512.rcp14.ps.128" => "__builtin_ia32_rcp14ps128_mask",
+ "llvm.x86.avx512.rcp14.ps.256" => "__builtin_ia32_rcp14ps256_mask",
+ "llvm.x86.avx512.rcp14.ps.512" => "__builtin_ia32_rcp14ps512_mask",
+ "llvm.x86.avx512.rcp14.sd" => "__builtin_ia32_rcp14sd_mask",
+ "llvm.x86.avx512.rcp14.ss" => "__builtin_ia32_rcp14ss_mask",
+ "llvm.x86.avx512.rcp28.pd" => "__builtin_ia32_rcp28pd_mask",
+ "llvm.x86.avx512.rcp28.ps" => "__builtin_ia32_rcp28ps_mask",
+ "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_round_mask",
+ // [DUPLICATE]: "llvm.x86.avx512.rcp28.sd" => "__builtin_ia32_rcp28sd_mask",
+ "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_round_mask",
+ // [DUPLICATE]: "llvm.x86.avx512.rcp28.ss" => "__builtin_ia32_rcp28ss_mask",
+ "llvm.x86.avx512.rndscale.sd" => "__builtin_ia32_rndscalesd",
+ "llvm.x86.avx512.rndscale.ss" => "__builtin_ia32_rndscaless",
+ "llvm.x86.avx512.rsqrt14.pd.128" => "__builtin_ia32_rsqrt14pd128_mask",
+ "llvm.x86.avx512.rsqrt14.pd.256" => "__builtin_ia32_rsqrt14pd256_mask",
+ "llvm.x86.avx512.rsqrt14.pd.512" => "__builtin_ia32_rsqrt14pd512_mask",
+ "llvm.x86.avx512.rsqrt14.ps.128" => "__builtin_ia32_rsqrt14ps128_mask",
+ "llvm.x86.avx512.rsqrt14.ps.256" => "__builtin_ia32_rsqrt14ps256_mask",
+ "llvm.x86.avx512.rsqrt14.ps.512" => "__builtin_ia32_rsqrt14ps512_mask",
+ "llvm.x86.avx512.rsqrt14.sd" => "__builtin_ia32_rsqrt14sd_mask",
+ "llvm.x86.avx512.rsqrt14.ss" => "__builtin_ia32_rsqrt14ss_mask",
+ "llvm.x86.avx512.rsqrt28.pd" => "__builtin_ia32_rsqrt28pd_mask",
+ "llvm.x86.avx512.rsqrt28.ps" => "__builtin_ia32_rsqrt28ps_mask",
+ "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_round_mask",
+ // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.sd" => "__builtin_ia32_rsqrt28sd_mask",
+ "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_round_mask",
+ // [DUPLICATE]: "llvm.x86.avx512.rsqrt28.ss" => "__builtin_ia32_rsqrt28ss_mask",
+ "llvm.x86.avx512.scatter.dpd.512" => "__builtin_ia32_scattersiv8df",
+ "llvm.x86.avx512.scatter.dpi.512" => "__builtin_ia32_scattersiv16si",
+ "llvm.x86.avx512.scatter.dpq.512" => "__builtin_ia32_scattersiv8di",
+ "llvm.x86.avx512.scatter.dps.512" => "__builtin_ia32_scattersiv16sf",
+ "llvm.x86.avx512.scatter.qpd.512" => "__builtin_ia32_scatterdiv8df",
+ "llvm.x86.avx512.scatter.qpi.512" => "__builtin_ia32_scatterdiv16si",
+ "llvm.x86.avx512.scatter.qpq.512" => "__builtin_ia32_scatterdiv8di",
+ "llvm.x86.avx512.scatter.qps.512" => "__builtin_ia32_scatterdiv16sf",
+ "llvm.x86.avx512.scatterdiv2.df" => "__builtin_ia32_scatterdiv2df",
+ "llvm.x86.avx512.scatterdiv2.di" => "__builtin_ia32_scatterdiv2di",
+ "llvm.x86.avx512.scatterdiv4.df" => "__builtin_ia32_scatterdiv4df",
+ "llvm.x86.avx512.scatterdiv4.di" => "__builtin_ia32_scatterdiv4di",
+ "llvm.x86.avx512.scatterdiv4.sf" => "__builtin_ia32_scatterdiv4sf",
+ "llvm.x86.avx512.scatterdiv4.si" => "__builtin_ia32_scatterdiv4si",
+ "llvm.x86.avx512.scatterdiv8.sf" => "__builtin_ia32_scatterdiv8sf",
+ "llvm.x86.avx512.scatterdiv8.si" => "__builtin_ia32_scatterdiv8si",
+ "llvm.x86.avx512.scatterpf.dpd.512" => "__builtin_ia32_scatterpfdpd",
+ "llvm.x86.avx512.scatterpf.dps.512" => "__builtin_ia32_scatterpfdps",
+ "llvm.x86.avx512.scatterpf.qpd.512" => "__builtin_ia32_scatterpfqpd",
+ "llvm.x86.avx512.scatterpf.qps.512" => "__builtin_ia32_scatterpfqps",
+ "llvm.x86.avx512.scattersiv2.df" => "__builtin_ia32_scattersiv2df",
+ "llvm.x86.avx512.scattersiv2.di" => "__builtin_ia32_scattersiv2di",
+ "llvm.x86.avx512.scattersiv4.df" => "__builtin_ia32_scattersiv4df",
+ "llvm.x86.avx512.scattersiv4.di" => "__builtin_ia32_scattersiv4di",
+ "llvm.x86.avx512.scattersiv4.sf" => "__builtin_ia32_scattersiv4sf",
+ "llvm.x86.avx512.scattersiv4.si" => "__builtin_ia32_scattersiv4si",
+ "llvm.x86.avx512.scattersiv8.sf" => "__builtin_ia32_scattersiv8sf",
+ "llvm.x86.avx512.scattersiv8.si" => "__builtin_ia32_scattersiv8si",
+ "llvm.x86.avx512.sqrt.pd.512" => "__builtin_ia32_sqrtpd512_mask",
+ "llvm.x86.avx512.sqrt.ps.512" => "__builtin_ia32_sqrtps512_mask",
+ "llvm.x86.avx512.sqrt.sd" => "__builtin_ia32_sqrtrndsd",
+ "llvm.x86.avx512.sqrt.ss" => "__builtin_ia32_sqrtrndss",
+ "llvm.x86.avx512.sub.pd.512" => "__builtin_ia32_subpd512",
+ "llvm.x86.avx512.sub.ps.512" => "__builtin_ia32_subps512",
+ "llvm.x86.avx512.vbroadcast.sd.512" => "__builtin_ia32_vbroadcastsd512",
+ "llvm.x86.avx512.vbroadcast.sd.pd.512" => "__builtin_ia32_vbroadcastsd_pd512",
+ "llvm.x86.avx512.vbroadcast.ss.512" => "__builtin_ia32_vbroadcastss512",
+ "llvm.x86.avx512.vbroadcast.ss.ps.512" => "__builtin_ia32_vbroadcastss_ps512",
+ "llvm.x86.avx512.vcomi.sd" => "__builtin_ia32_vcomisd",
+ "llvm.x86.avx512.vcomi.ss" => "__builtin_ia32_vcomiss",
+ "llvm.x86.avx512.vcvtsd2si32" => "__builtin_ia32_vcvtsd2si32",
+ "llvm.x86.avx512.vcvtsd2si64" => "__builtin_ia32_vcvtsd2si64",
+ "llvm.x86.avx512.vcvtsd2usi32" => "__builtin_ia32_vcvtsd2usi32",
+ "llvm.x86.avx512.vcvtsd2usi64" => "__builtin_ia32_vcvtsd2usi64",
+ "llvm.x86.avx512.vcvtss2si32" => "__builtin_ia32_vcvtss2si32",
+ "llvm.x86.avx512.vcvtss2si64" => "__builtin_ia32_vcvtss2si64",
+ "llvm.x86.avx512.vcvtss2usi32" => "__builtin_ia32_vcvtss2usi32",
+ "llvm.x86.avx512.vcvtss2usi64" => "__builtin_ia32_vcvtss2usi64",
+ "llvm.x86.avx512.vpdpbusd.128" => "__builtin_ia32_vpdpbusd128",
+ "llvm.x86.avx512.vpdpbusd.256" => "__builtin_ia32_vpdpbusd256",
+ "llvm.x86.avx512.vpdpbusd.512" => "__builtin_ia32_vpdpbusd512",
+ "llvm.x86.avx512.vpdpbusds.128" => "__builtin_ia32_vpdpbusds128",
+ "llvm.x86.avx512.vpdpbusds.256" => "__builtin_ia32_vpdpbusds256",
+ "llvm.x86.avx512.vpdpbusds.512" => "__builtin_ia32_vpdpbusds512",
+ "llvm.x86.avx512.vpdpwssd.128" => "__builtin_ia32_vpdpwssd128",
+ "llvm.x86.avx512.vpdpwssd.256" => "__builtin_ia32_vpdpwssd256",
+ "llvm.x86.avx512.vpdpwssd.512" => "__builtin_ia32_vpdpwssd512",
+ "llvm.x86.avx512.vpdpwssds.128" => "__builtin_ia32_vpdpwssds128",
+ "llvm.x86.avx512.vpdpwssds.256" => "__builtin_ia32_vpdpwssds256",
+ "llvm.x86.avx512.vpdpwssds.512" => "__builtin_ia32_vpdpwssds512",
+ "llvm.x86.avx512.vpermi2var.d.128" => "__builtin_ia32_vpermi2vard128",
+ "llvm.x86.avx512.vpermi2var.d.256" => "__builtin_ia32_vpermi2vard256",
+ "llvm.x86.avx512.vpermi2var.d.512" => "__builtin_ia32_vpermi2vard512",
+ "llvm.x86.avx512.vpermi2var.hi.128" => "__builtin_ia32_vpermi2varhi128",
+ "llvm.x86.avx512.vpermi2var.hi.256" => "__builtin_ia32_vpermi2varhi256",
+ "llvm.x86.avx512.vpermi2var.hi.512" => "__builtin_ia32_vpermi2varhi512",
+ "llvm.x86.avx512.vpermi2var.pd.128" => "__builtin_ia32_vpermi2varpd128",
+ "llvm.x86.avx512.vpermi2var.pd.256" => "__builtin_ia32_vpermi2varpd256",
+ "llvm.x86.avx512.vpermi2var.pd.512" => "__builtin_ia32_vpermi2varpd512",
+ "llvm.x86.avx512.vpermi2var.ps.128" => "__builtin_ia32_vpermi2varps128",
+ "llvm.x86.avx512.vpermi2var.ps.256" => "__builtin_ia32_vpermi2varps256",
+ "llvm.x86.avx512.vpermi2var.ps.512" => "__builtin_ia32_vpermi2varps512",
+ "llvm.x86.avx512.vpermi2var.q.128" => "__builtin_ia32_vpermi2varq128",
+ "llvm.x86.avx512.vpermi2var.q.256" => "__builtin_ia32_vpermi2varq256",
+ "llvm.x86.avx512.vpermi2var.q.512" => "__builtin_ia32_vpermi2varq512",
+ "llvm.x86.avx512.vpermi2var.qi.128" => "__builtin_ia32_vpermi2varqi128",
+ "llvm.x86.avx512.vpermi2var.qi.256" => "__builtin_ia32_vpermi2varqi256",
+ "llvm.x86.avx512.vpermi2var.qi.512" => "__builtin_ia32_vpermi2varqi512",
+ "llvm.x86.avx512.vpermilvar.pd.512" => "__builtin_ia32_vpermilvarpd512",
+ "llvm.x86.avx512.vpermilvar.ps.512" => "__builtin_ia32_vpermilvarps512",
+ "llvm.x86.avx512.vpmadd52h.uq.128" => "__builtin_ia32_vpmadd52huq128",
+ "llvm.x86.avx512.vpmadd52h.uq.256" => "__builtin_ia32_vpmadd52huq256",
+ "llvm.x86.avx512.vpmadd52h.uq.512" => "__builtin_ia32_vpmadd52huq512",
+ "llvm.x86.avx512.vpmadd52l.uq.128" => "__builtin_ia32_vpmadd52luq128",
+ "llvm.x86.avx512.vpmadd52l.uq.256" => "__builtin_ia32_vpmadd52luq256",
+ "llvm.x86.avx512.vpmadd52l.uq.512" => "__builtin_ia32_vpmadd52luq512",
+ "llvm.x86.avx512bf16.cvtne2ps2bf16.128" => "__builtin_ia32_cvtne2ps2bf16_128",
+ "llvm.x86.avx512bf16.cvtne2ps2bf16.256" => "__builtin_ia32_cvtne2ps2bf16_256",
+ "llvm.x86.avx512bf16.cvtne2ps2bf16.512" => "__builtin_ia32_cvtne2ps2bf16_512",
+ "llvm.x86.avx512bf16.cvtneps2bf16.256" => "__builtin_ia32_cvtneps2bf16_256",
+ "llvm.x86.avx512bf16.cvtneps2bf16.512" => "__builtin_ia32_cvtneps2bf16_512",
+ "llvm.x86.avx512bf16.dpbf16ps.128" => "__builtin_ia32_dpbf16ps_128",
+ "llvm.x86.avx512bf16.dpbf16ps.256" => "__builtin_ia32_dpbf16ps_256",
+ "llvm.x86.avx512bf16.dpbf16ps.512" => "__builtin_ia32_dpbf16ps_512",
+ "llvm.x86.avx512fp16.add.ph.512" => "__builtin_ia32_addph512",
+ "llvm.x86.avx512fp16.div.ph.512" => "__builtin_ia32_divph512",
+ "llvm.x86.avx512fp16.mask.add.sh.round" => "__builtin_ia32_addsh_round_mask",
+ "llvm.x86.avx512fp16.mask.cmp.sh" => "__builtin_ia32_cmpsh_mask",
+ "llvm.x86.avx512fp16.mask.div.sh.round" => "__builtin_ia32_divsh_round_mask",
+ "llvm.x86.avx512fp16.mask.fpclass.sh" => "__builtin_ia32_fpclasssh_mask",
+ "llvm.x86.avx512fp16.mask.getexp.ph.128" => "__builtin_ia32_getexpph128_mask",
+ "llvm.x86.avx512fp16.mask.getexp.ph.256" => "__builtin_ia32_getexpph256_mask",
+ "llvm.x86.avx512fp16.mask.getexp.ph.512" => "__builtin_ia32_getexpph512_mask",
+ "llvm.x86.avx512fp16.mask.getexp.sh" => "__builtin_ia32_getexpsh128_round_mask",
+ "llvm.x86.avx512fp16.mask.getmant.ph.128" => "__builtin_ia32_getmantph128_mask",
+ "llvm.x86.avx512fp16.mask.getmant.ph.256" => "__builtin_ia32_getmantph256_mask",
+ "llvm.x86.avx512fp16.mask.getmant.ph.512" => "__builtin_ia32_getmantph512_mask",
+ "llvm.x86.avx512fp16.mask.getmant.sh" => "__builtin_ia32_getmantsh_round_mask",
+ "llvm.x86.avx512fp16.mask.max.sh.round" => "__builtin_ia32_maxsh_round_mask",
+ "llvm.x86.avx512fp16.mask.min.sh.round" => "__builtin_ia32_minsh_round_mask",
+ "llvm.x86.avx512fp16.mask.mul.sh.round" => "__builtin_ia32_mulsh_round_mask",
+ "llvm.x86.avx512fp16.mask.rcp.ph.128" => "__builtin_ia32_rcpph128_mask",
+ "llvm.x86.avx512fp16.mask.rcp.ph.256" => "__builtin_ia32_rcpph256_mask",
+ "llvm.x86.avx512fp16.mask.rcp.ph.512" => "__builtin_ia32_rcpph512_mask",
+ "llvm.x86.avx512fp16.mask.rcp.sh" => "__builtin_ia32_rcpsh_mask",
+ "llvm.x86.avx512fp16.mask.reduce.ph.128" => "__builtin_ia32_reduceph128_mask",
+ "llvm.x86.avx512fp16.mask.reduce.ph.256" => "__builtin_ia32_reduceph256_mask",
+ "llvm.x86.avx512fp16.mask.reduce.ph.512" => "__builtin_ia32_reduceph512_mask",
+ "llvm.x86.avx512fp16.mask.reduce.sh" => "__builtin_ia32_reducesh_mask",
+ "llvm.x86.avx512fp16.mask.rndscale.ph.128" => "__builtin_ia32_rndscaleph_128_mask",
+ "llvm.x86.avx512fp16.mask.rndscale.ph.256" => "__builtin_ia32_rndscaleph_256_mask",
+ "llvm.x86.avx512fp16.mask.rndscale.ph.512" => "__builtin_ia32_rndscaleph_mask",
+ "llvm.x86.avx512fp16.mask.rndscale.sh" => "__builtin_ia32_rndscalesh_round_mask",
+ "llvm.x86.avx512fp16.mask.rsqrt.ph.128" => "__builtin_ia32_rsqrtph128_mask",
+ "llvm.x86.avx512fp16.mask.rsqrt.ph.256" => "__builtin_ia32_rsqrtph256_mask",
+ "llvm.x86.avx512fp16.mask.rsqrt.ph.512" => "__builtin_ia32_rsqrtph512_mask",
+ "llvm.x86.avx512fp16.mask.rsqrt.sh" => "__builtin_ia32_rsqrtsh_mask",
+ "llvm.x86.avx512fp16.mask.scalef.ph.128" => "__builtin_ia32_scalefph128_mask",
+ "llvm.x86.avx512fp16.mask.scalef.ph.256" => "__builtin_ia32_scalefph256_mask",
+ "llvm.x86.avx512fp16.mask.scalef.ph.512" => "__builtin_ia32_scalefph512_mask",
+ "llvm.x86.avx512fp16.mask.scalef.sh" => "__builtin_ia32_scalefsh_round_mask",
+ "llvm.x86.avx512fp16.mask.sub.sh.round" => "__builtin_ia32_subsh_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvtdq2ph.128" => "__builtin_ia32_vcvtdq2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtpd2ph.128" => "__builtin_ia32_vcvtpd2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtpd2ph.256" => "__builtin_ia32_vcvtpd2ph256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtpd2ph.512" => "__builtin_ia32_vcvtpd2ph512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2dq.128" => "__builtin_ia32_vcvtph2dq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2dq.256" => "__builtin_ia32_vcvtph2dq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2dq.512" => "__builtin_ia32_vcvtph2dq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2pd.128" => "__builtin_ia32_vcvtph2pd128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2pd.256" => "__builtin_ia32_vcvtph2pd256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2pd.512" => "__builtin_ia32_vcvtph2pd512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2psx.128" => "__builtin_ia32_vcvtph2psx128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2psx.256" => "__builtin_ia32_vcvtph2psx256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2psx.512" => "__builtin_ia32_vcvtph2psx512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2qq.128" => "__builtin_ia32_vcvtph2qq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2qq.256" => "__builtin_ia32_vcvtph2qq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2qq.512" => "__builtin_ia32_vcvtph2qq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2udq.128" => "__builtin_ia32_vcvtph2udq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2udq.256" => "__builtin_ia32_vcvtph2udq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2udq.512" => "__builtin_ia32_vcvtph2udq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uqq.128" => "__builtin_ia32_vcvtph2uqq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uqq.256" => "__builtin_ia32_vcvtph2uqq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uqq.512" => "__builtin_ia32_vcvtph2uqq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uw.128" => "__builtin_ia32_vcvtph2uw128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uw.256" => "__builtin_ia32_vcvtph2uw256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2uw.512" => "__builtin_ia32_vcvtph2uw512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2w.128" => "__builtin_ia32_vcvtph2w128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2w.256" => "__builtin_ia32_vcvtph2w256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtph2w.512" => "__builtin_ia32_vcvtph2w512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtps2phx.128" => "__builtin_ia32_vcvtps2phx128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtps2phx.256" => "__builtin_ia32_vcvtps2phx256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtps2phx.512" => "__builtin_ia32_vcvtps2phx512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtqq2ph.128" => "__builtin_ia32_vcvtqq2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtqq2ph.256" => "__builtin_ia32_vcvtqq2ph256_mask",
+ "llvm.x86.avx512fp16.mask.vcvtsd2sh.round" => "__builtin_ia32_vcvtsd2sh_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvtsh2sd.round" => "__builtin_ia32_vcvtsh2sd_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvtsh2ss.round" => "__builtin_ia32_vcvtsh2ss_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvtss2sh.round" => "__builtin_ia32_vcvtss2sh_round_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2dq.128" => "__builtin_ia32_vcvttph2dq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2dq.256" => "__builtin_ia32_vcvttph2dq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2dq.512" => "__builtin_ia32_vcvttph2dq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2qq.128" => "__builtin_ia32_vcvttph2qq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2qq.256" => "__builtin_ia32_vcvttph2qq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2qq.512" => "__builtin_ia32_vcvttph2qq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2udq.128" => "__builtin_ia32_vcvttph2udq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2udq.256" => "__builtin_ia32_vcvttph2udq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2udq.512" => "__builtin_ia32_vcvttph2udq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uqq.128" => "__builtin_ia32_vcvttph2uqq128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uqq.256" => "__builtin_ia32_vcvttph2uqq256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uqq.512" => "__builtin_ia32_vcvttph2uqq512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uw.128" => "__builtin_ia32_vcvttph2uw128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uw.256" => "__builtin_ia32_vcvttph2uw256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2uw.512" => "__builtin_ia32_vcvttph2uw512_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2w.128" => "__builtin_ia32_vcvttph2w128_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2w.256" => "__builtin_ia32_vcvttph2w256_mask",
+ "llvm.x86.avx512fp16.mask.vcvttph2w.512" => "__builtin_ia32_vcvttph2w512_mask",
+ "llvm.x86.avx512fp16.mask.vcvtudq2ph.128" => "__builtin_ia32_vcvtudq2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtuqq2ph.128" => "__builtin_ia32_vcvtuqq2ph128_mask",
+ "llvm.x86.avx512fp16.mask.vcvtuqq2ph.256" => "__builtin_ia32_vcvtuqq2ph256_mask",
+ "llvm.x86.avx512fp16.mask.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_mask",
+ "llvm.x86.avx512fp16.mask.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_mask",
+ "llvm.x86.avx512fp16.mask.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_mask3",
+ "llvm.x86.avx512fp16.mask.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_mask",
+ "llvm.x86.avx512fp16.mask.vfcmul.cph.128" => "__builtin_ia32_vfcmulcph128_mask",
+ "llvm.x86.avx512fp16.mask.vfcmul.cph.256" => "__builtin_ia32_vfcmulcph256_mask",
+ "llvm.x86.avx512fp16.mask.vfcmul.cph.512" => "__builtin_ia32_vfcmulcph512_mask",
+ "llvm.x86.avx512fp16.mask.vfcmul.csh" => "__builtin_ia32_vfcmulcsh_mask",
+ "llvm.x86.avx512fp16.mask.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_mask",
+ "llvm.x86.avx512fp16.mask.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_mask",
+ "llvm.x86.avx512fp16.mask.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_mask3",
+ "llvm.x86.avx512fp16.mask.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_mask",
+ "llvm.x86.avx512fp16.mask.vfmul.cph.128" => "__builtin_ia32_vfmulcph128_mask",
+ "llvm.x86.avx512fp16.mask.vfmul.cph.256" => "__builtin_ia32_vfmulcph256_mask",
+ "llvm.x86.avx512fp16.mask.vfmul.cph.512" => "__builtin_ia32_vfmulcph512_mask",
+ "llvm.x86.avx512fp16.mask.vfmul.csh" => "__builtin_ia32_vfmulcsh_mask",
+ "llvm.x86.avx512fp16.maskz.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_maskz",
+ "llvm.x86.avx512fp16.maskz.vfcmadd.cph.256" => "__builtin_ia32_vfcmaddcph256_maskz",
+ "llvm.x86.avx512fp16.maskz.vfcmadd.cph.512" => "__builtin_ia32_vfcmaddcph512_maskz",
+ "llvm.x86.avx512fp16.maskz.vfcmadd.csh" => "__builtin_ia32_vfcmaddcsh_maskz",
+ "llvm.x86.avx512fp16.maskz.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_maskz",
+ "llvm.x86.avx512fp16.maskz.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_maskz",
+ "llvm.x86.avx512fp16.maskz.vfmadd.cph.512" => "__builtin_ia32_vfmaddcph512_maskz",
+ "llvm.x86.avx512fp16.maskz.vfmadd.csh" => "__builtin_ia32_vfmaddcsh_maskz",
+ "llvm.x86.avx512fp16.max.ph.128" => "__builtin_ia32_maxph128",
+ "llvm.x86.avx512fp16.max.ph.256" => "__builtin_ia32_maxph256",
+ "llvm.x86.avx512fp16.max.ph.512" => "__builtin_ia32_maxph512",
+ "llvm.x86.avx512fp16.min.ph.128" => "__builtin_ia32_minph128",
+ "llvm.x86.avx512fp16.min.ph.256" => "__builtin_ia32_minph256",
+ "llvm.x86.avx512fp16.min.ph.512" => "__builtin_ia32_minph512",
+ "llvm.x86.avx512fp16.mul.ph.512" => "__builtin_ia32_mulph512",
+ "llvm.x86.avx512fp16.sub.ph.512" => "__builtin_ia32_subph512",
+ "llvm.x86.avx512fp16.vcomi.sh" => "__builtin_ia32_vcomish",
+ "llvm.x86.avx512fp16.vcvtsh2si32" => "__builtin_ia32_vcvtsh2si32",
+ "llvm.x86.avx512fp16.vcvtsh2si64" => "__builtin_ia32_vcvtsh2si64",
+ "llvm.x86.avx512fp16.vcvtsh2usi32" => "__builtin_ia32_vcvtsh2usi32",
+ "llvm.x86.avx512fp16.vcvtsh2usi64" => "__builtin_ia32_vcvtsh2usi64",
+ "llvm.x86.avx512fp16.vcvtsi2sh" => "__builtin_ia32_vcvtsi2sh",
+ "llvm.x86.avx512fp16.vcvtsi642sh" => "__builtin_ia32_vcvtsi642sh",
+ "llvm.x86.avx512fp16.vcvttsh2si32" => "__builtin_ia32_vcvttsh2si32",
+ "llvm.x86.avx512fp16.vcvttsh2si64" => "__builtin_ia32_vcvttsh2si64",
+ "llvm.x86.avx512fp16.vcvttsh2usi32" => "__builtin_ia32_vcvttsh2usi32",
+ "llvm.x86.avx512fp16.vcvttsh2usi64" => "__builtin_ia32_vcvttsh2usi64",
+ "llvm.x86.avx512fp16.vcvtusi2sh" => "__builtin_ia32_vcvtusi2sh",
+ "llvm.x86.avx512fp16.vcvtusi642sh" => "__builtin_ia32_vcvtusi642sh",
+ "llvm.x86.avx512fp16.vfmaddsub.ph.128" => "__builtin_ia32_vfmaddsubph",
+ "llvm.x86.avx512fp16.vfmaddsub.ph.256" => "__builtin_ia32_vfmaddsubph256",
+ "llvm.x86.bmi.bextr.32" => "__builtin_ia32_bextr_u32",
+ "llvm.x86.bmi.bextr.64" => "__builtin_ia32_bextr_u64",
+ "llvm.x86.bmi.bzhi.32" => "__builtin_ia32_bzhi_si",
+ "llvm.x86.bmi.bzhi.64" => "__builtin_ia32_bzhi_di",
+ "llvm.x86.bmi.pdep.32" => "__builtin_ia32_pdep_si",
+ "llvm.x86.bmi.pdep.64" => "__builtin_ia32_pdep_di",
+ "llvm.x86.bmi.pext.32" => "__builtin_ia32_pext_si",
+ "llvm.x86.bmi.pext.64" => "__builtin_ia32_pext_di",
+ "llvm.x86.cldemote" => "__builtin_ia32_cldemote",
+ "llvm.x86.clflushopt" => "__builtin_ia32_clflushopt",
+ "llvm.x86.clrssbsy" => "__builtin_ia32_clrssbsy",
+ "llvm.x86.clui" => "__builtin_ia32_clui",
+ "llvm.x86.clwb" => "__builtin_ia32_clwb",
+ "llvm.x86.clzero" => "__builtin_ia32_clzero",
+ "llvm.x86.directstore32" => "__builtin_ia32_directstore_u32",
+ "llvm.x86.directstore64" => "__builtin_ia32_directstore_u64",
+ "llvm.x86.enqcmd" => "__builtin_ia32_enqcmd",
+ "llvm.x86.enqcmds" => "__builtin_ia32_enqcmds",
+ "llvm.x86.flags.read.u32" => "__builtin_ia32_readeflags_u32",
+ "llvm.x86.flags.read.u64" => "__builtin_ia32_readeflags_u64",
+ "llvm.x86.flags.write.u32" => "__builtin_ia32_writeeflags_u32",
+ "llvm.x86.flags.write.u64" => "__builtin_ia32_writeeflags_u64",
+ "llvm.x86.fma.mask.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask",
+ "llvm.x86.fma.mask.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask",
+ "llvm.x86.fma.mask.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask",
+ "llvm.x86.fma.mask.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask",
+ "llvm.x86.fma.mask.vfmsub.pd.512" => "__builtin_ia32_vfmsubpd512_mask",
+ "llvm.x86.fma.mask.vfmsub.ps.512" => "__builtin_ia32_vfmsubps512_mask",
+ "llvm.x86.fma.mask.vfmsubadd.pd.512" => "__builtin_ia32_vfmsubaddpd512_mask",
+ "llvm.x86.fma.mask.vfmsubadd.ps.512" => "__builtin_ia32_vfmsubaddps512_mask",
+ "llvm.x86.fma.mask.vfnmadd.pd.512" => "__builtin_ia32_vfnmaddpd512_mask",
+ "llvm.x86.fma.mask.vfnmadd.ps.512" => "__builtin_ia32_vfnmaddps512_mask",
+ "llvm.x86.fma.mask.vfnmsub.pd.512" => "__builtin_ia32_vfnmsubpd512_mask",
+ "llvm.x86.fma.mask.vfnmsub.ps.512" => "__builtin_ia32_vfnmsubps512_mask",
+ "llvm.x86.fma.vfmadd.pd" => "__builtin_ia32_vfmaddpd",
+ "llvm.x86.fma.vfmadd.pd.256" => "__builtin_ia32_vfmaddpd256",
+ "llvm.x86.fma.vfmadd.ps" => "__builtin_ia32_vfmaddps",
+ "llvm.x86.fma.vfmadd.ps.256" => "__builtin_ia32_vfmaddps256",
+ "llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd",
+ "llvm.x86.fma.vfmadd.ss" => "__builtin_ia32_vfmaddss",
+ "llvm.x86.fma.vfmaddsub.pd" => "__builtin_ia32_vfmaddsubpd",
+ "llvm.x86.fma.vfmaddsub.pd.256" => "__builtin_ia32_vfmaddsubpd256",
+ "llvm.x86.fma.vfmaddsub.ps" => "__builtin_ia32_vfmaddsubps",
+ "llvm.x86.fma.vfmaddsub.ps.256" => "__builtin_ia32_vfmaddsubps256",
+ "llvm.x86.fma.vfmsub.pd" => "__builtin_ia32_vfmsubpd",
+ "llvm.x86.fma.vfmsub.pd.256" => "__builtin_ia32_vfmsubpd256",
+ "llvm.x86.fma.vfmsub.ps" => "__builtin_ia32_vfmsubps",
+ "llvm.x86.fma.vfmsub.ps.256" => "__builtin_ia32_vfmsubps256",
+ "llvm.x86.fma.vfmsub.sd" => "__builtin_ia32_vfmsubsd",
+ "llvm.x86.fma.vfmsub.ss" => "__builtin_ia32_vfmsubss",
+ "llvm.x86.fma.vfmsubadd.pd" => "__builtin_ia32_vfmsubaddpd",
+ "llvm.x86.fma.vfmsubadd.pd.256" => "__builtin_ia32_vfmsubaddpd256",
+ "llvm.x86.fma.vfmsubadd.ps" => "__builtin_ia32_vfmsubaddps",
+ "llvm.x86.fma.vfmsubadd.ps.256" => "__builtin_ia32_vfmsubaddps256",
+ "llvm.x86.fma.vfnmadd.pd" => "__builtin_ia32_vfnmaddpd",
+ "llvm.x86.fma.vfnmadd.pd.256" => "__builtin_ia32_vfnmaddpd256",
+ "llvm.x86.fma.vfnmadd.ps" => "__builtin_ia32_vfnmaddps",
+ "llvm.x86.fma.vfnmadd.ps.256" => "__builtin_ia32_vfnmaddps256",
+ "llvm.x86.fma.vfnmadd.sd" => "__builtin_ia32_vfnmaddsd",
+ "llvm.x86.fma.vfnmadd.ss" => "__builtin_ia32_vfnmaddss",
+ "llvm.x86.fma.vfnmsub.pd" => "__builtin_ia32_vfnmsubpd",
+ "llvm.x86.fma.vfnmsub.pd.256" => "__builtin_ia32_vfnmsubpd256",
+ "llvm.x86.fma.vfnmsub.ps" => "__builtin_ia32_vfnmsubps",
+ "llvm.x86.fma.vfnmsub.ps.256" => "__builtin_ia32_vfnmsubps256",
+ "llvm.x86.fma.vfnmsub.sd" => "__builtin_ia32_vfnmsubsd",
+ "llvm.x86.fma.vfnmsub.ss" => "__builtin_ia32_vfnmsubss",
+ "llvm.x86.fxrstor" => "__builtin_ia32_fxrstor",
+ "llvm.x86.fxrstor64" => "__builtin_ia32_fxrstor64",
+ "llvm.x86.fxsave" => "__builtin_ia32_fxsave",
+ "llvm.x86.fxsave64" => "__builtin_ia32_fxsave64",
+ "llvm.x86.incsspd" => "__builtin_ia32_incsspd",
+ "llvm.x86.incsspq" => "__builtin_ia32_incsspq",
+ "llvm.x86.invpcid" => "__builtin_ia32_invpcid",
+ "llvm.x86.ldtilecfg" => "__builtin_ia32_tile_loadconfig",
+ "llvm.x86.ldtilecfg.internal" => "__builtin_ia32_tile_loadconfig_internal",
+ "llvm.x86.llwpcb" => "__builtin_ia32_llwpcb",
+ "llvm.x86.loadiwkey" => "__builtin_ia32_loadiwkey",
+ "llvm.x86.lwpins32" => "__builtin_ia32_lwpins32",
+ "llvm.x86.lwpins64" => "__builtin_ia32_lwpins64",
+ "llvm.x86.lwpval32" => "__builtin_ia32_lwpval32",
+ "llvm.x86.lwpval64" => "__builtin_ia32_lwpval64",
+ "llvm.x86.mmx.emms" => "__builtin_ia32_emms",
+ "llvm.x86.mmx.femms" => "__builtin_ia32_femms",
+ "llvm.x86.mmx.maskmovq" => "__builtin_ia32_maskmovq",
+ "llvm.x86.mmx.movnt.dq" => "__builtin_ia32_movntq",
+ "llvm.x86.mmx.packssdw" => "__builtin_ia32_packssdw",
+ "llvm.x86.mmx.packsswb" => "__builtin_ia32_packsswb",
+ "llvm.x86.mmx.packuswb" => "__builtin_ia32_packuswb",
+ "llvm.x86.mmx.padd.b" => "__builtin_ia32_paddb",
+ "llvm.x86.mmx.padd.d" => "__builtin_ia32_paddd",
+ "llvm.x86.mmx.padd.q" => "__builtin_ia32_paddq",
+ "llvm.x86.mmx.padd.w" => "__builtin_ia32_paddw",
+ "llvm.x86.mmx.padds.b" => "__builtin_ia32_paddsb",
+ "llvm.x86.mmx.padds.w" => "__builtin_ia32_paddsw",
+ "llvm.x86.mmx.paddus.b" => "__builtin_ia32_paddusb",
+ "llvm.x86.mmx.paddus.w" => "__builtin_ia32_paddusw",
+ "llvm.x86.mmx.palignr.b" => "__builtin_ia32_palignr",
+ "llvm.x86.mmx.pand" => "__builtin_ia32_pand",
+ "llvm.x86.mmx.pandn" => "__builtin_ia32_pandn",
+ "llvm.x86.mmx.pavg.b" => "__builtin_ia32_pavgb",
+ "llvm.x86.mmx.pavg.w" => "__builtin_ia32_pavgw",
+ "llvm.x86.mmx.pcmpeq.b" => "__builtin_ia32_pcmpeqb",
+ "llvm.x86.mmx.pcmpeq.d" => "__builtin_ia32_pcmpeqd",
+ "llvm.x86.mmx.pcmpeq.w" => "__builtin_ia32_pcmpeqw",
+ "llvm.x86.mmx.pcmpgt.b" => "__builtin_ia32_pcmpgtb",
+ "llvm.x86.mmx.pcmpgt.d" => "__builtin_ia32_pcmpgtd",
+ "llvm.x86.mmx.pcmpgt.w" => "__builtin_ia32_pcmpgtw",
+ "llvm.x86.mmx.pextr.w" => "__builtin_ia32_vec_ext_v4hi",
+ "llvm.x86.mmx.pinsr.w" => "__builtin_ia32_vec_set_v4hi",
+ "llvm.x86.mmx.pmadd.wd" => "__builtin_ia32_pmaddwd",
+ "llvm.x86.mmx.pmaxs.w" => "__builtin_ia32_pmaxsw",
+ "llvm.x86.mmx.pmaxu.b" => "__builtin_ia32_pmaxub",
+ "llvm.x86.mmx.pmins.w" => "__builtin_ia32_pminsw",
+ "llvm.x86.mmx.pminu.b" => "__builtin_ia32_pminub",
+ "llvm.x86.mmx.pmovmskb" => "__builtin_ia32_pmovmskb",
+ "llvm.x86.mmx.pmulh.w" => "__builtin_ia32_pmulhw",
+ "llvm.x86.mmx.pmulhu.w" => "__builtin_ia32_pmulhuw",
+ "llvm.x86.mmx.pmull.w" => "__builtin_ia32_pmullw",
+ "llvm.x86.mmx.pmulu.dq" => "__builtin_ia32_pmuludq",
+ "llvm.x86.mmx.por" => "__builtin_ia32_por",
+ "llvm.x86.mmx.psad.bw" => "__builtin_ia32_psadbw",
+ "llvm.x86.mmx.psll.d" => "__builtin_ia32_pslld",
+ "llvm.x86.mmx.psll.q" => "__builtin_ia32_psllq",
+ "llvm.x86.mmx.psll.w" => "__builtin_ia32_psllw",
+ "llvm.x86.mmx.pslli.d" => "__builtin_ia32_pslldi",
+ "llvm.x86.mmx.pslli.q" => "__builtin_ia32_psllqi",
+ "llvm.x86.mmx.pslli.w" => "__builtin_ia32_psllwi",
+ "llvm.x86.mmx.psra.d" => "__builtin_ia32_psrad",
+ "llvm.x86.mmx.psra.w" => "__builtin_ia32_psraw",
+ "llvm.x86.mmx.psrai.d" => "__builtin_ia32_psradi",
+ "llvm.x86.mmx.psrai.w" => "__builtin_ia32_psrawi",
+ "llvm.x86.mmx.psrl.d" => "__builtin_ia32_psrld",
+ "llvm.x86.mmx.psrl.q" => "__builtin_ia32_psrlq",
+ "llvm.x86.mmx.psrl.w" => "__builtin_ia32_psrlw",
+ "llvm.x86.mmx.psrli.d" => "__builtin_ia32_psrldi",
+ "llvm.x86.mmx.psrli.q" => "__builtin_ia32_psrlqi",
+ "llvm.x86.mmx.psrli.w" => "__builtin_ia32_psrlwi",
+ "llvm.x86.mmx.psub.b" => "__builtin_ia32_psubb",
+ "llvm.x86.mmx.psub.d" => "__builtin_ia32_psubd",
+ "llvm.x86.mmx.psub.q" => "__builtin_ia32_psubq",
+ "llvm.x86.mmx.psub.w" => "__builtin_ia32_psubw",
+ "llvm.x86.mmx.psubs.b" => "__builtin_ia32_psubsb",
+ "llvm.x86.mmx.psubs.w" => "__builtin_ia32_psubsw",
+ "llvm.x86.mmx.psubus.b" => "__builtin_ia32_psubusb",
+ "llvm.x86.mmx.psubus.w" => "__builtin_ia32_psubusw",
+ "llvm.x86.mmx.punpckhbw" => "__builtin_ia32_punpckhbw",
+ "llvm.x86.mmx.punpckhdq" => "__builtin_ia32_punpckhdq",
+ "llvm.x86.mmx.punpckhwd" => "__builtin_ia32_punpckhwd",
+ "llvm.x86.mmx.punpcklbw" => "__builtin_ia32_punpcklbw",
+ "llvm.x86.mmx.punpckldq" => "__builtin_ia32_punpckldq",
+ "llvm.x86.mmx.punpcklwd" => "__builtin_ia32_punpcklwd",
+ "llvm.x86.mmx.pxor" => "__builtin_ia32_pxor",
+ "llvm.x86.monitorx" => "__builtin_ia32_monitorx",
+ "llvm.x86.movdir64b" => "__builtin_ia32_movdir64b",
+ "llvm.x86.mwaitx" => "__builtin_ia32_mwaitx",
+ "llvm.x86.pclmulqdq" => "__builtin_ia32_pclmulqdq128",
+ "llvm.x86.pclmulqdq.256" => "__builtin_ia32_pclmulqdq256",
+ "llvm.x86.pclmulqdq.512" => "__builtin_ia32_pclmulqdq512",
+ "llvm.x86.ptwrite32" => "__builtin_ia32_ptwrite32",
+ "llvm.x86.ptwrite64" => "__builtin_ia32_ptwrite64",
+ "llvm.x86.rdfsbase.32" => "__builtin_ia32_rdfsbase32",
+ "llvm.x86.rdfsbase.64" => "__builtin_ia32_rdfsbase64",
+ "llvm.x86.rdgsbase.32" => "__builtin_ia32_rdgsbase32",
+ "llvm.x86.rdgsbase.64" => "__builtin_ia32_rdgsbase64",
+ "llvm.x86.rdpid" => "__builtin_ia32_rdpid",
+ "llvm.x86.rdpkru" => "__builtin_ia32_rdpkru",
+ "llvm.x86.rdpmc" => "__builtin_ia32_rdpmc",
+ "llvm.x86.rdsspd" => "__builtin_ia32_rdsspd",
+ "llvm.x86.rdsspq" => "__builtin_ia32_rdsspq",
+ "llvm.x86.rdtsc" => "__builtin_ia32_rdtsc",
+ "llvm.x86.rdtscp" => "__builtin_ia32_rdtscp",
+ "llvm.x86.rstorssp" => "__builtin_ia32_rstorssp",
+ "llvm.x86.saveprevssp" => "__builtin_ia32_saveprevssp",
+ "llvm.x86.senduipi" => "__builtin_ia32_senduipi",
+ "llvm.x86.serialize" => "__builtin_ia32_serialize",
+ "llvm.x86.setssbsy" => "__builtin_ia32_setssbsy",
+ "llvm.x86.sha1msg1" => "__builtin_ia32_sha1msg1",
+ "llvm.x86.sha1msg2" => "__builtin_ia32_sha1msg2",
+ "llvm.x86.sha1nexte" => "__builtin_ia32_sha1nexte",
+ "llvm.x86.sha1rnds4" => "__builtin_ia32_sha1rnds4",
+ "llvm.x86.sha256msg1" => "__builtin_ia32_sha256msg1",
+ "llvm.x86.sha256msg2" => "__builtin_ia32_sha256msg2",
+ "llvm.x86.sha256rnds2" => "__builtin_ia32_sha256rnds2",
+ "llvm.x86.slwpcb" => "__builtin_ia32_slwpcb",
+ "llvm.x86.sse.add.ss" => "__builtin_ia32_addss",
+ "llvm.x86.sse.cmp.ps" => "__builtin_ia32_cmpps",
+ "llvm.x86.sse.cmp.ss" => "__builtin_ia32_cmpss",
+ "llvm.x86.sse.comieq.ss" => "__builtin_ia32_comieq",
+ "llvm.x86.sse.comige.ss" => "__builtin_ia32_comige",
+ "llvm.x86.sse.comigt.ss" => "__builtin_ia32_comigt",
+ "llvm.x86.sse.comile.ss" => "__builtin_ia32_comile",
+ "llvm.x86.sse.comilt.ss" => "__builtin_ia32_comilt",
+ "llvm.x86.sse.comineq.ss" => "__builtin_ia32_comineq",
+ "llvm.x86.sse.cvtpd2pi" => "__builtin_ia32_cvtpd2pi",
+ "llvm.x86.sse.cvtpi2pd" => "__builtin_ia32_cvtpi2pd",
+ "llvm.x86.sse.cvtpi2ps" => "__builtin_ia32_cvtpi2ps",
+ "llvm.x86.sse.cvtps2pi" => "__builtin_ia32_cvtps2pi",
+ "llvm.x86.sse.cvtsi2ss" => "__builtin_ia32_cvtsi2ss",
+ "llvm.x86.sse.cvtsi642ss" => "__builtin_ia32_cvtsi642ss",
+ "llvm.x86.sse.cvtss2si" => "__builtin_ia32_cvtss2si",
+ "llvm.x86.sse.cvtss2si64" => "__builtin_ia32_cvtss2si64",
+ "llvm.x86.sse.cvttpd2pi" => "__builtin_ia32_cvttpd2pi",
+ "llvm.x86.sse.cvttps2pi" => "__builtin_ia32_cvttps2pi",
+ "llvm.x86.sse.cvttss2si" => "__builtin_ia32_cvttss2si",
+ "llvm.x86.sse.cvttss2si64" => "__builtin_ia32_cvttss2si64",
+ "llvm.x86.sse.div.ss" => "__builtin_ia32_divss",
+ "llvm.x86.sse.max.ps" => "__builtin_ia32_maxps",
+ "llvm.x86.sse.max.ss" => "__builtin_ia32_maxss",
+ "llvm.x86.sse.min.ps" => "__builtin_ia32_minps",
+ "llvm.x86.sse.min.ss" => "__builtin_ia32_minss",
+ "llvm.x86.sse.movmsk.ps" => "__builtin_ia32_movmskps",
+ "llvm.x86.sse.mul.ss" => "__builtin_ia32_mulss",
+ "llvm.x86.sse.pshuf.w" => "__builtin_ia32_pshufw",
+ "llvm.x86.sse.rcp.ps" => "__builtin_ia32_rcpps",
+ "llvm.x86.sse.rcp.ss" => "__builtin_ia32_rcpss",
+ "llvm.x86.sse.rsqrt.ps" => "__builtin_ia32_rsqrtps",
+ "llvm.x86.sse.rsqrt.ss" => "__builtin_ia32_rsqrtss",
+ "llvm.x86.sse.sfence" => "__builtin_ia32_sfence",
+ "llvm.x86.sse.sqrt.ps" => "__builtin_ia32_sqrtps",
+ "llvm.x86.sse.sqrt.ss" => "__builtin_ia32_sqrtss",
+ "llvm.x86.sse.storeu.ps" => "__builtin_ia32_storeups",
+ "llvm.x86.sse.sub.ss" => "__builtin_ia32_subss",
+ "llvm.x86.sse.ucomieq.ss" => "__builtin_ia32_ucomieq",
+ "llvm.x86.sse.ucomige.ss" => "__builtin_ia32_ucomige",
+ "llvm.x86.sse.ucomigt.ss" => "__builtin_ia32_ucomigt",
+ "llvm.x86.sse.ucomile.ss" => "__builtin_ia32_ucomile",
+ "llvm.x86.sse.ucomilt.ss" => "__builtin_ia32_ucomilt",
+ "llvm.x86.sse.ucomineq.ss" => "__builtin_ia32_ucomineq",
+ "llvm.x86.sse2.add.sd" => "__builtin_ia32_addsd",
+ "llvm.x86.sse2.clflush" => "__builtin_ia32_clflush",
+ "llvm.x86.sse2.cmp.pd" => "__builtin_ia32_cmppd",
+ "llvm.x86.sse2.cmp.sd" => "__builtin_ia32_cmpsd",
+ "llvm.x86.sse2.comieq.sd" => "__builtin_ia32_comisdeq",
+ "llvm.x86.sse2.comige.sd" => "__builtin_ia32_comisdge",
+ "llvm.x86.sse2.comigt.sd" => "__builtin_ia32_comisdgt",
+ "llvm.x86.sse2.comile.sd" => "__builtin_ia32_comisdle",
+ "llvm.x86.sse2.comilt.sd" => "__builtin_ia32_comisdlt",
+ "llvm.x86.sse2.comineq.sd" => "__builtin_ia32_comisdneq",
+ "llvm.x86.sse2.cvtdq2pd" => "__builtin_ia32_cvtdq2pd",
+ "llvm.x86.sse2.cvtdq2ps" => "__builtin_ia32_cvtdq2ps",
+ "llvm.x86.sse2.cvtpd2dq" => "__builtin_ia32_cvtpd2dq",
+ "llvm.x86.sse2.cvtpd2ps" => "__builtin_ia32_cvtpd2ps",
+ "llvm.x86.sse2.cvtps2dq" => "__builtin_ia32_cvtps2dq",
+ "llvm.x86.sse2.cvtps2pd" => "__builtin_ia32_cvtps2pd",
+ "llvm.x86.sse2.cvtsd2si" => "__builtin_ia32_cvtsd2si",
+ "llvm.x86.sse2.cvtsd2si64" => "__builtin_ia32_cvtsd2si64",
+ "llvm.x86.sse2.cvtsd2ss" => "__builtin_ia32_cvtsd2ss",
+ "llvm.x86.sse2.cvtsi2sd" => "__builtin_ia32_cvtsi2sd",
+ "llvm.x86.sse2.cvtsi642sd" => "__builtin_ia32_cvtsi642sd",
+ "llvm.x86.sse2.cvtss2sd" => "__builtin_ia32_cvtss2sd",
+ "llvm.x86.sse2.cvttpd2dq" => "__builtin_ia32_cvttpd2dq",
+ "llvm.x86.sse2.cvttps2dq" => "__builtin_ia32_cvttps2dq",
+ "llvm.x86.sse2.cvttsd2si" => "__builtin_ia32_cvttsd2si",
+ "llvm.x86.sse2.cvttsd2si64" => "__builtin_ia32_cvttsd2si64",
+ "llvm.x86.sse2.div.sd" => "__builtin_ia32_divsd",
+ "llvm.x86.sse2.lfence" => "__builtin_ia32_lfence",
+ "llvm.x86.sse2.maskmov.dqu" => "__builtin_ia32_maskmovdqu",
+ "llvm.x86.sse2.max.pd" => "__builtin_ia32_maxpd",
+ "llvm.x86.sse2.max.sd" => "__builtin_ia32_maxsd",
+ "llvm.x86.sse2.mfence" => "__builtin_ia32_mfence",
+ "llvm.x86.sse2.min.pd" => "__builtin_ia32_minpd",
+ "llvm.x86.sse2.min.sd" => "__builtin_ia32_minsd",
+ "llvm.x86.sse2.movmsk.pd" => "__builtin_ia32_movmskpd",
+ "llvm.x86.sse2.mul.sd" => "__builtin_ia32_mulsd",
+ "llvm.x86.sse2.packssdw.128" => "__builtin_ia32_packssdw128",
+ "llvm.x86.sse2.packsswb.128" => "__builtin_ia32_packsswb128",
+ "llvm.x86.sse2.packuswb.128" => "__builtin_ia32_packuswb128",
+ "llvm.x86.sse2.padds.b" => "__builtin_ia32_paddsb128",
+ "llvm.x86.sse2.padds.w" => "__builtin_ia32_paddsw128",
+ "llvm.x86.sse2.paddus.b" => "__builtin_ia32_paddusb128",
+ "llvm.x86.sse2.paddus.w" => "__builtin_ia32_paddusw128",
+ "llvm.x86.sse2.pause" => "__builtin_ia32_pause",
+ "llvm.x86.sse2.pavg.b" => "__builtin_ia32_pavgb128",
+ "llvm.x86.sse2.pavg.w" => "__builtin_ia32_pavgw128",
+ "llvm.x86.sse2.pmadd.wd" => "__builtin_ia32_pmaddwd128",
+ "llvm.x86.sse2.pmaxs.w" => "__builtin_ia32_pmaxsw128",
+ "llvm.x86.sse2.pmaxu.b" => "__builtin_ia32_pmaxub128",
+ "llvm.x86.sse2.pmins.w" => "__builtin_ia32_pminsw128",
+ "llvm.x86.sse2.pminu.b" => "__builtin_ia32_pminub128",
+ "llvm.x86.sse2.pmovmskb.128" => "__builtin_ia32_pmovmskb128",
+ "llvm.x86.sse2.pmulh.w" => "__builtin_ia32_pmulhw128",
+ "llvm.x86.sse2.pmulhu.w" => "__builtin_ia32_pmulhuw128",
+ "llvm.x86.sse2.pmulu.dq" => "__builtin_ia32_pmuludq128",
+ "llvm.x86.sse2.psad.bw" => "__builtin_ia32_psadbw128",
+ "llvm.x86.sse2.pshuf.d" => "__builtin_ia32_pshufd",
+ "llvm.x86.sse2.pshufh.w" => "__builtin_ia32_pshufhw",
+ "llvm.x86.sse2.pshufl.w" => "__builtin_ia32_pshuflw",
+ "llvm.x86.sse2.psll.d" => "__builtin_ia32_pslld128",
+ "llvm.x86.sse2.psll.dq" => "__builtin_ia32_pslldqi128",
+ "llvm.x86.sse2.psll.dq.bs" => "__builtin_ia32_pslldqi128_byteshift",
+ "llvm.x86.sse2.psll.q" => "__builtin_ia32_psllq128",
+ "llvm.x86.sse2.psll.w" => "__builtin_ia32_psllw128",
+ "llvm.x86.sse2.pslli.d" => "__builtin_ia32_pslldi128",
+ "llvm.x86.sse2.pslli.q" => "__builtin_ia32_psllqi128",
+ "llvm.x86.sse2.pslli.w" => "__builtin_ia32_psllwi128",
+ "llvm.x86.sse2.psra.d" => "__builtin_ia32_psrad128",
+ "llvm.x86.sse2.psra.w" => "__builtin_ia32_psraw128",
+ "llvm.x86.sse2.psrai.d" => "__builtin_ia32_psradi128",
+ "llvm.x86.sse2.psrai.w" => "__builtin_ia32_psrawi128",
+ "llvm.x86.sse2.psrl.d" => "__builtin_ia32_psrld128",
+ "llvm.x86.sse2.psrl.dq" => "__builtin_ia32_psrldqi128",
+ "llvm.x86.sse2.psrl.dq.bs" => "__builtin_ia32_psrldqi128_byteshift",
+ "llvm.x86.sse2.psrl.q" => "__builtin_ia32_psrlq128",
+ "llvm.x86.sse2.psrl.w" => "__builtin_ia32_psrlw128",
+ "llvm.x86.sse2.psrli.d" => "__builtin_ia32_psrldi128",
+ "llvm.x86.sse2.psrli.q" => "__builtin_ia32_psrlqi128",
+ "llvm.x86.sse2.psrli.w" => "__builtin_ia32_psrlwi128",
+ "llvm.x86.sse2.psubs.b" => "__builtin_ia32_psubsb128",
+ "llvm.x86.sse2.psubs.w" => "__builtin_ia32_psubsw128",
+ "llvm.x86.sse2.psubus.b" => "__builtin_ia32_psubusb128",
+ "llvm.x86.sse2.psubus.w" => "__builtin_ia32_psubusw128",
+ "llvm.x86.sse2.sqrt.pd" => "__builtin_ia32_sqrtpd",
+ "llvm.x86.sse2.sqrt.sd" => "__builtin_ia32_sqrtsd",
+ "llvm.x86.sse2.storel.dq" => "__builtin_ia32_storelv4si",
+ "llvm.x86.sse2.storeu.dq" => "__builtin_ia32_storedqu",
+ "llvm.x86.sse2.storeu.pd" => "__builtin_ia32_storeupd",
+ "llvm.x86.sse2.sub.sd" => "__builtin_ia32_subsd",
+ "llvm.x86.sse2.ucomieq.sd" => "__builtin_ia32_ucomisdeq",
+ "llvm.x86.sse2.ucomige.sd" => "__builtin_ia32_ucomisdge",
+ "llvm.x86.sse2.ucomigt.sd" => "__builtin_ia32_ucomisdgt",
+ "llvm.x86.sse2.ucomile.sd" => "__builtin_ia32_ucomisdle",
+ "llvm.x86.sse2.ucomilt.sd" => "__builtin_ia32_ucomisdlt",
+ "llvm.x86.sse2.ucomineq.sd" => "__builtin_ia32_ucomisdneq",
+ "llvm.x86.sse3.addsub.pd" => "__builtin_ia32_addsubpd",
+ "llvm.x86.sse3.addsub.ps" => "__builtin_ia32_addsubps",
+ "llvm.x86.sse3.hadd.pd" => "__builtin_ia32_haddpd",
+ "llvm.x86.sse3.hadd.ps" => "__builtin_ia32_haddps",
+ "llvm.x86.sse3.hsub.pd" => "__builtin_ia32_hsubpd",
+ "llvm.x86.sse3.hsub.ps" => "__builtin_ia32_hsubps",
+ "llvm.x86.sse3.ldu.dq" => "__builtin_ia32_lddqu",
+ "llvm.x86.sse3.monitor" => "__builtin_ia32_monitor",
+ "llvm.x86.sse3.mwait" => "__builtin_ia32_mwait",
+ "llvm.x86.sse41.blendpd" => "__builtin_ia32_blendpd",
+ "llvm.x86.sse41.blendps" => "__builtin_ia32_blendps",
+ "llvm.x86.sse41.blendvpd" => "__builtin_ia32_blendvpd",
+ "llvm.x86.sse41.blendvps" => "__builtin_ia32_blendvps",
+ "llvm.x86.sse41.dppd" => "__builtin_ia32_dppd",
+ "llvm.x86.sse41.dpps" => "__builtin_ia32_dpps",
+ "llvm.x86.sse41.extractps" => "__builtin_ia32_extractps128",
+ "llvm.x86.sse41.insertps" => "__builtin_ia32_insertps128",
+ "llvm.x86.sse41.movntdqa" => "__builtin_ia32_movntdqa",
+ "llvm.x86.sse41.mpsadbw" => "__builtin_ia32_mpsadbw128",
+ "llvm.x86.sse41.packusdw" => "__builtin_ia32_packusdw128",
+ "llvm.x86.sse41.pblendvb" => "__builtin_ia32_pblendvb128",
+ "llvm.x86.sse41.pblendw" => "__builtin_ia32_pblendw128",
+ "llvm.x86.sse41.phminposuw" => "__builtin_ia32_phminposuw128",
+ "llvm.x86.sse41.pmaxsb" => "__builtin_ia32_pmaxsb128",
+ "llvm.x86.sse41.pmaxsd" => "__builtin_ia32_pmaxsd128",
+ "llvm.x86.sse41.pmaxud" => "__builtin_ia32_pmaxud128",
+ "llvm.x86.sse41.pmaxuw" => "__builtin_ia32_pmaxuw128",
+ "llvm.x86.sse41.pminsb" => "__builtin_ia32_pminsb128",
+ "llvm.x86.sse41.pminsd" => "__builtin_ia32_pminsd128",
+ "llvm.x86.sse41.pminud" => "__builtin_ia32_pminud128",
+ "llvm.x86.sse41.pminuw" => "__builtin_ia32_pminuw128",
+ "llvm.x86.sse41.pmovsxbd" => "__builtin_ia32_pmovsxbd128",
+ "llvm.x86.sse41.pmovsxbq" => "__builtin_ia32_pmovsxbq128",
+ "llvm.x86.sse41.pmovsxbw" => "__builtin_ia32_pmovsxbw128",
+ "llvm.x86.sse41.pmovsxdq" => "__builtin_ia32_pmovsxdq128",
+ "llvm.x86.sse41.pmovsxwd" => "__builtin_ia32_pmovsxwd128",
+ "llvm.x86.sse41.pmovsxwq" => "__builtin_ia32_pmovsxwq128",
+ "llvm.x86.sse41.pmovzxbd" => "__builtin_ia32_pmovzxbd128",
+ "llvm.x86.sse41.pmovzxbq" => "__builtin_ia32_pmovzxbq128",
+ "llvm.x86.sse41.pmovzxbw" => "__builtin_ia32_pmovzxbw128",
+ "llvm.x86.sse41.pmovzxdq" => "__builtin_ia32_pmovzxdq128",
+ "llvm.x86.sse41.pmovzxwd" => "__builtin_ia32_pmovzxwd128",
+ "llvm.x86.sse41.pmovzxwq" => "__builtin_ia32_pmovzxwq128",
+ "llvm.x86.sse41.pmuldq" => "__builtin_ia32_pmuldq128",
+ "llvm.x86.sse41.ptestc" => "__builtin_ia32_ptestc128",
+ "llvm.x86.sse41.ptestnzc" => "__builtin_ia32_ptestnzc128",
+ "llvm.x86.sse41.ptestz" => "__builtin_ia32_ptestz128",
+ "llvm.x86.sse41.round.pd" => "__builtin_ia32_roundpd",
+ "llvm.x86.sse41.round.ps" => "__builtin_ia32_roundps",
+ "llvm.x86.sse41.round.sd" => "__builtin_ia32_roundsd",
+ "llvm.x86.sse41.round.ss" => "__builtin_ia32_roundss",
+ "llvm.x86.sse42.crc32.32.16" => "__builtin_ia32_crc32hi",
+ "llvm.x86.sse42.crc32.32.32" => "__builtin_ia32_crc32si",
+ "llvm.x86.sse42.crc32.32.8" => "__builtin_ia32_crc32qi",
+ "llvm.x86.sse42.crc32.64.64" => "__builtin_ia32_crc32di",
+ "llvm.x86.sse42.pcmpestri128" => "__builtin_ia32_pcmpestri128",
+ "llvm.x86.sse42.pcmpestria128" => "__builtin_ia32_pcmpestria128",
+ "llvm.x86.sse42.pcmpestric128" => "__builtin_ia32_pcmpestric128",
+ "llvm.x86.sse42.pcmpestrio128" => "__builtin_ia32_pcmpestrio128",
+ "llvm.x86.sse42.pcmpestris128" => "__builtin_ia32_pcmpestris128",
+ "llvm.x86.sse42.pcmpestriz128" => "__builtin_ia32_pcmpestriz128",
+ "llvm.x86.sse42.pcmpestrm128" => "__builtin_ia32_pcmpestrm128",
+ "llvm.x86.sse42.pcmpistri128" => "__builtin_ia32_pcmpistri128",
+ "llvm.x86.sse42.pcmpistria128" => "__builtin_ia32_pcmpistria128",
+ "llvm.x86.sse42.pcmpistric128" => "__builtin_ia32_pcmpistric128",
+ "llvm.x86.sse42.pcmpistrio128" => "__builtin_ia32_pcmpistrio128",
+ "llvm.x86.sse42.pcmpistris128" => "__builtin_ia32_pcmpistris128",
+ "llvm.x86.sse42.pcmpistriz128" => "__builtin_ia32_pcmpistriz128",
+ "llvm.x86.sse42.pcmpistrm128" => "__builtin_ia32_pcmpistrm128",
+ "llvm.x86.sse4a.extrq" => "__builtin_ia32_extrq",
+ "llvm.x86.sse4a.extrqi" => "__builtin_ia32_extrqi",
+ "llvm.x86.sse4a.insertq" => "__builtin_ia32_insertq",
+ "llvm.x86.sse4a.insertqi" => "__builtin_ia32_insertqi",
+ "llvm.x86.sse4a.movnt.sd" => "__builtin_ia32_movntsd",
+ "llvm.x86.sse4a.movnt.ss" => "__builtin_ia32_movntss",
+ "llvm.x86.ssse3.pabs.b" => "__builtin_ia32_pabsb",
+ "llvm.x86.ssse3.pabs.b.128" => "__builtin_ia32_pabsb128",
+ "llvm.x86.ssse3.pabs.d" => "__builtin_ia32_pabsd",
+ "llvm.x86.ssse3.pabs.d.128" => "__builtin_ia32_pabsd128",
+ "llvm.x86.ssse3.pabs.w" => "__builtin_ia32_pabsw",
+ "llvm.x86.ssse3.pabs.w.128" => "__builtin_ia32_pabsw128",
+ "llvm.x86.ssse3.phadd.d" => "__builtin_ia32_phaddd",
+ "llvm.x86.ssse3.phadd.d.128" => "__builtin_ia32_phaddd128",
+ "llvm.x86.ssse3.phadd.sw" => "__builtin_ia32_phaddsw",
+ "llvm.x86.ssse3.phadd.sw.128" => "__builtin_ia32_phaddsw128",
+ "llvm.x86.ssse3.phadd.w" => "__builtin_ia32_phaddw",
+ "llvm.x86.ssse3.phadd.w.128" => "__builtin_ia32_phaddw128",
+ "llvm.x86.ssse3.phsub.d" => "__builtin_ia32_phsubd",
+ "llvm.x86.ssse3.phsub.d.128" => "__builtin_ia32_phsubd128",
+ "llvm.x86.ssse3.phsub.sw" => "__builtin_ia32_phsubsw",
+ "llvm.x86.ssse3.phsub.sw.128" => "__builtin_ia32_phsubsw128",
+ "llvm.x86.ssse3.phsub.w" => "__builtin_ia32_phsubw",
+ "llvm.x86.ssse3.phsub.w.128" => "__builtin_ia32_phsubw128",
+ "llvm.x86.ssse3.pmadd.ub.sw" => "__builtin_ia32_pmaddubsw",
+ "llvm.x86.ssse3.pmadd.ub.sw.128" => "__builtin_ia32_pmaddubsw128",
+ "llvm.x86.ssse3.pmul.hr.sw" => "__builtin_ia32_pmulhrsw",
+ "llvm.x86.ssse3.pmul.hr.sw.128" => "__builtin_ia32_pmulhrsw128",
+ "llvm.x86.ssse3.pshuf.b" => "__builtin_ia32_pshufb",
+ "llvm.x86.ssse3.pshuf.b.128" => "__builtin_ia32_pshufb128",
+ "llvm.x86.ssse3.psign.b" => "__builtin_ia32_psignb",
+ "llvm.x86.ssse3.psign.b.128" => "__builtin_ia32_psignb128",
+ "llvm.x86.ssse3.psign.d" => "__builtin_ia32_psignd",
+ "llvm.x86.ssse3.psign.d.128" => "__builtin_ia32_psignd128",
+ "llvm.x86.ssse3.psign.w" => "__builtin_ia32_psignw",
+ "llvm.x86.ssse3.psign.w.128" => "__builtin_ia32_psignw128",
+ "llvm.x86.sttilecfg" => "__builtin_ia32_tile_storeconfig",
+ "llvm.x86.stui" => "__builtin_ia32_stui",
+ "llvm.x86.subborrow.u32" => "__builtin_ia32_subborrow_u32",
+ "llvm.x86.subborrow.u64" => "__builtin_ia32_subborrow_u64",
+ "llvm.x86.tbm.bextri.u32" => "__builtin_ia32_bextri_u32",
+ "llvm.x86.tbm.bextri.u64" => "__builtin_ia32_bextri_u64",
+ "llvm.x86.tdpbf16ps" => "__builtin_ia32_tdpbf16ps",
+ "llvm.x86.tdpbf16ps.internal" => "__builtin_ia32_tdpbf16ps_internal",
+ "llvm.x86.tdpbssd" => "__builtin_ia32_tdpbssd",
+ "llvm.x86.tdpbssd.internal" => "__builtin_ia32_tdpbssd_internal",
+ "llvm.x86.tdpbsud" => "__builtin_ia32_tdpbsud",
+ "llvm.x86.tdpbsud.internal" => "__builtin_ia32_tdpbsud_internal",
+ "llvm.x86.tdpbusd" => "__builtin_ia32_tdpbusd",
+ "llvm.x86.tdpbusd.internal" => "__builtin_ia32_tdpbusd_internal",
+ "llvm.x86.tdpbuud" => "__builtin_ia32_tdpbuud",
+ "llvm.x86.tdpbuud.internal" => "__builtin_ia32_tdpbuud_internal",
+ "llvm.x86.testui" => "__builtin_ia32_testui",
+ "llvm.x86.tileloadd64" => "__builtin_ia32_tileloadd64",
+ "llvm.x86.tileloadd64.internal" => "__builtin_ia32_tileloadd64_internal",
+ "llvm.x86.tileloaddt164" => "__builtin_ia32_tileloaddt164",
+ "llvm.x86.tileloaddt164.internal" => "__builtin_ia32_tileloaddt164_internal",
+ "llvm.x86.tilerelease" => "__builtin_ia32_tilerelease",
+ "llvm.x86.tilestored64" => "__builtin_ia32_tilestored64",
+ "llvm.x86.tilestored64.internal" => "__builtin_ia32_tilestored64_internal",
+ "llvm.x86.tilezero" => "__builtin_ia32_tilezero",
+ "llvm.x86.tilezero.internal" => "__builtin_ia32_tilezero_internal",
+ "llvm.x86.tpause" => "__builtin_ia32_tpause",
+ "llvm.x86.umonitor" => "__builtin_ia32_umonitor",
+ "llvm.x86.umwait" => "__builtin_ia32_umwait",
+ "llvm.x86.vcvtph2ps.128" => "__builtin_ia32_vcvtph2ps",
+ "llvm.x86.vcvtph2ps.256" => "__builtin_ia32_vcvtph2ps256",
+ "llvm.x86.vcvtps2ph.128" => "__builtin_ia32_vcvtps2ph",
+ "llvm.x86.vcvtps2ph.256" => "__builtin_ia32_vcvtps2ph256",
+ "llvm.x86.vgf2p8affineinvqb.128" => "__builtin_ia32_vgf2p8affineinvqb_v16qi",
+ "llvm.x86.vgf2p8affineinvqb.256" => "__builtin_ia32_vgf2p8affineinvqb_v32qi",
+ "llvm.x86.vgf2p8affineinvqb.512" => "__builtin_ia32_vgf2p8affineinvqb_v64qi",
+ "llvm.x86.vgf2p8affineqb.128" => "__builtin_ia32_vgf2p8affineqb_v16qi",
+ "llvm.x86.vgf2p8affineqb.256" => "__builtin_ia32_vgf2p8affineqb_v32qi",
+ "llvm.x86.vgf2p8affineqb.512" => "__builtin_ia32_vgf2p8affineqb_v64qi",
+ "llvm.x86.vgf2p8mulb.128" => "__builtin_ia32_vgf2p8mulb_v16qi",
+ "llvm.x86.vgf2p8mulb.256" => "__builtin_ia32_vgf2p8mulb_v32qi",
+ "llvm.x86.vgf2p8mulb.512" => "__builtin_ia32_vgf2p8mulb_v64qi",
+ "llvm.x86.wbinvd" => "__builtin_ia32_wbinvd",
+ "llvm.x86.wbnoinvd" => "__builtin_ia32_wbnoinvd",
+ "llvm.x86.wrfsbase.32" => "__builtin_ia32_wrfsbase32",
+ "llvm.x86.wrfsbase.64" => "__builtin_ia32_wrfsbase64",
+ "llvm.x86.wrgsbase.32" => "__builtin_ia32_wrgsbase32",
+ "llvm.x86.wrgsbase.64" => "__builtin_ia32_wrgsbase64",
+ "llvm.x86.wrpkru" => "__builtin_ia32_wrpkru",
+ "llvm.x86.wrssd" => "__builtin_ia32_wrssd",
+ "llvm.x86.wrssq" => "__builtin_ia32_wrssq",
+ "llvm.x86.wrussd" => "__builtin_ia32_wrussd",
+ "llvm.x86.wrussq" => "__builtin_ia32_wrussq",
+ "llvm.x86.xabort" => "__builtin_ia32_xabort",
+ "llvm.x86.xbegin" => "__builtin_ia32_xbegin",
+ "llvm.x86.xend" => "__builtin_ia32_xend",
+ "llvm.x86.xop.vfrcz.pd" => "__builtin_ia32_vfrczpd",
+ "llvm.x86.xop.vfrcz.pd.256" => "__builtin_ia32_vfrczpd256",
+ "llvm.x86.xop.vfrcz.ps" => "__builtin_ia32_vfrczps",
+ "llvm.x86.xop.vfrcz.ps.256" => "__builtin_ia32_vfrczps256",
+ "llvm.x86.xop.vfrcz.sd" => "__builtin_ia32_vfrczsd",
+ "llvm.x86.xop.vfrcz.ss" => "__builtin_ia32_vfrczss",
+ "llvm.x86.xop.vpcmov" => "__builtin_ia32_vpcmov",
+ "llvm.x86.xop.vpcmov.256" => "__builtin_ia32_vpcmov_256",
+ "llvm.x86.xop.vpcomb" => "__builtin_ia32_vpcomb",
+ "llvm.x86.xop.vpcomd" => "__builtin_ia32_vpcomd",
+ "llvm.x86.xop.vpcomq" => "__builtin_ia32_vpcomq",
+ "llvm.x86.xop.vpcomub" => "__builtin_ia32_vpcomub",
+ "llvm.x86.xop.vpcomud" => "__builtin_ia32_vpcomud",
+ "llvm.x86.xop.vpcomuq" => "__builtin_ia32_vpcomuq",
+ "llvm.x86.xop.vpcomuw" => "__builtin_ia32_vpcomuw",
+ "llvm.x86.xop.vpcomw" => "__builtin_ia32_vpcomw",
+ "llvm.x86.xop.vpermil2pd" => "__builtin_ia32_vpermil2pd",
+ "llvm.x86.xop.vpermil2pd.256" => "__builtin_ia32_vpermil2pd256",
+ "llvm.x86.xop.vpermil2ps" => "__builtin_ia32_vpermil2ps",
+ "llvm.x86.xop.vpermil2ps.256" => "__builtin_ia32_vpermil2ps256",
+ "llvm.x86.xop.vphaddbd" => "__builtin_ia32_vphaddbd",
+ "llvm.x86.xop.vphaddbq" => "__builtin_ia32_vphaddbq",
+ "llvm.x86.xop.vphaddbw" => "__builtin_ia32_vphaddbw",
+ "llvm.x86.xop.vphadddq" => "__builtin_ia32_vphadddq",
+ "llvm.x86.xop.vphaddubd" => "__builtin_ia32_vphaddubd",
+ "llvm.x86.xop.vphaddubq" => "__builtin_ia32_vphaddubq",
+ "llvm.x86.xop.vphaddubw" => "__builtin_ia32_vphaddubw",
+ "llvm.x86.xop.vphaddudq" => "__builtin_ia32_vphaddudq",
+ "llvm.x86.xop.vphadduwd" => "__builtin_ia32_vphadduwd",
+ "llvm.x86.xop.vphadduwq" => "__builtin_ia32_vphadduwq",
+ "llvm.x86.xop.vphaddwd" => "__builtin_ia32_vphaddwd",
+ "llvm.x86.xop.vphaddwq" => "__builtin_ia32_vphaddwq",
+ "llvm.x86.xop.vphsubbw" => "__builtin_ia32_vphsubbw",
+ "llvm.x86.xop.vphsubdq" => "__builtin_ia32_vphsubdq",
+ "llvm.x86.xop.vphsubwd" => "__builtin_ia32_vphsubwd",
+ "llvm.x86.xop.vpmacsdd" => "__builtin_ia32_vpmacsdd",
+ "llvm.x86.xop.vpmacsdqh" => "__builtin_ia32_vpmacsdqh",
+ "llvm.x86.xop.vpmacsdql" => "__builtin_ia32_vpmacsdql",
+ "llvm.x86.xop.vpmacssdd" => "__builtin_ia32_vpmacssdd",
+ "llvm.x86.xop.vpmacssdqh" => "__builtin_ia32_vpmacssdqh",
+ "llvm.x86.xop.vpmacssdql" => "__builtin_ia32_vpmacssdql",
+ "llvm.x86.xop.vpmacsswd" => "__builtin_ia32_vpmacsswd",
+ "llvm.x86.xop.vpmacssww" => "__builtin_ia32_vpmacssww",
+ "llvm.x86.xop.vpmacswd" => "__builtin_ia32_vpmacswd",
+ "llvm.x86.xop.vpmacsww" => "__builtin_ia32_vpmacsww",
+ "llvm.x86.xop.vpmadcsswd" => "__builtin_ia32_vpmadcsswd",
+ "llvm.x86.xop.vpmadcswd" => "__builtin_ia32_vpmadcswd",
+ "llvm.x86.xop.vpperm" => "__builtin_ia32_vpperm",
+ "llvm.x86.xop.vprotb" => "__builtin_ia32_vprotb",
+ "llvm.x86.xop.vprotbi" => "__builtin_ia32_vprotbi",
+ "llvm.x86.xop.vprotd" => "__builtin_ia32_vprotd",
+ "llvm.x86.xop.vprotdi" => "__builtin_ia32_vprotdi",
+ "llvm.x86.xop.vprotq" => "__builtin_ia32_vprotq",
+ "llvm.x86.xop.vprotqi" => "__builtin_ia32_vprotqi",
+ "llvm.x86.xop.vprotw" => "__builtin_ia32_vprotw",
+ "llvm.x86.xop.vprotwi" => "__builtin_ia32_vprotwi",
+ "llvm.x86.xop.vpshab" => "__builtin_ia32_vpshab",
+ "llvm.x86.xop.vpshad" => "__builtin_ia32_vpshad",
+ "llvm.x86.xop.vpshaq" => "__builtin_ia32_vpshaq",
+ "llvm.x86.xop.vpshaw" => "__builtin_ia32_vpshaw",
+ "llvm.x86.xop.vpshlb" => "__builtin_ia32_vpshlb",
+ "llvm.x86.xop.vpshld" => "__builtin_ia32_vpshld",
+ "llvm.x86.xop.vpshlq" => "__builtin_ia32_vpshlq",
+ "llvm.x86.xop.vpshlw" => "__builtin_ia32_vpshlw",
+ "llvm.x86.xresldtrk" => "__builtin_ia32_xresldtrk",
+ "llvm.x86.xsusldtrk" => "__builtin_ia32_xsusldtrk",
+ "llvm.x86.xtest" => "__builtin_ia32_xtest",
+ // xcore
+ "llvm.xcore.bitrev" => "__builtin_bitrev",
+ "llvm.xcore.getid" => "__builtin_getid",
+ "llvm.xcore.getps" => "__builtin_getps",
+ "llvm.xcore.setps" => "__builtin_setps",
+ _ => unimplemented!("***** unsupported LLVM intrinsic {}", name),
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
new file mode 100644
index 000000000..1b089f08f
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
@@ -0,0 +1,250 @@
+use std::borrow::Cow;
+
+use gccjit::{Function, FunctionPtrType, RValue, ToRValue};
+
+use crate::{context::CodegenCx, builder::Builder};
+
+pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str) -> Cow<'b, [RValue<'gcc>]> {
+ // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing
+ // arguments here.
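+ // For example, __builtin_ia32_addps512_mask takes a merge source and a write mask
+ // that llvm.x86.avx512.add.ps.512 does not pass; the code below fills those slots
+ // with an undefined source and an all-ones mask (-1) so that every lane of the
+ // result comes from the actual computation.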
+ if gcc_func.get_param_count() != args.len() {
+ match &*func_name {
+ "__builtin_ia32_pmuldq512_mask" | "__builtin_ia32_pmuludq512_mask"
+ // FIXME(antoyo): the following intrinsics have 4 (or 5) arguments according to the doc, but are defined with 2 (or 3) arguments in library/stdarch/crates/core_arch/src/x86/avx512f.rs.
+ | "__builtin_ia32_pmaxsd512_mask" | "__builtin_ia32_pmaxsq512_mask" | "__builtin_ia32_pmaxsq256_mask"
+ | "__builtin_ia32_pmaxsq128_mask" | "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
+ | "__builtin_ia32_pmaxud512_mask" | "__builtin_ia32_pmaxuq512_mask" | "__builtin_ia32_pmaxuq256_mask"
+ | "__builtin_ia32_pmaxuq128_mask"
+ | "__builtin_ia32_pminsd512_mask" | "__builtin_ia32_pminsq512_mask" | "__builtin_ia32_pminsq256_mask"
+ | "__builtin_ia32_pminsq128_mask" | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask"
+ | "__builtin_ia32_pminud512_mask" | "__builtin_ia32_pminuq512_mask" | "__builtin_ia32_pminuq256_mask"
+ | "__builtin_ia32_pminuq128_mask" | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask"
+ => {
+ // TODO: refactor by moving these intrinsics out of this branch.
+ let add_before_last_arg =
+ match &*func_name {
+ "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
+ | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask"
+ | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => true,
+ _ => false,
+ };
+ let new_first_arg_is_zero =
+ match &*func_name {
+ "__builtin_ia32_pmaxuq256_mask" | "__builtin_ia32_pmaxuq128_mask"
+ | "__builtin_ia32_pminuq256_mask" | "__builtin_ia32_pminuq128_mask" => true,
+ _ => false
+ };
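+ // The sqrt builtins take a single input operand, so their source and mask
+ // parameters sit one position earlier than in the two-operand builtins.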
+ let arg3_index =
+ match &*func_name {
+ "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => 1,
+ _ => 2,
+ };
+ let mut new_args = args.to_vec();
+ let arg3_type = gcc_func.get_param_type(arg3_index);
+ let first_arg =
+ if new_first_arg_is_zero {
+ let vector_type = arg3_type.dyncast_vector().expect("vector type");
+ let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
+ let num_units = vector_type.get_num_units();
+ builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units])
+ }
+ else {
+ builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue()
+ };
+ if add_before_last_arg {
+ new_args.insert(new_args.len() - 1, first_arg);
+ }
+ else {
+ new_args.push(first_arg);
+ }
+ let arg4_index =
+ match &*func_name {
+ "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => 2,
+ _ => 3,
+ };
+ let arg4_type = gcc_func.get_param_type(arg4_index);
+ let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+ if add_before_last_arg {
+ new_args.insert(new_args.len() - 1, minus_one);
+ }
+ else {
+ new_args.push(minus_one);
+ }
+ args = new_args.into();
+ },
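+ // The ternary-logic builtins take a trailing write mask that the LLVM
+ // intrinsics lack; append -1 to select every lane.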
+ "__builtin_ia32_pternlogd512_mask" | "__builtin_ia32_pternlogd256_mask"
+ | "__builtin_ia32_pternlogd128_mask" | "__builtin_ia32_pternlogq512_mask"
+ | "__builtin_ia32_pternlogq256_mask" | "__builtin_ia32_pternlogq128_mask" => {
+ let mut new_args = args.to_vec();
+ let arg5_type = gcc_func.get_param_type(4);
+ let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1);
+ new_args.push(minus_one);
+ args = new_args.into();
+ },
+ "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
+ let mut new_args = args.to_vec();
+
+ let mut last_arg = None;
+ if args.len() == 4 {
+ last_arg = new_args.pop();
+ }
+
+ let arg4_type = gcc_func.get_param_type(3);
+ let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+ new_args.push(minus_one);
+
+ if args.len() == 3 {
+ // Both llvm.fma.v16f32 and llvm.x86.avx512.vfmadd.ps.512 map to
+ // the same GCC intrinsic, but the former has 3 parameters while the
+ // latter has 4, so the latter doesn't require this additional argument.
+ let arg5_type = gcc_func.get_param_type(4);
+ new_args.push(builder.context.new_rvalue_from_int(arg5_type, 4));
+ }
+
+ if let Some(last_arg) = last_arg {
+ new_args.push(last_arg);
+ }
+
+ args = new_args.into();
+ },
+ "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask"
+ | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask"
+ | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask"
+ | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask" => {
+ let mut new_args = args.to_vec();
+ let last_arg = new_args.pop().expect("last arg");
+ let arg3_type = gcc_func.get_param_type(2);
+ let undefined = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue();
+ new_args.push(undefined);
+ let arg4_type = gcc_func.get_param_type(3);
+ let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+ new_args.push(minus_one);
+ new_args.push(last_arg);
+ args = new_args.into();
+ },
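+ // vfmaddsub already carries all three source operands, so only the write mask
+ // is missing; insert -1 before the trailing rounding-mode argument.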
+ "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => {
+ let mut new_args = args.to_vec();
+ let last_arg = new_args.pop().expect("last arg");
+ let arg4_type = gcc_func.get_param_type(3);
+ let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
+ new_args.push(minus_one);
+ new_args.push(last_arg);
+ args = new_args.into();
+ },
+ _ => (),
+ }
+ }
+
+ args
+}
+
+pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool {
+ // NOTE: these intrinsics have missing parameters before the last one, so ignore the
+ // last argument type check.
+ // FIXME(antoyo): find a way to refactor in order to avoid this hack.
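+ // For example, __builtin_ia32_addps512_mask declares its merge source at the
+ // position where the LLVM intrinsic passes its rounding mode, so a positional
+ // type check on the final LLVM argument would spuriously fail.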
+ match func_name {
+ "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
+ | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" | "__builtin_ia32_sqrtps512_mask"
+ | "__builtin_ia32_sqrtpd512_mask" | "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask"
+ | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask"
+ | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask"
+ | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask"
+ | "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => {
+ if index == args_len - 1 {
+ return true;
+ }
+ },
+ "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
+ // Since there are two LLVM intrinsics that map to each of these GCC builtins and only
+ // one of them has a missing parameter before the last one, we check the number of
+ // arguments to distinguish those cases.
+ if args_len == 4 && index == args_len - 1 {
+ return true;
+ }
+ },
+ _ => (),
+ }
+
+ false
+}
+
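+// Fallback used when the `master` feature of gccjit (which exposes target-specific
+// builtins, see the other definition below) is disabled: llvm.x86.xgetbv is stubbed
+// out with a trap and every other LLVM intrinsic is rejected.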
+#[cfg(not(feature="master"))]
+pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
+ match name {
+ "llvm.x86.xgetbv" => {
+ let gcc_name = "__builtin_trap";
+ let func = cx.context.get_builtin_function(gcc_name);
+ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+ return func;
+ },
+ _ => unimplemented!("unsupported LLVM intrinsic {}", name),
+ }
+}
+
+#[cfg(feature="master")]
+pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
+ let gcc_name = match name {
+ "llvm.x86.xgetbv" => "__builtin_ia32_xgetbv",
+ // NOTE: this doc specifies the equivalent GCC builtins: http://huonw.github.io/llvmint/llvmint/x86/index.html
+ "llvm.sqrt.v2f64" => "__builtin_ia32_sqrtpd",
+ "llvm.x86.avx512.pmul.dq.512" => "__builtin_ia32_pmuldq512_mask",
+ "llvm.x86.avx512.pmulu.dq.512" => "__builtin_ia32_pmuludq512_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.256" => "__builtin_ia32_pmaxsq256_mask",
+ "llvm.x86.avx512.mask.pmaxs.q.128" => "__builtin_ia32_pmaxsq128_mask",
+ "llvm.x86.avx512.max.ps.512" => "__builtin_ia32_maxps512_mask",
+ "llvm.x86.avx512.max.pd.512" => "__builtin_ia32_maxpd512_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.256" => "__builtin_ia32_pmaxuq256_mask",
+ "llvm.x86.avx512.mask.pmaxu.q.128" => "__builtin_ia32_pmaxuq128_mask",
+ "llvm.x86.avx512.mask.pmins.q.256" => "__builtin_ia32_pminsq256_mask",
+ "llvm.x86.avx512.mask.pmins.q.128" => "__builtin_ia32_pminsq128_mask",
+ "llvm.x86.avx512.min.ps.512" => "__builtin_ia32_minps512_mask",
+ "llvm.x86.avx512.min.pd.512" => "__builtin_ia32_minpd512_mask",
+ "llvm.x86.avx512.mask.pminu.q.256" => "__builtin_ia32_pminuq256_mask",
+ "llvm.x86.avx512.mask.pminu.q.128" => "__builtin_ia32_pminuq128_mask",
+ "llvm.fma.v16f32" => "__builtin_ia32_vfmaddps512_mask",
+ "llvm.fma.v8f64" => "__builtin_ia32_vfmaddpd512_mask",
+ "llvm.x86.avx512.vfmaddsub.ps.512" => "__builtin_ia32_vfmaddsubps512_mask",
+ "llvm.x86.avx512.vfmaddsub.pd.512" => "__builtin_ia32_vfmaddsubpd512_mask",
+ "llvm.x86.avx512.pternlog.d.512" => "__builtin_ia32_pternlogd512_mask",
+ "llvm.x86.avx512.pternlog.d.256" => "__builtin_ia32_pternlogd256_mask",
+ "llvm.x86.avx512.pternlog.d.128" => "__builtin_ia32_pternlogd128_mask",
+ "llvm.x86.avx512.pternlog.q.512" => "__builtin_ia32_pternlogq512_mask",
+ "llvm.x86.avx512.pternlog.q.256" => "__builtin_ia32_pternlogq256_mask",
+ "llvm.x86.avx512.pternlog.q.128" => "__builtin_ia32_pternlogq128_mask",
+ "llvm.x86.avx512.add.ps.512" => "__builtin_ia32_addps512_mask",
+ "llvm.x86.avx512.add.pd.512" => "__builtin_ia32_addpd512_mask",
+ "llvm.x86.avx512.sub.ps.512" => "__builtin_ia32_subps512_mask",
+ "llvm.x86.avx512.sub.pd.512" => "__builtin_ia32_subpd512_mask",
+ "llvm.x86.avx512.mul.ps.512" => "__builtin_ia32_mulps512_mask",
+ "llvm.x86.avx512.mul.pd.512" => "__builtin_ia32_mulpd512_mask",
+ "llvm.x86.avx512.div.ps.512" => "__builtin_ia32_divps512_mask",
+ "llvm.x86.avx512.div.pd.512" => "__builtin_ia32_divpd512_mask",
+ "llvm.x86.avx512.vfmadd.ps.512" => "__builtin_ia32_vfmaddps512_mask",
+ "llvm.x86.avx512.vfmadd.pd.512" => "__builtin_ia32_vfmaddpd512_mask",
+
+ // The above doc points to unknown builtins for the following, so override them:
+ "llvm.x86.avx2.gather.d.d" => "__builtin_ia32_gathersiv4si",
+ "llvm.x86.avx2.gather.d.d.256" => "__builtin_ia32_gathersiv8si",
+ "llvm.x86.avx2.gather.d.ps" => "__builtin_ia32_gathersiv4sf",
+ "llvm.x86.avx2.gather.d.ps.256" => "__builtin_ia32_gathersiv8sf",
+ "llvm.x86.avx2.gather.d.q" => "__builtin_ia32_gathersiv2di",
+ "llvm.x86.avx2.gather.d.q.256" => "__builtin_ia32_gathersiv4di",
+ "llvm.x86.avx2.gather.d.pd" => "__builtin_ia32_gathersiv2df",
+ "llvm.x86.avx2.gather.d.pd.256" => "__builtin_ia32_gathersiv4df",
+ "llvm.x86.avx2.gather.q.d" => "__builtin_ia32_gatherdiv4si",
+ "llvm.x86.avx2.gather.q.d.256" => "__builtin_ia32_gatherdiv4si256",
+ "llvm.x86.avx2.gather.q.ps" => "__builtin_ia32_gatherdiv4sf",
+ "llvm.x86.avx2.gather.q.ps.256" => "__builtin_ia32_gatherdiv4sf256",
+ "llvm.x86.avx2.gather.q.q" => "__builtin_ia32_gatherdiv2di",
+ "llvm.x86.avx2.gather.q.q.256" => "__builtin_ia32_gatherdiv4di",
+ "llvm.x86.avx2.gather.q.pd" => "__builtin_ia32_gatherdiv2df",
+ "llvm.x86.avx2.gather.q.pd.256" => "__builtin_ia32_gatherdiv4df",
+ "" => "",
+        // NOTE: the included archs.rs file is generated by https://github.com/GuillaumeGomez/llvmint/blob/master/generate_list.py
+ _ => include!("archs.rs"),
+ };
+
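+    // For example, "llvm.x86.avx512.add.ps.512" resolves to the GCC builtin
+    // "__builtin_ia32_addps512_mask"; the builtin is looked up once and then cached in
+    // `cx.functions` under its GCC name.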
+ let func = cx.context.get_target_builtin_function(gcc_name);
+ cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
+ func
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
new file mode 100644
index 000000000..5fbdedac0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -0,0 +1,1134 @@
+pub mod llvm;
+mod simd;
+
+use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp, FunctionType};
+use rustc_codegen_ssa::MemFlags;
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::common::{IntPredicate, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_span::{Span, Symbol, symbol::kw, sym};
+use rustc_target::abi::HasDataLayout;
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::spec::PanicStrategy;
+
+use crate::abi::GccType;
+use crate::builder::Builder;
+use crate::common::{SignType, TypeReflection};
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+use crate::intrinsic::simd::generic_simd_intrinsic;
+
+fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
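+    // The f32 intrinsics map to the `f`-suffixed C math functions and the f64 intrinsics
+    // to the unsuffixed ones, e.g. sym::sqrtf32 -> "sqrtf" and sym::sqrtf64 -> "sqrt".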
+ let gcc_name = match name {
+ sym::sqrtf32 => "sqrtf",
+ sym::sqrtf64 => "sqrt",
+ sym::powif32 => "__builtin_powif",
+ sym::powif64 => "__builtin_powi",
+ sym::sinf32 => "sinf",
+ sym::sinf64 => "sin",
+ sym::cosf32 => "cosf",
+ sym::cosf64 => "cos",
+ sym::powf32 => "powf",
+ sym::powf64 => "pow",
+ sym::expf32 => "expf",
+ sym::expf64 => "exp",
+ sym::exp2f32 => "exp2f",
+ sym::exp2f64 => "exp2",
+ sym::logf32 => "logf",
+ sym::logf64 => "log",
+ sym::log10f32 => "log10f",
+ sym::log10f64 => "log10",
+ sym::log2f32 => "log2f",
+ sym::log2f64 => "log2",
+ sym::fmaf32 => "fmaf",
+ sym::fmaf64 => "fma",
+ sym::fabsf32 => "fabsf",
+ sym::fabsf64 => "fabs",
+ sym::minnumf32 => "fminf",
+ sym::minnumf64 => "fmin",
+ sym::maxnumf32 => "fmaxf",
+ sym::maxnumf64 => "fmax",
+ sym::copysignf32 => "copysignf",
+ sym::copysignf64 => "copysign",
+ sym::floorf32 => "floorf",
+ sym::floorf64 => "floor",
+ sym::ceilf32 => "ceilf",
+ sym::ceilf64 => "ceil",
+ sym::truncf32 => "truncf",
+ sym::truncf64 => "trunc",
+ sym::rintf32 => "rintf",
+ sym::rintf64 => "rint",
+ sym::nearbyintf32 => "nearbyintf",
+ sym::nearbyintf64 => "nearbyint",
+ sym::roundf32 => "roundf",
+ sym::roundf64 => "round",
+ sym::abort => "abort",
+ _ => return None,
+ };
+ Some(cx.context.get_builtin_function(&gcc_name))
+}
+
+impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) {
+ let tcx = self.tcx;
+ let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+
+ let (def_id, substs) = match *callee_ty.kind() {
+ ty::FnDef(def_id, substs) => (def_id, substs),
+ _ => bug!("expected fn item type, found {}", callee_ty),
+ };
+
+ let sig = callee_ty.fn_sig(tcx);
+ let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+ let arg_tys = sig.inputs();
+ let ret_ty = sig.output();
+ let name = tcx.item_name(def_id);
+ let name_str = name.as_str();
+
+ let llret_ty = self.layout_of(ret_ty).gcc_type(self, true);
+ let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+ let simple = get_simple_intrinsic(self, name);
+ let llval =
+ match name {
+ _ if simple.is_some() => {
+                    // FIXME(antoyo): remove this cast when the API supports functions.
+ let func = unsafe { std::mem::transmute(simple.expect("simple")) };
+ self.call(self.type_void(), func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
+ },
+ sym::likely => {
+ self.expect(args[0].immediate(), true)
+ }
+ sym::unlikely => {
+ self.expect(args[0].immediate(), false)
+ }
+ kw::Try => {
+ try_intrinsic(
+ self,
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ llresult,
+ );
+ return;
+ }
+ sym::breakpoint => {
+ unimplemented!();
+ }
+ sym::va_copy => {
+ unimplemented!();
+ }
+ sym::va_arg => {
+ unimplemented!();
+ }
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ let tp_ty = substs.type_at(0);
+ let mut ptr = args[0].immediate();
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
+ }
+ let load = self.volatile_load(ptr.get_type(), ptr);
+ // TODO(antoyo): set alignment.
+ self.to_immediate(load, self.layout_of(tp_ty))
+ }
+ sym::volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.volatile_store(self, dst);
+ return;
+ }
+ sym::unaligned_volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.unaligned_volatile_store(self, dst);
+ return;
+ }
+ sym::prefetch_read_data
+ | sym::prefetch_write_data
+ | sym::prefetch_read_instruction
+ | sym::prefetch_write_instruction => {
+ unimplemented!();
+ }
+ sym::ctlz
+ | sym::ctlz_nonzero
+ | sym::cttz
+ | sym::cttz_nonzero
+ | sym::ctpop
+ | sym::bswap
+ | sym::bitreverse
+ | sym::rotate_left
+ | sym::rotate_right
+ | sym::saturating_add
+ | sym::saturating_sub => {
+ let ty = arg_tys[0];
+ match int_type_width_signed(ty, self) {
+ Some((width, signed)) => match name {
+ sym::ctlz | sym::cttz => {
+ let func = self.current_func.borrow().expect("func");
+ let then_block = func.new_block("then");
+ let else_block = func.new_block("else");
+ let after_block = func.new_block("after");
+
+ let arg = args[0].immediate();
+ let result = func.new_local(None, arg.get_type(), "zeros");
+ let zero = self.cx.gcc_zero(arg.get_type());
+ let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
+ self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+ let zero_result = self.cx.gcc_uint(arg.get_type(), width);
+ then_block.add_assignment(None, result, zero_result);
+ then_block.end_with_jump(None, after_block);
+
+                            // NOTE: since jumps were added in a place
+                            // count_leading_zeroes() does not expect, the current block
+                            // in the state needs to be updated.
+ self.switch_to_block(else_block);
+
+ let zeros =
+ match name {
+ sym::ctlz => self.count_leading_zeroes(width, arg),
+ sym::cttz => self.count_trailing_zeroes(width, arg),
+ _ => unreachable!(),
+ };
+ self.llbb().add_assignment(None, result, zeros);
+ self.llbb().end_with_jump(None, after_block);
+
+                            // NOTE: since jumps were added in a place rustc does not
+                            // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ result.to_rvalue()
+ }
+ sym::ctlz_nonzero => {
+ self.count_leading_zeroes(width, args[0].immediate())
+ },
+ sym::cttz_nonzero => {
+ self.count_trailing_zeroes(width, args[0].immediate())
+ }
+ sym::ctpop => self.pop_count(args[0].immediate()),
+ sym::bswap => {
+ if width == 8 {
+                                args[0].immediate() // byte swapping a u8/i8 is a no-op
+ }
+ else {
+ self.gcc_bswap(args[0].immediate(), width)
+ }
+ },
+ sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
+ sym::rotate_left | sym::rotate_right => {
+ // TODO(antoyo): implement using algorithm from:
+ // https://blog.regehr.org/archives/1063
+ // for other platforms.
+ let is_left = name == sym::rotate_left;
+ let val = args[0].immediate();
+ let raw_shift = args[1].immediate();
+ if is_left {
+ self.rotate_left(val, raw_shift, width)
+ }
+ else {
+ self.rotate_right(val, raw_shift, width)
+ }
+ },
+ sym::saturating_add => {
+ self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
+ },
+ sym::saturating_sub => {
+ self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
+ },
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ tcx.sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ return;
+ }
+ }
+ }
+
+ sym::raw_eq => {
+ use rustc_target::abi::Abi::*;
+ let tp_ty = substs.type_at(0);
+ let layout = self.layout_of(tp_ty).layout;
+ let _use_integer_compare = match layout.abi() {
+ Scalar(_) | ScalarPair(_, _) => true,
+ Uninhabited | Vector { .. } => false,
+ Aggregate { .. } => {
+ // For rusty ABIs, small aggregates are actually passed
+ // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+ // so we re-use that same threshold here.
+ layout.size() <= self.data_layout().pointer_size * 2
+ }
+ };
+
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ if layout.size().bytes() == 0 {
+ self.const_bool(true)
+ }
+ /*else if use_integer_compare {
+ let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
+ let ptr_ty = self.type_ptr_to(integer_ty);
+ let a_ptr = self.bitcast(a, ptr_ty);
+ let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+ let b_ptr = self.bitcast(b, ptr_ty);
+ let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+ self.icmp(IntPredicate::IntEQ, a_val, b_val)
+ }*/
+ else {
+ let void_ptr_type = self.context.new_type::<*const ()>();
+ let a_ptr = self.bitcast(a, void_ptr_type);
+ let b_ptr = self.bitcast(b, void_ptr_type);
+ let n = self.context.new_cast(None, self.const_usize(layout.size().bytes()), self.sizet_type);
+ let builtin = self.context.get_builtin_function("memcmp");
+ let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+ self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
+ }
+ }
+
+ sym::black_box => {
+ args[0].val.store(self, result);
+
+ let block = self.llbb();
+ let extended_asm = block.add_extended_asm(None, "");
+ extended_asm.add_input_operand(None, "r", result.llval);
+ extended_asm.add_clobber("memory");
+ extended_asm.set_volatile_flag(true);
+
+ // We have copied the value to `result` already.
+ return;
+ }
+
+ _ if name_str.starts_with("simd_") => {
+ match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+ Ok(llval) => llval,
+ Err(()) => return,
+ }
+ }
+
+ _ => bug!("unknown intrinsic '{}'", name),
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
+ let ptr = self.pointercast(result.llval, ptr_llty);
+ self.store(llval, ptr, result.align);
+ }
+ else {
+ OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+ .val
+ .store(self, result);
+ }
+ }
+ }
+
+ fn abort(&mut self) {
+ let func = self.context.get_builtin_function("abort");
+ let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
+ self.call(self.type_void(), func, &[], None);
+ }
+
+ fn assume(&mut self, value: Self::Value) {
+ // TODO(antoyo): switch to assume when it exists.
+ // Or use something like this:
+ // #define __assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0)
+ self.expect(value, true);
+ }
+
+ fn expect(&mut self, cond: Self::Value, _expected: bool) -> Self::Value {
+ // TODO(antoyo)
+ cond
+ }
+
+ fn type_test(&mut self, _pointer: Self::Value, _typeid: Self::Value) -> Self::Value {
+ // Unsupported.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+
+ fn type_checked_load(
+ &mut self,
+ _llvtable: Self::Value,
+ _vtable_byte_offset: u64,
+ _typeid: Self::Value,
+ ) -> Self::Value {
+ // Unsupported.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+
+ fn va_start(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+
+ fn va_end(&mut self, _va_list: RValue<'gcc>) -> RValue<'gcc> {
+ unimplemented!();
+ }
+}
+
+impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
+ fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
+ arg_abi.store_fn_arg(self, idx, dst)
+ }
+
+ fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ arg_abi.store(self, val, dst)
+ }
+
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+ arg_abi.memory_ty(self)
+ }
+}
+
+pub trait ArgAbiExt<'gcc, 'tcx> {
+ fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+ fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
+ fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
+}
+
+impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+    /// Gets the GCC type for a place of the original Rust type of
+    /// this argument/return, i.e., the result of `type_of::type_of`.
+ fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ self.layout.gcc_type(cx, true)
+ }
+
+    /// Stores a direct/indirect value described by this ArgAbi into a
+    /// place for the original Rust type of this argument/return.
+    /// Can be used for storing formal arguments into Rust variables
+    /// as well as results of call/invoke instructions into their destinations.
+ fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ if self.is_ignore() {
+ return;
+ }
+ if self.is_sized_indirect() {
+ OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+ }
+ else if self.is_unsized_indirect() {
+ bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+ }
+ else if let PassMode::Cast(cast) = self.mode {
+ // FIXME(eddyb): Figure out when the simpler Store is safe, clang
+ // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+ let can_store_through_cast_ptr = false;
+ if can_store_through_cast_ptr {
+ let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
+ let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+ bx.store(val, cast_dst, self.layout.align.abi);
+ }
+ else {
+ // The actual return type is a struct, but the ABI
+ // adaptation code has cast it into some scalar type. The
+ // code that follows is the only reliable way I have
+ // found to do a transform like i64 -> {i32,i32}.
+ // Basically we dump the data onto the stack then memcpy it.
+ //
+ // Other approaches I tried:
+ // - Casting rust ret pointer to the foreign type and using Store
+ // is (a) unsafe if size of foreign type > size of rust type and
+ // (b) runs afoul of strict aliasing rules, yielding invalid
+ // assembly under -O (specifically, the store gets removed).
+ // - Truncating foreign type to correct integral type and then
+ // bitcasting to the struct type yields invalid cast errors.
+
+            // Instead, we allocate some scratch space...
+ let scratch_size = cast.size(bx);
+ let scratch_align = cast.align(bx);
+ let llscratch = bx.alloca(cast.gcc_type(bx), scratch_align);
+ bx.lifetime_start(llscratch, scratch_size);
+
+ // ... where we first store the value...
+ bx.store(val, llscratch, scratch_align);
+
+ // ... and then memcpy it to the intended destination.
+ bx.memcpy(
+ dst.llval,
+ self.layout.align.abi,
+ llscratch,
+ scratch_align,
+ bx.const_usize(self.layout.size.bytes()),
+ MemFlags::empty(),
+ );
+
+ bx.lifetime_end(llscratch, scratch_size);
+ }
+ }
+ else {
+ OperandValue::Immediate(val).store(bx, dst);
+ }
+ }
+
+ fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+ let mut next = || {
+ let val = bx.current_func().get_param(*idx as i32);
+ *idx += 1;
+ val.to_rvalue()
+ };
+ match self.mode {
+ PassMode::Ignore => {},
+ PassMode::Pair(..) => {
+ OperandValue::Pair(next(), next()).store(bx, dst);
+ },
+ PassMode::Indirect { extra_attrs: Some(_), .. } => {
+ OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+ },
+ PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(_) => {
+ let next_arg = next();
+ self.store(bx, next_arg, dst);
+ },
+ }
+ }
+}
+
+fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
+ match ty.kind() {
+ ty::Int(t) => Some((
+ match t {
+ rustc_middle::ty::IntTy::Isize => u64::from(cx.tcx.sess.target.pointer_width),
+ rustc_middle::ty::IntTy::I8 => 8,
+ rustc_middle::ty::IntTy::I16 => 16,
+ rustc_middle::ty::IntTy::I32 => 32,
+ rustc_middle::ty::IntTy::I64 => 64,
+ rustc_middle::ty::IntTy::I128 => 128,
+ },
+ true,
+ )),
+ ty::Uint(t) => Some((
+ match t {
+ rustc_middle::ty::UintTy::Usize => u64::from(cx.tcx.sess.target.pointer_width),
+ rustc_middle::ty::UintTy::U8 => 8,
+ rustc_middle::ty::UintTy::U16 => 16,
+ rustc_middle::ty::UintTy::U32 => 32,
+ rustc_middle::ty::UintTy::U64 => 64,
+ rustc_middle::ty::UintTy::U128 => 128,
+ },
+ false,
+ )),
+ _ => None,
+ }
+}
+
+impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
+ fn bit_reverse(&mut self, width: u64, value: RValue<'gcc>) -> RValue<'gcc> {
+ let result_type = value.get_type();
+ let typ = result_type.to_unsigned(self.cx);
+
+ let value =
+ if result_type.is_signed(self.cx) {
+ self.gcc_int_cast(value, typ)
+ }
+ else {
+ value
+ };
+
+ let context = &self.cx.context;
+ let result =
+ match width {
+ 8 => {
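+                    // Swap ever-smaller halves, e.g. for 0b1011_0001: the nibble swap gives
+                    // 0b0001_1011, the pair swap gives 0b0100_1110, and the final bit swap
+                    // gives the reversed byte 0b1000_1101.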
+ // First step.
+ let left = self.and(value, context.new_rvalue_from_int(typ, 0xF0));
+ let left = self.lshr(left, context.new_rvalue_from_int(typ, 4));
+ let right = self.and(value, context.new_rvalue_from_int(typ, 0x0F));
+ let right = self.shl(right, context.new_rvalue_from_int(typ, 4));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_int(typ, 0xCC));
+ let left = self.lshr(left, context.new_rvalue_from_int(typ, 2));
+ let right = self.and(step1, context.new_rvalue_from_int(typ, 0x33));
+ let right = self.shl(right, context.new_rvalue_from_int(typ, 2));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.and(step2, context.new_rvalue_from_int(typ, 0xAA));
+ let left = self.lshr(left, context.new_rvalue_from_int(typ, 1));
+ let right = self.and(step2, context.new_rvalue_from_int(typ, 0x55));
+ let right = self.shl(right, context.new_rvalue_from_int(typ, 1));
+ let step3 = self.or(left, right);
+
+ step3
+ },
+ 16 => {
+ // First step.
+ let left = self.and(value, context.new_rvalue_from_int(typ, 0x5555));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 1));
+ let right = self.and(value, context.new_rvalue_from_int(typ, 0xAAAA));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 1));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_int(typ, 0x3333));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 2));
+ let right = self.and(step1, context.new_rvalue_from_int(typ, 0xCCCC));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 2));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.and(step2, context.new_rvalue_from_int(typ, 0x0F0F));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 4));
+ let right = self.and(step2, context.new_rvalue_from_int(typ, 0xF0F0));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 4));
+ let step3 = self.or(left, right);
+
+ // Fourth step.
+ let left = self.and(step3, context.new_rvalue_from_int(typ, 0x00FF));
+ let left = self.shl(left, context.new_rvalue_from_int(typ, 8));
+ let right = self.and(step3, context.new_rvalue_from_int(typ, 0xFF00));
+ let right = self.lshr(right, context.new_rvalue_from_int(typ, 8));
+ let step4 = self.or(left, right);
+
+ step4
+ },
+ 32 => {
+ // TODO(antoyo): Refactor with other implementations.
+ // First step.
+ let left = self.and(value, context.new_rvalue_from_long(typ, 0x55555555));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 1));
+ let right = self.and(value, context.new_rvalue_from_long(typ, 0xAAAAAAAA));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 1));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_long(typ, 0x33333333));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 2));
+ let right = self.and(step1, context.new_rvalue_from_long(typ, 0xCCCCCCCC));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 2));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.and(step2, context.new_rvalue_from_long(typ, 0x0F0F0F0F));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 4));
+ let right = self.and(step2, context.new_rvalue_from_long(typ, 0xF0F0F0F0));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 4));
+ let step3 = self.or(left, right);
+
+ // Fourth step.
+ let left = self.and(step3, context.new_rvalue_from_long(typ, 0x00FF00FF));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 8));
+ let right = self.and(step3, context.new_rvalue_from_long(typ, 0xFF00FF00));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 8));
+ let step4 = self.or(left, right);
+
+ // Fifth step.
+ let left = self.and(step4, context.new_rvalue_from_long(typ, 0x0000FFFF));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 16));
+ let right = self.and(step4, context.new_rvalue_from_long(typ, 0xFFFF0000));
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 16));
+ let step5 = self.or(left, right);
+
+ step5
+ },
+ 64 => {
+ // First step.
+ let left = self.shl(value, context.new_rvalue_from_long(typ, 32));
+ let right = self.lshr(value, context.new_rvalue_from_long(typ, 32));
+ let step1 = self.or(left, right);
+
+ // Second step.
+ let left = self.and(step1, context.new_rvalue_from_long(typ, 0x0001FFFF0001FFFF));
+ let left = self.shl(left, context.new_rvalue_from_long(typ, 15));
+ let right = self.and(step1, context.new_rvalue_from_long(typ, 0xFFFE0000FFFE0000u64 as i64)); // TODO(antoyo): transmute the number instead?
+ let right = self.lshr(right, context.new_rvalue_from_long(typ, 17));
+ let step2 = self.or(left, right);
+
+ // Third step.
+ let left = self.lshr(step2, context.new_rvalue_from_long(typ, 10));
+ let left = self.xor(step2, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x003F801F003F801F));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 10));
+ let left = self.or(temp, left);
+ let step3 = self.xor(left, step2);
+
+ // Fourth step.
+ let left = self.lshr(step3, context.new_rvalue_from_long(typ, 4));
+ let left = self.xor(step3, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x0E0384210E038421));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 4));
+ let left = self.or(temp, left);
+ let step4 = self.xor(left, step3);
+
+ // Fifth step.
+ let left = self.lshr(step4, context.new_rvalue_from_long(typ, 2));
+ let left = self.xor(step4, left);
+ let temp = self.and(left, context.new_rvalue_from_long(typ, 0x2248884222488842));
+
+ let left = self.shl(temp, context.new_rvalue_from_long(typ, 2));
+ let left = self.or(temp, left);
+ let step5 = self.xor(left, step4);
+
+ step5
+ },
+ 128 => {
+ // TODO(antoyo): find a more efficient implementation?
+ let sixty_four = self.gcc_int(typ, 64);
+ let right_shift = self.gcc_lshr(value, sixty_four);
+ let high = self.gcc_int_cast(right_shift, self.u64_type);
+ let low = self.gcc_int_cast(value, self.u64_type);
+
+ let reversed_high = self.bit_reverse(64, high);
+ let reversed_low = self.bit_reverse(64, low);
+
+ let new_low = self.gcc_int_cast(reversed_high, typ);
+ let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
+
+ self.gcc_or(new_low, new_high)
+ },
+ _ => {
+ panic!("cannot bit reverse with width = {}", width);
+ },
+ };
+
+ self.gcc_int_cast(result, result_type)
+ }
+
+ fn count_leading_zeroes(&mut self, width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): use width?
+ let arg_type = arg.get_type();
+ let count_leading_zeroes =
+ // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
+ // instead of using is_uint().
+ if arg_type.is_uint(&self.cx) {
+ "__builtin_clz"
+ }
+ else if arg_type.is_ulong(&self.cx) {
+ "__builtin_clzl"
+ }
+ else if arg_type.is_ulonglong(&self.cx) {
+ "__builtin_clzll"
+ }
+ else if width == 128 {
+ // Algorithm from: https://stackoverflow.com/a/28433850/389119
+ let array_type = self.context.new_array_type(None, arg_type, 3);
+ let result = self.current_func()
+                .new_local(None, array_type, "count_leading_zeroes_results");
+
+ let sixty_four = self.const_uint(arg_type, 64);
+ let shift = self.lshr(arg, sixty_four);
+ let high = self.gcc_int_cast(shift, self.u64_type);
+ let low = self.gcc_int_cast(arg, self.u64_type);
+
+ let zero = self.context.new_rvalue_zero(self.usize_type);
+ let one = self.context.new_rvalue_one(self.usize_type);
+ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+ let clzll = self.context.get_builtin_function("__builtin_clzll");
+
+ let first_elem = self.context.new_array_access(None, result, zero);
+ let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
+ self.llbb()
+ .add_assignment(None, first_elem, first_value);
+
+ let second_elem = self.context.new_array_access(None, result, one);
+ let cast = self.gcc_int_cast(self.context.new_call(None, clzll, &[low]), arg_type);
+ let second_value = self.add(cast, sixty_four);
+ self.llbb()
+ .add_assignment(None, second_elem, second_value);
+
+ let third_elem = self.context.new_array_access(None, result, two);
+ let third_value = self.const_uint(arg_type, 128);
+ self.llbb()
+ .add_assignment(None, third_elem, third_value);
+
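+            // `not_high` is 1 iff high == 0, and `not_low_and_not_high` is 1 iff the whole
+            // value is 0, so: high != 0 -> index 0 (clz(high)); high == 0 && low != 0 ->
+            // index 1 (clz(low) + 64); value == 0 -> index 2 (the constant 128).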
+ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+ let not_low_and_not_high = not_low & not_high;
+ let index = not_high + not_low_and_not_high;
+ // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
+ // gcc.
+ // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
+ // compilation stage.
+ let index = self.context.new_cast(None, index, self.i32_type);
+
+ let res = self.context.new_array_access(None, result, index);
+
+ return self.gcc_int_cast(res.to_rvalue(), arg_type);
+ }
+ else {
+ let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
+ let arg = self.context.new_cast(None, arg, self.ulonglong_type);
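+            // For example, a u16 argument is zero-extended to unsigned long long, so the
+            // builtin counts (8 - 2) * 8 = 48 extra leading zeros; subtracting `diff`
+            // yields the count for the original width.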
+ let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
+ let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
+ let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
+ return self.context.new_cast(None, res, arg_type);
+ };
+ let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
+ let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
+ self.context.new_cast(None, res, arg_type)
+ }
+
+ fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
+ let result_type = arg.get_type();
+ let arg =
+ if result_type.is_signed(self.cx) {
+ let new_type = result_type.to_unsigned(self.cx);
+ self.gcc_int_cast(arg, new_type)
+ }
+ else {
+ arg
+ };
+ let arg_type = arg.get_type();
+ let (count_trailing_zeroes, expected_type) =
+ // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
+ // instead of using is_uint().
+ if arg_type.is_uchar(&self.cx) || arg_type.is_ushort(&self.cx) || arg_type.is_uint(&self.cx) {
+            // NOTE: we don't need to mask with 0xFF for uchar because the result is undefined on zero anyway.
+ ("__builtin_ctz", self.cx.uint_type)
+ }
+ else if arg_type.is_ulong(&self.cx) {
+ ("__builtin_ctzl", self.cx.ulong_type)
+ }
+ else if arg_type.is_ulonglong(&self.cx) {
+ ("__builtin_ctzll", self.cx.ulonglong_type)
+ }
+ else if arg_type.is_u128(&self.cx) {
+ // Adapted from the algorithm to count leading zeroes from: https://stackoverflow.com/a/28433850/389119
+ let array_type = self.context.new_array_type(None, arg_type, 3);
+ let result = self.current_func()
+                .new_local(None, array_type, "count_trailing_zeroes_results");
+
+ let sixty_four = self.gcc_int(arg_type, 64);
+ let shift = self.gcc_lshr(arg, sixty_four);
+ let high = self.gcc_int_cast(shift, self.u64_type);
+ let low = self.gcc_int_cast(arg, self.u64_type);
+
+ let zero = self.context.new_rvalue_zero(self.usize_type);
+ let one = self.context.new_rvalue_one(self.usize_type);
+ let two = self.context.new_rvalue_from_long(self.usize_type, 2);
+
+ let ctzll = self.context.get_builtin_function("__builtin_ctzll");
+
+ let first_elem = self.context.new_array_access(None, result, zero);
+ let first_value = self.gcc_int_cast(self.context.new_call(None, ctzll, &[low]), arg_type);
+ self.llbb()
+ .add_assignment(None, first_elem, first_value);
+
+ let second_elem = self.context.new_array_access(None, result, one);
+ let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(None, ctzll, &[high]), arg_type), sixty_four);
+ self.llbb()
+ .add_assignment(None, second_elem, second_value);
+
+ let third_elem = self.context.new_array_access(None, result, two);
+ let third_value = self.gcc_int(arg_type, 128);
+ self.llbb()
+ .add_assignment(None, third_elem, third_value);
+
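+            // Same selection trick as in count_leading_zeroes: low != 0 -> index 0
+            // (ctz(low)); low == 0 && high != 0 -> index 1 (ctz(high) + 64); value == 0 ->
+            // index 2 (the constant 128).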
+ let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+ let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+ let not_low_and_not_high = not_low & not_high;
+ let index = not_low + not_low_and_not_high;
+ // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
+ // gcc.
+ // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
+ // compilation stage.
+ let index = self.context.new_cast(None, index, self.i32_type);
+
+ let res = self.context.new_array_access(None, result, index);
+
+ return self.gcc_int_cast(res.to_rvalue(), result_type);
+ }
+ else {
+ let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
+ let arg_size = arg_type.get_size();
+ let casted_arg = self.context.new_cast(None, arg, self.ulonglong_type);
+ let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
+ let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
+ let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
+ let masked = mask & self.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, arg);
+ let cond = self.context.new_comparison(None, ComparisonOp::Equals, masked, mask);
+ let diff = diff * self.context.new_cast(None, cond, self.int_type);
+ let res = self.context.new_call(None, count_trailing_zeroes, &[casted_arg]) - diff;
+ return self.context.new_cast(None, res, result_type);
+ };
+ let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
+ let arg =
+ if arg_type != expected_type {
+ self.context.new_cast(None, arg, expected_type)
+ }
+ else {
+ arg
+ };
+ let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
+ self.context.new_cast(None, res, result_type)
+ }
+
+ fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
+ // TODO(antoyo): use the optimized version with fewer operations.
+ let result_type = value.get_type();
+ let value_type = result_type.to_unsigned(self.cx);
+
+ let value =
+ if result_type.is_signed(self.cx) {
+ self.gcc_int_cast(value, value_type)
+ }
+ else {
+ value
+ };
+
+ if value_type.is_u128(&self.cx) {
+            // TODO(antoyo): handle this case in the normal algorithm below to get a more
+            // efficient implementation (one that does not require a call to __popcountdi2).
+ let popcount = self.context.get_builtin_function("__builtin_popcountll");
+ let sixty_four = self.gcc_int(value_type, 64);
+ let right_shift = self.gcc_lshr(value, sixty_four);
+ let high = self.gcc_int_cast(right_shift, self.cx.ulonglong_type);
+ let high = self.context.new_call(None, popcount, &[high]);
+ let low = self.gcc_int_cast(value, self.cx.ulonglong_type);
+ let low = self.context.new_call(None, popcount, &[low]);
+ let res = high + low;
+ return self.gcc_int_cast(res, result_type);
+ }
+
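+        // Pairwise field summation, e.g. for the byte 0b1101_0110: after the first step the
+        // 2-bit fields hold 0b10_01_01_01 (2, 1, 1, 1 ones), after the second step the 4-bit
+        // fields hold 3 and 2, and after the third step the byte holds the final count 5.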
+ // First step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x5555555555555555);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 1);
+ let right = shifted & mask;
+ let value = left + right;
+
+ // Second step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x3333333333333333);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 2);
+ let right = shifted & mask;
+ let value = left + right;
+
+ // Third step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x0F0F0F0F0F0F0F0F);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 4);
+ let right = shifted & mask;
+ let value = left + right;
+
+ if value_type.is_u8(&self.cx) {
+ return self.context.new_cast(None, value, result_type);
+ }
+
+ // Fourth step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x00FF00FF00FF00FF);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 8);
+ let right = shifted & mask;
+ let value = left + right;
+
+ if value_type.is_u16(&self.cx) {
+ return self.context.new_cast(None, value, result_type);
+ }
+
+ // Fifth step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x0000FFFF0000FFFF);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 16);
+ let right = shifted & mask;
+ let value = left + right;
+
+ if value_type.is_u32(&self.cx) {
+ return self.context.new_cast(None, value, result_type);
+ }
+
+ // Sixth step.
+ let mask = self.context.new_rvalue_from_long(value_type, 0x00000000FFFFFFFF);
+ let left = value & mask;
+ let shifted = value >> self.context.new_rvalue_from_int(value_type, 32);
+ let right = shifted & mask;
+ let value = left + right;
+
+ self.context.new_cast(None, value, result_type)
+ }
+
+ // Algorithm from: https://blog.regehr.org/archives/1063
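+    // For example, rotate_left(0b1001_0110u8, 3, 8): shift = 3 % 8 = 3,
+    // lhs = value << 3 = 0b1011_0000, rhs = value >> ((-3) & 7) = value >> 5 = 0b0000_0100,
+    // so the result is 0b1011_0100. The `& (width - 1)` also makes a zero shift safe.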
+ fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+ let max = self.const_uint(shift.get_type(), width);
+ let shift = self.urem(shift, max);
+ let lhs = self.shl(value, shift);
+ let result_neg = self.neg(shift);
+ let result_and =
+ self.and(
+ result_neg,
+ self.const_uint(shift.get_type(), width - 1),
+ );
+ let rhs = self.lshr(value, result_and);
+ self.or(lhs, rhs)
+ }
+
+ // Algorithm from: https://blog.regehr.org/archives/1063
+ fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+ let max = self.const_uint(shift.get_type(), width);
+ let shift = self.urem(shift, max);
+ let lhs = self.lshr(value, shift);
+ let result_neg = self.neg(shift);
+ let result_and =
+ self.and(
+ result_neg,
+ self.const_uint(shift.get_type(), width - 1),
+ );
+ let rhs = self.shl(value, result_and);
+ self.or(lhs, rhs)
+ }
+
+ fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+ let result_type = lhs.get_type();
+ if signed {
+ // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
+ let func = self.current_func.borrow().expect("func");
+ let res = func.new_local(None, result_type, "saturating_sum");
+ let supports_native_type = self.is_native_int_type(result_type);
+ let overflow =
+ if supports_native_type {
+ let func_name =
+ match width {
+ 8 => "__builtin_add_overflow",
+ 16 => "__builtin_add_overflow",
+ 32 => "__builtin_sadd_overflow",
+ 64 => "__builtin_saddll_overflow",
+ 128 => "__builtin_add_overflow",
+ _ => unreachable!(),
+ };
+ let overflow_func = self.context.get_builtin_function(func_name);
+ self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
+ }
+ else {
+ let func_name =
+ match width {
+ 128 => "__rust_i128_addo",
+ _ => unreachable!(),
+ };
+ let param_a = self.context.new_parameter(None, result_type, "a");
+ let param_b = self.context.new_parameter(None, result_type, "b");
+ let result_field = self.context.new_field(None, result_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ let result = self.context.new_call(None, func, &[lhs, rhs]);
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ self.llbb().add_assignment(None, res, int_result);
+ overflow
+ };
+
+ let then_block = func.new_block("then");
+ let after_block = func.new_block("after");
+
+ // Return `result_type`'s maximum or minimum value on overflow
+ // NOTE: convert the type to unsigned to have an unsigned shift.
+ let unsigned_type = result_type.to_unsigned(&self.cx);
+ let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+ let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
+ let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
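+            // `shifted` is the sign bit of `lhs`, so this yields INT_MAX
+            // (0 + 0b0111...1) when lhs >= 0 and INT_MIN (1 + 0b0111...1 = 0b1000...0)
+            // when lhs < 0.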
+ then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
+ then_block.end_with_jump(None, after_block);
+
+ self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+
+            // NOTE: since jumps were added in a place rustc does not
+            // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ res.to_rvalue()
+ }
+ else {
+ // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
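+            // On wraparound `res < lhs`, so `value` is all ones and the `or` saturates to
+            // the maximum, e.g. 250u8 + 10 wraps to 4, then 4 | 0xFF == 255.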
+ let res = self.gcc_add(lhs, rhs);
+ let cond = self.gcc_icmp(IntPredicate::IntULT, res, lhs);
+ let value = self.gcc_neg(self.gcc_int_cast(cond, result_type));
+ self.gcc_or(res, value)
+ }
+ }
+
+ // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
+ fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+ let result_type = lhs.get_type();
+ if signed {
+ // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
+ let func = self.current_func.borrow().expect("func");
+ let res = func.new_local(None, result_type, "saturating_diff");
+ let supports_native_type = self.is_native_int_type(result_type);
+ let overflow =
+ if supports_native_type {
+ let func_name =
+ match width {
+ 8 => "__builtin_sub_overflow",
+ 16 => "__builtin_sub_overflow",
+ 32 => "__builtin_ssub_overflow",
+ 64 => "__builtin_ssubll_overflow",
+ 128 => "__builtin_sub_overflow",
+ _ => unreachable!(),
+ };
+ let overflow_func = self.context.get_builtin_function(func_name);
+ self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
+ }
+ else {
+ let func_name =
+ match width {
+ 128 => "__rust_i128_subo",
+ _ => unreachable!(),
+ };
+ let param_a = self.context.new_parameter(None, result_type, "a");
+ let param_b = self.context.new_parameter(None, result_type, "b");
+ let result_field = self.context.new_field(None, result_type, "result");
+ let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+ let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+ let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+ let result = self.context.new_call(None, func, &[lhs, rhs]);
+ let overflow = result.access_field(None, overflow_field);
+ let int_result = result.access_field(None, result_field);
+ self.llbb().add_assignment(None, res, int_result);
+ overflow
+ };
+
+ let then_block = func.new_block("then");
+ let after_block = func.new_block("after");
+
+ // Return `result_type`'s maximum or minimum value on overflow
+ // NOTE: convert the type to unsigned to have an unsigned shift.
+ let unsigned_type = result_type.to_unsigned(&self.cx);
+ let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+ let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
+ let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
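+            // As in saturating_add: clamp to INT_MAX when lhs >= 0 and to INT_MIN when
+            // lhs < 0.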
+ then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
+ then_block.end_with_jump(None, after_block);
+
+ self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+
+            // NOTE: since jumps were added in a place rustc does not
+            // expect, the current block in the state needs to be updated.
+ self.switch_to_block(after_block);
+
+ res.to_rvalue()
+ }
+ else {
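+            // `res <= lhs` holds iff the subtraction did not wrap, so the mask keeps `res`
+            // on success and clamps to 0 on underflow, e.g. 5u8 - 10 wraps to 251 -> 0.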
+ let res = self.gcc_sub(lhs, rhs);
+ let comparison = self.gcc_icmp(IntPredicate::IntULE, res, lhs);
+ let value = self.gcc_neg(self.gcc_int_cast(comparison, result_type));
+ self.gcc_and(res, value)
+ }
+ }
+}
+
+fn try_intrinsic<'gcc, 'tcx>(bx: &mut Builder<'_, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+    // NOTE: the `|| true` here makes the panic=abort strategy apply even under panic=unwind.
+ if bx.sess().panic_strategy() == PanicStrategy::Abort || true {
+ // TODO(bjorn3): Properly implement unwinding and remove the `|| true` once this is done.
+ bx.call(bx.type_void(), try_func, &[data], None);
+ // Return 0 unconditionally from the intrinsic call;
+ // we can never unwind.
+ let ret_align = bx.tcx.data_layout.i32_align.abi;
+ bx.store(bx.const_i32(0), dest, ret_align);
+ }
+ else if wants_msvc_seh(bx.sess()) {
+ unimplemented!();
+ }
+ else {
+ unimplemented!();
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
new file mode 100644
index 000000000..2401f3350
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -0,0 +1,751 @@
+use std::cmp::Ordering;
+
+use gccjit::{BinaryOp, RValue, Type, ToRValue};
+use rustc_codegen_ssa::base::compare_simd_types;
+use rustc_codegen_ssa::common::{TypeKind, span_invalid_monomorphization_error};
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::{BaseTypeMethods, BuilderMethods};
+use rustc_hir as hir;
+use rustc_middle::span_bug;
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{Span, Symbol, sym};
+use rustc_target::abi::Align;
+
+use crate::builder::Builder;
+use crate::intrinsic;
+
+pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, span: Span) -> Result<RValue<'gcc>, ()> {
+ // macros for error handling:
+ #[allow(unused_macro_rules)]
+ macro_rules! emit_error {
+ ($msg: tt) => {
+ emit_error!($msg, )
+ };
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ macro_rules! require {
+ ($cond: expr, $($fmt: tt)*) => {
+ if !$cond {
+ return_error!($($fmt)*);
+ }
+ };
+ }
+
+ macro_rules! require_simd {
+ ($ty: expr, $position: expr) => {
+ require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+ };
+ }
+
+ let tcx = bx.tcx();
+ let sig =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
+ let arg_tys = sig.inputs();
+
+ if name == sym::simd_select_bitmask {
+ require_simd!(arg_tys[1], "argument");
+ let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+
+ let expected_int_bits = (len.max(8) - 1).next_power_of_two();
+ let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
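+        // For example, for len == 4: expected_int_bits = (8 - 1).next_power_of_two() = 8
+        // (a u8 mask) and expected_bytes = 0 + 1 = 1; for len == 16: 16 bits and 2 bytes.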
+
+ let mask_ty = arg_tys[0];
+ let mut mask = match mask_ty.kind() {
+ ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+ ty::Array(elem, len)
+ if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+ && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+ == Some(expected_bytes) =>
+ {
+ let place = PlaceRef::alloca(bx, args[0].layout);
+ args[0].val.store(bx, place);
+ let int_ty = bx.type_ix(expected_bytes * 8);
+ let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
+ bx.load(int_ty, ptr, Align::ONE)
+ }
+ _ => return_error!(
+ "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`",
+ mask_ty,
+ expected_int_bits,
+ expected_bytes
+ ),
+ };
+
+ let arg1 = args[1].immediate();
+ let arg1_type = arg1.get_type();
+ let arg1_vector_type = arg1_type.unqualified().dyncast_vector().expect("vector type");
+ let arg1_element_type = arg1_vector_type.get_element_type();
+
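+        // Expand the integer mask into one lane per vector element: iteration i extracts
+        // bit i into lane i, e.g. len == 4 with mask 0b0110 yields the lanes [0, 1, 1, 0].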
+ let mut elements = vec![];
+ let one = bx.context.new_rvalue_one(mask.get_type());
+ for _ in 0..len {
+ let element = bx.context.new_cast(None, mask & one, arg1_element_type);
+ elements.push(element);
+ mask = mask >> one;
+ }
+ let vector_mask = bx.context.new_rvalue_from_vector(None, arg1_type, &elements);
+
+ return Ok(bx.vector_select(vector_mask, arg1, args[2].immediate()));
+ }
+
+ // every intrinsic below takes a SIMD vector as its first argument
+ require_simd!(arg_tys[0], "input");
+ let in_ty = arg_tys[0];
+
+ let comparison = match name {
+ sym::simd_eq => Some(hir::BinOpKind::Eq),
+ sym::simd_ne => Some(hir::BinOpKind::Ne),
+ sym::simd_lt => Some(hir::BinOpKind::Lt),
+ sym::simd_le => Some(hir::BinOpKind::Le),
+ sym::simd_gt => Some(hir::BinOpKind::Gt),
+ sym::simd_ge => Some(hir::BinOpKind::Ge),
+ _ => None,
+ };
+
+ let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
+ if let Some(cmp_op) = comparison {
+ require_simd!(ret_ty, "return");
+
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ require!(
+ bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+ "expected return type with integer elements, found `{}` with non-integer `{}`",
+ ret_ty,
+ out_ty
+ );
+
+ return Ok(compare_simd_types(
+ bx,
+ args[0].immediate(),
+ args[1].immediate(),
+ in_elem,
+ llret_ty,
+ cmp_op,
+ ));
+ }
+
+ if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") {
+ let n: u64 =
+ if stripped.is_empty() {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ match args[2].layout.ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+ len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ }
+ _ => return_error!(
+ "simd_shuffle index must be an array of `u32`, got `{}`",
+ args[2].layout.ty
+ ),
+ }
+ }
+ else {
+ stripped.parse().unwrap_or_else(|_| {
+ span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
+ })
+ };
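+        // E.g. `simd_shuffle4` encodes n = 4 in its name, while the unsuffixed
+        // `simd_shuffle` takes n from the length of its `[u32; N]` index array argument.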
+
+ require_simd!(ret_ty, "return");
+
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ out_len == n,
+ "expected return type of length {}, found `{}` with length {}",
+ n,
+ ret_ty,
+ out_len
+ );
+ require!(
+ in_elem == out_ty,
+ "expected return element type `{}` (element of input `{}`), \
+ found `{}` with element type `{}`",
+ in_elem,
+ in_ty,
+ ret_ty,
+ out_ty
+ );
+
+ let vector = args[2].immediate();
+
+ return Ok(bx.shuffle_vector(
+ args[0].immediate(),
+ args[1].immediate(),
+ vector,
+ ));
+ }
+
+ #[cfg(feature="master")]
+ if name == sym::simd_insert {
+ require!(
+ in_elem == arg_tys[2],
+ "expected inserted type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ arg_tys[2]
+ );
+ let vector = args[0].immediate();
+ let index = args[1].immediate();
+ let value = args[2].immediate();
+ // TODO(antoyo): use a recursive unqualified() here.
+ let vector_type = vector.get_type().unqualified().dyncast_vector().expect("vector type");
+ let element_type = vector_type.get_element_type();
+ // NOTE: we cannot cast to an array and assign to its element here because the value might
+ // not be an l-value. So, call a builtin to set the element.
+ // TODO(antoyo): perhaps we could create a new vector or maybe there's a GIMPLE instruction for that?
+ // TODO(antoyo): don't use target specific builtins here.
+ let func_name =
+ match in_len {
+ 2 => {
+ if element_type == bx.i64_type {
+ "__builtin_ia32_vec_set_v2di"
+ }
+ else {
+ unimplemented!();
+ }
+ },
+ 4 => {
+ if element_type == bx.i32_type {
+ "__builtin_ia32_vec_set_v4si"
+ }
+ else {
+ unimplemented!();
+ }
+ },
+ 8 => {
+ if element_type == bx.i16_type {
+ "__builtin_ia32_vec_set_v8hi"
+ }
+ else {
+ unimplemented!();
+ }
+ },
+ _ => unimplemented!("Len: {}", in_len),
+ };
+ let builtin = bx.context.get_target_builtin_function(func_name);
+ let param1_type = builtin.get_param(0).to_rvalue().get_type();
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ let vector = bx.cx.bitcast_if_needed(vector, param1_type);
+ let result = bx.context.new_call(None, builtin, &[vector, value, bx.context.new_cast(None, index, bx.int_type)]);
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ return Ok(bx.context.new_bitcast(None, result, vector.get_type()));
+ }
+
+ #[cfg(feature="master")]
+ if name == sym::simd_extract {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ let vector = args[0].immediate();
+ return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue());
+ }
+
+ if name == sym::simd_select {
+ let m_elem_ty = in_elem;
+ let m_len = in_len;
+ require_simd!(arg_tys[1], "argument");
+ let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ require!(
+ m_len == v_len,
+ "mismatched lengths: mask length `{}` != other vector length `{}`",
+ m_len,
+ v_len
+ );
+ match m_elem_ty.kind() {
+ ty::Int(_) => {}
+ _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
+ }
+ return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate()));
+ }
+
+ if name == sym::simd_cast {
+ require_simd!(ret_ty, "return");
+ let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ // casting cares about nominal type, not just structural type
+ if in_elem == out_elem {
+ return Ok(args[0].immediate());
+ }
+
+ enum Style {
+ Float,
+ Int(/* is signed? */ bool),
+ Unsupported,
+ }
+
+ let (in_style, in_width) = match in_elem.kind() {
+ // vectors of pointer-sized integers should've been
+ // disallowed before here, so this unwrap is safe.
+ ty::Int(i) => (
+ Style::Int(true),
+ i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Uint(u) => (
+ Style::Int(false),
+ u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+ let (out_style, out_width) = match out_elem.kind() {
+ ty::Int(i) => (
+ Style::Int(true),
+ i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Uint(u) => (
+ Style::Int(false),
+ u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+
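+        // NOTE: this fallback widens by casting element by element through an array view;
+        // it is hardcoded to 8-lane vectors (hence the 8 accesses below).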
+ let extend = |in_type, out_type| {
+ let vector_type = bx.context.new_vector_type(out_type, 8);
+ let vector = args[0].immediate();
+ let array_type = bx.context.new_array_type(None, in_type, 8);
+ // TODO(antoyo): switch to using new_vector_access or __builtin_convertvector for vector casting.
+ let array = bx.context.new_bitcast(None, vector, array_type);
+
+ let cast_vec_element = |index| {
+ let index = bx.context.new_rvalue_from_int(bx.int_type, index);
+ bx.context.new_cast(None, bx.context.new_array_access(None, array, index).to_rvalue(), out_type)
+ };
+
+ bx.context.new_rvalue_from_vector(None, vector_type, &[
+ cast_vec_element(0),
+ cast_vec_element(1),
+ cast_vec_element(2),
+ cast_vec_element(3),
+ cast_vec_element(4),
+ cast_vec_element(5),
+ cast_vec_element(6),
+ cast_vec_element(7),
+ ])
+ };
+
+ match (in_style, out_style) {
+ (Style::Int(in_is_signed), Style::Int(_)) => {
+ return Ok(match in_width.cmp(&out_width) {
+ Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
+ Ordering::Equal => args[0].immediate(),
+ Ordering::Less => {
+ if in_is_signed {
+ match (in_width, out_width) {
+ // FIXME(antoyo): the function _mm_cvtepi8_epi16 should directly
+ // call an intrinsic equivalent to __builtin_ia32_pmovsxbw128 so that
+ // we can generate a call to it.
+ (8, 16) => extend(bx.i8_type, bx.i16_type),
+ (8, 32) => extend(bx.i8_type, bx.i32_type),
+ (8, 64) => extend(bx.i8_type, bx.i64_type),
+ (16, 32) => extend(bx.i16_type, bx.i32_type),
+ (32, 64) => extend(bx.i32_type, bx.i64_type),
+ (16, 64) => extend(bx.i16_type, bx.i64_type),
+ _ => unimplemented!("in: {}, out: {}", in_width, out_width),
+ }
+ } else {
+ match (in_width, out_width) {
+ (8, 16) => extend(bx.u8_type, bx.u16_type),
+ (8, 32) => extend(bx.u8_type, bx.u32_type),
+ (8, 64) => extend(bx.u8_type, bx.u64_type),
+ (16, 32) => extend(bx.u16_type, bx.u32_type),
+ (16, 64) => extend(bx.u16_type, bx.u64_type),
+ (32, 64) => extend(bx.u32_type, bx.u64_type),
+ _ => unimplemented!("in: {}, out: {}", in_width, out_width),
+ }
+ }
+ }
+ });
+ }
+ (Style::Int(_), Style::Float) => {
+            // TODO: add support for internal functions in libgccjit to get access to
+            // IFN_VEC_CONVERT, which behaves like __builtin_convertvector.
+            // Or maybe provide convert_vector as an API, since it might not be easy to
+            // get the types of internal functions.
+ unimplemented!();
+ }
+ (Style::Float, Style::Int(_)) => {
+ unimplemented!();
+ }
+ (Style::Float, Style::Float) => {
+ unimplemented!();
+ }
+ _ => { /* Unsupported. Fallthrough. */ }
+ }
+ require!(
+ false,
+ "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
+ in_ty,
+ in_elem,
+ ret_ty,
+ out_elem
+ );
+ }
+
+ macro_rules! arith_binary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+
+ fn simd_simple_float_intrinsic<'gcc, 'tcx>(
+ name: Symbol,
+ in_elem: Ty<'_>,
+ in_ty: Ty<'_>,
+ in_len: u64,
+ bx: &mut Builder<'_, 'gcc, 'tcx>,
+ span: Span,
+ args: &[OperandRef<'tcx, RValue<'gcc>>],
+ ) -> Result<RValue<'gcc>, ()> {
+ macro_rules! emit_error {
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ let (elem_ty_str, elem_ty) =
+ if let ty::Float(f) = in_elem.kind() {
+ let elem_ty = bx.cx.type_float_from_ty(*f);
+ match f.bit_width() {
+ 32 => ("f32", elem_ty),
+ 64 => ("f64", elem_ty),
+ _ => {
+ return_error!(
+ "unsupported element type `{}` of floating-point vector `{}`",
+ f.name_str(),
+ in_ty
+ );
+ }
+ }
+ }
+ else {
+ return_error!("`{}` is not a floating-point type", in_ty);
+ };
+
+ let vec_ty = bx.cx.type_vector(elem_ty, in_len);
+
+ let (intr_name, fn_ty) =
+ match name {
+ sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)), // TODO(antoyo): pand with 170141183420855150465331762880109871103
+ sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
+ sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
+ sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
+ sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
+ _ => return_error!("unrecognized intrinsic `{}`", name),
+ };
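+        // The name follows the LLVM intrinsic naming scheme `llvm.<op>.v<lanes><elem>`,
+        // e.g. `llvm.sqrt.v4f32` for `simd_fsqrt` on a 4-lane f32 vector; the gcc
+        // backend then resolves that name to a gcc builtin in `intrinsic::llvm`.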
+ let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
+ let function = intrinsic::llvm::intrinsic(llvm_name, &bx.cx);
+ let function: RValue<'gcc> = unsafe { std::mem::transmute(function) };
+ let c = bx.call(fn_ty, function, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+ Ok(c)
+ }
+
+ if std::matches!(
+ name,
+ sym::simd_ceil
+ | sym::simd_fabs
+ | sym::simd_fcos
+ | sym::simd_fexp2
+ | sym::simd_fexp
+ | sym::simd_flog10
+ | sym::simd_flog2
+ | sym::simd_flog
+ | sym::simd_floor
+ | sym::simd_fma
+ | sym::simd_fpow
+ | sym::simd_fpowi
+ | sym::simd_fsin
+ | sym::simd_fsqrt
+ | sym::simd_round
+ | sym::simd_trunc
+ ) {
+ return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
+ }
+
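+    // Each line below maps one `simd_*` intrinsic to the builder method used per
+    // element kind: e.g. `simd_add` lowers to `add` on integer vectors and to `fadd`
+    // on float vectors.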
+ arith_binary! {
+ simd_add: Uint, Int => add, Float => fadd;
+ simd_sub: Uint, Int => sub, Float => fsub;
+ simd_mul: Uint, Int => mul, Float => fmul;
+ simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+ simd_rem: Uint => urem, Int => srem, Float => frem;
+ simd_shl: Uint, Int => shl;
+ simd_shr: Uint => lshr, Int => ashr;
+ simd_and: Uint, Int => and;
+ simd_or: Uint, Int => or; // FIXME(antoyo): calling `or` might not work on vectors.
+ simd_xor: Uint, Int => xor;
+ }
+
+ macro_rules! arith_unary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+
+ arith_unary! {
+ simd_neg: Int => neg, Float => fneg;
+ }
+
+ #[cfg(feature="master")]
+ if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
+ let lhs = args[0].immediate();
+ let rhs = args[1].immediate();
+ let is_add = name == sym::simd_saturating_add;
+ let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
+ let (signed, elem_width, elem_ty) = match *in_elem.kind() {
+ ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
+ ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
+ _ => {
+ return_error!(
+ "expected element type `{}` of vector type `{}` \
+ to be a signed or unsigned integer type",
+ arg_tys[0].simd_size_and_type(bx.tcx()).1,
+ arg_tys[0]
+ );
+ }
+ };
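+        // Pick the matching x86 builtin: these follow the AVX2 saturating instructions,
+        // e.g. `__builtin_ia32_paddsb256` is the 256-bit signed saturating add of
+        // packed bytes (32 lanes of 8 bits).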
+ let builtin_name =
+ match (signed, is_add, in_len, elem_width) {
+ (true, true, 32, 8) => "__builtin_ia32_paddsb256", // TODO(antoyo): cast arguments to unsigned.
+ (false, true, 32, 8) => "__builtin_ia32_paddusb256",
+ (true, true, 16, 16) => "__builtin_ia32_paddsw256",
+ (false, true, 16, 16) => "__builtin_ia32_paddusw256",
+ (true, false, 16, 16) => "__builtin_ia32_psubsw256",
+ (false, false, 16, 16) => "__builtin_ia32_psubusw256",
+ (true, false, 32, 8) => "__builtin_ia32_psubsb256",
+ (false, false, 32, 8) => "__builtin_ia32_psubusb256",
+ _ => unimplemented!("signed: {}, is_add: {}, in_len: {}, elem_width: {}", signed, is_add, in_len, elem_width),
+ };
+ let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
+
+ let func = bx.context.get_target_builtin_function(builtin_name);
+ let param1_type = func.get_param(0).to_rvalue().get_type();
+ let param2_type = func.get_param(1).to_rvalue().get_type();
+ let lhs = bx.cx.bitcast_if_needed(lhs, param1_type);
+ let rhs = bx.cx.bitcast_if_needed(rhs, param2_type);
+ let result = bx.context.new_call(None, func, &[lhs, rhs]);
+ // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
+ return Ok(bx.context.new_bitcast(None, result, vec_ty));
+ }
+
+ macro_rules! arith_red {
+ ($name:ident : $vec_op:expr, $float_reduce:ident, $ordered:expr, $op:ident,
+ $identity:expr) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.vector_reduce_op(args[0].immediate(), $vec_op);
+ if $ordered {
+ // if overflow occurs, the result is the
+ // mathematical result modulo 2^n:
+ Ok(bx.$op(args[1].immediate(), r))
+ }
+ else {
+ Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
+ }
+ }
+ ty::Float(_) => {
+ if $ordered {
+ // ordered arithmetic reductions take an accumulator
+ let acc = args[1].immediate();
+ Ok(bx.$float_reduce(acc, args[0].immediate()))
+ }
+ else {
+ Ok(bx.vector_reduce_op(args[0].immediate(), $vec_op))
+ }
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
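+    // Only the unordered reductions are wired up for now; their identity argument is
+    // currently unused (see the TODO below).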
+ arith_red!(
+ simd_reduce_add_unordered: BinaryOp::Plus,
+ vector_reduce_fadd_fast,
+ false,
+ add,
+ 0.0 // TODO: Use this argument.
+ );
+ arith_red!(
+ simd_reduce_mul_unordered: BinaryOp::Mult,
+ vector_reduce_fmul_fast,
+ false,
+ mul,
+ 1.0
+ );
+
+ macro_rules! minmax_red {
+ ($name:ident: $reduction:ident) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) | ty::Float(_) => Ok(bx.$reduction(args[0].immediate())),
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ minmax_red!(simd_reduce_min: vector_reduce_min);
+ minmax_red!(simd_reduce_max: vector_reduce_max);
+
+ macro_rules! bitwise_red {
+ ($name:ident : $op:expr, $boolean:expr) => {
+ if name == sym::$name {
+ let input = if !$boolean {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ args[0].immediate()
+ } else {
+ match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {}
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ }
+
+ // boolean reductions operate on vectors of i1s:
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len as u64);
+ bx.trunc(args[0].immediate(), i1xn)
+ };
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.vector_reduce_op(input, $op);
+ Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ bitwise_red!(simd_reduce_and: BinaryOp::BitwiseAnd, false);
+ bitwise_red!(simd_reduce_or: BinaryOp::BitwiseOr, false);
+
+ unimplemented!("simd {}", name);
+}
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
new file mode 100644
index 000000000..8a206c036
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -0,0 +1,331 @@
+/*
+ * TODO(antoyo): implement equality in libgccjit based on https://zpz.github.io/blog/overloading-equality-operator-in-cpp-class-hierarchy/ (for type equality?)
+ * TODO(antoyo): support #[inline] attributes.
+ * TODO(antoyo): support LTO (gcc's equivalent to Thin LTO is enabled by -fwhopr: https://stackoverflow.com/questions/64954525/does-gcc-have-thin-lto).
+ *
+ * TODO(antoyo): remove the patches.
+ */
+
+#![feature(
+ rustc_private,
+ decl_macro,
+ associated_type_bounds,
+ never_type,
+ trusted_len,
+ hash_raw_entry
+)]
+#![allow(broken_intra_doc_links)]
+#![recursion_limit="256"]
+#![warn(rust_2018_idioms)]
+#![warn(unused_lifetimes)]
+
+extern crate rustc_ast;
+extern crate rustc_codegen_ssa;
+extern crate rustc_data_structures;
+extern crate rustc_errors;
+extern crate rustc_hir;
+extern crate rustc_metadata;
+extern crate rustc_middle;
+extern crate rustc_session;
+extern crate rustc_span;
+extern crate rustc_target;
+extern crate tempfile;
+
+// This prevents duplicating functions and statics that are already part of the host rustc process.
+#[allow(unused_extern_crates)]
+extern crate rustc_driver;
+
+mod abi;
+mod allocator;
+mod archive;
+mod asm;
+mod back;
+mod base;
+mod builder;
+mod callee;
+mod common;
+mod consts;
+mod context;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod int;
+mod intrinsic;
+mod mono_item;
+mod type_;
+mod type_of;
+
+use std::any::Any;
+use std::sync::{Arc, Mutex};
+
+use gccjit::{Context, OptimizationLevel, CType};
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::base::codegen_crate;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryFn};
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::target_features::supported_target_features;
+use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{ErrorGuaranteed, Handler};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::ty::query::Providers;
+use rustc_session::config::{Lto, OptLevel, OutputFilenames};
+use rustc_session::Session;
+use rustc_span::Symbol;
+use rustc_span::fatal_error::FatalError;
+use tempfile::TempDir;
+
+pub struct PrintOnPanic<F: Fn() -> String>(pub F);
+
+impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
+ fn drop(&mut self) {
+ if ::std::thread::panicking() {
+ println!("{}", (self.0)());
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct GccCodegenBackend {
+ supports_128bit_integers: Arc<Mutex<bool>>,
+}
+
+impl CodegenBackend for GccCodegenBackend {
+ fn init(&self, sess: &Session) {
+ if sess.lto() != Lto::No {
+ sess.warn("LTO is not supported. You may get a linker error.");
+ }
+
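+        // Probe for 128-bit integer support: create a throwaway context that uses the
+        // UInt128t C type, compile it to a scratch assembly file and record whether
+        // libgccjit reported an error.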
+ let temp_dir = TempDir::new().expect("cannot create temporary directory");
+ let temp_file = temp_dir.into_path().join("result.asm");
+ let check_context = Context::default();
+ check_context.set_print_errors_to_stderr(false);
+ let _int128_ty = check_context.new_c_type(CType::UInt128t);
+        // NOTE: we cannot just call compile() as this would require files other than libgccjit.so.
+ check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str"));
+ *self.supports_128bit_integers.lock().expect("lock") = check_context.get_last_error() == Ok(None);
+ }
+
+ fn provide(&self, providers: &mut Providers) {
+ // FIXME(antoyo) compute list of enabled features from cli flags
+ providers.global_backend_features = |_tcx, ()| vec![];
+ }
+
+ fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
+ let target_cpu = target_cpu(tcx.sess);
+ let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module);
+
+ Box::new(res)
+ }
+
+ fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+ let (codegen_results, work_products) = ongoing_codegen
+ .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
+ .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
+ .join(sess);
+
+ Ok((codegen_results, work_products))
+ }
+
+ fn link(&self, sess: &Session, codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorGuaranteed> {
+ use rustc_codegen_ssa::back::link::link_binary;
+
+ link_binary(
+ sess,
+ &crate::archive::ArArchiveBuilderBuilder,
+ &codegen_results,
+ outputs,
+ )
+ }
+
+ fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+ target_features(sess, allow_unstable)
+ }
+}
+
+impl ExtraBackendMethods for GccCodegenBackend {
+ fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool) -> Self::Module {
+ let mut mods = GccContext {
+ context: Context::default(),
+ };
+ unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, has_alloc_error_handler); }
+ mods
+ }
+
+ fn compile_codegen_unit<'tcx>(&self, tcx: TyCtxt<'tcx>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
+ base::compile_codegen_unit(tcx, cgu_name, *self.supports_128bit_integers.lock().expect("lock"))
+ }
+
+ fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel, _features: &[String]) -> TargetMachineFactoryFn<Self> {
+ // TODO(antoyo): set opt level.
+ Arc::new(|_| {
+ Ok(())
+ })
+ }
+
+ fn target_cpu<'b>(&self, _sess: &'b Session) -> &'b str {
+ unimplemented!();
+ }
+
+ fn tune_cpu<'b>(&self, _sess: &'b Session) -> Option<&'b str> {
+ None
+ // TODO(antoyo)
+ }
+}
+
+pub struct ModuleBuffer;
+
+impl ModuleBufferMethods for ModuleBuffer {
+ fn data(&self) -> &[u8] {
+ unimplemented!();
+ }
+}
+
+pub struct ThinBuffer;
+
+impl ThinBufferMethods for ThinBuffer {
+ fn data(&self) -> &[u8] {
+ unimplemented!();
+ }
+}
+
+pub struct GccContext {
+ context: Context<'static>,
+}
+
+unsafe impl Send for GccContext {}
+// FIXME(antoyo): that shouldn't be Sync. Parallel compilation is currently disabled with "-Zno-parallel-llvm". Try to disable it here.
+unsafe impl Sync for GccContext {}
+
+impl WriteBackendMethods for GccCodegenBackend {
+ type Module = GccContext;
+ type TargetMachine = ();
+ type ModuleBuffer = ModuleBuffer;
+ type Context = ();
+ type ThinData = ();
+ type ThinBuffer = ThinBuffer;
+
+ fn run_fat_lto(_cgcx: &CodegenContext<Self>, mut modules: Vec<FatLTOInput<Self>>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
+ // TODO(antoyo): implement LTO by sending -flto to libgccjit and adding the appropriate gcc linker plugins.
+ // NOTE: implemented elsewhere.
+ // TODO(antoyo): what is implemented elsewhere ^ ?
+ let module =
+ match modules.remove(0) {
+ FatLTOInput::InMemory(module) => module,
+ FatLTOInput::Serialized { .. } => {
+ unimplemented!();
+ }
+ };
+ Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: vec![] })
+ }
+
+ fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+ unimplemented!();
+ }
+
+ fn print_pass_timings(&self) {
+ unimplemented!();
+ }
+
+ unsafe fn optimize(_cgcx: &CodegenContext<Self>, _diag_handler: &Handler, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
+ module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
+ Ok(())
+ }
+
+ fn optimize_fat(_cgcx: &CodegenContext<Self>, _module: &mut ModuleCodegen<Self::Module>) -> Result<(), FatalError> {
+ // TODO(antoyo)
+ Ok(())
+ }
+
+ unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ unimplemented!();
+ }
+
+ unsafe fn codegen(cgcx: &CodegenContext<Self>, diag_handler: &Handler, module: ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+ back::write::codegen(cgcx, diag_handler, module, config)
+ }
+
+ fn prepare_thin(_module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
+ unimplemented!();
+ }
+
+ fn serialize_module(_module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
+ unimplemented!();
+ }
+
+ fn run_link(cgcx: &CodegenContext<Self>, diag_handler: &Handler, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ back::write::link(cgcx, diag_handler, modules)
+ }
+}
+
+/// This is the entrypoint for a hot-plugged rustc_codegen_gcc.
+#[no_mangle]
+pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+ Box::new(GccCodegenBackend {
+ supports_128bit_integers: Arc::new(Mutex::new(false)),
+ })
+}
+
+fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
+ match optlevel {
+ None => OptimizationLevel::None,
+ Some(level) => {
+ match level {
+ OptLevel::No => OptimizationLevel::None,
+ OptLevel::Less => OptimizationLevel::Limited,
+ OptLevel::Default => OptimizationLevel::Standard,
+ OptLevel::Aggressive => OptimizationLevel::Aggressive,
+ OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
+ }
+ },
+ }
+}
+
+fn handle_native(name: &str) -> &str {
+ if name != "native" {
+ return name;
+ }
+
+ unimplemented!();
+}
+
+pub fn target_cpu(sess: &Session) -> &str {
+ match sess.opts.cg.target_cpu {
+ Some(ref name) => handle_native(name),
+ None => handle_native(sess.target.cpu.as_ref()),
+ }
+}
+
+pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+ supported_target_features(sess)
+ .iter()
+ .filter_map(
+ |&(feature, gate)| {
+ if sess.is_nightly_build() || allow_unstable || gate.is_none() { Some(feature) } else { None }
+ },
+ )
+ .filter(|_feature| {
+ // TODO(antoyo): implement a way to get enabled feature in libgccjit.
+ // Probably using the equivalent of __builtin_cpu_supports.
+ #[cfg(feature="master")]
+ {
+ _feature.contains("sse") || _feature.contains("avx")
+ }
+ #[cfg(not(feature="master"))]
+ {
+ false
+ }
+ /*
+ adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512gfni,
+ avx512ifma, avx512pf, avx512vaes, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpclmulqdq,
+ avx512vpopcntdq, bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm,
+ sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, xsave, xsavec, xsaveopt, xsaves
+ */
+ //false
+ })
+ .map(|feature| Symbol::intern(feature))
+ .collect()
+}
diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs
new file mode 100644
index 000000000..9468a1ef4
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/mono_item.rs
@@ -0,0 +1,38 @@
+use rustc_codegen_ssa::traits::PreDefineMethods;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_span::def_id::DefId;
+
+use crate::base;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn predefine_static(&self, def_id: DefId, _linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let gcc_type = self.layout_of(ty).gcc_type(self, true);
+
+ let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
+ let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section);
+
+ // TODO(antoyo): set linkage and visibility.
+ self.instances.borrow_mut().insert(instance, global);
+ }
+
+ fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, _visibility: Visibility, symbol_name: &str) {
+ assert!(!instance.substs.needs_infer());
+
+ let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+ self.linkage.set(base::linkage_to_gcc(linkage));
+ let _decl = self.declare_fn(symbol_name, &fn_abi);
+ //let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+
+ // TODO(antoyo): call set_link_section() to allow initializing argc/argv.
+ // TODO(antoyo): set unique comdat.
+ // TODO(antoyo): use inline attribute from there in linkage.set() above.
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs
new file mode 100644
index 000000000..68bdb8d4e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/type_.rs
@@ -0,0 +1,303 @@
+use std::convert::TryInto;
+
+use gccjit::{RValue, Struct, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods};
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_middle::{bug, ty};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+
+use crate::common::TypeReflection;
+use crate::context::CodegenCx;
+use crate::type_of::LayoutGccExt;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn type_ix(&self, num_bits: u64) -> Type<'gcc> {
+        // gcc only supports 1, 2, 4, 8 and 16-byte integers.
+        // FIXME(antoyo): it is misleading to use the next power of two here, as rustc_codegen_ssa
+        // sometimes uses 96-bit numbers and the following code will give an integer of a different
+        // size.
+ let bytes = (num_bits / 8).next_power_of_two() as i32;
+ match bytes {
+ 1 => self.i8_type,
+ 2 => self.i16_type,
+ 4 => self.i32_type,
+ 8 => self.i64_type,
+ 16 => self.i128_type,
+ _ => panic!("unexpected num_bits: {}", num_bits),
+ }
+ }
+
+ pub fn type_void(&self) -> Type<'gcc> {
+ self.context.new_type::<()>()
+ }
+
+ pub fn type_size_t(&self) -> Type<'gcc> {
+ self.context.new_type::<usize>()
+ }
+
+ pub fn type_u8(&self) -> Type<'gcc> {
+ self.u8_type
+ }
+
+ pub fn type_u16(&self) -> Type<'gcc> {
+ self.u16_type
+ }
+
+ pub fn type_u32(&self) -> Type<'gcc> {
+ self.u32_type
+ }
+
+ pub fn type_u64(&self) -> Type<'gcc> {
+ self.u64_type
+ }
+
+ pub fn type_u128(&self) -> Type<'gcc> {
+ self.u128_type
+ }
+
+ pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
+ // FIXME(eddyb) We could find a better approximation if ity.align < align.
+ let ity = Integer::approximate_align(self, align);
+ self.type_from_integer(ity)
+ }
+
+ pub fn type_vector(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
+ self.context.new_vector_type(ty, len)
+ }
+
+ pub fn type_float_from_ty(&self, t: ty::FloatTy) -> Type<'gcc> {
+ match t {
+ ty::FloatTy::F32 => self.type_f32(),
+ ty::FloatTy::F64 => self.type_f64(),
+ }
+ }
+}
+
+impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn type_i1(&self) -> Type<'gcc> {
+ self.bool_type
+ }
+
+ fn type_i8(&self) -> Type<'gcc> {
+ self.i8_type
+ }
+
+ fn type_i16(&self) -> Type<'gcc> {
+ self.i16_type
+ }
+
+ fn type_i32(&self) -> Type<'gcc> {
+ self.i32_type
+ }
+
+ fn type_i64(&self) -> Type<'gcc> {
+ self.i64_type
+ }
+
+ fn type_i128(&self) -> Type<'gcc> {
+ self.i128_type
+ }
+
+ fn type_isize(&self) -> Type<'gcc> {
+ self.isize_type
+ }
+
+ fn type_f32(&self) -> Type<'gcc> {
+ self.context.new_type::<f32>()
+ }
+
+ fn type_f64(&self) -> Type<'gcc> {
+ self.context.new_type::<f64>()
+ }
+
+ fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
+ self.context.new_function_pointer_type(None, return_type, params, false)
+ }
+
+ fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
+ let types = fields.to_vec();
+ if let Some(typ) = self.struct_types.borrow().get(fields) {
+ return typ.clone();
+ }
+ let fields: Vec<_> = fields.iter().enumerate()
+ .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
+ .collect();
+ let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
+ if packed {
+ #[cfg(feature="master")]
+ typ.set_packed();
+ }
+ self.struct_types.borrow_mut().insert(types, typ);
+ typ
+ }
+
+ fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
+ if self.is_int_type_or_bool(typ) {
+ TypeKind::Integer
+ }
+ else if typ.is_compatible_with(self.float_type) {
+ TypeKind::Float
+ }
+ else if typ.is_compatible_with(self.double_type) {
+ TypeKind::Double
+ }
+ else if typ.is_vector() {
+ TypeKind::Vector
+ }
+ else {
+ // TODO(antoyo): support other types.
+ TypeKind::Void
+ }
+ }
+
+ fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
+ ty.make_pointer()
+ }
+
+ fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
+ // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
+ ty.make_pointer()
+ }
+
+ fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
+ if let Some(typ) = ty.dyncast_array() {
+ typ
+ }
+ else if let Some(vector_type) = ty.dyncast_vector() {
+ vector_type.get_element_type()
+ }
+ else if let Some(typ) = ty.get_pointee() {
+ typ
+ }
+ else {
+ unreachable!()
+ }
+ }
+
+ fn vector_length(&self, _ty: Type<'gcc>) -> usize {
+ unimplemented!();
+ }
+
+ fn float_width(&self, typ: Type<'gcc>) -> usize {
+ let f32 = self.context.new_type::<f32>();
+ let f64 = self.context.new_type::<f64>();
+ if typ.is_compatible_with(f32) {
+ 32
+ }
+ else if typ.is_compatible_with(f64) {
+ 64
+ }
+ else {
+ panic!("Cannot get width of float type {:?}", typ);
+ }
+ // TODO(antoyo): support other sizes.
+ }
+
+ fn int_width(&self, typ: Type<'gcc>) -> u64 {
+ self.gcc_int_width(typ)
+ }
+
+ fn val_ty(&self, value: RValue<'gcc>) -> Type<'gcc> {
+ value.get_type()
+ }
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ pub fn type_padding_filler(&self, size: Size, align: Align) -> Type<'gcc> {
+ let unit = Integer::approximate_align(self, align);
+ let size = size.bytes();
+ let unit_size = unit.size().bytes();
+ assert_eq!(size % unit_size, 0);
+ self.type_array(self.type_from_integer(unit), size / unit_size)
+ }
+
+ pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], packed: bool) {
+ let fields: Vec<_> = fields.iter().enumerate()
+ .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
+ .collect();
+ typ.set_fields(None, &fields);
+ if packed {
+ #[cfg(feature="master")]
+ typ.as_type().set_packed();
+ }
+ }
+
+ pub fn type_named_struct(&self, name: &str) -> Struct<'gcc> {
+ self.context.new_opaque_struct_type(None, name)
+ }
+
+ pub fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
+ if let Some(struct_type) = ty.is_struct() {
+ if struct_type.get_field_count() == 0 {
+                // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
+                // size of usize::MAX in test_binary_search, we work around this by setting the size to
+                // zero for ZSTs.
+ // FIXME(antoyo): fix gccjit API.
+ len = 0;
+ }
+ }
+
+ // NOTE: see note above. Some other test uses usize::MAX.
+ if len == u64::MAX {
+ len = 0;
+ }
+
+ let len: i32 = len.try_into().expect("array len");
+
+ self.context.new_array_type(None, ty, len)
+ }
+
+ pub fn type_bool(&self) -> Type<'gcc> {
+ self.context.new_type::<bool>()
+ }
+}
+
+pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
+ let field_count = layout.fields.count();
+
+ let mut packed = false;
+ let mut offset = Size::ZERO;
+ let mut prev_effective_align = layout.align.abi;
+ let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
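+    // Fields are visited in increasing offset order and each one is preceded by an
+    // explicit padding filler, which is why `gcc_field_index` in type_of.rs computes
+    // `1 + memory_index * 2` for arbitrary layouts.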
+ for i in layout.fields.index_by_increasing_offset() {
+ let target_offset = layout.fields.offset(i as usize);
+ let field = layout.field(cx, i);
+ let effective_field_align =
+ layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
+ packed |= effective_field_align < field.align.abi;
+
+ assert!(target_offset >= offset);
+ let padding = target_offset - offset;
+ let padding_align = prev_effective_align.min(effective_field_align);
+ assert_eq!(offset.align_to(padding_align) + padding, target_offset);
+ result.push(cx.type_padding_filler(padding, padding_align));
+
+ result.push(field.gcc_type(cx, !field.ty.is_any_ptr())); // FIXME(antoyo): might need to check if the type is inside another, like Box<Type>.
+ offset = target_offset + field.size;
+ prev_effective_align = effective_field_align;
+ }
+ if !layout.is_unsized() && field_count > 0 {
+ if offset > layout.size {
+ bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
+ }
+ let padding = layout.size - offset;
+ let padding_align = prev_effective_align;
+ assert_eq!(offset.align_to(padding_align) + padding, layout.size);
+ result.push(cx.type_padding_filler(padding, padding_align));
+ assert_eq!(result.len(), 1 + field_count * 2);
+ }
+
+ (result, packed)
+}
+
+impl<'gcc, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn set_type_metadata(&self, _function: RValue<'gcc>, _typeid: String) {
+ // Unsupported.
+ }
+
+ fn typeid_metadata(&self, _typeid: String) -> RValue<'gcc> {
+ // Unsupported.
+ self.context.new_rvalue_from_int(self.int_type, 0)
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
new file mode 100644
index 000000000..524d10fb5
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -0,0 +1,385 @@
+use std::fmt::Write;
+
+use gccjit::{Struct, Type};
+use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
+use rustc_middle::bug;
+use rustc_middle::ty::{self, Ty, TypeVisitable};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_target::abi::{self, Abi, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
+use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
+
+use crate::abi::{FnAbiGccExt, GccType};
+use crate::context::CodegenCx;
+use crate::type_::struct_fields;
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+ fn type_from_unsigned_integer(&self, i: Integer) -> Type<'gcc> {
+ use Integer::*;
+ match i {
+ I8 => self.type_u8(),
+ I16 => self.type_u16(),
+ I32 => self.type_u32(),
+ I64 => self.type_u64(),
+ I128 => self.type_u128(),
+ }
+ }
+
+ #[cfg(feature="master")]
+ pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
+ match t {
+ ty::IntTy::Isize => self.type_isize(),
+ ty::IntTy::I8 => self.type_i8(),
+ ty::IntTy::I16 => self.type_i16(),
+ ty::IntTy::I32 => self.type_i32(),
+ ty::IntTy::I64 => self.type_i64(),
+ ty::IntTy::I128 => self.type_i128(),
+ }
+ }
+
+ #[cfg(feature="master")]
+ pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
+ match t {
+ ty::UintTy::Usize => self.type_isize(),
+ ty::UintTy::U8 => self.type_i8(),
+ ty::UintTy::U16 => self.type_i16(),
+ ty::UintTy::U32 => self.type_i32(),
+ ty::UintTy::U64 => self.type_i64(),
+ ty::UintTy::U128 => self.type_i128(),
+ }
+ }
+}
+
+pub fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> {
+ match layout.abi {
+ Abi::Scalar(_) => bug!("handled elsewhere"),
+ Abi::Vector { ref element, count } => {
+ let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO);
+ return cx.context.new_vector_type(element, count);
+ },
+ Abi::ScalarPair(..) => {
+ return cx.type_struct(
+ &[
+ layout.scalar_pair_element_gcc_type(cx, 0, false),
+ layout.scalar_pair_element_gcc_type(cx, 1, false),
+ ],
+ false,
+ );
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {}
+ }
+
+ let name = match layout.ty.kind() {
+ // FIXME(eddyb) producing readable type names for trait objects can result
+ // in problematically distinct types due to HRTB and subtyping (see #47638).
+ // ty::Dynamic(..) |
+ ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
+ if !cx.sess().fewer_names() =>
+ {
+ let mut name = with_no_trimmed_paths!(layout.ty.to_string());
+ if let (&ty::Adt(def, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ if def.is_enum() && !def.variants().is_empty() {
+ write!(&mut name, "::{}", def.variant(index).name).unwrap();
+ }
+ }
+ if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+ }
+ Some(name)
+ }
+ ty::Adt(..) => {
+ // If `Some` is returned then a named struct is created in LLVM. Name collisions are
+ // avoided by LLVM (with increasing suffixes). If rustc doesn't generate names then that
+ // can improve perf.
+ // FIXME(antoyo): I don't think that's true for libgccjit.
+ Some(String::new())
+ }
+ _ => None,
+ };
+
+ match layout.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+ let packed = false;
+ match name {
+ None => cx.type_struct(&[fill], packed),
+ Some(ref name) => {
+ let gcc_type = cx.type_named_struct(name);
+ cx.set_struct_body(gcc_type, &[fill], packed);
+ gcc_type.as_type()
+ },
+ }
+ }
+ FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx, true), count),
+ FieldsShape::Arbitrary { .. } =>
+ match name {
+ None => {
+ let (gcc_fields, packed) = struct_fields(cx, layout);
+ cx.type_struct(&gcc_fields, packed)
+ },
+ Some(ref name) => {
+ let gcc_type = cx.type_named_struct(name);
+ *defer = Some((gcc_type, layout));
+ gcc_type.as_type()
+ },
+ },
+ }
+}
+
+pub trait LayoutGccExt<'tcx> {
+ fn is_gcc_immediate(&self) -> bool;
+ fn is_gcc_scalar_pair(&self) -> bool;
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc>;
+ fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+ fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
+ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>;
+ fn gcc_field_index(&self, index: usize) -> u64;
+ fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+}
+
+impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
+ fn is_gcc_immediate(&self) -> bool {
+ match self.abi {
+ Abi::Scalar(_) | Abi::Vector { .. } => true,
+ Abi::ScalarPair(..) => false,
+ Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
+ }
+ }
+
+ fn is_gcc_scalar_pair(&self) -> bool {
+ match self.abi {
+ Abi::ScalarPair(..) => true,
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+ }
+ }
+
+ /// Gets the GCC type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
+ /// The pointee type of the pointer in `PlaceRef` is always this type.
+    /// For sized types, it is also the right GCC type for an `alloca`
+ /// containing a value of that type, and most immediates (except `bool`).
+ /// Unsized types, however, are represented by a "minimal unit", e.g.
+ /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+ /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+ /// If the type is an unsized struct, the regular layout is generated,
+ /// with the inner-most trailing unsized field using the "minimal unit"
+ /// of that field's type - this is useful for taking the address of
+ /// that field and ensuring the struct has the right alignment.
+    // TODO(antoyo): do we still need the set_fields parameter?
+ fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, set_fields: bool) -> Type<'gcc> {
+ if let Abi::Scalar(ref scalar) = self.abi {
+ // Use a different cache for scalars because pointers to DSTs
+ // can be either fat or thin (data pointers of fat pointers).
+ if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
+ return ty;
+ }
+ let ty =
+ match *self.ty.kind() {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+ cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx, set_fields))
+ }
+ ty::Adt(def, _) if def.is_box() => {
+ cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx, true))
+ }
+ ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
+ _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
+ };
+ cx.scalar_types.borrow_mut().insert(self.ty, ty);
+ return ty;
+ }
+
+ // Check the cache.
+ let variant_index =
+ match self.variants {
+ Variants::Single { index } => Some(index),
+ _ => None,
+ };
+ let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned();
+ if let Some(ty) = cached_type {
+ let type_to_set_fields = cx.types_with_fields_to_set.borrow_mut().remove(&ty);
+ if let Some((struct_type, layout)) = type_to_set_fields {
+ // Since we might be trying to generate a type containing another type which is not
+ // completely generated yet, we deferred setting the fields until now.
+ let (fields, packed) = struct_fields(cx, layout);
+ cx.set_struct_body(struct_type, &fields, packed);
+ }
+ return ty;
+ }
+
+ assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
+
+        // Make sure lifetimes are erased, to avoid generating distinct GCC
+        // types for Rust types that only differ in the choice of lifetimes.
+ let normal_ty = cx.tcx.erase_regions(self.ty);
+
+ let mut defer = None;
+ let ty =
+ if self.ty != normal_ty {
+ let mut layout = cx.layout_of(normal_ty);
+ if let Some(v) = variant_index {
+ layout = layout.for_variant(cx, v);
+ }
+ layout.gcc_type(cx, true)
+ }
+ else {
+ uncached_gcc_type(cx, *self, &mut defer)
+ };
+
+ cx.types.borrow_mut().insert((self.ty, variant_index), ty);
+
+ if let Some((ty, layout)) = defer {
+ let (fields, packed) = struct_fields(cx, layout);
+ cx.set_struct_body(ty, &fields, packed);
+ }
+
+ ty
+ }
+
+ fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+ if let Abi::Scalar(ref scalar) = self.abi {
+ if scalar.is_bool() {
+ return cx.type_i1();
+ }
+ }
+ self.gcc_type(cx, true)
+ }
+
+ fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
+ match scalar.primitive() {
+ Int(i, true) => cx.type_from_integer(i),
+ Int(i, false) => cx.type_from_unsigned_integer(i),
+ F32 => cx.type_f32(),
+ F64 => cx.type_f64(),
+ Pointer => {
+ // If we know the alignment, pick something better than i8.
+ let pointee =
+ if let Some(pointee) = self.pointee_info_at(cx, offset) {
+ cx.type_pointee_for_align(pointee.align)
+ }
+ else {
+ cx.type_i8()
+ };
+ cx.type_ptr_to(pointee)
+ }
+ }
+ }
+
+ fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
+ // TODO(antoyo): remove llvm hack:
+ // HACK(eddyb) special-case fat pointers until LLVM removes
+ // pointee types, to avoid bitcasting every `OperandRef::deref`.
+ match self.ty.kind() {
+ ty::Ref(..) | ty::RawPtr(_) => {
+ return self.field(cx, index).gcc_type(cx, true);
+ }
+ // only wide pointer boxes are handled as pointers
+ // thin pointer boxes with scalar allocators are handled by the general logic below
+ ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+ let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+ return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
+ }
+ _ => {}
+ }
+
+ let (a, b) = match self.abi {
+ Abi::ScalarPair(ref a, ref b) => (a, b),
+ _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
+ };
+ let scalar = [a, b][index];
+
+ // Make sure to return the same type `immediate_gcc_type` would when
+ // dealing with an immediate pair. This means that `(bool, bool)` is
+ // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+ // immediate, just like `bool` is typically `i8` in memory and only `i1`
+ // when immediate. We need to load/store `bool` as `i8` to avoid
+ // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
+        // TODO(antoyo): these bugs certainly don't happen in this case since the bool type is used instead of i1.
+ if scalar.is_bool() {
+ return cx.type_i1();
+ }
+
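+        // The first element always sits at offset zero; the second starts at the first
+        // element's size rounded up to the second element's alignment.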
+ let offset =
+ if index == 0 {
+ Size::ZERO
+ }
+ else {
+ a.size(cx).align_to(b.align(cx).abi)
+ };
+ self.scalar_gcc_type_at(cx, scalar, offset)
+ }
+
+ fn gcc_field_index(&self, index: usize) -> u64 {
+ match self.abi {
+ Abi::Scalar(_) | Abi::ScalarPair(..) => {
+ bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+ }
+ _ => {}
+ }
+ match self.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ bug!("TyAndLayout::gcc_field_index({:?}): not applicable", self)
+ }
+
+ FieldsShape::Array { .. } => index as u64,
+
+ FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
+ }
+ }
+
+ fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
+ if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
+ return pointee;
+ }
+
+ let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
+
+ cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
+ result
+ }
+}
+
+impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
+ fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+ layout.gcc_type(self, true)
+ }
+
+ fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Type<'gcc> {
+ layout.immediate_gcc_type(self)
+ }
+
+ fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_gcc_immediate()
+ }
+
+ fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_gcc_scalar_pair()
+ }
+
+ fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
+ layout.gcc_field_index(index)
+ }
+
+ fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
+ layout.scalar_pair_element_gcc_type(self, index, immediate)
+ }
+
+ fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
+ ty.gcc_type(self)
+ }
+
+ fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+ fn_abi.ptr_to_gcc_type(self)
+ }
+
+ fn reg_backend_type(&self, _ty: &Reg) -> Type<'gcc> {
+ unimplemented!();
+ }
+
+ fn fn_decl_backend_type(&self, _fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
+ // FIXME(antoyo): return correct type.
+ self.type_void()
+ }
+}
diff --git a/compiler/rustc_codegen_gcc/test.sh b/compiler/rustc_codegen_gcc/test.sh
new file mode 100755
index 000000000..8b390f95a
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/test.sh
@@ -0,0 +1,291 @@
+#!/usr/bin/env bash
+
+# TODO(antoyo): rewrite using cargo-make (or `just`, or something similar) so that the sysroot is only rebuilt when needed?
+
+set -e
+
+if [ -f ./gcc_path ]; then
+ export GCC_PATH=$(cat gcc_path)
+else
+ echo 'Please put the path to your custom build of libgccjit in the file `gcc_path`, see Readme.md for details'
+ exit 1
+fi
+
+export LD_LIBRARY_PATH="$GCC_PATH"
+export LIBRARY_PATH="$GCC_PATH"
+
+flags=
+gcc_master_branch=1
+channel="debug"
+func=all
+build_only=0
+
+while [[ $# -gt 0 ]]; do
+ case $1 in
+        --release)
+            channel="release"
+            codegen_channel=release
+            shift
+            ;;
+ --release-sysroot)
+ sysroot_channel=release
+ shift
+ ;;
+ --no-default-features)
+ gcc_master_branch=0
+ flags="$flags --no-default-features"
+ shift
+ ;;
+ --features)
+ shift
+ flags="$flags --features $1"
+ shift
+ ;;
+ "--test-rustc")
+ func=test_rustc
+ shift
+ ;;
+
+ "--test-libcore")
+ func=test_libcore
+ shift
+ ;;
+
+ "--clean-ui-tests")
+ func=clean_ui_tests
+ shift
+ ;;
+
+ "--std-tests")
+ func=std_tests
+ shift
+ ;;
+
+ "--extended-tests")
+ func=extended_sysroot_tests
+ shift
+ ;;
+
+ "--build-sysroot")
+ func=build_sysroot
+ shift
+ ;;
+ "--build")
+ build_only=1
+ shift
+ ;;
+ *)
+ echo "Unknown option $1"
+ exit 1
+ ;;
+ esac
+done
+
+if [[ $channel == "release" ]]; then
+ export CHANNEL='release'
+ CARGO_INCREMENTAL=1 cargo rustc --release $flags
+else
+ echo $LD_LIBRARY_PATH
+ export CHANNEL='debug'
+ cargo rustc $flags
+fi
+
+if (( $build_only == 1 )); then
+ exit
+fi
+
+source config.sh
+
+function clean() {
+ rm -r target/out || true
+ mkdir -p target/out/gccjit
+}
+
+function mini_tests() {
+ echo "[BUILD] mini_core"
+ $RUSTC example/mini_core.rs --crate-name mini_core --crate-type lib,dylib --target $TARGET_TRIPLE
+
+ echo "[BUILD] example"
+ $RUSTC example/example.rs --crate-type lib --target $TARGET_TRIPLE
+
+ echo "[AOT] mini_core_hello_world"
+ $RUSTC example/mini_core_hello_world.rs --crate-name mini_core_hello_world --crate-type bin -g --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/mini_core_hello_world abc bcd
+}
+
+function build_sysroot() {
+ echo "[BUILD] sysroot"
+ time ./build_sysroot/build_sysroot.sh
+}
+
+function std_tests() {
+ echo "[AOT] arbitrary_self_types_pointers_and_wrappers"
+ $RUSTC example/arbitrary_self_types_pointers_and_wrappers.rs --crate-name arbitrary_self_types_pointers_and_wrappers --crate-type bin --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/arbitrary_self_types_pointers_and_wrappers
+
+ echo "[AOT] alloc_system"
+ $RUSTC example/alloc_system.rs --crate-type lib --target "$TARGET_TRIPLE"
+
+ echo "[AOT] alloc_example"
+ $RUSTC example/alloc_example.rs --crate-type bin --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/alloc_example
+
+ echo "[AOT] dst_field_align"
+ # FIXME(antoyo): Re-add -Zmir-opt-level=2 once rust-lang/rust#67529 is fixed.
+ $RUSTC example/dst-field-align.rs --crate-name dst_field_align --crate-type bin --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/dst_field_align || (echo $?; false)
+
+ echo "[AOT] std_example"
+ std_flags="--cfg feature=\"master\""
+ if (( $gcc_master_branch == 0 )); then
+ std_flags=""
+ fi
+ $RUSTC example/std_example.rs --crate-type bin --target $TARGET_TRIPLE $std_flags
+ $RUN_WRAPPER ./target/out/std_example --target $TARGET_TRIPLE
+
+ echo "[AOT] subslice-patterns-const-eval"
+ $RUSTC example/subslice-patterns-const-eval.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/subslice-patterns-const-eval
+
+ echo "[AOT] track-caller-attribute"
+ $RUSTC example/track-caller-attribute.rs --crate-type bin -Cpanic=abort --target $TARGET_TRIPLE
+ $RUN_WRAPPER ./target/out/track-caller-attribute
+
+ echo "[BUILD] mod_bench"
+ $RUSTC example/mod_bench.rs --crate-type bin --target $TARGET_TRIPLE
+}
+
+# FIXME(antoyo): linker gives multiple definitions error on Linux
+#echo "[BUILD] sysroot in release mode"
+#./build_sysroot/build_sysroot.sh --release
+
+function test_libcore() {
+ pushd build_sysroot/sysroot_src/library/core/tests
+ echo "[TEST] libcore"
+ rm -r ./target || true
+ ../../../../../cargo.sh test
+ popd
+}
+
+#echo
+#echo "[BENCH COMPILE] mod_bench"
+
+#COMPILE_MOD_BENCH_INLINE="$RUSTC example/mod_bench.rs --crate-type bin -Zmir-opt-level=3 -O --crate-name mod_bench_inline"
+#COMPILE_MOD_BENCH_LLVM_0="rustc example/mod_bench.rs --crate-type bin -Copt-level=0 -o target/out/mod_bench_llvm_0 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_1="rustc example/mod_bench.rs --crate-type bin -Copt-level=1 -o target/out/mod_bench_llvm_1 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_2="rustc example/mod_bench.rs --crate-type bin -Copt-level=2 -o target/out/mod_bench_llvm_2 -Cpanic=abort"
+#COMPILE_MOD_BENCH_LLVM_3="rustc example/mod_bench.rs --crate-type bin -Copt-level=3 -o target/out/mod_bench_llvm_3 -Cpanic=abort"
+
+## Use 100 runs, because a single compilation doesn't take more than ~150ms, so it isn't very slow
+#hyperfine --runs ${COMPILE_RUNS:-100} "$COMPILE_MOD_BENCH_INLINE" "$COMPILE_MOD_BENCH_LLVM_0" "$COMPILE_MOD_BENCH_LLVM_1" "$COMPILE_MOD_BENCH_LLVM_2" "$COMPILE_MOD_BENCH_LLVM_3"
+
+#echo
+#echo "[BENCH RUN] mod_bench"
+#hyperfine --runs ${RUN_RUNS:-10} ./target/out/mod_bench{,_inline} ./target/out/mod_bench_llvm_*
+
+function extended_sysroot_tests() {
+ if (( $gcc_master_branch == 0 )); then
+ return
+ fi
+
+ pushd rand
+ cargo clean
+ echo "[TEST] rust-random/rand"
+ ../cargo.sh test --workspace
+ popd
+
+ #pushd simple-raytracer
+ #echo "[BENCH COMPILE] ebobby/simple-raytracer"
+ #hyperfine --runs "${RUN_RUNS:-10}" --warmup 1 --prepare "cargo clean" \
+ #"RUSTC=rustc RUSTFLAGS='' cargo build" \
+ #"../cargo.sh build"
+
+ #echo "[BENCH RUN] ebobby/simple-raytracer"
+ #cp ./target/debug/main ./raytracer_cg_gcc
+ #hyperfine --runs "${RUN_RUNS:-10}" ./raytracer_cg_llvm ./raytracer_cg_gcc
+ #popd
+
+ pushd regex
+ echo "[TEST] rust-lang/regex example shootout-regex-dna"
+ cargo clean
+ export CG_RUSTFLAGS="--cap-lints warn" # newer aho_corasick versions throw a deprecation warning
+ # Make sure `[codegen mono items] start` doesn't poison the diff
+ ../cargo.sh build --example shootout-regex-dna
+ cat examples/regexdna-input.txt \
+ | ../cargo.sh run --example shootout-regex-dna \
+ | grep -v "Spawned thread" > res.txt
+ diff -u res.txt examples/regexdna-output.txt
+
+ echo "[TEST] rust-lang/regex tests"
+ ../cargo.sh test --tests -- --exclude-should-panic --test-threads 1 -Zunstable-options -q
+ popd
+}
+
+function test_rustc() {
+ echo
+ echo "[TEST] rust-lang/rust"
+
+ rust_toolchain=$(cat rust-toolchain | grep channel | sed 's/channel = "\(.*\)"/\1/')
+
+ git clone https://github.com/rust-lang/rust.git || true
+ cd rust
+ git fetch
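+    # Check out the exact upstream commit of the rustc in use (parsed from `rustc -V`),
+    # so that the test suite matches the compiler under test.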
+ git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(')
+ export RUSTFLAGS=
+
+ git apply ../rustc_patches/compile_test.patch || true
+
+ rm config.toml || true
+
+ cat > config.toml <<EOF
+[rust]
+codegen-backends = []
+deny-warnings = false
+
+[build]
+cargo = "$(which cargo)"
+local-rebuild = true
+rustc = "$HOME/.rustup/toolchains/$rust_toolchain-$TARGET_TRIPLE/bin/rustc"
+EOF
+
+ git checkout $(rustc -V | cut -d' ' -f3 | tr -d '(') src/test
+
+ for test in $(rg -i --files-with-matches "//(\[\w+\])?~|// error-pattern:|// build-fail|// run-fail|-Cllvm-args" src/test/ui); do
+ rm $test
+ done
+
+ git checkout -- src/test/ui/issues/auxiliary/issue-3136-a.rs # contains //~ERROR, but shouldn't be removed
+
+ rm -r src/test/ui/{abi*,extern/,panic-runtime/,panics/,unsized-locals/,proc-macro/,threads-sendsync/,thinlto/,borrowck/,test*,*lto*.rs} || true
+ for test in $(rg --files-with-matches "catch_unwind|should_panic|thread|lto" src/test/ui); do
+ rm $test
+ done
+ git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice.rs
+ git checkout src/test/ui/type-alias-impl-trait/auxiliary/cross_crate_ice2.rs
+
+ RUSTC_ARGS="-Zpanic-abort-tests -Csymbol-mangling-version=v0 -Zcodegen-backend="$(pwd)"/../target/"$CHANNEL"/librustc_codegen_gcc."$dylib_ext" --sysroot "$(pwd)"/../build_sysroot/sysroot -Cpanic=abort"
+
+ echo "[TEST] rustc test suite"
+ COMPILETEST_FORCE_STAGE0=1 ./x.py test --run always --stage 0 src/test/ui/ --rustc-args "$RUSTC_ARGS"
+}
+
+function clean_ui_tests() {
+ find rust/build/x86_64-unknown-linux-gnu/test/ui/ -name stamp -exec rm -rf {} \;
+}
+
+function all() {
+ clean
+ mini_tests
+ build_sysroot
+ std_tests
+ test_libcore
+ extended_sysroot_tests
+ test_rustc
+}
+
+$func
diff --git a/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs
new file mode 100644
index 000000000..8e378177e
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs
@@ -0,0 +1,68 @@
+//! The common code for `tests/lang_tests_*.rs`
+use std::{
+ env::{self, current_dir},
+ path::PathBuf,
+ process::Command,
+};
+
+use lang_tester::LangTester;
+use tempfile::TempDir;
+
+/// Controls the compile options (e.g., optimization level) used to compile
+/// test code.
+#[allow(dead_code)] // Each test crate picks one variant
+pub enum Profile {
+ Debug,
+ Release,
+}
+
+pub fn main_inner(profile: Profile) {
+ let tempdir = TempDir::new().expect("temp dir");
+ let current_dir = current_dir().expect("current dir");
+ let current_dir = current_dir.to_str().expect("current dir").to_string();
+ let gcc_path = include_str!("../gcc_path");
+ let gcc_path = gcc_path.trim();
+ env::set_var("LD_LIBRARY_PATH", gcc_path);
+ LangTester::new()
+ .test_dir("tests/run")
+ .test_file_filter(|path| path.extension().expect("extension").to_str().expect("to_str") == "rs")
+ .test_extract(|source| {
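+            // lang_tester reads its directives (e.g. the `// Run-time:` header) from
+            // the leading `//` comment block of each test file, so extract that block.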
+ let lines =
+ source.lines()
+ .skip_while(|l| !l.starts_with("//"))
+ .take_while(|l| l.starts_with("//"))
+ .map(|l| &l[2..])
+ .collect::<Vec<_>>()
+ .join("\n");
+ Some(lines)
+ })
+ .test_cmds(move |path| {
+ // Test command 1: Compile `x.rs` into `tempdir/x`.
+ let mut exe = PathBuf::new();
+ exe.push(&tempdir);
+ exe.push(path.file_stem().expect("file_stem"));
+ let mut compiler = Command::new("rustc");
+ compiler.args(&[
+ &format!("-Zcodegen-backend={}/target/debug/librustc_codegen_gcc.so", current_dir),
+ "--sysroot", &format!("{}/build_sysroot/sysroot/", current_dir),
+ "-Zno-parallel-llvm",
+ "-C", "panic=abort",
+ "-C", "link-arg=-lc",
+ "-o", exe.to_str().expect("to_str"),
+ path.to_str().expect("to_str"),
+ ]);
+ match profile {
+ Profile::Debug => {}
+ Profile::Release => {
+ compiler.args(&[
+ "-C", "opt-level=3",
+ "-C", "lto=no",
+ ]);
+ }
+ }
+ // Test command 2: run `tempdir/x`.
+ let runtime = Command::new(exe);
+ vec![("Compiler", compiler), ("Run-time", runtime)]
+ })
+ .run();
+}
diff --git a/compiler/rustc_codegen_gcc/tests/lang_tests_debug.rs b/compiler/rustc_codegen_gcc/tests/lang_tests_debug.rs
new file mode 100644
index 000000000..96bd74883
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/lang_tests_debug.rs
@@ -0,0 +1,5 @@
+mod lang_tests_common;
+
+fn main() {
+ lang_tests_common::main_inner(lang_tests_common::Profile::Debug);
+}
diff --git a/compiler/rustc_codegen_gcc/tests/lang_tests_release.rs b/compiler/rustc_codegen_gcc/tests/lang_tests_release.rs
new file mode 100644
index 000000000..35d5d60c3
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/lang_tests_release.rs
@@ -0,0 +1,5 @@
+mod lang_tests_common;
+
+fn main() {
+ lang_tests_common::main_inner(lang_tests_common::Profile::Release);
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/abort1.rs b/compiler/rustc_codegen_gcc/tests/run/abort1.rs
new file mode 100644
index 000000000..291af5993
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/abort1.rs
@@ -0,0 +1,51 @@
+// Compiler:
+//
+// Run-time:
+// status: signal
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+ use super::Sized;
+
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+/*
+ * Code
+ */
+
+fn test_fail() -> ! {
+ unsafe { intrinsics::abort() };
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ test_fail();
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/abort2.rs b/compiler/rustc_codegen_gcc/tests/run/abort2.rs
new file mode 100644
index 000000000..3c87c5678
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/abort2.rs
@@ -0,0 +1,53 @@
+// Compiler:
+//
+// Run-time:
+// status: signal
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+ use super::Sized;
+
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+/*
+ * Code
+ */
+
+fn fail() -> i32 {
+ unsafe { intrinsics::abort() };
+ 0
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ fail();
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/array.rs b/compiler/rustc_codegen_gcc/tests/run/array.rs
new file mode 100644
index 000000000..8b621d8a3
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/array.rs
@@ -0,0 +1,229 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 42
+// 7
+// 5
+// 10
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+
+/*
+ * Code
+ */
+
+static mut ONE: usize = 1;
+
+fn make_array() -> [u8; 3] {
+ [42, 10, 5]
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ let array = [42, 7, 5];
+ let array2 = make_array();
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE - 1]);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE]);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, array[ONE + 1]);
+
+ libc::printf(b"%d\n\0" as *const u8 as *const i8, array2[argc as usize] as u32);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/asm.rs b/compiler/rustc_codegen_gcc/tests/run/asm.rs
new file mode 100644
index 000000000..46abbb553
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/asm.rs
@@ -0,0 +1,172 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+
+#![feature(asm_const, asm_sym)]
+
+use std::arch::{asm, global_asm};
+
+global_asm!("
+ .global add_asm
+add_asm:
+ mov rax, rdi
+ add rax, rsi
+ ret"
+);
+
+extern "C" {
+ fn add_asm(a: i64, b: i64) -> i64;
+}
+
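+// `rep movsb` copies rcx bytes from [rsi] to [rdi], hence dst, src and len are pinned to those registers.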
+pub unsafe fn mem_cpy(dst: *mut u8, src: *const u8, len: usize) {
+ asm!(
+ "rep movsb",
+ inout("rdi") dst => _,
+ inout("rsi") src => _,
+ inout("rcx") len => _,
+ options(preserves_flags, nostack)
+ );
+}
+
+fn main() {
+ unsafe {
+ asm!("nop");
+ }
+
+ let x: u64;
+ unsafe {
+ asm!("mov $5, {}",
+ out(reg) x,
+ options(att_syntax)
+ );
+ }
+ assert_eq!(x, 5);
+
+ let x: u64;
+ let input: u64 = 42;
+ unsafe {
+ asm!("mov {input}, {output}",
+ "add $1, {output}",
+ input = in(reg) input,
+ output = out(reg) x,
+ options(att_syntax)
+ );
+ }
+ assert_eq!(x, 43);
+
+ let x: u64;
+ unsafe {
+ asm!("mov {}, 6",
+ out(reg) x,
+ );
+ }
+ assert_eq!(x, 6);
+
+ let x: u64;
+ let input: u64 = 42;
+ unsafe {
+ asm!("mov {output}, {input}",
+ "add {output}, 1",
+ input = in(reg) input,
+ output = out(reg) x,
+ );
+ }
+ assert_eq!(x, 43);
+
+ // check inout(reg_class) x
+ let mut x: u64 = 42;
+ unsafe {
+ asm!("add {0}, {0}",
+ inout(reg) x
+ );
+ }
+ assert_eq!(x, 84);
+
+ // check inout("reg") x
+ let mut x: u64 = 42;
+ unsafe {
+ asm!("add r11, r11",
+ inout("r11") x
+ );
+ }
+ assert_eq!(x, 84);
+
+ // check a mix of
+ // in("reg")
+ // inout(class) x => y
+ // inout (class) x
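+    // `div r11` divides rdx:rax by r11: the quotient lands in rax, the remainder in rdx,
+    // so `x` seeds the low half of the dividend and `rem` zeroes the high half.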
+ let x: u64 = 702;
+ let y: u64 = 100;
+ let res: u64;
+ let mut rem: u64 = 0;
+ unsafe {
+ asm!("div r11",
+ in("r11") y,
+ inout("eax") x => res,
+ inout("edx") rem,
+ );
+ }
+ assert_eq!(res, 7);
+ assert_eq!(rem, 2);
+
+ // check const
+ let mut x: u64 = 42;
+ unsafe {
+ asm!("add {}, {}",
+ inout(reg) x,
+ const 1
+ );
+ }
+ assert_eq!(x, 43);
+
+ // check const (ATT syntax)
+ let mut x: u64 = 42;
+ unsafe {
+ asm!("add {}, {}",
+ const 1,
+ inout(reg) x,
+ options(att_syntax)
+ );
+ }
+ assert_eq!(x, 43);
+
+ // check sym fn
+ extern "C" fn foo() -> u64 { 42 }
+ let x: u64;
+ unsafe {
+ asm!("call {}", sym foo, lateout("rax") x);
+ }
+ assert_eq!(x, 42);
+
+ // check sym fn (ATT syntax)
+ let x: u64;
+ unsafe {
+ asm!("call {}", sym foo, lateout("rax") x, options(att_syntax));
+ }
+ assert_eq!(x, 42);
+
+ // check sym static
+ static FOO: u64 = 42;
+ let x: u64;
+ unsafe {
+ asm!("mov {1}, qword ptr [rip + {0}]", sym FOO, lateout(reg) x);
+ }
+ assert_eq!(x, 42);
+
+ // check sym static (ATT syntax)
+ let x: u64;
+ unsafe {
+ asm!("movq {0}(%rip), {1}", sym FOO, lateout(reg) x, options(att_syntax));
+ }
+ assert_eq!(x, 42);
+
+ assert_eq!(unsafe { add_asm(40, 2) }, 42);
+
+ let array1 = [1u8, 2, 3];
+ let mut array2 = [0u8, 0, 0];
+ unsafe {
+ mem_cpy(array2.as_mut_ptr(), array1.as_ptr(), 3);
+ }
+ assert_eq!(array1, array2);
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/assign.rs b/compiler/rustc_codegen_gcc/tests/run/assign.rs
new file mode 100644
index 000000000..eb38a8a38
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/assign.rs
@@ -0,0 +1,153 @@
+// Compiler:
+//
+// Run-time:
+// stdout: 2
+// 7 8
+// 10
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i32 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn fflush(stream: *mut i32) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+
+ pub static stdout: *mut i32;
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+/*
+ * Code
+ */
+
+fn inc_ref(num: &mut isize) -> isize {
+ *num = *num + 5;
+ *num + 1
+}
+
+fn inc(num: isize) -> isize {
+ num + 1
+}
+
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ argc = inc(argc);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+ }
+
+ let b = inc_ref(&mut argc);
+ unsafe {
+ libc::printf(b"%ld %ld\n\0" as *const u8 as *const i8, argc, b);
+ }
+
+ argc = 10;
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/closure.rs b/compiler/rustc_codegen_gcc/tests/run/closure.rs
new file mode 100644
index 000000000..7121a5f0d
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/closure.rs
@@ -0,0 +1,230 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: Arg: 1
+// Argument: 1
+// String arg: 1
+// Int argument: 2
+// Both args: 11
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics,
+ unboxed_closures)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
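+// Minimal FnOnce/FnMut lang items so that closures can be defined and called without core.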
+#[lang = "fn_once"]
+#[rustc_paren_sugar]
+pub trait FnOnce<Args> {
+ #[lang = "fn_once_output"]
+ type Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output;
+}
+
+#[lang = "fn_mut"]
+#[rustc_paren_sugar]
+pub trait FnMut<Args>: FnOnce<Args> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output;
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let string = "Arg: %d\n\0";
+ let mut closure = || {
+ unsafe {
+ libc::printf(string as *const str as *const i8, argc);
+ }
+ };
+ closure();
+
+ let mut closure = || {
+ unsafe {
+ libc::printf("Argument: %d\n\0" as *const str as *const i8, argc);
+ }
+ };
+ closure();
+
+ let mut closure = |string| {
+ unsafe {
+ libc::printf(string as *const str as *const i8, argc);
+ }
+ };
+ closure("String arg: %d\n\0");
+
+ let mut closure = |arg: isize| {
+ unsafe {
+ libc::printf("Int argument: %d\n\0" as *const str as *const i8, arg);
+ }
+ };
+ closure(argc + 1);
+
+ let mut closure = |string, arg: isize| {
+ unsafe {
+ libc::printf(string as *const str as *const i8, arg);
+ }
+ };
+ closure("Both args: %d\n\0", argc + 10);
+
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/condition.rs b/compiler/rustc_codegen_gcc/tests/run/condition.rs
new file mode 100644
index 000000000..6a2e2d5bb
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/condition.rs
@@ -0,0 +1,320 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: true
+// 1
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for u64 {}
+impl Copy for i32 {}
+impl Copy for u32 {}
+impl Copy for bool {}
+impl Copy for u16 {}
+impl Copy for i16 {}
+impl Copy for char {}
+impl Copy for i8 {}
+impl Copy for u8 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "eq"]
+pub trait PartialEq<Rhs: ?Sized = Self> {
+ fn eq(&self, other: &Rhs) -> bool;
+ fn ne(&self, other: &Rhs) -> bool;
+}
+
+impl PartialEq for u8 {
+ fn eq(&self, other: &u8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u16 {
+ fn eq(&self, other: &u16) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u16) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for u32 {
+ fn eq(&self, other: &u32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+
+impl PartialEq for u64 {
+ fn eq(&self, other: &u64) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &u64) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for usize {
+ fn eq(&self, other: &usize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &usize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i8 {
+ fn eq(&self, other: &i8) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i8) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for i32 {
+ fn eq(&self, other: &i32) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &i32) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for isize {
+ fn eq(&self, other: &isize) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &isize) -> bool {
+ (*self) != (*other)
+ }
+}
+
+impl PartialEq for char {
+ fn eq(&self, other: &char) -> bool {
+ (*self) == (*other)
+ }
+ fn ne(&self, other: &char) -> bool {
+ (*self) != (*other)
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ if argc == 1 {
+ libc::printf(b"true\n\0" as *const u8 as *const i8);
+ }
+
+ let string =
+ match argc {
+ 1 => b"1\n\0",
+ 2 => b"2\n\0",
+ 3 => b"3\n\0",
+ 4 => b"4\n\0",
+ 5 => b"5\n\0",
+ _ => b"_\n\0",
+ };
+ libc::printf(string as *const u8 as *const i8);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/empty_main.rs b/compiler/rustc_codegen_gcc/tests/run/empty_main.rs
new file mode 100644
index 000000000..c02cfd2a8
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/empty_main.rs
@@ -0,0 +1,39 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+
+#![feature(auto_traits, lang_items, no_core, start)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/exit.rs b/compiler/rustc_codegen_gcc/tests/run/exit.rs
new file mode 100644
index 000000000..956e53dd4
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/exit.rs
@@ -0,0 +1,49 @@
+// Compiler:
+//
+// Run-time:
+// status: 2
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn exit(status: i32);
+ }
+}
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ libc::exit(2);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/exit_code.rs b/compiler/rustc_codegen_gcc/tests/run/exit_code.rs
new file mode 100644
index 000000000..eeab35209
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/exit_code.rs
@@ -0,0 +1,39 @@
+// Compiler:
+//
+// Run-time:
+// status: 1
+
+#![feature(auto_traits, lang_items, no_core, start)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ 1
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs b/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs
new file mode 100644
index 000000000..a226fff79
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs
@@ -0,0 +1,223 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 1
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+
+/*
+ * Code
+ */
+
+fn i16_as_i8(a: i16) -> i8 {
+ a as i8
+}
+
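+// Calling through an `fn` pointer forces an indirect call instead of a direct one.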
+fn call_func(func: fn(i16) -> i8, param: i16) -> i8 {
+ func(param)
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ let result = call_func(i16_as_i8, argc as i16) as isize;
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, result);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/int.rs b/compiler/rustc_codegen_gcc/tests/run/int.rs
new file mode 100644
index 000000000..2b90e4ae8
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/int.rs
@@ -0,0 +1,340 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+
+#![feature(bench_black_box, const_black_box, core_intrinsics, start)]
+
+#![no_std]
+
+#[panic_handler]
+fn panic_handler(_: &core::panic::PanicInfo) -> ! {
+ core::intrinsics::abort();
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(_argc: isize, _argv: *const *const u8) -> isize {
+ use core::hint::black_box;
+
+ macro_rules! check {
+ ($ty:ty, $expr:expr) => {
+ {
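+            // Evaluate the expression once at compile time (const) and once at run time;
+            // the callers wrap operands in `black_box` so the run-time side is not const-folded.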
+ const EXPECTED: $ty = $expr;
+ assert_eq!($expr, EXPECTED);
+ }
+ };
+ }
+
+ check!(u32, (2220326408_u32 + black_box(1)) >> (32 - 6));
+
+ /// Generate `check!` tests for integer types at least as wide as 128 bits.
+ macro_rules! check_ops128 {
+ () => {
+ check_ops64!();
+
+ // Shifts.
+ check!(T, VAL1 << black_box(64));
+ check!(T, VAL1 << black_box(81));
+ check!(T, VAL3 << black_box(63));
+ check!(T, VAL3 << black_box(64));
+
+ check!(T, VAL1 >> black_box(64));
+ check!(T, VAL2 >> black_box(64));
+ check!(T, VAL3 >> black_box(64));
+ check!(T, VAL3 >> black_box(81));
+ };
+ }
+
+ /// Generate `check!` tests for integer types at least as wide as 64 bits.
+ macro_rules! check_ops64 {
+ () => {
+ check_ops32!();
+
+ // Shifts.
+ check!(T, VAL2 << black_box(33));
+ check!(T, VAL2 << black_box(49));
+ check!(T, VAL2 << black_box(61));
+ check!(T, VAL2 << black_box(63));
+
+ check!(T, VAL3 << black_box(33));
+ check!(T, VAL3 << black_box(49));
+ check!(T, VAL3 << black_box(61));
+
+ check!(T, VAL1 >> black_box(33));
+ check!(T, VAL1 >> black_box(49));
+ check!(T, VAL1 >> black_box(61));
+ check!(T, VAL1 >> black_box(63));
+
+ check!(T, VAL2 >> black_box(33));
+ check!(T, VAL2 >> black_box(49));
+ check!(T, VAL2 >> black_box(61));
+ check!(T, VAL2 >> black_box(63));
+
+ check!(T, VAL3 >> black_box(33));
+ check!(T, VAL3 >> black_box(49));
+ check!(T, VAL3 >> black_box(61));
+ check!(T, VAL3 >> black_box(63));
+ };
+ }
+
+ /// Generate `check!` tests for integer types at least as wide as 32 bits.
+ macro_rules! check_ops32 {
+ () => {
+ // Shifts.
+ check!(T, VAL2 << black_box(1));
+ check!(T, VAL2 << black_box(0));
+
+ check!(T, VAL3 << black_box(1));
+ check!(T, VAL3 << black_box(0));
+
+ check!(T, VAL1.wrapping_shl(black_box(0)));
+ check!(T, VAL1.wrapping_shl(black_box(1)));
+ check!(T, VAL1.wrapping_shl(black_box(33)));
+ check!(T, VAL1.wrapping_shl(black_box(49)));
+ check!(T, VAL1.wrapping_shl(black_box(61)));
+ check!(T, VAL1.wrapping_shl(black_box(63)));
+ check!(T, VAL1.wrapping_shl(black_box(64)));
+ check!(T, VAL1.wrapping_shl(black_box(81)));
+
+ check!(Option<T>, VAL1.checked_shl(black_box(0)));
+ check!(Option<T>, VAL1.checked_shl(black_box(1)));
+ check!(Option<T>, VAL1.checked_shl(black_box(33)));
+ check!(Option<T>, VAL1.checked_shl(black_box(49)));
+ check!(Option<T>, VAL1.checked_shl(black_box(61)));
+ check!(Option<T>, VAL1.checked_shl(black_box(63)));
+ check!(Option<T>, VAL1.checked_shl(black_box(64)));
+ check!(Option<T>, VAL1.checked_shl(black_box(81)));
+
+ check!(T, VAL1 >> black_box(0));
+ check!(T, VAL1 >> black_box(1));
+
+ check!(T, VAL2 >> black_box(1));
+ check!(T, VAL2 >> black_box(0));
+
+ check!(T, VAL3 >> black_box(0));
+ check!(T, VAL3 >> black_box(1));
+
+ check!(T, VAL1.wrapping_shr(black_box(0)));
+ check!(T, VAL1.wrapping_shr(black_box(1)));
+ check!(T, VAL1.wrapping_shr(black_box(33)));
+ check!(T, VAL1.wrapping_shr(black_box(49)));
+ check!(T, VAL1.wrapping_shr(black_box(61)));
+ check!(T, VAL1.wrapping_shr(black_box(63)));
+ check!(T, VAL1.wrapping_shr(black_box(64)));
+ check!(T, VAL1.wrapping_shr(black_box(81)));
+
+ check!(Option<T>, VAL1.checked_shr(black_box(0)));
+ check!(Option<T>, VAL1.checked_shr(black_box(1)));
+ check!(Option<T>, VAL1.checked_shr(black_box(33)));
+ check!(Option<T>, VAL1.checked_shr(black_box(49)));
+ check!(Option<T>, VAL1.checked_shr(black_box(61)));
+ check!(Option<T>, VAL1.checked_shr(black_box(63)));
+ check!(Option<T>, VAL1.checked_shr(black_box(64)));
+ check!(Option<T>, VAL1.checked_shr(black_box(81)));
+
+ // Casts
+ check!(u64, (VAL1 >> black_box(1)) as u64);
+
+ // Addition.
+ check!(T, VAL1 + black_box(1));
+ check!(T, VAL2 + black_box(1));
+ check!(T, VAL2 + (VAL2 + black_box(1)));
+ check!(T, VAL3 + black_box(1));
+
+ check!(Option<T>, VAL1.checked_add(black_box(1)));
+ check!(Option<T>, VAL2.checked_add(black_box(1)));
+ check!(Option<T>, VAL2.checked_add(VAL2 + black_box(1)));
+ check!(Option<T>, VAL3.checked_add(T::MAX));
+ check!(Option<T>, VAL3.checked_add(T::MIN));
+
+ check!(T, VAL1.wrapping_add(black_box(1)));
+ check!(T, VAL2.wrapping_add(black_box(1)));
+ check!(T, VAL2.wrapping_add(VAL2 + black_box(1)));
+ check!(T, VAL3.wrapping_add(T::MAX));
+ check!(T, VAL3.wrapping_add(T::MIN));
+
+ check!((T, bool), VAL1.overflowing_add(black_box(1)));
+ check!((T, bool), VAL2.overflowing_add(black_box(1)));
+ check!((T, bool), VAL2.overflowing_add(VAL2 + black_box(1)));
+ check!((T, bool), VAL3.overflowing_add(T::MAX));
+ check!((T, bool), VAL3.overflowing_add(T::MIN));
+
+ check!(T, VAL1.saturating_add(black_box(1)));
+ check!(T, VAL2.saturating_add(black_box(1)));
+ check!(T, VAL2.saturating_add(VAL2 + black_box(1)));
+ check!(T, VAL3.saturating_add(T::MAX));
+ check!(T, VAL3.saturating_add(T::MIN));
+
+ // Subtraction
+ check!(T, VAL1 - black_box(1));
+ check!(T, VAL2 - black_box(1));
+ check!(T, VAL3 - black_box(1));
+
+ check!(Option<T>, VAL1.checked_sub(black_box(1)));
+ check!(Option<T>, VAL2.checked_sub(black_box(1)));
+ check!(Option<T>, VAL2.checked_sub(VAL2 + black_box(1)));
+ check!(Option<T>, VAL3.checked_sub(T::MAX));
+ check!(Option<T>, VAL3.checked_sub(T::MIN));
+
+ check!(T, VAL1.wrapping_sub(black_box(1)));
+ check!(T, VAL2.wrapping_sub(black_box(1)));
+ check!(T, VAL2.wrapping_sub(VAL2 + black_box(1)));
+ check!(T, VAL3.wrapping_sub(T::MAX));
+ check!(T, VAL3.wrapping_sub(T::MIN));
+
+ check!((T, bool), VAL1.overflowing_sub(black_box(1)));
+ check!((T, bool), VAL2.overflowing_sub(black_box(1)));
+ check!((T, bool), VAL2.overflowing_sub(VAL2 + black_box(1)));
+ check!((T, bool), VAL3.overflowing_sub(T::MAX));
+ check!((T, bool), VAL3.overflowing_sub(T::MIN));
+
+ check!(T, VAL1.saturating_sub(black_box(1)));
+ check!(T, VAL2.saturating_sub(black_box(1)));
+ check!(T, VAL2.saturating_sub(VAL2 + black_box(1)));
+ check!(T, VAL3.saturating_sub(T::MAX));
+ check!(T, VAL3.saturating_sub(T::MIN));
+
+ // Multiplication
+ check!(T, VAL1 * black_box(2));
+ check!(T, VAL1 * (black_box(1) + VAL2));
+ check!(T, VAL2 * black_box(2));
+ check!(T, VAL2 * (black_box(1) + VAL2));
+ check!(T, VAL3 * black_box(1));
+ check!(T, VAL4 * black_box(2));
+ check!(T, VAL5 * black_box(2));
+
+ check!(Option<T>, VAL1.checked_mul(black_box(2)));
+ check!(Option<T>, VAL1.checked_mul(black_box(1) + VAL2));
+ check!(Option<T>, VAL3.checked_mul(VAL3));
+ check!(Option<T>, VAL4.checked_mul(black_box(2)));
+ check!(Option<T>, VAL5.checked_mul(black_box(2)));
+
+ check!(T, VAL1.wrapping_mul(black_box(2)));
+ check!(T, VAL1.wrapping_mul((black_box(1) + VAL2)));
+ check!(T, VAL3.wrapping_mul(VAL3));
+ check!(T, VAL4.wrapping_mul(black_box(2)));
+ check!(T, VAL5.wrapping_mul(black_box(2)));
+
+ check!((T, bool), VAL1.overflowing_mul(black_box(2)));
+ check!((T, bool), VAL1.overflowing_mul(black_box(1) + VAL2));
+ check!((T, bool), VAL3.overflowing_mul(VAL3));
+ check!((T, bool), VAL4.overflowing_mul(black_box(2)));
+ check!((T, bool), VAL5.overflowing_mul(black_box(2)));
+
+ check!(T, VAL1.saturating_mul(black_box(2)));
+ check!(T, VAL1.saturating_mul(black_box(1) + VAL2));
+ check!(T, VAL3.saturating_mul(VAL3));
+ check!(T, VAL4.saturating_mul(black_box(2)));
+ check!(T, VAL5.saturating_mul(black_box(2)));
+
+ // Division.
+ check!(T, VAL1 / black_box(2));
+ check!(T, VAL1 / black_box(3));
+
+ check!(T, VAL2 / black_box(2));
+ check!(T, VAL2 / black_box(3));
+
+ check!(T, VAL3 / black_box(2));
+ check!(T, VAL3 / black_box(3));
+ check!(T, VAL3 / (black_box(1) + VAL4));
+ check!(T, VAL3 / (black_box(1) + VAL2));
+
+ check!(T, VAL4 / black_box(2));
+ check!(T, VAL4 / black_box(3));
+
+ check!(Option<T>, VAL1.checked_div(black_box(2)));
+ check!(Option<T>, VAL1.checked_div(black_box(1) + VAL2));
+ check!(Option<T>, VAL3.checked_div(VAL3));
+ check!(Option<T>, VAL4.checked_div(black_box(2)));
+ check!(Option<T>, VAL5.checked_div(black_box(2)));
+ check!(Option<T>, (T::MIN).checked_div(black_box(0 as T).wrapping_sub(1)));
+            check!(Option<T>, VAL5.checked_div(black_box(0))); // VAL5 / 0
+
+ check!(T, VAL1.wrapping_div(black_box(2)));
+ check!(T, VAL1.wrapping_div(black_box(1) + VAL2));
+ check!(T, VAL3.wrapping_div(VAL3));
+ check!(T, VAL4.wrapping_div(black_box(2)));
+ check!(T, VAL5.wrapping_div(black_box(2)));
+ check!(T, (T::MIN).wrapping_div(black_box(0 as T).wrapping_sub(1)));
+
+ check!((T, bool), VAL1.overflowing_div(black_box(2)));
+ check!((T, bool), VAL1.overflowing_div(black_box(1) + VAL2));
+ check!((T, bool), VAL3.overflowing_div(VAL3));
+ check!((T, bool), VAL4.overflowing_div(black_box(2)));
+ check!((T, bool), VAL5.overflowing_div(black_box(2)));
+ check!((T, bool), (T::MIN).overflowing_div(black_box(0 as T).wrapping_sub(1)));
+
+ check!(T, VAL1.saturating_div(black_box(2)));
+ check!(T, VAL1.saturating_div((black_box(1) + VAL2)));
+ check!(T, VAL3.saturating_div(VAL3));
+ check!(T, VAL4.saturating_div(black_box(2)));
+ check!(T, VAL5.saturating_div(black_box(2)));
+ check!(T, (T::MIN).saturating_div((0 as T).wrapping_sub(black_box(1))));
+ };
+ }
+
+ {
+ type T = u32;
+ const VAL1: T = 14162_u32;
+ const VAL2: T = 14556_u32;
+ const VAL3: T = 323656954_u32;
+ const VAL4: T = 2023651954_u32;
+ const VAL5: T = 1323651954_u32;
+ check_ops32!();
+ }
+
+ {
+ type T = i32;
+ const VAL1: T = 13456_i32;
+ const VAL2: T = 10475_i32;
+ const VAL3: T = 923653954_i32;
+ const VAL4: T = 993198738_i32;
+ const VAL5: T = 1023653954_i32;
+ check_ops32!();
+ }
+
+ {
+ type T = u64;
+ const VAL1: T = 134217856_u64;
+ const VAL2: T = 104753732_u64;
+ const VAL3: T = 12323651988970863954_u64;
+ const VAL4: T = 7323651988970863954_u64;
+ const VAL5: T = 8323651988970863954_u64;
+ check_ops64!();
+ }
+
+ {
+ type T = i64;
+ const VAL1: T = 134217856_i64;
+ const VAL2: T = 104753732_i64;
+ const VAL3: T = 6323651988970863954_i64;
+ const VAL4: T = 2323651988970863954_i64;
+ const VAL5: T = 3323651988970863954_i64;
+ check_ops64!();
+ }
+
+ {
+ type T = u128;
+ const VAL1: T = 134217856_u128;
+ const VAL2: T = 10475372733397991552_u128;
+ const VAL3: T = 193236519889708027473620326106273939584_u128;
+ const VAL4: T = 123236519889708027473620326106273939584_u128;
+ const VAL5: T = 153236519889708027473620326106273939584_u128;
+ check_ops128!();
+ }
+ {
+ type T = i128;
+ const VAL1: T = 134217856_i128;
+ const VAL2: T = 10475372733397991552_i128;
+ const VAL3: T = 83236519889708027473620326106273939584_i128;
+ const VAL4: T = 63236519889708027473620326106273939584_i128;
+ const VAL5: T = 73236519889708027473620326106273939584_i128;
+ check_ops128!();
+ }
+
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs b/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs
new file mode 100644
index 000000000..ea2c5add9
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/int_overflow.rs
@@ -0,0 +1,140 @@
+// Compiler:
+//
+// Run-time:
+// stdout: Success
+// status: signal
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn fflush(stream: *mut i32) -> i32;
+
+ pub static stdout: *mut i32;
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ // Panicking is expected iff overflow checking is enabled.
+ #[cfg(debug_assertions)]
+ libc::puts("Success\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
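+    // isize::MAX on 64-bit targets; adding argc (at least 1) overflows.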
+ let int = 9223372036854775807isize;
+ let int = int + argc; // overflow
+
+ // If overflow checking is disabled, we should reach here.
+ #[cfg(not(debug_assertions))]
+ unsafe {
+ libc::puts("Success\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+
+ int
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs b/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs
new file mode 100644
index 000000000..52de20021
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/mut_ref.rs
@@ -0,0 +1,165 @@
+
+// Compiler:
+//
+// Run-time:
+// stdout: 2
+// 7
+// 6
+// 11
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics, track_caller)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i32 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn puts(s: *const u8) -> i32;
+ pub fn fflush(stream: *mut i32) -> i32;
+ pub fn printf(format: *const i8, ...) -> i32;
+
+ pub static stdout: *mut i32;
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+ field: isize,
+}
+
+fn test(num: isize) -> Test {
+ Test {
+ field: num + 1,
+ }
+}
+
+fn update_num(num: &mut isize) {
+ *num = *num + 5;
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let mut test = test(argc);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
+ }
+ update_num(&mut test.field);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
+ }
+
+ update_num(&mut argc);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+ }
+
+ let refe = &mut argc;
+ *refe = *refe + 5;
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, argc);
+ }
+
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/operations.rs b/compiler/rustc_codegen_gcc/tests/run/operations.rs
new file mode 100644
index 000000000..e078b37b4
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/operations.rs
@@ -0,0 +1,221 @@
+// Compiler:
+//
+// Run-time:
+// stdout: 41
+// 39
+// 10
+
+#![allow(unused_attributes)]
+#![feature(auto_traits, lang_items, no_core, start, intrinsics, arbitrary_self_types)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for *mut i32 {}
+impl Copy for usize {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+impl Copy for i32 {}
+
+#[lang = "deref"]
+pub trait Deref {
+ type Target: ?Sized;
+
+ fn deref(&self) -> &Self::Target;
+}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ pub fn fflush(stream: *mut i32) -> i32;
+
+ pub static stdout: *mut i32;
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ libc::fflush(libc::stdout);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+#[lang = "mul"]
+pub trait Mul<RHS = Self> {
+ type Output;
+
+ #[must_use]
+ fn mul(self, rhs: RHS) -> Self::Output;
+}
+
+impl Mul for u8 {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for usize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+impl Mul for isize {
+ type Output = Self;
+
+ fn mul(self, rhs: Self) -> Self::Output {
+ self * rhs
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 + argc);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, 40 - argc);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, 10 * argc);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs b/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs
new file mode 100644
index 000000000..6ac099ea1
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs
@@ -0,0 +1,222 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 1
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u8 {}
+impl Copy for i8 {}
+impl Copy for i16 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ pub fn puts(s: *const u8) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic"]
+#[track_caller]
+#[no_mangle]
+pub fn panic(_msg: &str) -> ! {
+ unsafe {
+ libc::puts("Panicking\0" as *const str as *const u8);
+ intrinsics::abort();
+ }
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+#[lang = "add"]
+trait Add<RHS = Self> {
+ type Output;
+
+ fn add(self, rhs: RHS) -> Self::Output;
+}
+
+impl Add for u8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i8 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for i32 {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for usize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+impl Add for isize {
+ type Output = Self;
+
+ fn add(self, rhs: Self) -> Self {
+ self + rhs
+ }
+}
+
+#[lang = "sub"]
+pub trait Sub<RHS = Self> {
+ type Output;
+
+ fn sub(self, rhs: RHS) -> Self::Output;
+}
+
+impl Sub for usize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for isize {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for u8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i8 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+impl Sub for i16 {
+ type Output = Self;
+
+ fn sub(self, rhs: Self) -> Self {
+ self - rhs
+ }
+}
+
+
+/*
+ * Code
+ */
+
+static mut ONE: usize = 1;
+
+fn make_array() -> [u8; 3] {
+ [42, 10, 5]
+}
+
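+// Cast an integer to a pointer and back; the round trip must preserve the value (1 here).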
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ let ptr = ONE as *mut usize;
+ let value = ptr as usize;
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, value);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs b/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs
new file mode 100644
index 000000000..6fa10dca0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs
@@ -0,0 +1,72 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 10
+// 10
+// 42
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+#[lang = "copy"]
+pub unsafe trait Copy {}
+
+unsafe impl Copy for bool {}
+unsafe impl Copy for u8 {}
+unsafe impl Copy for u16 {}
+unsafe impl Copy for u32 {}
+unsafe impl Copy for u64 {}
+unsafe impl Copy for usize {}
+unsafe impl Copy for i8 {}
+unsafe impl Copy for i16 {}
+unsafe impl Copy for i32 {}
+unsafe impl Copy for isize {}
+unsafe impl Copy for f32 {}
+unsafe impl Copy for char {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+/*
+ * Code
+ */
+
+fn int_cast(a: u16, b: i16) -> (u8, u16, u32, usize, i8, i16, i32, isize, u8, u32) {
+ (
+ a as u8, a as u16, a as u32, a as usize, a as i8, a as i16, a as i32, a as isize, b as u8,
+ b as u32,
+ )
+}
+
+#[start]
+fn main(argc: isize, _argv: *const *const u8) -> isize {
+ let (a, b, c, d, e, f, g, h, i, j) = int_cast(10, 42);
+ unsafe {
+ libc::printf(b"%d\n\0" as *const u8 as *const i8, c);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, d);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, j);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/slice.rs b/compiler/rustc_codegen_gcc/tests/run/slice.rs
new file mode 100644
index 000000000..ad9258ed0
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/slice.rs
@@ -0,0 +1,128 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 5
+
+#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+impl Copy for usize {}
+impl Copy for i32 {}
+impl Copy for u32 {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+#[lang = "index"]
+pub trait Index<Idx: ?Sized> {
+ type Output: ?Sized;
+ fn index(&self, index: Idx) -> &Self::Output;
+}
+
+impl<T> Index<usize> for [T; 3] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+impl<T> Index<usize> for [T] {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self[index]
+ }
+}
+
+#[lang = "unsize"]
+pub trait Unsize<T: ?Sized> {}
+
+#[lang = "coerce_unsized"]
+pub trait CoerceUnsized<T> {}
+
+impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {}
+impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {}
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+#[lang = "panic_location"]
+struct PanicLocation {
+ file: &'static str,
+ line: u32,
+ column: u32,
+}
+
+#[lang = "panic_bounds_check"]
+#[track_caller]
+#[no_mangle]
+fn panic_bounds_check(index: usize, len: usize) -> ! {
+ unsafe {
+ libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index);
+ intrinsics::abort();
+ }
+}
+
+mod intrinsics {
+ use super::Sized;
+
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+/*
+ * Code
+ */
+
+static mut TWO: usize = 2;
+
+fn index_slice(s: &[u32]) -> u32 {
+ unsafe {
+ s[TWO]
+ }
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let array = [42, 7, 5];
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, index_slice(&array));
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/static.rs b/compiler/rustc_codegen_gcc/tests/run/static.rs
new file mode 100644
index 000000000..294add968
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/static.rs
@@ -0,0 +1,112 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 10
+// 14
+// 1
+// 12
+// 12
+// 1
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "destruct"]
+pub trait Destruct {}
+
+#[lang = "drop"]
+pub trait Drop {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod intrinsics {
+ use super::Sized;
+
+ extern "rust-intrinsic" {
+ pub fn abort() -> !;
+ }
+}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+#[lang = "structural_peq"]
+pub trait StructuralPartialEq {}
+
+#[lang = "structural_teq"]
+pub trait StructuralEq {}
+
+#[lang = "drop_in_place"]
+#[allow(unconditional_recursion)]
+pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
+ // Code here does not matter - this is replaced by the
+ // real drop glue by the compiler.
+ drop_in_place(to_drop);
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+ field: isize,
+}
+
+struct WithRef {
+ refe: &'static Test,
+}
+
+static mut CONSTANT: isize = 10;
+
+static mut TEST: Test = Test {
+ field: 12,
+};
+
+static mut TEST2: Test = Test {
+ field: 14,
+};
+
+static mut WITH_REF: WithRef = WithRef {
+ refe: unsafe { &TEST },
+};
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, CONSTANT);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
+ TEST2.field = argc;
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST2.field);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
+ WITH_REF.refe = &TEST2;
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, TEST.field);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, WITH_REF.refe.field);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/structs.rs b/compiler/rustc_codegen_gcc/tests/run/structs.rs
new file mode 100644
index 000000000..6c8884855
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/structs.rs
@@ -0,0 +1,70 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 1
+// 2
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+/*
+ * Code
+ */
+
+struct Test {
+ field: isize,
+}
+
+struct Two {
+ two: isize,
+}
+
+fn one() -> isize {
+ 1
+}
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let test = Test {
+ field: one(),
+ };
+ let two = Two {
+ two: 2,
+ };
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.field);
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, two.two);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tests/run/tuple.rs b/compiler/rustc_codegen_gcc/tests/run/tuple.rs
new file mode 100644
index 000000000..0b670bf26
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tests/run/tuple.rs
@@ -0,0 +1,51 @@
+// Compiler:
+//
+// Run-time:
+// status: 0
+// stdout: 3
+
+#![feature(auto_traits, lang_items, no_core, start, intrinsics)]
+
+#![no_std]
+#![no_core]
+
+/*
+ * Core
+ */
+
+// Because we don't have core yet.
+#[lang = "sized"]
+pub trait Sized {}
+
+#[lang = "copy"]
+trait Copy {
+}
+
+impl Copy for isize {}
+
+#[lang = "receiver"]
+trait Receiver {
+}
+
+#[lang = "freeze"]
+pub(crate) unsafe auto trait Freeze {}
+
+mod libc {
+ #[link(name = "c")]
+ extern "C" {
+ pub fn printf(format: *const i8, ...) -> i32;
+ }
+}
+
+/*
+ * Code
+ */
+
+#[start]
+fn main(mut argc: isize, _argv: *const *const u8) -> isize {
+ let test: (isize, isize, isize) = (3, 1, 4);
+ unsafe {
+ libc::printf(b"%ld\n\0" as *const u8 as *const i8, test.0);
+ }
+ 0
+}
diff --git a/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py b/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py
new file mode 100644
index 000000000..849c6e9c9
--- /dev/null
+++ b/compiler/rustc_codegen_gcc/tools/generate_intrinsics.py
@@ -0,0 +1,238 @@
+import json
+import os
+import re
+import sys
+import subprocess
+from os import walk
+
+
+def run_command(command, cwd=None):
+ p = subprocess.Popen(command, cwd=cwd)
+ if p.wait() != 0:
+ print("command `{}` failed...".format(" ".join(command)))
+ sys.exit(1)
+
+
+def clone_repository(repo_name, path, repo_url, sub_path=None):
+ if os.path.exists(path):
+ while True:
+ choice = input("There is already a `{}` folder, do you want to update it? [y/N]".format(path))
+ if choice == "" or choice.lower() == "n":
+ print("Skipping repository update.")
+ return
+ elif choice.lower() == "y":
+ print("Updating repository...")
+ run_command(["git", "pull", "origin"], cwd=path)
+ return
+ else:
+ print("Didn't understand answer...")
+ print("Cloning {} repository...".format(repo_name))
+ if sub_path is None:
+ run_command(["git", "clone", repo_url, "--depth", "1", path])
+ else:
+ run_command(["git", "clone", repo_url, "--filter=tree:0", "--no-checkout", path])
+ run_command(["git", "sparse-checkout", "init"], cwd=path)
+ run_command(["git", "sparse-checkout", "set", "add", sub_path], cwd=path)
+ run_command(["git", "checkout"], cwd=path)
+
+
+def append_intrinsic(array, intrinsic_name, translation):
+ array.append((intrinsic_name, translation))
+
+
+def extract_intrinsics(intrinsics, file):
+ print("Extracting intrinsics from `{}`...".format(file))
+ with open(file, "r", encoding="utf8") as f:
+ content = f.read()
+
+ lines = content.splitlines()
+ pos = 0
+ current_arch = None
+ while pos < len(lines):
+ line = lines[pos].strip()
+ if line.startswith("let TargetPrefix ="):
+ current_arch = line.split('"')[1].strip()
+ if len(current_arch) == 0:
+ current_arch = None
+ elif current_arch is None:
+ pass
+ elif line == "}":
+ current_arch = None
+ elif line.startswith("def "):
+ content = ""
+ while not content.endswith(";") and not content.endswith("}") and pos < len(lines):
+ line = lines[pos].split(" // ")[0].strip()
+ content += line
+ pos += 1
+ entries = re.findall('GCCBuiltin<"(\\w+)">', content)
+ if len(entries) > 0:
+ intrinsic = content.split("def ")[1].strip().split(":")[0].strip()
+ intrinsic = intrinsic.split("_")
+ if len(intrinsic) < 2 or intrinsic[0] != "int":
+ continue
+ intrinsic[0] = "llvm"
+ intrinsic = ".".join(intrinsic)
+ if current_arch not in intrinsics:
+ intrinsics[current_arch] = []
+ for entry in entries:
+ append_intrinsic(intrinsics[current_arch], intrinsic, entry)
+ continue
+ pos += 1
+ continue
+ print("Done!")
+
+
+def extract_intrinsics_from_llvm(llvm_path, intrinsics):
+ files = []
+ intrinsics_path = os.path.join(llvm_path, "llvm/include/llvm/IR")
+ for (dirpath, dirnames, filenames) in walk(intrinsics_path):
+ files.extend([os.path.join(intrinsics_path, f) for f in filenames if f.endswith(".td")])
+
+ for file in files:
+        extract_intrinsics(intrinsics, file)
+
+
+def append_translation(json_data, p, array):
+ it = json_data["index"][p]
+ content = it["docs"].split('`')
+ if len(content) != 5:
+ return
+ append_intrinsic(array, content[1], content[3])
+
+
+def extract_intrinsics_from_llvmint(llvmint, intrinsics):
+ archs = [
+ "AMDGPU",
+ "aarch64",
+ "arm",
+ "cuda",
+ "hexagon",
+ "mips",
+ "nvvm",
+ "ppc",
+ "ptx",
+ "x86",
+ "xcore",
+ ]
+
+ json_file = os.path.join(llvmint, "target/doc/llvmint.json")
+ # We need to regenerate the documentation!
+ run_command(
+ ["cargo", "rustdoc", "--", "-Zunstable-options", "--output-format", "json"],
+ cwd=llvmint,
+ )
+ with open(json_file, "r", encoding="utf8") as f:
+ json_data = json.loads(f.read())
+ for p in json_data["paths"]:
+ it = json_data["paths"][p]
+ if it["crate_id"] != 0:
+ # This is from an external crate.
+ continue
+ if it["kind"] != "function":
+ # We're only looking for functions.
+ continue
+ # if len(it["path"]) == 2:
+ # # This is a "general" intrinsic, not bound to a specific arch.
+ # append_translation(json_data, p, general)
+ # continue
+ if len(it["path"]) != 3 or it["path"][1] not in archs:
+ continue
+ arch = it["path"][1]
+ if arch not in intrinsics:
+ intrinsics[arch] = []
+ append_translation(json_data, p, intrinsics[arch])
+
+
+def fill_intrinsics(intrinsics, from_intrinsics, all_intrinsics):
+ for arch in from_intrinsics:
+ if arch not in intrinsics:
+ intrinsics[arch] = []
+ for entry in from_intrinsics[arch]:
+ if entry[0] in all_intrinsics:
+ if all_intrinsics[entry[0]] == entry[1]:
+ # This is a "full" duplicate, both the LLVM instruction and the GCC
+ # translation are the same.
+ continue
+ intrinsics[arch].append((entry[0], entry[1], True))
+ else:
+ intrinsics[arch].append((entry[0], entry[1], False))
+ all_intrinsics[entry[0]] = entry[1]
+
+
+def update_intrinsics(llvm_path, llvmint, llvmint2):
+ intrinsics_llvm = {}
+ intrinsics_llvmint = {}
+ all_intrinsics = {}
+
+    extract_intrinsics_from_llvm(llvm_path, intrinsics_llvm)
+    extract_intrinsics_from_llvmint(llvmint, intrinsics_llvmint)
+    extract_intrinsics_from_llvmint(llvmint2, intrinsics_llvmint)
+
+ intrinsics = {}
+ # We give priority to translations from LLVM over the ones from llvmint.
+ fill_intrinsics(intrinsics, intrinsics_llvm, all_intrinsics)
+ fill_intrinsics(intrinsics, intrinsics_llvmint, all_intrinsics)
+
+    archs = sorted(intrinsics)
+
+ output_file = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "../src/intrinsic/archs.rs",
+ )
+ print("Updating content of `{}`...".format(output_file))
+ with open(output_file, "w", encoding="utf8") as out:
+ out.write("// File generated by `rustc_codegen_gcc/tools/generate_intrinsics.py`\n")
+ out.write("// DO NOT EDIT IT!\n")
+ out.write("match name {\n")
+ for arch in archs:
+ if len(intrinsics[arch]) == 0:
+ continue
+ intrinsics[arch].sort(key=lambda x: (x[0], x[2]))
+ out.write(' // {}\n'.format(arch))
+ for entry in intrinsics[arch]:
+            if entry[2]: # if it is a duplicate
+ out.write(' // [DUPLICATE]: "{}" => "{}",\n'.format(entry[0], entry[1]))
+ else:
+ out.write(' "{}" => "{}",\n'.format(entry[0], entry[1]))
+ out.write(' _ => unimplemented!("***** unsupported LLVM intrinsic {}", name),\n')
+ out.write("}\n")
+ print("Done!")
+
+
+def main():
+ llvm_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "llvm-project",
+ )
+ llvmint_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "llvmint",
+ )
+ llvmint2_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)),
+ "llvmint-2",
+ )
+
+ # First, we clone the LLVM repository if it's not already here.
+ clone_repository(
+ "llvm-project",
+ llvm_path,
+ "https://github.com/llvm/llvm-project",
+ sub_path="llvm/include/llvm/IR",
+ )
+ clone_repository(
+ "llvmint",
+ llvmint_path,
+ "https://github.com/GuillaumeGomez/llvmint",
+ )
+ clone_repository(
+ "llvmint2",
+ llvmint2_path,
+ "https://github.com/antoyo/llvmint",
+ )
+ update_intrinsics(llvm_path, llvmint_path, llvmint2_path)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
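
The script above emits the body of a string-to-string `match` that maps LLVM
intrinsic names to GCC builtin names. A minimal sketch of the shape of the
generated `archs.rs` (the entries shown are hypothetical, for illustration
only):

    // File generated by `rustc_codegen_gcc/tools/generate_intrinsics.py`
    // DO NOT EDIT IT!
    match name {
        // x86
        // [DUPLICATE]: "llvm.x86.example" => "__builtin_ia32_example_alt",
        "llvm.x86.other" => "__builtin_ia32_other",
        _ => unimplemented!("***** unsupported LLVM intrinsic {}", name),
    }

Entries flagged `[DUPLICATE]` map an intrinsic that an earlier source already
translated (possibly to a different builtin), so they are emitted commented
out rather than as live match arms.
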
diff --git a/compiler/rustc_codegen_llvm/Cargo.toml b/compiler/rustc_codegen_llvm/Cargo.toml
new file mode 100644
index 000000000..f9a5463ef
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/Cargo.toml
@@ -0,0 +1,36 @@
+[package]
+name = "rustc_codegen_llvm"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+test = false
+doctest = false
+
+[dependencies]
+bitflags = "1.0"
+cstr = "0.2"
+libc = "0.2"
+libloading = "0.7.1"
+measureme = "10.0.0"
+tracing = "0.1"
+rustc_middle = { path = "../rustc_middle" }
+rustc-demangle = "0.1.21"
+rustc_attr = { path = "../rustc_attr" }
+rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_fs_util = { path = "../rustc_fs_util" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_llvm = { path = "../rustc_llvm" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_metadata = { path = "../rustc_metadata" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_session = { path = "../rustc_session" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
+rustc_target = { path = "../rustc_target" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_codegen_llvm/README.md b/compiler/rustc_codegen_llvm/README.md
new file mode 100644
index 000000000..afec60d01
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/README.md
@@ -0,0 +1,7 @@
+The `rustc_codegen_llvm` crate contains the code to convert from MIR into LLVM IR,
+and then from LLVM IR into machine code. In general it contains code
+that runs towards the end of the compilation process.
+
+For more information about how codegen works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/backend/codegen.html
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
new file mode 100644
index 000000000..9eb3574e7
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -0,0 +1,599 @@
+use crate::attributes;
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm::{self, Attribute, AttributePlace};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::MemFlags;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::LayoutOf;
+pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
+use rustc_middle::ty::Ty;
+use rustc_session::config;
+use rustc_target::abi::call::ArgAbi;
+pub use rustc_target::abi::call::*;
+use rustc_target::abi::{self, HasDataLayout, Int};
+pub use rustc_target::spec::abi::Abi;
+
+use libc::c_uint;
+use smallvec::SmallVec;
+
+pub trait ArgAttributesExt {
+ fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
+ fn apply_attrs_to_callsite(
+ &self,
+ idx: AttributePlace,
+ cx: &CodegenCx<'_, '_>,
+ callsite: &Value,
+ );
+}
+
+fn should_use_mutable_noalias(cx: &CodegenCx<'_, '_>) -> bool {
+ // LLVM prior to version 12 had known miscompiles in the presence of
+ // noalias attributes (see #54878), but we don't support earlier
+ // versions at all anymore. We now enable mutable noalias by default.
+ cx.tcx.sess.opts.unstable_opts.mutable_noalias.unwrap_or(true)
+}
+
+const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] =
+ [(ArgAttribute::InReg, llvm::AttributeKind::InReg)];
+
+const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [
+ (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias),
+ (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture),
+ (ArgAttribute::NonNull, llvm::AttributeKind::NonNull),
+ (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly),
+ (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef),
+];
+
+fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> {
+ let mut regular = this.regular;
+
+ let mut attrs = SmallVec::new();
+
+ // ABI-affecting attributes must always be applied
+ for (attr, llattr) in ABI_AFFECTING_ATTRIBUTES {
+ if regular.contains(attr) {
+ attrs.push(llattr.create_attr(cx.llcx));
+ }
+ }
+ if let Some(align) = this.pointee_align {
+ attrs.push(llvm::CreateAlignmentAttr(cx.llcx, align.bytes()));
+ }
+ match this.arg_ext {
+ ArgExtension::None => {}
+ ArgExtension::Zext => attrs.push(llvm::AttributeKind::ZExt.create_attr(cx.llcx)),
+ ArgExtension::Sext => attrs.push(llvm::AttributeKind::SExt.create_attr(cx.llcx)),
+ }
+
+ // Only apply remaining attributes when optimizing
+ if cx.sess().opts.optimize != config::OptLevel::No {
+ let deref = this.pointee_size.bytes();
+ if deref != 0 {
+ if regular.contains(ArgAttribute::NonNull) {
+ attrs.push(llvm::CreateDereferenceableAttr(cx.llcx, deref));
+ } else {
+ attrs.push(llvm::CreateDereferenceableOrNullAttr(cx.llcx, deref));
+ }
+ regular -= ArgAttribute::NonNull;
+ }
+ for (attr, llattr) in OPTIMIZATION_ATTRIBUTES {
+ if regular.contains(attr) {
+ attrs.push(llattr.create_attr(cx.llcx));
+ }
+ }
+ if regular.contains(ArgAttribute::NoAliasMutRef) && should_use_mutable_noalias(cx) {
+ attrs.push(llvm::AttributeKind::NoAlias.create_attr(cx.llcx));
+ }
+ }
+
+ attrs
+}
+
+impl ArgAttributesExt for ArgAttributes {
+ fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value) {
+ let attrs = get_attrs(self, cx);
+ attributes::apply_to_llfn(llfn, idx, &attrs);
+ }
+
+ fn apply_attrs_to_callsite(
+ &self,
+ idx: AttributePlace,
+ cx: &CodegenCx<'_, '_>,
+ callsite: &Value,
+ ) {
+ let attrs = get_attrs(self, cx);
+ attributes::apply_to_callsite(callsite, idx, &attrs);
+ }
+}
+
+pub trait LlvmType {
+ fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
+}
+
+impl LlvmType for Reg {
+ fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
+ match self.kind {
+ RegKind::Integer => cx.type_ix(self.size.bits()),
+ RegKind::Float => match self.size.bits() {
+ 32 => cx.type_f32(),
+ 64 => cx.type_f64(),
+ _ => bug!("unsupported float: {:?}", self),
+ },
+ RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
+ }
+ }
+}
+
+impl LlvmType for CastTarget {
+ fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
+ let rest_ll_unit = self.rest.unit.llvm_type(cx);
+ let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
+ (0, 0)
+ } else {
+ (
+ self.rest.total.bytes() / self.rest.unit.size.bytes(),
+ self.rest.total.bytes() % self.rest.unit.size.bytes(),
+ )
+ };
+
+ if self.prefix.iter().all(|x| x.is_none()) {
+ // Simplify to a single unit when there is no prefix and size <= unit size
+ if self.rest.total <= self.rest.unit.size {
+ return rest_ll_unit;
+ }
+
+ // Simplify to array when all chunks are the same size and type
+ if rem_bytes == 0 {
+ return cx.type_array(rest_ll_unit, rest_count);
+ }
+ }
+
+ // Create list of fields in the main structure
+ let mut args: Vec<_> = self
+ .prefix
+ .iter()
+ .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
+ .chain((0..rest_count).map(|_| rest_ll_unit))
+ .collect();
+
+ // Append final integer
+ if rem_bytes != 0 {
+ // Only integers can be really split further.
+ assert_eq!(self.rest.unit.kind, RegKind::Integer);
+ args.push(cx.type_ix(rem_bytes * 8));
+ }
+
+ cx.type_struct(&args, false)
+ }
+}
+
+pub trait ArgAbiExt<'ll, 'tcx> {
+ fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+ fn store(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ val: &'ll Value,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ );
+ fn store_fn_arg(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ );
+}
+
+impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
+ /// Gets the LLVM type for a place of the original Rust type of
+ /// this argument/return, i.e., the result of `type_of::type_of`.
+ fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+ self.layout.llvm_type(cx)
+ }
+
+ /// Stores a direct/indirect value described by this ArgAbi into a
+ /// place for the original Rust type of this argument/return.
+    /// Can be used both for storing formal arguments into Rust variables
+    /// and for storing the results of call/invoke instructions into their destinations.
+ fn store(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ val: &'ll Value,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ ) {
+ if self.is_ignore() {
+ return;
+ }
+ if self.is_sized_indirect() {
+ OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
+ } else if self.is_unsized_indirect() {
+ bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
+ } else if let PassMode::Cast(cast) = self.mode {
+ // FIXME(eddyb): Figure out when the simpler Store is safe, clang
+ // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
+ let can_store_through_cast_ptr = false;
+ if can_store_through_cast_ptr {
+ let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
+ let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
+ bx.store(val, cast_dst, self.layout.align.abi);
+ } else {
+ // The actual return type is a struct, but the ABI
+ // adaptation code has cast it into some scalar type. The
+ // code that follows is the only reliable way I have
+ // found to do a transform like i64 -> {i32,i32}.
+ // Basically we dump the data onto the stack then memcpy it.
+ //
+ // Other approaches I tried:
+ // - Casting rust ret pointer to the foreign type and using Store
+ // is (a) unsafe if size of foreign type > size of rust type and
+ // (b) runs afoul of strict aliasing rules, yielding invalid
+ // assembly under -O (specifically, the store gets removed).
+ // - Truncating foreign type to correct integral type and then
+ // bitcasting to the struct type yields invalid cast errors.
+
+            // So instead we allocate some scratch space...
+ let scratch_size = cast.size(bx);
+ let scratch_align = cast.align(bx);
+ let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+ bx.lifetime_start(llscratch, scratch_size);
+
+ // ... where we first store the value...
+ bx.store(val, llscratch, scratch_align);
+
+ // ... and then memcpy it to the intended destination.
+ bx.memcpy(
+ dst.llval,
+ self.layout.align.abi,
+ llscratch,
+ scratch_align,
+ bx.const_usize(self.layout.size.bytes()),
+ MemFlags::empty(),
+ );
+
+ bx.lifetime_end(llscratch, scratch_size);
+ }
+ } else {
+ OperandValue::Immediate(val).store(bx, dst);
+ }
+ }
+
+ fn store_fn_arg(
+ &self,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ ) {
+ let mut next = || {
+ let val = llvm::get_param(bx.llfn(), *idx as c_uint);
+ *idx += 1;
+ val
+ };
+ match self.mode {
+ PassMode::Ignore => {}
+ PassMode::Pair(..) => {
+ OperandValue::Pair(next(), next()).store(bx, dst);
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+ }
+ PassMode::Direct(_)
+ | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ }
+ | PassMode::Cast(_) => {
+ let next_arg = next();
+ self.store(bx, next_arg, dst);
+ }
+ }
+ }
+}
+
+impl<'ll, 'tcx> ArgAbiMethods<'tcx> for Builder<'_, 'll, 'tcx> {
+ fn store_fn_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, Self::Value>,
+ ) {
+ arg_abi.store_fn_arg(self, idx, dst)
+ }
+ fn store_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ val: &'ll Value,
+ dst: PlaceRef<'tcx, &'ll Value>,
+ ) {
+ arg_abi.store(self, val, dst)
+ }
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+ arg_abi.memory_ty(self)
+ }
+}
+
+pub trait FnAbiLlvmExt<'ll, 'tcx> {
+ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+ fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+ fn llvm_cconv(&self) -> llvm::CallConv;
+ fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
+ fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
+}
+
+impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
+ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+ // Ignore "extra" args from the call site for C variadic functions.
+ // Only the "fixed" args are part of the LLVM function signature.
+ let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args };
+
+ let args_capacity: usize = args.iter().map(|arg|
+ if arg.pad.is_some() { 1 } else { 0 } +
+ if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
+ ).sum();
+ let mut llargument_tys = Vec::with_capacity(
+ if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 } + args_capacity,
+ );
+
+ let llreturn_ty = match self.ret.mode {
+ PassMode::Ignore => cx.type_void(),
+ PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
+ PassMode::Cast(cast) => cast.llvm_type(cx),
+ PassMode::Indirect { .. } => {
+ llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+ cx.type_void()
+ }
+ };
+
+ for arg in args {
+ // add padding
+ if let Some(ty) = arg.pad {
+ llargument_tys.push(ty.llvm_type(cx));
+ }
+
+ let llarg_ty = match arg.mode {
+ PassMode::Ignore => continue,
+ PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
+ PassMode::Pair(..) => {
+ llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
+ llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
+ continue;
+ }
+ PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => {
+ let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
+ let ptr_layout = cx.layout_of(ptr_ty);
+ llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
+ llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
+ continue;
+ }
+ PassMode::Cast(cast) => cast.llvm_type(cx),
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ cx.type_ptr_to(arg.memory_ty(cx))
+ }
+ };
+ llargument_tys.push(llarg_ty);
+ }
+
+ if self.c_variadic {
+ cx.type_variadic_func(&llargument_tys, llreturn_ty)
+ } else {
+ cx.type_func(&llargument_tys, llreturn_ty)
+ }
+ }
+
+ fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+ unsafe {
+ llvm::LLVMPointerType(
+ self.llvm_type(cx),
+ cx.data_layout().instruction_address_space.0 as c_uint,
+ )
+ }
+ }
+
+ fn llvm_cconv(&self) -> llvm::CallConv {
+ match self.conv {
+ Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv,
+ Conv::RustCold => llvm::ColdCallConv,
+ Conv::AmdGpuKernel => llvm::AmdGpuKernel,
+ Conv::AvrInterrupt => llvm::AvrInterrupt,
+ Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
+ Conv::ArmAapcs => llvm::ArmAapcsCallConv,
+ Conv::Msp430Intr => llvm::Msp430Intr,
+ Conv::PtxKernel => llvm::PtxKernel,
+ Conv::X86Fastcall => llvm::X86FastcallCallConv,
+ Conv::X86Intr => llvm::X86_Intr,
+ Conv::X86Stdcall => llvm::X86StdcallCallConv,
+ Conv::X86ThisCall => llvm::X86_ThisCall,
+ Conv::X86VectorCall => llvm::X86_VectorCall,
+ Conv::X86_64SysV => llvm::X86_64_SysV,
+ Conv::X86_64Win64 => llvm::X86_64_Win64,
+ }
+ }
+
+ fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
+ let mut func_attrs = SmallVec::<[_; 2]>::new();
+ if self.ret.layout.abi.is_uninhabited() {
+ func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx));
+ }
+ if !self.can_unwind {
+ func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
+ }
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
+
+ let mut i = 0;
+ let mut apply = |attrs: &ArgAttributes| {
+ attrs.apply_attrs_to_llfn(llvm::AttributePlace::Argument(i), cx, llfn);
+ i += 1;
+ i - 1
+ };
+ match self.ret.mode {
+ PassMode::Direct(ref attrs) => {
+ attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
+ }
+ PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
+ assert!(!on_stack);
+ let i = apply(attrs);
+ let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx));
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]);
+ }
+ PassMode::Cast(cast) => {
+ cast.attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
+ }
+ _ => {}
+ }
+ for arg in &self.args {
+ if arg.pad.is_some() {
+ apply(&ArgAttributes::new());
+ }
+ match arg.mode {
+ PassMode::Ignore => {}
+ PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
+ let i = apply(attrs);
+ let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx));
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]);
+ }
+ PassMode::Direct(ref attrs)
+ | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
+ apply(attrs);
+ }
+ PassMode::Indirect { ref attrs, extra_attrs: Some(ref extra_attrs), on_stack } => {
+ assert!(!on_stack);
+ apply(attrs);
+ apply(extra_attrs);
+ }
+ PassMode::Pair(ref a, ref b) => {
+ apply(a);
+ apply(b);
+ }
+ PassMode::Cast(cast) => {
+ apply(&cast.attrs);
+ }
+ }
+ }
+ }
+
+ fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) {
+ let mut func_attrs = SmallVec::<[_; 2]>::new();
+ if self.ret.layout.abi.is_uninhabited() {
+ func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx));
+ }
+ if !self.can_unwind {
+ func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(bx.cx.llcx));
+ }
+ attributes::apply_to_callsite(callsite, llvm::AttributePlace::Function, &{ func_attrs });
+
+ let mut i = 0;
+ let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| {
+ attrs.apply_attrs_to_callsite(llvm::AttributePlace::Argument(i), cx, callsite);
+ i += 1;
+ i - 1
+ };
+ match self.ret.mode {
+ PassMode::Direct(ref attrs) => {
+ attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite);
+ }
+ PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => {
+ assert!(!on_stack);
+ let i = apply(bx.cx, attrs);
+ let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx));
+ attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]);
+ }
+ PassMode::Cast(cast) => {
+ cast.attrs.apply_attrs_to_callsite(
+ llvm::AttributePlace::ReturnValue,
+ &bx.cx,
+ callsite,
+ );
+ }
+ _ => {}
+ }
+ if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+ // If the value is a boolean, the range is 0..2 and that ultimately
+            // becomes 0..0 when the type becomes i1, which would be rejected
+ // by the LLVM verifier.
+ if let Int(..) = scalar.primitive() {
+ if !scalar.is_bool() && !scalar.is_always_valid(bx) {
+ bx.range_metadata(callsite, scalar.valid_range(bx));
+ }
+ }
+ }
+ for arg in &self.args {
+ if arg.pad.is_some() {
+ apply(bx.cx, &ArgAttributes::new());
+ }
+ match arg.mode {
+ PassMode::Ignore => {}
+ PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: true } => {
+ let i = apply(bx.cx, attrs);
+ let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx));
+ attributes::apply_to_callsite(
+ callsite,
+ llvm::AttributePlace::Argument(i),
+ &[byval],
+ );
+ }
+ PassMode::Direct(ref attrs)
+ | PassMode::Indirect { ref attrs, extra_attrs: None, on_stack: false } => {
+ apply(bx.cx, attrs);
+ }
+ PassMode::Indirect {
+ ref attrs,
+ extra_attrs: Some(ref extra_attrs),
+ on_stack: _,
+ } => {
+ apply(bx.cx, attrs);
+ apply(bx.cx, extra_attrs);
+ }
+ PassMode::Pair(ref a, ref b) => {
+ apply(bx.cx, a);
+ apply(bx.cx, b);
+ }
+ PassMode::Cast(cast) => {
+ apply(bx.cx, &cast.attrs);
+ }
+ }
+ }
+
+ let cconv = self.llvm_cconv();
+ if cconv != llvm::CCallConv {
+ llvm::SetInstructionCallConv(callsite, cconv);
+ }
+
+ if self.conv == Conv::CCmseNonSecureCall {
+ // This will probably get ignored on all targets but those supporting the TrustZone-M
+ // extension (thumbv8m targets).
+ let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
+ attributes::apply_to_callsite(
+ callsite,
+ llvm::AttributePlace::Function,
+ &[cmse_nonsecure_call],
+ );
+ }
+
+        // Some intrinsics require that an elementtype attribute (with the pointee type of a
+        // pointer argument) be added to the callsite.
+ let element_type_index = unsafe { llvm::LLVMRustGetElementTypeArgIndex(callsite) };
+ if element_type_index >= 0 {
+ let arg_ty = self.args[element_type_index as usize].layout.ty;
+ let pointee_ty = arg_ty.builtin_deref(true).expect("Must be pointer argument").ty;
+ let element_type_attr = unsafe {
+ llvm::LLVMRustCreateElementTypeAttr(bx.llcx, bx.layout_of(pointee_ty).llvm_type(bx))
+ };
+ attributes::apply_to_callsite(
+ callsite,
+ llvm::AttributePlace::Argument(element_type_index as u32),
+ &[element_type_attr],
+ );
+ }
+ }
+}
+
+impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+ fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
+ fn_abi.apply_attrs_callsite(self, callsite)
+ }
+
+ fn get_param(&mut self, index: usize) -> Self::Value {
+ llvm::get_param(self.llfn(), index as c_uint)
+ }
+}
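
To make the `PassMode` handling above concrete: `store_fn_arg` consumes one
LLVM-level parameter per Rust argument, except for `PassMode::Pair` and
unsized-indirect arguments, which consume two. A rough sketch of the
correspondence (the exact mapping is computed per target ABI, so these are
illustrative assumptions, not guarantees):

    fn f(x: i32);         // one parameter: the value itself (PassMode::Direct)
    fn f(x: &str);        // two parameters: data pointer + length (PassMode::Pair)
    fn f(x: [u8; 1024]);  // one parameter: a pointer to caller-owned memory
                          // (PassMode::Indirect, possibly with `byval`)

Whether a large aggregate is passed indirectly, and whether it receives the
`byval` attribute, follows the target's C ABI rules rather than the Rust type
alone.
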
diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
new file mode 100644
index 000000000..72961ae88
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -0,0 +1,157 @@
+use crate::attributes;
+use libc::c_uint;
+use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{DebugInfo, OomStrategy};
+use rustc_span::symbol::sym;
+
+use crate::debuginfo;
+use crate::llvm::{self, False, True};
+use crate::ModuleLlvm;
+
+pub(crate) unsafe fn codegen(
+ tcx: TyCtxt<'_>,
+ module_llvm: &mut ModuleLlvm,
+ module_name: &str,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+) {
+ let llcx = &*module_llvm.llcx;
+ let llmod = module_llvm.llmod();
+ let usize = match tcx.sess.target.pointer_width {
+ 16 => llvm::LLVMInt16TypeInContext(llcx),
+ 32 => llvm::LLVMInt32TypeInContext(llcx),
+ 64 => llvm::LLVMInt64TypeInContext(llcx),
+ tws => bug!("Unsupported target word size for int: {}", tws),
+ };
+ let i8 = llvm::LLVMInt8TypeInContext(llcx);
+ let i8p = llvm::LLVMPointerType(i8, 0);
+ let void = llvm::LLVMVoidTypeInContext(llcx);
+
+ for method in ALLOCATOR_METHODS {
+ let mut args = Vec::with_capacity(method.inputs.len());
+ for ty in method.inputs.iter() {
+ match *ty {
+ AllocatorTy::Layout => {
+ args.push(usize); // size
+ args.push(usize); // align
+ }
+ AllocatorTy::Ptr => args.push(i8p),
+ AllocatorTy::Usize => args.push(usize),
+
+ AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
+ }
+ }
+ let output = match method.output {
+ AllocatorTy::ResultPtr => Some(i8p),
+ AllocatorTy::Unit => None,
+
+ AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
+ panic!("invalid allocator output")
+ }
+ };
+ let ty = llvm::LLVMFunctionType(
+ output.unwrap_or(void),
+ args.as_ptr(),
+ args.len() as c_uint,
+ False,
+ );
+ let name = format!("__rust_{}", method.name);
+ let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
+
+ if tcx.sess.target.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ let uwtable = attributes::uwtable_attr(llcx);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
+ }
+
+ let callee = kind.fn_name(method.name);
+ let callee =
+ llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
+ llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+ let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+ let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+ llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+ .collect::<Vec<_>>();
+ let ret = llvm::LLVMRustBuildCall(
+ llbuilder,
+ ty,
+ callee,
+ args.as_ptr(),
+ args.len() as c_uint,
+ None,
+ );
+ llvm::LLVMSetTailCall(ret, True);
+ if output.is_some() {
+ llvm::LLVMBuildRet(llbuilder, ret);
+ } else {
+ llvm::LLVMBuildRetVoid(llbuilder);
+ }
+ llvm::LLVMDisposeBuilder(llbuilder);
+ }
+
+ // rust alloc error handler
+ let args = [usize, usize]; // size, align
+
+ let ty = llvm::LLVMFunctionType(void, args.as_ptr(), args.len() as c_uint, False);
+ let name = "__rust_alloc_error_handler";
+ let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
+ // -> ! DIFlagNoReturn
+ let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]);
+
+ if tcx.sess.target.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ if tcx.sess.must_emit_unwind_tables() {
+ let uwtable = attributes::uwtable_attr(llcx);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]);
+ }
+
+ let kind = if has_alloc_error_handler { AllocatorKind::Global } else { AllocatorKind::Default };
+ let callee = kind.fn_name(sym::oom);
+ let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
+ // -> ! DIFlagNoReturn
+ attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]);
+ llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
+
+ let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
+
+ let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
+ llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
+ let args = args
+ .iter()
+ .enumerate()
+ .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
+ .collect::<Vec<_>>();
+ let ret =
+ llvm::LLVMRustBuildCall(llbuilder, ty, callee, args.as_ptr(), args.len() as c_uint, None);
+ llvm::LLVMSetTailCall(ret, True);
+ llvm::LLVMBuildRetVoid(llbuilder);
+ llvm::LLVMDisposeBuilder(llbuilder);
+
+ // __rust_alloc_error_handler_should_panic
+ let name = OomStrategy::SYMBOL;
+ let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8);
+ if tcx.sess.target.default_hidden_visibility {
+ llvm::LLVMRustSetVisibility(ll_g, llvm::Visibility::Hidden);
+ }
+ let val = tcx.sess.opts.unstable_opts.oom.should_panic();
+ let llval = llvm::LLVMConstInt(i8, val as u64, False);
+ llvm::LLVMSetInitializer(ll_g, llval);
+
+ if tcx.sess.opts.debuginfo != DebugInfo::None {
+ let dbg_cx = debuginfo::CodegenUnitDebugContext::new(llmod);
+ debuginfo::metadata::build_compile_unit_di_node(tcx, module_name, &dbg_cx);
+ dbg_cx.finalize(tcx.sess);
+ }
+}
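
Conceptually, each shim generated above forwards a `__rust_*` symbol to the
symbol produced by `AllocatorKind::fn_name` via a tail call. A rough Rust
equivalent of the emitted IR, under the assumption that the registered
`#[global_allocator]` symbols use the `__rg_*` prefix:

    extern "C" {
        fn __rg_alloc(size: usize, align: usize) -> *mut u8;
    }

    #[no_mangle]
    unsafe extern "C" fn __rust_alloc(size: usize, align: usize) -> *mut u8 {
        // tail call into the user's registered allocator implementation
        __rg_alloc(size, align)
    }

The `__rust_alloc_error_handler` shim is built the same way, except that it is
additionally marked `noreturn` and forwards to the `oom` symbol of either the
user-provided handler or the default one.
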
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
new file mode 100644
index 000000000..a53946995
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -0,0 +1,1037 @@
+use crate::attributes;
+use crate::builder::Builder;
+use crate::common::Funclet;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_codegen_ssa::mir::operand::OperandValue;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::{bug, span_bug, ty::Instance};
+use rustc_span::{Pos, Span};
+use rustc_target::abi::*;
+use rustc_target::asm::*;
+
+use libc::{c_char, c_uint};
+use smallvec::SmallVec;
+use tracing::debug;
+
+impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
+ fn codegen_inline_asm(
+ &mut self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperandRef<'tcx, Self>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ instance: Instance<'_>,
+ dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
+ ) {
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+ // Collect the types of output operands
+ let mut constraints = vec![];
+ let mut clobbers = vec![];
+ let mut output_types = vec![];
+ let mut op_idx = FxHashMap::default();
+ let mut clobbered_x87 = false;
+ for (idx, op) in operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::Out { reg, late, place } => {
+ let is_target_supported = |reg_class: InlineAsmRegClass| {
+ for &(_, feature) in reg_class.supported_types(asm_arch) {
+ if let Some(feature) = feature {
+ let codegen_fn_attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+ if self.tcx.sess.target_features.contains(&feature)
+ || codegen_fn_attrs.target_features.contains(&feature)
+ {
+ return true;
+ }
+ } else {
+ // Register class is unconditionally supported
+ return true;
+ }
+ }
+ false
+ };
+
+ let mut layout = None;
+ let ty = if let Some(ref place) = place {
+ layout = Some(&place.layout);
+ llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
+ } else if matches!(
+ reg.reg_class(),
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
+ )
+ ) {
+ // Special handling for x87/mmx registers: we always
+ // clobber the whole set if one register is marked as
+ // clobbered. This is due to the way LLVM handles the
+ // FP stack in inline assembly.
+ if !clobbered_x87 {
+ clobbered_x87 = true;
+ clobbers.push("~{st}".to_string());
+ for i in 1..=7 {
+ clobbers.push(format!("~{{st({})}}", i));
+ }
+ }
+ continue;
+ } else if !is_target_supported(reg.reg_class())
+ || reg.reg_class().is_clobber_only(asm_arch)
+ {
+ // We turn discarded outputs into clobber constraints
+ // if the target feature needed by the register class is
+                        // disabled. This is necessary because otherwise LLVM will try
+ // to actually allocate a register for the dummy output.
+ assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_)));
+ clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
+ continue;
+ } else {
+ // If the output is discarded, we don't really care what
+ // type is used. We're just using this to tell LLVM to
+ // reserve the register.
+ dummy_output_type(self.cx, reg.reg_class())
+ };
+ output_types.push(ty);
+ op_idx.insert(idx, constraints.len());
+ let prefix = if late { "=" } else { "=&" };
+ constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
+ }
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
+ let layout = if let Some(ref out_place) = out_place {
+ &out_place.layout
+ } else {
+                        // LLVM requires tied operands to have the same type,
+ // so we just use the type of the input.
+ &in_value.layout
+ };
+ let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
+ output_types.push(ty);
+ op_idx.insert(idx, constraints.len());
+ let prefix = if late { "=" } else { "=&" };
+ constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
+ }
+ _ => {}
+ }
+ }
+
+ // Collect input operands
+ let mut inputs = vec![];
+ for (idx, op) in operands.iter().enumerate() {
+ match *op {
+ InlineAsmOperandRef::In { reg, value } => {
+ let llval =
+ llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
+ inputs.push(llval);
+ op_idx.insert(idx, constraints.len());
+ constraints.push(reg_to_llvm(reg, Some(&value.layout)));
+ }
+ InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
+ let value = llvm_fixup_input(
+ self,
+ in_value.immediate(),
+ reg.reg_class(),
+ &in_value.layout,
+ );
+ inputs.push(value);
+ constraints.push(format!("{}", op_idx[&idx]));
+ }
+ InlineAsmOperandRef::SymFn { instance } => {
+ inputs.push(self.cx.get_fn(instance));
+ op_idx.insert(idx, constraints.len());
+ constraints.push("s".to_string());
+ }
+ InlineAsmOperandRef::SymStatic { def_id } => {
+ inputs.push(self.cx.get_static(def_id));
+ op_idx.insert(idx, constraints.len());
+ constraints.push("s".to_string());
+ }
+ _ => {}
+ }
+ }
+
+ // Build the template string
+ let mut template_str = String::new();
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => {
+ if s.contains('$') {
+ for c in s.chars() {
+ if c == '$' {
+ template_str.push_str("$$");
+ } else {
+ template_str.push(c);
+ }
+ }
+ } else {
+ template_str.push_str(s)
+ }
+ }
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
+ match operands[operand_idx] {
+ InlineAsmOperandRef::In { reg, .. }
+ | InlineAsmOperandRef::Out { reg, .. }
+ | InlineAsmOperandRef::InOut { reg, .. } => {
+ let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
+ if let Some(modifier) = modifier {
+ template_str.push_str(&format!(
+ "${{{}:{}}}",
+ op_idx[&operand_idx], modifier
+ ));
+ } else {
+ template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
+ }
+ }
+ InlineAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the template
+ template_str.push_str(string);
+ }
+ InlineAsmOperandRef::SymFn { .. }
+ | InlineAsmOperandRef::SymStatic { .. } => {
+ // Only emit the raw symbol name
+ template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
+ }
+ }
+ }
+ }
+ }
+
+ constraints.append(&mut clobbers);
+ if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
+ match asm_arch {
+ InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
+ constraints.push("~{cc}".to_string());
+ }
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ constraints.extend_from_slice(&[
+ "~{dirflag}".to_string(),
+ "~{fpsr}".to_string(),
+ "~{flags}".to_string(),
+ ]);
+ }
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ constraints.extend_from_slice(&[
+ "~{vtype}".to_string(),
+ "~{vl}".to_string(),
+ "~{vxsat}".to_string(),
+ "~{vxrm}".to_string(),
+ ]);
+ }
+ InlineAsmArch::Avr => {
+ constraints.push("~{sreg}".to_string());
+ }
+ InlineAsmArch::Nvptx64 => {}
+ InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
+ InlineAsmArch::Hexagon => {}
+ InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
+ InlineAsmArch::S390x => {}
+ InlineAsmArch::SpirV => {}
+ InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {}
+ InlineAsmArch::Bpf => {}
+ InlineAsmArch::Msp430 => {
+ constraints.push("~{sr}".to_string());
+ }
+ }
+ }
+ if !options.contains(InlineAsmOptions::NOMEM) {
+ // This is actually ignored by LLVM, but it's probably best to keep
+ // it just in case. LLVM instead uses the ReadOnly/ReadNone
+ // attributes on the call instruction to optimize.
+ constraints.push("~{memory}".to_string());
+ }
+ let volatile = !options.contains(InlineAsmOptions::PURE);
+ let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
+ let output_type = match &output_types[..] {
+ [] => self.type_void(),
+ [ty] => ty,
+ tys => self.type_struct(tys, false),
+ };
+ let dialect = match asm_arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64
+ if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
+ {
+ llvm::AsmDialect::Intel
+ }
+ _ => llvm::AsmDialect::Att,
+ };
+ let result = inline_asm_call(
+ self,
+ &template_str,
+ &constraints.join(","),
+ &inputs,
+ output_type,
+ volatile,
+ alignstack,
+ dialect,
+ line_spans,
+ options.contains(InlineAsmOptions::MAY_UNWIND),
+ dest_catch_funclet,
+ )
+ .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
+
+ let mut attrs = SmallVec::<[_; 2]>::new();
+ if options.contains(InlineAsmOptions::PURE) {
+ if options.contains(InlineAsmOptions::NOMEM) {
+ attrs.push(llvm::AttributeKind::ReadNone.create_attr(self.cx.llcx));
+ } else if options.contains(InlineAsmOptions::READONLY) {
+ attrs.push(llvm::AttributeKind::ReadOnly.create_attr(self.cx.llcx));
+ }
+ attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx));
+ } else if options.contains(InlineAsmOptions::NOMEM) {
+ attrs.push(llvm::AttributeKind::InaccessibleMemOnly.create_attr(self.cx.llcx));
+ } else {
+ // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
+ }
+ attributes::apply_to_callsite(result, llvm::AttributePlace::Function, &{ attrs });
+
+ // Switch to the 'normal' basic block if we did an `invoke` instead of a `call`
+ if let Some((dest, _, _)) = dest_catch_funclet {
+ self.switch_to_block(dest);
+ }
+
+ // Write results to outputs
+ for (idx, op) in operands.iter().enumerate() {
+ if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
+ | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
+ {
+ let value = if output_types.len() == 1 {
+ result
+ } else {
+ self.extract_value(result, op_idx[&idx] as u64)
+ };
+ let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
+ OperandValue::Immediate(value).store(self, place);
+ }
+ }
+ }
+}
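
Putting the constraint-building rules above together: outputs come first with
a `=` (late) or `=&` (early-clobber) prefix, the input half of an `inout`
operand is tied to its output by index, plain inputs use a register-class
code, and clobbers are appended last. A hypothetical x86-64 example of the
mapping (the exact string depends on target and options):

    // asm!("add {0}, {1}", inout(reg) x, in(reg) y);
    //
    // produces a constraint string along the lines of:
    //   "=&r,0,r,~{dirflag},~{fpsr},~{flags},~{memory}"
    //
    // "=&r" is the output half of `inout`, "0" ties its input half to
    // operand 0, "r" is the plain input, and the rest are the default
    // x86 clobbers plus `~{memory}` (no `nomem` option given).
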
+
+impl<'tcx> AsmMethods<'tcx> for CodegenCx<'_, 'tcx> {
+ fn codegen_global_asm(
+ &self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[GlobalAsmOperandRef<'tcx>],
+ options: InlineAsmOptions,
+ _line_spans: &[Span],
+ ) {
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+
+ // Default to Intel syntax on x86
+ let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
+ && !options.contains(InlineAsmOptions::ATT_SYNTAX);
+
+ // Build the template string
+ let mut template_str = String::new();
+ if intel_syntax {
+ template_str.push_str(".intel_syntax\n");
+ }
+ for piece in template {
+ match *piece {
+ InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
+ InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
+ match operands[operand_idx] {
+ GlobalAsmOperandRef::Const { ref string } => {
+ // Const operands get injected directly into the
+ // template. Note that we don't need to escape $
+                            // here, unlike in normal inline assembly.
+ template_str.push_str(string);
+ }
+ GlobalAsmOperandRef::SymFn { instance } => {
+ let llval = self.get_fn(instance);
+ self.add_compiler_used_global(llval);
+ let symbol = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustGetMangledName(llval, s);
+ })
+ .expect("symbol is not valid UTF-8");
+ template_str.push_str(&symbol);
+ }
+ GlobalAsmOperandRef::SymStatic { def_id } => {
+ let llval = self
+ .renamed_statics
+ .borrow()
+ .get(&def_id)
+ .copied()
+ .unwrap_or_else(|| self.get_static(def_id));
+ self.add_compiler_used_global(llval);
+ let symbol = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustGetMangledName(llval, s);
+ })
+ .expect("symbol is not valid UTF-8");
+ template_str.push_str(&symbol);
+ }
+ }
+ }
+ }
+ }
+ if intel_syntax {
+ template_str.push_str("\n.att_syntax\n");
+ }
+
+ unsafe {
+ llvm::LLVMRustAppendModuleInlineAsm(
+ self.llmod,
+ template_str.as_ptr().cast(),
+ template_str.len(),
+ );
+ }
+ }
+}
+
+pub(crate) fn inline_asm_call<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ asm: &str,
+ cons: &str,
+ inputs: &[&'ll Value],
+ output: &'ll llvm::Type,
+ volatile: bool,
+ alignstack: bool,
+ dia: llvm::AsmDialect,
+ line_spans: &[Span],
+ unwind: bool,
+ dest_catch_funclet: Option<(
+ &'ll llvm::BasicBlock,
+ &'ll llvm::BasicBlock,
+ Option<&Funclet<'ll>>,
+ )>,
+) -> Option<&'ll Value> {
+ let volatile = if volatile { llvm::True } else { llvm::False };
+ let alignstack = if alignstack { llvm::True } else { llvm::False };
+ let can_throw = if unwind { llvm::True } else { llvm::False };
+
+ let argtys = inputs
+ .iter()
+ .map(|v| {
+ debug!("Asm Input Type: {:?}", *v);
+ bx.cx.val_ty(*v)
+ })
+ .collect::<Vec<_>>();
+
+ debug!("Asm Output Type: {:?}", output);
+ let fty = bx.cx.type_func(&argtys, output);
+ unsafe {
+ // Ask LLVM to verify that the constraints are well-formed.
+ let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
+ debug!("constraint verification result: {:?}", constraints_ok);
+ if constraints_ok {
+ if unwind && llvm_util::get_version() < (13, 0, 0) {
+ bx.cx.sess().span_fatal(
+ line_spans[0],
+ "unwinding from inline assembly is only supported on llvm >= 13.",
+ );
+ }
+
+ let v = llvm::LLVMRustInlineAsm(
+ fty,
+ asm.as_ptr().cast(),
+ asm.len(),
+ cons.as_ptr().cast(),
+ cons.len(),
+ volatile,
+ alignstack,
+ dia,
+ can_throw,
+ );
+
+ let call = if let Some((dest, catch, funclet)) = dest_catch_funclet {
+ bx.invoke(fty, v, inputs, dest, catch, funclet)
+ } else {
+ bx.call(fty, v, inputs, None)
+ };
+
+ // Store mark in a metadata node so we can map LLVM errors
+ // back to source locations. See #17552.
+ let key = "srcloc";
+ let kind = llvm::LLVMGetMDKindIDInContext(
+ bx.llcx,
+ key.as_ptr() as *const c_char,
+ key.len() as c_uint,
+ );
+
+ // srcloc contains one integer for each line of assembly code.
+ // Unfortunately this isn't enough to encode a full span so instead
+ // we just encode the start position of each line.
+ // FIXME: Figure out a way to pass the entire line spans.
+ let mut srcloc = vec![];
+ if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 {
+ // LLVM inserts an extra line to add the ".intel_syntax", so add
+ // a dummy srcloc entry for it.
+ //
+ // Don't do this if we only have 1 line span since that may be
+ // due to the asm template string coming from a macro. LLVM will
+ // default to the first srcloc for lines that don't have an
+ // associated srcloc.
+ srcloc.push(bx.const_i32(0));
+ }
+ srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
+ let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
+ llvm::LLVMSetMetadata(call, kind, md);
+
+ Some(call)
+ } else {
+ // LLVM has detected an issue with our constraints, bail out
+ None
+ }
+ }
+}
+
+/// If the register is an xmm/ymm/zmm register then return its index.
+fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
+ match reg {
+ InlineAsmReg::X86(reg)
+ if reg as u32 >= X86InlineAsmReg::xmm0 as u32
+ && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
+ {
+ Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
+ }
+ InlineAsmReg::X86(reg)
+ if reg as u32 >= X86InlineAsmReg::ymm0 as u32
+ && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
+ {
+ Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
+ }
+ InlineAsmReg::X86(reg)
+ if reg as u32 >= X86InlineAsmReg::zmm0 as u32
+ && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
+ {
+ Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
+ }
+ _ => None,
+ }
+}
+
+/// If the register is an AArch64 vector register then return its index.
+fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
+ match reg {
+ InlineAsmReg::AArch64(reg)
+ if reg as u32 >= AArch64InlineAsmReg::v0 as u32
+ && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
+ {
+ Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
+ }
+ _ => None,
+ }
+}
+
+/// Converts a register class to an LLVM constraint code.
+fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
+ match reg {
+ // For vector registers LLVM wants the register name to match the type size.
+ InlineAsmRegOrRegClass::Reg(reg) => {
+ if let Some(idx) = xmm_reg_index(reg) {
+ let class = if let Some(layout) = layout {
+ match layout.size.bytes() {
+ 64 => 'z',
+ 32 => 'y',
+ _ => 'x',
+ }
+ } else {
+ // We use f32 as the type for discarded outputs
+ 'x'
+ };
+ format!("{{{}mm{}}}", class, idx)
+ } else if let Some(idx) = a64_vreg_index(reg) {
+ let class = if let Some(layout) = layout {
+ match layout.size.bytes() {
+ 16 => 'q',
+ 8 => 'd',
+ 4 => 's',
+ 2 => 'h',
+ 1 => 'd', // We fixup i8 to i8x8
+ _ => unreachable!(),
+ }
+ } else {
+ // We use i64x2 as the type for discarded outputs
+ 'q'
+ };
+ format!("{{{}{}}}", class, idx)
+ } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
+ // LLVM doesn't recognize x30
+ "{lr}".to_string()
+ } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
+ // LLVM doesn't recognize r14
+ "{lr}".to_string()
+ } else {
+ format!("{{{}}}", reg.name())
+ }
+ }
+ InlineAsmRegOrRegClass::RegClass(reg) => match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+ | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::x87_reg
+ | X86InlineAsmRegClass::mmx_reg
+ | X86InlineAsmRegClass::kreg0
+ | X86InlineAsmRegClass::tmm_reg,
+ ) => unreachable!("clobber-only"),
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
+ InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
+ InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+ .to_string(),
+ }
+}
+
+/// Converts a modifier into LLVM's equivalent modifier.
+fn modifier_to_llvm(
+ arch: InlineAsmArch,
+ reg: InlineAsmRegClass,
+ modifier: Option<char>,
+) -> Option<char> {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ if modifier == Some('v') { None } else { modifier }
+ }
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ if modifier.is_none() {
+ Some('q')
+ } else {
+ modifier
+ }
+ }
+ InlineAsmRegClass::Hexagon(_) => None,
+ InlineAsmRegClass::Mips(_) => None,
+ InlineAsmRegClass::Nvptx(_) => None,
+ InlineAsmRegClass::PowerPC(_) => None,
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
+ | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
+ None if arch == InlineAsmArch::X86_64 => Some('q'),
+ None => Some('k'),
+ Some('l') => Some('b'),
+ Some('h') => Some('h'),
+ Some('x') => Some('w'),
+ Some('e') => Some('k'),
+ Some('r') => Some('q'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
+ InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
+ (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
+ (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
+ (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
+ (_, Some('x')) => Some('x'),
+ (_, Some('y')) => Some('t'),
+ (_, Some('z')) => Some('g'),
+ _ => unreachable!(),
+ },
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::x87_reg
+ | X86InlineAsmRegClass::mmx_reg
+ | X86InlineAsmRegClass::kreg0
+ | X86InlineAsmRegClass::tmm_reg,
+ ) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
+ InlineAsmRegClass::Bpf(_) => None,
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
+ | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
+ | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
+ Some('h') => Some('B'),
+ Some('l') => Some('A'),
+ _ => None,
+ },
+ InlineAsmRegClass::Avr(_) => None,
+ InlineAsmRegClass::S390x(_) => None,
+ InlineAsmRegClass::Msp430(_) => None,
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
+
+/// Type to use for outputs that are discarded. It doesn't really matter what
+/// the type is, as long as it is valid for the constraint code.
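+///
+/// A few entries from the match below, as a sketch:
+///
+/// ```text
+/// X86(reg)  -> i32    X86(reg_byte) -> i8
+/// Arm(sreg) -> f32    Avr(reg_pair) -> i16
+/// ```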
+fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
+ match reg {
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
+ | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
+ cx.type_vector(cx.type_i64(), 2)
+ }
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+ | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
+ cx.type_vector(cx.type_i64(), 2)
+ }
+ InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
+ InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
+ InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+ | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
+ InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
+ | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
+ InlineAsmRegClass::X86(
+ X86InlineAsmRegClass::x87_reg
+ | X86InlineAsmRegClass::mmx_reg
+ | X86InlineAsmRegClass::kreg0
+ | X86InlineAsmRegClass::tmm_reg,
+ ) => {
+ unreachable!("clobber-only")
+ }
+ InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
+ InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
+ InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
+ InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
+ InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
+ InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
+ InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+ bug!("LLVM backend does not support SPIR-V")
+ }
+ InlineAsmRegClass::Err => unreachable!(),
+ }
+}
+
+/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
+/// the equivalent integer type.
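+///
+/// A sketch of the mapping (assuming a 64-bit target, where `type_isize()`
+/// is `i64`):
+///
+/// ```text
+/// Int(I8)  -> i8       Int(I32) -> i32
+/// F32      -> float    F64      -> double
+/// Pointer  -> i64      (an integer, never an LLVM pointer type)
+/// ```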
+fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
+ match scalar.primitive() {
+ Primitive::Int(Integer::I8, _) => cx.type_i8(),
+ Primitive::Int(Integer::I16, _) => cx.type_i16(),
+ Primitive::Int(Integer::I32, _) => cx.type_i32(),
+ Primitive::Int(Integer::I64, _) => cx.type_i64(),
+ Primitive::F32 => cx.type_f32(),
+ Primitive::F64 => cx.type_f64(),
+ Primitive::Pointer => cx.type_isize(),
+ _ => unreachable!(),
+ }
+}
+
+/// Fix up an input value to work around LLVM bugs.
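+///
+/// For example (a sketch of the first arm below), an `i8` scalar bound to an
+/// AArch64 `vreg` is widened into lane 0 of an undef vector, roughly:
+///
+/// ```text
+/// %fixed = insertelement <8 x i8> undef, i8 %value, i32 0
+/// ```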
+fn llvm_fixup_input<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ mut value: &'ll Value,
+ reg: InlineAsmRegClass,
+ layout: &TyAndLayout<'tcx>,
+) -> &'ll Value {
+ match (reg, layout.abi) {
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ if let Primitive::Int(Integer::I8, _) = s.primitive() {
+ let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
+ bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+ let elem_ty = llvm_asm_scalar_type(bx.cx, s);
+ let count = 16 / layout.size.bytes();
+ let vec_ty = bx.cx.type_vector(elem_ty, count);
+ if let Primitive::Pointer = s.primitive() {
+ value = bx.ptrtoint(value, bx.cx.type_isize());
+ }
+ bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
+ }
+ (
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+ Abi::Vector { element, count },
+ ) if layout.size.bytes() == 8 => {
+ let elem_ty = llvm_asm_scalar_type(bx.cx, element);
+ let vec_ty = bx.cx.type_vector(elem_ty, count);
+ let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
+ bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
+ }
+ (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ if s.primitive() == Primitive::F64 =>
+ {
+ bx.bitcast(value, bx.cx.type_i64())
+ }
+ (
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+ Abi::Vector { .. },
+ ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
+ (
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I32, _) = s.primitive() {
+ bx.bitcast(value, bx.cx.type_f32())
+ } else {
+ value
+ }
+ }
+ (
+ InlineAsmRegClass::Arm(
+ ArmInlineAsmRegClass::dreg
+ | ArmInlineAsmRegClass::dreg_low8
+ | ArmInlineAsmRegClass::dreg_low16,
+ ),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I64, _) = s.primitive() {
+ bx.bitcast(value, bx.cx.type_f64())
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+ match s.primitive() {
+                // MIPS only supports register-length arithmetic.
+ Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
+ Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+ Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
+ _ => value,
+ }
+ }
+ _ => value,
+ }
+}
+
+/// Fix up an output value to work around LLVM bugs.
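+///
+/// This is the inverse of `llvm_fixup_input`; e.g. the AArch64 `vreg` case
+/// above becomes, roughly:
+///
+/// ```text
+/// %value = extractelement <8 x i8> %fixed, i32 0
+/// ```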
+fn llvm_fixup_output<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ mut value: &'ll Value,
+ reg: InlineAsmRegClass,
+ layout: &TyAndLayout<'tcx>,
+) -> &'ll Value {
+ match (reg, layout.abi) {
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ if let Primitive::Int(Integer::I8, _) = s.primitive() {
+ bx.extract_element(value, bx.const_i32(0))
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+ value = bx.extract_element(value, bx.const_i32(0));
+ if let Primitive::Pointer = s.primitive() {
+ value = bx.inttoptr(value, layout.llvm_type(bx.cx));
+ }
+ value
+ }
+ (
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+ Abi::Vector { element, count },
+ ) if layout.size.bytes() == 8 => {
+ let elem_ty = llvm_asm_scalar_type(bx.cx, element);
+ let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
+ let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
+ bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
+ }
+ (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ if s.primitive() == Primitive::F64 =>
+ {
+ bx.bitcast(value, bx.cx.type_f64())
+ }
+ (
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+ Abi::Vector { .. },
+ ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
+ (
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I32, _) = s.primitive() {
+ bx.bitcast(value, bx.cx.type_i32())
+ } else {
+ value
+ }
+ }
+ (
+ InlineAsmRegClass::Arm(
+ ArmInlineAsmRegClass::dreg
+ | ArmInlineAsmRegClass::dreg_low8
+ | ArmInlineAsmRegClass::dreg_low16,
+ ),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I64, _) = s.primitive() {
+ bx.bitcast(value, bx.cx.type_i64())
+ } else {
+ value
+ }
+ }
+ (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+ match s.primitive() {
+                // MIPS only supports register-length arithmetic.
+ Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
+ Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
+ Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+ Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
+ _ => value,
+ }
+ }
+ _ => value,
+ }
+}
+
+/// Output type to use for llvm_fixup_output.
+fn llvm_fixup_output_type<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ reg: InlineAsmRegClass,
+ layout: &TyAndLayout<'tcx>,
+) -> &'ll Type {
+ match (reg, layout.abi) {
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+ if let Primitive::Int(Integer::I8, _) = s.primitive() {
+ cx.type_vector(cx.type_i8(), 8)
+ } else {
+ layout.llvm_type(cx)
+ }
+ }
+ (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
+ let elem_ty = llvm_asm_scalar_type(cx, s);
+ let count = 16 / layout.size.bytes();
+ cx.type_vector(elem_ty, count)
+ }
+ (
+ InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
+ Abi::Vector { element, count },
+ ) if layout.size.bytes() == 8 => {
+ let elem_ty = llvm_asm_scalar_type(cx, element);
+ cx.type_vector(elem_ty, count * 2)
+ }
+ (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+ if s.primitive() == Primitive::F64 =>
+ {
+ cx.type_i64()
+ }
+ (
+ InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
+ Abi::Vector { .. },
+ ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
+ (
+ InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I32, _) = s.primitive() {
+ cx.type_f32()
+ } else {
+ layout.llvm_type(cx)
+ }
+ }
+ (
+ InlineAsmRegClass::Arm(
+ ArmInlineAsmRegClass::dreg
+ | ArmInlineAsmRegClass::dreg_low8
+ | ArmInlineAsmRegClass::dreg_low16,
+ ),
+ Abi::Scalar(s),
+ ) => {
+ if let Primitive::Int(Integer::I64, _) = s.primitive() {
+ cx.type_f64()
+ } else {
+ layout.llvm_type(cx)
+ }
+ }
+ (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+ match s.primitive() {
+                // MIPS only supports register-length arithmetic.
+ Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
+ Primitive::F32 => cx.type_i32(),
+ Primitive::F64 => cx.type_i64(),
+ _ => layout.llvm_type(cx),
+ }
+ }
+ _ => layout.llvm_type(cx),
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs
new file mode 100644
index 000000000..aabbe8ac2
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/attributes.rs
@@ -0,0 +1,449 @@
+//! Set and unset common attributes on LLVM values.
+
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::small_str::SmallStr;
+use rustc_hir::def_id::DefId;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::config::OptLevel;
+use rustc_span::symbol::sym;
+use rustc_target::spec::abi::Abi;
+use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector};
+use smallvec::SmallVec;
+
+use crate::attributes;
+use crate::llvm::AttributePlace::Function;
+use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace};
+use crate::llvm_util;
+pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
+
+use crate::context::CodegenCx;
+use crate::value::Value;
+
+pub fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
+ if !attrs.is_empty() {
+ llvm::AddFunctionAttributes(llfn, idx, attrs);
+ }
+}
+
+pub fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) {
+ if !attrs.is_empty() {
+ llvm::AddCallSiteAttributes(callsite, idx, attrs);
+ }
+}
+
+/// Get the LLVM attribute for the provided inline heuristic.
+#[inline]
+fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> {
+ match inline {
+ InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)),
+ InlineAttr::Always => Some(AttributeKind::AlwaysInline.create_attr(cx.llcx)),
+ InlineAttr::Never => {
+ if cx.sess().target.arch != "amdgpu" {
+ Some(AttributeKind::NoInline.create_attr(cx.llcx))
+ } else {
+ None
+ }
+ }
+ InlineAttr::None => None,
+ }
+}
+
+/// Get LLVM sanitize attributes.
+#[inline]
+pub fn sanitize_attrs<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ no_sanitize: SanitizerSet,
+) -> SmallVec<[&'ll Attribute; 4]> {
+ let mut attrs = SmallVec::new();
+ let enabled = cx.tcx.sess.opts.unstable_opts.sanitizer - no_sanitize;
+ if enabled.contains(SanitizerSet::ADDRESS) {
+ attrs.push(llvm::AttributeKind::SanitizeAddress.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::MEMORY) {
+ attrs.push(llvm::AttributeKind::SanitizeMemory.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::THREAD) {
+ attrs.push(llvm::AttributeKind::SanitizeThread.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::HWADDRESS) {
+ attrs.push(llvm::AttributeKind::SanitizeHWAddress.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::SHADOWCALLSTACK) {
+ attrs.push(llvm::AttributeKind::ShadowCallStack.create_attr(cx.llcx));
+ }
+ if enabled.contains(SanitizerSet::MEMTAG) {
+ // Check to make sure the mte target feature is actually enabled.
+ let features = cx.tcx.global_backend_features(());
+ let mte_feature =
+ features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..]));
+ if let None | Some("-mte") = mte_feature {
+ cx.tcx.sess.err("`-Zsanitizer=memtag` requires `-Ctarget-feature=+mte`");
+ }
+
+ attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx));
+ }
+ attrs
+}
+
+/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
+#[inline]
+pub fn uwtable_attr(llcx: &llvm::Context) -> &Attribute {
+    // NOTE: We should determine if we even need async unwind tables, as they
+    //       have more overhead; if we can use sync unwind tables we probably
+    //       should.
+ llvm::CreateUWTableAttr(llcx, true)
+}
+
+pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ let mut fp = cx.sess().target.frame_pointer;
+    // The "mcount" function relies on the stack pointer.
+ // See <https://sourceware.org/binutils/docs/gprof/Implementation.html>.
+ if cx.sess().instrument_mcount() || matches!(cx.sess().opts.cg.force_frame_pointers, Some(true))
+ {
+ fp = FramePointer::Always;
+ }
+ let attr_value = match fp {
+ FramePointer::Always => "all",
+ FramePointer::NonLeaf => "non-leaf",
+ FramePointer::MayOmit => return None,
+ };
+ Some(llvm::CreateAttrStringValue(cx.llcx, "frame-pointer", attr_value))
+}
+
+/// Tell LLVM which instrumentation function to insert.
+#[inline]
+fn instrument_function_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ if cx.sess().instrument_mcount() {
+ // Similar to `clang -pg` behavior. Handled by the
+ // `post-inline-ee-instrument` LLVM pass.
+
+        // The function name varies across platforms.
+ // See test/CodeGen/mcount.c in clang.
+ let mcount_name = cx.sess().target.mcount.as_ref();
+
+ Some(llvm::CreateAttrStringValue(
+ cx.llcx,
+ "instrument-function-entry-inlined",
+ &mcount_name,
+ ))
+ } else {
+ None
+ }
+}
+
+fn probestack_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ // Currently stack probes seem somewhat incompatible with the address
+ // sanitizer and thread sanitizer. With asan we're already protected from
+ // stack overflow anyway so we don't really need stack probes regardless.
+ if cx
+ .sess()
+ .opts
+ .unstable_opts
+ .sanitizer
+ .intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
+ {
+ return None;
+ }
+
+ // probestack doesn't play nice either with `-C profile-generate`.
+ if cx.sess().opts.cg.profile_generate.enabled() {
+ return None;
+ }
+
+ // probestack doesn't play nice either with gcov profiling.
+ if cx.sess().opts.unstable_opts.profile {
+ return None;
+ }
+
+ let attr_value = match cx.sess().target.stack_probes {
+ StackProbeType::None => return None,
+ // Request LLVM to generate the probes inline. If the given LLVM version does not support
+ // this, no probe is generated at all (even if the attribute is specified).
+ StackProbeType::Inline => "inline-asm",
+ // Flag our internal `__rust_probestack` function as the stack probe symbol.
+ // This is defined in the `compiler-builtins` crate for each architecture.
+ StackProbeType::Call => "__rust_probestack",
+ // Pick from the two above based on the LLVM version.
+ StackProbeType::InlineOrCall { min_llvm_version_for_inline } => {
+ if llvm_util::get_version() < min_llvm_version_for_inline {
+ "__rust_probestack"
+ } else {
+ "inline-asm"
+ }
+ }
+ };
+ Some(llvm::CreateAttrStringValue(cx.llcx, "probe-stack", attr_value))
+}
+
+fn stackprotector_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ let sspattr = match cx.sess().stack_protector() {
+ StackProtector::None => return None,
+ StackProtector::All => AttributeKind::StackProtectReq,
+ StackProtector::Strong => AttributeKind::StackProtectStrong,
+ StackProtector::Basic => AttributeKind::StackProtect,
+ };
+
+ Some(sspattr.create_attr(cx.llcx))
+}
+
+pub fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute {
+ let target_cpu = llvm_util::target_cpu(cx.tcx.sess);
+ llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu)
+}
+
+pub fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ llvm_util::tune_cpu(cx.tcx.sess)
+ .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu))
+}
+
+/// Get the `NonLazyBind` LLVM attribute,
+/// if the codegen options allow skipping the PLT.
+pub fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> {
+ // Don't generate calls through PLT if it's not necessary
+ if !cx.sess().needs_plt() {
+ Some(AttributeKind::NonLazyBind.create_attr(cx.llcx))
+ } else {
+ None
+ }
+}
+
+/// Get the default optimizations attrs for a function.
+#[inline]
+pub(crate) fn default_optimisation_attrs<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+) -> SmallVec<[&'ll Attribute; 2]> {
+ let mut attrs = SmallVec::new();
+ match cx.sess().opts.optimize {
+ OptLevel::Size => {
+ attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+ }
+ OptLevel::SizeMin => {
+ attrs.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
+ attrs.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+ }
+ _ => {}
+ }
+ attrs
+}
+
+fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute {
+ llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc")
+}
+
+/// Composite function which sets LLVM attributes for a function depending on
+/// its AST (`#[attribute]`) attributes.
+pub fn from_fn_attrs<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ llfn: &'ll Value,
+ instance: ty::Instance<'tcx>,
+) {
+ let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
+
+ let mut to_add = SmallVec::<[_; 16]>::new();
+
+ match codegen_fn_attrs.optimize {
+ OptimizeAttr::None => {
+ to_add.extend(default_optimisation_attrs(cx));
+ }
+ OptimizeAttr::Size => {
+ to_add.push(llvm::AttributeKind::MinSize.create_attr(cx.llcx));
+ to_add.push(llvm::AttributeKind::OptimizeForSize.create_attr(cx.llcx));
+ }
+ OptimizeAttr::Speed => {}
+ }
+
+ let inline = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ InlineAttr::Never
+ } else if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
+ InlineAttr::Hint
+ } else {
+ codegen_fn_attrs.inline
+ };
+ to_add.extend(inline_attr(cx, inline));
+
+ // The `uwtable` attribute according to LLVM is:
+ //
+ // This attribute indicates that the ABI being targeted requires that an
+ // unwind table entry be produced for this function even if we can show
+    // that no exceptions pass by it. This is normally the case for the
+    // ELF x86-64 ABI, but it can be disabled for some compilation units.
+ //
+ // Typically when we're compiling with `-C panic=abort` (which implies this
+ // `no_landing_pads` check) we don't need `uwtable` because we can't
+ // generate any exceptions! On Windows, however, exceptions include other
+ // events such as illegal instructions, segfaults, etc. This means that on
+ // Windows we end up still needing the `uwtable` attribute even if the `-C
+ // panic=abort` flag is passed.
+ //
+ // You can also find more info on why Windows always requires uwtables here:
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
+ if cx.sess().must_emit_unwind_tables() {
+ to_add.push(uwtable_attr(cx.llcx));
+ }
+
+ if cx.sess().opts.unstable_opts.profile_sample_use.is_some() {
+ to_add.push(llvm::CreateAttrString(cx.llcx, "use-sample-profile"));
+ }
+
+    // FIXME: none of these four functions interact with source-level attributes.
+ to_add.extend(frame_pointer_type_attr(cx));
+ to_add.extend(instrument_function_attr(cx));
+ to_add.extend(probestack_attr(cx));
+ to_add.extend(stackprotector_attr(cx));
+
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
+ to_add.push(AttributeKind::Cold.create_attr(cx.llcx));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_RETURNS_TWICE) {
+ to_add.push(AttributeKind::ReturnsTwice.create_attr(cx.llcx));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
+ to_add.push(AttributeKind::ReadOnly.create_attr(cx.llcx));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
+ to_add.push(AttributeKind::ReadNone.create_attr(cx.llcx));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ to_add.push(AttributeKind::Naked.create_attr(cx.llcx));
+ // HACK(jubilee): "indirect branch tracking" works by attaching prologues to functions.
+ // And it is a module-level attribute, so the alternative is pulling naked functions into new LLVM modules.
+ // Otherwise LLVM's "naked" functions come with endbr prefixes per https://github.com/rust-lang/rust/issues/98768
+ to_add.push(AttributeKind::NoCfCheck.create_attr(cx.llcx));
+ // Need this for AArch64.
+ to_add.push(llvm::CreateAttrStringValue(cx.llcx, "branch-target-enforcement", "false"));
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR)
+ || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED)
+ {
+ if llvm_util::get_version() >= (15, 0, 0) {
+ to_add.push(create_alloc_family_attr(cx.llcx));
+ // apply to argument place instead of function
+ let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]);
+ to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 0));
+ let mut flags = AllocKindFlags::Alloc | AllocKindFlags::Aligned;
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
+ flags |= AllocKindFlags::Uninitialized;
+ } else {
+ flags |= AllocKindFlags::Zeroed;
+ }
+ to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
+ }
+ // apply to return place instead of function (unlike all other attributes applied in this function)
+ let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::REALLOCATOR) {
+ if llvm_util::get_version() >= (15, 0, 0) {
+ to_add.push(create_alloc_family_attr(cx.llcx));
+ to_add.push(llvm::CreateAllocKindAttr(
+ cx.llcx,
+ AllocKindFlags::Realloc | AllocKindFlags::Aligned,
+ ));
+ // applies to argument place instead of function place
+ let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
+ // apply to argument place instead of function
+ let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(2), &[alloc_align]);
+ to_add.push(llvm::CreateAllocSizeAttr(cx.llcx, 3));
+ }
+ let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::DEALLOCATOR) {
+ if llvm_util::get_version() >= (15, 0, 0) {
+ to_add.push(create_alloc_family_attr(cx.llcx));
+ to_add.push(llvm::CreateAllocKindAttr(cx.llcx, AllocKindFlags::Free));
+ // applies to argument place instead of function place
+ let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx);
+ attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]);
+ }
+ }
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY) {
+ to_add.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"));
+ }
+ if let Some(align) = codegen_fn_attrs.alignment {
+ llvm::set_alignment(llfn, align as usize);
+ }
+ to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize));
+
+ // Always annotate functions with the target-cpu they are compiled for.
+    // Without this, ThinLTO won't inline Rust functions into Clang-generated
+ // functions (because Clang annotates functions this way too).
+ to_add.push(target_cpu_attr(cx));
+ // tune-cpu is only conveyed through the attribute for our purpose.
+ // The target doesn't care; the subtarget reads our attribute.
+ to_add.extend(tune_cpu_attr(cx));
+
+ let function_features =
+ codegen_fn_attrs.target_features.iter().map(|f| f.as_str()).collect::<Vec<&str>>();
+
+ if let Some(f) = llvm_util::check_tied_features(
+ cx.tcx.sess,
+ &function_features.iter().map(|f| (*f, true)).collect(),
+ ) {
+ let span = cx
+ .tcx
+ .get_attr(instance.def_id(), sym::target_feature)
+ .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span);
+ let msg = format!(
+ "the target features {} must all be either enabled or disabled together",
+ f.join(", ")
+ );
+ let mut err = cx.tcx.sess.struct_span_err(span, &msg);
+ err.help("add the missing features in a `target_feature` attribute");
+ err.emit();
+ return;
+ }
+
+ let mut function_features = function_features
+ .iter()
+ .flat_map(|feat| {
+ llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{}", f))
+ })
+ .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x {
+ InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(),
+ InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(),
+ }))
+ .collect::<Vec<String>>();
+
+ if cx.tcx.sess.target.is_like_wasm {
+ // If this function is an import from the environment but the wasm
+ // import has a specific module/name, apply them here.
+ if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
+ to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", &module));
+
+ let name =
+ codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
+ let name = name.as_str();
+ to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name));
+ }
+
+ // The `"wasm"` abi on wasm targets automatically enables the
+ // `+multivalue` feature because the purpose of the wasm abi is to match
+ // the WebAssembly specification, which has this feature. This won't be
+ // needed when LLVM enables this `multivalue` feature by default.
+ if !cx.tcx.is_closure(instance.def_id()) {
+ let abi = cx.tcx.fn_sig(instance.def_id()).abi();
+ if abi == Abi::Wasm {
+ function_features.push("+multivalue".to_string());
+ }
+ }
+ }
+
+ let global_features = cx.tcx.global_backend_features(()).iter().map(|s| s.as_str());
+ let function_features = function_features.iter().map(|s| s.as_str());
+ let target_features =
+ global_features.chain(function_features).intersperse(",").collect::<SmallStr<1024>>();
+ if !target_features.is_empty() {
+ to_add.push(llvm::CreateAttrStringValue(cx.llcx, "target-features", &target_features));
+ }
+
+ attributes::apply_to_llfn(llfn, Function, &to_add);
+}
+
+fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<&String> {
+ tcx.wasm_import_module_map(id.krate).get(&id)
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
new file mode 100644
index 000000000..27039cda2
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -0,0 +1,383 @@
+//! Helpers for dealing with static archives
+
+use std::env;
+use std::ffi::{CStr, CString, OsString};
+use std::io;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::ptr;
+use std::str;
+
+use crate::llvm::archive_ro::{ArchiveRO, Child};
+use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport};
+use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder};
+use rustc_session::cstore::{DllCallingConvention, DllImport};
+use rustc_session::Session;
+
+/// Helper for adding many files to an archive.
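+///
+/// A sketch of the intended flow (illustrative; `sess` is a `Session` and the
+/// paths are placeholders):
+///
+/// ```ignore (illustrative)
+/// let mut builder = LlvmArchiveBuilderBuilder.new_archive_builder(sess);
+/// builder.add_file(Path::new("foo.o"));
+/// builder.build(Path::new("libfoo.a")); // must be called, hence #[must_use]
+/// ```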
+#[must_use = "must call build() to finish building the archive"]
+pub struct LlvmArchiveBuilder<'a> {
+ sess: &'a Session,
+ additions: Vec<Addition>,
+}
+
+enum Addition {
+ File { path: PathBuf, name_in_archive: String },
+ Archive { path: PathBuf, archive: ArchiveRO, skip: Box<dyn FnMut(&str) -> bool> },
+}
+
+impl Addition {
+ fn path(&self) -> &Path {
+ match self {
+ Addition::File { path, .. } | Addition::Archive { path, .. } => path,
+ }
+ }
+}
+
+fn is_relevant_child(c: &Child<'_>) -> bool {
+ match c.name() {
+ Some(name) => !name.contains("SYMDEF"),
+ None => false,
+ }
+}
+
+/// Map machine type strings to values of LLVM's MachineTypes enum.
+fn llvm_machine_type(cpu: &str) -> LLVMMachineType {
+ match cpu {
+ "x86_64" => LLVMMachineType::AMD64,
+ "x86" => LLVMMachineType::I386,
+ "aarch64" => LLVMMachineType::ARM64,
+ "arm" => LLVMMachineType::ARM,
+ _ => panic!("unsupported cpu type {}", cpu),
+ }
+}
+
+impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
+ fn add_archive(
+ &mut self,
+ archive: &Path,
+ skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> io::Result<()> {
+ let archive_ro = match ArchiveRO::open(archive) {
+ Ok(ar) => ar,
+ Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
+ };
+ if self.additions.iter().any(|ar| ar.path() == archive) {
+ return Ok(());
+ }
+ self.additions.push(Addition::Archive {
+ path: archive.to_path_buf(),
+ archive: archive_ro,
+ skip: Box::new(skip),
+ });
+ Ok(())
+ }
+
+ /// Adds an arbitrary file to this archive
+ fn add_file(&mut self, file: &Path) {
+ let name = file.file_name().unwrap().to_str().unwrap();
+ self.additions
+ .push(Addition::File { path: file.to_path_buf(), name_in_archive: name.to_owned() });
+ }
+
+ /// Combine the provided files, rlibs, and native libraries into a single
+ /// `Archive`.
+ fn build(mut self: Box<Self>, output: &Path) -> bool {
+ match self.build_with_llvm(output) {
+ Ok(any_members) => any_members,
+ Err(e) => self.sess.fatal(&format!("failed to build archive: {}", e)),
+ }
+ }
+}
+
+pub struct LlvmArchiveBuilderBuilder;
+
+impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a> {
+ Box::new(LlvmArchiveBuilder { sess, additions: Vec::new() })
+ }
+
+ fn create_dll_import_lib(
+ &self,
+ sess: &Session,
+ lib_name: &str,
+ dll_imports: &[DllImport],
+ tmpdir: &Path,
+ ) -> PathBuf {
+ let output_path = {
+ let mut output_path: PathBuf = tmpdir.to_path_buf();
+ output_path.push(format!("{}_imports", lib_name));
+ output_path.with_extension("lib")
+ };
+
+ let target = &sess.target;
+ let mingw_gnu_toolchain = target.vendor == "pc"
+ && target.os == "windows"
+ && target.env == "gnu"
+ && target.abi.is_empty();
+
+ let import_name_and_ordinal_vector: Vec<(String, Option<u16>)> = dll_imports
+ .iter()
+ .map(|import: &DllImport| {
+ if sess.target.arch == "x86" {
+ (
+ LlvmArchiveBuilder::i686_decorated_name(import, mingw_gnu_toolchain),
+ import.ordinal,
+ )
+ } else {
+ (import.name.to_string(), import.ordinal)
+ }
+ })
+ .collect();
+
+ if mingw_gnu_toolchain {
+ // The binutils linker used on -windows-gnu targets cannot read the import
+ // libraries generated by LLVM: in our attempts, the linker produced an .EXE
+            // that loaded but crashed with an access violation upon calling one
+            // of the imported functions. Therefore, use binutils to create the
+            // import library instead, by writing a .DEF file to the temp dir and
+            // calling binutils's dlltool.
+ let def_file_path = tmpdir.join(format!("{}_imports", lib_name)).with_extension("def");
+
+ let def_file_content = format!(
+ "EXPORTS\n{}",
+ import_name_and_ordinal_vector
+ .into_iter()
+ .map(|(name, ordinal)| {
+ match ordinal {
+ Some(n) => format!("{} @{} NONAME", name, n),
+ None => name,
+ }
+ })
+ .collect::<Vec<String>>()
+ .join("\n")
+ );
+
+ match std::fs::write(&def_file_path, def_file_content) {
+ Ok(_) => {}
+ Err(e) => {
+ sess.fatal(&format!("Error writing .DEF file: {}", e));
+ }
+ };
+
+ let dlltool = find_binutils_dlltool(sess);
+ let result = std::process::Command::new(dlltool)
+ .args([
+ "-d",
+ def_file_path.to_str().unwrap(),
+ "-D",
+ lib_name,
+ "-l",
+ output_path.to_str().unwrap(),
+ ])
+ .output();
+
+ match result {
+ Err(e) => {
+ sess.fatal(&format!("Error calling dlltool: {}", e));
+ }
+ Ok(output) if !output.status.success() => sess.fatal(&format!(
+ "Dlltool could not create import library: {}\n{}",
+ String::from_utf8_lossy(&output.stdout),
+ String::from_utf8_lossy(&output.stderr)
+ )),
+ _ => {}
+ }
+ } else {
+ // we've checked for \0 characters in the library name already
+ let dll_name_z = CString::new(lib_name).unwrap();
+
+ let output_path_z = rustc_fs_util::path_to_c_string(&output_path);
+
+ tracing::trace!("invoking LLVMRustWriteImportLibrary");
+ tracing::trace!(" dll_name {:#?}", dll_name_z);
+ tracing::trace!(" output_path {}", output_path.display());
+ tracing::trace!(
+ " import names: {}",
+ dll_imports
+ .iter()
+ .map(|import| import.name.to_string())
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+
+ // All import names are Rust identifiers and therefore cannot contain \0 characters.
+ // FIXME: when support for #[link_name] is implemented, ensure that the import names
+ // still don't contain any \0 characters. Also need to check that the names don't
+ // contain substrings like " @" or "NONAME" that are keywords or otherwise reserved
+ // in definition files.
+ let cstring_import_name_and_ordinal_vector: Vec<(CString, Option<u16>)> =
+ import_name_and_ordinal_vector
+ .into_iter()
+ .map(|(name, ordinal)| (CString::new(name).unwrap(), ordinal))
+ .collect();
+
+ let ffi_exports: Vec<LLVMRustCOFFShortExport> = cstring_import_name_and_ordinal_vector
+ .iter()
+ .map(|(name_z, ordinal)| LLVMRustCOFFShortExport::new(name_z.as_ptr(), *ordinal))
+ .collect();
+ let result = unsafe {
+ crate::llvm::LLVMRustWriteImportLibrary(
+ dll_name_z.as_ptr(),
+ output_path_z.as_ptr(),
+ ffi_exports.as_ptr(),
+ ffi_exports.len(),
+ llvm_machine_type(&sess.target.arch) as u16,
+ !sess.target.is_like_msvc,
+ )
+ };
+
+ if result == crate::llvm::LLVMRustResult::Failure {
+ sess.fatal(&format!(
+ "Error creating import library for {}: {}",
+ lib_name,
+ llvm::last_error().unwrap_or("unknown LLVM error".to_string())
+ ));
+ }
+ };
+
+ output_path
+ }
+}
+
+impl<'a> LlvmArchiveBuilder<'a> {
+ fn build_with_llvm(&mut self, output: &Path) -> io::Result<bool> {
+ let kind = &*self.sess.target.archive_format;
+ let kind = kind.parse::<ArchiveKind>().map_err(|_| kind).unwrap_or_else(|kind| {
+ self.sess.fatal(&format!("Don't know how to build archive of type: {}", kind))
+ });
+
+ let mut additions = mem::take(&mut self.additions);
+ let mut strings = Vec::new();
+ let mut members = Vec::new();
+
+ let dst = CString::new(output.to_str().unwrap())?;
+
+ unsafe {
+ for addition in &mut additions {
+ match addition {
+ Addition::File { path, name_in_archive } => {
+ let path = CString::new(path.to_str().unwrap())?;
+ let name = CString::new(name_in_archive.clone())?;
+ members.push(llvm::LLVMRustArchiveMemberNew(
+ path.as_ptr(),
+ name.as_ptr(),
+ None,
+ ));
+ strings.push(path);
+ strings.push(name);
+ }
+ Addition::Archive { archive, skip, .. } => {
+ for child in archive.iter() {
+ let child = child.map_err(string_to_io_error)?;
+ if !is_relevant_child(&child) {
+ continue;
+ }
+ let child_name = child.name().unwrap();
+ if skip(child_name) {
+ continue;
+ }
+
+ // It appears that LLVM's archive writer is a little
+ // buggy if the name we pass down isn't just the
+ // filename component, so chop that off here and
+ // pass it in.
+ //
+ // See LLVM bug 25877 for more info.
+ let child_name =
+ Path::new(child_name).file_name().unwrap().to_str().unwrap();
+ let name = CString::new(child_name)?;
+ let m = llvm::LLVMRustArchiveMemberNew(
+ ptr::null(),
+ name.as_ptr(),
+ Some(child.raw),
+ );
+ members.push(m);
+ strings.push(name);
+ }
+ }
+ }
+ }
+
+ let r = llvm::LLVMRustWriteArchive(
+ dst.as_ptr(),
+ members.len() as libc::size_t,
+ members.as_ptr() as *const &_,
+ true,
+ kind,
+ );
+ let ret = if r.into_result().is_err() {
+ let err = llvm::LLVMRustGetLastError();
+ let msg = if err.is_null() {
+ "failed to write archive".into()
+ } else {
+ String::from_utf8_lossy(CStr::from_ptr(err).to_bytes())
+ };
+ Err(io::Error::new(io::ErrorKind::Other, msg))
+ } else {
+ Ok(!members.is_empty())
+ };
+ for member in members {
+ llvm::LLVMRustArchiveMemberFree(member);
+ }
+ ret
+ }
+ }
+
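+    /// Decorate an i686 import name according to its calling convention.
+    /// A sketch of the output (assuming the non-mingw `_` prefix):
+    ///
+    /// ```text
+    /// C             -> _name
+    /// Stdcall(8)    -> _name@8
+    /// Fastcall(8)   -> @name@8
+    /// Vectorcall(8) -> name@@8
+    /// ```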
+ fn i686_decorated_name(import: &DllImport, mingw: bool) -> String {
+ let name = import.name;
+ let prefix = if mingw { "" } else { "_" };
+
+ match import.calling_convention {
+ DllCallingConvention::C => format!("{}{}", prefix, name),
+ DllCallingConvention::Stdcall(arg_list_size) => {
+ format!("{}{}@{}", prefix, name, arg_list_size)
+ }
+ DllCallingConvention::Fastcall(arg_list_size) => format!("@{}@{}", name, arg_list_size),
+ DllCallingConvention::Vectorcall(arg_list_size) => {
+ format!("{}@@{}", name, arg_list_size)
+ }
+ }
+ }
+}
+
+fn string_to_io_error(s: String) -> io::Error {
+ io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
+}
+
+fn find_binutils_dlltool(sess: &Session) -> OsString {
+ assert!(sess.target.options.is_like_windows && !sess.target.options.is_like_msvc);
+ if let Some(dlltool_path) = &sess.opts.unstable_opts.dlltool {
+ return dlltool_path.clone().into_os_string();
+ }
+
+ let mut tool_name: OsString = if sess.host.arch != sess.target.arch {
+ // We are cross-compiling, so we need the tool with the prefix matching our target
+ if sess.target.arch == "x86" {
+ "i686-w64-mingw32-dlltool"
+ } else {
+ "x86_64-w64-mingw32-dlltool"
+ }
+ } else {
+ // We are not cross-compiling, so we just want `dlltool`
+ "dlltool"
+ }
+ .into();
+
+ if sess.host.options.is_like_windows {
+ // If we're compiling on Windows, add the .exe suffix
+ tool_name.push(".exe");
+ }
+
+ // NOTE: it's not clear how useful it is to explicitly search PATH.
+ for dir in env::split_paths(&env::var_os("PATH").unwrap_or_default()) {
+ let full_path = dir.join(&tool_name);
+ if full_path.is_file() {
+ return full_path.into_os_string();
+ }
+ }
+
+ // The user didn't specify the location of the dlltool binary, and we weren't able
+ // to find the appropriate one on the PATH. Just return the name of the tool
+ // and let the invocation fail with a hopefully useful error message.
+ tool_name
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
new file mode 100644
index 000000000..3731c6bcf
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -0,0 +1,936 @@
+use crate::back::write::{
+ self, save_temp_bitcode, to_llvm_opt_settings, with_llvm_pmb, DiagnosticHandlers,
+};
+use crate::llvm::archive_ro::ArchiveRO;
+use crate::llvm::{self, build_string, False, True};
+use crate::{llvm_util, LlvmCodegenBackend, ModuleLlvm};
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared};
+use rustc_codegen_ssa::back::symbol_export;
+use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{FatalError, Handler};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::bug;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{self, CrateType, Lto};
+use tracing::{debug, info};
+
+use std::ffi::{CStr, CString};
+use std::fs::File;
+use std::io;
+use std::iter;
+use std::path::Path;
+use std::ptr;
+use std::slice;
+use std::sync::Arc;
+
+/// We keep track of the computed LTO cache keys from the previous
+/// session to determine which CGUs we can reuse.
+pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin";
+
+pub fn crate_type_allows_lto(crate_type: CrateType) -> bool {
+ match crate_type {
+ CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => true,
+ CrateType::Dylib | CrateType::Rlib | CrateType::ProcMacro => false,
+ }
+}
+
+fn prepare_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> {
+ let export_threshold = match cgcx.lto {
+ // We're just doing LTO for our one crate
+ Lto::ThinLocal => SymbolExportLevel::Rust,
+
+ // We're doing LTO for the entire crate graph
+ Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types),
+
+ Lto::No => panic!("didn't request LTO but we're doing LTO"),
+ };
+
+ let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
+ if info.level.is_below_threshold(export_threshold) || info.used {
+ Some(CString::new(name.as_str()).unwrap())
+ } else {
+ None
+ }
+ };
+ let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ let mut symbols_below_threshold = {
+ let _timer = cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+ exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>()
+ };
+ info!("{} symbols to preserve in this crate", symbols_below_threshold.len());
+
+ // If we're performing LTO for the entire crate graph, then for each of our
+ // upstream dependencies, find the corresponding rlib and load the bitcode
+ // from the archive.
+ //
+    // We save off all the bitcode and LLVM module ids for later processing
+    // with either fat or thin LTO.
+ let mut upstream_modules = Vec::new();
+ if cgcx.lto != Lto::ThinLocal {
+ if cgcx.opts.cg.prefer_dynamic {
+ diag_handler
+ .struct_err("cannot prefer dynamic linking when performing LTO")
+ .note(
+ "only 'staticlib', 'bin', and 'cdylib' outputs are \
+ supported with LTO",
+ )
+ .emit();
+ return Err(FatalError);
+ }
+
+ // Make sure we actually can run LTO
+ for crate_type in cgcx.crate_types.iter() {
+ if !crate_type_allows_lto(*crate_type) {
+ let e = diag_handler.fatal(
+ "lto can only be run for executables, cdylibs and \
+ static library outputs",
+ );
+ return Err(e);
+ }
+ }
+
+ for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
+ let exported_symbols =
+ cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
+ {
+ let _timer =
+ cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold");
+ symbols_below_threshold
+ .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
+ }
+
+ let archive = ArchiveRO::open(path).expect("wanted an rlib");
+ let obj_files = archive
+ .iter()
+ .filter_map(|child| child.ok().and_then(|c| c.name().map(|name| (name, c))))
+ .filter(|&(name, _)| looks_like_rust_object_file(name));
+ for (name, child) in obj_files {
+ info!("adding bitcode from {}", name);
+ match get_bitcode_slice_from_object_data(child.data()) {
+ Ok(data) => {
+ let module = SerializedModule::FromRlib(data.to_vec());
+ upstream_modules.push((module, CString::new(name).unwrap()));
+ }
+ Err(msg) => return Err(diag_handler.fatal(&msg)),
+ }
+ }
+ }
+ }
+
+ Ok((symbols_below_threshold, upstream_modules))
+}
+
+fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], String> {
+ let mut len = 0;
+ let data =
+ unsafe { llvm::LLVMRustGetBitcodeSliceFromObjectData(obj.as_ptr(), obj.len(), &mut len) };
+ if !data.is_null() {
+ assert!(len != 0);
+ let bc = unsafe { slice::from_raw_parts(data, len) };
+
+ // `bc` must be a sub-slice of `obj`.
+ assert!(obj.as_ptr() <= bc.as_ptr());
+ assert!(bc[bc.len()..bc.len()].as_ptr() <= obj[obj.len()..obj.len()].as_ptr());
+
+ Ok(bc)
+ } else {
+ assert!(len == 0);
+ let msg = llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string());
+ Err(format!("failed to get bitcode from object file for LTO ({})", msg))
+ }
+}
+
+/// Performs fat LTO by merging all modules into a single one and returning it
+/// for further optimization.
+pub(crate) fn run_fat(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
+ let symbols_below_threshold =
+ symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+ fat_lto(
+ cgcx,
+ &diag_handler,
+ modules,
+ cached_modules,
+ upstream_modules,
+ &symbols_below_threshold,
+ )
+}
+
+/// Performs thin LTO by running the necessary global analysis and returning
+/// two lists: one of modules that need optimization, and another of modules
+/// that can simply be copied over from the incr. comp. cache.
+pub(crate) fn run_thin(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ modules: Vec<(String, ThinBuffer)>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, &diag_handler)?;
+ let symbols_below_threshold =
+ symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();
+ if cgcx.opts.cg.linker_plugin_lto.enabled() {
+ unreachable!(
+ "We should never reach this case if the LTO step \
+ is deferred to the linker"
+ );
+ }
+ thin_lto(
+ cgcx,
+ &diag_handler,
+ modules,
+ upstream_modules,
+ cached_modules,
+ &symbols_below_threshold,
+ )
+}
+
+pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBuffer) {
+ let name = module.name.clone();
+ let buffer = ThinBuffer::new(module.module_llvm.llmod(), true);
+ (name, buffer)
+}
+
+fn fat_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ modules: Vec<FatLTOInput<LlvmCodegenBackend>>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+ mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+ symbols_below_threshold: &[*const libc::c_char],
+) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> {
+ let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module");
+ info!("going for a fat lto");
+
+ // Sort out all our lists of incoming modules into two lists.
+ //
+    // * `serialized_modules` (also an argument to this function) contains all
+ // modules that are serialized in-memory.
+ // * `in_memory` contains modules which are already parsed and in-memory,
+ // such as from multi-CGU builds.
+ //
+ // All of `cached_modules` (cached from previous incremental builds) can
+ // immediately go onto the `serialized_modules` modules list and then we can
+ // split the `modules` array into these two lists.
+ let mut in_memory = Vec::new();
+ serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| {
+ info!("pushing cached module {:?}", wp.cgu_name);
+ (buffer, CString::new(wp.cgu_name).unwrap())
+ }));
+ for module in modules {
+ match module {
+ FatLTOInput::InMemory(m) => in_memory.push(m),
+ FatLTOInput::Serialized { name, buffer } => {
+ info!("pushing serialized module {:?}", name);
+ let buffer = SerializedModule::Local(buffer);
+ serialized_modules.push((buffer, CString::new(name).unwrap()));
+ }
+ }
+ }
+
+ // Find the "costliest" module and merge everything into that codegen unit.
+ // All the other modules will be serialized and reparsed into the new
+ // context, so this hopefully avoids serializing and parsing the largest
+ // codegen unit.
+ //
+ // Additionally use a regular module as the base here to ensure that various
+ // file copy operations in the backend work correctly. The only other kind
+ // of module here should be an allocator one, and if your crate is smaller
+ // than the allocator module then the size doesn't really matter anyway.
+ let costliest_module = in_memory
+ .iter()
+ .enumerate()
+ .filter(|&(_, module)| module.kind == ModuleKind::Regular)
+ .map(|(i, module)| {
+ let cost = unsafe { llvm::LLVMRustModuleCost(module.module_llvm.llmod()) };
+ (cost, i)
+ })
+ .max();
+
+ // If we found a costliest module, we're good to go. Otherwise all our
+ // inputs were serialized which could happen in the case, for example, that
+ // all our inputs were incrementally reread from the cache and we're just
+ // re-executing the LTO passes. If that's the case deserialize the first
+ // module and create a linker with it.
+ let module: ModuleCodegen<ModuleLlvm> = match costliest_module {
+ Some((_cost, i)) => in_memory.remove(i),
+ None => {
+ assert!(!serialized_modules.is_empty(), "must have at least one serialized module");
+ let (buffer, name) = serialized_modules.remove(0);
+ info!("no in-memory regular modules to choose from, parsing {:?}", name);
+ ModuleCodegen {
+ module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), diag_handler)?,
+ name: name.into_string().unwrap(),
+ kind: ModuleKind::Regular,
+ }
+ }
+ };
+ let mut serialized_bitcode = Vec::new();
+ {
+ let (llcx, llmod) = {
+ let llvm = &module.module_llvm;
+ (&llvm.llcx, llvm.llmod())
+ };
+ info!("using {:?} as a base module", module.name);
+
+ // The linking steps below may produce errors and diagnostics within LLVM
+ // which we'd like to handle and print, so set up our diagnostic handlers
+ // (which get unregistered when they go out of scope below).
+ let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ // For all other modules we codegened we'll need to link them into our own
+ // bitcode. All modules were codegened in their own LLVM context, however,
+ // and we want to move everything to the same LLVM context. Currently the
+        // way we know of to do that is to serialize them to a string and then parse
+ // them later. Not great but hey, that's why it's "fat" LTO, right?
+ for module in in_memory {
+ let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+ let llmod_id = CString::new(&module.name[..]).unwrap();
+ serialized_modules.push((SerializedModule::Local(buffer), llmod_id));
+ }
+ // Sort the modules to ensure we produce deterministic results.
+ serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1));
+
+        // All the serialized bitcode files are parsed and linked in as we did
+        // above; this is all mostly handled in C++. Like above, though, we don't
+        // know much about the memory management here, so we err on the side of
+        // being safe and persist everything with the original module.
+ let mut linker = Linker::new(llmod);
+ for (bc_decoded, name) in serialized_modules {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| {
+ recorder.record_arg(format!("{:?}", name))
+ });
+ info!("linking {:?}", name);
+ let data = bc_decoded.data();
+ linker.add(data).map_err(|()| {
+ let msg = format!("failed to load bitcode of module {:?}", name);
+ write::llvm_err(diag_handler, &msg)
+ })?;
+ serialized_bitcode.push(bc_decoded);
+ }
+ drop(linker);
+ save_temp_bitcode(cgcx, &module, "lto.input");
+
+ // Internalize everything below threshold to help strip out more modules and such.
+ unsafe {
+ let ptr = symbols_below_threshold.as_ptr();
+ llvm::LLVMRustRunRestrictionPass(
+ llmod,
+ ptr as *const *const libc::c_char,
+ symbols_below_threshold.len() as libc::size_t,
+ );
+ save_temp_bitcode(cgcx, &module, "lto.after-restriction");
+ }
+ }
+
+ Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode })
+}
+
+pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>);
+
+impl<'a> Linker<'a> {
+ pub(crate) fn new(llmod: &'a llvm::Module) -> Self {
+ unsafe { Linker(llvm::LLVMRustLinkerNew(llmod)) }
+ }
+
+ pub(crate) fn add(&mut self, bytecode: &[u8]) -> Result<(), ()> {
+ unsafe {
+ if llvm::LLVMRustLinkerAdd(
+ self.0,
+ bytecode.as_ptr() as *const libc::c_char,
+ bytecode.len(),
+ ) {
+ Ok(())
+ } else {
+ Err(())
+ }
+ }
+ }
+}
+
+impl Drop for Linker<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustLinkerFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+/// Prepare "thin" LTO to get run on these modules.
+///
+/// The general structure of ThinLTO is quite different from the structure of
+/// "fat" LTO above. With "fat" LTO all LLVM modules in question are merged into
+/// one giant LLVM module, and then we run more optimization passes over this
+/// big module after internalizing most symbols. Thin LTO, on the other hand,
+/// avoids this large bottleneck through more targeted optimization.
+///
+/// At a high level Thin LTO looks like:
+///
+/// 1. Prepare a "summary" of each LLVM module in question which describes
+/// the values inside, cost of the values, etc.
+/// 2. Merge the summaries of all modules in question into one "index"
+/// 3. Perform some global analysis on this index
+/// 4. For each module, use the index and analysis calculated previously to
+/// perform local transformations on the module, for example inlining
+/// small functions from other modules.
+/// 5. Run thin-specific optimization passes over each module, and then code
+/// generate everything at the end.
+///
+/// The summary for each module is intended to be quite cheap, and the global
+/// index is relatively cheap to create as well. As a result, the goal of
+/// ThinLTO is to reduce the bottleneck on LTO and enable LTO to be used in more
+/// situations. For example one cheap optimization is that we can parallelize
+/// all codegen modules, easily making use of all the cores on a machine.
+///
+/// With all that in mind, this function is designed specifically to calculate
+/// just the *index* for ThinLTO. This index will then be shared amongst
+/// all of the `LtoModuleCodegen` units returned below and destroyed once
+/// they all go out of scope.
+fn thin_lto(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ modules: Vec<(String, ThinBuffer)>,
+ serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+ cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+ symbols_below_threshold: &[*const libc::c_char],
+) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> {
+ let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis");
+ unsafe {
+ info!("going for that thin, thin LTO");
+
+ let green_modules: FxHashMap<_, _> =
+ cached_modules.iter().map(|&(_, ref wp)| (wp.cgu_name.clone(), wp.clone())).collect();
+
+ let full_scope_len = modules.len() + serialized_modules.len() + cached_modules.len();
+ let mut thin_buffers = Vec::with_capacity(modules.len());
+ let mut module_names = Vec::with_capacity(full_scope_len);
+ let mut thin_modules = Vec::with_capacity(full_scope_len);
+
+ for (i, (name, buffer)) in modules.into_iter().enumerate() {
+ info!("local module: {} - {}", i, name);
+ let cname = CString::new(name.clone()).unwrap();
+ thin_modules.push(llvm::ThinLTOModule {
+ identifier: cname.as_ptr(),
+ data: buffer.data().as_ptr(),
+ len: buffer.data().len(),
+ });
+ thin_buffers.push(buffer);
+ module_names.push(cname);
+ }
+
+ // FIXME: All upstream crates are deserialized internally in the
+ // function below to extract their summary and modules. Note that
+ // unlike the loop above we *must* decode and/or read something
+ // here as these are all just serialized files on disk. An
+ // improvement, however, to make here would be to store the
+ // module summary separately from the actual module itself. Right
+ // now this is stored in one large bitcode file, and the entire
+ // file is deflate-compressed. We could try to bypass some of the
+ // decompression by storing the index uncompressed and only
+ // lazily decompressing the bytecode if necessary.
+ //
+ // Note that truly taking advantage of this optimization will
+ // likely be further down the road. We'd have to implement
+ // incremental ThinLTO first, where we could sometimes avoid
+ // looking at upstream module contents entirely (we must always,
+ // unconditionally, look at the index).
+ let mut serialized = Vec::with_capacity(serialized_modules.len() + cached_modules.len());
+
+ let cached_modules =
+ cached_modules.into_iter().map(|(sm, wp)| (sm, CString::new(wp.cgu_name).unwrap()));
+
+ for (module, name) in serialized_modules.into_iter().chain(cached_modules) {
+ info!("upstream or cached module {:?}", name);
+ thin_modules.push(llvm::ThinLTOModule {
+ identifier: name.as_ptr(),
+ data: module.data().as_ptr(),
+ len: module.data().len(),
+ });
+ serialized.push(module);
+ module_names.push(name);
+ }
+
+ // Sanity check
+ assert_eq!(thin_modules.len(), module_names.len());
+
+ // Delegate to the C++ bindings to create some data here. Once this is a
+ // tried-and-true interface we may wish to try to upstream some of this
+ // to LLVM itself; right now we reimplement a lot of what they do
+ // upstream...
+ let data = llvm::LLVMRustCreateThinLTOData(
+ thin_modules.as_ptr(),
+ thin_modules.len() as u32,
+ symbols_below_threshold.as_ptr(),
+ symbols_below_threshold.len() as u32,
+ )
+ .ok_or_else(|| write::llvm_err(diag_handler, "failed to prepare thin LTO context"))?;
+
+ let data = ThinData(data);
+
+ info!("thin LTO data created");
+
+ let (key_map_path, prev_key_map, curr_key_map) = if let Some(ref incr_comp_session_dir) =
+ cgcx.incr_comp_session_dir
+ {
+ let path = incr_comp_session_dir.join(THIN_LTO_KEYS_INCR_COMP_FILE_NAME);
+ // If the previous file was deleted, or we get an IO error
+ // reading the file, then we'll just use `None` as the
+ // prev_key_map, which will force the code to be recompiled.
+ let prev =
+ if path.exists() { ThinLTOKeysMap::load_from_file(&path).ok() } else { None };
+ let curr = ThinLTOKeysMap::from_thin_lto_modules(&data, &thin_modules, &module_names);
+ (Some(path), prev, curr)
+ } else {
+ // If we don't compile incrementally, we don't need to load the
+ // import data from LLVM.
+ assert!(green_modules.is_empty());
+ let curr = ThinLTOKeysMap::default();
+ (None, None, curr)
+ };
+ info!("thin LTO cache key map loaded");
+ info!("prev_key_map: {:#?}", prev_key_map);
+ info!("curr_key_map: {:#?}", curr_key_map);
+
+ // Throw our data in an `Arc` as we'll be sharing it across threads. We
+ // also put all memory referenced by the C++ data (buffers, ids, etc.)
+ // into the `Arc`. After this we'll create a thin module
+ // codegen per module in this data.
+ let shared = Arc::new(ThinShared {
+ data,
+ thin_buffers,
+ serialized_modules: serialized,
+ module_names,
+ });
+
+ let mut copy_jobs = vec![];
+ let mut opt_jobs = vec![];
+
+ info!("checking which modules can be-reused and which have to be re-optimized.");
+ for (module_index, module_name) in shared.module_names.iter().enumerate() {
+ let module_name = module_name_to_str(module_name);
+ if let (Some(prev_key_map), true) =
+ (prev_key_map.as_ref(), green_modules.contains_key(module_name))
+ {
+ assert!(cgcx.incr_comp_session_dir.is_some());
+
+ // If a module exists in both the current and the previous session,
+ // and has the same LTO cache key in both sessions, then we can re-use it.
+ if prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name) {
+ let work_product = green_modules[module_name].clone();
+ copy_jobs.push(work_product);
+ info!(" - {}: re-used", module_name);
+ assert!(cgcx.incr_comp_session_dir.is_some());
+ cgcx.cgu_reuse_tracker.set_actual_reuse(module_name, CguReuse::PostLto);
+ continue;
+ }
+ }
+
+ info!(" - {}: re-compiled", module_name);
+ opt_jobs.push(LtoModuleCodegen::Thin(ThinModule {
+ shared: shared.clone(),
+ idx: module_index,
+ }));
+ }
+
+ // Save the current ThinLTO import information for the next compilation
+ // session, overwriting the previous serialized data (if any).
+ if let Some(path) = key_map_path {
+ if let Err(err) = curr_key_map.save_to_file(&path) {
+ let msg = format!("Error while writing ThinLTO key data: {}", err);
+ return Err(write::llvm_err(diag_handler, &msg));
+ }
+ }
+
+ Ok((opt_jobs, copy_jobs))
+ }
+}
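+
+// A minimal sketch of the reuse decision above (hypothetical helper, not in
+// the upstream file): a cached module may be copied forward instead of
+// re-optimized only if a green work product exists for it and its LTO cache
+// key is unchanged between sessions.
+#[allow(dead_code)]
+fn _can_reuse_sketch(
+    module_name: &str,
+    green_modules: &FxHashMap<String, WorkProduct>,
+    prev_key_map: &ThinLTOKeysMap,
+    curr_key_map: &ThinLTOKeysMap,
+) -> bool {
+    green_modules.contains_key(module_name)
+        && prev_key_map.keys.get(module_name) == curr_key_map.keys.get(module_name)
+}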
+
+pub(crate) fn run_pass_manager(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: &mut ModuleCodegen<ModuleLlvm>,
+ thin: bool,
+) -> Result<(), FatalError> {
+ let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &*module.name);
+ let config = cgcx.config(module.kind);
+
+ // Now we have one massive module inside of llmod. Time to run the
+ // LTO-specific optimization passes that LLVM provides.
+ //
+ // This code is based off the code found in llvm's LTO code generator:
+ // llvm/lib/LTO/LTOCodeGenerator.cpp
+ debug!("running the pass manager");
+ unsafe {
+ if !llvm::LLVMRustHasModuleFlag(
+ module.module_llvm.llmod(),
+ "LTOPostLink".as_ptr().cast(),
+ 11,
+ ) {
+ llvm::LLVMRustAddModuleFlag(
+ module.module_llvm.llmod(),
+ llvm::LLVMModFlagBehavior::Error,
+ "LTOPostLink\0".as_ptr().cast(),
+ 1,
+ );
+ }
+ if llvm_util::should_use_new_llvm_pass_manager(
+ &config.new_llvm_pass_manager,
+ &cgcx.target_arch,
+ ) {
+ let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO };
+ let opt_level = config.opt_level.unwrap_or(config::OptLevel::No);
+ write::optimize_with_new_llvm_pass_manager(
+ cgcx,
+ diag_handler,
+ module,
+ config,
+ opt_level,
+ opt_stage,
+ )?;
+ debug!("lto done");
+ return Ok(());
+ }
+
+ let pm = llvm::LLVMCreatePassManager();
+ llvm::LLVMAddAnalysisPasses(module.module_llvm.tm, pm);
+
+ if config.verify_llvm_ir {
+ let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
+ llvm::LLVMRustAddPass(pm, pass.unwrap());
+ }
+
+ let opt_level = config
+ .opt_level
+ .map(|x| to_llvm_opt_settings(x).0)
+ .unwrap_or(llvm::CodeGenOptLevel::None);
+ with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| {
+ if thin {
+ llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm);
+ } else {
+ llvm::LLVMRustPassManagerBuilderPopulateLTOPassManager(
+ b, pm, /* Internalize = */ False, /* RunInliner = */ True,
+ );
+ }
+ });
+
+ // We always generate bitcode through ThinLTOBuffers,
+ // which do not support anonymous globals.
+ if config.bitcode_needed() {
+ let pass = llvm::LLVMRustFindAndCreatePass("name-anon-globals\0".as_ptr().cast());
+ llvm::LLVMRustAddPass(pm, pass.unwrap());
+ }
+
+ if config.verify_llvm_ir {
+ let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr().cast());
+ llvm::LLVMRustAddPass(pm, pass.unwrap());
+ }
+
+ llvm::LLVMRunPassManager(pm, module.module_llvm.llmod());
+
+ llvm::LLVMDisposePassManager(pm);
+ }
+ debug!("lto done");
+ Ok(())
+}
+
+pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer);
+
+unsafe impl Send for ModuleBuffer {}
+unsafe impl Sync for ModuleBuffer {}
+
+impl ModuleBuffer {
+ pub fn new(m: &llvm::Module) -> ModuleBuffer {
+ ModuleBuffer(unsafe { llvm::LLVMRustModuleBufferCreate(m) })
+ }
+}
+
+impl ModuleBufferMethods for ModuleBuffer {
+ fn data(&self) -> &[u8] {
+ unsafe {
+ let ptr = llvm::LLVMRustModuleBufferPtr(self.0);
+ let len = llvm::LLVMRustModuleBufferLen(self.0);
+ slice::from_raw_parts(ptr, len)
+ }
+ }
+}
+
+impl Drop for ModuleBuffer {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustModuleBufferFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+pub struct ThinData(&'static mut llvm::ThinLTOData);
+
+unsafe impl Send for ThinData {}
+unsafe impl Sync for ThinData {}
+
+impl Drop for ThinData {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustFreeThinLTOData(&mut *(self.0 as *mut _));
+ }
+ }
+}
+
+pub struct ThinBuffer(&'static mut llvm::ThinLTOBuffer);
+
+unsafe impl Send for ThinBuffer {}
+unsafe impl Sync for ThinBuffer {}
+
+impl ThinBuffer {
+ pub fn new(m: &llvm::Module, is_thin: bool) -> ThinBuffer {
+ unsafe {
+ let buffer = llvm::LLVMRustThinLTOBufferCreate(m, is_thin);
+ ThinBuffer(buffer)
+ }
+ }
+}
+
+impl ThinBufferMethods for ThinBuffer {
+ fn data(&self) -> &[u8] {
+ unsafe {
+ let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _;
+ let len = llvm::LLVMRustThinLTOBufferLen(self.0);
+ slice::from_raw_parts(ptr, len)
+ }
+ }
+}
+
+impl Drop for ThinBuffer {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustThinLTOBufferFree(&mut *(self.0 as *mut _));
+ }
+ }
+}
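+
+// A minimal round-trip sketch (hypothetical, not in the upstream file): the
+// buffers above exist to move modules between LLVM contexts. Serializing with
+// `ModuleBuffer` and re-parsing with `parse_module` (defined at the end of
+// this file) is the same round trip that fat LTO performs.
+#[allow(dead_code)]
+fn _context_roundtrip_sketch<'a>(
+    cx: &'a llvm::Context,
+    m: &llvm::Module,
+    name: &CStr,
+    diag_handler: &Handler,
+) -> Result<&'a llvm::Module, FatalError> {
+    // Serialize the module to an in-memory bitcode buffer...
+    let buffer = ModuleBuffer::new(m);
+    // ...and parse it back into the destination context `cx`.
+    parse_module(cx, name, buffer.data(), diag_handler)
+}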
+
+pub unsafe fn optimize_thin_module(
+ thin_module: ThinModule<LlvmCodegenBackend>,
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+
+ let module_name = &thin_module.shared.module_names[thin_module.idx];
+ let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap());
+ let tm =
+ (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&diag_handler, &e))?;
+
+ // Right now the implementation we've got only works over serialized
+ // modules, so we create a fresh new LLVM context and parse the module
+ // into that context. One day we may still need to do this for upstream
+ // crates, but for locally codegened modules we may be able to reuse
+ // that LLVM context and module.
+ let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+ let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _;
+ let mut module = ModuleCodegen {
+ module_llvm: ModuleLlvm { llmod_raw, llcx, tm },
+ name: thin_module.name().to_string(),
+ kind: ModuleKind::Regular,
+ };
+ {
+ let target = &*module.module_llvm.tm;
+ let llmod = module.module_llvm.llmod();
+ save_temp_bitcode(cgcx, &module, "thin-lto-input");
+
+ // Before we do much else, find the "main" `DICompileUnit` that we'll be
+ // using below. If we find more than one though then rustc has changed
+ // in a way we're not ready for, so generate an ICE by returning
+ // an error.
+ let mut cu1 = ptr::null_mut();
+ let mut cu2 = ptr::null_mut();
+ llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2);
+ if !cu2.is_null() {
+ let msg = "multiple source DICompileUnits found";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+
+ // Up next comes the per-module local analyses that we do for Thin LTO.
+ // Each of these functions is basically copied from the LLVM
+ // implementation and then tailored to suit this implementation. Ideally
+ // each of these would be supported by upstream LLVM but that's perhaps
+ // a patch for another day!
+ //
+ // You can find some more comments about these functions in the LLVM
+ // bindings we've got (currently `PassWrapper.cpp`)
+ {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-rename");
+ }
+
+ {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve");
+ }
+
+ {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize");
+ }
+
+ {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name());
+ if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) {
+ let msg = "failed to prepare thin LTO module";
+ return Err(write::llvm_err(&diag_handler, msg));
+ }
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-import");
+ }
+
+ // Ok now this is a bit unfortunate. This is also something you won't
+ // find upstream in LLVM's ThinLTO passes! This is a hack for now to
+ // work around bugs in LLVM.
+ //
+ // First discovered in #45511, it was found that as part of ThinLTO
+ // importing passes LLVM will import `DICompileUnit` metadata
+ // information across modules. This means that we'll be working with one
+ // LLVM module that has multiple `DICompileUnit` instances in it (a
+ // bunch of `llvm.dbg.cu` members). Unfortunately there are a number of
+ // bugs in LLVM's backend which generates invalid DWARF in a situation
+ // like this:
+ //
+ // https://bugs.llvm.org/show_bug.cgi?id=35212
+ // https://bugs.llvm.org/show_bug.cgi?id=35562
+ //
+ // While the first bug there is fixed the second ended up causing #46346
+ // which was basically a resurgence of #45511 after LLVM's bug 35212 was
+ // fixed.
+ //
+ // The function called below is a huge hack around this problem. It is
+ // defined in `PassWrapper.cpp` and will "merge" all `DICompileUnit`
+ // instances in a module: it takes all the objects, rewrites all
+ // pointers of `DISubprogram` to point to the first `DICompileUnit`,
+ // and then deletes all the other units.
+ //
+ // This probably mangles the debug info slightly (but hopefully
+ // not too much) but for now at least gets LLVM to emit valid DWARF (or
+ // so it appears). Hopefully we can remove this once upstream bugs are
+ // fixed in LLVM.
+ {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_thin_lto_patch_debuginfo", thin_module.name());
+ llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1);
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-patch");
+ }
+
+ // Alright now that we've done everything related to the ThinLTO
+ // analysis it's time to run some optimizations! Here we use the same
+ // `run_pass_manager` as the "fat" LTO above except that we tell it to
+ // populate a thin-specific pass manager, which presumably LLVM treats a
+ // little differently.
+ {
+ info!("running thin lto passes over {}", module.name);
+ run_pass_manager(cgcx, &diag_handler, &mut module, true)?;
+ save_temp_bitcode(cgcx, &module, "thin-lto-after-pm");
+ }
+ }
+ Ok(module)
+}
+
+/// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys
+#[derive(Debug, Default)]
+pub struct ThinLTOKeysMap {
+ // key = llvm name of importing module, value = LLVM cache key
+ keys: FxHashMap<String, String>,
+}
+
+impl ThinLTOKeysMap {
+ fn save_to_file(&self, path: &Path) -> io::Result<()> {
+ use std::io::Write;
+ let file = File::create(path)?;
+ let mut writer = io::BufWriter::new(file);
+ for (module, key) in &self.keys {
+ writeln!(writer, "{} {}", module, key)?;
+ }
+ Ok(())
+ }
+
+ fn load_from_file(path: &Path) -> io::Result<Self> {
+ use std::io::BufRead;
+ let mut keys = FxHashMap::default();
+ let file = File::open(path)?;
+ for line in io::BufReader::new(file).lines() {
+ let line = line?;
+ let mut split = line.split(' ');
+ let module = split.next().unwrap();
+ let key = split.next().unwrap();
+ assert_eq!(split.next(), None, "Expected two space-separated values, found {:?}", line);
+ keys.insert(module.to_string(), key.to_string());
+ }
+ Ok(Self { keys })
+ }
+
+ fn from_thin_lto_modules(
+ data: &ThinData,
+ modules: &[llvm::ThinLTOModule],
+ names: &[CString],
+ ) -> Self {
+ let keys = iter::zip(modules, names)
+ .map(|(module, name)| {
+ let key = build_string(|rust_str| unsafe {
+ llvm::LLVMRustComputeLTOCacheKey(rust_str, module.identifier, data.0);
+ })
+ .expect("Invalid ThinLTO module key");
+ (name.clone().into_string().unwrap(), key)
+ })
+ .collect();
+ Self { keys }
+ }
+}
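+
+// A minimal sketch of the on-disk format handled above (hypothetical, not in
+// the upstream file): one `<module name> <cache key>` pair per line, so a
+// saved map loads back verbatim.
+#[allow(dead_code)]
+fn _keys_map_roundtrip_sketch(path: &Path) -> io::Result<bool> {
+    let mut keys = FxHashMap::default();
+    keys.insert("example.cgu.0".to_string(), "0123abcd".to_string());
+    let map = ThinLTOKeysMap { keys };
+    map.save_to_file(path)?;
+    let loaded = ThinLTOKeysMap::load_from_file(path)?;
+    Ok(loaded.keys == map.keys)
+}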
+
+fn module_name_to_str(c_str: &CStr) -> &str {
+ c_str.to_str().unwrap_or_else(|e| {
+ bug!("Encountered non-utf8 LLVM module name `{}`: {}", c_str.to_string_lossy(), e)
+ })
+}
+
+pub fn parse_module<'a>(
+ cx: &'a llvm::Context,
+ name: &CStr,
+ data: &[u8],
+ diag_handler: &Handler,
+) -> Result<&'a llvm::Module, FatalError> {
+ unsafe {
+ llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()).ok_or_else(
+ || {
+ let msg = "failed to parse bitcode for LTO module";
+ write::llvm_err(diag_handler, msg)
+ },
+ )
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/profiling.rs b/compiler/rustc_codegen_llvm/src/back/profiling.rs
new file mode 100644
index 000000000..2741f7d84
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/profiling.rs
@@ -0,0 +1,58 @@
+use measureme::{event_id::SEPARATOR_BYTE, EventId, StringComponent, StringId};
+use rustc_data_structures::profiling::{SelfProfiler, TimingGuard};
+use std::ffi::{c_void, CStr};
+use std::os::raw::c_char;
+use std::sync::Arc;
+
+fn llvm_args_to_string_id(profiler: &SelfProfiler, pass_name: &str, ir_name: &str) -> EventId {
+ let pass_name = profiler.get_or_alloc_cached_string(pass_name);
+ let mut components = vec![StringComponent::Ref(pass_name)];
+ // handle that LazyCallGraph::SCC is a comma separated list within parentheses
+ let parentheses: &[_] = &['(', ')'];
+ let trimmed = ir_name.trim_matches(parentheses);
+ for part in trimmed.split(", ") {
+ let demangled_ir_name = rustc_demangle::demangle(part).to_string();
+ let ir_name = profiler.get_or_alloc_cached_string(demangled_ir_name);
+ components.push(StringComponent::Value(SEPARATOR_BYTE));
+ components.push(StringComponent::Ref(ir_name));
+ }
+ EventId::from_label(profiler.alloc_string(components.as_slice()))
+}
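+
+// For example (illustrative): a LazyCallGraph::SCC `ir_name` such as
+// "(_ZN3foo3barE, _ZN3foo3bazE)" is trimmed of its parentheses, split on
+// ", ", and each mangled symbol is demangled (to "foo::bar" and "foo::baz")
+// before being appended as a separate string component.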
+
+pub struct LlvmSelfProfiler<'a> {
+ profiler: Arc<SelfProfiler>,
+ stack: Vec<TimingGuard<'a>>,
+ llvm_pass_event_kind: StringId,
+}
+
+impl<'a> LlvmSelfProfiler<'a> {
+ pub fn new(profiler: Arc<SelfProfiler>) -> Self {
+ let llvm_pass_event_kind = profiler.alloc_string("LLVM Pass");
+ Self { profiler, stack: Vec::default(), llvm_pass_event_kind }
+ }
+
+ fn before_pass_callback(&'a mut self, pass_name: &str, ir_name: &str) {
+ let event_id = llvm_args_to_string_id(&self.profiler, pass_name, ir_name);
+
+ self.stack.push(TimingGuard::start(&self.profiler, self.llvm_pass_event_kind, event_id));
+ }
+ fn after_pass_callback(&mut self) {
+ self.stack.pop();
+ }
+}
+
+pub unsafe extern "C" fn selfprofile_before_pass_callback(
+ llvm_self_profiler: *mut c_void,
+ pass_name: *const c_char,
+ ir_name: *const c_char,
+) {
+ let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+ let pass_name = CStr::from_ptr(pass_name).to_str().expect("valid UTF-8");
+ let ir_name = CStr::from_ptr(ir_name).to_str().expect("valid UTF-8");
+ llvm_self_profiler.before_pass_callback(pass_name, ir_name);
+}
+
+pub unsafe extern "C" fn selfprofile_after_pass_callback(llvm_self_profiler: *mut c_void) {
+ let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
+ llvm_self_profiler.after_pass_callback();
+}
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
new file mode 100644
index 000000000..534d32e8a
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -0,0 +1,1212 @@
+use crate::back::lto::ThinBuffer;
+use crate::back::profiling::{
+ selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler,
+};
+use crate::base;
+use crate::common;
+use crate::consts;
+use crate::llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::LlvmCodegenBackend;
+use crate::ModuleLlvm;
+use rustc_codegen_ssa::back::link::ensure_removed;
+use rustc_codegen_ssa::back::write::{
+ BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig,
+ TargetMachineFactoryFn,
+};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_errors::{FatalError, Handler, Level};
+use rustc_fs_util::{link_or_copy, path_to_c_string};
+use rustc_middle::bug;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath};
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::InnerSpan;
+use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo};
+use tracing::debug;
+
+use libc::{c_char, c_int, c_uint, c_void, size_t};
+use std::ffi::CString;
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+use std::slice;
+use std::str;
+use std::sync::Arc;
+
+pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError {
+ match llvm::last_error() {
+ Some(err) => handler.fatal(&format!("{}: {}", msg, err)),
+ None => handler.fatal(msg),
+ }
+}
+
+pub fn write_output_file<'ll>(
+ handler: &rustc_errors::Handler,
+ target: &'ll llvm::TargetMachine,
+ pm: &llvm::PassManager<'ll>,
+ m: &'ll llvm::Module,
+ output: &Path,
+ dwo_output: Option<&Path>,
+ file_type: llvm::FileType,
+ self_profiler_ref: &SelfProfilerRef,
+) -> Result<(), FatalError> {
+ debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output);
+ unsafe {
+ let output_c = path_to_c_string(output);
+ let dwo_output_c;
+ let dwo_output_ptr = if let Some(dwo_output) = dwo_output {
+ dwo_output_c = path_to_c_string(dwo_output);
+ dwo_output_c.as_ptr()
+ } else {
+ std::ptr::null()
+ };
+ let result = llvm::LLVMRustWriteOutputFile(
+ target,
+ pm,
+ m,
+ output_c.as_ptr(),
+ dwo_output_ptr,
+ file_type,
+ );
+
+ // Record artifact sizes for self-profiling
+ if result == llvm::LLVMRustResult::Success {
+ let artifact_kind = match file_type {
+ llvm::FileType::ObjectFile => "object_file",
+ llvm::FileType::AssemblyFile => "assembly_file",
+ };
+ record_artifact_size(self_profiler_ref, artifact_kind, output);
+ if let Some(dwo_file) = dwo_output {
+ record_artifact_size(self_profiler_ref, "dwo_file", dwo_file);
+ }
+ }
+
+ result.into_result().map_err(|()| {
+ let msg = format!("could not write output to {}", output.display());
+ llvm_err(handler, &msg)
+ })
+ }
+}
+
+pub fn create_informational_target_machine(sess: &Session) -> &'static mut llvm::TargetMachine {
+ let config = TargetMachineFactoryConfig { split_dwarf_file: None };
+ // Can't use query system here quite yet because this function is invoked before the query
+ // system/tcx is set up.
+ let features = llvm_util::global_llvm_features(sess, false);
+ target_machine_factory(sess, config::OptLevel::No, &features)(config)
+ .unwrap_or_else(|err| llvm_err(sess.diagnostic(), &err).raise())
+}
+
+pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
+ let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
+ tcx.output_filenames(()).split_dwarf_path(
+ tcx.sess.split_debuginfo(),
+ tcx.sess.opts.unstable_opts.split_dwarf_kind,
+ Some(mod_name),
+ )
+ } else {
+ None
+ };
+ let config = TargetMachineFactoryConfig { split_dwarf_file };
+ target_machine_factory(
+ &tcx.sess,
+ tcx.backend_optimization_level(()),
+ tcx.global_backend_features(()),
+ )(config)
+ .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise())
+}
+
+pub fn to_llvm_opt_settings(
+ cfg: config::OptLevel,
+) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) {
+ use self::config::OptLevel::*;
+ match cfg {
+ No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone),
+ Less => (llvm::CodeGenOptLevel::Less, llvm::CodeGenOptSizeNone),
+ Default => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeNone),
+ Aggressive => (llvm::CodeGenOptLevel::Aggressive, llvm::CodeGenOptSizeNone),
+ Size => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeDefault),
+ SizeMin => (llvm::CodeGenOptLevel::Default, llvm::CodeGenOptSizeAggressive),
+ }
+}
+
+fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel {
+ use config::OptLevel::*;
+ match cfg {
+ No => llvm::PassBuilderOptLevel::O0,
+ Less => llvm::PassBuilderOptLevel::O1,
+ Default => llvm::PassBuilderOptLevel::O2,
+ Aggressive => llvm::PassBuilderOptLevel::O3,
+ Size => llvm::PassBuilderOptLevel::Os,
+ SizeMin => llvm::PassBuilderOptLevel::Oz,
+ }
+}
+
+fn to_llvm_relocation_model(relocation_model: RelocModel) -> llvm::RelocModel {
+ match relocation_model {
+ RelocModel::Static => llvm::RelocModel::Static,
+ // LLVM doesn't have a PIE relocation model; it represents PIE as PIC with an extra attribute.
+ RelocModel::Pic | RelocModel::Pie => llvm::RelocModel::PIC,
+ RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic,
+ RelocModel::Ropi => llvm::RelocModel::ROPI,
+ RelocModel::Rwpi => llvm::RelocModel::RWPI,
+ RelocModel::RopiRwpi => llvm::RelocModel::ROPI_RWPI,
+ }
+}
+
+pub(crate) fn to_llvm_code_model(code_model: Option<CodeModel>) -> llvm::CodeModel {
+ match code_model {
+ Some(CodeModel::Tiny) => llvm::CodeModel::Tiny,
+ Some(CodeModel::Small) => llvm::CodeModel::Small,
+ Some(CodeModel::Kernel) => llvm::CodeModel::Kernel,
+ Some(CodeModel::Medium) => llvm::CodeModel::Medium,
+ Some(CodeModel::Large) => llvm::CodeModel::Large,
+ None => llvm::CodeModel::None,
+ }
+}
+
+pub fn target_machine_factory(
+ sess: &Session,
+ optlvl: config::OptLevel,
+ target_features: &[String],
+) -> TargetMachineFactoryFn<LlvmCodegenBackend> {
+ let reloc_model = to_llvm_relocation_model(sess.relocation_model());
+
+ let (opt_level, _) = to_llvm_opt_settings(optlvl);
+ let use_softfp = sess.opts.cg.soft_float;
+
+ let ffunction_sections =
+ sess.opts.unstable_opts.function_sections.unwrap_or(sess.target.function_sections);
+ let fdata_sections = ffunction_sections;
+ let funique_section_names = !sess.opts.unstable_opts.no_unique_section_names;
+
+ let code_model = to_llvm_code_model(sess.code_model());
+
+ let mut singlethread = sess.target.singlethread;
+
+ // On the wasm target, once the `atomics` feature is enabled we're no
+ // longer single-threaded, and we don't want LLVM to lower atomic
+ // operations to their single-threaded equivalents.
+ if singlethread && sess.target.is_like_wasm && sess.target_features.contains(&sym::atomics) {
+ singlethread = false;
+ }
+
+ let triple = SmallCStr::new(&sess.target.llvm_target);
+ let cpu = SmallCStr::new(llvm_util::target_cpu(sess));
+ let features = CString::new(target_features.join(",")).unwrap();
+ let abi = SmallCStr::new(&sess.target.llvm_abiname);
+ let trap_unreachable =
+ sess.opts.unstable_opts.trap_unreachable.unwrap_or(sess.target.trap_unreachable);
+ let emit_stack_size_section = sess.opts.unstable_opts.emit_stack_sizes;
+
+ let asm_comments = sess.asm_comments();
+ let relax_elf_relocations =
+ sess.opts.unstable_opts.relax_elf_relocations.unwrap_or(sess.target.relax_elf_relocations);
+
+ let use_init_array =
+ !sess.opts.unstable_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section);
+
+ let path_mapping = sess.source_map().path_mapping().clone();
+
+ Arc::new(move |config: TargetMachineFactoryConfig| {
+ let split_dwarf_file =
+ path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
+ let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
+
+ let tm = unsafe {
+ llvm::LLVMRustCreateTargetMachine(
+ triple.as_ptr(),
+ cpu.as_ptr(),
+ features.as_ptr(),
+ abi.as_ptr(),
+ code_model,
+ reloc_model,
+ opt_level,
+ use_softfp,
+ ffunction_sections,
+ fdata_sections,
+ funique_section_names,
+ trap_unreachable,
+ singlethread,
+ asm_comments,
+ emit_stack_size_section,
+ relax_elf_relocations,
+ use_init_array,
+ split_dwarf_file.as_ptr(),
+ )
+ };
+
+ tm.ok_or_else(|| {
+ format!("Could not create LLVM TargetMachine for triple: {}", triple.to_str().unwrap())
+ })
+ })
+}
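+
+// A minimal usage sketch (hypothetical, not in the upstream file): the
+// returned factory is a cloneable `Arc` closure, so each codegen worker can
+// create its own `TargetMachine` from the same captured session settings.
+#[allow(dead_code)]
+fn _tm_factory_usage_sketch(
+    sess: &Session,
+) -> Result<&'static mut llvm::TargetMachine, String> {
+    let factory = target_machine_factory(sess, config::OptLevel::Default, &[]);
+    // Each call can carry a different per-module config (e.g. a split DWARF path).
+    factory(TargetMachineFactoryConfig { split_dwarf_file: None })
+}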
+
+pub(crate) fn save_temp_bitcode(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ module: &ModuleCodegen<ModuleLlvm>,
+ name: &str,
+) {
+ if !cgcx.save_temps {
+ return;
+ }
+ unsafe {
+ let ext = format!("{}.bc", name);
+ let cgu = Some(&module.name[..]);
+ let path = cgcx.output_filenames.temp_path_ext(&ext, cgu);
+ let cstr = path_to_c_string(&path);
+ let llmod = module.module_llvm.llmod();
+ llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr());
+ }
+}
+
+pub struct DiagnosticHandlers<'a> {
+ data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
+ llcx: &'a llvm::Context,
+ old_handler: Option<&'a llvm::DiagnosticHandler>,
+}
+
+impl<'a> DiagnosticHandlers<'a> {
+ pub fn new(
+ cgcx: &'a CodegenContext<LlvmCodegenBackend>,
+ handler: &'a Handler,
+ llcx: &'a llvm::Context,
+ ) -> Self {
+ let remark_passes_all: bool;
+ let remark_passes: Vec<CString>;
+ match &cgcx.remark {
+ Passes::All => {
+ remark_passes_all = true;
+ remark_passes = Vec::new();
+ }
+ Passes::Some(passes) => {
+ remark_passes_all = false;
+ remark_passes =
+ passes.iter().map(|name| CString::new(name.as_str()).unwrap()).collect();
+ }
+ };
+ let remark_passes: Vec<*const c_char> =
+ remark_passes.iter().map(|name: &CString| name.as_ptr()).collect();
+ let data = Box::into_raw(Box::new((cgcx, handler)));
+ unsafe {
+ let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
+ llvm::LLVMRustContextConfigureDiagnosticHandler(
+ llcx,
+ diagnostic_handler,
+ data.cast(),
+ remark_passes_all,
+ remark_passes.as_ptr(),
+ remark_passes.len(),
+ );
+ llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
+ DiagnosticHandlers { data, llcx, old_handler }
+ }
+ }
+}
+
+impl<'a> Drop for DiagnosticHandlers<'a> {
+ fn drop(&mut self) {
+ use std::ptr::null_mut;
+ unsafe {
+ llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
+ llvm::LLVMRustContextSetDiagnosticHandler(self.llcx, self.old_handler);
+ drop(Box::from_raw(self.data));
+ }
+ }
+}
+
+fn report_inline_asm(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ msg: String,
+ level: llvm::DiagnosticLevel,
+ mut cookie: c_uint,
+ source: Option<(String, Vec<InnerSpan>)>,
+) {
+ // In LTO builds we may get srcloc values from other crates which are invalid
+ // since they use a different source map. To be safe we just suppress these
+ // in LTO builds.
+ if matches!(cgcx.lto, Lto::Fat | Lto::Thin) {
+ cookie = 0;
+ }
+ let level = match level {
+ llvm::DiagnosticLevel::Error => Level::Error { lint: false },
+ llvm::DiagnosticLevel::Warning => Level::Warning(None),
+ llvm::DiagnosticLevel::Note | llvm::DiagnosticLevel::Remark => Level::Note,
+ };
+ cgcx.diag_emitter.inline_asm_error(cookie as u32, msg, level, source);
+}
+
+unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void, cookie: c_uint) {
+ if user.is_null() {
+ return;
+ }
+ let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
+
+ let smdiag = llvm::diagnostic::SrcMgrDiagnostic::unpack(diag);
+ report_inline_asm(cgcx, smdiag.message, smdiag.level, cookie, smdiag.source);
+}
+
+unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) {
+ if user.is_null() {
+ return;
+ }
+ let (cgcx, diag_handler) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler));
+
+ match llvm::diagnostic::Diagnostic::unpack(info) {
+ llvm::diagnostic::InlineAsm(inline) => {
+ report_inline_asm(cgcx, inline.message, inline.level, inline.cookie, inline.source);
+ }
+
+ llvm::diagnostic::Optimization(opt) => {
+ let enabled = match cgcx.remark {
+ Passes::All => true,
+ Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name),
+ };
+
+ if enabled {
+ diag_handler.note_without_error(&format!(
+ "{}:{}:{}: {}: {}",
+ opt.filename, opt.line, opt.column, opt.pass_name, opt.message,
+ ));
+ }
+ }
+ llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => {
+ let msg = llvm::build_string(|s| {
+ llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+ })
+ .expect("non-UTF8 diagnostic");
+ diag_handler.warn(&msg);
+ }
+ llvm::diagnostic::Unsupported(diagnostic_ref) => {
+ let msg = llvm::build_string(|s| {
+ llvm::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref, s)
+ })
+ .expect("non-UTF8 diagnostic");
+ diag_handler.err(&msg);
+ }
+ llvm::diagnostic::UnknownDiagnostic(..) => {}
+ }
+}
+
+fn get_pgo_gen_path(config: &ModuleConfig) -> Option<CString> {
+ match config.pgo_gen {
+ SwitchWithOptPath::Enabled(ref opt_dir_path) => {
+ let path = if let Some(dir_path) = opt_dir_path {
+ dir_path.join("default_%m.profraw")
+ } else {
+ PathBuf::from("default_%m.profraw")
+ };
+
+ Some(CString::new(format!("{}", path.display())).unwrap())
+ }
+ SwitchWithOptPath::Disabled => None,
+ }
+}
+
+fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> {
+ config
+ .pgo_use
+ .as_ref()
+ .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
+}
+
+fn get_pgo_sample_use_path(config: &ModuleConfig) -> Option<CString> {
+ config
+ .pgo_sample_use
+ .as_ref()
+ .map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
+}
+
+pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<ModuleLlvm>,
+ config: &ModuleConfig,
+ opt_level: config::OptLevel,
+ opt_stage: llvm::OptStage,
+) -> Result<(), FatalError> {
+ let unroll_loops =
+ opt_level != config::OptLevel::Size && opt_level != config::OptLevel::SizeMin;
+ let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed();
+ let pgo_gen_path = get_pgo_gen_path(config);
+ let pgo_use_path = get_pgo_use_path(config);
+ let pgo_sample_use_path = get_pgo_sample_use_path(config);
+ let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
+ // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
+ let sanitizer_options = if !is_lto {
+ Some(llvm::SanitizerOptions {
+ sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
+ sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
+ sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
+ sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
+ sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int,
+ sanitize_thread: config.sanitizer.contains(SanitizerSet::THREAD),
+ sanitize_hwaddress: config.sanitizer.contains(SanitizerSet::HWADDRESS),
+ sanitize_hwaddress_recover: config.sanitizer_recover.contains(SanitizerSet::HWADDRESS),
+ })
+ } else {
+ None
+ };
+
+ let mut llvm_profiler = if cgcx.prof.llvm_recording_enabled() {
+ Some(LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()))
+ } else {
+ None
+ };
+
+ let llvm_selfprofiler =
+ llvm_profiler.as_mut().map(|s| s as *mut _ as *mut c_void).unwrap_or(std::ptr::null_mut());
+
+ let extra_passes = if !is_lto { config.passes.join(",") } else { "".to_string() };
+
+ let llvm_plugins = config.llvm_plugins.join(",");
+
+ // FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
+ // We would have to add upstream support for this first, before we can support
+ // config.inline_threshold and our more aggressive default thresholds.
+ let result = llvm::LLVMRustOptimizeWithNewPassManager(
+ module.module_llvm.llmod(),
+ &*module.module_llvm.tm,
+ to_pass_builder_opt_level(opt_level),
+ opt_stage,
+ config.no_prepopulate_passes,
+ config.verify_llvm_ir,
+ using_thin_buffers,
+ config.merge_functions,
+ unroll_loops,
+ config.vectorize_slp,
+ config.vectorize_loop,
+ config.no_builtins,
+ config.emit_lifetime_markers,
+ sanitizer_options.as_ref(),
+ pgo_gen_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ pgo_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ config.instrument_coverage,
+ config.instrument_gcov,
+ pgo_sample_use_path.as_ref().map_or(std::ptr::null(), |s| s.as_ptr()),
+ config.debug_info_for_profiling,
+ llvm_selfprofiler,
+ selfprofile_before_pass_callback,
+ selfprofile_after_pass_callback,
+ extra_passes.as_ptr().cast(),
+ extra_passes.len(),
+ llvm_plugins.as_ptr().cast(),
+ llvm_plugins.len(),
+ );
+ result.into_result().map_err(|()| llvm_err(diag_handler, "failed to run LLVM passes"))
+}
+
+// Unsafe due to LLVM calls.
+pub(crate) unsafe fn optimize(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<ModuleLlvm>,
+ config: &ModuleConfig,
+) -> Result<(), FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
+
+ let llmod = module.module_llvm.llmod();
+ let llcx = &*module.module_llvm.llcx;
+ let tm = &*module.module_llvm.tm;
+ let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
+
+ if let Some(false) = config.new_llvm_pass_manager && llvm_util::get_version() >= (15, 0, 0) {
+ diag_handler.warn(
+ "ignoring `-Z new-llvm-pass-manager=no`, which is no longer supported with LLVM 15",
+ );
+ }
+
+ if config.emit_no_opt_bc {
+ let out = cgcx.output_filenames.temp_path_ext("no-opt.bc", module_name);
+ let out = path_to_c_string(&out);
+ llvm::LLVMWriteBitcodeToFile(llmod, out.as_ptr());
+ }
+
+ if let Some(opt_level) = config.opt_level {
+ if llvm_util::should_use_new_llvm_pass_manager(
+ &config.new_llvm_pass_manager,
+ &cgcx.target_arch,
+ ) {
+ let opt_stage = match cgcx.lto {
+ Lto::Fat => llvm::OptStage::PreLinkFatLTO,
+ Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
+ _ if cgcx.opts.cg.linker_plugin_lto.enabled() => llvm::OptStage::PreLinkThinLTO,
+ _ => llvm::OptStage::PreLinkNoLTO,
+ };
+ return optimize_with_new_llvm_pass_manager(
+ cgcx,
+ diag_handler,
+ module,
+ config,
+ opt_level,
+ opt_stage,
+ );
+ }
+
+ if cgcx.prof.llvm_recording_enabled() {
+ diag_handler
+ .warn("`-Z self-profile-events = llvm` requires `-Z new-llvm-pass-manager`");
+ }
+
+ // Create the two optimizing pass managers. These mirror what clang
+ // does, and are populated by LLVM's default PassManagerBuilder.
+ // Each manager has a different set of passes, but they also share
+ // some common passes.
+ let fpm = llvm::LLVMCreateFunctionPassManagerForModule(llmod);
+ let mpm = llvm::LLVMCreatePassManager();
+
+ {
+ let find_pass = |pass_name: &str| {
+ let pass_name = SmallCStr::new(pass_name);
+ llvm::LLVMRustFindAndCreatePass(pass_name.as_ptr())
+ };
+
+ if config.verify_llvm_ir {
+ // Verification should run as the very first pass.
+ llvm::LLVMRustAddPass(fpm, find_pass("verify").unwrap());
+ }
+
+ let mut extra_passes = Vec::new();
+ let mut have_name_anon_globals_pass = false;
+
+ for pass_name in &config.passes {
+ if pass_name == "lint" {
+ // Linting should also be performed early, directly on the generated IR.
+ llvm::LLVMRustAddPass(fpm, find_pass("lint").unwrap());
+ continue;
+ }
+
+ if let Some(pass) = find_pass(pass_name) {
+ extra_passes.push(pass);
+ } else {
+ diag_handler.warn(&format!("unknown pass `{}`, ignoring", pass_name));
+ }
+
+ if pass_name == "name-anon-globals" {
+ have_name_anon_globals_pass = true;
+ }
+ }
+
+ // Instrumentation must be inserted before optimization,
+ // otherwise LLVM may optimize some functions away, which
+ // breaks llvm-cov.
+ //
+ // This mirrors what Clang does in lib/CodeGen/BackendUtil.cpp.
+ if config.instrument_gcov {
+ llvm::LLVMRustAddPass(mpm, find_pass("insert-gcov-profiling").unwrap());
+ }
+ if config.instrument_coverage {
+ llvm::LLVMRustAddPass(mpm, find_pass("instrprof").unwrap());
+ }
+ if config.debug_info_for_profiling {
+ llvm::LLVMRustAddPass(mpm, find_pass("add-discriminators").unwrap());
+ }
+
+ add_sanitizer_passes(config, &mut extra_passes);
+
+ // Some options cause LLVM bitcode to be emitted, which uses ThinLTOBuffers, so we need
+ // to make sure we run LLVM's NameAnonGlobals pass when emitting bitcode; otherwise
+ // we'll get errors in LLVM.
+ let using_thin_buffers = config.bitcode_needed();
+ if !config.no_prepopulate_passes {
+ llvm::LLVMAddAnalysisPasses(tm, fpm);
+ llvm::LLVMAddAnalysisPasses(tm, mpm);
+ let opt_level = to_llvm_opt_settings(opt_level).0;
+ let prepare_for_thin_lto = cgcx.lto == Lto::Thin
+ || cgcx.lto == Lto::ThinLocal
+ || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled());
+ with_llvm_pmb(llmod, config, opt_level, prepare_for_thin_lto, &mut |b| {
+ llvm::LLVMRustAddLastExtensionPasses(
+ b,
+ extra_passes.as_ptr(),
+ extra_passes.len() as size_t,
+ );
+ llvm::LLVMRustPassManagerBuilderPopulateFunctionPassManager(b, fpm);
+ llvm::LLVMRustPassManagerBuilderPopulateModulePassManager(b, mpm);
+ });
+
+ have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto;
+ if using_thin_buffers && !prepare_for_thin_lto {
+ llvm::LLVMRustAddPass(mpm, find_pass("name-anon-globals").unwrap());
+ have_name_anon_globals_pass = true;
+ }
+ } else {
+ // If we don't use the standard pipeline, directly populate the MPM
+ // with the extra passes.
+ for pass in extra_passes {
+ llvm::LLVMRustAddPass(mpm, pass);
+ }
+ }
+
+ if using_thin_buffers && !have_name_anon_globals_pass {
+ // As described above, this will probably cause an error in LLVM
+ if config.no_prepopulate_passes {
+ diag_handler.err(
+ "The current compilation is going to use thin LTO buffers \
+ without running LLVM's NameAnonGlobals pass. \
+ This will likely cause errors in LLVM. Consider adding \
+ -C passes=name-anon-globals to the compiler command line.",
+ );
+ } else {
+ bug!(
+ "We are using thin LTO buffers without running the NameAnonGlobals pass. \
+ This will likely cause errors in LLVM and should never happen."
+ );
+ }
+ }
+ }
+
+ diag_handler.abort_if_errors();
+
+ // Finally, run the actual optimization passes
+ {
+ let _timer = cgcx.prof.extra_verbose_generic_activity(
+ "LLVM_module_optimize_function_passes",
+ &*module.name,
+ );
+ llvm::LLVMRustRunFunctionPassManager(fpm, llmod);
+ }
+ {
+ let _timer = cgcx.prof.extra_verbose_generic_activity(
+ "LLVM_module_optimize_module_passes",
+ &*module.name,
+ );
+ llvm::LLVMRunPassManager(mpm, llmod);
+ }
+
+ // Deallocate managers that we're now done with
+ llvm::LLVMDisposePassManager(fpm);
+ llvm::LLVMDisposePassManager(mpm);
+ }
+ Ok(())
+}
+
+unsafe fn add_sanitizer_passes(config: &ModuleConfig, passes: &mut Vec<&'static mut llvm::Pass>) {
+ if config.sanitizer.contains(SanitizerSet::ADDRESS) {
+ let recover = config.sanitizer_recover.contains(SanitizerSet::ADDRESS);
+ passes.push(llvm::LLVMRustCreateAddressSanitizerFunctionPass(recover));
+ passes.push(llvm::LLVMRustCreateModuleAddressSanitizerPass(recover));
+ }
+ if config.sanitizer.contains(SanitizerSet::MEMORY) {
+ let track_origins = config.sanitizer_memory_track_origins as c_int;
+ let recover = config.sanitizer_recover.contains(SanitizerSet::MEMORY);
+ passes.push(llvm::LLVMRustCreateMemorySanitizerPass(track_origins, recover));
+ }
+ if config.sanitizer.contains(SanitizerSet::THREAD) {
+ passes.push(llvm::LLVMRustCreateThreadSanitizerPass());
+ }
+ if config.sanitizer.contains(SanitizerSet::HWADDRESS) {
+ let recover = config.sanitizer_recover.contains(SanitizerSet::HWADDRESS);
+ passes.push(llvm::LLVMRustCreateHWAddressSanitizerPass(recover));
+ }
+}
+
+pub(crate) fn link(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ mut modules: Vec<ModuleCodegen<ModuleLlvm>>,
+) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> {
+ use super::lto::{Linker, ModuleBuffer};
+ // Sort the modules by name to ensure deterministic behavior.
+ modules.sort_by(|a, b| a.name.cmp(&b.name));
+ let (first, elements) =
+ modules.split_first().expect("Bug! modules must contain at least one module.");
+
+ let mut linker = Linker::new(first.module_llvm.llmod());
+ for module in elements {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name);
+ let buffer = ModuleBuffer::new(module.module_llvm.llmod());
+ linker.add(buffer.data()).map_err(|()| {
+ let msg = format!("failed to serialize module {:?}", module.name);
+ llvm_err(diag_handler, &msg)
+ })?;
+ }
+ drop(linker);
+ Ok(modules.remove(0))
+}
+
+pub(crate) unsafe fn codegen(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ diag_handler: &Handler,
+ module: ModuleCodegen<ModuleLlvm>,
+ config: &ModuleConfig,
+) -> Result<CompiledModule, FatalError> {
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
+ {
+ let llmod = module.module_llvm.llmod();
+ let llcx = &*module.module_llvm.llcx;
+ let tm = &*module.module_llvm.tm;
+ let module_name = module.name.clone();
+ let module_name = Some(&module_name[..]);
+ let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx);
+
+ if cgcx.msvc_imps_needed {
+ create_msvc_imps(cgcx, llcx, llmod);
+ }
+
+ // A codegen-specific pass manager is used to generate object
+ // files for an LLVM module.
+ //
+ // Apparently each of these pass managers is a one-shot kind of
+ // thing, so we create a new one for each type of output. The
+ // pass manager passed to the closure must not be allowed to
+ // escape the closure itself, and the manager should only be
+ // used once.
+ unsafe fn with_codegen<'ll, F, R>(
+ tm: &'ll llvm::TargetMachine,
+ llmod: &'ll llvm::Module,
+ no_builtins: bool,
+ f: F,
+ ) -> R
+ where
+ F: FnOnce(&'ll mut PassManager<'ll>) -> R,
+ {
+ let cpm = llvm::LLVMCreatePassManager();
+ llvm::LLVMAddAnalysisPasses(tm, cpm);
+ llvm::LLVMRustAddLibraryInfo(cpm, llmod, no_builtins);
+ f(cpm)
+ }
+
+ // Two things to note:
+ // - If object files are just LLVM bitcode we write bitcode, copy it to
+ // the .o file, and delete the bitcode if it wasn't otherwise
+ // requested.
+ // - If we don't have the integrated assembler then we need to emit
+ // asm from LLVM and use `gcc` to create the object file.
+
+ let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
+ let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+
+ if config.bitcode_needed() {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &*module.name);
+ let thin = ThinBuffer::new(llmod, config.emit_thin_lto);
+ let data = thin.data();
+
+ if let Some(bitcode_filename) = bc_out.file_name() {
+ cgcx.prof.artifact_size(
+ "llvm_bitcode",
+ bitcode_filename.to_string_lossy(),
+ data.len() as u64,
+ );
+ }
+
+ if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name);
+ if let Err(e) = fs::write(&bc_out, data) {
+ let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
+ diag_handler.err(&msg);
+ }
+ }
+
+ if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name);
+ embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
+ }
+ }
+
+ if config.emit_ir {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &*module.name);
+ let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
+ let out_c = path_to_c_string(&out);
+
+ extern "C" fn demangle_callback(
+ input_ptr: *const c_char,
+ input_len: size_t,
+ output_ptr: *mut c_char,
+ output_len: size_t,
+ ) -> size_t {
+ let input =
+ unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) };
+
+ let Ok(input) = str::from_utf8(input) else { return 0 };
+
+ let output = unsafe {
+ slice::from_raw_parts_mut(output_ptr as *mut u8, output_len as usize)
+ };
+ let mut cursor = io::Cursor::new(output);
+
+ let Ok(demangled) = rustc_demangle::try_demangle(input) else { return 0 };
+
+ if write!(cursor, "{:#}", demangled).is_err() {
+ // Possible only if the provided buffer is not big enough
+ return 0;
+ }
+
+ cursor.position() as size_t
+ }
+
+ let result = llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback);
+
+ if result == llvm::LLVMRustResult::Success {
+ record_artifact_size(&cgcx.prof, "llvm_ir", &out);
+ }
+
+ result.into_result().map_err(|()| {
+ let msg = format!("failed to write LLVM IR to {}", out.display());
+ llvm_err(diag_handler, &msg)
+ })?;
+ }
+
+ if config.emit_asm {
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
+ let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+
+ // We can't use the same module for asm and object code output,
+ // because that triggers various errors like invalid IR or broken
+ // binaries. So we must clone the module to produce the asm output
+ // if we are also producing object code.
+ let llmod = if let EmitObj::ObjectCode(_) = config.emit_obj {
+ llvm::LLVMCloneModule(llmod)
+ } else {
+ llmod
+ };
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(
+ diag_handler,
+ tm,
+ cpm,
+ llmod,
+ &path,
+ None,
+ llvm::FileType::AssemblyFile,
+ &cgcx.prof,
+ )
+ })?;
+ }
+
+ match config.emit_obj {
+ EmitObj::ObjectCode(_) => {
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name);
+
+ let dwo_out = cgcx.output_filenames.temp_path_dwo(module_name);
+ let dwo_out = match (cgcx.split_debuginfo, cgcx.split_dwarf_kind) {
+ // Don't change how DWARF is emitted when disabled.
+ (SplitDebuginfo::Off, _) => None,
+ // Don't provide a DWARF object path if split debuginfo is enabled but this is
+ // a platform that doesn't support Split DWARF.
+ _ if !cgcx.target_can_use_split_dwarf => None,
+ // Don't provide a DWARF object path in single mode, sections will be written
+ // into the object as normal but ignored by linker.
+ (_, SplitDwarfKind::Single) => None,
+ // Emit (a subset of the) DWARF into a separate dwarf object file in split
+ // mode.
+ (_, SplitDwarfKind::Split) => Some(dwo_out.as_path()),
+ };
+
+ with_codegen(tm, llmod, config.no_builtins, |cpm| {
+ write_output_file(
+ diag_handler,
+ tm,
+ cpm,
+ llmod,
+ &obj_out,
+ dwo_out,
+ llvm::FileType::ObjectFile,
+ &cgcx.prof,
+ )
+ })?;
+ }
+
+ EmitObj::Bitcode => {
+ debug!("copying bitcode {:?} to obj {:?}", bc_out, obj_out);
+ if let Err(e) = link_or_copy(&bc_out, &obj_out) {
+ diag_handler.err(&format!("failed to copy bitcode to object file: {}", e));
+ }
+
+ if !config.emit_bc {
+ debug!("removing_bitcode {:?}", bc_out);
+ ensure_removed(diag_handler, &bc_out);
+ }
+ }
+
+ EmitObj::None => {}
+ }
+
+ drop(handlers);
+ }
+
+ Ok(module.into_compiled_module(
+ config.emit_obj != EmitObj::None,
+ cgcx.target_can_use_split_dwarf
+ && cgcx.split_debuginfo != SplitDebuginfo::Off
+ && cgcx.split_dwarf_kind == SplitDwarfKind::Split,
+ config.emit_bc,
+ &cgcx.output_filenames,
+ ))
+}
+
+fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
+ let mut asm = format!(".section {},\"{}\"\n", section_name, section_flags).into_bytes();
+ asm.extend_from_slice(b".ascii \"");
+ asm.reserve(data.len());
+ for &byte in data {
+ if byte == b'\\' || byte == b'"' {
+ asm.push(b'\\');
+ asm.push(byte);
+ } else if byte < 0x20 || byte >= 0x80 {
+ // Avoid non-UTF-8 inline assembly. Use octal escape sequences, because they
+ // are fixed-width, while hex escapes would consume the characters that follow.
+ asm.push(b'\\');
+ asm.push(b'0' + ((byte >> 6) & 0x7));
+ asm.push(b'0' + ((byte >> 3) & 0x7));
+ asm.push(b'0' + ((byte >> 0) & 0x7));
+ } else {
+ asm.push(byte);
+ }
+ }
+ asm.extend_from_slice(b"\"\n");
+ asm
+}
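+
+// For example (illustrative):
+// `create_section_with_flags_asm(".llvmbc", "e", &[0x01, b'A', b'"'])`
+// produces the module-level assembly
+//
+//     .section .llvmbc,"e"
+//     .ascii "\001A\""
+//
+// where 0x01 becomes the fixed-width octal escape `\001` and the quote is
+// backslash-escaped.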
+
+/// Embed the bitcode of an LLVM module in the LLVM module itself.
+///
+/// This is done primarily for iOS where it appears to be standard to compile C
+/// code at least with `-fembed-bitcode` which creates two sections in the
+/// executable:
+///
+/// * __LLVM,__bitcode
+/// * __LLVM,__cmdline
+///
+/// It appears *both* of these sections are necessary to get the linker to
+/// recognize what's going on. A suitable cmdline value is taken from the
+/// target spec.
+///
+/// Furthermore, debug/O1 builds don't actually embed bitcode but rather just
+/// embed an empty section.
+///
+/// Basically all of this is us attempting to follow in the footsteps of clang
+/// on iOS. See #35968 for lots more info.
+unsafe fn embed_bitcode(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ llcx: &llvm::Context,
+ llmod: &llvm::Module,
+ cmdline: &str,
+ bitcode: &[u8],
+) {
+ // We're adding custom sections to the output object file, but we definitely
+ // do not want these custom sections to make their way into the final linked
+ // executable. The purpose of these custom sections is for tooling
+ // surrounding object files to work with the LLVM IR, if necessary. For
+ // example rustc's own LTO will look for LLVM IR inside of the object file
+ // in these sections by default.
+ //
+ // How we handle this differs a bit depending on the object file format
+ // used by the backend, broken down into a few different categories:
+ //
+ // * Mach-O - this is for macOS. Inspecting the source code for the native
+ // linker here shows that the `.llvmbc` and `.llvmcmd` sections are
+ // automatically skipped by the linker. In that case there's nothing extra
+ // that we need to do here.
+ //
+ // * Wasm - the native LLD linker is hard-coded to skip `.llvmbc` and
+ // `.llvmcmd` sections, so there's nothing extra we need to do.
+ //
+ // * COFF - if we don't do anything the linker will by default copy all
+ // these sections to the output artifact, not what we want! To subvert
+ // this we want to flag the sections we inserted here as
+ // `IMAGE_SCN_LNK_REMOVE`.
+ //
+ // * ELF - this is very similar to COFF above. One difference is that these
+ // sections are removed from the output linked artifact when
+ // `--gc-sections` is passed, which we pass by default. If that flag isn't
+ // passed though then these sections will show up in the final output.
+ // Additionally the flag that we need to set here is `SHF_EXCLUDE`.
+ //
+ // Unfortunately, LLVM provides no way to set custom section flags. For ELF
+ // and COFF we emit the sections using module level inline assembly for that
+ // reason (see issue #90326 for historical background).
+ let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
+ || cgcx.opts.target_triple.triple().contains("-darwin")
+ || cgcx.opts.target_triple.triple().contains("-tvos")
+ || cgcx.opts.target_triple.triple().contains("-watchos");
+ if is_apple
+ || cgcx.opts.target_triple.triple().starts_with("wasm")
+ || cgcx.opts.target_triple.triple().starts_with("asmjs")
+ {
+ // We don't need custom section flags, create LLVM globals.
+ let llconst = common::bytes_in_context(llcx, bitcode);
+ let llglobal = llvm::LLVMAddGlobal(
+ llmod,
+ common::val_ty(llconst),
+ "rustc.embedded.module\0".as_ptr().cast(),
+ );
+ llvm::LLVMSetInitializer(llglobal, llconst);
+
+ let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
+ llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+ llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+ llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
+
+ let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
+ let llglobal = llvm::LLVMAddGlobal(
+ llmod,
+ common::val_ty(llconst),
+ "rustc.embedded.cmdline\0".as_ptr().cast(),
+ );
+ llvm::LLVMSetInitializer(llglobal, llconst);
+ let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
+ llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+ llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+ } else {
+ // We need custom section flags, so emit module-level inline assembly.
+ let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
+ let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode);
+ llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+ let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes());
+ llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+ }
+}
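+
+// Editorial usage sketch (hypothetical values, not in the upstream source):
+// what the ELF branch above appends for a short cmdline. The "e" section flag
+// marks the section SHF_EXCLUDE, so linkers drop it from the linked artifact.
+#[allow(dead_code)]
+fn embed_cmdline_asm_example() {
+    let asm = create_section_with_flags_asm(".llvmcmd", "e", b"-cc1");
+    assert_eq!(asm, b".section .llvmcmd,\"e\"\n.ascii \"-cc1\"\n".to_vec());
+}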
+
+pub unsafe fn with_llvm_pmb(
+ llmod: &llvm::Module,
+ config: &ModuleConfig,
+ opt_level: llvm::CodeGenOptLevel,
+ prepare_for_thin_lto: bool,
+ f: &mut dyn FnMut(&llvm::PassManagerBuilder),
+) {
+ use std::ptr;
+
+ // Create the PassManagerBuilder for LLVM. We configure it with
+ // reasonable defaults and prepare it to actually populate the pass
+ // manager.
+ let builder = llvm::LLVMRustPassManagerBuilderCreate();
+ let opt_size = config.opt_size.map_or(llvm::CodeGenOptSizeNone, |x| to_llvm_opt_settings(x).1);
+ let inline_threshold = config.inline_threshold;
+ let pgo_gen_path = get_pgo_gen_path(config);
+ let pgo_use_path = get_pgo_use_path(config);
+ let pgo_sample_use_path = get_pgo_sample_use_path(config);
+
+ llvm::LLVMRustConfigurePassManagerBuilder(
+ builder,
+ opt_level,
+ config.merge_functions,
+ config.vectorize_slp,
+ config.vectorize_loop,
+ prepare_for_thin_lto,
+ pgo_gen_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+ pgo_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+ pgo_sample_use_path.as_ref().map_or(ptr::null(), |s| s.as_ptr()),
+ opt_size as c_int,
+ );
+
+ llvm::LLVMRustAddBuilderLibraryInfo(builder, llmod, config.no_builtins);
+
+    // Here we match what clang does (kinda). For O0 we only inline
+    // always-inline functions (but don't add lifetime intrinsics), at O1 we
+    // inline with lifetime intrinsics, and at O2+ we add an inliner with
+    // thresholds copied from clang.
+ match (opt_level, opt_size, inline_threshold) {
+ (.., Some(t)) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, t);
+ }
+ (llvm::CodeGenOptLevel::Aggressive, ..) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 275);
+ }
+ (_, llvm::CodeGenOptSizeDefault, _) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 75);
+ }
+ (_, llvm::CodeGenOptSizeAggressive, _) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 25);
+ }
+ (llvm::CodeGenOptLevel::None, ..) => {
+ llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
+ }
+ (llvm::CodeGenOptLevel::Less, ..) => {
+ llvm::LLVMRustAddAlwaysInlinePass(builder, config.emit_lifetime_markers);
+ }
+ (llvm::CodeGenOptLevel::Default, ..) => {
+ llvm::LLVMRustPassManagerBuilderUseInlinerWithThreshold(builder, 225);
+ }
+ }
+
+ f(builder);
+ llvm::LLVMRustPassManagerBuilderDispose(builder);
+}
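+
+// Editorial sketch (plain types instead of the FFI enums, not upstream code):
+// the precedence encoded by the match above. An explicit user threshold always
+// wins; otherwise -O3 gets 275, -Os/-Oz get 75/25, -O2 gets 225, and -O0/-O1
+// only run the always-inline pass (signalled here by `None`).
+#[allow(dead_code)]
+fn inline_threshold(opt_level: u8, opt_size: u8, explicit: Option<u32>) -> Option<u32> {
+    match (opt_level, opt_size, explicit) {
+        (.., Some(t)) => Some(t),
+        (3, ..) => Some(275),  // CodeGenOptLevel::Aggressive
+        (_, 1, _) => Some(75), // CodeGenOptSizeDefault
+        (_, 2, _) => Some(25), // CodeGenOptSizeAggressive
+        (0 | 1, ..) => None,   // None/Less: always-inline pass only
+        _ => Some(225),        // CodeGenOptLevel::Default
+    }
+}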
+
+// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
+// This is required to satisfy `dllimport` references to static data in .rlibs
+// when using the MSVC linker. We do this only for data, as the linker can fix
+// up code references on its own.
+// See #26591, #27438.
+fn create_msvc_imps(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ llcx: &llvm::Context,
+ llmod: &llvm::Module,
+) {
+ if !cgcx.msvc_imps_needed {
+ return;
+ }
+    // The x86 ABI seems to require that leading underscores be added to symbol
+ // names, so we need an extra underscore on x86. There's also a leading
+ // '\x01' here which disables LLVM's symbol mangling (e.g., no extra
+ // underscores added in front).
+ let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
+
+ unsafe {
+ let i8p_ty = Type::i8p_llcx(llcx);
+ let globals = base::iter_globals(llmod)
+ .filter(|&val| {
+ llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
+ && llvm::LLVMIsDeclaration(val) == 0
+ })
+ .filter_map(|val| {
+ // Exclude some symbols that we know are not Rust symbols.
+ let name = llvm::get_value_name(val);
+ if ignored(name) { None } else { Some((val, name)) }
+ })
+ .map(move |(val, name)| {
+ let mut imp_name = prefix.as_bytes().to_vec();
+ imp_name.extend(name);
+ let imp_name = CString::new(imp_name).unwrap();
+ (imp_name, val)
+ })
+ .collect::<Vec<_>>();
+
+ for (imp_name, val) in globals {
+ let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
+ llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
+ llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
+ }
+ }
+
+ // Use this function to exclude certain symbols from `__imp` generation.
+ fn ignored(symbol_name: &[u8]) -> bool {
+ // These are symbols generated by LLVM's profiling instrumentation
+ symbol_name.starts_with(b"__llvm_profile_")
+ }
+}
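+
+// Editorial sketch (hypothetical helper, not upstream code): the stub name
+// computed above. The leading '\x01' byte tells LLVM not to mangle the name,
+// and x86 additionally gets the ABI's extra leading underscore.
+#[allow(dead_code)]
+fn imp_symbol_name(target_arch: &str, symbol: &str) -> String {
+    let prefix = if target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };
+    format!("{}{}", prefix, symbol)
+}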
+
+fn record_artifact_size(
+ self_profiler_ref: &SelfProfilerRef,
+ artifact_kind: &'static str,
+ path: &Path,
+) {
+ // Don't stat the file if we are not going to record its size.
+ if !self_profiler_ref.enabled() {
+ return;
+ }
+
+ if let Some(artifact_name) = path.file_name() {
+ let file_size = std::fs::metadata(path).map(|m| m.len()).unwrap_or(0);
+ self_profiler_ref.artifact_size(artifact_kind, artifact_name.to_string_lossy(), file_size);
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
new file mode 100644
index 000000000..86f92dc02
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -0,0 +1,173 @@
+//! Codegen the MIR to the LLVM IR.
+//!
+//! Hopefully useful general knowledge about codegen:
+//!
+//! * There's no way to find out the [`Ty`] type of a [`Value`]. Doing so
+//! would be "trying to get the eggs out of an omelette" (credit:
+//! pcwalton). You can, instead, find out its [`llvm::Type`] by calling [`val_ty`],
+//! but one [`llvm::Type`] corresponds to many [`Ty`]s; for instance, `tup(int, int,
+//! int)` and `rec(x=int, y=int, z=int)` will have the same [`llvm::Type`].
+//!
+//! [`Ty`]: rustc_middle::ty::Ty
+//! [`val_ty`]: crate::common::val_ty
+
+use super::ModuleLlvm;
+
+use crate::attributes;
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
+use rustc_codegen_ssa::mono_item::MonoItemExt;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_middle::dep_graph;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::DebugInfo;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::SanitizerSet;
+
+use std::time::Instant;
+
+pub struct ValueIter<'ll> {
+ cur: Option<&'ll Value>,
+ step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
+}
+
+impl<'ll> Iterator for ValueIter<'ll> {
+ type Item = &'ll Value;
+
+ fn next(&mut self) -> Option<&'ll Value> {
+ let old = self.cur;
+ if let Some(old) = old {
+ self.cur = unsafe { (self.step)(old) };
+ }
+ old
+ }
+}
+
+pub fn iter_globals(llmod: &llvm::Module) -> ValueIter<'_> {
+ unsafe { ValueIter { cur: llvm::LLVMGetFirstGlobal(llmod), step: llvm::LLVMGetNextGlobal } }
+}
+
+pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen<ModuleLlvm>, u64) {
+ let start_time = Instant::now();
+
+ let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
+ let (module, _) = tcx.dep_graph.with_task(
+ dep_node,
+ tcx,
+ cgu_name,
+ module_codegen,
+ Some(dep_graph::hash_result),
+ );
+ let time_to_codegen = start_time.elapsed();
+
+ // We assume that the cost to run LLVM on a CGU is proportional to
+ // the time we needed for codegenning it.
+ let cost = time_to_codegen.as_nanos() as u64;
+
+ fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm> {
+ let cgu = tcx.codegen_unit(cgu_name);
+ let _prof_timer =
+ tcx.prof.generic_activity_with_arg_recorder("codegen_module", |recorder| {
+ recorder.record_arg(cgu_name.to_string());
+ recorder.record_arg(cgu.size_estimate().to_string());
+ });
+ // Instantiate monomorphizations without filling out definitions yet...
+ let llvm_module = ModuleLlvm::new(tcx, cgu_name.as_str());
+ {
+ let cx = CodegenCx::new(tcx, cgu, &llvm_module);
+ let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
+ for &(mono_item, (linkage, visibility)) in &mono_items {
+ mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
+ }
+
+ // ... and now that we have everything pre-defined, fill out those definitions.
+ for &(mono_item, _) in &mono_items {
+ mono_item.define::<Builder<'_, '_, '_>>(&cx);
+ }
+
+ // If this codegen unit contains the main function, also create the
+ // wrapper here
+ if let Some(entry) = maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx) {
+ let attrs = attributes::sanitize_attrs(&cx, SanitizerSet::empty());
+ attributes::apply_to_llfn(entry, llvm::AttributePlace::Function, &attrs);
+ }
+
+ // Finalize code coverage by injecting the coverage map. Note, the coverage map will
+ // also be added to the `llvm.compiler.used` variable, created next.
+ if cx.sess().instrument_coverage() {
+ cx.coverageinfo_finalize();
+ }
+
+ // Create the llvm.used and llvm.compiler.used variables.
+ if !cx.used_statics().borrow().is_empty() {
+ cx.create_used_variable()
+ }
+ if !cx.compiler_used_statics().borrow().is_empty() {
+ cx.create_compiler_used_variable()
+ }
+
+ // Run replace-all-uses-with for statics that need it. This must
+ // happen after the llvm.used variables are created.
+ for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
+ unsafe {
+ let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
+ llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+ llvm::LLVMDeleteGlobal(old_g);
+ }
+ }
+
+ // Finalize debuginfo
+ if cx.sess().opts.debuginfo != DebugInfo::None {
+ cx.debuginfo_finalize();
+ }
+ }
+
+ ModuleCodegen {
+ name: cgu_name.to_string(),
+ module_llvm: llvm_module,
+ kind: ModuleKind::Regular,
+ }
+ }
+
+ (module, cost)
+}
+
+pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
+ let Some(sect) = attrs.link_section else { return };
+ unsafe {
+ let buf = SmallCStr::new(sect.as_str());
+ llvm::LLVMSetSection(llval, buf.as_ptr());
+ }
+}
+
+pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
+ match linkage {
+ Linkage::External => llvm::Linkage::ExternalLinkage,
+ Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
+ Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
+ Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
+ Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
+ Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
+ Linkage::Appending => llvm::Linkage::AppendingLinkage,
+ Linkage::Internal => llvm::Linkage::InternalLinkage,
+ Linkage::Private => llvm::Linkage::PrivateLinkage,
+ Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
+ Linkage::Common => llvm::Linkage::CommonLinkage,
+ }
+}
+
+pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
+ match linkage {
+ Visibility::Default => llvm::Visibility::Default,
+ Visibility::Hidden => llvm::Visibility::Hidden,
+ Visibility::Protected => llvm::Visibility::Protected,
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
new file mode 100644
index 000000000..d3096c73a
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -0,0 +1,1508 @@
+use crate::attributes;
+use crate::common::Funclet;
+use crate::context::CodegenCx;
+use crate::llvm::{self, BasicBlock, False};
+use crate::llvm::{AtomicOrdering, AtomicRmwBinOp, SynchronizationScope};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use cstr::cstr;
+use libc::{c_char, c_uint};
+use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, TypeKind};
+use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::MemFlags;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::layout::{
+ FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
+};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::Span;
+use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
+use rustc_target::spec::{HasTargetSpec, Target};
+use std::borrow::Cow;
+use std::ffi::CStr;
+use std::iter;
+use std::ops::Deref;
+use std::ptr;
+use tracing::{debug, instrument};
+
+// All Builders must have an llfn associated with them
+#[must_use]
+pub struct Builder<'a, 'll, 'tcx> {
+ pub llbuilder: &'ll mut llvm::Builder<'ll>,
+ pub cx: &'a CodegenCx<'ll, 'tcx>,
+}
+
+impl Drop for Builder<'_, '_, '_> {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _));
+ }
+ }
+}
+
+// FIXME(eddyb) use a checked constructor when they become `const fn`.
+const EMPTY_C_STR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"\0") };
+
+/// Empty string, to be used where LLVM expects an instruction name, indicating
+/// that the instruction is to be left unnamed (i.e. numbered, in textual IR).
+// FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer.
+const UNNAMED: *const c_char = EMPTY_C_STR.as_ptr();
+
+impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> {
+ type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
+ type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function;
+ type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
+ type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
+ type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet;
+
+ type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
+ type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation;
+ type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable;
+}
+
+impl abi::HasDataLayout for Builder<'_, '_, '_> {
+ fn data_layout(&self) -> &abi::TargetDataLayout {
+ self.cx.data_layout()
+ }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.cx.tcx
+ }
+}
+
+impl<'tcx> ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.cx.param_env()
+ }
+}
+
+impl HasTargetSpec for Builder<'_, '_, '_> {
+ #[inline]
+ fn target_spec(&self) -> &Target {
+ self.cx.target_spec()
+ }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ self.cx.handle_layout_err(err, span, ty)
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ self.cx.handle_fn_abi_err(err, span, fn_abi_request)
+ }
+}
+
+impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> {
+ type Target = CodegenCx<'ll, 'tcx>;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ self.cx
+ }
+}
+
+impl<'ll, 'tcx> HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> {
+ type CodegenCx = CodegenCx<'ll, 'tcx>;
+}
+
+macro_rules! builder_methods_for_value_instructions {
+ ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
+ $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
+ unsafe {
+ llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
+ }
+ })+
+ }
+}
+
+impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
+ fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Self {
+ let bx = Builder::with_cx(cx);
+ unsafe {
+ llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb);
+ }
+ bx
+ }
+
+ fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
+ self.cx
+ }
+
+ fn llbb(&self) -> &'ll BasicBlock {
+ unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) }
+ }
+
+ fn set_span(&mut self, _span: Span) {}
+
+ fn append_block(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &str) -> &'ll BasicBlock {
+ unsafe {
+ let name = SmallCStr::new(name);
+ llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, name.as_ptr())
+ }
+ }
+
+ fn append_sibling_block(&mut self, name: &str) -> &'ll BasicBlock {
+ Self::append_block(self.cx, self.llfn(), name)
+ }
+
+ fn switch_to_block(&mut self, llbb: Self::BasicBlock) {
+ *self = Self::build(self.cx, llbb)
+ }
+
+ fn ret_void(&mut self) {
+ unsafe {
+ llvm::LLVMBuildRetVoid(self.llbuilder);
+ }
+ }
+
+ fn ret(&mut self, v: &'ll Value) {
+ unsafe {
+ llvm::LLVMBuildRet(self.llbuilder, v);
+ }
+ }
+
+ fn br(&mut self, dest: &'ll BasicBlock) {
+ unsafe {
+ llvm::LLVMBuildBr(self.llbuilder, dest);
+ }
+ }
+
+ fn cond_br(
+ &mut self,
+ cond: &'ll Value,
+ then_llbb: &'ll BasicBlock,
+ else_llbb: &'ll BasicBlock,
+ ) {
+ unsafe {
+ llvm::LLVMBuildCondBr(self.llbuilder, cond, then_llbb, else_llbb);
+ }
+ }
+
+ fn switch(
+ &mut self,
+ v: &'ll Value,
+ else_llbb: &'ll BasicBlock,
+ cases: impl ExactSizeIterator<Item = (u128, &'ll BasicBlock)>,
+ ) {
+ let switch =
+ unsafe { llvm::LLVMBuildSwitch(self.llbuilder, v, else_llbb, cases.len() as c_uint) };
+ for (on_val, dest) in cases {
+ let on_val = self.const_uint_big(self.val_ty(v), on_val);
+ unsafe { llvm::LLVMAddCase(switch, on_val, dest) }
+ }
+ }
+
+ fn invoke(
+ &mut self,
+ llty: &'ll Type,
+ llfn: &'ll Value,
+ args: &[&'ll Value],
+ then: &'ll BasicBlock,
+ catch: &'ll BasicBlock,
+ funclet: Option<&Funclet<'ll>>,
+ ) -> &'ll Value {
+ debug!("invoke {:?} with args ({:?})", llfn, args);
+
+ let args = self.check_call("invoke", llty, llfn, args);
+ let bundle = funclet.map(|funclet| funclet.bundle());
+ let bundle = bundle.as_ref().map(|b| &*b.raw);
+
+ unsafe {
+ llvm::LLVMRustBuildInvoke(
+ self.llbuilder,
+ llty,
+ llfn,
+ args.as_ptr(),
+ args.len() as c_uint,
+ then,
+ catch,
+ bundle,
+ UNNAMED,
+ )
+ }
+ }
+
+ fn unreachable(&mut self) {
+ unsafe {
+ llvm::LLVMBuildUnreachable(self.llbuilder);
+ }
+ }
+
+ builder_methods_for_value_instructions! {
+ add(a, b) => LLVMBuildAdd,
+ fadd(a, b) => LLVMBuildFAdd,
+ sub(a, b) => LLVMBuildSub,
+ fsub(a, b) => LLVMBuildFSub,
+ mul(a, b) => LLVMBuildMul,
+ fmul(a, b) => LLVMBuildFMul,
+ udiv(a, b) => LLVMBuildUDiv,
+ exactudiv(a, b) => LLVMBuildExactUDiv,
+ sdiv(a, b) => LLVMBuildSDiv,
+ exactsdiv(a, b) => LLVMBuildExactSDiv,
+ fdiv(a, b) => LLVMBuildFDiv,
+ urem(a, b) => LLVMBuildURem,
+ srem(a, b) => LLVMBuildSRem,
+ frem(a, b) => LLVMBuildFRem,
+ shl(a, b) => LLVMBuildShl,
+ lshr(a, b) => LLVMBuildLShr,
+ ashr(a, b) => LLVMBuildAShr,
+ and(a, b) => LLVMBuildAnd,
+ or(a, b) => LLVMBuildOr,
+ xor(a, b) => LLVMBuildXor,
+ neg(x) => LLVMBuildNeg,
+ fneg(x) => LLVMBuildFNeg,
+ not(x) => LLVMBuildNot,
+ unchecked_sadd(x, y) => LLVMBuildNSWAdd,
+ unchecked_uadd(x, y) => LLVMBuildNUWAdd,
+ unchecked_ssub(x, y) => LLVMBuildNSWSub,
+ unchecked_usub(x, y) => LLVMBuildNUWSub,
+ unchecked_smul(x, y) => LLVMBuildNSWMul,
+ unchecked_umul(x, y) => LLVMBuildNUWMul,
+ }
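+
+    // Editorial note (not upstream): each entry above expands, via the
+    // `builder_methods_for_value_instructions!` macro defined earlier, to a
+    // thin wrapper; for example `add(a, b) => LLVMBuildAdd` becomes, roughly:
+    //
+    //     fn add(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
+    //         unsafe { llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED) }
+    //     }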
+
+ fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+
+ fn checked_binop(
+ &mut self,
+ oop: OverflowOp,
+ ty: Ty<'_>,
+ lhs: Self::Value,
+ rhs: Self::Value,
+ ) -> (Self::Value, Self::Value) {
+ use rustc_middle::ty::{Int, Uint};
+ use rustc_middle::ty::{IntTy::*, UintTy::*};
+
+ let new_kind = match ty.kind() {
+ Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+ Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+ t @ (Uint(_) | Int(_)) => t.clone(),
+ _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+ };
+
+ let name = match oop {
+ OverflowOp::Add => match new_kind {
+ Int(I8) => "llvm.sadd.with.overflow.i8",
+ Int(I16) => "llvm.sadd.with.overflow.i16",
+ Int(I32) => "llvm.sadd.with.overflow.i32",
+ Int(I64) => "llvm.sadd.with.overflow.i64",
+ Int(I128) => "llvm.sadd.with.overflow.i128",
+
+ Uint(U8) => "llvm.uadd.with.overflow.i8",
+ Uint(U16) => "llvm.uadd.with.overflow.i16",
+ Uint(U32) => "llvm.uadd.with.overflow.i32",
+ Uint(U64) => "llvm.uadd.with.overflow.i64",
+ Uint(U128) => "llvm.uadd.with.overflow.i128",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Sub => match new_kind {
+ Int(I8) => "llvm.ssub.with.overflow.i8",
+ Int(I16) => "llvm.ssub.with.overflow.i16",
+ Int(I32) => "llvm.ssub.with.overflow.i32",
+ Int(I64) => "llvm.ssub.with.overflow.i64",
+ Int(I128) => "llvm.ssub.with.overflow.i128",
+
+ Uint(U8) => "llvm.usub.with.overflow.i8",
+ Uint(U16) => "llvm.usub.with.overflow.i16",
+ Uint(U32) => "llvm.usub.with.overflow.i32",
+ Uint(U64) => "llvm.usub.with.overflow.i64",
+ Uint(U128) => "llvm.usub.with.overflow.i128",
+
+ _ => unreachable!(),
+ },
+ OverflowOp::Mul => match new_kind {
+ Int(I8) => "llvm.smul.with.overflow.i8",
+ Int(I16) => "llvm.smul.with.overflow.i16",
+ Int(I32) => "llvm.smul.with.overflow.i32",
+ Int(I64) => "llvm.smul.with.overflow.i64",
+ Int(I128) => "llvm.smul.with.overflow.i128",
+
+ Uint(U8) => "llvm.umul.with.overflow.i8",
+ Uint(U16) => "llvm.umul.with.overflow.i16",
+ Uint(U32) => "llvm.umul.with.overflow.i32",
+ Uint(U64) => "llvm.umul.with.overflow.i64",
+ Uint(U128) => "llvm.umul.with.overflow.i128",
+
+ _ => unreachable!(),
+ },
+ };
+
+ let res = self.call_intrinsic(name, &[lhs, rhs]);
+ (self.extract_value(res, 0), self.extract_value(res, 1))
+ }
+
+ fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
+ if self.cx().val_ty(val) == self.cx().type_i1() {
+ self.zext(val, self.cx().type_i8())
+ } else {
+ val
+ }
+ }
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
+ if scalar.is_bool() {
+ return self.trunc(val, self.cx().type_i1());
+ }
+ val
+ }
+
+ fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
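+        // Static allocas belong in the function's entry block (LLVM treats
+        // allocas elsewhere as dynamic), so position a fresh builder at the
+        // start of the first basic block before emitting (editorial note).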
+ let mut bx = Builder::with_cx(self.cx);
+ bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) });
+ bx.dynamic_alloca(ty, align)
+ }
+
+ fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
+ unsafe {
+ let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
+ llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
+ alloca
+ }
+ }
+
+ fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, align: Align) -> &'ll Value {
+ unsafe {
+ let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
+ llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
+ alloca
+ }
+ }
+
+ fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
+ unsafe {
+ let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
+ llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
+ load
+ }
+ }
+
+ fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
+ unsafe {
+ let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
+ llvm::LLVMSetVolatile(load, llvm::True);
+ load
+ }
+ }
+
+ fn atomic_load(
+ &mut self,
+ ty: &'ll Type,
+ ptr: &'ll Value,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
+ size: Size,
+ ) -> &'ll Value {
+ unsafe {
+ let load = llvm::LLVMRustBuildAtomicLoad(
+ self.llbuilder,
+ ty,
+ ptr,
+ UNNAMED,
+ AtomicOrdering::from_generic(order),
+ );
+ // LLVM requires the alignment of atomic loads to be at least the size of the type.
+ llvm::LLVMSetAlignment(load, size.bytes() as c_uint);
+ load
+ }
+ }
+
+ #[instrument(level = "trace", skip(self))]
+ fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> {
+ assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+
+ if place.layout.is_zst() {
+ return OperandRef::new_zst(self, place.layout);
+ }
+
+ #[instrument(level = "trace", skip(bx))]
+ fn scalar_load_metadata<'a, 'll, 'tcx>(
+ bx: &mut Builder<'a, 'll, 'tcx>,
+ load: &'ll Value,
+ scalar: abi::Scalar,
+ layout: TyAndLayout<'tcx>,
+ offset: Size,
+ ) {
+ if !scalar.is_always_valid(bx) {
+ bx.noundef_metadata(load);
+ }
+
+ match scalar.primitive() {
+ abi::Int(..) => {
+ if !scalar.is_always_valid(bx) {
+ bx.range_metadata(load, scalar.valid_range(bx));
+ }
+ }
+ abi::Pointer => {
+ if !scalar.valid_range(bx).contains(0) {
+ bx.nonnull_metadata(load);
+ }
+
+ if let Some(pointee) = layout.pointee_info_at(bx, offset) {
+ if let Some(_) = pointee.safe {
+ bx.align_metadata(load, pointee.align);
+ }
+ }
+ }
+ abi::F32 | abi::F64 => {}
+ }
+ }
+
+ let val = if let Some(llextra) = place.llextra {
+ OperandValue::Ref(place.llval, Some(llextra), place.align)
+ } else if place.layout.is_llvm_immediate() {
+ let mut const_llval = None;
+ let llty = place.layout.llvm_type(self);
+ unsafe {
+ if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+ if llvm::LLVMIsGlobalConstant(global) == llvm::True {
+ if let Some(init) = llvm::LLVMGetInitializer(global) {
+ if self.val_ty(init) == llty {
+ const_llval = Some(init);
+ }
+ }
+ }
+ }
+ }
+ let llval = const_llval.unwrap_or_else(|| {
+ let load = self.load(llty, place.llval, place.align);
+ if let abi::Abi::Scalar(scalar) = place.layout.abi {
+ scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
+ }
+ load
+ });
+ OperandValue::Immediate(self.to_immediate(llval, place.layout))
+ } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+ let b_offset = a.size(self).align_to(b.align(self).abi);
+ let pair_ty = place.layout.llvm_type(self);
+
+ let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
+ let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
+ let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
+ let load = self.load(llty, llptr, align);
+ scalar_load_metadata(self, load, scalar, layout, offset);
+ self.to_immediate_scalar(load, scalar)
+ };
+
+ OperandValue::Pair(
+ load(0, a, place.layout, place.align, Size::ZERO),
+ load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+ )
+ } else {
+ OperandValue::Ref(place.llval, None, place.align)
+ };
+
+ OperandRef { val, layout: place.layout }
+ }
+
+ fn write_operand_repeatedly(
+ mut self,
+ cg_elem: OperandRef<'tcx, &'ll Value>,
+ count: u64,
+ dest: PlaceRef<'tcx, &'ll Value>,
+ ) -> Self {
+ let zero = self.const_usize(0);
+ let count = self.const_usize(count);
+ let start = dest.project_index(&mut self, zero).llval;
+ let end = dest.project_index(&mut self, count).llval;
+
+ let header_bb = self.append_sibling_block("repeat_loop_header");
+ let body_bb = self.append_sibling_block("repeat_loop_body");
+ let next_bb = self.append_sibling_block("repeat_loop_next");
+
+ self.br(header_bb);
+
+ let mut header_bx = Self::build(self.cx, header_bb);
+ let current = header_bx.phi(self.val_ty(start), &[start], &[self.llbb()]);
+
+ let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
+ header_bx.cond_br(keep_going, body_bb, next_bb);
+
+ let mut body_bx = Self::build(self.cx, body_bb);
+ let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
+ cg_elem
+ .val
+ .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
+
+ let next = body_bx.inbounds_gep(
+ self.backend_type(cg_elem.layout),
+ current,
+ &[self.const_usize(1)],
+ );
+ body_bx.br(header_bb);
+ header_bx.add_incoming_to_phi(current, next, body_bb);
+
+ Self::build(self.cx, next_bb)
+ }
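+
+    // Editorial sketch (not upstream code): the block structure emitted above
+    // corresponds to this source-level loop, with `current` playing the role
+    // of the phi node:
+    //
+    //     unsafe fn write_repeatedly(start: *mut u8, end: *mut u8, byte: u8) {
+    //         let mut current = start;      // phi(start, next)
+    //         while current != end {        // repeat_loop_header
+    //             *current = byte;          // repeat_loop_body: store element
+    //             current = current.add(1); // "next", fed back into the phi
+    //         }                             // repeat_loop_next
+    //     }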
+
+ fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
+ if self.sess().target.arch == "amdgpu" {
+ // amdgpu/LLVM does something weird and thinks an i64 value is
+ // split into a v2i32, halving the bitwidth LLVM expects,
+ // tripping an assertion. So, for now, just disable this
+ // optimization.
+ return;
+ }
+
+ unsafe {
+ let llty = self.cx.val_ty(load);
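+            // LLVM `!range` metadata is a half-open interval [start, end),
+            // hence the `wrapping_add(1)` below; LLVM accepts ranges that
+            // wrap around the type's maximum value (editorial note).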
+ let v = [
+ self.cx.const_uint_big(llty, range.start),
+ self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
+ ];
+
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_range as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
+ );
+ }
+ }
+
+ fn nonnull_metadata(&mut self, load: &'ll Value) {
+ unsafe {
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_nonnull as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+ );
+ }
+ }
+
+ fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
+ self.store_with_flags(val, ptr, align, MemFlags::empty())
+ }
+
+ fn store_with_flags(
+ &mut self,
+ val: &'ll Value,
+ ptr: &'ll Value,
+ align: Align,
+ flags: MemFlags,
+ ) -> &'ll Value {
+ debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
+ let ptr = self.check_store(val, ptr);
+ unsafe {
+ let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
+ let align =
+ if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint };
+ llvm::LLVMSetAlignment(store, align);
+ if flags.contains(MemFlags::VOLATILE) {
+ llvm::LLVMSetVolatile(store, llvm::True);
+ }
+ if flags.contains(MemFlags::NONTEMPORAL) {
+                // According to LLVM [1], the `!nontemporal` metadata on a
+                // store must *always* point to a metadata node containing the
+                // integer value 1.
+ //
+ // [1]: https://llvm.org/docs/LangRef.html#store-instruction
+ let one = self.cx.const_i32(1);
+ let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
+ llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
+ }
+ store
+ }
+ }
+
+ fn atomic_store(
+ &mut self,
+ val: &'ll Value,
+ ptr: &'ll Value,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
+ size: Size,
+ ) {
+ debug!("Store {:?} -> {:?}", val, ptr);
+ let ptr = self.check_store(val, ptr);
+ unsafe {
+ let store = llvm::LLVMRustBuildAtomicStore(
+ self.llbuilder,
+ val,
+ ptr,
+ AtomicOrdering::from_generic(order),
+ );
+ // LLVM requires the alignment of atomic stores to be at least the size of the type.
+ llvm::LLVMSetAlignment(store, size.bytes() as c_uint);
+ }
+ }
+
+ fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
+ unsafe {
+ llvm::LLVMBuildGEP2(
+ self.llbuilder,
+ ty,
+ ptr,
+ indices.as_ptr(),
+ indices.len() as c_uint,
+ UNNAMED,
+ )
+ }
+ }
+
+ fn inbounds_gep(
+ &mut self,
+ ty: &'ll Type,
+ ptr: &'ll Value,
+ indices: &[&'ll Value],
+ ) -> &'ll Value {
+ unsafe {
+ llvm::LLVMBuildInBoundsGEP2(
+ self.llbuilder,
+ ty,
+ ptr,
+ indices.as_ptr(),
+ indices.len() as c_uint,
+ UNNAMED,
+ )
+ }
+ }
+
+ fn struct_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, idx: u64) -> &'ll Value {
+ assert_eq!(idx as c_uint as u64, idx);
+ unsafe { llvm::LLVMBuildStructGEP2(self.llbuilder, ty, ptr, idx as c_uint, UNNAMED) }
+ }
+
+ /* Casts */
+ fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn fptoui_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+ self.fptoint_sat(false, val, dest_ty)
+ }
+
+ fn fptosi_sat(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> Option<&'ll Value> {
+ self.fptoint_sat(true, val, dest_ty)
+ }
+
+ fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ // On WebAssembly the `fptoui` and `fptosi` instructions currently have
+ // poor codegen. The reason for this is that the corresponding wasm
+ // instructions, `i32.trunc_f32_s` for example, will trap when the float
+ // is out-of-bounds, infinity, or nan. This means that LLVM
+ // automatically inserts control flow around `fptoui` and `fptosi`
+ // because the LLVM instruction `fptoui` is defined as producing a
+ // poison value, not having UB on out-of-bounds values.
+ //
+        // This method, however, is only used with non-saturating casts that
+        // have UB on out-of-bounds values. This means it's ok to use the raw
+        // wasm instruction, since out-of-bounds inputs may do whatever we
+        // like. To ensure that LLVM picks the right instruction, we call the
+        // raw wasm intrinsic functions, which avoids LLVM inserting all the
+        // extra control flow automatically.
+ if self.sess().target.is_like_wasm {
+ let src_ty = self.cx.val_ty(val);
+ if self.cx.type_kind(src_ty) != TypeKind::Vector {
+ let float_width = self.cx.float_width(src_ty);
+ let int_width = self.cx.int_width(dest_ty);
+ let name = match (int_width, float_width) {
+ (32, 32) => Some("llvm.wasm.trunc.unsigned.i32.f32"),
+ (32, 64) => Some("llvm.wasm.trunc.unsigned.i32.f64"),
+ (64, 32) => Some("llvm.wasm.trunc.unsigned.i64.f32"),
+ (64, 64) => Some("llvm.wasm.trunc.unsigned.i64.f64"),
+ _ => None,
+ };
+ if let Some(name) = name {
+ return self.call_intrinsic(name, &[val]);
+ }
+ }
+ }
+ unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ // see `fptoui` above for why wasm is different here
+ if self.sess().target.is_like_wasm {
+ let src_ty = self.cx.val_ty(val);
+ if self.cx.type_kind(src_ty) != TypeKind::Vector {
+ let float_width = self.cx.float_width(src_ty);
+ let int_width = self.cx.int_width(dest_ty);
+ let name = match (int_width, float_width) {
+ (32, 32) => Some("llvm.wasm.trunc.signed.i32.f32"),
+ (32, 64) => Some("llvm.wasm.trunc.signed.i32.f64"),
+ (64, 32) => Some("llvm.wasm.trunc.signed.i64.f32"),
+ (64, 64) => Some("llvm.wasm.trunc.signed.i64.f64"),
+ _ => None,
+ };
+ if let Some(name) = name {
+ return self.call_intrinsic(name, &[val]);
+ }
+ }
+ }
+ unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) }
+ }
+
+ fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ /* Comparisons */
+ fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ let op = llvm::IntPredicate::from_generic(op);
+ unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
+ }
+
+ fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ let op = llvm::RealPredicate::from_generic(op);
+ unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, UNNAMED) }
+ }
+
+ /* Miscellaneous instructions */
+ fn memcpy(
+ &mut self,
+ dst: &'ll Value,
+ dst_align: Align,
+ src: &'ll Value,
+ src_align: Align,
+ size: &'ll Value,
+ flags: MemFlags,
+ ) {
+ assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
+ let size = self.intcast(size, self.type_isize(), false);
+ let is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_i8p());
+ unsafe {
+ llvm::LLVMRustBuildMemCpy(
+ self.llbuilder,
+ dst,
+ dst_align.bytes() as c_uint,
+ src,
+ src_align.bytes() as c_uint,
+ size,
+ is_volatile,
+ );
+ }
+ }
+
+ fn memmove(
+ &mut self,
+ dst: &'ll Value,
+ dst_align: Align,
+ src: &'ll Value,
+ src_align: Align,
+ size: &'ll Value,
+ flags: MemFlags,
+ ) {
+ assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
+ let size = self.intcast(size, self.type_isize(), false);
+ let is_volatile = flags.contains(MemFlags::VOLATILE);
+ let dst = self.pointercast(dst, self.type_i8p());
+ let src = self.pointercast(src, self.type_i8p());
+ unsafe {
+ llvm::LLVMRustBuildMemMove(
+ self.llbuilder,
+ dst,
+ dst_align.bytes() as c_uint,
+ src,
+ src_align.bytes() as c_uint,
+ size,
+ is_volatile,
+ );
+ }
+ }
+
+ fn memset(
+ &mut self,
+ ptr: &'ll Value,
+ fill_byte: &'ll Value,
+ size: &'ll Value,
+ align: Align,
+ flags: MemFlags,
+ ) {
+ let is_volatile = flags.contains(MemFlags::VOLATILE);
+ let ptr = self.pointercast(ptr, self.type_i8p());
+ unsafe {
+ llvm::LLVMRustBuildMemSet(
+ self.llbuilder,
+ ptr,
+ align.bytes() as c_uint,
+ fill_byte,
+ size,
+ is_volatile,
+ );
+ }
+ }
+
+ fn select(
+ &mut self,
+ cond: &'ll Value,
+ then_val: &'ll Value,
+ else_val: &'ll Value,
+ ) -> &'ll Value {
+ unsafe { llvm::LLVMBuildSelect(self.llbuilder, cond, then_val, else_val, UNNAMED) }
+ }
+
+ fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
+ }
+
+ fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, UNNAMED) }
+ }
+
+ fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
+ unsafe {
+ let elt_ty = self.cx.val_ty(elt);
+ let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
+ let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
+ let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
+ self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
+ }
+ }
+
+ fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value {
+ assert_eq!(idx as c_uint as u64, idx);
+ unsafe { llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, UNNAMED) }
+ }
+
+ fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value {
+ assert_eq!(idx as c_uint as u64, idx);
+ unsafe { llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint, UNNAMED) }
+ }
+
+ fn set_personality_fn(&mut self, personality: &'ll Value) {
+ unsafe {
+ llvm::LLVMSetPersonalityFn(self.llfn(), personality);
+ }
+ }
+
+ fn cleanup_landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value) -> &'ll Value {
+ let landing_pad = self.landing_pad(ty, pers_fn, 1 /* FIXME should this be 0? */);
+ unsafe {
+ llvm::LLVMSetCleanup(landing_pad, llvm::True);
+ }
+ landing_pad
+ }
+
+ fn resume(&mut self, exn: &'ll Value) {
+ unsafe {
+ llvm::LLVMBuildResume(self.llbuilder, exn);
+ }
+ }
+
+ fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> Funclet<'ll> {
+ let name = cstr!("cleanuppad");
+ let ret = unsafe {
+ llvm::LLVMRustBuildCleanupPad(
+ self.llbuilder,
+ parent,
+ args.len() as c_uint,
+ args.as_ptr(),
+ name.as_ptr(),
+ )
+ };
+ Funclet::new(ret.expect("LLVM does not have support for cleanuppad"))
+ }
+
+ fn cleanup_ret(&mut self, funclet: &Funclet<'ll>, unwind: Option<&'ll BasicBlock>) {
+ unsafe {
+ llvm::LLVMRustBuildCleanupRet(self.llbuilder, funclet.cleanuppad(), unwind)
+ .expect("LLVM does not have support for cleanupret");
+ }
+ }
+
+ fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> Funclet<'ll> {
+ let name = cstr!("catchpad");
+ let ret = unsafe {
+ llvm::LLVMRustBuildCatchPad(
+ self.llbuilder,
+ parent,
+ args.len() as c_uint,
+ args.as_ptr(),
+ name.as_ptr(),
+ )
+ };
+ Funclet::new(ret.expect("LLVM does not have support for catchpad"))
+ }
+
+ fn catch_switch(
+ &mut self,
+ parent: Option<&'ll Value>,
+ unwind: Option<&'ll BasicBlock>,
+ handlers: &[&'ll BasicBlock],
+ ) -> &'ll Value {
+ let name = cstr!("catchswitch");
+ let ret = unsafe {
+ llvm::LLVMRustBuildCatchSwitch(
+ self.llbuilder,
+ parent,
+ unwind,
+ handlers.len() as c_uint,
+ name.as_ptr(),
+ )
+ };
+ let ret = ret.expect("LLVM does not have support for catchswitch");
+ for handler in handlers {
+ unsafe {
+ llvm::LLVMRustAddHandler(ret, handler);
+ }
+ }
+ ret
+ }
+
+ // Atomic Operations
+ fn atomic_cmpxchg(
+ &mut self,
+ dst: &'ll Value,
+ cmp: &'ll Value,
+ src: &'ll Value,
+ mut order: rustc_codegen_ssa::common::AtomicOrdering,
+ failure_order: rustc_codegen_ssa::common::AtomicOrdering,
+ weak: bool,
+ ) -> &'ll Value {
+ let weak = if weak { llvm::True } else { llvm::False };
+ if llvm_util::get_version() < (13, 0, 0) {
+ use rustc_codegen_ssa::common::AtomicOrdering::*;
+            // Older LLVM has the pre-C++17 restriction on success and failure
+            // memory orderings, requiring the former to be at least as strong
+            // as the latter. So, for LLVM 12, we upgrade the success ordering
+            // to a stronger one if necessary.
+ match (order, failure_order) {
+ (Relaxed, Acquire) => order = Acquire,
+ (Release, Acquire) => order = AcquireRelease,
+ (_, SequentiallyConsistent) => order = SequentiallyConsistent,
+ _ => {}
+ }
+ }
+ unsafe {
+ llvm::LLVMRustBuildAtomicCmpXchg(
+ self.llbuilder,
+ dst,
+ cmp,
+ src,
+ AtomicOrdering::from_generic(order),
+ AtomicOrdering::from_generic(failure_order),
+ weak,
+ )
+ }
+ }
+ fn atomic_rmw(
+ &mut self,
+ op: rustc_codegen_ssa::common::AtomicRmwBinOp,
+ dst: &'ll Value,
+ src: &'ll Value,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
+ ) -> &'ll Value {
+ unsafe {
+ llvm::LLVMBuildAtomicRMW(
+ self.llbuilder,
+ AtomicRmwBinOp::from_generic(op),
+ dst,
+ src,
+ AtomicOrdering::from_generic(order),
+ False,
+ )
+ }
+ }
+
+ fn atomic_fence(
+ &mut self,
+ order: rustc_codegen_ssa::common::AtomicOrdering,
+ scope: rustc_codegen_ssa::common::SynchronizationScope,
+ ) {
+ unsafe {
+ llvm::LLVMRustBuildAtomicFence(
+ self.llbuilder,
+ AtomicOrdering::from_generic(order),
+ SynchronizationScope::from_generic(scope),
+ );
+ }
+ }
+
+ fn set_invariant_load(&mut self, load: &'ll Value) {
+ unsafe {
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_invariant_load as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+ );
+ }
+ }
+
+ fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) {
+ self.call_lifetime_intrinsic("llvm.lifetime.start.p0i8", ptr, size);
+ }
+
+ fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) {
+ self.call_lifetime_intrinsic("llvm.lifetime.end.p0i8", ptr, size);
+ }
+
+ fn instrprof_increment(
+ &mut self,
+ fn_name: &'ll Value,
+ hash: &'ll Value,
+ num_counters: &'ll Value,
+ index: &'ll Value,
+ ) {
+ debug!(
+ "instrprof_increment() with args ({:?}, {:?}, {:?}, {:?})",
+ fn_name, hash, num_counters, index
+ );
+
+ let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
+ let llty = self.cx.type_func(
+ &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
+ self.cx.type_void(),
+ );
+ let args = &[fn_name, hash, num_counters, index];
+ let args = self.check_call("call", llty, llfn, args);
+
+ unsafe {
+ let _ = llvm::LLVMRustBuildCall(
+ self.llbuilder,
+ llty,
+ llfn,
+ args.as_ptr() as *const &llvm::Value,
+ args.len() as c_uint,
+ None,
+ );
+ }
+ }
+
+ fn call(
+ &mut self,
+ llty: &'ll Type,
+ llfn: &'ll Value,
+ args: &[&'ll Value],
+ funclet: Option<&Funclet<'ll>>,
+ ) -> &'ll Value {
+ debug!("call {:?} with args ({:?})", llfn, args);
+
+ let args = self.check_call("call", llty, llfn, args);
+ let bundle = funclet.map(|funclet| funclet.bundle());
+ let bundle = bundle.as_ref().map(|b| &*b.raw);
+
+ unsafe {
+ llvm::LLVMRustBuildCall(
+ self.llbuilder,
+ llty,
+ llfn,
+ args.as_ptr() as *const &llvm::Value,
+ args.len() as c_uint,
+ bundle,
+ )
+ }
+ }
+
+ fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) }
+ }
+
+ fn do_not_inline(&mut self, llret: &'ll Value) {
+ let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx);
+ attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]);
+ }
+}
+
+impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> {
+ fn get_static(&mut self, def_id: DefId) -> &'ll Value {
+ // Forward to the `get_static` method of `CodegenCx`
+ self.cx().get_static(def_id)
+ }
+}
+
+impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
+ fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self {
+ // Create a fresh builder from the crate context.
+ let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) };
+ Builder { llbuilder, cx }
+ }
+
+ pub fn llfn(&self) -> &'ll Value {
+ unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) }
+ }
+
+ fn position_at_start(&mut self, llbb: &'ll BasicBlock) {
+ unsafe {
+ llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb);
+ }
+ }
+
+ fn align_metadata(&mut self, load: &'ll Value, align: Align) {
+ unsafe {
+ let v = [self.cx.const_u64(align.bytes())];
+
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_align as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, v.as_ptr(), v.len() as c_uint),
+ );
+ }
+ }
+
+ fn noundef_metadata(&mut self, load: &'ll Value) {
+ unsafe {
+ llvm::LLVMSetMetadata(
+ load,
+ llvm::MD_noundef as c_uint,
+ llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0),
+ );
+ }
+ }
+
+ pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) }
+ }
+
+ pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) }
+ }
+
+ pub fn insert_element(
+ &mut self,
+ vec: &'ll Value,
+ elt: &'ll Value,
+ idx: &'ll Value,
+ ) -> &'ll Value {
+ unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) }
+ }
+
+ pub fn shuffle_vector(
+ &mut self,
+ v1: &'ll Value,
+ v2: &'ll Value,
+ mask: &'ll Value,
+ ) -> &'ll Value {
+ unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) }
+ }
+
+ pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) }
+ }
+ pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) }
+ }
+ pub fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+ pub fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+ pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) }
+ }
+ pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false)
+ }
+ }
+ pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false)
+ }
+ }
+ pub fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr =
+ llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+ pub fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value {
+ unsafe {
+ let instr =
+ llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true);
+ llvm::LLVMRustSetFastMath(instr);
+ instr
+ }
+ }
+ pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) }
+ }
+ pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value {
+ unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) }
+ }
+
+ pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) {
+ unsafe {
+ llvm::LLVMAddClause(landing_pad, clause);
+ }
+ }
+
+ pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value {
+ let ret =
+ unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) };
+ ret.expect("LLVM does not have support for catchret")
+ }
+
+ fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
+ let dest_ptr_ty = self.cx.val_ty(ptr);
+ let stored_ty = self.cx.val_ty(val);
+ let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
+
+ assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
+
+ if dest_ptr_ty == stored_ptr_ty {
+ ptr
+ } else {
+ debug!(
+ "type mismatch in store. \
+ Expected {:?}, got {:?}; inserting bitcast",
+ dest_ptr_ty, stored_ptr_ty
+ );
+ self.bitcast(ptr, stored_ptr_ty)
+ }
+ }
+
+ fn check_call<'b>(
+ &mut self,
+ typ: &str,
+ fn_ty: &'ll Type,
+ llfn: &'ll Value,
+ args: &'b [&'ll Value],
+ ) -> Cow<'b, [&'ll Value]> {
+ assert!(
+ self.cx.type_kind(fn_ty) == TypeKind::Function,
+ "builder::{} not passed a function, but {:?}",
+ typ,
+ fn_ty
+ );
+
+ let param_tys = self.cx.func_params_types(fn_ty);
+
+ let all_args_match = iter::zip(&param_tys, args.iter().map(|&v| self.val_ty(v)))
+ .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
+
+ if all_args_match {
+ return Cow::Borrowed(args);
+ }
+
+ let casted_args: Vec<_> = iter::zip(param_tys, args)
+ .enumerate()
+ .map(|(i, (expected_ty, &actual_val))| {
+ let actual_ty = self.val_ty(actual_val);
+ if expected_ty != actual_ty {
+ debug!(
+ "type mismatch in function call of {:?}. \
+ Expected {:?} for param {}, got {:?}; injecting bitcast",
+ llfn, expected_ty, i, actual_ty
+ );
+ self.bitcast(actual_val, expected_ty)
+ } else {
+ actual_val
+ }
+ })
+ .collect();
+
+ Cow::Owned(casted_args)
+ }
+
+ pub fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
+ }
+
+ pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value {
+ let (ty, f) = self.cx.get_intrinsic(intrinsic);
+ self.call(ty, f, args, None)
+ }
+
+ fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) {
+ let size = size.bytes();
+ if size == 0 {
+ return;
+ }
+
+ if !self.cx().sess().emit_lifetime_markers() {
+ return;
+ }
+
+ let ptr = self.pointercast(ptr, self.cx.type_i8p());
+ self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
+ }
+
+ pub(crate) fn phi(
+ &mut self,
+ ty: &'ll Type,
+ vals: &[&'ll Value],
+ bbs: &[&'ll BasicBlock],
+ ) -> &'ll Value {
+ assert_eq!(vals.len(), bbs.len());
+ let phi = unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, UNNAMED) };
+ unsafe {
+ llvm::LLVMAddIncoming(phi, vals.as_ptr(), bbs.as_ptr(), vals.len() as c_uint);
+ phi
+ }
+ }
+
+ fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) {
+ unsafe {
+ llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
+ }
+ }
+
+ fn fptoint_sat_broken_in_llvm(&self) -> bool {
+ match self.tcx.sess.target.arch.as_ref() {
+ // FIXME - https://bugs.llvm.org/show_bug.cgi?id=50083
+ "riscv64" => llvm_util::get_version() < (13, 0, 0),
+ _ => false,
+ }
+ }
+
+ fn fptoint_sat(
+ &mut self,
+ signed: bool,
+ val: &'ll Value,
+ dest_ty: &'ll Type,
+ ) -> Option<&'ll Value> {
+ if !self.fptoint_sat_broken_in_llvm() {
+ let src_ty = self.cx.val_ty(val);
+ let (float_ty, int_ty, vector_length) = if self.cx.type_kind(src_ty) == TypeKind::Vector
+ {
+ assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
+ (
+ self.cx.element_type(src_ty),
+ self.cx.element_type(dest_ty),
+ Some(self.cx.vector_length(src_ty)),
+ )
+ } else {
+ (src_ty, dest_ty, None)
+ };
+ let float_width = self.cx.float_width(float_ty);
+ let int_width = self.cx.int_width(int_ty);
+
+ let instr = if signed { "fptosi" } else { "fptoui" };
+ let name = if let Some(vector_length) = vector_length {
+ format!(
+ "llvm.{}.sat.v{}i{}.v{}f{}",
+ instr, vector_length, int_width, vector_length, float_width
+ )
+ } else {
+ format!("llvm.{}.sat.i{}.f{}", instr, int_width, float_width)
+ };
+ let f =
+ self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty));
+ Some(self.call(self.type_func(&[src_ty], dest_ty), f, &[val], None))
+ } else {
+ None
+ }
+ }
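+
+    // Editorial note (not upstream): the names built above follow LLVM's
+    // saturating float-to-int conversion intrinsics, for example:
+    //
+    //     scalar: llvm.fptosi.sat.i32.f64
+    //     vector: llvm.fptoui.sat.v4i32.v4f32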
+
+ pub(crate) fn landing_pad(
+ &mut self,
+ ty: &'ll Type,
+ pers_fn: &'ll Value,
+ num_clauses: usize,
+ ) -> &'ll Value {
+        // Use LLVMSetPersonalityFn to set the personality. It supports arbitrary Consts,
+        // while LLVMBuildLandingPad requires the argument to be a Function (as of LLVM 12). The
+ // personality lives on the parent function anyway.
+ self.set_personality_fn(pers_fn);
+ unsafe {
+ llvm::LLVMBuildLandingPad(self.llbuilder, ty, None, num_clauses as c_uint, UNNAMED)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs
new file mode 100644
index 000000000..72155d874
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/callee.rs
@@ -0,0 +1,194 @@
+//! Handles codegen of callees as well as other call-related
+//! things. Callees are a superset of normal Rust values and sometimes
+//! have different representations. In particular, top-level fn items
+//! and methods are represented as just a fn ptr and not a full
+//! closure.
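+
+// Editorial sketch (not upstream code): what "just a fn ptr and not a full
+// closure" means at the source level. A top-level fn item coerces to a bare
+// code pointer with no environment, while a capturing closure carries state.
+#[allow(dead_code)]
+fn fn_item_vs_closure() {
+    fn double(x: u32) -> u32 { x * 2 }
+    let f: fn(u32) -> u32 = double; // plain code pointer, no environment
+    let y = 3;
+    let g = move |x: u32| x * y;    // closure: code pointer + captured `y`
+    assert_eq!(f(2), 4);
+    assert_eq!(g(2), 6);
+}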
+
+use crate::abi::FnAbiLlvmExt;
+use crate::attributes;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::value::Value;
+use rustc_codegen_ssa::traits::*;
+use tracing::debug;
+
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+use rustc_middle::ty::{self, Instance, TypeVisitable};
+
+/// Codegens a reference to a fn/method item, monomorphizing and
+/// inlining as it goes.
+///
+/// # Parameters
+///
+/// - `cx`: the crate context
+/// - `instance`: the instance to be instantiated
+pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value {
+ let tcx = cx.tcx();
+
+ debug!("get_fn(instance={:?})", instance);
+
+ assert!(!instance.substs.needs_infer());
+ assert!(!instance.substs.has_escaping_bound_vars());
+
+ if let Some(&llfn) = cx.instances.borrow().get(&instance) {
+ return llfn;
+ }
+
+ let sym = tcx.symbol_name(instance).name;
+ debug!(
+ "get_fn({:?}: {:?}) => {}",
+ instance,
+ instance.ty(cx.tcx(), ty::ParamEnv::reveal_all()),
+ sym
+ );
+
+ let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+
+ let llfn = if let Some(llfn) = cx.get_declared_value(sym) {
+ // Create a fn pointer with the new signature.
+ let llptrty = fn_abi.ptr_to_llvm_type(cx);
+
+ // This is subtle and surprising, but sometimes we have to bitcast
+ // the resulting fn pointer. The reason has to do with external
+ // functions. If you have two crates that both bind the same C
+ // library, they may not use precisely the same types: for
+ // example, they will probably each declare their own structs,
+ // which are distinct types from LLVM's point of view (nominal
+ // types).
+ //
+ // Now, if those two crates are linked into an application, and
+ // they contain inlined code, you can wind up with a situation
+ // where both of those functions wind up being loaded into this
+ // application simultaneously. In that case, the same function
+ // (from LLVM's point of view) requires two types. But of course
+ // LLVM won't allow one function to have two types.
+ //
+ // What we currently do, therefore, is declare the function with
+ // one of the two types (whichever happens to come first) and then
+ // bitcast as needed when the function is referenced to make sure
+ // it has the type we expect.
+ //
+ // This can occur on either a crate-local or crate-external
+ // reference. It also occurs when testing libcore and in some
+ // other weird situations. Annoying.
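+        // (Hypothetical example: crates `a` and `b` both declare
+        // `extern "C" { fn frob(p: *mut Frob); }` against the same C symbol,
+        // but each crate's `Frob` lowers to a distinct nominal LLVM type.)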
+ if cx.val_ty(llfn) != llptrty {
+ debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
+ cx.const_ptrcast(llfn, llptrty)
+ } else {
+ debug!("get_fn: not casting pointer!");
+ llfn
+ }
+ } else {
+ let llfn = cx.declare_fn(sym, fn_abi);
+ debug!("get_fn: not casting pointer!");
+
+ attributes::from_fn_attrs(cx, llfn, instance);
+
+ let instance_def_id = instance.def_id();
+
+ // Apply an appropriate linkage/visibility value to our item that we
+ // just declared.
+ //
+ // This is sort of subtle. Inside our codegen unit we started off
+ // compilation by predefining all our own `MonoItem` instances. That
+ // is, everything we're codegenning ourselves is already defined. That
+ // means that anything we're actually codegenning in this codegen unit
+ // will have hit the above branch in `get_declared_value`. As a result,
+ // we're guaranteed here that we're declaring a symbol that won't get
+ // defined, or in other words we're referencing a value from another
+ // codegen unit or even another crate.
+ //
+ // So because this is a foreign value we blanket apply an external
+ // linkage directive because it's coming from a different object file.
+ // The visibility here is where it gets tricky. This symbol could be
+ // referencing some foreign crate or foreign library (an `extern`
+ // block) in which case we want to leave the default visibility. We may
+ // also, though, have multiple codegen units. It could be a
+ // monomorphization, in which case its expected visibility depends on
+ // whether we are sharing generics or not. The important thing here is
+ // that the visibility we apply to the declaration is the same one that
+ // has been applied to the definition (wherever that definition may be).
+ unsafe {
+ llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
+
+ let is_generic = instance.substs.non_erasable_generics().next().is_some();
+
+ if is_generic {
+ // This is a monomorphization. Its expected visibility depends
+ // on whether we are in share-generics mode.
+
+ if cx.tcx.sess.opts.share_generics() {
+ // We are in share_generics mode.
+
+ if let Some(instance_def_id) = instance_def_id.as_local() {
+ // This is a definition from the current crate. If the
+ // definition is unreachable for downstream crates or
+ // the current crate does not re-export generics, the
+ // definition of the instance will have been declared
+ // as `hidden`.
+ if cx.tcx.is_unreachable_local_definition(instance_def_id)
+ || !cx.tcx.local_crate_exports_generics()
+ {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ } else {
+ // This is a monomorphization of a generic function
+ // defined in an upstream crate.
+ if instance.upstream_monomorphization(tcx).is_some() {
+ // This is instantiated in another crate. It cannot
+ // be `hidden`.
+ } else {
+ // This is a local instantiation of an upstream definition.
+ // If the current crate does not re-export it
+ // (because it is a C library or an executable), it
+ // will have been declared `hidden`.
+ if !cx.tcx.local_crate_exports_generics() {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ }
+ }
+ } else {
+ // When not sharing generics, all instances are in the same
+ // crate and have hidden visibility
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ } else {
+ // This is a non-generic function
+ if cx.tcx.is_codegened_item(instance_def_id) {
+ // This is a function that is instantiated in the local crate
+
+ if instance_def_id.is_local() {
+                        // This is a function that is defined in the local crate.
+ // If it is not reachable, it is hidden.
+ if !cx.tcx.is_reachable_non_generic(instance_def_id) {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ } else {
+ // This is a function from an upstream crate that has
+ // been instantiated here. These are always hidden.
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
+ }
+ }
+ }
+
+ // MinGW: For backward compatibility we rely on the linker to decide whether it
+ // should use dllimport for functions.
+ if cx.use_dll_storage_attrs
+ && tcx.is_dllimport_foreign_item(instance_def_id)
+ && !matches!(tcx.sess.target.env.as_ref(), "gnu" | "uclibc")
+ {
+ llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
+ }
+
+ if cx.should_assume_dso_local(llfn, true) {
+ llvm::LLVMRustSetDSOLocal(llfn, true);
+ }
+ }
+
+ llfn
+ };
+
+ cx.instances.borrow_mut().insert(instance, llfn);
+
+ llfn
+}
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
new file mode 100644
index 000000000..fb4da9a5f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -0,0 +1,359 @@
+//! Code that is useful in various codegen modules.
+
+use crate::consts::{self, const_alloc_to_llvm};
+pub use crate::context::CodegenCx;
+use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+
+use rustc_ast::Mutability;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_middle::bug;
+use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size};
+
+use libc::{c_char, c_uint};
+use tracing::debug;
+
+/*
+* A note on nomenclature of linking: "extern", "foreign", and "upcall".
+*
+* An "extern" is an LLVM symbol we wind up emitting an undefined external
+* reference to. This means "we don't have the thing in this compilation unit,
+* please make sure you link it in at runtime". This could be a reference to
+* C code found in a C library, or rust code found in a rust crate.
+*
+* Most "externs" are implicitly declared (automatically) as a result of a
+* user declaring an extern _module_ dependency; this causes the rust driver
+* to locate an extern crate, scan its compilation metadata, and emit extern
+* declarations for any symbols used by the declaring crate.
+*
+* A "foreign" is an extern that references C (or other non-rust ABI) code.
+* There is no metadata to scan for extern references so in these cases either
+* a header-digester like bindgen, or manual function prototypes, have to
+* serve as declarators. So these are usually given explicitly as prototype
+* declarations, in rust code, with ABI attributes on them noting which ABI to
+* link via.
+*
+* An "upcall" is a foreign call generated by the compiler (not corresponding
+* to any user-written call in the code) into the runtime library, to perform
+* some helper task such as bringing a task to life, allocating memory, etc.
+*
+*/
+
+/// A structure representing an active landing pad for the duration of a basic
+/// block.
+///
+/// Each `Block` may contain an instance of this, indicating whether the block
+/// is part of a landing pad or not. This is used to make decision about whether
+/// to emit `invoke` instructions (e.g., in a landing pad we don't continue to
+/// use `invoke`) and also about various function call metadata.
+///
+/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
+/// just a bunch of `None` instances (not too interesting), but for MSVC
+/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
+/// When inside of a landing pad, each function call in LLVM IR needs to be
+/// annotated with which landing pad it's a part of. This is accomplished via
+/// the `OperandBundleDef` value created for MSVC landing pads.
+pub struct Funclet<'ll> {
+ cleanuppad: &'ll Value,
+ operand: OperandBundleDef<'ll>,
+}
+
+impl<'ll> Funclet<'ll> {
+ pub fn new(cleanuppad: &'ll Value) -> Self {
+ Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
+ }
+
+ pub fn cleanuppad(&self) -> &'ll Value {
+ self.cleanuppad
+ }
+
+ pub fn bundle(&self) -> &OperandBundleDef<'ll> {
+ &self.operand
+ }
+}
+
+impl<'ll> BackendTypes for CodegenCx<'ll, '_> {
+ type Value = &'ll Value;
+ // FIXME(eddyb) replace this with a `Function` "subclass" of `Value`.
+ type Function = &'ll Value;
+
+ type BasicBlock = &'ll BasicBlock;
+ type Type = &'ll Type;
+ type Funclet = Funclet<'ll>;
+
+ type DIScope = &'ll llvm::debuginfo::DIScope;
+ type DILocation = &'ll llvm::debuginfo::DILocation;
+ type DIVariable = &'ll llvm::debuginfo::DIVariable;
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+ pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
+ unsafe { llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint) }
+ }
+
+ pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
+ unsafe { llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint) }
+ }
+
+ pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
+ bytes_in_context(self.llcx, bytes)
+ }
+
+ pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
+ unsafe {
+ assert_eq!(idx as c_uint as u64, idx);
+ let r = llvm::LLVMGetAggregateElement(v, idx as c_uint).unwrap();
+
+ debug!("const_get_elt(v={:?}, idx={}, r={:?})", v, idx, r);
+
+ r
+ }
+ }
+}
+
+impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn const_null(&self, t: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMConstNull(t) }
+ }
+
+ fn const_undef(&self, t: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMGetUndef(t) }
+ }
+
+ fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
+ unsafe { llvm::LLVMConstInt(t, i as u64, True) }
+ }
+
+ fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
+ unsafe { llvm::LLVMConstInt(t, i, False) }
+ }
+
+ fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
+ unsafe {
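+            // `LLVMConstIntOfArbitraryPrecision` takes the value as 64-bit
+            // words, least significant word first.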
+ let words = [u as u64, (u >> 64) as u64];
+ llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
+ }
+ }
+
+ fn const_bool(&self, val: bool) -> &'ll Value {
+ self.const_uint(self.type_i1(), val as u64)
+ }
+
+ fn const_i16(&self, i: i16) -> &'ll Value {
+ self.const_int(self.type_i16(), i as i64)
+ }
+
+ fn const_i32(&self, i: i32) -> &'ll Value {
+ self.const_int(self.type_i32(), i as i64)
+ }
+
+ fn const_u32(&self, i: u32) -> &'ll Value {
+ self.const_uint(self.type_i32(), i as u64)
+ }
+
+ fn const_u64(&self, i: u64) -> &'ll Value {
+ self.const_uint(self.type_i64(), i)
+ }
+
+ fn const_usize(&self, i: u64) -> &'ll Value {
+ let bit_size = self.data_layout().pointer_size.bits();
+ if bit_size < 64 {
+ // make sure it doesn't overflow
+ assert!(i < (1 << bit_size));
+ }
+
+ self.const_uint(self.isize_ty, i)
+ }
+
+ fn const_u8(&self, i: u8) -> &'ll Value {
+ self.const_uint(self.type_i8(), i as u64)
+ }
+
+ fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value {
+ unsafe { llvm::LLVMConstReal(t, val) }
+ }
+
+ fn const_str(&self, s: &str) -> (&'ll Value, &'ll Value) {
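+        // Build the two halves of a `&str` fat pointer: a pointer into an
+        // interned read-only global holding the bytes, plus the byte length.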
+ let str_global = *self
+ .const_str_cache
+ .borrow_mut()
+ .raw_entry_mut()
+ .from_key(s)
+ .or_insert_with(|| {
+ let sc = self.const_bytes(s.as_bytes());
+ let sym = self.generate_local_symbol_name("str");
+ let g = self.define_global(&sym, self.val_ty(sc)).unwrap_or_else(|| {
+ bug!("symbol `{}` is already defined", sym);
+ });
+ unsafe {
+ llvm::LLVMSetInitializer(g, sc);
+ llvm::LLVMSetGlobalConstant(g, True);
+ llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
+ }
+ (s.to_owned(), g)
+ })
+ .1;
+ let len = s.len();
+ let cs = consts::ptrcast(
+ str_global,
+ self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)),
+ );
+ (cs, self.const_usize(len as u64))
+ }
+
+ fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
+ struct_in_context(self.llcx, elts, packed)
+ }
+
+ fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
+ try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
+ }
+
+ fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
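+        // `LLVMRustConstInt128Get` hands back the constant split into two u64
+        // halves (extended per `sign_ext`); stitch them back into a u128.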
+ try_as_const_integral(v).and_then(|v| unsafe {
+ let (mut lo, mut hi) = (0u64, 0u64);
+ let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
+ success.then_some(hi_lo_to_u128(lo, hi))
+ })
+ }
+
+ fn zst_to_backend(&self, _llty: &'ll Type) -> &'ll Value {
+ self.const_undef(self.type_ix(0))
+ }
+
+ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
+ let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
+ match cv {
+ Scalar::Int(int) => {
+ let data = int.assert_bits(layout.size(self));
+ let llval = self.const_uint_big(self.type_ix(bitsize), data);
+ if layout.primitive() == Pointer {
+ unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
+ } else {
+ self.const_bitcast(llval, llty)
+ }
+ }
+ Scalar::Ptr(ptr, _size) => {
+ let (alloc_id, offset) = ptr.into_parts();
+ let (base_addr, base_addr_space) = match self.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ let init = const_alloc_to_llvm(self, alloc);
+ let alloc = alloc.inner();
+ let value = match alloc.mutability {
+ Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+ _ => self.static_addr_of(init, alloc.align, None),
+ };
+ if !self.sess().fewer_names() {
+ llvm::set_value_name(value, format!("{:?}", alloc_id).as_bytes());
+ }
+ (value, AddressSpace::DATA)
+ }
+ GlobalAlloc::Function(fn_instance) => (
+ self.get_fn_addr(fn_instance.polymorphize(self.tcx)),
+ self.data_layout().instruction_address_space,
+ ),
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc = self
+ .tcx
+ .global_alloc(self.tcx.vtable_allocation((ty, trait_ref)))
+ .unwrap_memory();
+ let init = const_alloc_to_llvm(self, alloc);
+ let value = self.static_addr_of(init, alloc.inner().align, None);
+ (value, AddressSpace::DATA)
+ }
+ GlobalAlloc::Static(def_id) => {
+ assert!(self.tcx.is_static(def_id));
+ assert!(!self.tcx.is_thread_local_static(def_id));
+ (self.get_static(def_id), AddressSpace::DATA)
+ }
+ };
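+                // Apply the pointer's byte offset to the base address via an
+                // inbounds GEP over `i8`, which offsets by bytes regardless of
+                // the pointee type.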
+ let llval = unsafe {
+ llvm::LLVMRustConstInBoundsGEP2(
+ self.type_i8(),
+ self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
+ &self.const_usize(offset.bytes()),
+ 1,
+ )
+ };
+ if layout.primitive() != Pointer {
+ unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
+ } else {
+ self.const_bitcast(llval, llty)
+ }
+ }
+ }
+ }
+
+ fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value {
+ const_alloc_to_llvm(self, alloc)
+ }
+
+ fn from_const_alloc(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ alloc: ConstAllocation<'tcx>,
+ offset: Size,
+ ) -> PlaceRef<'tcx, &'ll Value> {
+ let alloc_align = alloc.inner().align;
+ assert_eq!(alloc_align, layout.align.abi);
+ let llty = self.type_ptr_to(layout.llvm_type(self));
+ let llval = if layout.size == Size::ZERO {
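+            // A ZST has no bytes to point at, so hand out a dangling but
+            // suitably aligned pointer: the alignment value itself, cast to
+            // the pointer type.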
+ let llval = self.const_usize(alloc_align.bytes());
+ unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
+ } else {
+ let init = const_alloc_to_llvm(self, alloc);
+ let base_addr = self.static_addr_of(init, alloc_align, None);
+
+ let llval = unsafe {
+ llvm::LLVMRustConstInBoundsGEP2(
+ self.type_i8(),
+ self.const_bitcast(base_addr, self.type_i8p()),
+ &self.const_usize(offset.bytes()),
+ 1,
+ )
+ };
+ self.const_bitcast(llval, llty)
+ };
+ PlaceRef::new_sized(llval, layout)
+ }
+
+ fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ consts::ptrcast(val, ty)
+ }
+}
+
+/// Get the [LLVM type][Type] of a [`Value`].
+pub fn val_ty(v: &Value) -> &Type {
+ unsafe { llvm::LLVMTypeOf(v) }
+}
+
+pub fn bytes_in_context<'ll>(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+ unsafe {
+ let ptr = bytes.as_ptr() as *const c_char;
+ llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True)
+ }
+}
+
+pub fn struct_in_context<'ll>(
+ llcx: &'ll llvm::Context,
+ elts: &[&'ll Value],
+ packed: bool,
+) -> &'ll Value {
+ unsafe {
+ llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), elts.len() as c_uint, packed as Bool)
+ }
+}
+
+#[inline]
+fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
+ ((hi as u128) << 64) | (lo as u128)
+}
+
+fn try_as_const_integral(v: &Value) -> Option<&ConstantInt> {
+ unsafe { llvm::LLVMIsAConstantInt(v) }
+}
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
new file mode 100644
index 000000000..18467e370
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -0,0 +1,577 @@
+use crate::base;
+use crate::common::CodegenCx;
+use crate::debuginfo;
+use crate::llvm::{self, True};
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use cstr::cstr;
+use libc::c_uint;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::DefId;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::interpret::{
+ read_target_uint, Allocation, ConstAllocation, ErrorHandled, GlobalAlloc, InitChunk, Pointer,
+ Scalar as InterpScalar,
+};
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::{bug, span_bug};
+use rustc_target::abi::{
+ AddressSpace, Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange,
+};
+use std::ops::Range;
+use tracing::debug;
+
+pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
+ let alloc = alloc.inner();
+ let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
+ let dl = cx.data_layout();
+ let pointer_size = dl.pointer_size.bytes() as usize;
+
+ // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`,
+ // so `range` must be within the bounds of `alloc` and not contain or overlap a relocation.
+ fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>(
+ llvals: &mut Vec<&'ll Value>,
+ cx: &'a CodegenCx<'ll, 'b>,
+ alloc: &'a Allocation,
+ range: Range<usize>,
+ ) {
+ let chunks = alloc
+ .init_mask()
+ .range_as_init_chunks(Size::from_bytes(range.start), Size::from_bytes(range.end));
+
+ let chunk_to_llval = move |chunk| match chunk {
+ InitChunk::Init(range) => {
+ let range = (range.start.bytes() as usize)..(range.end.bytes() as usize);
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+ cx.const_bytes(bytes)
+ }
+ InitChunk::Uninit(range) => {
+ let len = range.end.bytes() - range.start.bytes();
+ cx.const_undef(cx.type_array(cx.type_i8(), len))
+ }
+ };
+
+ // Generating partially-uninit consts is limited to small numbers of chunks,
+ // to avoid the cost of generating large complex const expressions.
+ // For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element,
+ // and would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
+ let max = if llvm_util::get_version() < (14, 0, 0) {
+ // Generating partially-uninit consts inhibits optimizations in LLVM < 14.
+ // See https://github.com/rust-lang/rust/issues/84565.
+ 1
+ } else {
+ cx.sess().opts.unstable_opts.uninit_const_chunk_threshold
+ };
+ let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max;
+
+ if allow_uninit_chunks {
+ llvals.extend(chunks.map(chunk_to_llval));
+ } else {
+ // If this allocation contains any uninit bytes, codegen as if it was initialized
+ // (using some arbitrary value for uninit bytes).
+ let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
+ llvals.push(cx.const_bytes(bytes));
+ }
+ }
+
+ let mut next_offset = 0;
+ for &(offset, alloc_id) in alloc.relocations().iter() {
+ let offset = offset.bytes();
+ assert_eq!(offset as usize as u64, offset);
+ let offset = offset as usize;
+ if offset > next_offset {
+ // This `inspect` is okay since we have checked that it is not within a relocation, it
+ // is within the bounds of the allocation, and it doesn't affect interpreter execution
+ // (we inspect the result after interpreter execution).
+ append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset);
+ }
+ let ptr_offset = read_target_uint(
+ dl.endian,
+ // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+ // affect interpreter execution (we inspect the result after interpreter execution),
+ // and we properly interpret the relocation as a relocation pointer offset.
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+ )
+ .expect("const_alloc_to_llvm: could not read relocation pointer")
+ as u64;
+
+ let address_space = match cx.tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space,
+ GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => {
+ AddressSpace::DATA
+ }
+ };
+
+ llvals.push(cx.scalar_to_backend(
+ InterpScalar::from_pointer(
+ Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
+ &cx.tcx,
+ ),
+ Scalar::Initialized {
+ value: Primitive::Pointer,
+ valid_range: WrappingRange::full(dl.pointer_size),
+ },
+ cx.type_i8p_ext(address_space),
+ ));
+ next_offset = offset + pointer_size;
+ }
+ if alloc.len() >= next_offset {
+ let range = next_offset..alloc.len();
+        // This `inspect` is okay since we have checked that it is after all relocations, it is
+ // within the bounds of the allocation, and it doesn't affect interpreter execution (we
+ // inspect the result after interpreter execution).
+ append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range);
+ }
+
+ cx.const_struct(&llvals, true)
+}
+
+pub fn codegen_static_initializer<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ def_id: DefId,
+) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> {
+ let alloc = cx.tcx.eval_static_initializer(def_id)?;
+ Ok((const_alloc_to_llvm(cx, alloc), alloc))
+}
+
+fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
+ // The target may require greater alignment for globals than the type does.
+ // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
+ // which can force it to be smaller. Rust doesn't support this yet.
+ if let Some(min) = cx.sess().target.min_global_align {
+ match Align::from_bits(min) {
+ Ok(min) => align = align.max(min),
+ Err(err) => {
+ cx.sess().err(&format!("invalid minimum global alignment: {}", err));
+ }
+ }
+ }
+ unsafe {
+ llvm::LLVMSetAlignment(gv, align.bytes() as u32);
+ }
+}
+
+fn check_and_apply_linkage<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ attrs: &CodegenFnAttrs,
+ ty: Ty<'tcx>,
+ sym: &str,
+ span_def_id: DefId,
+) -> &'ll Value {
+ let llty = cx.layout_of(ty).llvm_type(cx);
+ if let Some(linkage) = attrs.linkage {
+ debug!("get_static: sym={} linkage={:?}", sym, linkage);
+
+ // If this is a static with a linkage specified, then we need to handle
+ // it a little specially. The typesystem prevents things like &T and
+ // extern "C" fn() from being non-null, so we can't just declare a
+ // static and call it a day. Some linkages (like weak) will make it such
+ // that the static actually has a null value.
+ let llty2 = if let ty::RawPtr(ref mt) = ty.kind() {
+ cx.layout_of(mt.ty).llvm_type(cx)
+ } else {
+ cx.sess().span_fatal(
+ cx.tcx.def_span(span_def_id),
+ "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
+ )
+ };
+ unsafe {
+ // Declare a symbol `foo` with the desired linkage.
+ let g1 = cx.declare_global(sym, llty2);
+ llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
+
+ // Declare an internal global `extern_with_linkage_foo` which
+ // is initialized with the address of `foo`. If `foo` is
+ // discarded during linking (for example, if `foo` has weak
+ // linkage and there are no definitions), then
+ // `extern_with_linkage_foo` will instead be initialized to
+ // zero.
+ let mut real_name = "_rust_extern_with_linkage_".to_string();
+ real_name.push_str(sym);
+ let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
+ cx.sess().span_fatal(
+ cx.tcx.def_span(span_def_id),
+ &format!("symbol `{}` is already defined", &sym),
+ )
+ });
+ llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
+ llvm::LLVMSetInitializer(g2, g1);
+ g2
+ }
+ } else {
+ // Generate an external declaration.
+ // FIXME(nagisa): investigate whether it can be changed into define_global
+ cx.declare_global(sym, llty)
+ }
+}
+
+pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMConstPointerCast(val, ty) }
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+ pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMConstBitCast(val, ty) }
+ }
+
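+    /// Creates a private global initialized with `cv` (named after `kind`
+    /// unless symbol names are suppressed), sets its alignment, and returns it.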
+ pub(crate) fn static_addr_of_mut(
+ &self,
+ cv: &'ll Value,
+ align: Align,
+ kind: Option<&str>,
+ ) -> &'ll Value {
+ unsafe {
+ let gv = match kind {
+ Some(kind) if !self.tcx.sess.fewer_names() => {
+ let name = self.generate_local_symbol_name(kind);
+ let gv = self.define_global(&name, self.val_ty(cv)).unwrap_or_else(|| {
+ bug!("symbol `{}` is already defined", name);
+ });
+ llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
+ gv
+ }
+ _ => self.define_private_global(self.val_ty(cv)),
+ };
+ llvm::LLVMSetInitializer(gv, cv);
+ set_global_alignment(self, gv, align);
+ llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
+ gv
+ }
+ }
+
+ pub(crate) fn get_static(&self, def_id: DefId) -> &'ll Value {
+ let instance = Instance::mono(self.tcx, def_id);
+ if let Some(&g) = self.instances.borrow().get(&instance) {
+ return g;
+ }
+
+ let defined_in_current_codegen_unit =
+ self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
+ assert!(
+ !defined_in_current_codegen_unit,
+ "consts::get_static() should always hit the cache for \
+ statics defined in the same CGU, but did not for `{:?}`",
+ def_id
+ );
+
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let sym = self.tcx.symbol_name(instance).name;
+ let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
+
+ debug!("get_static: sym={} instance={:?} fn_attrs={:?}", sym, instance, fn_attrs);
+
+ let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
+ let llty = self.layout_of(ty).llvm_type(self);
+ if let Some(g) = self.get_declared_value(sym) {
+ if self.val_ty(g) != self.type_ptr_to(llty) {
+ span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
+ }
+ }
+
+ let g = self.declare_global(sym, llty);
+
+ if !self.tcx.is_reachable_non_generic(def_id) {
+ unsafe {
+ llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
+ }
+ }
+
+ g
+ } else {
+ check_and_apply_linkage(self, fn_attrs, ty, sym, def_id)
+ };
+
+ // Thread-local statics in some other crate need to *always* be linked
+ // against in a thread-local fashion, so we need to be sure to apply the
+ // thread-local attribute locally if it was present remotely. If we
+ // don't do this then linker errors can be generated where the linker
+        // complains that one object file has a thread-local version of the
+ // symbol and another one doesn't.
+ if fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ llvm::set_thread_local_mode(g, self.tls_model);
+ }
+
+ if !def_id.is_local() {
+ let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
+ // ThinLTO can't handle this workaround in all cases, so we don't
+ // emit the attrs. Instead we make them unnecessary by disallowing
+ // dynamic linking when linker plugin based LTO is enabled.
+ !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();
+
+            // If this assertion triggers, there's something wrong with command-line
+ // argument validation.
+ debug_assert!(
+ !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
+ && self.tcx.sess.target.is_like_windows
+ && self.tcx.sess.opts.cg.prefer_dynamic)
+ );
+
+ if needs_dll_storage_attr {
+ // This item is external but not foreign, i.e., it originates from an external Rust
+ // crate. Since we don't know whether this crate will be linked dynamically or
+ // statically in the final application, we always mark such symbols as 'dllimport'.
+ // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
+ // to make things work.
+ //
+ // However, in some scenarios we defer emission of statics to downstream
+ // crates, so there are cases where a static with an upstream DefId
+ // is actually present in the current crate. We can find out via the
+ // is_codegened_item query.
+ if !self.tcx.is_codegened_item(def_id) {
+ unsafe {
+ llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
+ }
+ }
+ }
+ }
+
+ if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
+ // For foreign (native) libs we know the exact storage type to use.
+ unsafe {
+ llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
+ }
+ }
+
+ unsafe {
+ if self.should_assume_dso_local(g, true) {
+ llvm::LLVMRustSetDSOLocal(g, true);
+ }
+ }
+
+ self.instances.borrow_mut().insert(instance, g);
+ g
+ }
+}
+
+impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
+ fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value {
+ if let Some(&gv) = self.const_globals.borrow().get(&cv) {
+ unsafe {
+ // Upgrade the alignment in cases where the same constant is used with different
+ // alignment requirements
+ let llalign = align.bytes() as u32;
+ if llalign > llvm::LLVMGetAlignment(gv) {
+ llvm::LLVMSetAlignment(gv, llalign);
+ }
+ }
+ return gv;
+ }
+ let gv = self.static_addr_of_mut(cv, align, kind);
+ unsafe {
+ llvm::LLVMSetGlobalConstant(gv, True);
+ }
+ self.const_globals.borrow_mut().insert(cv, gv);
+ gv
+ }
+
+ fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
+ unsafe {
+ let attrs = self.tcx.codegen_fn_attrs(def_id);
+
+ let Ok((v, alloc)) = codegen_static_initializer(self, def_id) else {
+ // Error has already been reported
+ return;
+ };
+ let alloc = alloc.inner();
+
+ let g = self.get_static(def_id);
+
+ // boolean SSA values are i1, but they have to be stored in i8 slots,
+ // otherwise some LLVM optimization passes don't work as expected
+ let mut val_llty = self.val_ty(v);
+ let v = if val_llty == self.type_i1() {
+ val_llty = self.type_i8();
+ llvm::LLVMConstZExt(v, val_llty)
+ } else {
+ v
+ };
+
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let llty = self.layout_of(ty).llvm_type(self);
+ let g = if val_llty == llty {
+ g
+ } else {
+ // If we created the global with the wrong type,
+ // correct the type.
+ let name = llvm::get_value_name(g).to_vec();
+ llvm::set_value_name(g, b"");
+
+ let linkage = llvm::LLVMRustGetLinkage(g);
+ let visibility = llvm::LLVMRustGetVisibility(g);
+
+ let new_g = llvm::LLVMRustGetOrInsertGlobal(
+ self.llmod,
+ name.as_ptr().cast(),
+ name.len(),
+ val_llty,
+ );
+
+ llvm::LLVMRustSetLinkage(new_g, linkage);
+ llvm::LLVMRustSetVisibility(new_g, visibility);
+
+ // The old global has had its name removed but is returned by
+ // get_static since it is in the instance cache. Provide an
+ // alternative lookup that points to the new global so that
+ // global_asm! can compute the correct mangled symbol name
+ // for the global.
+ self.renamed_statics.borrow_mut().insert(def_id, new_g);
+
+ // To avoid breaking any invariants, we leave around the old
+ // global for the moment; we'll replace all references to it
+ // with the new global later. (See base::codegen_backend.)
+ self.statics_to_rauw.borrow_mut().push((g, new_g));
+ new_g
+ };
+ set_global_alignment(self, g, self.align_of(ty));
+ llvm::LLVMSetInitializer(g, v);
+
+ if self.should_assume_dso_local(g, true) {
+ llvm::LLVMRustSetDSOLocal(g, true);
+ }
+
+ // As an optimization, all shared statics which do not have interior
+ // mutability are placed into read-only memory.
+ if !is_mutable && self.type_is_freeze(ty) {
+ llvm::LLVMSetGlobalConstant(g, llvm::True);
+ }
+
+ debuginfo::build_global_var_di_node(self, def_id, g);
+
+ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ llvm::set_thread_local_mode(g, self.tls_model);
+
+ // Do not allow LLVM to change the alignment of a TLS on macOS.
+ //
+ // By default a global's alignment can be freely increased.
+ // This allows LLVM to generate more performant instructions
+            // e.g., by using aligned loads into a SIMD register.
+ //
+ // However, on macOS 10.10 or below, the dynamic linker does not
+ // respect any alignment given on the TLS (radar 24221680).
+            // This violates the alignment assumption and causes segfaults at runtime.
+ //
+ // This bug is very easy to trigger. In `println!` and `panic!`,
+            // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS
+            // whose values are `mem::replace`d on initialization.
+ // The implementation of `mem::replace` will use SIMD
+ // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
+ // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
+            // which macOS's dyld disregards, causing crashes
+ // (see issues #51794, #51758, #50867, #48866 and #44056).
+ //
+            // To work around the bug, we trick LLVM into not increasing
+ // the global's alignment by explicitly assigning a section to it
+ // (equivalent to automatically generating a `#[link_section]` attribute).
+ // See the comment in the `GlobalValue::canIncreaseAlignment()` function
+ // of `lib/IR/Globals.cpp` for why this works.
+ //
+ // When the alignment is not increased, the optimized `mem::replace`
+            // will use unaligned load instructions instead, thus avoiding the crash.
+ //
+ // We could remove this hack whenever we decide to drop macOS 10.10 support.
+ if self.tcx.sess.target.is_like_osx {
+ // The `inspect` method is okay here because we checked relocations, and
+ // because we are doing this access to inspect the final interpreter state
+ // (not as part of the interpreter execution).
+ //
+ // FIXME: This check requires that the (arbitrary) value of undefined bytes
+ // happens to be zero. Instead, we should only check the value of defined bytes
+ // and set all undefined bytes to zero if this allocation is headed for the
+ // BSS.
+ let all_bytes_are_zero = alloc.relocations().is_empty()
+ && alloc
+ .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
+ .iter()
+ .all(|&byte| byte == 0);
+
+ let sect_name = if all_bytes_are_zero {
+ cstr!("__DATA,__thread_bss")
+ } else {
+ cstr!("__DATA,__thread_data")
+ };
+ llvm::LLVMSetSection(g, sect_name.as_ptr());
+ }
+ }
+
+ // Wasm statics with custom link sections get special treatment as they
+ // go into custom sections of the wasm executable.
+ if self.tcx.sess.target.is_like_wasm {
+ if let Some(section) = attrs.link_section {
+ let section = llvm::LLVMMDStringInContext(
+ self.llcx,
+ section.as_str().as_ptr().cast(),
+ section.as_str().len() as c_uint,
+ );
+ assert!(alloc.relocations().is_empty());
+
+ // The `inspect` method is okay here because we checked relocations, and
+ // because we are doing this access to inspect the final interpreter state (not
+ // as part of the interpreter execution).
+ let bytes =
+ alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
+ let alloc = llvm::LLVMMDStringInContext(
+ self.llcx,
+ bytes.as_ptr().cast(),
+ bytes.len() as c_uint,
+ );
+ let data = [section, alloc];
+ let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
+ llvm::LLVMAddNamedMetadataOperand(
+ self.llmod,
+ "wasm.custom_sections\0".as_ptr().cast(),
+ meta,
+ );
+ }
+ } else {
+ base::set_link_section(g, attrs);
+ }
+
+ if attrs.flags.contains(CodegenFnAttrFlags::USED) {
+ // `USED` and `USED_LINKER` can't be used together.
+ assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER));
+
+ // The semantics of #[used] in Rust only require the symbol to make it into the
+ // object file. It is explicitly allowed for the linker to strip the symbol if it
+                // is dead, which means we are allowed to use `llvm.compiler.used` instead of
+ // `llvm.used` here.
+ //
+ // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique
+ // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs
+ // in the handling of `.init_array` (the static constructor list) in versions of
+ // the gold linker (prior to the one released with binutils 2.36).
+ //
+ // That said, we only ever emit these when compiling for ELF targets, unless
+ // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
+ // on other targets, in particular MachO targets have *their* static constructor
+ // lists broken if `llvm.compiler.used` is emitted rather than llvm.used. However,
+ // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_typeck`,
+ // so we don't need to take care of it here.
+ self.add_compiler_used_global(g);
+ }
+ if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
+ // `USED` and `USED_LINKER` can't be used together.
+ assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED));
+
+ self.add_used_global(g);
+ }
+ }
+ }
+
+ /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+ fn add_used_global(&self, global: &'ll Value) {
+ let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
+ self.used_statics.borrow_mut().push(cast);
+ }
+
+ /// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
+ /// an array of i8*.
+ fn add_compiler_used_global(&self, global: &'ll Value) {
+ let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
+ self.compiler_used_statics.borrow_mut().push(cast);
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
new file mode 100644
index 000000000..5857b83f6
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -0,0 +1,1014 @@
+use crate::attributes;
+use crate::back::write::to_llvm_code_model;
+use crate::callee::get_fn;
+use crate::coverageinfo;
+use crate::debuginfo;
+use crate::llvm;
+use crate::llvm_util;
+use crate::type_::Type;
+use crate::value::Value;
+
+use cstr::cstr;
+use rustc_codegen_ssa::base::wants_msvc_seh;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::layout::{
+ FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers,
+ TyAndLayout,
+};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::{bug, span_bug};
+use rustc_session::config::{BranchProtection, CFGuard, CFProtection};
+use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet};
+use rustc_session::Session;
+use rustc_span::source_map::Span;
+use rustc_target::abi::{
+ call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
+};
+use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
+use smallvec::SmallVec;
+
+use std::cell::{Cell, RefCell};
+use std::ffi::CStr;
+use std::str;
+
+/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
+/// `llvm::Context` so that several compilation units may be optimized in parallel.
+/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
+pub struct CodegenCx<'ll, 'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub check_overflow: bool,
+ pub use_dll_storage_attrs: bool,
+ pub tls_model: llvm::ThreadLocalMode,
+
+ pub llmod: &'ll llvm::Module,
+ pub llcx: &'ll llvm::Context,
+ pub codegen_unit: &'tcx CodegenUnit<'tcx>,
+
+ /// Cache instances of monomorphic and polymorphic items
+ pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
+ /// Cache generated vtables
+ pub vtables:
+ RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>,
+    /// Cache of constant strings.
+ pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,
+
+ /// Reverse-direction for const ptrs cast from globals.
+ ///
+ /// Key is a Value holding a `*T`,
+ /// Val is a Value holding a `*[T]`.
+ ///
+ /// Needed because LLVM loses pointer->pointee association
+ /// when we ptrcast, and we have to ptrcast during codegen
+ /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
+ /// a pointer to an LLVM array type. Similar for trait objects.
+ pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
+
+ /// Cache of emitted const globals (value -> global)
+ pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
+
+ /// List of globals for static variables which need to be passed to the
+ /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
+ /// (We have to make sure we don't invalidate any Values referring
+ /// to constants.)
+ pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,
+
+ /// Statics that will be placed in the llvm.used variable
+ /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
+ pub used_statics: RefCell<Vec<&'ll Value>>,
+
+ /// Statics that will be placed in the llvm.compiler.used variable
+ /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
+ pub compiler_used_statics: RefCell<Vec<&'ll Value>>,
+
+ /// Mapping of non-scalar types to llvm types and field remapping if needed.
+ pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>,
+
+ /// Mapping of scalar types to llvm types.
+ pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
+
+ pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+ pub isize_ty: &'ll Type,
+
+ pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
+ pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>,
+
+ eh_personality: Cell<Option<&'ll Value>>,
+ eh_catch_typeinfo: Cell<Option<&'ll Value>>,
+ pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,
+
+ intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,
+
+ /// A counter that is used for generating local symbol names
+ local_gen_sym_counter: Cell<usize>,
+
+ /// `codegen_static` will sometimes create a second global variable with a
+ /// different type and clear the symbol name of the original global.
+ /// `global_asm!` needs to be able to find this new global so that it can
+ /// compute the correct mangled symbol name to insert into the asm.
+ pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
+}
+
+pub struct TypeLowering<'ll> {
+ /// Associated LLVM type
+ pub lltype: &'ll Type,
+
+ /// If padding is used the slice maps fields from source order
+ /// to llvm order.
+ pub field_remapping: Option<SmallVec<[u32; 4]>>,
+}
+
+fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
+ match tls_model {
+ TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
+ TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic,
+ TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec,
+ TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec,
+ }
+}
+
+pub unsafe fn create_module<'ll>(
+ tcx: TyCtxt<'_>,
+ llcx: &'ll llvm::Context,
+ mod_name: &str,
+) -> &'ll llvm::Module {
+ let sess = tcx.sess;
+ let mod_name = SmallCStr::new(mod_name);
+ let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
+
+ let mut target_data_layout = sess.target.data_layout.to_string();
+ let llvm_version = llvm_util::get_version();
+ if llvm_version < (13, 0, 0) {
+ if sess.target.arch == "powerpc64" {
+ target_data_layout = target_data_layout.replace("-S128", "");
+ }
+ if sess.target.arch == "wasm32" {
+ target_data_layout = "e-m:e-p:32:32-i64:64-n32:64-S128".to_string();
+ }
+ if sess.target.arch == "wasm64" {
+ target_data_layout = "e-m:e-p:64:64-i64:64-n32:64-S128".to_string();
+ }
+ }
+ if llvm_version < (14, 0, 0) {
+ if sess.target.llvm_target == "i686-pc-windows-msvc"
+ || sess.target.llvm_target == "i586-pc-windows-msvc"
+ {
+ target_data_layout =
+ "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32"
+ .to_string();
+ }
+ if sess.target.arch == "wasm32" {
+ target_data_layout = target_data_layout.replace("-p10:8:8-p20:8:8", "");
+ }
+ }
+
+    // Ensure the hardcoded data-layout values remain the defaults.
+ if sess.target.is_builtin {
+ let tm = crate::back::write::create_informational_target_machine(tcx.sess);
+ llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
+ llvm::LLVMRustDisposeTargetMachine(tm);
+
+ let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
+ let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
+ .expect("got a non-UTF8 data-layout from LLVM");
+
+        // Unfortunately LLVM target specs change over time, and right now we
+        // don't properly support any `data_layout` other than the one that is
+        // in the rust-lang/rust repo. If this compiler is configured against a
+        // custom LLVM, we may have a differing data layout, even though we
+        // should update our own to use that one.
+ //
+ // As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
+ // disable this check entirely as we may be configured with something
+ // that has a different target layout.
+ //
+        // It's unclear whether this will actually cause breakage when rustc is
+        // configured this way.
+ //
+ // FIXME(#34960)
+ let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
+ let custom_llvm_used = cfg_llvm_root.trim() != "";
+
+ if !custom_llvm_used && target_data_layout != llvm_data_layout {
+ bug!(
+ "data-layout for target `{rustc_target}`, `{rustc_layout}`, \
+ differs from LLVM target's `{llvm_target}` default layout, `{llvm_layout}`",
+ rustc_target = sess.opts.target_triple,
+ rustc_layout = target_data_layout,
+ llvm_target = sess.target.llvm_target,
+ llvm_layout = llvm_data_layout
+ );
+ }
+ }
+
+ let data_layout = SmallCStr::new(&target_data_layout);
+ llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
+
+ let llvm_target = SmallCStr::new(&sess.target.llvm_target);
+ llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
+
+ let reloc_model = sess.relocation_model();
+ if matches!(reloc_model, RelocModel::Pic | RelocModel::Pie) {
+ llvm::LLVMRustSetModulePICLevel(llmod);
+ // PIE is potentially more effective than PIC, but can only be used in executables.
+ // If all our outputs are executables, then we can relax PIC to PIE.
+ if reloc_model == RelocModel::Pie
+ || sess.crate_types().iter().all(|ty| *ty == CrateType::Executable)
+ {
+ llvm::LLVMRustSetModulePIELevel(llmod);
+ }
+ }
+
+ // Linking object files with different code models is undefined behavior
+ // because the compiler would have to generate additional code (to span
+ // longer jumps) if a larger code model is used with a smaller one.
+ //
+ // See https://reviews.llvm.org/D52322 and https://reviews.llvm.org/D52323.
+ llvm::LLVMRustSetModuleCodeModel(llmod, to_llvm_code_model(sess.code_model()));
+
+ // If skipping the PLT is enabled, we need to add some module metadata
+ // to ensure intrinsic calls don't use it.
+ if !sess.needs_plt() {
+ let avoid_plt = "RtLibUseGOT\0".as_ptr().cast();
+ llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
+ }
+
+ if sess.is_sanitizer_cfi_enabled() {
+ // FIXME(rcvalle): Add support for non canonical jump tables.
+ let canonical_jump_tables = "CFI Canonical Jump Tables\0".as_ptr().cast();
+ // FIXME(rcvalle): Add it with Override behavior flag.
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ canonical_jump_tables,
+ 1,
+ );
+ }
+
+ // Control Flow Guard is currently only supported by the MSVC linker on Windows.
+ if sess.target.is_like_msvc {
+ match sess.opts.cg.control_flow_guard {
+ CFGuard::Disabled => {}
+ CFGuard::NoChecks => {
+ // Set `cfguard=1` module flag to emit metadata only.
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ "cfguard\0".as_ptr() as *const _,
+ 1,
+ )
+ }
+ CFGuard::Checks => {
+ // Set `cfguard=2` module flag to emit metadata and checks.
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ "cfguard\0".as_ptr() as *const _,
+ 2,
+ )
+ }
+ }
+ }
+
+ if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
+ if sess.target.arch != "aarch64" {
+ sess.err("-Zbranch-protection is only supported on aarch64");
+ } else {
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "branch-target-enforcement\0".as_ptr().cast(),
+ bti.into(),
+ );
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "sign-return-address\0".as_ptr().cast(),
+ pac_ret.is_some().into(),
+ );
+ let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "sign-return-address-all\0".as_ptr().cast(),
+ pac_opts.leaf.into(),
+ );
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "sign-return-address-with-bkey\0".as_ptr().cast(),
+ u32::from(pac_opts.key == PAuthKey::B),
+ );
+ }
+ }
+
+ // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang).
+ if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Override,
+ "cf-protection-branch\0".as_ptr().cast(),
+ 1,
+ )
+ }
+ if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Override,
+ "cf-protection-return\0".as_ptr().cast(),
+ 1,
+ )
+ }
+
+ if sess.opts.unstable_opts.virtual_function_elimination {
+ llvm::LLVMRustAddModuleFlag(
+ llmod,
+ llvm::LLVMModFlagBehavior::Error,
+ "Virtual Function Elim\0".as_ptr().cast(),
+ 1,
+ );
+ }
+
+ llmod
+}
+
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+ pub(crate) fn new(
+ tcx: TyCtxt<'tcx>,
+ codegen_unit: &'tcx CodegenUnit<'tcx>,
+ llvm_module: &'ll crate::ModuleLlvm,
+ ) -> Self {
+        // An interesting part of Windows on which MSVC forces our hand (and
+        // apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
+ // attributes in LLVM IR as well as native dependencies (in C these
+ // correspond to `__declspec(dllimport)`).
+ //
+ // LD (BFD) in MinGW mode can often correctly guess `dllexport` but
+ // relying on that can result in issues like #50176.
+ // LLD won't support that and expects symbols with proper attributes.
+        // Because of that we make the MinGW target emit dllexport just like MSVC.
+        // When it comes to dllimport, we use it for constants but for functions
+        // we rely on the linker to do the right thing. As opposed to dllexport,
+        // this task is easy for them (both LD and LLD) and allows us to easily
+        // use symbols from static libraries in shared libraries.
+ //
+        // Whenever a dynamic library is built on Windows it must have its public
+        // interface specified by functions tagged with `dllexport`, otherwise
+        // they're not available to be linked against. This poses a few problems
+        // for the compiler, some of which are somewhat fundamental, but we use
+        // the `use_dll_storage_attrs` variable below to attach the `dllexport`
+        // attribute to all LLVM functions that are exported (e.g., they're
+        // already tagged with external linkage). This is suboptimal for a few
+ // reasons:
+ //
+ // * If an object file will never be included in a dynamic library,
+ // there's no need to attach the dllexport attribute. Most object
+ // files in Rust are not destined to become part of a dll as binaries
+ // are statically linked by default.
+ // * If the compiler is emitting both an rlib and a dylib, the same
+ // source object file is currently used but with MSVC this may be less
+ // feasible. The compiler may be able to get around this, but it may
+ // involve some invasive changes to deal with this.
+ //
+ // The flip side of this situation is that whenever you link to a dll and
+ // you import a function from it, the import should be tagged with
+ // `dllimport`. At this time, however, the compiler does not emit
+ // `dllimport` for any declarations other than constants (where it is
+ // required), which is again suboptimal for even more reasons!
+ //
+ // * Calling a function imported from another dll without using
+ // `dllimport` causes the linker/compiler to have extra overhead (one
+ // `jmp` instruction on x86) when calling the function.
+ // * The same object file may be used in different circumstances, so a
+ // function may be imported from a dll if the object is linked into a
+ // dll, but it may be just linked against if linked into an rlib.
+ // * The compiler has no knowledge about whether native functions should
+ // be tagged dllimport or not.
+ //
+ // For now the compiler takes the perf hit (I do not have any numbers to
+ // this effect) by marking very little as `dllimport` and praying the
+ // linker will take care of everything. Fixing this problem will likely
+ // require adding a few attributes to Rust itself (feature gated at the
+ // start) and then strongly recommending static linkage on Windows!
+ let use_dll_storage_attrs = tcx.sess.target.is_like_windows;
+
+ let check_overflow = tcx.sess.overflow_checks();
+
+ let tls_model = to_llvm_tls_model(tcx.sess.tls_model());
+
+ let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
+
+ let coverage_cx = if tcx.sess.instrument_coverage() {
+ let covctx = coverageinfo::CrateCoverageContext::new();
+ Some(covctx)
+ } else {
+ None
+ };
+
+ let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
+ let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
+ debuginfo::metadata::build_compile_unit_di_node(
+ tcx,
+ codegen_unit.name().as_str(),
+ &dctx,
+ );
+ Some(dctx)
+ } else {
+ None
+ };
+
+ let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());
+
+ CodegenCx {
+ tcx,
+ check_overflow,
+ use_dll_storage_attrs,
+ tls_model,
+ llmod,
+ llcx,
+ codegen_unit,
+ instances: Default::default(),
+ vtables: Default::default(),
+ const_str_cache: Default::default(),
+ const_unsized: Default::default(),
+ const_globals: Default::default(),
+ statics_to_rauw: RefCell::new(Vec::new()),
+ used_statics: RefCell::new(Vec::new()),
+ compiler_used_statics: RefCell::new(Vec::new()),
+ type_lowering: Default::default(),
+ scalar_lltypes: Default::default(),
+ pointee_infos: Default::default(),
+ isize_ty,
+ coverage_cx,
+ dbg_cx,
+ eh_personality: Cell::new(None),
+ eh_catch_typeinfo: Cell::new(None),
+ rust_try_fn: Cell::new(None),
+ intrinsics: Default::default(),
+ local_gen_sym_counter: Cell::new(0),
+ renamed_statics: Default::default(),
+ }
+ }
+
+ pub(crate) fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
+ &self.statics_to_rauw
+ }
+
+ #[inline]
+ pub fn coverage_context(&self) -> Option<&coverageinfo::CrateCoverageContext<'ll, 'tcx>> {
+ self.coverage_cx.as_ref()
+ }
+
+ fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
+ let section = cstr!("llvm.metadata");
+ let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
+
+ unsafe {
+ let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
+ llvm::LLVMSetInitializer(g, array);
+ llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
+ llvm::LLVMSetSection(g, section.as_ptr());
+ }
+ }
+}
+
+impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn vtables(
+ &self,
+ ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>
+ {
+ &self.vtables
+ }
+
+ fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
+ get_fn(self, instance)
+ }
+
+ fn get_fn_addr(&self, instance: Instance<'tcx>) -> &'ll Value {
+ get_fn(self, instance)
+ }
+
+ fn eh_personality(&self) -> &'ll Value {
+ // The exception handling personality function.
+ //
+ // If our compilation unit has the `eh_personality` lang item somewhere
+ // within it, then we just need to codegen that. Otherwise, we're
+ // building an rlib which will depend on some upstream implementation of
+ // this function, so we just codegen a generic reference to it. We don't
+ // specify any of the types for the function, we just make it a symbol
+ // that LLVM can later use.
+ //
+ // Note that MSVC is a little special here in that we don't use the
+ // `eh_personality` lang item at all. Currently LLVM has support for
+ // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
+ // *name of the personality function* to decide what kind of unwind side
+ // tables/landing pads to emit. It looks like Dwarf is used by default,
+ // injecting a dependency on the `_Unwind_Resume` symbol for resuming
+ // an "exception", but for MSVC we want to force SEH. This means that we
+ // can't actually have the personality function be our standard
+ // `rust_eh_personality` function; instead we wire it up to the
+ // CRT's custom personality function, which forces LLVM to consider
+ // landing pads as "landing pads for SEH".
+ if let Some(llpersonality) = self.eh_personality.get() {
+ return llpersonality;
+ }
+ let tcx = self.tcx;
+ let llfn = match tcx.lang_items().eh_personality() {
+ Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
+ ty::Instance::resolve(
+ tcx,
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ tcx.intern_substs(&[]),
+ )
+ .unwrap()
+ .unwrap(),
+ ),
+ _ => {
+ let name = if wants_msvc_seh(self.sess()) {
+ "__CxxFrameHandler3"
+ } else {
+ "rust_eh_personality"
+ };
+ if let Some(llfn) = self.get_declared_value(name) {
+ llfn
+ } else {
+ let fty = self.type_variadic_func(&[], self.type_i32());
+ let llfn = self.declare_cfn(name, llvm::UnnamedAddr::Global, fty);
+ let target_cpu = attributes::target_cpu_attr(self);
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[target_cpu]);
+ llfn
+ }
+ }
+ };
+ self.eh_personality.set(Some(llfn));
+ llfn
+ }
+
+ fn sess(&self) -> &Session {
+ self.tcx.sess
+ }
+
+ fn check_overflow(&self) -> bool {
+ self.check_overflow
+ }
+
+ fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
+ self.codegen_unit
+ }
+
+ fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
+ &self.used_statics
+ }
+
+ fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
+ &self.compiler_used_statics
+ }
+
+ fn set_frame_pointer_type(&self, llfn: &'ll Value) {
+ if let Some(attr) = attributes::frame_pointer_type_attr(self) {
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
+ }
+ }
+
+ fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
+ let mut attrs = SmallVec::<[_; 2]>::new();
+ attrs.push(attributes::target_cpu_attr(self));
+ attrs.extend(attributes::tune_cpu_attr(self));
+ attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
+ }
+
+ fn create_used_variable(&self) {
+ self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow());
+ }
+
+ fn create_compiler_used_variable(&self) {
+ self.create_used_variable_impl(
+ cstr!("llvm.compiler.used"),
+ &*self.compiler_used_statics.borrow(),
+ );
+ }
+
+ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
+ if self.get_declared_value("main").is_none() {
+ Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type))
+ } else {
+ // If the symbol already exists, it is an error: for example, the user wrote
+ // #[no_mangle] extern "C" fn main(..) {..}
+ // instead of #[start]
+ None
+ }
+ }
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+ pub(crate) fn get_intrinsic(&self, key: &str) -> (&'ll Type, &'ll Value) {
+ if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
+ return v;
+ }
+
+ self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
+ }
+
+ fn insert_intrinsic(
+ &self,
+ name: &'static str,
+ args: Option<&[&'ll llvm::Type]>,
+ ret: &'ll llvm::Type,
+ ) -> (&'ll llvm::Type, &'ll llvm::Value) {
+ let fn_ty = if let Some(args) = args {
+ self.type_func(args, ret)
+ } else {
+ self.type_variadic_func(&[], ret)
+ };
+ let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
+ self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
+ (fn_ty, f)
+ }
+
+ fn declare_intrinsic(&self, key: &str) -> Option<(&'ll Type, &'ll Value)> {
+ macro_rules! ifn {
+ ($name:expr, fn() -> $ret:expr) => (
+ if key == $name {
+ return Some(self.insert_intrinsic($name, Some(&[]), $ret));
+ }
+ );
+ ($name:expr, fn(...) -> $ret:expr) => (
+ if key == $name {
+ return Some(self.insert_intrinsic($name, None, $ret));
+ }
+ );
+ ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
+ if key == $name {
+ return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));
+ }
+ );
+ }
+ macro_rules! mk_struct {
+ ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
+ }
+
+ let i8p = self.type_i8p();
+ let void = self.type_void();
+ let i1 = self.type_i1();
+ let t_i8 = self.type_i8();
+ let t_i16 = self.type_i16();
+ let t_i32 = self.type_i32();
+ let t_i64 = self.type_i64();
+ let t_i128 = self.type_i128();
+ let t_isize = self.type_isize();
+ let t_f32 = self.type_f32();
+ let t_f64 = self.type_f64();
+ let t_metadata = self.type_metadata();
+
+ ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
+ ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
+ ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
+ ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
+ ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
+ ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
+ ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
+ ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
+
+ ifn!("llvm.fptosi.sat.i8.f32", fn(t_f32) -> t_i8);
+ ifn!("llvm.fptosi.sat.i16.f32", fn(t_f32) -> t_i16);
+ ifn!("llvm.fptosi.sat.i32.f32", fn(t_f32) -> t_i32);
+ ifn!("llvm.fptosi.sat.i64.f32", fn(t_f32) -> t_i64);
+ ifn!("llvm.fptosi.sat.i128.f32", fn(t_f32) -> t_i128);
+ ifn!("llvm.fptosi.sat.i8.f64", fn(t_f64) -> t_i8);
+ ifn!("llvm.fptosi.sat.i16.f64", fn(t_f64) -> t_i16);
+ ifn!("llvm.fptosi.sat.i32.f64", fn(t_f64) -> t_i32);
+ ifn!("llvm.fptosi.sat.i64.f64", fn(t_f64) -> t_i64);
+ ifn!("llvm.fptosi.sat.i128.f64", fn(t_f64) -> t_i128);
+
+ ifn!("llvm.fptoui.sat.i8.f32", fn(t_f32) -> t_i8);
+ ifn!("llvm.fptoui.sat.i16.f32", fn(t_f32) -> t_i16);
+ ifn!("llvm.fptoui.sat.i32.f32", fn(t_f32) -> t_i32);
+ ifn!("llvm.fptoui.sat.i64.f32", fn(t_f32) -> t_i64);
+ ifn!("llvm.fptoui.sat.i128.f32", fn(t_f32) -> t_i128);
+ ifn!("llvm.fptoui.sat.i8.f64", fn(t_f64) -> t_i8);
+ ifn!("llvm.fptoui.sat.i16.f64", fn(t_f64) -> t_i16);
+ ifn!("llvm.fptoui.sat.i32.f64", fn(t_f64) -> t_i32);
+ ifn!("llvm.fptoui.sat.i64.f64", fn(t_f64) -> t_i64);
+ ifn!("llvm.fptoui.sat.i128.f64", fn(t_f64) -> t_i128);
+
+ ifn!("llvm.trap", fn() -> void);
+ ifn!("llvm.debugtrap", fn() -> void);
+ ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
+
+ ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
+ ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
+
+ ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
+
+ ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
+ ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
+
+ ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
+ ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
+
+ ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
+ ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
+ ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
+ ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
+ ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
+
+ ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
+ ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
+ ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
+ ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
+ ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);
+
+ ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
+ ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
+ ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
+ ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
+ ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);
+
+ ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
+ ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
+ ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
+ ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
+ ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);
+
+ ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
+ ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
+ ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
+ ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);
+
+ ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
+ ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
+ ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
+ ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
+ ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
+
+ ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+ ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+ ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+ ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+ ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
+ ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
+ ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
+ ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
+ ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
+ ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
+ ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
+ ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
+ ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
+
+ ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
+ ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
+ ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
+ ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
+ ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
+ ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
+ ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
+ ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
+ ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
+ ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
+ ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
+ ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
+ ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
+ ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
+ ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
+ ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
+ ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
+
+ ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
+ ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
+
+ ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
+ ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
+ ifn!("llvm.localescape", fn(...) -> void);
+ ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
+ ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
+
+ ifn!("llvm.assume", fn(i1) -> void);
+ ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
+
+ // This isn't an "LLVM intrinsic", but LLVM's optimization passes
+ // recognize it as one, and we assume it exists in `core::slice::cmp`.
+ match self.sess().target.arch.as_ref() {
+ "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16),
+ _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32),
+ }
+
+ // variadic intrinsics
+ ifn!("llvm.va_start", fn(i8p) -> void);
+ ifn!("llvm.va_end", fn(i8p) -> void);
+ ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
+
+ if self.sess().instrument_coverage() {
+ ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
+ }
+
+ ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1);
+ ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1});
+
+ if self.sess().opts.debuginfo != DebugInfo::None {
+ ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
+ ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
+ }
+ None
+ }
+
+ pub(crate) fn eh_catch_typeinfo(&self) -> &'ll Value {
+ if let Some(eh_catch_typeinfo) = self.eh_catch_typeinfo.get() {
+ return eh_catch_typeinfo;
+ }
+ let tcx = self.tcx;
+ assert!(self.sess().target.os == "emscripten");
+ let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
+ Some(def_id) => self.get_static(def_id),
+ _ => {
+ let ty = self
+ .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
+ self.declare_global("rust_eh_catch_typeinfo", ty)
+ }
+ };
+ let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
+ self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
+ eh_catch_typeinfo
+ }
+}
+
+impl CodegenCx<'_, '_> {
+ /// Generates a new symbol name with the given prefix. This symbol name must
+ /// only be used for definitions with `internal` or `private` linkage.
+ pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
+ let idx = self.local_gen_sym_counter.get();
+ self.local_gen_sym_counter.set(idx + 1);
+ // Include a '.' character, so there can be no accidental conflicts with
+ // user-defined names.
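+ // (e.g., successive calls with prefix "foo" yield "foo.0", "foo.1", ...)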
+ let mut name = String::with_capacity(prefix.len() + 6);
+ name.push_str(prefix);
+ name.push('.');
+ base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
+ name
+ }
+}
+
+impl HasDataLayout for CodegenCx<'_, '_> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl HasTargetSpec for CodegenCx<'_, '_> {
+ #[inline]
+ fn target_spec(&self) -> &Target {
+ &self.tcx.sess.target
+ }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for CodegenCx<'_, 'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ ty::ParamEnv::reveal_all()
+ }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ if let LayoutError::SizeOverflow(_) = err {
+ self.sess().span_fatal(span, &err.to_string())
+ } else {
+ span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+ }
+ }
+}
+
+impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
+ type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
+
+ #[inline]
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> ! {
+ if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
+ self.sess().span_fatal(span, &err.to_string())
+ } else {
+ match fn_abi_request {
+ FnAbiRequest::OfFnPtr { sig, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
+ sig,
+ extra_args,
+ err
+ );
+ }
+ FnAbiRequest::OfInstance { instance, extra_args } => {
+ span_bug!(
+ span,
+ "`fn_abi_of_instance({}, {:?})` failed: {}",
+ instance,
+ extra_args,
+ err
+ );
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
new file mode 100644
index 000000000..58f391692
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -0,0 +1,334 @@
+use crate::common::CodegenCx;
+use crate::coverageinfo;
+use crate::llvm;
+
+use llvm::coverageinfo::CounterMappingRegion;
+use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
+use rustc_codegen_ssa::traits::{ConstMethods, CoverageInfoMethods};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefIdSet;
+use rustc_llvm::RustString;
+use rustc_middle::bug;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::coverage::CodeRegion;
+use rustc_middle::ty::TyCtxt;
+
+use std::ffi::CString;
+
+use tracing::debug;
+
+/// Generates and exports the Coverage Map.
+///
+/// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions
+/// 5 (LLVM 12 only) and 6 (zero-based encoded as 4 and 5, respectively), as defined at
+/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+/// These versions are supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`)
+/// bundled with Rust's fork of LLVM.
+///
+/// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with
+/// the same version. Clang's implementation of Coverage Map generation was referenced when
+/// implementing this Rust version, and though the format documentation is very explicit and
+/// detailed, some undocumented details in Clang's implementation (that may or may not be important)
+/// were also replicated for Rust's Coverage Map.
+pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+ let tcx = cx.tcx;
+
+ // Ensure the installed version of LLVM supports at least Coverage Map
+ // Version 5 (encoded as a zero-based value: 4), which was introduced with
+ // LLVM 12.
+ let version = coverageinfo::mapping_version();
+ if version < 4 {
+ tcx.sess.fatal("rustc option `-C instrument-coverage` requires LLVM 12 or higher.");
+ }
+
+ debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name());
+
+ // In order to show that unused functions have coverage counts of zero (0), LLVM requires that
+ // the functions exist. Generate synthetic functions with a (required) single counter, and add the
+ // MIR `Coverage` code regions to the `function_coverage_map`, before calling
+ // `ctx.take_function_coverage_map()`.
+ if cx.codegen_unit.is_code_coverage_dead_code_cgu() {
+ add_unused_functions(cx);
+ }
+
+ let function_coverage_map = match cx.coverage_context() {
+ Some(ctx) => ctx.take_function_coverage_map(),
+ None => return,
+ };
+
+ if function_coverage_map.is_empty() {
+ // This module has no functions with coverage instrumentation
+ return;
+ }
+
+ let mut mapgen = CoverageMapGenerator::new(tcx, version);
+
+ // Encode coverage mappings and generate function records
+ let mut function_data = Vec::new();
+ for (instance, function_coverage) in function_coverage_map {
+ debug!("Generate function coverage for {}, {:?}", cx.codegen_unit.name(), instance);
+ let mangled_function_name = tcx.symbol_name(instance).to_string();
+ let source_hash = function_coverage.source_hash();
+ let is_used = function_coverage.is_used();
+ let (expressions, counter_regions) =
+ function_coverage.get_expressions_and_counter_regions();
+
+ let coverage_mapping_buffer = llvm::build_byte_buffer(|coverage_mapping_buffer| {
+ mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer);
+ });
+
+ if coverage_mapping_buffer.is_empty() {
+ if function_coverage.is_used() {
+ bug!(
+ "A used function should have had coverage mapping data but did not: {}",
+ mangled_function_name
+ );
+ } else {
+ debug!("unused function had no coverage mapping data: {}", mangled_function_name);
+ continue;
+ }
+ }
+
+ function_data.push((mangled_function_name, source_hash, is_used, coverage_mapping_buffer));
+ }
+
+ // Encode all filenames referenced by counters/expressions in this module
+ let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
+ coverageinfo::write_filenames_section_to_buffer(&mapgen.filenames, filenames_buffer);
+ });
+
+ let filenames_size = filenames_buffer.len();
+ let filenames_val = cx.const_bytes(&filenames_buffer);
+ let filenames_ref = coverageinfo::hash_bytes(filenames_buffer);
+
+ // Generate the LLVM IR representation of the coverage map and store it in a well-known global
+ let cov_data_val = mapgen.generate_coverage_map(cx, version, filenames_size, filenames_val);
+
+ for (mangled_function_name, source_hash, is_used, coverage_mapping_buffer) in function_data {
+ save_function_record(
+ cx,
+ mangled_function_name,
+ source_hash,
+ filenames_ref,
+ coverage_mapping_buffer,
+ is_used,
+ );
+ }
+
+ // Save the coverage data value to LLVM IR
+ coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
+}
+
+struct CoverageMapGenerator {
+ filenames: FxIndexSet<CString>,
+}
+
+impl CoverageMapGenerator {
+ fn new(tcx: TyCtxt<'_>, version: u32) -> Self {
+ let mut filenames = FxIndexSet::default();
+ if version >= 5 {
+ // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5)
+ // requires setting the first filename to the compilation directory.
+ // Since rustc generates coverage maps with relative paths, the
+ // compilation directory can be combined with the relative paths
+ // to get absolute paths, if needed.
+ let working_dir = tcx
+ .sess
+ .opts
+ .working_dir
+ .remapped_path_if_available()
+ .to_string_lossy()
+ .to_string();
+ let c_filename =
+ CString::new(working_dir).expect("null error converting filename to C string");
+ filenames.insert(c_filename);
+ }
+ Self { filenames }
+ }
+
+ /// Using the `expressions` and `counter_regions` collected for the current function, generate
+ /// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
+ /// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
+ /// the given `coverage_mapping` byte buffer, compliant with the LLVM Coverage Mapping format.
+ fn write_coverage_mapping<'a>(
+ &mut self,
+ expressions: Vec<CounterExpression>,
+ counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
+ coverage_mapping_buffer: &RustString,
+ ) {
+ let mut counter_regions = counter_regions.collect::<Vec<_>>();
+ if counter_regions.is_empty() {
+ return;
+ }
+
+ let mut virtual_file_mapping = Vec::new();
+ let mut mapping_regions = Vec::new();
+ let mut current_file_name = None;
+ let mut current_file_id = 0;
+
+ // Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
+ // by filename and position. Capture any new files to compute each `CounterMappingRegion`'s
+ // `file_id` (indexing files referenced by the current function), and construct the
+ // function-specific `virtual_file_mapping` from `file_id` to its index in the module's
+ // `filenames` array.
+ counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
+ for (counter, region) in counter_regions {
+ let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
+ let same_file = current_file_name.as_ref().map_or(false, |p| *p == file_name);
+ if !same_file {
+ if current_file_name.is_some() {
+ current_file_id += 1;
+ }
+ current_file_name = Some(file_name);
+ let c_filename = CString::new(file_name.to_string())
+ .expect("null error converting filename to C string");
+ debug!(" file_id: {} = '{:?}'", current_file_id, c_filename);
+ let (filenames_index, _) = self.filenames.insert_full(c_filename);
+ virtual_file_mapping.push(filenames_index as u32);
+ }
+ debug!("Adding counter {:?} to map for {:?}", counter, region);
+ mapping_regions.push(CounterMappingRegion::code_region(
+ counter,
+ current_file_id,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ ));
+ }
+
+ // Encode and append the current function's coverage mapping data
+ coverageinfo::write_mapping_to_buffer(
+ virtual_file_mapping,
+ expressions,
+ mapping_regions,
+ coverage_mapping_buffer,
+ );
+ }
+
+ /// Construct coverage map header and the array of function records, and combine them into the
+ /// coverage map. Save the coverage map data into the LLVM IR as a static global using a
+ /// specific, well-known section and name.
+ fn generate_coverage_map<'ll>(
+ self,
+ cx: &CodegenCx<'ll, '_>,
+ version: u32,
+ filenames_size: usize,
+ filenames_val: &'ll llvm::Value,
+ ) -> &'ll llvm::Value {
+ debug!("cov map: filenames_size = {}, 0-based version = {}", filenames_size, version);
+
+ // Create the coverage data header (Note, fields 0 and 2 are now always zero,
+ // as of `llvm::coverage::CovMapVersion::Version4`.)
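+ // Rough layout sketch of the header (one u32 per field, not authoritative):
+ // { NRecords = 0, FilenamesSize, CoverageSize = 0, Version }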
+ let zero_was_n_records_val = cx.const_u32(0);
+ let filenames_size_val = cx.const_u32(filenames_size as u32);
+ let zero_was_coverage_size_val = cx.const_u32(0);
+ let version_val = cx.const_u32(version);
+ let cov_data_header_val = cx.const_struct(
+ &[zero_was_n_records_val, filenames_size_val, zero_was_coverage_size_val, version_val],
+ /*packed=*/ false,
+ );
+
+ // Create the complete LLVM coverage data value to add to the LLVM IR
+ cx.const_struct(&[cov_data_header_val, filenames_val], /*packed=*/ false)
+ }
+}
+
+/// Construct a function record and combine it with the function's coverage mapping data.
+/// Save the function record into the LLVM IR as a static global using a
+/// specific, well-known section and name.
+fn save_function_record(
+ cx: &CodegenCx<'_, '_>,
+ mangled_function_name: String,
+ source_hash: u64,
+ filenames_ref: u64,
+ coverage_mapping_buffer: Vec<u8>,
+ is_used: bool,
+) {
+ // Concatenate the encoded coverage mappings
+ let coverage_mapping_size = coverage_mapping_buffer.len();
+ let coverage_mapping_val = cx.const_bytes(&coverage_mapping_buffer);
+
+ let func_name_hash = coverageinfo::hash_str(&mangled_function_name);
+ let func_name_hash_val = cx.const_u64(func_name_hash);
+ let coverage_mapping_size_val = cx.const_u32(coverage_mapping_size as u32);
+ let source_hash_val = cx.const_u64(source_hash);
+ let filenames_ref_val = cx.const_u64(filenames_ref);
+ let func_record_val = cx.const_struct(
+ &[
+ func_name_hash_val,
+ coverage_mapping_size_val,
+ source_hash_val,
+ filenames_ref_val,
+ coverage_mapping_val,
+ ],
+ /*packed=*/ true,
+ );
+
+ coverageinfo::save_func_record_to_mod(cx, func_name_hash, func_record_val, is_used);
+}
+
+/// When finalizing the coverage map, `FunctionCoverage` only has the `CodeRegion`s and counters for
+/// the functions that went through codegen, such as public functions and "used" functions
+/// (functions referenced by other "used" or public items). Any other functions considered unused,
+/// or "Unreachable", were still parsed and processed through the MIR stage, but were not
+/// codegenned. (Note that `-Clink-dead-code` can force some unused code to be codegenned, but
+/// that flag is known to cause other errors when combined with `-C instrument-coverage`; and
+/// `-Clink-dead-code` will not generate code for unused generic functions.)
+///
+/// We can find the unused functions (including generic functions) by the set difference of all MIR
+/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`tcx` query
+/// `codegened_and_inlined_items`).
+///
+/// These unused functions are then codegen'd in one of the CGUs which is marked as the
+/// "code coverage dead code cgu" during the partitioning process. This prevents us from generating
+/// code regions for the same function more than once, which can lead to linker errors regarding
+/// duplicate symbols.
+fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
+ assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu());
+
+ let tcx = cx.tcx;
+
+ let ignore_unused_generics = tcx.sess.instrument_coverage_except_unused_generics();
+
+ let eligible_def_ids: DefIdSet = tcx
+ .mir_keys(())
+ .iter()
+ .filter_map(|local_def_id| {
+ let def_id = local_def_id.to_def_id();
+ let kind = tcx.def_kind(def_id);
+ // `mir_keys` will give us `DefId`s for all kinds of things, not
+ // just "functions", like consts, statics, etc. Filter those out.
+ // If `ignore_unused_generics` was specified, filter out any
+ // generic functions from consideration as well.
+ if !matches!(
+ kind,
+ DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Generator
+ ) {
+ return None;
+ } else if ignore_unused_generics
+ && tcx.generics_of(def_id).requires_monomorphization(tcx)
+ {
+ return None;
+ }
+ Some(local_def_id.to_def_id())
+ })
+ .collect();
+
+ let codegenned_def_ids = tcx.codegened_and_inlined_items(());
+
+ for &non_codegenned_def_id in eligible_def_ids.difference(codegenned_def_ids) {
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(non_codegenned_def_id);
+
+ // If a function is marked `#[no_coverage]`, then skip generating a
+ // dead code stub for it.
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
+ debug!("skipping unused fn marked #[no_coverage]: {:?}", non_codegenned_def_id);
+ continue;
+ }
+
+ debug!("generating unused fn: {:?}", non_codegenned_def_id);
+ cx.define_unused_fn(non_codegenned_def_id);
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
new file mode 100644
index 000000000..98ba38356
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -0,0 +1,385 @@
+use crate::llvm;
+
+use crate::abi::Abi;
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+
+use libc::c_uint;
+use llvm::coverageinfo::CounterMappingRegion;
+use rustc_codegen_ssa::coverageinfo::map::{CounterExpression, FunctionCoverage};
+use rustc_codegen_ssa::traits::{
+ BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, CoverageInfoMethods,
+ MiscMethods, StaticMethods,
+};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_llvm::RustString;
+use rustc_middle::bug;
+use rustc_middle::mir::coverage::{
+ CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op,
+};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::Instance;
+
+use std::cell::RefCell;
+use std::ffi::CString;
+
+use std::iter;
+use tracing::debug;
+
+pub mod mapgen;
+
+const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START;
+
+const VAR_ALIGN_BYTES: usize = 8;
+
+/// A context object for maintaining all state needed by the coverageinfo module.
+pub struct CrateCoverageContext<'ll, 'tcx> {
+ // Coverage data for each instrumented function identified by DefId.
+ pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>>>,
+ pub(crate) pgo_func_name_var_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>,
+}
+
+impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
+ pub fn new() -> Self {
+ Self {
+ function_coverage_map: Default::default(),
+ pgo_func_name_var_map: Default::default(),
+ }
+ }
+
+ pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage<'tcx>> {
+ self.function_coverage_map.replace(FxHashMap::default())
+ }
+}
+
+impl<'ll, 'tcx> CoverageInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn coverageinfo_finalize(&self) {
+ mapgen::finalize(self)
+ }
+
+ fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!("getting pgo_func_name_var for instance={:?}", instance);
+ let mut pgo_func_name_var_map = coverage_context.pgo_func_name_var_map.borrow_mut();
+ pgo_func_name_var_map
+ .entry(instance)
+ .or_insert_with(|| create_pgo_func_name_var(self, instance))
+ } else {
+ bug!("Could not get the `coverage_context`");
+ }
+ }
+
+ /// Functions with MIR-based coverage are normally codegenned _only_ if
+ /// called. LLVM coverage tools typically expect every function to be
+ /// defined (even if unused), with at least one call to LLVM intrinsic
+ /// `instrprof.increment`.
+ ///
+ /// Codegen a small function that will never be called, with one counter
+ /// that will never be incremented.
+ ///
+ /// For used/called functions, the coverageinfo was already added to the
+ /// `function_coverage_map` (keyed by function `Instance`) during codegen.
+ /// But in this case, since the unused function was _not_ previously
+ /// codegenned, collect the coverage `CodeRegion`s from the MIR and add
+ /// them. The first `CodeRegion` is used to add a single counter, with the
+ /// same counter ID used in the injected `instrprof.increment` intrinsic
+ /// call. Since the function is never called, all other `CodeRegion`s can be
+ /// added as `unreachable_region`s.
+ fn define_unused_fn(&self, def_id: DefId) {
+ let instance = declare_unused_fn(self, def_id);
+ codegen_unused_fn_and_counter(self, instance);
+ add_unused_function_coverage(self, instance, def_id);
+ }
+}
+
+impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
+ fn set_function_source_hash(
+ &mut self,
+ instance: Instance<'tcx>,
+ function_source_hash: u64,
+ ) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "ensuring function source hash is set for instance={:?}; function_source_hash={}",
+ instance, function_source_hash,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .set_function_source_hash(function_source_hash);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn add_coverage_counter(
+ &mut self,
+ instance: Instance<'tcx>,
+ id: CounterValueReference,
+ region: CodeRegion,
+ ) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "adding counter to coverage_map: instance={:?}, id={:?}, region={:?}",
+ instance, id, region,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .add_counter(id, region);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn add_coverage_counter_expression(
+ &mut self,
+ instance: Instance<'tcx>,
+ id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+ ) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "adding counter expression to coverage_map: instance={:?}, id={:?}, {:?} {:?} {:?}; \
+ region: {:?}",
+ instance, id, lhs, op, rhs, region,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .add_counter_expression(id, lhs, op, rhs, region);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool {
+ if let Some(coverage_context) = self.coverage_context() {
+ debug!(
+ "adding unreachable code to coverage_map: instance={:?}, at {:?}",
+ instance, region,
+ );
+ let mut coverage_map = coverage_context.function_coverage_map.borrow_mut();
+ coverage_map
+ .entry(instance)
+ .or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
+ .add_unreachable_region(region);
+ true
+ } else {
+ false
+ }
+ }
+}
+
+fn declare_unused_fn<'tcx>(cx: &CodegenCx<'_, 'tcx>, def_id: DefId) -> Instance<'tcx> {
+ let tcx = cx.tcx;
+
+ let instance = Instance::new(
+ def_id,
+ InternalSubsts::for_item(tcx, def_id, |param, _| {
+ if let ty::GenericParamDefKind::Lifetime = param.kind {
+ tcx.lifetimes.re_erased.into()
+ } else {
+ tcx.mk_param_from_def(param)
+ }
+ }),
+ );
+
+ let llfn = cx.declare_fn(
+ tcx.symbol_name(instance).name,
+ cx.fn_abi_of_fn_ptr(
+ ty::Binder::dummy(tcx.mk_fn_sig(
+ iter::once(tcx.mk_unit()),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ )),
+ ty::List::empty(),
+ ),
+ );
+
+ llvm::set_linkage(llfn, llvm::Linkage::PrivateLinkage);
+ llvm::set_visibility(llfn, llvm::Visibility::Default);
+
+ assert!(cx.instances.borrow_mut().insert(instance, llfn).is_none());
+
+ instance
+}
+
+fn codegen_unused_fn_and_counter<'tcx>(cx: &CodegenCx<'_, 'tcx>, instance: Instance<'tcx>) {
+ let llfn = cx.get_fn(instance);
+ let llbb = Builder::append_block(cx, llfn, "unused_function");
+ let mut bx = Builder::build(cx, llbb);
+ let fn_name = bx.get_pgo_func_name_var(instance);
+ let hash = bx.const_u64(0);
+ let num_counters = bx.const_u32(1);
+ let index = bx.const_u32(u32::from(UNUSED_FUNCTION_COUNTER_ID));
+ debug!(
+ "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?},
+ index={:?}) for unused function: {:?}",
+ fn_name, hash, num_counters, index, instance
+ );
+ bx.instrprof_increment(fn_name, hash, num_counters, index);
+ bx.ret_void();
+}
+
+fn add_unused_function_coverage<'tcx>(
+ cx: &CodegenCx<'_, 'tcx>,
+ instance: Instance<'tcx>,
+ def_id: DefId,
+) {
+ let tcx = cx.tcx;
+
+ let mut function_coverage = FunctionCoverage::unused(tcx, instance);
+ for (index, &code_region) in tcx.covered_code_regions(def_id).iter().enumerate() {
+ if index == 0 {
+ // Insert at least one real counter so the LLVM CoverageMappingReader will find expected
+ // definitions.
+ function_coverage.add_counter(UNUSED_FUNCTION_COUNTER_ID, code_region.clone());
+ } else {
+ function_coverage.add_unreachable_region(code_region.clone());
+ }
+ }
+
+ if let Some(coverage_context) = cx.coverage_context() {
+ coverage_context.function_coverage_map.borrow_mut().insert(instance, function_coverage);
+ } else {
+ bug!("Could not get the `coverage_context`");
+ }
+}
+
+/// Calls llvm::createPGOFuncNameVar() with the given function instance's
+/// mangled function name. The LLVM API returns an llvm::GlobalVariable
+/// containing the function name, with the specific variable name and linkage
+/// required by LLVM InstrProf source-based coverage instrumentation. Use
+/// `bx.get_pgo_func_name_var()` to ensure the variable is only created once per
+/// `Instance`.
+fn create_pgo_func_name_var<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+) -> &'ll llvm::Value {
+ let mangled_fn_name = CString::new(cx.tcx.symbol_name(instance).name)
+ .expect("error converting function name to C string");
+ let llfn = cx.get_fn(instance);
+ unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
+}
+
+pub(crate) fn write_filenames_section_to_buffer<'a>(
+ filenames: impl IntoIterator<Item = &'a CString>,
+ buffer: &RustString,
+) {
+ let c_str_vec = filenames.into_iter().map(|cstring| cstring.as_ptr()).collect::<Vec<_>>();
+ unsafe {
+ llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
+ c_str_vec.as_ptr(),
+ c_str_vec.len(),
+ buffer,
+ );
+ }
+}
+
+pub(crate) fn write_mapping_to_buffer(
+ virtual_file_mapping: Vec<u32>,
+ expressions: Vec<CounterExpression>,
+ mapping_regions: Vec<CounterMappingRegion>,
+ buffer: &RustString,
+) {
+ unsafe {
+ llvm::LLVMRustCoverageWriteMappingToBuffer(
+ virtual_file_mapping.as_ptr(),
+ virtual_file_mapping.len() as c_uint,
+ expressions.as_ptr(),
+ expressions.len() as c_uint,
+ mapping_regions.as_ptr(),
+ mapping_regions.len() as c_uint,
+ buffer,
+ );
+ }
+}
+
+pub(crate) fn hash_str(strval: &str) -> u64 {
+ let strval = CString::new(strval).expect("null error converting hashable str to C string");
+ unsafe { llvm::LLVMRustCoverageHashCString(strval.as_ptr()) }
+}
+
+pub(crate) fn hash_bytes(bytes: Vec<u8>) -> u64 {
+ unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_ptr().cast(), bytes.len()) }
+}
+
+pub(crate) fn mapping_version() -> u32 {
+ unsafe { llvm::LLVMRustCoverageMappingVersion() }
+}
+
+pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ cov_data_val: &'ll llvm::Value,
+) {
+ let covmap_var_name = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
+ })
+ .expect("Rust Coverage Mapping var name failed UTF-8 conversion");
+ debug!("covmap var name: {:?}", covmap_var_name);
+
+ let covmap_section_name = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustCoverageWriteMapSectionNameToString(cx.llmod, s);
+ })
+ .expect("Rust Coverage section name failed UTF-8 conversion");
+ debug!("covmap section name: {:?}", covmap_section_name);
+
+ let llglobal = llvm::add_global(cx.llmod, cx.val_ty(cov_data_val), &covmap_var_name);
+ llvm::set_initializer(llglobal, cov_data_val);
+ llvm::set_global_constant(llglobal, true);
+ llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
+ llvm::set_section(llglobal, &covmap_section_name);
+ llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
+ cx.add_used_global(llglobal);
+}
+
+pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ func_name_hash: u64,
+ func_record_val: &'ll llvm::Value,
+ is_used: bool,
+) {
+ // Assign a name to the function record. This is used to merge duplicates.
+ //
+ // In LLVM, a "translation unit" (effectively, a `Crate` in Rust) can describe functions that
+ // are included-but-not-used. If (or when) Rust generates functions that are
+ // included-but-not-used, note that a dummy description for a function included-but-not-used
+ // in a Crate can be replaced by a full description provided by a different Crate. The two kinds
+ // of descriptions play distinct roles in LLVM IR; therefore, assign them different names (by
+ // appending "u" to the end of the function record var name) to prevent `linkonce_odr` merging.
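+ // (e.g., an illustrative name for a used function: `__covrec_1A2B3C4Du`)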
+ let func_record_var_name =
+ format!("__covrec_{:X}{}", func_name_hash, if is_used { "u" } else { "" });
+ debug!("function record var name: {:?}", func_record_var_name);
+
+ let func_record_section_name = llvm::build_string(|s| unsafe {
+ llvm::LLVMRustCoverageWriteFuncSectionNameToString(cx.llmod, s);
+ })
+ .expect("Rust Coverage function record section name failed UTF-8 conversion");
+ debug!("function record section name: {:?}", func_record_section_name);
+
+ let llglobal = llvm::add_global(cx.llmod, cx.val_ty(func_record_val), &func_record_var_name);
+ llvm::set_initializer(llglobal, func_record_val);
+ llvm::set_global_constant(llglobal, true);
+ llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
+ llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
+ llvm::set_section(llglobal, &func_record_section_name);
+ llvm::set_alignment(llglobal, VAR_ALIGN_BYTES);
+ llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
+ cx.add_used_global(llglobal);
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
new file mode 100644
index 000000000..99e4ded62
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs
@@ -0,0 +1,126 @@
+use super::metadata::file_metadata;
+use super::utils::DIB;
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext};
+use rustc_codegen_ssa::traits::*;
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{DILocation, DIScope};
+use rustc_middle::mir::{Body, SourceScope};
+use rustc_middle::ty::layout::FnAbiOf;
+use rustc_middle::ty::{self, Instance};
+use rustc_session::config::DebugInfo;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::Idx;
+
+/// Produces DIScope DIEs for each MIR Scope which has variables defined in it.
+// FIXME(eddyb) almost all of this should be in `rustc_codegen_ssa::mir::debuginfo`.
+pub fn compute_mir_scopes<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+ mir: &Body<'tcx>,
+ debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+) {
+ // Find all scopes with variables defined in them.
+ let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
+ let mut vars = BitSet::new_empty(mir.source_scopes.len());
+ // FIXME(eddyb) take into account that arguments always have debuginfo,
+ // irrespective of their name (assuming full debuginfo is enabled).
+ // NOTE(eddyb) actually, on second thought, those are always in the
+ // function scope, which always exists.
+ for var_debug_info in &mir.var_debug_info {
+ vars.insert(var_debug_info.source_info.scope);
+ }
+ Some(vars)
+ } else {
+ // Nothing to emit, of course.
+ None
+ };
+ let mut instantiated = BitSet::new_empty(mir.source_scopes.len());
+ // Instantiate all scopes.
+ for idx in 0..mir.source_scopes.len() {
+ let scope = SourceScope::new(idx);
+ make_mir_scope(cx, instance, mir, &variables, debug_context, &mut instantiated, scope);
+ }
+ assert!(instantiated.count() == mir.source_scopes.len());
+}
+
+fn make_mir_scope<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+ mir: &Body<'tcx>,
+ variables: &Option<BitSet<SourceScope>>,
+ debug_context: &mut FunctionDebugContext<&'ll DIScope, &'ll DILocation>,
+ instantiated: &mut BitSet<SourceScope>,
+ scope: SourceScope,
+) {
+ if instantiated.contains(scope) {
+ return;
+ }
+
+ let scope_data = &mir.source_scopes[scope];
+ let parent_scope = if let Some(parent) = scope_data.parent_scope {
+ make_mir_scope(cx, instance, mir, variables, debug_context, instantiated, parent);
+ debug_context.scopes[parent]
+ } else {
+ // The root is the function itself.
+ let loc = cx.lookup_debug_loc(mir.span.lo());
+ debug_context.scopes[scope] = DebugScope {
+ file_start_pos: loc.file.start_pos,
+ file_end_pos: loc.file.end_pos,
+ ..debug_context.scopes[scope]
+ };
+ instantiated.insert(scope);
+ return;
+ };
+
+ if let Some(vars) = variables && !vars.contains(scope) && scope_data.inlined.is_none() {
+ // Do not create a DIScope if there are no variables defined in this
+ // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
+ debug_context.scopes[scope] = parent_scope;
+ instantiated.insert(scope);
+ return;
+ }
+
+ let loc = cx.lookup_debug_loc(scope_data.span.lo());
+ let file_metadata = file_metadata(cx, &loc.file);
+
+ let dbg_scope = match scope_data.inlined {
+ Some((callee, _)) => {
+ // FIXME(eddyb) this would be `self.monomorphize(&callee)`
+ // if this is moved to `rustc_codegen_ssa::mir::debuginfo`.
+ let callee = cx.tcx.subst_and_normalize_erasing_regions(
+ instance.substs,
+ ty::ParamEnv::reveal_all(),
+ callee,
+ );
+ let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty());
+ cx.dbg_scope_fn(callee, callee_fn_abi, None)
+ }
+ None => unsafe {
+ llvm::LLVMRustDIBuilderCreateLexicalBlock(
+ DIB(cx),
+ parent_scope.dbg_scope,
+ file_metadata,
+ loc.line,
+ loc.col,
+ )
+ },
+ };
+
+ let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
+ // FIXME(eddyb) this doesn't account for the macro-related
+ // `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
+ let callsite_scope = parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
+ cx.dbg_loc(callsite_scope, parent_scope.inlined_at, callsite_span)
+ });
+
+ debug_context.scopes[scope] = DebugScope {
+ dbg_scope,
+ inlined_at: inlined_at.or(parent_scope.inlined_at),
+ file_start_pos: loc.file.start_pos,
+ file_end_pos: loc.file.end_pos,
+ };
+ instantiated.insert(scope);
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/doc.md b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
new file mode 100644
index 000000000..aaec4e68c
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/doc.md
@@ -0,0 +1,131 @@
+# Debug Info Module
+
+This module serves the purpose of generating debug symbols. We use LLVM's
+[source level debugging](https://llvm.org/docs/SourceLevelDebugging.html)
+features for generating the debug information. The general principle is
+this:
+
+Given the right metadata in the LLVM IR, the LLVM code generator is able to
+create DWARF debug symbols for the given code. The
+[metadata](https://llvm.org/docs/LangRef.html#metadata-type) is structured
+much like DWARF *debugging information entries* (DIE), representing type
+information such as datatype layout, function signatures, block layout,
+variable location and scope information, etc. It is the purpose of this
+module to generate correct metadata and insert it into the LLVM IR.
+
+As the exact format of metadata trees may change between different LLVM
+versions, we now use LLVM
+[DIBuilder](https://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html)
+to create metadata where possible. This will hopefully ease the adaptation of
+this module to future LLVM versions.
+
+The public API of the module is a set of functions that will insert the
+correct metadata into the LLVM IR when called with the right parameters.
+The module is thus driven from an outside client with functions like
+`debuginfo::create_local_var_metadata(bx: block, local: &ast::local)`.
+
+Internally the module will try to reuse already created metadata by
+utilizing a cache. The way to get a shared metadata node when needed is
+thus to just call the corresponding function in this module:
+```ignore (illustrative)
+let file_metadata = file_metadata(cx, file);
+```
+The function will take care of probing the cache for an existing node for
+that exact file path.
+
+All private state used by the module is stored within either the
+CodegenUnitDebugContext struct (owned by the CodegenCx) or the
+FunctionDebugContext (owned by the FunctionCx).
+
+This file consists of three conceptual sections:
+1. The public interface of the module
+2. Module-internal metadata creation functions
+3. Minor utility functions
+
+
+## Recursive Types
+
+Some kinds of types, such as structs and enums, can be recursive. That means
+that the type definition of some type X refers to some other type which in
+turn (transitively) refers to X. This introduces cycles into the type
+referral graph. A naive algorithm doing an on-demand, depth-first traversal
+of this graph when describing types can get trapped in an endless loop
+when it reaches such a cycle.
+
+For example, the following simple type for a singly-linked list...
+
+```
+struct List {
+ value: i32,
+ tail: Option<Box<List>>,
+}
+```
+
+will generate the following callstack with a naive DFS algorithm:
+
+```ignore (illustrative)
+describe(t = List)
+ describe(t = i32)
+ describe(t = Option<Box<List>>)
+ describe(t = Box<List>)
+ describe(t = List) // at the beginning again...
+ ...
+```
+
+To break cycles like these, we use "stubs". That is, when
+the algorithm encounters a possibly recursive type (any struct or enum), it
+immediately creates a type description node and inserts it into the cache
+*before* describing the members of the type. This type description is just
+a stub (as type members are not described and added to it yet) but it
+allows the algorithm to already refer to the type. After the stub is
+inserted into the cache, the algorithm continues as before. If it now
+encounters a recursive reference, it will hit the cache and does not try to
+describe the type anew. This behavior is encapsulated in the
+`type_map::build_type_with_children()` function.
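+
+A minimal sketch of this stub-before-members pattern (illustrative names,
+not the actual API):
+```ignore (illustrative)
+let stub = make_stub_node(ty);         // type node with no members yet
+cache.insert(ty, stub);                // recursive lookups of `ty` now hit this
+for member in members_of(ty) {
+ describe(member);                     // may (transitively) refer back to `ty`
+}
+attach_members(stub, /* described members */);
+```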
+
+
+## Source Locations and Line Information
+
+In addition to data type descriptions, the debugging information must also
+allow mapping machine code locations back to source code locations in order
+to be useful. This functionality is also handled in this module. The
+following functions control source mappings:
+
++ `set_source_location()`
++ `clear_source_location()`
++ `start_emitting_source_locations()`
+
+`set_source_location()` sets the current source location. All IR
+instructions created after a call to this function will be linked to the
+given source location, until another location is specified with
+`set_source_location()` or the source location is cleared with
+`clear_source_location()`. In the latter case, subsequent IR instructions
+will not be linked to any source location. As you can see, this is a
+stateful API (mimicking the one in LLVM), so be careful with source
+locations set by previous calls. It's probably best to not rely on any
+specific state being present at a given point in code.
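+
+For example (illustrative, not actual call sites):
+```ignore (illustrative)
+set_source_location(debug_context, span_a);
+// ...IR created here is linked to `span_a`...
+clear_source_location(debug_context);
+// ...IR created here carries no source location...
+```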
+
+One topic that deserves some extra attention is *function prologues*. At
+the beginning of a function's machine code there are typically a few
+instructions for loading argument values into allocas and checking if
+there's enough stack space for the function to execute. This *prologue* is
+not visible in the source code and LLVM puts a special PROLOGUE END marker
+into the line table at the first non-prologue instruction of the function.
+In order to find out where the prologue ends, LLVM looks for the first
+instruction in the function body that is linked to a source location. So,
+when generating prologue instructions we have to make sure that we don't
+emit source location information until the 'real' function body begins. For
+this reason, source location emission is disabled by default for any new
+function being codegened and is only activated after a call to the third
+function from the list above, `start_emitting_source_locations()`. This
+function should be called right before codegen of the top-level block of
+the given function begins.
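+
+Sketched as pseudocode (illustrative; the codegen helpers are placeholders,
+not actual function names):
+
+```ignore (illustrative)
+// Source location emission starts out disabled for a new function.
+codegen_prologue(fx);                 // no locations attached here
+start_emitting_source_locations(fx);  // enable emission...
+codegen_top_level_block(fx);          // ...right before the real body
+```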
+
+There is one exception to the above rule: `llvm.dbg.declare` instructions
+must be linked to the source location of the variable being declared. For
+function parameters these `llvm.dbg.declare` instructions typically occur
+in the middle of the prologue; however, they are ignored by LLVM's prologue
+detection. The `create_argument_metadata()` and related functions take care
+of linking the `llvm.dbg.declare` instructions to the correct source
+locations even while source location emission is still disabled, so there
+is no need to do anything special with source location handling here.
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
new file mode 100644
index 000000000..80fd9726f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -0,0 +1,120 @@
+// .debug_gdb_scripts binary section.
+
+use crate::llvm;
+
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use crate::value::Value;
+use rustc_codegen_ssa::base::collect_debugger_visualizers_transitive;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::bug;
+use rustc_session::config::{CrateType, DebugInfo};
+
+use rustc_span::symbol::sym;
+use rustc_span::DebuggerVisualizerType;
+
+/// Inserts a side-effect-free instruction sequence that makes sure that the
+/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
+pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
+ if needs_gdb_debug_scripts_section(bx) {
+ let gdb_debug_scripts_section =
+ bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p());
+ // Load just the first byte as that's all that's necessary to force
+ // LLVM to keep around the reference to the global.
+ let volatile_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);
+ unsafe {
+ llvm::LLVMSetAlignment(volatile_load_instruction, 1);
+ }
+ }
+}
+
+/// Allocates the global variable responsible for the .debug_gdb_scripts binary
+/// section.
+pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Value {
+ let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0";
+ let section_var_name = &c_section_var_name[..c_section_var_name.len() - 1];
+
+ let section_var =
+ unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, c_section_var_name.as_ptr().cast()) };
+
+ section_var.unwrap_or_else(|| {
+ let section_name = b".debug_gdb_scripts\0";
+ let mut section_contents = Vec::new();
+
+ // Add the pretty printers for the standard library first.
+ section_contents.extend_from_slice(b"\x01gdb_load_rust_pretty_printers.py\0");
+
+ // Next, add the pretty printers that were specified via the `#[debugger_visualizer]` attribute.
+ let visualizers = collect_debugger_visualizers_transitive(
+ cx.tcx,
+ DebuggerVisualizerType::GdbPrettyPrinter,
+ );
+ let crate_name = cx.tcx.crate_name(LOCAL_CRATE);
+ for (index, visualizer) in visualizers.iter().enumerate() {
+ // The initial byte `4` instructs GDB that the following pretty printer
+ // is defined inline as opposed to in a standalone file.
+ section_contents.extend_from_slice(b"\x04");
+ let vis_name = format!("pretty-printer-{}-{}\n", crate_name, index);
+ section_contents.extend_from_slice(vis_name.as_bytes());
+ section_contents.extend_from_slice(&visualizer.src);
+
+ // The final byte `0` tells GDB that the pretty printer has been
+ // fully defined and that it can continue searching for additional
+ // pretty printers.
+ section_contents.extend_from_slice(b"\0");
+ }
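+ // Taken together, the section now contains one `\x01<filename>\0` entry
+ // (the standard library printers) followed by one `\x04<name>\n<script>\0`
+ // entry per embedded visualizer; this is an illustrative summary of the
+ // bytes written above.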
+
+ unsafe {
+ let section_contents = section_contents.as_slice();
+ let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64);
+
+ let section_var = cx
+ .define_global(section_var_name, llvm_type)
+ .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
+ llvm::LLVMSetSection(section_var, section_name.as_ptr().cast());
+ llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
+ llvm::LLVMSetGlobalConstant(section_var, llvm::True);
+ llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);
+ llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
+ // This should make sure that the whole section is not larger than
+ // the string it contains. Otherwise we get a warning from GDB.
+ llvm::LLVMSetAlignment(section_var, 1);
+ section_var
+ }
+ })
+}
+
+pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
+ let omit_gdb_pretty_printer_section =
+ cx.tcx.sess.contains_name(cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section);
+
+ // To ensure the section `__rustc_debug_gdb_scripts_section__` will not create
+ // ODR violations at link time, this section will not be emitted for rlibs since
+ // each rlib could produce a different set of visualizers that would be embedded
+ // in the `.debug_gdb_scripts` section. For that reason, we make sure that the
+ // section is only emitted for leaf crates.
+ let embed_visualizers = cx.sess().crate_types().iter().any(|&crate_type| match crate_type {
+ CrateType::Executable | CrateType::Dylib | CrateType::Cdylib | CrateType::Staticlib => {
+ // These are crate types for which we will embed pretty printers since they
+ // are treated as leaf crates.
+ true
+ }
+ CrateType::ProcMacro => {
+ // We could embed pretty printers for proc macro crates too but it does not
+ // seem like a good default, since this is a rare use case and we don't
+ // want to slow down the common case.
+ false
+ }
+ CrateType::Rlib => {
+ // As per the above description, embedding pretty printers for rlibs could
+ // lead to ODR violations so we skip this crate type as well.
+ false
+ }
+ });
+
+ !omit_gdb_pretty_printer_section
+ && cx.sess().opts.debuginfo != DebugInfo::None
+ && cx.sess().target.emit_debug_gdb_scripts
+ && embed_visualizers
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
new file mode 100644
index 000000000..bd84100e0
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -0,0 +1,1618 @@
+use self::type_map::DINodeCreationResult;
+use self::type_map::Stub;
+use self::type_map::UniqueTypeId;
+
+use super::namespace::mangled_name_of_instance;
+use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_name};
+use super::utils::{
+ create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit, DIB,
+};
+use super::CodegenUnitDebugContext;
+
+use crate::abi;
+use crate::common::CodegenCx;
+use crate::debuginfo::metadata::type_map::build_type_with_children;
+use crate::debuginfo::utils::fat_pointer_kind;
+use crate::debuginfo::utils::FatPtrKind;
+use crate::llvm;
+use crate::llvm::debuginfo::{
+ DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, DebugEmissionKind,
+};
+use crate::value::Value;
+
+use cstr::cstr;
+use rustc_codegen_ssa::debuginfo::type_names::cpp_like_debuginfo;
+use rustc_codegen_ssa::debuginfo::type_names::VTableNameKind;
+use rustc_codegen_ssa::traits::*;
+use rustc_fs_util::path_to_c_string;
+use rustc_hir::def::CtorKind;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::bug;
+use rustc_middle::mir::{self, GeneratorLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::{
+ self, AdtKind, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt, Visibility,
+};
+use rustc_session::config::{self, DebugInfo, Lto};
+use rustc_span::symbol::Symbol;
+use rustc_span::FileName;
+use rustc_span::{self, FileNameDisplayPreference, SourceFile};
+use rustc_symbol_mangling::typeid_for_trait_ref;
+use rustc_target::abi::{Align, Size};
+use smallvec::smallvec;
+use tracing::debug;
+
+use libc::{c_char, c_longlong, c_uint};
+use std::borrow::Cow;
+use std::fmt::{self, Write};
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::path::{Path, PathBuf};
+use std::ptr;
+use tracing::instrument;
+
+impl PartialEq for llvm::Metadata {
+ fn eq(&self, other: &Self) -> bool {
+ ptr::eq(self, other)
+ }
+}
+
+impl Eq for llvm::Metadata {}
+
+impl Hash for llvm::Metadata {
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ (self as *const Self).hash(hasher);
+ }
+}
+
+impl fmt::Debug for llvm::Metadata {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (self as *const Self).fmt(f)
+ }
+}
+
+// From DWARF 5.
+// See http://www.dwarfstd.org/ShowIssue.php?issue=140129.1.
+const DW_LANG_RUST: c_uint = 0x1c;
+#[allow(non_upper_case_globals)]
+const DW_ATE_boolean: c_uint = 0x02;
+#[allow(non_upper_case_globals)]
+const DW_ATE_float: c_uint = 0x04;
+#[allow(non_upper_case_globals)]
+const DW_ATE_signed: c_uint = 0x05;
+#[allow(non_upper_case_globals)]
+const DW_ATE_unsigned: c_uint = 0x07;
+#[allow(non_upper_case_globals)]
+const DW_ATE_UTF: c_uint = 0x10;
+
+pub(super) const UNKNOWN_LINE_NUMBER: c_uint = 0;
+pub(super) const UNKNOWN_COLUMN_NUMBER: c_uint = 0;
+
+const NO_SCOPE_METADATA: Option<&DIScope> = None;
+/// A function that returns an empty list of generic parameter debuginfo nodes.
+const NO_GENERICS: for<'ll> fn(&CodegenCx<'ll, '_>) -> SmallVec<&'ll DIType> = |_| SmallVec::new();
+
+// SmallVec is used quite a bit in this module, so create a shorthand.
+// The actual number of elements is not so important.
+pub type SmallVec<T> = smallvec::SmallVec<[T; 16]>;
+
+mod enums;
+mod type_map;
+
+pub(crate) use type_map::TypeMap;
+
+/// Returns from the enclosing function if the type debuginfo node with the given
+/// unique ID can be found in the type map.
+macro_rules! return_if_di_node_created_in_meantime {
+ ($cx: expr, $unique_type_id: expr) => {
+ if let Some(di_node) = debug_context($cx).type_map.di_node_for_unique_id($unique_type_id) {
+ return DINodeCreationResult::new(di_node, true);
+ }
+ };
+}
+
+/// Extract size and alignment from a TyAndLayout.
+fn size_and_align_of<'tcx>(ty_and_layout: TyAndLayout<'tcx>) -> (Size, Align) {
+ (ty_and_layout.size, ty_and_layout.align.abi)
+}
+
+/// Creates debuginfo for a fixed-size array (e.g. `[u64; 123]`).
+/// For slices (that is, "arrays" of unknown size) use [build_slice_type_di_node].
+fn build_fixed_size_array_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+ array_type: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let ty::Array(element_type, len) = array_type.kind() else {
+ bug!("build_fixed_size_array_di_node() called with non-ty::Array type `{:?}`", array_type)
+ };
+
+ let element_type_di_node = type_di_node(cx, *element_type);
+
+ return_if_di_node_created_in_meantime!(cx, unique_type_id);
+
+ let (size, align) = cx.size_and_align_of(array_type);
+
+ let upper_bound = len.eval_usize(cx.tcx, ty::ParamEnv::reveal_all()) as c_longlong;
+
+ let subrange =
+ unsafe { Some(llvm::LLVMRustDIBuilderGetOrCreateSubrange(DIB(cx), 0, upper_bound)) };
+
+ let subscripts = create_DIArray(DIB(cx), &[subrange]);
+ let di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreateArrayType(
+ DIB(cx),
+ size.bits(),
+ align.bits() as u32,
+ element_type_di_node,
+ subscripts,
+ )
+ };
+
+ DINodeCreationResult::new(di_node, false)
+}
+
+/// Creates debuginfo for built-in pointer-like things:
+///
+/// - ty::Ref
+/// - ty::RawPtr
+/// - ty::Adt in the case it's Box
+///
+/// At some point we might want to remove the special handling of Box
+/// and treat it the same as other smart pointers (like Rc, Arc, ...).
+fn build_pointer_or_reference_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ptr_type: Ty<'tcx>,
+ pointee_type: Ty<'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ // The debuginfo generated by this function is only valid if `ptr_type` is really just
+ // a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
+ debug_assert_eq!(
+ cx.size_and_align_of(ptr_type),
+ cx.size_and_align_of(cx.tcx.mk_mut_ptr(pointee_type))
+ );
+
+ let pointee_type_di_node = type_di_node(cx, pointee_type);
+
+ return_if_di_node_created_in_meantime!(cx, unique_type_id);
+
+ let (thin_pointer_size, thin_pointer_align) =
+ cx.size_and_align_of(cx.tcx.mk_imm_ptr(cx.tcx.types.unit));
+ let ptr_type_debuginfo_name = compute_debuginfo_type_name(cx.tcx, ptr_type, true);
+
+ match fat_pointer_kind(cx, pointee_type) {
+ None => {
+ // This is a thin pointer. Create a regular pointer type and give it the correct name.
+ debug_assert_eq!(
+ (thin_pointer_size, thin_pointer_align),
+ cx.size_and_align_of(ptr_type),
+ "ptr_type={}, pointee_type={}",
+ ptr_type,
+ pointee_type,
+ );
+
+ let di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreatePointerType(
+ DIB(cx),
+ pointee_type_di_node,
+ thin_pointer_size.bits(),
+ thin_pointer_align.bits() as u32,
+ 0, // Ignore DWARF address space.
+ ptr_type_debuginfo_name.as_ptr().cast(),
+ ptr_type_debuginfo_name.len(),
+ )
+ };
+
+ DINodeCreationResult { di_node, already_stored_in_typemap: false }
+ }
+ Some(fat_pointer_kind) => {
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &ptr_type_debuginfo_name,
+ cx.size_and_align_of(ptr_type),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |cx, owner| {
+ // FIXME: If this fat pointer is a `Box` then we don't want to use its
+ // type layout and instead use the layout of the raw pointer inside
+ // of it.
+ // The proper way to handle this is to not treat Box as a pointer
+ // at all and instead emit regular struct debuginfo for it. We just
+ // need to make sure that we don't break existing debuginfo consumers
+ // by doing that (at least not without a warning period).
+ let layout_type =
+ if ptr_type.is_box() { cx.tcx.mk_mut_ptr(pointee_type) } else { ptr_type };
+
+ let layout = cx.layout_of(layout_type);
+ let addr_field = layout.field(cx, abi::FAT_PTR_ADDR);
+ let extra_field = layout.field(cx, abi::FAT_PTR_EXTRA);
+
+ let (addr_field_name, extra_field_name) = match fat_pointer_kind {
+ FatPtrKind::Dyn => ("pointer", "vtable"),
+ FatPtrKind::Slice => ("data_ptr", "length"),
+ };
+
+ debug_assert_eq!(abi::FAT_PTR_ADDR, 0);
+ debug_assert_eq!(abi::FAT_PTR_EXTRA, 1);
+
+ // The data pointer type is a regular, thin pointer, regardless of whether this
+ // is a slice or a trait object.
+ let data_ptr_type_di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreatePointerType(
+ DIB(cx),
+ pointee_type_di_node,
+ addr_field.size.bits(),
+ addr_field.align.abi.bits() as u32,
+ 0, // Ignore DWARF address space.
+ std::ptr::null(),
+ 0,
+ )
+ };
+
+ smallvec![
+ build_field_di_node(
+ cx,
+ owner,
+ addr_field_name,
+ (addr_field.size, addr_field.align.abi),
+ layout.fields.offset(abi::FAT_PTR_ADDR),
+ DIFlags::FlagZero,
+ data_ptr_type_di_node,
+ ),
+ build_field_di_node(
+ cx,
+ owner,
+ extra_field_name,
+ (extra_field.size, extra_field.align.abi),
+ layout.fields.offset(abi::FAT_PTR_EXTRA),
+ DIFlags::FlagZero,
+ type_di_node(cx, extra_field.ty),
+ ),
+ ]
+ },
+ NO_GENERICS,
+ )
+ }
+ }
+}
+
+fn build_subroutine_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ // It's possible to create a self-referential
+ // type in Rust by using 'impl trait':
+ //
+ // fn foo() -> impl Copy { foo }
+ //
+ // Unfortunately LLVM's API does not allow us to create recursive subroutine types.
+ // In order to work around that restriction we place a marker type in the type map,
+ // before creating the actual type. If the actual type is recursive, it will hit the
+ // marker type. So we end up with a type that looks like
+ //
+ // fn foo() -> <recursive_type>
+ //
+ // Once that is created, we replace the marker in the typemap with the actual type.
+ debug_context(cx)
+ .type_map
+ .unique_id_to_di_node
+ .borrow_mut()
+ .insert(unique_type_id, recursion_marker_type_di_node(cx));
+
+ let fn_ty = unique_type_id.expect_ty();
+ let signature = cx
+ .tcx
+ .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), fn_ty.fn_sig(cx.tcx));
+
+ let signature_di_nodes: SmallVec<_> = iter::once(
+ // return type
+ match signature.output().kind() {
+ ty::Tuple(tys) if tys.is_empty() => {
+ // this is a "void" function
+ None
+ }
+ _ => Some(type_di_node(cx, signature.output())),
+ },
+ )
+ .chain(
+ // regular arguments
+ signature.inputs().iter().map(|&argument_type| Some(type_di_node(cx, argument_type))),
+ )
+ .collect();
+
+ debug_context(cx).type_map.unique_id_to_di_node.borrow_mut().remove(&unique_type_id);
+
+ let fn_di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreateSubroutineType(
+ DIB(cx),
+ create_DIArray(DIB(cx), &signature_di_nodes[..]),
+ )
+ };
+
+ // This is actually a function pointer, so wrap it in pointer DI.
+ let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false);
+ let di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreatePointerType(
+ DIB(cx),
+ fn_di_node,
+ cx.tcx.data_layout.pointer_size.bits(),
+ cx.tcx.data_layout.pointer_align.abi.bits() as u32,
+ 0, // Ignore DWARF address space.
+ name.as_ptr().cast(),
+ name.len(),
+ )
+ };
+
+ DINodeCreationResult::new(di_node, false)
+}
+
+/// Create debuginfo for `dyn SomeTrait` types. Currently these are empty structs
+/// with the correct type name (e.g. "dyn SomeTrait<Foo, Item=u32> + Sync").
+fn build_dyn_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ dyn_type: Ty<'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ if let ty::Dynamic(..) = dyn_type.kind() {
+ let type_name = compute_debuginfo_type_name(cx.tcx, dyn_type, true);
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &type_name,
+ cx.size_and_align_of(dyn_type),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |_, _| smallvec![],
+ NO_GENERICS,
+ )
+ } else {
+ bug!(
+ "Only ty::Dynamic is valid for build_dyn_type_di_node(). Found {:?} instead.",
+ dyn_type
+ )
+ }
+}
+
+/// Create debuginfo for `[T]` and `str`. These are unsized.
+///
+/// NOTE: We currently just emit the debuginfo for the element type here
+/// (i.e. `T` for slices and `u8` for `str`), so that we end up with
+/// `*const T` for the `data_ptr` field of the corresponding fat-pointer
+/// debuginfo of `&[T]`.
+///
+/// It would be preferable and more accurate if we emitted a DIArray of T
+/// without an upper bound instead. That is, LLVM already supports emitting
+/// debuginfo of arrays of unknown size. But GDB currently seems to end up
+/// in an infinite loop when confronted with such a type.
+///
+/// As a side effect of the current encoding every instance of a type like
+/// `struct Foo { unsized_field: [u8] }` will look like
+/// `struct Foo { unsized_field: u8 }` in debuginfo. If the length of the
+/// slice is zero, then accessing `unsized_field` in the debugger would
+/// result in an out-of-bounds access.
+fn build_slice_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ slice_type: Ty<'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let element_type = match slice_type.kind() {
+ ty::Slice(element_type) => *element_type,
+ ty::Str => cx.tcx.types.u8,
+ _ => {
+ bug!(
+ "Only ty::Slice is valid for build_slice_type_di_node(). Found {:?} instead.",
+ slice_type
+ )
+ }
+ };
+
+ let element_type_di_node = type_di_node(cx, element_type);
+ return_if_di_node_created_in_meantime!(cx, unique_type_id);
+ DINodeCreationResult { di_node: element_type_di_node, already_stored_in_typemap: false }
+}
+
+/// Get the debuginfo node for the given type.
+///
+/// This function will look up the debuginfo node in the TypeMap. If it can't find it, it
+/// will create the node by dispatching to the corresponding `build_*_di_node()` function.
+pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
+ let unique_type_id = UniqueTypeId::for_ty(cx.tcx, t);
+
+ if let Some(existing_di_node) = debug_context(cx).type_map.di_node_for_unique_id(unique_type_id)
+ {
+ return existing_di_node;
+ }
+
+ debug!("type_di_node: {:?}", t);
+
+ let DINodeCreationResult { di_node, already_stored_in_typemap } = match *t.kind() {
+ ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
+ build_basic_type_di_node(cx, t)
+ }
+ ty::Tuple(elements) if elements.is_empty() => build_basic_type_di_node(cx, t),
+ ty::Array(..) => build_fixed_size_array_di_node(cx, unique_type_id, t),
+ ty::Slice(_) | ty::Str => build_slice_type_di_node(cx, t, unique_type_id),
+ ty::Dynamic(..) => build_dyn_type_di_node(cx, t, unique_type_id),
+ ty::Foreign(..) => build_foreign_type_di_node(cx, t, unique_type_id),
+ ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => {
+ build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id)
+ }
+ // Box<T, A> may have a non-ZST allocator A. In that case, we
+ // cannot treat Box<T, A> as just an owned alias of `*mut T`.
+ ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+ build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id)
+ }
+ ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id),
+ ty::Closure(..) => build_closure_env_di_node(cx, unique_type_id),
+ ty::Generator(..) => enums::build_generator_di_node(cx, unique_type_id),
+ ty::Adt(def, ..) => match def.adt_kind() {
+ AdtKind::Struct => build_struct_type_di_node(cx, unique_type_id),
+ AdtKind::Union => build_union_type_di_node(cx, unique_type_id),
+ AdtKind::Enum => enums::build_enum_type_di_node(cx, unique_type_id),
+ },
+ ty::Tuple(_) => build_tuple_type_di_node(cx, unique_type_id),
+ // Type parameters from polymorphized functions.
+ ty::Param(_) => build_param_type_di_node(cx, t),
+ _ => bug!("debuginfo: unexpected type in type_di_node(): {:?}", t),
+ };
+
+ {
+ if already_stored_in_typemap {
+ // Make sure that we really do have a `TypeMap` entry for the unique type ID.
+ let di_node_for_uid =
+ match debug_context(cx).type_map.di_node_for_unique_id(unique_type_id) {
+ Some(di_node) => di_node,
+ None => {
+ bug!(
+ "expected type debuginfo node for unique \
+ type ID '{:?}' to already be in \
+ the `debuginfo::TypeMap` but it \
+ was not.",
+ unique_type_id,
+ );
+ }
+ };
+
+ debug_assert_eq!(di_node_for_uid as *const _, di_node as *const _);
+ } else {
+ debug_context(cx).type_map.insert(unique_type_id, di_node);
+ }
+ }
+
+ di_node
+}
+
+// FIXME(mw): Cache this via a regular UniqueTypeId instead of an extra field in the debug context.
+fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll DIType {
+ *debug_context(cx).recursion_marker_type.get_or_init(move || {
+ unsafe {
+ // The choice of type here is pretty arbitrary -
+ // anything reading the debuginfo for a recursive
+ // type is going to see *something* weird - the only
+ // question is what exactly it will see.
+ //
+ // FIXME: the name `<recur_type>` does not fit the naming scheme
+ // of other types.
+ //
+ // FIXME: it might make sense to use an actual pointer type here
+ // so that debuggers can show the address.
+ let name = "<recur_type>";
+ llvm::LLVMRustDIBuilderCreateBasicType(
+ DIB(cx),
+ name.as_ptr().cast(),
+ name.len(),
+ cx.tcx.data_layout.pointer_size.bits(),
+ DW_ATE_unsigned,
+ )
+ }
+ })
+}
+
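+/// Hex-encodes a byte slice (e.g. `[0xde, 0xad]` becomes `"dead"`). Used below
+/// to embed source file checksums into `DIFile` nodes.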
+fn hex_encode(data: &[u8]) -> String {
+ let mut hex_string = String::with_capacity(data.len() * 2);
+ for byte in data.iter() {
+ write!(&mut hex_string, "{:02x}", byte).unwrap();
+ }
+ hex_string
+}
+
+pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> &'ll DIFile {
+ let cache_key = Some((source_file.name_hash, source_file.src_hash));
+ return debug_context(cx)
+ .created_files
+ .borrow_mut()
+ .entry(cache_key)
+ .or_insert_with(|| alloc_new_file_metadata(cx, source_file));
+
+ #[instrument(skip(cx, source_file), level = "debug")]
+ fn alloc_new_file_metadata<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ source_file: &SourceFile,
+ ) -> &'ll DIFile {
+ debug!(?source_file.name);
+
+ let (directory, file_name) = match &source_file.name {
+ FileName::Real(filename) => {
+ let working_directory = &cx.sess().opts.working_dir;
+ debug!(?working_directory);
+
+ let filename = cx
+ .sess()
+ .source_map()
+ .path_mapping()
+ .to_embeddable_absolute_path(filename.clone(), working_directory);
+
+ // Construct the absolute path of the file
+ let abs_path = filename.remapped_path_if_available();
+ debug!(?abs_path);
+
+ if let Ok(rel_path) =
+ abs_path.strip_prefix(working_directory.remapped_path_if_available())
+ {
+ // If the compiler's working directory (which also is the DW_AT_comp_dir of
+ // the compilation unit) is a prefix of the path we are about to emit, then
+ // only emit the part relative to the working directory.
+ // Because of path remapping we sometimes see strange things here: `abs_path`
+ // might actually look like a relative path
+ // (e.g. `<crate-name-and-version>/src/lib.rs`), so if we emit it without
+ // taking the working directory into account, downstream tooling will
+ // interpret it as `<working-directory>/<crate-name-and-version>/src/lib.rs`,
+ // which makes no sense. Usually in such cases the working directory will also
+ // be remapped to `<crate-name-and-version>` or some other prefix of the path
+ // we are remapping, so we end up with
+ // `<crate-name-and-version>/<crate-name-and-version>/src/lib.rs`.
+ // By moving the working directory portion into the `directory` part of the
+ // DIFile, we allow LLVM to emit just the relative path for DWARF, while
+ // still emitting the correct absolute path for CodeView.
+ (
+ working_directory.to_string_lossy(FileNameDisplayPreference::Remapped),
+ rel_path.to_string_lossy().into_owned(),
+ )
+ } else {
+ ("".into(), abs_path.to_string_lossy().into_owned())
+ }
+ }
+ other => ("".into(), other.prefer_remapped().to_string_lossy().into_owned()),
+ };
+
+ let hash_kind = match source_file.src_hash.kind {
+ rustc_span::SourceFileHashAlgorithm::Md5 => llvm::ChecksumKind::MD5,
+ rustc_span::SourceFileHashAlgorithm::Sha1 => llvm::ChecksumKind::SHA1,
+ rustc_span::SourceFileHashAlgorithm::Sha256 => llvm::ChecksumKind::SHA256,
+ };
+ let hash_value = hex_encode(source_file.src_hash.hash_bytes());
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateFile(
+ DIB(cx),
+ file_name.as_ptr().cast(),
+ file_name.len(),
+ directory.as_ptr().cast(),
+ directory.len(),
+ hash_kind,
+ hash_value.as_ptr().cast(),
+ hash_value.len(),
+ )
+ }
+ }
+}
+
+pub fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile {
+ debug_context(cx).created_files.borrow_mut().entry(None).or_insert_with(|| unsafe {
+ let file_name = "<unknown>";
+ let directory = "";
+ let hash_value = "";
+
+ llvm::LLVMRustDIBuilderCreateFile(
+ DIB(cx),
+ file_name.as_ptr().cast(),
+ file_name.len(),
+ directory.as_ptr().cast(),
+ directory.len(),
+ llvm::ChecksumKind::None,
+ hash_value.as_ptr().cast(),
+ hash_value.len(),
+ )
+ })
+}
+
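+/// Maps a Rust primitive type to the MSVC-style name emitted when
+/// `cpp_like_debuginfo` is enabled, so that .natvis visualizers and native
+/// Windows debuggers recognize the type (e.g. `i32` becomes `__int32`).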
+trait MsvcBasicName {
+ fn msvc_basic_name(self) -> &'static str;
+}
+
+impl MsvcBasicName for ty::IntTy {
+ fn msvc_basic_name(self) -> &'static str {
+ match self {
+ ty::IntTy::Isize => "ptrdiff_t",
+ ty::IntTy::I8 => "__int8",
+ ty::IntTy::I16 => "__int16",
+ ty::IntTy::I32 => "__int32",
+ ty::IntTy::I64 => "__int64",
+ ty::IntTy::I128 => "__int128",
+ }
+ }
+}
+
+impl MsvcBasicName for ty::UintTy {
+ fn msvc_basic_name(self) -> &'static str {
+ match self {
+ ty::UintTy::Usize => "size_t",
+ ty::UintTy::U8 => "unsigned __int8",
+ ty::UintTy::U16 => "unsigned __int16",
+ ty::UintTy::U32 => "unsigned __int32",
+ ty::UintTy::U64 => "unsigned __int64",
+ ty::UintTy::U128 => "unsigned __int128",
+ }
+ }
+}
+
+impl MsvcBasicName for ty::FloatTy {
+ fn msvc_basic_name(self) -> &'static str {
+ match self {
+ ty::FloatTy::F32 => "float",
+ ty::FloatTy::F64 => "double",
+ }
+ }
+}
+
+fn build_basic_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ t: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ debug!("build_basic_type_di_node: {:?}", t);
+
+ // When targeting MSVC, emit MSVC-style type names for compatibility with
+ // .natvis visualizers (and perhaps other existing native debuggers?)
+ let cpp_like_debuginfo = cpp_like_debuginfo(cx.tcx);
+
+ let (name, encoding) = match t.kind() {
+ ty::Never => ("!", DW_ATE_unsigned),
+ ty::Tuple(elements) if elements.is_empty() => {
+ if cpp_like_debuginfo {
+ return build_tuple_type_di_node(cx, UniqueTypeId::for_ty(cx.tcx, t));
+ } else {
+ ("()", DW_ATE_unsigned)
+ }
+ }
+ ty::Bool => ("bool", DW_ATE_boolean),
+ ty::Char => ("char", DW_ATE_UTF),
+ ty::Int(int_ty) if cpp_like_debuginfo => (int_ty.msvc_basic_name(), DW_ATE_signed),
+ ty::Uint(uint_ty) if cpp_like_debuginfo => (uint_ty.msvc_basic_name(), DW_ATE_unsigned),
+ ty::Float(float_ty) if cpp_like_debuginfo => (float_ty.msvc_basic_name(), DW_ATE_float),
+ ty::Int(int_ty) => (int_ty.name_str(), DW_ATE_signed),
+ ty::Uint(uint_ty) => (uint_ty.name_str(), DW_ATE_unsigned),
+ ty::Float(float_ty) => (float_ty.name_str(), DW_ATE_float),
+ _ => bug!("debuginfo::build_basic_type_di_node - `t` is invalid type"),
+ };
+
+ let ty_di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreateBasicType(
+ DIB(cx),
+ name.as_ptr().cast(),
+ name.len(),
+ cx.size_of(t).bits(),
+ encoding,
+ )
+ };
+
+ if !cpp_like_debuginfo {
+ return DINodeCreationResult::new(ty_di_node, false);
+ }
+
+ let typedef_name = match t.kind() {
+ ty::Int(int_ty) => int_ty.name_str(),
+ ty::Uint(uint_ty) => uint_ty.name_str(),
+ ty::Float(float_ty) => float_ty.name_str(),
+ _ => return DINodeCreationResult::new(ty_di_node, false),
+ };
+
+ let typedef_di_node = unsafe {
+ llvm::LLVMRustDIBuilderCreateTypedef(
+ DIB(cx),
+ ty_di_node,
+ typedef_name.as_ptr().cast(),
+ typedef_name.len(),
+ unknown_file_metadata(cx),
+ 0,
+ None,
+ )
+ };
+
+ DINodeCreationResult::new(typedef_di_node, false)
+}
+
+fn build_foreign_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ t: Ty<'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ debug!("build_foreign_type_di_node: {:?}", t);
+
+ let &ty::Foreign(def_id) = unique_type_id.expect_ty().kind() else {
+ bug!("build_foreign_type_di_node() called with unexpected type: {:?}", unique_type_id.expect_ty());
+ };
+
+ build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &compute_debuginfo_type_name(cx.tcx, t, false),
+ cx.size_and_align_of(t),
+ Some(get_namespace_for_item(cx, def_id)),
+ DIFlags::FlagZero,
+ ),
+ |_, _| smallvec![],
+ NO_GENERICS,
+ )
+}
+
+fn build_param_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ t: Ty<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ debug!("build_param_type_di_node: {:?}", t);
+ let name = format!("{:?}", t);
+ DINodeCreationResult {
+ di_node: unsafe {
+ llvm::LLVMRustDIBuilderCreateBasicType(
+ DIB(cx),
+ name.as_ptr().cast(),
+ name.len(),
+ Size::ZERO.bits(),
+ DW_ATE_unsigned,
+ )
+ },
+ already_stored_in_typemap: false,
+ }
+}
+
+pub fn build_compile_unit_di_node<'ll, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ codegen_unit_name: &str,
+ debug_context: &CodegenUnitDebugContext<'ll, 'tcx>,
+) -> &'ll DIDescriptor {
+ let mut name_in_debuginfo = match tcx.sess.local_crate_source_file {
+ Some(ref path) => path.clone(),
+ None => PathBuf::from(tcx.crate_name(LOCAL_CRATE).as_str()),
+ };
+
+ // To avoid breaking split DWARF, we need to ensure that each codegen unit
+ // has a unique `DW_AT_name`. This is because there's a remote chance that
+ // different codegen units for the same module will have entirely
+ // identical DWARF entries for the purpose of the DWO ID, which would
+ // violate Appendix F ("Split Dwarf Object Files") of the DWARF 5
+ // specification. LLVM uses the algorithm specified in section 7.32 "Type
+ // Signature Computation" to compute the DWO ID, which does not include
+ // any fields that would distinguish compilation units. So we must embed
+ // the codegen unit name into the `DW_AT_name`. (Issue #88521.)
+ //
+ // Additionally, the OSX linker has an idiosyncrasy where it will ignore
+ // some debuginfo if multiple object files with the same `DW_AT_name` are
+ // linked together.
+ //
+ // As a workaround for these two issues, we generate unique names for each
+ // object file. Those do not correspond to an actual source file but that
+ // is harmless.
+ name_in_debuginfo.push("@");
+ name_in_debuginfo.push(codegen_unit_name);
+
+ debug!("build_compile_unit_di_node: {:?}", name_in_debuginfo);
+ let rustc_producer =
+ format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION"),);
+ // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice.
+ let producer = format!("clang LLVM ({})", rustc_producer);
+
+ let name_in_debuginfo = name_in_debuginfo.to_string_lossy();
+ let work_dir = tcx.sess.opts.working_dir.to_string_lossy(FileNameDisplayPreference::Remapped);
+ let flags = "\0";
+ let output_filenames = tcx.output_filenames(());
+ let split_name = if tcx.sess.target_can_use_split_dwarf() {
+ output_filenames
+ .split_dwarf_path(
+ tcx.sess.split_debuginfo(),
+ tcx.sess.opts.unstable_opts.split_dwarf_kind,
+ Some(codegen_unit_name),
+ )
+ // We get a path relative to the working directory from split_dwarf_path
+ .map(|f| tcx.sess.source_map().path_mapping().map_prefix(f).0)
+ } else {
+ None
+ }
+ .unwrap_or_default();
+ let split_name = split_name.to_str().unwrap();
+
+ // FIXME(#60020):
+ //
+ // This should actually be
+ //
+ // let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo);
+ //
+ // That is, we should set LLVM's emission kind to `LineTablesOnly` if
+ // we are compiling with "limited" debuginfo. However, some of the
+ // existing tools relied on slightly more debuginfo being generated than
+ // would be the case with `LineTablesOnly`, and we did not want to break
+ // these tools in a "drive-by fix", without a good idea or plan about
+ // what limited debuginfo should exactly look like. So for now we keep
+ // the emission kind as `FullDebug`.
+ //
+ // See https://github.com/rust-lang/rust/issues/60020 for details.
+ let kind = DebugEmissionKind::FullDebug;
+ assert!(tcx.sess.opts.debuginfo != DebugInfo::None);
+
+ unsafe {
+ let compile_unit_file = llvm::LLVMRustDIBuilderCreateFile(
+ debug_context.builder,
+ name_in_debuginfo.as_ptr().cast(),
+ name_in_debuginfo.len(),
+ work_dir.as_ptr().cast(),
+ work_dir.len(),
+ llvm::ChecksumKind::None,
+ ptr::null(),
+ 0,
+ );
+
+ let unit_metadata = llvm::LLVMRustDIBuilderCreateCompileUnit(
+ debug_context.builder,
+ DW_LANG_RUST,
+ compile_unit_file,
+ producer.as_ptr().cast(),
+ producer.len(),
+ tcx.sess.opts.optimize != config::OptLevel::No,
+ flags.as_ptr().cast(),
+ 0,
+ // NB: this doesn't actually have any perceptible effect, it seems. LLVM will instead
+ // put the path supplied to `MCSplitDwarfFile` into the debug info of the final
+ // output(s).
+ split_name.as_ptr().cast(),
+ split_name.len(),
+ kind,
+ 0,
+ tcx.sess.opts.unstable_opts.split_dwarf_inlining,
+ );
+
+ if tcx.sess.opts.unstable_opts.profile {
+ let cu_desc_metadata =
+ llvm::LLVMRustMetadataAsValue(debug_context.llcontext, unit_metadata);
+ let default_gcda_path = &output_filenames.with_extension("gcda");
+ let gcda_path =
+ tcx.sess.opts.unstable_opts.profile_emit.as_ref().unwrap_or(default_gcda_path);
+
+ let gcov_cu_info = [
+ path_to_mdstring(debug_context.llcontext, &output_filenames.with_extension("gcno")),
+ path_to_mdstring(debug_context.llcontext, gcda_path),
+ cu_desc_metadata,
+ ];
+ let gcov_metadata = llvm::LLVMMDNodeInContext(
+ debug_context.llcontext,
+ gcov_cu_info.as_ptr(),
+ gcov_cu_info.len() as c_uint,
+ );
+
+ let llvm_gcov_ident = cstr!("llvm.gcov");
+ llvm::LLVMAddNamedMetadataOperand(
+ debug_context.llmod,
+ llvm_gcov_ident.as_ptr(),
+ gcov_metadata,
+ );
+ }
+
+ // Insert `llvm.ident` metadata on the wasm targets since that will
+ // get hooked up to the "producer" section's `processed-by` information.
+ if tcx.sess.target.is_like_wasm {
+ let name_metadata = llvm::LLVMMDStringInContext(
+ debug_context.llcontext,
+ rustc_producer.as_ptr().cast(),
+ rustc_producer.as_bytes().len() as c_uint,
+ );
+ llvm::LLVMAddNamedMetadataOperand(
+ debug_context.llmod,
+ cstr!("llvm.ident").as_ptr(),
+ llvm::LLVMMDNodeInContext(debug_context.llcontext, &name_metadata, 1),
+ );
+ }
+
+ return unit_metadata;
+ };
+
+ fn path_to_mdstring<'ll>(llcx: &'ll llvm::Context, path: &Path) -> &'ll Value {
+ let path_str = path_to_c_string(path);
+ unsafe {
+ llvm::LLVMMDStringInContext(
+ llcx,
+ path_str.as_ptr(),
+ path_str.as_bytes().len() as c_uint,
+ )
+ }
+ }
+}
+
+/// Creates a `DW_TAG_member` entry inside the DIE represented by the given `type_di_node`.
+fn build_field_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ owner: &'ll DIScope,
+ name: &str,
+ size_and_align: (Size, Align),
+ offset: Size,
+ flags: DIFlags,
+ type_di_node: &'ll DIType,
+) -> &'ll DIType {
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ owner,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size_and_align.0.bits(),
+ size_and_align.1.bits() as u32,
+ offset.bits(),
+ flags,
+ type_di_node,
+ )
+ }
+}
+
+/// Creates the debuginfo node for a Rust struct type. May be a regular struct or a tuple-struct.
+fn build_struct_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let struct_type = unique_type_id.expect_ty();
+ let ty::Adt(adt_def, _) = struct_type.kind() else {
+ bug!("build_struct_type_di_node() called with non-struct-type: {:?}", struct_type);
+ };
+ debug_assert!(adt_def.is_struct());
+ let containing_scope = get_namespace_for_item(cx, adt_def.did());
+ let struct_type_and_layout = cx.layout_of(struct_type);
+ let variant_def = adt_def.non_enum_variant();
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &compute_debuginfo_type_name(cx.tcx, struct_type, false),
+ size_and_align_of(struct_type_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, owner| {
+ variant_def
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let field_name = if variant_def.ctor_kind == CtorKind::Fn {
+ // This is a tuple struct
+ tuple_field_name(i)
+ } else {
+ // This is struct with named fields
+ Cow::Borrowed(f.name.as_str())
+ };
+ let field_layout = struct_type_and_layout.field(cx, i);
+ build_field_di_node(
+ cx,
+ owner,
+ &field_name[..],
+ (field_layout.size, field_layout.align.abi),
+ struct_type_and_layout.fields.offset(i),
+ DIFlags::FlagZero,
+ type_di_node(cx, field_layout.ty),
+ )
+ })
+ .collect()
+ },
+ |cx| build_generic_type_param_di_nodes(cx, struct_type),
+ )
+}
+
+//=-----------------------------------------------------------------------------
+// Tuples
+//=-----------------------------------------------------------------------------
+
+/// Returns names of captured upvars for closures and generators.
+///
+/// Here are some examples:
+/// - `name__field1__field2` when the upvar is captured by value.
+/// - `_ref__name__field` when the upvar is captured by reference.
+///
+/// For generators this only contains upvars that are shared by all states.
+fn closure_saved_names_of_captured_variables(tcx: TyCtxt<'_>, def_id: DefId) -> SmallVec<String> {
+ let body = tcx.optimized_mir(def_id);
+
+ body.var_debug_info
+ .iter()
+ .filter_map(|var| {
+ let is_ref = match var.value {
+ mir::VarDebugInfoContents::Place(place) if place.local == mir::Local::new(1) => {
+ // The projection is either `[.., Field, Deref]` or `[.., Field]`. It
+ // implies whether the variable is captured by value or by reference.
+ matches!(place.projection.last().unwrap(), mir::ProjectionElem::Deref)
+ }
+ _ => return None,
+ };
+ let prefix = if is_ref { "_ref__" } else { "" };
+ Some(prefix.to_owned() + var.name.as_str())
+ })
+ .collect()
+}
+
+/// Builds the DW_TAG_member debuginfo nodes for the upvars of a closure or generator.
+/// For a generator, this will handle upvars shared by all states.
+fn build_upvar_field_di_nodes<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ closure_or_generator_ty: Ty<'tcx>,
+ closure_or_generator_di_node: &'ll DIType,
+) -> SmallVec<&'ll DIType> {
+ let (&def_id, up_var_tys) = match closure_or_generator_ty.kind() {
+ ty::Generator(def_id, substs, _) => {
+ let upvar_tys: SmallVec<_> = substs.as_generator().prefix_tys().collect();
+ (def_id, upvar_tys)
+ }
+ ty::Closure(def_id, substs) => {
+ let upvar_tys: SmallVec<_> = substs.as_closure().upvar_tys().collect();
+ (def_id, upvar_tys)
+ }
+ _ => {
+ bug!(
+ "build_upvar_field_di_nodes() called with non-closure-or-generator-type: {:?}",
+ closure_or_generator_ty
+ )
+ }
+ };
+
+ debug_assert!(
+ up_var_tys
+ .iter()
+ .all(|&t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
+ );
+
+ let capture_names = closure_saved_names_of_captured_variables(cx.tcx, def_id);
+ let layout = cx.layout_of(closure_or_generator_ty);
+
+ up_var_tys
+ .into_iter()
+ .zip(capture_names.iter())
+ .enumerate()
+ .map(|(index, (up_var_ty, capture_name))| {
+ build_field_di_node(
+ cx,
+ closure_or_generator_di_node,
+ capture_name,
+ cx.size_and_align_of(up_var_ty),
+ layout.fields.offset(index),
+ DIFlags::FlagZero,
+ type_di_node(cx, up_var_ty),
+ )
+ })
+ .collect()
+}
+
+/// Builds the DW_TAG_structure_type debuginfo node for a Rust tuple type.
+fn build_tuple_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let tuple_type = unique_type_id.expect_ty();
+ let &ty::Tuple(component_types) = tuple_type.kind() else {
+ bug!("build_tuple_type_di_node() called with non-tuple-type: {:?}", tuple_type)
+ };
+
+ let tuple_type_and_layout = cx.layout_of(tuple_type);
+ let type_name = compute_debuginfo_type_name(cx.tcx, tuple_type, false);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &type_name,
+ size_and_align_of(tuple_type_and_layout),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, tuple_di_node| {
+ component_types
+ .into_iter()
+ .enumerate()
+ .map(|(index, component_type)| {
+ build_field_di_node(
+ cx,
+ tuple_di_node,
+ &tuple_field_name(index),
+ cx.size_and_align_of(component_type),
+ tuple_type_and_layout.fields.offset(index),
+ DIFlags::FlagZero,
+ type_di_node(cx, component_type),
+ )
+ })
+ .collect()
+ },
+ NO_GENERICS,
+ )
+}
+
+/// Builds the debuginfo node for a closure environment.
+fn build_closure_env_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let closure_env_type = unique_type_id.expect_ty();
+ let &ty::Closure(def_id, _substs) = closure_env_type.kind() else {
+ bug!("build_closure_env_di_node() called with non-closure-type: {:?}", closure_env_type)
+ };
+ let containing_scope = get_namespace_for_item(cx, def_id);
+ let type_name = compute_debuginfo_type_name(cx.tcx, closure_env_type, false);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &type_name,
+ cx.size_and_align_of(closure_env_type),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, owner| build_upvar_field_di_nodes(cx, closure_env_type, owner),
+ NO_GENERICS,
+ )
+}
+
+/// Build the debuginfo node for a Rust `union` type.
+fn build_union_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let union_type = unique_type_id.expect_ty();
+ let (union_def_id, variant_def) = match union_type.kind() {
+ ty::Adt(def, _) => (def.did(), def.non_enum_variant()),
+ _ => bug!("build_union_type_di_node on a non-ADT"),
+ };
+ let containing_scope = get_namespace_for_item(cx, union_def_id);
+ let union_ty_and_layout = cx.layout_of(union_type);
+ let type_name = compute_debuginfo_type_name(cx.tcx, union_type, false);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Union,
+ unique_type_id,
+ &type_name,
+ size_and_align_of(union_ty_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ // Fields:
+ |cx, owner| {
+ variant_def
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let field_layout = union_ty_and_layout.field(cx, i);
+ build_field_di_node(
+ cx,
+ owner,
+ f.name.as_str(),
+ size_and_align_of(field_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ type_di_node(cx, field_layout.ty),
+ )
+ })
+ .collect()
+ },
+ // Generics:
+ |cx| build_generic_type_param_di_nodes(cx, union_type),
+ )
+}
+
+// FIXME(eddyb) maybe precompute this? Right now it's computed once
+// per generator monomorphization, but it doesn't depend on substs.
+fn generator_layout_and_saved_local_names<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> (&'tcx GeneratorLayout<'tcx>, IndexVec<mir::GeneratorSavedLocal, Option<Symbol>>) {
+ let body = tcx.optimized_mir(def_id);
+ let generator_layout = body.generator_layout().unwrap();
+ let mut generator_saved_local_names = IndexVec::from_elem(None, &generator_layout.field_tys);
+
+ let state_arg = mir::Local::new(1);
+ for var in &body.var_debug_info {
+ let mir::VarDebugInfoContents::Place(place) = &var.value else { continue };
+ if place.local != state_arg {
+ continue;
+ }
+ match place.projection[..] {
+ [
+ // Deref of the `Pin<&mut Self>` state argument.
+ mir::ProjectionElem::Field(..),
+ mir::ProjectionElem::Deref,
+ // Field of a variant of the state.
+ mir::ProjectionElem::Downcast(_, variant),
+ mir::ProjectionElem::Field(field, _),
+ ] => {
+ let name = &mut generator_saved_local_names
+ [generator_layout.variant_fields[variant][field]];
+ if name.is_none() {
+ name.replace(var.name);
+ }
+ }
+ _ => {}
+ }
+ }
+ (generator_layout, generator_saved_local_names)
+}
+
+/// Computes the type parameters for a type, if any, for the given metadata.
+fn build_generic_type_param_di_nodes<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ty: Ty<'tcx>,
+) -> SmallVec<&'ll DIType> {
+ if let ty::Adt(def, substs) = *ty.kind() {
+ if substs.types().next().is_some() {
+ let generics = cx.tcx.generics_of(def.did());
+ let names = get_parameter_names(cx, generics);
+ let template_params: SmallVec<_> = iter::zip(substs, names)
+ .filter_map(|(kind, name)| {
+ if let GenericArgKind::Type(ty) = kind.unpack() {
+ let actual_type =
+ cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
+ let actual_type_di_node = type_di_node(cx, actual_type);
+ let name = name.as_str();
+ Some(unsafe {
+ llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
+ DIB(cx),
+ None,
+ name.as_ptr().cast(),
+ name.len(),
+ actual_type_di_node,
+ )
+ })
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ return template_params;
+ }
+ }
+
+ return smallvec![];
+
+ fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
+ let mut names = generics
+ .parent
+ .map_or_else(Vec::new, |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
+ names.extend(generics.params.iter().map(|param| param.name));
+ names
+ }
+}
+
+/// Creates debug information for the given global variable.
+///
+/// Adds the created debuginfo nodes directly to the crate's IR.
+pub fn build_global_var_di_node<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId, global: &'ll Value) {
+ if cx.dbg_cx.is_none() {
+ return;
+ }
+
+ // Only create type information if full debuginfo is enabled
+ if cx.sess().opts.debuginfo != DebugInfo::Full {
+ return;
+ }
+
+ let tcx = cx.tcx;
+
+ // We may want to remove the namespace scope if we're in an extern block (see
+ // https://github.com/rust-lang/rust/pull/46457#issuecomment-351750952).
+ let var_scope = get_namespace_for_item(cx, def_id);
+ let span = tcx.def_span(def_id);
+
+ let (file_metadata, line_number) = if !span.is_dummy() {
+ let loc = cx.lookup_debug_loc(span.lo());
+ (file_metadata(cx, &loc.file), loc.line)
+ } else {
+ (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER)
+ };
+
+ let is_local_to_unit = is_node_local_to_unit(cx, def_id);
+ let variable_type = Instance::mono(cx.tcx, def_id).ty(cx.tcx, ty::ParamEnv::reveal_all());
+ let type_di_node = type_di_node(cx, variable_type);
+ let var_name = tcx.item_name(def_id);
+ let var_name = var_name.as_str();
+ let linkage_name = mangled_name_of_instance(cx, Instance::mono(tcx, def_id)).name;
+ // When empty, the linkage_name field is omitted,
+ // which is what we want for `#[no_mangle]` statics.
+ let linkage_name = if var_name == linkage_name { "" } else { linkage_name };
+
+ let global_align = cx.align_of(variable_type);
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateStaticVariable(
+ DIB(cx),
+ Some(var_scope),
+ var_name.as_ptr().cast(),
+ var_name.len(),
+ linkage_name.as_ptr().cast(),
+ linkage_name.len(),
+ file_metadata,
+ line_number,
+ type_di_node,
+ is_local_to_unit,
+ global,
+ None,
+ global_align.bits() as u32,
+ );
+ }
+}
+
+/// Generates LLVM debuginfo for a vtable.
+///
+/// The vtable type looks like a struct with a field for each function pointer and super-trait
+/// pointer it contains (plus the `size` and `align` fields).
+///
+/// Except for `size`, `align`, and `drop_in_place`, the field names don't try to mirror
+/// the name of the method they implement. This can be implemented in the future once there
+/// is a proper disambiguation scheme for dealing with methods from different traits that have
+/// the same name.
+fn build_vtable_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ty: Ty<'tcx>,
+ poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> &'ll DIType {
+ let tcx = cx.tcx;
+
+ let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref {
+ let trait_ref = poly_trait_ref.with_self_ty(tcx, ty);
+ let trait_ref = tcx.erase_regions(trait_ref);
+
+ tcx.vtable_entries(trait_ref)
+ } else {
+ TyCtxt::COMMON_VTABLE_ENTRIES
+ };
+
+ // All function pointers are described as opaque pointers. This could be improved in the future
+ // by describing them as actual function pointers.
+ let void_pointer_ty = tcx.mk_imm_ptr(tcx.types.unit);
+ let void_pointer_type_di_node = type_di_node(cx, void_pointer_ty);
+ let usize_di_node = type_di_node(cx, tcx.types.usize);
+ let (pointer_size, pointer_align) = cx.size_and_align_of(void_pointer_ty);
+ // If `usize` is not pointer-sized and -aligned then the size and alignment computations
+ // for the vtable as a whole would be wrong. Let's make sure this holds even on weird
+ // platforms.
+ assert_eq!(cx.size_and_align_of(tcx.types.usize), (pointer_size, pointer_align));
+
+ let vtable_type_name =
+ compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::Type);
+ let unique_type_id = UniqueTypeId::for_vtable_ty(tcx, ty, poly_trait_ref);
+ let size = pointer_size * vtable_entries.len() as u64;
+
+ // This gets mapped to a DW_AT_containing_type attribute which allows GDB to correlate
+ // the vtable to the type it is for.
+ let vtable_holder = type_di_node(cx, ty);
+
+ build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::VTableTy { vtable_holder },
+ unique_type_id,
+ &vtable_type_name,
+ (size, pointer_align),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagArtificial,
+ ),
+ |cx, vtable_type_di_node| {
+ vtable_entries
+ .iter()
+ .enumerate()
+ .filter_map(|(index, vtable_entry)| {
+ let (field_name, field_type_di_node) = match vtable_entry {
+ ty::VtblEntry::MetadataDropInPlace => {
+ ("drop_in_place".to_string(), void_pointer_type_di_node)
+ }
+ ty::VtblEntry::Method(_) => {
+ // Note: This code does not try to give a proper name to each method
+ // because there might be multiple methods with the same name
+ // (coming from different traits).
+ (format!("__method{}", index), void_pointer_type_di_node)
+ }
+ ty::VtblEntry::TraitVPtr(_) => {
+ (format!("__super_trait_ptr{}", index), void_pointer_type_di_node)
+ }
+ ty::VtblEntry::MetadataAlign => ("align".to_string(), usize_di_node),
+ ty::VtblEntry::MetadataSize => ("size".to_string(), usize_di_node),
+ ty::VtblEntry::Vacant => return None,
+ };
+
+ let field_offset = pointer_size * index as u64;
+
+ Some(build_field_di_node(
+ cx,
+ vtable_type_di_node,
+ &field_name,
+ (pointer_size, pointer_align),
+ field_offset,
+ DIFlags::FlagZero,
+ field_type_di_node,
+ ))
+ })
+ .collect()
+ },
+ NO_GENERICS,
+ )
+ .di_node
+}
+
+fn vcall_visibility_metadata<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ty: Ty<'tcx>,
+ trait_ref: Option<PolyExistentialTraitRef<'tcx>>,
+ vtable: &'ll Value,
+) {
+ enum VCallVisibility {
+ Public = 0,
+ LinkageUnit = 1,
+ TranslationUnit = 2,
+ }
+
+ let Some(trait_ref) = trait_ref else { return };
+
+ let trait_ref_self = trait_ref.with_self_ty(cx.tcx, ty);
+ let trait_ref_self = cx.tcx.erase_regions(trait_ref_self);
+ let trait_def_id = trait_ref_self.def_id();
+ let trait_vis = cx.tcx.visibility(trait_def_id);
+
+ let cgus = cx.sess().codegen_units();
+ let single_cgu = cgus == 1;
+
+ let lto = cx.sess().lto();
+
+ // Since LLVM requires full LTO for the virtual function elimination optimization to apply,
+ // only the `Lto::Fat` cases are relevant currently.
+ let vcall_visibility = match (lto, trait_vis, single_cgu) {
+ // If there is no LTO and the visibility is public, we have to assume that the vtable can
+ // be seen from anywhere. With multiple CGUs, the vtable is quasi-public.
+ (Lto::No | Lto::ThinLocal, Visibility::Public, _)
+ | (Lto::No, Visibility::Restricted(_) | Visibility::Invisible, false) => {
+ VCallVisibility::Public
+ }
+ // With LTO and a quasi-public visibility, the usages of the functions of the vtable are
+ // all known by the `LinkageUnit`.
+ // FIXME: LLVM only supports this optimization for `Lto::Fat` currently. Once it also
+ // supports `Lto::Thin` the `VCallVisibility` may have to be adjusted for those.
+ (Lto::Fat | Lto::Thin, Visibility::Public, _)
+ | (
+ Lto::ThinLocal | Lto::Thin | Lto::Fat,
+ Visibility::Restricted(_) | Visibility::Invisible,
+ false,
+ ) => VCallVisibility::LinkageUnit,
+ // If there is only one CGU, private vtables can only be seen by that CGU/translation unit
+ // and therefore we know of all usages of functions in the vtable.
+ (_, Visibility::Restricted(_) | Visibility::Invisible, true) => {
+ VCallVisibility::TranslationUnit
+ }
+ };
+
+ let trait_ref_typeid = typeid_for_trait_ref(cx.tcx, trait_ref);
+
+ unsafe {
+ let typeid = llvm::LLVMMDStringInContext(
+ cx.llcx,
+ trait_ref_typeid.as_ptr() as *const c_char,
+ trait_ref_typeid.as_bytes().len() as c_uint,
+ );
+ let v = [cx.const_usize(0), typeid];
+ llvm::LLVMRustGlobalAddMetadata(
+ vtable,
+ llvm::MD_type as c_uint,
+ llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
+ cx.llcx,
+ v.as_ptr(),
+ v.len() as c_uint,
+ )),
+ );
+ let vcall_visibility = llvm::LLVMValueAsMetadata(cx.const_u64(vcall_visibility as u64));
+ let vcall_visibility_metadata = llvm::LLVMMDNodeInContext2(cx.llcx, &vcall_visibility, 1);
+ llvm::LLVMGlobalSetMetadata(
+ vtable,
+ llvm::MetadataType::MD_vcall_visibility as c_uint,
+ vcall_visibility_metadata,
+ );
+ }
+}
+
+/// Creates debug information for the given vtable, which is for the
+/// given type.
+///
+/// Adds the created metadata nodes directly to the crate's IR.
+pub fn create_vtable_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ ty: Ty<'tcx>,
+ poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+ vtable: &'ll Value,
+) {
+ // FIXME(flip1995): The virtual function elimination optimization only works with full LTO in
+ // LLVM at the moment.
+ if cx.sess().opts.unstable_opts.virtual_function_elimination && cx.sess().lto() == Lto::Fat {
+ vcall_visibility_metadata(cx, ty, poly_trait_ref, vtable);
+ }
+
+ if cx.dbg_cx.is_none() {
+ return;
+ }
+
+ // Only create type information if full debuginfo is enabled
+ if cx.sess().opts.debuginfo != DebugInfo::Full {
+ return;
+ }
+
+ let vtable_name =
+ compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::GlobalVariable);
+ let vtable_type_di_node = build_vtable_type_di_node(cx, ty, poly_trait_ref);
+ let linkage_name = "";
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateStaticVariable(
+ DIB(cx),
+ NO_SCOPE_METADATA,
+ vtable_name.as_ptr().cast(),
+ vtable_name.len(),
+ linkage_name.as_ptr().cast(),
+ linkage_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ vtable_type_di_node,
+ true,
+ vtable,
+ None,
+ 0,
+ );
+ }
+}
+
+/// Creates an "extension" of an existing `DIScope` into another file.
+pub fn extend_scope_to_file<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ scope_metadata: &'ll DIScope,
+ file: &SourceFile,
+) -> &'ll DILexicalBlock {
+ let file_metadata = file_metadata(cx, file);
+ unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlockFile(DIB(cx), scope_metadata, file_metadata) }
+}
+
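+/// Returns the debuginfo field name for the tuple field with the given index:
+/// `__0`, `__1`, etc. The first 16 names are returned as pre-allocated static
+/// strings; higher indices fall back to an owned `String`.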
+pub fn tuple_field_name(field_index: usize) -> Cow<'static, str> {
+ const TUPLE_FIELD_NAMES: [&'static str; 16] = [
+ "__0", "__1", "__2", "__3", "__4", "__5", "__6", "__7", "__8", "__9", "__10", "__11",
+ "__12", "__13", "__14", "__15",
+ ];
+ TUPLE_FIELD_NAMES
+ .get(field_index)
+ .map(|s| Cow::from(*s))
+ .unwrap_or_else(|| Cow::from(format!("__{}", field_index)))
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
new file mode 100644
index 000000000..d6e2c8ccd
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -0,0 +1,514 @@
+use std::borrow::Cow;
+
+use libc::c_uint;
+use rustc_codegen_ssa::debuginfo::{
+ type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo,
+};
+use rustc_middle::{
+ bug,
+ ty::{
+ self,
+ layout::{LayoutOf, TyAndLayout},
+ util::Discr,
+ AdtDef, GeneratorSubsts,
+ },
+};
+use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
+use smallvec::smallvec;
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::{
+ metadata::{
+ build_field_di_node, closure_saved_names_of_captured_variables,
+ enums::tag_base_type,
+ file_metadata, generator_layout_and_saved_local_names, size_and_align_of,
+ type_map::{self, UniqueTypeId},
+ unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS, NO_SCOPE_METADATA,
+ UNKNOWN_LINE_NUMBER,
+ },
+ utils::DIB,
+ },
+ llvm::{
+ self,
+ debuginfo::{DIFile, DIFlags, DIType},
+ },
+};
+
+/// In CPP-like mode, we generate a union of structs for each variant and an
+/// explicit discriminant field roughly equivalent to the following C/C++ code:
+///
+/// ```c
+/// union enum$<{fully-qualified-name}> {
+/// struct {variant 0 name} {
+/// <variant 0 fields>
+/// } variant0;
+/// <other variant structs>
+/// {name} discriminant;
+/// }
+/// ```
+///
+/// As you can see, the type name is wrapped in `enum$`. This way we can have a
+/// single NatVis rule for handling all enums.
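+///
+/// For example (an illustration only, with hypothetical names), an enum such as
+///
+/// ```rust
+/// enum Shape {
+/// Circle { radius: f32 },
+/// Square { side: f32 },
+/// }
+/// ```
+///
+/// would be described as a `union enum$<Shape>` whose members are `variant0` (the
+/// `Circle` variant struct), `variant1` (the `Square` variant struct), and a
+/// `discriminant` field.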
+///
+/// At the LLVM IR level this looks like
+///
+/// ```txt
+/// DW_TAG_union_type (top-level type for enum)
+/// DW_TAG_member (member for variant 1)
+/// DW_TAG_member (member for variant 2)
+/// DW_TAG_member (member for variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// DW_TAG_enumeration_type (type of tag)
+/// ```
+///
+/// The above encoding applies for enums with a direct tag. For niche-tag we have to do things
+/// differently in order to allow a NatVis visualizer to extract all the information needed:
+/// We generate a union of two fields, one for the dataful variant
+/// and one that just points to the discriminant (which is some field within the dataful variant).
+/// We also create a DW_TAG_enumeration_type DIE that contains tag values for the non-dataful
+/// variants and make the discriminant field that type. We then use NatVis to render the enum type
+/// correctly in Windbg/VS. This will generate debuginfo roughly equivalent to the following C:
+///
+/// ```c
+/// union enum$<{name}, {min niche}, {max niche}, {dataful variant name}> {
+/// struct <dataful variant name> {
+/// <fields in dataful variant>
+/// } dataful_variant;
+/// enum Discriminant$ {
+/// <non-dataful variants>
+/// } discriminant;
+/// }
+/// ```
+///
+/// The NatVis in `intrinsic.natvis` matches on the type name `enum$<*, *, *, *>`
+/// and evaluates `this.discriminant`. If the value is between the min niche and max
+/// niche, then the enum is in the dataful variant and `this.dataful_variant` is
+/// rendered. Otherwise, the enum is in one of the non-dataful variants. In that
+/// case, we just need to render the name of the `this.discriminant` enum.
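+///
+/// For instance (an illustration; the exact name mangling and niche values depend on the
+/// layout chosen by the compiler), a niche-layout enum such as `Option<&u32>` encodes
+/// `None` in the null-pointer niche, so its debugger type name takes the shape
+///
+/// ```txt
+/// enum$<core::option::Option<ref$<u32> >, {min niche}, {max niche}, Some>
+/// ```
+///
+/// with `Some` as the dataful variant.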
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let enum_type = unique_type_id.expect_ty();
+ let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+ bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+ };
+
+ let enum_type_and_layout = cx.layout_of(enum_type);
+ let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ type_map::Stub::Union,
+ unique_type_id,
+ &enum_type_name,
+ cx.size_and_align_of(enum_type),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |cx, enum_type_di_node| {
+ match enum_type_and_layout.variants {
+ Variants::Single { index: variant_index } => {
+ if enum_adt_def.variants().is_empty() {
+ // Uninhabited enums have Variants::Single. We don't generate
+ // any members for them.
+ return smallvec![];
+ }
+
+ build_single_variant_union_fields(
+ cx,
+ enum_adt_def,
+ enum_type_and_layout,
+ enum_type_di_node,
+ variant_index,
+ )
+ }
+ Variants::Multiple {
+ tag_encoding: TagEncoding::Direct,
+ ref variants,
+ tag_field,
+ ..
+ } => build_union_fields_for_direct_tag_enum(
+ cx,
+ enum_adt_def,
+ enum_type_and_layout,
+ enum_type_di_node,
+ &mut variants.indices(),
+ tag_field,
+ ),
+ Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+ ref variants,
+ tag_field,
+ ..
+ } => build_union_fields_for_niche_tag_enum(
+ cx,
+ enum_adt_def,
+ enum_type_and_layout,
+ enum_type_di_node,
+ dataful_variant,
+ &mut variants.indices(),
+ tag_field,
+ ),
+ }
+ },
+ NO_GENERICS,
+ )
+}
+
+/// A generator debuginfo node looks the same as that of an enum type.
+///
+/// See [build_enum_type_di_node] for more information.
+pub(super) fn build_generator_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let generator_type = unique_type_id.expect_ty();
+ let generator_type_and_layout = cx.layout_of(generator_type);
+ let generator_type_name = compute_debuginfo_type_name(cx.tcx, generator_type, false);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(generator_type_and_layout));
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ type_map::Stub::Union,
+ unique_type_id,
+ &generator_type_name,
+ size_and_align_of(generator_type_and_layout),
+ NO_SCOPE_METADATA,
+ DIFlags::FlagZero,
+ ),
+ |cx, generator_type_di_node| match generator_type_and_layout.variants {
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => {
+ build_union_fields_for_direct_tag_generator(
+ cx,
+ generator_type_and_layout,
+ generator_type_di_node,
+ )
+ }
+ Variants::Single { .. }
+ | Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, .. } => {
+ bug!(
+ "Encountered generator with non-direct-tag layout: {:?}",
+ generator_type_and_layout
+ )
+ }
+ },
+ NO_GENERICS,
+ )
+}
+
+fn build_single_variant_union_fields<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_index: VariantIdx,
+) -> SmallVec<&'ll DIType> {
+ let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
+ let variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout.ty,
+ enum_type_di_node,
+ variant_index,
+ enum_adt_def.variant(variant_index),
+ variant_layout,
+ );
+
+ // NOTE: The field name of the union is the same as the variant name, not "variant0".
+ let variant_name = enum_adt_def.variant(variant_index).name.as_str();
+
+ smallvec![build_field_di_node(
+ cx,
+ enum_type_di_node,
+ variant_name,
+ // NOTE: We use the size and align of the entire type, not that of variant_layout,
+ // since the latter is sometimes smaller (if it has fewer fields).
+ size_and_align_of(enum_type_and_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ variant_struct_type_di_node,
+ )]
+}
+
+fn build_union_fields_for_direct_tag_enum<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_indices: &mut dyn Iterator<Item = VariantIdx>,
+ tag_field: usize,
+) -> SmallVec<&'ll DIType> {
+ let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_indices
+ .map(|variant_index| {
+ let variant_layout = enum_type_and_layout.for_variant(cx, variant_index);
+
+ VariantFieldInfo {
+ variant_index,
+ variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout.ty,
+ enum_type_di_node,
+ variant_index,
+ enum_adt_def.variant(variant_index),
+ variant_layout,
+ ),
+ source_info: None,
+ }
+ })
+ .collect();
+
+ let discr_type_name = cx.tcx.item_name(enum_adt_def.did());
+ let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
+ let discr_type_di_node = super::build_enumeration_type_di_node(
+ cx,
+ discr_type_name.as_str(),
+ tag_base_type,
+ &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
+ (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str()))
+ }),
+ enum_type_di_node,
+ );
+
+ build_union_fields_for_direct_tag_enum_or_generator(
+ cx,
+ enum_type_and_layout,
+ enum_type_di_node,
+ &variant_field_infos,
+ discr_type_di_node,
+ tag_field,
+ )
+}
+
+fn build_union_fields_for_niche_tag_enum<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ dataful_variant_index: VariantIdx,
+ variant_indices: &mut dyn Iterator<Item = VariantIdx>,
+ tag_field: usize,
+) -> SmallVec<&'ll DIType> {
+ let dataful_variant_struct_type_di_node = super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type_and_layout.ty,
+ enum_type_di_node,
+ dataful_variant_index,
+ &enum_adt_def.variant(dataful_variant_index),
+ enum_type_and_layout.for_variant(cx, dataful_variant_index),
+ );
+
+ let tag_base_type = super::tag_base_type(cx, enum_type_and_layout);
+ // Create a DW_TAG_enumerator for each variant except the dataful one.
+ let discr_type_di_node = super::build_enumeration_type_di_node(
+ cx,
+ "Discriminant$",
+ tag_base_type,
+ &mut variant_indices.filter_map(|variant_index| {
+ if let Some(discr_val) =
+ super::compute_discriminant_value(cx, enum_type_and_layout, variant_index)
+ {
+ let discr = Discr { val: discr_val as u128, ty: tag_base_type };
+ let variant_name = Cow::from(enum_adt_def.variant(variant_index).name.as_str());
+ Some((discr, variant_name))
+ } else {
+ debug_assert_eq!(variant_index, dataful_variant_index);
+ None
+ }
+ }),
+ enum_type_di_node,
+ );
+
+ smallvec![
+ build_field_di_node(
+ cx,
+ enum_type_di_node,
+ "dataful_variant",
+ size_and_align_of(enum_type_and_layout),
+ Size::ZERO,
+ DIFlags::FlagZero,
+ dataful_variant_struct_type_di_node,
+ ),
+ build_field_di_node(
+ cx,
+ enum_type_di_node,
+ "discriminant",
+ cx.size_and_align_of(tag_base_type),
+ enum_type_and_layout.fields.offset(tag_field),
+ DIFlags::FlagZero,
+ discr_type_di_node,
+ ),
+ ]
+}
+
+fn build_union_fields_for_direct_tag_generator<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ generator_type_and_layout: TyAndLayout<'tcx>,
+ generator_type_di_node: &'ll DIType,
+) -> SmallVec<&'ll DIType> {
+ let Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } = generator_type_and_layout.variants else {
+ bug!("This function only supports layouts with directly encoded tags.")
+ };
+
+ let (generator_def_id, generator_substs) = match generator_type_and_layout.ty.kind() {
+ &ty::Generator(def_id, substs, _) => (def_id, substs.as_generator()),
+ _ => unreachable!(),
+ };
+
+ let (generator_layout, state_specific_upvar_names) =
+ generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+
+ let common_upvar_names = closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+ let variant_range = generator_substs.variant_range(generator_def_id, cx.tcx);
+
+ // Build the type node for each field.
+ let variant_field_infos: SmallVec<VariantFieldInfo<'ll>> = variant_range
+ .map(|variant_index| {
+ let variant_struct_type_di_node = super::build_generator_variant_struct_type_di_node(
+ cx,
+ variant_index,
+ generator_type_and_layout,
+ generator_type_di_node,
+ generator_layout,
+ &state_specific_upvar_names,
+ &common_upvar_names,
+ );
+
+ let span = generator_layout.variant_source_info[variant_index].span;
+ let source_info = if !span.is_dummy() {
+ let loc = cx.lookup_debug_loc(span.lo());
+ Some((file_metadata(cx, &loc.file), loc.line as c_uint))
+ } else {
+ None
+ };
+
+ VariantFieldInfo { variant_index, variant_struct_type_di_node, source_info }
+ })
+ .collect();
+
+ let tag_base_type = tag_base_type(cx, generator_type_and_layout);
+ let discr_type_name = "Discriminant$";
+ let discr_type_di_node = super::build_enumeration_type_di_node(
+ cx,
+ discr_type_name,
+ tag_base_type,
+ &mut generator_substs
+ .discriminants(generator_def_id, cx.tcx)
+ .map(|(variant_index, discr)| (discr, GeneratorSubsts::variant_name(variant_index))),
+ generator_type_di_node,
+ );
+
+ build_union_fields_for_direct_tag_enum_or_generator(
+ cx,
+ generator_type_and_layout,
+ generator_type_di_node,
+ &variant_field_infos[..],
+ discr_type_di_node,
+ tag_field,
+ )
+}
+
+/// This is a helper function shared between enums and generators that makes sure fields have the
+/// expected names.
+fn build_union_fields_for_direct_tag_enum_or_generator<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_field_infos: &[VariantFieldInfo<'ll>],
+ discr_type_di_node: &'ll DIType,
+ tag_field: usize,
+) -> SmallVec<&'ll DIType> {
+ let mut unions_fields = SmallVec::with_capacity(variant_field_infos.len() + 1);
+
+ // We create a field in the union for each variant ...
+ unions_fields.extend(variant_field_infos.into_iter().map(|variant_member_info| {
+ let (file_di_node, line_number) = variant_member_info
+ .source_info
+ .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
+
+ let field_name = variant_union_field_name(variant_member_info.variant_index);
+ let (size, align) = size_and_align_of(enum_type_and_layout);
+
+ // We use LLVMRustDIBuilderCreateMemberType() directly because
+ // the build_field_di_node() function does not support specifying a source location,
+ // which is something that we don't do anywhere else.
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ enum_type_di_node,
+ field_name.as_ptr().cast(),
+ field_name.len(),
+ file_di_node,
+ line_number,
+ // NOTE: We use the size and align of the entire type, not that of variant_layout,
+ // since the latter is sometimes smaller (if it has fewer fields).
+ size.bits(),
+ align.bits() as u32,
+ // Union fields are always at offset zero
+ Size::ZERO.bits(),
+ DIFlags::FlagZero,
+ variant_member_info.variant_struct_type_di_node,
+ )
+ }
+ }));
+
+ debug_assert_eq!(
+ cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+ cx.size_and_align_of(super::tag_base_type(cx, enum_type_and_layout))
+ );
+
+ // ... and a field for the discriminant.
+ unions_fields.push(build_field_di_node(
+ cx,
+ enum_type_di_node,
+ "discriminant",
+ cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+ enum_type_and_layout.fields.offset(tag_field),
+ DIFlags::FlagZero,
+ discr_type_di_node,
+ ));
+
+ unions_fields
+}
+
+/// Information about a single field of the top-level DW_TAG_union_type.
+struct VariantFieldInfo<'ll> {
+ variant_index: VariantIdx,
+ variant_struct_type_di_node: &'ll DIType,
+ source_info: Option<(&'ll DIFile, c_uint)>,
+}
+
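+/// Returns the union field name for the variant with the given index (`variant0`,
+/// `variant1`, ...). As with `tuple_field_name`, the first 16 names are pre-allocated
+/// static strings and higher indices allocate a `String`.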
+fn variant_union_field_name(variant_index: VariantIdx) -> Cow<'static, str> {
+ const PRE_ALLOCATED: [&str; 16] = [
+ "variant0",
+ "variant1",
+ "variant2",
+ "variant3",
+ "variant4",
+ "variant5",
+ "variant6",
+ "variant7",
+ "variant8",
+ "variant9",
+ "variant10",
+ "variant11",
+ "variant12",
+ "variant13",
+ "variant14",
+ "variant15",
+ ];
+
+ PRE_ALLOCATED
+ .get(variant_index.as_usize())
+ .map(|&s| Cow::from(s))
+ .unwrap_or_else(|| format!("variant{}", variant_index.as_usize()).into())
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
new file mode 100644
index 000000000..73e01d045
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs
@@ -0,0 +1,437 @@
+use rustc_codegen_ssa::debuginfo::{
+ type_names::{compute_debuginfo_type_name, cpp_like_debuginfo},
+ wants_c_like_enum_debuginfo,
+};
+use rustc_hir::def::CtorKind;
+use rustc_index::vec::IndexVec;
+use rustc_middle::{
+ bug,
+ mir::{Field, GeneratorLayout, GeneratorSavedLocal},
+ ty::{
+ self,
+ layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout},
+ util::Discr,
+ AdtDef, GeneratorSubsts, Ty, VariantDef,
+ },
+};
+use rustc_span::Symbol;
+use rustc_target::abi::{HasDataLayout, Integer, Primitive, TagEncoding, VariantIdx, Variants};
+use std::borrow::Cow;
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::{
+ metadata::{
+ build_field_di_node, build_generic_type_param_di_nodes, type_di_node,
+ type_map::{self, Stub},
+ unknown_file_metadata, UNKNOWN_LINE_NUMBER,
+ },
+ utils::{create_DIArray, get_namespace_for_item, DIB},
+ },
+ llvm::{
+ self,
+ debuginfo::{DIFlags, DIType},
+ },
+};
+
+use super::{
+ size_and_align_of,
+ type_map::{DINodeCreationResult, UniqueTypeId},
+ SmallVec,
+};
+
+mod cpp_like;
+mod native;
+
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let enum_type = unique_type_id.expect_ty();
+ let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+ bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+ };
+
+ let enum_type_and_layout = cx.layout_of(enum_type);
+
+ if wants_c_like_enum_debuginfo(enum_type_and_layout) {
+ return build_c_style_enum_di_node(cx, enum_adt_def, enum_type_and_layout);
+ }
+
+ if cpp_like_debuginfo(cx.tcx) {
+ cpp_like::build_enum_type_di_node(cx, unique_type_id)
+ } else {
+ native::build_enum_type_di_node(cx, unique_type_id)
+ }
+}
+
+pub(super) fn build_generator_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ if cpp_like_debuginfo(cx.tcx) {
+ cpp_like::build_generator_di_node(cx, unique_type_id)
+ } else {
+ native::build_generator_di_node(cx, unique_type_id)
+ }
+}
+
+/// Build the debuginfo node for a C-style enum, i.e. an enum whose variants have no fields.
+///
+/// The resulting debuginfo will be a DW_TAG_enumeration_type.
+fn build_c_style_enum_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_adt_def: AdtDef<'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
+ DINodeCreationResult {
+ di_node: build_enumeration_type_di_node(
+ cx,
+ &compute_debuginfo_type_name(cx.tcx, enum_type_and_layout.ty, false),
+ tag_base_type(cx, enum_type_and_layout),
+ &mut enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| {
+ (discr, Cow::from(enum_adt_def.variant(variant_index).name.as_str()))
+ }),
+ containing_scope,
+ ),
+ already_stored_in_typemap: false,
+ }
+}
+
+/// Extract the type with which we want to describe the tag of the given enum or generator.
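+///
+/// For example (illustrative): an enum with a direct `#[repr(u16)]` tag yields `u16`,
+/// while a niche-layout enum whose niche lives in a pointer-typed field yields `usize`
+/// (see the `Primitive::Pointer` case below).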
+fn tag_base_type<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+) -> Ty<'tcx> {
+ debug_assert!(match enum_type_and_layout.ty.kind() {
+ ty::Generator(..) => true,
+ ty::Adt(adt_def, _) => adt_def.is_enum(),
+ _ => false,
+ });
+
+ match enum_type_and_layout.layout.variants() {
+ // A single-variant enum has no discriminant.
+ Variants::Single { .. } => {
+ bug!("tag_base_type() called for enum without tag: {:?}", enum_type_and_layout)
+ }
+
+ Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => {
+ // Niche tags are always normalized to unsigned integers of the correct size.
+ match tag.primitive() {
+ Primitive::Int(t, _) => t,
+ Primitive::F32 => Integer::I32,
+ Primitive::F64 => Integer::I64,
+ Primitive::Pointer => {
+ // If the niche is the NULL value of a reference, then `discr_enum_ty` will be
+ // a RawPtr. CodeView doesn't know what to do with enums whose base type is a
+ // pointer so we fix this up to just be `usize`.
+ // DWARF might be able to deal with this but with an integer type we are on
+ // the safe side there too.
+ cx.data_layout().ptr_sized_integer()
+ }
+ }
+ .to_ty(cx.tcx, false)
+ }
+
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
+ // Direct tags preserve the sign.
+ tag.primitive().to_ty(cx.tcx)
+ }
+ }
+}
+
+/// Build a DW_TAG_enumeration_type debuginfo node, with the given base type and variants.
+/// This is a helper function and does not register anything in the type map by itself.
+///
+/// `variants` is an iterator of (discr-value, variant-name).
+///
+// NOTE: Handling of discriminant values is somewhat inconsistent. They can appear as u128,
+// u64, and i64. Here everything gets mapped to i64 because that's what LLVM's API expects.
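+// For example (illustrative): for an enum like `enum Ordering { Less = -1, Equal = 0,
+// Greater = 1 }`, the `variants` iterator would yield (roughly) the pairs (-1, "Less"),
+// (0, "Equal"), and (1, "Greater"), each becoming one DW_TAG_enumerator below.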
+fn build_enumeration_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ type_name: &str,
+ base_type: Ty<'tcx>,
+ variants: &mut dyn Iterator<Item = (Discr<'tcx>, Cow<'tcx, str>)>,
+ containing_scope: &'ll DIType,
+) -> &'ll DIType {
+ let is_unsigned = match base_type.kind() {
+ ty::Int(_) => false,
+ ty::Uint(_) => true,
+ _ => bug!("build_enumeration_type_di_node() called with non-integer tag type."),
+ };
+
+ let enumerator_di_nodes: SmallVec<Option<&'ll DIType>> = variants
+ .map(|(discr, variant_name)| {
+ unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateEnumerator(
+ DIB(cx),
+ variant_name.as_ptr().cast(),
+ variant_name.len(),
+ // FIXME: what if enumeration has i128 discriminant?
+ discr.val as i64,
+ is_unsigned,
+ ))
+ }
+ })
+ .collect();
+
+ let (size, align) = cx.size_and_align_of(base_type);
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateEnumerationType(
+ DIB(cx),
+ containing_scope,
+ type_name.as_ptr().cast(),
+ type_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ create_DIArray(DIB(cx), &enumerator_di_nodes[..]),
+ type_di_node(cx, base_type),
+ true,
+ )
+ }
+}
+
+/// Build the debuginfo node for the struct type describing a single variant of an enum.
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// ---> DW_TAG_structure_type (type of variant 1)
+/// ---> DW_TAG_structure_type (type of variant 2)
+/// ---> DW_TAG_structure_type (type of variant 3)
+/// ```
+///
+/// In CPP-like mode, we have the exact same descriptions for each variant too:
+///
+/// ```txt
+/// DW_TAG_union_type (top-level type for enum)
+/// DW_TAG_member (member for variant 1)
+/// DW_TAG_member (member for variant 2)
+/// DW_TAG_member (member for variant 3)
+/// ---> DW_TAG_structure_type (type of variant 1)
+/// ---> DW_TAG_structure_type (type of variant 2)
+/// ---> DW_TAG_structure_type (type of variant 3)
+/// DW_TAG_enumeration_type (type of tag)
+/// ```
+///
+/// The node looks like:
+///
+/// ```txt
+/// DW_TAG_structure_type
+/// DW_AT_name <name-of-variant>
+/// DW_AT_byte_size 0x00000010
+/// DW_AT_alignment 0x00000008
+/// DW_TAG_member
+/// DW_AT_name <name-of-field-0>
+/// DW_AT_type <0x0000018e>
+/// DW_AT_alignment 0x00000004
+/// DW_AT_data_member_location 4
+/// DW_TAG_member
+/// DW_AT_name <name-of-field-1>
+/// DW_AT_type <0x00000195>
+/// DW_AT_alignment 0x00000008
+/// DW_AT_data_member_location 8
+/// ...
+/// ```
+///
+/// The type of a variant is always a struct type with the name of the variant
+/// and a DW_TAG_member for each field (but not the discriminant).
+fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type: Ty<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_index: VariantIdx,
+ variant_def: &VariantDef,
+ variant_layout: TyAndLayout<'tcx>,
+) -> &'ll DIType {
+ debug_assert_eq!(variant_layout.ty, enum_type);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ UniqueTypeId::for_enum_variant_struct_type(cx.tcx, enum_type, variant_index),
+ variant_def.name.as_str(),
+ // NOTE: We use size and align of enum_type, not from variant_layout:
+ cx.size_and_align_of(enum_type),
+ Some(enum_type_di_node),
+ DIFlags::FlagZero,
+ ),
+ |cx, struct_type_di_node| {
+ (0..variant_layout.fields.count())
+ .map(|field_index| {
+ let field_name = if variant_def.ctor_kind != CtorKind::Fn {
+ // Fields have names
+ Cow::from(variant_def.fields[field_index].name.as_str())
+ } else {
+ // Tuple-like
+ super::tuple_field_name(field_index)
+ };
+
+ let field_layout = variant_layout.field(cx, field_index);
+
+ build_field_di_node(
+ cx,
+ struct_type_di_node,
+ &field_name,
+ (field_layout.size, field_layout.align.abi),
+ variant_layout.fields.offset(field_index),
+ DIFlags::FlagZero,
+ type_di_node(cx, field_layout.ty),
+ )
+ })
+ .collect()
+ },
+ |cx| build_generic_type_param_di_nodes(cx, enum_type),
+ )
+ .di_node
+}
+
+/// Build the struct type for describing a single generator state.
+/// See [build_generator_variant_struct_type_di_node].
+///
+/// ```txt
+///
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// ---> DW_TAG_structure_type (type of variant 1)
+/// ---> DW_TAG_structure_type (type of variant 2)
+/// ---> DW_TAG_structure_type (type of variant 3)
+///
+/// ```
+pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ variant_index: VariantIdx,
+ generator_type_and_layout: TyAndLayout<'tcx>,
+ generator_type_di_node: &'ll DIType,
+ generator_layout: &GeneratorLayout<'tcx>,
+ state_specific_upvar_names: &IndexVec<GeneratorSavedLocal, Option<Symbol>>,
+ common_upvar_names: &[String],
+) -> &'ll DIType {
+ let variant_name = GeneratorSubsts::variant_name(variant_index);
+ let unique_type_id = UniqueTypeId::for_enum_variant_struct_type(
+ cx.tcx,
+ generator_type_and_layout.ty,
+ variant_index,
+ );
+
+ let variant_layout = generator_type_and_layout.for_variant(cx, variant_index);
+
+ let generator_substs = match generator_type_and_layout.ty.kind() {
+ ty::Generator(_, substs, _) => substs.as_generator(),
+ _ => unreachable!(),
+ };
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &variant_name,
+ size_and_align_of(generator_type_and_layout),
+ Some(generator_type_di_node),
+ DIFlags::FlagZero,
+ ),
+ |cx, variant_struct_type_di_node| {
+ // Fields that just belong to this variant/state
+ let state_specific_fields: SmallVec<_> = (0..variant_layout.fields.count())
+ .map(|field_index| {
+ let generator_saved_local = generator_layout.variant_fields[variant_index]
+ [Field::from_usize(field_index)];
+ let field_name_maybe = state_specific_upvar_names[generator_saved_local];
+ let field_name = field_name_maybe
+ .as_ref()
+ .map(|s| Cow::from(s.as_str()))
+ .unwrap_or_else(|| super::tuple_field_name(field_index));
+
+ let field_type = variant_layout.field(cx, field_index).ty;
+
+ build_field_di_node(
+ cx,
+ variant_struct_type_di_node,
+ &field_name,
+ cx.size_and_align_of(field_type),
+ variant_layout.fields.offset(field_index),
+ DIFlags::FlagZero,
+ type_di_node(cx, field_type),
+ )
+ })
+ .collect();
+
+ // Fields that are common to all states
+ let common_fields: SmallVec<_> = generator_substs
+ .prefix_tys()
+ .enumerate()
+ .map(|(index, upvar_ty)| {
+ build_field_di_node(
+ cx,
+ variant_struct_type_di_node,
+ &common_upvar_names[index],
+ cx.size_and_align_of(upvar_ty),
+ generator_type_and_layout.fields.offset(index),
+ DIFlags::FlagZero,
+ type_di_node(cx, upvar_ty),
+ )
+ })
+ .collect();
+
+ state_specific_fields.into_iter().chain(common_fields.into_iter()).collect()
+ },
+ |cx| build_generic_type_param_di_nodes(cx, generator_type_and_layout.ty),
+ )
+ .di_node
+}
+
+/// Returns the discriminant value corresponding to the variant index.
+///
+/// Will return `None` if there are fewer than two variants (because then the enum won't have
+/// a tag), or if this is the dataful variant of a niche-layout enum (because then there is no
+/// single discriminant value).
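+///
+/// As a worked example (numbers chosen purely for illustration): if `niche_variants`
+/// starts at variant index 1 and `niche_start` is 5, then variant 2 is encoded as
+/// `(2 - 1).wrapping_add(5) = 6`, truncated to the size of the tag.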
+fn compute_discriminant_value<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ variant_index: VariantIdx,
+) -> Option<u64> {
+ match enum_type_and_layout.layout.variants() {
+ &Variants::Single { .. } => None,
+ &Variants::Multiple { tag_encoding: TagEncoding::Direct, .. } => Some(
+ enum_type_and_layout.ty.discriminant_for_variant(cx.tcx, variant_index).unwrap().val
+ as u64,
+ ),
+ &Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
+ tag,
+ ..
+ } => {
+ if variant_index == dataful_variant {
+ None
+ } else {
+ let value = (variant_index.as_u32() as u128)
+ .wrapping_sub(niche_variants.start().as_u32() as u128)
+ .wrapping_add(niche_start);
+ let value = tag.size(cx).truncate(value);
+ // NOTE(eddyb) do *NOT* remove this assert, until
+ // we pass the full 128-bit value to LLVM, otherwise
+ // truncation will be silent and remain undetected.
+ assert_eq!(value as u64 as u128, value);
+ Some(value as u64)
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
new file mode 100644
index 000000000..f1935e0ec
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -0,0 +1,441 @@
+use std::borrow::Cow;
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::{
+ metadata::{
+ closure_saved_names_of_captured_variables,
+ enums::tag_base_type,
+ file_metadata, generator_layout_and_saved_local_names, size_and_align_of, type_di_node,
+ type_map::{self, Stub, StubInfo, UniqueTypeId},
+ unknown_file_metadata, DINodeCreationResult, SmallVec, NO_GENERICS,
+ UNKNOWN_LINE_NUMBER,
+ },
+ utils::{create_DIArray, get_namespace_for_item, DIB},
+ },
+ llvm::{
+ self,
+ debuginfo::{DIFile, DIFlags, DIType},
+ },
+};
+use libc::c_uint;
+use rustc_codegen_ssa::{
+ debuginfo::{type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo},
+ traits::ConstMethods,
+};
+use rustc_middle::{
+ bug,
+ ty::{
+ self,
+ layout::{LayoutOf, TyAndLayout},
+ },
+};
+use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants};
+use smallvec::smallvec;
+
+/// Build the debuginfo node for an enum type. The listing below shows how such a
+/// type looks like at the LLVM IR/DWARF level. It is a `DW_TAG_structure_type`
+/// with a single `DW_TAG_variant_part` that in turn contains a `DW_TAG_variant`
+/// for each variant of the enum. The variant-part also contains a single member
+/// describing the discriminant, and a nested struct type for each of the variants.
+///
+/// ```txt
+/// ---> DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let enum_type = unique_type_id.expect_ty();
+ let &ty::Adt(enum_adt_def, _) = enum_type.kind() else {
+ bug!("build_enum_type_di_node() called with non-enum type: `{:?}`", enum_type)
+ };
+
+ let containing_scope = get_namespace_for_item(cx, enum_adt_def.did());
+ let enum_type_and_layout = cx.layout_of(enum_type);
+ let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &enum_type_name,
+ size_and_align_of(enum_type_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ |cx, enum_type_di_node| {
+ // Build the struct type for each variant. These will be referenced by the
+ // DW_TAG_variant DIEs inside of the DW_TAG_variant_part DIE.
+ // We also collect the names for the corresponding DW_TAG_variant DIEs here.
+ let variant_member_infos: SmallVec<_> = enum_adt_def
+ .variant_range()
+ .map(|variant_index| VariantMemberInfo {
+ variant_index,
+ variant_name: Cow::from(enum_adt_def.variant(variant_index).name.as_str()),
+ variant_struct_type_di_node: super::build_enum_variant_struct_type_di_node(
+ cx,
+ enum_type,
+ enum_type_di_node,
+ variant_index,
+ enum_adt_def.variant(variant_index),
+ enum_type_and_layout.for_variant(cx, variant_index),
+ ),
+ source_info: None,
+ })
+ .collect();
+
+ smallvec![build_enum_variant_part_di_node(
+ cx,
+ enum_type_and_layout,
+ enum_type_di_node,
+ &variant_member_infos[..],
+ )]
+ },
+ // We don't seem to be emitting generic args on the enum type. Rather,
+ // they get attached to the struct type of each variant.
+ NO_GENERICS,
+ )
+}
+
+/// Build the debuginfo node for a generator environment. It looks the same as the debuginfo for
+/// an enum. See [build_enum_type_di_node] for more information.
+///
+/// ```txt
+///
+/// ---> DW_TAG_structure_type (top-level type for the generator)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+///
+/// ```
+pub(super) fn build_generator_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+) -> DINodeCreationResult<'ll> {
+ let generator_type = unique_type_id.expect_ty();
+ let &ty::Generator(generator_def_id, _, _ ) = generator_type.kind() else {
+ bug!("build_generator_di_node() called with non-generator type: `{:?}`", generator_type)
+ };
+
+ let containing_scope = get_namespace_for_item(cx, generator_def_id);
+ let generator_type_and_layout = cx.layout_of(generator_type);
+
+ debug_assert!(!wants_c_like_enum_debuginfo(generator_type_and_layout));
+
+ let generator_type_name = compute_debuginfo_type_name(cx.tcx, generator_type, false);
+
+ type_map::build_type_with_children(
+ cx,
+ type_map::stub(
+ cx,
+ Stub::Struct,
+ unique_type_id,
+ &generator_type_name,
+ size_and_align_of(generator_type_and_layout),
+ Some(containing_scope),
+ DIFlags::FlagZero,
+ ),
+ |cx, generator_type_di_node| {
+ let (generator_layout, state_specific_upvar_names) =
+ generator_layout_and_saved_local_names(cx.tcx, generator_def_id);
+
+ let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } = generator_type_and_layout.variants else {
+ bug!(
+ "Encountered generator with non-direct-tag layout: {:?}",
+ generator_type_and_layout
+ )
+ };
+
+ let common_upvar_names =
+ closure_saved_names_of_captured_variables(cx.tcx, generator_def_id);
+
+ // Build variant struct types
+ let variant_struct_type_di_nodes: SmallVec<_> = variants
+ .indices()
+ .map(|variant_index| {
+ // FIXME: This is problematic because just a number is not a valid identifier.
+ // Would using GeneratorSubsts::variant_name(variant_index) be more consistent
+ // with enums?
+ let variant_name = format!("{}", variant_index.as_usize()).into();
+
+ let span = generator_layout.variant_source_info[variant_index].span;
+ let source_info = if !span.is_dummy() {
+ let loc = cx.lookup_debug_loc(span.lo());
+ Some((file_metadata(cx, &loc.file), loc.line))
+ } else {
+ None
+ };
+
+ VariantMemberInfo {
+ variant_index,
+ variant_name,
+ variant_struct_type_di_node:
+ super::build_generator_variant_struct_type_di_node(
+ cx,
+ variant_index,
+ generator_type_and_layout,
+ generator_type_di_node,
+ generator_layout,
+ &state_specific_upvar_names,
+ &common_upvar_names,
+ ),
+ source_info,
+ }
+ })
+ .collect();
+
+ smallvec![build_enum_variant_part_di_node(
+ cx,
+ generator_type_and_layout,
+ generator_type_di_node,
+ &variant_struct_type_di_nodes[..],
+ )]
+ },
+ // We don't seem to be emitting generic args on the generator type. Rather,
+ // they get attached to the struct type of each variant.
+ NO_GENERICS,
+ )
+}
+
+/// Builds the DW_TAG_variant_part of an enum or generator debuginfo node:
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// ---> DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+fn build_enum_variant_part_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ enum_type_di_node: &'ll DIType,
+ variant_member_infos: &[VariantMemberInfo<'_, 'll>],
+) -> &'ll DIType {
+ let tag_member_di_node =
+ build_discr_member_di_node(cx, enum_type_and_layout, enum_type_di_node);
+
+ let variant_part_unique_type_id =
+ UniqueTypeId::for_enum_variant_part(cx.tcx, enum_type_and_layout.ty);
+
+ let stub = StubInfo::new(
+ cx,
+ variant_part_unique_type_id,
+ |cx, variant_part_unique_type_id_str| unsafe {
+ let variant_part_name = "";
+ llvm::LLVMRustDIBuilderCreateVariantPart(
+ DIB(cx),
+ enum_type_di_node,
+ variant_part_name.as_ptr().cast(),
+ variant_part_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ enum_type_and_layout.size.bits(),
+ enum_type_and_layout.align.abi.bits() as u32,
+ DIFlags::FlagZero,
+ tag_member_di_node,
+ create_DIArray(DIB(cx), &[]),
+ variant_part_unique_type_id_str.as_ptr().cast(),
+ variant_part_unique_type_id_str.len(),
+ )
+ },
+ );
+
+ type_map::build_type_with_children(
+ cx,
+ stub,
+ |cx, variant_part_di_node| {
+ variant_member_infos
+ .iter()
+ .map(|variant_member_info| {
+ build_enum_variant_member_di_node(
+ cx,
+ enum_type_and_layout,
+ variant_part_di_node,
+ variant_member_info,
+ )
+ })
+ .collect()
+ },
+ NO_GENERICS,
+ )
+ .di_node
+}
+
+/// Builds the DW_TAG_member describing where we can find the tag of an enum.
+/// Returns `None` if the enum does not have a tag.
+///
+/// ```txt
+///
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// ---> DW_TAG_member (discriminant member)
+/// DW_TAG_variant (variant 1)
+/// DW_TAG_variant (variant 2)
+/// DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+///
+/// ```
+fn build_discr_member_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_or_generator_type_and_layout: TyAndLayout<'tcx>,
+ enum_or_generator_type_di_node: &'ll DIType,
+) -> Option<&'ll DIType> {
+ let tag_name = match enum_or_generator_type_and_layout.ty.kind() {
+ ty::Generator(..) => "__state",
+ _ => "",
+ };
+
+ // NOTE: This is actually wrong. This will become a member
+ // of the DW_TAG_variant_part. But, due to LLVM's API, that
+ // can only be constructed with this DW_TAG_member already created.
+ // In LLVM IR the wrong scope will be listed, but when DWARF is
+ // generated from it, the DW_TAG_member will be a child of the
+ // DW_TAG_variant_part.
+ let containing_scope = enum_or_generator_type_di_node;
+
+ match enum_or_generator_type_and_layout.layout.variants() {
+ // A single-variant enum has no discriminant.
+ &Variants::Single { .. } => None,
+
+ &Variants::Multiple { tag_field, .. } => {
+ let tag_base_type = tag_base_type(cx, enum_or_generator_type_and_layout);
+ let (size, align) = cx.size_and_align_of(tag_base_type);
+
+ unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateMemberType(
+ DIB(cx),
+ containing_scope,
+ tag_name.as_ptr().cast(),
+ tag_name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ enum_or_generator_type_and_layout.fields.offset(tag_field).bits(),
+ DIFlags::FlagArtificial,
+ type_di_node(cx, tag_base_type),
+ ))
+ }
+ }
+ }
+}
+
+/// Build the debuginfo node for `DW_TAG_variant`:
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// ---> DW_TAG_variant (variant 1)
+/// ---> DW_TAG_variant (variant 2)
+/// ---> DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+///
+/// This node looks like:
+///
+/// ```txt
+/// DW_TAG_variant
+/// DW_AT_discr_value 0
+/// DW_TAG_member
+/// DW_AT_name None
+/// DW_AT_type <0x000002a1>
+/// DW_AT_alignment 0x00000002
+/// DW_AT_data_member_location 0
+/// ```
+///
+/// The DW_AT_discr_value is optional, and is omitted if
+/// - This is the only variant of a univariant enum (i.e. there is no discriminant)
+/// - This is the "dataful" variant of a niche-layout enum
+/// (where only the other variants are identified by a single value)
+///
+/// There is only ever a single member, the type of which is a struct that describes the
+/// fields of the variant (excluding the discriminant). The name of the member is the name
+/// of the variant as given in the source code. The DW_AT_data_member_location is always
+/// zero.
+///
+/// Note that the LLVM DIBuilder API is a bit unintuitive here. The DW_TAG_variant subtree
+/// (including the DW_TAG_member) is built by a single call to
+/// `LLVMRustDIBuilderCreateVariantMemberType()`.
+fn build_enum_variant_member_di_node<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ enum_type_and_layout: TyAndLayout<'tcx>,
+ variant_part_di_node: &'ll DIType,
+ variant_member_info: &VariantMemberInfo<'_, 'll>,
+) -> &'ll DIType {
+ let variant_index = variant_member_info.variant_index;
+ let discr_value = super::compute_discriminant_value(cx, enum_type_and_layout, variant_index);
+
+ let (file_di_node, line_number) = variant_member_info
+ .source_info
+ .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER));
+
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateVariantMemberType(
+ DIB(cx),
+ variant_part_di_node,
+ variant_member_info.variant_name.as_ptr().cast(),
+ variant_member_info.variant_name.len(),
+ file_di_node,
+ line_number,
+ enum_type_and_layout.size.bits(),
+ enum_type_and_layout.align.abi.bits() as u32,
+ Size::ZERO.bits(),
+ discr_value.map(|v| cx.const_u64(v)),
+ DIFlags::FlagZero,
+ variant_member_info.variant_struct_type_di_node,
+ )
+ }
+}
+
+/// Information needed for building a `DW_TAG_variant`:
+///
+/// ```txt
+/// DW_TAG_structure_type (top-level type for enum)
+/// DW_TAG_variant_part (variant part)
+/// DW_AT_discr (reference to discriminant DW_TAG_member)
+/// DW_TAG_member (discriminant member)
+/// ---> DW_TAG_variant (variant 1)
+/// ---> DW_TAG_variant (variant 2)
+/// ---> DW_TAG_variant (variant 3)
+/// DW_TAG_structure_type (type of variant 1)
+/// DW_TAG_structure_type (type of variant 2)
+/// DW_TAG_structure_type (type of variant 3)
+/// ```
+struct VariantMemberInfo<'a, 'll> {
+ variant_index: VariantIdx,
+ variant_name: Cow<'a, str>,
+ variant_struct_type_di_node: &'ll DIType,
+ source_info: Option<(&'ll DIFile, c_uint)>,
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
new file mode 100644
index 000000000..ce2f419c4
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs
@@ -0,0 +1,267 @@
+use std::cell::RefCell;
+
+use rustc_data_structures::{
+ fingerprint::Fingerprint,
+ fx::FxHashMap,
+ stable_hasher::{HashStable, StableHasher},
+};
+use rustc_middle::{
+ bug,
+ ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt},
+};
+use rustc_target::abi::{Align, Size, VariantIdx};
+
+use crate::{
+ common::CodegenCx,
+ debuginfo::utils::{create_DIArray, debug_context, DIB},
+ llvm::{
+ self,
+ debuginfo::{DIFlags, DIScope, DIType},
+ },
+};
+
+use super::{unknown_file_metadata, SmallVec, UNKNOWN_LINE_NUMBER};
+
+mod private {
+ // This type cannot be constructed outside of this module because
+ // it has a private field. We make use of this in order to prevent
+ // `UniqueTypeId` from being constructed directly, without asserting
+ // the preconditions.
+ #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
+ pub struct HiddenZst;
+}
+
+/// A unique identifier for anything that we create a debuginfo node for.
+/// The types it contains are expected to already be normalized (which
+/// is debug_asserted in the constructors).
+///
+/// Note that there are some things that only show up in debuginfo, like
+/// the separate type descriptions for each enum variant. These get an ID
+/// too because they have their own debuginfo node in LLVM IR.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)]
+pub(super) enum UniqueTypeId<'tcx> {
+ /// The ID of a regular type as it shows up at the language level.
+ Ty(Ty<'tcx>, private::HiddenZst),
+ /// The ID for the single DW_TAG_variant_part nested inside the top-level
+ /// DW_TAG_structure_type that describes enums and generators.
+ VariantPart(Ty<'tcx>, private::HiddenZst),
+ /// The ID for the artificial struct type describing a single enum variant.
+ VariantStructType(Ty<'tcx>, VariantIdx, private::HiddenZst),
+ /// The ID of the artificial type we create for VTables.
+ VTableTy(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>, private::HiddenZst),
+}
+
+impl<'tcx> UniqueTypeId<'tcx> {
+ pub fn for_ty(tcx: TyCtxt<'tcx>, t: Ty<'tcx>) -> Self {
+ debug_assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t));
+ UniqueTypeId::Ty(t, private::HiddenZst)
+ }
+
+ pub fn for_enum_variant_part(tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>) -> Self {
+ debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+ UniqueTypeId::VariantPart(enum_ty, private::HiddenZst)
+ }
+
+ pub fn for_enum_variant_struct_type(
+ tcx: TyCtxt<'tcx>,
+ enum_ty: Ty<'tcx>,
+ variant_idx: VariantIdx,
+ ) -> Self {
+ debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+ UniqueTypeId::VariantStructType(enum_ty, variant_idx, private::HiddenZst)
+ }
+
+ pub fn for_vtable_ty(
+ tcx: TyCtxt<'tcx>,
+ self_type: Ty<'tcx>,
+ implemented_trait: Option<PolyExistentialTraitRef<'tcx>>,
+ ) -> Self {
+ debug_assert_eq!(
+ self_type,
+ tcx.normalize_erasing_regions(ParamEnv::reveal_all(), self_type)
+ );
+ debug_assert_eq!(
+ implemented_trait,
+ tcx.normalize_erasing_regions(ParamEnv::reveal_all(), implemented_trait)
+ );
+ UniqueTypeId::VTableTy(self_type, implemented_trait, private::HiddenZst)
+ }
+
+ /// Generates a string version of this [UniqueTypeId], which can be used as the `UniqueId`
+ /// argument of the various `LLVMRustDIBuilderCreate*Type()` methods.
+ ///
+ /// Right now this takes the form of a hex-encoded opaque hash value.
+ pub fn generate_unique_id_string(self, tcx: TyCtxt<'tcx>) -> String {
+ let mut hasher = StableHasher::new();
+ tcx.with_stable_hashing_context(|mut hcx| {
+ hcx.while_hashing_spans(false, |hcx| self.hash_stable(hcx, &mut hasher))
+ });
+ hasher.finish::<Fingerprint>().to_hex()
+ }
+
+ pub fn expect_ty(self) -> Ty<'tcx> {
+ match self {
+ UniqueTypeId::Ty(ty, _) => ty,
+ _ => bug!("Expected `UniqueTypeId::Ty` but found `{:?}`", self),
+ }
+ }
+}
+
+/// The `TypeMap` is where the debug context holds the type metadata nodes
+/// created so far. The debuginfo nodes are identified by `UniqueTypeId`.
+#[derive(Default)]
+pub(crate) struct TypeMap<'ll, 'tcx> {
+ pub(super) unique_id_to_di_node: RefCell<FxHashMap<UniqueTypeId<'tcx>, &'ll DIType>>,
+}
+
+impl<'ll, 'tcx> TypeMap<'ll, 'tcx> {
+ /// Adds a `UniqueTypeId`-to-metadata mapping to the `TypeMap`. The method will
+ /// fail if the mapping already exists.
+ pub(super) fn insert(&self, unique_type_id: UniqueTypeId<'tcx>, metadata: &'ll DIType) {
+ if self.unique_id_to_di_node.borrow_mut().insert(unique_type_id, metadata).is_some() {
+ bug!("type metadata for unique ID '{:?}' is already in the `TypeMap`!", unique_type_id);
+ }
+ }
+
+ pub(super) fn di_node_for_unique_id(
+ &self,
+ unique_type_id: UniqueTypeId<'tcx>,
+ ) -> Option<&'ll DIType> {
+ self.unique_id_to_di_node.borrow().get(&unique_type_id).cloned()
+ }
+}
+
+pub struct DINodeCreationResult<'ll> {
+ pub di_node: &'ll DIType,
+ pub already_stored_in_typemap: bool,
+}
+
+impl<'ll> DINodeCreationResult<'ll> {
+ pub fn new(di_node: &'ll DIType, already_stored_in_typemap: bool) -> Self {
+ DINodeCreationResult { di_node, already_stored_in_typemap }
+ }
+}
+
+#[derive(Debug, Copy, Clone, Eq, PartialEq)]
+pub enum Stub<'ll> {
+ Struct,
+ Union,
+ VTableTy { vtable_holder: &'ll DIType },
+}
+
+pub struct StubInfo<'ll, 'tcx> {
+ metadata: &'ll DIType,
+ unique_type_id: UniqueTypeId<'tcx>,
+}
+
+impl<'ll, 'tcx> StubInfo<'ll, 'tcx> {
+ pub(super) fn new(
+ cx: &CodegenCx<'ll, 'tcx>,
+ unique_type_id: UniqueTypeId<'tcx>,
+ build: impl FnOnce(&CodegenCx<'ll, 'tcx>, /* unique_type_id_str: */ &str) -> &'ll DIType,
+ ) -> StubInfo<'ll, 'tcx> {
+ let unique_type_id_str = unique_type_id.generate_unique_id_string(cx.tcx);
+ let di_node = build(cx, &unique_type_id_str);
+ StubInfo { metadata: di_node, unique_type_id }
+ }
+}
+
+/// Create a stub debuginfo node onto which fields and nested types can be attached.
+pub(super) fn stub<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ kind: Stub<'ll>,
+ unique_type_id: UniqueTypeId<'tcx>,
+ name: &str,
+ (size, align): (Size, Align),
+ containing_scope: Option<&'ll DIScope>,
+ flags: DIFlags,
+) -> StubInfo<'ll, 'tcx> {
+ let empty_array = create_DIArray(DIB(cx), &[]);
+ let unique_type_id_str = unique_type_id.generate_unique_id_string(cx.tcx);
+
+ let metadata = match kind {
+ Stub::Struct | Stub::VTableTy { .. } => {
+ let vtable_holder = match kind {
+ Stub::VTableTy { vtable_holder } => Some(vtable_holder),
+ _ => None,
+ };
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateStructType(
+ DIB(cx),
+ containing_scope,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ flags,
+ None,
+ empty_array,
+ 0,
+ vtable_holder,
+ unique_type_id_str.as_ptr().cast(),
+ unique_type_id_str.len(),
+ )
+ }
+ }
+ Stub::Union => unsafe {
+ llvm::LLVMRustDIBuilderCreateUnionType(
+ DIB(cx),
+ containing_scope,
+ name.as_ptr().cast(),
+ name.len(),
+ unknown_file_metadata(cx),
+ UNKNOWN_LINE_NUMBER,
+ size.bits(),
+ align.bits() as u32,
+ flags,
+ Some(empty_array),
+ 0,
+ unique_type_id_str.as_ptr().cast(),
+ unique_type_id_str.len(),
+ )
+ },
+ };
+ StubInfo { metadata, unique_type_id }
+}
+
+/// This function enables creating debuginfo nodes that can recursively refer to themselves.
+/// It will first insert the given stub into the type map and only then execute the `members`
+/// and `generics` closures passed in. These closures have access to the stub so they can
+/// directly attach fields to them. If the type of a field transitively refers back
+/// to the type currently being built, the stub will already be found in the type map,
+/// which effectively breaks the recursion cycle.
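+///
+/// For example (an illustration, not a special case in this code), when building the node for
+///
+/// ```rust
+/// struct Node {
+/// next: Option<Box<Node>>,
+/// }
+/// ```
+///
+/// the `members` closure requests the debuginfo node for `Option<Box<Node>>`, which
+/// transitively needs `Node` again; that lookup then finds the stub in the type map and
+/// returns it instead of recursing forever.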
+pub(super) fn build_type_with_children<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ stub_info: StubInfo<'ll, 'tcx>,
+ members: impl FnOnce(&CodegenCx<'ll, 'tcx>, &'ll DIType) -> SmallVec<&'ll DIType>,
+ generics: impl FnOnce(&CodegenCx<'ll, 'tcx>) -> SmallVec<&'ll DIType>,
+) -> DINodeCreationResult<'ll> {
+ debug_assert_eq!(
+ debug_context(cx).type_map.di_node_for_unique_id(stub_info.unique_type_id),
+ None
+ );
+
+ debug_context(cx).type_map.insert(stub_info.unique_type_id, stub_info.metadata);
+
+ let members: SmallVec<_> =
+ members(cx, stub_info.metadata).into_iter().map(|node| Some(node)).collect();
+ let generics: SmallVec<Option<&'ll DIType>> =
+ generics(cx).into_iter().map(|node| Some(node)).collect();
+
+ if !(members.is_empty() && generics.is_empty()) {
+ unsafe {
+ let members_array = create_DIArray(DIB(cx), &members[..]);
+ let generics_array = create_DIArray(DIB(cx), &generics[..]);
+ llvm::LLVMRustDICompositeTypeReplaceArrays(
+ DIB(cx),
+ stub_info.metadata,
+ Some(members_array),
+ Some(generics_array),
+ );
+ }
+ }
+
+ DINodeCreationResult { di_node: stub_info.metadata, already_stored_in_typemap: true }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
new file mode 100644
index 000000000..cf591295b
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -0,0 +1,614 @@
+#![doc = include_str!("doc.md")]
+
+use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
+
+use self::metadata::{file_metadata, type_di_node};
+use self::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
+use self::namespace::mangled_name_of_instance;
+use self::utils::{create_DIArray, is_node_local_to_unit, DIB};
+
+use crate::abi::FnAbi;
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{
+ DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType,
+ DIVariable,
+};
+use crate::value::Value;
+
+use rustc_codegen_ssa::debuginfo::type_names;
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lrc;
+use rustc_hir::def_id::{DefId, DefIdMap};
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitable};
+use rustc_session::config::{self, DebugInfo};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+use rustc_span::{self, BytePos, Pos, SourceFile, SourceFileAndLine, SourceFileHash, Span};
+use rustc_target::abi::Size;
+
+use libc::c_uint;
+use smallvec::SmallVec;
+use std::cell::OnceCell;
+use std::cell::RefCell;
+use std::iter;
+use tracing::debug;
+
+mod create_scope_map;
+pub mod gdb;
+pub mod metadata;
+mod namespace;
+mod utils;
+
+pub use self::create_scope_map::compute_mir_scopes;
+pub use self::metadata::build_global_var_di_node;
+pub use self::metadata::extend_scope_to_file;
+
+#[allow(non_upper_case_globals)]
+const DW_TAG_auto_variable: c_uint = 0x100;
+#[allow(non_upper_case_globals)]
+const DW_TAG_arg_variable: c_uint = 0x101;
+
+/// A context object for maintaining all state needed by the debuginfo module.
+pub struct CodegenUnitDebugContext<'ll, 'tcx> {
+ llcontext: &'ll llvm::Context,
+ llmod: &'ll llvm::Module,
+ builder: &'ll mut DIBuilder<'ll>,
+ created_files: RefCell<FxHashMap<Option<(u128, SourceFileHash)>, &'ll DIFile>>,
+
+ type_map: metadata::TypeMap<'ll, 'tcx>,
+ namespace_map: RefCell<DefIdMap<&'ll DIScope>>,
+ recursion_marker_type: OnceCell<&'ll DIType>,
+}
+
+impl Drop for CodegenUnitDebugContext<'_, '_> {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustDIBuilderDispose(&mut *(self.builder as *mut _));
+ }
+ }
+}
+
+impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
+ pub fn new(llmod: &'ll llvm::Module) -> Self {
+ debug!("CodegenUnitDebugContext::new");
+ let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) };
+ // DIBuilder inherits context from the module, so we'd better use the same one
+ let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
+ CodegenUnitDebugContext {
+ llcontext,
+ llmod,
+ builder,
+ created_files: Default::default(),
+ type_map: Default::default(),
+ namespace_map: RefCell::new(Default::default()),
+ recursion_marker_type: OnceCell::new(),
+ }
+ }
+
+ pub fn finalize(&self, sess: &Session) {
+ unsafe {
+ llvm::LLVMRustDIBuilderFinalize(self.builder);
+
+ if !sess.target.is_like_msvc {
+ // Debuginfo generation in LLVM by default uses a higher
+ // version of DWARF than macOS currently understands. However,
+ // we can instruct LLVM to emit an older DWARF version that
+ // macOS does understand. For more info see #11352.
+ // This can be overridden using --llvm-opts -dwarf-version,N.
+ // Android has the same issue (#22398).
+ let dwarf_version = sess
+ .opts
+ .unstable_opts
+ .dwarf_version
+ .unwrap_or(sess.target.default_dwarf_version);
+ llvm::LLVMRustAddModuleFlag(
+ self.llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ "Dwarf Version\0".as_ptr().cast(),
+ dwarf_version,
+ );
+ } else {
+ // Indicate that we want CodeView debug information on MSVC
+ llvm::LLVMRustAddModuleFlag(
+ self.llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ "CodeView\0".as_ptr().cast(),
+ 1,
+ )
+ }
+
+ // Prevent bitcode readers from deleting the debug info.
+ let ptr = "Debug Info Version\0".as_ptr();
+ llvm::LLVMRustAddModuleFlag(
+ self.llmod,
+ llvm::LLVMModFlagBehavior::Warning,
+ ptr.cast(),
+ llvm::LLVMRustDebugMetadataVersion(),
+ );
+ }
+ }
+}
+
+/// Creates any deferred debug metadata nodes.
+pub fn finalize(cx: &CodegenCx<'_, '_>) {
+ if let Some(dbg_cx) = &cx.dbg_cx {
+ debug!("finalize");
+
+ if gdb::needs_gdb_debug_scripts_section(cx) {
+ // Add a .debug_gdb_scripts section to this compile-unit. This will
+ // cause GDB to try to load the gdb_load_rust_pretty_printers.py file,
+ // which activates the Rust pretty printers for the binary this
+ // section is contained in.
+ gdb::get_or_insert_gdb_debug_scripts_section_global(cx);
+ }
+
+ dbg_cx.finalize(cx.sess());
+ }
+}
+
+impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_var_addr(
+ &mut self,
+ dbg_var: &'ll DIVariable,
+ dbg_loc: &'ll DILocation,
+ variable_alloca: Self::Value,
+ direct_offset: Size,
+ indirect_offsets: &[Size],
+ ) {
+ // Convert the direct and indirect offsets to address ops.
+ // FIXME(eddyb) use `const`s instead of getting the values via FFI,
+ // the values should match the ones in the DWARF standard anyway.
+ let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() };
+ let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() };
+ let mut addr_ops = SmallVec::<[u64; 8]>::new();
+
+ if direct_offset.bytes() > 0 {
+ addr_ops.push(op_plus_uconst());
+ addr_ops.push(direct_offset.bytes() as u64);
+ }
+ for &offset in indirect_offsets {
+ addr_ops.push(op_deref());
+ if offset.bytes() > 0 {
+ addr_ops.push(op_plus_uconst());
+ addr_ops.push(offset.bytes() as u64);
+ }
+ }
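+ // E.g. `direct_offset = 8` with `indirect_offsets = [0, 4]` yields
+ // `[op_plus_uconst, 8, op_deref, op_deref, op_plus_uconst, 4]`:
+ // add 8 to the alloca address, dereference, dereference again, then
+ // add 4 to the result.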
+
+ unsafe {
+ // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
+ llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
+ DIB(self.cx()),
+ variable_alloca,
+ dbg_var,
+ addr_ops.as_ptr(),
+ addr_ops.len() as c_uint,
+ dbg_loc,
+ self.llbb(),
+ );
+ }
+ }
+
+ fn set_dbg_loc(&mut self, dbg_loc: &'ll DILocation) {
+ unsafe {
+ let dbg_loc_as_llval = llvm::LLVMRustMetadataAsValue(self.cx().llcx, dbg_loc);
+ llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc_as_llval);
+ }
+ }
+
+ fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
+ gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
+ }
+
+ fn set_var_name(&mut self, value: &'ll Value, name: &str) {
+ // Avoid wasting time if LLVM value names aren't even enabled.
+ if self.sess().fewer_names() {
+ return;
+ }
+
+ // Only function parameters and instructions are local to a function;
+ // don't change the name of anything else (e.g. globals).
+ let param_or_inst = unsafe {
+ llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some()
+ };
+ if !param_or_inst {
+ return;
+ }
+
+ // Avoid replacing the name if it already exists.
+ // While we could combine the names somehow, it'd
+ // get noisy quickly, and the usefulness is dubious.
+ if llvm::get_value_name(value).is_empty() {
+ llvm::set_value_name(value, name.as_bytes());
+ }
+ }
+}
+
+/// A source code location used to generate debug information.
+// FIXME(eddyb) rename this to better indicate it's a duplicate of
+// `rustc_span::Loc` rather than `DILocation`, perhaps by making
+// `lookup_char_pos` return the right information instead.
+pub struct DebugLoc {
+ /// Information about the original source file.
+ pub file: Lrc<SourceFile>,
+ /// The (1-based) line number.
+ pub line: u32,
+ /// The (1-based) column number.
+ pub col: u32,
+}
+
+impl CodegenCx<'_, '_> {
+ /// Looks up debug source information about a `BytePos`.
+ // FIXME(eddyb) rename this to better indicate it's a duplicate of
+ // `lookup_char_pos` rather than `dbg_loc`, perhaps by making
+ // `lookup_char_pos` return the right information instead.
+ pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
+ let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
+ Ok(SourceFileAndLine { sf: file, line }) => {
+ let line_pos = file.line_begin_pos(pos);
+
+ // Use 1-based indexing.
+ let line = (line + 1) as u32;
+ let col = (pos - line_pos).to_u32() + 1;
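+ // E.g. a `pos` three bytes past the start of 0-based line 4
+ // yields `line == 5` and `col == 4`.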
+
+ (file, line, col)
+ }
+ Err(file) => (file, UNKNOWN_LINE_NUMBER, UNKNOWN_COLUMN_NUMBER),
+ };
+
+ // For MSVC, omit the column number.
+ // Otherwise, emit it. This mimics clang behaviour.
+ // See discussion in https://github.com/rust-lang/rust/issues/42921
+ if self.sess().target.is_like_msvc {
+ DebugLoc { file, line, col: UNKNOWN_COLUMN_NUMBER }
+ } else {
+ DebugLoc { file, line, col }
+ }
+ }
+}
+
+impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn create_function_debug_context(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ llfn: &'ll Value,
+ mir: &mir::Body<'tcx>,
+ ) -> Option<FunctionDebugContext<&'ll DIScope, &'ll DILocation>> {
+ if self.sess().opts.debuginfo == DebugInfo::None {
+ return None;
+ }
+
+ // Initialize fn debug context (including scopes).
+ let empty_scope = DebugScope {
+ dbg_scope: self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
+ inlined_at: None,
+ file_start_pos: BytePos(0),
+ file_end_pos: BytePos(0),
+ };
+ let mut fn_debug_context =
+ FunctionDebugContext { scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes) };
+
+ // Fill in all the scopes, with the information from the MIR body.
+ compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
+
+ Some(fn_debug_context)
+ }
+
+ fn dbg_scope_fn(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ maybe_definition_llfn: Option<&'ll Value>,
+ ) -> &'ll DIScope {
+ let tcx = self.tcx;
+
+ let def_id = instance.def_id();
+ let containing_scope = get_containing_scope(self, instance);
+ let span = tcx.def_span(def_id);
+ let loc = self.lookup_debug_loc(span.lo());
+ let file_metadata = file_metadata(self, &loc.file);
+
+ let function_type_metadata = unsafe {
+ let fn_signature = get_function_signature(self, fn_abi);
+ llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
+ };
+
+ let mut name = String::new();
+ type_names::push_item_name(tcx, def_id, false, &mut name);
+
+ // Find the enclosing function, in case this is a closure.
+ let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
+
+ // We look up the generics of the enclosing function and truncate the substs
+ // to their length, in order to cut off the extra substs that closures
+ // and generators append for their synthesized arguments.
+ let generics = tcx.generics_of(enclosing_fn_def_id);
+ let substs = instance.substs.truncate_to(tcx, generics);
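+ // E.g. for a closure defined inside `fn foo<T>()`, `instance.substs`
+ // also carries the closure's synthesized substs (kind, signature,
+ // upvars); truncating to the enclosing function's generics keeps only `T`.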
+
+ type_names::push_generic_params(
+ tcx,
+ tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substs),
+ &mut name,
+ );
+
+ let template_parameters = get_template_parameters(self, generics, substs);
+
+ let linkage_name = &mangled_name_of_instance(self, instance).name;
+ // Omit the linkage_name if it is the same as subprogram name.
+ let linkage_name = if &name == linkage_name { "" } else { linkage_name };
+
+ // FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
+ let scope_line = loc.line;
+
+ let mut flags = DIFlags::FlagPrototyped;
+
+ if fn_abi.ret.layout.abi.is_uninhabited() {
+ flags |= DIFlags::FlagNoReturn;
+ }
+
+ let mut spflags = DISPFlags::SPFlagDefinition;
+ if is_node_local_to_unit(self, def_id) {
+ spflags |= DISPFlags::SPFlagLocalToUnit;
+ }
+ if self.sess().opts.optimize != config::OptLevel::No {
+ spflags |= DISPFlags::SPFlagOptimized;
+ }
+ if let Some((id, _)) = tcx.entry_fn(()) {
+ if id == def_id {
+ spflags |= DISPFlags::SPFlagMainSubprogram;
+ }
+ }
+
+ unsafe {
+ return llvm::LLVMRustDIBuilderCreateFunction(
+ DIB(self),
+ containing_scope,
+ name.as_ptr().cast(),
+ name.len(),
+ linkage_name.as_ptr().cast(),
+ linkage_name.len(),
+ file_metadata,
+ loc.line,
+ function_type_metadata,
+ scope_line,
+ flags,
+ spflags,
+ maybe_definition_llfn,
+ template_parameters,
+ None,
+ );
+ }
+
+ fn get_function_signature<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ ) -> &'ll DIArray {
+ if cx.sess().opts.debuginfo == DebugInfo::Limited {
+ return create_DIArray(DIB(cx), &[]);
+ }
+
+ let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
+
+ // Return type -- llvm::DIBuilder wants this at index 0
+ signature.push(if fn_abi.ret.is_ignore() {
+ None
+ } else {
+ Some(type_di_node(cx, fn_abi.ret.layout.ty))
+ });
+
+ // Arguments types
+ if cx.sess().target.is_like_msvc {
+ // FIXME(#42800):
+ // There is a bug in MSDIA that leads to a crash when it encounters
+ // a fixed-size array of `u8` or something zero-sized in a
+ // function-type (see #40477).
+ // As a workaround, we replace those fixed-size arrays with a
+ // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would
+ // appear as `fn foo(a: u8, b: *const u8)` in debuginfo,
+ // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
+ // This transformed type is wrong, but these function types are
+ // already inaccurate due to ABI adjustments (see #42800).
+ signature.extend(fn_abi.args.iter().map(|arg| {
+ let t = arg.layout.ty;
+ let t = match t.kind() {
+ ty::Array(ct, _)
+ if (*ct == cx.tcx.types.u8) || cx.layout_of(*ct).is_zst() =>
+ {
+ cx.tcx.mk_imm_ptr(*ct)
+ }
+ _ => t,
+ };
+ Some(type_di_node(cx, t))
+ }));
+ } else {
+ signature
+ .extend(fn_abi.args.iter().map(|arg| Some(type_di_node(cx, arg.layout.ty))));
+ }
+
+ create_DIArray(DIB(cx), &signature[..])
+ }
+
+ fn get_template_parameters<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ generics: &ty::Generics,
+ substs: SubstsRef<'tcx>,
+ ) -> &'ll DIArray {
+ if substs.types().next().is_none() {
+ return create_DIArray(DIB(cx), &[]);
+ }
+
+ // Again, only create type information if full debuginfo is enabled
+ let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
+ let names = get_parameter_names(cx, generics);
+ iter::zip(substs, names)
+ .filter_map(|(kind, name)| {
+ if let GenericArgKind::Type(ty) = kind.unpack() {
+ let actual_type =
+ cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
+ let actual_type_metadata = type_di_node(cx, actual_type);
+ let name = name.as_str();
+ Some(unsafe {
+ Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
+ DIB(cx),
+ None,
+ name.as_ptr().cast(),
+ name.len(),
+ actual_type_metadata,
+ ))
+ })
+ } else {
+ None
+ }
+ })
+ .collect()
+ } else {
+ vec![]
+ };
+
+ create_DIArray(DIB(cx), &template_params)
+ }
+
+ fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
+ let mut names = generics.parent.map_or_else(Vec::new, |def_id| {
+ get_parameter_names(cx, cx.tcx.generics_of(def_id))
+ });
+ names.extend(generics.params.iter().map(|param| param.name));
+ names
+ }
+
+ fn get_containing_scope<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ instance: Instance<'tcx>,
+ ) -> &'ll DIScope {
+ // First, let's see if this is a method within an inherent impl. If so,
+ // we want to make the resulting subroutine DIE a child of the
+ // subroutine's self-type.
+ let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
+ // If the method does *not* belong to a trait, proceed
+ if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
+ let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
+ instance.substs,
+ ty::ParamEnv::reveal_all(),
+ cx.tcx.type_of(impl_def_id),
+ );
+
+ // Only "class" methods are generally understood by LLVM,
+ // so avoid methods on other types (e.g., `<*mut T>::null`).
+ match impl_self_ty.kind() {
+ ty::Adt(def, ..) if !def.is_box() => {
+ // Again, only create type information if full debuginfo is enabled
+ if cx.sess().opts.debuginfo == DebugInfo::Full
+ && !impl_self_ty.needs_subst()
+ {
+ Some(type_di_node(cx, impl_self_ty))
+ } else {
+ Some(namespace::item_namespace(cx, def.did()))
+ }
+ }
+ _ => None,
+ }
+ } else {
+ // For trait method impls we still use the "parallel namespace"
+ // strategy
+ None
+ }
+ });
+
+ self_type.unwrap_or_else(|| {
+ namespace::item_namespace(
+ cx,
+ DefId {
+ krate: instance.def_id().krate,
+ index: cx
+ .tcx
+ .def_key(instance.def_id())
+ .parent
+ .expect("get_containing_scope: missing parent?"),
+ },
+ )
+ })
+ }
+ }
+
+ fn dbg_loc(
+ &self,
+ scope: &'ll DIScope,
+ inlined_at: Option<&'ll DILocation>,
+ span: Span,
+ ) -> &'ll DILocation {
+ let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
+
+ unsafe { llvm::LLVMRustDIBuilderCreateDebugLocation(line, col, scope, inlined_at) }
+ }
+
+ fn create_vtable_debuginfo(
+ &self,
+ ty: Ty<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+ vtable: Self::Value,
+ ) {
+ metadata::create_vtable_di_node(self, ty, trait_ref, vtable)
+ }
+
+ fn extend_scope_to_file(
+ &self,
+ scope_metadata: &'ll DIScope,
+ file: &rustc_span::SourceFile,
+ ) -> &'ll DILexicalBlock {
+ metadata::extend_scope_to_file(self, scope_metadata, file)
+ }
+
+ fn debuginfo_finalize(&self) {
+ finalize(self)
+ }
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn create_dbg_var(
+ &self,
+ variable_name: Symbol,
+ variable_type: Ty<'tcx>,
+ scope_metadata: &'ll DIScope,
+ variable_kind: VariableKind,
+ span: Span,
+ ) -> &'ll DIVariable {
+ let loc = self.lookup_debug_loc(span.lo());
+ let file_metadata = file_metadata(self, &loc.file);
+
+ let type_metadata = type_di_node(self, variable_type);
+
+ let (argument_index, dwarf_tag) = match variable_kind {
+ ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
+ LocalVariable => (0, DW_TAG_auto_variable),
+ };
+ let align = self.align_of(variable_type);
+
+ let name = variable_name.as_str();
+ unsafe {
+ llvm::LLVMRustDIBuilderCreateVariable(
+ DIB(self),
+ dwarf_tag,
+ scope_metadata,
+ name.as_ptr().cast(),
+ name.len(),
+ file_metadata,
+ loc.line,
+ type_metadata,
+ true,
+ DIFlags::FlagZero,
+ argument_index,
+ align.bytes() as u32,
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
new file mode 100644
index 000000000..d5ea48c31
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs
@@ -0,0 +1,48 @@
+// Namespace Handling.
+
+use super::utils::{debug_context, DIB};
+use rustc_codegen_ssa::debuginfo::type_names;
+use rustc_middle::ty::{self, Instance};
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::DIScope;
+use rustc_hir::def_id::DefId;
+
+pub fn mangled_name_of_instance<'a, 'tcx>(
+ cx: &CodegenCx<'a, 'tcx>,
+ instance: Instance<'tcx>,
+) -> ty::SymbolName<'tcx> {
+ let tcx = cx.tcx;
+ tcx.symbol_name(instance)
+}
+
+pub fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
+ if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) {
+ return scope;
+ }
+
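+ // Walk up the def-path recursively, e.g. an item `a::b::Item` gets the
+ // namespace scope chain `a` -> `b`; each created scope is memoized in
+ // `namespace_map`.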
+ let def_key = cx.tcx.def_key(def_id);
+ let parent_scope = def_key
+ .parent
+ .map(|parent| item_namespace(cx, DefId { krate: def_id.krate, index: parent }));
+
+ let namespace_name_string = {
+ let mut output = String::new();
+ type_names::push_item_name(cx.tcx, def_id, false, &mut output);
+ output
+ };
+
+ let scope = unsafe {
+ llvm::LLVMRustDIBuilderCreateNameSpace(
+ DIB(cx),
+ parent_scope,
+ namespace_name_string.as_ptr().cast(),
+ namespace_name_string.len(),
+ false, // ExportSymbols (only relevant for C++ anonymous namespaces)
+ )
+ };
+
+ debug_context(cx).namespace_map.borrow_mut().insert(def_id, scope);
+ scope
+}
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
new file mode 100644
index 000000000..8f2436739
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs
@@ -0,0 +1,99 @@
+// Utility Functions.
+
+use super::namespace::item_namespace;
+use super::CodegenUnitDebugContext;
+
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
+use rustc_middle::ty::{self, DefIdTree, Ty};
+use tracing::trace;
+
+use crate::common::CodegenCx;
+use crate::llvm;
+use crate::llvm::debuginfo::{DIArray, DIBuilder, DIDescriptor, DIScope};
+
+pub fn is_node_local_to_unit(cx: &CodegenCx<'_, '_>, def_id: DefId) -> bool {
+ // The is_local_to_unit flag indicates whether a function is local to the
+ // current compilation unit (i.e., if it is *static* in the C-sense). The
+ // *reachable* set should provide a good approximation of this, as it
+ // contains everything that might leak out of the current crate (by being
+ // externally visible or by being inlined into something externally
+// visible). It might be better to use the `exported_items` set from
+// `driver::CrateAnalysis` in the future, but at the moment this set is not
+// available in the codegen pass.
+ !cx.tcx.is_reachable_non_generic(def_id)
+}
+
+#[allow(non_snake_case)]
+pub fn create_DIArray<'ll>(
+ builder: &DIBuilder<'ll>,
+ arr: &[Option<&'ll DIDescriptor>],
+) -> &'ll DIArray {
+ unsafe { llvm::LLVMRustDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) }
+}
+
+#[inline]
+pub fn debug_context<'a, 'll, 'tcx>(
+ cx: &'a CodegenCx<'ll, 'tcx>,
+) -> &'a CodegenUnitDebugContext<'ll, 'tcx> {
+ cx.dbg_cx.as_ref().unwrap()
+}
+
+#[inline]
+#[allow(non_snake_case)]
+pub fn DIB<'a, 'll>(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> {
+ cx.dbg_cx.as_ref().unwrap().builder
+}
+
+pub fn get_namespace_for_item<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
+ item_namespace(cx, cx.tcx.parent(def_id))
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub(crate) enum FatPtrKind {
+ Slice,
+ Dyn,
+}
+
+/// Determines if `pointee_ty` is slice-like or trait-object-like, i.e.
+/// if the second field of the fat pointer is a length or a vtable-pointer.
+/// If `pointee_ty` does not require a fat pointer (because it is Sized) then
+/// the function returns `None`.
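+///
+/// For example, a `pointee_ty` of `[u8]` or `str` yields `FatPtrKind::Slice`,
+/// `dyn Trait` yields `FatPtrKind::Dyn`, and a sized pointee such as `u32`
+/// yields `None`.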
+pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ pointee_ty: Ty<'tcx>,
+) -> Option<FatPtrKind> {
+ let pointee_tail_ty = cx.tcx.struct_tail_erasing_lifetimes(pointee_ty, cx.param_env());
+ let layout = cx.layout_of(pointee_tail_ty);
+ trace!(
+ "fat_pointer_kind: {:?} has layout {:?} (is_unsized? {})",
+ pointee_tail_ty,
+ layout,
+ layout.is_unsized()
+ );
+
+ if !layout.is_unsized() {
+ return None;
+ }
+
+ match *pointee_tail_ty.kind() {
+ ty::Str | ty::Slice(_) => Some(FatPtrKind::Slice),
+ ty::Dynamic(..) => Some(FatPtrKind::Dyn),
+ ty::Foreign(_) => {
+ // Assert that pointers to foreign types really are thin:
+ debug_assert_eq!(
+ cx.size_of(cx.tcx.mk_imm_ptr(pointee_tail_ty)),
+ cx.size_of(cx.tcx.mk_imm_ptr(cx.tcx.types.u8))
+ );
+ None
+ }
+ _ => {
+ // For all other pointee types we should already have returned None
+ // at the beginning of the function.
+ panic!(
+ "fat_pointer_kind() - Encountered unexpected `pointee_tail_ty`: {:?}",
+ pointee_tail_ty
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
new file mode 100644
index 000000000..fa0ecd18f
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -0,0 +1,146 @@
+//! Declare various LLVM values.
+//!
+//! Prefer using functions and methods from this module rather than calling LLVM
+//! functions directly. These functions do some additional work to ensure we do
+//! the right thing given the preconceptions of codegen.
+//!
+//! Some useful guidelines:
+//!
+//! * Use declare_* family of methods if you are declaring, but are not
+//! interested in defining the Value they return.
+//! * Use define_* family of methods when you might be defining the Value.
+//! * When in doubt, define.
+
+use crate::abi::{FnAbi, FnAbiLlvmExt};
+use crate::attributes;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm::AttributePlace::Function;
+use crate::type_::Type;
+use crate::value::Value;
+use rustc_codegen_ssa::traits::TypeMembershipMethods;
+use rustc_middle::ty::Ty;
+use rustc_symbol_mangling::typeid::typeid_for_fnabi;
+use smallvec::SmallVec;
+use tracing::debug;
+
+/// Declare a function.
+///
+/// If there’s a value with the same name already declared, the function will
+/// update the declaration and return the existing Value instead.
+fn declare_raw_fn<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ name: &str,
+ callconv: llvm::CallConv,
+ unnamed: llvm::UnnamedAddr,
+ ty: &'ll Type,
+) -> &'ll Value {
+ debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
+ let llfn = unsafe {
+ llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
+ };
+
+ llvm::SetFunctionCallConv(llfn, callconv);
+ llvm::SetUnnamedAddress(llfn, unnamed);
+
+ let mut attrs = SmallVec::<[_; 4]>::new();
+
+ if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.disable_redzone) {
+ attrs.push(llvm::AttributeKind::NoRedZone.create_attr(cx.llcx));
+ }
+
+ attrs.extend(attributes::non_lazy_bind_attr(cx));
+
+ attributes::apply_to_llfn(llfn, Function, &attrs);
+
+ llfn
+}
+
+impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
+ /// Declare a global value.
+ ///
+ /// If there’s a value with the same name already declared, the function will
+ /// return its Value instead.
+ pub fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value {
+ debug!("declare_global(name={:?})", name);
+ unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) }
+ }
+
+ /// Declare a C ABI function.
+ ///
+ /// Only use this for foreign function ABIs and glue. For Rust functions use
+ /// `declare_fn` instead.
+ ///
+ /// If there’s a value with the same name already declared, the function will
+ /// update the declaration and return the existing Value instead.
+ pub fn declare_cfn(
+ &self,
+ name: &str,
+ unnamed: llvm::UnnamedAddr,
+ fn_type: &'ll Type,
+ ) -> &'ll Value {
+ declare_raw_fn(self, name, llvm::CCallConv, unnamed, fn_type)
+ }
+
+ /// Declare a Rust function.
+ ///
+ /// If there’s a value with the same name already declared, the function will
+ /// update the declaration and return the existing Value instead.
+ pub fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Value {
+ debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
+
+ // Function addresses in Rust are never significant, allowing functions to
+ // be merged.
+ let llfn = declare_raw_fn(
+ self,
+ name,
+ fn_abi.llvm_cconv(),
+ llvm::UnnamedAddr::Global,
+ fn_abi.llvm_type(self),
+ );
+ fn_abi.apply_attrs_llfn(self, llfn);
+
+ if self.tcx.sess.is_sanitizer_cfi_enabled() {
+ let typeid = typeid_for_fnabi(self.tcx, fn_abi);
+ self.set_type_metadata(llfn, typeid);
+ }
+
+ llfn
+ }
+
+ /// Declare a global with an intention to define it.
+ ///
+ /// Use this function when you intend to define a global. This function will
+ /// return `None` if the name already has a definition associated with it. In that
+ /// case an error should be reported to the user, because it usually happens due
+ /// to a user error (e.g., misuse of `#[no_mangle]` or `#[export_name]` attributes).
+ pub fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
+ if self.get_defined_value(name).is_some() {
+ None
+ } else {
+ Some(self.declare_global(name, ty))
+ }
+ }
+
+ /// Declare a private global.
+ ///
+ /// Use this function when you intend to define a global without a name.
+ pub fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
+ unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) }
+ }
+
+ /// Gets declared value by name.
+ pub fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
+ debug!("get_declared_value(name={:?})", name);
+ unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) }
+ }
+
+ /// Gets defined or externally defined (AvailableExternally linkage) value by
+ /// name.
+ pub fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
+ self.get_declared_value(name).and_then(|val| {
+ let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
+ if !declaration { Some(val) } else { None }
+ })
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
new file mode 100644
index 000000000..9f3647492
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -0,0 +1,1924 @@
+use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode};
+use crate::builder::Builder;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::va_arg::emit_va_arg;
+use crate::value::Value;
+
+use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh};
+use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
+use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir as hir;
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf};
+use rustc_middle::ty::{self, Ty};
+use rustc_middle::{bug, span_bug};
+use rustc_span::{sym, symbol::kw, Span, Symbol};
+use rustc_target::abi::{self, Align, HasDataLayout, Primitive};
+use rustc_target::spec::{HasTargetSpec, PanicStrategy};
+
+use std::cmp::Ordering;
+use std::iter;
+
+fn get_simple_intrinsic<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ name: Symbol,
+) -> Option<(&'ll Type, &'ll Value)> {
+ let llvm_name = match name {
+ sym::sqrtf32 => "llvm.sqrt.f32",
+ sym::sqrtf64 => "llvm.sqrt.f64",
+ sym::powif32 => "llvm.powi.f32",
+ sym::powif64 => "llvm.powi.f64",
+ sym::sinf32 => "llvm.sin.f32",
+ sym::sinf64 => "llvm.sin.f64",
+ sym::cosf32 => "llvm.cos.f32",
+ sym::cosf64 => "llvm.cos.f64",
+ sym::powf32 => "llvm.pow.f32",
+ sym::powf64 => "llvm.pow.f64",
+ sym::expf32 => "llvm.exp.f32",
+ sym::expf64 => "llvm.exp.f64",
+ sym::exp2f32 => "llvm.exp2.f32",
+ sym::exp2f64 => "llvm.exp2.f64",
+ sym::logf32 => "llvm.log.f32",
+ sym::logf64 => "llvm.log.f64",
+ sym::log10f32 => "llvm.log10.f32",
+ sym::log10f64 => "llvm.log10.f64",
+ sym::log2f32 => "llvm.log2.f32",
+ sym::log2f64 => "llvm.log2.f64",
+ sym::fmaf32 => "llvm.fma.f32",
+ sym::fmaf64 => "llvm.fma.f64",
+ sym::fabsf32 => "llvm.fabs.f32",
+ sym::fabsf64 => "llvm.fabs.f64",
+ sym::minnumf32 => "llvm.minnum.f32",
+ sym::minnumf64 => "llvm.minnum.f64",
+ sym::maxnumf32 => "llvm.maxnum.f32",
+ sym::maxnumf64 => "llvm.maxnum.f64",
+ sym::copysignf32 => "llvm.copysign.f32",
+ sym::copysignf64 => "llvm.copysign.f64",
+ sym::floorf32 => "llvm.floor.f32",
+ sym::floorf64 => "llvm.floor.f64",
+ sym::ceilf32 => "llvm.ceil.f32",
+ sym::ceilf64 => "llvm.ceil.f64",
+ sym::truncf32 => "llvm.trunc.f32",
+ sym::truncf64 => "llvm.trunc.f64",
+ sym::rintf32 => "llvm.rint.f32",
+ sym::rintf64 => "llvm.rint.f64",
+ sym::nearbyintf32 => "llvm.nearbyint.f32",
+ sym::nearbyintf64 => "llvm.nearbyint.f64",
+ sym::roundf32 => "llvm.round.f32",
+ sym::roundf64 => "llvm.round.f64",
+ _ => return None,
+ };
+ Some(cx.get_intrinsic(llvm_name))
+}
+
+impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
+ fn codegen_intrinsic_call(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ args: &[OperandRef<'tcx, &'ll Value>],
+ llresult: &'ll Value,
+ span: Span,
+ ) {
+ let tcx = self.tcx;
+ let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+
+ let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
+ bug!("expected fn item type, found {}", callee_ty);
+ };
+
+ let sig = callee_ty.fn_sig(tcx);
+ let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+ let arg_tys = sig.inputs();
+ let ret_ty = sig.output();
+ let name = tcx.item_name(def_id);
+
+ let llret_ty = self.layout_of(ret_ty).llvm_type(self);
+ let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+ let simple = get_simple_intrinsic(self, name);
+ let llval = match name {
+ _ if simple.is_some() => {
+ let (simple_ty, simple_fn) = simple.unwrap();
+ self.call(
+ simple_ty,
+ simple_fn,
+ &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+ None,
+ )
+ }
+ sym::likely => {
+ self.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(true)])
+ }
+ sym::unlikely => self
+ .call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(false)]),
+ kw::Try => {
+ try_intrinsic(
+ self,
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ llresult,
+ );
+ return;
+ }
+ sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]),
+ sym::va_copy => {
+ self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
+ }
+ sym::va_arg => {
+ match fn_abi.ret.layout.abi {
+ abi::Abi::Scalar(scalar) => {
+ match scalar.primitive() {
+ Primitive::Int(..) => {
+ if self.cx().size_of(ret_ty).bytes() < 4 {
+ // `va_arg` should not be called on an integer type
+ // less than 4 bytes in length. If it is, promote
+ // the integer to an `i32` and truncate the result
+ // back to the smaller type.
+ let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
+ self.trunc(promoted_result, llret_ty)
+ } else {
+ emit_va_arg(self, args[0], ret_ty)
+ }
+ }
+ Primitive::F64 | Primitive::Pointer => {
+ emit_va_arg(self, args[0], ret_ty)
+ }
+ // `va_arg` should never be used with the return type f32.
+ Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
+ }
+ }
+ _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
+ }
+ }
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ let tp_ty = substs.type_at(0);
+ let ptr = args[0].immediate();
+ let load = if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let llty = ty.llvm_type(self);
+ let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
+ self.volatile_load(llty, ptr)
+ } else {
+ self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
+ };
+ let align = if name == sym::unaligned_volatile_load {
+ 1
+ } else {
+ self.align_of(tp_ty).bytes() as u32
+ };
+ unsafe {
+ llvm::LLVMSetAlignment(load, align);
+ }
+ self.to_immediate(load, self.layout_of(tp_ty))
+ }
+ sym::volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.volatile_store(self, dst);
+ return;
+ }
+ sym::unaligned_volatile_store => {
+ let dst = args[0].deref(self.cx());
+ args[1].val.unaligned_volatile_store(self, dst);
+ return;
+ }
+ sym::prefetch_read_data
+ | sym::prefetch_write_data
+ | sym::prefetch_read_instruction
+ | sym::prefetch_write_instruction => {
+ let (rw, cache_type) = match name {
+ sym::prefetch_read_data => (0, 1),
+ sym::prefetch_write_data => (1, 1),
+ sym::prefetch_read_instruction => (0, 0),
+ sym::prefetch_write_instruction => (1, 0),
+ _ => bug!(),
+ };
+ self.call_intrinsic(
+ "llvm.prefetch",
+ &[
+ args[0].immediate(),
+ self.const_i32(rw),
+ args[1].immediate(),
+ self.const_i32(cache_type),
+ ],
+ )
+ }
+ sym::ctlz
+ | sym::ctlz_nonzero
+ | sym::cttz
+ | sym::cttz_nonzero
+ | sym::ctpop
+ | sym::bswap
+ | sym::bitreverse
+ | sym::rotate_left
+ | sym::rotate_right
+ | sym::saturating_add
+ | sym::saturating_sub => {
+ let ty = arg_tys[0];
+ match int_type_width_signed(ty, self) {
+ Some((width, signed)) => match name {
+ sym::ctlz | sym::cttz => {
+ let y = self.const_bool(false);
+ self.call_intrinsic(
+ &format!("llvm.{}.i{}", name, width),
+ &[args[0].immediate(), y],
+ )
+ }
+ sym::ctlz_nonzero => {
+ let y = self.const_bool(true);
+ let llvm_name = &format!("llvm.ctlz.i{}", width);
+ self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
+ }
+ sym::cttz_nonzero => {
+ let y = self.const_bool(true);
+ let llvm_name = &format!("llvm.cttz.i{}", width);
+ self.call_intrinsic(llvm_name, &[args[0].immediate(), y])
+ }
+ sym::ctpop => self.call_intrinsic(
+ &format!("llvm.ctpop.i{}", width),
+ &[args[0].immediate()],
+ ),
+ sym::bswap => {
+ if width == 8 {
+ args[0].immediate() // byte-swapping a u8/i8 is just a no-op
+ } else {
+ self.call_intrinsic(
+ &format!("llvm.bswap.i{}", width),
+ &[args[0].immediate()],
+ )
+ }
+ }
+ sym::bitreverse => self.call_intrinsic(
+ &format!("llvm.bitreverse.i{}", width),
+ &[args[0].immediate()],
+ ),
+ sym::rotate_left | sym::rotate_right => {
+ let is_left = name == sym::rotate_left;
+ let val = args[0].immediate();
+ let raw_shift = args[1].immediate();
+ // rotate = funnel shift with first two args the same
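+ // e.g. `rotate_left::<u32>(x, s)` lowers to `llvm.fshl.i32(x, x, s)`.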
+ let llvm_name =
+ &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
+ self.call_intrinsic(llvm_name, &[val, val, raw_shift])
+ }
+ sym::saturating_add | sym::saturating_sub => {
+ let is_add = name == sym::saturating_add;
+ let lhs = args[0].immediate();
+ let rhs = args[1].immediate();
+ let llvm_name = &format!(
+ "llvm.{}{}.sat.i{}",
+ if signed { 's' } else { 'u' },
+ if is_add { "add" } else { "sub" },
+ width
+ );
+ self.call_intrinsic(llvm_name, &[lhs, rhs])
+ }
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ tcx.sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ return;
+ }
+ }
+ }
+
+ sym::raw_eq => {
+ use abi::Abi::*;
+ let tp_ty = substs.type_at(0);
+ let layout = self.layout_of(tp_ty).layout;
+ let use_integer_compare = match layout.abi() {
+ Scalar(_) | ScalarPair(_, _) => true,
+ Uninhabited | Vector { .. } => false,
+ Aggregate { .. } => {
+ // For rusty ABIs, small aggregates are actually passed
+ // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+ // so we re-use that same threshold here.
+ layout.size() <= self.data_layout().pointer_size * 2
+ }
+ };
+
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ if layout.size().bytes() == 0 {
+ self.const_bool(true)
+ } else if use_integer_compare {
+ let integer_ty = self.type_ix(layout.size().bits());
+ let ptr_ty = self.type_ptr_to(integer_ty);
+ let a_ptr = self.bitcast(a, ptr_ty);
+ let a_val = self.load(integer_ty, a_ptr, layout.align().abi);
+ let b_ptr = self.bitcast(b, ptr_ty);
+ let b_val = self.load(integer_ty, b_ptr, layout.align().abi);
+ self.icmp(IntPredicate::IntEQ, a_val, b_val)
+ } else {
+ let i8p_ty = self.type_i8p();
+ let a_ptr = self.bitcast(a, i8p_ty);
+ let b_ptr = self.bitcast(b, i8p_ty);
+ let n = self.const_usize(layout.size().bytes());
+ let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
+ match self.cx.sess().target.arch.as_ref() {
+ "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
+ _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
+ }
+ }
+ }
+
+ sym::black_box => {
+ args[0].val.store(self, result);
+
+ // We need to "use" the argument in some way LLVM can't introspect, and on
+ // targets that support it we can typically leverage inline assembly to do
+ // this. LLVM's interpretation of inline assembly is that it's, well, a black
+ // box. This isn't the greatest implementation since it probably deoptimizes
+ // more than we want, but it's so far good enough.
+ crate::asm::inline_asm_call(
+ self,
+ "",
+ "r,~{memory}",
+ &[result.llval],
+ self.type_void(),
+ true,
+ false,
+ llvm::AsmDialect::Att,
+ &[span],
+ false,
+ None,
+ )
+ .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`"));
+
+ // We have copied the value to `result` already.
+ return;
+ }
+
+ _ if name.as_str().starts_with("simd_") => {
+ match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+ Ok(llval) => llval,
+ Err(()) => return,
+ }
+ }
+
+ _ => bug!("unknown intrinsic '{}'", name),
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
+ let ptr = self.pointercast(result.llval, ptr_llty);
+ self.store(llval, ptr, result.align);
+ } else {
+ OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
+ .val
+ .store(self, result);
+ }
+ }
+ }
+
+ fn abort(&mut self) {
+ self.call_intrinsic("llvm.trap", &[]);
+ }
+
+ fn assume(&mut self, val: Self::Value) {
+ self.call_intrinsic("llvm.assume", &[val]);
+ }
+
+ fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
+ self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)])
+ }
+
+ fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
+ // Test the called operand using the llvm.type.test intrinsic. The LowerTypeTests link-time
+ // optimization pass replaces calls to this intrinsic with code to test type membership.
+ let i8p_ty = self.type_i8p();
+ let bitcast = self.bitcast(pointer, i8p_ty);
+ self.call_intrinsic("llvm.type.test", &[bitcast, typeid])
+ }
+
+ fn type_checked_load(
+ &mut self,
+ llvtable: &'ll Value,
+ vtable_byte_offset: u64,
+ typeid: &'ll Value,
+ ) -> Self::Value {
+ let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32);
+ self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid])
+ }
+
+ fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
+ self.call_intrinsic("llvm.va_start", &[va_list])
+ }
+
+ fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
+ self.call_intrinsic("llvm.va_end", &[va_list])
+ }
+}
+
+fn try_intrinsic<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ try_func: &'ll Value,
+ data: &'ll Value,
+ catch_func: &'ll Value,
+ dest: &'ll Value,
+) {
+ if bx.sess().panic_strategy() == PanicStrategy::Abort {
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.call(try_func_ty, try_func, &[data], None);
+ // Return 0 unconditionally from the intrinsic call;
+ // we can never unwind.
+ let ret_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(bx.const_i32(0), dest, ret_align);
+ } else if wants_msvc_seh(bx.sess()) {
+ codegen_msvc_try(bx, try_func, data, catch_func, dest);
+ } else if bx.sess().target.os == "emscripten" {
+ codegen_emcc_try(bx, try_func, data, catch_func, dest);
+ } else {
+ codegen_gnu_try(bx, try_func, data, catch_func, dest);
+ }
+}
+
+// MSVC's definition of the `rust_try` function.
+//
+// This implementation uses the new exception handling instructions in LLVM,
+// which have support in LLVM for SEH on MSVC targets. Although these
+// instructions are meant to work for all targets, as of this writing
+// LLVM does not recommend using them, since the old ones are still
+// better optimized.
+fn codegen_msvc_try<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ try_func: &'ll Value,
+ data: &'ll Value,
+ catch_func: &'ll Value,
+ dest: &'ll Value,
+) {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+ bx.set_personality_fn(bx.eh_personality());
+
+ let normal = bx.append_sibling_block("normal");
+ let catchswitch = bx.append_sibling_block("catchswitch");
+ let catchpad_rust = bx.append_sibling_block("catchpad_rust");
+ let catchpad_foreign = bx.append_sibling_block("catchpad_foreign");
+ let caught = bx.append_sibling_block("caught");
+
+ let try_func = llvm::get_param(bx.llfn(), 0);
+ let data = llvm::get_param(bx.llfn(), 1);
+ let catch_func = llvm::get_param(bx.llfn(), 2);
+
+ // We're generating an IR snippet that looks like:
+ //
+ // declare i32 @rust_try(%try_func, %data, %catch_func) {
+ // %slot = alloca i8*
+ // invoke %try_func(%data) to label %normal unwind label %catchswitch
+ //
+ // normal:
+ // ret i32 0
+ //
+ // catchswitch:
+ // %cs = catchswitch within none [%catchpad_rust, %catchpad_foreign] unwind to caller
+ //
+ // catchpad_rust:
+ // %tok = catchpad within %cs [%type_descriptor, 8, %slot]
+ // %ptr = load %slot
+ // call %catch_func(%data, %ptr)
+ // catchret from %tok to label %caught
+ //
+ // catchpad_foreign:
+ // %tok = catchpad within %cs [null, 64, null]
+ // call %catch_func(%data, null)
+ // catchret from %tok to label %caught
+ //
+ // caught:
+ // ret i32 1
+ // }
+ //
+ // This structure follows the basic usage of throw/try/catch in LLVM.
+ // For example, compile this C++ snippet to see what LLVM generates:
+ //
+ // struct rust_panic {
+ // rust_panic(const rust_panic&);
+ // ~rust_panic();
+ //
+ // void* x[2];
+ // };
+ //
+ // int __rust_try(
+ // void (*try_func)(void*),
+ // void *data,
+ // void (*catch_func)(void*, void*) noexcept
+ // ) {
+ // try {
+ // try_func(data);
+ // return 0;
+ // } catch(rust_panic& a) {
+ // catch_func(data, &a);
+ // return 1;
+ // } catch(...) {
+ // catch_func(data, NULL);
+ // return 1;
+ // }
+ // }
+ //
+ // More information can be found in libstd's seh.rs implementation.
+ let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+ let slot = bx.alloca(bx.type_i8p(), ptr_align);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, try_func, &[data], normal, catchswitch, None);
+
+ bx.switch_to_block(normal);
+ bx.ret(bx.const_i32(0));
+
+ bx.switch_to_block(catchswitch);
+ let cs = bx.catch_switch(None, None, &[catchpad_rust, catchpad_foreign]);
+
+ // We can't use the TypeDescriptor defined in libpanic_unwind because it
+ // might be in another DLL and the SEH encoding only supports specifying
+ // a TypeDescriptor from the current module.
+ //
+ // However this isn't an issue since the MSVC runtime uses string
+ // comparison on the type name to match TypeDescriptors rather than
+ // pointer equality.
+ //
+ // So instead we generate a new TypeDescriptor in each module that uses
+ // `try` and let the linker merge duplicate definitions in the same
+ // module.
+ //
+ // When modifying, make sure that the type_name string exactly matches
+ // the one used in src/libpanic_unwind/seh.rs.
+ let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
+ let type_name = bx.const_bytes(b"rust_panic\0");
+ let type_info =
+ bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
+ let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
+ unsafe {
+ llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
+ llvm::SetUniqueComdat(bx.llmod, tydesc);
+ llvm::LLVMSetInitializer(tydesc, type_info);
+ }
+
+ // The flag value of 8 indicates that we are catching the exception by
+ // reference instead of by value. We can't use catch by value because
+ // that requires copying the exception object, which we don't support
+ // since our exception object effectively contains a Box.
+ //
+ // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
+ bx.switch_to_block(catchpad_rust);
+ let flags = bx.const_i32(8);
+ let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
+ let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ bx.call(catch_ty, catch_func, &[data, ptr], Some(&funclet));
+ bx.catch_ret(&funclet, caught);
+
+ // The flag value of 64 indicates a "catch-all".
+ bx.switch_to_block(catchpad_foreign);
+ let flags = bx.const_i32(64);
+ let null = bx.const_null(bx.type_i8p());
+ let funclet = bx.catch_pad(cs, &[null, flags, null]);
+ bx.call(catch_ty, catch_func, &[data, null], Some(&funclet));
+ bx.catch_ret(&funclet, caught);
+
+ bx.switch_to_block(caught);
+ bx.ret(bx.const_i32(1));
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+ let i32_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(ret, dest, i32_align);
+}
+
+// Definition of the standard `try` function for Rust using the GNU-like model
+// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
+// instructions).
+//
+// This codegen is a little surprising because we always call a shim
+// function instead of inlining the call to `invoke` manually here. This is done
+// because in LLVM we're only allowed to have one personality per function
+// definition. The call to the `try` intrinsic is being inlined into the
+// function calling it, and that function may already have other personality
+// functions in play. By calling a shim we're guaranteed that our shim will have
+// the right personality function.
+fn codegen_gnu_try<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ try_func: &'ll Value,
+ data: &'ll Value,
+ catch_func: &'ll Value,
+ dest: &'ll Value,
+) {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+ // Codegens the shims described above:
+ //
+ // bx:
+ // invoke %try_func(%data) normal %normal unwind %catch
+ //
+ // normal:
+ // ret 0
+ //
+ // catch:
+ // (%ptr, _) = landingpad
+ // call %catch_func(%data, %ptr)
+ // ret 1
+ let then = bx.append_sibling_block("then");
+ let catch = bx.append_sibling_block("catch");
+
+ let try_func = llvm::get_param(bx.llfn(), 0);
+ let data = llvm::get_param(bx.llfn(), 1);
+ let catch_func = llvm::get_param(bx.llfn(), 2);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
+
+ bx.switch_to_block(then);
+ bx.ret(bx.const_i32(0));
+
+ // Type indicator for the exception being thrown.
+ //
+ // The first value in this tuple is a pointer to the exception object
+ // being thrown. The second value is a "selector" indicating which of
+ // the landing pad clauses the exception's type had been matched to.
+ // rust_try ignores the selector.
+ bx.switch_to_block(catch);
+ let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+ let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
+ let tydesc = bx.const_null(bx.type_i8p());
+ bx.add_clause(vals, tydesc);
+ let ptr = bx.extract_value(vals, 0);
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ bx.call(catch_ty, catch_func, &[data, ptr], None);
+ bx.ret(bx.const_i32(1));
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+ let i32_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(ret, dest, i32_align);
+}
+
+// Variant of codegen_gnu_try used for emscripten where Rust panics are
+// implemented using C++ exceptions. Here we use exceptions of a specific type
+// (`struct rust_panic`) to represent Rust panics.
+fn codegen_emcc_try<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ try_func: &'ll Value,
+ data: &'ll Value,
+ catch_func: &'ll Value,
+ dest: &'ll Value,
+) {
+ let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| {
+ // Codegens the shims described above:
+ //
+ // bx:
+ // invoke %try_func(%data) normal %normal unwind %catch
+ //
+ // normal:
+ // ret 0
+ //
+ // catch:
+ // (%ptr, %selector) = landingpad
+ // %rust_typeid = @llvm.eh.typeid.for(@_ZTI10rust_panic)
+ // %is_rust_panic = %selector == %rust_typeid
+ // %catch_data = alloca { i8*, i8 }
+ // %catch_data[0] = %ptr
+ // %catch_data[1] = %is_rust_panic
+ // call %catch_func(%data, %catch_data)
+ // ret 1
+ let then = bx.append_sibling_block("then");
+ let catch = bx.append_sibling_block("catch");
+
+ let try_func = llvm::get_param(bx.llfn(), 0);
+ let data = llvm::get_param(bx.llfn(), 1);
+ let catch_func = llvm::get_param(bx.llfn(), 2);
+ let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+ bx.invoke(try_func_ty, try_func, &[data], then, catch, None);
+
+ bx.switch_to_block(then);
+ bx.ret(bx.const_i32(0));
+
+ // Type indicator for the exception being thrown.
+ //
+ // The first value in this tuple is a pointer to the exception object
+ // being thrown. The second value is a "selector" indicating which of
+ // the landing pad clauses the exception's type had been matched to.
+ bx.switch_to_block(catch);
+ let tydesc = bx.eh_catch_typeinfo();
+ let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+ let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
+ bx.add_clause(vals, tydesc);
+ bx.add_clause(vals, bx.const_null(bx.type_i8p()));
+ let ptr = bx.extract_value(vals, 0);
+ let selector = bx.extract_value(vals, 1);
+
+ // Check if the typeid we got is the one for a Rust panic.
+ let rust_typeid = bx.call_intrinsic("llvm.eh.typeid.for", &[tydesc]);
+ let is_rust_panic = bx.icmp(IntPredicate::IntEQ, selector, rust_typeid);
+ let is_rust_panic = bx.zext(is_rust_panic, bx.type_bool());
+
+ // We need to pass two values to catch_func (ptr and is_rust_panic), so
+ // create an alloca and pass a pointer to that.
+ let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+ let i8_align = bx.tcx().data_layout.i8_align.abi;
+ let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
+ let catch_data = bx.alloca(catch_data_type, ptr_align);
+ let catch_data_0 =
+ bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
+ bx.store(ptr, catch_data_0, ptr_align);
+ let catch_data_1 =
+ bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+ bx.store(is_rust_panic, catch_data_1, i8_align);
+ let catch_data = bx.bitcast(catch_data, bx.type_i8p());
+
+ let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+ bx.call(catch_ty, catch_func, &[data, catch_data], None);
+ bx.ret(bx.const_i32(1));
+ });
+
+ // Note that no invoke is used here because by definition this function
+ // can't panic (that's what it's catching).
+ let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None);
+ let i32_align = bx.tcx().data_layout.i32_align.abi;
+ bx.store(ret, dest, i32_align);
+}
+
+// Helper function that hands a fresh entry block to a closure, which codegens
+// a shim function. This is currently primarily used for the `try` intrinsic
+// functions above.
+fn gen_fn<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ name: &str,
+ rust_fn_sig: ty::PolyFnSig<'tcx>,
+ codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+) -> (&'ll Type, &'ll Value) {
+ let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
+ let llty = fn_abi.llvm_type(cx);
+ let llfn = cx.declare_fn(name, fn_abi);
+ cx.set_frame_pointer_type(llfn);
+ cx.apply_target_cpu_attr(llfn);
+ // FIXME(eddyb) find a nicer way to do this.
+ unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
+ let llbb = Builder::append_block(cx, llfn, "entry-block");
+ let bx = Builder::build(cx, llbb);
+ codegen(bx);
+ (llty, llfn)
+}
+
+// Helper function that returns a handle to the `__rust_try` function used to
+// catch exceptions.
+//
+// This function is only generated once and is then cached.
+fn get_rust_try_fn<'ll, 'tcx>(
+ cx: &CodegenCx<'ll, 'tcx>,
+ codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+) -> (&'ll Type, &'ll Value) {
+ if let Some(llfn) = cx.rust_try_fn.get() {
+ return llfn;
+ }
+
+ // Define the type up front for the signature of the rust_try function.
+ let tcx = cx.tcx;
+ let i8p = tcx.mk_mut_ptr(tcx.types.i8);
+ // `unsafe fn(*mut i8) -> ()`
+ let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
+ iter::once(i8p),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ )));
+ // `unsafe fn(*mut i8, *mut i8) -> ()`
+ let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
+ [i8p, i8p].iter().cloned(),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ )));
+ // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
+ let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
+ [try_fn_ty, i8p, catch_fn_ty].into_iter(),
+ tcx.types.i32,
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::Rust,
+ ));
+ let rust_try = gen_fn(cx, "__rust_try", rust_fn_sig, codegen);
+ cx.rust_try_fn.set(Some(rust_try));
+ rust_try
+}
+
+fn generic_simd_intrinsic<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ name: Symbol,
+ callee_ty: Ty<'tcx>,
+ args: &[OperandRef<'tcx, &'ll Value>],
+ ret_ty: Ty<'tcx>,
+ llret_ty: &'ll Type,
+ span: Span,
+) -> Result<&'ll Value, ()> {
+ // macros for error handling:
+ #[allow(unused_macro_rules)]
+ macro_rules! emit_error {
+ ($msg: tt) => {
+ emit_error!($msg, )
+ };
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ macro_rules! require {
+ ($cond: expr, $($fmt: tt)*) => {
+ if !$cond {
+ return_error!($($fmt)*);
+ }
+ };
+ }
+
+ macro_rules! require_simd {
+ ($ty: expr, $position: expr) => {
+ require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
+ };
+ }
+
+ let tcx = bx.tcx();
+ let sig =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx));
+ let arg_tys = sig.inputs();
+
+ if name == sym::simd_select_bitmask {
+ require_simd!(arg_tys[1], "argument");
+ let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+
+ let expected_int_bits = (len.max(8) - 1).next_power_of_two();
+ let expected_bytes = len / 8 + ((len % 8 > 0) as u64);
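+ // E.g. a 20-lane mask accepts either a `u32` (expected_int_bits == 32)
+ // or a `[u8; 3]` (expected_bytes == 3).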
+
+ let mask_ty = arg_tys[0];
+ let mask = match mask_ty.kind() {
+ ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
+ ty::Array(elem, len)
+ if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+ && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+ == Some(expected_bytes) =>
+ {
+ let place = PlaceRef::alloca(bx, args[0].layout);
+ args[0].val.store(bx, place);
+ let int_ty = bx.type_ix(expected_bytes * 8);
+ let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
+ bx.load(int_ty, ptr, Align::ONE)
+ }
+ _ => return_error!(
+ "invalid bitmask `{}`, expected `u{}` or `[u8; {}]`",
+ mask_ty,
+ expected_int_bits,
+ expected_bytes
+ ),
+ };
+
+ let i1 = bx.type_i1();
+ let im = bx.type_ix(len);
+ let i1xn = bx.type_vector(i1, len);
+ let m_im = bx.trunc(mask, im);
+ let m_i1s = bx.bitcast(m_im, i1xn);
+ return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
+ }
+
+ // Every intrinsic below takes a SIMD vector as its first argument.
+ require_simd!(arg_tys[0], "input");
+ let in_ty = arg_tys[0];
+
+ let comparison = match name {
+ sym::simd_eq => Some(hir::BinOpKind::Eq),
+ sym::simd_ne => Some(hir::BinOpKind::Ne),
+ sym::simd_lt => Some(hir::BinOpKind::Lt),
+ sym::simd_le => Some(hir::BinOpKind::Le),
+ sym::simd_gt => Some(hir::BinOpKind::Gt),
+ sym::simd_ge => Some(hir::BinOpKind::Ge),
+ _ => None,
+ };
+
+ let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx());
+ if let Some(cmp_op) = comparison {
+ require_simd!(ret_ty, "return");
+
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ require!(
+ bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+ "expected return type with integer elements, found `{}` with non-integer `{}`",
+ ret_ty,
+ out_ty
+ );
+
+ return Ok(compare_simd_types(
+ bx,
+ args[0].immediate(),
+ args[1].immediate(),
+ in_elem,
+ llret_ty,
+ cmp_op,
+ ));
+ }
+
+ if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") {
+ // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
+ // If there is no suffix, use the index array length.
+ let n: u64 = if stripped.is_empty() {
+ // Make sure this is actually an array, since typeck only checks the length-suffixed
+ // version of this intrinsic.
+ match args[2].layout.ty.kind() {
+ ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => {
+ len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| {
+ span_bug!(span, "could not evaluate shuffle index array length")
+ })
+ }
+ _ => return_error!(
+ "simd_shuffle index must be an array of `u32`, got `{}`",
+ args[2].layout.ty
+ ),
+ }
+ } else {
+ stripped.parse().unwrap_or_else(|_| {
+ span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
+ })
+ };
+
+ require_simd!(ret_ty, "return");
+ let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ out_len == n,
+ "expected return type of length {}, found `{}` with length {}",
+ n,
+ ret_ty,
+ out_len
+ );
+ require!(
+ in_elem == out_ty,
+ "expected return element type `{}` (element of input `{}`), \
+ found `{}` with element type `{}`",
+ in_elem,
+ in_ty,
+ ret_ty,
+ out_ty
+ );
+
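+ // A shuffle index may pick a lane from either input vector, so the valid
+ // range is twice the input length.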
+ let total_len = u128::from(in_len) * 2;
+
+ let vector = args[2].immediate();
+
+ let indices: Option<Vec<_>> = (0..n)
+ .map(|i| {
+ let arg_idx = i;
+ let val = bx.const_get_elt(vector, i as u64);
+ match bx.const_to_opt_u128(val, true) {
+ None => {
+ emit_error!("shuffle index #{} is not a constant", arg_idx);
+ None
+ }
+ Some(idx) if idx >= total_len => {
+ emit_error!(
+ "shuffle index #{} is out of bounds (limit {})",
+ arg_idx,
+ total_len
+ );
+ None
+ }
+ Some(idx) => Some(bx.const_i32(idx as i32)),
+ }
+ })
+ .collect();
+ let Some(indices) = indices else {
+ return Ok(bx.const_null(llret_ty));
+ };
+
+ return Ok(bx.shuffle_vector(
+ args[0].immediate(),
+ args[1].immediate(),
+ bx.const_vector(&indices),
+ ));
+ }
+
+ if name == sym::simd_insert {
+ require!(
+ in_elem == arg_tys[2],
+ "expected inserted type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ arg_tys[2]
+ );
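+ // Note the argument order: LLVM's `insertelement` takes
+ // (vector, value, index), while the intrinsic is (vector, index, value).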
+ return Ok(bx.insert_element(
+ args[0].immediate(),
+ args[2].immediate(),
+ args[1].immediate(),
+ ));
+ }
+ if name == sym::simd_extract {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
+ }
+
+ if name == sym::simd_select {
+ let m_elem_ty = in_elem;
+ let m_len = in_len;
+ require_simd!(arg_tys[1], "argument");
+ let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ require!(
+ m_len == v_len,
+ "mismatched lengths: mask length `{}` != other vector length `{}`",
+ m_len,
+ v_len
+ );
+ match m_elem_ty.kind() {
+ ty::Int(_) => {}
+ _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
+ }
+ // truncate the mask to a vector of i1s
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, m_len as u64);
+ let m_i1s = bx.trunc(args[0].immediate(), i1xn);
+ return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
+ }
+
+ if name == sym::simd_bitmask {
+ // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
+ // vector mask and returns the most significant bit (MSB) of each lane in the form
+ // of either:
+ // * an unsigned integer
+ // * an array of `u8`
+ // If the vector has fewer than 8 lanes, a u8 is returned with zeroed trailing bits.
+ //
+ // The bit order of the result depends on the byte endianness, LSB-first for little
+ // endian and MSB-first for big endian.
+ let expected_int_bits = in_len.max(8);
+ let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
+
+ // Integer vector <i{in_bitwidth} x in_len>:
+ let (i_xn, in_elem_bitwidth) = match in_elem.kind() {
+ ty::Int(i) => (
+ args[0].immediate(),
+ i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+ ),
+ ty::Uint(i) => (
+ args[0].immediate(),
+ i.bit_width().unwrap_or_else(|| bx.data_layout().pointer_size.bits()),
+ ),
+ _ => return_error!(
+ "vector argument `{}`'s element type `{}`, expected integer element type",
+ in_ty,
+ in_elem
+ ),
+ };
+
+ // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
+ let shift_indices =
+ vec![
+ bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
+ in_len as _
+ ];
+ let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
+ // Truncate vector to an <i1 x N>
+ let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
+ // Bitcast <i1 x N> to iN:
+ let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
+
+ match ret_ty.kind() {
+ ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {
+ // Zero-extend iN to the bitmask type:
+ return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
+ }
+ ty::Array(elem, len)
+ if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
+ && len.try_eval_usize(bx.tcx, ty::ParamEnv::reveal_all())
+ == Some(expected_bytes) =>
+ {
+ // Zero-extend iN to the array length:
+ let ze = bx.zext(i_, bx.type_ix(expected_bytes * 8));
+
+ // Convert the integer to a byte array
+ let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
+ bx.store(ze, ptr, Align::ONE);
+ let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
+ let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
+ return Ok(bx.load(array_ty, ptr, Align::ONE));
+ }
+ _ => return_error!(
+ "cannot return `{}`, expected `u{}` or `[u8; {}]`",
+ ret_ty,
+ expected_int_bits,
+ expected_bytes
+ ),
+ }
+ }
+
+ fn simd_simple_float_intrinsic<'ll, 'tcx>(
+ name: Symbol,
+ in_elem: Ty<'_>,
+ in_ty: Ty<'_>,
+ in_len: u64,
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ span: Span,
+ args: &[OperandRef<'tcx, &'ll Value>],
+ ) -> Result<&'ll Value, ()> {
+ #[allow(unused_macro_rules)]
+ macro_rules! emit_error {
+ ($msg: tt) => {
+ emit_error!($msg, )
+ };
+ ($msg: tt, $($fmt: tt)*) => {
+ span_invalid_monomorphization_error(
+ bx.sess(), span,
+ &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
+ name, $($fmt)*));
+ }
+ }
+ macro_rules! return_error {
+ ($($fmt: tt)*) => {
+ {
+ emit_error!($($fmt)*);
+ return Err(());
+ }
+ }
+ }
+
+ let (elem_ty_str, elem_ty) = if let ty::Float(f) = in_elem.kind() {
+ let elem_ty = bx.cx.type_float_from_ty(*f);
+ match f.bit_width() {
+ 32 => ("f32", elem_ty),
+ 64 => ("f64", elem_ty),
+ _ => {
+ return_error!(
+ "unsupported element type `{}` of floating-point vector `{}`",
+ f.name_str(),
+ in_ty
+ );
+ }
+ }
+ } else {
+ return_error!("`{}` is not a floating-point type", in_ty);
+ };
+
+ let vec_ty = bx.type_vector(elem_ty, in_len);
+
+ let (intr_name, fn_ty) = match name {
+ sym::simd_ceil => ("ceil", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fabs => ("fabs", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fcos => ("cos", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp2 => ("exp2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fexp => ("exp", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog10 => ("log10", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog2 => ("log2", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_flog => ("log", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)),
+ sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)),
+ sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)),
+ sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)),
+ sym::simd_trunc => ("trunc", bx.type_func(&[vec_ty], vec_ty)),
+ _ => return_error!("unrecognized intrinsic `{}`", name),
+ };
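+ // Build the overloaded LLVM intrinsic name, e.g. `llvm.sqrt.v4f32`.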
+ let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str);
+ let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty);
+ let c =
+ bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
+ Ok(c)
+ }
+
+ if std::matches!(
+ name,
+ sym::simd_ceil
+ | sym::simd_fabs
+ | sym::simd_fcos
+ | sym::simd_fexp2
+ | sym::simd_fexp
+ | sym::simd_flog10
+ | sym::simd_flog2
+ | sym::simd_flog
+ | sym::simd_floor
+ | sym::simd_fma
+ | sym::simd_fpow
+ | sym::simd_fpowi
+ | sym::simd_fsin
+ | sym::simd_fsqrt
+ | sym::simd_round
+ | sym::simd_trunc
+ ) {
+ return simd_simple_float_intrinsic(name, in_elem, in_ty, in_len, bx, span, args);
+ }
+
+ // FIXME: use:
+ // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
+ // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
+ fn llvm_vector_str(
+ elem_ty: Ty<'_>,
+ vec_len: u64,
+ no_pointers: usize,
+ bx: &Builder<'_, '_, '_>,
+ ) -> String {
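+ // Produces LLVM's mangled vector type names, e.g. `v4i32` for a 4-lane
+ // i32 vector, or `v4p0i32` with one level of pointer indirection.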
+ let p0s: String = "p0".repeat(no_pointers);
+ match *elem_ty.kind() {
+ ty::Int(v) => format!(
+ "v{}{}i{}",
+ vec_len,
+ p0s,
+ // Normalize to prevent crash if v: IntTy::Isize
+ v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
+ ),
+ ty::Uint(v) => format!(
+ "v{}{}i{}",
+ vec_len,
+ p0s,
+ // Normalize to prevent crash if v: UintTy::Usize
+ v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
+ ),
+ ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+ _ => unreachable!(),
+ }
+ }
+
+ fn llvm_vector_ty<'ll>(
+ cx: &CodegenCx<'ll, '_>,
+ elem_ty: Ty<'_>,
+ vec_len: u64,
+ mut no_pointers: usize,
+ ) -> &'ll Type {
+ // FIXME: use cx.layout_of(ty).llvm_type() ?
+ let mut elem_ty = match *elem_ty.kind() {
+ ty::Int(v) => cx.type_int_from_ty(v),
+ ty::Uint(v) => cx.type_uint_from_ty(v),
+ ty::Float(v) => cx.type_float_from_ty(v),
+ _ => unreachable!(),
+ };
+ while no_pointers > 0 {
+ elem_ty = cx.type_ptr_to(elem_ty);
+ no_pointers -= 1;
+ }
+ cx.type_vector(elem_ty, vec_len)
+ }
+
+ if name == sym::simd_gather {
+ // simd_gather(values: <N x T>, pointers: <N x *_ T>,
+ // mask: <N x i{M}>) -> <N x T>
+ // * N: number of elements in the input vectors
+ // * T: type of the element to load
+ // * M: any integer width is supported, will be truncated to i1
+
+ // All types must be simd vector types
+ require_simd!(in_ty, "first");
+ require_simd!(arg_tys[1], "second");
+ require_simd!(arg_tys[2], "third");
+ require_simd!(ret_ty, "return");
+
+ // Of the same length:
+ let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "second",
+ in_len,
+ in_ty,
+ arg_tys[1],
+ out_len
+ );
+ require!(
+ in_len == out_len2,
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "third",
+ in_len,
+ in_ty,
+ arg_tys[2],
+ out_len2
+ );
+
+ // The return type must match the first argument type
+ require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);
+
+ // Counts the levels of pointer indirection in a type
+ fn ptr_count(t: Ty<'_>) -> usize {
+ match t.kind() {
+ ty::RawPtr(p) => 1 + ptr_count(p.ty),
+ _ => 0,
+ }
+ }
+
+ // Strips all pointer indirections, returning the underlying non-pointer type
+ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
+ match t.kind() {
+ ty::RawPtr(p) => non_ptr(p.ty),
+ _ => t,
+ }
+ }
+
+ // The second argument must be a simd vector with an element type that's a pointer
+ // to the element type of the first argument
+ let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
+ let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (pointer_count, underlying_ty) = match element_ty1.kind() {
+ ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of second argument `{}` \
+ to be a pointer to the element type `{}` of the first \
+ argument `{}`, found `{}` != `*_ {}`",
+ element_ty1,
+ arg_tys[1],
+ in_elem,
+ in_ty,
+ element_ty1,
+ in_elem
+ );
+ unreachable!();
+ }
+ };
+ assert!(pointer_count > 0);
+ assert_eq!(pointer_count - 1, ptr_count(element_ty0));
+ assert_eq!(underlying_ty, non_ptr(element_ty0));
+
+ // The element type of the third argument must be a signed integer type of any width:
+ let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
+ match element_ty2.kind() {
+ ty::Int(_) => (),
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of third argument `{}` \
+ to be a signed integer type",
+ element_ty2,
+ arg_tys[2]
+ );
+ }
+ }
+
+ // Alignment of T, must be a constant integer value:
+ let alignment_ty = bx.type_i32();
+ let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
+
+ // Truncate the mask vector to a vector of i1s:
+ let (mask, mask_ty) = {
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len);
+ (bx.trunc(args[2].immediate(), i1xn), i1xn)
+ };
+
+ // Type of the vector of pointers:
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
+ let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+
+ // Type of the vector of elements:
+ let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
+ let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+
+ let llvm_intrinsic =
+ format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+ let fn_ty = bx.type_func(
+ &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
+ llvm_elem_vec_ty,
+ );
+ let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
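+ // `llvm.masked.gather` takes (pointers, alignment, mask, passthrough);
+ // the `values` argument of the intrinsic supplies the passthrough lanes
+ // used where the mask is off.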
+ let v =
+ bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
+ return Ok(v);
+ }
+
+ if name == sym::simd_scatter {
+ // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
+ // mask: <N x i{M}>) -> ()
+ // * N: number of elements in the input vectors
+ // * T: type of the element to store
+ // * M: any integer width is supported, will be truncated to i1
+
+ // All types must be simd vector types
+ require_simd!(in_ty, "first");
+ require_simd!(arg_tys[1], "second");
+ require_simd!(arg_tys[2], "third");
+
+ // Of the same length:
+ let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx());
+ require!(
+ in_len == element_len1,
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "second",
+ in_len,
+ in_ty,
+ arg_tys[1],
+ element_len1
+ );
+ require!(
+ in_len == element_len2,
+ "expected {} argument with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ "third",
+ in_len,
+ in_ty,
+ arg_tys[2],
+ element_len2
+ );
+
+ // Counts the levels of pointer indirection in a type
+ fn ptr_count(t: Ty<'_>) -> usize {
+ match t.kind() {
+ ty::RawPtr(p) => 1 + ptr_count(p.ty),
+ _ => 0,
+ }
+ }
+
+ // Strips all pointer indirections, returning the underlying non-pointer type
+ fn non_ptr(t: Ty<'_>) -> Ty<'_> {
+ match t.kind() {
+ ty::RawPtr(p) => non_ptr(p.ty),
+ _ => t,
+ }
+ }
+
+ // The second argument must be a simd vector with an element type that's a pointer
+ // to the element type of the first argument
+ let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
+ let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
+ let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
+ let (pointer_count, underlying_ty) = match element_ty1.kind() {
+ ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
+ (ptr_count(element_ty1), non_ptr(element_ty1))
+ }
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of second argument `{}` \
+ to be a pointer to the element type `{}` of the first \
+ argument `{}`, found `{}` != `*mut {}`",
+ element_ty1,
+ arg_tys[1],
+ in_elem,
+ in_ty,
+ element_ty1,
+ in_elem
+ );
+ unreachable!();
+ }
+ };
+ assert!(pointer_count > 0);
+ assert_eq!(pointer_count - 1, ptr_count(element_ty0));
+ assert_eq!(underlying_ty, non_ptr(element_ty0));
+
+ // The element type of the third argument must be a signed integer type of any width:
+ match element_ty2.kind() {
+ ty::Int(_) => (),
+ _ => {
+ require!(
+ false,
+ "expected element type `{}` of third argument `{}` \
+ be a signed integer type",
+ element_ty2,
+ arg_tys[2]
+ );
+ }
+ }
+
+ // Alignment of T, must be a constant integer value:
+ let alignment_ty = bx.type_i32();
+ let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);
+
+ // Truncate the mask vector to a vector of i1s:
+ let (mask, mask_ty) = {
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len);
+ (bx.trunc(args[2].immediate(), i1xn), i1xn)
+ };
+
+ let ret_t = bx.type_void();
+
+ // Type of the vector of pointers:
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
+ let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+
+ // Type of the vector of elements:
+ let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
+ let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+
+ let llvm_intrinsic =
+ format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
+ let fn_ty =
+ bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t);
+ let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
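+ // `llvm.masked.scatter` takes (values, pointers, alignment, mask) and
+ // returns void.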
+ let v =
+ bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
+ return Ok(v);
+ }
+
+ macro_rules! arith_red {
+ ($name:ident : $integer_reduce:ident, $float_reduce:ident, $ordered:expr, $op:ident,
+ $identity:expr) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.$integer_reduce(args[0].immediate());
+ if $ordered {
+ // if overflow occurs, the result is the
+ // mathematical result modulo 2^n:
+ Ok(bx.$op(args[1].immediate(), r))
+ } else {
+ Ok(bx.$integer_reduce(args[0].immediate()))
+ }
+ }
+ ty::Float(f) => {
+ let acc = if $ordered {
+ // ordered arithmetic reductions take an accumulator
+ args[1].immediate()
+ } else {
+ // unordered arithmetic reductions use the identity accumulator
+ match f.bit_width() {
+ 32 => bx.const_real(bx.type_f32(), $identity),
+ 64 => bx.const_real(bx.type_f64(), $identity),
+ v => return_error!(
+ r#"
+unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
+ sym::$name,
+ in_ty,
+ in_elem,
+ v,
+ ret_ty
+ ),
+ }
+ };
+ Ok(bx.$float_reduce(acc, args[0].immediate()))
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
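+ // Ordered reductions take an explicit starting accumulator (args[1]);
+ // unordered ones start from the identity element (0.0 for add, 1.0 for mul)
+ // and may reassociate.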
+ arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0);
+ arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0);
+ arith_red!(
+ simd_reduce_add_unordered: vector_reduce_add,
+ vector_reduce_fadd_fast,
+ false,
+ add,
+ 0.0
+ );
+ arith_red!(
+ simd_reduce_mul_unordered: vector_reduce_mul,
+ vector_reduce_fmul_fast,
+ false,
+ mul,
+ 1.0
+ );
+
+ macro_rules! minmax_red {
+ ($name:ident: $int_red:ident, $float_red:ident) => {
+ if name == sym::$name {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ return match in_elem.kind() {
+ ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
+ ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
+ ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ minmax_red!(simd_reduce_min: vector_reduce_min, vector_reduce_fmin);
+ minmax_red!(simd_reduce_max: vector_reduce_max, vector_reduce_fmax);
+
+ minmax_red!(simd_reduce_min_nanless: vector_reduce_min, vector_reduce_fmin_fast);
+ minmax_red!(simd_reduce_max_nanless: vector_reduce_max, vector_reduce_fmax_fast);
+
+ macro_rules! bitwise_red {
+ ($name:ident : $red:ident, $boolean:expr) => {
+ if name == sym::$name {
+ let input = if !$boolean {
+ require!(
+ ret_ty == in_elem,
+ "expected return type `{}` (element of input `{}`), found `{}`",
+ in_elem,
+ in_ty,
+ ret_ty
+ );
+ args[0].immediate()
+ } else {
+ match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {}
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ }
+
+ // boolean reductions operate on vectors of i1s:
+ let i1 = bx.type_i1();
+ let i1xn = bx.type_vector(i1, in_len as u64);
+ bx.trunc(args[0].immediate(), i1xn)
+ };
+ return match in_elem.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let r = bx.$red(input);
+ Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
+ }
+ _ => return_error!(
+ "unsupported {} from `{}` with element `{}` to `{}`",
+ sym::$name,
+ in_ty,
+ in_elem,
+ ret_ty
+ ),
+ };
+ }
+ };
+ }
+
+ bitwise_red!(simd_reduce_and: vector_reduce_and, false);
+ bitwise_red!(simd_reduce_or: vector_reduce_or, false);
+ bitwise_red!(simd_reduce_xor: vector_reduce_xor, false);
+ bitwise_red!(simd_reduce_all: vector_reduce_and, true);
+ bitwise_red!(simd_reduce_any: vector_reduce_or, true);
+
+ if name == sym::simd_cast || name == sym::simd_as {
+ require_simd!(ret_ty, "return");
+ let (out_len, out_elem) = ret_ty.simd_size_and_type(bx.tcx());
+ require!(
+ in_len == out_len,
+ "expected return type with length {} (same as input type `{}`), \
+ found `{}` with length {}",
+ in_len,
+ in_ty,
+ ret_ty,
+ out_len
+ );
+ // casting cares about nominal type, not just structural type
+ if in_elem == out_elem {
+ return Ok(args[0].immediate());
+ }
+
+ enum Style {
+ Float,
+ Int(/* is signed? */ bool),
+ Unsupported,
+ }
+
+ let (in_style, in_width) = match in_elem.kind() {
+ // vectors of pointer-sized integers should've been
+ // disallowed before here, so this unwrap is safe.
+ ty::Int(i) => (
+ Style::Int(true),
+ i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Uint(u) => (
+ Style::Int(false),
+ u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+ let (out_style, out_width) = match out_elem.kind() {
+ ty::Int(i) => (
+ Style::Int(true),
+ i.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Uint(u) => (
+ Style::Int(false),
+ u.normalize(bx.tcx().sess.target.pointer_width).bit_width().unwrap(),
+ ),
+ ty::Float(f) => (Style::Float, f.bit_width()),
+ _ => (Style::Unsupported, 0),
+ };
+
+ match (in_style, out_style) {
+ (Style::Int(in_is_signed), Style::Int(_)) => {
+ return Ok(match in_width.cmp(&out_width) {
+ Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
+ Ordering::Equal => args[0].immediate(),
+ Ordering::Less => {
+ if in_is_signed {
+ bx.sext(args[0].immediate(), llret_ty)
+ } else {
+ bx.zext(args[0].immediate(), llret_ty)
+ }
+ }
+ });
+ }
+ (Style::Int(in_is_signed), Style::Float) => {
+ return Ok(if in_is_signed {
+ bx.sitofp(args[0].immediate(), llret_ty)
+ } else {
+ bx.uitofp(args[0].immediate(), llret_ty)
+ });
+ }
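+ // `simd_as` follows `as`-cast semantics (saturating on overflow, NaN
+ // becomes 0), while `simd_cast` emits plain `fptoui`/`fptosi`, which are
+ // undefined for out-of-range values.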
+ (Style::Float, Style::Int(out_is_signed)) => {
+ return Ok(match (out_is_signed, name == sym::simd_as) {
+ (false, false) => bx.fptoui(args[0].immediate(), llret_ty),
+ (true, false) => bx.fptosi(args[0].immediate(), llret_ty),
+ (_, true) => bx.cast_float_to_int(out_is_signed, args[0].immediate(), llret_ty),
+ });
+ }
+ (Style::Float, Style::Float) => {
+ return Ok(match in_width.cmp(&out_width) {
+ Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
+ Ordering::Equal => args[0].immediate(),
+ Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
+ });
+ }
+ _ => { /* Unsupported. Fallthrough. */ }
+ }
+ require!(
+ false,
+ "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
+ in_ty,
+ in_elem,
+ ret_ty,
+ out_elem
+ );
+ }
+ macro_rules! arith_binary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+ arith_binary! {
+ simd_add: Uint, Int => add, Float => fadd;
+ simd_sub: Uint, Int => sub, Float => fsub;
+ simd_mul: Uint, Int => mul, Float => fmul;
+ simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
+ simd_rem: Uint => urem, Int => srem, Float => frem;
+ simd_shl: Uint, Int => shl;
+ simd_shr: Uint => lshr, Int => ashr;
+ simd_and: Uint, Int => and;
+ simd_or: Uint, Int => or;
+ simd_xor: Uint, Int => xor;
+ simd_fmax: Float => maxnum;
+ simd_fmin: Float => minnum;
+ }
+ macro_rules! arith_unary {
+ ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
+ $(if name == sym::$name {
+ match in_elem.kind() {
+ $($(ty::$p(_))|* => {
+ return Ok(bx.$call(args[0].immediate()))
+ })*
+ _ => {},
+ }
+ require!(false,
+ "unsupported operation on `{}` with element `{}`",
+ in_ty,
+ in_elem)
+ })*
+ }
+ }
+ arith_unary! {
+ simd_neg: Int => neg, Float => fneg;
+ }
+
+ if name == sym::simd_arith_offset {
+ // This also checks that the first operand is a ptr type.
+ let pointee = in_elem.builtin_deref(true).unwrap_or_else(|| {
+ span_bug!(span, "must be called with a vector of pointer types as first argument")
+ });
+ let layout = bx.layout_of(pointee.ty);
+ let ptrs = args[0].immediate();
+ // The second argument must be a ptr-sized integer.
+ // (We don't care about the signedness, this is wrapping anyway.)
+ let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
+ if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
+ span_bug!(
+ span,
+ "must be called with a vector of pointer-sized integers as second argument"
+ );
+ }
+ let offsets = args[1].immediate();
+
+ return Ok(bx.gep(bx.backend_type(layout), ptrs, &[offsets]));
+ }
+
+ if name == sym::simd_saturating_add || name == sym::simd_saturating_sub {
+ let lhs = args[0].immediate();
+ let rhs = args[1].immediate();
+ let is_add = name == sym::simd_saturating_add;
+ let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
+ let (signed, elem_width, elem_ty) = match *in_elem.kind() {
+ ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
+ ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
+ _ => {
+ return_error!(
+ "expected element type `{}` of vector type `{}` \
+ to be a signed or unsigned integer type",
+ arg_tys[0].simd_size_and_type(bx.tcx()).1,
+ arg_tys[0]
+ );
+ }
+ };
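+ // Build the saturating-arithmetic intrinsic name, e.g. `llvm.uadd.sat.v4i32`.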
+ let llvm_intrinsic = &format!(
+ "llvm.{}{}.sat.v{}i{}",
+ if signed { 's' } else { 'u' },
+ if is_add { "add" } else { "sub" },
+ in_len,
+ elem_width
+ );
+ let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);
+
+ let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
+ let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
+ let v = bx.call(fn_ty, f, &[lhs, rhs], None);
+ return Ok(v);
+ }
+
+ span_bug!(span, "unknown SIMD intrinsic");
+}
+
+// Returns the width of an integer Ty, and whether it is signed.
+// Returns None if the type is not an integer.
+// FIXME: there are multiple functions like this one; investigate consolidating
+// them with the existing helpers.
+fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
+ match ty.kind() {
+ ty::Int(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), true))
+ }
+ ty::Uint(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(cx.tcx.sess.target.pointer_width)), false))
+ }
+ _ => None,
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
new file mode 100644
index 000000000..636d689a3
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -0,0 +1,442 @@
+//! The Rust compiler.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(hash_raw_entry)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(extern_types)]
+#![feature(once_cell)]
+#![feature(iter_intersperse)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate rustc_macros;
+
+use back::write::{create_informational_target_machine, create_target_machine};
+
+pub use llvm_util::target_features;
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::back::write::{
+ CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn,
+};
+use rustc_codegen_ssa::traits::*;
+use rustc_codegen_ssa::ModuleCodegen;
+use rustc_codegen_ssa::{CodegenResults, CompiledModule};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{ErrorGuaranteed, FatalError, Handler};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{OptLevel, OutputFilenames, PrintRequest};
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+
+use std::any::Any;
+use std::ffi::CStr;
+
+mod back {
+ pub mod archive;
+ pub mod lto;
+ mod profiling;
+ pub mod write;
+}
+
+mod abi;
+mod allocator;
+mod asm;
+mod attributes;
+mod base;
+mod builder;
+mod callee;
+mod common;
+mod consts;
+mod context;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod intrinsic;
+
+// The following is a workaround that replaces `pub mod llvm;` and fixes issue 53912.
+#[path = "llvm/mod.rs"]
+mod llvm_;
+pub mod llvm {
+ pub use super::llvm_::*;
+}
+
+mod llvm_util;
+mod mono_item;
+mod type_;
+mod type_of;
+mod va_arg;
+mod value;
+
+#[derive(Clone)]
+pub struct LlvmCodegenBackend(());
+
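+/// RAII guard that starts LLVM's time-trace profiling when created and
+/// finishes the current thread's trace when dropped.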
+struct TimeTraceProfiler {
+ enabled: bool,
+}
+
+impl TimeTraceProfiler {
+ fn new(enabled: bool) -> Self {
+ if enabled {
+ unsafe { llvm::LLVMTimeTraceProfilerInitialize() }
+ }
+ TimeTraceProfiler { enabled }
+ }
+}
+
+impl Drop for TimeTraceProfiler {
+ fn drop(&mut self) {
+ if self.enabled {
+ unsafe { llvm::LLVMTimeTraceProfilerFinishThread() }
+ }
+ }
+}
+
+impl ExtraBackendMethods for LlvmCodegenBackend {
+ fn codegen_allocator<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ module_name: &str,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+ ) -> ModuleLlvm {
+ let mut module_llvm = ModuleLlvm::new_metadata(tcx, module_name);
+ unsafe {
+ allocator::codegen(tcx, &mut module_llvm, module_name, kind, has_alloc_error_handler);
+ }
+ module_llvm
+ }
+ fn compile_codegen_unit(
+ &self,
+ tcx: TyCtxt<'_>,
+ cgu_name: Symbol,
+ ) -> (ModuleCodegen<ModuleLlvm>, u64) {
+ base::compile_codegen_unit(tcx, cgu_name)
+ }
+ fn target_machine_factory(
+ &self,
+ sess: &Session,
+ optlvl: OptLevel,
+ target_features: &[String],
+ ) -> TargetMachineFactoryFn<Self> {
+ back::write::target_machine_factory(sess, optlvl, target_features)
+ }
+ fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
+ llvm_util::target_cpu(sess)
+ }
+ fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str> {
+ llvm_util::tune_cpu(sess)
+ }
+
+ fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::spawn(move || {
+ let _profiler = TimeTraceProfiler::new(time_trace);
+ f()
+ })
+ }
+
+ fn spawn_named_thread<F, T>(
+ time_trace: bool,
+ name: String,
+ f: F,
+ ) -> std::io::Result<std::thread::JoinHandle<T>>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::Builder::new().name(name).spawn(move || {
+ let _profiler = TimeTraceProfiler::new(time_trace);
+ f()
+ })
+ }
+}
+
+impl WriteBackendMethods for LlvmCodegenBackend {
+ type Module = ModuleLlvm;
+ type ModuleBuffer = back::lto::ModuleBuffer;
+ type Context = llvm::Context;
+ type TargetMachine = &'static mut llvm::TargetMachine;
+ type ThinData = back::lto::ThinData;
+ type ThinBuffer = back::lto::ThinBuffer;
+ fn print_pass_timings(&self) {
+ unsafe {
+ llvm::LLVMRustPrintPassTimings();
+ }
+ }
+ fn run_link(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ modules: Vec<ModuleCodegen<Self::Module>>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ back::write::link(cgcx, diag_handler, modules)
+ }
+ fn run_fat_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<FatLTOInput<Self>>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<LtoModuleCodegen<Self>, FatalError> {
+ back::lto::run_fat(cgcx, modules, cached_modules)
+ }
+ fn run_thin_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<(String, Self::ThinBuffer)>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+ back::lto::run_thin(cgcx, modules, cached_modules)
+ }
+ unsafe fn optimize(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<(), FatalError> {
+ back::write::optimize(cgcx, diag_handler, module, config)
+ }
+ fn optimize_fat(
+ cgcx: &CodegenContext<Self>,
+ module: &mut ModuleCodegen<Self::Module>,
+ ) -> Result<(), FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+ back::lto::run_pass_manager(cgcx, &diag_handler, module, false)
+ }
+ unsafe fn optimize_thin(
+ cgcx: &CodegenContext<Self>,
+ thin: ThinModule<Self>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+ back::lto::optimize_thin_module(thin, cgcx)
+ }
+ unsafe fn codegen(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<CompiledModule, FatalError> {
+ back::write::codegen(cgcx, diag_handler, module, config)
+ }
+ fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
+ back::lto::prepare_thin(module)
+ }
+ fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
+ (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod()))
+ }
+}
+
+unsafe impl Send for LlvmCodegenBackend {} // LLVM is used on a per-thread basis
+unsafe impl Sync for LlvmCodegenBackend {}
+
+impl LlvmCodegenBackend {
+ pub fn new() -> Box<dyn CodegenBackend> {
+ Box::new(LlvmCodegenBackend(()))
+ }
+}
+
+impl CodegenBackend for LlvmCodegenBackend {
+ fn init(&self, sess: &Session) {
+ llvm_util::init(sess); // Make sure LLVM is initialized
+ }
+
+ fn provide(&self, providers: &mut Providers) {
+ providers.global_backend_features =
+ |tcx, ()| llvm_util::global_llvm_features(tcx.sess, true)
+ }
+
+ fn print(&self, req: PrintRequest, sess: &Session) {
+ match req {
+ PrintRequest::RelocationModels => {
+ println!("Available relocation models:");
+ for name in &[
+ "static",
+ "pic",
+ "pie",
+ "dynamic-no-pic",
+ "ropi",
+ "rwpi",
+ "ropi-rwpi",
+ "default",
+ ] {
+ println!(" {}", name);
+ }
+ println!();
+ }
+ PrintRequest::CodeModels => {
+ println!("Available code models:");
+ for name in &["tiny", "small", "kernel", "medium", "large"] {
+ println!(" {}", name);
+ }
+ println!();
+ }
+ PrintRequest::TlsModels => {
+ println!("Available TLS models:");
+ for name in &["global-dynamic", "local-dynamic", "initial-exec", "local-exec"] {
+ println!(" {}", name);
+ }
+ println!();
+ }
+ PrintRequest::StackProtectorStrategies => {
+ println!(
+ r#"Available stack protector strategies:
+ all
+ Generate stack canaries in all functions.
+
+ strong
+ Generate stack canaries in a function if it either:
+ - has a local variable of `[T; N]` type, regardless of `T` and `N`
+ - takes the address of a local variable.
+
+ (Note that a local variable being borrowed is not equivalent to its
+ address being taken: e.g. some borrows may be removed by optimization,
+ while by-value argument passing may be implemented with reference to a
+ local stack variable in the ABI.)
+
+ basic
+ Generate stack canaries in functions with local variables of `[T; N]`
+ type, where `T` is byte-sized and `N` >= 8.
+
+ none
+ Do not generate stack canaries.
+"#
+ );
+ }
+ req => llvm_util::print(req, sess),
+ }
+ }
+
+ fn print_passes(&self) {
+ llvm_util::print_passes();
+ }
+
+ fn print_version(&self) {
+ llvm_util::print_version();
+ }
+
+ fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+ target_features(sess, allow_unstable)
+ }
+
+ fn codegen_crate<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any> {
+ Box::new(rustc_codegen_ssa::base::codegen_crate(
+ LlvmCodegenBackend(()),
+ tcx,
+ crate::llvm_util::target_cpu(tcx.sess).to_string(),
+ metadata,
+ need_metadata_module,
+ ))
+ }
+
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ sess: &Session,
+ outputs: &OutputFilenames,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+ let (codegen_results, work_products) = ongoing_codegen
+ .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
+ .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
+ .join(sess);
+
+ sess.time("llvm_dump_timing_file", || {
+ if sess.opts.unstable_opts.llvm_time_trace {
+ let file_name = outputs.with_extension("llvm_timings.json");
+ llvm_util::time_trace_profiler_finish(&file_name);
+ }
+ });
+
+ Ok((codegen_results, work_products))
+ }
+
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorGuaranteed> {
+ use crate::back::archive::LlvmArchiveBuilderBuilder;
+ use rustc_codegen_ssa::back::link::link_binary;
+
+ // Run the linker on any artifacts that resulted from the LLVM run.
+ // This should produce either a finished executable or library.
+ link_binary(sess, &LlvmArchiveBuilderBuilder, &codegen_results, outputs)
+ }
+}
+
+pub struct ModuleLlvm {
+ llcx: &'static mut llvm::Context,
+ llmod_raw: *const llvm::Module,
+ tm: &'static mut llvm::TargetMachine,
+}
+
+unsafe impl Send for ModuleLlvm {}
+unsafe impl Sync for ModuleLlvm {}
+
+impl ModuleLlvm {
+ fn new(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
+ unsafe {
+ let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
+ let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
+ ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(tcx, mod_name) }
+ }
+ }
+
+ fn new_metadata(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
+ unsafe {
+ let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
+ let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
+ ModuleLlvm { llmod_raw, llcx, tm: create_informational_target_machine(tcx.sess) }
+ }
+ }
+
+ fn parse(
+ cgcx: &CodegenContext<LlvmCodegenBackend>,
+ name: &CStr,
+ buffer: &[u8],
+ handler: &Handler,
+ ) -> Result<Self, FatalError> {
+ unsafe {
+ let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
+ let llmod_raw = back::lto::parse_module(llcx, name, buffer, handler)?;
+ let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name.to_str().unwrap());
+ let tm = match (cgcx.tm_factory)(tm_factory_config) {
+ Ok(m) => m,
+ Err(e) => {
+ handler.struct_err(&e).emit();
+ return Err(FatalError);
+ }
+ };
+
+ Ok(ModuleLlvm { llmod_raw, llcx, tm })
+ }
+ }
+
+ fn llmod(&self) -> &llvm::Module {
+ unsafe { &*self.llmod_raw }
+ }
+}
+
+impl Drop for ModuleLlvm {
+ fn drop(&mut self) {
+ unsafe {
+ llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
+ llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
new file mode 100644
index 000000000..64db4f746
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs
@@ -0,0 +1,105 @@
+//! A wrapper around LLVM's archive (.a) code
+
+use rustc_fs_util::path_to_c_string;
+use std::path::Path;
+use std::slice;
+use std::str;
+
+pub struct ArchiveRO {
+ pub raw: &'static mut super::Archive,
+}
+
+unsafe impl Send for ArchiveRO {}
+
+pub struct Iter<'a> {
+ raw: &'a mut super::ArchiveIterator<'a>,
+}
+
+pub struct Child<'a> {
+ pub raw: &'a mut super::ArchiveChild<'a>,
+}
+
+impl ArchiveRO {
+ /// Opens a static archive for read-only purposes. This uses LLVM's
+ /// internal `Archive` class rather than shelling out to `ar`, so it
+ /// avoids spawning a subprocess.
+ ///
+ /// If this archive is used with a mutable method, then an error will be
+ /// raised.
+ pub fn open(dst: &Path) -> Result<ArchiveRO, String> {
+ unsafe {
+ let s = path_to_c_string(dst);
+ let ar = super::LLVMRustOpenArchive(s.as_ptr()).ok_or_else(|| {
+ super::last_error().unwrap_or_else(|| "failed to open archive".to_owned())
+ })?;
+ Ok(ArchiveRO { raw: ar })
+ }
+ }
+
+ pub fn iter(&self) -> Iter<'_> {
+ unsafe { Iter { raw: super::LLVMRustArchiveIteratorNew(self.raw) } }
+ }
+}
+
+impl Drop for ArchiveRO {
+ fn drop(&mut self) {
+ unsafe {
+ super::LLVMRustDestroyArchive(&mut *(self.raw as *mut _));
+ }
+ }
+}
+
+impl<'a> Iterator for Iter<'a> {
+ type Item = Result<Child<'a>, String>;
+
+ fn next(&mut self) -> Option<Result<Child<'a>, String>> {
+ unsafe {
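+ // LLVM returns null both at the end of the archive and on error;
+ // `last_error` distinguishes the two cases.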
+ match super::LLVMRustArchiveIteratorNext(self.raw) {
+ Some(raw) => Some(Ok(Child { raw })),
+ None => super::last_error().map(Err),
+ }
+ }
+ }
+}
+
+impl<'a> Drop for Iter<'a> {
+ fn drop(&mut self) {
+ unsafe {
+ super::LLVMRustArchiveIteratorFree(&mut *(self.raw as *mut _));
+ }
+ }
+}
+
+impl<'a> Child<'a> {
+ pub fn name(&self) -> Option<&'a str> {
+ unsafe {
+ let mut name_len = 0;
+ let name_ptr = super::LLVMRustArchiveChildName(self.raw, &mut name_len);
+ if name_ptr.is_null() {
+ None
+ } else {
+ let name = slice::from_raw_parts(name_ptr as *const u8, name_len as usize);
+ str::from_utf8(name).ok().map(|s| s.trim())
+ }
+ }
+ }
+
+ pub fn data(&self) -> &'a [u8] {
+ unsafe {
+ let mut data_len = 0;
+ let data_ptr = super::LLVMRustArchiveChildData(self.raw, &mut data_len);
+ if data_ptr.is_null() {
+ panic!("failed to read data from archive child");
+ }
+ slice::from_raw_parts(data_ptr as *const u8, data_len as usize)
+ }
+ }
+}
+
+impl<'a> Drop for Child<'a> {
+ fn drop(&mut self) {
+ unsafe {
+ super::LLVMRustArchiveChildFree(&mut *(self.raw as *mut _));
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
new file mode 100644
index 000000000..45de284d2
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs
@@ -0,0 +1,213 @@
+//! LLVM diagnostic reports.
+
+pub use self::Diagnostic::*;
+pub use self::OptimizationDiagnosticKind::*;
+
+use crate::value::Value;
+use libc::c_uint;
+
+use super::{DiagnosticInfo, SMDiagnostic};
+use rustc_span::InnerSpan;
+
+#[derive(Copy, Clone)]
+pub enum OptimizationDiagnosticKind {
+ OptimizationRemark,
+ OptimizationMissed,
+ OptimizationAnalysis,
+ OptimizationAnalysisFPCommute,
+ OptimizationAnalysisAliasing,
+ OptimizationFailure,
+ OptimizationRemarkOther,
+}
+
+pub struct OptimizationDiagnostic<'ll> {
+ pub kind: OptimizationDiagnosticKind,
+ pub pass_name: String,
+ pub function: &'ll Value,
+ pub line: c_uint,
+ pub column: c_uint,
+ pub filename: String,
+ pub message: String,
+}
+
+impl<'ll> OptimizationDiagnostic<'ll> {
+ unsafe fn unpack(kind: OptimizationDiagnosticKind, di: &'ll DiagnosticInfo) -> Self {
+ let mut function = None;
+ let mut line = 0;
+ let mut column = 0;
+
+ let mut message = None;
+ let mut filename = None;
+ let pass_name = super::build_string(|pass_name| {
+ message = super::build_string(|message| {
+ filename = super::build_string(|filename| {
+ super::LLVMRustUnpackOptimizationDiagnostic(
+ di,
+ pass_name,
+ &mut function,
+ &mut line,
+ &mut column,
+ filename,
+ message,
+ )
+ })
+ .ok()
+ })
+ .ok()
+ })
+ .ok();
+
+ let mut filename = filename.unwrap_or_default();
+ if filename.is_empty() {
+ filename.push_str("<unknown file>");
+ }
+
+ OptimizationDiagnostic {
+ kind,
+ pass_name: pass_name.expect("got a non-UTF8 pass name from LLVM"),
+ function: function.unwrap(),
+ line,
+ column,
+ filename,
+ message: message.expect("got a non-UTF8 OptimizationDiagnostic message from LLVM"),
+ }
+ }
+}
+
+pub struct SrcMgrDiagnostic {
+ pub level: super::DiagnosticLevel,
+ pub message: String,
+ pub source: Option<(String, Vec<InnerSpan>)>,
+}
+
+impl SrcMgrDiagnostic {
+ pub unsafe fn unpack(diag: &SMDiagnostic) -> SrcMgrDiagnostic {
+ // Recover the post-substitution assembly code from LLVM for better
+ // diagnostics.
+ let mut have_source = false;
+ let mut buffer = String::new();
+ let mut level = super::DiagnosticLevel::Error;
+ let mut loc = 0;
+ let mut ranges = [0; 8];
+ let mut num_ranges = ranges.len() / 2;
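+ // `ranges` holds up to four (start, end) byte-offset pairs describing the
+ // source spans LLVM reports.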
+ let message = super::build_string(|message| {
+ buffer = super::build_string(|buffer| {
+ have_source = super::LLVMRustUnpackSMDiagnostic(
+ diag,
+ message,
+ buffer,
+ &mut level,
+ &mut loc,
+ ranges.as_mut_ptr(),
+ &mut num_ranges,
+ );
+ })
+ .expect("non-UTF8 inline asm");
+ })
+ .expect("non-UTF8 SMDiagnostic");
+
+ SrcMgrDiagnostic {
+ message,
+ level,
+ source: have_source.then(|| {
+ let mut spans = vec![InnerSpan::new(loc as usize, loc as usize)];
+ for i in 0..num_ranges {
+ spans.push(InnerSpan::new(ranges[i * 2] as usize, ranges[i * 2 + 1] as usize));
+ }
+ (buffer, spans)
+ }),
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct InlineAsmDiagnostic {
+ pub level: super::DiagnosticLevel,
+ pub cookie: c_uint,
+ pub message: String,
+ pub source: Option<(String, Vec<InnerSpan>)>,
+}
+
+impl InlineAsmDiagnostic {
+ unsafe fn unpackInlineAsm(di: &DiagnosticInfo) -> Self {
+ let mut cookie = 0;
+ let mut message = None;
+ let mut level = super::DiagnosticLevel::Error;
+
+ super::LLVMRustUnpackInlineAsmDiagnostic(di, &mut level, &mut cookie, &mut message);
+
+ InlineAsmDiagnostic {
+ level,
+ cookie,
+ message: super::twine_to_string(message.unwrap()),
+ source: None,
+ }
+ }
+
+ unsafe fn unpackSrcMgr(di: &DiagnosticInfo) -> Self {
+ let mut cookie = 0;
+ let smdiag = SrcMgrDiagnostic::unpack(super::LLVMRustGetSMDiagnostic(di, &mut cookie));
+ InlineAsmDiagnostic {
+ level: smdiag.level,
+ cookie,
+ message: smdiag.message,
+ source: smdiag.source,
+ }
+ }
+}
+
+pub enum Diagnostic<'ll> {
+ Optimization(OptimizationDiagnostic<'ll>),
+ InlineAsm(InlineAsmDiagnostic),
+ PGO(&'ll DiagnosticInfo),
+ Linker(&'ll DiagnosticInfo),
+ Unsupported(&'ll DiagnosticInfo),
+
+ /// LLVM has other types that we do not wrap here.
+ UnknownDiagnostic(&'ll DiagnosticInfo),
+}
+
+impl<'ll> Diagnostic<'ll> {
+ pub unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self {
+ use super::DiagnosticKind as Dk;
+ let kind = super::LLVMRustGetDiagInfoKind(di);
+
+ match kind {
+ Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpackInlineAsm(di)),
+
+ Dk::OptimizationRemark => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di))
+ }
+ Dk::OptimizationRemarkOther => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationRemarkOther, di))
+ }
+ Dk::OptimizationRemarkMissed => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationMissed, di))
+ }
+
+ Dk::OptimizationRemarkAnalysis => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysis, di))
+ }
+
+ Dk::OptimizationRemarkAnalysisFPCommute => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisFPCommute, di))
+ }
+
+ Dk::OptimizationRemarkAnalysisAliasing => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisAliasing, di))
+ }
+
+ Dk::OptimizationFailure => {
+ Optimization(OptimizationDiagnostic::unpack(OptimizationFailure, di))
+ }
+
+ Dk::PGOProfile => PGO(di),
+ Dk::Linker => Linker(di),
+ Dk::Unsupported => Unsupported(di),
+
+ Dk::SrcMgr => InlineAsm(InlineAsmDiagnostic::unpackSrcMgr(di)),
+
+ _ => UnknownDiagnostic(di),
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
new file mode 100644
index 000000000..3139f93bf
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -0,0 +1,2547 @@
+#![allow(non_camel_case_types)]
+#![allow(non_upper_case_globals)]
+
+use rustc_codegen_ssa::coverageinfo::map as coverage_map;
+
+use super::debuginfo::{
+ DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator,
+ DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace,
+ DISPFlags, DIScope, DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable,
+ DebugEmissionKind,
+};
+
+use libc::{c_char, c_int, c_uint, size_t};
+use libc::{c_ulonglong, c_void};
+
+use std::marker::PhantomData;
+
+use super::RustString;
+
+pub type Bool = c_uint;
+
+pub const True: Bool = 1 as Bool;
+pub const False: Bool = 0 as Bool;
+
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum LLVMRustResult {
+ Success,
+ Failure,
+}
+
+// Rust version of the C struct with the same name in rustc_llvm/llvm-wrapper/RustWrapper.cpp.
+#[repr(C)]
+pub struct LLVMRustCOFFShortExport {
+ pub name: *const c_char,
+ pub ordinal_present: bool,
+ // The value of `ordinal` is only meaningful when `ordinal_present` is true.
+ pub ordinal: u16,
+}
+
+impl LLVMRustCOFFShortExport {
+ pub fn new(name: *const c_char, ordinal: Option<u16>) -> LLVMRustCOFFShortExport {
+ LLVMRustCOFFShortExport {
+ name,
+ ordinal_present: ordinal.is_some(),
+ ordinal: ordinal.unwrap_or(0),
+ }
+ }
+}
+
+/// Translation of LLVM's MachineTypes enum, defined in llvm\include\llvm\BinaryFormat\COFF.h.
+///
+/// We include only architectures supported on Windows.
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum LLVMMachineType {
+ AMD64 = 0x8664,
+ I386 = 0x14c,
+ ARM64 = 0xaa64,
+ ARM = 0x01c0,
+}
+
+/// LLVM's Module::ModFlagBehavior, defined in llvm/include/llvm/IR/Module.h.
+///
+/// When merging modules (e.g. during LTO), their metadata flags are combined. Conflicts are
+/// resolved according to the merge behaviors specified here. Flags differing only in merge
+/// behavior are still considered to be in conflict.
+///
+/// In order for Rust-C LTO to work, we must specify behaviors compatible with Clang. Notably,
+/// 'Error' and 'Warning' cannot be mixed for a given flag.
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum LLVMModFlagBehavior {
+ Error = 1,
+ Warning = 2,
+ Require = 3,
+ Override = 4,
+ Append = 5,
+ AppendUnique = 6,
+ Max = 7,
+}
+
+// Values of the LLVM CallConv type; the discriminants must match LLVM's CallingConv::ID.
+
+/// LLVM CallingConv::ID. Should we wrap this?
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[repr(C)]
+pub enum CallConv {
+ CCallConv = 0,
+ FastCallConv = 8,
+ ColdCallConv = 9,
+ X86StdcallCallConv = 64,
+ X86FastcallCallConv = 65,
+ ArmAapcsCallConv = 67,
+ Msp430Intr = 69,
+ X86_ThisCall = 70,
+ PtxKernel = 71,
+ X86_64_SysV = 78,
+ X86_64_Win64 = 79,
+ X86_VectorCall = 80,
+ X86_Intr = 83,
+ AvrNonBlockingInterrupt = 84,
+ AvrInterrupt = 85,
+ AmdGpuKernel = 91,
+}
+
+/// LLVMRustLinkage
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum Linkage {
+ ExternalLinkage = 0,
+ AvailableExternallyLinkage = 1,
+ LinkOnceAnyLinkage = 2,
+ LinkOnceODRLinkage = 3,
+ WeakAnyLinkage = 4,
+ WeakODRLinkage = 5,
+ AppendingLinkage = 6,
+ InternalLinkage = 7,
+ PrivateLinkage = 8,
+ ExternalWeakLinkage = 9,
+ CommonLinkage = 10,
+}
+
+// LLVMRustVisibility
+#[repr(C)]
+#[derive(Copy, Clone, PartialEq)]
+pub enum Visibility {
+ Default = 0,
+ Hidden = 1,
+ Protected = 2,
+}
+
+/// LLVMUnnamedAddr
+#[repr(C)]
+pub enum UnnamedAddr {
+ No,
+ Local,
+ Global,
+}
+
+/// LLVMDLLStorageClass
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum DLLStorageClass {
+ #[allow(dead_code)]
+ Default = 0,
+ DllImport = 1, // Function to be imported from DLL.
+ #[allow(dead_code)]
+ DllExport = 2, // Function to be accessible from DLL.
+}
+
+/// Matches LLVMRustAttribute in LLVMWrapper.h
+/// Semantically a subset of the C++ enum llvm::Attribute::AttrKind,
+/// though it is not ABI compatible (since it's a C++ enum)
+#[repr(C)]
+#[derive(Copy, Clone, Debug)]
+pub enum AttributeKind {
+ AlwaysInline = 0,
+ ByVal = 1,
+ Cold = 2,
+ InlineHint = 3,
+ MinSize = 4,
+ Naked = 5,
+ NoAlias = 6,
+ NoCapture = 7,
+ NoInline = 8,
+ NonNull = 9,
+ NoRedZone = 10,
+ NoReturn = 11,
+ NoUnwind = 12,
+ OptimizeForSize = 13,
+ ReadOnly = 14,
+ SExt = 15,
+ StructRet = 16,
+ UWTable = 17,
+ ZExt = 18,
+ InReg = 19,
+ SanitizeThread = 20,
+ SanitizeAddress = 21,
+ SanitizeMemory = 22,
+ NonLazyBind = 23,
+ OptimizeNone = 24,
+ ReturnsTwice = 25,
+ ReadNone = 26,
+ InaccessibleMemOnly = 27,
+ SanitizeHWAddress = 28,
+ WillReturn = 29,
+ StackProtectReq = 30,
+ StackProtectStrong = 31,
+ StackProtect = 32,
+ NoUndef = 33,
+ SanitizeMemTag = 34,
+ NoCfCheck = 35,
+ ShadowCallStack = 36,
+ AllocSize = 37,
+ AllocatedPointer = 38,
+ AllocAlign = 39,
+}
+
+/// LLVMIntPredicate
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum IntPredicate {
+ IntEQ = 32,
+ IntNE = 33,
+ IntUGT = 34,
+ IntUGE = 35,
+ IntULT = 36,
+ IntULE = 37,
+ IntSGT = 38,
+ IntSGE = 39,
+ IntSLT = 40,
+ IntSLE = 41,
+}
+
+impl IntPredicate {
+ pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self {
+ match intpre {
+ rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ,
+ rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE,
+ rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT,
+ rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE,
+ rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT,
+ rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE,
+ rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT,
+ rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE,
+ rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT,
+ rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE,
+ }
+ }
+}
+
+/// LLVMRealPredicate
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum RealPredicate {
+ RealPredicateFalse = 0,
+ RealOEQ = 1,
+ RealOGT = 2,
+ RealOGE = 3,
+ RealOLT = 4,
+ RealOLE = 5,
+ RealONE = 6,
+ RealORD = 7,
+ RealUNO = 8,
+ RealUEQ = 9,
+ RealUGT = 10,
+ RealUGE = 11,
+ RealULT = 12,
+ RealULE = 13,
+ RealUNE = 14,
+ RealPredicateTrue = 15,
+}
+
+impl RealPredicate {
+ pub fn from_generic(realp: rustc_codegen_ssa::common::RealPredicate) -> Self {
+ match realp {
+ rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => {
+ RealPredicate::RealPredicateFalse
+ }
+ rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ,
+ rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT,
+ rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE,
+ rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT,
+ rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE,
+ rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE,
+ rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD,
+ rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO,
+ rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ,
+ rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT,
+ rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE,
+ rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT,
+ rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE,
+ rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE,
+ rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => {
+ RealPredicate::RealPredicateTrue
+ }
+ }
+ }
+}
+
+/// LLVMTypeKind
+#[derive(Copy, Clone, PartialEq, Debug)]
+#[repr(C)]
+pub enum TypeKind {
+ Void = 0,
+ Half = 1,
+ Float = 2,
+ Double = 3,
+ X86_FP80 = 4,
+ FP128 = 5,
+ PPC_FP128 = 6,
+ Label = 7,
+ Integer = 8,
+ Function = 9,
+ Struct = 10,
+ Array = 11,
+ Pointer = 12,
+ Vector = 13,
+ Metadata = 14,
+ X86_MMX = 15,
+ Token = 16,
+ ScalableVector = 17,
+ BFloat = 18,
+ X86_AMX = 19,
+}
+
+impl TypeKind {
+ pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind {
+ match self {
+ TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void,
+ TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half,
+ TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float,
+ TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double,
+ TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80,
+ TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128,
+ TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128,
+ TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label,
+ TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer,
+ TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function,
+ TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct,
+ TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array,
+ TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer,
+ TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector,
+ TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata,
+ TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX,
+ TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token,
+ TypeKind::ScalableVector => rustc_codegen_ssa::common::TypeKind::ScalableVector,
+ TypeKind::BFloat => rustc_codegen_ssa::common::TypeKind::BFloat,
+ TypeKind::X86_AMX => rustc_codegen_ssa::common::TypeKind::X86_AMX,
+ }
+ }
+}
+
+/// LLVMAtomicRmwBinOp
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum AtomicRmwBinOp {
+ AtomicXchg = 0,
+ AtomicAdd = 1,
+ AtomicSub = 2,
+ AtomicAnd = 3,
+ AtomicNand = 4,
+ AtomicOr = 5,
+ AtomicXor = 6,
+ AtomicMax = 7,
+ AtomicMin = 8,
+ AtomicUMax = 9,
+ AtomicUMin = 10,
+}
+
+impl AtomicRmwBinOp {
+ pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self {
+ match op {
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax,
+ rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin,
+ }
+ }
+}
+
+/// LLVMAtomicOrdering
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum AtomicOrdering {
+ #[allow(dead_code)]
+ NotAtomic = 0,
+ Unordered = 1,
+ Monotonic = 2,
+ // Consume = 3, // Not specified yet.
+ Acquire = 4,
+ Release = 5,
+ AcquireRelease = 6,
+ SequentiallyConsistent = 7,
+}
+
+impl AtomicOrdering {
+ pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self {
+ match ao {
+ rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered,
+ rustc_codegen_ssa::common::AtomicOrdering::Relaxed => AtomicOrdering::Monotonic,
+ rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire,
+ rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release,
+ rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => {
+ AtomicOrdering::AcquireRelease
+ }
+ rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => {
+ AtomicOrdering::SequentiallyConsistent
+ }
+ }
+ }
+}
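+
+// Note that LLVM spells C++11 `Relaxed` as `Monotonic`, hence the
+// Relaxed => Monotonic arm above, and the unstable `Consume` ordering (3) has no
+// generic counterpart at all. Illustrative usage sketch (hypothetical locals
+// `builder`, `ty`, `ptr`, `name`):
+//
+//     let order = AtomicOrdering::from_generic(rustc_codegen_ssa::common::AtomicOrdering::Acquire);
+//     let loaded = unsafe { LLVMRustBuildAtomicLoad(builder, ty, ptr, name, order) };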
+
+/// LLVMRustSynchronizationScope
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum SynchronizationScope {
+ SingleThread,
+ CrossThread,
+}
+
+impl SynchronizationScope {
+ pub fn from_generic(sc: rustc_codegen_ssa::common::SynchronizationScope) -> Self {
+ match sc {
+ rustc_codegen_ssa::common::SynchronizationScope::SingleThread => {
+ SynchronizationScope::SingleThread
+ }
+ rustc_codegen_ssa::common::SynchronizationScope::CrossThread => {
+ SynchronizationScope::CrossThread
+ }
+ }
+ }
+}
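+
+// Illustrative usage sketch (hypothetical local `builder`): a fence pairs an
+// ordering with a scope, where `CrossThread` corresponds to LLVM's default
+// "system" synchronization scope:
+//
+//     unsafe {
+//         LLVMRustBuildAtomicFence(
+//             builder,
+//             AtomicOrdering::SequentiallyConsistent,
+//             SynchronizationScope::from_generic(
+//                 rustc_codegen_ssa::common::SynchronizationScope::CrossThread,
+//             ),
+//         );
+//     }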
+
+/// LLVMRustFileType
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum FileType {
+ AssemblyFile,
+ ObjectFile,
+}
+
+/// LLVMMetadataType
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum MetadataType {
+ MD_dbg = 0,
+ MD_tbaa = 1,
+ MD_prof = 2,
+ MD_fpmath = 3,
+ MD_range = 4,
+ MD_tbaa_struct = 5,
+ MD_invariant_load = 6,
+ MD_alias_scope = 7,
+ MD_noalias = 8,
+ MD_nontemporal = 9,
+ MD_mem_parallel_loop_access = 10,
+ MD_nonnull = 11,
+ MD_align = 17,
+ MD_type = 19,
+ MD_vcall_visibility = 28,
+ MD_noundef = 29,
+}
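+
+// These discriminants are LLVM's fixed metadata kind IDs (hence the gaps at
+// 12..=16, 18, and 20..=27), so a variant can be passed directly as the `KindID`
+// argument of `LLVMSetMetadata`. Illustrative sketch (hypothetical locals `load`
+// and `md`, a metadata node as `&Value`):
+//
+//     unsafe { LLVMSetMetadata(load, MetadataType::MD_nonnull as c_uint, md) };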
+
+/// LLVMRustAsmDialect
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum AsmDialect {
+ Att,
+ Intel,
+}
+
+/// LLVMRustCodeGenOptLevel
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum CodeGenOptLevel {
+ None,
+ Less,
+ Default,
+ Aggressive,
+}
+
+/// LLVMRustPassBuilderOptLevel
+#[repr(C)]
+pub enum PassBuilderOptLevel {
+ O0,
+ O1,
+ O2,
+ O3,
+ Os,
+ Oz,
+}
+
+/// LLVMRustOptStage
+#[derive(PartialEq)]
+#[repr(C)]
+pub enum OptStage {
+ PreLinkNoLTO,
+ PreLinkThinLTO,
+ PreLinkFatLTO,
+ ThinLTO,
+ FatLTO,
+}
+
+/// LLVMRustSanitizerOptions
+#[repr(C)]
+pub struct SanitizerOptions {
+ pub sanitize_address: bool,
+ pub sanitize_address_recover: bool,
+ pub sanitize_memory: bool,
+ pub sanitize_memory_recover: bool,
+ pub sanitize_memory_track_origins: c_int,
+ pub sanitize_thread: bool,
+ pub sanitize_hwaddress: bool,
+ pub sanitize_hwaddress_recover: bool,
+}
+
+/// LLVMRelocMode
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum RelocModel {
+ Static,
+ PIC,
+ DynamicNoPic,
+ ROPI,
+ RWPI,
+ ROPI_RWPI,
+}
+
+/// LLVMRustCodeModel
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum CodeModel {
+ Tiny,
+ Small,
+ Kernel,
+ Medium,
+ Large,
+ None,
+}
+
+/// LLVMRustDiagnosticKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum DiagnosticKind {
+ Other,
+ InlineAsm,
+ StackSize,
+ DebugMetadataVersion,
+ SampleProfile,
+ OptimizationRemark,
+ OptimizationRemarkMissed,
+ OptimizationRemarkAnalysis,
+ OptimizationRemarkAnalysisFPCommute,
+ OptimizationRemarkAnalysisAliasing,
+ OptimizationRemarkOther,
+ OptimizationFailure,
+ PGOProfile,
+ Linker,
+ Unsupported,
+ SrcMgr,
+}
+
+/// LLVMRustDiagnosticLevel
+#[derive(Copy, Clone)]
+#[repr(C)]
+#[allow(dead_code)] // Variants constructed by C++.
+pub enum DiagnosticLevel {
+ Error,
+ Warning,
+ Note,
+ Remark,
+}
+
+/// LLVMRustArchiveKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ArchiveKind {
+ K_GNU,
+ K_BSD,
+ K_DARWIN,
+ K_COFF,
+}
+
+// LLVMRustThinLTOData
+extern "C" {
+ pub type ThinLTOData;
+}
+
+// LLVMRustThinLTOBuffer
+extern "C" {
+ pub type ThinLTOBuffer;
+}
+
+/// LLVMRustThinLTOModule
+#[repr(C)]
+pub struct ThinLTOModule {
+ pub identifier: *const c_char,
+ pub data: *const u8,
+ pub len: usize,
+}
+
+/// LLVMThreadLocalMode
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ThreadLocalMode {
+ NotThreadLocal,
+ GeneralDynamic,
+ LocalDynamic,
+ InitialExec,
+ LocalExec,
+}
+
+/// LLVMRustChecksumKind
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub enum ChecksumKind {
+ None,
+ MD5,
+ SHA1,
+ SHA256,
+}
+
+extern "C" {
+ type Opaque;
+}
+#[repr(C)]
+struct InvariantOpaque<'a> {
+ _marker: PhantomData<&'a mut &'a ()>,
+ _opaque: Opaque,
+}
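+
+// `PhantomData<&'a mut &'a ()>` makes `'a` invariant, and the extern-type
+// `Opaque` field keeps the struct unsized and unconstructible from Rust, so
+// wrappers such as `Builder<'a>` below can neither be built by hand nor have
+// their lifetime widened or narrowed through variance.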
+
+// Opaque pointer types
+extern "C" {
+ pub type Module;
+}
+extern "C" {
+ pub type Context;
+}
+extern "C" {
+ pub type Type;
+}
+extern "C" {
+ pub type Value;
+}
+extern "C" {
+ pub type ConstantInt;
+}
+extern "C" {
+ pub type Attribute;
+}
+extern "C" {
+ pub type Metadata;
+}
+extern "C" {
+ pub type BasicBlock;
+}
+#[repr(C)]
+pub struct Builder<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct PassManager<'a>(InvariantOpaque<'a>);
+extern "C" {
+ pub type PassManagerBuilder;
+}
+extern "C" {
+ pub type Pass;
+}
+extern "C" {
+ pub type TargetMachine;
+}
+extern "C" {
+ pub type Archive;
+}
+#[repr(C)]
+pub struct ArchiveIterator<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct ArchiveChild<'a>(InvariantOpaque<'a>);
+extern "C" {
+ pub type Twine;
+}
+extern "C" {
+ pub type DiagnosticInfo;
+}
+extern "C" {
+ pub type SMDiagnostic;
+}
+#[repr(C)]
+pub struct RustArchiveMember<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct OperandBundleDef<'a>(InvariantOpaque<'a>);
+#[repr(C)]
+pub struct Linker<'a>(InvariantOpaque<'a>);
+
+extern "C" {
+ pub type DiagnosticHandler;
+}
+
+pub type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void);
+pub type InlineAsmDiagHandlerTy = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint);
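+
+// Illustrative sketch (hypothetical handler name): a function matching
+// `DiagnosticHandlerTy`; the `*mut c_void` is the opaque state pointer that was
+// registered alongside the callback and is passed back unchanged:
+//
+//     unsafe extern "C" fn handler(info: &DiagnosticInfo, state: *mut c_void) {
+//         // Recover `state`, then classify `info` via LLVMRustGetDiagInfoKind(info).
+//     }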
+
+pub mod coverageinfo {
+ use super::coverage_map;
+
+ /// Aligns with [llvm::coverage::CounterMappingRegion::RegionKind](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L209-L230)
+ #[derive(Copy, Clone, Debug)]
+ #[repr(C)]
+ pub enum RegionKind {
+        /// A CodeRegion associates some code with a counter.
+ CodeRegion = 0,
+
+ /// An ExpansionRegion represents a file expansion region that associates
+ /// a source range with the expansion of a virtual source file, such as
+ /// for a macro instantiation or #include file.
+ ExpansionRegion = 1,
+
+ /// A SkippedRegion represents a source range with code that was skipped
+ /// by a preprocessor or similar means.
+ SkippedRegion = 2,
+
+ /// A GapRegion is like a CodeRegion, but its count is only set as the
+        /// line execution count when it's the only region on the line.
+ GapRegion = 3,
+
+ /// A BranchRegion represents leaf-level boolean expressions and is
+ /// associated with two counters, each representing the number of times the
+ /// expression evaluates to true or false.
+ BranchRegion = 4,
+ }
+
+    /// This struct provides LLVM's representation of a "CounterMappingRegion", encoded into the
+ /// coverage map, in accordance with the
+ /// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+ /// The struct composes fields representing the `Counter` type and value(s) (injected counter
+ /// ID, or expression type and operands), the source file (an indirect index into a "filenames
+ /// array", encoded separately), and source location (start and end positions of the represented
+ /// code region).
+ ///
+ /// Matches LLVMRustCounterMappingRegion.
+ #[derive(Copy, Clone, Debug)]
+ #[repr(C)]
+ pub struct CounterMappingRegion {
+ /// The counter type and type-dependent counter data, if any.
+ counter: coverage_map::Counter,
+
+ /// If the `RegionKind` is a `BranchRegion`, this represents the counter
+ /// for the false branch of the region.
+ false_counter: coverage_map::Counter,
+
+ /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the
+ /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes
+ /// that, in turn, are used to look up the filename for this region.
+ file_id: u32,
+
+ /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find
+ /// the mapping regions created as a result of macro expansion, by checking if their file id
+ /// matches the expanded file id.
+ expanded_file_id: u32,
+
+ /// 1-based starting line of the mapping region.
+ start_line: u32,
+
+ /// 1-based starting column of the mapping region.
+ start_col: u32,
+
+ /// 1-based ending line of the mapping region.
+ end_line: u32,
+
+ /// 1-based ending column of the mapping region. If the high bit is set, the current
+ /// mapping region is a gap area.
+ end_col: u32,
+
+ kind: RegionKind,
+ }
+
+ impl CounterMappingRegion {
+ pub(crate) fn code_region(
+ counter: coverage_map::Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ false_counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::CodeRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn branch_region(
+ counter: coverage_map::Counter,
+ false_counter: coverage_map::Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ false_counter,
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::BranchRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn expansion_region(
+ file_id: u32,
+ expanded_file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter: coverage_map::Counter::zero(),
+ false_counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::ExpansionRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn skipped_region(
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter: coverage_map::Counter::zero(),
+ false_counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col,
+ kind: RegionKind::SkippedRegion,
+ }
+ }
+
+ // This function might be used in the future; the LLVM API is still evolving, as is coverage
+ // support.
+ #[allow(dead_code)]
+ pub(crate) fn gap_region(
+ counter: coverage_map::Counter,
+ file_id: u32,
+ start_line: u32,
+ start_col: u32,
+ end_line: u32,
+ end_col: u32,
+ ) -> Self {
+ Self {
+ counter,
+ false_counter: coverage_map::Counter::zero(),
+ file_id,
+ expanded_file_id: 0,
+ start_line,
+ start_col,
+ end_line,
+ end_col: (1_u32 << 31) | end_col,
+ kind: RegionKind::GapRegion,
+ }
+ }
+ }
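+
+    // Worked example of the gap encoding above: a gap region ending at column 10
+    // stores `end_col` as (1 << 31) | 10 = 0x8000_000A; a reader strips the high
+    // bit to recover the column and treats the set bit as the gap marker.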
+}
+
+pub mod debuginfo {
+ use super::{InvariantOpaque, Metadata};
+ use bitflags::bitflags;
+
+ #[repr(C)]
+ pub struct DIBuilder<'a>(InvariantOpaque<'a>);
+
+ pub type DIDescriptor = Metadata;
+ pub type DILocation = Metadata;
+ pub type DIScope = DIDescriptor;
+ pub type DIFile = DIScope;
+ pub type DILexicalBlock = DIScope;
+ pub type DISubprogram = DIScope;
+ pub type DINameSpace = DIScope;
+ pub type DIType = DIDescriptor;
+ pub type DIBasicType = DIType;
+ pub type DIDerivedType = DIType;
+ pub type DICompositeType = DIDerivedType;
+ pub type DIVariable = DIDescriptor;
+ pub type DIGlobalVariableExpression = DIDescriptor;
+ pub type DIArray = DIDescriptor;
+ pub type DISubrange = DIDescriptor;
+ pub type DIEnumerator = DIDescriptor;
+ pub type DITemplateTypeParameter = DIDescriptor;
+
+    // These values **must** match LLVMRustDIFlags!!
+ bitflags! {
+ #[repr(transparent)]
+ #[derive(Default)]
+ pub struct DIFlags: u32 {
+ const FlagZero = 0;
+ const FlagPrivate = 1;
+ const FlagProtected = 2;
+ const FlagPublic = 3;
+ const FlagFwdDecl = (1 << 2);
+ const FlagAppleBlock = (1 << 3);
+ const FlagBlockByrefStruct = (1 << 4);
+ const FlagVirtual = (1 << 5);
+ const FlagArtificial = (1 << 6);
+ const FlagExplicit = (1 << 7);
+ const FlagPrototyped = (1 << 8);
+ const FlagObjcClassComplete = (1 << 9);
+ const FlagObjectPointer = (1 << 10);
+ const FlagVector = (1 << 11);
+ const FlagStaticMember = (1 << 12);
+ const FlagLValueReference = (1 << 13);
+ const FlagRValueReference = (1 << 14);
+ const FlagExternalTypeRef = (1 << 15);
+ const FlagIntroducedVirtual = (1 << 18);
+ const FlagBitField = (1 << 19);
+ const FlagNoReturn = (1 << 20);
+ }
+ }
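+
+    // The two low bits form a packed accessibility field rather than independent
+    // flags -- `FlagPublic` (3) is exactly `FlagPrivate` (1) | `FlagProtected` (2) --
+    // so at most one of those three should be set. Genuine flags combine as usual,
+    // e.g. (illustrative):
+    //
+    //     let flags = DIFlags::FlagPrivate | DIFlags::FlagArtificial;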
+
+    // These values **must** match LLVMRustDISPFlags!!
+ bitflags! {
+ #[repr(transparent)]
+ #[derive(Default)]
+ pub struct DISPFlags: u32 {
+ const SPFlagZero = 0;
+ const SPFlagVirtual = 1;
+ const SPFlagPureVirtual = 2;
+ const SPFlagLocalToUnit = (1 << 2);
+ const SPFlagDefinition = (1 << 3);
+ const SPFlagOptimized = (1 << 4);
+ const SPFlagMainSubprogram = (1 << 5);
+ }
+ }
+
+ /// LLVMRustDebugEmissionKind
+ #[derive(Copy, Clone)]
+ #[repr(C)]
+ pub enum DebugEmissionKind {
+ NoDebug,
+ FullDebug,
+ LineTablesOnly,
+ }
+
+ impl DebugEmissionKind {
+ pub fn from_generic(kind: rustc_session::config::DebugInfo) -> Self {
+ use rustc_session::config::DebugInfo;
+ match kind {
+ DebugInfo::None => DebugEmissionKind::NoDebug,
+ DebugInfo::Limited => DebugEmissionKind::LineTablesOnly,
+ DebugInfo::Full => DebugEmissionKind::FullDebug,
+ }
+ }
+ }
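+
+    // This mirrors `-C debuginfo`: 0 => NoDebug, 1 (limited) => LineTablesOnly,
+    // and 2 (full) => FullDebug, so the session's setting maps one-to-one onto
+    // LLVM's emission kinds.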
+}
+
+use bitflags::bitflags;
+// These values **must** match LLVMRustAllocKindFlags
+bitflags! {
+ #[repr(transparent)]
+ #[derive(Default)]
+ pub struct AllocKindFlags : u64 {
+ const Unknown = 0;
+ const Alloc = 1;
+ const Realloc = 1 << 1;
+ const Free = 1 << 2;
+ const Uninitialized = 1 << 3;
+ const Zeroed = 1 << 4;
+ const Aligned = 1 << 5;
+ }
+}
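+
+// Illustrative usage sketch (hypothetical local `cx` holding a `&Context` as
+// `cx.llcx`): the flags are combined and handed to `LLVMRustCreateAllocKindAttr`
+// as raw bits:
+//
+//     let kind = AllocKindFlags::Alloc | AllocKindFlags::Uninitialized;
+//     let attr = unsafe { LLVMRustCreateAllocKindAttr(cx.llcx, kind.bits()) };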
+
+extern "C" {
+ pub type ModuleBuffer;
+}
+
+pub type SelfProfileBeforePassCallback =
+ unsafe extern "C" fn(*mut c_void, *const c_char, *const c_char);
+pub type SelfProfileAfterPassCallback = unsafe extern "C" fn(*mut c_void);
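+
+// Illustrative sketch (hypothetical callback names): LLVM invokes the "before"
+// hook with the pass name and IR name and the "after" hook with only the opaque
+// profiler handle, so the profiler state itself must track the open event:
+//
+//     unsafe extern "C" fn before_pass(prof: *mut c_void, pass: *const c_char, ir: *const c_char) {
+//         // start a timing event on the profiler behind `prof`
+//     }
+//     unsafe extern "C" fn after_pass(prof: *mut c_void) {
+//         // close the most recently opened event
+//     }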
+
+extern "C" {
+ pub fn LLVMRustInstallFatalErrorHandler();
+ pub fn LLVMRustDisableSystemDialogsOnCrash();
+
+ // Create and destroy contexts.
+ pub fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context;
+ pub fn LLVMContextDispose(C: &'static mut Context);
+ pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint;
+
+ // Create modules.
+ pub fn LLVMModuleCreateWithNameInContext(ModuleID: *const c_char, C: &Context) -> &Module;
+ pub fn LLVMGetModuleContext(M: &Module) -> &Context;
+ pub fn LLVMCloneModule(M: &Module) -> &Module;
+
+ /// Data layout. See Module::getDataLayout.
+ pub fn LLVMGetDataLayoutStr(M: &Module) -> *const c_char;
+    pub fn LLVMSetDataLayout(M: &Module, DataLayoutStr: *const c_char);
+
+ /// See Module::setModuleInlineAsm.
+ pub fn LLVMRustAppendModuleInlineAsm(M: &Module, Asm: *const c_char, AsmLen: size_t);
+
+ /// See llvm::LLVMTypeKind::getTypeID.
+ pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind;
+
+ // Operations on integer types
+ pub fn LLVMInt1TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt8TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt16TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt32TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMInt64TypeInContext(C: &Context) -> &Type;
+ pub fn LLVMIntTypeInContext(C: &Context, NumBits: c_uint) -> &Type;
+
+ pub fn LLVMGetIntTypeWidth(IntegerTy: &Type) -> c_uint;
+
+ // Operations on real types
+ pub fn LLVMFloatTypeInContext(C: &Context) -> &Type;
+ pub fn LLVMDoubleTypeInContext(C: &Context) -> &Type;
+
+ // Operations on function types
+ pub fn LLVMFunctionType<'a>(
+ ReturnType: &'a Type,
+ ParamTypes: *const &'a Type,
+ ParamCount: c_uint,
+ IsVarArg: Bool,
+ ) -> &'a Type;
+ pub fn LLVMCountParamTypes(FunctionTy: &Type) -> c_uint;
+ pub fn LLVMGetParamTypes<'a>(FunctionTy: &'a Type, Dest: *mut &'a Type);
+
+ // Operations on struct types
+ pub fn LLVMStructTypeInContext<'a>(
+ C: &'a Context,
+ ElementTypes: *const &'a Type,
+ ElementCount: c_uint,
+ Packed: Bool,
+ ) -> &'a Type;
+
+ // Operations on array, pointer, and vector types (sequence types)
+ pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
+ pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type;
+ pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;
+
+ pub fn LLVMGetElementType(Ty: &Type) -> &Type;
+ pub fn LLVMGetVectorSize(VectorTy: &Type) -> c_uint;
+
+ // Operations on other types
+ pub fn LLVMVoidTypeInContext(C: &Context) -> &Type;
+ pub fn LLVMRustMetadataTypeInContext(C: &Context) -> &Type;
+
+ // Operations on all values
+ pub fn LLVMTypeOf(Val: &Value) -> &Type;
+ pub fn LLVMGetValueName2(Val: &Value, Length: *mut size_t) -> *const c_char;
+ pub fn LLVMSetValueName2(Val: &Value, Name: *const c_char, NameLen: size_t);
+ pub fn LLVMReplaceAllUsesWith<'a>(OldVal: &'a Value, NewVal: &'a Value);
+ pub fn LLVMSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Node: &'a Value);
+ pub fn LLVMGlobalSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
+ pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
+ pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata;
+
+ // Operations on constants of any type
+ pub fn LLVMConstNull(Ty: &Type) -> &Value;
+ pub fn LLVMGetUndef(Ty: &Type) -> &Value;
+
+ // Operations on metadata
+ pub fn LLVMMDStringInContext(C: &Context, Str: *const c_char, SLen: c_uint) -> &Value;
+ pub fn LLVMMDNodeInContext<'a>(
+ C: &'a Context,
+ Vals: *const &'a Value,
+ Count: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMMDNodeInContext2<'a>(
+ C: &'a Context,
+ Vals: *const &'a Metadata,
+ Count: size_t,
+ ) -> &'a Metadata;
+ pub fn LLVMAddNamedMetadataOperand<'a>(M: &'a Module, Name: *const c_char, Val: &'a Value);
+
+ // Operations on scalar constants
+ pub fn LLVMConstInt(IntTy: &Type, N: c_ulonglong, SignExtend: Bool) -> &Value;
+ pub fn LLVMConstIntOfArbitraryPrecision(IntTy: &Type, Wn: c_uint, Ws: *const u64) -> &Value;
+ pub fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value;
+ pub fn LLVMConstIntGetZExtValue(ConstantVal: &ConstantInt) -> c_ulonglong;
+ pub fn LLVMRustConstInt128Get(
+ ConstantVal: &ConstantInt,
+ SExt: bool,
+ high: &mut u64,
+ low: &mut u64,
+ ) -> bool;
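+
+    // The callee splits the 128-bit constant into two out-parameters and returns
+    // false when the constant is wider than 128 bits; the caller reassembles the
+    // value as
+    //
+    //     let v = ((high as u128) << 64) | (low as u128);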
+
+ // Operations on composite constants
+ pub fn LLVMConstStringInContext(
+ C: &Context,
+ Str: *const c_char,
+ Length: c_uint,
+ DontNullTerminate: Bool,
+ ) -> &Value;
+ pub fn LLVMConstStructInContext<'a>(
+ C: &'a Context,
+ ConstantVals: *const &'a Value,
+ Count: c_uint,
+ Packed: Bool,
+ ) -> &'a Value;
+
+ pub fn LLVMConstArray<'a>(
+ ElementTy: &'a Type,
+ ConstantVals: *const &'a Value,
+ Length: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
+
+ // Constant expressions
+ pub fn LLVMRustConstInBoundsGEP2<'a>(
+ ty: &'a Type,
+ ConstantVal: &'a Value,
+ ConstantIndices: *const &'a Value,
+ NumIndices: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMConstZExt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstPtrToInt<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstIntToPtr<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstBitCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMConstPointerCast<'a>(ConstantVal: &'a Value, ToType: &'a Type) -> &'a Value;
+ pub fn LLVMGetAggregateElement(ConstantVal: &Value, Idx: c_uint) -> Option<&Value>;
+
+ // Operations on global variables, functions, and aliases (globals)
+ pub fn LLVMIsDeclaration(Global: &Value) -> Bool;
+ pub fn LLVMRustGetLinkage(Global: &Value) -> Linkage;
+ pub fn LLVMRustSetLinkage(Global: &Value, RustLinkage: Linkage);
+ pub fn LLVMSetSection(Global: &Value, Section: *const c_char);
+ pub fn LLVMRustGetVisibility(Global: &Value) -> Visibility;
+ pub fn LLVMRustSetVisibility(Global: &Value, Viz: Visibility);
+ pub fn LLVMRustSetDSOLocal(Global: &Value, is_dso_local: bool);
+ pub fn LLVMGetAlignment(Global: &Value) -> c_uint;
+ pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint);
+ pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass);
+
+ // Operations on global variables
+ pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>;
+ pub fn LLVMAddGlobal<'a>(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+ pub fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>;
+ pub fn LLVMRustGetOrInsertGlobal<'a>(
+ M: &'a Module,
+ Name: *const c_char,
+ NameLen: size_t,
+ T: &'a Type,
+ ) -> &'a Value;
+ pub fn LLVMRustInsertPrivateGlobal<'a>(M: &'a Module, T: &'a Type) -> &'a Value;
+ pub fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>;
+ pub fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>;
+ pub fn LLVMDeleteGlobal(GlobalVar: &Value);
+ pub fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>;
+ pub fn LLVMSetInitializer<'a>(GlobalVar: &'a Value, ConstantVal: &'a Value);
+ pub fn LLVMIsThreadLocal(GlobalVar: &Value) -> Bool;
+ pub fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode);
+ pub fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool;
+ pub fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool);
+ pub fn LLVMRustGetNamedValue(
+ M: &Module,
+ Name: *const c_char,
+ NameLen: size_t,
+ ) -> Option<&Value>;
+ pub fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool);
+
+ // Operations on attributes
+ pub fn LLVMRustCreateAttrNoValue(C: &Context, attr: AttributeKind) -> &Attribute;
+ pub fn LLVMCreateStringAttribute(
+ C: &Context,
+ Name: *const c_char,
+ NameLen: c_uint,
+ Value: *const c_char,
+ ValueLen: c_uint,
+ ) -> &Attribute;
+ pub fn LLVMRustCreateAlignmentAttr(C: &Context, bytes: u64) -> &Attribute;
+ pub fn LLVMRustCreateDereferenceableAttr(C: &Context, bytes: u64) -> &Attribute;
+ pub fn LLVMRustCreateDereferenceableOrNullAttr(C: &Context, bytes: u64) -> &Attribute;
+ pub fn LLVMRustCreateByValAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+ pub fn LLVMRustCreateStructRetAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+ pub fn LLVMRustCreateElementTypeAttr<'a>(C: &'a Context, ty: &'a Type) -> &'a Attribute;
+ pub fn LLVMRustCreateUWTableAttr(C: &Context, async_: bool) -> &Attribute;
+ pub fn LLVMRustCreateAllocSizeAttr(C: &Context, size_arg: u32) -> &Attribute;
+    pub fn LLVMRustCreateAllocKindAttr(C: &Context, kind_arg: u64) -> &Attribute;
+
+ // Operations on functions
+ pub fn LLVMRustGetOrInsertFunction<'a>(
+ M: &'a Module,
+ Name: *const c_char,
+ NameLen: size_t,
+ FunctionTy: &'a Type,
+ ) -> &'a Value;
+ pub fn LLVMSetFunctionCallConv(Fn: &Value, CC: c_uint);
+ pub fn LLVMRustAddFunctionAttributes<'a>(
+ Fn: &'a Value,
+ index: c_uint,
+ Attrs: *const &'a Attribute,
+ AttrsLen: size_t,
+ );
+
+ // Operations on parameters
+ pub fn LLVMIsAArgument(Val: &Value) -> Option<&Value>;
+ pub fn LLVMCountParams(Fn: &Value) -> c_uint;
+ pub fn LLVMGetParam(Fn: &Value, Index: c_uint) -> &Value;
+
+ // Operations on basic blocks
+ pub fn LLVMGetBasicBlockParent(BB: &BasicBlock) -> &Value;
+ pub fn LLVMAppendBasicBlockInContext<'a>(
+ C: &'a Context,
+ Fn: &'a Value,
+ Name: *const c_char,
+ ) -> &'a BasicBlock;
+
+ // Operations on instructions
+ pub fn LLVMIsAInstruction(Val: &Value) -> Option<&Value>;
+ pub fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock;
+
+ // Operations on call sites
+ pub fn LLVMSetInstructionCallConv(Instr: &Value, CC: c_uint);
+ pub fn LLVMRustAddCallSiteAttributes<'a>(
+ Instr: &'a Value,
+ index: c_uint,
+ Attrs: *const &'a Attribute,
+ AttrsLen: size_t,
+ );
+
+ // Operations on load/store instructions (only)
+ pub fn LLVMSetVolatile(MemoryAccessInst: &Value, volatile: Bool);
+
+ // Operations on phi nodes
+ pub fn LLVMAddIncoming<'a>(
+ PhiNode: &'a Value,
+ IncomingValues: *const &'a Value,
+ IncomingBlocks: *const &'a BasicBlock,
+ Count: c_uint,
+ );
+
+ // Instruction builders
+ pub fn LLVMCreateBuilderInContext(C: &Context) -> &mut Builder<'_>;
+ pub fn LLVMPositionBuilderAtEnd<'a>(Builder: &Builder<'a>, Block: &'a BasicBlock);
+ pub fn LLVMGetInsertBlock<'a>(Builder: &Builder<'a>) -> &'a BasicBlock;
+ pub fn LLVMDisposeBuilder<'a>(Builder: &'a mut Builder<'a>);
+
+ // Metadata
+ pub fn LLVMSetCurrentDebugLocation<'a>(Builder: &Builder<'a>, L: &'a Value);
+
+ // Terminators
+ pub fn LLVMBuildRetVoid<'a>(B: &Builder<'a>) -> &'a Value;
+ pub fn LLVMBuildRet<'a>(B: &Builder<'a>, V: &'a Value) -> &'a Value;
+ pub fn LLVMBuildBr<'a>(B: &Builder<'a>, Dest: &'a BasicBlock) -> &'a Value;
+ pub fn LLVMBuildCondBr<'a>(
+ B: &Builder<'a>,
+ If: &'a Value,
+ Then: &'a BasicBlock,
+ Else: &'a BasicBlock,
+ ) -> &'a Value;
+ pub fn LLVMBuildSwitch<'a>(
+ B: &Builder<'a>,
+ V: &'a Value,
+ Else: &'a BasicBlock,
+ NumCases: c_uint,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildInvoke<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Fn: &'a Value,
+ Args: *const &'a Value,
+ NumArgs: c_uint,
+ Then: &'a BasicBlock,
+ Catch: &'a BasicBlock,
+ Bundle: Option<&OperandBundleDef<'a>>,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildLandingPad<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ PersFn: Option<&'a Value>,
+ NumClauses: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildResume<'a>(B: &Builder<'a>, Exn: &'a Value) -> &'a Value;
+ pub fn LLVMBuildUnreachable<'a>(B: &Builder<'a>) -> &'a Value;
+
+ pub fn LLVMRustBuildCleanupPad<'a>(
+ B: &Builder<'a>,
+ ParentPad: Option<&'a Value>,
+ ArgCnt: c_uint,
+ Args: *const &'a Value,
+ Name: *const c_char,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCleanupRet<'a>(
+ B: &Builder<'a>,
+ CleanupPad: &'a Value,
+ UnwindBB: Option<&'a BasicBlock>,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCatchPad<'a>(
+ B: &Builder<'a>,
+ ParentPad: &'a Value,
+ ArgCnt: c_uint,
+ Args: *const &'a Value,
+ Name: *const c_char,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCatchRet<'a>(
+ B: &Builder<'a>,
+ Pad: &'a Value,
+ BB: &'a BasicBlock,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustBuildCatchSwitch<'a>(
+ Builder: &Builder<'a>,
+ ParentPad: Option<&'a Value>,
+ BB: Option<&'a BasicBlock>,
+ NumHandlers: c_uint,
+ Name: *const c_char,
+ ) -> Option<&'a Value>;
+ pub fn LLVMRustAddHandler<'a>(CatchSwitch: &'a Value, Handler: &'a BasicBlock);
+ pub fn LLVMSetPersonalityFn<'a>(Func: &'a Value, Pers: &'a Value);
+
+ // Add a case to the switch instruction
+ pub fn LLVMAddCase<'a>(Switch: &'a Value, OnVal: &'a Value, Dest: &'a BasicBlock);
+
+ // Add a clause to the landing pad instruction
+ pub fn LLVMAddClause<'a>(LandingPad: &'a Value, ClauseVal: &'a Value);
+
+ // Set the cleanup on a landing pad instruction
+ pub fn LLVMSetCleanup(LandingPad: &Value, Val: Bool);
+
+ // Arithmetic
+ pub fn LLVMBuildAdd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFAdd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSub<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFSub<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildMul<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFMul<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildUDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExactUDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExactSDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFDiv<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildURem<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSRem<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFRem<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildShl<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildLShr<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildAShr<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNSWAdd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNUWAdd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNSWSub<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNUWSub<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNSWMul<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNUWMul<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildAnd<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildOr<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildXor<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildNeg<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildFNeg<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildNot<'a>(B: &Builder<'a>, V: &'a Value, Name: *const c_char) -> &'a Value;
+ pub fn LLVMRustSetFastMath(Instr: &Value);
+
+ // Memory
+ pub fn LLVMBuildAlloca<'a>(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+ pub fn LLVMBuildArrayAlloca<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Val: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildLoad2<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ PointerVal: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ pub fn LLVMBuildStore<'a>(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
+
+ pub fn LLVMBuildGEP2<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Pointer: &'a Value,
+ Indices: *const &'a Value,
+ NumIndices: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildInBoundsGEP2<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Pointer: &'a Value,
+ Indices: *const &'a Value,
+ NumIndices: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildStructGEP2<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Pointer: &'a Value,
+ Idx: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ // Casts
+ pub fn LLVMBuildTrunc<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildZExt<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSExt<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPToUI<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPToSI<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildUIToFP<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildSIToFP<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPTrunc<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFPExt<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildPtrToInt<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildIntToPtr<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildBitCast<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildPointerCast<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildIntCast<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ DestTy: &'a Type,
+ IsSigned: bool,
+ ) -> &'a Value;
+
+ // Comparisons
+ pub fn LLVMBuildICmp<'a>(
+ B: &Builder<'a>,
+ Op: c_uint,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildFCmp<'a>(
+ B: &Builder<'a>,
+ Op: c_uint,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ // Miscellaneous instructions
+ pub fn LLVMBuildPhi<'a>(B: &Builder<'a>, Ty: &'a Type, Name: *const c_char) -> &'a Value;
+ pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &Value;
+ pub fn LLVMRustBuildCall<'a>(
+ B: &Builder<'a>,
+ Ty: &'a Type,
+ Fn: &'a Value,
+ Args: *const &'a Value,
+ NumArgs: c_uint,
+ Bundle: Option<&OperandBundleDef<'a>>,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildMemCpy<'a>(
+ B: &Builder<'a>,
+ Dst: &'a Value,
+ DstAlign: c_uint,
+ Src: &'a Value,
+ SrcAlign: c_uint,
+ Size: &'a Value,
+ IsVolatile: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildMemMove<'a>(
+ B: &Builder<'a>,
+ Dst: &'a Value,
+ DstAlign: c_uint,
+ Src: &'a Value,
+ SrcAlign: c_uint,
+ Size: &'a Value,
+ IsVolatile: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildMemSet<'a>(
+ B: &Builder<'a>,
+ Dst: &'a Value,
+ DstAlign: c_uint,
+ Val: &'a Value,
+ Size: &'a Value,
+ IsVolatile: bool,
+ ) -> &'a Value;
+ pub fn LLVMBuildSelect<'a>(
+ B: &Builder<'a>,
+ If: &'a Value,
+ Then: &'a Value,
+ Else: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildVAArg<'a>(
+ B: &Builder<'a>,
+ list: &'a Value,
+ Ty: &'a Type,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExtractElement<'a>(
+ B: &Builder<'a>,
+ VecVal: &'a Value,
+ Index: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildInsertElement<'a>(
+ B: &Builder<'a>,
+ VecVal: &'a Value,
+ EltVal: &'a Value,
+ Index: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildShuffleVector<'a>(
+ B: &Builder<'a>,
+ V1: &'a Value,
+ V2: &'a Value,
+ Mask: &'a Value,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildExtractValue<'a>(
+ B: &Builder<'a>,
+ AggVal: &'a Value,
+ Index: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+ pub fn LLVMBuildInsertValue<'a>(
+ B: &Builder<'a>,
+ AggVal: &'a Value,
+ EltVal: &'a Value,
+ Index: c_uint,
+ Name: *const c_char,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildVectorReduceFAdd<'a>(
+ B: &Builder<'a>,
+ Acc: &'a Value,
+ Src: &'a Value,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceFMul<'a>(
+ B: &Builder<'a>,
+ Acc: &'a Value,
+ Src: &'a Value,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceAdd<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceMul<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceAnd<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceOr<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceXor<'a>(B: &Builder<'a>, Src: &'a Value) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceMin<'a>(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsSigned: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceMax<'a>(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsSigned: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceFMin<'a>(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsNaN: bool,
+ ) -> &'a Value;
+ pub fn LLVMRustBuildVectorReduceFMax<'a>(
+ B: &Builder<'a>,
+ Src: &'a Value,
+ IsNaN: bool,
+ ) -> &'a Value;
+
+    pub fn LLVMRustBuildMinNum<'a>(B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value) -> &'a Value;
+    pub fn LLVMRustBuildMaxNum<'a>(B: &Builder<'a>, LHS: &'a Value, RHS: &'a Value) -> &'a Value;
+
+ // Atomic Operations
+ pub fn LLVMRustBuildAtomicLoad<'a>(
+ B: &Builder<'a>,
+ ElementType: &'a Type,
+ PointerVal: &'a Value,
+ Name: *const c_char,
+ Order: AtomicOrdering,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildAtomicStore<'a>(
+ B: &Builder<'a>,
+ Val: &'a Value,
+ Ptr: &'a Value,
+ Order: AtomicOrdering,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildAtomicCmpXchg<'a>(
+ B: &Builder<'a>,
+ LHS: &'a Value,
+ CMP: &'a Value,
+ RHS: &'a Value,
+ Order: AtomicOrdering,
+ FailureOrder: AtomicOrdering,
+ Weak: Bool,
+ ) -> &'a Value;
+
+ pub fn LLVMBuildAtomicRMW<'a>(
+ B: &Builder<'a>,
+ Op: AtomicRmwBinOp,
+ LHS: &'a Value,
+ RHS: &'a Value,
+ Order: AtomicOrdering,
+ SingleThreaded: Bool,
+ ) -> &'a Value;
+
+ pub fn LLVMRustBuildAtomicFence(
+ B: &Builder<'_>,
+ Order: AtomicOrdering,
+ Scope: SynchronizationScope,
+ );
+
+    /// Writes a module's bitcode to the specified path. Returns 0 on success.
+ pub fn LLVMWriteBitcodeToFile(M: &Module, Path: *const c_char) -> c_int;
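+
+    // Illustrative usage sketch (hypothetical local `module`): the path must be a
+    // NUL-terminated C string, and a return value of 0 signals success:
+    //
+    //     let path = std::ffi::CString::new("out.bc").unwrap();
+    //     let ok = unsafe { LLVMWriteBitcodeToFile(module, path.as_ptr()) } == 0;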
+
+ /// Creates a pass manager.
+ pub fn LLVMCreatePassManager<'a>() -> &'a mut PassManager<'a>;
+
+    /// Creates a function-by-function pass manager.
+ pub fn LLVMCreateFunctionPassManagerForModule(M: &Module) -> &mut PassManager<'_>;
+
+ /// Disposes a pass manager.
+ pub fn LLVMDisposePassManager<'a>(PM: &'a mut PassManager<'a>);
+
+ /// Runs a pass manager on a module.
+ pub fn LLVMRunPassManager<'a>(PM: &PassManager<'a>, M: &'a Module) -> Bool;
+
+ pub fn LLVMInitializePasses();
+
+ pub fn LLVMTimeTraceProfilerInitialize();
+
+ pub fn LLVMTimeTraceProfilerFinishThread();
+
+ pub fn LLVMTimeTraceProfilerFinish(FileName: *const c_char);
+
+ pub fn LLVMAddAnalysisPasses<'a>(T: &'a TargetMachine, PM: &PassManager<'a>);
+
+ pub fn LLVMRustPassManagerBuilderCreate() -> &'static mut PassManagerBuilder;
+ pub fn LLVMRustPassManagerBuilderDispose(PMB: &'static mut PassManagerBuilder);
+ pub fn LLVMRustPassManagerBuilderUseInlinerWithThreshold(
+ PMB: &PassManagerBuilder,
+ threshold: c_uint,
+ );
+ pub fn LLVMRustPassManagerBuilderPopulateModulePassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ );
+
+ pub fn LLVMRustPassManagerBuilderPopulateFunctionPassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ );
+ pub fn LLVMRustPassManagerBuilderPopulateLTOPassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ Internalize: Bool,
+ RunInliner: Bool,
+ );
+ pub fn LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
+ PMB: &PassManagerBuilder,
+ PM: &PassManager<'_>,
+ );
+
+ pub fn LLVMGetHostCPUFeatures() -> *mut c_char;
+
+ pub fn LLVMDisposeMessage(message: *mut c_char);
+
+ pub fn LLVMIsMultithreaded() -> Bool;
+
+ /// Returns a string describing the last error caused by an LLVMRust* call.
+ pub fn LLVMRustGetLastError() -> *const c_char;
+
+ /// Print the pass timings since static dtors aren't picking them up.
+ pub fn LLVMRustPrintPassTimings();
+
+ pub fn LLVMStructCreateNamed(C: &Context, Name: *const c_char) -> &Type;
+
+ pub fn LLVMStructSetBody<'a>(
+ StructTy: &'a Type,
+ ElementTypes: *const &'a Type,
+ ElementCount: c_uint,
+ Packed: Bool,
+ );
+
+ /// Prepares inline assembly.
+ pub fn LLVMRustInlineAsm(
+ Ty: &Type,
+ AsmString: *const c_char,
+ AsmStringLen: size_t,
+ Constraints: *const c_char,
+ ConstraintsLen: size_t,
+ SideEffects: Bool,
+ AlignStack: Bool,
+ Dialect: AsmDialect,
+ CanThrow: Bool,
+ ) -> &Value;
+ pub fn LLVMRustInlineAsmVerify(
+ Ty: &Type,
+ Constraints: *const c_char,
+ ConstraintsLen: size_t,
+ ) -> bool;
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteFilenamesSectionToBuffer(
+ Filenames: *const *const c_char,
+ FilenamesLen: size_t,
+ BufferOut: &RustString,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteMappingToBuffer(
+ VirtualFileMappingIDs: *const c_uint,
+ NumVirtualFileMappingIDs: c_uint,
+ Expressions: *const coverage_map::CounterExpression,
+ NumExpressions: c_uint,
+ MappingRegions: *const coverageinfo::CounterMappingRegion,
+ NumMappingRegions: c_uint,
+ BufferOut: &RustString,
+ );
+
+ pub fn LLVMRustCoverageCreatePGOFuncNameVar(F: &Value, FuncName: *const c_char) -> &Value;
+ pub fn LLVMRustCoverageHashCString(StrVal: *const c_char) -> u64;
+ pub fn LLVMRustCoverageHashByteArray(Bytes: *const c_char, NumBytes: size_t) -> u64;
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteMapSectionNameToString(M: &Module, Str: &RustString);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteFuncSectionNameToString(M: &Module, Str: &RustString);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustCoverageWriteMappingVarNameToString(Str: &RustString);
+
+ pub fn LLVMRustCoverageMappingVersion() -> u32;
+ pub fn LLVMRustDebugMetadataVersion() -> u32;
+ pub fn LLVMRustVersionMajor() -> u32;
+ pub fn LLVMRustVersionMinor() -> u32;
+ pub fn LLVMRustVersionPatch() -> u32;
+
+ /// Add LLVM module flags.
+ ///
+ /// In order for Rust-C LTO to work, module flags must be compatible with Clang. What
+ /// "compatible" means depends on the merge behaviors involved.
+ pub fn LLVMRustAddModuleFlag(
+ M: &Module,
+ merge_behavior: LLVMModFlagBehavior,
+ name: *const c_char,
+ value: u32,
+ );
+ pub fn LLVMRustHasModuleFlag(M: &Module, name: *const c_char, len: size_t) -> bool;
+
+ pub fn LLVMRustMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value;
+
+ pub fn LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>;
+
+ pub fn LLVMRustDIBuilderDispose<'a>(Builder: &'a mut DIBuilder<'a>);
+
+ pub fn LLVMRustDIBuilderFinalize(Builder: &DIBuilder<'_>);
+
+ pub fn LLVMRustDIBuilderCreateCompileUnit<'a>(
+ Builder: &DIBuilder<'a>,
+ Lang: c_uint,
+ File: &'a DIFile,
+ Producer: *const c_char,
+ ProducerLen: size_t,
+ isOptimized: bool,
+ Flags: *const c_char,
+ RuntimeVer: c_uint,
+ SplitName: *const c_char,
+ SplitNameLen: size_t,
+ kind: DebugEmissionKind,
+ DWOId: u64,
+ SplitDebugInlining: bool,
+ ) -> &'a DIDescriptor;
+
+ pub fn LLVMRustDIBuilderCreateFile<'a>(
+ Builder: &DIBuilder<'a>,
+ Filename: *const c_char,
+ FilenameLen: size_t,
+ Directory: *const c_char,
+ DirectoryLen: size_t,
+ CSKind: ChecksumKind,
+ Checksum: *const c_char,
+ ChecksumLen: size_t,
+ ) -> &'a DIFile;
+
+ pub fn LLVMRustDIBuilderCreateSubroutineType<'a>(
+ Builder: &DIBuilder<'a>,
+ ParameterTypes: &'a DIArray,
+ ) -> &'a DICompositeType;
+
+ pub fn LLVMRustDIBuilderCreateFunction<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ LinkageName: *const c_char,
+ LinkageNameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ ScopeLine: c_uint,
+ Flags: DIFlags,
+ SPFlags: DISPFlags,
+ MaybeFn: Option<&'a Value>,
+ TParam: &'a DIArray,
+ Decl: Option<&'a DIDescriptor>,
+ ) -> &'a DISubprogram;
+
+ pub fn LLVMRustDIBuilderCreateBasicType<'a>(
+ Builder: &DIBuilder<'a>,
+ Name: *const c_char,
+ NameLen: size_t,
+ SizeInBits: u64,
+ Encoding: c_uint,
+ ) -> &'a DIBasicType;
+
+ pub fn LLVMRustDIBuilderCreateTypedef<'a>(
+ Builder: &DIBuilder<'a>,
+ Type: &'a DIBasicType,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Scope: Option<&'a DIScope>,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMRustDIBuilderCreatePointerType<'a>(
+ Builder: &DIBuilder<'a>,
+ PointeeTy: &'a DIType,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ AddressSpace: c_uint,
+ Name: *const c_char,
+ NameLen: size_t,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMRustDIBuilderCreateStructType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIDescriptor>,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ DerivedFrom: Option<&'a DIType>,
+ Elements: &'a DIArray,
+ RunTimeLang: c_uint,
+ VTableHolder: Option<&'a DIType>,
+ UniqueId: *const c_char,
+ UniqueIdLen: size_t,
+ ) -> &'a DICompositeType;
+
+ pub fn LLVMRustDIBuilderCreateMemberType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ OffsetInBits: u64,
+ Flags: DIFlags,
+ Ty: &'a DIType,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMRustDIBuilderCreateVariantMemberType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ OffsetInBits: u64,
+ Discriminant: Option<&'a Value>,
+ Flags: DIFlags,
+ Ty: &'a DIType,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderCreateLexicalBlock<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ File: &'a DIFile,
+ Line: c_uint,
+ Col: c_uint,
+ ) -> &'a DILexicalBlock;
+
+ pub fn LLVMRustDIBuilderCreateLexicalBlockFile<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ File: &'a DIFile,
+ ) -> &'a DILexicalBlock;
+
+ pub fn LLVMRustDIBuilderCreateStaticVariable<'a>(
+ Builder: &DIBuilder<'a>,
+ Context: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ LinkageName: *const c_char,
+ LinkageNameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ isLocalToUnit: bool,
+ Val: &'a Value,
+ Decl: Option<&'a DIDescriptor>,
+ AlignInBits: u32,
+ ) -> &'a DIGlobalVariableExpression;
+
+ pub fn LLVMRustDIBuilderCreateVariable<'a>(
+ Builder: &DIBuilder<'a>,
+ Tag: c_uint,
+ Scope: &'a DIDescriptor,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ Ty: &'a DIType,
+ AlwaysPreserve: bool,
+ Flags: DIFlags,
+ ArgNo: c_uint,
+ AlignInBits: u32,
+ ) -> &'a DIVariable;
+
+ pub fn LLVMRustDIBuilderCreateArrayType<'a>(
+ Builder: &DIBuilder<'a>,
+ Size: u64,
+ AlignInBits: u32,
+ Ty: &'a DIType,
+ Subscripts: &'a DIArray,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderGetOrCreateSubrange<'a>(
+ Builder: &DIBuilder<'a>,
+ Lo: i64,
+ Count: i64,
+ ) -> &'a DISubrange;
+
+ pub fn LLVMRustDIBuilderGetOrCreateArray<'a>(
+ Builder: &DIBuilder<'a>,
+ Ptr: *const Option<&'a DIDescriptor>,
+ Count: c_uint,
+ ) -> &'a DIArray;
+
+ pub fn LLVMRustDIBuilderInsertDeclareAtEnd<'a>(
+ Builder: &DIBuilder<'a>,
+ Val: &'a Value,
+ VarInfo: &'a DIVariable,
+ AddrOps: *const u64,
+ AddrOpsCount: c_uint,
+ DL: &'a DILocation,
+ InsertAtEnd: &'a BasicBlock,
+ ) -> &'a Value;
+
+ pub fn LLVMRustDIBuilderCreateEnumerator<'a>(
+ Builder: &DIBuilder<'a>,
+ Name: *const c_char,
+ NameLen: size_t,
+ Value: i64,
+ IsUnsigned: bool,
+ ) -> &'a DIEnumerator;
+
+ pub fn LLVMRustDIBuilderCreateEnumerationType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Elements: &'a DIArray,
+ ClassType: &'a DIType,
+ IsScoped: bool,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderCreateUnionType<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNumber: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ Elements: Option<&'a DIArray>,
+ RunTimeLang: c_uint,
+ UniqueId: *const c_char,
+ UniqueIdLen: size_t,
+ ) -> &'a DIType;
+
+ pub fn LLVMRustDIBuilderCreateVariantPart<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: &'a DIScope,
+ Name: *const c_char,
+ NameLen: size_t,
+ File: &'a DIFile,
+ LineNo: c_uint,
+ SizeInBits: u64,
+ AlignInBits: u32,
+ Flags: DIFlags,
+ Discriminator: Option<&'a DIDerivedType>,
+ Elements: &'a DIArray,
+ UniqueId: *const c_char,
+ UniqueIdLen: size_t,
+ ) -> &'a DIDerivedType;
+
+ pub fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr);
+
+ pub fn LLVMRustDIBuilderCreateTemplateTypeParameter<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ Ty: &'a DIType,
+ ) -> &'a DITemplateTypeParameter;
+
+ pub fn LLVMRustDIBuilderCreateNameSpace<'a>(
+ Builder: &DIBuilder<'a>,
+ Scope: Option<&'a DIScope>,
+ Name: *const c_char,
+ NameLen: size_t,
+ ExportSymbols: bool,
+ ) -> &'a DINameSpace;
+
+ pub fn LLVMRustDICompositeTypeReplaceArrays<'a>(
+ Builder: &DIBuilder<'a>,
+ CompositeType: &'a DIType,
+ Elements: Option<&'a DIArray>,
+ Params: Option<&'a DIArray>,
+ );
+
+ pub fn LLVMRustDIBuilderCreateDebugLocation<'a>(
+ Line: c_uint,
+ Column: c_uint,
+ Scope: &'a DIScope,
+ InlinedAt: Option<&'a DILocation>,
+ ) -> &'a DILocation;
+ pub fn LLVMRustDIBuilderCreateOpDeref() -> u64;
+ pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> u64;
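+
+    // The two functions above return raw DWARF expression opcodes (DW_OP_deref and
+    // DW_OP_plus_uconst) that are spliced into the `AddrOps` array of
+    // `LLVMRustDIBuilderInsertDeclareAtEnd`, e.g. (hypothetical `offset` in bytes):
+    //
+    //     let ops = unsafe {
+    //         [LLVMRustDIBuilderCreateOpDeref(), LLVMRustDIBuilderCreateOpPlusUconst(), offset]
+    //     };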
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteTypeToString(Type: &Type, s: &RustString);
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteValueToString(value_ref: &Value, s: &RustString);
+
+ pub fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>;
+
+ pub fn LLVMRustFindAndCreatePass(Pass: *const c_char) -> Option<&'static mut Pass>;
+ pub fn LLVMRustCreateAddressSanitizerFunctionPass(Recover: bool) -> &'static mut Pass;
+ pub fn LLVMRustCreateModuleAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
+ pub fn LLVMRustCreateMemorySanitizerPass(
+ TrackOrigins: c_int,
+ Recover: bool,
+ ) -> &'static mut Pass;
+ pub fn LLVMRustCreateThreadSanitizerPass() -> &'static mut Pass;
+ pub fn LLVMRustCreateHWAddressSanitizerPass(Recover: bool) -> &'static mut Pass;
+ pub fn LLVMRustAddPass(PM: &PassManager<'_>, Pass: &'static mut Pass);
+ pub fn LLVMRustAddLastExtensionPasses(
+ PMB: &PassManagerBuilder,
+ Passes: *const &'static mut Pass,
+ NumPasses: size_t,
+ );
+
+ pub fn LLVMRustHasFeature(T: &TargetMachine, s: *const c_char) -> bool;
+
+ pub fn LLVMRustPrintTargetCPUs(T: &TargetMachine);
+ pub fn LLVMRustGetTargetFeaturesCount(T: &TargetMachine) -> size_t;
+ pub fn LLVMRustGetTargetFeature(
+ T: &TargetMachine,
+ Index: size_t,
+ Feature: &mut *const c_char,
+ Desc: &mut *const c_char,
+ );
+
+ pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
+ pub fn LLVMRustCreateTargetMachine(
+ Triple: *const c_char,
+ CPU: *const c_char,
+ Features: *const c_char,
+ Abi: *const c_char,
+ Model: CodeModel,
+ Reloc: RelocModel,
+ Level: CodeGenOptLevel,
+ UseSoftFP: bool,
+ FunctionSections: bool,
+ DataSections: bool,
+ UniqueSectionNames: bool,
+ TrapUnreachable: bool,
+ Singlethread: bool,
+ AsmComments: bool,
+ EmitStackSizeSection: bool,
+ RelaxELFRelocations: bool,
+ UseInitArray: bool,
+ SplitDwarfFile: *const c_char,
+ ) -> Option<&'static mut TargetMachine>;
+ pub fn LLVMRustDisposeTargetMachine(T: &'static mut TargetMachine);
+ pub fn LLVMRustAddBuilderLibraryInfo<'a>(
+ PMB: &'a PassManagerBuilder,
+ M: &'a Module,
+ DisableSimplifyLibCalls: bool,
+ );
+ pub fn LLVMRustConfigurePassManagerBuilder(
+ PMB: &PassManagerBuilder,
+ OptLevel: CodeGenOptLevel,
+ MergeFunctions: bool,
+ SLPVectorize: bool,
+ LoopVectorize: bool,
+ PrepareForThinLTO: bool,
+ PGOGenPath: *const c_char,
+ PGOUsePath: *const c_char,
+ PGOSampleUsePath: *const c_char,
+ SizeLevel: c_int,
+ );
+ pub fn LLVMRustAddLibraryInfo<'a>(
+ PM: &PassManager<'a>,
+ M: &'a Module,
+ DisableSimplifyLibCalls: bool,
+ );
+ pub fn LLVMRustRunFunctionPassManager<'a>(PM: &PassManager<'a>, M: &'a Module);
+ pub fn LLVMRustWriteOutputFile<'a>(
+ T: &'a TargetMachine,
+ PM: &PassManager<'a>,
+ M: &'a Module,
+ Output: *const c_char,
+ DwoOutput: *const c_char,
+ FileType: FileType,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustOptimizeWithNewPassManager<'a>(
+ M: &'a Module,
+ TM: &'a TargetMachine,
+ OptLevel: PassBuilderOptLevel,
+ OptStage: OptStage,
+ NoPrepopulatePasses: bool,
+ VerifyIR: bool,
+ UseThinLTOBuffers: bool,
+ MergeFunctions: bool,
+ UnrollLoops: bool,
+ SLPVectorize: bool,
+ LoopVectorize: bool,
+ DisableSimplifyLibCalls: bool,
+ EmitLifetimeMarkers: bool,
+ SanitizerOptions: Option<&SanitizerOptions>,
+ PGOGenPath: *const c_char,
+ PGOUsePath: *const c_char,
+ InstrumentCoverage: bool,
+ InstrumentGCOV: bool,
+ PGOSampleUsePath: *const c_char,
+ DebugInfoForProfiling: bool,
+ llvm_selfprofiler: *mut c_void,
+ begin_callback: SelfProfileBeforePassCallback,
+ end_callback: SelfProfileAfterPassCallback,
+ ExtraPasses: *const c_char,
+ ExtraPassesLen: size_t,
+ LLVMPlugins: *const c_char,
+ LLVMPluginsLen: size_t,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustPrintModule(
+ M: &Module,
+ Output: *const c_char,
+ Demangle: extern "C" fn(*const c_char, size_t, *mut c_char, size_t) -> size_t,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustSetLLVMOptions(Argc: c_int, Argv: *const *const c_char);
+ pub fn LLVMRustPrintPasses();
+ pub fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char);
+ pub fn LLVMRustAddAlwaysInlinePass(P: &PassManagerBuilder, AddLifetimes: bool);
+ pub fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t);
+
+ pub fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>;
+ pub fn LLVMRustArchiveIteratorNew(AR: &Archive) -> &mut ArchiveIterator<'_>;
+ pub fn LLVMRustArchiveIteratorNext<'a>(
+ AIR: &ArchiveIterator<'a>,
+ ) -> Option<&'a mut ArchiveChild<'a>>;
+ pub fn LLVMRustArchiveChildName(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
+ pub fn LLVMRustArchiveChildData(ACR: &ArchiveChild<'_>, size: &mut size_t) -> *const c_char;
+ pub fn LLVMRustArchiveChildFree<'a>(ACR: &'a mut ArchiveChild<'a>);
+ pub fn LLVMRustArchiveIteratorFree<'a>(AIR: &'a mut ArchiveIterator<'a>);
+ pub fn LLVMRustDestroyArchive(AR: &'static mut Archive);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteTwineToString(T: &Twine, s: &RustString);
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustUnpackOptimizationDiagnostic<'a>(
+ DI: &'a DiagnosticInfo,
+ pass_name_out: &RustString,
+ function_out: &mut Option<&'a Value>,
+ loc_line_out: &mut c_uint,
+ loc_column_out: &mut c_uint,
+ loc_filename_out: &RustString,
+ message_out: &RustString,
+ );
+
+ pub fn LLVMRustUnpackInlineAsmDiagnostic<'a>(
+ DI: &'a DiagnosticInfo,
+ level_out: &mut DiagnosticLevel,
+ cookie_out: &mut c_uint,
+ message_out: &mut Option<&'a Twine>,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustWriteDiagnosticInfoToString(DI: &DiagnosticInfo, s: &RustString);
+ pub fn LLVMRustGetDiagInfoKind(DI: &DiagnosticInfo) -> DiagnosticKind;
+
+ pub fn LLVMRustGetSMDiagnostic<'a>(
+ DI: &'a DiagnosticInfo,
+ cookie_out: &mut c_uint,
+ ) -> &'a SMDiagnostic;
+
+ pub fn LLVMRustSetInlineAsmDiagnosticHandler(
+ C: &Context,
+ H: InlineAsmDiagHandlerTy,
+ CX: *mut c_void,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustUnpackSMDiagnostic(
+ d: &SMDiagnostic,
+ message_out: &RustString,
+ buffer_out: &RustString,
+ level_out: &mut DiagnosticLevel,
+ loc_out: &mut c_uint,
+ ranges_out: *mut c_uint,
+ num_ranges: &mut usize,
+ ) -> bool;
+
+ pub fn LLVMRustWriteArchive(
+ Dst: *const c_char,
+ NumMembers: size_t,
+ Members: *const &RustArchiveMember<'_>,
+ WriteSymbtab: bool,
+ Kind: ArchiveKind,
+ ) -> LLVMRustResult;
+ pub fn LLVMRustArchiveMemberNew<'a>(
+ Filename: *const c_char,
+ Name: *const c_char,
+ Child: Option<&ArchiveChild<'a>>,
+ ) -> &'a mut RustArchiveMember<'a>;
+ pub fn LLVMRustArchiveMemberFree<'a>(Member: &'a mut RustArchiveMember<'a>);
+
+ pub fn LLVMRustWriteImportLibrary(
+ ImportName: *const c_char,
+ Path: *const c_char,
+ Exports: *const LLVMRustCOFFShortExport,
+ NumExports: usize,
+ Machine: u16,
+ MinGW: bool,
+ ) -> LLVMRustResult;
+
+ pub fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine);
+
+ pub fn LLVMRustBuildOperandBundleDef<'a>(
+ Name: *const c_char,
+ Inputs: *const &'a Value,
+ NumInputs: c_uint,
+ ) -> &'a mut OperandBundleDef<'a>;
+ pub fn LLVMRustFreeOperandBundleDef<'a>(Bundle: &'a mut OperandBundleDef<'a>);
+
+ pub fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock);
+
+ pub fn LLVMRustSetComdat<'a>(M: &'a Module, V: &'a Value, Name: *const c_char, NameLen: size_t);
+ pub fn LLVMRustSetModulePICLevel(M: &Module);
+ pub fn LLVMRustSetModulePIELevel(M: &Module);
+ pub fn LLVMRustSetModuleCodeModel(M: &Module, Model: CodeModel);
+ pub fn LLVMRustModuleBufferCreate(M: &Module) -> &'static mut ModuleBuffer;
+ pub fn LLVMRustModuleBufferPtr(p: &ModuleBuffer) -> *const u8;
+ pub fn LLVMRustModuleBufferLen(p: &ModuleBuffer) -> usize;
+ pub fn LLVMRustModuleBufferFree(p: &'static mut ModuleBuffer);
+ pub fn LLVMRustModuleCost(M: &Module) -> u64;
+
+ pub fn LLVMRustThinLTOBufferCreate(M: &Module, is_thin: bool) -> &'static mut ThinLTOBuffer;
+ pub fn LLVMRustThinLTOBufferFree(M: &'static mut ThinLTOBuffer);
+ pub fn LLVMRustThinLTOBufferPtr(M: &ThinLTOBuffer) -> *const c_char;
+ pub fn LLVMRustThinLTOBufferLen(M: &ThinLTOBuffer) -> size_t;
+ pub fn LLVMRustCreateThinLTOData(
+ Modules: *const ThinLTOModule,
+ NumModules: c_uint,
+ PreservedSymbols: *const *const c_char,
+ PreservedSymbolsLen: c_uint,
+ ) -> Option<&'static mut ThinLTOData>;
+ pub fn LLVMRustPrepareThinLTORename(
+ Data: &ThinLTOData,
+ Module: &Module,
+ Target: &TargetMachine,
+ ) -> bool;
+ pub fn LLVMRustPrepareThinLTOResolveWeak(Data: &ThinLTOData, Module: &Module) -> bool;
+ pub fn LLVMRustPrepareThinLTOInternalize(Data: &ThinLTOData, Module: &Module) -> bool;
+ pub fn LLVMRustPrepareThinLTOImport(
+ Data: &ThinLTOData,
+ Module: &Module,
+ Target: &TargetMachine,
+ ) -> bool;
+ pub fn LLVMRustFreeThinLTOData(Data: &'static mut ThinLTOData);
+ pub fn LLVMRustParseBitcodeForLTO(
+ Context: &Context,
+ Data: *const u8,
+ len: usize,
+ Identifier: *const c_char,
+ ) -> Option<&Module>;
+ pub fn LLVMRustGetBitcodeSliceFromObjectData(
+ Data: *const u8,
+ len: usize,
+ out_len: &mut usize,
+ ) -> *const u8;
+ pub fn LLVMRustThinLTOGetDICompileUnit(
+ M: &Module,
+ CU1: &mut *mut c_void,
+ CU2: &mut *mut c_void,
+ );
+ pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void);
+
+ pub fn LLVMRustLinkerNew(M: &Module) -> &mut Linker<'_>;
+ pub fn LLVMRustLinkerAdd(
+ linker: &Linker<'_>,
+ bytecode: *const c_char,
+ bytecode_len: usize,
+ ) -> bool;
+ pub fn LLVMRustLinkerFree<'a>(linker: &'a mut Linker<'a>);
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustComputeLTOCacheKey(
+ key_out: &RustString,
+ mod_id: *const c_char,
+ data: &ThinLTOData,
+ );
+
+ pub fn LLVMRustContextGetDiagnosticHandler(Context: &Context) -> Option<&DiagnosticHandler>;
+ pub fn LLVMRustContextSetDiagnosticHandler(
+ context: &Context,
+ diagnostic_handler: Option<&DiagnosticHandler>,
+ );
+ pub fn LLVMRustContextConfigureDiagnosticHandler(
+ context: &Context,
+ diagnostic_handler_callback: DiagnosticHandlerTy,
+ diagnostic_handler_context: *mut c_void,
+ remark_all_passes: bool,
+ remark_passes: *const *const c_char,
+ remark_passes_len: usize,
+ );
+
+ #[allow(improper_ctypes)]
+ pub fn LLVMRustGetMangledName(V: &Value, out: &RustString);
+
+ pub fn LLVMRustGetElementTypeArgIndex(CallSite: &Value) -> i32;
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
new file mode 100644
index 000000000..6602a4ab8
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -0,0 +1,318 @@
+#![allow(non_snake_case)]
+
+pub use self::AtomicRmwBinOp::*;
+pub use self::CallConv::*;
+pub use self::CodeGenOptSize::*;
+pub use self::IntPredicate::*;
+pub use self::Linkage::*;
+pub use self::MetadataType::*;
+pub use self::RealPredicate::*;
+
+use libc::c_uint;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_llvm::RustString;
+use std::cell::RefCell;
+use std::ffi::{CStr, CString};
+use std::str::FromStr;
+use std::string::FromUtf8Error;
+
+pub mod archive_ro;
+pub mod diagnostic;
+mod ffi;
+
+pub use self::ffi::*;
+
+impl LLVMRustResult {
+ pub fn into_result(self) -> Result<(), ()> {
+ match self {
+ LLVMRustResult::Success => Ok(()),
+ LLVMRustResult::Failure => Err(()),
+ }
+ }
+}
+
+pub fn AddFunctionAttributes<'ll>(llfn: &'ll Value, idx: AttributePlace, attrs: &[&'ll Attribute]) {
+ unsafe {
+ LLVMRustAddFunctionAttributes(llfn, idx.as_uint(), attrs.as_ptr(), attrs.len());
+ }
+}
+
+pub fn AddCallSiteAttributes<'ll>(
+ callsite: &'ll Value,
+ idx: AttributePlace,
+ attrs: &[&'ll Attribute],
+) {
+ unsafe {
+ LLVMRustAddCallSiteAttributes(callsite, idx.as_uint(), attrs.as_ptr(), attrs.len());
+ }
+}
+
+pub fn CreateAttrStringValue<'ll>(llcx: &'ll Context, attr: &str, value: &str) -> &'ll Attribute {
+ unsafe {
+ LLVMCreateStringAttribute(
+ llcx,
+ attr.as_ptr().cast(),
+ attr.len().try_into().unwrap(),
+ value.as_ptr().cast(),
+ value.len().try_into().unwrap(),
+ )
+ }
+}
+
+pub fn CreateAttrString<'ll>(llcx: &'ll Context, attr: &str) -> &'ll Attribute {
+ unsafe {
+ LLVMCreateStringAttribute(
+ llcx,
+ attr.as_ptr().cast(),
+ attr.len().try_into().unwrap(),
+ std::ptr::null(),
+ 0,
+ )
+ }
+}
+
+pub fn CreateAlignmentAttr(llcx: &Context, bytes: u64) -> &Attribute {
+ unsafe { LLVMRustCreateAlignmentAttr(llcx, bytes) }
+}
+
+pub fn CreateDereferenceableAttr(llcx: &Context, bytes: u64) -> &Attribute {
+ unsafe { LLVMRustCreateDereferenceableAttr(llcx, bytes) }
+}
+
+pub fn CreateDereferenceableOrNullAttr(llcx: &Context, bytes: u64) -> &Attribute {
+ unsafe { LLVMRustCreateDereferenceableOrNullAttr(llcx, bytes) }
+}
+
+pub fn CreateByValAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
+ unsafe { LLVMRustCreateByValAttr(llcx, ty) }
+}
+
+pub fn CreateStructRetAttr<'ll>(llcx: &'ll Context, ty: &'ll Type) -> &'ll Attribute {
+ unsafe { LLVMRustCreateStructRetAttr(llcx, ty) }
+}
+
+pub fn CreateUWTableAttr(llcx: &Context, async_: bool) -> &Attribute {
+ unsafe { LLVMRustCreateUWTableAttr(llcx, async_) }
+}
+
+pub fn CreateAllocSizeAttr(llcx: &Context, size_arg: u32) -> &Attribute {
+ unsafe { LLVMRustCreateAllocSizeAttr(llcx, size_arg) }
+}
+
+pub fn CreateAllocKindAttr(llcx: &Context, kind_arg: AllocKindFlags) -> &Attribute {
+ unsafe { LLVMRustCreateAllocKindAttr(llcx, kind_arg.bits()) }
+}
+
+#[derive(Copy, Clone)]
+pub enum AttributePlace {
+ ReturnValue,
+ Argument(u32),
+ Function,
+}
+
+impl AttributePlace {
+ pub fn as_uint(self) -> c_uint {
+ match self {
+ AttributePlace::ReturnValue => 0,
+ AttributePlace::Argument(i) => 1 + i,
+ AttributePlace::Function => !0,
+ }
+ }
+}
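+
+// The `as_uint` encoding above mirrors LLVM's attribute index convention
+// (sketch of the mapping): index 0 targets the return value, `1 + i`
+// targets argument `i`, and `!0` (all bits set) targets the function itself.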
+
+#[derive(Copy, Clone, PartialEq)]
+#[repr(C)]
+pub enum CodeGenOptSize {
+ CodeGenOptSizeNone = 0,
+ CodeGenOptSizeDefault = 1,
+ CodeGenOptSizeAggressive = 2,
+}
+
+impl FromStr for ArchiveKind {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "gnu" => Ok(ArchiveKind::K_GNU),
+ "bsd" => Ok(ArchiveKind::K_BSD),
+ "darwin" => Ok(ArchiveKind::K_DARWIN),
+ "coff" => Ok(ArchiveKind::K_COFF),
+ _ => Err(()),
+ }
+ }
+}
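+
+// Illustrative usage (sketch): `"gnu".parse::<ArchiveKind>()` yields
+// `Ok(ArchiveKind::K_GNU)`, while any unrecognized string yields `Err(())`.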
+
+pub fn SetInstructionCallConv(instr: &Value, cc: CallConv) {
+ unsafe {
+ LLVMSetInstructionCallConv(instr, cc as c_uint);
+ }
+}
+pub fn SetFunctionCallConv(fn_: &Value, cc: CallConv) {
+ unsafe {
+ LLVMSetFunctionCallConv(fn_, cc as c_uint);
+ }
+}
+
+// Externally visible symbols that might appear in multiple codegen units need to appear in
+// their own comdat section so that the duplicates can be discarded at link time. This can for
+// example happen for generics when using multiple codegen units. This function simply uses the
+// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the
+// function.
+// For more details on COMDAT sections see e.g., https://www.airs.com/blog/archives/52
+pub fn SetUniqueComdat(llmod: &Module, val: &Value) {
+ unsafe {
+ let name = get_value_name(val);
+ LLVMRustSetComdat(llmod, val, name.as_ptr().cast(), name.len());
+ }
+}
+
+pub fn SetUnnamedAddress(global: &Value, unnamed: UnnamedAddr) {
+ unsafe {
+ LLVMSetUnnamedAddress(global, unnamed);
+ }
+}
+
+pub fn set_thread_local_mode(global: &Value, mode: ThreadLocalMode) {
+ unsafe {
+ LLVMSetThreadLocalMode(global, mode);
+ }
+}
+
+impl AttributeKind {
+ /// Create an LLVM Attribute with no associated value.
+ pub fn create_attr(self, llcx: &Context) -> &Attribute {
+ unsafe { LLVMRustCreateAttrNoValue(llcx, self) }
+ }
+}
+
+pub fn set_section(llglobal: &Value, section_name: &str) {
+ let section_name_cstr = CString::new(section_name).expect("unexpected CString error");
+ unsafe {
+ LLVMSetSection(llglobal, section_name_cstr.as_ptr());
+ }
+}
+
+pub fn add_global<'a>(llmod: &'a Module, ty: &'a Type, name: &str) -> &'a Value {
+ let name_cstr = CString::new(name).expect("unexpected CString error");
+ unsafe { LLVMAddGlobal(llmod, ty, name_cstr.as_ptr()) }
+}
+
+pub fn set_initializer(llglobal: &Value, constant_val: &Value) {
+ unsafe {
+ LLVMSetInitializer(llglobal, constant_val);
+ }
+}
+
+pub fn set_global_constant(llglobal: &Value, is_constant: bool) {
+ unsafe {
+ LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False });
+ }
+}
+
+pub fn set_linkage(llglobal: &Value, linkage: Linkage) {
+ unsafe {
+ LLVMRustSetLinkage(llglobal, linkage);
+ }
+}
+
+pub fn set_visibility(llglobal: &Value, visibility: Visibility) {
+ unsafe {
+ LLVMRustSetVisibility(llglobal, visibility);
+ }
+}
+
+pub fn set_alignment(llglobal: &Value, bytes: usize) {
+ unsafe {
+ ffi::LLVMSetAlignment(llglobal, bytes as c_uint);
+ }
+}
+
+pub fn set_comdat(llmod: &Module, llglobal: &Value, name: &str) {
+ unsafe {
+ LLVMRustSetComdat(llmod, llglobal, name.as_ptr().cast(), name.len());
+ }
+}
+
+/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun.
+pub fn get_param(llfn: &Value, index: c_uint) -> &Value {
+ unsafe {
+ assert!(
+ index < LLVMCountParams(llfn),
+ "out of bounds argument access: {} out of {} arguments",
+ index,
+ LLVMCountParams(llfn)
+ );
+ LLVMGetParam(llfn, index)
+ }
+}
+
+/// Safe wrapper for `LLVMGetValueName2` into a byte slice
+pub fn get_value_name(value: &Value) -> &[u8] {
+ unsafe {
+ let mut len = 0;
+ let data = LLVMGetValueName2(value, &mut len);
+ std::slice::from_raw_parts(data.cast(), len)
+ }
+}
+
+/// Safe wrapper for `LLVMSetValueName2` from a byte slice
+pub fn set_value_name(value: &Value, name: &[u8]) {
+ unsafe {
+ let data = name.as_ptr().cast();
+ LLVMSetValueName2(value, data, name.len());
+ }
+}
+
+pub fn build_string(f: impl FnOnce(&RustString)) -> Result<String, FromUtf8Error> {
+ let sr = RustString { bytes: RefCell::new(Vec::new()) };
+ f(&sr);
+ String::from_utf8(sr.bytes.into_inner())
+}
+
+pub fn build_byte_buffer(f: impl FnOnce(&RustString)) -> Vec<u8> {
+ let sr = RustString { bytes: RefCell::new(Vec::new()) };
+ f(&sr);
+ sr.bytes.into_inner()
+}
+
+pub fn twine_to_string(tr: &Twine) -> String {
+ unsafe {
+ build_string(|s| LLVMRustWriteTwineToString(tr, s)).expect("got a non-UTF8 Twine from LLVM")
+ }
+}
+
+pub fn last_error() -> Option<String> {
+ unsafe {
+ let cstr = LLVMRustGetLastError();
+ if cstr.is_null() {
+ None
+ } else {
+ let err = CStr::from_ptr(cstr).to_bytes();
+ let err = String::from_utf8_lossy(err).to_string();
+ libc::free(cstr as *mut _);
+ Some(err)
+ }
+ }
+}
+
+pub struct OperandBundleDef<'a> {
+ pub raw: &'a mut ffi::OperandBundleDef<'a>,
+}
+
+impl<'a> OperandBundleDef<'a> {
+ pub fn new(name: &str, vals: &[&'a Value]) -> Self {
+ let name = SmallCStr::new(name);
+ let def = unsafe {
+ LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)
+ };
+ OperandBundleDef { raw: def }
+ }
+}
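+
+// Illustrative usage (sketch; `pad_value` is a hypothetical value): a
+// call-site bundle such as `OperandBundleDef::new("funclet", &[pad_value])`.
+// Dropping the wrapper frees the LLVM-side object via the Drop impl below.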
+
+impl Drop for OperandBundleDef<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _));
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
new file mode 100644
index 000000000..a0a640473
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -0,0 +1,562 @@
+use crate::back::write::create_informational_target_machine;
+use crate::{llvm, llvm_util};
+use libc::c_int;
+use libloading::Library;
+use rustc_codegen_ssa::target_features::{
+ supported_target_features, tied_target_features, RUSTC_SPECIFIC_FEATURES,
+};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_fs_util::path_to_c_string;
+use rustc_middle::bug;
+use rustc_session::config::PrintRequest;
+use rustc_session::Session;
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::{MergeFunctions, PanicStrategy};
+use smallvec::{smallvec, SmallVec};
+use std::ffi::{CStr, CString};
+use tracing::debug;
+
+use std::mem;
+use std::path::Path;
+use std::ptr;
+use std::slice;
+use std::str;
+use std::sync::Once;
+
+static INIT: Once = Once::new();
+
+pub(crate) fn init(sess: &Session) {
+ unsafe {
+ // Before we touch LLVM, make sure that multithreading is enabled.
+ if llvm::LLVMIsMultithreaded() != 1 {
+ bug!("LLVM compiled without support for threads");
+ }
+ INIT.call_once(|| {
+ configure_llvm(sess);
+ });
+ }
+}
+
+fn require_inited() {
+ if !INIT.is_completed() {
+ bug!("LLVM is not initialized");
+ }
+}
+
+unsafe fn configure_llvm(sess: &Session) {
+ let n_args = sess.opts.cg.llvm_args.len() + sess.target.llvm_args.len();
+ let mut llvm_c_strs = Vec::with_capacity(n_args + 1);
+ let mut llvm_args = Vec::with_capacity(n_args + 1);
+
+ llvm::LLVMRustInstallFatalErrorHandler();
+ // On Windows, an LLVM assertion will open an Abort/Retry/Ignore dialog
+ // box for the purpose of launching a debugger. However, on CI this will
+ // cause it to hang until it times out, which can take several hours.
+ if std::env::var_os("CI").is_some() {
+ llvm::LLVMRustDisableSystemDialogsOnCrash();
+ }
+
+ fn llvm_arg_to_arg_name(full_arg: &str) -> &str {
+ full_arg.trim().split(|c: char| c == '=' || c.is_whitespace()).next().unwrap_or("")
+ }
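+ // e.g. (illustrative): `llvm_arg_to_arg_name("-foo=bar")` yields `"-foo"`,
+ // and `llvm_arg_to_arg_name(" -time-passes ")` yields `"-time-passes"`.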
+
+ let cg_opts = sess.opts.cg.llvm_args.iter().map(AsRef::as_ref);
+ let tg_opts = sess.target.llvm_args.iter().map(AsRef::as_ref);
+ let sess_args = cg_opts.chain(tg_opts);
+
+ let user_specified_args: FxHashSet<_> =
+ sess_args.clone().map(|s| llvm_arg_to_arg_name(s)).filter(|s| !s.is_empty()).collect();
+
+ {
+ // This adds the given argument to LLVM. Unless `force` is true,
+ // user-specified arguments are *not* overridden.
+ let mut add = |arg: &str, force: bool| {
+ if force || !user_specified_args.contains(llvm_arg_to_arg_name(arg)) {
+ let s = CString::new(arg).unwrap();
+ llvm_args.push(s.as_ptr());
+ llvm_c_strs.push(s);
+ }
+ };
+ // Set the llvm "program name" to make usage and invalid-argument messages clearer.
+ add("rustc -Cllvm-args=\"...\" with", true);
+ if sess.time_llvm_passes() {
+ add("-time-passes", false);
+ }
+ if sess.print_llvm_passes() {
+ add("-debug-pass=Structure", false);
+ }
+ if sess.target.generate_arange_section
+ && !sess.opts.unstable_opts.no_generate_arange_section
+ {
+ add("-generate-arange-section", false);
+ }
+
+ // Disable the machine outliner by default in LLVM versions 11 and 12,
+ // where it leads to miscompilation.
+ //
+ // Ref:
+ // - https://github.com/rust-lang/rust/issues/85351
+ // - https://reviews.llvm.org/D103167
+ if llvm_util::get_version() < (13, 0, 0) {
+ add("-enable-machine-outliner=never", false);
+ }
+
+ match sess.opts.unstable_opts.merge_functions.unwrap_or(sess.target.merge_functions) {
+ MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
+ MergeFunctions::Aliases => {
+ add("-mergefunc-use-aliases", false);
+ }
+ }
+
+ if sess.target.os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind {
+ add("-enable-emscripten-cxx-exceptions", false);
+ }
+
+ // HACK(eddyb) LLVM inserts `llvm.assume` calls to preserve align attributes
+ // during inlining. Unfortunately these may block other optimizations.
+ add("-preserve-alignment-assumptions-during-inlining=false", false);
+
+ // Use non-zero `import-instr-limit` multiplier for cold callsites.
+ add("-import-cold-multiplier=0.1", false);
+
+ for arg in sess_args {
+ add(&(*arg), true);
+ }
+ }
+
+ if sess.opts.unstable_opts.llvm_time_trace {
+ llvm::LLVMTimeTraceProfilerInitialize();
+ }
+
+ llvm::LLVMInitializePasses();
+
+ // Use the legacy plugin registration if we don't use the new pass manager
+ if !should_use_new_llvm_pass_manager(
+ &sess.opts.unstable_opts.new_llvm_pass_manager,
+ &sess.target.arch,
+ ) {
+ // Register LLVM plugins by loading them into the compiler process.
+ for plugin in &sess.opts.unstable_opts.llvm_plugins {
+ let lib = Library::new(plugin).unwrap_or_else(|e| bug!("couldn't load plugin: {}", e));
+ debug!("LLVM plugin loaded successfully {:?} ({})", lib, plugin);
+
+ // Intentionally leak the dynamic library. We can't ever unload it
+ // since the library can make things that will live arbitrarily long.
+ mem::forget(lib);
+ }
+ }
+
+ rustc_llvm::initialize_available_targets();
+
+ llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
+}
+
+pub fn time_trace_profiler_finish(file_name: &Path) {
+ unsafe {
+ let file_name = path_to_c_string(file_name);
+ llvm::LLVMTimeTraceProfilerFinish(file_name.as_ptr());
+ }
+}
+
+// WARNING: the features after applying `to_llvm_features` must be known
+// to LLVM or the feature detection code will walk past the end of the feature
+// array, leading to crashes.
+//
+// To find a list of LLVM's names, check llvm-project/llvm/include/llvm/Support/*TargetParser.def
+// where the * matches the architecture's name.
+// Be careful not to use the LLVM GitHub project for this; check the git
+// submodule found in src/llvm-project instead.
+// Note, though, that Rust can also be built with an external precompiled
+// version of LLVM, which might lead to failures if the oldest tested /
+// supported LLVM version doesn't yet support the relevant intrinsics.
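+//
+// Illustrative mappings (sketch): on aarch64, "fp16" expands to
+// ["fullfp16", "neon"], on x86 "pclmulqdq" becomes ["pclmul"], and names
+// without a special case fall through unchanged.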
+pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]> {
+ let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch };
+ match (arch, s) {
+ ("x86", "sse4.2") => {
+ if get_version() >= (14, 0, 0) {
+ smallvec!["sse4.2", "crc32"]
+ } else {
+ smallvec!["sse4.2"]
+ }
+ }
+ ("x86", "pclmulqdq") => smallvec!["pclmul"],
+ ("x86", "rdrand") => smallvec!["rdrnd"],
+ ("x86", "bmi1") => smallvec!["bmi"],
+ ("x86", "cmpxchg16b") => smallvec!["cx16"],
+ ("x86", "avx512vaes") => smallvec!["vaes"],
+ ("x86", "avx512gfni") => smallvec!["gfni"],
+ ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"],
+ ("aarch64", "rcpc2") => smallvec!["rcpc-immo"],
+ ("aarch64", "dpb") => smallvec!["ccpp"],
+ ("aarch64", "dpb2") => smallvec!["ccdp"],
+ ("aarch64", "frintts") => smallvec!["fptoint"],
+ ("aarch64", "fcma") => smallvec!["complxnum"],
+ ("aarch64", "pmuv3") => smallvec!["perfmon"],
+ ("aarch64", "paca") => smallvec!["pauth"],
+ ("aarch64", "pacg") => smallvec!["pauth"],
+ // Rust ties fp and neon together. In LLVM neon implicitly enables fp,
+ // but we manually enable neon when a feature only implicitly enables fp
+ ("aarch64", "f32mm") => smallvec!["f32mm", "neon"],
+ ("aarch64", "f64mm") => smallvec!["f64mm", "neon"],
+ ("aarch64", "fhm") => smallvec!["fp16fml", "neon"],
+ ("aarch64", "fp16") => smallvec!["fullfp16", "neon"],
+ ("aarch64", "jsconv") => smallvec!["jsconv", "neon"],
+ ("aarch64", "sve") => smallvec!["sve", "neon"],
+ ("aarch64", "sve2") => smallvec!["sve2", "neon"],
+ ("aarch64", "sve2-aes") => smallvec!["sve2-aes", "neon"],
+ ("aarch64", "sve2-sm4") => smallvec!["sve2-sm4", "neon"],
+ ("aarch64", "sve2-sha3") => smallvec!["sve2-sha3", "neon"],
+ ("aarch64", "sve2-bitperm") => smallvec!["sve2-bitperm", "neon"],
+ (_, s) => smallvec![s],
+ }
+}
+
+// Given a map from target_features to whether they are enabled or disabled,
+// ensure only valid combinations are allowed.
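+//
+// Illustrative check (sketch): with a tied pair like aarch64's
+// ["paca", "pacg"], a map containing only `{"paca": true}` fails because
+// "pacg" is unset; enabling both, disabling both, or leaving both unset passes.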
+pub fn check_tied_features(
+ sess: &Session,
+ features: &FxHashMap<&str, bool>,
+) -> Option<&'static [&'static str]> {
+ if !features.is_empty() {
+ for tied in tied_target_features(sess) {
+ // Tied features must be set to the same value, or not set at all
+ let mut tied_iter = tied.iter();
+ let enabled = features.get(tied_iter.next().unwrap());
+ if tied_iter.any(|f| enabled != features.get(f)) {
+ return Some(tied);
+ }
+ }
+ }
+ None
+}
+
+// Used to generate cfg variables and apply features
+// Must express features in the way Rust understands them
+pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
+ let target_machine = create_informational_target_machine(sess);
+ let mut features: Vec<Symbol> = supported_target_features(sess)
+ .iter()
+ .filter_map(|&(feature, gate)| {
+ if sess.is_nightly_build() || allow_unstable || gate.is_none() {
+ Some(feature)
+ } else {
+ None
+ }
+ })
+ .filter(|feature| {
+ // check that all features in a given smallvec are enabled
+ for llvm_feature in to_llvm_features(sess, feature) {
+ let cstr = SmallCStr::new(llvm_feature);
+ if !unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } {
+ return false;
+ }
+ }
+ true
+ })
+ .map(|feature| Symbol::intern(feature))
+ .collect();
+
+ // LLVM 14 changed the ABI for i128 arguments to __float/__fix builtins on Win64
+ // (see https://reviews.llvm.org/D110413). This unstable target feature is intended for use
+ // by compiler-builtins, to export the builtins with the expected, LLVM-version-dependent ABI.
+ // The target feature can be dropped once we no longer support older LLVM versions.
+ if sess.is_nightly_build() && get_version() >= (14, 0, 0) {
+ features.push(Symbol::intern("llvm14-builtins-abi"));
+ }
+ features
+}
+
+pub fn print_version() {
+ let (major, minor, patch) = get_version();
+ println!("LLVM version: {}.{}.{}", major, minor, patch);
+}
+
+pub fn get_version() -> (u32, u32, u32) {
+ // Can be called without initializing LLVM
+ unsafe {
+ (llvm::LLVMRustVersionMajor(), llvm::LLVMRustVersionMinor(), llvm::LLVMRustVersionPatch())
+ }
+}
+
+pub fn print_passes() {
+ // Can be called without initializing LLVM
+ unsafe {
+ llvm::LLVMRustPrintPasses();
+ }
+}
+
+fn llvm_target_features(tm: &llvm::TargetMachine) -> Vec<(&str, &str)> {
+ let len = unsafe { llvm::LLVMRustGetTargetFeaturesCount(tm) };
+ let mut ret = Vec::with_capacity(len);
+ for i in 0..len {
+ unsafe {
+ let mut feature = ptr::null();
+ let mut desc = ptr::null();
+ llvm::LLVMRustGetTargetFeature(tm, i, &mut feature, &mut desc);
+ if feature.is_null() || desc.is_null() {
+ bug!("LLVM returned a `null` target feature string");
+ }
+ let feature = CStr::from_ptr(feature).to_str().unwrap_or_else(|e| {
+ bug!("LLVM returned a non-utf8 feature string: {}", e);
+ });
+ let desc = CStr::from_ptr(desc).to_str().unwrap_or_else(|e| {
+ bug!("LLVM returned a non-utf8 feature string: {}", e);
+ });
+ ret.push((feature, desc));
+ }
+ }
+ ret
+}
+
+fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) {
+ let mut target_features = llvm_target_features(tm);
+ let mut rustc_target_features = supported_target_features(sess)
+ .iter()
+ .filter_map(|(feature, _gate)| {
+ for llvm_feature in to_llvm_features(sess, *feature) {
+ // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings.
+ match target_features.binary_search_by_key(&llvm_feature, |(f, _d)| f).ok().map(
+ |index| {
+ let (_f, desc) = target_features.remove(index);
+ (*feature, desc)
+ },
+ ) {
+ Some(v) => return Some(v),
+ None => {}
+ }
+ }
+ None
+ })
+ .collect::<Vec<_>>();
+ rustc_target_features.extend_from_slice(&[(
+ "crt-static",
+ "Enables C Run-time Libraries to be statically linked",
+ )]);
+ let max_feature_len = target_features
+ .iter()
+ .chain(rustc_target_features.iter())
+ .map(|(feature, _desc)| feature.len())
+ .max()
+ .unwrap_or(0);
+
+ println!("Features supported by rustc for this target:");
+ for (feature, desc) in &rustc_target_features {
+ println!(" {1:0$} - {2}.", max_feature_len, feature, desc);
+ }
+ println!("\nCode-generation features supported by LLVM for this target:");
+ for (feature, desc) in &target_features {
+ println!(" {1:0$} - {2}.", max_feature_len, feature, desc);
+ }
+ if target_features.is_empty() {
+ println!(" Target features listing is not supported by this LLVM version.");
+ }
+ println!("\nUse +feature to enable a feature, or -feature to disable it.");
+ println!("For example, rustc -C target-cpu=mycpu -C target-feature=+feature1,-feature2\n");
+ println!("Code-generation features cannot be used in cfg or #[target_feature],");
+ println!("and may be renamed or removed in a future version of LLVM or rustc.\n");
+}
+
+pub(crate) fn print(req: PrintRequest, sess: &Session) {
+ require_inited();
+ let tm = create_informational_target_machine(sess);
+ match req {
+ PrintRequest::TargetCPUs => unsafe { llvm::LLVMRustPrintTargetCPUs(tm) },
+ PrintRequest::TargetFeatures => print_target_features(sess, tm),
+ _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
+ }
+}
+
+fn handle_native(name: &str) -> &str {
+ if name != "native" {
+ return name;
+ }
+
+ unsafe {
+ let mut len = 0;
+ let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
+ str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
+ }
+}
+
+pub fn target_cpu(sess: &Session) -> &str {
+ match sess.opts.cg.target_cpu {
+ Some(ref name) => handle_native(name),
+ None => handle_native(sess.target.cpu.as_ref()),
+ }
+}
+
+/// The list of LLVM features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
+/// `--target` and similar).
+pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<String> {
+ // Features that come earlier are overridden by conflicting features later in the string.
+ // Typically we'll want more explicit settings to override the implicit ones, so:
+ //
+ // * Features from -Ctarget-cpu=*; are overridden by [^1]
+ // * Features implied by --target; are overridden by
+ // * Features from -Ctarget-feature; are overridden by
+ // * function specific features.
+ //
+ // [^1]: target-cpu=native is handled here, other target-cpu values are handled implicitly
+ // through LLVM TargetMachine implementation.
+ //
+ // FIXME(nagisa): it isn't clear what the best interaction between features
+ // implied by `-Ctarget-cpu` and `--target` is. On one hand, you'd expect CLI
+ // arguments to always override anything that's implicit, so e.g. when there's
+ // no `--target` flag, features implied by the host target are overridden by
+ // `-Ctarget-cpu=*`. On the other hand, what about when both `--target` and
+ // `-Ctarget-cpu=*` are specified? Both then imply some target features, and
+ // both flags are specified by the user on the CLI. It isn't as clear-cut
+ // which order of precedence should be taken in cases like these.
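+ //
+ // Illustrative outcome (sketch): if the target spec implies `+sse2` and the
+ // user passes `-Ctarget-feature=-sse2`, the CLI entry lands later in the
+ // list, so the conflicting later setting wins and sse2 ends up disabled.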
+ let mut features = vec![];
+
+ // -Ctarget-cpu=native
+ match sess.opts.cg.target_cpu {
+ Some(ref s) if s == "native" => {
+ let features_string = unsafe {
+ let ptr = llvm::LLVMGetHostCPUFeatures();
+ let features_string = if !ptr.is_null() {
+ CStr::from_ptr(ptr)
+ .to_str()
+ .unwrap_or_else(|e| {
+ bug!("LLVM returned a non-utf8 features string: {}", e);
+ })
+ .to_owned()
+ } else {
+ bug!("could not allocate host CPU features, LLVM returned a `null` string");
+ };
+
+ llvm::LLVMDisposeMessage(ptr);
+
+ features_string
+ };
+ features.extend(features_string.split(',').map(String::from));
+ }
+ Some(_) | None => {}
+ };
+
+ // Features implied by an implicit or explicit `--target`.
+ features.extend(
+ sess.target
+ .features
+ .split(',')
+ .filter(|v| !v.is_empty() && backend_feature_name(v).is_some())
+ .map(String::from),
+ );
+
+ // -Ctarget-feature
+ let supported_features = supported_target_features(sess);
+ let mut featsmap = FxHashMap::default();
+ let feats = sess
+ .opts
+ .cg
+ .target_feature
+ .split(',')
+ .filter_map(|s| {
+ let enable_disable = match s.chars().next() {
+ None => return None,
+ Some(c @ '+' | c @ '-') => c,
+ Some(_) => {
+ if diagnostics {
+ let mut diag = sess.struct_warn(&format!(
+ "unknown feature specified for `-Ctarget-feature`: `{}`",
+ s
+ ));
+ diag.note("features must begin with a `+` to enable or `-` to disable it");
+ diag.emit();
+ }
+ return None;
+ }
+ };
+
+ let feature = backend_feature_name(s)?;
+ // Warn against use of LLVM specific feature names on the CLI.
+ if diagnostics && !supported_features.iter().any(|&(v, _)| v == feature) {
+ let rust_feature = supported_features.iter().find_map(|&(rust_feature, _)| {
+ let llvm_features = to_llvm_features(sess, rust_feature);
+ if llvm_features.contains(&feature) && !llvm_features.contains(&rust_feature) {
+ Some(rust_feature)
+ } else {
+ None
+ }
+ });
+ let mut diag = sess.struct_warn(&format!(
+ "unknown feature specified for `-Ctarget-feature`: `{}`",
+ feature
+ ));
+ diag.note("it is still passed through to the codegen backend");
+ if let Some(rust_feature) = rust_feature {
+ diag.help(&format!("you might have meant: `{}`", rust_feature));
+ } else {
+ diag.note("consider filing a feature request");
+ }
+ diag.emit();
+ }
+
+ if diagnostics {
+ // FIXME(nagisa): figure out how to not allocate a full hashset here.
+ featsmap.insert(feature, enable_disable == '+');
+ }
+
+ // rustc-specific features do not get passed down to LLVM…
+ if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+ return None;
+ }
+ // ... otherwise we run them through `to_llvm_features` when
+ // passing requests down to LLVM. This means that all in-language
+ // features also work on the command line instead of having two
+ // different names when the LLVM name and the Rust name differ.
+ Some(
+ to_llvm_features(sess, feature)
+ .into_iter()
+ .map(move |f| format!("{}{}", enable_disable, f)),
+ )
+ })
+ .flatten();
+ features.extend(feats);
+
+ if diagnostics && let Some(f) = check_tied_features(sess, &featsmap) {
+ sess.err(&format!(
+ "target features {} must all be enabled or disabled together",
+ f.join(", ")
+ ));
+ }
+
+ features
+}
+
+/// Returns a feature name for the given `+feature` or `-feature` string.
+///
+/// Only allows features that are backend-specific (i.e., not [`RUSTC_SPECIFIC_FEATURES`]).
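+///
+/// Illustrative behavior (sketch): `backend_feature_name("+sse2")` returns
+/// `Some("sse2")`, while `backend_feature_name("+crt-static")` returns `None`,
+/// since `crt-static` is rustc-specific and never reaches LLVM.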
+fn backend_feature_name(s: &str) -> Option<&str> {
+ // features must start with a `+` or `-`.
+ let feature = s.strip_prefix(&['+', '-'][..]).unwrap_or_else(|| {
+ bug!("target feature `{}` must begin with a `+` or `-`", s);
+ });
+ // Rustc-specific feature requests like `+crt-static` or `-crt-static`
+ // are not passed down to LLVM.
+ if RUSTC_SPECIFIC_FEATURES.contains(&feature) {
+ return None;
+ }
+ Some(feature)
+}
+
+pub fn tune_cpu(sess: &Session) -> Option<&str> {
+ let name = sess.opts.unstable_opts.tune_cpu.as_ref()?;
+ Some(handle_native(name))
+}
+
+pub(crate) fn should_use_new_llvm_pass_manager(user_opt: &Option<bool>, target_arch: &str) -> bool {
+ // The new pass manager is enabled by default for LLVM >= 13.
+ // This matches Clang, which also enables it since Clang 13.
+
+ // Since LLVM 15, the legacy pass manager is no longer supported.
+ if llvm_util::get_version() >= (15, 0, 0) {
+ return true;
+ }
+
+ // There are some perf issues with the new pass manager when targeting
+ // s390x with LLVM 13, so enable the new pass manager only with LLVM 14.
+ // See https://github.com/rust-lang/rust/issues/89609.
+ let min_version = if target_arch == "s390x" { 14 } else { 13 };
+ user_opt.unwrap_or_else(|| llvm_util::get_version() >= (min_version, 0, 0))
+}
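+
+// Illustrative decisions (sketch): with LLVM >= 15 the function returns `true`
+// regardless of `user_opt`; with LLVM 13 on s390x the default is `false`
+// (the minimum there is 14), though an explicit user opt-in still wins.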
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
new file mode 100644
index 000000000..6e9428485
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -0,0 +1,150 @@
+use crate::attributes;
+use crate::base;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::type_of::LayoutLlvmExt;
+use rustc_codegen_ssa::traits::*;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+pub use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_middle::ty::{self, Instance, TypeVisitable};
+use rustc_session::config::CrateType;
+use rustc_target::spec::RelocModel;
+use tracing::debug;
+
+impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> {
+ fn predefine_static(
+ &self,
+ def_id: DefId,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ ) {
+ let instance = Instance::mono(self.tcx, def_id);
+ let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+ let llty = self.layout_of(ty).llvm_type(self);
+
+ let g = self.define_global(symbol_name, llty).unwrap_or_else(|| {
+ self.sess().span_fatal(
+ self.tcx.def_span(def_id),
+ &format!("symbol `{}` is already defined", symbol_name),
+ )
+ });
+
+ unsafe {
+ llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage));
+ llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility));
+ if self.should_assume_dso_local(g, false) {
+ llvm::LLVMRustSetDSOLocal(g, true);
+ }
+ }
+
+ self.instances.borrow_mut().insert(instance, g);
+ }
+
+ fn predefine_fn(
+ &self,
+ instance: Instance<'tcx>,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ ) {
+ assert!(!instance.substs.needs_infer());
+
+ let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+ let lldecl = self.declare_fn(symbol_name, fn_abi);
+ unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
+ let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
+ base::set_link_section(lldecl, attrs);
+ if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
+ llvm::SetUniqueComdat(self.llmod, lldecl);
+ }
+
+ // If we're compiling the compiler-builtins crate, e.g., the equivalent of
+ // compiler-rt, then we want to implicitly compile everything with hidden
+ // visibility as we're going to link this object all over the place but
+ // don't want the symbols to get exported.
+ if linkage != Linkage::Internal
+ && linkage != Linkage::Private
+ && self.tcx.is_compiler_builtins(LOCAL_CRATE)
+ {
+ unsafe {
+ llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden);
+ }
+ } else {
+ unsafe {
+ llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility));
+ }
+ }
+
+ debug!("predefine_fn: instance = {:?}", instance);
+
+ attributes::from_fn_attrs(self, lldecl, instance);
+
+ unsafe {
+ if self.should_assume_dso_local(lldecl, false) {
+ llvm::LLVMRustSetDSOLocal(lldecl, true);
+ }
+ }
+
+ self.instances.borrow_mut().insert(instance, lldecl);
+ }
+}
+
+impl CodegenCx<'_, '_> {
+ /// Whether a definition or declaration can be assumed to be local to a group of
+ /// libraries that form a single DSO or executable.
+ pub(crate) unsafe fn should_assume_dso_local(
+ &self,
+ llval: &llvm::Value,
+ is_declaration: bool,
+ ) -> bool {
+ let linkage = llvm::LLVMRustGetLinkage(llval);
+ let visibility = llvm::LLVMRustGetVisibility(llval);
+
+ if matches!(linkage, llvm::Linkage::InternalLinkage | llvm::Linkage::PrivateLinkage) {
+ return true;
+ }
+
+ if visibility != llvm::Visibility::Default && linkage != llvm::Linkage::ExternalWeakLinkage
+ {
+ return true;
+ }
+
+ // Symbols from executables can't really be imported any further.
+ let all_exe = self.tcx.sess.crate_types().iter().all(|ty| *ty == CrateType::Executable);
+ let is_declaration_for_linker =
+ is_declaration || linkage == llvm::Linkage::AvailableExternallyLinkage;
+ if all_exe && !is_declaration_for_linker {
+ return true;
+ }
+
+ // PowerPC64 prefers TOC indirection to avoid copy relocations.
+ if matches!(&*self.tcx.sess.target.arch, "powerpc64" | "powerpc64le") {
+ return false;
+ }
+
+ // Thread-local variables generally don't support copy relocations.
+ let is_thread_local_var = llvm::LLVMIsAGlobalVariable(llval)
+ .map(|v| llvm::LLVMIsThreadLocal(v) == llvm::True)
+ .unwrap_or(false);
+ if is_thread_local_var {
+ return false;
+ }
+
+ // Match clang by only supporting COFF and ELF for now.
+ if self.tcx.sess.target.is_like_osx {
+ return false;
+ }
+
+ // Static relocation model should force copy relocations everywhere.
+ if self.tcx.sess.relocation_model() == RelocModel::Static {
+ return true;
+ }
+
+ // With the PIE relocation model, calls to functions defined in the
+ // translation unit can use copy relocations.
+ self.tcx.sess.relocation_model() == RelocModel::Pie && !is_declaration
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
new file mode 100644
index 000000000..eeb38d4ec
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -0,0 +1,319 @@
+pub use crate::llvm::Type;
+
+use crate::abi::{FnAbiLlvmExt, LlvmType};
+use crate::common;
+use crate::context::CodegenCx;
+use crate::llvm;
+use crate::llvm::{Bool, False, True};
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use rustc_codegen_ssa::common::TypeKind;
+use rustc_codegen_ssa::traits::*;
+use rustc_data_structures::small_c_str::SmallCStr;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
+use rustc_target::abi::{AddressSpace, Align, Integer, Size};
+
+use std::fmt;
+use std::ptr;
+
+use libc::{c_char, c_uint};
+
+impl PartialEq for Type {
+ fn eq(&self, other: &Self) -> bool {
+ ptr::eq(self, other)
+ }
+}
+
+impl fmt::Debug for Type {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(
+ &llvm::build_string(|s| unsafe {
+ llvm::LLVMRustWriteTypeToString(self, s);
+ })
+ .expect("non-UTF8 type description from LLVM"),
+ )
+ }
+}
+
+impl<'ll> CodegenCx<'ll, '_> {
+ pub(crate) fn type_named_struct(&self, name: &str) -> &'ll Type {
+ let name = SmallCStr::new(name);
+ unsafe { llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr()) }
+ }
+
+ pub(crate) fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) {
+ unsafe { llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) }
+ }
+
+ pub(crate) fn type_void(&self) -> &'ll Type {
+ unsafe { llvm::LLVMVoidTypeInContext(self.llcx) }
+ }
+
+ pub(crate) fn type_metadata(&self) -> &'ll Type {
+ unsafe { llvm::LLVMRustMetadataTypeInContext(self.llcx) }
+ }
+
+ /// Creates an integer type with the given number of bits, e.g., i24.
+ pub(crate) fn type_ix(&self, num_bits: u64) -> &'ll Type {
+ unsafe { llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) }
+ }
+
+ pub(crate) fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+ unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
+ }
+
+ pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
+ unsafe {
+ let n_args = llvm::LLVMCountParamTypes(ty) as usize;
+ let mut args = Vec::with_capacity(n_args);
+ llvm::LLVMGetParamTypes(ty, args.as_mut_ptr());
+ args.set_len(n_args);
+ args
+ }
+ }
+
+ pub(crate) fn type_bool(&self) -> &'ll Type {
+ self.type_i8()
+ }
+
+ pub(crate) fn type_int_from_ty(&self, t: ty::IntTy) -> &'ll Type {
+ match t {
+ ty::IntTy::Isize => self.type_isize(),
+ ty::IntTy::I8 => self.type_i8(),
+ ty::IntTy::I16 => self.type_i16(),
+ ty::IntTy::I32 => self.type_i32(),
+ ty::IntTy::I64 => self.type_i64(),
+ ty::IntTy::I128 => self.type_i128(),
+ }
+ }
+
+ pub(crate) fn type_uint_from_ty(&self, t: ty::UintTy) -> &'ll Type {
+ match t {
+ ty::UintTy::Usize => self.type_isize(),
+ ty::UintTy::U8 => self.type_i8(),
+ ty::UintTy::U16 => self.type_i16(),
+ ty::UintTy::U32 => self.type_i32(),
+ ty::UintTy::U64 => self.type_i64(),
+ ty::UintTy::U128 => self.type_i128(),
+ }
+ }
+
+ pub(crate) fn type_float_from_ty(&self, t: ty::FloatTy) -> &'ll Type {
+ match t {
+ ty::FloatTy::F32 => self.type_f32(),
+ ty::FloatTy::F64 => self.type_f64(),
+ }
+ }
+
+ pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
+ // FIXME(eddyb) We could find a better approximation if ity.align < align.
+ let ity = Integer::approximate_align(self, align);
+ self.type_from_integer(ity)
+ }
+
+ /// Return an LLVM type that has at most the required alignment,
+ /// and exactly the required size, as a best-effort padding array.
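+ ///
+ /// Worked example (illustrative): `size = 12` bytes with 4-byte alignment
+ /// picks an `i32` unit, yielding the filler array `[3 x i32]`.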
+ pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
+ let unit = Integer::approximate_align(self, align);
+ let size = size.bytes();
+ let unit_size = unit.size().bytes();
+ assert_eq!(size % unit_size, 0);
+ self.type_array(self.type_from_integer(unit), size / unit_size)
+ }
+
+ pub(crate) fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
+ unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) }
+ }
+
+ pub(crate) fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+ unsafe { llvm::LLVMRustArrayType(ty, len) }
+ }
+}
+
+impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn type_i1(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt1TypeInContext(self.llcx) }
+ }
+
+ fn type_i8(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt8TypeInContext(self.llcx) }
+ }
+
+ fn type_i16(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt16TypeInContext(self.llcx) }
+ }
+
+ fn type_i32(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt32TypeInContext(self.llcx) }
+ }
+
+ fn type_i64(&self) -> &'ll Type {
+ unsafe { llvm::LLVMInt64TypeInContext(self.llcx) }
+ }
+
+ fn type_i128(&self) -> &'ll Type {
+ unsafe { llvm::LLVMIntTypeInContext(self.llcx, 128) }
+ }
+
+ fn type_isize(&self) -> &'ll Type {
+ self.isize_ty
+ }
+
+ fn type_f32(&self) -> &'ll Type {
+ unsafe { llvm::LLVMFloatTypeInContext(self.llcx) }
+ }
+
+ fn type_f64(&self) -> &'ll Type {
+ unsafe { llvm::LLVMDoubleTypeInContext(self.llcx) }
+ }
+
+ fn type_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
+ unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) }
+ }
+
+ fn type_struct(&self, els: &[&'ll Type], packed: bool) -> &'ll Type {
+ unsafe {
+ llvm::LLVMStructTypeInContext(
+ self.llcx,
+ els.as_ptr(),
+ els.len() as c_uint,
+ packed as Bool,
+ )
+ }
+ }
+
+ fn type_kind(&self, ty: &'ll Type) -> TypeKind {
+ unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
+ }
+
+ fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
+ assert_ne!(
+ self.type_kind(ty),
+ TypeKind::Function,
+ "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
+ );
+ ty.ptr_to(AddressSpace::DATA)
+ }
+
+ fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
+ ty.ptr_to(address_space)
+ }
+
+ fn element_type(&self, ty: &'ll Type) -> &'ll Type {
+ match self.type_kind(ty) {
+ TypeKind::Array | TypeKind::Vector => unsafe { llvm::LLVMGetElementType(ty) },
+ TypeKind::Pointer => bug!("element_type is not supported for opaque pointers"),
+ other => bug!("element_type called on unsupported type {:?}", other),
+ }
+ }
+
+ fn vector_length(&self, ty: &'ll Type) -> usize {
+ unsafe { llvm::LLVMGetVectorSize(ty) as usize }
+ }
+
+ fn float_width(&self, ty: &'ll Type) -> usize {
+ match self.type_kind(ty) {
+ TypeKind::Float => 32,
+ TypeKind::Double => 64,
+ TypeKind::X86_FP80 => 80,
+ TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
+ _ => bug!("llvm_float_width called on a non-float type"),
+ }
+ }
+
+ fn int_width(&self, ty: &'ll Type) -> u64 {
+ unsafe { llvm::LLVMGetIntTypeWidth(ty) as u64 }
+ }
+
+ fn val_ty(&self, v: &'ll Value) -> &'ll Type {
+ common::val_ty(v)
+ }
+}
+
+impl Type {
+ pub fn i8_llcx(llcx: &llvm::Context) -> &Type {
+ unsafe { llvm::LLVMInt8TypeInContext(llcx) }
+ }
+
+ // Creates an integer type with the given number of bits, e.g., i24
+ pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type {
+ unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) }
+ }
+
+ pub fn i8p_llcx(llcx: &llvm::Context) -> &Type {
+ Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA)
+ }
+
+ fn ptr_to(&self, address_space: AddressSpace) -> &Type {
+ unsafe { llvm::LLVMPointerType(self, address_space.0) }
+ }
+}
+
+impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
+ layout.llvm_type(self)
+ }
+ fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
+ layout.immediate_llvm_type(self)
+ }
+ fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_llvm_immediate()
+ }
+ fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
+ layout.is_llvm_scalar_pair()
+ }
+ fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
+ layout.llvm_field_index(self, index)
+ }
+ fn scalar_pair_element_backend_type(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ index: usize,
+ immediate: bool,
+ ) -> &'ll Type {
+ layout.scalar_pair_element_llvm_type(self, index, immediate)
+ }
+ fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
+ ty.llvm_type(self)
+ }
+ fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+ fn_abi.llvm_type(self)
+ }
+ fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
+ fn_abi.ptr_to_llvm_type(self)
+ }
+ fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
+ ty.llvm_type(self)
+ }
+}
+
+impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
+ fn set_type_metadata(&self, function: &'ll Value, typeid: String) {
+ let typeid_metadata = self.typeid_metadata(typeid);
+ let v = [self.const_usize(0), typeid_metadata];
+ unsafe {
+ llvm::LLVMGlobalSetMetadata(
+ function,
+ llvm::MD_type as c_uint,
+ llvm::LLVMValueAsMetadata(llvm::LLVMMDNodeInContext(
+ self.llcx,
+ v.as_ptr(),
+ v.len() as c_uint,
+ )),
+ )
+ }
+ }
+
+ fn typeid_metadata(&self, typeid: String) -> &'ll Value {
+ unsafe {
+ llvm::LLVMMDStringInContext(
+ self.llcx,
+ typeid.as_ptr() as *const c_char,
+ typeid.len() as c_uint,
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
new file mode 100644
index 000000000..9f0e6c80b
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -0,0 +1,418 @@
+use crate::common::*;
+use crate::context::TypeLowering;
+use crate::llvm_util::get_version;
+use crate::type_::Type;
+use rustc_codegen_ssa::traits::*;
+use rustc_middle::bug;
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
+use rustc_middle::ty::{self, Ty, TypeVisitable};
+use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
+use rustc_target::abi::{Int, Pointer, F32, F64};
+use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
+use smallvec::{smallvec, SmallVec};
+use tracing::debug;
+
+use std::fmt::Write;
+
+fn uncached_llvm_type<'a, 'tcx>(
+ cx: &CodegenCx<'a, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+ defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
+ field_remapping: &mut Option<SmallVec<[u32; 4]>>,
+) -> &'a Type {
+ match layout.abi {
+ Abi::Scalar(_) => bug!("handled elsewhere"),
+ Abi::Vector { element, count } => {
+ let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
+ return cx.type_vector(element, count);
+ }
+ Abi::ScalarPair(..) => {
+ return cx.type_struct(
+ &[
+ layout.scalar_pair_element_llvm_type(cx, 0, false),
+ layout.scalar_pair_element_llvm_type(cx, 1, false),
+ ],
+ false,
+ );
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {}
+ }
+
+ let name = match layout.ty.kind() {
+ // FIXME(eddyb) producing readable type names for trait objects can result
+ // in problematically distinct types due to HRTB and subtyping (see #47638).
+ // ty::Dynamic(..) |
+ ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
+ // For performance reasons we use names only when emitting LLVM IR. Unless we are on
+ // LLVM < 14, where the use of unnamed types resulted in various issues, e.g., #76213,
+ // #79564, and #79246.
+ if get_version() < (14, 0, 0) || !cx.sess().fewer_names() =>
+ {
+ let mut name = with_no_visible_paths!(with_no_trimmed_paths!(layout.ty.to_string()));
+ if let (&ty::Adt(def, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ if def.is_enum() && !def.variants().is_empty() {
+ write!(&mut name, "::{}", def.variant(index).name).unwrap();
+ }
+ }
+ if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
+ (layout.ty.kind(), &layout.variants)
+ {
+ write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
+ }
+ Some(name)
+ }
+ // Use identified structure types for ADTs. Due to pointee types in LLVM IR,
+ // their definitions might be recursive. Other cases are non-recursive, and
+ // we can use literal structure types.
+ ty::Adt(..) => Some(String::new()),
+ _ => None,
+ };
+
+ match layout.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ let fill = cx.type_padding_filler(layout.size, layout.align.abi);
+ let packed = false;
+ match name {
+ None => cx.type_struct(&[fill], packed),
+ Some(ref name) => {
+ let llty = cx.type_named_struct(name);
+ cx.set_struct_body(llty, &[fill], packed);
+ llty
+ }
+ }
+ }
+ FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
+ FieldsShape::Arbitrary { .. } => match name {
+ None => {
+ let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
+ *field_remapping = new_field_remapping;
+ cx.type_struct(&llfields, packed)
+ }
+ Some(ref name) => {
+ let llty = cx.type_named_struct(name);
+ *defer = Some((llty, layout));
+ llty
+ }
+ },
+ }
+}
+
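+// Worked example (illustrative, not from upstream docs): for a `#[repr(C)]`
+// struct `{ a: u8, b: u32 }` with fields at offsets 0 and 4, the function
+// below produces `[i8, [3 x i8], i32]` and `field_remapping = Some([0, 2])`;
+// index 1 in the LLVM struct is the padding filler.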
+fn struct_llfields<'a, 'tcx>(
+ cx: &CodegenCx<'a, 'tcx>,
+ layout: TyAndLayout<'tcx>,
+) -> (Vec<&'a Type>, bool, Option<SmallVec<[u32; 4]>>) {
+ debug!("struct_llfields: {:#?}", layout);
+ let field_count = layout.fields.count();
+
+ let mut packed = false;
+ let mut offset = Size::ZERO;
+ let mut prev_effective_align = layout.align.abi;
+ let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
+ let mut field_remapping = smallvec![0; field_count];
+ for i in layout.fields.index_by_increasing_offset() {
+ let target_offset = layout.fields.offset(i as usize);
+ let field = layout.field(cx, i);
+ let effective_field_align =
+ layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
+ packed |= effective_field_align < field.align.abi;
+
+ debug!(
+ "struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
+ effective_field_align: {}",
+ i,
+ field,
+ offset,
+ target_offset,
+ effective_field_align.bytes()
+ );
+ assert!(target_offset >= offset);
+ let padding = target_offset - offset;
+ if padding != Size::ZERO {
+ let padding_align = prev_effective_align.min(effective_field_align);
+ assert_eq!(offset.align_to(padding_align) + padding, target_offset);
+ result.push(cx.type_padding_filler(padding, padding_align));
+ debug!(" padding before: {:?}", padding);
+ }
+ field_remapping[i] = result.len() as u32;
+ result.push(field.llvm_type(cx));
+ offset = target_offset + field.size;
+ prev_effective_align = effective_field_align;
+ }
+ let padding_used = result.len() > field_count;
+ if !layout.is_unsized() && field_count > 0 {
+ if offset > layout.size {
+ bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
+ }
+ let padding = layout.size - offset;
+ if padding != Size::ZERO {
+ let padding_align = prev_effective_align;
+ assert_eq!(offset.align_to(padding_align) + padding, layout.size);
+ debug!(
+ "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
+ padding, offset, layout.size
+ );
+ result.push(cx.type_padding_filler(padding, padding_align));
+ }
+ } else {
+ debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
+ }
+ let field_remapping = if padding_used { Some(field_remapping) } else { None };
+ (result, packed, field_remapping)
+}
+
+impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
+ pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
+ self.layout_of(ty).align.abi
+ }
+
+ pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
+ self.layout_of(ty).size
+ }
+
+ pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
+ let layout = self.layout_of(ty);
+ (layout.size, layout.align.abi)
+ }
+}
+
+pub trait LayoutLlvmExt<'tcx> {
+ fn is_llvm_immediate(&self) -> bool;
+ fn is_llvm_scalar_pair(&self) -> bool;
+ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
+ fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
+ fn scalar_llvm_type_at<'a>(
+ &self,
+ cx: &CodegenCx<'a, 'tcx>,
+ scalar: Scalar,
+ offset: Size,
+ ) -> &'a Type;
+ fn scalar_pair_element_llvm_type<'a>(
+ &self,
+ cx: &CodegenCx<'a, 'tcx>,
+ index: usize,
+ immediate: bool,
+ ) -> &'a Type;
+ fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64;
+ fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+}
+
+impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
+ fn is_llvm_immediate(&self) -> bool {
+ match self.abi {
+ Abi::Scalar(_) | Abi::Vector { .. } => true,
+ Abi::ScalarPair(..) => false,
+ Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
+ }
+ }
+
+ fn is_llvm_scalar_pair(&self) -> bool {
+ match self.abi {
+ Abi::ScalarPair(..) => true,
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+ }
+ }
+
+ /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
+ /// The pointee type of the pointer in `PlaceRef` is always this type.
+ /// For sized types, it is also the right LLVM type for an `alloca`
+ /// containing a value of that type, and most immediates (except `bool`).
+ /// Unsized types, however, are represented by a "minimal unit", e.g.
+ /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+ /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+ /// If the type is an unsized struct, the regular layout is generated,
+ /// with the inner-most trailing unsized field using the "minimal unit"
+ /// of that field's type - this is useful for taking the address of
+ /// that field and ensuring the struct has the right alignment.
+ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+ if let Abi::Scalar(scalar) = self.abi {
+ // Use a different cache for scalars because pointers to DSTs
+ // can be either fat or thin (data pointers of fat pointers).
+ if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
+ return llty;
+ }
+ let llty = match *self.ty.kind() {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+ cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
+ }
+ ty::Adt(def, _) if def.is_box() => {
+ cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
+ }
+ ty::FnPtr(sig) => {
+ cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
+ }
+ _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
+ };
+ cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
+ return llty;
+ }
+
+ // Check the cache.
+ let variant_index = match self.variants {
+ Variants::Single { index } => Some(index),
+ _ => None,
+ };
+ if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
+ return llty.lltype;
+ }
+
+ debug!("llvm_type({:#?})", self);
+
+ assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
+
+ // Make sure lifetimes are erased, to avoid generating distinct LLVM
+ // types for Rust types that only differ in the choice of lifetimes.
+ let normal_ty = cx.tcx.erase_regions(self.ty);
+
+ let mut defer = None;
+ let mut field_remapping = None;
+ let llty = if self.ty != normal_ty {
+ let mut layout = cx.layout_of(normal_ty);
+ if let Some(v) = variant_index {
+ layout = layout.for_variant(cx, v);
+ }
+ layout.llvm_type(cx)
+ } else {
+ uncached_llvm_type(cx, *self, &mut defer, &mut field_remapping)
+ };
+ debug!("--> mapped {:#?} to llty={:?}", self, llty);
+
+ cx.type_lowering
+ .borrow_mut()
+ .insert((self.ty, variant_index), TypeLowering { lltype: llty, field_remapping });
+
+ if let Some((llty, layout)) = defer {
+ let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
+ cx.set_struct_body(llty, &llfields, packed);
+ cx.type_lowering
+ .borrow_mut()
+ .get_mut(&(self.ty, variant_index))
+ .unwrap()
+ .field_remapping = new_field_remapping;
+ }
+ llty
+ }
+
+ fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
+ if let Abi::Scalar(scalar) = self.abi {
+ if scalar.is_bool() {
+ return cx.type_i1();
+ }
+ }
+ self.llvm_type(cx)
+ }
+
+ fn scalar_llvm_type_at<'a>(
+ &self,
+ cx: &CodegenCx<'a, 'tcx>,
+ scalar: Scalar,
+ offset: Size,
+ ) -> &'a Type {
+ match scalar.primitive() {
+ Int(i, _) => cx.type_from_integer(i),
+ F32 => cx.type_f32(),
+ F64 => cx.type_f64(),
+ Pointer => {
+ // If we know the alignment, pick something better than i8.
+ let (pointee, address_space) =
+ if let Some(pointee) = self.pointee_info_at(cx, offset) {
+ (cx.type_pointee_for_align(pointee.align), pointee.address_space)
+ } else {
+ (cx.type_i8(), AddressSpace::DATA)
+ };
+ cx.type_ptr_to_ext(pointee, address_space)
+ }
+ }
+ }
+
+ fn scalar_pair_element_llvm_type<'a>(
+ &self,
+ cx: &CodegenCx<'a, 'tcx>,
+ index: usize,
+ immediate: bool,
+ ) -> &'a Type {
+ // HACK(eddyb) special-case fat pointers until LLVM removes
+ // pointee types, to avoid bitcasting every `OperandRef::deref`.
+ match self.ty.kind() {
+ ty::Ref(..) | ty::RawPtr(_) => {
+ return self.field(cx, index).llvm_type(cx);
+ }
+ // Only wide pointer boxes are handled as pointers;
+ // thin pointer boxes with scalar allocators are handled by the general logic below.
+ ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
+ let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
+ return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
+ }
+ _ => {}
+ }
+
+ let Abi::ScalarPair(a, b) = self.abi else {
+ bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
+ };
+ let scalar = [a, b][index];
+
+ // Make sure to return the same type `immediate_llvm_type` would when
+ // dealing with an immediate pair. This means that `(bool, bool)` is
+ // effectively represented as `{i8, i8}` in memory and two `i1`s as an
+ // immediate, just like `bool` is typically `i8` in memory and only `i1`
+ // when immediate. We need to load/store `bool` as `i8` to avoid
+ // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
+ if immediate && scalar.is_bool() {
+ return cx.type_i1();
+ }
+
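+ // The second element starts at the first element's size rounded up to
+ // the second element's ABI alignment.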
+ let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
+ self.scalar_llvm_type_at(cx, scalar, offset)
+ }
+
+ fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {
+ match self.abi {
+ Abi::Scalar(_) | Abi::ScalarPair(..) => {
+ bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
+ }
+ _ => {}
+ }
+ match self.fields {
+ FieldsShape::Primitive | FieldsShape::Union(_) => {
+ bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
+ }
+
+ FieldsShape::Array { .. } => index as u64,
+
+ FieldsShape::Arbitrary { .. } => {
+ let variant_index = match self.variants {
+ Variants::Single { index } => Some(index),
+ _ => None,
+ };
+
+ // Look up the LLVM field index if the field indexes do not match the
+ // memory order due to padding. If `field_remapping` is `None`, no padding
+ // was used and the LLVM field index matches the memory index.
+ match cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
+ Some(TypeLowering { field_remapping: Some(ref remap), .. }) => {
+ remap[index] as u64
+ }
+ Some(_) => self.fields.memory_index(index) as u64,
+ None => {
+ bug!("TyAndLayout::llvm_field_index({:?}): type info not found", self)
+ }
+ }
+ }
+ }
+ }
+
+ // FIXME(eddyb) this having the same name as `TyAndLayout::pointee_info_at`
+ // (the inherent method, which is lacking this caching logic) can result in
+ // the uncached version being called - not wrong, but potentially inefficient.
+ fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
+ if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
+ return pointee;
+ }
+
+ let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);
+
+ cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
+ result
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
new file mode 100644
index 000000000..ceb3d5a84
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -0,0 +1,214 @@
+use crate::builder::Builder;
+use crate::type_::Type;
+use crate::type_of::LayoutLlvmExt;
+use crate::value::Value;
+use rustc_codegen_ssa::mir::operand::OperandRef;
+use rustc_codegen_ssa::{
+ common::IntPredicate,
+ traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
+};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Align, Endian, HasDataLayout, Size};
+
+fn round_pointer_up_to_alignment<'ll>(
+ bx: &mut Builder<'_, 'll, '_>,
+ addr: &'ll Value,
+ align: Align,
+ ptr_ty: &'ll Type,
+) -> &'ll Value {
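+ // Round the address up using the standard bit trick:
+ //   aligned = (addr + align - 1) & !(align - 1)
+ // e.g. addr = 13, align = 8: (13 + 7) & !7 = 16. The `and` below uses
+ // `-(align as i32)`, which equals `!(align - 1)` in two's complement.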
+ let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
+ ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
+ ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
+ bx.inttoptr(ptr_as_int, ptr_ty)
+}
+
+fn emit_direct_ptr_va_arg<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ list: OperandRef<'tcx, &'ll Value>,
+ llty: &'ll Type,
+ size: Size,
+ align: Align,
+ slot_size: Align,
+ allow_higher_align: bool,
+) -> (&'ll Value, Align) {
+ let va_list_ty = bx.type_i8p();
+ let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
+ let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
+ bx.bitcast(list.immediate(), va_list_ptr_ty)
+ } else {
+ list.immediate()
+ };
+
+ let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
+ let (addr, addr_align) = if allow_higher_align && align > slot_size {
+ (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
+ } else {
+ (ptr, slot_size)
+ };
+
+ let aligned_size = size.align_to(slot_size).bytes() as i32;
+ let full_direct_size = bx.cx().const_i32(aligned_size);
+ let next = bx.inbounds_gep(bx.type_i8(), addr, &[full_direct_size]);
+ bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+
+ if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
+ let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
+ let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
+ (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
+ } else {
+ (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
+ }
+}
+
+fn emit_ptr_va_arg<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ list: OperandRef<'tcx, &'ll Value>,
+ target_ty: Ty<'tcx>,
+ indirect: bool,
+ slot_size: Align,
+ allow_higher_align: bool,
+) -> &'ll Value {
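+ // When `indirect` is true, the variadic slot holds a pointer to the value
+ // rather than the value itself (e.g. Windows x86_64 passes arguments that
+ // are larger than 8 bytes or have a non-power-of-two size by reference),
+ // so two loads are needed below.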
+ let layout = bx.cx.layout_of(target_ty);
+ let (llty, size, align) = if indirect {
+ (
+ bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
+ bx.cx.data_layout().pointer_size,
+ bx.cx.data_layout().pointer_align,
+ )
+ } else {
+ (layout.llvm_type(bx.cx), layout.size, layout.align)
+ };
+ let (addr, addr_align) =
+ emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
+ if indirect {
+ let tmp_ret = bx.load(llty, addr, addr_align);
+ bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
+ } else {
+ bx.load(llty, addr, addr_align)
+ }
+}
+
+fn emit_aapcs_va_arg<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ list: OperandRef<'tcx, &'ll Value>,
+ target_ty: Ty<'tcx>,
+) -> &'ll Value {
+ // Implementation of the AAPCS64 calling convention for va_args; see
+ // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
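+ //
+ // Per that document, AAPCS64 defines `va_list` roughly as follows; the
+ // numbers are the field indices used with `llvm_field_index` below:
+ //
+ //     struct va_list {
+ //         void *stack;   // 0: next stacked argument
+ //         void *gr_top;  // 1: end of the GP register save area
+ //         void *vr_top;  // 2: end of the FP/SIMD register save area
+ //         int   gr_offs; // 3: negative offset from gr_top
+ //         int   vr_offs; // 4: negative offset from vr_top
+ //     };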
+ let va_list_addr = list.immediate();
+ let va_list_layout = list.deref(bx.cx).layout;
+ let va_list_ty = va_list_layout.llvm_type(bx);
+ let layout = bx.cx.layout_of(target_ty);
+
+ let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
+ let in_reg = bx.append_sibling_block("va_arg.in_reg");
+ let on_stack = bx.append_sibling_block("va_arg.on_stack");
+ let end = bx.append_sibling_block("va_arg.end");
+ let zero = bx.const_i32(0);
+ let offset_align = Align::from_bytes(4).unwrap();
+
+ let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
+ let (reg_off, reg_top_index, slot_size) = if gr_type {
+ let gr_offs =
+ bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
+ let nreg = (layout.size.bytes() + 7) / 8;
+ (gr_offs, va_list_layout.llvm_field_index(bx.cx, 1), nreg * 8)
+ } else {
+ let vr_off =
+ bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 4));
+ let nreg = (layout.size.bytes() + 15) / 16;
+ (vr_off, va_list_layout.llvm_field_index(bx.cx, 2), nreg * 16)
+ };
+
+ // if the offset >= 0 then the value will be on the stack
+ let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
+ let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
+ bx.cond_br(use_stack, on_stack, maybe_reg);
+
+ // The value at this point might be in a register, but there is a chance that
+ // it could be on the stack so we have to update the offset and then check
+ // the offset again.
+
+ bx.switch_to_block(maybe_reg);
+ if gr_type && layout.align.abi.bytes() > 8 {
+ reg_off_v = bx.add(reg_off_v, bx.const_i32(15));
+ reg_off_v = bx.and(reg_off_v, bx.const_i32(-16));
+ }
+ let new_reg_off_v = bx.add(reg_off_v, bx.const_i32(slot_size as i32));
+
+ bx.store(new_reg_off_v, reg_off, offset_align);
+
+ // Check to see if we have overflowed the registers as a result of this.
+ // If we have then we need to use the stack for this value
+ let use_stack = bx.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
+ bx.cond_br(use_stack, on_stack, in_reg);
+
+ bx.switch_to_block(in_reg);
+ let top_type = bx.type_i8p();
+ let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index);
+ let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
+
+ // reg_value = *(@top + reg_off_v);
+ let mut reg_addr = bx.gep(bx.type_i8(), top, &[reg_off_v]);
+ if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
+ // On big-endian systems the value is right-aligned in its slot.
+ let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
+ reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
+ }
+ let reg_type = layout.llvm_type(bx);
+ let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
+ let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
+ bx.br(end);
+
+ // On Stack block
+ bx.switch_to_block(on_stack);
+ let stack_value =
+ emit_ptr_va_arg(bx, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
+ bx.br(end);
+
+ bx.switch_to_block(end);
+ let val =
+ bx.phi(layout.immediate_llvm_type(bx), &[reg_value, stack_value], &[in_reg, on_stack]);
+
+ val
+}
+
+pub(super) fn emit_va_arg<'ll, 'tcx>(
+ bx: &mut Builder<'_, 'll, 'tcx>,
+ addr: OperandRef<'tcx, &'ll Value>,
+ target_ty: Ty<'tcx>,
+) -> &'ll Value {
+ // Determine the va_arg implementation to use. The LLVM va_arg instruction
+ // is lacking in some instances, so we should only use it as a fallback.
+ let target = &bx.cx.tcx.sess.target;
+ let arch = &bx.cx.tcx.sess.target.arch;
+ match &**arch {
+ // Windows x86
+ "x86" if target.is_like_windows => {
+ emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
+ }
+ // Generic x86
+ "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
+ // Windows AArch64
+ "aarch64" if target.is_like_windows => {
+ emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
+ }
+ // macOS / iOS AArch64
+ "aarch64" if target.is_like_osx => {
+ emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
+ }
+ "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
+ // Windows x86_64
+ "x86_64" if target.is_like_windows => {
+ let target_ty_size = bx.cx.size_of(target_ty).bytes();
+ let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
+ emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
+ }
+ // For all other architecture/OS combinations fall back to using
+ // the LLVM va_arg instruction.
+ // https://llvm.org/docs/LangRef.html#va-arg-instruction
+ _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),
+ }
+}
diff --git a/compiler/rustc_codegen_llvm/src/value.rs b/compiler/rustc_codegen_llvm/src/value.rs
new file mode 100644
index 000000000..1338a2295
--- /dev/null
+++ b/compiler/rustc_codegen_llvm/src/value.rs
@@ -0,0 +1,32 @@
+pub use crate::llvm::Value;
+
+use crate::llvm;
+
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::ptr;
+
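+// `Value` is an opaque LLVM object only ever handled by reference, so the
+// equality and hashing impls below use pointer identity.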
+impl PartialEq for Value {
+ fn eq(&self, other: &Self) -> bool {
+ ptr::eq(self, other)
+ }
+}
+
+impl Eq for Value {}
+
+impl Hash for Value {
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ (self as *const Self).hash(hasher);
+ }
+}
+
+impl fmt::Debug for Value {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(
+ &llvm::build_string(|s| unsafe {
+ llvm::LLVMRustWriteValueToString(self, s);
+ })
+ .expect("non-UTF8 value description from LLVM"),
+ )
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml
new file mode 100644
index 000000000..46d6344db
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/Cargo.toml
@@ -0,0 +1,48 @@
+[package]
+name = "rustc_codegen_ssa"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+test = false
+
+[dependencies]
+bitflags = "1.2.1"
+cc = "1.0.69"
+itertools = "0.10.1"
+tracing = "0.1"
+libc = "0.2.50"
+jobserver = "0.1.22"
+tempfile = "3.2"
+thorin-dwp = "0.3"
+pathdiff = "0.2.0"
+serde_json = "1.0.59"
+snap = "1"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+regex = "1.4"
+
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_arena = { path = "../rustc_arena" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_fs_util = { path = "../rustc_fs_util" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_incremental = { path = "../rustc_incremental" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_metadata = { path = "../rustc_metadata" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_target = { path = "../rustc_target" }
+rustc_session = { path = "../rustc_session" }
+rustc_const_eval = { path = "../rustc_const_eval" }
+
+[dependencies.object]
+version = "0.29.0"
+default-features = false
+features = ["read_core", "elf", "macho", "pe", "unaligned", "archive", "write"]
diff --git a/compiler/rustc_codegen_ssa/README.md b/compiler/rustc_codegen_ssa/README.md
new file mode 100644
index 000000000..7b770187b
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/README.md
@@ -0,0 +1,3 @@
+Please read the rustc-dev-guide chapter on [Backend Agnostic Codegen][bac].
+
+[bac]: https://rustc-dev-guide.rust-lang.org/backend/backend-agnostic.html
diff --git a/compiler/rustc_codegen_ssa/src/back/archive.rs b/compiler/rustc_codegen_ssa/src/back/archive.rs
new file mode 100644
index 000000000..0d2aa483d
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/archive.rs
@@ -0,0 +1,69 @@
+use rustc_session::cstore::DllImport;
+use rustc_session::Session;
+
+use std::io;
+use std::path::{Path, PathBuf};
+
+pub(super) fn find_library(
+ name: &str,
+ verbatim: bool,
+ search_paths: &[PathBuf],
+ sess: &Session,
+) -> PathBuf {
+ // On Windows, static libraries sometimes show up as libfoo.a and other
+ // times show up as foo.lib
+ let oslibname = if verbatim {
+ name.to_string()
+ } else {
+ format!("{}{}{}", sess.target.staticlib_prefix, name, sess.target.staticlib_suffix)
+ };
+ let unixlibname = format!("lib{}.a", name);
+
+ for path in search_paths {
+ debug!("looking for {} inside {:?}", name, path);
+ let test = path.join(&oslibname);
+ if test.exists() {
+ return test;
+ }
+ if oslibname != unixlibname {
+ let test = path.join(&unixlibname);
+ if test.exists() {
+ return test;
+ }
+ }
+ }
+ sess.fatal(&format!(
+ "could not find native static library `{}`, \
+ perhaps an -L flag is missing?",
+ name
+ ));
+}
+
+pub trait ArchiveBuilderBuilder {
+ fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder<'a> + 'a>;
+
+ /// Creates a DLL Import Library <https://docs.microsoft.com/en-us/windows/win32/dlls/dynamic-link-library-creation#creating-an-import-library>
+ /// and returns the path on disk to that import library.
+ /// This function doesn't take `self` so that it can be called from
+ /// `linker_with_args`, which is specialized on `ArchiveBuilder` but
+ /// doesn't take or create an instance of that type.
+ fn create_dll_import_lib(
+ &self,
+ sess: &Session,
+ lib_name: &str,
+ dll_imports: &[DllImport],
+ tmpdir: &Path,
+ ) -> PathBuf;
+}
+
+pub trait ArchiveBuilder<'a> {
+ fn add_file(&mut self, path: &Path);
+
+ fn add_archive(
+ &mut self,
+ archive: &Path,
+ skip: Box<dyn FnMut(&str) -> bool + 'static>,
+ ) -> io::Result<()>;
+
+ fn build(self: Box<Self>, output: &Path) -> bool;
+}
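+
+// A minimal sketch (hypothetical driver code, not part of this module) of
+// how these traits compose; `abb` is some `ArchiveBuilderBuilder` impl:
+//
+//     let mut ab = abb.new_archive_builder(sess);
+//     ab.add_file(Path::new("foo.o"));
+//     ab.add_archive(Path::new("libbar.a"), Box::new(|_| false))?;
+//     let _built = ab.build(Path::new("libbaz.rlib"));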
diff --git a/compiler/rustc_codegen_ssa/src/back/command.rs b/compiler/rustc_codegen_ssa/src/back/command.rs
new file mode 100644
index 000000000..9b0ba3413
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/command.rs
@@ -0,0 +1,178 @@
+//! A thin wrapper around `Command` in the standard library which allows us to
+//! read the arguments that are built up.
+
+use std::ffi::{OsStr, OsString};
+use std::fmt;
+use std::io;
+use std::mem;
+use std::process::{self, Output};
+
+use rustc_target::spec::LldFlavor;
+
+#[derive(Clone)]
+pub struct Command {
+ program: Program,
+ args: Vec<OsString>,
+ env: Vec<(OsString, OsString)>,
+ env_remove: Vec<OsString>,
+}
+
+#[derive(Clone)]
+enum Program {
+ Normal(OsString),
+ CmdBatScript(OsString),
+ Lld(OsString, LldFlavor),
+}
+
+impl Command {
+ pub fn new<P: AsRef<OsStr>>(program: P) -> Command {
+ Command::_new(Program::Normal(program.as_ref().to_owned()))
+ }
+
+ pub fn bat_script<P: AsRef<OsStr>>(program: P) -> Command {
+ Command::_new(Program::CmdBatScript(program.as_ref().to_owned()))
+ }
+
+ pub fn lld<P: AsRef<OsStr>>(program: P, flavor: LldFlavor) -> Command {
+ Command::_new(Program::Lld(program.as_ref().to_owned(), flavor))
+ }
+
+ fn _new(program: Program) -> Command {
+ Command { program, args: Vec::new(), env: Vec::new(), env_remove: Vec::new() }
+ }
+
+ pub fn arg<P: AsRef<OsStr>>(&mut self, arg: P) -> &mut Command {
+ self._arg(arg.as_ref());
+ self
+ }
+
+ pub fn args<I>(&mut self, args: I) -> &mut Command
+ where
+ I: IntoIterator<Item: AsRef<OsStr>>,
+ {
+ for arg in args {
+ self._arg(arg.as_ref());
+ }
+ self
+ }
+
+ fn _arg(&mut self, arg: &OsStr) {
+ self.args.push(arg.to_owned());
+ }
+
+ pub fn env<K, V>(&mut self, key: K, value: V) -> &mut Command
+ where
+ K: AsRef<OsStr>,
+ V: AsRef<OsStr>,
+ {
+ self._env(key.as_ref(), value.as_ref());
+ self
+ }
+
+ fn _env(&mut self, key: &OsStr, value: &OsStr) {
+ self.env.push((key.to_owned(), value.to_owned()));
+ }
+
+ pub fn env_remove<K>(&mut self, key: K) -> &mut Command
+ where
+ K: AsRef<OsStr>,
+ {
+ self._env_remove(key.as_ref());
+ self
+ }
+
+ fn _env_remove(&mut self, key: &OsStr) {
+ self.env_remove.push(key.to_owned());
+ }
+
+ pub fn output(&mut self) -> io::Result<Output> {
+ self.command().output()
+ }
+
+ pub fn command(&self) -> process::Command {
+ let mut ret = match self.program {
+ Program::Normal(ref p) => process::Command::new(p),
+ Program::CmdBatScript(ref p) => {
+ let mut c = process::Command::new("cmd");
+ c.arg("/c").arg(p);
+ c
+ }
+ Program::Lld(ref p, flavor) => {
+ let mut c = process::Command::new(p);
+ c.arg("-flavor").arg(flavor.as_str());
+ if let LldFlavor::Wasm = flavor {
+ // LLVM expects host-specific formatting for @file
+ // arguments, but we always generate posix formatted files
+ // at this time. Indicate as such.
+ c.arg("--rsp-quoting=posix");
+ }
+ c
+ }
+ };
+ ret.args(&self.args);
+ ret.envs(self.env.clone());
+ for k in &self.env_remove {
+ ret.env_remove(k);
+ }
+ ret
+ }
+
+ // extensions
+
+ pub fn get_args(&self) -> &[OsString] {
+ &self.args
+ }
+
+ pub fn take_args(&mut self) -> Vec<OsString> {
+ mem::take(&mut self.args)
+ }
+
+ /// Returns `true` if we're pretty sure that this'll blow OS spawn limits,
+ /// or `false` if we should attempt to spawn and see what the OS says.
+ pub fn very_likely_to_exceed_some_spawn_limit(&self) -> bool {
+ // We mostly only care about Windows in this method, on Unix the limits
+ // can be gargantuan anyway so we're pretty unlikely to hit them
+ if cfg!(unix) {
+ return false;
+ }
+
+ // Right now LLD doesn't support the `@` syntax of passing an argument
+ // through files, so regardless of the platform we try to go to the OS
+ // on this one.
+ if let Program::Lld(..) = self.program {
+ return false;
+ }
+
+ // Ok, so the maximum length of the command line used to spawn a process
+ // on Windows is 32,768 characters [1]. Unfortunately we don't actually
+ // have access to the final length, as it's calculated just before
+ // spawning. Instead we perform a
+ // poor-man's guess as to how long our command line will be. We're
+ // assuming here that we don't have to escape every character...
+ //
+ // Turns out though that `cmd.exe` has even smaller limits, 8192
+ // characters [2]. Linkers can often be batch scripts (for example
+ // Emscripten, Gecko's current build system) which means that we're
+ // running through batch scripts. These linkers often just forward
+ // arguments elsewhere (and maybe tack on more), so if we blow 8192
+ // bytes we'll typically cause them to blow as well.
+ //
+ // Basically as a result just perform an inflated estimate of what our
+ // command line will look like and test if it's > 8192 (we actually
+ // test against 6k to artificially inflate our estimate). If all else
+ // fails we'll fall back to the normal unix logic of testing the OS
+ // error code if we fail to spawn and automatically re-spawning the
+ // linker with smaller arguments.
+ //
+ // [1]: https://docs.microsoft.com/en-us/windows/win32/api/processthreadsapi/nf-processthreadsapi-createprocessa
+ // [2]: https://devblogs.microsoft.com/oldnewthing/?p=41553
+
+ let estimated_command_line_len = self.args.iter().map(|a| a.len()).sum::<usize>();
+ estimated_command_line_len > 1024 * 6
+ }
+}
+
+impl fmt::Debug for Command {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.command().fmt(f)
+ }
+}
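+
+// A minimal sketch of using this wrapper (hypothetical program and args):
+//
+//     let mut cmd = Command::new("cc");
+//     cmd.arg("-o").arg("out").args(["a.o", "b.o"]).env("LC_ALL", "C");
+//     let output = cmd.output()?;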
diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs
new file mode 100644
index 000000000..63207803e
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/link.rs
@@ -0,0 +1,2800 @@
+use rustc_arena::TypedArena;
+use rustc_ast::CRATE_NODE_ID;
+use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::temp_dir::MaybeTempDir;
+use rustc_errors::{ErrorGuaranteed, Handler};
+use rustc_fs_util::fix_windows_verbatim_for_gcc;
+use rustc_hir::def_id::CrateNum;
+use rustc_metadata::fs::{emit_metadata, METADATA_FILENAME};
+use rustc_middle::middle::dependency_format::Linkage;
+use rustc_middle::middle::exported_symbols::SymbolExportKind;
+use rustc_session::config::{self, CFGuard, CrateType, DebugInfo, LdImpl, Strip};
+use rustc_session::config::{OutputFilenames, OutputType, PrintRequest, SplitDwarfKind};
+use rustc_session::cstore::DllImport;
+use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename};
+use rustc_session::search_paths::PathKind;
+use rustc_session::utils::NativeLibKind;
+// For all the linkers we support, and the information they might
+// need out of the shared crate context before we get rid of it.
+use rustc_session::{filesearch, Session};
+use rustc_span::symbol::Symbol;
+use rustc_span::DebuggerVisualizerFile;
+use rustc_target::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
+use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor, SplitDebuginfo};
+use rustc_target::spec::{PanicStrategy, RelocModel, RelroLevel, SanitizerSet, Target};
+
+use super::archive::{find_library, ArchiveBuilder, ArchiveBuilderBuilder};
+use super::command::Command;
+use super::linker::{self, Linker};
+use super::metadata::{create_rmeta_file, MetadataPosition};
+use super::rpath::{self, RPathConfig};
+use crate::{looks_like_rust_object_file, CodegenResults, CompiledModule, CrateInfo, NativeLib};
+
+use cc::windows_registry;
+use regex::Regex;
+use tempfile::Builder as TempFileBuilder;
+
+use std::borrow::Borrow;
+use std::cell::OnceCell;
+use std::collections::BTreeSet;
+use std::ffi::OsString;
+use std::fs::{File, OpenOptions};
+use std::io::{BufWriter, Write};
+use std::ops::Deref;
+use std::path::{Path, PathBuf};
+use std::process::{ExitStatus, Output, Stdio};
+use std::{ascii, char, env, fmt, fs, io, mem, str};
+
+pub fn ensure_removed(diag_handler: &Handler, path: &Path) {
+ if let Err(e) = fs::remove_file(path) {
+ if e.kind() != io::ErrorKind::NotFound {
+ diag_handler.err(&format!("failed to remove {}: {}", path.display(), e));
+ }
+ }
+}
+
+/// Performs the linkage portion of the compilation phase. This will generate all
+/// of the requested outputs for this compilation session.
+pub fn link_binary<'a>(
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ outputs: &OutputFilenames,
+) -> Result<(), ErrorGuaranteed> {
+ let _timer = sess.timer("link_binary");
+ let output_metadata = sess.opts.output_types.contains_key(&OutputType::Metadata);
+ for &crate_type in sess.crate_types().iter() {
+ // Ignore executable crates if we have -Z no-codegen, as they will error.
+ if (sess.opts.unstable_opts.no_codegen || !sess.opts.output_types.should_codegen())
+ && !output_metadata
+ && crate_type == CrateType::Executable
+ {
+ continue;
+ }
+
+ if invalid_output_for_target(sess, crate_type) {
+ bug!(
+ "invalid output type `{:?}` for target os `{}`",
+ crate_type,
+ sess.opts.target_triple
+ );
+ }
+
+ sess.time("link_binary_check_files_are_writeable", || {
+ for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+ check_file_is_writeable(obj, sess);
+ }
+ });
+
+ if outputs.outputs.should_link() {
+ let tmpdir = TempFileBuilder::new()
+ .prefix("rustc")
+ .tempdir()
+ .unwrap_or_else(|err| sess.fatal(&format!("couldn't create a temp dir: {}", err)));
+ let path = MaybeTempDir::new(tmpdir, sess.opts.cg.save_temps);
+ let out_filename = out_filename(
+ sess,
+ crate_type,
+ outputs,
+ codegen_results.crate_info.local_crate_name.as_str(),
+ );
+ match crate_type {
+ CrateType::Rlib => {
+ let _timer = sess.timer("link_rlib");
+ info!("preparing rlib to {:?}", out_filename);
+ link_rlib(
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ RlibFlavor::Normal,
+ &path,
+ )?
+ .build(&out_filename);
+ }
+ CrateType::Staticlib => {
+ link_staticlib(
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ &out_filename,
+ &path,
+ )?;
+ }
+ _ => {
+ link_natively(
+ sess,
+ archive_builder_builder,
+ crate_type,
+ &out_filename,
+ codegen_results,
+ path.as_ref(),
+ )?;
+ }
+ }
+ if sess.opts.json_artifact_notifications {
+ sess.parse_sess.span_diagnostic.emit_artifact_notification(&out_filename, "link");
+ }
+
+ if sess.prof.enabled() {
+ if let Some(artifact_name) = out_filename.file_name() {
+ // Record size for self-profiling
+ let file_size = std::fs::metadata(&out_filename).map(|m| m.len()).unwrap_or(0);
+
+ sess.prof.artifact_size(
+ "linked_artifact",
+ artifact_name.to_string_lossy(),
+ file_size,
+ );
+ }
+ }
+ }
+ }
+
+ // Remove the temporary object file and metadata if we aren't saving temps.
+ sess.time("link_binary_remove_temps", || {
+ // If the user requests that temporaries are saved, don't delete any.
+ if sess.opts.cg.save_temps {
+ return;
+ }
+
+ let maybe_remove_temps_from_module =
+ |preserve_objects: bool, preserve_dwarf_objects: bool, module: &CompiledModule| {
+ if !preserve_objects {
+ if let Some(ref obj) = module.object {
+ ensure_removed(sess.diagnostic(), obj);
+ }
+ }
+
+ if !preserve_dwarf_objects {
+ if let Some(ref dwo_obj) = module.dwarf_object {
+ ensure_removed(sess.diagnostic(), dwo_obj);
+ }
+ }
+ };
+
+ let remove_temps_from_module =
+ |module: &CompiledModule| maybe_remove_temps_from_module(false, false, module);
+
+ // Otherwise, always remove the metadata and allocator module temporaries.
+ if let Some(ref metadata_module) = codegen_results.metadata_module {
+ remove_temps_from_module(metadata_module);
+ }
+
+ if let Some(ref allocator_module) = codegen_results.allocator_module {
+ remove_temps_from_module(allocator_module);
+ }
+
+ // If no requested outputs require linking, then the object temporaries should
+ // be kept.
+ if !sess.opts.output_types.should_link() {
+ return;
+ }
+
+ // Potentially keep objects for their debuginfo.
+ let (preserve_objects, preserve_dwarf_objects) = preserve_objects_for_their_debuginfo(sess);
+ debug!(?preserve_objects, ?preserve_dwarf_objects);
+
+ for module in &codegen_results.modules {
+ maybe_remove_temps_from_module(preserve_objects, preserve_dwarf_objects, module);
+ }
+ });
+
+ Ok(())
+}
+
+pub fn each_linked_rlib(
+ info: &CrateInfo,
+ f: &mut dyn FnMut(CrateNum, &Path),
+) -> Result<(), String> {
+ let crates = info.used_crates.iter();
+ let mut fmts = None;
+ for (ty, list) in info.dependency_formats.iter() {
+ match ty {
+ CrateType::Executable
+ | CrateType::Staticlib
+ | CrateType::Cdylib
+ | CrateType::ProcMacro => {
+ fmts = Some(list);
+ break;
+ }
+ _ => {}
+ }
+ }
+ let Some(fmts) = fmts else {
+ return Err("could not find formats for rlibs".to_string());
+ };
+ for &cnum in crates {
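+ // The dependency format list has one entry per dependency crate, so it
+ // is indexed by `cnum - 1` (`CrateNum` 0 is the local crate itself).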
+ match fmts.get(cnum.as_usize() - 1) {
+ Some(&Linkage::NotLinked | &Linkage::IncludedFromDylib) => continue,
+ Some(_) => {}
+ None => return Err("could not find formats for rlibs".to_string()),
+ }
+ let name = info.crate_name[&cnum];
+ let used_crate_source = &info.used_crate_source[&cnum];
+ if let Some((path, _)) = &used_crate_source.rlib {
+ f(cnum, &path);
+ } else {
+ if used_crate_source.rmeta.is_some() {
+ return Err(format!(
+ "could not find rlib for: `{}`, found rmeta (metadata) file",
+ name
+ ));
+ } else {
+ return Err(format!("could not find rlib for: `{}`", name));
+ }
+ }
+ }
+ Ok(())
+}
+
+/// Create an 'rlib'.
+///
+/// An rlib in its current incarnation is essentially a renamed .a file. The rlib primarily contains
+/// the object file of the crate, but it also contains all of the object files from native
+/// libraries. This is done by unzipping native libraries and inserting all of the contents into
+/// this archive.
+fn link_rlib<'a>(
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ flavor: RlibFlavor,
+ tmpdir: &MaybeTempDir,
+) -> Result<Box<dyn ArchiveBuilder<'a> + 'a>, ErrorGuaranteed> {
+ let lib_search_paths = archive_search_paths(sess);
+
+ let mut ab = archive_builder_builder.new_archive_builder(sess);
+
+ let trailing_metadata = match flavor {
+ RlibFlavor::Normal => {
+ let (metadata, metadata_position) =
+ create_rmeta_file(sess, codegen_results.metadata.raw_data());
+ let metadata = emit_metadata(sess, &metadata, tmpdir);
+ match metadata_position {
+ MetadataPosition::First => {
+ // Most of the time metadata in rlib files is wrapped in a "dummy" object
+ // file for the target platform so the rlib can be processed entirely by
+ // normal linkers for the platform. Sometimes this is not possible,
+ // however. When it is possible, placing the metadata object first
+ // improves the performance of reading metadata from rlibs.
+ ab.add_file(&metadata);
+ None
+ }
+ MetadataPosition::Last => Some(metadata),
+ }
+ }
+
+ RlibFlavor::StaticlibBase => None,
+ };
+
+ for m in &codegen_results.modules {
+ if let Some(obj) = m.object.as_ref() {
+ ab.add_file(obj);
+ }
+
+ if let Some(dwarf_obj) = m.dwarf_object.as_ref() {
+ ab.add_file(dwarf_obj);
+ }
+ }
+
+ match flavor {
+ RlibFlavor::Normal => {}
+ RlibFlavor::StaticlibBase => {
+ let obj = codegen_results.allocator_module.as_ref().and_then(|m| m.object.as_ref());
+ if let Some(obj) = obj {
+ ab.add_file(obj);
+ }
+ }
+ }
+
+ // Note that in this loop we are ignoring the value of `lib.cfg`. That is,
+ // we may not be configured to actually include a static library if we're
+ // adding it here. That's because later when we consume this rlib we'll
+ // decide whether we actually needed the static library or not.
+ //
+ // To do this "correctly" we'd need to keep track of which libraries added
+ // which object files to the archive. We don't do that here, however. The
+ // #[link(cfg(..))] feature is unstable, though, and only intended to get
+ // liblibc working. In that sense, the check below just means that if there
+ // are any libraries whose object files we want to omit at link time, we
+ // exclude all custom object files.
+ //
+ // Eventually if we want to stabilize or flesh out the #[link(cfg(..))]
+ // feature then we'll need to figure out how to record what objects were
+ // loaded from the libraries found here and then encode that into the
+ // metadata of the rlib we're generating somehow.
+ for lib in codegen_results.crate_info.used_libraries.iter() {
+ match lib.kind {
+ NativeLibKind::Static { bundle: None | Some(true), whole_archive: Some(true) }
+ if flavor == RlibFlavor::Normal =>
+ {
+ // Don't allow mixing +bundle with +whole_archive since an rlib may contain
+ // multiple native libs, some of which are +whole-archive and some of which are
+ // -whole-archive and it isn't clear how we can currently handle such a
+ // situation correctly.
+ // See https://github.com/rust-lang/rust/issues/88085#issuecomment-901050897
+ sess.err(
+ "the linking modifiers `+bundle` and `+whole-archive` are not compatible \
+ with each other when generating rlibs",
+ );
+ }
+ NativeLibKind::Static { bundle: None | Some(true), .. } => {}
+ NativeLibKind::Static { bundle: Some(false), .. }
+ | NativeLibKind::Dylib { .. }
+ | NativeLibKind::Framework { .. }
+ | NativeLibKind::RawDylib
+ | NativeLibKind::LinkArg
+ | NativeLibKind::Unspecified => continue,
+ }
+ if let Some(name) = lib.name {
+ let location =
+ find_library(name.as_str(), lib.verbatim.unwrap_or(false), &lib_search_paths, sess);
+ ab.add_archive(&location, Box::new(|_| false)).unwrap_or_else(|e| {
+ sess.fatal(&format!(
+ "failed to add native library {}: {}",
+ location.to_string_lossy(),
+ e
+ ));
+ });
+ }
+ }
+
+ for (raw_dylib_name, raw_dylib_imports) in
+ collate_raw_dylibs(sess, &codegen_results.crate_info.used_libraries)?
+ {
+ let output_path = archive_builder_builder.create_dll_import_lib(
+ sess,
+ &raw_dylib_name,
+ &raw_dylib_imports,
+ tmpdir.as_ref(),
+ );
+
+ ab.add_archive(&output_path, Box::new(|_| false)).unwrap_or_else(|e| {
+ sess.fatal(&format!("failed to add native library {}: {}", output_path.display(), e));
+ });
+ }
+
+ if let Some(trailing_metadata) = trailing_metadata {
+ // Note that it is important that we add all of our non-object "magical
+ // files" *after* all of the object files in the archive. The reason for
+ // this is as follows:
+ //
+ // * When performing LTO, this archive will be modified to remove
+ // objects from above. The reason for this is described below.
+ //
+ // * When the system linker looks at an archive, it will attempt to
+ // determine the architecture of the archive in order to see whether
+ // it's linkable.
+ //
+ // The algorithm for this detection is: iterate over the files in the
+ // archive. Skip magical SYMDEF names. Interpret the first file as an
+ // object file. Read architecture from the object file.
+ //
+ // * As one can probably see, if "metadata" and "foo.bc" were placed
+ // before all of the objects, then the architecture of this archive would
+ // not be correctly inferred once 'foo.o' is removed.
+ //
+ // * Most of the time metadata in rlib files is wrapped in a "dummy" object
+ // file for the target platform so the rlib can be processed entirely by
+ // normal linkers for the platform. Sometimes this is not possible however.
+ //
+ // Basically, all this means is that this code should not move above the
+ // code above.
+ ab.add_file(&trailing_metadata);
+ }
+
+ return Ok(ab);
+}
+
+/// Extract all symbols defined in raw-dylib libraries, collated by library name.
+///
+/// If we have multiple extern blocks that specify symbols defined in the same raw-dylib library,
+/// then the CodegenResults value contains one NativeLib instance for each block. However, the
+/// linker appears to expect only a single import library for each library used, so we need to
+/// collate the symbols together by library name before generating the import libraries.
+fn collate_raw_dylibs(
+ sess: &Session,
+ used_libraries: &[NativeLib],
+) -> Result<Vec<(String, Vec<DllImport>)>, ErrorGuaranteed> {
+ // Use index maps to preserve original order of imports and libraries.
+ let mut dylib_table = FxIndexMap::<String, FxIndexMap<Symbol, &DllImport>>::default();
+
+ for lib in used_libraries {
+ if lib.kind == NativeLibKind::RawDylib {
+ let ext = if matches!(lib.verbatim, Some(true)) { "" } else { ".dll" };
+ let name = format!("{}{}", lib.name.expect("unnamed raw-dylib library"), ext);
+ let imports = dylib_table.entry(name.clone()).or_default();
+ for import in &lib.dll_imports {
+ if let Some(old_import) = imports.insert(import.name, import) {
+ // FIXME: when we add support for ordinals, figure out if we need to do anything
+ // if we have two DllImport values with the same name but different ordinals.
+ if import.calling_convention != old_import.calling_convention {
+ sess.span_err(
+ import.span,
+ &format!(
+ "multiple declarations of external function `{}` from \
+ library `{}` have different calling conventions",
+ import.name, name,
+ ),
+ );
+ }
+ }
+ }
+ }
+ }
+ sess.compile_status()?;
+ Ok(dylib_table
+ .into_iter()
+ .map(|(name, imports)| {
+ (name, imports.into_iter().map(|(_, import)| import.clone()).collect())
+ })
+ .collect())
+}
+
+/// Create a static archive.
+///
+/// This is essentially the same thing as an rlib, but it also involves adding all of the upstream
+/// crates' objects into the archive. This will slurp in all of the native libraries of upstream
+/// dependencies as well.
+///
+/// Additionally, there's no way for us to link dynamic libraries, so we warn
+/// that any dynamic library dependencies will not be linked in.
+///
+/// There's no need to include metadata in a static archive, so make sure not to link in the metadata
+/// object file (and also don't prepare the archive with a metadata file).
+fn link_staticlib<'a>(
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ out_filename: &Path,
+ tempdir: &MaybeTempDir,
+) -> Result<(), ErrorGuaranteed> {
+ info!("preparing staticlib to {:?}", out_filename);
+ let mut ab = link_rlib(
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ RlibFlavor::StaticlibBase,
+ tempdir,
+ )?;
+ let mut all_native_libs = vec![];
+
+ let res = each_linked_rlib(&codegen_results.crate_info, &mut |cnum, path| {
+ let name = codegen_results.crate_info.crate_name[&cnum];
+ let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
+
+ // Here when we include the rlib into our staticlib we need to make a
+ // decision whether to include the extra object files along the way.
+ // These extra object files come from statically included native
+ // libraries, but they may be cfg'd away with #[link(cfg(..))].
+ //
+ // This unstable feature, though, only needs to work for liblibc. The only
+ // use case there is where musl is statically included in liblibc.rlib,
+ // so if we don't want the included version we just need to skip it. As
+ // a result the logic here is that if *any* linked library is cfg'd away
+ // we just skip all object files.
+ //
+ // Clearly this is not sufficient for a general purpose feature, and
+ // we'd want to read from the library's metadata to determine which
+ // object files come from where and selectively skip them.
+ let skip_object_files = native_libs.iter().any(|lib| {
+ matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. })
+ && !relevant_lib(sess, lib)
+ });
+
+ let lto = are_upstream_rust_objects_already_included(sess)
+ && !ignored_for_lto(sess, &codegen_results.crate_info, cnum);
+
+ // Identify this crate's own object files by the crate-name prefix;
+ // a simple equality comparison is not enough, as there
+ // might also be an extra name suffix.
+ let obj_start = name.as_str().to_owned();
+
+ ab.add_archive(
+ path,
+ Box::new(move |fname: &str| {
+ // Ignore metadata files, no matter the name.
+ if fname == METADATA_FILENAME {
+ return true;
+ }
+
+ // Don't include Rust objects if LTO is enabled
+ if lto && looks_like_rust_object_file(fname) {
+ return true;
+ }
+
+ // Otherwise if this is *not* a rust object and we're skipping
+ // objects then skip this file
+ if skip_object_files && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
+ return true;
+ }
+
+ // ok, don't skip this
+ false
+ }),
+ )
+ .unwrap();
+
+ all_native_libs.extend(codegen_results.crate_info.native_libraries[&cnum].iter().cloned());
+ });
+ if let Err(e) = res {
+ sess.fatal(&e);
+ }
+
+ ab.build(out_filename);
+
+ if !all_native_libs.is_empty() {
+ if sess.opts.prints.contains(&PrintRequest::NativeStaticLibs) {
+ print_native_static_libs(sess, &all_native_libs);
+ }
+ }
+
+ Ok(())
+}
+
+fn escape_stdout_stderr_string(s: &[u8]) -> String {
+ str::from_utf8(s).map(|s| s.to_owned()).unwrap_or_else(|_| {
+ let mut x = "Non-UTF-8 output: ".to_string();
+ x.extend(s.iter().flat_map(|&b| ascii::escape_default(b)).map(char::from));
+ x
+ })
+}
+
+/// Use `thorin` (a Rust implementation of a DWARF packaging utility) to link DWARF objects into a
+/// DWARF package.
+fn link_dwarf_object<'a>(
+ sess: &'a Session,
+ cg_results: &CodegenResults,
+ executable_out_filename: &Path,
+) {
+ let dwp_out_filename = executable_out_filename.with_extension("dwp");
+ debug!(?dwp_out_filename, ?executable_out_filename);
+
+ #[derive(Default)]
+ struct ThorinSession<Relocations> {
+ arena_data: TypedArena<Vec<u8>>,
+ arena_mmap: TypedArena<Mmap>,
+ arena_relocations: TypedArena<Relocations>,
+ }
+
+ impl<Relocations> ThorinSession<Relocations> {
+ fn alloc_mmap<'arena>(&'arena self, data: Mmap) -> &'arena Mmap {
+ (*self.arena_mmap.alloc(data)).borrow()
+ }
+ }
+
+ impl<Relocations> thorin::Session<Relocations> for ThorinSession<Relocations> {
+ fn alloc_data<'arena>(&'arena self, data: Vec<u8>) -> &'arena [u8] {
+ (*self.arena_data.alloc(data)).borrow()
+ }
+
+ fn alloc_relocation<'arena>(&'arena self, data: Relocations) -> &'arena Relocations {
+ (*self.arena_relocations.alloc(data)).borrow()
+ }
+
+ fn read_input<'arena>(&'arena self, path: &Path) -> std::io::Result<&'arena [u8]> {
+ let file = File::open(&path)?;
+ let mmap = (unsafe { Mmap::map(file) })?;
+ Ok(self.alloc_mmap(mmap))
+ }
+ }
+
+ match sess.time("run_thorin", || -> Result<(), thorin::Error> {
+ let thorin_sess = ThorinSession::default();
+ let mut package = thorin::DwarfPackage::new(&thorin_sess);
+
+ // Input objs contain .o/.dwo files from the current crate.
+ match sess.opts.unstable_opts.split_dwarf_kind {
+ SplitDwarfKind::Single => {
+ for input_obj in cg_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+ package.add_input_object(input_obj)?;
+ }
+ }
+ SplitDwarfKind::Split => {
+ for input_obj in cg_results.modules.iter().filter_map(|m| m.dwarf_object.as_ref()) {
+ package.add_input_object(input_obj)?;
+ }
+ }
+ }
+
+ // Input rlibs contain .o/.dwo files from dependencies.
+ let input_rlibs = cg_results
+ .crate_info
+ .used_crate_source
+ .values()
+ .filter_map(|csource| csource.rlib.as_ref())
+ .map(|(path, _)| path);
+ for input_rlib in input_rlibs {
+ debug!(?input_rlib);
+ package.add_input_object(input_rlib)?;
+ }
+
+ // Failing to read the referenced objects is expected for dependencies where the path in the
+ // executable will have been cleaned by Cargo, but the referenced objects will be contained
+ // within rlibs provided as inputs.
+ //
+ // If paths have been remapped, then .o/.dwo files from the current crate also won't be
+ // found, but are provided explicitly above.
+ //
+ // Adding an executable is primarily done to make `thorin` check that all the referenced
+ // dwarf objects are found in the end.
+ package.add_executable(
+ &executable_out_filename,
+ thorin::MissingReferencedObjectBehaviour::Skip,
+ )?;
+
+ let output = package.finish()?.write()?;
+ let mut output_stream = BufWriter::new(
+ OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(true)
+ .truncate(true)
+ .open(dwp_out_filename)?,
+ );
+ output_stream.write_all(&output)?;
+ output_stream.flush()?;
+
+ Ok(())
+ }) {
+ Ok(()) => {}
+ Err(e) => {
+ sess.struct_err("linking dwarf objects with thorin failed")
+ .note(&format!("{:?}", e))
+ .emit();
+ sess.abort_if_errors();
+ }
+ }
+}
+
+/// Create a dynamic library or executable.
+///
+/// This will invoke the system linker/cc to create the resulting file. This links to all upstream
+/// files as well.
+fn link_natively<'a>(
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ crate_type: CrateType,
+ out_filename: &Path,
+ codegen_results: &CodegenResults,
+ tmpdir: &Path,
+) -> Result<(), ErrorGuaranteed> {
+ info!("preparing {:?} to {:?}", crate_type, out_filename);
+ let (linker_path, flavor) = linker_and_flavor(sess);
+ let mut cmd = linker_with_args(
+ &linker_path,
+ flavor,
+ sess,
+ archive_builder_builder,
+ crate_type,
+ tmpdir,
+ out_filename,
+ codegen_results,
+ )?;
+
+ linker::disable_localization(&mut cmd);
+
+ for &(ref k, ref v) in sess.target.link_env.as_ref() {
+ cmd.env(k.as_ref(), v.as_ref());
+ }
+ for k in sess.target.link_env_remove.as_ref() {
+ cmd.env_remove(k.as_ref());
+ }
+
+ if sess.opts.prints.contains(&PrintRequest::LinkArgs) {
+ println!("{:?}", &cmd);
+ }
+
+ // May have not found libraries in the right formats.
+ sess.abort_if_errors();
+
+ // Invoke the system linker
+ info!("{:?}", &cmd);
+ let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok();
+ let unknown_arg_regex =
+ Regex::new(r"(unknown|unrecognized) (command line )?(option|argument)").unwrap();
+ let mut prog;
+ let mut i = 0;
+ loop {
+ i += 1;
+ prog = sess.time("run_linker", || exec_linker(sess, &cmd, out_filename, tmpdir));
+ let Ok(ref output) = prog else {
+ break;
+ };
+ if output.status.success() {
+ break;
+ }
+ let mut out = output.stderr.clone();
+ out.extend(&output.stdout);
+ let out = String::from_utf8_lossy(&out);
+
+ // Check to see if the link failed with an error message that indicates it
+ // doesn't recognize the -no-pie option. If so, re-perform the link step
+ // without it. This is safe because if the linker doesn't support -no-pie
+ // then it should not default to linking executables as pie. Different
+ // versions of gcc seem to use different quotes in the error message so
+ // don't check for them.
+ if sess.target.linker_is_gnu
+ && flavor != LinkerFlavor::Ld
+ && unknown_arg_regex.is_match(&out)
+ && out.contains("-no-pie")
+ && cmd.get_args().iter().any(|e| e.to_string_lossy() == "-no-pie")
+ {
+ info!("linker output: {:?}", out);
+ warn!("Linker does not support -no-pie command line option. Retrying without.");
+ for arg in cmd.take_args() {
+ if arg.to_string_lossy() != "-no-pie" {
+ cmd.arg(arg);
+ }
+ }
+ info!("{:?}", &cmd);
+ continue;
+ }
+
+ // Detect '-static-pie' used with an older version of gcc or clang not supporting it.
+ // Fallback from '-static-pie' to '-static' in that case.
+ if sess.target.linker_is_gnu
+ && flavor != LinkerFlavor::Ld
+ && unknown_arg_regex.is_match(&out)
+ && (out.contains("-static-pie") || out.contains("--no-dynamic-linker"))
+ && cmd.get_args().iter().any(|e| e.to_string_lossy() == "-static-pie")
+ {
+ info!("linker output: {:?}", out);
+ warn!(
+ "Linker does not support -static-pie command line option. Retrying with -static instead."
+ );
+ // Mirror `add_(pre,post)_link_objects` to replace CRT objects.
+ let self_contained = crt_objects_fallback(sess, crate_type);
+ let opts = &sess.target;
+ let pre_objects = if self_contained {
+ &opts.pre_link_objects_fallback
+ } else {
+ &opts.pre_link_objects
+ };
+ let post_objects = if self_contained {
+ &opts.post_link_objects_fallback
+ } else {
+ &opts.post_link_objects
+ };
+ let get_objects = |objects: &CrtObjects, kind| {
+ objects
+ .get(&kind)
+ .iter()
+ .copied()
+ .flatten()
+ .map(|obj| get_object_file_path(sess, obj, self_contained).into_os_string())
+ .collect::<Vec<_>>()
+ };
+ let pre_objects_static_pie = get_objects(pre_objects, LinkOutputKind::StaticPicExe);
+ let post_objects_static_pie = get_objects(post_objects, LinkOutputKind::StaticPicExe);
+ let mut pre_objects_static = get_objects(pre_objects, LinkOutputKind::StaticNoPicExe);
+ let mut post_objects_static = get_objects(post_objects, LinkOutputKind::StaticNoPicExe);
+ // Assume that we know insertion positions for the replacement arguments from replaced
+ // arguments, which is true for all supported targets.
+ assert!(pre_objects_static.is_empty() || !pre_objects_static_pie.is_empty());
+ assert!(post_objects_static.is_empty() || !post_objects_static_pie.is_empty());
+ for arg in cmd.take_args() {
+ if arg.to_string_lossy() == "-static-pie" {
+ // Replace the output kind.
+ cmd.arg("-static");
+ } else if pre_objects_static_pie.contains(&arg) {
+ // Replace the pre-link objects (replace the first and remove the rest).
+ cmd.args(mem::take(&mut pre_objects_static));
+ } else if post_objects_static_pie.contains(&arg) {
+ // Replace the post-link objects (replace the first and remove the rest).
+ cmd.args(mem::take(&mut post_objects_static));
+ } else {
+ cmd.arg(arg);
+ }
+ }
+ info!("{:?}", &cmd);
+ continue;
+ }
+
+ // Here's a terribly awful hack that really shouldn't be present in any
+ // compiler. Here an environment variable is supported to automatically
+ // retry the linker invocation if the linker looks like it segfaulted.
+ //
+ // Gee that seems odd, normally segfaults are things we want to know
+ // about! Unfortunately though in rust-lang/rust#38878 we're
+ // experiencing the linker segfaulting on Travis quite a bit which is
+ // causing quite a bit of pain to land PRs when they spuriously fail
+ // due to a segfault.
+ //
+ // The issue #38878 has some more debugging information on it as well,
+ // but this unfortunately looks like it's just a race condition in
+ // macOS's linker with some thread pool working in the background. It
+ // seems that no one currently knows a fix for this so in the meantime
+ // we're left with this...
+ if !retry_on_segfault || i > 3 {
+ break;
+ }
+ let msg_segv = "clang: error: unable to execute command: Segmentation fault: 11";
+ let msg_bus = "clang: error: unable to execute command: Bus error: 10";
+ if out.contains(msg_segv) || out.contains(msg_bus) {
+ warn!(
+ ?cmd, %out,
+ "looks like the linker segfaulted when we tried to call it, \
+ automatically retrying again",
+ );
+ continue;
+ }
+
+ if is_illegal_instruction(&output.status) {
+ warn!(
+ ?cmd, %out, status = %output.status,
+ "looks like the linker hit an illegal instruction when we \
+ tried to call it, automatically retrying again.",
+ );
+ continue;
+ }
+
+ #[cfg(unix)]
+ fn is_illegal_instruction(status: &ExitStatus) -> bool {
+ use std::os::unix::prelude::*;
+ status.signal() == Some(libc::SIGILL)
+ }
+
+ #[cfg(not(unix))]
+ fn is_illegal_instruction(_status: &ExitStatus) -> bool {
+ false
+ }
+ }
+
+ match prog {
+ Ok(prog) => {
+ if !prog.status.success() {
+ let mut output = prog.stderr.clone();
+ output.extend_from_slice(&prog.stdout);
+ let escaped_output = escape_stdout_stderr_string(&output);
+ let mut err = sess.struct_err(&format!(
+ "linking with `{}` failed: {}",
+ linker_path.display(),
+ prog.status
+ ));
+ err.note(&format!("{:?}", &cmd)).note(&escaped_output);
+ if escaped_output.contains("undefined reference to") {
+ err.help(
+ "some `extern` functions couldn't be found; some native libraries may \
+ need to be installed or have their path specified",
+ );
+ err.note("use the `-l` flag to specify native libraries to link");
+ err.note("use the `cargo:rustc-link-lib` directive to specify the native \
+ libraries to link with Cargo (see https://doc.rust-lang.org/cargo/reference/build-scripts.html#cargorustc-link-libkindname)");
+ }
+ err.emit();
+
+ // If MSVC's `link.exe` was expected but the return code
+ // is not a Microsoft LNK error then suggest a way to fix or
+ // install the Visual Studio build tools.
+ if let Some(code) = prog.status.code() {
+ if sess.target.is_like_msvc
+ && flavor == LinkerFlavor::Msvc
+ // Respect the command line override
+ && sess.opts.cg.linker.is_none()
+ // Match exactly "link.exe"
+ && linker_path.to_str() == Some("link.exe")
+ // All Microsoft `link.exe` linking error codes are
+ // four digit numbers in the range 1000 to 9999 inclusive
+ && (code < 1000 || code > 9999)
+ {
+ let is_vs_installed = windows_registry::find_vs_version().is_ok();
+ let has_linker = windows_registry::find_tool(
+ &sess.opts.target_triple.triple(),
+ "link.exe",
+ )
+ .is_some();
+
+ sess.note_without_error("`link.exe` returned an unexpected error");
+ if is_vs_installed && has_linker {
+ // the linker is broken
+ sess.note_without_error(
+ "the Visual Studio build tools may need to be repaired \
+ using the Visual Studio installer",
+ );
+ sess.note_without_error(
+ "or a necessary component may be missing from the \
+ \"C++ build tools\" workload",
+ );
+ } else if is_vs_installed {
+ // the linker is not installed
+ sess.note_without_error(
+ "in the Visual Studio installer, ensure the \
+ \"C++ build tools\" workload is selected",
+ );
+ } else {
+ // visual studio is not installed
+ sess.note_without_error(
+ "you may need to install Visual Studio build tools with the \
+ \"C++ build tools\" workload",
+ );
+ }
+ }
+ }
+
+ sess.abort_if_errors();
+ }
+ info!("linker stderr:\n{}", escape_stdout_stderr_string(&prog.stderr));
+ info!("linker stdout:\n{}", escape_stdout_stderr_string(&prog.stdout));
+ }
+ Err(e) => {
+ let linker_not_found = e.kind() == io::ErrorKind::NotFound;
+
+ let mut linker_error = {
+ if linker_not_found {
+ sess.struct_err(&format!("linker `{}` not found", linker_path.display()))
+ } else {
+ sess.struct_err(&format!(
+ "could not exec the linker `{}`",
+ linker_path.display()
+ ))
+ }
+ };
+
+ linker_error.note(&e.to_string());
+
+ if !linker_not_found {
+ linker_error.note(&format!("{:?}", &cmd));
+ }
+
+ linker_error.emit();
+
+ if sess.target.is_like_msvc && linker_not_found {
+ sess.note_without_error(
+ "the msvc targets depend on the msvc linker \
+ but `link.exe` was not found",
+ );
+ sess.note_without_error(
+ "please ensure that VS 2013, VS 2015, VS 2017, VS 2019 or VS 2022 \
+ was installed with the Visual C++ option",
+ );
+ }
+ sess.abort_if_errors();
+ }
+ }
+
+ match sess.split_debuginfo() {
+ // If split debug information is disabled or located in individual files
+ // there's nothing to do here.
+ SplitDebuginfo::Off | SplitDebuginfo::Unpacked => {}
+
+ // If packed split-debuginfo is requested, but the final compilation
+ // doesn't actually have any debug information, then we skip this step.
+ SplitDebuginfo::Packed if sess.opts.debuginfo == DebugInfo::None => {}
+
+ // On macOS the external `dsymutil` tool is used to create the packed
+ // debug information. Note that this will read debug information from
+ // the objects on the filesystem which we'll clean up later.
+ SplitDebuginfo::Packed if sess.target.is_like_osx => {
+ let prog = Command::new("dsymutil").arg(out_filename).output();
+ match prog {
+ Ok(prog) => {
+ if !prog.status.success() {
+ let mut output = prog.stderr.clone();
+ output.extend_from_slice(&prog.stdout);
+ sess.struct_warn(&format!(
+ "processing debug info with `dsymutil` failed: {}",
+ prog.status
+ ))
+ .note(&escape_string(&output))
+ .emit();
+ }
+ }
+ Err(e) => sess.fatal(&format!("unable to run `dsymutil`: {}", e)),
+ }
+ }
+
+ // On MSVC packed debug information is produced by the linker itself so
+ // there's no need to do anything else here.
+ SplitDebuginfo::Packed if sess.target.is_like_windows => {}
+
+ // ... and otherwise we're processing a `*.dwp` packed dwarf file.
+ //
+ // We cannot rely on the .o paths in the executable because they may have been
+ // remapped by --remap-path-prefix and therefore invalid, so we need to provide
+ // the .o/.dwo paths explicitly.
+ SplitDebuginfo::Packed => link_dwarf_object(sess, codegen_results, out_filename),
+ }
+
+ let strip = strip_value(sess);
+
+ if sess.target.is_like_osx {
+ match (strip, crate_type) {
+ (Strip::Debuginfo, _) => strip_symbols_in_osx(sess, &out_filename, Some("-S")),
+ // Per the manpage, `-x` is the maximum safe strip level for dynamic libraries. (#93988)
+ (Strip::Symbols, CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro) => {
+ strip_symbols_in_osx(sess, &out_filename, Some("-x"))
+ }
+ (Strip::Symbols, _) => strip_symbols_in_osx(sess, &out_filename, None),
+ (Strip::None, _) => {}
+ }
+ }
+
+ Ok(())
+}
+
+// Temporarily support both -Z strip and -C strip
+fn strip_value(sess: &Session) -> Strip {
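+    // `-C strip` takes precedence when set; otherwise we fall back to `-Z strip`
+    // (which may itself be `Strip::None`).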
+ match (sess.opts.unstable_opts.strip, sess.opts.cg.strip) {
+ (s, Strip::None) => s,
+ (_, s) => s,
+ }
+}
+
+fn strip_symbols_in_osx<'a>(sess: &'a Session, out_filename: &Path, option: Option<&str>) {
+ let mut cmd = Command::new("strip");
+ if let Some(option) = option {
+ cmd.arg(option);
+ }
+ let prog = cmd.arg(out_filename).output();
+ match prog {
+ Ok(prog) => {
+ if !prog.status.success() {
+ let mut output = prog.stderr.clone();
+ output.extend_from_slice(&prog.stdout);
+ sess.struct_warn(&format!(
+ "stripping debug info with `strip` failed: {}",
+ prog.status
+ ))
+ .note(&escape_string(&output))
+ .emit();
+ }
+ }
+ Err(e) => sess.fatal(&format!("unable to run `strip`: {}", e)),
+ }
+}
+
+fn escape_string(s: &[u8]) -> String {
+ str::from_utf8(s).map(|s| s.to_owned()).unwrap_or_else(|_| {
+ let mut x = "Non-UTF-8 output: ".to_string();
+ x.extend(s.iter().flat_map(|&b| ascii::escape_default(b)).map(char::from));
+ x
+ })
+}
+
+fn add_sanitizer_libraries(sess: &Session, crate_type: CrateType, linker: &mut dyn Linker) {
+ // On macOS the runtimes are distributed as dylibs which should be linked to
+ // both executables and dynamic shared objects. Everywhere else the runtimes
+ // are currently distributed as static libraries which should be linked to
+ // executables only.
+ let needs_runtime = match crate_type {
+ CrateType::Executable => true,
+ CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => sess.target.is_like_osx,
+ CrateType::Rlib | CrateType::Staticlib => false,
+ };
+
+ if !needs_runtime {
+ return;
+ }
+
+ let sanitizer = sess.opts.unstable_opts.sanitizer;
+ if sanitizer.contains(SanitizerSet::ADDRESS) {
+ link_sanitizer_runtime(sess, linker, "asan");
+ }
+ if sanitizer.contains(SanitizerSet::LEAK) {
+ link_sanitizer_runtime(sess, linker, "lsan");
+ }
+ if sanitizer.contains(SanitizerSet::MEMORY) {
+ link_sanitizer_runtime(sess, linker, "msan");
+ }
+ if sanitizer.contains(SanitizerSet::THREAD) {
+ link_sanitizer_runtime(sess, linker, "tsan");
+ }
+ if sanitizer.contains(SanitizerSet::HWADDRESS) {
+ link_sanitizer_runtime(sess, linker, "hwasan");
+ }
+}
+
+fn link_sanitizer_runtime(sess: &Session, linker: &mut dyn Linker, name: &str) {
+ fn find_sanitizer_runtime(sess: &Session, filename: &str) -> PathBuf {
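+        // Prefer the target libdir of the configured sysroot; if the runtime is not
+        // found there, fall back to the default sysroot's target libdir.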
+ let session_tlib =
+ filesearch::make_target_lib_path(&sess.sysroot, sess.opts.target_triple.triple());
+ let path = session_tlib.join(filename);
+        if path.exists() {
+            session_tlib
+        } else {
+            let default_sysroot = filesearch::get_or_default_sysroot();
+            filesearch::make_target_lib_path(&default_sysroot, sess.opts.target_triple.triple())
+        }
+ }
+
+ let channel = option_env!("CFG_RELEASE_CHANNEL")
+ .map(|channel| format!("-{}", channel))
+ .unwrap_or_default();
+
+ if sess.target.is_like_osx {
+ // On Apple platforms, the sanitizer is always built as a dylib, and
+ // LLVM will link to `@rpath/*.dylib`, so we need to specify an
+ // rpath to the library as well (the rpath should be absolute, see
+ // PR #41352 for details).
+ let filename = format!("rustc{}_rt.{}", channel, name);
+ let path = find_sanitizer_runtime(&sess, &filename);
+ let rpath = path.to_str().expect("non-utf8 component in path");
+ linker.args(&["-Wl,-rpath", "-Xlinker", rpath]);
+ linker.link_dylib(&filename, false, true);
+ } else {
+ let filename = format!("librustc{}_rt.{}.a", channel, name);
+ let path = find_sanitizer_runtime(&sess, &filename).join(&filename);
+ linker.link_whole_rlib(&path);
+ }
+}
+
+/// Returns a boolean indicating whether the specified crate should be ignored
+/// during LTO.
+///
+/// Crates ignored during LTO are not lumped together in the "massive object
+/// file" that we create and are instead linked in their normal rlib form. See
+/// comments below for what crates do not participate in LTO.
+///
+/// It's unusual for a crate to not participate in LTO. Typically only
+/// compiler-specific and unstable crates have a reason to not participate in
+/// LTO.
+pub fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool {
+ // If our target enables builtin function lowering in LLVM then the
+ // crates providing these functions don't participate in LTO (e.g.
+ // no_builtins or compiler builtins crates).
+ !sess.target.no_builtins
+ && (info.compiler_builtins == Some(cnum) || info.is_no_builtins.contains(&cnum))
+}
+
+// This function tries to determine the appropriate linker (and corresponding LinkerFlavor) to use.
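+// (e.g. `-C linker=clang` with no explicit flavor is inferred as `LinkerFlavor::Gcc`
+// from the file stem below)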
+pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) {
+ fn infer_from(
+ sess: &Session,
+ linker: Option<PathBuf>,
+ flavor: Option<LinkerFlavor>,
+ ) -> Option<(PathBuf, LinkerFlavor)> {
+ match (linker, flavor) {
+ (Some(linker), Some(flavor)) => Some((linker, flavor)),
+ // only the linker flavor is known; use the default linker for the selected flavor
+ (None, Some(flavor)) => Some((
+ PathBuf::from(match flavor {
+ LinkerFlavor::Em => {
+ if cfg!(windows) {
+ "emcc.bat"
+ } else {
+ "emcc"
+ }
+ }
+ LinkerFlavor::Gcc => {
+ if cfg!(any(target_os = "solaris", target_os = "illumos")) {
+ // On historical Solaris systems, "cc" may have
+ // been Sun Studio, which is not flag-compatible
+ // with "gcc". This history casts a long shadow,
+ // and many modern illumos distributions today
+ // ship GCC as "gcc" without also making it
+ // available as "cc".
+ "gcc"
+ } else {
+ "cc"
+ }
+ }
+ LinkerFlavor::Ld => "ld",
+ LinkerFlavor::Msvc => "link.exe",
+ LinkerFlavor::Lld(_) => "lld",
+ LinkerFlavor::PtxLinker => "rust-ptx-linker",
+ LinkerFlavor::BpfLinker => "bpf-linker",
+ LinkerFlavor::L4Bender => "l4-bender",
+ }),
+ flavor,
+ )),
+ (Some(linker), None) => {
+ let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| {
+ sess.fatal("couldn't extract file stem from specified linker")
+ });
+
+ let flavor = if stem == "emcc" {
+ LinkerFlavor::Em
+ } else if stem == "gcc"
+ || stem.ends_with("-gcc")
+ || stem == "clang"
+ || stem.ends_with("-clang")
+ {
+ LinkerFlavor::Gcc
+ } else if stem == "wasm-ld" || stem.ends_with("-wasm-ld") {
+ LinkerFlavor::Lld(LldFlavor::Wasm)
+ } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") {
+ LinkerFlavor::Ld
+ } else if stem == "link" || stem == "lld-link" {
+ LinkerFlavor::Msvc
+ } else if stem == "lld" || stem == "rust-lld" {
+ LinkerFlavor::Lld(sess.target.lld_flavor)
+ } else {
+ // fall back to the value in the target spec
+ sess.target.linker_flavor
+ };
+
+ Some((linker, flavor))
+ }
+ (None, None) => None,
+ }
+ }
+
+    // A linker and linker flavor specified via the command line take precedence
+    // over what the target specification says.
+ if let Some(ret) = infer_from(sess, sess.opts.cg.linker.clone(), sess.opts.cg.linker_flavor) {
+ return ret;
+ }
+
+ if let Some(ret) = infer_from(
+ sess,
+ sess.target.linker.as_deref().map(PathBuf::from),
+ Some(sess.target.linker_flavor),
+ ) {
+ return ret;
+ }
+
+ bug!("Not enough information provided to determine how to invoke the linker");
+}
+
+/// Returns a pair of booleans indicating whether we should preserve the object and
+/// dwarf object files on the filesystem for their debug information. This is often
+/// useful with split-dwarf-like schemes.
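+///
+/// The returned pair is `(preserve objects, preserve dwarf objects)`.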
+fn preserve_objects_for_their_debuginfo(sess: &Session) -> (bool, bool) {
+ // If the objects don't have debuginfo there's nothing to preserve.
+ if sess.opts.debuginfo == config::DebugInfo::None {
+ return (false, false);
+ }
+
+ // If we're only producing artifacts that are archives, no need to preserve
+ // the objects as they're losslessly contained inside the archives.
+ if sess.crate_types().iter().all(|&x| x.is_archive()) {
+ return (false, false);
+ }
+
+ match (sess.split_debuginfo(), sess.opts.unstable_opts.split_dwarf_kind) {
+ // If there is no split debuginfo then do not preserve objects.
+ (SplitDebuginfo::Off, _) => (false, false),
+ // If there is packed split debuginfo, then the debuginfo in the objects
+ // has been packaged and the objects can be deleted.
+ (SplitDebuginfo::Packed, _) => (false, false),
+ // If there is unpacked split debuginfo and the current target can not use
+ // split dwarf, then keep objects.
+ (SplitDebuginfo::Unpacked, _) if !sess.target_can_use_split_dwarf() => (true, false),
+ // If there is unpacked split debuginfo and the target can use split dwarf, then
+ // keep the object containing that debuginfo (whether that is an object file or
+ // dwarf object file depends on the split dwarf kind).
+ (SplitDebuginfo::Unpacked, SplitDwarfKind::Single) => (true, false),
+ (SplitDebuginfo::Unpacked, SplitDwarfKind::Split) => (false, true),
+ }
+}
+
+fn archive_search_paths(sess: &Session) -> Vec<PathBuf> {
+ sess.target_filesearch(PathKind::Native).search_path_dirs()
+}
+
+#[derive(PartialEq)]
+enum RlibFlavor {
+ Normal,
+ StaticlibBase,
+}
+
+fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLib]) {
+ let lib_args: Vec<_> = all_native_libs
+ .iter()
+ .filter(|l| relevant_lib(sess, l))
+ .filter_map(|lib| {
+ let name = lib.name?;
+ match lib.kind {
+ NativeLibKind::Static { bundle: Some(false), .. }
+ | NativeLibKind::Dylib { .. }
+ | NativeLibKind::Unspecified => {
+ let verbatim = lib.verbatim.unwrap_or(false);
+ if sess.target.is_like_msvc {
+ Some(format!("{}{}", name, if verbatim { "" } else { ".lib" }))
+ } else if sess.target.linker_is_gnu {
+ Some(format!("-l{}{}", if verbatim { ":" } else { "" }, name))
+ } else {
+ Some(format!("-l{}", name))
+ }
+ }
+ NativeLibKind::Framework { .. } => {
+ // ld-only syntax, since there are no frameworks in MSVC
+ Some(format!("-framework {}", name))
+ }
+ // These are included, no need to print them
+ NativeLibKind::Static { bundle: None | Some(true), .. }
+ | NativeLibKind::LinkArg
+ | NativeLibKind::RawDylib => None,
+ }
+ })
+ .collect();
+ if !lib_args.is_empty() {
+ sess.note_without_error(
+ "Link against the following native artifacts when linking \
+ against this static library. The order and any duplication \
+ can be significant on some platforms.",
+ );
+ // Prefix for greppability
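+        // e.g. (hypothetical output on macOS):
+        // `note: native-static-libs: -lsqlite3 -lz -framework CoreFoundation`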
+ sess.note_without_error(&format!("native-static-libs: {}", &lib_args.join(" ")));
+ }
+}
+
+fn get_object_file_path(sess: &Session, name: &str, self_contained: bool) -> PathBuf {
+ let fs = sess.target_filesearch(PathKind::Native);
+ let file_path = fs.get_lib_path().join(name);
+ if file_path.exists() {
+ return file_path;
+ }
+ // Special directory with objects used only in self-contained linkage mode
+ if self_contained {
+ let file_path = fs.get_self_contained_lib_path().join(name);
+ if file_path.exists() {
+ return file_path;
+ }
+ }
+ for search_path in fs.search_paths() {
+ let file_path = search_path.dir.join(name);
+ if file_path.exists() {
+ return file_path;
+ }
+ }
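+    // Fall back to the bare file name; resolving it is then left to the linker.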
+ PathBuf::from(name)
+}
+
+fn exec_linker(
+ sess: &Session,
+ cmd: &Command,
+ out_filename: &Path,
+ tmpdir: &Path,
+) -> io::Result<Output> {
+ // When attempting to spawn the linker we run a risk of blowing out the
+ // size limits for spawning a new process with respect to the arguments
+ // we pass on the command line.
+ //
+ // Here we attempt to handle errors from the OS saying "your list of
+ // arguments is too big" by reinvoking the linker again with an `@`-file
+ // that contains all the arguments. The theory is that this is then
+ // accepted on all linkers and the linker will read all its options out of
+ // there instead of looking at the command line.
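+    //
+    // For example (hypothetical): an overlong `cc foo.o -o foo` invocation is retried
+    // as `cc @<tmpdir>/linker-arguments`, where the file contains one escaped argument
+    // per line:
+    //
+    //     foo.o
+    //     -o
+    //     foo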
+ if !cmd.very_likely_to_exceed_some_spawn_limit() {
+ match cmd.command().stdout(Stdio::piped()).stderr(Stdio::piped()).spawn() {
+ Ok(child) => {
+ let output = child.wait_with_output();
+ flush_linked_file(&output, out_filename)?;
+ return output;
+ }
+ Err(ref e) if command_line_too_big(e) => {
+ info!("command line to linker was too big: {}", e);
+ }
+ Err(e) => return Err(e),
+ }
+ }
+
+ info!("falling back to passing arguments to linker via an @-file");
+ let mut cmd2 = cmd.clone();
+ let mut args = String::new();
+ for arg in cmd2.take_args() {
+ args.push_str(
+ &Escape { arg: arg.to_str().unwrap(), is_like_msvc: sess.target.is_like_msvc }
+ .to_string(),
+ );
+ args.push('\n');
+ }
+ let file = tmpdir.join("linker-arguments");
+ let bytes = if sess.target.is_like_msvc {
+ let mut out = Vec::with_capacity((1 + args.len()) * 2);
+ // start the stream with a UTF-16 BOM
+ for c in std::iter::once(0xFEFF).chain(args.encode_utf16()) {
+ // encode in little endian
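+            // (e.g. the BOM 0xFEFF is emitted as the byte pair [0xFF, 0xFE])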
+ out.push(c as u8);
+ out.push((c >> 8) as u8);
+ }
+ out
+ } else {
+ args.into_bytes()
+ };
+ fs::write(&file, &bytes)?;
+ cmd2.arg(format!("@{}", file.display()));
+ info!("invoking linker {:?}", cmd2);
+ let output = cmd2.output();
+ flush_linked_file(&output, out_filename)?;
+ return output;
+
+ #[cfg(not(windows))]
+ fn flush_linked_file(_: &io::Result<Output>, _: &Path) -> io::Result<()> {
+ Ok(())
+ }
+
+ #[cfg(windows)]
+ fn flush_linked_file(
+ command_output: &io::Result<Output>,
+ out_filename: &Path,
+ ) -> io::Result<()> {
+ // On Windows, under high I/O load, output buffers are sometimes not flushed,
+ // even long after process exit, causing nasty, non-reproducible output bugs.
+ //
+ // File::sync_all() calls FlushFileBuffers() down the line, which solves the problem.
+ //
+        // A full writeup of the original Chrome bug can be found at
+ // randomascii.wordpress.com/2018/02/25/compiler-bug-linker-bug-windows-kernel-bug/amp
+
+ if let &Ok(ref out) = command_output {
+ if out.status.success() {
+ if let Ok(of) = fs::OpenOptions::new().write(true).open(out_filename) {
+ of.sync_all()?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ #[cfg(unix)]
+ fn command_line_too_big(err: &io::Error) -> bool {
+ err.raw_os_error() == Some(::libc::E2BIG)
+ }
+
+ #[cfg(windows)]
+ fn command_line_too_big(err: &io::Error) -> bool {
+ const ERROR_FILENAME_EXCED_RANGE: i32 = 206;
+ err.raw_os_error() == Some(ERROR_FILENAME_EXCED_RANGE)
+ }
+
+ #[cfg(not(any(unix, windows)))]
+ fn command_line_too_big(_: &io::Error) -> bool {
+ false
+ }
+
+ struct Escape<'a> {
+ arg: &'a str,
+ is_like_msvc: bool,
+ }
+
+ impl<'a> fmt::Display for Escape<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.is_like_msvc {
+ // This is "documented" at
+ // https://docs.microsoft.com/en-us/cpp/build/reference/at-specify-a-linker-response-file
+ //
+                // Unfortunately there's no great specification of the
+                // syntax that I could find online (at least), but some local
+                // testing showed that this seemed sufficient-ish to catch
+                // at least a few edge cases.
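+                // For example (hypothetical): the argument `C:\path with spaces\foo.lib`
+                // is written as `"C:\path with spaces\foo.lib"`, and an embedded `"`
+                // becomes `\"`.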
+ write!(f, "\"")?;
+ for c in self.arg.chars() {
+ match c {
+ '"' => write!(f, "\\{}", c)?,
+ c => write!(f, "{}", c)?,
+ }
+ }
+ write!(f, "\"")?;
+ } else {
+ // This is documented at https://linux.die.net/man/1/ld, namely:
+ //
+ // > Options in file are separated by whitespace. A whitespace
+ // > character may be included in an option by surrounding the
+ // > entire option in either single or double quotes. Any
+ // > character (including a backslash) may be included by
+ // > prefixing the character to be included with a backslash.
+ //
+ // We put an argument on each line, so all we need to do is
+ // ensure the line is interpreted as one whole argument.
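+                // For example (hypothetical): `foo bar\baz` is written on its own
+                // line as `foo\ bar\\baz`.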
+ for c in self.arg.chars() {
+ match c {
+ '\\' | ' ' => write!(f, "\\{}", c)?,
+ c => write!(f, "{}", c)?,
+ }
+ }
+ }
+ Ok(())
+ }
+ }
+}
+
+fn link_output_kind(sess: &Session, crate_type: CrateType) -> LinkOutputKind {
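+    // Pick a baseline output kind from the crate type, CRT linkage, and relocation
+    // model; it is then adjusted below if the target lacks support for it.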
+ let kind = match (crate_type, sess.crt_static(Some(crate_type)), sess.relocation_model()) {
+ (CrateType::Executable, _, _) if sess.is_wasi_reactor() => LinkOutputKind::WasiReactorExe,
+ (CrateType::Executable, false, RelocModel::Pic | RelocModel::Pie) => {
+ LinkOutputKind::DynamicPicExe
+ }
+ (CrateType::Executable, false, _) => LinkOutputKind::DynamicNoPicExe,
+ (CrateType::Executable, true, RelocModel::Pic | RelocModel::Pie) => {
+ LinkOutputKind::StaticPicExe
+ }
+ (CrateType::Executable, true, _) => LinkOutputKind::StaticNoPicExe,
+ (_, true, _) => LinkOutputKind::StaticDylib,
+ (_, false, _) => LinkOutputKind::DynamicDylib,
+ };
+
+ // Adjust the output kind to target capabilities.
+ let opts = &sess.target;
+ let pic_exe_supported = opts.position_independent_executables;
+ let static_pic_exe_supported = opts.static_position_independent_executables;
+ let static_dylib_supported = opts.crt_static_allows_dylibs;
+ match kind {
+ LinkOutputKind::DynamicPicExe if !pic_exe_supported => LinkOutputKind::DynamicNoPicExe,
+ LinkOutputKind::StaticPicExe if !static_pic_exe_supported => LinkOutputKind::StaticNoPicExe,
+ LinkOutputKind::StaticDylib if !static_dylib_supported => LinkOutputKind::DynamicDylib,
+ _ => kind,
+ }
+}
+
+// Returns true if the linker is located within the sysroot.
+fn detect_self_contained_mingw(sess: &Session) -> bool {
+ let (linker, _) = linker_and_flavor(&sess);
+    // Assume that `-C linker=rust-lld` implies self-contained mode.
+ if linker == Path::new("rust-lld") {
+ return true;
+ }
+ let linker_with_extension = if cfg!(windows) && linker.extension().is_none() {
+ linker.with_extension("exe")
+ } else {
+ linker
+ };
+ for dir in env::split_paths(&env::var_os("PATH").unwrap_or_default()) {
+ let full_path = dir.join(&linker_with_extension);
+        // If the linker resolves to a file outside the sysroot, this is not self-contained mode.
+ if full_path.is_file() && !full_path.starts_with(&sess.sysroot) {
+ return false;
+ }
+ }
+ true
+}
+
+/// Whether we link to our own CRT objects instead of relying on gcc to pull them in.
+/// We only provide such support for a very limited number of targets.
+fn crt_objects_fallback(sess: &Session, crate_type: CrateType) -> bool {
+ if let Some(self_contained) = sess.opts.cg.link_self_contained {
+ return self_contained;
+ }
+
+ match sess.target.crt_objects_fallback {
+ // FIXME: Find a better heuristic for "native musl toolchain is available",
+ // based on host and linker path, for example.
+ // (https://github.com/rust-lang/rust/pull/71769#issuecomment-626330237).
+ Some(CrtObjectsFallback::Musl) => sess.crt_static(Some(crate_type)),
+ Some(CrtObjectsFallback::Mingw) => {
+ sess.host == sess.target
+ && sess.target.vendor != "uwp"
+ && detect_self_contained_mingw(&sess)
+ }
+ // FIXME: Figure out cases in which WASM needs to link with a native toolchain.
+ Some(CrtObjectsFallback::Wasm) => true,
+ None => false,
+ }
+}
+
+/// Add pre-link object files defined by the target spec.
+fn add_pre_link_objects(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ link_output_kind: LinkOutputKind,
+ self_contained: bool,
+) {
+ let opts = &sess.target;
+ let objects =
+ if self_contained { &opts.pre_link_objects_fallback } else { &opts.pre_link_objects };
+ for obj in objects.get(&link_output_kind).iter().copied().flatten() {
+ cmd.add_object(&get_object_file_path(sess, obj, self_contained));
+ }
+}
+
+/// Add post-link object files defined by the target spec.
+fn add_post_link_objects(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ link_output_kind: LinkOutputKind,
+ self_contained: bool,
+) {
+ let opts = &sess.target;
+ let objects =
+ if self_contained { &opts.post_link_objects_fallback } else { &opts.post_link_objects };
+ for obj in objects.get(&link_output_kind).iter().copied().flatten() {
+ cmd.add_object(&get_object_file_path(sess, obj, self_contained));
+ }
+}
+
+/// Add arbitrary "pre-link" args defined by the target spec or from command line.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_pre_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+ if let Some(args) = sess.target.pre_link_args.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+ cmd.args(&sess.opts.unstable_opts.pre_link_args);
+}
+
+/// Add a link script embedded in the target, if applicable.
+fn add_link_script(cmd: &mut dyn Linker, sess: &Session, tmpdir: &Path, crate_type: CrateType) {
+ match (crate_type, &sess.target.link_script) {
+ (CrateType::Cdylib | CrateType::Executable, Some(script)) => {
+ if !sess.target.linker_is_gnu {
+ sess.fatal("can only use link script when linking with GNU-like linker");
+ }
+
+ let file_name = ["rustc", &sess.target.llvm_target, "linkfile.ld"].join("-");
+
+ let path = tmpdir.join(file_name);
+ if let Err(e) = fs::write(&path, script.as_ref()) {
+ sess.fatal(&format!("failed to write link script to {}: {}", path.display(), e));
+ }
+
+ cmd.arg("--script");
+ cmd.arg(path);
+ }
+ _ => {}
+ }
+}
+
+/// Add arbitrary "user defined" args defined from command line.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_user_defined_link_args(cmd: &mut dyn Linker, sess: &Session) {
+ cmd.args(&sess.opts.cg.link_args);
+}
+
+/// Add arbitrary "late link" args defined by the target spec.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_late_link_args(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ flavor: LinkerFlavor,
+ crate_type: CrateType,
+ codegen_results: &CodegenResults,
+) {
+ let any_dynamic_crate = crate_type == CrateType::Dylib
+ || codegen_results.crate_info.dependency_formats.iter().any(|(ty, list)| {
+ *ty == crate_type && list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ if any_dynamic_crate {
+ if let Some(args) = sess.target.late_link_args_dynamic.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+ } else {
+ if let Some(args) = sess.target.late_link_args_static.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+ }
+ if let Some(args) = sess.target.late_link_args.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+}
+
+/// Add arbitrary "post-link" args defined by the target spec.
+/// FIXME: Determine where exactly these args need to be inserted.
+fn add_post_link_args(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+ if let Some(args) = sess.target.post_link_args.get(&flavor) {
+ cmd.args(args.iter().map(Deref::deref));
+ }
+}
+
+/// Add a synthetic object file that contains references to all symbols that we want to expose to
+/// the linker.
+///
+/// Background: we implement rlibs as static libraries (archives). Linkers treat archives
+/// differently from object files: all object files participate in linking, while archives will
+/// only participate in linking if they can satisfy at least one undefined reference (version
+/// scripts don't count). This causes `#[no_mangle]` or `#[used]` items to be ignored by the
+/// linker, and since they never participate in the linking, using `KEEP` in the linker scripts
+/// can't keep them either. This causes #47384.
+///
+/// To keep them around, we could use `--whole-archive` and equivalents to force rlibs to
+/// participate in linking like object files, but this proves to be expensive (#93791). Therefore
+/// we instead just introduce an undefined reference to them. This could be done with the `-u`
+/// command line option to the linker or `EXTERN(...)` in linker scripts; however, these not only
+/// introduce an undefined reference but also make the symbols GC roots, preventing `--gc-sections`
+/// from removing them, which is especially problematic for embedded programming where every
+/// byte counts.
+///
+/// This method creates a synthetic object file containing undefined references to all symbols
+/// that are necessary for the linking. They are only present in the symbol table and are not
+/// used in any sections, so the linker will pick the relevant rlibs for linking, while
+/// unused `#[no_mangle]` or `#[used]` items can still be discarded by `--gc-sections`.
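+///
+/// As a hypothetical illustration: an rlib exporting `#[no_mangle] pub extern "C" fn foo()`
+/// results in one undefined symbol `foo` in the synthetic object, which makes the linker
+/// pull in the archive member defining `foo`, while `foo` itself may still be dropped by
+/// `--gc-sections` if nothing references it.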
+fn add_linked_symbol_object(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ tmpdir: &Path,
+ symbols: &[(String, SymbolExportKind)],
+) {
+ if symbols.is_empty() {
+ return;
+ }
+
+ let Some(mut file) = super::metadata::create_object_file(sess) else {
+ return;
+ };
+
+ // NOTE(nbdd0121): MSVC will hang if the input object file contains no sections,
+ // so add an empty section.
+ if file.format() == object::BinaryFormat::Coff {
+ file.add_section(Vec::new(), ".text".into(), object::SectionKind::Text);
+
+    // We handle the name decoration of COFF targets in `symbol_export.rs`, so disable the
+    // default mangler in the `object` crate.
+ file.set_mangling(object::write::Mangling::None);
+
+ // Add feature flags to the object file. On MSVC this is optional but LLD will complain if
+ // not present.
+ let mut feature = 0;
+
+ if file.architecture() == object::Architecture::I386 {
+            // Indicate that all SEH handlers are registered in the .sxdata section.
+            // We don't generate any code, so we don't need a .sxdata section, but
+            // LLD still expects us to set this bit (see #96498).
+ // Reference: https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
+ feature |= 1;
+ }
+
+ file.add_symbol(object::write::Symbol {
+ name: "@feat.00".into(),
+ value: feature,
+ size: 0,
+ kind: object::SymbolKind::Data,
+ scope: object::SymbolScope::Compilation,
+ weak: false,
+ section: object::write::SymbolSection::Absolute,
+ flags: object::SymbolFlags::None,
+ });
+ }
+
+ for (sym, kind) in symbols.iter() {
+ file.add_symbol(object::write::Symbol {
+ name: sym.clone().into(),
+ value: 0,
+ size: 0,
+ kind: match kind {
+ SymbolExportKind::Text => object::SymbolKind::Text,
+ SymbolExportKind::Data => object::SymbolKind::Data,
+ SymbolExportKind::Tls => object::SymbolKind::Tls,
+ },
+ scope: object::SymbolScope::Unknown,
+ weak: false,
+ section: object::write::SymbolSection::Undefined,
+ flags: object::SymbolFlags::None,
+ });
+ }
+
+ let path = tmpdir.join("symbols.o");
+ let result = std::fs::write(&path, file.write().unwrap());
+ if let Err(e) = result {
+ sess.fatal(&format!("failed to write {}: {}", path.display(), e));
+ }
+ cmd.add_object(&path);
+}
+
+/// Add object files containing code from the current crate.
+fn add_local_crate_regular_objects(cmd: &mut dyn Linker, codegen_results: &CodegenResults) {
+ for obj in codegen_results.modules.iter().filter_map(|m| m.object.as_ref()) {
+ cmd.add_object(obj);
+ }
+}
+
+/// Add object files for allocator code linked once for the whole crate tree.
+fn add_local_crate_allocator_objects(cmd: &mut dyn Linker, codegen_results: &CodegenResults) {
+ if let Some(obj) = codegen_results.allocator_module.as_ref().and_then(|m| m.object.as_ref()) {
+ cmd.add_object(obj);
+ }
+}
+
+/// Add object files containing metadata for the current crate.
+fn add_local_crate_metadata_objects(
+ cmd: &mut dyn Linker,
+ crate_type: CrateType,
+ codegen_results: &CodegenResults,
+) {
+    // When linking a dynamic library, we put the metadata into a section of the
+    // produced dynamic library. This metadata is in a separate object file from
+    // the main object file, so we link that in here.
+ if crate_type == CrateType::Dylib || crate_type == CrateType::ProcMacro {
+ if let Some(obj) = codegen_results.metadata_module.as_ref().and_then(|m| m.object.as_ref())
+ {
+ cmd.add_object(obj);
+ }
+ }
+}
+
+/// Add sysroot and other globally set directories to the directory search list.
+fn add_library_search_dirs(cmd: &mut dyn Linker, sess: &Session, self_contained: bool) {
+    // The default library location; we need this to find the runtime.
+ // The location of crates will be determined as needed.
+ let lib_path = sess.target_filesearch(PathKind::All).get_lib_path();
+ cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path));
+
+ // Special directory with libraries used only in self-contained linkage mode
+ if self_contained {
+ let lib_path = sess.target_filesearch(PathKind::All).get_self_contained_lib_path();
+ cmd.include_path(&fix_windows_verbatim_for_gcc(&lib_path));
+ }
+}
+
+/// Add options making relocation sections in the produced ELF files read-only
+/// and suppressing lazy binding.
+fn add_relro_args(cmd: &mut dyn Linker, sess: &Session) {
+ match sess.opts.unstable_opts.relro_level.unwrap_or(sess.target.relro_level) {
+ RelroLevel::Full => cmd.full_relro(),
+ RelroLevel::Partial => cmd.partial_relro(),
+ RelroLevel::Off => cmd.no_relro(),
+ RelroLevel::None => {}
+ }
+}
+
+/// Add library search paths used at runtime by dynamic linkers.
+fn add_rpath_args(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ codegen_results: &CodegenResults,
+ out_filename: &Path,
+) {
+ // FIXME (#2397): At some point we want to rpath our guesses as to
+ // where extern libraries might live, based on the
+ // add_lib_search_paths
+ if sess.opts.cg.rpath {
+ let libs = codegen_results
+ .crate_info
+ .used_crates
+ .iter()
+ .filter_map(|cnum| {
+ codegen_results.crate_info.used_crate_source[cnum]
+ .dylib
+ .as_ref()
+ .map(|(path, _)| &**path)
+ })
+ .collect::<Vec<_>>();
+ let mut rpath_config = RPathConfig {
+ libs: &*libs,
+ out_filename: out_filename.to_path_buf(),
+ has_rpath: sess.target.has_rpath,
+ is_like_osx: sess.target.is_like_osx,
+ linker_is_gnu: sess.target.linker_is_gnu,
+ };
+ cmd.args(&rpath::get_rpath_flags(&mut rpath_config));
+ }
+}
+
+/// Produce the linker command line containing linker path and arguments.
+///
+/// When comments in the function say "order-(in)dependent" they mean order-dependence between
+/// options and libraries/object files. For example `--whole-archive` (order-dependent) applies
+/// to specific libraries passed after it, and `-o` (output file, order-independent) applies
+/// to the linking process as a whole.
+/// Order-independent options may still override each other in order-dependent fashion,
+/// e.g. `--foo=yes --foo=no` may be equivalent to `--foo=no`.
+fn linker_with_args<'a>(
+ path: &Path,
+ flavor: LinkerFlavor,
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ crate_type: CrateType,
+ tmpdir: &Path,
+ out_filename: &Path,
+ codegen_results: &CodegenResults,
+) -> Result<Command, ErrorGuaranteed> {
+ let crt_objects_fallback = crt_objects_fallback(sess, crate_type);
+ let cmd = &mut *super::linker::get_linker(
+ sess,
+ path,
+ flavor,
+ crt_objects_fallback,
+ &codegen_results.crate_info.target_cpu,
+ );
+ let link_output_kind = link_output_kind(sess, crate_type);
+
+ // ------------ Early order-dependent options ------------
+
+ // If we're building something like a dynamic library then some platforms
+ // need to make sure that all symbols are exported correctly from the
+ // dynamic library.
+    // Must be passed before any libraries to prevent the symbols that should be exported from being thrown away,
+ // at least on some platforms (e.g. windows-gnu).
+ cmd.export_symbols(
+ tmpdir,
+ crate_type,
+ &codegen_results.crate_info.exported_symbols[&crate_type],
+ );
+
+ // Can be used for adding custom CRT objects or overriding order-dependent options above.
+ // FIXME: In practice built-in target specs use this for arbitrary order-independent options,
+ // introduce a target spec option for order-independent linker options and migrate built-in
+ // specs to it.
+ add_pre_link_args(cmd, sess, flavor);
+
+ // ------------ Object code and libraries, order-dependent ------------
+
+ // Pre-link CRT objects.
+ add_pre_link_objects(cmd, sess, link_output_kind, crt_objects_fallback);
+
+ add_linked_symbol_object(
+ cmd,
+ sess,
+ tmpdir,
+ &codegen_results.crate_info.linked_symbols[&crate_type],
+ );
+
+ // Sanitizer libraries.
+ add_sanitizer_libraries(sess, crate_type, cmd);
+
+ // Object code from the current crate.
+ // Take careful note of the ordering of the arguments we pass to the linker
+ // here. Linkers will assume that things on the left depend on things to the
+ // right. Things on the right cannot depend on things on the left. This is
+ // all formally implemented in terms of resolving symbols (libs on the right
+ // resolve unknown symbols of libs on the left, but not vice versa).
+ //
+    // For this reason, we have organized the arguments we pass to the linker as
+    // follows:
+ //
+ // 1. The local object that LLVM just generated
+ // 2. Local native libraries
+ // 3. Upstream rust libraries
+ // 4. Upstream native libraries
+ //
+ // The rationale behind this ordering is that those items lower down in the
+ // list can't depend on items higher up in the list. For example nothing can
+ // depend on what we just generated (e.g., that'd be a circular dependency).
+    // Upstream rust libraries are not supposed to depend on our local native
+    // libraries, as that would violate the structure of the DAG; in that
+    // scenario they would be required to link to them in a shared fashion as well.
+ // (The current implementation still doesn't prevent it though, see the FIXME below.)
+ //
+ // Note that upstream rust libraries may contain native dependencies as
+ // well, but they also can't depend on what we just started to add to the
+ // link line. And finally upstream native libraries can't depend on anything
+ // in this DAG so far because they can only depend on other native libraries
+ // and such dependencies are also required to be specified.
+ add_local_crate_regular_objects(cmd, codegen_results);
+ add_local_crate_metadata_objects(cmd, crate_type, codegen_results);
+ add_local_crate_allocator_objects(cmd, codegen_results);
+
+ // Avoid linking to dynamic libraries unless they satisfy some undefined symbols
+ // at the point at which they are specified on the command line.
+ // Must be passed before any (dynamic) libraries to have effect on them.
+ // On Solaris-like systems, `-z ignore` acts as both `--as-needed` and `--gc-sections`
+ // so it will ignore unreferenced ELF sections from relocatable objects.
+ // For that reason, we put this flag after metadata objects as they would otherwise be removed.
+ // FIXME: Support more fine-grained dead code removal on Solaris/illumos
+ // and move this option back to the top.
+ cmd.add_as_needed();
+
+ // FIXME: Move this below to other native libraries
+ // (or alternatively link all native libraries after their respective crates).
+ // This change is somewhat breaking in practice due to local static libraries being linked
+ // as whole-archive (#85144), so removing whole-archive may be a pre-requisite.
+ if sess.opts.unstable_opts.link_native_libraries {
+ add_local_native_libraries(cmd, sess, codegen_results);
+ }
+
+ // Upstream rust libraries and their non-bundled static libraries
+ add_upstream_rust_crates(
+ cmd,
+ sess,
+ archive_builder_builder,
+ codegen_results,
+ crate_type,
+ tmpdir,
+ );
+
+    // Upstream dynamic native libraries linked with `#[link]` attributes and `-l`
+ // command line options.
+ // If -Zlink-native-libraries=false is set, then the assumption is that an
+ // external build system already has the native dependencies defined, and it
+ // will provide them to the linker itself.
+ if sess.opts.unstable_opts.link_native_libraries {
+ add_upstream_native_libraries(cmd, sess, codegen_results);
+ }
+
+ // Link with the import library generated for any raw-dylib functions.
+ for (raw_dylib_name, raw_dylib_imports) in
+ collate_raw_dylibs(sess, &codegen_results.crate_info.used_libraries)?
+ {
+ cmd.add_object(&archive_builder_builder.create_dll_import_lib(
+ sess,
+ &raw_dylib_name,
+ &raw_dylib_imports,
+ tmpdir,
+ ));
+ }
+
+    // Library linking above uses some global state for things like `-Bstatic`/`-Bdynamic` to
+    // make the command line shorter; reset it to the default here before adding more libraries.
+ cmd.reset_per_library_state();
+
+ // FIXME: Built-in target specs occasionally use this for linking system libraries,
+ // eliminate all such uses by migrating them to `#[link]` attributes in `lib(std,c,unwind)`
+ // and remove the option.
+ add_late_link_args(cmd, sess, flavor, crate_type, codegen_results);
+
+ // ------------ Arbitrary order-independent options ------------
+
+ // Add order-independent options determined by rustc from its compiler options,
+ // target properties and source code.
+ add_order_independent_options(
+ cmd,
+ sess,
+ link_output_kind,
+ crt_objects_fallback,
+ flavor,
+ crate_type,
+ codegen_results,
+ out_filename,
+ tmpdir,
+ );
+
+ // Can be used for arbitrary order-independent options.
+ // In practice may also be occasionally used for linking native libraries.
+ // Passed after compiler-generated options to support manual overriding when necessary.
+ add_user_defined_link_args(cmd, sess);
+
+ // ------------ Object code and libraries, order-dependent ------------
+
+ // Post-link CRT objects.
+ add_post_link_objects(cmd, sess, link_output_kind, crt_objects_fallback);
+
+ // ------------ Late order-dependent options ------------
+
+ // Doesn't really make sense.
+ // FIXME: In practice built-in target specs use this for arbitrary order-independent options,
+ // introduce a target spec option for order-independent linker options, migrate built-in specs
+ // to it and remove the option.
+ add_post_link_args(cmd, sess, flavor);
+
+ Ok(cmd.take_cmd())
+}
+
+fn add_order_independent_options(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ link_output_kind: LinkOutputKind,
+ crt_objects_fallback: bool,
+ flavor: LinkerFlavor,
+ crate_type: CrateType,
+ codegen_results: &CodegenResults,
+ out_filename: &Path,
+ tmpdir: &Path,
+) {
+ add_gcc_ld_path(cmd, sess, flavor);
+
+ add_apple_sdk(cmd, sess, flavor);
+
+ add_link_script(cmd, sess, tmpdir, crate_type);
+
+ if sess.target.os == "fuchsia" && crate_type == CrateType::Executable {
+ let prefix = if sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::ADDRESS) {
+ "asan/"
+ } else {
+ ""
+ };
+ cmd.arg(format!("--dynamic-linker={}ld.so.1", prefix));
+ }
+
+ if sess.target.eh_frame_header {
+ cmd.add_eh_frame_header();
+ }
+
+ // Make the binary compatible with data execution prevention schemes.
+ cmd.add_no_exec();
+
+ if crt_objects_fallback {
+ cmd.no_crt_objects();
+ }
+
+ if sess.target.os == "emscripten" {
+ cmd.arg("-s");
+ cmd.arg(if sess.panic_strategy() == PanicStrategy::Abort {
+ "DISABLE_EXCEPTION_CATCHING=1"
+ } else {
+ "DISABLE_EXCEPTION_CATCHING=0"
+ });
+ }
+
+ if flavor == LinkerFlavor::PtxLinker {
+ // Provide the linker with fallback to internal `target-cpu`.
+ cmd.arg("--fallback-arch");
+ cmd.arg(&codegen_results.crate_info.target_cpu);
+ } else if flavor == LinkerFlavor::BpfLinker {
+ cmd.arg("--cpu");
+ cmd.arg(&codegen_results.crate_info.target_cpu);
+ cmd.arg("--cpu-features");
+ cmd.arg(match &sess.opts.cg.target_feature {
+ feat if !feat.is_empty() => feat.as_ref(),
+ _ => sess.target.options.features.as_ref(),
+ });
+ }
+
+ cmd.linker_plugin_lto();
+
+ add_library_search_dirs(cmd, sess, crt_objects_fallback);
+
+ cmd.output_filename(out_filename);
+
+ if crate_type == CrateType::Executable && sess.target.is_like_windows {
+ if let Some(ref s) = codegen_results.crate_info.windows_subsystem {
+ cmd.subsystem(s);
+ }
+ }
+
+ // Try to strip as much out of the generated object by removing unused
+ // sections if possible. See more comments in linker.rs
+ if !sess.link_dead_code() {
+        // If PGO is enabled, gc_sections will sometimes remove the profile data section
+        // as it appears to be unused. This can then cause the PGO profile file to lose
+        // some functions. If we are generating a profile, we shouldn't strip those metadata
+        // sections, to ensure we have all the data for PGO.
+ let keep_metadata =
+ crate_type == CrateType::Dylib || sess.opts.cg.profile_generate.enabled();
+ if crate_type != CrateType::Executable || !sess.opts.unstable_opts.export_executable_symbols
+ {
+ cmd.gc_sections(keep_metadata);
+ } else {
+ cmd.no_gc_sections();
+ }
+ }
+
+ cmd.set_output_kind(link_output_kind, out_filename);
+
+ add_relro_args(cmd, sess);
+
+ // Pass optimization flags down to the linker.
+ cmd.optimize();
+
+ // Gather the set of NatVis files, if any, and write them out to a temp directory.
+ let natvis_visualizers = collect_natvis_visualizers(
+ tmpdir,
+ sess,
+ &codegen_results.crate_info.local_crate_name,
+ &codegen_results.crate_info.natvis_debugger_visualizers,
+ );
+
+ // Pass debuginfo, NatVis debugger visualizers and strip flags down to the linker.
+ cmd.debuginfo(strip_value(sess), &natvis_visualizers);
+
+ // We want to prevent the compiler from accidentally leaking in any system libraries,
+ // so by default we tell linkers not to link to any default libraries.
+ if !sess.opts.cg.default_linker_libraries && sess.target.no_default_libraries {
+ cmd.no_default_libraries();
+ }
+
+ if sess.opts.cg.profile_generate.enabled() || sess.instrument_coverage() {
+ cmd.pgo_gen();
+ }
+
+ if sess.opts.cg.control_flow_guard != CFGuard::Disabled {
+ cmd.control_flow_guard();
+ }
+
+ add_rpath_args(cmd, sess, codegen_results, out_filename);
+}
+
+// Write the NatVis debugger visualizer files for each crate to the temp directory and gather the file paths.
+fn collect_natvis_visualizers(
+ tmpdir: &Path,
+ sess: &Session,
+ crate_name: &Symbol,
+ natvis_debugger_visualizers: &BTreeSet<DebuggerVisualizerFile>,
+) -> Vec<PathBuf> {
+ let mut visualizer_paths = Vec::with_capacity(natvis_debugger_visualizers.len());
+
+ for (index, visualizer) in natvis_debugger_visualizers.iter().enumerate() {
+ let visualizer_out_file = tmpdir.join(format!("{}-{}.natvis", crate_name.as_str(), index));
+
+ match fs::write(&visualizer_out_file, &visualizer.src) {
+ Ok(()) => {
+ visualizer_paths.push(visualizer_out_file);
+ }
+ Err(error) => {
+ sess.warn(
+ format!(
+ "Unable to write debugger visualizer file `{}`: {} ",
+ visualizer_out_file.display(),
+ error
+ )
+ .as_str(),
+ );
+ }
+ };
+ }
+ visualizer_paths
+}
+
+/// # Native library linking
+///
+/// User-supplied library search paths (-L on the command line). These are the same paths used to
+/// find Rust crates, so some of them may have been added already by the previous crate linking
+/// code. This only allows them to be found at compile time, so it is still entirely up to outside
+/// forces to make sure that the library can be found at runtime.
+///
+/// Also note that the native libraries linked here are only the ones located in the current crate.
+/// Upstream crates with native library dependencies may have their native library pulled in above.
+fn add_local_native_libraries(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ codegen_results: &CodegenResults,
+) {
+ let filesearch = sess.target_filesearch(PathKind::All);
+ for search_path in filesearch.search_paths() {
+ match search_path.kind {
+ PathKind::Framework => {
+ cmd.framework_path(&search_path.dir);
+ }
+ _ => {
+ cmd.include_path(&fix_windows_verbatim_for_gcc(&search_path.dir));
+ }
+ }
+ }
+
+ let relevant_libs =
+ codegen_results.crate_info.used_libraries.iter().filter(|l| relevant_lib(sess, l));
+
+ let search_path = OnceCell::new();
+ let mut last = (None, NativeLibKind::Unspecified, None);
+ for lib in relevant_libs {
+ let Some(name) = lib.name else {
+ continue;
+ };
+ let name = name.as_str();
+
+ // Skip if this library is the same as the last.
+ last = if (lib.name, lib.kind, lib.verbatim) == last {
+ continue;
+ } else {
+ (lib.name, lib.kind, lib.verbatim)
+ };
+
+ let verbatim = lib.verbatim.unwrap_or(false);
+ match lib.kind {
+ NativeLibKind::Dylib { as_needed } => {
+ cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Unspecified => cmd.link_dylib(name, verbatim, true),
+ NativeLibKind::Framework { as_needed } => {
+ cmd.link_framework(name, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Static { whole_archive, bundle, .. } => {
+ if whole_archive == Some(true)
+                // Backward compatibility case: this can be an rlib (so `+whole-archive` cannot
+ // be added explicitly if necessary, see the error in `fn link_rlib`) compiled
+ // as an executable due to `--test`. Use whole-archive implicitly, like before
+ // the introduction of native lib modifiers.
+ || (whole_archive == None && bundle != Some(false) && sess.opts.test)
+ {
+ cmd.link_whole_staticlib(
+ name,
+ verbatim,
+ &search_path.get_or_init(|| archive_search_paths(sess)),
+ );
+ } else {
+ cmd.link_staticlib(name, verbatim)
+ }
+ }
+ NativeLibKind::RawDylib => {
+ // Ignore RawDylib here, they are handled separately in linker_with_args().
+ }
+ NativeLibKind::LinkArg => {
+ cmd.arg(name);
+ }
+ }
+ }
+}
+
+/// # Linking Rust crates and their non-bundled static libraries
+///
+/// Rust crates are not considered at all when creating an rlib output. All dependencies will be
+/// linked when producing the final output (instead of the intermediate rlib version).
+fn add_upstream_rust_crates<'a>(
+ cmd: &mut dyn Linker,
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ crate_type: CrateType,
+ tmpdir: &Path,
+) {
+ // All of the heavy lifting has previously been accomplished by the
+ // dependency_format module of the compiler. This is just crawling the
+ // output of that module, adding crates as necessary.
+ //
+    // Linking to an rlib involves just passing it to the linker (the linker
+ // will slurp up the object files inside), and linking to a dynamic library
+ // involves just passing the right -l flag.
+
+ let (_, data) = codegen_results
+ .crate_info
+ .dependency_formats
+ .iter()
+ .find(|(ty, _)| *ty == crate_type)
+ .expect("failed to find crate type in dependency format list");
+
+    // The `used_crates` list gives us a topological sorting of the crates.
+ let deps = &codegen_results.crate_info.used_crates;
+
+    // There are a few internal crates in the standard library (namely libcore and
+ // libstd) which actually have a circular dependence upon one another. This
+ // currently arises through "weak lang items" where libcore requires things
+ // like `rust_begin_unwind` but libstd ends up defining it. To get this
+ // circular dependence to work correctly in all situations we'll need to be
+ // sure to correctly apply the `--start-group` and `--end-group` options to
+ // GNU linkers, otherwise if we don't use any other symbol from the standard
+ // library it'll get discarded and the whole application won't link.
+ //
+ // In this loop we're calculating the `group_end`, after which crate to
+ // pass `--end-group` and `group_start`, before which crate to pass
+ // `--start-group`. We currently do this by passing `--end-group` after
+ // the first crate (when iterating backwards) that requires a lang item
+ // defined somewhere else. Once that's set then when we've defined all the
+ // necessary lang items we'll pass `--start-group`.
+ //
+    // Note that this logic isn't particularly sophisticated, but it does the trick
+    // for the current implementation of the standard library.
+ let mut group_end = None;
+ let mut group_start = None;
+ // Crates available for linking thus far.
+ let mut available = FxHashSet::default();
+ // Crates required to satisfy dependencies discovered so far.
+ let mut required = FxHashSet::default();
+
+ let info = &codegen_results.crate_info;
+ for &cnum in deps.iter().rev() {
+ if let Some(missing) = info.missing_lang_items.get(&cnum) {
+ let missing_crates = missing.iter().map(|i| info.lang_item_to_crate.get(i).copied());
+ required.extend(missing_crates);
+ }
+
+ required.insert(Some(cnum));
+ available.insert(Some(cnum));
+
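+        // If `required` has outgrown `available`, some lang item is provided by a
+        // crate we haven't reached yet in this reverse walk, i.e. there is a
+        // circular dependency that the group must enclose.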
+ if required.len() > available.len() && group_end.is_none() {
+ group_end = Some(cnum);
+ }
+ if required.len() == available.len() && group_end.is_some() {
+ group_start = Some(cnum);
+ break;
+ }
+ }
+
+ // If we didn't end up filling in all lang items from upstream crates then
+ // we'll be filling it in with our crate. This probably means we're the
+ // standard library itself, so skip this for now.
+ if group_end.is_some() && group_start.is_none() {
+ group_end = None;
+ }
+
+ let mut compiler_builtins = None;
+ let search_path = OnceCell::new();
+
+ for &cnum in deps.iter() {
+ if group_start == Some(cnum) {
+ cmd.group_start();
+ }
+
+ // We may not pass all crates through to the linker. Some crates may
+ // appear statically in an existing dylib, meaning we'll pick up all the
+ // symbols from the dylib.
+ let src = &codegen_results.crate_info.used_crate_source[&cnum];
+ match data[cnum.as_usize() - 1] {
+ _ if codegen_results.crate_info.profiler_runtime == Some(cnum) => {
+ add_static_crate(cmd, sess, archive_builder_builder, codegen_results, tmpdir, cnum);
+ }
+ // compiler-builtins are always placed last to ensure that they're
+ // linked correctly.
+ _ if codegen_results.crate_info.compiler_builtins == Some(cnum) => {
+ assert!(compiler_builtins.is_none());
+ compiler_builtins = Some(cnum);
+ }
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static => {
+ add_static_crate(cmd, sess, archive_builder_builder, codegen_results, tmpdir, cnum);
+
+ // Link static native libs with "-bundle" modifier only if the crate they originate from
+ // is being linked statically to the current crate. If it's linked dynamically
+ // or is an rlib already included via some other dylib crate, the symbols from
+ // native libs will have already been included in that dylib.
+ //
+ // If -Zlink-native-libraries=false is set, then the assumption is that an
+ // external build system already has the native dependencies defined, and it
+ // will provide them to the linker itself.
+ if sess.opts.unstable_opts.link_native_libraries {
+ let mut last = (None, NativeLibKind::Unspecified, None);
+ for lib in &codegen_results.crate_info.native_libraries[&cnum] {
+ let Some(name) = lib.name else {
+ continue;
+ };
+ let name = name.as_str();
+ if !relevant_lib(sess, lib) {
+ continue;
+ }
+
+ // Skip if this library is the same as the last.
+ last = if (lib.name, lib.kind, lib.verbatim) == last {
+ continue;
+ } else {
+ (lib.name, lib.kind, lib.verbatim)
+ };
+
+ match lib.kind {
+ NativeLibKind::Static {
+ bundle: Some(false),
+ whole_archive: Some(true),
+ } => {
+ cmd.link_whole_staticlib(
+ name,
+ lib.verbatim.unwrap_or(false),
+ search_path.get_or_init(|| archive_search_paths(sess)),
+ );
+ }
+ NativeLibKind::Static {
+ bundle: Some(false),
+ whole_archive: Some(false) | None,
+ } => {
+ cmd.link_staticlib(name, lib.verbatim.unwrap_or(false));
+ }
+ NativeLibKind::LinkArg => {
+ cmd.arg(name);
+ }
+ NativeLibKind::Dylib { .. }
+ | NativeLibKind::Framework { .. }
+ | NativeLibKind::Unspecified
+ | NativeLibKind::RawDylib => {}
+ NativeLibKind::Static {
+ bundle: Some(true) | None,
+ whole_archive: _,
+ } => {}
+ }
+ }
+ }
+ }
+ Linkage::Dynamic => add_dynamic_crate(cmd, sess, &src.dylib.as_ref().unwrap().0),
+ }
+
+ if group_end == Some(cnum) {
+ cmd.group_end();
+ }
+ }
+
+ // compiler-builtins are always placed last to ensure that they're
+ // linked correctly.
+    // We must always link the `compiler_builtins` crate statically, even if it
+    // was already "included" in a dylib (e.g., `libstd` when `-C prefer-dynamic`
+    // is used).
+ if let Some(cnum) = compiler_builtins {
+ add_static_crate(cmd, sess, archive_builder_builder, codegen_results, tmpdir, cnum);
+ }
+
+ // Converts a library file-stem into a cc -l argument
+ fn unlib<'a>(target: &Target, stem: &'a str) -> &'a str {
+ if stem.starts_with("lib") && !target.is_like_windows { &stem[3..] } else { stem }
+ }
+
+ // Adds the static "rlib" versions of all crates to the command line.
+ // There's a bit of magic which happens here specifically related to LTO,
+ // namely that we remove upstream object files.
+ //
+ // When performing LTO, almost(*) all of the bytecode from the upstream
+ // libraries has already been included in our object file output. As a
+ // result we need to remove the object files in the upstream libraries so
+ // the linker doesn't try to include them twice (or whine about duplicate
+ // symbols). We must continue to include the rest of the rlib, however, as
+ // it may contain static native libraries which must be linked in.
+ //
+ // (*) Crates marked with `#![no_builtins]` don't participate in LTO and
+ // their bytecode wasn't included. The object files in those libraries must
+ // still be passed to the linker.
+ //
+ // Note, however, that if we're not doing LTO we can just pass the rlib
+ // blindly to the linker (fast) because it's fine if it's not actually
+ // included as we're at the end of the dependency chain.
+ fn add_static_crate<'a>(
+ cmd: &mut dyn Linker,
+ sess: &'a Session,
+ archive_builder_builder: &dyn ArchiveBuilderBuilder,
+ codegen_results: &CodegenResults,
+ tmpdir: &Path,
+ cnum: CrateNum,
+ ) {
+ let src = &codegen_results.crate_info.used_crate_source[&cnum];
+ let cratepath = &src.rlib.as_ref().unwrap().0;
+
+ let mut link_upstream = |path: &Path| {
+ cmd.link_rlib(&fix_windows_verbatim_for_gcc(path));
+ };
+
+        // See the comment above in `link_staticlib` and `link_rlib` for why, if
+        // there's a static library that's not relevant, we skip all object
+        // files.
+ let native_libs = &codegen_results.crate_info.native_libraries[&cnum];
+ let skip_native = native_libs.iter().any(|lib| {
+ matches!(lib.kind, NativeLibKind::Static { bundle: None | Some(true), .. })
+ && !relevant_lib(sess, lib)
+ });
+
+ if (!are_upstream_rust_objects_already_included(sess)
+ || ignored_for_lto(sess, &codegen_results.crate_info, cnum))
+ && !skip_native
+ {
+ link_upstream(cratepath);
+ return;
+ }
+
+ let dst = tmpdir.join(cratepath.file_name().unwrap());
+ let name = cratepath.file_name().unwrap().to_str().unwrap();
+ let name = &name[3..name.len() - 5]; // chop off lib/.rlib
+
+ sess.prof.generic_activity_with_arg("link_altering_rlib", name).run(|| {
+ let canonical_name = name.replace('-', "_");
+ let upstream_rust_objects_already_included =
+ are_upstream_rust_objects_already_included(sess);
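+            // Note: despite its name, `is_builtins` is true when this crate's Rust
+            // objects are considered part of the LTO module, i.e. roughly the inverse
+            // of `ignored_for_lto` for this crate.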
+ let is_builtins = sess.target.no_builtins
+ || !codegen_results.crate_info.is_no_builtins.contains(&cnum);
+
+ let mut archive = archive_builder_builder.new_archive_builder(sess);
+ if let Err(e) = archive.add_archive(
+ cratepath,
+ Box::new(move |f| {
+ if f == METADATA_FILENAME {
+ return true;
+ }
+
+ let canonical = f.replace('-', "_");
+
+ let is_rust_object =
+ canonical.starts_with(&canonical_name) && looks_like_rust_object_file(&f);
+
+ // If we've been requested to skip all native object files
+ // (those not generated by the rust compiler) then we can skip
+ // this file. See above for why we may want to do this.
+ let skip_because_cfg_say_so = skip_native && !is_rust_object;
+
+ // If we're performing LTO and this is a rust-generated object
+ // file, then we don't need the object file as it's part of the
+ // LTO module. Note that `#![no_builtins]` is excluded from LTO,
+ // though, so we let that object file slide.
+ let skip_because_lto =
+ upstream_rust_objects_already_included && is_rust_object && is_builtins;
+
+ if skip_because_cfg_say_so || skip_because_lto {
+ return true;
+ }
+
+ false
+ }),
+ ) {
+ sess.fatal(&format!("failed to build archive from rlib: {}", e));
+ }
+ if archive.build(&dst) {
+ link_upstream(&dst);
+ }
+ });
+ }
+
+ // Same thing as above, but for dynamic crates instead of static crates.
+ fn add_dynamic_crate(cmd: &mut dyn Linker, sess: &Session, cratepath: &Path) {
+ // Just need to tell the linker about where the library lives and
+ // what its name is
+ let parent = cratepath.parent();
+ if let Some(dir) = parent {
+ cmd.include_path(&fix_windows_verbatim_for_gcc(dir));
+ }
+ let filestem = cratepath.file_stem().unwrap().to_str().unwrap();
+ cmd.link_rust_dylib(
+ &unlib(&sess.target, filestem),
+ parent.unwrap_or_else(|| Path::new("")),
+ );
+ }
+}
+
+/// Link in all of our upstream crates' native dependencies. Remember that all of these upstream
+/// native dependencies are non-static dependencies. We've got two cases then:
+///
+/// 1. The upstream crate is an rlib. In this case we *must* link in the native dependency because
+/// the rlib is just an archive.
+///
+/// 2. The upstream crate is a dylib. In order to use the dylib, we have to have the dependency
+/// present on the system somewhere. Thus, we don't gain a whole lot from not linking in the
+/// dynamic dependency to this crate as well.
+///
+/// The use case for this is a little subtle. In theory the native dependencies of a crate are
+/// purely an implementation detail of the crate itself, but the problem arises with generic and
+/// inlined functions. If a generic function calls a native function, then the generic function
+/// must be instantiated in the target crate, meaning that the native symbol must also be resolved
+/// in the target crate.
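+///
+/// As an illustrative (hypothetical) sketch: if an upstream crate defines
+/// `pub fn wrap<T>() { unsafe { native_fn() } }` against an extern block marked
+/// `#[link(name = "native")]`, then a downstream crate instantiating
+/// `wrap::<u32>()` must also resolve `native_fn`, so `-lnative` has to appear
+/// on the downstream link line as well.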
+fn add_upstream_native_libraries(
+ cmd: &mut dyn Linker,
+ sess: &Session,
+ codegen_results: &CodegenResults,
+) {
+ let mut last = (None, NativeLibKind::Unspecified, None);
+ for &cnum in &codegen_results.crate_info.used_crates {
+ for lib in codegen_results.crate_info.native_libraries[&cnum].iter() {
+ let Some(name) = lib.name else {
+ continue;
+ };
+ let name = name.as_str();
+ if !relevant_lib(sess, &lib) {
+ continue;
+ }
+
+ // Skip if this library is the same as the last.
+ last = if (lib.name, lib.kind, lib.verbatim) == last {
+ continue;
+ } else {
+ (lib.name, lib.kind, lib.verbatim)
+ };
+
+ let verbatim = lib.verbatim.unwrap_or(false);
+ match lib.kind {
+ NativeLibKind::Dylib { as_needed } => {
+ cmd.link_dylib(name, verbatim, as_needed.unwrap_or(true))
+ }
+ NativeLibKind::Unspecified => cmd.link_dylib(name, verbatim, true),
+ NativeLibKind::Framework { as_needed } => {
+ cmd.link_framework(name, as_needed.unwrap_or(true))
+ }
+ // ignore static native libraries here as we've
+ // already included them in add_local_native_libraries and
+ // add_upstream_rust_crates
+ NativeLibKind::Static { .. } => {}
+ NativeLibKind::RawDylib | NativeLibKind::LinkArg => {}
+ }
+ }
+ }
+}
+
+fn relevant_lib(sess: &Session, lib: &NativeLib) -> bool {
+ match lib.cfg {
+ Some(ref cfg) => rustc_attr::cfg_matches(cfg, &sess.parse_sess, CRATE_NODE_ID, None),
+ None => true,
+ }
+}
+
+fn are_upstream_rust_objects_already_included(sess: &Session) -> bool {
+ match sess.lto() {
+ config::Lto::Fat => true,
+ config::Lto::Thin => {
+ // If we defer LTO to the linker, we haven't run LTO ourselves, so
+ // any upstream object files have not been copied yet.
+ !sess.opts.cg.linker_plugin_lto.enabled()
+ }
+ config::Lto::No | config::Lto::ThinLocal => false,
+ }
+}
+
+fn add_apple_sdk(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+ let arch = &sess.target.arch;
+ let os = &sess.target.os;
+ let llvm_target = &sess.target.llvm_target;
+ if sess.target.vendor != "apple"
+ || !matches!(os.as_ref(), "ios" | "tvos" | "watchos")
+ || (flavor != LinkerFlavor::Gcc && flavor != LinkerFlavor::Lld(LldFlavor::Ld64))
+ {
+ return;
+ }
+ let sdk_name = match (arch.as_ref(), os.as_ref()) {
+ ("aarch64", "tvos") => "appletvos",
+ ("x86_64", "tvos") => "appletvsimulator",
+ ("arm", "ios") => "iphoneos",
+ ("aarch64", "ios") if llvm_target.contains("macabi") => "macosx",
+ ("aarch64", "ios") if llvm_target.ends_with("-simulator") => "iphonesimulator",
+ ("aarch64", "ios") => "iphoneos",
+ ("x86", "ios") => "iphonesimulator",
+ ("x86_64", "ios") if llvm_target.contains("macabi") => "macosx",
+ ("x86_64", "ios") => "iphonesimulator",
+ ("x86_64", "watchos") => "watchsimulator",
+ ("arm64_32", "watchos") => "watchos",
+ ("aarch64", "watchos") if llvm_target.ends_with("-simulator") => "watchsimulator",
+ ("aarch64", "watchos") => "watchos",
+ ("arm", "watchos") => "watchos",
+ _ => {
+ sess.err(&format!("unsupported arch `{}` for os `{}`", arch, os));
+ return;
+ }
+ };
+ let sdk_root = match get_apple_sdk_root(sdk_name) {
+ Ok(s) => s,
+ Err(e) => {
+ sess.err(&e);
+ return;
+ }
+ };
+
+ match flavor {
+ LinkerFlavor::Gcc => {
+ cmd.args(&["-isysroot", &sdk_root, "-Wl,-syslibroot", &sdk_root]);
+ }
+ LinkerFlavor::Lld(LldFlavor::Ld64) => {
+ cmd.args(&["-syslibroot", &sdk_root]);
+ }
+ _ => unreachable!(),
+ }
+}
+
+fn get_apple_sdk_root(sdk_name: &str) -> Result<String, String> {
+ // Following what clang does
+ // (https://github.com/llvm/llvm-project/blob/
+ // 296a80102a9b72c3eda80558fb78a3ed8849b341/clang/lib/Driver/ToolChains/Darwin.cpp#L1661-L1678)
+ // to allow the SDK path to be set. (For clang, xcrun sets
+ // SDKROOT; for rustc, the user or build system can set it, or we
+ // can fall back to checking for xcrun on PATH.)
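+ // Illustrative shell equivalent of the `xcrun` fallback below (the printed
+ // path is an example): `xcrun --show-sdk-path -sdk iphoneos` might print
+ // `/Applications/Xcode.app/.../iPhoneOS.sdk`.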
+ if let Ok(sdkroot) = env::var("SDKROOT") {
+ let p = Path::new(&sdkroot);
+ match sdk_name {
+ // Ignore `SDKROOT` if it's clearly set for the wrong platform.
+ "appletvos"
+ if sdkroot.contains("TVSimulator.platform")
+ || sdkroot.contains("MacOSX.platform") => {}
+ "appletvsimulator"
+ if sdkroot.contains("TVOS.platform") || sdkroot.contains("MacOSX.platform") => {}
+ "iphoneos"
+ if sdkroot.contains("iPhoneSimulator.platform")
+ || sdkroot.contains("MacOSX.platform") => {}
+ "iphonesimulator"
+ if sdkroot.contains("iPhoneOS.platform") || sdkroot.contains("MacOSX.platform") => {
+ }
+ "macosx10.15"
+ if sdkroot.contains("iPhoneOS.platform")
+ || sdkroot.contains("iPhoneSimulator.platform") => {}
+ "watchos"
+ if sdkroot.contains("WatchSimulator.platform")
+ || sdkroot.contains("MacOSX.platform") => {}
+ "watchsimulator"
+ if sdkroot.contains("WatchOS.platform") || sdkroot.contains("MacOSX.platform") => {}
+ // Ignore `SDKROOT` if it's not a valid path.
+ _ if !p.is_absolute() || p == Path::new("/") || !p.exists() => {}
+ _ => return Ok(sdkroot),
+ }
+ }
+ let res =
+ Command::new("xcrun").arg("--show-sdk-path").arg("-sdk").arg(sdk_name).output().and_then(
+ |output| {
+ if output.status.success() {
+ Ok(String::from_utf8(output.stdout).unwrap())
+ } else {
+ let error = String::from_utf8(output.stderr);
+ let error = format!("process exited with error: {}", error.unwrap());
+ Err(io::Error::new(io::ErrorKind::Other, &error[..]))
+ }
+ },
+ );
+
+ match res {
+ Ok(output) => Ok(output.trim().to_string()),
+ Err(e) => Err(format!("failed to get {} SDK path: {}", sdk_name, e)),
+ }
+}
+
+fn add_gcc_ld_path(cmd: &mut dyn Linker, sess: &Session, flavor: LinkerFlavor) {
+ if let Some(ld_impl) = sess.opts.unstable_opts.gcc_ld {
+ if let LinkerFlavor::Gcc = flavor {
+ match ld_impl {
+ LdImpl::Lld => {
+ let tools_path = sess.get_tools_search_paths(false);
+ let gcc_ld_dir = tools_path
+ .into_iter()
+ .map(|p| p.join("gcc-ld"))
+ .find(|p| {
+ p.join(if sess.host.is_like_windows { "ld.exe" } else { "ld" }).exists()
+ })
+ .unwrap_or_else(|| sess.fatal("rust-lld (as ld) not found"));
+ cmd.arg({
+ let mut arg = OsString::from("-B");
+ arg.push(gcc_ld_dir);
+ arg
+ });
+ cmd.arg(format!("-Wl,-rustc-lld-flavor={}", sess.target.lld_flavor.as_str()));
+ }
+ }
+ } else {
+ sess.fatal("option `-Z gcc-ld` is used even though linker flavor is not gcc");
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs
new file mode 100644
index 000000000..ce51b2e95
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/linker.rs
@@ -0,0 +1,1788 @@
+use super::archive;
+use super::command::Command;
+use super::symbol_export;
+use rustc_span::symbol::sym;
+
+use std::ffi::{OsStr, OsString};
+use std::fs::{self, File};
+use std::io::prelude::*;
+use std::io::{self, BufWriter};
+use std::path::{Path, PathBuf};
+use std::{env, mem, str};
+
+use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_middle::middle::dependency_format::Linkage;
+use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo, SymbolExportKind};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{self, CrateType, DebugInfo, LinkerPluginLto, Lto, OptLevel, Strip};
+use rustc_session::Session;
+use rustc_target::spec::{LinkOutputKind, LinkerFlavor, LldFlavor};
+
+use cc::windows_registry;
+
+/// Disables non-English messages from localized linkers.
+/// Such messages may cause issues with text encoding on Windows (#35785)
+/// and prevent inspection of linker output in case of errors, which we occasionally need to do.
+/// This should be acceptable because other messages from rustc are in English anyway;
+/// it may even improve searchability of the linker diagnostics.
+pub fn disable_localization(linker: &mut Command) {
+ // No harm in setting both env vars simultaneously.
+ // Unix-style linkers.
+ linker.env("LC_ALL", "C");
+ // MSVC's `link.exe`.
+ linker.env("VSLANG", "1033");
+}
+
+// The linker command is given any env vars it needs; on Windows this sets up
+// the PATH for MSVC to find its DLLs and for gcc to find its bundled
+// toolchain.
+pub fn get_linker<'a>(
+ sess: &'a Session,
+ linker: &Path,
+ flavor: LinkerFlavor,
+ self_contained: bool,
+ target_cpu: &'a str,
+) -> Box<dyn Linker + 'a> {
+ let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe");
+
+ // If our linker looks like a batch script on Windows, then to execute it
+ // we'll need to spawn `cmd` explicitly. This is primarily done to handle
+ // emscripten, where the linker is `emcc.bat` and needs to be spawned as
+ // `cmd /c emcc.bat ...`.
+ //
+ // This worked automatically in the past but has required manual handling
+ // since #42436 (the regression was tagged as #42791); more background on
+ // the emscripten side can be found in #44443.
+ let mut cmd = match linker.to_str() {
+ Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker),
+ _ => match flavor {
+ LinkerFlavor::Lld(f) => Command::lld(linker, f),
+ LinkerFlavor::Msvc if sess.opts.cg.linker.is_none() && sess.target.linker.is_none() => {
+ Command::new(msvc_tool.as_ref().map_or(linker, |t| t.path()))
+ }
+ _ => Command::new(linker),
+ },
+ };
+
+ // UWP apps have API restrictions enforced during Store submissions.
+ // To comply with the Windows App Certification Kit,
+ // MSVC needs to link with the Store versions of the runtime libraries (vcruntime, msvcrt, etc).
+ let t = &sess.target;
+ if (flavor == LinkerFlavor::Msvc || flavor == LinkerFlavor::Lld(LldFlavor::Link))
+ && t.vendor == "uwp"
+ {
+ if let Some(ref tool) = msvc_tool {
+ let original_path = tool.path();
+ if let Some(ref root_lib_path) = original_path.ancestors().nth(4) {
+ let arch = match t.arch.as_ref() {
+ "x86_64" => Some("x64"),
+ "x86" => Some("x86"),
+ "aarch64" => Some("arm64"),
+ "arm" => Some("arm"),
+ _ => None,
+ };
+ if let Some(ref a) = arch {
+ // FIXME: Move this to `fn linker_with_args`.
+ let mut arg = OsString::from("/LIBPATH:");
+ arg.push(format!("{}\\lib\\{}\\store", root_lib_path.display(), a));
+ cmd.arg(&arg);
+ } else {
+ warn!("arch is not supported");
+ }
+ } else {
+ warn!("MSVC root path lib location not found");
+ }
+ } else {
+ warn!("link.exe not found");
+ }
+ }
+
+ // The compiler's sysroot often has some bundled tools, so add it to the
+ // PATH for the child.
+ let mut new_path = sess.get_tools_search_paths(self_contained);
+ let mut msvc_changed_path = false;
+ if sess.target.is_like_msvc {
+ if let Some(ref tool) = msvc_tool {
+ cmd.args(tool.args());
+ for &(ref k, ref v) in tool.env() {
+ if k == "PATH" {
+ new_path.extend(env::split_paths(v));
+ msvc_changed_path = true;
+ } else {
+ cmd.env(k, v);
+ }
+ }
+ }
+ }
+
+ if !msvc_changed_path {
+ if let Some(path) = env::var_os("PATH") {
+ new_path.extend(env::split_paths(&path));
+ }
+ }
+ cmd.env("PATH", env::join_paths(new_path).unwrap());
+
+ // FIXME: Move `/LIBPATH` addition for uwp targets from the linker construction
+ // to the linker args construction.
+ assert!(cmd.get_args().is_empty() || sess.target.vendor == "uwp");
+ match flavor {
+ LinkerFlavor::Lld(LldFlavor::Link) | LinkerFlavor::Msvc => {
+ Box::new(MsvcLinker { cmd, sess }) as Box<dyn Linker>
+ }
+ LinkerFlavor::Em => Box::new(EmLinker { cmd, sess }) as Box<dyn Linker>,
+ LinkerFlavor::Gcc => {
+ Box::new(GccLinker { cmd, sess, target_cpu, hinted_static: false, is_ld: false })
+ as Box<dyn Linker>
+ }
+
+ LinkerFlavor::Lld(LldFlavor::Ld)
+ | LinkerFlavor::Lld(LldFlavor::Ld64)
+ | LinkerFlavor::Ld => {
+ Box::new(GccLinker { cmd, sess, target_cpu, hinted_static: false, is_ld: true })
+ as Box<dyn Linker>
+ }
+
+ LinkerFlavor::Lld(LldFlavor::Wasm) => Box::new(WasmLd::new(cmd, sess)) as Box<dyn Linker>,
+
+ LinkerFlavor::PtxLinker => Box::new(PtxLinker { cmd, sess }) as Box<dyn Linker>,
+
+ LinkerFlavor::BpfLinker => Box::new(BpfLinker { cmd, sess }) as Box<dyn Linker>,
+
+ LinkerFlavor::L4Bender => Box::new(L4Bender::new(cmd, sess)) as Box<dyn Linker>,
+ }
+}
+
+/// Linker abstraction used by `back::link` to build up the command to invoke a
+/// linker.
+///
+/// This trait is the total list of requirements needed by `back::link` and
+/// represents the meaning of each option being passed down. This trait is then
+/// used to dispatch on whether a GNU-like linker (e.g., `ld`) or an
+/// MSVC linker (e.g., `link.exe`) is being used.
+pub trait Linker {
+ fn cmd(&mut self) -> &mut Command;
+ fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path);
+ fn link_dylib(&mut self, lib: &str, verbatim: bool, as_needed: bool);
+ fn link_rust_dylib(&mut self, lib: &str, path: &Path);
+ fn link_framework(&mut self, framework: &str, as_needed: bool);
+ fn link_staticlib(&mut self, lib: &str, verbatim: bool);
+ fn link_rlib(&mut self, lib: &Path);
+ fn link_whole_rlib(&mut self, lib: &Path);
+ fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, search_path: &[PathBuf]);
+ fn include_path(&mut self, path: &Path);
+ fn framework_path(&mut self, path: &Path);
+ fn output_filename(&mut self, path: &Path);
+ fn add_object(&mut self, path: &Path);
+ fn gc_sections(&mut self, keep_metadata: bool);
+ fn no_gc_sections(&mut self);
+ fn full_relro(&mut self);
+ fn partial_relro(&mut self);
+ fn no_relro(&mut self);
+ fn optimize(&mut self);
+ fn pgo_gen(&mut self);
+ fn control_flow_guard(&mut self);
+ fn debuginfo(&mut self, strip: Strip, natvis_debugger_visualizers: &[PathBuf]);
+ fn no_crt_objects(&mut self);
+ fn no_default_libraries(&mut self);
+ fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType, symbols: &[String]);
+ fn subsystem(&mut self, subsystem: &str);
+ fn group_start(&mut self);
+ fn group_end(&mut self);
+ fn linker_plugin_lto(&mut self);
+ fn add_eh_frame_header(&mut self) {}
+ fn add_no_exec(&mut self) {}
+ fn add_as_needed(&mut self) {}
+ fn reset_per_library_state(&mut self) {}
+}
+
+impl dyn Linker + '_ {
+ pub fn arg(&mut self, arg: impl AsRef<OsStr>) {
+ self.cmd().arg(arg);
+ }
+
+ pub fn args(&mut self, args: impl IntoIterator<Item: AsRef<OsStr>>) {
+ self.cmd().args(args);
+ }
+
+ pub fn take_cmd(&mut self) -> Command {
+ mem::replace(self.cmd(), Command::new(""))
+ }
+}
+
+pub struct GccLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+ target_cpu: &'a str,
+ hinted_static: bool, // Keeps track of the current hinting mode.
+ // Whether the linker is invoked directly as `ld` rather than via a compiler driver.
+ is_ld: bool,
+}
+
+impl<'a> GccLinker<'a> {
+ /// Passes an argument directly to the linker.
+ ///
+ /// When the linker is not ld-like, such as when a compiler is used as the linker, the
+ /// argument is prefixed with `-Wl,`.
+ fn linker_arg(&mut self, arg: impl AsRef<OsStr>) -> &mut Self {
+ self.linker_args(&[arg]);
+ self
+ }
+
+ /// Passes a series of arguments directly to the linker.
+ ///
+ /// When the linker is ld-like, the arguments are simply appended to the command. When the
+ /// linker is not ld-like, such as when a compiler is used as the linker, the arguments are
+ /// joined by commas to form a single argument prefixed with `-Wl`. Emitting a single argument
+ /// ensures that the compiler preserves the relative order of the arguments.
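+ ///
+ /// For example (illustrative): `linker_args(&["-z", "relro"])` emits the two
+ /// arguments `-z relro` when the linker is invoked as `ld` directly, but the
+ /// single argument `-Wl,-z,relro` when driving a compiler such as `gcc`.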
+ fn linker_args(&mut self, args: &[impl AsRef<OsStr>]) -> &mut Self {
+ if self.is_ld {
+ args.into_iter().for_each(|a| {
+ self.cmd.arg(a);
+ });
+ } else if !args.is_empty() {
+ let mut s = OsString::from("-Wl");
+ for a in args {
+ s.push(",");
+ s.push(a);
+ }
+ self.cmd.arg(s);
+ }
+ self
+ }
+
+ fn takes_hints(&self) -> bool {
+ // Really this function only returns true if the underlying linker
+ // configured for a compiler is binutils' `ld.bfd` or `ld.gold`. We
+ // don't really have a foolproof way to detect that, so rule out some
+ // platforms where currently this is guaranteed to *not* be the case:
+ //
+ // * On OSX they have their own linker, not binutils'
+ // * For WebAssembly the only functional linker is LLD, which doesn't
+ // support hint flags
+ !self.sess.target.is_like_osx && !self.sess.target.is_like_wasm
+ }
+
+ // Some platforms take hints about whether a library is static or dynamic.
+ // For those that support this, we ensure we pass the option if the library
+ // was flagged "static" (most defaults are dynamic) so that if both
+ // libfoo.a and libfoo.so exist, the right one is chosen.
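+ // As a sketch (library names illustrative): when invoking `ld` directly,
+ // linking `foo` statically and then `bar` dynamically yields
+ // `-Bstatic -lfoo -Bdynamic -lbar`; through a compiler driver the hints are
+ // wrapped as `-Wl,-Bstatic` / `-Wl,-Bdynamic` instead.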
+ fn hint_static(&mut self) {
+ if !self.takes_hints() {
+ return;
+ }
+ if !self.hinted_static {
+ self.linker_arg("-Bstatic");
+ self.hinted_static = true;
+ }
+ }
+
+ fn hint_dynamic(&mut self) {
+ if !self.takes_hints() {
+ return;
+ }
+ if self.hinted_static {
+ self.linker_arg("-Bdynamic");
+ self.hinted_static = false;
+ }
+ }
+
+ fn push_linker_plugin_lto_args(&mut self, plugin_path: Option<&OsStr>) {
+ if let Some(plugin_path) = plugin_path {
+ let mut arg = OsString::from("-plugin=");
+ arg.push(plugin_path);
+ self.linker_arg(&arg);
+ }
+
+ let opt_level = match self.sess.opts.optimize {
+ config::OptLevel::No => "O0",
+ config::OptLevel::Less => "O1",
+ config::OptLevel::Default | config::OptLevel::Size | config::OptLevel::SizeMin => "O2",
+ config::OptLevel::Aggressive => "O3",
+ };
+
+ if let Some(path) = &self.sess.opts.unstable_opts.profile_sample_use {
+ self.linker_arg(&format!("-plugin-opt=sample-profile={}", path.display()));
+ };
+ self.linker_args(&[
+ &format!("-plugin-opt={}", opt_level),
+ &format!("-plugin-opt=mcpu={}", self.target_cpu),
+ ]);
+ }
+
+ fn build_dylib(&mut self, out_filename: &Path) {
+ // On mac we need to tell the linker to let this library be rpathed
+ if self.sess.target.is_like_osx {
+ if !self.is_ld {
+ self.cmd.arg("-dynamiclib");
+ }
+
+ self.linker_arg("-dylib");
+
+ // Note that the `osx_rpath_install_name` option here is a hack
+ // purely to support rustbuild right now; we should get a more
+ // principled solution at some point that forces the compiler to pass
+ // the right `-Wl,-install_name` with an `@rpath` in it.
+ if self.sess.opts.cg.rpath || self.sess.opts.unstable_opts.osx_rpath_install_name {
+ let mut rpath = OsString::from("@rpath/");
+ rpath.push(out_filename.file_name().unwrap());
+ self.linker_args(&[OsString::from("-install_name"), rpath]);
+ }
+ } else {
+ self.cmd.arg("-shared");
+ if self.sess.target.is_like_windows {
+ // The output filename already contains `dll_suffix`, so the
+ // resulting import library will have a name of the form
+ // `libfoo.dll.a`.
+ let implib_name =
+ out_filename.file_name().and_then(|file| file.to_str()).map(|file| {
+ format!(
+ "{}{}{}",
+ self.sess.target.staticlib_prefix,
+ file,
+ self.sess.target.staticlib_suffix
+ )
+ });
+ if let Some(implib_name) = implib_name {
+ let implib = out_filename.parent().map(|dir| dir.join(&implib_name));
+ if let Some(implib) = implib {
+ self.linker_arg(&format!("--out-implib={}", (*implib).to_str().unwrap()));
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<'a> Linker for GccLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
+ match output_kind {
+ LinkOutputKind::DynamicNoPicExe => {
+ if !self.is_ld && self.sess.target.linker_is_gnu {
+ self.cmd.arg("-no-pie");
+ }
+ }
+ LinkOutputKind::DynamicPicExe => {
+ // noop on windows w/ gcc & ld, error w/ lld
+ if !self.sess.target.is_like_windows {
+ // `-pie` works for both gcc wrapper and ld.
+ self.cmd.arg("-pie");
+ }
+ }
+ LinkOutputKind::StaticNoPicExe => {
+ // `-static` works for both gcc wrapper and ld.
+ self.cmd.arg("-static");
+ if !self.is_ld && self.sess.target.linker_is_gnu {
+ self.cmd.arg("-no-pie");
+ }
+ }
+ LinkOutputKind::StaticPicExe => {
+ if !self.is_ld {
+ // Note that the combination `-static -pie` doesn't work as expected
+ // for the gcc wrapper; `-static` in that case suppresses `-pie`.
+ self.cmd.arg("-static-pie");
+ } else {
+ // `--no-dynamic-linker` and `-z text` are not strictly necessary for producing
+ // a static pie, but currently passed because gcc and clang pass them.
+ // The former suppresses the `INTERP` ELF header specifying dynamic linker,
+ // which is otherwise implicitly injected by ld (but not lld).
+ // The latter doesn't change anything, only ensures that everything is pic.
+ self.cmd.args(&["-static", "-pie", "--no-dynamic-linker", "-z", "text"]);
+ }
+ }
+ LinkOutputKind::DynamicDylib => self.build_dylib(out_filename),
+ LinkOutputKind::StaticDylib => {
+ self.cmd.arg("-static");
+ self.build_dylib(out_filename);
+ }
+ LinkOutputKind::WasiReactorExe => {
+ self.linker_args(&["--entry", "_initialize"]);
+ }
+ }
+ // The VxWorks compiler driver introduced the `--static-crt` flag specifically for
+ // rustc; it switches linking for libc and similar system libraries to static without
+ // using any `#[link]` attributes in the `libc` crate. See #72782 for details.
+ // FIXME: Switch to using `#[link]` attributes in the `libc` crate
+ // similarly to other targets.
+ if self.sess.target.os == "vxworks"
+ && matches!(
+ output_kind,
+ LinkOutputKind::StaticNoPicExe
+ | LinkOutputKind::StaticPicExe
+ | LinkOutputKind::StaticDylib
+ )
+ {
+ self.cmd.arg("--static-crt");
+ }
+ }
+
+ fn link_dylib(&mut self, lib: &str, verbatim: bool, as_needed: bool) {
+ if self.sess.target.os == "illumos" && lib == "c" {
+ // libc will be added via late_link_args on illumos so that it will
+ // appear last in the library search order.
+ // FIXME: This should be replaced by a more complete and generic
+ // mechanism for controlling the order of library arguments passed
+ // to the linker.
+ return;
+ }
+ if !as_needed {
+ if self.sess.target.is_like_osx {
+ // FIXME(81490): ld64 doesn't support these flags, and while macOS 11
+ // has -needed-l{} / -needed_library {},
+ // we have no way to detect that here.
+ self.sess.warn("`as-needed` modifier not implemented yet for ld64");
+ } else if self.sess.target.linker_is_gnu && !self.sess.target.is_like_windows {
+ self.linker_arg("--no-as-needed");
+ } else {
+ self.sess.warn("`as-needed` modifier not supported for current linker");
+ }
+ }
+ self.hint_dynamic();
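+ // This emits e.g. `-lfoo`, or `-l:libfoo.so` when the `verbatim` modifier
+ // asks the linker for that exact file name (names illustrative).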
+ self.cmd.arg(format!("-l{}{}", if verbatim { ":" } else { "" }, lib));
+ if !as_needed {
+ if self.sess.target.is_like_osx {
+ // See above FIXME comment
+ } else if self.sess.target.linker_is_gnu && !self.sess.target.is_like_windows {
+ self.linker_arg("--as-needed");
+ }
+ }
+ }
+ fn link_staticlib(&mut self, lib: &str, verbatim: bool) {
+ self.hint_static();
+ self.cmd.arg(format!("-l{}{}", if verbatim { ":" } else { "" }, lib));
+ }
+ fn link_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ self.cmd.arg(lib);
+ }
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+ fn framework_path(&mut self, path: &Path) {
+ self.cmd.arg("-F").arg(path);
+ }
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+ fn full_relro(&mut self) {
+ self.linker_args(&["-zrelro", "-znow"]);
+ }
+ fn partial_relro(&mut self) {
+ self.linker_arg("-zrelro");
+ }
+ fn no_relro(&mut self) {
+ self.linker_arg("-znorelro");
+ }
+
+ fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+ self.hint_dynamic();
+ self.cmd.arg(format!("-l{}", lib));
+ }
+
+ fn link_framework(&mut self, framework: &str, as_needed: bool) {
+ self.hint_dynamic();
+ if !as_needed {
+ // FIXME(81490): ld64 as of macOS 11 supports the -needed_framework
+ // flag but we have no way to detect that here.
+ // self.cmd.arg("-needed_framework").arg(framework);
+ self.sess.warn("`as-needed` modifier not implemented yet for ld64");
+ }
+ self.cmd.arg("-framework").arg(framework);
+ }
+
+ // Here we explicitly ask that the entire archive is included in the
+ // resulting artifact. For more details see #15460, but the gist is that
+ // the linker will strip away any unused objects in the archive if we
+ // don't otherwise explicitly reference them. This can occur for
+ // libraries which are just providing bindings, libraries with generic
+ // functions, etc.
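+ // Through a compiler driver on a non-macOS target this emits, illustratively,
+ // `-Wl,--whole-archive -lfoo -Wl,--no-whole-archive`; on macOS it becomes
+ // `-Wl,-force_load` followed by the resolved archive path instead.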
+ fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, search_path: &[PathBuf]) {
+ self.hint_static();
+ let target = &self.sess.target;
+ if !target.is_like_osx {
+ self.linker_arg("--whole-archive").cmd.arg(format!(
+ "-l{}{}",
+ if verbatim { ":" } else { "" },
+ lib
+ ));
+ self.linker_arg("--no-whole-archive");
+ } else {
+ // -force_load is the macOS equivalent of --whole-archive, but it
+ // involves passing the full path to the library to link.
+ self.linker_arg("-force_load");
+ let lib = archive::find_library(lib, verbatim, search_path, &self.sess);
+ self.linker_arg(&lib);
+ }
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ if self.sess.target.is_like_osx {
+ self.linker_arg("-force_load");
+ self.linker_arg(&lib);
+ } else {
+ self.linker_arg("--whole-archive").cmd.arg(lib);
+ self.linker_arg("--no-whole-archive");
+ }
+ }
+
+ fn gc_sections(&mut self, keep_metadata: bool) {
+ // The dead_strip option to the linker specifies that functions and data
+ // unreachable from the entry point will be removed. This is quite useful
+ // with Rust's compilation model, which compiles an entire library at a
+ // time into one object file. For example, this brings hello world from
+ // 1.7MB to 458K.
+ //
+ // Note that this is done for both executables and dynamic libraries. We
+ // won't get much benefit from dylibs because LLVM will have already
+ // stripped away as much as it could. This has not been seen to impact
+ // link times negatively.
+ //
+ // -dead_strip can't be part of the pre_link_args because it's also used
+ // for partial linking when using multiple codegen units (-r). So we
+ // insert it here.
+ if self.sess.target.is_like_osx {
+ self.linker_arg("-dead_strip");
+
+ // If we're building a dylib, we don't use --gc-sections because LLVM
+ // has already done the best it can do, and we also don't want to
+ // eliminate the metadata. If we're building an executable, however,
+ // --gc-sections drops the size of hello world from 1.8MB to 597K, a 67%
+ // reduction.
+ } else if (self.sess.target.linker_is_gnu || self.sess.target.is_like_wasm)
+ && !keep_metadata
+ {
+ self.linker_arg("--gc-sections");
+ }
+ }
+
+ fn no_gc_sections(&mut self) {
+ if self.sess.target.linker_is_gnu || self.sess.target.is_like_wasm {
+ self.linker_arg("--no-gc-sections");
+ }
+ }
+
+ fn optimize(&mut self) {
+ if !self.sess.target.linker_is_gnu && !self.sess.target.is_like_wasm {
+ return;
+ }
+
+ // GNU-style linkers support optimization with -O. GNU ld doesn't
+ // need a numeric argument, but other linkers do.
+ if self.sess.opts.optimize == config::OptLevel::Default
+ || self.sess.opts.optimize == config::OptLevel::Aggressive
+ {
+ self.linker_arg("-O1");
+ }
+ }
+
+ fn pgo_gen(&mut self) {
+ if !self.sess.target.linker_is_gnu {
+ return;
+ }
+
+ // If we're doing PGO generation stuff and on a GNU-like linker, use the
+ // "-u" flag to properly pull in the profiler runtime bits.
+ //
+ // This is because LLVM otherwise won't add the needed initialization
+ // for us on Linux (though the extra flag should be harmless if it
+ // does).
+ //
+ // See https://reviews.llvm.org/D14033 and https://reviews.llvm.org/D14030.
+ //
+ // Though it may be worth trying to revert those changes upstream, since
+ // the overhead of the initialization should be minor.
+ self.cmd.arg("-u");
+ self.cmd.arg("__llvm_profile_runtime");
+ }
+
+ fn control_flow_guard(&mut self) {}
+
+ fn debuginfo(&mut self, strip: Strip, _: &[PathBuf]) {
+ // The macOS linker doesn't support stripping symbols directly anymore.
+ if self.sess.target.is_like_osx {
+ return;
+ }
+
+ match strip {
+ Strip::None => {}
+ Strip::Debuginfo => {
+ self.linker_arg("--strip-debug");
+ }
+ Strip::Symbols => {
+ self.linker_arg("--strip-all");
+ }
+ }
+ }
+
+ fn no_crt_objects(&mut self) {
+ if !self.is_ld {
+ self.cmd.arg("-nostartfiles");
+ }
+ }
+
+ fn no_default_libraries(&mut self) {
+ if !self.is_ld {
+ self.cmd.arg("-nodefaultlibs");
+ }
+ }
+
+ fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType, symbols: &[String]) {
+ // Symbol visibility in object files typically takes care of this.
+ if crate_type == CrateType::Executable {
+ let should_export_executable_symbols =
+ self.sess.opts.unstable_opts.export_executable_symbols;
+ if self.sess.target.override_export_symbols.is_none()
+ && !should_export_executable_symbols
+ {
+ return;
+ }
+ }
+
+ // We manually create a list of exported symbols to ensure we don't expose any more.
+ // The object files have far more public symbols than we actually want to export,
+ // so we hide them all here.
+
+ if !self.sess.target.limit_rdylib_exports {
+ return;
+ }
+
+ // FIXME(#99978) hide #[no_mangle] symbols for proc-macros
+
+ let is_windows = self.sess.target.is_like_windows;
+ let path = tmpdir.join(if is_windows { "list.def" } else { "list" });
+
+ debug!("EXPORTED SYMBOLS:");
+
+ if self.sess.target.is_like_osx {
+ // Write a plain, newline-separated list of symbols
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+ for sym in symbols {
+ debug!(" _{}", sym);
+ writeln!(f, "_{}", sym)?;
+ }
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+ }
+ } else if is_windows {
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+
+ // A .def file similar to the MSVC one, but without the LIBRARY
+ // section because LD doesn't like it when that's empty.
+ writeln!(f, "EXPORTS")?;
+ for symbol in symbols {
+ debug!(" _{}", symbol);
+ writeln!(f, " {}", symbol)?;
+ }
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write list.def file: {}", e));
+ }
+ } else {
+ // Write an LD version script
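+ // The resulting script looks roughly like this (symbol name illustrative):
+ //
+ //     {
+ //       global:
+ //         my_symbol;
+ //       local:
+ //         *;
+ //     };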
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+ writeln!(f, "{{")?;
+ if !symbols.is_empty() {
+ writeln!(f, " global:")?;
+ for sym in symbols {
+ debug!(" {};", sym);
+ writeln!(f, " {};", sym)?;
+ }
+ }
+ writeln!(f, "\n local:\n *;\n}};")?;
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write version script: {}", e));
+ }
+ }
+
+ if self.sess.target.is_like_osx {
+ self.linker_args(&[OsString::from("-exported_symbols_list"), path.into()]);
+ } else if self.sess.target.is_like_solaris {
+ self.linker_args(&[OsString::from("-M"), path.into()]);
+ } else {
+ if is_windows {
+ self.linker_arg(path);
+ } else {
+ let mut arg = OsString::from("--version-script=");
+ arg.push(path);
+ self.linker_arg(arg);
+ }
+ }
+ }
+
+ fn subsystem(&mut self, subsystem: &str) {
+ self.linker_arg("--subsystem");
+ self.linker_arg(&subsystem);
+ }
+
+ fn reset_per_library_state(&mut self) {
+ self.hint_dynamic(); // Reset to default before returning the composed command line.
+ }
+
+ fn group_start(&mut self) {
+ if self.takes_hints() {
+ self.linker_arg("--start-group");
+ }
+ }
+
+ fn group_end(&mut self) {
+ if self.takes_hints() {
+ self.linker_arg("--end-group");
+ }
+ }
+
+ fn linker_plugin_lto(&mut self) {
+ match self.sess.opts.cg.linker_plugin_lto {
+ LinkerPluginLto::Disabled => {
+ // Nothing to do
+ }
+ LinkerPluginLto::LinkerPluginAuto => {
+ self.push_linker_plugin_lto_args(None);
+ }
+ LinkerPluginLto::LinkerPlugin(ref path) => {
+ self.push_linker_plugin_lto_args(Some(path.as_os_str()));
+ }
+ }
+ }
+
+ // Add the `GNU_EH_FRAME` program header which is required to locate unwinding information.
+ // Some versions of `gcc` add it implicitly, some (e.g. `musl-gcc`) don't,
+ // so we just always add it.
+ fn add_eh_frame_header(&mut self) {
+ self.linker_arg("--eh-frame-hdr");
+ }
+
+ fn add_no_exec(&mut self) {
+ if self.sess.target.is_like_windows {
+ self.linker_arg("--nxcompat");
+ } else if self.sess.target.linker_is_gnu {
+ self.linker_arg("-znoexecstack");
+ }
+ }
+
+ fn add_as_needed(&mut self) {
+ if self.sess.target.linker_is_gnu && !self.sess.target.is_like_windows {
+ self.linker_arg("--as-needed");
+ } else if self.sess.target.is_like_solaris {
+ // -z ignore is the Solaris equivalent to the GNU ld --as-needed option
+ self.linker_args(&["-z", "ignore"]);
+ }
+ }
+}
+
+pub struct MsvcLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> Linker for MsvcLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, output_kind: LinkOutputKind, out_filename: &Path) {
+ match output_kind {
+ LinkOutputKind::DynamicNoPicExe
+ | LinkOutputKind::DynamicPicExe
+ | LinkOutputKind::StaticNoPicExe
+ | LinkOutputKind::StaticPicExe => {}
+ LinkOutputKind::DynamicDylib | LinkOutputKind::StaticDylib => {
+ self.cmd.arg("/DLL");
+ let mut arg: OsString = "/IMPLIB:".into();
+ arg.push(out_filename.with_extension("dll.lib"));
+ self.cmd.arg(arg);
+ }
+ LinkOutputKind::WasiReactorExe => {
+ panic!("can't link as reactor on non-wasi target");
+ }
+ }
+ }
+
+ fn link_rlib(&mut self, lib: &Path) {
+ self.cmd.arg(lib);
+ }
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {
+ // MSVC's ICF (Identical COMDAT Folding) link optimization is
+ // slow for Rust and thus we disable it by default when not doing an
+ // optimized build.
+ if self.sess.opts.optimize != config::OptLevel::No {
+ self.cmd.arg("/OPT:REF,ICF");
+ } else {
+ // It is necessary to specify NOICF here, because /OPT:REF
+ // implies ICF by default.
+ self.cmd.arg("/OPT:REF,NOICF");
+ }
+ }
+
+ fn no_gc_sections(&mut self) {
+ self.cmd.arg("/OPT:NOREF,NOICF");
+ }
+
+ fn link_dylib(&mut self, lib: &str, verbatim: bool, _as_needed: bool) {
+ self.cmd.arg(format!("{}{}", lib, if verbatim { "" } else { ".lib" }));
+ }
+
+ fn link_rust_dylib(&mut self, lib: &str, path: &Path) {
+ // When producing a dll, the MSVC linker may not actually emit a
+ // `foo.lib` file if the dll doesn't export any symbols, so we
+ // check to see if the file is there and just omit linking to it if it's
+ // not present.
+ let name = format!("{}.dll.lib", lib);
+ if path.join(&name).exists() {
+ self.cmd.arg(name);
+ }
+ }
+
+ fn link_staticlib(&mut self, lib: &str, verbatim: bool) {
+ self.cmd.arg(format!("{}{}", lib, if verbatim { "" } else { ".lib" }));
+ }
+
+ fn full_relro(&mut self) {
+ // noop
+ }
+
+ fn partial_relro(&mut self) {
+ // noop
+ }
+
+ fn no_relro(&mut self) {
+ // noop
+ }
+
+ fn no_crt_objects(&mut self) {
+ // noop
+ }
+
+ fn no_default_libraries(&mut self) {
+ self.cmd.arg("/NODEFAULTLIB");
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ let mut arg = OsString::from("/LIBPATH:");
+ arg.push(path);
+ self.cmd.arg(&arg);
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ let mut arg = OsString::from("/OUT:");
+ arg.push(path);
+ self.cmd.arg(&arg);
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ bug!("frameworks are not supported on windows")
+ }
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ bug!("frameworks are not supported on windows")
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, _search_path: &[PathBuf]) {
+ self.cmd.arg(format!("/WHOLEARCHIVE:{}{}", lib, if verbatim { "" } else { ".lib" }));
+ }
+ fn link_whole_rlib(&mut self, path: &Path) {
+ let mut arg = OsString::from("/WHOLEARCHIVE:");
+ arg.push(path);
+ self.cmd.arg(arg);
+ }
+ fn optimize(&mut self) {
+ // Needs more investigation of `/OPT` arguments
+ }
+
+ fn pgo_gen(&mut self) {
+ // Nothing needed here.
+ }
+
+ fn control_flow_guard(&mut self) {
+ self.cmd.arg("/guard:cf");
+ }
+
+ fn debuginfo(&mut self, strip: Strip, natvis_debugger_visualizers: &[PathBuf]) {
+ match strip {
+ Strip::None => {
+ // This will cause the Microsoft linker to generate a PDB file
+ // from the CodeView line tables in the object files.
+ self.cmd.arg("/DEBUG");
+
+ // This will cause the Microsoft linker to embed .natvis info into the PDB file
+ let natvis_dir_path = self.sess.sysroot.join("lib\\rustlib\\etc");
+ if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) {
+ for entry in natvis_dir {
+ match entry {
+ Ok(entry) => {
+ let path = entry.path();
+ if path.extension() == Some("natvis".as_ref()) {
+ let mut arg = OsString::from("/NATVIS:");
+ arg.push(path);
+ self.cmd.arg(arg);
+ }
+ }
+ Err(err) => {
+ self.sess
+ .warn(&format!("error enumerating natvis directory: {}", err));
+ }
+ }
+ }
+ }
+
+ // This will cause the Microsoft linker to embed .natvis info for all crates into the PDB file
+ for path in natvis_debugger_visualizers {
+ let mut arg = OsString::from("/NATVIS:");
+ arg.push(path);
+ self.cmd.arg(arg);
+ }
+ }
+ Strip::Debuginfo | Strip::Symbols => {
+ self.cmd.arg("/DEBUG:NONE");
+ }
+ }
+ }
+
+ // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to
+ // export symbols from a dynamic library. When building a dynamic library,
+ // however, we're going to want some symbols exported, so this function
+ // generates a DEF file which lists all the symbols.
+ //
+ // The linker will read this `*.def` file and export all the symbols from
+ // the dynamic library. Note that this is not as simple as just exporting
+ // all the symbols in the current crate (as specified by `codegen.reachable`)
+ // but rather we also need to possibly export the symbols of upstream
+ // crates. Upstream rlibs may be linked statically to this dynamic library,
+ // in which case they may continue to transitively be used and hence need
+ // their symbols exported.
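+ //
+ // The generated `lib.def` looks roughly like this (symbol names illustrative):
+ //
+ //     LIBRARY
+ //     EXPORTS
+ //         my_exported_fn
+ //         my_other_symbol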
+ fn export_symbols(&mut self, tmpdir: &Path, crate_type: CrateType, symbols: &[String]) {
+ // Symbol visibility takes care of this typically
+ if crate_type == CrateType::Executable {
+ let should_export_executable_symbols =
+ self.sess.opts.unstable_opts.export_executable_symbols;
+ if !should_export_executable_symbols {
+ return;
+ }
+ }
+
+ let path = tmpdir.join("lib.def");
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+
+ // Start off with the standard module name header and then go
+ // straight to exports.
+ writeln!(f, "LIBRARY")?;
+ writeln!(f, "EXPORTS")?;
+ for symbol in symbols {
+ debug!(" _{}", symbol);
+ writeln!(f, " {}", symbol)?;
+ }
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write lib.def file: {}", e));
+ }
+ let mut arg = OsString::from("/DEF:");
+ arg.push(path);
+ self.cmd.arg(&arg);
+ }
+
+ fn subsystem(&mut self, subsystem: &str) {
+ // Note that previous passes of the compiler validated this subsystem,
+ // so we just blindly pass it to the linker.
+ self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem));
+
+ // Windows has two subsystems we're interested in right now, the console
+ // and windows subsystems. These both implicitly have different entry
+ // points (starting symbols). The console entry point starts with
+ // `mainCRTStartup` and the windows entry point starts with
+ // `WinMainCRTStartup`. These entry points, defined in system libraries,
+ // will then later probe for either `main` or `WinMain`, respectively, to
+ // start the application.
+ //
+ // In Rust we just always generate a `main` function so we want control
+ // to always start there, so we force the entry point on the windows
+ // subsystem to be `mainCRTStartup` to get everything booted up
+ // correctly.
+ //
+ // For more information see RFC #1665
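+ // So a `windows`-subsystem link illustratively ends up passing
+ // `/SUBSYSTEM:windows /ENTRY:mainCRTStartup`.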
+ if subsystem == "windows" {
+ self.cmd.arg("/ENTRY:mainCRTStartup");
+ }
+ }
+
+ // MSVC doesn't need group indicators
+ fn group_start(&mut self) {}
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {
+ // Do nothing
+ }
+
+ fn add_no_exec(&mut self) {
+ self.cmd.arg("/NXCOMPAT");
+ }
+}
+
+pub struct EmLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> Linker for EmLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn link_dylib(&mut self, lib: &str, verbatim: bool, _as_needed: bool) {
+ // Emscripten always links statically
+ self.link_staticlib(lib, verbatim);
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, verbatim: bool, _search_path: &[PathBuf]) {
+ // not supported?
+ self.link_staticlib(lib, verbatim);
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ // not supported?
+ self.link_rlib(lib);
+ }
+
+ fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+ self.link_dylib(lib, false, true);
+ }
+
+ fn link_rlib(&mut self, lib: &Path) {
+ self.add_object(lib);
+ }
+
+ fn full_relro(&mut self) {
+ // noop
+ }
+
+ fn partial_relro(&mut self) {
+ // noop
+ }
+
+ fn no_relro(&mut self) {
+ // noop
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ bug!("frameworks are not supported on Emscripten")
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ bug!("frameworks are not supported on Emscripten")
+ }
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {
+ // noop
+ }
+
+ fn no_gc_sections(&mut self) {
+ // noop
+ }
+
+ fn optimize(&mut self) {
+ // Emscripten performs its own optimizations
+ self.cmd.arg(match self.sess.opts.optimize {
+ OptLevel::No => "-O0",
+ OptLevel::Less => "-O1",
+ OptLevel::Default => "-O2",
+ OptLevel::Aggressive => "-O3",
+ OptLevel::Size => "-Os",
+ OptLevel::SizeMin => "-Oz",
+ });
+ }
+
+ fn pgo_gen(&mut self) {
+ // noop, but maybe we need something like the gnu linker?
+ }
+
+ fn control_flow_guard(&mut self) {}
+
+ fn debuginfo(&mut self, _strip: Strip, _: &[PathBuf]) {
+ // Preserve names or generate source maps depending on debug info
+ self.cmd.arg(match self.sess.opts.debuginfo {
+ DebugInfo::None => "-g0",
+ DebugInfo::Limited => "--profiling-funcs",
+ DebugInfo::Full => "-g",
+ });
+ }
+
+ fn no_crt_objects(&mut self) {}
+
+ fn no_default_libraries(&mut self) {
+ self.cmd.arg("-nodefaultlibs");
+ }
+
+ fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType, symbols: &[String]) {
+ debug!("EXPORTED SYMBOLS:");
+
+ self.cmd.arg("-s");
+
+ let mut arg = OsString::from("EXPORTED_FUNCTIONS=");
+ let encoded = serde_json::to_string(
+ &symbols.iter().map(|sym| "_".to_owned() + sym).collect::<Vec<_>>(),
+ )
+ .unwrap();
+ debug!("{}", encoded);
+
+ arg.push(encoded);
+
+ self.cmd.arg(arg);
+ }
+
+ fn subsystem(&mut self, _subsystem: &str) {
+ // noop
+ }
+
+ // Appears to be unnecessary on Emscripten
+ fn group_start(&mut self) {}
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {
+ // Do nothing
+ }
+}
+
+pub struct WasmLd<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> WasmLd<'a> {
+ fn new(mut cmd: Command, sess: &'a Session) -> WasmLd<'a> {
+ // If the atomics feature is enabled for wasm then we need a whole bunch
+ // of flags:
+ //
+ // * `--shared-memory` - the link won't even succeed without this; it
+ // flags the one linear memory as `shared`
+ //
+ // * `--max-memory=1G` - when specifying a shared memory this must also
+ // be specified. We conservatively choose 1GB but users should be able
+ // to override this with `-C link-arg`.
+ //
+ // * `--import-memory` - it doesn't make much sense for memory to be
+ // exported in a threaded module because typically you're
+ // sharing memory and instantiating the module multiple times. As a
+ // result, if it were exported, then we'd just have no sharing.
+ //
+ // * `--export=__wasm_init_memory` - when using `--passive-segments` the
+ // linker will synthesize this function, and so we need to make sure
+ // that our usage of `--export` below won't accidentally cause this
+ // function to get deleted.
+ //
+ // * `--export=*tls*` - when `#[thread_local]` symbols are used these
+ // symbols are how the TLS segments are initialized and configured.
+ if sess.target_features.contains(&sym::atomics) {
+ cmd.arg("--shared-memory");
+ cmd.arg("--max-memory=1073741824");
+ cmd.arg("--import-memory");
+ cmd.arg("--export=__wasm_init_memory");
+ cmd.arg("--export=__wasm_init_tls");
+ cmd.arg("--export=__tls_size");
+ cmd.arg("--export=__tls_align");
+ cmd.arg("--export=__tls_base");
+ }
+ WasmLd { cmd, sess }
+ }
+}
+
+impl<'a> Linker for WasmLd<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, output_kind: LinkOutputKind, _out_filename: &Path) {
+ match output_kind {
+ LinkOutputKind::DynamicNoPicExe
+ | LinkOutputKind::DynamicPicExe
+ | LinkOutputKind::StaticNoPicExe
+ | LinkOutputKind::StaticPicExe => {}
+ LinkOutputKind::DynamicDylib | LinkOutputKind::StaticDylib => {
+ self.cmd.arg("--no-entry");
+ }
+ LinkOutputKind::WasiReactorExe => {
+ self.cmd.arg("--entry");
+ self.cmd.arg("_initialize");
+ }
+ }
+ }
+
+ fn link_dylib(&mut self, lib: &str, _verbatim: bool, _as_needed: bool) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_rlib(&mut self, lib: &Path) {
+ self.cmd.arg(lib);
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ panic!("frameworks not supported")
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn full_relro(&mut self) {}
+
+ fn partial_relro(&mut self) {}
+
+ fn no_relro(&mut self) {}
+
+ fn link_rust_dylib(&mut self, lib: &str, _path: &Path) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ panic!("frameworks not supported")
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
+ self.cmd.arg("-l").arg(lib);
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ self.cmd.arg(lib);
+ }
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {
+ self.cmd.arg("--gc-sections");
+ }
+
+ fn no_gc_sections(&mut self) {
+ self.cmd.arg("--no-gc-sections");
+ }
+
+ fn optimize(&mut self) {
+ self.cmd.arg(match self.sess.opts.optimize {
+ OptLevel::No => "-O0",
+ OptLevel::Less => "-O1",
+ OptLevel::Default => "-O2",
+ OptLevel::Aggressive => "-O3",
+ // Currently LLD doesn't support `Os` and `Oz`, so pass through `O2`
+ // instead.
+ OptLevel::Size => "-O2",
+ OptLevel::SizeMin => "-O2",
+ });
+ }
+
+ fn pgo_gen(&mut self) {}
+
+ fn debuginfo(&mut self, strip: Strip, _: &[PathBuf]) {
+ match strip {
+ Strip::None => {}
+ Strip::Debuginfo => {
+ self.cmd.arg("--strip-debug");
+ }
+ Strip::Symbols => {
+ self.cmd.arg("--strip-all");
+ }
+ }
+ }
+
+ fn control_flow_guard(&mut self) {}
+
+ fn no_crt_objects(&mut self) {}
+
+ fn no_default_libraries(&mut self) {}
+
+ fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType, symbols: &[String]) {
+ for sym in symbols {
+ self.cmd.arg("--export").arg(&sym);
+ }
+
+ // LLD will hide these otherwise-internal symbols since it only exports
+ // symbols explicitly passed via the `--export` flags above and hides all
+ // others. Various bits and pieces of tooling rely on these symbols, so
+ // make sure they make their way out of the linker as well.
+ self.cmd.arg("--export=__heap_base");
+ self.cmd.arg("--export=__data_end");
+ }
+
+ fn subsystem(&mut self, _subsystem: &str) {}
+
+ // Not needed for now with LLD
+ fn group_start(&mut self) {}
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {
+ // Do nothing for now
+ }
+}
+
+/// Linker shepherd script for L4Re (Fiasco)
+pub struct L4Bender<'a> {
+ cmd: Command,
+ sess: &'a Session,
+ hinted_static: bool,
+}
+
+impl<'a> Linker for L4Bender<'a> {
+ fn link_dylib(&mut self, _lib: &str, _verbatim: bool, _as_needed: bool) {
+ bug!("dylibs are not supported on L4Re");
+ }
+ fn link_staticlib(&mut self, lib: &str, _verbatim: bool) {
+ self.hint_static();
+ self.cmd.arg(format!("-PC{}", lib));
+ }
+ fn link_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ self.cmd.arg(lib);
+ }
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+ fn framework_path(&mut self, _: &Path) {
+ bug!("frameworks are not supported on L4Re");
+ }
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn full_relro(&mut self) {
+ self.cmd.arg("-zrelro");
+ self.cmd.arg("-znow");
+ }
+
+ fn partial_relro(&mut self) {
+ self.cmd.arg("-zrelro");
+ }
+
+ fn no_relro(&mut self) {
+ self.cmd.arg("-znorelro");
+ }
+
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+ fn link_rust_dylib(&mut self, _: &str, _: &Path) {
+ panic!("Rust dylibs not supported");
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ bug!("frameworks not supported on L4Re");
+ }
+
+ fn link_whole_staticlib(&mut self, lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
+ self.hint_static();
+ self.cmd.arg("--whole-archive").arg(format!("-l{}", lib));
+ self.cmd.arg("--no-whole-archive");
+ }
+
+ fn link_whole_rlib(&mut self, lib: &Path) {
+ self.hint_static();
+ self.cmd.arg("--whole-archive").arg(lib).arg("--no-whole-archive");
+ }
+
+ fn gc_sections(&mut self, keep_metadata: bool) {
+ if !keep_metadata {
+ self.cmd.arg("--gc-sections");
+ }
+ }
+
+ fn no_gc_sections(&mut self) {
+ self.cmd.arg("--no-gc-sections");
+ }
+
+ fn optimize(&mut self) {
+ // GNU-style linkers support optimization with -O. GNU ld doesn't
+ // need a numeric argument, but other linkers do.
+ if self.sess.opts.optimize == config::OptLevel::Default
+ || self.sess.opts.optimize == config::OptLevel::Aggressive
+ {
+ self.cmd.arg("-O1");
+ }
+ }
+
+ fn pgo_gen(&mut self) {}
+
+ fn debuginfo(&mut self, strip: Strip, _: &[PathBuf]) {
+ match strip {
+ Strip::None => {}
+ Strip::Debuginfo => {
+ self.cmd().arg("--strip-debug");
+ }
+ Strip::Symbols => {
+ self.cmd().arg("--strip-all");
+ }
+ }
+ }
+
+ fn no_default_libraries(&mut self) {
+ self.cmd.arg("-nostdlib");
+ }
+
+ fn export_symbols(&mut self, _: &Path, _: CrateType, _: &[String]) {
+ // TODO: not implemented; could follow the GccLinker approach.
+ self.sess.warn("exporting symbols not implemented yet for L4Bender");
+ }
+
+ fn subsystem(&mut self, subsystem: &str) {
+ self.cmd.arg(&format!("--subsystem {}", subsystem));
+ }
+
+ fn reset_per_library_state(&mut self) {
+ self.hint_static(); // Reset to default before returning the composed command line.
+ }
+
+ fn group_start(&mut self) {
+ self.cmd.arg("--start-group");
+ }
+
+ fn group_end(&mut self) {
+ self.cmd.arg("--end-group");
+ }
+
+ fn linker_plugin_lto(&mut self) {}
+
+ fn control_flow_guard(&mut self) {}
+
+ fn no_crt_objects(&mut self) {}
+}
+
+impl<'a> L4Bender<'a> {
+ pub fn new(cmd: Command, sess: &'a Session) -> L4Bender<'a> {
+ L4Bender { cmd, sess, hinted_static: false }
+ }
+
+ fn hint_static(&mut self) {
+ if !self.hinted_static {
+ self.cmd.arg("-static");
+ self.hinted_static = true;
+ }
+ }
+}
+
+fn for_each_exported_symbols_include_dep<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ crate_type: CrateType,
+ mut callback: impl FnMut(ExportedSymbol<'tcx>, SymbolExportInfo, CrateNum),
+) {
+ for &(symbol, info) in tcx.exported_symbols(LOCAL_CRATE).iter() {
+ callback(symbol, info, LOCAL_CRATE);
+ }
+
+ let formats = tcx.dependency_formats(());
+ let deps = formats.iter().find_map(|(t, list)| (*t == crate_type).then_some(list)).unwrap();
+
+ for (index, dep_format) in deps.iter().enumerate() {
+ let cnum = CrateNum::new(index + 1);
+ // For each dependency that we are linking to statically ...
+ if *dep_format == Linkage::Static {
+ for &(symbol, info) in tcx.exported_symbols(cnum).iter() {
+ callback(symbol, info, cnum);
+ }
+ }
+ }
+}
+
+pub(crate) fn exported_symbols(tcx: TyCtxt<'_>, crate_type: CrateType) -> Vec<String> {
+ if let Some(ref exports) = tcx.sess.target.override_export_symbols {
+ return exports.iter().map(ToString::to_string).collect();
+ }
+
+ let mut symbols = Vec::new();
+
+ let export_threshold = symbol_export::crates_export_threshold(&[crate_type]);
+ for_each_exported_symbols_include_dep(tcx, crate_type, |symbol, info, cnum| {
+ if info.level.is_below_threshold(export_threshold) {
+ symbols.push(symbol_export::symbol_name_for_instance_in_crate(tcx, symbol, cnum));
+ }
+ });
+
+ symbols
+}
+
+pub(crate) fn linked_symbols(
+ tcx: TyCtxt<'_>,
+ crate_type: CrateType,
+) -> Vec<(String, SymbolExportKind)> {
+ match crate_type {
+ CrateType::Executable | CrateType::Cdylib | CrateType::Dylib => (),
+ CrateType::Staticlib | CrateType::ProcMacro | CrateType::Rlib => {
+ return Vec::new();
+ }
+ }
+
+ let mut symbols = Vec::new();
+
+ let export_threshold = symbol_export::crates_export_threshold(&[crate_type]);
+ for_each_exported_symbols_include_dep(tcx, crate_type, |symbol, info, cnum| {
+ if info.level.is_below_threshold(export_threshold) || info.used {
+ symbols.push((
+ symbol_export::linking_symbol_name_for_instance_in_crate(tcx, symbol, cnum),
+ info.kind,
+ ));
+ }
+ });
+
+ symbols
+}
+
+/// A much simplified and explicit CLI for the NVPTX linker. The linker operates
+/// on bitcode and uses the LLVM backend to generate PTX assembly.
+pub struct PtxLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> Linker for PtxLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+ fn link_rlib(&mut self, path: &Path) {
+ self.cmd.arg("--rlib").arg(path);
+ }
+
+ fn link_whole_rlib(&mut self, path: &Path) {
+ self.cmd.arg("--rlib").arg(path);
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn debuginfo(&mut self, _strip: Strip, _: &[PathBuf]) {
+ self.cmd.arg("--debug");
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg("--bitcode").arg(path);
+ }
+
+ fn optimize(&mut self) {
+ match self.sess.lto() {
+ Lto::Thin | Lto::Fat | Lto::ThinLocal => {
+ self.cmd.arg("-Olto");
+ }
+
+ Lto::No => {}
+ };
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn link_dylib(&mut self, _lib: &str, _verbatim: bool, _as_needed: bool) {
+ panic!("external dylibs not supported")
+ }
+
+ fn link_rust_dylib(&mut self, _lib: &str, _path: &Path) {
+ panic!("external dylibs not supported")
+ }
+
+ fn link_staticlib(&mut self, _lib: &str, _verbatim: bool) {
+ panic!("staticlibs not supported")
+ }
+
+ fn link_whole_staticlib(&mut self, _lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
+ panic!("staticlibs not supported")
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ panic!("frameworks not supported")
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ panic!("frameworks not supported")
+ }
+
+ fn full_relro(&mut self) {}
+
+ fn partial_relro(&mut self) {}
+
+ fn no_relro(&mut self) {}
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {}
+
+ fn no_gc_sections(&mut self) {}
+
+ fn pgo_gen(&mut self) {}
+
+ fn no_crt_objects(&mut self) {}
+
+ fn no_default_libraries(&mut self) {}
+
+ fn control_flow_guard(&mut self) {}
+
+ fn export_symbols(&mut self, _tmpdir: &Path, _crate_type: CrateType, _symbols: &[String]) {}
+
+ fn subsystem(&mut self, _subsystem: &str) {}
+
+ fn group_start(&mut self) {}
+
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {}
+}
+
+pub struct BpfLinker<'a> {
+ cmd: Command,
+ sess: &'a Session,
+}
+
+impl<'a> Linker for BpfLinker<'a> {
+ fn cmd(&mut self) -> &mut Command {
+ &mut self.cmd
+ }
+
+ fn set_output_kind(&mut self, _output_kind: LinkOutputKind, _out_filename: &Path) {}
+
+ fn link_rlib(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn link_whole_rlib(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn include_path(&mut self, path: &Path) {
+ self.cmd.arg("-L").arg(path);
+ }
+
+ fn debuginfo(&mut self, _strip: Strip, _: &[PathBuf]) {
+ self.cmd.arg("--debug");
+ }
+
+ fn add_object(&mut self, path: &Path) {
+ self.cmd.arg(path);
+ }
+
+ fn optimize(&mut self) {
+ self.cmd.arg(match self.sess.opts.optimize {
+ OptLevel::No => "-O0",
+ OptLevel::Less => "-O1",
+ OptLevel::Default => "-O2",
+ OptLevel::Aggressive => "-O3",
+ OptLevel::Size => "-Os",
+ OptLevel::SizeMin => "-Oz",
+ });
+ }
+
+ fn output_filename(&mut self, path: &Path) {
+ self.cmd.arg("-o").arg(path);
+ }
+
+ fn link_dylib(&mut self, _lib: &str, _verbatim: bool, _as_needed: bool) {
+ panic!("external dylibs not supported")
+ }
+
+ fn link_rust_dylib(&mut self, _lib: &str, _path: &Path) {
+ panic!("external dylibs not supported")
+ }
+
+ fn link_staticlib(&mut self, _lib: &str, _verbatim: bool) {
+ panic!("staticlibs not supported")
+ }
+
+ fn link_whole_staticlib(&mut self, _lib: &str, _verbatim: bool, _search_path: &[PathBuf]) {
+ panic!("staticlibs not supported")
+ }
+
+ fn framework_path(&mut self, _path: &Path) {
+ panic!("frameworks not supported")
+ }
+
+ fn link_framework(&mut self, _framework: &str, _as_needed: bool) {
+ panic!("frameworks not supported")
+ }
+
+ fn full_relro(&mut self) {}
+
+ fn partial_relro(&mut self) {}
+
+ fn no_relro(&mut self) {}
+
+ fn gc_sections(&mut self, _keep_metadata: bool) {}
+
+ fn no_gc_sections(&mut self) {}
+
+ fn pgo_gen(&mut self) {}
+
+ fn no_crt_objects(&mut self) {}
+
+ fn no_default_libraries(&mut self) {}
+
+ fn control_flow_guard(&mut self) {}
+
+ fn export_symbols(&mut self, tmpdir: &Path, _crate_type: CrateType, symbols: &[String]) {
+ let path = tmpdir.join("symbols");
+ let res: io::Result<()> = try {
+ let mut f = BufWriter::new(File::create(&path)?);
+ for sym in symbols {
+ writeln!(f, "{}", sym)?;
+ }
+ };
+ if let Err(e) = res {
+ self.sess.fatal(&format!("failed to write symbols file: {}", e));
+ } else {
+ self.cmd.arg("--export-symbols").arg(&path);
+ }
+ }
+
+ fn subsystem(&mut self, _subsystem: &str) {}
+
+ fn group_start(&mut self) {}
+
+ fn group_end(&mut self) {}
+
+ fn linker_plugin_lto(&mut self) {}
+}
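
For illustration, a minimal sketch of the command line a PtxLinker like the one
above assembles; the "rust-ptx-linker" binary name and the paths here are
assumptions, not taken from this patch:

    use std::process::Command;

    // Hypothetical invocation mirroring the PtxLinker methods above.
    let mut cmd = Command::new("rust-ptx-linker"); // assumed executable name
    cmd.arg("-L").arg("/opt/cuda/lib");   // include_path
    cmd.arg("--bitcode").arg("kernel.o"); // add_object
    cmd.arg("-Olto");                     // optimize, when any LTO mode is on
    cmd.arg("-o").arg("kernel.ptx");      // output_filename
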
diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs
new file mode 100644
index 000000000..cb6244050
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/lto.rs
@@ -0,0 +1,104 @@
+use super::write::CodegenContext;
+use crate::traits::*;
+use crate::ModuleCodegen;
+
+use rustc_data_structures::memmap::Mmap;
+use rustc_errors::FatalError;
+
+use std::ffi::CString;
+use std::sync::Arc;
+
+pub struct ThinModule<B: WriteBackendMethods> {
+ pub shared: Arc<ThinShared<B>>,
+ pub idx: usize,
+}
+
+impl<B: WriteBackendMethods> ThinModule<B> {
+ pub fn name(&self) -> &str {
+ self.shared.module_names[self.idx].to_str().unwrap()
+ }
+
+ pub fn cost(&self) -> u64 {
+ // Yes, that's correct, we're using the size of the bytecode as an
+ // indicator for how costly this codegen unit is.
+ self.data().len() as u64
+ }
+
+ pub fn data(&self) -> &[u8] {
+ let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data());
+ a.unwrap_or_else(|| {
+ let len = self.shared.thin_buffers.len();
+ self.shared.serialized_modules[self.idx - len].data()
+ })
+ }
+}
+
+pub struct ThinShared<B: WriteBackendMethods> {
+ pub data: B::ThinData,
+ pub thin_buffers: Vec<B::ThinBuffer>,
+ pub serialized_modules: Vec<SerializedModule<B::ModuleBuffer>>,
+ pub module_names: Vec<CString>,
+}
+
+pub enum LtoModuleCodegen<B: WriteBackendMethods> {
+ Fat {
+ module: ModuleCodegen<B::Module>,
+ _serialized_bitcode: Vec<SerializedModule<B::ModuleBuffer>>,
+ },
+
+ Thin(ThinModule<B>),
+}
+
+impl<B: WriteBackendMethods> LtoModuleCodegen<B> {
+ pub fn name(&self) -> &str {
+ match *self {
+ LtoModuleCodegen::Fat { .. } => "everything",
+ LtoModuleCodegen::Thin(ref m) => m.name(),
+ }
+ }
+
+ /// Optimize this module within the given codegen context.
+ ///
+    /// This function is unsafe as it'll return a `ModuleCodegen` that still
+    /// points to LLVM data structures owned by this `LtoModuleCodegen`.
+ /// It's intended that the module returned is immediately code generated and
+ /// dropped, and then this LTO module is dropped.
+ pub unsafe fn optimize(
+ self,
+ cgcx: &CodegenContext<B>,
+ ) -> Result<ModuleCodegen<B::Module>, FatalError> {
+ match self {
+ LtoModuleCodegen::Fat { mut module, .. } => {
+ B::optimize_fat(cgcx, &mut module)?;
+ Ok(module)
+ }
+ LtoModuleCodegen::Thin(thin) => B::optimize_thin(cgcx, thin),
+ }
+ }
+
+ /// A "gauge" of how costly it is to optimize this module, used to sort
+ /// biggest modules first.
+ pub fn cost(&self) -> u64 {
+ match *self {
+ // Only one module with fat LTO, so the cost doesn't matter.
+ LtoModuleCodegen::Fat { .. } => 0,
+ LtoModuleCodegen::Thin(ref m) => m.cost(),
+ }
+ }
+}
+
+pub enum SerializedModule<M: ModuleBufferMethods> {
+ Local(M),
+ FromRlib(Vec<u8>),
+ FromUncompressedFile(Mmap),
+}
+
+impl<M: ModuleBufferMethods> SerializedModule<M> {
+ pub fn data(&self) -> &[u8] {
+ match *self {
+ SerializedModule::Local(ref m) => m.data(),
+ SerializedModule::FromRlib(ref m) => m,
+ SerializedModule::FromUncompressedFile(ref m) => m,
+ }
+ }
+}
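
The `cost()` hook above lets the work scheduler start the costliest modules
first. A minimal sketch of that ordering, assuming (name, cost) pairs stand in
for the real work items:

    // Sort hypothetical work items so the costliest runs first.
    let mut work = vec![("a", 10u64), ("b", 300), ("c", 42)];
    work.sort_by_key(|&(_, cost)| std::cmp::Reverse(cost));
    assert_eq!(work[0].0, "b");
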
diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs
new file mode 100644
index 000000000..0302c2881
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs
@@ -0,0 +1,314 @@
+//! Reading of the rustc metadata for rlibs and dylibs
+
+use std::fs::File;
+use std::io::Write;
+use std::path::Path;
+
+use object::write::{self, StandardSegment, Symbol, SymbolSection};
+use object::{
+ elf, pe, Architecture, BinaryFormat, Endianness, FileFlags, Object, ObjectSection,
+ SectionFlags, SectionKind, SymbolFlags, SymbolKind, SymbolScope,
+};
+
+use snap::write::FrameEncoder;
+
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::owning_ref::OwningRef;
+use rustc_data_structures::rustc_erase_owner;
+use rustc_data_structures::sync::MetadataRef;
+use rustc_metadata::fs::METADATA_FILENAME;
+use rustc_metadata::EncodedMetadata;
+use rustc_session::cstore::MetadataLoader;
+use rustc_session::Session;
+use rustc_target::abi::Endian;
+use rustc_target::spec::{RelocModel, Target};
+
+/// The default metadata loader. This is used by cg_llvm and cg_clif.
+///
+/// # Metadata location
+///
+/// <dl>
+/// <dt>rlib</dt>
+/// <dd>The metadata can be found in the `lib.rmeta` file inside of the ar archive.</dd>
+/// <dt>dylib</dt>
+/// <dd>The metadata can be found in the `.rustc` section of the shared library.</dd>
+/// </dl>
+pub struct DefaultMetadataLoader;
+
+fn load_metadata_with(
+ path: &Path,
+ f: impl for<'a> FnOnce(&'a [u8]) -> Result<&'a [u8], String>,
+) -> Result<MetadataRef, String> {
+ let file =
+ File::open(path).map_err(|e| format!("failed to open file '{}': {}", path.display(), e))?;
+ let data = unsafe { Mmap::map(file) }
+ .map_err(|e| format!("failed to mmap file '{}': {}", path.display(), e))?;
+ let metadata = OwningRef::new(data).try_map(f)?;
+ return Ok(rustc_erase_owner!(metadata.map_owner_box()));
+}
+
+impl MetadataLoader for DefaultMetadataLoader {
+ fn get_rlib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
+ load_metadata_with(path, |data| {
+ let archive = object::read::archive::ArchiveFile::parse(&*data)
+ .map_err(|e| format!("failed to parse rlib '{}': {}", path.display(), e))?;
+
+ for entry_result in archive.members() {
+ let entry = entry_result
+ .map_err(|e| format!("failed to parse rlib '{}': {}", path.display(), e))?;
+ if entry.name() == METADATA_FILENAME.as_bytes() {
+ let data = entry
+ .data(data)
+ .map_err(|e| format!("failed to parse rlib '{}': {}", path.display(), e))?;
+ return search_for_metadata(path, data, ".rmeta");
+ }
+ }
+
+ Err(format!("metadata not found in rlib '{}'", path.display()))
+ })
+ }
+
+ fn get_dylib_metadata(&self, _target: &Target, path: &Path) -> Result<MetadataRef, String> {
+ load_metadata_with(path, |data| search_for_metadata(path, data, ".rustc"))
+ }
+}
+
+fn search_for_metadata<'a>(
+ path: &Path,
+ bytes: &'a [u8],
+ section: &str,
+) -> Result<&'a [u8], String> {
+ let Ok(file) = object::File::parse(bytes) else {
+        // The parse above could fail for odd reasons like corruption, but for
+        // now we interpret a failure as meaning this target doesn't support
+        // metadata emission in object files, so the entire byte slice itself
+        // is probably a metadata file. Ideally, if necessary, we could at
+        // least check a prefix of the bytes to see whether it's an actual
+        // metadata object and, if not, forward the error along here.
+ return Ok(bytes);
+ };
+ file.section_by_name(section)
+ .ok_or_else(|| format!("no `{}` section in '{}'", section, path.display()))?
+ .data()
+ .map_err(|e| format!("failed to read {} section in '{}': {}", section, path.display(), e))
+}
+
+pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static>> {
+ let endianness = match sess.target.options.endian {
+ Endian::Little => Endianness::Little,
+ Endian::Big => Endianness::Big,
+ };
+ let architecture = match &sess.target.arch[..] {
+ "arm" => Architecture::Arm,
+ "aarch64" => Architecture::Aarch64,
+ "x86" => Architecture::I386,
+ "s390x" => Architecture::S390x,
+ "mips" => Architecture::Mips,
+ "mips64" => Architecture::Mips64,
+ "x86_64" => {
+ if sess.target.pointer_width == 32 {
+ Architecture::X86_64_X32
+ } else {
+ Architecture::X86_64
+ }
+ }
+ "powerpc" => Architecture::PowerPc,
+ "powerpc64" => Architecture::PowerPc64,
+ "riscv32" => Architecture::Riscv32,
+ "riscv64" => Architecture::Riscv64,
+ "sparc64" => Architecture::Sparc64,
+ // Unsupported architecture.
+ _ => return None,
+ };
+ let binary_format = if sess.target.is_like_osx {
+ BinaryFormat::MachO
+ } else if sess.target.is_like_windows {
+ BinaryFormat::Coff
+ } else {
+ BinaryFormat::Elf
+ };
+
+ let mut file = write::Object::new(binary_format, architecture, endianness);
+ let e_flags = match architecture {
+ Architecture::Mips => {
+ let arch = match sess.target.options.cpu.as_ref() {
+ "mips1" => elf::EF_MIPS_ARCH_1,
+ "mips2" => elf::EF_MIPS_ARCH_2,
+ "mips3" => elf::EF_MIPS_ARCH_3,
+ "mips4" => elf::EF_MIPS_ARCH_4,
+ "mips5" => elf::EF_MIPS_ARCH_5,
+ s if s.contains("r6") => elf::EF_MIPS_ARCH_32R6,
+ _ => elf::EF_MIPS_ARCH_32R2,
+ };
+ // The only ABI LLVM supports for 32-bit MIPS CPUs is o32.
+ let mut e_flags = elf::EF_MIPS_CPIC | elf::EF_MIPS_ABI_O32 | arch;
+ if sess.target.options.relocation_model != RelocModel::Static {
+ e_flags |= elf::EF_MIPS_PIC;
+ }
+ if sess.target.options.cpu.contains("r6") {
+ e_flags |= elf::EF_MIPS_NAN2008;
+ }
+ e_flags
+ }
+ Architecture::Mips64 => {
+ // copied from `mips64el-linux-gnuabi64-gcc foo.c -c`
+ let e_flags = elf::EF_MIPS_CPIC
+ | elf::EF_MIPS_PIC
+ | if sess.target.options.cpu.contains("r6") {
+ elf::EF_MIPS_ARCH_64R6 | elf::EF_MIPS_NAN2008
+ } else {
+ elf::EF_MIPS_ARCH_64R2
+ };
+ e_flags
+ }
+ Architecture::Riscv64 if sess.target.options.features.contains("+d") => {
+            // copied from `riscv64-linux-gnu-gcc foo.c -c`; note, though,
+            // that the `+d` target feature indicates whether the double-float
+            // ABI is enabled.
+ let e_flags = elf::EF_RISCV_RVC | elf::EF_RISCV_FLOAT_ABI_DOUBLE;
+ e_flags
+ }
+ _ => 0,
+ };
+ // adapted from LLVM's `MCELFObjectTargetWriter::getOSABI`
+ let os_abi = match sess.target.options.os.as_ref() {
+ "hermit" => elf::ELFOSABI_STANDALONE,
+ "freebsd" => elf::ELFOSABI_FREEBSD,
+ "solaris" => elf::ELFOSABI_SOLARIS,
+ _ => elf::ELFOSABI_NONE,
+ };
+ let abi_version = 0;
+ file.flags = FileFlags::Elf { os_abi, abi_version, e_flags };
+ Some(file)
+}
+
+pub enum MetadataPosition {
+ First,
+ Last,
+}
+
+// For rlibs we "pack" rustc metadata into a dummy object file. When rustc
+// creates a dylib crate type it will pass `--whole-archive` (or the
+// platform equivalent) to include all object files from an rlib into the
+// final dylib itself. This causes linkers to iterate and try to include all
+// files located in an archive, so if metadata is stored in an archive then
+// it needs to be of a form that the linker will be able to process.
+//
+// Note, though, that we don't actually want this metadata to show up in any
+// final output of the compiler. Instead this is purely for rustc's own
+// metadata tracking purposes.
+//
+// With the above in mind, each "flavor" of object format gets special
+// handling here depending on the target:
+//
+// * MachO - macos-like targets will insert the metadata into a section that
+//   is sort of fake dwarf debug info. Inspecting the source of the macos
+//   linker shows that such sections are skipped automatically because they
+//   are not in an allowlist of otherwise well-known dwarf section names that
+//   go into the final artifact.
+//
+// * WebAssembly - we actually don't have any container format for this
+// target. WebAssembly doesn't support the `dylib` crate type anyway so
+// there's no need for us to support this at this time. Consequently the
+// metadata bytes are simply stored as-is into an rlib.
+//
+// * COFF - Windows-like targets create an object with a section that has
+// the `IMAGE_SCN_LNK_REMOVE` flag set which ensures that if the linker
+// ever sees the section it doesn't process it and it's removed.
+//
+// * ELF - All other targets are similar to Windows in that there's a
+// `SHF_EXCLUDE` flag we can set on sections in an object file to get
+// automatically removed from the final output.
+pub fn create_rmeta_file(sess: &Session, metadata: &[u8]) -> (Vec<u8>, MetadataPosition) {
+ let Some(mut file) = create_object_file(sess) else {
+ // This is used to handle all "other" targets. This includes targets
+ // in two categories:
+ //
+ // * Some targets don't have support in the `object` crate just yet
+ // to write an object file. These targets are likely to get filled
+ // out over time.
+ //
+ // * Targets like WebAssembly don't support dylibs, so the purpose
+ // of putting metadata in object files, to support linking rlibs
+ // into dylibs, is moot.
+ //
+ // In both of these cases it means that linking into dylibs will
+ // not be supported by rustc. This doesn't matter for targets like
+ // WebAssembly and for targets not supported by the `object` crate
+ // yet it means that work will need to be done in the `object` crate
+ // to add a case above.
+ return (metadata.to_vec(), MetadataPosition::Last);
+ };
+ let section = file.add_section(
+ file.segment_name(StandardSegment::Debug).to_vec(),
+ b".rmeta".to_vec(),
+ SectionKind::Debug,
+ );
+ match file.format() {
+ BinaryFormat::Coff => {
+ file.section_mut(section).flags =
+ SectionFlags::Coff { characteristics: pe::IMAGE_SCN_LNK_REMOVE };
+ }
+ BinaryFormat::Elf => {
+ file.section_mut(section).flags =
+ SectionFlags::Elf { sh_flags: elf::SHF_EXCLUDE as u64 };
+ }
+ _ => {}
+ };
+ file.append_section_data(section, metadata, 1);
+ (file.write().unwrap(), MetadataPosition::First)
+}
+
+// Historical note:
+//
+// When using link.exe it was seen that the section name `.note.rustc`
+// was getting shortened to `.note.ru`, and according to the PE and COFF
+// specification:
+//
+// > Executable images do not use a string table and do not support
+// > section names longer than 8 characters
+//
+// https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
+//
+// As a result, we choose a slightly shorter name! As to why
+// `.note.rustc` works on MinGW, see
+// https://github.com/llvm/llvm-project/blob/llvmorg-12.0.0/lld/COFF/Writer.cpp#L1190-L1197
+pub fn create_compressed_metadata_file(
+ sess: &Session,
+ metadata: &EncodedMetadata,
+ symbol_name: &str,
+) -> Vec<u8> {
+ let mut compressed = rustc_metadata::METADATA_HEADER.to_vec();
+ FrameEncoder::new(&mut compressed).write_all(metadata.raw_data()).unwrap();
+ let Some(mut file) = create_object_file(sess) else {
+ return compressed.to_vec();
+ };
+ let section = file.add_section(
+ file.segment_name(StandardSegment::Data).to_vec(),
+ b".rustc".to_vec(),
+ SectionKind::ReadOnlyData,
+ );
+ match file.format() {
+ BinaryFormat::Elf => {
+ // Explicitly set no flags to avoid SHF_ALLOC default for data section.
+ file.section_mut(section).flags = SectionFlags::Elf { sh_flags: 0 };
+ }
+ _ => {}
+ };
+ let offset = file.append_section_data(section, &compressed, 1);
+
+ // For MachO and probably PE this is necessary to prevent the linker from throwing away the
+    // .rustc section. For ELF this isn't necessary, but it also does no harm.
+ file.add_symbol(Symbol {
+ name: symbol_name.as_bytes().to_vec(),
+ value: offset,
+ size: compressed.len() as u64,
+ kind: SymbolKind::Data,
+ scope: SymbolScope::Dynamic,
+ weak: false,
+ section: SymbolSection::Section(section),
+ flags: SymbolFlags::None,
+ });
+
+ file.write().unwrap()
+}
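
A self-contained sketch of the `.rmeta` packing above, hard-coding a
little-endian x86_64 ELF object where the real code derives format,
architecture, and endianness from the `Session` (the `SHF_EXCLUDE` section
flag is omitted here for brevity):

    use object::write::{Object, StandardSegment};
    use object::{Architecture, BinaryFormat, Endianness, SectionKind};

    // Build an ELF object holding only a .rmeta debug section, as
    // create_rmeta_file does for ELF targets.
    let mut file =
        Object::new(BinaryFormat::Elf, Architecture::X86_64, Endianness::Little);
    let section = file.add_section(
        file.segment_name(StandardSegment::Debug).to_vec(),
        b".rmeta".to_vec(),
        SectionKind::Debug,
    );
    file.append_section_data(section, b"rustc metadata bytes", 1);
    let _bytes = file.write().unwrap();
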
diff --git a/compiler/rustc_codegen_ssa/src/back/mod.rs b/compiler/rustc_codegen_ssa/src/back/mod.rs
new file mode 100644
index 000000000..d11ed54eb
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/mod.rs
@@ -0,0 +1,9 @@
+pub mod archive;
+pub mod command;
+pub mod link;
+pub mod linker;
+pub mod lto;
+pub mod metadata;
+pub mod rpath;
+pub mod symbol_export;
+pub mod write;
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath.rs b/compiler/rustc_codegen_ssa/src/back/rpath.rs
new file mode 100644
index 000000000..0b5656c9a
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/rpath.rs
@@ -0,0 +1,114 @@
+use pathdiff::diff_paths;
+use rustc_data_structures::fx::FxHashSet;
+use std::env;
+use std::fs;
+use std::path::{Path, PathBuf};
+
+pub struct RPathConfig<'a> {
+ pub libs: &'a [&'a Path],
+ pub out_filename: PathBuf,
+ pub is_like_osx: bool,
+ pub has_rpath: bool,
+ pub linker_is_gnu: bool,
+}
+
+pub fn get_rpath_flags(config: &mut RPathConfig<'_>) -> Vec<String> {
+ // No rpath on windows
+ if !config.has_rpath {
+ return Vec::new();
+ }
+
+ debug!("preparing the RPATH!");
+
+ let rpaths = get_rpaths(config);
+ let mut flags = rpaths_to_flags(&rpaths);
+
+ if config.linker_is_gnu {
+ // Use DT_RUNPATH instead of DT_RPATH if available
+ flags.push("-Wl,--enable-new-dtags".to_owned());
+
+        // Set DF_ORIGIN so that $ORIGIN is substituted
+ flags.push("-Wl,-z,origin".to_owned());
+ }
+
+ flags
+}
+
+fn rpaths_to_flags(rpaths: &[String]) -> Vec<String> {
+ let mut ret = Vec::with_capacity(rpaths.len()); // the minimum needed capacity
+
+ for rpath in rpaths {
+ if rpath.contains(',') {
+ ret.push("-Wl,-rpath".into());
+ ret.push("-Xlinker".into());
+ ret.push(rpath.clone());
+ } else {
+ ret.push(format!("-Wl,-rpath,{}", &(*rpath)));
+ }
+ }
+
+ ret
+}
+
+fn get_rpaths(config: &mut RPathConfig<'_>) -> Vec<String> {
+ debug!("output: {:?}", config.out_filename.display());
+ debug!("libs:");
+ for libpath in config.libs {
+ debug!(" {:?}", libpath.display());
+ }
+
+ // Use relative paths to the libraries. Binaries can be moved
+ // as long as they maintain the relative relationship to the
+ // crates they depend on.
+ let rpaths = get_rpaths_relative_to_output(config);
+
+ debug!("rpaths:");
+ for rpath in &rpaths {
+ debug!(" {}", rpath);
+ }
+
+ // Remove duplicates
+ minimize_rpaths(&rpaths)
+}
+
+fn get_rpaths_relative_to_output(config: &mut RPathConfig<'_>) -> Vec<String> {
+ config.libs.iter().map(|a| get_rpath_relative_to_output(config, a)).collect()
+}
+
+fn get_rpath_relative_to_output(config: &mut RPathConfig<'_>, lib: &Path) -> String {
+ // Mac doesn't appear to support $ORIGIN
+ let prefix = if config.is_like_osx { "@loader_path" } else { "$ORIGIN" };
+
+ let cwd = env::current_dir().unwrap();
+ let mut lib = fs::canonicalize(&cwd.join(lib)).unwrap_or_else(|_| cwd.join(lib));
+ lib.pop(); // strip filename
+ let mut output = cwd.join(&config.out_filename);
+ output.pop(); // strip filename
+ let output = fs::canonicalize(&output).unwrap_or(output);
+ let relative = path_relative_from(&lib, &output)
+ .unwrap_or_else(|| panic!("couldn't create relative path from {:?} to {:?}", output, lib));
+ // FIXME (#9639): This needs to handle non-utf8 paths
+ format!("{}/{}", prefix, relative.to_str().expect("non-utf8 component in path"))
+}
+
+// This routine is adapted from the *old* Path's `path_relative_from`
+// function, which works differently from the new `relative_from` function.
+// In particular, this handles the case on unix where both paths are
+// absolute but with only the root as the common directory.
+fn path_relative_from(path: &Path, base: &Path) -> Option<PathBuf> {
+ diff_paths(path, base)
+}
+
+fn minimize_rpaths(rpaths: &[String]) -> Vec<String> {
+ let mut set = FxHashSet::default();
+ let mut minimized = Vec::new();
+ for rpath in rpaths {
+ if set.insert(rpath) {
+ minimized.push(rpath.clone());
+ }
+ }
+ minimized
+}
+
+#[cfg(all(unix, test))]
+mod tests;
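
The heavy lifting in `path_relative_from` is done by the `pathdiff` crate; a
quick sketch of what it produces for the common layout of a library directory
seen from the output directory:

    use pathdiff::diff_paths;
    use std::path::{Path, PathBuf};

    // /usr/lib relative to /usr/bin is ../lib, which becomes $ORIGIN/../lib.
    let rel = diff_paths(Path::new("/usr/lib"), Path::new("/usr/bin"));
    assert_eq!(rel, Some(PathBuf::from("../lib")));
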
diff --git a/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
new file mode 100644
index 000000000..604f19144
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/rpath/tests.rs
@@ -0,0 +1,72 @@
+use super::RPathConfig;
+use super::{get_rpath_relative_to_output, minimize_rpaths, rpaths_to_flags};
+use std::path::{Path, PathBuf};
+
+#[test]
+fn test_rpaths_to_flags() {
+ let flags = rpaths_to_flags(&["path1".to_string(), "path2".to_string()]);
+ assert_eq!(flags, ["-Wl,-rpath,path1", "-Wl,-rpath,path2"]);
+}
+
+#[test]
+fn test_minimize1() {
+ let res = minimize_rpaths(&["rpath1".to_string(), "rpath2".to_string(), "rpath1".to_string()]);
+ assert!(res == ["rpath1", "rpath2",]);
+}
+
+#[test]
+fn test_minimize2() {
+ let res = minimize_rpaths(&[
+ "1a".to_string(),
+ "2".to_string(),
+ "2".to_string(),
+ "1a".to_string(),
+ "4a".to_string(),
+ "1a".to_string(),
+ "2".to_string(),
+ "3".to_string(),
+ "4a".to_string(),
+ "3".to_string(),
+ ]);
+ assert!(res == ["1a", "2", "4a", "3",]);
+}
+
+#[test]
+fn test_rpath_relative() {
+ if cfg!(target_os = "macos") {
+ let config = &mut RPathConfig {
+ libs: &[],
+ has_rpath: true,
+ is_like_osx: true,
+ linker_is_gnu: false,
+ out_filename: PathBuf::from("bin/rustc"),
+ };
+ let res = get_rpath_relative_to_output(config, Path::new("lib/libstd.so"));
+ assert_eq!(res, "@loader_path/../lib");
+ } else {
+ let config = &mut RPathConfig {
+ libs: &[],
+ out_filename: PathBuf::from("bin/rustc"),
+ has_rpath: true,
+ is_like_osx: false,
+ linker_is_gnu: true,
+ };
+ let res = get_rpath_relative_to_output(config, Path::new("lib/libstd.so"));
+ assert_eq!(res, "$ORIGIN/../lib");
+ }
+}
+
+#[test]
+fn test_xlinker() {
+ let args = rpaths_to_flags(&["a/normal/path".to_string(), "a,comma,path".to_string()]);
+
+ assert_eq!(
+ args,
+ vec![
+ "-Wl,-rpath,a/normal/path".to_string(),
+ "-Wl,-rpath".to_string(),
+ "-Xlinker".to_string(),
+ "a,comma,path".to_string()
+ ]
+ );
+}
diff --git a/compiler/rustc_codegen_ssa/src/back/symbol_export.rs b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
new file mode 100644
index 000000000..e6b605575
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/symbol_export.rs
@@ -0,0 +1,590 @@
+use std::collections::hash_map::Entry::*;
+
+use rustc_ast::expand::allocator::ALLOCATOR_METHODS;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
+use rustc_hir::Node;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::middle::exported_symbols::{
+ metadata_symbol_name, ExportedSymbol, SymbolExportInfo, SymbolExportKind, SymbolExportLevel,
+};
+use rustc_middle::ty::query::{ExternProviders, Providers};
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::{self, SymbolName, TyCtxt};
+use rustc_session::config::CrateType;
+use rustc_target::spec::SanitizerSet;
+
+pub fn threshold(tcx: TyCtxt<'_>) -> SymbolExportLevel {
+ crates_export_threshold(&tcx.sess.crate_types())
+}
+
+fn crate_export_threshold(crate_type: CrateType) -> SymbolExportLevel {
+ match crate_type {
+ CrateType::Executable | CrateType::Staticlib | CrateType::ProcMacro | CrateType::Cdylib => {
+ SymbolExportLevel::C
+ }
+ CrateType::Rlib | CrateType::Dylib => SymbolExportLevel::Rust,
+ }
+}
+
+pub fn crates_export_threshold(crate_types: &[CrateType]) -> SymbolExportLevel {
+ if crate_types
+ .iter()
+ .any(|&crate_type| crate_export_threshold(crate_type) == SymbolExportLevel::Rust)
+ {
+ SymbolExportLevel::Rust
+ } else {
+ SymbolExportLevel::C
+ }
+}
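+
+// For example, `--crate-type cdylib` alone keeps the threshold at
+// `SymbolExportLevel::C`, while adding an rlib raises it to
+// `SymbolExportLevel::Rust`:
+//
+//     assert_eq!(crates_export_threshold(&[CrateType::Cdylib]), SymbolExportLevel::C);
+//     assert_eq!(
+//         crates_export_threshold(&[CrateType::Cdylib, CrateType::Rlib]),
+//         SymbolExportLevel::Rust,
+//     );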
+
+fn reachable_non_generics_provider(tcx: TyCtxt<'_>, cnum: CrateNum) -> DefIdMap<SymbolExportInfo> {
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ if !tcx.sess.opts.output_types.should_codegen() {
+ return Default::default();
+ }
+
+ // Check to see if this crate is a "special runtime crate". These
+ // crates, implementation details of the standard library, typically
+ // have a bunch of `pub extern` and `#[no_mangle]` functions as the
+ // ABI between them. We don't want their symbols to have a `C`
+ // export level, however, as they're just implementation details.
+ // Down below we'll hardwire all of the symbols to the `Rust` export
+ // level instead.
+ let special_runtime_crate =
+ tcx.is_panic_runtime(LOCAL_CRATE) || tcx.is_compiler_builtins(LOCAL_CRATE);
+
+ let mut reachable_non_generics: DefIdMap<_> = tcx
+ .reachable_set(())
+ .iter()
+ .filter_map(|&def_id| {
+ // We want to ignore some FFI functions that are not exposed from
+ // this crate. Reachable FFI functions can be lumped into two
+ // categories:
+ //
+ // 1. Those that are included statically via a static library
+ // 2. Those included otherwise (e.g., dynamically or via a framework)
+ //
+ // Although our LLVM module is not literally emitting code for the
+ // statically included symbols, it's an export of our library which
+ // needs to be passed on to the linker and encoded in the metadata.
+ //
+ // As a result, if this id is an FFI item (foreign item) then we only
+ // let it through if it's included statically.
+ match tcx.hir().get_by_def_id(def_id) {
+ Node::ForeignItem(..) => {
+ tcx.is_statically_included_foreign_item(def_id).then_some(def_id)
+ }
+
+ // Only consider nodes that actually have exported symbols.
+ Node::Item(&hir::Item {
+ kind: hir::ItemKind::Static(..) | hir::ItemKind::Fn(..),
+ ..
+ })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) => {
+ let generics = tcx.generics_of(def_id);
+ if !generics.requires_monomorphization(tcx)
+ // Functions marked with #[inline] are codegened with "internal"
+ // linkage and are not exported unless marked with an extern
+                    // indicator.
+ && (!Instance::mono(tcx, def_id.to_def_id()).def.generates_cgu_internal_copy(tcx)
+ || tcx.codegen_fn_attrs(def_id.to_def_id()).contains_extern_indicator())
+ {
+ Some(def_id)
+ } else {
+ None
+ }
+ }
+
+ _ => None,
+ }
+ })
+ .map(|def_id| {
+ let (export_level, used) = if special_runtime_crate {
+ let name = tcx.symbol_name(Instance::mono(tcx, def_id.to_def_id())).name;
+                // We won't link correctly if these symbols are stripped during LTO.
+ let used = match name {
+ "rust_eh_personality"
+ | "rust_eh_register_frames"
+ | "rust_eh_unregister_frames" => true,
+ _ => false,
+ };
+ (SymbolExportLevel::Rust, used)
+ } else {
+ (symbol_export_level(tcx, def_id.to_def_id()), false)
+ };
+ let codegen_attrs = tcx.codegen_fn_attrs(def_id.to_def_id());
+ debug!(
+ "EXPORTED SYMBOL (local): {} ({:?})",
+ tcx.symbol_name(Instance::mono(tcx, def_id.to_def_id())),
+ export_level
+ );
+ (def_id.to_def_id(), SymbolExportInfo {
+ level: export_level,
+ kind: if tcx.is_static(def_id.to_def_id()) {
+ if codegen_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
+ SymbolExportKind::Tls
+ } else {
+ SymbolExportKind::Data
+ }
+ } else {
+ SymbolExportKind::Text
+ },
+ used: codegen_attrs.flags.contains(CodegenFnAttrFlags::USED)
+ || codegen_attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) || used,
+ })
+ })
+ .collect();
+
+ if let Some(id) = tcx.proc_macro_decls_static(()) {
+ reachable_non_generics.insert(
+ id.to_def_id(),
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Data,
+ used: false,
+ },
+ );
+ }
+
+ reachable_non_generics
+}
+
+fn is_reachable_non_generic_provider_local(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ let export_threshold = threshold(tcx);
+
+ if let Some(&info) = tcx.reachable_non_generics(def_id.krate).get(&def_id) {
+ info.level.is_below_threshold(export_threshold)
+ } else {
+ false
+ }
+}
+
+fn is_reachable_non_generic_provider_extern(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.reachable_non_generics(def_id.krate).contains_key(&def_id)
+}
+
+fn exported_symbols_provider_local<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cnum: CrateNum,
+) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ if !tcx.sess.opts.output_types.should_codegen() {
+ return &[];
+ }
+
+ let mut symbols: Vec<_> = tcx
+ .reachable_non_generics(LOCAL_CRATE)
+ .iter()
+ .map(|(&def_id, &info)| (ExportedSymbol::NonGeneric(def_id), info))
+ .collect();
+
+ if tcx.entry_fn(()).is_some() {
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, "main"));
+
+ symbols.push((
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Text,
+ used: false,
+ },
+ ));
+ }
+
+ if tcx.allocator_kind(()).is_some() {
+ for method in ALLOCATOR_METHODS {
+ let symbol_name = format!("__rust_{}", method.name);
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
+
+ symbols.push((
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::Rust,
+ kind: SymbolExportKind::Text,
+ used: false,
+ },
+ ));
+ }
+ }
+
+ if tcx.sess.instrument_coverage() || tcx.sess.opts.cg.profile_generate.enabled() {
+ // These are weak symbols that point to the profile version and the
+ // profile name, which need to be treated as exported so LTO doesn't nix
+ // them.
+ const PROFILER_WEAK_SYMBOLS: [&str; 2] =
+ ["__llvm_profile_raw_version", "__llvm_profile_filename"];
+
+ symbols.extend(PROFILER_WEAK_SYMBOLS.iter().map(|sym| {
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, sym));
+ (
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Data,
+ used: false,
+ },
+ )
+ }));
+ }
+
+ if tcx.sess.opts.unstable_opts.sanitizer.contains(SanitizerSet::MEMORY) {
+ let mut msan_weak_symbols = Vec::new();
+
+ // Similar to profiling, preserve weak msan symbol during LTO.
+ if tcx.sess.opts.unstable_opts.sanitizer_recover.contains(SanitizerSet::MEMORY) {
+ msan_weak_symbols.push("__msan_keep_going");
+ }
+
+ if tcx.sess.opts.unstable_opts.sanitizer_memory_track_origins != 0 {
+ msan_weak_symbols.push("__msan_track_origins");
+ }
+
+ symbols.extend(msan_weak_symbols.into_iter().map(|sym| {
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, sym));
+ (
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Data,
+ used: false,
+ },
+ )
+ }));
+ }
+
+ if tcx.sess.crate_types().contains(&CrateType::Dylib)
+ || tcx.sess.crate_types().contains(&CrateType::ProcMacro)
+ {
+ let symbol_name = metadata_symbol_name(tcx);
+ let exported_symbol = ExportedSymbol::NoDefId(SymbolName::new(tcx, &symbol_name));
+
+ symbols.push((
+ exported_symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::C,
+ kind: SymbolExportKind::Data,
+ used: true,
+ },
+ ));
+ }
+
+ if tcx.sess.opts.share_generics() && tcx.local_crate_exports_generics() {
+ use rustc_middle::mir::mono::{Linkage, MonoItem, Visibility};
+ use rustc_middle::ty::InstanceDef;
+
+ // Normally, we require that shared monomorphizations are not hidden,
+ // because if we want to re-use a monomorphization from a Rust dylib, it
+ // needs to be exported.
+        // However, on platforms that don't allow for Rust dylibs, having
+        // external linkage is enough for a monomorphization to be linked against.
+ let need_visibility = tcx.sess.target.dynamic_linking && !tcx.sess.target.only_cdylib;
+
+ let (_, cgus) = tcx.collect_and_partition_mono_items(());
+
+ for (mono_item, &(linkage, visibility)) in cgus.iter().flat_map(|cgu| cgu.items().iter()) {
+ if linkage != Linkage::External {
+ // We can only re-use things with external linkage, otherwise
+ // we'll get a linker error
+ continue;
+ }
+
+ if need_visibility && visibility == Visibility::Hidden {
+ // If we potentially share things from Rust dylibs, they must
+ // not be hidden
+ continue;
+ }
+
+ match *mono_item {
+ MonoItem::Fn(Instance { def: InstanceDef::Item(def), substs }) => {
+ if substs.non_erasable_generics().next().is_some() {
+ let symbol = ExportedSymbol::Generic(def.did, substs);
+ symbols.push((
+ symbol,
+ SymbolExportInfo {
+ level: SymbolExportLevel::Rust,
+ kind: SymbolExportKind::Text,
+ used: false,
+ },
+ ));
+ }
+ }
+ MonoItem::Fn(Instance { def: InstanceDef::DropGlue(_, Some(ty)), substs }) => {
+ // A little sanity-check
+ debug_assert_eq!(
+ substs.non_erasable_generics().next(),
+ Some(GenericArgKind::Type(ty))
+ );
+ symbols.push((
+ ExportedSymbol::DropGlue(ty),
+ SymbolExportInfo {
+ level: SymbolExportLevel::Rust,
+ kind: SymbolExportKind::Text,
+ used: false,
+ },
+ ));
+ }
+ _ => {
+ // Any other symbols don't qualify for sharing
+ }
+ }
+ }
+ }
+
+ // Sort so we get a stable incr. comp. hash.
+ symbols.sort_by_cached_key(|s| s.0.symbol_name_for_local_instance(tcx));
+
+ tcx.arena.alloc_from_iter(symbols)
+}
+
+fn upstream_monomorphizations_provider(
+ tcx: TyCtxt<'_>,
+ (): (),
+) -> DefIdMap<FxHashMap<SubstsRef<'_>, CrateNum>> {
+ let cnums = tcx.crates(());
+
+ let mut instances: DefIdMap<FxHashMap<_, _>> = Default::default();
+
+ let drop_in_place_fn_def_id = tcx.lang_items().drop_in_place_fn();
+
+ for &cnum in cnums.iter() {
+ for (exported_symbol, _) in tcx.exported_symbols(cnum).iter() {
+ let (def_id, substs) = match *exported_symbol {
+ ExportedSymbol::Generic(def_id, substs) => (def_id, substs),
+ ExportedSymbol::DropGlue(ty) => {
+ if let Some(drop_in_place_fn_def_id) = drop_in_place_fn_def_id {
+ (drop_in_place_fn_def_id, tcx.intern_substs(&[ty.into()]))
+ } else {
+                        // `drop_in_place` does not exist, so don't try
+                        // to use it.
+ continue;
+ }
+ }
+ ExportedSymbol::NonGeneric(..) | ExportedSymbol::NoDefId(..) => {
+                    // These are not monomorphizations.
+ continue;
+ }
+ };
+
+ let substs_map = instances.entry(def_id).or_default();
+
+ match substs_map.entry(substs) {
+ Occupied(mut e) => {
+ // If there are multiple monomorphizations available,
+ // we select one deterministically.
+ let other_cnum = *e.get();
+ if tcx.stable_crate_id(other_cnum) > tcx.stable_crate_id(cnum) {
+ e.insert(cnum);
+ }
+ }
+ Vacant(e) => {
+ e.insert(cnum);
+ }
+ }
+ }
+ }
+
+ instances
+}
+
+fn upstream_monomorphizations_for_provider(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+) -> Option<&FxHashMap<SubstsRef<'_>, CrateNum>> {
+ debug_assert!(!def_id.is_local());
+ tcx.upstream_monomorphizations(()).get(&def_id)
+}
+
+fn upstream_drop_glue_for_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+) -> Option<CrateNum> {
+ if let Some(def_id) = tcx.lang_items().drop_in_place_fn() {
+ tcx.upstream_monomorphizations_for(def_id).and_then(|monos| monos.get(&substs).cloned())
+ } else {
+ None
+ }
+}
+
+fn is_unreachable_local_definition_provider(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+ !tcx.reachable_set(()).contains(&def_id)
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.reachable_non_generics = reachable_non_generics_provider;
+ providers.is_reachable_non_generic = is_reachable_non_generic_provider_local;
+ providers.exported_symbols = exported_symbols_provider_local;
+ providers.upstream_monomorphizations = upstream_monomorphizations_provider;
+ providers.is_unreachable_local_definition = is_unreachable_local_definition_provider;
+ providers.upstream_drop_glue_for = upstream_drop_glue_for_provider;
+ providers.wasm_import_module_map = wasm_import_module_map;
+}
+
+pub fn provide_extern(providers: &mut ExternProviders) {
+ providers.is_reachable_non_generic = is_reachable_non_generic_provider_extern;
+ providers.upstream_monomorphizations_for = upstream_monomorphizations_for_provider;
+}
+
+fn symbol_export_level(tcx: TyCtxt<'_>, sym_def_id: DefId) -> SymbolExportLevel {
+ // We export anything that's not mangled at the "C" layer as it probably has
+ // to do with ABI concerns. We do not, however, apply such treatment to
+ // special symbols in the standard library for various plumbing between
+    // core/std/allocators/etc. For example, symbols used to hook up allocation
+    // are not considered for export.
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(sym_def_id);
+ let is_extern = codegen_fn_attrs.contains_extern_indicator();
+ let std_internal =
+ codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
+
+ if is_extern && !std_internal {
+ let target = &tcx.sess.target.llvm_target;
+ // WebAssembly cannot export data symbols, so reduce their export level
+ if target.contains("emscripten") {
+ if let Some(Node::Item(&hir::Item { kind: hir::ItemKind::Static(..), .. })) =
+ tcx.hir().get_if_local(sym_def_id)
+ {
+ return SymbolExportLevel::Rust;
+ }
+ }
+
+ SymbolExportLevel::C
+ } else {
+ SymbolExportLevel::Rust
+ }
+}
+
+/// This is the symbol name of the given instance instantiated in a specific crate.
+pub fn symbol_name_for_instance_in_crate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ symbol: ExportedSymbol<'tcx>,
+ instantiating_crate: CrateNum,
+) -> String {
+ // If this is something instantiated in the local crate then we might
+ // already have cached the name as a query result.
+ if instantiating_crate == LOCAL_CRATE {
+ return symbol.symbol_name_for_local_instance(tcx).to_string();
+ }
+
+ // This is something instantiated in an upstream crate, so we have to use
+ // the slower (because uncached) version of computing the symbol name.
+ match symbol {
+ ExportedSymbol::NonGeneric(def_id) => {
+ rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+ tcx,
+ Instance::mono(tcx, def_id),
+ instantiating_crate,
+ )
+ }
+ ExportedSymbol::Generic(def_id, substs) => {
+ rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+ tcx,
+ Instance::new(def_id, substs),
+ instantiating_crate,
+ )
+ }
+ ExportedSymbol::DropGlue(ty) => rustc_symbol_mangling::symbol_name_for_instance_in_crate(
+ tcx,
+ Instance::resolve_drop_in_place(tcx, ty),
+ instantiating_crate,
+ ),
+ ExportedSymbol::NoDefId(symbol_name) => symbol_name.to_string(),
+ }
+}
+
+/// This is the symbol name of the given instance as seen by the linker.
+///
+/// On 32-bit Windows symbols are decorated according to their calling conventions.
+pub fn linking_symbol_name_for_instance_in_crate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ symbol: ExportedSymbol<'tcx>,
+ instantiating_crate: CrateNum,
+) -> String {
+ use rustc_target::abi::call::Conv;
+
+ let mut undecorated = symbol_name_for_instance_in_crate(tcx, symbol, instantiating_crate);
+
+ let target = &tcx.sess.target;
+ if !target.is_like_windows {
+        // Mach-O has a global "_" prefix and the `object` crate will handle it.
+ // ELF does not have any symbol decorations.
+ return undecorated;
+ }
+
+ let x86 = match &target.arch[..] {
+ "x86" => true,
+ "x86_64" => false,
+ // Only x86/64 use symbol decorations.
+ _ => return undecorated,
+ };
+
+ let instance = match symbol {
+ ExportedSymbol::NonGeneric(def_id) | ExportedSymbol::Generic(def_id, _)
+ if tcx.is_static(def_id) =>
+ {
+ None
+ }
+ ExportedSymbol::NonGeneric(def_id) => Some(Instance::mono(tcx, def_id)),
+ ExportedSymbol::Generic(def_id, substs) => Some(Instance::new(def_id, substs)),
+        // DropGlue always uses the Rust calling convention and thus follows the target's
+        // default symbol decoration scheme.
+ ExportedSymbol::DropGlue(..) => None,
+        // NoDefId always follows the target's default symbol decoration scheme.
+ ExportedSymbol::NoDefId(..) => None,
+ };
+
+ let (conv, args) = instance
+ .map(|i| {
+ tcx.fn_abi_of_instance(ty::ParamEnv::reveal_all().and((i, ty::List::empty())))
+ .unwrap_or_else(|_| bug!("fn_abi_of_instance({i:?}) failed"))
+ })
+ .map(|fnabi| (fnabi.conv, &fnabi.args[..]))
+ .unwrap_or((Conv::Rust, &[]));
+
+    // Decorate symbols with prefixes, suffixes, and the total number of bytes of arguments.
+ // Reference: https://docs.microsoft.com/en-us/cpp/build/reference/decorated-names?view=msvc-170
+ let (prefix, suffix) = match conv {
+ Conv::X86Fastcall => ("@", "@"),
+ Conv::X86Stdcall => ("_", "@"),
+ Conv::X86VectorCall => ("", "@@"),
+ _ => {
+ if x86 {
+ undecorated.insert(0, '_');
+ }
+ return undecorated;
+ }
+ };
+
+ let args_in_bytes: u64 = args
+ .iter()
+ .map(|abi| abi.layout.size.bytes().next_multiple_of(target.pointer_width as u64 / 8))
+ .sum();
+ format!("{prefix}{undecorated}{suffix}{args_in_bytes}")
+}
+
+fn wasm_import_module_map(tcx: TyCtxt<'_>, cnum: CrateNum) -> FxHashMap<DefId, String> {
+ // Build up a map from DefId to a `NativeLib` structure, where
+ // `NativeLib` internally contains information about
+ // `#[link(wasm_import_module = "...")]` for example.
+ let native_libs = tcx.native_libraries(cnum);
+
+ let def_id_to_native_lib = native_libs
+ .iter()
+ .filter_map(|lib| lib.foreign_module.map(|id| (id, lib)))
+ .collect::<FxHashMap<_, _>>();
+
+ let mut ret = FxHashMap::default();
+ for (def_id, lib) in tcx.foreign_modules(cnum).iter() {
+ let module = def_id_to_native_lib.get(&def_id).and_then(|s| s.wasm_import_module);
+ let Some(module) = module else { continue };
+ ret.extend(lib.foreign_items.iter().map(|id| {
+ assert_eq!(id.krate, cnum);
+ (*id, module.to_string())
+ }));
+ }
+
+ ret
+}
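
Concretely, the decoration table above maps a 32-bit x86 Windows symbol with 8
bytes of arguments as follows (illustrative strings only; the real code
derives the byte count from the instance's fn ABI):

    // stdcall: "_" + name + "@" + bytes; fastcall: "@" + name + "@" + bytes;
    // vectorcall: name + "@@" + bytes; any other conv on x86: "_" + name.
    let (name, bytes) = ("func", 8u64);
    assert_eq!(format!("_{name}@{bytes}"), "_func@8"); // Conv::X86Stdcall
    assert_eq!(format!("@{name}@{bytes}"), "@func@8"); // Conv::X86Fastcall
    assert_eq!(format!("{name}@@{bytes}"), "func@@8"); // Conv::X86VectorCall
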
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
new file mode 100644
index 000000000..1b5ad8710
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -0,0 +1,2015 @@
+use super::link::{self, ensure_removed};
+use super::lto::{self, SerializedModule};
+use super::symbol_export::symbol_name_for_instance_in_crate;
+
+use crate::{
+ CachedModuleCodegen, CodegenResults, CompiledModule, CrateInfo, ModuleCodegen, ModuleKind,
+};
+
+use crate::traits::*;
+use jobserver::{Acquired, Client};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::profiling::TimingGuard;
+use rustc_data_structures::profiling::VerboseTimingGuard;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::emitter::Emitter;
+use rustc_errors::{DiagnosticId, FatalError, Handler, Level};
+use rustc_fs_util::link_or_copy;
+use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_incremental::{
+ copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess,
+};
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::middle::exported_symbols::SymbolExportInfo;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::cgu_reuse_tracker::CguReuseTracker;
+use rustc_session::config::{self, CrateType, Lto, OutputFilenames, OutputType};
+use rustc_session::config::{Passes, SwitchWithOptPath};
+use rustc_session::Session;
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::sym;
+use rustc_span::{BytePos, FileName, InnerSpan, Pos, Span};
+use rustc_target::spec::{MergeFunctions, SanitizerSet};
+
+use std::any::Any;
+use std::fs;
+use std::io;
+use std::marker::PhantomData;
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::str;
+use std::sync::mpsc::{channel, Receiver, Sender};
+use std::sync::Arc;
+use std::thread;
+
+const PRE_LTO_BC_EXT: &str = "pre-lto.bc";
+
+/// What kind of object file to emit.
+#[derive(Clone, Copy, PartialEq)]
+pub enum EmitObj {
+ // No object file.
+ None,
+
+ // Just uncompressed llvm bitcode. Provides easy compatibility with
+ // emscripten's ecc compiler, when used as the linker.
+ Bitcode,
+
+ // Object code, possibly augmented with a bitcode section.
+ ObjectCode(BitcodeSection),
+}
+
+/// What kind of llvm bitcode section to embed in an object file.
+#[derive(Clone, Copy, PartialEq)]
+pub enum BitcodeSection {
+ // No bitcode section.
+ None,
+
+ // A full, uncompressed bitcode section.
+ Full,
+}
+
+/// Module-specific configuration for `optimize_and_codegen`.
+pub struct ModuleConfig {
+ /// Names of additional optimization passes to run.
+ pub passes: Vec<String>,
+ /// Some(level) to optimize at a certain level, or None to run
+ /// absolutely no optimizations (used for the metadata module).
+ pub opt_level: Option<config::OptLevel>,
+
+ /// Some(level) to optimize binary size, or None to not affect program size.
+ pub opt_size: Option<config::OptLevel>,
+
+ pub pgo_gen: SwitchWithOptPath,
+ pub pgo_use: Option<PathBuf>,
+ pub pgo_sample_use: Option<PathBuf>,
+ pub debug_info_for_profiling: bool,
+ pub instrument_coverage: bool,
+ pub instrument_gcov: bool,
+
+ pub sanitizer: SanitizerSet,
+ pub sanitizer_recover: SanitizerSet,
+ pub sanitizer_memory_track_origins: usize,
+
+ // Flags indicating which outputs to produce.
+ pub emit_pre_lto_bc: bool,
+ pub emit_no_opt_bc: bool,
+ pub emit_bc: bool,
+ pub emit_ir: bool,
+ pub emit_asm: bool,
+ pub emit_obj: EmitObj,
+ pub emit_thin_lto: bool,
+ pub bc_cmdline: String,
+
+ // Miscellaneous flags. These are mostly copied from command-line
+ // options.
+ pub verify_llvm_ir: bool,
+ pub no_prepopulate_passes: bool,
+ pub no_builtins: bool,
+ pub time_module: bool,
+ pub vectorize_loop: bool,
+ pub vectorize_slp: bool,
+ pub merge_functions: bool,
+ pub inline_threshold: Option<u32>,
+ pub new_llvm_pass_manager: Option<bool>,
+ pub emit_lifetime_markers: bool,
+ pub llvm_plugins: Vec<String>,
+}
+
+impl ModuleConfig {
+ fn new(
+ kind: ModuleKind,
+ sess: &Session,
+ no_builtins: bool,
+ is_compiler_builtins: bool,
+ ) -> ModuleConfig {
+ // If it's a regular module, use `$regular`, otherwise use `$other`.
+ // `$regular` and `$other` are evaluated lazily.
+ macro_rules! if_regular {
+ ($regular: expr, $other: expr) => {
+ if let ModuleKind::Regular = kind { $regular } else { $other }
+ };
+ }
+
+ let opt_level_and_size = if_regular!(Some(sess.opts.optimize), None);
+
+ let save_temps = sess.opts.cg.save_temps;
+
+ let should_emit_obj = sess.opts.output_types.contains_key(&OutputType::Exe)
+ || match kind {
+ ModuleKind::Regular => sess.opts.output_types.contains_key(&OutputType::Object),
+ ModuleKind::Allocator => false,
+ ModuleKind::Metadata => sess.opts.output_types.contains_key(&OutputType::Metadata),
+ };
+
+ let emit_obj = if !should_emit_obj {
+ EmitObj::None
+ } else if sess.target.obj_is_bitcode
+ || (sess.opts.cg.linker_plugin_lto.enabled() && !no_builtins)
+ {
+ // This case is selected if the target uses objects as bitcode, or
+ // if linker plugin LTO is enabled. In the linker plugin LTO case
+ // the assumption is that the final link-step will read the bitcode
+ // and convert it to object code. This may be done by either the
+ // native linker or rustc itself.
+ //
+ // Note, however, that the linker-plugin-lto requested here is
+ // explicitly ignored for `#![no_builtins]` crates. These crates are
+ // specifically ignored by rustc's LTO passes and wouldn't work if
+ // loaded into the linker. These crates define symbols that LLVM
+ // lowers intrinsics to, and these symbol dependencies aren't known
+ // until after codegen. As a result any crate marked
+ // `#![no_builtins]` is assumed to not participate in LTO and
+ // instead goes on to generate object code.
+ EmitObj::Bitcode
+ } else if need_bitcode_in_object(sess) {
+ EmitObj::ObjectCode(BitcodeSection::Full)
+ } else {
+ EmitObj::ObjectCode(BitcodeSection::None)
+ };
+
+ ModuleConfig {
+ passes: if_regular!(sess.opts.cg.passes.clone(), vec![]),
+
+ opt_level: opt_level_and_size,
+ opt_size: opt_level_and_size,
+
+ pgo_gen: if_regular!(
+ sess.opts.cg.profile_generate.clone(),
+ SwitchWithOptPath::Disabled
+ ),
+ pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
+ pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
+ debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
+ instrument_coverage: if_regular!(sess.instrument_coverage(), false),
+ instrument_gcov: if_regular!(
+ // compiler_builtins overrides the codegen-units settings,
+ // which is incompatible with -Zprofile which requires that
+ // only a single codegen unit is used per crate.
+ sess.opts.unstable_opts.profile && !is_compiler_builtins,
+ false
+ ),
+
+ sanitizer: if_regular!(sess.opts.unstable_opts.sanitizer, SanitizerSet::empty()),
+ sanitizer_recover: if_regular!(
+ sess.opts.unstable_opts.sanitizer_recover,
+ SanitizerSet::empty()
+ ),
+ sanitizer_memory_track_origins: if_regular!(
+ sess.opts.unstable_opts.sanitizer_memory_track_origins,
+ 0
+ ),
+
+ emit_pre_lto_bc: if_regular!(
+ save_temps || need_pre_lto_bitcode_for_incr_comp(sess),
+ false
+ ),
+ emit_no_opt_bc: if_regular!(save_temps, false),
+ emit_bc: if_regular!(
+ save_temps || sess.opts.output_types.contains_key(&OutputType::Bitcode),
+ save_temps
+ ),
+ emit_ir: if_regular!(
+ sess.opts.output_types.contains_key(&OutputType::LlvmAssembly),
+ false
+ ),
+ emit_asm: if_regular!(
+ sess.opts.output_types.contains_key(&OutputType::Assembly),
+ false
+ ),
+ emit_obj,
+ emit_thin_lto: sess.opts.unstable_opts.emit_thin_lto,
+ bc_cmdline: sess.target.bitcode_llvm_cmdline.to_string(),
+
+ verify_llvm_ir: sess.verify_llvm_ir(),
+ no_prepopulate_passes: sess.opts.cg.no_prepopulate_passes,
+ no_builtins: no_builtins || sess.target.no_builtins,
+
+ // Exclude metadata and allocator modules from time_passes output,
+ // since they throw off the "LLVM passes" measurement.
+ time_module: if_regular!(true, false),
+
+ // Copy what clang does by turning on loop vectorization at O2 and
+ // slp vectorization at O3.
+ vectorize_loop: !sess.opts.cg.no_vectorize_loops
+ && (sess.opts.optimize == config::OptLevel::Default
+ || sess.opts.optimize == config::OptLevel::Aggressive),
+ vectorize_slp: !sess.opts.cg.no_vectorize_slp
+ && sess.opts.optimize == config::OptLevel::Aggressive,
+
+ // Some targets (namely, NVPTX) interact badly with the
+ // MergeFunctions pass. This is because MergeFunctions can generate
+ // new function calls which may interfere with the target calling
+ // convention; e.g. for the NVPTX target, PTX kernels should not
+ // call other PTX kernels. MergeFunctions can also be configured to
+ // generate aliases instead, but aliases are not supported by some
+ // backends (again, NVPTX). Therefore, allow targets to opt out of
+ // the MergeFunctions pass, but otherwise keep the pass enabled (at
+ // O2 and O3) since it can be useful for reducing code size.
+ merge_functions: match sess
+ .opts
+ .unstable_opts
+ .merge_functions
+ .unwrap_or(sess.target.merge_functions)
+ {
+ MergeFunctions::Disabled => false,
+ MergeFunctions::Trampolines | MergeFunctions::Aliases => {
+ sess.opts.optimize == config::OptLevel::Default
+ || sess.opts.optimize == config::OptLevel::Aggressive
+ }
+ },
+
+ inline_threshold: sess.opts.cg.inline_threshold,
+ new_llvm_pass_manager: sess.opts.unstable_opts.new_llvm_pass_manager,
+ emit_lifetime_markers: sess.emit_lifetime_markers(),
+ llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
+ }
+ }
+
+ pub fn bitcode_needed(&self) -> bool {
+ self.emit_bc
+ || self.emit_obj == EmitObj::Bitcode
+ || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)
+ }
+}
+
+/// Configuration passed to the function returned by the `target_machine_factory`.
+pub struct TargetMachineFactoryConfig {
+ /// Split DWARF is enabled in LLVM by checking that `TM.MCOptions.SplitDwarfFile` isn't empty,
+ /// so the path to the dwarf object has to be provided when we create the target machine.
+ /// This can be ignored by backends which do not need it for their Split DWARF support.
+ pub split_dwarf_file: Option<PathBuf>,
+}
+
+impl TargetMachineFactoryConfig {
+ pub fn new(
+ cgcx: &CodegenContext<impl WriteBackendMethods>,
+ module_name: &str,
+ ) -> TargetMachineFactoryConfig {
+ let split_dwarf_file = if cgcx.target_can_use_split_dwarf {
+ cgcx.output_filenames.split_dwarf_path(
+ cgcx.split_debuginfo,
+ cgcx.split_dwarf_kind,
+ Some(module_name),
+ )
+ } else {
+ None
+ };
+ TargetMachineFactoryConfig { split_dwarf_file }
+ }
+}
+
+pub type TargetMachineFactoryFn<B> = Arc<
+ dyn Fn(TargetMachineFactoryConfig) -> Result<<B as WriteBackendMethods>::TargetMachine, String>
+ + Send
+ + Sync,
+>;
+
+pub type ExportedSymbols = FxHashMap<CrateNum, Arc<Vec<(String, SymbolExportInfo)>>>;
+
+/// Additional resources used by optimize_and_codegen (not module specific)
+#[derive(Clone)]
+pub struct CodegenContext<B: WriteBackendMethods> {
+ // Resources needed when running LTO
+ pub backend: B,
+ pub prof: SelfProfilerRef,
+ pub lto: Lto,
+ pub save_temps: bool,
+ pub fewer_names: bool,
+ pub time_trace: bool,
+ pub exported_symbols: Option<Arc<ExportedSymbols>>,
+ pub opts: Arc<config::Options>,
+ pub crate_types: Vec<CrateType>,
+ pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>,
+ pub output_filenames: Arc<OutputFilenames>,
+ pub regular_module_config: Arc<ModuleConfig>,
+ pub metadata_module_config: Arc<ModuleConfig>,
+ pub allocator_module_config: Arc<ModuleConfig>,
+ pub tm_factory: TargetMachineFactoryFn<B>,
+ pub msvc_imps_needed: bool,
+ pub is_pe_coff: bool,
+ pub target_can_use_split_dwarf: bool,
+ pub target_pointer_width: u32,
+ pub target_arch: String,
+ pub debuginfo: config::DebugInfo,
+ pub split_debuginfo: rustc_target::spec::SplitDebuginfo,
+ pub split_dwarf_kind: rustc_session::config::SplitDwarfKind,
+
+ // Number of cgus excluding the allocator/metadata modules
+ pub total_cgus: usize,
+ // Handler to use for diagnostics produced during codegen.
+ pub diag_emitter: SharedEmitter,
+ // LLVM optimizations for which we want to print remarks.
+ pub remark: Passes,
+ // Worker thread number
+ pub worker: usize,
+ // The incremental compilation session directory, or None if we are not
+ // compiling incrementally
+ pub incr_comp_session_dir: Option<PathBuf>,
+ // Used to update CGU re-use information during the thinlto phase.
+ pub cgu_reuse_tracker: CguReuseTracker,
+ // Channel back to the main control thread to send messages to
+ pub coordinator_send: Sender<Box<dyn Any + Send>>,
+}
+
+impl<B: WriteBackendMethods> CodegenContext<B> {
+ pub fn create_diag_handler(&self) -> Handler {
+ Handler::with_emitter(true, None, Box::new(self.diag_emitter.clone()))
+ }
+
+ pub fn config(&self, kind: ModuleKind) -> &ModuleConfig {
+ match kind {
+ ModuleKind::Regular => &self.regular_module_config,
+ ModuleKind::Metadata => &self.metadata_module_config,
+ ModuleKind::Allocator => &self.allocator_module_config,
+ }
+ }
+}
+
+fn generate_lto_work<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ needs_fat_lto: Vec<FatLTOInput<B>>,
+ needs_thin_lto: Vec<(String, B::ThinBuffer)>,
+ import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>,
+) -> Vec<(WorkItem<B>, u64)> {
+ let _prof_timer = cgcx.prof.generic_activity("codegen_generate_lto_work");
+
+ let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
+ assert!(needs_thin_lto.is_empty());
+ let lto_module =
+ B::run_fat_lto(cgcx, needs_fat_lto, import_only_modules).unwrap_or_else(|e| e.raise());
+ (vec![lto_module], vec![])
+ } else {
+ assert!(needs_fat_lto.is_empty());
+ B::run_thin_lto(cgcx, needs_thin_lto, import_only_modules).unwrap_or_else(|e| e.raise())
+ };
+
+ lto_modules
+ .into_iter()
+ .map(|module| {
+ let cost = module.cost();
+ (WorkItem::LTO(module), cost)
+ })
+ .chain(copy_jobs.into_iter().map(|wp| {
+ (
+ WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen {
+ name: wp.cgu_name.clone(),
+ source: wp,
+ }),
+ 0,
+ )
+ }))
+ .collect()
+}
+
+pub struct CompiledModules {
+ pub modules: Vec<CompiledModule>,
+ pub allocator_module: Option<CompiledModule>,
+}
+
+fn need_bitcode_in_object(sess: &Session) -> bool {
+ let requested_for_rlib = sess.opts.cg.embed_bitcode
+ && sess.crate_types().contains(&CrateType::Rlib)
+ && sess.opts.output_types.contains_key(&OutputType::Exe);
+ let forced_by_target = sess.target.forces_embed_bitcode;
+ requested_for_rlib || forced_by_target
+}
+
+fn need_pre_lto_bitcode_for_incr_comp(sess: &Session) -> bool {
+ if sess.opts.incremental.is_none() {
+ return false;
+ }
+
+ match sess.lto() {
+ Lto::No => false,
+ Lto::Fat | Lto::Thin | Lto::ThinLocal => true,
+ }
+}
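+
+// For example, `-C incremental=<dir> -C lto=thin` makes this return true,
+// while plain `-C lto=thin` (no incremental) or incremental without LTO
+// returns false.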
+
+pub fn start_async_codegen<B: ExtraBackendMethods>(
+ backend: B,
+ tcx: TyCtxt<'_>,
+ target_cpu: String,
+ metadata: EncodedMetadata,
+ metadata_module: Option<CompiledModule>,
+ total_cgus: usize,
+) -> OngoingCodegen<B> {
+ let (coordinator_send, coordinator_receive) = channel();
+ let sess = tcx.sess;
+
+ let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
+ let no_builtins = tcx.sess.contains_name(crate_attrs, sym::no_builtins);
+ let is_compiler_builtins = tcx.sess.contains_name(crate_attrs, sym::compiler_builtins);
+
+ let crate_info = CrateInfo::new(tcx, target_cpu);
+
+ let regular_config =
+ ModuleConfig::new(ModuleKind::Regular, sess, no_builtins, is_compiler_builtins);
+ let metadata_config =
+ ModuleConfig::new(ModuleKind::Metadata, sess, no_builtins, is_compiler_builtins);
+ let allocator_config =
+ ModuleConfig::new(ModuleKind::Allocator, sess, no_builtins, is_compiler_builtins);
+
+ let (shared_emitter, shared_emitter_main) = SharedEmitter::new();
+ let (codegen_worker_send, codegen_worker_receive) = channel();
+
+ let coordinator_thread = start_executing_work(
+ backend.clone(),
+ tcx,
+ &crate_info,
+ shared_emitter,
+ codegen_worker_send,
+ coordinator_receive,
+ total_cgus,
+ sess.jobserver.clone(),
+ Arc::new(regular_config),
+ Arc::new(metadata_config),
+ Arc::new(allocator_config),
+ coordinator_send.clone(),
+ );
+
+ OngoingCodegen {
+ backend,
+ metadata,
+ metadata_module,
+ crate_info,
+
+ codegen_worker_receive,
+ shared_emitter_main,
+ coordinator: Coordinator {
+ sender: coordinator_send,
+ future: Some(coordinator_thread),
+ phantom: PhantomData,
+ },
+ output_filenames: tcx.output_filenames(()).clone(),
+ }
+}
+
+fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
+ sess: &Session,
+ compiled_modules: &CompiledModules,
+) -> FxHashMap<WorkProductId, WorkProduct> {
+ let mut work_products = FxHashMap::default();
+
+ if sess.opts.incremental.is_none() {
+ return work_products;
+ }
+
+ let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");
+
+ for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
+ let mut files = Vec::new();
+ if let Some(object_file_path) = &module.object {
+ files.push(("o", object_file_path.as_path()));
+ }
+ if let Some(dwarf_object_file_path) = &module.dwarf_object {
+ files.push(("dwo", dwarf_object_file_path.as_path()));
+ }
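+        // The short keys ("o" and "dwo") are the same ones looked up again by
+        // `execute_copy_from_cache_work_item` when these artifacts are
+        // reloaded from the cache on a later incremental build.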
+
+ if let Some((id, product)) =
+ copy_cgu_workproduct_to_incr_comp_cache_dir(sess, &module.name, files.as_slice())
+ {
+ work_products.insert(id, product);
+ }
+ }
+
+ work_products
+}
+
+fn produce_final_output_artifacts(
+ sess: &Session,
+ compiled_modules: &CompiledModules,
+ crate_output: &OutputFilenames,
+) {
+ let mut user_wants_bitcode = false;
+ let mut user_wants_objects = false;
+
+ // Produce final compile outputs.
+ let copy_gracefully = |from: &Path, to: &Path| {
+ if let Err(e) = fs::copy(from, to) {
+ sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e));
+ }
+ };
+
+ let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| {
+ if compiled_modules.modules.len() == 1 {
+            // 1) Only one codegen unit. In this case it's trivial
+            //    to copy `foo.0.x` to `foo.x`.
+ let module_name = Some(&compiled_modules.modules[0].name[..]);
+ let path = crate_output.temp_path(output_type, module_name);
+ copy_gracefully(&path, &crate_output.path(output_type));
+ if !sess.opts.cg.save_temps && !keep_numbered {
+ // The user just wants `foo.x`, not `foo.#module-name#.x`.
+ ensure_removed(sess.diagnostic(), &path);
+ }
+ } else {
+ let ext = crate_output
+ .temp_path(output_type, None)
+ .extension()
+ .unwrap()
+ .to_str()
+ .unwrap()
+ .to_owned();
+
+ if crate_output.outputs.contains_key(&output_type) {
+ // 2) Multiple codegen units, with `--emit foo=some_name`. We have
+ // no good solution for this case, so warn the user.
+ sess.warn(&format!(
+ "ignoring emit path because multiple .{} files \
+ were produced",
+ ext
+ ));
+ } else if crate_output.single_output_file.is_some() {
+ // 3) Multiple codegen units, with `-o some_name`. We have
+ // no good solution for this case, so warn the user.
+ sess.warn(&format!(
+ "ignoring -o because multiple .{} files \
+ were produced",
+ ext
+ ));
+ } else {
+ // 4) Multiple codegen units, but no explicit name. We
+ // just leave the `foo.0.x` files in place.
+ // (We don't have to do any work in this case.)
+ }
+ }
+ };
+
+ // Flag to indicate whether the user explicitly requested bitcode.
+ // Otherwise, we produced it only as a temporary output, and will need
+ // to get rid of it.
+ for output_type in crate_output.outputs.keys() {
+ match *output_type {
+ OutputType::Bitcode => {
+ user_wants_bitcode = true;
+ // Copy to .bc, but always keep the .0.bc. There is a later
+ // check to figure out if we should delete .0.bc files, or keep
+ // them for making an rlib.
+ copy_if_one_unit(OutputType::Bitcode, true);
+ }
+ OutputType::LlvmAssembly => {
+ copy_if_one_unit(OutputType::LlvmAssembly, false);
+ }
+ OutputType::Assembly => {
+ copy_if_one_unit(OutputType::Assembly, false);
+ }
+ OutputType::Object => {
+ user_wants_objects = true;
+ copy_if_one_unit(OutputType::Object, true);
+ }
+ OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {}
+ }
+ }
+
+ // Clean up unwanted temporary files.
+
+ // We create the following files by default:
+ // - #crate#.#module-name#.bc
+ // - #crate#.#module-name#.o
+ // - #crate#.crate.metadata.bc
+ // - #crate#.crate.metadata.o
+ // - #crate#.o (linked from crate.##.o)
+ // - #crate#.bc (copied from crate.##.bc)
+ // We may create additional files if requested by the user (through
+ // `-C save-temps` or `--emit=` flags).
+
+ if !sess.opts.cg.save_temps {
+ // Remove the temporary .#module-name#.o objects. If the user didn't
+ // explicitly request bitcode (with --emit=bc), and the bitcode is not
+ // needed for building an rlib, then we must remove .#module-name#.bc as
+ // well.
+
+ // Specific rules for keeping .#module-name#.bc:
+ // - If the user requested bitcode (`user_wants_bitcode`), and
+ // codegen_units > 1, then keep it.
+ // - If the user requested bitcode but codegen_units == 1, then we
+ // can toss .#module-name#.bc because we copied it to .bc earlier.
+ // - If we're not building an rlib and the user didn't request
+ // bitcode, then delete .#module-name#.bc.
+ // If you change how this works, also update back::link::link_rlib,
+ // where .#module-name#.bc files are (maybe) deleted after making an
+ // rlib.
+ let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe);
+
+ let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1;
+
+ let keep_numbered_objects =
+ needs_crate_object || (user_wants_objects && sess.codegen_units() > 1);
+
+ for module in compiled_modules.modules.iter() {
+ if let Some(ref path) = module.object {
+ if !keep_numbered_objects {
+ ensure_removed(sess.diagnostic(), path);
+ }
+ }
+
+ if let Some(ref path) = module.dwarf_object {
+ if !keep_numbered_objects {
+ ensure_removed(sess.diagnostic(), path);
+ }
+ }
+
+ if let Some(ref path) = module.bytecode {
+ if !keep_numbered_bitcode {
+ ensure_removed(sess.diagnostic(), path);
+ }
+ }
+ }
+
+ if !user_wants_bitcode {
+ if let Some(ref allocator_module) = compiled_modules.allocator_module {
+ if let Some(ref path) = allocator_module.bytecode {
+ ensure_removed(sess.diagnostic(), path);
+ }
+ }
+ }
+ }
+
+ // We leave the following files around by default:
+ // - #crate#.o
+ // - #crate#.crate.metadata.o
+ // - #crate#.bc
+ // These are used in linking steps and will be cleaned up afterward.
+}
+
+pub enum WorkItem<B: WriteBackendMethods> {
+ /// Optimize a newly codegened, totally unoptimized module.
+ Optimize(ModuleCodegen<B::Module>),
+ /// Copy the post-LTO artifacts from the incremental cache to the output
+ /// directory.
+ CopyPostLtoArtifacts(CachedModuleCodegen),
+ /// Performs (Thin)LTO on the given module.
+ LTO(lto::LtoModuleCodegen<B>),
+}
+
+impl<B: WriteBackendMethods> WorkItem<B> {
+ pub fn module_kind(&self) -> ModuleKind {
+ match *self {
+ WorkItem::Optimize(ref m) => m.kind,
+ WorkItem::CopyPostLtoArtifacts(_) | WorkItem::LTO(_) => ModuleKind::Regular,
+ }
+ }
+
+ fn start_profiling<'a>(&self, cgcx: &'a CodegenContext<B>) -> TimingGuard<'a> {
+ match *self {
+ WorkItem::Optimize(ref m) => {
+ cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*m.name)
+ }
+ WorkItem::CopyPostLtoArtifacts(ref m) => cgcx
+ .prof
+ .generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*m.name),
+ WorkItem::LTO(ref m) => {
+ cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", m.name())
+ }
+ }
+ }
+
+ /// Generate a short description of this work item suitable for use as a thread name.
+ fn short_description(&self) -> String {
+        // `pthread_setname_np()` on *nix is limited to 15 characters and longer names are ignored.
+ // Use very short descriptions in this case to maximize the space available for the module name.
+ // Windows does not have that limitation so use slightly more descriptive names there.
+ match self {
+ WorkItem::Optimize(m) => {
+ #[cfg(windows)]
+ return format!("optimize module {}", m.name);
+ #[cfg(not(windows))]
+ return format!("opt {}", m.name);
+ }
+ WorkItem::CopyPostLtoArtifacts(m) => {
+ #[cfg(windows)]
+ return format!("copy LTO artifacts for {}", m.name);
+ #[cfg(not(windows))]
+ return format!("copy {}", m.name);
+ }
+ WorkItem::LTO(m) => {
+ #[cfg(windows)]
+ return format!("LTO module {}", m.name());
+ #[cfg(not(windows))]
+ return format!("LTO {}", m.name());
+ }
+ }
+ }
+}
+
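+// The possible outcomes of processing a single `WorkItem`: a fully compiled
+// module, a module that still needs the final cross-CGU link step (used by
+// `-Zcombine-cgu`), or a module that has to go back to the coordinator for
+// fat or thin LTO.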
+enum WorkItemResult<B: WriteBackendMethods> {
+ Compiled(CompiledModule),
+ NeedsLink(ModuleCodegen<B::Module>),
+ NeedsFatLTO(FatLTOInput<B>),
+ NeedsThinLTO(String, B::ThinBuffer),
+}
+
+pub enum FatLTOInput<B: WriteBackendMethods> {
+ Serialized { name: String, buffer: B::ModuleBuffer },
+ InMemory(ModuleCodegen<B::Module>),
+}
+
+fn execute_work_item<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ work_item: WorkItem<B>,
+) -> Result<WorkItemResult<B>, FatalError> {
+ let module_config = cgcx.config(work_item.module_kind());
+
+ match work_item {
+ WorkItem::Optimize(module) => execute_optimize_work_item(cgcx, module, module_config),
+ WorkItem::CopyPostLtoArtifacts(module) => {
+ Ok(execute_copy_from_cache_work_item(cgcx, module, module_config))
+ }
+ WorkItem::LTO(module) => execute_lto_work_item(cgcx, module, module_config),
+ }
+}
+
+// Actual LTO type we end up choosing based on multiple factors.
+pub enum ComputedLtoType {
+ No,
+ Thin,
+ Fat,
+}
+
+pub fn compute_per_cgu_lto_type(
+ sess_lto: &Lto,
+ opts: &config::Options,
+ sess_crate_types: &[CrateType],
+ module_kind: ModuleKind,
+) -> ComputedLtoType {
+ // Metadata modules never participate in LTO regardless of the lto
+ // settings.
+ if module_kind == ModuleKind::Metadata {
+ return ComputedLtoType::No;
+ }
+
+ // If the linker does LTO, we don't have to do it. Note that we
+    // keep doing full LTO, if it is requested, so as not to break the
+ // assumption that the output will be a single module.
+ let linker_does_lto = opts.cg.linker_plugin_lto.enabled();
+
+ // When we're automatically doing ThinLTO for multi-codegen-unit
+    // builds we don't actually want to LTO the allocator module if
+ // it shows up. This is due to various linker shenanigans that
+ // we'll encounter later.
+ let is_allocator = module_kind == ModuleKind::Allocator;
+
+ // We ignore a request for full crate graph LTO if the crate type
+    // is only an rlib, as there is no full crate graph to process;
+    // that'll happen later.
+ //
+ // This use case currently comes up primarily for targets that
+ // require LTO so the request for LTO is always unconditionally
+ // passed down to the backend, but we don't actually want to do
+ // anything about it yet until we've got a final product.
+ let is_rlib = sess_crate_types.len() == 1 && sess_crate_types[0] == CrateType::Rlib;
+
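+    // Putting the pieces together (this mirrors the match below): with
+    // linker-plugin LTO disabled, `ThinLocal` thin-LTOs everything except the
+    // allocator module, `Thin` is deferred (`No`) on a pure rlib build since
+    // the full crate graph is processed later, and `Fat` is honored for any
+    // non-rlib build.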
+ match sess_lto {
+ Lto::ThinLocal if !linker_does_lto && !is_allocator => ComputedLtoType::Thin,
+ Lto::Thin if !linker_does_lto && !is_rlib => ComputedLtoType::Thin,
+ Lto::Fat if !is_rlib => ComputedLtoType::Fat,
+ _ => ComputedLtoType::No,
+ }
+}
+
+fn execute_optimize_work_item<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ module: ModuleCodegen<B::Module>,
+ module_config: &ModuleConfig,
+) -> Result<WorkItemResult<B>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+
+ unsafe {
+ B::optimize(cgcx, &diag_handler, &module, module_config)?;
+ }
+
+ // After we've done the initial round of optimizations we need to
+ // decide whether to synchronously codegen this module or ship it
+ // back to the coordinator thread for further LTO processing (which
+ // has to wait for all the initial modules to be optimized).
+
+ let lto_type = compute_per_cgu_lto_type(&cgcx.lto, &cgcx.opts, &cgcx.crate_types, module.kind);
+
+ // If we're doing some form of incremental LTO then we need to be sure to
+ // save our module to disk first.
+ let bitcode = if cgcx.config(module.kind).emit_pre_lto_bc {
+ let filename = pre_lto_bitcode_filename(&module.name);
+ cgcx.incr_comp_session_dir.as_ref().map(|path| path.join(&filename))
+ } else {
+ None
+ };
+
+ match lto_type {
+ ComputedLtoType::No => finish_intra_module_work(cgcx, module, module_config),
+ ComputedLtoType::Thin => {
+ let (name, thin_buffer) = B::prepare_thin(module);
+ if let Some(path) = bitcode {
+ fs::write(&path, thin_buffer.data()).unwrap_or_else(|e| {
+ panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
+ });
+ }
+ Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))
+ }
+ ComputedLtoType::Fat => match bitcode {
+ Some(path) => {
+ let (name, buffer) = B::serialize_module(module);
+ fs::write(&path, buffer.data()).unwrap_or_else(|e| {
+ panic!("Error writing pre-lto-bitcode file `{}`: {}", path.display(), e);
+ });
+ Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::Serialized { name, buffer }))
+ }
+ None => Ok(WorkItemResult::NeedsFatLTO(FatLTOInput::InMemory(module))),
+ },
+ }
+}
+
+fn execute_copy_from_cache_work_item<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ module: CachedModuleCodegen,
+ module_config: &ModuleConfig,
+) -> WorkItemResult<B> {
+ assert!(module_config.emit_obj != EmitObj::None);
+
+ let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
+
+ let load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
+ let source_file = in_incr_comp_dir(&incr_comp_session_dir, saved_path);
+ debug!(
+ "copying pre-existing module `{}` from {:?} to {}",
+ module.name,
+ source_file,
+ output_path.display()
+ );
+ match link_or_copy(&source_file, &output_path) {
+ Ok(_) => Some(output_path),
+ Err(err) => {
+ let diag_handler = cgcx.create_diag_handler();
+ diag_handler.err(&format!(
+ "unable to copy {} to {}: {}",
+ source_file.display(),
+ output_path.display(),
+ err
+ ));
+ None
+ }
+ }
+ };
+
+ let object = load_from_incr_comp_dir(
+ cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name)),
+ &module.source.saved_files.get("o").expect("no saved object file in work product"),
+ );
+ let dwarf_object =
+ module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
+ let dwarf_obj_out = cgcx
+ .output_filenames
+ .split_dwarf_path(cgcx.split_debuginfo, cgcx.split_dwarf_kind, Some(&module.name))
+ .expect(
+ "saved dwarf object in work product but `split_dwarf_path` returned `None`",
+ );
+ load_from_incr_comp_dir(dwarf_obj_out, &saved_dwarf_object_file)
+ });
+
+ WorkItemResult::Compiled(CompiledModule {
+ name: module.name,
+ kind: ModuleKind::Regular,
+ object,
+ dwarf_object,
+ bytecode: None,
+ })
+}
+
+fn execute_lto_work_item<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ module: lto::LtoModuleCodegen<B>,
+ module_config: &ModuleConfig,
+) -> Result<WorkItemResult<B>, FatalError> {
+ let module = unsafe { module.optimize(cgcx)? };
+ finish_intra_module_work(cgcx, module, module_config)
+}
+
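+// Codegen the module to its final artifacts, unless the unstable
+// `-Zcombine-cgu` option asks us to defer regular modules for one final
+// cross-CGU link step; metadata and allocator modules are always codegened
+// immediately.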
+fn finish_intra_module_work<B: ExtraBackendMethods>(
+ cgcx: &CodegenContext<B>,
+ module: ModuleCodegen<B::Module>,
+ module_config: &ModuleConfig,
+) -> Result<WorkItemResult<B>, FatalError> {
+ let diag_handler = cgcx.create_diag_handler();
+
+ if !cgcx.opts.unstable_opts.combine_cgu
+ || module.kind == ModuleKind::Metadata
+ || module.kind == ModuleKind::Allocator
+ {
+ let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config)? };
+ Ok(WorkItemResult::Compiled(module))
+ } else {
+ Ok(WorkItemResult::NeedsLink(module))
+ }
+}
+
+pub enum Message<B: WriteBackendMethods> {
+ Token(io::Result<Acquired>),
+ NeedsFatLTO {
+ result: FatLTOInput<B>,
+ worker_id: usize,
+ },
+ NeedsThinLTO {
+ name: String,
+ thin_buffer: B::ThinBuffer,
+ worker_id: usize,
+ },
+ NeedsLink {
+ module: ModuleCodegen<B::Module>,
+ worker_id: usize,
+ },
+ Done {
+ result: Result<CompiledModule, Option<WorkerFatalError>>,
+ worker_id: usize,
+ },
+ CodegenDone {
+ llvm_work_item: WorkItem<B>,
+ cost: u64,
+ },
+ AddImportOnlyModule {
+ module_data: SerializedModule<B::ModuleBuffer>,
+ work_product: WorkProduct,
+ },
+ CodegenComplete,
+ CodegenItem,
+ CodegenAborted,
+}
+
+struct Diagnostic {
+ msg: String,
+ code: Option<DiagnosticId>,
+ lvl: Level,
+}
+
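+// What the coordinator currently believes the main thread is doing with the
+// process's implicit jobserver token: nothing yet (`Idle`), translating CGUs
+// into LLVM work items (`Codegenning`), or running LLVM passes on the token's
+// behalf (`LLVMing`).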
+#[derive(PartialEq, Clone, Copy, Debug)]
+enum MainThreadWorkerState {
+ Idle,
+ Codegenning,
+ LLVMing,
+}
+
+fn start_executing_work<B: ExtraBackendMethods>(
+ backend: B,
+ tcx: TyCtxt<'_>,
+ crate_info: &CrateInfo,
+ shared_emitter: SharedEmitter,
+ codegen_worker_send: Sender<Message<B>>,
+ coordinator_receive: Receiver<Box<dyn Any + Send>>,
+ total_cgus: usize,
+ jobserver: Client,
+ regular_config: Arc<ModuleConfig>,
+ metadata_config: Arc<ModuleConfig>,
+ allocator_config: Arc<ModuleConfig>,
+ tx_to_llvm_workers: Sender<Box<dyn Any + Send>>,
+) -> thread::JoinHandle<Result<CompiledModules, ()>> {
+ let coordinator_send = tx_to_llvm_workers;
+ let sess = tcx.sess;
+
+ // Compute the set of symbols we need to retain when doing LTO (if we need to)
+ let exported_symbols = {
+ let mut exported_symbols = FxHashMap::default();
+
+ let copy_symbols = |cnum| {
+ let symbols = tcx
+ .exported_symbols(cnum)
+ .iter()
+ .map(|&(s, lvl)| (symbol_name_for_instance_in_crate(tcx, s, cnum), lvl))
+ .collect();
+ Arc::new(symbols)
+ };
+
+ match sess.lto() {
+ Lto::No => None,
+ Lto::ThinLocal => {
+ exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
+ Some(Arc::new(exported_symbols))
+ }
+ Lto::Fat | Lto::Thin => {
+ exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE));
+ for &cnum in tcx.crates(()).iter() {
+ exported_symbols.insert(cnum, copy_symbols(cnum));
+ }
+ Some(Arc::new(exported_symbols))
+ }
+ }
+ };
+
+ // First up, convert our jobserver into a helper thread so we can use normal
+ // mpsc channels to manage our messages and such.
+    // Once we've requested tokens, they will arrive on `coordinator_receive`
+    // as they become available and get managed in the main loop below.
+ let coordinator_send2 = coordinator_send.clone();
+ let helper = jobserver
+ .into_helper_thread(move |token| {
+ drop(coordinator_send2.send(Box::new(Message::Token::<B>(token))));
+ })
+ .expect("failed to spawn helper thread");
+
+ let mut each_linked_rlib_for_lto = Vec::new();
+ drop(link::each_linked_rlib(crate_info, &mut |cnum, path| {
+ if link::ignored_for_lto(sess, crate_info, cnum) {
+ return;
+ }
+ each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
+ }));
+
+ let ol =
+ if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
+ // If we know that we won’t be doing codegen, create target machines without optimisation.
+ config::OptLevel::No
+ } else {
+ tcx.backend_optimization_level(())
+ };
+ let backend_features = tcx.global_backend_features(());
+ let cgcx = CodegenContext::<B> {
+ backend: backend.clone(),
+ crate_types: sess.crate_types().to_vec(),
+ each_linked_rlib_for_lto,
+ lto: sess.lto(),
+ fewer_names: sess.fewer_names(),
+ save_temps: sess.opts.cg.save_temps,
+ time_trace: sess.opts.unstable_opts.llvm_time_trace,
+ opts: Arc::new(sess.opts.clone()),
+ prof: sess.prof.clone(),
+ exported_symbols,
+ remark: sess.opts.cg.remark.clone(),
+ worker: 0,
+ incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()),
+ cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(),
+ coordinator_send,
+ diag_emitter: shared_emitter.clone(),
+ output_filenames: tcx.output_filenames(()).clone(),
+ regular_module_config: regular_config,
+ metadata_module_config: metadata_config,
+ allocator_module_config: allocator_config,
+ tm_factory: backend.target_machine_factory(tcx.sess, ol, backend_features),
+ total_cgus,
+ msvc_imps_needed: msvc_imps_needed(tcx),
+ is_pe_coff: tcx.sess.target.is_like_windows,
+ target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(),
+ target_pointer_width: tcx.sess.target.pointer_width,
+ target_arch: tcx.sess.target.arch.to_string(),
+ debuginfo: tcx.sess.opts.debuginfo,
+ split_debuginfo: tcx.sess.split_debuginfo(),
+ split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
+ };
+
+ // This is the "main loop" of parallel work happening for parallel codegen.
+ // It's here that we manage parallelism, schedule work, and work with
+ // messages coming from clients.
+ //
+ // There are a few environmental pre-conditions that shape how the system
+ // is set up:
+ //
+    // - Error reporting can only happen on the main thread because that's the
+ // only place where we have access to the compiler `Session`.
+ // - LLVM work can be done on any thread.
+ // - Codegen can only happen on the main thread.
+ // - Each thread doing substantial work must be in possession of a `Token`
+ // from the `Jobserver`.
+ // - The compiler process always holds one `Token`. Any additional `Tokens`
+ // have to be requested from the `Jobserver`.
+ //
+ // Error Reporting
+ // ===============
+    // The error reporting restriction is handled separately from the rest: We
+    // set up a `SharedEmitter` that holds an open channel to the main thread.
+    // When an error occurs on any thread, the shared emitter will send the
+    // error message to the receiver on the main thread (`SharedEmitterMain`).
+    // The main thread will periodically query this error message queue and
+    // emit any error messages it has received. It might even abort compilation
+    // if it has received a fatal error. In this case we rely on all other
+    // threads being torn down automatically with the main thread.
+    // Since the main thread will often be busy doing codegen work, error
+    // reporting will be somewhat delayed, since the message queue can only be
+    // checked in between two work packages.
+ //
+ // Work Processing Infrastructure
+ // ==============================
+ // The work processing infrastructure knows three major actors:
+ //
+ // - the coordinator thread,
+ // - the main thread, and
+ // - LLVM worker threads
+ //
+ // The coordinator thread is running a message loop. It instructs the main
+ // thread about what work to do when, and it will spawn off LLVM worker
+ // threads as open LLVM WorkItems become available.
+ //
+    // The job of the main thread is to codegen CGUs into LLVM work packages
+ // (since the main thread is the only thread that can do this). The main
+ // thread will block until it receives a message from the coordinator, upon
+ // which it will codegen one CGU, send it to the coordinator and block
+ // again. This way the coordinator can control what the main thread is
+ // doing.
+ //
+ // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is
+ // available, it will spawn off a new LLVM worker thread and let it process
+    // a WorkItem. When an LLVM worker thread is done with its WorkItem,
+    // it will just shut down, which also frees all resources associated with
+    // the given LLVM module, and sends a message to the coordinator that the
+    // WorkItem has been completed.
+ //
+ // Work Scheduling
+ // ===============
+ // The scheduler's goal is to minimize the time it takes to complete all
+    // work there is. However, we also want to keep memory consumption low
+ // if possible. These two goals are at odds with each other: If memory
+ // consumption were not an issue, we could just let the main thread produce
+ // LLVM WorkItems at full speed, assuring maximal utilization of
+ // Tokens/LLVM worker threads. However, since codegen is usually faster
+ // than LLVM processing, the queue of LLVM WorkItems would fill up and each
+ // WorkItem potentially holds on to a substantial amount of memory.
+ //
+    // So the actual goal is to always produce just enough LLVM WorkItems so
+    // as not to starve our LLVM worker threads. That means, once we have enough
+ // WorkItems in our queue, we can block the main thread, so it does not
+ // produce more until we need them.
+ //
+ // Doing LLVM Work on the Main Thread
+ // ----------------------------------
+    // Since the main thread owns the compiler process's implicit `Token`, it is
+ // wasteful to keep it blocked without doing any work. Therefore, what we do
+ // in this case is: We spawn off an additional LLVM worker thread that helps
+ // reduce the queue. The work it is doing corresponds to the implicit
+ // `Token`. The coordinator will mark the main thread as being busy with
+ // LLVM work. (The actual work happens on another OS thread but we just care
+ // about `Tokens`, not actual threads).
+ //
+ // When any LLVM worker thread finishes while the main thread is marked as
+ // "busy with LLVM work", we can do a little switcheroo: We give the Token
+ // of the just finished thread to the LLVM worker thread that is working on
+ // behalf of the main thread's implicit Token, thus freeing up the main
+ // thread again. The coordinator can then again decide what the main thread
+ // should do. This allows the coordinator to make decisions at more points
+ // in time.
+ //
+ // Striking a Balance between Throughput and Memory Consumption
+ // ------------------------------------------------------------
+ // Since our two goals, (1) use as many Tokens as possible and (2) keep
+ // memory consumption as low as possible, are in conflict with each other,
+ // we have to find a trade off between them. Right now, the goal is to keep
+ // all workers busy, which means that no worker should find the queue empty
+ // when it is ready to start.
+    // How do we achieve this? Good question :) We actually never know how
+    // many `Tokens` are potentially available so it's hard to say how much to
+    // fill up the queue before switching the main thread to LLVM work. Also we
+    // currently don't have a means to estimate how long a running LLVM worker
+    // will still be busy with its current WorkItem. However, we know the
+ // maximal count of available Tokens that makes sense (=the number of CPU
+ // cores), so we can take a conservative guess. The heuristic we use here
+ // is implemented in the `queue_full_enough()` function.
+ //
+ // Some Background on Jobservers
+ // -----------------------------
+ // It's worth also touching on the management of parallelism here. We don't
+ // want to just spawn a thread per work item because while that's optimal
+ // parallelism it may overload a system with too many threads or violate our
+    // configuration for the maximum amount of CPU to use for this process. To
+ // manage this we use the `jobserver` crate.
+ //
+ // Job servers are an artifact of GNU make and are used to manage
+    // parallelism between processes. A jobserver is basically a glorified IPC
+    // semaphore. Whenever we want to run some work we acquire the semaphore,
+ // and whenever we're done with that work we release the semaphore. In this
+ // manner we can ensure that the maximum number of parallel workers is
+ // capped at any one point in time.
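+    //
+    // Concretely, a rough sketch of how this file uses the `jobserver` crate:
+    // the session's `Client` is turned into a helper thread via
+    // `into_helper_thread`, each `request_token()` call eventually delivers an
+    // `Acquired` token to our message loop, and dropping an `Acquired` (e.g.
+    // in `tokens.truncate(..)` below) releases it back to the semaphore.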
+ //
+ // LTO and the coordinator thread
+ // ------------------------------
+ //
+ // The final job the coordinator thread is responsible for is managing LTO
+    // and how that works. When LTO is requested, what we'll do is collect all
+ // optimized LLVM modules into a local vector on the coordinator. Once all
+ // modules have been codegened and optimized we hand this to the `lto`
+    // module for further optimization. The `lto` module will return a list
+ // of more modules to work on, which the coordinator will continue to spawn
+ // work for.
+ //
+ // Each LLVM module is automatically sent back to the coordinator for LTO if
+    // necessary. There are already optimizations in place to avoid sending work
+ // back to the coordinator if LTO isn't requested.
+ return B::spawn_thread(cgcx.time_trace, move || {
+ let mut worker_id_counter = 0;
+ let mut free_worker_ids = Vec::new();
+ let mut get_worker_id = |free_worker_ids: &mut Vec<usize>| {
+ if let Some(id) = free_worker_ids.pop() {
+ id
+ } else {
+ let id = worker_id_counter;
+ worker_id_counter += 1;
+ id
+ }
+ };
+
+ // This is where we collect codegen units that have gone all the way
+ // through codegen and LLVM.
+ let mut compiled_modules = vec![];
+ let mut compiled_allocator_module = None;
+ let mut needs_link = Vec::new();
+ let mut needs_fat_lto = Vec::new();
+ let mut needs_thin_lto = Vec::new();
+ let mut lto_import_only_modules = Vec::new();
+ let mut started_lto = false;
+ let mut codegen_aborted = false;
+
+        // This flag tracks whether all items have gone through codegen.
+ let mut codegen_done = false;
+
+ // This is the queue of LLVM work items that still need processing.
+ let mut work_items = Vec::<(WorkItem<B>, u64)>::new();
+
+        // These are the Jobserver Tokens we currently hold. Does not include
+ // the implicit Token the compiler process owns no matter what.
+ let mut tokens = Vec::new();
+
+ let mut main_thread_worker_state = MainThreadWorkerState::Idle;
+ let mut running = 0;
+
+ let prof = &cgcx.prof;
+ let mut llvm_start_time: Option<VerboseTimingGuard<'_>> = None;
+
+ // Run the message loop while there's still anything that needs message
+ // processing. Note that as soon as codegen is aborted we simply want to
+ // wait for all existing work to finish, so many of the conditions here
+ // only apply if codegen hasn't been aborted as they represent pending
+ // work to be done.
+ while !codegen_done
+ || running > 0
+ || main_thread_worker_state == MainThreadWorkerState::LLVMing
+ || (!codegen_aborted
+ && !(work_items.is_empty()
+ && needs_fat_lto.is_empty()
+ && needs_thin_lto.is_empty()
+ && lto_import_only_modules.is_empty()
+ && main_thread_worker_state == MainThreadWorkerState::Idle))
+ {
+ // While there are still CGUs to be codegened, the coordinator has
+            // to decide how to utilize the compiler process's implicit Token:
+            // for codegenning more CGUs or for running them through LLVM.
+ if !codegen_done {
+ if main_thread_worker_state == MainThreadWorkerState::Idle {
+ // Compute the number of workers that will be running once we've taken as many
+ // items from the work queue as we can, plus one for the main thread. It's not
+ // critically important that we use this instead of just `running`, but it
+ // prevents the `queue_full_enough` heuristic from fluctuating just because a
+ // worker finished up and we decreased the `running` count, even though we're
+ // just going to increase it right after this when we put a new worker to work.
+ let extra_tokens = tokens.len().checked_sub(running).unwrap();
+ let additional_running = std::cmp::min(extra_tokens, work_items.len());
+ let anticipated_running = running + additional_running + 1;
+
+ if !queue_full_enough(work_items.len(), anticipated_running) {
+ // The queue is not full enough, codegen more items:
+ if codegen_worker_send.send(Message::CodegenItem).is_err() {
+ panic!("Could not send Message::CodegenItem to main thread")
+ }
+ main_thread_worker_state = MainThreadWorkerState::Codegenning;
+ } else {
+ // The queue is full enough to not let the worker
+ // threads starve. Use the implicit Token to do some
+ // LLVM work too.
+ let (item, _) =
+ work_items.pop().expect("queue empty - queue_full_enough() broken?");
+ let cgcx = CodegenContext {
+ worker: get_worker_id(&mut free_worker_ids),
+ ..cgcx.clone()
+ };
+ maybe_start_llvm_timer(
+ prof,
+ cgcx.config(item.module_kind()),
+ &mut llvm_start_time,
+ );
+ main_thread_worker_state = MainThreadWorkerState::LLVMing;
+ spawn_work(cgcx, item);
+ }
+ }
+ } else if codegen_aborted {
+                // Don't queue up any more work if codegen was aborted; we're
+                // just waiting for our existing children to finish.
+ } else {
+ // If we've finished everything related to normal codegen
+ // then it must be the case that we've got some LTO work to do.
+ // Perform the serial work here of figuring out what we're
+ // going to LTO and then push a bunch of work items onto our
+                // queue to do LTO.
+ if work_items.is_empty()
+ && running == 0
+ && main_thread_worker_state == MainThreadWorkerState::Idle
+ {
+ assert!(!started_lto);
+ started_lto = true;
+
+ let needs_fat_lto = mem::take(&mut needs_fat_lto);
+ let needs_thin_lto = mem::take(&mut needs_thin_lto);
+ let import_only_modules = mem::take(&mut lto_import_only_modules);
+
+ for (work, cost) in
+ generate_lto_work(&cgcx, needs_fat_lto, needs_thin_lto, import_only_modules)
+ {
+ let insertion_index = work_items
+ .binary_search_by_key(&cost, |&(_, cost)| cost)
+ .unwrap_or_else(|e| e);
+ work_items.insert(insertion_index, (work, cost));
+ if !cgcx.opts.unstable_opts.no_parallel_llvm {
+ helper.request_token();
+ }
+ }
+ }
+
+ // In this branch, we know that everything has been codegened,
+ // so it's just a matter of determining whether the implicit
+ // Token is free to use for LLVM work.
+ match main_thread_worker_state {
+ MainThreadWorkerState::Idle => {
+ if let Some((item, _)) = work_items.pop() {
+ let cgcx = CodegenContext {
+ worker: get_worker_id(&mut free_worker_ids),
+ ..cgcx.clone()
+ };
+ maybe_start_llvm_timer(
+ prof,
+ cgcx.config(item.module_kind()),
+ &mut llvm_start_time,
+ );
+ main_thread_worker_state = MainThreadWorkerState::LLVMing;
+ spawn_work(cgcx, item);
+ } else {
+ // There is no unstarted work, so let the main thread
+ // take over for a running worker. Otherwise the
+ // implicit token would just go to waste.
+ // We reduce the `running` counter by one. The
+ // `tokens.truncate()` below will take care of
+ // giving the Token back.
+ debug_assert!(running > 0);
+ running -= 1;
+ main_thread_worker_state = MainThreadWorkerState::LLVMing;
+ }
+ }
+ MainThreadWorkerState::Codegenning => bug!(
+ "codegen worker should not be codegenning after \
+ codegen was already completed"
+ ),
+ MainThreadWorkerState::LLVMing => {
+ // Already making good use of that token
+ }
+ }
+ }
+
+ // Spin up what work we can, only doing this while we've got available
+ // parallelism slots and work left to spawn.
+ while !codegen_aborted && !work_items.is_empty() && running < tokens.len() {
+ let (item, _) = work_items.pop().unwrap();
+
+ maybe_start_llvm_timer(prof, cgcx.config(item.module_kind()), &mut llvm_start_time);
+
+ let cgcx =
+ CodegenContext { worker: get_worker_id(&mut free_worker_ids), ..cgcx.clone() };
+
+ spawn_work(cgcx, item);
+ running += 1;
+ }
+
+ // Relinquish accidentally acquired extra tokens
+ tokens.truncate(running);
+
+ // If a thread exits successfully then we drop a token associated
+ // with that worker and update our `running` count. We may later
+ // re-acquire a token to continue running more work. We may also not
+        // actually drop a token here if the worker was running with an
+        // "ephemeral token".
+ let mut free_worker = |worker_id| {
+ if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ } else {
+ running -= 1;
+ }
+
+ free_worker_ids.push(worker_id);
+ };
+
+ let msg = coordinator_receive.recv().unwrap();
+ match *msg.downcast::<Message<B>>().ok().unwrap() {
+ // Save the token locally and the next turn of the loop will use
+ // this to spawn a new unit of work, or it may get dropped
+ // immediately if we have no more work to spawn.
+ Message::Token(token) => {
+ match token {
+ Ok(token) => {
+ tokens.push(token);
+
+ if main_thread_worker_state == MainThreadWorkerState::LLVMing {
+ // If the main thread token is used for LLVM work
+ // at the moment, we turn that thread into a regular
+ // LLVM worker thread, so the main thread is free
+ // to react to codegen demand.
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ running += 1;
+ }
+ }
+ Err(e) => {
+ let msg = &format!("failed to acquire jobserver token: {}", e);
+ shared_emitter.fatal(msg);
+ // Exit the coordinator thread
+ panic!("{}", msg)
+ }
+ }
+ }
+
+ Message::CodegenDone { llvm_work_item, cost } => {
+ // We keep the queue sorted by estimated processing cost,
+ // so that more expensive items are processed earlier. This
+ // is good for throughput as it gives the main thread more
+                // time to fill up the queue and it avoids leaving the most
+                // expensive items until the very end.
+ // Note, however, that this is not ideal for memory
+ // consumption, as LLVM module sizes are not evenly
+ // distributed.
+ let insertion_index = work_items.binary_search_by_key(&cost, |&(_, cost)| cost);
+ let insertion_index = match insertion_index {
+ Ok(idx) | Err(idx) => idx,
+ };
+ work_items.insert(insertion_index, (llvm_work_item, cost));
+
+ if !cgcx.opts.unstable_opts.no_parallel_llvm {
+ helper.request_token();
+ }
+ assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ }
+
+ Message::CodegenComplete => {
+ codegen_done = true;
+ assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ }
+
+            // If codegen is aborted, that means translation was aborted due
+ // to some normal-ish compiler error. In this situation we want
+ // to exit as soon as possible, but we want to make sure all
+ // existing work has finished. Flag codegen as being done, and
+ // then conditions above will ensure no more work is spawned but
+ // we'll keep executing this loop until `running` hits 0.
+ Message::CodegenAborted => {
+ codegen_done = true;
+ codegen_aborted = true;
+ }
+ Message::Done { result: Ok(compiled_module), worker_id } => {
+ free_worker(worker_id);
+ match compiled_module.kind {
+ ModuleKind::Regular => {
+ compiled_modules.push(compiled_module);
+ }
+ ModuleKind::Allocator => {
+ assert!(compiled_allocator_module.is_none());
+ compiled_allocator_module = Some(compiled_module);
+ }
+ ModuleKind::Metadata => bug!("Should be handled separately"),
+ }
+ }
+ Message::NeedsLink { module, worker_id } => {
+ free_worker(worker_id);
+ needs_link.push(module);
+ }
+ Message::NeedsFatLTO { result, worker_id } => {
+ assert!(!started_lto);
+ free_worker(worker_id);
+ needs_fat_lto.push(result);
+ }
+ Message::NeedsThinLTO { name, thin_buffer, worker_id } => {
+ assert!(!started_lto);
+ free_worker(worker_id);
+ needs_thin_lto.push((name, thin_buffer));
+ }
+ Message::AddImportOnlyModule { module_data, work_product } => {
+ assert!(!started_lto);
+ assert!(!codegen_done);
+ assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
+ lto_import_only_modules.push((module_data, work_product));
+ main_thread_worker_state = MainThreadWorkerState::Idle;
+ }
+ // If the thread failed that means it panicked, so we abort immediately.
+ Message::Done { result: Err(None), worker_id: _ } => {
+ bug!("worker thread panicked");
+ }
+ Message::Done { result: Err(Some(WorkerFatalError)), worker_id } => {
+ // Similar to CodegenAborted, wait for remaining work to finish.
+ free_worker(worker_id);
+ codegen_done = true;
+ codegen_aborted = true;
+ }
+ Message::CodegenItem => bug!("the coordinator should not receive codegen requests"),
+ }
+ }
+
+ if codegen_aborted {
+ return Err(());
+ }
+
+ let needs_link = mem::take(&mut needs_link);
+ if !needs_link.is_empty() {
+ assert!(compiled_modules.is_empty());
+ let diag_handler = cgcx.create_diag_handler();
+ let module = B::run_link(&cgcx, &diag_handler, needs_link).map_err(|_| ())?;
+ let module = unsafe {
+ B::codegen(&cgcx, &diag_handler, module, cgcx.config(ModuleKind::Regular))
+ .map_err(|_| ())?
+ };
+ compiled_modules.push(module);
+ }
+
+ // Drop to print timings
+ drop(llvm_start_time);
+
+ // Regardless of what order these modules completed in, report them to
+ // the backend in the same order every time to ensure that we're handing
+ // out deterministic results.
+ compiled_modules.sort_by(|a, b| a.name.cmp(&b.name));
+
+ Ok(CompiledModules {
+ modules: compiled_modules,
+ allocator_module: compiled_allocator_module,
+ })
+ });
+
+ // A heuristic that determines if we have enough LLVM WorkItems in the
+ // queue so that the main thread can do LLVM work instead of codegen
+ fn queue_full_enough(items_in_queue: usize, workers_running: usize) -> bool {
+ // This heuristic scales ahead-of-time codegen according to available
+ // concurrency, as measured by `workers_running`. The idea is that the
+ // more concurrency we have available, the more demand there will be for
+ // work items, and the fuller the queue should be kept to meet demand.
+ // An important property of this approach is that we codegen ahead of
+ // time only as much as necessary, so as to keep fewer LLVM modules in
+ // memory at once, thereby reducing memory consumption.
+ //
+ // When the number of workers running is less than the max concurrency
+ // available to us, this heuristic can cause us to instruct the main
+ // thread to work on an LLVM item (that is, tell it to "LLVM") instead
+ // of codegen, even though it seems like it *should* be codegenning so
+ // that we can create more work items and spawn more LLVM workers.
+ //
+ // But this is not a problem. When the main thread is told to LLVM,
+ // according to this heuristic and how work is scheduled, there is
+ // always at least one item in the queue, and therefore at least one
+ // pending jobserver token request. If there *is* more concurrency
+ // available, we will immediately receive a token, which will upgrade
+ // the main thread's LLVM worker to a real one (conceptually), and free
+ // up the main thread to codegen if necessary. On the other hand, if
+ // there isn't more concurrency, then the main thread working on an LLVM
+ // item is appropriate, as long as the queue is full enough for demand.
+ //
+ // Speaking of which, how full should we keep the queue? Probably less
+ // full than you'd think. A lot has to go wrong for the queue not to be
+ // full enough and for that to have a negative effect on compile times.
+ //
+ // Workers are unlikely to finish at exactly the same time, so when one
+ // finishes and takes another work item off the queue, we often have
+ // ample time to codegen at that point before the next worker finishes.
+ // But suppose that codegen takes so long that the workers exhaust the
+ // queue, and we have one or more workers that have nothing to work on.
+ // Well, it might not be so bad. Of all the LLVM modules we create and
+ // optimize, one has to finish last. It's not necessarily the case that
+ // by losing some concurrency for a moment, we delay the point at which
+ // that last LLVM module is finished and the rest of compilation can
+ // proceed. Also, when we can't take advantage of some concurrency, we
+ // give tokens back to the job server. That enables some other rustc to
+ // potentially make use of the available concurrency. That could even
+ // *decrease* overall compile time if we're lucky. But yes, if no other
+ // rustc can make use of the concurrency, then we've squandered it.
+ //
+ // However, keeping the queue full is also beneficial when we have a
+ // surge in available concurrency. Then items can be taken from the
+ // queue immediately, without having to wait for codegen.
+ //
+ // So, the heuristic below tries to keep one item in the queue for every
+ // four running workers. Based on limited benchmarking, this appears to
+ // be more than sufficient to avoid increasing compilation times.
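+        //
+        // For example, with 8 workers running, `quarter_of_workers` is
+        // 8 - 3 * 8 / 4 = 2 (integer division), so at least two items must be
+        // queued; with 3 workers it is 3 - 2 = 1, and a single queued item
+        // already counts as full enough.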
+ let quarter_of_workers = workers_running - 3 * workers_running / 4;
+ items_in_queue > 0 && items_in_queue >= quarter_of_workers
+ }
+
+ fn maybe_start_llvm_timer<'a>(
+ prof: &'a SelfProfilerRef,
+ config: &ModuleConfig,
+ llvm_start_time: &mut Option<VerboseTimingGuard<'a>>,
+ ) {
+ if config.time_module && llvm_start_time.is_none() {
+ *llvm_start_time = Some(prof.extra_verbose_generic_activity("LLVM_passes", "crate"));
+ }
+ }
+}
+
+/// `FatalError` is explicitly not `Send`, so this marker type is sent across
+/// threads instead to report a worker's fatal error.
+#[must_use]
+pub struct WorkerFatalError;
+
+fn spawn_work<B: ExtraBackendMethods>(cgcx: CodegenContext<B>, work: WorkItem<B>) {
+ B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || {
+ // Set up a destructor which will fire off a message that we're done as
+ // we exit.
+ struct Bomb<B: ExtraBackendMethods> {
+ coordinator_send: Sender<Box<dyn Any + Send>>,
+ result: Option<Result<WorkItemResult<B>, FatalError>>,
+ worker_id: usize,
+ }
+ impl<B: ExtraBackendMethods> Drop for Bomb<B> {
+ fn drop(&mut self) {
+ let worker_id = self.worker_id;
+ let msg = match self.result.take() {
+ Some(Ok(WorkItemResult::Compiled(m))) => {
+ Message::Done::<B> { result: Ok(m), worker_id }
+ }
+ Some(Ok(WorkItemResult::NeedsLink(m))) => {
+ Message::NeedsLink::<B> { module: m, worker_id }
+ }
+ Some(Ok(WorkItemResult::NeedsFatLTO(m))) => {
+ Message::NeedsFatLTO::<B> { result: m, worker_id }
+ }
+ Some(Ok(WorkItemResult::NeedsThinLTO(name, thin_buffer))) => {
+ Message::NeedsThinLTO::<B> { name, thin_buffer, worker_id }
+ }
+ Some(Err(FatalError)) => {
+ Message::Done::<B> { result: Err(Some(WorkerFatalError)), worker_id }
+ }
+ None => Message::Done::<B> { result: Err(None), worker_id },
+ };
+ drop(self.coordinator_send.send(Box::new(msg)));
+ }
+ }
+
+ let mut bomb = Bomb::<B> {
+ coordinator_send: cgcx.coordinator_send.clone(),
+ result: None,
+ worker_id: cgcx.worker,
+ };
+
+ // Execute the work itself, and if it finishes successfully then flag
+ // ourselves as a success as well.
+ //
+ // Note that we ignore any `FatalError` coming out of `execute_work_item`,
+ // as a diagnostic was already sent off to the main thread - just
+ // surface that there was an error in this worker.
+ bomb.result = {
+ let _prof_timer = work.start_profiling(&cgcx);
+ Some(execute_work_item(&cgcx, work))
+ };
+ })
+ .expect("failed to spawn thread");
+}
+
+enum SharedEmitterMessage {
+ Diagnostic(Diagnostic),
+ InlineAsmError(u32, String, Level, Option<(String, Vec<InnerSpan>)>),
+ AbortIfErrors,
+ Fatal(String),
+}
+
+#[derive(Clone)]
+pub struct SharedEmitter {
+ sender: Sender<SharedEmitterMessage>,
+}
+
+pub struct SharedEmitterMain {
+ receiver: Receiver<SharedEmitterMessage>,
+}
+
+impl SharedEmitter {
+ pub fn new() -> (SharedEmitter, SharedEmitterMain) {
+ let (sender, receiver) = channel();
+
+ (SharedEmitter { sender }, SharedEmitterMain { receiver })
+ }
+
+ pub fn inline_asm_error(
+ &self,
+ cookie: u32,
+ msg: String,
+ level: Level,
+ source: Option<(String, Vec<InnerSpan>)>,
+ ) {
+ drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg, level, source)));
+ }
+
+ pub fn fatal(&self, msg: &str) {
+ drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string())));
+ }
+}
+
+impl Emitter for SharedEmitter {
+ fn emit_diagnostic(&mut self, diag: &rustc_errors::Diagnostic) {
+ let fluent_args = self.to_fluent_args(diag.args());
+ drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
+ msg: self.translate_messages(&diag.message, &fluent_args).to_string(),
+ code: diag.code.clone(),
+ lvl: diag.level(),
+ })));
+ for child in &diag.children {
+ drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic {
+ msg: self.translate_messages(&child.message, &fluent_args).to_string(),
+ code: None,
+ lvl: child.level,
+ })));
+ }
+ drop(self.sender.send(SharedEmitterMessage::AbortIfErrors));
+ }
+
+ fn source_map(&self) -> Option<&Lrc<SourceMap>> {
+ None
+ }
+
+ fn fluent_bundle(&self) -> Option<&Lrc<rustc_errors::FluentBundle>> {
+ None
+ }
+
+ fn fallback_fluent_bundle(&self) -> &rustc_errors::FluentBundle {
+ panic!("shared emitter attempted to translate a diagnostic");
+ }
+}
+
+impl SharedEmitterMain {
+ pub fn check(&self, sess: &Session, blocking: bool) {
+ loop {
+ let message = if blocking {
+ match self.receiver.recv() {
+ Ok(message) => Ok(message),
+ Err(_) => Err(()),
+ }
+ } else {
+ match self.receiver.try_recv() {
+ Ok(message) => Ok(message),
+ Err(_) => Err(()),
+ }
+ };
+
+ match message {
+ Ok(SharedEmitterMessage::Diagnostic(diag)) => {
+ let handler = sess.diagnostic();
+ let mut d = rustc_errors::Diagnostic::new(diag.lvl, &diag.msg);
+ if let Some(code) = diag.code {
+ d.code(code);
+ }
+ handler.emit_diagnostic(&mut d);
+ }
+ Ok(SharedEmitterMessage::InlineAsmError(cookie, msg, level, source)) => {
+ let msg = msg.strip_prefix("error: ").unwrap_or(&msg);
+
+ let mut err = match level {
+ Level::Error { lint: false } => sess.struct_err(msg).forget_guarantee(),
+ Level::Warning(_) => sess.struct_warn(msg),
+ Level::Note => sess.struct_note_without_error(msg),
+ _ => bug!("Invalid inline asm diagnostic level"),
+ };
+
+ // If the cookie is 0 then we don't have span information.
+ if cookie != 0 {
+ let pos = BytePos::from_u32(cookie);
+ let span = Span::with_root_ctxt(pos, pos);
+ err.set_span(span);
+ };
+
+ // Point to the generated assembly if it is available.
+ if let Some((buffer, spans)) = source {
+ let source = sess
+ .source_map()
+ .new_source_file(FileName::inline_asm_source_code(&buffer), buffer);
+ let source_span = Span::with_root_ctxt(source.start_pos, source.end_pos);
+ let spans: Vec<_> =
+ spans.iter().map(|sp| source_span.from_inner(*sp)).collect();
+ err.span_note(spans, "instantiated into assembly here");
+ }
+
+ err.emit();
+ }
+ Ok(SharedEmitterMessage::AbortIfErrors) => {
+ sess.abort_if_errors();
+ }
+ Ok(SharedEmitterMessage::Fatal(msg)) => {
+ sess.fatal(&msg);
+ }
+ Err(_) => {
+ break;
+ }
+ }
+ }
+ }
+}
+
+pub struct Coordinator<B: ExtraBackendMethods> {
+ pub sender: Sender<Box<dyn Any + Send>>,
+ future: Option<thread::JoinHandle<Result<CompiledModules, ()>>>,
+ // Only used for the Message type.
+ phantom: PhantomData<B>,
+}
+
+impl<B: ExtraBackendMethods> Coordinator<B> {
+ fn join(mut self) -> std::thread::Result<Result<CompiledModules, ()>> {
+ self.future.take().unwrap().join()
+ }
+}
+
+impl<B: ExtraBackendMethods> Drop for Coordinator<B> {
+ fn drop(&mut self) {
+ if let Some(future) = self.future.take() {
+ // If we haven't joined yet, signal to the coordinator that it should spawn no more
+ // work, and wait for worker threads to finish.
+ drop(self.sender.send(Box::new(Message::CodegenAborted::<B>)));
+ drop(future.join());
+ }
+ }
+}
+
+pub struct OngoingCodegen<B: ExtraBackendMethods> {
+ pub backend: B,
+ pub metadata: EncodedMetadata,
+ pub metadata_module: Option<CompiledModule>,
+ pub crate_info: CrateInfo,
+ pub codegen_worker_receive: Receiver<Message<B>>,
+ pub shared_emitter_main: SharedEmitterMain,
+ pub output_filenames: Arc<OutputFilenames>,
+ pub coordinator: Coordinator<B>,
+}
+
+impl<B: ExtraBackendMethods> OngoingCodegen<B> {
+ pub fn join(self, sess: &Session) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
+ let _timer = sess.timer("finish_ongoing_codegen");
+
+ self.shared_emitter_main.check(sess, true);
+ let compiled_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
+ Ok(Ok(compiled_modules)) => compiled_modules,
+ Ok(Err(())) => {
+ sess.abort_if_errors();
+ panic!("expected abort due to worker thread errors")
+ }
+ Err(_) => {
+ bug!("panic during codegen/LLVM phase");
+ }
+ });
+
+ sess.cgu_reuse_tracker.check_expected_reuse(sess.diagnostic());
+
+ sess.abort_if_errors();
+
+ let work_products =
+ copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, &compiled_modules);
+ produce_final_output_artifacts(sess, &compiled_modules, &self.output_filenames);
+
+ // FIXME: time_llvm_passes support - does this use a global context or
+ // something?
+ if sess.codegen_units() == 1 && sess.time_llvm_passes() {
+ self.backend.print_pass_timings()
+ }
+
+ (
+ CodegenResults {
+ metadata: self.metadata,
+ crate_info: self.crate_info,
+
+ modules: compiled_modules.modules,
+ allocator_module: compiled_modules.allocator_module,
+ metadata_module: self.metadata_module,
+ },
+ work_products,
+ )
+ }
+
+ pub fn submit_pre_codegened_module_to_llvm(
+ &self,
+ tcx: TyCtxt<'_>,
+ module: ModuleCodegen<B::Module>,
+ ) {
+ self.wait_for_signal_to_codegen_item();
+ self.check_for_errors(tcx.sess);
+
+ // These are generally cheap and won't throw off scheduling.
+ let cost = 0;
+ submit_codegened_module_to_llvm(&self.backend, &self.coordinator.sender, module, cost);
+ }
+
+ pub fn codegen_finished(&self, tcx: TyCtxt<'_>) {
+ self.wait_for_signal_to_codegen_item();
+ self.check_for_errors(tcx.sess);
+ drop(self.coordinator.sender.send(Box::new(Message::CodegenComplete::<B>)));
+ }
+
+ pub fn check_for_errors(&self, sess: &Session) {
+ self.shared_emitter_main.check(sess, false);
+ }
+
+ pub fn wait_for_signal_to_codegen_item(&self) {
+ match self.codegen_worker_receive.recv() {
+ Ok(Message::CodegenItem) => {
+ // Nothing to do
+ }
+ Ok(_) => panic!("unexpected message"),
+ Err(_) => {
+                // One of the LLVM threads must have panicked; fall through so
+ // error handling can be reached.
+ }
+ }
+ }
+}
+
+pub fn submit_codegened_module_to_llvm<B: ExtraBackendMethods>(
+ _backend: &B,
+ tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
+ module: ModuleCodegen<B::Module>,
+ cost: u64,
+) {
+ let llvm_work_item = WorkItem::Optimize(module);
+ drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost })));
+}
+
+pub fn submit_post_lto_module_to_llvm<B: ExtraBackendMethods>(
+ _backend: &B,
+ tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
+ module: CachedModuleCodegen,
+) {
+ let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module);
+ drop(tx_to_llvm_workers.send(Box::new(Message::CodegenDone::<B> { llvm_work_item, cost: 0 })));
+}
+
+pub fn submit_pre_lto_module_to_llvm<B: ExtraBackendMethods>(
+ _backend: &B,
+ tcx: TyCtxt<'_>,
+ tx_to_llvm_workers: &Sender<Box<dyn Any + Send>>,
+ module: CachedModuleCodegen,
+) {
+ let filename = pre_lto_bitcode_filename(&module.name);
+ let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename);
+ let file = fs::File::open(&bc_path)
+ .unwrap_or_else(|e| panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e));
+
+ let mmap = unsafe {
+ Mmap::map(file).unwrap_or_else(|e| {
+ panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e)
+ })
+ };
+ // Schedule the module to be loaded
+ drop(tx_to_llvm_workers.send(Box::new(Message::AddImportOnlyModule::<B> {
+ module_data: SerializedModule::FromUncompressedFile(mmap),
+ work_product: module.source,
+ })));
+}
+
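+/// Builds the filename for a module's pre-LTO bitcode, e.g. `foo.cgu-0`
+/// becomes `foo.cgu-0.<PRE_LTO_BC_EXT>`.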
+pub fn pre_lto_bitcode_filename(module_name: &str) -> String {
+ format!("{}.{}", module_name, PRE_LTO_BC_EXT)
+}
+
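+// Whether we need to synthesize `__imp_` stub symbols (roughly, the
+// indirections MSVC-style linkers expect for potentially-dllimported data).
+// This only matters for rlibs on Windows-like targets, and is skipped under
+// linker-plugin LTO, which instead disallows dynamic linking entirely.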
+fn msvc_imps_needed(tcx: TyCtxt<'_>) -> bool {
+ // This should never be true (because it's not supported). If it is true,
+    // something is wrong with command-line arg validation.
+ assert!(
+ !(tcx.sess.opts.cg.linker_plugin_lto.enabled()
+ && tcx.sess.target.is_like_windows
+ && tcx.sess.opts.cg.prefer_dynamic)
+ );
+
+ tcx.sess.target.is_like_windows &&
+ tcx.sess.crate_types().iter().any(|ct| *ct == CrateType::Rlib) &&
+ // ThinLTO can't handle this workaround in all cases, so we don't
+ // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing
+ // dynamic linking when linker plugin LTO is enabled.
+ !tcx.sess.opts.cg.linker_plugin_lto.enabled()
+}
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
new file mode 100644
index 000000000..a840b2709
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -0,0 +1,961 @@
+use crate::back::metadata::create_compressed_metadata_file;
+use crate::back::write::{
+ compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
+ submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
+};
+use crate::common::{IntPredicate, RealPredicate, TypeKind};
+use crate::meth;
+use crate::mir;
+use crate::mir::operand::OperandValue;
+use crate::mir::place::PlaceRef;
+use crate::traits::*;
+use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
+
+use rustc_attr as attr;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
+
+use rustc_data_structures::sync::par_iter;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::sync::ParallelIterator;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::middle::exported_symbols;
+use rustc_middle::middle::lang_items;
+use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_session::cgu_reuse_tracker::CguReuse;
+use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::{DebuggerVisualizerFile, DebuggerVisualizerType};
+use rustc_target::abi::{Align, VariantIdx};
+
+use std::collections::BTreeSet;
+use std::convert::TryFrom;
+use std::time::{Duration, Instant};
+
+use itertools::Itertools;
+
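+/// Maps a Rust comparison operator to the corresponding integer comparison
+/// predicate, e.g. `BinOpKind::Lt` becomes `IntSLT` for signed operands and
+/// `IntULT` for unsigned ones.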
+pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
+ match op {
+ hir::BinOpKind::Eq => IntPredicate::IntEQ,
+ hir::BinOpKind::Ne => IntPredicate::IntNE,
+ hir::BinOpKind::Lt => {
+ if signed {
+ IntPredicate::IntSLT
+ } else {
+ IntPredicate::IntULT
+ }
+ }
+ hir::BinOpKind::Le => {
+ if signed {
+ IntPredicate::IntSLE
+ } else {
+ IntPredicate::IntULE
+ }
+ }
+ hir::BinOpKind::Gt => {
+ if signed {
+ IntPredicate::IntSGT
+ } else {
+ IntPredicate::IntUGT
+ }
+ }
+ hir::BinOpKind::Ge => {
+ if signed {
+ IntPredicate::IntSGE
+ } else {
+ IntPredicate::IntUGE
+ }
+ }
+ op => bug!(
+ "comparison_op_to_icmp_predicate: expected comparison operator, \
+ found {:?}",
+ op
+ ),
+ }
+}
+
+pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
+ match op {
+ hir::BinOpKind::Eq => RealPredicate::RealOEQ,
+ hir::BinOpKind::Ne => RealPredicate::RealUNE,
+ hir::BinOpKind::Lt => RealPredicate::RealOLT,
+ hir::BinOpKind::Le => RealPredicate::RealOLE,
+ hir::BinOpKind::Gt => RealPredicate::RealOGT,
+ hir::BinOpKind::Ge => RealPredicate::RealOGE,
+ op => {
+ bug!(
+ "comparison_op_to_fcmp_predicate: expected comparison operator, \
+ found {:?}",
+ op
+ );
+ }
+ }
+}
+
+pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+ t: Ty<'tcx>,
+ ret_ty: Bx::Type,
+ op: hir::BinOpKind,
+) -> Bx::Value {
+ let signed = match t.kind() {
+ ty::Float(_) => {
+ let cmp = bin_op_to_fcmp_predicate(op);
+ let cmp = bx.fcmp(cmp, lhs, rhs);
+ return bx.sext(cmp, ret_ty);
+ }
+ ty::Uint(_) => false,
+ ty::Int(_) => true,
+ _ => bug!("compare_simd_types: invalid SIMD type"),
+ };
+
+ let cmp = bin_op_to_icmp_predicate(op, signed);
+ let cmp = bx.icmp(cmp, lhs, rhs);
+ // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
+ // to get the correctly sized type. This will compile to a single instruction
+ // once the IR is converted to assembly if the SIMD instruction is supported
+ // by the target architecture.
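+    // For example, comparing two `< 4 x i32 >` vectors yields a `< 4 x i1 >`
+    // mask, which is sign-extended to `< 4 x i32 >` (all-ones lanes for true).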
+ bx.sext(cmp, ret_ty)
+}
+
+/// Retrieves the information we are losing (making dynamic) in an unsizing
+/// adjustment.
+///
+/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
+/// where the new vtable for an object will be derived from the old one.
+pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ old_info: Option<Bx::Value>,
+) -> Bx::Value {
+ let cx = bx.cx();
+ let (source, target) =
+ cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, bx.param_env());
+ match (source.kind(), target.kind()) {
+ (&ty::Array(_, len), &ty::Slice(_)) => {
+ cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
+ }
+ (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+ let old_info =
+ old_info.expect("unsized_info: missing old info for trait upcasting coercion");
+ if data_a.principal_def_id() == data_b.principal_def_id() {
+ return old_info;
+ }
+
+ // trait upcasting coercion
+
+ let vptr_entry_idx =
+ cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
+
+ if let Some(entry_idx) = vptr_entry_idx {
+ let ptr_ty = cx.type_i8p();
+ let ptr_align = cx.tcx().data_layout.pointer_align.abi;
+ let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
+ let gep = bx.inbounds_gep(
+ ptr_ty,
+ llvtable,
+ &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
+ );
+ let new_vptr = bx.load(ptr_ty, gep, ptr_align);
+ bx.nonnull_metadata(new_vptr);
+ // VTable loads are invariant.
+ bx.set_invariant_load(new_vptr);
+ new_vptr
+ } else {
+ old_info
+ }
+ }
+ (_, &ty::Dynamic(ref data, ..)) => {
+ let vtable_ptr_ty = cx.scalar_pair_element_backend_type(
+ cx.layout_of(cx.tcx().mk_mut_ptr(target)),
+ 1,
+ true,
+ );
+ cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
+ }
+ _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
+ }
+}
+
+/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
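+///
+/// For example, unsizing `&[u8; 4]` to `&[u8]` returns the (cast) data pointer
+/// together with a constant length of `4` as the extra metadata.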
+pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ src: Bx::Value,
+ src_ty: Ty<'tcx>,
+ dst_ty: Ty<'tcx>,
+ old_info: Option<Bx::Value>,
+) -> (Bx::Value, Bx::Value) {
+ debug!("unsize_ptr: {:?} => {:?}", src_ty, dst_ty);
+ match (src_ty.kind(), dst_ty.kind()) {
+ (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+ | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
+ let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
+ (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+ let src_layout = bx.cx().layout_of(src_ty);
+ let dst_layout = bx.cx().layout_of(dst_ty);
+ if src_ty == dst_ty {
+ return (src, old_info.unwrap());
+ }
+ let mut result = None;
+ for i in 0..src_layout.fields.count() {
+ let src_f = src_layout.field(bx.cx(), i);
+ if src_f.is_zst() {
+ continue;
+ }
+
+ assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+ assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
+ assert_eq!(src_layout.size, src_f.size);
+
+ let dst_f = dst_layout.field(bx.cx(), i);
+ assert_ne!(src_f.ty, dst_f.ty);
+ assert_eq!(result, None);
+ result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
+ }
+ let (lldata, llextra) = result.unwrap();
+ let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
+ let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
+ // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
+ }
+ _ => bug!("unsize_ptr: called on bad types"),
+ }
+}
+
+/// Coerces `src`, which is a reference to a value of type `src_ty`,
+/// to a value of type `dst_ty`, and stores the result in `dst`.
+pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ src: PlaceRef<'tcx, Bx::Value>,
+ dst: PlaceRef<'tcx, Bx::Value>,
+) {
+ let src_ty = src.layout.ty;
+ let dst_ty = dst.layout.ty;
+ match (src_ty.kind(), dst_ty.kind()) {
+ (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
+ let (base, info) = match bx.load_operand(src).val {
+ OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)),
+ OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None),
+ OperandValue::Ref(..) => bug!(),
+ };
+ OperandValue::Pair(base, info).store(bx, dst);
+ }
+
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
+ let src_f = src.project_field(bx, i);
+ let dst_f = dst.project_field(bx, i);
+
+ if dst_f.layout.is_zst() {
+ continue;
+ }
+
+ if src_f.layout.ty == dst_f.layout.ty {
+ memcpy_ty(
+ bx,
+ dst_f.llval,
+ dst_f.align,
+ src_f.llval,
+ src_f.align,
+ src_f.layout,
+ MemFlags::empty(),
+ );
+ } else {
+ coerce_unsized_into(bx, src_f, dst_f);
+ }
+ }
+ }
+ _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty,),
+ }
+}
+
+pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ op: hir::BinOpKind,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ cast_shift_rhs(bx, op, lhs, rhs)
+}
+
+fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ op: hir::BinOpKind,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ // Shifts may have any size int on the rhs
+ if op.is_shift() {
+ let mut rhs_llty = bx.cx().val_ty(rhs);
+ let mut lhs_llty = bx.cx().val_ty(lhs);
+ if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
+ rhs_llty = bx.cx().element_type(rhs_llty)
+ }
+ if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
+ lhs_llty = bx.cx().element_type(lhs_llty)
+ }
+ let rhs_sz = bx.cx().int_width(rhs_llty);
+ let lhs_sz = bx.cx().int_width(lhs_llty);
+ if lhs_sz < rhs_sz {
+ bx.trunc(rhs, lhs_llty)
+ } else if lhs_sz > rhs_sz {
+            // FIXME(#1877): If shifting by negative values is no longer
+            // undefined in the future, then this is wrong.
+ bx.zext(rhs, lhs_llty)
+ } else {
+ rhs
+ }
+ } else {
+ rhs
+ }
+}
+
+/// Returns `true` if this session's target will use SEH-based unwinding.
+///
+/// This is only true for MSVC targets, and even then the 64-bit MSVC target
+/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
+/// 64-bit MinGW) instead of "full SEH".
+pub fn wants_msvc_seh(sess: &Session) -> bool {
+ sess.target.is_like_msvc
+}
+
+pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ dst: Bx::Value,
+ dst_align: Align,
+ src: Bx::Value,
+ src_align: Align,
+ layout: TyAndLayout<'tcx>,
+ flags: MemFlags,
+) {
+ let size = layout.size.bytes();
+ if size == 0 {
+ return;
+ }
+
+ bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
+}
+
+pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+ instance: Instance<'tcx>,
+) {
+ // this is an info! to allow collecting monomorphization statistics
+ // and to allow finding the last function before LLVM aborts from
+ // release builds.
+ info!("codegen_instance({})", instance);
+
+ mir::codegen_mir::<Bx>(cx, instance);
+}
+
+/// Creates the `main` function which will initialize the Rust runtime and call
+/// the user's `main` function.
+pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+) -> Option<Bx::Function> {
+ let (main_def_id, entry_type) = cx.tcx().entry_fn(())?;
+ let main_is_local = main_def_id.is_local();
+ let instance = Instance::mono(cx.tcx(), main_def_id);
+
+ if main_is_local {
+ // We want to create the wrapper in the same codegen unit as Rust's main
+ // function.
+ if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
+ return None;
+ }
+ } else if !cx.codegen_unit().is_primary() {
+ // We want to create the wrapper only when the codegen unit is the primary one
+ return None;
+ }
+
+ let main_llfn = cx.get_fn_addr(instance);
+
+ let use_start_lang_item = EntryFnType::Start != entry_type;
+ let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, use_start_lang_item);
+ return Some(entry_fn);
+
+ fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+ rust_main: Bx::Value,
+ rust_main_def_id: DefId,
+ use_start_lang_item: bool,
+ ) -> Bx::Function {
+ // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
+ // depending on whether the target needs `argc` and `argv` to be passed in.
+ let llfty = if cx.sess().target.main_needs_argc_argv {
+ cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
+ } else {
+ cx.type_func(&[], cx.type_int())
+ };
+
+ let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
+        // Given that `main()` has no arguments, its return type cannot have
+        // late-bound regions, since late-bound regions must appear in the
+        // argument listing.
+ let main_ret_ty = cx.tcx().normalize_erasing_regions(
+ ty::ParamEnv::reveal_all(),
+ main_ret_ty.no_bound_vars().unwrap(),
+ );
+
+ let Some(llfn) = cx.declare_c_main(llfty) else {
+ // FIXME: We should be smart and show a better diagnostic here.
+ let span = cx.tcx().def_span(rust_main_def_id);
+ cx.sess()
+ .struct_span_err(span, "entry symbol `main` declared multiple times")
+ .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
+ .emit();
+ cx.sess().abort_if_errors();
+ bug!();
+ };
+
+        // `main` should respect the same config for frame pointer elimination
+        // as the rest of the code.
+ cx.set_frame_pointer_type(llfn);
+ cx.apply_target_cpu_attr(llfn);
+
+ let llbb = Bx::append_block(&cx, llfn, "top");
+ let mut bx = Bx::build(&cx, llbb);
+
+ bx.insert_reference_to_gdb_debug_scripts_section_global();
+
+ let isize_ty = cx.type_isize();
+ let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
+ let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
+
+ let (start_fn, start_ty, args) = if use_start_lang_item {
+ let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
+ let start_fn = cx.get_fn_addr(
+ ty::Instance::resolve(
+ cx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ start_def_id,
+ cx.tcx().intern_substs(&[main_ret_ty.into()]),
+ )
+ .unwrap()
+ .unwrap(),
+ );
+ let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty], isize_ty);
+ (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv])
+ } else {
+ debug!("using user-defined start fn");
+ let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
+ (rust_main, start_ty, vec![arg_argc, arg_argv])
+ };
+
+ let result = bx.call(start_ty, start_fn, &args, None);
+ let cast = bx.intcast(result, cx.type_int(), true);
+ bx.ret(cast);
+
+ llfn
+ }
+}
+
+/// Obtain the `argc` and `argv` values to pass to the rust start function.
+fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+ bx: &mut Bx,
+) -> (Bx::Value, Bx::Value) {
+ if cx.sess().target.main_needs_argc_argv {
+        // Params from the native `main()` are used as args for the Rust start function.
+ let param_argc = bx.get_param(0);
+ let param_argv = bx.get_param(1);
+ let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
+ let arg_argv = param_argv;
+ (arg_argc, arg_argv)
+ } else {
+ // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
+ let arg_argc = bx.const_int(cx.type_int(), 0);
+ let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
+ (arg_argc, arg_argv)
+ }
+}
+
+/// This function returns all of the debugger visualizers specified for the
+/// current crate as well as all upstream crates transitively that match the
+/// `visualizer_type` specified.
+pub fn collect_debugger_visualizers_transitive(
+ tcx: TyCtxt<'_>,
+ visualizer_type: DebuggerVisualizerType,
+) -> BTreeSet<DebuggerVisualizerFile> {
+ tcx.debugger_visualizers(LOCAL_CRATE)
+ .iter()
+ .chain(
+ tcx.crates(())
+ .iter()
+ .filter(|&cnum| {
+ let used_crate_source = tcx.used_crate_source(*cnum);
+ used_crate_source.rlib.is_some() || used_crate_source.rmeta.is_some()
+ })
+ .flat_map(|&cnum| tcx.debugger_visualizers(cnum)),
+ )
+ .filter(|visualizer| visualizer.visualizer_type == visualizer_type)
+ .cloned()
+ .collect::<BTreeSet<_>>()
+}
+
+pub fn codegen_crate<B: ExtraBackendMethods>(
+ backend: B,
+ tcx: TyCtxt<'_>,
+ target_cpu: String,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+) -> OngoingCodegen<B> {
+ // Skip crate items and just output metadata in -Z no-codegen mode.
+ if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
+ let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, metadata, None, 1);
+
+ ongoing_codegen.codegen_finished(tcx);
+
+ ongoing_codegen.check_for_errors(tcx.sess);
+
+ return ongoing_codegen;
+ }
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
+
+ // Run the monomorphization collector and partition the collected items into
+ // codegen units.
+ let codegen_units = tcx.collect_and_partition_mono_items(()).1;
+
+ // Force all codegen_unit queries so they are already either red or green
+ // when compile_codegen_unit accesses them. We are not able to re-execute
+ // the codegen_unit query from just the DepNode, so an unknown color would
+ // lead to having to re-execute compile_codegen_unit, possibly
+ // unnecessarily.
+ if tcx.dep_graph.is_fully_enabled() {
+ for cgu in codegen_units {
+ tcx.ensure().codegen_unit(cgu.name());
+ }
+ }
+
+ let metadata_module = if need_metadata_module {
+ // Emit compressed metadata object.
+ let metadata_cgu_name =
+ cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
+ tcx.sess.time("write_compressed_metadata", || {
+ let file_name =
+ tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+ let data = create_compressed_metadata_file(
+ tcx.sess,
+ &metadata,
+ &exported_symbols::metadata_symbol_name(tcx),
+ );
+ if let Err(err) = std::fs::write(&file_name, data) {
+ tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
+ }
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(file_name),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ })
+ } else {
+ None
+ };
+
+ let ongoing_codegen = start_async_codegen(
+ backend.clone(),
+ tcx,
+ target_cpu,
+ metadata,
+ metadata_module,
+ codegen_units.len(),
+ );
+
+ // Codegen an allocator shim, if necessary.
+ //
+ // If the crate doesn't have an `allocator_kind` set then there's definitely
+ // no shim to generate. Otherwise we also check our dependency graph for all
+    // our output crate types. If anything there looks like it's a `Dynamic`
+ // linkage, then it's already got an allocator shim and we'll be using that
+ // one instead. If nothing exists then it's our job to generate the
+ // allocator!
+ let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
+ use rustc_middle::middle::dependency_format::Linkage;
+ list.iter().any(|&linkage| linkage == Linkage::Dynamic)
+ });
+ let allocator_module = if any_dynamic_crate {
+ None
+ } else if let Some(kind) = tcx.allocator_kind(()) {
+ let llmod_id =
+ cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
+ let module_llvm = tcx.sess.time("write_allocator_module", || {
+ backend.codegen_allocator(tcx, &llmod_id, kind, tcx.lang_items().oom().is_some())
+ });
+
+ Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
+ } else {
+ None
+ };
+
+ if let Some(allocator_module) = allocator_module {
+ ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
+ }
+
+ // For better throughput during parallel processing by LLVM, we used to sort
+ // CGUs largest to smallest. This would lead to better thread utilization
+ // by, for example, preventing a large CGU from being processed last and
+ // having only one LLVM thread working while the rest remained idle.
+ //
+ // However, this strategy would lead to high memory usage, as it meant the
+ // LLVM-IR for all of the largest CGUs would be resident in memory at once.
+ //
+ // Instead, we can compromise by ordering CGUs such that the largest and
+ // smallest are first, second largest and smallest are next, etc. If there
+ // are large size variations, this can reduce memory usage significantly.
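+    //
+    // For example, CGUs with estimated sizes [1, 3, 5, 7, 9] are reordered by
+    // the interleaving below to [9, 1, 7, 3, 5].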
+ let codegen_units: Vec<_> = {
+ let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
+ sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());
+
+ let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
+ second_half.iter().rev().interleave(first_half).copied().collect()
+ };
+
+ // Calculate the CGU reuse
+ let cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
+ codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
+ });
+
+ let mut total_codegen_time = Duration::new(0, 0);
+ let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());
+
+ // The non-parallel compiler can only translate codegen units to LLVM IR
+ // on a single thread, leading to a staircase effect where the N LLVM
+    // threads have to wait on the single codegen thread to generate work
+ // for them. The parallel compiler does not have this restriction, so
+ // we can pre-load the LLVM queue in parallel before handing off
+    // coordination to the OngoingCodegen scheduler.
+ //
+ // This likely is a temporary measure. Once we don't have to support the
+ // non-parallel compiler anymore, we can compile CGUs end-to-end in
+ // parallel and get rid of the complicated scheduling logic.
+ let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
+ tcx.sess.time("compile_first_CGU_batch", || {
+ // Try to find one CGU to compile per thread.
+ let cgus: Vec<_> = cgu_reuse
+ .iter()
+ .enumerate()
+ .filter(|&(_, reuse)| reuse == &CguReuse::No)
+ .take(tcx.sess.threads())
+ .collect();
+
+ // Compile the found CGUs in parallel.
+ let start_time = Instant::now();
+
+ let pre_compiled_cgus = par_iter(cgus)
+ .map(|(i, _)| {
+ let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
+ (i, module)
+ })
+ .collect();
+
+ total_codegen_time += start_time.elapsed();
+
+ pre_compiled_cgus
+ })
+ } else {
+ FxHashMap::default()
+ };
+
+ for (i, cgu) in codegen_units.iter().enumerate() {
+ ongoing_codegen.wait_for_signal_to_codegen_item();
+ ongoing_codegen.check_for_errors(tcx.sess);
+
+ let cgu_reuse = cgu_reuse[i];
+ tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
+
+ match cgu_reuse {
+ CguReuse::No => {
+ let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
+ cgu
+ } else {
+ let start_time = Instant::now();
+ let module = backend.compile_codegen_unit(tcx, cgu.name());
+ total_codegen_time += start_time.elapsed();
+ module
+ };
+ // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
+ // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
+ // compilation hang on post-monomorphization errors.
+ tcx.sess.abort_if_errors();
+
+ submit_codegened_module_to_llvm(
+ &backend,
+ &ongoing_codegen.coordinator.sender,
+ module,
+ cost,
+ );
+ false
+ }
+ CguReuse::PreLto => {
+ submit_pre_lto_module_to_llvm(
+ &backend,
+ tcx,
+ &ongoing_codegen.coordinator.sender,
+ CachedModuleCodegen {
+ name: cgu.name().to_string(),
+ source: cgu.previous_work_product(tcx),
+ },
+ );
+ true
+ }
+ CguReuse::PostLto => {
+ submit_post_lto_module_to_llvm(
+ &backend,
+ &ongoing_codegen.coordinator.sender,
+ CachedModuleCodegen {
+ name: cgu.name().to_string(),
+ source: cgu.previous_work_product(tcx),
+ },
+ );
+ true
+ }
+ };
+ }
+
+ ongoing_codegen.codegen_finished(tcx);
+
+    // Since the main thread is sometimes blocked during codegen, we keep track
+    // of -Ztime-passes output manually.
+ if tcx.sess.time_passes() {
+ let end_rss = get_resident_set_size();
+
+ print_time_passes_entry(
+ "codegen_to_LLVM_IR",
+ total_codegen_time,
+ start_rss.unwrap(),
+ end_rss,
+ );
+ }
+
+ ongoing_codegen.check_for_errors(tcx.sess);
+ ongoing_codegen
+}
+
+impl CrateInfo {
+ pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
+ let exported_symbols = tcx
+ .sess
+ .crate_types()
+ .iter()
+ .map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
+ .collect();
+ let linked_symbols = tcx
+ .sess
+ .crate_types()
+ .iter()
+ .map(|&c| (c, crate::back::linker::linked_symbols(tcx, c)))
+ .collect();
+ let local_crate_name = tcx.crate_name(LOCAL_CRATE);
+ let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
+ let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
+ let windows_subsystem = subsystem.map(|subsystem| {
+ if subsystem != sym::windows && subsystem != sym::console {
+ tcx.sess.fatal(&format!(
+ "invalid windows subsystem `{}`, only \
+ `windows` and `console` are allowed",
+ subsystem
+ ));
+ }
+ subsystem.to_string()
+ });
+
+        // This list is used when generating the command line to pass through to the
+        // system linker. The linker expects undefined symbols on the left of the
+ // command line to be defined in libraries on the right, not the other way
+ // around. For more info, see some comments in the add_used_library function
+ // below.
+ //
+ // In order to get this left-to-right dependency ordering, we use the reverse
+ // postorder of all crates putting the leaves at the right-most positions.
+ let used_crates = tcx
+ .postorder_cnums(())
+ .iter()
+ .rev()
+ .copied()
+ .filter(|&cnum| !tcx.dep_kind(cnum).macros_only())
+ .collect();
+
+ let mut info = CrateInfo {
+ target_cpu,
+ exported_symbols,
+ linked_symbols,
+ local_crate_name,
+ compiler_builtins: None,
+ profiler_runtime: None,
+ is_no_builtins: Default::default(),
+ native_libraries: Default::default(),
+ used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
+ crate_name: Default::default(),
+ used_crates,
+ used_crate_source: Default::default(),
+ lang_item_to_crate: Default::default(),
+ missing_lang_items: Default::default(),
+ dependency_formats: tcx.dependency_formats(()).clone(),
+ windows_subsystem,
+ natvis_debugger_visualizers: Default::default(),
+ };
+ let lang_items = tcx.lang_items();
+
+ let crates = tcx.crates(());
+
+ let n_crates = crates.len();
+ info.native_libraries.reserve(n_crates);
+ info.crate_name.reserve(n_crates);
+ info.used_crate_source.reserve(n_crates);
+ info.missing_lang_items.reserve(n_crates);
+
+ for &cnum in crates.iter() {
+ info.native_libraries
+ .insert(cnum, tcx.native_libraries(cnum).iter().map(Into::into).collect());
+ info.crate_name.insert(cnum, tcx.crate_name(cnum));
+
+ let used_crate_source = tcx.used_crate_source(cnum);
+ info.used_crate_source.insert(cnum, used_crate_source.clone());
+ if tcx.is_compiler_builtins(cnum) {
+ info.compiler_builtins = Some(cnum);
+ }
+ if tcx.is_profiler_runtime(cnum) {
+ info.profiler_runtime = Some(cnum);
+ }
+ if tcx.is_no_builtins(cnum) {
+ info.is_no_builtins.insert(cnum);
+ }
+ let missing = tcx.missing_lang_items(cnum);
+ for &item in missing.iter() {
+ if let Ok(id) = lang_items.require(item) {
+ info.lang_item_to_crate.insert(item, id.krate);
+ }
+ }
+
+ // No need to look for lang items that don't actually need to exist.
+ let missing =
+ missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
+ info.missing_lang_items.insert(cnum, missing);
+ }
+
+ let embed_visualizers = tcx.sess.crate_types().iter().any(|&crate_type| match crate_type {
+ CrateType::Executable | CrateType::Dylib | CrateType::Cdylib => {
+ // These are crate types for which we invoke the linker and can embed
+ // NatVis visualizers.
+ true
+ }
+ CrateType::ProcMacro => {
+ // We could embed NatVis for proc macro crates too (to improve the debugging
+ // experience for them) but it does not seem like a good default, since
+ // this is a rare use case and we don't want to slow down the common case.
+ false
+ }
+ CrateType::Staticlib | CrateType::Rlib => {
+ // We don't invoke the linker for these, so we don't need to collect the NatVis for them.
+ false
+ }
+ });
+
+ if tcx.sess.target.is_like_msvc && embed_visualizers {
+ info.natvis_debugger_visualizers =
+ collect_debugger_visualizers_transitive(tcx, DebuggerVisualizerType::Natvis);
+ }
+
+ info
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.backend_optimization_level = |tcx, cratenum| {
+ let for_speed = match tcx.sess.opts.optimize {
+        // If no optimization is done globally, #[optimize] has no effect.
+ //
+        // This is done because if we ended up "upgrading" to `-O2` here, we'd populate the
+        // pass manager and it is likely that some module-wide passes (such as the inliner or
+        // cross-function constant propagation) would ignore the `optnone` annotation we put
+        // on the functions, thus pulling these functions into optimizations anyway.
+ config::OptLevel::No => return config::OptLevel::No,
+        // If an optimize-for-speed level is already specified globally, just use that level.
+ config::OptLevel::Less => return config::OptLevel::Less,
+ config::OptLevel::Default => return config::OptLevel::Default,
+ config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
+        // If globally optimize-for-size has been requested, use -O2 instead for
+        // any function annotated with #[optimize(speed)].
+ config::OptLevel::Size => config::OptLevel::Default,
+ config::OptLevel::SizeMin => config::OptLevel::Default,
+ };
+
+ let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
+ for id in &*defids {
+ let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
+ match optimize {
+ attr::OptimizeAttr::None => continue,
+ attr::OptimizeAttr::Size => continue,
+ attr::OptimizeAttr::Speed => {
+ return for_speed;
+ }
+ }
+ }
+ tcx.sess.opts.optimize
+ };
+}
+
+fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
+ if !tcx.dep_graph.is_fully_enabled() {
+ return CguReuse::No;
+ }
+
+ let work_product_id = &cgu.work_product_id();
+ if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
+ // We don't have anything cached for this CGU. This can happen
+ // if the CGU did not exist in the previous session.
+ return CguReuse::No;
+ }
+
+    // Try to mark the CGU as green. If we can do so, it means that nothing
+ // affecting the LLVM module has changed and we can re-use a cached version.
+ // If we compile with any kind of LTO, this means we can re-use the bitcode
+ // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
+ // know that later). If we are not doing LTO, there is only one optimized
+ // version of each module, so we re-use that.
+ let dep_node = cgu.codegen_dep_node(tcx);
+ assert!(
+ !tcx.dep_graph.dep_node_exists(&dep_node),
+ "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
+ cgu.name()
+ );
+
+ if tcx.try_mark_green(&dep_node) {
+        // We can re-use either the pre- or the post-ThinLTO state. If no LTO is
+        // being performed then we can use post-LTO artifacts, otherwise we must
+        // reuse pre-LTO artifacts.
+ match compute_per_cgu_lto_type(
+ &tcx.sess.lto(),
+ &tcx.sess.opts,
+ &tcx.sess.crate_types(),
+ ModuleKind::Regular,
+ ) {
+ ComputedLtoType::No => CguReuse::PostLto,
+ _ => CguReuse::PreLto,
+ }
+ } else {
+ CguReuse::No
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/common.rs b/compiler/rustc_codegen_ssa/src/common.rs
new file mode 100644
index 000000000..8ca1a6084
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/common.rs
@@ -0,0 +1,223 @@
+#![allow(non_camel_case_types)]
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::LangItem;
+use rustc_middle::mir::interpret::ConstValue;
+use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt};
+use rustc_session::Session;
+use rustc_span::Span;
+
+use crate::base;
+use crate::traits::*;
+
+#[derive(Copy, Clone)]
+pub enum IntPredicate {
+ IntEQ,
+ IntNE,
+ IntUGT,
+ IntUGE,
+ IntULT,
+ IntULE,
+ IntSGT,
+ IntSGE,
+ IntSLT,
+ IntSLE,
+}
+
+#[derive(Copy, Clone)]
+pub enum RealPredicate {
+ RealPredicateFalse,
+ RealOEQ,
+ RealOGT,
+ RealOGE,
+ RealOLT,
+ RealOLE,
+ RealONE,
+ RealORD,
+ RealUNO,
+ RealUEQ,
+ RealUGT,
+ RealUGE,
+ RealULT,
+ RealULE,
+ RealUNE,
+ RealPredicateTrue,
+}
+
+#[derive(Copy, Clone)]
+pub enum AtomicRmwBinOp {
+ AtomicXchg,
+ AtomicAdd,
+ AtomicSub,
+ AtomicAnd,
+ AtomicNand,
+ AtomicOr,
+ AtomicXor,
+ AtomicMax,
+ AtomicMin,
+ AtomicUMax,
+ AtomicUMin,
+}
+
+#[derive(Copy, Clone)]
+pub enum AtomicOrdering {
+ Unordered,
+ Relaxed,
+ Acquire,
+ Release,
+ AcquireRelease,
+ SequentiallyConsistent,
+}
+
+#[derive(Copy, Clone)]
+pub enum SynchronizationScope {
+ SingleThread,
+ CrossThread,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum TypeKind {
+ Void,
+ Half,
+ Float,
+ Double,
+ X86_FP80,
+ FP128,
+ PPC_FP128,
+ Label,
+ Integer,
+ Function,
+ Struct,
+ Array,
+ Pointer,
+ Vector,
+ Metadata,
+ X86_MMX,
+ Token,
+ ScalableVector,
+ BFloat,
+ X86_AMX,
+}
+
+// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement
+// the HashStable trait. Normally DepGraph::with_task() calls are
+// hidden behind queries, but CGU creation is a special case in two
+// ways: (1) it's not a query and (2) CGU are output nodes, so their
+// Fingerprints are not actually needed. It remains to be clarified
+// how exactly this case will be handled in the red/green system but
+// for now we content ourselves with providing a no-op HashStable
+// implementation for CGUs.
+mod temp_stable_hash_impls {
+ use crate::ModuleCodegen;
+ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+
+ impl<HCX, M> HashStable<HCX> for ModuleCodegen<M> {
+ fn hash_stable(&self, _: &mut HCX, _: &mut StableHasher) {
+ // do nothing
+ }
+ }
+}
+
+pub fn build_langcall<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &Bx,
+ span: Option<Span>,
+ li: LangItem,
+) -> (Bx::FnAbiOfResult, Bx::Value) {
+ let tcx = bx.tcx();
+ let def_id = tcx.require_lang_item(li, span);
+ let instance = ty::Instance::mono(tcx, def_id);
+ (bx.fn_abi_of_instance(instance, ty::List::empty()), bx.get_fn_addr(instance))
+}
+
+// To avoid UB from LLVM, these two functions mask RHS with an
+// appropriate mask unconditionally (i.e., the fallback behavior for
+// all shifts). For 32- and 64-bit types, this matches the semantics
+// of Java. (See related discussion on #1877 and #10183.)
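+//
+// For example, a 64-bit shift amount is masked with 63, so a shift by 65
+// becomes a shift by 1 rather than undefined behavior at the LLVM level.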
+
+pub fn build_unchecked_lshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
+ // #1877, #10183: Ensure that input is always valid
+ let rhs = shift_mask_rhs(bx, rhs);
+ bx.shl(lhs, rhs)
+}
+
+pub fn build_unchecked_rshift<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ lhs_t: Ty<'tcx>,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
+ // #1877, #10183: Ensure that input is always valid
+ let rhs = shift_mask_rhs(bx, rhs);
+ let is_signed = lhs_t.is_signed();
+ if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
+}
+
+fn shift_mask_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ rhs: Bx::Value,
+) -> Bx::Value {
+ let rhs_llty = bx.val_ty(rhs);
+ let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false);
+ bx.and(rhs, shift_val)
+}
+
+pub fn shift_mask_val<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ llty: Bx::Type,
+ mask_llty: Bx::Type,
+ invert: bool,
+) -> Bx::Value {
+ let kind = bx.type_kind(llty);
+ match kind {
+ TypeKind::Integer => {
+ // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
+ let val = bx.int_width(llty) - 1;
+ if invert {
+ bx.const_int(mask_llty, !val as i64)
+ } else {
+ bx.const_uint(mask_llty, val)
+ }
+ }
+ TypeKind::Vector => {
+ let mask =
+ shift_mask_val(bx, bx.element_type(llty), bx.element_type(mask_llty), invert);
+ bx.vector_splat(bx.vector_length(mask_llty), mask)
+ }
+ _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
+ }
+}
+
+pub fn span_invalid_monomorphization_error(sess: &Session, span: Span, msg: &str) {
+    struct_span_err!(sess, span, E0511, "{}", msg).emit();
+}
+
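+/// Renders a promoted inline-asm const as a decimal string. For example, the
+/// bit pattern `0xff` renders as `"255"` for a `u8` operand but as `"-1"` for
+/// an `i8` operand, via the sign-aware casts below.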
+pub fn asm_const_to_str<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sp: Span,
+ const_value: ConstValue<'tcx>,
+ ty_and_layout: TyAndLayout<'tcx>,
+) -> String {
+ let ConstValue::Scalar(scalar) = const_value else {
+ span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
+ };
+ let value = scalar.assert_bits(ty_and_layout.size);
+ match ty_and_layout.ty.kind() {
+ ty::Uint(_) => value.to_string(),
+ ty::Int(int_ty) => match int_ty.normalize(tcx.sess.target.pointer_width) {
+ ty::IntTy::I8 => (value as i8).to_string(),
+ ty::IntTy::I16 => (value as i16).to_string(),
+ ty::IntTy::I32 => (value as i32).to_string(),
+ ty::IntTy::I64 => (value as i64).to_string(),
+ ty::IntTy::I128 => (value as i128).to_string(),
+ ty::IntTy::Isize => unreachable!(),
+ },
+ _ => span_bug!(sp, "asm const has bad type {}", ty_and_layout.ty),
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
new file mode 100644
index 000000000..e288760a0
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/ffi.rs
@@ -0,0 +1,85 @@
+use rustc_middle::mir::coverage::{CounterValueReference, MappedExpressionIndex};
+
+/// Aligns with [llvm::coverage::Counter::CounterKind](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L95)
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum CounterKind {
+ Zero = 0,
+ CounterValueReference = 1,
+ Expression = 2,
+}
+
+/// A reference to an instance of an abstract "counter" that will yield a value in a coverage
+/// report. Note that `id` has different interpretations, depending on the `kind`:
+/// * For `CounterKind::Zero`, `id` is assumed to be `0`
+/// * For `CounterKind::CounterValueReference`, `id` matches the `counter_id` of the injected
+/// instrumentation counter (the `index` argument to the LLVM intrinsic
+/// `instrprof.increment()`)
+/// * For `CounterKind::Expression`, `id` is the index into the coverage map's array of
+/// counter expressions.
+///
+/// Aligns with [llvm::coverage::Counter](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L102-L103)
+/// Important: The Rust struct layout (order and types of fields) must match its C++ counterpart.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct Counter {
+ // Important: The layout (order and types of fields) must match its C++ counterpart.
+ pub kind: CounterKind,
+ id: u32,
+}
+
+impl Counter {
+ /// Constructs a new `Counter` of kind `Zero`. For this `CounterKind`, the
+ /// `id` is not used.
+ pub fn zero() -> Self {
+ Self { kind: CounterKind::Zero, id: 0 }
+ }
+
+ /// Constructs a new `Counter` of kind `CounterValueReference`, and converts
+ /// the given 1-based counter_id to the required 0-based equivalent for
+ /// the `Counter` encoding.
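+    ///
+    /// For example, the first injected counter has the 1-based
+    /// `CounterValueReference` `1` and is encoded here with the 0-based `id: 0`.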
+ pub fn counter_value_reference(counter_id: CounterValueReference) -> Self {
+ Self { kind: CounterKind::CounterValueReference, id: counter_id.zero_based_index() }
+ }
+
+ /// Constructs a new `Counter` of kind `Expression`.
+ pub fn expression(mapped_expression_index: MappedExpressionIndex) -> Self {
+ Self { kind: CounterKind::Expression, id: mapped_expression_index.into() }
+ }
+
+ /// Returns true if the `Counter` kind is `Zero`.
+ pub fn is_zero(&self) -> bool {
+ matches!(self.kind, CounterKind::Zero)
+ }
+
+ /// An explicitly-named function to get the ID value, making it more obvious
+ /// that the stored value is now 0-based.
+ pub fn zero_based_id(&self) -> u32 {
+ debug_assert!(!self.is_zero(), "`id` is undefined for CounterKind::Zero");
+ self.id
+ }
+}
+
+/// Aligns with [llvm::coverage::CounterExpression::ExprKind](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L150)
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub enum ExprKind {
+ Subtract = 0,
+ Add = 1,
+}
+
+/// Aligns with [llvm::coverage::CounterExpression](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/include/llvm/ProfileData/Coverage/CoverageMapping.h#L151-L152)
+/// Important: The Rust struct layout (order and types of fields) must match its C++
+/// counterpart.
+#[derive(Copy, Clone, Debug)]
+#[repr(C)]
+pub struct CounterExpression {
+ pub kind: ExprKind,
+ pub lhs: Counter,
+ pub rhs: Counter,
+}
+
+impl CounterExpression {
+ pub fn new(lhs: Counter, kind: ExprKind, rhs: Counter) -> Self {
+ Self { kind, lhs, rhs }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
new file mode 100644
index 000000000..1a6495cb1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/map.rs
@@ -0,0 +1,347 @@
+pub use super::ffi::*;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::coverage::{
+ CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId,
+ InjectedExpressionIndex, MappedExpressionIndex, Op,
+};
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::TyCtxt;
+
+#[derive(Clone, Debug, PartialEq)]
+pub struct Expression {
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+}
+
+/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
+/// expressions (additions or subtractions), and (c) unreachable regions (always counted as zero),
+/// for a given Function. Counters and counter expressions have non-overlapping `id`s because they
+/// can both be operands in an expression. This struct also stores the `function_source_hash`,
+/// computed during instrumentation, and forwarded with counters.
+///
+/// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap
+/// regions" (or "gap areas"). A gap region is a code region within a counted region (either counter
+/// or expression), but the line or lines in the gap region are not executable (such as lines with
+/// only whitespace or comments). According to the LLVM Code Coverage Mapping documentation, "A count
+/// for a gap area is only used as the line execution count if there are no other regions on a
+/// line."
+#[derive(Debug)]
+pub struct FunctionCoverage<'tcx> {
+ instance: Instance<'tcx>,
+ source_hash: u64,
+ is_used: bool,
+ counters: IndexVec<CounterValueReference, Option<CodeRegion>>,
+ expressions: IndexVec<InjectedExpressionIndex, Option<Expression>>,
+ unreachable_regions: Vec<CodeRegion>,
+}
+
+impl<'tcx> FunctionCoverage<'tcx> {
+ /// Creates a new set of coverage data for a used (called) function.
+ pub fn new(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ Self::create(tcx, instance, true)
+ }
+
+ /// Creates a new set of coverage data for an unused (never called) function.
+ pub fn unused(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> Self {
+ Self::create(tcx, instance, false)
+ }
+
+ fn create(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, is_used: bool) -> Self {
+ let coverageinfo = tcx.coverageinfo(instance.def);
+ debug!(
+ "FunctionCoverage::create(instance={:?}) has coverageinfo={:?}. is_used={}",
+ instance, coverageinfo, is_used
+ );
+ Self {
+ instance,
+ source_hash: 0, // will be set with the first `add_counter()`
+ is_used,
+ counters: IndexVec::from_elem_n(None, coverageinfo.num_counters as usize),
+ expressions: IndexVec::from_elem_n(None, coverageinfo.num_expressions as usize),
+ unreachable_regions: Vec::new(),
+ }
+ }
+
+ /// Returns true for a used (called) function, and false for an unused function.
+ pub fn is_used(&self) -> bool {
+ self.is_used
+ }
+
+ /// Sets the function source hash value. If called multiple times for the same function, all
+ /// calls should have the same hash value.
+ pub fn set_function_source_hash(&mut self, source_hash: u64) {
+ if self.source_hash == 0 {
+ self.source_hash = source_hash;
+ } else {
+ debug_assert_eq!(source_hash, self.source_hash);
+ }
+ }
+
+ /// Adds a code region to be counted by an injected counter intrinsic.
+ pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) {
+ if let Some(previous_region) = self.counters[id].replace(region.clone()) {
+ assert_eq!(previous_region, region, "add_counter: code region for id changed");
+ }
+ }
+
+ /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other
+ /// expressions. Expression IDs start from `u32::MAX` and go down, so the range of expression
+ /// IDs will not overlap with the range of counter IDs. Counters and expressions can be added in
+ /// any order, and expressions can still be assigned contiguous (though descending) IDs, without
+ /// knowing what the last counter ID will be.
+ ///
+ /// When storing the expression data in the `expressions` vector in the `FunctionCoverage`
+ /// struct, its vector index is computed, from the given expression ID, by subtracting from
+ /// `u32::MAX`.
+ ///
+ /// Since the expression operands (`lhs` and `rhs`) can reference either counters or
+ /// expressions, an operand that references an expression also uses its original ID, descending
+    /// from `u32::MAX`. These operands are translated only during code generation, after all
+ /// counters and expressions have been added.
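+    ///
+    /// For example, the first expression is given ID `u32::MAX` and stored at
+    /// vector index `0`; the next gets `u32::MAX - 1` and index `1`, and so on.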
+ pub fn add_counter_expression(
+ &mut self,
+ expression_id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+ ) {
+ debug!(
+ "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}",
+ expression_id, lhs, op, rhs, region
+ );
+ let expression_index = self.expression_index(u32::from(expression_id));
+ debug_assert!(
+ expression_index.as_usize() < self.expressions.len(),
+ "expression_index {} is out of range for expressions.len() = {}
+ for {:?}",
+ expression_index.as_usize(),
+ self.expressions.len(),
+ self,
+ );
+ if let Some(previous_expression) = self.expressions[expression_index].replace(Expression {
+ lhs,
+ op,
+ rhs,
+ region: region.clone(),
+ }) {
+ assert_eq!(
+ previous_expression,
+ Expression { lhs, op, rhs, region },
+ "add_counter_expression: expression for id changed"
+ );
+ }
+ }
+
+ /// Add a region that will be marked as "unreachable", with a constant "zero counter".
+ pub fn add_unreachable_region(&mut self, region: CodeRegion) {
+ self.unreachable_regions.push(region)
+ }
+
+ /// Return the source hash, generated from the HIR node structure, and used to indicate whether
+ /// or not the source code structure changed between different compilations.
+ pub fn source_hash(&self) -> u64 {
+ self.source_hash
+ }
+
+ /// Generate an array of CounterExpressions, and an iterator over all `Counter`s and their
+ /// associated `Regions` (from which the LLVM-specific `CoverageMapGenerator` will create
+    /// `CounterMappingRegion`s).
+ pub fn get_expressions_and_counter_regions(
+ &self,
+ ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
+ assert!(
+ self.source_hash != 0 || !self.is_used,
+ "No counters provided the source_hash for used function: {:?}",
+ self.instance
+ );
+
+ let counter_regions = self.counter_regions();
+ let (counter_expressions, expression_regions) = self.expressions_with_regions();
+ let unreachable_regions = self.unreachable_regions();
+
+ let counter_regions =
+ counter_regions.chain(expression_regions.into_iter().chain(unreachable_regions));
+ (counter_expressions, counter_regions)
+ }
+
+ fn counter_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+ self.counters.iter_enumerated().filter_map(|(index, entry)| {
+ // Option::map() will return None to filter out missing counters. This may happen
+ // if, for example, a MIR-instrumented counter is removed during an optimization.
+ entry.as_ref().map(|region| (Counter::counter_value_reference(index), region))
+ })
+ }
+
+ fn expressions_with_regions(
+ &self,
+ ) -> (Vec<CounterExpression>, impl Iterator<Item = (Counter, &CodeRegion)>) {
+ let mut counter_expressions = Vec::with_capacity(self.expressions.len());
+ let mut expression_regions = Vec::with_capacity(self.expressions.len());
+ let mut new_indexes = IndexVec::from_elem_n(None, self.expressions.len());
+
+ // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or
+ // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type
+ // and value. Operand ID value `0` maps to `CounterKind::Zero`; values in the known range
+ // of injected LLVM counters map to `CounterKind::CounterValueReference` (and the value
+ // matches the injected counter index); and any other value is converted into a
+ // `CounterKind::Expression` with the expression's `new_index`.
+ //
+ // Expressions will be returned from this function in a sequential vector (array) of
+ // `CounterExpression`, so the expression IDs must be mapped from their original,
+ // potentially sparse set of indexes, originally in reverse order from `u32::MAX`.
+ //
+ // An `Expression` as an operand will have already been encountered as an `Expression` with
+ // operands, so its new_index will already have been generated (as a 1-up index value).
+ // (If an `Expression` as an operand does not have a corresponding new_index, it was
+ // probably optimized out, after the expression was injected into the MIR, so it will
+ // get a `CounterKind::Zero` instead.)
+ //
+        // In other words, an `Expression` at any given index can include other expressions as
+ // operands, but expression operands can only come from the subset of expressions having
+ // `expression_index`s lower than the referencing `Expression`. Therefore, it is
+ // reasonable to look up the new index of an expression operand while the `new_indexes`
+ // vector is only complete up to the current `ExpressionIndex`.
+ let id_to_counter = |new_indexes: &IndexVec<
+ InjectedExpressionIndex,
+ Option<MappedExpressionIndex>,
+ >,
+ id: ExpressionOperandId| {
+ if id == ExpressionOperandId::ZERO {
+ Some(Counter::zero())
+ } else if id.index() < self.counters.len() {
+ debug_assert!(
+ id.index() > 0,
+ "ExpressionOperandId indexes for counters are 1-based, but this id={}",
+ id.index()
+ );
+                    // Note: Some codegen-injected Counters may only be referenced by
+                    // `Expression`s, and may not have their own `CodeRegion`s.
+ let index = CounterValueReference::from(id.index());
+ // Note, the conversion to LLVM `Counter` adjusts the index to be zero-based.
+ Some(Counter::counter_value_reference(index))
+ } else {
+ let index = self.expression_index(u32::from(id));
+ self.expressions
+ .get(index)
+ .expect("expression id is out of range")
+ .as_ref()
+ // If an expression was optimized out, assume it would have produced a count
+ // of zero. This ensures that expressions dependent on optimized-out
+ // expressions are still valid.
+ .map_or(Some(Counter::zero()), |_| new_indexes[index].map(Counter::expression))
+ }
+ };
+
+ for (original_index, expression) in
+ self.expressions.iter_enumerated().filter_map(|(original_index, entry)| {
+ // Option::map() will return None to filter out missing expressions. This may happen
+ // if, for example, a MIR-instrumented expression is removed during an optimization.
+ entry.as_ref().map(|expression| (original_index, expression))
+ })
+ {
+ let optional_region = &expression.region;
+ let Expression { lhs, op, rhs, .. } = *expression;
+
+ if let Some(Some((lhs_counter, mut rhs_counter))) = id_to_counter(&new_indexes, lhs)
+ .map(|lhs_counter| {
+ id_to_counter(&new_indexes, rhs).map(|rhs_counter| (lhs_counter, rhs_counter))
+ })
+ {
+ if lhs_counter.is_zero() && op.is_subtract() {
+ // The left side of a subtraction was probably optimized out. As an example,
+ // a branch condition might be evaluated as a constant expression, and the
+ // branch could be removed, dropping unused counters in the process.
+ //
+ // Since counters are unsigned, we must assume the result of the expression
+ // can be no more and no less than zero. An expression known to evaluate to zero
+ // does not need to be added to the coverage map.
+ //
+ // Coverage test `loops_branches.rs` includes multiple variations of branches
+ // based on constant conditional (literal `true` or `false`), and demonstrates
+ // that the expected counts are still correct.
+ debug!(
+ "Expression subtracts from zero (assume unreachable): \
+ original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+ original_index, lhs, op, rhs, optional_region,
+ );
+ rhs_counter = Counter::zero();
+ }
+ debug_assert!(
+ lhs_counter.is_zero()
+ // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+ || ((lhs_counter.zero_based_id() as usize)
+ <= usize::max(self.counters.len(), self.expressions.len())),
+ "lhs id={} > both counters.len()={} and expressions.len()={}
+ ({:?} {:?} {:?})",
+ lhs_counter.zero_based_id(),
+ self.counters.len(),
+ self.expressions.len(),
+ lhs_counter,
+ op,
+ rhs_counter,
+ );
+
+ debug_assert!(
+ rhs_counter.is_zero()
+ // Note: with `as usize` the ID _could_ overflow/wrap if `usize = u16`
+ || ((rhs_counter.zero_based_id() as usize)
+ <= usize::max(self.counters.len(), self.expressions.len())),
+ "rhs id={} > both counters.len()={} and expressions.len()={}
+ ({:?} {:?} {:?})",
+ rhs_counter.zero_based_id(),
+ self.counters.len(),
+ self.expressions.len(),
+ lhs_counter,
+ op,
+ rhs_counter,
+ );
+
+ // Both operands exist. `Expression` operands exist in `self.expressions` and have
+ // been assigned a `new_index`.
+ let mapped_expression_index =
+ MappedExpressionIndex::from(counter_expressions.len());
+ let expression = CounterExpression::new(
+ lhs_counter,
+ match op {
+ Op::Add => ExprKind::Add,
+ Op::Subtract => ExprKind::Subtract,
+ },
+ rhs_counter,
+ );
+ debug!(
+ "Adding expression {:?} = {:?}, region: {:?}",
+ mapped_expression_index, expression, optional_region
+ );
+ counter_expressions.push(expression);
+ new_indexes[original_index] = Some(mapped_expression_index);
+ if let Some(region) = optional_region {
+ expression_regions.push((Counter::expression(mapped_expression_index), region));
+ }
+ } else {
+ bug!(
+ "expression has one or more missing operands \
+ original_index={:?}, lhs={:?}, op={:?}, rhs={:?}, region={:?}",
+ original_index,
+ lhs,
+ op,
+ rhs,
+ optional_region,
+ );
+ }
+ }
+ (counter_expressions, expression_regions.into_iter())
+ }
+
+ fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> {
+ self.unreachable_regions.iter().map(|region| (Counter::zero(), region))
+ }
+
+ fn expression_index(&self, id_descending_from_max: u32) -> InjectedExpressionIndex {
+ debug_assert!(id_descending_from_max >= self.counters.len() as u32);
+ InjectedExpressionIndex::from(u32::MAX - id_descending_from_max)
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs b/compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs
new file mode 100644
index 000000000..569fd3f1a
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/coverageinfo/mod.rs
@@ -0,0 +1,2 @@
+pub mod ffi;
+pub mod map;
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/mod.rs b/compiler/rustc_codegen_ssa/src/debuginfo/mod.rs
new file mode 100644
index 000000000..6e3f4f0b8
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/mod.rs
@@ -0,0 +1,34 @@
+use rustc_middle::ty::{self, layout::TyAndLayout};
+use rustc_target::abi::Size;
+
+// FIXME(eddyb) find a place for this (or a way to replace it).
+pub mod type_names;
+
+/// Returns true if we want to generate a DW_TAG_enumeration_type description for
+/// this instead of a DW_TAG_struct_type with DW_TAG_variant_part.
+///
+/// NOTE: This is somewhat inconsistent right now: For empty enums and enums with a single
+/// fieldless variant, we generate DW_TAG_struct_type, although a
+/// DW_TAG_enumeration_type would be a better fit.
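+///
+/// For example, `enum Foo { A, B, C }` gets an enumeration-type description,
+/// while `enum Foo { A(u32), B }` does not, since one of its variants has a field.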
+pub fn wants_c_like_enum_debuginfo<'tcx>(enum_type_and_layout: TyAndLayout<'tcx>) -> bool {
+ match enum_type_and_layout.ty.kind() {
+ ty::Adt(adt_def, _) => {
+ if !adt_def.is_enum() {
+ return false;
+ }
+
+ match adt_def.variants().len() {
+ 0 => false,
+ 1 => {
+                    // Fieldless univariant enums, unless they are zero-sized
+ enum_type_and_layout.size != Size::ZERO && adt_def.all_fields().count() == 0
+ }
+ _ => {
+ // Enums with more than one variant if they have no fields
+ adt_def.all_fields().count() == 0
+ }
+ }
+ }
+ _ => false,
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
new file mode 100644
index 000000000..8cd5a0fc2
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/debuginfo/type_names.rs
@@ -0,0 +1,821 @@
+// Type Names for Debug Info.
+
+// Notes on targeting MSVC:
+// In general, MSVC's debugger attempts to parse all arguments as C++ expressions,
+// even if the argument is explicitly a symbol name.
+// As such, there are many things that cause parsing issues:
+// * `#` is treated as a special character for macros.
+// * `{` or `<` at the beginning of a name is treated as an operator.
+// * `>>` is always treated as a right-shift.
+// * `[` in a name is treated like a regex bracket expression (match any char
+// within the brackets).
+// * `"` is treated as the start of a string.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::def_id::DefId;
+use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathData};
+use rustc_hir::{AsyncGeneratorKind, GeneratorKind, Mutability};
+use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{self, ExistentialProjection, GeneratorSubsts, ParamEnv, Ty, TyCtxt};
+use rustc_target::abi::{Integer, TagEncoding, Variants};
+use smallvec::SmallVec;
+
+use std::borrow::Cow;
+use std::fmt::Write;
+
+use crate::debuginfo::wants_c_like_enum_debuginfo;
+
+// Compute the name of the type as it should be stored in debuginfo. Does not do
+// any caching, i.e., calling the function twice with the same type will also do
+// the work twice. The `qualified` parameter only affects the first level of the
+// type name, further levels (i.e., type parameters) are always fully qualified.
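+//
+// For example, under cpp-like (MSVC) debuginfo a tuple type is emitted using
+// the synthetic `tuple$<...>` form (see the `ty::Tuple` arm below) so that the
+// MSVC debugger can parse the name.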
+pub fn compute_debuginfo_type_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ t: Ty<'tcx>,
+ qualified: bool,
+) -> String {
+ let _prof = tcx.prof.generic_activity("compute_debuginfo_type_name");
+
+ let mut result = String::with_capacity(64);
+ let mut visited = FxHashSet::default();
+ push_debuginfo_type_name(tcx, t, qualified, &mut result, &mut visited);
+ result
+}
+
+// Pushes the name of the type as it should be stored in debuginfo onto the
+// `output` String. See also `compute_debuginfo_type_name()`.
+fn push_debuginfo_type_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ t: Ty<'tcx>,
+ qualified: bool,
+ output: &mut String,
+ visited: &mut FxHashSet<Ty<'tcx>>,
+) {
+ // When targeting MSVC, emit C++ style type names for compatibility with
+ // .natvis visualizers (and perhaps other existing native debuggers?)
+ let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
+
+ match *t.kind() {
+ ty::Bool => output.push_str("bool"),
+ ty::Char => output.push_str("char"),
+ ty::Str => output.push_str("str"),
+ ty::Never => {
+ if cpp_like_debuginfo {
+ output.push_str("never$");
+ } else {
+ output.push('!');
+ }
+ }
+ ty::Int(int_ty) => output.push_str(int_ty.name_str()),
+ ty::Uint(uint_ty) => output.push_str(uint_ty.name_str()),
+ ty::Float(float_ty) => output.push_str(float_ty.name_str()),
+ ty::Foreign(def_id) => push_item_name(tcx, def_id, qualified, output),
+ ty::Adt(def, substs) => {
+ // `layout_for_cpp_like_fallback` will be `Some` if we want to use the fallback encoding.
+ let layout_for_cpp_like_fallback = if cpp_like_debuginfo && def.is_enum() {
+ match tcx.layout_of(ParamEnv::reveal_all().and(t)) {
+ Ok(layout) => {
+ if !wants_c_like_enum_debuginfo(layout) {
+ Some(layout)
+ } else {
+ // This is a C-like enum so we don't want to use the fallback encoding
+ // for the name.
+ None
+ }
+ }
+ Err(e) => {
+ // Computing the layout can still fail here, e.g. if the target architecture
+ // cannot represent the type. See https://github.com/rust-lang/rust/issues/94961.
+ tcx.sess.fatal(&format!("{}", e));
+ }
+ }
+ } else {
+ // We are not emitting cpp-like debuginfo or this isn't even an enum.
+ None
+ };
+
+ if let Some(ty_and_layout) = layout_for_cpp_like_fallback {
+ msvc_enum_fallback(
+ tcx,
+ ty_and_layout,
+ &|output, visited| {
+ push_item_name(tcx, def.did(), true, output);
+ push_generic_params_internal(tcx, substs, output, visited);
+ },
+ output,
+ visited,
+ );
+ } else {
+ push_item_name(tcx, def.did(), qualified, output);
+ push_generic_params_internal(tcx, substs, output, visited);
+ }
+ }
+ ty::Tuple(component_types) => {
+ if cpp_like_debuginfo {
+ output.push_str("tuple$<");
+ } else {
+ output.push('(');
+ }
+
+ for component_type in component_types {
+ push_debuginfo_type_name(tcx, component_type, true, output, visited);
+ push_arg_separator(cpp_like_debuginfo, output);
+ }
+ if !component_types.is_empty() {
+ pop_arg_separator(output);
+ }
+
+ if cpp_like_debuginfo {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ } else {
+ output.push(')');
+ }
+ }
+ ty::RawPtr(ty::TypeAndMut { ty: inner_type, mutbl }) => {
+ if cpp_like_debuginfo {
+ match mutbl {
+ Mutability::Not => output.push_str("ptr_const$<"),
+ Mutability::Mut => output.push_str("ptr_mut$<"),
+ }
+ } else {
+ output.push('*');
+ match mutbl {
+ Mutability::Not => output.push_str("const "),
+ Mutability::Mut => output.push_str("mut "),
+ }
+ }
+
+ push_debuginfo_type_name(tcx, inner_type, qualified, output, visited);
+
+ if cpp_like_debuginfo {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ }
+ }
+ ty::Ref(_, inner_type, mutbl) => {
+            // Slices and `&str` are treated like C++ pointers when computing debug
+            // info for the MSVC debugger. However, wrapping these types' names in a synthetic type
+ // causes the .natvis engine for WinDbg to fail to display their data, so we opt these
+ // types out to aid debugging in MSVC.
+ let is_slice_or_str = matches!(*inner_type.kind(), ty::Slice(_) | ty::Str);
+
+ if !cpp_like_debuginfo {
+ output.push('&');
+ output.push_str(mutbl.prefix_str());
+ } else if !is_slice_or_str {
+ match mutbl {
+ Mutability::Not => output.push_str("ref$<"),
+ Mutability::Mut => output.push_str("ref_mut$<"),
+ }
+ }
+
+ push_debuginfo_type_name(tcx, inner_type, qualified, output, visited);
+
+ if cpp_like_debuginfo && !is_slice_or_str {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ }
+ }
+ ty::Array(inner_type, len) => {
+ if cpp_like_debuginfo {
+ output.push_str("array$<");
+ push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+ match len.kind() {
+ ty::ConstKind::Param(param) => write!(output, ",{}>", param.name).unwrap(),
+ _ => write!(output, ",{}>", len.eval_usize(tcx, ty::ParamEnv::reveal_all()))
+ .unwrap(),
+ }
+ } else {
+ output.push('[');
+ push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+ match len.kind() {
+ ty::ConstKind::Param(param) => write!(output, "; {}]", param.name).unwrap(),
+ _ => write!(output, "; {}]", len.eval_usize(tcx, ty::ParamEnv::reveal_all()))
+ .unwrap(),
+ }
+ }
+ }
+ ty::Slice(inner_type) => {
+ if cpp_like_debuginfo {
+ output.push_str("slice$<");
+ } else {
+ output.push('[');
+ }
+
+ push_debuginfo_type_name(tcx, inner_type, true, output, visited);
+
+ if cpp_like_debuginfo {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ } else {
+ output.push(']');
+ }
+ }
+ ty::Dynamic(ref trait_data, ..) => {
+ let auto_traits: SmallVec<[DefId; 4]> = trait_data.auto_traits().collect();
+
+ let has_enclosing_parens = if cpp_like_debuginfo {
+ output.push_str("dyn$<");
+ false
+ } else {
+ if trait_data.len() > 1 && auto_traits.len() != 0 {
+ // We need enclosing parens because there is more than one trait
+ output.push_str("(dyn ");
+ true
+ } else {
+ output.push_str("dyn ");
+ false
+ }
+ };
+
+ if let Some(principal) = trait_data.principal() {
+ let principal =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), principal);
+ push_item_name(tcx, principal.def_id, qualified, output);
+ let principal_has_generic_params =
+ push_generic_params_internal(tcx, principal.substs, output, visited);
+
+ let projection_bounds: SmallVec<[_; 4]> = trait_data
+ .projection_bounds()
+ .map(|bound| {
+ let ExistentialProjection { item_def_id, term, .. } =
+ tcx.erase_late_bound_regions(bound);
+ // FIXME(associated_const_equality): allow for consts here
+ (item_def_id, term.ty().unwrap())
+ })
+ .collect();
+
+ if projection_bounds.len() != 0 {
+ if principal_has_generic_params {
+ // push_generic_params_internal() above added a `>` but we actually
+ // want to add more items to that list, so remove that again...
+ pop_close_angle_bracket(output);
+ // .. and add a comma to separate the regular generic args from the
+ // associated types.
+ push_arg_separator(cpp_like_debuginfo, output);
+ } else {
+ // push_generic_params_internal() did not add `<...>`, so we open
+ // angle brackets here.
+ output.push('<');
+ }
+
+ for (item_def_id, ty) in projection_bounds {
+ if cpp_like_debuginfo {
+ output.push_str("assoc$<");
+ push_item_name(tcx, item_def_id, false, output);
+ push_arg_separator(cpp_like_debuginfo, output);
+ push_debuginfo_type_name(tcx, ty, true, output, visited);
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ } else {
+ push_item_name(tcx, item_def_id, false, output);
+ output.push('=');
+ push_debuginfo_type_name(tcx, ty, true, output, visited);
+ }
+ push_arg_separator(cpp_like_debuginfo, output);
+ }
+
+ pop_arg_separator(output);
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ }
+
+ if auto_traits.len() != 0 {
+ push_auto_trait_separator(cpp_like_debuginfo, output);
+ }
+ }
+
+ if auto_traits.len() != 0 {
+ let mut auto_traits: SmallVec<[String; 4]> = auto_traits
+ .into_iter()
+ .map(|def_id| {
+ let mut name = String::with_capacity(20);
+ push_item_name(tcx, def_id, true, &mut name);
+ name
+ })
+ .collect();
+ auto_traits.sort_unstable();
+
+ for auto_trait in auto_traits {
+ output.push_str(&auto_trait);
+ push_auto_trait_separator(cpp_like_debuginfo, output);
+ }
+
+ pop_auto_trait_separator(output);
+ }
+
+ if cpp_like_debuginfo {
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+ } else if has_enclosing_parens {
+ output.push(')');
+ }
+ }
+ ty::FnDef(..) | ty::FnPtr(_) => {
+ // We've encountered a weird 'recursive type'
+ // Currently, the only way to generate such a type
+ // is by using 'impl trait':
+ //
+ // fn foo() -> impl Copy { foo }
+ //
+ // There's not really a sensible name we can generate,
+ // since we don't include 'impl trait' types (e.g. ty::Opaque)
+ // in the output
+ //
+ // Since we need to generate *something*, we just
+ // use a dummy string that should make it clear
+ // that something unusual is going on
+ if !visited.insert(t) {
+ output.push_str(if cpp_like_debuginfo {
+ "recursive_type$"
+ } else {
+ "<recursive_type>"
+ });
+ return;
+ }
+
+ let sig =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), t.fn_sig(tcx));
+
+ if cpp_like_debuginfo {
+ // Format as a C++ function pointer: return_type (*)(params...)
+ if sig.output().is_unit() {
+ output.push_str("void");
+ } else {
+ push_debuginfo_type_name(tcx, sig.output(), true, output, visited);
+ }
+ output.push_str(" (*)(");
+ } else {
+ output.push_str(sig.unsafety.prefix_str());
+
+ if sig.abi != rustc_target::spec::abi::Abi::Rust {
+ output.push_str("extern \"");
+ output.push_str(sig.abi.name());
+ output.push_str("\" ");
+ }
+
+ output.push_str("fn(");
+ }
+
+ if !sig.inputs().is_empty() {
+ for &parameter_type in sig.inputs() {
+ push_debuginfo_type_name(tcx, parameter_type, true, output, visited);
+ push_arg_separator(cpp_like_debuginfo, output);
+ }
+ pop_arg_separator(output);
+ }
+
+ if sig.c_variadic {
+ if !sig.inputs().is_empty() {
+ output.push_str(", ...");
+ } else {
+ output.push_str("...");
+ }
+ }
+
+ output.push(')');
+
+ if !cpp_like_debuginfo && !sig.output().is_unit() {
+ output.push_str(" -> ");
+ push_debuginfo_type_name(tcx, sig.output(), true, output, visited);
+ }
+
+ // We only keep the type in 'visited'
+ // for the duration of the body of this method.
+ // It's fine for a particular function type
+ // to show up multiple times in one overall type
+        // (e.g. MyType<fn() -> u8, fn() -> u8>)
+ //
+ // We only care about avoiding recursing
+ // directly back to the type we're currently
+ // processing
+ visited.remove(&t);
+ }
+ ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => {
+ // Name will be "{closure_env#0}<T1, T2, ...>", "{generator_env#0}<T1, T2, ...>", or
+ // "{async_fn_env#0}<T1, T2, ...>", etc.
+ // In the case of cpp-like debuginfo, the name additionally gets wrapped inside of
+ // an artificial `enum$<>` type, as defined in msvc_enum_fallback().
+ if cpp_like_debuginfo && t.is_generator() {
+ let ty_and_layout = tcx.layout_of(ParamEnv::reveal_all().and(t)).unwrap();
+ msvc_enum_fallback(
+ tcx,
+ ty_and_layout,
+ &|output, visited| {
+ push_closure_or_generator_name(tcx, def_id, substs, true, output, visited);
+ },
+ output,
+ visited,
+ );
+ } else {
+ push_closure_or_generator_name(tcx, def_id, substs, qualified, output, visited);
+ }
+ }
+ // Type parameters from polymorphized functions.
+ ty::Param(_) => {
+ write!(output, "{:?}", t).unwrap();
+ }
+ ty::Error(_)
+ | ty::Infer(_)
+ | ty::Placeholder(..)
+ | ty::Projection(..)
+ | ty::Bound(..)
+ | ty::Opaque(..)
+ | ty::GeneratorWitness(..) => {
+ bug!(
+ "debuginfo: Trying to create type name for \
+ unexpected type: {:?}",
+ t
+ );
+ }
+ }
+
+    // MSVC names enums differently than other platforms so that the debugging visualization
+    // format (natvis) is able to understand enums and render the active variant correctly in the
+    // debugger. For more information, look in `src/etc/natvis/intrinsic.natvis` and
+    // `EnumMemberDescriptionFactory::create_member_descriptions`.
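+    //
+    // The synthesized name takes one of the forms `enum$<Name>`,
+    // `enum$<Name, Variant>` (single-variant layouts) or
+    // `enum$<Name, Min, Max, DatafulVariant>` (niche layouts).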
+ fn msvc_enum_fallback<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty_and_layout: TyAndLayout<'tcx>,
+ push_inner: &dyn Fn(/*output*/ &mut String, /*visited*/ &mut FxHashSet<Ty<'tcx>>),
+ output: &mut String,
+ visited: &mut FxHashSet<Ty<'tcx>>,
+ ) {
+ debug_assert!(!wants_c_like_enum_debuginfo(ty_and_layout));
+ let ty = ty_and_layout.ty;
+
+ output.push_str("enum$<");
+ push_inner(output, visited);
+
+ let variant_name = |variant_index| match ty.kind() {
+ ty::Adt(adt_def, _) => {
+ debug_assert!(adt_def.is_enum());
+ Cow::from(adt_def.variant(variant_index).name.as_str())
+ }
+ ty::Generator(..) => GeneratorSubsts::variant_name(variant_index),
+ _ => unreachable!(),
+ };
+
+ if let Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+ tag,
+ variants,
+ ..
+ } = &ty_and_layout.variants
+ {
+ let dataful_variant_layout = &variants[*dataful_variant];
+
+ // calculate the range of values for the dataful variant
+ let dataful_discriminant_range =
+ dataful_variant_layout.largest_niche().unwrap().valid_range;
+
+ let min = dataful_discriminant_range.start;
+ let min = tag.size(&tcx).truncate(min);
+
+ let max = dataful_discriminant_range.end;
+ let max = tag.size(&tcx).truncate(max);
+
+ let dataful_variant_name = variant_name(*dataful_variant);
+ write!(output, ", {}, {}, {}", min, max, dataful_variant_name).unwrap();
+ } else if let Variants::Single { index: variant_idx } = &ty_and_layout.variants {
+            // Uninhabited enums can't be constructed and should never need to be
+            // visualized, so skip this step for them.
+ if !ty_and_layout.abi.is_uninhabited() {
+ write!(output, ", {}", variant_name(*variant_idx)).unwrap();
+ }
+ }
+ push_close_angle_bracket(true, output);
+ }
+
+ const NON_CPP_AUTO_TRAIT_SEPARATOR: &str = " + ";
+
+ fn push_auto_trait_separator(cpp_like_debuginfo: bool, output: &mut String) {
+ if cpp_like_debuginfo {
+ push_arg_separator(cpp_like_debuginfo, output);
+ } else {
+ output.push_str(NON_CPP_AUTO_TRAIT_SEPARATOR);
+ }
+ }
+
+ fn pop_auto_trait_separator(output: &mut String) {
+ if output.ends_with(NON_CPP_AUTO_TRAIT_SEPARATOR) {
+ output.truncate(output.len() - NON_CPP_AUTO_TRAIT_SEPARATOR.len());
+ } else {
+ pop_arg_separator(output);
+ }
+ }
+}
+
+pub enum VTableNameKind {
+    // The name is for the const/static holding the vtable.
+    GlobalVariable,
+    // The name is for the type of the vtable.
+    Type,
+}
+
+/// Computes a name for the global variable storing a vtable (or the type of that global variable).
+///
+/// The name is of the form:
+///
+/// `<path::to::SomeType as path::to::SomeTrait>::{vtable}`
+///
+/// or, when generating C++-like names:
+///
+/// `impl$<path::to::SomeType, path::to::SomeTrait>::vtable$`
+///
+/// If `kind` is `VTableNameKind::Type` then the last component is `{vtable_ty}` instead of just
+/// `{vtable}`, so that the type and the corresponding global variable get assigned different
+/// names.
+pub fn compute_debuginfo_vtable_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ t: Ty<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+ kind: VTableNameKind,
+) -> String {
+ let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
+
+ let mut vtable_name = String::with_capacity(64);
+
+ if cpp_like_debuginfo {
+ vtable_name.push_str("impl$<");
+ } else {
+ vtable_name.push('<');
+ }
+
+ let mut visited = FxHashSet::default();
+ push_debuginfo_type_name(tcx, t, true, &mut vtable_name, &mut visited);
+
+ if cpp_like_debuginfo {
+ vtable_name.push_str(", ");
+ } else {
+ vtable_name.push_str(" as ");
+ }
+
+ if let Some(trait_ref) = trait_ref {
+ let trait_ref =
+ tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), trait_ref);
+ push_item_name(tcx, trait_ref.def_id, true, &mut vtable_name);
+ visited.clear();
+ push_generic_params_internal(tcx, trait_ref.substs, &mut vtable_name, &mut visited);
+ } else {
+ vtable_name.push_str("_");
+ }
+
+ push_close_angle_bracket(cpp_like_debuginfo, &mut vtable_name);
+
+ let suffix = match (cpp_like_debuginfo, kind) {
+ (true, VTableNameKind::GlobalVariable) => "::vtable$",
+ (false, VTableNameKind::GlobalVariable) => "::{vtable}",
+ (true, VTableNameKind::Type) => "::vtable_type$",
+ (false, VTableNameKind::Type) => "::{vtable_type}",
+ };
+
+ vtable_name.reserve_exact(suffix.len());
+ vtable_name.push_str(suffix);
+
+ vtable_name
+}
+
+pub fn push_item_name(tcx: TyCtxt<'_>, def_id: DefId, qualified: bool, output: &mut String) {
+ let def_key = tcx.def_key(def_id);
+ if qualified {
+ if let Some(parent) = def_key.parent {
+ push_item_name(tcx, DefId { krate: def_id.krate, index: parent }, true, output);
+ output.push_str("::");
+ }
+ }
+
+ push_unqualified_item_name(tcx, def_id, def_key.disambiguated_data, output);
+}
+
+fn generator_kind_label(generator_kind: Option<GeneratorKind>) -> &'static str {
+ match generator_kind {
+ Some(GeneratorKind::Async(AsyncGeneratorKind::Block)) => "async_block",
+ Some(GeneratorKind::Async(AsyncGeneratorKind::Closure)) => "async_closure",
+ Some(GeneratorKind::Async(AsyncGeneratorKind::Fn)) => "async_fn",
+ Some(GeneratorKind::Gen) => "generator",
+ None => "closure",
+ }
+}
+
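+/// Pushes `label$disambiguator` when emitting C++-like names and
+/// `{label#disambiguator}` otherwise, e.g. `closure$0` vs. `{closure#0}`.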
+fn push_disambiguated_special_name(
+ label: &str,
+ disambiguator: u32,
+ cpp_like_debuginfo: bool,
+ output: &mut String,
+) {
+ if cpp_like_debuginfo {
+ write!(output, "{}${}", label, disambiguator).unwrap();
+ } else {
+ write!(output, "{{{}#{}}}", label, disambiguator).unwrap();
+ }
+}
+
+fn push_unqualified_item_name(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ disambiguated_data: DisambiguatedDefPathData,
+ output: &mut String,
+) {
+ match disambiguated_data.data {
+ DefPathData::CrateRoot => {
+ output.push_str(tcx.crate_name(def_id.krate).as_str());
+ }
+ DefPathData::ClosureExpr => {
+ let label = generator_kind_label(tcx.generator_kind(def_id));
+
+ push_disambiguated_special_name(
+ label,
+ disambiguated_data.disambiguator,
+ cpp_like_debuginfo(tcx),
+ output,
+ );
+ }
+ _ => match disambiguated_data.data.name() {
+ DefPathDataName::Named(name) => {
+ output.push_str(name.as_str());
+ }
+ DefPathDataName::Anon { namespace } => {
+ push_disambiguated_special_name(
+ namespace.as_str(),
+ disambiguated_data.disambiguator,
+ cpp_like_debuginfo(tcx),
+ output,
+ );
+ }
+ },
+ };
+}
+
+fn push_generic_params_internal<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ output: &mut String,
+ visited: &mut FxHashSet<Ty<'tcx>>,
+) -> bool {
+ if substs.non_erasable_generics().next().is_none() {
+ return false;
+ }
+
+ debug_assert_eq!(substs, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substs));
+
+ let cpp_like_debuginfo = cpp_like_debuginfo(tcx);
+
+ output.push('<');
+
+ for type_parameter in substs.non_erasable_generics() {
+ match type_parameter {
+ GenericArgKind::Type(type_parameter) => {
+ push_debuginfo_type_name(tcx, type_parameter, true, output, visited);
+ }
+ GenericArgKind::Const(ct) => {
+ push_const_param(tcx, ct, output);
+ }
+ other => bug!("Unexpected non-erasable generic: {:?}", other),
+ }
+
+ push_arg_separator(cpp_like_debuginfo, output);
+ }
+ pop_arg_separator(output);
+ push_close_angle_bracket(cpp_like_debuginfo, output);
+
+ true
+}
+
+fn push_const_param<'tcx>(tcx: TyCtxt<'tcx>, ct: ty::Const<'tcx>, output: &mut String) {
+ match ct.kind() {
+ ty::ConstKind::Param(param) => {
+ write!(output, "{}", param.name)
+ }
+ _ => match ct.ty().kind() {
+ ty::Int(ity) => {
+ let bits = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
+ let val = Integer::from_int_ty(&tcx, *ity).size().sign_extend(bits) as i128;
+ write!(output, "{}", val)
+ }
+ ty::Uint(_) => {
+ let val = ct.eval_bits(tcx, ty::ParamEnv::reveal_all(), ct.ty());
+ write!(output, "{}", val)
+ }
+ ty::Bool => {
+ let val = ct.try_eval_bool(tcx, ty::ParamEnv::reveal_all()).unwrap();
+ write!(output, "{}", val)
+ }
+ _ => {
+                // If the constant's type is not one we can print directly, we fall
+                // back to emitting a stable hash value of the constant. This isn't
+                // very pretty, but we get a deterministic, virtually unique value
+                // for the constant.
+ //
+ // Let's only emit 64 bits of the hash value. That should be plenty for
+ // avoiding collisions and will make the emitted type names shorter.
+ let hash_short = tcx.with_stable_hashing_context(|mut hcx| {
+ let mut hasher = StableHasher::new();
+ let ct = ct.eval(tcx, ty::ParamEnv::reveal_all());
+ hcx.while_hashing_spans(false, |hcx| {
+ ct.to_valtree().hash_stable(hcx, &mut hasher)
+ });
+ // Note: Don't use `StableHashResult` impl of `u64` here directly, since that
+ // would lead to endianness problems.
+ let hash: u128 = hasher.finish();
+ (hash.to_le() as u64).to_le()
+ });
+
+ if cpp_like_debuginfo(tcx) {
+ write!(output, "CONST${:x}", hash_short)
+ } else {
+ write!(output, "{{CONST#{:x}}}", hash_short)
+ }
+ }
+ },
+ }
+ .unwrap();
+}
+
+pub fn push_generic_params<'tcx>(tcx: TyCtxt<'tcx>, substs: SubstsRef<'tcx>, output: &mut String) {
+ let _prof = tcx.prof.generic_activity("compute_debuginfo_type_name");
+ let mut visited = FxHashSet::default();
+ push_generic_params_internal(tcx, substs, output, &mut visited);
+}
+
+fn push_closure_or_generator_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ qualified: bool,
+ output: &mut String,
+ visited: &mut FxHashSet<Ty<'tcx>>,
+) {
+ // Name will be "{closure_env#0}<T1, T2, ...>", "{generator_env#0}<T1, T2, ...>", or
+ // "{async_fn_env#0}<T1, T2, ...>", etc.
+ let def_key = tcx.def_key(def_id);
+ let generator_kind = tcx.generator_kind(def_id);
+
+ if qualified {
+ let parent_def_id = DefId { index: def_key.parent.unwrap(), ..def_id };
+ push_item_name(tcx, parent_def_id, true, output);
+ output.push_str("::");
+ }
+
+ let mut label = String::with_capacity(20);
+ write!(&mut label, "{}_env", generator_kind_label(generator_kind)).unwrap();
+
+ push_disambiguated_special_name(
+ &label,
+ def_key.disambiguated_data.disambiguator,
+ cpp_like_debuginfo(tcx),
+ output,
+ );
+
+ // We also need to add the generic arguments of the async fn/generator or
+ // the enclosing function (for closures or async blocks), so that we end
+ // up with a unique name for every instantiation.
+
+ // Find the generics of the enclosing function, as defined in the source code.
+ let enclosing_fn_def_id = tcx.typeck_root_def_id(def_id);
+ let generics = tcx.generics_of(enclosing_fn_def_id);
+
+ // Truncate the substs to the length of the above generics. This will cut off
+ // anything closure- or generator-specific.
+ let substs = substs.truncate_to(tcx, generics);
+ push_generic_params_internal(tcx, substs, output, visited);
+}
+
+fn push_close_angle_bracket(cpp_like_debuginfo: bool, output: &mut String) {
+ // MSVC debugger always treats `>>` as a shift, even when parsing templates,
+ // so add a space to avoid confusion.
+ if cpp_like_debuginfo && output.ends_with('>') {
+ output.push(' ')
+ };
+
+ output.push('>');
+}
+
+fn pop_close_angle_bracket(output: &mut String) {
+ assert!(output.ends_with('>'), "'output' does not end with '>': {}", output);
+ output.pop();
+ if output.ends_with(' ') {
+ output.pop();
+ }
+}
+
+fn push_arg_separator(cpp_like_debuginfo: bool, output: &mut String) {
+    // Natvis does not always like having spaces between parts of the type name,
+    // and this causes issues when we need to write a typename in natvis, for
+    // example as part of a cast like the `HashMap` visualizer does.
+ if cpp_like_debuginfo {
+ output.push(',');
+ } else {
+ output.push_str(", ");
+ };
+}
+
+fn pop_arg_separator(output: &mut String) {
+ if output.ends_with(' ') {
+ output.pop();
+ }
+
+ assert!(output.ends_with(','));
+
+ output.pop();
+}
+
+/// Check if we should generate C++ like names and debug information.
+pub fn cpp_like_debuginfo(tcx: TyCtxt<'_>) -> bool {
+ tcx.sess.target.is_like_msvc
+}
diff --git a/compiler/rustc_codegen_ssa/src/glue.rs b/compiler/rustc_codegen_ssa/src/glue.rs
new file mode 100644
index 000000000..e6f402ef1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/glue.rs
@@ -0,0 +1,123 @@
+//! Code relating to drop glue.
+
+use crate::common::IntPredicate;
+use crate::meth;
+use crate::traits::*;
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::WrappingRange;
+
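+/// Computes the size and alignment of a value of type `t` as runtime values,
+/// where `info` carries the pointer metadata (vtable pointer or slice length)
+/// if `t` is unsized.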
+pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ t: Ty<'tcx>,
+ info: Option<Bx::Value>,
+) -> (Bx::Value, Bx::Value) {
+ let layout = bx.layout_of(t);
+ debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}", t, info, layout);
+ if !layout.is_unsized() {
+ let size = bx.const_usize(layout.size.bytes());
+ let align = bx.const_usize(layout.align.abi.bytes());
+ return (size, align);
+ }
+ match t.kind() {
+ ty::Dynamic(..) => {
+ // Load size/align from vtable.
+ let vtable = info.unwrap();
+ let size = meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_SIZE)
+ .get_usize(bx, vtable);
+ let align = meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_ALIGN)
+ .get_usize(bx, vtable);
+
+ // Alignment is always nonzero.
+ bx.range_metadata(align, WrappingRange { start: 1, end: !0 });
+
+ (size, align)
+ }
+ ty::Slice(_) | ty::Str => {
+ let unit = layout.field(bx, 0);
+            // The info in this case is the length of the slice (or str), so the
+            // size is that times the unit size.
+ (
+ // All slice sizes must fit into `isize`, so this multiplication cannot (signed) wrap.
+ // NOTE: ideally, we want the effects of both `unchecked_smul` and `unchecked_umul`
+ // (resulting in `mul nsw nuw` in LLVM IR), since we know that the multiplication
+ // cannot signed wrap, and that both operands are non-negative. But at the time of writing,
+ // `BuilderMethods` can't do this, and it doesn't seem to enable any further optimizations.
+ bx.unchecked_smul(info.unwrap(), bx.const_usize(unit.size.bytes())),
+ bx.const_usize(unit.align.abi.bytes()),
+ )
+ }
+ _ => {
+ // First get the size of all statically known fields.
+ // Don't use size_of because it also rounds up to alignment, which we
+ // want to avoid, as the unsized field's alignment could be smaller.
+ assert!(!t.is_simd());
+ debug!("DST {} layout: {:?}", t, layout);
+
+ let i = layout.fields.count() - 1;
+ let sized_size = layout.fields.offset(i).bytes();
+ let sized_align = layout.align.abi.bytes();
+ debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align);
+ let sized_size = bx.const_usize(sized_size);
+ let sized_align = bx.const_usize(sized_align);
+
+ // Recurse to get the size of the dynamically sized field (must be
+ // the last field).
+ let field_ty = layout.field(bx, i).ty;
+ let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);
+
+ // FIXME (#26403, #27023): We should be adding padding
+ // to `sized_size` (to accommodate the `unsized_align`
+ // required of the unsized field that follows) before
+ // summing it with `sized_size`. (Note that since #26403
+ // is unfixed, we do not yet add the necessary padding
+ // here. But this is where the add would go.)
+
+ // Return the sum of sizes and max of aligns.
+ let size = bx.add(sized_size, unsized_size);
+
+ // Packed types ignore the alignment of their fields.
+ if let ty::Adt(def, _) = t.kind() {
+ if def.repr().packed() {
+ unsized_align = sized_align;
+ }
+ }
+
+ // Choose max of two known alignments (combined value must
+ // be aligned according to more restrictive of the two).
+ let align = match (
+ bx.const_to_opt_u128(sized_align, false),
+ bx.const_to_opt_u128(unsized_align, false),
+ ) {
+ (Some(sized_align), Some(unsized_align)) => {
+ // If both alignments are constant, (the sized_align should always be), then
+ // pick the correct alignment statically.
+ bx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
+ }
+ _ => {
+ let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align);
+ bx.select(cmp, sized_align, unsized_align)
+ }
+ };
+
+ // Issue #27023: must add any necessary padding to `size`
+ // (to make it a multiple of `align`) before returning it.
+ //
+ // Namely, the returned size should be, in C notation:
+ //
+ // `size + ((size & (align-1)) ? align : 0)`
+ //
+ // emulated via the semi-standard fast bit trick:
+ //
+ // `(size + (align-1)) & -align`
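+            //
+            // For example, with `size = 10` and `align = 4`:
+            // `(10 + 3) & -4 == 13 & !3 == 12`.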
+ let one = bx.const_usize(1);
+ let addend = bx.sub(align, one);
+ let add = bx.add(size, addend);
+ let neg = bx.neg(align);
+ let size = bx.and(add, neg);
+
+ (size, align)
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs
new file mode 100644
index 000000000..1802eedf1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/lib.rs
@@ -0,0 +1,244 @@
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(box_patterns)]
+#![feature(try_blocks)]
+#![feature(let_else)]
+#![feature(once_cell)]
+#![feature(associated_type_bounds)]
+#![feature(strict_provenance)]
+#![feature(int_roundings)]
+#![feature(if_let_guard)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+//! This crate contains codegen code that is used by all codegen backends (LLVM and others).
+//! The backend-agnostic functions of this crate use functions defined in various traits that
+//! have to be implemented by each backend.
+
+#[macro_use]
+extern crate rustc_macros;
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+use rustc_ast as ast;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::Lrc;
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::LangItem;
+use rustc_middle::dep_graph::WorkProduct;
+use rustc_middle::middle::dependency_format::Dependencies;
+use rustc_middle::middle::exported_symbols::SymbolExportKind;
+use rustc_middle::ty::query::{ExternProviders, Providers};
+use rustc_serialize::opaque::{MemDecoder, MemEncoder};
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_session::config::{CrateType, OutputFilenames, OutputType, RUST_CGU_EXT};
+use rustc_session::cstore::{self, CrateSource};
+use rustc_session::utils::NativeLibKind;
+use rustc_span::symbol::Symbol;
+use rustc_span::DebuggerVisualizerFile;
+use std::collections::BTreeSet;
+use std::path::{Path, PathBuf};
+
+pub mod back;
+pub mod base;
+pub mod common;
+pub mod coverageinfo;
+pub mod debuginfo;
+pub mod glue;
+pub mod meth;
+pub mod mir;
+pub mod mono_item;
+pub mod target_features;
+pub mod traits;
+
+pub struct ModuleCodegen<M> {
+ /// The name of the module. When the crate may be saved between
+    /// compilations, incremental compilation requires that the name be
+ /// unique amongst **all** crates. Therefore, it should contain
+ /// something unique to this crate (e.g., a module path) as well
+ /// as the crate name and disambiguator.
+ /// We currently generate these names via CodegenUnit::build_cgu_name().
+ pub name: String,
+ pub module_llvm: M,
+ pub kind: ModuleKind,
+}
+
+impl<M> ModuleCodegen<M> {
+ pub fn into_compiled_module(
+ self,
+ emit_obj: bool,
+ emit_dwarf_obj: bool,
+ emit_bc: bool,
+ outputs: &OutputFilenames,
+ ) -> CompiledModule {
+ let object = emit_obj.then(|| outputs.temp_path(OutputType::Object, Some(&self.name)));
+ let dwarf_object = emit_dwarf_obj.then(|| outputs.temp_path_dwo(Some(&self.name)));
+ let bytecode = emit_bc.then(|| outputs.temp_path(OutputType::Bitcode, Some(&self.name)));
+
+ CompiledModule { name: self.name.clone(), kind: self.kind, object, dwarf_object, bytecode }
+ }
+}
+
+#[derive(Debug, Encodable, Decodable)]
+pub struct CompiledModule {
+ pub name: String,
+ pub kind: ModuleKind,
+ pub object: Option<PathBuf>,
+ pub dwarf_object: Option<PathBuf>,
+ pub bytecode: Option<PathBuf>,
+}
+
+pub struct CachedModuleCodegen {
+ pub name: String,
+ pub source: WorkProduct,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Encodable, Decodable)]
+pub enum ModuleKind {
+ Regular,
+ Metadata,
+ Allocator,
+}
+
+bitflags::bitflags! {
+ pub struct MemFlags: u8 {
+ const VOLATILE = 1 << 0;
+ const NONTEMPORAL = 1 << 1;
+ const UNALIGNED = 1 << 2;
+ }
+}
+
+#[derive(Clone, Debug, Encodable, Decodable, HashStable)]
+pub struct NativeLib {
+ pub kind: NativeLibKind,
+ pub name: Option<Symbol>,
+ pub cfg: Option<ast::MetaItem>,
+ pub verbatim: Option<bool>,
+ pub dll_imports: Vec<cstore::DllImport>,
+}
+
+impl From<&cstore::NativeLib> for NativeLib {
+ fn from(lib: &cstore::NativeLib) -> Self {
+ NativeLib {
+ kind: lib.kind,
+ name: lib.name,
+ cfg: lib.cfg.clone(),
+ verbatim: lib.verbatim,
+ dll_imports: lib.dll_imports.clone(),
+ }
+ }
+}
+
+/// Misc info we load from metadata to persist beyond the tcx.
+///
+/// Note: though `CrateNum` is only meaningful within the same tcx, information within `CrateInfo`
+/// is self-contained. `CrateNum` can be viewed as a unique identifier within a `CrateInfo`, where
+/// `used_crate_source` contains all `CrateSource` of the dependents, and maintains a mapping from
+/// identifiers (`CrateNum`) to `CrateSource`. The other fields map `CrateNum` to the crate's own
+/// additional properties, so that effectively we can retrieve each dependent crate's `CrateSource`
+/// and the corresponding properties without referencing information outside of a `CrateInfo`.
+#[derive(Debug, Encodable, Decodable)]
+pub struct CrateInfo {
+ pub target_cpu: String,
+ pub exported_symbols: FxHashMap<CrateType, Vec<String>>,
+ pub linked_symbols: FxHashMap<CrateType, Vec<(String, SymbolExportKind)>>,
+ pub local_crate_name: Symbol,
+ pub compiler_builtins: Option<CrateNum>,
+ pub profiler_runtime: Option<CrateNum>,
+ pub is_no_builtins: FxHashSet<CrateNum>,
+ pub native_libraries: FxHashMap<CrateNum, Vec<NativeLib>>,
+ pub crate_name: FxHashMap<CrateNum, Symbol>,
+ pub used_libraries: Vec<NativeLib>,
+ pub used_crate_source: FxHashMap<CrateNum, Lrc<CrateSource>>,
+ pub used_crates: Vec<CrateNum>,
+ pub lang_item_to_crate: FxHashMap<LangItem, CrateNum>,
+ pub missing_lang_items: FxHashMap<CrateNum, Vec<LangItem>>,
+ pub dependency_formats: Lrc<Dependencies>,
+ pub windows_subsystem: Option<String>,
+ pub natvis_debugger_visualizers: BTreeSet<DebuggerVisualizerFile>,
+}
+
+#[derive(Encodable, Decodable)]
+pub struct CodegenResults {
+ pub modules: Vec<CompiledModule>,
+ pub allocator_module: Option<CompiledModule>,
+ pub metadata_module: Option<CompiledModule>,
+ pub metadata: rustc_metadata::EncodedMetadata,
+ pub crate_info: CrateInfo,
+}
+
+pub fn provide(providers: &mut Providers) {
+ crate::back::symbol_export::provide(providers);
+ crate::base::provide(providers);
+ crate::target_features::provide(providers);
+}
+
+pub fn provide_extern(providers: &mut ExternProviders) {
+ crate::back::symbol_export::provide_extern(providers);
+}
+
+/// Checks if the given filename ends with the `.rcgu.o` extension that `rustc`
+/// uses for the object files it generates.
+pub fn looks_like_rust_object_file(filename: &str) -> bool {
+ let path = Path::new(filename);
+ let ext = path.extension().and_then(|s| s.to_str());
+ if ext != Some(OutputType::Object.extension()) {
+ // The file name does not end with ".o", so it can't be an object file.
+ return false;
+ }
+
+ // Strip the ".o" at the end
+ let ext2 = path.file_stem().and_then(|s| Path::new(s).extension()).and_then(|s| s.to_str());
+
+    // Check if the "inner" extension is the one rustc uses for codegen units ("rcgu").
+ ext2 == Some(RUST_CGU_EXT)
+}
+
+const RLINK_VERSION: u32 = 1;
+const RLINK_MAGIC: &[u8] = b"rustlink";
+
+const RUSTC_VERSION: Option<&str> = option_env!("CFG_VERSION");
+
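+// A serialized .rlink file produced by `serialize_rlink` below has the layout:
+//
+//     RLINK_MAGIC | RLINK_VERSION (u32, big-endian) | rustc version string | encoded CodegenResults
+//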
+impl CodegenResults {
+ pub fn serialize_rlink(codegen_results: &CodegenResults) -> Vec<u8> {
+ let mut encoder = MemEncoder::new();
+ encoder.emit_raw_bytes(RLINK_MAGIC);
+        // `emit_raw_bytes` is used to make sure that the version representation does not depend
+        // on the `Encoder`'s internal representation of `u32`.
+ encoder.emit_raw_bytes(&RLINK_VERSION.to_be_bytes());
+ encoder.emit_str(RUSTC_VERSION.unwrap());
+ Encodable::encode(codegen_results, &mut encoder);
+ encoder.finish()
+ }
+
+ pub fn deserialize_rlink(data: Vec<u8>) -> Result<Self, String> {
+ // The Decodable machinery is not used here because it panics if the input data is invalid
+ // and because its internal representation may change.
+ if !data.starts_with(RLINK_MAGIC) {
+ return Err("The input does not look like a .rlink file".to_string());
+ }
+ let data = &data[RLINK_MAGIC.len()..];
+ if data.len() < 4 {
+ return Err("The input does not contain version number".to_string());
+ }
+
+ let mut version_array: [u8; 4] = Default::default();
+ version_array.copy_from_slice(&data[..4]);
+ if u32::from_be_bytes(version_array) != RLINK_VERSION {
+ return Err(".rlink file was produced with encoding version {version_array}, but the current version is {RLINK_VERSION}".to_string());
+ }
+
+ let mut decoder = MemDecoder::new(&data[4..], 0);
+ let rustc_version = decoder.read_str();
+ let current_version = RUSTC_VERSION.unwrap();
+ if rustc_version != current_version {
+ return Err(format!(
+ ".rlink file was produced by rustc version {rustc_version}, but the current version is {current_version}."
+ ));
+ }
+
+ let codegen_results = CodegenResults::decode(&mut decoder);
+ Ok(codegen_results)
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
new file mode 100644
index 000000000..27d791d90
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -0,0 +1,116 @@
+use crate::traits::*;
+
+use rustc_middle::ty::{self, subst::GenericArgKind, ExistentialPredicate, Ty, TyCtxt};
+use rustc_session::config::Lto;
+use rustc_symbol_mangling::typeid_for_trait_ref;
+use rustc_target::abi::call::FnAbi;
+
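+/// An index into a vtable. The common entries at the start of every vtable
+/// include the size at `ty::COMMON_VTABLE_ENTRIES_SIZE` and the alignment at
+/// `ty::COMMON_VTABLE_ENTRIES_ALIGN` (see `glue::size_and_align_of_dst`);
+/// method pointers follow the common entries.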
+#[derive(Copy, Clone, Debug)]
+pub struct VirtualIndex(u64);
+
+impl<'a, 'tcx> VirtualIndex {
+ pub fn from_index(index: usize) -> Self {
+ VirtualIndex(index as u64)
+ }
+
+ pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
+ self,
+ bx: &mut Bx,
+ llvtable: Bx::Value,
+ ty: Ty<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ ) -> Bx::Value {
+ // Load the data pointer from the object.
+ debug!("get_fn({llvtable:?}, {ty:?}, {self:?})");
+ let llty = bx.fn_ptr_backend_type(fn_abi);
+ let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
+
+ if bx.cx().sess().opts.unstable_opts.virtual_function_elimination
+ && bx.cx().sess().lto() == Lto::Fat
+ {
+ let typeid =
+ bx.typeid_metadata(typeid_for_trait_ref(bx.tcx(), get_trait_ref(bx.tcx(), ty)));
+ let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes();
+ let type_checked_load = bx.type_checked_load(llvtable, vtable_byte_offset, typeid);
+ let func = bx.extract_value(type_checked_load, 0);
+ bx.pointercast(func, llty)
+ } else {
+ let ptr_align = bx.tcx().data_layout.pointer_align.abi;
+ let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
+ let ptr = bx.load(llty, gep, ptr_align);
+ bx.nonnull_metadata(ptr);
+ // VTable loads are invariant.
+ bx.set_invariant_load(ptr);
+ ptr
+ }
+ }
+
+ pub fn get_usize<Bx: BuilderMethods<'a, 'tcx>>(
+ self,
+ bx: &mut Bx,
+ llvtable: Bx::Value,
+ ) -> Bx::Value {
+ // Load the data pointer from the object.
+ debug!("get_int({:?}, {:?})", llvtable, self);
+
+ let llty = bx.type_isize();
+ let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
+ let usize_align = bx.tcx().data_layout.pointer_align.abi;
+ let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
+ let ptr = bx.load(llty, gep, usize_align);
+ // VTable loads are invariant.
+ bx.set_invariant_load(ptr);
+ ptr
+ }
+}
+
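+/// Walks `ty` for the first `dyn Trait` it contains and returns its
+/// existential trait ref (for projections, the underlying trait ref),
+/// ICEing via `bug!` if none is found.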
+fn get_trait_ref<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ty::PolyExistentialTraitRef<'tcx> {
+ for arg in ty.peel_refs().walk() {
+ if let GenericArgKind::Type(ty) = arg.unpack() {
+ if let ty::Dynamic(trait_refs, _) = ty.kind() {
+ return trait_refs[0].map_bound(|trait_ref| match trait_ref {
+ ExistentialPredicate::Trait(tr) => tr,
+ ExistentialPredicate::Projection(proj) => proj.trait_ref(tcx),
+ ExistentialPredicate::AutoTrait(_) => {
+ bug!("auto traits don't have functions")
+ }
+ });
+ }
+ }
+ }
+
+ bug!("expected a `dyn Trait` ty, found {ty:?}")
+}
+
+/// Creates a dynamic vtable for the given type and vtable origin.
+/// This is used only for objects.
+///
+/// The vtables are cached instead of created on every call.
+///
+/// The `trait_ref` encodes the erased self type. Hence if we are
+/// making an object `Foo<dyn Trait>` from a value of type `Foo<T>`, then
+/// `trait_ref` would map `T: Trait`.
+pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
+ cx: &Cx,
+ ty: Ty<'tcx>,
+ trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+) -> Cx::Value {
+ let tcx = cx.tcx();
+
+ debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref);
+
+ // Check the cache.
+ if let Some(&val) = cx.vtables().borrow().get(&(ty, trait_ref)) {
+ return val;
+ }
+
+ let vtable_alloc_id = tcx.vtable_allocation((ty, trait_ref));
+ let vtable_allocation = tcx.global_alloc(vtable_alloc_id).unwrap_memory();
+ let vtable_const = cx.const_data_from_alloc(vtable_allocation);
+ let align = cx.data_layout().pointer_align.abi;
+ let vtable = cx.static_addr_of(vtable_const, align, Some("vtable"));
+
+ cx.create_vtable_debuginfo(ty, trait_ref, vtable);
+ cx.vtables().borrow_mut().insert((ty, trait_ref), vtable);
+ vtable
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
new file mode 100644
index 000000000..24da48ead
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs
@@ -0,0 +1,368 @@
+//! An analysis to determine which locals require allocas and
+//! which do not.
+
+use super::FunctionCx;
+use crate::traits::*;
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::traversal;
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{self, Location, TerminatorKind};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+
+pub fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ fx: &FunctionCx<'a, 'tcx, Bx>,
+) -> BitSet<mir::Local> {
+ let mir = fx.mir;
+ let dominators = mir.basic_blocks.dominators();
+ let locals = mir
+ .local_decls
+ .iter()
+ .map(|decl| {
+ let ty = fx.monomorphize(decl.ty);
+ let layout = fx.cx.spanned_layout_of(ty, decl.source_info.span);
+ if layout.is_zst() {
+ LocalKind::ZST
+ } else if fx.cx.is_backend_immediate(layout) || fx.cx.is_backend_scalar_pair(layout) {
+ LocalKind::Unused
+ } else {
+ LocalKind::Memory
+ }
+ })
+ .collect();
+
+ let mut analyzer = LocalAnalyzer { fx, dominators, locals };
+
+ // Arguments get assigned to by means of the function being called
+ for arg in mir.args_iter() {
+ analyzer.assign(arg, mir::START_BLOCK.start_location());
+ }
+
+ // If there exists a local definition that dominates all uses of that local,
+ // the definition should be visited first. Traverse blocks in an order that
+ // is a topological sort of dominance partial order.
+ for (bb, data) in traversal::reverse_postorder(&mir) {
+ analyzer.visit_basic_block_data(bb, data);
+ }
+
+ let mut non_ssa_locals = BitSet::new_empty(analyzer.locals.len());
+ for (local, kind) in analyzer.locals.iter_enumerated() {
+ if matches!(kind, LocalKind::Memory) {
+ non_ssa_locals.insert(local);
+ }
+ }
+
+ non_ssa_locals
+}
+
+#[derive(Copy, Clone, PartialEq, Eq)]
+enum LocalKind {
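+    /// A local that is a zero-sized type and needs no storage at all.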
+ ZST,
+ /// A local that requires an alloca.
+ Memory,
+ /// A scalar or a scalar pair local that is neither defined nor used.
+ Unused,
+ /// A scalar or a scalar pair local with a single definition that dominates all uses.
+ SSA(mir::Location),
+}
+
+struct LocalAnalyzer<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+ fx: &'mir FunctionCx<'a, 'tcx, Bx>,
+ dominators: Dominators<mir::BasicBlock>,
+ locals: IndexVec<mir::Local, LocalKind>,
+}
+
+impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
+ fn assign(&mut self, local: mir::Local, location: Location) {
+ let kind = &mut self.locals[local];
+ match *kind {
+ LocalKind::ZST => {}
+ LocalKind::Memory => {}
+ LocalKind::Unused => {
+ *kind = LocalKind::SSA(location);
+ }
+ LocalKind::SSA(_) => {
+ *kind = LocalKind::Memory;
+ }
+ }
+ }
+
+ fn process_place(
+ &mut self,
+ place_ref: &mir::PlaceRef<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ let cx = self.fx.cx;
+
+ if let Some((place_base, elem)) = place_ref.last_projection() {
+ let mut base_context = if context.is_mutating_use() {
+ PlaceContext::MutatingUse(MutatingUseContext::Projection)
+ } else {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+ };
+
+ // Allow uses of projections that are ZSTs or from scalar fields.
+ let is_consume = matches!(
+ context,
+ PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+ )
+ );
+ if is_consume {
+ let base_ty = place_base.ty(self.fx.mir, cx.tcx());
+ let base_ty = self.fx.monomorphize(base_ty);
+
+ // ZSTs don't require any actual memory access.
+ let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(elem)).ty;
+ let span = self.fx.mir.local_decls[place_ref.local].source_info.span;
+ if cx.spanned_layout_of(elem_ty, span).is_zst() {
+ return;
+ }
+
+ if let mir::ProjectionElem::Field(..) = elem {
+ let layout = cx.spanned_layout_of(base_ty.ty, span);
+ if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) {
+ // Recurse with the same context, instead of `Projection`,
+ // potentially stopping at non-operand projections,
+ // which would trigger `not_ssa` on locals.
+ base_context = context;
+ }
+ }
+ }
+
+ if let mir::ProjectionElem::Deref = elem {
+ // Deref projections typically only read the pointer.
+ base_context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy);
+ }
+
+ self.process_place(&place_base, base_context, location);
+ // HACK(eddyb) this emulates the old `visit_projection_elem`, this
+ // entire `visit_place`-like `process_place` method should be rewritten,
+ // now that we have moved to the "slice of projections" representation.
+ if let mir::ProjectionElem::Index(local) = elem {
+ self.visit_local(
+ local,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+ location,
+ );
+ }
+ } else {
+ self.visit_local(place_ref.local, context, location);
+ }
+ }
+}
+
+impl<'mir, 'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
+ for LocalAnalyzer<'mir, 'a, 'tcx, Bx>
+{
+ fn visit_assign(
+ &mut self,
+ place: &mir::Place<'tcx>,
+ rvalue: &mir::Rvalue<'tcx>,
+ location: Location,
+ ) {
+ debug!("visit_assign(place={:?}, rvalue={:?})", place, rvalue);
+
+ if let Some(local) = place.as_local() {
+ self.assign(local, location);
+ if self.locals[local] != LocalKind::Memory {
+ let decl_span = self.fx.mir.local_decls[local].source_info.span;
+ if !self.fx.rvalue_creates_operand(rvalue, decl_span) {
+ self.locals[local] = LocalKind::Memory;
+ }
+ }
+ } else {
+ self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
+ }
+
+ self.visit_rvalue(rvalue, location);
+ }
+
+ fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
+ debug!("visit_place(place={:?}, context={:?})", place, context);
+ self.process_place(&place.as_ref(), context, location);
+ }
+
+ fn visit_local(&mut self, local: mir::Local, context: PlaceContext, location: Location) {
+ match context {
+ PlaceContext::MutatingUse(MutatingUseContext::Call)
+ | PlaceContext::MutatingUse(MutatingUseContext::Yield) => {
+ self.assign(local, location);
+ }
+
+ PlaceContext::NonUse(_) | PlaceContext::MutatingUse(MutatingUseContext::Retag) => {}
+
+ PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Copy | NonMutatingUseContext::Move,
+ ) => match &mut self.locals[local] {
+ LocalKind::ZST => {}
+ LocalKind::Memory => {}
+ LocalKind::SSA(def) if def.dominates(location, &self.dominators) => {}
+ // Reads from uninitialized variables (e.g., in dead code, after
+ // optimizations) require locals to be in (uninitialized) memory.
+ // N.B., there can be uninitialized reads of a local visited after
+ // an assignment to that local, if they happen on disjoint paths.
+ kind @ (LocalKind::Unused | LocalKind::SSA(_)) => {
+ *kind = LocalKind::Memory;
+ }
+ },
+
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Store
+ | MutatingUseContext::Deinit
+ | MutatingUseContext::SetDiscriminant
+ | MutatingUseContext::AsmOutput
+ | MutatingUseContext::Borrow
+ | MutatingUseContext::AddressOf
+ | MutatingUseContext::Projection,
+ )
+ | PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Inspect
+ | NonMutatingUseContext::SharedBorrow
+ | NonMutatingUseContext::UniqueBorrow
+ | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::AddressOf
+ | NonMutatingUseContext::Projection,
+ ) => {
+ self.locals[local] = LocalKind::Memory;
+ }
+
+ PlaceContext::MutatingUse(MutatingUseContext::Drop) => {
+ let kind = &mut self.locals[local];
+ if *kind != LocalKind::Memory {
+ let ty = self.fx.mir.local_decls[local].ty;
+ let ty = self.fx.monomorphize(ty);
+ if self.fx.cx.type_needs_drop(ty) {
+ // Only need the place if we're actually dropping it.
+ *kind = LocalKind::Memory;
+ }
+ }
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CleanupKind {
+ NotCleanup,
+ Funclet,
+ Internal { funclet: mir::BasicBlock },
+}
+
+impl CleanupKind {
+ pub fn funclet_bb(self, for_bb: mir::BasicBlock) -> Option<mir::BasicBlock> {
+ match self {
+ CleanupKind::NotCleanup => None,
+ CleanupKind::Funclet => Some(for_bb),
+ CleanupKind::Internal { funclet } => Some(funclet),
+ }
+ }
+}
+
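+/// Computes a `CleanupKind` for every basic block: `discover_masters` marks
+/// each unwind target as a `Funclet`, and `propagate` then assigns blocks
+/// reachable from a funclet to it (`Internal`), promoting any block reachable
+/// from two different funclets to a `Funclet` of its own.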
+pub fn cleanup_kinds(mir: &mir::Body<'_>) -> IndexVec<mir::BasicBlock, CleanupKind> {
+ fn discover_masters<'tcx>(
+ result: &mut IndexVec<mir::BasicBlock, CleanupKind>,
+ mir: &mir::Body<'tcx>,
+ ) {
+ for (bb, data) in mir.basic_blocks().iter_enumerated() {
+ match data.terminator().kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Unreachable
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => { /* nothing to do */ }
+ TerminatorKind::Call { cleanup: unwind, .. }
+ | TerminatorKind::InlineAsm { cleanup: unwind, .. }
+ | TerminatorKind::Assert { cleanup: unwind, .. }
+ | TerminatorKind::DropAndReplace { unwind, .. }
+ | TerminatorKind::Drop { unwind, .. } => {
+ if let Some(unwind) = unwind {
+ debug!(
+ "cleanup_kinds: {:?}/{:?} registering {:?} as funclet",
+ bb, data, unwind
+ );
+ result[unwind] = CleanupKind::Funclet;
+ }
+ }
+ }
+ }
+ }
+
+ fn propagate<'tcx>(result: &mut IndexVec<mir::BasicBlock, CleanupKind>, mir: &mir::Body<'tcx>) {
+ let mut funclet_succs = IndexVec::from_elem(None, mir.basic_blocks());
+
+ let mut set_successor = |funclet: mir::BasicBlock, succ| match funclet_succs[funclet] {
+ ref mut s @ None => {
+ debug!("set_successor: updating successor of {:?} to {:?}", funclet, succ);
+ *s = Some(succ);
+ }
+ Some(s) => {
+ if s != succ {
+ span_bug!(
+ mir.span,
+ "funclet {:?} has 2 parents - {:?} and {:?}",
+ funclet,
+ s,
+ succ
+ );
+ }
+ }
+ };
+
+ for (bb, data) in traversal::reverse_postorder(mir) {
+ let funclet = match result[bb] {
+ CleanupKind::NotCleanup => continue,
+ CleanupKind::Funclet => bb,
+ CleanupKind::Internal { funclet } => funclet,
+ };
+
+ debug!(
+ "cleanup_kinds: {:?}/{:?}/{:?} propagating funclet {:?}",
+ bb, data, result[bb], funclet
+ );
+
+ for succ in data.terminator().successors() {
+ let kind = result[succ];
+ debug!("cleanup_kinds: propagating {:?} to {:?}/{:?}", funclet, succ, kind);
+ match kind {
+ CleanupKind::NotCleanup => {
+ result[succ] = CleanupKind::Internal { funclet };
+ }
+ CleanupKind::Funclet => {
+ if funclet != succ {
+ set_successor(funclet, succ);
+ }
+ }
+ CleanupKind::Internal { funclet: succ_funclet } => {
+ if funclet != succ_funclet {
+                            // `succ` has 2 different funclets going into it, so it must
+                            // be a funclet by itself.
+
+ debug!(
+ "promoting {:?} to a funclet and updating {:?}",
+ succ, succ_funclet
+ );
+ result[succ] = CleanupKind::Funclet;
+ set_successor(succ_funclet, succ);
+ set_successor(funclet, succ);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ let mut result = IndexVec::from_elem(CleanupKind::NotCleanup, mir.basic_blocks());
+
+ discover_masters(&mut result, mir);
+ propagate(&mut result, mir);
+ debug!("cleanup_kinds: result={:?}", result);
+ result
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
new file mode 100644
index 000000000..3eee58d9d
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -0,0 +1,1654 @@
+use super::operand::OperandRef;
+use super::operand::OperandValue::{Immediate, Pair, Ref};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate};
+use crate::meth;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_ast as ast;
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::mir::{self, SwitchTargets};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
+use rustc_middle::ty::{self, Instance, Ty, TypeVisitable};
+use rustc_span::source_map::Span;
+use rustc_span::{sym, Symbol};
+use rustc_symbol_mangling::typeid::typeid_for_fnabi;
+use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
+use rustc_target::abi::{self, HasDataLayout, WrappingRange};
+use rustc_target::spec::abi::Abi;
+
+/// Used by `FunctionCx::codegen_terminator` for emitting common patterns
+/// e.g., creating a basic block, calling a function, etc.
+struct TerminatorCodegenHelper<'tcx> {
+ bb: mir::BasicBlock,
+ terminator: &'tcx mir::Terminator<'tcx>,
+ funclet_bb: Option<mir::BasicBlock>,
+}
+
+impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
+    /// Returns the appropriate `Funclet` for the current funclet, if on MSVC:
+    /// either one previously cached, or one newly created by `landing_pad_for`.
+ fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
+ ) -> Option<&'b Bx::Funclet> {
+ let funclet_bb = self.funclet_bb?;
+ if base::wants_msvc_seh(fx.cx.tcx().sess) {
+ // If `landing_pad_for` hasn't been called yet to create the `Funclet`,
+ // it has to be now. This may not seem necessary, as RPO should lead
+ // to all the unwind edges being visited (and so to `landing_pad_for`
+ // getting called for them), before building any of the blocks inside
+ // the funclet itself - however, if MIR contains edges that end up not
+ // being needed in the LLVM IR after monomorphization, the funclet may
+        // be unreachable, and we don't yet have a way to skip building it in
+ // such an eventuality (which may be a better solution than this).
+ if fx.funclets[funclet_bb].is_none() {
+ fx.landing_pad_for(funclet_bb);
+ }
+
+ Some(
+ fx.funclets[funclet_bb]
+ .as_ref()
+ .expect("landing_pad_for didn't also create funclets entry"),
+ )
+ } else {
+ None
+ }
+ }
+
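+    /// Returns the basic block to jump to for `target`, together with a flag
+    /// indicating whether the jump crosses funclet boundaries on MSVC and
+    /// therefore has to go through a `cleanup_ret` trampoline (see `llblock`).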
+ fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ target: mir::BasicBlock,
+ ) -> (Bx::BasicBlock, bool) {
+ let span = self.terminator.source_info.span;
+ let lltarget = fx.llbb(target);
+ let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
+ match (self.funclet_bb, target_funclet) {
+ (None, None) => (lltarget, false),
+ (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
+ (lltarget, false)
+ }
+ // jump *into* cleanup - need a landing pad if GNU, cleanup pad if MSVC
+ (None, Some(_)) => (fx.landing_pad_for(target), false),
+ (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
+ (Some(_), Some(_)) => (fx.landing_pad_for(target), true),
+ }
+ }
+
+ /// Create a basic block.
+ fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ target: mir::BasicBlock,
+ ) -> Bx::BasicBlock {
+ let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ if is_cleanupret {
+ // MSVC cross-funclet jump - need a trampoline
+
+ debug!("llblock: creating cleanup trampoline for {:?}", target);
+ let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
+ let trampoline = Bx::append_block(fx.cx, fx.llfn, name);
+ let mut trampoline_bx = Bx::build(fx.cx, trampoline);
+ trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+ trampoline
+ } else {
+ lltarget
+ }
+ }
+
+ fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ bx: &mut Bx,
+ target: mir::BasicBlock,
+ ) {
+ let (lltarget, is_cleanupret) = self.lltarget(fx, target);
+ if is_cleanupret {
+ // micro-optimization: generate a `ret` rather than a jump
+ // to a trampoline.
+ bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
+ } else {
+ bx.br(lltarget);
+ }
+ }
+
+ /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
+    /// return destination `destination` and the cleanup block `cleanup`.
+ fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ bx: &mut Bx,
+ fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
+ fn_ptr: Bx::Value,
+ llargs: &[Bx::Value],
+ destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
+ cleanup: Option<mir::BasicBlock>,
+ copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>],
+ ) {
+ // If there is a cleanup block and the function we're calling can unwind, then
+ // do an invoke, otherwise do a call.
+ let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+
+ let unwind_block = if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
+ Some(self.llblock(fx, cleanup))
+ } else if fx.mir[self.bb].is_cleanup
+ && fn_abi.can_unwind
+ && !base::wants_msvc_seh(fx.cx.tcx().sess)
+ {
+ // An exception must not propagate out of the execution of a cleanup
+ // (doing so can cause undefined behaviour). For functions that can
+ // potentially unwind, we insert a double unwind guard to protect
+ // against this.
+ //
+ // This is not necessary for SEH, which does not use successive
+ // unwinding like Itanium EH. EH frames in SEH are different from
+ // normal function frames, and SEH will abort automatically if an
+ // exception tries to propagate out of a cleanup.
+ Some(fx.double_unwind_guard())
+ } else {
+ None
+ };
+
+ if let Some(unwind_block) = unwind_block {
+ let ret_llbb = if let Some((_, target)) = destination {
+ fx.llbb(target)
+ } else {
+ fx.unreachable_block()
+ };
+ let invokeret =
+ bx.invoke(fn_ty, fn_ptr, &llargs, ret_llbb, unwind_block, self.funclet(fx));
+ bx.apply_attrs_callsite(&fn_abi, invokeret);
+ if fx.mir[self.bb].is_cleanup {
+ bx.do_not_inline(invokeret);
+ }
+
+ if let Some((ret_dest, target)) = destination {
+ bx.switch_to_block(fx.llbb(target));
+ fx.set_debug_loc(bx, self.terminator.source_info);
+ for tmp in copied_constant_arguments {
+ bx.lifetime_end(tmp.llval, tmp.layout.size);
+ }
+ fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
+ }
+ } else {
+ let llret = bx.call(fn_ty, fn_ptr, &llargs, self.funclet(fx));
+ bx.apply_attrs_callsite(&fn_abi, llret);
+ if fx.mir[self.bb].is_cleanup {
+ // Cleanup is always the cold path. Don't inline
+ // drop glue. Also, when there is a deeply-nested
+ // struct, there are "symmetry" issues that cause
+ // exponential inlining - see issue #41696.
+ bx.do_not_inline(llret);
+ }
+
+ if let Some((ret_dest, target)) = destination {
+ for tmp in copied_constant_arguments {
+ bx.lifetime_end(tmp.llval, tmp.layout.size);
+ }
+ fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
+ self.funclet_br(fx, bx, target);
+ } else {
+ bx.unreachable();
+ }
+ }
+ }
+
+ /// Generates inline assembly with optional `destination` and `cleanup`.
+ fn do_inlineasm<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ bx: &mut Bx,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperandRef<'tcx, Bx>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ destination: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ instance: Instance<'_>,
+ ) {
+ if let Some(cleanup) = cleanup {
+ let ret_llbb = if let Some(target) = destination {
+ fx.llbb(target)
+ } else {
+ fx.unreachable_block()
+ };
+
+ bx.codegen_inline_asm(
+ template,
+ &operands,
+ options,
+ line_spans,
+ instance,
+ Some((ret_llbb, self.llblock(fx, cleanup), self.funclet(fx))),
+ );
+ } else {
+ bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
+
+ if let Some(target) = destination {
+ self.funclet_br(fx, bx, target);
+ } else {
+ bx.unreachable();
+ }
+ }
+ }
+}
+
+/// Codegen implementations for some terminator variants.
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ /// Generates code for a `Resume` terminator.
+ fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+ if let Some(funclet) = helper.funclet(self) {
+ bx.cleanup_ret(funclet, None);
+ } else {
+ let slot = self.get_personality_slot(&mut bx);
+ let lp0 = slot.project_field(&mut bx, 0);
+ let lp0 = bx.load_operand(lp0).immediate();
+ let lp1 = slot.project_field(&mut bx, 1);
+ let lp1 = bx.load_operand(lp1).immediate();
+ slot.storage_dead(&mut bx);
+
+ let mut lp = bx.const_undef(self.landing_pad_type());
+ lp = bx.insert_value(lp, lp0, 0);
+ lp = bx.insert_value(lp, lp1, 1);
+ bx.resume(lp);
+ }
+ }
+
+ fn codegen_switchint_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ discr: &mir::Operand<'tcx>,
+ switch_ty: Ty<'tcx>,
+ targets: &SwitchTargets,
+ ) {
+ let discr = self.codegen_operand(&mut bx, &discr);
+ // `switch_ty` is redundant, sanity-check that.
+ assert_eq!(discr.layout.ty, switch_ty);
+ let mut target_iter = targets.iter();
+ if target_iter.len() == 1 {
+ // If there are two targets (one conditional, one fallback), emit br instead of switch
+ let (test_value, target) = target_iter.next().unwrap();
+ let lltrue = helper.llblock(self, target);
+ let llfalse = helper.llblock(self, targets.otherwise());
+ if switch_ty == bx.tcx().types.bool {
+ // Don't generate trivial icmps when switching on bool
+ match test_value {
+ 0 => bx.cond_br(discr.immediate(), llfalse, lltrue),
+ 1 => bx.cond_br(discr.immediate(), lltrue, llfalse),
+ _ => bug!(),
+ }
+ } else {
+ let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
+ let llval = bx.const_uint_big(switch_llty, test_value);
+ let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
+ bx.cond_br(cmp, lltrue, llfalse);
+ }
+ } else {
+ bx.switch(
+ discr.immediate(),
+ helper.llblock(self, targets.otherwise()),
+ target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
+ );
+ }
+ }
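+
+ // For intuition, a hedged sketch of the special cases above: switching on a
+ // `bool` (e.g. `if b { .. } else { .. }`) emits a single conditional branch,
+ //
+ //     br i1 %b, label %bb_true, label %bb_false
+ //
+ // with no `icmp`, while a `match` with several reachable arms still lowers
+ // to an LLVM `switch` whose default destination is the `otherwise` target.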
+
+ fn codegen_return_terminator(&mut self, mut bx: Bx) {
+ // Call `va_end` if this is the definition of a C-variadic function.
+ if self.fn_abi.c_variadic {
+ // The `VaList` "spoofed" argument is just after all the real arguments.
+ let va_list_arg_idx = self.fn_abi.args.len();
+ match self.locals[mir::Local::new(1 + va_list_arg_idx)] {
+ LocalRef::Place(va_list) => {
+ bx.va_end(va_list.llval);
+ }
+ _ => bug!("C-variadic function must have a `VaList` place"),
+ }
+ }
+ if self.fn_abi.ret.layout.abi.is_uninhabited() {
+ // Functions with uninhabited return values are marked `noreturn`,
+ // so we should make sure that we never actually return.
+ // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+ // if that turns out to be helpful.
+ bx.abort();
+ // `abort` does not terminate the block, so we still need to generate
+ // an `unreachable` terminator after it.
+ bx.unreachable();
+ return;
+ }
+ let llval = match self.fn_abi.ret.mode {
+ PassMode::Ignore | PassMode::Indirect { .. } => {
+ bx.ret_void();
+ return;
+ }
+
+ PassMode::Direct(_) | PassMode::Pair(..) => {
+ let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
+ if let Ref(llval, _, align) = op.val {
+ bx.load(bx.backend_type(op.layout), llval, align)
+ } else {
+ op.immediate_or_packed_pair(&mut bx)
+ }
+ }
+
+ PassMode::Cast(cast_ty) => {
+ let op = match self.locals[mir::RETURN_PLACE] {
+ LocalRef::Operand(Some(op)) => op,
+ LocalRef::Operand(None) => bug!("use of return before def"),
+ LocalRef::Place(cg_place) => OperandRef {
+ val: Ref(cg_place.llval, None, cg_place.align),
+ layout: cg_place.layout,
+ },
+ LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+ };
+ let llslot = match op.val {
+ Immediate(_) | Pair(..) => {
+ let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
+ op.val.store(&mut bx, scratch);
+ scratch.llval
+ }
+ Ref(llval, _, align) => {
+ assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
+ llval
+ }
+ };
+ let ty = bx.cast_backend_type(&cast_ty);
+ let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
+ bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
+ }
+ };
+ bx.ret(llval);
+ }
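+
+ // Illustrative example (hypothetical source, not from this crate): for a
+ // C-variadic declaration such as
+ //
+ //     unsafe extern "C" fn log_all(fmt: *const u8, mut args: ...) -> i32 { .. }
+ //
+ // the `...` is materialized as a trailing `VaList` local just past the
+ // declared arguments, and the `Return` path above calls `va_end` on it
+ // before emitting the actual return.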
+
+ fn codegen_drop_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ location: mir::Place<'tcx>,
+ target: mir::BasicBlock,
+ unwind: Option<mir::BasicBlock>,
+ ) {
+ let ty = location.ty(self.mir, bx.tcx()).ty;
+ let ty = self.monomorphize(ty);
+ let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
+
+ if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
+ // we don't actually need to drop anything.
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+
+ let place = self.codegen_place(&mut bx, location.as_ref());
+ let (args1, args2);
+ let mut args = if let Some(llextra) = place.llextra {
+ args2 = [place.llval, llextra];
+ &args2[..]
+ } else {
+ args1 = [place.llval];
+ &args1[..]
+ };
+ let (drop_fn, fn_abi) = match ty.kind() {
+ // FIXME(eddyb) perhaps move some of this logic into
+ // `Instance::resolve_drop_in_place`?
+ ty::Dynamic(..) => {
+ let virtual_drop = Instance {
+ def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
+ substs: drop_fn.substs,
+ };
+ let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
+ let vtable = args[1];
+ args = &args[..1];
+ (
+ meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
+ .get_fn(&mut bx, vtable, ty, &fn_abi),
+ fn_abi,
+ )
+ }
+ _ => (bx.get_fn_addr(drop_fn), bx.fn_abi_of_instance(drop_fn, ty::List::empty())),
+ };
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_abi,
+ drop_fn,
+ args,
+ Some((ReturnDest::Nothing, target)),
+ unwind,
+ &[],
+ );
+ }
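+
+ // Sketch of the virtual-drop case above: dropping a `Box<dyn Trait>` (or any
+ // other `ty::Dynamic` place) cannot resolve `drop_in_place` statically, so
+ // the drop glue pointer is loaded from the vtable carried in `llextra` (slot
+ // `COMMON_VTABLE_ENTRIES_DROPINPLACE`) and invoked with only the data
+ // pointer as its argument.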
+
+ fn codegen_assert_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ cond: &mir::Operand<'tcx>,
+ expected: bool,
+ msg: &mir::AssertMessage<'tcx>,
+ target: mir::BasicBlock,
+ cleanup: Option<mir::BasicBlock>,
+ ) {
+ let span = terminator.source_info.span;
+ let cond = self.codegen_operand(&mut bx, cond).immediate();
+ let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
+
+ // This case can currently arise only from functions marked
+ // with #[rustc_inherit_overflow_checks] and inlined from
+ // another crate (mostly core::num generic/#[inline] fns),
+ // while the current crate doesn't use overflow checks.
+ // NOTE: Unlike binops, negation doesn't have its own
+ // checked operation, just a comparison with the minimum
+ // value, so we have to check for the assert message.
+ if !bx.check_overflow() {
+ if let AssertKind::OverflowNeg(_) = *msg {
+ const_cond = Some(expected);
+ }
+ }
+
+ // Don't codegen the panic block if success is known.
+ if const_cond == Some(expected) {
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+
+ // Pass the condition through llvm.expect for branch hinting.
+ let cond = bx.expect(cond, expected);
+
+ // Create the failure block and the conditional branch to it.
+ let lltarget = helper.llblock(self, target);
+ let panic_block = bx.append_sibling_block("panic");
+ if expected {
+ bx.cond_br(cond, lltarget, panic_block);
+ } else {
+ bx.cond_br(cond, panic_block, lltarget);
+ }
+
+ // After this point, bx is the block for the call to panic.
+ bx.switch_to_block(panic_block);
+ self.set_debug_loc(&mut bx, terminator.source_info);
+
+ // Get the location information.
+ let location = self.get_caller_location(&mut bx, terminator.source_info).immediate();
+
+ // Put together the arguments to the panic entry point.
+ let (lang_item, args) = match msg {
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = self.codegen_operand(&mut bx, len).immediate();
+ let index = self.codegen_operand(&mut bx, index).immediate();
+ // It's `fn panic_bounds_check(index: usize, len: usize)`,
+ // and `#[track_caller]` adds an implicit third argument.
+ (LangItem::PanicBoundsCheck, vec![index, len, location])
+ }
+ _ => {
+ let msg = bx.const_str(msg.description());
+ // It's `pub fn panic(expr: &str)`, with the wide reference being passed
+ // as two arguments, and `#[track_caller]` adds an implicit third argument.
+ (LangItem::Panic, vec![msg.0, msg.1, location])
+ }
+ };
+
+ let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), lang_item);
+
+ // Codegen the actual panic invoke/call.
+ helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup, &[]);
+ }
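+
+ // Illustrative example: an indexing expression `a[i]` produces an `Assert`
+ // terminator with `AssertKind::BoundsCheck`; its condition is computed by
+ // earlier MIR statements, so the code above only branches on it and, on
+ // failure, calls the `PanicBoundsCheck` lang item as
+ // `panic_bounds_check(index, len, location)`, the `location` coming from
+ // the implicit `#[track_caller]` argument.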
+
+ fn codegen_abort_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ ) {
+ let span = terminator.source_info.span;
+ self.set_debug_loc(&mut bx, terminator.source_info);
+
+ // Obtain the panic entry point.
+ let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), LangItem::PanicNoUnwind);
+
+ // Codegen the actual panic invoke/call.
+ helper.do_call(self, &mut bx, fn_abi, llfn, &[], None, None, &[]);
+ }
+
+ /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
+ fn codegen_panic_intrinsic(
+ &mut self,
+ helper: &TerminatorCodegenHelper<'tcx>,
+ bx: &mut Bx,
+ intrinsic: Option<Symbol>,
+ instance: Option<Instance<'tcx>>,
+ source_info: mir::SourceInfo,
+ target: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ ) -> bool {
+ // Emit a panic or a no-op for `assert_*` intrinsics.
+ // These are intrinsics that compile to panics so that we can get a message
+ // which mentions the offending type, even from a const context.
+ #[derive(Debug, PartialEq)]
+ enum AssertIntrinsic {
+ Inhabited,
+ ZeroValid,
+ UninitValid,
+ }
+ let panic_intrinsic = intrinsic.and_then(|i| match i {
+ sym::assert_inhabited => Some(AssertIntrinsic::Inhabited),
+ sym::assert_zero_valid => Some(AssertIntrinsic::ZeroValid),
+ sym::assert_uninit_valid => Some(AssertIntrinsic::UninitValid),
+ _ => None,
+ });
+ if let Some(intrinsic) = panic_intrinsic {
+ use AssertIntrinsic::*;
+
+ let ty = instance.unwrap().substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ let do_panic = match intrinsic {
+ Inhabited => layout.abi.is_uninhabited(),
+ ZeroValid => !bx.tcx().permits_zero_init(layout),
+ UninitValid => !bx.tcx().permits_uninit_init(layout),
+ };
+ if do_panic {
+ let msg_str = with_no_visible_paths!({
+ with_no_trimmed_paths!({
+ if layout.abi.is_uninhabited() {
+ // Use this error even for the other intrinsics as it is more precise.
+ format!("attempted to instantiate uninhabited type `{}`", ty)
+ } else if intrinsic == ZeroValid {
+ format!("attempted to zero-initialize type `{}`, which is invalid", ty)
+ } else {
+ format!(
+ "attempted to leave type `{}` uninitialized, which is invalid",
+ ty
+ )
+ }
+ })
+ });
+ let msg = bx.const_str(&msg_str);
+ let location = self.get_caller_location(bx, source_info).immediate();
+
+ // Obtain the panic entry point.
+ let (fn_abi, llfn) =
+ common::build_langcall(bx, Some(source_info.span), LangItem::Panic);
+
+ // Codegen the actual panic invoke/call.
+ helper.do_call(
+ self,
+ bx,
+ fn_abi,
+ llfn,
+ &[msg.0, msg.1, location],
+ target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)),
+ cleanup,
+ &[],
+ );
+ } else {
+ // a NOP
+ let target = target.unwrap();
+ helper.funclet_br(self, bx, target)
+ }
+ true
+ } else {
+ false
+ }
+ }
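+
+ // For example (assuming the standard library of this era): `mem::zeroed::<&u8>()`
+ // guards itself with `assert_zero_valid::<&u8>()`; a zeroed reference is never
+ // valid, so the `do_panic` branch above replaces the call with a panic whose
+ // message names the offending type, e.g.
+ // "attempted to zero-initialize type `&u8`, which is invalid".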
+
+ fn codegen_call_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ func: &mir::Operand<'tcx>,
+ args: &[mir::Operand<'tcx>],
+ destination: mir::Place<'tcx>,
+ target: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ fn_span: Span,
+ ) {
+ let source_info = terminator.source_info;
+ let span = source_info.span;
+
+ // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
+ let callee = self.codegen_operand(&mut bx, func);
+
+ let (instance, mut llfn) = match *callee.layout.ty.kind() {
+ ty::FnDef(def_id, substs) => (
+ Some(
+ ty::Instance::resolve(bx.tcx(), ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ .unwrap()
+ .polymorphize(bx.tcx()),
+ ),
+ None,
+ ),
+ ty::FnPtr(_) => (None, Some(callee.immediate())),
+ _ => bug!("{} is not callable", callee.layout.ty),
+ };
+ let def = instance.map(|i| i.def);
+
+ if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
+ // Empty drop glue; a no-op.
+ let target = target.unwrap();
+ helper.funclet_br(self, &mut bx, target);
+ return;
+ }
+
+ // FIXME(eddyb) avoid computing this if possible, when `instance` is
+ // available - right now `sig` is only needed for getting the `abi`
+ // and figuring out how many extra args were passed to a C-variadic `fn`.
+ let sig = callee.layout.ty.fn_sig(bx.tcx());
+ let abi = sig.abi();
+
+ // Handle, ourselves, the intrinsics that the old codegen wanted `Expr`s for.
+ let intrinsic = match def {
+ Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
+ _ => None,
+ };
+
+ let extra_args = &args[sig.inputs().skip_binder().len()..];
+ let extra_args = bx.tcx().mk_type_list(extra_args.iter().map(|op_arg| {
+ let op_ty = op_arg.ty(self.mir, bx.tcx());
+ self.monomorphize(op_ty)
+ }));
+
+ let fn_abi = match instance {
+ Some(instance) => bx.fn_abi_of_instance(instance, extra_args),
+ None => bx.fn_abi_of_fn_ptr(sig, extra_args),
+ };
+
+ if intrinsic == Some(sym::transmute) {
+ if let Some(target) = target {
+ self.codegen_transmute(&mut bx, &args[0], destination);
+ helper.funclet_br(self, &mut bx, target);
+ } else {
+ // If we are trying to transmute to an uninhabited type,
+ // it is likely there is no allotted destination. In fact,
+ // transmuting to an uninhabited type is UB, which means
+ // we can do what we like. Here, we declare that transmuting
+ // into an uninhabited type is impossible, so anything following
+ // it must be unreachable.
+ assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
+ bx.unreachable();
+ }
+ return;
+ }
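+
+ // E.g. `transmute::<u32, f32>(x)` takes the `target` branch above and is
+ // handled by `codegen_transmute` (a plain bitcast for scalar-to-scalar
+ // cases), while a hypothetical `transmute::<u8, Infallible>(x)` returns an
+ // uninhabited type, has no return edge in MIR, and therefore lowers to
+ // `unreachable`.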
+
+ if self.codegen_panic_intrinsic(
+ &helper,
+ &mut bx,
+ intrinsic,
+ instance,
+ source_info,
+ target,
+ cleanup,
+ ) {
+ return;
+ }
+
+ // The arguments we'll be passing. Plus one to account for outptr, if used.
+ let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
+ let mut llargs = Vec::with_capacity(arg_count);
+
+ // Prepare the return value destination
+ let ret_dest = if target.is_some() {
+ let is_intrinsic = intrinsic.is_some();
+ self.make_return_dest(&mut bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
+ } else {
+ ReturnDest::Nothing
+ };
+
+ if intrinsic == Some(sym::caller_location) {
+ if let Some(target) = target {
+ let location = self
+ .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+
+ if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
+ location.val.store(&mut bx, tmp);
+ }
+ self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
+ helper.funclet_br(self, &mut bx, target);
+ }
+ return;
+ }
+
+ match intrinsic {
+ None | Some(sym::drop_in_place) => {}
+ Some(sym::copy_nonoverlapping) => unreachable!(),
+ Some(intrinsic) => {
+ let dest = match ret_dest {
+ _ if fn_abi.ret.is_indirect() => llargs[0],
+ ReturnDest::Nothing => {
+ bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
+ }
+ ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
+ ReturnDest::DirectOperand(_) => {
+ bug!("Cannot use direct operand with an intrinsic call")
+ }
+ };
+
+ let args: Vec<_> = args
+ .iter()
+ .enumerate()
+ .map(|(i, arg)| {
+ // The indices passed to simd_shuffle* in the
+ // third argument must be constant. This is
+ // checked by const-qualification, which also
+ // promotes any complex rvalues to constants.
+ if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
+ if let mir::Operand::Constant(constant) = arg {
+ let c = self.eval_mir_constant(constant);
+ let (llval, ty) = self.simd_shuffle_indices(
+ &bx,
+ constant.span,
+ self.monomorphize(constant.ty()),
+ c,
+ );
+ return OperandRef {
+ val: Immediate(llval),
+ layout: bx.layout_of(ty),
+ };
+ } else {
+ span_bug!(span, "shuffle indices must be constant");
+ }
+ }
+
+ self.codegen_operand(&mut bx, arg)
+ })
+ .collect();
+
+ Self::codegen_intrinsic_call(
+ &mut bx,
+ *instance.as_ref().unwrap(),
+ &fn_abi,
+ &args,
+ dest,
+ span,
+ );
+
+ if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
+ self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+ }
+
+ if let Some(target) = target {
+ helper.funclet_br(self, &mut bx, target);
+ } else {
+ bx.unreachable();
+ }
+
+ return;
+ }
+ }
+
+ // Split the rust-call tupled arguments off.
+ let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
+ let (tup, args) = args.split_last().unwrap();
+ (args, Some(tup))
+ } else {
+ (args, None)
+ };
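+
+ // "rust-call" illustration: for something like `FnMut::call_mut(&mut f, (a, b))`
+ // the final MIR argument is the tuple `(a, b)`; it is split off here and later
+ // flattened into individual ABI arguments by `codegen_arguments_untupled`.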
+
+ let mut copied_constant_arguments = vec![];
+ 'make_args: for (i, arg) in first_args.iter().enumerate() {
+ let mut op = self.codegen_operand(&mut bx, arg);
+
+ if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
+ if let Pair(..) = op.val {
+ // In the case of Rc<Self>, we need to explicitly pass a
+ // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
+ // that is understood elsewhere in the compiler as a method on
+ // `dyn Trait`.
+ // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+ // we get a value of a built-in pointer type
+ 'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
+ && !op.layout.ty.is_region_ptr()
+ {
+ for i in 0..op.layout.fields.count() {
+ let field = op.extract_field(&mut bx, i);
+ if !field.layout.is_zst() {
+ // We found the one non-zero-sized field that is allowed; now
+ // find *its* non-zero-sized field, or stop if it's a
+ // pointer.
+ op = field;
+ continue 'descend_newtypes;
+ }
+ }
+
+ span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
+ }
+
+ // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
+ // data pointer and vtable. Look up the method in the vtable, and pass
+ // the data pointer as the first argument
+ match op.val {
+ Pair(data_ptr, meta) => {
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue 'make_args;
+ }
+ other => bug!("expected a Pair, got {:?}", other),
+ }
+ } else if let Ref(data_ptr, Some(meta), _) = op.val {
+ // by-value dynamic dispatch
+ llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
+ &mut bx,
+ meta,
+ op.layout.ty,
+ &fn_abi,
+ ));
+ llargs.push(data_ptr);
+ continue;
+ } else {
+ span_bug!(span, "can't codegen a virtual call on {:?}", op);
+ }
+ }
+
+ // The callee needs to own the argument memory if we pass it
+ // by-ref, so make a local copy of non-immediate constants.
+ match (arg, op.val) {
+ (&mir::Operand::Copy(_), Ref(_, None, _))
+ | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
+ let tmp = PlaceRef::alloca(&mut bx, op.layout);
+ bx.lifetime_start(tmp.llval, tmp.layout.size);
+ op.val.store(&mut bx, tmp);
+ op.val = Ref(tmp.llval, None, tmp.align);
+ copied_constant_arguments.push(tmp);
+ }
+ _ => {}
+ }
+
+ self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
+ }
+ let num_untupled = untuple.map(|tup| {
+ self.codegen_arguments_untupled(
+ &mut bx,
+ tup,
+ &mut llargs,
+ &fn_abi.args[first_args.len()..],
+ )
+ });
+
+ let needs_location =
+ instance.map_or(false, |i| i.def.requires_caller_location(self.cx.tcx()));
+ if needs_location {
+ let mir_args = if let Some(num_untupled) = num_untupled {
+ first_args.len() + num_untupled
+ } else {
+ args.len()
+ };
+ assert_eq!(
+ fn_abi.args.len(),
+ mir_args + 1,
+ "#[track_caller] fns must have 1 more argument in their ABI than in their MIR: {:?} {:?} {:?}",
+ instance,
+ fn_span,
+ fn_abi,
+ );
+ let location =
+ self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+ debug!(
+ "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
+ terminator, location, fn_span
+ );
+
+ let last_arg = fn_abi.args.last().unwrap();
+ self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
+ }
+
+ let (is_indirect_call, fn_ptr) = match (llfn, instance) {
+ (Some(llfn), _) => (true, llfn),
+ (None, Some(instance)) => (false, bx.get_fn_addr(instance)),
+ _ => span_bug!(span, "no llfn for call"),
+ };
+
+ // For backends that support CFI using type membership, i.e. testing whether
+ // a given pointer is associated with a type identifier, emit the check here
+ // before performing an indirect call.
+ if bx.tcx().sess.is_sanitizer_cfi_enabled() && is_indirect_call {
+ // Emit type metadata and checks.
+ // FIXME(rcvalle): Add support for generalized identifiers.
+ // FIXME(rcvalle): Create distinct unnamed MDNodes for internal identifiers.
+ let typeid = typeid_for_fnabi(bx.tcx(), fn_abi);
+ let typeid_metadata = self.cx.typeid_metadata(typeid);
+
+ // Test whether the function pointer is associated with the type identifier.
+ let cond = bx.type_test(fn_ptr, typeid_metadata);
+ let bb_pass = bx.append_sibling_block("type_test.pass");
+ let bb_fail = bx.append_sibling_block("type_test.fail");
+ bx.cond_br(cond, bb_pass, bb_fail);
+
+ bx.switch_to_block(bb_pass);
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_abi,
+ fn_ptr,
+ &llargs,
+ target.as_ref().map(|&target| (ret_dest, target)),
+ cleanup,
+ &copied_constant_arguments,
+ );
+
+ bx.switch_to_block(bb_fail);
+ bx.abort();
+ bx.unreachable();
+
+ return;
+ }
+
+ helper.do_call(
+ self,
+ &mut bx,
+ fn_abi,
+ fn_ptr,
+ &llargs,
+ target.as_ref().map(|&target| (ret_dest, target)),
+ cleanup,
+ &copied_constant_arguments,
+ );
+ }
+
+ fn codegen_asm_terminator(
+ &mut self,
+ helper: TerminatorCodegenHelper<'tcx>,
+ mut bx: Bx,
+ terminator: &mir::Terminator<'tcx>,
+ template: &[ast::InlineAsmTemplatePiece],
+ operands: &[mir::InlineAsmOperand<'tcx>],
+ options: ast::InlineAsmOptions,
+ line_spans: &[Span],
+ destination: Option<mir::BasicBlock>,
+ cleanup: Option<mir::BasicBlock>,
+ instance: Instance<'_>,
+ ) {
+ let span = terminator.source_info.span;
+
+ let operands: Vec<_> = operands
+ .iter()
+ .map(|op| match *op {
+ mir::InlineAsmOperand::In { reg, ref value } => {
+ let value = self.codegen_operand(&mut bx, value);
+ InlineAsmOperandRef::In { reg, value }
+ }
+ mir::InlineAsmOperand::Out { reg, late, ref place } => {
+ let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
+ InlineAsmOperandRef::Out { reg, late, place }
+ }
+ mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
+ let in_value = self.codegen_operand(&mut bx, in_value);
+ let out_place =
+ out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
+ InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
+ }
+ mir::InlineAsmOperand::Const { ref value } => {
+ let const_value = self
+ .eval_mir_constant(value)
+ .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
+ let string = common::asm_const_to_str(
+ bx.tcx(),
+ span,
+ const_value,
+ bx.layout_of(value.ty()),
+ );
+ InlineAsmOperandRef::Const { string }
+ }
+ mir::InlineAsmOperand::SymFn { ref value } => {
+ let literal = self.monomorphize(value.literal);
+ if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ let instance = ty::Instance::resolve_for_fn_ptr(
+ bx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap();
+ InlineAsmOperandRef::SymFn { instance }
+ } else {
+ span_bug!(span, "invalid type for asm sym (fn)");
+ }
+ }
+ mir::InlineAsmOperand::SymStatic { def_id } => {
+ InlineAsmOperandRef::SymStatic { def_id }
+ }
+ })
+ .collect();
+
+ helper.do_inlineasm(
+ self,
+ &mut bx,
+ template,
+ &operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ instance,
+ );
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
+ let llbb = self.llbb(bb);
+ let mut bx = Bx::build(self.cx, llbb);
+ let mir = self.mir;
+ let data = &mir[bb];
+
+ debug!("codegen_block({:?}={:?})", bb, data);
+
+ for statement in &data.statements {
+ bx = self.codegen_statement(bx, statement);
+ }
+
+ self.codegen_terminator(bx, bb, data.terminator());
+ }
+
+ fn codegen_terminator(
+ &mut self,
+ mut bx: Bx,
+ bb: mir::BasicBlock,
+ terminator: &'tcx mir::Terminator<'tcx>,
+ ) {
+ debug!("codegen_terminator: {:?}", terminator);
+
+ // Create the cleanup bundle, if needed.
+ let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
+ let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };
+
+ self.set_debug_loc(&mut bx, terminator.source_info);
+ match terminator.kind {
+ mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),
+
+ mir::TerminatorKind::Abort => {
+ self.codegen_abort_terminator(helper, bx, terminator);
+ }
+
+ mir::TerminatorKind::Goto { target } => {
+ helper.funclet_br(self, &mut bx, target);
+ }
+
+ mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
+ self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets);
+ }
+
+ mir::TerminatorKind::Return => {
+ self.codegen_return_terminator(bx);
+ }
+
+ mir::TerminatorKind::Unreachable => {
+ bx.unreachable();
+ }
+
+ mir::TerminatorKind::Drop { place, target, unwind } => {
+ self.codegen_drop_terminator(helper, bx, place, target, unwind);
+ }
+
+ mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
+ self.codegen_assert_terminator(
+ helper, bx, terminator, cond, expected, msg, target, cleanup,
+ );
+ }
+
+ mir::TerminatorKind::DropAndReplace { .. } => {
+ bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
+ }
+
+ mir::TerminatorKind::Call {
+ ref func,
+ ref args,
+ destination,
+ target,
+ cleanup,
+ from_hir_call: _,
+ fn_span,
+ } => {
+ self.codegen_call_terminator(
+ helper,
+ bx,
+ terminator,
+ func,
+ args,
+ destination,
+ target,
+ cleanup,
+ fn_span,
+ );
+ }
+ mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
+ bug!("generator ops in codegen")
+ }
+ mir::TerminatorKind::FalseEdge { .. } | mir::TerminatorKind::FalseUnwind { .. } => {
+ bug!("borrowck false edges in codegen")
+ }
+
+ mir::TerminatorKind::InlineAsm {
+ template,
+ ref operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ } => {
+ self.codegen_asm_terminator(
+ helper,
+ bx,
+ terminator,
+ template,
+ operands,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ self.instance,
+ );
+ }
+ }
+ }
+
+ fn codegen_argument(
+ &mut self,
+ bx: &mut Bx,
+ op: OperandRef<'tcx, Bx::Value>,
+ llargs: &mut Vec<Bx::Value>,
+ arg: &ArgAbi<'tcx, Ty<'tcx>>,
+ ) {
+ // Fill padding with undef value, where applicable.
+ if let Some(ty) = arg.pad {
+ llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
+ }
+
+ if arg.is_ignore() {
+ return;
+ }
+
+ if let PassMode::Pair(..) = arg.mode {
+ match op.val {
+ Pair(a, b) => {
+ llargs.push(a);
+ llargs.push(b);
+ return;
+ }
+ _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
+ }
+ } else if arg.is_unsized_indirect() {
+ match op.val {
+ Ref(a, Some(b), _) => {
+ llargs.push(a);
+ llargs.push(b);
+ return;
+ }
+ _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
+ }
+ }
+
+ // Force by-ref if we have to load through a cast pointer.
+ let (mut llval, align, by_ref) = match op.val {
+ Immediate(_) | Pair(..) => match arg.mode {
+ PassMode::Indirect { .. } | PassMode::Cast(_) => {
+ let scratch = PlaceRef::alloca(bx, arg.layout);
+ op.val.store(bx, scratch);
+ (scratch.llval, scratch.align, true)
+ }
+ _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
+ },
+ Ref(llval, _, align) => {
+ if arg.is_indirect() && align < arg.layout.align.abi {
+ // `foo(packed.large_field)`: we can't pass the (unaligned) field directly,
+ // so copy it into an aligned temporary first. At the moment (Rust 1.16) we
+ // believe only temporaries are passed here anyway, but let's not leave
+ // scary latent bugs around.
+
+ let scratch = PlaceRef::alloca(bx, arg.layout);
+ base::memcpy_ty(
+ bx,
+ scratch.llval,
+ scratch.align,
+ llval,
+ align,
+ op.layout,
+ MemFlags::empty(),
+ );
+ (scratch.llval, scratch.align, true)
+ } else {
+ (llval, align, true)
+ }
+ }
+ };
+
+ if by_ref && !arg.is_indirect() {
+ // Have to load the argument, maybe while casting it.
+ if let PassMode::Cast(ty) = arg.mode {
+ let llty = bx.cast_backend_type(&ty);
+ let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
+ llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
+ } else {
+ // We can't use `PlaceRef::load` here because the argument
+ // may have a type we don't treat as immediate, but the ABI
+ // used for this call is passing it by-value. In that case,
+ // the load would just produce `OperandValue::Ref` instead
+ // of the `OperandValue::Immediate` we need for the call.
+ llval = bx.load(bx.backend_type(arg.layout), llval, align);
+ if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+ if scalar.is_bool() {
+ bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
+ }
+ }
+ // We store bools as `i8` so we need to truncate to `i1`.
+ llval = bx.to_immediate(llval, arg.layout);
+ }
+ }
+
+ llargs.push(llval);
+ }
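+
+ // Example of the unaligned-argument copy above (illustrative type):
+ //
+ //     #[repr(packed)]
+ //     struct P { tag: u8, big: u64 }
+ //
+ // Passing `p.big` by reference would hand the callee a pointer with
+ // alignment 1, so the value is first `memcpy`'d into a suitably aligned
+ // scratch alloca and that scratch is passed instead.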
+
+ fn codegen_arguments_untupled(
+ &mut self,
+ bx: &mut Bx,
+ operand: &mir::Operand<'tcx>,
+ llargs: &mut Vec<Bx::Value>,
+ args: &[ArgAbi<'tcx, Ty<'tcx>>],
+ ) -> usize {
+ let tuple = self.codegen_operand(bx, operand);
+
+ // Handle both by-ref and immediate tuples.
+ if let Ref(llval, None, align) = tuple.val {
+ let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
+ for i in 0..tuple.layout.fields.count() {
+ let field_ptr = tuple_ptr.project_field(bx, i);
+ let field = bx.load_operand(field_ptr);
+ self.codegen_argument(bx, field, llargs, &args[i]);
+ }
+ } else if let Ref(_, Some(_), _) = tuple.val {
+ bug!("closure arguments must be sized")
+ } else {
+ // If the tuple is immediate, the elements are as well.
+ for i in 0..tuple.layout.fields.count() {
+ let op = tuple.extract_field(bx, i);
+ self.codegen_argument(bx, op, llargs, &args[i]);
+ }
+ }
+ tuple.layout.fields.count()
+ }
+
+ fn get_caller_location(
+ &mut self,
+ bx: &mut Bx,
+ mut source_info: mir::SourceInfo,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ let tcx = bx.tcx();
+
+ let mut span_to_caller_location = |span: Span| {
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ let const_loc = tcx.const_caller_location((
+ Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
+ caller.line as u32,
+ caller.col_display as u32 + 1,
+ ));
+ OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
+ };
+
+ // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+ // If so, the starting `source_info.span` is in the innermost inlined
+ // function, and will be replaced with outer callsite spans as long
+ // as the inlined functions were `#[track_caller]`.
+ loop {
+ let scope_data = &self.mir.source_scopes[source_info.scope];
+
+ if let Some((callee, callsite_span)) = scope_data.inlined {
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !callee.def.requires_caller_location(tcx) {
+ return span_to_caller_location(source_info.span);
+ }
+ source_info.span = callsite_span;
+ }
+
+ // Skip past all of the parents with `inlined: None`.
+ match scope_data.inlined_parent_scope {
+ Some(parent) => source_info.scope = parent,
+ None => break,
+ }
+ }
+
+ // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
+ self.caller_location.unwrap_or_else(|| span_to_caller_location(source_info.span))
+ }
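+
+ // Illustrative behavior: given
+ //
+ //     #[track_caller]
+ //     fn f() { g() } // where `g` is also `#[track_caller]` and was MIR-inlined
+ //
+ // the loop above keeps replacing the span with the callsite span for as long
+ // as the inlined callee is `#[track_caller]`, so the reported `Location`
+ // points at the outermost relevant call site rather than at a line inside
+ // `f` or `g`.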
+
+ fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
+ let cx = bx.cx();
+ if let Some(slot) = self.personality_slot {
+ slot
+ } else {
+ let layout = cx.layout_of(
+ cx.tcx().intern_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
+ );
+ let slot = PlaceRef::alloca(bx, layout);
+ self.personality_slot = Some(slot);
+ slot
+ }
+ }
+
+ /// Returns the landing/cleanup pad wrapper around the given basic block.
+ // FIXME(eddyb) rename this to `eh_pad_for`.
+ fn landing_pad_for(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
+ if let Some(landing_pad) = self.landing_pads[bb] {
+ return landing_pad;
+ }
+
+ let landing_pad = self.landing_pad_for_uncached(bb);
+ self.landing_pads[bb] = Some(landing_pad);
+ landing_pad
+ }
+
+ // FIXME(eddyb) rename this to `eh_pad_for_uncached`.
+ fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
+ let llbb = self.llbb(bb);
+ if base::wants_msvc_seh(self.cx.sess()) {
+ let funclet;
+ let ret_llbb;
+ match self.mir[bb].terminator.as_ref().map(|t| &t.kind) {
+ // This is a basic block that we're aborting the program for,
+ // notably in an `extern` function. These basic blocks are inserted
+ // so that we assert that `extern` functions do indeed not panic,
+ // and if they do we abort the process.
+ //
+ // On MSVC these are tricky though (where we're doing funclets). If
+ // we were to do a cleanuppad (like below) the normal functions like
+ // `longjmp` would trigger the abort logic, terminating the
+ // program. Instead we insert the equivalent of `catch(...)` for C++
+ // which magically doesn't trigger when `longjmp` flies over this
+ // frame.
+ //
+ // Lots more discussion can be found on #48251 but this codegen is
+ // modeled after clang's for:
+ //
+ // try {
+ // foo();
+ // } catch (...) {
+ // bar();
+ // }
+ Some(&mir::TerminatorKind::Abort) => {
+ let cs_bb =
+ Bx::append_block(self.cx, self.llfn, &format!("cs_funclet{:?}", bb));
+ let cp_bb =
+ Bx::append_block(self.cx, self.llfn, &format!("cp_funclet{:?}", bb));
+ ret_llbb = cs_bb;
+
+ let mut cs_bx = Bx::build(self.cx, cs_bb);
+ let cs = cs_bx.catch_switch(None, None, &[cp_bb]);
+
+ // The "null" here is actually an RTTI type descriptor for the
+ // C++ personality function, but `catch (...)` has no type so
+ // it's null. The 64 here is actually a bitfield which
+ // indicates that this is a catch-all block.
+ let mut cp_bx = Bx::build(self.cx, cp_bb);
+ let null = cp_bx.const_null(
+ cp_bx.type_i8p_ext(cp_bx.cx().data_layout().instruction_address_space),
+ );
+ let sixty_four = cp_bx.const_i32(64);
+ funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
+ cp_bx.br(llbb);
+ }
+ _ => {
+ let cleanup_bb =
+ Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
+ ret_llbb = cleanup_bb;
+ let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
+ funclet = cleanup_bx.cleanup_pad(None, &[]);
+ cleanup_bx.br(llbb);
+ }
+ }
+ self.funclets[bb] = Some(funclet);
+ ret_llbb
+ } else {
+ let bb = Bx::append_block(self.cx, self.llfn, "cleanup");
+ let mut bx = Bx::build(self.cx, bb);
+
+ let llpersonality = self.cx.eh_personality();
+ let llretty = self.landing_pad_type();
+ let lp = bx.cleanup_landing_pad(llretty, llpersonality);
+
+ let slot = self.get_personality_slot(&mut bx);
+ slot.storage_live(&mut bx);
+ Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);
+
+ bx.br(llbb);
+ bx.llbb()
+ }
+ }
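+
+ // Rough shape of the MSVC `Abort` funclet built above, in LLVM IR (names
+ // illustrative; pointer types follow the typed-pointer syntax of this era):
+ //
+ //     cs_funclet0:
+ //         %cs = catchswitch within none [label %cp_funclet0] unwind to caller
+ //     cp_funclet0:
+ //         %cp = catchpad within %cs [i8* null, i32 64, i8* null]
+ //         br label %bb0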
+
+ fn landing_pad_type(&self) -> Bx::Type {
+ let cx = self.cx;
+ cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
+ }
+
+ fn unreachable_block(&mut self) -> Bx::BasicBlock {
+ self.unreachable_block.unwrap_or_else(|| {
+ let llbb = Bx::append_block(self.cx, self.llfn, "unreachable");
+ let mut bx = Bx::build(self.cx, llbb);
+ bx.unreachable();
+ self.unreachable_block = Some(llbb);
+ llbb
+ })
+ }
+
+ fn double_unwind_guard(&mut self) -> Bx::BasicBlock {
+ self.double_unwind_guard.unwrap_or_else(|| {
+ assert!(!base::wants_msvc_seh(self.cx.sess()));
+
+ let llbb = Bx::append_block(self.cx, self.llfn, "abort");
+ let mut bx = Bx::build(self.cx, llbb);
+ self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
+
+ let llpersonality = self.cx.eh_personality();
+ let llretty = self.landing_pad_type();
+ bx.cleanup_landing_pad(llretty, llpersonality);
+
+ let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicNoUnwind);
+ let fn_ty = bx.fn_decl_backend_type(&fn_abi);
+
+ let llret = bx.call(fn_ty, fn_ptr, &[], None);
+ bx.apply_attrs_callsite(&fn_abi, llret);
+ bx.do_not_inline(llret);
+
+ bx.unreachable();
+
+ self.double_unwind_guard = Some(llbb);
+ llbb
+ })
+ }
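+
+ // The guard above means that, with Itanium-style EH, a cleanup that itself
+ // unwinds (e.g., hypothetically, a `Drop::drop` that panics while the stack
+ // is already unwinding from an earlier panic) lands in this `abort` block
+ // and calls the `PanicNoUnwind` lang item instead of corrupting unwinder
+ // state.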
+
+ /// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already
+ /// cached in `self.cached_llbbs`, or created on demand (and cached).
+ // FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a
+ // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbb`).
+ pub fn llbb(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
+ self.cached_llbbs[bb].unwrap_or_else(|| {
+ // FIXME(eddyb) only name the block if `fewer_names` is `false`.
+ let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
+ self.cached_llbbs[bb] = Some(llbb);
+ llbb
+ })
+ }
+
+ fn make_return_dest(
+ &mut self,
+ bx: &mut Bx,
+ dest: mir::Place<'tcx>,
+ fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
+ llargs: &mut Vec<Bx::Value>,
+ is_intrinsic: bool,
+ ) -> ReturnDest<'tcx, Bx::Value> {
+ // If the return is ignored, we can just return a do-nothing `ReturnDest`.
+ if fn_ret.is_ignore() {
+ return ReturnDest::Nothing;
+ }
+ let dest = if let Some(index) = dest.as_local() {
+ match self.locals[index] {
+ LocalRef::Place(dest) => dest,
+ LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
+ LocalRef::Operand(None) => {
+ // Handle temporary places, specifically `Operand` ones, as
+ // they don't have `alloca`s.
+ return if fn_ret.is_indirect() {
+ // An odd, but possible, case: we have an operand temporary,
+ // but the calling convention has an indirect return.
+ let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+ tmp.storage_live(bx);
+ llargs.push(tmp.llval);
+ ReturnDest::IndirectOperand(tmp, index)
+ } else if is_intrinsic {
+ // Currently, intrinsics always need a location to store
+ // the result, so we create a temporary `alloca` for the
+ // result.
+ let tmp = PlaceRef::alloca(bx, fn_ret.layout);
+ tmp.storage_live(bx);
+ ReturnDest::IndirectOperand(tmp, index)
+ } else {
+ ReturnDest::DirectOperand(index)
+ };
+ }
+ LocalRef::Operand(Some(_)) => {
+ bug!("place local already assigned to");
+ }
+ }
+ } else {
+ self.codegen_place(
+ bx,
+ mir::PlaceRef { local: dest.local, projection: &dest.projection },
+ )
+ };
+ if fn_ret.is_indirect() {
+ if dest.align < dest.layout.align.abi {
+ // Currently, MIR code generation does not create calls
+ // that store directly to fields of packed structs (in
+ // fact, the calls it creates write only to temps).
+ //
+ // If someone changes that, please update this code path
+ // to create a temporary.
+ span_bug!(self.mir.span, "can't directly store to unaligned value");
+ }
+ llargs.push(dest.llval);
+ ReturnDest::Nothing
+ } else {
+ ReturnDest::Store(dest)
+ }
+ }
+
+ fn codegen_transmute(&mut self, bx: &mut Bx, src: &mir::Operand<'tcx>, dst: mir::Place<'tcx>) {
+ if let Some(index) = dst.as_local() {
+ match self.locals[index] {
+ LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
+ LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
+ LocalRef::Operand(None) => {
+ let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst.as_ref()));
+ assert!(!dst_layout.ty.has_erasable_regions());
+ let place = PlaceRef::alloca(bx, dst_layout);
+ place.storage_live(bx);
+ self.codegen_transmute_into(bx, src, place);
+ let op = bx.load_operand(place);
+ place.storage_dead(bx);
+ self.locals[index] = LocalRef::Operand(Some(op));
+ self.debug_introduce_local(bx, index);
+ }
+ LocalRef::Operand(Some(op)) => {
+ assert!(op.layout.is_zst(), "assigning to initialized SSA temp");
+ }
+ }
+ } else {
+ let dst = self.codegen_place(bx, dst.as_ref());
+ self.codegen_transmute_into(bx, src, dst);
+ }
+ }
+
+ fn codegen_transmute_into(
+ &mut self,
+ bx: &mut Bx,
+ src: &mir::Operand<'tcx>,
+ dst: PlaceRef<'tcx, Bx::Value>,
+ ) {
+ let src = self.codegen_operand(bx, src);
+
+ // Special-case transmutes between scalars as simple bitcasts.
+ match (src.layout.abi, dst.layout.abi) {
+ (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
+ // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
+ if (src_scalar.primitive() == abi::Pointer)
+ == (dst_scalar.primitive() == abi::Pointer)
+ {
+ assert_eq!(src.layout.size, dst.layout.size);
+
+ // NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`
+ // conversions allow handling `bool`s the same as `u8`s.
+ let src = bx.from_immediate(src.immediate());
+ let src_as_dst = bx.bitcast(src, bx.backend_type(dst.layout));
+ Immediate(bx.to_immediate_scalar(src_as_dst, dst_scalar)).store(bx, dst);
+ return;
+ }
+ }
+ _ => {}
+ }
+
+ let llty = bx.backend_type(src.layout);
+ let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
+ let align = src.layout.align.abi.min(dst.align);
+ src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
+ }
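+
+ // E.g. `transmute::<u32, f32>` hits the scalar fast path above and becomes a
+ // single `bitcast` (modulo the `from_immediate`/`to_immediate_scalar`
+ // normalization for `bool`), while `transmute::<usize, *const u8>` mixes
+ // pointer and non-pointer scalars and so falls through to the
+ // store-through-cast-pointer path instead.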
+
+ // Stores the return value of a function call into its final location.
+ fn store_return(
+ &mut self,
+ bx: &mut Bx,
+ dest: ReturnDest<'tcx, Bx::Value>,
+ ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ llval: Bx::Value,
+ ) {
+ use self::ReturnDest::*;
+
+ match dest {
+ Nothing => (),
+ Store(dst) => bx.store_arg(&ret_abi, llval, dst),
+ IndirectOperand(tmp, index) => {
+ let op = bx.load_operand(tmp);
+ tmp.storage_dead(bx);
+ self.locals[index] = LocalRef::Operand(Some(op));
+ self.debug_introduce_local(bx, index);
+ }
+ DirectOperand(index) => {
+ // If there is a cast, we have to store and reload.
+ let op = if let PassMode::Cast(_) = ret_abi.mode {
+ let tmp = PlaceRef::alloca(bx, ret_abi.layout);
+ tmp.storage_live(bx);
+ bx.store_arg(&ret_abi, llval, tmp);
+ let op = bx.load_operand(tmp);
+ tmp.storage_dead(bx);
+ op
+ } else {
+ OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
+ };
+ self.locals[index] = LocalRef::Operand(Some(op));
+ self.debug_introduce_local(bx, index);
+ }
+ }
+ }
+}
+
+enum ReturnDest<'tcx, V> {
+ // Do nothing; the return value is indirect or ignored.
+ Nothing,
+ // Store the return value to the pointer.
+ Store(PlaceRef<'tcx, V>),
+ // Store an indirect return value to an operand local place.
+ IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
+ // Store a direct return value to an operand local place.
+ DirectOperand(mir::Local),
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs
new file mode 100644
index 000000000..9a995fbf6
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs
@@ -0,0 +1,90 @@
+use crate::mir::operand::OperandRef;
+use crate::traits::*;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, ErrorHandled};
+use rustc_middle::ty::layout::HasTyCtxt;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::source_map::Span;
+use rustc_target::abi::Abi;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn eval_mir_constant_to_operand(
+ &self,
+ bx: &mut Bx,
+ constant: &mir::Constant<'tcx>,
+ ) -> Result<OperandRef<'tcx, Bx::Value>, ErrorHandled> {
+ let val = self.eval_mir_constant(constant)?;
+ let ty = self.monomorphize(constant.ty());
+ Ok(OperandRef::from_const(bx, val, ty))
+ }
+
+ pub fn eval_mir_constant(
+ &self,
+ constant: &mir::Constant<'tcx>,
+ ) -> Result<ConstValue<'tcx>, ErrorHandled> {
+ let ct = self.monomorphize(constant.literal);
+ let ct = match ct {
+ mir::ConstantKind::Ty(ct) => ct,
+ mir::ConstantKind::Val(val, _) => return Ok(val),
+ };
+ match ct.kind() {
+ ty::ConstKind::Unevaluated(ct) => self
+ .cx
+ .tcx()
+ .const_eval_resolve(ty::ParamEnv::reveal_all(), ct, None)
+ .map_err(|err| {
+ self.cx.tcx().sess.span_err(constant.span, "erroneous constant encountered");
+ err
+ }),
+ ty::ConstKind::Value(val) => Ok(self.cx.tcx().valtree_to_const_val((ct.ty(), val))),
+ err => span_bug!(
+ constant.span,
+ "encountered bad ConstKind after monomorphizing: {:?}",
+ err
+ ),
+ }
+ }
+
+ /// Processes a constant containing SIMD shuffle indices.
+ pub fn simd_shuffle_indices(
+ &mut self,
+ bx: &Bx,
+ span: Span,
+ ty: Ty<'tcx>,
+ constant: Result<ConstValue<'tcx>, ErrorHandled>,
+ ) -> (Bx::Value, Ty<'tcx>) {
+ constant
+ .map(|val| {
+ let field_ty = ty.builtin_index().unwrap();
+ let c = mir::ConstantKind::from_value(val, ty);
+ let values: Vec<_> = bx
+ .tcx()
+ .destructure_mir_constant(ty::ParamEnv::reveal_all(), c)
+ .fields
+ .iter()
+ .map(|field| {
+ if let Some(prim) = field.try_to_scalar() {
+ let layout = bx.layout_of(field_ty);
+ let Abi::Scalar(scalar) = layout.abi else {
+ bug!("from_const: invalid ByVal layout: {:#?}", layout);
+ };
+ bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
+ } else {
+ bug!("simd shuffle field {:?}", field)
+ }
+ })
+ .collect();
+ let llval = bx.const_struct(&values, false);
+ (llval, c.ty())
+ })
+ .unwrap_or_else(|_| {
+ bx.tcx().sess.span_err(span, "could not evaluate shuffle_indices at compile time");
+ // We've errored, so we don't have to produce working code.
+ let ty = self.monomorphize(ty);
+ let llty = bx.backend_type(bx.layout_of(ty));
+ (bx.const_undef(llty), ty)
+ })
+ }
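+
+ // Context (illustrative): in a call like `simd_shuffle::<_, _, U>(a, b, IDX)`
+ // the index argument `IDX` must be a constant array; the code above
+ // destructures it field by field and rebuilds it as a constant backend
+ // struct of immediates for the shuffle instruction.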
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
new file mode 100644
index 000000000..f1fe49528
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/coverageinfo.rs
@@ -0,0 +1,55 @@
+use crate::traits::*;
+
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::Coverage;
+use rustc_middle::mir::SourceScope;
+
+use super::FunctionCx;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_coverage(&self, bx: &mut Bx, coverage: Coverage, scope: SourceScope) {
+ // Determine the instance that coverage data was originally generated for.
+ let instance = if let Some(inlined) = scope.inlined_instance(&self.mir.source_scopes) {
+ self.monomorphize(inlined)
+ } else {
+ self.instance
+ };
+
+ let Coverage { kind, code_region } = coverage;
+ match kind {
+ CoverageKind::Counter { function_source_hash, id } => {
+ if bx.set_function_source_hash(instance, function_source_hash) {
+ // If `set_function_source_hash()` returned true, the coverage map is enabled,
+ // so continue adding the counter.
+ if let Some(code_region) = code_region {
+ // Note: Some counters do not have code regions, but may still be referenced
+ // from expressions. In that case, don't add the counter to the coverage map,
+ // but do inject the counter intrinsic.
+ bx.add_coverage_counter(instance, id, code_region);
+ }
+
+ let coverageinfo = bx.tcx().coverageinfo(instance.def);
+
+ let fn_name = bx.get_pgo_func_name_var(instance);
+ let hash = bx.const_u64(function_source_hash);
+ let num_counters = bx.const_u32(coverageinfo.num_counters);
+ let index = bx.const_u32(id.zero_based_index());
+ debug!(
+ "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})",
+ fn_name, hash, num_counters, index,
+ );
+ bx.instrprof_increment(fn_name, hash, num_counters, index);
+ }
+ }
+ CoverageKind::Expression { id, lhs, op, rhs } => {
+ bx.add_coverage_counter_expression(instance, id, lhs, op, rhs, code_region);
+ }
+ CoverageKind::Unreachable => {
+ bx.add_coverage_unreachable(
+ instance,
+ code_region.expect("unreachable regions always have code regions"),
+ );
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
new file mode 100644
index 000000000..8c3186efc
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs
@@ -0,0 +1,418 @@
+use crate::traits::*;
+use rustc_index::vec::IndexVec;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_session::config::DebugInfo;
+use rustc_span::symbol::{kw, Symbol};
+use rustc_span::{BytePos, Span};
+use rustc_target::abi::Abi;
+use rustc_target::abi::Size;
+
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+pub struct FunctionDebugContext<S, L> {
+ pub scopes: IndexVec<mir::SourceScope, DebugScope<S, L>>,
+}
+
+#[derive(Copy, Clone)]
+pub enum VariableKind {
+ ArgumentVariable(usize /*index*/),
+ LocalVariable,
+}
+
+/// Like `mir::VarDebugInfo`, but within a `mir::Local`.
+#[derive(Copy, Clone)]
+pub struct PerLocalVarDebugInfo<'tcx, D> {
+ pub name: Symbol,
+ pub source_info: mir::SourceInfo,
+
+ /// `DIVariable` returned by `create_dbg_var`.
+ pub dbg_var: Option<D>,
+
+ /// `.place.projection` from `mir::VarDebugInfo`.
+ pub projection: &'tcx ty::List<mir::PlaceElem<'tcx>>,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct DebugScope<S, L> {
+ pub dbg_scope: S,
+
+ /// Call site location, if this scope was inlined from another function.
+ pub inlined_at: Option<L>,
+
+ // Start and end offsets of the file to which this DIScope belongs.
+ // These are used to quickly determine whether some span refers to the same file.
+ pub file_start_pos: BytePos,
+ pub file_end_pos: BytePos,
+}
+
+impl<'tcx, S: Copy, L: Copy> DebugScope<S, L> {
+ /// DILocations inherit the source file name from the parent DIScope. Due to macro expansions
+ /// it may happen that the current span belongs to a different file than the DIScope
+ /// corresponding to the span's containing source scope. If so, we need to create a DIScope
+ /// "extension" into that file.
+ pub fn adjust_dbg_scope_for_span<Cx: CodegenMethods<'tcx, DIScope = S, DILocation = L>>(
+ &self,
+ cx: &Cx,
+ span: Span,
+ ) -> S {
+ let pos = span.lo();
+ if pos < self.file_start_pos || pos >= self.file_end_pos {
+ let sm = cx.sess().source_map();
+ cx.extend_scope_to_file(self.dbg_scope, &sm.lookup_char_pos(pos).file)
+ } else {
+ self.dbg_scope
+ }
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn set_debug_loc(&self, bx: &mut Bx, source_info: mir::SourceInfo) {
+ bx.set_span(source_info.span);
+ if let Some(dbg_loc) = self.dbg_loc(source_info) {
+ bx.set_dbg_loc(dbg_loc);
+ }
+ }
+
+ fn dbg_loc(&self, source_info: mir::SourceInfo) -> Option<Bx::DILocation> {
+ let (dbg_scope, inlined_at, span) = self.adjusted_span_and_dbg_scope(source_info)?;
+ Some(self.cx.dbg_loc(dbg_scope, inlined_at, span))
+ }
+
+ fn adjusted_span_and_dbg_scope(
+ &self,
+ source_info: mir::SourceInfo,
+ ) -> Option<(Bx::DIScope, Option<Bx::DILocation>, Span)> {
+ let span = self.adjust_span_for_debugging(source_info.span);
+ let scope = &self.debug_context.as_ref()?.scopes[source_info.scope];
+ Some((scope.adjust_dbg_scope_for_span(self.cx, span), scope.inlined_at, span))
+ }
+
+ /// In order to have good line-stepping behavior in the debugger, we overwrite the
+ /// debug locations of macro expansions with that of the outermost expansion site
+ /// (unless the crate is being compiled with `-Z debug-macros`).
+ fn adjust_span_for_debugging(&self, mut span: Span) -> Span {
+ // Bail out if debug info emission is not enabled.
+ if self.debug_context.is_none() {
+ return span;
+ }
+
+ if span.from_expansion() && !self.cx.sess().opts.unstable_opts.debug_macros {
+ // Walk up the macro expansion chain until we reach a non-expanded span.
+ // We also stop at the function body level because no line stepping can occur
+ // at the level above that.
+ // Use span of the outermost expansion site, while keeping the original lexical scope.
+ span = rustc_span::hygiene::walk_chain(span, self.mir.span.ctxt());
+ }
+
+ span
+ }
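+
+ // Example of the effect: for a `panic!("boom")` in user code, the emitted
+ // line-table entry points at the `panic!` invocation itself rather than at
+ // lines inside the macro's expansion, unless `-Z debug-macros` is enabled.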
+
+ fn spill_operand_to_stack(
+ operand: &OperandRef<'tcx, Bx::Value>,
+ name: Option<String>,
+ bx: &mut Bx,
+ ) -> PlaceRef<'tcx, Bx::Value> {
+ // "Spill" the value onto the stack, for debuginfo,
+ // without forcing non-debuginfo uses of the local
+ // to also load from the stack every single time.
+ // FIXME(#68817) use `llvm.dbg.value` instead,
+ // at least for the cases which LLVM handles correctly.
+ let spill_slot = PlaceRef::alloca(bx, operand.layout);
+ if let Some(name) = name {
+ bx.set_var_name(spill_slot.llval, &(name + ".dbg.spill"));
+ }
+ operand.val.store(bx, spill_slot);
+ spill_slot
+ }
+
+ /// Apply debuginfo and/or name, after creating the `alloca` for a local,
+ /// or initializing the local with an operand (whichever applies).
+ pub fn debug_introduce_local(&self, bx: &mut Bx, local: mir::Local) {
+ let full_debug_info = bx.sess().opts.debuginfo == DebugInfo::Full;
+
+ // FIXME(eddyb) maybe name the return place as `_0` or `return`?
+ if local == mir::RETURN_PLACE && !self.mir.local_decls[mir::RETURN_PLACE].is_user_variable()
+ {
+ return;
+ }
+
+ let vars = match &self.per_local_var_debug_info {
+ Some(per_local) => &per_local[local],
+ None => return,
+ };
+ let whole_local_var = vars.iter().find(|var| var.projection.is_empty()).copied();
+ let has_proj = || vars.iter().any(|var| !var.projection.is_empty());
+
+ let fallback_var = if self.mir.local_kind(local) == mir::LocalKind::Arg {
+ let arg_index = local.index() - 1;
+
+ // Add debuginfo even to unnamed arguments.
+ // FIXME(eddyb) is this really needed?
+ if arg_index == 0 && has_proj() {
+ // Hide closure environments from debuginfo.
+ // FIXME(eddyb) shouldn't `ArgumentVariable` indices
+ // be offset to account for the hidden environment?
+ None
+ } else if whole_local_var.is_some() {
+ // No need to make up anything, there is a `mir::VarDebugInfo`
+ // covering the whole local.
+ // FIXME(eddyb) take `whole_local_var.source_info.scope` into
+ // account, just in case it doesn't use `ArgumentVariable`
+ // (after #67586 gets fixed).
+ None
+ } else {
+ let name = kw::Empty;
+ let decl = &self.mir.local_decls[local];
+ let dbg_var = if full_debug_info {
+ self.adjusted_span_and_dbg_scope(decl.source_info).map(
+ |(dbg_scope, _, span)| {
+ // FIXME(eddyb) is this `+ 1` needed at all?
+ let kind = VariableKind::ArgumentVariable(arg_index + 1);
+
+ let arg_ty = self.monomorphize(decl.ty);
+
+ self.cx.create_dbg_var(name, arg_ty, dbg_scope, kind, span)
+ },
+ )
+ } else {
+ None
+ };
+
+ Some(PerLocalVarDebugInfo {
+ name,
+ source_info: decl.source_info,
+ dbg_var,
+ projection: ty::List::empty(),
+ })
+ }
+ } else {
+ None
+ };
+
+ let local_ref = &self.locals[local];
+
+ let name = if bx.sess().fewer_names() {
+ None
+ } else {
+ Some(match whole_local_var.or(fallback_var) {
+ Some(var) if var.name != kw::Empty => var.name.to_string(),
+ _ => format!("{:?}", local),
+ })
+ };
+
+ if let Some(name) = &name {
+ match local_ref {
+ LocalRef::Place(place) | LocalRef::UnsizedPlace(place) => {
+ bx.set_var_name(place.llval, name);
+ }
+ LocalRef::Operand(Some(operand)) => match operand.val {
+ OperandValue::Ref(x, ..) | OperandValue::Immediate(x) => {
+ bx.set_var_name(x, name);
+ }
+ OperandValue::Pair(a, b) => {
+ // FIXME(eddyb) these are scalar components,
+ // maybe extract the high-level fields?
+ bx.set_var_name(a, &(name.clone() + ".0"));
+ bx.set_var_name(b, &(name.clone() + ".1"));
+ }
+ },
+ LocalRef::Operand(None) => {}
+ }
+ }
+
+ if !full_debug_info || vars.is_empty() && fallback_var.is_none() {
+ return;
+ }
+
+ let base = match local_ref {
+ LocalRef::Operand(None) => return,
+
+ LocalRef::Operand(Some(operand)) => {
+ // Don't spill operands onto the stack in naked functions.
+ // See: https://github.com/rust-lang/rust/issues/42779
+ let attrs = bx.tcx().codegen_fn_attrs(self.instance.def_id());
+ if attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ return;
+ }
+
+ Self::spill_operand_to_stack(operand, name, bx)
+ }
+
+ LocalRef::Place(place) => *place,
+
+ // FIXME(eddyb) add debuginfo for unsized places too.
+ LocalRef::UnsizedPlace(_) => return,
+ };
+
+ let vars = vars.iter().copied().chain(fallback_var);
+
+ for var in vars {
+ let Some(dbg_var) = var.dbg_var else { continue };
+ let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
+
+ let mut direct_offset = Size::ZERO;
+ // FIXME(eddyb) use smallvec here.
+ let mut indirect_offsets = vec![];
+ let mut place = base;
+
+ for elem in &var.projection[..] {
+ match *elem {
+ mir::ProjectionElem::Deref => {
+ indirect_offsets.push(Size::ZERO);
+ place = bx.load_operand(place).deref(bx.cx());
+ }
+ mir::ProjectionElem::Field(field, _) => {
+ let i = field.index();
+ let offset = indirect_offsets.last_mut().unwrap_or(&mut direct_offset);
+ *offset += place.layout.fields.offset(i);
+ place = place.project_field(bx, i);
+ }
+ mir::ProjectionElem::Downcast(_, variant) => {
+ place = place.project_downcast(bx, variant);
+ }
+ _ => span_bug!(
+ var.source_info.span,
+ "unsupported var debuginfo place `{:?}`",
+ mir::Place { local, projection: var.projection },
+ ),
+ }
+ }
+
+ // When targeting MSVC, create extra allocas for arguments instead of pointing multiple
+            // dbg_var_addr() calls into the same alloca with offsets. MSVC uses CodeView records,
+            // not DWARF, and LLVM doesn't support translating the resulting
+ // [DW_OP_deref, DW_OP_plus_uconst, offset, DW_OP_deref] debug info to CodeView.
+ // Creating extra allocas on the stack makes the resulting debug info simple enough
+ // that LLVM can generate correct CodeView records and thus the values appear in the
+ // debugger. (#83709)
+ let should_create_individual_allocas = bx.cx().sess().target.is_like_msvc
+ && self.mir.local_kind(local) == mir::LocalKind::Arg
+ // LLVM can handle simple things but anything more complex than just a direct
+ // offset or one indirect offset of 0 is too complex for it to generate CV records
+ // correctly.
+ && (direct_offset != Size::ZERO
+ || !matches!(&indirect_offsets[..], [Size::ZERO] | []));
+
+ if should_create_individual_allocas {
+ // Create a variable which will be a pointer to the actual value
+ let ptr_ty = bx.tcx().mk_ty(ty::RawPtr(ty::TypeAndMut {
+ mutbl: mir::Mutability::Mut,
+ ty: place.layout.ty,
+ }));
+ let ptr_layout = bx.layout_of(ptr_ty);
+ let alloca = PlaceRef::alloca(bx, ptr_layout);
+ bx.set_var_name(alloca.llval, &(var.name.to_string() + ".dbg.spill"));
+
+ // Write the pointer to the variable
+ bx.store(place.llval, alloca.llval, alloca.align);
+
+ // Point the debug info to `*alloca` for the current variable
+ bx.dbg_var_addr(dbg_var, dbg_loc, alloca.llval, Size::ZERO, &[Size::ZERO]);
+ } else {
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, direct_offset, &indirect_offsets);
+ }
+ }
+ }
+
+ pub fn debug_introduce_locals(&self, bx: &mut Bx) {
+ if bx.sess().opts.debuginfo == DebugInfo::Full || !bx.sess().fewer_names() {
+ for local in self.locals.indices() {
+ self.debug_introduce_local(bx, local);
+ }
+ }
+ }
+
+ /// Partition all `VarDebugInfo` in `self.mir`, by their base `Local`.
+ pub fn compute_per_local_var_debug_info(
+ &self,
+ bx: &mut Bx,
+ ) -> Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>> {
+ let full_debug_info = self.cx.sess().opts.debuginfo == DebugInfo::Full;
+
+ let target_is_msvc = self.cx.sess().target.is_like_msvc;
+
+ if !full_debug_info && self.cx.sess().fewer_names() {
+ return None;
+ }
+
+ let mut per_local = IndexVec::from_elem(vec![], &self.mir.local_decls);
+ for var in &self.mir.var_debug_info {
+ let dbg_scope_and_span = if full_debug_info {
+ self.adjusted_span_and_dbg_scope(var.source_info)
+ } else {
+ None
+ };
+
+ let dbg_var = dbg_scope_and_span.map(|(dbg_scope, _, span)| {
+ let (var_ty, var_kind) = match var.value {
+ mir::VarDebugInfoContents::Place(place) => {
+ let var_ty = self.monomorphized_place_ty(place.as_ref());
+ let var_kind = if self.mir.local_kind(place.local) == mir::LocalKind::Arg
+ && place.projection.is_empty()
+ && var.source_info.scope == mir::OUTERMOST_SOURCE_SCOPE
+ {
+ let arg_index = place.local.index() - 1;
+ if target_is_msvc {
+ // ScalarPair parameters are spilled to the stack so they need to
+ // be marked as a `LocalVariable` for MSVC debuggers to visualize
+ // their data correctly. (See #81894 & #88625)
+ let var_ty_layout = self.cx.layout_of(var_ty);
+ if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
+ VariableKind::LocalVariable
+ } else {
+ VariableKind::ArgumentVariable(arg_index + 1)
+ }
+ } else {
+ // FIXME(eddyb) shouldn't `ArgumentVariable` indices be
+ // offset in closures to account for the hidden environment?
+ // Also, is this `+ 1` needed at all?
+ VariableKind::ArgumentVariable(arg_index + 1)
+ }
+ } else {
+ VariableKind::LocalVariable
+ };
+ (var_ty, var_kind)
+ }
+ mir::VarDebugInfoContents::Const(c) => {
+ let ty = self.monomorphize(c.ty());
+ (ty, VariableKind::LocalVariable)
+ }
+ };
+
+ self.cx.create_dbg_var(var.name, var_ty, dbg_scope, var_kind, span)
+ });
+
+ match var.value {
+ mir::VarDebugInfoContents::Place(place) => {
+ per_local[place.local].push(PerLocalVarDebugInfo {
+ name: var.name,
+ source_info: var.source_info,
+ dbg_var,
+ projection: place.projection,
+ });
+ }
+ mir::VarDebugInfoContents::Const(c) => {
+ if let Some(dbg_var) = dbg_var {
+ let Some(dbg_loc) = self.dbg_loc(var.source_info) else { continue };
+
+ if let Ok(operand) = self.eval_mir_constant_to_operand(bx, &c) {
+ let base = Self::spill_operand_to_stack(
+ &operand,
+ Some(var.name.to_string()),
+ bx,
+ );
+
+ bx.dbg_var_addr(dbg_var, dbg_loc, base.llval, Size::ZERO, &[]);
+ }
+ }
+ }
+ }
+ }
+ Some(per_local)
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
new file mode 100644
index 000000000..94ac71a4d
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -0,0 +1,636 @@
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::FunctionCx;
+use crate::common::{span_invalid_monomorphization_error, IntPredicate};
+use crate::glue;
+use crate::meth;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::{sym, Span};
+use rustc_target::abi::{
+ call::{FnAbi, PassMode},
+ WrappingRange,
+};
+
+fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ allow_overlap: bool,
+ volatile: bool,
+ ty: Ty<'tcx>,
+ dst: Bx::Value,
+ src: Bx::Value,
+ count: Bx::Value,
+) {
+ let layout = bx.layout_of(ty);
+ let size = layout.size;
+ let align = layout.align.abi;
+ let size = bx.mul(bx.const_usize(size.bytes()), count);
+ let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
+ if allow_overlap {
+ bx.memmove(dst, align, src, align, size, flags);
+ } else {
+ bx.memcpy(dst, align, src, align, size, flags);
+ }
+}
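+// Illustrative mapping (see the callers below): the `copy` intrinsic lowers through
+// `copy_intrinsic` with `allow_overlap = true` (a memmove), while the volatile
+// nonoverlapping variant passes `false` (a memcpy); either way the byte count is
+// `size_of::<T>() * count`, computed above as `size * count`.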
+
+fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ volatile: bool,
+ ty: Ty<'tcx>,
+ dst: Bx::Value,
+ val: Bx::Value,
+ count: Bx::Value,
+) {
+ let layout = bx.layout_of(ty);
+ let size = layout.size;
+ let align = layout.align.abi;
+ let size = bx.mul(bx.const_usize(size.bytes()), count);
+ let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
+ bx.memset(dst, val, size, align, flags);
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn codegen_intrinsic_call(
+ bx: &mut Bx,
+ instance: ty::Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ args: &[OperandRef<'tcx, Bx::Value>],
+ llresult: Bx::Value,
+ span: Span,
+ ) {
+ let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());
+
+ let ty::FnDef(def_id, substs) = *callee_ty.kind() else {
+ bug!("expected fn item type, found {}", callee_ty);
+ };
+
+ let sig = callee_ty.fn_sig(bx.tcx());
+ let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
+ let arg_tys = sig.inputs();
+ let ret_ty = sig.output();
+ let name = bx.tcx().item_name(def_id);
+ let name_str = name.as_str();
+
+ let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
+ let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
+
+ let llval = match name {
+ sym::assume => {
+ bx.assume(args[0].immediate());
+ return;
+ }
+ sym::abort => {
+ bx.abort();
+ return;
+ }
+
+ sym::va_start => bx.va_start(args[0].immediate()),
+ sym::va_end => bx.va_end(args[0].immediate()),
+ sym::size_of_val => {
+ let tp_ty = substs.type_at(0);
+ if let OperandValue::Pair(_, meta) = args[0].val {
+ let (llsize, _) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
+ llsize
+ } else {
+ bx.const_usize(bx.layout_of(tp_ty).size.bytes())
+ }
+ }
+ sym::min_align_of_val => {
+ let tp_ty = substs.type_at(0);
+ if let OperandValue::Pair(_, meta) = args[0].val {
+ let (_, llalign) = glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
+ llalign
+ } else {
+ bx.const_usize(bx.layout_of(tp_ty).align.abi.bytes())
+ }
+ }
+ sym::vtable_size | sym::vtable_align => {
+ let vtable = args[0].immediate();
+ let idx = match name {
+ sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
+ sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
+ _ => bug!(),
+ };
+ let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
+ if name == sym::vtable_align {
+ // Alignment is always nonzero.
+ bx.range_metadata(value, WrappingRange { start: 1, end: !0 });
+ };
+ value
+ }
+ sym::pref_align_of
+ | sym::needs_drop
+ | sym::type_id
+ | sym::type_name
+ | sym::variant_count => {
+ let value = bx
+ .tcx()
+ .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
+ .unwrap();
+ OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
+ }
+ sym::offset => {
+ let ty = substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ let ptr = args[0].immediate();
+ let offset = args[1].immediate();
+ bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
+ }
+ sym::arith_offset => {
+ let ty = substs.type_at(0);
+ let layout = bx.layout_of(ty);
+ let ptr = args[0].immediate();
+ let offset = args[1].immediate();
+ bx.gep(bx.backend_type(layout), ptr, &[offset])
+ }
+ sym::copy => {
+ copy_intrinsic(
+ bx,
+ true,
+ false,
+ substs.type_at(0),
+ args[1].immediate(),
+ args[0].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::write_bytes => {
+ memset_intrinsic(
+ bx,
+ false,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+
+ sym::volatile_copy_nonoverlapping_memory => {
+ copy_intrinsic(
+ bx,
+ false,
+ true,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::volatile_copy_memory => {
+ copy_intrinsic(
+ bx,
+ true,
+ true,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::volatile_set_memory => {
+ memset_intrinsic(
+ bx,
+ true,
+ substs.type_at(0),
+ args[0].immediate(),
+ args[1].immediate(),
+ args[2].immediate(),
+ );
+ return;
+ }
+ sym::volatile_store => {
+ let dst = args[0].deref(bx.cx());
+ args[1].val.volatile_store(bx, dst);
+ return;
+ }
+ sym::unaligned_volatile_store => {
+ let dst = args[0].deref(bx.cx());
+ args[1].val.unaligned_volatile_store(bx, dst);
+ return;
+ }
+ sym::add_with_overflow
+ | sym::sub_with_overflow
+ | sym::mul_with_overflow
+ | sym::unchecked_div
+ | sym::unchecked_rem
+ | sym::unchecked_shl
+ | sym::unchecked_shr
+ | sym::unchecked_add
+ | sym::unchecked_sub
+ | sym::unchecked_mul
+ | sym::exact_div => {
+ let ty = arg_tys[0];
+ match int_type_width_signed(ty, bx.tcx()) {
+ Some((_width, signed)) => match name {
+ sym::add_with_overflow
+ | sym::sub_with_overflow
+ | sym::mul_with_overflow => {
+ let op = match name {
+ sym::add_with_overflow => OverflowOp::Add,
+ sym::sub_with_overflow => OverflowOp::Sub,
+ sym::mul_with_overflow => OverflowOp::Mul,
+ _ => bug!(),
+ };
+ let (val, overflow) =
+ bx.checked_binop(op, ty, args[0].immediate(), args[1].immediate());
+ // Convert `i1` to a `bool`, and write it to the out parameter
+ let val = bx.from_immediate(val);
+ let overflow = bx.from_immediate(overflow);
+
+ let dest = result.project_field(bx, 0);
+ bx.store(val, dest.llval, dest.align);
+ let dest = result.project_field(bx, 1);
+ bx.store(overflow, dest.llval, dest.align);
+
+ return;
+ }
+ sym::exact_div => {
+ if signed {
+ bx.exactsdiv(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.exactudiv(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_div => {
+ if signed {
+ bx.sdiv(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.udiv(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_rem => {
+ if signed {
+ bx.srem(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.urem(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_shl => bx.shl(args[0].immediate(), args[1].immediate()),
+ sym::unchecked_shr => {
+ if signed {
+ bx.ashr(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.lshr(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_add => {
+ if signed {
+ bx.unchecked_sadd(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.unchecked_uadd(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_sub => {
+ if signed {
+ bx.unchecked_ssub(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.unchecked_usub(args[0].immediate(), args[1].immediate())
+ }
+ }
+ sym::unchecked_mul => {
+ if signed {
+ bx.unchecked_smul(args[0].immediate(), args[1].immediate())
+ } else {
+ bx.unchecked_umul(args[0].immediate(), args[1].immediate())
+ }
+ }
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ return;
+ }
+ }
+ }
+ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+ match float_type_width(arg_tys[0]) {
+ Some(_width) => match name {
+ sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
+ sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
+ sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
+ sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
+ sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
+ _ => bug!(),
+ },
+ None => {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic float type, found `{}`",
+ name, arg_tys[0]
+ ),
+ );
+ return;
+ }
+ }
+ }
+
+ sym::float_to_int_unchecked => {
+ if float_type_width(arg_tys[0]).is_none() {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `float_to_int_unchecked` \
+ intrinsic: expected basic float type, \
+ found `{}`",
+ arg_tys[0]
+ ),
+ );
+ return;
+ }
+ let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `float_to_int_unchecked` \
+ intrinsic: expected basic integer type, \
+ found `{}`",
+ ret_ty
+ ),
+ );
+ return;
+ };
+ if signed {
+ bx.fptosi(args[0].immediate(), llret_ty)
+ } else {
+ bx.fptoui(args[0].immediate(), llret_ty)
+ }
+ }
+
+ sym::discriminant_value => {
+ if ret_ty.is_integral() {
+ args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
+ } else {
+ span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
+ }
+ }
+
+ sym::const_allocate => {
+ // returns a null pointer at runtime.
+ bx.const_null(bx.type_i8p())
+ }
+
+ sym::const_deallocate => {
+ // nop at runtime.
+ return;
+ }
+
+ // This requires that atomic intrinsics follow a specific naming pattern:
+ // "atomic_<operation>[_<ordering>]"
+ name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
+ use crate::common::AtomicOrdering::*;
+ use crate::common::{AtomicRmwBinOp, SynchronizationScope};
+
+ let Some((instruction, ordering)) = atomic.split_once('_') else {
+ bx.sess().fatal("Atomic intrinsic missing memory ordering");
+ };
+
+ let parse_ordering = |bx: &Bx, s| match s {
+ "unordered" => Unordered,
+ "relaxed" => Relaxed,
+ "acquire" => Acquire,
+ "release" => Release,
+ "acqrel" => AcquireRelease,
+ "seqcst" => SequentiallyConsistent,
+ _ => bx.sess().fatal("unknown ordering in atomic intrinsic"),
+ };
+
+ let invalid_monomorphization = |ty| {
+ span_invalid_monomorphization_error(
+ bx.tcx().sess,
+ span,
+ &format!(
+ "invalid monomorphization of `{}` intrinsic: \
+ expected basic integer type, found `{}`",
+ name, ty
+ ),
+ );
+ };
+
+ match instruction {
+ "cxchg" | "cxchgweak" => {
+ let Some((success, failure)) = ordering.split_once('_') else {
+ bx.sess().fatal("Atomic compare-exchange intrinsic missing failure memory ordering");
+ };
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let weak = instruction == "cxchgweak";
+ let mut dst = args[0].immediate();
+ let mut cmp = args[1].immediate();
+ let mut src = args[2].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first.
+ let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ dst = bx.pointercast(dst, ptr_llty);
+ cmp = bx.ptrtoint(cmp, bx.type_isize());
+ src = bx.ptrtoint(src, bx.type_isize());
+ }
+ let pair = bx.atomic_cmpxchg(dst, cmp, src, parse_ordering(bx, success), parse_ordering(bx, failure), weak);
+ let val = bx.extract_value(pair, 0);
+ let success = bx.extract_value(pair, 1);
+ let val = bx.from_immediate(val);
+ let success = bx.from_immediate(success);
+
+ let dest = result.project_field(bx, 0);
+ bx.store(val, dest.llval, dest.align);
+ let dest = result.project_field(bx, 1);
+ bx.store(success, dest.llval, dest.align);
+ return;
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+
+ "load" => {
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let layout = bx.layout_of(ty);
+ let size = layout.size;
+ let mut source = args[0].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first...
+ let llty = bx.type_isize();
+ let ptr_llty = bx.type_ptr_to(llty);
+ source = bx.pointercast(source, ptr_llty);
+ let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
+ // ... and then cast the result back to a pointer
+ bx.inttoptr(result, bx.backend_type(layout))
+ } else {
+ bx.atomic_load(bx.backend_type(layout), source, parse_ordering(bx, ordering), size)
+ }
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+
+ "store" => {
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let size = bx.layout_of(ty).size;
+ let mut val = args[1].immediate();
+ let mut ptr = args[0].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first.
+ let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ ptr = bx.pointercast(ptr, ptr_llty);
+ val = bx.ptrtoint(val, bx.type_isize());
+ }
+ bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
+ return;
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+
+ "fence" => {
+ bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::CrossThread);
+ return;
+ }
+
+ "singlethreadfence" => {
+ bx.atomic_fence(parse_ordering(bx, ordering), SynchronizationScope::SingleThread);
+ return;
+ }
+
+ // These are all AtomicRMW ops
+ op => {
+ let atom_op = match op {
+ "xchg" => AtomicRmwBinOp::AtomicXchg,
+ "xadd" => AtomicRmwBinOp::AtomicAdd,
+ "xsub" => AtomicRmwBinOp::AtomicSub,
+ "and" => AtomicRmwBinOp::AtomicAnd,
+ "nand" => AtomicRmwBinOp::AtomicNand,
+ "or" => AtomicRmwBinOp::AtomicOr,
+ "xor" => AtomicRmwBinOp::AtomicXor,
+ "max" => AtomicRmwBinOp::AtomicMax,
+ "min" => AtomicRmwBinOp::AtomicMin,
+ "umax" => AtomicRmwBinOp::AtomicUMax,
+ "umin" => AtomicRmwBinOp::AtomicUMin,
+ _ => bx.sess().fatal("unknown atomic operation"),
+ };
+
+ let ty = substs.type_at(0);
+ if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
+ let mut ptr = args[0].immediate();
+ let mut val = args[1].immediate();
+ if ty.is_unsafe_ptr() {
+ // Some platforms do not support atomic operations on pointers,
+ // so we cast to integer first.
+ let ptr_llty = bx.type_ptr_to(bx.type_isize());
+ ptr = bx.pointercast(ptr, ptr_llty);
+ val = bx.ptrtoint(val, bx.type_isize());
+ }
+ bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
+ } else {
+ return invalid_monomorphization(ty);
+ }
+ }
+ }
+ }
+
+ sym::nontemporal_store => {
+ let dst = args[0].deref(bx.cx());
+ args[1].val.nontemporal_store(bx, dst);
+ return;
+ }
+
+ sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ if name == sym::ptr_guaranteed_eq {
+ bx.icmp(IntPredicate::IntEQ, a, b)
+ } else {
+ bx.icmp(IntPredicate::IntNE, a, b)
+ }
+ }
+
+ sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+ let ty = substs.type_at(0);
+ let pointee_size = bx.layout_of(ty).size;
+
+ let a = args[0].immediate();
+ let b = args[1].immediate();
+ let a = bx.ptrtoint(a, bx.type_isize());
+ let b = bx.ptrtoint(b, bx.type_isize());
+ let pointee_size = bx.const_usize(pointee_size.bytes());
+ if name == sym::ptr_offset_from {
+ // This is the same sequence that Clang emits for pointer subtraction.
+ // It can be neither `nsw` nor `nuw` because the input is treated as
+ // unsigned but then the output is treated as signed, so neither works.
+ let d = bx.sub(a, b);
+ // this is where the signed magic happens (notice the `s` in `exactsdiv`)
+ bx.exactsdiv(d, pointee_size)
+ } else {
+ // The `_unsigned` version knows the relative ordering of the pointers,
+ // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
+ let d = bx.unchecked_usub(a, b);
+ bx.exactudiv(d, pointee_size)
+ }
+ }
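+        // Worked example (illustrative values): with `T = u32` (4 bytes), a = 0x1010 and
+        // b = 0x1000, the subtraction gives 0x10 and the exact division yields 4 elements,
+        // matching `<*const u32>::offset_from`.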
+
+ _ => {
+ // Need to use backend-specific things in the implementation.
+ bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
+ return;
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ if let PassMode::Cast(ty) = fn_abi.ret.mode {
+ let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(&ty));
+ let ptr = bx.pointercast(result.llval, ptr_llty);
+ bx.store(llval, ptr, result.align);
+ } else {
+ OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
+ .val
+ .store(bx, result);
+ }
+ }
+ }
+}
+
+// Returns the width of an integer `Ty`, and whether it is signed.
+// Returns `None` if the type is not an integer.
+// FIXME: there are multiple copies of this function; investigate whether some of the
+// already existing helpers can be used instead.
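+// For instance (illustrative values): `i32` yields `Some((32, true))`, `u8` yields
+// `Some((8, false))`, and `usize`/`isize` fall back to the target's pointer width.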
+fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> {
+ match ty.kind() {
+ ty::Int(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true))
+ }
+ ty::Uint(t) => {
+ Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false))
+ }
+ _ => None,
+ }
+}
+
+// Returns the width of a float Ty
+// Returns None if the type is not a float
+fn float_type_width(ty: Ty<'_>) -> Option<u64> {
+ match ty.kind() {
+ ty::Float(t) => Some(t.bit_width()),
+ _ => None,
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs
new file mode 100644
index 000000000..8ee375fa9
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs
@@ -0,0 +1,410 @@
+use crate::traits::*;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
+use rustc_middle::ty::{self, Instance, Ty, TypeFoldable, TypeVisitable};
+use rustc_target::abi::call::{FnAbi, PassMode};
+
+use std::iter;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+
+use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
+use self::place::PlaceRef;
+use rustc_middle::mir::traversal;
+
+use self::operand::{OperandRef, OperandValue};
+
+/// Master context for codegenning from MIR.
+pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
+ instance: Instance<'tcx>,
+
+ mir: &'tcx mir::Body<'tcx>,
+
+ debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,
+
+ llfn: Bx::Function,
+
+ cx: &'a Bx::CodegenCx,
+
+ fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
+
+ /// When unwinding is initiated, we have to store this personality
+ /// value somewhere so that we can load it and re-use it in the
+    /// resume instruction. The personality is (as far as we know) some kind of
+ /// value used for C++ unwinding, which must filter by type: we
+ /// don't really care about it very much. Anyway, this value
+ /// contains an alloca into which the personality is stored and
+ /// then later loaded when generating the DIVERGE_BLOCK.
+ personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
+
+ /// A backend `BasicBlock` for each MIR `BasicBlock`, created lazily
+ /// as-needed (e.g. RPO reaching it or another block branching to it).
+ // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
+ // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
+ cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+
+ /// The funclet status of each basic block
+ cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
+
+ /// When targeting MSVC, this stores the cleanup info for each funclet BB.
+ /// This is initialized at the same time as the `landing_pads` entry for the
+ /// funclets' head block, i.e. when needed by an unwind / `cleanup_ret` edge.
+ funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,
+
+ /// This stores the cached landing/cleanup pad block for a given BB.
+ // FIXME(eddyb) rename this to `eh_pads`.
+ landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
+
+ /// Cached unreachable block
+ unreachable_block: Option<Bx::BasicBlock>,
+
+ /// Cached double unwind guarding block
+ double_unwind_guard: Option<Bx::BasicBlock>,
+
+ /// The location where each MIR arg/var/tmp/ret is stored. This is
+    /// usually a `PlaceRef` representing an alloca, but not always:
+ /// sometimes we can skip the alloca and just store the value
+ /// directly using an `OperandRef`, which makes for tighter LLVM
+ /// IR. The conditions for using an `OperandRef` are as follows:
+ ///
+ /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
+ /// - the operand must never be referenced indirectly
+ /// - we should not take its address using the `&` operator
+ /// - nor should it appear in a place path like `tmp.a`
+ /// - the operand must be defined by an rvalue that can generate immediate
+ /// values
+ ///
+ /// Avoiding allocs can also be important for certain intrinsics,
+ /// notably `expect`.
+ locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
+
+ /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
+    /// This is `None` if no variable debuginfo/names are needed.
+ per_local_var_debug_info:
+ Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,
+
+ /// Caller location propagated if this function has `#[track_caller]`.
+ caller_location: Option<OperandRef<'tcx, Bx::Value>>,
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
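+    /// Substitutes this instance's generic arguments into `value` and normalizes it.
+    /// For example (illustrative): in `fn foo<T>(x: T)` instantiated at `T = u32`,
+    /// `monomorphize(decl.ty)` turns the MIR-level `T` into the concrete `u32`.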
+ pub fn monomorphize<T>(&self, value: T) -> T
+ where
+ T: Copy + TypeFoldable<'tcx>,
+ {
+ debug!("monomorphize: self.instance={:?}", self.instance);
+ self.instance.subst_mir_and_normalize_erasing_regions(
+ self.cx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ value,
+ )
+ }
+}
+
+enum LocalRef<'tcx, V> {
+ Place(PlaceRef<'tcx, V>),
+ /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
+ /// `*p` is the fat pointer that references the actual unsized place.
+ /// Every time it is initialized, we have to reallocate the place
+ /// and update the fat pointer. That's the reason why it is indirect.
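+    /// (For instance, a local of unsized type such as `[u8]` or `dyn Trait` is reached
+    /// through this extra indirection; see `PlaceRef::alloca_unsized_indirect`.)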
+ UnsizedPlace(PlaceRef<'tcx, V>),
+ Operand(Option<OperandRef<'tcx, V>>),
+}
+
+impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
+ fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> LocalRef<'tcx, V> {
+ if layout.is_zst() {
+ // Zero-size temporaries aren't always initialized, which
+ // doesn't matter because they don't contain data, but
+ // we need something in the operand.
+ LocalRef::Operand(Some(OperandRef::new_zst(bx, layout)))
+ } else {
+ LocalRef::Operand(None)
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+#[instrument(level = "debug", skip(cx))]
+pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ cx: &'a Bx::CodegenCx,
+ instance: Instance<'tcx>,
+) {
+ assert!(!instance.substs.needs_infer());
+
+ let llfn = cx.get_fn(instance);
+
+ let mir = cx.tcx().instance_mir(instance.def);
+
+ let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+ debug!("fn_abi: {:?}", fn_abi);
+
+ let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
+
+ let start_llbb = Bx::append_block(cx, llfn, "start");
+ let mut bx = Bx::build(cx, start_llbb);
+
+ if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
+ bx.set_personality_fn(cx.eh_personality());
+ }
+
+ let cleanup_kinds = analyze::cleanup_kinds(&mir);
+ let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> = mir
+ .basic_blocks()
+ .indices()
+ .map(|bb| if bb == mir::START_BLOCK { Some(start_llbb) } else { None })
+ .collect();
+
+ let mut fx = FunctionCx {
+ instance,
+ mir,
+ llfn,
+ fn_abi,
+ cx,
+ personality_slot: None,
+ cached_llbbs,
+ unreachable_block: None,
+ double_unwind_guard: None,
+ cleanup_kinds,
+ landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
+ funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()),
+ locals: IndexVec::new(),
+ debug_context,
+ per_local_var_debug_info: None,
+ caller_location: None,
+ };
+
+ fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx);
+
+ // Evaluate all required consts; codegen later assumes that CTFE will never fail.
+ let mut all_consts_ok = true;
+ for const_ in &mir.required_consts {
+ if let Err(err) = fx.eval_mir_constant(const_) {
+ all_consts_ok = false;
+ match err {
+ // errored or at least linted
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {}
+ ErrorHandled::TooGeneric => {
+                    span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
+ }
+ }
+ }
+ }
+ if !all_consts_ok {
+ // We leave the IR in some half-built state here, and rely on this code not even being
+ // submitted to LLVM once an error was raised.
+ return;
+ }
+
+ let memory_locals = analyze::non_ssa_locals(&fx);
+
+ // Allocate variable and temp allocas
+ fx.locals = {
+ let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);
+
+ let mut allocate_local = |local| {
+ let decl = &mir.local_decls[local];
+ let layout = bx.layout_of(fx.monomorphize(decl.ty));
+ assert!(!layout.ty.has_erasable_regions());
+
+ if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
+ debug!("alloc: {:?} (return place) -> place", local);
+ let llretptr = bx.get_param(0);
+ return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
+ }
+
+ if memory_locals.contains(local) {
+ debug!("alloc: {:?} -> place", local);
+ if layout.is_unsized() {
+ LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
+ } else {
+ LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
+ }
+ } else {
+ debug!("alloc: {:?} -> operand", local);
+ LocalRef::new_operand(&mut bx, layout)
+ }
+ };
+
+ let retptr = allocate_local(mir::RETURN_PLACE);
+ iter::once(retptr)
+ .chain(args.into_iter())
+ .chain(mir.vars_and_temps_iter().map(allocate_local))
+ .collect()
+ };
+
+ // Apply debuginfo to the newly allocated locals.
+ fx.debug_introduce_locals(&mut bx);
+
+ // Codegen the body of each block using reverse postorder
+ for (bb, _) in traversal::reverse_postorder(&mir) {
+ fx.codegen_block(bb);
+ }
+}
+
+/// Produces, for each argument, a `Value` pointing at the
+/// argument's value. As arguments are places, these are always
+/// indirect.
+fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ fx: &mut FunctionCx<'a, 'tcx, Bx>,
+ memory_locals: &BitSet<mir::Local>,
+) -> Vec<LocalRef<'tcx, Bx::Value>> {
+ let mir = fx.mir;
+ let mut idx = 0;
+ let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;
+
+ let mut num_untupled = None;
+
+ let args = mir
+ .args_iter()
+ .enumerate()
+ .map(|(arg_index, local)| {
+ let arg_decl = &mir.local_decls[local];
+
+ if Some(local) == mir.spread_arg {
+ // This argument (e.g., the last argument in the "rust-call" ABI)
+ // is a tuple that was spread at the ABI level and now we have
+ // to reconstruct it into a tuple local variable, from multiple
+ // individual LLVM function arguments.
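+                // (Illustrative: for an `extern "rust-call"` method such as
+                // `call_once(self, args: (A, B))`, the two tuple fields arrive as
+                // separate backend arguments and the tuple local is rebuilt here.)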
+
+ let arg_ty = fx.monomorphize(arg_decl.ty);
+ let ty::Tuple(tupled_arg_tys) = arg_ty.kind() else {
+ bug!("spread argument isn't a tuple?!");
+ };
+
+ let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+ for i in 0..tupled_arg_tys.len() {
+ let arg = &fx.fn_abi.args[idx];
+ idx += 1;
+ if arg.pad.is_some() {
+ llarg_idx += 1;
+ }
+ let pr_field = place.project_field(bx, i);
+ bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
+ }
+ assert_eq!(
+ None,
+ num_untupled.replace(tupled_arg_tys.len()),
+ "Replaced existing num_tupled"
+ );
+
+ return LocalRef::Place(place);
+ }
+
+ if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
+ let arg_ty = fx.monomorphize(arg_decl.ty);
+
+ let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
+ bx.va_start(va_list.llval);
+
+ return LocalRef::Place(va_list);
+ }
+
+ let arg = &fx.fn_abi.args[idx];
+ idx += 1;
+ if arg.pad.is_some() {
+ llarg_idx += 1;
+ }
+
+ if !memory_locals.contains(local) {
+ // We don't have to cast or keep the argument in the alloca.
+ // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
+ // of putting everything in allocas just so we can use llvm.dbg.declare.
+ let local = |op| LocalRef::Operand(Some(op));
+ match arg.mode {
+ PassMode::Ignore => {
+ return local(OperandRef::new_zst(bx, arg.layout));
+ }
+ PassMode::Direct(_) => {
+ let llarg = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ return local(OperandRef::from_immediate_or_packed_pair(
+ bx, llarg, arg.layout,
+ ));
+ }
+ PassMode::Pair(..) => {
+ let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1));
+ llarg_idx += 2;
+
+ return local(OperandRef {
+ val: OperandValue::Pair(a, b),
+ layout: arg.layout,
+ });
+ }
+ _ => {}
+ }
+ }
+
+ if arg.is_sized_indirect() {
+ // Don't copy an indirect argument to an alloca, the caller
+ // already put it in a temporary alloca and gave it up.
+ // FIXME: lifetimes
+ let llarg = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
+ } else if arg.is_unsized_indirect() {
+ // As the storage for the indirect argument lives during
+ // the whole function call, we just copy the fat pointer.
+ let llarg = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ let llextra = bx.get_param(llarg_idx);
+ llarg_idx += 1;
+ let indirect_operand = OperandValue::Pair(llarg, llextra);
+
+ let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
+ indirect_operand.store(bx, tmp);
+ LocalRef::UnsizedPlace(tmp)
+ } else {
+ let tmp = PlaceRef::alloca(bx, arg.layout);
+ bx.store_fn_arg(arg, &mut llarg_idx, tmp);
+ LocalRef::Place(tmp)
+ }
+ })
+ .collect::<Vec<_>>();
+
+ if fx.instance.def.requires_caller_location(bx.tcx()) {
+ let mir_args = if let Some(num_untupled) = num_untupled {
+ // Subtract off the tupled argument that gets 'expanded'
+ args.len() - 1 + num_untupled
+ } else {
+ args.len()
+ };
+ assert_eq!(
+ fx.fn_abi.args.len(),
+ mir_args + 1,
+ "#[track_caller] instance {:?} must have 1 more argument in their ABI than in their MIR",
+ fx.instance
+ );
+
+ let arg = fx.fn_abi.args.last().unwrap();
+ match arg.mode {
+ PassMode::Direct(_) => (),
+ _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode),
+ }
+
+ fx.caller_location = Some(OperandRef {
+ val: OperandValue::Immediate(bx.get_param(llarg_idx)),
+ layout: arg.layout,
+ });
+ }
+
+ args
+}
+
+mod analyze;
+mod block;
+pub mod constant;
+pub mod coverageinfo;
+pub mod debuginfo;
+mod intrinsic;
+pub mod operand;
+pub mod place;
+mod rvalue;
+mod statement;
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
new file mode 100644
index 000000000..c612634fc
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -0,0 +1,461 @@
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{ConstValue, Pointer, Scalar};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::Ty;
+use rustc_target::abi::{Abi, Align, Size};
+
+use std::fmt;
+
+/// The representation of a Rust value. The enum variant is in fact
+/// uniquely determined by the value's type, but is kept as a
+/// safety check.
+#[derive(Copy, Clone, Debug)]
+pub enum OperandValue<V> {
+ /// A reference to the actual operand. The data is guaranteed
+ /// to be valid for the operand's lifetime.
+ /// The second value, if any, is the extra data (vtable or length)
+ /// which indicates that it refers to an unsized rvalue.
+ Ref(V, Option<V>, Align),
+ /// A single LLVM value.
+ Immediate(V),
+ /// A pair of immediate LLVM values. Used by fat pointers too.
+ Pair(V, V),
+}
+
+/// An `OperandRef` is an "SSA" reference to a Rust value, along with
+/// its type.
+///
+/// NOTE: unless you know a value's type exactly, you should not
+/// generate LLVM opcodes acting on it and instead act via methods,
+/// to avoid nasty edge cases. In particular, using `Builder::store`
+/// directly is sure to cause problems -- use `OperandRef::store`
+/// instead.
+#[derive(Copy, Clone)]
+pub struct OperandRef<'tcx, V> {
+ // The value.
+ pub val: OperandValue<V>,
+
+ // The layout of value, based on its Rust type.
+ pub layout: TyAndLayout<'tcx>,
+}
+
+impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
+ }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
+ pub fn new_zst<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> OperandRef<'tcx, V> {
+ assert!(layout.is_zst());
+ OperandRef {
+ val: OperandValue::Immediate(bx.const_undef(bx.immediate_backend_type(layout))),
+ layout,
+ }
+ }
+
+ pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Self {
+ let layout = bx.layout_of(ty);
+
+ if layout.is_zst() {
+ return OperandRef::new_zst(bx, layout);
+ }
+
+ let val = match val {
+ ConstValue::Scalar(x) => {
+ let Abi::Scalar(scalar) = layout.abi else {
+ bug!("from_const: invalid ByVal layout: {:#?}", layout);
+ };
+ let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
+ OperandValue::Immediate(llval)
+ }
+ ConstValue::ZeroSized => {
+ let llval = bx.zst_to_backend(bx.immediate_backend_type(layout));
+ OperandValue::Immediate(llval)
+ }
+ ConstValue::Slice { data, start, end } => {
+ let Abi::ScalarPair(a_scalar, _) = layout.abi else {
+ bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
+ };
+ let a = Scalar::from_pointer(
+ Pointer::new(bx.tcx().create_memory_alloc(data), Size::from_bytes(start)),
+ &bx.tcx(),
+ );
+ let a_llval = bx.scalar_to_backend(
+ a,
+ a_scalar,
+ bx.scalar_pair_element_backend_type(layout, 0, true),
+ );
+ let b_llval = bx.const_usize((end - start) as u64);
+ OperandValue::Pair(a_llval, b_llval)
+ }
+ ConstValue::ByRef { alloc, offset } => {
+ return bx.load_operand(bx.from_const_alloc(layout, alloc, offset));
+ }
+ };
+
+ OperandRef { val, layout }
+ }
+
+ /// Asserts that this operand refers to a scalar and returns
+ /// a reference to its value.
+ pub fn immediate(self) -> V {
+ match self.val {
+ OperandValue::Immediate(s) => s,
+ _ => bug!("not immediate: {:?}", self),
+ }
+ }
+
+ pub fn deref<Cx: LayoutTypeMethods<'tcx>>(self, cx: &Cx) -> PlaceRef<'tcx, V> {
+ if self.layout.ty.is_box() {
+ bug!("dereferencing {:?} in codegen", self.layout.ty);
+ }
+
+ let projected_ty = self
+ .layout
+ .ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self))
+ .ty;
+
+ let (llptr, llextra) = match self.val {
+ OperandValue::Immediate(llptr) => (llptr, None),
+ OperandValue::Pair(llptr, llextra) => (llptr, Some(llextra)),
+ OperandValue::Ref(..) => bug!("Deref of by-Ref operand {:?}", self),
+ };
+ let layout = cx.layout_of(projected_ty);
+ PlaceRef { llval: llptr, llextra, layout, align: layout.align.abi }
+ }
+
+ /// If this operand is a `Pair`, we return an aggregate with the two values.
+ /// For other cases, see `immediate`.
+ pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ ) -> V {
+ if let OperandValue::Pair(a, b) = self.val {
+ let llty = bx.cx().backend_type(self.layout);
+ debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty);
+ // Reconstruct the immediate aggregate.
+ let mut llpair = bx.cx().const_undef(llty);
+ let imm_a = bx.from_immediate(a);
+ let imm_b = bx.from_immediate(b);
+ llpair = bx.insert_value(llpair, imm_a, 0);
+ llpair = bx.insert_value(llpair, imm_b, 1);
+ llpair
+ } else {
+ self.immediate()
+ }
+ }
+
+ /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
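+    /// (For instance, assuming a `(u32, u32)` value arriving as a single backend
+    /// aggregate, this splits it into two scalar immediates; illustrative.)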
+ pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ llval: V,
+ layout: TyAndLayout<'tcx>,
+ ) -> Self {
+ let val = if let Abi::ScalarPair(a, b) = layout.abi {
+ debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);
+
+ // Deconstruct the immediate aggregate.
+ let a_llval = bx.extract_value(llval, 0);
+ let a_llval = bx.to_immediate_scalar(a_llval, a);
+ let b_llval = bx.extract_value(llval, 1);
+ let b_llval = bx.to_immediate_scalar(b_llval, b);
+ OperandValue::Pair(a_llval, b_llval)
+ } else {
+ OperandValue::Immediate(llval)
+ };
+ OperandRef { val, layout }
+ }
+
+ pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ i: usize,
+ ) -> Self {
+ let field = self.layout.field(bx.cx(), i);
+ let offset = self.layout.fields.offset(i);
+
+ let mut val = match (self.val, self.layout.abi) {
+ // If the field is ZST, it has no data.
+ _ if field.is_zst() => {
+ return OperandRef::new_zst(bx, field);
+ }
+
+ // Newtype of a scalar, scalar pair or vector.
+ (OperandValue::Immediate(_) | OperandValue::Pair(..), _)
+ if field.size == self.layout.size =>
+ {
+ assert_eq!(offset.bytes(), 0);
+ self.val
+ }
+
+ // Extract a scalar component from a pair.
+ (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
+ if offset.bytes() == 0 {
+ assert_eq!(field.size, a.size(bx.cx()));
+ OperandValue::Immediate(a_llval)
+ } else {
+ assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi));
+ assert_eq!(field.size, b.size(bx.cx()));
+ OperandValue::Immediate(b_llval)
+ }
+ }
+
+ // `#[repr(simd)]` types are also immediate.
+ (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
+ OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
+ }
+
+ _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
+ };
+
+ match (&mut val, field.abi) {
+ (OperandValue::Immediate(llval), _) => {
+                // Bools in union fields need to be truncated.
+ *llval = bx.to_immediate(*llval, field);
+ // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ *llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(field));
+ }
+ (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
+                // Bools in union fields need to be truncated.
+ *a = bx.to_immediate_scalar(*a, a_abi);
+ *b = bx.to_immediate_scalar(*b, b_abi);
+ // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+ *a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(field, 0, true));
+ *b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(field, 1, true));
+ }
+ (OperandValue::Pair(..), _) => bug!(),
+ (OperandValue::Ref(..), _) => bug!(),
+ }
+
+ OperandRef { val, layout: field }
+ }
+}
+
+impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
+ pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::empty());
+ }
+
+ pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::VOLATILE);
+ }
+
+ pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
+ }
+
+ pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ ) {
+ self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
+ }
+
+ fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ dest: PlaceRef<'tcx, V>,
+ flags: MemFlags,
+ ) {
+ debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
+ // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
+ // value is through `undef`, and store itself is useless.
+ if dest.layout.is_zst() {
+ return;
+ }
+ match self {
+ OperandValue::Ref(r, None, source_align) => {
+ if flags.contains(MemFlags::NONTEMPORAL) {
+ // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+ let ty = bx.backend_type(dest.layout);
+ let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
+ let val = bx.load(ty, ptr, source_align);
+ bx.store_with_flags(val, dest.llval, dest.align, flags);
+ return;
+ }
+ base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
+ }
+ OperandValue::Ref(_, Some(_), _) => {
+ bug!("cannot directly store unsized values");
+ }
+ OperandValue::Immediate(s) => {
+ let val = bx.from_immediate(s);
+ bx.store_with_flags(val, dest.llval, dest.align, flags);
+ }
+ OperandValue::Pair(a, b) => {
+ let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
+ bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
+ };
+ let ty = bx.backend_type(dest.layout);
+ let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
+
+ let llptr = bx.struct_gep(ty, dest.llval, 0);
+ let val = bx.from_immediate(a);
+ let align = dest.align;
+ bx.store_with_flags(val, llptr, align, flags);
+
+ let llptr = bx.struct_gep(ty, dest.llval, 1);
+ let val = bx.from_immediate(b);
+ let align = dest.align.restrict_for_offset(b_offset);
+ bx.store_with_flags(val, llptr, align, flags);
+ }
+ }
+ }
+
+ pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ indirect_dest: PlaceRef<'tcx, V>,
+ ) {
+ debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
+ let flags = MemFlags::empty();
+
+ // `indirect_dest` must have `*mut T` type. We extract `T` out of it.
+ let unsized_ty = indirect_dest
+ .layout
+ .ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| bug!("indirect_dest has non-pointer type: {:?}", indirect_dest))
+ .ty;
+
+ let OperandValue::Ref(llptr, Some(llextra), _) = self else {
+ bug!("store_unsized called with a sized value")
+ };
+
+ // FIXME: choose an appropriate alignment, or use dynamic align somehow
+ let max_align = Align::from_bits(128).unwrap();
+ let min_align = Align::from_bits(8).unwrap();
+
+ // Allocate an appropriate region on the stack, and copy the value into it
+ let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
+ let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align);
+ bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
+
+ // Store the allocated region and the extra to the indirect place.
+ let indirect_operand = OperandValue::Pair(lldst, llextra);
+ indirect_operand.store(bx, indirect_dest);
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ fn maybe_codegen_consume_direct(
+ &mut self,
+ bx: &mut Bx,
+ place_ref: mir::PlaceRef<'tcx>,
+ ) -> Option<OperandRef<'tcx, Bx::Value>> {
+ debug!("maybe_codegen_consume_direct(place_ref={:?})", place_ref);
+
+ match self.locals[place_ref.local] {
+ LocalRef::Operand(Some(mut o)) => {
+ // Moves out of scalar and scalar pair fields are trivial.
+ for elem in place_ref.projection.iter() {
+ match elem {
+ mir::ProjectionElem::Field(ref f, _) => {
+ o = o.extract_field(bx, f.index());
+ }
+ mir::ProjectionElem::Index(_)
+ | mir::ProjectionElem::ConstantIndex { .. } => {
+ // ZSTs don't require any actual memory access.
+ // FIXME(eddyb) deduplicate this with the identical
+ // checks in `codegen_consume` and `extract_field`.
+ let elem = o.layout.field(bx.cx(), 0);
+ if elem.is_zst() {
+ o = OperandRef::new_zst(bx, elem);
+ } else {
+ return None;
+ }
+ }
+ _ => return None,
+ }
+ }
+
+ Some(o)
+ }
+ LocalRef::Operand(None) => {
+ bug!("use of {:?} before def", place_ref);
+ }
+ LocalRef::Place(..) | LocalRef::UnsizedPlace(..) => {
+ // watch out for locals that do not have an
+ // alloca; they are handled somewhat differently
+ None
+ }
+ }
+ }
+
+ pub fn codegen_consume(
+ &mut self,
+ bx: &mut Bx,
+ place_ref: mir::PlaceRef<'tcx>,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ debug!("codegen_consume(place_ref={:?})", place_ref);
+
+ let ty = self.monomorphized_place_ty(place_ref);
+ let layout = bx.cx().layout_of(ty);
+
+ // ZSTs don't require any actual memory access.
+ if layout.is_zst() {
+ return OperandRef::new_zst(bx, layout);
+ }
+
+ if let Some(o) = self.maybe_codegen_consume_direct(bx, place_ref) {
+ return o;
+ }
+
+ // for most places, to consume them we just load them
+ // out from their home
+ let place = self.codegen_place(bx, place_ref);
+ bx.load_operand(place)
+ }
+
+ pub fn codegen_operand(
+ &mut self,
+ bx: &mut Bx,
+ operand: &mir::Operand<'tcx>,
+ ) -> OperandRef<'tcx, Bx::Value> {
+ debug!("codegen_operand(operand={:?})", operand);
+
+ match *operand {
+ mir::Operand::Copy(ref place) | mir::Operand::Move(ref place) => {
+ self.codegen_consume(bx, place.as_ref())
+ }
+
+ mir::Operand::Constant(ref constant) => {
+ // This cannot fail because we checked all required_consts in advance.
+ self.eval_mir_constant_to_operand(bx, constant).unwrap_or_else(|_err| {
+ span_bug!(constant.span, "erroneous constant not captured by required_consts")
+ })
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
new file mode 100644
index 000000000..268c4d765
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -0,0 +1,549 @@
+use super::operand::OperandValue;
+use super::{FunctionCx, LocalRef};
+
+use crate::common::IntPredicate;
+use crate::glue;
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, Ty};
+use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+#[derive(Copy, Clone, Debug)]
+pub struct PlaceRef<'tcx, V> {
+ /// A pointer to the contents of the place.
+ pub llval: V,
+
+ /// This place's extra data if it is unsized, or `None` if null.
+ pub llextra: Option<V>,
+
+ /// The monomorphized type of this place, including variant information.
+ pub layout: TyAndLayout<'tcx>,
+
+ /// The alignment we know for this place.
+ pub align: Align,
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+ pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
+ assert!(!layout.is_unsized());
+ PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
+ }
+
+ pub fn new_sized_aligned(
+ llval: V,
+ layout: TyAndLayout<'tcx>,
+ align: Align,
+ ) -> PlaceRef<'tcx, V> {
+ assert!(!layout.is_unsized());
+ PlaceRef { llval, llextra: None, layout, align }
+ }
+
+ // FIXME(eddyb) pass something else for the name so no work is done
+ // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+ pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> Self {
+ assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
+ let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
+ Self::new_sized(tmp, layout)
+ }
+
+ /// Returns a place for an indirect reference to an unsized place.
+ // FIXME(eddyb) pass something else for the name so no work is done
+ // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
+ pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ bx: &mut Bx,
+ layout: TyAndLayout<'tcx>,
+ ) -> Self {
+ assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
+ let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
+ let ptr_layout = bx.cx().layout_of(ptr_ty);
+ Self::alloca(bx, ptr_layout)
+ }
+
+ pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
+ if let FieldsShape::Array { count, .. } = self.layout.fields {
+ if self.layout.is_unsized() {
+ assert_eq!(count, 0);
+ self.llextra.unwrap()
+ } else {
+ cx.const_usize(count)
+ }
+ } else {
+ bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
+ }
+ }
+}
+
+impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
+ /// Access a field, at a point when the value's case is known.
+ pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ ix: usize,
+ ) -> Self {
+ let field = self.layout.field(bx.cx(), ix);
+ let offset = self.layout.fields.offset(ix);
+ let effective_field_align = self.align.restrict_for_offset(offset);
+
+ let mut simple = || {
+ let llval = match self.layout.abi {
+ _ if offset.bytes() == 0 => {
+ // Unions and newtypes only use an offset of 0.
+ // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
+ self.llval
+ }
+ Abi::ScalarPair(a, b)
+ if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
+ {
+ // Offset matches second field.
+ let ty = bx.backend_type(self.layout);
+ bx.struct_gep(ty, self.llval, 1)
+ }
+ Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
+ // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
+ let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+ bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
+ }
+ Abi::Scalar(_) | Abi::ScalarPair(..) => {
+ // All fields of Scalar and ScalarPair layouts must have been handled by this point.
+ // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
+ bug!(
+ "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
+ field,
+ self.layout
+ );
+ }
+ _ => {
+ let ty = bx.backend_type(self.layout);
+ bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
+ }
+ };
+ PlaceRef {
+ // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
+ llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+ llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
+ layout: field,
+ align: effective_field_align,
+ }
+ };
+
+ // Simple cases, which don't need DST adjustment:
+ // * no metadata available - just log the case
+ // * known alignment - sized types, `[T]`, `str` or a foreign type
+ // * packed struct - there is no alignment padding
+ match field.ty.kind() {
+ _ if self.llextra.is_none() => {
+ debug!(
+ "unsized field `{}`, of `{:?}` has no metadata for adjustment",
+ ix, self.llval
+ );
+ return simple();
+ }
+ _ if !field.is_unsized() => return simple(),
+ ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
+ ty::Adt(def, _) => {
+ if def.repr().packed() {
+ // FIXME(eddyb) generalize the adjustment when we
+ // start supporting packing to larger alignments.
+ assert_eq!(self.layout.align.abi.bytes(), 1);
+ return simple();
+ }
+ }
+ _ => {}
+ }
+
+ // We need to get the pointer manually now.
+ // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
+ // We do this instead of, say, simply adjusting the pointer from the result of a GEP
+ // because the field may have an arbitrary alignment in the LLVM representation
+ // anyway.
+ //
+ // To demonstrate:
+ //
+ // struct Foo<T: ?Sized> {
+ // x: u16,
+ // y: T
+ // }
+ //
+ // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
+ // the `y` field has 16-bit alignment.
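+ //
+ // If the erased type behind `Trait` turns out to be, say, `u32`, the actual
+ // offset of the outer `y` is 4 (rounded up to the dynamic alignment of 4),
+ // not the 2 that a static GEP on that LLVM struct would yield.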
+
+ let meta = self.llextra;
+
+ let unaligned_offset = bx.cx().const_usize(offset.bytes());
+
+ // Get the alignment of the field
+ let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
+
+ // Bump the unaligned offset up to the appropriate alignment
+ let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);
+
+ debug!("struct_field_ptr: DST field offset: {:?}", offset);
+
+ // Cast and adjust pointer.
+ let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
+ let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
+
+ // Finally, cast back to the type expected.
+ let ll_fty = bx.cx().backend_type(field);
+ debug!("struct_field_ptr: Field type is {:?}", ll_fty);
+
+ PlaceRef {
+ llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
+ llextra: self.llextra,
+ layout: field,
+ align: effective_field_align,
+ }
+ }
+
+ /// Obtain the actual discriminant of a value.
+ #[instrument(level = "trace", skip(bx))]
+ pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ self,
+ bx: &mut Bx,
+ cast_to: Ty<'tcx>,
+ ) -> V {
+ let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
+ if self.layout.abi.is_uninhabited() {
+ return bx.cx().const_undef(cast_to);
+ }
+ let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
+ Variants::Single { index } => {
+ let discr_val = self
+ .layout
+ .ty
+ .discriminant_for_variant(bx.cx().tcx(), index)
+ .map_or(index.as_u32() as u128, |discr| discr.val);
+ return bx.cx().const_uint_big(cast_to, discr_val);
+ }
+ Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
+ (tag, tag_encoding, tag_field)
+ }
+ };
+
+ // Read the tag/niche-encoded discriminant from memory.
+ let tag = self.project_field(bx, tag_field);
+ let tag = bx.load_operand(tag);
+
+ // Decode the discriminant (specifically if it's niche-encoded).
+ match *tag_encoding {
+ TagEncoding::Direct => {
+ let signed = match tag_scalar.primitive() {
+ // We use `i1` for bytes that are always `0` or `1`,
+ // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
+ // let LLVM interpret the `i1` as signed, because
+ // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
+ Int(_, signed) => !tag_scalar.is_bool() && signed,
+ _ => false,
+ };
+ bx.intcast(tag.immediate(), cast_to, signed)
+ }
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ // Rebase from niche values to discriminants, and check
+ // whether the result is in range for the niche variants.
+ let niche_llty = bx.cx().immediate_backend_type(tag.layout);
+ let tag = tag.immediate();
+
+ // We first compute the "relative discriminant" (wrt `niche_variants`),
+ // that is, if `n = niche_variants.end() - niche_variants.start()`,
+ // we remap `niche_start..=niche_start + n` (which may wrap around)
+ // to (non-wrap-around) `0..=n`, to be able to check whether the
+ // discriminant corresponds to a niche variant with one comparison.
+ // We also can't go directly to the (variant index) discriminant
+ // and check that it is in the range `niche_variants`, because
+ // that might not fit in the same type, on top of needing an extra
+ // comparison (see also the comment on `let niche_discr`).
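+ //
+ // As an illustrative example: with a `u8` tag, `niche_start = 251` and
+ // `niche_variants = 3..=4`, tags `251` and `252` yield relative
+ // discriminants `0` and `1` (`relative_max = 1`), while any other tag,
+ // say `7`, wraps to `7 - 251 = 12 > 1` and is therefore not a niche.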
+ let relative_discr = if niche_start == 0 {
+ // Avoid subtracting `0`, which wouldn't work for pointers.
+ // FIXME(eddyb) check the actual primitive type here.
+ tag
+ } else {
+ bx.sub(tag, bx.cx().const_uint_big(niche_llty, niche_start))
+ };
+ let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
+ let is_niche = if relative_max == 0 {
+ // Avoid calling `const_uint`, which wouldn't work for pointers.
+ // Also use the canonical `== 0` instead of the non-canonical unsigned `<= 0`.
+ // FIXME(eddyb) check the actual primitive type here.
+ bx.icmp(IntPredicate::IntEQ, relative_discr, bx.cx().const_null(niche_llty))
+ } else {
+ let relative_max = bx.cx().const_uint(niche_llty, relative_max as u64);
+ bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
+ };
+
+ // NOTE(eddyb) this addition needs to be performed on the final
+ // type, in case the niche itself can't represent all variant
+ // indices (e.g. `u8` niche with more than `256` variants,
+ // but enough uninhabited variants so that the remaining variants
+ // fit in the niche).
+ // In other words, `niche_variants.end - niche_variants.start`
+ // is representable in the niche, but `niche_variants.end`
+ // might not be, in extreme cases.
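+ // (Illustrative: an enum with 300 variants, most of them uninhabited, can
+ // still use a `u8` niche, since the *relative* range of inhabited variants
+ // fits in 8 bits even though a variant index like `299` does not.)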
+ let niche_discr = {
+ let relative_discr = if relative_max == 0 {
+ // HACK(eddyb) since we have only one niche, we know which
+ // one it is, and we can avoid having a dynamic value here.
+ bx.cx().const_uint(cast_to, 0)
+ } else {
+ bx.intcast(relative_discr, cast_to, false)
+ };
+ bx.add(
+ relative_discr,
+ bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
+ )
+ };
+
+ bx.select(
+ is_niche,
+ niche_discr,
+ bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
+ )
+ }
+ }
+ }
+
+ /// Sets the discriminant for a new value of the given case of the given
+ /// representation.
+ pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ variant_index: VariantIdx,
+ ) {
+ if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
+ // We play it safe by using a well-defined `abort`, but we could go for immediate UB
+ // if that turns out to be helpful.
+ bx.abort();
+ return;
+ }
+ match self.layout.variants {
+ Variants::Single { index } => {
+ assert_eq!(index, variant_index);
+ }
+ Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
+ let ptr = self.project_field(bx, tag_field);
+ let to =
+ self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
+ bx.store(
+ bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
+ ptr.llval,
+ ptr.align,
+ );
+ }
+ Variants::Multiple {
+ tag_encoding:
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ tag_field,
+ ..
+ } => {
+ if variant_index != dataful_variant {
+ if bx.cx().sess().target.arch == "arm"
+ || bx.cx().sess().target.arch == "aarch64"
+ {
+ // FIXME(#34427): as a workaround for an LLVM bug on ARM,
+ // use a memset of 0 before assigning the niche value.
+ let fill_byte = bx.cx().const_u8(0);
+ let size = bx.cx().const_usize(self.layout.size.bytes());
+ bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
+ }
+
+ let niche = self.project_field(bx, tag_field);
+ let niche_llty = bx.cx().immediate_backend_type(niche.layout);
+ let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
+ let niche_value = (niche_value as u128).wrapping_add(niche_start);
+ // FIXME(eddyb): check the actual primitive type here.
+ let niche_llval = if niche_value == 0 {
+ // HACK(eddyb): using `c_null` as it works on all types.
+ bx.cx().const_null(niche_llty)
+ } else {
+ bx.cx().const_uint_big(niche_llty, niche_value)
+ };
+ OperandValue::Immediate(niche_llval).store(bx, niche);
+ }
+ }
+ }
+ }
+
+ pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ llindex: V,
+ ) -> Self {
+ // Statically compute the offset if we can, otherwise just use the element size,
+ // as this will yield the lowest alignment.
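+ // (E.g. indexing into `[u16; 8]` at a dynamic index from a place aligned
+ // to 8: the element can sit at offset 2, so the projected alignment is
+ // restricted to 2, the element size.)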
+ let layout = self.layout.field(bx, 0);
+ let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
+ layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
+ } else {
+ layout.size
+ };
+
+ PlaceRef {
+ llval: bx.inbounds_gep(
+ bx.cx().backend_type(self.layout),
+ self.llval,
+ &[bx.cx().const_usize(0), llindex],
+ ),
+ llextra: None,
+ layout,
+ align: self.align.restrict_for_offset(offset),
+ }
+ }
+
+ pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+ &self,
+ bx: &mut Bx,
+ variant_index: VariantIdx,
+ ) -> Self {
+ let mut downcast = *self;
+ downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
+
+ // Cast to the appropriate variant struct type.
+ let variant_ty = bx.cx().backend_type(downcast.layout);
+ downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
+
+ downcast
+ }
+
+ pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+ bx.lifetime_start(self.llval, self.layout.size);
+ }
+
+ pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
+ bx.lifetime_end(self.llval, self.layout.size);
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ #[instrument(level = "trace", skip(self, bx))]
+ pub fn codegen_place(
+ &mut self,
+ bx: &mut Bx,
+ place_ref: mir::PlaceRef<'tcx>,
+ ) -> PlaceRef<'tcx, Bx::Value> {
+ let cx = self.cx;
+ let tcx = self.cx.tcx();
+
+ let mut base = 0;
+ let mut cg_base = match self.locals[place_ref.local] {
+ LocalRef::Place(place) => place,
+ LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
+ LocalRef::Operand(..) => {
+ if place_ref.has_deref() {
+ base = 1;
+ let cg_base = self.codegen_consume(
+ bx,
+ mir::PlaceRef { projection: &place_ref.projection[..0], ..place_ref },
+ );
+ cg_base.deref(bx.cx())
+ } else {
+ bug!("using operand local {:?} as place", place_ref);
+ }
+ }
+ };
+ for elem in place_ref.projection[base..].iter() {
+ cg_base = match *elem {
+ mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
+ mir::ProjectionElem::Field(ref field, _) => {
+ cg_base.project_field(bx, field.index())
+ }
+ mir::ProjectionElem::Index(index) => {
+ let index = &mir::Operand::Copy(mir::Place::from(index));
+ let index = self.codegen_operand(bx, index);
+ let llindex = index.immediate();
+ cg_base.project_index(bx, llindex)
+ }
+ mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
+ let lloffset = bx.cx().const_usize(offset as u64);
+ cg_base.project_index(bx, lloffset)
+ }
+ mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
+ let lloffset = bx.cx().const_usize(offset as u64);
+ let lllen = cg_base.len(bx.cx());
+ let llindex = bx.sub(lllen, lloffset);
+ cg_base.project_index(bx, llindex)
+ }
+ mir::ProjectionElem::Subslice { from, to, from_end } => {
+ let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
+ let projected_ty =
+ PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
+ subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));
+
+ if subslice.layout.is_unsized() {
+ assert!(from_end, "slice subslices should be `from_end`");
+ subslice.llextra = Some(bx.sub(
+ cg_base.llextra.unwrap(),
+ bx.cx().const_usize((from as u64) + (to as u64)),
+ ));
+ }
+
+ // Cast the place pointer type to the new
+ // array or slice type (`*[%_; new_len]`).
+ subslice.llval = bx.pointercast(
+ subslice.llval,
+ bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
+ );
+
+ subslice
+ }
+ mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
+ };
+ }
+ debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
+ cg_base
+ }
+
+ pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
+ let tcx = self.cx.tcx();
+ let place_ty = place_ref.ty(self.mir, tcx);
+ self.monomorphize(place_ty.ty)
+ }
+}
+
+fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ value: Bx::Value,
+ align: Bx::Value,
+) -> Bx::Value {
+ // In pseudo code:
+ //
+ // if value & (align - 1) == 0 {
+ // value
+ // } else {
+ // (value & !(align - 1)) + align
+ // }
+ //
+ // Usually this is written without branches as
+ //
+ // (value + align - 1) & !(align - 1)
+ //
+ // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
+ // at compile time to be `1`, this expression should be optimized to `align`. However,
+ // optimization only holds if `align` is a power of two. Since the optimizer doesn't know
+ // that `align` is a power of two, it cannot perform this optimization.
+ //
+ // Instead we use
+ //
+ // value + (-value & (align - 1))
+ //
+ // Since `align` is used only once, the expression can be optimized. For `value = 0`
+ // it's optimized to `0` even in debug mode.
+ //
+ // NB: The previous version of this code used
+ //
+ // (value + align - 1) & -align
+ //
+ // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
+ // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
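+ //
+ // Sanity-checking the chosen formula with illustrative values: for
+ // `value = 5`, `align = 4`: `-5 & (4 - 1) = 3` in two's complement and
+ // `5 + 3 = 8`; for an already-aligned `value = 8`: `-8 & 3 = 0`.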
+ let one = bx.const_usize(1);
+ let align_minus_1 = bx.sub(align, one);
+ let neg_value = bx.neg(value);
+ let offset = bx.and(neg_value, align_minus_1);
+ bx.add(value, offset)
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
new file mode 100644
index 000000000..26b9fbf44
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -0,0 +1,729 @@
+use super::operand::{OperandRef, OperandValue};
+use super::place::PlaceRef;
+use super::{FunctionCx, LocalRef};
+
+use crate::base;
+use crate::common::{self, IntPredicate};
+use crate::traits::*;
+use crate::MemFlags;
+
+use rustc_middle::mir;
+use rustc_middle::mir::Operand;
+use rustc_middle::ty::cast::{CastTy, IntTy};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
+use rustc_span::source_map::{Span, DUMMY_SP};
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ #[instrument(level = "trace", skip(self, bx))]
+ pub fn codegen_rvalue(
+ &mut self,
+ mut bx: Bx,
+ dest: PlaceRef<'tcx, Bx::Value>,
+ rvalue: &mir::Rvalue<'tcx>,
+ ) -> Bx {
+ match *rvalue {
+ mir::Rvalue::Use(ref operand) => {
+ let cg_operand = self.codegen_operand(&mut bx, operand);
+ // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
+ // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
+ cg_operand.val.store(&mut bx, dest);
+ bx
+ }
+
+ mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
+ // The destination necessarily contains a fat pointer, so if
+ // it's a scalar pair, it's a fat pointer or newtype thereof.
+ if bx.cx().is_backend_scalar_pair(dest.layout) {
+ // Into-coerce of a thin pointer to a fat pointer -- just
+ // use the operand path.
+ let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(&mut bx, dest);
+ return bx;
+ }
+
+ // Unsize of a nontrivial struct. I would prefer for
+ // this to be eliminated by MIR building, but
+ // `CoerceUnsized` can be passed by a where-clause,
+ // so the (generic) MIR may not be able to expand it.
+ let operand = self.codegen_operand(&mut bx, source);
+ match operand.val {
+ OperandValue::Pair(..) | OperandValue::Immediate(_) => {
+ // Unsize from an immediate structure. We don't
+ // really need a temporary alloca here, but
+ // avoiding it would require us to have
+ // `coerce_unsized_into` use `extractvalue` to
+ // index into the struct, and this case isn't
+ // important enough for it.
+ debug!("codegen_rvalue: creating ugly alloca");
+ let scratch = PlaceRef::alloca(&mut bx, operand.layout);
+ scratch.storage_live(&mut bx);
+ operand.val.store(&mut bx, scratch);
+ base::coerce_unsized_into(&mut bx, scratch, dest);
+ scratch.storage_dead(&mut bx);
+ }
+ OperandValue::Ref(llref, None, align) => {
+ let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
+ base::coerce_unsized_into(&mut bx, source, dest);
+ }
+ OperandValue::Ref(_, Some(_), _) => {
+ bug!("unsized coercion on an unsized rvalue");
+ }
+ }
+ bx
+ }
+
+ mir::Rvalue::Repeat(ref elem, count) => {
+ let cg_elem = self.codegen_operand(&mut bx, elem);
+
+ // Do not generate the loop for zero-sized elements or empty arrays.
+ if dest.layout.is_zst() {
+ return bx;
+ }
+
+ if let OperandValue::Immediate(v) = cg_elem.val {
+ let zero = bx.const_usize(0);
+ let start = dest.project_index(&mut bx, zero).llval;
+ let size = bx.const_usize(dest.layout.size.bytes());
+
+ // Use llvm.memset.p0i8.* to initialize all zero arrays
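+ // (e.g. `[0u32; 1024]` becomes a single memset of 4096 zero bytes
+ // instead of a 1024-iteration store loop).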
+ if bx.cx().const_to_opt_uint(v) == Some(0) {
+ let fill = bx.cx().const_u8(0);
+ bx.memset(start, fill, size, dest.align, MemFlags::empty());
+ return bx;
+ }
+
+ // Use llvm.memset.p0i8.* to initialize byte arrays
+ let v = bx.from_immediate(v);
+ if bx.cx().val_ty(v) == bx.cx().type_i8() {
+ bx.memset(start, v, size, dest.align, MemFlags::empty());
+ return bx;
+ }
+ }
+
+ let count =
+ self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+
+ bx.write_operand_repeatedly(cg_elem, count, dest)
+ }
+
+ mir::Rvalue::Aggregate(ref kind, ref operands) => {
+ let (dest, active_field_index) = match **kind {
+ mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
+ dest.codegen_set_discr(&mut bx, variant_index);
+ if bx.tcx().adt_def(adt_did).is_enum() {
+ (dest.project_downcast(&mut bx, variant_index), active_field_index)
+ } else {
+ (dest, active_field_index)
+ }
+ }
+ _ => (dest, None),
+ };
+ for (i, operand) in operands.iter().enumerate() {
+ let op = self.codegen_operand(&mut bx, operand);
+ // Do not generate stores and GEPs for zero-sized fields.
+ if !op.layout.is_zst() {
+ let field_index = active_field_index.unwrap_or(i);
+ let field = if let mir::AggregateKind::Array(_) = **kind {
+ let llindex = bx.cx().const_usize(field_index as u64);
+ dest.project_index(&mut bx, llindex)
+ } else {
+ dest.project_field(&mut bx, field_index)
+ };
+ op.val.store(&mut bx, field);
+ }
+ }
+ bx
+ }
+
+ _ => {
+ assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
+ let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(&mut bx, dest);
+ bx
+ }
+ }
+ }
+
+ pub fn codegen_rvalue_unsized(
+ &mut self,
+ mut bx: Bx,
+ indirect_dest: PlaceRef<'tcx, Bx::Value>,
+ rvalue: &mir::Rvalue<'tcx>,
+ ) -> Bx {
+ debug!(
+ "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
+ indirect_dest.llval, rvalue
+ );
+
+ match *rvalue {
+ mir::Rvalue::Use(ref operand) => {
+ let cg_operand = self.codegen_operand(&mut bx, operand);
+ cg_operand.val.store_unsized(&mut bx, indirect_dest);
+ bx
+ }
+
+ _ => bug!("unsized assignment other than `Rvalue::Use`"),
+ }
+ }
+
+ pub fn codegen_rvalue_operand(
+ &mut self,
+ mut bx: Bx,
+ rvalue: &mir::Rvalue<'tcx>,
+ ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+ assert!(
+ self.rvalue_creates_operand(rvalue, DUMMY_SP),
+ "cannot codegen {:?} to operand",
+ rvalue,
+ );
+
+ match *rvalue {
+ mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
+ let operand = self.codegen_operand(&mut bx, source);
+ debug!("cast operand is {:?}", operand);
+ let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
+
+ let val = match *kind {
+ mir::CastKind::PointerExposeAddress => {
+ assert!(bx.cx().is_backend_immediate(cast));
+ let llptr = operand.immediate();
+ let llcast_ty = bx.cx().immediate_backend_type(cast);
+ let lladdr = bx.ptrtoint(llptr, llcast_ty);
+ OperandValue::Immediate(lladdr)
+ }
+ mir::CastKind::Pointer(PointerCast::ReifyFnPointer) => {
+ match *operand.layout.ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ let instance = ty::Instance::resolve_for_fn_ptr(
+ bx.tcx(),
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .unwrap()
+ .polymorphize(bx.cx().tcx());
+ OperandValue::Immediate(bx.get_fn_addr(instance))
+ }
+ _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
+ }
+ }
+ mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)) => {
+ match *operand.layout.ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ bx.cx().tcx(),
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .expect("failed to normalize and resolve closure during codegen")
+ .polymorphize(bx.cx().tcx());
+ OperandValue::Immediate(bx.cx().get_fn_addr(instance))
+ }
+ _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
+ }
+ }
+ mir::CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
+ // This is a no-op at the LLVM level.
+ operand.val
+ }
+ mir::CastKind::Pointer(PointerCast::Unsize) => {
+ assert!(bx.cx().is_backend_scalar_pair(cast));
+ let (lldata, llextra) = match operand.val {
+ OperandValue::Pair(lldata, llextra) => {
+ // unsize from a fat pointer -- this is a
+ // "trait-object-to-supertrait" coercion.
+ (lldata, Some(llextra))
+ }
+ OperandValue::Immediate(lldata) => {
+ // "standard" unsize
+ (lldata, None)
+ }
+ OperandValue::Ref(..) => {
+ bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
+ }
+ };
+ let (lldata, llextra) =
+ base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
+ OperandValue::Pair(lldata, llextra)
+ }
+ mir::CastKind::Pointer(PointerCast::MutToConstPointer)
+ | mir::CastKind::Misc
+ if bx.cx().is_backend_scalar_pair(operand.layout) =>
+ {
+ if let OperandValue::Pair(data_ptr, meta) = operand.val {
+ if bx.cx().is_backend_scalar_pair(cast) {
+ let data_cast = bx.pointercast(
+ data_ptr,
+ bx.cx().scalar_pair_element_backend_type(cast, 0, true),
+ );
+ OperandValue::Pair(data_cast, meta)
+ } else {
+ // cast to thin-ptr
+ // A cast of a fat-ptr to a thin-ptr extracts the data pointer and
+ // then pointer-casts it to the desired pointer type.
+ let llcast_ty = bx.cx().immediate_backend_type(cast);
+ let llval = bx.pointercast(data_ptr, llcast_ty);
+ OperandValue::Immediate(llval)
+ }
+ } else {
+ bug!("unexpected non-pair operand");
+ }
+ }
+ mir::CastKind::Pointer(
+ PointerCast::MutToConstPointer | PointerCast::ArrayToPointer,
+ )
+ | mir::CastKind::Misc
+ // Since int2ptr can have arbitrary integer types as input (so we have to do
+ // sign extension and all that), it is currently best handled in the same code
+ // path as the other integer-to-X casts.
+ | mir::CastKind::PointerFromExposedAddress => {
+ assert!(bx.cx().is_backend_immediate(cast));
+ let ll_t_out = bx.cx().immediate_backend_type(cast);
+ if operand.layout.abi.is_uninhabited() {
+ let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
+ return (bx, OperandRef { val, layout: cast });
+ }
+ let r_t_in =
+ CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
+ let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
+ let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
+ let llval = operand.immediate();
+
+ let newval = match (r_t_in, r_t_out) {
+ (CastTy::Int(i), CastTy::Int(_)) => {
+ bx.intcast(llval, ll_t_out, i.is_signed())
+ }
+ (CastTy::Float, CastTy::Float) => {
+ let srcsz = bx.cx().float_width(ll_t_in);
+ let dstsz = bx.cx().float_width(ll_t_out);
+ if dstsz > srcsz {
+ bx.fpext(llval, ll_t_out)
+ } else if srcsz > dstsz {
+ bx.fptrunc(llval, ll_t_out)
+ } else {
+ llval
+ }
+ }
+ (CastTy::Int(i), CastTy::Float) => {
+ if i.is_signed() {
+ bx.sitofp(llval, ll_t_out)
+ } else {
+ bx.uitofp(llval, ll_t_out)
+ }
+ }
+ (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
+ bx.pointercast(llval, ll_t_out)
+ }
+ (CastTy::Int(i), CastTy::Ptr(_)) => {
+ let usize_llval =
+ bx.intcast(llval, bx.cx().type_isize(), i.is_signed());
+ bx.inttoptr(usize_llval, ll_t_out)
+ }
+ (CastTy::Float, CastTy::Int(IntTy::I)) => {
+ bx.cast_float_to_int(true, llval, ll_t_out)
+ }
+ (CastTy::Float, CastTy::Int(_)) => {
+ bx.cast_float_to_int(false, llval, ll_t_out)
+ }
+ _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
+ };
+ OperandValue::Immediate(newval)
+ }
+ };
+ (bx, OperandRef { val, layout: cast })
+ }
+
+ mir::Rvalue::Ref(_, bk, place) => {
+ let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+ tcx.mk_ref(
+ tcx.lifetimes.re_erased,
+ ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
+ )
+ };
+ self.codegen_place_to_pointer(bx, place, mk_ref)
+ }
+
+ mir::Rvalue::CopyForDeref(place) => {
+ let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
+ (bx, operand)
+ }
+ mir::Rvalue::AddressOf(mutability, place) => {
+ let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
+ tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
+ };
+ self.codegen_place_to_pointer(bx, place, mk_ptr)
+ }
+
+ mir::Rvalue::Len(place) => {
+ let size = self.evaluate_array_len(&mut bx, place);
+ let operand = OperandRef {
+ val: OperandValue::Immediate(size),
+ layout: bx.cx().layout_of(bx.tcx().types.usize),
+ };
+ (bx, operand)
+ }
+
+ mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs = self.codegen_operand(&mut bx, lhs);
+ let rhs = self.codegen_operand(&mut bx, rhs);
+ let llresult = match (lhs.val, rhs.val) {
+ (
+ OperandValue::Pair(lhs_addr, lhs_extra),
+ OperandValue::Pair(rhs_addr, rhs_extra),
+ ) => self.codegen_fat_ptr_binop(
+ &mut bx,
+ op,
+ lhs_addr,
+ lhs_extra,
+ rhs_addr,
+ rhs_extra,
+ lhs.layout.ty,
+ ),
+
+ (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
+ self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
+ }
+
+ _ => bug!(),
+ };
+ let operand = OperandRef {
+ val: OperandValue::Immediate(llresult),
+ layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
+ };
+ (bx, operand)
+ }
+ mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs = self.codegen_operand(&mut bx, lhs);
+ let rhs = self.codegen_operand(&mut bx, rhs);
+ let result = self.codegen_scalar_checked_binop(
+ &mut bx,
+ op,
+ lhs.immediate(),
+ rhs.immediate(),
+ lhs.layout.ty,
+ );
+ let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
+ let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
+ let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
+
+ (bx, operand)
+ }
+
+ mir::Rvalue::UnaryOp(op, ref operand) => {
+ let operand = self.codegen_operand(&mut bx, operand);
+ let lloperand = operand.immediate();
+ let is_float = operand.layout.ty.is_floating_point();
+ let llval = match op {
+ mir::UnOp::Not => bx.not(lloperand),
+ mir::UnOp::Neg => {
+ if is_float {
+ bx.fneg(lloperand)
+ } else {
+ bx.neg(lloperand)
+ }
+ }
+ };
+ (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
+ }
+
+ mir::Rvalue::Discriminant(ref place) => {
+ let discr_ty = rvalue.ty(self.mir, bx.tcx());
+ let discr_ty = self.monomorphize(discr_ty);
+ let discr = self
+ .codegen_place(&mut bx, place.as_ref())
+ .codegen_get_discr(&mut bx, discr_ty);
+ (
+ bx,
+ OperandRef {
+ val: OperandValue::Immediate(discr),
+ layout: self.cx.layout_of(discr_ty),
+ },
+ )
+ }
+
+ mir::Rvalue::NullaryOp(null_op, ty) => {
+ let ty = self.monomorphize(ty);
+ assert!(bx.cx().type_is_sized(ty));
+ let layout = bx.cx().layout_of(ty);
+ let val = match null_op {
+ mir::NullOp::SizeOf => layout.size.bytes(),
+ mir::NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ let val = bx.cx().const_usize(val);
+ let tcx = self.cx.tcx();
+ (
+ bx,
+ OperandRef {
+ val: OperandValue::Immediate(val),
+ layout: self.cx.layout_of(tcx.types.usize),
+ },
+ )
+ }
+
+ mir::Rvalue::ThreadLocalRef(def_id) => {
+ assert!(bx.cx().tcx().is_static(def_id));
+ let static_ = bx.get_static(def_id);
+ let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
+ let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
+ (bx, operand)
+ }
+ mir::Rvalue::Use(ref operand) => {
+ let operand = self.codegen_operand(&mut bx, operand);
+ (bx, operand)
+ }
+ mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
+ // According to `rvalue_creates_operand`, only ZST
+ // aggregate rvalues are allowed to be operands.
+ let ty = rvalue.ty(self.mir, self.cx.tcx());
+ let operand =
+ OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
+ (bx, operand)
+ }
+ mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
+ let operand = self.codegen_operand(&mut bx, operand);
+ let lloperand = operand.immediate();
+
+ let content_ty = self.monomorphize(content_ty);
+ let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
+ let llty_ptr = bx.cx().backend_type(box_layout);
+
+ let val = bx.pointercast(lloperand, llty_ptr);
+ let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
+ (bx, operand)
+ }
+ }
+ }
+
+ fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
+ // ZSTs are passed as operands and require special handling
+ // because codegen_place() panics if the local is an operand.
+ if let Some(index) = place.as_local() {
+ if let LocalRef::Operand(Some(op)) = self.locals[index] {
+ if let ty::Array(_, n) = op.layout.ty.kind() {
+ let n = n.eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
+ return bx.cx().const_usize(n);
+ }
+ }
+ }
+ // Use the common size calculation for non-zero-sized types.
+ let cg_value = self.codegen_place(bx, place.as_ref());
+ cg_value.len(bx.cx())
+ }
+
+ /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
+ fn codegen_place_to_pointer(
+ &mut self,
+ mut bx: Bx,
+ place: mir::Place<'tcx>,
+ mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
+ ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+ let cg_place = self.codegen_place(&mut bx, place.as_ref());
+
+ let ty = cg_place.layout.ty;
+
+ // Note: places are indirect, so storing the `llval` into the
+ // destination effectively creates a reference.
+ let val = if !bx.cx().type_has_metadata(ty) {
+ OperandValue::Immediate(cg_place.llval)
+ } else {
+ OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
+ };
+ (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
+ }
+
+ pub fn codegen_scalar_binop(
+ &mut self,
+ bx: &mut Bx,
+ op: mir::BinOp,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+ input_ty: Ty<'tcx>,
+ ) -> Bx::Value {
+ let is_float = input_ty.is_floating_point();
+ let is_signed = input_ty.is_signed();
+ match op {
+ mir::BinOp::Add => {
+ if is_float {
+ bx.fadd(lhs, rhs)
+ } else {
+ bx.add(lhs, rhs)
+ }
+ }
+ mir::BinOp::Sub => {
+ if is_float {
+ bx.fsub(lhs, rhs)
+ } else {
+ bx.sub(lhs, rhs)
+ }
+ }
+ mir::BinOp::Mul => {
+ if is_float {
+ bx.fmul(lhs, rhs)
+ } else {
+ bx.mul(lhs, rhs)
+ }
+ }
+ mir::BinOp::Div => {
+ if is_float {
+ bx.fdiv(lhs, rhs)
+ } else if is_signed {
+ bx.sdiv(lhs, rhs)
+ } else {
+ bx.udiv(lhs, rhs)
+ }
+ }
+ mir::BinOp::Rem => {
+ if is_float {
+ bx.frem(lhs, rhs)
+ } else if is_signed {
+ bx.srem(lhs, rhs)
+ } else {
+ bx.urem(lhs, rhs)
+ }
+ }
+ mir::BinOp::BitOr => bx.or(lhs, rhs),
+ mir::BinOp::BitAnd => bx.and(lhs, rhs),
+ mir::BinOp::BitXor => bx.xor(lhs, rhs),
+ mir::BinOp::Offset => {
+ let pointee_type = input_ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
+ .ty;
+ let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
+ bx.inbounds_gep(llty, lhs, &[rhs])
+ }
+ mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
+ mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
+ mir::BinOp::Ne
+ | mir::BinOp::Lt
+ | mir::BinOp::Gt
+ | mir::BinOp::Eq
+ | mir::BinOp::Le
+ | mir::BinOp::Ge => {
+ if is_float {
+ bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
+ } else {
+ bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
+ }
+ }
+ }
+ }
+
+ pub fn codegen_fat_ptr_binop(
+ &mut self,
+ bx: &mut Bx,
+ op: mir::BinOp,
+ lhs_addr: Bx::Value,
+ lhs_extra: Bx::Value,
+ rhs_addr: Bx::Value,
+ rhs_extra: Bx::Value,
+ _input_ty: Ty<'tcx>,
+ ) -> Bx::Value {
+ match op {
+ mir::BinOp::Eq => {
+ let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+ let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
+ bx.and(lhs, rhs)
+ }
+ mir::BinOp::Ne => {
+ let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
+ let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
+ bx.or(lhs, rhs)
+ }
+ mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
+ // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
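+ // E.g. for `Lt`: `a < b  <=>  a.0 < b.0 || (a.0 == b.0 && a.1 < b.1)`,
+ // i.e. an unsigned lexicographic comparison on (data, metadata) pairs.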
+ let (op, strict_op) = match op {
+ mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
+ mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
+ mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
+ mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
+ _ => bug!(),
+ };
+ let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
+ let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
+ let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
+ let rhs = bx.and(and_lhs, and_rhs);
+ bx.or(lhs, rhs)
+ }
+ _ => {
+ bug!("unexpected fat ptr binop");
+ }
+ }
+ }
+
+ pub fn codegen_scalar_checked_binop(
+ &mut self,
+ bx: &mut Bx,
+ op: mir::BinOp,
+ lhs: Bx::Value,
+ rhs: Bx::Value,
+ input_ty: Ty<'tcx>,
+ ) -> OperandValue<Bx::Value> {
+ // This case can currently arise only from functions marked
+ // with #[rustc_inherit_overflow_checks] and inlined from
+ // another crate (mostly core::num generic/#[inline] fns),
+ // while the current crate doesn't use overflow checks.
+ if !bx.cx().check_overflow() {
+ let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+ return OperandValue::Pair(val, bx.cx().const_bool(false));
+ }
+
+ let (val, of) = match op {
+ // These are checked using intrinsics
+ mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
+ let oop = match op {
+ mir::BinOp::Add => OverflowOp::Add,
+ mir::BinOp::Sub => OverflowOp::Sub,
+ mir::BinOp::Mul => OverflowOp::Mul,
+ _ => unreachable!(),
+ };
+ bx.checked_binop(oop, input_ty, lhs, rhs)
+ }
+ mir::BinOp::Shl | mir::BinOp::Shr => {
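+ // A shift overflows iff the amount is at least the bit width of the
+ // left-hand type: e.g. for a 32-bit `lhs`, `invert_mask` is `!31`, so
+ // `rhs & !31 != 0` exactly when `rhs >= 32`.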
+ let lhs_llty = bx.cx().val_ty(lhs);
+ let rhs_llty = bx.cx().val_ty(rhs);
+ let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
+ let outer_bits = bx.and(rhs, invert_mask);
+
+ let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
+ let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
+
+ (val, of)
+ }
+ _ => bug!("Operator `{:?}` is not a checkable operator", op),
+ };
+
+ OperandValue::Pair(val, of)
+ }
+}
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
+ match *rvalue {
+ mir::Rvalue::Ref(..) |
+ mir::Rvalue::CopyForDeref(..) |
+ mir::Rvalue::AddressOf(..) |
+ mir::Rvalue::Len(..) |
+ mir::Rvalue::Cast(..) | // (*)
+ mir::Rvalue::ShallowInitBox(..) | // (*)
+ mir::Rvalue::BinaryOp(..) |
+ mir::Rvalue::CheckedBinaryOp(..) |
+ mir::Rvalue::UnaryOp(..) |
+ mir::Rvalue::Discriminant(..) |
+ mir::Rvalue::NullaryOp(..) |
+ mir::Rvalue::ThreadLocalRef(_) |
+ mir::Rvalue::Use(..) => // (*)
+ true,
+ mir::Rvalue::Repeat(..) |
+ mir::Rvalue::Aggregate(..) => {
+ let ty = rvalue.ty(self.mir, self.cx.tcx());
+ let ty = self.monomorphize(ty);
+ self.cx.spanned_layout_of(ty, span).is_zst()
+ }
+ }
+
+ // (*) this is only true if the type is suitable
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
new file mode 100644
index 000000000..f452f2988
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mir/statement.rs
@@ -0,0 +1,102 @@
+use rustc_middle::mir;
+
+use super::FunctionCx;
+use super::LocalRef;
+use crate::traits::BuilderMethods;
+use crate::traits::*;
+
+impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+ #[instrument(level = "debug", skip(self, bx))]
+ pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
+ self.set_debug_loc(&mut bx, statement.source_info);
+ match statement.kind {
+ mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
+ if let Some(index) = place.as_local() {
+ match self.locals[index] {
+ LocalRef::Place(cg_dest) => self.codegen_rvalue(bx, cg_dest, rvalue),
+ LocalRef::UnsizedPlace(cg_indirect_dest) => {
+ self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
+ }
+ LocalRef::Operand(None) => {
+ let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+ self.locals[index] = LocalRef::Operand(Some(operand));
+ self.debug_introduce_local(&mut bx, index);
+ bx
+ }
+ LocalRef::Operand(Some(op)) => {
+ if !op.layout.is_zst() {
+ span_bug!(
+ statement.source_info.span,
+ "operand {:?} already assigned",
+ rvalue
+ );
+ }
+
+ // If the type is zero-sized, the local has already been set here;
+ // we still need to make sure we codegen the operand, though.
+ self.codegen_rvalue_operand(bx, rvalue).0
+ }
+ }
+ } else {
+ let cg_dest = self.codegen_place(&mut bx, place.as_ref());
+ self.codegen_rvalue(bx, cg_dest, rvalue)
+ }
+ }
+ mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
+ self.codegen_place(&mut bx, place.as_ref())
+ .codegen_set_discr(&mut bx, variant_index);
+ bx
+ }
+ mir::StatementKind::Deinit(..) => {
+ // For now, don't codegen this to anything. In the future it may be worth
+ // experimenting with what kind of information we can emit to LLVM without hurting
+ // perf here
+ bx
+ }
+ mir::StatementKind::StorageLive(local) => {
+ if let LocalRef::Place(cg_place) = self.locals[local] {
+ cg_place.storage_live(&mut bx);
+ } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+ cg_indirect_place.storage_live(&mut bx);
+ }
+ bx
+ }
+ mir::StatementKind::StorageDead(local) => {
+ if let LocalRef::Place(cg_place) = self.locals[local] {
+ cg_place.storage_dead(&mut bx);
+ } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
+ cg_indirect_place.storage_dead(&mut bx);
+ }
+ bx
+ }
+ mir::StatementKind::Coverage(box ref coverage) => {
+ self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
+ bx
+ }
+ mir::StatementKind::CopyNonOverlapping(box mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ let dst_val = self.codegen_operand(&mut bx, dst);
+ let src_val = self.codegen_operand(&mut bx, src);
+ let count = self.codegen_operand(&mut bx, count).immediate();
+ let pointee_layout = dst_val
+ .layout
+ .pointee_info_at(&bx, rustc_target::abi::Size::ZERO)
+ .expect("Expected pointer");
+ let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes()));
+
+ let align = pointee_layout.align;
+ let dst = dst_val.immediate();
+ let src = src_val.immediate();
+ bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
+ bx
+ }
+ mir::StatementKind::FakeRead(..)
+ | mir::StatementKind::Retag { .. }
+ | mir::StatementKind::AscribeUserType(..)
+ | mir::StatementKind::Nop => bx,
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/mono_item.rs b/compiler/rustc_codegen_ssa/src/mono_item.rs
new file mode 100644
index 000000000..5006a2157
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/mono_item.rs
@@ -0,0 +1,147 @@
+use crate::base;
+use crate::common;
+use crate::traits::*;
+use rustc_hir as hir;
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
+use rustc_middle::ty::Instance;
+
+pub trait MonoItemExt<'a, 'tcx> {
+ fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx);
+ fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ cx: &'a Bx::CodegenCx,
+ linkage: Linkage,
+ visibility: Visibility,
+ );
+ fn to_raw_string(&self) -> String;
+}
+
+impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {
+ fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx) {
+ debug!(
+ "BEGIN IMPLEMENTING '{} ({})' in cgu {}",
+ self,
+ self.to_raw_string(),
+ cx.codegen_unit().name()
+ );
+
+ match *self {
+ MonoItem::Static(def_id) => {
+ cx.codegen_static(def_id, cx.tcx().is_mutable_static(def_id));
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ let item = cx.tcx().hir().item(item_id);
+ if let hir::ItemKind::GlobalAsm(ref asm) = item.kind {
+ let operands: Vec<_> = asm
+ .operands
+ .iter()
+ .map(|(op, op_sp)| match *op {
+ hir::InlineAsmOperand::Const { ref anon_const } => {
+ let anon_const_def_id =
+ cx.tcx().hir().local_def_id(anon_const.hir_id).to_def_id();
+ let const_value =
+ cx.tcx().const_eval_poly(anon_const_def_id).unwrap_or_else(
+ |_| span_bug!(*op_sp, "asm const cannot be resolved"),
+ );
+ let ty = cx
+ .tcx()
+ .typeck_body(anon_const.body)
+ .node_type(anon_const.hir_id);
+ let string = common::asm_const_to_str(
+ cx.tcx(),
+ *op_sp,
+ const_value,
+ cx.layout_of(ty),
+ );
+ GlobalAsmOperandRef::Const { string }
+ }
+ hir::InlineAsmOperand::SymFn { ref anon_const } => {
+ let ty = cx
+ .tcx()
+ .typeck_body(anon_const.body)
+ .node_type(anon_const.hir_id);
+ let instance = match ty.kind() {
+ &ty::FnDef(def_id, substs) => Instance::new(def_id, substs),
+ _ => span_bug!(*op_sp, "asm sym is not a function"),
+ };
+
+ GlobalAsmOperandRef::SymFn { instance }
+ }
+ hir::InlineAsmOperand::SymStatic { path: _, def_id } => {
+ GlobalAsmOperandRef::SymStatic { def_id }
+ }
+ hir::InlineAsmOperand::In { .. }
+ | hir::InlineAsmOperand::Out { .. }
+ | hir::InlineAsmOperand::InOut { .. }
+ | hir::InlineAsmOperand::SplitInOut { .. } => {
+ span_bug!(*op_sp, "invalid operand type for global_asm!")
+ }
+ })
+ .collect();
+
+ cx.codegen_global_asm(asm.template, &operands, asm.options, asm.line_spans);
+ } else {
+ span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
+ }
+ }
+ MonoItem::Fn(instance) => {
+ base::codegen_instance::<Bx>(&cx, instance);
+ }
+ }
+
+ debug!(
+ "END IMPLEMENTING '{} ({})' in cgu {}",
+ self,
+ self.to_raw_string(),
+ cx.codegen_unit().name()
+ );
+ }
+
+ fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
+ &self,
+ cx: &'a Bx::CodegenCx,
+ linkage: Linkage,
+ visibility: Visibility,
+ ) {
+ debug!(
+ "BEGIN PREDEFINING '{} ({})' in cgu {}",
+ self,
+ self.to_raw_string(),
+ cx.codegen_unit().name()
+ );
+
+ let symbol_name = self.symbol_name(cx.tcx()).name;
+
+ debug!("symbol {}", &symbol_name);
+
+ match *self {
+ MonoItem::Static(def_id) => {
+ cx.predefine_static(def_id, linkage, visibility, &symbol_name);
+ }
+ MonoItem::Fn(instance) => {
+ cx.predefine_fn(instance, linkage, visibility, &symbol_name);
+ }
+ MonoItem::GlobalAsm(..) => {}
+ }
+
+ debug!(
+ "END PREDEFINING '{} ({})' in cgu {}",
+ self,
+ self.to_raw_string(),
+ cx.codegen_unit().name()
+ );
+ }
+
+ fn to_raw_string(&self) -> String {
+ match *self {
+ MonoItem::Fn(instance) => {
+ format!("Fn({:?}, {})", instance.def, instance.substs.as_ptr().addr())
+ }
+ MonoItem::Static(id) => format!("Static({:?})", id),
+ MonoItem::GlobalAsm(id) => format!("GlobalAsm({:?})", id),
+ }
+ }
+}
diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs
new file mode 100644
index 000000000..ecad05185
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/target_features.rs
@@ -0,0 +1,308 @@
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::ty::query::Providers;
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::symbol::Symbol;
+
+/// Features that control behaviour of rustc, rather than the codegen.
+pub const RUSTC_SPECIFIC_FEATURES: &[&str] = &["crt-static"];
+
+// When adding features to the lists below, check whether they're already named
+// elsewhere in Rust (e.g. in stdarch) and whether the given name matches LLVM's;
+// if it doesn't, `to_llvm_feature` in `llvm_util` in `rustc_codegen_llvm` needs
+// to be adapted.
+
+const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("aclass", Some(sym::arm_target_feature)),
+ ("mclass", Some(sym::arm_target_feature)),
+ ("rclass", Some(sym::arm_target_feature)),
+ ("dsp", Some(sym::arm_target_feature)),
+ ("neon", Some(sym::arm_target_feature)),
+ ("crc", Some(sym::arm_target_feature)),
+ ("crypto", Some(sym::arm_target_feature)),
+ ("aes", Some(sym::arm_target_feature)),
+ ("sha2", Some(sym::arm_target_feature)),
+ ("i8mm", Some(sym::arm_target_feature)),
+ ("dotprod", Some(sym::arm_target_feature)),
+ ("v5te", Some(sym::arm_target_feature)),
+ ("v6", Some(sym::arm_target_feature)),
+ ("v6k", Some(sym::arm_target_feature)),
+ ("v6t2", Some(sym::arm_target_feature)),
+ ("v7", Some(sym::arm_target_feature)),
+ ("v8", Some(sym::arm_target_feature)),
+ ("vfp2", Some(sym::arm_target_feature)),
+ ("vfp3", Some(sym::arm_target_feature)),
+ ("vfp4", Some(sym::arm_target_feature)),
+ ("fp-armv8", Some(sym::arm_target_feature)),
+ // This is needed for inline assembly, but shouldn't be stabilized as-is
+ // since it should be enabled per-function using #[instruction_set], not
+ // #[target_feature].
+ ("thumb-mode", Some(sym::arm_target_feature)),
+ ("thumb2", Some(sym::arm_target_feature)),
+ ("d32", Some(sym::arm_target_feature)),
+];
+
+const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ // FEAT_AdvSimd & FEAT_FP
+ ("neon", None),
+ // FEAT_FP16
+ ("fp16", None),
+ // FEAT_SVE
+ ("sve", None),
+ // FEAT_CRC
+ ("crc", None),
+ // FEAT_RAS
+ ("ras", None),
+ // FEAT_LSE
+ ("lse", None),
+ // FEAT_RDM
+ ("rdm", None),
+ // FEAT_RCPC
+ ("rcpc", None),
+ // FEAT_RCPC2
+ ("rcpc2", None),
+ // FEAT_DotProd
+ ("dotprod", None),
+ // FEAT_TME
+ ("tme", None),
+ // FEAT_FHM
+ ("fhm", None),
+ // FEAT_DIT
+ ("dit", None),
+ // FEAT_FLAGM
+ ("flagm", None),
+ // FEAT_SSBS
+ ("ssbs", None),
+ // FEAT_SB
+ ("sb", None),
+ // FEAT_PAUTH (address authentication)
+ ("paca", None),
+ // FEAT_PAUTH (generic authentication)
+ ("pacg", None),
+ // FEAT_DPB
+ ("dpb", None),
+ // FEAT_DPB2
+ ("dpb2", None),
+ // FEAT_SVE2
+ ("sve2", None),
+ // FEAT_SVE2_AES
+ ("sve2-aes", None),
+ // FEAT_SVE2_SM4
+ ("sve2-sm4", None),
+ // FEAT_SVE2_SHA3
+ ("sve2-sha3", None),
+ // FEAT_SVE2_BitPerm
+ ("sve2-bitperm", None),
+ // FEAT_FRINTTS
+ ("frintts", None),
+ // FEAT_I8MM
+ ("i8mm", None),
+ // FEAT_F32MM
+ ("f32mm", None),
+ // FEAT_F64MM
+ ("f64mm", None),
+ // FEAT_BF16
+ ("bf16", None),
+ // FEAT_RAND
+ ("rand", None),
+ // FEAT_BTI
+ ("bti", None),
+ // FEAT_MTE
+ ("mte", None),
+ // FEAT_JSCVT
+ ("jsconv", None),
+ // FEAT_FCMA
+ ("fcma", None),
+ // FEAT_AES
+ ("aes", None),
+ // FEAT_SHA1 & FEAT_SHA256
+ ("sha2", None),
+ // FEAT_SHA512 & FEAT_SHA3
+ ("sha3", None),
+ // FEAT_SM3 & FEAT_SM4
+ ("sm4", None),
+ // FEAT_PAN
+ ("pan", None),
+ // FEAT_LOR
+ ("lor", None),
+ // FEAT_VHE
+ ("vh", None),
+ // FEAT_PMUv3
+ ("pmuv3", None),
+ // FEAT_SPE
+ ("spe", None),
+ ("v8.1a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.2a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.3a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.4a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.5a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.6a", Some(sym::aarch64_ver_target_feature)),
+ ("v8.7a", Some(sym::aarch64_ver_target_feature)),
+];
+
+const AARCH64_TIED_FEATURES: &[&[&str]] = &[
+ &["paca", "pacg"], // Together these represent `pauth` in LLVM
+];
+
+const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("adx", None),
+ ("aes", None),
+ ("avx", None),
+ ("avx2", None),
+ ("avx512bf16", Some(sym::avx512_target_feature)),
+ ("avx512bitalg", Some(sym::avx512_target_feature)),
+ ("avx512bw", Some(sym::avx512_target_feature)),
+ ("avx512cd", Some(sym::avx512_target_feature)),
+ ("avx512dq", Some(sym::avx512_target_feature)),
+ ("avx512er", Some(sym::avx512_target_feature)),
+ ("avx512f", Some(sym::avx512_target_feature)),
+ ("avx512gfni", Some(sym::avx512_target_feature)),
+ ("avx512ifma", Some(sym::avx512_target_feature)),
+ ("avx512pf", Some(sym::avx512_target_feature)),
+ ("avx512vaes", Some(sym::avx512_target_feature)),
+ ("avx512vbmi", Some(sym::avx512_target_feature)),
+ ("avx512vbmi2", Some(sym::avx512_target_feature)),
+ ("avx512vl", Some(sym::avx512_target_feature)),
+ ("avx512vnni", Some(sym::avx512_target_feature)),
+ ("avx512vp2intersect", Some(sym::avx512_target_feature)),
+ ("avx512vpclmulqdq", Some(sym::avx512_target_feature)),
+ ("avx512vpopcntdq", Some(sym::avx512_target_feature)),
+ ("bmi1", None),
+ ("bmi2", None),
+ ("cmpxchg16b", Some(sym::cmpxchg16b_target_feature)),
+ ("ermsb", Some(sym::ermsb_target_feature)),
+ ("f16c", Some(sym::f16c_target_feature)),
+ ("fma", None),
+ ("fxsr", None),
+ ("lzcnt", None),
+ ("movbe", Some(sym::movbe_target_feature)),
+ ("pclmulqdq", None),
+ ("popcnt", None),
+ ("rdrand", None),
+ ("rdseed", None),
+ ("rtm", Some(sym::rtm_target_feature)),
+ ("sha", None),
+ ("sse", None),
+ ("sse2", None),
+ ("sse3", None),
+ ("sse4.1", None),
+ ("sse4.2", None),
+ ("sse4a", Some(sym::sse4a_target_feature)),
+ ("ssse3", None),
+ ("tbm", Some(sym::tbm_target_feature)),
+ ("xsave", None),
+ ("xsavec", None),
+ ("xsaveopt", None),
+ ("xsaves", None),
+];
+
+const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("hvx", Some(sym::hexagon_target_feature)),
+ ("hvx-length128b", Some(sym::hexagon_target_feature)),
+];
+
+const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("altivec", Some(sym::powerpc_target_feature)),
+ ("power8-altivec", Some(sym::powerpc_target_feature)),
+ ("power9-altivec", Some(sym::powerpc_target_feature)),
+ ("power8-vector", Some(sym::powerpc_target_feature)),
+ ("power9-vector", Some(sym::powerpc_target_feature)),
+ ("vsx", Some(sym::powerpc_target_feature)),
+];
+
+const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
+ &[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];
+
+const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("m", Some(sym::riscv_target_feature)),
+ ("a", Some(sym::riscv_target_feature)),
+ ("c", Some(sym::riscv_target_feature)),
+ ("f", Some(sym::riscv_target_feature)),
+ ("d", Some(sym::riscv_target_feature)),
+ ("e", Some(sym::riscv_target_feature)),
+ ("v", Some(sym::riscv_target_feature)),
+ ("zfinx", Some(sym::riscv_target_feature)),
+ ("zdinx", Some(sym::riscv_target_feature)),
+ ("zhinx", Some(sym::riscv_target_feature)),
+ ("zhinxmin", Some(sym::riscv_target_feature)),
+ ("zfh", Some(sym::riscv_target_feature)),
+ ("zfhmin", Some(sym::riscv_target_feature)),
+ ("zbkb", Some(sym::riscv_target_feature)),
+ ("zbkc", Some(sym::riscv_target_feature)),
+ ("zbkx", Some(sym::riscv_target_feature)),
+ ("zknd", Some(sym::riscv_target_feature)),
+ ("zkne", Some(sym::riscv_target_feature)),
+ ("zknh", Some(sym::riscv_target_feature)),
+ ("zksed", Some(sym::riscv_target_feature)),
+ ("zksh", Some(sym::riscv_target_feature)),
+ ("zkr", Some(sym::riscv_target_feature)),
+ ("zkn", Some(sym::riscv_target_feature)),
+ ("zks", Some(sym::riscv_target_feature)),
+ ("zk", Some(sym::riscv_target_feature)),
+ ("zkt", Some(sym::riscv_target_feature)),
+];
+
+const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
+ ("simd128", None),
+ ("atomics", Some(sym::wasm_target_feature)),
+ ("nontrapping-fptoint", Some(sym::wasm_target_feature)),
+ ("bulk-memory", Some(sym::wasm_target_feature)),
+ ("mutable-globals", Some(sym::wasm_target_feature)),
+ ("reference-types", Some(sym::wasm_target_feature)),
+ ("sign-ext", Some(sym::wasm_target_feature)),
+];
+
+const BPF_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[("alu32", Some(sym::bpf_target_feature))];
+
+/// When rustdoc is running, provide a list of all known features so that all their respective
+/// primitives may be documented.
+///
+/// IMPORTANT: If you're adding another feature list above, make sure to add it to this iterator!
+pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol>)> {
+ std::iter::empty()
+ .chain(ARM_ALLOWED_FEATURES.iter())
+ .chain(AARCH64_ALLOWED_FEATURES.iter())
+ .chain(X86_ALLOWED_FEATURES.iter())
+ .chain(HEXAGON_ALLOWED_FEATURES.iter())
+ .chain(POWERPC_ALLOWED_FEATURES.iter())
+ .chain(MIPS_ALLOWED_FEATURES.iter())
+ .chain(RISCV_ALLOWED_FEATURES.iter())
+ .chain(WASM_ALLOWED_FEATURES.iter())
+ .chain(BPF_ALLOWED_FEATURES.iter())
+ .cloned()
+}
+
+pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Option<Symbol>)] {
+ match &*sess.target.arch {
+ "arm" => ARM_ALLOWED_FEATURES,
+ "aarch64" => AARCH64_ALLOWED_FEATURES,
+ "x86" | "x86_64" => X86_ALLOWED_FEATURES,
+ "hexagon" => HEXAGON_ALLOWED_FEATURES,
+ "mips" | "mips64" => MIPS_ALLOWED_FEATURES,
+ "powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
+ "riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
+ "wasm32" | "wasm64" => WASM_ALLOWED_FEATURES,
+ "bpf" => BPF_ALLOWED_FEATURES,
+ _ => &[],
+ }
+}
+
+pub fn tied_target_features(sess: &Session) -> &'static [&'static [&'static str]] {
+ match &*sess.target.arch {
+ "aarch64" => AARCH64_TIED_FEATURES,
+ _ => &[],
+ }
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ providers.supported_target_features = |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ if tcx.sess.opts.actually_rustdoc {
+ // rustdoc needs to be able to document functions that use all the features, so
+ // whitelist them all
+ all_known_features().map(|(a, b)| (a.to_string(), b)).collect()
+ } else {
+ supported_target_features(tcx.sess).iter().map(|&(a, b)| (a.to_string(), b)).collect()
+ }
+ };
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/abi.rs b/compiler/rustc_codegen_ssa/src/traits/abi.rs
new file mode 100644
index 000000000..a00d78daf
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/abi.rs
@@ -0,0 +1,8 @@
+use super::BackendTypes;
+use rustc_middle::ty::Ty;
+use rustc_target::abi::call::FnAbi;
+
+pub trait AbiBuilderMethods<'tcx>: BackendTypes {
+ fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value);
+ fn get_param(&mut self, index: usize) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/asm.rs b/compiler/rustc_codegen_ssa/src/traits/asm.rs
new file mode 100644
index 000000000..c2ae74b18
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/asm.rs
@@ -0,0 +1,66 @@
+use super::BackendTypes;
+use crate::mir::operand::OperandRef;
+use crate::mir::place::PlaceRef;
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::Instance;
+use rustc_span::Span;
+use rustc_target::asm::InlineAsmRegOrRegClass;
+
+#[derive(Debug)]
+pub enum InlineAsmOperandRef<'tcx, B: BackendTypes + ?Sized> {
+ In {
+ reg: InlineAsmRegOrRegClass,
+ value: OperandRef<'tcx, B::Value>,
+ },
+ Out {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ place: Option<PlaceRef<'tcx, B::Value>>,
+ },
+ InOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ in_value: OperandRef<'tcx, B::Value>,
+ out_place: Option<PlaceRef<'tcx, B::Value>>,
+ },
+ Const {
+ string: String,
+ },
+ SymFn {
+ instance: Instance<'tcx>,
+ },
+ SymStatic {
+ def_id: DefId,
+ },
+}
+
+#[derive(Debug)]
+pub enum GlobalAsmOperandRef<'tcx> {
+ Const { string: String },
+ SymFn { instance: Instance<'tcx> },
+ SymStatic { def_id: DefId },
+}
+
+pub trait AsmBuilderMethods<'tcx>: BackendTypes {
+ /// Take an inline assembly expression and lower it via the backend (e.g. to LLVM IR).
+ fn codegen_inline_asm(
+ &mut self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[InlineAsmOperandRef<'tcx, Self>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ instance: Instance<'_>,
+ dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
+ );
+}
+
+pub trait AsmMethods<'tcx> {
+ fn codegen_global_asm(
+ &self,
+ template: &[InlineAsmTemplatePiece],
+ operands: &[GlobalAsmOperandRef<'tcx>],
+ options: InlineAsmOptions,
+ line_spans: &[Span],
+ );
+}
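
A backend's `codegen_inline_asm` is essentially a dispatch over these operand kinds. The following backend-free sketch, with strings standing in for `OperandRef`/`PlaceRef` values and registers (every name here is hypothetical), shows the shape of that dispatch:

// Hypothetical, simplified mirror of InlineAsmOperandRef, using strings in
// place of backend values and registers.
enum Operand {
    In { reg: String, value: String },
    Out { reg: String, late: bool, place: Option<String> },
    Const { string: String },
}

fn describe(op: &Operand) -> String {
    match op {
        Operand::In { reg, value } => format!("in({reg}) {value}"),
        Operand::Out { reg, late, place } => {
            let kind = if *late { "lateout" } else { "out" };
            match place {
                Some(p) => format!("{kind}({reg}) {p}"),
                None => format!("{kind}({reg}) _"), // output discarded
            }
        }
        Operand::Const { string } => format!("const {string}"),
    }
}

fn main() {
    let ops = [
        Operand::In { reg: "reg".into(), value: "x".into() },
        Operand::Out { reg: "reg".into(), late: true, place: None },
    ];
    for op in &ops {
        println!("{}", describe(op));
    }
}
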
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
new file mode 100644
index 000000000..779bd3ea2
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -0,0 +1,161 @@
+use super::write::WriteBackendMethods;
+use super::CodegenObject;
+use crate::back::write::TargetMachineFactoryFn;
+use crate::{CodegenResults, ModuleCodegen};
+
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_metadata::EncodedMetadata;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::query::{ExternProviders, Providers};
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_session::{
+ config::{self, OutputFilenames, PrintRequest},
+ cstore::MetadataLoaderDyn,
+ Session,
+};
+use rustc_span::symbol::Symbol;
+use rustc_target::abi::call::FnAbi;
+use rustc_target::spec::Target;
+
+pub use rustc_data_structures::sync::MetadataRef;
+
+use std::any::Any;
+
+pub trait BackendTypes {
+ type Value: CodegenObject;
+ type Function: CodegenObject;
+
+ type BasicBlock: Copy;
+ type Type: CodegenObject;
+ type Funclet;
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `Dbg`, `Debug`, `DebugInfo`, `DI` etc.).
+ type DIScope: Copy;
+ type DILocation: Copy;
+ type DIVariable: Copy;
+}
+
+pub trait Backend<'tcx>:
+ Sized
+ + BackendTypes
+ + HasTyCtxt<'tcx>
+ + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
+ + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
+{
+}
+
+impl<'tcx, T> Backend<'tcx> for T where
+ Self: BackendTypes
+ + HasTyCtxt<'tcx>
+ + LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>>
+ + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>>
+{
+}
+
+pub trait CodegenBackend {
+ fn init(&self, _sess: &Session) {}
+ fn print(&self, _req: PrintRequest, _sess: &Session) {}
+ fn target_features(&self, _sess: &Session, _allow_unstable: bool) -> Vec<Symbol> {
+ vec![]
+ }
+ fn print_passes(&self) {}
+ fn print_version(&self) {}
+
+    /// If this backend provides additional builtin targets, return the one enabled by the options here.
+    /// Be careful: this is called *before* `init()` is called.
+ fn target_override(&self, _opts: &config::Options) -> Option<Target> {
+ None
+ }
+
+ /// The metadata loader used to load rlib and dylib metadata.
+ ///
+ /// Alternative codegen backends may want to use different rlib or dylib formats than the
+ /// default native static archives and dynamic libraries.
+ fn metadata_loader(&self) -> Box<MetadataLoaderDyn> {
+ Box::new(crate::back::metadata::DefaultMetadataLoader)
+ }
+
+ fn provide(&self, _providers: &mut Providers) {}
+ fn provide_extern(&self, _providers: &mut ExternProviders) {}
+ fn codegen_crate<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ metadata: EncodedMetadata,
+ need_metadata_module: bool,
+ ) -> Box<dyn Any>;
+
+    /// This is called on the returned `Box<dyn Any>` from `codegen_crate`.
+    ///
+    /// # Panics
+    ///
+    /// Panics when the passed `Box<dyn Any>` was not returned by `codegen_crate`.
+ fn join_codegen(
+ &self,
+ ongoing_codegen: Box<dyn Any>,
+ sess: &Session,
+ outputs: &OutputFilenames,
+ ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed>;
+
+    /// This is called with the `CodegenResults` returned by `join_codegen`
+    /// and performs the final linking step, producing the requested outputs.
+ fn link(
+ &self,
+ sess: &Session,
+ codegen_results: CodegenResults,
+ outputs: &OutputFilenames,
+ ) -> Result<(), ErrorGuaranteed>;
+}
+
+pub trait ExtraBackendMethods: CodegenBackend + WriteBackendMethods + Sized + Send + Sync {
+ fn codegen_allocator<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ module_name: &str,
+ kind: AllocatorKind,
+ has_alloc_error_handler: bool,
+ ) -> Self::Module;
+ /// This generates the codegen unit and returns it along with
+ /// a `u64` giving an estimate of the unit's processing cost.
+ fn compile_codegen_unit(
+ &self,
+ tcx: TyCtxt<'_>,
+ cgu_name: Symbol,
+ ) -> (ModuleCodegen<Self::Module>, u64);
+ fn target_machine_factory(
+ &self,
+ sess: &Session,
+ opt_level: config::OptLevel,
+ target_features: &[String],
+ ) -> TargetMachineFactoryFn<Self>;
+ fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str;
+ fn tune_cpu<'b>(&self, sess: &'b Session) -> Option<&'b str>;
+
+ fn spawn_thread<F, T>(_time_trace: bool, f: F) -> std::thread::JoinHandle<T>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::spawn(f)
+ }
+
+ fn spawn_named_thread<F, T>(
+ _time_trace: bool,
+ name: String,
+ f: F,
+ ) -> std::io::Result<std::thread::JoinHandle<T>>
+ where
+ F: FnOnce() -> T,
+ F: Send + 'static,
+ T: Send + 'static,
+ {
+ std::thread::Builder::new().name(name).spawn(f)
+ }
+}
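
From the driver's side, `codegen_crate`, `join_codegen`, and `link` form a three-step handshake, with an opaque `Box<dyn Any>` passed between the first two steps. The toy model below illustrates that lifecycle under made-up types (the real driver lives in rustc_interface); the downcast is where the documented panics come from.

use std::any::Any;

// Toy stand-ins for the real driver/backend types.
struct OngoingCodegen { modules: Vec<String> }
struct CodegenResults { objects: Vec<String> }

struct ToyBackend;

impl ToyBackend {
    // Corresponds to CodegenBackend::codegen_crate: kick off codegen and
    // hand the driver an opaque token.
    fn codegen_crate(&self) -> Box<dyn Any> {
        Box::new(OngoingCodegen { modules: vec!["cgu0".into(), "cgu1".into()] })
    }

    // Corresponds to join_codegen: downcast the token back. This is the
    // source of the "panics when the passed Box<dyn Any> was not returned
    // by codegen_crate" contract.
    fn join_codegen(&self, ongoing: Box<dyn Any>) -> CodegenResults {
        let ongoing = ongoing.downcast::<OngoingCodegen>().expect("wrong Box<dyn Any>");
        CodegenResults { objects: ongoing.modules }
    }

    // Corresponds to link: consume the results and produce the final output.
    fn link(&self, results: CodegenResults) {
        println!("linking {} objects", results.objects.len());
    }
}

fn main() {
    let backend = ToyBackend;
    let ongoing = backend.codegen_crate();
    let results = backend.join_codegen(ongoing);
    backend.link(results);
}
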
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
new file mode 100644
index 000000000..9f49749bb
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -0,0 +1,481 @@
+use super::abi::AbiBuilderMethods;
+use super::asm::AsmBuilderMethods;
+use super::consts::ConstMethods;
+use super::coverageinfo::CoverageInfoBuilderMethods;
+use super::debuginfo::DebugInfoBuilderMethods;
+use super::intrinsic::IntrinsicCallMethods;
+use super::misc::MiscMethods;
+use super::type_::{ArgAbiMethods, BaseTypeMethods};
+use super::{HasCodegen, StaticBuilderMethods};
+
+use crate::common::{
+ AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
+};
+use crate::mir::operand::OperandRef;
+use crate::mir::place::PlaceRef;
+use crate::MemFlags;
+
+use rustc_apfloat::{ieee, Float, Round, Status};
+use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
+use rustc_target::spec::HasTargetSpec;
+
+#[derive(Copy, Clone)]
+pub enum OverflowOp {
+ Add,
+ Sub,
+ Mul,
+}
+
+pub trait BuilderMethods<'a, 'tcx>:
+ HasCodegen<'tcx>
+ + CoverageInfoBuilderMethods<'tcx>
+ + DebugInfoBuilderMethods
+ + ArgAbiMethods<'tcx>
+ + AbiBuilderMethods<'tcx>
+ + IntrinsicCallMethods<'tcx>
+ + AsmBuilderMethods<'tcx>
+ + StaticBuilderMethods
+ + HasParamEnv<'tcx>
+ + HasTargetSpec
+{
+ fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self;
+
+ fn cx(&self) -> &Self::CodegenCx;
+ fn llbb(&self) -> Self::BasicBlock;
+
+ fn set_span(&mut self, span: Span);
+
+ // FIXME(eddyb) replace uses of this with `append_sibling_block`.
+ fn append_block(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &str) -> Self::BasicBlock;
+
+ fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;
+
+ fn switch_to_block(&mut self, llbb: Self::BasicBlock);
+
+ fn ret_void(&mut self);
+ fn ret(&mut self, v: Self::Value);
+ fn br(&mut self, dest: Self::BasicBlock);
+ fn cond_br(
+ &mut self,
+ cond: Self::Value,
+ then_llbb: Self::BasicBlock,
+ else_llbb: Self::BasicBlock,
+ );
+ fn switch(
+ &mut self,
+ v: Self::Value,
+ else_llbb: Self::BasicBlock,
+ cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
+ );
+ fn invoke(
+ &mut self,
+ llty: Self::Type,
+ llfn: Self::Value,
+ args: &[Self::Value],
+ then: Self::BasicBlock,
+ catch: Self::BasicBlock,
+ funclet: Option<&Self::Funclet>,
+ ) -> Self::Value;
+ fn unreachable(&mut self);
+
+ fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn neg(&mut self, v: Self::Value) -> Self::Value;
+ fn fneg(&mut self, v: Self::Value) -> Self::Value;
+ fn not(&mut self, v: Self::Value) -> Self::Value;
+
+ fn checked_binop(
+ &mut self,
+ oop: OverflowOp,
+ ty: Ty<'_>,
+ lhs: Self::Value,
+ rhs: Self::Value,
+ ) -> (Self::Value, Self::Value);
+
+ fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
+ fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
+ if let Abi::Scalar(scalar) = layout.abi {
+ self.to_immediate_scalar(val, scalar)
+ } else {
+ val
+ }
+ }
+ fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
+
+ fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
+ fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
+ fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;
+
+ fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
+ fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
+ fn atomic_load(
+ &mut self,
+ ty: Self::Type,
+ ptr: Self::Value,
+ order: AtomicOrdering,
+ size: Size,
+ ) -> Self::Value;
+ fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
+ -> OperandRef<'tcx, Self::Value>;
+
+    /// Called for `Rvalue::Repeat` when the element is neither a ZST nor optimizable using `memset`.
+ fn write_operand_repeatedly(
+ self,
+ elem: OperandRef<'tcx, Self::Value>,
+ count: u64,
+ dest: PlaceRef<'tcx, Self::Value>,
+ ) -> Self;
+
+ fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
+ fn nonnull_metadata(&mut self, load: Self::Value);
+
+ fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
+ fn store_with_flags(
+ &mut self,
+ val: Self::Value,
+ ptr: Self::Value,
+ align: Align,
+ flags: MemFlags,
+ ) -> Self::Value;
+ fn atomic_store(
+ &mut self,
+ val: Self::Value,
+ ptr: Self::Value,
+ order: AtomicOrdering,
+ size: Size,
+ );
+
+ fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
+ fn inbounds_gep(
+ &mut self,
+ ty: Self::Type,
+ ptr: Self::Value,
+ indices: &[Self::Value],
+ ) -> Self::Value;
+ fn struct_gep(&mut self, ty: Self::Type, ptr: Self::Value, idx: u64) -> Self::Value;
+
+ fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
+ fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
+ fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+ fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
+ fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+
+ fn cast_float_to_int(
+ &mut self,
+ signed: bool,
+ x: Self::Value,
+ dest_ty: Self::Type,
+ ) -> Self::Value {
+ let in_ty = self.cx().val_ty(x);
+ let (float_ty, int_ty) = if self.cx().type_kind(dest_ty) == TypeKind::Vector
+ && self.cx().type_kind(in_ty) == TypeKind::Vector
+ {
+ (self.cx().element_type(in_ty), self.cx().element_type(dest_ty))
+ } else {
+ (in_ty, dest_ty)
+ };
+ assert!(matches!(self.cx().type_kind(float_ty), TypeKind::Float | TypeKind::Double));
+ assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);
+
+ if let Some(false) = self.cx().sess().opts.unstable_opts.saturating_float_casts {
+ return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
+ }
+
+ let try_sat_result =
+ if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) };
+ if let Some(try_sat_result) = try_sat_result {
+ return try_sat_result;
+ }
+
+ let int_width = self.cx().int_width(int_ty);
+ let float_width = self.cx().float_width(float_ty);
+ // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
+ // destination integer type after rounding towards zero. This `undef` value can cause UB in
+ // safe code (see issue #10184), so we implement a saturating conversion on top of it:
+ // Semantically, the mathematical value of the input is rounded towards zero to the next
+ // mathematical integer, and then the result is clamped into the range of the destination
+ // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
+ // the destination integer type. NaN is mapped to 0.
+ //
+ // Define f_min and f_max as the largest and smallest (finite) floats that are exactly equal to
+ // a value representable in int_ty.
+ // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
+ // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
+ // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
+ // representable. Note that this only works if float_ty's exponent range is sufficiently large.
+ // f16 or 256 bit integers would break this property. Right now the smallest float type is f32
+ // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
+ // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
+ // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
+ // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
+ let int_max = |signed: bool, int_width: u64| -> u128 {
+ let shift_amount = 128 - int_width;
+ if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
+ };
+ let int_min = |signed: bool, int_width: u64| -> i128 {
+ if signed { i128::MIN >> (128 - int_width) } else { 0 }
+ };
+
+ let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
+ let rounded_min =
+ ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+ assert_eq!(rounded_min.status, Status::OK);
+ let rounded_max =
+ ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+ assert!(rounded_max.value.is_finite());
+ (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+ };
+ let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
+ let rounded_min =
+ ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
+ assert_eq!(rounded_min.status, Status::OK);
+ let rounded_max =
+ ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
+ assert!(rounded_max.value.is_finite());
+ (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+ };
+ // To implement saturation, we perform the following steps:
+ //
+ // 1. Cast x to an integer with fpto[su]i. This may result in undef.
+ // 2. Compare x to f_min and f_max, and use the comparison results to select:
+ // a) int_ty::MIN if x < f_min or x is NaN
+ // b) int_ty::MAX if x > f_max
+ // c) the result of fpto[su]i otherwise
+    //   3. If x is NaN, return 0, otherwise return the result of step 2.
+    //
+    // This avoids the result being undef, because values in the range [f_min, f_max] by definition
+    // fit into the destination type. It creates an undef temporary, but *producing* undef is not UB.
+    // Our use of undef does not introduce any non-determinism either.
+ // More importantly, the above procedure correctly implements saturating conversion.
+ // Proof (sketch):
+ // If x is NaN, 0 is returned by definition.
+ // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
+ // This yields three cases to consider:
+ // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
+ // saturating conversion for inputs in that range.
+ // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
+ // (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
+ // than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
+ // is correct.
+ // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
+ // int_ty::MIN and therefore the return value of int_ty::MIN is correct.
+ // QED.
+
+ let float_bits_to_llval = |bx: &mut Self, bits| {
+ let bits_llval = match float_width {
+ 32 => bx.cx().const_u32(bits as u32),
+ 64 => bx.cx().const_u64(bits as u64),
+ n => bug!("unsupported float width {}", n),
+ };
+ bx.bitcast(bits_llval, float_ty)
+ };
+ let (f_min, f_max) = match float_width {
+ 32 => compute_clamp_bounds_single(signed, int_width),
+ 64 => compute_clamp_bounds_double(signed, int_width),
+ n => bug!("unsupported float width {}", n),
+ };
+ let f_min = float_bits_to_llval(self, f_min);
+ let f_max = float_bits_to_llval(self, f_max);
+ let int_max = self.cx().const_uint_big(int_ty, int_max(signed, int_width));
+ let int_min = self.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
+ let zero = self.cx().const_uint(int_ty, 0);
+
+ // If we're working with vectors, constants must be "splatted": the constant is duplicated
+ // into each lane of the vector. The algorithm stays the same, we are just using the
+ // same constant across all lanes.
+ let maybe_splat = |bx: &mut Self, val| {
+ if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
+ bx.vector_splat(bx.vector_length(dest_ty), val)
+ } else {
+ val
+ }
+ };
+ let f_min = maybe_splat(self, f_min);
+ let f_max = maybe_splat(self, f_max);
+ let int_max = maybe_splat(self, int_max);
+ let int_min = maybe_splat(self, int_min);
+ let zero = maybe_splat(self, zero);
+
+ // Step 1 ...
+ let fptosui_result = if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
+ let less_or_nan = self.fcmp(RealPredicate::RealULT, x, f_min);
+ let greater = self.fcmp(RealPredicate::RealOGT, x, f_max);
+
+ // Step 2: We use two comparisons and two selects, with %s1 being the
+ // result:
+ // %less_or_nan = fcmp ult %x, %f_min
+    //     %greater = fcmp ogt %x, %f_max
+ // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
+ // %s1 = select %greater, int_ty::MAX, %s0
+ // Note that %less_or_nan uses an *unordered* comparison. This
+ // comparison is true if the operands are not comparable (i.e., if x is
+ // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
+ // x is NaN.
+ //
+ // Performance note: Unordered comparison can be lowered to a "flipped"
+ // comparison and a negation, and the negation can be merged into the
+    // select. Therefore, it is not necessarily any more expensive than an
+ // ordered ("normal") comparison. Whether these optimizations will be
+ // performed is ultimately up to the backend, but at least x86 does
+ // perform them.
+ let s0 = self.select(less_or_nan, int_min, fptosui_result);
+ let s1 = self.select(greater, int_max, s0);
+
+ // Step 3: NaN replacement.
+ // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
+ // Therefore we only need to execute this step for signed integer types.
+ if signed {
+ // LLVM has no isNaN predicate, so we use (x == x) instead
+ let cmp = self.fcmp(RealPredicate::RealOEQ, x, x);
+ self.select(cmp, s1, zero)
+ } else {
+ s1
+ }
+ }
+
+ fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+ fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
+
+ fn memcpy(
+ &mut self,
+ dst: Self::Value,
+ dst_align: Align,
+ src: Self::Value,
+ src_align: Align,
+ size: Self::Value,
+ flags: MemFlags,
+ );
+ fn memmove(
+ &mut self,
+ dst: Self::Value,
+ dst_align: Align,
+ src: Self::Value,
+ src_align: Align,
+ size: Self::Value,
+ flags: MemFlags,
+ );
+ fn memset(
+ &mut self,
+ ptr: Self::Value,
+ fill_byte: Self::Value,
+ size: Self::Value,
+ align: Align,
+ flags: MemFlags,
+ );
+
+ fn select(
+ &mut self,
+ cond: Self::Value,
+ then_val: Self::Value,
+ else_val: Self::Value,
+ ) -> Self::Value;
+
+ fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
+ fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
+ fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
+ fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
+ fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;
+
+ fn set_personality_fn(&mut self, personality: Self::Value);
+
+ // These are used by everyone except msvc
+ fn cleanup_landing_pad(&mut self, ty: Self::Type, pers_fn: Self::Value) -> Self::Value;
+ fn resume(&mut self, exn: Self::Value);
+
+ // These are used only by msvc
+ fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
+ fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>);
+ fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
+ fn catch_switch(
+ &mut self,
+ parent: Option<Self::Value>,
+ unwind: Option<Self::BasicBlock>,
+ handlers: &[Self::BasicBlock],
+ ) -> Self::Value;
+
+ fn atomic_cmpxchg(
+ &mut self,
+ dst: Self::Value,
+ cmp: Self::Value,
+ src: Self::Value,
+ order: AtomicOrdering,
+ failure_order: AtomicOrdering,
+ weak: bool,
+ ) -> Self::Value;
+ fn atomic_rmw(
+ &mut self,
+ op: AtomicRmwBinOp,
+ dst: Self::Value,
+ src: Self::Value,
+ order: AtomicOrdering,
+ ) -> Self::Value;
+ fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
+ fn set_invariant_load(&mut self, load: Self::Value);
+
+ /// Called for `StorageLive`
+ fn lifetime_start(&mut self, ptr: Self::Value, size: Size);
+
+ /// Called for `StorageDead`
+ fn lifetime_end(&mut self, ptr: Self::Value, size: Size);
+
+ fn instrprof_increment(
+ &mut self,
+ fn_name: Self::Value,
+ hash: Self::Value,
+ num_counters: Self::Value,
+ index: Self::Value,
+ );
+
+ fn call(
+ &mut self,
+ llty: Self::Type,
+ llfn: Self::Value,
+ args: &[Self::Value],
+ funclet: Option<&Self::Funclet>,
+ ) -> Self::Value;
+ fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
+
+ fn do_not_inline(&mut self, llret: Self::Value);
+}
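
The saturating behavior that `cast_float_to_int` documents and implements above can be modeled on plain scalars. The sketch below specializes it to `f32 -> i32`: round towards zero, clamp to the integer range, map NaN to 0. It illustrates the specified semantics, not the IR the method emits; the `f_max` constant is i32::MAX rounded towards zero to the nearest f32, exactly as the comment above derives.

// Scalar model of the saturating conversion above, for f32 -> i32.
fn saturating_f32_to_i32(x: f32) -> i32 {
    // Step 3 first in this model: NaN is mapped to 0.
    if x.is_nan() {
        return 0;
    }
    // f_min/f_max: the smallest/largest finite floats exactly equal to a
    // value representable in i32. i32::MIN (= -2^31) is exactly representable
    // in f32; i32::MAX is not, so it rounds towards zero to 2147483520.0.
    let f_min = i32::MIN as f32;
    let f_max = 2147483520.0_f32;
    if x < f_min {
        return i32::MIN; // includes -inf
    }
    if x > f_max {
        return i32::MAX; // includes +inf and everything above f_max
    }
    // In range: truncation (round towards zero) matches fptosi here.
    x.trunc() as i32
}

fn main() {
    assert_eq!(saturating_f32_to_i32(f32::NAN), 0);
    assert_eq!(saturating_f32_to_i32(f32::INFINITY), i32::MAX);
    assert_eq!(saturating_f32_to_i32(-1e30), i32::MIN);
    assert_eq!(saturating_f32_to_i32(-3.9), -3);
}
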
diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs
new file mode 100644
index 000000000..8a91d4735
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs
@@ -0,0 +1,41 @@
+use super::BackendTypes;
+use crate::mir::place::PlaceRef;
+use rustc_middle::mir::interpret::{ConstAllocation, Scalar};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{self, Size};
+
+pub trait ConstMethods<'tcx>: BackendTypes {
+ // Constant constructors
+ fn const_null(&self, t: Self::Type) -> Self::Value;
+ fn const_undef(&self, t: Self::Type) -> Self::Value;
+ fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
+ fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
+ fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
+ fn const_bool(&self, val: bool) -> Self::Value;
+ fn const_i16(&self, i: i16) -> Self::Value;
+ fn const_i32(&self, i: i32) -> Self::Value;
+ fn const_u32(&self, i: u32) -> Self::Value;
+ fn const_u64(&self, i: u64) -> Self::Value;
+ fn const_usize(&self, i: u64) -> Self::Value;
+ fn const_u8(&self, i: u8) -> Self::Value;
+ fn const_real(&self, t: Self::Type, val: f64) -> Self::Value;
+
+ fn const_str(&self, s: &str) -> (Self::Value, Self::Value);
+ fn const_struct(&self, elts: &[Self::Value], packed: bool) -> Self::Value;
+
+ fn const_to_opt_uint(&self, v: Self::Value) -> Option<u64>;
+ fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
+
+ fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value;
+
+ fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;
+ fn zst_to_backend(&self, llty: Self::Type) -> Self::Value;
+ fn from_const_alloc(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ alloc: ConstAllocation<'tcx>,
+ offset: Size,
+ ) -> PlaceRef<'tcx, Self::Value>;
+
+ fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
new file mode 100644
index 000000000..e77201cf0
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/coverageinfo.rs
@@ -0,0 +1,57 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::ty::Instance;
+
+pub trait CoverageInfoMethods<'tcx>: BackendTypes {
+ fn coverageinfo_finalize(&self);
+
+ /// Codegen a small function that will never be called, with one counter
+ /// that will never be incremented, that gives LLVM coverage tools a
+ /// function definition it needs in order to resolve coverage map references
+ /// to unused functions. This is necessary so unused functions will appear
+ /// as uncovered (coverage execution count `0`) in LLVM coverage reports.
+ fn define_unused_fn(&self, def_id: DefId);
+
+ /// For LLVM codegen, returns a function-specific `Value` for a global
+ /// string, to hold the function name passed to LLVM intrinsic
+ /// `instrprof.increment()`. The `Value` is only created once per instance.
+ /// Multiple invocations with the same instance return the same `Value`.
+ fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value;
+}
+
+pub trait CoverageInfoBuilderMethods<'tcx>: BackendTypes {
+ /// Returns true if the function source hash was added to the coverage map (even if it had
+ /// already been added, for this instance). Returns false *only* if `-C instrument-coverage` is
+ /// not enabled (a coverage map is not being generated).
+ fn set_function_source_hash(
+ &mut self,
+ instance: Instance<'tcx>,
+ function_source_hash: u64,
+ ) -> bool;
+
+ /// Returns true if the counter was added to the coverage map; false if `-C instrument-coverage`
+ /// is not enabled (a coverage map is not being generated).
+ fn add_coverage_counter(
+ &mut self,
+ instance: Instance<'tcx>,
+ index: CounterValueReference,
+ region: CodeRegion,
+ ) -> bool;
+
+ /// Returns true if the expression was added to the coverage map; false if
+ /// `-C instrument-coverage` is not enabled (a coverage map is not being generated).
+ fn add_coverage_counter_expression(
+ &mut self,
+ instance: Instance<'tcx>,
+ id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ region: Option<CodeRegion>,
+ ) -> bool;
+
+ /// Returns true if the region was added to the coverage map; false if `-C instrument-coverage`
+ /// is not enabled (a coverage map is not being generated).
+ fn add_coverage_unreachable(&mut self, instance: Instance<'tcx>, region: CodeRegion) -> bool;
+}
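
The counter-expression methods above build a small algebra over counters: each expression combines two operands with an add or subtract. The self-contained sketch below evaluates such expressions over final counter values; the enum and the ids are hypothetical mirrors, since the real operand ids index into the coverage map.

// Hypothetical evaluator for coverage counter expressions of the shape
// `lhs op rhs`, mirroring add_coverage_counter_expression.
#[derive(Clone, Copy)]
enum Op { Add, Subtract }

enum Operand {
    Counter(usize),    // index into the measured counter values
    Expression(usize), // index into the expression table
}

struct Expression { lhs: Operand, op: Op, rhs: Operand }

fn eval(operand: &Operand, counters: &[u64], exprs: &[Expression]) -> u64 {
    match operand {
        Operand::Counter(i) => counters[*i],
        Operand::Expression(i) => {
            let e = &exprs[*i];
            let (l, r) = (eval(&e.lhs, counters, exprs), eval(&e.rhs, counters, exprs));
            match e.op {
                Op::Add => l + r,
                Op::Subtract => l.saturating_sub(r),
            }
        }
    }
}

fn main() {
    // counter0 counts loop entries, counter1 counts early exits;
    // expression0 = counter0 - counter1 gives the fall-through count.
    let counters = [10, 3];
    let exprs = [Expression { lhs: Operand::Counter(0), op: Op::Subtract, rhs: Operand::Counter(1) }];
    assert_eq!(eval(&Operand::Expression(0), &counters, &exprs), 7);
}
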
diff --git a/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
new file mode 100644
index 000000000..f310789d1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/debuginfo.rs
@@ -0,0 +1,79 @@
+use super::BackendTypes;
+use crate::mir::debuginfo::{FunctionDebugContext, VariableKind};
+use rustc_middle::mir;
+use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty};
+use rustc_span::{SourceFile, Span, Symbol};
+use rustc_target::abi::call::FnAbi;
+use rustc_target::abi::Size;
+
+pub trait DebugInfoMethods<'tcx>: BackendTypes {
+ fn create_vtable_debuginfo(
+ &self,
+ ty: Ty<'tcx>,
+ trait_ref: Option<PolyExistentialTraitRef<'tcx>>,
+ vtable: Self::Value,
+ );
+
+ /// Creates the function-specific debug context.
+ ///
+ /// Returns the FunctionDebugContext for the function which holds state needed
+ /// for debug info creation, if it is enabled.
+ fn create_function_debug_context(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ llfn: Self::Function,
+ mir: &mir::Body<'tcx>,
+ ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>>;
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_scope_fn(
+ &self,
+ instance: Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ maybe_definition_llfn: Option<Self::Function>,
+ ) -> Self::DIScope;
+
+ fn dbg_loc(
+ &self,
+ scope: Self::DIScope,
+ inlined_at: Option<Self::DILocation>,
+ span: Span,
+ ) -> Self::DILocation;
+
+ fn extend_scope_to_file(
+ &self,
+ scope_metadata: Self::DIScope,
+ file: &SourceFile,
+ ) -> Self::DIScope;
+ fn debuginfo_finalize(&self);
+
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn create_dbg_var(
+ &self,
+ variable_name: Symbol,
+ variable_type: Ty<'tcx>,
+ scope_metadata: Self::DIScope,
+ variable_kind: VariableKind,
+ span: Span,
+ ) -> Self::DIVariable;
+}
+
+pub trait DebugInfoBuilderMethods: BackendTypes {
+ // FIXME(eddyb) find a common convention for all of the debuginfo-related
+ // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
+ fn dbg_var_addr(
+ &mut self,
+ dbg_var: Self::DIVariable,
+ dbg_loc: Self::DILocation,
+ variable_alloca: Self::Value,
+ direct_offset: Size,
+ // NB: each offset implies a deref (i.e. they're steps in a pointer chain).
+ indirect_offsets: &[Size],
+ );
+ fn set_dbg_loc(&mut self, dbg_loc: Self::DILocation);
+ fn insert_reference_to_gdb_debug_scripts_section_global(&mut self);
+ fn set_var_name(&mut self, value: Self::Value, name: &str);
+}
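
The `direct_offset`/`indirect_offsets` pair passed to `dbg_var_addr` describes a pointer chain: start at the alloca, add `direct_offset`, then for each indirect offset load a pointer and step into what it points to. The following `resolve_addr` is a hypothetical model of that note, not a real rustc function:

// Hypothetical model of the "each offset implies a deref" note above.
fn resolve_addr(base: *const u8, direct_offset: usize, indirect_offsets: &[usize]) -> *const u8 {
    let mut addr = base.wrapping_add(direct_offset);
    for &off in indirect_offsets {
        // One dereference per indirect offset: read the pointer stored at
        // `addr`, then step `off` bytes into its pointee.
        let next: *const u8 = unsafe { *(addr as *const *const u8) };
        addr = next.wrapping_add(off);
    }
    addr
}

fn main() {
    // A value reachable as *(*base + 0) + 1: a pointer variable whose
    // pointee holds the debug variable at byte offset 1.
    let heap: [u8; 2] = [0xaa, 0xbb];
    let ptr_to_heap: *const u8 = heap.as_ptr();
    let base: *const u8 = &ptr_to_heap as *const *const u8 as *const u8;
    let addr = resolve_addr(base, 0, &[1]);
    assert_eq!(unsafe { *addr }, 0xbb);
}
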
diff --git a/compiler/rustc_codegen_ssa/src/traits/declare.rs b/compiler/rustc_codegen_ssa/src/traits/declare.rs
new file mode 100644
index 000000000..655afcd17
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/declare.rs
@@ -0,0 +1,21 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::mono::{Linkage, Visibility};
+use rustc_middle::ty::Instance;
+
+pub trait PreDefineMethods<'tcx>: BackendTypes {
+ fn predefine_static(
+ &self,
+ def_id: DefId,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ );
+ fn predefine_fn(
+ &self,
+ instance: Instance<'tcx>,
+ linkage: Linkage,
+ visibility: Visibility,
+ symbol_name: &str,
+ );
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
new file mode 100644
index 000000000..7755e6793
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/intrinsic.rs
@@ -0,0 +1,39 @@
+use super::BackendTypes;
+use crate::mir::operand::OperandRef;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::Span;
+use rustc_target::abi::call::FnAbi;
+
+pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
+ /// Remember to add all intrinsics here, in `compiler/rustc_typeck/src/check/mod.rs`,
+ /// and in `library/core/src/intrinsics.rs`; if you need access to any LLVM intrinsics,
+ /// add them to `compiler/rustc_codegen_llvm/src/context.rs`.
+ fn codegen_intrinsic_call(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ args: &[OperandRef<'tcx, Self::Value>],
+ llresult: Self::Value,
+ span: Span,
+ );
+
+ fn abort(&mut self);
+ fn assume(&mut self, val: Self::Value);
+ fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
+ /// Trait method used to test whether a given pointer is associated with a type identifier.
+ fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value;
+ /// Trait method used to load a function while testing if it is associated with a type
+ /// identifier.
+ fn type_checked_load(
+ &mut self,
+ llvtable: Self::Value,
+ vtable_byte_offset: u64,
+ typeid: Self::Value,
+ ) -> Self::Value;
+ /// Trait method used to inject `va_start` on the "spoofed" `VaListImpl` in
+ /// Rust defined C-variadic functions.
+ fn va_start(&mut self, val: Self::Value) -> Self::Value;
+ /// Trait method used to inject `va_end` on the "spoofed" `VaListImpl` before
+ /// Rust defined C-variadic functions return.
+ fn va_end(&mut self, val: Self::Value) -> Self::Value;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/misc.rs b/compiler/rustc_codegen_ssa/src/traits/misc.rs
new file mode 100644
index 000000000..4266e42ec
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/misc.rs
@@ -0,0 +1,26 @@
+use super::BackendTypes;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_session::Session;
+use std::cell::RefCell;
+
+pub trait MiscMethods<'tcx>: BackendTypes {
+ fn vtables(
+ &self,
+ ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Self::Value>>;
+ fn check_overflow(&self) -> bool;
+ fn get_fn(&self, instance: Instance<'tcx>) -> Self::Function;
+ fn get_fn_addr(&self, instance: Instance<'tcx>) -> Self::Value;
+ fn eh_personality(&self) -> Self::Value;
+ fn sess(&self) -> &Session;
+ fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx>;
+ fn used_statics(&self) -> &RefCell<Vec<Self::Value>>;
+ fn compiler_used_statics(&self) -> &RefCell<Vec<Self::Value>>;
+ fn set_frame_pointer_type(&self, llfn: Self::Function);
+ fn apply_target_cpu_attr(&self, llfn: Self::Function);
+ fn create_used_variable(&self);
+ fn create_compiler_used_variable(&self);
+ /// Declares the extern "C" main function for the entry point. Returns None if the symbol already exists.
+ fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function>;
+}
diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs
new file mode 100644
index 000000000..782fdadbf
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs
@@ -0,0 +1,102 @@
+//! Interface of a Rust codegen backend
+//!
+//! This crate defines all the traits that have to be implemented by a codegen backend in order to
+//! use the backend-agnostic codegen code in `rustc_codegen_ssa`.
+//!
+//! The interface is designed around two backend-specific data structures, the codegen context and
+//! the builder. The codegen context is supposed to be read-only after its creation and during the
+//! actual codegen, while the builder stores the information about the function during codegen and
+//! is used to produce the instructions of the backend IR.
+//!
+//! Finally, a third `Backend` structure has to implement methods related to how codegen information
+//! is passed to the backend, especially for asynchronous compilation.
+//!
+//! The traits contain associated types that are backend-specific, such as the backend's value or
+//! basic blocks.
+
+mod abi;
+mod asm;
+mod backend;
+mod builder;
+mod consts;
+mod coverageinfo;
+mod debuginfo;
+mod declare;
+mod intrinsic;
+mod misc;
+mod statics;
+mod type_;
+mod write;
+
+pub use self::abi::AbiBuilderMethods;
+pub use self::asm::{AsmBuilderMethods, AsmMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+pub use self::backend::{Backend, BackendTypes, CodegenBackend, ExtraBackendMethods};
+pub use self::builder::{BuilderMethods, OverflowOp};
+pub use self::consts::ConstMethods;
+pub use self::coverageinfo::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
+pub use self::declare::PreDefineMethods;
+pub use self::intrinsic::IntrinsicCallMethods;
+pub use self::misc::MiscMethods;
+pub use self::statics::{StaticBuilderMethods, StaticMethods};
+pub use self::type_::{
+ ArgAbiMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMembershipMethods,
+ TypeMethods,
+};
+pub use self::write::{ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
+
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt};
+use rustc_target::spec::HasTargetSpec;
+
+use std::fmt;
+
+pub trait CodegenObject: Copy + PartialEq + fmt::Debug {}
+impl<T: Copy + PartialEq + fmt::Debug> CodegenObject for T {}
+
+pub trait CodegenMethods<'tcx>:
+ Backend<'tcx>
+ + TypeMethods<'tcx>
+ + MiscMethods<'tcx>
+ + ConstMethods<'tcx>
+ + StaticMethods
+ + CoverageInfoMethods<'tcx>
+ + DebugInfoMethods<'tcx>
+ + AsmMethods<'tcx>
+ + PreDefineMethods<'tcx>
+ + HasParamEnv<'tcx>
+ + HasTyCtxt<'tcx>
+ + HasTargetSpec
+{
+}
+
+impl<'tcx, T> CodegenMethods<'tcx> for T where
+ Self: Backend<'tcx>
+ + TypeMethods<'tcx>
+ + MiscMethods<'tcx>
+ + ConstMethods<'tcx>
+ + StaticMethods
+ + CoverageInfoMethods<'tcx>
+ + DebugInfoMethods<'tcx>
+ + AsmMethods<'tcx>
+ + PreDefineMethods<'tcx>
+ + HasParamEnv<'tcx>
+ + HasTyCtxt<'tcx>
+ + HasTargetSpec
+{
+}
+
+pub trait HasCodegen<'tcx>:
+ Backend<'tcx> + std::ops::Deref<Target = <Self as HasCodegen<'tcx>>::CodegenCx>
+{
+ type CodegenCx: CodegenMethods<'tcx>
+ + BackendTypes<
+ Value = Self::Value,
+ Function = Self::Function,
+ BasicBlock = Self::BasicBlock,
+ Type = Self::Type,
+ Funclet = Self::Funclet,
+ DIScope = Self::DIScope,
+ DILocation = Self::DILocation,
+ DIVariable = Self::DIVariable,
+ >;
+}
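
The `Deref` bound in `HasCodegen` is what lets builder code call codegen-context methods directly on `self`. Here is a minimal sketch of that context/builder split with toy types; nothing in it is the real API, it only demonstrates the design the module doc above describes.

use std::ops::Deref;

// Read-only, crate-wide state: the "codegen context".
struct Context { target: &'static str }

impl Context {
    fn target(&self) -> &'static str { self.target }
}

// Per-function state: the "builder". Deref lets builder code call context
// methods directly, which is the pattern HasCodegen encodes with traits.
struct Builder<'a> { cx: &'a Context, insts: Vec<String> }

impl<'a> Deref for Builder<'a> {
    type Target = Context;
    fn deref(&self) -> &Context { self.cx }
}

impl<'a> Builder<'a> {
    fn emit(&mut self, inst: &str) {
        // `self.target()` resolves through Deref to the context.
        self.insts.push(format!("[{}] {inst}", self.target()));
    }
}

fn main() {
    let cx = Context { target: "x86_64" };
    let mut bx = Builder { cx: &cx, insts: Vec::new() };
    bx.emit("ret");
    assert_eq!(bx.insts[0], "[x86_64] ret");
}
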
diff --git a/compiler/rustc_codegen_ssa/src/traits/statics.rs b/compiler/rustc_codegen_ssa/src/traits/statics.rs
new file mode 100644
index 000000000..413d31bb9
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/statics.rs
@@ -0,0 +1,24 @@
+use super::BackendTypes;
+use rustc_hir::def_id::DefId;
+use rustc_target::abi::Align;
+
+pub trait StaticMethods: BackendTypes {
+ fn static_addr_of(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
+ fn codegen_static(&self, def_id: DefId, is_mutable: bool);
+
+ /// Mark the given global value as "used", to prevent the compiler and linker from potentially
+ /// removing a static variable that may otherwise appear unused.
+ fn add_used_global(&self, global: Self::Value);
+
+    /// Same as `add_used_global()`, but only prevents the compiler from potentially removing an
+    /// otherwise unused symbol. The linker is still permitted to drop it.
+ ///
+ /// This corresponds to the documented semantics of the `#[used]` attribute, although
+ /// on some targets (non-ELF), we may use `add_used_global` for `#[used]` statics
+ /// instead.
+ fn add_compiler_used_global(&self, global: Self::Value);
+}
+
+pub trait StaticBuilderMethods: BackendTypes {
+ fn get_static(&mut self, def_id: DefId) -> Self::Value;
+}
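
`add_compiler_used_global` implements the documented semantics of the `#[used]` attribute: the compiler must keep the symbol, while the linker may still discard it. From the user's side that contract looks like this; the static's name and contents are arbitrary.

// The attribute whose semantics `add_compiler_used_global` implements: the
// compiler keeps this static even though nothing references it, but a linker
// running with --gc-sections may still drop it.
#[used]
static SECTION_MARKER: [u8; 4] = *b"MARK";

fn main() {
    // Nothing reads SECTION_MARKER; #[used] alone keeps it in the emitted
    // object file.
}
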
diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs
new file mode 100644
index 000000000..8158e8dd0
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs
@@ -0,0 +1,151 @@
+use super::misc::MiscMethods;
+use super::Backend;
+use super::HasCodegen;
+use crate::common::TypeKind;
+use crate::mir::place::PlaceRef;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::call::{ArgAbi, CastTarget, FnAbi, Reg};
+use rustc_target::abi::{AddressSpace, Integer};
+
+// This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use
+// `LayoutOf` or `HasTyCtxt`. This way, they don't have to add a constraint on it themselves.
+pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
+ fn type_i1(&self) -> Self::Type;
+ fn type_i8(&self) -> Self::Type;
+ fn type_i16(&self) -> Self::Type;
+ fn type_i32(&self) -> Self::Type;
+ fn type_i64(&self) -> Self::Type;
+ fn type_i128(&self) -> Self::Type;
+ fn type_isize(&self) -> Self::Type;
+
+ fn type_f32(&self) -> Self::Type;
+ fn type_f64(&self) -> Self::Type;
+
+ fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+ fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
+ fn type_kind(&self, ty: Self::Type) -> TypeKind;
+ fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
+ fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type;
+ fn element_type(&self, ty: Self::Type) -> Self::Type;
+
+    /// Returns the number of elements in `ty` if it is an LLVM vector type.
+ fn vector_length(&self, ty: Self::Type) -> usize;
+
+ fn float_width(&self, ty: Self::Type) -> usize;
+
+    /// Retrieves the bit width of the integer type `ty`.
+ fn int_width(&self, ty: Self::Type) -> u64;
+
+ fn val_ty(&self, v: Self::Value) -> Self::Type;
+}
+
+pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
+ fn type_i8p(&self) -> Self::Type {
+ self.type_i8p_ext(AddressSpace::DATA)
+ }
+
+ fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type {
+ self.type_ptr_to_ext(self.type_i8(), address_space)
+ }
+
+ fn type_int(&self) -> Self::Type {
+ match &self.sess().target.c_int_width[..] {
+ "16" => self.type_i16(),
+ "32" => self.type_i32(),
+ "64" => self.type_i64(),
+ width => bug!("Unsupported c_int_width: {}", width),
+ }
+ }
+
+ fn type_from_integer(&self, i: Integer) -> Self::Type {
+ use Integer::*;
+ match i {
+ I8 => self.type_i8(),
+ I16 => self.type_i16(),
+ I32 => self.type_i32(),
+ I64 => self.type_i64(),
+ I128 => self.type_i128(),
+ }
+ }
+
+ fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
+ ty.needs_drop(self.tcx(), ty::ParamEnv::reveal_all())
+ }
+
+ fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+ }
+
+ fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_freeze(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all())
+ }
+
+ fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
+ let param_env = ty::ParamEnv::reveal_all();
+ if ty.is_sized(self.tcx().at(DUMMY_SP), param_env) {
+ return false;
+ }
+
+ let tail = self.tcx().struct_tail_erasing_lifetimes(ty, param_env);
+ match tail.kind() {
+ ty::Foreign(..) => false,
+ ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
+ _ => bug!("unexpected unsized tail: {:?}", tail),
+ }
+ }
+}
+
+impl<'tcx, T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {}
+
+pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
+ fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
+ fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
+ fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+ fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+ fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
+ fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type;
+ fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool;
+ fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool;
+ fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64;
+ fn scalar_pair_element_backend_type(
+ &self,
+ layout: TyAndLayout<'tcx>,
+ index: usize,
+ immediate: bool,
+ ) -> Self::Type;
+}
+
+// For backends that support CFI using type membership (i.e., testing whether a given pointer is
+// associated with a type identifier).
+pub trait TypeMembershipMethods<'tcx>: Backend<'tcx> {
+ fn set_type_metadata(&self, function: Self::Function, typeid: String);
+ fn typeid_metadata(&self, typeid: String) -> Self::Value;
+}
+
+pub trait ArgAbiMethods<'tcx>: HasCodegen<'tcx> {
+ fn store_fn_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ idx: &mut usize,
+ dst: PlaceRef<'tcx, Self::Value>,
+ );
+ fn store_arg(
+ &mut self,
+ arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ val: Self::Value,
+ dst: PlaceRef<'tcx, Self::Value>,
+ );
+ fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Self::Type;
+}
+
+pub trait TypeMethods<'tcx>:
+ DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> + TypeMembershipMethods<'tcx>
+{
+}
+
+impl<'tcx, T> TypeMethods<'tcx> for T where
+ Self: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> + TypeMembershipMethods<'tcx>
+{
+}
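
`type_has_metadata` above is asking whether a pointer to `ty` is a fat pointer. The same distinction is observable from plain Rust through pointer sizes, which is what the `Foreign`/`Str`/`Slice`/`Dynamic` match encodes:

use std::mem::size_of;

fn main() {
    // Thin pointers: sized pointees carry no metadata.
    assert_eq!(size_of::<&u64>(), size_of::<usize>());

    // Fat pointers: str and slices carry a length, trait objects carry a
    // vtable pointer -- the `true` arms of type_has_metadata.
    assert_eq!(size_of::<&str>(), 2 * size_of::<usize>());
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
    assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>());

    // extern types (ty::Foreign) are unsized but deliberately have *no*
    // metadata, which is why the match returns false for them.
}
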
diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs
new file mode 100644
index 000000000..e54ec34f1
--- /dev/null
+++ b/compiler/rustc_codegen_ssa/src/traits/write.rs
@@ -0,0 +1,68 @@
+use crate::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use crate::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
+use crate::{CompiledModule, ModuleCodegen};
+
+use rustc_errors::{FatalError, Handler};
+use rustc_middle::dep_graph::WorkProduct;
+
+pub trait WriteBackendMethods: 'static + Sized + Clone {
+ type Module: Send + Sync;
+ type TargetMachine;
+ type ModuleBuffer: ModuleBufferMethods;
+ type Context: ?Sized;
+ type ThinData: Send + Sync;
+ type ThinBuffer: ThinBufferMethods;
+
+    /// Merges all given modules into a single one and returns it.
+ fn run_link(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ modules: Vec<ModuleCodegen<Self::Module>>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError>;
+ /// Performs fat LTO by merging all modules into a single one and returning it
+ /// for further optimization.
+ fn run_fat_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<FatLTOInput<Self>>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<LtoModuleCodegen<Self>, FatalError>;
+    /// Performs thin LTO by running the necessary global analysis and returning two
+    /// lists: one of the modules that need optimization, and another of the modules
+    /// that can simply be copied over from the incremental compilation cache.
+ fn run_thin_lto(
+ cgcx: &CodegenContext<Self>,
+ modules: Vec<(String, Self::ThinBuffer)>,
+ cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+ ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>;
+ fn print_pass_timings(&self);
+ unsafe fn optimize(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: &ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<(), FatalError>;
+ fn optimize_fat(
+ cgcx: &CodegenContext<Self>,
+ llmod: &mut ModuleCodegen<Self::Module>,
+ ) -> Result<(), FatalError>;
+ unsafe fn optimize_thin(
+ cgcx: &CodegenContext<Self>,
+ thin: ThinModule<Self>,
+ ) -> Result<ModuleCodegen<Self::Module>, FatalError>;
+ unsafe fn codegen(
+ cgcx: &CodegenContext<Self>,
+ diag_handler: &Handler,
+ module: ModuleCodegen<Self::Module>,
+ config: &ModuleConfig,
+ ) -> Result<CompiledModule, FatalError>;
+ fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer);
+ fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer);
+}
+
+pub trait ThinBufferMethods: Send + Sync {
+ fn data(&self) -> &[u8];
+}
+
+pub trait ModuleBufferMethods: Send + Sync {
+ fn data(&self) -> &[u8];
+}
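
`run_thin_lto`'s contract is a partition of the incoming modules: unchanged ones come back as reusable work products, the rest go to optimization. The sketch below models that split keyed on a content hash; the real implementation keys on `WorkProduct`s from the dependency graph, so this is only an illustration.

use std::collections::HashMap;

// Hypothetical partition mirroring run_thin_lto's two output lists: modules
// that need optimizing vs. modules reusable from the incr. comp. cache.
fn partition_for_thin_lto(
    modules: Vec<(String, u64)>,  // (name, content hash)
    cache: &HashMap<String, u64>, // previous session's hashes
) -> (Vec<String>, Vec<String>) {
    let mut needs_opt = Vec::new();
    let mut reuse = Vec::new();
    for (name, hash) in modules {
        match cache.get(&name) {
            Some(&cached) if cached == hash => reuse.push(name),
            _ => needs_opt.push(name),
        }
    }
    (needs_opt, reuse)
}

fn main() {
    let cache = HashMap::from([("a".to_string(), 1), ("b".to_string(), 2)]);
    let (needs_opt, reuse) =
        partition_for_thin_lto(vec![("a".into(), 1), ("b".into(), 9), ("c".into(), 3)], &cache);
    assert_eq!(reuse, vec!["a"]);
    assert_eq!(needs_opt, vec!["b", "c"]);
}
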
diff --git a/compiler/rustc_const_eval/Cargo.toml b/compiler/rustc_const_eval/Cargo.toml
new file mode 100644
index 000000000..32e8233a0
--- /dev/null
+++ b/compiler/rustc_const_eval/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "rustc_const_eval"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+tracing = "0.1"
+rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_mir_dataflow = { path = "../rustc_mir_dataflow" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_session = { path = "../rustc_session" }
+rustc_target = { path = "../rustc_target" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_span = { path = "../rustc_span" }
+rustc_type_ir = { path = "../rustc_type_ir" }
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
new file mode 100644
index 000000000..322bfd5ce
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -0,0 +1,252 @@
+use std::error::Error;
+use std::fmt;
+
+use rustc_errors::Diagnostic;
+use rustc_hir as hir;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::ty::{layout::LayoutError, query::TyCtxtAt, ConstInt};
+use rustc_span::{Span, Symbol};
+
+use super::InterpCx;
+use crate::interpret::{
+    struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine,
+    MachineStopType, UnsupportedOpInfo,
+};
+
+/// The CTFE machine has some custom error kinds.
+#[derive(Clone, Debug)]
+pub enum ConstEvalErrKind {
+ NeedsRfc(String),
+ ConstAccessesStatic,
+ ModifiedGlobal,
+ AssertFailure(AssertKind<ConstInt>),
+ Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
+ Abort(String),
+}
+
+impl MachineStopType for ConstEvalErrKind {
+ fn is_hard_err(&self) -> bool {
+ matches!(self, Self::Panic { .. })
+ }
+}
+
+// The errors become `MachineStop` with plain strings when being raised.
+// `ConstEvalErr` (in `rustc_middle/src/mir/interpret/error.rs`) knows to
+// handle these.
+impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
+ fn into(self) -> InterpErrorInfo<'tcx> {
+ err_machine_stop!(self).into()
+ }
+}
+
+impl fmt::Display for ConstEvalErrKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use self::ConstEvalErrKind::*;
+ match *self {
+ NeedsRfc(ref msg) => {
+ write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
+ }
+ ConstAccessesStatic => write!(f, "constant accesses static"),
+ ModifiedGlobal => {
+ write!(f, "modifying a static's initial value from another static's initializer")
+ }
+ AssertFailure(ref msg) => write!(f, "{:?}", msg),
+ Panic { msg, line, col, file } => {
+ write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col)
+ }
+ Abort(ref msg) => write!(f, "{}", msg),
+ }
+ }
+}
+
+impl Error for ConstEvalErrKind {}
+
+/// When const-evaluation errors, this type is constructed with the resulting information,
+/// and then used to emit the error as a lint or hard error.
+#[derive(Debug)]
+pub struct ConstEvalErr<'tcx> {
+ pub span: Span,
+ pub error: InterpError<'tcx>,
+ pub stacktrace: Vec<FrameInfo<'tcx>>,
+}
+
+impl<'tcx> ConstEvalErr<'tcx> {
+ /// Turn an interpreter error into something to report to the user.
+ /// As a side-effect, if RUSTC_CTFE_BACKTRACE is set, this prints the backtrace.
+    /// Should be called only if the error is actually going to be reported!
+ pub fn new<'mir, M: Machine<'mir, 'tcx>>(
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ error: InterpErrorInfo<'tcx>,
+ span: Option<Span>,
+ ) -> ConstEvalErr<'tcx>
+ where
+ 'tcx: 'mir,
+ {
+ error.print_backtrace();
+ let mut stacktrace = ecx.generate_stacktrace();
+ // Filter out `requires_caller_location` frames.
+ stacktrace.retain(|frame| !frame.instance.def.requires_caller_location(*ecx.tcx));
+ // If `span` is missing, use topmost remaining frame, or else the "root" span from `ecx.tcx`.
+ let span = span.or_else(|| stacktrace.first().map(|f| f.span)).unwrap_or(ecx.tcx.span);
+ ConstEvalErr { error: error.into_kind(), stacktrace, span }
+ }
+
+ pub fn struct_error(
+ &self,
+ tcx: TyCtxtAt<'tcx>,
+ message: &str,
+ decorate: impl FnOnce(&mut Diagnostic),
+ ) -> ErrorHandled {
+ self.struct_generic(tcx, message, decorate, None)
+ }
+
+ pub fn report_as_error(&self, tcx: TyCtxtAt<'tcx>, message: &str) -> ErrorHandled {
+ self.struct_error(tcx, message, |_| {})
+ }
+
+ pub fn report_as_lint(
+ &self,
+ tcx: TyCtxtAt<'tcx>,
+ message: &str,
+ lint_root: hir::HirId,
+ span: Option<Span>,
+ ) -> ErrorHandled {
+ self.struct_generic(
+ tcx,
+ message,
+ |lint: &mut Diagnostic| {
+ // Apply the span.
+ if let Some(span) = span {
+ let primary_spans = lint.span.primary_spans().to_vec();
+ // point at the actual error as the primary span
+ lint.replace_span_with(span);
+                        // Point to the `const` item as a secondary span;
+                        // these spans don't get any label.
+ for sp in primary_spans {
+ if sp != span {
+ lint.span_label(sp, "");
+ }
+ }
+ }
+ },
+ Some(lint_root),
+ )
+ }
+
+ /// Create a diagnostic for this const eval error.
+ ///
+ /// Sets the message passed in via `message` and adds span labels with detailed error
+ /// information before handing control back to `decorate` to do any final annotations,
+ /// after which the diagnostic is emitted.
+ ///
+ /// If `lint_root.is_some()` report it as a lint, else report it as a hard error.
+    /// (Except that for some errors, we ignore all that -- see the special handling below.)
+ #[instrument(skip(self, tcx, decorate, lint_root), level = "debug")]
+ fn struct_generic(
+ &self,
+ tcx: TyCtxtAt<'tcx>,
+ message: &str,
+ decorate: impl FnOnce(&mut Diagnostic),
+ lint_root: Option<hir::HirId>,
+ ) -> ErrorHandled {
+ let finish = |err: &mut Diagnostic, span_msg: Option<String>| {
+ trace!("reporting const eval failure at {:?}", self.span);
+ if let Some(span_msg) = span_msg {
+ err.span_label(self.span, span_msg);
+ }
+ // Add some more context for select error types.
+ match self.error {
+ InterpError::Unsupported(
+ UnsupportedOpInfo::ReadPointerAsBytes
+ | UnsupportedOpInfo::PartialPointerOverwrite(_)
+ ) => {
+ err.help("this code performed an operation that depends on the underlying bytes representing a pointer");
+ err.help("the absolute address of a pointer is not known at compile-time, so such operations are not supported");
+ }
+ _ => {}
+ }
+ // Add spans for the stacktrace. Don't print a single-line backtrace though.
+ if self.stacktrace.len() > 1 {
+ // Helper closure to print duplicated lines.
+ let mut flush_last_line = |last_frame, times| {
+ if let Some((line, span)) = last_frame {
+ err.span_label(span, &line);
+ // Don't print [... additional calls ...] if the number of lines is small
+ if times < 3 {
+ for _ in 0..times {
+ err.span_label(span, &line);
+ }
+ } else {
+ err.span_label(
+ span,
+ format!("[... {} additional calls {} ...]", times, &line),
+ );
+ }
+ }
+ };
+
+ let mut last_frame = None;
+ let mut times = 0;
+ for frame_info in &self.stacktrace {
+ let frame = (frame_info.to_string(), frame_info.span);
+ if last_frame.as_ref() == Some(&frame) {
+ times += 1;
+ } else {
+ flush_last_line(last_frame, times);
+ last_frame = Some(frame);
+ times = 0;
+ }
+ }
+ flush_last_line(last_frame, times);
+ }
+ // Let the caller attach any additional information it wants.
+ decorate(err);
+ };
+
+ debug!("self.error: {:?}", self.error);
+ // Special handling for certain errors
+ match &self.error {
+ // Don't emit a new diagnostic for these errors
+ err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
+ return ErrorHandled::TooGeneric;
+ }
+ err_inval!(AlreadyReported(error_reported)) => {
+ return ErrorHandled::Reported(*error_reported);
+ }
+ err_inval!(Layout(LayoutError::SizeOverflow(_))) => {
+ // We must *always* hard error on these, even if the caller wants just a lint.
+                // The `message` makes little sense here; this is a more serious error than
+                // the caller thinks anyway.
+ // See <https://github.com/rust-lang/rust/pull/63152>.
+ let mut err = struct_error(tcx, &self.error.to_string());
+ finish(&mut err, None);
+ return ErrorHandled::Reported(err.emit());
+ }
+ _ => {}
+ };
+
+ let err_msg = self.error.to_string();
+
+ // Regular case - emit a lint.
+ if let Some(lint_root) = lint_root {
+ // Report as lint.
+ let hir_id =
+ self.stacktrace.iter().rev().find_map(|frame| frame.lint_root).unwrap_or(lint_root);
+ tcx.struct_span_lint_hir(
+ rustc_session::lint::builtin::CONST_ERR,
+ hir_id,
+ tcx.span,
+ |lint| {
+ let mut lint = lint.build(message);
+ finish(&mut lint, Some(err_msg));
+ lint.emit();
+ },
+ );
+ ErrorHandled::Linted
+ } else {
+ // Report as hard error.
+ let mut err = struct_error(tcx, message);
+ finish(&mut err, Some(err_msg));
+ ErrorHandled::Reported(err.emit())
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
new file mode 100644
index 000000000..975fb4b22
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -0,0 +1,395 @@
+use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr};
+use crate::interpret::eval_nullary_intrinsic;
+use crate::interpret::{
+ intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
+    Immediate, InternKind, InterpCx, InterpError, InterpResult, MPlaceTy, MemoryKind, OpTy,
+    RefTracking, ScalarMaybeUninit, StackPopCleanup,
+};
+
+use rustc_hir::def::DefKind;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::mir::pretty::display_allocation;
+use rustc_middle::traits::Reveal;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, subst::Subst, TyCtxt};
+use rustc_span::source_map::Span;
+use rustc_target::abi::{self, Abi};
+use std::borrow::Cow;
+use std::convert::TryInto;
+
+const NOTE_ON_UNDEFINED_BEHAVIOR_ERROR: &str = "The rules on what exactly is undefined behavior aren't clear, \
+ so this check might be overzealous. Please open an issue on the rustc \
+ repository if you believe it should not be considered undefined behavior.";
+
+/// Returns a pointer to where the result lives.
+fn eval_body_using_ecx<'mir, 'tcx>(
+ ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
+ cid: GlobalId<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
+ debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
+ let tcx = *ecx.tcx;
+ assert!(
+ cid.promoted.is_some()
+ || matches!(
+ ecx.tcx.def_kind(cid.instance.def_id()),
+ DefKind::Const
+ | DefKind::Static(_)
+ | DefKind::ConstParam
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::AssocConst
+ ),
+ "Unexpected DefKind: {:?}",
+ ecx.tcx.def_kind(cid.instance.def_id())
+ );
+ let layout = ecx.layout_of(body.bound_return_ty().subst(tcx, cid.instance.substs))?;
+ assert!(!layout.is_unsized());
+ let ret = ecx.allocate(layout, MemoryKind::Stack)?;
+
+ trace!(
+ "eval_body_using_ecx: pushing stack frame for global: {}{}",
+ with_no_trimmed_paths!(ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id()))),
+ cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p))
+ );
+
+ ecx.push_stack_frame(
+ cid.instance,
+ body,
+ &ret.into(),
+ StackPopCleanup::Root { cleanup: false },
+ )?;
+
+ // The main interpreter loop.
+ ecx.run()?;
+
+ // Intern the result
+ let intern_kind = if cid.promoted.is_some() {
+ InternKind::Promoted
+ } else {
+ match tcx.static_mutability(cid.instance.def_id()) {
+ Some(m) => InternKind::Static(m),
+ None => InternKind::Constant,
+ }
+ };
+ intern_const_alloc_recursive(ecx, intern_kind, &ret)?;
+
+ debug!("eval_body_using_ecx done: {:?}", *ret);
+ Ok(ret)
+}
+
+/// The `InterpCx` is only meant to be used to do field and index projections into constants for
+/// `simd_shuffle` and const patterns in match arms.
+///
+/// The function containing the `match` that is currently being analyzed may have generic bounds
+/// that inform us about the generic bounds of the constant. E.g., using an associated constant
+/// of a function's generic parameter will require knowledge about the bounds on the generic
+/// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
+pub(super) fn mk_eval_cx<'mir, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ root_span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+ can_access_statics: bool,
+) -> CompileTimeEvalContext<'mir, 'tcx> {
+ debug!("mk_eval_cx: {:?}", param_env);
+ InterpCx::new(
+ tcx,
+ root_span,
+ param_env,
+ CompileTimeInterpreter::new(tcx.const_eval_limit(), can_access_statics),
+ )
+}
+
+/// This function converts an interpreter value into a constant that is meant for use in the
+/// type system.
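+///
+/// For example (illustrative): a validated `&str` constant such as `"hi"` comes back
+/// as `ConstValue::Slice { .. }`, a plain `u32` becomes `ConstValue::Scalar(..)`, and
+/// anything without such an optimized form stays `ConstValue::ByRef { .. }`.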
+#[instrument(skip(ecx), level = "debug")]
+pub(super) fn op_to_const<'tcx>(
+ ecx: &CompileTimeEvalContext<'_, 'tcx>,
+ op: &OpTy<'tcx>,
+) -> ConstValue<'tcx> {
+ // We do not have value optimizations for everything.
+ // Only scalars and slices, since they are very common.
+ // Note that further down we turn scalars of uninitialized bits back to `ByRef`. These can result
+    // from scalar unions that are initialized with one of their zero-sized variants. We could
+ // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
+ // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
+ // `Undef` situation.
+ let try_as_immediate = match op.layout.abi {
+ Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
+ Abi::ScalarPair(..) => match op.layout.ty.kind() {
+ ty::Ref(_, inner, _) => match *inner.kind() {
+ ty::Slice(elem) => elem == ecx.tcx.types.u8,
+ ty::Str => true,
+ _ => false,
+ },
+ _ => false,
+ },
+ _ => false,
+ };
+ let immediate = if try_as_immediate {
+ Err(ecx.read_immediate(op).expect("normalization works on validated constants"))
+ } else {
+ // It is guaranteed that any non-slice scalar pair is actually ByRef here.
+ // When we come back from raw const eval, we are always by-ref. The only way our op here is
+ // by-val is if we are in destructure_mir_constant, i.e., if this is (a field of) something that we
+ // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
+ // structs containing such.
+ op.try_as_mplace()
+ };
+
+ debug!(?immediate);
+
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let to_const_value = |mplace: &MPlaceTy<'_>| {
+ debug!("to_const_value(mplace: {:?})", mplace);
+ match mplace.ptr.into_parts() {
+ (Some(alloc_id), offset) => {
+ let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
+ ConstValue::ByRef { alloc, offset }
+ }
+ (None, offset) => {
+ assert!(mplace.layout.is_zst());
+ assert_eq!(
+ offset.bytes() % mplace.layout.align.abi.bytes(),
+ 0,
+ "this MPlaceTy must come from a validated constant, thus we can assume the \
+ alignment is correct",
+ );
+ ConstValue::ZeroSized
+ }
+ }
+ };
+ match immediate {
+ Ok(ref mplace) => to_const_value(mplace),
+ // see comment on `let try_as_immediate` above
+ Err(imm) => match *imm {
+ _ if imm.layout.is_zst() => ConstValue::ZeroSized,
+ Immediate::Scalar(x) => match x {
+ ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
+ ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
+ },
+ Immediate::ScalarPair(a, b) => {
+ debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let (data, start) = match a.to_pointer(ecx).unwrap().into_parts() {
+ (Some(alloc_id), offset) => {
+ (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
+ }
+ (None, _offset) => (
+ ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
+ b"" as &[u8],
+ )),
+ 0,
+ ),
+ };
+ let len = b.to_machine_usize(ecx).unwrap();
+ let start = start.try_into().unwrap();
+ let len: usize = len.try_into().unwrap();
+ ConstValue::Slice { data, start, end: start + len }
+ }
+ Immediate::Uninit => to_const_value(&op.assert_mem_place()),
+ },
+ }
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn turn_into_const_value<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ constant: ConstAlloc<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ConstValue<'tcx> {
+ let cid = key.value;
+ let def_id = cid.instance.def.def_id();
+ let is_static = tcx.is_static(def_id);
+ let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env, is_static);
+
+ let mplace = ecx.raw_const_to_mplace(constant).expect(
+ "can only fail if layout computation failed, \
+ which should have given a good error before ever invoking this function",
+ );
+ assert!(
+ !is_static || cid.promoted.is_some(),
+ "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead"
+ );
+
+ // Turn this into a proper constant.
+ let const_val = op_to_const(&ecx, &mplace.into());
+ debug!(?const_val);
+
+ const_val
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn eval_to_const_value_raw_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
+ assert!(key.param_env.is_const());
+ // see comment in eval_to_allocation_raw_provider for what we're doing here
+ if key.param_env.reveal() == Reveal::All {
+ let mut key = key;
+ key.param_env = key.param_env.with_user_facing();
+ match tcx.eval_to_const_value_raw(key) {
+ // try again with reveal all as requested
+ Err(ErrorHandled::TooGeneric) => {}
+ // deduplicate calls
+ other => return other,
+ }
+ }
+
+ // We call `const_eval` for zero arg intrinsics, too, in order to cache their value.
+ // Catch such calls and evaluate them instead of trying to load a constant's MIR.
+ if let ty::InstanceDef::Intrinsic(def_id) = key.value.instance.def {
+ let ty = key.value.instance.ty(tcx, key.param_env);
+ let ty::FnDef(_, substs) = ty.kind() else {
+ bug!("intrinsic with type {:?}", ty);
+ };
+ return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
+ let span = tcx.def_span(def_id);
+ let error = ConstEvalErr { error: error.into_kind(), stacktrace: vec![], span };
+ error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
+ });
+ }
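+
+    // Illustrative (assumed) example: `std::any::type_name::<u32>()` lowers to the
+    // nullary `type_name` intrinsic, so it is answered by `eval_nullary_intrinsic`
+    // here without ever loading a constant's MIR.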
+
+ tcx.eval_to_allocation_raw(key).map(|val| turn_into_const_value(tcx, val, key))
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn eval_to_allocation_raw_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
+) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
+ assert!(key.param_env.is_const());
+ // Because the constant is computed twice (once per value of `Reveal`), we are at risk of
+ // reporting the same error twice here. To resolve this, we check whether we can evaluate the
+ // constant in the more restrictive `Reveal::UserFacing`, which most likely already was
+// computed. For a large percentage of constants, that evaluation will already have
+// succeeded. Only associated constants of generic functions will fail due to not enough
+// monomorphization information being available.
+
+ // In case we fail in the `UserFacing` variant, we just do the real computation.
+ if key.param_env.reveal() == Reveal::All {
+ let mut key = key;
+ key.param_env = key.param_env.with_user_facing();
+ match tcx.eval_to_allocation_raw(key) {
+ // try again with reveal all as requested
+ Err(ErrorHandled::TooGeneric) => {}
+ // deduplicate calls
+ other => return other,
+ }
+ }
+ if cfg!(debug_assertions) {
+ // Make sure we format the instance even if we do not print it.
+ // This serves as a regression test against an ICE on printing.
+ // The next two lines concatenated contain some discussion:
+ // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/
+ // subject/anon_const_instance_printing/near/135980032
+ let instance = with_no_trimmed_paths!(key.value.instance.to_string());
+ trace!("const eval: {:?} ({})", key, instance);
+ }
+
+ let cid = key.value;
+ let def = cid.instance.def.with_opt_param();
+ let is_static = tcx.is_static(def.did);
+
+ let mut ecx = InterpCx::new(
+ tcx,
+ tcx.def_span(def.did),
+ key.param_env,
+ // Statics (and promoteds inside statics) may access other statics, because unlike consts
+ // they do not have to behave "as if" they were evaluated at runtime.
+ CompileTimeInterpreter::new(tcx.const_eval_limit(), /*can_access_statics:*/ is_static),
+ );
+
+ let res = ecx.load_mir(cid.instance.def, cid.promoted);
+ match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, &body)) {
+ Err(error) => {
+ let err = ConstEvalErr::new(&ecx, error, None);
+ // Some CTFE errors raise just a lint, not a hard error; see
+ // <https://github.com/rust-lang/rust/issues/71800>.
+ let is_hard_err = if let Some(def) = def.as_local() {
+ // (Associated) consts only emit a lint, since they might be unused.
+ !matches!(tcx.def_kind(def.did.to_def_id()), DefKind::Const | DefKind::AssocConst)
+ // check if the inner InterpError is hard
+ || err.error.is_hard_err()
+ } else {
+ // use of broken constant from other crate: always an error
+ true
+ };
+
+ if is_hard_err {
+ let msg = if is_static {
+ Cow::from("could not evaluate static initializer")
+ } else {
+                    // If the current item has generics, we'd like to enrich the message with
+                    // the instance and its substs, to show the actual compile-time values
+                    // (in addition to the expression) leading to the const eval error.
+ let instance = &key.value.instance;
+ if !instance.substs.is_empty() {
+ let instance = with_no_trimmed_paths!(instance.to_string());
+ let msg = format!("evaluation of `{}` failed", instance);
+ Cow::from(msg)
+ } else {
+ Cow::from("evaluation of constant value failed")
+ }
+ };
+
+ Err(err.report_as_error(ecx.tcx.at(err.span), &msg))
+ } else {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def.as_local().unwrap().did);
+ Err(err.report_as_lint(
+ tcx.at(tcx.def_span(def.did)),
+ "any use of this value will cause an error",
+ hir_id,
+ Some(err.span),
+ ))
+ }
+ }
+ Ok(mplace) => {
+ // Since evaluation had no errors, validate the resulting constant.
+ // This is a separate `try` block to provide more targeted error reporting.
+ let validation = try {
+ let mut ref_tracking = RefTracking::new(mplace);
+ let mut inner = false;
+ while let Some((mplace, path)) = ref_tracking.todo.pop() {
+ let mode = match tcx.static_mutability(cid.instance.def_id()) {
+ Some(_) if cid.promoted.is_some() => {
+ // Promoteds in statics are allowed to point to statics.
+ CtfeValidationMode::Const { inner, allow_static_ptrs: true }
+ }
+ Some(_) => CtfeValidationMode::Regular, // a `static`
+ None => CtfeValidationMode::Const { inner, allow_static_ptrs: false },
+ };
+ ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
+ inner = true;
+ }
+ };
+ let alloc_id = mplace.ptr.provenance.unwrap();
+ if let Err(error) = validation {
+ // Validation failed, report an error. This is always a hard error.
+ let err = ConstEvalErr::new(&ecx, error, None);
+ Err(err.struct_error(
+ ecx.tcx,
+ "it is undefined behavior to use this value",
+ |diag| {
+ if matches!(err.error, InterpError::UndefinedBehavior(_)) {
+ diag.note(NOTE_ON_UNDEFINED_BEHAVIOR_ERROR);
+ }
+ diag.note(&format!(
+ "the raw bytes of the constant ({}",
+ display_allocation(
+ *ecx.tcx,
+ ecx.tcx.global_alloc(alloc_id).unwrap_memory().inner()
+ )
+ ));
+ },
+ ))
+ } else {
+ // Convert to raw constant
+ Ok(ConstAlloc { alloc_id, ty: mplace.layout.ty })
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
new file mode 100644
index 000000000..f1674d04f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs
@@ -0,0 +1,82 @@
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{DefIdTree, TyCtxt};
+use rustc_span::symbol::Symbol;
+
+/// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it
+pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
+ if tcx.is_const_fn_raw(def_id) {
+ let const_stab = tcx.lookup_const_stability(def_id)?;
+ if const_stab.is_const_unstable() { Some(const_stab.feature) } else { None }
+ } else {
+ None
+ }
+}
+
+pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+ let parent_id = tcx.local_parent(def_id);
+ tcx.def_kind(parent_id) == DefKind::Impl && tcx.constness(parent_id) == hir::Constness::Const
+}
+
+/// Checks whether an item is considered to be `const`. If it is a constructor, it is const.
+/// If it is a trait impl/function, return whether it has a `const` modifier. If it is an
+/// intrinsic, report whether said intrinsic has a `rustc_const_{un,}stable` attribute.
+/// Otherwise, return `Constness::NotConst`.
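+///
+/// For example (an illustrative sketch; these are not items in this crate):
+///
+/// ```ignore (illustrative)
+/// const fn f() {}  // Constness::Const: explicit `const` modifier
+/// fn g() {}        // Constness::NotConst
+/// struct S(u8);    // the constructor `S` counts as Constness::Const
+/// ```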
+fn constness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::Constness {
+ let def_id = def_id.expect_local();
+ let node = tcx.hir().get_by_def_id(def_id);
+
+ match node {
+ hir::Node::Ctor(_) => hir::Constness::Const,
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(impl_), .. }) => impl_.constness,
+ hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) => {
+ // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
+ // foreign items cannot be evaluated at compile-time.
+ let is_const = if tcx.is_intrinsic(def_id) {
+ tcx.lookup_const_stability(def_id).is_some()
+ } else {
+ false
+ };
+ if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+ }
+ _ => {
+ if let Some(fn_kind) = node.fn_kind() {
+ if fn_kind.constness() == hir::Constness::Const {
+ return hir::Constness::Const;
+ }
+
+ // If the function itself is not annotated with `const`, it may still be a `const fn`
+ // if it resides in a const trait impl.
+ let is_const = is_parent_const_impl_raw(tcx, def_id);
+ if is_const { hir::Constness::Const } else { hir::Constness::NotConst }
+ } else {
+ hir::Constness::NotConst
+ }
+ }
+ }
+}
+
+fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.is_const_fn(def_id)
+ && match tcx.lookup_const_stability(def_id) {
+ Some(stab) => {
+ if cfg!(debug_assertions) && stab.promotable {
+ let sig = tcx.fn_sig(def_id);
+ assert_eq!(
+ sig.unsafety(),
+ hir::Unsafety::Normal,
+ "don't mark const unsafe fns as promotable",
+ // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
+ );
+ }
+ stab.promotable
+ }
+ None => false,
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { constness, is_promotable_const_fn, ..*providers };
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
new file mode 100644
index 000000000..fc2e6652a
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -0,0 +1,527 @@
+use rustc_hir::def::DefKind;
+use rustc_middle::mir;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
+use std::hash::Hash;
+
+use rustc_data_structures::fx::FxHashMap;
+use std::fmt;
+
+use rustc_ast::Mutability;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::AssertMessage;
+use rustc_session::Limit;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Align, Size};
+use rustc_target::spec::abi::Abi as CallAbi;
+
+use crate::interpret::{
+ self, compile_time_machine, AllocId, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
+ OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind,
+};
+
+use super::error::*;
+
+impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
+ /// "Intercept" a function call to a panic-related function
+ /// because we have something special to do for it.
+    /// If this returns `Ok(None)`, the function should just be evaluated normally;
+    /// if it returns `Ok(Some(new_instance))`, that instance should be called instead.
+ fn hook_special_const_fn(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ args: &[OpTy<'tcx>],
+ ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
+ // All `#[rustc_do_not_const_check]` functions should be hooked here.
+ let def_id = instance.def_id();
+
+ if Some(def_id) == self.tcx.lang_items().const_eval_select() {
+ // redirect to const_eval_select_ct
+ if let Some(const_eval_select) = self.tcx.lang_items().const_eval_select_ct() {
+ return Ok(Some(
+ ty::Instance::resolve(
+ *self.tcx,
+ ty::ParamEnv::reveal_all(),
+ const_eval_select,
+ instance.substs,
+ )
+ .unwrap()
+ .unwrap(),
+ ));
+ }
+ } else if Some(def_id) == self.tcx.lang_items().panic_display()
+ || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
+ {
+ // &str or &&str
+ assert!(args.len() == 1);
+
+ let mut msg_place = self.deref_operand(&args[0])?;
+ while msg_place.layout.ty.is_ref() {
+ msg_place = self.deref_operand(&msg_place.into())?;
+ }
+
+ let msg = Symbol::intern(self.read_str(&msg_place)?);
+ let span = self.find_closest_untracked_caller_location();
+ let (file, line, col) = self.location_triple_for_span(span);
+ return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
+ } else if Some(def_id) == self.tcx.lang_items().panic_fmt() {
+ // For panic_fmt, call const_panic_fmt instead.
+ if let Some(const_panic_fmt) = self.tcx.lang_items().const_panic_fmt() {
+ return Ok(Some(
+ ty::Instance::resolve(
+ *self.tcx,
+ ty::ParamEnv::reveal_all(),
+ const_panic_fmt,
+ self.tcx.intern_substs(&[]),
+ )
+ .unwrap()
+ .unwrap(),
+ ));
+ }
+ }
+ Ok(None)
+ }
+}
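+
+// For instance (an assumed, illustrative scenario): evaluating
+//
+//     const C: () = panic!("boom");
+//
+// is intercepted by `hook_special_const_fn` via the panic lang items above, so const
+// evaluation reports a structured `ConstEvalErrKind::Panic` (with message and caller
+// location) instead of running the runtime panic machinery.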
+
+/// Extra machine state for CTFE, and the Machine instance
+pub struct CompileTimeInterpreter<'mir, 'tcx> {
+ /// For now, the number of terminators that can be evaluated before we throw a resource
+ /// exhaustion error.
+ ///
+ /// Setting this to `0` disables the limit and allows the interpreter to run forever.
+ pub steps_remaining: usize,
+
+ /// The virtual call stack.
+ pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
+
+ /// We need to make sure consts never point to anything mutable, even recursively. That is
+ /// relied on for pattern matching on consts with references.
+ /// To achieve this, two pieces have to work together:
+ /// * Interning makes everything outside of statics immutable.
+ /// * Pointers to allocations inside of statics can never leak outside, to a non-static global.
+ /// This boolean here controls the second part.
+ pub(super) can_access_statics: bool,
+}
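+
+// As an illustrative (hypothetical) example of what `can_access_statics = false`
+// rejects:
+//
+//     static S: i32 = 0;
+//     const C: &i32 = &S; // error: constants cannot refer to statics
+//
+// Allowing such a read would let a `const` observe memory whose contents can differ
+// between compile time and run time.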
+
+impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
+ pub(crate) fn new(const_eval_limit: Limit, can_access_statics: bool) -> Self {
+ CompileTimeInterpreter {
+ steps_remaining: const_eval_limit.0,
+ stack: Vec::new(),
+ can_access_statics,
+ }
+ }
+}
+
+impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
+ #[inline(always)]
+ fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+ where
+ K: Borrow<Q>,
+ {
+ FxHashMap::contains_key(self, k)
+ }
+
+ #[inline(always)]
+ fn insert(&mut self, k: K, v: V) -> Option<V> {
+ FxHashMap::insert(self, k, v)
+ }
+
+ #[inline(always)]
+ fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+ where
+ K: Borrow<Q>,
+ {
+ FxHashMap::remove(self, k)
+ }
+
+ #[inline(always)]
+ fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
+ self.iter().filter_map(move |(k, v)| f(k, &*v)).collect()
+ }
+
+ #[inline(always)]
+ fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
+ match self.get(&k) {
+ Some(v) => Ok(v),
+ None => {
+ vacant()?;
+ bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
+ }
+ }
+ }
+
+ #[inline(always)]
+ fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
+ match self.entry(k) {
+ Entry::Occupied(e) => Ok(e.into_mut()),
+ Entry::Vacant(e) => {
+ let v = vacant()?;
+ Ok(e.insert(v))
+ }
+ }
+ }
+}
+
+pub(crate) type CompileTimeEvalContext<'mir, 'tcx> =
+ InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>;
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum MemoryKind {
+ Heap,
+}
+
+impl fmt::Display for MemoryKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ MemoryKind::Heap => write!(f, "heap allocation"),
+ }
+ }
+}
+
+impl interpret::MayLeak for MemoryKind {
+ #[inline(always)]
+ fn may_leak(self) -> bool {
+ match self {
+ MemoryKind::Heap => false,
+ }
+ }
+}
+
+impl interpret::MayLeak for ! {
+ #[inline(always)]
+ fn may_leak(self) -> bool {
+ // `self` is uninhabited
+ self
+ }
+}
+
+impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
+ fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+ Ok(match (a, b) {
+ // Comparisons between integers are always known.
+ (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
+ // Equality with integers can never be known for sure.
+ (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => false,
+            // FIXME: return `true` when both sides are the same pointer, *except* that
+ // some things (like functions and vtables) do not have stable addresses
+ // so we need to be careful around them (see e.g. #73722).
+ (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+ })
+ }
+
+ fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
+ Ok(match (a, b) {
+ // Comparisons between integers are always known.
+ (Scalar::Int(_), Scalar::Int(_)) => a != b,
+ // Comparisons of abstract pointers with null pointers are known if the pointer
+ // is in bounds, because if they are in bounds, the pointer can't be null.
+ // Inequality with integers other than null can never be known for sure.
+ (Scalar::Int(int), ptr @ Scalar::Ptr(..))
+ | (ptr @ Scalar::Ptr(..), Scalar::Int(int)) => {
+ int.is_null() && !self.scalar_may_be_null(ptr)?
+ }
+ // FIXME: return `true` for at least some comparisons where we can reliably
+ // determine the result of runtime inequality tests at compile-time.
+ // Examples include comparison of addresses in different static items.
+ (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
+ })
+ }
+}
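+
+// Why both queries are conservative for pointer/pointer comparisons (an illustrative,
+// hypothetical example):
+//
+//     const A: *const i32 = &1;
+//     const B: *const i32 = &1;
+//
+// After codegen, `A` and `B` may or may not end up with the same address, so neither
+// equality nor inequality is guaranteed, and both queries answer `false`.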
+
+impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> {
+ compile_time_machine!(<'mir, 'tcx>);
+
+ type MemoryKind = MemoryKind;
+
+ const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
+
+ fn load_mir(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ instance: ty::InstanceDef<'tcx>,
+ ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+ match instance {
+ ty::InstanceDef::Item(def) => {
+ if ecx.tcx.is_ctfe_mir_available(def.did) {
+ Ok(ecx.tcx.mir_for_ctfe_opt_const_arg(def))
+ } else if ecx.tcx.def_kind(def.did) == DefKind::AssocConst {
+ let guar = ecx.tcx.sess.delay_span_bug(
+ rustc_span::DUMMY_SP,
+ "This is likely a const item that is missing from its impl",
+ );
+ throw_inval!(AlreadyReported(guar));
+ } else {
+ let path = ecx.tcx.def_path_str(def.did);
+ Err(ConstEvalErrKind::NeedsRfc(format!("calling extern function `{}`", path))
+ .into())
+ }
+ }
+ _ => Ok(ecx.tcx.instance_mir(instance)),
+ }
+ }
+
+ fn find_mir_or_eval_fn(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ instance: ty::Instance<'tcx>,
+ _abi: CallAbi,
+ args: &[OpTy<'tcx>],
+ _dest: &PlaceTy<'tcx>,
+ _ret: Option<mir::BasicBlock>,
+ _unwind: StackPopUnwind, // unwinding is not supported in consts
+ ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
+ debug!("find_mir_or_eval_fn: {:?}", instance);
+
+ // Only check non-glue functions
+ if let ty::InstanceDef::Item(def) = instance.def {
+ // Execution might have wandered off into other crates, so we cannot do a stability-
+ // sensitive check here. But we can at least rule out functions that are not const
+ // at all.
+ if !ecx.tcx.is_const_fn_raw(def.did) {
+ // allow calling functions inside a trait marked with #[const_trait].
+ if !ecx.tcx.is_const_default_method(def.did) {
+ // We certainly do *not* want to actually call the fn
+ // though, so be sure we return here.
+ throw_unsup_format!("calling non-const function `{}`", instance)
+ }
+ }
+
+ if let Some(new_instance) = ecx.hook_special_const_fn(instance, args)? {
+ // We call another const fn instead.
+ // However, we return the *original* instance to make backtraces work out
+ // (and we hope this does not confuse the FnAbi checks too much).
+ return Ok(Self::find_mir_or_eval_fn(
+ ecx,
+ new_instance,
+ _abi,
+ args,
+ _dest,
+ _ret,
+ _unwind,
+ )?
+ .map(|(body, _instance)| (body, instance)));
+ }
+ }
+ // This is a const fn. Call it.
+ Ok(Some((ecx.load_mir(instance.def, None)?, instance)))
+ }
+
+ fn call_intrinsic(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ instance: ty::Instance<'tcx>,
+ args: &[OpTy<'tcx>],
+ dest: &PlaceTy<'tcx, Self::Provenance>,
+ target: Option<mir::BasicBlock>,
+ _unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx> {
+ // Shared intrinsics.
+ if ecx.emulate_intrinsic(instance, args, dest, target)? {
+ return Ok(());
+ }
+ let intrinsic_name = ecx.tcx.item_name(instance.def_id());
+
+ // CTFE-specific intrinsics.
+ let Some(ret) = target else {
+ return Err(ConstEvalErrKind::NeedsRfc(format!(
+ "calling intrinsic `{}`",
+ intrinsic_name
+ ))
+ .into());
+ };
+ match intrinsic_name {
+ sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+ let a = ecx.read_immediate(&args[0])?.to_scalar()?;
+ let b = ecx.read_immediate(&args[1])?.to_scalar()?;
+ let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
+ ecx.guaranteed_eq(a, b)?
+ } else {
+ ecx.guaranteed_ne(a, b)?
+ };
+ ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
+ }
+ sym::const_allocate => {
+ let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
+ let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
+
+ let align = match Align::from_bytes(align) {
+ Ok(a) => a,
+ Err(err) => throw_ub_format!("align has to be a power of 2, {}", err),
+ };
+
+ let ptr = ecx.allocate_ptr(
+ Size::from_bytes(size as u64),
+ align,
+ interpret::MemoryKind::Machine(MemoryKind::Heap),
+ )?;
+ ecx.write_pointer(ptr, dest)?;
+ }
+ sym::const_deallocate => {
+ let ptr = ecx.read_pointer(&args[0])?;
+ let size = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;
+ let align = ecx.read_scalar(&args[2])?.to_machine_usize(ecx)?;
+
+ let size = Size::from_bytes(size);
+ let align = match Align::from_bytes(align) {
+ Ok(a) => a,
+ Err(err) => throw_ub_format!("align has to be a power of 2, {}", err),
+ };
+
+                // If an allocation was created in another const,
+                // we don't deallocate it.
+ let (alloc_id, _, _) = ecx.ptr_get_alloc_id(ptr)?;
+ let is_allocated_in_another_const = matches!(
+ ecx.tcx.try_get_global_alloc(alloc_id),
+ Some(interpret::GlobalAlloc::Memory(_))
+ );
+
+ if !is_allocated_in_another_const {
+ ecx.deallocate_ptr(
+ ptr,
+ Some((size, align)),
+ interpret::MemoryKind::Machine(MemoryKind::Heap),
+ )?;
+ }
+ }
+ _ => {
+ return Err(ConstEvalErrKind::NeedsRfc(format!(
+ "calling intrinsic `{}`",
+ intrinsic_name
+ ))
+ .into());
+ }
+ }
+
+ ecx.go_to_block(ret);
+ Ok(())
+ }
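+
+    // An illustrative use of the heap intrinsics handled above (assuming the unstable
+    // `core::intrinsics::{const_allocate, const_deallocate}` API):
+    //
+    //     const C: u8 = unsafe {
+    //         let p = core::intrinsics::const_allocate(1, 1);
+    //         *p = 7;
+    //         let v = *p;
+    //         core::intrinsics::const_deallocate(p, 1, 1);
+    //         v
+    //     };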
+
+ fn assert_panic(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ msg: &AssertMessage<'tcx>,
+ _unwind: Option<mir::BasicBlock>,
+ ) -> InterpResult<'tcx> {
+ use rustc_middle::mir::AssertKind::*;
+ // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
+ let eval_to_int =
+ |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
+ let err = match msg {
+ BoundsCheck { ref len, ref index } => {
+ let len = eval_to_int(len)?;
+ let index = eval_to_int(index)?;
+ BoundsCheck { len, index }
+ }
+ Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
+ OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
+ DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
+ RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
+ ResumedAfterReturn(generator_kind) => ResumedAfterReturn(*generator_kind),
+ ResumedAfterPanic(generator_kind) => ResumedAfterPanic(*generator_kind),
+ };
+ Err(ConstEvalErrKind::AssertFailure(err).into())
+ }
+
+ fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
+ Err(ConstEvalErrKind::Abort(msg).into())
+ }
+
+ fn binary_ptr_op(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _bin_op: mir::BinOp,
+ _left: &ImmTy<'tcx>,
+ _right: &ImmTy<'tcx>,
+ ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+ Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
+ }
+
+ fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+ // The step limit has already been hit in a previous call to `before_terminator`.
+ if ecx.machine.steps_remaining == 0 {
+ return Ok(());
+ }
+
+ ecx.machine.steps_remaining -= 1;
+ if ecx.machine.steps_remaining == 0 {
+ throw_exhaust!(StepLimitReached)
+ }
+
+ Ok(())
+ }
+
+ #[inline(always)]
+ fn expose_ptr(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _ptr: Pointer<AllocId>,
+ ) -> InterpResult<'tcx> {
+ Err(ConstEvalErrKind::NeedsRfc("exposing pointers".to_string()).into())
+ }
+
+ #[inline(always)]
+ fn init_frame_extra(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ frame: Frame<'mir, 'tcx>,
+ ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+ // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
+ if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
+ throw_exhaust!(StackFrameLimitReached)
+ } else {
+ Ok(frame)
+ }
+ }
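+
+    // E.g. (illustrative): a constant evaluating an unboundedly recursive
+    // `const fn f(n: u32) -> u32 { f(n + 1) }` ends up throwing
+    // `StackFrameLimitReached` here instead of overflowing the host stack.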
+
+ #[inline(always)]
+ fn stack<'a>(
+ ecx: &'a InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
+ &ecx.machine.stack
+ }
+
+ #[inline(always)]
+ fn stack_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
+ &mut ecx.machine.stack
+ }
+
+ fn before_access_global(
+ _tcx: TyCtxt<'tcx>,
+ machine: &Self,
+ alloc_id: AllocId,
+ alloc: ConstAllocation<'tcx>,
+ static_def_id: Option<DefId>,
+ is_write: bool,
+ ) -> InterpResult<'tcx> {
+ let alloc = alloc.inner();
+ if is_write {
+ // Write access. These are never allowed, but we give a targeted error message.
+ if alloc.mutability == Mutability::Not {
+ Err(err_ub!(WriteToReadOnly(alloc_id)).into())
+ } else {
+ Err(ConstEvalErrKind::ModifiedGlobal.into())
+ }
+ } else {
+ // Read access. These are usually allowed, with some exceptions.
+ if machine.can_access_statics {
+                // Machine configuration allows us to read from anything (e.g., a `static` initializer).
+ Ok(())
+ } else if static_def_id.is_some() {
+ // Machine configuration does not allow us to read statics
+ // (e.g., `const` initializer).
+                // See `CompileTimeInterpreter::can_access_statics` above for why
+ // this check is so important: if we could read statics, we could read pointers
+ // to mutable allocations *inside* statics. These allocations are not themselves
+ // statics, so pointers to them can get around the check in `validity.rs`.
+ Err(ConstEvalErrKind::ConstAccessesStatic.into())
+ } else {
+ // Immutable global, this read is fine.
+                // But make sure we never accept a read from something mutable, as that would
+                // be unsound. The reason is that the content of this allocation may be
+                // different now and at run-time, so if we permitted reading now we might
+                // return the wrong value.
+ assert_eq!(alloc.mutability, Mutability::Not);
+ Ok(())
+ }
+ }
+ }
+}
+
+// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
+// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
+// at the bottom of this file.
diff --git a/compiler/rustc_const_eval/src/const_eval/mod.rs b/compiler/rustc_const_eval/src/const_eval/mod.rs
new file mode 100644
index 000000000..948c33494
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -0,0 +1,163 @@
+// This module is deliberately not inside `interpret`, to make sure we do not use its
+// private implementation details.
+
+use rustc_hir::Mutability;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};
+
+use crate::interpret::{
+ intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, InterpResult, MemPlaceMeta,
+ Scalar,
+};
+
+mod error;
+mod eval_queries;
+mod fn_queries;
+mod machine;
+mod valtrees;
+
+pub use error::*;
+pub use eval_queries::*;
+pub use fn_queries::*;
+pub use machine::*;
+pub(crate) use valtrees::{const_to_valtree_inner, valtree_to_const_value};
+
+pub(crate) fn const_caller_location(
+ tcx: TyCtxt<'_>,
+ (file, line, col): (Symbol, u32, u32),
+) -> ConstValue<'_> {
+ trace!("const_caller_location: {}:{}:{}", file, line, col);
+ let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false);
+
+ let loc_place = ecx.alloc_caller_location(file, line, col);
+ if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
+ bug!("intern_const_alloc_recursive should not error in this case")
+ }
+ ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr, &tcx))
+}
+
+// We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.
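+// (Illustratively, pattern-matching on a huge constant like `[0u8; 1_000_000]` would
+// exceed this limit and be rejected with a hard error.)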
+const VALTREE_MAX_NODES: usize = 100000;
+
+pub(crate) enum ValTreeCreationError {
+ NodesOverflow,
+ NonSupportedType,
+ Other,
+}
+pub(crate) type ValTreeCreationResult<'tcx> = Result<ty::ValTree<'tcx>, ValTreeCreationError>;
+
+/// Evaluates a constant and turns it into a type-level constant value.
+pub(crate) fn eval_to_valtree<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cid: GlobalId<'tcx>,
+) -> EvalToValTreeResult<'tcx> {
+ let const_alloc = tcx.eval_to_allocation_raw(param_env.and(cid))?;
+
+ // FIXME Need to provide a span to `eval_to_valtree`
+ let ecx = mk_eval_cx(
+ tcx, DUMMY_SP, param_env,
+ // It is absolutely crucial for soundness that
+ // we do not read from static items or other mutable memory.
+ false,
+ );
+ let place = ecx.raw_const_to_mplace(const_alloc).unwrap();
+ debug!(?place);
+
+ let mut num_nodes = 0;
+ let valtree_result = const_to_valtree_inner(&ecx, &place, &mut num_nodes);
+
+ match valtree_result {
+ Ok(valtree) => Ok(Some(valtree)),
+ Err(err) => {
+ let did = cid.instance.def_id();
+ let s = cid.display(tcx);
+ match err {
+ ValTreeCreationError::NodesOverflow => {
+ let msg = format!("maximum number of nodes exceeded in constant {}", &s);
+ let mut diag = match tcx.hir().span_if_local(did) {
+ Some(span) => tcx.sess.struct_span_err(span, &msg),
+ None => tcx.sess.struct_err(&msg),
+ };
+ diag.emit();
+
+ Ok(None)
+ }
+ ValTreeCreationError::NonSupportedType | ValTreeCreationError::Other => Ok(None),
+ }
+ }
+ }
+}
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn try_destructure_mir_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ val: mir::ConstantKind<'tcx>,
+) -> InterpResult<'tcx, mir::DestructuredMirConstant<'tcx>> {
+ trace!("destructure_mir_constant: {:?}", val);
+ let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+ let op = ecx.mir_const_to_op(&val, None)?;
+
+ // We go to `usize` as we cannot allocate anything bigger anyway.
+ let (field_count, variant, down) = match val.ty().kind() {
+ ty::Array(_, len) => (len.eval_usize(tcx, param_env) as usize, None, op),
+ ty::Adt(def, _) if def.variants().is_empty() => {
+ throw_ub!(Unreachable)
+ }
+ ty::Adt(def, _) => {
+ let variant = ecx.read_discriminant(&op)?.1;
+ let down = ecx.operand_downcast(&op, variant)?;
+ (def.variants()[variant].fields.len(), Some(variant), down)
+ }
+ ty::Tuple(substs) => (substs.len(), None, op),
+ _ => bug!("cannot destructure mir constant {:?}", val),
+ };
+
+ let fields_iter = (0..field_count)
+ .map(|i| {
+ let field_op = ecx.operand_field(&down, i)?;
+ let val = op_to_const(&ecx, &field_op);
+ Ok(mir::ConstantKind::Val(val, field_op.layout.ty))
+ })
+ .collect::<InterpResult<'tcx, Vec<_>>>()?;
+ let fields = tcx.arena.alloc_from_iter(fields_iter);
+
+ Ok(mir::DestructuredMirConstant { variant, fields })
+}
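+
+// E.g. (illustrative): destructuring the constant `(5u8, false)` yields
+// `DestructuredMirConstant { variant: None, fields: [5u8, false] }`.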
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn deref_mir_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ val: mir::ConstantKind<'tcx>,
+) -> mir::ConstantKind<'tcx> {
+ let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+ let op = ecx.mir_const_to_op(&val, None).unwrap();
+ let mplace = ecx.deref_operand(&op).unwrap();
+ if let Some(alloc_id) = mplace.ptr.provenance {
+ assert_eq!(
+ tcx.global_alloc(alloc_id).unwrap_memory().0.0.mutability,
+ Mutability::Not,
+ "deref_mir_constant cannot be used with mutable allocations as \
+ that could allow pattern matching to observe mutable statics",
+ );
+ }
+
+ let ty = match mplace.meta {
+ MemPlaceMeta::None => mplace.layout.ty,
+        // For unsized types, figure out the actual type behind the metadata.
+ MemPlaceMeta::Meta(scalar) => match mplace.layout.ty.kind() {
+ ty::Str => bug!("there's no sized equivalent of a `str`"),
+ ty::Slice(elem_ty) => tcx.mk_array(*elem_ty, scalar.to_machine_usize(&tcx).unwrap()),
+ _ => bug!(
+ "type {} should not have metadata, but had {:?}",
+ mplace.layout.ty,
+ mplace.meta
+ ),
+ },
+ };
+
+ mir::ConstantKind::Val(op_to_const(&ecx, &mplace.into()), ty)
+}
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
new file mode 100644
index 000000000..8fff4571d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -0,0 +1,475 @@
+use super::eval_queries::{mk_eval_cx, op_to_const};
+use super::machine::CompileTimeEvalContext;
+use super::{ValTreeCreationError, ValTreeCreationResult, VALTREE_MAX_NODES};
+use crate::interpret::{
+ intern_const_alloc_recursive, ConstValue, ImmTy, Immediate, InternKind, MemPlaceMeta,
+ MemoryKind, PlaceTy, Scalar, ScalarMaybeUninit,
+};
+use crate::interpret::{MPlaceTy, Value};
+use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
+use rustc_span::source_map::DUMMY_SP;
+use rustc_target::abi::{Align, VariantIdx};
+
+#[instrument(skip(ecx), level = "debug")]
+fn branches<'tcx>(
+ ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &MPlaceTy<'tcx>,
+ n: usize,
+ variant: Option<VariantIdx>,
+ num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+ let place = match variant {
+ Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
+ None => *place,
+ };
+ let variant = variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
+ debug!(?place, ?variant);
+
+ let mut fields = Vec::with_capacity(n);
+ for i in 0..n {
+ let field = ecx.mplace_field(&place, i).unwrap();
+ let valtree = const_to_valtree_inner(ecx, &field, num_nodes)?;
+ fields.push(Some(valtree));
+ }
+
+    // For enums, we prepend the variant index to the variant's fields, so we can figure
+    // out the variant again when just seeing a valtree.
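+    // E.g. (illustrative): `Some(3u8)` becomes `Branch([Leaf(1), Leaf(3)])`, where the
+    // leading `Leaf(1)` is the variant index of `Some`.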
+ let branches = variant
+ .into_iter()
+ .chain(fields.into_iter())
+ .collect::<Option<Vec<_>>>()
+ .expect("should have already checked for errors in ValTree creation");
+
+ // Have to account for ZSTs here
+ if branches.len() == 0 {
+ *num_nodes += 1;
+ }
+
+ Ok(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(branches)))
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn slice_branches<'tcx>(
+ ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &MPlaceTy<'tcx>,
+ num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+ let n = place
+ .len(&ecx.tcx.tcx)
+ .unwrap_or_else(|_| panic!("expected to use len of place {:?}", place));
+
+ let mut elems = Vec::with_capacity(n as usize);
+ for i in 0..n {
+ let place_elem = ecx.mplace_index(place, i).unwrap();
+ let valtree = const_to_valtree_inner(ecx, &place_elem, num_nodes)?;
+ elems.push(valtree);
+ }
+
+ Ok(ty::ValTree::Branch(ecx.tcx.arena.alloc_from_iter(elems)))
+}
+
+#[instrument(skip(ecx), level = "debug")]
+pub(crate) fn const_to_valtree_inner<'tcx>(
+ ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &MPlaceTy<'tcx>,
+ num_nodes: &mut usize,
+) -> ValTreeCreationResult<'tcx> {
+ let ty = place.layout.ty;
+ debug!("ty kind: {:?}", ty.kind());
+
+ if *num_nodes >= VALTREE_MAX_NODES {
+ return Err(ValTreeCreationError::NodesOverflow);
+ }
+
+ match ty.kind() {
+ ty::FnDef(..) => {
+ *num_nodes += 1;
+ Ok(ty::ValTree::zst())
+ }
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
+ let Ok(val) = ecx.read_immediate(&place.into()) else {
+ return Err(ValTreeCreationError::Other);
+ };
+ let val = val.to_scalar().unwrap();
+ *num_nodes += 1;
+
+ Ok(ty::ValTree::Leaf(val.assert_int()))
+ }
+
+    // Raw pointers are not allowed in type-level constants, as we cannot properly test them
+    // for equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
+ // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
+ // agree with runtime equality tests.
+ ty::FnPtr(_) | ty::RawPtr(_) => Err(ValTreeCreationError::NonSupportedType),
+
+ ty::Ref(_, _, _) => {
+            let Ok(derefd_place) = ecx.deref_operand(&place.into()) else {
+ return Err(ValTreeCreationError::Other);
+ };
+ debug!(?derefd_place);
+
+ const_to_valtree_inner(ecx, &derefd_place, num_nodes)
+ }
+
+ ty::Str | ty::Slice(_) | ty::Array(_, _) => {
+ slice_branches(ecx, place, num_nodes)
+ }
+    // Trait objects are not allowed in type-level constants, as we have no concept for
+ // resolving their backing type, even if we can do that at const eval time. We may
+ // hypothetically be able to allow `dyn StructuralEq` trait objects in the future,
+ // but it is unclear if this is useful.
+ ty::Dynamic(..) => Err(ValTreeCreationError::NonSupportedType),
+
+ ty::Tuple(elem_tys) => {
+ branches(ecx, place, elem_tys.len(), None, num_nodes)
+ }
+
+ ty::Adt(def, _) => {
+ if def.is_union() {
+ return Err(ValTreeCreationError::NonSupportedType);
+ } else if def.variants().is_empty() {
+ bug!("uninhabited types should have errored and never gotten converted to valtree")
+ }
+
+ let Ok((_, variant)) = ecx.read_discriminant(&place.into()) else {
+ return Err(ValTreeCreationError::Other);
+ };
+ branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
+ }
+
+ ty::Never
+ | ty::Error(_)
+ | ty::Foreign(..)
+ | ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ // FIXME(oli-obk): we could look behind opaque types
+ | ty::Opaque(..)
+ | ty::Infer(_)
+ // FIXME(oli-obk): we can probably encode closures just like structs
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..) => Err(ValTreeCreationError::NonSupportedType),
+ }
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn create_mplace_from_layout<'tcx>(
+ ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+) -> MPlaceTy<'tcx> {
+ let tcx = ecx.tcx;
+ let param_env = ecx.param_env;
+ let layout = tcx.layout_of(param_env.and(ty)).unwrap();
+ debug!(?layout);
+
+ ecx.allocate(layout, MemoryKind::Stack).unwrap()
+}
+
+// Walks custom DSTs and gets the type of the unsized field and the number of elements
+// in the unsized field.
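+//
+// For example (an illustrative, hypothetical DST): given
+// `struct Packet { tag: u8, body: [u16] }` and a valtree whose last branch holds three
+// leaves, this returns `(u16, 3)`.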
+fn get_info_on_unsized_field<'tcx>(
+ ty: Ty<'tcx>,
+ valtree: ty::ValTree<'tcx>,
+ tcx: TyCtxt<'tcx>,
+) -> (Ty<'tcx>, usize) {
+ let mut last_valtree = valtree;
+ let tail = tcx.struct_tail_with_normalize(
+ ty,
+ |ty| ty,
+ || {
+ let branches = last_valtree.unwrap_branch();
+ last_valtree = branches[branches.len() - 1];
+ debug!(?branches, ?last_valtree);
+ },
+ );
+ let unsized_inner_ty = match tail.kind() {
+ ty::Slice(t) => *t,
+ ty::Str => tail,
+ _ => bug!("expected Slice or Str"),
+ };
+
+ // Have to adjust type for ty::Str
+ let unsized_inner_ty = match unsized_inner_ty.kind() {
+ ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
+ _ => unsized_inner_ty,
+ };
+
+ // Get the number of elements in the unsized field
+ let num_elems = last_valtree.unwrap_branch().len();
+
+ (unsized_inner_ty, num_elems)
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn create_pointee_place<'tcx>(
+ ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+ ty: Ty<'tcx>,
+ valtree: ty::ValTree<'tcx>,
+) -> MPlaceTy<'tcx> {
+ let tcx = ecx.tcx.tcx;
+
+ if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty()) {
+ // We need to create `Allocation`s for custom DSTs
+
+ let (unsized_inner_ty, num_elems) = get_info_on_unsized_field(ty, valtree, tcx);
+ let unsized_inner_ty = match unsized_inner_ty.kind() {
+ ty::Str => tcx.mk_ty(ty::Uint(ty::UintTy::U8)),
+ _ => unsized_inner_ty,
+ };
+ let unsized_inner_ty_size =
+ tcx.layout_of(ty::ParamEnv::empty().and(unsized_inner_ty)).unwrap().layout.size();
+ debug!(?unsized_inner_ty, ?unsized_inner_ty_size, ?num_elems);
+
+        // For custom DSTs, only the last field/element is unsized, but we also need to
+        // allocate space for the other fields/elements.
+ let layout = tcx.layout_of(ty::ParamEnv::empty().and(ty)).unwrap();
+ let size_of_sized_part = layout.layout.size();
+
+ // Get the size of the memory behind the DST
+ let dst_size = unsized_inner_ty_size.checked_mul(num_elems as u64, &tcx).unwrap();
+
+ let size = size_of_sized_part.checked_add(dst_size, &tcx).unwrap();
+ let align = Align::from_bytes(size.bytes().next_power_of_two()).unwrap();
+ let ptr = ecx.allocate_ptr(size, align, MemoryKind::Stack).unwrap();
+ debug!(?ptr);
+
+ let place = MPlaceTy::from_aligned_ptr_with_meta(
+ ptr.into(),
+ layout,
+ MemPlaceMeta::Meta(Scalar::from_machine_usize(num_elems as u64, &tcx)),
+ );
+ debug!(?place);
+
+ place
+ } else {
+ create_mplace_from_layout(ecx, ty)
+ }
+}
+
+/// Converts a `ValTree` to a `ConstValue`, which is needed after mir
+/// construction has finished.
+// FIXME Merge `valtree_to_const_value` and `valtree_into_mplace` into one function
+#[instrument(skip(tcx), level = "debug")]
+pub fn valtree_to_const_value<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+ valtree: ty::ValTree<'tcx>,
+) -> ConstValue<'tcx> {
+ // Basic idea: We directly construct `Scalar` values from trivial `ValTree`s
+ // (those for constants with type bool, int, uint, float or char).
+ // For all other types we create an `MPlace` and fill that by walking
+ // the `ValTree` and using `place_projection` and `place_field` to
+ // create inner `MPlace`s which are filled recursively.
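+    // A sketch of the resulting values (illustrative, assumed semantics): for `u8` with
+    // valtree `Leaf(7)` this returns `ConstValue::Scalar(7)`; for `&[u8]` with
+    // `Branch([Leaf(1), Leaf(2)])` we allocate a place for the pointee, write the two
+    // bytes, intern the allocation, and return a slice `ConstValue` pointing at it.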
+
+ let (param_env, ty) = param_env_ty.into_parts();
+ let mut ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
+
+ match ty.kind() {
+ ty::FnDef(..) => {
+ assert!(valtree.unwrap_branch().is_empty());
+ ConstValue::ZeroSized
+ }
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
+ ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)),
+ ty::ValTree::Branch(_) => bug!(
+ "ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
+ ),
+ },
+ ty::Ref(_, _, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Adt(..) => {
+ let mut place = match ty.kind() {
+ ty::Ref(_, inner_ty, _) => {
+ // Need to create a place for the pointee to fill for Refs
+ create_pointee_place(&mut ecx, *inner_ty, valtree)
+ }
+ _ => create_mplace_from_layout(&mut ecx, ty),
+ };
+ debug!(?place);
+
+ valtree_into_mplace(&mut ecx, &mut place, valtree);
+ dump_place(&ecx, place.into());
+ intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &place).unwrap();
+
+ let const_val = match ty.kind() {
+ ty::Ref(_, _, _) => {
+ let ref_place = place.to_ref(&tcx);
+ let imm =
+ ImmTy::from_immediate(ref_place, tcx.layout_of(param_env_ty).unwrap());
+
+ op_to_const(&ecx, &imm.into())
+ }
+ _ => op_to_const(&ecx, &place.into()),
+ };
+ debug!(?const_val);
+
+ const_val
+ }
+ ty::Never
+ | ty::Error(_)
+ | ty::Foreign(..)
+ | ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Opaque(..)
+ | ty::Infer(_)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(_)
+ | ty::Str
+ | ty::Slice(_)
+ | ty::Dynamic(..) => bug!("no ValTree should have been created for type {:?}", ty.kind()),
+ }
+}
+
+#[instrument(skip(ecx), level = "debug")]
+fn valtree_into_mplace<'tcx>(
+ ecx: &mut CompileTimeEvalContext<'tcx, 'tcx>,
+ place: &mut MPlaceTy<'tcx>,
+ valtree: ty::ValTree<'tcx>,
+) {
+    // This will match on the valtree and recursively write the value(s) corresponding
+    // to the ValTree into the place.
+
+ let tcx = ecx.tcx.tcx;
+ let ty = place.layout.ty;
+
+ match ty.kind() {
+ ty::FnDef(_, _) => {
+ ecx.write_immediate(Immediate::Uninit, &place.into()).unwrap();
+ }
+ ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
+ let scalar_int = valtree.unwrap_leaf();
+ debug!("writing trivial valtree {:?} to place {:?}", scalar_int, place);
+ ecx.write_immediate(
+ Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar_int.into())),
+ &place.into(),
+ )
+ .unwrap();
+ }
+ ty::Ref(_, inner_ty, _) => {
+ let mut pointee_place = create_pointee_place(ecx, *inner_ty, valtree);
+ debug!(?pointee_place);
+
+ valtree_into_mplace(ecx, &mut pointee_place, valtree);
+ dump_place(ecx, pointee_place.into());
+ intern_const_alloc_recursive(ecx, InternKind::Constant, &pointee_place).unwrap();
+
+ let imm = match inner_ty.kind() {
+ ty::Slice(_) | ty::Str => {
+ let len = valtree.unwrap_branch().len();
+ let len_scalar =
+ ScalarMaybeUninit::Scalar(Scalar::from_machine_usize(len as u64, &tcx));
+
+ Immediate::ScalarPair(
+ ScalarMaybeUninit::from_maybe_pointer((*pointee_place).ptr, &tcx),
+ len_scalar,
+ )
+ }
+ _ => pointee_place.to_ref(&tcx),
+ };
+ debug!(?imm);
+
+ ecx.write_immediate(imm, &place.into()).unwrap();
+ }
+ ty::Adt(_, _) | ty::Tuple(_) | ty::Array(_, _) | ty::Str | ty::Slice(_) => {
+ let branches = valtree.unwrap_branch();
+
+ // Need to downcast place for enums
+ let (place_adjusted, branches, variant_idx) = match ty.kind() {
+ ty::Adt(def, _) if def.is_enum() => {
+ // First element of valtree corresponds to variant
+ let scalar_int = branches[0].unwrap_leaf();
+ let variant_idx = VariantIdx::from_u32(scalar_int.try_to_u32().unwrap());
+ let variant = def.variant(variant_idx);
+ debug!(?variant);
+
+ (
+ place.project_downcast(ecx, variant_idx).unwrap(),
+ &branches[1..],
+ Some(variant_idx),
+ )
+ }
+ _ => (*place, branches, None),
+ };
+ debug!(?place_adjusted, ?branches);
+
+ // Create the places (by indexing into `place`) for the fields and fill
+ // them recursively
+ for (i, inner_valtree) in branches.iter().enumerate() {
+ debug!(?i, ?inner_valtree);
+
+ let mut place_inner = match ty.kind() {
+ ty::Str | ty::Slice(_) => ecx.mplace_index(&place, i as u64).unwrap(),
+ _ if !ty.is_sized(ecx.tcx, ty::ParamEnv::empty())
+ && i == branches.len() - 1 =>
+ {
+ // Note: For custom DSTs we need to manually process the last unsized field.
+ // We created a `Pointer` for the `Allocation` of the complete sized version of
+ // the Adt in `create_pointee_place` and now we fill that `Allocation` with the
+                        // values in the ValTree. For the unsized field we additionally have
+                        // to add the metadata.
+
+ let (unsized_inner_ty, num_elems) =
+ get_info_on_unsized_field(ty, valtree, tcx);
+ debug!(?unsized_inner_ty);
+
+ let inner_ty = match ty.kind() {
+ ty::Adt(def, substs) => {
+ def.variant(VariantIdx::from_u32(0)).fields[i].ty(tcx, substs)
+ }
+ ty::Tuple(inner_tys) => inner_tys[i],
+ _ => bug!("unexpected unsized type {:?}", ty),
+ };
+
+ let inner_layout =
+ tcx.layout_of(ty::ParamEnv::empty().and(inner_ty)).unwrap();
+ debug!(?inner_layout);
+
+ let offset = place_adjusted.layout.fields.offset(i);
+ place
+ .offset_with_meta(
+ offset,
+ MemPlaceMeta::Meta(Scalar::from_machine_usize(
+ num_elems as u64,
+ &tcx,
+ )),
+ inner_layout,
+ &tcx,
+ )
+ .unwrap()
+ }
+ _ => ecx.mplace_field(&place_adjusted, i).unwrap(),
+ };
+
+ debug!(?place_inner);
+ valtree_into_mplace(ecx, &mut place_inner, *inner_valtree);
+ dump_place(&ecx, place_inner.into());
+ }
+
+ debug!("dump of place_adjusted:");
+ dump_place(ecx, place_adjusted.into());
+
+ if let Some(variant_idx) = variant_idx {
+            // Don't forget to fill the place with the discriminant of the enum.
+ ecx.write_discriminant(variant_idx, &place.into()).unwrap();
+ }
+
+ debug!("dump of place after writing discriminant:");
+ dump_place(ecx, place.into());
+ }
+ _ => bug!("shouldn't have created a ValTree for {:?}", ty),
+ }
+}
+
+fn dump_place<'tcx>(ecx: &CompileTimeEvalContext<'tcx, 'tcx>, place: PlaceTy<'tcx>) {
+ trace!("{:?}", ecx.dump_place(*place));
+}
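+
+// Illustrative sketch (not part of the conversion itself): for a value of type
+// `(u8, Option<u16>)`, the valtree fed into `valtree_into_mplace` looks like
+//
+//     Branch([Leaf(3u8), Branch([Leaf(1 /* variant `Some` */), Leaf(7u16)])])
+//
+// An enum's branch stores the variant index as its first leaf, which is why the
+// code above downcasts the place, fills the remaining fields, and only then
+// writes the discriminant.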
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
new file mode 100644
index 000000000..a463fe7b9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -0,0 +1,89 @@
+use rustc_hir::ConstContext;
+use rustc_macros::SessionDiagnostic;
+use rustc_span::Span;
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::unstable_in_stable)]
+pub(crate) struct UnstableInStable {
+ pub gate: String,
+ #[primary_span]
+ pub span: Span,
+ #[suggestion(
+ const_eval::unstable_sugg,
+ code = "#[rustc_const_unstable(feature = \"...\", issue = \"...\")]\n",
+ applicability = "has-placeholders"
+ )]
+ #[suggestion(
+ const_eval::bypass_sugg,
+ code = "#[rustc_allow_const_fn_unstable({gate})]\n",
+ applicability = "has-placeholders"
+ )]
+ pub attr_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::thread_local_access, code = "E0625")]
+pub(crate) struct NonConstOpErr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::static_access, code = "E0013")]
+#[help]
+pub(crate) struct StaticAccessErr {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+ #[note(const_eval::teach_note)]
+ #[help(const_eval::teach_help)]
+ pub teach: Option<()>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::raw_ptr_to_int)]
+#[note]
+#[note(const_eval::note2)]
+pub(crate) struct RawPtrToIntErr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::raw_ptr_comparison)]
+#[note]
+pub(crate) struct RawPtrComparisonErr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::panic_non_str)]
+pub(crate) struct PanicNonStrErr {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::mut_deref, code = "E0658")]
+pub(crate) struct MutDerefErr {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::transient_mut_borrow, code = "E0658")]
+pub(crate) struct TransientMutBorrowErr {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(const_eval::transient_mut_borrow_raw, code = "E0658")]
+pub(crate) struct TransientMutBorrowErrRaw {
+ #[primary_span]
+ pub span: Span,
+ pub kind: ConstContext,
+}
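+
+// Roughly how these structs are used at a call site (a sketch; the concrete
+// emission sites live in the const-checking code, and `span`/`kind` here are
+// placeholders):
+//
+//     tcx.sess.emit_err(StaticAccessErr { span, kind, teach: None });
+//
+// The `#[error(...)]` slugs point into the const_eval Fluent resources, so the
+// structs themselves carry no hard-coded message text.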
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
new file mode 100644
index 000000000..c97c31eb9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -0,0 +1,365 @@
+use std::assert_matches::assert_matches;
+use std::convert::TryFrom;
+
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::{Float, FloatConvert};
+use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
+use rustc_middle::mir::CastKind;
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, FloatTy, Ty, TypeAndMut};
+use rustc_target::abi::Integer;
+use rustc_type_ir::sty::TyKind::*;
+
+use super::{
+ util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ pub fn cast(
+ &mut self,
+ src: &OpTy<'tcx, M::Provenance>,
+ cast_kind: CastKind,
+ cast_ty: Ty<'tcx>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ use rustc_middle::mir::CastKind::*;
+ // FIXME: In which cases should we trigger UB when the source is uninit?
+ match cast_kind {
+ Pointer(PointerCast::Unsize) => {
+ let cast_ty = self.layout_of(cast_ty)?;
+ self.unsize_into(src, cast_ty, dest)?;
+ }
+
+ PointerExposeAddress => {
+ let src = self.read_immediate(src)?;
+ let res = self.pointer_expose_address_cast(&src, cast_ty)?;
+ self.write_immediate(res, dest)?;
+ }
+
+ PointerFromExposedAddress => {
+ let src = self.read_immediate(src)?;
+ let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?;
+ self.write_immediate(res, dest)?;
+ }
+
+ Misc => {
+ let src = self.read_immediate(src)?;
+ let res = self.misc_cast(&src, cast_ty)?;
+ self.write_immediate(res, dest)?;
+ }
+
+ Pointer(PointerCast::MutToConstPointer | PointerCast::ArrayToPointer) => {
+ // These are NOPs, but can be wide pointers.
+ let v = self.read_immediate(src)?;
+ self.write_immediate(*v, dest)?;
+ }
+
+ Pointer(PointerCast::ReifyFnPointer) => {
+ // The src operand does not matter, just its type
+ match *src.layout.ty.kind() {
+ ty::FnDef(def_id, substs) => {
+ // All reifications must be monomorphic, bail out otherwise.
+ ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+ let instance = ty::Instance::resolve_for_fn_ptr(
+ *self.tcx,
+ self.param_env,
+ def_id,
+ substs,
+ )
+ .ok_or_else(|| err_inval!(TooGeneric))?;
+
+ let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+ self.write_pointer(fn_ptr, dest)?;
+ }
+ _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
+ }
+ }
+
+ Pointer(PointerCast::UnsafeFnPointer) => {
+ let src = self.read_immediate(src)?;
+ match cast_ty.kind() {
+ ty::FnPtr(_) => {
+ // No change to value
+ self.write_immediate(*src, dest)?;
+ }
+ _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {:?}", cast_ty),
+ }
+ }
+
+ Pointer(PointerCast::ClosureFnPointer(_)) => {
+ // The src operand does not matter, just its type
+ match *src.layout.ty.kind() {
+ ty::Closure(def_id, substs) => {
+ // All reifications must be monomorphic, bail out otherwise.
+ ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;
+
+ let instance = ty::Instance::resolve_closure(
+ *self.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .ok_or_else(|| err_inval!(TooGeneric))?;
+ let fn_ptr = self.create_fn_alloc_ptr(FnVal::Instance(instance));
+ self.write_pointer(fn_ptr, dest)?;
+ }
+ _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
+ }
+ }
+ }
+ Ok(())
+ }
+
+ pub fn misc_cast(
+ &mut self,
+ src: &ImmTy<'tcx, M::Provenance>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ use rustc_type_ir::sty::TyKind::*;
+ trace!("Casting {:?}: {:?} to {:?}", *src, src.layout.ty, cast_ty);
+
+ match src.layout.ty.kind() {
+ // Floating point
+ Float(FloatTy::F32) => {
+ return Ok(self.cast_from_float(src.to_scalar()?.to_f32()?, cast_ty).into());
+ }
+ Float(FloatTy::F64) => {
+ return Ok(self.cast_from_float(src.to_scalar()?.to_f64()?, cast_ty).into());
+ }
+ // The rest is integer/pointer-"like", including fn ptr casts
+ _ => assert!(
+ src.layout.ty.is_bool()
+ || src.layout.ty.is_char()
+ || src.layout.ty.is_integral()
+ || src.layout.ty.is_any_ptr(),
+ "Unexpected cast from type {:?}",
+ src.layout.ty
+ ),
+ }
+
+ // # First handle non-scalar source values.
+
+ // Handle casting any ptr to raw ptr (might be a fat ptr).
+ if src.layout.ty.is_any_ptr() && cast_ty.is_unsafe_ptr() {
+ let dest_layout = self.layout_of(cast_ty)?;
+ if dest_layout.size == src.layout.size {
+                // Thin or fat pointer that just has the ptr kind of the target type changed.
+ return Ok(**src);
+ } else {
+ // Casting the metadata away from a fat ptr.
+ assert_eq!(src.layout.size, 2 * self.pointer_size());
+ assert_eq!(dest_layout.size, self.pointer_size());
+ assert!(src.layout.ty.is_unsafe_ptr());
+ return match **src {
+ Immediate::ScalarPair(data, _) => Ok(data.check_init()?.into()),
+ Immediate::Scalar(..) => span_bug!(
+ self.cur_span(),
+ "{:?} input to a fat-to-thin cast ({:?} -> {:?})",
+ *src,
+ src.layout.ty,
+ cast_ty
+ ),
+ Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
+ };
+ }
+ }
+
+ // # The remaining source values are scalar and "int-like".
+ let scalar = src.to_scalar()?;
+ Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
+ }
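+
+    // Illustrative surface forms that reach the branches above (sketch only):
+    //
+    //     let p: *const [u8] = &[1u8, 2, 3];
+    //     let thin = p as *const u8; // fat -> thin: the length metadata is dropped
+    //     let n = 1u8 as u32;        // plain int-like cast, `cast_from_int_like`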
+
+ pub fn pointer_expose_address_cast(
+ &mut self,
+ src: &ImmTy<'tcx, M::Provenance>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
+ assert!(cast_ty.is_integral());
+
+ let scalar = src.to_scalar()?;
+ let ptr = scalar.to_pointer(self)?;
+ match ptr.into_pointer_or_addr() {
+ Ok(ptr) => M::expose_ptr(self, ptr)?,
+ Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
+ };
+ Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
+ }
+
+ pub fn pointer_from_exposed_address_cast(
+ &mut self,
+ src: &ImmTy<'tcx, M::Provenance>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ assert!(src.layout.ty.is_integral());
+ assert_matches!(cast_ty.kind(), ty::RawPtr(_));
+
+ // First cast to usize.
+ let scalar = src.to_scalar()?;
+ let addr = self.cast_from_int_like(scalar, src.layout, self.tcx.types.usize)?;
+ let addr = addr.to_machine_usize(self)?;
+
+ // Then turn address into pointer.
+ let ptr = M::ptr_from_addr_cast(&self, addr)?;
+ Ok(Scalar::from_maybe_pointer(ptr, self).into())
+ }
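+
+    // Surface-level sketch of the two casts above; `as` between pointers and
+    // integers lowers to these `CastKind`s:
+    //
+    //     let x = 42u8;
+    //     let p = &x as *const u8;
+    //     let addr = p as usize;     // `PointerExposeAddress`
+    //     let q = addr as *const u8; // `PointerFromExposedAddress`
+    //     assert_eq!(unsafe { *q }, 42);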
+
+ pub fn cast_from_int_like(
+ &self,
+ scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
+ src_layout: TyAndLayout<'tcx>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ // Let's make sure v is sign-extended *if* it has a signed type.
+ let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+
+ let v = scalar.to_bits(src_layout.size)?;
+ let v = if signed { self.sign_extend(v, src_layout) } else { v };
+ trace!("cast_from_scalar: {}, {} -> {}", v, src_layout.ty, cast_ty);
+
+ Ok(match *cast_ty.kind() {
+ Int(_) | Uint(_) => {
+ let size = match *cast_ty.kind() {
+ Int(t) => Integer::from_int_ty(self, t).size(),
+ Uint(t) => Integer::from_uint_ty(self, t).size(),
+ _ => bug!(),
+ };
+ let v = size.truncate(v);
+ Scalar::from_uint(v, size)
+ }
+
+ Float(FloatTy::F32) if signed => Scalar::from_f32(Single::from_i128(v as i128).value),
+ Float(FloatTy::F64) if signed => Scalar::from_f64(Double::from_i128(v as i128).value),
+ Float(FloatTy::F32) => Scalar::from_f32(Single::from_u128(v).value),
+ Float(FloatTy::F64) => Scalar::from_f64(Double::from_u128(v).value),
+
+ Char => {
+ // `u8` to `char` cast
+ Scalar::from_u32(u8::try_from(v).unwrap().into())
+ }
+
+ // Casts to bool are not permitted by rustc, no need to handle them here.
+ _ => span_bug!(self.cur_span(), "invalid int to {:?} cast", cast_ty),
+ })
+ }
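+
+    // For illustration, the sign-extend/truncate pipeline above as seen from
+    // surface Rust:
+    //
+    //     assert_eq!(-1i8 as u16, 0xFFFF); // sign-extended, then truncated to 16 bits
+    //     assert_eq!(300u16 as u8, 44);    // truncated: 300 mod 256
+    //     assert_eq!(65u8 as char, 'A');   // `u8` is the only integer type castable to `char`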
+
+ fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::Provenance>
+ where
+ F: Float + Into<Scalar<M::Provenance>> + FloatConvert<Single> + FloatConvert<Double>,
+ {
+ use rustc_type_ir::sty::TyKind::*;
+ match *dest_ty.kind() {
+ // float -> uint
+ Uint(t) => {
+ let size = Integer::from_uint_ty(self, t).size();
+ // `to_u128` is a saturating cast, which is what we need
+                // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_u128_r).
+ let v = f.to_u128(size.bits_usize()).value;
+ // This should already fit the bit width
+ Scalar::from_uint(v, size)
+ }
+ // float -> int
+ Int(t) => {
+ let size = Integer::from_int_ty(self, t).size();
+ // `to_i128` is a saturating cast, which is what we need
+ // (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
+ let v = f.to_i128(size.bits_usize()).value;
+ Scalar::from_int(v, size)
+ }
+ // float -> f32
+ Float(FloatTy::F32) => Scalar::from_f32(f.convert(&mut false).value),
+ // float -> f64
+ Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
+ // That's it.
+ _ => span_bug!(self.cur_span(), "invalid float to {:?} cast", dest_ty),
+ }
+ }
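+
+    // The saturating behavior matches surface Rust, where float-to-int `as`
+    // casts saturate and NaN maps to zero:
+    //
+    //     assert_eq!(f32::NAN as i32, 0);
+    //     assert_eq!(f32::INFINITY as u8, u8::MAX);
+    //     assert_eq!(-1.5f64 as u32, 0);
+    //     assert_eq!(300.7f32 as u8, 255);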
+
+ fn unsize_into_ptr(
+ &mut self,
+ src: &OpTy<'tcx, M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ // The pointee types
+ source_ty: Ty<'tcx>,
+ cast_ty: Ty<'tcx>,
+ ) -> InterpResult<'tcx> {
+ // A<Struct> -> A<Trait> conversion
+ let (src_pointee_ty, dest_pointee_ty) =
+ self.tcx.struct_lockstep_tails_erasing_lifetimes(source_ty, cast_ty, self.param_env);
+
+ match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
+ (&ty::Array(_, length), &ty::Slice(_)) => {
+ let ptr = self.read_immediate(src)?.to_scalar()?;
+                // The `u64` cast is from `usize` to `u64`, which is always lossless.
+ let val =
+ Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
+ self.write_immediate(val, dest)
+ }
+ (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+ let (old_data, old_vptr) = self.read_immediate(src)?.to_scalar_pair()?;
+ let old_vptr = old_vptr.to_pointer(self)?;
+ let (ty, old_trait) = self.get_ptr_vtable(old_vptr)?;
+ if old_trait != data_a.principal() {
+ throw_ub_format!("upcast on a pointer whose vtable does not match its type");
+ }
+ let new_vptr = self.get_vtable_ptr(ty, data_b.principal())?;
+ self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
+ }
+ (_, &ty::Dynamic(ref data, _)) => {
+ // Initial cast from sized to dyn trait
+ let vtable = self.get_vtable_ptr(src_pointee_ty, data.principal())?;
+ let ptr = self.read_immediate(src)?.to_scalar()?;
+ let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
+ self.write_immediate(val, dest)
+ }
+
+ _ => {
+ span_bug!(self.cur_span(), "invalid unsizing {:?} -> {:?}", src.layout.ty, cast_ty)
+ }
+ }
+ }
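+
+    // Surface forms that land in the arms above (illustrative):
+    //
+    //     let s: &[u8] = &[1u8, 2, 3];      // Array -> Slice: ScalarPair(ptr, 3)
+    //     let d: &dyn std::fmt::Debug = &5; // sized -> dyn: ScalarPair(ptr, vtable)
+    //
+    // The Dynamic -> Dynamic arm covers trait object upcasts, which keep the
+    // data pointer and swap in the target trait's vtable.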
+
+ fn unsize_into(
+ &mut self,
+ src: &OpTy<'tcx, M::Provenance>,
+ cast_ty: TyAndLayout<'tcx>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
+ match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
+ (&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
+ | (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
+ self.unsize_into_ptr(src, dest, *s, *c)
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
+ assert_eq!(def_a, def_b);
+
+ // unsizing of generic struct with pointer fields
+ // Example: `Arc<T>` -> `Arc<Trait>`
+ // here we need to increase the size of every &T thin ptr field to a fat ptr
+ for i in 0..src.layout.fields.count() {
+ let cast_ty_field = cast_ty.field(self, i);
+ if cast_ty_field.is_zst() {
+ continue;
+ }
+ let src_field = self.operand_field(src, i)?;
+ let dst_field = self.place_field(dest, i)?;
+ if src_field.layout.ty == cast_ty_field.ty {
+ self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
+ } else {
+ self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
+ }
+ }
+ Ok(())
+ }
+ _ => span_bug!(
+ self.cur_span(),
+ "unsize_into: invalid conversion: {:?} -> {:?}",
+ src.layout,
+ dest.layout
+ ),
+ }
+ }
+}
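+
+// `unsize_into`'s `Adt` branch covers coercions like `Arc<T> -> Arc<dyn Trait>`,
+// where a thin pointer field inside the struct is widened in place. A
+// surface-level sketch:
+//
+//     use std::sync::Arc;
+//
+//     trait Speak { fn hi(&self) -> &'static str; }
+//     struct Dog;
+//     impl Speak for Dog { fn hi(&self) -> &'static str { "woof" } }
+//
+//     let thin: Arc<Dog> = Arc::new(Dog);
+//     let fat: Arc<dyn Speak> = thin; // only the pointer field changes layout
+//     assert_eq!(fat.hi(), "woof");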
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
new file mode 100644
index 000000000..150d6589b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -0,0 +1,1019 @@
+use std::cell::Cell;
+use std::fmt;
+use std::mem;
+
+use rustc_hir::{self as hir, def_id::DefId, definitions::DefPathData};
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpError, InvalidProgramInfo};
+use rustc_middle::ty::layout::{
+ self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
+ TyAndLayout,
+};
+use rustc_middle::ty::{
+ self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
+};
+use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_session::Limit;
+use rustc_span::{Pos, Span};
+use rustc_target::abi::{call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout};
+
+use super::{
+ AllocId, GlobalId, Immediate, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace,
+ MemPlaceMeta, Memory, MemoryKind, Operand, Place, PlaceTy, PointerArithmetic, Provenance,
+ Scalar, ScalarMaybeUninit, StackPopJump,
+};
+use crate::transform::validate::equal_up_to_regions;
+
+pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+ /// Stores the `Machine` instance.
+ ///
+ /// Note: the stack is provided by the machine.
+ pub machine: M,
+
+ /// The results of the type checker, from rustc.
+ /// The span in this is the "root" of the evaluation, i.e., the const
+ /// we are evaluating (if this is CTFE).
+ pub tcx: TyCtxtAt<'tcx>,
+
+ /// Bounds in scope for polymorphic evaluations.
+ pub(crate) param_env: ty::ParamEnv<'tcx>,
+
+ /// The virtual memory system.
+ pub memory: Memory<'mir, 'tcx, M>,
+
+ /// The recursion limit (cached from `tcx.recursion_limit(())`)
+ pub recursion_limit: Limit,
+}
+
+// The `PhantomData` exists to prevent this type from being `Send`. If it were sent across a thread
+// boundary and dropped in the other thread, it would exit the span in the other thread.
+struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
+
+impl SpanGuard {
+ /// By default a `SpanGuard` does nothing.
+ fn new() -> Self {
+ Self(tracing::Span::none(), std::marker::PhantomData)
+ }
+
+ /// If a span is entered, we exit the previous span (if any, normally none) and enter the
+ /// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
+    /// `Frame` by creating a dummy span to begin with and then entering it once the frame has
+ /// been pushed.
+ fn enter(&mut self, span: tracing::Span) {
+        // This executes the destructor on the previous instance of `SpanGuard`, ensuring that
+        // span entries and exits always stay balanced. If the guard is leaked (e.g. via
+        // `mem::forget`), we can't protect the tracing stack, but that just leads to weird
+        // logging, not to actual problems.
+ *self = Self(span, std::marker::PhantomData);
+ self.0.with_subscriber(|(id, dispatch)| {
+ dispatch.enter(id);
+ });
+ }
+}
+
+impl Drop for SpanGuard {
+ fn drop(&mut self) {
+ self.0.with_subscriber(|(id, dispatch)| {
+ dispatch.exit(id);
+ });
+ }
+}
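+
+// What the guard replicates, in ordinary `tracing` usage (sketch):
+//
+//     let span = tracing::info_span!("frame", "{}", instance);
+//     let entered = span.enter(); // the span is exited when `entered` drops
+//
+// `SpanGuard` exists so a frame can own its span without also holding the
+// borrowing `Entered` guard alongside it.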
+
+/// A stack frame.
+pub struct Frame<'mir, 'tcx, Prov: Provenance = AllocId, Extra = ()> {
+ ////////////////////////////////////////////////////////////////////////////////
+ // Function and callsite information
+ ////////////////////////////////////////////////////////////////////////////////
+ /// The MIR for the function called on this frame.
+ pub body: &'mir mir::Body<'tcx>,
+
+ /// The def_id and substs of the current function.
+ pub instance: ty::Instance<'tcx>,
+
+ /// Extra data for the machine.
+ pub extra: Extra,
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Return place and locals
+ ////////////////////////////////////////////////////////////////////////////////
+ /// Work to perform when returning from this function.
+ pub return_to_block: StackPopCleanup,
+
+ /// The location where the result of the current stack frame should be written to,
+ /// and its layout in the caller.
+ pub return_place: PlaceTy<'tcx, Prov>,
+
+ /// The list of locals for this stack frame, stored in order as
+ /// `[return_ptr, arguments..., variables..., temporaries...]`.
+ /// The locals are stored as `Option<Value>`s.
+ /// `None` represents a local that is currently dead, while a live local
+ /// can either directly contain `Scalar` or refer to some part of an `Allocation`.
+ ///
+ /// Do *not* access this directly; always go through the machine hook!
+ pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
+
+ /// The span of the `tracing` crate is stored here.
+ /// When the guard is dropped, the span is exited. This gives us
+ /// a full stack trace on all tracing statements.
+ tracing_span: SpanGuard,
+
+ ////////////////////////////////////////////////////////////////////////////////
+ // Current position within the function
+ ////////////////////////////////////////////////////////////////////////////////
+ /// If this is `Err`, we are not currently executing any particular statement in
+ /// this frame (can happen e.g. during frame initialization, and during unwinding on
+ /// frames without cleanup code).
+ /// We basically abuse `Result` as `Either`.
+ ///
+ /// Needs to be public because ConstProp does unspeakable things to it.
+ pub loc: Result<mir::Location, Span>,
+}
+
+/// What we store about a frame in an interpreter backtrace.
+#[derive(Debug)]
+pub struct FrameInfo<'tcx> {
+ pub instance: ty::Instance<'tcx>,
+ pub span: Span,
+ pub lint_root: Option<hir::HirId>,
+}
+
+/// Unwind information.
+#[derive(Clone, Copy, Eq, PartialEq, Debug)]
+pub enum StackPopUnwind {
+ /// The cleanup block.
+ Cleanup(mir::BasicBlock),
+ /// No cleanup needs to be done.
+ Skip,
+ /// Unwinding is not allowed (UB).
+ NotAllowed,
+}
+
+#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
+pub enum StackPopCleanup {
+ /// Jump to the next block in the caller, or cause UB if None (that's a function
+ /// that may never return). Also store layout of return place so
+ /// we can validate it at that layout.
+ /// `ret` stores the block we jump to on a normal return, while `unwind`
+ /// stores the block used for cleanup during unwinding.
+ Goto { ret: Option<mir::BasicBlock>, unwind: StackPopUnwind },
+ /// The root frame of the stack: nowhere else to jump to.
+ /// `cleanup` says whether locals are deallocated. Static computation
+ /// wants them leaked to intern what they need (and just throw away
+ /// the entire `ecx` when it is done).
+ Root { cleanup: bool },
+}
+
+/// State of a local variable including a memoized layout
+#[derive(Clone, Debug)]
+pub struct LocalState<'tcx, Prov: Provenance = AllocId> {
+ pub value: LocalValue<Prov>,
+    /// Don't modify if `Some`; this is only used to prevent computing the layout twice.
+ pub layout: Cell<Option<TyAndLayout<'tcx>>>,
+}
+
+/// Current value of a local variable
+#[derive(Copy, Clone, Debug)] // Miri debug-prints these
+pub enum LocalValue<Prov: Provenance = AllocId> {
+ /// This local is not currently alive, and cannot be used at all.
+ Dead,
+ /// A normal, live local.
+ /// Mostly for convenience, we re-use the `Operand` type here.
+ /// This is an optimization over just always having a pointer here;
+ /// we can thus avoid doing an allocation when the local just stores
+ /// immediate values *and* never has its address taken.
+ Live(Operand<Prov>),
+}
+
+impl<'tcx, Prov: Provenance + 'static> LocalState<'tcx, Prov> {
+ /// Read the local's value or error if the local is not yet live or not live anymore.
+ ///
+ /// Note: This may only be invoked from the `Machine::access_local` hook and not from
+ /// anywhere else. You may be invalidating machine invariants if you do!
+ #[inline]
+ pub fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
+ match &self.value {
+ LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+ LocalValue::Live(val) => Ok(val),
+ }
+ }
+
+ /// Overwrite the local. If the local can be overwritten in place, return a reference
+ /// to do so; otherwise return the `MemPlace` to consult instead.
+ ///
+ /// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
+ /// anywhere else. You may be invalidating machine invariants if you do!
+ #[inline]
+ pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
+ match &mut self.value {
+ LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
+ LocalValue::Live(val) => Ok(val),
+ }
+ }
+}
+
+impl<'mir, 'tcx, Prov: Provenance> Frame<'mir, 'tcx, Prov> {
+ pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Prov, Extra> {
+ Frame {
+ body: self.body,
+ instance: self.instance,
+ return_to_block: self.return_to_block,
+ return_place: self.return_place,
+ locals: self.locals,
+ loc: self.loc,
+ extra,
+ tracing_span: self.tracing_span,
+ }
+ }
+}
+
+impl<'mir, 'tcx, Prov: Provenance, Extra> Frame<'mir, 'tcx, Prov, Extra> {
+ /// Get the current location within the Frame.
+ ///
+ /// If this is `Err`, we are not currently executing any particular statement in
+ /// this frame (can happen e.g. during frame initialization, and during unwinding on
+ /// frames without cleanup code).
+ /// We basically abuse `Result` as `Either`.
+ ///
+ /// Used by priroda.
+ pub fn current_loc(&self) -> Result<mir::Location, Span> {
+ self.loc
+ }
+
+ /// Return the `SourceInfo` of the current instruction.
+ pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
+ self.loc.ok().map(|loc| self.body.source_info(loc))
+ }
+
+ pub fn current_span(&self) -> Span {
+ match self.loc {
+ Ok(loc) => self.body.source_info(loc).span,
+ Err(span) => span,
+ }
+ }
+}
+
+impl<'tcx> fmt::Display for FrameInfo<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ if tcx.def_key(self.instance.def_id()).disambiguated_data.data
+ == DefPathData::ClosureExpr
+ {
+ write!(f, "inside closure")?;
+ } else {
+ write!(f, "inside `{}`", self.instance)?;
+ }
+ if !self.span.is_dummy() {
+ let sm = tcx.sess.source_map();
+ let lo = sm.lookup_char_pos(self.span.lo());
+ write!(
+ f,
+ " at {}:{}:{}",
+ sm.filename_for_diagnostics(&lo.file.name),
+ lo.line,
+ lo.col.to_usize() + 1
+ )?;
+ }
+ Ok(())
+ })
+ }
+}
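+
+// Sample renderings (paths and positions are placeholders):
+//
+//     inside `core::option::Option::<T>::unwrap` at src/main.rs:3:17
+//     inside closure at src/lib.rs:10:9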
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+ M: Machine<'mir, 'tcx>,
+{
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ *self.tcx
+ }
+}
+
+impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
+where
+ M: Machine<'mir, 'tcx>,
+{
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
+ type LayoutOfResult = InterpResult<'tcx, TyAndLayout<'tcx>>;
+
+ #[inline]
+ fn layout_tcx_at_span(&self) -> Span {
+ // Using the cheap root span for performance.
+ self.tcx.span
+ }
+
+ #[inline]
+ fn handle_layout_err(
+ &self,
+ err: LayoutError<'tcx>,
+ _: Span,
+ _: Ty<'tcx>,
+ ) -> InterpErrorInfo<'tcx> {
+ err_inval!(Layout(err)).into()
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'mir, 'tcx, M> {
+ type FnAbiOfResult = InterpResult<'tcx, &'tcx FnAbi<'tcx, Ty<'tcx>>>;
+
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ _span: Span,
+ _fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> InterpErrorInfo<'tcx> {
+ match err {
+ FnAbiError::Layout(err) => err_inval!(Layout(err)).into(),
+ FnAbiError::AdjustForForeignAbi(err) => {
+ err_inval!(FnAbiAdjustForForeignAbi(err)).into()
+ }
+ }
+ }
+}
+
+/// Test if it is valid for a MIR assignment to assign a `src`-typed value to a `dest`-typed place.
+/// This test should be symmetric, as it is primarily about layout compatibility.
+pub(super) fn mir_assign_valid_types<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ src: TyAndLayout<'tcx>,
+ dest: TyAndLayout<'tcx>,
+) -> bool {
+ // Type-changing assignments can happen when subtyping is used. While
+ // all normal lifetimes are erased, higher-ranked types with their
+ // late-bound lifetimes are still around and can lead to type
+ // differences. So we compare ignoring lifetimes.
+ if equal_up_to_regions(tcx, param_env, src.ty, dest.ty) {
+ // Make sure the layout is equal, too -- just to be safe. Miri really
+ // needs layout equality. For performance reason we skip this check when
+ // the types are equal. Equal types *can* have different layouts when
+ // enum downcast is involved (as enum variants carry the type of the
+ // enum), but those should never occur in assignments.
+ if cfg!(debug_assertions) || src.ty != dest.ty {
+ assert_eq!(src.layout, dest.layout);
+ }
+ true
+ } else {
+ false
+ }
+}
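+
+// For example, `for<'a> fn(&'a u32)` and `fn(&'static u32)` can end up on the
+// two sides of an assignment through subtyping: they still differ after normal
+// lifetimes are erased, but they are layout-identical, so the check accepts
+// the pair.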
+
+/// Use the already known layout if given (but sanity check in debug mode),
+/// or compute the layout.
+#[cfg_attr(not(debug_assertions), inline(always))]
+pub(super) fn from_known_layout<'tcx>(
+ tcx: TyCtxtAt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ known_layout: Option<TyAndLayout<'tcx>>,
+ compute: impl FnOnce() -> InterpResult<'tcx, TyAndLayout<'tcx>>,
+) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+ match known_layout {
+ None => compute(),
+ Some(known_layout) => {
+ if cfg!(debug_assertions) {
+ let check_layout = compute()?;
+ if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
+ span_bug!(
+ tcx.span,
+ "expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
+ known_layout.ty,
+ check_layout.ty,
+ );
+ }
+ }
+ Ok(known_layout)
+ }
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ root_span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+ machine: M,
+ ) -> Self {
+ InterpCx {
+ machine,
+ tcx: tcx.at(root_span),
+ param_env,
+ memory: Memory::new(),
+ recursion_limit: tcx.recursion_limit(),
+ }
+ }
+
+ #[inline(always)]
+ pub fn cur_span(&self) -> Span {
+ // This deliberately does *not* honor `requires_caller_location` since it is used for much
+ // more than just panics.
+ self.stack().last().map_or(self.tcx.span, |f| f.current_span())
+ }
+
+ #[inline(always)]
+ pub(crate) fn stack(&self) -> &[Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>] {
+ M::stack(self)
+ }
+
+ #[inline(always)]
+ pub(crate) fn stack_mut(
+ &mut self,
+ ) -> &mut Vec<Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>> {
+ M::stack_mut(self)
+ }
+
+ #[inline(always)]
+ pub fn frame_idx(&self) -> usize {
+ let stack = self.stack();
+ assert!(!stack.is_empty());
+ stack.len() - 1
+ }
+
+ #[inline(always)]
+ pub fn frame(&self) -> &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
+ self.stack().last().expect("no call frames exist")
+ }
+
+ #[inline(always)]
+ pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::Provenance, M::FrameExtra> {
+ self.stack_mut().last_mut().expect("no call frames exist")
+ }
+
+ #[inline(always)]
+ pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
+ self.frame().body
+ }
+
+ #[inline(always)]
+ pub fn sign_extend(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+ assert!(ty.abi.is_signed());
+ ty.size.sign_extend(value)
+ }
+
+ #[inline(always)]
+ pub fn truncate(&self, value: u128, ty: TyAndLayout<'_>) -> u128 {
+ ty.size.truncate(value)
+ }
+
+ #[inline]
+ pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_freeze(self.tcx, self.param_env)
+ }
+
+ pub fn load_mir(
+ &self,
+ instance: ty::InstanceDef<'tcx>,
+ promoted: Option<mir::Promoted>,
+ ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+ let def = instance.with_opt_param();
+ trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
+ let body = if let Some(promoted) = promoted {
+ &self.tcx.promoted_mir_opt_const_arg(def)[promoted]
+ } else {
+ M::load_mir(self, instance)?
+ };
+ // do not continue if typeck errors occurred (can only occur in local crate)
+ if let Some(err) = body.tainted_by_errors {
+ throw_inval!(AlreadyReported(err));
+ }
+ Ok(body)
+ }
+
+ /// Call this on things you got out of the MIR (so it is as generic as the current
+ /// stack frame), to bring it into the proper environment for this interpreter.
+ pub(super) fn subst_from_current_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+ &self,
+ value: T,
+ ) -> Result<T, InterpError<'tcx>> {
+ self.subst_from_frame_and_normalize_erasing_regions(self.frame(), value)
+ }
+
+ /// Call this on things you got out of the MIR (so it is as generic as the provided
+ /// stack frame), to bring it into the proper environment for this interpreter.
+ pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
+ &self,
+ frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+ value: T,
+ ) -> Result<T, InterpError<'tcx>> {
+ frame
+ .instance
+ .try_subst_mir_and_normalize_erasing_regions(*self.tcx, self.param_env, value)
+ .map_err(|e| {
+ self.tcx.sess.delay_span_bug(
+ self.cur_span(),
+ format!("failed to normalize {}", e.get_type_for_failure()).as_str(),
+ );
+
+ InterpError::InvalidProgram(InvalidProgramInfo::TooGeneric)
+ })
+ }
+
+ /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
+ pub(super) fn resolve(
+ &self,
+ def: ty::WithOptConstParam<DefId>,
+ substs: SubstsRef<'tcx>,
+ ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
+ trace!("resolve: {:?}, {:#?}", def, substs);
+ trace!("param_env: {:#?}", self.param_env);
+ trace!("substs: {:#?}", substs);
+ match ty::Instance::resolve_opt_const_arg(*self.tcx, self.param_env, def, substs) {
+ Ok(Some(instance)) => Ok(instance),
+ Ok(None) => throw_inval!(TooGeneric),
+
+ // FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
+ Err(error_reported) => throw_inval!(AlreadyReported(error_reported)),
+ }
+ }
+
+ #[inline(always)]
+ pub fn layout_of_local(
+ &self,
+ frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+ local: mir::Local,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
+ // `const_prop` runs into this with an invalid (empty) frame, so we
+ // have to support that case (mostly by skipping all caching).
+ match frame.locals.get(local).and_then(|state| state.layout.get()) {
+ None => {
+ let layout = from_known_layout(self.tcx, self.param_env, layout, || {
+ let local_ty = frame.body.local_decls[local].ty;
+ let local_ty =
+ self.subst_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
+ self.layout_of(local_ty)
+ })?;
+ if let Some(state) = frame.locals.get(local) {
+ // Layouts of locals are requested a lot, so we cache them.
+ state.layout.set(Some(layout));
+ }
+ Ok(layout)
+ }
+ Some(layout) => Ok(layout),
+ }
+ }
+
+ /// Returns the actual dynamic size and alignment of the place at the given type.
+ /// Only the "meta" (metadata) part of the place matters.
+ /// This can fail to provide an answer for extern types.
+ pub(super) fn size_and_align_of(
+ &self,
+ metadata: &MemPlaceMeta<M::Provenance>,
+ layout: &TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+ if !layout.is_unsized() {
+ return Ok(Some((layout.size, layout.align.abi)));
+ }
+ match layout.ty.kind() {
+ ty::Adt(..) | ty::Tuple(..) => {
+ // First get the size of all statically known fields.
+                // Don't use type_of::sizing_type_of because that expects the type to be sized,
+ // and it also rounds up to alignment, which we want to avoid,
+ // as the unsized field's alignment could be smaller.
+ assert!(!layout.ty.is_simd());
+ assert!(layout.fields.count() > 0);
+ trace!("DST layout: {:?}", layout);
+
+ let sized_size = layout.fields.offset(layout.fields.count() - 1);
+ let sized_align = layout.align.abi;
+ trace!(
+ "DST {} statically sized prefix size: {:?} align: {:?}",
+ layout.ty,
+ sized_size,
+ sized_align
+ );
+
+ // Recurse to get the size of the dynamically sized field (must be
+ // the last field). Can't have foreign types here, how would we
+ // adjust alignment and size for them?
+ let field = layout.field(self, layout.fields.count() - 1);
+ let Some((unsized_size, unsized_align)) = self.size_and_align_of(metadata, &field)? else {
+ // A field with an extern type. We don't know the actual dynamic size
+ // or the alignment.
+ return Ok(None);
+ };
+
+ // FIXME (#26403, #27023): We should be adding padding
+ // to `sized_size` (to accommodate the `unsized_align`
+ // required of the unsized field that follows) before
+ // summing it with `sized_size`. (Note that since #26403
+ // is unfixed, we do not yet add the necessary padding
+ // here. But this is where the add would go.)
+
+ // Return the sum of sizes and max of aligns.
+ let size = sized_size + unsized_size; // `Size` addition
+
+ // Choose max of two known alignments (combined value must
+ // be aligned according to more restrictive of the two).
+ let align = sized_align.max(unsized_align);
+
+ // Issue #27023: must add any necessary padding to `size`
+ // (to make it a multiple of `align`) before returning it.
+ let size = size.align_to(align);
+
+ // Check if this brought us over the size limit.
+ if size > self.max_size_of_val() {
+ throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
+ }
+ Ok(Some((size, align)))
+ }
+ ty::Dynamic(..) => {
+ let vtable = metadata.unwrap_meta().to_pointer(self)?;
+ // Read size and align from vtable (already checks size).
+ Ok(Some(self.get_vtable_size_and_align(vtable)?))
+ }
+
+ ty::Slice(_) | ty::Str => {
+ let len = metadata.unwrap_meta().to_machine_usize(self)?;
+ let elem = layout.field(self, 0);
+
+ // Make sure the slice is not too big.
+ let size = elem.size.bytes().saturating_mul(len); // we rely on `max_size_of_val` being smaller than `u64::MAX`.
+ let size = Size::from_bytes(size);
+ if size > self.max_size_of_val() {
+ throw_ub!(InvalidMeta("slice is bigger than largest supported object"));
+ }
+ Ok(Some((size, elem.align.abi)))
+ }
+
+ ty::Foreign(_) => Ok(None),
+
+ _ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
+ }
+ }
+ #[inline]
+ pub fn size_and_align_of_mplace(
+ &self,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, Option<(Size, Align)>> {
+ self.size_and_align_of(&mplace.meta, &mplace.layout)
+ }
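+
+    // Worked example for the `Adt`/`Tuple` branch above, checkable from plain
+    // Rust via `std::mem` (the struct is hypothetical):
+    //
+    //     struct Dst<T: ?Sized> { a: u8, tail: T }
+    //     let v: &Dst<[u32]> = &Dst { a: 0, tail: [1, 2, 3] };
+    //     // sized prefix: 4 bytes (`a` plus padding up to the tail's alignment);
+    //     // unsized tail: 3 * 4 bytes; the sum 16 is already 4-aligned.
+    //     assert_eq!(std::mem::size_of_val(v), 16);
+    //     assert_eq!(std::mem::align_of_val(v), 4);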
+
+ #[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
+ pub fn push_stack_frame(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+ return_place: &PlaceTy<'tcx, M::Provenance>,
+ return_to_block: StackPopCleanup,
+ ) -> InterpResult<'tcx> {
+ trace!("body: {:#?}", body);
+ // first push a stack frame so we have access to the local substs
+ let pre_frame = Frame {
+ body,
+ loc: Err(body.span), // Span used for errors caused during preamble.
+ return_to_block,
+ return_place: return_place.clone(),
+ // empty local array, we fill it in below, after we are inside the stack frame and
+ // all methods actually know about the frame
+ locals: IndexVec::new(),
+ instance,
+ tracing_span: SpanGuard::new(),
+ extra: (),
+ };
+ let frame = M::init_frame_extra(self, pre_frame)?;
+ self.stack_mut().push(frame);
+
+ // Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
+ for const_ in &body.required_consts {
+ let span = const_.span;
+ let const_ =
+ self.subst_from_current_frame_and_normalize_erasing_regions(const_.literal)?;
+ self.mir_const_to_op(&const_, None).map_err(|err| {
+                // If there was an error, set the span of the current frame to this constant.
+                // We avoid doing this when evaluation succeeds.
+ self.frame_mut().loc = Err(span);
+ err
+ })?;
+ }
+
+ // Most locals are initially dead.
+ let dummy = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
+ let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
+
+ // Now mark those locals as live that have no `Storage*` annotations.
+ let always_live = always_storage_live_locals(self.body());
+ for local in locals.indices() {
+ if always_live.contains(local) {
+ locals[local].value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+ }
+ }
+ // done
+ self.frame_mut().locals = locals;
+ M::after_stack_push(self)?;
+ self.frame_mut().loc = Ok(mir::Location::START);
+
+ let span = info_span!("frame", "{}", instance);
+ self.frame_mut().tracing_span.enter(span);
+
+ Ok(())
+ }
+
+ /// Jump to the given block.
+ #[inline]
+ pub fn go_to_block(&mut self, target: mir::BasicBlock) {
+ self.frame_mut().loc = Ok(mir::Location { block: target, statement_index: 0 });
+ }
+
+ /// *Return* to the given `target` basic block.
+ /// Do *not* use for unwinding! Use `unwind_to_block` instead.
+ ///
+ /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
+ pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
+ if let Some(target) = target {
+ self.go_to_block(target);
+ Ok(())
+ } else {
+ throw_ub!(Unreachable)
+ }
+ }
+
+ /// *Unwind* to the given `target` basic block.
+ /// Do *not* use for returning! Use `return_to_block` instead.
+ ///
+ /// If `target` is `StackPopUnwind::Skip`, that indicates the function does not need cleanup
+ /// during unwinding, and we will just keep propagating that upwards.
+ ///
+ /// If `target` is `StackPopUnwind::NotAllowed`, that indicates the function does not allow
+ /// unwinding, and doing so is UB.
+ pub fn unwind_to_block(&mut self, target: StackPopUnwind) -> InterpResult<'tcx> {
+ self.frame_mut().loc = match target {
+ StackPopUnwind::Cleanup(block) => Ok(mir::Location { block, statement_index: 0 }),
+ StackPopUnwind::Skip => Err(self.frame_mut().body.span),
+ StackPopUnwind::NotAllowed => {
+ throw_ub_format!("unwinding past a stack frame that does not allow unwinding")
+ }
+ };
+ Ok(())
+ }
+
+ /// Pops the current frame from the stack, deallocating the
+ /// memory for allocated locals.
+ ///
+ /// If `unwinding` is `false`, then we are performing a normal return
+ /// from a function. In this case, we jump back into the frame of the caller,
+ /// and continue execution as normal.
+ ///
+ /// If `unwinding` is `true`, then we are in the middle of a panic,
+ /// and need to unwind this frame. In this case, we jump to the
+ /// `cleanup` block for the function, which is responsible for running
+ /// `Drop` impls for any locals that have been initialized at this point.
+ /// The cleanup block ends with a special `Resume` terminator, which will
+ /// cause us to continue unwinding.
+ #[instrument(skip(self), level = "debug")]
+ pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
+ info!(
+ "popping stack frame ({})",
+ if unwinding { "during unwinding" } else { "returning from function" }
+ );
+
+ // Check `unwinding`.
+ assert_eq!(
+ unwinding,
+ match self.frame().loc {
+ Ok(loc) => self.body().basic_blocks()[loc.block].is_cleanup,
+ Err(_) => true,
+ }
+ );
+ if unwinding && self.frame_idx() == 0 {
+ throw_ub_format!("unwinding past the topmost frame of the stack");
+ }
+
+ // Copy return value. Must of course happen *before* we deallocate the locals.
+ let copy_ret_result = if !unwinding {
+ let op = self
+ .local_to_op(self.frame(), mir::RETURN_PLACE, None)
+ .expect("return place should always be live");
+ let dest = self.frame().return_place.clone();
+ let err = self.copy_op(&op, &dest, /*allow_transmute*/ true);
+ trace!("return value: {:?}", self.dump_place(*dest));
+ // We delay actually short-circuiting on this error until *after* the stack frame is
+ // popped, since we want this error to be attributed to the caller, whose type defines
+ // this transmute.
+ err
+ } else {
+ Ok(())
+ };
+
+ // Cleanup: deallocate locals.
+ // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
+ // We do this while the frame is still on the stack, so errors point to the callee.
+ let return_to_block = self.frame().return_to_block;
+ let cleanup = match return_to_block {
+ StackPopCleanup::Goto { .. } => true,
+ StackPopCleanup::Root { cleanup, .. } => cleanup,
+ };
+ if cleanup {
+ // We need to take the locals out, since we need to mutate while iterating.
+ let locals = mem::take(&mut self.frame_mut().locals);
+ for local in &locals {
+ self.deallocate_local(local.value)?;
+ }
+ }
+
+ // All right, now it is time to actually pop the frame.
+ // Note that its locals are gone already, but that's fine.
+ let frame =
+ self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
+ // Report error from return value copy, if any.
+ copy_ret_result?;
+
+ // If we are not doing cleanup, also skip everything else.
+ if !cleanup {
+ assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
+ assert!(!unwinding, "tried to skip cleanup during unwinding");
+ // Skip machine hook.
+ return Ok(());
+ }
+ if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
+ // The hook already did everything.
+ return Ok(());
+ }
+
+ // Normal return, figure out where to jump.
+ if unwinding {
+ // Follow the unwind edge.
+ let unwind = match return_to_block {
+ StackPopCleanup::Goto { unwind, .. } => unwind,
+ StackPopCleanup::Root { .. } => {
+ panic!("encountered StackPopCleanup::Root when unwinding!")
+ }
+ };
+ self.unwind_to_block(unwind)
+ } else {
+ // Follow the normal return edge.
+ match return_to_block {
+ StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
+ StackPopCleanup::Root { .. } => {
+ assert!(
+ self.stack().is_empty(),
+ "only the topmost frame can have StackPopCleanup::Root"
+ );
+ Ok(())
+ }
+ }
+ }
+ }
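+
+    // The cleanup-block semantics above, observed from surface Rust (sketch):
+    // unwinding drops every initialized local before propagating the panic.
+    //
+    //     struct Noisy;
+    //     impl Drop for Noisy {
+    //         fn drop(&mut self) { println!("cleanup ran"); }
+    //     }
+    //
+    //     fn f() {
+    //         let _n = Noisy;
+    //         panic!("boom"); // prints "cleanup ran" while unwinding `f`'s frame
+    //     }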
+
+ /// Mark a storage as live, killing the previous content.
+ pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+ assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
+ trace!("{:?} is now live", local);
+
+ let local_val = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
+ // StorageLive expects the local to be dead, and marks it live.
+ let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
+ if !matches!(old, LocalValue::Dead) {
+ throw_ub_format!("StorageLive on a local that was already live");
+ }
+ Ok(())
+ }
+
+ pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
+ assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
+ trace!("{:?} is now dead", local);
+
+ // It is entirely okay for this local to be already dead (at least that's how we currently generate MIR)
+ let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
+ self.deallocate_local(old)?;
+ Ok(())
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
+ if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
+ // All locals have a backing allocation, even if the allocation is empty
+ // due to the local having ZST type. Hence we can `unwrap`.
+ trace!(
+ "deallocating local {:?}: {:?}",
+ local,
+                // Locals always have an `alloc_id` (they are never the result of an int2ptr).
+ self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
+ );
+ self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
+ };
+ Ok(())
+ }
+
+ pub fn eval_to_allocation(
+ &self,
+ gid: GlobalId<'tcx>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
+ // and thus don't care about the parameter environment. While we could just use
+ // `self.param_env`, that would mean we invoke the query to evaluate the static
+ // with different parameter environments, thus causing the static to be evaluated
+ // multiple times.
+ let param_env = if self.tcx.is_static(gid.instance.def_id()) {
+ ty::ParamEnv::reveal_all()
+ } else {
+ self.param_env
+ };
+ let param_env = param_env.with_const();
+ // Use a precise span for better cycle errors.
+ let val = self.tcx.at(self.cur_span()).eval_to_allocation_raw(param_env.and(gid))?;
+ self.raw_const_to_mplace(val)
+ }
+
+ #[must_use]
+ pub fn dump_place(&self, place: Place<M::Provenance>) -> PlacePrinter<'_, 'mir, 'tcx, M> {
+ PlacePrinter { ecx: self, place }
+ }
+
+ #[must_use]
+ pub fn generate_stacktrace(&self) -> Vec<FrameInfo<'tcx>> {
+ let mut frames = Vec::new();
+ // This deliberately does *not* honor `requires_caller_location` since it is used for much
+ // more than just panics.
+ for frame in self.stack().iter().rev() {
+ let lint_root = frame.current_source_info().and_then(|source_info| {
+ match &frame.body.source_scopes[source_info.scope].local_data {
+ mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
+ mir::ClearCrossCrate::Clear => None,
+ }
+ });
+ let span = frame.current_span();
+
+ frames.push(FrameInfo { span, instance: frame.instance, lint_root });
+ }
+ trace!("generate stacktrace: {:#?}", frames);
+ frames
+ }
+}
+
+#[doc(hidden)]
+/// Helper struct for the `dump_place` function.
+pub struct PlacePrinter<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+ ecx: &'a InterpCx<'mir, 'tcx, M>,
+ place: Place<M::Provenance>,
+}
+
+impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
+ for PlacePrinter<'a, 'mir, 'tcx, M>
+{
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self.place {
+ Place::Local { frame, local } => {
+ let mut allocs = Vec::new();
+ write!(fmt, "{:?}", local)?;
+ if frame != self.ecx.frame_idx() {
+ write!(fmt, " ({} frames up)", self.ecx.frame_idx() - frame)?;
+ }
+ write!(fmt, ":")?;
+
+ match self.ecx.stack()[frame].locals[local].value {
+ LocalValue::Dead => write!(fmt, " is dead")?,
+ LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => {
+ write!(fmt, " is uninitialized")?
+ }
+ LocalValue::Live(Operand::Indirect(mplace)) => {
+ write!(
+ fmt,
+ " by {} ref {:?}:",
+ match mplace.meta {
+ MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
+ MemPlaceMeta::None => String::new(),
+ },
+ mplace.ptr,
+ )?;
+ allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
+ }
+ LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
+ write!(fmt, " {:?}", val)?;
+ if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val {
+ allocs.push(ptr.provenance.get_alloc_id());
+ }
+ }
+ LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
+ write!(fmt, " ({:?}, {:?})", val1, val2)?;
+ if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val1 {
+ allocs.push(ptr.provenance.get_alloc_id());
+ }
+ if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val2 {
+ allocs.push(ptr.provenance.get_alloc_id());
+ }
+ }
+ }
+
+ write!(fmt, ": {:?}", self.ecx.dump_allocs(allocs.into_iter().flatten().collect()))
+ }
+ Place::Ptr(mplace) => match mplace.ptr.provenance.and_then(Provenance::get_alloc_id) {
+ Some(alloc_id) => {
+ write!(fmt, "by ref {:?}: {:?}", mplace.ptr, self.ecx.dump_alloc(alloc_id))
+ }
+ ptr => write!(fmt, " integral by ref: {:?}", ptr),
+ },
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
new file mode 100644
index 000000000..376b8872c
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -0,0 +1,486 @@
+//! This module specifies the type-based interner for constants.
+//!
+//! After a const evaluation has computed a value, before we destroy the const evaluator's session
+//! memory, we need to extract all memory allocations to the global memory pool so they stay around.
+//!
+//! In principle, this is not very complicated: we recursively walk the final value, follow all the
+//! pointers, and move all reachable allocations to the global `tcx` memory. The only complication
+//! is picking the right mutability for the allocations in a `static` initializer: we want to make
+//! as many allocations as possible immutable so LLVM can put them into read-only memory. At the
+//! same time, we need to make memory that could be mutated by the program mutable to avoid
+//! incorrect compilations. To achieve this, we do a type-based traversal of the final value,
+//! tracking mutable and shared references and `UnsafeCell` to determine the current mutability.
+//! (In principle, we could skip this type-based part for `const` and promoteds, as they need to be
+//! always immutable. At least for `const` however we use this opportunity to reject any `const`
+//! that contains allocations whose mutability we cannot identify.)
+
+use super::validity::RefTracking;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
+
+use rustc_ast::Mutability;
+
+use super::{
+ AllocId, Allocation, ConstAllocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy,
+ ValueVisitor,
+};
+use crate::const_eval;
+
+pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
+ 'mir,
+ 'tcx,
+ MemoryKind = T,
+ Provenance = AllocId,
+ ExtraFnVal = !,
+ FrameExtra = (),
+ AllocExtra = (),
+ MemoryMap = FxHashMap<AllocId, (MemoryKind<T>, Allocation)>,
+>;
+
+struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> {
+ /// The ectx from which we intern.
+ ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+ /// Previously encountered safe references.
+ ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, InternMode)>,
+ /// A list of all encountered allocations. After type-based interning, we traverse this list to
+ /// also intern allocations that are only referenced by a raw pointer or inside a union.
+ leftover_allocations: &'rt mut FxHashSet<AllocId>,
+ /// The root kind of the value that we're looking at. This field is never mutated for a
+ /// particular allocation. It is primarily used to make as many allocations as possible
+ /// read-only so LLVM can place them in const memory.
+ mode: InternMode,
+ /// This field stores whether we are *currently* inside an `UnsafeCell`. This can affect
+ /// the intern mode of references we encounter.
+ inside_unsafe_cell: bool,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
+enum InternMode {
+ /// A static and its current mutability. Below shared references inside a `static mut`,
+ /// this is *immutable*, and below mutable references inside an `UnsafeCell`, this
+ /// is *mutable*.
+ Static(hir::Mutability),
+ /// A `const`.
+ Const,
+}
+
+/// Signalling data structure to ensure we don't recurse
+/// into the memory of other constants or statics
+struct IsStaticOrFn;
+
+/// Intern an allocation without looking at its children.
+/// `mode` is the mode of the environment where we found this pointer.
+/// `mutability` is the mutability of the place to be interned; even if that says
+/// `immutable` things might become mutable if `ty` is not frozen.
+/// `ty` can be `None` if there is no potential interior mutability
+/// to account for (e.g. for vtables).
+fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>(
+ ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
+ leftover_allocations: &'rt mut FxHashSet<AllocId>,
+ alloc_id: AllocId,
+ mode: InternMode,
+ ty: Option<Ty<'tcx>>,
+) -> Option<IsStaticOrFn> {
+ trace!("intern_shallow {:?} with {:?}", alloc_id, mode);
+ // remove allocation
+ let tcx = ecx.tcx;
+ let Some((kind, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) else {
+ // Pointer not found in local memory map. It is either a pointer to the global
+ // map, or dangling.
+ // If the pointer is dangling (neither in local nor global memory), we leave it
+ // to validation to error -- it has the much better error messages, pointing out where
+ // in the value the dangling reference lies.
+ // The `delay_span_bug` ensures that we don't forget such a check in validation.
+ if tcx.try_get_global_alloc(alloc_id).is_none() {
+ tcx.sess.delay_span_bug(ecx.tcx.span, "tried to intern dangling pointer");
+ }
+ // treat dangling pointers like other statics
+ // just to stop trying to recurse into them
+ return Some(IsStaticOrFn);
+ };
+ // This match is just a canary for future changes to `MemoryKind`, which most likely need
+ // changes in this function.
+ match kind {
+ MemoryKind::Stack
+ | MemoryKind::Machine(const_eval::MemoryKind::Heap)
+ | MemoryKind::CallerLocation => {}
+ }
+ // Set allocation mutability as appropriate. This is used by LLVM to put things into
+ // read-only memory, and also by Miri when evaluating other globals that
+ // access this one.
+ if let InternMode::Static(mutability) = mode {
+ // For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume
+ // no interior mutability.
+ let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx, ecx.param_env));
+ // For statics, allocation mutability is the combination of place mutability and
+ // type mutability.
+ // The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere.
+ let immutable = mutability == Mutability::Not && frozen;
+ if immutable {
+ alloc.mutability = Mutability::Not;
+ } else {
+ // Just making sure we are not "upgrading" an immutable allocation to mutable.
+ assert_eq!(alloc.mutability, Mutability::Mut);
+ }
+ } else {
+ // No matter what, *constants are never mutable*. Mutating them is UB.
+ // See const_eval::machine::MemoryExtra::can_access_statics for why
+ // immutability is so important.
+
+ // Validation will ensure that there is no `UnsafeCell` on an immutable allocation.
+ alloc.mutability = Mutability::Not;
+ };
+ // link the alloc id to the actual allocation
+ leftover_allocations.extend(alloc.relocations().iter().map(|&(_, alloc_id)| alloc_id));
+ let alloc = tcx.intern_const_alloc(alloc);
+ tcx.set_alloc_id_memory(alloc_id, alloc);
+ None
+}
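+
+// For example (illustrative), in `static` initializers:
+//
+//     static FROZEN: u32 = 5;                        // frozen: interned read-only
+//     static COUNTER: AtomicU32 = AtomicU32::new(0); // contains `UnsafeCell`: stays mutable
+//
+// `AtomicU32` is not `Freeze`, so `intern_shallow` keeps its allocation
+// mutable, while `FROZEN`'s allocation can be placed in read-only memory.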
+
+impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
+ InternVisitor<'rt, 'mir, 'tcx, M>
+{
+ fn intern_shallow(
+ &mut self,
+ alloc_id: AllocId,
+ mode: InternMode,
+ ty: Option<Ty<'tcx>>,
+ ) -> Option<IsStaticOrFn> {
+ intern_shallow(self.ecx, self.leftover_allocations, alloc_id, mode, ty)
+ }
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
+ ValueVisitor<'mir, 'tcx, M> for InternVisitor<'rt, 'mir, 'tcx, M>
+{
+ type V = MPlaceTy<'tcx>;
+
+ #[inline(always)]
+ fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+ &self.ecx
+ }
+
+ fn visit_aggregate(
+ &mut self,
+ mplace: &MPlaceTy<'tcx>,
+ fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
+ // We want to walk the aggregate to look for references to intern. While doing that we
+ // also need to take special care of interior mutability.
+ //
+ // As an optimization, however, if the allocation does not contain any references,
+ // we don't need to do the walk. It can be costly for big arrays (e.g. issue #93215).
+ let is_walk_needed = |mplace: &MPlaceTy<'tcx>| -> InterpResult<'tcx, bool> {
+ // ZSTs cannot contain pointers, so we can avoid the interning walk.
+ if mplace.layout.is_zst() {
+ return Ok(false);
+ }
+
+ // Now, check whether this allocation could contain references.
+ //
+ // Note that this check may sometimes not be cheap, so we only do it when the walk
+ // we'd like to avoid could be expensive: on the potentially larger types, arrays
+ // and slices, rather than on all aggregates unconditionally.
+ if matches!(mplace.layout.ty.kind(), ty::Array(..) | ty::Slice(..)) {
+ let Some((size, align)) = self.ecx.size_and_align_of_mplace(&mplace)? else {
+ // We do the walk if we can't determine the size of the mplace: we may be
+ // dealing with extern types here in the future.
+ return Ok(true);
+ };
+
+ // If there are no relocations in this allocation, it does not contain references
+ // that point to another allocation, and we can avoid the interning walk.
+ if let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, align)? {
+ if !alloc.has_relocations() {
+ return Ok(false);
+ }
+ } else {
+ // We're encountering a ZST here, and can avoid the walk as well.
+ return Ok(false);
+ }
+ }
+
+ // In the general case, we do the walk.
+ Ok(true)
+ };
+
+ // If this allocation contains no references to intern, we avoid the potentially costly
+ // walk.
+ //
+ // We can do this before the checks for interior mutability below, because only references
+ // are relevant in that situation, and we're checking if there are any here.
+ if !is_walk_needed(mplace)? {
+ return Ok(());
+ }
+
+ if let Some(def) = mplace.layout.ty.ty_adt_def() {
+ if def.is_unsafe_cell() {
+ // We are crossing over an `UnsafeCell`, so we can mutate again. This means that
+ // references we encounter inside here are interned as pointing to mutable
+ // allocations.
+ // Remember the `old` value to handle nested `UnsafeCell`.
+ let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
+ let walked = self.walk_aggregate(mplace, fields);
+ self.inside_unsafe_cell = old;
+ return walked;
+ }
+ }
+
+ self.walk_aggregate(mplace, fields)
+ }
+
+ fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
+ // Handle reference types, as these are the only relocations supported by const eval.
+ // Raw pointers (and boxes) are handled by the `leftover_allocations` logic.
+ let tcx = self.ecx.tcx;
+ let ty = mplace.layout.ty;
+ if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
+ let value = self.ecx.read_immediate(&mplace.into())?;
+ let mplace = self.ecx.ref_to_mplace(&value)?;
+ assert_eq!(mplace.layout.ty, referenced_ty);
+ // Handle trait object vtables.
+ if let ty::Dynamic(..) =
+ tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
+ {
+ let ptr = mplace.meta.unwrap_meta().to_pointer(&tcx)?;
+ if let Some(alloc_id) = ptr.provenance {
+ // Explicitly choose const mode here, since vtables are immutable, even
+ // if the reference of the fat pointer is mutable.
+ self.intern_shallow(alloc_id, InternMode::Const, None);
+ } else {
+ // Validation will error (with a better message) on an invalid vtable pointer.
+ // Let validation show the error message, but make sure it *does* error.
+ tcx.sess
+ .delay_span_bug(tcx.span, "vtable pointers cannot be integer pointers");
+ }
+ }
+ // Check if we have encountered this pointer+layout combination before.
+ // Only recurse for allocation-backed pointers.
+ if let Some(alloc_id) = mplace.ptr.provenance {
+ // Compute the mode with which we intern this. Our goal here is to make as many
+ // statics as we can immutable so they can be placed in read-only memory by LLVM.
+ let ref_mode = match self.mode {
+ InternMode::Static(mutbl) => {
+ // In statics, merge outer mutability with reference mutability and
+ // take into account whether we are in an `UnsafeCell`.
+
+ // The only way a mutable reference actually works as a mutable reference is
+ // by being in a `static mut` directly or behind another mutable reference.
+ // If there's an immutable reference or we are inside a `static`, then our
+ // mutable reference is equivalent to an immutable one. As an example:
+ // `&&mut Foo` is semantically equivalent to `&&Foo`
+ match ref_mutability {
+ _ if self.inside_unsafe_cell => {
+ // Inside an `UnsafeCell` is like inside a `static mut`, the "outer"
+ // mutability does not matter.
+ InternMode::Static(ref_mutability)
+ }
+ Mutability::Not => {
+ // A shared reference: things become immutable.
+ // We do *not* consider `freeze` here: `intern_shallow` considers
+ // `freeze` for the actual mutability of this allocation; the intern
+ // mode for references contained in this allocation is tracked more
+ // precisely when traversing the referenced data (by tracking
+ // `UnsafeCell`). This makes sure that `&(&i32, &Cell<i32>)` still
+ // has the left inner reference interned into a read-only
+ // allocation.
+ InternMode::Static(Mutability::Not)
+ }
+ Mutability::Mut => {
+ // Mutable reference.
+ InternMode::Static(mutbl)
+ }
+ }
+ }
+ InternMode::Const => {
+ // Ignore `UnsafeCell`, everything is immutable. Validity does some sanity
+ // checking for mutable references that we encounter -- they must all be
+ // ZST.
+ InternMode::Const
+ }
+ };
+ match self.intern_shallow(alloc_id, ref_mode, Some(referenced_ty)) {
+ // No need to recurse: these are interned already, and statics may have
+ // cycles, so we don't want to recurse into them.
+ Some(IsStaticOrFn) => {}
+ // Intern everything referenced by this value. The mutability is taken from
+ // the reference; it is checked above that mutable references only occur
+ // inside a `static mut`.
+ None => self.ref_tracking.track((mplace, ref_mode), || ()),
+ }
+ }
+ Ok(())
+ } else {
+ // Not a reference -- proceed recursively.
+ self.walk_value(mplace)
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
+pub enum InternKind {
+ /// The `mutability` of the static, ignoring the type which may have interior mutability.
+ Static(hir::Mutability),
+ Constant,
+ Promoted,
+}
+
+/// Intern `ret` and everything it references.
+///
+/// This *cannot raise an interpreter error*. Doing so is left to validation, which
+/// tracks where in the value we are and thus can show much better error messages.
+/// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
+/// are hard errors.
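+///
+/// For example (illustrative, not from the original docs): for
+/// `static FOO: &i32 = &42;`, both the outer allocation and the one holding
+/// `42` are interned through the typed walk below, while allocations only
+/// reachable through raw pointers end up in `leftover_allocations`.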
+#[tracing::instrument(level = "debug", skip(ecx))]
+pub fn intern_const_alloc_recursive<
+ 'mir,
+ 'tcx: 'mir,
+ M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>,
+>(
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ intern_kind: InternKind,
+ ret: &MPlaceTy<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let tcx = ecx.tcx;
+ let base_intern_mode = match intern_kind {
+ InternKind::Static(mutbl) => InternMode::Static(mutbl),
+ // `Constant` includes array lengths.
+ InternKind::Constant | InternKind::Promoted => InternMode::Const,
+ };
+
+ // Type based interning.
+ // `ref_tracking` tracks typed references we have already interned and still need to crawl for
+ // more typed information inside them.
+ // `leftover_allocations` collects *all* allocations we see, because some might not
+ // be available in a typed way. They get interned at the end.
+ let mut ref_tracking = RefTracking::empty();
+ let leftover_allocations = &mut FxHashSet::default();
+
+ // start with the outermost allocation
+ intern_shallow(
+ ecx,
+ leftover_allocations,
+ // The outermost allocation must exist, because we allocated it with
+ // `Memory::allocate`.
+ ret.ptr.provenance.unwrap(),
+ base_intern_mode,
+ Some(ret.layout.ty),
+ );
+
+ ref_tracking.track((*ret, base_intern_mode), || ());
+
+ while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
+ let res = InternVisitor {
+ ref_tracking: &mut ref_tracking,
+ ecx,
+ mode,
+ leftover_allocations,
+ inside_unsafe_cell: false,
+ }
+ .visit_value(&mplace);
+ // We deliberately *ignore* interpreter errors here. When there is a problem, the remaining
+ // references are "leftover"-interned, and later validation will show a proper error
+ // and point at the right part of the value causing the problem.
+ match res {
+ Ok(()) => {}
+ Err(error) => {
+ ecx.tcx.sess.delay_span_bug(
+ ecx.tcx.span,
+ &format!(
+ "error during interning should later cause validation failure: {}",
+ error
+ ),
+ );
+ }
+ }
+ }
+
+ // Intern the rest of the allocations as mutable. These might be inside unions,
+ // padding, raw pointers, etc., so we can't intern them according to their type rules.
+
+ let mut todo: Vec<_> = leftover_allocations.iter().cloned().collect();
+ debug!(?todo);
+ debug!("dead_alloc_map: {:#?}", ecx.memory.dead_alloc_map);
+ while let Some(alloc_id) = todo.pop() {
+ if let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) {
+ // We can't call the `intern_shallow` method here, as its logic is tailored to safe
+ // references and a `leftover_allocations` set (whereas here we only have a todo list).
+ // So we hand-roll the interning logic here again.
+ match intern_kind {
+ // Statics may contain mutable allocations even behind relocations.
+ // Even for immutable statics it would be ok to have mutable allocations behind
+ // raw pointers, e.g. for `static FOO: *const AtomicUsize = &AtomicUsize::new(42)`.
+ InternKind::Static(_) => {}
+ // Raw pointers in promoteds may only point to immutable things, so we mark
+ // everything as immutable.
+ // It is UB to mutate through a raw pointer obtained via an immutable reference:
+ // since all references and pointers inside a promoted must, by their very
+ // definition, be created from an immutable reference (and promotion also
+ // excludes interior mutability), mutating through them would be UB.
+ // There's no way we can check whether the user is using raw pointers correctly,
+ // so all we can do is mark this as immutable here.
+ InternKind::Promoted => {
+ // See const_eval::machine::MemoryExtra::can_access_statics for why
+ // immutability is so important.
+ alloc.mutability = Mutability::Not;
+ }
+ InternKind::Constant => {
+ // If it's a constant, we should not have any "leftovers" as everything
+ // is tracked by const-checking.
+ // FIXME: downgrade this to a warning? It rejects some legitimate consts,
+ // such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
+ ecx.tcx
+ .sess
+ .span_err(ecx.tcx.span, "untyped pointers are not allowed in constant");
+ // For better errors later, mark the allocation as immutable.
+ alloc.mutability = Mutability::Not;
+ }
+ }
+ let alloc = tcx.intern_const_alloc(alloc);
+ tcx.set_alloc_id_memory(alloc_id, alloc);
+ for &(_, alloc_id) in alloc.inner().relocations().iter() {
+ if leftover_allocations.insert(alloc_id) {
+ todo.push(alloc_id);
+ }
+ }
+ } else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
+ // Codegen does not like dangling pointers, and generally `tcx` assumes that
+ // all allocations referenced anywhere actually exist. So, make sure we error here.
+ let reported = ecx
+ .tcx
+ .sess
+ .span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
+ return Err(reported);
+ } else if ecx.tcx.try_get_global_alloc(alloc_id).is_none() {
+ // We have hit an `AllocId` that is neither in local nor global memory and isn't
+ // marked as dangling by local memory. That should be impossible.
+ span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
+ }
+ }
+ Ok(())
+}
+
+impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
+ InterpCx<'mir, 'tcx, M>
+{
+ /// A helper function that allocates memory for the layout given and gives you access to mutate
+ /// it. Once your own mutation code is done, the backing `Allocation` is removed from the
+ /// current `Memory` and returned.
+ pub fn intern_with_temp_alloc(
+ &mut self,
+ layout: TyAndLayout<'tcx>,
+ f: impl FnOnce(
+ &mut InterpCx<'mir, 'tcx, M>,
+ &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ()>,
+ ) -> InterpResult<'tcx, ConstAllocation<'tcx>> {
+ let dest = self.allocate(layout, MemoryKind::Stack)?;
+ f(self, &dest.into())?;
+ let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
+ alloc.mutability = Mutability::Not;
+ Ok(self.tcx.intern_const_alloc(alloc))
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
new file mode 100644
index 000000000..08209eb79
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -0,0 +1,696 @@
+//! Intrinsics and other functions that the miri engine executes without
+//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
+//! and miri.
+
+use std::convert::TryFrom;
+
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::{
+ self,
+ interpret::{ConstValue, GlobalId, InterpResult, PointerArithmetic, Scalar},
+ BinOp,
+};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf as _;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::abi::{Abi, Align, Primitive, Size};
+
+use super::{
+ util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
+ Pointer,
+};
+
+mod caller_location;
+mod type_name;
+
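+// Worked example for `numeric_intrinsic` below (illustrative, not part of the
+// original source): for `ctlz` on a `u8`, `extra = 128 - 8 = 120`; the value
+// 0b0001_0000 has 123 leading zeros as a `u128`, and 123 - 120 = 3, matching
+// `16u8.leading_zeros()`.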
+fn numeric_intrinsic<Prov>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Prov> {
+ let size = match kind {
+ Primitive::Int(integer, _) => integer.size(),
+ _ => bug!("invalid `{}` argument: {:?}", name, bits),
+ };
+ let extra = 128 - u128::from(size.bits());
+ let bits_out = match name {
+ sym::ctpop => u128::from(bits.count_ones()),
+ sym::ctlz => u128::from(bits.leading_zeros()) - extra,
+ sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
+ sym::bswap => (bits << extra).swap_bytes(),
+ sym::bitreverse => (bits << extra).reverse_bits(),
+ _ => bug!("not a numeric intrinsic: {}", name),
+ };
+ Scalar::from_uint(bits_out, size)
+}
+
+/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
+/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
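+/// For example (illustrative): `variant_count::<Option<u8>>()` is answered here
+/// directly from the `AdtDef`, yielding 2.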
+pub(crate) fn eval_nullary_intrinsic<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+) -> InterpResult<'tcx, ConstValue<'tcx>> {
+ let tp_ty = substs.type_at(0);
+ let name = tcx.item_name(def_id);
+ Ok(match name {
+ sym::type_name => {
+ ensure_monomorphic_enough(tcx, tp_ty)?;
+ let alloc = type_name::alloc_type_name(tcx, tp_ty);
+ ConstValue::Slice { data: alloc, start: 0, end: alloc.inner().len() }
+ }
+ sym::needs_drop => {
+ ensure_monomorphic_enough(tcx, tp_ty)?;
+ ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
+ }
+ sym::pref_align_of => {
+ // Correctly handles non-monomorphic calls, so there is no need for `ensure_monomorphic_enough`.
+ let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
+ ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx)
+ }
+ sym::type_id => {
+ ensure_monomorphic_enough(tcx, tp_ty)?;
+ ConstValue::from_u64(tcx.type_id_hash(tp_ty))
+ }
+ sym::variant_count => match tp_ty.kind() {
+ // Correctly handles non-monomorphic calls, so there is no need for `ensure_monomorphic_enough`.
+ ty::Adt(ref adt, _) => {
+ ConstValue::from_machine_usize(adt.variants().len() as u64, &tcx)
+ }
+ ty::Projection(_)
+ | ty::Opaque(_, _)
+ | ty::Param(_)
+ | ty::Bound(_, _)
+ | ty::Placeholder(_)
+ | ty::Infer(_) => throw_inval!(TooGeneric),
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(_, _)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(_, _, _)
+ | ty::FnDef(_, _)
+ | ty::FnPtr(_)
+ | ty::Dynamic(_, _)
+ | ty::Closure(_, _)
+ | ty::Generator(_, _, _)
+ | ty::GeneratorWitness(_)
+ | ty::Never
+ | ty::Tuple(_)
+ | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
+ },
+ other => bug!("`{}` is not a zero arg intrinsic", other),
+ })
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Returns `true` if emulation happened.
+ /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
+ /// intrinsic handling.
+ pub fn emulate_intrinsic(
+ &mut self,
+ instance: ty::Instance<'tcx>,
+ args: &[OpTy<'tcx, M::Provenance>],
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ret: Option<mir::BasicBlock>,
+ ) -> InterpResult<'tcx, bool> {
+ let substs = instance.substs;
+ let intrinsic_name = self.tcx.item_name(instance.def_id());
+
+ // First handle intrinsics without return place.
+ let ret = match ret {
+ None => match intrinsic_name {
+ sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
+ sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
+ // Unsupported diverging intrinsic.
+ _ => return Ok(false),
+ },
+ Some(p) => p,
+ };
+
+ match intrinsic_name {
+ sym::caller_location => {
+ let span = self.find_closest_untracked_caller_location();
+ let location = self.alloc_caller_location_for_span(span);
+ self.write_immediate(location.to_ref(self), dest)?;
+ }
+
+ sym::min_align_of_val | sym::size_of_val => {
+ // Avoid `deref_operand` -- this is not a deref; the ptr does not have to be
+ // dereferenceable!
+ let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
+ let (size, align) = self
+ .size_and_align_of_mplace(&place)?
+ .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;
+
+ let result = match intrinsic_name {
+ sym::min_align_of_val => align.bytes(),
+ sym::size_of_val => size.bytes(),
+ _ => bug!(),
+ };
+
+ self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
+ }
+
+ sym::pref_align_of
+ | sym::needs_drop
+ | sym::type_id
+ | sym::type_name
+ | sym::variant_count => {
+ let gid = GlobalId { instance, promoted: None };
+ let ty = match intrinsic_name {
+ sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
+ sym::needs_drop => self.tcx.types.bool,
+ sym::type_id => self.tcx.types.u64,
+ sym::type_name => self.tcx.mk_static_str(),
+ _ => bug!(),
+ };
+ let val =
+ self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
+ let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
+ self.copy_op(&val, dest, /*allow_transmute*/ false)?;
+ }
+
+ sym::ctpop
+ | sym::cttz
+ | sym::cttz_nonzero
+ | sym::ctlz
+ | sym::ctlz_nonzero
+ | sym::bswap
+ | sym::bitreverse => {
+ let ty = substs.type_at(0);
+ let layout_of = self.layout_of(ty)?;
+ let val = self.read_scalar(&args[0])?.check_init()?;
+ let bits = val.to_bits(layout_of.size)?;
+ let kind = match layout_of.abi {
+ Abi::Scalar(scalar) => scalar.primitive(),
+ _ => span_bug!(
+ self.cur_span(),
+ "{} called on invalid type {:?}",
+ intrinsic_name,
+ ty
+ ),
+ };
+ let (nonzero, intrinsic_name) = match intrinsic_name {
+ sym::cttz_nonzero => (true, sym::cttz),
+ sym::ctlz_nonzero => (true, sym::ctlz),
+ other => (false, other),
+ };
+ if nonzero && bits == 0 {
+ throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
+ }
+ let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
+ self.write_scalar(out_val, dest)?;
+ }
+ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+ let lhs = self.read_immediate(&args[0])?;
+ let rhs = self.read_immediate(&args[1])?;
+ let bin_op = match intrinsic_name {
+ sym::add_with_overflow => BinOp::Add,
+ sym::sub_with_overflow => BinOp::Sub,
+ sym::mul_with_overflow => BinOp::Mul,
+ _ => bug!(),
+ };
+ self.binop_with_overflow(
+ bin_op, /*force_overflow_checks*/ true, &lhs, &rhs, dest,
+ )?;
+ }
+ sym::saturating_add | sym::saturating_sub => {
+ let l = self.read_immediate(&args[0])?;
+ let r = self.read_immediate(&args[1])?;
+ let val = self.saturating_arith(
+ if intrinsic_name == sym::saturating_add { BinOp::Add } else { BinOp::Sub },
+ &l,
+ &r,
+ )?;
+ self.write_scalar(val, dest)?;
+ }
+ sym::discriminant_value => {
+ let place = self.deref_operand(&args[0])?;
+ let discr_val = self.read_discriminant(&place.into())?.0;
+ self.write_scalar(discr_val, dest)?;
+ }
+ sym::unchecked_shl
+ | sym::unchecked_shr
+ | sym::unchecked_add
+ | sym::unchecked_sub
+ | sym::unchecked_mul
+ | sym::unchecked_div
+ | sym::unchecked_rem => {
+ let l = self.read_immediate(&args[0])?;
+ let r = self.read_immediate(&args[1])?;
+ let bin_op = match intrinsic_name {
+ sym::unchecked_shl => BinOp::Shl,
+ sym::unchecked_shr => BinOp::Shr,
+ sym::unchecked_add => BinOp::Add,
+ sym::unchecked_sub => BinOp::Sub,
+ sym::unchecked_mul => BinOp::Mul,
+ sym::unchecked_div => BinOp::Div,
+ sym::unchecked_rem => BinOp::Rem,
+ _ => bug!(),
+ };
+ let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
+ if overflowed {
+ let layout = self.layout_of(substs.type_at(0))?;
+ let r_val = r.to_scalar()?.to_bits(layout.size)?;
+ if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
+ throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
+ } else {
+ throw_ub_format!("overflow executing `{}`", intrinsic_name);
+ }
+ }
+ self.write_scalar(val, dest)?;
+ }
+ sym::rotate_left | sym::rotate_right => {
+ // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
+ // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
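+ // Worked example (illustrative, not in the original source): for u8,
+ // rotate_left with X = 0b1000_0001 and S = 1 computes
+ // (0b1000_0001 << 1) | (0b1000_0001 >> 7) = 0b1_0000_0011; truncated
+ // to 8 bits this is 0b0000_0011, i.e. `129u8.rotate_left(1)` = 3.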
+ let layout = self.layout_of(substs.type_at(0))?;
+ let val = self.read_scalar(&args[0])?.check_init()?;
+ let val_bits = val.to_bits(layout.size)?;
+ let raw_shift = self.read_scalar(&args[1])?.check_init()?;
+ let raw_shift_bits = raw_shift.to_bits(layout.size)?;
+ let width_bits = u128::from(layout.size.bits());
+ let shift_bits = raw_shift_bits % width_bits;
+ let inv_shift_bits = (width_bits - shift_bits) % width_bits;
+ let result_bits = if intrinsic_name == sym::rotate_left {
+ (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
+ } else {
+ (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
+ };
+ let truncated_bits = self.truncate(result_bits, layout);
+ let result = Scalar::from_uint(truncated_bits, layout.size);
+ self.write_scalar(result, dest)?;
+ }
+ sym::copy => {
+ self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
+ }
+ sym::write_bytes => {
+ self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
+ }
+ sym::offset => {
+ let ptr = self.read_pointer(&args[0])?;
+ let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+ let pointee_ty = substs.type_at(0);
+
+ let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
+ self.write_pointer(offset_ptr, dest)?;
+ }
+ sym::arith_offset => {
+ let ptr = self.read_pointer(&args[0])?;
+ let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
+ let pointee_ty = substs.type_at(0);
+
+ let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+ let offset_bytes = offset_count.wrapping_mul(pointee_size);
+ let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
+ self.write_pointer(offset_ptr, dest)?;
+ }
+ sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
+ let a = self.read_pointer(&args[0])?;
+ let b = self.read_pointer(&args[1])?;
+
+ let usize_layout = self.layout_of(self.tcx.types.usize)?;
+ let isize_layout = self.layout_of(self.tcx.types.isize)?;
+
+ // Get offsets for both that are at least relative to the same base.
+ let (a_offset, b_offset) =
+ match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
+ (Err(a), Err(b)) => {
+ // Neither pointer points to an allocation.
+ // If these are unequal or null, this *will* fail the deref check below.
+ (a, b)
+ }
+ (Err(_), _) | (_, Err(_)) => {
+ // We managed to find a valid allocation for one pointer, but not the other.
+ // That means they are definitely not pointing to the same allocation.
+ throw_ub_format!(
+ "`{}` called on pointers into different allocations",
+ intrinsic_name
+ );
+ }
+ (Ok((a_alloc_id, a_offset, _)), Ok((b_alloc_id, b_offset, _))) => {
+ // Found allocation for both. They must be into the same allocation.
+ if a_alloc_id != b_alloc_id {
+ throw_ub_format!(
+ "`{}` called on pointers into different allocations",
+ intrinsic_name
+ );
+ }
+ // Use these offsets for distance calculation.
+ (a_offset.bytes(), b_offset.bytes())
+ }
+ };
+
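+ // Worked example (illustrative, not in the original source): `a` at byte
+ // offset 16 and `b` at byte offset 4 of one allocation give `dist = 12`;
+ // the `exact_div` by `size_of::<u32>() = 4` below then returns 3.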
+ // Compute distance.
+ let dist = {
+ // Addresses are unsigned, so this is a `usize` computation. We have to do the
+ // overflow check separately anyway.
+ let (val, overflowed, _ty) = {
+ let a_offset = ImmTy::from_uint(a_offset, usize_layout);
+ let b_offset = ImmTy::from_uint(b_offset, usize_layout);
+ self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
+ };
+ if overflowed {
+ // a < b
+ if intrinsic_name == sym::ptr_offset_from_unsigned {
+ throw_ub_format!(
+ "`{}` called when first pointer has smaller offset than second: {} < {}",
+ intrinsic_name,
+ a_offset,
+ b_offset,
+ );
+ }
+ // The signed form of the intrinsic allows this. If we interpret the
+ // difference as isize, we'll get the proper signed difference. If that
+ // seems *positive*, they were more than isize::MAX apart.
+ let dist = val.to_machine_isize(self)?;
+ if dist >= 0 {
+ throw_ub_format!(
+ "`{}` called when first pointer is too far before second",
+ intrinsic_name
+ );
+ }
+ dist
+ } else {
+ // b >= a
+ let dist = val.to_machine_isize(self)?;
+ // If converting to isize produced a *negative* result, we had an overflow
+ // because they were more than isize::MAX apart.
+ if dist < 0 {
+ throw_ub_format!(
+ "`{}` called when first pointer is too far ahead of second",
+ intrinsic_name
+ );
+ }
+ dist
+ }
+ };
+
+ // Check that the range between them is dereferenceable ("in-bounds or one past the
+ // end of the same allocation"). This is like the check in ptr_offset_inbounds.
+ let min_ptr = if dist >= 0 { b } else { a };
+ self.check_ptr_access_align(
+ min_ptr,
+ Size::from_bytes(dist.unsigned_abs()),
+ Align::ONE,
+ CheckInAllocMsg::OffsetFromTest,
+ )?;
+
+ // Perform division by size to compute return value.
+ let ret_layout = if intrinsic_name == sym::ptr_offset_from_unsigned {
+ assert!(0 <= dist && dist <= self.machine_isize_max());
+ usize_layout
+ } else {
+ assert!(self.machine_isize_min() <= dist && dist <= self.machine_isize_max());
+ isize_layout
+ };
+ let pointee_layout = self.layout_of(substs.type_at(0))?;
+ // If `ret_layout` is unsigned, we asserted above that the distance is nonnegative, so we are good.
+ let val = ImmTy::from_int(dist, ret_layout);
+ let size = ImmTy::from_int(pointee_layout.size.bytes(), ret_layout);
+ self.exact_div(&val, &size, dest)?;
+ }
+
+ sym::transmute => {
+ self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
+ }
+ sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+ let ty = instance.substs.type_at(0);
+ let layout = self.layout_of(ty)?;
+
+ // For *all* intrinsics we first check `is_uninhabited` to give a more specific
+ // error message.
+ if layout.abi.is_uninhabited() {
+ // The run-time intrinsic panics just to get a good backtrace; here we abort
+ // since there is no problem showing a backtrace even for aborts.
+ M::abort(
+ self,
+ format!(
+ "aborted execution: attempted to instantiate uninhabited type `{}`",
+ ty
+ ),
+ )?;
+ }
+
+ if intrinsic_name == sym::assert_zero_valid {
+ let should_panic = !self.tcx.permits_zero_init(layout);
+
+ if should_panic {
+ M::abort(
+ self,
+ format!(
+ "aborted execution: attempted to zero-initialize type `{}`, which is invalid",
+ ty
+ ),
+ )?;
+ }
+ }
+
+ if intrinsic_name == sym::assert_uninit_valid {
+ let should_panic = !self.tcx.permits_uninit_init(layout);
+
+ if should_panic {
+ M::abort(
+ self,
+ format!(
+ "aborted execution: attempted to leave type `{}` uninitialized, which is invalid",
+ ty
+ ),
+ )?;
+ }
+ }
+ }
+ sym::simd_insert => {
+ let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
+ let elem = &args[2];
+ let (input, input_len) = self.operand_to_simd(&args[0])?;
+ let (dest, dest_len) = self.place_to_simd(dest)?;
+ assert_eq!(input_len, dest_len, "Return vector length must match input length");
+ assert!(
+ index < dest_len,
+ "Index `{}` must be in bounds of vector with length {}`",
+ index,
+ dest_len
+ );
+
+ for i in 0..dest_len {
+ let place = self.mplace_index(&dest, i)?;
+ let value = if i == index {
+ elem.clone()
+ } else {
+ self.mplace_index(&input, i)?.into()
+ };
+ self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
+ }
+ }
+ sym::simd_extract => {
+ let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
+ let (input, input_len) = self.operand_to_simd(&args[0])?;
+ assert!(
+ index < input_len,
+ "index `{}` must be in bounds of vector with length `{}`",
+ index,
+ input_len
+ );
+ self.copy_op(
+ &self.mplace_index(&input, index)?.into(),
+ dest,
+ /*allow_transmute*/ false,
+ )?;
+ }
+ sym::likely | sym::unlikely | sym::black_box => {
+ // These just return their argument
+ self.copy_op(&args[0], dest, /*allow_transmute*/ false)?;
+ }
+ sym::assume => {
+ let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
+ if !cond {
+ throw_ub_format!("`assume` intrinsic called with `false`");
+ }
+ }
+ sym::raw_eq => {
+ let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
+ self.write_scalar(result, dest)?;
+ }
+
+ sym::vtable_size => {
+ let ptr = self.read_pointer(&args[0])?;
+ let (size, _align) = self.get_vtable_size_and_align(ptr)?;
+ self.write_scalar(Scalar::from_machine_usize(size.bytes(), self), dest)?;
+ }
+ sym::vtable_align => {
+ let ptr = self.read_pointer(&args[0])?;
+ let (_size, align) = self.get_vtable_size_and_align(ptr)?;
+ self.write_scalar(Scalar::from_machine_usize(align.bytes(), self), dest)?;
+ }
+
+ _ => return Ok(false),
+ }
+
+ trace!("{:?}", self.dump_place(**dest));
+ self.go_to_block(ret);
+ Ok(true)
+ }
+
+ pub fn exact_div(
+ &mut self,
+ a: &ImmTy<'tcx, M::Provenance>,
+ b: &ImmTy<'tcx, M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ // Performs an exact division, resulting in undefined behavior where
+ // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
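+ // For instance (illustrative, not in the original source): 9 / 3 succeeds
+ // with result 3, while 10 / 3 hits the remainder error below.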
+ // First, check x % y != 0 (or if that computation overflows).
+ let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
+ assert!(!overflow); // All overflow is UB, so this should never return on overflow.
+ if res.assert_bits(a.layout.size) != 0 {
+ throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b)
+ }
+ // `Rem` says this is all right, so we can let `Div` do its job.
+ self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
+ }
+
+ pub fn saturating_arith(
+ &self,
+ mir_op: BinOp,
+ l: &ImmTy<'tcx, M::Provenance>,
+ r: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
+ let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?;
+ Ok(if overflowed {
+ let size = l.layout.size;
+ let num_bits = size.bits();
+ if l.layout.abi.is_signed() {
+ // For signed ints, the saturated value depends on the sign of the first
+ // term, since the sign of the second term can be inferred from it and
+ // from the fact that the operation has overflowed (if either term is 0,
+ // no overflow can occur).
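+ // E.g. (illustrative): for `i8`, 100 + 100 overflows with a positive
+ // first term, so we saturate to `i8::MAX` = 127 below, matching
+ // `100i8.saturating_add(100)`.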
+ let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
+ let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
+ if first_term_positive {
+ // Negative overflow is not possible, since the positive first term
+ // can only increase an (in-range) negative term for addition,
+ // or the corresponding negated positive term for subtraction.
+ Scalar::from_int(size.signed_int_max(), size)
+ } else {
+ // Positive overflow not possible for similar reason
+ // max negative
+ Scalar::from_int(size.signed_int_min(), size)
+ }
+ } else {
+ // unsigned
+ if matches!(mir_op, BinOp::Add) {
+ // max unsigned
+ Scalar::from_uint(size.unsigned_int_max(), size)
+ } else {
+ // underflow to 0
+ Scalar::from_uint(0u128, size)
+ }
+ }
+ } else {
+ val
+ })
+ }
+
+ /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
+ /// allocation. For integer pointers, we consider each of them to be its own tiny allocation of size
+ /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
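+ /// For instance (an illustrative example, not from the original docs): offsetting
+ /// a `*const u32` by `offset_count = 3` computes `offset_bytes = 12` and checks
+ /// that those 12 bytes lie within a single live allocation.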
+ pub fn ptr_offset_inbounds(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ pointee_ty: Ty<'tcx>,
+ offset_count: i64,
+ ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
+ // We cannot overflow i64 as a type's size must be <= isize::MAX.
+ let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
+ // The computed offset, in bytes, must not overflow an isize.
+ // `checked_mul` enforces a slightly too small bound, but no actual allocation can be
+ // big enough for the difference to be noticeable.
+ let offset_bytes =
+ offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
+ // The offset being in bounds cannot rely on "wrapping around" the address space.
+ // So, first rule out overflows in the pointer arithmetic.
+ let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
+ // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
+ // memory between these pointers must be accessible. Note that we do not require the
+ // pointers to be properly aligned (unlike a read/write operation).
+ let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
+ // This call handles checking for integer/null pointers.
+ self.check_ptr_access_align(
+ min_ptr,
+ Size::from_bytes(offset_bytes.unsigned_abs()),
+ Align::ONE,
+ CheckInAllocMsg::PointerArithmeticTest,
+ )?;
+ Ok(offset_ptr)
+ }
+
+ /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
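+ /// For example (illustrative): `copy::<u64>` with `count = 3` copies
+ /// `8 * 3 = 24` bytes; with `nonoverlapping` set, the two ranges must be disjoint.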
+ pub(crate) fn copy_intrinsic(
+ &mut self,
+ src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ nonoverlapping: bool,
+ ) -> InterpResult<'tcx> {
+ let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+ let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
+ let (size, align) = (layout.size, layout.align.abi);
+ // `checked_mul` enforces a slightly too small bound (the correct one would probably be
+ // `machine_isize_max`), but no actual allocation can be big enough for the difference
+ // to be noticeable.
+ let size = size.checked_mul(count, self).ok_or_else(|| {
+ err_ub_format!(
+ "overflow computing total size of `{}`",
+ if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
+ )
+ })?;
+
+ let src = self.read_pointer(&src)?;
+ let dst = self.read_pointer(&dst)?;
+
+ self.mem_copy(src, align, dst, align, size, nonoverlapping)
+ }
+
+ pub(crate) fn write_bytes_intrinsic(
+ &mut self,
+ dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ byte: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap().ty)?;
+
+ let dst = self.read_pointer(&dst)?;
+ let byte = self.read_scalar(&byte)?.to_u8()?;
+ let count = self.read_scalar(&count)?.to_machine_usize(self)?;
+
+ // `checked_mul` enforces a slightly too small bound (the correct one would probably be
+ // `machine_isize_max`), but no actual allocation can be big enough for the difference
+ // to be noticeable.
+ let len = layout
+ .size
+ .checked_mul(count, self)
+ .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;
+
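+ // E.g. (illustrative): `write_bytes::<u32>` with `count = 2` gives `len = 8`,
+ // so eight copies of `byte` are written below.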
+ let bytes = std::iter::repeat(byte).take(len.bytes_usize());
+ self.write_bytes_ptr(dst, bytes)
+ }
+
+ pub(crate) fn raw_eq_intrinsic(
+ &mut self,
+ lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::Provenance>,
+ ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
+ let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
+ assert!(!layout.is_unsized());
+
+ let lhs = self.read_pointer(lhs)?;
+ let rhs = self.read_pointer(rhs)?;
+ let lhs_bytes = self.read_bytes_ptr(lhs, layout.size)?;
+ let rhs_bytes = self.read_bytes_ptr(rhs, layout.size)?;
+ Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
new file mode 100644
index 000000000..5864b9215
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/caller_location.rs
@@ -0,0 +1,130 @@
+use std::convert::TryFrom;
+
+use rustc_ast::Mutability;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::mir::TerminatorKind;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::subst::Subst;
+use rustc_span::{Span, Symbol};
+
+use crate::interpret::{
+ intrinsics::{InterpCx, Machine},
+ MPlaceTy, MemoryKind, Scalar,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
+ /// frame which is not `#[track_caller]`.
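+ ///
+ /// For instance (illustrative): if `main` calls a `#[track_caller]` helper that
+ /// invokes `caller_location`, the helper's frame is skipped and the span of the
+ /// call inside `main` is returned.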
+ pub(crate) fn find_closest_untracked_caller_location(&self) -> Span {
+ for frame in self.stack().iter().rev() {
+ debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
+
+ // Assert that the frame we look at is actually executing code currently
+ // (`loc` is `Err` when we are unwinding and the frame does not require cleanup).
+ let loc = frame.loc.unwrap();
+
+ // This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
+ // (such as `box`). Use the normal span by default.
+ let mut source_info = *frame.body.source_info(loc);
+
+ // If this is a `Call` terminator, use the `fn_span` instead.
+ let block = &frame.body.basic_blocks()[loc.block];
+ if loc.statement_index == block.statements.len() {
+ debug!(
+ "find_closest_untracked_caller_location: got terminator {:?} ({:?})",
+ block.terminator(),
+ block.terminator().kind
+ );
+ if let TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
+ source_info.span = fn_span;
+ }
+ }
+
+ // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
+ // If so, the starting `source_info.span` is in the innermost inlined
+ // function, and will be replaced with outer callsite spans as long
+ // as the inlined functions were `#[track_caller]`.
+ loop {
+ let scope_data = &frame.body.source_scopes[source_info.scope];
+
+ if let Some((callee, callsite_span)) = scope_data.inlined {
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !callee.def.requires_caller_location(*self.tcx) {
+ return source_info.span;
+ }
+ source_info.span = callsite_span;
+ }
+
+ // Skip past all of the parents with `inlined: None`.
+ match scope_data.inlined_parent_scope {
+ Some(parent) => source_info.scope = parent,
+ None => break,
+ }
+ }
+
+ // Stop inside the most nested non-`#[track_caller]` function,
+ // before ever reaching its caller (which is irrelevant).
+ if !frame.instance.def.requires_caller_location(*self.tcx) {
+ return source_info.span;
+ }
+ }
+
+ span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found")
+ }
+
+ /// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
+ pub(crate) fn alloc_caller_location(
+ &mut self,
+ filename: Symbol,
+ line: u32,
+ col: u32,
+ ) -> MPlaceTy<'tcx, M::Provenance> {
+ let loc_details = &self.tcx.sess.opts.unstable_opts.location_detail;
+ let file = if loc_details.file {
+ self.allocate_str(filename.as_str(), MemoryKind::CallerLocation, Mutability::Not)
+ } else {
+ // FIXME: This creates a new allocation each time. It might be preferable to
+ // perform this allocation only once, and re-use the `MPlaceTy`.
+ // See https://github.com/rust-lang/rust/pull/89920#discussion_r730012398
+ self.allocate_str("<redacted>", MemoryKind::CallerLocation, Mutability::Not)
+ };
+ let line = if loc_details.line { Scalar::from_u32(line) } else { Scalar::from_u32(0) };
+ let col = if loc_details.column { Scalar::from_u32(col) } else { Scalar::from_u32(0) };
+
+ // Allocate memory for `CallerLocation` struct.
+ let loc_ty = self
+ .tcx
+ .bound_type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
+ .subst(*self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter()));
+ let loc_layout = self.layout_of(loc_ty).unwrap();
+ // This can fail if rustc runs out of memory right here. Trying to emit an error would be
+ // pointless, since that would require allocating more memory than a Location.
+ let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
+
+ // Initialize fields.
+ self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
+ .expect("writing to memory we just allocated cannot fail");
+ self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
+ .expect("writing to memory we just allocated cannot fail");
+ self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into())
+ .expect("writing to memory we just allocated cannot fail");
+
+ location
+ }
+
+ pub(crate) fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
+ let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
+ let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
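+ // Note (added for clarity): `col_display` is zero-based, while `Location`
+ // columns are one-based, hence the `checked_add(1)` below.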
+ (
+ Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
+ u32::try_from(caller.line).unwrap(),
+ u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
+ )
+ }
+
+ pub fn alloc_caller_location_for_span(&mut self, span: Span) -> MPlaceTy<'tcx, M::Provenance> {
+ let (file, line, column) = self.location_triple_for_span(span);
+ self.alloc_caller_location(file, line, column)
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
new file mode 100644
index 000000000..f9847742f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
@@ -0,0 +1,196 @@
+use rustc_data_structures::intern::Interned;
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::definitions::DisambiguatedDefPathData;
+use rustc_middle::mir::interpret::{Allocation, ConstAllocation};
+use rustc_middle::ty::{
+ self,
+ print::{PrettyPrinter, Print, Printer},
+ subst::{GenericArg, GenericArgKind},
+ Ty, TyCtxt,
+};
+use std::fmt::Write;
+
+struct AbsolutePathPrinter<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ path: String,
+}
+
+impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
+ type Error = std::fmt::Error;
+
+ type Path = Self;
+ type Region = Self;
+ type Type = Self;
+ type DynExistential = Self;
+ type Const = Self;
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
+ Ok(self)
+ }
+
+ fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ match *ty.kind() {
+ // Types without identity.
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Array(_, _)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(_, _, _)
+ | ty::FnPtr(_)
+ | ty::Never
+ | ty::Tuple(_)
+ | ty::Dynamic(_, _) => self.pretty_print_type(ty),
+
+ // Placeholders (all printed as `_` to uniformize them).
+ ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
+ write!(self, "_")?;
+ Ok(self)
+ }
+
+ // Types with identity (print the module path).
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
+ | ty::FnDef(def_id, substs)
+ | ty::Opaque(def_id, substs)
+ | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
+ | ty::Closure(def_id, substs)
+ | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
+ ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
+
+ ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"),
+ }
+ }
+
+ fn print_const(self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+ self.pretty_print_const(ct, false)
+ }
+
+ fn print_dyn_existential(
+ mut self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ let mut first = true;
+ for p in predicates {
+ if !first {
+ write!(self, "+")?;
+ }
+ first = false;
+ self = p.print(self)?;
+ }
+ Ok(self)
+ }
+
+ fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+ self.path.push_str(self.tcx.crate_name(cnum).as_str());
+ Ok(self)
+ }
+
+ fn path_qualified(
+ self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self.pretty_path_qualified(self_ty, trait_ref)
+ }
+
+ fn path_append_impl(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _disambiguated_data: &DisambiguatedDefPathData,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self.pretty_path_append_impl(
+ |mut cx| {
+ cx = print_prefix(cx)?;
+
+ cx.path.push_str("::");
+
+ Ok(cx)
+ },
+ self_ty,
+ trait_ref,
+ )
+ }
+
+ fn path_append(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ write!(self.path, "::{}", disambiguated_data.data).unwrap();
+
+ Ok(self)
+ }
+
+ fn path_generic_args(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+ let args =
+ args.iter().cloned().filter(|arg| !matches!(arg.unpack(), GenericArgKind::Lifetime(_)));
+ if args.clone().next().is_some() {
+ self.generic_delimiters(|cx| cx.comma_sep(args))
+ } else {
+ Ok(self)
+ }
+ }
+}
+
+impl<'tcx> PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> {
+ fn should_print_region(&self, _region: ty::Region<'_>) -> bool {
+ false
+ }
+ fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
+ {
+ if let Some(first) = elems.next() {
+ self = first.print(self)?;
+ for elem in elems {
+ self.path.push_str(", ");
+ self = elem.print(self)?;
+ }
+ }
+ Ok(self)
+ }
+
+ fn generic_delimiters(
+ mut self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ ) -> Result<Self, Self::Error> {
+ write!(self, "<")?;
+
+ self = f(self)?;
+
+ write!(self, ">")?;
+
+ Ok(self)
+ }
+}
+
+impl Write for AbsolutePathPrinter<'_> {
+ fn write_str(&mut self, s: &str) -> std::fmt::Result {
+ self.path.push_str(s);
+ Ok(())
+ }
+}
+
+/// Directly returns an `Allocation` containing an absolute path representation of the given type.
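+///
+/// For example (illustrative): for `Vec<i32>` this produces the bytes of
+/// "alloc::vec::Vec<i32>", the same path `std::any::type_name` reports.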
+pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAllocation<'tcx> {
+ let path = AbsolutePathPrinter { tcx, path: String::new() }.print_type(ty).unwrap().path;
+ let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
+ tcx.intern_const_alloc(alloc)
+}
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
new file mode 100644
index 000000000..71ccd1799
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -0,0 +1,525 @@
+//! This module contains everything needed to instantiate an interpreter.
+//! This separation exists to ensure that no fancy miri features like
+//! interpreting common C functions leak into CTFE.
+
+use std::borrow::{Borrow, Cow};
+use std::fmt::Debug;
+use std::hash::Hash;
+
+use rustc_middle::mir;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::def_id::DefId;
+use rustc_target::abi::Size;
+use rustc_target::spec::abi::Abi as CallAbi;
+
+use super::{
+ AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
+ MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
+};
+
+/// Data returned by `Machine::stack_pop` to provide further control over the
+/// popping of the stack frame.
+#[derive(Eq, PartialEq, Debug, Copy, Clone)]
+pub enum StackPopJump {
+ /// Indicates that no special handling should be
+ /// done - we'll either return normally or unwind
+ /// based on the terminator for the function
+ /// we're leaving.
+ Normal,
+
+ /// Indicates that we should *not* jump to the return/unwind address, as the callback already
+ /// took care of everything.
+ NoJump,
+}
+
+/// Whether this kind of memory is allowed to leak
+pub trait MayLeak: Copy {
+ fn may_leak(self) -> bool;
+}
+
+/// The functionality needed by memory to manage its allocations
+pub trait AllocMap<K: Hash + Eq, V> {
+ /// Tests if the map contains the given key.
+ /// Deliberately takes `&mut` because that is sufficient, and some implementations
+ /// can then be more efficient (e.g. using `RefCell::get_mut`).
+ fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
+ where
+ K: Borrow<Q>;
+
+ /// Inserts a new entry into the map.
+ fn insert(&mut self, k: K, v: V) -> Option<V>;
+
+ /// Removes an entry from the map.
+ fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
+ where
+ K: Borrow<Q>;
+
+ /// Returns data based on the keys and values in the map.
+ fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;
+
+ /// Returns a reference to entry `k`. If no such entry exists, call
+ /// `vacant` and either forward its error, or add its result to the map
+ /// and return a reference to *that*.
+ fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E>;
+
+ /// Returns a mutable reference to entry `k`. If no such entry exists, call
+ /// `vacant` and either forward its error, or add its result to the map
+ /// and return a reference to *that*.
+ fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E>;
+
+ /// Read-only lookup.
+ fn get(&self, k: K) -> Option<&V> {
+ self.get_or(k, || Err(())).ok()
+ }
+
+ /// Mutable lookup.
+ fn get_mut(&mut self, k: K) -> Option<&mut V> {
+ self.get_mut_or(k, || Err(())).ok()
+ }
+}
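+
+// An illustrative note (not from the original source): CTFE's memory map is
+// morally an `FxHashMap<AllocId, (MemoryKind, Allocation)>`; the `vacant`
+// closure of `get_or` is how global allocations from `tcx` get pulled into the
+// local map on demand.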
+
+/// Each method of this trait signifies a point where CTFE evaluation would fail,
+/// and some use-case-dependent behaviour can instead be applied.
+pub trait Machine<'mir, 'tcx>: Sized {
+ /// Additional memory kinds a machine wishes to distinguish from the builtin ones
+ type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
+
+ /// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
+ type Provenance: Provenance + Eq + Hash + 'static;
+
+ /// When getting the AllocId of a pointer, some extra data is also obtained from the provenance
+ /// that is passed to memory access hooks so they can do things with it.
+ type ProvenanceExtra: Copy + 'static;
+
+ /// Machines can define extra (non-instance) things that represent values of function pointers.
+ /// For example, Miri uses this to return a function pointer from `dlsym`
+ /// that can later be called to execute the right thing.
+ type ExtraFnVal: Debug + Copy;
+
+ /// Extra data stored in every call frame.
+ type FrameExtra;
+
+ /// Extra data stored in every allocation.
+ type AllocExtra: Debug + Clone + 'static;
+
+ /// Memory's allocation map
+ type MemoryMap: AllocMap<
+ AllocId,
+ (MemoryKind<Self::MemoryKind>, Allocation<Self::Provenance, Self::AllocExtra>),
+ > + Default
+ + Clone;
+
+ /// The memory kind to use for copied global memory (held in `tcx`) --
+ /// or `None` if such memory should not be mutated, and thus any such attempt will
+ /// cause a `ModifiedStatic` error to be raised.
+ /// Statics are copied under two circumstances: when they are mutated, and when
+ /// `adjust_allocation` (see below) returns an owned allocation
+ /// that is added to the memory so that the work is not done twice.
+ const GLOBAL_KIND: Option<Self::MemoryKind>;
+
+ /// Should the machine panic on allocation failures?
+ const PANIC_ON_ALLOC_FAIL: bool;
+
+ /// Whether memory accesses should be alignment-checked.
+ fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Whether, when checking alignment, we should `force_int` and thus support
+ /// custom alignment logic based on whatever the integer address happens to be.
+ ///
+ /// Requires `Provenance::OFFSET_IS_ADDR` to be true.
+ fn force_int_for_alignment_check(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Whether to enforce the validity invariant
+ fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Whether to enforce integers and floats being initialized.
+ fn enforce_number_init(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Whether function calls should be [ABI](CallAbi)-checked.
+ fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
+ true
+ }
+
+ /// Whether CheckedBinOp MIR statements should actually check for overflow.
+ fn checked_binop_checks_overflow(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
+
+ /// Entry point for obtaining the MIR of anything that should get evaluated.
+ /// So not just functions and shims, but also const/static initializers, anonymous
+ /// constants, ...
+ fn load_mir(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ instance: ty::InstanceDef<'tcx>,
+ ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+ Ok(ecx.tcx.instance_mir(instance))
+ }
+
+ /// Entry point to all function calls.
+ ///
+ /// Returns either the mir to use for the call, or `None` if execution should
+ /// just proceed (which usually means this hook did all the work that the
+ /// called function should usually have done). In the latter case, it is
+ /// this hook's responsibility to advance the instruction pointer!
+ /// (This is to support functions like `__rust_maybe_catch_panic` that neither find a MIR
+ /// nor just jump to `ret`, but instead push their own stack frame.)
+ /// Passing `dest` and `ret` in the same `Option` proved very annoying when only one of
+ /// them was used.
+ fn find_mir_or_eval_fn(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ instance: ty::Instance<'tcx>,
+ abi: CallAbi,
+ args: &[OpTy<'tcx, Self::Provenance>],
+ destination: &PlaceTy<'tcx, Self::Provenance>,
+ target: Option<mir::BasicBlock>,
+ unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>>;
+
+ /// Execute `fn_val`. It is the hook's responsibility to advance the instruction
+ /// pointer as appropriate.
+ fn call_extra_fn(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ fn_val: Self::ExtraFnVal,
+ abi: CallAbi,
+ args: &[OpTy<'tcx, Self::Provenance>],
+ destination: &PlaceTy<'tcx, Self::Provenance>,
+ target: Option<mir::BasicBlock>,
+ unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx>;
+
+ /// Directly process an intrinsic without pushing a stack frame. It is the hook's
+ /// responsibility to advance the instruction pointer as appropriate.
+ fn call_intrinsic(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ instance: ty::Instance<'tcx>,
+ args: &[OpTy<'tcx, Self::Provenance>],
+ destination: &PlaceTy<'tcx, Self::Provenance>,
+ target: Option<mir::BasicBlock>,
+ unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx>;
+
+ /// Called to evaluate `Assert` MIR terminators that trigger a panic.
+ fn assert_panic(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ msg: &mir::AssertMessage<'tcx>,
+ unwind: Option<mir::BasicBlock>,
+ ) -> InterpResult<'tcx>;
+
+ /// Called to evaluate `Abort` MIR terminator.
+ fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'tcx, !> {
+ throw_unsup_format!("aborting execution is not supported")
+ }
+
+ /// Called for all binary operations where the LHS has pointer type.
+ ///
+ /// Returns a (value, overflowed) pair if the operation succeeded
+ fn binary_ptr_op(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ bin_op: mir::BinOp,
+ left: &ImmTy<'tcx, Self::Provenance>,
+ right: &ImmTy<'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
+
+ /// Called to read the specified `local` from the `frame`.
+ /// Since reading a ZST is not actually accessing memory or locals, this is never invoked
+ /// for ZST reads.
+ #[inline]
+ fn access_local<'a>(
+ frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+ local: mir::Local,
+ ) -> InterpResult<'tcx, &'a Operand<Self::Provenance>>
+ where
+ 'tcx: 'mir,
+ {
+ frame.locals[local].access()
+ }
+
+ /// Called to write the specified `local` of the `frame`.
+ /// Since writing a ZST is not actually accessing memory or locals, this is never invoked
+ /// for ZST writes.
+ #[inline]
+ fn access_local_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ frame: usize,
+ local: mir::Local,
+ ) -> InterpResult<'tcx, &'a mut Operand<Self::Provenance>>
+ where
+ 'tcx: 'mir,
+ {
+ ecx.stack_mut()[frame].locals[local].access_mut()
+ }
+
+ /// Called before a basic block terminator is executed.
+ /// You can use this to detect endlessly running programs.
+ #[inline]
+ fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Called before a global allocation is accessed.
+ /// `static_def_id` is `Some` if this is the "lazy" allocation of a static.
+ #[inline]
+ fn before_access_global(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &Self,
+ _alloc_id: AllocId,
+ _allocation: ConstAllocation<'tcx>,
+ _static_def_id: Option<DefId>,
+ _is_write: bool,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Return the `AllocId` for the given thread-local static in the current thread.
+ fn thread_local_static_base_pointer(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ def_id: DefId,
+ ) -> InterpResult<'tcx, Pointer<Self::Provenance>> {
+ throw_unsup!(ThreadLocalStatic(def_id))
+ }
+
+ /// Return the root pointer for the given `extern static`.
+ fn extern_static_base_pointer(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ def_id: DefId,
+ ) -> InterpResult<'tcx, Pointer<Self::Provenance>>;
+
+ /// Return a "base" pointer for the given allocation: the one that is used for direct
+ /// accesses to this static/const/fn allocation, or the one returned from the heap allocator.
+ ///
+ /// Not called on `extern` or thread-local statics (those use the methods above).
+ fn adjust_alloc_base_pointer(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ ptr: Pointer,
+ ) -> Pointer<Self::Provenance>;
+
+ /// "Int-to-pointer cast"
+ fn ptr_from_addr_cast(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ addr: u64,
+ ) -> InterpResult<'tcx, Pointer<Option<Self::Provenance>>>;
+
+ /// Marks a pointer as exposed, allowing its provenance
+ /// to be recovered. "Pointer-to-int cast"
+ fn expose_ptr(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ ptr: Pointer<Self::Provenance>,
+ ) -> InterpResult<'tcx>;
+
+ /// Convert a pointer with provenance into an allocation-offset pair
+ /// and extra provenance info.
+ ///
+ /// The returned `AllocId` must be the same as `ptr.provenance.get_alloc_id()`.
+ ///
+ /// When this fails, that means the pointer does not point to a live allocation.
+ fn ptr_get_alloc(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ ptr: Pointer<Self::Provenance>,
+ ) -> Option<(AllocId, Size, Self::ProvenanceExtra)>;
+
+ /// Called to adjust allocations to the Provenance and AllocExtra of this machine.
+ ///
+ /// The way we construct allocations is to always first construct it without extra and then add
+ /// the extra. This keeps uniform code paths for handling both allocations created by CTFE for
+ /// globals, and allocations created by Miri during evaluation.
+ ///
+ /// `kind` is the kind of the allocation being adjusted; it can be `None` when
+ /// it's a global and `GLOBAL_KIND` is `None`.
+ ///
+ /// This should avoid copying if no work has to be done! If this returns an owned
+ /// allocation (because a copy had to be done to adjust things), machine memory will
+ /// cache the result. (This relies on `AllocMap::get_or` being able to add the
+ /// owned allocation to the map even when the map is shared.)
+ ///
+ /// This must only fail if `alloc` contains relocations.
+ fn adjust_allocation<'b>(
+ ecx: &InterpCx<'mir, 'tcx, Self>,
+ id: AllocId,
+ alloc: Cow<'b, Allocation>,
+ kind: Option<MemoryKind<Self::MemoryKind>>,
+ ) -> InterpResult<'tcx, Cow<'b, Allocation<Self::Provenance, Self::AllocExtra>>>;
+
+ /// Hook for performing extra checks on a memory read access.
+ ///
+ /// Takes read-only access to the allocation so that all memory read
+ /// operations can take `&self`. Use a `RefCell` in `AllocExtra` if you
+ /// need to mutate.
+ #[inline(always)]
+ fn memory_read(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &Self,
+ _alloc_extra: &Self::AllocExtra,
+ _prov: (AllocId, Self::ProvenanceExtra),
+ _range: AllocRange,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Hook for performing extra checks on a memory write access.
+ #[inline(always)]
+ fn memory_written(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &mut Self,
+ _alloc_extra: &mut Self::AllocExtra,
+ _prov: (AllocId, Self::ProvenanceExtra),
+ _range: AllocRange,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Hook for performing extra operations on a memory deallocation.
+ #[inline(always)]
+ fn memory_deallocated(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &mut Self,
+ _alloc_extra: &mut Self::AllocExtra,
+ _prov: (AllocId, Self::ProvenanceExtra),
+ _range: AllocRange,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Executes a retagging operation.
+ #[inline]
+ fn retag(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _kind: mir::RetagKind,
+ _place: &PlaceTy<'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Called immediately before a new stack frame gets pushed.
+ fn init_frame_extra(
+ ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ frame: Frame<'mir, 'tcx, Self::Provenance>,
+ ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
+
+ /// Borrow the current thread's stack.
+ fn stack<'a>(
+ ecx: &'a InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>];
+
+ /// Mutably borrow the current thread's stack.
+ fn stack_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>>;
+
+ /// Called immediately after a stack frame got pushed and its locals got initialized.
+ fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
+ Ok(())
+ }
+
+ /// Called immediately after a stack frame got popped, but before jumping back to the caller.
+ /// The `locals` have already been destroyed!
+ fn after_stack_pop(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _frame: Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+ unwinding: bool,
+ ) -> InterpResult<'tcx, StackPopJump> {
+ // By default, we do not support unwinding from panics
+ assert!(!unwinding);
+ Ok(StackPopJump::Normal)
+ }
+}
+
+// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
+// (CTFE and ConstProp) use the same instance. Here, we share that code.
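+// A machine opts in by invoking this macro inside its `Machine` impl; sketch with
+// a hypothetical `MyCtfeMachine` (name for illustration only):
+//
+//     impl<'mir, 'tcx> Machine<'mir, 'tcx> for MyCtfeMachine {
+//         compile_time_machine!(<'mir, 'tcx>);
+//         // ...plus the remaining required methods (`find_mir_or_eval_fn`, ...)
+//     }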
+pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
+ type Provenance = AllocId;
+ type ProvenanceExtra = ();
+
+ type ExtraFnVal = !;
+
+ type MemoryMap =
+ rustc_data_structures::fx::FxHashMap<AllocId, (MemoryKind<Self::MemoryKind>, Allocation)>;
+ const GLOBAL_KIND: Option<Self::MemoryKind> = None; // no copying of globals from `tcx` to machine memory
+
+ type AllocExtra = ();
+ type FrameExtra = ();
+
+ #[inline(always)]
+ fn enforce_alignment(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ // We do not check for alignment to avoid having to carry an `Align`
+ // in `ConstValue::ByRef`.
+ false
+ }
+
+ #[inline(always)]
+ fn force_int_for_alignment_check(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ // We do not support `force_int`.
+ false
+ }
+
+ #[inline(always)]
+ fn enforce_validity(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ false // for now, we don't enforce validity
+ }
+
+ #[inline(always)]
+ fn enforce_number_init(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ true
+ }
+
+ #[inline(always)]
+ fn checked_binop_checks_overflow(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
+ true
+ }
+
+ #[inline(always)]
+ fn call_extra_fn(
+ _ecx: &mut InterpCx<$mir, $tcx, Self>,
+ fn_val: !,
+ _abi: CallAbi,
+ _args: &[OpTy<$tcx>],
+ _destination: &PlaceTy<$tcx, Self::Provenance>,
+ _target: Option<mir::BasicBlock>,
+ _unwind: StackPopUnwind,
+ ) -> InterpResult<$tcx> {
+ match fn_val {}
+ }
+
+ #[inline(always)]
+ fn adjust_allocation<'b>(
+ _ecx: &InterpCx<$mir, $tcx, Self>,
+ _id: AllocId,
+ alloc: Cow<'b, Allocation>,
+ _kind: Option<MemoryKind<Self::MemoryKind>>,
+ ) -> InterpResult<$tcx, Cow<'b, Allocation<Self::Provenance>>> {
+ Ok(alloc)
+ }
+
+ fn extern_static_base_pointer(
+ ecx: &InterpCx<$mir, $tcx, Self>,
+ def_id: DefId,
+ ) -> InterpResult<$tcx, Pointer> {
+ // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
+ Ok(Pointer::new(ecx.tcx.create_static_alloc(def_id), Size::ZERO))
+ }
+
+ #[inline(always)]
+ fn adjust_alloc_base_pointer(
+ _ecx: &InterpCx<$mir, $tcx, Self>,
+ ptr: Pointer<AllocId>,
+ ) -> Pointer<AllocId> {
+ ptr
+ }
+
+ #[inline(always)]
+ fn ptr_from_addr_cast(
+ _ecx: &InterpCx<$mir, $tcx, Self>,
+ addr: u64,
+ ) -> InterpResult<$tcx, Pointer<Option<AllocId>>> {
+ // Allow these casts, but make the pointer not dereferenceable.
+ // (I.e., they behave like transmutation.)
+ Ok(Pointer::from_addr(addr))
+ }
+
+ #[inline(always)]
+ fn ptr_get_alloc(
+ _ecx: &InterpCx<$mir, $tcx, Self>,
+ ptr: Pointer<AllocId>,
+ ) -> Option<(AllocId, Size, Self::ProvenanceExtra)> {
+ // We know `offset` is relative to the allocation, so we can use `into_parts`.
+ let (alloc_id, offset) = ptr.into_parts();
+ Some((alloc_id, offset, ()))
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
new file mode 100644
index 000000000..ed2c4edf9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -0,0 +1,1224 @@
+//! The memory subsystem.
+//!
+//! Generally, we use `Pointer` to denote memory addresses. However, some operations
+//! have a "size"-like parameter, and they take `Scalar` for the address because
+//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
+//! integer. It is crucial that these operations call `check_align` *before*
+//! short-circuiting the empty case!
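+//!
+//! For example, a zero-sized access through a dangling (but properly aligned,
+//! non-null) integer address like `0x4` is accepted, while a zero-sized access
+//! through the null address is an error.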
+
+use std::assert_matches::assert_matches;
+use std::borrow::Cow;
+use std::collections::VecDeque;
+use std::fmt;
+use std::ptr;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::mir::display_allocation;
+use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
+use rustc_target::abi::{Align, HasDataLayout, Size};
+
+use super::{
+ alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
+ InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
+ ScalarMaybeUninit,
+};
+
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum MemoryKind<T> {
+ /// Stack memory. Error if deallocated except during a stack pop.
+ Stack,
+ /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
+ CallerLocation,
+ /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
+ Machine(T),
+}
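+
+// Machines distinguish further kinds via `Machine(T)`; e.g. (hypothetical kind,
+// for illustration only) a machine modelling a heap could tag those allocations
+// as `MemoryKind::Machine(MyKind::Heap)` so that `deallocate_ptr` below can
+// reject frees with a mismatched kind.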
+
+impl<T: MayLeak> MayLeak for MemoryKind<T> {
+ #[inline]
+ fn may_leak(self) -> bool {
+ match self {
+ MemoryKind::Stack => false,
+ MemoryKind::CallerLocation => true,
+ MemoryKind::Machine(k) => k.may_leak(),
+ }
+ }
+}
+
+impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ MemoryKind::Stack => write!(f, "stack variable"),
+ MemoryKind::CallerLocation => write!(f, "caller location"),
+ MemoryKind::Machine(m) => write!(f, "{}", m),
+ }
+ }
+}
+
+/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
+pub enum AllocKind {
+ /// A regular live data allocation.
+ LiveData,
+ /// A function allocation (that fn ptrs point to).
+ Function,
+ /// A (symbolic) vtable allocation.
+ VTable,
+ /// A dead allocation.
+ Dead,
+}
+
+/// The value of a function pointer.
+#[derive(Debug, Copy, Clone)]
+pub enum FnVal<'tcx, Other> {
+ Instance(Instance<'tcx>),
+ Other(Other),
+}
+
+impl<'tcx, Other> FnVal<'tcx, Other> {
+ pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
+ match self {
+ FnVal::Instance(instance) => Ok(instance),
+ FnVal::Other(_) => {
+ throw_unsup_format!("'foreign' function pointers are not supported in this context")
+ }
+ }
+ }
+}
+
+// `Memory` has to depend on the `Machine` because some of its operations
+// (e.g., `get`) call a `Machine` hook.
+pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+ /// Allocations local to this instance of the miri engine. The kind
+ /// helps ensure that the same mechanism is used for allocation and
+ /// deallocation. When an allocation is not found here, it is a
+ /// global and looked up in the `tcx` for read access. Some machines may
+ /// have to mutate this map even on a read-only access to a global (because
+ /// they do pointer provenance tracking and the allocations in `tcx` have
+ /// the wrong type), so we let the machine override this type.
+ /// Either way, if the machine allows writing to a global, doing so will
+ /// create a copy of the global allocation here.
+ // FIXME: this should not be public, but interning currently needs access to it
+ pub(super) alloc_map: M::MemoryMap,
+
+ /// Map for "extra" function pointers.
+ extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,
+
+ /// To be able to compare pointers with null, and to check alignment for accesses
+ /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
+ /// that do not exist any more.
+ // FIXME: this should not be public, but interning currently needs access to it
+ pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
+}
+
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+#[derive(Copy, Clone)]
+pub struct AllocRef<'a, 'tcx, Prov, Extra> {
+ alloc: &'a Allocation<Prov, Extra>,
+ range: AllocRange,
+ tcx: TyCtxt<'tcx>,
+ alloc_id: AllocId,
+}
+/// A reference to some allocation that was already bounds-checked for the given region
+/// and had the on-access machine hooks run.
+pub struct AllocRefMut<'a, 'tcx, Prov, Extra> {
+ alloc: &'a mut Allocation<Prov, Extra>,
+ range: AllocRange,
+ tcx: TyCtxt<'tcx>,
+ alloc_id: AllocId,
+}
+
+impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
+ pub fn new() -> Self {
+ Memory {
+ alloc_map: M::MemoryMap::default(),
+ extra_fn_ptr_map: FxHashMap::default(),
+ dead_alloc_map: FxHashMap::default(),
+ }
+ }
+
+ /// This is used by [priroda](https://github.com/oli-obk/priroda)
+ pub fn alloc_map(&self) -> &M::MemoryMap {
+ &self.alloc_map
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
+ /// the machine pointer to the allocation. Must never be used
+ /// for any other pointers, nor for TLS statics.
+ ///
+ /// Using the resulting pointer represents a *direct* access to that memory
+ /// (e.g. by directly using a `static`),
+ /// as opposed to access through a pointer that was created by the program.
+ ///
+ /// This function can fail only if `ptr` points to an `extern static`.
+ #[inline]
+ pub fn global_base_pointer(
+ &self,
+ ptr: Pointer<AllocId>,
+ ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+ let alloc_id = ptr.provenance;
+ // We need to handle `extern static`.
+ match self.tcx.try_get_global_alloc(alloc_id) {
+ Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
+ bug!("global memory cannot point to thread-local static")
+ }
+ Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
+ return M::extern_static_base_pointer(self, def_id);
+ }
+ _ => {}
+ }
+ // And we need to get the provenance.
+ Ok(M::adjust_alloc_base_pointer(self, ptr))
+ }
+
+ pub fn create_fn_alloc_ptr(
+ &mut self,
+ fn_val: FnVal<'tcx, M::ExtraFnVal>,
+ ) -> Pointer<M::Provenance> {
+ let id = match fn_val {
+ FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
+ FnVal::Other(extra) => {
+ // FIXME(RalfJung): Should we have a cache here?
+ let id = self.tcx.reserve_alloc_id();
+ let old = self.memory.extra_fn_ptr_map.insert(id, extra);
+ assert!(old.is_none());
+ id
+ }
+ };
+ // Functions are global allocations, so make sure we get the right base pointer.
+ // We know this is not an `extern static` so this cannot fail.
+ self.global_base_pointer(Pointer::from(id)).unwrap()
+ }
+
+ pub fn allocate_ptr(
+ &mut self,
+ size: Size,
+ align: Align,
+ kind: MemoryKind<M::MemoryKind>,
+ ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+ let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
+ // We can `unwrap` since `alloc` contains no pointers.
+ Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
+ }
+
+ pub fn allocate_bytes_ptr(
+ &mut self,
+ bytes: &[u8],
+ align: Align,
+ kind: MemoryKind<M::MemoryKind>,
+ mutability: Mutability,
+ ) -> Pointer<M::Provenance> {
+ let alloc = Allocation::from_bytes(bytes, align, mutability);
+ // We can `unwrap` since `alloc` contains no pointers.
+ self.allocate_raw_ptr(alloc, kind).unwrap()
+ }
+
+ /// This can fail only if `alloc` contains relocations.
+ pub fn allocate_raw_ptr(
+ &mut self,
+ alloc: Allocation,
+ kind: MemoryKind<M::MemoryKind>,
+ ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+ let id = self.tcx.reserve_alloc_id();
+ debug_assert_ne!(
+ Some(kind),
+ M::GLOBAL_KIND.map(MemoryKind::Machine),
+ "dynamically allocating global memory"
+ );
+ let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
+ self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
+ Ok(M::adjust_alloc_base_pointer(self, Pointer::from(id)))
+ }
+
+ pub fn reallocate_ptr(
+ &mut self,
+ ptr: Pointer<Option<M::Provenance>>,
+ old_size_and_align: Option<(Size, Align)>,
+ new_size: Size,
+ new_align: Align,
+ kind: MemoryKind<M::MemoryKind>,
+ ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+ let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
+ if offset.bytes() != 0 {
+ throw_ub_format!(
+ "reallocating {:?} which does not point to the beginning of an object",
+ ptr
+ );
+ }
+
+ // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
+ // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
+ let new_ptr = self.allocate_ptr(new_size, new_align, kind)?;
+ let old_size = match old_size_and_align {
+ Some((size, _align)) => size,
+ None => self.get_alloc_raw(alloc_id)?.size(),
+ };
+ // This will also call the access hooks.
+ self.mem_copy(
+ ptr,
+ Align::ONE,
+ new_ptr.into(),
+ Align::ONE,
+ old_size.min(new_size),
+ /*nonoverlapping*/ true,
+ )?;
+ self.deallocate_ptr(ptr, old_size_and_align, kind)?;
+
+ Ok(new_ptr)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn deallocate_ptr(
+ &mut self,
+ ptr: Pointer<Option<M::Provenance>>,
+ old_size_and_align: Option<(Size, Align)>,
+ kind: MemoryKind<M::MemoryKind>,
+ ) -> InterpResult<'tcx> {
+ let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr)?;
+ trace!("deallocating: {alloc_id:?}");
+
+ if offset.bytes() != 0 {
+ throw_ub_format!(
+ "deallocating {:?} which does not point to the beginning of an object",
+ ptr
+ );
+ }
+
+ let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
+ // Deallocating global memory -- always an error
+ return Err(match self.tcx.try_get_global_alloc(alloc_id) {
+ Some(GlobalAlloc::Function(..)) => {
+ err_ub_format!("deallocating {alloc_id:?}, which is a function")
+ }
+ Some(GlobalAlloc::VTable(..)) => {
+ err_ub_format!("deallocating {alloc_id:?}, which is a vtable")
+ }
+ Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
+ err_ub_format!("deallocating {alloc_id:?}, which is static memory")
+ }
+ None => err_ub!(PointerUseAfterFree(alloc_id)),
+ }
+ .into());
+ };
+
+ debug!(?alloc);
+
+ if alloc.mutability == Mutability::Not {
+ throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
+ }
+ if alloc_kind != kind {
+ throw_ub_format!(
+ "deallocating {alloc_id:?}, which is {alloc_kind} memory, using {kind} deallocation operation"
+ );
+ }
+ if let Some((size, align)) = old_size_and_align {
+ if size != alloc.size() || align != alloc.align {
+ throw_ub_format!(
+ "incorrect layout on deallocation: {alloc_id:?} has size {} and alignment {}, but gave size {} and alignment {}",
+ alloc.size().bytes(),
+ alloc.align.bytes(),
+ size.bytes(),
+ align.bytes(),
+ )
+ }
+ }
+
+ // Let the machine take some extra action
+ let size = alloc.size();
+ M::memory_deallocated(
+ *self.tcx,
+ &mut self.machine,
+ &mut alloc.extra,
+ (alloc_id, prov),
+ alloc_range(Size::ZERO, size),
+ )?;
+
+ // Don't forget to remember size and align of this now-dead allocation
+ let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
+ if old.is_some() {
+ bug!("Nothing can be deallocated twice");
+ }
+
+ Ok(())
+ }
+
+ /// Internal helper function to determine the allocation and offset of a pointer (if any).
+ #[inline(always)]
+ fn get_ptr_access(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ size: Size,
+ align: Align,
+ ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
+ let align = M::enforce_alignment(&self).then_some(align);
+ self.check_and_deref_ptr(
+ ptr,
+ size,
+ align,
+ CheckInAllocMsg::MemoryAccessTest,
+ |alloc_id, offset, prov| {
+ let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+ Ok((size, align, (alloc_id, offset, prov)))
+ },
+ )
+ }
+
+ /// Check if the given pointer points to live memory of given `size` and `align`
+ /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
+ /// out-of-bounds case.
+ #[inline(always)]
+ pub fn check_ptr_access_align(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ size: Size,
+ align: Align,
+ msg: CheckInAllocMsg,
+ ) -> InterpResult<'tcx> {
+ self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
+ let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
+ Ok((size, align, ()))
+ })?;
+ Ok(())
+ }
+
+ /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
+ /// to the allocation it points to. Supports both shared and mutable references, as the actual
+ /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
+ /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
+ fn check_and_deref_ptr<T>(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ size: Size,
+ align: Option<Align>,
+ msg: CheckInAllocMsg,
+ alloc_size: impl FnOnce(
+ AllocId,
+ Size,
+ M::ProvenanceExtra,
+ ) -> InterpResult<'tcx, (Size, Align, T)>,
+ ) -> InterpResult<'tcx, Option<T>> {
+ fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
+ if offset % align.bytes() == 0 {
+ Ok(())
+ } else {
+ // The biggest power of two through which `offset` is divisible.
+ let offset_pow2 = 1 << offset.trailing_zeros();
+ throw_ub!(AlignmentCheckFailed {
+ has: Align::from_bytes(offset_pow2).unwrap(),
+ required: align,
+ })
+ }
+ }
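+ // E.g., an offset of 12 has two trailing zero bits, so `offset_pow2` is 4:
+ // such an offset is 4-aligned but not 8-aligned, and a required alignment
+ // of 8 fails with `has: 4, required: 8`.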
+
+ Ok(match self.ptr_try_get_alloc_id(ptr) {
+ Err(addr) => {
+ // We couldn't get a proper allocation. This is only okay if the access size is 0,
+ // and the address is not null.
+ if size.bytes() > 0 || addr == 0 {
+ throw_ub!(DanglingIntPointer(addr, msg));
+ }
+ // Must be aligned.
+ if let Some(align) = align {
+ check_offset_align(addr, align)?;
+ }
+ None
+ }
+ Ok((alloc_id, offset, prov)) => {
+ let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
+ // Test bounds. This also ensures non-null.
+ // It is sufficient to check this for the end pointer. Also check for overflow!
+ if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
+ throw_ub!(PointerOutOfBounds {
+ alloc_id,
+ alloc_size,
+ ptr_offset: self.machine_usize_to_isize(offset.bytes()),
+ ptr_size: size,
+ msg,
+ })
+ }
+ // Ensure we never consider the null pointer dereferenceable.
+ if M::Provenance::OFFSET_IS_ADDR {
+ assert_ne!(ptr.addr(), Size::ZERO);
+ }
+ // Test align. Check this last; if both bounds and alignment are violated
+ // we want the error to be about the bounds.
+ if let Some(align) = align {
+ if M::force_int_for_alignment_check(self) {
+ // `force_int_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
+ check_offset_align(ptr.addr().bytes(), align)?;
+ } else {
+ // Check allocation alignment and offset alignment.
+ if alloc_align.bytes() < align.bytes() {
+ throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
+ }
+ check_offset_align(offset.bytes(), align)?;
+ }
+ }
+
+ // We can still be zero-sized in this branch, in which case we have to
+ // return `None`.
+ if size.bytes() == 0 { None } else { Some(ret_val) }
+ }
+ })
+ }
+}
+
+/// Allocation accessors
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Helper function to obtain a global (tcx) allocation.
+ /// This attempts to return a reference to an existing allocation if
+ /// one can be found in `tcx`. That, however, is only possible if `tcx` and
+ /// this machine use the same pointer provenance, so it is indirected through
+ /// `M::adjust_allocation`.
+ fn get_global_alloc(
+ &self,
+ id: AllocId,
+ is_write: bool,
+ ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra>>> {
+ let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
+ Some(GlobalAlloc::Memory(mem)) => {
+ // Memory of a constant, a promoted, or anonymous memory referenced by a static.
+ (mem, None)
+ }
+ Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
+ Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
+ None => throw_ub!(PointerUseAfterFree(id)),
+ Some(GlobalAlloc::Static(def_id)) => {
+ assert!(self.tcx.is_static(def_id));
+ assert!(!self.tcx.is_thread_local_static(def_id));
+ // Notice that every static has two `AllocId`s that will resolve to the same
+ // thing here: one maps to `GlobalAlloc::Static`, which is the "lazy" ID,
+ // and the other one maps to `GlobalAlloc::Memory`, which is returned by
+ // `eval_static_initializer` and is the "resolved" ID.
+ // The resolved ID is never used by the interpreted program; it is hidden.
+ // This is relied upon for soundness of const-patterns; a pointer to the resolved
+ // ID would "sidestep" the checks that make sure consts do not point to statics!
+ // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
+ // contains a reference to memory that was created during its evaluation (i.e., not
+ // to another static), those inner references only exist in "resolved" form.
+ if self.tcx.is_foreign_item(def_id) {
+ // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
+ // referencing arbitrary (declared) extern statics.
+ throw_unsup!(ReadExternStatic(def_id));
+ }
+
+ // Use a precise span for better cycle errors.
+ (self.tcx.at(self.cur_span()).eval_static_initializer(def_id)?, Some(def_id))
+ }
+ };
+ M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
+ // We got tcx memory. Let the machine initialize its "extra" stuff.
+ M::adjust_allocation(
+ self,
+ id, // always use the ID we got as input, not the "hidden" one.
+ Cow::Borrowed(alloc.inner()),
+ M::GLOBAL_KIND.map(MemoryKind::Machine),
+ )
+ }
+
+ /// Gives raw access to the `Allocation`, without bounds or alignment checks.
+ /// The caller is responsible for calling the access hooks!
+ fn get_alloc_raw(
+ &self,
+ id: AllocId,
+ ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> {
+ // The error type of the inner closure here is somewhat funny. We have two
+ // ways of "erroring": An actual error, or because we got a reference from
+ // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
+ // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
+ let a = self.memory.alloc_map.get_or(id, || {
+ let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
+ match alloc {
+ Cow::Borrowed(alloc) => {
+ // We got a ref, cheaply return that as an "error" so that the
+ // map does not get mutated.
+ Err(Ok(alloc))
+ }
+ Cow::Owned(alloc) => {
+ // Need to put it into the map and return a ref to that
+ let kind = M::GLOBAL_KIND.expect(
+ "I got a global allocation that I have to copy but the machine does \
+ not expect that to happen",
+ );
+ Ok((MemoryKind::Machine(kind), alloc))
+ }
+ }
+ });
+ // Now unpack that funny error type
+ match a {
+ Ok(a) => Ok(&a.1),
+ Err(a) => a,
+ }
+ }
+
+ /// "Safe" (bounds and align-checked) allocation access.
+ pub fn get_ptr_alloc<'a>(
+ &'a self,
+ ptr: Pointer<Option<M::Provenance>>,
+ size: Size,
+ align: Align,
+ ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
+ let align = M::enforce_alignment(self).then_some(align);
+ let ptr_and_alloc = self.check_and_deref_ptr(
+ ptr,
+ size,
+ align,
+ CheckInAllocMsg::MemoryAccessTest,
+ |alloc_id, offset, prov| {
+ let alloc = self.get_alloc_raw(alloc_id)?;
+ Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
+ },
+ )?;
+ if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
+ let range = alloc_range(offset, size);
+ M::memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
+ Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
+ } else {
+ // Even in this branch we have to be sure that we actually access the allocation, in
+ // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
+ // magically pulling *any* ZST value from the ether. However, the `get_alloc_raw` above is
+ // always called when `ptr` has an `AllocId`.
+ Ok(None)
+ }
+ }
+
+ /// Return the `extra` field of the given allocation.
+ pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
+ Ok(&self.get_alloc_raw(id)?.extra)
+ }
+
+ /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
+ /// The caller is responsible for calling the access hooks!
+ ///
+ /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
+ /// allocation.
+ fn get_alloc_raw_mut(
+ &mut self,
+ id: AllocId,
+ ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra>, &mut M)> {
+ // We have "NLL problem case #3" here, which cannot be worked around without loss of
+ // efficiency even for the common case where the key is in the map.
+ // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
+ // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
+ if self.memory.alloc_map.get_mut(id).is_none() {
+ // Slow path.
+ // Allocation not found locally, so look it up globally.
+ let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
+ let kind = M::GLOBAL_KIND.expect(
+ "I got a global allocation that I have to copy but the machine does \
+ not expect that to happen",
+ );
+ self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
+ }
+
+ let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
+ if alloc.mutability == Mutability::Not {
+ throw_ub!(WriteToReadOnly(id))
+ }
+ Ok((alloc, &mut self.machine))
+ }
+
+ /// "Safe" (bounds and align-checked) allocation access.
+ pub fn get_ptr_alloc_mut<'a>(
+ &'a mut self,
+ ptr: Pointer<Option<M::Provenance>>,
+ size: Size,
+ align: Align,
+ ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
+ let parts = self.get_ptr_access(ptr, size, align)?;
+ if let Some((alloc_id, offset, prov)) = parts {
+ let tcx = *self.tcx;
+ // FIXME: can we somehow avoid looking up the allocation twice here?
+ // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
+ let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
+ let range = alloc_range(offset, size);
+ M::memory_written(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
+ Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
+ } else {
+ Ok(None)
+ }
+ }
+
+ /// Return the `extra` field of the given allocation.
+ pub fn get_alloc_extra_mut<'a>(
+ &'a mut self,
+ id: AllocId,
+ ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
+ let (alloc, machine) = self.get_alloc_raw_mut(id)?;
+ Ok((&mut alloc.extra, machine))
+ }
+
+ /// Obtain the size and alignment of an allocation, even if that allocation has
+ /// been deallocated.
+ pub fn get_alloc_info(&self, id: AllocId) -> (Size, Align, AllocKind) {
+ // # Regular allocations
+ // Don't use `self.get_raw` here as that will
+ // a) cause cycles in case `id` refers to a static
+ // b) duplicate a global's allocation in miri
+ if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
+ return (alloc.size(), alloc.align, AllocKind::LiveData);
+ }
+
+ // # Function pointers
+ // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
+ if self.get_fn_alloc(id).is_some() {
+ return (Size::ZERO, Align::ONE, AllocKind::Function);
+ }
+
+ // # Statics
+ // Can't do this in the match argument, we may get cycle errors since the lock would
+ // be held throughout the match.
+ match self.tcx.try_get_global_alloc(id) {
+ Some(GlobalAlloc::Static(def_id)) => {
+ assert!(self.tcx.is_static(def_id));
+ assert!(!self.tcx.is_thread_local_static(def_id));
+ // Use size and align of the type.
+ let ty = self.tcx.type_of(def_id);
+ let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
+ assert!(!layout.is_unsized());
+ (layout.size, layout.align.abi, AllocKind::LiveData)
+ }
+ Some(GlobalAlloc::Memory(alloc)) => {
+ // Need to duplicate the logic here, because the global allocations have
+ // different associated types than the interpreter-local ones.
+ let alloc = alloc.inner();
+ (alloc.size(), alloc.align, AllocKind::LiveData)
+ }
+ Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
+ Some(GlobalAlloc::VTable(..)) => {
+ // No data to be accessed here. But vtables are pointer-aligned.
+ return (Size::ZERO, self.tcx.data_layout.pointer_align.abi, AllocKind::VTable);
+ }
+ // The rest must be dead.
+ None => {
+ // Deallocated pointers are allowed; we should be able to find
+ // them in the map.
+ let (size, align) = *self
+ .memory
+ .dead_alloc_map
+ .get(&id)
+ .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
+ (size, align, AllocKind::Dead)
+ }
+ }
+ }
+
+ /// Obtain the size and alignment of a live allocation.
+ pub fn get_live_alloc_size_and_align(&self, id: AllocId) -> InterpResult<'tcx, (Size, Align)> {
+ let (size, align, kind) = self.get_alloc_info(id);
+ if matches!(kind, AllocKind::Dead) {
+ throw_ub!(PointerUseAfterFree(id))
+ }
+ Ok((size, align))
+ }
+
+ fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
+ if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
+ Some(FnVal::Other(*extra))
+ } else {
+ match self.tcx.try_get_global_alloc(id) {
+ Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
+ _ => None,
+ }
+ }
+ }
+
+ pub fn get_ptr_fn(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
+ trace!("get_ptr_fn({:?})", ptr);
+ let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
+ if offset.bytes() != 0 {
+ throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
+ }
+ self.get_fn_alloc(alloc_id)
+ .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
+ }
+
+ pub fn get_ptr_vtable(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ ) -> InterpResult<'tcx, (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)> {
+ trace!("get_ptr_vtable({:?})", ptr);
+ let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
+ if offset.bytes() != 0 {
+ throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
+ }
+ match self.tcx.try_get_global_alloc(alloc_id) {
+ Some(GlobalAlloc::VTable(ty, trait_ref)) => Ok((ty, trait_ref)),
+ _ => throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset))),
+ }
+ }
+
+ pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
+ self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
+ Ok(())
+ }
+
+ /// Create a lazy debug printer that prints the given allocation and all allocations it points
+ /// to, recursively.
+ #[must_use]
+ pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+ self.dump_allocs(vec![id])
+ }
+
+ /// Create a lazy debug printer for a list of allocations and all allocations they point to,
+ /// recursively.
+ #[must_use]
+ pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
+ allocs.sort();
+ allocs.dedup();
+ DumpAllocs { ecx: self, allocs }
+ }
+
+ /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
+ /// are not considered leaked. Leaks whose kind `may_leak()` returns true are not reported.
+ pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
+ // Collect the set of allocations that are *reachable* from `Global` allocations.
+ let reachable = {
+ let mut reachable = FxHashSet::default();
+ let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
+ let mut todo: Vec<_> =
+ self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
+ if Some(kind) == global_kind { Some(id) } else { None }
+ });
+ todo.extend(static_roots);
+ while let Some(id) = todo.pop() {
+ if reachable.insert(id) {
+ // This is a new allocation, add its relocations to `todo`.
+ if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
+ todo.extend(
+ alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()),
+ );
+ }
+ }
+ }
+ reachable
+ };
+
+ // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
+ let leaks: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
+ if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
+ });
+ let n = leaks.len();
+ if n > 0 {
+ eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
+ }
+ n
+ }
+}
+
+#[doc(hidden)]
+/// There's no way to use this directly; it's just a helper struct for the `dump_alloc(s)` methods.
+pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+ ecx: &'a InterpCx<'mir, 'tcx, M>,
+ allocs: Vec<AllocId>,
+}
+
+impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Cannot be a closure because it is generic in `Prov`, `Extra`.
+ fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra>(
+ fmt: &mut std::fmt::Formatter<'_>,
+ tcx: TyCtxt<'tcx>,
+ allocs_to_print: &mut VecDeque<AllocId>,
+ alloc: &Allocation<Prov, Extra>,
+ ) -> std::fmt::Result {
+ for alloc_id in alloc.relocations().values().filter_map(|prov| prov.get_alloc_id()) {
+ allocs_to_print.push_back(alloc_id);
+ }
+ write!(fmt, "{}", display_allocation(tcx, alloc))
+ }
+
+ let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
+ // `allocs_printed` contains all allocations that we have already printed.
+ let mut allocs_printed = FxHashSet::default();
+
+ while let Some(id) = allocs_to_print.pop_front() {
+ if !allocs_printed.insert(id) {
+ // Already printed, so skip this.
+ continue;
+ }
+
+ write!(fmt, "{id:?}")?;
+ match self.ecx.memory.alloc_map.get(id) {
+ Some(&(kind, ref alloc)) => {
+ // normal alloc
+ write!(fmt, " ({}, ", kind)?;
+ write_allocation_track_relocs(
+ &mut *fmt,
+ *self.ecx.tcx,
+ &mut allocs_to_print,
+ alloc,
+ )?;
+ }
+ None => {
+ // global alloc
+ match self.ecx.tcx.try_get_global_alloc(id) {
+ Some(GlobalAlloc::Memory(alloc)) => {
+ write!(fmt, " (unchanged global, ")?;
+ write_allocation_track_relocs(
+ &mut *fmt,
+ *self.ecx.tcx,
+ &mut allocs_to_print,
+ alloc.inner(),
+ )?;
+ }
+ Some(GlobalAlloc::Function(func)) => {
+ write!(fmt, " (fn: {func})")?;
+ }
+ Some(GlobalAlloc::VTable(ty, Some(trait_ref))) => {
+ write!(fmt, " (vtable: impl {trait_ref} for {ty})")?;
+ }
+ Some(GlobalAlloc::VTable(ty, None)) => {
+ write!(fmt, " (vtable: impl <auto trait> for {ty})")?;
+ }
+ Some(GlobalAlloc::Static(did)) => {
+ write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
+ }
+ None => {
+ write!(fmt, " (deallocated)")?;
+ }
+ }
+ }
+ }
+ writeln!(fmt)?;
+ }
+ Ok(())
+ }
+}
+
+/// Reading and writing.
+impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
+ /// `range` is relative to this allocation reference, not the base of the allocation.
+ pub fn write_scalar(
+ &mut self,
+ range: AllocRange,
+ val: ScalarMaybeUninit<Prov>,
+ ) -> InterpResult<'tcx> {
+ let range = self.range.subrange(range);
+ debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
+ Ok(self
+ .alloc
+ .write_scalar(&self.tcx, range, val)
+ .map_err(|e| e.to_interp_error(self.alloc_id))?)
+ }
+
+ /// `offset` is relative to this allocation reference, not the base of the allocation.
+ pub fn write_ptr_sized(
+ &mut self,
+ offset: Size,
+ val: ScalarMaybeUninit<Prov>,
+ ) -> InterpResult<'tcx> {
+ self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
+ }
+
+ /// Mark the entire referenced range as uninitialized
+ pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
+ Ok(self
+ .alloc
+ .write_uninit(&self.tcx, self.range)
+ .map_err(|e| e.to_interp_error(self.alloc_id))?)
+ }
+}
+
+impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
+ /// `range` is relative to this allocation reference, not the base of the allocation.
+ pub fn read_scalar(
+ &self,
+ range: AllocRange,
+ read_provenance: bool,
+ ) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
+ let range = self.range.subrange(range);
+ let res = self
+ .alloc
+ .read_scalar(&self.tcx, range, read_provenance)
+ .map_err(|e| e.to_interp_error(self.alloc_id))?;
+ debug!("read_scalar at {:?}{range:?}: {res:?}", self.alloc_id);
+ Ok(res)
+ }
+
+ /// `range` is relative to this allocation reference, not the base of the allocation.
+ pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
+ self.read_scalar(range, /*read_provenance*/ false)
+ }
+
+ /// `offset` is relative to this allocation reference, not the base of the allocation.
+ pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Prov>> {
+ self.read_scalar(
+ alloc_range(offset, self.tcx.data_layout().pointer_size),
+ /*read_provenance*/ true,
+ )
+ }
+
+ /// `range` is relative to this allocation reference, not the base of the allocation.
+ pub fn check_bytes(
+ &self,
+ range: AllocRange,
+ allow_uninit: bool,
+ allow_ptr: bool,
+ ) -> InterpResult<'tcx> {
+ Ok(self
+ .alloc
+ .check_bytes(&self.tcx, self.range.subrange(range), allow_uninit, allow_ptr)
+ .map_err(|e| e.to_interp_error(self.alloc_id))?)
+ }
+
+ /// Returns whether the allocation has relocations for the entire range of the `AllocRef`.
+ pub(crate) fn has_relocations(&self) -> bool {
+ self.alloc.has_relocations(&self.tcx, self.range)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Reads the given number of bytes from memory. Returns them as a slice.
+ ///
+ /// Performs appropriate bounds checks.
+ pub fn read_bytes_ptr(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ size: Size,
+ ) -> InterpResult<'tcx, &[u8]> {
+ let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
+ // zero-sized access
+ return Ok(&[]);
+ };
+ // Side-step AllocRef and directly access the underlying bytes more efficiently.
+ // (We are staying inside the bounds here so all is good.)
+ Ok(alloc_ref
+ .alloc
+ .get_bytes(&alloc_ref.tcx, alloc_ref.range)
+ .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
+ }
+
+ /// Writes the given stream of bytes into memory.
+ ///
+ /// Performs appropriate bounds checks.
+ pub fn write_bytes_ptr(
+ &mut self,
+ ptr: Pointer<Option<M::Provenance>>,
+ src: impl IntoIterator<Item = u8>,
+ ) -> InterpResult<'tcx> {
+ let mut src = src.into_iter();
+ let (lower, upper) = src.size_hint();
+ let len = upper.expect("can only write bounded iterators");
+ assert_eq!(lower, len, "can only write iterators with a precise length");
+
+ let size = Size::from_bytes(len);
+ let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
+ // zero-sized access
+ assert_matches!(
+ src.next(),
+ None,
+ "iterator said it was empty but returned an element"
+ );
+ return Ok(());
+ };
+
+ // Side-step AllocRef and directly access the underlying bytes more efficiently.
+ // (We are staying inside the bounds here so all is good.)
+ let alloc_id = alloc_ref.alloc_id;
+ let bytes = alloc_ref
+ .alloc
+ .get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
+ .map_err(move |e| e.to_interp_error(alloc_id))?;
+ // `zip` would stop when the first iterator ends; we want to definitely
+ // cover all of `bytes`.
+ for dest in bytes {
+ *dest = src.next().expect("iterator was shorter than it said it would be");
+ }
+ assert_matches!(src.next(), None, "iterator was longer than it said it would be");
+ Ok(())
+ }
+
+ pub fn mem_copy(
+ &mut self,
+ src: Pointer<Option<M::Provenance>>,
+ src_align: Align,
+ dest: Pointer<Option<M::Provenance>>,
+ dest_align: Align,
+ size: Size,
+ nonoverlapping: bool,
+ ) -> InterpResult<'tcx> {
+ self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
+ }
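+
+ // A sketch of the batched use below: an array-repeat initialization like
+ // `[x; N]` can be lowered to one `mem_copy_repeatedly` call that copies the
+ // element's bytes `N` times into consecutive destination slots (assuming the
+ // caller sized the destination as `N * size`).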
+
+ pub fn mem_copy_repeatedly(
+ &mut self,
+ src: Pointer<Option<M::Provenance>>,
+ src_align: Align,
+ dest: Pointer<Option<M::Provenance>>,
+ dest_align: Align,
+ size: Size,
+ num_copies: u64,
+ nonoverlapping: bool,
+ ) -> InterpResult<'tcx> {
+ let tcx = self.tcx;
+ // We need to do our own bounds-checks.
+ let src_parts = self.get_ptr_access(src, size, src_align)?;
+ let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
+
+ // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
+ // and once below to get the underlying `&[mut] Allocation`.
+
+ // Source alloc preparations and access hooks.
+ let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
+ // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
+ return Ok(());
+ };
+ let src_alloc = self.get_alloc_raw(src_alloc_id)?;
+ let src_range = alloc_range(src_offset, size);
+ M::memory_read(*tcx, &self.machine, &src_alloc.extra, (src_alloc_id, src_prov), src_range)?;
+ // We need the `dest` ptr for the next operation, so we get it now.
+ // We already did the source checks and called the hooks so we are good to return early.
+ let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
+ // Zero-sized *destination*.
+ return Ok(());
+ };
+
+ // This checks relocation edges on the src, which needs to happen before
+ // `prepare_relocation_copy`.
+ let src_bytes = src_alloc
+ .get_bytes_with_uninit_and_ptr(&tcx, src_range)
+ .map_err(|e| e.to_interp_error(src_alloc_id))?
+ .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
+ // first copy the relocations to a temporary buffer, because
+ // `get_bytes_mut` will clear the relocations, which is correct,
+ // since we don't want to keep any relocations at the target.
+ let relocations =
+ src_alloc.prepare_relocation_copy(self, src_range, dest_offset, num_copies);
+ // Prepare a copy of the initialization mask.
+ let compressed = src_alloc.compress_uninit_range(src_range);
+
+ // Destination alloc preparations and access hooks.
+ let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
+ let dest_range = alloc_range(dest_offset, size * num_copies);
+ M::memory_written(
+ *tcx,
+ extra,
+ &mut dest_alloc.extra,
+ (dest_alloc_id, dest_prov),
+ dest_range,
+ )?;
+ let dest_bytes = dest_alloc
+ .get_bytes_mut_ptr(&tcx, dest_range)
+ .map_err(|e| e.to_interp_error(dest_alloc_id))?
+ .as_mut_ptr();
+
+ if compressed.no_bytes_init() {
+ // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
+ // is marked as uninitialized but we otherwise omit changing the byte representation which may
+ // be arbitrary for uninitialized bytes.
+ // This also avoids writing to the target bytes so that the backing allocation is never
+ // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
+ // operating systems this can avoid physically allocating the page.
+ dest_alloc
+ .write_uninit(&tcx, dest_range)
+ .map_err(|e| e.to_interp_error(dest_alloc_id))?;
+ // We can forget about the relocations, this is all not initialized anyway.
+ return Ok(());
+ }
+
+ // SAFETY: The above indexing would have panicked if there weren't at least `size` bytes
+ // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
+ // `dest` could possibly overlap.
+ // The pointers above remain valid even if the `HashMap` table is moved around because they
+ // point into the `Vec` storing the bytes.
+ unsafe {
+ if src_alloc_id == dest_alloc_id {
+ if nonoverlapping {
+ // `Size` additions
+ if (src_offset <= dest_offset && src_offset + size > dest_offset)
+ || (dest_offset <= src_offset && dest_offset + size > src_offset)
+ {
+ throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
+ }
+ }
+
+ for i in 0..num_copies {
+ ptr::copy(
+ src_bytes,
+ dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+ size.bytes_usize(),
+ );
+ }
+ } else {
+ for i in 0..num_copies {
+ ptr::copy_nonoverlapping(
+ src_bytes,
+ dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+ size.bytes_usize(),
+ );
+ }
+ }
+ }
+
+ // now fill in all the "init" data
+ dest_alloc.mark_compressed_init_range(
+ &compressed,
+ alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
+ num_copies,
+ );
+ // copy the relocations to the destination
+ dest_alloc.mark_relocation_range(relocations);
+
+ Ok(())
+ }
+}
+
+/// Machine pointer introspection.
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Test if this value might be null.
+ /// If the machine does not support ptr-to-int casts, this is conservative.
+ pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
+ Ok(match scalar.try_to_int() {
+ Ok(int) => int.is_null(),
+ Err(_) => {
+ // Can only happen during CTFE.
+ let ptr = scalar.to_pointer(self)?;
+ match self.ptr_try_get_alloc_id(ptr) {
+ Ok((alloc_id, offset, _)) => {
+ let (size, _align, _kind) = self.get_alloc_info(alloc_id);
+ // If the pointer is out-of-bounds, it may be null.
+ // Note that one-past-the-end (offset == size) is still inbounds, and never null.
+ offset > size
+ }
+ Err(_offset) => bug!("a non-int scalar is always a pointer"),
+ }
+ }
+ })
+ }
+
+ /// Turning a "maybe pointer" into a proper pointer (and some information
+ /// about where it points), or an absolute address.
+ pub fn ptr_try_get_alloc_id(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
+ match ptr.into_pointer_or_addr() {
+ Ok(ptr) => match M::ptr_get_alloc(self, ptr) {
+ Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
+ None => {
+ assert!(M::Provenance::OFFSET_IS_ADDR);
+ let (_, addr) = ptr.into_parts();
+ Err(addr.bytes())
+ }
+ },
+ Err(addr) => Err(addr.bytes()),
+ }
+ }
+
+ /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
+ #[inline(always)]
+ pub fn ptr_get_alloc_id(
+ &self,
+ ptr: Pointer<Option<M::Provenance>>,
+ ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
+ self.ptr_try_get_alloc_id(ptr).map_err(|offset| {
+ err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
+ })
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
new file mode 100644
index 000000000..2e356f67b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -0,0 +1,34 @@
+//! An interpreter for MIR used in CTFE and by miri
+
+mod cast;
+mod eval_context;
+mod intern;
+mod intrinsics;
+mod machine;
+mod memory;
+mod operand;
+mod operator;
+mod place;
+mod projection;
+mod step;
+mod terminator;
+mod traits;
+mod util;
+mod validity;
+mod visitor;
+
+pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
+
+pub use self::eval_context::{
+ Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup, StackPopUnwind,
+};
+pub use self::intern::{intern_const_alloc_recursive, InternKind};
+pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
+pub use self::memory::{AllocKind, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
+pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
+pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
+pub use self::validity::{CtfeValidationMode, RefTracking};
+pub use self::visitor::{MutValueVisitor, Value, ValueVisitor};
+
+pub(crate) use self::intrinsics::eval_nullary_intrinsic;
+use eval_context::{from_known_layout, mir_assign_valid_types};
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
new file mode 100644
index 000000000..94ba62c16
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -0,0 +1,831 @@
+//! Functions concerning immediate values and operands, and reading from operands.
+//! All high-level functions to read from memory work on operands as sources.
+
+use std::fmt::Write;
+
+use rustc_hir::def::Namespace;
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
+use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
+use rustc_middle::{mir, ty};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
+use rustc_target::abi::{VariantIdx, Variants};
+
+use super::{
+ alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
+ InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer,
+ Provenance, Scalar, ScalarMaybeUninit,
+};
+
+/// An `Immediate` represents a single immediate self-contained Rust value.
+///
+/// For optimization of a few very common cases, there is also a representation for a pair of
+/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
+/// operations and wide pointers. This idea was taken from rustc's codegen.
+/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
+/// defined on `Immediate`, and do not have to work with a `Place`.
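+///
+/// For example, a wide pointer such as `&[u8]` is a `ScalarPair(ptr, len)`
+/// (cf. `new_slice` below), and a checked integer addition can produce a
+/// `ScalarPair(result, overflow_flag)` without touching memory.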
+#[derive(Copy, Clone, Debug)]
+pub enum Immediate<Prov: Provenance = AllocId> {
+ /// A single scalar value (must have *initialized* `Scalar` ABI).
+ /// FIXME: we also currently often use this for ZST.
+ /// `ScalarMaybeUninit` should reject ZST, and we should use `Uninit` for them instead.
+ Scalar(ScalarMaybeUninit<Prov>),
+ /// A pair of two scalar values (must have `ScalarPair` ABI where both fields are
+ /// `Scalar::Initialized`).
+ ScalarPair(ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>),
+ /// A value of fully uninitialized memory. Can have arbitrary size and layout.
+ Uninit,
+}
+
+impl<Prov: Provenance> From<ScalarMaybeUninit<Prov>> for Immediate<Prov> {
+ #[inline(always)]
+ fn from(val: ScalarMaybeUninit<Prov>) -> Self {
+ Immediate::Scalar(val)
+ }
+}
+
+impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
+ #[inline(always)]
+ fn from(val: Scalar<Prov>) -> Self {
+ Immediate::Scalar(val.into())
+ }
+}
+
+impl<'tcx, Prov: Provenance> Immediate<Prov> {
+ pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
+ Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
+ }
+
+ pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
+ Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
+ }
+
+ pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
+ Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
+ }
+
+ pub fn new_dyn_trait(
+ val: Scalar<Prov>,
+ vtable: Pointer<Option<Prov>>,
+ cx: &impl HasDataLayout,
+ ) -> Self {
+ Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
+ }
+
+ #[inline]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Prov> {
+ match self {
+ Immediate::Scalar(val) => val,
+ Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
+ Immediate::Uninit => ScalarMaybeUninit::Uninit,
+ }
+ }
+
+ #[inline]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Prov>> {
+ self.to_scalar_or_uninit().check_init()
+ }
+
+ #[inline]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>) {
+ match self {
+ Immediate::ScalarPair(val1, val2) => (val1, val2),
+ Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
+ Immediate::Uninit => (ScalarMaybeUninit::Uninit, ScalarMaybeUninit::Uninit),
+ }
+ }
+
+ #[inline]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Prov>, Scalar<Prov>)> {
+ let (val1, val2) = self.to_scalar_or_uninit_pair();
+ Ok((val1.check_init()?, val2.check_init()?))
+ }
+}
+
+// ScalarPair needs a type to interpret, so we often have an immediate and a type together
+// as input for binary and cast operations.
+#[derive(Clone, Debug)]
+pub struct ImmTy<'tcx, Prov: Provenance = AllocId> {
+ imm: Immediate<Prov>,
+ pub layout: TyAndLayout<'tcx>,
+}
+
+impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ /// Helper function for printing a scalar to a FmtPrinter
+ fn p<'a, 'tcx, Prov: Provenance>(
+ cx: FmtPrinter<'a, 'tcx>,
+ s: ScalarMaybeUninit<Prov>,
+ ty: Ty<'tcx>,
+ ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
+ match s {
+ ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
+ cx.pretty_print_const_scalar_int(int, ty, true)
+ }
+ ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
+ // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
+ // print what it points to, which would fail since it has no access to the local
+ // memory.
+ cx.pretty_print_const_pointer(ptr, ty, true)
+ }
+ ScalarMaybeUninit::Uninit => cx.typed_value(
+ |mut this| {
+ this.write_str("uninit ")?;
+ Ok(this)
+ },
+ |this| this.print_type(ty),
+ " ",
+ ),
+ }
+ }
+ ty::tls::with(|tcx| {
+ match self.imm {
+ Immediate::Scalar(s) => {
+ if let Some(ty) = tcx.lift(self.layout.ty) {
+ let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ f.write_str(&p(cx, s, ty)?.into_buffer())?;
+ return Ok(());
+ }
+ write!(f, "{:x}: {}", s, self.layout.ty)
+ }
+ Immediate::ScalarPair(a, b) => {
+ // FIXME(oli-obk): at least print tuples and slices nicely
+ write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty)
+ }
+ Immediate::Uninit => {
+ write!(f, "uninit: {}", self.layout.ty)
+ }
+ }
+ })
+ }
+}
+
+impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
+ type Target = Immediate<Prov>;
+ #[inline(always)]
+ fn deref(&self) -> &Immediate<Prov> {
+ &self.imm
+ }
+}
+
+/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
+/// or still in memory. The latter is an optimization, to delay reading that chunk of
+/// memory and to avoid having to store arbitrary-sized data here.
+#[derive(Copy, Clone, Debug)]
+pub enum Operand<Prov: Provenance = AllocId> {
+ Immediate(Immediate<Prov>),
+ Indirect(MemPlace<Prov>),
+}
+
+#[derive(Clone, Debug)]
+pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
+ op: Operand<Prov>, // Keep this private; it helps enforce invariants.
+ pub layout: TyAndLayout<'tcx>,
+ /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+ /// it needs to have a different alignment than the field type would usually have.
+ /// So we represent this here with a separate field that "overwrites" `layout.align`.
+ /// This means `layout.align` should never be used for an `OpTy`!
+ /// `None` means "alignment does not matter since this is a by-value operand"
+ /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
+ /// Also CTFE ignores alignment anyway, so this is for Miri only.
+ pub align: Option<Align>,
+}
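+
+// Illustration (plain std Rust, not the interpreter's own API) of why `align` must
+// override `layout.align`: a field of a `repr(packed)` struct keeps its type's
+// layout but may sit at alignment 1.
+#[allow(dead_code)]
+fn _packed_align_illustration() {
+ #[repr(packed)]
+ struct P {
+ a: u8,
+ b: u64, // at offset 1: aligned to 1, though `u64` normally wants more
+ }
+ assert_eq!(std::mem::align_of::<P>(), 1); // packed forces alignment 1
+ assert_eq!(std::mem::size_of::<P>(), 9); // and no padding is inserted
+}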
+
+impl<'tcx, Prov: Provenance> std::ops::Deref for OpTy<'tcx, Prov> {
+ type Target = Operand<Prov>;
+ #[inline(always)]
+ fn deref(&self) -> &Operand<Prov> {
+ &self.op
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+ OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
+ OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
+ OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(val: ImmTy<'tcx, Prov>) -> Self {
+ OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
+ }
+}
+
+impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
+ #[inline]
+ pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+ ImmTy { imm: val.into(), layout }
+ }
+
+ #[inline]
+ pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
+ ImmTy { imm, layout }
+ }
+
+ #[inline]
+ pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
+ ImmTy { imm: Immediate::Uninit, layout }
+ }
+
+ #[inline]
+ pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+ Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
+ }
+ #[inline]
+ pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
+ Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
+ }
+
+ #[inline]
+ pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
+ Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
+ }
+
+ #[inline]
+ pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
+ Self::from_scalar(Scalar::from_int(i, layout.size), layout)
+ }
+
+ #[inline]
+ pub fn to_const_int(self) -> ConstInt {
+ assert!(self.layout.ty.is_integral());
+ let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
+ ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
+ }
+}
+
+impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
+ pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+ if self.layout.is_unsized() {
+ // There are no unsized immediates.
+ self.assert_mem_place().len(cx)
+ } else {
+ match self.layout.fields {
+ abi::FieldsShape::Array { count, .. } => Ok(count),
+ _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+ }
+ }
+ }
+
+ pub fn offset_with_meta(
+ &self,
+ offset: Size,
+ meta: MemPlaceMeta<Prov>,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ match self.try_as_mplace() {
+ Ok(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
+ Err(imm) => {
+ assert!(
+ matches!(*imm, Immediate::Uninit),
+ "Scalar/ScalarPair cannot be offset into"
+ );
+ assert!(!meta.has_meta()); // no place to store metadata here
+ // Every part of an uninit is uninit.
+ Ok(ImmTy::uninit(layout).into())
+ }
+ }
+ }
+
+ pub fn offset(
+ &self,
+ offset: Size,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ assert!(!layout.is_unsized());
+ self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Try reading an immediate in memory; this is particularly interesting for `ScalarPair`.
+ /// Returns `None` if the layout does not permit loading this as a value.
+ ///
+ /// This is an internal function; call `read_immediate` instead.
+ fn read_immediate_from_mplace_raw(
+ &self,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
+ force: bool,
+ ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
+ if mplace.layout.is_unsized() {
+ // Don't touch unsized
+ return Ok(None);
+ }
+
+ let Some(alloc) = self.get_place_alloc(mplace)? else {
+ // zero-sized type can be left uninit
+ return Ok(Some(ImmTy::uninit(mplace.layout)));
+ };
+
+ // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
+ // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
+ // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
+ // case where some of the bytes are initialized and others are not. So, we need an extra
+ // check that walks over the type of `mplace` to make sure it is truly correct to treat this
+ // like a `Scalar` (or `ScalarPair`).
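+ // (Illustration: after
+ //     let mut x = std::mem::MaybeUninit::<u64>::uninit();
+ //     unsafe { x.as_mut_ptr().cast::<u8>().write(0x42) };
+ // byte 0 is initialized while bytes 1..8 are not -- a state no single
+ // interpreter `Scalar` can describe.)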
+ let scalar_layout = match mplace.layout.abi {
+ // `if` does not work nested inside patterns, making this a bit awkward to express.
+ Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => Some(s),
+ Abi::Scalar(s) if force => Some(s.primitive()),
+ _ => None,
+ };
+ if let Some(s) = scalar_layout {
+ let size = s.size(self);
+ assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
+ let scalar = alloc
+ .read_scalar(alloc_range(Size::ZERO, size), /*read_provenance*/ s.is_ptr())?;
+ return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
+ }
+ let scalar_pair_layout = match mplace.layout.abi {
+ Abi::ScalarPair(
+ abi::Scalar::Initialized { value: a, .. },
+ abi::Scalar::Initialized { value: b, .. },
+ ) => Some((a, b)),
+ Abi::ScalarPair(a, b) if force => Some((a.primitive(), b.primitive())),
+ _ => None,
+ };
+ if let Some((a, b)) = scalar_pair_layout {
+ // We checked `ptr_align` above, so all fields will have the alignment they need.
+ // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+ // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+ let (a_size, b_size) = (a.size(self), b.size(self));
+ let b_offset = a_size.align_to(b.align(self).abi);
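+ // (E.g. if `a` occupies 1 byte and `b` requires 4-byte alignment, then
+ // b_offset = align_to(1, 4) = 4.)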
+ assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
+ let a_val = alloc.read_scalar(
+ alloc_range(Size::ZERO, a_size),
+ /*read_provenance*/ a.is_ptr(),
+ )?;
+ let b_val = alloc
+ .read_scalar(alloc_range(b_offset, b_size), /*read_provenance*/ b.is_ptr())?;
+ return Ok(Some(ImmTy {
+ imm: Immediate::ScalarPair(a_val, b_val),
+ layout: mplace.layout,
+ }));
+ }
+ // Neither a scalar nor scalar pair.
+ return Ok(None);
+ }
+
+ /// Try returning an immediate for the operand. If the layout does not permit loading this as an
+ /// immediate, return where in memory we can find the data.
+ /// Note that for a given layout, this operation will either always fail or always
+ /// succeed! Whether it succeeds depends on whether the layout can be represented
+ /// in an `Immediate`, not on which data is stored there currently.
+ ///
+ /// If `force` is `true`, then even scalars with fields that can be uninit will be
+ /// read. This means the load is lossy and should not be written back!
+ /// This flag exists only for validity checking.
+ ///
+ /// This is an internal function that should not usually be used; call `read_immediate` instead.
+ /// ConstProp needs it, though.
+ pub fn read_immediate_raw(
+ &self,
+ src: &OpTy<'tcx, M::Provenance>,
+ force: bool,
+ ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::Provenance>, MPlaceTy<'tcx, M::Provenance>>> {
+ Ok(match src.try_as_mplace() {
+ Ok(ref mplace) => {
+ if let Some(val) = self.read_immediate_from_mplace_raw(mplace, force)? {
+ Ok(val)
+ } else {
+ Err(*mplace)
+ }
+ }
+ Err(val) => Ok(val),
+ })
+ }
+
+ /// Read an immediate from a place, asserting that this is possible with the given layout.
+ #[inline(always)]
+ pub fn read_immediate(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+ if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? {
+ Ok(imm)
+ } else {
+ span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
+ }
+ }
+
+ /// Read a scalar from a place
+ pub fn read_scalar(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> {
+ Ok(self.read_immediate(op)?.to_scalar_or_uninit())
+ }
+
+ /// Read a pointer from a place.
+ pub fn read_pointer(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
+ self.read_scalar(op)?.to_pointer(self)
+ }
+
+ /// Turn the wide MPlace into a string (must already be dereferenced!)
+ pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
+ let len = mplace.len(self)?;
+ let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?;
+ let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
+ Ok(str)
+ }
+
+ /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
+ /// Also returns the number of elements.
+ ///
+ /// Can (but does not always) trigger UB if `op` is uninitialized.
+ pub fn operand_to_simd(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
+ // Basically we just transmute this place into an array following simd_size_and_type.
+ // This only works in memory, but repr(simd) types should never be immediates anyway.
+ assert!(op.layout.ty.is_simd());
+ match op.try_as_mplace() {
+ Ok(mplace) => self.mplace_to_simd(&mplace),
+ Err(imm) => match *imm {
+ Immediate::Uninit => {
+ throw_ub!(InvalidUninitBytes(None))
+ }
+ Immediate::Scalar(..) | Immediate::ScalarPair(..) => {
+ bug!("arrays/slices can never have Scalar/ScalarPair layout")
+ }
+ },
+ }
+ }
+
+ /// Read from a local. Will not actually access the local if reading from a ZST.
+ /// Will not access memory; instead, an indirect `Operand` is returned.
+ ///
+ /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
+ /// OpTy from a local.
+ pub fn local_to_op(
+ &self,
+ frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
+ local: mir::Local,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let layout = self.layout_of_local(frame, local, layout)?;
+ let op = if layout.is_zst() {
+ // Bypass `access_local` (helps in ConstProp)
+ Operand::Immediate(Immediate::Uninit)
+ } else {
+ *M::access_local(frame, local)?
+ };
+ Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+ }
+
+ /// Every place can be read from, so we can turn them into an operand.
+ /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
+ /// will never actually read from memory.
+ #[inline(always)]
+ pub fn place_to_op(
+ &self,
+ place: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let op = match **place {
+ Place::Ptr(mplace) => Operand::Indirect(mplace),
+ Place::Local { frame, local } => {
+ *self.local_to_op(&self.stack()[frame], local, None)?
+ }
+ };
+ Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
+ }
+
+ /// Evaluate a place with the goal of reading from it. This lets us sometimes
+ /// avoid allocations.
+ pub fn eval_place_to_op(
+ &self,
+ mir_place: mir::Place<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ // Do not use the layout passed in as argument if the base we are looking at
+ // here is not the entire place.
+ let layout = if mir_place.projection.is_empty() { layout } else { None };
+
+ let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?;
+ // Using `try_fold` turned out to be bad for performance, hence the loop.
+ for elem in mir_place.projection.iter() {
+ op = self.operand_projection(&op, elem)?
+ }
+
+ trace!("eval_place_to_op: got {:?}", *op);
+ // Sanity-check the type we ended up with.
+ debug_assert!(
+ mir_assign_valid_types(
+ *self.tcx,
+ self.param_env,
+ self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
+ )?)?,
+ op.layout,
+ ),
+ "eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+ op.layout.ty,
+ );
+ Ok(op)
+ }
+
+ /// Evaluate the operand, returning a place where you can then find the data.
+ /// If you already know the layout, you can save two table lookups
+ /// by passing it in here.
+ #[inline]
+ pub fn eval_operand(
+ &self,
+ mir_op: &mir::Operand<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ use rustc_middle::mir::Operand::*;
+ let op = match *mir_op {
+ // FIXME: do some more logic on `move` to invalidate the old location
+ Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,
+
+ Constant(ref constant) => {
+ let val =
+ self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
+
+ // This can still fail:
+ // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
+ // checked yet.
+ // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
+ self.mir_const_to_op(&val, layout)?
+ }
+ };
+ trace!("{:?}: {:?}", mir_op, *op);
+ Ok(op)
+ }
+
+ /// Evaluate a bunch of operands at once
+ pub(super) fn eval_operands(
+ &self,
+ ops: &[mir::Operand<'tcx>],
+ ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::Provenance>>> {
+ ops.iter().map(|op| self.eval_operand(op, None)).collect()
+ }
+
+ /// Used when the Miri engine runs into a constant, and for extracting information
+ /// from constants in patterns via the `const_eval` module.
+ /// The `val` and `layout` are assumed to already be in our interpreter
+ /// "universe" (param_env).
+ pub fn const_to_op(
+ &self,
+ c: ty::Const<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ match c.kind() {
+ ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
+ ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
+ throw_inval!(AlreadyReported(reported))
+ }
+ ty::ConstKind::Unevaluated(uv) => {
+ let instance = self.resolve(uv.def, uv.substs)?;
+ Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
+ }
+ ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
+ span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", c)
+ }
+ ty::ConstKind::Value(valtree) => {
+ let ty = c.ty();
+ let const_val = self.tcx.valtree_to_const_val((ty, valtree));
+ self.const_val_to_op(const_val, ty, layout)
+ }
+ }
+ }
+
+ pub fn mir_const_to_op(
+ &self,
+ val: &mir::ConstantKind<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ match val {
+ mir::ConstantKind::Ty(ct) => self.const_to_op(*ct, layout),
+ mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, *ty, layout),
+ }
+ }
+
+ pub(crate) fn const_val_to_op(
+ &self,
+ val_val: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ layout: Option<TyAndLayout<'tcx>>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ // Other cases need layout.
+ let adjust_scalar = |scalar| -> InterpResult<'tcx, _> {
+ Ok(match scalar {
+ Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
+ Scalar::Int(int) => Scalar::Int(int),
+ })
+ };
+ let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
+ let op = match val_val {
+ ConstValue::ByRef { alloc, offset } => {
+ let id = self.tcx.create_memory_alloc(alloc);
+ // We rely on mutability being set correctly in that allocation to prevent writes
+ // where none should happen.
+ let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
+ Operand::Indirect(MemPlace::from_ptr(ptr.into()))
+ }
+ ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
+ ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
+ ConstValue::Slice { data, start, end } => {
+ // We rely on mutability being set correctly in `data` to prevent writes
+ // where none should happen.
+ let ptr = Pointer::new(
+ self.tcx.create_memory_alloc(data),
+ Size::from_bytes(start), // offset: `start`
+ );
+ Operand::Immediate(Immediate::new_slice(
+ Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
+ u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
+ self,
+ ))
+ }
+ };
+ Ok(OpTy { op, layout, align: Some(layout.align.abi) })
+ }
+
+ /// Read discriminant, return the runtime value as well as the variant index.
+ /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
+ pub fn read_discriminant(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
+ trace!("read_discriminant_value {:#?}", op.layout);
+ // Get type and layout of the discriminant.
+ let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
+ trace!("discriminant type: {:?}", discr_layout.ty);
+
+ // We use "discriminant" to refer to the value associated with a particular enum variant.
+ // This is not to be confused with its "variant index", which just determines its position
+ // in the declared list of variants -- they can differ with explicitly assigned discriminants.
+ // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
+ // straightforward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
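+ // (Example: in `enum E { A = 10, B = 20 }`, variant `B` has variant index 1
+ // but discriminant 20 -- `E::B as i64` is 20.)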
+ let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
+ Variants::Single { index } => {
+ let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
+ Some(discr) => {
+ // This type actually has discriminants.
+ assert_eq!(discr.ty, discr_layout.ty);
+ Scalar::from_uint(discr.val, discr_layout.size)
+ }
+ None => {
+ // On a type without actual discriminants, the variant is 0.
+ assert_eq!(index.as_u32(), 0);
+ Scalar::from_uint(index.as_u32(), discr_layout.size)
+ }
+ };
+ return Ok((discr, index));
+ }
+ Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
+ (tag, tag_encoding, tag_field)
+ }
+ };
+
+ // There are *three* layouts that come into play here:
+ // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
+ // the `Scalar` we return.
+ // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
+ // and used to interpret the value we read from the tag field.
+ // For the return value, a cast to `discr_layout` is performed.
+ // - The field storing the tag has a layout, which is very similar to `tag_layout` but
+ // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
+
+ // Get layout for tag.
+ let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
+
+ // Read tag and sanity-check `tag_layout`.
+ let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
+ assert_eq!(tag_layout.size, tag_val.layout.size);
+ assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+ trace!("tag value: {}", tag_val);
+
+ // Figure out which discriminant and variant this corresponds to.
+ Ok(match *tag_encoding {
+ TagEncoding::Direct => {
+ let scalar = tag_val.to_scalar()?;
+ // Generate a specific error if `tag_val` is not an integer.
+ // (`tag_bits` itself is only used for error messages below.)
+ let tag_bits = scalar
+ .try_to_int()
+ .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
+ .assert_bits(tag_layout.size);
+ // Cast bits from tag layout to discriminant layout.
+ // After the checks we did above, this cannot fail, as
+ // discriminants are int-like.
+ let discr_val =
+ self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
+ let discr_bits = discr_val.assert_bits(discr_layout.size);
+ // Convert discriminant to variant index, and catch invalid discriminants.
+ let index = match *op.layout.ty.kind() {
+ ty::Adt(adt, _) => {
+ adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
+ }
+ ty::Generator(def_id, substs, _) => {
+ let substs = substs.as_generator();
+ substs
+ .discriminants(def_id, *self.tcx)
+ .find(|(_, var)| var.val == discr_bits)
+ }
+ _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
+ }
+ .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
+ // Return the cast value, and the index.
+ (discr_val, index.0)
+ }
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
+ let tag_val = tag_val.to_scalar()?;
+ // Compute the variant this niche value/"tag" corresponds to. With niche layout,
+ // discriminant (encoded in niche/tag) and variant index are the same.
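+ // (Example: `Option<&T>` stores `None` in the pointer's null niche,
+ // which is why `size_of::<Option<&u8>>() == size_of::<&u8>()`.)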
+ let variants_start = niche_variants.start().as_u32();
+ let variants_end = niche_variants.end().as_u32();
+ let variant = match tag_val.try_to_int() {
+ Err(dbg_val) => {
+ // So this is a pointer, and casting it to an int failed.
+ // Can only happen during CTFE.
+ // Only if the niche is exactly 0 and the pointer is not null do we know
+ // this is okay. Everything else we conservatively reject.
+ let ptr_valid = niche_start == 0
+ && variants_start == variants_end
+ && !self.scalar_may_be_null(tag_val)?;
+ if !ptr_valid {
+ throw_ub!(InvalidTag(dbg_val))
+ }
+ dataful_variant
+ }
+ Ok(tag_bits) => {
+ let tag_bits = tag_bits.assert_bits(tag_layout.size);
+ // We need to use machine arithmetic to get the relative variant idx:
+ // variant_index_relative = tag_val - niche_start_val
+ let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
+ let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+ let variant_index_relative_val =
+ self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
+ let variant_index_relative = variant_index_relative_val
+ .to_scalar()?
+ .assert_bits(tag_val.layout.size);
+ // Check if this is in the range that indicates an actual discriminant.
+ if variant_index_relative <= u128::from(variants_end - variants_start) {
+ let variant_index_relative = u32::try_from(variant_index_relative)
+ .expect("we checked that this fits into a u32");
+ // Then computing the absolute variant idx should not overflow any more.
+ let variant_index = variants_start
+ .checked_add(variant_index_relative)
+ .expect("overflow computing absolute variant idx");
+ let variants_len = op
+ .layout
+ .ty
+ .ty_adt_def()
+ .expect("tagged layout for non adt")
+ .variants()
+ .len();
+ assert!(usize::try_from(variant_index).unwrap() < variants_len);
+ VariantIdx::from_u32(variant_index)
+ } else {
+ dataful_variant
+ }
+ }
+ };
+ // Compute the size of the scalar we need to return.
+ // No need to cast, because the variant index directly serves as discriminant and is
+ // encoded in the tag.
+ (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
+ }
+ })
+ }
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ // These are in alphabetical order, which is easy to maintain.
+ rustc_data_structures::static_assert_size!(Immediate, 56);
+ rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
+ rustc_data_structures::static_assert_size!(Operand, 64);
+ rustc_data_structures::static_assert_size!(OpTy<'_>, 88);
+}
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
new file mode 100644
index 000000000..f9912d706
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -0,0 +1,463 @@
+use std::convert::TryFrom;
+
+use rustc_apfloat::Float;
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_middle::ty::{self, FloatTy, Ty};
+use rustc_target::abi::Abi;
+
+use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Applies the binary operation `op` to the two operands and writes a tuple of the result
+ /// and a boolean signifying the potential overflow to the destination.
+ ///
+ /// `force_overflow_checks` indicates whether overflow checks should be done even when
+ /// `tcx.sess.overflow_checks()` is `false`.
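+ ///
+ /// (The `(value, overflowed)` pair mirrors the standard library's overflowing
+ /// ops, e.g. `u8::overflowing_add(255, 1) == (0, true)`.)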
+ pub fn binop_with_overflow(
+ &mut self,
+ op: mir::BinOp,
+ force_overflow_checks: bool,
+ left: &ImmTy<'tcx, M::Provenance>,
+ right: &ImmTy<'tcx, M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
+ debug_assert_eq!(
+ self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
+ dest.layout.ty,
+ "type mismatch for result of {:?}",
+ op,
+ );
+ // As per https://github.com/rust-lang/rust/pull/98738, we always return `false` in the 2nd
+ // component when overflow checking is disabled.
+ let overflowed =
+ overflowed && (force_overflow_checks || M::checked_binop_checks_overflow(self));
+ // Write the result to `dest`.
+ if let Abi::ScalarPair(..) = dest.layout.abi {
+ // We can use the optimized path and avoid `place_field` (which might do
+ // `force_allocation`).
+ let pair = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
+ self.write_immediate(pair, dest)?;
+ } else {
+ assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
+ // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
+ // do a component-wise write here. This code path is slower than the above because
+ // `place_field` will have to `force_allocate` locals here.
+ let val_field = self.place_field(&dest, 0)?;
+ self.write_scalar(val, &val_field)?;
+ let overflowed_field = self.place_field(&dest, 1)?;
+ self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
+ }
+ Ok(())
+ }
+
+ /// Applies the binary operation `op` to the arguments and writes the result to the
+ /// destination.
+ pub fn binop_ignore_overflow(
+ &mut self,
+ op: mir::BinOp,
+ left: &ImmTy<'tcx, M::Provenance>,
+ right: &ImmTy<'tcx, M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
+ assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
+ self.write_scalar(val, dest)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ fn binary_char_op(
+ &self,
+ bin_op: mir::BinOp,
+ l: char,
+ r: char,
+ ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ use rustc_middle::mir::BinOp::*;
+
+ let res = match bin_op {
+ Eq => l == r,
+ Ne => l != r,
+ Lt => l < r,
+ Le => l <= r,
+ Gt => l > r,
+ Ge => l >= r,
+ _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
+ };
+ (Scalar::from_bool(res), false, self.tcx.types.bool)
+ }
+
+ fn binary_bool_op(
+ &self,
+ bin_op: mir::BinOp,
+ l: bool,
+ r: bool,
+ ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ use rustc_middle::mir::BinOp::*;
+
+ let res = match bin_op {
+ Eq => l == r,
+ Ne => l != r,
+ Lt => l < r,
+ Le => l <= r,
+ Gt => l > r,
+ Ge => l >= r,
+ BitAnd => l & r,
+ BitOr => l | r,
+ BitXor => l ^ r,
+ _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
+ };
+ (Scalar::from_bool(res), false, self.tcx.types.bool)
+ }
+
+ fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
+ &self,
+ bin_op: mir::BinOp,
+ ty: Ty<'tcx>,
+ l: F,
+ r: F,
+ ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
+ use rustc_middle::mir::BinOp::*;
+
+ let (val, ty) = match bin_op {
+ Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
+ Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+ Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
+ Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
+ Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
+ Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+ Add => ((l + r).value.into(), ty),
+ Sub => ((l - r).value.into(), ty),
+ Mul => ((l * r).value.into(), ty),
+ Div => ((l / r).value.into(), ty),
+ Rem => ((l % r).value.into(), ty),
+ _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
+ };
+ (val, false, ty)
+ }
+
+ fn binary_int_op(
+ &self,
+ bin_op: mir::BinOp,
+ // passing in raw bits
+ l: u128,
+ left_layout: TyAndLayout<'tcx>,
+ r: u128,
+ right_layout: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ use rustc_middle::mir::BinOp::*;
+
+ // Shift ops can have an RHS with a different numeric type.
+ if bin_op == Shl || bin_op == Shr {
+ let size = u128::from(left_layout.size.bits());
+ // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
+ // zero-extended form). This matches the codegen backend:
+ // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
+ // The overflow check is also ignorant to the sign:
+ // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
+ // This would behave rather strangely if we had integer types of size 256: a shift by
+ // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
+ // shift by -1i16 though would be considered overflowing. If we had integers of size
+ // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
+ // the first shifts by 255, the latter by u16::MAX % 512 = 511. Luckily, our
+ // integers are at most 128 bits wide, so negative shifts *always* overflow and we get
+ // consistent results for the same value represented at different bit widths.
+ assert!(size <= 128);
+ let overflow = r >= size;
+ // The shift offset is implicitly masked to the type size, to make sure this operation
+ // is always defined. This is the one MIR operator that does *not* directly map to a
+ // single LLVM operation. See
+ // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
+ // for the corresponding truncation in our codegen backends.
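+ // (E.g. shifting an `i32` by 40: overflow is flagged since 40 >= 32, and the
+ // shift actually performed is 40 % 32 = 8 -- the same behavior as
+ // `i32::overflowing_shl(x, 40)`.)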
+ let r = r % size;
+ let r = u32::try_from(r).unwrap(); // we masked so this will always fit
+ let result = if left_layout.abi.is_signed() {
+ let l = self.sign_extend(l, left_layout) as i128;
+ let result = match bin_op {
+ Shl => l.checked_shl(r).unwrap(),
+ Shr => l.checked_shr(r).unwrap(),
+ _ => bug!(),
+ };
+ result as u128
+ } else {
+ match bin_op {
+ Shl => l.checked_shl(r).unwrap(),
+ Shr => l.checked_shr(r).unwrap(),
+ _ => bug!(),
+ }
+ };
+ let truncated = self.truncate(result, left_layout);
+ return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
+ }
+
+ // For the remaining ops, the types must be the same on both sides
+ if left_layout.ty != right_layout.ty {
+ span_bug!(
+ self.cur_span(),
+ "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+ bin_op,
+ l,
+ left_layout.ty,
+ r,
+ right_layout.ty,
+ )
+ }
+
+ let size = left_layout.size;
+
+ // Operations that need special treatment for signed integers
+ if left_layout.abi.is_signed() {
+ let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
+ Lt => Some(i128::lt),
+ Le => Some(i128::le),
+ Gt => Some(i128::gt),
+ Ge => Some(i128::ge),
+ _ => None,
+ };
+ if let Some(op) = op {
+ let l = self.sign_extend(l, left_layout) as i128;
+ let r = self.sign_extend(r, right_layout) as i128;
+ return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
+ }
+ let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
+ Div if r == 0 => throw_ub!(DivisionByZero),
+ Rem if r == 0 => throw_ub!(RemainderByZero),
+ Div => Some(i128::overflowing_div),
+ Rem => Some(i128::overflowing_rem),
+ Add => Some(i128::overflowing_add),
+ Sub => Some(i128::overflowing_sub),
+ Mul => Some(i128::overflowing_mul),
+ _ => None,
+ };
+ if let Some(op) = op {
+ let l = self.sign_extend(l, left_layout) as i128;
+ let r = self.sign_extend(r, right_layout) as i128;
+
+ // We need a special check for overflowing Rem and Div since they are *UB*
+ // on overflow, which can happen with "int_min $OP -1".
+ if matches!(bin_op, Rem | Div) {
+ if l == size.signed_int_min() && r == -1 {
+ if bin_op == Rem {
+ throw_ub!(RemainderOverflow)
+ } else {
+ throw_ub!(DivisionOverflow)
+ }
+ }
+ }
+
+ let (result, oflo) = op(l, r);
+ // This may be out-of-bounds for the result type, so we have to truncate ourselves.
+ // If that truncation loses any information, we have an overflow.
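+ // (E.g. `100_i8 + 100`: the wide result 200 truncates to 0xC8, which
+ // sign-extends back to -56 != 200, so overflow is flagged -- matching
+ // `i8::overflowing_add(100, 100) == (-56, true)`.)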
+ let result = result as u128;
+ let truncated = self.truncate(result, left_layout);
+ return Ok((
+ Scalar::from_uint(truncated, size),
+ oflo || self.sign_extend(truncated, left_layout) != result,
+ left_layout.ty,
+ ));
+ }
+ }
+
+ let (val, ty) = match bin_op {
+ Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
+ Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
+
+ Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
+ Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
+ Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
+ Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
+
+ BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
+ BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
+ BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
+
+ Add | Sub | Mul | Rem | Div => {
+ assert!(!left_layout.abi.is_signed());
+ let op: fn(u128, u128) -> (u128, bool) = match bin_op {
+ Add => u128::overflowing_add,
+ Sub => u128::overflowing_sub,
+ Mul => u128::overflowing_mul,
+ Div if r == 0 => throw_ub!(DivisionByZero),
+ Rem if r == 0 => throw_ub!(RemainderByZero),
+ Div => u128::overflowing_div,
+ Rem => u128::overflowing_rem,
+ _ => bug!(),
+ };
+ let (result, oflo) = op(l, r);
+ // Truncate to target type.
+ // If that truncation loses any information, we have an overflow.
+ let truncated = self.truncate(result, left_layout);
+ return Ok((
+ Scalar::from_uint(truncated, size),
+ oflo || truncated != result,
+ left_layout.ty,
+ ));
+ }
+
+ _ => span_bug!(
+ self.cur_span(),
+ "invalid binary op {:?}: {:?}, {:?} (both {:?})",
+ bin_op,
+ l,
+ r,
+ right_layout.ty,
+ ),
+ };
+
+ Ok((val, false, ty))
+ }
+
+ /// Returns the result of the specified operation, whether it overflowed, and
+ /// the result type.
+ pub fn overflowing_binary_op(
+ &self,
+ bin_op: mir::BinOp,
+ left: &ImmTy<'tcx, M::Provenance>,
+ right: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ trace!(
+ "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
+ bin_op,
+ *left,
+ left.layout.ty,
+ *right,
+ right.layout.ty
+ );
+
+ match left.layout.ty.kind() {
+ ty::Char => {
+ assert_eq!(left.layout.ty, right.layout.ty);
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
+ Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
+ }
+ ty::Bool => {
+ assert_eq!(left.layout.ty, right.layout.ty);
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
+ Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
+ }
+ ty::Float(fty) => {
+ assert_eq!(left.layout.ty, right.layout.ty);
+ let ty = left.layout.ty;
+ let left = left.to_scalar()?;
+ let right = right.to_scalar()?;
+ Ok(match fty {
+ FloatTy::F32 => {
+ self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
+ }
+ FloatTy::F64 => {
+ self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
+ }
+ })
+ }
+ _ if left.layout.ty.is_integral() => {
+ // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
+ assert!(
+ right.layout.ty.is_integral(),
+ "Unexpected types for BinOp: {:?} {:?} {:?}",
+ left.layout.ty,
+ bin_op,
+ right.layout.ty
+ );
+
+ let l = left.to_scalar()?.to_bits(left.layout.size)?;
+ let r = right.to_scalar()?.to_bits(right.layout.size)?;
+ self.binary_int_op(bin_op, l, left.layout, r, right.layout)
+ }
+ _ if left.layout.ty.is_any_ptr() => {
+ // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
+ // (Even when both sides are pointers, their types might differ; see issue #91636.)
+ assert!(
+ right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
+ "Unexpected types for BinOp: {:?} {:?} {:?}",
+ left.layout.ty,
+ bin_op,
+ right.layout.ty
+ );
+
+ M::binary_ptr_op(self, bin_op, left, right)
+ }
+ _ => span_bug!(
+ self.cur_span(),
+ "Invalid MIR: bad LHS type for binop: {:?}",
+ left.layout.ty
+ ),
+ }
+ }
+
+ /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
+ #[inline]
+ pub fn binary_op(
+ &self,
+ bin_op: mir::BinOp,
+ left: &ImmTy<'tcx, M::Provenance>,
+ right: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+ let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
+ Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+ }
+
+ /// Returns the result of the specified operation, whether it overflowed, and
+ /// the result type.
+ pub fn overflowing_unary_op(
+ &self,
+ un_op: mir::UnOp,
+ val: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
+ use rustc_middle::mir::UnOp::*;
+
+ let layout = val.layout;
+ let val = val.to_scalar()?;
+ trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
+
+ match layout.ty.kind() {
+ ty::Bool => {
+ let val = val.to_bool()?;
+ let res = match un_op {
+ Not => !val,
+ _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
+ };
+ Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
+ }
+ ty::Float(fty) => {
+ let res = match (un_op, fty) {
+ (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
+ (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
+ _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
+ };
+ Ok((res, false, layout.ty))
+ }
+ _ => {
+ assert!(layout.ty.is_integral());
+ let val = val.to_bits(layout.size)?;
+ let (res, overflow) = match un_op {
+ Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
+ Neg => {
+ // arithmetic negation
+ assert!(layout.abi.is_signed());
+ let val = self.sign_extend(val, layout) as i128;
+ let (res, overflow) = val.overflowing_neg();
+ let res = res as u128;
+ // Truncate to target type.
+ // If that truncation loses any information, we have an overflow.
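+ // (E.g. negating `i8::MIN`: +128 does not fit back into `i8`, so overflow is
+ // flagged -- matching `i8::MIN.overflowing_neg() == (i8::MIN, true)`.)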
+ let truncated = self.truncate(res, layout);
+ (truncated, overflow || self.sign_extend(truncated, layout) != res)
+ }
+ };
+ Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
+ }
+ }
+ }
+
+ pub fn unary_op(
+ &self,
+ un_op: mir::UnOp,
+ val: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
+ let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
+ Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
new file mode 100644
index 000000000..f4571a1ca
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -0,0 +1,900 @@
+//! Computations on places -- field projections, going from mir::Place, and writing
+//! into a place.
+//! All high-level functions to write to memory work on places as destinations.
+
+use std::hash::Hash;
+
+use rustc_ast::Mutability;
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
+use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding, VariantIdx};
+
+use super::{
+ alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
+ ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
+ Pointer, Provenance, Scalar, ScalarMaybeUninit,
+};
+
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
+/// Information required for the sound usage of a `MemPlace`.
+pub enum MemPlaceMeta<Prov: Provenance = AllocId> {
+ /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
+ Meta(Scalar<Prov>),
+ /// `Sized` types or unsized `extern type`
+ None,
+}
+
+impl<Prov: Provenance> MemPlaceMeta<Prov> {
+ pub fn unwrap_meta(self) -> Scalar<Prov> {
+ match self {
+ Self::Meta(s) => s,
+ Self::None => {
+ bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
+ }
+ }
+ }
+
+ pub fn has_meta(self) -> bool {
+ match self {
+ Self::Meta(_) => true,
+ Self::None => false,
+ }
+ }
+}
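+
+// Illustration (plain std Rust, not the interpreter's own API): the metadata is
+// the extra word that makes a pointer "wide"; thin pointers carry none.
+#[allow(dead_code)]
+fn _wide_pointer_illustration() {
+ use std::mem::size_of;
+ assert_eq!(size_of::<&u8>(), size_of::<usize>()); // thin: no meta
+ assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>()); // meta = slice length
+ assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * size_of::<usize>()); // meta = vtable ptr
+}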
+
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
+pub struct MemPlace<Prov: Provenance = AllocId> {
+ /// The pointer can be a pure integer, with the `None` provenance.
+ pub ptr: Pointer<Option<Prov>>,
+ /// Metadata for unsized places. Interpretation is up to the type.
+ /// Must not be present for sized types, but can be missing for unsized types
+ /// (e.g., `extern type`).
+ pub meta: MemPlaceMeta<Prov>,
+}
+
+/// A MemPlace with its layout. Constructing it is only possible in this module.
+#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
+pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
+ mplace: MemPlace<Prov>,
+ pub layout: TyAndLayout<'tcx>,
+ /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+ /// it needs to have a different alignment than the field type would usually have.
+ /// So we represent this here with a separate field that "overwrites" `layout.align`.
+ /// This means `layout.align` should never be used for a `MPlaceTy`!
+ pub align: Align,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum Place<Prov: Provenance = AllocId> {
+ /// A place referring to a value allocated in the `Memory` system.
+ Ptr(MemPlace<Prov>),
+
+ /// To support alloc-free locals, we are able to write directly to a local.
+ /// (Without that optimization, we'd just always be a `MemPlace`.)
+ Local { frame: usize, local: mir::Local },
+}
+
+#[derive(Clone, Debug)]
+pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
+ place: Place<Prov>, // Keep this private; it helps enforce invariants.
+ pub layout: TyAndLayout<'tcx>,
+ /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
+ /// it needs to have a different alignment than the field type would usually have.
+ /// So we represent this here with a separate field that "overwrites" `layout.align`.
+ /// This means `layout.align` should never be used for a `PlaceTy`!
+ pub align: Align,
+}
+
+impl<'tcx, Prov: Provenance> std::ops::Deref for PlaceTy<'tcx, Prov> {
+ type Target = Place<Prov>;
+ #[inline(always)]
+ fn deref(&self) -> &Place<Prov> {
+ &self.place
+ }
+}
+
+impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
+ type Target = MemPlace<Prov>;
+ #[inline(always)]
+ fn deref(&self) -> &MemPlace<Prov> {
+ &self.mplace
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
+ PlaceTy { place: Place::Ptr(*mplace), layout: mplace.layout, align: mplace.align }
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
+ PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
+ }
+}
+
+impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
+ #[inline(always)]
+ fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
+ PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
+ }
+}
+
+impl<Prov: Provenance> MemPlace<Prov> {
+ #[inline(always)]
+ pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
+ MemPlace { ptr, meta: MemPlaceMeta::None }
+ }
+
+ /// Adjust the provenance of the main pointer (metadata is unaffected).
+ pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
+ MemPlace { ptr: self.ptr.map_provenance(f), ..self }
+ }
+
+ /// Turn an mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
+ /// This is the inverse of `ref_to_mplace`.
+ #[inline(always)]
+ pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
+ match self.meta {
+ MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
+ MemPlaceMeta::Meta(meta) => {
+ Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
+ }
+ }
+ }
+
+ #[inline]
+ pub fn offset_with_meta<'tcx>(
+ self,
+ offset: Size,
+ meta: MemPlaceMeta<Prov>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
+ }
+}
+
+impl<Prov: Provenance> Place<Prov> {
+ /// Asserts that this points to some local variable.
+ /// Returns the frame idx and the variable idx.
+ #[inline]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn assert_local(&self) -> (usize, mir::Local) {
+ match self {
+ Place::Local { frame, local } => (*frame, *local),
+ _ => bug!("assert_local: expected Place::Local, got {:?}", self),
+ }
+ }
+}
+
+impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
+ /// Produces a MemPlace that works for ZSTs but nothing else.
+ /// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
+ /// don't need to worry about memory leaks.
+ #[inline]
+ pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
+ assert!(layout.is_zst());
+ let align = layout.align.abi;
+ let ptr = Pointer::from_addr(align.bytes()); // no provenance, absolute address
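+ // (Conceptually the same trick as `core::ptr::NonNull::<T>::dangling()`: the
+ // alignment itself is a well-aligned, non-null address that is valid for
+ // zero-sized accesses, with no allocation behind it.)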
+ MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
+ }
+
+ #[inline]
+ pub fn offset_with_meta(
+ &self,
+ offset: Size,
+ meta: MemPlaceMeta<Prov>,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ Ok(MPlaceTy {
+ mplace: self.mplace.offset_with_meta(offset, meta, cx)?,
+ align: self.align.restrict_for_offset(offset),
+ layout,
+ })
+ }
+
+ pub fn offset(
+ &self,
+ offset: Size,
+ layout: TyAndLayout<'tcx>,
+ cx: &impl HasDataLayout,
+ ) -> InterpResult<'tcx, Self> {
+ assert!(!layout.is_unsized());
+ self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
+ }
+
+ #[inline]
+ pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
+ MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
+ }
+
+ #[inline]
+ pub fn from_aligned_ptr_with_meta(
+ ptr: Pointer<Option<Prov>>,
+ layout: TyAndLayout<'tcx>,
+ meta: MemPlaceMeta<Prov>,
+ ) -> Self {
+ let mut mplace = MemPlace::from_ptr(ptr);
+ mplace.meta = meta;
+
+ MPlaceTy { mplace, layout, align: layout.align.abi }
+ }
+
+ #[inline]
+ pub(crate) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+ if self.layout.is_unsized() {
+ // We need to consult the `meta` metadata
+ match self.layout.ty.kind() {
+ ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx),
+ _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
+ }
+ } else {
+ // Go through the layout. There are lots of types that support a length,
+ // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
+ match self.layout.fields {
+ abi::FieldsShape::Array { count, .. } => Ok(count),
+ _ => bug!("len not supported on sized type {:?}", self.layout.ty),
+ }
+ }
+ }
+
+ #[inline]
+ pub(super) fn vtable(&self) -> Scalar<Prov> {
+ match self.layout.ty.kind() {
+ ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
+ _ => bug!("vtable not supported on type {:?}", self.layout.ty),
+ }
+ }
+}
+
+// These are defined here because they produce a place.
+impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
+ #[inline(always)]
+ /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+ /// read from the resulting mplace, not to get its address back.
+ pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
+ match **self {
+ Operand::Indirect(mplace) => {
+ Ok(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
+ }
+ Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
+ }
+ }
+
+ #[inline(always)]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ /// Note: do not call `as_ref` on the resulting place. This function should only be used to
+ /// read from the resulting mplace, not to get its address back.
+ pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
+ self.try_as_mplace().unwrap()
+ }
+}
+
+impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
+ /// A place is either an mplace or some local.
+ #[inline]
+ pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
+ match **self {
+ Place::Ptr(mplace) => Ok(MPlaceTy { mplace, layout: self.layout, align: self.align }),
+ Place::Local { frame, local } => Err((frame, local)),
+ }
+ }
+
+ #[inline(always)]
+ #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
+ pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Prov> {
+ self.try_as_mplace().unwrap()
+ }
+}
+
+// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
+impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
+where
+ Prov: Provenance + Eq + Hash + 'static,
+ M: Machine<'mir, 'tcx, Provenance = Prov>,
+{
+ /// Take a value, which represents a (thin or wide) reference, and make it a place.
+ /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
+ ///
+ /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
+ /// want to ever use the place for memory access!
+ /// Generally prefer `deref_operand`.
+ pub fn ref_to_mplace(
+ &self,
+ val: &ImmTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let pointee_type =
+ val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
+ let layout = self.layout_of(pointee_type)?;
+ let (ptr, meta) = match **val {
+ Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
+ Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
+ Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
+ };
+
+ let mplace = MemPlace { ptr: ptr.to_pointer(self)?, meta };
+ // When deref'ing a pointer, the *static* alignment given by the type is what matters.
+ let align = layout.align.abi;
+ Ok(MPlaceTy { mplace, layout, align })
+ }
+
+ /// Take an operand, representing a pointer, and dereference it to a place -- that
+ /// will always be a MemPlace. Lives in `place.rs` because it creates a place.
+ #[instrument(skip(self), level = "debug")]
+ pub fn deref_operand(
+ &self,
+ src: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let val = self.read_immediate(src)?;
+ trace!("deref to {} on {:?}", val.layout.ty, *val);
+
+ if val.layout.ty.is_box() {
+ bug!("dereferencing {:?}", val.layout.ty);
+ }
+
+ let mplace = self.ref_to_mplace(&val)?;
+ self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?;
+ Ok(mplace)
+ }
+
+ #[inline]
+ pub(super) fn get_place_alloc(
+ &self,
+ place: &MPlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
+ assert!(!place.layout.is_unsized());
+ assert!(!place.meta.has_meta());
+ let size = place.layout.size;
+ self.get_ptr_alloc(place.ptr, size, place.align)
+ }
+
+ #[inline]
+ pub(super) fn get_place_alloc_mut(
+ &mut self,
+ place: &MPlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra>>> {
+ assert!(!place.layout.is_unsized());
+ assert!(!place.meta.has_meta());
+ let size = place.layout.size;
+ self.get_ptr_alloc_mut(place.ptr, size, place.align)
+ }
+
+ /// Check if this mplace is dereferenceable and sufficiently aligned.
+ fn check_mplace_access(
+ &self,
+ mplace: MPlaceTy<'tcx, M::Provenance>,
+ msg: CheckInAllocMsg,
+ ) -> InterpResult<'tcx> {
+ let (size, align) = self
+ .size_and_align_of_mplace(&mplace)?
+ .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+ assert!(mplace.align <= align, "dynamic alignment less strict than static one?");
+ let align = M::enforce_alignment(self).then_some(align);
+ self.check_ptr_access_align(mplace.ptr, size, align.unwrap_or(Align::ONE), msg)?;
+ Ok(())
+ }
+
+ /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
+ /// Also returns the number of elements.
+ pub fn mplace_to_simd(
+ &self,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
+ // Basically we just transmute this place into an array following simd_size_and_type.
+ // (Transmuting is okay since this is an in-memory place. We also double-check the size
+ // stays the same.)
+ let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
+ let array = self.tcx.mk_array(e_ty, len);
+ let layout = self.layout_of(array)?;
+ assert_eq!(layout.size, mplace.layout.size);
+ Ok((MPlaceTy { layout, ..*mplace }, len))
+ }
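+
+ // Illustrative: a `#[repr(simd)]` vector with four `f32` lanes is reinterpreted
+ // here as `[f32; 4]`, so that `place_index` can then address each lane.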
+
+ /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
+ /// Also returns the number of elements.
+ pub fn place_to_simd(
+ &mut self,
+ place: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
+ let mplace = self.force_allocation(place)?;
+ self.mplace_to_simd(&mplace)
+ }
+
+ pub fn local_to_place(
+ &self,
+ frame: usize,
+ local: mir::Local,
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+ let layout = self.layout_of_local(&self.stack()[frame], local, None)?;
+ let place = Place::Local { frame, local };
+ Ok(PlaceTy { place, layout, align: layout.align.abi })
+ }
+
+ /// Computes a place. You should only use this if you intend to write into this
+ /// place; for reading, a more efficient alternative is `eval_place_to_op`.
+ #[instrument(skip(self), level = "debug")]
+ pub fn eval_place(
+ &mut self,
+ mir_place: mir::Place<'tcx>,
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+ let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?;
+ // Using `try_fold` turned out to be bad for performance, hence the loop.
+ for elem in mir_place.projection.iter() {
+ place = self.place_projection(&place, elem)?
+ }
+
+ trace!("{:?}", self.dump_place(place.place));
+ // Sanity-check the type we ended up with.
+ debug_assert!(
+ mir_assign_valid_types(
+ *self.tcx,
+ self.param_env,
+ self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
+ )?)?,
+ place.layout,
+ ),
+ "eval_place of a MIR place with type {:?} produced an interpreter place with type {:?}",
+ mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
+ place.layout.ty,
+ );
+ Ok(place)
+ }
+
+ /// Write an immediate to a place
+ #[inline(always)]
+ #[instrument(skip(self), level = "debug")]
+ pub fn write_immediate(
+ &mut self,
+ src: Immediate<M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ self.write_immediate_no_validate(src, dest)?;
+
+ if M::enforce_validity(self) {
+ // Data got changed, better make sure it matches the type!
+ self.validate_operand(&self.place_to_op(dest)?)?;
+ }
+
+ Ok(())
+ }
+
+ /// Write a scalar to a place
+ #[inline(always)]
+ pub fn write_scalar(
+ &mut self,
+ val: impl Into<ScalarMaybeUninit<M::Provenance>>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ self.write_immediate(Immediate::Scalar(val.into()), dest)
+ }
+
+ /// Write a pointer to a place
+ #[inline(always)]
+ pub fn write_pointer(
+ &mut self,
+ ptr: impl Into<Pointer<Option<M::Provenance>>>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
+ }
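+
+ // Usage sketch (hypothetical destination `dest` of type `u32`): these helpers
+ // all funnel into `write_immediate`, so validity checking is applied uniformly:
+ //
+ // self.write_scalar(Scalar::from_u32(42), &dest)?;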
+
+ /// Write an immediate to a place.
+ /// If you use this you are responsible for validating that things got copied at the
+ /// right type.
+ fn write_immediate_no_validate(
+ &mut self,
+ src: Immediate<M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
+ trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+
+ // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
+ // but not factored as a separate function.
+ let mplace = match dest.place {
+ Place::Local { frame, local } => {
+ match M::access_local_mut(self, frame, local)? {
+ Operand::Immediate(local) => {
+ // Local can be updated in-place.
+ *local = src;
+ return Ok(());
+ }
+ Operand::Indirect(mplace) => {
+ // The local is in memory, go on below.
+ *mplace
+ }
+ }
+ }
+ Place::Ptr(mplace) => mplace, // already referring to memory
+ };
+
+ // This is already in memory, write there.
+ self.write_immediate_to_mplace_no_validate(src, dest.layout, dest.align, mplace)
+ }
+
+ /// Write an immediate to memory.
+ /// If you use this you are responsible for validating that things got copied at the
+ /// right layout.
+ fn write_immediate_to_mplace_no_validate(
+ &mut self,
+ value: Immediate<M::Provenance>,
+ layout: TyAndLayout<'tcx>,
+ align: Align,
+ dest: MemPlace<M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ // Note that it is really important that the type here is the right one, and matches the
+ // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
+ // to handle padding properly, which is only correct if we never look at this data with the
+ // wrong type.
+
+ let tcx = *self.tcx;
+ let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })? else {
+ // zero-sized access
+ return Ok(());
+ };
+
+ match value {
+ Immediate::Scalar(scalar) => {
+ let Abi::Scalar(s) = layout.abi else { span_bug!(
+ self.cur_span(),
+ "write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
+ )
+ };
+ let size = s.size(&tcx);
+ assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
+ alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
+ }
+ Immediate::ScalarPair(a_val, b_val) => {
+ // We checked `ptr_align` above, so all fields will have the alignment they need.
+ // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
+ // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
+ let Abi::ScalarPair(a, b) = layout.abi else { span_bug!(
+ self.cur_span(),
+ "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
+ layout
+ )
+ };
+ let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
+ let b_offset = a_size.align_to(b.align(&tcx).abi);
+ assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
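+ // Worked example (illustrative): for a `(u8, u32)` scalar pair,
+ // `a_size = 1` and `b.align = 4`, so `b_offset = 1.align_to(4) = 4`:
+ // the two components are written at offsets 0 and 4.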
+
+ // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
+ // but that does not work: We could be a newtype around a pair, then the
+ // fields do not match the `ScalarPair` components.
+
+ alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
+ alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
+ }
+ Immediate::Uninit => alloc.write_uninit(),
+ }
+ }
+
+ pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ let mplace = match dest.try_as_mplace() {
+ Ok(mplace) => mplace,
+ Err((frame, local)) => {
+ match M::access_local_mut(self, frame, local)? {
+ Operand::Immediate(local) => {
+ *local = Immediate::Uninit;
+ return Ok(());
+ }
+ Operand::Indirect(mplace) => {
+ // The local is in memory, go on below.
+ MPlaceTy { mplace: *mplace, layout: dest.layout, align: dest.align }
+ }
+ }
+ }
+ };
+ let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
+ // Zero-sized access
+ return Ok(());
+ };
+ alloc.write_uninit()?;
+ Ok(())
+ }
+
+ /// Copies the data from an operand to a place.
+ /// `allow_transmute` indicates whether the layouts may disagree.
+ #[inline(always)]
+ #[instrument(skip(self), level = "debug")]
+ pub fn copy_op(
+ &mut self,
+ src: &OpTy<'tcx, M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ allow_transmute: bool,
+ ) -> InterpResult<'tcx> {
+ self.copy_op_no_validate(src, dest, allow_transmute)?;
+
+ if M::enforce_validity(self) {
+ // Data got changed, better make sure it matches the type!
+ self.validate_operand(&self.place_to_op(dest)?)?;
+ }
+
+ Ok(())
+ }
+
+ /// Copies the data from an operand to a place.
+ /// `allow_transmute` indicates whether the layouts may disagree.
+ /// Also, if you use this you are responsible for validating that things get copied at the
+ /// right type.
+ #[instrument(skip(self), level = "debug")]
+ fn copy_op_no_validate(
+ &mut self,
+ src: &OpTy<'tcx, M::Provenance>,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ allow_transmute: bool,
+ ) -> InterpResult<'tcx> {
+ // We do NOT compare the types for equality, because well-typed code can
+ // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
+ let layout_compat =
+ mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
+ if !allow_transmute && !layout_compat {
+ span_bug!(
+ self.cur_span(),
+ "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
+ src.layout.ty,
+ dest.layout.ty,
+ );
+ }
+
+ // Let us see if the layout is simple enough that we can take a shortcut
+ // and avoid `force_allocation`.
+ let src = match self.read_immediate_raw(src, /*force*/ false)? {
+ Ok(src_val) => {
+ assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
+ assert!(
+ !dest.layout.is_unsized(),
+ "the src is sized, so the dest must also be sized"
+ );
+ assert_eq!(src.layout.size, dest.layout.size);
+ // Yay, we got a value that we can write directly.
+ return if layout_compat {
+ self.write_immediate_no_validate(*src_val, dest)
+ } else {
+ // This is tricky. The problematic case is `ScalarPair`: the `src_val` was
+ // loaded using the offsets defined by `src.layout`. When we put this back into
+ // the destination, we have to use the same offsets! So (a) we make sure we
+ // write back to memory, and (b) we use `dest` *with the source layout*.
+ let dest_mem = self.force_allocation(dest)?;
+ self.write_immediate_to_mplace_no_validate(
+ *src_val,
+ src.layout,
+ dest_mem.align,
+ *dest_mem,
+ )
+ };
+ }
+ Err(mplace) => mplace,
+ };
+ // Slow path, this does not fit into an immediate. Just memcpy.
+ trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
+
+ let dest = self.force_allocation(&dest)?;
+ let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
+ span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
+ };
+ if cfg!(debug_assertions) {
+ let src_size = self.size_and_align_of_mplace(&src)?.unwrap().0;
+ assert_eq!(src_size, dest_size, "Cannot copy differently-sized data");
+ } else {
+ // As a cheap approximation, we compare the fixed parts of the size.
+ assert_eq!(src.layout.size, dest.layout.size);
+ }
+
+ self.mem_copy(
+ src.ptr, src.align, dest.ptr, dest.align, dest_size, /*nonoverlapping*/ false,
+ )
+ }
+
+ /// Ensures that a place is in memory, and returns where it is.
+ /// If the place currently refers to a local that doesn't yet have a matching allocation,
+ /// create such an allocation.
+ /// This is essentially `force_to_memplace`.
+ #[instrument(skip(self), level = "debug")]
+ pub fn force_allocation(
+ &mut self,
+ place: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let mplace = match place.place {
+ Place::Local { frame, local } => {
+ match M::access_local_mut(self, frame, local)? {
+ &mut Operand::Immediate(local_val) => {
+ // We need to make an allocation.
+
+ // We need the layout of the local. We can NOT use the layout we got,
+ // that might e.g., be an inner field of a struct with `Scalar` layout,
+ // that has different alignment than the outer field.
+ let local_layout =
+ self.layout_of_local(&self.stack()[frame], local, None)?;
+ if local_layout.is_unsized() {
+ throw_unsup_format!("unsized locals are not supported");
+ }
+ let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
+ if !matches!(local_val, Immediate::Uninit) {
+ // Preserve old value. (As an optimization, we can skip this if it was uninit.)
+ // We don't have to validate as we can assume the local
+ // was already valid for its type.
+ self.write_immediate_to_mplace_no_validate(
+ local_val,
+ local_layout,
+ local_layout.align.abi,
+ mplace,
+ )?;
+ }
+ // Now we can call `access_mut` again, asserting it goes well,
+ // and actually overwrite things.
+ *M::access_local_mut(self, frame, local).unwrap() =
+ Operand::Indirect(mplace);
+ mplace
+ }
+ &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
+ }
+ }
+ Place::Ptr(mplace) => mplace,
+ };
+ // Return with the original layout, so that the caller can go on
+ Ok(MPlaceTy { mplace, layout: place.layout, align: place.align })
+ }
+
+ pub fn allocate(
+ &mut self,
+ layout: TyAndLayout<'tcx>,
+ kind: MemoryKind<M::MemoryKind>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ assert!(!layout.is_unsized());
+ let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
+ Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+ }
+
+ /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
+ pub fn allocate_str(
+ &mut self,
+ str: &str,
+ kind: MemoryKind<M::MemoryKind>,
+ mutbl: Mutability,
+ ) -> MPlaceTy<'tcx, M::Provenance> {
+ let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl);
+ let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
+ let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) };
+
+ let ty = self.tcx.mk_ref(
+ self.tcx.lifetimes.re_static,
+ ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
+ );
+ let layout = self.layout_of(ty).unwrap();
+ MPlaceTy { mplace, layout, align: layout.align.abi }
+ }
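+
+ // Illustrative: `allocate_str("hi", kind, Mutability::Not)` returns a wide place
+ // whose pointer targets a fresh 1-aligned allocation of the bytes `[0x68, 0x69]`
+ // and whose metadata is the machine-usize length 2.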
+
+ /// Writes the discriminant of the given variant.
+ #[instrument(skip(self), level = "debug")]
+ pub fn write_discriminant(
+ &mut self,
+ variant_index: VariantIdx,
+ dest: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ // This must be an enum or generator.
+ match dest.layout.ty.kind() {
+ ty::Adt(adt, _) => assert!(adt.is_enum()),
+ ty::Generator(..) => {}
+ _ => span_bug!(
+ self.cur_span(),
+ "write_discriminant called on non-variant-type (neither enum nor generator)"
+ ),
+ }
+ // Layout computation excludes uninhabited variants from consideration;
+ // therefore, there's no way to represent those variants in the given layout.
+ // Essentially, uninhabited variants do not have a tag that corresponds to their
+ // discriminant, so we cannot do anything here.
+ // When evaluating we will always error before even getting here, but ConstProp 'executes'
+ // dead code, so we cannot ICE here.
+ if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
+ throw_ub!(UninhabitedEnumVariantWritten)
+ }
+
+ match dest.layout.variants {
+ abi::Variants::Single { index } => {
+ assert_eq!(index, variant_index);
+ }
+ abi::Variants::Multiple {
+ tag_encoding: TagEncoding::Direct,
+ tag: tag_layout,
+ tag_field,
+ ..
+ } => {
+ // No need to validate the discriminant here because the
+ // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+ let discr_val =
+ dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;
+
+ // Raw discriminants for enums are `isize` or bigger during their
+ // computation, but the in-memory tag is the smallest possible
+ // representation.
+ let size = tag_layout.size(self);
+ let tag_val = size.truncate(discr_val);
+
+ let tag_dest = self.place_field(dest, tag_field)?;
+ self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
+ }
+ abi::Variants::Multiple {
+ tag_encoding:
+ TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
+ tag: tag_layout,
+ tag_field,
+ ..
+ } => {
+ // No need to validate the discriminant here because the
+ // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.
+
+ if variant_index != dataful_variant {
+ let variants_start = niche_variants.start().as_u32();
+ let variant_index_relative = variant_index
+ .as_u32()
+ .checked_sub(variants_start)
+ .expect("overflow computing relative variant idx");
+ // We need to use machine arithmetic when taking into account `niche_start`:
+ // tag_val = variant_index_relative + niche_start_val
+ let tag_layout = self.layout_of(tag_layout.primitive().to_int_ty(*self.tcx))?;
+ let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
+ let variant_index_relative_val =
+ ImmTy::from_uint(variant_index_relative, tag_layout);
+ let tag_val = self.binary_op(
+ mir::BinOp::Add,
+ &variant_index_relative_val,
+ &niche_start_val,
+ )?;
+ // Write result.
+ let niche_dest = self.place_field(dest, tag_field)?;
+ self.write_immediate(*tag_val, &niche_dest)?;
+ }
+ }
+ }
+
+ Ok(())
+ }
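+
+ // Worked niche example (illustrative): with a `u8` tag, `niche_variants = 1..=2`,
+ // and `niche_start = 251`, writing variant 2 computes
+ // `variant_index_relative = 2 - 1 = 1` and stores the tag `251 + 1 = 252`
+ // (machine arithmetic); the dataful variant stores no tag at all.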
+
+ pub fn raw_const_to_mplace(
+ &self,
+ raw: ConstAlloc<'tcx>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ // This must be an allocation in `tcx`
+ let _ = self.tcx.global_alloc(raw.alloc_id);
+ let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
+ let layout = self.layout_of(raw.ty)?;
+ Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
+ }
+
+ /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
+ pub(super) fn unpack_dyn_trait(
+ &self,
+ mplace: &MPlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let vtable = mplace.vtable().to_pointer(self)?; // also sanity checks the type
+ let (ty, _) = self.get_ptr_vtable(vtable)?;
+ let layout = self.layout_of(ty)?;
+
+ let mplace = MPlaceTy {
+ mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace },
+ layout,
+ align: layout.align.abi,
+ };
+ Ok(mplace)
+ }
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ // These are in alphabetical order, which is easy to maintain.
+ rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
+ rustc_data_structures::static_assert_size!(MemPlace, 40);
+ rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64);
+ rustc_data_structures::static_assert_size!(Place, 48);
+ rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72);
+}
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
new file mode 100644
index 000000000..742339f2b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -0,0 +1,391 @@
+//! This file implements "place projections"; basically a symmetric API for 3 types: MPlaceTy, OpTy, PlaceTy.
+//!
+//! OpTy and PlaceTy generally work by "let's see if we are actually an MPlaceTy, and do something custom if not".
+//! For PlaceTy, the custom thing is basically always to call `force_allocation` and then use the MPlaceTy logic anyway.
+//! For OpTy, the custom thing on field projections has to be pretty clever (since `Operand::Immediate` can have fields),
+//! but for array/slice operations it only has to worry about `Operand::Uninit`. That makes the value part trivial,
+//! but we still need to do bounds checking and adjust the layout. To not duplicate that with MPlaceTy, we actually
+//! implement the logic on OpTy, and MPlaceTy calls that.
+
+use std::hash::Hash;
+
+use rustc_middle::mir;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_target::abi::{self, Abi, VariantIdx};
+
+use super::{
+ ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, PlaceTy,
+ Provenance, Scalar,
+};
+
+// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
+impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
+where
+ Prov: Provenance + Eq + Hash + 'static,
+ M: Machine<'mir, 'tcx, Provenance = Prov>,
+{
+ //# Field access
+
+ /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
+ /// always possible without allocating, so it can take `&self`. Also returns the field's layout.
+ /// This supports both struct and array fields.
+ ///
+ /// For arrays, however, the `usize` index type is restricting; for indexing into arrays,
+ /// use `mplace_index`.
+ pub fn mplace_field(
+ &self,
+ base: &MPlaceTy<'tcx, M::Provenance>,
+ field: usize,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ let offset = base.layout.fields.offset(field);
+ let field_layout = base.layout.field(self, field);
+
+ // Offset may need adjustment for unsized fields.
+ let (meta, offset) = if field_layout.is_unsized() {
+ // Re-use parent metadata to determine dynamic field layout.
+ // With custom DSTs, this *will* execute user-defined code, but the same
+ // happens at run-time so that's okay.
+ match self.size_and_align_of(&base.meta, &field_layout)? {
+ Some((_, align)) => (base.meta, offset.align_to(align)),
+ None => {
+ // For unsized types with an extern type tail we perform no adjustments.
+ // NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
+ assert!(matches!(base.meta, MemPlaceMeta::None));
+ (base.meta, offset)
+ }
+ }
+ } else {
+ // base.meta could be present; we might be accessing a sized field of an unsized
+ // struct.
+ (MemPlaceMeta::None, offset)
+ };
+
+ // We do not look at `base.layout.align` nor `field_layout.align`, unlike
+ // codegen -- mostly to see if we can get away with that
+ base.offset_with_meta(offset, meta, field_layout, self)
+ }
+
+ /// Gets the place of a field inside the place, and also the field's type.
+ /// Just a convenience function, but used quite a bit.
+ /// This is the only projection that might have a side-effect: we cannot project
+ /// into the field of a local `ScalarPair`; we have to allocate it first.
+ pub fn place_field(
+ &mut self,
+ base: &PlaceTy<'tcx, M::Provenance>,
+ field: usize,
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+ // FIXME: We could try to be smarter and avoid allocation for fields that span the
+ // entire place.
+ let base = self.force_allocation(base)?;
+ Ok(self.mplace_field(&base, field)?.into())
+ }
+
+ pub fn operand_field(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ field: usize,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let base = match base.try_as_mplace() {
+ Ok(ref mplace) => {
+ // We can reuse the mplace field computation logic for indirect operands.
+ let field = self.mplace_field(mplace, field)?;
+ return Ok(field.into());
+ }
+ Err(value) => value,
+ };
+
+ let field_layout = base.layout.field(self, field);
+ let offset = base.layout.fields.offset(field);
+ // This makes several assumptions about what layouts we will encounter; we match what
+ // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
+ let field_val: Immediate<_> = match (*base, base.layout.abi) {
+ // the field contains no information, can be left uninit
+ _ if field_layout.is_zst() => Immediate::Uninit,
+ // the field covers the entire type
+ _ if field_layout.size == base.layout.size => {
+ assert!(match (base.layout.abi, field_layout.abi) {
+ (Abi::Scalar(..), Abi::Scalar(..)) => true,
+ (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+ _ => false,
+ });
+ assert!(offset.bytes() == 0);
+ *base
+ }
+ // extract fields from types with `ScalarPair` ABI
+ (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+ assert!(matches!(field_layout.abi, Abi::Scalar(..)));
+ Immediate::from(if offset.bytes() == 0 {
+ debug_assert_eq!(field_layout.size, a.size(self));
+ a_val
+ } else {
+ debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
+ debug_assert_eq!(field_layout.size, b.size(self));
+ b_val
+ })
+ }
+ _ => span_bug!(
+ self.cur_span(),
+ "invalid field access on immediate {}, layout {:#?}",
+ base,
+ base.layout
+ ),
+ };
+
+ Ok(ImmTy::from_immediate(field_val, field_layout).into())
+ }
+
+ //# Downcasting
+
+ pub fn mplace_downcast(
+ &self,
+ base: &MPlaceTy<'tcx, M::Provenance>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ // Downcasts only change the layout.
+ // (In particular, no check about whether this is even the active variant -- that's by design,
+ // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
+ assert!(!base.meta.has_meta());
+ let mut base = *base;
+ base.layout = base.layout.for_variant(self, variant);
+ Ok(base)
+ }
+
+ pub fn place_downcast(
+ &self,
+ base: &PlaceTy<'tcx, M::Provenance>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+ // Downcast just changes the layout
+ let mut base = base.clone();
+ base.layout = base.layout.for_variant(self, variant);
+ Ok(base)
+ }
+
+ pub fn operand_downcast(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ // Downcast just changes the layout
+ let mut base = base.clone();
+ base.layout = base.layout.for_variant(self, variant);
+ Ok(base)
+ }
+
+ //# Slice indexing
+
+ #[inline(always)]
+ pub fn operand_index(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ index: u64,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ // Not using the layout method because we want to compute on u64
+ match base.layout.fields {
+ abi::FieldsShape::Array { stride, count: _ } => {
+ // `count` is nonsense for slices, use the dynamic length instead.
+ let len = base.len(self)?;
+ if index >= len {
+ // This can only be reached in ConstProp and non-rustc-MIR.
+ throw_ub!(BoundsCheckFailed { len, index });
+ }
+ let offset = stride * index; // `Size` multiplication
+ // All fields have the same layout.
+ let field_layout = base.layout.field(self, 0);
+ base.offset(offset, field_layout, self)
+ }
+ _ => span_bug!(
+ self.cur_span(),
+ "`operand_index` called on non-array type {:?}",
+ base.layout.ty
+ ),
+ }
+ }
+
+ // Iterates over all fields of an array. Much more efficient than doing the
+ // same by repeatedly calling `operand_index`.
+ pub fn operand_array_fields<'a>(
+ &self,
+ base: &'a OpTy<'tcx, Prov>,
+ ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, OpTy<'tcx, Prov>>> + 'a> {
+ let len = base.len(self)?; // also asserts that we have a type where this makes sense
+ let abi::FieldsShape::Array { stride, .. } = base.layout.fields else {
+ span_bug!(self.cur_span(), "operand_array_fields: expected an array layout");
+ };
+ let field_layout = base.layout.field(self, 0);
+ let dl = &self.tcx.data_layout;
+ // `Size` multiplication
+ Ok((0..len).map(move |i| base.offset(stride * i, field_layout, dl)))
+ }
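+
+ // Usage sketch (hypothetical `op` with array layout):
+ //
+ // for field in self.operand_array_fields(&op)? {
+ // let elem = field?;
+ // // ... read or project further into `elem` ...
+ // }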
+
+ /// Index into an array.
+ pub fn mplace_index(
+ &self,
+ base: &MPlaceTy<'tcx, M::Provenance>,
+ index: u64,
+ ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
+ Ok(self.operand_index(&base.into(), index)?.assert_mem_place())
+ }
+
+ pub fn place_index(
+ &mut self,
+ base: &PlaceTy<'tcx, M::Provenance>,
+ index: u64,
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+ // There's not a lot we can do here, since we cannot have a place to a part of a local.
+ // (We could special-case accessing the only element of a 1-element array, since that is
+ // still the entire local, but that doesn't seem worth it.)
+ let base = self.force_allocation(base)?;
+ Ok(self.mplace_index(&base, index)?.into())
+ }
+
+ //# ConstantIndex support
+
+ fn operand_constant_index(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ offset: u64,
+ min_length: u64,
+ from_end: bool,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let n = base.len(self)?;
+ if n < min_length {
+ // This can only be reached in ConstProp and non-rustc-MIR.
+ throw_ub!(BoundsCheckFailed { len: min_length, index: n });
+ }
+
+ let index = if from_end {
+ assert!(0 < offset && offset <= min_length);
+ n.checked_sub(offset).unwrap()
+ } else {
+ assert!(offset < min_length);
+ offset
+ };
+
+ self.operand_index(base, index)
+ }
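+
+ // Worked example (illustrative): for the pattern `[.., x]` on a slice of length
+ // `n = 5`, MIR emits `offset = 1, min_length = 1, from_end = true`, so this
+ // computes `index = 5 - 1 = 4`, i.e. the last element.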
+
+ fn place_constant_index(
+ &mut self,
+ base: &PlaceTy<'tcx, M::Provenance>,
+ offset: u64,
+ min_length: u64,
+ from_end: bool,
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+ let base = self.force_allocation(base)?;
+ Ok(self
+ .operand_constant_index(&base.into(), offset, min_length, from_end)?
+ .assert_mem_place()
+ .into())
+ }
+
+ //# Subslicing
+
+ fn operand_subslice(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ from: u64,
+ to: u64,
+ from_end: bool,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ let len = base.len(self)?; // also asserts that we have a type where this makes sense
+ let actual_to = if from_end {
+ if from.checked_add(to).map_or(true, |to| to > len) {
+ // This can only be reached in ConstProp and non-rustc-MIR.
+ throw_ub!(BoundsCheckFailed { len, index: from.saturating_add(to) });
+ }
+ len.checked_sub(to).unwrap()
+ } else {
+ to
+ };
+
+ // Not using the layout method because that works with `usize`, and does not work with
+ // slices (which have count 0 in their layout).
+ let from_offset = match base.layout.fields {
+ abi::FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
+ _ => {
+ span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
+ }
+ };
+
+ // Compute meta and new layout
+ let inner_len = actual_to.checked_sub(from).unwrap();
+ let (meta, ty) = match base.layout.ty.kind() {
+ // It is not nice to match on the type, but that seems to be the only way to
+ // implement this.
+ ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
+ ty::Slice(..) => {
+ let len = Scalar::from_machine_usize(inner_len, self);
+ (MemPlaceMeta::Meta(len), base.layout.ty)
+ }
+ _ => {
+ span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
+ }
+ };
+ let layout = self.layout_of(ty)?;
+ base.offset_with_meta(from_offset, meta, layout, self)
+ }
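+
+ // Worked example (illustrative): the pattern `[_, rest @ .., _]` on a slice of
+ // length 10 uses `from = 1, to = 1, from_end = true`; then `actual_to = 10 - 1 = 9`
+ // and the resulting subslice has `inner_len = 9 - 1 = 8` elements starting at
+ // offset `stride * 1`.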
+
+ pub fn place_subslice(
+ &mut self,
+ base: &PlaceTy<'tcx, M::Provenance>,
+ from: u64,
+ to: u64,
+ from_end: bool,
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+ let base = self.force_allocation(base)?;
+ Ok(self.operand_subslice(&base.into(), from, to, from_end)?.assert_mem_place().into())
+ }
+
+ //# Applying a general projection
+
+ /// Projects into a place.
+ #[instrument(skip(self), level = "trace")]
+ pub fn place_projection(
+ &mut self,
+ base: &PlaceTy<'tcx, M::Provenance>,
+ proj_elem: mir::PlaceElem<'tcx>,
+ ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
+ use rustc_middle::mir::ProjectionElem::*;
+ Ok(match proj_elem {
+ Field(field, _) => self.place_field(base, field.index())?,
+ Downcast(_, variant) => self.place_downcast(base, variant)?,
+ Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
+ Index(local) => {
+ let layout = self.layout_of(self.tcx.types.usize)?;
+ let n = self.local_to_op(self.frame(), local, Some(layout))?;
+ let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+ self.place_index(base, n)?
+ }
+ ConstantIndex { offset, min_length, from_end } => {
+ self.place_constant_index(base, offset, min_length, from_end)?
+ }
+ Subslice { from, to, from_end } => self.place_subslice(base, from, to, from_end)?,
+ })
+ }
+
+ #[instrument(skip(self), level = "trace")]
+ pub fn operand_projection(
+ &self,
+ base: &OpTy<'tcx, M::Provenance>,
+ proj_elem: mir::PlaceElem<'tcx>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ use rustc_middle::mir::ProjectionElem::*;
+ Ok(match proj_elem {
+ Field(field, _) => self.operand_field(base, field.index())?,
+ Downcast(_, variant) => self.operand_downcast(base, variant)?,
+ Deref => self.deref_operand(base)?.into(),
+ Index(local) => {
+ let layout = self.layout_of(self.tcx.types.usize)?;
+ let n = self.local_to_op(self.frame(), local, Some(layout))?;
+ let n = self.read_scalar(&n)?.to_machine_usize(self)?;
+ self.operand_index(base, n)?
+ }
+ ConstantIndex { offset, min_length, from_end } => {
+ self.operand_constant_index(base, offset, min_length, from_end)?
+ }
+ Subslice { from, to, from_end } => self.operand_subslice(base, from, to, from_end)?,
+ })
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
new file mode 100644
index 000000000..fea158a9f
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -0,0 +1,320 @@
+//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
+//!
+//! The main entry point is the `step` method.
+
+use rustc_middle::mir;
+use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::ty::layout::LayoutOf;
+
+use super::{InterpCx, Machine};
+
+/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
+/// same type as the result.
+#[inline]
+fn binop_left_homogeneous(op: mir::BinOp) -> bool {
+ use rustc_middle::mir::BinOp::*;
+ match op {
+ Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
+ Eq | Ne | Lt | Le | Gt | Ge => false,
+ }
+}
+/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
+/// same type as the LHS.
+#[inline]
+fn binop_right_homogeneous(op: mir::BinOp) -> bool {
+ use rustc_middle::mir::BinOp::*;
+ match op {
+ Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
+ Offset | Shl | Shr => false,
+ }
+}
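+
+// For instance (illustrative): `Shl` is left- but not right-homogeneous, since
+// `u64 << u32` is well-typed MIR -- the RHS need not match the LHS type. `Eq` is
+// right- but not left-homogeneous: both operands share a type, but the result is
+// `bool`.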
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ pub fn run(&mut self) -> InterpResult<'tcx> {
+ while self.step()? {}
+ Ok(())
+ }
+
+ /// Returns `true` as long as there are more things to do.
+ ///
+ /// This is used by [priroda](https://github.com/oli-obk/priroda)
+ ///
+ /// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`
+ #[inline(always)]
+ pub fn step(&mut self) -> InterpResult<'tcx, bool> {
+ if self.stack().is_empty() {
+ return Ok(false);
+ }
+
+ let Ok(loc) = self.frame().loc else {
+ // We are unwinding and this fn has no cleanup code.
+ // Just go on unwinding.
+ trace!("unwinding: skipping frame");
+ self.pop_stack_frame(/* unwinding */ true)?;
+ return Ok(true);
+ };
+ let basic_block = &self.body().basic_blocks()[loc.block];
+
+ if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
+ let old_frames = self.frame_idx();
+ self.statement(stmt)?;
+ // Make sure we are not updating `statement_index` of the wrong frame.
+ assert_eq!(old_frames, self.frame_idx());
+ // Advance the program counter.
+ self.frame_mut().loc.as_mut().unwrap().statement_index += 1;
+ return Ok(true);
+ }
+
+ M::before_terminator(self)?;
+
+ let terminator = basic_block.terminator();
+ self.terminator(terminator)?;
+ Ok(true)
+ }
+
+ /// Runs the interpretation logic for the given `mir::Statement` at the current frame and
+ /// statement counter.
+ ///
+ /// This does NOT move the statement counter forward, the caller has to do that!
+ pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
+ info!("{:?}", stmt);
+
+ use rustc_middle::mir::StatementKind::*;
+
+ match &stmt.kind {
+ Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,
+
+ SetDiscriminant { place, variant_index } => {
+ let dest = self.eval_place(**place)?;
+ self.write_discriminant(*variant_index, &dest)?;
+ }
+
+ Deinit(place) => {
+ let dest = self.eval_place(**place)?;
+ self.write_uninit(&dest)?;
+ }
+
+ // Mark locals as alive
+ StorageLive(local) => {
+ self.storage_live(*local)?;
+ }
+
+ // Mark locals as dead
+ StorageDead(local) => {
+ self.storage_dead(*local)?;
+ }
+
+ // No dynamic semantics attached to `FakeRead`; MIR
+ // interpreter is solely intended for borrowck'ed code.
+ FakeRead(..) => {}
+
+ // Stacked Borrows.
+ Retag(kind, place) => {
+ let dest = self.eval_place(**place)?;
+ M::retag(self, *kind, &dest)?;
+ }
+
+ // Call CopyNonOverlapping
+ CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => {
+ let src = self.eval_operand(src, None)?;
+ let dst = self.eval_operand(dst, None)?;
+ let count = self.eval_operand(count, None)?;
+ self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)?;
+ }
+
+ // Statements we do not track.
+ AscribeUserType(..) => {}
+
+ // Currently, Miri discards Coverage statements. Coverage statements are only injected
+ // via an optional compile time MIR pass and have no side effects. Since Coverage
+ // statements don't exist at the source level, it is safe for Miri to ignore them, even
+ // for undefined behavior (UB) checks.
+ //
+ // A coverage counter inside a const expression (for example, a counter injected in a
+ // const function) is discarded when the const is evaluated at compile time. Whether
+ // this should change, and/or how to implement a const eval counter, is a subject of the
+ // following issue:
+ //
+ // FIXME(#73156): Handle source code coverage in const eval
+ Coverage(..) => {}
+
+ // Defined to do nothing. These are added by optimization passes, to avoid changing the
+ // size of MIR constantly.
+ Nop => {}
+ }
+
+ Ok(())
+ }
+
+ /// Evaluate an assignment statement.
+ ///
+ /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
+ /// type writes its results directly into the memory specified by the place.
+ pub fn eval_rvalue_into_place(
+ &mut self,
+ rvalue: &mir::Rvalue<'tcx>,
+ place: mir::Place<'tcx>,
+ ) -> InterpResult<'tcx> {
+ let dest = self.eval_place(place)?;
+ // FIXME: ensure some kind of non-aliasing between LHS and RHS?
+ // Also see https://github.com/rust-lang/rust/issues/68364.
+
+ use rustc_middle::mir::Rvalue::*;
+ match *rvalue {
+ ThreadLocalRef(did) => {
+ let ptr = M::thread_local_static_base_pointer(self, did)?;
+ self.write_pointer(ptr, &dest)?;
+ }
+
+ Use(ref operand) => {
+ // Avoid recomputing the layout
+ let op = self.eval_operand(operand, Some(dest.layout))?;
+ self.copy_op(&op, &dest, /*allow_transmute*/ false)?;
+ }
+
+ CopyForDeref(ref place) => {
+ let op = self.eval_place_to_op(*place, Some(dest.layout))?;
+ self.copy_op(&op, &dest, /* allow_transmute*/ false)?;
+ }
+
+ BinaryOp(bin_op, box (ref left, ref right)) => {
+ let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
+ let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
+ let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+ let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+ self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
+ }
+
+ CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
+ // Due to the extra boolean in the result, we can never reuse the `dest.layout`.
+ let left = self.read_immediate(&self.eval_operand(left, None)?)?;
+ let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
+ let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
+ self.binop_with_overflow(
+ bin_op, /*force_overflow_checks*/ false, &left, &right, &dest,
+ )?;
+ }
+
+ UnaryOp(un_op, ref operand) => {
+ // The operand always has the same type as the result.
+ let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
+ let val = self.unary_op(un_op, &val)?;
+ assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
+ self.write_immediate(*val, &dest)?;
+ }
+
+ Aggregate(box ref kind, ref operands) => {
+ assert!(matches!(kind, mir::AggregateKind::Array(..)));
+
+ for (field_index, operand) in operands.iter().enumerate() {
+ let op = self.eval_operand(operand, None)?;
+ let field_dest = self.place_field(&dest, field_index)?;
+ self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
+ }
+ }
+
+ Repeat(ref operand, _) => {
+ let src = self.eval_operand(operand, None)?;
+ assert!(!src.layout.is_unsized());
+ let dest = self.force_allocation(&dest)?;
+ let length = dest.len(self)?;
+
+ if length == 0 {
+ // Nothing to copy... but let's still make sure that `dest` as a place is valid.
+ self.get_place_alloc_mut(&dest)?;
+ } else {
+ // Write the src to the first element.
+ let first = self.mplace_field(&dest, 0)?;
+ self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;
+
+ // This is performance-sensitive code for big static/const arrays! So we
+ // avoid writing each operand individually and instead just make many copies
+ // of the first element.
+ let elem_size = first.layout.size;
+ let first_ptr = first.ptr;
+ let rest_ptr = first_ptr.offset(elem_size, self)?;
+ // For the alignment of `rest_ptr`, we crucially do *not* use `first.align` as
+ // that place might be more aligned than its type mandates (a `u8` array could
+ // be 4-aligned if it sits at the right spot in a struct). Instead we use
+ // `first.layout.align`, i.e., the alignment given by the type.
+ self.mem_copy_repeatedly(
+ first_ptr,
+ first.align,
+ rest_ptr,
+ first.layout.align.abi,
+ elem_size,
+ length - 1,
+ /*nonoverlapping:*/ true,
+ )?;
+ }
+ }
+
+ Len(place) => {
+ let src = self.eval_place(place)?;
+ let mplace = self.force_allocation(&src)?;
+ let len = mplace.len(self)?;
+ self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
+ }
+
+ AddressOf(_, place) | Ref(_, _, place) => {
+ let src = self.eval_place(place)?;
+ let place = self.force_allocation(&src)?;
+ self.write_immediate(place.to_ref(self), &dest)?;
+ }
+
+ NullaryOp(null_op, ty) => {
+ let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty)?;
+ let layout = self.layout_of(ty)?;
+ if layout.is_unsized() {
+ // FIXME: This should be a span_bug (#80742)
+ self.tcx.sess.delay_span_bug(
+ self.frame().current_span(),
+ &format!("Nullary MIR operator called for unsized type {}", ty),
+ );
+ throw_inval!(SizeOfUnsizedType(ty));
+ }
+ let val = match null_op {
+ mir::NullOp::SizeOf => layout.size.bytes(),
+ mir::NullOp::AlignOf => layout.align.abi.bytes(),
+ };
+ self.write_scalar(Scalar::from_machine_usize(val, self), &dest)?;
+ }
+
+ ShallowInitBox(ref operand, _) => {
+ let src = self.eval_operand(operand, None)?;
+ let v = self.read_immediate(&src)?;
+ self.write_immediate(*v, &dest)?;
+ }
+
+ Cast(cast_kind, ref operand, cast_ty) => {
+ let src = self.eval_operand(operand, None)?;
+ let cast_ty =
+ self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty)?;
+ self.cast(&src, cast_kind, cast_ty, &dest)?;
+ }
+
+ Discriminant(place) => {
+ let op = self.eval_place_to_op(place, None)?;
+ let discr_val = self.read_discriminant(&op)?.0;
+ self.write_scalar(discr_val, &dest)?;
+ }
+ }
+
+ trace!("{:?}", self.dump_place(*dest));
+
+ Ok(())
+ }
+
+ /// Evaluate the given terminator. Will also adjust the stack frame and statement position accordingly.
+ fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
+ info!("{:?}", terminator.kind);
+
+ self.eval_terminator(terminator)?;
+ if !self.stack().is_empty() {
+ if let Ok(loc) = self.frame().loc {
+ info!("// executing {:?}", loc.block);
+ }
+ }
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/terminator.rs b/compiler/rustc_const_eval/src/interpret/terminator.rs
new file mode 100644
index 000000000..d563e35f9
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/terminator.rs
@@ -0,0 +1,671 @@
+use std::borrow::Cow;
+
+use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_middle::ty::Instance;
+use rustc_middle::{
+ mir,
+ ty::{self, Ty},
+};
+use rustc_target::abi;
+use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMode};
+use rustc_target::spec::abi::Abi;
+
+use super::{
+ FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy, Operand,
+ PlaceTy, Scalar, StackPopCleanup, StackPopUnwind,
+};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ pub(super) fn eval_terminator(
+ &mut self,
+ terminator: &mir::Terminator<'tcx>,
+ ) -> InterpResult<'tcx> {
+ use rustc_middle::mir::TerminatorKind::*;
+ match terminator.kind {
+ Return => {
+ self.pop_stack_frame(/* unwinding */ false)?
+ }
+
+ Goto { target } => self.go_to_block(target),
+
+ SwitchInt { ref discr, ref targets, switch_ty } => {
+ let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
+ trace!("SwitchInt({:?})", *discr);
+ assert_eq!(discr.layout.ty, switch_ty);
+
+ // Branch to the `otherwise` case by default if no match is found.
+ assert!(!targets.iter().is_empty());
+ let mut target_block = targets.otherwise();
+
+ for (const_int, target) in targets.iter() {
+ // Compare using MIR BinOp::Eq, to also support pointer values.
+ // (Avoiding `self.binary_op` as that does some redundant layout computation.)
+ let res = self
+ .overflowing_binary_op(
+ mir::BinOp::Eq,
+ &discr,
+ &ImmTy::from_uint(const_int, discr.layout),
+ )?
+ .0;
+ if res.to_bool()? {
+ target_block = target;
+ break;
+ }
+ }
+
+ self.go_to_block(target_block);
+ }
+
+ Call {
+ ref func,
+ ref args,
+ destination,
+ target,
+ ref cleanup,
+ from_hir_call: _,
+ fn_span: _,
+ } => {
+ let old_stack = self.frame_idx();
+ let old_loc = self.frame().loc;
+ let func = self.eval_operand(func, None)?;
+ let args = self.eval_operands(args)?;
+
+ let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
+ let fn_sig =
+ self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
+ let extra_args = &args[fn_sig.inputs().len()..];
+ let extra_args = self.tcx.mk_type_list(extra_args.iter().map(|arg| arg.layout.ty));
+
+ let (fn_val, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
+ ty::FnPtr(_sig) => {
+ let fn_ptr = self.read_pointer(&func)?;
+ let fn_val = self.get_ptr_fn(fn_ptr)?;
+ (fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
+ }
+ ty::FnDef(def_id, substs) => {
+ let instance =
+ self.resolve(ty::WithOptConstParam::unknown(def_id), substs)?;
+ (
+ FnVal::Instance(instance),
+ self.fn_abi_of_instance(instance, extra_args)?,
+ instance.def.requires_caller_location(*self.tcx),
+ )
+ }
+ _ => span_bug!(
+ terminator.source_info.span,
+ "invalid callee of type {:?}",
+ func.layout.ty
+ ),
+ };
+
+ let destination = self.eval_place(destination)?;
+ self.eval_fn_call(
+ fn_val,
+ (fn_sig.abi, fn_abi),
+ &args,
+ with_caller_location,
+ &destination,
+ target,
+ match (cleanup, fn_abi.can_unwind) {
+ (Some(cleanup), true) => StackPopUnwind::Cleanup(*cleanup),
+ (None, true) => StackPopUnwind::Skip,
+ (_, false) => StackPopUnwind::NotAllowed,
+ },
+ )?;
+ // Sanity-check that `eval_fn_call` either pushed a new frame or
+ // did a jump to another block.
+ if self.frame_idx() == old_stack && self.frame().loc == old_loc {
+ span_bug!(terminator.source_info.span, "evaluating this call made no progress");
+ }
+ }
+
+ Drop { place, target, unwind } => {
+ let place = self.eval_place(place)?;
+ let ty = place.layout.ty;
+ trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
+
+ let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
+ self.drop_in_place(&place, instance, target, unwind)?;
+ }
+
+ Assert { ref cond, expected, ref msg, target, cleanup } => {
+ let cond_val =
+ self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
+ if expected == cond_val {
+ self.go_to_block(target);
+ } else {
+ M::assert_panic(self, msg, cleanup)?;
+ }
+ }
+
+ Abort => {
+ M::abort(self, "the program aborted execution".to_owned())?;
+ }
+
+ // When we encounter Resume, we've finished unwinding
+ // cleanup for the current stack frame. We pop it in order
+ // to continue unwinding the next frame.
+ Resume => {
+ trace!("unwinding: resuming from cleanup");
+ // By definition, a Resume terminator means
+ // that we're unwinding
+ self.pop_stack_frame(/* unwinding */ true)?;
+ return Ok(());
+ }
+
+ // It is UB to ever encounter this.
+ Unreachable => throw_ub!(Unreachable),
+
+ // These should never occur for MIR we actually run.
+ DropAndReplace { .. }
+ | FalseEdge { .. }
+ | FalseUnwind { .. }
+ | Yield { .. }
+ | GeneratorDrop => span_bug!(
+ terminator.source_info.span,
+ "{:#?} should have been eliminated by MIR pass",
+ terminator.kind
+ ),
+
+ // Inline assembly can't be interpreted.
+ InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
+ }
+
+ Ok(())
+ }
+
+ fn check_argument_compat(
+ caller_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ ) -> bool {
+ // Heuristic for type comparison.
+ let layout_compat = || {
+ if caller_abi.layout.ty == callee_abi.layout.ty {
+ // No question
+ return true;
+ }
+ if caller_abi.layout.is_unsized() || callee_abi.layout.is_unsized() {
+ // No, no, no. We require the types to *exactly* match for unsized arguments. If
+ // these are somehow unsized "in a different way" (say, `dyn Trait` vs `[i32]`),
+ // then who knows what happens.
+ return false;
+ }
+ if caller_abi.layout.size != callee_abi.layout.size
+ || caller_abi.layout.align.abi != callee_abi.layout.align.abi
+ {
+ // This cannot go well...
+ return false;
+ }
+ // The rest *should* be okay, but we are extra conservative.
+ match (caller_abi.layout.abi, callee_abi.layout.abi) {
+ // Different valid ranges are okay (once we enforce validity,
+ // that will take care to make it UB to leave the range, just
+ // like for transmute).
+ (abi::Abi::Scalar(caller), abi::Abi::Scalar(callee)) => {
+ caller.primitive() == callee.primitive()
+ }
+ (
+ abi::Abi::ScalarPair(caller1, caller2),
+ abi::Abi::ScalarPair(callee1, callee2),
+ ) => {
+ caller1.primitive() == callee1.primitive()
+ && caller2.primitive() == callee2.primitive()
+ }
+ // Be conservative
+ _ => false,
+ }
+ };
+ // Padding must be fully equal.
+ let pad_compat = || caller_abi.pad == callee_abi.pad;
+ // When comparing the PassMode, we have to be smart about comparing the attributes.
+ let arg_attr_compat = |a1: ArgAttributes, a2: ArgAttributes| {
+ // There's only one regular attribute that matters for the call ABI: InReg.
+ // Everything else is things like noalias, dereferenceable, nonnull, ...
+ // (This also applies to pointee_size, pointee_align.)
+ if a1.regular.contains(ArgAttribute::InReg) != a2.regular.contains(ArgAttribute::InReg)
+ {
+ return false;
+ }
+ // We also compare the sign extension mode -- this could let the callee make assumptions
+ // about bits that conceptually were not even passed.
+ if a1.arg_ext != a2.arg_ext {
+ return false;
+ }
+ return true;
+ };
+ let mode_compat = || match (caller_abi.mode, callee_abi.mode) {
+ (PassMode::Ignore, PassMode::Ignore) => true,
+ (PassMode::Direct(a1), PassMode::Direct(a2)) => arg_attr_compat(a1, a2),
+ (PassMode::Pair(a1, b1), PassMode::Pair(a2, b2)) => {
+ arg_attr_compat(a1, a2) && arg_attr_compat(b1, b2)
+ }
+ (PassMode::Cast(c1), PassMode::Cast(c2)) => c1 == c2,
+ (
+ PassMode::Indirect { attrs: a1, extra_attrs: None, on_stack: s1 },
+ PassMode::Indirect { attrs: a2, extra_attrs: None, on_stack: s2 },
+ ) => arg_attr_compat(a1, a2) && s1 == s2,
+ (
+ PassMode::Indirect { attrs: a1, extra_attrs: Some(e1), on_stack: s1 },
+ PassMode::Indirect { attrs: a2, extra_attrs: Some(e2), on_stack: s2 },
+ ) => arg_attr_compat(a1, a2) && arg_attr_compat(e1, e2) && s1 == s2,
+ _ => false,
+ };
+
+ if layout_compat() && pad_compat() && mode_compat() {
+ return true;
+ }
+ trace!(
+ "check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
+ caller_abi,
+ callee_abi
+ );
+ return false;
+ }
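+
+ // Illustrative consequences (assuming default layouts): passing `&mut T` where
+ // `&T` is expected is accepted (same scalar primitive), while passing `u64` for
+ // `f64` is rejected even though size and alignment match, because the scalar
+ // primitives differ.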
+
+ /// Initialize a single callee argument, checking the types for compatibility.
+ fn pass_argument<'x, 'y>(
+ &mut self,
+ caller_args: &mut impl Iterator<
+ Item = (&'x OpTy<'tcx, M::Provenance>, &'y ArgAbi<'tcx, Ty<'tcx>>),
+ >,
+ callee_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+ callee_arg: &PlaceTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx>
+ where
+ 'tcx: 'x,
+ 'tcx: 'y,
+ {
+ if matches!(callee_abi.mode, PassMode::Ignore) {
+ // This one is skipped.
+ return Ok(());
+ }
+ // Find next caller arg.
+ let (caller_arg, caller_abi) = caller_args.next().ok_or_else(|| {
+ err_ub_format!("calling a function with fewer arguments than it requires")
+ })?;
+ // Now, check
+ if !Self::check_argument_compat(caller_abi, callee_abi) {
+ throw_ub_format!(
+ "calling a function with argument of type {:?} passing data of type {:?}",
+ callee_arg.layout.ty,
+ caller_arg.layout.ty
+ )
+ }
+ // Special handling for unsized parameters.
+ if caller_arg.layout.is_unsized() {
+ // `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way.
+ assert_eq!(caller_arg.layout.ty, callee_arg.layout.ty);
+ // We have to properly pre-allocate the memory for the callee.
+ // So let's tear down some wrappers.
+ // This all has to be in memory, there are no immediate unsized values.
+ let src = caller_arg.assert_mem_place();
+ // The destination cannot be one of these "spread args".
+ let (dest_frame, dest_local) = callee_arg.assert_local();
+ // We are just initializing things, so there can't be anything here yet.
+ assert!(matches!(
+ *self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
+ Operand::Immediate(Immediate::Uninit)
+ ));
+ // Allocate enough memory to hold `src`.
+ let Some((size, align)) = self.size_and_align_of_mplace(&src)? else {
+ span_bug!(self.cur_span(), "unsized fn arg with `extern` type tail should not be allowed")
+ };
+ let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?;
+ let dest_place =
+ MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), callee_arg.layout, src.meta);
+ // Update the local to be that new place.
+ *M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place);
+ }
+ // We allow some transmutes here.
+ // FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
+ // is true for all `copy_op`, but there are a lot of special cases for argument passing
+ // specifically.)
+ self.copy_op(&caller_arg, callee_arg, /*allow_transmute*/ true)
+ }
+
+ /// Call this function -- pushing the stack frame and initializing the arguments.
+ ///
+ /// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
+ /// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
+ ///
+ /// `with_caller_location` indicates whether the caller passed a caller location. Miri
+ /// implements caller locations without argument passing, but to match `FnAbi` we need to know
+ /// when those arguments are present.
+ pub(crate) fn eval_fn_call(
+ &mut self,
+ fn_val: FnVal<'tcx, M::ExtraFnVal>,
+ (caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
+ args: &[OpTy<'tcx, M::Provenance>],
+ with_caller_location: bool,
+ destination: &PlaceTy<'tcx, M::Provenance>,
+ target: Option<mir::BasicBlock>,
+ mut unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx> {
+ trace!("eval_fn_call: {:#?}", fn_val);
+
+ let instance = match fn_val {
+ FnVal::Instance(instance) => instance,
+ FnVal::Other(extra) => {
+ return M::call_extra_fn(
+ self,
+ extra,
+ caller_abi,
+ args,
+ destination,
+ target,
+ unwind,
+ );
+ }
+ };
+
+ match instance.def {
+ ty::InstanceDef::Intrinsic(def_id) => {
+ assert!(self.tcx.is_intrinsic(def_id));
+ // caller_fn_abi is not relevant here, we interpret the arguments directly for each intrinsic.
+ M::call_intrinsic(self, instance, args, destination, target, unwind)
+ }
+ ty::InstanceDef::VTableShim(..)
+ | ty::InstanceDef::ReifyShim(..)
+ | ty::InstanceDef::ClosureOnceShim { .. }
+ | ty::InstanceDef::FnPtrShim(..)
+ | ty::InstanceDef::DropGlue(..)
+ | ty::InstanceDef::CloneShim(..)
+ | ty::InstanceDef::Item(_) => {
+ // We need MIR for this fn
+ let Some((body, instance)) =
+ M::find_mir_or_eval_fn(self, instance, caller_abi, args, destination, target, unwind)? else {
+ return Ok(());
+ };
+
+ // Compute callee information using the `instance` returned by
+ // `find_mir_or_eval_fn`.
+ // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
+ let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+ if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
+ throw_unsup_format!("calling a c-variadic function is not supported");
+ }
+
+ if M::enforce_abi(self) {
+ if caller_fn_abi.conv != callee_fn_abi.conv {
+ throw_ub_format!(
+ "calling a function with calling convention {:?} using calling convention {:?}",
+ callee_fn_abi.conv,
+ caller_fn_abi.conv
+ )
+ }
+ }
+
+ if !matches!(unwind, StackPopUnwind::NotAllowed) && !callee_fn_abi.can_unwind {
+ // The callee cannot unwind.
+ unwind = StackPopUnwind::NotAllowed;
+ }
+
+ self.push_stack_frame(
+ instance,
+ body,
+ destination,
+ StackPopCleanup::Goto { ret: target, unwind },
+ )?;
+
+ // If an error is raised here, pop the frame again to get an accurate backtrace.
+ // To this end, we wrap it all in a `try` block.
+ let res: InterpResult<'tcx> = try {
+ trace!(
+ "caller ABI: {:?}, args: {:#?}",
+ caller_abi,
+ args.iter()
+ .map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
+ .collect::<Vec<_>>()
+ );
+ trace!(
+ "spread_arg: {:?}, locals: {:#?}",
+ body.spread_arg,
+ body.args_iter()
+ .map(|local| (
+ local,
+ self.layout_of_local(self.frame(), local, None).unwrap().ty
+ ))
+ .collect::<Vec<_>>()
+ );
+
+ // In principle, we have two iterators: Where the arguments come from, and where
+ // they go to.
+
+ // For where they come from: If the ABI is RustCall, we untuple the
+ // last incoming argument. These two iterators do not have the same type,
+ // so to keep the code paths uniform we accept an allocation
+ // (for RustCall ABI only).
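+                    // (Illustrative example: a call through `Fn::call(f, (a, b))` uses the
+                    // `RustCall` ABI; its last operand is the tuple `(a, b)`, whose fields we
+                    // expand here so that the callee sees two separate arguments.)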
+ let caller_args: Cow<'_, [OpTy<'tcx, M::Provenance>]> =
+ if caller_abi == Abi::RustCall && !args.is_empty() {
+ // Untuple
+ let (untuple_arg, args) = args.split_last().unwrap();
+ trace!("eval_fn_call: Will pass last argument by untupling");
+ Cow::from(
+ args.iter()
+ .map(|a| Ok(a.clone()))
+ .chain(
+ (0..untuple_arg.layout.fields.count())
+ .map(|i| self.operand_field(untuple_arg, i)),
+ )
+ .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::Provenance>>>>(
+ )?,
+ )
+ } else {
+ // Plain arg passing
+ Cow::from(args)
+ };
+ // If `with_caller_location` is set we pretend there is an extra argument (that
+ // we will not pass).
+ assert_eq!(
+ caller_args.len() + if with_caller_location { 1 } else { 0 },
+ caller_fn_abi.args.len(),
+ "mismatch between caller ABI and caller arguments",
+ );
+ let mut caller_args = caller_args
+ .iter()
+ .zip(caller_fn_abi.args.iter())
+ .filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
+
+ // Now we have to spread them out across the callee's locals,
+ // taking into account the `spread_arg`. If we could write
+                    // this as a single iterator (that handles `spread_arg`), then
+ // `pass_argument` would be the loop body. It takes care to
+ // not advance `caller_iter` for ZSTs.
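+                    // (`spread_arg` is the callee-side mirror of the untupling above: e.g. an
+                    // `extern "rust-call" fn` body declares a single tuple-typed argument local,
+                    // and each of its fields is filled from a separate ABI-level argument.)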
+ let mut callee_args_abis = callee_fn_abi.args.iter();
+ for local in body.args_iter() {
+ let dest = self.eval_place(mir::Place::from(local))?;
+ if Some(local) == body.spread_arg {
+ // Must be a tuple
+ for i in 0..dest.layout.fields.count() {
+ let dest = self.place_field(&dest, i)?;
+ let callee_abi = callee_args_abis.next().unwrap();
+ self.pass_argument(&mut caller_args, callee_abi, &dest)?;
+ }
+ } else {
+ // Normal argument
+ let callee_abi = callee_args_abis.next().unwrap();
+ self.pass_argument(&mut caller_args, callee_abi, &dest)?;
+ }
+ }
+ // If the callee needs a caller location, pretend we consume one more argument from the ABI.
+ if instance.def.requires_caller_location(*self.tcx) {
+ callee_args_abis.next().unwrap();
+ }
+ // Now we should have no more caller args or callee arg ABIs
+ assert!(
+ callee_args_abis.next().is_none(),
+ "mismatch between callee ABI and callee body arguments"
+ );
+ if caller_args.next().is_some() {
+ throw_ub_format!("calling a function with more arguments than it expected")
+ }
+ // Don't forget to check the return type!
+ if !Self::check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret) {
+ throw_ub_format!(
+ "calling a function with return type {:?} passing \
+ return place of type {:?}",
+ callee_fn_abi.ret.layout.ty,
+ caller_fn_abi.ret.layout.ty,
+ )
+ }
+ };
+ match res {
+ Err(err) => {
+ self.stack_mut().pop();
+ Err(err)
+ }
+ Ok(()) => Ok(()),
+ }
+ }
+ // cannot use the shim here, because that will only result in infinite recursion
+ ty::InstanceDef::Virtual(def_id, idx) => {
+ let mut args = args.to_vec();
+ // We have to implement all "object safe receivers". So we have to go search for a
+ // pointer or `dyn Trait` type, but it could be wrapped in newtypes. So recursively
+ // unwrap those newtypes until we are there.
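+                // (For instance, a `self: Pin<&mut dyn Trait>` receiver wraps the wide pointer
+                // in the `Pin` newtype; we peel off such wrappers until we reach the pointer.)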
+ let mut receiver = args[0].clone();
+ let receiver_place = loop {
+ match receiver.layout.ty.kind() {
+ ty::Ref(..) | ty::RawPtr(..) => break self.deref_operand(&receiver)?,
+ ty::Dynamic(..) => break receiver.assert_mem_place(), // no immediate unsized values
+ _ => {
+ // Not there yet, search for the only non-ZST field.
+ let mut non_zst_field = None;
+ for i in 0..receiver.layout.fields.count() {
+ let field = self.operand_field(&receiver, i)?;
+ if !field.layout.is_zst() {
+ assert!(
+ non_zst_field.is_none(),
+ "multiple non-ZST fields in dyn receiver type {}",
+ receiver.layout.ty
+ );
+ non_zst_field = Some(field);
+ }
+ }
+ receiver = non_zst_field.unwrap_or_else(|| {
+ panic!(
+ "no non-ZST fields in dyn receiver type {}",
+ receiver.layout.ty
+ )
+ });
+ }
+ }
+ };
+ // Obtain the underlying trait we are working on.
+ let receiver_tail = self
+ .tcx
+ .struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
+ let ty::Dynamic(data, ..) = receiver_tail.kind() else {
+                    span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
+ };
+
+ // Get the required information from the vtable.
+ let vptr = receiver_place.meta.unwrap_meta().to_pointer(self)?;
+ let (dyn_ty, dyn_trait) = self.get_ptr_vtable(vptr)?;
+ if dyn_trait != data.principal() {
+ throw_ub_format!(
+ "`dyn` call on a pointer whose vtable does not match its type"
+ );
+ }
+
+ // Now determine the actual method to call. We can do that in two different ways and
+ // compare them to ensure everything fits.
+ let Some(ty::VtblEntry::Method(fn_inst)) = self.get_vtable_entries(vptr)?.get(idx).copied() else {
+ throw_ub_format!("`dyn` call trying to call something that is not a method")
+ };
+ if cfg!(debug_assertions) {
+ let tcx = *self.tcx;
+
+ let trait_def_id = tcx.trait_of_item(def_id).unwrap();
+ let virtual_trait_ref =
+ ty::TraitRef::from_method(tcx, trait_def_id, instance.substs);
+ assert_eq!(
+ receiver_tail,
+ virtual_trait_ref.self_ty(),
+ "mismatch in underlying dyn trait computation within Miri and MIR building",
+ );
+ let existential_trait_ref =
+ ty::ExistentialTraitRef::erase_self_ty(tcx, virtual_trait_ref);
+ let concrete_trait_ref = existential_trait_ref.with_self_ty(tcx, dyn_ty);
+
+ let concrete_method = Instance::resolve_for_vtable(
+ tcx,
+ self.param_env,
+ def_id,
+ instance.substs.rebase_onto(tcx, trait_def_id, concrete_trait_ref.substs),
+ )
+ .unwrap();
+ assert_eq!(fn_inst, concrete_method);
+ }
+
+ // `*mut receiver_place.layout.ty` is almost the layout that we
+ // want for args[0]: We have to project to field 0 because we want
+ // a thin pointer.
+ assert!(receiver_place.layout.is_unsized());
+ let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
+ let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0);
+ // Adjust receiver argument.
+ args[0] = OpTy::from(ImmTy::from_immediate(
+ Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
+ this_receiver_ptr,
+ ));
+ trace!("Patched receiver operand to {:#?}", args[0]);
+ // recurse with concrete function
+ self.eval_fn_call(
+ FnVal::Instance(fn_inst),
+ (caller_abi, caller_fn_abi),
+ &args,
+ with_caller_location,
+ destination,
+ target,
+ unwind,
+ )
+ }
+ }
+ }
+
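+    /// Evaluates drop glue for `place`: resolves the concrete `drop_in_place` instance
+    /// (unpacking the vtable first if `place` is a trait object) and then calls it like a
+    /// regular function, with the address of `place` as the only argument.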
+ fn drop_in_place(
+ &mut self,
+ place: &PlaceTy<'tcx, M::Provenance>,
+ instance: ty::Instance<'tcx>,
+ target: mir::BasicBlock,
+ unwind: Option<mir::BasicBlock>,
+ ) -> InterpResult<'tcx> {
+ trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
+ // We take the address of the object. This may well be unaligned, which is fine
+ // for us here. However, unaligned accesses will probably make the actual drop
+ // implementation fail -- a problem shared by rustc.
+ let place = self.force_allocation(place)?;
+
+ let (instance, place) = match place.layout.ty.kind() {
+ ty::Dynamic(..) => {
+ // Dropping a trait object. Need to find actual drop fn.
+ let place = self.unpack_dyn_trait(&place)?;
+ let instance = ty::Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
+ (instance, place)
+ }
+ _ => (instance, place),
+ };
+ let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
+
+ let arg = ImmTy::from_immediate(
+ place.to_ref(self),
+ self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
+ );
+ let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);
+
+ self.eval_fn_call(
+ FnVal::Instance(instance),
+ (Abi::Rust, fn_abi),
+ &[arg.into()],
+ false,
+ &ret.into(),
+ Some(target),
+ match unwind {
+ Some(cleanup) => StackPopUnwind::Cleanup(cleanup),
+ None => StackPopUnwind::Skip,
+ },
+ )
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/traits.rs b/compiler/rustc_const_eval/src/interpret/traits.rs
new file mode 100644
index 000000000..b3a511d5a
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/traits.rs
@@ -0,0 +1,59 @@
+use rustc_middle::mir::interpret::{InterpResult, Pointer};
+use rustc_middle::ty::layout::LayoutOf;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::{Align, Size};
+
+use super::util::ensure_monomorphic_enough;
+use super::{InterpCx, Machine};
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
+ /// objects.
+ ///
+    /// The `trait_ref` encodes the erased self type. Hence, if we are making an object `Foo<dyn Trait>`
+ /// from a value of type `Foo<T>`, then `trait_ref` would map `T: Trait`. `None` here means that
+ /// this is an auto trait without any methods, so we only need the basic vtable (drop, size,
+ /// align).
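+    ///
+    /// (Illustrative: an unsizing coercion such as `&value as &dyn Trait` causes the
+    /// interpreter to request the vtable for the concrete type of `value` and `Trait`
+    /// through this function.)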
+ pub fn get_vtable_ptr(
+ &self,
+ ty: Ty<'tcx>,
+ poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+ ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
+        trace!("get_vtable_ptr(trait_ref={:?})", poly_trait_ref);
+
+ let (ty, poly_trait_ref) = self.tcx.erase_regions((ty, poly_trait_ref));
+
+ // All vtables must be monomorphic, bail out otherwise.
+ ensure_monomorphic_enough(*self.tcx, ty)?;
+ ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;
+
+ let vtable_symbolic_allocation = self.tcx.create_vtable_alloc(ty, poly_trait_ref);
+ let vtable_ptr = self.global_base_pointer(Pointer::from(vtable_symbolic_allocation))?;
+ Ok(vtable_ptr.into())
+ }
+
+    /// Returns a high-level representation of the entries of the given vtable.
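+    /// (The first three entries of a vtable are always drop-in-place, size and align;
+    /// for a principal trait, the method entries follow. `COMMON_VTABLE_ENTRIES` below
+    /// is exactly that method-less prefix.)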
+ pub fn get_vtable_entries(
+ &self,
+ vtable: Pointer<Option<M::Provenance>>,
+ ) -> InterpResult<'tcx, &'tcx [ty::VtblEntry<'tcx>]> {
+ let (ty, poly_trait_ref) = self.get_ptr_vtable(vtable)?;
+ Ok(if let Some(poly_trait_ref) = poly_trait_ref {
+ let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
+ let trait_ref = self.tcx.erase_regions(trait_ref);
+ self.tcx.vtable_entries(trait_ref)
+ } else {
+ TyCtxt::COMMON_VTABLE_ENTRIES
+ })
+ }
+
+ pub fn get_vtable_size_and_align(
+ &self,
+ vtable: Pointer<Option<M::Provenance>>,
+ ) -> InterpResult<'tcx, (Size, Align)> {
+ let (ty, _trait_ref) = self.get_ptr_vtable(vtable)?;
+ let layout = self.layout_of(ty)?;
+ assert!(!layout.is_unsized(), "there are no vtables for unsized types");
+ Ok((layout.size, layout.align.abi))
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
new file mode 100644
index 000000000..2bc521d5b
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -0,0 +1,73 @@
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use std::convert::TryInto;
+use std::ops::ControlFlow;
+
+/// Checks whether a type contains generic parameters which require substitution.
+///
+/// In case it does, returns a `TooGeneric` const eval error. Note that due to polymorphization
+/// types may be "concrete enough" even though they still contain generic parameters in
+/// case these parameters are unused.
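+///
+/// (Illustrative: with `-Zpolymorphize=on`, a function like `fn f<T>() -> usize { 1 }` never
+/// uses `T`, so `f::<T>` counts as "concrete enough" even while `T` is still a parameter.)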
+pub(crate) fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
+where
+ T: TypeVisitable<'tcx>,
+{
+ debug!("ensure_monomorphic_enough: ty={:?}", ty);
+ if !ty.needs_subst() {
+ return Ok(());
+ }
+
+ struct FoundParam;
+ struct UsedParamsNeedSubstVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ }
+
+ impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> {
+ type BreakTy = FoundParam;
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if !ty.needs_subst() {
+ return ControlFlow::CONTINUE;
+ }
+
+ match *ty.kind() {
+ ty::Param(_) => ControlFlow::Break(FoundParam),
+ ty::Closure(def_id, substs)
+ | ty::Generator(def_id, substs, ..)
+ | ty::FnDef(def_id, substs) => {
+ let instance = ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id));
+ let unused_params = self.tcx.unused_generic_params(instance);
+ for (index, subst) in substs.into_iter().enumerate() {
+ let index = index
+ .try_into()
+ .expect("more generic parameters than can fit into a `u32`");
+ let is_used = unused_params.contains(index).map_or(true, |unused| !unused);
+ // Only recurse when generic parameters in fns, closures and generators
+ // are used and require substitution.
+ // Just in case there are closures or generators within this subst,
+ // recurse.
+ if is_used && subst.needs_subst() {
+ return subst.visit_with(self);
+ }
+ }
+ ControlFlow::CONTINUE
+ }
+ _ => ty.super_visit_with(self),
+ }
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match c.kind() {
+ ty::ConstKind::Param(..) => ControlFlow::Break(FoundParam),
+ _ => c.super_visit_with(self),
+ }
+ }
+ }
+
+ let mut vis = UsedParamsNeedSubstVisitor { tcx };
+ if matches!(ty.visit_with(&mut vis), ControlFlow::Break(FoundParam)) {
+ throw_inval!(TooGeneric);
+ } else {
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
new file mode 100644
index 000000000..0e50d1ed4
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -0,0 +1,986 @@
+//! Check the validity invariant of a given value, and tell the user
+//! where in the value it got violated.
+//! In const context, this goes even further and tries to approximate const safety.
+//! That's useful because it means other passes (e.g. promotion) can rely on `const`s
+//! to be const-safe.
+
+use std::convert::TryFrom;
+use std::fmt::Write;
+use std::num::NonZeroUsize;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_middle::mir::interpret::InterpError;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::{Abi, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange};
+
+use std::hash::Hash;
+
+use super::{
+ alloc_range, CheckInAllocMsg, GlobalAlloc, Immediate, InterpCx, InterpResult, MPlaceTy,
+ Machine, MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
+};
+
+macro_rules! throw_validation_failure {
+ ($where:expr, { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )?) => {{
+ let mut msg = String::new();
+ msg.push_str("encountered ");
+ write!(&mut msg, $($what_fmt),+).unwrap();
+ $(
+ msg.push_str(", but expected ");
+ write!(&mut msg, $($expected_fmt),+).unwrap();
+ )?
+ let path = rustc_middle::ty::print::with_no_trimmed_paths!({
+ let where_ = &$where;
+ if !where_.is_empty() {
+ let mut path = String::new();
+ write_path(&mut path, where_);
+ Some(path)
+ } else {
+ None
+ }
+ });
+ throw_ub!(ValidationFailure { path, msg })
+ }};
+}
+
+/// If $e throws an error matching the pattern, throw a validation failure.
+/// Other errors are passed back to the caller, unchanged -- and if they reach the root of
+/// the visitor, we make sure only validation errors and `InvalidProgram` errors are left.
+/// This lets you use the patterns as a kind of validation list, asserting which errors
+/// can possibly happen:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+/// Foo | Bar | Baz => { "some failure" },
+/// });
+/// ```
+///
+/// An additional expected parameter can also be added to the failure message:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+/// Foo | Bar | Baz => { "some failure" } expected { "something that wasn't a failure" },
+/// });
+/// ```
+///
+/// An additional nicety is that both parameters actually take format args, so you can just write
+/// the format string in directly:
+///
+/// ```
+/// let v = try_validation!(some_fn(), some_path, {
+/// Foo | Bar | Baz => { "{:?}", some_failure } expected { "{}", expected_value },
+/// });
+/// ```
+///
+macro_rules! try_validation {
+ ($e:expr, $where:expr,
+ $( $( $p:pat_param )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)?
+ ) => {{
+ match $e {
+ Ok(x) => x,
+ // We catch the error and turn it into a validation failure. We are okay with
+ // allocation here as this can only slow down builds that fail anyway.
+ Err(e) => match e.kind() {
+ $(
+ $($p)|+ =>
+ throw_validation_failure!(
+ $where,
+ { $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
+ )
+ ),+,
+ #[allow(unreachable_patterns)]
+ _ => Err::<!, _>(e)?,
+ }
+ }
+ }};
+}
+
+/// We want to show a nice path to the invalid field for diagnostics,
+/// but avoid string operations in the happy case where no error happens.
+/// So we track a `Vec<PathElem>` where `PathElem` contains all the data we
+/// need to later print something for the user.
+#[derive(Copy, Clone, Debug)]
+pub enum PathElem {
+ Field(Symbol),
+ Variant(Symbol),
+ GeneratorState(VariantIdx),
+ CapturedVar(Symbol),
+ ArrayElem(usize),
+ TupleElem(usize),
+ Deref,
+ EnumTag,
+ GeneratorTag,
+ DynDowncast,
+}
+
+/// Extra things to check for during validation of CTFE results.
+pub enum CtfeValidationMode {
+ /// Regular validation, nothing special happening.
+ Regular,
+ /// Validation of a `const`.
+ /// `inner` says if this is an inner, indirect allocation (as opposed to the top-level const
+ /// allocation). Being an inner allocation makes a difference because the top-level allocation
+ /// of a `const` is copied for each use, but the inner allocations are implicitly shared.
+ /// `allow_static_ptrs` says if pointers to statics are permitted (which is the case for promoteds in statics).
+ Const { inner: bool, allow_static_ptrs: bool },
+}
+
+/// State for tracking recursive validation of references
+pub struct RefTracking<T, PATH = ()> {
+ pub seen: FxHashSet<T>,
+ pub todo: Vec<(T, PATH)>,
+}
+
+impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
+ pub fn empty() -> Self {
+ RefTracking { seen: FxHashSet::default(), todo: vec![] }
+ }
+ pub fn new(op: T) -> Self {
+ let mut ref_tracking_for_consts =
+ RefTracking { seen: FxHashSet::default(), todo: vec![(op, PATH::default())] };
+ ref_tracking_for_consts.seen.insert(op);
+ ref_tracking_for_consts
+ }
+
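+    /// (Typical driving loop, roughly: pop an entry off `todo`, validate it, and let `track`
+    /// push any newly-seen references back onto `todo`, until `todo` is empty.)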
+ pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
+ if self.seen.insert(op) {
+ trace!("Recursing below ptr {:#?}", op);
+ let path = path();
+ // Remember to come back to this later.
+ self.todo.push((op, path));
+ }
+ }
+}
+
+/// Format a path
+fn write_path(out: &mut String, path: &[PathElem]) {
+ use self::PathElem::*;
+
+ for elem in path.iter() {
+ match elem {
+ Field(name) => write!(out, ".{}", name),
+ EnumTag => write!(out, ".<enum-tag>"),
+ Variant(name) => write!(out, ".<enum-variant({})>", name),
+ GeneratorTag => write!(out, ".<generator-tag>"),
+ GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
+ CapturedVar(name) => write!(out, ".<captured-var({})>", name),
+ TupleElem(idx) => write!(out, ".{}", idx),
+ ArrayElem(idx) => write!(out, "[{}]", idx),
+ // `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
+ // some of the other items here also are not Rust syntax. Actually we can't
+ // even use the usual syntax because we are just showing the projections,
+ // not the root.
+ Deref => write!(out, ".<deref>"),
+ DynDowncast => write!(out, ".<dyn-downcast>"),
+ }
+ .unwrap()
+ }
+}
+
+// Formats such that a sentence like "expected something {}" to mean
+// "expected something <in the given range>" makes sense.
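+//
+// For example: `WrappingRange { start: 1, end: max_hi }` (a `NonZero*`-style niche) formats as
+// "greater or equal to 1", while a wrap-around range like `{ start: 2, end: 0 }` formats as
+// "less or equal to 0, or greater or equal to 2".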
+fn wrapping_range_format(r: WrappingRange, max_hi: u128) -> String {
+ let WrappingRange { start: lo, end: hi } = r;
+ assert!(hi <= max_hi);
+ if lo > hi {
+ format!("less or equal to {}, or greater or equal to {}", hi, lo)
+ } else if lo == hi {
+ format!("equal to {}", lo)
+ } else if lo == 0 {
+ assert!(hi < max_hi, "should not be printing if the range covers everything");
+ format!("less or equal to {}", hi)
+ } else if hi == max_hi {
+ assert!(lo > 0, "should not be printing if the range covers everything");
+ format!("greater or equal to {}", lo)
+ } else {
+ format!("in the range {:?}", r)
+ }
+}
+
+struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+ /// The `path` may be pushed to, but the part that is present when a function
+ /// starts must not be changed! `visit_fields` and `visit_array` rely on
+ /// this stack discipline.
+ path: Vec<PathElem>,
+ ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
+ /// `None` indicates this is not validating for CTFE (but for runtime).
+ ctfe_mode: Option<CtfeValidationMode>,
+ ecx: &'rt InterpCx<'mir, 'tcx, M>,
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> {
+ fn aggregate_field_path_elem(&mut self, layout: TyAndLayout<'tcx>, field: usize) -> PathElem {
+ // First, check if we are projecting to a variant.
+ match layout.variants {
+ Variants::Multiple { tag_field, .. } => {
+ if tag_field == field {
+ return match layout.ty.kind() {
+ ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag,
+ ty::Generator(..) => PathElem::GeneratorTag,
+ _ => bug!("non-variant type {:?}", layout.ty),
+ };
+ }
+ }
+ Variants::Single { .. } => {}
+ }
+
+ // Now we know we are projecting to a field, so figure out which one.
+ match layout.ty.kind() {
+ // generators and closures.
+ ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
+ let mut name = None;
+ // FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
+ // https://github.com/rust-lang/project-rfc-2229/issues/46
+ if let Some(local_def_id) = def_id.as_local() {
+ let tables = self.ecx.tcx.typeck(local_def_id);
+ if let Some(captured_place) =
+ tables.closure_min_captures_flattened(local_def_id).nth(field)
+ {
+ // Sometimes the index is beyond the number of upvars (seen
+ // for a generator).
+ let var_hir_id = captured_place.get_root_variable();
+ let node = self.ecx.tcx.hir().get(var_hir_id);
+ if let hir::Node::Pat(pat) = node {
+ if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
+ name = Some(ident.name);
+ }
+ }
+ }
+ }
+
+ PathElem::CapturedVar(name.unwrap_or_else(|| {
+ // Fall back to showing the field index.
+ sym::integer(field)
+ }))
+ }
+
+ // tuples
+ ty::Tuple(_) => PathElem::TupleElem(field),
+
+ // enums
+ ty::Adt(def, ..) if def.is_enum() => {
+ // we might be projecting *to* a variant, or to a field *in* a variant.
+ match layout.variants {
+ Variants::Single { index } => {
+ // Inside a variant
+ PathElem::Field(def.variant(index).fields[field].name)
+ }
+ Variants::Multiple { .. } => bug!("we handled variants above"),
+ }
+ }
+
+ // other ADTs
+ ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].name),
+
+ // arrays/slices
+ ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field),
+
+ // dyn traits
+ ty::Dynamic(..) => PathElem::DynDowncast,
+
+ // nothing else has an aggregate layout
+ _ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", layout.ty),
+ }
+ }
+
+ fn with_elem<R>(
+ &mut self,
+ elem: PathElem,
+ f: impl FnOnce(&mut Self) -> InterpResult<'tcx, R>,
+ ) -> InterpResult<'tcx, R> {
+ // Remember the old state
+ let path_len = self.path.len();
+ // Record new element
+ self.path.push(elem);
+ // Perform operation
+ let r = f(self)?;
+ // Undo changes
+ self.path.truncate(path_len);
+ // Done
+ Ok(r)
+ }
+
+ fn check_wide_ptr_meta(
+ &mut self,
+ meta: MemPlaceMeta<M::Provenance>,
+ pointee: TyAndLayout<'tcx>,
+ ) -> InterpResult<'tcx> {
+ let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
+ match tail.kind() {
+ ty::Dynamic(..) => {
+ let vtable = meta.unwrap_meta().to_pointer(self.ecx)?;
+ // Make sure it is a genuine vtable pointer.
+ let (_ty, _trait) = try_validation!(
+ self.ecx.get_ptr_vtable(vtable),
+ self.path,
+ err_ub!(DanglingIntPointer(..)) |
+ err_ub!(InvalidVTablePointer(..)) =>
+ { "{vtable}" } expected { "a vtable pointer" },
+ );
+ // FIXME: check if the type/trait match what ty::Dynamic says?
+ }
+ ty::Slice(..) | ty::Str => {
+ let _len = meta.unwrap_meta().to_machine_usize(self.ecx)?;
+ // We do not check that `len * elem_size <= isize::MAX`:
+ // that is only required for references, and there it falls out of the
+ // "dereferenceable" check performed by Stacked Borrows.
+ }
+ ty::Foreign(..) => {
+ // Unsized, but not wide.
+ }
+ _ => bug!("Unexpected unsized type tail: {:?}", tail),
+ }
+
+ Ok(())
+ }
+
+ /// Check a reference or `Box`.
+ fn check_safe_pointer(
+ &mut self,
+ value: &OpTy<'tcx, M::Provenance>,
+ kind: &str,
+ ) -> InterpResult<'tcx> {
+ let value = self.ecx.read_immediate(value)?;
+ // Handle wide pointers.
+ // Check metadata early, for better diagnostics
+ let place = try_validation!(
+ self.ecx.ref_to_mplace(&value),
+ self.path,
+ err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind },
+ );
+ if place.layout.is_unsized() {
+ self.check_wide_ptr_meta(place.meta, place.layout)?;
+ }
+ // Make sure this is dereferenceable and all.
+ let size_and_align = try_validation!(
+ self.ecx.size_and_align_of_mplace(&place),
+ self.path,
+ err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg },
+ );
+ let (size, align) = size_and_align
+ // for the purpose of validity, consider foreign types to have
+ // alignment and size determined by the layout (size will be 0,
+ // alignment should take attributes into account).
+ .unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
+ // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
+ try_validation!(
+ self.ecx.check_ptr_access_align(
+ place.ptr,
+ size,
+ align,
+ CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
+ ),
+ self.path,
+ err_ub!(AlignmentCheckFailed { required, has }) =>
+ {
+ "an unaligned {kind} (required {} byte alignment but found {})",
+ required.bytes(),
+ has.bytes()
+ },
+ err_ub!(DanglingIntPointer(0, _)) =>
+ { "a null {kind}" },
+ err_ub!(DanglingIntPointer(i, _)) =>
+ { "a dangling {kind} (address {i:#x} is unallocated)" },
+ err_ub!(PointerOutOfBounds { .. }) =>
+ { "a dangling {kind} (going beyond the bounds of its allocation)" },
+ // This cannot happen during const-eval (because interning already detects
+ // dangling pointers), but it can happen in Miri.
+ err_ub!(PointerUseAfterFree(..)) =>
+ { "a dangling {kind} (use-after-free)" },
+ );
+ // Do not allow pointers to uninhabited types.
+ if place.layout.abi.is_uninhabited() {
+ throw_validation_failure!(self.path,
+ { "a {kind} pointing to uninhabited type {}", place.layout.ty }
+ )
+ }
+ // Recursive checking
+ if let Some(ref mut ref_tracking) = self.ref_tracking {
+ // Proceed recursively even for ZST, no reason to skip them!
+ // `!` is a ZST and we want to validate it.
+ if let Ok((alloc_id, _offset, _prov)) = self.ecx.ptr_try_get_alloc_id(place.ptr) {
+ // Special handling for pointers to statics (irrespective of their type).
+ let alloc_kind = self.ecx.tcx.try_get_global_alloc(alloc_id);
+ if let Some(GlobalAlloc::Static(did)) = alloc_kind {
+ assert!(!self.ecx.tcx.is_thread_local_static(did));
+ assert!(self.ecx.tcx.is_static(did));
+ if matches!(
+ self.ctfe_mode,
+ Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. })
+ ) {
+ // See const_eval::machine::MemoryExtra::can_access_statics for why
+ // this check is so important.
+ // This check is reachable when the const just referenced the static,
+ // but never read it (so we never entered `before_access_global`).
+ throw_validation_failure!(self.path,
+ { "a {} pointing to a static variable", kind }
+ );
+ }
+ // We skip checking other statics. These statics must be sound by
+ // themselves, and the only way to get broken statics here is by using
+ // unsafe code.
+                    // The reason we don't check other statics is twofold. For one, in all
+ // sound cases, the static was already validated on its own, and second, we
+ // trigger cycle errors if we try to compute the value of the other static
+ // and that static refers back to us.
+ // We might miss const-invalid data,
+ // but things are still sound otherwise (in particular re: consts
+ // referring to statics).
+ return Ok(());
+ }
+ }
+ let path = &self.path;
+ ref_tracking.track(place, || {
+ // We need to clone the path anyway, make sure it gets created
+ // with enough space for the additional `Deref`.
+ let mut new_path = Vec::with_capacity(path.len() + 1);
+ new_path.extend(path);
+ new_path.push(PathElem::Deref);
+ new_path
+ });
+ }
+ Ok(())
+ }
+
+ fn read_scalar(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> {
+ self.ecx.read_scalar(op)
+ }
+
+ fn read_immediate_forced(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, Immediate<M::Provenance>> {
+ Ok(*self.ecx.read_immediate_raw(op, /*force*/ true)?.unwrap())
+ }
+
+ /// Check if this is a value of primitive type, and if yes check the validity of the value
+ /// at that type. Return `true` if the type is indeed primitive.
+ fn try_visit_primitive(
+ &mut self,
+ value: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, bool> {
+ // Go over all the primitive types
+ let ty = value.layout.ty;
+ match ty.kind() {
+ ty::Bool => {
+ let value = self.read_scalar(value)?;
+ try_validation!(
+ value.to_bool(),
+ self.path,
+ err_ub!(InvalidBool(..)) | err_ub!(InvalidUninitBytes(None)) =>
+ { "{:x}", value } expected { "a boolean" },
+ );
+ Ok(true)
+ }
+ ty::Char => {
+ let value = self.read_scalar(value)?;
+ try_validation!(
+ value.to_char(),
+ self.path,
+ err_ub!(InvalidChar(..)) | err_ub!(InvalidUninitBytes(None)) =>
+ { "{:x}", value } expected { "a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)" },
+ );
+ Ok(true)
+ }
+ ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
+ let value = self.read_scalar(value)?;
+ // NOTE: Keep this in sync with the array optimization for int/float
+ // types below!
+ if M::enforce_number_init(self.ecx) {
+ try_validation!(
+ value.check_init(),
+ self.path,
+ err_ub!(InvalidUninitBytes(..)) =>
+ { "{:x}", value } expected { "initialized bytes" }
+ );
+ }
+ // As a special exception we *do* match on a `Scalar` here, since we truly want
+ // to know its underlying representation (and *not* cast it to an integer).
+ let is_ptr = value.check_init().map_or(false, |v| matches!(v, Scalar::Ptr(..)));
+ if is_ptr {
+ throw_validation_failure!(self.path,
+ { "{:x}", value } expected { "plain (non-pointer) bytes" }
+ )
+ }
+ Ok(true)
+ }
+ ty::RawPtr(..) => {
+ // We are conservative with uninit for integers, but try to
+ // actually enforce the strict rules for raw pointers (mostly because
+ // that lets us re-use `ref_to_mplace`).
+ let place = try_validation!(
+ self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)),
+ self.path,
+ err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" },
+ );
+ if place.layout.is_unsized() {
+ self.check_wide_ptr_meta(place.meta, place.layout)?;
+ }
+ Ok(true)
+ }
+ ty::Ref(_, ty, mutbl) => {
+ if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
+ && *mutbl == hir::Mutability::Mut
+ {
+ // A mutable reference inside a const? That does not seem right (except if it is
+ // a ZST).
+ let layout = self.ecx.layout_of(*ty)?;
+ if !layout.is_zst() {
+ throw_validation_failure!(self.path, { "mutable reference in a `const`" });
+ }
+ }
+ self.check_safe_pointer(value, "reference")?;
+ Ok(true)
+ }
+ ty::FnPtr(_sig) => {
+ let value = try_validation!(
+ self.ecx.read_scalar(value).and_then(|v| v.check_init()),
+ self.path,
+ err_ub!(InvalidUninitBytes(None)) => { "uninitialized bytes" } expected { "a proper pointer or integer value" },
+ );
+
+ // If we check references recursively, also check that this points to a function.
+ if let Some(_) = self.ref_tracking {
+ let ptr = value.to_pointer(self.ecx)?;
+ let _fn = try_validation!(
+ self.ecx.get_ptr_fn(ptr),
+ self.path,
+ err_ub!(DanglingIntPointer(..)) |
+ err_ub!(InvalidFunctionPointer(..)) =>
+ { "{ptr}" } expected { "a function pointer" },
+ );
+ // FIXME: Check if the signature matches
+ } else {
+                    // Otherwise (for standalone Miri), we still have to check that it is non-null.
+ if self.ecx.scalar_may_be_null(value)? {
+ throw_validation_failure!(self.path, { "a null function pointer" });
+ }
+ }
+ Ok(true)
+ }
+ ty::Never => throw_validation_failure!(self.path, { "a value of the never type `!`" }),
+ ty::Foreign(..) | ty::FnDef(..) => {
+ // Nothing to check.
+ Ok(true)
+ }
+ // The above should be all the primitive types. The rest is compound, we
+ // check them by visiting their fields/variants.
+ ty::Adt(..)
+ | ty::Tuple(..)
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::Str
+ | ty::Dynamic(..)
+ | ty::Closure(..)
+ | ty::Generator(..) => Ok(false),
+ // Some types only occur during typechecking, they have no layout.
+ // We should not see them here and we could not check them anyway.
+ ty::Error(_)
+ | ty::Infer(..)
+ | ty::Placeholder(..)
+ | ty::Bound(..)
+ | ty::Param(..)
+ | ty::Opaque(..)
+ | ty::Projection(..)
+ | ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
+ }
+ }
+
+ fn visit_scalar(
+ &mut self,
+ scalar: ScalarMaybeUninit<M::Provenance>,
+ scalar_layout: ScalarAbi,
+ ) -> InterpResult<'tcx> {
+ // We check `is_full_range` in a slightly complicated way because *if* we are checking
+ // number validity, then we want to ensure that `Scalar::Initialized` is indeed initialized,
+ // i.e. that we go over the `check_init` below.
+ let size = scalar_layout.size(self.ecx);
+ let is_full_range = match scalar_layout {
+ ScalarAbi::Initialized { .. } => {
+ if M::enforce_number_init(self.ecx) {
+ false // not "full" since uninit is not accepted
+ } else {
+ scalar_layout.is_always_valid(self.ecx)
+ }
+ }
+ ScalarAbi::Union { .. } => true,
+ };
+ if is_full_range {
+            // Nothing to check. Crucially, we don't even `read_scalar` until here, since that would
+ // fail for `Union` scalars!
+ return Ok(());
+ }
+ // We have something to check: it must at least be initialized.
+ let valid_range = scalar_layout.valid_range(self.ecx);
+ let WrappingRange { start, end } = valid_range;
+ let max_value = size.unsigned_int_max();
+ assert!(end <= max_value);
+ let value = try_validation!(
+ scalar.check_init(),
+ self.path,
+ err_ub!(InvalidUninitBytes(None)) => { "{:x}", scalar }
+ expected { "something {}", wrapping_range_format(valid_range, max_value) },
+ );
+ let bits = match value.try_to_int() {
+ Ok(int) => int.assert_bits(size),
+ Err(_) => {
+ // So this is a pointer then, and casting to an int failed.
+ // Can only happen during CTFE.
+ // We support 2 kinds of ranges here: full range, and excluding zero.
+ if start == 1 && end == max_value {
+ // Only null is the niche. So make sure the ptr is NOT null.
+ if self.ecx.scalar_may_be_null(value)? {
+ throw_validation_failure!(self.path,
+ { "a potentially null pointer" }
+ expected {
+ "something that cannot possibly fail to be {}",
+ wrapping_range_format(valid_range, max_value)
+ }
+ )
+ } else {
+ return Ok(());
+ }
+ } else if scalar_layout.is_always_valid(self.ecx) {
+                    // Easy. (This is reachable if `enforce_number_init` is set.)
+ return Ok(());
+ } else {
+ // Conservatively, we reject, because the pointer *could* have a bad
+ // value.
+ throw_validation_failure!(self.path,
+ { "a pointer" }
+ expected {
+ "something that cannot possibly fail to be {}",
+ wrapping_range_format(valid_range, max_value)
+ }
+ )
+ }
+ }
+ };
+ // Now compare.
+ if valid_range.contains(bits) {
+ Ok(())
+ } else {
+ throw_validation_failure!(self.path,
+ { "{}", bits }
+ expected { "something {}", wrapping_range_format(valid_range, max_value) }
+ )
+ }
+ }
+}
+
+impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
+ for ValidityVisitor<'rt, 'mir, 'tcx, M>
+{
+ type V = OpTy<'tcx, M::Provenance>;
+
+ #[inline(always)]
+ fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+ &self.ecx
+ }
+
+ fn read_discriminant(
+ &mut self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, VariantIdx> {
+ self.with_elem(PathElem::EnumTag, move |this| {
+ Ok(try_validation!(
+ this.ecx.read_discriminant(op),
+ this.path,
+ err_ub!(InvalidTag(val)) =>
+ { "{:x}", val } expected { "a valid enum tag" },
+ err_ub!(InvalidUninitBytes(None)) =>
+ { "uninitialized bytes" } expected { "a valid enum tag" },
+ )
+ .1)
+ })
+ }
+
+ #[inline]
+ fn visit_field(
+ &mut self,
+ old_op: &OpTy<'tcx, M::Provenance>,
+ field: usize,
+ new_op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let elem = self.aggregate_field_path_elem(old_op.layout, field);
+ self.with_elem(elem, move |this| this.visit_value(new_op))
+ }
+
+ #[inline]
+ fn visit_variant(
+ &mut self,
+ old_op: &OpTy<'tcx, M::Provenance>,
+ variant_id: VariantIdx,
+ new_op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx> {
+ let name = match old_op.layout.ty.kind() {
+ ty::Adt(adt, _) => PathElem::Variant(adt.variant(variant_id).name),
+ // Generators also have variants
+ ty::Generator(..) => PathElem::GeneratorState(variant_id),
+ _ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
+ };
+ self.with_elem(name, move |this| this.visit_value(new_op))
+ }
+
+ #[inline(always)]
+ fn visit_union(
+ &mut self,
+ op: &OpTy<'tcx, M::Provenance>,
+ _fields: NonZeroUsize,
+ ) -> InterpResult<'tcx> {
+ // Special check preventing `UnsafeCell` inside unions in the inner part of constants.
+ if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. })) {
+ if !op.layout.ty.is_freeze(self.ecx.tcx.at(DUMMY_SP), self.ecx.param_env) {
+ throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
+ }
+ }
+ Ok(())
+ }
+
+ #[inline]
+ fn visit_box(&mut self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ self.check_safe_pointer(op, "box")?;
+ Ok(())
+ }
+
+ #[inline]
+ fn visit_value(&mut self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ trace!("visit_value: {:?}, {:?}", *op, op.layout);
+
+ // Check primitive types -- the leaves of our recursive descent.
+ if self.try_visit_primitive(op)? {
+ return Ok(());
+ }
+
+ // Special check preventing `UnsafeCell` in the inner part of constants
+ if let Some(def) = op.layout.ty.ty_adt_def() {
+ if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. }))
+ && def.is_unsafe_cell()
+ {
+ throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
+ }
+ }
+
+ // Recursively walk the value at its type.
+ self.walk_value(op)?;
+
+ // *After* all of this, check the ABI. We need to check the ABI to handle
+ // types like `NonNull` where the `Scalar` info is more restrictive than what
+ // the fields say (`rustc_layout_scalar_valid_range_start`).
+ // But in most cases, this will just propagate what the fields say,
+ // and then we want the error to point at the field -- so, first recurse,
+ // then check ABI.
+ //
+ // FIXME: We could avoid some redundant checks here. For newtypes wrapping
+ // scalars, we do the same check on every "level" (e.g., first we check
+ // MyNewtype and then the scalar in there).
+ match op.layout.abi {
+ Abi::Uninhabited => {
+ throw_validation_failure!(self.path,
+ { "a value of uninhabited type {:?}", op.layout.ty }
+ );
+ }
+ Abi::Scalar(scalar_layout) => {
+                // We use a 'forced' read because we always need an `Immediate` here
+ // and treating "partially uninit" as "fully uninit" is fine for us.
+ let scalar = self.read_immediate_forced(op)?.to_scalar_or_uninit();
+ self.visit_scalar(scalar, scalar_layout)?;
+ }
+ Abi::ScalarPair(a_layout, b_layout) => {
+ // There is no `rustc_layout_scalar_valid_range_start` for pairs, so
+ // we would validate these things as we descend into the fields,
+ // but that can miss bugs in layout computation. Layout computation
+ // is subtle due to enums having ScalarPair layout, where one field
+ // is the discriminant.
+ if cfg!(debug_assertions) {
+                    // We use a 'forced' read because we always need an `Immediate` here
+ // and treating "partially uninit" as "fully uninit" is fine for us.
+ let (a, b) = self.read_immediate_forced(op)?.to_scalar_or_uninit_pair();
+ self.visit_scalar(a, a_layout)?;
+ self.visit_scalar(b, b_layout)?;
+ }
+ }
+ Abi::Vector { .. } => {
+ // No checks here, we assume layout computation gets this right.
+ // (This is harder to check since Miri does not represent these as `Immediate`. We
+ // also cannot use field projections since this might be a newtype around a vector.)
+ }
+ Abi::Aggregate { .. } => {
+ // Nothing to do.
+ }
+ }
+
+ Ok(())
+ }
+
+ fn visit_aggregate(
+ &mut self,
+ op: &OpTy<'tcx, M::Provenance>,
+ fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
+ match op.layout.ty.kind() {
+ ty::Str => {
+ let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
+ let len = mplace.len(self.ecx)?;
+ try_validation!(
+ self.ecx.read_bytes_ptr(mplace.ptr, Size::from_bytes(len)),
+ self.path,
+ err_ub!(InvalidUninitBytes(..)) => { "uninitialized data in `str`" },
+ );
+ }
+ ty::Array(tys, ..) | ty::Slice(tys)
+ // This optimization applies for types that can hold arbitrary bytes (such as
+ // integer and floating point types) or for structs or tuples with no fields.
+ // FIXME(wesleywiser) This logic could be extended further to arbitrary structs
+ // or tuples made up of integer/floating point types or inhabited ZSTs with no
+ // padding.
+ if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..))
+ =>
+ {
+ // Optimized handling for arrays of integer/float type.
+
+ // This is the length of the array/slice.
+ let len = op.len(self.ecx)?;
+ // This is the element type size.
+ let layout = self.ecx.layout_of(*tys)?;
+ // This is the size in bytes of the whole array. (This checks for overflow.)
+ let size = layout.size * len;
+ // If the size is 0, there is nothing to check.
+                // (`size` can only be 0 if `len` is 0, and empty arrays are always valid.)
+ if size == Size::ZERO {
+ return Ok(());
+ }
+ // Now that we definitely have a non-ZST array, we know it lives in memory.
+ let mplace = match op.try_as_mplace() {
+ Ok(mplace) => mplace,
+ Err(imm) => match *imm {
+ Immediate::Uninit =>
+ throw_validation_failure!(self.path, { "uninitialized bytes" }),
+ Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
+ bug!("arrays/slices can never have Scalar/ScalarPair layout"),
+ }
+ };
+
+ // Optimization: we just check the entire range at once.
+ // NOTE: Keep this in sync with the handling of integer and float
+                // types above, in `try_visit_primitive`.
+ // In run-time mode, we accept pointers in here. This is actually more
+ // permissive than a per-element check would be, e.g., we accept
+ // a &[u8] that contains a pointer even though bytewise checking would
+ // reject it. However, that's good: We don't inherently want
+ // to reject those pointers, we just do not have the machinery to
+ // talk about parts of a pointer.
+ // We also accept uninit, for consistency with the slow path.
+ let alloc = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)?.expect("we already excluded size 0");
+
+ match alloc.check_bytes(
+ alloc_range(Size::ZERO, size),
+ /*allow_uninit*/ !M::enforce_number_init(self.ecx),
+ /*allow_ptr*/ false,
+ ) {
+ // In the happy case, we needn't check anything else.
+ Ok(()) => {}
+ // Some error happened, try to provide a more detailed description.
+ Err(err) => {
+ // For some errors we might be able to provide extra information.
+ // (This custom logic does not fit the `try_validation!` macro.)
+ match err.kind() {
+ err_ub!(InvalidUninitBytes(Some((_alloc_id, access)))) => {
+ // Some byte was uninitialized, determine which
+ // element that byte belongs to so we can
+ // provide an index.
+ let i = usize::try_from(
+ access.uninit.start.bytes() / layout.size.bytes(),
+ )
+ .unwrap();
+ self.path.push(PathElem::ArrayElem(i));
+
+ throw_validation_failure!(self.path, { "uninitialized bytes" })
+ }
+
+ // Propagate upwards (that will also check for unexpected errors).
+ _ => return Err(err),
+ }
+ }
+ }
+ }
+ // Fast path for arrays and slices of ZSTs. We only need to check a single ZST element
+ // of an array and not all of them, because there's only a single value of a specific
+ // ZST type, so either validation fails for all elements or none.
+ ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(*tys)?.is_zst() => {
+ // Validate just the first element (if any).
+ self.walk_aggregate(op, fields.take(1))?
+ }
+ _ => {
+ self.walk_aggregate(op, fields)? // default handler
+ }
+ }
+ Ok(())
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
+ fn validate_operand_internal(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ path: Vec<PathElem>,
+ ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>>,
+ ctfe_mode: Option<CtfeValidationMode>,
+ ) -> InterpResult<'tcx> {
+ trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
+
+ // Construct a visitor
+ let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
+
+ // Run it.
+ match visitor.visit_value(&op) {
+ Ok(()) => Ok(()),
+ // Pass through validation failures.
+ Err(err) if matches!(err.kind(), err_ub!(ValidationFailure { .. })) => Err(err),
+ // Complain about any other kind of UB error -- those are bad because we'd like to
+ // report them in a way that shows *where* in the value the issue lies.
+ Err(err) if matches!(err.kind(), InterpError::UndefinedBehavior(_)) => {
+ err.print_backtrace();
+ bug!("Unexpected Undefined Behavior error during validation: {}", err);
+ }
+ // Pass through everything else.
+ Err(err) => Err(err),
+ }
+ }
+
+ /// This function checks the data at `op` to be const-valid.
+ /// `op` is assumed to cover valid memory if it is an indirect operand.
+ /// It will error if the bits at the destination do not match the ones described by the layout.
+ ///
+ /// `ref_tracking` is used to record references that we encounter so that they
+ /// can be checked recursively by an outside driving loop.
+ ///
+ /// `constant` controls whether this must satisfy the rules for constants:
+ /// - no pointers to statics.
+ /// - no `UnsafeCell` or non-ZST `&mut`.
+ #[inline(always)]
+ pub fn const_validate_operand(
+ &self,
+ op: &OpTy<'tcx, M::Provenance>,
+ path: Vec<PathElem>,
+ ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::Provenance>, Vec<PathElem>>,
+ ctfe_mode: CtfeValidationMode,
+ ) -> InterpResult<'tcx> {
+ self.validate_operand_internal(op, path, Some(ref_tracking), Some(ctfe_mode))
+ }
+
+ /// This function checks the data at `op` to be runtime-valid.
+ /// `op` is assumed to cover valid memory if it is an indirect operand.
+ /// It will error if the bits at the destination do not match the ones described by the layout.
+ #[inline(always)]
+ pub fn validate_operand(&self, op: &OpTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
+ self.validate_operand_internal(op, vec![], None, None)
+ }
+}
diff --git a/compiler/rustc_const_eval/src/interpret/visitor.rs b/compiler/rustc_const_eval/src/interpret/visitor.rs
new file mode 100644
index 000000000..aee1f93b1
--- /dev/null
+++ b/compiler/rustc_const_eval/src/interpret/visitor.rs
@@ -0,0 +1,534 @@
+//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
+//! types until we arrive at the leaves, with custom handling for primitive types.
+
+use rustc_middle::mir::interpret::InterpResult;
+use rustc_middle::ty;
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
+
+use std::num::NonZeroUsize;
+
+use super::{InterpCx, MPlaceTy, Machine, OpTy, PlaceTy};
+
+/// A thing that we can project into, and that has a layout.
+/// This wouldn't have to depend on `Machine` but with the current type inference,
+/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
+pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
+ /// Gets this value's layout.
+ fn layout(&self) -> TyAndLayout<'tcx>;
+
+ /// Makes this into an `OpTy`, in a cheap way that is good for reading.
+ fn to_op_for_read(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+
+ /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
+ fn to_op_for_proj(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ self.to_op_for_read(ecx)
+ }
+
+ /// Creates this from an `OpTy`.
+ ///
+ /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
+
+ /// Projects to the given enum variant.
+ fn project_downcast(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self>;
+
+ /// Projects to the n-th field.
+ fn project_field(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self>;
+}
+
+/// A thing that we can project into given *mutable* access to `ecx`, and that has a layout.
+/// This wouldn't have to depend on `Machine` but with the current type inference,
+/// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
+pub trait ValueMut<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Sized {
+ /// Gets this value's layout.
+ fn layout(&self) -> TyAndLayout<'tcx>;
+
+ /// Makes this into an `OpTy`, in a cheap way that is good for reading.
+ fn to_op_for_read(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+
+ /// Makes this into an `OpTy`, in a potentially more expensive way that is good for projections.
+ fn to_op_for_proj(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>>;
+
+ /// Creates this from an `OpTy`.
+ ///
+ /// If `to_op_for_proj` only ever produces `Indirect` operands, then this one is definitely `Indirect`.
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self;
+
+ /// Projects to the given enum variant.
+ fn project_downcast(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self>;
+
+ /// Projects to the n-th field.
+ fn project_field(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self>;
+}
+
+// We cannot have a general impl which shows that Value implies ValueMut. (When we do, it says we
+// cannot `impl ValueMut for PlaceTy` because some downstream crate could `impl Value for PlaceTy`.)
+// So we have some copy-paste here. (We could have a macro but since we only have 2 types with this
+// double-impl, that would barely make the code shorter, if at all.)
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::Provenance> {
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+ op.clone()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.operand_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.operand_field(self, field)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
+ for OpTy<'tcx, M::Provenance>
+{
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone())
+ }
+
+ #[inline(always)]
+ fn to_op_for_proj(
+ &self,
+ _ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.clone())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+ op.clone()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.operand_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.operand_field(self, field)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
+ for MPlaceTy<'tcx, M::Provenance>
+{
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.into())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+ // assert is justified because our `to_op_for_read` only ever produces `Indirect` operands.
+ op.assert_mem_place()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.mplace_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.mplace_field(self, field)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
+ for MPlaceTy<'tcx, M::Provenance>
+{
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ _ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.into())
+ }
+
+ #[inline(always)]
+ fn to_op_for_proj(
+ &self,
+ _ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ Ok(self.into())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+ // assert is justified because our `to_op_for_proj` only ever produces `Indirect` operands.
+ op.assert_mem_place()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.mplace_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.mplace_field(self, field)
+ }
+}
+
+impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueMut<'mir, 'tcx, M>
+ for PlaceTy<'tcx, M::Provenance>
+{
+ #[inline(always)]
+ fn layout(&self) -> TyAndLayout<'tcx> {
+ self.layout
+ }
+
+ #[inline(always)]
+ fn to_op_for_read(
+ &self,
+ ecx: &InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ // We `force_allocation` here so that `from_op` below can work.
+ ecx.place_to_op(self)
+ }
+
+ #[inline(always)]
+ fn to_op_for_proj(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
+ // We `force_allocation` here so that `from_op` below can work.
+ Ok(ecx.force_allocation(self)?.into())
+ }
+
+ #[inline(always)]
+ fn from_op(op: &OpTy<'tcx, M::Provenance>) -> Self {
+ // assert is justified because our `to_op` only ever produces `Indirect` operands.
+ op.assert_mem_place().into()
+ }
+
+ #[inline(always)]
+ fn project_downcast(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ variant: VariantIdx,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.place_downcast(self, variant)
+ }
+
+ #[inline(always)]
+ fn project_field(
+ &self,
+ ecx: &mut InterpCx<'mir, 'tcx, M>,
+ field: usize,
+ ) -> InterpResult<'tcx, Self> {
+ ecx.place_field(self, field)
+ }
+}
+
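+// A sketch of how this macro is used: it is expected to be instantiated twice, once with
+// shared access (a visitor over `Value`) and once with mutable access to the interpreter
+// (a visitor over `ValueMut`), differing only in the `$mutability` token.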
+macro_rules! make_value_visitor {
+ ($visitor_trait:ident, $value_trait:ident, $($mutability:ident)?) => {
+ // How to traverse a value and what to do when we are at the leaves.
+ pub trait $visitor_trait<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
+ type V: $value_trait<'mir, 'tcx, M>;
+
+ /// The visitor must have an `InterpCx` in it.
+ fn ecx(&$($mutability)? self)
+ -> &$($mutability)? InterpCx<'mir, 'tcx, M>;
+
+ /// `read_discriminant` can be hooked for better error messages.
+ #[inline(always)]
+ fn read_discriminant(
+ &mut self,
+ op: &OpTy<'tcx, M::Provenance>,
+ ) -> InterpResult<'tcx, VariantIdx> {
+ Ok(self.ecx().read_discriminant(op)?.1)
+ }
+
+ // Recursive actions, ready to be overloaded.
+ /// Visits the given value, dispatching as appropriate to more specialized visitors.
+ #[inline(always)]
+ fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
+ {
+ self.walk_value(v)
+ }
+ /// Visits the given value as a union. No automatic recursion can happen here.
+ #[inline(always)]
+ fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
+ {
+ Ok(())
+ }
+ /// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
+ /// The type of `v` will be a raw pointer, but this is a field of `Box<T>` and the
+ /// pointee type is the actual `T`.
+ #[inline(always)]
+ fn visit_box(&mut self, _v: &Self::V) -> InterpResult<'tcx>
+ {
+ Ok(())
+ }
+ /// Visits this value as an aggregate; you get an iterator yielding
+ /// all the fields (each still in an `InterpResult`, so you have to do the error
+ /// handling yourself). Recurses into the fields.
+ #[inline(always)]
+ fn visit_aggregate(
+ &mut self,
+ v: &Self::V,
+ fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
+ self.walk_aggregate(v, fields)
+ }
+
+ /// Called each time we recurse down to a field of a "product-like" aggregate
+ /// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
+ /// and new (inner) value.
+ /// This gives the visitor the chance to track the stack of nested fields that
+ /// we are descending through.
+ #[inline(always)]
+ fn visit_field(
+ &mut self,
+ _old_val: &Self::V,
+ _field: usize,
+ new_val: &Self::V,
+ ) -> InterpResult<'tcx> {
+ self.visit_value(new_val)
+ }
+ /// Called when recursing into an enum variant.
+ /// This gives the visitor the chance to track the stack of nested fields that
+ /// we are descending through.
+ #[inline(always)]
+ fn visit_variant(
+ &mut self,
+ _old_val: &Self::V,
+ _variant: VariantIdx,
+ new_val: &Self::V,
+ ) -> InterpResult<'tcx> {
+ self.visit_value(new_val)
+ }
+
+ // Default recursors. Not meant to be overloaded.
+ fn walk_aggregate(
+ &mut self,
+ v: &Self::V,
+ fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
+ ) -> InterpResult<'tcx> {
+ // Now iterate over it.
+ for (idx, field_val) in fields.enumerate() {
+ self.visit_field(v, idx, &field_val?)?;
+ }
+ Ok(())
+ }
+ fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
+ {
+ let ty = v.layout().ty;
+ trace!("walk_value: type: {ty}");
+
+ // Special treatment for special types, where the (static) layout is not sufficient.
+ match *ty.kind() {
+ // If it is a trait object, switch to the real type that was used to create it.
+ ty::Dynamic(..) => {
+ // Unsized values are never immediate, so we can `assert_mem_place`.
+ let op = v.to_op_for_read(self.ecx())?;
+ let dest = op.assert_mem_place();
+ let inner_mplace = self.ecx().unpack_dyn_trait(&dest)?;
+ trace!("walk_value: dyn object layout: {:#?}", inner_mplace.layout);
+ // recurse with the inner type
+ return self.visit_field(&v, 0, &$value_trait::from_op(&inner_mplace.into()));
+ },
+ // Slices do not need special handling here: they have `Array` field
+ // placement with length 0, so we enter the `Array` case below which
+ // indirectly uses the metadata to determine the actual length.
+
+ // However, `Box`... let's talk about `Box`.
+ ty::Adt(def, ..) if def.is_box() => {
+ // `Box` is a hybrid primitive-library-defined type that on the one hand is
+ // a dereferenceable pointer and on the other hand has *basically arbitrary
+ // user-defined layout*, since the user controls the 'allocator' field. So it
+ // cannot be treated like a normal pointer, since it does not fit into an
+ // `Immediate`. Yeah, it is quite terrible. But many visitors want to do
+ // something with "all boxed pointers", so we handle this mess for them.
+ //
+ // When we hit a `Box`, we do not do the usual `visit_aggregate`; instead,
+ // we (a) call `visit_box` on the pointer value, and (b) recurse on the
+ // allocator field. We also assert tons of things to ensure we do not miss
+ // any other fields.
+
+ // `Box` has two fields: the pointer we care about, and the allocator.
+ assert_eq!(v.layout().fields.count(), 2, "`Box` must have exactly 2 fields");
+ let (unique_ptr, alloc) =
+ (v.project_field(self.ecx(), 0)?, v.project_field(self.ecx(), 1)?);
+ // Unfortunately there is some type junk in the way here: `unique_ptr` is a `Unique`...
+ // (which means another 2 fields, the second of which is a `PhantomData`)
+ assert_eq!(unique_ptr.layout().fields.count(), 2);
+ let (nonnull_ptr, phantom) = (
+ unique_ptr.project_field(self.ecx(), 0)?,
+ unique_ptr.project_field(self.ecx(), 1)?,
+ );
+ assert!(
+ phantom.layout().ty.ty_adt_def().is_some_and(|adt| adt.is_phantom_data()),
+ "2nd field of `Unique` should be PhantomData but is {:?}",
+ phantom.layout().ty,
+ );
+ // ... that contains a `NonNull`... (gladly, only a single field here)
+ assert_eq!(nonnull_ptr.layout().fields.count(), 1);
+ let raw_ptr = nonnull_ptr.project_field(self.ecx(), 0)?; // the actual raw ptr
+ // ... whose only field finally is a raw ptr we can dereference.
+ self.visit_box(&raw_ptr)?;
+
+ // The second `Box` field is the allocator, which we recursively check for validity
+ // like in regular structs.
+ self.visit_field(v, 1, &alloc)?;
+
+ // We visited all parts of this one.
+ return Ok(());
+ }
+ _ => {},
+ };
+
+ // Visit the fields of this value.
+ match v.layout().fields {
+ FieldsShape::Primitive => {}
+ FieldsShape::Union(fields) => {
+ self.visit_union(v, fields)?;
+ }
+ FieldsShape::Arbitrary { ref offsets, .. } => {
+ // FIXME: We collect in a vec because otherwise there are lifetime
+ // errors: Projecting to a field needs access to `ecx`.
+ let fields: Vec<InterpResult<'tcx, Self::V>> =
+ (0..offsets.len()).map(|i| {
+ v.project_field(self.ecx(), i)
+ })
+ .collect();
+ self.visit_aggregate(v, fields.into_iter())?;
+ }
+ FieldsShape::Array { .. } => {
+ // Let's get an mplace (or immediate) first.
+ // This might `force_allocation` if `v` is a `PlaceTy`, but `place_index` does that anyway.
+ let op = v.to_op_for_proj(self.ecx())?;
+ // Now we can go over all the fields.
+ // This uses the *run-time length*, i.e., if we are a slice,
+ // the dynamic info from the metadata is used.
+ let iter = self.ecx().operand_array_fields(&op)?
+ .map(|f| f.and_then(|f| {
+ Ok($value_trait::from_op(&f))
+ }));
+ self.visit_aggregate(v, iter)?;
+ }
+ }
+
+ match v.layout().variants {
+ // If this is a multi-variant layout, find the right variant and proceed
+ // with *its* fields.
+ Variants::Multiple { .. } => {
+ let op = v.to_op_for_read(self.ecx())?;
+ let idx = self.read_discriminant(&op)?;
+ let inner = v.project_downcast(self.ecx(), idx)?;
+ trace!("walk_value: variant layout: {:#?}", inner.layout());
+ // recurse with the inner type
+ self.visit_variant(v, idx, &inner)
+ }
+ // For single-variant layouts, we already did everything there is to do.
+ Variants::Single { .. } => Ok(())
+ }
+ }
+ }
+ }
+}
+
+make_value_visitor!(ValueVisitor, Value,);
+make_value_visitor!(MutValueVisitor, ValueMut, mut);
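+
+// An illustrative sketch (not part of the upstream file): a visitor that counts
+// primitive leaves, assuming the `Value` impl for `OpTy` defined earlier in this
+// file. Overriding `visit_value` and delegating back to `walk_value` is the
+// intended extension pattern.
+//
+//     struct LeafCounter<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
+//         ecx: InterpCx<'mir, 'tcx, M>,
+//         leaves: usize,
+//     }
+//
+//     impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
+//         for LeafCounter<'mir, 'tcx, M>
+//     {
+//         type V = OpTy<'tcx, M::Provenance>;
+//
+//         fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
+//             &self.ecx
+//         }
+//
+//         fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx> {
+//             if matches!(v.layout.fields, FieldsShape::Primitive) {
+//                 self.leaves += 1;
+//             }
+//             self.walk_value(v)
+//         }
+//     }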
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs
new file mode 100644
index 000000000..72ac6af68
--- /dev/null
+++ b/compiler/rustc_const_eval/src/lib.rs
@@ -0,0 +1,68 @@
+/*!
+
+Rust MIR: a lowered representation of Rust.
+
+*/
+
+#![feature(assert_matches)]
+#![feature(box_patterns)]
+#![feature(control_flow_enum)]
+#![feature(decl_macro)]
+#![feature(exact_size_is_empty)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(map_try_insert)]
+#![feature(min_specialization)]
+#![feature(slice_ptr_get)]
+#![feature(option_get_or_insert_default)]
+#![feature(never_type)]
+#![feature(trait_alias)]
+#![feature(trusted_len)]
+#![feature(trusted_step)]
+#![feature(try_blocks)]
+#![feature(yeet_expr)]
+#![feature(is_some_with)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+pub mod const_eval;
+mod errors;
+pub mod interpret;
+mod might_permit_raw_init;
+pub mod transform;
+pub mod util;
+
+use rustc_middle::ty;
+use rustc_middle::ty::query::Providers;
+use rustc_target::abi::InitKind;
+
+pub fn provide(providers: &mut Providers) {
+ const_eval::provide(providers);
+ providers.eval_to_const_value_raw = const_eval::eval_to_const_value_raw_provider;
+ providers.eval_to_allocation_raw = const_eval::eval_to_allocation_raw_provider;
+ providers.const_caller_location = const_eval::const_caller_location;
+ providers.eval_to_valtree = |tcx, param_env_and_value| {
+ let (param_env, raw) = param_env_and_value.into_parts();
+ const_eval::eval_to_valtree(tcx, param_env, raw)
+ };
+ providers.try_destructure_mir_constant = |tcx, param_env_and_value| {
+ let (param_env, value) = param_env_and_value.into_parts();
+ const_eval::try_destructure_mir_constant(tcx, param_env, value).ok()
+ };
+ providers.valtree_to_const_val = |tcx, (ty, valtree)| {
+ const_eval::valtree_to_const_value(tcx, ty::ParamEnv::empty().and(ty), valtree)
+ };
+ providers.deref_mir_constant = |tcx, param_env_and_value| {
+ let (param_env, value) = param_env_and_value.into_parts();
+ const_eval::deref_mir_constant(tcx, param_env, value)
+ };
+ providers.permits_uninit_init =
+ |tcx, ty| might_permit_raw_init::might_permit_raw_init(tcx, ty, InitKind::Uninit);
+ providers.permits_zero_init =
+ |tcx, ty| might_permit_raw_init::might_permit_raw_init(tcx, ty, InitKind::Zero);
+}
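+
+// Illustrative usage (not part of this file): the compiler driver assembles the
+// query providers roughly like
+//
+//     let mut providers = Providers::default();
+//     rustc_const_eval::provide(&mut providers);
+//
+// after which queries such as `tcx.eval_to_const_value_raw(..)` dispatch to the
+// `const_eval` implementations registered above.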
diff --git a/compiler/rustc_const_eval/src/might_permit_raw_init.rs b/compiler/rustc_const_eval/src/might_permit_raw_init.rs
new file mode 100644
index 000000000..f971c2238
--- /dev/null
+++ b/compiler/rustc_const_eval/src/might_permit_raw_init.rs
@@ -0,0 +1,40 @@
+use crate::const_eval::CompileTimeInterpreter;
+use crate::interpret::{InterpCx, MemoryKind, OpTy};
+use rustc_middle::ty::layout::LayoutCx;
+use rustc_middle::ty::{layout::TyAndLayout, ParamEnv, TyCtxt};
+use rustc_session::Limit;
+use rustc_target::abi::InitKind;
+
+pub fn might_permit_raw_init<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: TyAndLayout<'tcx>,
+ kind: InitKind,
+) -> bool {
+ let strict = tcx.sess.opts.unstable_opts.strict_init_checks;
+
+ if strict {
+ let machine = CompileTimeInterpreter::new(Limit::new(0), false);
+
+ let mut cx = InterpCx::new(tcx, rustc_span::DUMMY_SP, ParamEnv::reveal_all(), machine);
+
+ let allocated = cx
+ .allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
+ .expect("OOM: failed to allocate for uninit check");
+
+ if kind == InitKind::Zero {
+ cx.write_bytes_ptr(
+ allocated.ptr,
+ std::iter::repeat(0_u8).take(ty.layout.size().bytes_usize()),
+ )
+ .expect("failed to write bytes for zero valid check");
+ }
+
+ let ot: OpTy<'_, _> = allocated.into();
+
+ // Assume that if it failed, it's a validation failure.
+ cx.validate_operand(&ot).is_ok()
+ } else {
+ let layout_cx = LayoutCx { tcx, param_env: ParamEnv::reveal_all() };
+ ty.might_permit_raw_init(&layout_cx, kind)
+ }
+}
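+
+// Illustrative answers (not tests) for a few layouts, under the semantics above:
+//
+//     might_permit_raw_init(tcx, bool_layout, InitKind::Zero)   // true: 0x00 is a valid bool
+//     might_permit_raw_init(tcx, bool_layout, InitKind::Uninit) // false: uninit bytes are not
+//     might_permit_raw_init(tcx, ref_layout,  InitKind::Zero)   // false: references are non-null
+//
+// where `bool_layout`/`ref_layout` stand for the `TyAndLayout` of `bool` and `&T`.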
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/check.rs b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
new file mode 100644
index 000000000..0adb88a18
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/check.rs
@@ -0,0 +1,1032 @@
+//! The `Visitor` responsible for actually checking a `mir::Body` for invalid operations.
+
+use rustc_errors::{Diagnostic, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
+use rustc_middle::ty::{self, adjustment::PointerCast, Instance, InstanceDef, Ty, TyCtxt};
+use rustc_middle::ty::{Binder, TraitPredicate, TraitRef, TypeVisitable};
+use rustc_mir_dataflow::{self, Analysis};
+use rustc_span::{sym, Span, Symbol};
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
+use rustc_trait_selection::traits::SelectionContext;
+
+use std::mem;
+use std::ops::Deref;
+
+use super::ops::{self, NonConstOp, Status};
+use super::qualifs::{self, CustomEq, HasMutInterior, NeedsDrop, NeedsNonConstDrop};
+use super::resolver::FlowSensitiveAnalysis;
+use super::{ConstCx, Qualif};
+use crate::const_eval::is_unstable_const_fn;
+use crate::errors::UnstableInStable;
+
+type QualifResults<'mir, 'tcx, Q> =
+ rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>;
+
+#[derive(Default)]
+pub struct Qualifs<'mir, 'tcx> {
+ has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>,
+ needs_drop: Option<QualifResults<'mir, 'tcx, NeedsDrop>>,
+ needs_non_const_drop: Option<QualifResults<'mir, 'tcx, NeedsNonConstDrop>>,
+}
+
+impl<'mir, 'tcx> Qualifs<'mir, 'tcx> {
+ /// Returns `true` if `local` is `NeedsDrop` at the given `Location`.
+ ///
+ /// Only updates the cursor if absolutely necessary
+ pub fn needs_drop(
+ &mut self,
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ local: Local,
+ location: Location,
+ ) -> bool {
+ let ty = ccx.body.local_decls[local].ty;
+ // Peeking into opaque types causes cycles if the current function declares said opaque
+ // type. Thus we avoid short-circuiting on the type and instead run the more expensive
+ // analysis that looks at the actual usage within this function.
+ if !ty.has_opaque_types() && !NeedsDrop::in_any_value_of_ty(ccx, ty) {
+ return false;
+ }
+
+ let needs_drop = self.needs_drop.get_or_insert_with(|| {
+ let ConstCx { tcx, body, .. } = *ccx;
+
+ FlowSensitiveAnalysis::new(NeedsDrop, ccx)
+ .into_engine(tcx, &body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(&body)
+ });
+
+ needs_drop.seek_before_primary_effect(location);
+ needs_drop.get().contains(local)
+ }
+
+ /// Returns `true` if `local` is `NeedsNonConstDrop` at the given `Location`.
+ ///
+ /// Only updates the cursor if absolutely necessary
+ pub fn needs_non_const_drop(
+ &mut self,
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ local: Local,
+ location: Location,
+ ) -> bool {
+ let ty = ccx.body.local_decls[local].ty;
+ if !NeedsNonConstDrop::in_any_value_of_ty(ccx, ty) {
+ return false;
+ }
+
+ let needs_non_const_drop = self.needs_non_const_drop.get_or_insert_with(|| {
+ let ConstCx { tcx, body, .. } = *ccx;
+
+ FlowSensitiveAnalysis::new(NeedsNonConstDrop, ccx)
+ .into_engine(tcx, &body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(&body)
+ });
+
+ needs_non_const_drop.seek_before_primary_effect(location);
+ needs_non_const_drop.get().contains(local)
+ }
+
+ /// Returns `true` if `local` is `HasMutInterior` at the given `Location`.
+ ///
+ /// Only updates the cursor if absolutely necessary.
+ pub fn has_mut_interior(
+ &mut self,
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ local: Local,
+ location: Location,
+ ) -> bool {
+ let ty = ccx.body.local_decls[local].ty;
+ // Peeking into opaque types causes cycles if the current function declares said opaque
+ // type. Thus we avoid short-circuiting on the type and instead run the more expensive
+ // analysis that looks at the actual usage within this function.
+ if !ty.has_opaque_types() && !HasMutInterior::in_any_value_of_ty(ccx, ty) {
+ return false;
+ }
+
+ let has_mut_interior = self.has_mut_interior.get_or_insert_with(|| {
+ let ConstCx { tcx, body, .. } = *ccx;
+
+ FlowSensitiveAnalysis::new(HasMutInterior, ccx)
+ .into_engine(tcx, &body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(&body)
+ });
+
+ has_mut_interior.seek_before_primary_effect(location);
+ has_mut_interior.get().contains(local)
+ }
+
+ fn in_return_place(
+ &mut self,
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ tainted_by_errors: Option<ErrorGuaranteed>,
+ ) -> ConstQualifs {
+ // Find the `Return` terminator if one exists.
+ //
+ // If no `Return` terminator exists, this MIR is divergent. Just return the conservative
+ // qualifs for the return type.
+ let return_block = ccx
+ .body
+ .basic_blocks()
+ .iter_enumerated()
+ .find(|(_, block)| matches!(block.terminator().kind, TerminatorKind::Return))
+ .map(|(bb, _)| bb);
+
+ let Some(return_block) = return_block else {
+ return qualifs::in_any_value_of_ty(ccx, ccx.body.return_ty(), tainted_by_errors);
+ };
+
+ let return_loc = ccx.body.terminator_loc(return_block);
+
+ let custom_eq = match ccx.const_kind() {
+ // We don't care whether a `const fn` returns a value that is not structurally
+ // matchable. Function calls are opaque and always use type-based qualification, so
+ // this value should never be used.
+ hir::ConstContext::ConstFn => true,
+
+ // If we know that all values of the return type are structurally matchable, there's no
+ // need to run dataflow.
+ // Opaque types do not participate in const generics or pattern matching, so we can safely count them out.
+ _ if ccx.body.return_ty().has_opaque_types()
+ || !CustomEq::in_any_value_of_ty(ccx, ccx.body.return_ty()) =>
+ {
+ false
+ }
+
+ hir::ConstContext::Const | hir::ConstContext::Static(_) => {
+ let mut cursor = FlowSensitiveAnalysis::new(CustomEq, ccx)
+ .into_engine(ccx.tcx, &ccx.body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(&ccx.body);
+
+ cursor.seek_after_primary_effect(return_loc);
+ cursor.get().contains(RETURN_PLACE)
+ }
+ };
+
+ ConstQualifs {
+ needs_drop: self.needs_drop(ccx, RETURN_PLACE, return_loc),
+ needs_non_const_drop: self.needs_non_const_drop(ccx, RETURN_PLACE, return_loc),
+ has_mut_interior: self.has_mut_interior(ccx, RETURN_PLACE, return_loc),
+ custom_eq,
+ tainted_by_errors,
+ }
+ }
+}
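+
+// For intuition (illustrative, not exhaustive): `HasMutInterior` holds for values
+// like `Cell<i32>`, and `NeedsDrop`/`NeedsNonConstDrop` for values like `String`;
+// the `in_return_place` method above reports which of these qualifs may apply to
+// the value a `const` item evaluates to.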
+
+pub struct Checker<'mir, 'tcx> {
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ qualifs: Qualifs<'mir, 'tcx>,
+
+ /// The span of the current statement.
+ span: Span,
+
+ /// A set that stores, for each local, whether there is a `StorageDead` for it somewhere.
+ local_has_storage_dead: Option<BitSet<Local>>,
+
+ error_emitted: Option<ErrorGuaranteed>,
+ secondary_errors: Vec<Diagnostic>,
+}
+
+impl<'mir, 'tcx> Deref for Checker<'mir, 'tcx> {
+ type Target = ConstCx<'mir, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ccx
+ }
+}
+
+impl<'mir, 'tcx> Checker<'mir, 'tcx> {
+ pub fn new(ccx: &'mir ConstCx<'mir, 'tcx>) -> Self {
+ Checker {
+ span: ccx.body.span,
+ ccx,
+ qualifs: Default::default(),
+ local_has_storage_dead: None,
+ error_emitted: None,
+ secondary_errors: Vec::new(),
+ }
+ }
+
+ pub fn check_body(&mut self) {
+ let ConstCx { tcx, body, .. } = *self.ccx;
+ let def_id = self.ccx.def_id();
+
+ // `async` functions cannot be `const fn`. This is checked during AST lowering, so there's
+ // no need to emit duplicate errors here.
+ if self.ccx.is_async() || body.generator.is_some() {
+ tcx.sess.delay_span_bug(body.span, "`async` functions cannot be `const fn`");
+ return;
+ }
+
+ // The local type and predicate checks are not free and only relevant for `const fn`s.
+ if self.const_kind() == hir::ConstContext::ConstFn {
+ for (idx, local) in body.local_decls.iter_enumerated() {
+ // Handle the return place below.
+ if idx == RETURN_PLACE || local.internal {
+ continue;
+ }
+
+ self.span = local.source_info.span;
+ self.check_local_or_return_ty(local.ty, idx);
+ }
+
+ // `impl Trait` is gone in MIR, so check the return type of a const fn by its signature
+ // instead of the type of the return place.
+ self.span = body.local_decls[RETURN_PLACE].source_info.span;
+ let return_ty = tcx.fn_sig(def_id).output();
+ self.check_local_or_return_ty(return_ty.skip_binder(), RETURN_PLACE);
+ }
+
+ if !tcx.has_attr(def_id.to_def_id(), sym::rustc_do_not_const_check) {
+ self.visit_body(&body);
+ }
+
+ // If we got through const-checking without emitting any "primary" errors, emit any
+ // "secondary" errors if they occurred.
+ let secondary_errors = mem::take(&mut self.secondary_errors);
+ if self.error_emitted.is_none() {
+ for mut error in secondary_errors {
+ self.tcx.sess.diagnostic().emit_diagnostic(&mut error);
+ }
+ } else {
+ assert!(self.tcx.sess.has_errors().is_some());
+ }
+ }
+
+ fn local_has_storage_dead(&mut self, local: Local) -> bool {
+ let ccx = self.ccx;
+ self.local_has_storage_dead
+ .get_or_insert_with(|| {
+ struct StorageDeads {
+ locals: BitSet<Local>,
+ }
+ impl<'tcx> Visitor<'tcx> for StorageDeads {
+ fn visit_statement(&mut self, stmt: &Statement<'tcx>, _: Location) {
+ if let StatementKind::StorageDead(l) = stmt.kind {
+ self.locals.insert(l);
+ }
+ }
+ }
+ let mut v = StorageDeads { locals: BitSet::new_empty(ccx.body.local_decls.len()) };
+ v.visit_body(ccx.body);
+ v.locals
+ })
+ .contains(local)
+ }
+
+ pub fn qualifs_in_return_place(&mut self) -> ConstQualifs {
+ self.qualifs.in_return_place(self.ccx, self.error_emitted)
+ }
+
+ /// Emits an error if an expression cannot be evaluated in the current context.
+ pub fn check_op(&mut self, op: impl NonConstOp<'tcx>) {
+ self.check_op_spanned(op, self.span);
+ }
+
+ /// Emits an error at the given `span` if an expression cannot be evaluated in the current
+ /// context.
+ pub fn check_op_spanned<O: NonConstOp<'tcx>>(&mut self, op: O, span: Span) {
+ let gate = match op.status_in_item(self.ccx) {
+ Status::Allowed => return,
+
+ Status::Unstable(gate) if self.tcx.features().enabled(gate) => {
+ let unstable_in_stable = self.ccx.is_const_stable_const_fn()
+ && !super::rustc_allow_const_fn_unstable(self.tcx, self.def_id(), gate);
+ if unstable_in_stable {
+ emit_unstable_in_stable_error(self.ccx, span, gate);
+ }
+
+ return;
+ }
+
+ Status::Unstable(gate) => Some(gate),
+ Status::Forbidden => None,
+ };
+
+ if self.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
+ self.tcx.sess.miri_unleashed_feature(span, gate);
+ return;
+ }
+
+ let mut err = op.build_error(self.ccx, span);
+ assert!(err.is_error());
+
+ match op.importance() {
+ ops::DiagnosticImportance::Primary => {
+ let reported = err.emit();
+ self.error_emitted = Some(reported);
+ }
+
+ ops::DiagnosticImportance::Secondary => err.buffer(&mut self.secondary_errors),
+ }
+ }
+
+ fn check_static(&mut self, def_id: DefId, span: Span) {
+ if self.tcx.is_thread_local_static(def_id) {
+ self.tcx.sess.delay_span_bug(span, "TLS access is checked in `Rvalue::ThreadLocalRef`");
+ }
+ self.check_op_spanned(ops::StaticAccess, span)
+ }
+
+ fn check_local_or_return_ty(&mut self, ty: Ty<'tcx>, local: Local) {
+ let kind = self.body.local_kind(local);
+
+ for ty in ty.walk() {
+ let ty = match ty.unpack() {
+ GenericArgKind::Type(ty) => ty,
+
+ // No constraints on lifetimes or constants, except potentially
+ // constants' types, but `walk` will get to them as well.
+ GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => continue,
+ };
+
+ match *ty.kind() {
+ ty::Ref(_, _, hir::Mutability::Mut) => self.check_op(ops::ty::MutRef(kind)),
+ _ => {}
+ }
+ }
+ }
+
+ fn check_mut_borrow(&mut self, local: Local, kind: hir::BorrowKind) {
+ match self.const_kind() {
+ // In a const fn all borrows are transient or point to the places given via
+ // references in the arguments (so we already checked them with
+ // TransientMutBorrow/MutBorrow as appropriate).
+ // The borrow checker guarantees that no new non-transient borrows are created.
+ // NOTE: Once we have heap allocations during CTFE we need to figure out
+ // how to prevent a `const fn` from creating long-lived allocations that point
+ // to mutable memory.
+ hir::ConstContext::ConstFn => self.check_op(ops::TransientMutBorrow(kind)),
+ _ => {
+ // Locals with StorageDead do not live beyond the evaluation and can
+ // thus safely be borrowed without being able to be leaked to the final
+ // value of the constant.
+ if self.local_has_storage_dead(local) {
+ self.check_op(ops::TransientMutBorrow(kind));
+ } else {
+ self.check_op(ops::MutBorrow(kind));
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> {
+ fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &BasicBlockData<'tcx>) {
+ trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+ // We don't const-check basic blocks on the cleanup path since we never unwind during
+ // const-eval: a panic causes an immediate compile error. In other words, cleanup blocks
+ // are unreachable during const-eval.
+ //
+ // We can't be more conservative (e.g., by const-checking cleanup blocks anyways) because
+ // locals that would never be dropped during normal execution are sometimes dropped during
+ // unwinding, which means backwards-incompatible live-drop errors.
+ if block.is_cleanup {
+ return;
+ }
+
+ self.super_basic_block_data(bb, block);
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ trace!("visit_rvalue: rvalue={:?} location={:?}", rvalue, location);
+
+ // Special-case reborrows to be more like a copy of a reference.
+ match *rvalue {
+ Rvalue::Ref(_, kind, place) => {
+ if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+ let ctx = match kind {
+ BorrowKind::Shared => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::SharedBorrow)
+ }
+ BorrowKind::Shallow => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+ }
+ BorrowKind::Unique => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::UniqueBorrow)
+ }
+ BorrowKind::Mut { .. } => {
+ PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+ }
+ };
+ self.visit_local(reborrowed_place_ref.local, ctx, location);
+ self.visit_projection(reborrowed_place_ref, ctx, location);
+ return;
+ }
+ }
+ Rvalue::AddressOf(mutbl, place) => {
+ if let Some(reborrowed_place_ref) = place_as_reborrow(self.tcx, self.body, place) {
+ let ctx = match mutbl {
+ Mutability::Not => {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::AddressOf)
+ }
+ Mutability::Mut => PlaceContext::MutatingUse(MutatingUseContext::AddressOf),
+ };
+ self.visit_local(reborrowed_place_ref.local, ctx, location);
+ self.visit_projection(reborrowed_place_ref, ctx, location);
+ return;
+ }
+ }
+ _ => {}
+ }
+
+ self.super_rvalue(rvalue, location);
+
+ match *rvalue {
+ Rvalue::ThreadLocalRef(_) => self.check_op(ops::ThreadLocalAccess),
+
+ Rvalue::Use(_)
+ | Rvalue::CopyForDeref(..)
+ | Rvalue::Repeat(..)
+ | Rvalue::Discriminant(..)
+ | Rvalue::Len(_)
+ | Rvalue::Aggregate(..) => {}
+
+ Rvalue::Ref(_, kind @ BorrowKind::Mut { .. }, ref place)
+ | Rvalue::Ref(_, kind @ BorrowKind::Unique, ref place) => {
+ let ty = place.ty(self.body, self.tcx).ty;
+ let is_allowed = match ty.kind() {
+ // Inside a `static mut`, `&mut [...]` is allowed.
+ ty::Array(..) | ty::Slice(_)
+ if self.const_kind() == hir::ConstContext::Static(hir::Mutability::Mut) =>
+ {
+ true
+ }
+
+ // FIXME(ecstaticmorse): We could allow `&mut []` inside a const context given
+ // that this is merely a ZST and it is already eligible for promotion.
+ // This may require an RFC?
+ /*
+ ty::Array(_, len) if len.try_eval_usize(cx.tcx, cx.param_env) == Some(0)
+ => true,
+ */
+ _ => false,
+ };
+
+ if !is_allowed {
+ if let BorrowKind::Mut { .. } = kind {
+ self.check_mut_borrow(place.local, hir::BorrowKind::Ref)
+ } else {
+ self.check_op(ops::CellBorrow);
+ }
+ }
+ }
+
+ Rvalue::AddressOf(Mutability::Mut, ref place) => {
+ self.check_mut_borrow(place.local, hir::BorrowKind::Raw)
+ }
+
+ Rvalue::Ref(_, BorrowKind::Shared | BorrowKind::Shallow, ref place)
+ | Rvalue::AddressOf(Mutability::Not, ref place) => {
+ let borrowed_place_has_mut_interior = qualifs::in_place::<HasMutInterior, _>(
+ &self.ccx,
+ &mut |local| self.qualifs.has_mut_interior(self.ccx, local, location),
+ place.as_ref(),
+ );
+
+ if borrowed_place_has_mut_interior {
+ match self.const_kind() {
+ // In a const fn all borrows are transient or point to the places given via
+ // references in the arguments (so we already checked them with
+ // TransientCellBorrow/CellBorrow as appropriate).
+ // The borrow checker guarantees that no new non-transient borrows are created.
+ // NOTE: Once we have heap allocations during CTFE we need to figure out
+ // how to prevent a `const fn` from creating long-lived allocations that point
+ // to (interior) mutable memory.
+ hir::ConstContext::ConstFn => self.check_op(ops::TransientCellBorrow),
+ _ => {
+ // Locals with StorageDead are definitely not part of the final constant value, and
+ // it is thus inherently safe to permit such locals to have their
+ // address taken as we can't end up with a reference to them in the
+ // final value.
+ // Note: This is only sound if every local that has a `StorageDead` has a
+ // `StorageDead` in every control flow path leading to a `return` terminator.
+ if self.local_has_storage_dead(place.local) {
+ self.check_op(ops::TransientCellBorrow);
+ } else {
+ self.check_op(ops::CellBorrow);
+ }
+ }
+ }
+ }
+ }
+
+ Rvalue::Cast(
+ CastKind::Pointer(
+ PointerCast::MutToConstPointer
+ | PointerCast::ArrayToPointer
+ | PointerCast::UnsafeFnPointer
+ | PointerCast::ClosureFnPointer(_)
+ | PointerCast::ReifyFnPointer,
+ ),
+ _,
+ _,
+ ) => {
+ // These are all okay; they only change the type, not the data.
+ }
+
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), _, _) => {
+ // Unsizing is implemented for CTFE.
+ }
+
+ Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => {
+ self.check_op(ops::RawPtrToIntCast);
+ }
+ Rvalue::Cast(CastKind::PointerFromExposedAddress, _, _) => {
+ // Since no pointer can ever get exposed (rejected above), this is easy to support.
+ }
+
+ Rvalue::Cast(CastKind::Misc, _, _) => {}
+
+ Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, _) => {}
+ Rvalue::ShallowInitBox(_, _) => {}
+
+ Rvalue::UnaryOp(_, ref operand) => {
+ let ty = operand.ty(self.body, self.tcx);
+ if is_int_bool_or_char(ty) {
+ // Int, bool, and char operations are fine.
+ } else if ty.is_floating_point() {
+ self.check_op(ops::FloatingPointOp);
+ } else {
+ span_bug!(self.span, "non-primitive type in `Rvalue::UnaryOp`: {:?}", ty);
+ }
+ }
+
+ Rvalue::BinaryOp(op, box (ref lhs, ref rhs))
+ | Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs_ty = lhs.ty(self.body, self.tcx);
+ let rhs_ty = rhs.ty(self.body, self.tcx);
+
+ if is_int_bool_or_char(lhs_ty) && is_int_bool_or_char(rhs_ty) {
+ // Int, bool, and char operations are fine.
+ } else if lhs_ty.is_fn_ptr() || lhs_ty.is_unsafe_ptr() {
+ assert_eq!(lhs_ty, rhs_ty);
+ assert!(
+ op == BinOp::Eq
+ || op == BinOp::Ne
+ || op == BinOp::Le
+ || op == BinOp::Lt
+ || op == BinOp::Ge
+ || op == BinOp::Gt
+ || op == BinOp::Offset
+ );
+
+ self.check_op(ops::RawPtrComparison);
+ } else if lhs_ty.is_floating_point() || rhs_ty.is_floating_point() {
+ self.check_op(ops::FloatingPointOp);
+ } else {
+ span_bug!(
+ self.span,
+ "non-primitive type in `Rvalue::BinaryOp`: {:?} ⚬ {:?}",
+ lhs_ty,
+ rhs_ty
+ );
+ }
+ }
+ }
+ }
+
+ fn visit_operand(&mut self, op: &Operand<'tcx>, location: Location) {
+ self.super_operand(op, location);
+ if let Operand::Constant(c) = op {
+ if let Some(def_id) = c.check_static_ptr(self.tcx) {
+ self.check_static(def_id, self.span);
+ }
+ }
+ }
+ fn visit_projection_elem(
+ &mut self,
+ place_local: Local,
+ proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ trace!(
+ "visit_projection_elem: place_local={:?} proj_base={:?} elem={:?} \
+ context={:?} location={:?}",
+ place_local,
+ proj_base,
+ elem,
+ context,
+ location,
+ );
+
+ self.super_projection_elem(place_local, proj_base, elem, context, location);
+
+ match elem {
+ ProjectionElem::Deref => {
+ let base_ty = Place::ty_from(place_local, proj_base, self.body, self.tcx).ty;
+ if base_ty.is_unsafe_ptr() {
+ if proj_base.is_empty() {
+ let decl = &self.body.local_decls[place_local];
+ if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
+ let span = decl.source_info.span;
+ self.check_static(def_id, span);
+ return;
+ }
+ }
+
+ // `*const T` is stable, `*mut T` is not
+ if !base_ty.is_mutable_ptr() {
+ return;
+ }
+
+ self.check_op(ops::RawMutPtrDeref);
+ }
+
+ if context.is_mutating_use() {
+ self.check_op(ops::MutDeref);
+ }
+ }
+
+ ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Downcast(..)
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Field(..)
+ | ProjectionElem::Index(_) => {}
+ }
+ }
+
+ fn visit_source_info(&mut self, source_info: &SourceInfo) {
+ trace!("visit_source_info: source_info={:?}", source_info);
+ self.span = source_info.span;
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ trace!("visit_statement: statement={:?} location={:?}", statement, location);
+
+ self.super_statement(statement, location);
+
+ match statement.kind {
+ StatementKind::Assign(..)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::FakeRead(..)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Nop => {}
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ self.super_terminator(terminator, location);
+
+ match &terminator.kind {
+ TerminatorKind::Call { func, args, fn_span, from_hir_call, .. } => {
+ let ConstCx { tcx, body, param_env, .. } = *self.ccx;
+ let caller = self.def_id();
+
+ let fn_ty = func.ty(body, tcx);
+
+ let (mut callee, mut substs) = match *fn_ty.kind() {
+ ty::FnDef(def_id, substs) => (def_id, substs),
+
+ ty::FnPtr(_) => {
+ self.check_op(ops::FnCallIndirect);
+ return;
+ }
+ _ => {
+ span_bug!(terminator.source_info.span, "invalid callee of type {:?}", fn_ty)
+ }
+ };
+
+ // Attempting to call a trait method?
+ if let Some(trait_id) = tcx.trait_of_item(callee) {
+ trace!("attempting to call a trait method");
+ if !self.tcx.features().const_trait_impl {
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+
+ let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
+ let poly_trait_pred = Binder::dummy(TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::ConstIfConst,
+ polarity: ty::ImplPolarity::Positive,
+ });
+ let obligation =
+ Obligation::new(ObligationCause::dummy(), param_env, poly_trait_pred);
+
+ let implsrc = tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ selcx.select(&obligation)
+ });
+
+ match implsrc {
+ Ok(Some(ImplSource::Param(_, ty::BoundConstness::ConstIfConst))) => {
+ debug!(
+ "const_trait_impl: provided {:?} via where-clause in {:?}",
+ trait_ref, param_env
+ );
+ return;
+ }
+ Ok(Some(ImplSource::UserDefined(data))) => {
+ let callee_name = tcx.item_name(callee);
+ if let Some(&did) = tcx
+ .associated_item_def_ids(data.impl_def_id)
+ .iter()
+ .find(|did| tcx.item_name(**did) == callee_name)
+ {
+ // using internal substs is ok here, since this is only
+ // used for the `resolve` call below
+ substs = InternalSubsts::identity_for_item(tcx, did);
+ callee = did;
+ }
+
+ if let hir::Constness::NotConst = tcx.constness(data.impl_def_id) {
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+ }
+ _ if !tcx.is_const_fn_raw(callee) => {
+ // At this point, it is only legal when the caller is in a trait
+ // marked with #[const_trait], and the callee is in the same trait.
+ let mut nonconst_call_permission = false;
+ if let Some(callee_trait) = tcx.trait_of_item(callee)
+ && tcx.has_attr(callee_trait, sym::const_trait)
+ && Some(callee_trait) == tcx.trait_of_item(caller.to_def_id())
+ // Can only call methods when it's `<Self as TheTrait>::f`.
+ && tcx.types.self_param == substs.type_at(0)
+ {
+ nonconst_call_permission = true;
+ }
+
+ if !nonconst_call_permission {
+ let obligation = Obligation::new(
+ ObligationCause::dummy_with_span(*fn_span),
+ param_env,
+ tcx.mk_predicate(
+ poly_trait_pred.map_bound(ty::PredicateKind::Trait),
+ ),
+ );
+
+ // Improve diagnostics by showing what failed. Our requirements are stricter this time,
+ // as we are going to error again anyway.
+ tcx.infer_ctxt().enter(|infcx| {
+ if let Err(e) = implsrc {
+ infcx.report_selection_error(
+ obligation.clone(),
+ &obligation,
+ &e,
+ false,
+ );
+ }
+ });
+
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+ }
+ _ => {}
+ }
+
+ // Resolve a trait method call to its concrete implementation, which may be in a
+ // `const` trait impl.
+ let instance = Instance::resolve(tcx, param_env, callee, substs);
+ debug!("Resolving ({:?}) -> {:?}", callee, instance);
+ if let Ok(Some(func)) = instance {
+ if let InstanceDef::Item(def) = func.def {
+ callee = def.did;
+ }
+ }
+ }
+
+ // At this point, we are calling a function, `callee`, whose `DefId` is known...
+
+ // `begin_panic` and `panic_display` are generic functions that accept
+ // types other than str. Check to enforce that only str can be used in
+ // const-eval.
+
+ // const-eval of the `begin_panic` fn assumes the argument is `&str`
+ if Some(callee) == tcx.lang_items().begin_panic_fn() {
+ match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+ ty::Ref(_, ty, _) if ty.is_str() => return,
+ _ => self.check_op(ops::PanicNonStr),
+ }
+ }
+
+ // const-eval of the `panic_display` fn assumes the argument is `&&str`
+ if Some(callee) == tcx.lang_items().panic_display() {
+ match args[0].ty(&self.ccx.body.local_decls, tcx).kind() {
+ ty::Ref(_, ty, _) if matches!(ty.kind(), ty::Ref(_, ty, _) if ty.is_str()) =>
+ {
+ return;
+ }
+ _ => self.check_op(ops::PanicNonStr),
+ }
+ }
+
+ if Some(callee) == tcx.lang_items().exchange_malloc_fn() {
+ self.check_op(ops::HeapAllocation);
+ return;
+ }
+
+ // `async` blocks get lowered to `std::future::from_generator(/* a closure */)`.
+ let is_async_block = Some(callee) == tcx.lang_items().from_generator_fn();
+ if is_async_block {
+ let kind = hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block);
+ self.check_op(ops::Generator(kind));
+ return;
+ }
+
+ let is_intrinsic = tcx.is_intrinsic(callee);
+
+ if !tcx.is_const_fn_raw(callee) {
+ if !tcx.is_const_default_method(callee) {
+ // To get here we must have already found a const impl for the
+ // trait; the callee can still be non-const because the impl may be
+ // using default method bodies.
+ self.check_op(ops::FnCallNonConst {
+ caller,
+ callee,
+ substs,
+ span: *fn_span,
+ from_hir_call: *from_hir_call,
+ });
+ return;
+ }
+ }
+
+ // If the `const fn` we are trying to call is not const-stable, ensure that we have
+ // the proper feature gate enabled.
+ if let Some(gate) = is_unstable_const_fn(tcx, callee) {
+ trace!(?gate, "calling unstable const fn");
+ if self.span.allows_unstable(gate) {
+ return;
+ }
+
+ // Calling an unstable function *always* requires that the corresponding gate
+ // be enabled, even if the function has `#[rustc_allow_const_fn_unstable(the_gate)]`.
+ if !tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == gate) {
+ self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+ return;
+ }
+
+ // If this crate is not using stability attributes, or the caller is not claiming to be a
+ // stable `const fn`, that is all that is required.
+ if !self.ccx.is_const_stable_const_fn() {
+ trace!("crate not using stability attributes or caller not stably const");
+ return;
+ }
+
+ // Otherwise, we are something const-stable calling a const-unstable fn.
+
+ if super::rustc_allow_const_fn_unstable(tcx, caller, gate) {
+ trace!("rustc_allow_const_fn_unstable gate active");
+ return;
+ }
+
+ self.check_op(ops::FnCallUnstable(callee, Some(gate)));
+ return;
+ }
+
+ // FIXME(ecstaticmorse): For compatibility, we consider `unstable` callees that
+ // have no `rustc_const_stable` attributes to be const-unstable as well. This
+ // should be fixed later.
+ let callee_is_unstable_unmarked = tcx.lookup_const_stability(callee).is_none()
+ && tcx.lookup_stability(callee).map_or(false, |s| s.is_unstable());
+ if callee_is_unstable_unmarked {
+ trace!("callee_is_unstable_unmarked");
+ // We do not use `const` modifiers for intrinsic "functions", as intrinsics are
+ // `extern` functions, and these have no way to get marked `const`. So instead we
+ // use `rustc_const_(un)stable` attributes to mean that the intrinsic is `const`
+ if self.ccx.is_const_stable_const_fn() || is_intrinsic {
+ self.check_op(ops::FnCallUnstable(callee, None));
+ return;
+ }
+ }
+ trace!("permitting call");
+ }
+
+ // Forbid all `Drop` terminators unless the place being dropped is a projection-free
+ // local that cannot be `NeedsNonConstDrop`.
+ TerminatorKind::Drop { place: dropped_place, .. }
+ | TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+ // If we are checking live drops after drop-elaboration, don't emit duplicate
+ // errors here.
+ if super::post_drop_elaboration::checking_enabled(self.ccx) {
+ return;
+ }
+
+ let mut err_span = self.span;
+ let ty_of_dropped_place = dropped_place.ty(self.body, self.tcx).ty;
+
+ let ty_needs_non_const_drop =
+ qualifs::NeedsNonConstDrop::in_any_value_of_ty(self.ccx, ty_of_dropped_place);
+
+ debug!(?ty_of_dropped_place, ?ty_needs_non_const_drop);
+
+ if !ty_needs_non_const_drop {
+ return;
+ }
+
+ let needs_non_const_drop = if let Some(local) = dropped_place.as_local() {
+ // Use the span where the local was declared as the span of the drop error.
+ err_span = self.body.local_decls[local].source_info.span;
+ self.qualifs.needs_non_const_drop(self.ccx, local, location)
+ } else {
+ true
+ };
+
+ if needs_non_const_drop {
+ self.check_op_spanned(
+ ops::LiveDrop { dropped_at: Some(terminator.source_info.span) },
+ err_span,
+ );
+ }
+ }
+
+ TerminatorKind::InlineAsm { .. } => self.check_op(ops::InlineAsm),
+
+ TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => {
+ self.check_op(ops::Generator(hir::GeneratorKind::Gen))
+ }
+
+ TerminatorKind::Abort => {
+ // Cleanup blocks are skipped for const checking (see `visit_basic_block_data`).
+ span_bug!(self.span, "`Abort` terminator outside of cleanup block")
+ }
+
+ TerminatorKind::Assert { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Return
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Unreachable => {}
+ }
+ }
+}
+
+fn place_as_reborrow<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ place: Place<'tcx>,
+) -> Option<PlaceRef<'tcx>> {
+ match place.as_ref().last_projection() {
+ Some((place_base, ProjectionElem::Deref)) => {
+ // A borrow of a `static` also looks like `&(*_1)` in the MIR, but `_1` is a `const`
+ // that points to the allocation for the static. Don't treat these as reborrows.
+ if body.local_decls[place_base.local].is_ref_to_static() {
+ None
+ } else {
+ // Ensure the type being derefed is a reference and not a raw pointer.
+ // This is sufficient to prevent an access to a `static mut` from being marked as a
+ // reborrow, even if the check above were to disappear.
+ let inner_ty = place_base.ty(body, tcx).ty;
+
+ if let ty::Ref(..) = inner_ty.kind() {
+ return Some(place_base);
+ } else {
+ return None;
+ }
+ }
+ }
+ _ => None,
+ }
+}
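+
+// Illustrative: for `let r2 = &*r1;` with `r1: &T`, the rvalue is `&(*_1)` in MIR;
+// `place_as_reborrow` returns the `PlaceRef` for `_1`, so `visit_rvalue` const-checks
+// the reborrow like a use of `_1` rather than as a fresh borrow.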
+
+fn is_int_bool_or_char(ty: Ty<'_>) -> bool {
+ ty.is_bool() || ty.is_integral() || ty.is_char()
+}
+
+fn emit_unstable_in_stable_error(ccx: &ConstCx<'_, '_>, span: Span, gate: Symbol) {
+ let attr_span = ccx.tcx.def_span(ccx.def_id()).shrink_to_lo();
+
+ ccx.tcx.sess.emit_err(UnstableInStable { gate: gate.to_string(), span, attr_span });
+}
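+
+// Illustrative inputs (not tests) that this checker rejects:
+//
+//     const fn f(x: &mut i32) {}           // `check_local_or_return_ty`: mutable
+//                                          // reference, gated by `const_mut_refs`
+//     const C: String = String::from("x"); // `visit_terminator`: non-const fn call
+//                                          // (`ops::FnCallNonConst`, E0015)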
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/mod.rs b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
new file mode 100644
index 000000000..25b420bed
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/mod.rs
@@ -0,0 +1,132 @@
+//! Check the bodies of `const`s, `static`s and `const fn`s for illegal operations.
+//!
+//! This module will eventually replace the parts of `qualify_consts.rs` that check whether a local
+//! has interior mutability or needs to be dropped, as well as the visitor that emits errors when
+//! it finds operations that are invalid in a certain context.
+
+use rustc_attr as attr;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::mir;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::Symbol;
+
+pub use self::qualifs::Qualif;
+
+pub mod check;
+mod ops;
+pub mod post_drop_elaboration;
+pub mod qualifs;
+mod resolver;
+
+/// Information about the item currently being const-checked, as well as a reference to the global
+/// context.
+pub struct ConstCx<'mir, 'tcx> {
+ pub body: &'mir mir::Body<'tcx>,
+ pub tcx: TyCtxt<'tcx>,
+ pub param_env: ty::ParamEnv<'tcx>,
+ pub const_kind: Option<hir::ConstContext>,
+}
+
+impl<'mir, 'tcx> ConstCx<'mir, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
+ let def_id = body.source.def_id().expect_local();
+ let param_env = tcx.param_env(def_id);
+ Self::new_with_param_env(tcx, body, param_env)
+ }
+
+ pub fn new_with_param_env(
+ tcx: TyCtxt<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
+ ConstCx { body, tcx, param_env, const_kind }
+ }
+
+ pub fn def_id(&self) -> LocalDefId {
+ self.body.source.def_id().expect_local()
+ }
+
+ /// Returns the kind of const context this `Item` represents (`const`, `static`, etc.).
+ ///
+ /// Panics if this `Item` is not const.
+ pub fn const_kind(&self) -> hir::ConstContext {
+ self.const_kind.expect("`const_kind` must not be called on a non-const fn")
+ }
+
+ pub fn is_const_stable_const_fn(&self) -> bool {
+ self.const_kind == Some(hir::ConstContext::ConstFn)
+ && self.tcx.features().staged_api
+ && is_const_stable_const_fn(self.tcx, self.def_id().to_def_id())
+ }
+
+ fn is_async(&self) -> bool {
+ self.tcx.asyncness(self.def_id()) == hir::IsAsync::Async
+ }
+}
+
+pub fn rustc_allow_const_fn_unstable(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+ feature_gate: Symbol,
+) -> bool {
+ let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(def_id));
+ attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs).any(|name| name == feature_gate)
+}
+
+// Returns `true` if the given `const fn` is "const-stable".
+//
+// Panics if the given `DefId` does not refer to a `const fn`.
+//
+// Const-stability is only relevant for `const fn` within a `staged_api` crate. Only "const-stable"
+// functions can be called in a const-context by users of the stable compiler. "const-stable"
+// functions are subject to more stringent restrictions than "const-unstable" functions: They
+// cannot use unstable features and can only call other "const-stable" functions.
+pub fn is_const_stable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ // A default body in a `#[const_trait]` is not const-stable because const
+ // trait fns currently cannot be const-stable. We shouldn't
+ // restrict default bodies to only call const-stable functions.
+ if tcx.is_const_default_method(def_id) {
+ return false;
+ }
+
+ // Const-stability is only relevant for `const fn`.
+ assert!(tcx.is_const_fn_raw(def_id));
+
+// A function is only const-stable if it has `#[rustc_const_stable]` or the trait it belongs
+ // to is const-stable.
+ match tcx.lookup_const_stability(def_id) {
+ Some(stab) => stab.is_const_stable(),
+ None if is_parent_const_stable_trait(tcx, def_id) => {
+ // Remove this when `#![feature(const_trait_impl)]` is stabilized,
+ // returning `true` unconditionally.
+ tcx.sess.delay_span_bug(
+ tcx.def_span(def_id),
+ "trait implementations cannot be const stable yet",
+ );
+ true
+ }
+ None => false, // By default, items are not const stable.
+ }
+}
+
+fn is_parent_const_stable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ let local_def_id = def_id.expect_local();
+ let hir_id = tcx.local_def_id_to_hir_id(local_def_id);
+
+ let Some(parent) = tcx.hir().find_parent_node(hir_id) else { return false };
+ let parent_def = tcx.hir().get(parent);
+
+ if !matches!(
+ parent_def,
+ hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
+ ..
+ })
+ ) {
+ return false;
+ }
+
+ tcx.lookup_const_stability(parent.owner).map_or(false, |stab| stab.is_const_stable())
+}
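+
+// Illustrative (assuming a `staged_api` crate): `is_const_stable_const_fn` returns
+// `true` for
+//
+//     #[stable(feature = "example", since = "1.0.0")]
+//     #[rustc_const_stable(feature = "const_example", since = "1.0.0")]
+//     pub const fn example() {}
+//
+// and `false` for a `const fn` carrying only `#[rustc_const_unstable(...)]`, whose
+// callers need the corresponding feature gate.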
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/ops.rs b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
new file mode 100644
index 000000000..338022616
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/ops.rs
@@ -0,0 +1,771 @@
+//! Concrete error types for all operations which may be invalid in a certain const context.
+
+use hir::def_id::LocalDefId;
+use rustc_errors::{
+ error_code, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed,
+};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::{ImplSource, Obligation, ObligationCause};
+use rustc_middle::mir;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{
+ suggest_constraining_type_param, Adt, Closure, DefIdTree, FnDef, FnPtr, Param, TraitPredicate,
+ Ty,
+};
+use rustc_middle::ty::{Binder, BoundConstness, ImplPolarity, TraitRef};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::sym;
+use rustc_span::{BytePos, Pos, Span, Symbol};
+use rustc_trait_selection::traits::SelectionContext;
+
+use super::ConstCx;
+use crate::errors::{
+ MutDerefErr, NonConstOpErr, PanicNonStrErr, RawPtrToIntErr, StaticAccessErr,
+ TransientMutBorrowErr, TransientMutBorrowErrRaw,
+};
+use crate::util::{call_kind, CallDesugaringKind, CallKind};
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum Status {
+ Allowed,
+ Unstable(Symbol),
+ Forbidden,
+}
+
+#[derive(Clone, Copy)]
+pub enum DiagnosticImportance {
+ /// An operation that must be removed for const-checking to pass.
+ Primary,
+
+ /// An operation that causes const-checking to fail, but is usually a side-effect of a `Primary` operation elsewhere.
+ Secondary,
+}
+
+/// An operation that is not *always* allowed in a const context.
+pub trait NonConstOp<'tcx>: std::fmt::Debug {
+ /// Returns an enum indicating whether this operation is allowed within the given item.
+ fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Forbidden
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ DiagnosticImportance::Primary
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+}
+
+#[derive(Debug)]
+pub struct FloatingPointOp;
+impl<'tcx> NonConstOp<'tcx> for FloatingPointOp {
+ fn status_in_item(&self, ccx: &ConstCx<'_, 'tcx>) -> Status {
+ if ccx.const_kind() == hir::ConstContext::ConstFn {
+ Status::Unstable(sym::const_fn_floating_point_arithmetic)
+ } else {
+ Status::Allowed
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_fn_floating_point_arithmetic,
+ span,
+ &format!("floating point arithmetic is not allowed in {}s", ccx.const_kind()),
+ )
+ }
+}
+
+/// A function call where the callee is a function pointer.
+#[derive(Debug)]
+pub struct FnCallIndirect;
+impl<'tcx> NonConstOp<'tcx> for FnCallIndirect {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.struct_span_err(
+ span,
+ &format!("function pointer calls are not allowed in {}s", ccx.const_kind()),
+ )
+ }
+}
+
+/// A function call where the callee is not marked as `const`.
+#[derive(Debug, Clone, Copy)]
+pub struct FnCallNonConst<'tcx> {
+ pub caller: LocalDefId,
+ pub callee: DefId,
+ pub substs: SubstsRef<'tcx>,
+ pub span: Span,
+ pub from_hir_call: bool,
+}
+
+impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ _: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let FnCallNonConst { caller, callee, substs, span, from_hir_call } = *self;
+ let ConstCx { tcx, param_env, .. } = *ccx;
+
+ let diag_trait = |err, self_ty: Ty<'_>, trait_id| {
+ let trait_ref = TraitRef::from_method(tcx, trait_id, substs);
+
+ match self_ty.kind() {
+ Param(param_ty) => {
+ debug!(?param_ty);
+ let caller_hir_id = tcx.hir().local_def_id_to_hir_id(caller);
+ if let Some(generics) = tcx.hir().get(caller_hir_id).generics() {
+ let constraint = with_no_trimmed_paths!(format!(
+ "~const {}",
+ trait_ref.print_only_trait_path()
+ ));
+ suggest_constraining_type_param(
+ tcx,
+ generics,
+ err,
+ &param_ty.name.as_str(),
+ &constraint,
+ None,
+ );
+ }
+ }
+ Adt(..) => {
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ param_env,
+ Binder::dummy(TraitPredicate {
+ trait_ref,
+ constness: BoundConstness::NotConst,
+ polarity: ImplPolarity::Positive,
+ }),
+ );
+
+ let implsrc = tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ selcx.select(&obligation)
+ });
+
+ if let Ok(Some(ImplSource::UserDefined(data))) = implsrc {
+ let span = tcx.def_span(data.impl_def_id);
+ err.span_note(span, "impl defined here, but it is not `const`");
+ }
+ }
+ _ => {}
+ }
+ };
+
+ let call_kind = call_kind(tcx, ccx.param_env, callee, substs, span, from_hir_call, None);
+
+ debug!(?call_kind);
+
+ let mut err = match call_kind {
+ CallKind::Normal { desugaring: Some((kind, self_ty)), .. } => {
+ macro_rules! error {
+ ($fmt:literal) => {
+ struct_span_err!(tcx.sess, span, E0015, $fmt, self_ty, ccx.const_kind())
+ };
+ }
+
+ let mut err = match kind {
+ CallDesugaringKind::ForLoopIntoIter => {
+ error!("cannot convert `{}` into an iterator in {}s")
+ }
+ CallDesugaringKind::QuestionBranch => {
+ error!("`?` cannot determine the branch of `{}` in {}s")
+ }
+ CallDesugaringKind::QuestionFromResidual => {
+ error!("`?` cannot convert from residual of `{}` in {}s")
+ }
+ CallDesugaringKind::TryBlockFromOutput => {
+ error!("`try` block cannot convert `{}` to the result in {}s")
+ }
+ };
+
+ diag_trait(&mut err, self_ty, kind.trait_def_id(tcx));
+ err
+ }
+ CallKind::FnCall { fn_trait_id, self_ty } => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const closure in {}s",
+ ccx.const_kind(),
+ );
+
+ match self_ty.kind() {
+ FnDef(def_id, ..) => {
+ let span = tcx.def_span(*def_id);
+ if ccx.tcx.is_const_fn_raw(*def_id) {
+ span_bug!(span, "calling const FnDef errored when it shouldn't");
+ }
+
+ err.span_note(span, "function defined here, but it is not `const`");
+ }
+ FnPtr(..) => {
+ err.note(&format!(
+ "function pointers need an RFC before allowed to be called in {}s",
+ ccx.const_kind()
+ ));
+ }
+ Closure(..) => {
+ err.note(&format!(
+ "closures need an RFC before allowed to be called in {}s",
+ ccx.const_kind()
+ ));
+ }
+ _ => {}
+ }
+
+ diag_trait(&mut err, self_ty, fn_trait_id);
+ err
+ }
+ CallKind::Operator { trait_id, self_ty, .. } => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const operator in {}s",
+ ccx.const_kind()
+ );
+
+ if Some(trait_id) == ccx.tcx.lang_items().eq_trait() {
+ match (substs[0].unpack(), substs[1].unpack()) {
+ (GenericArgKind::Type(self_ty), GenericArgKind::Type(rhs_ty))
+ if self_ty == rhs_ty
+ && self_ty.is_ref()
+ && self_ty.peel_refs().is_primitive() =>
+ {
+ let mut num_refs = 0;
+ let mut tmp_ty = self_ty;
+ while let rustc_middle::ty::Ref(_, inner_ty, _) = tmp_ty.kind() {
+ num_refs += 1;
+ tmp_ty = *inner_ty;
+ }
+ let deref = "*".repeat(num_refs);
+
+ if let Ok(call_str) = ccx.tcx.sess.source_map().span_to_snippet(span) {
+ if let Some(eq_idx) = call_str.find("==") {
+ if let Some(rhs_idx) =
+ call_str[(eq_idx + 2)..].find(|c: char| !c.is_whitespace())
+ {
+ let rhs_pos =
+ span.lo() + BytePos::from_usize(eq_idx + 2 + rhs_idx);
+ let rhs_span = span.with_lo(rhs_pos).with_hi(rhs_pos);
+ err.multipart_suggestion(
+ "consider dereferencing here",
+ vec![
+ (span.shrink_to_lo(), deref.clone()),
+ (rhs_span, deref),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ diag_trait(&mut err, self_ty, trait_id);
+ err
+ }
+ CallKind::DerefCoercion { deref_target, deref_target_ty, self_ty } => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0015,
+ "cannot perform deref coercion on `{}` in {}s",
+ self_ty,
+ ccx.const_kind()
+ );
+
+ err.note(&format!("attempting to deref into `{}`", deref_target_ty));
+
+ // Check first whether the source is accessible (issue #87060)
+ if tcx.sess.source_map().is_span_accessible(deref_target) {
+ err.span_note(deref_target, "deref defined here");
+ }
+
+ diag_trait(&mut err, self_ty, tcx.lang_items().deref_trait().unwrap());
+ err
+ }
+ _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentV1Methods) => {
+ struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const formatting macro in {}s",
+ ccx.const_kind(),
+ )
+ }
+ _ => struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0015,
+ "cannot call non-const fn `{}` in {}s",
+ ccx.tcx.def_path_str_with_substs(callee, substs),
+ ccx.const_kind(),
+ ),
+ };
+
+ err.note(&format!(
+ "calls in {}s are limited to constant functions, \
+ tuple structs and tuple variants",
+ ccx.const_kind(),
+ ));
+
+ err
+ }
+}
+
+/// A call to an `#[unstable]` const fn or `#[rustc_const_unstable]` function.
+///
+/// Contains the name of the feature that would allow the use of this function.
+#[derive(Debug)]
+pub struct FnCallUnstable(pub DefId, pub Option<Symbol>);
+
+impl<'tcx> NonConstOp<'tcx> for FnCallUnstable {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let FnCallUnstable(def_id, feature) = *self;
+
+ let mut err = ccx.tcx.sess.struct_span_err(
+ span,
+ &format!("`{}` is not yet stable as a const fn", ccx.tcx.def_path_str(def_id)),
+ );
+
+ if ccx.is_const_stable_const_fn() {
+ err.help("const-stable functions can only call other const-stable functions");
+ } else if ccx.tcx.sess.is_nightly_build() {
+ if let Some(feature) = feature {
+ err.help(&format!(
+ "add `#![feature({})]` to the crate attributes to enable",
+ feature
+ ));
+ }
+ }
+
+ err
+ }
+}
+
+#[derive(Debug)]
+pub struct Generator(pub hir::GeneratorKind);
+impl<'tcx> NonConstOp<'tcx> for Generator {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+ Status::Unstable(sym::const_async_blocks)
+ } else {
+ Status::Forbidden
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let msg = format!("{}s are not allowed in {}s", self.0, ccx.const_kind());
+ if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
+ feature_err(&ccx.tcx.sess.parse_sess, sym::const_async_blocks, span, &msg)
+ } else {
+ ccx.tcx.sess.struct_span_err(span, &msg)
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct HeapAllocation;
+impl<'tcx> NonConstOp<'tcx> for HeapAllocation {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0010,
+ "allocations are not allowed in {}s",
+ ccx.const_kind()
+ );
+ err.span_label(span, format!("allocation not allowed in {}s", ccx.const_kind()));
+ if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "The value of statics and constants must be known at compile time, \
+ and they live for the entire lifetime of a program. Creating a boxed \
+ value allocates memory on the heap at runtime, and therefore cannot \
+ be done at compile time.",
+ );
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+pub struct InlineAsm;
+impl<'tcx> NonConstOp<'tcx> for InlineAsm {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0015,
+ "inline assembly is not allowed in {}s",
+ ccx.const_kind()
+ )
+ }
+}
+
+#[derive(Debug)]
+pub struct LiveDrop {
+ pub dropped_at: Option<Span>,
+}
+impl<'tcx> NonConstOp<'tcx> for LiveDrop {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0493,
+ "destructors cannot be evaluated at compile-time"
+ );
+ err.span_label(span, format!("{}s cannot evaluate destructors", ccx.const_kind()));
+ if let Some(span) = self.dropped_at {
+ err.span_label(span, "value is dropped here");
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow never escapes to
+/// the final value of the constant.
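+///
+/// E.g. (illustrative) `const C: u8 = { let c = Cell::new(0u8); let _r = &c; 0 };`:
+/// the borrow of `c` is transient and never reaches the final value of `C`.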
+pub struct TransientCellBorrow;
+impl<'tcx> NonConstOp<'tcx> for TransientCellBorrow {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_refs_to_cell)
+ }
+ fn importance(&self) -> DiagnosticImportance {
+ // The cases that cannot possibly work will already emit a `CellBorrow`, so we should
+ // not additionally emit a feature gate error if activating the feature gate won't work.
+ DiagnosticImportance::Secondary
+ }
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_refs_to_cell,
+ span,
+ "cannot borrow here, since the borrowed element may contain interior mutability",
+ )
+ }
+}
+
+#[derive(Debug)]
+/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow might escape to
+/// the final value of the constant, and thus we cannot allow this (for now). We may allow
+/// it in the future for static items.
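+///
+/// E.g. (illustrative) `const C: &Cell<u8> = &Cell::new(0);`: here the borrow
+/// escapes into the final value of the constant.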
+pub struct CellBorrow;
+impl<'tcx> NonConstOp<'tcx> for CellBorrow {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0492,
+ "{}s cannot refer to interior mutable data",
+ ccx.const_kind(),
+ );
+ err.span_label(
+ span,
+ "this borrow of an interior mutable value may end up in the final value",
+ );
+ if let hir::ConstContext::Static(_) = ccx.const_kind() {
+ err.help(
+ "to fix this, the value can be extracted to a separate \
+ `static` item and then referenced",
+ );
+ }
+ if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+                "A constant containing interior mutable data behind a reference can allow you
+                to modify that data. This would make multiple uses of a constant able to
+                see different values and would allow circumventing the `Send` and `Sync`
+                requirements for shared mutable data, which is unsound.",
+ );
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+/// This op is for `&mut` borrows in the trailing expression of a constant
+/// which uses the "enclosing scopes rule" to leak its locals into anonymous
+/// static or const items.
+pub struct MutBorrow(pub hir::BorrowKind);
+
+impl<'tcx> NonConstOp<'tcx> for MutBorrow {
+ fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Forbidden
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ // If there were primary errors (like non-const function calls), do not emit further
+ // errors about mutable references.
+ DiagnosticImportance::Secondary
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let raw = match self.0 {
+ hir::BorrowKind::Raw => "raw ",
+ hir::BorrowKind::Ref => "",
+ };
+
+ let mut err = struct_span_err!(
+ ccx.tcx.sess,
+ span,
+ E0764,
+ "{}mutable references are not allowed in the final value of {}s",
+ raw,
+ ccx.const_kind(),
+ );
+
+ if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "References in statics and constants may only refer \
+ to immutable values.\n\n\
+ Statics are shared everywhere, and if they refer to \
+ mutable data one might violate memory safety since \
+ holding multiple mutable references to shared data \
+ is not allowed.\n\n\
+ If you really want global mutable state, try using \
+ static mut or a global UnsafeCell.",
+ );
+ }
+ err
+ }
+}
+
+#[derive(Debug)]
+pub struct TransientMutBorrow(pub hir::BorrowKind);
+
+impl<'tcx> NonConstOp<'tcx> for TransientMutBorrow {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let kind = ccx.const_kind();
+ match self.0 {
+ hir::BorrowKind::Raw => ccx
+ .tcx
+ .sess
+ .create_feature_err(TransientMutBorrowErrRaw { span, kind }, sym::const_mut_refs),
+ hir::BorrowKind::Ref => ccx
+ .tcx
+ .sess
+ .create_feature_err(TransientMutBorrowErr { span, kind }, sym::const_mut_refs),
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct MutDeref;
+impl<'tcx> NonConstOp<'tcx> for MutDeref {
+ fn status_in_item(&self, _: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ // Usually a side-effect of a `TransientMutBorrow` somewhere.
+ DiagnosticImportance::Secondary
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx
+ .sess
+ .create_feature_err(MutDerefErr { span, kind: ccx.const_kind() }, sym::const_mut_refs)
+ }
+}
+
+/// A call to a `panic()` lang item where the first argument is _not_ a `&str`.
+#[derive(Debug)]
+pub struct PanicNonStr;
+impl<'tcx> NonConstOp<'tcx> for PanicNonStr {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(PanicNonStrErr { span })
+ }
+}
+
+/// Comparing raw pointers for equality.
+/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
+/// allocation base addresses that are not known at compile-time.
+#[derive(Debug)]
+pub struct RawPtrComparison;
+impl<'tcx> NonConstOp<'tcx> for RawPtrComparison {
+ fn build_error(
+ &self,
+ _: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ span_bug!(span, "raw ptr comparison should already be caught in the trait system");
+ }
+}
+
+#[derive(Debug)]
+pub struct RawMutPtrDeref;
+impl<'tcx> NonConstOp<'tcx> for RawMutPtrDeref {
+ fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_mut_refs,
+ span,
+            &format!("dereferencing raw mutable pointers in {}s is unstable", ccx.const_kind()),
+ )
+ }
+}
+
+/// Casting raw pointer or function pointer to an integer.
+/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
+/// allocation base addresses that are not known at compile-time.
+#[derive(Debug)]
+pub struct RawPtrToIntCast;
+impl<'tcx> NonConstOp<'tcx> for RawPtrToIntCast {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(RawPtrToIntErr { span })
+ }
+}
+
+/// An access to a (non-thread-local) `static`.
+#[derive(Debug)]
+pub struct StaticAccess;
+impl<'tcx> NonConstOp<'tcx> for StaticAccess {
+ fn status_in_item(&self, ccx: &ConstCx<'_, 'tcx>) -> Status {
+ if let hir::ConstContext::Static(_) = ccx.const_kind() {
+ Status::Allowed
+ } else {
+ Status::Forbidden
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(StaticAccessErr {
+ span,
+ kind: ccx.const_kind(),
+ teach: ccx.tcx.sess.teach(&error_code!(E0013)).then_some(()),
+ })
+ }
+}
+
+/// An access to a thread-local `static`.
+#[derive(Debug)]
+pub struct ThreadLocalAccess;
+impl<'tcx> NonConstOp<'tcx> for ThreadLocalAccess {
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ ccx.tcx.sess.create_err(NonConstOpErr { span })
+ }
+}
+
+// Types that cannot appear in the signature or locals of a `const fn`.
+pub mod ty {
+ use super::*;
+
+ #[derive(Debug)]
+ pub struct MutRef(pub mir::LocalKind);
+ impl<'tcx> NonConstOp<'tcx> for MutRef {
+ fn status_in_item(&self, _ccx: &ConstCx<'_, 'tcx>) -> Status {
+ Status::Unstable(sym::const_mut_refs)
+ }
+
+ fn importance(&self) -> DiagnosticImportance {
+ match self.0 {
+ mir::LocalKind::Var | mir::LocalKind::Temp => DiagnosticImportance::Secondary,
+ mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => {
+ DiagnosticImportance::Primary
+ }
+ }
+ }
+
+ fn build_error(
+ &self,
+ ccx: &ConstCx<'_, 'tcx>,
+ span: Span,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ feature_err(
+ &ccx.tcx.sess.parse_sess,
+ sym::const_mut_refs,
+ span,
+ &format!("mutable references are not allowed in {}s", ccx.const_kind()),
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
new file mode 100644
index 000000000..4e210f663
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
@@ -0,0 +1,123 @@
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::{symbol::sym, Span};
+
+use super::check::Qualifs;
+use super::ops::{self, NonConstOp};
+use super::qualifs::{NeedsNonConstDrop, Qualif};
+use super::ConstCx;
+
+/// Returns `true` if we should use the more precise live drop checker that runs after drop
+/// elaboration.
+pub fn checking_enabled(ccx: &ConstCx<'_, '_>) -> bool {
+ // Const-stable functions must always use the stable live drop checker.
+ if ccx.is_const_stable_const_fn() {
+ return false;
+ }
+
+ ccx.tcx.features().const_precise_live_drops
+}
+
+/// Look for live drops in a const context.
+///
+/// This is separate from the rest of the const checking logic because it must run after drop
+/// elaboration.
+pub fn check_live_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
+ let def_id = body.source.def_id().expect_local();
+ let const_kind = tcx.hir().body_const_context(def_id);
+ if const_kind.is_none() {
+ return;
+ }
+
+ if tcx.has_attr(def_id.to_def_id(), sym::rustc_do_not_const_check) {
+ return;
+ }
+
+ let ccx = ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def_id) };
+ if !checking_enabled(&ccx) {
+ return;
+ }
+
+ let mut visitor = CheckLiveDrops { ccx: &ccx, qualifs: Qualifs::default() };
+
+ visitor.visit_body(body);
+}
+
+struct CheckLiveDrops<'mir, 'tcx> {
+ ccx: &'mir ConstCx<'mir, 'tcx>,
+ qualifs: Qualifs<'mir, 'tcx>,
+}
+
+// So we can access `body` and `tcx`.
+impl<'mir, 'tcx> std::ops::Deref for CheckLiveDrops<'mir, 'tcx> {
+ type Target = ConstCx<'mir, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ccx
+ }
+}
+
+impl CheckLiveDrops<'_, '_> {
+ fn check_live_drop(&self, span: Span) {
+ ops::LiveDrop { dropped_at: None }.build_error(self.ccx, span).emit();
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for CheckLiveDrops<'_, 'tcx> {
+ fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &mir::BasicBlockData<'tcx>) {
+ trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);
+
+ // Ignore drop terminators in cleanup blocks.
+ if block.is_cleanup {
+ return;
+ }
+
+ self.super_basic_block_data(bb, block);
+ }
+
+ fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+ trace!("visit_terminator: terminator={:?} location={:?}", terminator, location);
+
+ match &terminator.kind {
+ mir::TerminatorKind::Drop { place: dropped_place, .. }
+ | mir::TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+ let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
+ if !NeedsNonConstDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
+ // Instead of throwing a bug, we just return here. This is because we have to
+ // run custom `const Drop` impls.
+ return;
+ }
+
+ if dropped_place.is_indirect() {
+ self.check_live_drop(terminator.source_info.span);
+ return;
+ }
+
+ // Drop elaboration is not precise enough to accept code like
+ // `src/test/ui/consts/control-flow/drop-pass.rs`; e.g., when an `Option<Vec<T>>` is
+ // initialized with `None` and never changed, it still emits drop glue.
+ // Hence we additionally check the qualifs here to allow more code to pass.
+ if self.qualifs.needs_non_const_drop(self.ccx, dropped_place.local, location) {
+ // Use the span where the dropped local was declared for the error.
+ let span = self.body.local_decls[dropped_place.local].source_info.span;
+ self.check_live_drop(span);
+ }
+ }
+
+ mir::TerminatorKind::Abort
+ | mir::TerminatorKind::Call { .. }
+ | mir::TerminatorKind::Assert { .. }
+ | mir::TerminatorKind::FalseEdge { .. }
+ | mir::TerminatorKind::FalseUnwind { .. }
+ | mir::TerminatorKind::GeneratorDrop
+ | mir::TerminatorKind::Goto { .. }
+ | mir::TerminatorKind::InlineAsm { .. }
+ | mir::TerminatorKind::Resume
+ | mir::TerminatorKind::Return
+ | mir::TerminatorKind::SwitchInt { .. }
+ | mir::TerminatorKind::Unreachable
+ | mir::TerminatorKind::Yield { .. } => {}
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
new file mode 100644
index 000000000..c8a63c9c3
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
@@ -0,0 +1,384 @@
+//! Structural const qualification.
+//!
+//! See the `Qualif` trait for more info.
+
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::LangItem;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::TraitEngine;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty};
+use rustc_span::DUMMY_SP;
+use rustc_trait_selection::traits::{
+ self, ImplSource, Obligation, ObligationCause, SelectionContext, TraitEngineExt,
+};
+
+use super::ConstCx;
+
+pub fn in_any_value_of_ty<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ ty: Ty<'tcx>,
+ tainted_by_errors: Option<ErrorGuaranteed>,
+) -> ConstQualifs {
+ ConstQualifs {
+ has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
+ needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
+ needs_non_const_drop: NeedsNonConstDrop::in_any_value_of_ty(cx, ty),
+ custom_eq: CustomEq::in_any_value_of_ty(cx, ty),
+ tainted_by_errors,
+ }
+}
+
+/// A "qualif"(-ication) is a way to look for something "bad" in the MIR that would disqualify some
+/// code for promotion or prevent it from evaluating at compile time.
+///
+/// Normally, we would determine what qualifications apply to each type and error when an illegal
+/// operation is performed on such a type. However, this was found to be too imprecise, especially
+/// in the presence of `enum`s. If only a single variant of an enum has a certain qualification, we
+/// needn't reject code unless it actually constructs and operates on the qualified variant.
+///
+/// To accomplish this, const-checking and promotion use a value-based analysis (as opposed to a
+/// type-based one). Qualifications propagate structurally across variables: If a local (or a
+/// projection of a local) is assigned a qualified value, that local itself becomes qualified.
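+///
+/// For example (illustrative), given `enum E { A(Cell<u8>), B(u8) }`, a local assigned
+/// `E::B(0)` does not pick up `HasMutInterior` even though *some* values of `E` would;
+/// a purely type-based analysis would have to reject both variants.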
+pub trait Qualif {
+ /// The name of the file used to debug the dataflow analysis that computes this qualif.
+ const ANALYSIS_NAME: &'static str;
+
+ /// Whether this `Qualif` is cleared when a local is moved from.
+ const IS_CLEARED_ON_MOVE: bool = false;
+
+ /// Whether this `Qualif` might be evaluated after the promotion and can encounter a promoted.
+ const ALLOW_PROMOTED: bool = false;
+
+ /// Extracts the field of `ConstQualifs` that corresponds to this `Qualif`.
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool;
+
+ /// Returns `true` if *any* value of the given type could possibly have this `Qualif`.
+ ///
+ /// This function determines `Qualif`s when we cannot do a value-based analysis. Since qualif
+ /// propagation is context-insensitive, this includes function arguments and values returned
+ /// from a call to another function.
+ ///
+ /// It also determines the `Qualif`s for primitive types.
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool;
+
+ /// Returns `true` if this `Qualif` is inherent to the given struct or enum.
+ ///
+ /// By default, `Qualif`s propagate into ADTs in a structural way: An ADT only becomes
+ /// qualified if part of it is assigned a value with that `Qualif`. However, some ADTs *always*
+ /// have a certain `Qualif`, regardless of whether their fields have it. For example, a type
+ /// with a custom `Drop` impl is inherently `NeedsDrop`.
+ ///
+ /// Returning `true` for `in_adt_inherently` but `false` for `in_any_value_of_ty` is unsound.
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> bool;
+}
+
+/// Constant containing interior mutability (`UnsafeCell<T>`).
+/// This must be ruled out to make sure that evaluating the constant at compile-time
+/// and at *any point* during run-time would produce the same result. In particular,
+/// promotion of temporaries must not change program behavior; if the promoted could be
+/// written to, that would be a problem.
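+///
+/// E.g. (illustrative) promoting the temporary in `&Cell::new(0)` would hand out a
+/// `'static` reference through which later code could `set` the value, letting
+/// different uses of the "same" promoted constant observe different contents.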
+pub struct HasMutInterior;
+
+impl Qualif for HasMutInterior {
+ const ANALYSIS_NAME: &'static str = "flow_has_mut_interior";
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.has_mut_interior
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
+ }
+
+ fn in_adt_inherently<'tcx>(
+ _cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ _: SubstsRef<'tcx>,
+ ) -> bool {
+ // Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
+ // It arises structurally for all other types.
+ adt.is_unsafe_cell()
+ }
+}
+
+/// Constant containing an ADT that implements `Drop`.
+/// This must be ruled out because implicit promotion would remove side-effects
+/// that occur as part of dropping that value. N.B., the implicit promotion has
+/// to reject const Drop implementations because even if side-effects are ruled
+/// out through other means, the execution of the drop could diverge.
+pub struct NeedsDrop;
+
+impl Qualif for NeedsDrop {
+ const ANALYSIS_NAME: &'static str = "flow_needs_drop";
+ const IS_CLEARED_ON_MOVE: bool = true;
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.needs_drop
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ ty.needs_drop(cx.tcx, cx.param_env)
+ }
+
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ _: SubstsRef<'tcx>,
+ ) -> bool {
+ adt.has_dtor(cx.tcx)
+ }
+}
+
+/// Constant containing an ADT that implements non-const `Drop`.
+/// This must be ruled out because we cannot run `Drop` during compile-time.
+pub struct NeedsNonConstDrop;
+
+impl Qualif for NeedsNonConstDrop {
+ const ANALYSIS_NAME: &'static str = "flow_needs_nonconst_drop";
+ const IS_CLEARED_ON_MOVE: bool = true;
+ const ALLOW_PROMOTED: bool = true;
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.needs_non_const_drop
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ // Avoid selecting for simple cases, such as builtin types.
+ if ty::util::is_trivially_const_drop(ty) {
+ return false;
+ }
+
+ let destruct = cx.tcx.require_lang_item(LangItem::Destruct, None);
+
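+        // Check whether `ty: ~const Destruct` can be proven, i.e. whether dropping a
+        // value of type `ty` only ever runs const-compatible drop glue.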
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ cx.param_env,
+ ty::Binder::dummy(ty::TraitPredicate {
+ trait_ref: ty::TraitRef {
+ def_id: destruct,
+ substs: cx.tcx.mk_substs_trait(ty, &[]),
+ },
+ constness: ty::BoundConstness::ConstIfConst,
+ polarity: ty::ImplPolarity::Positive,
+ }),
+ );
+
+ cx.tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ let Some(impl_src) = selcx.select(&obligation).ok().flatten() else {
+ // If we couldn't select a const destruct candidate, then it's bad
+ return true;
+ };
+
+ if !matches!(
+ impl_src,
+ ImplSource::ConstDestruct(_)
+ | ImplSource::Param(_, ty::BoundConstness::ConstIfConst)
+ ) {
+ // If our const destruct candidate is not ConstDestruct or implied by the param env,
+ // then it's bad
+ return true;
+ }
+
+ if impl_src.borrow_nested_obligations().is_empty() {
+ return false;
+ }
+
+ // If we successfully found one, then select all of the predicates
+ // implied by our const drop impl.
+ let mut fcx = <dyn TraitEngine<'tcx>>::new(cx.tcx);
+ for nested in impl_src.nested_obligations() {
+ fcx.register_predicate_obligation(&infcx, nested);
+ }
+
+ // If we had any errors, then it's bad
+ !fcx.select_all_or_error(&infcx).is_empty()
+ })
+ }
+
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ _: SubstsRef<'tcx>,
+ ) -> bool {
+ adt.has_non_const_dtor(cx.tcx)
+ }
+}
+
+/// A constant that cannot be used as part of a pattern in a `match` expression.
+pub struct CustomEq;
+
+impl Qualif for CustomEq {
+ const ANALYSIS_NAME: &'static str = "flow_custom_eq";
+
+ fn in_qualifs(qualifs: &ConstQualifs) -> bool {
+ qualifs.custom_eq
+ }
+
+ fn in_any_value_of_ty<'tcx>(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
+ // If *any* component of a composite data type does not implement `Structural{Partial,}Eq`,
+ // we know that at least some values of that type are not structural-match. I say "some"
+ // because that component may be part of an enum variant (e.g.,
+ // `Option::<NonStructuralMatchTy>::Some`), in which case some values of this type may be
+ // structural-match (`Option::None`).
+ traits::search_for_structural_match_violation(cx.body.span, cx.tcx, ty).is_some()
+ }
+
+ fn in_adt_inherently<'tcx>(
+ cx: &ConstCx<'_, 'tcx>,
+ adt: AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> bool {
+ let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
+ !ty.is_structural_eq_shallow(cx.tcx)
+ }
+}
+
+// FIXME: Use `mir::visit::Visitor` for the `in_*` functions if/when it supports early return.
+
+/// Returns `true` if this `Rvalue` contains qualif `Q`.
+pub fn in_rvalue<'tcx, Q, F>(
+ cx: &ConstCx<'_, 'tcx>,
+ in_local: &mut F,
+ rvalue: &Rvalue<'tcx>,
+) -> bool
+where
+ Q: Qualif,
+ F: FnMut(Local) -> bool,
+{
+ match rvalue {
+ Rvalue::ThreadLocalRef(_) | Rvalue::NullaryOp(..) => {
+ Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx))
+ }
+
+ Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+ in_place::<Q, _>(cx, in_local, place.as_ref())
+ }
+
+ Rvalue::CopyForDeref(place) => in_place::<Q, _>(cx, in_local, place.as_ref()),
+
+ Rvalue::Use(operand)
+ | Rvalue::Repeat(operand, _)
+ | Rvalue::UnaryOp(_, operand)
+ | Rvalue::Cast(_, operand, _)
+ | Rvalue::ShallowInitBox(operand, _) => in_operand::<Q, _>(cx, in_local, operand),
+
+ Rvalue::BinaryOp(_, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(_, box (lhs, rhs)) => {
+ in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)
+ }
+
+ Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
+ // Special-case reborrows to be more like a copy of the reference.
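+            // E.g. (illustrative) `&*r` with `r: &T` takes the qualif of `r` itself
+            // rather than borrowing the pointee anew.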
+ if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection() {
+ let base_ty = place_base.ty(cx.body, cx.tcx).ty;
+ if let ty::Ref(..) = base_ty.kind() {
+ return in_place::<Q, _>(cx, in_local, place_base);
+ }
+ }
+
+ in_place::<Q, _>(cx, in_local, place.as_ref())
+ }
+
+ Rvalue::Aggregate(kind, operands) => {
+ // Return early if we know that the struct or enum being constructed is always
+ // qualified.
+ if let AggregateKind::Adt(adt_did, _, substs, ..) = **kind {
+ let def = cx.tcx.adt_def(adt_did);
+ if Q::in_adt_inherently(cx, def, substs) {
+ return true;
+ }
+ if def.is_union() && Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) {
+ return true;
+ }
+ }
+
+ // Otherwise, proceed structurally...
+ operands.iter().any(|o| in_operand::<Q, _>(cx, in_local, o))
+ }
+ }
+}
+
+/// Returns `true` if this `Place` contains qualif `Q`.
+pub fn in_place<'tcx, Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, place: PlaceRef<'tcx>) -> bool
+where
+ Q: Qualif,
+ F: FnMut(Local) -> bool,
+{
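+    // Walk the projection chain from the outermost element inwards; e.g.
+    // (illustrative) for `x.f[i]` we visit the index, then the field, and
+    // finally the local `x` itself.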
+ let mut place = place;
+ while let Some((place_base, elem)) = place.last_projection() {
+ match elem {
+ ProjectionElem::Index(index) if in_local(index) => return true,
+
+ ProjectionElem::Deref
+ | ProjectionElem::Field(_, _)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. }
+ | ProjectionElem::Downcast(_, _)
+ | ProjectionElem::Index(_) => {}
+ }
+
+ let base_ty = place_base.ty(cx.body, cx.tcx);
+ let proj_ty = base_ty.projection_ty(cx.tcx, elem).ty;
+ if !Q::in_any_value_of_ty(cx, proj_ty) {
+ return false;
+ }
+
+ place = place_base;
+ }
+
+ assert!(place.projection.is_empty());
+ in_local(place.local)
+}
+
+/// Returns `true` if this `Operand` contains qualif `Q`.
+pub fn in_operand<'tcx, Q, F>(
+ cx: &ConstCx<'_, 'tcx>,
+ in_local: &mut F,
+ operand: &Operand<'tcx>,
+) -> bool
+where
+ Q: Qualif,
+ F: FnMut(Local) -> bool,
+{
+ let constant = match operand {
+ Operand::Copy(place) | Operand::Move(place) => {
+ return in_place::<Q, _>(cx, in_local, place.as_ref());
+ }
+
+ Operand::Constant(c) => c,
+ };
+
+ // Check the qualifs of the value of `const` items.
+ if let Some(ct) = constant.literal.const_for_ty() {
+ if let ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs: _, promoted }) = ct.kind()
+ {
+            // Use the qualifs of the type for the promoted. Promoteds in the MIR body
+            // should be possible only for `NeedsNonConstDrop` with precise drop checking.
+            // This is the only const check performed after the promotion. Verify that
+            // with an assertion.
+ assert!(promoted.is_none() || Q::ALLOW_PROMOTED);
+ // Don't peek inside trait associated constants.
+ if promoted.is_none() && cx.tcx.trait_of_item(def.did).is_none() {
+ let qualifs = if let Some((did, param_did)) = def.as_const_arg() {
+ cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did))
+ } else {
+ cx.tcx.at(constant.span).mir_const_qualif(def.did)
+ };
+
+ if !Q::in_qualifs(&qualifs) {
+ return false;
+ }
+
+ // Just in case the type is more specific than
+ // the definition, e.g., impl associated const
+ // with type parameters, take it into account.
+ }
+ }
+ }
+ // Otherwise use the qualifs of the type.
+ Q::in_any_value_of_ty(cx, constant.literal.ty())
+}
diff --git a/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
new file mode 100644
index 000000000..60c1e4950
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
@@ -0,0 +1,384 @@
+//! Propagate `Qualif`s between locals and query the results.
+//!
+//! This contains the dataflow analysis used to track `Qualif`s on complex control-flow graphs.
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{self, BasicBlock, Local, Location, Statement, StatementKind};
+use rustc_mir_dataflow::fmt::DebugWithContext;
+use rustc_mir_dataflow::JoinSemiLattice;
+use rustc_mir_dataflow::{Analysis, AnalysisDomain, CallReturnPlaces};
+use rustc_span::DUMMY_SP;
+
+use std::fmt;
+use std::marker::PhantomData;
+
+use super::{qualifs, ConstCx, Qualif};
+
+/// A `Visitor` that propagates qualifs between locals. This defines the transfer function of
+/// `FlowSensitiveAnalysis`.
+///
+/// To account for indirect assignments, the data flow conservatively assumes that a local
+/// becomes qualified immediately after it is borrowed or its address escapes. The borrow must
+/// allow for mutation, which includes shared borrows of places with interior mutability. The
+/// type of the borrowed place must contain the qualif.
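+///
+/// For example (illustrative), after `let r = &mut x;` the local `x` is conservatively
+/// treated as qualified from then on (provided its type can contain the qualif at all),
+/// since mutations through `r` cannot be tracked.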
+struct TransferFunction<'a, 'mir, 'tcx, Q> {
+ ccx: &'a ConstCx<'mir, 'tcx>,
+ state: &'a mut State,
+ _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> TransferFunction<'a, 'mir, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ fn new(ccx: &'a ConstCx<'mir, 'tcx>, state: &'a mut State) -> Self {
+ TransferFunction { ccx, state, _qualif: PhantomData }
+ }
+
+ fn initialize_state(&mut self) {
+ self.state.qualif.clear();
+ self.state.borrow.clear();
+
+ for arg in self.ccx.body.args_iter() {
+ let arg_ty = self.ccx.body.local_decls[arg].ty;
+ if Q::in_any_value_of_ty(self.ccx, arg_ty) {
+ self.state.qualif.insert(arg);
+ }
+ }
+ }
+
+ fn assign_qualif_direct(&mut self, place: &mir::Place<'tcx>, mut value: bool) {
+ debug_assert!(!place.is_indirect());
+
+ if !value {
+ for (base, _elem) in place.iter_projections() {
+ let base_ty = base.ty(self.ccx.body, self.ccx.tcx);
+ if base_ty.ty.is_union() && Q::in_any_value_of_ty(self.ccx, base_ty.ty) {
+ value = true;
+ break;
+ }
+ }
+ }
+
+ match (value, place.as_ref()) {
+ (true, mir::PlaceRef { local, .. }) => {
+ self.state.qualif.insert(local);
+ }
+
+ // For now, we do not clear the qualif if a local is overwritten in full by
+ // an unqualified rvalue (e.g. `y = 5`). This is to be consistent
+ // with aggregates where we overwrite all fields with assignments, which would not
+ // get this feature.
+ (false, mir::PlaceRef { local: _, projection: &[] }) => {
+ // self.state.qualif.remove(*local);
+ }
+
+ _ => {}
+ }
+ }
+
+ fn apply_call_return_effect(
+ &mut self,
+ _block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+ // We cannot reason about another function's internals, so use conservative type-based
+ // qualification for the result of a function call.
+ let return_ty = place.ty(self.ccx.body, self.ccx.tcx).ty;
+ let qualif = Q::in_any_value_of_ty(self.ccx, return_ty);
+
+ if !place.is_indirect() {
+ self.assign_qualif_direct(&place, qualif);
+ }
+ });
+ }
+
+ fn address_of_allows_mutation(&self, _mt: mir::Mutability, _place: mir::Place<'tcx>) -> bool {
+        // The exact set of permissions granted by `AddressOf` is undecided. Conservatively
+        // assume that it might allow mutation until the resolution of #56604.
+ true
+ }
+
+ fn ref_allows_mutation(&self, kind: mir::BorrowKind, place: mir::Place<'tcx>) -> bool {
+ match kind {
+ mir::BorrowKind::Mut { .. } => true,
+ mir::BorrowKind::Shared | mir::BorrowKind::Shallow | mir::BorrowKind::Unique => {
+ self.shared_borrow_allows_mutation(place)
+ }
+ }
+ }
+
+    /// `&` only allows mutation if the borrowed place is `!Freeze`.
+ ///
+ /// This assumes that it is UB to take the address of a struct field whose type is
+ /// `Freeze`, then use pointer arithmetic to derive a pointer to a *different* field of
+ /// that same struct whose type is `!Freeze`. If we decide that this is not UB, we will
+ /// have to check the type of the borrowed **local** instead of the borrowed **place**
+ /// below. See [rust-lang/unsafe-code-guidelines#134].
+ ///
+ /// [rust-lang/unsafe-code-guidelines#134]: https://github.com/rust-lang/unsafe-code-guidelines/issues/134
+ fn shared_borrow_allows_mutation(&self, place: mir::Place<'tcx>) -> bool {
+ !place
+ .ty(self.ccx.body, self.ccx.tcx)
+ .ty
+ .is_freeze(self.ccx.tcx.at(DUMMY_SP), self.ccx.param_env)
+ }
+}
+
+impl<'tcx, Q> Visitor<'tcx> for TransferFunction<'_, '_, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
+ self.super_operand(operand, location);
+
+ if !Q::IS_CLEARED_ON_MOVE {
+ return;
+ }
+
+ // If a local with no projections is moved from (e.g. `x` in `y = x`), record that
+ // it no longer needs to be dropped.
+ if let mir::Operand::Move(place) = operand {
+ if let Some(local) = place.as_local() {
+                // For backward compatibility with the `MaybeMutBorrowedLocals` used in an
+                // earlier implementation, we retain the qualif if a local had been borrowed
+                // before. This might not be strictly necessary since the local is no longer
+                // initialized.
+ if !self.state.borrow.contains(local) {
+ self.state.qualif.remove(local);
+ }
+ }
+ }
+ }
+
+ fn visit_assign(
+ &mut self,
+ place: &mir::Place<'tcx>,
+ rvalue: &mir::Rvalue<'tcx>,
+ location: Location,
+ ) {
+ let qualif =
+ qualifs::in_rvalue::<Q, _>(self.ccx, &mut |l| self.state.qualif.contains(l), rvalue);
+ if !place.is_indirect() {
+ self.assign_qualif_direct(place, qualif);
+ }
+
+ // We need to assign qualifs to the left-hand side before visiting `rvalue` since
+ // qualifs can be cleared on move.
+ self.super_assign(place, rvalue, location);
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+
+ match rvalue {
+ mir::Rvalue::AddressOf(mt, borrowed_place) => {
+ if !borrowed_place.is_indirect()
+ && self.address_of_allows_mutation(*mt, *borrowed_place)
+ {
+ let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+ if Q::in_any_value_of_ty(self.ccx, place_ty) {
+ self.state.qualif.insert(borrowed_place.local);
+ self.state.borrow.insert(borrowed_place.local);
+ }
+ }
+ }
+
+ mir::Rvalue::Ref(_, kind, borrowed_place) => {
+ if !borrowed_place.is_indirect() && self.ref_allows_mutation(*kind, *borrowed_place)
+ {
+ let place_ty = borrowed_place.ty(self.ccx.body, self.ccx.tcx).ty;
+ if Q::in_any_value_of_ty(self.ccx, place_ty) {
+ self.state.qualif.insert(borrowed_place.local);
+ self.state.borrow.insert(borrowed_place.local);
+ }
+ }
+ }
+
+ mir::Rvalue::Cast(..)
+ | mir::Rvalue::ShallowInitBox(..)
+ | mir::Rvalue::Use(..)
+ | mir::Rvalue::CopyForDeref(..)
+ | mir::Rvalue::ThreadLocalRef(..)
+ | mir::Rvalue::Repeat(..)
+ | mir::Rvalue::Len(..)
+ | mir::Rvalue::BinaryOp(..)
+ | mir::Rvalue::CheckedBinaryOp(..)
+ | mir::Rvalue::NullaryOp(..)
+ | mir::Rvalue::UnaryOp(..)
+ | mir::Rvalue::Discriminant(..)
+ | mir::Rvalue::Aggregate(..) => {}
+ }
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ match statement.kind {
+ StatementKind::StorageDead(local) => {
+ self.state.qualif.remove(local);
+ self.state.borrow.remove(local);
+ }
+ _ => self.super_statement(statement, location),
+ }
+ }
+
+ fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+ // The effect of assignment to the return place in `TerminatorKind::Call` is not applied
+ // here; that occurs in `apply_call_return_effect`.
+
+ if let mir::TerminatorKind::DropAndReplace { value, place, .. } = &terminator.kind {
+ let qualif = qualifs::in_operand::<Q, _>(
+ self.ccx,
+ &mut |l| self.state.qualif.contains(l),
+ value,
+ );
+
+ if !place.is_indirect() {
+ self.assign_qualif_direct(place, qualif);
+ }
+ }
+
+        // We ignore borrows on drop because custom drop impls are not allowed in consts.
+ // FIXME: Reconsider if accounting for borrows in drops is necessary for const drop.
+
+ // We need to assign qualifs to the dropped location before visiting the operand that
+ // replaces it since qualifs can be cleared on move.
+ self.super_terminator(terminator, location);
+ }
+}
+
+/// The dataflow analysis used to propagate qualifs on arbitrary CFGs.
+pub(super) struct FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q> {
+ ccx: &'a ConstCx<'mir, 'tcx>,
+ _qualif: PhantomData<Q>,
+}
+
+impl<'a, 'mir, 'tcx, Q> FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ pub(super) fn new(_: Q, ccx: &'a ConstCx<'mir, 'tcx>) -> Self {
+ FlowSensitiveAnalysis { ccx, _qualif: PhantomData }
+ }
+
+ fn transfer_function(&self, state: &'a mut State) -> TransferFunction<'a, 'mir, 'tcx, Q> {
+ TransferFunction::<Q>::new(self.ccx, state)
+ }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub(super) struct State {
+    /// Describes whether a local contains the qualif.
+    pub qualif: BitSet<Local>,
+    /// Describes whether a local's address escaped and it might become qualified as a
+    /// result of an indirect mutation.
+ pub borrow: BitSet<Local>,
+}
+
+impl Clone for State {
+ fn clone(&self) -> Self {
+ State { qualif: self.qualif.clone(), borrow: self.borrow.clone() }
+ }
+
+    // The dataflow engine uses `clone_from` for domain values when possible.
+    // Providing an implementation avoids some intermediate memory allocations.
+ fn clone_from(&mut self, other: &Self) {
+ self.qualif.clone_from(&other.qualif);
+ self.borrow.clone_from(&other.borrow);
+ }
+}
+
+impl State {
+ #[inline]
+ pub(super) fn contains(&self, local: Local) -> bool {
+ self.qualif.contains(local)
+ }
+}
+
+impl<C> DebugWithContext<C> for State {
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("qualif: ")?;
+ self.qualif.fmt_with(ctxt, f)?;
+ f.write_str(" borrow: ")?;
+ self.borrow.fmt_with(ctxt, f)?;
+ Ok(())
+ }
+
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self == old {
+ return Ok(());
+ }
+
+ if self.qualif != old.qualif {
+ f.write_str("qualif: ")?;
+ self.qualif.fmt_diff_with(&old.qualif, ctxt, f)?;
+ f.write_str("\n")?;
+ }
+
+ if self.borrow != old.borrow {
+ f.write_str("borrow: ")?;
+            self.borrow.fmt_diff_with(&old.borrow, ctxt, f)?;
+ f.write_str("\n")?;
+ }
+
+ Ok(())
+ }
+}
+
+impl JoinSemiLattice for State {
+    fn join(&mut self, other: &Self) -> bool {
+        // Use `|` rather than `||` so that both joins always run; with a
+        // short-circuit, changes to `borrow` would be lost whenever `qualif`
+        // changed first.
+        self.qualif.join(&other.qualif) | self.borrow.join(&other.borrow)
+    }
+}
+
+impl<'tcx, Q> AnalysisDomain<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ type Domain = State;
+
+ const NAME: &'static str = Q::ANALYSIS_NAME;
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ State {
+ qualif: BitSet::new_empty(body.local_decls.len()),
+ borrow: BitSet::new_empty(body.local_decls.len()),
+ }
+ }
+
+ fn initialize_start_block(&self, _body: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ self.transfer_function(state).initialize_state();
+ }
+}
+
+impl<'tcx, Q> Analysis<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
+where
+ Q: Qualif,
+{
+ fn apply_statement_effect(
+ &self,
+ state: &mut Self::Domain,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(state).visit_statement(statement, location);
+ }
+
+ fn apply_terminator_effect(
+ &self,
+ state: &mut Self::Domain,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(state).visit_terminator(terminator, location);
+ }
+
+ fn apply_call_return_effect(
+ &self,
+ state: &mut Self::Domain,
+ block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ self.transfer_function(state).apply_call_return_effect(block, return_places)
+ }
+}
diff --git a/compiler/rustc_const_eval/src/transform/mod.rs b/compiler/rustc_const_eval/src/transform/mod.rs
new file mode 100644
index 000000000..a2928bdf5
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/mod.rs
@@ -0,0 +1,3 @@
+pub mod check_consts;
+pub mod promote_consts;
+pub mod validate;
diff --git a/compiler/rustc_const_eval/src/transform/promote_consts.rs b/compiler/rustc_const_eval/src/transform/promote_consts.rs
new file mode 100644
index 000000000..ed4d8c95d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/promote_consts.rs
@@ -0,0 +1,1066 @@
+//! A pass that promotes borrows of constant rvalues.
+//!
+//! The rvalues considered constant are trees of temps,
+//! each with exactly one initialization, and holding
+//! a constant value with no interior mutability.
+//! They are placed into a new MIR constant body in
+//! `promoted` and the borrow rvalue is replaced with
+//! a `Literal::Promoted` using the index into `promoted`
+//! of that constant MIR.
+//!
+//! This pass assumes that every use is dominated by an
+//! initialization; otherwise, on broken MIR, it can end up
+//! silencing errors from a move analysis that runs after promotion.
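+//!
+//! For example (illustrative), in `let r: &'static i32 = &42;` the temporary
+//! holding `42` is extracted into a promoted MIR body and the borrow is
+//! rewritten to refer to that promoted constant.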
+
+use rustc_hir as hir;
+use rustc_middle::mir::traversal::ReversePostorderIter;
+use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{self, List, TyCtxt, TypeVisitable};
+use rustc_span::Span;
+
+use rustc_index::vec::{Idx, IndexVec};
+
+use std::cell::Cell;
+use std::{cmp, iter, mem};
+
+use crate::transform::check_consts::{qualifs, ConstCx};
+
+/// A `MirPass` for promotion.
+///
+/// Promotion is the extraction of promotable temps into separate MIR bodies so they can have
+/// `'static` lifetime.
+///
+/// After this pass is run, `promoted_fragments` will hold the MIR body corresponding to each
+/// newly created `Constant`.
+#[derive(Default)]
+pub struct PromoteTemps<'tcx> {
+ pub promoted_fragments: Cell<IndexVec<Promoted, Body<'tcx>>>,
+}
+
+impl<'tcx> MirPass<'tcx> for PromoteTemps<'tcx> {
+ fn phase_change(&self) -> Option<MirPhase> {
+ Some(MirPhase::ConstsPromoted)
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // There's not really any point in promoting errorful MIR.
+ //
+ // This does not include MIR that failed const-checking, which we still try to promote.
+ if body.return_ty().references_error() {
+ tcx.sess.delay_span_bug(body.span, "PromoteTemps: MIR had errors");
+ return;
+ }
+
+ if body.source.promoted.is_some() {
+ return;
+ }
+
+ let mut rpo = traversal::reverse_postorder(body);
+ let ccx = ConstCx::new(tcx, body);
+ let (mut temps, all_candidates) = collect_temps_and_candidates(&ccx, &mut rpo);
+
+ let promotable_candidates = validate_candidates(&ccx, &mut temps, &all_candidates);
+
+ let promoted = promote_candidates(body, tcx, temps, promotable_candidates);
+ self.promoted_fragments.set(promoted);
+ }
+}
+
+/// State of a temporary during collection and promotion.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum TempState {
+ /// No references to this temp.
+ Undefined,
+ /// One direct assignment and any number of direct uses.
+ /// A borrow of this temp is promotable if the assigned
+ /// value is qualified as constant.
+ Defined { location: Location, uses: usize, valid: Result<(), ()> },
+ /// Any other combination of assignments/uses.
+ Unpromotable,
+ /// This temp was part of an rvalue which got extracted
+ /// during promotion and needs cleanup.
+ PromotedOut,
+}
+
+impl TempState {
+ pub fn is_promotable(&self) -> bool {
+ debug!("is_promotable: self={:?}", self);
+ matches!(self, TempState::Defined { .. })
+ }
+}
+
+/// A "root candidate" for promotion, which will become the
+/// returned value in a promoted MIR, unless it's a subset
+/// of a larger candidate.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct Candidate {
+ location: Location,
+}
+
+struct Collector<'a, 'tcx> {
+ ccx: &'a ConstCx<'a, 'tcx>,
+ temps: IndexVec<Local, TempState>,
+ candidates: Vec<Candidate>,
+}
+
+impl<'tcx> Visitor<'tcx> for Collector<'_, 'tcx> {
+ fn visit_local(&mut self, index: Local, context: PlaceContext, location: Location) {
+ debug!("visit_local: index={:?} context={:?} location={:?}", index, context, location);
+ // We're only interested in temporaries and the return place
+ match self.ccx.body.local_kind(index) {
+ LocalKind::Temp | LocalKind::ReturnPointer => {}
+ LocalKind::Arg | LocalKind::Var => return,
+ }
+
+        // Ignore drops: if the temp gets promoted, it is a constant and the drop
+        // is a no-op. Non-uses are also irrelevant.
+ if context.is_drop() || !context.is_use() {
+ debug!(
+ "visit_local: context.is_drop={:?} context.is_use={:?}",
+ context.is_drop(),
+ context.is_use(),
+ );
+ return;
+ }
+
+ let temp = &mut self.temps[index];
+ debug!("visit_local: temp={:?}", temp);
+ if *temp == TempState::Undefined {
+ match context {
+ PlaceContext::MutatingUse(MutatingUseContext::Store)
+ | PlaceContext::MutatingUse(MutatingUseContext::Call) => {
+ *temp = TempState::Defined { location, uses: 0, valid: Err(()) };
+ return;
+ }
+ _ => { /* mark as unpromotable below */ }
+ }
+ } else if let TempState::Defined { ref mut uses, .. } = *temp {
+ // We always allow borrows, even mutable ones, as we need
+ // to promote mutable borrows of some ZSTs e.g., `&mut []`.
+ let allowed_use = match context {
+ PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+ | PlaceContext::NonMutatingUse(_) => true,
+ PlaceContext::MutatingUse(_) | PlaceContext::NonUse(_) => false,
+ };
+ debug!("visit_local: allowed_use={:?}", allowed_use);
+ if allowed_use {
+ *uses += 1;
+ return;
+ }
+ /* mark as unpromotable below */
+ }
+ *temp = TempState::Unpromotable;
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+
+ match *rvalue {
+ Rvalue::Ref(..) => {
+ self.candidates.push(Candidate { location });
+ }
+ _ => {}
+ }
+ }
+}
+
+pub fn collect_temps_and_candidates<'tcx>(
+ ccx: &ConstCx<'_, 'tcx>,
+ rpo: &mut ReversePostorderIter<'_, 'tcx>,
+) -> (IndexVec<Local, TempState>, Vec<Candidate>) {
+ let mut collector = Collector {
+ temps: IndexVec::from_elem(TempState::Undefined, &ccx.body.local_decls),
+ candidates: vec![],
+ ccx,
+ };
+ for (bb, data) in rpo {
+ collector.visit_basic_block_data(bb, data);
+ }
+ (collector.temps, collector.candidates)
+}
+
+/// Checks whether locals that appear in a promotion context (`Candidate`) are actually promotable.
+///
+/// This wraps a `ConstCx`, and has access to all fields of that `ConstCx` via `Deref` coercion.
+struct Validator<'a, 'tcx> {
+ ccx: &'a ConstCx<'a, 'tcx>,
+ temps: &'a mut IndexVec<Local, TempState>,
+}
+
+impl<'a, 'tcx> std::ops::Deref for Validator<'a, 'tcx> {
+ type Target = ConstCx<'a, 'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.ccx
+ }
+}
+
+struct Unpromotable;
+
+impl<'tcx> Validator<'_, 'tcx> {
+ fn validate_candidate(&mut self, candidate: Candidate) -> Result<(), Unpromotable> {
+ let loc = candidate.location;
+ let statement = &self.body[loc.block].statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (_, Rvalue::Ref(_, kind, place))) => {
+ // We can only promote interior borrows of promotable temps (non-temps
+ // don't get promoted anyway).
+ self.validate_local(place.local)?;
+
+ // The reference operation itself must be promotable.
+ // (Needs to come after `validate_local` to avoid ICEs.)
+ self.validate_ref(*kind, place)?;
+
+ // We do not check all the projections (they do not get promoted anyway),
+ // but we do stay away from promoting anything involving a dereference.
+ if place.projection.contains(&ProjectionElem::Deref) {
+ return Err(Unpromotable);
+ }
+
+ // We cannot promote things that need dropping, since the promoted value
+ // would not get dropped.
+ if self.qualif_local::<qualifs::NeedsDrop>(place.local) {
+ return Err(Unpromotable);
+ }
+
+ Ok(())
+ }
+ _ => bug!(),
+ }
+ }
+
+ // FIXME(eddyb) maybe cache this?
+ fn qualif_local<Q: qualifs::Qualif>(&mut self, local: Local) -> bool {
+ if let TempState::Defined { location: loc, .. } = self.temps[local] {
+ let num_stmts = self.body[loc.block].statements.len();
+
+ if loc.statement_index < num_stmts {
+ let statement = &self.body[loc.block].statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (_, rhs)) => qualifs::in_rvalue::<Q, _>(
+ &self.ccx,
+ &mut |l| self.qualif_local::<Q>(l),
+ rhs,
+ ),
+ _ => {
+ span_bug!(
+ statement.source_info.span,
+ "{:?} is not an assignment",
+ statement
+ );
+ }
+ }
+ } else {
+ let terminator = self.body[loc.block].terminator();
+ match &terminator.kind {
+ TerminatorKind::Call { .. } => {
+ let return_ty = self.body.local_decls[local].ty;
+ Q::in_any_value_of_ty(&self.ccx, return_ty)
+ }
+ kind => {
+ span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+ }
+ }
+ }
+ } else {
+ let span = self.body.local_decls[local].source_info.span;
+ span_bug!(span, "{:?} not promotable, qualif_local shouldn't have been called", local);
+ }
+ }
+
+ fn validate_local(&mut self, local: Local) -> Result<(), Unpromotable> {
+ if let TempState::Defined { location: loc, uses, valid } = self.temps[local] {
+ valid.or_else(|_| {
+ let ok = {
+ let block = &self.body[loc.block];
+ let num_stmts = block.statements.len();
+
+ if loc.statement_index < num_stmts {
+ let statement = &block.statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (_, rhs)) => self.validate_rvalue(rhs),
+ _ => {
+ span_bug!(
+ statement.source_info.span,
+ "{:?} is not an assignment",
+ statement
+ );
+ }
+ }
+ } else {
+ let terminator = block.terminator();
+ match &terminator.kind {
+ TerminatorKind::Call { func, args, .. } => {
+ self.validate_call(func, args)
+ }
+ TerminatorKind::Yield { .. } => Err(Unpromotable),
+ kind => {
+ span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+ }
+ }
+ }
+ };
+ self.temps[local] = match ok {
+ Ok(()) => TempState::Defined { location: loc, uses, valid: Ok(()) },
+ Err(_) => TempState::Unpromotable,
+ };
+ ok
+ })
+ } else {
+ Err(Unpromotable)
+ }
+ }
+
+ fn validate_place(&mut self, place: PlaceRef<'tcx>) -> Result<(), Unpromotable> {
+ match place.last_projection() {
+ None => self.validate_local(place.local),
+ Some((place_base, elem)) => {
+ // Validate topmost projection, then recurse.
+ match elem {
+ ProjectionElem::Deref => {
+ let mut promotable = false;
+ // We need to make sure this is a `Deref` of a local with no further projections.
+ // Discussion can be found at
+ // https://github.com/rust-lang/rust/pull/74945#discussion_r463063247
+ if let Some(local) = place_base.as_local() {
+                        // This is special treatment for cases like `*&STATIC` where
+                        // `STATIC` is a global static variable. This pattern is
+                        // generated only when global static variables are accessed
+                        // directly, and it can safely be qualified for promotion.
+ if let TempState::Defined { location, .. } = self.temps[local] {
+ let def_stmt = self.body[location.block]
+ .statements
+ .get(location.statement_index);
+ if let Some(Statement {
+ kind:
+ StatementKind::Assign(box (
+ _,
+ Rvalue::Use(Operand::Constant(c)),
+ )),
+ ..
+ }) = def_stmt
+ {
+ if let Some(did) = c.check_static_ptr(self.tcx) {
+ // Evaluating a promoted may not read statics except if it got
+ // promoted from a static (this is a CTFE check). So we
+ // can only promote static accesses inside statics.
+ if let Some(hir::ConstContext::Static(..)) = self.const_kind
+ {
+ if !self.tcx.is_thread_local_static(did) {
+ promotable = true;
+ }
+ }
+ }
+ }
+ }
+ }
+ if !promotable {
+ return Err(Unpromotable);
+ }
+ }
+ ProjectionElem::Downcast(..) => {
+ return Err(Unpromotable);
+ }
+
+ ProjectionElem::ConstantIndex { .. } | ProjectionElem::Subslice { .. } => {}
+
+ ProjectionElem::Index(local) => {
+ let mut promotable = false;
+ // Only accept if we can predict the index and are indexing an array.
+ let val =
+ if let TempState::Defined { location: loc, .. } = self.temps[local] {
+ let block = &self.body[loc.block];
+ if loc.statement_index < block.statements.len() {
+ let statement = &block.statements[loc.statement_index];
+ match &statement.kind {
+ StatementKind::Assign(box (
+ _,
+ Rvalue::Use(Operand::Constant(c)),
+ )) => c.literal.try_eval_usize(self.tcx, self.param_env),
+ _ => None,
+ }
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+ if let Some(idx) = val {
+ // Determine the type of the thing we are indexing.
+ let ty = place_base.ty(self.body, self.tcx).ty;
+ match ty.kind() {
+ ty::Array(_, len) => {
+ // It's an array; determine its length.
+ if let Some(len) = len.try_eval_usize(self.tcx, self.param_env)
+ {
+ // If the index is in-bounds, go ahead.
+ if idx < len {
+ promotable = true;
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ if !promotable {
+ return Err(Unpromotable);
+ }
+
+ self.validate_local(local)?;
+ }
+
+ ProjectionElem::Field(..) => {
+ let base_ty = place_base.ty(self.body, self.tcx).ty;
+ if base_ty.is_union() {
+ // No promotion of union field accesses.
+ return Err(Unpromotable);
+ }
+ }
+ }
+
+ self.validate_place(place_base)
+ }
+ }
+ }
+
+ fn validate_operand(&mut self, operand: &Operand<'tcx>) -> Result<(), Unpromotable> {
+ match operand {
+ Operand::Copy(place) | Operand::Move(place) => self.validate_place(place.as_ref()),
+
+ // The qualifs for a constant (e.g. `HasMutInterior`) are checked in
+ // `validate_rvalue` upon access.
+ Operand::Constant(c) => {
+ if let Some(def_id) = c.check_static_ptr(self.tcx) {
+ // Only allow statics (not consts) to refer to other statics.
+ // FIXME(eddyb) does this matter at all for promotion?
+ // FIXME(RalfJung) it makes little sense to not promote this in `fn`/`const fn`,
+ // and in `const` this cannot occur anyway. The only concern is that we might
+ // promote even `let x = &STATIC` which would be useless, but this applies to
+ // promotion inside statics as well.
+ let is_static = matches!(self.const_kind, Some(hir::ConstContext::Static(_)));
+ if !is_static {
+ return Err(Unpromotable);
+ }
+
+ let is_thread_local = self.tcx.is_thread_local_static(def_id);
+ if is_thread_local {
+ return Err(Unpromotable);
+ }
+ }
+
+ Ok(())
+ }
+ }
+ }
+
+ fn validate_ref(&mut self, kind: BorrowKind, place: &Place<'tcx>) -> Result<(), Unpromotable> {
+ match kind {
+ // Reject these borrow types just to be safe.
+ // FIXME(RalfJung): could we allow them? Should we? No point in it until we have a usecase.
+ BorrowKind::Shallow | BorrowKind::Unique => return Err(Unpromotable),
+
+ BorrowKind::Shared => {
+ let has_mut_interior = self.qualif_local::<qualifs::HasMutInterior>(place.local);
+ if has_mut_interior {
+ return Err(Unpromotable);
+ }
+ }
+
+ BorrowKind::Mut { .. } => {
+ let ty = place.ty(self.body, self.tcx).ty;
+
+ // In theory, any zero-sized value could be borrowed
+ // mutably without consequences. However, only &mut []
+ // is allowed right now.
+ if let ty::Array(_, len) = ty.kind() {
+ match len.try_eval_usize(self.tcx, self.param_env) {
+ Some(0) => {}
+ _ => return Err(Unpromotable),
+ }
+ } else {
+ return Err(Unpromotable);
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn validate_rvalue(&mut self, rvalue: &Rvalue<'tcx>) -> Result<(), Unpromotable> {
+ match rvalue {
+ Rvalue::Use(operand) | Rvalue::Repeat(operand, _) => {
+ self.validate_operand(operand)?;
+ }
+ Rvalue::CopyForDeref(place) => {
+ let op = &Operand::Copy(*place);
+ self.validate_operand(op)?
+ }
+
+ Rvalue::Discriminant(place) | Rvalue::Len(place) => {
+ self.validate_place(place.as_ref())?
+ }
+
+ Rvalue::ThreadLocalRef(_) => return Err(Unpromotable),
+
+ // ptr-to-int casts are not possible in consts and thus not promotable
+ Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => return Err(Unpromotable),
+
+ // all other casts including int-to-ptr casts are fine, they just use the integer value
+ // at pointer type.
+ Rvalue::Cast(_, operand, _) => {
+ self.validate_operand(operand)?;
+ }
+
+ Rvalue::NullaryOp(op, _) => match op {
+ NullOp::SizeOf => {}
+ NullOp::AlignOf => {}
+ },
+
+ Rvalue::ShallowInitBox(_, _) => return Err(Unpromotable),
+
+ Rvalue::UnaryOp(op, operand) => {
+ match op {
+ // These operations can never fail.
+ UnOp::Neg | UnOp::Not => {}
+ }
+
+ self.validate_operand(operand)?;
+ }
+
+ Rvalue::BinaryOp(op, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(op, box (lhs, rhs)) => {
+ let op = *op;
+ let lhs_ty = lhs.ty(self.body, self.tcx);
+
+ if let ty::RawPtr(_) | ty::FnPtr(..) = lhs_ty.kind() {
+ // Raw and fn pointer operations are not allowed inside consts and thus not promotable.
+ assert!(matches!(
+ op,
+ BinOp::Eq
+ | BinOp::Ne
+ | BinOp::Le
+ | BinOp::Lt
+ | BinOp::Ge
+ | BinOp::Gt
+ | BinOp::Offset
+ ));
+ return Err(Unpromotable);
+ }
+
+ match op {
+ BinOp::Div | BinOp::Rem => {
+ if lhs_ty.is_integral() {
+ // Integer division: the RHS must be a non-zero const.
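+                            // (E.g. `x / 2` can be promoted, while `x / y` with an
+                            // unknown `y`, or `x / 0`, cannot.)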
+ let const_val = match rhs {
+ Operand::Constant(c) => {
+ c.literal.try_eval_bits(self.tcx, self.param_env, lhs_ty)
+ }
+ _ => None,
+ };
+ match const_val {
+ Some(x) if x != 0 => {} // okay
+ _ => return Err(Unpromotable), // value not known or 0 -- not okay
+ }
+ }
+ }
+ // The remaining operations can never fail.
+ BinOp::Eq
+ | BinOp::Ne
+ | BinOp::Le
+ | BinOp::Lt
+ | BinOp::Ge
+ | BinOp::Gt
+ | BinOp::Offset
+ | BinOp::Add
+ | BinOp::Sub
+ | BinOp::Mul
+ | BinOp::BitXor
+ | BinOp::BitAnd
+ | BinOp::BitOr
+ | BinOp::Shl
+ | BinOp::Shr => {}
+ }
+
+ self.validate_operand(lhs)?;
+ self.validate_operand(rhs)?;
+ }
+
+ Rvalue::AddressOf(_, place) => {
+ // We accept `&raw *`, i.e., raw reborrows -- creating a raw pointer is
+ // no problem, only using it is.
+ if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection()
+ {
+ let base_ty = place_base.ty(self.body, self.tcx).ty;
+ if let ty::Ref(..) = base_ty.kind() {
+ return self.validate_place(place_base);
+ }
+ }
+ return Err(Unpromotable);
+ }
+
+ Rvalue::Ref(_, kind, place) => {
+ // Special-case reborrows to be more like a copy of the reference.
+ let mut place_simplified = place.as_ref();
+ if let Some((place_base, ProjectionElem::Deref)) =
+ place_simplified.last_projection()
+ {
+ let base_ty = place_base.ty(self.body, self.tcx).ty;
+ if let ty::Ref(..) = base_ty.kind() {
+ place_simplified = place_base;
+ }
+ }
+
+ self.validate_place(place_simplified)?;
+
+ // Check that the reference is fine (using the original place!).
+ // (Needs to come after `validate_place` to avoid ICEs.)
+ self.validate_ref(*kind, place)?;
+ }
+
+ Rvalue::Aggregate(_, operands) => {
+ for o in operands {
+ self.validate_operand(o)?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn validate_call(
+ &mut self,
+ callee: &Operand<'tcx>,
+ args: &[Operand<'tcx>],
+ ) -> Result<(), Unpromotable> {
+ let fn_ty = callee.ty(self.body, self.tcx);
+
+ // Inside const/static items, we promote all (eligible) function calls.
+ // Everywhere else, we require `#[rustc_promotable]` on the callee.
+ let promote_all_const_fn = matches!(
+ self.const_kind,
+ Some(hir::ConstContext::Static(_) | hir::ConstContext::Const)
+ );
+ if !promote_all_const_fn {
+ if let ty::FnDef(def_id, _) = *fn_ty.kind() {
+ // Never promote runtime `const fn` calls of
+ // functions without `#[rustc_promotable]`.
+ if !self.tcx.is_promotable_const_fn(def_id) {
+ return Err(Unpromotable);
+ }
+ }
+ }
+
+ let is_const_fn = match *fn_ty.kind() {
+ ty::FnDef(def_id, _) => self.tcx.is_const_fn_raw(def_id),
+ _ => false,
+ };
+ if !is_const_fn {
+ return Err(Unpromotable);
+ }
+
+ self.validate_operand(callee)?;
+ for arg in args {
+ self.validate_operand(arg)?;
+ }
+
+ Ok(())
+ }
+}
+
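+// Illustrative example: in `fn f() -> &'static i32 { &(1 + 2) }`, the temporary
+// holding `1 + 2` is a promotion candidate; validation accepts it because both
+// operands are constants and the borrow is shared.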
+// FIXME(eddyb) remove the differences for promotability in `static`, `const`, `const fn`.
+pub fn validate_candidates(
+ ccx: &ConstCx<'_, '_>,
+ temps: &mut IndexVec<Local, TempState>,
+ candidates: &[Candidate],
+) -> Vec<Candidate> {
+ let mut validator = Validator { ccx, temps };
+
+ candidates
+ .iter()
+ .copied()
+ .filter(|&candidate| validator.validate_candidate(candidate).is_ok())
+ .collect()
+}
+
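+/// Moves the initialization of a validated candidate out of the source MIR and
+/// into a fresh `promoted` body, rewriting the original borrow to refer to the
+/// new promoted constant.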
+struct Promoter<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ source: &'a mut Body<'tcx>,
+ promoted: Body<'tcx>,
+ temps: &'a mut IndexVec<Local, TempState>,
+ extra_statements: &'a mut Vec<(Location, Statement<'tcx>)>,
+
+ /// If true, all nested temps are also kept in the
+ /// source MIR, not moved to the promoted MIR.
+ keep_original: bool,
+}
+
+impl<'a, 'tcx> Promoter<'a, 'tcx> {
+ fn new_block(&mut self) -> BasicBlock {
+ let span = self.promoted.span;
+ self.promoted.basic_blocks_mut().push(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator {
+ source_info: SourceInfo::outermost(span),
+ kind: TerminatorKind::Return,
+ }),
+ is_cleanup: false,
+ })
+ }
+
+ fn assign(&mut self, dest: Local, rvalue: Rvalue<'tcx>, span: Span) {
+ let last = self.promoted.basic_blocks().last().unwrap();
+ let data = &mut self.promoted[last];
+ data.statements.push(Statement {
+ source_info: SourceInfo::outermost(span),
+ kind: StatementKind::Assign(Box::new((Place::from(dest), rvalue))),
+ });
+ }
+
+ fn is_temp_kind(&self, local: Local) -> bool {
+ self.source.local_kind(local) == LocalKind::Temp
+ }
+
+ /// Copies the initialization of this temp to the
+ /// promoted MIR, recursing through temps.
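+    /// If a temp is used more than once, its original definition is kept in the
+    /// source MIR (`keep_original`) and the rvalue is cloned instead of moved out.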
+ fn promote_temp(&mut self, temp: Local) -> Local {
+ let old_keep_original = self.keep_original;
+ let loc = match self.temps[temp] {
+ TempState::Defined { location, uses, .. } if uses > 0 => {
+ if uses > 1 {
+ self.keep_original = true;
+ }
+ location
+ }
+ state => {
+ span_bug!(self.promoted.span, "{:?} not promotable: {:?}", temp, state);
+ }
+ };
+ if !self.keep_original {
+ self.temps[temp] = TempState::PromotedOut;
+ }
+
+ let num_stmts = self.source[loc.block].statements.len();
+ let new_temp = self.promoted.local_decls.push(LocalDecl::new(
+ self.source.local_decls[temp].ty,
+ self.source.local_decls[temp].source_info.span,
+ ));
+
+ debug!("promote({:?} @ {:?}/{:?}, {:?})", temp, loc, num_stmts, self.keep_original);
+
+ // First, take the Rvalue or Call out of the source MIR,
+ // or duplicate it, depending on keep_original.
+ if loc.statement_index < num_stmts {
+ let (mut rvalue, source_info) = {
+ let statement = &mut self.source[loc.block].statements[loc.statement_index];
+ let StatementKind::Assign(box (_, ref mut rhs)) = statement.kind else {
+ span_bug!(
+ statement.source_info.span,
+ "{:?} is not an assignment",
+ statement
+ );
+ };
+
+ (
+ if self.keep_original {
+ rhs.clone()
+ } else {
+ let unit = Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span: statement.source_info.span,
+ user_ty: None,
+ literal: ConstantKind::zero_sized(self.tcx.types.unit),
+ })));
+ mem::replace(rhs, unit)
+ },
+ statement.source_info,
+ )
+ };
+
+ self.visit_rvalue(&mut rvalue, loc);
+ self.assign(new_temp, rvalue, source_info.span);
+ } else {
+ let terminator = if self.keep_original {
+ self.source[loc.block].terminator().clone()
+ } else {
+ let terminator = self.source[loc.block].terminator_mut();
+ let target = match terminator.kind {
+ TerminatorKind::Call { target: Some(target), .. } => target,
+ ref kind => {
+ span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+ }
+ };
+ Terminator {
+ source_info: terminator.source_info,
+ kind: mem::replace(&mut terminator.kind, TerminatorKind::Goto { target }),
+ }
+ };
+
+ match terminator.kind {
+ TerminatorKind::Call { mut func, mut args, from_hir_call, fn_span, .. } => {
+ self.visit_operand(&mut func, loc);
+ for arg in &mut args {
+ self.visit_operand(arg, loc);
+ }
+
+ let last = self.promoted.basic_blocks().last().unwrap();
+ let new_target = self.new_block();
+
+ *self.promoted[last].terminator_mut() = Terminator {
+ kind: TerminatorKind::Call {
+ func,
+ args,
+ cleanup: None,
+ destination: Place::from(new_temp),
+ target: Some(new_target),
+ from_hir_call,
+ fn_span,
+ },
+ source_info: SourceInfo::outermost(terminator.source_info.span),
+ ..terminator
+ };
+ }
+ ref kind => {
+ span_bug!(terminator.source_info.span, "{:?} not promotable", kind);
+ }
+ };
+ };
+
+ self.keep_original = old_keep_original;
+ new_temp
+ }
+
+ fn promote_candidate(mut self, candidate: Candidate, next_promoted_id: usize) -> Body<'tcx> {
+ let def = self.source.source.with_opt_param();
+ let mut rvalue = {
+ let promoted = &mut self.promoted;
+ let promoted_id = Promoted::new(next_promoted_id);
+ let tcx = self.tcx;
+ let mut promoted_operand = |ty, span| {
+ promoted.span = span;
+ promoted.local_decls[RETURN_PLACE] = LocalDecl::new(ty, span);
+ let _const = tcx.mk_const(ty::ConstS {
+ ty,
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def,
+ substs: InternalSubsts::for_item(tcx, def.did, |param, _| {
+ if let ty::GenericParamDefKind::Lifetime = param.kind {
+ tcx.lifetimes.re_erased.into()
+ } else {
+ tcx.mk_param_from_def(param)
+ }
+ }),
+ promoted: Some(promoted_id),
+ }),
+ });
+
+ Operand::Constant(Box::new(Constant {
+ span,
+ user_ty: None,
+ literal: ConstantKind::from_const(_const, tcx),
+ }))
+ };
+ let blocks = self.source.basic_blocks.as_mut();
+ let local_decls = &mut self.source.local_decls;
+ let loc = candidate.location;
+ let statement = &mut blocks[loc.block].statements[loc.statement_index];
+ match statement.kind {
+ StatementKind::Assign(box (
+ _,
+ Rvalue::Ref(ref mut region, borrow_kind, ref mut place),
+ )) => {
+ // Use the underlying local for this (necessarily interior) borrow.
+ let ty = local_decls[place.local].ty;
+ let span = statement.source_info.span;
+
+ let ref_ty = tcx.mk_ref(
+ tcx.lifetimes.re_erased,
+ ty::TypeAndMut { ty, mutbl: borrow_kind.to_mutbl_lossy() },
+ );
+
+ *region = tcx.lifetimes.re_erased;
+
+ let mut projection = vec![PlaceElem::Deref];
+ projection.extend(place.projection);
+ place.projection = tcx.intern_place_elems(&projection);
+
+ // Create a temp to hold the promoted reference.
+ // This is because `*r` requires `r` to be a local,
+ // otherwise we would use the `promoted` directly.
+ let mut promoted_ref = LocalDecl::new(ref_ty, span);
+ promoted_ref.source_info = statement.source_info;
+ let promoted_ref = local_decls.push(promoted_ref);
+ assert_eq!(self.temps.push(TempState::Unpromotable), promoted_ref);
+
+ let promoted_ref_statement = Statement {
+ source_info: statement.source_info,
+ kind: StatementKind::Assign(Box::new((
+ Place::from(promoted_ref),
+ Rvalue::Use(promoted_operand(ref_ty, span)),
+ ))),
+ };
+ self.extra_statements.push((loc, promoted_ref_statement));
+
+ Rvalue::Ref(
+ tcx.lifetimes.re_erased,
+ borrow_kind,
+ Place {
+ local: mem::replace(&mut place.local, promoted_ref),
+ projection: List::empty(),
+ },
+ )
+ }
+ _ => bug!(),
+ }
+ };
+
+ assert_eq!(self.new_block(), START_BLOCK);
+ self.visit_rvalue(
+ &mut rvalue,
+ Location { block: BasicBlock::new(0), statement_index: usize::MAX },
+ );
+
+ let span = self.promoted.span;
+ self.assign(RETURN_PLACE, rvalue, span);
+ self.promoted
+ }
+}
+
+/// Replaces all temporaries with their promoted counterparts.
+impl<'a, 'tcx> MutVisitor<'tcx> for Promoter<'a, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+ if self.is_temp_kind(*local) {
+ *local = self.promote_temp(*local);
+ }
+ }
+}
+
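+/// Promotes the validated `candidates` out of `body`, returning the promoted MIR
+/// bodies indexed by `Promoted`. Assignments to and drops of fully promoted temps
+/// are removed from `body` afterwards.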
+pub fn promote_candidates<'tcx>(
+ body: &mut Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ mut temps: IndexVec<Local, TempState>,
+ candidates: Vec<Candidate>,
+) -> IndexVec<Promoted, Body<'tcx>> {
+ // Visit candidates in reverse, in case they're nested.
+ debug!("promote_candidates({:?})", candidates);
+
+ let mut promotions = IndexVec::new();
+
+ let mut extra_statements = vec![];
+ for candidate in candidates.into_iter().rev() {
+ let Location { block, statement_index } = candidate.location;
+ if let StatementKind::Assign(box (place, _)) = &body[block].statements[statement_index].kind
+ {
+ if let Some(local) = place.as_local() {
+ if temps[local] == TempState::PromotedOut {
+ // Already promoted.
+ continue;
+ }
+ }
+ }
+
+ // Declare return place local so that `mir::Body::new` doesn't complain.
+ let initial_locals = iter::once(LocalDecl::new(tcx.types.never, body.span)).collect();
+
+ let mut scope = body.source_scopes[body.source_info(candidate.location).scope].clone();
+ scope.parent_scope = None;
+
+ let promoted = Body::new(
+ body.source, // `promoted` gets filled in below
+ IndexVec::new(),
+ IndexVec::from_elem_n(scope, 1),
+ initial_locals,
+ IndexVec::new(),
+ 0,
+ vec![],
+ body.span,
+ body.generator_kind(),
+ body.tainted_by_errors,
+ );
+
+ let promoter = Promoter {
+ promoted,
+ tcx,
+ source: body,
+ temps: &mut temps,
+ extra_statements: &mut extra_statements,
+ keep_original: false,
+ };
+
+ let mut promoted = promoter.promote_candidate(candidate, promotions.len());
+ promoted.source.promoted = Some(promotions.next_index());
+ promotions.push(promoted);
+ }
+
+ // Insert each of `extra_statements` before its indicated location, which
+ // has to be done in reverse location order, to not invalidate the rest.
+ extra_statements.sort_by_key(|&(loc, _)| cmp::Reverse(loc));
+ for (loc, statement) in extra_statements {
+ body[loc.block].statements.insert(loc.statement_index, statement);
+ }
+
+    // Eliminate assignments to, and drops of, promoted temps.
+ let promoted = |index: Local| temps[index] == TempState::PromotedOut;
+ for block in body.basic_blocks_mut() {
+ block.statements.retain(|statement| match &statement.kind {
+ StatementKind::Assign(box (place, _)) => {
+ if let Some(index) = place.as_local() {
+ !promoted(index)
+ } else {
+ true
+ }
+ }
+ StatementKind::StorageLive(index) | StatementKind::StorageDead(index) => {
+ !promoted(*index)
+ }
+ _ => true,
+ });
+ let terminator = block.terminator_mut();
+ if let TerminatorKind::Drop { place, target, .. } = &terminator.kind {
+ if let Some(index) = place.as_local() {
+ if promoted(index) {
+ terminator.kind = TerminatorKind::Goto { target: *target };
+ }
+ }
+ }
+ }
+
+ promotions
+}
+
+/// This function returns `true` if the function being called in the array
+/// repeat expression is a `const` function.
+pub fn is_const_fn_in_array_repeat_expression<'tcx>(
+ ccx: &ConstCx<'_, 'tcx>,
+ place: &Place<'tcx>,
+ body: &Body<'tcx>,
+) -> bool {
+ match place.as_local() {
+ // rule out cases such as: `let my_var = some_fn(); [my_var; N]`
+ Some(local) if body.local_decls[local].is_user_variable() => return false,
+ None => return false,
+ _ => {}
+ }
+
+ for block in body.basic_blocks() {
+ if let Some(Terminator { kind: TerminatorKind::Call { func, destination, .. }, .. }) =
+ &block.terminator
+ {
+ if let Operand::Constant(box Constant { literal, .. }) = func {
+ if let ty::FnDef(def_id, _) = *literal.ty().kind() {
+ if destination == place {
+ if ccx.tcx.is_const_fn(def_id) {
+ return true;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ false
+}
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
new file mode 100644
index 000000000..15e820f2d
--- /dev/null
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -0,0 +1,913 @@
+//! Validates the MIR to ensure that invariants are upheld.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_index::bit_set::BitSet;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{
+ traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, Local, Location,
+ MirPass, MirPhase, Operand, Place, PlaceElem, PlaceRef, ProjectionElem, Rvalue, SourceScope,
+ Statement, StatementKind, Terminator, TerminatorKind, UnOp, START_BLOCK,
+};
+use rustc_middle::ty::fold::BottomUpFolder;
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeFoldable, TypeVisitable};
+use rustc_mir_dataflow::impls::MaybeStorageLive;
+use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_mir_dataflow::{Analysis, ResultsCursor};
+use rustc_target::abi::{Size, VariantIdx};
+
+#[derive(Copy, Clone, Debug)]
+enum EdgeKind {
+ Unwind,
+ Normal,
+}
+
+pub struct Validator {
+ /// Describes at which point in the pipeline this validation is happening.
+ pub when: String,
+ /// The phase for which we are upholding the dialect. If the given phase forbids a specific
+ /// element, this validator will now emit errors if that specific element is encountered.
+ /// Note that phases that change the dialect cause all *following* phases to check the
+ /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
+ /// itself.
+ pub mir_phase: MirPhase,
+}
+
+impl<'tcx> MirPass<'tcx> for Validator {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // FIXME(JakobDegen): These bodies are never instantiated during codegen anyway, so it's
+        // not terribly important that they pass the validator. However, I think other passes might
+ // still see them, in which case they might be surprised. It would probably be better if we
+ // didn't put this through the MIR pipeline at all.
+ if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
+ return;
+ }
+ let def_id = body.source.def_id();
+ let param_env = tcx.param_env(def_id);
+ let mir_phase = self.mir_phase;
+
+ let always_live_locals = always_storage_live_locals(body);
+ let storage_liveness = MaybeStorageLive::new(always_live_locals)
+ .into_engine(tcx, body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(body);
+
+ TypeChecker {
+ when: &self.when,
+ body,
+ tcx,
+ param_env,
+ mir_phase,
+ reachable_blocks: traversal::reachable_as_bitset(body),
+ storage_liveness,
+ place_cache: Vec::new(),
+ value_cache: Vec::new(),
+ }
+ .visit_body(body);
+ }
+}
+
+/// Returns whether the two types are equal up to lifetimes.
+/// All lifetimes, including higher-ranked ones, get ignored for this comparison.
+/// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
+///
+/// The point of this function is to approximate "equal up to subtyping". However,
+/// the approximation is incorrect as variance is ignored.
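+///
+/// For example, `&'a u32` and `&'b u32` compare equal here, and so do
+/// `for<'a> fn(&'a u32)` and `fn(&'static u32)`, since all lifetimes (including
+/// higher-ranked ones) are erased before comparing.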
+pub fn equal_up_to_regions<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ src: Ty<'tcx>,
+ dest: Ty<'tcx>,
+) -> bool {
+ // Fast path.
+ if src == dest {
+ return true;
+ }
+
+ // Normalize lifetimes away on both sides, then compare.
+ let normalize = |ty: Ty<'tcx>| {
+ tcx.normalize_erasing_regions(
+ param_env,
+ ty.fold_with(&mut BottomUpFolder {
+ tcx,
+ // FIXME: We erase all late-bound lifetimes, but this is not fully correct.
+ // If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
+ // this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
+ // since one may have an `impl SomeTrait for fn(&32)` and
+ // `impl SomeTrait for fn(&'static u32)` at the same time which
+ // specify distinct values for Assoc. (See also #56105)
+ lt_op: |_| tcx.lifetimes.re_erased,
+ // Leave consts and types unchanged.
+ ct_op: |ct| ct,
+ ty_op: |ty| ty,
+ }),
+ )
+ };
+ tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok())
+}
+
+struct TypeChecker<'a, 'tcx> {
+ when: &'a str,
+ body: &'a Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ mir_phase: MirPhase,
+ reachable_blocks: BitSet<BasicBlock>,
+ storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
+ place_cache: Vec<PlaceRef<'tcx>>,
+ value_cache: Vec<u128>,
+}
+
+impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
+ fn fail(&self, location: Location, msg: impl AsRef<str>) {
+ let span = self.body.source_info(location).span;
+ // We use `delay_span_bug` as we might see broken MIR when other errors have already
+ // occurred.
+ self.tcx.sess.diagnostic().delay_span_bug(
+ span,
+ &format!(
+ "broken MIR in {:?} ({}) at {:?}:\n{}",
+ self.body.source.instance,
+ self.when,
+ location,
+ msg.as_ref()
+ ),
+ );
+ }
+
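+    /// Checks that an edge from `location` to `bb` is well-formed: normal edges
+    /// must connect blocks of equal cleanup-ness, while unwind edges must go from
+    /// a non-cleanup block to a cleanup block.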
+ fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
+ if bb == START_BLOCK {
+ self.fail(location, "start block must not have predecessors")
+ }
+ if let Some(bb) = self.body.basic_blocks().get(bb) {
+ let src = self.body.basic_blocks().get(location.block).unwrap();
+ match (src.is_cleanup, bb.is_cleanup, edge_kind) {
+ // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
+ (false, false, EdgeKind::Normal)
+ // Non-cleanup blocks can jump to cleanup blocks along unwind edges
+ | (false, true, EdgeKind::Unwind)
+ // Cleanup blocks can jump to cleanup blocks along non-unwind edges
+ | (true, true, EdgeKind::Normal) => {}
+ // All other jumps are invalid
+ _ => {
+ self.fail(
+ location,
+ format!(
+ "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
+ edge_kind,
+ bb,
+ src.is_cleanup,
+ bb.is_cleanup,
+ )
+ )
+ }
+ }
+ } else {
+ self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
+ }
+ }
+
+ /// Check if src can be assigned into dest.
+    /// This is not precise; it will accept some incorrect assignments.
+ fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
+ // Fast path before we normalize.
+ if src == dest {
+ // Equal types, all is good.
+ return true;
+ }
+ // Normalization reveals opaque types, but we may be validating MIR while computing
+ // said opaque types, causing cycles.
+ if (src, dest).has_opaque_types() {
+ return true;
+ }
+ // Normalize projections and things like that.
+ let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
+ let src = self.tcx.normalize_erasing_regions(param_env, src);
+ let dest = self.tcx.normalize_erasing_regions(param_env, dest);
+
+ // Type-changing assignments can happen when subtyping is used. While
+ // all normal lifetimes are erased, higher-ranked types with their
+ // late-bound lifetimes are still around and can lead to type
+ // differences. So we compare ignoring lifetimes.
+ equal_up_to_regions(self.tcx, param_env, src, dest)
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
+ fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
+ if self.body.local_decls.get(local).is_none() {
+ self.fail(
+ location,
+ format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
+ );
+ }
+
+ if self.reachable_blocks.contains(location.block) && context.is_use() {
+ // We check that the local is live whenever it is used. Technically, violating this
+            // restriction is merely UB and does not make the MIR ill-formed. This means
+ // that an optimization which turns MIR that already has UB into MIR that fails this
+ // check is not necessarily wrong. However, we have no such optimizations at the moment,
+ // and so we include this check anyway to help us catch bugs. If you happen to write an
+ // optimization that might cause this to incorrectly fire, feel free to remove this
+ // check.
+ self.storage_liveness.seek_after_primary_effect(location);
+ let locals_with_storage = self.storage_liveness.get();
+ if !locals_with_storage.contains(local) {
+ self.fail(location, format!("use of local {:?}, which has no storage here", local));
+ }
+ }
+ }
+
+ fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+ // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
+ if self.tcx.sess.opts.unstable_opts.validate_mir && self.mir_phase < MirPhase::DropsLowered
+ {
+ // `Operand::Copy` is only supposed to be used with `Copy` types.
+ if let Operand::Copy(place) = operand {
+ let ty = place.ty(&self.body.local_decls, self.tcx).ty;
+ let span = self.body.source_info(location).span;
+
+ if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
+ self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
+ }
+ }
+ }
+
+ self.super_operand(operand, location);
+ }
+
+ fn visit_projection_elem(
+ &mut self,
+ local: Local,
+ proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ match elem {
+ ProjectionElem::Index(index) => {
+ let index_ty = self.body.local_decls[index].ty;
+ if index_ty != self.tcx.types.usize {
+ self.fail(location, format!("bad index ({:?} != usize)", index_ty))
+ }
+ }
+ ProjectionElem::Deref if self.mir_phase >= MirPhase::GeneratorsLowered => {
+ let base_ty = Place::ty_from(local, proj_base, &self.body.local_decls, self.tcx).ty;
+
+ if base_ty.is_box() {
+ self.fail(
+ location,
+ format!("{:?} dereferenced after ElaborateBoxDerefs", base_ty),
+ )
+ }
+ }
+ ProjectionElem::Field(f, ty) => {
+ let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
+ let parent_ty = parent.ty(&self.body.local_decls, self.tcx);
+ let fail_out_of_bounds = |this: &Self, location| {
+ this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
+ };
+ let check_equal = |this: &Self, location, f_ty| {
+ if !this.mir_assign_valid_types(ty, f_ty) {
+ this.fail(
+ location,
+                        format!(
+                            "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is `{:?}`",
+ parent, f, ty, f_ty
+ )
+ )
+ }
+ };
+
+ let kind = match parent_ty.ty.kind() {
+ &ty::Opaque(def_id, substs) => {
+ self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
+ }
+ kind => kind,
+ };
+
+ match kind {
+ ty::Tuple(fields) => {
+ let Some(f_ty) = fields.get(f.as_usize()) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+ check_equal(self, location, *f_ty);
+ }
+ ty::Adt(adt_def, substs) => {
+ let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
+ let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+ check_equal(self, location, field.ty(self.tcx, substs));
+ }
+ ty::Closure(_, substs) => {
+ let substs = substs.as_closure();
+ let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+ check_equal(self, location, f_ty);
+ }
+ &ty::Generator(def_id, substs, _) => {
+ let f_ty = if let Some(var) = parent_ty.variant_index {
+ let gen_body = if def_id == self.body.source.def_id() {
+ self.body
+ } else {
+ self.tcx.optimized_mir(def_id)
+ };
+
+ let Some(layout) = gen_body.generator_layout() else {
+ self.fail(location, format!("No generator layout for {:?}", parent_ty));
+ return;
+ };
+
+ let Some(&local) = layout.variant_fields[var].get(f) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+
+ let Some(&f_ty) = layout.field_tys.get(local) else {
+ self.fail(location, format!("Out of bounds local {:?} for {:?}", local, parent_ty));
+ return;
+ };
+
+ f_ty
+ } else {
+ let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
+ fail_out_of_bounds(self, location);
+ return;
+ };
+
+ f_ty
+ };
+
+ check_equal(self, location, f_ty);
+ }
+ _ => {
+ self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
+ }
+ }
+ }
+ _ => {}
+ }
+ self.super_projection_elem(local, proj_base, elem, context, location);
+ }
+
+ fn visit_place(&mut self, place: &Place<'tcx>, cntxt: PlaceContext, location: Location) {
+ // Set off any `bug!`s in the type computation code
+ let _ = place.ty(&self.body.local_decls, self.tcx);
+
+ if self.mir_phase >= MirPhase::Derefered
+ && place.projection.len() > 1
+ && cntxt != PlaceContext::NonUse(VarDebugInfo)
+ && place.projection[1..].contains(&ProjectionElem::Deref)
+ {
+            self.fail(location, format!("{:?} has deref at the wrong place", place));
+ }
+
+ self.super_place(place, cntxt, location);
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ macro_rules! check_kinds {
+ ($t:expr, $text:literal, $($patterns:tt)*) => {
+ if !matches!(($t).kind(), $($patterns)*) {
+ self.fail(location, format!($text, $t));
+ }
+ };
+ }
+ match rvalue {
+ Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
+ Rvalue::Aggregate(agg_kind, _) => {
+ let disallowed = match **agg_kind {
+ AggregateKind::Array(..) => false,
+ AggregateKind::Generator(..) => self.mir_phase >= MirPhase::GeneratorsLowered,
+ _ => self.mir_phase >= MirPhase::Deaggregated,
+ };
+ if disallowed {
+ self.fail(
+ location,
+                        format!("{:?} should have been lowered to field assignments", rvalue),
+ )
+ }
+ }
+ Rvalue::Ref(_, BorrowKind::Shallow, _) => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
+ );
+ }
+ }
+ Rvalue::Ref(..) => {}
+ Rvalue::Len(p) => {
+ let pty = p.ty(&self.body.local_decls, self.tcx).ty;
+            check_kinds!(
+                pty,
+                "Cannot compute length of non-array/slice type {:?}",
+                ty::Array(..) | ty::Slice(..)
+            );
+ }
+ Rvalue::BinaryOp(op, vals) => {
+ use BinOp::*;
+ let a = vals.0.ty(&self.body.local_decls, self.tcx);
+ let b = vals.1.ty(&self.body.local_decls, self.tcx);
+ match op {
+ Offset => {
+ check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
+ if b != self.tcx.types.isize && b != self.tcx.types.usize {
+                        self.fail(location, format!("Cannot offset by non-isize/usize type {:?}", b));
+ }
+ }
+ Eq | Lt | Le | Ne | Ge | Gt => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot compare type {:?}",
+ ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::RawPtr(..)
+ | ty::FnPtr(..)
+ )
+ }
+ // The function pointer types can have lifetimes
+ if !self.mir_assign_valid_types(a, b) {
+ self.fail(
+ location,
+ format!("Cannot compare unequal types {:?} and {:?}", a, b),
+ );
+ }
+ }
+ Shl | Shr => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot shift non-integer type {:?}",
+ ty::Uint(..) | ty::Int(..)
+ )
+ }
+ }
+ BitAnd | BitOr | BitXor => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot perform bitwise op on type {:?}",
+ ty::Uint(..) | ty::Int(..) | ty::Bool
+ )
+ }
+ if a != b {
+ self.fail(
+ location,
+ format!(
+ "Cannot perform bitwise op on unequal types {:?} and {:?}",
+ a, b
+ ),
+ );
+ }
+ }
+ Add | Sub | Mul | Div | Rem => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot perform arithmetic on type {:?}",
+ ty::Uint(..) | ty::Int(..) | ty::Float(..)
+ )
+ }
+ if a != b {
+ self.fail(
+ location,
+ format!(
+ "Cannot perform arithmetic on unequal types {:?} and {:?}",
+ a, b
+ ),
+ );
+ }
+ }
+ }
+ }
+ Rvalue::CheckedBinaryOp(op, vals) => {
+ use BinOp::*;
+ let a = vals.0.ty(&self.body.local_decls, self.tcx);
+ let b = vals.1.ty(&self.body.local_decls, self.tcx);
+ match op {
+ Add | Sub | Mul => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot perform checked arithmetic on type {:?}",
+ ty::Uint(..) | ty::Int(..)
+ )
+ }
+ if a != b {
+ self.fail(
+ location,
+ format!(
+ "Cannot perform checked arithmetic on unequal types {:?} and {:?}",
+ a, b
+ ),
+ );
+ }
+ }
+ Shl | Shr => {
+ for x in [a, b] {
+ check_kinds!(
+ x,
+ "Cannot perform checked shift on non-integer type {:?}",
+ ty::Uint(..) | ty::Int(..)
+ )
+ }
+ }
+ _ => self.fail(location, format!("There is no checked version of {:?}", op)),
+ }
+ }
+ Rvalue::UnaryOp(op, operand) => {
+ let a = operand.ty(&self.body.local_decls, self.tcx);
+ match op {
+ UnOp::Neg => {
+ check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
+ }
+ UnOp::Not => {
+ check_kinds!(
+ a,
+ "Cannot binary not type {:?}",
+ ty::Int(..) | ty::Uint(..) | ty::Bool
+ );
+ }
+ }
+ }
+ Rvalue::ShallowInitBox(operand, _) => {
+ let a = operand.ty(&self.body.local_decls, self.tcx);
+ check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
+ }
+ Rvalue::Cast(kind, operand, target_type) => {
+ match kind {
+ CastKind::Misc => {
+ let op_ty = operand.ty(self.body, self.tcx);
+ if op_ty.is_enum() {
+ self.fail(
+ location,
+ format!(
+ "enum -> int casts should go through `Rvalue::Discriminant`: {operand:?}:{op_ty} as {target_type}",
+ ),
+ );
+ }
+ }
+ // Nothing to check here
+ CastKind::PointerFromExposedAddress
+ | CastKind::PointerExposeAddress
+ | CastKind::Pointer(_) => {}
+ }
+ }
+ Rvalue::Repeat(_, _)
+ | Rvalue::ThreadLocalRef(_)
+ | Rvalue::AddressOf(_, _)
+ | Rvalue::NullaryOp(_, _)
+ | Rvalue::Discriminant(_) => {}
+ }
+ self.super_rvalue(rvalue, location);
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ match &statement.kind {
+ StatementKind::Assign(box (dest, rvalue)) => {
+ // LHS and RHS of the assignment must have the same type.
+ let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
+ let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
+ if !self.mir_assign_valid_types(right_ty, left_ty) {
+ self.fail(
+ location,
+ format!(
+ "encountered `{:?}` with incompatible types:\n\
+ left-hand side has type: {}\n\
+ right-hand side has type: {}",
+ statement.kind, left_ty, right_ty,
+ ),
+ );
+ }
+ if let Rvalue::CopyForDeref(place) = rvalue {
+                    if place.ty(&self.body.local_decls, self.tcx).ty.builtin_deref(true).is_none()
+                    {
+ self.fail(
+ location,
+ "`CopyForDeref` should only be used for dereferenceable types",
+ )
+ }
+ }
+ // FIXME(JakobDegen): Check this for all rvalues, not just this one.
+ if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue {
+ // The sides of an assignment must not alias. Currently this just checks whether
+ // the places are identical.
+ if dest == src {
+ self.fail(
+ location,
+ "encountered `Assign` statement with overlapping memory",
+ );
+ }
+ }
+ }
+ StatementKind::AscribeUserType(..) => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`AscribeUserType` should have been removed after drop lowering phase",
+ );
+ }
+ }
+ StatementKind::FakeRead(..) => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`FakeRead` should have been removed after drop lowering phase",
+ );
+ }
+ }
+ StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ let src_ty = src.ty(&self.body.local_decls, self.tcx);
+ let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
+ src_deref.ty
+ } else {
+ self.fail(
+ location,
+ format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
+ );
+ return;
+ };
+ let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
+ let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
+ dst_deref.ty
+ } else {
+ self.fail(
+ location,
+ format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
+ );
+ return;
+ };
+                // Since `CopyNonOverlapping` is parameterized by a single type, we only
+                // need to check that the pointee types are equal, without keeping an
+                // extra type parameter around.
+ if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
+ self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
+ }
+
+ let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
+ if op_cnt_ty != self.tcx.types.usize {
+ self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
+ }
+ }
+ StatementKind::SetDiscriminant { place, .. } => {
+ if self.mir_phase < MirPhase::Deaggregated {
+                    self.fail(location, "`SetDiscriminant` is not allowed until deaggregation");
+ }
+ let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
+ if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Opaque(..)) {
+ self.fail(
+ location,
+ format!(
+ "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}",
+ pty
+ ),
+ );
+ }
+ }
+ StatementKind::Deinit(..) => {
+ if self.mir_phase < MirPhase::Deaggregated {
+                    self.fail(location, "`Deinit` is not allowed until deaggregation");
+ }
+ }
+ StatementKind::Retag(_, _) => {
+ // FIXME(JakobDegen) The validator should check that `self.mir_phase <
+ // DropsLowered`. However, this causes ICEs with generation of drop shims, which
+ // seem to fail to set their `MirPhase` correctly.
+ }
+ StatementKind::StorageLive(..)
+ | StatementKind::StorageDead(..)
+ | StatementKind::Coverage(_)
+ | StatementKind::Nop => {}
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ match &terminator.kind {
+ TerminatorKind::Goto { target } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ }
+ TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
+ let ty = discr.ty(&self.body.local_decls, self.tcx);
+ if ty != *switch_ty {
+ self.fail(
+ location,
+ format!(
+ "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
+ ty, switch_ty,
+ ),
+ );
+ }
+
+ let target_width = self.tcx.sess.target.pointer_width;
+
+ let size = Size::from_bits(match switch_ty.kind() {
+ ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
+ ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
+ ty::Char => 32,
+ ty::Bool => 1,
+ other => bug!("unhandled type: {:?}", other),
+ });
+
+ for (value, target) in targets.iter() {
+ if Scalar::<()>::try_from_uint(value, size).is_none() {
+ self.fail(
+ location,
+ format!("the value {:#x} is not a proper {:?}", value, switch_ty),
+ )
+ }
+
+ self.check_edge(location, target, EdgeKind::Normal);
+ }
+ self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
+
+ self.value_cache.clear();
+ self.value_cache.extend(targets.iter().map(|(value, _)| value));
+ let all_len = self.value_cache.len();
+ self.value_cache.sort_unstable();
+ self.value_cache.dedup();
+ let has_duplicates = all_len != self.value_cache.len();
+ if has_duplicates {
+ self.fail(
+ location,
+ format!(
+ "duplicated values in `SwitchInt` terminator: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
+ }
+ TerminatorKind::Drop { target, unwind, .. } => {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ if let Some(unwind) = unwind {
+ self.check_edge(location, *unwind, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::DropAndReplace { target, unwind, .. } => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`DropAndReplace` should have been removed during drop elaboration",
+ );
+ }
+ self.check_edge(location, *target, EdgeKind::Normal);
+ if let Some(unwind) = unwind {
+ self.check_edge(location, *unwind, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::Call { func, args, destination, target, cleanup, .. } => {
+ let func_ty = func.ty(&self.body.local_decls, self.tcx);
+ match func_ty.kind() {
+ ty::FnPtr(..) | ty::FnDef(..) => {}
+ _ => self.fail(
+ location,
+ format!("encountered non-callable type {} in `Call` terminator", func_ty),
+ ),
+ }
+ if let Some(target) = target {
+ self.check_edge(location, *target, EdgeKind::Normal);
+ }
+ if let Some(cleanup) = cleanup {
+ self.check_edge(location, *cleanup, EdgeKind::Unwind);
+ }
+
+                // The call destination place and any `Operand::Move` place used as an
+                // argument might be passed by reference to the callee. Consequently they
+                // must not overlap. Currently this simply checks for duplicate places.
+ self.place_cache.clear();
+ self.place_cache.push(destination.as_ref());
+ for arg in args {
+ if let Operand::Move(place) = arg {
+ self.place_cache.push(place.as_ref());
+ }
+ }
+ let all_len = self.place_cache.len();
+ let mut dedup = FxHashSet::default();
+ self.place_cache.retain(|p| dedup.insert(*p));
+ let has_duplicates = all_len != self.place_cache.len();
+ if has_duplicates {
+ self.fail(
+ location,
+ format!(
+ "encountered overlapping memory in `Call` terminator: {:?}",
+ terminator.kind,
+ ),
+ );
+ }
+ }
+ TerminatorKind::Assert { cond, target, cleanup, .. } => {
+ let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
+ if cond_ty != self.tcx.types.bool {
+ self.fail(
+ location,
+ format!(
+ "encountered non-boolean condition of type {} in `Assert` terminator",
+ cond_ty
+ ),
+ );
+ }
+ self.check_edge(location, *target, EdgeKind::Normal);
+ if let Some(cleanup) = cleanup {
+ self.check_edge(location, *cleanup, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::Yield { resume, drop, .. } => {
+ if self.body.generator.is_none() {
+ self.fail(location, "`Yield` cannot appear outside generator bodies");
+ }
+ if self.mir_phase >= MirPhase::GeneratorsLowered {
+ self.fail(location, "`Yield` should have been replaced by generator lowering");
+ }
+ self.check_edge(location, *resume, EdgeKind::Normal);
+ if let Some(drop) = drop {
+ self.check_edge(location, *drop, EdgeKind::Normal);
+ }
+ }
+ TerminatorKind::FalseEdge { real_target, imaginary_target } => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`FalseEdge` should have been removed after drop elaboration",
+ );
+ }
+ self.check_edge(location, *real_target, EdgeKind::Normal);
+ self.check_edge(location, *imaginary_target, EdgeKind::Normal);
+ }
+ TerminatorKind::FalseUnwind { real_target, unwind } => {
+ if self.mir_phase >= MirPhase::DropsLowered {
+ self.fail(
+ location,
+ "`FalseUnwind` should have been removed after drop elaboration",
+ );
+ }
+ self.check_edge(location, *real_target, EdgeKind::Normal);
+ if let Some(unwind) = unwind {
+ self.check_edge(location, *unwind, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::InlineAsm { destination, cleanup, .. } => {
+ if let Some(destination) = destination {
+ self.check_edge(location, *destination, EdgeKind::Normal);
+ }
+ if let Some(cleanup) = cleanup {
+ self.check_edge(location, *cleanup, EdgeKind::Unwind);
+ }
+ }
+ TerminatorKind::GeneratorDrop => {
+ if self.body.generator.is_none() {
+ self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
+ }
+ if self.mir_phase >= MirPhase::GeneratorsLowered {
+ self.fail(
+ location,
+ "`GeneratorDrop` should have been replaced by generator lowering",
+ );
+ }
+ }
+ TerminatorKind::Resume | TerminatorKind::Abort => {
+ let bb = location.block;
+ if !self.body.basic_blocks()[bb].is_cleanup {
+ self.fail(location, "Cannot `Resume` or `Abort` from non-cleanup basic block")
+ }
+ }
+ TerminatorKind::Return => {
+ let bb = location.block;
+ if self.body.basic_blocks()[bb].is_cleanup {
+ self.fail(location, "Cannot `Return` from cleanup basic block")
+ }
+ }
+ TerminatorKind::Unreachable => {}
+ }
+
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_source_scope(&mut self, scope: SourceScope) {
+ if self.body.source_scopes.get(scope).is_none() {
+ self.tcx.sess.diagnostic().delay_span_bug(
+ self.body.span,
+ &format!(
+ "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
+ self.body.source.instance, self.when, scope,
+ ),
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/util/aggregate.rs b/compiler/rustc_const_eval/src/util/aggregate.rs
new file mode 100644
index 000000000..180a40043
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/aggregate.rs
@@ -0,0 +1,77 @@
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use std::convert::TryFrom;
+use std::iter::TrustedLen;
+
+/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
+///
+/// Produces something like
+///
+/// (lhs as Variant).field0 = arg0; // We only have a downcast if this is an enum
+/// (lhs as Variant).field1 = arg1;
+/// discriminant(lhs) = variant_index; // If lhs is an enum or generator.
+pub fn expand_aggregate<'tcx>(
+ orig_lhs: Place<'tcx>,
+ operands: impl Iterator<Item = (Operand<'tcx>, Ty<'tcx>)> + TrustedLen,
+ kind: AggregateKind<'tcx>,
+ source_info: SourceInfo,
+ tcx: TyCtxt<'tcx>,
+) -> impl Iterator<Item = Statement<'tcx>> + TrustedLen {
+ let mut lhs = orig_lhs;
+ let mut set_discriminant = None;
+ let active_field_index = match kind {
+ AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
+ let adt_def = tcx.adt_def(adt_did);
+ if adt_def.is_enum() {
+ set_discriminant = Some(Statement {
+ kind: StatementKind::SetDiscriminant {
+ place: Box::new(orig_lhs),
+ variant_index,
+ },
+ source_info,
+ });
+ lhs = tcx.mk_place_downcast(orig_lhs, adt_def, variant_index);
+ }
+ active_field_index
+ }
+ AggregateKind::Generator(..) => {
+ // Right now we only support initializing generators to
+ // variant 0 (Unresumed).
+ let variant_index = VariantIdx::new(0);
+ set_discriminant = Some(Statement {
+ kind: StatementKind::SetDiscriminant { place: Box::new(orig_lhs), variant_index },
+ source_info,
+ });
+
+ // Operands are upvars stored on the base place, so no
+ // downcast is necessary.
+
+ None
+ }
+ _ => None,
+ };
+
+ let operands = operands.enumerate().map(move |(i, (op, ty))| {
+ let lhs_field = if let AggregateKind::Array(_) = kind {
+ let offset = u64::try_from(i).unwrap();
+ tcx.mk_place_elem(
+ lhs,
+ ProjectionElem::ConstantIndex { offset, min_length: offset + 1, from_end: false },
+ )
+ } else {
+ let field = Field::new(active_field_index.unwrap_or(i));
+ tcx.mk_place_field(lhs, field, ty)
+ };
+ Statement {
+ source_info,
+ kind: StatementKind::Assign(Box::new((lhs_field, Rvalue::Use(op)))),
+ }
+ });
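+    // `Deinit(lhs)` is emitted first so the whole place is deinitialized before
+    // the per-field assignments (and the optional `SetDiscriminant`) write into it.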
+ [Statement { source_info, kind: StatementKind::Deinit(Box::new(orig_lhs)) }]
+ .into_iter()
+ .chain(operands)
+ .chain(set_discriminant)
+}
diff --git a/compiler/rustc_const_eval/src/util/alignment.rs b/compiler/rustc_const_eval/src/util/alignment.rs
new file mode 100644
index 000000000..4f39dad20
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/alignment.rs
@@ -0,0 +1,63 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::Align;
+
+/// Returns `true` if this place is allowed to be less aligned
+/// than its containing struct (because it is within a packed
+/// struct).
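+///
+/// For example (illustrative), given `#[repr(packed)] struct P { x: u8, y: u32 }`,
+/// the place `p.y` has a packed alignment of 1 but a type alignment of 4, so it
+/// is disaligned and must not be borrowed at its natural alignment.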
+pub fn is_disaligned<'tcx, L>(
+ tcx: TyCtxt<'tcx>,
+ local_decls: &L,
+ param_env: ty::ParamEnv<'tcx>,
+ place: Place<'tcx>,
+) -> bool
+where
+ L: HasLocalDecls<'tcx>,
+{
+ debug!("is_disaligned({:?})", place);
+ let Some(pack) = is_within_packed(tcx, local_decls, place) else {
+ debug!("is_disaligned({:?}) - not within packed", place);
+ return false;
+ };
+
+ let ty = place.ty(local_decls, tcx).ty;
+ match tcx.layout_of(param_env.and(ty)) {
+ Ok(layout) if layout.align.abi <= pack => {
+            // If the packed alignment is greater than or equal to the field alignment,
+            // the type won't be further disaligned.
+ debug!(
+ "is_disaligned({:?}) - align = {}, packed = {}; not disaligned",
+ place,
+ layout.align.abi.bytes(),
+ pack.bytes()
+ );
+ false
+ }
+ _ => {
+ debug!("is_disaligned({:?}) - true", place);
+ true
+ }
+ }
+}
+
+fn is_within_packed<'tcx, L>(
+ tcx: TyCtxt<'tcx>,
+ local_decls: &L,
+ place: Place<'tcx>,
+) -> Option<Align>
+where
+ L: HasLocalDecls<'tcx>,
+{
+ place
+ .iter_projections()
+ .rev()
+ // Stop at `Deref`; standard ABI alignment applies there.
+ .take_while(|(_base, elem)| !matches!(elem, ProjectionElem::Deref))
+ // Consider the packed alignments at play here...
+ .filter_map(|(base, _elem)| {
+ base.ty(local_decls, tcx).ty.ty_adt_def().and_then(|adt| adt.repr().pack)
+ })
+ // ... and compute their minimum.
+ // The overall smallest alignment is what matters.
+ .min()
+}
diff --git a/compiler/rustc_const_eval/src/util/call_kind.rs b/compiler/rustc_const_eval/src/util/call_kind.rs
new file mode 100644
index 000000000..af9d83f06
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/call_kind.rs
@@ -0,0 +1,146 @@
+//! Common logic for borrowck use-after-move errors when a value is moved into
+//! a `fn(self)` method, as well as errors when attempting to call a non-const
+//! function in a const context.
+
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItemGroup;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, AssocItemContainer, DefIdTree, Instance, ParamEnv, Ty, TyCtxt};
+use rustc_span::symbol::Ident;
+use rustc_span::{sym, DesugaringKind, Span};
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum CallDesugaringKind {
+    /// `for _ in x {}` calls `x.into_iter()`
+    ForLoopIntoIter,
+    /// `x?` calls `x.branch()`
+    QuestionBranch,
+    /// `x?` calls `type_of(x)::from_residual()`
+    QuestionFromResidual,
+    /// `try { ..; x }` calls `type_of(x)::from_output(x)`
+    TryBlockFromOutput,
+}
+
+impl CallDesugaringKind {
+ pub fn trait_def_id(self, tcx: TyCtxt<'_>) -> DefId {
+ match self {
+ Self::ForLoopIntoIter => tcx.get_diagnostic_item(sym::IntoIterator).unwrap(),
+ Self::QuestionBranch | Self::TryBlockFromOutput => {
+ tcx.lang_items().try_trait().unwrap()
+ }
+ Self::QuestionFromResidual => tcx.get_diagnostic_item(sym::FromResidual).unwrap(),
+ }
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum CallKind<'tcx> {
+ /// A normal method call of the form `receiver.foo(a, b, c)`
+ Normal {
+ self_arg: Option<Ident>,
+ desugaring: Option<(CallDesugaringKind, Ty<'tcx>)>,
+ /// Whether the self type of the method call has an `.as_ref()` method.
+ /// Used for better diagnostics.
+ is_option_or_result: bool,
+ },
+ /// A call to `Fn(..)::call(..)`, desugared from `my_closure(a, b, c)`
+ FnCall { fn_trait_id: DefId, self_ty: Ty<'tcx> },
+ /// A call to an operator trait, desugared from operator syntax (e.g. `a << b`)
+ Operator { self_arg: Option<Ident>, trait_id: DefId, self_ty: Ty<'tcx> },
+ DerefCoercion {
+ /// The `Span` of the `Target` associated type
+ /// in the `Deref` impl we are using.
+ deref_target: Span,
+ /// The type `T::Deref` we are dereferencing to
+ deref_target_ty: Ty<'tcx>,
+ self_ty: Ty<'tcx>,
+ },
+}
+
+pub fn call_kind<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ method_did: DefId,
+ method_substs: SubstsRef<'tcx>,
+ fn_call_span: Span,
+ from_hir_call: bool,
+ self_arg: Option<Ident>,
+) -> CallKind<'tcx> {
+ let parent = tcx.opt_associated_item(method_did).and_then(|assoc| {
+ let container_id = assoc.container_id(tcx);
+ match assoc.container {
+ AssocItemContainer::ImplContainer => tcx.trait_id_of_impl(container_id),
+ AssocItemContainer::TraitContainer => Some(container_id),
+ }
+ });
+
+ let fn_call = parent
+ .and_then(|p| tcx.lang_items().group(LangItemGroup::Fn).iter().find(|did| **did == p));
+
+ let operator = (!from_hir_call)
+ .then(|| parent)
+ .flatten()
+ .and_then(|p| tcx.lang_items().group(LangItemGroup::Op).iter().find(|did| **did == p));
+
+ let is_deref = !from_hir_call && tcx.is_diagnostic_item(sym::deref_method, method_did);
+
+ // Check for a 'special' use of 'self' -
+ // an FnOnce call, an operator (e.g. `<<`), or a
+ // deref coercion.
+ let kind = if let Some(&trait_id) = fn_call {
+ Some(CallKind::FnCall { fn_trait_id: trait_id, self_ty: method_substs.type_at(0) })
+ } else if let Some(&trait_id) = operator {
+ Some(CallKind::Operator { self_arg, trait_id, self_ty: method_substs.type_at(0) })
+ } else if is_deref {
+ let deref_target = tcx.get_diagnostic_item(sym::deref_target).and_then(|deref_target| {
+ Instance::resolve(tcx, param_env, deref_target, method_substs).transpose()
+ });
+ if let Some(Ok(instance)) = deref_target {
+ let deref_target_ty = instance.ty(tcx, param_env);
+ Some(CallKind::DerefCoercion {
+ deref_target: tcx.def_span(instance.def_id()),
+ deref_target_ty,
+ self_ty: method_substs.type_at(0),
+ })
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+
+ kind.unwrap_or_else(|| {
+ // This isn't a 'special' use of `self`
+ debug!(?method_did, ?fn_call_span);
+ let desugaring = if Some(method_did) == tcx.lang_items().into_iter_fn()
+ && fn_call_span.desugaring_kind() == Some(DesugaringKind::ForLoop)
+ {
+ Some((CallDesugaringKind::ForLoopIntoIter, method_substs.type_at(0)))
+ } else if fn_call_span.desugaring_kind() == Some(DesugaringKind::QuestionMark) {
+ if Some(method_did) == tcx.lang_items().branch_fn() {
+ Some((CallDesugaringKind::QuestionBranch, method_substs.type_at(0)))
+ } else if Some(method_did) == tcx.lang_items().from_residual_fn() {
+ Some((CallDesugaringKind::QuestionFromResidual, method_substs.type_at(0)))
+ } else {
+ None
+ }
+ } else if Some(method_did) == tcx.lang_items().from_output_fn()
+ && fn_call_span.desugaring_kind() == Some(DesugaringKind::TryBlock)
+ {
+ Some((CallDesugaringKind::TryBlockFromOutput, method_substs.type_at(0)))
+ } else {
+ None
+ };
+ let parent_did = tcx.parent(method_did);
+ let parent_self_ty = (tcx.def_kind(parent_did) == rustc_hir::def::DefKind::Impl)
+ .then_some(parent_did)
+ .and_then(|did| match tcx.type_of(did).kind() {
+ ty::Adt(def, ..) => Some(def.did()),
+ _ => None,
+ });
+ let is_option_or_result = parent_self_ty.map_or(false, |def_id| {
+ matches!(tcx.get_diagnostic_name(def_id), Some(sym::Option | sym::Result))
+ });
+ CallKind::Normal { self_arg, desugaring, is_option_or_result }
+ })
+}
diff --git a/compiler/rustc_const_eval/src/util/collect_writes.rs b/compiler/rustc_const_eval/src/util/collect_writes.rs
new file mode 100644
index 000000000..8d92bb359
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/collect_writes.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::visit::PlaceContext;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{Body, Local, Location};
+
+pub trait FindAssignments {
+ // Finds all statements that assign directly to local (i.e., X = ...)
+ // and returns their locations.
+ fn find_assignments(&self, local: Local) -> Vec<Location>;
+}
+
+impl<'tcx> FindAssignments for Body<'tcx> {
+ fn find_assignments(&self, local: Local) -> Vec<Location> {
+ let mut visitor = FindLocalAssignmentVisitor { needle: local, locations: vec![] };
+ visitor.visit_body(self);
+ visitor.locations
+ }
+}
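+
+// Illustrative usage: if `_2 = const 1_i32` is the first statement of bb0, then
+// `body.find_assignments(Local::new(2))` returns that statement's `Location`.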
+
+// The Visitor walks the MIR to return the assignment statements corresponding
+// to a Local.
+struct FindLocalAssignmentVisitor {
+ needle: Local,
+ locations: Vec<Location>,
+}
+
+impl<'tcx> Visitor<'tcx> for FindLocalAssignmentVisitor {
+ fn visit_local(&mut self, local: Local, place_context: PlaceContext, location: Location) {
+ if self.needle != local {
+ return;
+ }
+
+ if place_context.is_place_assignment() {
+ self.locations.push(location);
+ }
+ }
+}
diff --git a/compiler/rustc_const_eval/src/util/find_self_call.rs b/compiler/rustc_const_eval/src/util/find_self_call.rs
new file mode 100644
index 000000000..33ad128ee
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/find_self_call.rs
@@ -0,0 +1,36 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::def_id::DefId;
+
+/// Checks if the specified `local` is used as the `self` parameter of a method call
+/// in the provided `BasicBlock`. If it is, then the `DefId` of the called method is
+/// returned.
+pub fn find_self_call<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ local: Local,
+ block: BasicBlock,
+) -> Option<(DefId, SubstsRef<'tcx>)> {
+ debug!("find_self_call(local={:?}): terminator={:?}", local, &body[block].terminator);
+ if let Some(Terminator { kind: TerminatorKind::Call { func, args, .. }, .. }) =
+ &body[block].terminator
+ {
+ debug!("find_self_call: func={:?}", func);
+ if let Operand::Constant(box Constant { literal, .. }) = func {
+ if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
+ if let Some(ty::AssocItem { fn_has_self_parameter: true, .. }) =
+ tcx.opt_associated_item(def_id)
+ {
+ debug!("find_self_call: args={:?}", args);
+ if let [Operand::Move(self_place) | Operand::Copy(self_place), ..] = **args {
+ if self_place.as_local() == Some(local) {
+ return Some((def_id, substs));
+ }
+ }
+ }
+ }
+ }
+ }
+ None
+}
diff --git a/compiler/rustc_const_eval/src/util/mod.rs b/compiler/rustc_const_eval/src/util/mod.rs
new file mode 100644
index 000000000..a1876bed8
--- /dev/null
+++ b/compiler/rustc_const_eval/src/util/mod.rs
@@ -0,0 +1,10 @@
+pub mod aggregate;
+mod alignment;
+mod call_kind;
+pub mod collect_writes;
+mod find_self_call;
+
+pub use self::aggregate::expand_aggregate;
+pub use self::alignment::is_disaligned;
+pub use self::call_kind::{call_kind, CallDesugaringKind, CallKind};
+pub use self::find_self_call::find_self_call;
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
new file mode 100644
index 000000000..5c641f54f
--- /dev/null
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -0,0 +1,41 @@
+[package]
+name = "rustc_data_structures"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+arrayvec = { version = "0.7", default-features = false }
+ena = "0.14"
+indexmap = { version = "1.9.1" }
+tracing = "0.1"
+jobserver_crate = { version = "0.1.13", package = "jobserver" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_graphviz = { path = "../rustc_graphviz" }
+cfg-if = "0.1.2"
+stable_deref_trait = "1.0.0"
+rayon = { version = "0.4.0", package = "rustc-rayon", optional = true }
+rayon-core = { version = "0.4.0", package = "rustc-rayon-core", optional = true }
+rustc-hash = "1.1.0"
+smallvec = { version = "1.8.1", features = ["const_generics", "union", "may_dangle"] }
+rustc_index = { path = "../rustc_index", package = "rustc_index" }
+bitflags = "1.2.1"
+measureme = "10.0.0"
+libc = "0.2"
+stacker = "0.1.14"
+tempfile = "3.2"
+
+[dependencies.parking_lot]
+version = "0.11"
+
+[target.'cfg(windows)'.dependencies]
+winapi = { version = "0.3", features = ["fileapi", "psapi", "winerror"] }
+
+[target.'cfg(not(target_arch = "wasm32"))'.dependencies]
+memmap2 = "0.2.1"
+
+[features]
+rustc_use_parallel_compiler = ["indexmap/rustc-rayon", "rayon", "rayon-core"]
diff --git a/compiler/rustc_data_structures/src/atomic_ref.rs b/compiler/rustc_data_structures/src/atomic_ref.rs
new file mode 100644
index 000000000..eeb1b3092
--- /dev/null
+++ b/compiler/rustc_data_structures/src/atomic_ref.rs
@@ -0,0 +1,26 @@
+use std::marker::PhantomData;
+use std::sync::atomic::{AtomicPtr, Ordering};
+
+/// This is essentially an `AtomicPtr` but is guaranteed to always be valid
+pub struct AtomicRef<T: 'static>(AtomicPtr<T>, PhantomData<&'static T>);
+
+impl<T: 'static> AtomicRef<T> {
+ pub const fn new(initial: &'static T) -> AtomicRef<T> {
+ AtomicRef(AtomicPtr::new(initial as *const T as *mut T), PhantomData)
+ }
+
+ pub fn swap(&self, new: &'static T) -> &'static T {
+ // We never allow storing anything but a `'static` reference so it's safe to
+ // return it for the same.
+ unsafe { &*self.0.swap(new as *const T as *mut T, Ordering::SeqCst) }
+ }
+}
+
+impl<T: 'static> std::ops::Deref for AtomicRef<T> {
+ type Target = T;
+ fn deref(&self) -> &Self::Target {
+ // We never allow storing anything but a `'static` reference so it's safe to lend
+ // it out for any amount of time.
+ unsafe { &*self.0.load(Ordering::SeqCst) }
+ }
+}
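
A self-contained sketch of the intended pattern: a global function pointer that starts at a default and can be atomically redirected at runtime (all names here are illustrative).

fn fallback() -> u32 { 0 }
fn fast_path() -> u32 { 42 }

static FALLBACK: fn() -> u32 = fallback;
static FAST: fn() -> u32 = fast_path;
// `new` is a const fn, so the hook can live in a `static`.
static HOOK: AtomicRef<fn() -> u32> = AtomicRef::new(&FALLBACK);

fn main() {
    assert_eq!((*HOOK)(), 0); // `Deref` loads the current target
    HOOK.swap(&FAST); // atomically install the replacement
    assert_eq!((*HOOK)(), 42);
}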
diff --git a/compiler/rustc_data_structures/src/base_n.rs b/compiler/rustc_data_structures/src/base_n.rs
new file mode 100644
index 000000000..3c7bea271
--- /dev/null
+++ b/compiler/rustc_data_structures/src/base_n.rs
@@ -0,0 +1,42 @@
+//! Converts unsigned integers into a string representation in some base.
+//! Bases up to and including 36 can be used for case-insensitive encodings.
+use std::str;
+
+#[cfg(test)]
+mod tests;
+
+pub const MAX_BASE: usize = 64;
+pub const ALPHANUMERIC_ONLY: usize = 62;
+pub const CASE_INSENSITIVE: usize = 36;
+
+const BASE_64: &[u8; MAX_BASE as usize] =
+ b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ@$";
+
+#[inline]
+pub fn push_str(mut n: u128, base: usize, output: &mut String) {
+ debug_assert!(base >= 2 && base <= MAX_BASE);
+ let mut s = [0u8; 128];
+ let mut index = 0;
+
+ let base = base as u128;
+
+ loop {
+ s[index] = BASE_64[(n % base) as usize];
+ index += 1;
+ n /= base;
+
+ if n == 0 {
+ break;
+ }
+ }
+ s[0..index].reverse();
+
+ output.push_str(str::from_utf8(&s[0..index]).unwrap());
+}
+
+#[inline]
+pub fn encode(n: u128, base: usize) -> String {
+ let mut s = String::new();
+ push_str(n, base, &mut s);
+ s
+}
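
A quick sanity-check sketch of `encode` against the digit table above (the asserts are illustrative):

fn digit_table_examples() {
    assert_eq!(encode(255, 16), "ff");
    assert_eq!(encode(35, 36), "z"); // last digit of the case-insensitive range
    assert_eq!(encode(63, MAX_BASE), "$"); // '$' is the final symbol in BASE_64
}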
diff --git a/compiler/rustc_data_structures/src/base_n/tests.rs b/compiler/rustc_data_structures/src/base_n/tests.rs
new file mode 100644
index 000000000..2be2f0532
--- /dev/null
+++ b/compiler/rustc_data_structures/src/base_n/tests.rs
@@ -0,0 +1,24 @@
+use super::*;
+
+#[test]
+fn test_encode() {
+ fn test(n: u128, base: usize) {
+ assert_eq!(Ok(n), u128::from_str_radix(&encode(n, base), base as u32));
+ }
+
+ for base in 2..37 {
+ test(0, base);
+ test(1, base);
+ test(35, base);
+ test(36, base);
+ test(37, base);
+ test(u64::MAX as u128, base);
+ test(u128::MAX, base);
+
+ const N: u128 = if cfg!(miri) { 10 } else { 1000 };
+
+ for i in 0..N {
+ test(i * 983, base);
+ }
+ }
+}
diff --git a/compiler/rustc_data_structures/src/binary_search_util/mod.rs b/compiler/rustc_data_structures/src/binary_search_util/mod.rs
new file mode 100644
index 000000000..d40172a2e
--- /dev/null
+++ b/compiler/rustc_data_structures/src/binary_search_util/mod.rs
@@ -0,0 +1,68 @@
+#[cfg(test)]
+mod tests;
+
+/// Uses a sorted slice `data: &[E]` as a kind of "multi-map". The
+/// `key_fn` extracts a key of type `K` from the data, and this
+/// function finds the range of elements that match the key. `data`
+/// must have been sorted as if by a call to `sort_by_key` for this to
+/// work.
+pub fn binary_search_slice<'d, E, K>(data: &'d [E], key_fn: impl Fn(&E) -> K, key: &K) -> &'d [E]
+where
+ K: Ord,
+{
+ let Ok(mid) = data.binary_search_by_key(key, &key_fn) else {
+ return &[];
+ };
+ let size = data.len();
+
+ // We get back *some* element with the given key -- so do
+ // a galloping search backwards to find the *first* one.
+ let mut start = mid;
+ let mut previous = mid;
+ let mut step = 1;
+ loop {
+ start = start.saturating_sub(step);
+ if start == 0 || key_fn(&data[start]) != *key {
+ break;
+ }
+ previous = start;
+ step *= 2;
+ }
+ step = previous - start;
+ while step > 1 {
+ let half = step / 2;
+ let mid = start + half;
+ if key_fn(&data[mid]) != *key {
+ start = mid;
+ }
+ step -= half;
+ }
+ // adjust by one if we have overshot
+ if start < size && key_fn(&data[start]) != *key {
+ start += 1;
+ }
+
+ // Now search forward to find the *last* one.
+ let mut end = mid;
+ let mut previous = mid;
+ let mut step = 1;
+ loop {
+ end = end.saturating_add(step).min(size);
+ if end == size || key_fn(&data[end]) != *key {
+ break;
+ }
+ previous = end;
+ step *= 2;
+ }
+ step = end - previous;
+ while step > 1 {
+ let half = step / 2;
+ let mid = end - half;
+ if key_fn(&data[mid]) != *key {
+ end = mid;
+ }
+ step -= half;
+ }
+
+ &data[start..end]
+}
diff --git a/compiler/rustc_data_structures/src/binary_search_util/tests.rs b/compiler/rustc_data_structures/src/binary_search_util/tests.rs
new file mode 100644
index 000000000..d74febb5c
--- /dev/null
+++ b/compiler/rustc_data_structures/src/binary_search_util/tests.rs
@@ -0,0 +1,23 @@
+use super::*;
+
+type Element = (usize, &'static str);
+
+fn test_map() -> Vec<Element> {
+ let mut data = vec![(3, "three-a"), (0, "zero"), (3, "three-b"), (22, "twenty-two")];
+ data.sort_by_key(get_key);
+ data
+}
+
+fn get_key(data: &Element) -> usize {
+ data.0
+}
+
+#[test]
+fn binary_search_slice_test() {
+ let map = test_map();
+ assert_eq!(binary_search_slice(&map, get_key, &0), &[(0, "zero")]);
+ assert_eq!(binary_search_slice(&map, get_key, &1), &[]);
+ assert_eq!(binary_search_slice(&map, get_key, &3), &[(3, "three-a"), (3, "three-b")]);
+ assert_eq!(binary_search_slice(&map, get_key, &22), &[(22, "twenty-two")]);
+ assert_eq!(binary_search_slice(&map, get_key, &23), &[]);
+}
diff --git a/compiler/rustc_data_structures/src/captures.rs b/compiler/rustc_data_structures/src/captures.rs
new file mode 100644
index 000000000..677ccb314
--- /dev/null
+++ b/compiler/rustc_data_structures/src/captures.rs
@@ -0,0 +1,8 @@
+/// "Signaling" trait used in impl trait to tag lifetimes that you may
+/// need to capture but don't really need for other reasons.
+/// Basically a workaround; see [this comment] for details.
+///
+/// [this comment]: https://github.com/rust-lang/rust/issues/34511#issuecomment-373423999
+pub trait Captures<'a> {}
+
+impl<'a, T: ?Sized> Captures<'a> for T {}
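
A sketch of the workaround in action (the function is illustrative): the hidden iterator type borrows both `'a` and `'b`, and without the `Captures` bounds the compiler rejects it with E0700 because a captured lifetime would not appear in the bounds.

fn at_least<'a, 'b>(
    data: &'a [u32],
    min: &'b u32,
) -> impl Iterator<Item = u32> + Captures<'a> + Captures<'b> {
    // The closure captures `min: &'b u32`, so `'b` must appear in the bounds.
    data.iter().copied().filter(move |&x| x >= *min)
}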
diff --git a/compiler/rustc_data_structures/src/fingerprint.rs b/compiler/rustc_data_structures/src/fingerprint.rs
new file mode 100644
index 000000000..5ff2d18dd
--- /dev/null
+++ b/compiler/rustc_data_structures/src/fingerprint.rs
@@ -0,0 +1,215 @@
+use crate::stable_hasher;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::convert::TryInto;
+use std::hash::{Hash, Hasher};
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Copy)]
+#[repr(C)]
+pub struct Fingerprint(u64, u64);
+
+impl Fingerprint {
+ pub const ZERO: Fingerprint = Fingerprint(0, 0);
+
+ #[inline]
+ pub fn new(_0: u64, _1: u64) -> Fingerprint {
+ Fingerprint(_0, _1)
+ }
+
+ #[inline]
+ pub fn from_smaller_hash(hash: u64) -> Fingerprint {
+ Fingerprint(hash, hash)
+ }
+
+ #[inline]
+ pub fn to_smaller_hash(&self) -> u64 {
+ // Even though both halves of the fingerprint are expected to be good
+ // quality hash values, let's still combine the two values because the
+ // Fingerprints in DefPathHash have the StableCrateId portion which is
+ // the same for all DefPathHashes from the same crate. Combining the
+ // two halves makes sure we get a good quality hash in such cases too.
+ self.0.wrapping_mul(3).wrapping_add(self.1)
+ }
+
+ #[inline]
+ pub fn as_value(&self) -> (u64, u64) {
+ (self.0, self.1)
+ }
+
+ #[inline]
+ pub fn combine(self, other: Fingerprint) -> Fingerprint {
+ // See https://stackoverflow.com/a/27952689 on why this function is
+ // implemented this way.
+ Fingerprint(
+ self.0.wrapping_mul(3).wrapping_add(other.0),
+ self.1.wrapping_mul(3).wrapping_add(other.1),
+ )
+ }
+
+ // Combines two hashes in an order-independent way. Make sure this is what
+ // you want.
+ #[inline]
+ pub fn combine_commutative(self, other: Fingerprint) -> Fingerprint {
+ let a = u128::from(self.1) << 64 | u128::from(self.0);
+ let b = u128::from(other.1) << 64 | u128::from(other.0);
+
+ let c = a.wrapping_add(b);
+
+ Fingerprint(c as u64, (c >> 64) as u64)
+ }
+
+ pub fn to_hex(&self) -> String {
+ format!("{:x}{:x}", self.0, self.1)
+ }
+
+ #[inline]
+ pub fn to_le_bytes(&self) -> [u8; 16] {
+ // This seems to optimize to the same machine code as
+ // `unsafe { mem::transmute(*k) }`. Well done, LLVM! :)
+ let mut result = [0u8; 16];
+
+ let first_half: &mut [u8; 8] = (&mut result[0..8]).try_into().unwrap();
+ *first_half = self.0.to_le_bytes();
+
+ let second_half: &mut [u8; 8] = (&mut result[8..16]).try_into().unwrap();
+ *second_half = self.1.to_le_bytes();
+
+ result
+ }
+
+ #[inline]
+ pub fn from_le_bytes(bytes: [u8; 16]) -> Fingerprint {
+ Fingerprint(
+ u64::from_le_bytes(bytes[0..8].try_into().unwrap()),
+ u64::from_le_bytes(bytes[8..16].try_into().unwrap()),
+ )
+ }
+}
+
+impl std::fmt::Display for Fingerprint {
+ fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(formatter, "{:x}-{:x}", self.0, self.1)
+ }
+}
+
+impl Hash for Fingerprint {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_fingerprint(self);
+ }
+}
+
+trait FingerprintHasher {
+ fn write_fingerprint(&mut self, fingerprint: &Fingerprint);
+}
+
+impl<H: Hasher> FingerprintHasher for H {
+ #[inline]
+ default fn write_fingerprint(&mut self, fingerprint: &Fingerprint) {
+ self.write_u64(fingerprint.0);
+ self.write_u64(fingerprint.1);
+ }
+}
+
+impl FingerprintHasher for crate::unhash::Unhasher {
+ #[inline]
+ fn write_fingerprint(&mut self, fingerprint: &Fingerprint) {
+ // Even though both halves of the fingerprint are expected to be good
+ // quality hash values, let's still combine the two values because the
+ // Fingerprints in DefPathHash have the StableCrateId portion which is
+ // the same for all DefPathHashes from the same crate. Combining the
+ // two halves makes sure we get a good quality hash in such cases too.
+ //
+ // Since `Unhasher` is used only in the context of HashMaps, it is OK
+ // to combine the two components in an order-independent way (which is
+ // cheaper than the more robust Fingerprint::to_smaller_hash()). For
+ // HashMaps we don't really care if Fingerprint(x,y) and
+ // Fingerprint(y, x) result in the same hash value. Collision
+ // probability will still be much better than with FxHash.
+ self.write_u64(fingerprint.0.wrapping_add(fingerprint.1));
+ }
+}
+
+impl stable_hasher::StableHasherResult for Fingerprint {
+ #[inline]
+ fn finish(hasher: stable_hasher::StableHasher) -> Self {
+ let (_0, _1) = hasher.finalize();
+ Fingerprint(_0, _1)
+ }
+}
+
+impl_stable_hash_via_hash!(Fingerprint);
+
+impl<E: Encoder> Encodable<E> for Fingerprint {
+ #[inline]
+ fn encode(&self, s: &mut E) {
+ s.emit_raw_bytes(&self.to_le_bytes());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for Fingerprint {
+ #[inline]
+ fn decode(d: &mut D) -> Self {
+ Fingerprint::from_le_bytes(d.read_raw_bytes(16).try_into().unwrap())
+ }
+}
+
+// `PackedFingerprint` wraps a `Fingerprint`. Its purpose is to, on certain
+// architectures, behave like a `Fingerprint` without alignment requirements.
+// This behavior is only enabled on x86 and x86_64, where the impact of
+// unaligned accesses is tolerable in small doses.
+//
+// This may be preferable to use in large collections of structs containing
+// fingerprints, as it can reduce memory consumption by preventing the padding
+// that the more strictly-aligned `Fingerprint` can introduce. An application of
+// this is in the query dependency graph, which contains a large collection of
+// `DepNode`s. As of this writing, the size of a `DepNode` decreases by ~30%
+// (from 24 bytes to 17) by using the packed representation here, which
+// noticeably decreases total memory usage when compiling large crates.
+//
+// The wrapped `Fingerprint` is private to reduce the chance of a client
+// invoking undefined behavior by taking a reference to the packed field.
+#[cfg_attr(any(target_arch = "x86", target_arch = "x86_64"), repr(packed))]
+#[derive(Eq, PartialEq, Ord, PartialOrd, Debug, Clone, Copy, Hash)]
+pub struct PackedFingerprint(Fingerprint);
+
+impl std::fmt::Display for PackedFingerprint {
+ #[inline]
+ fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ // Copy to avoid taking reference to packed field.
+ let copy = self.0;
+ copy.fmt(formatter)
+ }
+}
+
+impl<E: Encoder> Encodable<E> for PackedFingerprint {
+ #[inline]
+ fn encode(&self, s: &mut E) {
+ // Copy to avoid taking reference to packed field.
+ let copy = self.0;
+ copy.encode(s);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for PackedFingerprint {
+ #[inline]
+ fn decode(d: &mut D) -> Self {
+ Self(Fingerprint::decode(d))
+ }
+}
+
+impl From<Fingerprint> for PackedFingerprint {
+ #[inline]
+ fn from(f: Fingerprint) -> PackedFingerprint {
+ PackedFingerprint(f)
+ }
+}
+
+impl From<PackedFingerprint> for Fingerprint {
+ #[inline]
+ fn from(f: PackedFingerprint) -> Fingerprint {
+ f.0
+ }
+}
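
The byte encodings above are inverses of one another; a quick round-trip sketch (values are arbitrary):

fn le_bytes_roundtrip() {
    let fp = Fingerprint::new(0x0123_4567_89ab_cdef, 0xfedc_ba98_7654_3210);
    assert_eq!(Fingerprint::from_le_bytes(fp.to_le_bytes()), fp);
}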
diff --git a/compiler/rustc_data_structures/src/fingerprint/tests.rs b/compiler/rustc_data_structures/src/fingerprint/tests.rs
new file mode 100644
index 000000000..9b0783e33
--- /dev/null
+++ b/compiler/rustc_data_structures/src/fingerprint/tests.rs
@@ -0,0 +1,14 @@
+use super::*;
+
+// Check that `combine_commutative` is order independent.
+#[test]
+fn combine_commutative_is_order_independent() {
+ let a = Fingerprint::new(0xf6622fb349898b06, 0x70be9377b2f9c610);
+ let b = Fingerprint::new(0xa9562bf5a2a5303c, 0x67d9b6c82034f13d);
+ let c = Fingerprint::new(0x0d013a27811dbbc3, 0x9a3f7b3d9142ec43);
+ let permutations = [(a, b, c), (a, c, b), (b, a, c), (b, c, a), (c, a, b), (c, b, a)];
+ let f = a.combine_commutative(b).combine_commutative(c);
+ for p in &permutations {
+ assert_eq!(f, p.0.combine_commutative(p.1).combine_commutative(p.2));
+ }
+}
diff --git a/compiler/rustc_data_structures/src/flock.rs b/compiler/rustc_data_structures/src/flock.rs
new file mode 100644
index 000000000..e395d8dbb
--- /dev/null
+++ b/compiler/rustc_data_structures/src/flock.rs
@@ -0,0 +1,26 @@
+//! Simple file-locking APIs for each OS.
+//!
+//! This is not meant for the standard library; it does nothing with
+//! green/native threading. It is just a bare-bones solution good enough
+//! for librustdoc; it is not production quality at all.
+
+#![allow(non_camel_case_types)]
+#![allow(nonstandard_style)]
+
+cfg_if! {
+ if #[cfg(target_os = "linux")] {
+ mod linux;
+ use linux as imp;
+ } else if #[cfg(unix)] {
+ mod unix;
+ use unix as imp;
+ } else if #[cfg(windows)] {
+ mod windows;
+ use windows as imp;
+ } else {
+ mod unsupported;
+ use unsupported as imp;
+ }
+}
+
+pub use imp::Lock;
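
A usage sketch (the path and helper are illustrative): take a blocking, exclusive lock on a directory's lock file. Dropping the returned `Lock` (or closing the underlying descriptor/handle) releases it on every platform.

use std::io;
use std::path::Path;

fn lock_dir(dir: &Path) -> io::Result<Lock> {
    // wait = block until acquired; create = make the file if missing;
    // exclusive = writer lock rather than a shared reader lock.
    Lock::new(&dir.join(".lock"), true, true, true)
}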
diff --git a/compiler/rustc_data_structures/src/flock/linux.rs b/compiler/rustc_data_structures/src/flock/linux.rs
new file mode 100644
index 000000000..bb3ecfbc3
--- /dev/null
+++ b/compiler/rustc_data_structures/src/flock/linux.rs
@@ -0,0 +1,40 @@
+//! We use `flock` rather than `fcntl` on Linux, because WSL1 does not support
+//! `fcntl`-style advisory locks properly (rust-lang/rust#72157). For other Unix
+//! targets we still use `fcntl` because it's more portable than `flock`.
+
+use std::fs::{File, OpenOptions};
+use std::io;
+use std::os::unix::prelude::*;
+use std::path::Path;
+
+#[derive(Debug)]
+pub struct Lock {
+ _file: File,
+}
+
+impl Lock {
+ pub fn new(p: &Path, wait: bool, create: bool, exclusive: bool) -> io::Result<Lock> {
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(create)
+ .mode(libc::S_IRWXU as u32)
+ .open(p)?;
+
+ let mut operation = if exclusive { libc::LOCK_EX } else { libc::LOCK_SH };
+ if !wait {
+ operation |= libc::LOCK_NB
+ }
+
+ let ret = unsafe { libc::flock(file.as_raw_fd(), operation) };
+ if ret == -1 { Err(io::Error::last_os_error()) } else { Ok(Lock { _file: file }) }
+ }
+
+ pub fn error_unsupported(err: &io::Error) -> bool {
+ matches!(err.raw_os_error(), Some(libc::ENOTSUP) | Some(libc::ENOSYS))
+ }
+}
+
+// Note that we don't need a Drop impl to execute `flock(fd, LOCK_UN)`. A lock acquired by
+// `flock` is associated with the file descriptor and closing the file releases it
+// automatically.
diff --git a/compiler/rustc_data_structures/src/flock/unix.rs b/compiler/rustc_data_structures/src/flock/unix.rs
new file mode 100644
index 000000000..4e5297d58
--- /dev/null
+++ b/compiler/rustc_data_structures/src/flock/unix.rs
@@ -0,0 +1,51 @@
+use std::fs::{File, OpenOptions};
+use std::io;
+use std::mem;
+use std::os::unix::prelude::*;
+use std::path::Path;
+
+#[derive(Debug)]
+pub struct Lock {
+ file: File,
+}
+
+impl Lock {
+ pub fn new(p: &Path, wait: bool, create: bool, exclusive: bool) -> io::Result<Lock> {
+ let file = OpenOptions::new()
+ .read(true)
+ .write(true)
+ .create(create)
+ .mode(libc::S_IRWXU as u32)
+ .open(p)?;
+
+ let lock_type = if exclusive { libc::F_WRLCK } else { libc::F_RDLCK };
+
+ let mut flock: libc::flock = unsafe { mem::zeroed() };
+ flock.l_type = lock_type as libc::c_short;
+ flock.l_whence = libc::SEEK_SET as libc::c_short;
+ flock.l_start = 0;
+ flock.l_len = 0;
+
+ let cmd = if wait { libc::F_SETLKW } else { libc::F_SETLK };
+ let ret = unsafe { libc::fcntl(file.as_raw_fd(), cmd, &flock) };
+ if ret == -1 { Err(io::Error::last_os_error()) } else { Ok(Lock { file }) }
+ }
+
+ pub fn error_unsupported(err: &io::Error) -> bool {
+ matches!(err.raw_os_error(), Some(libc::ENOTSUP) | Some(libc::ENOSYS))
+ }
+}
+
+impl Drop for Lock {
+ fn drop(&mut self) {
+ let mut flock: libc::flock = unsafe { mem::zeroed() };
+ flock.l_type = libc::F_UNLCK as libc::c_short;
+ flock.l_whence = libc::SEEK_SET as libc::c_short;
+ flock.l_start = 0;
+ flock.l_len = 0;
+
+ unsafe {
+ libc::fcntl(self.file.as_raw_fd(), libc::F_SETLK, &flock);
+ }
+ }
+}
diff --git a/compiler/rustc_data_structures/src/flock/unsupported.rs b/compiler/rustc_data_structures/src/flock/unsupported.rs
new file mode 100644
index 000000000..9245fca37
--- /dev/null
+++ b/compiler/rustc_data_structures/src/flock/unsupported.rs
@@ -0,0 +1,16 @@
+use std::io;
+use std::path::Path;
+
+#[derive(Debug)]
+pub struct Lock(());
+
+impl Lock {
+ pub fn new(_p: &Path, _wait: bool, _create: bool, _exclusive: bool) -> io::Result<Lock> {
+ let msg = "file locks not supported on this platform";
+ Err(io::Error::new(io::ErrorKind::Other, msg))
+ }
+
+ pub fn error_unsupported(_err: &io::Error) -> bool {
+ true
+ }
+}
diff --git a/compiler/rustc_data_structures/src/flock/windows.rs b/compiler/rustc_data_structures/src/flock/windows.rs
new file mode 100644
index 000000000..43e6caaa1
--- /dev/null
+++ b/compiler/rustc_data_structures/src/flock/windows.rs
@@ -0,0 +1,77 @@
+use std::fs::{File, OpenOptions};
+use std::io;
+use std::mem;
+use std::os::windows::prelude::*;
+use std::path::Path;
+
+use winapi::shared::winerror::ERROR_INVALID_FUNCTION;
+use winapi::um::fileapi::LockFileEx;
+use winapi::um::minwinbase::{LOCKFILE_EXCLUSIVE_LOCK, LOCKFILE_FAIL_IMMEDIATELY, OVERLAPPED};
+use winapi::um::winnt::{FILE_SHARE_DELETE, FILE_SHARE_READ, FILE_SHARE_WRITE};
+
+#[derive(Debug)]
+pub struct Lock {
+ _file: File,
+}
+
+impl Lock {
+ pub fn new(p: &Path, wait: bool, create: bool, exclusive: bool) -> io::Result<Lock> {
+ assert!(
+ p.parent().unwrap().exists(),
+ "Parent directory of lock-file must exist: {}",
+ p.display()
+ );
+
+ let share_mode = FILE_SHARE_DELETE | FILE_SHARE_READ | FILE_SHARE_WRITE;
+
+ let mut open_options = OpenOptions::new();
+ open_options.read(true).share_mode(share_mode);
+
+ if create {
+ open_options.create(true).write(true);
+ }
+
+ debug!("attempting to open lock file `{}`", p.display());
+ let file = match open_options.open(p) {
+ Ok(file) => {
+ debug!("lock file opened successfully");
+ file
+ }
+ Err(err) => {
+ debug!("error opening lock file: {}", err);
+ return Err(err);
+ }
+ };
+
+ let ret = unsafe {
+ let mut overlapped: OVERLAPPED = mem::zeroed();
+
+ let mut dwFlags = 0;
+ if !wait {
+ dwFlags |= LOCKFILE_FAIL_IMMEDIATELY;
+ }
+
+ if exclusive {
+ dwFlags |= LOCKFILE_EXCLUSIVE_LOCK;
+ }
+
+ debug!("attempting to acquire lock on lock file `{}`", p.display());
+ LockFileEx(file.as_raw_handle(), dwFlags, 0, 0xFFFF_FFFF, 0xFFFF_FFFF, &mut overlapped)
+ };
+ if ret == 0 {
+ let err = io::Error::last_os_error();
+ debug!("failed acquiring file lock: {}", err);
+ Err(err)
+ } else {
+ debug!("successfully acquired lock");
+ Ok(Lock { _file: file })
+ }
+ }
+
+ pub fn error_unsupported(err: &io::Error) -> bool {
+ err.raw_os_error() == Some(ERROR_INVALID_FUNCTION as i32)
+ }
+}
+
+// Note that we don't need a Drop impl on Windows: The file is unlocked
+// automatically when it's closed.
diff --git a/compiler/rustc_data_structures/src/frozen.rs b/compiler/rustc_data_structures/src/frozen.rs
new file mode 100644
index 000000000..c81e1b124
--- /dev/null
+++ b/compiler/rustc_data_structures/src/frozen.rs
@@ -0,0 +1,64 @@
+//! An immutable, owned value (except for interior mutability).
+//!
+//! The purpose of `Frozen` is to make a value immutable for the sake of defensive programming. For example,
+//! suppose we have the following:
+//!
+//! ```rust
+//! struct Bar { /* some data */ }
+//!
+//! struct Foo {
+//! /// Some computed data that should never change after construction.
+//! pub computed: Bar,
+//!
+//! /* some other fields */
+//! }
+//!
+//! impl Bar {
+//! /// Mutate the `Bar`.
+//! pub fn mutate(&mut self) { }
+//! }
+//! ```
+//!
+//! Now suppose we want to pass around a mutable `Foo` instance but want to make sure that
+//! `computed` does not change accidentally (e.g. somebody might accidentally call
+//! `foo.computed.mutate()`). This is what `Frozen` is for. We can do the following:
+//!
+//! ```
+//! # struct Bar {}
+//! use rustc_data_structures::frozen::Frozen;
+//!
+//! struct Foo {
+//! /// Some computed data that should never change after construction.
+//! pub computed: Frozen<Bar>,
+//!
+//! /* some other fields */
+//! }
+//! ```
+//!
+//! `Frozen` impls `Deref`, so we can ergonomically call methods on `Bar`, but it doesn't `impl
+//! DerefMut`. Now calling `foo.computed.mutate()` will result in a compile-time error stating that
+//! `mutate` requires a mutable reference but we don't have one.
+//!
+//! # Caveats
+//!
+//! - `Frozen` doesn't try to defend against interior mutability (e.g. `Frozen<RefCell<Bar>>`).
+//! - `Frozen` doesn't pin its contents (e.g. one could still do `foo.computed =
+//! Frozen::freeze(new_bar)`).
+
+/// An owned immutable value.
+#[derive(Debug)]
+pub struct Frozen<T>(T);
+
+impl<T> Frozen<T> {
+ pub fn freeze(val: T) -> Self {
+ Frozen(val)
+ }
+}
+
+impl<T> std::ops::Deref for Frozen<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
diff --git a/compiler/rustc_data_structures/src/functor.rs b/compiler/rustc_data_structures/src/functor.rs
new file mode 100644
index 000000000..a3d3f9883
--- /dev/null
+++ b/compiler/rustc_data_structures/src/functor.rs
@@ -0,0 +1,99 @@
+use rustc_index::vec::{Idx, IndexVec};
+use std::mem;
+
+pub trait IdFunctor: Sized {
+ type Inner;
+
+ fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
+ where
+ F: FnMut(Self::Inner) -> Result<Self::Inner, E>;
+}
+
+impl<T> IdFunctor for Box<T> {
+ type Inner = T;
+
+ #[inline]
+ fn try_map_id<F, E>(self, mut f: F) -> Result<Self, E>
+ where
+ F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
+ {
+ let raw = Box::into_raw(self);
+ Ok(unsafe {
+ // SAFETY: The raw pointer points to a valid value of type `T`.
+ let value = raw.read();
+ // SAFETY: Converts `Box<T>` to `Box<MaybeUninit<T>>` which is the
+ // inverse of `Box::assume_init()` and should be safe.
+ let raw: Box<mem::MaybeUninit<T>> = Box::from_raw(raw.cast());
+ // SAFETY: Write the mapped value back into the `Box`.
+ Box::write(raw, f(value)?)
+ })
+ }
+}
+
+impl<T> IdFunctor for Vec<T> {
+ type Inner = T;
+
+ #[inline]
+ fn try_map_id<F, E>(self, mut f: F) -> Result<Self, E>
+ where
+ F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
+ {
+ struct HoleVec<T> {
+ vec: Vec<mem::ManuallyDrop<T>>,
+ hole: Option<usize>,
+ }
+
+ impl<T> Drop for HoleVec<T> {
+ fn drop(&mut self) {
+ unsafe {
+ for (index, slot) in self.vec.iter_mut().enumerate() {
+ if self.hole != Some(index) {
+ mem::ManuallyDrop::drop(slot);
+ }
+ }
+ }
+ }
+ }
+
+ unsafe {
+ let (ptr, length, capacity) = self.into_raw_parts();
+ let vec = Vec::from_raw_parts(ptr.cast(), length, capacity);
+ let mut hole_vec = HoleVec { vec, hole: None };
+
+ for (index, slot) in hole_vec.vec.iter_mut().enumerate() {
+ hole_vec.hole = Some(index);
+ let original = mem::ManuallyDrop::take(slot);
+ let mapped = f(original)?;
+ *slot = mem::ManuallyDrop::new(mapped);
+ hole_vec.hole = None;
+ }
+
+ mem::forget(hole_vec);
+ Ok(Vec::from_raw_parts(ptr, length, capacity))
+ }
+ }
+}
+
+impl<T> IdFunctor for Box<[T]> {
+ type Inner = T;
+
+ #[inline]
+ fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
+ where
+ F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
+ {
+ Vec::from(self).try_map_id(f).map(Into::into)
+ }
+}
+
+impl<I: Idx, T> IdFunctor for IndexVec<I, T> {
+ type Inner = T;
+
+ #[inline]
+ fn try_map_id<F, E>(self, f: F) -> Result<Self, E>
+ where
+ F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
+ {
+ self.raw.try_map_id(f).map(IndexVec::from_raw)
+ }
+}
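
A sketch of `try_map_id` in use: transform a `Vec` element by element, in place and fallibly, bailing out on the first error (the function is illustrative).

fn double_all(v: Vec<u32>) -> Result<Vec<u32>, String> {
    // Reuses the original allocation; on `Err`, the already-mapped prefix
    // is dropped safely by the `HoleVec` guard above.
    v.try_map_id(|x| x.checked_mul(2).ok_or_else(|| format!("overflow doubling {}", x)))
}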
diff --git a/compiler/rustc_data_structures/src/fx.rs b/compiler/rustc_data_structures/src/fx.rs
new file mode 100644
index 000000000..bbeb193db
--- /dev/null
+++ b/compiler/rustc_data_structures/src/fx.rs
@@ -0,0 +1,14 @@
+use std::hash::BuildHasherDefault;
+
+pub use rustc_hash::{FxHashMap, FxHashSet, FxHasher};
+
+pub type FxIndexMap<K, V> = indexmap::IndexMap<K, V, BuildHasherDefault<FxHasher>>;
+pub type FxIndexSet<V> = indexmap::IndexSet<V, BuildHasherDefault<FxHasher>>;
+
+#[macro_export]
+macro_rules! define_id_collections {
+ ($map_name:ident, $set_name:ident, $key:ty) => {
+ pub type $map_name<T> = $crate::fx::FxHashMap<$key, T>;
+ pub type $set_name = $crate::fx::FxHashSet<$key>;
+ };
+}
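
An illustrative expansion of the macro (the `Widget` names are hypothetical): one invocation defines an `FxHashMap`-backed map and set keyed by a single ID type.

define_id_collections! { WidgetMap, WidgetSet, WidgetId }
// expands to:
// pub type WidgetMap<T> = FxHashMap<WidgetId, T>;
// pub type WidgetSet = FxHashSet<WidgetId>;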
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
new file mode 100644
index 000000000..00913a483
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -0,0 +1,324 @@
+//! Finding the dominators in a control-flow graph.
+//!
+//! Algorithm based on Loukas Georgiadis,
+//! "Linear-Time Algorithms for Dominators and Related Problems",
+//! <ftp://ftp.cs.princeton.edu/techreports/2005/737.pdf>
+//!
+//! Additionally useful is the original Lengauer-Tarjan paper on this subject,
+//! "A Fast Algorithm for Finding Dominators in a Flowgraph"
+//! Thomas Lengauer and Robert Endre Tarjan.
+//! <https://www.cs.princeton.edu/courses/archive/spr03/cs423/download/dominators.pdf>
+
+use super::ControlFlowGraph;
+use rustc_index::vec::{Idx, IndexVec};
+use std::cmp::Ordering;
+
+#[cfg(test)]
+mod tests;
+
+struct PreOrderFrame<Iter> {
+ pre_order_idx: PreorderIndex,
+ iter: Iter,
+}
+
+rustc_index::newtype_index! {
+ struct PreorderIndex { .. }
+}
+
+pub fn dominators<G: ControlFlowGraph>(graph: G) -> Dominators<G::Node> {
+ // compute the post order index (rank) for each node
+ let mut post_order_rank = IndexVec::from_elem_n(0, graph.num_nodes());
+
+ // We allocate capacity for the full set of nodes, because most of the time
+ // most of the nodes *are* reachable.
+ let mut parent: IndexVec<PreorderIndex, PreorderIndex> =
+ IndexVec::with_capacity(graph.num_nodes());
+
+ let mut stack = vec![PreOrderFrame {
+ pre_order_idx: PreorderIndex::new(0),
+ iter: graph.successors(graph.start_node()),
+ }];
+ let mut pre_order_to_real: IndexVec<PreorderIndex, G::Node> =
+ IndexVec::with_capacity(graph.num_nodes());
+ let mut real_to_pre_order: IndexVec<G::Node, Option<PreorderIndex>> =
+ IndexVec::from_elem_n(None, graph.num_nodes());
+ pre_order_to_real.push(graph.start_node());
+ parent.push(PreorderIndex::new(0)); // the parent of the root node is the root for now.
+ real_to_pre_order[graph.start_node()] = Some(PreorderIndex::new(0));
+ let mut post_order_idx = 0;
+
+ // Traverse the graph, collecting a number of things:
+ //
+ // * Preorder mapping (to it, and back to the actual ordering)
+ // * Postorder mapping (used exclusively for rank_partial_cmp on the final product)
+ // * Parents for each vertex in the preorder tree
+ //
+ // These are all done here rather than through one of the 'standard'
+ // graph traversals to help make this fast.
+ 'recurse: while let Some(frame) = stack.last_mut() {
+ while let Some(successor) = frame.iter.next() {
+ if real_to_pre_order[successor].is_none() {
+ let pre_order_idx = pre_order_to_real.push(successor);
+ real_to_pre_order[successor] = Some(pre_order_idx);
+ parent.push(frame.pre_order_idx);
+ stack.push(PreOrderFrame { pre_order_idx, iter: graph.successors(successor) });
+
+ continue 'recurse;
+ }
+ }
+ post_order_rank[pre_order_to_real[frame.pre_order_idx]] = post_order_idx;
+ post_order_idx += 1;
+
+ stack.pop();
+ }
+
+ let reachable_vertices = pre_order_to_real.len();
+
+ let mut idom = IndexVec::from_elem_n(PreorderIndex::new(0), reachable_vertices);
+ let mut semi = IndexVec::from_fn_n(std::convert::identity, reachable_vertices);
+ let mut label = semi.clone();
+ let mut bucket = IndexVec::from_elem_n(vec![], reachable_vertices);
+ let mut lastlinked = None;
+
+ // We loop over vertices in reverse preorder. This implements the pseudocode
+ // of the simple Lengauer-Tarjan algorithm. A few key facts are noted here
+ // which are helpful for understanding the code (full proofs and such are
+ // found in various papers, including one cited at the top of this file).
+ //
+ // For each vertex w (which is not the root),
+ // * semi[w] is a proper ancestor of the vertex w (i.e., semi[w] != w)
+ // * idom[w] is an ancestor of semi[w] (i.e., idom[w] may equal semi[w])
+ //
+ // An immediate dominator of w (idom[w]) is a vertex v where v dominates w
+ // and every other dominator of w dominates v. (Every vertex except the root has
+ // a unique immediate dominator.)
+ //
+ // A semidominator of a given vertex w (semi[w]) is the vertex v with the
+ // minimum preorder number such that there exists a path from v to w in
+ // which all elements other than v and w have preorder numbers greater
+ // than w (i.e., this path is not the tree path to w).
+ for w in (PreorderIndex::new(1)..PreorderIndex::new(reachable_vertices)).rev() {
+ // Optimization: process buckets just once, at the start of the
+ // iteration. Do not explicitly empty the bucket (even though it will
+ // not be used again), to save some instructions.
+ //
+ // The bucket here contains the vertices whose semidominator is the
+ // vertex w, which we are guaranteed to have found: all vertices who can
+ // be semidominated by w must have a preorder number exceeding w, so
+ // they have been placed in the bucket.
+ //
+ // We compute a partial set of immediate dominators here.
+ let z = parent[w];
+ for &v in bucket[z].iter() {
+ // This uses the result of Lemma 5 from section 2 from the original
+ // 1979 paper, to compute either the immediate or relative dominator
+ // for a given vertex v.
+ //
+ // eval returns a vertex y, for which semi[y] is minimum among
+ // vertices semi[v] +> y *> v. Note that semi[v] = z as we're in the
+ // z bucket.
+ //
+ // Given such a vertex y, semi[y] <= semi[v] and idom[y] = idom[v].
+ // If semi[y] = semi[v], though, idom[v] = semi[v].
+ //
+ // Using this, we can either set idom[v] to be:
+ // * semi[v] (i.e. z), if semi[y] is z
+ // * idom[y], otherwise
+ //
+ // We don't directly set to idom[y] though as it's not necessarily
+ // known yet. The second preorder traversal will clean up by updating
+ // the idom for any that were missed in this pass.
+ let y = eval(&mut parent, lastlinked, &semi, &mut label, v);
+ idom[v] = if semi[y] < z { y } else { z };
+ }
+
+ // This loop computes the semi[w] for w.
+ semi[w] = w;
+ for v in graph.predecessors(pre_order_to_real[w]) {
+ let v = real_to_pre_order[v].unwrap();
+
+ // eval returns a vertex x from which semi[x] is minimum among
+ // vertices semi[v] +> x *> v.
+ //
+ // From Lemma 4 from section 2, we know that the semidominator of a
+ // vertex w is the minimum (by preorder number) vertex of the
+ // following:
+ //
+ // * direct predecessors of w with preorder number less than w
+ // * semidominators of u such that u > w and there exists (v, w)
+ // such that u *> v
+ //
+ // This loop therefore identifies such a minimum. Note that any
+ // semidominator path to w must have all but the first vertex go
+ // through vertices numbered greater than w, so the reverse preorder
+ // traversal we are using guarantees that all of the information we
+ // might need is available at this point.
+ //
+ // The eval call will give us semi[x], which is either:
+ //
+ // * v itself, if v has not yet been processed
+ // * A possible 'best' semidominator for w.
+ let x = eval(&mut parent, lastlinked, &semi, &mut label, v);
+ semi[w] = std::cmp::min(semi[w], semi[x]);
+ }
+ // semi[w] is now semidominator(w) and won't change any more.
+
+ // Optimization: Do not insert into buckets if parent[w] = semi[w], as
+ // we then immediately know the idom.
+ //
+ // If we don't yet know the idom directly, then push this vertex into
+ // our semidominator's bucket, where it will get processed at a later
+ // stage to compute its immediate dominator.
+ if parent[w] != semi[w] {
+ bucket[semi[w]].push(w);
+ } else {
+ idom[w] = parent[w];
+ }
+
+ // Optimization: We share the parent array between processed and not
+ // processed elements; lastlinked represents the divider.
+ lastlinked = Some(w);
+ }
+
+ // Finalize the idoms for any that were not fully settable during initial
+ // traversal.
+ //
+ // If idom[w] != semi[w] then we know that we've stored vertex y from above
+ // into idom[w]. It is known to be our 'relative dominator', which means
+ // that it's one of w's ancestors and has the same immediate dominator as w,
+ // so use that idom.
+ for w in PreorderIndex::new(1)..PreorderIndex::new(reachable_vertices) {
+ if idom[w] != semi[w] {
+ idom[w] = idom[idom[w]];
+ }
+ }
+
+ let mut immediate_dominators = IndexVec::from_elem_n(None, graph.num_nodes());
+ for (idx, node) in pre_order_to_real.iter_enumerated() {
+ immediate_dominators[*node] = Some(pre_order_to_real[idom[idx]]);
+ }
+
+ Dominators { post_order_rank, immediate_dominators }
+}
+
+/// Evaluate the link-eval virtual forest, providing the currently minimum semi
+/// value for the passed `node` (which may be itself).
+///
+/// This maintains that for every vertex v, `label[v]` is such that:
+///
+/// ```text
+/// semi[eval(v)] = min { semi[label[u]] | root_in_forest(v) +> u *> v }
+/// ```
+///
+/// where `+>` is a proper ancestor and `*>` is just an ancestor.
+#[inline]
+fn eval(
+ ancestor: &mut IndexVec<PreorderIndex, PreorderIndex>,
+ lastlinked: Option<PreorderIndex>,
+ semi: &IndexVec<PreorderIndex, PreorderIndex>,
+ label: &mut IndexVec<PreorderIndex, PreorderIndex>,
+ node: PreorderIndex,
+) -> PreorderIndex {
+ if is_processed(node, lastlinked) {
+ compress(ancestor, lastlinked, semi, label, node);
+ label[node]
+ } else {
+ node
+ }
+}
+
+#[inline]
+fn is_processed(v: PreorderIndex, lastlinked: Option<PreorderIndex>) -> bool {
+ if let Some(ll) = lastlinked { v >= ll } else { false }
+}
+
+#[inline]
+fn compress(
+ ancestor: &mut IndexVec<PreorderIndex, PreorderIndex>,
+ lastlinked: Option<PreorderIndex>,
+ semi: &IndexVec<PreorderIndex, PreorderIndex>,
+ label: &mut IndexVec<PreorderIndex, PreorderIndex>,
+ v: PreorderIndex,
+) {
+ assert!(is_processed(v, lastlinked));
+ // Compute the processed list of ancestors
+ //
+ // We use an explicit heap-allocated stack here to avoid recursing too
+ // deeply and exhausting the call stack.
+ let mut stack: smallvec::SmallVec<[_; 8]> = smallvec::smallvec![v];
+ let mut u = ancestor[v];
+ while is_processed(u, lastlinked) {
+ stack.push(u);
+ u = ancestor[u];
+ }
+
+ // Then in reverse order, popping the stack
+ for &[v, u] in stack.array_windows().rev() {
+ if semi[label[u]] < semi[label[v]] {
+ label[v] = label[u];
+ }
+ ancestor[v] = ancestor[u];
+ }
+}
+
+#[derive(Clone, Debug)]
+pub struct Dominators<N: Idx> {
+ post_order_rank: IndexVec<N, usize>,
+ immediate_dominators: IndexVec<N, Option<N>>,
+}
+
+impl<Node: Idx> Dominators<Node> {
+ pub fn dummy() -> Self {
+ Self { post_order_rank: IndexVec::new(), immediate_dominators: IndexVec::new() }
+ }
+
+ pub fn is_reachable(&self, node: Node) -> bool {
+ self.immediate_dominators[node].is_some()
+ }
+
+ pub fn immediate_dominator(&self, node: Node) -> Node {
+ assert!(self.is_reachable(node), "node {:?} is not reachable", node);
+ self.immediate_dominators[node].unwrap()
+ }
+
+ pub fn dominators(&self, node: Node) -> Iter<'_, Node> {
+ assert!(self.is_reachable(node), "node {:?} is not reachable", node);
+ Iter { dominators: self, node: Some(node) }
+ }
+
+ pub fn is_dominated_by(&self, node: Node, dom: Node) -> bool {
+ // FIXME -- could be optimized by using post-order-rank
+ self.dominators(node).any(|n| n == dom)
+ }
+
+ /// Provide deterministic ordering of nodes such that, if any two nodes have a dominator
+ /// relationship, the dominator will always precede the dominated. (The relative ordering
+ /// of two unrelated nodes will also be consistent, but otherwise the order has no
+ /// meaning.) This method cannot be used to determine if either Node dominates the other.
+ pub fn rank_partial_cmp(&self, lhs: Node, rhs: Node) -> Option<Ordering> {
+ self.post_order_rank[lhs].partial_cmp(&self.post_order_rank[rhs])
+ }
+}
+
+pub struct Iter<'dom, Node: Idx> {
+ dominators: &'dom Dominators<Node>,
+ node: Option<Node>,
+}
+
+impl<'dom, Node: Idx> Iterator for Iter<'dom, Node> {
+ type Item = Node;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if let Some(node) = self.node {
+ let dom = self.dominators.immediate_dominator(node);
+ if dom == node {
+ self.node = None; // reached the root
+ } else {
+ self.node = Some(dom);
+ }
+ Some(node)
+ } else {
+ None
+ }
+ }
+}
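
A usage sketch over any `ControlFlowGraph` (the helper is illustrative): compute the dominator tree once, then answer dominance queries.

fn dominates<G: ControlFlowGraph>(graph: G, a: G::Node, b: G::Node) -> bool {
    let doms = dominators(graph);
    // Unreachable nodes have no immediate dominator; guard before walking
    // the dominator chain of `b`.
    doms.is_reachable(b) && doms.is_dominated_by(b, a)
}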
diff --git a/compiler/rustc_data_structures/src/graph/dominators/tests.rs b/compiler/rustc_data_structures/src/graph/dominators/tests.rs
new file mode 100644
index 000000000..ff31d8f7f
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/dominators/tests.rs
@@ -0,0 +1,45 @@
+use super::*;
+
+use super::super::tests::TestGraph;
+
+#[test]
+fn diamond() {
+ let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+
+ let dominators = dominators(&graph);
+ let immediate_dominators = &dominators.immediate_dominators;
+ assert_eq!(immediate_dominators[0], Some(0));
+ assert_eq!(immediate_dominators[1], Some(0));
+ assert_eq!(immediate_dominators[2], Some(0));
+ assert_eq!(immediate_dominators[3], Some(0));
+}
+
+#[test]
+fn paper() {
+ // example from the paper:
+ let graph = TestGraph::new(
+ 6,
+ &[(6, 5), (6, 4), (5, 1), (4, 2), (4, 3), (1, 2), (2, 3), (3, 2), (2, 1)],
+ );
+
+ let dominators = dominators(&graph);
+ let immediate_dominators = &dominators.immediate_dominators;
+ assert_eq!(immediate_dominators[0], None); // <-- note that 0 is not in graph
+ assert_eq!(immediate_dominators[1], Some(6));
+ assert_eq!(immediate_dominators[2], Some(6));
+ assert_eq!(immediate_dominators[3], Some(6));
+ assert_eq!(immediate_dominators[4], Some(6));
+ assert_eq!(immediate_dominators[5], Some(6));
+ assert_eq!(immediate_dominators[6], Some(6));
+}
+
+#[test]
+fn paper_slt() {
+ // example from the paper:
+ let graph = TestGraph::new(
+ 1,
+ &[(1, 2), (1, 3), (2, 3), (2, 7), (3, 4), (3, 6), (4, 5), (5, 4), (6, 7), (7, 8), (8, 5)],
+ );
+
+ dominators(&graph);
+}
diff --git a/compiler/rustc_data_structures/src/graph/implementation/mod.rs b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
new file mode 100644
index 000000000..1aa7ac024
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/implementation/mod.rs
@@ -0,0 +1,366 @@
+//! A graph module for use in dataflow, region resolution, and elsewhere.
+//!
+//! # Interface details
+//!
+//! You customize the graph by specifying a "node data" type `N` and an
+//! "edge data" type `E`. You can then later gain access (mutable or
+//! immutable) to these "user-data" bits. Currently, you can only add
+//! nodes or edges to the graph. You cannot remove or modify them once
+//! added. This could be changed if we have a need.
+//!
+//! # Implementation details
+//!
+//! The main tricky thing about this code is the way that edges are
+//! stored. The edges are stored in a central array, but they are also
+//! threaded onto two linked lists for each node, one for incoming edges
+//! and one for outgoing edges. Note that every edge is a member of some
+//! incoming list and some outgoing list. Basically you can load the
+//! first index of the linked list from the node data structures (the
+//! field `first_edge`) and then, for each edge, load the next index from
+//! the field `next_edge`). Each of those fields is an array that should
+//! be indexed by the direction (see the type `Direction`).
+
+use crate::snapshot_vec::{SnapshotVec, SnapshotVecDelegate};
+use rustc_index::bit_set::BitSet;
+use std::fmt::Debug;
+
+#[cfg(test)]
+mod tests;
+
+pub struct Graph<N, E> {
+ nodes: SnapshotVec<Node<N>>,
+ edges: SnapshotVec<Edge<E>>,
+}
+
+pub struct Node<N> {
+ first_edge: [EdgeIndex; 2], // see module comment
+ pub data: N,
+}
+
+#[derive(Debug)]
+pub struct Edge<E> {
+ next_edge: [EdgeIndex; 2], // see module comment
+ source: NodeIndex,
+ target: NodeIndex,
+ pub data: E,
+}
+
+impl<N> SnapshotVecDelegate for Node<N> {
+ type Value = Node<N>;
+ type Undo = ();
+
+ fn reverse(_: &mut Vec<Node<N>>, _: ()) {}
+}
+
+impl<N> SnapshotVecDelegate for Edge<N> {
+ type Value = Edge<N>;
+ type Undo = ();
+
+ fn reverse(_: &mut Vec<Edge<N>>, _: ()) {}
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct NodeIndex(pub usize);
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub struct EdgeIndex(pub usize);
+
+pub const INVALID_EDGE_INDEX: EdgeIndex = EdgeIndex(usize::MAX);
+
+// Use a private field here to guarantee no more instances are created:
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub struct Direction {
+ repr: usize,
+}
+
+pub const OUTGOING: Direction = Direction { repr: 0 };
+
+pub const INCOMING: Direction = Direction { repr: 1 };
+
+impl NodeIndex {
+ /// Returns unique ID (unique with respect to the graph holding associated node).
+ pub fn node_id(self) -> usize {
+ self.0
+ }
+}
+
+impl<N: Debug, E: Debug> Graph<N, E> {
+ pub fn new() -> Graph<N, E> {
+ Graph { nodes: SnapshotVec::new(), edges: SnapshotVec::new() }
+ }
+
+ pub fn with_capacity(nodes: usize, edges: usize) -> Graph<N, E> {
+ Graph { nodes: SnapshotVec::with_capacity(nodes), edges: SnapshotVec::with_capacity(edges) }
+ }
+
+ // # Simple accessors
+
+ #[inline]
+ pub fn all_nodes(&self) -> &[Node<N>] {
+ &self.nodes
+ }
+
+ #[inline]
+ pub fn len_nodes(&self) -> usize {
+ self.nodes.len()
+ }
+
+ #[inline]
+ pub fn all_edges(&self) -> &[Edge<E>] {
+ &self.edges
+ }
+
+ #[inline]
+ pub fn len_edges(&self) -> usize {
+ self.edges.len()
+ }
+
+ // # Node construction
+
+ pub fn next_node_index(&self) -> NodeIndex {
+ NodeIndex(self.nodes.len())
+ }
+
+ pub fn add_node(&mut self, data: N) -> NodeIndex {
+ let idx = self.next_node_index();
+ self.nodes.push(Node { first_edge: [INVALID_EDGE_INDEX, INVALID_EDGE_INDEX], data });
+ idx
+ }
+
+ pub fn mut_node_data(&mut self, idx: NodeIndex) -> &mut N {
+ &mut self.nodes[idx.0].data
+ }
+
+ pub fn node_data(&self, idx: NodeIndex) -> &N {
+ &self.nodes[idx.0].data
+ }
+
+ pub fn node(&self, idx: NodeIndex) -> &Node<N> {
+ &self.nodes[idx.0]
+ }
+
+ // # Edge construction and queries
+
+ pub fn next_edge_index(&self) -> EdgeIndex {
+ EdgeIndex(self.edges.len())
+ }
+
+ pub fn add_edge(&mut self, source: NodeIndex, target: NodeIndex, data: E) -> EdgeIndex {
+ debug!("graph: add_edge({:?}, {:?}, {:?})", source, target, data);
+
+ let idx = self.next_edge_index();
+
+ // read current first of the list of edges from each node
+ let source_first = self.nodes[source.0].first_edge[OUTGOING.repr];
+ let target_first = self.nodes[target.0].first_edge[INCOMING.repr];
+
+ // create the new edge, with the previous firsts from each node
+ // as the next pointers
+ self.edges.push(Edge { next_edge: [source_first, target_first], source, target, data });
+
+ // adjust the first-edge index of each node to point at the new edge.
+ self.nodes[source.0].first_edge[OUTGOING.repr] = idx;
+ self.nodes[target.0].first_edge[INCOMING.repr] = idx;
+
+ idx
+ }
+
+ pub fn edge(&self, idx: EdgeIndex) -> &Edge<E> {
+ &self.edges[idx.0]
+ }
+
+ // # Iterating over nodes, edges
+
+ pub fn enumerated_nodes(&self) -> impl Iterator<Item = (NodeIndex, &Node<N>)> {
+ self.nodes.iter().enumerate().map(|(idx, n)| (NodeIndex(idx), n))
+ }
+
+ pub fn enumerated_edges(&self) -> impl Iterator<Item = (EdgeIndex, &Edge<E>)> {
+ self.edges.iter().enumerate().map(|(idx, e)| (EdgeIndex(idx), e))
+ }
+
+ pub fn each_node<'a>(&'a self, mut f: impl FnMut(NodeIndex, &'a Node<N>) -> bool) -> bool {
+ //! Iterates over all nodes defined in the graph.
+ self.enumerated_nodes().all(|(node_idx, node)| f(node_idx, node))
+ }
+
+ pub fn each_edge<'a>(&'a self, mut f: impl FnMut(EdgeIndex, &'a Edge<E>) -> bool) -> bool {
+ //! Iterates over all edges defined in the graph
+ self.enumerated_edges().all(|(edge_idx, edge)| f(edge_idx, edge))
+ }
+
+ pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges<'_, N, E> {
+ self.adjacent_edges(source, OUTGOING)
+ }
+
+ pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges<'_, N, E> {
+ self.adjacent_edges(source, INCOMING)
+ }
+
+ pub fn adjacent_edges(
+ &self,
+ source: NodeIndex,
+ direction: Direction,
+ ) -> AdjacentEdges<'_, N, E> {
+ let first_edge = self.node(source).first_edge[direction.repr];
+ AdjacentEdges { graph: self, direction, next: first_edge }
+ }
+
+ pub fn successor_nodes<'a>(
+ &'a self,
+ source: NodeIndex,
+ ) -> impl Iterator<Item = NodeIndex> + 'a {
+ self.outgoing_edges(source).targets()
+ }
+
+ pub fn predecessor_nodes<'a>(
+ &'a self,
+ target: NodeIndex,
+ ) -> impl Iterator<Item = NodeIndex> + 'a {
+ self.incoming_edges(target).sources()
+ }
+
+ pub fn depth_traverse(
+ &self,
+ start: NodeIndex,
+ direction: Direction,
+ ) -> DepthFirstTraversal<'_, N, E> {
+ DepthFirstTraversal::with_start_node(self, start, direction)
+ }
+
+ pub fn nodes_in_postorder(
+ &self,
+ direction: Direction,
+ entry_node: NodeIndex,
+ ) -> Vec<NodeIndex> {
+ let mut visited = BitSet::new_empty(self.len_nodes());
+ let mut stack = vec![];
+ let mut result = Vec::with_capacity(self.len_nodes());
+ let mut push_node = |stack: &mut Vec<_>, node: NodeIndex| {
+ if visited.insert(node.0) {
+ stack.push((node, self.adjacent_edges(node, direction)));
+ }
+ };
+
+ for node in
+ Some(entry_node).into_iter().chain(self.enumerated_nodes().map(|(node, _)| node))
+ {
+ push_node(&mut stack, node);
+ while let Some((node, mut iter)) = stack.pop() {
+ if let Some((_, child)) = iter.next() {
+ let target = child.source_or_target(direction);
+ // the current node needs more processing, so
+ // add it back to the stack
+ stack.push((node, iter));
+ // and then push the new node
+ push_node(&mut stack, target);
+ } else {
+ result.push(node);
+ }
+ }
+ }
+
+ assert_eq!(result.len(), self.len_nodes());
+ result
+ }
+}
+
+// # Iterators
+
+pub struct AdjacentEdges<'g, N, E> {
+ graph: &'g Graph<N, E>,
+ direction: Direction,
+ next: EdgeIndex,
+}
+
+impl<'g, N: Debug, E: Debug> AdjacentEdges<'g, N, E> {
+ fn targets(self) -> impl Iterator<Item = NodeIndex> + 'g {
+ self.map(|(_, edge)| edge.target)
+ }
+
+ fn sources(self) -> impl Iterator<Item = NodeIndex> + 'g {
+ self.map(|(_, edge)| edge.source)
+ }
+}
+
+impl<'g, N: Debug, E: Debug> Iterator for AdjacentEdges<'g, N, E> {
+ type Item = (EdgeIndex, &'g Edge<E>);
+
+ fn next(&mut self) -> Option<(EdgeIndex, &'g Edge<E>)> {
+ let edge_index = self.next;
+ if edge_index == INVALID_EDGE_INDEX {
+ return None;
+ }
+
+ let edge = self.graph.edge(edge_index);
+ self.next = edge.next_edge[self.direction.repr];
+ Some((edge_index, edge))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // At most, all the edges in the graph.
+ (0, Some(self.graph.len_edges()))
+ }
+}
+
+pub struct DepthFirstTraversal<'g, N, E> {
+ graph: &'g Graph<N, E>,
+ stack: Vec<NodeIndex>,
+ visited: BitSet<usize>,
+ direction: Direction,
+}
+
+impl<'g, N: Debug, E: Debug> DepthFirstTraversal<'g, N, E> {
+ pub fn with_start_node(
+ graph: &'g Graph<N, E>,
+ start_node: NodeIndex,
+ direction: Direction,
+ ) -> Self {
+ let mut visited = BitSet::new_empty(graph.len_nodes());
+ visited.insert(start_node.node_id());
+ DepthFirstTraversal { graph, stack: vec![start_node], visited, direction }
+ }
+
+ fn visit(&mut self, node: NodeIndex) {
+ if self.visited.insert(node.node_id()) {
+ self.stack.push(node);
+ }
+ }
+}
+
+impl<'g, N: Debug, E: Debug> Iterator for DepthFirstTraversal<'g, N, E> {
+ type Item = NodeIndex;
+
+ fn next(&mut self) -> Option<NodeIndex> {
+ let next = self.stack.pop();
+ if let Some(idx) = next {
+ for (_, edge) in self.graph.adjacent_edges(idx, self.direction) {
+ let target = edge.source_or_target(self.direction);
+ self.visit(target);
+ }
+ }
+ next
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // We will visit every node in the graph exactly once.
+ let remaining = self.graph.len_nodes() - self.visited.count();
+ (remaining, Some(remaining))
+ }
+}
+
+impl<'g, N: Debug, E: Debug> ExactSizeIterator for DepthFirstTraversal<'g, N, E> {}
+
+impl<E> Edge<E> {
+ pub fn source(&self) -> NodeIndex {
+ self.source
+ }
+
+ pub fn target(&self) -> NodeIndex {
+ self.target
+ }
+
+ pub fn source_or_target(&self, direction: Direction) -> NodeIndex {
+ if direction == OUTGOING { self.target } else { self.source }
+ }
+}
diff --git a/compiler/rustc_data_structures/src/graph/implementation/tests.rs b/compiler/rustc_data_structures/src/graph/implementation/tests.rs
new file mode 100644
index 000000000..e4e4d0d44
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/implementation/tests.rs
@@ -0,0 +1,131 @@
+use crate::graph::implementation::*;
+use std::fmt::Debug;
+
+type TestGraph = Graph<&'static str, &'static str>;
+
+fn create_graph() -> TestGraph {
+ let mut graph = Graph::new();
+
+ // Create a simple graph
+ //
+ // F
+ // |
+ // V
+ // A --> B --> C
+ // | ^
+ // v |
+ // D --> E
+
+ let a = graph.add_node("A");
+ let b = graph.add_node("B");
+ let c = graph.add_node("C");
+ let d = graph.add_node("D");
+ let e = graph.add_node("E");
+ let f = graph.add_node("F");
+
+ graph.add_edge(a, b, "AB");
+ graph.add_edge(b, c, "BC");
+ graph.add_edge(b, d, "BD");
+ graph.add_edge(d, e, "DE");
+ graph.add_edge(e, c, "EC");
+ graph.add_edge(f, b, "FB");
+
+ return graph;
+}
+
+#[test]
+fn each_node() {
+ let graph = create_graph();
+ let expected = ["A", "B", "C", "D", "E", "F"];
+ graph.each_node(|idx, node| {
+ assert_eq!(&expected[idx.0], graph.node_data(idx));
+ assert_eq!(expected[idx.0], node.data);
+ true
+ });
+}
+
+#[test]
+fn each_edge() {
+ let graph = create_graph();
+ let expected = ["AB", "BC", "BD", "DE", "EC", "FB"];
+ graph.each_edge(|idx, edge| {
+ assert_eq!(expected[idx.0], edge.data);
+ true
+ });
+}
+
+fn test_adjacent_edges<N: PartialEq + Debug, E: PartialEq + Debug>(
+ graph: &Graph<N, E>,
+ start_index: NodeIndex,
+ start_data: N,
+ expected_incoming: &[(E, N)],
+ expected_outgoing: &[(E, N)],
+) {
+ assert!(graph.node_data(start_index) == &start_data);
+
+ let mut counter = 0;
+ for (edge_index, edge) in graph.incoming_edges(start_index) {
+ assert!(counter < expected_incoming.len());
+ debug!(
+ "counter={:?} expected={:?} edge_index={:?} edge={:?}",
+ counter, expected_incoming[counter], edge_index, edge
+ );
+ match expected_incoming[counter] {
+ (ref e, ref n) => {
+ assert!(e == &edge.data);
+ assert!(n == graph.node_data(edge.source()));
+ assert!(start_index == edge.target);
+ }
+ }
+ counter += 1;
+ }
+ assert_eq!(counter, expected_incoming.len());
+
+ let mut counter = 0;
+ for (edge_index, edge) in graph.outgoing_edges(start_index) {
+ assert!(counter < expected_outgoing.len());
+ debug!(
+ "counter={:?} expected={:?} edge_index={:?} edge={:?}",
+ counter, expected_outgoing[counter], edge_index, edge
+ );
+ match expected_outgoing[counter] {
+ (ref e, ref n) => {
+ assert!(e == &edge.data);
+ assert!(start_index == edge.source);
+ assert!(n == graph.node_data(edge.target));
+ }
+ }
+ counter += 1;
+ }
+ assert_eq!(counter, expected_outgoing.len());
+}
+
+#[test]
+fn each_adjacent_from_a() {
+ let graph = create_graph();
+ test_adjacent_edges(&graph, NodeIndex(0), "A", &[], &[("AB", "B")]);
+}
+
+#[test]
+fn each_adjacent_from_b() {
+ let graph = create_graph();
+ test_adjacent_edges(
+ &graph,
+ NodeIndex(1),
+ "B",
+ &[("FB", "F"), ("AB", "A")],
+ &[("BD", "D"), ("BC", "C")],
+ );
+}
+
+#[test]
+fn each_adjacent_from_c() {
+ let graph = create_graph();
+ test_adjacent_edges(&graph, NodeIndex(2), "C", &[("EC", "E"), ("BC", "B")], &[]);
+}
+
+#[test]
+fn each_adjacent_from_d() {
+ let graph = create_graph();
+ test_adjacent_edges(&graph, NodeIndex(3), "D", &[("BD", "B")], &[("DE", "E")]);
+}
diff --git a/compiler/rustc_data_structures/src/graph/iterate/mod.rs b/compiler/rustc_data_structures/src/graph/iterate/mod.rs
new file mode 100644
index 000000000..57007611a
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/iterate/mod.rs
@@ -0,0 +1,353 @@
+use super::{DirectedGraph, WithNumNodes, WithStartNode, WithSuccessors};
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use std::ops::ControlFlow;
+
+#[cfg(test)]
+mod tests;
+
+pub fn post_order_from<G: DirectedGraph + WithSuccessors + WithNumNodes>(
+ graph: &G,
+ start_node: G::Node,
+) -> Vec<G::Node> {
+ post_order_from_to(graph, start_node, None)
+}
+
+pub fn post_order_from_to<G: DirectedGraph + WithSuccessors + WithNumNodes>(
+ graph: &G,
+ start_node: G::Node,
+ end_node: Option<G::Node>,
+) -> Vec<G::Node> {
+ let mut visited: IndexVec<G::Node, bool> = IndexVec::from_elem_n(false, graph.num_nodes());
+ let mut result: Vec<G::Node> = Vec::with_capacity(graph.num_nodes());
+ if let Some(end_node) = end_node {
+ visited[end_node] = true;
+ }
+ post_order_walk(graph, start_node, &mut result, &mut visited);
+ result
+}
+
+fn post_order_walk<G: DirectedGraph + WithSuccessors + WithNumNodes>(
+ graph: &G,
+ node: G::Node,
+ result: &mut Vec<G::Node>,
+ visited: &mut IndexVec<G::Node, bool>,
+) {
+ struct PostOrderFrame<Node, Iter> {
+ node: Node,
+ iter: Iter,
+ }
+
+ if visited[node] {
+ return;
+ }
+
+ let mut stack = vec![PostOrderFrame { node, iter: graph.successors(node) }];
+
+ 'recurse: while let Some(frame) = stack.last_mut() {
+ let node = frame.node;
+ visited[node] = true;
+
+ while let Some(successor) = frame.iter.next() {
+ if !visited[successor] {
+ stack.push(PostOrderFrame { node: successor, iter: graph.successors(successor) });
+ continue 'recurse;
+ }
+ }
+
+ let _ = stack.pop();
+ result.push(node);
+ }
+}
+
+pub fn reverse_post_order<G: DirectedGraph + WithSuccessors + WithNumNodes>(
+ graph: &G,
+ start_node: G::Node,
+) -> Vec<G::Node> {
+ let mut vec = post_order_from(graph, start_node);
+ vec.reverse();
+ vec
+}
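+
+// Illustrative note (not in the original source): for the diamond graph
+// `0 -> {1, 2} -> 3`, `post_order_from` yields `[3, 1, 2, 0]` (see the
+// `diamond_post_order` test), so `reverse_post_order` yields `[0, 2, 1, 3]`.
+// On an acyclic graph this places every node before all of its successors,
+// which is the property reverse post-order is typically used for.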
+
+/// A "depth-first search" iterator for a directed graph.
+pub struct DepthFirstSearch<'graph, G>
+where
+ G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+ graph: &'graph G,
+ stack: Vec<G::Node>,
+ visited: BitSet<G::Node>,
+}
+
+impl<'graph, G> DepthFirstSearch<'graph, G>
+where
+ G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+ pub fn new(graph: &'graph G) -> Self {
+ Self { graph, stack: vec![], visited: BitSet::new_empty(graph.num_nodes()) }
+ }
+
+ /// Version of `push_start_node` that is convenient for chained
+ /// use.
+ pub fn with_start_node(mut self, start_node: G::Node) -> Self {
+ self.push_start_node(start_node);
+ self
+ }
+
+ /// Pushes another start node onto the stack. If the node
+ /// has not already been visited, then you will be able to
+ /// walk its successors (and so forth) after the current
+ /// contents of the stack are drained. If multiple start nodes
+    /// are added into the walk, then the successors of all of
+    /// them will be walked. You can use this method once the
+ /// iterator has been completely drained to add additional
+ /// start nodes.
+ pub fn push_start_node(&mut self, start_node: G::Node) {
+ if self.visited.insert(start_node) {
+ self.stack.push(start_node);
+ }
+ }
+
+ /// Searches all nodes reachable from the current start nodes.
+    /// This is equivalent to just invoking `next` repeatedly until
+ /// you get a `None` result.
+ pub fn complete_search(&mut self) {
+ while let Some(_) = self.next() {}
+ }
+
+ /// Returns true if node has been visited thus far.
+ /// A node is considered "visited" once it is pushed
+ /// onto the internal stack; it may not yet have been yielded
+ /// from the iterator. This method is best used after
+ /// the iterator is completely drained.
+ pub fn visited(&self, node: G::Node) -> bool {
+ self.visited.contains(node)
+ }
+}
+
+impl<G> std::fmt::Debug for DepthFirstSearch<'_, G>
+where
+ G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let mut f = fmt.debug_set();
+ for n in self.visited.iter() {
+ f.entry(&n);
+ }
+ f.finish()
+ }
+}
+
+impl<G> Iterator for DepthFirstSearch<'_, G>
+where
+ G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+ type Item = G::Node;
+
+ fn next(&mut self) -> Option<G::Node> {
+ let DepthFirstSearch { stack, visited, graph } = self;
+ let n = stack.pop()?;
+ stack.extend(graph.successors(n).filter(|&m| visited.insert(m)));
+ Some(n)
+ }
+}
+
+/// The status of a node in the depth-first search.
+///
+/// See the documentation of `TriColorDepthFirstSearch` for how a node's status is updated
+/// during DFS.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum NodeStatus {
+ /// This node has been examined by the depth-first search but is not yet `Settled`.
+ ///
+ /// Also referred to as "gray" or "discovered" nodes in [CLR].
+ ///
+ /// [CLR]: https://en.wikipedia.org/wiki/Introduction_to_Algorithms
+ Visited,
+
+ /// This node and all nodes reachable from it have been examined by the depth-first search.
+ ///
+ /// Also referred to as "black" or "finished" nodes in [CLR].
+ ///
+ /// [CLR]: https://en.wikipedia.org/wiki/Introduction_to_Algorithms
+ Settled,
+}
+
+struct Event<N> {
+ node: N,
+ becomes: NodeStatus,
+}
+
+/// A depth-first search that also tracks when all successors of a node have been examined.
+///
+/// This is based on the DFS described in [Introduction to Algorithms (1st ed.)][CLR], hereafter
+/// referred to as **CLR**. However, we use the terminology in [`NodeStatus`] above instead of
+/// "discovered"/"finished" or "white"/"grey"/"black". Each node begins the search with no status,
+/// becomes `Visited` when it is first examined by the DFS and is `Settled` when all nodes
+/// reachable from it have been examined. This allows us to differentiate between "tree", "back"
+/// and "forward" edges (see [`TriColorVisitor::node_examined`]).
+///
+/// Unlike the pseudocode in [CLR], this implementation is iterative and does not use timestamps.
+/// We accomplish this by storing `Event`s on the stack that result in a (possible) state change
+/// for each node. A `Visited` event signifies that we should examine this node if it has not yet
+/// been `Visited` or `Settled`. When a node is examined for the first time, we mark it as
+/// `Visited` and push a `Settled` event for it on the stack, followed by `Visited` events for all
+/// of its successors, scheduling them for examination. Multiple `Visited` events for a single node
+/// may exist on the stack simultaneously if a node has multiple predecessors, but only one
+/// `Settled` event will ever be created for each node. After all `Visited` events for a node's
+/// successors have been popped off the stack (as well as any new events triggered by visiting
+/// those successors), we will pop off that node's `Settled` event.
+///
+/// [CLR]: https://en.wikipedia.org/wiki/Introduction_to_Algorithms
+pub struct TriColorDepthFirstSearch<'graph, G>
+where
+ G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+ graph: &'graph G,
+ stack: Vec<Event<G::Node>>,
+ visited: BitSet<G::Node>,
+ settled: BitSet<G::Node>,
+}
+
+impl<'graph, G> TriColorDepthFirstSearch<'graph, G>
+where
+ G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors,
+{
+ pub fn new(graph: &'graph G) -> Self {
+ TriColorDepthFirstSearch {
+ graph,
+ stack: vec![],
+ visited: BitSet::new_empty(graph.num_nodes()),
+ settled: BitSet::new_empty(graph.num_nodes()),
+ }
+ }
+
+ /// Performs a depth-first search, starting from the given `root`.
+ ///
+ /// This won't visit nodes that are not reachable from `root`.
+ pub fn run_from<V>(mut self, root: G::Node, visitor: &mut V) -> Option<V::BreakVal>
+ where
+ V: TriColorVisitor<G>,
+ {
+ use NodeStatus::{Settled, Visited};
+
+ self.stack.push(Event { node: root, becomes: Visited });
+
+ loop {
+ match self.stack.pop()? {
+ Event { node, becomes: Settled } => {
+ let not_previously_settled = self.settled.insert(node);
+ assert!(not_previously_settled, "A node should be settled exactly once");
+ if let ControlFlow::Break(val) = visitor.node_settled(node) {
+ return Some(val);
+ }
+ }
+
+ Event { node, becomes: Visited } => {
+ let not_previously_visited = self.visited.insert(node);
+ let prior_status = if not_previously_visited {
+ None
+ } else if self.settled.contains(node) {
+ Some(Settled)
+ } else {
+ Some(Visited)
+ };
+
+ if let ControlFlow::Break(val) = visitor.node_examined(node, prior_status) {
+ return Some(val);
+ }
+
+ // If this node has already been examined, we are done.
+ if prior_status.is_some() {
+ continue;
+ }
+
+ // Otherwise, push a `Settled` event for this node onto the stack, then
+ // schedule its successors for examination.
+ self.stack.push(Event { node, becomes: Settled });
+ for succ in self.graph.successors(node) {
+ if !visitor.ignore_edge(node, succ) {
+ self.stack.push(Event { node: succ, becomes: Visited });
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<G> TriColorDepthFirstSearch<'_, G>
+where
+ G: ?Sized + DirectedGraph + WithNumNodes + WithSuccessors + WithStartNode,
+{
+ /// Performs a depth-first search, starting from `G::start_node()`.
+ ///
+ /// This won't visit nodes that are not reachable from the start node.
+ pub fn run_from_start<V>(self, visitor: &mut V) -> Option<V::BreakVal>
+ where
+ V: TriColorVisitor<G>,
+ {
+ let root = self.graph.start_node();
+ self.run_from(root, visitor)
+ }
+}
+
+/// What to do when a node is examined or becomes `Settled` during DFS.
+pub trait TriColorVisitor<G>
+where
+ G: ?Sized + DirectedGraph,
+{
+ /// The value returned by this search.
+ type BreakVal;
+
+ /// Called when a node is examined by the depth-first search.
+ ///
+ /// By checking the value of `prior_status`, this visitor can determine whether the edge
+ /// leading to this node was a tree edge (`None`), forward edge (`Some(Settled)`) or back edge
+ /// (`Some(Visited)`). For a full explanation of each edge type, see the "Depth-first Search"
+ /// chapter in [CLR] or [wikipedia].
+ ///
+ /// If you want to know *both* nodes linked by each edge, you'll need to modify
+ /// `TriColorDepthFirstSearch` to store a `source` node for each `Visited` event.
+ ///
+ /// [wikipedia]: https://en.wikipedia.org/wiki/Depth-first_search#Output_of_a_depth-first_search
+ /// [CLR]: https://en.wikipedia.org/wiki/Introduction_to_Algorithms
+ fn node_examined(
+ &mut self,
+ _node: G::Node,
+ _prior_status: Option<NodeStatus>,
+ ) -> ControlFlow<Self::BreakVal> {
+ ControlFlow::CONTINUE
+ }
+
+ /// Called after all nodes reachable from this one have been examined.
+ fn node_settled(&mut self, _node: G::Node) -> ControlFlow<Self::BreakVal> {
+ ControlFlow::CONTINUE
+ }
+
+ /// Behave as if no edges exist from `source` to `target`.
+ fn ignore_edge(&mut self, _source: G::Node, _target: G::Node) -> bool {
+ false
+ }
+}
+
+/// This `TriColorVisitor` looks for back edges in a graph, which indicate that a cycle exists.
+pub struct CycleDetector;
+
+impl<G> TriColorVisitor<G> for CycleDetector
+where
+ G: ?Sized + DirectedGraph,
+{
+ type BreakVal = ();
+
+ fn node_examined(
+ &mut self,
+ _node: G::Node,
+ prior_status: Option<NodeStatus>,
+ ) -> ControlFlow<Self::BreakVal> {
+ match prior_status {
+ Some(NodeStatus::Visited) => ControlFlow::BREAK,
+ _ => ControlFlow::CONTINUE,
+ }
+ }
+}
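+
+// Illustrative sketch (not part of the original source): a minimal
+// `TriColorVisitor` that records nodes in the order they become `Settled`.
+// It relies on the `TestGraph` helper from the sibling `tests` module.
+#[cfg(test)]
+mod settled_order_sketch {
+    use super::super::tests::TestGraph;
+    use super::*;
+    use std::ops::ControlFlow;
+
+    struct PostOrderCollector {
+        order: Vec<usize>,
+    }
+
+    impl TriColorVisitor<TestGraph> for PostOrderCollector {
+        type BreakVal = ();
+
+        fn node_settled(&mut self, node: usize) -> ControlFlow<()> {
+            // Record each node once everything reachable from it is examined.
+            self.order.push(node);
+            ControlFlow::CONTINUE
+        }
+    }
+
+    #[test]
+    fn settled_events_form_a_post_order() {
+        let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+        let mut visitor = PostOrderCollector { order: vec![] };
+        let _ = TriColorDepthFirstSearch::new(&graph).run_from_start(&mut visitor);
+        // Successors are pushed in order and popped LIFO, hence `2` settles
+        // before `1`.
+        assert_eq!(visitor.order, vec![3, 2, 1, 0]);
+    }
+}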
diff --git a/compiler/rustc_data_structures/src/graph/iterate/tests.rs b/compiler/rustc_data_structures/src/graph/iterate/tests.rs
new file mode 100644
index 000000000..c498c2893
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/iterate/tests.rs
@@ -0,0 +1,38 @@
+use super::super::tests::TestGraph;
+
+use super::*;
+
+#[test]
+fn diamond_post_order() {
+ let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+
+ let result = post_order_from(&graph, 0);
+ assert_eq!(result, vec![3, 1, 2, 0]);
+}
+
+#[test]
+fn is_cyclic() {
+ use super::super::is_cyclic;
+
+ let diamond_acyclic = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+ let diamond_cyclic = TestGraph::new(0, &[(0, 1), (1, 2), (2, 3), (3, 0)]);
+
+ assert!(!is_cyclic(&diamond_acyclic));
+ assert!(is_cyclic(&diamond_cyclic));
+}
+
+#[test]
+fn dfs() {
+ let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3), (3, 0)]);
+
+ let result: Vec<usize> = DepthFirstSearch::new(&graph).with_start_node(0).collect();
+ assert_eq!(result, vec![0, 2, 3, 1]);
+}
+
+#[test]
+fn dfs_debug() {
+ let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3), (3, 0)]);
+ let mut dfs = DepthFirstSearch::new(&graph).with_start_node(0);
+ dfs.complete_search();
+ assert_eq!(format!("{{0, 1, 2, 3}}"), format!("{:?}", dfs));
+}
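+
+// Illustrative additions (not in the original test suite): exercising
+// `post_order_from_to` with an explicit end node, and restarting a drained
+// `DepthFirstSearch` from a second start node.
+#[test]
+fn post_order_with_end_node() {
+    let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+
+    // Marking `3` as the end node pre-sets it as visited, so it is excluded.
+    let result = post_order_from_to(&graph, 0, Some(3));
+    assert_eq!(result, vec![1, 2, 0]);
+}
+
+#[test]
+fn dfs_with_second_start_node() {
+    // Two disconnected components: `0 -> 1` and `2 -> 3`.
+    let graph = TestGraph::new(0, &[(0, 1), (2, 3)]);
+
+    let mut dfs = DepthFirstSearch::new(&graph).with_start_node(0);
+    dfs.complete_search();
+    assert!(dfs.visited(1));
+    assert!(!dfs.visited(2));
+
+    // Once the iterator is drained, another start node may be pushed.
+    dfs.push_start_node(2);
+    let rest: Vec<usize> = dfs.by_ref().collect();
+    assert_eq!(rest, vec![2, 3]);
+    assert!(dfs.visited(3));
+}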
diff --git a/compiler/rustc_data_structures/src/graph/mod.rs b/compiler/rustc_data_structures/src/graph/mod.rs
new file mode 100644
index 000000000..3560df6e5
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/mod.rs
@@ -0,0 +1,81 @@
+use rustc_index::vec::Idx;
+
+pub mod dominators;
+pub mod implementation;
+pub mod iterate;
+mod reference;
+pub mod scc;
+pub mod vec_graph;
+
+#[cfg(test)]
+mod tests;
+
+pub trait DirectedGraph {
+ type Node: Idx;
+}
+
+pub trait WithNumNodes: DirectedGraph {
+ fn num_nodes(&self) -> usize;
+}
+
+pub trait WithNumEdges: DirectedGraph {
+ fn num_edges(&self) -> usize;
+}
+
+pub trait WithSuccessors: DirectedGraph
+where
+ Self: for<'graph> GraphSuccessors<'graph, Item = <Self as DirectedGraph>::Node>,
+{
+ fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter;
+
+ fn depth_first_search(&self, from: Self::Node) -> iterate::DepthFirstSearch<'_, Self>
+ where
+ Self: WithNumNodes,
+ {
+ iterate::DepthFirstSearch::new(self).with_start_node(from)
+ }
+}
+
+#[allow(unused_lifetimes)]
+pub trait GraphSuccessors<'graph> {
+ type Item;
+ type Iter: Iterator<Item = Self::Item>;
+}
+
+pub trait WithPredecessors: DirectedGraph
+where
+ Self: for<'graph> GraphPredecessors<'graph, Item = <Self as DirectedGraph>::Node>,
+{
+ fn predecessors(&self, node: Self::Node) -> <Self as GraphPredecessors<'_>>::Iter;
+}
+
+#[allow(unused_lifetimes)]
+pub trait GraphPredecessors<'graph> {
+ type Item;
+ type Iter: Iterator<Item = Self::Item>;
+}
+
+pub trait WithStartNode: DirectedGraph {
+ fn start_node(&self) -> Self::Node;
+}
+
+pub trait ControlFlowGraph:
+ DirectedGraph + WithStartNode + WithPredecessors + WithSuccessors + WithNumNodes
+{
+    // convenience trait
+}
+
+impl<T> ControlFlowGraph for T where
+ T: DirectedGraph + WithStartNode + WithPredecessors + WithSuccessors + WithNumNodes
+{
+}
+
+/// Returns `true` if the graph has a cycle that is reachable from the start node.
+pub fn is_cyclic<G>(graph: &G) -> bool
+where
+ G: ?Sized + DirectedGraph + WithStartNode + WithSuccessors + WithNumNodes,
+{
+ iterate::TriColorDepthFirstSearch::new(graph)
+ .run_from_start(&mut iterate::CycleDetector)
+ .is_some()
+}
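+
+// Illustrative note (not in the original source): `is_cyclic` only inspects
+// the portion of the graph reachable from the start node, so a cycle in an
+// unreachable component goes undetected. With the `TestGraph` helper from the
+// test module:
+//
+//     let g = TestGraph::new(0, &[(0, 1), (2, 3), (3, 2)]);
+//     assert!(!is_cyclic(&g)); // the 2 <-> 3 cycle is unreachable from 0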
diff --git a/compiler/rustc_data_structures/src/graph/reference.rs b/compiler/rustc_data_structures/src/graph/reference.rs
new file mode 100644
index 000000000..c259fe56c
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/reference.rs
@@ -0,0 +1,39 @@
+use super::*;
+
+impl<'graph, G: DirectedGraph> DirectedGraph for &'graph G {
+ type Node = G::Node;
+}
+
+impl<'graph, G: WithNumNodes> WithNumNodes for &'graph G {
+ fn num_nodes(&self) -> usize {
+ (**self).num_nodes()
+ }
+}
+
+impl<'graph, G: WithStartNode> WithStartNode for &'graph G {
+ fn start_node(&self) -> Self::Node {
+ (**self).start_node()
+ }
+}
+
+impl<'graph, G: WithSuccessors> WithSuccessors for &'graph G {
+ fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
+ (**self).successors(node)
+ }
+}
+
+impl<'graph, G: WithPredecessors> WithPredecessors for &'graph G {
+ fn predecessors(&self, node: Self::Node) -> <Self as GraphPredecessors<'_>>::Iter {
+ (**self).predecessors(node)
+ }
+}
+
+impl<'iter, 'graph, G: WithPredecessors> GraphPredecessors<'iter> for &'graph G {
+ type Item = G::Node;
+ type Iter = <G as GraphPredecessors<'iter>>::Iter;
+}
+
+impl<'iter, 'graph, G: WithSuccessors> GraphSuccessors<'iter> for &'graph G {
+ type Item = G::Node;
+ type Iter = <G as GraphSuccessors<'iter>>::Iter;
+}
diff --git a/compiler/rustc_data_structures/src/graph/scc/mod.rs b/compiler/rustc_data_structures/src/graph/scc/mod.rs
new file mode 100644
index 000000000..7099ca7eb
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/scc/mod.rs
@@ -0,0 +1,567 @@
+//! Routine to compute the strongly connected components (SCCs) of a graph.
+//!
+//! Also computes the DAG that results if each SCC is collapsed into a
+//! single node. This uses [Tarjan's algorithm](
+//! https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm),
+//! which completes in linear time.
+
+use crate::fx::FxHashSet;
+use crate::graph::vec_graph::VecGraph;
+use crate::graph::{DirectedGraph, GraphSuccessors, WithNumEdges, WithNumNodes, WithSuccessors};
+use rustc_index::vec::{Idx, IndexVec};
+use std::cmp::Ord;
+use std::ops::Range;
+
+#[cfg(test)]
+mod tests;
+
+/// Strongly connected components (SCC) of a graph. The type `N` is
+/// the index type for the graph nodes and `S` is the index type for
+/// the SCCs. We can map from each node to the SCC that it
+/// participates in, and we also have the successors of each SCC.
+pub struct Sccs<N: Idx, S: Idx> {
+ /// For each node, what is the SCC index of the SCC to which it
+ /// belongs.
+ scc_indices: IndexVec<N, S>,
+
+ /// Data about each SCC.
+ scc_data: SccData<S>,
+}
+
+struct SccData<S: Idx> {
+ /// For each SCC, the range of `all_successors` where its
+ /// successors can be found.
+ ranges: IndexVec<S, Range<usize>>,
+
+ /// Contains the successors for all the Sccs, concatenated. The
+ /// range of indices corresponding to a given SCC is found in its
+ /// SccData.
+ all_successors: Vec<S>,
+}
+
+impl<N: Idx, S: Idx + Ord> Sccs<N, S> {
+ pub fn new(graph: &(impl DirectedGraph<Node = N> + WithNumNodes + WithSuccessors)) -> Self {
+ SccsConstruction::construct(graph)
+ }
+
+ /// Returns the number of SCCs in the graph.
+ pub fn num_sccs(&self) -> usize {
+ self.scc_data.len()
+ }
+
+ /// Returns an iterator over the SCCs in the graph.
+ ///
+ /// The SCCs will be iterated in **dependency order** (or **post order**),
+ /// meaning that if `S1 -> S2`, we will visit `S2` first and `S1` after.
+ /// This is convenient when the edges represent dependencies: when you visit
+ /// `S1`, the value for `S2` will already have been computed.
+ pub fn all_sccs(&self) -> impl Iterator<Item = S> {
+ (0..self.scc_data.len()).map(S::new)
+ }
+
+ /// Returns the SCC to which a node `r` belongs.
+ pub fn scc(&self, r: N) -> S {
+ self.scc_indices[r]
+ }
+
+ /// Returns the successors of the given SCC.
+ pub fn successors(&self, scc: S) -> &[S] {
+ self.scc_data.successors(scc)
+ }
+
+ /// Construct the reverse graph of the SCC graph.
+ pub fn reverse(&self) -> VecGraph<S> {
+ VecGraph::new(
+ self.num_sccs(),
+ self.all_sccs()
+ .flat_map(|source| {
+ self.successors(source).iter().map(move |&target| (target, source))
+ })
+ .collect(),
+ )
+ }
+}
+
+impl<N: Idx, S: Idx> DirectedGraph for Sccs<N, S> {
+ type Node = S;
+}
+
+impl<N: Idx, S: Idx + Ord> WithNumNodes for Sccs<N, S> {
+ fn num_nodes(&self) -> usize {
+ self.num_sccs()
+ }
+}
+
+impl<N: Idx, S: Idx> WithNumEdges for Sccs<N, S> {
+ fn num_edges(&self) -> usize {
+ self.scc_data.all_successors.len()
+ }
+}
+
+impl<'graph, N: Idx, S: Idx> GraphSuccessors<'graph> for Sccs<N, S> {
+ type Item = S;
+
+ type Iter = std::iter::Cloned<std::slice::Iter<'graph, S>>;
+}
+
+impl<N: Idx, S: Idx + Ord> WithSuccessors for Sccs<N, S> {
+ fn successors(&self, node: S) -> <Self as GraphSuccessors<'_>>::Iter {
+ self.successors(node).iter().cloned()
+ }
+}
+
+impl<S: Idx> SccData<S> {
+    /// Number of SCCs.
+ fn len(&self) -> usize {
+ self.ranges.len()
+ }
+
+ /// Returns the successors of the given SCC.
+ fn successors(&self, scc: S) -> &[S] {
+ // Annoyingly, `range` does not implement `Copy`, so we have
+ // to do `range.start..range.end`:
+ let range = &self.ranges[scc];
+ &self.all_successors[range.start..range.end]
+ }
+
+ /// Creates a new SCC with `successors` as its successors and
+ /// returns the resulting index.
+ fn create_scc(&mut self, successors: impl IntoIterator<Item = S>) -> S {
+ // Store the successors on `scc_successors_vec`, remembering
+ // the range of indices.
+ let all_successors_start = self.all_successors.len();
+ self.all_successors.extend(successors);
+ let all_successors_end = self.all_successors.len();
+
+ debug!(
+ "create_scc({:?}) successors={:?}",
+ self.ranges.len(),
+ &self.all_successors[all_successors_start..all_successors_end],
+ );
+
+ self.ranges.push(all_successors_start..all_successors_end)
+ }
+}
+
+struct SccsConstruction<'c, G: DirectedGraph + WithNumNodes + WithSuccessors, S: Idx> {
+ graph: &'c G,
+
+ /// The state of each node; used during walk to record the stack
+ /// and after walk to record what cycle each node ended up being
+ /// in.
+ node_states: IndexVec<G::Node, NodeState<G::Node, S>>,
+
+ /// The stack of nodes that we are visiting as part of the DFS.
+ node_stack: Vec<G::Node>,
+
+ /// The stack of successors: as we visit a node, we mark our
+ /// position in this stack, and when we encounter a successor SCC,
+ /// we push it on the stack. When we complete an SCC, we can pop
+ /// everything off the stack that was found along the way.
+ successors_stack: Vec<S>,
+
+ /// A set used to strip duplicates. As we accumulate successors
+ /// into the successors_stack, we sometimes get duplicate entries.
+ /// We use this set to remove those -- we also keep its storage
+ /// around between successors to amortize memory allocation costs.
+ duplicate_set: FxHashSet<S>,
+
+ scc_data: SccData<S>,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum NodeState<N, S> {
+ /// This node has not yet been visited as part of the DFS.
+ ///
+ /// After SCC construction is complete, this state ought to be
+ /// impossible.
+ NotVisited,
+
+    /// This node is currently being walked as part of our DFS. It is on
+ /// the stack at the depth `depth`.
+ ///
+ /// After SCC construction is complete, this state ought to be
+ /// impossible.
+ BeingVisited { depth: usize },
+
+ /// Indicates that this node is a member of the given cycle.
+ InCycle { scc_index: S },
+
+ /// Indicates that this node is a member of whatever cycle
+ /// `parent` is a member of. This state is transient: whenever we
+ /// see it, we try to overwrite it with the current state of
+ /// `parent` (this is the "path compression" step of a union-find
+ /// algorithm).
+ InCycleWith { parent: N },
+}
+
+#[derive(Copy, Clone, Debug)]
+enum WalkReturn<S> {
+ Cycle { min_depth: usize },
+ Complete { scc_index: S },
+}
+
+impl<'c, G, S> SccsConstruction<'c, G, S>
+where
+ G: DirectedGraph + WithNumNodes + WithSuccessors,
+ S: Idx,
+{
+ /// Identifies SCCs in the graph `G` and computes the resulting
+ /// DAG. This uses a variant of [Tarjan's
+ /// algorithm][wikipedia]. The high-level summary of the algorithm
+ /// is that we do a depth-first search. Along the way, we keep a
+ /// stack of each node whose successors are being visited. We
+ /// track the depth of each node on this stack (there is no depth
+ /// if the node is not on the stack). When we find that some node
+ /// N with depth D can reach some other node N' with lower depth
+ /// D' (i.e., D' < D), we know that N, N', and all nodes in
+ /// between them on the stack are part of an SCC.
+ ///
+ /// [wikipedia]: https://bit.ly/2EZIx84
+ fn construct(graph: &'c G) -> Sccs<G::Node, S> {
+ let num_nodes = graph.num_nodes();
+
+ let mut this = Self {
+ graph,
+ node_states: IndexVec::from_elem_n(NodeState::NotVisited, num_nodes),
+ node_stack: Vec::with_capacity(num_nodes),
+ successors_stack: Vec::new(),
+ scc_data: SccData { ranges: IndexVec::new(), all_successors: Vec::new() },
+ duplicate_set: FxHashSet::default(),
+ };
+
+ let scc_indices = (0..num_nodes)
+ .map(G::Node::new)
+ .map(|node| match this.start_walk_from(node) {
+ WalkReturn::Complete { scc_index } => scc_index,
+ WalkReturn::Cycle { min_depth } => panic!(
+ "`start_walk_node({:?})` returned cycle with depth {:?}",
+ node, min_depth
+ ),
+ })
+ .collect();
+
+ Sccs { scc_indices, scc_data: this.scc_data }
+ }
+
+ fn start_walk_from(&mut self, node: G::Node) -> WalkReturn<S> {
+ if let Some(result) = self.inspect_node(node) {
+ result
+ } else {
+ self.walk_unvisited_node(node)
+ }
+ }
+
+ /// Inspect a node during the DFS. We first examine its current
+ /// state -- if it is not yet visited (`NotVisited`), return `None` so
+ /// that the caller might push it onto the stack and start walking its
+ /// successors.
+ ///
+ /// If it is already on the DFS stack it will be in the state
+ /// `BeingVisited`. In that case, we have found a cycle and we
+ /// return the depth from the stack.
+ ///
+ /// Otherwise, we are looking at a node that has already been
+ /// completely visited. We therefore return `WalkReturn::Complete`
+ /// with its associated SCC index.
+ fn inspect_node(&mut self, node: G::Node) -> Option<WalkReturn<S>> {
+ Some(match self.find_state(node) {
+ NodeState::InCycle { scc_index } => WalkReturn::Complete { scc_index },
+
+ NodeState::BeingVisited { depth: min_depth } => WalkReturn::Cycle { min_depth },
+
+ NodeState::NotVisited => return None,
+
+ NodeState::InCycleWith { parent } => panic!(
+ "`find_state` returned `InCycleWith({:?})`, which ought to be impossible",
+ parent
+ ),
+ })
+ }
+
+ /// Fetches the state of the node `r`. If `r` is recorded as being
+ /// in a cycle with some other node `r2`, then fetches the state
+ /// of `r2` (and updates `r` to reflect current result). This is
+ /// basically the "find" part of a standard union-find algorithm
+ /// (with path compression).
+ fn find_state(&mut self, mut node: G::Node) -> NodeState<G::Node, S> {
+ // To avoid recursion we temporarily reuse the `parent` of each
+ // InCycleWith link to encode a downwards link while compressing
+ // the path. After we have found the root or deepest node being
+ // visited, we traverse the reverse links and correct the node
+ // states on the way.
+ //
+ // **Note**: This mutation requires that this is a leaf function
+ // or at least that none of the called functions inspects the
+ // current node states. Luckily, we are a leaf.
+
+        // Remember one previous link. The termination condition when
+        // following links downwards is then simply reaching the initial
+        // self-loop.
+ let mut previous_node = node;
+
+ // Ultimately assigned by the parent when following
+ // `InCycleWith` upwards.
+ let node_state = loop {
+ debug!("find_state(r = {:?} in state {:?})", node, self.node_states[node]);
+ match self.node_states[node] {
+ NodeState::InCycle { scc_index } => break NodeState::InCycle { scc_index },
+ NodeState::BeingVisited { depth } => break NodeState::BeingVisited { depth },
+ NodeState::NotVisited => break NodeState::NotVisited,
+ NodeState::InCycleWith { parent } => {
+ // We test this, to be extremely sure that we never
+ // ever break our termination condition for the
+ // reverse iteration loop.
+                    assert!(node != parent, "Node cannot be in a cycle with itself");
+ // Store the previous node as an inverted list link
+ self.node_states[node] = NodeState::InCycleWith { parent: previous_node };
+ // Update to parent node.
+ previous_node = node;
+ node = parent;
+ }
+ }
+ };
+
+ // The states form a graph where up to one outgoing link is stored at
+ // each node. Initially in general,
+ //
+ // E
+ // ^
+ // |
+ // InCycleWith/BeingVisited/NotVisited
+ // |
+ // A-InCycleWith->B-InCycleWith…>C-InCycleWith->D-+
+ // |
+ // = node, previous_node
+ //
+ // After the first loop, this will look like
+ // E
+ // ^
+ // |
+ // InCycleWith/BeingVisited/NotVisited
+ // |
+ // +>A<-InCycleWith-B<…InCycleWith-C<-InCycleWith-D-+
+ // | | | |
+ // | InCycleWith | = node
+ // +-+ =previous_node
+ //
+ // Note in particular that A will be linked to itself in a self-cycle
+ // and no other self-cycles occur due to how InCycleWith is assigned in
+ // the find phase implemented by `walk_unvisited_node`.
+ //
+        // We now want to compress the path, that is, assign the state of the
+ // link D-E to all other links.
+ //
+ // We can then walk backwards, starting from `previous_node`, and assign
+ // each node in the list with the updated state. The loop terminates
+ // when we reach the self-cycle.
+
+        // Move backwards until we find the node where we started. We
+ // will know when we hit the state where previous_node == node.
+ loop {
+ // Back at the beginning, we can return.
+ if previous_node == node {
+ return node_state;
+ }
+ // Update to previous node in the link.
+ match self.node_states[previous_node] {
+ NodeState::InCycleWith { parent: previous } => {
+ node = previous_node;
+ previous_node = previous;
+ }
+ // Only InCycleWith nodes were added to the reverse linked list.
+ other => panic!("Invalid previous link while compressing cycle: {:?}", other),
+ }
+
+ debug!("find_state: parent_state = {:?}", node_state);
+
+ // Update the node state from the parent state. The assigned
+ // state is actually a loop invariant but it will only be
+ // evaluated if there is at least one backlink to follow.
+ // Fully trusting llvm here to find this loop optimization.
+ match node_state {
+ // Path compression, make current node point to the same root.
+ NodeState::InCycle { .. } => {
+ self.node_states[node] = node_state;
+ }
+ // Still visiting nodes, compress to cycle to the node
+ // at that depth.
+ NodeState::BeingVisited { depth } => {
+ self.node_states[node] =
+ NodeState::InCycleWith { parent: self.node_stack[depth] };
+ }
+ // These are never allowed as parent nodes. InCycleWith
+ // should have been followed to a real parent and
+ // NotVisited can not be part of a cycle since it should
+ // have instead gotten explored.
+ NodeState::NotVisited | NodeState::InCycleWith { .. } => {
+ panic!("invalid parent state: {:?}", node_state)
+ }
+ }
+ }
+ }
+
+ /// Walks a node that has never been visited before.
+ ///
+ /// Call this method when `inspect_node` has returned `None`. Having the
+ /// caller decide avoids mutual recursion between the two methods and allows
+ /// us to maintain an allocated stack for nodes on the path between calls.
+ #[instrument(skip(self, initial), level = "debug")]
+ fn walk_unvisited_node(&mut self, initial: G::Node) -> WalkReturn<S> {
+ struct VisitingNodeFrame<G: DirectedGraph, Successors> {
+ node: G::Node,
+ iter: Option<Successors>,
+ depth: usize,
+ min_depth: usize,
+ successors_len: usize,
+ min_cycle_root: G::Node,
+ successor_node: G::Node,
+ }
+
+ // Move the stack to a local variable. We want to utilize the existing allocation and
+ // mutably borrow it without borrowing self at the same time.
+ let mut successors_stack = core::mem::take(&mut self.successors_stack);
+ debug_assert_eq!(successors_stack.len(), 0);
+
+ let mut stack: Vec<VisitingNodeFrame<G, _>> = vec![VisitingNodeFrame {
+ node: initial,
+ depth: 0,
+ min_depth: 0,
+ iter: None,
+ successors_len: 0,
+ min_cycle_root: initial,
+ successor_node: initial,
+ }];
+
+ let mut return_value = None;
+
+ 'recurse: while let Some(frame) = stack.last_mut() {
+ let VisitingNodeFrame {
+ node,
+ depth,
+ iter,
+ successors_len,
+ min_depth,
+ min_cycle_root,
+ successor_node,
+ } = frame;
+
+ let node = *node;
+ let depth = *depth;
+
+ let successors = match iter {
+ Some(iter) => iter,
+ None => {
+                    // This `None` marks that we still have to initialize this node's frame.
+ debug!(?depth, ?node);
+
+ debug_assert!(matches!(self.node_states[node], NodeState::NotVisited));
+
+ // Push `node` onto the stack.
+ self.node_states[node] = NodeState::BeingVisited { depth };
+ self.node_stack.push(node);
+
+ // Walk each successor of the node, looking to see if any of
+ // them can reach a node that is presently on the stack. If
+ // so, that means they can also reach us.
+ *successors_len = successors_stack.len();
+                    // Set the iterator, returning a reference to it (it was `None` until now).
+ iter.get_or_insert(self.graph.successors(node))
+ }
+ };
+
+ // Now that iter is initialized, this is a constant for this frame.
+ let successors_len = *successors_len;
+
+ // Construct iterators for the nodes and walk results. There are two cases:
+ // * The walk of a successor node returned.
+ // * The remaining successor nodes.
+ let returned_walk =
+ return_value.take().into_iter().map(|walk| (*successor_node, Some(walk)));
+
+ let successor_walk = successors.by_ref().map(|successor_node| {
+ debug!(?node, ?successor_node);
+ (successor_node, self.inspect_node(successor_node))
+ });
+
+ for (successor_node, walk) in returned_walk.chain(successor_walk) {
+ match walk {
+ Some(WalkReturn::Cycle { min_depth: successor_min_depth }) => {
+ // Track the minimum depth we can reach.
+ assert!(successor_min_depth <= depth);
+ if successor_min_depth < *min_depth {
+ debug!(?node, ?successor_min_depth);
+ *min_depth = successor_min_depth;
+ *min_cycle_root = successor_node;
+ }
+ }
+
+ Some(WalkReturn::Complete { scc_index: successor_scc_index }) => {
+ // Push the completed SCC indices onto
+ // the `successors_stack` for later.
+ debug!(?node, ?successor_scc_index);
+ successors_stack.push(successor_scc_index);
+ }
+
+ None => {
+ let depth = depth + 1;
+ debug!(?depth, ?successor_node);
+ // Remember which node the return value will come from.
+ frame.successor_node = successor_node;
+                        // Start a new stack frame and step into it.
+ stack.push(VisitingNodeFrame {
+ node: successor_node,
+ depth,
+ iter: None,
+ successors_len: 0,
+ min_depth: depth,
+ min_cycle_root: successor_node,
+ successor_node,
+ });
+ continue 'recurse;
+ }
+ }
+ }
+
+ // Completed walk, remove `node` from the stack.
+ let r = self.node_stack.pop();
+ debug_assert_eq!(r, Some(node));
+
+ // Remove the frame, it's done.
+ let frame = stack.pop().unwrap();
+
+ // If `min_depth == depth`, then we are the root of the
+ // cycle: we can't reach anyone further down the stack.
+
+ // Pass the 'return value' down the stack.
+ // We return one frame at a time so there can't be another return value.
+ debug_assert!(return_value.is_none());
+ return_value = Some(if frame.min_depth == depth {
+            // Note that the successor stack may have duplicates, so we
+ // want to remove those:
+ let deduplicated_successors = {
+ let duplicate_set = &mut self.duplicate_set;
+ duplicate_set.clear();
+ successors_stack
+ .drain(successors_len..)
+ .filter(move |&i| duplicate_set.insert(i))
+ };
+ let scc_index = self.scc_data.create_scc(deduplicated_successors);
+ self.node_states[node] = NodeState::InCycle { scc_index };
+ WalkReturn::Complete { scc_index }
+ } else {
+ // We are not the head of the cycle. Return back to our
+ // caller. They will take ownership of the
+ // `self.successors` data that we pushed.
+ self.node_states[node] = NodeState::InCycleWith { parent: frame.min_cycle_root };
+ WalkReturn::Cycle { min_depth: frame.min_depth }
+ });
+ }
+
+ // Keep the allocation we used for successors_stack.
+ self.successors_stack = successors_stack;
+ debug_assert_eq!(self.successors_stack.len(), 0);
+
+ return_value.unwrap()
+ }
+}
diff --git a/compiler/rustc_data_structures/src/graph/scc/tests.rs b/compiler/rustc_data_structures/src/graph/scc/tests.rs
new file mode 100644
index 000000000..9940fee60
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/scc/tests.rs
@@ -0,0 +1,216 @@
+extern crate test;
+
+use super::*;
+use crate::graph::tests::TestGraph;
+
+#[test]
+fn diamond() {
+ let graph = TestGraph::new(0, &[(0, 1), (0, 2), (1, 3), (2, 3)]);
+ let sccs: Sccs<_, usize> = Sccs::new(&graph);
+ assert_eq!(sccs.num_sccs(), 4);
+}
+
+#[test]
+fn test_big_scc() {
+ // The order in which things will be visited is important to this
+ // test.
+ //
+ // We will visit:
+ //
+ // 0 -> 1 -> 2 -> 0
+ //
+ // and at this point detect a cycle. 2 will return back to 1 which
+ // will visit 3. 3 will visit 2 before the cycle is complete, and
+ // hence it too will return a cycle.
+
+ /*
+ +-> 0
+ | |
+ | v
+ | 1 -> 3
+ | | |
+ | v |
+ +-- 2 <--+
+ */
+ let graph = TestGraph::new(0, &[(0, 1), (1, 2), (1, 3), (2, 0), (3, 2)]);
+ let sccs: Sccs<_, usize> = Sccs::new(&graph);
+ assert_eq!(sccs.num_sccs(), 1);
+}
+
+#[test]
+fn test_three_sccs() {
+ /*
+ 0
+ |
+ v
+ +-> 1 3
+ | | |
+ | v |
+ +-- 2 <--+
+ */
+ let graph = TestGraph::new(0, &[(0, 1), (1, 2), (2, 1), (3, 2)]);
+ let sccs: Sccs<_, usize> = Sccs::new(&graph);
+ assert_eq!(sccs.num_sccs(), 3);
+ assert_eq!(sccs.scc(0), 1);
+ assert_eq!(sccs.scc(1), 0);
+ assert_eq!(sccs.scc(2), 0);
+ assert_eq!(sccs.scc(3), 2);
+ assert_eq!(sccs.successors(0), &[]);
+ assert_eq!(sccs.successors(1), &[0]);
+ assert_eq!(sccs.successors(2), &[0]);
+}
+
+#[test]
+fn test_find_state_2() {
+ // The order in which things will be visited is important to this
+ // test. It tests part of the `find_state` behavior. Here is the
+ // graph:
+ //
+ //
+ // /----+
+ // 0 <--+ |
+ // | | |
+ // v | |
+ // +-> 1 -> 3 4
+ // | | |
+ // | v |
+ // +-- 2 <----+
+
+ let graph = TestGraph::new(0, &[(0, 1), (0, 4), (1, 2), (1, 3), (2, 1), (3, 0), (4, 2)]);
+
+ // For this graph, we will start in our DFS by visiting:
+ //
+ // 0 -> 1 -> 2 -> 1
+ //
+ // and at this point detect a cycle. The state of 2 will thus be
+ // `InCycleWith { 1 }`. We will then visit the 1 -> 3 edge, which
+ // will attempt to visit 0 as well, thus going to the state
+    // `InCycleWith { 0 }`. Finally, node 1 will complete; the lowest
+    // depth reached through any of its successors was 0 (via node 3),
+    // and thus it will be in the state `InCycleWith { 3 }`.
+ //
+ // When we finally traverse the `0 -> 4` edge and then visit node 2,
+ // the states of the nodes are:
+ //
+ // 0 BeingVisited { 0 }
+ // 1 InCycleWith { 3 }
+ // 2 InCycleWith { 1 }
+ // 3 InCycleWith { 0 }
+ //
+ // and hence 4 will traverse the links, finding an ultimate depth of 0.
+    // It will also collapse the states to the following:
+ //
+    // 0 BeingVisited { 0 }
+    // 1 InCycleWith { 0 }
+    // 2 InCycleWith { 0 }
+    // 3 InCycleWith { 0 }
+
+ let sccs: Sccs<_, usize> = Sccs::new(&graph);
+ assert_eq!(sccs.num_sccs(), 1);
+ assert_eq!(sccs.scc(0), 0);
+ assert_eq!(sccs.scc(1), 0);
+ assert_eq!(sccs.scc(2), 0);
+ assert_eq!(sccs.scc(3), 0);
+ assert_eq!(sccs.scc(4), 0);
+ assert_eq!(sccs.successors(0), &[]);
+}
+
+#[test]
+fn test_find_state_3() {
+ /*
+ /----+
+ 0 <--+ |
+ | | |
+ v | |
+ +-> 1 -> 3 4 5
+ | | | |
+ | v | |
+ +-- 2 <----+-+
+ */
+ let graph =
+ TestGraph::new(0, &[(0, 1), (0, 4), (1, 2), (1, 3), (2, 1), (3, 0), (4, 2), (5, 2)]);
+ let sccs: Sccs<_, usize> = Sccs::new(&graph);
+ assert_eq!(sccs.num_sccs(), 2);
+ assert_eq!(sccs.scc(0), 0);
+ assert_eq!(sccs.scc(1), 0);
+ assert_eq!(sccs.scc(2), 0);
+ assert_eq!(sccs.scc(3), 0);
+ assert_eq!(sccs.scc(4), 0);
+ assert_eq!(sccs.scc(5), 1);
+ assert_eq!(sccs.successors(0), &[]);
+ assert_eq!(sccs.successors(1), &[0]);
+}
+
+#[test]
+fn test_deep_linear() {
+ /*
+ 0
+ |
+ v
+ 1
+ |
+ v
+ 2
+ |
+ v
+ …
+ */
+ #[cfg(not(miri))]
+ const NR_NODES: usize = 1 << 14;
+ #[cfg(miri)]
+ const NR_NODES: usize = 1 << 3;
+ let mut nodes = vec![];
+ for i in 1..NR_NODES {
+ nodes.push((i - 1, i));
+ }
+ let graph = TestGraph::new(0, nodes.as_slice());
+ let sccs: Sccs<_, usize> = Sccs::new(&graph);
+ assert_eq!(sccs.num_sccs(), NR_NODES);
+ assert_eq!(sccs.scc(0), NR_NODES - 1);
+ assert_eq!(sccs.scc(NR_NODES - 1), 0);
+}
+
+#[bench]
+fn bench_sccc(b: &mut test::Bencher) {
+ // Like `test_three_sccs` but each state is replaced by a group of
+ // three or four to have some amount of test data.
+ /*
+ 0-3
+ |
+ v
+ +->4-6 11-14
+ | | |
+ | v |
+ +--7-10<-+
+ */
+ fn make_3_clique(slice: &mut [(usize, usize)], base: usize) {
+ slice[0] = (base + 0, base + 1);
+ slice[1] = (base + 1, base + 2);
+ slice[2] = (base + 2, base + 0);
+ }
+ // Not actually a clique but strongly connected.
+ fn make_4_clique(slice: &mut [(usize, usize)], base: usize) {
+ slice[0] = (base + 0, base + 1);
+ slice[1] = (base + 1, base + 2);
+ slice[2] = (base + 2, base + 3);
+ slice[3] = (base + 3, base + 0);
+ slice[4] = (base + 1, base + 3);
+ slice[5] = (base + 2, base + 1);
+ }
+
+ let mut graph = [(0, 0); 6 + 3 + 6 + 3 + 4];
+ make_4_clique(&mut graph[0..6], 0);
+ make_3_clique(&mut graph[6..9], 4);
+ make_4_clique(&mut graph[9..15], 7);
+ make_3_clique(&mut graph[15..18], 11);
+ graph[18] = (0, 4);
+ graph[19] = (5, 7);
+ graph[20] = (11, 10);
+ graph[21] = (7, 4);
+ let graph = TestGraph::new(0, &graph[..]);
+ b.iter(|| {
+ let sccs: Sccs<_, usize> = Sccs::new(&graph);
+ assert_eq!(sccs.num_sccs(), 3);
+ });
+}
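+
+// Illustrative addition (not in the original suite): `all_sccs` yields SCCs in
+// dependency (post) order, so every SCC carries a larger index than each of
+// its successors -- the successors were completed, and thus numbered, first.
+#[test]
+fn all_sccs_dependency_order() {
+    let graph = TestGraph::new(0, &[(0, 1), (1, 2), (2, 1), (3, 2)]);
+    let sccs: Sccs<_, usize> = Sccs::new(&graph);
+
+    for scc in sccs.all_sccs() {
+        for &succ in sccs.successors(scc) {
+            assert!(succ < scc, "successor {:?} should precede {:?}", succ, scc);
+        }
+    }
+}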
diff --git a/compiler/rustc_data_structures/src/graph/tests.rs b/compiler/rustc_data_structures/src/graph/tests.rs
new file mode 100644
index 000000000..7f4ef906b
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/tests.rs
@@ -0,0 +1,73 @@
+use crate::fx::FxHashMap;
+use std::cmp::max;
+use std::iter;
+use std::slice;
+
+use super::*;
+
+pub struct TestGraph {
+ num_nodes: usize,
+ start_node: usize,
+ successors: FxHashMap<usize, Vec<usize>>,
+ predecessors: FxHashMap<usize, Vec<usize>>,
+}
+
+impl TestGraph {
+ pub fn new(start_node: usize, edges: &[(usize, usize)]) -> Self {
+ let mut graph = TestGraph {
+ num_nodes: start_node + 1,
+ start_node,
+ successors: FxHashMap::default(),
+ predecessors: FxHashMap::default(),
+ };
+ for &(source, target) in edges {
+ graph.num_nodes = max(graph.num_nodes, source + 1);
+ graph.num_nodes = max(graph.num_nodes, target + 1);
+ graph.successors.entry(source).or_default().push(target);
+ graph.predecessors.entry(target).or_default().push(source);
+ }
+ for node in 0..graph.num_nodes {
+ graph.successors.entry(node).or_default();
+ graph.predecessors.entry(node).or_default();
+ }
+ graph
+ }
+}
+
+impl DirectedGraph for TestGraph {
+ type Node = usize;
+}
+
+impl WithStartNode for TestGraph {
+ fn start_node(&self) -> usize {
+ self.start_node
+ }
+}
+
+impl WithNumNodes for TestGraph {
+ fn num_nodes(&self) -> usize {
+ self.num_nodes
+ }
+}
+
+impl WithPredecessors for TestGraph {
+ fn predecessors(&self, node: usize) -> <Self as GraphPredecessors<'_>>::Iter {
+ self.predecessors[&node].iter().cloned()
+ }
+}
+
+impl WithSuccessors for TestGraph {
+ fn successors(&self, node: usize) -> <Self as GraphSuccessors<'_>>::Iter {
+ self.successors[&node].iter().cloned()
+ }
+}
+
+impl<'graph> GraphPredecessors<'graph> for TestGraph {
+ type Item = usize;
+ type Iter = iter::Cloned<slice::Iter<'graph, usize>>;
+}
+
+impl<'graph> GraphSuccessors<'graph> for TestGraph {
+ type Item = usize;
+ type Iter = iter::Cloned<slice::Iter<'graph, usize>>;
+}
diff --git a/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
new file mode 100644
index 000000000..3d91bcade
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/vec_graph/mod.rs
@@ -0,0 +1,109 @@
+use std::cmp::Ord;
+
+use crate::graph::{DirectedGraph, GraphSuccessors, WithNumEdges, WithNumNodes, WithSuccessors};
+use rustc_index::vec::{Idx, IndexVec};
+
+#[cfg(test)]
+mod tests;
+
+pub struct VecGraph<N: Idx> {
+ /// Maps from a given node to an index where the set of successors
+    /// for that node starts. The index indexes into the `edge_targets`
+ /// vector. To find the range for a given node, we look up the
+ /// start for that node and then the start for the next node
+ /// (i.e., with an index 1 higher) and get the range between the
+ /// two. This vector always has an extra entry so that this works
+ /// even for the max element.
+ node_starts: IndexVec<N, usize>,
+
+ edge_targets: Vec<N>,
+}
+
+impl<N: Idx + Ord> VecGraph<N> {
+ pub fn new(num_nodes: usize, mut edge_pairs: Vec<(N, N)>) -> Self {
+ // Sort the edges by the source -- this is important.
+ edge_pairs.sort();
+
+ let num_edges = edge_pairs.len();
+
+ // Store the *target* of each edge into `edge_targets`.
+ let edge_targets: Vec<N> = edge_pairs.iter().map(|&(_, target)| target).collect();
+
+        // Create the *edge starts* array. We are iterating over
+ // the (sorted) edge pairs. We maintain the invariant that the
+ // length of the `node_starts` array is enough to store the
+ // current source node -- so when we see that the source node
+ // for an edge is greater than the current length, we grow the
+ // edge-starts array by just enough.
+ let mut node_starts = IndexVec::with_capacity(num_edges);
+ for (index, &(source, _)) in edge_pairs.iter().enumerate() {
+ // If we have a list like `[(0, x), (2, y)]`:
+ //
+ // - Start out with `node_starts` of `[]`
+ // - Iterate to `(0, x)` at index 0:
+ // - Push one entry because `node_starts.len()` (0) is <= the source (0)
+ // - Leaving us with `node_starts` of `[0]`
+ // - Iterate to `(2, y)` at index 1:
+ // - Push one entry because `node_starts.len()` (1) is <= the source (2)
+ // - Push one entry because `node_starts.len()` (2) is <= the source (2)
+ // - Leaving us with `node_starts` of `[0, 1, 1]`
+ // - Loop terminates
+ while node_starts.len() <= source.index() {
+ node_starts.push(index);
+ }
+ }
+
+ // Pad out the `node_starts` array so that it has `num_nodes +
+ // 1` entries. Continuing our example above, if `num_nodes` is
+        // `3`, we would push one more index: `[0, 1, 1, 2]`.
+ //
+ // Interpretation of that vector:
+ //
+ // [0, 1, 1, 2]
+ // ---- range for N=2
+ // ---- range for N=1
+ // ---- range for N=0
+ while node_starts.len() <= num_nodes {
+ node_starts.push(edge_targets.len());
+ }
+
+ assert_eq!(node_starts.len(), num_nodes + 1);
+
+ Self { node_starts, edge_targets }
+ }
+
+ /// Gets the successors for `source` as a slice.
+ pub fn successors(&self, source: N) -> &[N] {
+ let start_index = self.node_starts[source];
+ let end_index = self.node_starts[source.plus(1)];
+ &self.edge_targets[start_index..end_index]
+ }
+}
+
+impl<N: Idx> DirectedGraph for VecGraph<N> {
+ type Node = N;
+}
+
+impl<N: Idx> WithNumNodes for VecGraph<N> {
+ fn num_nodes(&self) -> usize {
+ self.node_starts.len() - 1
+ }
+}
+
+impl<N: Idx> WithNumEdges for VecGraph<N> {
+ fn num_edges(&self) -> usize {
+ self.edge_targets.len()
+ }
+}
+
+impl<'graph, N: Idx> GraphSuccessors<'graph> for VecGraph<N> {
+ type Item = N;
+
+ type Iter = std::iter::Cloned<std::slice::Iter<'graph, N>>;
+}
+
+impl<N: Idx + Ord> WithSuccessors for VecGraph<N> {
+ fn successors(&self, node: N) -> <Self as GraphSuccessors<'_>>::Iter {
+ self.successors(node).iter().cloned()
+ }
+}
diff --git a/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs b/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs
new file mode 100644
index 000000000..c8f979267
--- /dev/null
+++ b/compiler/rustc_data_structures/src/graph/vec_graph/tests.rs
@@ -0,0 +1,42 @@
+use super::*;
+
+fn create_graph() -> VecGraph<usize> {
+ // Create a simple graph
+ //
+ // 5
+ // |
+ // V
+ // 0 --> 1 --> 2
+ // |
+ // v
+ // 3 --> 4
+ //
+ // 6
+
+ VecGraph::new(7, vec![(0, 1), (1, 2), (1, 3), (3, 4), (5, 1)])
+}
+
+#[test]
+fn num_nodes() {
+ let graph = create_graph();
+ assert_eq!(graph.num_nodes(), 7);
+}
+
+#[test]
+fn successors() {
+ let graph = create_graph();
+ assert_eq!(graph.successors(0), &[1]);
+ assert_eq!(graph.successors(1), &[2, 3]);
+ assert_eq!(graph.successors(2), &[]);
+ assert_eq!(graph.successors(3), &[4]);
+ assert_eq!(graph.successors(4), &[]);
+ assert_eq!(graph.successors(5), &[1]);
+ assert_eq!(graph.successors(6), &[]);
+}
+
+#[test]
+fn dfs() {
+ let graph = create_graph();
+ let dfs: Vec<_> = graph.depth_first_search(0).collect();
+ assert_eq!(dfs, vec![0, 1, 3, 4, 2]);
+}
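+
+// Illustrative addition (not in the original suite): `num_edges` is simply the
+// number of edge targets stored, i.e. the number of pairs passed to `new`.
+#[test]
+fn num_edges() {
+    use crate::graph::WithNumEdges;
+
+    let graph = create_graph();
+    assert_eq!(graph.num_edges(), 5);
+}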
diff --git a/compiler/rustc_data_structures/src/intern.rs b/compiler/rustc_data_structures/src/intern.rs
new file mode 100644
index 000000000..009b5d534
--- /dev/null
+++ b/compiler/rustc_data_structures/src/intern.rs
@@ -0,0 +1,196 @@
+use crate::stable_hasher::{HashStable, StableHasher};
+use std::cmp::Ordering;
+use std::hash::{Hash, Hasher};
+use std::ops::Deref;
+use std::ptr;
+
+use crate::fingerprint::Fingerprint;
+
+mod private {
+ #[derive(Clone, Copy, Debug)]
+ pub struct PrivateZst;
+}
+
+/// A reference to a value that is interned, and is known to be unique.
+///
+/// Note that it is possible to have a `T` and an `Interned<T>` that are (or
+/// refer to) equal but different values. But if you have two `Interned<T>`s
+/// that compare equal, they both refer to the same value, at a single location
+/// in memory. This means that equality and hashing can be done on the value's
+/// address rather than the value's contents, which can improve performance.
+///
+/// The `PrivateZst` field means you can pattern match with `Interned(v, _)`
+/// but you can only construct an `Interned` with `new_unchecked`, and not
+/// directly.
+#[derive(Debug)]
+#[rustc_pass_by_value]
+pub struct Interned<'a, T>(pub &'a T, pub private::PrivateZst);
+
+impl<'a, T> Interned<'a, T> {
+ /// Create a new `Interned` value. The value referred to *must* be interned
+ /// and thus be unique, and it *must* remain unique in the future. This
+ /// function has `_unchecked` in the name but is not `unsafe`, because if
+    /// the uniqueness condition is violated it will cause incorrect
+ /// behaviour but will not affect memory safety.
+ #[inline]
+ pub const fn new_unchecked(t: &'a T) -> Self {
+ Interned(t, private::PrivateZst)
+ }
+}
+
+impl<'a, T> Clone for Interned<'a, T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<'a, T> Copy for Interned<'a, T> {}
+
+impl<'a, T> Deref for Interned<'a, T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ self.0
+ }
+}
+
+impl<'a, T> PartialEq for Interned<'a, T> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ // Pointer equality implies equality, due to the uniqueness constraint.
+ ptr::eq(self.0, other.0)
+ }
+}
+
+impl<'a, T> Eq for Interned<'a, T> {}
+
+impl<'a, T: PartialOrd> PartialOrd for Interned<'a, T> {
+ fn partial_cmp(&self, other: &Interned<'a, T>) -> Option<Ordering> {
+ // Pointer equality implies equality, due to the uniqueness constraint,
+ // but the contents must be compared otherwise.
+ if ptr::eq(self.0, other.0) {
+ Some(Ordering::Equal)
+ } else {
+ let res = self.0.partial_cmp(&other.0);
+ debug_assert_ne!(res, Some(Ordering::Equal));
+ res
+ }
+ }
+}
+
+impl<'a, T: Ord> Ord for Interned<'a, T> {
+ fn cmp(&self, other: &Interned<'a, T>) -> Ordering {
+ // Pointer equality implies equality, due to the uniqueness constraint,
+ // but the contents must be compared otherwise.
+ if ptr::eq(self.0, other.0) {
+ Ordering::Equal
+ } else {
+ let res = self.0.cmp(&other.0);
+ debug_assert_ne!(res, Ordering::Equal);
+ res
+ }
+ }
+}
+
+impl<'a, T> Hash for Interned<'a, T> {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // Pointer hashing is sufficient, due to the uniqueness constraint.
+ ptr::hash(self.0, s)
+ }
+}
+
+impl<T, CTX> HashStable<CTX> for Interned<'_, T>
+where
+ T: HashStable<CTX>,
+{
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.0.hash_stable(hcx, hasher);
+ }
+}
+
+/// A helper trait so that `Interned` things can cache stable hashes reproducibly.
+pub trait InternedHashingContext {
+ fn with_def_path_and_no_spans(&mut self, f: impl FnOnce(&mut Self));
+}
+
+/// A helper type that you can wrap around your own type in order to automatically
+/// cache the stable hash on creation and not recompute it whenever the stable hash
+/// of the type is computed.
+/// This is only done in incremental mode. You can also opt out of caching by using
+/// `Fingerprint::ZERO` for the hash, in which case the hash gets computed each time.
+/// This is useful if you have values that you intern but never (can?) use for stable
+/// hashing.
+#[derive(Copy, Clone)]
+pub struct WithStableHash<T> {
+ pub internee: T,
+ pub stable_hash: Fingerprint,
+}
+
+impl<T: PartialEq> PartialEq for WithStableHash<T> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.internee.eq(&other.internee)
+ }
+}
+
+impl<T: Eq> Eq for WithStableHash<T> {}
+
+impl<T: Ord> PartialOrd for WithStableHash<T> {
+ fn partial_cmp(&self, other: &WithStableHash<T>) -> Option<Ordering> {
+ Some(self.internee.cmp(&other.internee))
+ }
+}
+
+impl<T: Ord> Ord for WithStableHash<T> {
+ fn cmp(&self, other: &WithStableHash<T>) -> Ordering {
+ self.internee.cmp(&other.internee)
+ }
+}
+
+impl<T> Deref for WithStableHash<T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ &self.internee
+ }
+}
+
+impl<T: Hash> Hash for WithStableHash<T> {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ self.internee.hash(s)
+ }
+}
+
+impl<T: HashStable<CTX>, CTX: InternedHashingContext> HashStable<CTX> for WithStableHash<T> {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ if self.stable_hash == Fingerprint::ZERO || cfg!(debug_assertions) {
+ // No cached hash available. This can only mean that incremental is disabled.
+ // We don't cache stable hashes in non-incremental mode, because they are used
+ // so rarely that the performance actually suffers.
+
+ // We need to build the hash as if we cached it and then hash that hash, as
+ // otherwise the hashes will differ between cached and non-cached mode.
+ let stable_hash: Fingerprint = {
+ let mut hasher = StableHasher::new();
+ hcx.with_def_path_and_no_spans(|hcx| self.internee.hash_stable(hcx, &mut hasher));
+ hasher.finish()
+ };
+ if cfg!(debug_assertions) && self.stable_hash != Fingerprint::ZERO {
+ assert_eq!(
+ stable_hash, self.stable_hash,
+ "cached stable hash does not match freshly computed stable hash"
+ );
+ }
+ stable_hash.hash_stable(hcx, hasher);
+ } else {
+ self.stable_hash.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_data_structures/src/intern/tests.rs b/compiler/rustc_data_structures/src/intern/tests.rs
new file mode 100644
index 000000000..09810a085
--- /dev/null
+++ b/compiler/rustc_data_structures/src/intern/tests.rs
@@ -0,0 +1,59 @@
+use super::*;
+use std::cmp::Ordering;
+
+#[derive(Debug)]
+struct S(u32);
+
+impl PartialEq for S {
+ fn eq(&self, _other: &Self) -> bool {
+ panic!("shouldn't be called");
+ }
+}
+
+impl Eq for S {}
+
+impl PartialOrd for S {
+ fn partial_cmp(&self, other: &S) -> Option<Ordering> {
+ // The `==` case should be handled by `Interned`.
+ assert_ne!(self.0, other.0);
+ self.0.partial_cmp(&other.0)
+ }
+}
+
+impl Ord for S {
+ fn cmp(&self, other: &S) -> Ordering {
+ // The `==` case should be handled by `Interned`.
+ assert_ne!(self.0, other.0);
+ self.0.cmp(&other.0)
+ }
+}
+
+#[test]
+fn test_uniq() {
+ let s1 = S(1);
+ let s2 = S(2);
+ let s3 = S(3);
+ let s4 = S(1); // violates uniqueness
+
+ let v1 = Interned::new_unchecked(&s1);
+ let v2 = Interned::new_unchecked(&s2);
+ let v3a = Interned::new_unchecked(&s3);
+ let v3b = Interned::new_unchecked(&s3);
+ let v4 = Interned::new_unchecked(&s4); // violates uniqueness
+
+ assert_ne!(v1, v2);
+ assert_ne!(v2, v3a);
+ assert_eq!(v1, v1);
+ assert_eq!(v3a, v3b);
+ assert_ne!(v1, v4); // same content but different addresses: not equal
+
+ assert_eq!(v1.cmp(&v2), Ordering::Less);
+ assert_eq!(v3a.cmp(&v2), Ordering::Greater);
+ assert_eq!(v1.cmp(&v1), Ordering::Equal); // only uses Interned::eq, not S::cmp
+ assert_eq!(v3a.cmp(&v3b), Ordering::Equal); // only uses Interned::eq, not S::cmp
+
+ assert_eq!(v1.partial_cmp(&v2), Some(Ordering::Less));
+ assert_eq!(v3a.partial_cmp(&v2), Some(Ordering::Greater));
+ assert_eq!(v1.partial_cmp(&v1), Some(Ordering::Equal)); // only uses Interned::eq, not S::cmp
+ assert_eq!(v3a.partial_cmp(&v3b), Some(Ordering::Equal)); // only uses Interned::eq, not S::cmp
+}
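+
+// Illustrative addition (not in the original suite): `Interned` derefs to the
+// wrapped value, so its contents remain directly accessible. Note that neither
+// access below goes through `S`'s (panicking) `PartialEq`.
+#[test]
+fn test_deref() {
+    let s = S(99);
+    let v = Interned::new_unchecked(&s);
+    assert_eq!((*v).0, 99); // via `Deref`
+    assert_eq!((v.0).0, 99); // via the public reference field
+}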
diff --git a/compiler/rustc_data_structures/src/jobserver.rs b/compiler/rustc_data_structures/src/jobserver.rs
new file mode 100644
index 000000000..09baa3095
--- /dev/null
+++ b/compiler/rustc_data_structures/src/jobserver.rs
@@ -0,0 +1,40 @@
+pub use jobserver_crate::Client;
+use std::sync::LazyLock;
+
+// We can only call `from_env` once per process
+
+// Note that this is unsafe because it may misinterpret file descriptors
+// on Unix as jobserver file descriptors. We hopefully execute this near
+// the beginning of the process though to ensure we don't get false
+// positives, or in other words we try to execute this before we open
+// any file descriptors ourselves.
+//
+// Pick a "reasonable maximum" if we don't otherwise have
+// a jobserver in our environment, capping out at 32 so we
+// don't take everything down by hogging the process run queue.
+// The fixed number is used to have deterministic compilation
+// across machines.
+//
+// Also note that we stick this in a global because there could be
+// multiple rustc instances in this process, and the jobserver is
+// per-process.
+static GLOBAL_CLIENT: LazyLock<Client> = LazyLock::new(|| unsafe {
+ Client::from_env().unwrap_or_else(|| {
+ let client = Client::new(32).expect("failed to create jobserver");
+ // Acquire a token for the main thread which we can release later
+ client.acquire_raw().ok();
+ client
+ })
+});
+
+pub fn client() -> Client {
+ GLOBAL_CLIENT.clone()
+}
+
+pub fn acquire_thread() {
+ GLOBAL_CLIENT.acquire_raw().ok();
+}
+
+pub fn release_thread() {
+ GLOBAL_CLIENT.release_raw().ok();
+}
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
new file mode 100644
index 000000000..265f45b72
--- /dev/null
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -0,0 +1,113 @@
+//! Various data structures used by the Rust compiler. The intention
+//! is that code in here should not be *specific* to rustc, so that
+//! it can be easily unit tested and so forth.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(array_windows)]
+#![feature(associated_type_bounds)]
+#![feature(auto_traits)]
+#![feature(cell_leak)]
+#![feature(control_flow_enum)]
+#![feature(extend_one)]
+#![feature(let_else)]
+#![feature(hash_raw_entry)]
+#![feature(hasher_prefixfree_extras)]
+#![feature(maybe_uninit_uninit_array)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(type_alias_impl_trait)]
+#![feature(new_uninit)]
+#![feature(once_cell)]
+#![feature(rustc_attrs)]
+#![feature(test)]
+#![feature(thread_id_value)]
+#![feature(vec_into_raw_parts)]
+#![allow(rustc::default_hash_types)]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate cfg_if;
+#[macro_use]
+extern crate rustc_macros;
+
+pub use rustc_index::static_assert_size;
+
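+/// Runs `f` out of line in a `#[cold]`, `#[inline(never)]` function, hinting
+/// to the optimizer that the call site is rarely reached. A minimal sketch of
+/// the intended use (`cache_lookup` and `recompute` are illustrative names):
+///
+/// ```ignore (illustrative)
+/// let value = match cache_lookup(key) {
+///     Some(v) => v,
+///     None => cold_path(|| recompute(key)), // rare slow path, kept out of hot code
+/// };
+/// ```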
+#[inline(never)]
+#[cold]
+pub fn cold_path<F: FnOnce() -> R, R>(f: F) -> R {
+ f()
+}
+
+pub mod base_n;
+pub mod binary_search_util;
+pub mod captures;
+pub mod flock;
+pub mod functor;
+pub mod fx;
+pub mod graph;
+pub mod intern;
+pub mod jobserver;
+pub mod macros;
+pub mod map_in_place;
+pub mod obligation_forest;
+pub mod owning_ref;
+pub mod sip128;
+pub mod small_c_str;
+pub mod small_str;
+pub mod snapshot_map;
+pub mod svh;
+pub use ena::snapshot_vec;
+pub mod memmap;
+pub mod sorted_map;
+#[macro_use]
+pub mod stable_hasher;
+mod atomic_ref;
+pub mod fingerprint;
+pub mod profiling;
+pub mod sharded;
+pub mod stack;
+pub mod sync;
+pub mod thin_vec;
+pub mod tiny_list;
+pub mod transitive_relation;
+pub mod vec_linked_list;
+pub mod vec_map;
+pub mod work_queue;
+pub use atomic_ref::AtomicRef;
+pub mod frozen;
+pub mod sso;
+pub mod steal;
+pub mod tagged_ptr;
+pub mod temp_dir;
+pub mod unhash;
+
+pub use ena::undo_log;
+pub use ena::unify;
+
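+/// Runs the wrapped closure when this value is dropped, unless
+/// [`OnDrop::disable`] is called first. A minimal sketch of the intended
+/// usage:
+///
+/// ```ignore (illustrative)
+/// let guard = OnDrop(|| eprintln!("rolling back"));
+/// // ... fallible work; if it panics or returns early, the closure runs ...
+/// guard.disable(); // success path: the closure never runs
+/// ```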
+pub struct OnDrop<F: Fn()>(pub F);
+
+impl<F: Fn()> OnDrop<F> {
+    /// Forgets the function, which prevents it from running.
+    /// Ensure that the function owns no memory; otherwise it will be leaked.
+ #[inline]
+ pub fn disable(self) {
+ std::mem::forget(self);
+ }
+}
+
+impl<F: Fn()> Drop for OnDrop<F> {
+ #[inline]
+ fn drop(&mut self) {
+ (self.0)();
+ }
+}
+
+// See comments in src/librustc_middle/lib.rs
+#[doc(hidden)]
+pub fn __noop_fix_for_27438() {}
diff --git a/compiler/rustc_data_structures/src/macros.rs b/compiler/rustc_data_structures/src/macros.rs
new file mode 100644
index 000000000..e05491f6f
--- /dev/null
+++ b/compiler/rustc_data_structures/src/macros.rs
@@ -0,0 +1,37 @@
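+/// Defines a `pub enum` together with a `from_u32` constructor that inverts
+/// the `as u32` cast. A minimal sketch of the intended usage (the enum name
+/// and variants are illustrative):
+///
+/// ```ignore (illustrative)
+/// enum_from_u32! {
+///     #[derive(Debug, PartialEq)]
+///     pub enum Color {
+///         Red,
+///         Green,
+///         Blue,
+///     }
+/// }
+///
+/// assert_eq!(Color::from_u32(1), Some(Color::Green));
+/// assert_eq!(Color::from_u32(9), None);
+/// ```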
+#[macro_export]
+macro_rules! enum_from_u32 {
+ ($(#[$attr:meta])* pub enum $name:ident {
+ $($(#[$var_attr:meta])* $variant:ident = $e:expr,)*
+ }) => {
+ $(#[$attr])*
+ pub enum $name {
+ $($(#[$var_attr])* $variant = $e),*
+ }
+
+ impl $name {
+ pub fn from_u32(u: u32) -> Option<$name> {
+ $(if u == $name::$variant as u32 {
+ return Some($name::$variant)
+ })*
+ None
+ }
+ }
+ };
+ ($(#[$attr:meta])* pub enum $name:ident {
+ $($(#[$var_attr:meta])* $variant:ident,)*
+ }) => {
+ $(#[$attr])*
+ pub enum $name {
+ $($(#[$var_attr])* $variant,)*
+ }
+
+ impl $name {
+ pub fn from_u32(u: u32) -> Option<$name> {
+ $(if u == $name::$variant as u32 {
+ return Some($name::$variant)
+ })*
+ None
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_data_structures/src/map_in_place.rs b/compiler/rustc_data_structures/src/map_in_place.rs
new file mode 100644
index 000000000..874de03d3
--- /dev/null
+++ b/compiler/rustc_data_structures/src/map_in_place.rs
@@ -0,0 +1,108 @@
+use smallvec::{Array, SmallVec};
+use std::ptr;
+
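+/// In-place `map`/`flat_map` over a vector-like container, reusing the
+/// existing allocation instead of building a new one. A minimal sketch of
+/// the intended usage:
+///
+/// ```ignore (illustrative)
+/// let mut v = vec![1, 2, 3];
+/// v.flat_map_in_place(|x| if x == 2 { vec![] } else { vec![x, x] });
+/// assert_eq!(v, [1, 1, 3, 3]);
+/// ```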
+pub trait MapInPlace<T>: Sized {
+ fn map_in_place<F>(&mut self, mut f: F)
+ where
+ F: FnMut(T) -> T,
+ {
+ self.flat_map_in_place(|e| Some(f(e)))
+ }
+
+ fn flat_map_in_place<F, I>(&mut self, f: F)
+ where
+ F: FnMut(T) -> I,
+ I: IntoIterator<Item = T>;
+}
+
+impl<T> MapInPlace<T> for Vec<T> {
+ fn flat_map_in_place<F, I>(&mut self, mut f: F)
+ where
+ F: FnMut(T) -> I,
+ I: IntoIterator<Item = T>,
+ {
+ let mut read_i = 0;
+ let mut write_i = 0;
+ unsafe {
+ let mut old_len = self.len();
+ self.set_len(0); // make sure we just leak elements in case of panic
+
+ while read_i < old_len {
+ // move the read_i'th item out of the vector and map it
+ // to an iterator
+ let e = ptr::read(self.as_ptr().add(read_i));
+ let iter = f(e).into_iter();
+ read_i += 1;
+
+ for e in iter {
+ if write_i < read_i {
+ ptr::write(self.as_mut_ptr().add(write_i), e);
+ write_i += 1;
+ } else {
+ // If this is reached we ran out of space
+ // in the middle of the vector.
+ // However, the vector is in a valid state here,
+ // so we just do a somewhat inefficient insert.
+ self.set_len(old_len);
+ self.insert(write_i, e);
+
+ old_len = self.len();
+ self.set_len(0);
+
+ read_i += 1;
+ write_i += 1;
+ }
+ }
+ }
+
+ // write_i tracks the number of actually written new items.
+ self.set_len(write_i);
+ }
+ }
+}
+
+impl<T, A: Array<Item = T>> MapInPlace<T> for SmallVec<A> {
+ fn flat_map_in_place<F, I>(&mut self, mut f: F)
+ where
+ F: FnMut(T) -> I,
+ I: IntoIterator<Item = T>,
+ {
+ let mut read_i = 0;
+ let mut write_i = 0;
+ unsafe {
+ let mut old_len = self.len();
+ self.set_len(0); // make sure we just leak elements in case of panic
+
+ while read_i < old_len {
+ // move the read_i'th item out of the vector and map it
+ // to an iterator
+ let e = ptr::read(self.as_ptr().add(read_i));
+ let iter = f(e).into_iter();
+ read_i += 1;
+
+ for e in iter {
+ if write_i < read_i {
+ ptr::write(self.as_mut_ptr().add(write_i), e);
+ write_i += 1;
+ } else {
+ // If this is reached we ran out of space
+ // in the middle of the vector.
+ // However, the vector is in a valid state here,
+ // so we just do a somewhat inefficient insert.
+ self.set_len(old_len);
+ self.insert(write_i, e);
+
+ old_len = self.len();
+ self.set_len(0);
+
+ read_i += 1;
+ write_i += 1;
+ }
+ }
+ }
+
+ // write_i tracks the number of actually written new items.
+ self.set_len(write_i);
+ }
+ }
+}
diff --git a/compiler/rustc_data_structures/src/memmap.rs b/compiler/rustc_data_structures/src/memmap.rs
new file mode 100644
index 000000000..917416df6
--- /dev/null
+++ b/compiler/rustc_data_structures/src/memmap.rs
@@ -0,0 +1,108 @@
+use std::fs::File;
+use std::io;
+use std::ops::{Deref, DerefMut};
+
+use crate::owning_ref::StableAddress;
+
+/// A trivial wrapper for [`memmap2::Mmap`] that implements [`StableAddress`].
+#[cfg(not(target_arch = "wasm32"))]
+pub struct Mmap(memmap2::Mmap);
+
+#[cfg(target_arch = "wasm32")]
+pub struct Mmap(Vec<u8>);
+
+#[cfg(not(target_arch = "wasm32"))]
+impl Mmap {
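+    /// # Safety
+    ///
+    /// As with [`memmap2::Mmap::map`], the caller must ensure that the
+    /// underlying file is not modified (e.g. truncated) while the mapping
+    /// is live, as that would result in undefined behavior.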
+ #[inline]
+ pub unsafe fn map(file: File) -> io::Result<Self> {
+ memmap2::Mmap::map(&file).map(Mmap)
+ }
+}
+
+#[cfg(target_arch = "wasm32")]
+impl Mmap {
+ #[inline]
+ pub unsafe fn map(mut file: File) -> io::Result<Self> {
+ use std::io::Read;
+
+ let mut data = Vec::new();
+ file.read_to_end(&mut data)?;
+ Ok(Mmap(data))
+ }
+}
+
+impl Deref for Mmap {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ &*self.0
+ }
+}
+
+// SAFETY: On architectures other than WASM, mmap is used as backing storage. The address of this
+// memory map is stable. On WASM, `Vec<u8>` is used as backing storage. The `Mmap` type doesn't
+// export any function that can cause the `Vec` to be re-allocated. As such the address of the
+// bytes inside this `Vec` is stable.
+unsafe impl StableAddress for Mmap {}
+
+#[cfg(not(target_arch = "wasm32"))]
+pub struct MmapMut(memmap2::MmapMut);
+
+#[cfg(target_arch = "wasm32")]
+pub struct MmapMut(Vec<u8>);
+
+#[cfg(not(target_arch = "wasm32"))]
+impl MmapMut {
+ #[inline]
+ pub fn map_anon(len: usize) -> io::Result<Self> {
+ let mmap = memmap2::MmapMut::map_anon(len)?;
+ Ok(MmapMut(mmap))
+ }
+
+ #[inline]
+ pub fn flush(&mut self) -> io::Result<()> {
+ self.0.flush()
+ }
+
+ #[inline]
+ pub fn make_read_only(self) -> std::io::Result<Mmap> {
+ let mmap = self.0.make_read_only()?;
+ Ok(Mmap(mmap))
+ }
+}
+
+#[cfg(target_arch = "wasm32")]
+impl MmapMut {
+ #[inline]
+ pub fn map_anon(len: usize) -> io::Result<Self> {
+ let data = Vec::with_capacity(len);
+ Ok(MmapMut(data))
+ }
+
+ #[inline]
+ pub fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+
+ #[inline]
+ pub fn make_read_only(self) -> std::io::Result<Mmap> {
+ Ok(Mmap(self.0))
+ }
+}
+
+impl Deref for MmapMut {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ &*self.0
+ }
+}
+
+impl DerefMut for MmapMut {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut [u8] {
+ &mut *self.0
+ }
+}
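+
+// A hedged usage sketch (illustrative, error handling elided): write through
+// an anonymous map, then freeze it into a read-only `Mmap`.
+//
+//     let mut buf = MmapMut::map_anon(4096)?;
+//     buf[..5].copy_from_slice(b"hello");
+//     let frozen: Mmap = buf.make_read_only()?;
+//     assert_eq!(&frozen[..5], b"hello");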
diff --git a/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs b/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs
new file mode 100644
index 000000000..3a268e4b4
--- /dev/null
+++ b/compiler/rustc_data_structures/src/obligation_forest/graphviz.rs
@@ -0,0 +1,90 @@
+use crate::obligation_forest::{ForestObligation, ObligationForest};
+use rustc_graphviz as dot;
+use std::env::var_os;
+use std::fs::File;
+use std::io::BufWriter;
+use std::path::Path;
+use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::Ordering;
+
+impl<O: ForestObligation> ObligationForest<O> {
+ /// Creates a graphviz representation of the obligation forest. Given a directory this will
+ /// create files with name of the format `<counter>_<description>.gv`. The counter is
+ /// global and is maintained internally.
+ ///
+ /// Calling this will do nothing unless the environment variable
+ /// `DUMP_OBLIGATION_FOREST_GRAPHVIZ` is defined.
+ ///
+    /// A few post-processing steps that you might want to do to make the forest easier to visualize:
+ ///
+ /// * `sed 's,std::[a-z]*::,,g'` — Deletes the `std::<package>::` prefix of paths.
+ /// * `sed 's,"Binder(TraitPredicate(<\(.*\)>)) (\([^)]*\))","\1 (\2)",'` — Transforms
+ /// `Binder(TraitPredicate(<predicate>))` into just `<predicate>`.
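+    ///
+    /// A minimal sketch of the intended usage (the directory and description
+    /// are illustrative):
+    ///
+    /// ```ignore (illustrative)
+    /// // Requires DUMP_OBLIGATION_FOREST_GRAPHVIZ to be set in the environment.
+    /// forest.dump_graphviz("/tmp/forest", "after_round");
+    /// // Writes e.g. /tmp/forest/0000000000_after_round.gv
+    /// ```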
+ #[allow(dead_code)]
+ pub fn dump_graphviz<P: AsRef<Path>>(&self, dir: P, description: &str) {
+ static COUNTER: AtomicUsize = AtomicUsize::new(0);
+
+ if var_os("DUMP_OBLIGATION_FOREST_GRAPHVIZ").is_none() {
+ return;
+ }
+
+ let counter = COUNTER.fetch_add(1, Ordering::AcqRel);
+
+ let file_path = dir.as_ref().join(format!("{:010}_{}.gv", counter, description));
+
+ let mut gv_file = BufWriter::new(File::create(file_path).unwrap());
+
+ dot::render(&self, &mut gv_file).unwrap();
+ }
+}
+
+impl<'a, O: ForestObligation + 'a> dot::Labeller<'a> for &'a ObligationForest<O> {
+ type Node = usize;
+ type Edge = (usize, usize);
+
+ fn graph_id(&self) -> dot::Id<'_> {
+ dot::Id::new("trait_obligation_forest").unwrap()
+ }
+
+ fn node_id(&self, index: &Self::Node) -> dot::Id<'_> {
+ dot::Id::new(format!("obligation_{}", index)).unwrap()
+ }
+
+ fn node_label(&self, index: &Self::Node) -> dot::LabelText<'_> {
+ let node = &self.nodes[*index];
+ let label = format!("{:?} ({:?})", node.obligation.as_cache_key(), node.state.get());
+
+ dot::LabelText::LabelStr(label.into())
+ }
+
+ fn edge_label(&self, (_index_source, _index_target): &Self::Edge) -> dot::LabelText<'_> {
+ dot::LabelText::LabelStr("".into())
+ }
+}
+
+impl<'a, O: ForestObligation + 'a> dot::GraphWalk<'a> for &'a ObligationForest<O> {
+ type Node = usize;
+ type Edge = (usize, usize);
+
+ fn nodes(&self) -> dot::Nodes<'_, Self::Node> {
+ (0..self.nodes.len()).collect()
+ }
+
+ fn edges(&self) -> dot::Edges<'_, Self::Edge> {
+ (0..self.nodes.len())
+ .flat_map(|i| {
+ let node = &self.nodes[i];
+
+ node.dependents.iter().map(move |&d| (d, i))
+ })
+ .collect()
+ }
+
+ fn source(&self, (s, _): &Self::Edge) -> Self::Node {
+ *s
+ }
+
+ fn target(&self, (_, t): &Self::Edge) -> Self::Node {
+ *t
+ }
+}
diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
new file mode 100644
index 000000000..07a96dd7d
--- /dev/null
+++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
@@ -0,0 +1,698 @@
+//! The `ObligationForest` is a utility data structure used in trait
+//! matching to track the set of outstanding obligations (those not yet
+//! resolved to success or error). It also tracks the "backtrace" of each
+//! pending obligation (why we are trying to figure this out in the first
+//! place).
+//!
+//! ### External view
+//!
+//! `ObligationForest` supports two main public operations (there are a
+//! few others not discussed here):
+//!
+//! 1. Add a new root obligation (`register_obligation`).
+//! 2. Process the pending obligations (`process_obligations`).
+//!
+//! When a new obligation `N` is added, it becomes the root of an
+//! obligation tree. This tree can also carry some per-tree state `T`,
+//! which is given at the same time. This tree is a singleton to start, so
+//! `N` is both the root and the only leaf. Each time the
+//! `process_obligations` method is called, it will invoke its callback
+//! with every pending obligation (so that will include `N`, the first
+//! time). The callback also receives a (mutable) reference to the
+//! per-tree state `T`. The callback should process the obligation `O`
+//! that it is given and return a `ProcessResult`:
+//!
+//! - `Unchanged` -> ambiguous result. Obligation was neither a success
+//! nor a failure. It is assumed that further attempts to process the
+//! obligation will yield the same result unless something in the
+//! surrounding environment changes.
+//! - `Changed(C)` -> the obligation was *shallowly successful*. The
+//! vector `C` is a list of subobligations. The meaning of this is that
+//! `O` was successful on the assumption that all the obligations in `C`
+//! are also successful. Therefore, `O` is only considered a "true"
+//! success if `C` is empty. Otherwise, `O` is put into a suspended
+//! state and the obligations in `C` become the new pending
+//! obligations. They will be processed the next time you call
+//! `process_obligations`.
+//! - `Error(E)` -> obligation failed with error `E`. We will collect this
+//! error and return it from `process_obligations`, along with the
+//! "backtrace" of obligations (that is, the list of obligations up to
+//! and including the root of the failed obligation). No further
+//! obligations from that same tree will be processed, since the tree is
+//! now considered to be in error.
+//!
+//! When the call to `process_obligations` completes, you get back an `Outcome`,
+//! which includes two bits of information:
+//!
+//! - `completed`: a list of obligations where processing was fully
+//! completed without error (meaning that all transitive subobligations
+//! have also been completed). So, for example, if the callback from
+//! `process_obligations` returns `Changed(C)` for some obligation `O`,
+//! then `O` will be considered completed right away if `C` is the
+//! empty vector. Otherwise it will only be considered completed once
+//! all the obligations in `C` have been found completed.
+//! - `errors`: a list of errors that occurred and associated backtraces
+//! at the time of error, which can be used to give context to the user.
+//!
+//! Upon completion, none of the obligations processed in the final round
+//! were *shallowly successful* (that is, no callback returned `Changed(_)`);
+//! this implies that all remaining obligations either errored or returned
+//! an ambiguous result.
+//!
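+//! A minimal sketch of the external view described above (obligations are
+//! plain strings, as in this module's tests; the processor wiring is
+//! illustrative):
+//!
+//! ```ignore (illustrative)
+//! let mut forest = ObligationForest::new();
+//! forest.register_obligation("A");
+//!
+//! // Round 1: the callback returns Changed(vec!["A.1", "A.2"]) for "A",
+//! // so "A" is suspended until both subobligations complete.
+//! // Round 2: both children return Changed(vec![]), completing them and,
+//! // transitively, "A"; all three are reported as completed.
+//! ```
+//!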
+//! ### Implementation details
+//!
+//! For the most part, comments specific to the implementation are in the
+//! code. This file only contains a very high-level overview. Basically,
+//! the forest is stored in a vector. Each element of the vector is a node
+//! in some tree. Each node in the vector has the index of its dependents,
+//! including the first dependent which is known as the parent. It also
+//! has a current state, described by `NodeState`. After each processing
+//! step, we compress the vector to remove completed and error nodes, which
+//! aren't needed anymore.
+
+use crate::fx::{FxHashMap, FxHashSet};
+
+use std::cell::Cell;
+use std::collections::hash_map::Entry;
+use std::fmt::Debug;
+use std::hash;
+use std::marker::PhantomData;
+
+mod graphviz;
+
+#[cfg(test)]
+mod tests;
+
+pub trait ForestObligation: Clone + Debug {
+ type CacheKey: Clone + hash::Hash + Eq + Debug;
+
+    /// Converts this `ForestObligation` into a value suitable for use as a
+    /// cache key. If two distinct `ForestObligation`s return the same cache
+    /// key, then it must be sound to use the result of processing one
+    /// obligation (e.g., success or error) for the other obligation.
+}
+
+pub trait ObligationProcessor {
+ type Obligation: ForestObligation;
+ type Error: Debug;
+
+ fn needs_process_obligation(&self, obligation: &Self::Obligation) -> bool;
+
+ fn process_obligation(
+ &mut self,
+ obligation: &mut Self::Obligation,
+ ) -> ProcessResult<Self::Obligation, Self::Error>;
+
+ /// As we do the cycle check, we invoke this callback when we
+ /// encounter an actual cycle. `cycle` is an iterator that starts
+ /// at the start of the cycle in the stack and walks **toward the
+ /// top**.
+ ///
+ /// In other words, if we had O1 which required O2 which required
+ /// O3 which required O1, we would give an iterator yielding O1,
+ /// O2, O3 (O1 is not yielded twice).
+ fn process_backedge<'c, I>(&mut self, cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+ where
+ I: Clone + Iterator<Item = &'c Self::Obligation>;
+}
+
+/// The result type used by `process_obligation`.
+#[derive(Debug)]
+pub enum ProcessResult<O, E> {
+ Unchanged,
+ Changed(Vec<O>),
+ Error(E),
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+struct ObligationTreeId(usize);
+
+type ObligationTreeIdGenerator =
+ std::iter::Map<std::ops::RangeFrom<usize>, fn(usize) -> ObligationTreeId>;
+
+pub struct ObligationForest<O: ForestObligation> {
+ /// The list of obligations. In between calls to [Self::process_obligations],
+ /// this list only contains nodes in the `Pending` or `Waiting` state.
+ ///
+ /// `usize` indices are used here and throughout this module, rather than
+ /// [`rustc_index::newtype_index!`] indices, because this code is hot enough
+ /// that the `u32`-to-`usize` conversions that would be required are
+ /// significant, and space considerations are not important.
+ nodes: Vec<Node<O>>,
+
+ /// A cache of predicates that have been successfully completed.
+ done_cache: FxHashSet<O::CacheKey>,
+
+ /// A cache of the nodes in `nodes`, indexed by predicate. Unfortunately,
+ /// its contents are not guaranteed to match those of `nodes`. See the
+ /// comments in `Self::process_obligation` for details.
+ active_cache: FxHashMap<O::CacheKey, usize>,
+
+ /// A vector reused in [Self::compress()] and [Self::find_cycles_from_node()],
+ /// to avoid allocating new vectors.
+ reused_node_vec: Vec<usize>,
+
+ obligation_tree_id_generator: ObligationTreeIdGenerator,
+
+ /// Per tree error cache. This is used to deduplicate errors,
+ /// which is necessary to avoid trait resolution overflow in
+ /// some cases.
+ ///
+ /// See [this][details] for details.
+ ///
+ /// [details]: https://github.com/rust-lang/rust/pull/53255#issuecomment-421184780
+ error_cache: FxHashMap<ObligationTreeId, FxHashSet<O::CacheKey>>,
+}
+
+#[derive(Debug)]
+struct Node<O> {
+ obligation: O,
+ state: Cell<NodeState>,
+
+ /// Obligations that depend on this obligation for their completion. They
+ /// must all be in a non-pending state.
+ dependents: Vec<usize>,
+
+ /// If true, `dependents[0]` points to a "parent" node, which requires
+ /// special treatment upon error but is otherwise treated the same.
+ /// (It would be more idiomatic to store the parent node in a separate
+ /// `Option<usize>` field, but that slows down the common case of
+ /// iterating over the parent and other descendants together.)
+ has_parent: bool,
+
+ /// Identifier of the obligation tree to which this node belongs.
+ obligation_tree_id: ObligationTreeId,
+}
+
+impl<O> Node<O> {
+ fn new(parent: Option<usize>, obligation: O, obligation_tree_id: ObligationTreeId) -> Node<O> {
+ Node {
+ obligation,
+ state: Cell::new(NodeState::Pending),
+ dependents: if let Some(parent_index) = parent { vec![parent_index] } else { vec![] },
+ has_parent: parent.is_some(),
+ obligation_tree_id,
+ }
+ }
+}
+
+/// The state of one node in some tree within the forest. This represents the
+/// current state of processing for the obligation (of type `O`) associated
+/// with this node.
+///
+/// The non-`Error` state transitions are as follows.
+/// ```text
+/// (Pre-creation)
+/// |
+/// | register_obligation_at() (called by process_obligations() and
+/// v from outside the crate)
+/// Pending
+/// |
+/// | process_obligations()
+/// v
+/// Success
+/// | ^
+/// | | mark_successes()
+/// | v
+/// | Waiting
+/// |
+/// | process_cycles()
+/// v
+/// Done
+/// |
+/// | compress()
+/// v
+/// (Removed)
+/// ```
+/// The `Error` state can be introduced in several places, via `error_at()`.
+///
+/// Outside of `ObligationForest` methods, nodes should be either `Pending` or
+/// `Waiting`.
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum NodeState {
+ /// This obligation has not yet been selected successfully. Cannot have
+ /// subobligations.
+ Pending,
+
+ /// This obligation was selected successfully, but may or may not have
+ /// subobligations.
+ Success,
+
+ /// This obligation was selected successfully, but it has a pending
+ /// subobligation.
+ Waiting,
+
+ /// This obligation, along with its subobligations, are complete, and will
+ /// be removed in the next collection.
+ Done,
+
+ /// This obligation was resolved to an error. It will be removed by the
+ /// next compression step.
+ Error,
+}
+
+/// This trait allows us to have two different Outcome types:
+/// - the normal one that does as little as possible
+/// - one for tests that does some additional work and checking
+pub trait OutcomeTrait {
+ type Error;
+ type Obligation;
+
+ fn new() -> Self;
+ fn record_completed(&mut self, outcome: &Self::Obligation);
+ fn record_error(&mut self, error: Self::Error);
+}
+
+#[derive(Debug)]
+pub struct Outcome<O, E> {
+ /// Backtrace of obligations that were found to be in error.
+ pub errors: Vec<Error<O, E>>,
+}
+
+impl<O, E> OutcomeTrait for Outcome<O, E> {
+ type Error = Error<O, E>;
+ type Obligation = O;
+
+ fn new() -> Self {
+ Self { errors: vec![] }
+ }
+
+ fn record_completed(&mut self, _outcome: &Self::Obligation) {
+ // do nothing
+ }
+
+ fn record_error(&mut self, error: Self::Error) {
+ self.errors.push(error)
+ }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct Error<O, E> {
+ pub error: E,
+ pub backtrace: Vec<O>,
+}
+
+impl<O: ForestObligation> ObligationForest<O> {
+ pub fn new() -> ObligationForest<O> {
+ ObligationForest {
+ nodes: vec![],
+ done_cache: Default::default(),
+ active_cache: Default::default(),
+ reused_node_vec: vec![],
+ obligation_tree_id_generator: (0..).map(ObligationTreeId),
+ error_cache: Default::default(),
+ }
+ }
+
+ /// Returns the total number of nodes in the forest that have not
+ /// yet been fully resolved.
+ pub fn len(&self) -> usize {
+ self.nodes.len()
+ }
+
+ /// Registers an obligation.
+ pub fn register_obligation(&mut self, obligation: O) {
+ // Ignore errors here - there is no guarantee of success.
+ let _ = self.register_obligation_at(obligation, None);
+ }
+
+ // Returns Err(()) if we already know this obligation failed.
+ fn register_obligation_at(&mut self, obligation: O, parent: Option<usize>) -> Result<(), ()> {
+ let cache_key = obligation.as_cache_key();
+ if self.done_cache.contains(&cache_key) {
+ debug!("register_obligation_at: ignoring already done obligation: {:?}", obligation);
+ return Ok(());
+ }
+
+ match self.active_cache.entry(cache_key) {
+ Entry::Occupied(o) => {
+ let node = &mut self.nodes[*o.get()];
+ if let Some(parent_index) = parent {
+                    // If the node is already in `active_cache`, it has already
+                    // had its chance to be marked with a parent. So if `parent`
+                    // is not already among this node's dependents, just record
+                    // it as a non-parent dependent.
+ if !node.dependents.contains(&parent_index) {
+ node.dependents.push(parent_index);
+ }
+ }
+ if let NodeState::Error = node.state.get() { Err(()) } else { Ok(()) }
+ }
+ Entry::Vacant(v) => {
+ let obligation_tree_id = match parent {
+ Some(parent_index) => self.nodes[parent_index].obligation_tree_id,
+ None => self.obligation_tree_id_generator.next().unwrap(),
+ };
+
+ let already_failed = parent.is_some()
+ && self
+ .error_cache
+ .get(&obligation_tree_id)
+ .map_or(false, |errors| errors.contains(v.key()));
+
+ if already_failed {
+ Err(())
+ } else {
+ let new_index = self.nodes.len();
+ v.insert(new_index);
+ self.nodes.push(Node::new(parent, obligation, obligation_tree_id));
+ Ok(())
+ }
+ }
+ }
+ }
+
+ /// Converts all remaining obligations to the given error.
+ pub fn to_errors<E: Clone>(&mut self, error: E) -> Vec<Error<O, E>> {
+ let errors = self
+ .nodes
+ .iter()
+ .enumerate()
+ .filter(|(_index, node)| node.state.get() == NodeState::Pending)
+ .map(|(index, _node)| Error { error: error.clone(), backtrace: self.error_at(index) })
+ .collect();
+
+ self.compress(|_| assert!(false));
+ errors
+ }
+
+    /// Applies `f` to each obligation in a pending state and returns the results.
+ pub fn map_pending_obligations<P, F>(&self, f: F) -> Vec<P>
+ where
+ F: Fn(&O) -> P,
+ {
+ self.nodes
+ .iter()
+ .filter(|node| node.state.get() == NodeState::Pending)
+ .map(|node| f(&node.obligation))
+ .collect()
+ }
+
+ fn insert_into_error_cache(&mut self, index: usize) {
+ let node = &self.nodes[index];
+ self.error_cache
+ .entry(node.obligation_tree_id)
+ .or_default()
+ .insert(node.obligation.as_cache_key());
+ }
+
+ /// Performs a fixpoint computation over the obligation list.
+ #[inline(never)]
+ pub fn process_obligations<P, OUT>(&mut self, processor: &mut P) -> OUT
+ where
+ P: ObligationProcessor<Obligation = O>,
+ OUT: OutcomeTrait<Obligation = O, Error = Error<O, P::Error>>,
+ {
+ let mut outcome = OUT::new();
+
+ // Fixpoint computation: we repeat until the inner loop stalls.
+ loop {
+ let mut has_changed = false;
+
+ // Note that the loop body can append new nodes, and those new nodes
+ // will then be processed by subsequent iterations of the loop.
+ //
+ // We can't use an iterator for the loop because `self.nodes` is
+ // appended to and the borrow checker would complain. We also can't use
+ // `for index in 0..self.nodes.len() { ... }` because the range would
+ // be computed with the initial length, and we would miss the appended
+ // nodes. Therefore we use a `while` loop.
+ let mut index = 0;
+ while let Some(node) = self.nodes.get_mut(index) {
+ if node.state.get() != NodeState::Pending
+ || !processor.needs_process_obligation(&node.obligation)
+ {
+ index += 1;
+ continue;
+ }
+
+ // `processor.process_obligation` can modify the predicate within
+ // `node.obligation`, and that predicate is the key used for
+ // `self.active_cache`. This means that `self.active_cache` can get
+ // out of sync with `nodes`. It's not very common, but it does
+ // happen, and code in `compress` has to allow for it.
+
+ match processor.process_obligation(&mut node.obligation) {
+ ProcessResult::Unchanged => {
+ // No change in state.
+ }
+ ProcessResult::Changed(children) => {
+ // We are not (yet) stalled.
+ has_changed = true;
+ node.state.set(NodeState::Success);
+
+ for child in children {
+ let st = self.register_obligation_at(child, Some(index));
+ if let Err(()) = st {
+ // Error already reported - propagate it
+ // to our node.
+ self.error_at(index);
+ }
+ }
+ }
+ ProcessResult::Error(err) => {
+ has_changed = true;
+ outcome.record_error(Error { error: err, backtrace: self.error_at(index) });
+ }
+ }
+ index += 1;
+ }
+
+ // If unchanged, then we saw no successful obligations, which means
+ // there is no point in further iteration. This is based on the
+ // assumption that when trait matching returns `Error` or
+ // `Unchanged`, those results do not affect environmental inference
+ // state. (Note that this will occur if we invoke
+ // `process_obligations` with no pending obligations.)
+ if !has_changed {
+ break;
+ }
+
+ self.mark_successes();
+ self.process_cycles(processor);
+ self.compress(|obl| outcome.record_completed(obl));
+ }
+
+ outcome
+ }
+
+ /// Returns a vector of obligations for `p` and all of its
+ /// ancestors, putting them into the error state in the process.
+ fn error_at(&self, mut index: usize) -> Vec<O> {
+ let mut error_stack: Vec<usize> = vec![];
+ let mut trace = vec![];
+
+ loop {
+ let node = &self.nodes[index];
+ node.state.set(NodeState::Error);
+ trace.push(node.obligation.clone());
+ if node.has_parent {
+ // The first dependent is the parent, which is treated
+ // specially.
+ error_stack.extend(node.dependents.iter().skip(1));
+ index = node.dependents[0];
+ } else {
+ // No parent; treat all dependents non-specially.
+ error_stack.extend(node.dependents.iter());
+ break;
+ }
+ }
+
+ while let Some(index) = error_stack.pop() {
+ let node = &self.nodes[index];
+ if node.state.get() != NodeState::Error {
+ node.state.set(NodeState::Error);
+ error_stack.extend(node.dependents.iter());
+ }
+ }
+
+ trace
+ }
+
+ /// Mark all `Waiting` nodes as `Success`, except those that depend on a
+ /// pending node.
+ fn mark_successes(&self) {
+ // Convert all `Waiting` nodes to `Success`.
+ for node in &self.nodes {
+ if node.state.get() == NodeState::Waiting {
+ node.state.set(NodeState::Success);
+ }
+ }
+
+ // Convert `Success` nodes that depend on a pending node back to
+ // `Waiting`.
+ for node in &self.nodes {
+ if node.state.get() == NodeState::Pending {
+ // This call site is hot.
+ self.inlined_mark_dependents_as_waiting(node);
+ }
+ }
+ }
+
+ // This always-inlined function is for the hot call site.
+ #[inline(always)]
+ fn inlined_mark_dependents_as_waiting(&self, node: &Node<O>) {
+ for &index in node.dependents.iter() {
+ let node = &self.nodes[index];
+ let state = node.state.get();
+ if state == NodeState::Success {
+ // This call site is cold.
+ self.uninlined_mark_dependents_as_waiting(node);
+ } else {
+ debug_assert!(state == NodeState::Waiting || state == NodeState::Error)
+ }
+ }
+ }
+
+ // This never-inlined function is for the cold call site.
+ #[inline(never)]
+ fn uninlined_mark_dependents_as_waiting(&self, node: &Node<O>) {
+        // Mark the node as Waiting in the cold, uninlined code instead of the hot, inlined code.
+ node.state.set(NodeState::Waiting);
+ self.inlined_mark_dependents_as_waiting(node)
+ }
+
+ /// Report cycles between all `Success` nodes, and convert all `Success`
+ /// nodes to `Done`. This must be called after `mark_successes`.
+ fn process_cycles<P>(&mut self, processor: &mut P)
+ where
+ P: ObligationProcessor<Obligation = O>,
+ {
+ let mut stack = std::mem::take(&mut self.reused_node_vec);
+ for (index, node) in self.nodes.iter().enumerate() {
+ // For some benchmarks this state test is extremely hot. It's a win
+ // to handle the no-op cases immediately to avoid the cost of the
+ // function call.
+ if node.state.get() == NodeState::Success {
+ self.find_cycles_from_node(&mut stack, processor, index);
+ }
+ }
+
+ debug_assert!(stack.is_empty());
+ self.reused_node_vec = stack;
+ }
+
+ fn find_cycles_from_node<P>(&self, stack: &mut Vec<usize>, processor: &mut P, index: usize)
+ where
+ P: ObligationProcessor<Obligation = O>,
+ {
+ let node = &self.nodes[index];
+ if node.state.get() == NodeState::Success {
+ match stack.iter().rposition(|&n| n == index) {
+ None => {
+ stack.push(index);
+ for &dep_index in node.dependents.iter() {
+ self.find_cycles_from_node(stack, processor, dep_index);
+ }
+ stack.pop();
+ node.state.set(NodeState::Done);
+ }
+ Some(rpos) => {
+ // Cycle detected.
+ processor.process_backedge(
+ stack[rpos..].iter().map(|&i| &self.nodes[i].obligation),
+ PhantomData,
+ );
+ }
+ }
+ }
+ }
+
+ /// Compresses the vector, removing all popped nodes. This adjusts the
+ /// indices and hence invalidates any outstanding indices. `process_cycles`
+ /// must be run beforehand to remove any cycles on `Success` nodes.
+ #[inline(never)]
+ fn compress(&mut self, mut outcome_cb: impl FnMut(&O)) {
+ let orig_nodes_len = self.nodes.len();
+ let mut node_rewrites: Vec<_> = std::mem::take(&mut self.reused_node_vec);
+ debug_assert!(node_rewrites.is_empty());
+ node_rewrites.extend(0..orig_nodes_len);
+ let mut dead_nodes = 0;
+
+ // Move removable nodes to the end, preserving the order of the
+ // remaining nodes.
+ //
+ // LOOP INVARIANT:
+ // self.nodes[0..index - dead_nodes] are the first remaining nodes
+ // self.nodes[index - dead_nodes..index] are all dead
+ // self.nodes[index..] are unchanged
+ for index in 0..orig_nodes_len {
+ let node = &self.nodes[index];
+ match node.state.get() {
+ NodeState::Pending | NodeState::Waiting => {
+ if dead_nodes > 0 {
+ self.nodes.swap(index, index - dead_nodes);
+ node_rewrites[index] -= dead_nodes;
+ }
+ }
+ NodeState::Done => {
+ // The removal lookup might fail because the contents of
+ // `self.active_cache` are not guaranteed to match those of
+ // `self.nodes`. See the comment in `process_obligation`
+ // for more details.
+ let cache_key = node.obligation.as_cache_key();
+ self.active_cache.remove(&cache_key);
+ self.done_cache.insert(cache_key);
+
+ // Extract the success stories.
+ outcome_cb(&node.obligation);
+ node_rewrites[index] = orig_nodes_len;
+ dead_nodes += 1;
+ }
+ NodeState::Error => {
+ // We *intentionally* remove the node from the cache at this point. Otherwise
+ // tests must come up with a different type on every type error they
+ // check against.
+ self.active_cache.remove(&node.obligation.as_cache_key());
+ self.insert_into_error_cache(index);
+ node_rewrites[index] = orig_nodes_len;
+ dead_nodes += 1;
+ }
+ NodeState::Success => unreachable!(),
+ }
+ }
+
+ if dead_nodes > 0 {
+ // Remove the dead nodes and rewrite indices.
+ self.nodes.truncate(orig_nodes_len - dead_nodes);
+ self.apply_rewrites(&node_rewrites);
+ }
+
+ node_rewrites.truncate(0);
+ self.reused_node_vec = node_rewrites;
+ }
+
+ #[inline(never)]
+ fn apply_rewrites(&mut self, node_rewrites: &[usize]) {
+ let orig_nodes_len = node_rewrites.len();
+
+ for node in &mut self.nodes {
+ let mut i = 0;
+ while let Some(dependent) = node.dependents.get_mut(i) {
+ let new_index = node_rewrites[*dependent];
+ if new_index >= orig_nodes_len {
+ node.dependents.swap_remove(i);
+ if i == 0 && node.has_parent {
+ // We just removed the parent.
+ node.has_parent = false;
+ }
+ } else {
+ *dependent = new_index;
+ i += 1;
+ }
+ }
+ }
+
+ // This updating of `self.active_cache` is necessary because the
+ // removal of nodes within `compress` can fail. See above.
+ self.active_cache.retain(|_predicate, index| {
+ let new_index = node_rewrites[*index];
+ if new_index >= orig_nodes_len {
+ false
+ } else {
+ *index = new_index;
+ true
+ }
+ });
+ }
+}
diff --git a/compiler/rustc_data_structures/src/obligation_forest/tests.rs b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
new file mode 100644
index 000000000..e2991aae1
--- /dev/null
+++ b/compiler/rustc_data_structures/src/obligation_forest/tests.rs
@@ -0,0 +1,479 @@
+use super::*;
+
+use std::fmt;
+use std::marker::PhantomData;
+
+impl<'a> super::ForestObligation for &'a str {
+ type CacheKey = &'a str;
+
+ fn as_cache_key(&self) -> Self::CacheKey {
+ self
+ }
+}
+
+struct ClosureObligationProcessor<OF, BF, O, E> {
+ process_obligation: OF,
+ _process_backedge: BF,
+ marker: PhantomData<(O, E)>,
+}
+
+struct TestOutcome<O, E> {
+ pub completed: Vec<O>,
+ pub errors: Vec<Error<O, E>>,
+}
+
+impl<O, E> OutcomeTrait for TestOutcome<O, E>
+where
+ O: Clone,
+{
+ type Error = Error<O, E>;
+ type Obligation = O;
+
+ fn new() -> Self {
+ Self { errors: vec![], completed: vec![] }
+ }
+
+ fn record_completed(&mut self, outcome: &Self::Obligation) {
+ self.completed.push(outcome.clone())
+ }
+
+ fn record_error(&mut self, error: Self::Error) {
+ self.errors.push(error)
+ }
+}
+
+#[allow(non_snake_case)]
+fn C<OF, BF, O>(of: OF, bf: BF) -> ClosureObligationProcessor<OF, BF, O, &'static str>
+where
+ OF: FnMut(&mut O) -> ProcessResult<O, &'static str>,
+ BF: FnMut(&[O]),
+{
+ ClosureObligationProcessor {
+ process_obligation: of,
+ _process_backedge: bf,
+ marker: PhantomData,
+ }
+}
+
+impl<OF, BF, O, E> ObligationProcessor for ClosureObligationProcessor<OF, BF, O, E>
+where
+ O: super::ForestObligation + fmt::Debug,
+ E: fmt::Debug,
+ OF: FnMut(&mut O) -> ProcessResult<O, E>,
+ BF: FnMut(&[O]),
+{
+ type Obligation = O;
+ type Error = E;
+
+ fn needs_process_obligation(&self, _obligation: &Self::Obligation) -> bool {
+ true
+ }
+
+ fn process_obligation(
+ &mut self,
+ obligation: &mut Self::Obligation,
+ ) -> ProcessResult<Self::Obligation, Self::Error> {
+ (self.process_obligation)(obligation)
+ }
+
+ fn process_backedge<'c, I>(&mut self, _cycle: I, _marker: PhantomData<&'c Self::Obligation>)
+ where
+ I: Clone + Iterator<Item = &'c Self::Obligation>,
+ {
+ }
+}
+
+#[test]
+fn push_pop() {
+ let mut forest = ObligationForest::new();
+ forest.register_obligation("A");
+ forest.register_obligation("B");
+ forest.register_obligation("C");
+
+ // first round, B errors out, A has subtasks, and C completes, creating this:
+ // A |-> A.1
+ // |-> A.2
+ // |-> A.3
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
+ "B" => ProcessResult::Error("B is for broken"),
+ "C" => ProcessResult::Changed(vec![]),
+ "A.1" | "A.2" | "A.3" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok, vec!["C"]);
+ assert_eq!(err, vec![Error { error: "B is for broken", backtrace: vec!["B"] }]);
+
+ // second round: two delays, one success, creating an uneven set of subtasks:
+ // A |-> A.1
+ // |-> A.2
+ // |-> A.3 |-> A.3.i
+ // D |-> D.1
+ // |-> D.2
+ forest.register_obligation("D");
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A.1" => ProcessResult::Unchanged,
+ "A.2" => ProcessResult::Unchanged,
+ "A.3" => ProcessResult::Changed(vec!["A.3.i"]),
+ "D" => ProcessResult::Changed(vec!["D.1", "D.2"]),
+ "A.3.i" | "D.1" | "D.2" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok, Vec::<&'static str>::new());
+ assert_eq!(err, Vec::new());
+
+ // third round: ok in A.1 but trigger an error in A.2. Check that it
+ // propagates to A, but not D.1 or D.2.
+ // D |-> D.1 |-> D.1.i
+ // |-> D.2 |-> D.2.i
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A.1" => ProcessResult::Changed(vec![]),
+ "A.2" => ProcessResult::Error("A is for apple"),
+ "A.3.i" => ProcessResult::Changed(vec![]),
+ "D.1" => ProcessResult::Changed(vec!["D.1.i"]),
+ "D.2" => ProcessResult::Changed(vec!["D.2.i"]),
+ "D.1.i" | "D.2.i" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ let mut ok = ok;
+ ok.sort();
+ assert_eq!(ok, vec!["A.1", "A.3", "A.3.i"]);
+ assert_eq!(err, vec![Error { error: "A is for apple", backtrace: vec!["A.2", "A"] }]);
+
+ // fourth round: error in D.1.i
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "D.1.i" => ProcessResult::Error("D is for dumb"),
+ "D.2.i" => ProcessResult::Changed(vec![]),
+ _ => panic!("unexpected obligation {:?}", obligation),
+ },
+ |_| {},
+ ));
+ let mut ok = ok;
+ ok.sort();
+ assert_eq!(ok, vec!["D.2", "D.2.i"]);
+ assert_eq!(err, vec![Error { error: "D is for dumb", backtrace: vec!["D.1.i", "D.1", "D"] }]);
+}
+
+// Test that if a tree with grandchildren succeeds, everything is
+// reported as expected:
+// A
+// A.1
+// A.2
+// A.2.i
+// A.2.ii
+// A.3
+#[test]
+fn success_in_grandchildren() {
+ let mut forest = ObligationForest::new();
+ forest.register_obligation("A");
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
+ "A.1" => ProcessResult::Changed(vec![]),
+ "A.2" => ProcessResult::Changed(vec!["A.2.i", "A.2.ii"]),
+ "A.3" => ProcessResult::Changed(vec![]),
+ "A.2.i" | "A.2.ii" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ let mut ok = ok;
+ ok.sort();
+ assert_eq!(ok, vec!["A.1", "A.3"]);
+ assert!(err.is_empty());
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A.2.i" => ProcessResult::Unchanged,
+ "A.2.ii" => ProcessResult::Changed(vec![]),
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok, vec!["A.2.ii"]);
+ assert!(err.is_empty());
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A.2.i" => ProcessResult::Changed(vec!["A.2.i.a"]),
+ "A.2.i.a" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert!(ok.is_empty());
+ assert!(err.is_empty());
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A.2.i.a" => ProcessResult::Changed(vec![]),
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ let mut ok = ok;
+ ok.sort();
+ assert_eq!(ok, vec!["A", "A.2", "A.2.i", "A.2.i.a"]);
+ assert!(err.is_empty());
+
+ let TestOutcome { completed: ok, errors: err, .. } =
+ forest.process_obligations(&mut C(|_| unreachable!(), |_| {}));
+
+ assert!(ok.is_empty());
+ assert!(err.is_empty());
+}
+
+#[test]
+fn to_errors_no_throw() {
+    // check that converting multiple children with a common parent (A)
+    // yields the correct errors (and, in particular, does not panic).
+ let mut forest = ObligationForest::new();
+ forest.register_obligation("A");
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A" => ProcessResult::Changed(vec!["A.1", "A.2", "A.3"]),
+ "A.1" | "A.2" | "A.3" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err.len(), 0);
+ let errors = forest.to_errors(());
+ assert_eq!(errors[0].backtrace, vec!["A.1", "A"]);
+ assert_eq!(errors[1].backtrace, vec!["A.2", "A"]);
+ assert_eq!(errors[2].backtrace, vec!["A.3", "A"]);
+ assert_eq!(errors.len(), 3);
+}
+
+#[test]
+fn diamond() {
+ // check that diamond dependencies are handled correctly
+ let mut forest = ObligationForest::new();
+ forest.register_obligation("A");
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A" => ProcessResult::Changed(vec!["A.1", "A.2"]),
+ "A.1" | "A.2" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err.len(), 0);
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A.1" => ProcessResult::Changed(vec!["D"]),
+ "A.2" => ProcessResult::Changed(vec!["D"]),
+ "D" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err.len(), 0);
+
+ let mut d_count = 0;
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "D" => {
+ d_count += 1;
+ ProcessResult::Changed(vec![])
+ }
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(d_count, 1);
+ let mut ok = ok;
+ ok.sort();
+ assert_eq!(ok, vec!["A", "A.1", "A.2", "D"]);
+ assert_eq!(err.len(), 0);
+
+ let errors = forest.to_errors(());
+ assert_eq!(errors.len(), 0);
+
+ forest.register_obligation("A'");
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A'" => ProcessResult::Changed(vec!["A'.1", "A'.2"]),
+ "A'.1" | "A'.2" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err.len(), 0);
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A'.1" => ProcessResult::Changed(vec!["D'", "A'"]),
+ "A'.2" => ProcessResult::Changed(vec!["D'"]),
+ "D'" | "A'" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err.len(), 0);
+
+ let mut d_count = 0;
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "D'" => {
+ d_count += 1;
+ ProcessResult::Error("operation failed")
+ }
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(d_count, 1);
+ assert_eq!(ok.len(), 0);
+ assert_eq!(
+ err,
+ vec![super::Error { error: "operation failed", backtrace: vec!["D'", "A'.1", "A'"] }]
+ );
+
+ let errors = forest.to_errors(());
+ assert_eq!(errors.len(), 0);
+}
+
+#[test]
+fn done_dependency() {
+ // check that the local cache works
+ let mut forest = ObligationForest::new();
+ forest.register_obligation("A: Sized");
+ forest.register_obligation("B: Sized");
+ forest.register_obligation("C: Sized");
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A: Sized" | "B: Sized" | "C: Sized" => ProcessResult::Changed(vec![]),
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ let mut ok = ok;
+ ok.sort();
+ assert_eq!(ok, vec!["A: Sized", "B: Sized", "C: Sized"]);
+ assert_eq!(err.len(), 0);
+
+ forest.register_obligation("(A,B,C): Sized");
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "(A,B,C): Sized" => ProcessResult::Changed(vec!["A: Sized", "B: Sized", "C: Sized"]),
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok, vec!["(A,B,C): Sized"]);
+ assert_eq!(err.len(), 0);
+}
+
+#[test]
+fn orphan() {
+ // check that orphaned nodes are handled correctly
+ let mut forest = ObligationForest::new();
+ forest.register_obligation("A");
+ forest.register_obligation("B");
+ forest.register_obligation("C1");
+ forest.register_obligation("C2");
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A" => ProcessResult::Changed(vec!["D", "E"]),
+ "B" => ProcessResult::Unchanged,
+ "C1" => ProcessResult::Changed(vec![]),
+ "C2" => ProcessResult::Changed(vec![]),
+ "D" | "E" => ProcessResult::Unchanged,
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ let mut ok = ok;
+ ok.sort();
+ assert_eq!(ok, vec!["C1", "C2"]);
+ assert_eq!(err.len(), 0);
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "D" | "E" => ProcessResult::Unchanged,
+ "B" => ProcessResult::Changed(vec!["D"]),
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err.len(), 0);
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "D" => ProcessResult::Unchanged,
+ "E" => ProcessResult::Error("E is for error"),
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err, vec![super::Error { error: "E is for error", backtrace: vec!["E", "A"] }]);
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "D" => ProcessResult::Error("D is dead"),
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err, vec![super::Error { error: "D is dead", backtrace: vec!["D"] }]);
+
+ let errors = forest.to_errors(());
+ assert_eq!(errors.len(), 0);
+}
+
+#[test]
+fn simultaneous_register_and_error() {
+ // check that registering a failed obligation works correctly
+ let mut forest = ObligationForest::new();
+ forest.register_obligation("A");
+ forest.register_obligation("B");
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A" => ProcessResult::Error("An error"),
+ "B" => ProcessResult::Changed(vec!["A"]),
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err, vec![super::Error { error: "An error", backtrace: vec!["A"] }]);
+
+ let mut forest = ObligationForest::new();
+ forest.register_obligation("B");
+ forest.register_obligation("A");
+
+ let TestOutcome { completed: ok, errors: err, .. } = forest.process_obligations(&mut C(
+ |obligation| match *obligation {
+ "A" => ProcessResult::Error("An error"),
+ "B" => ProcessResult::Changed(vec!["A"]),
+ _ => unreachable!(),
+ },
+ |_| {},
+ ));
+ assert_eq!(ok.len(), 0);
+ assert_eq!(err, vec![super::Error { error: "An error", backtrace: vec!["A"] }]);
+}
diff --git a/compiler/rustc_data_structures/src/owning_ref/LICENSE b/compiler/rustc_data_structures/src/owning_ref/LICENSE
new file mode 100644
index 000000000..dff72d1e4
--- /dev/null
+++ b/compiler/rustc_data_structures/src/owning_ref/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Marvin Löbel
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/compiler/rustc_data_structures/src/owning_ref/mod.rs b/compiler/rustc_data_structures/src/owning_ref/mod.rs
new file mode 100644
index 000000000..ed5e56618
--- /dev/null
+++ b/compiler/rustc_data_structures/src/owning_ref/mod.rs
@@ -0,0 +1,1214 @@
+#![warn(missing_docs)]
+
+/*!
+# An owning reference.
+
+This crate provides the _owning reference_ types `OwningRef` and `OwningRefMut`
+that enable bundling a reference together with the owner of the data it points to.
+This allows moving and dropping an `OwningRef` without needing to recreate the reference.
+
+This can sometimes be useful because Rust borrowing rules normally prevent
+moving a type that has been borrowed from. For example, this kind of code gets rejected:
+
+```compile_fail,E0515
+fn return_owned_and_referenced<'a>() -> (Vec<u8>, &'a [u8]) {
+ let v = vec![1, 2, 3, 4];
+ let s = &v[1..3];
+ (v, s)
+}
+```
+
+Yet, from a memory-layout point of view, this can be entirely safe:
+the backing allocation of the vector does not change when the vector is moved,
+so the reference remains valid as long as the vector's new location lives
+longer than the lifetime `'a` of the reference.
+
+This library enables this safe usage by keeping the owner and the reference
+bundled together in a wrapper type that ensures that lifetime constraint:
+
+```
+# use rustc_data_structures::owning_ref::OwningRef;
+# fn main() {
+fn return_owned_and_referenced() -> OwningRef<Vec<u8>, [u8]> {
+ let v = vec![1, 2, 3, 4];
+ let or = OwningRef::new(v);
+ let or = or.map(|v| &v[1..3]);
+ or
+}
+# }
+```
+
+It works by requiring owner types to dereference to stable memory locations
+and preventing mutable access to root containers, which in practice requires heap allocation
+as provided by `Box<T>`, `Rc<T>`, etc.
+
+Also provided are typedefs for common owner type combinations,
+which allow for less verbose type signatures.
+For example, `BoxRef<T>` instead of `OwningRef<Box<T>, T>`.
+
+The crate also provides the more advanced `OwningHandle` type,
+which allows more freedom in bundling a dependent handle object
+along with the data it depends on, at the cost of some unsafe needed in the API.
+See the documentation around `OwningHandle` for more details.
+
+# Examples
+
+## Basics
+
+```
+use rustc_data_structures::owning_ref::BoxRef;
+
+fn main() {
+ // Create an array owned by a Box.
+ let arr = Box::new([1, 2, 3, 4]) as Box<[i32]>;
+
+ // Transfer into a BoxRef.
+ let arr: BoxRef<[i32]> = BoxRef::new(arr);
+ assert_eq!(&*arr, &[1, 2, 3, 4]);
+
+ // We can slice the array without losing ownership or changing type.
+ let arr: BoxRef<[i32]> = arr.map(|arr| &arr[1..3]);
+ assert_eq!(&*arr, &[2, 3]);
+
+ // Also works for Arc, Rc, String and Vec!
+}
+```
+
+## Caching a reference to a struct field
+
+```
+use rustc_data_structures::owning_ref::BoxRef;
+
+fn main() {
+ struct Foo {
+ tag: u32,
+ x: u16,
+ y: u16,
+ z: u16,
+ }
+ let foo = Foo { tag: 1, x: 100, y: 200, z: 300 };
+
+ let or = BoxRef::new(Box::new(foo)).map(|foo| {
+ match foo.tag {
+ 0 => &foo.x,
+ 1 => &foo.y,
+ 2 => &foo.z,
+ _ => panic!(),
+ }
+ });
+
+ assert_eq!(*or, 200);
+}
+```
+
+## Caching a reference to an entry in a vector
+
+```
+use rustc_data_structures::owning_ref::VecRef;
+
+fn main() {
+ let v = VecRef::new(vec![1, 2, 3, 4, 5]).map(|v| &v[3]);
+ assert_eq!(*v, 4);
+}
+```
+
+## Caching a subslice of a String
+
+```
+use rustc_data_structures::owning_ref::StringRef;
+
+fn main() {
+ let s = StringRef::new("hello world".to_owned())
+ .map(|s| s.split(' ').nth(1).unwrap());
+
+ assert_eq!(&*s, "world");
+}
+```
+
+## Reference counted slices that share ownership of the backing storage
+
+```
+use rustc_data_structures::owning_ref::RcRef;
+use std::rc::Rc;
+
+fn main() {
+ let rc: RcRef<[i32]> = RcRef::new(Rc::new([1, 2, 3, 4]) as Rc<[i32]>);
+ assert_eq!(&*rc, &[1, 2, 3, 4]);
+
+ let rc_a: RcRef<[i32]> = rc.clone().map(|s| &s[0..2]);
+ let rc_b = rc.clone().map(|s| &s[1..3]);
+ let rc_c = rc.clone().map(|s| &s[2..4]);
+ assert_eq!(&*rc_a, &[1, 2]);
+ assert_eq!(&*rc_b, &[2, 3]);
+ assert_eq!(&*rc_c, &[3, 4]);
+
+ let rc_c_a = rc_c.clone().map(|s| &s[1]);
+ assert_eq!(&*rc_c_a, &4);
+}
+```
+
+## Atomic reference counted slices that share ownership of the backing storage
+
+```
+use rustc_data_structures::owning_ref::ArcRef;
+use std::sync::Arc;
+
+fn main() {
+ use std::thread;
+
+ fn par_sum(rc: ArcRef<[i32]>) -> i32 {
+ if rc.len() == 0 {
+ return 0;
+ } else if rc.len() == 1 {
+ return rc[0];
+ }
+ let mid = rc.len() / 2;
+ let left = rc.clone().map(|s| &s[..mid]);
+ let right = rc.map(|s| &s[mid..]);
+
+ let left = thread::spawn(move || par_sum(left));
+ let right = thread::spawn(move || par_sum(right));
+
+ left.join().unwrap() + right.join().unwrap()
+ }
+
+ let rc: Arc<[i32]> = Arc::new([1, 2, 3, 4]);
+ let rc: ArcRef<[i32]> = rc.into();
+
+ assert_eq!(par_sum(rc), 10);
+}
+```
+
+## References into RAII locks
+
+```
+use rustc_data_structures::owning_ref::RefRef;
+use std::cell::{RefCell, Ref};
+
+fn main() {
+ let refcell = RefCell::new((1, 2, 3, 4));
+ // Also works with Mutex and RwLock
+
+ let refref = {
+ let refref = RefRef::new(refcell.borrow()).map(|x| &x.3);
+ assert_eq!(*refref, 4);
+
+ // We move the RAII lock and the reference to one of
+ // the subfields in the data it guards here:
+ refref
+ };
+
+ assert_eq!(*refref, 4);
+
+ drop(refref);
+
+ assert_eq!(*refcell.borrow(), (1, 2, 3, 4));
+}
+```
+
+## Mutable reference
+
+When the owned container implements `DerefMut`, it is also possible to make
+a _mutable owning reference_ (e.g., with `Box`, `RefMut`, `MutexGuard`).
+
+```
+use rustc_data_structures::owning_ref::RefMutRefMut;
+use std::cell::{RefCell, RefMut};
+
+fn main() {
+ let refcell = RefCell::new((1, 2, 3, 4));
+
+ let mut refmut_refmut = {
+ let mut refmut_refmut = RefMutRefMut::new(refcell.borrow_mut()).map_mut(|x| &mut x.3);
+ assert_eq!(*refmut_refmut, 4);
+ *refmut_refmut *= 2;
+
+ refmut_refmut
+ };
+
+ assert_eq!(*refmut_refmut, 8);
+ *refmut_refmut *= 2;
+
+ drop(refmut_refmut);
+
+ assert_eq!(*refcell.borrow(), (1, 2, 3, 16));
+}
+```
+*/
+
+pub use stable_deref_trait::{
+ CloneStableDeref as CloneStableAddress, StableDeref as StableAddress,
+};
+use std::mem;
+
+/// An owning reference.
+///
+/// This wraps an owner `O` and a reference `&T` pointing
+/// at something reachable from `O::Target` while keeping
+/// the ability to move `self` around.
+///
+/// The owner is usually a pointer that points at some base type.
+///
+/// For more details and examples, see the module and method docs.
+pub struct OwningRef<O, T: ?Sized> {
+ owner: O,
+ reference: *const T,
+}
+
+/// A mutable owning reference.
+///
+/// This wraps an owner `O` and a reference `&mut T` pointing
+/// at something reachable from `O::Target` while keeping
+/// the ability to move `self` around.
+///
+/// The owner is usually a pointer that points at some base type.
+///
+/// For more details and examples, see the module and method docs.
+pub struct OwningRefMut<O, T: ?Sized> {
+ owner: O,
+ reference: *mut T,
+}
+
+/// Helper trait for erasing the concrete type an owner dereferences to.
+/// This is used in the form of a trait object for keeping
+/// something around to (virtually) call the destructor.
+pub trait Erased {}
+impl<T> Erased for T {}
+
+/// Helper trait for erasing the concrete type of what an owner dereferences to,
+/// for example `Box<T> -> Box<Erased>`. This would be unneeded with
+/// higher kinded types support in the language.
+#[allow(unused_lifetimes)]
+pub unsafe trait IntoErased<'a> {
+ /// Owner with the dereference type substituted to `Erased`.
+ type Erased;
+ /// Performs the type erasure.
+ fn into_erased(self) -> Self::Erased;
+}
+
+/// Helper trait for erasing the concrete type of what an owner dereferences to,
+/// for example `Box<T> -> Box<Erased + Send>`. This would be unneeded with
+/// higher kinded types support in the language.
+#[allow(unused_lifetimes)]
+pub unsafe trait IntoErasedSend<'a> {
+ /// Owner with the dereference type substituted to `Erased + Send`.
+ type Erased: Send;
+ /// Performs the type erasure.
+ fn into_erased_send(self) -> Self::Erased;
+}
+
+/// Helper trait for erasing the concrete type of what an owner dereferences to,
+/// for example `Box<T> -> Box<Erased + Send + Sync>`. This would be unneeded with
+/// higher kinded types support in the language.
+#[allow(unused_lifetimes)]
+pub unsafe trait IntoErasedSendSync<'a> {
+ /// Owner with the dereference type substituted to `Erased + Send + Sync`.
+ type Erased: Send + Sync;
+ /// Performs the type erasure.
+ fn into_erased_send_sync(self) -> Self::Erased;
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// OwningRef
+/////////////////////////////////////////////////////////////////////////////
+
+impl<O, T: ?Sized> OwningRef<O, T> {
+ /// Creates a new owning reference from an owner
+ /// initialized to the direct dereference of it.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRef;
+ ///
+ /// fn main() {
+ /// let owning_ref = OwningRef::new(Box::new(42));
+ /// assert_eq!(*owning_ref, 42);
+ /// }
+ /// ```
+ pub fn new(o: O) -> Self
+ where
+ O: StableAddress,
+ O: Deref<Target = T>,
+ {
+ OwningRef { reference: &*o, owner: o }
+ }
+
+ /// Like `new`, but doesn’t require `O` to implement the `StableAddress` trait.
+ /// Instead, the caller is responsible for making the same promises as implementing the trait.
+ ///
+ /// This is useful for cases where coherence rules prevent implementing the trait
+ /// without adding a dependency on this crate in a third-party library.
+ pub unsafe fn new_assert_stable_address(o: O) -> Self
+ where
+ O: Deref<Target = T>,
+ {
+ OwningRef { reference: &*o, owner: o }
+ }
+
+ /// Converts `self` into a new owning reference that points at something reachable
+ /// from the previous one.
+ ///
+ /// This can be a reference to a field of `T`, something reachable from a field of
+ /// `T`, or even something unrelated with a `'static` lifetime.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRef;
+ ///
+ /// fn main() {
+ /// let owning_ref = OwningRef::new(Box::new([1, 2, 3, 4]));
+ ///
+ /// // create an owning reference that points at the
+ /// // third element of the array.
+ /// let owning_ref = owning_ref.map(|array| &array[2]);
+ /// assert_eq!(*owning_ref, 3);
+ /// }
+ /// ```
+ pub fn map<F, U: ?Sized>(self, f: F) -> OwningRef<O, U>
+ where
+ O: StableAddress,
+ F: FnOnce(&T) -> &U,
+ {
+ OwningRef { reference: f(&self), owner: self.owner }
+ }
+
+ /// Tries to convert `self` into a new owning reference that points
+ /// at something reachable from the previous one.
+ ///
+ /// This can be a reference to a field of `T`, something reachable from a field of
+ /// `T`, or even something unrelated with a `'static` lifetime.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRef;
+ ///
+ /// fn main() {
+ /// let owning_ref = OwningRef::new(Box::new([1, 2, 3, 4]));
+ ///
+ /// // create an owning reference that points at the
+ /// // third element of the array.
+ /// let owning_ref = owning_ref.try_map(|array| {
+ /// if array[2] == 3 { Ok(&array[2]) } else { Err(()) }
+ /// });
+ /// assert_eq!(*owning_ref.unwrap(), 3);
+ /// }
+ /// ```
+ pub fn try_map<F, U: ?Sized, E>(self, f: F) -> Result<OwningRef<O, U>, E>
+ where
+ O: StableAddress,
+ F: FnOnce(&T) -> Result<&U, E>,
+ {
+ Ok(OwningRef { reference: f(&self)?, owner: self.owner })
+ }
+
+ /// Converts `self` into a new owning reference with a different owner type.
+ ///
+ /// The new owner type needs to still contain the original owner in some way
+ /// so that the reference into it remains valid. This function is marked unsafe
+ /// because the user needs to manually uphold this guarantee.
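+ ///
+ /// # Example
+ /// A minimal sketch of a sound use (mirroring the `total_erase` test in this
+ /// module): wrapping the owner in an `Rc` keeps the original `Box` alive, so
+ /// the stored reference remains valid.
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRef;
+ /// use std::rc::Rc;
+ ///
+ /// fn main() {
+ /// let owning_ref = OwningRef::new(Box::new(42));
+ /// // Safe: `Rc::new` takes ownership of the `Box`, so the pointee
+ /// // (and thus the stored reference) stays valid.
+ /// let owning_ref: OwningRef<Rc<Box<i32>>, i32> = unsafe { owning_ref.map_owner(Rc::new) };
+ /// assert_eq!(*owning_ref, 42);
+ /// }
+ /// ```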
+ pub unsafe fn map_owner<F, P>(self, f: F) -> OwningRef<P, T>
+ where
+ O: StableAddress,
+ P: StableAddress,
+ F: FnOnce(O) -> P,
+ {
+ OwningRef { reference: self.reference, owner: f(self.owner) }
+ }
+
+ /// Converts `self` into a new owning reference where the owner is wrapped
+ /// in an additional `Box<O>`.
+ ///
+ /// This can be used to safely erase the owner of any `OwningRef<O, T>`
+ /// to an `OwningRef<Box<Erased>, T>`.
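+ ///
+ /// # Example
+ /// A small sketch: the extra `Box` changes only the owner type, while the
+ /// reference keeps pointing at the same element.
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRef;
+ ///
+ /// fn main() {
+ /// let owning_ref = OwningRef::new(Box::new([1, 2, 3, 4])).map(|a| &a[1]);
+ /// let owning_ref: OwningRef<Box<Box<[i32; 4]>>, i32> = owning_ref.map_owner_box();
+ /// assert_eq!(*owning_ref, 2);
+ /// }
+ /// ```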
+ pub fn map_owner_box(self) -> OwningRef<Box<O>, T> {
+ OwningRef { reference: self.reference, owner: Box::new(self.owner) }
+ }
+
+ /// Erases the concrete base type of the owner with a trait object.
+ ///
+ /// This allows mixing of owned references with different owner base types.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::{OwningRef, Erased};
+ ///
+ /// fn main() {
+ /// // N.B., using the concrete types here for explicitness.
+ /// // For less verbose code type aliases like `BoxRef` are provided.
+ ///
+ /// let owning_ref_a: OwningRef<Box<[i32; 4]>, [i32; 4]>
+ /// = OwningRef::new(Box::new([1, 2, 3, 4]));
+ ///
+ /// let owning_ref_b: OwningRef<Box<Vec<(i32, bool)>>, Vec<(i32, bool)>>
+ /// = OwningRef::new(Box::new(vec![(0, false), (1, true)]));
+ ///
+ /// let owning_ref_a: OwningRef<Box<[i32; 4]>, i32>
+ /// = owning_ref_a.map(|a| &a[0]);
+ ///
+ /// let owning_ref_b: OwningRef<Box<Vec<(i32, bool)>>, i32>
+ /// = owning_ref_b.map(|a| &a[1].0);
+ ///
+ /// let owning_refs: [OwningRef<Box<dyn Erased>, i32>; 2]
+ /// = [owning_ref_a.erase_owner(), owning_ref_b.erase_owner()];
+ ///
+ /// assert_eq!(*owning_refs[0], 1);
+ /// assert_eq!(*owning_refs[1], 1);
+ /// }
+ /// ```
+ pub fn erase_owner<'a>(self) -> OwningRef<O::Erased, T>
+ where
+ O: IntoErased<'a>,
+ {
+ OwningRef { reference: self.reference, owner: self.owner.into_erased() }
+ }
+
+ /// Erases the concrete base type of the owner with a trait object which implements `Send`.
+ ///
+ /// This allows mixing of owned references with different owner base types.
+ pub fn erase_send_owner<'a>(self) -> OwningRef<O::Erased, T>
+ where
+ O: IntoErasedSend<'a>,
+ {
+ OwningRef { reference: self.reference, owner: self.owner.into_erased_send() }
+ }
+
+ /// Erases the concrete base type of the owner with a trait object
+ /// which implements `Send` and `Sync`.
+ ///
+ /// This allows mixing of owned references with different owner base types.
+ pub fn erase_send_sync_owner<'a>(self) -> OwningRef<O::Erased, T>
+ where
+ O: IntoErasedSendSync<'a>,
+ {
+ OwningRef { reference: self.reference, owner: self.owner.into_erased_send_sync() }
+ }
+
+ // UNIMPLEMENTED: wrap_owner
+
+ // FIXME: Naming convention?
+ /// A getter for the underlying owner.
+ pub fn owner(&self) -> &O {
+ &self.owner
+ }
+
+ // FIXME: Naming convention?
+ /// Discards the reference and retrieves the owner.
+ pub fn into_inner(self) -> O {
+ self.owner
+ }
+}
+
+impl<O, T: ?Sized> OwningRefMut<O, T> {
+ /// Creates a new owning reference from an owner
+ /// initialized to the direct dereference of it.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRefMut;
+ ///
+ /// fn main() {
+ /// let owning_ref_mut = OwningRefMut::new(Box::new(42));
+ /// assert_eq!(*owning_ref_mut, 42);
+ /// }
+ /// ```
+ pub fn new(mut o: O) -> Self
+ where
+ O: StableAddress,
+ O: DerefMut<Target = T>,
+ {
+ OwningRefMut { reference: &mut *o, owner: o }
+ }
+
+ /// Like `new`, but doesn’t require `O` to implement the `StableAddress` trait.
+ /// Instead, the caller is responsible for making the same promises as implementing the trait.
+ ///
+ /// This is useful for cases where coherence rules prevent implementing the trait
+ /// without adding a dependency on this crate in a third-party library.
+ pub unsafe fn new_assert_stable_address(mut o: O) -> Self
+ where
+ O: DerefMut<Target = T>,
+ {
+ OwningRefMut { reference: &mut *o, owner: o }
+ }
+
+ /// Converts `self` into a new _shared_ owning reference that points at
+ /// something reachable from the previous one.
+ ///
+ /// This can be a reference to a field of `T`, something reachable from a field of
+ /// `T`, or even something unrelated with a `'static` lifetime.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRefMut;
+ ///
+ /// fn main() {
+ /// let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+ ///
+ /// // create an owning reference that points at the
+ /// // third element of the array.
+ /// let owning_ref = owning_ref_mut.map(|array| &array[2]);
+ /// assert_eq!(*owning_ref, 3);
+ /// }
+ /// ```
+ pub fn map<F, U: ?Sized>(mut self, f: F) -> OwningRef<O, U>
+ where
+ O: StableAddress,
+ F: FnOnce(&mut T) -> &U,
+ {
+ OwningRef { reference: f(&mut self), owner: self.owner }
+ }
+
+ /// Converts `self` into a new _mutable_ owning reference that points at
+ /// something reachable from the previous one.
+ ///
+ /// This can be a reference to a field of `T`, something reachable from a field of
+ /// `T`, or even something unrelated with a `'static` lifetime.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRefMut;
+ ///
+ /// fn main() {
+ /// let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+ ///
+ /// // create an owning reference that points at the
+ /// // third element of the array.
+ /// let owning_ref_mut = owning_ref_mut.map_mut(|array| &mut array[2]);
+ /// assert_eq!(*owning_ref_mut, 3);
+ /// }
+ /// ```
+ pub fn map_mut<F, U: ?Sized>(mut self, f: F) -> OwningRefMut<O, U>
+ where
+ O: StableAddress,
+ F: FnOnce(&mut T) -> &mut U,
+ {
+ OwningRefMut { reference: f(&mut self), owner: self.owner }
+ }
+
+ /// Tries to convert `self` into a new _shared_ owning reference that points
+ /// at something reachable from the previous one.
+ ///
+ /// This can be a reference to a field of `T`, something reachable from a field of
+ /// `T`, or even something unrelated with a `'static` lifetime.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRefMut;
+ ///
+ /// fn main() {
+ /// let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+ ///
+ /// // create an owning reference that points at the
+ /// // third element of the array.
+ /// let owning_ref = owning_ref_mut.try_map(|array| {
+ /// if array[2] == 3 { Ok(&array[2]) } else { Err(()) }
+ /// });
+ /// assert_eq!(*owning_ref.unwrap(), 3);
+ /// }
+ /// ```
+ pub fn try_map<F, U: ?Sized, E>(mut self, f: F) -> Result<OwningRef<O, U>, E>
+ where
+ O: StableAddress,
+ F: FnOnce(&mut T) -> Result<&U, E>,
+ {
+ Ok(OwningRef { reference: f(&mut self)?, owner: self.owner })
+ }
+
+ /// Tries to convert `self` into a new _mutable_ owning reference that points
+ /// at something reachable from the previous one.
+ ///
+ /// This can be a reference to a field of `T`, something reachable from a field of
+ /// `T`, or even something unrelated with a `'static` lifetime.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::OwningRefMut;
+ ///
+ /// fn main() {
+ /// let owning_ref_mut = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+ ///
+ /// // create an owning reference that points at the
+ /// // third element of the array.
+ /// let owning_ref_mut = owning_ref_mut.try_map_mut(|array| {
+ /// if array[2] == 3 { Ok(&mut array[2]) } else { Err(()) }
+ /// });
+ /// assert_eq!(*owning_ref_mut.unwrap(), 3);
+ /// }
+ /// ```
+ pub fn try_map_mut<F, U: ?Sized, E>(mut self, f: F) -> Result<OwningRefMut<O, U>, E>
+ where
+ O: StableAddress,
+ F: FnOnce(&mut T) -> Result<&mut U, E>,
+ {
+ Ok(OwningRefMut { reference: f(&mut self)?, owner: self.owner })
+ }
+
+ /// Converts `self` into a new owning reference with a different owner type.
+ ///
+ /// The new owner type needs to still contain the original owner in some way
+ /// so that the reference into it remains valid. This function is marked unsafe
+ /// because the user needs to manually uphold this guarantee.
+ pub unsafe fn map_owner<F, P>(self, f: F) -> OwningRefMut<P, T>
+ where
+ O: StableAddress,
+ P: StableAddress,
+ F: FnOnce(O) -> P,
+ {
+ OwningRefMut { reference: self.reference, owner: f(self.owner) }
+ }
+
+ /// Converts `self` into a new owning reference where the owner is wrapped
+ /// in an additional `Box<O>`.
+ ///
+ /// This can be used to safely erase the owner of any `OwningRefMut<O, T>`
+ /// to an `OwningRefMut<Box<Erased>, T>`.
+ pub fn map_owner_box(self) -> OwningRefMut<Box<O>, T> {
+ OwningRefMut { reference: self.reference, owner: Box::new(self.owner) }
+ }
+
+ /// Erases the concrete base type of the owner with a trait object.
+ ///
+ /// This allows mixing of owned references with different owner base types.
+ ///
+ /// # Example
+ /// ```
+ /// use rustc_data_structures::owning_ref::{OwningRefMut, Erased};
+ ///
+ /// fn main() {
+ /// // N.B., using the concrete types here for explicitness.
+ /// // For less verbose code type aliases like `BoxRef` are provided.
+ ///
+ /// let owning_ref_mut_a: OwningRefMut<Box<[i32; 4]>, [i32; 4]>
+ /// = OwningRefMut::new(Box::new([1, 2, 3, 4]));
+ ///
+ /// let owning_ref_mut_b: OwningRefMut<Box<Vec<(i32, bool)>>, Vec<(i32, bool)>>
+ /// = OwningRefMut::new(Box::new(vec![(0, false), (1, true)]));
+ ///
+ /// let owning_ref_mut_a: OwningRefMut<Box<[i32; 4]>, i32>
+ /// = owning_ref_mut_a.map_mut(|a| &mut a[0]);
+ ///
+ /// let owning_ref_mut_b: OwningRefMut<Box<Vec<(i32, bool)>>, i32>
+ /// = owning_ref_mut_b.map_mut(|a| &mut a[1].0);
+ ///
+ /// let owning_refs_mut: [OwningRefMut<Box<dyn Erased>, i32>; 2]
+ /// = [owning_ref_mut_a.erase_owner(), owning_ref_mut_b.erase_owner()];
+ ///
+ /// assert_eq!(*owning_refs_mut[0], 1);
+ /// assert_eq!(*owning_refs_mut[1], 1);
+ /// }
+ /// ```
+ pub fn erase_owner<'a>(self) -> OwningRefMut<O::Erased, T>
+ where
+ O: IntoErased<'a>,
+ {
+ OwningRefMut { reference: self.reference, owner: self.owner.into_erased() }
+ }
+
+ // UNIMPLEMENTED: wrap_owner
+
+ // FIXME: Naming convention?
+ /// A getter for the underlying owner.
+ pub fn owner(&self) -> &O {
+ &self.owner
+ }
+
+ // FIXME: Naming convention?
+ /// Discards the reference and retrieves the owner.
+ pub fn into_inner(self) -> O {
+ self.owner
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// OwningHandle
+/////////////////////////////////////////////////////////////////////////////
+
+use std::ops::{Deref, DerefMut};
+
+/// `OwningHandle` is a complement to `OwningRef`. Where `OwningRef` allows
+/// consumers to pass around an owned object and a dependent reference,
+/// `OwningHandle` contains an owned object and a dependent _object_.
+///
+/// `OwningHandle` can encapsulate a `RefMut` along with its associated
+/// `RefCell`, or an `RwLockReadGuard` along with its associated `RwLock`.
+/// However, the API is completely generic and there are no restrictions on
+/// what types of owning and dependent objects may be used.
+///
+/// `OwningHandle` is created by passing an owner object (which dereferences
+/// to a stable address) along with a callback which receives a pointer to
+/// that stable location. The callback may then dereference the pointer and
+/// mint a dependent object, with the guarantee that the returned object will
+/// not outlive the referent of the pointer.
+///
+/// Since the callback needs to dereference a raw pointer, it requires `unsafe`
+/// code. To avoid forcing this unsafety on most callers, the `ToHandle` trait is
+/// implemented for common data structures. Types that implement `ToHandle` can
+/// be wrapped into an `OwningHandle` without passing a callback.
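+///
+/// A minimal usage sketch, adapted from the tests in this module: a `RefMut`
+/// is encapsulated together with the `Rc<RefCell<_>>` it borrows from.
+/// ```
+/// use rustc_data_structures::owning_ref::{OwningHandle, RcRef};
+/// use std::cell::RefCell;
+/// use std::rc::Rc;
+///
+/// fn main() {
+/// let cell = Rc::new(RefCell::new(2));
+/// let cell_ref = RcRef::new(cell);
+/// let mut handle =
+/// OwningHandle::new_with_fn(cell_ref, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
+/// assert_eq!(*handle, 2);
+/// *handle = 3;
+/// assert_eq!(*handle, 3);
+/// }
+/// ```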
+pub struct OwningHandle<O, H>
+where
+ O: StableAddress,
+ H: Deref,
+{
+ handle: H,
+ _owner: O,
+}
+
+impl<O, H> Deref for OwningHandle<O, H>
+where
+ O: StableAddress,
+ H: Deref,
+{
+ type Target = H::Target;
+ fn deref(&self) -> &H::Target {
+ self.handle.deref()
+ }
+}
+
+unsafe impl<O, H> StableAddress for OwningHandle<O, H>
+where
+ O: StableAddress,
+ H: StableAddress,
+{
+}
+
+impl<O, H> DerefMut for OwningHandle<O, H>
+where
+ O: StableAddress,
+ H: DerefMut,
+{
+ fn deref_mut(&mut self) -> &mut H::Target {
+ self.handle.deref_mut()
+ }
+}
+
+/// Trait to implement the conversion of owner to handle for common types.
+pub trait ToHandle {
+ /// The type of handle to be encapsulated by the `OwningHandle`.
+ type Handle: Deref;
+
+ /// Given an appropriately-long-lived pointer to ourselves, create a
+ /// handle to be encapsulated by the `OwningHandle`.
+ unsafe fn to_handle(x: *const Self) -> Self::Handle;
+}
+
+/// Trait to implement the conversion of owner to mutable handle for common types.
+pub trait ToHandleMut {
+ /// The type of handle to be encapsulated by the `OwningHandle`.
+ type HandleMut: DerefMut;
+
+ /// Given an appropriately-long-lived pointer to ourselves, create a
+ /// mutable handle to be encapsulated by the `OwningHandle`.
+ unsafe fn to_handle_mut(x: *const Self) -> Self::HandleMut;
+}
+
+impl<O, H> OwningHandle<O, H>
+where
+ O: StableAddress<Target: ToHandle<Handle = H>>,
+ H: Deref,
+{
+ /// Creates a new `OwningHandle` for a type that implements `ToHandle`. For types
+ /// that don't implement `ToHandle`, callers may invoke `new_with_fn`, which accepts
+ /// a callback to perform the conversion.
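+ ///
+ /// For example (mirroring `owning_handle_safe` in this module's tests):
+ /// ```
+ /// use rustc_data_structures::owning_ref::{OwningHandle, RcRef};
+ /// use std::cell::RefCell;
+ /// use std::rc::Rc;
+ ///
+ /// fn main() {
+ /// let cell = Rc::new(RefCell::new(2));
+ /// let handle = OwningHandle::new(RcRef::new(cell));
+ /// assert_eq!(*handle, 2);
+ /// }
+ /// ```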
+ pub fn new(o: O) -> Self {
+ OwningHandle::new_with_fn(o, |x| unsafe { O::Target::to_handle(x) })
+ }
+}
+
+impl<O, H> OwningHandle<O, H>
+where
+ O: StableAddress<Target: ToHandleMut<HandleMut = H>>,
+ H: DerefMut,
+{
+ /// Creates a new mutable `OwningHandle` for a type that implements `ToHandleMut`.
+ pub fn new_mut(o: O) -> Self {
+ OwningHandle::new_with_fn(o, |x| unsafe { O::Target::to_handle_mut(x) })
+ }
+}
+
+impl<O, H> OwningHandle<O, H>
+where
+ O: StableAddress,
+ H: Deref,
+{
+ /// Creates a new `OwningHandle`. The provided callback will be invoked with
+ /// a pointer to the object owned by `o`, and the returned value is stored
+ /// as the object to which this `OwningHandle` will forward `Deref` and
+ /// `DerefMut`.
+ pub fn new_with_fn<F>(o: O, f: F) -> Self
+ where
+ F: FnOnce(*const O::Target) -> H,
+ {
+ let h = f(o.deref() as *const O::Target);
+
+ OwningHandle { handle: h, _owner: o }
+ }
+
+ /// Fallible variant of `new_with_fn`: the provided callback will be invoked
+ /// with a pointer to the object owned by `o`, and on success the returned
+ /// value is stored as the object to which this `OwningHandle` will forward
+ /// `Deref` and `DerefMut`. Errors from the callback are propagated.
+ pub fn try_new<F, E>(o: O, f: F) -> Result<Self, E>
+ where
+ F: FnOnce(*const O::Target) -> Result<H, E>,
+ {
+ let h = f(o.deref() as *const O::Target)?;
+
+ Ok(OwningHandle { handle: h, _owner: o })
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// std traits
+/////////////////////////////////////////////////////////////////////////////
+
+use std::borrow::Borrow;
+use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
+use std::convert::From;
+use std::fmt::{self, Debug};
+use std::hash::{Hash, Hasher};
+use std::marker::{Send, Sync};
+
+impl<O, T: ?Sized> Deref for OwningRef<O, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.reference }
+ }
+}
+
+impl<O, T: ?Sized> Deref for OwningRefMut<O, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ unsafe { &*self.reference }
+ }
+}
+
+impl<O, T: ?Sized> DerefMut for OwningRefMut<O, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { &mut *self.reference }
+ }
+}
+
+unsafe impl<O, T: ?Sized> StableAddress for OwningRef<O, T> {}
+
+impl<O, T: ?Sized> AsRef<T> for OwningRef<O, T> {
+ fn as_ref(&self) -> &T {
+ &*self
+ }
+}
+
+impl<O, T: ?Sized> AsRef<T> for OwningRefMut<O, T> {
+ fn as_ref(&self) -> &T {
+ &*self
+ }
+}
+
+impl<O, T: ?Sized> AsMut<T> for OwningRefMut<O, T> {
+ fn as_mut(&mut self) -> &mut T {
+ &mut *self
+ }
+}
+
+impl<O, T: ?Sized> Borrow<T> for OwningRef<O, T> {
+ fn borrow(&self) -> &T {
+ &*self
+ }
+}
+
+impl<O, T: ?Sized> From<O> for OwningRef<O, T>
+where
+ O: StableAddress,
+ O: Deref<Target = T>,
+{
+ fn from(owner: O) -> Self {
+ OwningRef::new(owner)
+ }
+}
+
+impl<O, T: ?Sized> From<O> for OwningRefMut<O, T>
+where
+ O: StableAddress,
+ O: DerefMut<Target = T>,
+{
+ fn from(owner: O) -> Self {
+ OwningRefMut::new(owner)
+ }
+}
+
+impl<O, T: ?Sized> From<OwningRefMut<O, T>> for OwningRef<O, T>
+where
+ O: StableAddress,
+ O: DerefMut<Target = T>,
+{
+ fn from(other: OwningRefMut<O, T>) -> Self {
+ OwningRef { owner: other.owner, reference: other.reference }
+ }
+}
+
+// ^ FIXME: Is an Into impl for calling into_inner() possible as well?
+
+impl<O, T: ?Sized> Debug for OwningRef<O, T>
+where
+ O: Debug,
+ T: Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "OwningRef {{ owner: {:?}, reference: {:?} }}", self.owner(), &**self)
+ }
+}
+
+impl<O, T: ?Sized> Debug for OwningRefMut<O, T>
+where
+ O: Debug,
+ T: Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "OwningRefMut {{ owner: {:?}, reference: {:?} }}", self.owner(), &**self)
+ }
+}
+
+impl<O, T: ?Sized> Clone for OwningRef<O, T>
+where
+ O: CloneStableAddress,
+{
+ fn clone(&self) -> Self {
+ OwningRef { owner: self.owner.clone(), reference: self.reference }
+ }
+}
+
+unsafe impl<O, T: ?Sized> CloneStableAddress for OwningRef<O, T> where O: CloneStableAddress {}
+
+unsafe impl<O, T: ?Sized> Send for OwningRef<O, T>
+where
+ O: Send,
+ for<'a> &'a T: Send,
+{
+}
+unsafe impl<O, T: ?Sized> Sync for OwningRef<O, T>
+where
+ O: Sync,
+ for<'a> &'a T: Sync,
+{
+}
+
+unsafe impl<O, T: ?Sized> Send for OwningRefMut<O, T>
+where
+ O: Send,
+ for<'a> &'a mut T: Send,
+{
+}
+unsafe impl<O, T: ?Sized> Sync for OwningRefMut<O, T>
+where
+ O: Sync,
+ for<'a> &'a mut T: Sync,
+{
+}
+
+impl Debug for dyn Erased {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "<Erased>")
+ }
+}
+
+impl<O, T: ?Sized> PartialEq for OwningRef<O, T>
+where
+ T: PartialEq,
+{
+ fn eq(&self, other: &Self) -> bool {
+ (&*self as &T).eq(&*other as &T)
+ }
+}
+
+impl<O, T: ?Sized> Eq for OwningRef<O, T> where T: Eq {}
+
+impl<O, T: ?Sized> PartialOrd for OwningRef<O, T>
+where
+ T: PartialOrd,
+{
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ (&*self as &T).partial_cmp(&*other as &T)
+ }
+}
+
+impl<O, T: ?Sized> Ord for OwningRef<O, T>
+where
+ T: Ord,
+{
+ fn cmp(&self, other: &Self) -> Ordering {
+ (&*self as &T).cmp(&*other as &T)
+ }
+}
+
+impl<O, T: ?Sized> Hash for OwningRef<O, T>
+where
+ T: Hash,
+{
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (&*self as &T).hash(state);
+ }
+}
+
+impl<O, T: ?Sized> PartialEq for OwningRefMut<O, T>
+where
+ T: PartialEq,
+{
+ fn eq(&self, other: &Self) -> bool {
+ (&*self as &T).eq(&*other as &T)
+ }
+}
+
+impl<O, T: ?Sized> Eq for OwningRefMut<O, T> where T: Eq {}
+
+impl<O, T: ?Sized> PartialOrd for OwningRefMut<O, T>
+where
+ T: PartialOrd,
+{
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ (&*self as &T).partial_cmp(&*other as &T)
+ }
+}
+
+impl<O, T: ?Sized> Ord for OwningRefMut<O, T>
+where
+ T: Ord,
+{
+ fn cmp(&self, other: &Self) -> Ordering {
+ (&*self as &T).cmp(&*other as &T)
+ }
+}
+
+impl<O, T: ?Sized> Hash for OwningRefMut<O, T>
+where
+ T: Hash,
+{
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (&*self as &T).hash(state);
+ }
+}
+
+/////////////////////////////////////////////////////////////////////////////
+// std types integration and convenience type defs
+/////////////////////////////////////////////////////////////////////////////
+
+use std::boxed::Box;
+use std::cell::{Ref, RefCell, RefMut};
+use std::rc::Rc;
+use std::sync::Arc;
+use std::sync::{MutexGuard, RwLockReadGuard, RwLockWriteGuard};
+
+impl<T: 'static> ToHandle for RefCell<T> {
+ type Handle = Ref<'static, T>;
+ unsafe fn to_handle(x: *const Self) -> Self::Handle {
+ (*x).borrow()
+ }
+}
+
+impl<T: 'static> ToHandleMut for RefCell<T> {
+ type HandleMut = RefMut<'static, T>;
+ unsafe fn to_handle_mut(x: *const Self) -> Self::HandleMut {
+ (*x).borrow_mut()
+ }
+}
+
+// N.B., implementing ToHandle{,Mut} for Mutex and RwLock requires a decision
+// about which handle creation to use (i.e., read() vs try_read()) as well as
+// what to do with error results.
+
+/// Typedef of an owning reference that uses a `Box` as the owner.
+pub type BoxRef<T, U = T> = OwningRef<Box<T>, U>;
+/// Typedef of an owning reference that uses a `Vec` as the owner.
+pub type VecRef<T, U = T> = OwningRef<Vec<T>, U>;
+/// Typedef of an owning reference that uses a `String` as the owner.
+pub type StringRef = OwningRef<String, str>;
+
+/// Typedef of an owning reference that uses an `Rc` as the owner.
+pub type RcRef<T, U = T> = OwningRef<Rc<T>, U>;
+/// Typedef of an owning reference that uses an `Arc` as the owner.
+pub type ArcRef<T, U = T> = OwningRef<Arc<T>, U>;
+
+/// Typedef of an owning reference that uses a `Ref` as the owner.
+pub type RefRef<'a, T, U = T> = OwningRef<Ref<'a, T>, U>;
+/// Typedef of an owning reference that uses a `RefMut` as the owner.
+pub type RefMutRef<'a, T, U = T> = OwningRef<RefMut<'a, T>, U>;
+/// Typedef of an owning reference that uses a `MutexGuard` as the owner.
+pub type MutexGuardRef<'a, T, U = T> = OwningRef<MutexGuard<'a, T>, U>;
+/// Typedef of an owning reference that uses an `RwLockReadGuard` as the owner.
+pub type RwLockReadGuardRef<'a, T, U = T> = OwningRef<RwLockReadGuard<'a, T>, U>;
+/// Typedef of an owning reference that uses an `RwLockWriteGuard` as the owner.
+pub type RwLockWriteGuardRef<'a, T, U = T> = OwningRef<RwLockWriteGuard<'a, T>, U>;
+
+/// Typedef of a mutable owning reference that uses a `Box` as the owner.
+pub type BoxRefMut<T, U = T> = OwningRefMut<Box<T>, U>;
+/// Typedef of a mutable owning reference that uses a `Vec` as the owner.
+pub type VecRefMut<T, U = T> = OwningRefMut<Vec<T>, U>;
+/// Typedef of a mutable owning reference that uses a `String` as the owner.
+pub type StringRefMut = OwningRefMut<String, str>;
+
+/// Typedef of a mutable owning reference that uses a `RefMut` as the owner.
+pub type RefMutRefMut<'a, T, U = T> = OwningRefMut<RefMut<'a, T>, U>;
+/// Typedef of a mutable owning reference that uses a `MutexGuard` as the owner.
+pub type MutexGuardRefMut<'a, T, U = T> = OwningRefMut<MutexGuard<'a, T>, U>;
+/// Typedef of a mutable owning reference that uses an `RwLockWriteGuard` as the owner.
+pub type RwLockWriteGuardRefMut<'a, T, U = T> = OwningRefMut<RwLockWriteGuard<'a, T>, U>;
+
+unsafe impl<'a, T: 'a> IntoErased<'a> for Box<T> {
+ type Erased = Box<dyn Erased + 'a>;
+ fn into_erased(self) -> Self::Erased {
+ self
+ }
+}
+unsafe impl<'a, T: 'a> IntoErased<'a> for Rc<T> {
+ type Erased = Rc<dyn Erased + 'a>;
+ fn into_erased(self) -> Self::Erased {
+ self
+ }
+}
+unsafe impl<'a, T: 'a> IntoErased<'a> for Arc<T> {
+ type Erased = Arc<dyn Erased + 'a>;
+ fn into_erased(self) -> Self::Erased {
+ self
+ }
+}
+
+unsafe impl<'a, T: Send + 'a> IntoErasedSend<'a> for Box<T> {
+ type Erased = Box<dyn Erased + Send + 'a>;
+ fn into_erased_send(self) -> Self::Erased {
+ self
+ }
+}
+
+unsafe impl<'a, T: Send + 'a> IntoErasedSendSync<'a> for Box<T> {
+ type Erased = Box<dyn Erased + Sync + Send + 'a>;
+ fn into_erased_send_sync(self) -> Self::Erased {
+ let result: Box<dyn Erased + Send + 'a> = self;
+ // This is safe since Erased can always implement Sync
+ // Only the destructor is available and it takes &mut self
+ unsafe { mem::transmute(result) }
+ }
+}
+
+unsafe impl<'a, T: Send + Sync + 'a> IntoErasedSendSync<'a> for Arc<T> {
+ type Erased = Arc<dyn Erased + Send + Sync + 'a>;
+ fn into_erased_send_sync(self) -> Self::Erased {
+ self
+ }
+}
+
+/// Typedef of an owning reference that uses an erased `Box` as the owner.
+pub type ErasedBoxRef<U> = OwningRef<Box<dyn Erased>, U>;
+/// Typedef of an owning reference that uses an erased `Rc` as the owner.
+pub type ErasedRcRef<U> = OwningRef<Rc<dyn Erased>, U>;
+/// Typedef of an owning reference that uses an erased `Arc` as the owner.
+pub type ErasedArcRef<U> = OwningRef<Arc<dyn Erased>, U>;
+
+/// Typedef of a mutable owning reference that uses an erased `Box` as the owner.
+pub type ErasedBoxRefMut<U> = OwningRefMut<Box<dyn Erased>, U>;
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_data_structures/src/owning_ref/tests.rs b/compiler/rustc_data_structures/src/owning_ref/tests.rs
new file mode 100644
index 000000000..320c03d51
--- /dev/null
+++ b/compiler/rustc_data_structures/src/owning_ref/tests.rs
@@ -0,0 +1,711 @@
+// FIXME: owning_ref is not sound under stacked borrows. Preferably, get rid of it.
+#[cfg(not(miri))]
+mod owning_ref {
+ use super::super::OwningRef;
+ use super::super::{BoxRef, Erased, ErasedBoxRef, RcRef};
+ use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
+ use std::collections::hash_map::DefaultHasher;
+ use std::collections::HashMap;
+ use std::hash::{Hash, Hasher};
+ use std::rc::Rc;
+
+ #[derive(Debug, PartialEq)]
+ struct Example(u32, String, [u8; 3]);
+ fn example() -> Example {
+ Example(42, "hello world".to_string(), [1, 2, 3])
+ }
+
+ #[test]
+ fn new_deref() {
+ let or: OwningRef<Box<()>, ()> = OwningRef::new(Box::new(()));
+ assert_eq!(&*or, &());
+ }
+
+ #[test]
+ fn into() {
+ let or: OwningRef<Box<()>, ()> = Box::new(()).into();
+ assert_eq!(&*or, &());
+ }
+
+ #[test]
+ fn map_offset_ref() {
+ let or: BoxRef<Example> = Box::new(example()).into();
+ let or: BoxRef<_, u32> = or.map(|x| &x.0);
+ assert_eq!(&*or, &42);
+
+ let or: BoxRef<Example> = Box::new(example()).into();
+ let or: BoxRef<_, u8> = or.map(|x| &x.2[1]);
+ assert_eq!(&*or, &2);
+ }
+
+ #[test]
+ fn map_heap_ref() {
+ let or: BoxRef<Example> = Box::new(example()).into();
+ let or: BoxRef<_, str> = or.map(|x| &x.1[..5]);
+ assert_eq!(&*or, "hello");
+ }
+
+ #[test]
+ fn map_static_ref() {
+ let or: BoxRef<()> = Box::new(()).into();
+ let or: BoxRef<_, str> = or.map(|_| "hello");
+ assert_eq!(&*or, "hello");
+ }
+
+ #[test]
+ fn map_chained() {
+ let or: BoxRef<String> = Box::new(example().1).into();
+ let or: BoxRef<_, str> = or.map(|x| &x[1..5]);
+ let or: BoxRef<_, str> = or.map(|x| &x[..2]);
+ assert_eq!(&*or, "el");
+ }
+
+ #[test]
+ fn map_chained_inference() {
+ let or = BoxRef::new(Box::new(example().1)).map(|x| &x[..5]).map(|x| &x[1..3]);
+ assert_eq!(&*or, "el");
+ }
+
+ #[test]
+ fn owner() {
+ let or: BoxRef<String> = Box::new(example().1).into();
+ let or = or.map(|x| &x[..5]);
+ assert_eq!(&*or, "hello");
+ assert_eq!(&**or.owner(), "hello world");
+ }
+
+ #[test]
+ fn into_inner() {
+ let or: BoxRef<String> = Box::new(example().1).into();
+ let or = or.map(|x| &x[..5]);
+ assert_eq!(&*or, "hello");
+ let s = *or.into_inner();
+ assert_eq!(&s, "hello world");
+ }
+
+ #[test]
+ fn fmt_debug() {
+ let or: BoxRef<String> = Box::new(example().1).into();
+ let or = or.map(|x| &x[..5]);
+ let s = format!("{:?}", or);
+ assert_eq!(&s, "OwningRef { owner: \"hello world\", reference: \"hello\" }");
+ }
+
+ #[test]
+ fn erased_owner() {
+ let o1: BoxRef<Example, str> = BoxRef::new(Box::new(example())).map(|x| &x.1[..]);
+
+ let o2: BoxRef<String, str> = BoxRef::new(Box::new(example().1)).map(|x| &x[..]);
+
+ let os: Vec<ErasedBoxRef<str>> = vec![o1.erase_owner(), o2.erase_owner()];
+ assert!(os.iter().all(|e| &e[..] == "hello world"));
+ }
+
+ #[test]
+ fn raii_locks() {
+ use super::super::{MutexGuardRef, RwLockReadGuardRef, RwLockWriteGuardRef};
+ use super::super::{RefMutRef, RefRef};
+ use std::cell::RefCell;
+ use std::sync::{Mutex, RwLock};
+
+ {
+ let a = RefCell::new(1);
+ let a = {
+ let a = RefRef::new(a.borrow());
+ assert_eq!(*a, 1);
+ a
+ };
+ assert_eq!(*a, 1);
+ drop(a);
+ }
+ {
+ let a = RefCell::new(1);
+ let a = {
+ let a = RefMutRef::new(a.borrow_mut());
+ assert_eq!(*a, 1);
+ a
+ };
+ assert_eq!(*a, 1);
+ drop(a);
+ }
+ {
+ let a = Mutex::new(1);
+ let a = {
+ let a = MutexGuardRef::new(a.lock().unwrap());
+ assert_eq!(*a, 1);
+ a
+ };
+ assert_eq!(*a, 1);
+ drop(a);
+ }
+ {
+ let a = RwLock::new(1);
+ let a = {
+ let a = RwLockReadGuardRef::new(a.read().unwrap());
+ assert_eq!(*a, 1);
+ a
+ };
+ assert_eq!(*a, 1);
+ drop(a);
+ }
+ {
+ let a = RwLock::new(1);
+ let a = {
+ let a = RwLockWriteGuardRef::new(a.write().unwrap());
+ assert_eq!(*a, 1);
+ a
+ };
+ assert_eq!(*a, 1);
+ drop(a);
+ }
+ }
+
+ #[test]
+ fn eq() {
+ let or1: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+ let or2: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+ assert_eq!(or1.eq(&or2), true);
+ }
+
+ #[test]
+ fn cmp() {
+ let or1: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+ let or2: BoxRef<[u8]> = BoxRef::new(vec![4, 5, 6].into_boxed_slice());
+ assert_eq!(or1.cmp(&or2), Ordering::Less);
+ }
+
+ #[test]
+ fn partial_cmp() {
+ let or1: BoxRef<[u8]> = BoxRef::new(vec![4, 5, 6].into_boxed_slice());
+ let or2: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+ assert_eq!(or1.partial_cmp(&or2), Some(Ordering::Greater));
+ }
+
+ #[test]
+ fn hash() {
+ let mut h1 = DefaultHasher::new();
+ let mut h2 = DefaultHasher::new();
+
+ let or1: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+ let or2: BoxRef<[u8]> = BoxRef::new(vec![1, 2, 3].into_boxed_slice());
+
+ or1.hash(&mut h1);
+ or2.hash(&mut h2);
+
+ assert_eq!(h1.finish(), h2.finish());
+ }
+
+ #[test]
+ fn borrow() {
+ let mut hash = HashMap::new();
+ let key = RcRef::<String>::new(Rc::new("foo-bar".to_string())).map(|s| &s[..]);
+
+ hash.insert(key.clone().map(|s| &s[..3]), 42);
+ hash.insert(key.clone().map(|s| &s[4..]), 23);
+
+ assert_eq!(hash.get("foo"), Some(&42));
+ assert_eq!(hash.get("bar"), Some(&23));
+ }
+
+ #[test]
+ fn total_erase() {
+ let a: OwningRef<Vec<u8>, [u8]> = OwningRef::new(vec![]).map(|x| &x[..]);
+ let b: OwningRef<Box<[u8]>, [u8]> =
+ OwningRef::new(vec![].into_boxed_slice()).map(|x| &x[..]);
+
+ let c: OwningRef<Rc<Vec<u8>>, [u8]> = unsafe { a.map_owner(Rc::new) };
+ let d: OwningRef<Rc<Box<[u8]>>, [u8]> = unsafe { b.map_owner(Rc::new) };
+
+ let e: OwningRef<Rc<dyn Erased>, [u8]> = c.erase_owner();
+ let f: OwningRef<Rc<dyn Erased>, [u8]> = d.erase_owner();
+
+ let _g = e.clone();
+ let _h = f.clone();
+ }
+
+ #[test]
+ fn total_erase_box() {
+ let a: OwningRef<Vec<u8>, [u8]> = OwningRef::new(vec![]).map(|x| &x[..]);
+ let b: OwningRef<Box<[u8]>, [u8]> =
+ OwningRef::new(vec![].into_boxed_slice()).map(|x| &x[..]);
+
+ let c: OwningRef<Box<Vec<u8>>, [u8]> = a.map_owner_box();
+ let d: OwningRef<Box<Box<[u8]>>, [u8]> = b.map_owner_box();
+
+ let _e: OwningRef<Box<dyn Erased>, [u8]> = c.erase_owner();
+ let _f: OwningRef<Box<dyn Erased>, [u8]> = d.erase_owner();
+ }
+
+ #[test]
+ fn try_map1() {
+ use std::any::Any;
+
+ let x = Box::new(123_i32);
+ let y: Box<dyn Any> = x;
+
+ assert!(OwningRef::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_ok());
+ }
+
+ #[test]
+ fn try_map2() {
+ use std::any::Any;
+
+ let x = Box::new(123_i32);
+ let y: Box<dyn Any> = x;
+
+ assert!(!OwningRef::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_err());
+ }
+}
+
+mod owning_handle {
+ use super::super::OwningHandle;
+ use super::super::RcRef;
+ use std::cell::RefCell;
+ use std::rc::Rc;
+ use std::sync::Arc;
+ use std::sync::RwLock;
+
+ #[test]
+ fn owning_handle() {
+ use std::cell::RefCell;
+ let cell = Rc::new(RefCell::new(2));
+ let cell_ref = RcRef::new(cell);
+ let mut handle =
+ OwningHandle::new_with_fn(cell_ref, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
+ assert_eq!(*handle, 2);
+ *handle = 3;
+ assert_eq!(*handle, 3);
+ }
+
+ #[test]
+ fn try_owning_handle_ok() {
+ use std::cell::RefCell;
+ let cell = Rc::new(RefCell::new(2));
+ let cell_ref = RcRef::new(cell);
+ let mut handle = OwningHandle::try_new::<_, ()>(cell_ref, |x| {
+ Ok(unsafe { x.as_ref() }.unwrap().borrow_mut())
+ })
+ .unwrap();
+ assert_eq!(*handle, 2);
+ *handle = 3;
+ assert_eq!(*handle, 3);
+ }
+
+ #[test]
+ fn try_owning_handle_err() {
+ use std::cell::RefCell;
+ let cell = Rc::new(RefCell::new(2));
+ let cell_ref = RcRef::new(cell);
+ let handle = OwningHandle::try_new::<_, ()>(cell_ref, |x| {
+ if false {
+ return Ok(unsafe { x.as_ref() }.unwrap().borrow_mut());
+ }
+ Err(())
+ });
+ assert!(handle.is_err());
+ }
+
+ #[test]
+ fn nested() {
+ use std::cell::RefCell;
+ use std::sync::{Arc, RwLock};
+
+ let result = {
+ let complex = Rc::new(RefCell::new(Arc::new(RwLock::new("someString"))));
+ let curr = RcRef::new(complex);
+ let curr =
+ OwningHandle::new_with_fn(curr, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
+ let mut curr = OwningHandle::new_with_fn(curr, |x| {
+ unsafe { x.as_ref() }.unwrap().try_write().unwrap()
+ });
+ assert_eq!(*curr, "someString");
+ *curr = "someOtherString";
+ curr
+ };
+ assert_eq!(*result, "someOtherString");
+ }
+
+ #[test]
+ fn owning_handle_safe() {
+ use std::cell::RefCell;
+ let cell = Rc::new(RefCell::new(2));
+ let cell_ref = RcRef::new(cell);
+ let handle = OwningHandle::new(cell_ref);
+ assert_eq!(*handle, 2);
+ }
+
+ #[test]
+ fn owning_handle_mut_safe() {
+ use std::cell::RefCell;
+ let cell = Rc::new(RefCell::new(2));
+ let cell_ref = RcRef::new(cell);
+ let mut handle = OwningHandle::new_mut(cell_ref);
+ assert_eq!(*handle, 2);
+ *handle = 3;
+ assert_eq!(*handle, 3);
+ }
+
+ #[test]
+ fn owning_handle_safe_2() {
+ let result = {
+ let complex = Rc::new(RefCell::new(Arc::new(RwLock::new("someString"))));
+ let curr = RcRef::new(complex);
+ let curr =
+ OwningHandle::new_with_fn(curr, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
+ let mut curr = OwningHandle::new_with_fn(curr, |x| {
+ unsafe { x.as_ref() }.unwrap().try_write().unwrap()
+ });
+ assert_eq!(*curr, "someString");
+ *curr = "someOtherString";
+ curr
+ };
+ assert_eq!(*result, "someOtherString");
+ }
+}
+
+// FIXME: owning_ref is not sound under stacked borrows. Preferably, get rid of it.
+#[cfg(not(miri))]
+mod owning_ref_mut {
+ use super::super::BoxRef;
+ use super::super::{BoxRefMut, Erased, ErasedBoxRefMut, OwningRefMut};
+ use std::cmp::{Ord, Ordering, PartialEq, PartialOrd};
+ use std::collections::hash_map::DefaultHasher;
+ use std::collections::HashMap;
+ use std::hash::{Hash, Hasher};
+
+ #[derive(Debug, PartialEq)]
+ struct Example(u32, String, [u8; 3]);
+ fn example() -> Example {
+ Example(42, "hello world".to_string(), [1, 2, 3])
+ }
+
+ #[test]
+ fn new_deref() {
+ let or: OwningRefMut<Box<()>, ()> = OwningRefMut::new(Box::new(()));
+ assert_eq!(&*or, &());
+ }
+
+ #[test]
+ fn new_deref_mut() {
+ let mut or: OwningRefMut<Box<()>, ()> = OwningRefMut::new(Box::new(()));
+ assert_eq!(&mut *or, &mut ());
+ }
+
+ #[test]
+ fn mutate() {
+ let mut or: OwningRefMut<Box<usize>, usize> = OwningRefMut::new(Box::new(0));
+ assert_eq!(&*or, &0);
+ *or = 1;
+ assert_eq!(&*or, &1);
+ }
+
+ #[test]
+ fn into() {
+ let or: OwningRefMut<Box<()>, ()> = Box::new(()).into();
+ assert_eq!(&*or, &());
+ }
+
+ #[test]
+ fn map_offset_ref() {
+ let or: BoxRefMut<Example> = Box::new(example()).into();
+ let or: BoxRef<_, u32> = or.map(|x| &mut x.0);
+ assert_eq!(&*or, &42);
+
+ let or: BoxRefMut<Example> = Box::new(example()).into();
+ let or: BoxRef<_, u8> = or.map(|x| &mut x.2[1]);
+ assert_eq!(&*or, &2);
+ }
+
+ #[test]
+ fn map_heap_ref() {
+ let or: BoxRefMut<Example> = Box::new(example()).into();
+ let or: BoxRef<_, str> = or.map(|x| &mut x.1[..5]);
+ assert_eq!(&*or, "hello");
+ }
+
+ #[test]
+ fn map_static_ref() {
+ let or: BoxRefMut<()> = Box::new(()).into();
+ let or: BoxRef<_, str> = or.map(|_| "hello");
+ assert_eq!(&*or, "hello");
+ }
+
+ #[test]
+ fn map_mut_offset_ref() {
+ let or: BoxRefMut<Example> = Box::new(example()).into();
+ let or: BoxRefMut<_, u32> = or.map_mut(|x| &mut x.0);
+ assert_eq!(&*or, &42);
+
+ let or: BoxRefMut<Example> = Box::new(example()).into();
+ let or: BoxRefMut<_, u8> = or.map_mut(|x| &mut x.2[1]);
+ assert_eq!(&*or, &2);
+ }
+
+ #[test]
+ fn map_mut_heap_ref() {
+ let or: BoxRefMut<Example> = Box::new(example()).into();
+ let or: BoxRefMut<_, str> = or.map_mut(|x| &mut x.1[..5]);
+ assert_eq!(&*or, "hello");
+ }
+
+ #[test]
+ fn map_mut_static_ref() {
+ static mut MUT_S: [u8; 5] = *b"hello";
+
+ let mut_s: &'static mut [u8] = unsafe { &mut MUT_S };
+
+ let or: BoxRefMut<()> = Box::new(()).into();
+ let or: BoxRefMut<_, [u8]> = or.map_mut(move |_| mut_s);
+ assert_eq!(&*or, b"hello");
+ }
+
+ #[test]
+ fn map_mut_chained() {
+ let or: BoxRefMut<String> = Box::new(example().1).into();
+ let or: BoxRefMut<_, str> = or.map_mut(|x| &mut x[1..5]);
+ let or: BoxRefMut<_, str> = or.map_mut(|x| &mut x[..2]);
+ assert_eq!(&*or, "el");
+ }
+
+ #[test]
+ fn map_chained_inference() {
+ let or = BoxRefMut::new(Box::new(example().1))
+ .map_mut(|x| &mut x[..5])
+ .map_mut(|x| &mut x[1..3]);
+ assert_eq!(&*or, "el");
+ }
+
+ #[test]
+ fn try_map_mut() {
+ let or: BoxRefMut<String> = Box::new(example().1).into();
+ let or: Result<BoxRefMut<_, str>, ()> = or.try_map_mut(|x| Ok(&mut x[1..5]));
+ assert_eq!(&*or.unwrap(), "ello");
+
+ let or: BoxRefMut<String> = Box::new(example().1).into();
+ let or: Result<BoxRefMut<_, str>, ()> = or.try_map_mut(|_| Err(()));
+ assert!(or.is_err());
+ }
+
+ #[test]
+ fn owner() {
+ let or: BoxRefMut<String> = Box::new(example().1).into();
+ let or = or.map_mut(|x| &mut x[..5]);
+ assert_eq!(&*or, "hello");
+ assert_eq!(&**or.owner(), "hello world");
+ }
+
+ #[test]
+ fn into_inner() {
+ let or: BoxRefMut<String> = Box::new(example().1).into();
+ let or = or.map_mut(|x| &mut x[..5]);
+ assert_eq!(&*or, "hello");
+ let s = *or.into_inner();
+ assert_eq!(&s, "hello world");
+ }
+
+ #[test]
+ fn fmt_debug() {
+ let or: BoxRefMut<String> = Box::new(example().1).into();
+ let or = or.map_mut(|x| &mut x[..5]);
+ let s = format!("{:?}", or);
+ assert_eq!(&s, "OwningRefMut { owner: \"hello world\", reference: \"hello\" }");
+ }
+
+ #[test]
+ fn erased_owner() {
+ let o1: BoxRefMut<Example, str> =
+ BoxRefMut::new(Box::new(example())).map_mut(|x| &mut x.1[..]);
+
+ let o2: BoxRefMut<String, str> =
+ BoxRefMut::new(Box::new(example().1)).map_mut(|x| &mut x[..]);
+
+ let os: Vec<ErasedBoxRefMut<str>> = vec![o1.erase_owner(), o2.erase_owner()];
+ assert!(os.iter().all(|e| &e[..] == "hello world"));
+ }
+
+ #[test]
+ fn raii_locks() {
+ use super::super::RefMutRefMut;
+ use super::super::{MutexGuardRefMut, RwLockWriteGuardRefMut};
+ use std::cell::RefCell;
+ use std::sync::{Mutex, RwLock};
+
+ {
+ let a = RefCell::new(1);
+ let a = {
+ let a = RefMutRefMut::new(a.borrow_mut());
+ assert_eq!(*a, 1);
+ a
+ };
+ assert_eq!(*a, 1);
+ drop(a);
+ }
+ {
+ let a = Mutex::new(1);
+ let a = {
+ let a = MutexGuardRefMut::new(a.lock().unwrap());
+ assert_eq!(*a, 1);
+ a
+ };
+ assert_eq!(*a, 1);
+ drop(a);
+ }
+ {
+ let a = RwLock::new(1);
+ let a = {
+ let a = RwLockWriteGuardRefMut::new(a.write().unwrap());
+ assert_eq!(*a, 1);
+ a
+ };
+ assert_eq!(*a, 1);
+ drop(a);
+ }
+ }
+
+ #[test]
+ fn eq() {
+ let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+ let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+ assert_eq!(or1.eq(&or2), true);
+ }
+
+ #[test]
+ fn cmp() {
+ let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+ let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![4, 5, 6].into_boxed_slice());
+ assert_eq!(or1.cmp(&or2), Ordering::Less);
+ }
+
+ #[test]
+ fn partial_cmp() {
+ let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![4, 5, 6].into_boxed_slice());
+ let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+ assert_eq!(or1.partial_cmp(&or2), Some(Ordering::Greater));
+ }
+
+ #[test]
+ fn hash() {
+ let mut h1 = DefaultHasher::new();
+ let mut h2 = DefaultHasher::new();
+
+ let or1: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+ let or2: BoxRefMut<[u8]> = BoxRefMut::new(vec![1, 2, 3].into_boxed_slice());
+
+ or1.hash(&mut h1);
+ or2.hash(&mut h2);
+
+ assert_eq!(h1.finish(), h2.finish());
+ }
+
+ #[test]
+ fn borrow() {
+ let mut hash = HashMap::new();
+ let key1 = BoxRefMut::<String>::new(Box::new("foo".to_string())).map(|s| &s[..]);
+ let key2 = BoxRefMut::<String>::new(Box::new("bar".to_string())).map(|s| &s[..]);
+
+ hash.insert(key1, 42);
+ hash.insert(key2, 23);
+
+ assert_eq!(hash.get("foo"), Some(&42));
+ assert_eq!(hash.get("bar"), Some(&23));
+ }
+
+ #[test]
+ fn total_erase() {
+ let a: OwningRefMut<Vec<u8>, [u8]> = OwningRefMut::new(vec![]).map_mut(|x| &mut x[..]);
+ let b: OwningRefMut<Box<[u8]>, [u8]> =
+ OwningRefMut::new(vec![].into_boxed_slice()).map_mut(|x| &mut x[..]);
+
+ let c: OwningRefMut<Box<Vec<u8>>, [u8]> = unsafe { a.map_owner(Box::new) };
+ let d: OwningRefMut<Box<Box<[u8]>>, [u8]> = unsafe { b.map_owner(Box::new) };
+
+ let _e: OwningRefMut<Box<dyn Erased>, [u8]> = c.erase_owner();
+ let _f: OwningRefMut<Box<dyn Erased>, [u8]> = d.erase_owner();
+ }
+
+ #[test]
+ fn total_erase_box() {
+ let a: OwningRefMut<Vec<u8>, [u8]> = OwningRefMut::new(vec![]).map_mut(|x| &mut x[..]);
+ let b: OwningRefMut<Box<[u8]>, [u8]> =
+ OwningRefMut::new(vec![].into_boxed_slice()).map_mut(|x| &mut x[..]);
+
+ let c: OwningRefMut<Box<Vec<u8>>, [u8]> = a.map_owner_box();
+ let d: OwningRefMut<Box<Box<[u8]>>, [u8]> = b.map_owner_box();
+
+ let _e: OwningRefMut<Box<dyn Erased>, [u8]> = c.erase_owner();
+ let _f: OwningRefMut<Box<dyn Erased>, [u8]> = d.erase_owner();
+ }
+
+ #[test]
+ fn try_map1() {
+ use std::any::Any;
+
+ let x = Box::new(123_i32);
+ let y: Box<dyn Any> = x;
+
+ assert!(OwningRefMut::new(y).try_map_mut(|x| x.downcast_mut::<i32>().ok_or(())).is_ok());
+ }
+
+ #[test]
+ fn try_map2() {
+ use std::any::Any;
+
+ let x = Box::new(123_i32);
+ let y: Box<dyn Any> = x;
+
+ assert!(!OwningRefMut::new(y).try_map_mut(|x| x.downcast_mut::<i32>().ok_or(())).is_err());
+ }
+
+ #[test]
+ fn try_map3() {
+ use std::any::Any;
+
+ let x = Box::new(123_i32);
+ let y: Box<dyn Any> = x;
+
+ assert!(OwningRefMut::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_ok());
+ }
+
+ #[test]
+ fn try_map4() {
+ use std::any::Any;
+
+ let x = Box::new(123_i32);
+ let y: Box<dyn Any> = x;
+
+ assert!(!OwningRefMut::new(y).try_map(|x| x.downcast_ref::<i32>().ok_or(())).is_err());
+ }
+
+ #[test]
+ fn into_owning_ref() {
+ use super::super::BoxRef;
+
+ let or: BoxRefMut<()> = Box::new(()).into();
+ let or: BoxRef<()> = or.into();
+ assert_eq!(&*or, &());
+ }
+
+ struct Foo {
+ u: u32,
+ }
+ struct Bar {
+ f: Foo,
+ }
+
+ #[test]
+ fn ref_mut() {
+ use std::cell::RefCell;
+
+ let a = RefCell::new(Bar { f: Foo { u: 42 } });
+ let mut b = OwningRefMut::new(a.borrow_mut());
+ assert_eq!(b.f.u, 42);
+ b.f.u = 43;
+ let mut c = b.map_mut(|x| &mut x.f);
+ assert_eq!(c.u, 43);
+ c.u = 44;
+ let mut d = c.map_mut(|x| &mut x.u);
+ assert_eq!(*d, 44);
+ *d = 45;
+ assert_eq!(*d, 45);
+ }
+}
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
new file mode 100644
index 000000000..d8b26f984
--- /dev/null
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -0,0 +1,861 @@
+//! # Rust Compiler Self-Profiling
+//!
+//! This module implements the basic framework for the compiler's self-
+//! profiling support. It provides the `SelfProfiler` type which enables
+//! recording "events". An event is something that starts and ends at a given
+//! point in time and has an ID and a kind attached to it. This allows for
+//! tracing the compiler's activity.
+//!
+//! Internally this module uses the custom tailored [measureme][mm] crate for
+//! efficiently recording events to disk in a compact format that can be
+//! post-processed and analyzed by the suite of tools in the `measureme`
+//! project. The highest priority for the tracing framework is to incur as
+//! little overhead as possible.
+//!
+//!
+//! ## Event Overview
+//!
+//! Events have a few properties:
+//!
+//! - The `event_kind` designates the broad category of an event (e.g. does it
+//! correspond to the execution of a query provider or to loading something
+//! from the incr. comp. on-disk cache, etc).
+//! - The `event_id` designates the query invocation or function call it
+//! corresponds to, possibly including the query key or function arguments.
+//! - Each event stores the ID of the thread it was recorded on.
+//! - The timestamp stores beginning and end of the event, or the single point
+//! in time it occurred at for "instant" events.
+//!
+//!
+//! ## Event Filtering
+//!
+//! Event generation can be filtered by event kind. Recording all possible
+//! events generates a lot of data, much of which is not needed for most kinds
+//! of analysis. So, in order to keep overhead as low as possible for a given
+//! use case, the `SelfProfiler` will only record the kinds of events that
+//! pass the filter specified as a command line argument to the compiler.
+//!
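+//! For example, `-Z self-profile -Z self-profile-events=default,args`
+//! (using the names accepted by the `-Z self-profile-events` flag) records
+//! the default event kinds plus query keys and function arguments.
+//!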
+//!
+//! ## `event_id` Assignment
+//!
+//! As far as `measureme` is concerned, `event_id`s are just strings. However,
+//! it would incur too much overhead to generate and persist each `event_id`
+//! string at the point where the event is recorded. In order to make this more
+//! efficient `measureme` has two features:
+//!
+//! - Strings can share their content, so that re-occurring parts don't have to
+//! be copied over and over again. One allocates a string in `measureme` and
+//! gets back a `StringId`. This `StringId` is then used to refer to that
+//! string. `measureme` strings are actually DAGs of string components so that
+//! arbitrary sharing of substrings can be done efficiently. This is useful
+//! because `event_id`s contain lots of redundant text like query names or
+//! def-path components.
+//!
+//! - `StringId`s can be "virtual" which means that the client picks a numeric
+//! ID according to some application-specific scheme and can later make that
+//! ID be mapped to an actual string. This is used to cheaply generate
+//! `event_id`s while the events actually occur, causing little timing
+//! distortion, and then later map those `StringId`s, in bulk, to actual
+//! `event_id` strings. This way the largest part of the tracing overhead is
+//! localized to one contiguous chunk of time.
+//!
+//! How are these `event_id`s generated in the compiler? For things that occur
+//! infrequently (e.g. "generic activities"), we just allocate the string the
+//! first time it is used and then keep the `StringId` in a hash table. This
+//! is implemented in `SelfProfiler::get_or_alloc_cached_string()`.
+//!
+//! For queries it gets more interesting: First we need a unique numeric ID for
+//! each query invocation (the `QueryInvocationId`). This ID is used as the
+//! virtual `StringId` we use as `event_id` for a given event. This ID has to
+//! be available both when the query is executed and later, together with the
+//! query key, when we allocate the actual `event_id` strings in bulk.
+//!
+//! We could make the compiler generate and keep track of such an ID for each
+//! query invocation, but luckily we already have something that fits all the
+//! requirements: the query's `DepNodeIndex`. So we use the numeric value
+//! of the `DepNodeIndex` as `event_id` when recording the event and then,
+//! just before the query context is dropped, we walk the entire query cache
+//! (which stores the `DepNodeIndex` along with the query key for each
+//! invocation) and allocate the corresponding strings together with a mapping
+//! from each `DepNodeIndex` to its `StringId`.
+//!
+//! [mm]: https://github.com/rust-lang/measureme/
+
+use crate::cold_path;
+use crate::fx::FxHashMap;
+
+use std::borrow::Borrow;
+use std::collections::hash_map::Entry;
+use std::convert::Into;
+use std::error::Error;
+use std::fs;
+use std::path::Path;
+use std::process;
+use std::sync::Arc;
+use std::time::{Duration, Instant};
+
+pub use measureme::EventId;
+use measureme::{EventIdBuilder, Profiler, SerializableString, StringId};
+use parking_lot::RwLock;
+use smallvec::SmallVec;
+
+bitflags::bitflags! {
+ struct EventFilter: u32 {
+ const GENERIC_ACTIVITIES = 1 << 0;
+ const QUERY_PROVIDERS = 1 << 1;
+ const QUERY_CACHE_HITS = 1 << 2;
+ const QUERY_BLOCKED = 1 << 3;
+ const INCR_CACHE_LOADS = 1 << 4;
+
+ const QUERY_KEYS = 1 << 5;
+ const FUNCTION_ARGS = 1 << 6;
+ const LLVM = 1 << 7;
+ const INCR_RESULT_HASHING = 1 << 8;
+ const ARTIFACT_SIZES = 1 << 9;
+
+ const DEFAULT = Self::GENERIC_ACTIVITIES.bits |
+ Self::QUERY_PROVIDERS.bits |
+ Self::QUERY_BLOCKED.bits |
+ Self::INCR_CACHE_LOADS.bits |
+ Self::INCR_RESULT_HASHING.bits |
+ Self::ARTIFACT_SIZES.bits;
+
+ const ARGS = Self::QUERY_KEYS.bits | Self::FUNCTION_ARGS.bits;
+ }
+}
+
+// keep this in sync with the `-Z self-profile-events` help message in rustc_session/options.rs
+const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
+ ("none", EventFilter::empty()),
+ ("all", EventFilter::all()),
+ ("default", EventFilter::DEFAULT),
+ ("generic-activity", EventFilter::GENERIC_ACTIVITIES),
+ ("query-provider", EventFilter::QUERY_PROVIDERS),
+ ("query-cache-hit", EventFilter::QUERY_CACHE_HITS),
+ ("query-blocked", EventFilter::QUERY_BLOCKED),
+ ("incr-cache-load", EventFilter::INCR_CACHE_LOADS),
+ ("query-keys", EventFilter::QUERY_KEYS),
+ ("function-args", EventFilter::FUNCTION_ARGS),
+ ("args", EventFilter::ARGS),
+ ("llvm", EventFilter::LLVM),
+ ("incr-result-hashing", EventFilter::INCR_RESULT_HASHING),
+ ("artifact-sizes", EventFilter::ARTIFACT_SIZES),
+];
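+
+// For example, `-Z self-profile-events=default,args` selects
+// `EventFilter::DEFAULT | EventFilter::ARGS`: each comma-separated name is
+// looked up in this table and the resulting masks are OR-ed together.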
+
+/// Something that uniquely identifies a query invocation.
+pub struct QueryInvocationId(pub u32);
+
+/// A reference to the SelfProfiler. It can be cloned and sent across thread
+/// boundaries at will.
+#[derive(Clone)]
+pub struct SelfProfilerRef {
+ // This field is `None` if self-profiling is disabled for the current
+ // compilation session.
+ profiler: Option<Arc<SelfProfiler>>,
+
+ // We store the filter mask directly in the reference because that doesn't
+ // cost anything and allows for filtering without checking if the profiler is
+ // actually enabled.
+ event_filter_mask: EventFilter,
+
+ // Print verbose generic activities to stdout
+ print_verbose_generic_activities: bool,
+
+ // Print extra verbose generic activities to stdout
+ print_extra_verbose_generic_activities: bool,
+}
+
+impl SelfProfilerRef {
+ pub fn new(
+ profiler: Option<Arc<SelfProfiler>>,
+ print_verbose_generic_activities: bool,
+ print_extra_verbose_generic_activities: bool,
+ ) -> SelfProfilerRef {
+ // If there is no SelfProfiler then the filter mask is set to NONE,
+ // ensuring that nothing ever tries to actually access it.
+ let event_filter_mask =
+ profiler.as_ref().map_or(EventFilter::empty(), |p| p.event_filter_mask);
+
+ SelfProfilerRef {
+ profiler,
+ event_filter_mask,
+ print_verbose_generic_activities,
+ print_extra_verbose_generic_activities,
+ }
+ }
+
+ /// This shim makes sure that calls only get executed if the filter mask
+ /// lets them pass. It also contains some trickery to make sure that
+ /// code is optimized for non-profiling compilation sessions, i.e. anything
+ /// past the filter check is never inlined so it doesn't clutter the fast
+ /// path.
+ #[inline(always)]
+ fn exec<F>(&self, event_filter: EventFilter, f: F) -> TimingGuard<'_>
+ where
+ F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
+ {
+ #[inline(never)]
+ #[cold]
+ fn cold_call<F>(profiler_ref: &SelfProfilerRef, f: F) -> TimingGuard<'_>
+ where
+ F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
+ {
+ let profiler = profiler_ref.profiler.as_ref().unwrap();
+ f(&**profiler)
+ }
+
+ if self.event_filter_mask.contains(event_filter) {
+ cold_call(self, f)
+ } else {
+ TimingGuard::none()
+ }
+ }
+
+ /// Start profiling a verbose generic activity. Profiling continues until the
+ /// VerboseTimingGuard returned from this call is dropped. In addition to recording
+ /// a measureme event, "verbose" generic activities also print a timing entry to
+ /// stdout if the compiler is invoked with -Ztime or -Ztime-passes.
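+ ///
+ /// A usage sketch (receiver and label are illustrative only):
+ /// ```ignore (illustrative)
+ /// let _timer = sess.prof.verbose_generic_activity("type_check_crate");
+ /// // ... timed work here; the event is recorded when `_timer` is dropped ...
+ /// ```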
+ pub fn verbose_generic_activity<'a>(
+ &'a self,
+ event_label: &'static str,
+ ) -> VerboseTimingGuard<'a> {
+ let message =
+ if self.print_verbose_generic_activities { Some(event_label.to_owned()) } else { None };
+
+ VerboseTimingGuard::start(message, self.generic_activity(event_label))
+ }
+
+ /// Start profiling an extra verbose generic activity. Profiling continues until the
+ /// VerboseTimingGuard returned from this call is dropped. In addition to recording
+ /// a measureme event, "extra verbose" generic activities also print a timing entry to
+ /// stdout if the compiler is invoked with -Ztime-passes.
+ pub fn extra_verbose_generic_activity<'a, A>(
+ &'a self,
+ event_label: &'static str,
+ event_arg: A,
+ ) -> VerboseTimingGuard<'a>
+ where
+ A: Borrow<str> + Into<String>,
+ {
+ let message = if self.print_extra_verbose_generic_activities {
+ Some(format!("{}({})", event_label, event_arg.borrow()))
+ } else {
+ None
+ };
+
+ VerboseTimingGuard::start(message, self.generic_activity_with_arg(event_label, event_arg))
+ }
+
+ /// Start profiling a generic activity. Profiling continues until the
+ /// TimingGuard returned from this call is dropped.
+ #[inline(always)]
+ pub fn generic_activity(&self, event_label: &'static str) -> TimingGuard<'_> {
+ self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
+ let event_label = profiler.get_or_alloc_cached_string(event_label);
+ let event_id = EventId::from_label(event_label);
+ TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
+ })
+ }
+
+ /// Start profiling a generic activity identified by the given `EventId`.
+ /// Profiling continues until the TimingGuard returned from this call is dropped.
+ #[inline(always)]
+ pub fn generic_activity_with_event_id(&self, event_id: EventId) -> TimingGuard<'_> {
+ self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
+ TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
+ })
+ }
+
+ /// Start profiling a generic activity, recording `event_arg` when argument
+ /// recording is enabled. Profiling continues until the TimingGuard returned
+ /// from this call is dropped.
+ #[inline(always)]
+ pub fn generic_activity_with_arg<A>(
+ &self,
+ event_label: &'static str,
+ event_arg: A,
+ ) -> TimingGuard<'_>
+ where
+ A: Borrow<str> + Into<String>,
+ {
+ self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
+ let builder = EventIdBuilder::new(&profiler.profiler);
+ let event_label = profiler.get_or_alloc_cached_string(event_label);
+ let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
+ let event_arg = profiler.get_or_alloc_cached_string(event_arg);
+ builder.from_label_and_arg(event_label, event_arg)
+ } else {
+ builder.from_label(event_label)
+ };
+ TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
+ })
+ }
+
+ /// Start profiling a generic activity, allowing costly arguments to be recorded. Profiling
+ /// continues until the `TimingGuard` returned from this call is dropped.
+ ///
+ /// If the arguments to a generic activity are cheap to create, use `generic_activity_with_arg`
+ /// or `generic_activity_with_args` for their simpler API. However, if they are costly or
+ /// require allocation in sufficiently hot contexts, then this allows for a closure to be called
+ /// only when arguments were asked to be recorded via `-Z self-profile-events=args`.
+ ///
+ /// In this case, the closure will be passed a `&mut EventArgRecorder`, to help with recording
+ /// one or many arguments within the generic activity being profiled, by calling its
+ /// `record_arg` method for example.
+ ///
+ /// This `EventArgRecorder` may implement more specific traits from other rustc crates, e.g. for
+ /// richer handling of rustc-specific argument types, while keeping this single entry-point API
+ /// for recording arguments.
+ ///
+ /// Note: recording at least one argument is *required* for the self-profiler to create the
+ /// `TimingGuard`. A panic will be triggered if that doesn't happen. This function exists
+ /// explicitly to record arguments, so it fails loudly when there are none to record.
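+ ///
+ /// A minimal usage sketch (the label, closure body, and `module_index` are
+ /// illustrative, not taken from real call sites):
+ ///
+ /// ```ignore (illustrative)
+ /// let _timer = profiler.generic_activity_with_arg_recorder("encode_module", |recorder| {
+ ///     // This closure only runs when `-Z self-profile-events=args` is active,
+ ///     // so the potentially costly argument is built on demand.
+ ///     recorder.record_arg(format!("module-{}", module_index));
+ /// });
+ /// ```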
+ ///
+ #[inline(always)]
+ pub fn generic_activity_with_arg_recorder<F>(
+ &self,
+ event_label: &'static str,
+ mut f: F,
+ ) -> TimingGuard<'_>
+ where
+ F: FnMut(&mut EventArgRecorder<'_>),
+ {
+ // Ensure this event will only be recorded when self-profiling is turned on.
+ self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
+ let builder = EventIdBuilder::new(&profiler.profiler);
+ let event_label = profiler.get_or_alloc_cached_string(event_label);
+
+ // Ensure the closure to create event arguments will only be called when argument
+ // recording is turned on.
+ let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
+ // Set up the builder and call the user-provided closure to record potentially
+ // costly event arguments.
+ let mut recorder = EventArgRecorder { profiler, args: SmallVec::new() };
+ f(&mut recorder);
+
+ // It is expected that the closure will record at least one argument. If that
+ // doesn't happen, it's a bug: we've been explicitly called in order to record
+ // arguments, so we fail loudly when there are none to record.
+ if recorder.args.is_empty() {
+ panic!(
+ "The closure passed to `generic_activity_with_arg_recorder` needs to \
+ record at least one argument"
+ );
+ }
+
+ builder.from_label_and_args(event_label, &recorder.args)
+ } else {
+ builder.from_label(event_label)
+ };
+ TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
+ })
+ }
+
+ /// Record the size of an artifact that the compiler produces.
+ ///
+ /// `artifact_kind` is the class of artifact (e.g. query_cache or object_file);
+ /// `artifact_name` is an identifier for the specific artifact being stored (usually a filename).
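+ ///
+ /// A usage sketch (the kind, name, and size are illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// profiler.artifact_size("query_cache", "query-cache.bin", size_in_bytes);
+ /// ```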
+ #[inline(always)]
+ pub fn artifact_size<A>(&self, artifact_kind: &str, artifact_name: A, size: u64)
+ where
+ A: Borrow<str> + Into<String>,
+ {
+ drop(self.exec(EventFilter::ARTIFACT_SIZES, |profiler| {
+ let builder = EventIdBuilder::new(&profiler.profiler);
+ let event_label = profiler.get_or_alloc_cached_string(artifact_kind);
+ let event_arg = profiler.get_or_alloc_cached_string(artifact_name);
+ let event_id = builder.from_label_and_arg(event_label, event_arg);
+ let thread_id = get_thread_id();
+
+ profiler.profiler.record_integer_event(
+ profiler.artifact_size_event_kind,
+ event_id,
+ thread_id,
+ size,
+ );
+
+ TimingGuard::none()
+ }))
+ }
+
+ #[inline(always)]
+ pub fn generic_activity_with_args(
+ &self,
+ event_label: &'static str,
+ event_args: &[String],
+ ) -> TimingGuard<'_> {
+ self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
+ let builder = EventIdBuilder::new(&profiler.profiler);
+ let event_label = profiler.get_or_alloc_cached_string(event_label);
+ let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
+ let event_args: Vec<_> = event_args
+ .iter()
+ .map(|s| profiler.get_or_alloc_cached_string(&s[..]))
+ .collect();
+ builder.from_label_and_args(event_label, &event_args)
+ } else {
+ builder.from_label(event_label)
+ };
+ TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
+ })
+ }
+
+ /// Start profiling a query provider. Profiling continues until the
+ /// TimingGuard returned from this call is dropped.
+ #[inline(always)]
+ pub fn query_provider(&self) -> TimingGuard<'_> {
+ self.exec(EventFilter::QUERY_PROVIDERS, |profiler| {
+ TimingGuard::start(profiler, profiler.query_event_kind, EventId::INVALID)
+ })
+ }
+
+ /// Record a query in-memory cache hit.
+ #[inline(always)]
+ pub fn query_cache_hit(&self, query_invocation_id: QueryInvocationId) {
+ self.instant_query_event(
+ |profiler| profiler.query_cache_hit_event_kind,
+ query_invocation_id,
+ EventFilter::QUERY_CACHE_HITS,
+ );
+ }
+
+ /// Start profiling a query being blocked on a concurrent execution.
+ /// Profiling continues until the TimingGuard returned from this call is
+ /// dropped.
+ #[inline(always)]
+ pub fn query_blocked(&self) -> TimingGuard<'_> {
+ self.exec(EventFilter::QUERY_BLOCKED, |profiler| {
+ TimingGuard::start(profiler, profiler.query_blocked_event_kind, EventId::INVALID)
+ })
+ }
+
+ /// Start profiling how long it takes to load a query result from the
+ /// incremental compilation on-disk cache. Profiling continues until the
+ /// TimingGuard returned from this call is dropped.
+ #[inline(always)]
+ pub fn incr_cache_loading(&self) -> TimingGuard<'_> {
+ self.exec(EventFilter::INCR_CACHE_LOADS, |profiler| {
+ TimingGuard::start(
+ profiler,
+ profiler.incremental_load_result_event_kind,
+ EventId::INVALID,
+ )
+ })
+ }
+
+ /// Start profiling how long it takes to hash query results for incremental compilation.
+ /// Profiling continues until the TimingGuard returned from this call is dropped.
+ #[inline(always)]
+ pub fn incr_result_hashing(&self) -> TimingGuard<'_> {
+ self.exec(EventFilter::INCR_RESULT_HASHING, |profiler| {
+ TimingGuard::start(
+ profiler,
+ profiler.incremental_result_hashing_event_kind,
+ EventId::INVALID,
+ )
+ })
+ }
+
+ #[inline(always)]
+ fn instant_query_event(
+ &self,
+ event_kind: fn(&SelfProfiler) -> StringId,
+ query_invocation_id: QueryInvocationId,
+ event_filter: EventFilter,
+ ) {
+ drop(self.exec(event_filter, |profiler| {
+ let event_id = StringId::new_virtual(query_invocation_id.0);
+ let thread_id = get_thread_id();
+
+ profiler.profiler.record_instant_event(
+ event_kind(profiler),
+ EventId::from_virtual(event_id),
+ thread_id,
+ );
+
+ TimingGuard::none()
+ }));
+ }
+
+ pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
+ if let Some(profiler) = &self.profiler {
+ f(&profiler)
+ }
+ }
+
+ /// Gets a `StringId` for the given string. This method makes sure that
+ /// any strings going through it will only be allocated once in the
+ /// profiling data.
+ /// Returns `None` if self-profiling is not enabled.
+ pub fn get_or_alloc_cached_string(&self, s: &str) -> Option<StringId> {
+ self.profiler.as_ref().map(|p| p.get_or_alloc_cached_string(s))
+ }
+
+ #[inline]
+ pub fn enabled(&self) -> bool {
+ self.profiler.is_some()
+ }
+
+ #[inline]
+ pub fn llvm_recording_enabled(&self) -> bool {
+ self.event_filter_mask.contains(EventFilter::LLVM)
+ }
+
+ #[inline]
+ pub fn get_self_profiler(&self) -> Option<Arc<SelfProfiler>> {
+ self.profiler.clone()
+ }
+}
+
+/// A helper for recording costly arguments to self-profiling events. Used with
+/// `SelfProfilerRef::generic_activity_with_arg_recorder`.
+pub struct EventArgRecorder<'p> {
+ /// The `SelfProfiler` used to intern the event arguments that users will ask to record.
+ profiler: &'p SelfProfiler,
+
+ /// The interned event arguments to be recorded in the generic activity event.
+ ///
+ /// The most common case, when actually recording event arguments, is to have a
+ /// single argument; recording two arguments happens in a couple of places.
+ args: SmallVec<[StringId; 2]>,
+}
+
+impl EventArgRecorder<'_> {
+ /// Records a single argument within the current generic activity being profiled.
+ ///
+ /// Note: when self-profiling with costly event arguments, at least one argument
+ /// needs to be recorded. A panic will be triggered if that doesn't happen.
+ pub fn record_arg<A>(&mut self, event_arg: A)
+ where
+ A: Borrow<str> + Into<String>,
+ {
+ let event_arg = self.profiler.get_or_alloc_cached_string(event_arg);
+ self.args.push(event_arg);
+ }
+}
+
+pub struct SelfProfiler {
+ profiler: Profiler,
+ event_filter_mask: EventFilter,
+
+ string_cache: RwLock<FxHashMap<String, StringId>>,
+
+ query_event_kind: StringId,
+ generic_activity_event_kind: StringId,
+ incremental_load_result_event_kind: StringId,
+ incremental_result_hashing_event_kind: StringId,
+ query_blocked_event_kind: StringId,
+ query_cache_hit_event_kind: StringId,
+ artifact_size_event_kind: StringId,
+}
+
+impl SelfProfiler {
+ pub fn new(
+ output_directory: &Path,
+ crate_name: Option<&str>,
+ event_filters: Option<&[String]>,
+ counter_name: &str,
+ ) -> Result<SelfProfiler, Box<dyn Error + Send + Sync>> {
+ fs::create_dir_all(output_directory)?;
+
+ let crate_name = crate_name.unwrap_or("unknown-crate");
+ // HACK(eddyb) we need to pad the PID, strange as it may seem, as its
+ // length can behave as a source of entropy for heap addresses, when
+ // ASLR is disabled and the heap is otherwise deterministic.
+ let pid: u32 = process::id();
+ let filename = format!("{}-{:07}.rustc_profile", crate_name, pid);
+ let path = output_directory.join(&filename);
+ let profiler =
+ Profiler::with_counter(&path, measureme::counters::Counter::by_name(counter_name)?)?;
+
+ let query_event_kind = profiler.alloc_string("Query");
+ let generic_activity_event_kind = profiler.alloc_string("GenericActivity");
+ let incremental_load_result_event_kind = profiler.alloc_string("IncrementalLoadResult");
+ let incremental_result_hashing_event_kind =
+ profiler.alloc_string("IncrementalResultHashing");
+ let query_blocked_event_kind = profiler.alloc_string("QueryBlocked");
+ let query_cache_hit_event_kind = profiler.alloc_string("QueryCacheHit");
+ let artifact_size_event_kind = profiler.alloc_string("ArtifactSize");
+
+ let mut event_filter_mask = EventFilter::empty();
+
+ if let Some(event_filters) = event_filters {
+ let mut unknown_events = vec![];
+ for item in event_filters {
+ if let Some(&(_, mask)) =
+ EVENT_FILTERS_BY_NAME.iter().find(|&(name, _)| name == item)
+ {
+ event_filter_mask |= mask;
+ } else {
+ unknown_events.push(item.clone());
+ }
+ }
+
+ // Warn about any unknown event names
+ if !unknown_events.is_empty() {
+ unknown_events.sort();
+ unknown_events.dedup();
+
+ warn!(
+ "Unknown self-profiler events specified: {}. Available options are: {}.",
+ unknown_events.join(", "),
+ EVENT_FILTERS_BY_NAME
+ .iter()
+ .map(|&(name, _)| name.to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
+ );
+ }
+ } else {
+ event_filter_mask = EventFilter::DEFAULT;
+ }
+
+ Ok(SelfProfiler {
+ profiler,
+ event_filter_mask,
+ string_cache: RwLock::new(FxHashMap::default()),
+ query_event_kind,
+ generic_activity_event_kind,
+ incremental_load_result_event_kind,
+ incremental_result_hashing_event_kind,
+ query_blocked_event_kind,
+ query_cache_hit_event_kind,
+ artifact_size_event_kind,
+ })
+ }
+
+ /// Allocates a new string in the profiling data. Does not do any caching
+ /// or deduplication.
+ pub fn alloc_string<STR: SerializableString + ?Sized>(&self, s: &STR) -> StringId {
+ self.profiler.alloc_string(s)
+ }
+
+ /// Gets a `StringId` for the given string. This method makes sure that
+ /// any strings going through it will only be allocated once in the
+ /// profiling data.
+ pub fn get_or_alloc_cached_string<A>(&self, s: A) -> StringId
+ where
+ A: Borrow<str> + Into<String>,
+ {
+ // Acquire only a read-lock first, since we assume that the string is
+ // already present in the common case.
+ {
+ let string_cache = self.string_cache.read();
+
+ if let Some(&id) = string_cache.get(s.borrow()) {
+ return id;
+ }
+ }
+
+ let mut string_cache = self.string_cache.write();
+ // Check if the string has already been added in the small time window
+ // between dropping the read lock and acquiring the write lock.
+ match string_cache.entry(s.into()) {
+ Entry::Occupied(e) => *e.get(),
+ Entry::Vacant(e) => {
+ let string_id = self.profiler.alloc_string(&e.key()[..]);
+ *e.insert(string_id)
+ }
+ }
+ }
+
+ pub fn map_query_invocation_id_to_string(&self, from: QueryInvocationId, to: StringId) {
+ let from = StringId::new_virtual(from.0);
+ self.profiler.map_virtual_to_concrete_string(from, to);
+ }
+
+ pub fn bulk_map_query_invocation_id_to_single_string<I>(&self, from: I, to: StringId)
+ where
+ I: Iterator<Item = QueryInvocationId> + ExactSizeIterator,
+ {
+ let from = from.map(|qid| StringId::new_virtual(qid.0));
+ self.profiler.bulk_map_virtual_to_single_concrete_string(from, to);
+ }
+
+ pub fn query_key_recording_enabled(&self) -> bool {
+ self.event_filter_mask.contains(EventFilter::QUERY_KEYS)
+ }
+
+ pub fn event_id_builder(&self) -> EventIdBuilder<'_> {
+ EventIdBuilder::new(&self.profiler)
+ }
+}
+
+#[must_use]
+pub struct TimingGuard<'a>(Option<measureme::TimingGuard<'a>>);
+
+impl<'a> TimingGuard<'a> {
+ #[inline]
+ pub fn start(
+ profiler: &'a SelfProfiler,
+ event_kind: StringId,
+ event_id: EventId,
+ ) -> TimingGuard<'a> {
+ let thread_id = get_thread_id();
+ let raw_profiler = &profiler.profiler;
+ let timing_guard =
+ raw_profiler.start_recording_interval_event(event_kind, event_id, thread_id);
+ TimingGuard(Some(timing_guard))
+ }
+
+ #[inline]
+ pub fn finish_with_query_invocation_id(self, query_invocation_id: QueryInvocationId) {
+ if let Some(guard) = self.0 {
+ cold_path(|| {
+ let event_id = StringId::new_virtual(query_invocation_id.0);
+ let event_id = EventId::from_virtual(event_id);
+ guard.finish_with_override_event_id(event_id);
+ });
+ }
+ }
+
+ #[inline]
+ pub fn none() -> TimingGuard<'a> {
+ TimingGuard(None)
+ }
+
+ #[inline(always)]
+ pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
+ let _timer = self;
+ f()
+ }
+}
+
+#[must_use]
+pub struct VerboseTimingGuard<'a> {
+ start_and_message: Option<(Instant, Option<usize>, String)>,
+ _guard: TimingGuard<'a>,
+}
+
+impl<'a> VerboseTimingGuard<'a> {
+ pub fn start(message: Option<String>, _guard: TimingGuard<'a>) -> Self {
+ VerboseTimingGuard {
+ _guard,
+ start_and_message: message.map(|msg| (Instant::now(), get_resident_set_size(), msg)),
+ }
+ }
+
+ #[inline(always)]
+ pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
+ let _timer = self;
+ f()
+ }
+}
+
+impl Drop for VerboseTimingGuard<'_> {
+ fn drop(&mut self) {
+ if let Some((start_time, start_rss, ref message)) = self.start_and_message {
+ let end_rss = get_resident_set_size();
+ let dur = start_time.elapsed();
+
+ if should_print_passes(dur, start_rss, end_rss) {
+ print_time_passes_entry(&message, dur, start_rss, end_rss);
+ }
+ }
+ }
+}
+
+fn should_print_passes(dur: Duration, start_rss: Option<usize>, end_rss: Option<usize>) -> bool {
+ if dur.as_millis() > 5 {
+ return true;
+ }
+
+ if let (Some(start_rss), Some(end_rss)) = (start_rss, end_rss) {
+ let change_rss = end_rss.abs_diff(start_rss);
+ if change_rss > 0 {
+ return true;
+ }
+ }
+
+ false
+}
+
+pub fn print_time_passes_entry(
+ what: &str,
+ dur: Duration,
+ start_rss: Option<usize>,
+ end_rss: Option<usize>,
+) {
+ let rss_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as usize;
+ let rss_change_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as i128;
+
+ let mem_string = match (start_rss, end_rss) {
+ (Some(start_rss), Some(end_rss)) => {
+ let change_rss = end_rss as i128 - start_rss as i128;
+
+ format!(
+ "; rss: {:>4}MB -> {:>4}MB ({:>+5}MB)",
+ rss_to_mb(start_rss),
+ rss_to_mb(end_rss),
+ rss_change_to_mb(change_rss),
+ )
+ }
+ (Some(start_rss), None) => format!("; rss start: {:>4}MB", rss_to_mb(start_rss)),
+ (None, Some(end_rss)) => format!("; rss end: {:>4}MB", rss_to_mb(end_rss)),
+ (None, None) => String::new(),
+ };
+
+ eprintln!("time: {:>7}{}\t{}", duration_to_secs_str(dur), mem_string, what);
+}
+
+// Hack up our own formatting for the duration to make it easier for scripts
+// to parse (always use the same number of decimal places and the same unit).
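+// For example, a pass taking 1.5 seconds renders as "1.500", so a full
+// time-passes entry reads `time:   1.500<TAB>name_of_pass`.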
+pub fn duration_to_secs_str(dur: std::time::Duration) -> String {
+ format!("{:.3}", dur.as_secs_f64())
+}
+
+fn get_thread_id() -> u32 {
+ std::thread::current().id().as_u64().get() as u32
+}
+
+// Memory reporting
+cfg_if! {
+ if #[cfg(windows)] {
+ pub fn get_resident_set_size() -> Option<usize> {
+ use std::mem::{self, MaybeUninit};
+ use winapi::shared::minwindef::DWORD;
+ use winapi::um::processthreadsapi::GetCurrentProcess;
+ use winapi::um::psapi::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS};
+
+ let mut pmc = MaybeUninit::<PROCESS_MEMORY_COUNTERS>::uninit();
+ match unsafe {
+ GetProcessMemoryInfo(GetCurrentProcess(), pmc.as_mut_ptr(), mem::size_of_val(&pmc) as DWORD)
+ } {
+ 0 => None,
+ _ => {
+ let pmc = unsafe { pmc.assume_init() };
+ Some(pmc.WorkingSetSize as usize)
+ }
+ }
+ }
+ } else if #[cfg(target_os = "macos")] {
+ pub fn get_resident_set_size() -> Option<usize> {
+ use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO};
+ use std::mem;
+ const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int;
+
+ unsafe {
+ let mut info: proc_taskinfo = mem::zeroed();
+ let info_ptr = &mut info as *mut proc_taskinfo as *mut c_void;
+ let pid = getpid() as c_int;
+ let ret = proc_pidinfo(pid, PROC_PIDTASKINFO, 0, info_ptr, PROC_TASKINFO_SIZE);
+ if ret == PROC_TASKINFO_SIZE {
+ Some(info.pti_resident_size as usize)
+ } else {
+ None
+ }
+ }
+ }
+ } else if #[cfg(unix)] {
+ pub fn get_resident_set_size() -> Option<usize> {
+ // Field 1 of /proc/self/statm is the resident set size, in pages.
+ let field = 1;
+ let contents = fs::read("/proc/self/statm").ok()?;
+ let contents = String::from_utf8(contents).ok()?;
+ let s = contents.split_whitespace().nth(field)?;
+ let npages = s.parse::<usize>().ok()?;
+ // Note: this assumes the conventional 4 KiB page size rather than
+ // querying the actual page size of the system.
+ Some(npages * 4096)
+ }
+ }
+ } else {
+ pub fn get_resident_set_size() -> Option<usize> {
+ None
+ }
+ }
+}
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
new file mode 100644
index 000000000..01d292dde
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -0,0 +1,150 @@
+use crate::fx::{FxHashMap, FxHasher};
+use crate::sync::{Lock, LockGuard};
+use std::borrow::Borrow;
+use std::collections::hash_map::RawEntryMut;
+use std::hash::{Hash, Hasher};
+use std::mem;
+
+#[derive(Clone, Default)]
+#[cfg_attr(parallel_compiler, repr(align(64)))]
+struct CacheAligned<T>(T);
+
+#[cfg(parallel_compiler)]
+// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
+// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
+// may also affect the ideal number of shards.
+const SHARD_BITS: usize = 5;
+
+#[cfg(not(parallel_compiler))]
+const SHARD_BITS: usize = 0;
+
+pub const SHARDS: usize = 1 << SHARD_BITS;
+
+/// An array of cache-line aligned inner locked structures with convenience methods.
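+///
+/// A usage sketch (the key type and values are illustrative):
+///
+/// ```ignore (illustrative)
+/// let sharded: Sharded<FxHashMap<u32, &str>> = Sharded::default();
+/// // The shard is picked by hashing the key, so threads touching different
+/// // keys usually contend on different locks.
+/// sharded.get_shard_by_value(&42).lock().insert(42, "forty-two");
+/// ```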
+#[derive(Clone)]
+pub struct Sharded<T> {
+ shards: [CacheAligned<Lock<T>>; SHARDS],
+}
+
+impl<T: Default> Default for Sharded<T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new(T::default)
+ }
+}
+
+impl<T> Sharded<T> {
+ #[inline]
+ pub fn new(mut value: impl FnMut() -> T) -> Self {
+ Sharded { shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))) }
+ }
+
+ /// The shard is selected by hashing `val` with `FxHasher`.
+ #[inline]
+ pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
+ if SHARDS == 1 { &self.shards[0].0 } else { self.get_shard_by_hash(make_hash(val)) }
+ }
+
+ #[inline]
+ pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
+ &self.shards[get_shard_index_by_hash(hash)].0
+ }
+
+ #[inline]
+ pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
+ &self.shards[i].0
+ }
+
+ pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
+ (0..SHARDS).map(|i| self.shards[i].0.lock()).collect()
+ }
+
+ pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
+ (0..SHARDS).map(|i| self.shards[i].0.try_lock()).collect()
+ }
+}
+
+pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
+
+impl<K: Eq, V> ShardedHashMap<K, V> {
+ pub fn len(&self) -> usize {
+ self.lock_shards().iter().map(|shard| shard.len()).sum()
+ }
+}
+
+impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
+ #[inline]
+ pub fn intern_ref<Q: ?Sized>(&self, value: &Q, make: impl FnOnce() -> K) -> K
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ let hash = make_hash(value);
+ let mut shard = self.get_shard_by_hash(hash).lock();
+ let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
+
+ match entry {
+ RawEntryMut::Occupied(e) => *e.key(),
+ RawEntryMut::Vacant(e) => {
+ let v = make();
+ e.insert_hashed_nocheck(hash, v, ());
+ v
+ }
+ }
+ }
+
+ #[inline]
+ pub fn intern<Q>(&self, value: Q, make: impl FnOnce(Q) -> K) -> K
+ where
+ K: Borrow<Q>,
+ Q: Hash + Eq,
+ {
+ let hash = make_hash(&value);
+ let mut shard = self.get_shard_by_hash(hash).lock();
+ let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
+
+ match entry {
+ RawEntryMut::Occupied(e) => *e.key(),
+ RawEntryMut::Vacant(e) => {
+ let v = make(value);
+ e.insert_hashed_nocheck(hash, v, ());
+ v
+ }
+ }
+ }
+}
+
+pub trait IntoPointer {
+ /// Returns a pointer which outlives `self`.
+ fn into_pointer(&self) -> *const ();
+}
+
+impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
+ pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
+ let hash = make_hash(&value);
+ let shard = self.get_shard_by_hash(hash).lock();
+ let value = value.into_pointer();
+ shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
+ }
+}
+
+#[inline]
+pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 {
+ let mut state = FxHasher::default();
+ val.hash(&mut state);
+ state.finish()
+}
+
+/// Get a shard with a pre-computed hash value. If `get_shard_by_value` is
+/// ever used in combination with `get_shard_by_hash` on a single `Sharded`
+/// instance, then `hash` must be computed with `FxHasher`. Otherwise,
+/// `hash` can be computed with any hasher, so long as that hasher is used
+/// consistently for each `Sharded` instance.
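+///
+/// For example (illustrative), on a 64-bit target with `SHARD_BITS == 5` this
+/// selects the shard from hash bits 52..=56: the top 7 bits are skipped and
+/// the 5 bits below them are used.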
+#[inline]
+pub fn get_shard_index_by_hash(hash: u64) -> usize {
+ let hash_len = mem::size_of::<usize>();
+ // Ignore the top 7 bits, as hashbrown uses those, and take the next
+ // SHARD_BITS highest bits. hashbrown also uses the lowest bits, so we
+ // can't use those either.
+ let bits = (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize;
+ bits % SHARDS
+}
diff --git a/compiler/rustc_data_structures/src/sip128.rs b/compiler/rustc_data_structures/src/sip128.rs
new file mode 100644
index 000000000..90793a97e
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sip128.rs
@@ -0,0 +1,496 @@
+//! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.
+
+use std::hash::Hasher;
+use std::mem::{self, MaybeUninit};
+use std::ptr;
+
+#[cfg(test)]
+mod tests;
+
+// The SipHash algorithm operates on 8-byte chunks.
+const ELEM_SIZE: usize = mem::size_of::<u64>();
+
+// Size of the buffer in number of elements, not including the spill.
+//
+// The selection of this size was guided by rustc-perf benchmark comparisons of
+// different buffer sizes. It should be periodically reevaluated as the compiler
+// implementation and input characteristics change.
+//
+// Using the same-sized buffer for everything we hash is a performance versus
+// complexity tradeoff. The ideal buffer size, and whether buffering should even
+// be used, depends on what is being hashed. It may be worth it to size the
+// buffer appropriately (perhaps by making SipHasher128 generic over the buffer
+// size) or disable buffering depending on what is being hashed. But at this
+// time, we use the same buffer size for everything.
+const BUFFER_CAPACITY: usize = 8;
+
+// Size of the buffer in bytes, not including the spill.
+const BUFFER_SIZE: usize = BUFFER_CAPACITY * ELEM_SIZE;
+
+// Size of the buffer in number of elements, including the spill.
+const BUFFER_WITH_SPILL_CAPACITY: usize = BUFFER_CAPACITY + 1;
+
+// Size of the buffer in bytes, including the spill.
+const BUFFER_WITH_SPILL_SIZE: usize = BUFFER_WITH_SPILL_CAPACITY * ELEM_SIZE;
+
+// Index of the spill element in the buffer.
+const BUFFER_SPILL_INDEX: usize = BUFFER_WITH_SPILL_CAPACITY - 1;
+
+#[derive(Debug, Clone)]
+#[repr(C)]
+pub struct SipHasher128 {
+ // The access pattern during hashing consists of accesses to `nbuf` and
+ // `buf` until the buffer is full, followed by accesses to `state` and
+ // `processed`, and then repetition of that pattern until hashing is done.
+ // This is the basis for the ordering of fields below. However, in practice
+ // the cache miss-rate for data access is extremely low regardless of order.
+ nbuf: usize, // how many bytes in buf are valid
+ buf: [MaybeUninit<u64>; BUFFER_WITH_SPILL_CAPACITY], // unprocessed bytes, read as little-endian
+ state: State, // hash State
+ processed: usize, // how many bytes we've processed
+}
+
+#[derive(Debug, Clone, Copy)]
+#[repr(C)]
+struct State {
+ // v0, v2 and v1, v3 show up in pairs in the algorithm,
+ // and simd implementations of SipHash will use vectors
+ // of v02 and v13. By placing them in this order in the struct,
+ // the compiler can pick up on just a few simd optimizations by itself.
+ v0: u64,
+ v2: u64,
+ v1: u64,
+ v3: u64,
+}
+
+macro_rules! compress {
+ ($state:expr) => {{ compress!($state.v0, $state.v1, $state.v2, $state.v3) }};
+ ($v0:expr, $v1:expr, $v2:expr, $v3:expr) => {{
+ $v0 = $v0.wrapping_add($v1);
+ $v1 = $v1.rotate_left(13);
+ $v1 ^= $v0;
+ $v0 = $v0.rotate_left(32);
+ $v2 = $v2.wrapping_add($v3);
+ $v3 = $v3.rotate_left(16);
+ $v3 ^= $v2;
+ $v0 = $v0.wrapping_add($v3);
+ $v3 = $v3.rotate_left(21);
+ $v3 ^= $v0;
+ $v2 = $v2.wrapping_add($v1);
+ $v1 = $v1.rotate_left(17);
+ $v1 ^= $v2;
+ $v2 = $v2.rotate_left(32);
+ }};
+}
+
+// Copies up to 8 bytes from source to destination. This performs better than
+// `ptr::copy_nonoverlapping` on microbenchmarks and may perform better on real
+// workloads since all of the copies have fixed sizes and avoid calling memcpy.
+//
+// This is specifically designed for copies of up to 8 bytes, because that's the
+// maximum number of bytes needed to fill an 8-byte-sized element on which
+// SipHash operates. Note that for variable-sized copies which are known to be
+// less than 8 bytes, this function will perform more work than necessary unless
+// the compiler is able to optimize the extra work away.
+#[inline]
+unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize) {
+ debug_assert!(count <= 8);
+
+ if count == 8 {
+ ptr::copy_nonoverlapping(src, dst, 8);
+ return;
+ }
+
+ let mut i = 0;
+ if i + 3 < count {
+ ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
+ i += 4;
+ }
+
+ if i + 1 < count {
+ ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
+ i += 2
+ }
+
+ if i < count {
+ *dst.add(i) = *src.add(i);
+ i += 1;
+ }
+
+ debug_assert_eq!(i, count);
+}
+
+// # Implementation
+//
+// This implementation uses buffering to reduce the hashing cost for inputs
+// consisting of many small integers. Buffering simplifies the integration of
+// integer input--the integer write function typically just appends to the
+// buffer with a statically sized write, updates metadata, and returns.
+//
+// Buffering also prevents alternating between writes that do and do not trigger
+// the hashing process. Only when the entire buffer is full do we transition
+// into hashing. This allows us to keep the hash state in registers for longer,
+// instead of loading and storing it before and after processing each element.
+//
+// When a write fills the buffer, a buffer processing function is invoked to
+// hash all of the buffered input. The buffer processing functions are marked
+// `#[inline(never)]` so that they aren't inlined into the append functions,
+// which ensures the more frequently called append functions remain inlineable
+// and don't include register pushing/popping that would only be made necessary
+// by inclusion of the complex buffer processing path which uses those
+// registers.
+//
+// The buffer includes a "spill"--an extra element at the end--which simplifies
+// the integer write buffer processing path. The value that fills the buffer can
+// be written with a statically sized write that may spill over into the spill.
+// After the buffer is processed, the part of the value that spilled over can be
+// written from the spill to the beginning of the buffer with another statically
+// sized write. This write may copy more bytes than actually spilled over, but
+// we maintain the metadata such that any extra copied bytes will be ignored by
+// subsequent processing. Due to the static sizes, this scheme performs better
+// than copying the exact number of bytes needed into the end and beginning of
+// the buffer.
+//
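+// For example (an illustrative walk-through): with BUFFER_SIZE == 64 and
+// nbuf == 62, a write_u64 copies 8 bytes starting at offset 62, so the last
+// 6 land in the spill. The full buffer is then hashed, LEN - 1 == 7 bytes are
+// copied from the spill back to the start (the extra byte is ignored via the
+// metadata), and nbuf becomes 6.
+//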
+// The buffer is uninitialized, which improves performance, but may preclude
+// efficient implementation of alternative approaches. The improvement is not so
+// large that an alternative approach should be disregarded because it cannot be
+// efficiently implemented with an uninitialized buffer. On the other hand, an
+// uninitialized buffer may become more important should a larger one be used.
+//
+// # Platform Dependence
+//
+// The SipHash algorithm operates on byte sequences. It parses the input stream
+// as 8-byte little-endian integers. Therefore, given the same byte sequence, it
+// produces the same result on big- and little-endian hardware.
+//
+// However, the Hasher trait has methods which operate on multi-byte integers.
+// How they are converted into byte sequences can be endian-dependent (by using
+// native byte order) or independent (by consistently using either LE or BE byte
+// order). It can also be `isize` and `usize` size dependent (by using the
+// native size), or independent (by converting to a common size), supposing the
+// values can be represented in 32 bits.
+//
+// In order to make `SipHasher128` consistent with `SipHasher` in libstd, we
+// choose to do the integer to byte sequence conversion in the platform-
+// dependent way. Clients can achieve platform-independent hashing by widening
+// `isize` and `usize` integers to 64 bits on 32-bit systems and byte-swapping
+// integers on big-endian systems before passing them to the writing functions.
+// This causes the input byte sequence to look identical on big- and little-
+// endian systems (supposing `isize` and `usize` values can be represented in 32
+// bits), which ensures platform-independent results.
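+//
+// For example, a client on a big-endian or 32-bit system could hash a `usize`
+// platform-independently as follows (an illustrative sketch, not rustc's
+// actual scheme):
+//
+//     hasher.write_u64((x as u64).to_le());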
+impl SipHasher128 {
+ #[inline]
+ pub fn new_with_keys(key0: u64, key1: u64) -> SipHasher128 {
+ let mut hasher = SipHasher128 {
+ nbuf: 0,
+ buf: MaybeUninit::uninit_array(),
+ state: State {
+ v0: key0 ^ 0x736f6d6570736575,
+ // The XOR with 0xee is only done on 128-bit algorithm version.
+ v1: key1 ^ (0x646f72616e646f6d ^ 0xee),
+ v2: key0 ^ 0x6c7967656e657261,
+ v3: key1 ^ 0x7465646279746573,
+ },
+ processed: 0,
+ };
+
+ unsafe {
+ // Initialize spill because we read from it in `short_write_process_buffer`.
+ *hasher.buf.get_unchecked_mut(BUFFER_SPILL_INDEX) = MaybeUninit::zeroed();
+ }
+
+ hasher
+ }
+
+ #[inline]
+ pub fn short_write<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
+ let nbuf = self.nbuf;
+ debug_assert!(LEN <= 8);
+ debug_assert!(nbuf < BUFFER_SIZE);
+ debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
+
+ if nbuf + LEN < BUFFER_SIZE {
+ unsafe {
+ // The memcpy call is optimized away because the size is known.
+ let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+ ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
+ }
+
+ self.nbuf = nbuf + LEN;
+
+ return;
+ }
+
+ unsafe { self.short_write_process_buffer(bytes) }
+ }
+
+ // A specialized write function for values with size <= 8 that should only
+ // be called when the write would cause the buffer to fill.
+ //
+ // SAFETY: the write of `x` into `self.buf` starting at byte offset
+ // `self.nbuf` must cause `self.buf` to become fully initialized (and not
+ // overflow) if it wasn't already.
+ #[inline(never)]
+ unsafe fn short_write_process_buffer<const LEN: usize>(&mut self, bytes: [u8; LEN]) {
+ let nbuf = self.nbuf;
+ debug_assert!(LEN <= 8);
+ debug_assert!(nbuf < BUFFER_SIZE);
+ debug_assert!(nbuf + LEN >= BUFFER_SIZE);
+ debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
+
+ // Copy first part of input into end of buffer, possibly into spill
+ // element. The memcpy call is optimized away because the size is known.
+ let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+ ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
+
+ // Process buffer.
+ for i in 0..BUFFER_CAPACITY {
+ let elem = self.buf.get_unchecked(i).assume_init().to_le();
+ self.state.v3 ^= elem;
+ Sip24Rounds::c_rounds(&mut self.state);
+ self.state.v0 ^= elem;
+ }
+
+ // Copy remaining input into start of buffer by copying LEN - 1
+ // bytes from the spill (at most LEN - 1 bytes could have overflowed
+ // into the spill). The memcpy call is optimized away because the size
+ // is known. And the whole copy is optimized away for LEN == 1.
+ let dst = self.buf.as_mut_ptr() as *mut u8;
+ let src = self.buf.get_unchecked(BUFFER_SPILL_INDEX) as *const _ as *const u8;
+ ptr::copy_nonoverlapping(src, dst, LEN - 1);
+
+ // This function should only be called when the write fills the buffer.
+ // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
+ // LEN is statically known, so the branch is optimized away.
+ self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
+ self.processed += BUFFER_SIZE;
+ }
+
+ // A write function for byte slices.
+ #[inline]
+ fn slice_write(&mut self, msg: &[u8]) {
+ let length = msg.len();
+ let nbuf = self.nbuf;
+ debug_assert!(nbuf < BUFFER_SIZE);
+
+ if nbuf + length < BUFFER_SIZE {
+ unsafe {
+ let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+
+ if length <= 8 {
+ copy_nonoverlapping_small(msg.as_ptr(), dst, length);
+ } else {
+ // This memcpy is *not* optimized away.
+ ptr::copy_nonoverlapping(msg.as_ptr(), dst, length);
+ }
+ }
+
+ self.nbuf = nbuf + length;
+
+ return;
+ }
+
+ unsafe { self.slice_write_process_buffer(msg) }
+ }
+
+ // A write function for byte slices that should only be called when the
+ // write would cause the buffer to fill.
+ //
+ // SAFETY: `self.buf` must be initialized up to the byte offset `self.nbuf`,
+ // and `msg` must contain enough bytes to initialize the rest of the element
+ // containing the byte offset `self.nbuf`.
+ #[inline(never)]
+ unsafe fn slice_write_process_buffer(&mut self, msg: &[u8]) {
+ let length = msg.len();
+ let nbuf = self.nbuf;
+ debug_assert!(nbuf < BUFFER_SIZE);
+ debug_assert!(nbuf + length >= BUFFER_SIZE);
+
+ // Always copy first part of input into current element of buffer.
+ // This function should only be called when the write fills the buffer,
+ // so we know that there is enough input to fill the current element.
+ let valid_in_elem = nbuf % ELEM_SIZE;
+ let needed_in_elem = ELEM_SIZE - valid_in_elem;
+
+ let src = msg.as_ptr();
+ let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
+ copy_nonoverlapping_small(src, dst, needed_in_elem);
+
+ // Process buffer.
+
+ // Using `nbuf / ELEM_SIZE + 1` rather than `(nbuf + needed_in_elem) /
+ // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
+ // We know that is true, because the last step ensured we have a full
+ // element in the buffer.
+ let last = nbuf / ELEM_SIZE + 1;
+
+ for i in 0..last {
+ let elem = self.buf.get_unchecked(i).assume_init().to_le();
+ self.state.v3 ^= elem;
+ Sip24Rounds::c_rounds(&mut self.state);
+ self.state.v0 ^= elem;
+ }
+
+ // Process the remaining element-sized chunks of input.
+ let mut processed = needed_in_elem;
+ let input_left = length - processed;
+ let elems_left = input_left / ELEM_SIZE;
+ let extra_bytes_left = input_left % ELEM_SIZE;
+
+ for _ in 0..elems_left {
+ let elem = (msg.as_ptr().add(processed) as *const u64).read_unaligned().to_le();
+ self.state.v3 ^= elem;
+ Sip24Rounds::c_rounds(&mut self.state);
+ self.state.v0 ^= elem;
+ processed += ELEM_SIZE;
+ }
+
+ // Copy remaining input into start of buffer.
+ let src = msg.as_ptr().add(processed);
+ let dst = self.buf.as_mut_ptr() as *mut u8;
+ copy_nonoverlapping_small(src, dst, extra_bytes_left);
+
+ self.nbuf = extra_bytes_left;
+ self.processed += nbuf + processed;
+ }
+
+ #[inline]
+ pub fn finish128(mut self) -> (u64, u64) {
+ debug_assert!(self.nbuf < BUFFER_SIZE);
+
+ // Process full elements in buffer.
+ let last = self.nbuf / ELEM_SIZE;
+
+ // Since we're consuming self, avoid updating members for a potential
+ // performance gain.
+ let mut state = self.state;
+
+ for i in 0..last {
+ let elem = unsafe { self.buf.get_unchecked(i).assume_init().to_le() };
+ state.v3 ^= elem;
+ Sip24Rounds::c_rounds(&mut state);
+ state.v0 ^= elem;
+ }
+
+ // Get remaining partial element.
+ let elem = if self.nbuf % ELEM_SIZE != 0 {
+ unsafe {
+ // Ensure element is initialized by writing zero bytes. At most
+ // `ELEM_SIZE - 1` are required given the above check. It's safe
+ // to write this many because we have the spill and we maintain
+ // `self.nbuf` such that this write will start before the spill.
+ let dst = (self.buf.as_mut_ptr() as *mut u8).add(self.nbuf);
+ ptr::write_bytes(dst, 0, ELEM_SIZE - 1);
+ self.buf.get_unchecked(last).assume_init().to_le()
+ }
+ } else {
+ 0
+ };
+
+ // Finalize the hash.
+ let length = self.processed + self.nbuf;
+ let b: u64 = ((length as u64 & 0xff) << 56) | elem;
+
+ state.v3 ^= b;
+ Sip24Rounds::c_rounds(&mut state);
+ state.v0 ^= b;
+
+ state.v2 ^= 0xee;
+ Sip24Rounds::d_rounds(&mut state);
+ let _0 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
+
+ state.v1 ^= 0xdd;
+ Sip24Rounds::d_rounds(&mut state);
+ let _1 = state.v0 ^ state.v1 ^ state.v2 ^ state.v3;
+
+ (_0, _1)
+ }
+}
+
+impl Hasher for SipHasher128 {
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.short_write(i.to_ne_bytes());
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.short_write(i.to_ne_bytes());
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.short_write(i.to_ne_bytes());
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.short_write(i.to_ne_bytes());
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ self.short_write(i.to_ne_bytes());
+ }
+
+ #[inline]
+ fn write_i8(&mut self, i: i8) {
+ self.short_write((i as u8).to_ne_bytes());
+ }
+
+ #[inline]
+ fn write_i16(&mut self, i: i16) {
+ self.short_write((i as u16).to_ne_bytes());
+ }
+
+ #[inline]
+ fn write_i32(&mut self, i: i32) {
+ self.short_write((i as u32).to_ne_bytes());
+ }
+
+ #[inline]
+ fn write_i64(&mut self, i: i64) {
+ self.short_write((i as u64).to_ne_bytes());
+ }
+
+ #[inline]
+ fn write_isize(&mut self, i: isize) {
+ self.short_write((i as usize).to_ne_bytes());
+ }
+
+ #[inline]
+ fn write(&mut self, msg: &[u8]) {
+ self.slice_write(msg);
+ }
+
+ #[inline]
+ fn write_str(&mut self, s: &str) {
+ // This hasher works byte-wise, and `0xFF` cannot show up in a `str`,
+ // so just hashing the one extra byte is enough to be prefix-free.
+ self.write(s.as_bytes());
+ self.write_u8(0xFF);
+ }
+
+ fn finish(&self) -> u64 {
+ panic!("SipHasher128 cannot provide valid 64 bit hashes")
+ }
+}
+
+#[derive(Debug, Clone, Default)]
+struct Sip24Rounds;
+
+impl Sip24Rounds {
+ #[inline]
+ fn c_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ }
+
+ #[inline]
+ fn d_rounds(state: &mut State) {
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ compress!(state);
+ }
+}
diff --git a/compiler/rustc_data_structures/src/sip128/tests.rs b/compiler/rustc_data_structures/src/sip128/tests.rs
new file mode 100644
index 000000000..5fe967c41
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sip128/tests.rs
@@ -0,0 +1,497 @@
+use super::*;
+
+use std::hash::{Hash, Hasher};
+
+// Hash just the bytes of the slice, without length prefix
+struct Bytes<'a>(&'a [u8]);
+
+impl<'a> Hash for Bytes<'a> {
+ #[allow(unused_must_use)]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ for byte in self.0 {
+ state.write_u8(*byte);
+ }
+ }
+}
+
+fn hash_with<T: Hash>(mut st: SipHasher128, x: &T) -> (u64, u64) {
+ x.hash(&mut st);
+ st.finish128()
+}
+
+fn hash<T: Hash>(x: &T) -> (u64, u64) {
+ hash_with(SipHasher128::new_with_keys(0, 0), x)
+}
+
+const TEST_VECTOR: [[u8; 16]; 64] = [
+ [
+ 0xa3, 0x81, 0x7f, 0x04, 0xba, 0x25, 0xa8, 0xe6, 0x6d, 0xf6, 0x72, 0x14, 0xc7, 0x55, 0x02,
+ 0x93,
+ ],
+ [
+ 0xda, 0x87, 0xc1, 0xd8, 0x6b, 0x99, 0xaf, 0x44, 0x34, 0x76, 0x59, 0x11, 0x9b, 0x22, 0xfc,
+ 0x45,
+ ],
+ [
+ 0x81, 0x77, 0x22, 0x8d, 0xa4, 0xa4, 0x5d, 0xc7, 0xfc, 0xa3, 0x8b, 0xde, 0xf6, 0x0a, 0xff,
+ 0xe4,
+ ],
+ [
+ 0x9c, 0x70, 0xb6, 0x0c, 0x52, 0x67, 0xa9, 0x4e, 0x5f, 0x33, 0xb6, 0xb0, 0x29, 0x85, 0xed,
+ 0x51,
+ ],
+ [
+ 0xf8, 0x81, 0x64, 0xc1, 0x2d, 0x9c, 0x8f, 0xaf, 0x7d, 0x0f, 0x6e, 0x7c, 0x7b, 0xcd, 0x55,
+ 0x79,
+ ],
+ [
+ 0x13, 0x68, 0x87, 0x59, 0x80, 0x77, 0x6f, 0x88, 0x54, 0x52, 0x7a, 0x07, 0x69, 0x0e, 0x96,
+ 0x27,
+ ],
+ [
+ 0x14, 0xee, 0xca, 0x33, 0x8b, 0x20, 0x86, 0x13, 0x48, 0x5e, 0xa0, 0x30, 0x8f, 0xd7, 0xa1,
+ 0x5e,
+ ],
+ [
+ 0xa1, 0xf1, 0xeb, 0xbe, 0xd8, 0xdb, 0xc1, 0x53, 0xc0, 0xb8, 0x4a, 0xa6, 0x1f, 0xf0, 0x82,
+ 0x39,
+ ],
+ [
+ 0x3b, 0x62, 0xa9, 0xba, 0x62, 0x58, 0xf5, 0x61, 0x0f, 0x83, 0xe2, 0x64, 0xf3, 0x14, 0x97,
+ 0xb4,
+ ],
+ [
+ 0x26, 0x44, 0x99, 0x06, 0x0a, 0xd9, 0xba, 0xab, 0xc4, 0x7f, 0x8b, 0x02, 0xbb, 0x6d, 0x71,
+ 0xed,
+ ],
+ [
+ 0x00, 0x11, 0x0d, 0xc3, 0x78, 0x14, 0x69, 0x56, 0xc9, 0x54, 0x47, 0xd3, 0xf3, 0xd0, 0xfb,
+ 0xba,
+ ],
+ [
+ 0x01, 0x51, 0xc5, 0x68, 0x38, 0x6b, 0x66, 0x77, 0xa2, 0xb4, 0xdc, 0x6f, 0x81, 0xe5, 0xdc,
+ 0x18,
+ ],
+ [
+ 0xd6, 0x26, 0xb2, 0x66, 0x90, 0x5e, 0xf3, 0x58, 0x82, 0x63, 0x4d, 0xf6, 0x85, 0x32, 0xc1,
+ 0x25,
+ ],
+ [
+ 0x98, 0x69, 0xe2, 0x47, 0xe9, 0xc0, 0x8b, 0x10, 0xd0, 0x29, 0x93, 0x4f, 0xc4, 0xb9, 0x52,
+ 0xf7,
+ ],
+ [
+ 0x31, 0xfc, 0xef, 0xac, 0x66, 0xd7, 0xde, 0x9c, 0x7e, 0xc7, 0x48, 0x5f, 0xe4, 0x49, 0x49,
+ 0x02,
+ ],
+ [
+ 0x54, 0x93, 0xe9, 0x99, 0x33, 0xb0, 0xa8, 0x11, 0x7e, 0x08, 0xec, 0x0f, 0x97, 0xcf, 0xc3,
+ 0xd9,
+ ],
+ [
+ 0x6e, 0xe2, 0xa4, 0xca, 0x67, 0xb0, 0x54, 0xbb, 0xfd, 0x33, 0x15, 0xbf, 0x85, 0x23, 0x05,
+ 0x77,
+ ],
+ [
+ 0x47, 0x3d, 0x06, 0xe8, 0x73, 0x8d, 0xb8, 0x98, 0x54, 0xc0, 0x66, 0xc4, 0x7a, 0xe4, 0x77,
+ 0x40,
+ ],
+ [
+ 0xa4, 0x26, 0xe5, 0xe4, 0x23, 0xbf, 0x48, 0x85, 0x29, 0x4d, 0xa4, 0x81, 0xfe, 0xae, 0xf7,
+ 0x23,
+ ],
+ [
+ 0x78, 0x01, 0x77, 0x31, 0xcf, 0x65, 0xfa, 0xb0, 0x74, 0xd5, 0x20, 0x89, 0x52, 0x51, 0x2e,
+ 0xb1,
+ ],
+ [
+ 0x9e, 0x25, 0xfc, 0x83, 0x3f, 0x22, 0x90, 0x73, 0x3e, 0x93, 0x44, 0xa5, 0xe8, 0x38, 0x39,
+ 0xeb,
+ ],
+ [
+ 0x56, 0x8e, 0x49, 0x5a, 0xbe, 0x52, 0x5a, 0x21, 0x8a, 0x22, 0x14, 0xcd, 0x3e, 0x07, 0x1d,
+ 0x12,
+ ],
+ [
+ 0x4a, 0x29, 0xb5, 0x45, 0x52, 0xd1, 0x6b, 0x9a, 0x46, 0x9c, 0x10, 0x52, 0x8e, 0xff, 0x0a,
+ 0xae,
+ ],
+ [
+ 0xc9, 0xd1, 0x84, 0xdd, 0xd5, 0xa9, 0xf5, 0xe0, 0xcf, 0x8c, 0xe2, 0x9a, 0x9a, 0xbf, 0x69,
+ 0x1c,
+ ],
+ [
+ 0x2d, 0xb4, 0x79, 0xae, 0x78, 0xbd, 0x50, 0xd8, 0x88, 0x2a, 0x8a, 0x17, 0x8a, 0x61, 0x32,
+ 0xad,
+ ],
+ [
+ 0x8e, 0xce, 0x5f, 0x04, 0x2d, 0x5e, 0x44, 0x7b, 0x50, 0x51, 0xb9, 0xea, 0xcb, 0x8d, 0x8f,
+ 0x6f,
+ ],
+ [
+ 0x9c, 0x0b, 0x53, 0xb4, 0xb3, 0xc3, 0x07, 0xe8, 0x7e, 0xae, 0xe0, 0x86, 0x78, 0x14, 0x1f,
+ 0x66,
+ ],
+ [
+ 0xab, 0xf2, 0x48, 0xaf, 0x69, 0xa6, 0xea, 0xe4, 0xbf, 0xd3, 0xeb, 0x2f, 0x12, 0x9e, 0xeb,
+ 0x94,
+ ],
+ [
+ 0x06, 0x64, 0xda, 0x16, 0x68, 0x57, 0x4b, 0x88, 0xb9, 0x35, 0xf3, 0x02, 0x73, 0x58, 0xae,
+ 0xf4,
+ ],
+ [
+ 0xaa, 0x4b, 0x9d, 0xc4, 0xbf, 0x33, 0x7d, 0xe9, 0x0c, 0xd4, 0xfd, 0x3c, 0x46, 0x7c, 0x6a,
+ 0xb7,
+ ],
+ [
+ 0xea, 0x5c, 0x7f, 0x47, 0x1f, 0xaf, 0x6b, 0xde, 0x2b, 0x1a, 0xd7, 0xd4, 0x68, 0x6d, 0x22,
+ 0x87,
+ ],
+ [
+ 0x29, 0x39, 0xb0, 0x18, 0x32, 0x23, 0xfa, 0xfc, 0x17, 0x23, 0xde, 0x4f, 0x52, 0xc4, 0x3d,
+ 0x35,
+ ],
+ [
+ 0x7c, 0x39, 0x56, 0xca, 0x5e, 0xea, 0xfc, 0x3e, 0x36, 0x3e, 0x9d, 0x55, 0x65, 0x46, 0xeb,
+ 0x68,
+ ],
+ [
+ 0x77, 0xc6, 0x07, 0x71, 0x46, 0xf0, 0x1c, 0x32, 0xb6, 0xb6, 0x9d, 0x5f, 0x4e, 0xa9, 0xff,
+ 0xcf,
+ ],
+ [
+ 0x37, 0xa6, 0x98, 0x6c, 0xb8, 0x84, 0x7e, 0xdf, 0x09, 0x25, 0xf0, 0xf1, 0x30, 0x9b, 0x54,
+ 0xde,
+ ],
+ [
+ 0xa7, 0x05, 0xf0, 0xe6, 0x9d, 0xa9, 0xa8, 0xf9, 0x07, 0x24, 0x1a, 0x2e, 0x92, 0x3c, 0x8c,
+ 0xc8,
+ ],
+ [
+ 0x3d, 0xc4, 0x7d, 0x1f, 0x29, 0xc4, 0x48, 0x46, 0x1e, 0x9e, 0x76, 0xed, 0x90, 0x4f, 0x67,
+ 0x11,
+ ],
+ [
+ 0x0d, 0x62, 0xbf, 0x01, 0xe6, 0xfc, 0x0e, 0x1a, 0x0d, 0x3c, 0x47, 0x51, 0xc5, 0xd3, 0x69,
+ 0x2b,
+ ],
+ [
+ 0x8c, 0x03, 0x46, 0x8b, 0xca, 0x7c, 0x66, 0x9e, 0xe4, 0xfd, 0x5e, 0x08, 0x4b, 0xbe, 0xe7,
+ 0xb5,
+ ],
+ [
+ 0x52, 0x8a, 0x5b, 0xb9, 0x3b, 0xaf, 0x2c, 0x9c, 0x44, 0x73, 0xcc, 0xe5, 0xd0, 0xd2, 0x2b,
+ 0xd9,
+ ],
+ [
+ 0xdf, 0x6a, 0x30, 0x1e, 0x95, 0xc9, 0x5d, 0xad, 0x97, 0xae, 0x0c, 0xc8, 0xc6, 0x91, 0x3b,
+ 0xd8,
+ ],
+ [
+ 0x80, 0x11, 0x89, 0x90, 0x2c, 0x85, 0x7f, 0x39, 0xe7, 0x35, 0x91, 0x28, 0x5e, 0x70, 0xb6,
+ 0xdb,
+ ],
+ [
+ 0xe6, 0x17, 0x34, 0x6a, 0xc9, 0xc2, 0x31, 0xbb, 0x36, 0x50, 0xae, 0x34, 0xcc, 0xca, 0x0c,
+ 0x5b,
+ ],
+ [
+ 0x27, 0xd9, 0x34, 0x37, 0xef, 0xb7, 0x21, 0xaa, 0x40, 0x18, 0x21, 0xdc, 0xec, 0x5a, 0xdf,
+ 0x89,
+ ],
+ [
+ 0x89, 0x23, 0x7d, 0x9d, 0xed, 0x9c, 0x5e, 0x78, 0xd8, 0xb1, 0xc9, 0xb1, 0x66, 0xcc, 0x73,
+ 0x42,
+ ],
+ [
+ 0x4a, 0x6d, 0x80, 0x91, 0xbf, 0x5e, 0x7d, 0x65, 0x11, 0x89, 0xfa, 0x94, 0xa2, 0x50, 0xb1,
+ 0x4c,
+ ],
+ [
+ 0x0e, 0x33, 0xf9, 0x60, 0x55, 0xe7, 0xae, 0x89, 0x3f, 0xfc, 0x0e, 0x3d, 0xcf, 0x49, 0x29,
+ 0x02,
+ ],
+ [
+ 0xe6, 0x1c, 0x43, 0x2b, 0x72, 0x0b, 0x19, 0xd1, 0x8e, 0xc8, 0xd8, 0x4b, 0xdc, 0x63, 0x15,
+ 0x1b,
+ ],
+ [
+ 0xf7, 0xe5, 0xae, 0xf5, 0x49, 0xf7, 0x82, 0xcf, 0x37, 0x90, 0x55, 0xa6, 0x08, 0x26, 0x9b,
+ 0x16,
+ ],
+ [
+ 0x43, 0x8d, 0x03, 0x0f, 0xd0, 0xb7, 0xa5, 0x4f, 0xa8, 0x37, 0xf2, 0xad, 0x20, 0x1a, 0x64,
+ 0x03,
+ ],
+ [
+ 0xa5, 0x90, 0xd3, 0xee, 0x4f, 0xbf, 0x04, 0xe3, 0x24, 0x7e, 0x0d, 0x27, 0xf2, 0x86, 0x42,
+ 0x3f,
+ ],
+ [
+ 0x5f, 0xe2, 0xc1, 0xa1, 0x72, 0xfe, 0x93, 0xc4, 0xb1, 0x5c, 0xd3, 0x7c, 0xae, 0xf9, 0xf5,
+ 0x38,
+ ],
+ [
+ 0x2c, 0x97, 0x32, 0x5c, 0xbd, 0x06, 0xb3, 0x6e, 0xb2, 0x13, 0x3d, 0xd0, 0x8b, 0x3a, 0x01,
+ 0x7c,
+ ],
+ [
+ 0x92, 0xc8, 0x14, 0x22, 0x7a, 0x6b, 0xca, 0x94, 0x9f, 0xf0, 0x65, 0x9f, 0x00, 0x2a, 0xd3,
+ 0x9e,
+ ],
+ [
+ 0xdc, 0xe8, 0x50, 0x11, 0x0b, 0xd8, 0x32, 0x8c, 0xfb, 0xd5, 0x08, 0x41, 0xd6, 0x91, 0x1d,
+ 0x87,
+ ],
+ [
+ 0x67, 0xf1, 0x49, 0x84, 0xc7, 0xda, 0x79, 0x12, 0x48, 0xe3, 0x2b, 0xb5, 0x92, 0x25, 0x83,
+ 0xda,
+ ],
+ [
+ 0x19, 0x38, 0xf2, 0xcf, 0x72, 0xd5, 0x4e, 0xe9, 0x7e, 0x94, 0x16, 0x6f, 0xa9, 0x1d, 0x2a,
+ 0x36,
+ ],
+ [
+ 0x74, 0x48, 0x1e, 0x96, 0x46, 0xed, 0x49, 0xfe, 0x0f, 0x62, 0x24, 0x30, 0x16, 0x04, 0x69,
+ 0x8e,
+ ],
+ [
+ 0x57, 0xfc, 0xa5, 0xde, 0x98, 0xa9, 0xd6, 0xd8, 0x00, 0x64, 0x38, 0xd0, 0x58, 0x3d, 0x8a,
+ 0x1d,
+ ],
+ [
+ 0x9f, 0xec, 0xde, 0x1c, 0xef, 0xdc, 0x1c, 0xbe, 0xd4, 0x76, 0x36, 0x74, 0xd9, 0x57, 0x53,
+ 0x59,
+ ],
+ [
+ 0xe3, 0x04, 0x0c, 0x00, 0xeb, 0x28, 0xf1, 0x53, 0x66, 0xca, 0x73, 0xcb, 0xd8, 0x72, 0xe7,
+ 0x40,
+ ],
+ [
+ 0x76, 0x97, 0x00, 0x9a, 0x6a, 0x83, 0x1d, 0xfe, 0xcc, 0xa9, 0x1c, 0x59, 0x93, 0x67, 0x0f,
+ 0x7a,
+ ],
+ [
+ 0x58, 0x53, 0x54, 0x23, 0x21, 0xf5, 0x67, 0xa0, 0x05, 0xd5, 0x47, 0xa4, 0xf0, 0x47, 0x59,
+ 0xbd,
+ ],
+ [
+ 0x51, 0x50, 0xd1, 0x77, 0x2f, 0x50, 0x83, 0x4a, 0x50, 0x3e, 0x06, 0x9a, 0x97, 0x3f, 0xbd,
+ 0x7c,
+ ],
+];
+
+// Test vector from reference implementation
+#[test]
+fn test_siphash_2_4_test_vector() {
+ let k0 = 0x_07_06_05_04_03_02_01_00;
+ let k1 = 0x_0f_0e_0d_0c_0b_0a_09_08;
+
+ let mut input: Vec<u8> = Vec::new();
+
+ for i in 0..64 {
+ let out = hash_with(SipHasher128::new_with_keys(k0, k1), &Bytes(&input[..]));
+ let expected = (
+ ((TEST_VECTOR[i][0] as u64) << 0)
+ | ((TEST_VECTOR[i][1] as u64) << 8)
+ | ((TEST_VECTOR[i][2] as u64) << 16)
+ | ((TEST_VECTOR[i][3] as u64) << 24)
+ | ((TEST_VECTOR[i][4] as u64) << 32)
+ | ((TEST_VECTOR[i][5] as u64) << 40)
+ | ((TEST_VECTOR[i][6] as u64) << 48)
+ | ((TEST_VECTOR[i][7] as u64) << 56),
+ ((TEST_VECTOR[i][8] as u64) << 0)
+ | ((TEST_VECTOR[i][9] as u64) << 8)
+ | ((TEST_VECTOR[i][10] as u64) << 16)
+ | ((TEST_VECTOR[i][11] as u64) << 24)
+ | ((TEST_VECTOR[i][12] as u64) << 32)
+ | ((TEST_VECTOR[i][13] as u64) << 40)
+ | ((TEST_VECTOR[i][14] as u64) << 48)
+ | ((TEST_VECTOR[i][15] as u64) << 56),
+ );
+
+ assert_eq!(out, expected);
+ input.push(i as u8);
+ }
+}
+
+#[test]
+#[cfg(target_arch = "arm")]
+fn test_hash_usize() {
+ let val = 0xdeadbeef_deadbeef_u64;
+ assert!(hash(&(val as u64)) != hash(&(val as usize)));
+ assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
+}
+#[test]
+#[cfg(target_arch = "x86_64")]
+fn test_hash_usize() {
+ let val = 0xdeadbeef_deadbeef_u64;
+ assert_eq!(hash(&(val as u64)), hash(&(val as usize)));
+ assert!(hash(&(val as u32)) != hash(&(val as usize)));
+}
+#[test]
+#[cfg(target_arch = "x86")]
+fn test_hash_usize() {
+ let val = 0xdeadbeef_deadbeef_u64;
+ assert!(hash(&(val as u64)) != hash(&(val as usize)));
+ assert_eq!(hash(&(val as u32)), hash(&(val as usize)));
+}
+
+#[test]
+fn test_hash_idempotent() {
+ let val64 = 0xdeadbeef_deadbeef_u64;
+ assert_eq!(hash(&val64), hash(&val64));
+ let val32 = 0xdeadbeef_u32;
+ assert_eq!(hash(&val32), hash(&val32));
+}
+
+#[test]
+fn test_hash_no_bytes_dropped_64() {
+ let val = 0xdeadbeef_deadbeef_u64;
+
+ assert!(hash(&val) != hash(&zero_byte(val, 0)));
+ assert!(hash(&val) != hash(&zero_byte(val, 1)));
+ assert!(hash(&val) != hash(&zero_byte(val, 2)));
+ assert!(hash(&val) != hash(&zero_byte(val, 3)));
+ assert!(hash(&val) != hash(&zero_byte(val, 4)));
+ assert!(hash(&val) != hash(&zero_byte(val, 5)));
+ assert!(hash(&val) != hash(&zero_byte(val, 6)));
+ assert!(hash(&val) != hash(&zero_byte(val, 7)));
+
+ fn zero_byte(val: u64, byte: usize) -> u64 {
+ assert!(byte < 8);
+ val & !(0xff << (byte * 8))
+ }
+}
+
+#[test]
+fn test_hash_no_bytes_dropped_32() {
+ let val = 0xdeadbeef_u32;
+
+ assert!(hash(&val) != hash(&zero_byte(val, 0)));
+ assert!(hash(&val) != hash(&zero_byte(val, 1)));
+ assert!(hash(&val) != hash(&zero_byte(val, 2)));
+ assert!(hash(&val) != hash(&zero_byte(val, 3)));
+
+ fn zero_byte(val: u32, byte: usize) -> u32 {
+ assert!(byte < 4);
+ val & !(0xff << (byte * 8))
+ }
+}
+
+#[test]
+fn test_hash_no_concat_alias() {
+ let s = ("aa", "bb");
+ let t = ("aabb", "");
+ let u = ("a", "abb");
+
+ assert!(s != t && t != u);
+ assert!(hash(&s) != hash(&t) && hash(&s) != hash(&u));
+
+ let u = [1, 0, 0, 0];
+ let v = (&u[..1], &u[1..3], &u[3..]);
+ let w = (&u[..], &u[4..4], &u[4..4]);
+
+ assert!(v != w);
+ assert!(hash(&v) != hash(&w));
+}
+
+#[test]
+fn test_short_write_works() {
+ let test_u8 = 0xFF_u8;
+ let test_u16 = 0x1122_u16;
+ let test_u32 = 0x22334455_u32;
+ let test_u64 = 0x33445566_778899AA_u64;
+ let test_u128 = 0x11223344_55667788_99AABBCC_DDEEFF77_u128;
+ let test_usize = 0xD0C0B0A0_usize;
+
+ let test_i8 = -1_i8;
+ let test_i16 = -2_i16;
+ let test_i32 = -3_i32;
+ let test_i64 = -4_i64;
+ let test_i128 = -5_i128;
+ let test_isize = -6_isize;
+
+ let mut h1 = SipHasher128::new_with_keys(0, 0);
+ h1.write(b"bytes");
+ h1.write(b"string");
+ h1.write_u8(test_u8);
+ h1.write_u16(test_u16);
+ h1.write_u32(test_u32);
+ h1.write_u64(test_u64);
+ h1.write_u128(test_u128);
+ h1.write_usize(test_usize);
+ h1.write_i8(test_i8);
+ h1.write_i16(test_i16);
+ h1.write_i32(test_i32);
+ h1.write_i64(test_i64);
+ h1.write_i128(test_i128);
+ h1.write_isize(test_isize);
+
+ let mut h2 = SipHasher128::new_with_keys(0, 0);
+ h2.write(b"bytes");
+ h2.write(b"string");
+ h2.write(&test_u8.to_ne_bytes());
+ h2.write(&test_u16.to_ne_bytes());
+ h2.write(&test_u32.to_ne_bytes());
+ h2.write(&test_u64.to_ne_bytes());
+ h2.write(&test_u128.to_ne_bytes());
+ h2.write(&test_usize.to_ne_bytes());
+ h2.write(&test_i8.to_ne_bytes());
+ h2.write(&test_i16.to_ne_bytes());
+ h2.write(&test_i32.to_ne_bytes());
+ h2.write(&test_i64.to_ne_bytes());
+ h2.write(&test_i128.to_ne_bytes());
+ h2.write(&test_isize.to_ne_bytes());
+
+ let h1_hash = h1.finish128();
+ let h2_hash = h2.finish128();
+
+ assert_eq!(h1_hash, h2_hash);
+}
+
+macro_rules! test_fill_buffer {
+ ($type:ty, $write_method:ident) => {{
+ // Test filling and overfilling the buffer from all possible offsets
+ // for a given integer type and its corresponding write method.
+ const SIZE: usize = std::mem::size_of::<$type>();
+ let input = [42; BUFFER_SIZE];
+ let x = 0x01234567_89ABCDEF_76543210_FEDCBA98_u128 as $type;
+ let x_bytes = &x.to_ne_bytes();
+
+ for i in 1..=SIZE {
+ let s = &input[..BUFFER_SIZE - i];
+
+ let mut h1 = SipHasher128::new_with_keys(7, 13);
+ h1.write(s);
+ h1.$write_method(x);
+
+ let mut h2 = SipHasher128::new_with_keys(7, 13);
+ h2.write(s);
+ h2.write(x_bytes);
+
+ let h1_hash = h1.finish128();
+ let h2_hash = h2.finish128();
+
+ assert_eq!(h1_hash, h2_hash);
+ }
+ }};
+}
+
+#[test]
+fn test_fill_buffer() {
+ test_fill_buffer!(u8, write_u8);
+ test_fill_buffer!(u16, write_u16);
+ test_fill_buffer!(u32, write_u32);
+ test_fill_buffer!(u64, write_u64);
+ test_fill_buffer!(u128, write_u128);
+ test_fill_buffer!(usize, write_usize);
+
+ test_fill_buffer!(i8, write_i8);
+ test_fill_buffer!(i16, write_i16);
+ test_fill_buffer!(i32, write_i32);
+ test_fill_buffer!(i64, write_i64);
+ test_fill_buffer!(i128, write_i128);
+ test_fill_buffer!(isize, write_isize);
+}
diff --git a/compiler/rustc_data_structures/src/small_c_str.rs b/compiler/rustc_data_structures/src/small_c_str.rs
new file mode 100644
index 000000000..3a8ab8ff9
--- /dev/null
+++ b/compiler/rustc_data_structures/src/small_c_str.rs
@@ -0,0 +1,81 @@
+use std::ffi;
+use std::ops::Deref;
+
+use smallvec::SmallVec;
+
+#[cfg(test)]
+mod tests;
+
+const SIZE: usize = 36;
+
+/// Like SmallVec but for C strings.
+#[derive(Clone)]
+pub struct SmallCStr {
+ data: SmallVec<[u8; SIZE]>,
+}
+
+impl SmallCStr {
+ #[inline]
+ pub fn new(s: &str) -> SmallCStr {
+ let len = s.len();
+ let len1 = len + 1;
+ let data = if len < SIZE {
+ let mut buf = [0; SIZE];
+ buf[..len].copy_from_slice(s.as_bytes());
+ SmallVec::from_buf_and_len(buf, len1)
+ } else {
+ let mut data = Vec::with_capacity(len1);
+ data.extend_from_slice(s.as_bytes());
+ data.push(0);
+ SmallVec::from_vec(data)
+ };
+ if let Err(e) = ffi::CStr::from_bytes_with_nul(&data) {
+ panic!("The string \"{}\" cannot be converted into a CStr: {}", s, e);
+ }
+ SmallCStr { data }
+ }
+
+ #[inline]
+ pub fn new_with_nul(s: &str) -> SmallCStr {
+ let b = s.as_bytes();
+ if let Err(e) = ffi::CStr::from_bytes_with_nul(b) {
+ panic!("The string \"{}\" cannot be converted into a CStr: {}", s, e);
+ }
+ SmallCStr { data: SmallVec::from_slice(s.as_bytes()) }
+ }
+
+ #[inline]
+ pub fn as_c_str(&self) -> &ffi::CStr {
+ unsafe { ffi::CStr::from_bytes_with_nul_unchecked(&self.data) }
+ }
+
+ #[inline]
+ pub fn len_with_nul(&self) -> usize {
+ self.data.len()
+ }
+
+ pub fn spilled(&self) -> bool {
+ self.data.spilled()
+ }
+}
+
+impl Deref for SmallCStr {
+ type Target = ffi::CStr;
+
+ #[inline]
+ fn deref(&self) -> &ffi::CStr {
+ self.as_c_str()
+ }
+}
+
+impl<'a> FromIterator<&'a str> for SmallCStr {
+ fn from_iter<T: IntoIterator<Item = &'a str>>(iter: T) -> Self {
+ let mut data =
+ iter.into_iter().flat_map(|s| s.as_bytes()).copied().collect::<SmallVec<_>>();
+ data.push(0);
+ if let Err(e) = ffi::CStr::from_bytes_with_nul(&data) {
+ panic!("The iterator {:?} cannot be converted into a CStr: {}", data, e);
+ }
+ Self { data }
+ }
+}
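
Because `SmallCStr` derefs to `ffi::CStr`, it can be handed straight to C APIs while avoiding a heap allocation for strings shorter than `SIZE` bytes. A hedged usage sketch, assuming `SmallCStr` is in scope (it lives in this crate and is not published separately); `puts` is just a convenient libc function for illustration:

```rust
use std::os::raw::c_char;

extern "C" {
    fn puts(s: *const c_char) -> i32;
}

fn print_via_ffi(s: &str) {
    // Short strings stay in the inline 36-byte buffer; longer ones spill
    // to the heap. Panics if `s` contains an interior NUL byte.
    let c = SmallCStr::new(s);
    unsafe {
        // `as_ptr` is available through the `Deref<Target = CStr>` impl.
        puts(c.as_ptr());
    }
}
```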
diff --git a/compiler/rustc_data_structures/src/small_c_str/tests.rs b/compiler/rustc_data_structures/src/small_c_str/tests.rs
new file mode 100644
index 000000000..47277604b
--- /dev/null
+++ b/compiler/rustc_data_structures/src/small_c_str/tests.rs
@@ -0,0 +1,45 @@
+use super::*;
+
+#[test]
+fn short() {
+ const TEXT: &str = "abcd";
+ let reference = ffi::CString::new(TEXT.to_string()).unwrap();
+
+ let scs = SmallCStr::new(TEXT);
+
+ assert_eq!(scs.len_with_nul(), TEXT.len() + 1);
+ assert_eq!(scs.as_c_str(), reference.as_c_str());
+ assert!(!scs.spilled());
+}
+
+#[test]
+fn empty() {
+ const TEXT: &str = "";
+ let reference = ffi::CString::new(TEXT.to_string()).unwrap();
+
+ let scs = SmallCStr::new(TEXT);
+
+ assert_eq!(scs.len_with_nul(), TEXT.len() + 1);
+ assert_eq!(scs.as_c_str(), reference.as_c_str());
+ assert!(!scs.spilled());
+}
+
+#[test]
+fn long() {
+ const TEXT: &str = "01234567890123456789012345678901234567890123456789\
+ 01234567890123456789012345678901234567890123456789\
+ 01234567890123456789012345678901234567890123456789";
+ let reference = ffi::CString::new(TEXT.to_string()).unwrap();
+
+ let scs = SmallCStr::new(TEXT);
+
+ assert_eq!(scs.len_with_nul(), TEXT.len() + 1);
+ assert_eq!(scs.as_c_str(), reference.as_c_str());
+ assert!(scs.spilled());
+}
+
+#[test]
+#[should_panic]
+fn internal_nul() {
+ let _ = SmallCStr::new("abcd\0def");
+}
diff --git a/compiler/rustc_data_structures/src/small_str.rs b/compiler/rustc_data_structures/src/small_str.rs
new file mode 100644
index 000000000..800acb1b0
--- /dev/null
+++ b/compiler/rustc_data_structures/src/small_str.rs
@@ -0,0 +1,68 @@
+use smallvec::SmallVec;
+
+#[cfg(test)]
+mod tests;
+
+/// Like SmallVec but for strings.
+#[derive(Default)]
+pub struct SmallStr<const N: usize>(SmallVec<[u8; N]>);
+
+impl<const N: usize> SmallStr<N> {
+ #[inline]
+ pub fn new() -> Self {
+ SmallStr(SmallVec::default())
+ }
+
+ #[inline]
+ pub fn push_str(&mut self, s: &str) {
+ self.0.extend_from_slice(s.as_bytes());
+ }
+
+ #[inline]
+ pub fn empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ #[inline]
+ pub fn spilled(&self) -> bool {
+ self.0.spilled()
+ }
+
+ #[inline]
+ pub fn as_str(&self) -> &str {
+ unsafe { std::str::from_utf8_unchecked(self.0.as_slice()) }
+ }
+}
+
+impl<const N: usize> std::ops::Deref for SmallStr<N> {
+ type Target = str;
+
+ #[inline]
+ fn deref(&self) -> &str {
+ self.as_str()
+ }
+}
+
+impl<const N: usize, A: AsRef<str>> FromIterator<A> for SmallStr<N> {
+ #[inline]
+ fn from_iter<T>(iter: T) -> Self
+ where
+ T: IntoIterator<Item = A>,
+ {
+ let mut s = SmallStr::default();
+ s.extend(iter);
+ s
+ }
+}
+
+impl<const N: usize, A: AsRef<str>> Extend<A> for SmallStr<N> {
+ #[inline]
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = A>,
+ {
+ for a in iter.into_iter() {
+ self.push_str(a.as_ref());
+ }
+ }
+}
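
The tests that follow exercise `from_iter` and spilling; for orientation, a quick sketch of the intended use (assuming `SmallStr` is in scope). `as_str` is sound because bytes only ever enter the buffer through `push_str`, so the contents are always valid UTF-8:

```rust
fn demo() {
    // Three short fragments collect into the inline 64-byte buffer
    // without touching the heap.
    let s: SmallStr<64> = ["foo", "::", "bar"].into_iter().collect();
    assert_eq!(s.as_str(), "foo::bar");
    assert!(!s.spilled());
}
```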
diff --git a/compiler/rustc_data_structures/src/small_str/tests.rs b/compiler/rustc_data_structures/src/small_str/tests.rs
new file mode 100644
index 000000000..7635a9b72
--- /dev/null
+++ b/compiler/rustc_data_structures/src/small_str/tests.rs
@@ -0,0 +1,20 @@
+use super::*;
+
+#[test]
+fn empty() {
+ let s = SmallStr::<1>::new();
+ assert!(s.empty());
+ assert_eq!("", s.as_str());
+ assert!(!s.spilled());
+}
+
+#[test]
+fn from_iter() {
+ let s = ["aa", "bb", "cc"].iter().collect::<SmallStr<6>>();
+ assert_eq!("aabbcc", s.as_str());
+ assert!(!s.spilled());
+
+ let s = ["aa", "bb", "cc", "dd"].iter().collect::<SmallStr<6>>();
+ assert_eq!("aabbccdd", s.as_str());
+ assert!(s.spilled());
+}
diff --git a/compiler/rustc_data_structures/src/snapshot_map/mod.rs b/compiler/rustc_data_structures/src/snapshot_map/mod.rs
new file mode 100644
index 000000000..8a50179cd
--- /dev/null
+++ b/compiler/rustc_data_structures/src/snapshot_map/mod.rs
@@ -0,0 +1,143 @@
+use crate::fx::FxHashMap;
+use crate::undo_log::{Rollback, Snapshots, UndoLogs, VecLog};
+use std::borrow::{Borrow, BorrowMut};
+use std::hash::Hash;
+use std::marker::PhantomData;
+use std::ops;
+
+pub use crate::undo_log::Snapshot;
+
+#[cfg(test)]
+mod tests;
+
+pub type SnapshotMapStorage<K, V> = SnapshotMap<K, V, FxHashMap<K, V>, ()>;
+pub type SnapshotMapRef<'a, K, V, L> = SnapshotMap<K, V, &'a mut FxHashMap<K, V>, &'a mut L>;
+
+#[derive(Clone)]
+pub struct SnapshotMap<K, V, M = FxHashMap<K, V>, L = VecLog<UndoLog<K, V>>> {
+ map: M,
+ undo_log: L,
+ _marker: PhantomData<(K, V)>,
+}
+
+// HACK(eddyb) manual impl avoids `Default` bounds on `K` and `V`.
+impl<K, V, M, L> Default for SnapshotMap<K, V, M, L>
+where
+ M: Default,
+ L: Default,
+{
+ fn default() -> Self {
+ SnapshotMap { map: Default::default(), undo_log: Default::default(), _marker: PhantomData }
+ }
+}
+
+#[derive(Clone)]
+pub enum UndoLog<K, V> {
+ Inserted(K),
+ Overwrite(K, V),
+ Purged,
+}
+
+impl<K, V, M, L> SnapshotMap<K, V, M, L> {
+ #[inline]
+ pub fn with_log<L2>(&mut self, undo_log: L2) -> SnapshotMap<K, V, &mut M, L2> {
+ SnapshotMap { map: &mut self.map, undo_log, _marker: PhantomData }
+ }
+}
+
+impl<K, V, M, L> SnapshotMap<K, V, M, L>
+where
+ K: Hash + Clone + Eq,
+ M: BorrowMut<FxHashMap<K, V>> + Borrow<FxHashMap<K, V>>,
+ L: UndoLogs<UndoLog<K, V>>,
+{
+ pub fn clear(&mut self) {
+ self.map.borrow_mut().clear();
+ self.undo_log.clear();
+ }
+
+ pub fn insert(&mut self, key: K, value: V) -> bool {
+ match self.map.borrow_mut().insert(key.clone(), value) {
+ None => {
+ self.undo_log.push(UndoLog::Inserted(key));
+ true
+ }
+ Some(old_value) => {
+ self.undo_log.push(UndoLog::Overwrite(key, old_value));
+ false
+ }
+ }
+ }
+
+ pub fn remove(&mut self, key: K) -> bool {
+ match self.map.borrow_mut().remove(&key) {
+ Some(old_value) => {
+ self.undo_log.push(UndoLog::Overwrite(key, old_value));
+ true
+ }
+ None => false,
+ }
+ }
+
+ pub fn get(&self, key: &K) -> Option<&V> {
+ self.map.borrow().get(key)
+ }
+}
+
+impl<K, V> SnapshotMap<K, V>
+where
+ K: Hash + Clone + Eq,
+{
+ pub fn snapshot(&mut self) -> Snapshot {
+ self.undo_log.start_snapshot()
+ }
+
+ pub fn commit(&mut self, snapshot: Snapshot) {
+ self.undo_log.commit(snapshot)
+ }
+
+ pub fn rollback_to(&mut self, snapshot: Snapshot) {
+ let map = &mut self.map;
+ self.undo_log.rollback_to(|| map, snapshot)
+ }
+}
+
+impl<'k, K, V, M, L> ops::Index<&'k K> for SnapshotMap<K, V, M, L>
+where
+ K: Hash + Clone + Eq,
+ M: Borrow<FxHashMap<K, V>>,
+{
+ type Output = V;
+ fn index(&self, key: &'k K) -> &V {
+ &self.map.borrow()[key]
+ }
+}
+
+impl<K, V, M, L> Rollback<UndoLog<K, V>> for SnapshotMap<K, V, M, L>
+where
+ K: Eq + Hash,
+ M: Rollback<UndoLog<K, V>>,
+{
+ fn reverse(&mut self, undo: UndoLog<K, V>) {
+ self.map.reverse(undo)
+ }
+}
+
+impl<K, V> Rollback<UndoLog<K, V>> for FxHashMap<K, V>
+where
+ K: Eq + Hash,
+{
+ fn reverse(&mut self, undo: UndoLog<K, V>) {
+ match undo {
+ UndoLog::Inserted(key) => {
+ self.remove(&key);
+ }
+
+ UndoLog::Overwrite(key, old_value) => {
+ self.insert(key, old_value);
+ }
+
+ UndoLog::Purged => {}
+ }
+ }
+}
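
The tests below exercise the snapshot/rollback behavior. The underlying mechanism is an undo log: each mutation records its inverse (`Inserted` for fresh keys, `Overwrite` carrying the displaced value), a snapshot is a position in that log, and rollback pops and replays entries until the log shrinks back to that position. A minimal standalone sketch of the same pattern, not the implementation above (which additionally abstracts over the storage and log types):

```rust
use std::collections::HashMap;
use std::hash::Hash;

enum Undo<K, V> {
    Inserted(K),
    Overwrite(K, V),
}

struct MiniSnapshotMap<K, V> {
    map: HashMap<K, V>,
    log: Vec<Undo<K, V>>,
}

impl<K: Hash + Eq + Clone, V> MiniSnapshotMap<K, V> {
    fn new() -> Self {
        MiniSnapshotMap { map: HashMap::new(), log: Vec::new() }
    }

    fn insert(&mut self, k: K, v: V) {
        // Record the inverse of the mutation before it is forgotten.
        match self.map.insert(k.clone(), v) {
            None => self.log.push(Undo::Inserted(k)),
            Some(old) => self.log.push(Undo::Overwrite(k, old)),
        }
    }

    fn snapshot(&self) -> usize {
        self.log.len() // a snapshot is just a position in the log
    }

    fn rollback_to(&mut self, snapshot: usize) {
        // Replay the tail of the log backwards.
        while self.log.len() > snapshot {
            match self.log.pop().unwrap() {
                Undo::Inserted(k) => {
                    self.map.remove(&k);
                }
                Undo::Overwrite(k, old) => {
                    self.map.insert(k, old);
                }
            }
        }
    }
}
```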
diff --git a/compiler/rustc_data_structures/src/snapshot_map/tests.rs b/compiler/rustc_data_structures/src/snapshot_map/tests.rs
new file mode 100644
index 000000000..72ca53c2b
--- /dev/null
+++ b/compiler/rustc_data_structures/src/snapshot_map/tests.rs
@@ -0,0 +1,43 @@
+use super::SnapshotMap;
+
+#[test]
+fn basic() {
+ let mut map = SnapshotMap::default();
+ map.insert(22, "twenty-two");
+ let snapshot = map.snapshot();
+ map.insert(22, "thirty-three");
+ assert_eq!(map[&22], "thirty-three");
+ map.insert(44, "forty-four");
+ assert_eq!(map[&44], "forty-four");
+ assert_eq!(map.get(&33), None);
+ map.rollback_to(snapshot);
+ assert_eq!(map[&22], "twenty-two");
+ assert_eq!(map.get(&33), None);
+ assert_eq!(map.get(&44), None);
+}
+
+#[test]
+#[should_panic]
+fn out_of_order() {
+ let mut map = SnapshotMap::default();
+ map.insert(22, "twenty-two");
+ let snapshot1 = map.snapshot();
+ map.insert(33, "thirty-three");
+ let snapshot2 = map.snapshot();
+ map.insert(44, "forty-four");
+ map.rollback_to(snapshot1); // bogus, but accepted
+ map.rollback_to(snapshot2); // asserts
+}
+
+#[test]
+fn nested_commit_then_rollback() {
+ let mut map = SnapshotMap::default();
+ map.insert(22, "twenty-two");
+ let snapshot1 = map.snapshot();
+ let snapshot2 = map.snapshot();
+ map.insert(22, "thirty-three");
+ map.commit(snapshot2);
+ assert_eq!(map[&22], "thirty-three");
+ map.rollback_to(snapshot1);
+ assert_eq!(map[&22], "twenty-two");
+}
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
new file mode 100644
index 000000000..9efea1228
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -0,0 +1,302 @@
+use crate::stable_hasher::{HashStable, StableHasher};
+use std::borrow::Borrow;
+use std::cmp::Ordering;
+use std::iter::FromIterator;
+use std::mem;
+use std::ops::{Bound, Index, IndexMut, RangeBounds};
+
+mod index_map;
+
+pub use index_map::SortedIndexMultiMap;
+
+/// `SortedMap` is a data structure with similar characteristics as BTreeMap but
+/// slightly different trade-offs: lookup is *O*(log(*n*)), insertion and
+/// removal are *O*(*n*), but elements can be iterated in order cheaply.
+///
+/// `SortedMap` can be faster than a `BTreeMap` for small sizes (<50) since it
+/// stores data in a more compact way. It also supports accessing contiguous
+/// ranges of elements as a slice, and slices of already sorted elements can be
+/// inserted efficiently.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
+pub struct SortedMap<K, V> {
+ data: Vec<(K, V)>,
+}
+
+impl<K, V> Default for SortedMap<K, V> {
+ #[inline]
+ fn default() -> SortedMap<K, V> {
+ SortedMap { data: Vec::new() }
+ }
+}
+
+impl<K, V> SortedMap<K, V> {
+ #[inline]
+ pub const fn new() -> SortedMap<K, V> {
+ SortedMap { data: Vec::new() }
+ }
+}
+
+impl<K: Ord, V> SortedMap<K, V> {
+ /// Construct a `SortedMap` from a presorted set of elements. This is faster
+ /// than creating an empty map and then inserting the elements individually.
+ ///
+ /// It is up to the caller to make sure that the elements are sorted by key
+ /// and that there are no duplicates.
+ #[inline]
+ pub fn from_presorted_elements(elements: Vec<(K, V)>) -> SortedMap<K, V> {
+ debug_assert!(elements.array_windows().all(|[fst, snd]| fst.0 < snd.0));
+
+ SortedMap { data: elements }
+ }
+
+ #[inline]
+ pub fn insert(&mut self, key: K, mut value: V) -> Option<V> {
+ match self.lookup_index_for(&key) {
+ Ok(index) => {
+ let slot = unsafe { self.data.get_unchecked_mut(index) };
+ mem::swap(&mut slot.1, &mut value);
+ Some(value)
+ }
+ Err(index) => {
+ self.data.insert(index, (key, value));
+ None
+ }
+ }
+ }
+
+ #[inline]
+ pub fn remove(&mut self, key: &K) -> Option<V> {
+ match self.lookup_index_for(key) {
+ Ok(index) => Some(self.data.remove(index).1),
+ Err(_) => None,
+ }
+ }
+
+ #[inline]
+ pub fn get<Q>(&self, key: &Q) -> Option<&V>
+ where
+ K: Borrow<Q>,
+ Q: Ord + ?Sized,
+ {
+ match self.lookup_index_for(key) {
+ Ok(index) => unsafe { Some(&self.data.get_unchecked(index).1) },
+ Err(_) => None,
+ }
+ }
+
+ #[inline]
+ pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
+ where
+ K: Borrow<Q>,
+ Q: Ord + ?Sized,
+ {
+ match self.lookup_index_for(key) {
+ Ok(index) => unsafe { Some(&mut self.data.get_unchecked_mut(index).1) },
+ Err(_) => None,
+ }
+ }
+
+ #[inline]
+ pub fn clear(&mut self) {
+ self.data.clear();
+ }
+
+ /// Iterate over elements, sorted by key
+ #[inline]
+ pub fn iter(&self) -> std::slice::Iter<'_, (K, V)> {
+ self.data.iter()
+ }
+
+ /// Iterate over the keys, sorted
+ #[inline]
+ pub fn keys(&self) -> impl Iterator<Item = &K> + ExactSizeIterator + DoubleEndedIterator {
+ self.data.iter().map(|&(ref k, _)| k)
+ }
+
+ /// Iterate over values, sorted by key
+ #[inline]
+ pub fn values(&self) -> impl Iterator<Item = &V> + ExactSizeIterator + DoubleEndedIterator {
+ self.data.iter().map(|&(_, ref v)| v)
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.data.len()
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ #[inline]
+ pub fn range<R>(&self, range: R) -> &[(K, V)]
+ where
+ R: RangeBounds<K>,
+ {
+ let (start, end) = self.range_slice_indices(range);
+ &self.data[start..end]
+ }
+
+ #[inline]
+ pub fn remove_range<R>(&mut self, range: R)
+ where
+ R: RangeBounds<K>,
+ {
+ let (start, end) = self.range_slice_indices(range);
+ self.data.splice(start..end, std::iter::empty());
+ }
+
+ /// Mutate all keys with the given function `f`. This mutation must not
+ /// change the sort-order of keys.
+ #[inline]
+ pub fn offset_keys<F>(&mut self, f: F)
+ where
+ F: Fn(&mut K),
+ {
+ self.data.iter_mut().map(|&mut (ref mut k, _)| k).for_each(f);
+ }
+
+ /// Inserts a presorted range of elements into the map. If the range can be
+/// inserted as a whole in between two existing elements of the map, this
+ /// will be faster than inserting the elements individually.
+ ///
+ /// It is up to the caller to make sure that the elements are sorted by key
+ /// and that there are no duplicates.
+ #[inline]
+ pub fn insert_presorted(&mut self, mut elements: Vec<(K, V)>) {
+ if elements.is_empty() {
+ return;
+ }
+
+ debug_assert!(elements.array_windows().all(|[fst, snd]| fst.0 < snd.0));
+
+ let start_index = self.lookup_index_for(&elements[0].0);
+
+ let drain = match start_index {
+ Ok(index) => {
+ let mut drain = elements.drain(..);
+ self.data[index] = drain.next().unwrap();
+ drain
+ }
+ Err(index) => {
+ if index == self.data.len() || elements.last().unwrap().0 < self.data[index].0 {
+ // We can copy the whole range without having to mix with
+ // existing elements.
+ self.data.splice(index..index, elements.drain(..));
+ return;
+ }
+
+ let mut drain = elements.drain(..);
+ self.data.insert(index, drain.next().unwrap());
+ drain
+ }
+ };
+
+ // Insert the rest
+ for (k, v) in drain {
+ self.insert(k, v);
+ }
+ }
+
+ /// Looks up the key in `self.data` via `slice::binary_search()`.
+ #[inline(always)]
+ fn lookup_index_for<Q>(&self, key: &Q) -> Result<usize, usize>
+ where
+ K: Borrow<Q>,
+ Q: Ord + ?Sized,
+ {
+ self.data.binary_search_by(|&(ref x, _)| x.borrow().cmp(key))
+ }
+
+ #[inline]
+ fn range_slice_indices<R>(&self, range: R) -> (usize, usize)
+ where
+ R: RangeBounds<K>,
+ {
+ let start = match range.start_bound() {
+ Bound::Included(ref k) => match self.lookup_index_for(k) {
+ Ok(index) | Err(index) => index,
+ },
+ Bound::Excluded(ref k) => match self.lookup_index_for(k) {
+ Ok(index) => index + 1,
+ Err(index) => index,
+ },
+ Bound::Unbounded => 0,
+ };
+
+ let end = match range.end_bound() {
+ Bound::Included(ref k) => match self.lookup_index_for(k) {
+ Ok(index) => index + 1,
+ Err(index) => index,
+ },
+ Bound::Excluded(ref k) => match self.lookup_index_for(k) {
+ Ok(index) | Err(index) => index,
+ },
+ Bound::Unbounded => self.data.len(),
+ };
+
+ (start, end)
+ }
+
+ #[inline]
+ pub fn contains_key<Q>(&self, key: &Q) -> bool
+ where
+ K: Borrow<Q>,
+ Q: Ord + ?Sized,
+ {
+ self.get(key).is_some()
+ }
+}
+
+impl<K: Ord, V> IntoIterator for SortedMap<K, V> {
+ type Item = (K, V);
+ type IntoIter = std::vec::IntoIter<(K, V)>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.data.into_iter()
+ }
+}
+
+impl<'a, K, Q, V> Index<&'a Q> for SortedMap<K, V>
+where
+ K: Ord + Borrow<Q>,
+ Q: Ord + ?Sized,
+{
+ type Output = V;
+
+ fn index(&self, key: &Q) -> &Self::Output {
+ self.get(key).expect("no entry found for key")
+ }
+}
+
+impl<'a, K, Q, V> IndexMut<&'a Q> for SortedMap<K, V>
+where
+ K: Ord + Borrow<Q>,
+ Q: Ord + ?Sized,
+{
+ fn index_mut(&mut self, key: &Q) -> &mut Self::Output {
+ self.get_mut(key).expect("no entry found for key")
+ }
+}
+
+impl<K: Ord, V> FromIterator<(K, V)> for SortedMap<K, V> {
+ fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
+ let mut data: Vec<(K, V)> = iter.into_iter().collect();
+
+ data.sort_unstable_by(|&(ref k1, _), &(ref k2, _)| k1.cmp(k2));
+ data.dedup_by(|&mut (ref k1, _), &mut (ref k2, _)| k1.cmp(k2) == Ordering::Equal);
+
+ SortedMap { data }
+ }
+}
+
+impl<K: HashStable<CTX>, V: HashStable<CTX>, CTX> HashStable<CTX> for SortedMap<K, V> {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.data.hash_stable(ctx, hasher);
+ }
+}
+
+#[cfg(test)]
+mod tests;
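
Every `SortedMap` operation funnels through `lookup_index_for`, i.e. a binary search over the sorted `Vec<(K, V)>`: `Ok(i)` means the key is present at index `i`, while `Err(i)` is the position where it would be inserted. A self-contained sketch of that core technique, using only std types:

```rust
// A map as a sorted Vec<(K, V)>: binary search yields either the hit
// index or the insertion point.
fn insert_sorted<K: Ord, V>(data: &mut Vec<(K, V)>, key: K, value: V) -> Option<V> {
    match data.binary_search_by(|(k, _)| k.cmp(&key)) {
        // Key present: swap in the new value and return the old one.
        Ok(i) => Some(std::mem::replace(&mut data[i].1, value)),
        // Key absent: splice the pair in at its sort position.
        Err(i) => {
            data.insert(i, (key, value));
            None
        }
    }
}

fn main() {
    let mut m: Vec<(u32, &str)> = Vec::new();
    assert_eq!(insert_sorted(&mut m, 3, "three"), None);
    assert_eq!(insert_sorted(&mut m, 1, "one"), None);
    assert_eq!(insert_sorted(&mut m, 3, "tres"), Some("three"));
    assert_eq!(m, vec![(1, "one"), (3, "tres")]);
}
```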
diff --git a/compiler/rustc_data_structures/src/sorted_map/index_map.rs b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
new file mode 100644
index 000000000..0ec32dc43
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
@@ -0,0 +1,154 @@
+//! A variant of `SortedMap` that preserves insertion order.
+
+use std::hash::{Hash, Hasher};
+use std::iter::FromIterator;
+
+use crate::stable_hasher::{HashStable, StableHasher};
+use rustc_index::vec::{Idx, IndexVec};
+
+/// An indexed multi-map that preserves insertion order while permitting both *O*(log *n*) lookup of
+/// an item by key and *O*(1) lookup by index.
+///
+/// This data structure is a hybrid of an [`IndexVec`] and a [`SortedMap`]. Like `IndexVec`,
+/// `SortedIndexMultiMap` assigns a typed index to each item while preserving insertion order.
+/// Like `SortedMap`, `SortedIndexMultiMap` has efficient lookup of items by key. However, this
+/// is accomplished by sorting an array of item indices instead of the items themselves.
+///
+/// Unlike `SortedMap`, this data structure can hold multiple equivalent items at once, so the
+/// `get_by_key` method and its variants return an iterator instead of an `Option`. Equivalent
+/// items will be yielded in insertion order.
+///
+/// Unlike a general-purpose map like `BTreeMap` or `HashMap`, `SortedMap` and
+/// `SortedIndexMultiMap` require *O*(*n*) time to insert a single item. This is because we may need
+/// to insert into the middle of the sorted array. Users should avoid mutating this data structure
+/// in-place.
+///
+/// [`SortedMap`]: super::SortedMap
+#[derive(Clone, Debug)]
+pub struct SortedIndexMultiMap<I: Idx, K, V> {
+ /// The elements of the map in insertion order.
+ items: IndexVec<I, (K, V)>,
+
+ /// Indices of the items in the set, sorted by the item's key.
+ idx_sorted_by_item_key: Vec<I>,
+}
+
+impl<I: Idx, K: Ord, V> SortedIndexMultiMap<I, K, V> {
+ #[inline]
+ pub fn new() -> Self {
+ SortedIndexMultiMap { items: IndexVec::new(), idx_sorted_by_item_key: Vec::new() }
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.items.len()
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.items.is_empty()
+ }
+
+ /// Returns an iterator over the items in the map in insertion order.
+ #[inline]
+ pub fn into_iter(self) -> impl DoubleEndedIterator<Item = (K, V)> {
+ self.items.into_iter()
+ }
+
+ /// Returns an iterator over the items in the map in insertion order along with their indices.
+ #[inline]
+ pub fn into_iter_enumerated(self) -> impl DoubleEndedIterator<Item = (I, (K, V))> {
+ self.items.into_iter_enumerated()
+ }
+
+ /// Returns an iterator over the items in the map in insertion order.
+ #[inline]
+ pub fn iter(&self) -> impl '_ + DoubleEndedIterator<Item = (&K, &V)> {
+ self.items.iter().map(|(ref k, ref v)| (k, v))
+ }
+
+ /// Returns an iterator over the items in the map in insertion order along with their indices.
+ #[inline]
+ pub fn iter_enumerated(&self) -> impl '_ + DoubleEndedIterator<Item = (I, (&K, &V))> {
+ self.items.iter_enumerated().map(|(i, (ref k, ref v))| (i, (k, v)))
+ }
+
+ /// Returns the item in the map with the given index.
+ #[inline]
+ pub fn get(&self, idx: I) -> Option<&(K, V)> {
+ self.items.get(idx)
+ }
+
+ /// Returns an iterator over the items in the map that are equal to `key`.
+ ///
+ /// If there are multiple items that are equivalent to `key`, they will be yielded in
+ /// insertion order.
+ #[inline]
+ pub fn get_by_key(&self, key: K) -> impl Iterator<Item = &V> + '_ {
+ self.get_by_key_enumerated(key).map(|(_, v)| v)
+ }
+
+ /// Returns an iterator over the items in the map that are equal to `key` along with their
+ /// indices.
+ ///
+ /// If there are multiple items that are equivalent to `key`, they will be yielded in
+ /// insertion order.
+ #[inline]
+ pub fn get_by_key_enumerated(&self, key: K) -> impl Iterator<Item = (I, &V)> + '_ {
+ let lower_bound = self.idx_sorted_by_item_key.partition_point(|&i| self.items[i].0 < key);
+ self.idx_sorted_by_item_key[lower_bound..].iter().map_while(move |&i| {
+ let (k, v) = &self.items[i];
+ (k == &key).then_some((i, v))
+ })
+ }
+}
+
+impl<I: Idx, K: Eq, V: Eq> Eq for SortedIndexMultiMap<I, K, V> {}
+impl<I: Idx, K: PartialEq, V: PartialEq> PartialEq for SortedIndexMultiMap<I, K, V> {
+ fn eq(&self, other: &Self) -> bool {
+ // No need to compare the sorted index. If the items are the same, the index will be too.
+ self.items == other.items
+ }
+}
+
+impl<I: Idx, K, V> Hash for SortedIndexMultiMap<I, K, V>
+where
+ K: Hash,
+ V: Hash,
+{
+ fn hash<H: Hasher>(&self, hasher: &mut H) {
+ self.items.hash(hasher)
+ }
+}
+impl<I: Idx, K, V, C> HashStable<C> for SortedIndexMultiMap<I, K, V>
+where
+ K: HashStable<C>,
+ V: HashStable<C>,
+{
+ fn hash_stable(&self, ctx: &mut C, hasher: &mut StableHasher) {
+ self.items.hash_stable(ctx, hasher)
+ }
+}
+
+impl<I: Idx, K: Ord, V> FromIterator<(K, V)> for SortedIndexMultiMap<I, K, V> {
+ fn from_iter<J>(iter: J) -> Self
+ where
+ J: IntoIterator<Item = (K, V)>,
+ {
+ let items = IndexVec::from_iter(iter);
+ let mut idx_sorted_by_item_key: Vec<_> = items.indices().collect();
+
+ // `sort_by_key` is stable, so insertion order is preserved for duplicate items.
+ idx_sorted_by_item_key.sort_by_key(|&idx| &items[idx].0);
+
+ SortedIndexMultiMap { items, idx_sorted_by_item_key }
+ }
+}
+
+impl<I: Idx, K, V> std::ops::Index<I> for SortedIndexMultiMap<I, K, V> {
+ type Output = V;
+
+ fn index(&self, idx: I) -> &Self::Output {
+ &self.items[idx].1
+ }
+}
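
The key trick in `get_by_key_enumerated` is that `idx_sorted_by_item_key` is sorted *stably* by key, so `partition_point` finds the first slot whose key is not less than the query, and `map_while` then yields all equal keys in their original insertion order. A self-contained sketch of the same lookup scheme:

```rust
// Items stay in insertion order; a separate index array, sorted stably
// by key, drives the search.
fn get_by_key<'a, K: Ord, V>(
    items: &'a [(K, V)],
    idx_sorted: &'a [usize],
    key: &'a K,
) -> impl Iterator<Item = &'a V> + 'a {
    // First position whose key is >= the query.
    let lo = idx_sorted.partition_point(|&i| items[i].0 < *key);
    // Walk forward while the keys remain equal.
    idx_sorted[lo..]
        .iter()
        .map_while(move |&i| (items[i].0 == *key).then(|| &items[i].1))
}

fn main() {
    let items = [(2, "a"), (1, "b"), (2, "c")]; // insertion order
    let mut idx: Vec<usize> = (0..items.len()).collect();
    idx.sort_by_key(|&i| items[i].0); // stable: duplicates keep their order
    let twos: Vec<_> = get_by_key(&items, &idx, &2).collect();
    assert_eq!(twos, [&"a", &"c"]);
}
```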
diff --git a/compiler/rustc_data_structures/src/sorted_map/tests.rs b/compiler/rustc_data_structures/src/sorted_map/tests.rs
new file mode 100644
index 000000000..1e977d709
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sorted_map/tests.rs
@@ -0,0 +1,222 @@
+use super::{SortedIndexMultiMap, SortedMap};
+
+#[test]
+fn test_sorted_index_multi_map() {
+ let entries: Vec<_> = vec![(2, 0), (1, 0), (2, 1), (3, 0), (2, 2)];
+ let set: SortedIndexMultiMap<usize, _, _> = entries.iter().copied().collect();
+
+ // Insertion order is preserved.
+ assert!(entries.iter().map(|(ref k, ref v)| (k, v)).eq(set.iter()));
+
+ // Indexing
+ for (i, expect) in entries.iter().enumerate() {
+ assert_eq!(set[i], expect.1);
+ }
+
+ // `get_by_key` works.
+ assert_eq!(set.get_by_key(3).copied().collect::<Vec<_>>(), vec![0]);
+ assert!(set.get_by_key(4).next().is_none());
+
+ // `get_by_key` returns items in insertion order.
+ let twos: Vec<_> = set.get_by_key_enumerated(2).collect();
+ let idxs: Vec<usize> = twos.iter().map(|(i, _)| *i).collect();
+ let values: Vec<usize> = twos.iter().map(|(_, &v)| v).collect();
+
+ assert_eq!(idxs, vec![0, 2, 4]);
+ assert_eq!(values, vec![0, 1, 2]);
+}
+
+#[test]
+fn test_insert_and_iter() {
+ let mut map = SortedMap::new();
+ let mut expected = Vec::new();
+
+ for x in 0..100 {
+ assert_eq!(map.iter().cloned().collect::<Vec<_>>(), expected);
+
+ let x = 1000 - x * 2;
+ map.insert(x, x);
+ expected.insert(0, (x, x));
+ }
+}
+
+#[test]
+fn test_get_and_index() {
+ let mut map = SortedMap::new();
+ let mut expected = Vec::new();
+
+ for x in 0..100 {
+ let x = 1000 - x;
+ if x & 1 == 0 {
+ map.insert(x, x);
+ }
+ expected.push(x);
+ }
+
+ for mut x in expected {
+ if x & 1 == 0 {
+ assert_eq!(map.get(&x), Some(&x));
+ assert_eq!(map.get_mut(&x), Some(&mut x));
+ assert_eq!(map[&x], x);
+ assert_eq!(&mut map[&x], &mut x);
+ } else {
+ assert_eq!(map.get(&x), None);
+ assert_eq!(map.get_mut(&x), None);
+ }
+ }
+}
+
+#[test]
+fn test_range() {
+ let mut map = SortedMap::new();
+ map.insert(1, 1);
+ map.insert(3, 3);
+ map.insert(6, 6);
+ map.insert(9, 9);
+
+ let keys = |s: &[(_, _)]| s.into_iter().map(|e| e.0).collect::<Vec<u32>>();
+
+ for start in 0..11 {
+ for end in 0..11 {
+ if end < start {
+ continue;
+ }
+
+ let mut expected = vec![1, 3, 6, 9];
+ expected.retain(|&x| x >= start && x < end);
+
+ assert_eq!(keys(map.range(start..end)), expected, "range = {}..{}", start, end);
+ }
+ }
+}
+
+#[test]
+fn test_offset_keys() {
+ let mut map = SortedMap::new();
+ map.insert(1, 1);
+ map.insert(3, 3);
+ map.insert(6, 6);
+
+ map.offset_keys(|k| *k += 1);
+
+ let mut expected = SortedMap::new();
+ expected.insert(2, 1);
+ expected.insert(4, 3);
+ expected.insert(7, 6);
+
+ assert_eq!(map, expected);
+}
+
+fn keys(s: SortedMap<u32, u32>) -> Vec<u32> {
+ s.into_iter().map(|(k, _)| k).collect::<Vec<u32>>()
+}
+
+fn elements(s: SortedMap<u32, u32>) -> Vec<(u32, u32)> {
+ s.into_iter().collect::<Vec<(u32, u32)>>()
+}
+
+#[test]
+fn test_remove_range() {
+ let mut map = SortedMap::new();
+ map.insert(1, 1);
+ map.insert(3, 3);
+ map.insert(6, 6);
+ map.insert(9, 9);
+
+ for start in 0..11 {
+ for end in 0..11 {
+ if end < start {
+ continue;
+ }
+
+ let mut expected = vec![1, 3, 6, 9];
+ expected.retain(|&x| x < start || x >= end);
+
+ let mut map = map.clone();
+ map.remove_range(start..end);
+
+ assert_eq!(keys(map), expected, "range = {}..{}", start, end);
+ }
+ }
+}
+
+#[test]
+fn test_remove() {
+ let mut map = SortedMap::new();
+ let mut expected = Vec::new();
+
+ for x in 0..10 {
+ map.insert(x, x);
+ expected.push((x, x));
+ }
+
+ for x in 0..10 {
+ let mut map = map.clone();
+ let mut expected = expected.clone();
+
+ assert_eq!(map.remove(&x), Some(x));
+ expected.remove(x as usize);
+
+ assert_eq!(map.iter().cloned().collect::<Vec<_>>(), expected);
+ }
+}
+
+#[test]
+fn test_insert_presorted_non_overlapping() {
+ let mut map = SortedMap::new();
+ map.insert(2, 0);
+ map.insert(8, 0);
+
+ map.insert_presorted(vec![(3, 0), (7, 0)]);
+
+ let expected = vec![2, 3, 7, 8];
+ assert_eq!(keys(map), expected);
+}
+
+#[test]
+fn test_insert_presorted_first_elem_equal() {
+ let mut map = SortedMap::new();
+ map.insert(2, 2);
+ map.insert(8, 8);
+
+ map.insert_presorted(vec![(2, 0), (7, 7)]);
+
+ let expected = vec![(2, 0), (7, 7), (8, 8)];
+ assert_eq!(elements(map), expected);
+}
+
+#[test]
+fn test_insert_presorted_last_elem_equal() {
+ let mut map = SortedMap::new();
+ map.insert(2, 2);
+ map.insert(8, 8);
+
+ map.insert_presorted(vec![(3, 3), (8, 0)]);
+
+ let expected = vec![(2, 2), (3, 3), (8, 0)];
+ assert_eq!(elements(map), expected);
+}
+
+#[test]
+fn test_insert_presorted_shuffle() {
+ let mut map = SortedMap::new();
+ map.insert(2, 2);
+ map.insert(7, 7);
+
+ map.insert_presorted(vec![(1, 1), (3, 3), (8, 8)]);
+
+ let expected = vec![(1, 1), (2, 2), (3, 3), (7, 7), (8, 8)];
+ assert_eq!(elements(map), expected);
+}
+
+#[test]
+fn test_insert_presorted_at_end() {
+ let mut map = SortedMap::new();
+ map.insert(1, 1);
+ map.insert(2, 2);
+
+ map.insert_presorted(vec![(3, 3), (8, 8)]);
+
+ let expected = vec![(1, 1), (2, 2), (3, 3), (8, 8)];
+ assert_eq!(elements(map), expected);
+}
diff --git a/compiler/rustc_data_structures/src/sso/either_iter.rs b/compiler/rustc_data_structures/src/sso/either_iter.rs
new file mode 100644
index 000000000..131eeef45
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sso/either_iter.rs
@@ -0,0 +1,75 @@
+use std::fmt;
+use std::iter::ExactSizeIterator;
+use std::iter::FusedIterator;
+use std::iter::Iterator;
+
+/// An iterator that wraps an instance of
+/// one of two specific iterator
+/// implementations.
+///
+/// Note: providing custom implementations
+/// of most methods could marginally
+/// improve performance by performing the
+/// Left/Right match once up front instead
+/// of on every step.
+#[derive(Clone)]
+pub enum EitherIter<L, R> {
+ Left(L),
+ Right(R),
+}
+
+impl<L, R> Iterator for EitherIter<L, R>
+where
+ L: Iterator,
+ R: Iterator<Item = L::Item>,
+{
+ type Item = L::Item;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match self {
+ EitherIter::Left(l) => l.next(),
+ EitherIter::Right(r) => r.next(),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match self {
+ EitherIter::Left(l) => l.size_hint(),
+ EitherIter::Right(r) => r.size_hint(),
+ }
+ }
+}
+
+impl<L, R> ExactSizeIterator for EitherIter<L, R>
+where
+ L: ExactSizeIterator,
+ R: ExactSizeIterator,
+ EitherIter<L, R>: Iterator,
+{
+ fn len(&self) -> usize {
+ match self {
+ EitherIter::Left(l) => l.len(),
+ EitherIter::Right(r) => r.len(),
+ }
+ }
+}
+
+impl<L, R> FusedIterator for EitherIter<L, R>
+where
+ L: FusedIterator,
+ R: FusedIterator,
+ EitherIter<L, R>: Iterator,
+{
+}
+
+impl<L, R> fmt::Debug for EitherIter<L, R>
+where
+ L: fmt::Debug,
+ R: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ EitherIter::Left(l) => l.fmt(f),
+ EitherIter::Right(r) => r.fmt(f),
+ }
+ }
+}
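
The practical motivation for this type: two match arms that build different concrete iterator types cannot both be returned as a single `impl Iterator`. Wrapping each arm in `EitherIter` produces one concrete type (the same role the `either` crate's `Either` plays). A usage sketch, assuming the `EitherIter` defined above is in scope:

```rust
fn evens_or_all(xs: &[u32], only_evens: bool) -> impl Iterator<Item = &u32> + '_ {
    if only_evens {
        // This arm has type `Filter<slice::Iter<u32>, _>`...
        EitherIter::Left(xs.iter().filter(|x| **x % 2 == 0))
    } else {
        // ...and this one plain `slice::Iter<u32>`: different types,
        // unified behind one wrapper.
        EitherIter::Right(xs.iter())
    }
}
```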
diff --git a/compiler/rustc_data_structures/src/sso/map.rs b/compiler/rustc_data_structures/src/sso/map.rs
new file mode 100644
index 000000000..ec6a62016
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sso/map.rs
@@ -0,0 +1,557 @@
+use super::either_iter::EitherIter;
+use crate::fx::FxHashMap;
+use arrayvec::ArrayVec;
+use std::fmt;
+use std::hash::Hash;
+use std::iter::FromIterator;
+use std::ops::Index;
+
+// For pointer-sized elements, arrays
+// are faster than a set/map for up to
+// 64 elements.
+//
+// On the other hand, such a big array
+// hurts cache performance and makes
+// passing SSO structures around very
+// expensive.
+//
+// The biggest performance benefit is
+// gained for reasonably small arrays
+// that stay small in the vast majority
+// of cases.
+//
+// '8' is chosen as a sane default, to be
+// reevaluated later.
+const SSO_ARRAY_SIZE: usize = 8;
+
+/// Small-storage-optimized implementation of a map.
+///
+/// Stores elements in a small array up to a certain length
+/// and switches to `HashMap` when that length is exceeded.
+//
+// FIXME: Implements subset of HashMap API.
+//
+// Missing HashMap API:
+// all hasher-related
+// try_reserve
+// shrink_to (unstable)
+// drain_filter (unstable)
+// into_keys/into_values (unstable)
+// all raw_entry-related
+// PartialEq/Eq (requires sorting the array)
+// Entry::or_insert_with_key
+// Vacant/Occupied entries and related
+//
+// FIXME: In HashMap most methods accepting key reference
+// accept reference to generic `Q` where `K: Borrow<Q>`.
+//
+// However, using this approach in `HashMap::get` apparently
+// breaks inlining and noticeably reduces performance.
+//
+// Performance *should* be the same given that borrow is
+// a NOP in most cases, but in practice that's not the case.
+//
+// Further investigation is required.
+//
+// Affected methods:
+// SsoHashMap::get
+// SsoHashMap::get_mut
+// SsoHashMap::get_entry
+// SsoHashMap::get_key_value
+// SsoHashMap::contains_key
+// SsoHashMap::remove
+// SsoHashMap::remove_entry
+// Index::index
+// SsoHashSet::take
+// SsoHashSet::get
+// SsoHashSet::remove
+// SsoHashSet::contains
+
+#[derive(Clone)]
+pub enum SsoHashMap<K, V> {
+ Array(ArrayVec<(K, V), SSO_ARRAY_SIZE>),
+ Map(FxHashMap<K, V>),
+}
+
+impl<K, V> SsoHashMap<K, V> {
+ /// Creates an empty `SsoHashMap`.
+ #[inline]
+ pub fn new() -> Self {
+ SsoHashMap::Array(ArrayVec::new())
+ }
+
+ /// Creates an empty `SsoHashMap` with the specified capacity.
+ pub fn with_capacity(cap: usize) -> Self {
+ if cap <= SSO_ARRAY_SIZE {
+ Self::new()
+ } else {
+ SsoHashMap::Map(FxHashMap::with_capacity_and_hasher(cap, Default::default()))
+ }
+ }
+
+ /// Clears the map, removing all key-value pairs. Keeps the allocated memory
+ /// for reuse.
+ pub fn clear(&mut self) {
+ match self {
+ SsoHashMap::Array(array) => array.clear(),
+ SsoHashMap::Map(map) => map.clear(),
+ }
+ }
+
+ /// Returns the number of elements the map can hold without reallocating.
+ pub fn capacity(&self) -> usize {
+ match self {
+ SsoHashMap::Array(_) => SSO_ARRAY_SIZE,
+ SsoHashMap::Map(map) => map.capacity(),
+ }
+ }
+
+ /// Returns the number of elements in the map.
+ pub fn len(&self) -> usize {
+ match self {
+ SsoHashMap::Array(array) => array.len(),
+ SsoHashMap::Map(map) => map.len(),
+ }
+ }
+
+ /// Returns `true` if the map contains no elements.
+ pub fn is_empty(&self) -> bool {
+ match self {
+ SsoHashMap::Array(array) => array.is_empty(),
+ SsoHashMap::Map(map) => map.is_empty(),
+ }
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order.
+ /// The iterator element type is `(&'a K, &'a V)`.
+ #[inline]
+ pub fn iter(&self) -> <&Self as IntoIterator>::IntoIter {
+ self.into_iter()
+ }
+
+ /// An iterator visiting all key-value pairs in arbitrary order,
+ /// with mutable references to the values.
+ /// The iterator element type is `(&'a K, &'a mut V)`.
+ #[inline]
+ pub fn iter_mut(&mut self) -> impl Iterator<Item = (&'_ K, &'_ mut V)> {
+ self.into_iter()
+ }
+
+ /// An iterator visiting all keys in arbitrary order.
+ /// The iterator element type is `&'a K`.
+ pub fn keys(&self) -> impl Iterator<Item = &'_ K> {
+ match self {
+ SsoHashMap::Array(array) => EitherIter::Left(array.iter().map(|(k, _v)| k)),
+ SsoHashMap::Map(map) => EitherIter::Right(map.keys()),
+ }
+ }
+
+ /// An iterator visiting all values in arbitrary order.
+ /// The iterator element type is `&'a V`.
+ pub fn values(&self) -> impl Iterator<Item = &'_ V> {
+ match self {
+ SsoHashMap::Array(array) => EitherIter::Left(array.iter().map(|(_k, v)| v)),
+ SsoHashMap::Map(map) => EitherIter::Right(map.values()),
+ }
+ }
+
+ /// An iterator visiting all values mutably in arbitrary order.
+ /// The iterator element type is `&'a mut V`.
+ pub fn values_mut(&mut self) -> impl Iterator<Item = &'_ mut V> {
+ match self {
+ SsoHashMap::Array(array) => EitherIter::Left(array.iter_mut().map(|(_k, v)| v)),
+ SsoHashMap::Map(map) => EitherIter::Right(map.values_mut()),
+ }
+ }
+
+ /// Clears the map, returning all key-value pairs as an iterator. Keeps the
+ /// allocated memory for reuse.
+ pub fn drain(&mut self) -> impl Iterator<Item = (K, V)> + '_ {
+ match self {
+ SsoHashMap::Array(array) => EitherIter::Left(array.drain(..)),
+ SsoHashMap::Map(map) => EitherIter::Right(map.drain()),
+ }
+ }
+}
+
+impl<K: Eq + Hash, V> SsoHashMap<K, V> {
+ /// Changes underlying storage from array to hashmap
+ /// if array is full.
+ fn migrate_if_full(&mut self) {
+ if let SsoHashMap::Array(array) = self {
+ if array.is_full() {
+ *self = SsoHashMap::Map(array.drain(..).collect());
+ }
+ }
+ }
+
+ /// Reserves capacity for at least `additional` more elements to be inserted
+ /// in the `SsoHashMap`. The collection may reserve more space to avoid
+ /// frequent reallocations.
+ pub fn reserve(&mut self, additional: usize) {
+ match self {
+ SsoHashMap::Array(array) => {
+ if SSO_ARRAY_SIZE < (array.len() + additional) {
+ let mut map: FxHashMap<K, V> = array.drain(..).collect();
+ map.reserve(additional);
+ *self = SsoHashMap::Map(map);
+ }
+ }
+ SsoHashMap::Map(map) => map.reserve(additional),
+ }
+ }
+
+ /// Shrinks the capacity of the map as much as possible. It will drop
+ /// down as much as possible while maintaining the internal rules
+ /// and possibly leaving some space in accordance with the resize policy.
+ pub fn shrink_to_fit(&mut self) {
+ if let SsoHashMap::Map(map) = self {
+ if map.len() <= SSO_ARRAY_SIZE {
+ *self = SsoHashMap::Array(map.drain().collect());
+ } else {
+ map.shrink_to_fit();
+ }
+ }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ match self {
+ SsoHashMap::Array(array) => array.retain(|(k, v)| f(k, v)),
+ SsoHashMap::Map(map) => map.retain(f),
+ }
+ }
+
+ /// Inserts a key-value pair into the map.
+ ///
+ /// If the map did not have this key present, [`None`] is returned.
+ ///
+ /// If the map did have this key present, the value is updated, and the old
+ /// value is returned. The key is not updated, though; this matters for
+ /// types that can be `==` without being identical. See the [module-level
+ /// documentation] for more.
+ pub fn insert(&mut self, key: K, value: V) -> Option<V> {
+ match self {
+ SsoHashMap::Array(array) => {
+ for (k, v) in array.iter_mut() {
+ if *k == key {
+ let old_value = std::mem::replace(v, value);
+ return Some(old_value);
+ }
+ }
+ if let Err(error) = array.try_push((key, value)) {
+ let mut map: FxHashMap<K, V> = array.drain(..).collect();
+ let (key, value) = error.element();
+ map.insert(key, value);
+ *self = SsoHashMap::Map(map);
+ }
+ None
+ }
+ SsoHashMap::Map(map) => map.insert(key, value),
+ }
+ }
+
+ /// Removes a key from the map, returning the value at the key if the key
+ /// was previously in the map.
+ pub fn remove(&mut self, key: &K) -> Option<V> {
+ match self {
+ SsoHashMap::Array(array) => {
+ if let Some(index) = array.iter().position(|(k, _v)| k == key) {
+ Some(array.swap_remove(index).1)
+ } else {
+ None
+ }
+ }
+ SsoHashMap::Map(map) => map.remove(key),
+ }
+ }
+
+ /// Removes a key from the map, returning the stored key and value if the
+ /// key was previously in the map.
+ pub fn remove_entry(&mut self, key: &K) -> Option<(K, V)> {
+ match self {
+ SsoHashMap::Array(array) => {
+ if let Some(index) = array.iter().position(|(k, _v)| k == key) {
+ Some(array.swap_remove(index))
+ } else {
+ None
+ }
+ }
+ SsoHashMap::Map(map) => map.remove_entry(key),
+ }
+ }
+
+ /// Returns a reference to the value corresponding to the key.
+ pub fn get(&self, key: &K) -> Option<&V> {
+ match self {
+ SsoHashMap::Array(array) => {
+ for (k, v) in array {
+ if k == key {
+ return Some(v);
+ }
+ }
+ None
+ }
+ SsoHashMap::Map(map) => map.get(key),
+ }
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
+ match self {
+ SsoHashMap::Array(array) => {
+ for (k, v) in array {
+ if k == key {
+ return Some(v);
+ }
+ }
+ None
+ }
+ SsoHashMap::Map(map) => map.get_mut(key),
+ }
+ }
+
+ /// Returns the key-value pair corresponding to the supplied key.
+ pub fn get_key_value(&self, key: &K) -> Option<(&K, &V)> {
+ match self {
+ SsoHashMap::Array(array) => {
+ for (k, v) in array {
+ if k == key {
+ return Some((k, v));
+ }
+ }
+ None
+ }
+ SsoHashMap::Map(map) => map.get_key_value(key),
+ }
+ }
+
+ /// Returns `true` if the map contains a value for the specified key.
+ pub fn contains_key(&self, key: &K) -> bool {
+ match self {
+ SsoHashMap::Array(array) => array.iter().any(|(k, _v)| k == key),
+ SsoHashMap::Map(map) => map.contains_key(key),
+ }
+ }
+
+ /// Gets the given key's corresponding entry in the map for in-place manipulation.
+ #[inline]
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+ Entry { ssomap: self, key }
+ }
+}
+
+impl<K, V> Default for SsoHashMap<K, V> {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<K: Eq + Hash, V> FromIterator<(K, V)> for SsoHashMap<K, V> {
+ fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I) -> SsoHashMap<K, V> {
+ let mut map: SsoHashMap<K, V> = Default::default();
+ map.extend(iter);
+ map
+ }
+}
+
+impl<K: Eq + Hash, V> Extend<(K, V)> for SsoHashMap<K, V> {
+ fn extend<I>(&mut self, iter: I)
+ where
+ I: IntoIterator<Item = (K, V)>,
+ {
+ for (key, value) in iter.into_iter() {
+ self.insert(key, value);
+ }
+ }
+
+ #[inline]
+ fn extend_one(&mut self, (k, v): (K, V)) {
+ self.insert(k, v);
+ }
+
+ fn extend_reserve(&mut self, additional: usize) {
+ match self {
+ SsoHashMap::Array(array) => {
+ if SSO_ARRAY_SIZE < (array.len() + additional) {
+ let mut map: FxHashMap<K, V> = array.drain(..).collect();
+ map.extend_reserve(additional);
+ *self = SsoHashMap::Map(map);
+ }
+ }
+ SsoHashMap::Map(map) => map.extend_reserve(additional),
+ }
+ }
+}
+
+impl<'a, K, V> Extend<(&'a K, &'a V)> for SsoHashMap<K, V>
+where
+ K: Eq + Hash + Copy,
+ V: Copy,
+{
+ fn extend<T: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: T) {
+ self.extend(iter.into_iter().map(|(k, v)| (*k, *v)))
+ }
+
+ #[inline]
+ fn extend_one(&mut self, (&k, &v): (&'a K, &'a V)) {
+ self.insert(k, v);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ Extend::<(K, V)>::extend_reserve(self, additional)
+ }
+}
+
+impl<K, V> IntoIterator for SsoHashMap<K, V> {
+ type IntoIter = EitherIter<
+ <ArrayVec<(K, V), 8> as IntoIterator>::IntoIter,
+ <FxHashMap<K, V> as IntoIterator>::IntoIter,
+ >;
+ type Item = <Self::IntoIter as Iterator>::Item;
+
+ fn into_iter(self) -> Self::IntoIter {
+ match self {
+ SsoHashMap::Array(array) => EitherIter::Left(array.into_iter()),
+ SsoHashMap::Map(map) => EitherIter::Right(map.into_iter()),
+ }
+ }
+}
+
+/// Adapts the `Item` of an array reference iterator to the `Item` of a hashmap reference iterator.
+#[inline(always)]
+fn adapt_array_ref_it<K, V>(pair: &(K, V)) -> (&K, &V) {
+ let (a, b) = pair;
+ (a, b)
+}
+
+/// Adapts the `Item` of an array mut reference iterator to the `Item` of a hashmap mut reference iterator.
+#[inline(always)]
+fn adapt_array_mut_it<K, V>(pair: &mut (K, V)) -> (&K, &mut V) {
+ let (a, b) = pair;
+ (a, b)
+}
+
+impl<'a, K, V> IntoIterator for &'a SsoHashMap<K, V> {
+ type IntoIter = EitherIter<
+ std::iter::Map<
+ <&'a ArrayVec<(K, V), 8> as IntoIterator>::IntoIter,
+ fn(&'a (K, V)) -> (&'a K, &'a V),
+ >,
+ <&'a FxHashMap<K, V> as IntoIterator>::IntoIter,
+ >;
+ type Item = <Self::IntoIter as Iterator>::Item;
+
+ fn into_iter(self) -> Self::IntoIter {
+ match self {
+ SsoHashMap::Array(array) => EitherIter::Left(array.into_iter().map(adapt_array_ref_it)),
+ SsoHashMap::Map(map) => EitherIter::Right(map.iter()),
+ }
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut SsoHashMap<K, V> {
+ type IntoIter = EitherIter<
+ std::iter::Map<
+ <&'a mut ArrayVec<(K, V), 8> as IntoIterator>::IntoIter,
+ fn(&'a mut (K, V)) -> (&'a K, &'a mut V),
+ >,
+ <&'a mut FxHashMap<K, V> as IntoIterator>::IntoIter,
+ >;
+ type Item = <Self::IntoIter as Iterator>::Item;
+
+ fn into_iter(self) -> Self::IntoIter {
+ match self {
+ SsoHashMap::Array(array) => EitherIter::Left(array.into_iter().map(adapt_array_mut_it)),
+ SsoHashMap::Map(map) => EitherIter::Right(map.iter_mut()),
+ }
+ }
+}
+
+impl<K, V> fmt::Debug for SsoHashMap<K, V>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_map().entries(self.iter()).finish()
+ }
+}
+
+impl<'a, K, V> Index<&'a K> for SsoHashMap<K, V>
+where
+ K: Eq + Hash,
+{
+ type Output = V;
+
+ #[inline]
+ fn index(&self, key: &K) -> &V {
+ self.get(key).expect("no entry found for key")
+ }
+}
+
+/// A view into a single entry in a map.
+pub struct Entry<'a, K, V> {
+ ssomap: &'a mut SsoHashMap<K, V>,
+ key: K,
+}
+
+impl<'a, K: Eq + Hash, V> Entry<'a, K, V> {
+ /// Provides in-place mutable access to an occupied entry before any
+ /// potential inserts into the map.
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut V),
+ {
+ if let Some(value) = self.ssomap.get_mut(&self.key) {
+ f(value);
+ }
+ self
+ }
+
+ /// Ensures a value is in the entry by inserting the default if empty, and returns
+ /// a mutable reference to the value in the entry.
+ #[inline]
+ pub fn or_insert(self, value: V) -> &'a mut V {
+ self.or_insert_with(|| value)
+ }
+
+ /// Ensures a value is in the entry by inserting the result of the default function if empty,
+ /// and returns a mutable reference to the value in the entry.
+ pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
+ self.ssomap.migrate_if_full();
+ match self.ssomap {
+ SsoHashMap::Array(array) => {
+ let key_ref = &self.key;
+ let found_index = array.iter().position(|(k, _v)| k == key_ref);
+ let index = if let Some(index) = found_index {
+ index
+ } else {
+ let index = array.len();
+ array.try_push((self.key, default())).unwrap();
+ index
+ };
+ &mut array[index].1
+ }
+ SsoHashMap::Map(map) => map.entry(self.key).or_insert_with(default),
+ }
+ }
+
+ /// Returns a reference to this entry's key.
+ #[inline]
+ pub fn key(&self) -> &K {
+ &self.key
+ }
+}
+
+impl<'a, K: Eq + Hash, V: Default> Entry<'a, K, V> {
+ /// Ensures a value is in the entry by inserting the default value if empty,
+ /// and returns a mutable reference to the value in the entry.
+ #[inline]
+ pub fn or_default(self) -> &'a mut V {
+ self.or_insert_with(Default::default)
+ }
+}
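
To make the migration behavior concrete: lookups and inserts stay linear scans over the inline `ArrayVec` until a ninth distinct key arrives, at which point `try_push` fails and the storage is drained into an `FxHashMap`. A hedged sketch, assuming `SsoHashMap` is in scope:

```rust
fn demo() {
    let mut m = SsoHashMap::new();
    for i in 0..8u32 {
        // Up to SSO_ARRAY_SIZE (8) entries: inline array, no hashing.
        m.insert(i, i * 10);
    }
    assert!(matches!(m, SsoHashMap::Array(_)));

    // The ninth distinct key overflows the array and migrates to a map.
    m.insert(8, 80);
    assert!(matches!(m, SsoHashMap::Map(_)));
}
```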
diff --git a/compiler/rustc_data_structures/src/sso/mod.rs b/compiler/rustc_data_structures/src/sso/mod.rs
new file mode 100644
index 000000000..dd21bc8e6
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sso/mod.rs
@@ -0,0 +1,6 @@
+mod either_iter;
+mod map;
+mod set;
+
+pub use map::SsoHashMap;
+pub use set::SsoHashSet;
diff --git a/compiler/rustc_data_structures/src/sso/set.rs b/compiler/rustc_data_structures/src/sso/set.rs
new file mode 100644
index 000000000..4fda3adb7
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sso/set.rs
@@ -0,0 +1,238 @@
+use std::fmt;
+use std::hash::Hash;
+use std::iter::FromIterator;
+
+use super::map::SsoHashMap;
+
+/// Small-storage-optimized implementation of a set.
+///
+/// Stores elements in a small array up to a certain length
+/// and switches to `HashSet` when that length is exceeded.
+//
+// FIXME: Implements subset of HashSet API.
+//
+// Missing HashSet API:
+// all hasher-related
+// try_reserve
+// shrink_to (unstable)
+// drain_filter (unstable)
+// replace
+// get_or_insert/get_or_insert_owned/get_or_insert_with (unstable)
+// difference/symmetric_difference/intersection/union
+// is_disjoint/is_subset/is_superset
+// PartialEq/Eq (requires SsoHashMap implementation)
+// BitOr/BitAnd/BitXor/Sub
+#[derive(Clone)]
+pub struct SsoHashSet<T> {
+ map: SsoHashMap<T, ()>,
+}
+
+/// Adapter function used to turn the
+/// result of `SsoHashMap` functions into
+/// the result `SsoHashSet` should return.
+#[inline(always)]
+fn entry_to_key<K, V>((k, _v): (K, V)) -> K {
+ k
+}
+
+impl<T> SsoHashSet<T> {
+ /// Creates an empty `SsoHashSet`.
+ #[inline]
+ pub fn new() -> Self {
+ Self { map: SsoHashMap::new() }
+ }
+
+ /// Creates an empty `SsoHashSet` with the specified capacity.
+ #[inline]
+ pub fn with_capacity(cap: usize) -> Self {
+ Self { map: SsoHashMap::with_capacity(cap) }
+ }
+
+ /// Clears the set, removing all values.
+ #[inline]
+ pub fn clear(&mut self) {
+ self.map.clear()
+ }
+
+ /// Returns the number of elements the set can hold without reallocating.
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.map.capacity()
+ }
+
+ /// Returns the number of elements in the set.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.map.len()
+ }
+
+ /// Returns `true` if the set contains no elements.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.map.is_empty()
+ }
+
+ /// An iterator visiting all elements in arbitrary order.
+ /// The iterator element type is `&'a T`.
+ #[inline]
+ pub fn iter(&self) -> impl Iterator<Item = &T> {
+ self.into_iter()
+ }
+
+ /// Clears the set, returning all elements in an iterator.
+ #[inline]
+ pub fn drain(&mut self) -> impl Iterator<Item = T> + '_ {
+ self.map.drain().map(entry_to_key)
+ }
+}
+
+impl<T: Eq + Hash> SsoHashSet<T> {
+ /// Reserves capacity for at least `additional` more elements to be inserted
+ /// in the `SsoHashSet`. The collection may reserve more space to avoid
+ /// frequent reallocations.
+ #[inline]
+ pub fn reserve(&mut self, additional: usize) {
+ self.map.reserve(additional)
+ }
+
+ /// Shrinks the capacity of the set as much as possible. It will drop
+ /// down as much as possible while maintaining the internal rules
+ /// and possibly leaving some space in accordance with the resize policy.
+ #[inline]
+ pub fn shrink_to_fit(&mut self) {
+ self.map.shrink_to_fit()
+ }
+
+ /// Retains only the elements specified by the predicate.
+ #[inline]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.map.retain(|k, _v| f(k))
+ }
+
+ /// Removes and returns the value in the set, if any, that is equal to the given one.
+ #[inline]
+ pub fn take(&mut self, value: &T) -> Option<T> {
+ self.map.remove_entry(value).map(entry_to_key)
+ }
+
+ /// Returns a reference to the value in the set, if any, that is equal to the given value.
+ #[inline]
+ pub fn get(&self, value: &T) -> Option<&T> {
+ self.map.get_key_value(value).map(entry_to_key)
+ }
+
+ /// Adds a value to the set.
+ ///
+ /// Returns whether the value was newly inserted. That is:
+ ///
+ /// - If the set did not previously contain this value, `true` is returned.
+ /// - If the set already contained this value, `false` is returned.
+ #[inline]
+ pub fn insert(&mut self, elem: T) -> bool {
+ self.map.insert(elem, ()).is_none()
+ }
+
+ /// Removes a value from the set. Returns whether the value was
+ /// present in the set.
+ #[inline]
+ pub fn remove(&mut self, value: &T) -> bool {
+ self.map.remove(value).is_some()
+ }
+
+ /// Returns `true` if the set contains a value.
+ #[inline]
+ pub fn contains(&self, value: &T) -> bool {
+ self.map.contains_key(value)
+ }
+}
+
+impl<T: Eq + Hash> FromIterator<T> for SsoHashSet<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> SsoHashSet<T> {
+ let mut set: SsoHashSet<T> = Default::default();
+ set.extend(iter);
+ set
+ }
+}
+
+impl<T> Default for SsoHashSet<T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<T: Eq + Hash> Extend<T> for SsoHashSet<T> {
+ fn extend<I>(&mut self, iter: I)
+ where
+ I: IntoIterator<Item = T>,
+ {
+ for val in iter.into_iter() {
+ self.insert(val);
+ }
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.insert(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.map.extend_reserve(additional)
+ }
+}
+
+impl<'a, T> Extend<&'a T> for SsoHashSet<T>
+where
+ T: 'a + Eq + Hash + Copy,
+{
+ #[inline]
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &item: &'a T) {
+ self.insert(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ Extend::<T>::extend_reserve(self, additional)
+ }
+}
+
+impl<T> IntoIterator for SsoHashSet<T> {
+ type IntoIter = std::iter::Map<<SsoHashMap<T, ()> as IntoIterator>::IntoIter, fn((T, ())) -> T>;
+ type Item = <Self::IntoIter as Iterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.map.into_iter().map(entry_to_key)
+ }
+}
+
+impl<'a, T> IntoIterator for &'a SsoHashSet<T> {
+ type IntoIter = std::iter::Map<
+ <&'a SsoHashMap<T, ()> as IntoIterator>::IntoIter,
+ fn((&'a T, &'a ())) -> &'a T,
+ >;
+ type Item = <Self::IntoIter as Iterator>::Item;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.map.iter().map(entry_to_key)
+ }
+}
+
+impl<T> fmt::Debug for SsoHashSet<T>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_set().entries(self.iter()).finish()
+ }
+}
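
`SsoHashSet` is the classic "set = map with unit values" construction: every operation forwards to the inner `SsoHashMap<T, ()>`, adapting return values with `entry_to_key`. The same construction with std types, as a minimal sketch:

```rust
use std::collections::HashMap;
use std::hash::Hash;

struct MiniSet<T> {
    map: HashMap<T, ()>,
}

impl<T: Hash + Eq> MiniSet<T> {
    fn insert(&mut self, value: T) -> bool {
        // The map returns the old value; `None` means the element was
        // newly added, which is exactly the set's `insert` contract.
        self.map.insert(value, ()).is_none()
    }

    fn contains(&self, value: &T) -> bool {
        self.map.contains_key(value)
    }
}
```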
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
new file mode 100644
index 000000000..ce8591734
--- /dev/null
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -0,0 +1,650 @@
+use crate::sip128::SipHasher128;
+use rustc_index::bit_set;
+use rustc_index::vec;
+use smallvec::SmallVec;
+use std::hash::{BuildHasher, Hash, Hasher};
+use std::marker::PhantomData;
+use std::mem;
+
+#[cfg(test)]
+mod tests;
+
+/// When hashing something that ends up affecting properties like symbol names,
+/// we want these symbol names to be calculated independently of other factors
+/// like what architecture you're compiling *from*.
+///
+/// To that end we always convert integers to little-endian format before
+/// hashing and the architecture dependent `isize` and `usize` types are
+/// extended to 64 bits if needed.
+pub struct StableHasher {
+ state: SipHasher128,
+}
+
+impl ::std::fmt::Debug for StableHasher {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{:?}", self.state)
+ }
+}
+
+pub trait StableHasherResult: Sized {
+ fn finish(hasher: StableHasher) -> Self;
+}
+
+impl StableHasher {
+ #[inline]
+ pub fn new() -> Self {
+ StableHasher { state: SipHasher128::new_with_keys(0, 0) }
+ }
+
+ #[inline]
+ pub fn finish<W: StableHasherResult>(self) -> W {
+ W::finish(self)
+ }
+}
+
+impl StableHasherResult for u128 {
+ #[inline]
+ fn finish(hasher: StableHasher) -> Self {
+ let (_0, _1) = hasher.finalize();
+ u128::from(_0) | (u128::from(_1) << 64)
+ }
+}
+
+impl StableHasherResult for u64 {
+ #[inline]
+ fn finish(hasher: StableHasher) -> Self {
+ hasher.finalize().0
+ }
+}
+
+impl StableHasher {
+ #[inline]
+ pub fn finalize(self) -> (u64, u64) {
+ self.state.finish128()
+ }
+}
+
+impl Hasher for StableHasher {
+ fn finish(&self) -> u64 {
+ panic!("use StableHasher::finalize instead");
+ }
+
+ #[inline]
+ fn write(&mut self, bytes: &[u8]) {
+ self.state.write(bytes);
+ }
+
+ #[inline]
+ fn write_str(&mut self, s: &str) {
+ self.state.write_str(s);
+ }
+
+ #[inline]
+ fn write_length_prefix(&mut self, len: usize) {
+ // Our impl for `usize` will extend it if needed.
+ self.write_usize(len);
+ }
+
+ #[inline]
+ fn write_u8(&mut self, i: u8) {
+ self.state.write_u8(i);
+ }
+
+ #[inline]
+ fn write_u16(&mut self, i: u16) {
+ self.state.short_write(i.to_le_bytes());
+ }
+
+ #[inline]
+ fn write_u32(&mut self, i: u32) {
+ self.state.short_write(i.to_le_bytes());
+ }
+
+ #[inline]
+ fn write_u64(&mut self, i: u64) {
+ self.state.short_write(i.to_le_bytes());
+ }
+
+ #[inline]
+ fn write_u128(&mut self, i: u128) {
+ self.state.write(&i.to_le_bytes());
+ }
+
+ #[inline]
+ fn write_usize(&mut self, i: usize) {
+ // Always treat usize as u64 so we get the same results on 32 and 64 bit
+ // platforms. This is important for symbol hashes when cross compiling,
+ // for example.
+ self.state.short_write((i as u64).to_le_bytes());
+ }
+
+ #[inline]
+ fn write_i8(&mut self, i: i8) {
+ self.state.write_i8(i);
+ }
+
+ #[inline]
+ fn write_i16(&mut self, i: i16) {
+ self.state.short_write((i as u16).to_le_bytes());
+ }
+
+ #[inline]
+ fn write_i32(&mut self, i: i32) {
+ self.state.short_write((i as u32).to_le_bytes());
+ }
+
+ #[inline]
+ fn write_i64(&mut self, i: i64) {
+ self.state.short_write((i as u64).to_le_bytes());
+ }
+
+ #[inline]
+ fn write_i128(&mut self, i: i128) {
+ self.state.write(&(i as u128).to_le_bytes());
+ }
+
+ #[inline]
+ fn write_isize(&mut self, i: isize) {
+ // Always treat isize as a 64-bit number so we get the same results on 32 and 64 bit
+ // platforms. This is important for symbol hashes when cross compiling,
+ // for example. Sign extending here is preferable as it means that the
+ // same negative number hashes the same on both 32 and 64 bit platforms.
+ let value = i as u64;
+
+ // Cold path
+ #[cold]
+ #[inline(never)]
+ fn hash_value(state: &mut SipHasher128, value: u64) {
+ state.write_u8(0xFF);
+ state.short_write(value.to_le_bytes());
+ }
+
+ // `isize` values often seem to have a small (positive) numeric value in practice.
+ // To exploit this, if the value is small, we will hash a smaller amount of bytes.
+ // However, we cannot just skip the leading zero bytes, as that would produce the same hash
+ // e.g. if you hash two values that have the same bit pattern when they are swapped.
+ // See https://github.com/rust-lang/rust/pull/93014 for context.
+ //
+ // Therefore, we employ the following strategy:
+ // 1) When we encounter a value that fits within a single byte (the most common case, and
+ // the one being optimized here), we hash just that byte. However, we do not do this for
+ // the value 0xFF, as that is a reserved prefix (a bit like in UTF-8).
+ // 2) When we encounter a larger value, we hash a "marker" 0xFF and then the corresponding
+ // 8 bytes. Since this prefix cannot occur when we hash a single byte, when we hash two
+ // `isize`s that fit within different numbers of bytes, they should always produce a different
+ // byte stream for the hasher.
+ if value < 0xFF {
+ self.state.write_u8(value as u8);
+ } else {
+ hash_value(&mut self.state, value);
+ }
+ }
+}
+
+/// Something that implements `HashStable<CTX>` can be hashed in a way that is
+/// stable across multiple compilation sessions.
+///
+/// Note that `HashStable` imposes rather more strict requirements than usual
+/// hash functions:
+///
+/// - Stable hashes are sometimes used as identifiers. Therefore they must
+/// conform to the corresponding `PartialEq` implementations:
+///
+/// - `x == y` implies `hash_stable(x) == hash_stable(y)`, and
+/// - `x != y` implies `hash_stable(x) != hash_stable(y)`.
+///
+/// That second condition is usually not required for hash functions
+/// (e.g. `Hash`). In practice this means that `hash_stable` must feed any
+/// information into the hasher that a `PartialEq` comparison takes into
+/// account. See [#49300](https://github.com/rust-lang/rust/issues/49300)
+/// for an example where violating this invariant has caused trouble in the
+/// past.
+///
+/// - `hash_stable()` must be independent of the current
+/// compilation session. E.g. it must not hash memory addresses or other
+/// things that are "randomly" assigned per compilation session.
+///
+/// - `hash_stable()` must be independent of the host architecture. The
+/// `StableHasher` takes care of endianness and `isize`/`usize` platform
+/// differences.
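+///
+/// # Example
+///
+/// A minimal sketch of a hand-written impl (`Point` is a made-up type for
+/// illustration; in practice such impls are usually derived):
+///
+/// ```ignore (illustrative)
+/// struct Point { x: u32, y: u32 }
+///
+/// impl<CTX> HashStable<CTX> for Point {
+///     fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+///         // Feed exactly the data that `PartialEq` would compare.
+///         self.x.hash_stable(hcx, hasher);
+///         self.y.hash_stable(hcx, hasher);
+///     }
+/// }
+/// ```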
+pub trait HashStable<CTX> {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher);
+}
+
+/// Implement this for types that can be turned into stable keys (for example,
+/// `DefId`, which can be converted to a `DefPathHash`). This is used for
+/// bringing maps into a predictable order before hashing them.
+pub trait ToStableHashKey<HCX> {
+ type KeyType: Ord + Sized + HashStable<HCX>;
+ fn to_stable_hash_key(&self, hcx: &HCX) -> Self::KeyType;
+}
+
+/// Implement HashStable by just calling `Hash::hash()`.
+///
+/// **WARNING** This is only valid for types that *really* don't need any context for fingerprinting.
+/// But it is easy to misuse this macro (see [#96013](https://github.com/rust-lang/rust/issues/96013)
+/// for examples). Therefore this macro is not exported and should only be used in the limited cases
+/// here in this module.
+///
+/// Use `#[derive(HashStable_Generic)]` instead.
+macro_rules! impl_stable_hash_via_hash {
+ ($t:ty) => {
+ impl<CTX> $crate::stable_hasher::HashStable<CTX> for $t {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, hasher: &mut $crate::stable_hasher::StableHasher) {
+ ::std::hash::Hash::hash(self, hasher);
+ }
+ }
+ };
+}
+
+impl_stable_hash_via_hash!(i8);
+impl_stable_hash_via_hash!(i16);
+impl_stable_hash_via_hash!(i32);
+impl_stable_hash_via_hash!(i64);
+impl_stable_hash_via_hash!(isize);
+
+impl_stable_hash_via_hash!(u8);
+impl_stable_hash_via_hash!(u16);
+impl_stable_hash_via_hash!(u32);
+impl_stable_hash_via_hash!(u64);
+impl_stable_hash_via_hash!(usize);
+
+impl_stable_hash_via_hash!(u128);
+impl_stable_hash_via_hash!(i128);
+
+impl_stable_hash_via_hash!(char);
+impl_stable_hash_via_hash!(());
+
+impl<CTX> HashStable<CTX> for ! {
+ fn hash_stable(&self, _ctx: &mut CTX, _hasher: &mut StableHasher) {
+ unreachable!()
+ }
+}
+
+impl<CTX, T> HashStable<CTX> for PhantomData<T> {
+ fn hash_stable(&self, _ctx: &mut CTX, _hasher: &mut StableHasher) {}
+}
+
+impl<CTX> HashStable<CTX> for ::std::num::NonZeroU32 {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.get().hash_stable(ctx, hasher)
+ }
+}
+
+impl<CTX> HashStable<CTX> for ::std::num::NonZeroUsize {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.get().hash_stable(ctx, hasher)
+ }
+}
+
+impl<CTX> HashStable<CTX> for f32 {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ let val: u32 = unsafe { ::std::mem::transmute(*self) };
+ val.hash_stable(ctx, hasher);
+ }
+}
+
+impl<CTX> HashStable<CTX> for f64 {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ let val: u64 = unsafe { ::std::mem::transmute(*self) };
+ val.hash_stable(ctx, hasher);
+ }
+}
+
+impl<CTX> HashStable<CTX> for ::std::cmp::Ordering {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ (*self as i8).hash_stable(ctx, hasher);
+ }
+}
+
+impl<T1: HashStable<CTX>, CTX> HashStable<CTX> for (T1,) {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ let (ref _0,) = *self;
+ _0.hash_stable(ctx, hasher);
+ }
+}
+
+impl<T1: HashStable<CTX>, T2: HashStable<CTX>, CTX> HashStable<CTX> for (T1, T2) {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ let (ref _0, ref _1) = *self;
+ _0.hash_stable(ctx, hasher);
+ _1.hash_stable(ctx, hasher);
+ }
+}
+
+impl<T1, T2, T3, CTX> HashStable<CTX> for (T1, T2, T3)
+where
+ T1: HashStable<CTX>,
+ T2: HashStable<CTX>,
+ T3: HashStable<CTX>,
+{
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ let (ref _0, ref _1, ref _2) = *self;
+ _0.hash_stable(ctx, hasher);
+ _1.hash_stable(ctx, hasher);
+ _2.hash_stable(ctx, hasher);
+ }
+}
+
+impl<T1, T2, T3, T4, CTX> HashStable<CTX> for (T1, T2, T3, T4)
+where
+ T1: HashStable<CTX>,
+ T2: HashStable<CTX>,
+ T3: HashStable<CTX>,
+ T4: HashStable<CTX>,
+{
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ let (ref _0, ref _1, ref _2, ref _3) = *self;
+ _0.hash_stable(ctx, hasher);
+ _1.hash_stable(ctx, hasher);
+ _2.hash_stable(ctx, hasher);
+ _3.hash_stable(ctx, hasher);
+ }
+}
+
+impl<T: HashStable<CTX>, CTX> HashStable<CTX> for [T] {
+ default fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.len().hash_stable(ctx, hasher);
+ for item in self {
+ item.hash_stable(ctx, hasher);
+ }
+ }
+}
+
+impl<CTX> HashStable<CTX> for [u8] {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.len().hash_stable(ctx, hasher);
+ hasher.write(self);
+ }
+}
+
+impl<T: HashStable<CTX>, CTX> HashStable<CTX> for Vec<T> {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ (&self[..]).hash_stable(ctx, hasher);
+ }
+}
+
+impl<K, V, R, CTX> HashStable<CTX> for indexmap::IndexMap<K, V, R>
+where
+ K: HashStable<CTX> + Eq + Hash,
+ V: HashStable<CTX>,
+ R: BuildHasher,
+{
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.len().hash_stable(ctx, hasher);
+ for kv in self {
+ kv.hash_stable(ctx, hasher);
+ }
+ }
+}
+
+impl<K, R, CTX> HashStable<CTX> for indexmap::IndexSet<K, R>
+where
+ K: HashStable<CTX> + Eq + Hash,
+ R: BuildHasher,
+{
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.len().hash_stable(ctx, hasher);
+ for key in self {
+ key.hash_stable(ctx, hasher);
+ }
+ }
+}
+
+impl<A, CTX> HashStable<CTX> for SmallVec<[A; 1]>
+where
+ A: HashStable<CTX>,
+{
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ (&self[..]).hash_stable(ctx, hasher);
+ }
+}
+
+impl<T: ?Sized + HashStable<CTX>, CTX> HashStable<CTX> for Box<T> {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ (**self).hash_stable(ctx, hasher);
+ }
+}
+
+impl<T: ?Sized + HashStable<CTX>, CTX> HashStable<CTX> for ::std::rc::Rc<T> {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ (**self).hash_stable(ctx, hasher);
+ }
+}
+
+impl<T: ?Sized + HashStable<CTX>, CTX> HashStable<CTX> for ::std::sync::Arc<T> {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ (**self).hash_stable(ctx, hasher);
+ }
+}
+
+impl<CTX> HashStable<CTX> for str {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.as_bytes().hash_stable(ctx, hasher);
+ }
+}
+
+impl<CTX> HashStable<CTX> for String {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ (&self[..]).hash_stable(hcx, hasher);
+ }
+}
+
+impl<HCX> ToStableHashKey<HCX> for String {
+ type KeyType = String;
+ #[inline]
+ fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType {
+ self.clone()
+ }
+}
+
+impl<CTX> HashStable<CTX> for bool {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ (if *self { 1u8 } else { 0u8 }).hash_stable(ctx, hasher);
+ }
+}
+
+impl<T, CTX> HashStable<CTX> for Option<T>
+where
+ T: HashStable<CTX>,
+{
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ if let Some(ref value) = *self {
+ 1u8.hash_stable(ctx, hasher);
+ value.hash_stable(ctx, hasher);
+ } else {
+ 0u8.hash_stable(ctx, hasher);
+ }
+ }
+}
+
+impl<T1, T2, CTX> HashStable<CTX> for Result<T1, T2>
+where
+ T1: HashStable<CTX>,
+ T2: HashStable<CTX>,
+{
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ mem::discriminant(self).hash_stable(ctx, hasher);
+ match *self {
+ Ok(ref x) => x.hash_stable(ctx, hasher),
+ Err(ref x) => x.hash_stable(ctx, hasher),
+ }
+ }
+}
+
+impl<'a, T, CTX> HashStable<CTX> for &'a T
+where
+ T: HashStable<CTX> + ?Sized,
+{
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ (**self).hash_stable(ctx, hasher);
+ }
+}
+
+impl<T, CTX> HashStable<CTX> for ::std::mem::Discriminant<T> {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, hasher: &mut StableHasher) {
+ ::std::hash::Hash::hash(self, hasher);
+ }
+}
+
+impl<T, CTX> HashStable<CTX> for ::std::ops::RangeInclusive<T>
+where
+ T: HashStable<CTX>,
+{
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.start().hash_stable(ctx, hasher);
+ self.end().hash_stable(ctx, hasher);
+ }
+}
+
+impl<I: vec::Idx, T, CTX> HashStable<CTX> for vec::IndexVec<I, T>
+where
+ T: HashStable<CTX>,
+{
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.len().hash_stable(ctx, hasher);
+ for v in &self.raw {
+ v.hash_stable(ctx, hasher);
+ }
+ }
+}
+
+impl<I: vec::Idx, CTX> HashStable<CTX> for bit_set::BitSet<I> {
+ fn hash_stable(&self, _ctx: &mut CTX, hasher: &mut StableHasher) {
+ ::std::hash::Hash::hash(self, hasher);
+ }
+}
+
+impl<R: vec::Idx, C: vec::Idx, CTX> HashStable<CTX> for bit_set::BitMatrix<R, C> {
+ fn hash_stable(&self, _ctx: &mut CTX, hasher: &mut StableHasher) {
+ ::std::hash::Hash::hash(self, hasher);
+ }
+}
+
+impl<T, CTX> HashStable<CTX> for bit_set::FiniteBitSet<T>
+where
+ T: HashStable<CTX> + bit_set::FiniteBitSetTy,
+{
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.0.hash_stable(hcx, hasher);
+ }
+}
+
+impl_stable_hash_via_hash!(::std::path::Path);
+impl_stable_hash_via_hash!(::std::path::PathBuf);
+
+impl<K, V, R, HCX> HashStable<HCX> for ::std::collections::HashMap<K, V, R>
+where
+ K: ToStableHashKey<HCX> + Eq,
+ V: HashStable<HCX>,
+ R: BuildHasher,
+{
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ stable_hash_reduce(hcx, hasher, self.iter(), self.len(), |hasher, hcx, (key, value)| {
+ let key = key.to_stable_hash_key(hcx);
+ key.hash_stable(hcx, hasher);
+ value.hash_stable(hcx, hasher);
+ });
+ }
+}
+
+impl<K, R, HCX> HashStable<HCX> for ::std::collections::HashSet<K, R>
+where
+ K: ToStableHashKey<HCX> + Eq,
+ R: BuildHasher,
+{
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ stable_hash_reduce(hcx, hasher, self.iter(), self.len(), |hasher, hcx, key| {
+ let key = key.to_stable_hash_key(hcx);
+ key.hash_stable(hcx, hasher);
+ });
+ }
+}
+
+impl<K, V, HCX> HashStable<HCX> for ::std::collections::BTreeMap<K, V>
+where
+ K: ToStableHashKey<HCX>,
+ V: HashStable<HCX>,
+{
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ stable_hash_reduce(hcx, hasher, self.iter(), self.len(), |hasher, hcx, (key, value)| {
+ let key = key.to_stable_hash_key(hcx);
+ key.hash_stable(hcx, hasher);
+ value.hash_stable(hcx, hasher);
+ });
+ }
+}
+
+impl<K, HCX> HashStable<HCX> for ::std::collections::BTreeSet<K>
+where
+ K: ToStableHashKey<HCX>,
+{
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ stable_hash_reduce(hcx, hasher, self.iter(), self.len(), |hasher, hcx, key| {
+ let key = key.to_stable_hash_key(hcx);
+ key.hash_stable(hcx, hasher);
+ });
+ }
+}
+
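+/// Hashes a collection whose iteration order is not stable (e.g. `HashMap`) in an
+/// order-independent way: every element is hashed into its own fresh `StableHasher`,
+/// and the per-element hashes are then combined with the commutative `wrapping_add`,
+/// so any iteration order yields the same final hash.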
+fn stable_hash_reduce<HCX, I, C, F>(
+ hcx: &mut HCX,
+ hasher: &mut StableHasher,
+ mut collection: C,
+ length: usize,
+ hash_function: F,
+) where
+ C: Iterator<Item = I>,
+ F: Fn(&mut StableHasher, &mut HCX, I),
+{
+ length.hash_stable(hcx, hasher);
+
+ match length {
+ 1 => {
+ hash_function(hasher, hcx, collection.next().unwrap());
+ }
+ _ => {
+ let hash = collection
+ .map(|value| {
+ let mut hasher = StableHasher::new();
+ hash_function(&mut hasher, hcx, value);
+ hasher.finish::<u128>()
+ })
+ .reduce(|accum, value| accum.wrapping_add(value));
+ hash.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+/// Controls what data we do or do not hash.
+/// Whenever a `HashStable` implementation caches its
+/// result, it needs to include `HashingControls` as part
+/// of the key, to ensure that it does not produce an incorrect
+/// result (for example, using a `Fingerprint` produced while
+/// hashing `Span`s when a `Fingerprint` without `Span`s is
+/// being requested).
+#[derive(Clone, Hash, Eq, PartialEq, Debug)]
+pub struct HashingControls {
+ pub hash_spans: bool,
+}
diff --git a/compiler/rustc_data_structures/src/stable_hasher/tests.rs b/compiler/rustc_data_structures/src/stable_hasher/tests.rs
new file mode 100644
index 000000000..b0d66c32a
--- /dev/null
+++ b/compiler/rustc_data_structures/src/stable_hasher/tests.rs
@@ -0,0 +1,163 @@
+use super::*;
+
+// The tests below compare the computed hashes to particular expected values
+// in order to test that we produce the same results on different platforms,
+// regardless of endianness and `usize` and `isize` size differences (this
+// of course assumes we run these tests on platforms that differ in those
+// ways). The expected values depend on the hashing algorithm used, so they
+// need to be updated whenever StableHasher changes its hashing algorithm.
+
+#[test]
+fn test_hash_integers() {
+ // Test that integers are handled consistently across platforms.
+ let test_u8 = 0xAB_u8;
+ let test_u16 = 0xFFEE_u16;
+ let test_u32 = 0x445577AA_u32;
+ let test_u64 = 0x01234567_13243546_u64;
+ let test_u128 = 0x22114433_66557788_99AACCBB_EEDDFF77_u128;
+ let test_usize = 0xD0C0B0A0_usize;
+
+ let test_i8 = -100_i8;
+ let test_i16 = -200_i16;
+ let test_i32 = -300_i32;
+ let test_i64 = -400_i64;
+ let test_i128 = -500_i128;
+ let test_isize = -600_isize;
+
+ let mut h = StableHasher::new();
+ test_u8.hash(&mut h);
+ test_u16.hash(&mut h);
+ test_u32.hash(&mut h);
+ test_u64.hash(&mut h);
+ test_u128.hash(&mut h);
+ test_usize.hash(&mut h);
+ test_i8.hash(&mut h);
+ test_i16.hash(&mut h);
+ test_i32.hash(&mut h);
+ test_i64.hash(&mut h);
+ test_i128.hash(&mut h);
+ test_isize.hash(&mut h);
+
+ // This depends on the hashing algorithm. See note at top of file.
+ let expected = (1784307454142909076, 11471672289340283879);
+
+ assert_eq!(h.finalize(), expected);
+}
+
+#[test]
+fn test_hash_usize() {
+ // Test that usize specifically is handled consistently across platforms.
+ let test_usize = 0xABCDEF01_usize;
+
+ let mut h = StableHasher::new();
+ test_usize.hash(&mut h);
+
+ // This depends on the hashing algorithm. See note at top of file.
+ let expected = (5798740672699530587, 11186240177685111648);
+
+ assert_eq!(h.finalize(), expected);
+}
+
+#[test]
+fn test_hash_isize() {
+ // Test that isize specifically is handled consistently across platforms.
+ let test_isize = -7_isize;
+
+ let mut h = StableHasher::new();
+ test_isize.hash(&mut h);
+
+ // This depends on the hashing algorithm. See note at top of file.
+ let expected = (2789913510339652884, 674280939192711005);
+
+ assert_eq!(h.finalize(), expected);
+}
+
+fn hash<T: HashStable<()>>(t: &T) -> u128 {
+ let mut h = StableHasher::new();
+ let ctx = &mut ();
+ t.hash_stable(ctx, &mut h);
+ h.finish()
+}
+
+// Check that bit set hash includes the domain size.
+#[test]
+fn test_hash_bit_set() {
+ use rustc_index::bit_set::BitSet;
+ let a: BitSet<usize> = BitSet::new_empty(1);
+ let b: BitSet<usize> = BitSet::new_empty(2);
+ assert_ne!(a, b);
+ assert_ne!(hash(&a), hash(&b));
+}
+
+// Check that bit matrix hash includes the matrix dimensions.
+#[test]
+fn test_hash_bit_matrix() {
+ use rustc_index::bit_set::BitMatrix;
+ let a: BitMatrix<usize, usize> = BitMatrix::new(1, 1);
+ let b: BitMatrix<usize, usize> = BitMatrix::new(1, 2);
+ assert_ne!(a, b);
+ assert_ne!(hash(&a), hash(&b));
+}
+
+// Check that exchanging the value of two adjacent fields changes the hash.
+#[test]
+fn test_attribute_permutation() {
+ macro_rules! test_type {
+ ($ty: ty) => {{
+ struct Foo {
+ a: $ty,
+ b: $ty,
+ }
+
+ impl<CTX> HashStable<CTX> for Foo {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.a.hash_stable(hcx, hasher);
+ self.b.hash_stable(hcx, hasher);
+ }
+ }
+
+ #[allow(overflowing_literals)]
+ let mut item = Foo { a: 0xFF, b: 0xFF_FF };
+ let hash_a = hash(&item);
+ std::mem::swap(&mut item.a, &mut item.b);
+ let hash_b = hash(&item);
+ assert_ne!(
+ hash_a,
+ hash_b,
+ "The hash stayed the same after values were swapped for type `{}`!",
+ stringify!($ty)
+ );
+ }};
+ }
+
+ test_type!(u16);
+ test_type!(u32);
+ test_type!(u64);
+ test_type!(u128);
+
+ test_type!(i16);
+ test_type!(i32);
+ test_type!(i64);
+ test_type!(i128);
+}
+
+// Check that the `isize` hashing optimization does not produce the same hash when permuting two
+// values.
+#[test]
+fn test_isize_compression() {
+ fn check_hash(a: u64, b: u64) {
+ let hash_a = hash(&(a as isize, b as isize));
+ let hash_b = hash(&(b as isize, a as isize));
+ assert_ne!(
+ hash_a, hash_b,
+ "The hash stayed the same when permuting values `{a}` and `{b}!",
+ );
+ }
+
+ check_hash(0xAA, 0xAAAA);
+ check_hash(0xFF, 0xFFFF);
+ check_hash(0xAAAA, 0xAAAAAA);
+ check_hash(0xAAAAAA, 0xAAAAAAAA);
+ check_hash(0xFF, 0xFFFFFFFFFFFFFFFF);
+ check_hash(u64::MAX /* -1 */, 1);
+}
diff --git a/compiler/rustc_data_structures/src/stack.rs b/compiler/rustc_data_structures/src/stack.rs
new file mode 100644
index 000000000..3bdd67512
--- /dev/null
+++ b/compiler/rustc_data_structures/src/stack.rs
@@ -0,0 +1,18 @@
+// This is the amount of bytes that need to be left on the stack before increasing the size.
+// It must be at least as large as the stack required by any code that does not call
+// `ensure_sufficient_stack`.
+const RED_ZONE: usize = 100 * 1024; // 100k
+
+// Only the first stack that is pushed grows exponentially (2^n * STACK_PER_RECURSION) from then
+// on. This constant has performance-relevant characteristics. Don't set it too high.
+const STACK_PER_RECURSION: usize = 1 * 1024 * 1024; // 1MB
+
+/// Grows the stack on demand to prevent stack overflow. Call this in strategic locations
+/// to "break up" recursive calls. E.g. almost any call to `visit_expr` or equivalent can benefit
+/// from this.
+///
+/// Should not be sprinkled around carelessly, as it causes a little bit of overhead.
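+///
+/// A sketch of typical use (the surrounding visitor is made up for illustration):
+///
+/// ```ignore (illustrative)
+/// fn visit_expr(&mut self, e: &Expr) {
+///     ensure_sufficient_stack(|| {
+///         // The recursive work happens inside the closure; the stack is
+///         // grown beforehand if fewer than RED_ZONE bytes remain.
+///         self.walk_nested_exprs(e)
+///     })
+/// }
+/// ```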
+#[inline]
+pub fn ensure_sufficient_stack<R>(f: impl FnOnce() -> R) -> R {
+ stacker::maybe_grow(RED_ZONE, STACK_PER_RECURSION, f)
+}
diff --git a/compiler/rustc_data_structures/src/steal.rs b/compiler/rustc_data_structures/src/steal.rs
new file mode 100644
index 000000000..a3ece6550
--- /dev/null
+++ b/compiler/rustc_data_structures/src/steal.rs
@@ -0,0 +1,55 @@
+use crate::stable_hasher::{HashStable, StableHasher};
+use crate::sync::{MappedReadGuard, ReadGuard, RwLock};
+
+/// The `Steal` struct is intended to be used as the value for a query.
+/// Specifically, we sometimes have queries (*cough* MIR *cough*)
+/// where we create a large, complex value that we want to iteratively
+/// update (e.g., optimize). We could clone the value for each
+/// optimization, but that'd be expensive. And yet we don't just want
+/// to mutate it in place, because that would spoil the idea that
+/// queries are these pure functions that produce an immutable value
+/// (since if you did the query twice, you could observe the mutations).
+/// So instead we have the query produce a `&'tcx Steal<mir::Body<'tcx>>`
+/// (to be very specific). Now we can read from this
+/// as much as we want (using `borrow()`), but you can also
+/// `steal()`. Once you steal, any further attempt to read will panic.
+/// Therefore, we know that -- assuming no ICE -- nobody is observing
+/// the fact that the MIR was updated.
+///
+/// Obviously, whenever you have a query that yields a `Steal` value,
+/// you must treat it with caution, and make sure that you know that
+/// -- once the value is stolen -- it will never be read from again.
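+///
+/// A sketch of the intended read-then-steal flow (the value is made up for
+/// illustration):
+///
+/// ```ignore (illustrative)
+/// let body = Steal::new(mir_body);
+/// let _ = body.borrow();    // read as often as needed
+/// let owned = body.steal(); // take ownership exactly once, at the end
+/// // any later `body.borrow()` panics: "attempted to read from stolen value"
+/// ```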
+//
+// FIXME(#41710): what is the best way to model linear queries?
+#[derive(Debug)]
+pub struct Steal<T> {
+ value: RwLock<Option<T>>,
+}
+
+impl<T> Steal<T> {
+ pub fn new(value: T) -> Self {
+ Steal { value: RwLock::new(Some(value)) }
+ }
+
+ #[track_caller]
+ pub fn borrow(&self) -> MappedReadGuard<'_, T> {
+ let borrow = self.value.borrow();
+ if borrow.is_none() {
+ panic!("attempted to read from stolen value: {}", std::any::type_name::<T>());
+ }
+ ReadGuard::map(borrow, |opt| opt.as_ref().unwrap())
+ }
+
+ #[track_caller]
+ pub fn steal(&self) -> T {
+ let value_ref = &mut *self.value.try_write().expect("stealing value which is locked");
+ let value = value_ref.take();
+ value.expect("attempt to steal from stolen value")
+ }
+}
+
+impl<CTX, T: HashStable<CTX>> HashStable<CTX> for Steal<T> {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.borrow().hash_stable(hcx, hasher);
+ }
+}
diff --git a/compiler/rustc_data_structures/src/svh.rs b/compiler/rustc_data_structures/src/svh.rs
new file mode 100644
index 000000000..61654b9e8
--- /dev/null
+++ b/compiler/rustc_data_structures/src/svh.rs
@@ -0,0 +1,69 @@
+//! Calculation and management of a Strict Version Hash for crates
+//!
+//! The SVH is used for incremental compilation to track when HIR
+//! nodes have changed between compilations, and also to detect
+//! mismatches where we have two versions of the same crate that were
+//! compiled from distinct sources.
+
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::fmt;
+use std::hash::{Hash, Hasher};
+
+use crate::stable_hasher;
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct Svh {
+ hash: u64,
+}
+
+impl Svh {
+ /// Creates a new `Svh` given the hash. If you actually want to
+ /// compute the SVH from some HIR, you want the `calculate_svh`
+ /// function found in `rustc_incremental`.
+ pub fn new(hash: u64) -> Svh {
+ Svh { hash }
+ }
+
+ pub fn as_u64(&self) -> u64 {
+ self.hash
+ }
+
+ pub fn to_string(&self) -> String {
+ format!("{:016x}", self.hash)
+ }
+}
+
+impl Hash for Svh {
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: Hasher,
+ {
+ self.hash.to_le().hash(state);
+ }
+}
+
+impl fmt::Display for Svh {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad(&self.to_string())
+ }
+}
+
+impl<S: Encoder> Encodable<S> for Svh {
+ fn encode(&self, s: &mut S) {
+ s.emit_u64(self.as_u64().to_le());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for Svh {
+ fn decode(d: &mut D) -> Svh {
+ Svh::new(u64::from_le(d.read_u64()))
+ }
+}
+
+impl<T> stable_hasher::HashStable<T> for Svh {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut T, hasher: &mut stable_hasher::StableHasher) {
+ let Svh { hash } = *self;
+ hash.hash_stable(ctx, hasher);
+ }
+}
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
new file mode 100644
index 000000000..52952a793
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -0,0 +1,630 @@
+//! This module defines types which are thread safe if cfg!(parallel_compiler) is true.
+//!
+//! `Lrc` is an alias of `Arc` if cfg!(parallel_compiler) is true, `Rc` otherwise.
+//!
+//! `Lock` is a mutex.
+//! It internally uses `parking_lot::Mutex` if cfg!(parallel_compiler) is true,
+//! `RefCell` otherwise.
+//!
+//! `RwLock` is a read-write lock.
+//! It internally uses `parking_lot::RwLock` if cfg!(parallel_compiler) is true,
+//! `RefCell` otherwise.
+//!
+//! `MTLock` is a mutex which disappears if cfg!(parallel_compiler) is false.
+//!
+//! `MTRef` is an immutable reference if cfg!(parallel_compiler), and a mutable reference otherwise.
+//!
+//! `rustc_erase_owner!` erases an OwningRef owner into Erased or Erased + Send + Sync
+//! depending on the value of cfg!(parallel_compiler).
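+//!
+//! As a sketch, the same code compiles in both modes; only the underlying
+//! primitive changes (`RefCell` vs. `parking_lot::Mutex` here):
+//!
+//! ```ignore (illustrative)
+//! use rustc_data_structures::sync::{Lock, Lrc};
+//!
+//! let shared = Lrc::new(Lock::new(Vec::new()));
+//! shared.lock().push(1);
+//! assert_eq!(shared.lock().len(), 1);
+//! ```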
+
+use crate::owning_ref::{Erased, OwningRef};
+use std::collections::HashMap;
+use std::hash::{BuildHasher, Hash};
+use std::ops::{Deref, DerefMut};
+use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
+
+pub use std::sync::atomic::Ordering;
+pub use std::sync::atomic::Ordering::SeqCst;
+
+cfg_if! {
+ if #[cfg(not(parallel_compiler))] {
+ pub auto trait Send {}
+ pub auto trait Sync {}
+
+ impl<T: ?Sized> Send for T {}
+ impl<T: ?Sized> Sync for T {}
+
+ #[macro_export]
+ macro_rules! rustc_erase_owner {
+ ($v:expr) => {
+ $v.erase_owner()
+ }
+ }
+
+ use std::ops::Add;
+
+ /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
+ /// It has explicit ordering arguments and is only intended for use with
+ /// the native atomic types.
+ /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc., type aliases
+ /// as it's not intended to be used separately.
+ #[derive(Debug)]
+ pub struct Atomic<T: Copy>(Cell<T>);
+
+ impl<T: Copy> Atomic<T> {
+ #[inline]
+ pub fn new(v: T) -> Self {
+ Atomic(Cell::new(v))
+ }
+ }
+
+ impl<T: Copy> Atomic<T> {
+ #[inline]
+ pub fn into_inner(self) -> T {
+ self.0.into_inner()
+ }
+
+ #[inline]
+ pub fn load(&self, _: Ordering) -> T {
+ self.0.get()
+ }
+
+ #[inline]
+ pub fn store(&self, val: T, _: Ordering) {
+ self.0.set(val)
+ }
+
+ #[inline]
+ pub fn swap(&self, val: T, _: Ordering) -> T {
+ self.0.replace(val)
+ }
+ }
+
+ impl<T: Copy + PartialEq> Atomic<T> {
+ #[inline]
+ pub fn compare_exchange(&self,
+ current: T,
+ new: T,
+ _: Ordering,
+ _: Ordering)
+ -> Result<T, T> {
+ let read = self.0.get();
+ if read == current {
+ self.0.set(new);
+ Ok(read)
+ } else {
+ Err(read)
+ }
+ }
+ }
+
+ impl<T: Add<Output=T> + Copy> Atomic<T> {
+ #[inline]
+ pub fn fetch_add(&self, val: T, _: Ordering) -> T {
+ let old = self.0.get();
+ self.0.set(old + val);
+ old
+ }
+ }
+
+ pub type AtomicUsize = Atomic<usize>;
+ pub type AtomicBool = Atomic<bool>;
+ pub type AtomicU32 = Atomic<u32>;
+ pub type AtomicU64 = Atomic<u64>;
+
+ pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
+ where A: FnOnce() -> RA,
+ B: FnOnce() -> RB
+ {
+ (oper_a(), oper_b())
+ }
+
+ #[macro_export]
+ macro_rules! parallel {
+ ($($blocks:tt),*) => {
+ // We catch panics here ensuring that all the blocks execute.
+ // This makes behavior consistent with the parallel compiler.
+ let mut panic = None;
+ $(
+ if let Err(p) = ::std::panic::catch_unwind(
+ ::std::panic::AssertUnwindSafe(|| $blocks)
+ ) {
+ if panic.is_none() {
+ panic = Some(p);
+ }
+ }
+ )*
+ if let Some(panic) = panic {
+ ::std::panic::resume_unwind(panic);
+ }
+ }
+ }
+
+ pub use std::iter::Iterator as ParallelIterator;
+
+ pub fn par_iter<T: IntoIterator>(t: T) -> T::IntoIter {
+ t.into_iter()
+ }
+
+ pub fn par_for_each_in<T: IntoIterator>(t: T, mut for_each: impl FnMut(T::Item) + Sync + Send) {
+ // We catch panics here ensuring that all the loop iterations execute.
+ // This makes behavior consistent with the parallel compiler.
+ let mut panic = None;
+ t.into_iter().for_each(|i| {
+ if let Err(p) = catch_unwind(AssertUnwindSafe(|| for_each(i))) {
+ if panic.is_none() {
+ panic = Some(p);
+ }
+ }
+ });
+ if let Some(panic) = panic {
+ resume_unwind(panic);
+ }
+ }
+
+ pub type MetadataRef = OwningRef<Box<dyn Erased>, [u8]>;
+
+ pub use std::rc::Rc as Lrc;
+ pub use std::rc::Weak as Weak;
+ pub use std::cell::Ref as ReadGuard;
+ pub use std::cell::Ref as MappedReadGuard;
+ pub use std::cell::RefMut as WriteGuard;
+ pub use std::cell::RefMut as MappedWriteGuard;
+ pub use std::cell::RefMut as LockGuard;
+ pub use std::cell::RefMut as MappedLockGuard;
+
+ pub use std::cell::OnceCell;
+
+ use std::cell::RefCell as InnerRwLock;
+ use std::cell::RefCell as InnerLock;
+
+ use std::cell::Cell;
+
+ #[derive(Debug)]
+ pub struct WorkerLocal<T>(OneThread<T>);
+
+ impl<T> WorkerLocal<T> {
+ /// Creates a new worker local where the `f` closure computes the
+ /// value this worker local should take for each thread in the thread pool.
+ #[inline]
+ pub fn new<F: FnMut(usize) -> T>(mut f: F) -> WorkerLocal<T> {
+ WorkerLocal(OneThread::new(f(0)))
+ }
+
+ /// Returns the worker-local value for each thread
+ #[inline]
+ pub fn into_inner(self) -> Vec<T> {
+ vec![OneThread::into_inner(self.0)]
+ }
+ }
+
+ impl<T> Deref for WorkerLocal<T> {
+ type Target = T;
+
+ #[inline(always)]
+ fn deref(&self) -> &T {
+ &*self.0
+ }
+ }
+
+ pub type MTRef<'a, T> = &'a mut T;
+
+ #[derive(Debug, Default)]
+ pub struct MTLock<T>(T);
+
+ impl<T> MTLock<T> {
+ #[inline(always)]
+ pub fn new(inner: T) -> Self {
+ MTLock(inner)
+ }
+
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.0
+ }
+
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ &mut self.0
+ }
+
+ #[inline(always)]
+ pub fn lock(&self) -> &T {
+ &self.0
+ }
+
+ #[inline(always)]
+ pub fn lock_mut(&mut self) -> &mut T {
+ &mut self.0
+ }
+ }
+
+ // FIXME: Probably a bad idea (in the threaded case)
+ impl<T: Clone> Clone for MTLock<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ MTLock(self.0.clone())
+ }
+ }
+ } else {
+ pub use std::marker::Send as Send;
+ pub use std::marker::Sync as Sync;
+
+ pub use parking_lot::RwLockReadGuard as ReadGuard;
+ pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
+ pub use parking_lot::RwLockWriteGuard as WriteGuard;
+ pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
+
+ pub use parking_lot::MutexGuard as LockGuard;
+ pub use parking_lot::MappedMutexGuard as MappedLockGuard;
+
+ pub use std::sync::OnceLock as OnceCell;
+
+ pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
+
+ pub use std::sync::Arc as Lrc;
+ pub use std::sync::Weak as Weak;
+
+ pub type MTRef<'a, T> = &'a T;
+
+ #[derive(Debug, Default)]
+ pub struct MTLock<T>(Lock<T>);
+
+ impl<T> MTLock<T> {
+ #[inline(always)]
+ pub fn new(inner: T) -> Self {
+ MTLock(Lock::new(inner))
+ }
+
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.0.into_inner()
+ }
+
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.0.get_mut()
+ }
+
+ #[inline(always)]
+ pub fn lock(&self) -> LockGuard<'_, T> {
+ self.0.lock()
+ }
+
+ #[inline(always)]
+ pub fn lock_mut(&self) -> LockGuard<'_, T> {
+ self.lock()
+ }
+ }
+
+ use parking_lot::Mutex as InnerLock;
+ use parking_lot::RwLock as InnerRwLock;
+
+ use std::thread;
+ pub use rayon::{join, scope};
+
+ /// Runs a list of blocks in parallel. The first block is executed immediately on
+ /// the current thread. Use that for the longest running block.
+ #[macro_export]
+ macro_rules! parallel {
+ (impl $fblock:tt [$($c:tt,)*] [$block:tt $(, $rest:tt)*]) => {
+ parallel!(impl $fblock [$block, $($c,)*] [$($rest),*])
+ };
+ (impl $fblock:tt [$($blocks:tt,)*] []) => {
+ ::rustc_data_structures::sync::scope(|s| {
+ $(
+ s.spawn(|_| $blocks);
+ )*
+ $fblock;
+ })
+ };
+ ($fblock:tt, $($blocks:tt),*) => {
+ // Reverse the order of the later blocks since Rayon executes them in reverse order
+ // when using a single thread. This ensures the execution order matches that
+ // of a single-threaded rustc.
+ parallel!(impl $fblock [] [$($blocks),*]);
+ };
+ }
+
+ pub use rayon_core::WorkerLocal;
+
+ pub use rayon::iter::ParallelIterator;
+ use rayon::iter::IntoParallelIterator;
+
+ pub fn par_iter<T: IntoParallelIterator>(t: T) -> T::Iter {
+ t.into_par_iter()
+ }
+
+ pub fn par_for_each_in<T: IntoParallelIterator>(
+ t: T,
+ for_each: impl Fn(T::Item) + Sync + Send,
+ ) {
+ let ps: Vec<_> = t.into_par_iter().map(|i| catch_unwind(AssertUnwindSafe(|| for_each(i)))).collect();
+ ps.into_iter().for_each(|p| if let Err(panic) = p {
+ resume_unwind(panic)
+ });
+ }
+
+ pub type MetadataRef = OwningRef<Box<dyn Erased + Send + Sync>, [u8]>;
+
+ /// This makes locks panic if they are already held.
+ /// It is only useful when you are running in a single thread.
+ const ERROR_CHECKING: bool = false;
+
+ #[macro_export]
+ macro_rules! rustc_erase_owner {
+ ($v:expr) => {{
+ let v = $v;
+ ::rustc_data_structures::sync::assert_send_val(&v);
+ v.erase_send_sync_owner()
+ }}
+ }
+ }
+}
+
+pub fn assert_sync<T: ?Sized + Sync>() {}
+pub fn assert_send<T: ?Sized + Send>() {}
+pub fn assert_send_val<T: ?Sized + Send>(_t: &T) {}
+pub fn assert_send_sync_val<T: ?Sized + Sync + Send>(_t: &T) {}
+
+pub trait HashMapExt<K, V> {
+ /// Same as HashMap::insert, but it may panic if there's already an
+ /// entry for `key` with a value not equal to `value`.
+ fn insert_same(&mut self, key: K, value: V);
+}
+
+impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> {
+ fn insert_same(&mut self, key: K, value: V) {
+ self.entry(key).and_modify(|old| assert!(*old == value)).or_insert(value);
+ }
+}
+
+#[derive(Debug)]
+pub struct Lock<T>(InnerLock<T>);
+
+impl<T> Lock<T> {
+ #[inline(always)]
+ pub fn new(inner: T) -> Self {
+ Lock(InnerLock::new(inner))
+ }
+
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.0.into_inner()
+ }
+
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.0.get_mut()
+ }
+
+ #[cfg(parallel_compiler)]
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+ self.0.try_lock()
+ }
+
+ #[cfg(not(parallel_compiler))]
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<LockGuard<'_, T>> {
+ self.0.try_borrow_mut().ok()
+ }
+
+ #[cfg(parallel_compiler)]
+ #[inline(always)]
+ pub fn lock(&self) -> LockGuard<'_, T> {
+ if ERROR_CHECKING {
+ self.0.try_lock().expect("lock was already held")
+ } else {
+ self.0.lock()
+ }
+ }
+
+ #[cfg(not(parallel_compiler))]
+ #[inline(always)]
+ pub fn lock(&self) -> LockGuard<'_, T> {
+ self.0.borrow_mut()
+ }
+
+ #[inline(always)]
+ pub fn with_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+ f(&mut *self.lock())
+ }
+
+ #[inline(always)]
+ pub fn borrow(&self) -> LockGuard<'_, T> {
+ self.lock()
+ }
+
+ #[inline(always)]
+ pub fn borrow_mut(&self) -> LockGuard<'_, T> {
+ self.lock()
+ }
+}
+
+impl<T: Default> Default for Lock<T> {
+ #[inline]
+ fn default() -> Self {
+ Lock::new(T::default())
+ }
+}
+
+// FIXME: Probably a bad idea
+impl<T: Clone> Clone for Lock<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ Lock::new(self.borrow().clone())
+ }
+}
+
+#[derive(Debug, Default)]
+pub struct RwLock<T>(InnerRwLock<T>);
+
+impl<T> RwLock<T> {
+ #[inline(always)]
+ pub fn new(inner: T) -> Self {
+ RwLock(InnerRwLock::new(inner))
+ }
+
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.0.into_inner()
+ }
+
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.0.get_mut()
+ }
+
+ #[cfg(not(parallel_compiler))]
+ #[inline(always)]
+ pub fn read(&self) -> ReadGuard<'_, T> {
+ self.0.borrow()
+ }
+
+ #[cfg(parallel_compiler)]
+ #[inline(always)]
+ pub fn read(&self) -> ReadGuard<'_, T> {
+ if ERROR_CHECKING {
+ self.0.try_read().expect("lock was already held")
+ } else {
+ self.0.read()
+ }
+ }
+
+ #[inline(always)]
+ pub fn with_read_lock<F: FnOnce(&T) -> R, R>(&self, f: F) -> R {
+ f(&*self.read())
+ }
+
+ #[cfg(not(parallel_compiler))]
+ #[inline(always)]
+ pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
+ self.0.try_borrow_mut().map_err(|_| ())
+ }
+
+ #[cfg(parallel_compiler)]
+ #[inline(always)]
+ pub fn try_write(&self) -> Result<WriteGuard<'_, T>, ()> {
+ self.0.try_write().ok_or(())
+ }
+
+ #[cfg(not(parallel_compiler))]
+ #[inline(always)]
+ pub fn write(&self) -> WriteGuard<'_, T> {
+ self.0.borrow_mut()
+ }
+
+ #[cfg(parallel_compiler)]
+ #[inline(always)]
+ pub fn write(&self) -> WriteGuard<'_, T> {
+ if ERROR_CHECKING {
+ self.0.try_write().expect("lock was already held")
+ } else {
+ self.0.write()
+ }
+ }
+
+ #[inline(always)]
+ pub fn with_write_lock<F: FnOnce(&mut T) -> R, R>(&self, f: F) -> R {
+ f(&mut *self.write())
+ }
+
+ #[inline(always)]
+ pub fn borrow(&self) -> ReadGuard<'_, T> {
+ self.read()
+ }
+
+ #[inline(always)]
+ pub fn borrow_mut(&self) -> WriteGuard<'_, T> {
+ self.write()
+ }
+
+ #[cfg(not(parallel_compiler))]
+ #[inline(always)]
+ pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
+ ReadGuard::clone(rg)
+ }
+
+ #[cfg(parallel_compiler)]
+ #[inline(always)]
+ pub fn clone_guard<'a>(rg: &ReadGuard<'a, T>) -> ReadGuard<'a, T> {
+ ReadGuard::rwlock(&rg).read()
+ }
+
+ #[cfg(not(parallel_compiler))]
+ #[inline(always)]
+ pub fn leak(&self) -> &T {
+ ReadGuard::leak(self.read())
+ }
+
+ #[cfg(parallel_compiler)]
+ #[inline(always)]
+ pub fn leak(&self) -> &T {
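+ // Forgetting the guard below keeps the read lock held forever, so the
+ // data can never be written to again and handing out a plain `&T` is
+ // sound.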
+ let guard = self.read();
+ let ret = unsafe { &*(&*guard as *const T) };
+ std::mem::forget(guard);
+ ret
+ }
+}
+
+// FIXME: Probably a bad idea
+impl<T: Clone> Clone for RwLock<T> {
+ #[inline]
+ fn clone(&self) -> Self {
+ RwLock::new(self.borrow().clone())
+ }
+}
+
+/// A type which only allows its inner value to be used in one thread.
+/// It will panic if it is used on multiple threads (the check is only
+/// performed when `parallel_compiler` is enabled).
+#[derive(Debug)]
+pub struct OneThread<T> {
+ #[cfg(parallel_compiler)]
+ thread: thread::ThreadId,
+ inner: T,
+}
+
+#[cfg(parallel_compiler)]
+unsafe impl<T> std::marker::Sync for OneThread<T> {}
+#[cfg(parallel_compiler)]
+unsafe impl<T> std::marker::Send for OneThread<T> {}
+
+impl<T> OneThread<T> {
+ #[inline(always)]
+ fn check(&self) {
+ #[cfg(parallel_compiler)]
+ assert_eq!(thread::current().id(), self.thread);
+ }
+
+ #[inline(always)]
+ pub fn new(inner: T) -> Self {
+ OneThread {
+ #[cfg(parallel_compiler)]
+ thread: thread::current().id(),
+ inner,
+ }
+ }
+
+ #[inline(always)]
+ pub fn into_inner(value: Self) -> T {
+ value.check();
+ value.inner
+ }
+}
+
+impl<T> Deref for OneThread<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ self.check();
+ &self.inner
+ }
+}
+
+impl<T> DerefMut for OneThread<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ self.check();
+ &mut self.inner
+ }
+}
diff --git a/compiler/rustc_data_structures/src/tagged_ptr.rs b/compiler/rustc_data_structures/src/tagged_ptr.rs
new file mode 100644
index 000000000..651bc556c
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr.rs
@@ -0,0 +1,168 @@
+//! This module implements tagged pointers.
+//!
+//! In order to utilize the pointer packing, you must have two types: a pointer,
+//! and a tag.
+//!
+//! The pointer must implement the `Pointer` trait, with the primary requirement
+//! being conversion to and from a usize. Note that the pointer must be
+//! dereferenceable, so raw pointers generally cannot implement the `Pointer`
+//! trait. This implies that the pointer must also be nonzero.
+//!
+//! Many common pointer types already implement the `Pointer` trait.
+//!
+//! The tag must implement the `Tag` trait. We assert that the tag and `Pointer`
+//! are compatible at compile time.
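+//!
+//! As a sketch of what a tag can look like (the enum is made up for
+//! illustration; any `Copy` type with a faithful `usize` round-trip works):
+//!
+//! ```ignore (illustrative)
+//! #[derive(Copy, Clone)]
+//! enum Mutability { Not, Mut }
+//!
+//! unsafe impl Tag for Mutability {
+//!     // Two variants fit in a single bit.
+//!     const BITS: usize = 1;
+//!     fn into_usize(self) -> usize { self as usize }
+//!     unsafe fn from_usize(tag: usize) -> Self {
+//!         if tag == 0 { Mutability::Not } else { Mutability::Mut }
+//!     }
+//! }
+//! ```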
+
+use std::mem::ManuallyDrop;
+use std::ops::Deref;
+use std::rc::Rc;
+use std::sync::Arc;
+
+mod copy;
+mod drop;
+
+pub use copy::CopyTaggedPtr;
+pub use drop::TaggedPtr;
+
+/// This describes the pointer type encapsulated by TaggedPtr.
+///
+/// # Safety
+///
+/// The usize returned from `into_usize` must be a valid, dereferenceable,
+/// pointer to `<Self as Deref>::Target`. Note that pointers to `Pointee` must
+/// be thin, even though `Pointee` may not be sized.
+///
+/// Note that the returned pointer from `into_usize` should be castable to `&mut
+/// <Self as Deref>::Target` if `Pointer: DerefMut`.
+///
+/// The BITS constant must be correct. At least `BITS` bits, least-significant,
+/// must be zero on all returned pointers from `into_usize`.
+///
+/// For example, if the alignment of `Pointee` is 2, then `BITS` should be 1.
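+/// Likewise, a `Pointee` with alignment 4 (e.g. `u32` on mainstream targets)
+/// gives `BITS == 2`, leaving the two least-significant bits free for a tag.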
+pub unsafe trait Pointer: Deref {
+ /// Most likely the value you want to use here is the following, unless
+ /// your Pointee type is unsized (e.g., `ty::List<T>` in rustc) in which
+ /// case you'll need to manually figure out what the right type to pass to
+ /// align_of is.
+ ///
+ /// ```ignore UNSOLVED (what to do about the Self)
+ /// # use std::ops::Deref;
+ /// std::mem::align_of::<<Self as Deref>::Target>().trailing_zeros() as usize;
+ /// ```
+ const BITS: usize;
+ fn into_usize(self) -> usize;
+
+ /// # Safety
+ ///
+ /// The passed `ptr` must be returned from `into_usize`.
+ ///
+ /// This acts as `ptr::read` semantically, it should not be called more than
+ /// once on non-`Copy` `Pointer`s.
+ unsafe fn from_usize(ptr: usize) -> Self;
+
+ /// This provides a reference to the `Pointer` itself, rather than the
+ /// `Deref::Target`. It is used for cases where we want to call methods that
+ /// may be implemented differently for the Pointer than the Pointee (e.g.,
+ /// `Rc::clone` vs cloning the inner value).
+ ///
+ /// # Safety
+ ///
+ /// The passed `ptr` must be returned from `into_usize`.
+ unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R;
+}
+
+/// This describes tags that the `TaggedPtr` struct can hold.
+///
+/// # Safety
+///
+/// The BITS constant must be correct.
+///
+/// No more than `BITS` least significant bits may be set in the returned usize.
+pub unsafe trait Tag: Copy {
+ const BITS: usize;
+
+ fn into_usize(self) -> usize;
+
+ /// # Safety
+ ///
+ /// The passed `tag` must be returned from `into_usize`.
+ unsafe fn from_usize(tag: usize) -> Self;
+}
+
+unsafe impl<T> Pointer for Box<T> {
+ const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+ #[inline]
+ fn into_usize(self) -> usize {
+ Box::into_raw(self) as usize
+ }
+ #[inline]
+ unsafe fn from_usize(ptr: usize) -> Self {
+ Box::from_raw(ptr as *mut T)
+ }
+ unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+ let raw = ManuallyDrop::new(Self::from_usize(ptr));
+ f(&raw)
+ }
+}
+
+unsafe impl<T> Pointer for Rc<T> {
+ const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+ #[inline]
+ fn into_usize(self) -> usize {
+ Rc::into_raw(self) as usize
+ }
+ #[inline]
+ unsafe fn from_usize(ptr: usize) -> Self {
+ Rc::from_raw(ptr as *const T)
+ }
+ unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+ let raw = ManuallyDrop::new(Self::from_usize(ptr));
+ f(&raw)
+ }
+}
+
+unsafe impl<T> Pointer for Arc<T> {
+ const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+ #[inline]
+ fn into_usize(self) -> usize {
+ Arc::into_raw(self) as usize
+ }
+ #[inline]
+ unsafe fn from_usize(ptr: usize) -> Self {
+ Arc::from_raw(ptr as *const T)
+ }
+ unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+ let raw = ManuallyDrop::new(Self::from_usize(ptr));
+ f(&raw)
+ }
+}
+
+unsafe impl<'a, T: 'a> Pointer for &'a T {
+ const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+ #[inline]
+ fn into_usize(self) -> usize {
+ self as *const T as usize
+ }
+ #[inline]
+ unsafe fn from_usize(ptr: usize) -> Self {
+ &*(ptr as *const T)
+ }
+ unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+ f(&*(&ptr as *const usize as *const Self))
+ }
+}
+
+unsafe impl<'a, T: 'a> Pointer for &'a mut T {
+ const BITS: usize = std::mem::align_of::<T>().trailing_zeros() as usize;
+ #[inline]
+ fn into_usize(self) -> usize {
+ self as *mut T as usize
+ }
+ #[inline]
+ unsafe fn from_usize(ptr: usize) -> Self {
+ &mut *(ptr as *mut T)
+ }
+ unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+ f(&*(&ptr as *const usize as *const Self))
+ }
+}
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
new file mode 100644
index 000000000..e1d3e0bd3
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs
@@ -0,0 +1,185 @@
+use super::{Pointer, Tag};
+use crate::stable_hasher::{HashStable, StableHasher};
+use std::fmt;
+use std::marker::PhantomData;
+use std::num::NonZeroUsize;
+
+/// A `Copy` TaggedPtr.
+///
+/// You should use this instead of the `TaggedPtr` type in all cases where
+/// `P: Copy`.
+///
+/// If `COMPARE_PACKED` is true, then the pointers will be compared and hashed without
+/// unpacking. Otherwise we don't implement PartialEq/Eq/Hash; if you want that,
+/// wrap the TaggedPtr.
+pub struct CopyTaggedPtr<P, T, const COMPARE_PACKED: bool>
+where
+ P: Pointer,
+ T: Tag,
+{
+ packed: NonZeroUsize,
+ data: PhantomData<(P, T)>,
+}
+
+impl<P, T, const COMPARE_PACKED: bool> Copy for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer,
+ T: Tag,
+ P: Copy,
+{
+}
+
+impl<P, T, const COMPARE_PACKED: bool> Clone for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer,
+ T: Tag,
+ P: Copy,
+{
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+// We pack the tag into the *upper* bits of the pointer to ease retrieval of the
+// value; a left shift is a multiplication and those are embeddable in
+// instruction encoding.
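+//
+// For example (illustrative, assuming a 64-bit `usize` and `T::BITS == 2`):
+//
+//   packed      = (tag << 62) | (pointer >> 2)
+//   pointer_raw = packed << 2    // recovers the pointer bits
+//   tag         = packed >> 62   // recovers the tag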
+impl<P, T, const COMPARE_PACKED: bool> CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer,
+ T: Tag,
+{
+ const TAG_BIT_SHIFT: usize = usize::BITS as usize - T::BITS;
+ const ASSERTION: () = {
+ assert!(T::BITS <= P::BITS);
+ // Used for the transmute_copy's below
+ assert!(std::mem::size_of::<&P::Target>() == std::mem::size_of::<usize>());
+ };
+
+ pub fn new(pointer: P, tag: T) -> Self {
+ // Trigger assert!
+ let () = Self::ASSERTION;
+ let packed_tag = tag.into_usize() << Self::TAG_BIT_SHIFT;
+
+ Self {
+ // SAFETY: We know that the pointer is non-null, as it must be
+ // dereferenceable per `Pointer` safety contract.
+ packed: unsafe {
+ NonZeroUsize::new_unchecked((P::into_usize(pointer) >> T::BITS) | packed_tag)
+ },
+ data: PhantomData,
+ }
+ }
+
+ pub(super) fn pointer_raw(&self) -> usize {
+ self.packed.get() << T::BITS
+ }
+ pub fn pointer(self) -> P
+ where
+ P: Copy,
+ {
+ // SAFETY: pointer_raw returns the original pointer
+ //
+ // Note that this isn't going to double-drop or anything because we have
+ // P: Copy
+ unsafe { P::from_usize(self.pointer_raw()) }
+ }
+ pub fn pointer_ref(&self) -> &P::Target {
+ // SAFETY: pointer_raw returns the original pointer
+ unsafe { std::mem::transmute_copy(&self.pointer_raw()) }
+ }
+ pub fn pointer_mut(&mut self) -> &mut P::Target
+ where
+ P: std::ops::DerefMut,
+ {
+ // SAFETY: pointer_raw returns the original pointer
+ unsafe { std::mem::transmute_copy(&self.pointer_raw()) }
+ }
+ #[inline]
+ pub fn tag(&self) -> T {
+ unsafe { T::from_usize(self.packed.get() >> Self::TAG_BIT_SHIFT) }
+ }
+ #[inline]
+ pub fn set_tag(&mut self, tag: T) {
+ let mut packed = self.packed.get();
+ let new_tag = T::into_usize(tag) << Self::TAG_BIT_SHIFT;
+ let tag_mask = (1 << T::BITS) - 1;
+ packed &= !(tag_mask << Self::TAG_BIT_SHIFT);
+ packed |= new_tag;
+ self.packed = unsafe { NonZeroUsize::new_unchecked(packed) };
+ }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> std::ops::Deref for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer,
+ T: Tag,
+{
+ type Target = P::Target;
+ fn deref(&self) -> &Self::Target {
+ self.pointer_ref()
+ }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> std::ops::DerefMut for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer + std::ops::DerefMut,
+ T: Tag,
+{
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.pointer_mut()
+ }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> fmt::Debug for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer,
+ P::Target: fmt::Debug,
+ T: Tag + fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("CopyTaggedPtr")
+ .field("pointer", &self.pointer_ref())
+ .field("tag", &self.tag())
+ .finish()
+ }
+}
+
+impl<P, T> PartialEq for CopyTaggedPtr<P, T, true>
+where
+ P: Pointer,
+ T: Tag,
+{
+ fn eq(&self, other: &Self) -> bool {
+ self.packed == other.packed
+ }
+}
+
+impl<P, T> Eq for CopyTaggedPtr<P, T, true>
+where
+ P: Pointer,
+ T: Tag,
+{
+}
+
+impl<P, T> std::hash::Hash for CopyTaggedPtr<P, T, true>
+where
+ P: Pointer,
+ T: Tag,
+{
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ self.packed.hash(state);
+ }
+}
+
+impl<P, T, HCX, const COMPARE_PACKED: bool> HashStable<HCX> for CopyTaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer + HashStable<HCX>,
+ T: Tag + HashStable<HCX>,
+{
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ unsafe {
+ Pointer::with_ref(self.pointer_raw(), |p: &P| p.hash_stable(hcx, hasher));
+ }
+ self.tag().hash_stable(hcx, hasher);
+ }
+}
diff --git a/compiler/rustc_data_structures/src/tagged_ptr/drop.rs b/compiler/rustc_data_structures/src/tagged_ptr/drop.rs
new file mode 100644
index 000000000..d44ccd368
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tagged_ptr/drop.rs
@@ -0,0 +1,133 @@
+use super::{Pointer, Tag};
+use crate::stable_hasher::{HashStable, StableHasher};
+use std::fmt;
+
+use super::CopyTaggedPtr;
+
+/// A TaggedPtr implementing `Drop`.
+///
+/// If `COMPARE_PACKED` is true, then the pointers will be compared and hashed without
+/// unpacking. Otherwise we don't implement PartialEq/Eq/Hash; if you want that,
+/// wrap the TaggedPtr.
+pub struct TaggedPtr<P, T, const COMPARE_PACKED: bool>
+where
+ P: Pointer,
+ T: Tag,
+{
+ raw: CopyTaggedPtr<P, T, COMPARE_PACKED>,
+}
+
+impl<P, T, const COMPARE_PACKED: bool> Clone for TaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer + Clone,
+ T: Tag,
+{
+ fn clone(&self) -> Self {
+ unsafe { Self::new(P::with_ref(self.raw.pointer_raw(), |p| p.clone()), self.raw.tag()) }
+ }
+}
+
+// We pack the tag into the *upper* bits of the pointer to ease retrieval of the
+// value; a left shift is a multiplication and those are embeddable in
+// instruction encoding.
+impl<P, T, const COMPARE_PACKED: bool> TaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer,
+ T: Tag,
+{
+ pub fn new(pointer: P, tag: T) -> Self {
+ TaggedPtr { raw: CopyTaggedPtr::new(pointer, tag) }
+ }
+
+ pub fn pointer_ref(&self) -> &P::Target {
+ self.raw.pointer_ref()
+ }
+ pub fn tag(&self) -> T {
+ self.raw.tag()
+ }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> std::ops::Deref for TaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer,
+ T: Tag,
+{
+ type Target = P::Target;
+ fn deref(&self) -> &Self::Target {
+ self.raw.pointer_ref()
+ }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> std::ops::DerefMut for TaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer + std::ops::DerefMut,
+ T: Tag,
+{
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ self.raw.pointer_mut()
+ }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> Drop for TaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer,
+ T: Tag,
+{
+ fn drop(&mut self) {
+ // No need to drop the tag, as it's Copy
+ unsafe {
+ std::mem::drop(P::from_usize(self.raw.pointer_raw()));
+ }
+ }
+}
+
+impl<P, T, const COMPARE_PACKED: bool> fmt::Debug for TaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer,
+ P::Target: fmt::Debug,
+ T: Tag + fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("TaggedPtr")
+ .field("pointer", &self.pointer_ref())
+ .field("tag", &self.tag())
+ .finish()
+ }
+}
+
+impl<P, T> PartialEq for TaggedPtr<P, T, true>
+where
+ P: Pointer,
+ T: Tag,
+{
+ fn eq(&self, other: &Self) -> bool {
+ self.raw.eq(&other.raw)
+ }
+}
+
+impl<P, T> Eq for TaggedPtr<P, T, true>
+where
+ P: Pointer,
+ T: Tag,
+{
+}
+
+impl<P, T> std::hash::Hash for TaggedPtr<P, T, true>
+where
+ P: Pointer,
+ T: Tag,
+{
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ self.raw.hash(state);
+ }
+}
+
+impl<P, T, HCX, const COMPARE_PACKED: bool> HashStable<HCX> for TaggedPtr<P, T, COMPARE_PACKED>
+where
+ P: Pointer + HashStable<HCX>,
+ T: Tag + HashStable<HCX>,
+{
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ self.raw.hash_stable(hcx, hasher);
+ }
+}
diff --git a/compiler/rustc_data_structures/src/temp_dir.rs b/compiler/rustc_data_structures/src/temp_dir.rs
new file mode 100644
index 000000000..a780d2386
--- /dev/null
+++ b/compiler/rustc_data_structures/src/temp_dir.rs
@@ -0,0 +1,34 @@
+use std::mem::ManuallyDrop;
+use std::path::Path;
+use tempfile::TempDir;
+
+/// This is used to avoid TempDir being dropped on error paths unintentionally.
+#[derive(Debug)]
+pub struct MaybeTempDir {
+ dir: ManuallyDrop<TempDir>,
+ // Whether the TempDir should be kept (i.e., not deleted) on drop.
+ keep: bool,
+}
+
+impl Drop for MaybeTempDir {
+ fn drop(&mut self) {
+ // SAFETY: We are in the destructor, and no further access will
+ // occur.
+ let dir = unsafe { ManuallyDrop::take(&mut self.dir) };
+ if self.keep {
+ dir.into_path();
+ }
+ }
+}
+
+impl AsRef<Path> for MaybeTempDir {
+ fn as_ref(&self) -> &Path {
+ self.dir.path()
+ }
+}
+
+impl MaybeTempDir {
+ pub fn new(dir: TempDir, keep_on_drop: bool) -> MaybeTempDir {
+ MaybeTempDir { dir: ManuallyDrop::new(dir), keep: keep_on_drop }
+ }
+}
diff --git a/compiler/rustc_data_structures/src/thin_vec.rs b/compiler/rustc_data_structures/src/thin_vec.rs
new file mode 100644
index 000000000..716259142
--- /dev/null
+++ b/compiler/rustc_data_structures/src/thin_vec.rs
@@ -0,0 +1,135 @@
+use crate::stable_hasher::{HashStable, StableHasher};
+
+use std::iter::FromIterator;
+
+/// A vector type optimized for cases where the size is usually 0 (cf. `SmallVec`).
+/// The `Option<Box<..>>` wrapping allows us to represent an empty vector with `None`,
+/// which uses only a single (null) pointer.
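+///
+/// A sketch of the representation trade-off:
+///
+/// ```ignore (illustrative)
+/// let mut v: ThinVec<u32> = ThinVec::new(); // just a null pointer, no allocation
+/// v.push(1);                                // now backed by a `Box<Vec<u32>>`
+/// assert_eq!(&v[..], &[1]);
+/// ```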
+#[derive(Clone, Encodable, Decodable, Debug, Hash, Eq, PartialEq)]
+pub struct ThinVec<T>(Option<Box<Vec<T>>>);
+
+impl<T> ThinVec<T> {
+ pub fn new() -> Self {
+ ThinVec(None)
+ }
+
+ pub fn iter(&self) -> std::slice::Iter<'_, T> {
+ self.into_iter()
+ }
+
+ pub fn iter_mut(&mut self) -> std::slice::IterMut<'_, T> {
+ self.into_iter()
+ }
+
+ pub fn push(&mut self, item: T) {
+ match *self {
+ ThinVec(Some(ref mut vec)) => vec.push(item),
+ ThinVec(None) => *self = vec![item].into(),
+ }
+ }
+}
+
+impl<T> From<Vec<T>> for ThinVec<T> {
+ fn from(vec: Vec<T>) -> Self {
+ if vec.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(vec))) }
+ }
+}
+
+impl<T> Into<Vec<T>> for ThinVec<T> {
+ fn into(self) -> Vec<T> {
+ match self {
+ ThinVec(None) => Vec::new(),
+ ThinVec(Some(vec)) => *vec,
+ }
+ }
+}
+
+impl<T> ::std::ops::Deref for ThinVec<T> {
+ type Target = [T];
+ fn deref(&self) -> &[T] {
+ match *self {
+ ThinVec(None) => &[],
+ ThinVec(Some(ref vec)) => vec,
+ }
+ }
+}
+
+impl<T> ::std::ops::DerefMut for ThinVec<T> {
+ fn deref_mut(&mut self) -> &mut [T] {
+ match *self {
+ ThinVec(None) => &mut [],
+ ThinVec(Some(ref mut vec)) => vec,
+ }
+ }
+}
+
+impl<T> FromIterator<T> for ThinVec<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
+ // `Vec::from_iter()` should not allocate if the iterator is empty.
+ let vec: Vec<_> = iter.into_iter().collect();
+ if vec.is_empty() { ThinVec(None) } else { ThinVec(Some(Box::new(vec))) }
+ }
+}
+
+impl<T> IntoIterator for ThinVec<T> {
+ type Item = T;
+ type IntoIter = std::vec::IntoIter<T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ // This is still performant because `Vec::new()` does not allocate.
+ self.0.map_or_else(Vec::new, |ptr| *ptr).into_iter()
+ }
+}
+
+impl<'a, T> IntoIterator for &'a ThinVec<T> {
+ type Item = &'a T;
+ type IntoIter = std::slice::Iter<'a, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.as_ref().iter()
+ }
+}
+
+impl<'a, T> IntoIterator for &'a mut ThinVec<T> {
+ type Item = &'a mut T;
+ type IntoIter = std::slice::IterMut<'a, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.as_mut().iter_mut()
+ }
+}
+
+impl<T> Extend<T> for ThinVec<T> {
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ match *self {
+ ThinVec(Some(ref mut vec)) => vec.extend(iter),
+ ThinVec(None) => *self = iter.into_iter().collect::<Vec<_>>().into(),
+ }
+ }
+
+ fn extend_one(&mut self, item: T) {
+ self.push(item)
+ }
+
+ fn extend_reserve(&mut self, additional: usize) {
+ match *self {
+ ThinVec(Some(ref mut vec)) => vec.reserve(additional),
+ ThinVec(None) => *self = Vec::with_capacity(additional).into(),
+ }
+ }
+}
+
+impl<T: HashStable<CTX>, CTX> HashStable<CTX> for ThinVec<T> {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ (**self).hash_stable(hcx, hasher)
+ }
+}
+
+impl<T> Default for ThinVec<T> {
+ fn default() -> Self {
+ Self(None)
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_data_structures/src/thin_vec/tests.rs b/compiler/rustc_data_structures/src/thin_vec/tests.rs
new file mode 100644
index 000000000..0221b9912
--- /dev/null
+++ b/compiler/rustc_data_structures/src/thin_vec/tests.rs
@@ -0,0 +1,42 @@
+use super::*;
+
+impl<T> ThinVec<T> {
+ fn into_vec(self) -> Vec<T> {
+ self.into()
+ }
+}
+
+#[test]
+fn test_from_iterator() {
+ assert_eq!(std::iter::empty().collect::<ThinVec<String>>().into_vec(), Vec::<String>::new());
+ assert_eq!(std::iter::once(42).collect::<ThinVec<_>>().into_vec(), vec![42]);
+ assert_eq!([1, 2].into_iter().collect::<ThinVec<_>>().into_vec(), vec![1, 2]);
+ assert_eq!([1, 2, 3].into_iter().collect::<ThinVec<_>>().into_vec(), vec![1, 2, 3]);
+}
+
+#[test]
+fn test_into_iterator_owned() {
+ assert_eq!(ThinVec::new().into_iter().collect::<Vec<String>>(), Vec::<String>::new());
+ assert_eq!(ThinVec::from(vec![1]).into_iter().collect::<Vec<_>>(), vec![1]);
+ assert_eq!(ThinVec::from(vec![1, 2]).into_iter().collect::<Vec<_>>(), vec![1, 2]);
+ assert_eq!(ThinVec::from(vec![1, 2, 3]).into_iter().collect::<Vec<_>>(), vec![1, 2, 3]);
+}
+
+#[test]
+fn test_into_iterator_ref() {
+ assert_eq!(ThinVec::new().iter().collect::<Vec<&String>>(), Vec::<&String>::new());
+ assert_eq!(ThinVec::from(vec![1]).iter().collect::<Vec<_>>(), vec![&1]);
+ assert_eq!(ThinVec::from(vec![1, 2]).iter().collect::<Vec<_>>(), vec![&1, &2]);
+ assert_eq!(ThinVec::from(vec![1, 2, 3]).iter().collect::<Vec<_>>(), vec![&1, &2, &3]);
+}
+
+#[test]
+fn test_into_iterator_ref_mut() {
+ assert_eq!(ThinVec::new().iter_mut().collect::<Vec<&mut String>>(), Vec::<&mut String>::new());
+ assert_eq!(ThinVec::from(vec![1]).iter_mut().collect::<Vec<_>>(), vec![&mut 1]);
+ assert_eq!(ThinVec::from(vec![1, 2]).iter_mut().collect::<Vec<_>>(), vec![&mut 1, &mut 2]);
+ assert_eq!(
+ ThinVec::from(vec![1, 2, 3]).iter_mut().collect::<Vec<_>>(),
+ vec![&mut 1, &mut 2, &mut 3],
+ );
+}
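+
+// A hedged usage sketch, not part of the upstream suite: it exercises the
+// representation described in the type docs, where an empty `ThinVec` is a
+// single null pointer until the first `push` allocates.
+#[test]
+fn test_push_and_deref_sketch() {
+ let mut v = ThinVec::new();
+ // An empty `ThinVec` derefs to an empty slice without allocating.
+ assert!(v.is_empty());
+ v.push(1);
+ v.push(2);
+ // After the first push the inner `Option<Box<Vec<T>>>` is `Some`, and
+ // `Deref<Target = [T]>` exposes the usual slice API.
+ assert_eq!(*v, [1, 2]);
+}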
diff --git a/compiler/rustc_data_structures/src/tiny_list.rs b/compiler/rustc_data_structures/src/tiny_list.rs
new file mode 100644
index 000000000..9b07f8684
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tiny_list.rs
@@ -0,0 +1,81 @@
+//! A singly-linked list.
+//!
+//! Using this data structure only makes sense under very specific
+//! circumstances:
+//!
+//! - If you have a list that rarely stores more than one element, then this
+//! data structure can store the element without allocating and only uses as
+//! much space as an `Option<(T, usize)>`. If `T` can double as the `Option`
+//! discriminant, it will even be only as large as `(T, usize)`.
+//!
+//! If you expect to store more than one element in the common case, steer
+//! clear and use a `Vec<T>`, `Box<[T]>`, or a `SmallVec<T>`.
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Clone)]
+pub struct TinyList<T> {
+ head: Option<Element<T>>,
+}
+
+impl<T: PartialEq> TinyList<T> {
+ #[inline]
+ pub fn new() -> TinyList<T> {
+ TinyList { head: None }
+ }
+
+ #[inline]
+ pub fn new_single(data: T) -> TinyList<T> {
+ TinyList { head: Some(Element { data, next: None }) }
+ }
+
+ #[inline]
+ pub fn insert(&mut self, data: T) {
+ self.head = Some(Element { data, next: self.head.take().map(Box::new) });
+ }
+
+ #[inline]
+ pub fn remove(&mut self, data: &T) -> bool {
+ self.head = match self.head {
+ Some(ref mut head) if head.data == *data => head.next.take().map(|x| *x),
+ Some(ref mut head) => return head.remove_next(data),
+ None => return false,
+ };
+ true
+ }
+
+ #[inline]
+ pub fn contains(&self, data: &T) -> bool {
+ let mut elem = self.head.as_ref();
+ while let Some(ref e) = elem {
+ if &e.data == data {
+ return true;
+ }
+ elem = e.next.as_deref();
+ }
+ false
+ }
+}
+
+#[derive(Clone)]
+struct Element<T> {
+ data: T,
+ next: Option<Box<Element<T>>>,
+}
+
+impl<T: PartialEq> Element<T> {
+ fn remove_next(&mut self, data: &T) -> bool {
+ let mut n = self;
+ loop {
+ match n.next {
+ Some(ref mut next) if next.data == *data => {
+ n.next = next.next.take();
+ return true;
+ }
+ Some(ref mut next) => n = next,
+ None => return false,
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_data_structures/src/tiny_list/tests.rs b/compiler/rustc_data_structures/src/tiny_list/tests.rs
new file mode 100644
index 000000000..c0334d2e2
--- /dev/null
+++ b/compiler/rustc_data_structures/src/tiny_list/tests.rs
@@ -0,0 +1,155 @@
+use super::*;
+
+extern crate test;
+use test::{black_box, Bencher};
+
+impl<T> TinyList<T> {
+ fn len(&self) -> usize {
+ let (mut elem, mut count) = (self.head.as_ref(), 0);
+ while let Some(ref e) = elem {
+ count += 1;
+ elem = e.next.as_deref();
+ }
+ count
+ }
+}
+
+#[test]
+fn test_contains_and_insert() {
+ fn do_insert(i: u32) -> bool {
+ i % 2 == 0
+ }
+
+ let mut list = TinyList::new();
+
+ for i in 0..10 {
+ for j in 0..i {
+ if do_insert(j) {
+ assert!(list.contains(&j));
+ } else {
+ assert!(!list.contains(&j));
+ }
+ }
+
+ assert!(!list.contains(&i));
+
+ if do_insert(i) {
+ list.insert(i);
+ assert!(list.contains(&i));
+ }
+ }
+}
+
+#[test]
+fn test_remove_first() {
+ let mut list = TinyList::new();
+ list.insert(1);
+ list.insert(2);
+ list.insert(3);
+ list.insert(4);
+ assert_eq!(list.len(), 4);
+
+ assert!(list.remove(&4));
+ assert!(!list.contains(&4));
+
+ assert_eq!(list.len(), 3);
+ assert!(list.contains(&1));
+ assert!(list.contains(&2));
+ assert!(list.contains(&3));
+}
+
+#[test]
+fn test_remove_last() {
+ let mut list = TinyList::new();
+ list.insert(1);
+ list.insert(2);
+ list.insert(3);
+ list.insert(4);
+ assert_eq!(list.len(), 4);
+
+ assert!(list.remove(&1));
+ assert!(!list.contains(&1));
+
+ assert_eq!(list.len(), 3);
+ assert!(list.contains(&2));
+ assert!(list.contains(&3));
+ assert!(list.contains(&4));
+}
+
+#[test]
+fn test_remove_middle() {
+ let mut list = TinyList::new();
+ list.insert(1);
+ list.insert(2);
+ list.insert(3);
+ list.insert(4);
+ assert_eq!(list.len(), 4);
+
+ assert!(list.remove(&2));
+ assert!(!list.contains(&2));
+
+ assert_eq!(list.len(), 3);
+ assert!(list.contains(&1));
+ assert!(list.contains(&3));
+ assert!(list.contains(&4));
+}
+
+#[test]
+fn test_remove_single() {
+ let mut list = TinyList::new();
+ list.insert(1);
+ assert_eq!(list.len(), 1);
+
+ assert!(list.remove(&1));
+ assert!(!list.contains(&1));
+
+ assert_eq!(list.len(), 0);
+}
+
+#[bench]
+fn bench_insert_empty(b: &mut Bencher) {
+ b.iter(|| {
+ let mut list = black_box(TinyList::new());
+ list.insert(1);
+ list
+ })
+}
+
+#[bench]
+fn bench_insert_one(b: &mut Bencher) {
+ b.iter(|| {
+ let mut list = black_box(TinyList::new_single(0));
+ list.insert(1);
+ list
+ })
+}
+
+#[bench]
+fn bench_contains_empty(b: &mut Bencher) {
+ b.iter(|| black_box(TinyList::new()).contains(&1));
+}
+
+#[bench]
+fn bench_contains_unknown(b: &mut Bencher) {
+ b.iter(|| black_box(TinyList::new_single(0)).contains(&1));
+}
+
+#[bench]
+fn bench_contains_one(b: &mut Bencher) {
+ b.iter(|| black_box(TinyList::new_single(1)).contains(&1));
+}
+
+#[bench]
+fn bench_remove_empty(b: &mut Bencher) {
+ b.iter(|| black_box(TinyList::new()).remove(&1));
+}
+
+#[bench]
+fn bench_remove_unknown(b: &mut Bencher) {
+ b.iter(|| black_box(TinyList::new_single(0)).remove(&1));
+}
+
+#[bench]
+fn bench_remove_one(b: &mut Bencher) {
+ b.iter(|| black_box(TinyList::new_single(1)).remove(&1));
+}
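+
+// A hedged sketch of the intended usage, not part of the upstream suite: a
+// list that usually holds at most one element, where `insert` prepends and
+// `remove` unlinks the first match.
+#[test]
+fn test_insert_remove_sketch() {
+ let mut list = TinyList::new();
+ assert!(!list.contains(&7));
+ list.insert(7);
+ assert!(list.contains(&7));
+ // Removing an absent value reports `false` and leaves the list intact.
+ assert!(!list.remove(&8));
+ assert_eq!(list.len(), 1);
+ assert!(list.remove(&7));
+ assert_eq!(list.len(), 0);
+}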
diff --git a/compiler/rustc_data_structures/src/transitive_relation.rs b/compiler/rustc_data_structures/src/transitive_relation.rs
new file mode 100644
index 000000000..0ff64969b
--- /dev/null
+++ b/compiler/rustc_data_structures/src/transitive_relation.rs
@@ -0,0 +1,392 @@
+use crate::fx::FxIndexSet;
+use crate::sync::Lock;
+use rustc_index::bit_set::BitMatrix;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::mem;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Clone, Debug)]
+pub struct TransitiveRelation<T> {
+ // List of elements. This is used to map from a T to a usize.
+ elements: FxIndexSet<T>,
+
+ // List of base edges in the graph. Required to compute the
+ // transitive closure.
+ edges: Vec<Edge>,
+
+ // This is a cached transitive closure derived from the edges.
+ // Currently, we build it lazily and just throw out any existing
+ // copy whenever a new edge is added. (The Lock is to permit
+ // the lazy computation.) This is kind of silly, except for the
+ // fact that its size is tied to `self.elements.len()`, so I wanted to
+ // wait before building it up to avoid reallocating as new edges
+ // are added with new elements. Perhaps better would be to ask the
+ // user for a batch of edges to minimize this effect, but I
+ // already wrote the code this way. :P -nmatsakis
+ closure: Lock<Option<BitMatrix<usize, usize>>>,
+}
+
+// HACK(eddyb) manual impl avoids `Default` bound on `T`.
+impl<T: Eq + Hash> Default for TransitiveRelation<T> {
+ fn default() -> Self {
+ TransitiveRelation {
+ elements: Default::default(),
+ edges: Default::default(),
+ closure: Default::default(),
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Debug)]
+struct Index(usize);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+struct Edge {
+ source: Index,
+ target: Index,
+}
+
+impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
+ pub fn is_empty(&self) -> bool {
+ self.edges.is_empty()
+ }
+
+ pub fn elements(&self) -> impl Iterator<Item = &T> {
+ self.elements.iter()
+ }
+
+ fn index(&self, a: T) -> Option<Index> {
+ self.elements.get_index_of(&a).map(Index)
+ }
+
+ fn add_index(&mut self, a: T) -> Index {
+ let (index, added) = self.elements.insert_full(a);
+ if added {
+ // if we changed the dimensions, clear the cache
+ *self.closure.get_mut() = None;
+ }
+ Index(index)
+ }
+
+ /// Applies the (partial) function to each edge and returns a new
+ /// relation. If `f` returns `None` for any end-point, returns
+ /// `None`.
+ pub fn maybe_map<F, U>(&self, mut f: F) -> Option<TransitiveRelation<U>>
+ where
+ F: FnMut(T) -> Option<U>,
+ U: Clone + Debug + Eq + Hash + Copy,
+ {
+ let mut result = TransitiveRelation::default();
+ for edge in &self.edges {
+ result.add(f(self.elements[edge.source.0])?, f(self.elements[edge.target.0])?);
+ }
+ Some(result)
+ }
+
+ /// Indicate that `a < b` (where `<` is this relation)
+ pub fn add(&mut self, a: T, b: T) {
+ let a = self.add_index(a);
+ let b = self.add_index(b);
+ let edge = Edge { source: a, target: b };
+ if !self.edges.contains(&edge) {
+ self.edges.push(edge);
+
+ // added an edge, clear the cache
+ *self.closure.get_mut() = None;
+ }
+ }
+
+ /// Checks whether `a < b` (transitively)
+ pub fn contains(&self, a: T, b: T) -> bool {
+ match (self.index(a), self.index(b)) {
+ (Some(a), Some(b)) => self.with_closure(|closure| closure.contains(a.0, b.0)),
+ (None, _) | (_, None) => false,
+ }
+ }
+
+ /// Thinking of `x R y` as an edge `x -> y` in a graph, this
+ /// returns all things reachable from `a`.
+ ///
+ /// Really this probably ought to be `impl Iterator<Item = &T>`, but
+ /// I'm too lazy to make that work, and -- given the caching
+ /// strategy -- it'd be a touch tricky anyhow.
+ pub fn reachable_from(&self, a: T) -> Vec<T> {
+ match self.index(a) {
+ Some(a) => {
+ self.with_closure(|closure| closure.iter(a.0).map(|i| self.elements[i]).collect())
+ }
+ None => vec![],
+ }
+ }
+
+ /// Picks what I am referring to as the "postdominating"
+ /// upper-bound for `a` and `b`. This is usually the least upper
+ /// bound, but in cases where there is no single least upper
+ /// bound, it is the "mutual immediate postdominator", if you
+ /// imagine a graph where `a < b` means `a -> b`.
+ ///
+ /// This function is needed because region inference currently
+ /// requires that we produce a single "UB", and there is no best
+ /// choice for the LUB. Rather than pick arbitrarily, I pick a
+ /// less good, but predictable choice. This should help ensure
+ /// that region inference yields predictable results (though it
+ /// itself is not fully sufficient).
+ ///
+ /// Examples are probably clearer than any prose I could write
+ /// (there are corresponding tests below, btw). In each case,
+ /// the query is `postdom_upper_bound(a, b)`:
+ ///
+ /// ```text
+ /// // Returns Some(x), which is also LUB.
+ /// a -> a1 -> x
+ /// ^
+ /// |
+ /// b -> b1 ---+
+ ///
+ /// // Returns `Some(x)`, which is not LUB (there is none)
+ /// // diagonal edges run left-to-right.
+ /// a -> a1 -> x
+ /// \/ ^
+ /// /\ |
+ /// b -> b1 ---+
+ ///
+ /// // Returns `None`.
+ /// a -> a1
+ /// b -> b1
+ /// ```
+ pub fn postdom_upper_bound(&self, a: T, b: T) -> Option<T> {
+ let mubs = self.minimal_upper_bounds(a, b);
+ self.mutual_immediate_postdominator(mubs)
+ }
+
+ /// Viewing the relation as a graph, computes the "mutual
+ /// immediate postdominator" of a set of points (if one
+ /// exists). See `postdom_upper_bound` for details.
+ pub fn mutual_immediate_postdominator<'a>(&'a self, mut mubs: Vec<T>) -> Option<T> {
+ loop {
+ match mubs.len() {
+ 0 => return None,
+ 1 => return Some(mubs[0]),
+ _ => {
+ let m = mubs.pop().unwrap();
+ let n = mubs.pop().unwrap();
+ mubs.extend(self.minimal_upper_bounds(n, m));
+ }
+ }
+ }
+ }
+
+ /// Returns the set of bounds `X` such that:
+ ///
+ /// - `a < X` and `b < X`
+ /// - there is no `Y != X` such that `a < Y` and `Y < X`
+ /// - except for the case where `X < a` (i.e., a strongly connected
+ /// component in the graph). In that case, the smallest
+ /// representative of the SCC is returned (as determined by the
+ /// internal indices).
+ ///
+ /// Note that this set can, in principle, have any size.
+ pub fn minimal_upper_bounds(&self, a: T, b: T) -> Vec<T> {
+ let (Some(mut a), Some(mut b)) = (self.index(a), self.index(b)) else {
+ return vec![];
+ };
+
+ // in some cases, there are some arbitrary choices to be made;
+ // it doesn't really matter what we pick, as long as we pick
+ // the same thing consistently when queried, so ensure that
+ // (a, b) are in a consistent relative order
+ if a > b {
+ mem::swap(&mut a, &mut b);
+ }
+
+ let lub_indices = self.with_closure(|closure| {
+ // Easy case is when either a < b or b < a:
+ if closure.contains(a.0, b.0) {
+ return vec![b.0];
+ }
+ if closure.contains(b.0, a.0) {
+ return vec![a.0];
+ }
+
+ // Otherwise, the tricky part is that there may be some c
+ // where a < c and b < c. In fact, there may be many such
+ // values. So here is what we do:
+ //
+ // 1. Find the vector `[X | a < X && b < X]` of all values
+ // `X` where `a < X` and `b < X`. In terms of the
+ // graph, this means all values reachable from both `a`
+ // and `b`. Note that this vector is also a set, but we
+ // use the term vector because the order matters
+ // to the steps below.
+ // - This vector contains upper bounds, but they are
+ // not minimal upper bounds. So you may have e.g.
+ // `[x, y, tcx, z]` where `x < tcx` and `y < tcx` and
+ // `z < x` and `z < y`:
+ //
+ // z --+---> x ----+----> tcx
+ // | |
+ // | |
+ // +---> y ----+
+ //
+ // In this case, we really want to return just `[z]`.
+ // The following steps below achieve this by gradually
+ // reducing the list.
+ // 2. Pare down the vector using `pare_down`. This will
+ // remove elements from the vector that can be reached
+ // by an earlier element.
+ // - In the example above, this would convert `[x, y,
+ // tcx, z]` to `[x, y, z]`. Note that `x` and `y` are
+ // still in the vector; this is because while `z < x`
+ // (and `z < y`) holds, `z` comes after them in the
+ // vector.
+ // 3. Reverse the vector and repeat the pare down process.
+ // - In the example above, we would reverse to
+ // `[z, y, x]` and then pare down to `[z]`.
+ // 4. Reverse once more just so that we yield a vector in
+ // increasing order of index. Not necessary, but why not.
+ //
+ // I believe this algorithm yields a minimal set. The
+ // argument is that, after step 2, we know that no element
+ // can reach its successors (in the vector, not the graph).
+ // After step 3, we know that no element can reach any of
+ // its predecessors (because of step 2) or successors
+ // (because we just called `pare_down`).
+ //
+ // This same algorithm is used in `parents` below.
+
+ let mut candidates = closure.intersect_rows(a.0, b.0); // (1)
+ pare_down(&mut candidates, closure); // (2)
+ candidates.reverse(); // (3a)
+ pare_down(&mut candidates, closure); // (3b)
+ candidates
+ });
+
+ lub_indices
+ .into_iter()
+ .rev() // (4)
+ .map(|i| self.elements[i])
+ .collect()
+ }
+
+ /// Given an element A, returns the maximal set {B} of elements B
+ /// such that
+ ///
+ /// - A != B
+ /// - A R B is true
+ /// - for each i, j: `B[i]` R `B[j]` does not hold
+ ///
+ /// The intuition is that this moves "one step up" through a lattice
+ /// (where the relation is encoding the `<=` relation for the lattice).
+ /// So e.g., if the relation is `->` and we have
+ ///
+ /// ```text
+ /// a -> b -> d -> f
+ /// | ^
+ /// +--> c -> e ---+
+ /// ```
+ ///
+ /// then `parents(a)` returns `[b, c]`. The `postdom_parent` function
+ /// would further reduce this to just `f`.
+ pub fn parents(&self, a: T) -> Vec<T> {
+ let Some(a) = self.index(a) else {
+ return vec![];
+ };
+
+ // Steal the algorithm for `minimal_upper_bounds` above, but
+ // with a slight tweak. In the case where `a R a`, we remove
+ // that from the set of candidates.
+ let ancestors = self.with_closure(|closure| {
+ let mut ancestors = closure.intersect_rows(a.0, a.0);
+
+ // Remove anything that can reach `a`. If this is a
+ // reflexive relation, this will include `a` itself.
+ ancestors.retain(|&e| !closure.contains(e, a.0));
+
+ pare_down(&mut ancestors, closure); // (2)
+ ancestors.reverse(); // (3a)
+ pare_down(&mut ancestors, closure); // (3b)
+ ancestors
+ });
+
+ ancestors
+ .into_iter()
+ .rev() // (4)
+ .map(|i| self.elements[i])
+ .collect()
+ }
+
+ fn with_closure<OP, R>(&self, op: OP) -> R
+ where
+ OP: FnOnce(&BitMatrix<usize, usize>) -> R,
+ {
+ let mut closure_cell = self.closure.borrow_mut();
+ let mut closure = closure_cell.take();
+ if closure.is_none() {
+ closure = Some(self.compute_closure());
+ }
+ let result = op(closure.as_ref().unwrap());
+ *closure_cell = closure;
+ result
+ }
+
+ fn compute_closure(&self) -> BitMatrix<usize, usize> {
+ let mut matrix = BitMatrix::new(self.elements.len(), self.elements.len());
+ let mut changed = true;
+ while changed {
+ changed = false;
+ for edge in &self.edges {
+ // add an edge from S -> T
+ changed |= matrix.insert(edge.source.0, edge.target.0);
+
+ // add all outgoing edges from T into S
+ changed |= matrix.union_rows(edge.target.0, edge.source.0);
+ }
+ }
+ matrix
+ }
+
+ /// Lists all the base edges in the graph: the initial _non-transitive_ set of element
+ /// relations, which will be later used as the basis for the transitive closure computation.
+ pub fn base_edges(&self) -> impl Iterator<Item = (T, T)> + '_ {
+ self.edges
+ .iter()
+ .map(move |edge| (self.elements[edge.source.0], self.elements[edge.target.0]))
+ }
+}
+
+/// Pare down is used as a step in the LUB computation. It edits the
+/// candidates array in place by removing any element `j` for which
+/// there exists an earlier element `i < j` such that `i -> j`. That is,
+/// after you run `pare_down`, you know that for all elements that
+/// remain in candidates, they cannot reach any of the elements that
+/// come after them.
+///
+/// Examples follow. Assume that a -> b -> c and x -> y -> z.
+///
+/// - Input: `[a, b, x]`. Output: `[a, x]`.
+/// - Input: `[b, a, x]`. Output: `[b, a, x]`.
+/// - Input: `[a, x, b, y]`. Output: `[a, x]`.
+fn pare_down(candidates: &mut Vec<usize>, closure: &BitMatrix<usize, usize>) {
+ let mut i = 0;
+ while let Some(&candidate_i) = candidates.get(i) {
+ i += 1;
+
+ let mut j = i;
+ let mut dead = 0;
+ while let Some(&candidate_j) = candidates.get(j) {
+ if closure.contains(candidate_i, candidate_j) {
+ // If `i` can reach `j`, then we can remove `j`. So just
+ // mark it as dead and move on; subsequent indices will be
+ // shifted into its place.
+ dead += 1;
+ } else {
+ candidates[j - dead] = candidate_j;
+ }
+ j += 1;
+ }
+ candidates.truncate(j - dead);
+ }
+}
diff --git a/compiler/rustc_data_structures/src/transitive_relation/tests.rs b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
new file mode 100644
index 000000000..e1f4c7ee0
--- /dev/null
+++ b/compiler/rustc_data_structures/src/transitive_relation/tests.rs
@@ -0,0 +1,362 @@
+use super::*;
+
+impl<T: Eq + Hash + Copy> TransitiveRelation<T> {
+ /// A "best" parent in some sense. See `parents` and
+ /// `postdom_upper_bound` for more details.
+ fn postdom_parent(&self, a: T) -> Option<T> {
+ self.mutual_immediate_postdominator(self.parents(a))
+ }
+}
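+
+// A hedged direct check of `pare_down`, not part of the upstream suite,
+// mirroring its doc examples: with a closure in which 0 reaches 1, the
+// later candidate 1 is pared away by the earlier candidate 0.
+#[test]
+fn pare_down_sketch() {
+ let mut closure = BitMatrix::new(2, 2);
+ closure.insert(0, 1);
+ let mut candidates = vec![0, 1];
+ pare_down(&mut candidates, &closure);
+ assert_eq!(candidates, vec![0]);
+}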
+
+#[test]
+fn test_one_step() {
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "b");
+ relation.add("a", "c");
+ assert!(relation.contains("a", "c"));
+ assert!(relation.contains("a", "b"));
+ assert!(!relation.contains("b", "a"));
+ assert!(!relation.contains("a", "d"));
+}
+
+#[test]
+fn test_many_steps() {
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "b");
+ relation.add("a", "c");
+ relation.add("a", "f");
+
+ relation.add("b", "c");
+ relation.add("b", "d");
+ relation.add("b", "e");
+
+ relation.add("e", "g");
+
+ assert!(relation.contains("a", "b"));
+ assert!(relation.contains("a", "c"));
+ assert!(relation.contains("a", "d"));
+ assert!(relation.contains("a", "e"));
+ assert!(relation.contains("a", "f"));
+ assert!(relation.contains("a", "g"));
+
+ assert!(relation.contains("b", "g"));
+
+ assert!(!relation.contains("a", "x"));
+ assert!(!relation.contains("b", "f"));
+}
+
+#[test]
+fn mubs_triangle() {
+ // a -> tcx
+ // ^
+ // |
+ // b
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "tcx");
+ relation.add("b", "tcx");
+ assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["tcx"]);
+ assert_eq!(relation.parents("a"), vec!["tcx"]);
+ assert_eq!(relation.parents("b"), vec!["tcx"]);
+}
+
+#[test]
+fn mubs_best_choice1() {
+ // 0 -> 1 <- 3
+ // | ^ |
+ // | | |
+ // +--> 2 <--+
+ //
+ // mubs(0,3) = [1]
+
+ // This tests a particular state in the algorithm, in which we
+ // need the second pare down call to get the right result (after
+ // intersection, we have [1, 2], but 2 -> 1).
+
+ let mut relation = TransitiveRelation::default();
+ relation.add("0", "1");
+ relation.add("0", "2");
+
+ relation.add("2", "1");
+
+ relation.add("3", "1");
+ relation.add("3", "2");
+
+ assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["2"]);
+ assert_eq!(relation.parents("0"), vec!["2"]);
+ assert_eq!(relation.parents("2"), vec!["1"]);
+ assert!(relation.parents("1").is_empty());
+}
+
+#[test]
+fn mubs_best_choice2() {
+ // 0 -> 1 <- 3
+ // | | |
+ // | v |
+ // +--> 2 <--+
+ //
+ // mubs(0,3) = [2]
+
+ // Like the preceding test, but in this case intersection is [2,
+ // 1], and hence we rely on the first pare down call.
+
+ let mut relation = TransitiveRelation::default();
+ relation.add("0", "1");
+ relation.add("0", "2");
+
+ relation.add("1", "2");
+
+ relation.add("3", "1");
+ relation.add("3", "2");
+
+ assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1"]);
+ assert_eq!(relation.parents("0"), vec!["1"]);
+ assert_eq!(relation.parents("1"), vec!["2"]);
+ assert!(relation.parents("2").is_empty());
+}
+
+#[test]
+fn mubs_no_best_choice() {
+ // in this case, the intersection yields [1, 2], and the "pare
+ // down" calls find nothing to remove.
+ let mut relation = TransitiveRelation::default();
+ relation.add("0", "1");
+ relation.add("0", "2");
+
+ relation.add("3", "1");
+ relation.add("3", "2");
+
+ assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1", "2"]);
+ assert_eq!(relation.parents("0"), vec!["1", "2"]);
+ assert_eq!(relation.parents("3"), vec!["1", "2"]);
+}
+
+#[test]
+fn mubs_best_choice_scc() {
+ // in this case, 1 and 2 form a cycle; we pick arbitrarily (but
+ // consistently).
+
+ let mut relation = TransitiveRelation::default();
+ relation.add("0", "1");
+ relation.add("0", "2");
+
+ relation.add("1", "2");
+ relation.add("2", "1");
+
+ relation.add("3", "1");
+ relation.add("3", "2");
+
+ assert_eq!(relation.minimal_upper_bounds("0", "3"), vec!["1"]);
+ assert_eq!(relation.parents("0"), vec!["1"]);
+}
+
+#[test]
+fn pdub_crisscross() {
+ // diagonal edges run left-to-right
+ // a -> a1 -> x
+ // \/ ^
+ // /\ |
+ // b -> b1 ---+
+
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "a1");
+ relation.add("a", "b1");
+ relation.add("b", "a1");
+ relation.add("b", "b1");
+ relation.add("a1", "x");
+ relation.add("b1", "x");
+
+ assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["a1", "b1"]);
+ assert_eq!(relation.postdom_upper_bound("a", "b"), Some("x"));
+ assert_eq!(relation.postdom_parent("a"), Some("x"));
+ assert_eq!(relation.postdom_parent("b"), Some("x"));
+}
+
+#[test]
+fn pdub_crisscross_more() {
+ // diagonal edges run left-to-right
+ // a -> a1 -> a2 -> a3 -> x
+ // \/ \/ ^
+ // /\ /\ |
+ // b -> b1 -> b2 ---------+
+
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "a1");
+ relation.add("a", "b1");
+ relation.add("b", "a1");
+ relation.add("b", "b1");
+
+ relation.add("a1", "a2");
+ relation.add("a1", "b2");
+ relation.add("b1", "a2");
+ relation.add("b1", "b2");
+
+ relation.add("a2", "a3");
+
+ relation.add("a3", "x");
+ relation.add("b2", "x");
+
+ assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["a1", "b1"]);
+ assert_eq!(relation.minimal_upper_bounds("a1", "b1"), vec!["a2", "b2"]);
+ assert_eq!(relation.postdom_upper_bound("a", "b"), Some("x"));
+
+ assert_eq!(relation.postdom_parent("a"), Some("x"));
+ assert_eq!(relation.postdom_parent("b"), Some("x"));
+}
+
+#[test]
+fn pdub_lub() {
+ // a -> a1 -> x
+ // ^
+ // |
+ // b -> b1 ---+
+
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "a1");
+ relation.add("b", "b1");
+ relation.add("a1", "x");
+ relation.add("b1", "x");
+
+ assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["x"]);
+ assert_eq!(relation.postdom_upper_bound("a", "b"), Some("x"));
+
+ assert_eq!(relation.postdom_parent("a"), Some("a1"));
+ assert_eq!(relation.postdom_parent("b"), Some("b1"));
+ assert_eq!(relation.postdom_parent("a1"), Some("x"));
+ assert_eq!(relation.postdom_parent("b1"), Some("x"));
+}
+
+#[test]
+fn mubs_intermediate_node_on_one_side_only() {
+ // a -> c -> d
+ // ^
+ // |
+ // b
+
+ // "digraph { a -> c -> d; b -> d; }",
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("b", "d");
+
+ assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["d"]);
+}
+
+#[test]
+fn mubs_scc_1() {
+ // +-------------+
+ // | +----+ |
+ // | v | |
+ // a -> c -> d <-+
+ // ^
+ // |
+ // b
+
+ // "digraph { a -> c -> d; d -> c; a -> d; b -> d; }",
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("d", "c");
+ relation.add("a", "d");
+ relation.add("b", "d");
+
+ assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
+}
+
+#[test]
+fn mubs_scc_2() {
+ // +----+
+ // v |
+ // a -> c -> d
+ // ^ ^
+ // | |
+ // +--- b
+
+ // "digraph { a -> c -> d; d -> c; b -> d; b -> c; }",
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("d", "c");
+ relation.add("b", "d");
+ relation.add("b", "c");
+
+ assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
+}
+
+#[test]
+fn mubs_scc_3() {
+ // +---------+
+ // v |
+ // a -> c -> d -> e
+ // ^ ^
+ // | |
+ // b ---+
+
+ // "digraph { a -> c -> d -> e -> c; b -> d; b -> e; }",
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("d", "e");
+ relation.add("e", "c");
+ relation.add("b", "d");
+ relation.add("b", "e");
+
+ assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
+}
+
+#[test]
+fn mubs_scc_4() {
+ // +---------+
+ // v |
+ // a -> c -> d -> e
+ // | ^ ^
+ // +---------+ |
+ // |
+ // b ---+
+
+ // "digraph { a -> c -> d -> e -> c; a -> d; b -> e; }"
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "c");
+ relation.add("c", "d");
+ relation.add("d", "e");
+ relation.add("e", "c");
+ relation.add("a", "d");
+ relation.add("b", "e");
+
+ assert_eq!(relation.minimal_upper_bounds("a", "b"), vec!["c"]);
+}
+
+#[test]
+fn parent() {
+ // An example that was misbehaving in the compiler.
+ //
+ // 4 -> 1 -> 3
+ // \ | /
+ // \ v /
+ // 2 -> 0
+ //
+ // plus a bunch of self-loops
+ //
+ // Here `->` represents `<=` and `0` is `'static`.
+
+ let pairs = vec![
+ (2, /*->*/ 0),
+ (2, /*->*/ 2),
+ (0, /*->*/ 0),
+ (0, /*->*/ 0),
+ (1, /*->*/ 0),
+ (1, /*->*/ 1),
+ (3, /*->*/ 0),
+ (3, /*->*/ 3),
+ (4, /*->*/ 0),
+ (4, /*->*/ 1),
+ (1, /*->*/ 3),
+ ];
+
+ let mut relation = TransitiveRelation::default();
+ for (a, b) in pairs {
+ relation.add(a, b);
+ }
+
+ let p = relation.postdom_parent(3);
+ assert_eq!(p, Some(0));
+}
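+
+// A hedged end-to-end sketch, not part of the upstream suite: `add`
+// invalidates the cached closure, so a query made after a later edit still
+// observes the new transitive path.
+#[test]
+fn test_closure_recompute_sketch() {
+ let mut relation = TransitiveRelation::default();
+ relation.add("a", "b");
+ // This query forces the closure to be computed and cached.
+ assert!(relation.contains("a", "b"));
+ // Adding an edge clears the cache; the next query rebuilds it and picks
+ // up the new path a -> b -> c.
+ relation.add("b", "c");
+ assert!(relation.contains("a", "c"));
+}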
diff --git a/compiler/rustc_data_structures/src/unhash.rs b/compiler/rustc_data_structures/src/unhash.rs
new file mode 100644
index 000000000..48e21a9da
--- /dev/null
+++ b/compiler/rustc_data_structures/src/unhash.rs
@@ -0,0 +1,29 @@
+use std::collections::{HashMap, HashSet};
+use std::hash::{BuildHasherDefault, Hasher};
+
+pub type UnhashMap<K, V> = HashMap<K, V, BuildHasherDefault<Unhasher>>;
+pub type UnhashSet<V> = HashSet<V, BuildHasherDefault<Unhasher>>;
+
+/// This no-op hasher expects only a single `write_u64` call. It's intended for
+/// map keys that already have hash-like quality, like `Fingerprint`.
+#[derive(Default)]
+pub struct Unhasher {
+ value: u64,
+}
+
+impl Hasher for Unhasher {
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.value
+ }
+
+ fn write(&mut self, _bytes: &[u8]) {
+ unimplemented!("use write_u64");
+ }
+
+ #[inline]
+ fn write_u64(&mut self, value: u64) {
+ debug_assert_eq!(0, self.value, "Unhasher doesn't mix values!");
+ self.value = value;
+ }
+}
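+
+// A hedged usage sketch, not in the upstream file: `u64` stands in for a
+// key type such as `Fingerprint` whose bits are already well distributed.
+#[cfg(test)]
+mod example {
+ use super::UnhashMap;
+
+ #[test]
+ fn unhash_map_sketch() {
+ let mut map: UnhashMap<u64, &str> = UnhashMap::default();
+ // Hashing a `u64` key issues exactly one `write_u64`, which the
+ // `Unhasher` stores verbatim as the finished hash.
+ map.insert(0x517c_c1b7_2722_0a95, "pre-hashed key");
+ assert_eq!(map.get(&0x517c_c1b7_2722_0a95), Some(&"pre-hashed key"));
+ }
+}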
diff --git a/compiler/rustc_data_structures/src/vec_linked_list.rs b/compiler/rustc_data_structures/src/vec_linked_list.rs
new file mode 100644
index 000000000..ce60d40b2
--- /dev/null
+++ b/compiler/rustc_data_structures/src/vec_linked_list.rs
@@ -0,0 +1,70 @@
+use rustc_index::vec::{Idx, IndexVec};
+
+pub fn iter<Ls>(
+ first: Option<Ls::LinkIndex>,
+ links: &Ls,
+) -> impl Iterator<Item = Ls::LinkIndex> + '_
+where
+ Ls: Links,
+{
+ VecLinkedListIterator { links, current: first }
+}
+
+pub struct VecLinkedListIterator<Ls>
+where
+ Ls: Links,
+{
+ links: Ls,
+ current: Option<Ls::LinkIndex>,
+}
+
+impl<Ls> Iterator for VecLinkedListIterator<Ls>
+where
+ Ls: Links,
+{
+ type Item = Ls::LinkIndex;
+
+ fn next(&mut self) -> Option<Ls::LinkIndex> {
+ if let Some(c) = self.current {
+ self.current = <Ls as Links>::next(&self.links, c);
+ Some(c)
+ } else {
+ None
+ }
+ }
+}
+
+pub trait Links {
+ type LinkIndex: Copy;
+
+ fn next(links: &Self, index: Self::LinkIndex) -> Option<Self::LinkIndex>;
+}
+
+impl<Ls> Links for &Ls
+where
+ Ls: Links,
+{
+ type LinkIndex = Ls::LinkIndex;
+
+ fn next(links: &Self, index: Ls::LinkIndex) -> Option<Ls::LinkIndex> {
+ <Ls as Links>::next(links, index)
+ }
+}
+
+pub trait LinkElem {
+ type LinkIndex: Copy;
+
+ fn next(elem: &Self) -> Option<Self::LinkIndex>;
+}
+
+impl<L, E> Links for IndexVec<L, E>
+where
+ E: LinkElem<LinkIndex = L>,
+ L: Idx,
+{
+ type LinkIndex = L;
+
+ fn next(links: &Self, index: L) -> Option<L> {
+ <E as LinkElem>::next(&links[index])
+ }
+}
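+
+// A hedged usage sketch, not in the upstream file: nodes live in an
+// `IndexVec` and each node stores the index of its successor, so `iter`
+// walks the chain without any per-node allocation. The `Node` type is
+// illustrative only.
+#[cfg(test)]
+mod example {
+ use super::{iter, LinkElem};
+ use rustc_index::vec::IndexVec;
+
+ struct Node {
+ next: Option<usize>,
+ }
+
+ impl LinkElem for Node {
+ type LinkIndex = usize;
+
+ fn next(elem: &Self) -> Option<usize> {
+ elem.next
+ }
+ }
+
+ #[test]
+ fn walk_chain_sketch() {
+ // The chain is 0 -> 2 -> 1, then it ends.
+ let nodes: IndexVec<usize, Node> = IndexVec::from_raw(vec![
+ Node { next: Some(2) },
+ Node { next: None },
+ Node { next: Some(1) },
+ ]);
+ assert_eq!(iter(Some(0), &nodes).collect::<Vec<_>>(), vec![0, 2, 1]);
+ }
+}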
diff --git a/compiler/rustc_data_structures/src/vec_map.rs b/compiler/rustc_data_structures/src/vec_map.rs
new file mode 100644
index 000000000..86be0bd87
--- /dev/null
+++ b/compiler/rustc_data_structures/src/vec_map.rs
@@ -0,0 +1,194 @@
+use std::borrow::Borrow;
+use std::fmt::Debug;
+use std::iter::FromIterator;
+use std::slice::Iter;
+use std::vec::IntoIter;
+
+use crate::stable_hasher::{HashStable, StableHasher};
+
+/// A map type implemented as a vector of pairs `K` (key) and `V` (value).
+/// It currently provides a subset of all the map operations; the rest can be added as needed.
+#[derive(Clone, Encodable, Decodable, Debug)]
+pub struct VecMap<K, V>(Vec<(K, V)>);
+
+impl<K, V> VecMap<K, V>
+where
+ K: Debug + PartialEq,
+ V: Debug,
+{
+ pub fn new() -> Self {
+ VecMap(Default::default())
+ }
+
+ /// Sets the value of the entry, and returns the entry's old value.
+ pub fn insert(&mut self, k: K, v: V) -> Option<V> {
+ if let Some(elem) = self.0.iter_mut().find(|(key, _)| *key == k) {
+ Some(std::mem::replace(&mut elem.1, v))
+ } else {
+ self.0.push((k, v));
+ None
+ }
+ }
+
+ /// Removes the entry from the map and returns the removed value
+ pub fn remove(&mut self, k: &K) -> Option<V> {
+ self.0.iter().position(|(k2, _)| k2 == k).map(|pos| self.0.remove(pos).1)
+ }
+
+ /// Gets a reference to the value in the entry.
+ pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
+ where
+ K: Borrow<Q>,
+ Q: Eq,
+ {
+ self.0.iter().find(|(key, _)| k == key.borrow()).map(|elem| &elem.1)
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
+ where
+ K: Borrow<Q>,
+ Q: Eq,
+ {
+ self.0.iter_mut().find(|(key, _)| k == key.borrow()).map(|elem| &mut elem.1)
+ }
+
+ /// Returns any value corresponding to the supplied predicate filter.
+ ///
+ /// The supplied predicate will be applied to each (key, value) pair and it will return a
+ /// reference to the first value for which the predicate returns `true`.
+ pub fn any_value_matching(&self, mut predicate: impl FnMut(&(K, V)) -> bool) -> Option<&V> {
+ self.0.iter().find(|kv| predicate(kv)).map(|elem| &elem.1)
+ }
+
+ /// Returns the value corresponding to the supplied predicate filter. It panics if there's
+ /// more than one matching element.
+ ///
+ /// The supplied predicate will be applied to each (key, value) pair and it will return a
+ /// reference to the value where the predicate returns `true`.
+ pub fn get_value_matching(&self, mut predicate: impl FnMut(&(K, V)) -> bool) -> Option<&V> {
+ let mut filter = self.0.iter().filter(|kv| predicate(kv));
+ let (_, value) = filter.next()?;
+ // This should return just one element, otherwise it's a bug
+ assert!(
+ filter.next().is_none(),
+ "Collection {:#?} should have just one matching element",
+ self
+ );
+ Some(value)
+ }
+
+ /// Returns `true` if the map contains a value for the specified key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but
+ /// [`Eq`] on the borrowed form *must* match that for
+ /// the key type.
+ pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
+ where
+ K: Borrow<Q>,
+ Q: Eq,
+ {
+ self.get(k).is_some()
+ }
+
+ /// Returns `true` if the map contains no elements.
+ pub fn is_empty(&self) -> bool {
+ self.0.is_empty()
+ }
+
+ pub fn iter(&self) -> Iter<'_, (K, V)> {
+ self.into_iter()
+ }
+
+ pub fn iter_mut(&mut self) -> impl Iterator<Item = (&K, &mut V)> {
+ self.into_iter()
+ }
+
+ pub fn retain(&mut self, f: impl Fn(&(K, V)) -> bool) {
+ self.0.retain(f)
+ }
+}
+
+impl<K, V> Default for VecMap<K, V> {
+ #[inline]
+ fn default() -> Self {
+ Self(Default::default())
+ }
+}
+
+impl<K, V> From<Vec<(K, V)>> for VecMap<K, V> {
+ fn from(vec: Vec<(K, V)>) -> Self {
+ Self(vec)
+ }
+}
+
+impl<K, V> Into<Vec<(K, V)>> for VecMap<K, V> {
+ fn into(self) -> Vec<(K, V)> {
+ self.0
+ }
+}
+
+impl<K, V> FromIterator<(K, V)> for VecMap<K, V> {
+ fn from_iter<I: IntoIterator<Item = (K, V)>>(iter: I) -> Self {
+ Self(iter.into_iter().collect())
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a VecMap<K, V> {
+ type Item = &'a (K, V);
+ type IntoIter = Iter<'a, (K, V)>;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.0.iter()
+ }
+}
+
+impl<'a, K: 'a, V: 'a> IntoIterator for &'a mut VecMap<K, V> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = impl Iterator<Item = Self::Item>;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.0.iter_mut().map(|(k, v)| (&*k, v))
+ }
+}
+
+impl<K, V> IntoIterator for VecMap<K, V> {
+ type Item = (K, V);
+ type IntoIter = IntoIter<(K, V)>;
+
+ #[inline]
+ fn into_iter(self) -> Self::IntoIter {
+ self.0.into_iter()
+ }
+}
+
+impl<K: PartialEq + Debug, V: Debug> Extend<(K, V)> for VecMap<K, V> {
+ fn extend<I: IntoIterator<Item = (K, V)>>(&mut self, iter: I) {
+ for (k, v) in iter {
+ self.insert(k, v);
+ }
+ }
+
+ fn extend_one(&mut self, (k, v): (K, V)) {
+ self.insert(k, v);
+ }
+
+ fn extend_reserve(&mut self, additional: usize) {
+ self.0.extend_reserve(additional);
+ }
+}
+
+impl<K, V, CTX> HashStable<CTX> for VecMap<K, V>
+where
+ K: HashStable<CTX> + Eq,
+ V: HashStable<CTX>,
+{
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.0.hash_stable(hcx, hasher)
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_data_structures/src/vec_map/tests.rs b/compiler/rustc_data_structures/src/vec_map/tests.rs
new file mode 100644
index 000000000..458b60077
--- /dev/null
+++ b/compiler/rustc_data_structures/src/vec_map/tests.rs
@@ -0,0 +1,48 @@
+use super::*;
+
+impl<K, V> VecMap<K, V> {
+ fn into_vec(self) -> Vec<(K, V)> {
+ self.0.into()
+ }
+}
+
+#[test]
+fn test_from_iterator() {
+ assert_eq!(
+ std::iter::empty().collect::<VecMap<i32, bool>>().into_vec(),
+ Vec::<(i32, bool)>::new()
+ );
+ assert_eq!(std::iter::once((42, true)).collect::<VecMap<_, _>>().into_vec(), vec![(42, true)]);
+ assert_eq!(
+ [(1, true), (2, false)].into_iter().collect::<VecMap<_, _>>().into_vec(),
+ vec![(1, true), (2, false)]
+ );
+}
+
+#[test]
+fn test_into_iterator_owned() {
+ assert_eq!(VecMap::new().into_iter().collect::<Vec<(i32, bool)>>(), Vec::<(i32, bool)>::new());
+ assert_eq!(VecMap::from(vec![(1, true)]).into_iter().collect::<Vec<_>>(), vec![(1, true)]);
+ assert_eq!(
+ VecMap::from(vec![(1, true), (2, false)]).into_iter().collect::<Vec<_>>(),
+ vec![(1, true), (2, false)]
+ );
+}
+
+#[test]
+fn test_insert() {
+ let mut v = VecMap::new();
+ assert_eq!(v.insert(1, true), None);
+ assert_eq!(v.insert(2, false), None);
+ assert_eq!(v.clone().into_vec(), vec![(1, true), (2, false)]);
+ assert_eq!(v.insert(1, false), Some(true));
+ assert_eq!(v.into_vec(), vec![(1, false), (2, false)]);
+}
+
+#[test]
+fn test_get() {
+ let v = [(1, true), (2, false)].into_iter().collect::<VecMap<_, _>>();
+ assert_eq!(v.get(&1), Some(&true));
+ assert_eq!(v.get(&2), Some(&false));
+ assert_eq!(v.get(&3), None);
+}
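+
+// A hedged sketch, not part of the upstream suite: `remove` and
+// `contains_key` over the same linear-scan representation.
+#[test]
+fn test_remove_sketch() {
+ let mut v: VecMap<i32, bool> = [(1, true), (2, false)].into_iter().collect();
+ assert!(v.contains_key(&1));
+ // `remove` scans for the key and returns the evicted value.
+ assert_eq!(v.remove(&1), Some(true));
+ assert_eq!(v.remove(&1), None);
+ assert!(!v.contains_key(&1));
+}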
diff --git a/compiler/rustc_data_structures/src/work_queue.rs b/compiler/rustc_data_structures/src/work_queue.rs
new file mode 100644
index 000000000..10317f1af
--- /dev/null
+++ b/compiler/rustc_data_structures/src/work_queue.rs
@@ -0,0 +1,44 @@
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::Idx;
+use std::collections::VecDeque;
+
+/// A work queue is a handy data structure for tracking work left to
+/// do. (For example, basic blocks left to process.) It is basically a
+/// de-duplicating queue; so attempting to insert X if X is already
+/// enqueued has no effect. This implementation assumes that the
+/// elements are dense indices, so it can allocate the queue to size
+/// and also use a bit set to track occupancy.
+pub struct WorkQueue<T: Idx> {
+ deque: VecDeque<T>,
+ set: BitSet<T>,
+}
+
+impl<T: Idx> WorkQueue<T> {
+ /// Creates a new work queue that starts empty, where elements range over `0..len`.
+ #[inline]
+ pub fn with_none(len: usize) -> Self {
+ WorkQueue { deque: VecDeque::with_capacity(len), set: BitSet::new_empty(len) }
+ }
+
+ /// Attempts to enqueue `element` in the work queue. Returns `false` if it was already present.
+ #[inline]
+ pub fn insert(&mut self, element: T) -> bool {
+ if self.set.insert(element) {
+ self.deque.push_back(element);
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Attempts to pop an element from the work queue.
+ #[inline]
+ pub fn pop(&mut self) -> Option<T> {
+ if let Some(element) = self.deque.pop_front() {
+ self.set.remove(element);
+ Some(element)
+ } else {
+ None
+ }
+ }
+}
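+
+// A hedged sketch of the intended use, not in the upstream file: a
+// fixed-point loop over dense indices, where re-inserting an element that
+// is still queued is a no-op.
+#[cfg(test)]
+mod example {
+ use super::WorkQueue;
+
+ #[test]
+ fn dedup_queue_sketch() {
+ let mut queue: WorkQueue<usize> = WorkQueue::with_none(4);
+ assert!(queue.insert(1));
+ // A duplicate insert is rejected while the element is queued.
+ assert!(!queue.insert(1));
+ assert_eq!(queue.pop(), Some(1));
+ // Once popped, the element may be enqueued again.
+ assert!(queue.insert(1));
+ }
+}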
diff --git a/compiler/rustc_driver/Cargo.toml b/compiler/rustc_driver/Cargo.toml
new file mode 100644
index 000000000..08d5d4f34
--- /dev/null
+++ b/compiler/rustc_driver/Cargo.toml
@@ -0,0 +1,44 @@
+[package]
+name = "rustc_driver"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+crate-type = ["dylib"]
+
+[dependencies]
+tracing = { version = "0.1.28" }
+serde_json = "1.0.59"
+rustc_log = { path = "../rustc_log" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_target = { path = "../rustc_target" }
+rustc_lint = { path = "../rustc_lint" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_hir_pretty = { path = "../rustc_hir_pretty" }
+rustc_metadata = { path = "../rustc_metadata" }
+rustc_parse = { path = "../rustc_parse" }
+rustc_plugin_impl = { path = "../rustc_plugin_impl" }
+rustc_save_analysis = { path = "../rustc_save_analysis" }
+rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
+rustc_session = { path = "../rustc_session" }
+rustc_error_codes = { path = "../rustc_error_codes" }
+rustc_interface = { path = "../rustc_interface" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+rustc_typeck = { path = "../rustc_typeck" }
+
+[target.'cfg(unix)'.dependencies]
+libc = "0.2"
+
+[target.'cfg(windows)'.dependencies]
+winapi = { version = "0.3", features = ["consoleapi", "debugapi", "processenv"] }
+
+[features]
+llvm = ['rustc_interface/llvm']
+max_level_info = ['rustc_log/max_level_info']
+rustc_use_parallel_compiler = ['rustc_data_structures/rustc_use_parallel_compiler', 'rustc_interface/rustc_use_parallel_compiler',
+ 'rustc_middle/rustc_use_parallel_compiler']
diff --git a/compiler/rustc_driver/README.md b/compiler/rustc_driver/README.md
new file mode 100644
index 000000000..37dc7f6ba
--- /dev/null
+++ b/compiler/rustc_driver/README.md
@@ -0,0 +1,10 @@
+The `driver` crate is effectively the "main" function for the Rust
+compiler. It orchestrates the compilation process and "knits together"
+the code from the other crates within rustc. This crate itself does
+not contain any of the "main logic" of the compiler (though it does
+have some code related to pretty printing or other minor compiler
+options).
+
+For more information about how the driver works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/rustc-driver.html
diff --git a/compiler/rustc_driver/src/args.rs b/compiler/rustc_driver/src/args.rs
new file mode 100644
index 000000000..01338359f
--- /dev/null
+++ b/compiler/rustc_driver/src/args.rs
@@ -0,0 +1,51 @@
+use std::error;
+use std::fmt;
+use std::fs;
+use std::io;
+
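+/// Expands a single argument: `@path` is replaced by the lines of the file
+/// at `path` (one argument per line), while any other argument is passed
+/// through unchanged.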
+fn arg_expand(arg: String) -> Result<Vec<String>, Error> {
+ if let Some(path) = arg.strip_prefix('@') {
+ let file = match fs::read_to_string(path) {
+ Ok(file) => file,
+ Err(ref err) if err.kind() == io::ErrorKind::InvalidData => {
+ return Err(Error::Utf8Error(Some(path.to_string())));
+ }
+ Err(err) => return Err(Error::IOError(path.to_string(), err)),
+ };
+ Ok(file.lines().map(ToString::to_string).collect())
+ } else {
+ Ok(vec![arg])
+ }
+}
+
+pub fn arg_expand_all(at_args: &[String]) -> Vec<String> {
+ let mut args = Vec::new();
+ for arg in at_args {
+ match arg_expand(arg.clone()) {
+ Ok(arg) => args.extend(arg),
+ Err(err) => rustc_session::early_error(
+ rustc_session::config::ErrorOutputType::default(),
+ &format!("Failed to load argument file: {}", err),
+ ),
+ }
+ }
+ args
+}
+
+#[derive(Debug)]
+pub enum Error {
+ Utf8Error(Option<String>),
+ IOError(String, io::Error),
+}
+
+impl fmt::Display for Error {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Error::Utf8Error(None) => write!(fmt, "Utf8 error"),
+ Error::Utf8Error(Some(path)) => write!(fmt, "Utf8 error in {}", path),
+ Error::IOError(path, err) => write!(fmt, "IO Error: {}: {}", path, err),
+ }
+ }
+}
+
+impl error::Error for Error {}
diff --git a/compiler/rustc_driver/src/lib.rs b/compiler/rustc_driver/src/lib.rs
new file mode 100644
index 000000000..53ae913f9
--- /dev/null
+++ b/compiler/rustc_driver/src/lib.rs
@@ -0,0 +1,1340 @@
+//! The Rust compiler.
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(let_else)]
+#![feature(once_cell)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate tracing;
+
+pub extern crate rustc_plugin_impl as plugin;
+
+use rustc_ast as ast;
+use rustc_codegen_ssa::{traits::CodegenBackend, CodegenResults};
+use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
+use rustc_data_structures::sync::SeqCst;
+use rustc_errors::registry::{InvalidErrorCode, Registry};
+use rustc_errors::{ErrorGuaranteed, PResult};
+use rustc_feature::find_gated_cfg;
+use rustc_interface::util::{self, collect_crate_types, get_codegen_backend};
+use rustc_interface::{interface, Queries};
+use rustc_lint::LintStore;
+use rustc_log::stdout_isatty;
+use rustc_metadata::locator;
+use rustc_save_analysis as save;
+use rustc_save_analysis::DumpHandler;
+use rustc_session::config::{nightly_options, CG_OPTIONS, Z_OPTIONS};
+use rustc_session::config::{ErrorOutputType, Input, OutputType, PrintRequest, TrimmedDefPaths};
+use rustc_session::cstore::MetadataLoader;
+use rustc_session::getopts;
+use rustc_session::lint::{Lint, LintId};
+use rustc_session::{config, DiagnosticOutput, Session};
+use rustc_session::{early_error, early_error_no_abort, early_warn};
+use rustc_span::source_map::{FileLoader, FileName};
+use rustc_span::symbol::sym;
+use rustc_target::json::ToJson;
+
+use std::borrow::Cow;
+use std::cmp::max;
+use std::default::Default;
+use std::env;
+use std::ffi::OsString;
+use std::fs;
+use std::io::{self, Read, Write};
+use std::panic::{self, catch_unwind};
+use std::path::PathBuf;
+use std::process::{self, Command, Stdio};
+use std::str;
+use std::sync::LazyLock;
+use std::time::Instant;
+
+pub mod args;
+pub mod pretty;
+
+/// Exit status code used for successful compilation and help output.
+pub const EXIT_SUCCESS: i32 = 0;
+
+/// Exit status code used for compilation failures and invalid flags.
+pub const EXIT_FAILURE: i32 = 1;
+
+const BUG_REPORT_URL: &str = "https://github.com/rust-lang/rust/issues/new\
+ ?labels=C-bug%2C+I-ICE%2C+T-compiler&template=ice.md";
+
+const ICE_REPORT_COMPILER_FLAGS: &[&str] = &["-Z", "-C", "--crate-type"];
+
+const ICE_REPORT_COMPILER_FLAGS_EXCLUDE: &[&str] = &["metadata", "extra-filename"];
+
+const ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE: &[&str] = &["incremental"];
+
+pub fn abort_on_err<T>(result: Result<T, ErrorGuaranteed>, sess: &Session) -> T {
+ match result {
+ Err(..) => {
+ sess.abort_if_errors();
+ panic!("error reported but abort_if_errors didn't abort???");
+ }
+ Ok(x) => x,
+ }
+}
+
+pub trait Callbacks {
+ /// Called before creating the compiler instance
+ fn config(&mut self, _config: &mut interface::Config) {}
+ /// Called after parsing. Return value instructs the compiler whether to
+ /// continue the compilation afterwards (defaults to `Compilation::Continue`)
+ fn after_parsing<'tcx>(
+ &mut self,
+ _compiler: &interface::Compiler,
+ _queries: &'tcx Queries<'tcx>,
+ ) -> Compilation {
+ Compilation::Continue
+ }
+ /// Called after expansion. Return value instructs the compiler whether to
+ /// continue the compilation afterwards (defaults to `Compilation::Continue`)
+ fn after_expansion<'tcx>(
+ &mut self,
+ _compiler: &interface::Compiler,
+ _queries: &'tcx Queries<'tcx>,
+ ) -> Compilation {
+ Compilation::Continue
+ }
+ /// Called after analysis. Return value instructs the compiler whether to
+ /// continue the compilation afterwards (defaults to `Compilation::Continue`)
+ fn after_analysis<'tcx>(
+ &mut self,
+ _compiler: &interface::Compiler,
+ _queries: &'tcx Queries<'tcx>,
+ ) -> Compilation {
+ Compilation::Continue
+ }
+}
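+
+// A hedged sketch of a downstream driver, not in the upstream file: a tool
+// can implement `Callbacks` to halt the pipeline after macro expansion. The
+// struct name is illustrative only.
+//
+// struct StopAfterExpansion;
+//
+// impl Callbacks for StopAfterExpansion {
+// fn after_expansion<'tcx>(
+// &mut self,
+// _compiler: &interface::Compiler,
+// _queries: &'tcx Queries<'tcx>,
+// ) -> Compilation {
+// Compilation::Stop
+// }
+// }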
+
+#[derive(Default)]
+pub struct TimePassesCallbacks {
+ time_passes: bool,
+}
+
+impl Callbacks for TimePassesCallbacks {
+ fn config(&mut self, config: &mut interface::Config) {
+ // If a --print=... option has been given, we don't print the "total"
+ // time because it will mess up the --print output. See #64339.
+ self.time_passes = config.opts.prints.is_empty() && config.opts.time_passes();
+ config.opts.trimmed_def_paths = TrimmedDefPaths::GoodPath;
+ }
+}
+
+pub fn diagnostics_registry() -> Registry {
+ Registry::new(rustc_error_codes::DIAGNOSTICS)
+}
+
+/// This is the primary entry point for rustc.
+pub struct RunCompiler<'a, 'b> {
+ at_args: &'a [String],
+ callbacks: &'b mut (dyn Callbacks + Send),
+ file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
+ emitter: Option<Box<dyn Write + Send>>,
+ make_codegen_backend:
+ Option<Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>>,
+}
+
+impl<'a, 'b> RunCompiler<'a, 'b> {
+ pub fn new(at_args: &'a [String], callbacks: &'b mut (dyn Callbacks + Send)) -> Self {
+ Self { at_args, callbacks, file_loader: None, emitter: None, make_codegen_backend: None }
+ }
+
+ /// Set a custom codegen backend.
+ ///
+ /// Used by cg_clif.
+ pub fn set_make_codegen_backend(
+ &mut self,
+ make_codegen_backend: Option<
+ Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>,
+ >,
+ ) -> &mut Self {
+ self.make_codegen_backend = make_codegen_backend;
+ self
+ }
+
+ /// Emit diagnostics to the specified location.
+ ///
+ /// Used by RLS.
+ pub fn set_emitter(&mut self, emitter: Option<Box<dyn Write + Send>>) -> &mut Self {
+ self.emitter = emitter;
+ self
+ }
+
+ /// Load files from sources other than the file system.
+ ///
+ /// Used by RLS.
+ pub fn set_file_loader(
+ &mut self,
+ file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
+ ) -> &mut Self {
+ self.file_loader = file_loader;
+ self
+ }
+
+ /// Parse args and run the compiler.
+ pub fn run(self) -> interface::Result<()> {
+ run_compiler(
+ self.at_args,
+ self.callbacks,
+ self.file_loader,
+ self.emitter,
+ self.make_codegen_backend,
+ )
+ }
+}
+fn run_compiler(
+ at_args: &[String],
+ callbacks: &mut (dyn Callbacks + Send),
+ file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
+ emitter: Option<Box<dyn Write + Send>>,
+ make_codegen_backend: Option<
+ Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>,
+ >,
+) -> interface::Result<()> {
+ let args = args::arg_expand_all(at_args);
+
+ let diagnostic_output = emitter.map_or(DiagnosticOutput::Default, DiagnosticOutput::Raw);
+ let Some(matches) = handle_options(&args) else { return Ok(()) };
+
+ let sopts = config::build_session_options(&matches);
+
+ if let Some(ref code) = matches.opt_str("explain") {
+ handle_explain(diagnostics_registry(), code, sopts.error_format);
+ return Ok(());
+ }
+
+ let cfg = interface::parse_cfgspecs(matches.opt_strs("cfg"));
+ let check_cfg = interface::parse_check_cfg(matches.opt_strs("check-cfg"));
+ let (odir, ofile) = make_output(&matches);
+ let mut config = interface::Config {
+ opts: sopts,
+ crate_cfg: cfg,
+ crate_check_cfg: check_cfg,
+ input: Input::File(PathBuf::new()),
+ input_path: None,
+ output_file: ofile,
+ output_dir: odir,
+ file_loader,
+ diagnostic_output,
+ lint_caps: Default::default(),
+ parse_sess_created: None,
+ register_lints: None,
+ override_queries: None,
+ make_codegen_backend,
+ registry: diagnostics_registry(),
+ };
+
+ match make_input(config.opts.error_format, &matches.free) {
+ Err(reported) => return Err(reported),
+ Ok(Some((input, input_file_path))) => {
+ config.input = input;
+ config.input_path = input_file_path;
+
+ callbacks.config(&mut config);
+ }
+ Ok(None) => match matches.free.len() {
+ 0 => {
+ callbacks.config(&mut config);
+ interface::run_compiler(config, |compiler| {
+ let sopts = &compiler.session().opts;
+ if sopts.describe_lints {
+ let mut lint_store = rustc_lint::new_lint_store(
+ sopts.unstable_opts.no_interleave_lints,
+ compiler.session().enable_internal_lints(),
+ );
+ let registered_lints =
+ if let Some(register_lints) = compiler.register_lints() {
+ register_lints(compiler.session(), &mut lint_store);
+ true
+ } else {
+ false
+ };
+ describe_lints(compiler.session(), &lint_store, registered_lints);
+ return;
+ }
+ let should_stop = print_crate_info(
+ &***compiler.codegen_backend(),
+ compiler.session(),
+ None,
+ compiler.output_dir(),
+ compiler.output_file(),
+ compiler.temps_dir(),
+ );
+
+ if should_stop == Compilation::Stop {
+ return;
+ }
+ early_error(sopts.error_format, "no input filename given")
+ });
+ return Ok(());
+ }
+ 1 => panic!("make_input should have provided valid inputs"),
+ _ => early_error(
+ config.opts.error_format,
+ &format!(
+ "multiple input filenames provided (first two filenames are `{}` and `{}`)",
+ matches.free[0], matches.free[1],
+ ),
+ ),
+ },
+ };
+
+ interface::run_compiler(config, |compiler| {
+ let sess = compiler.session();
+ let should_stop = print_crate_info(
+ &***compiler.codegen_backend(),
+ sess,
+ Some(compiler.input()),
+ compiler.output_dir(),
+ compiler.output_file(),
+ compiler.temps_dir(),
+ )
+ .and_then(|| {
+ list_metadata(sess, &*compiler.codegen_backend().metadata_loader(), compiler.input())
+ })
+ .and_then(|| try_process_rlink(sess, compiler));
+
+ if should_stop == Compilation::Stop {
+ return sess.compile_status();
+ }
+
+ let linker = compiler.enter(|queries| {
+ let early_exit = || sess.compile_status().map(|_| None);
+ queries.parse()?;
+
+ if let Some(ppm) = &sess.opts.pretty {
+ if ppm.needs_ast_map() {
+ let expanded_crate = queries.expansion()?.peek().0.clone();
+ queries.global_ctxt()?.peek_mut().enter(|tcx| {
+ pretty::print_after_hir_lowering(
+ tcx,
+ compiler.input(),
+ &*expanded_crate,
+ *ppm,
+ compiler.output_file().as_ref().map(|p| &**p),
+ );
+ Ok(())
+ })?;
+ } else {
+ let krate = queries.parse()?.take();
+ pretty::print_after_parsing(
+ sess,
+ compiler.input(),
+ &krate,
+ *ppm,
+ compiler.output_file().as_ref().map(|p| &**p),
+ );
+ }
+ trace!("finished pretty-printing");
+ return early_exit();
+ }
+
+ if callbacks.after_parsing(compiler, queries) == Compilation::Stop {
+ return early_exit();
+ }
+
+ if sess.opts.unstable_opts.parse_only || sess.opts.unstable_opts.show_span.is_some() {
+ return early_exit();
+ }
+
+ {
+ let (_, lint_store) = &*queries.register_plugins()?.peek();
+
+ // Lint plugins are registered; now we can process command line flags.
+ if sess.opts.describe_lints {
+ describe_lints(sess, lint_store, true);
+ return early_exit();
+ }
+ }
+
+ queries.expansion()?;
+ if callbacks.after_expansion(compiler, queries) == Compilation::Stop {
+ return early_exit();
+ }
+
+ queries.prepare_outputs()?;
+
+ if sess.opts.output_types.contains_key(&OutputType::DepInfo)
+ && sess.opts.output_types.len() == 1
+ {
+ return early_exit();
+ }
+
+ queries.global_ctxt()?;
+
+ if sess.opts.unstable_opts.no_analysis {
+ return early_exit();
+ }
+
+ queries.global_ctxt()?.peek_mut().enter(|tcx| {
+ let result = tcx.analysis(());
+ if sess.opts.unstable_opts.save_analysis {
+ let crate_name = queries.crate_name()?.peek().clone();
+ sess.time("save_analysis", || {
+ save::process_crate(
+ tcx,
+ &crate_name,
+ compiler.input(),
+ None,
+ DumpHandler::new(
+ compiler.output_dir().as_ref().map(|p| &**p),
+ &crate_name,
+ ),
+ )
+ });
+ }
+ result
+ })?;
+
+ if callbacks.after_analysis(compiler, queries) == Compilation::Stop {
+ return early_exit();
+ }
+
+ queries.ongoing_codegen()?;
+
+ if sess.opts.unstable_opts.print_type_sizes {
+ sess.code_stats.print_type_sizes();
+ }
+
+ let linker = queries.linker()?;
+ Ok(Some(linker))
+ })?;
+
+ if let Some(linker) = linker {
+ let _timer = sess.timer("link");
+ linker.link()?
+ }
+
+ if sess.opts.unstable_opts.perf_stats {
+ sess.print_perf_stats();
+ }
+
+ if sess.opts.unstable_opts.print_fuel.is_some() {
+ eprintln!(
+ "Fuel used by {}: {}",
+ sess.opts.unstable_opts.print_fuel.as_ref().unwrap(),
+ sess.print_fuel.load(SeqCst)
+ );
+ }
+
+ Ok(())
+ })
+}
+
+#[cfg(unix)]
+pub fn set_sigpipe_handler() {
+ unsafe {
+ // Set the SIGPIPE signal handler, so that an EPIPE
+ // will cause rustc to terminate, as expected.
+ assert_ne!(libc::signal(libc::SIGPIPE, libc::SIG_DFL), libc::SIG_ERR);
+ }
+}
+
+#[cfg(windows)]
+pub fn set_sigpipe_handler() {}
+
+// Extract output directory and file from matches.
+fn make_output(matches: &getopts::Matches) -> (Option<PathBuf>, Option<PathBuf>) {
+ let odir = matches.opt_str("out-dir").map(|o| PathBuf::from(&o));
+ let ofile = matches.opt_str("o").map(|o| PathBuf::from(&o));
+ (odir, ofile)
+}
+
+// Extract input (string or file and optional path) from matches.
+fn make_input(
+ error_format: ErrorOutputType,
+ free_matches: &[String],
+) -> Result<Option<(Input, Option<PathBuf>)>, ErrorGuaranteed> {
+ if free_matches.len() == 1 {
+ let ifile = &free_matches[0];
+ if ifile == "-" {
+ let mut src = String::new();
+ if io::stdin().read_to_string(&mut src).is_err() {
+ // Immediately stop compilation if there was an issue reading
+ // the input (for example if the input stream is not UTF-8).
+ let reported = early_error_no_abort(
+ error_format,
+ "couldn't read from stdin, as it did not contain valid UTF-8",
+ );
+ return Err(reported);
+ }
+ if let Ok(path) = env::var("UNSTABLE_RUSTDOC_TEST_PATH") {
+ let line = env::var("UNSTABLE_RUSTDOC_TEST_LINE").expect(
+ "when UNSTABLE_RUSTDOC_TEST_PATH is set \
+ UNSTABLE_RUSTDOC_TEST_LINE also needs to be set",
+ );
+ let line = isize::from_str_radix(&line, 10)
+                .expect("UNSTABLE_RUSTDOC_TEST_LINE needs to be a number");
+ let file_name = FileName::doc_test_source_code(PathBuf::from(path), line);
+ Ok(Some((Input::Str { name: file_name, input: src }, None)))
+ } else {
+ Ok(Some((Input::Str { name: FileName::anon_source_code(&src), input: src }, None)))
+ }
+ } else {
+ Ok(Some((Input::File(PathBuf::from(ifile)), Some(PathBuf::from(ifile)))))
+ }
+ } else {
+ Ok(None)
+ }
+}
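+
+// Behavior sketch (illustrative, not part of the upstream source): the three
+// shapes of `free_matches` and what `make_input` returns for each.
+//
+//     // ["-"]        -> Ok(Some((Input::Str { .. }, None)))     (source read from stdin)
+//     // ["main.rs"]  -> Ok(Some((Input::File(..), Some(path))))
+//     // [] / many    -> Ok(None)                                (caller decides what to do)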
+
+/// Whether to stop or continue compilation.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum Compilation {
+ Stop,
+ Continue,
+}
+
+impl Compilation {
+ pub fn and_then<F: FnOnce() -> Compilation>(self, next: F) -> Compilation {
+ match self {
+ Compilation::Stop => Compilation::Stop,
+ Compilation::Continue => next(),
+ }
+ }
+}
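+
+// Minimal usage sketch (illustrative): `and_then` short-circuits on `Stop`, so
+// later closures run only while every earlier stage chose to continue.
+//
+//     let outcome = Compilation::Continue
+//         .and_then(|| Compilation::Stop)
+//         .and_then(|| unreachable!("not run: the chain already stopped"));
+//     assert_eq!(outcome, Compilation::Stop);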
+
+fn handle_explain(registry: Registry, code: &str, output: ErrorOutputType) {
+ let upper_cased_code = code.to_ascii_uppercase();
+ let normalised = if upper_cased_code.starts_with('E') {
+ upper_cased_code
+ } else {
+ format!("E{0:0>4}", code)
+ };
+ match registry.try_find_description(&normalised) {
+ Ok(Some(description)) => {
+ let mut is_in_code_block = false;
+ let mut text = String::new();
+ // Slice off the leading newline and print.
+ for line in description.lines() {
+ let indent_level =
+ line.find(|c: char| !c.is_whitespace()).unwrap_or_else(|| line.len());
+ let dedented_line = &line[indent_level..];
+ if dedented_line.starts_with("```") {
+ is_in_code_block = !is_in_code_block;
+ text.push_str(&line[..(indent_level + 3)]);
+ } else if is_in_code_block && dedented_line.starts_with("# ") {
+ continue;
+ } else {
+ text.push_str(line);
+ }
+ text.push('\n');
+ }
+ if stdout_isatty() {
+ show_content_with_pager(&text);
+ } else {
+ print!("{}", text);
+ }
+ }
+ Ok(None) => {
+ early_error(output, &format!("no extended information for {}", code));
+ }
+ Err(InvalidErrorCode) => {
+ early_error(output, &format!("{} is not a valid error code", code));
+ }
+ }
+}
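+
+// Normalization sketch (illustrative): bare numeric codes are zero-padded and
+// prefixed, so `--explain 308`, `--explain 0308`, and `--explain e0308` all
+// resolve to the same registry key.
+//
+//     assert_eq!(format!("E{0:0>4}", "308"), "E0308");
+//     assert_eq!("e0308".to_ascii_uppercase(), "E0308");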
+
+fn show_content_with_pager(content: &str) {
+ let pager_name = env::var_os("PAGER").unwrap_or_else(|| {
+ if cfg!(windows) { OsString::from("more.com") } else { OsString::from("less") }
+ });
+
+ let mut fallback_to_println = false;
+
+ match Command::new(pager_name).stdin(Stdio::piped()).spawn() {
+ Ok(mut pager) => {
+ if let Some(pipe) = pager.stdin.as_mut() {
+ if pipe.write_all(content.as_bytes()).is_err() {
+ fallback_to_println = true;
+ }
+ }
+
+ if pager.wait().is_err() {
+ fallback_to_println = true;
+ }
+ }
+ Err(_) => {
+ fallback_to_println = true;
+ }
+ }
+
+    // If the pager fails for whatever reason, we should still print the
+    // content to standard output.
+ if fallback_to_println {
+ print!("{}", content);
+ }
+}
+
+pub fn try_process_rlink(sess: &Session, compiler: &interface::Compiler) -> Compilation {
+ if sess.opts.unstable_opts.link_only {
+ if let Input::File(file) = compiler.input() {
+ // FIXME: #![crate_type] and #![crate_name] support not implemented yet
+ sess.init_crate_types(collect_crate_types(sess, &[]));
+ let outputs = compiler.build_output_filenames(sess, &[]);
+ let rlink_data = fs::read(file).unwrap_or_else(|err| {
+ sess.fatal(&format!("failed to read rlink file: {}", err));
+ });
+ let codegen_results = match CodegenResults::deserialize_rlink(rlink_data) {
+ Ok(codegen) => codegen,
+ Err(error) => {
+                    sess.fatal(&format!("could not deserialize .rlink file: {error}"));
+ }
+ };
+ let result = compiler.codegen_backend().link(sess, codegen_results, &outputs);
+ abort_on_err(result, sess);
+ } else {
+ sess.fatal("rlink must be a file")
+ }
+ Compilation::Stop
+ } else {
+ Compilation::Continue
+ }
+}
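+
+// Invocation sketch (illustrative; the producing flag is an assumption based
+// on the unstable `-Z no-link`/`-Z link-only` pairing):
+//
+//     // step 1: rustc foo.rs -Z no-link      (serializes codegen results to .rlink)
+//     // step 2: rustc foo.rlink -Z link-only (deserializes them and links)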
+
+pub fn list_metadata(
+ sess: &Session,
+ metadata_loader: &dyn MetadataLoader,
+ input: &Input,
+) -> Compilation {
+ if sess.opts.unstable_opts.ls {
+ match *input {
+ Input::File(ref ifile) => {
+ let path = &(*ifile);
+ let mut v = Vec::new();
+ locator::list_file_metadata(&sess.target, path, metadata_loader, &mut v).unwrap();
+ println!("{}", String::from_utf8(v).unwrap());
+ }
+ Input::Str { .. } => {
+ early_error(ErrorOutputType::default(), "cannot list metadata for stdin");
+ }
+ }
+ return Compilation::Stop;
+ }
+
+ Compilation::Continue
+}
+
+fn print_crate_info(
+ codegen_backend: &dyn CodegenBackend,
+ sess: &Session,
+ input: Option<&Input>,
+ odir: &Option<PathBuf>,
+ ofile: &Option<PathBuf>,
+ temps_dir: &Option<PathBuf>,
+) -> Compilation {
+ use rustc_session::config::PrintRequest::*;
+    // NativeStaticLibs and LinkArgs are special - printed during linking
+    // (`all` on an empty iterator returns true, so with no `--print` requests
+    // we simply continue compiling)
+ if sess.opts.prints.iter().all(|&p| p == NativeStaticLibs || p == LinkArgs) {
+ return Compilation::Continue;
+ }
+
+ let attrs = match input {
+ None => None,
+ Some(input) => {
+ let result = parse_crate_attrs(sess, input);
+ match result {
+ Ok(attrs) => Some(attrs),
+ Err(mut parse_error) => {
+ parse_error.emit();
+ return Compilation::Stop;
+ }
+ }
+ }
+ };
+ for req in &sess.opts.prints {
+ match *req {
+ TargetList => {
+ let mut targets = rustc_target::spec::TARGETS.iter().copied().collect::<Vec<_>>();
+ targets.sort_unstable();
+ println!("{}", targets.join("\n"));
+ }
+ Sysroot => println!("{}", sess.sysroot.display()),
+ TargetLibdir => println!("{}", sess.target_tlib_path.dir.display()),
+ TargetSpec => {
+ println!("{}", serde_json::to_string_pretty(&sess.target.to_json()).unwrap());
+ }
+ FileNames | CrateName => {
+ let input = input.unwrap_or_else(|| {
+ early_error(ErrorOutputType::default(), "no input file provided")
+ });
+ let attrs = attrs.as_ref().unwrap();
+ let t_outputs = rustc_interface::util::build_output_filenames(
+ input, odir, ofile, temps_dir, attrs, sess,
+ );
+ let id = rustc_session::output::find_crate_name(sess, attrs, input);
+ if *req == PrintRequest::CrateName {
+ println!("{}", id);
+ continue;
+ }
+ let crate_types = collect_crate_types(sess, attrs);
+ for &style in &crate_types {
+ let fname =
+ rustc_session::output::filename_for_input(sess, style, &id, &t_outputs);
+ println!("{}", fname.file_name().unwrap().to_string_lossy());
+ }
+ }
+ Cfg => {
+ let mut cfgs = sess
+ .parse_sess
+ .config
+ .iter()
+ .filter_map(|&(name, value)| {
+ // Note that crt-static is a specially recognized cfg
+ // directive that's printed out here as part of
+ // rust-lang/rust#37406, but in general the
+ // `target_feature` cfg is gated under
+ // rust-lang/rust#29717. For now this is just
+ // specifically allowing the crt-static cfg and that's
+ // it, this is intended to get into Cargo and then go
+ // through to build scripts.
+ if (name != sym::target_feature || value != Some(sym::crt_dash_static))
+ && !sess.is_nightly_build()
+ && find_gated_cfg(|cfg_sym| cfg_sym == name).is_some()
+ {
+ return None;
+ }
+
+ if let Some(value) = value {
+ Some(format!("{}=\"{}\"", name, value))
+ } else {
+ Some(name.to_string())
+ }
+ })
+ .collect::<Vec<String>>();
+
+ cfgs.sort();
+ for cfg in cfgs {
+ println!("{}", cfg);
+ }
+ }
+ RelocationModels
+ | CodeModels
+ | TlsModels
+ | TargetCPUs
+ | StackProtectorStrategies
+ | TargetFeatures => {
+ codegen_backend.print(*req, sess);
+ }
+ // Any output here interferes with Cargo's parsing of other printed output
+ NativeStaticLibs => {}
+ LinkArgs => {}
+ }
+ }
+ Compilation::Stop
+}
+
+/// Prints version information
+pub fn version(binary: &str, matches: &getopts::Matches) {
+ let verbose = matches.opt_present("verbose");
+
+ println!("{} {}", binary, util::version_str().unwrap_or("unknown version"));
+
+ if verbose {
+ fn unw(x: Option<&str>) -> &str {
+ x.unwrap_or("unknown")
+ }
+ println!("binary: {}", binary);
+ println!("commit-hash: {}", unw(util::commit_hash_str()));
+ println!("commit-date: {}", unw(util::commit_date_str()));
+ println!("host: {}", config::host_triple());
+ println!("release: {}", unw(util::release_str()));
+
+ let debug_flags = matches.opt_strs("Z");
+ let backend_name = debug_flags.iter().find_map(|x| x.strip_prefix("codegen-backend="));
+ get_codegen_backend(&None, backend_name).print_version();
+ }
+}
+
+fn usage(verbose: bool, include_unstable_options: bool, nightly_build: bool) {
+ let groups = if verbose { config::rustc_optgroups() } else { config::rustc_short_optgroups() };
+ let mut options = getopts::Options::new();
+ for option in groups.iter().filter(|x| include_unstable_options || x.is_stable()) {
+ (option.apply)(&mut options);
+ }
+ let message = "Usage: rustc [OPTIONS] INPUT";
+ let nightly_help = if nightly_build {
+ "\n -Z help Print unstable compiler options"
+ } else {
+ ""
+ };
+ let verbose_help = if verbose {
+ ""
+ } else {
+ "\n --help -v Print the full set of options rustc accepts"
+ };
+ let at_path = if verbose {
+ " @path Read newline separated options from `path`\n"
+ } else {
+ ""
+ };
+ println!(
+ "{options}{at_path}\nAdditional help:
+ -C help Print codegen options
+ -W help \
+ Print 'lint' options and default settings{nightly}{verbose}\n",
+ options = options.usage(message),
+ at_path = at_path,
+ nightly = nightly_help,
+ verbose = verbose_help
+ );
+}
+
+fn print_wall_help() {
+ println!(
+ "
+The flag `-Wall` does not exist in `rustc`. Most useful lints are enabled by
+default. Use `rustc -W help` to see all available lints. It's more common to put
+warning settings in the crate root using `#![warn(LINT_NAME)]` instead of using
+the command line flag directly.
+"
+ );
+}
+
+/// Writes lint command-line options to stdout, together with a list of all available lints.
+pub fn describe_lints(sess: &Session, lint_store: &LintStore, loaded_plugins: bool) {
+ println!(
+ "
+Available lint options:
+ -W <foo> Warn about <foo>
+ -A <foo> \
+ Allow <foo>
+ -D <foo> Deny <foo>
+ -F <foo> Forbid <foo> \
+ (deny <foo> and all attempts to override)
+
+"
+ );
+
+ fn sort_lints(sess: &Session, mut lints: Vec<&'static Lint>) -> Vec<&'static Lint> {
+ // The sort doesn't case-fold but it's doubtful we care.
+ lints.sort_by_cached_key(|x: &&Lint| (x.default_level(sess.edition()), x.name));
+ lints
+ }
+
+ fn sort_lint_groups(
+ lints: Vec<(&'static str, Vec<LintId>, bool)>,
+ ) -> Vec<(&'static str, Vec<LintId>)> {
+ let mut lints: Vec<_> = lints.into_iter().map(|(x, y, _)| (x, y)).collect();
+ lints.sort_by_key(|l| l.0);
+ lints
+ }
+
+ let (plugin, builtin): (Vec<_>, _) =
+ lint_store.get_lints().iter().cloned().partition(|&lint| lint.is_plugin);
+ let plugin = sort_lints(sess, plugin);
+ let builtin = sort_lints(sess, builtin);
+
+ let (plugin_groups, builtin_groups): (Vec<_>, _) =
+ lint_store.get_lint_groups().partition(|&(.., p)| p);
+ let plugin_groups = sort_lint_groups(plugin_groups);
+ let builtin_groups = sort_lint_groups(builtin_groups);
+
+ let max_name_len =
+ plugin.iter().chain(&builtin).map(|&s| s.name.chars().count()).max().unwrap_or(0);
+ let padded = |x: &str| {
+ let mut s = " ".repeat(max_name_len - x.chars().count());
+ s.push_str(x);
+ s
+ };
+
+ println!("Lint checks provided by rustc:\n");
+
+ let print_lints = |lints: Vec<&Lint>| {
+ println!(" {} {:7.7} {}", padded("name"), "default", "meaning");
+ println!(" {} {:7.7} {}", padded("----"), "-------", "-------");
+ for lint in lints {
+ let name = lint.name_lower().replace('_', "-");
+ println!(
+ " {} {:7.7} {}",
+ padded(&name),
+ lint.default_level(sess.edition()).as_str(),
+ lint.desc
+ );
+ }
+ println!("\n");
+ };
+
+ print_lints(builtin);
+
+ let max_name_len = max(
+ "warnings".len(),
+ plugin_groups
+ .iter()
+ .chain(&builtin_groups)
+ .map(|&(s, _)| s.chars().count())
+ .max()
+ .unwrap_or(0),
+ );
+
+ let padded = |x: &str| {
+ let mut s = " ".repeat(max_name_len - x.chars().count());
+ s.push_str(x);
+ s
+ };
+
+ println!("Lint groups provided by rustc:\n");
+
+ let print_lint_groups = |lints: Vec<(&'static str, Vec<LintId>)>, all_warnings| {
+ println!(" {} sub-lints", padded("name"));
+ println!(" {} ---------", padded("----"));
+
+ if all_warnings {
+ println!(" {} all lints that are set to issue warnings", padded("warnings"));
+ }
+
+ for (name, to) in lints {
+ let name = name.to_lowercase().replace('_', "-");
+ let desc = to
+ .into_iter()
+ .map(|x| x.to_string().replace('_', "-"))
+ .collect::<Vec<String>>()
+ .join(", ");
+ println!(" {} {}", padded(&name), desc);
+ }
+ println!("\n");
+ };
+
+ print_lint_groups(builtin_groups, true);
+
+ match (loaded_plugins, plugin.len(), plugin_groups.len()) {
+ (false, 0, _) | (false, _, 0) => {
+ println!("Lint tools like Clippy can provide additional lints and lint groups.");
+ }
+ (false, ..) => panic!("didn't load lint plugins but got them anyway!"),
+ (true, 0, 0) => println!("This crate does not load any lint plugins or lint groups."),
+ (true, l, g) => {
+ if l > 0 {
+ println!("Lint checks provided by plugins loaded by this crate:\n");
+ print_lints(plugin);
+ }
+ if g > 0 {
+ println!("Lint groups provided by plugins loaded by this crate:\n");
+ print_lint_groups(plugin_groups, false);
+ }
+ }
+ }
+}
+
+fn describe_debug_flags() {
+ println!("\nAvailable options:\n");
+ print_flag_list("-Z", config::Z_OPTIONS);
+}
+
+fn describe_codegen_flags() {
+ println!("\nAvailable codegen options:\n");
+ print_flag_list("-C", config::CG_OPTIONS);
+}
+
+pub fn print_flag_list<T>(
+ cmdline_opt: &str,
+ flag_list: &[(&'static str, T, &'static str, &'static str)],
+) {
+ let max_len = flag_list.iter().map(|&(name, _, _, _)| name.chars().count()).max().unwrap_or(0);
+
+ for &(name, _, _, desc) in flag_list {
+ println!(
+ " {} {:>width$}=val -- {}",
+ cmdline_opt,
+ name.replace('_', "-"),
+ desc,
+ width = max_len
+ );
+ }
+}
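+
+// Output sketch (illustrative; the description text is made up): each flag is
+// right-aligned to the longest name, with underscores rendered as dashes:
+//
+//     -C                 opt-level=val -- optimization level to apply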
+
+/// Process command line options. Emits messages as appropriate. If compilation
+/// should continue, returns a getopts::Matches object parsed from args,
+/// otherwise returns `None`.
+///
+/// The compiler's handling of options is a little complicated as it ties into
+/// our stability story. The current intention of each compiler option is to
+/// have one of two modes:
+///
+/// 1. An option is stable and can be used everywhere.
+/// 2. An option is unstable, and can only be used on nightly.
+///
+/// Like unstable library and language features, however, unstable options have
+/// always required a form of "opt in" to indicate that you're using them. This
+/// makes it easy to scan a code base and check whether anything unstable is
+/// being used. Currently, this "opt in" is the `-Z` "zed" flag.
+///
+/// All options behind `-Z` are considered unstable by default. Other top-level
+/// options can also be considered unstable, and they are unlocked through the
+/// `-Z unstable-options` flag. Note that `-Z` remains the root of instability
+/// in both cases, though.
+///
+/// So with all that in mind, the comments below have some more detail about the
+/// contortions done here to get things to work out correctly.
+pub fn handle_options(args: &[String]) -> Option<getopts::Matches> {
+ // Throw away the first argument, the name of the binary
+ let args = &args[1..];
+
+ if args.is_empty() {
+ // user did not write `-v` nor `-Z unstable-options`, so do not
+ // include that extra information.
+ let nightly_build =
+ rustc_feature::UnstableFeatures::from_environment(None).is_nightly_build();
+ usage(false, false, nightly_build);
+ return None;
+ }
+
+    // Parse with *all* options defined in the compiler; we don't worry about
+    // option stability here, we just want to parse as much as possible.
+ let mut options = getopts::Options::new();
+ for option in config::rustc_optgroups() {
+ (option.apply)(&mut options);
+ }
+ let matches = options.parse(args).unwrap_or_else(|e| {
+ let msg = match e {
+ getopts::Fail::UnrecognizedOption(ref opt) => CG_OPTIONS
+ .iter()
+ .map(|&(name, ..)| ('C', name))
+ .chain(Z_OPTIONS.iter().map(|&(name, ..)| ('Z', name)))
+ .find(|&(_, name)| *opt == name.replace('_', "-"))
+ .map(|(flag, _)| format!("{}. Did you mean `-{} {}`?", e, flag, opt)),
+ _ => None,
+ };
+ early_error(ErrorOutputType::default(), &msg.unwrap_or_else(|| e.to_string()));
+ });
+
+ // For all options we just parsed, we check a few aspects:
+ //
+ // * If the option is stable, we're all good
+ // * If the option wasn't passed, we're all good
+ // * If `-Z unstable-options` wasn't passed (and we're not a -Z option
+ // ourselves), then we require the `-Z unstable-options` flag to unlock
+ // this option that was passed.
+ // * If we're a nightly compiler, then unstable options are now unlocked, so
+ // we're good to go.
+ // * Otherwise, if we're an unstable option then we generate an error
+ // (unstable option being used on stable)
+ nightly_options::check_nightly_options(&matches, &config::rustc_optgroups());
+
+ if matches.opt_present("h") || matches.opt_present("help") {
+ // Only show unstable options in --help if we accept unstable options.
+ let unstable_enabled = nightly_options::is_unstable_enabled(&matches);
+ let nightly_build = nightly_options::match_is_nightly_build(&matches);
+ usage(matches.opt_present("verbose"), unstable_enabled, nightly_build);
+ return None;
+ }
+
+ // Handle the special case of -Wall.
+ let wall = matches.opt_strs("W");
+ if wall.iter().any(|x| *x == "all") {
+ print_wall_help();
+ rustc_errors::FatalError.raise();
+ }
+
+ // Don't handle -W help here, because we might first load plugins.
+ let debug_flags = matches.opt_strs("Z");
+ if debug_flags.iter().any(|x| *x == "help") {
+ describe_debug_flags();
+ return None;
+ }
+
+ let cg_flags = matches.opt_strs("C");
+
+ if cg_flags.iter().any(|x| *x == "help") {
+ describe_codegen_flags();
+ return None;
+ }
+
+ if cg_flags.iter().any(|x| *x == "no-stack-check") {
+ early_warn(
+ ErrorOutputType::default(),
+ "the --no-stack-check flag is deprecated and does nothing",
+ );
+ }
+
+ if cg_flags.iter().any(|x| *x == "passes=list") {
+ let backend_name = debug_flags.iter().find_map(|x| x.strip_prefix("codegen-backend="));
+ get_codegen_backend(&None, backend_name).print_passes();
+ return None;
+ }
+
+ if matches.opt_present("version") {
+ version("rustc", &matches);
+ return None;
+ }
+
+ Some(matches)
+}
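+
+// Flow sketch (illustrative): every early exit above returns `None`, so a
+// wrapper only continues when there is real compilation work left to do.
+//
+//     fn run(args: &[String]) -> Option<getopts::Matches> {
+//         let matches = handle_options(args)?;
+//         // ... translate `matches` into `config::Options` and run the compiler
+//         Some(matches)
+//     }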
+
+fn parse_crate_attrs<'a>(sess: &'a Session, input: &Input) -> PResult<'a, Vec<ast::Attribute>> {
+ match input {
+ Input::File(ifile) => rustc_parse::parse_crate_attrs_from_file(ifile, &sess.parse_sess),
+ Input::Str { name, input } => rustc_parse::parse_crate_attrs_from_source_str(
+ name.clone(),
+ input.clone(),
+ &sess.parse_sess,
+ ),
+ }
+}
+
+/// Gets a list of extra command-line flags provided by the user, as strings.
+///
+/// This function is used during ICEs to show more information useful for
+/// debugging, since some ICEs only happen with non-default compiler flags
+/// (and users don't always report them).
+fn extra_compiler_flags() -> Option<(Vec<String>, bool)> {
+ let mut args = env::args_os().map(|arg| arg.to_string_lossy().to_string()).peekable();
+
+ let mut result = Vec::new();
+ let mut excluded_cargo_defaults = false;
+ while let Some(arg) = args.next() {
+ if let Some(a) = ICE_REPORT_COMPILER_FLAGS.iter().find(|a| arg.starts_with(*a)) {
+ let content = if arg.len() == a.len() {
+ match args.next() {
+ Some(arg) => arg.to_string(),
+ None => continue,
+ }
+ } else if arg.get(a.len()..a.len() + 1) == Some("=") {
+ arg[a.len() + 1..].to_string()
+ } else {
+ arg[a.len()..].to_string()
+ };
+ if ICE_REPORT_COMPILER_FLAGS_EXCLUDE.iter().any(|exc| content.starts_with(exc)) {
+ excluded_cargo_defaults = true;
+ } else {
+ result.push(a.to_string());
+ match ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE.iter().find(|s| content.starts_with(*s))
+ {
+ Some(s) => result.push(s.to_string()),
+ None => result.push(content),
+ }
+ }
+ }
+ }
+
+ if !result.is_empty() { Some((result, excluded_cargo_defaults)) } else { None }
+}
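+
+// Worked example (illustrative; assumes `incremental` is listed in
+// ICE_REPORT_COMPILER_FLAGS_STRIP_VALUE earlier in this file, so its value is
+// stripped from the report):
+//
+//     // argv:   rustc foo.rs -C incremental=/tmp/cache -Z verbose
+//     // result: Some((vec!["-C", "incremental", "-Z", "verbose"], false))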
+
+/// Runs a closure and catches unwinds triggered by fatal errors.
+///
+/// The compiler currently unwinds with a special sentinel value to abort
+/// compilation on fatal errors. This function catches that sentinel and turns
+/// the panic into a `Result` instead.
+pub fn catch_fatal_errors<F: FnOnce() -> R, R>(f: F) -> Result<R, ErrorGuaranteed> {
+ catch_unwind(panic::AssertUnwindSafe(f)).map_err(|value| {
+ if value.is::<rustc_errors::FatalErrorMarker>() {
+ ErrorGuaranteed::unchecked_claim_error_was_emitted()
+ } else {
+ panic::resume_unwind(value);
+ }
+ })
+}
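+
+// Usage sketch (illustrative): a fatal error raised inside the closure is
+// caught and surfaced as `Err(ErrorGuaranteed)` instead of unwinding further.
+//
+//     let result = catch_fatal_errors(|| rustc_errors::FatalError.raise());
+//     assert!(result.is_err());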
+
+/// Variant of `catch_fatal_errors` for the `interface::Result` return type
+/// that also computes the exit code.
+pub fn catch_with_exit_code(f: impl FnOnce() -> interface::Result<()>) -> i32 {
+ let result = catch_fatal_errors(f).and_then(|result| result);
+ match result {
+ Ok(()) => EXIT_SUCCESS,
+ Err(_) => EXIT_FAILURE,
+ }
+}
+
+static DEFAULT_HOOK: LazyLock<Box<dyn Fn(&panic::PanicInfo<'_>) + Sync + Send + 'static>> =
+ LazyLock::new(|| {
+ let hook = panic::take_hook();
+ panic::set_hook(Box::new(|info| {
+ // Invoke the default handler, which prints the actual panic message and optionally a backtrace
+ (*DEFAULT_HOOK)(info);
+
+ // Separate the output with an empty line
+ eprintln!();
+
+ // Print the ICE message
+ report_ice(info, BUG_REPORT_URL);
+ }));
+ hook
+ });
+
+/// Prints the ICE message, including query stack, but without backtrace.
+///
+/// The message will point the user at `bug_report_url` to report the ICE.
+///
+/// When `install_ice_hook` is called, this function will be called as the panic
+/// hook.
+pub fn report_ice(info: &panic::PanicInfo<'_>, bug_report_url: &str) {
+ let fallback_bundle =
+ rustc_errors::fallback_fluent_bundle(rustc_errors::DEFAULT_LOCALE_RESOURCES, false);
+ let emitter = Box::new(rustc_errors::emitter::EmitterWriter::stderr(
+ rustc_errors::ColorConfig::Auto,
+ None,
+ None,
+ fallback_bundle,
+ false,
+ false,
+ None,
+ false,
+ ));
+ let handler = rustc_errors::Handler::with_emitter(true, None, emitter);
+
+    // A `.span_bug` or `.bug` call has already printed what
+    // it wants to print.
+ if !info.payload().is::<rustc_errors::ExplicitBug>() {
+ let mut d = rustc_errors::Diagnostic::new(rustc_errors::Level::Bug, "unexpected panic");
+ handler.emit_diagnostic(&mut d);
+ }
+
+ let mut xs: Vec<Cow<'static, str>> = vec![
+ "the compiler unexpectedly panicked. this is a bug.".into(),
+ format!("we would appreciate a bug report: {}", bug_report_url).into(),
+ format!(
+ "rustc {} running on {}",
+        util::version_str().unwrap_or("unknown version"),
+ config::host_triple()
+ )
+ .into(),
+ ];
+
+ if let Some((flags, excluded_cargo_defaults)) = extra_compiler_flags() {
+ xs.push(format!("compiler flags: {}", flags.join(" ")).into());
+
+ if excluded_cargo_defaults {
+ xs.push("some of the compiler flags provided by cargo are hidden".into());
+ }
+ }
+
+ for note in &xs {
+ handler.note_without_error(note.as_ref());
+ }
+
+ // If backtraces are enabled, also print the query stack
+ let backtrace = env::var_os("RUST_BACKTRACE").map_or(false, |x| &x != "0");
+
+ let num_frames = if backtrace { None } else { Some(2) };
+
+ interface::try_print_query_stack(&handler, num_frames);
+
+ #[cfg(windows)]
+ unsafe {
+ if env::var("RUSTC_BREAK_ON_ICE").is_ok() {
+ // Trigger a debugger if we crashed during bootstrap
+ winapi::um::debugapi::DebugBreak();
+ }
+ }
+}
+
+/// Installs a panic hook that will print the ICE message on unexpected panics.
+///
+/// A custom rustc driver can skip calling this to set up a custom ICE hook.
+pub fn install_ice_hook() {
+ // If the user has not explicitly overridden "RUST_BACKTRACE", then produce
+ // full backtraces. When a compiler ICE happens, we want to gather
+ // as much information as possible to present in the issue opened
+ // by the user. Compiler developers and other rustc users can
+ // opt in to less-verbose backtraces by manually setting "RUST_BACKTRACE"
+ // (e.g. `RUST_BACKTRACE=1`)
+ if std::env::var("RUST_BACKTRACE").is_err() {
+ std::env::set_var("RUST_BACKTRACE", "full");
+ }
+ LazyLock::force(&DEFAULT_HOOK);
+}
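+
+// Custom-driver sketch (illustrative): a tool that wants rustc's ICE reporting
+// installs the hook once at startup, before handing control to the compiler.
+//
+//     fn main() {
+//         rustc_driver::install_ice_hook();
+//         // ... invoke RunCompiler as in `main` below
+//     }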
+
+/// This allows tools to enable rust logging without having to magically match rustc's
+/// tracing crate version.
+pub fn init_rustc_env_logger() {
+ if let Err(error) = rustc_log::init_rustc_env_logger() {
+ early_error(ErrorOutputType::default(), &error.to_string());
+ }
+}
+
+/// This allows tools to enable rust logging without having to magically match rustc's
+/// tracing crate version. In contrast to `init_rustc_env_logger` it allows you to choose an env var
+/// other than `RUSTC_LOG`.
+pub fn init_env_logger(env: &str) {
+ if let Err(error) = rustc_log::init_env_logger(env) {
+ early_error(ErrorOutputType::default(), &error.to_string());
+ }
+}
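+
+// Usage sketch (illustrative; `MYTOOL_LOG` is a made-up variable name): a tool
+// can route its tracing output through an env var of its own choosing.
+//
+//     rustc_driver::init_env_logger("MYTOOL_LOG");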
+
+#[cfg(all(unix, any(target_env = "gnu", target_os = "macos")))]
+mod signal_handler {
+ extern "C" {
+ fn backtrace_symbols_fd(
+ buffer: *const *mut libc::c_void,
+ size: libc::c_int,
+ fd: libc::c_int,
+ );
+ }
+
+ extern "C" fn print_stack_trace(_: libc::c_int) {
+ const MAX_FRAMES: usize = 256;
+ static mut STACK_TRACE: [*mut libc::c_void; MAX_FRAMES] =
+ [std::ptr::null_mut(); MAX_FRAMES];
+ unsafe {
+ let depth = libc::backtrace(STACK_TRACE.as_mut_ptr(), MAX_FRAMES as i32);
+ if depth == 0 {
+ return;
+ }
+ backtrace_symbols_fd(STACK_TRACE.as_ptr(), depth, 2);
+ }
+ }
+
+ // When an error signal (such as SIGABRT or SIGSEGV) is delivered to the
+ // process, print a stack trace and then exit.
+ pub(super) fn install() {
+ unsafe {
+ const ALT_STACK_SIZE: usize = libc::MINSIGSTKSZ + 64 * 1024;
+ let mut alt_stack: libc::stack_t = std::mem::zeroed();
+ alt_stack.ss_sp =
+ std::alloc::alloc(std::alloc::Layout::from_size_align(ALT_STACK_SIZE, 1).unwrap())
+ as *mut libc::c_void;
+ alt_stack.ss_size = ALT_STACK_SIZE;
+ libc::sigaltstack(&alt_stack, std::ptr::null_mut());
+
+ let mut sa: libc::sigaction = std::mem::zeroed();
+ sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
+ sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
+ libc::sigemptyset(&mut sa.sa_mask);
+ libc::sigaction(libc::SIGSEGV, &sa, std::ptr::null_mut());
+ }
+ }
+}
+
+#[cfg(not(all(unix, any(target_env = "gnu", target_os = "macos"))))]
+mod signal_handler {
+ pub(super) fn install() {}
+}
+
+pub fn main() -> ! {
+ let start_time = Instant::now();
+ let start_rss = get_resident_set_size();
+ init_rustc_env_logger();
+ signal_handler::install();
+ let mut callbacks = TimePassesCallbacks::default();
+ install_ice_hook();
+ let exit_code = catch_with_exit_code(|| {
+ let args = env::args_os()
+ .enumerate()
+ .map(|(i, arg)| {
+ arg.into_string().unwrap_or_else(|arg| {
+ early_error(
+ ErrorOutputType::default(),
+ &format!("argument {} is not valid Unicode: {:?}", i, arg),
+ )
+ })
+ })
+ .collect::<Vec<_>>();
+ RunCompiler::new(&args, &mut callbacks).run()
+ });
+
+ if callbacks.time_passes {
+ let end_rss = get_resident_set_size();
+ print_time_passes_entry("total", start_time.elapsed(), start_rss, end_rss);
+ }
+
+ process::exit(exit_code)
+}
diff --git a/compiler/rustc_driver/src/pretty.rs b/compiler/rustc_driver/src/pretty.rs
new file mode 100644
index 000000000..f66b1a297
--- /dev/null
+++ b/compiler/rustc_driver/src/pretty.rs
@@ -0,0 +1,518 @@
+//! The various pretty-printing routines.
+
+use rustc_ast as ast;
+use rustc_ast_pretty::pprust;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir_pretty as pprust_hir;
+use rustc_middle::hir::map as hir_map;
+use rustc_middle::mir::{write_mir_graphviz, write_mir_pretty};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::config::{Input, PpAstTreeMode, PpHirMode, PpMode, PpSourceMode};
+use rustc_session::Session;
+use rustc_span::symbol::Ident;
+use rustc_span::FileName;
+
+use std::cell::Cell;
+use std::fmt::Write;
+use std::path::Path;
+
+pub use self::PpMode::*;
+pub use self::PpSourceMode::*;
+use crate::abort_on_err;
+
+// This slightly awkward construction is to allow for each PpMode to
+// choose whether it needs to do analyses (which can consume the
+// Session) and then pass through the session (now attached to the
+// analysis results) on to the chosen pretty-printer, along with the
+// `&PpAnn` object.
+//
+// Note that since the `&PrinterSupport` is freshly constructed on each
+// call, it would not make sense to try to attach the lifetime of `self`
+// to the lifetime of the `&PrinterSupport` object.
+
+/// Constructs a `PrinterSupport` object and passes it to `f`.
+fn call_with_pp_support<'tcx, A, F>(
+ ppmode: &PpSourceMode,
+ sess: &'tcx Session,
+ tcx: Option<TyCtxt<'tcx>>,
+ f: F,
+) -> A
+where
+ F: FnOnce(&dyn PrinterSupport) -> A,
+{
+ match *ppmode {
+ Normal | Expanded => {
+ let annotation = NoAnn { sess, tcx };
+ f(&annotation)
+ }
+
+ Identified | ExpandedIdentified => {
+ let annotation = IdentifiedAnnotation { sess, tcx };
+ f(&annotation)
+ }
+ ExpandedHygiene => {
+ let annotation = HygieneAnnotation { sess };
+ f(&annotation)
+ }
+ }
+}
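+
+// Dispatch sketch (illustrative): `Normal` needs no type context, so the
+// closure sees the session only through the `PrinterSupport` object.
+//
+//     let edition = call_with_pp_support(&Normal, sess, None, |ann| {
+//         ann.sess().edition()
+//     });
+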
+fn call_with_pp_support_hir<A, F>(ppmode: &PpHirMode, tcx: TyCtxt<'_>, f: F) -> A
+where
+ F: FnOnce(&dyn HirPrinterSupport<'_>, hir_map::Map<'_>) -> A,
+{
+ match *ppmode {
+ PpHirMode::Normal => {
+ let annotation = NoAnn { sess: tcx.sess, tcx: Some(tcx) };
+ f(&annotation, tcx.hir())
+ }
+
+ PpHirMode::Identified => {
+ let annotation = IdentifiedAnnotation { sess: tcx.sess, tcx: Some(tcx) };
+ f(&annotation, tcx.hir())
+ }
+ PpHirMode::Typed => {
+ abort_on_err(tcx.analysis(()), tcx.sess);
+
+ let annotation = TypedAnnotation { tcx, maybe_typeck_results: Cell::new(None) };
+ tcx.dep_graph.with_ignore(|| f(&annotation, tcx.hir()))
+ }
+ }
+}
+
+trait PrinterSupport: pprust::PpAnn {
+ /// Provides a uniform interface for re-extracting a reference to a
+ /// `Session` from a value that now owns it.
+ fn sess(&self) -> &Session;
+
+ /// Produces the pretty-print annotation object.
+ ///
+ /// (Rust does not yet support upcasting from a trait object to
+ /// an object for one of its supertraits.)
+ fn pp_ann(&self) -> &dyn pprust::PpAnn;
+}
+
+trait HirPrinterSupport<'hir>: pprust_hir::PpAnn {
+ /// Provides a uniform interface for re-extracting a reference to a
+ /// `Session` from a value that now owns it.
+ fn sess(&self) -> &Session;
+
+ /// Provides a uniform interface for re-extracting a reference to an
+ /// `hir_map::Map` from a value that now owns it.
+ fn hir_map(&self) -> Option<hir_map::Map<'hir>>;
+
+ /// Produces the pretty-print annotation object.
+ ///
+ /// (Rust does not yet support upcasting from a trait object to
+ /// an object for one of its supertraits.)
+ fn pp_ann(&self) -> &dyn pprust_hir::PpAnn;
+}
+
+struct NoAnn<'hir> {
+ sess: &'hir Session,
+ tcx: Option<TyCtxt<'hir>>,
+}
+
+impl<'hir> PrinterSupport for NoAnn<'hir> {
+ fn sess(&self) -> &Session {
+ self.sess
+ }
+
+ fn pp_ann(&self) -> &dyn pprust::PpAnn {
+ self
+ }
+}
+
+impl<'hir> HirPrinterSupport<'hir> for NoAnn<'hir> {
+ fn sess(&self) -> &Session {
+ self.sess
+ }
+
+ fn hir_map(&self) -> Option<hir_map::Map<'hir>> {
+ self.tcx.map(|tcx| tcx.hir())
+ }
+
+ fn pp_ann(&self) -> &dyn pprust_hir::PpAnn {
+ self
+ }
+}
+
+impl<'hir> pprust::PpAnn for NoAnn<'hir> {}
+impl<'hir> pprust_hir::PpAnn for NoAnn<'hir> {
+ fn nested(&self, state: &mut pprust_hir::State<'_>, nested: pprust_hir::Nested) {
+ if let Some(tcx) = self.tcx {
+ pprust_hir::PpAnn::nested(&(&tcx.hir() as &dyn hir::intravisit::Map<'_>), state, nested)
+ }
+ }
+}
+
+struct IdentifiedAnnotation<'hir> {
+ sess: &'hir Session,
+ tcx: Option<TyCtxt<'hir>>,
+}
+
+impl<'hir> PrinterSupport for IdentifiedAnnotation<'hir> {
+ fn sess(&self) -> &Session {
+ self.sess
+ }
+
+ fn pp_ann(&self) -> &dyn pprust::PpAnn {
+ self
+ }
+}
+
+impl<'hir> pprust::PpAnn for IdentifiedAnnotation<'hir> {
+ fn pre(&self, s: &mut pprust::State<'_>, node: pprust::AnnNode<'_>) {
+ if let pprust::AnnNode::Expr(_) = node {
+ s.popen();
+ }
+ }
+ fn post(&self, s: &mut pprust::State<'_>, node: pprust::AnnNode<'_>) {
+ match node {
+ pprust::AnnNode::Crate(_) | pprust::AnnNode::Ident(_) | pprust::AnnNode::Name(_) => {}
+
+ pprust::AnnNode::Item(item) => {
+ s.s.space();
+ s.synth_comment(item.id.to_string())
+ }
+ pprust::AnnNode::SubItem(id) => {
+ s.s.space();
+ s.synth_comment(id.to_string())
+ }
+ pprust::AnnNode::Block(blk) => {
+ s.s.space();
+ s.synth_comment(format!("block {}", blk.id))
+ }
+ pprust::AnnNode::Expr(expr) => {
+ s.s.space();
+ s.synth_comment(expr.id.to_string());
+ s.pclose()
+ }
+ pprust::AnnNode::Pat(pat) => {
+ s.s.space();
+ s.synth_comment(format!("pat {}", pat.id));
+ }
+ }
+ }
+}
+
+impl<'hir> HirPrinterSupport<'hir> for IdentifiedAnnotation<'hir> {
+ fn sess(&self) -> &Session {
+ self.sess
+ }
+
+ fn hir_map(&self) -> Option<hir_map::Map<'hir>> {
+ self.tcx.map(|tcx| tcx.hir())
+ }
+
+ fn pp_ann(&self) -> &dyn pprust_hir::PpAnn {
+ self
+ }
+}
+
+impl<'hir> pprust_hir::PpAnn for IdentifiedAnnotation<'hir> {
+ fn nested(&self, state: &mut pprust_hir::State<'_>, nested: pprust_hir::Nested) {
+ if let Some(ref tcx) = self.tcx {
+ pprust_hir::PpAnn::nested(&(&tcx.hir() as &dyn hir::intravisit::Map<'_>), state, nested)
+ }
+ }
+ fn pre(&self, s: &mut pprust_hir::State<'_>, node: pprust_hir::AnnNode<'_>) {
+ if let pprust_hir::AnnNode::Expr(_) = node {
+ s.popen();
+ }
+ }
+ fn post(&self, s: &mut pprust_hir::State<'_>, node: pprust_hir::AnnNode<'_>) {
+ match node {
+ pprust_hir::AnnNode::Name(_) => {}
+ pprust_hir::AnnNode::Item(item) => {
+ s.s.space();
+ s.synth_comment(format!("hir_id: {}", item.hir_id()));
+ }
+ pprust_hir::AnnNode::SubItem(id) => {
+ s.s.space();
+ s.synth_comment(id.to_string());
+ }
+ pprust_hir::AnnNode::Block(blk) => {
+ s.s.space();
+ s.synth_comment(format!("block hir_id: {}", blk.hir_id));
+ }
+ pprust_hir::AnnNode::Expr(expr) => {
+ s.s.space();
+ s.synth_comment(format!("expr hir_id: {}", expr.hir_id));
+ s.pclose();
+ }
+ pprust_hir::AnnNode::Pat(pat) => {
+ s.s.space();
+ s.synth_comment(format!("pat hir_id: {}", pat.hir_id));
+ }
+ pprust_hir::AnnNode::Arm(arm) => {
+ s.s.space();
+ s.synth_comment(format!("arm hir_id: {}", arm.hir_id));
+ }
+ }
+ }
+}
+
+struct HygieneAnnotation<'a> {
+ sess: &'a Session,
+}
+
+impl<'a> PrinterSupport for HygieneAnnotation<'a> {
+ fn sess(&self) -> &Session {
+ self.sess
+ }
+
+ fn pp_ann(&self) -> &dyn pprust::PpAnn {
+ self
+ }
+}
+
+impl<'a> pprust::PpAnn for HygieneAnnotation<'a> {
+ fn post(&self, s: &mut pprust::State<'_>, node: pprust::AnnNode<'_>) {
+ match node {
+ pprust::AnnNode::Ident(&Ident { name, span }) => {
+ s.s.space();
+ s.synth_comment(format!("{}{:?}", name.as_u32(), span.ctxt()))
+ }
+ pprust::AnnNode::Name(&name) => {
+ s.s.space();
+ s.synth_comment(name.as_u32().to_string())
+ }
+ pprust::AnnNode::Crate(_) => {
+ s.s.hardbreak();
+ let verbose = self.sess.verbose();
+ s.synth_comment(rustc_span::hygiene::debug_hygiene_data(verbose));
+ s.s.hardbreak_if_not_bol();
+ }
+ _ => {}
+ }
+ }
+}
+
+struct TypedAnnotation<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ maybe_typeck_results: Cell<Option<&'tcx ty::TypeckResults<'tcx>>>,
+}
+
+impl<'tcx> HirPrinterSupport<'tcx> for TypedAnnotation<'tcx> {
+ fn sess(&self) -> &Session {
+ self.tcx.sess
+ }
+
+ fn hir_map(&self) -> Option<hir_map::Map<'tcx>> {
+ Some(self.tcx.hir())
+ }
+
+ fn pp_ann(&self) -> &dyn pprust_hir::PpAnn {
+ self
+ }
+}
+
+impl<'tcx> pprust_hir::PpAnn for TypedAnnotation<'tcx> {
+ fn nested(&self, state: &mut pprust_hir::State<'_>, nested: pprust_hir::Nested) {
+ let old_maybe_typeck_results = self.maybe_typeck_results.get();
+ if let pprust_hir::Nested::Body(id) = nested {
+ self.maybe_typeck_results.set(Some(self.tcx.typeck_body(id)));
+ }
+ let pp_ann = &(&self.tcx.hir() as &dyn hir::intravisit::Map<'_>);
+ pprust_hir::PpAnn::nested(pp_ann, state, nested);
+ self.maybe_typeck_results.set(old_maybe_typeck_results);
+ }
+ fn pre(&self, s: &mut pprust_hir::State<'_>, node: pprust_hir::AnnNode<'_>) {
+ if let pprust_hir::AnnNode::Expr(_) = node {
+ s.popen();
+ }
+ }
+ fn post(&self, s: &mut pprust_hir::State<'_>, node: pprust_hir::AnnNode<'_>) {
+ if let pprust_hir::AnnNode::Expr(expr) = node {
+ let typeck_results = self.maybe_typeck_results.get().or_else(|| {
+ self.tcx
+ .hir()
+ .maybe_body_owned_by(expr.hir_id.owner)
+ .map(|body_id| self.tcx.typeck_body(body_id))
+ });
+
+ if let Some(typeck_results) = typeck_results {
+ s.s.space();
+ s.s.word("as");
+ s.s.space();
+ s.s.word(typeck_results.expr_ty(expr).to_string());
+ }
+
+ s.pclose();
+ }
+ }
+}
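+
+// Output sketch (illustrative): under `-Z unpretty=hir,typed` an expression is
+// wrapped in parentheses and its inferred type appended, so `1 + 2` prints as:
+//
+//     (1 + 2 as i32)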
+
+fn get_source(input: &Input, sess: &Session) -> (String, FileName) {
+ let src_name = input.source_name();
+ let src = String::clone(
+ sess.source_map()
+ .get_source_file(&src_name)
+ .expect("get_source_file")
+ .src
+ .as_ref()
+ .expect("src"),
+ );
+ (src, src_name)
+}
+
+fn write_or_print(out: &str, ofile: Option<&Path>) {
+ match ofile {
+ None => print!("{}", out),
+ Some(p) => {
+ if let Err(e) = std::fs::write(p, out) {
+                panic!("pretty-print failed to write {} due to {}", p.display(), e);
+ }
+ }
+ }
+}
+
+pub fn print_after_parsing(
+ sess: &Session,
+ input: &Input,
+ krate: &ast::Crate,
+ ppm: PpMode,
+ ofile: Option<&Path>,
+) {
+ let (src, src_name) = get_source(input, sess);
+
+ let out = match ppm {
+ Source(s) => {
+ // Silently ignores an identified node.
+ call_with_pp_support(&s, sess, None, move |annotation| {
+ debug!("pretty printing source code {:?}", s);
+ let sess = annotation.sess();
+ let parse = &sess.parse_sess;
+ pprust::print_crate(
+ sess.source_map(),
+ krate,
+ src_name,
+ src,
+ annotation.pp_ann(),
+ false,
+ parse.edition,
+ )
+ })
+ }
+ AstTree(PpAstTreeMode::Normal) => {
+ debug!("pretty printing AST tree");
+ format!("{:#?}", krate)
+ }
+ _ => unreachable!(),
+ };
+
+ write_or_print(&out, ofile);
+}
+
+pub fn print_after_hir_lowering<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ input: &Input,
+ krate: &ast::Crate,
+ ppm: PpMode,
+ ofile: Option<&Path>,
+) {
+ if ppm.needs_analysis() {
+ abort_on_err(print_with_analysis(tcx, ppm, ofile), tcx.sess);
+ return;
+ }
+
+ let (src, src_name) = get_source(input, tcx.sess);
+
+ let out = match ppm {
+ Source(s) => {
+ // Silently ignores an identified node.
+ call_with_pp_support(&s, tcx.sess, Some(tcx), move |annotation| {
+ debug!("pretty printing source code {:?}", s);
+ let sess = annotation.sess();
+ let parse = &sess.parse_sess;
+ pprust::print_crate(
+ sess.source_map(),
+ krate,
+ src_name,
+ src,
+ annotation.pp_ann(),
+ true,
+ parse.edition,
+ )
+ })
+ }
+
+ AstTree(PpAstTreeMode::Expanded) => {
+ debug!("pretty-printing expanded AST");
+ format!("{:#?}", krate)
+ }
+
+ Hir(s) => call_with_pp_support_hir(&s, tcx, move |annotation, hir_map| {
+ debug!("pretty printing HIR {:?}", s);
+ let sess = annotation.sess();
+ let sm = sess.source_map();
+ let attrs = |id| hir_map.attrs(id);
+ pprust_hir::print_crate(
+ sm,
+ hir_map.root_module(),
+ src_name,
+ src,
+ &attrs,
+ annotation.pp_ann(),
+ )
+ }),
+
+ HirTree => {
+ call_with_pp_support_hir(&PpHirMode::Normal, tcx, move |_annotation, hir_map| {
+ debug!("pretty printing HIR tree");
+ format!("{:#?}", hir_map.krate())
+ })
+ }
+
+ _ => unreachable!(),
+ };
+
+ write_or_print(&out, ofile);
+}
+
+// In an ideal world, this would be a public function called by the driver after
+// analysis is performed. However, we want to call `phase_3_run_analysis_passes`
+// with a different callback than the standard driver, so that isn't easy.
+// Instead, we call that function ourselves.
+fn print_with_analysis(
+ tcx: TyCtxt<'_>,
+ ppm: PpMode,
+ ofile: Option<&Path>,
+) -> Result<(), ErrorGuaranteed> {
+ tcx.analysis(())?;
+ let out = match ppm {
+ Mir => {
+ let mut out = Vec::new();
+ write_mir_pretty(tcx, None, &mut out).unwrap();
+ String::from_utf8(out).unwrap()
+ }
+
+ MirCFG => {
+ let mut out = Vec::new();
+ write_mir_graphviz(tcx, None, &mut out).unwrap();
+ String::from_utf8(out).unwrap()
+ }
+
+ ThirTree => {
+ let mut out = String::new();
+ abort_on_err(rustc_typeck::check_crate(tcx), tcx.sess);
+ debug!("pretty printing THIR tree");
+ for did in tcx.hir().body_owners() {
+ let _ = writeln!(
+ out,
+ "{:?}:\n{}\n",
+ did,
+ tcx.thir_tree(ty::WithOptConstParam::unknown(did))
+ );
+ }
+ out
+ }
+
+ _ => unreachable!(),
+ };
+
+ write_or_print(&out, ofile);
+
+ Ok(())
+}
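+
+// Invocation sketch (illustrative): these analysis-dependent modes back the
+// unstable `-Z unpretty=mir`, `-Z unpretty=mir-cfg`, and `-Z unpretty=thir-tree`
+// requests, e.g.:
+//
+//     rustc foo.rs -Z unpretty=mir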
diff --git a/compiler/rustc_error_codes/Cargo.toml b/compiler/rustc_error_codes/Cargo.toml
new file mode 100644
index 000000000..7d5f3e467
--- /dev/null
+++ b/compiler/rustc_error_codes/Cargo.toml
@@ -0,0 +1,4 @@
+[package]
+name = "rustc_error_codes"
+version = "0.0.0"
+edition = "2021"
diff --git a/compiler/rustc_error_codes/src/error_codes.rs b/compiler/rustc_error_codes/src/error_codes.rs
new file mode 100644
index 000000000..854625579
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes.rs
@@ -0,0 +1,649 @@
+// Error messages for EXXXX errors. Each message should start and end with a
+// new line, and be wrapped to 80 characters. In vim you can `:set tw=80` and
+// use `gq` to wrap paragraphs. Use `:set tw=0` to disable.
+//
+// /!\ IMPORTANT /!\
+//
+// Error messages' format must follow the RFC 1567 available here:
+// https://github.com/rust-lang/rfcs/pull/1567
+
+register_diagnostics! {
+E0001: include_str!("./error_codes/E0001.md"),
+E0002: include_str!("./error_codes/E0002.md"),
+E0004: include_str!("./error_codes/E0004.md"),
+E0005: include_str!("./error_codes/E0005.md"),
+E0007: include_str!("./error_codes/E0007.md"),
+E0009: include_str!("./error_codes/E0009.md"),
+E0010: include_str!("./error_codes/E0010.md"),
+E0013: include_str!("./error_codes/E0013.md"),
+E0014: include_str!("./error_codes/E0014.md"),
+E0015: include_str!("./error_codes/E0015.md"),
+E0023: include_str!("./error_codes/E0023.md"),
+E0025: include_str!("./error_codes/E0025.md"),
+E0026: include_str!("./error_codes/E0026.md"),
+E0027: include_str!("./error_codes/E0027.md"),
+E0029: include_str!("./error_codes/E0029.md"),
+E0030: include_str!("./error_codes/E0030.md"),
+E0033: include_str!("./error_codes/E0033.md"),
+E0034: include_str!("./error_codes/E0034.md"),
+E0038: include_str!("./error_codes/E0038.md"),
+E0040: include_str!("./error_codes/E0040.md"),
+E0044: include_str!("./error_codes/E0044.md"),
+E0045: include_str!("./error_codes/E0045.md"),
+E0046: include_str!("./error_codes/E0046.md"),
+E0049: include_str!("./error_codes/E0049.md"),
+E0050: include_str!("./error_codes/E0050.md"),
+E0053: include_str!("./error_codes/E0053.md"),
+E0054: include_str!("./error_codes/E0054.md"),
+E0055: include_str!("./error_codes/E0055.md"),
+E0057: include_str!("./error_codes/E0057.md"),
+E0059: include_str!("./error_codes/E0059.md"),
+E0060: include_str!("./error_codes/E0060.md"),
+E0061: include_str!("./error_codes/E0061.md"),
+E0062: include_str!("./error_codes/E0062.md"),
+E0063: include_str!("./error_codes/E0063.md"),
+E0067: include_str!("./error_codes/E0067.md"),
+E0069: include_str!("./error_codes/E0069.md"),
+E0070: include_str!("./error_codes/E0070.md"),
+E0071: include_str!("./error_codes/E0071.md"),
+E0072: include_str!("./error_codes/E0072.md"),
+E0073: include_str!("./error_codes/E0073.md"),
+E0074: include_str!("./error_codes/E0074.md"),
+E0075: include_str!("./error_codes/E0075.md"),
+E0076: include_str!("./error_codes/E0076.md"),
+E0077: include_str!("./error_codes/E0077.md"),
+E0080: include_str!("./error_codes/E0080.md"),
+E0081: include_str!("./error_codes/E0081.md"),
+E0084: include_str!("./error_codes/E0084.md"),
+E0087: include_str!("./error_codes/E0087.md"),
+E0088: include_str!("./error_codes/E0088.md"),
+E0089: include_str!("./error_codes/E0089.md"),
+E0090: include_str!("./error_codes/E0090.md"),
+E0091: include_str!("./error_codes/E0091.md"),
+E0092: include_str!("./error_codes/E0092.md"),
+E0093: include_str!("./error_codes/E0093.md"),
+E0094: include_str!("./error_codes/E0094.md"),
+E0106: include_str!("./error_codes/E0106.md"),
+E0107: include_str!("./error_codes/E0107.md"),
+E0109: include_str!("./error_codes/E0109.md"),
+E0110: include_str!("./error_codes/E0110.md"),
+E0116: include_str!("./error_codes/E0116.md"),
+E0117: include_str!("./error_codes/E0117.md"),
+E0118: include_str!("./error_codes/E0118.md"),
+E0119: include_str!("./error_codes/E0119.md"),
+E0120: include_str!("./error_codes/E0120.md"),
+E0121: include_str!("./error_codes/E0121.md"),
+E0124: include_str!("./error_codes/E0124.md"),
+E0128: include_str!("./error_codes/E0128.md"),
+E0130: include_str!("./error_codes/E0130.md"),
+E0131: include_str!("./error_codes/E0131.md"),
+E0132: include_str!("./error_codes/E0132.md"),
+E0133: include_str!("./error_codes/E0133.md"),
+E0136: include_str!("./error_codes/E0136.md"),
+E0137: include_str!("./error_codes/E0137.md"),
+E0138: include_str!("./error_codes/E0138.md"),
+E0139: include_str!("./error_codes/E0139.md"),
+E0152: include_str!("./error_codes/E0152.md"),
+E0154: include_str!("./error_codes/E0154.md"),
+E0158: include_str!("./error_codes/E0158.md"),
+E0161: include_str!("./error_codes/E0161.md"),
+E0162: include_str!("./error_codes/E0162.md"),
+E0164: include_str!("./error_codes/E0164.md"),
+E0165: include_str!("./error_codes/E0165.md"),
+E0170: include_str!("./error_codes/E0170.md"),
+E0178: include_str!("./error_codes/E0178.md"),
+E0183: include_str!("./error_codes/E0183.md"),
+E0184: include_str!("./error_codes/E0184.md"),
+E0185: include_str!("./error_codes/E0185.md"),
+E0186: include_str!("./error_codes/E0186.md"),
+E0191: include_str!("./error_codes/E0191.md"),
+E0192: include_str!("./error_codes/E0192.md"),
+E0193: include_str!("./error_codes/E0193.md"),
+E0195: include_str!("./error_codes/E0195.md"),
+E0197: include_str!("./error_codes/E0197.md"),
+E0198: include_str!("./error_codes/E0198.md"),
+E0199: include_str!("./error_codes/E0199.md"),
+E0200: include_str!("./error_codes/E0200.md"),
+E0201: include_str!("./error_codes/E0201.md"),
+E0203: include_str!("./error_codes/E0203.md"),
+E0204: include_str!("./error_codes/E0204.md"),
+E0205: include_str!("./error_codes/E0205.md"),
+E0206: include_str!("./error_codes/E0206.md"),
+E0207: include_str!("./error_codes/E0207.md"),
+E0210: include_str!("./error_codes/E0210.md"),
+E0211: include_str!("./error_codes/E0211.md"),
+E0212: include_str!("./error_codes/E0212.md"),
+E0214: include_str!("./error_codes/E0214.md"),
+E0220: include_str!("./error_codes/E0220.md"),
+E0221: include_str!("./error_codes/E0221.md"),
+E0222: include_str!("./error_codes/E0222.md"),
+E0223: include_str!("./error_codes/E0223.md"),
+E0224: include_str!("./error_codes/E0224.md"),
+E0225: include_str!("./error_codes/E0225.md"),
+E0226: include_str!("./error_codes/E0226.md"),
+E0227: include_str!("./error_codes/E0227.md"),
+E0228: include_str!("./error_codes/E0228.md"),
+E0229: include_str!("./error_codes/E0229.md"),
+E0230: include_str!("./error_codes/E0230.md"),
+E0231: include_str!("./error_codes/E0231.md"),
+E0232: include_str!("./error_codes/E0232.md"),
+E0243: include_str!("./error_codes/E0243.md"),
+E0244: include_str!("./error_codes/E0244.md"),
+E0251: include_str!("./error_codes/E0251.md"),
+E0252: include_str!("./error_codes/E0252.md"),
+E0253: include_str!("./error_codes/E0253.md"),
+E0254: include_str!("./error_codes/E0254.md"),
+E0255: include_str!("./error_codes/E0255.md"),
+E0256: include_str!("./error_codes/E0256.md"),
+E0259: include_str!("./error_codes/E0259.md"),
+E0260: include_str!("./error_codes/E0260.md"),
+E0261: include_str!("./error_codes/E0261.md"),
+E0262: include_str!("./error_codes/E0262.md"),
+E0263: include_str!("./error_codes/E0263.md"),
+E0264: include_str!("./error_codes/E0264.md"),
+E0267: include_str!("./error_codes/E0267.md"),
+E0268: include_str!("./error_codes/E0268.md"),
+E0271: include_str!("./error_codes/E0271.md"),
+E0275: include_str!("./error_codes/E0275.md"),
+E0276: include_str!("./error_codes/E0276.md"),
+E0277: include_str!("./error_codes/E0277.md"),
+E0281: include_str!("./error_codes/E0281.md"),
+E0282: include_str!("./error_codes/E0282.md"),
+E0283: include_str!("./error_codes/E0283.md"),
+E0284: include_str!("./error_codes/E0284.md"),
+E0297: include_str!("./error_codes/E0297.md"),
+E0301: include_str!("./error_codes/E0301.md"),
+E0302: include_str!("./error_codes/E0302.md"),
+E0303: include_str!("./error_codes/E0303.md"),
+E0307: include_str!("./error_codes/E0307.md"),
+E0308: include_str!("./error_codes/E0308.md"),
+E0309: include_str!("./error_codes/E0309.md"),
+E0310: include_str!("./error_codes/E0310.md"),
+E0312: include_str!("./error_codes/E0312.md"),
+E0316: include_str!("./error_codes/E0316.md"),
+E0317: include_str!("./error_codes/E0317.md"),
+E0321: include_str!("./error_codes/E0321.md"),
+E0322: include_str!("./error_codes/E0322.md"),
+E0323: include_str!("./error_codes/E0323.md"),
+E0324: include_str!("./error_codes/E0324.md"),
+E0325: include_str!("./error_codes/E0325.md"),
+E0326: include_str!("./error_codes/E0326.md"),
+E0328: include_str!("./error_codes/E0328.md"),
+E0329: include_str!("./error_codes/E0329.md"),
+E0364: include_str!("./error_codes/E0364.md"),
+E0365: include_str!("./error_codes/E0365.md"),
+E0366: include_str!("./error_codes/E0366.md"),
+E0367: include_str!("./error_codes/E0367.md"),
+E0368: include_str!("./error_codes/E0368.md"),
+E0369: include_str!("./error_codes/E0369.md"),
+E0370: include_str!("./error_codes/E0370.md"),
+E0371: include_str!("./error_codes/E0371.md"),
+E0373: include_str!("./error_codes/E0373.md"),
+E0374: include_str!("./error_codes/E0374.md"),
+E0375: include_str!("./error_codes/E0375.md"),
+E0376: include_str!("./error_codes/E0376.md"),
+E0378: include_str!("./error_codes/E0378.md"),
+E0379: include_str!("./error_codes/E0379.md"),
+E0380: include_str!("./error_codes/E0380.md"),
+E0381: include_str!("./error_codes/E0381.md"),
+E0382: include_str!("./error_codes/E0382.md"),
+E0383: include_str!("./error_codes/E0383.md"),
+E0384: include_str!("./error_codes/E0384.md"),
+E0386: include_str!("./error_codes/E0386.md"),
+E0387: include_str!("./error_codes/E0387.md"),
+E0388: include_str!("./error_codes/E0388.md"),
+E0389: include_str!("./error_codes/E0389.md"),
+E0390: include_str!("./error_codes/E0390.md"),
+E0391: include_str!("./error_codes/E0391.md"),
+E0392: include_str!("./error_codes/E0392.md"),
+E0393: include_str!("./error_codes/E0393.md"),
+E0398: include_str!("./error_codes/E0398.md"),
+E0399: include_str!("./error_codes/E0399.md"),
+E0401: include_str!("./error_codes/E0401.md"),
+E0403: include_str!("./error_codes/E0403.md"),
+E0404: include_str!("./error_codes/E0404.md"),
+E0405: include_str!("./error_codes/E0405.md"),
+E0407: include_str!("./error_codes/E0407.md"),
+E0408: include_str!("./error_codes/E0408.md"),
+E0409: include_str!("./error_codes/E0409.md"),
+E0411: include_str!("./error_codes/E0411.md"),
+E0412: include_str!("./error_codes/E0412.md"),
+E0415: include_str!("./error_codes/E0415.md"),
+E0416: include_str!("./error_codes/E0416.md"),
+E0422: include_str!("./error_codes/E0422.md"),
+E0423: include_str!("./error_codes/E0423.md"),
+E0424: include_str!("./error_codes/E0424.md"),
+E0425: include_str!("./error_codes/E0425.md"),
+E0426: include_str!("./error_codes/E0426.md"),
+E0428: include_str!("./error_codes/E0428.md"),
+E0429: include_str!("./error_codes/E0429.md"),
+E0430: include_str!("./error_codes/E0430.md"),
+E0431: include_str!("./error_codes/E0431.md"),
+E0432: include_str!("./error_codes/E0432.md"),
+E0433: include_str!("./error_codes/E0433.md"),
+E0434: include_str!("./error_codes/E0434.md"),
+E0435: include_str!("./error_codes/E0435.md"),
+E0436: include_str!("./error_codes/E0436.md"),
+E0437: include_str!("./error_codes/E0437.md"),
+E0438: include_str!("./error_codes/E0438.md"),
+E0439: include_str!("./error_codes/E0439.md"),
+E0445: include_str!("./error_codes/E0445.md"),
+E0446: include_str!("./error_codes/E0446.md"),
+E0447: include_str!("./error_codes/E0447.md"),
+E0448: include_str!("./error_codes/E0448.md"),
+E0449: include_str!("./error_codes/E0449.md"),
+E0451: include_str!("./error_codes/E0451.md"),
+E0452: include_str!("./error_codes/E0452.md"),
+E0453: include_str!("./error_codes/E0453.md"),
+E0454: include_str!("./error_codes/E0454.md"),
+E0455: include_str!("./error_codes/E0455.md"),
+E0458: include_str!("./error_codes/E0458.md"),
+E0459: include_str!("./error_codes/E0459.md"),
+E0463: include_str!("./error_codes/E0463.md"),
+E0464: include_str!("./error_codes/E0464.md"),
+E0466: include_str!("./error_codes/E0466.md"),
+E0468: include_str!("./error_codes/E0468.md"),
+E0469: include_str!("./error_codes/E0469.md"),
+E0477: include_str!("./error_codes/E0477.md"),
+E0478: include_str!("./error_codes/E0478.md"),
+E0482: include_str!("./error_codes/E0482.md"),
+E0491: include_str!("./error_codes/E0491.md"),
+E0492: include_str!("./error_codes/E0492.md"),
+E0493: include_str!("./error_codes/E0493.md"),
+E0495: include_str!("./error_codes/E0495.md"),
+E0496: include_str!("./error_codes/E0496.md"),
+E0497: include_str!("./error_codes/E0497.md"),
+E0498: include_str!("./error_codes/E0498.md"),
+E0499: include_str!("./error_codes/E0499.md"),
+E0500: include_str!("./error_codes/E0500.md"),
+E0501: include_str!("./error_codes/E0501.md"),
+E0502: include_str!("./error_codes/E0502.md"),
+E0503: include_str!("./error_codes/E0503.md"),
+E0504: include_str!("./error_codes/E0504.md"),
+E0505: include_str!("./error_codes/E0505.md"),
+E0506: include_str!("./error_codes/E0506.md"),
+E0507: include_str!("./error_codes/E0507.md"),
+E0508: include_str!("./error_codes/E0508.md"),
+E0509: include_str!("./error_codes/E0509.md"),
+E0510: include_str!("./error_codes/E0510.md"),
+E0511: include_str!("./error_codes/E0511.md"),
+E0512: include_str!("./error_codes/E0512.md"),
+E0515: include_str!("./error_codes/E0515.md"),
+E0516: include_str!("./error_codes/E0516.md"),
+E0517: include_str!("./error_codes/E0517.md"),
+E0518: include_str!("./error_codes/E0518.md"),
+E0520: include_str!("./error_codes/E0520.md"),
+E0521: include_str!("./error_codes/E0521.md"),
+E0522: include_str!("./error_codes/E0522.md"),
+E0524: include_str!("./error_codes/E0524.md"),
+E0525: include_str!("./error_codes/E0525.md"),
+E0527: include_str!("./error_codes/E0527.md"),
+E0528: include_str!("./error_codes/E0528.md"),
+E0529: include_str!("./error_codes/E0529.md"),
+E0530: include_str!("./error_codes/E0530.md"),
+E0531: include_str!("./error_codes/E0531.md"),
+E0532: include_str!("./error_codes/E0532.md"),
+E0533: include_str!("./error_codes/E0533.md"),
+E0534: include_str!("./error_codes/E0534.md"),
+E0535: include_str!("./error_codes/E0535.md"),
+E0536: include_str!("./error_codes/E0536.md"),
+E0537: include_str!("./error_codes/E0537.md"),
+E0538: include_str!("./error_codes/E0538.md"),
+E0539: include_str!("./error_codes/E0539.md"),
+E0541: include_str!("./error_codes/E0541.md"),
+E0542: include_str!("./error_codes/E0542.md"),
+E0543: include_str!("./error_codes/E0543.md"),
+E0544: include_str!("./error_codes/E0544.md"),
+E0545: include_str!("./error_codes/E0545.md"),
+E0546: include_str!("./error_codes/E0546.md"),
+E0547: include_str!("./error_codes/E0547.md"),
+E0549: include_str!("./error_codes/E0549.md"),
+E0550: include_str!("./error_codes/E0550.md"),
+E0551: include_str!("./error_codes/E0551.md"),
+E0552: include_str!("./error_codes/E0552.md"),
+E0554: include_str!("./error_codes/E0554.md"),
+E0556: include_str!("./error_codes/E0556.md"),
+E0557: include_str!("./error_codes/E0557.md"),
+E0559: include_str!("./error_codes/E0559.md"),
+E0560: include_str!("./error_codes/E0560.md"),
+E0561: include_str!("./error_codes/E0561.md"),
+E0562: include_str!("./error_codes/E0562.md"),
+E0565: include_str!("./error_codes/E0565.md"),
+E0566: include_str!("./error_codes/E0566.md"),
+E0567: include_str!("./error_codes/E0567.md"),
+E0568: include_str!("./error_codes/E0568.md"),
+E0569: include_str!("./error_codes/E0569.md"),
+E0570: include_str!("./error_codes/E0570.md"),
+E0571: include_str!("./error_codes/E0571.md"),
+E0572: include_str!("./error_codes/E0572.md"),
+E0573: include_str!("./error_codes/E0573.md"),
+E0574: include_str!("./error_codes/E0574.md"),
+E0575: include_str!("./error_codes/E0575.md"),
+E0576: include_str!("./error_codes/E0576.md"),
+E0577: include_str!("./error_codes/E0577.md"),
+E0578: include_str!("./error_codes/E0578.md"),
+E0579: include_str!("./error_codes/E0579.md"),
+E0580: include_str!("./error_codes/E0580.md"),
+E0581: include_str!("./error_codes/E0581.md"),
+E0582: include_str!("./error_codes/E0582.md"),
+E0583: include_str!("./error_codes/E0583.md"),
+E0584: include_str!("./error_codes/E0584.md"),
+E0585: include_str!("./error_codes/E0585.md"),
+E0586: include_str!("./error_codes/E0586.md"),
+E0587: include_str!("./error_codes/E0587.md"),
+E0588: include_str!("./error_codes/E0588.md"),
+E0589: include_str!("./error_codes/E0589.md"),
+E0590: include_str!("./error_codes/E0590.md"),
+E0591: include_str!("./error_codes/E0591.md"),
+E0592: include_str!("./error_codes/E0592.md"),
+E0593: include_str!("./error_codes/E0593.md"),
+E0594: include_str!("./error_codes/E0594.md"),
+E0595: include_str!("./error_codes/E0595.md"),
+E0596: include_str!("./error_codes/E0596.md"),
+E0597: include_str!("./error_codes/E0597.md"),
+E0599: include_str!("./error_codes/E0599.md"),
+E0600: include_str!("./error_codes/E0600.md"),
+E0601: include_str!("./error_codes/E0601.md"),
+E0602: include_str!("./error_codes/E0602.md"),
+E0603: include_str!("./error_codes/E0603.md"),
+E0604: include_str!("./error_codes/E0604.md"),
+E0605: include_str!("./error_codes/E0605.md"),
+E0606: include_str!("./error_codes/E0606.md"),
+E0607: include_str!("./error_codes/E0607.md"),
+E0608: include_str!("./error_codes/E0608.md"),
+E0609: include_str!("./error_codes/E0609.md"),
+E0610: include_str!("./error_codes/E0610.md"),
+E0614: include_str!("./error_codes/E0614.md"),
+E0615: include_str!("./error_codes/E0615.md"),
+E0616: include_str!("./error_codes/E0616.md"),
+E0617: include_str!("./error_codes/E0617.md"),
+E0618: include_str!("./error_codes/E0618.md"),
+E0619: include_str!("./error_codes/E0619.md"),
+E0620: include_str!("./error_codes/E0620.md"),
+E0621: include_str!("./error_codes/E0621.md"),
+E0622: include_str!("./error_codes/E0622.md"),
+E0623: include_str!("./error_codes/E0623.md"),
+E0624: include_str!("./error_codes/E0624.md"),
+E0625: include_str!("./error_codes/E0625.md"),
+E0626: include_str!("./error_codes/E0626.md"),
+E0627: include_str!("./error_codes/E0627.md"),
+E0628: include_str!("./error_codes/E0628.md"),
+E0631: include_str!("./error_codes/E0631.md"),
+E0632: include_str!("./error_codes/E0632.md"),
+E0633: include_str!("./error_codes/E0633.md"),
+E0634: include_str!("./error_codes/E0634.md"),
+E0635: include_str!("./error_codes/E0635.md"),
+E0636: include_str!("./error_codes/E0636.md"),
+E0637: include_str!("./error_codes/E0637.md"),
+E0638: include_str!("./error_codes/E0638.md"),
+E0639: include_str!("./error_codes/E0639.md"),
+E0641: include_str!("./error_codes/E0641.md"),
+E0642: include_str!("./error_codes/E0642.md"),
+E0643: include_str!("./error_codes/E0643.md"),
+E0644: include_str!("./error_codes/E0644.md"),
+E0646: include_str!("./error_codes/E0646.md"),
+E0647: include_str!("./error_codes/E0647.md"),
+E0648: include_str!("./error_codes/E0648.md"),
+E0657: include_str!("./error_codes/E0657.md"),
+E0658: include_str!("./error_codes/E0658.md"),
+E0659: include_str!("./error_codes/E0659.md"),
+E0660: include_str!("./error_codes/E0660.md"),
+E0661: include_str!("./error_codes/E0661.md"),
+E0662: include_str!("./error_codes/E0662.md"),
+E0663: include_str!("./error_codes/E0663.md"),
+E0664: include_str!("./error_codes/E0664.md"),
+E0665: include_str!("./error_codes/E0665.md"),
+E0666: include_str!("./error_codes/E0666.md"),
+E0667: include_str!("./error_codes/E0667.md"),
+E0668: include_str!("./error_codes/E0668.md"),
+E0669: include_str!("./error_codes/E0669.md"),
+E0670: include_str!("./error_codes/E0670.md"),
+E0671: include_str!("./error_codes/E0671.md"),
+E0687: include_str!("./error_codes/E0687.md"),
+E0688: include_str!("./error_codes/E0688.md"),
+E0689: include_str!("./error_codes/E0689.md"),
+E0690: include_str!("./error_codes/E0690.md"),
+E0691: include_str!("./error_codes/E0691.md"),
+E0692: include_str!("./error_codes/E0692.md"),
+E0693: include_str!("./error_codes/E0693.md"),
+E0695: include_str!("./error_codes/E0695.md"),
+E0696: include_str!("./error_codes/E0696.md"),
+E0697: include_str!("./error_codes/E0697.md"),
+E0698: include_str!("./error_codes/E0698.md"),
+E0699: include_str!("./error_codes/E0699.md"),
+E0700: include_str!("./error_codes/E0700.md"),
+E0701: include_str!("./error_codes/E0701.md"),
+E0703: include_str!("./error_codes/E0703.md"),
+E0704: include_str!("./error_codes/E0704.md"),
+E0705: include_str!("./error_codes/E0705.md"),
+E0706: include_str!("./error_codes/E0706.md"),
+E0708: include_str!("./error_codes/E0708.md"),
+E0710: include_str!("./error_codes/E0710.md"),
+E0712: include_str!("./error_codes/E0712.md"),
+E0713: include_str!("./error_codes/E0713.md"),
+E0714: include_str!("./error_codes/E0714.md"),
+E0715: include_str!("./error_codes/E0715.md"),
+E0716: include_str!("./error_codes/E0716.md"),
+E0718: include_str!("./error_codes/E0718.md"),
+E0719: include_str!("./error_codes/E0719.md"),
+E0720: include_str!("./error_codes/E0720.md"),
+E0722: include_str!("./error_codes/E0722.md"),
+E0724: include_str!("./error_codes/E0724.md"),
+E0725: include_str!("./error_codes/E0725.md"),
+E0726: include_str!("./error_codes/E0726.md"),
+E0727: include_str!("./error_codes/E0727.md"),
+E0728: include_str!("./error_codes/E0728.md"),
+E0729: include_str!("./error_codes/E0729.md"),
+E0730: include_str!("./error_codes/E0730.md"),
+E0731: include_str!("./error_codes/E0731.md"),
+E0732: include_str!("./error_codes/E0732.md"),
+E0733: include_str!("./error_codes/E0733.md"),
+E0734: include_str!("./error_codes/E0734.md"),
+E0735: include_str!("./error_codes/E0735.md"),
+E0736: include_str!("./error_codes/E0736.md"),
+E0737: include_str!("./error_codes/E0737.md"),
+E0739: include_str!("./error_codes/E0739.md"),
+E0740: include_str!("./error_codes/E0740.md"),
+E0741: include_str!("./error_codes/E0741.md"),
+E0742: include_str!("./error_codes/E0742.md"),
+E0743: include_str!("./error_codes/E0743.md"),
+E0744: include_str!("./error_codes/E0744.md"),
+E0745: include_str!("./error_codes/E0745.md"),
+E0746: include_str!("./error_codes/E0746.md"),
+E0747: include_str!("./error_codes/E0747.md"),
+E0748: include_str!("./error_codes/E0748.md"),
+E0749: include_str!("./error_codes/E0749.md"),
+E0750: include_str!("./error_codes/E0750.md"),
+E0751: include_str!("./error_codes/E0751.md"),
+E0752: include_str!("./error_codes/E0752.md"),
+E0753: include_str!("./error_codes/E0753.md"),
+E0754: include_str!("./error_codes/E0754.md"),
+E0755: include_str!("./error_codes/E0755.md"),
+E0756: include_str!("./error_codes/E0756.md"),
+E0757: include_str!("./error_codes/E0757.md"),
+E0758: include_str!("./error_codes/E0758.md"),
+E0759: include_str!("./error_codes/E0759.md"),
+E0760: include_str!("./error_codes/E0760.md"),
+E0761: include_str!("./error_codes/E0761.md"),
+E0762: include_str!("./error_codes/E0762.md"),
+E0763: include_str!("./error_codes/E0763.md"),
+E0764: include_str!("./error_codes/E0764.md"),
+E0765: include_str!("./error_codes/E0765.md"),
+E0766: include_str!("./error_codes/E0766.md"),
+E0767: include_str!("./error_codes/E0767.md"),
+E0768: include_str!("./error_codes/E0768.md"),
+E0769: include_str!("./error_codes/E0769.md"),
+E0770: include_str!("./error_codes/E0770.md"),
+E0771: include_str!("./error_codes/E0771.md"),
+E0772: include_str!("./error_codes/E0772.md"),
+E0773: include_str!("./error_codes/E0773.md"),
+E0774: include_str!("./error_codes/E0774.md"),
+E0775: include_str!("./error_codes/E0775.md"),
+E0776: include_str!("./error_codes/E0776.md"),
+E0777: include_str!("./error_codes/E0777.md"),
+E0778: include_str!("./error_codes/E0778.md"),
+E0779: include_str!("./error_codes/E0779.md"),
+E0780: include_str!("./error_codes/E0780.md"),
+E0781: include_str!("./error_codes/E0781.md"),
+E0782: include_str!("./error_codes/E0782.md"),
+E0783: include_str!("./error_codes/E0783.md"),
+E0784: include_str!("./error_codes/E0784.md"),
+E0785: include_str!("./error_codes/E0785.md"),
+E0786: include_str!("./error_codes/E0786.md"),
+E0787: include_str!("./error_codes/E0787.md"),
+E0788: include_str!("./error_codes/E0788.md"),
+E0790: include_str!("./error_codes/E0790.md"),
+;
+// E0006, // merged with E0005
+// E0008, // cannot bind by-move into a pattern guard
+// E0019, // merged into E0015
+// E0035, // merged into E0087/E0089
+// E0036, // merged into E0087/E0089
+// E0068,
+// E0085,
+// E0086,
+// E0101, // replaced with E0282
+// E0102, // replaced with E0282
+// E0103,
+// E0104,
+// E0122, // bounds in type aliases are ignored, turned into proper lint
+// E0123,
+// E0127,
+// E0129,
+// E0134,
+// E0135,
+// E0141,
+// E0153, // unused error code
+// E0157, // unused error code
+// E0159, // use of trait `{}` as struct constructor
+// E0163, // merged into E0071
+// E0167,
+// E0168,
+// E0172, // non-trait found in a type sum, moved to resolve
+// E0173, // manual implementations of unboxed closure traits are experimental
+// E0174,
+// E0182, // merged into E0229
+// E0187, // cannot infer the kind of the closure
+// E0188, // can not cast an immutable reference to a mutable pointer
+// E0189, // deprecated: can only cast a boxed pointer to a boxed object
+// E0190, // deprecated: can only cast a &-pointer to an &-object
+// E0194, // merged into E0403
+// E0196, // cannot determine a type for this closure
+ E0208, // internal error code
+// E0209, // builtin traits can only be implemented on structs or enums
+// E0213, // associated types are not accepted in this context
+// E0215, // angle-bracket notation is not stable with `Fn`
+// E0216, // parenthetical notation is only stable with `Fn`
+// E0217, // ambiguous associated type, defined in multiple supertraits
+// E0218, // no associated type defined
+// E0219, // associated type defined in higher-ranked supertrait
+// E0233,
+// E0234,
+// E0235, // structure constructor specifies a structure of type but
+// E0236, // no lang item for range syntax
+// E0237, // no lang item for range syntax
+// E0238, // parenthesized parameters may only be used with a trait
+// E0239, // `next` method of `Iterator` trait has unexpected type
+// E0240,
+// E0241,
+// E0242,
+// E0245, // not a trait
+// E0246, // invalid recursive type
+// E0247,
+// E0248, // value used as a type, now reported earlier during resolution
+ // as E0412
+// E0249,
+// E0257,
+// E0258,
+// E0272, // on_unimplemented #0
+// E0273, // on_unimplemented #1
+// E0274, // on_unimplemented #2
+// E0278, // requirement is not satisfied
+// E0279,
+ E0280, // requirement is not satisfied
+// E0285, // overflow evaluation builtin bounds
+// E0296, // replaced with a generic attribute input check
+// E0298, // cannot compare constants
+// E0299, // mismatched types between arms
+// E0300, // unexpanded macro
+// E0304, // expected signed integer constant
+// E0305, // expected constant
+ E0311, // thing may not live long enough
+ E0313, // lifetime of borrowed pointer outlives lifetime of captured
+ // variable
+// E0314, // closure outlives stack frame
+// E0315, // cannot invoke closure outside of its lifetime
+// E0319, // trait impls for defaulted traits allowed just for structs/enums
+ E0320, // recursive overflow during dropck
+// E0372, // coherence not object safe
+ E0377, // the trait `CoerceUnsized` may only be implemented for a coercion
+ // between structures with the same definition
+// E0385, // {} in an aliasable location
+// E0402, // cannot use an outer type parameter in this context
+// E0406, // merged into 420
+// E0410, // merged into 408
+// E0413, // merged into 530
+// E0414, // merged into 530
+// E0417, // merged into 532
+// E0418, // merged into 532
+// E0419, // merged into 531
+// E0420, // merged into 532
+// E0421, // merged into 531
+// E0427, // merged into 530
+// E0456, // plugin `..` is not available for triple `..`
+ E0457, // plugin `..` only found in rlib format, but must be available...
+ E0460, // found possibly newer version of crate `..`
+ E0461, // couldn't find crate `..` with expected target triple ..
+ E0462, // found staticlib `..` instead of rlib or dylib
+ E0465, // multiple .. candidates for `..` found
+// E0467, // removed
+// E0470, // removed
+// E0471, // constant evaluation error (in pattern)
+ E0472, // llvm_asm! is unsupported on this target
+// E0473, // dereference of reference outside its lifetime
+// E0474, // captured variable `..` does not outlive the enclosing closure
+// E0475, // index of slice outside its lifetime
+ E0476, // lifetime of the source pointer does not outlive lifetime bound...
+// E0479, // the type `..` (provided as the value of a type parameter) is...
+// E0480, // lifetime of method receiver does not outlive the method call
+// E0481, // lifetime of function argument does not outlive the function call
+// E0483, // lifetime of operand does not outlive the operation
+// E0484, // reference is not valid at the time of borrow
+// E0485, // automatically reference is not valid at the time of borrow
+// E0486, // type of expression contains references that are not valid during..
+// E0487, // unsafe use of destructor: destructor might be called while...
+// E0488, // lifetime of variable does not enclose its declaration
+// E0489, // type/lifetime parameter not in scope here
+ E0490, // a value of type `..` is borrowed for too long
+ E0514, // metadata version mismatch
+ E0519, // local crate and dependency have same (crate-name, disambiguator)
+ E0523, // two dependencies have same (crate-name, disambiguator) but different SVH
+// E0526, // shuffle indices are not constant
+// E0540, // multiple rustc_deprecated attributes
+// E0548, // replaced with a generic attribute input check
+// E0553, // multiple rustc_const_unstable attributes
+// E0555, // replaced with a generic attribute input check
+// E0558, // replaced with a generic attribute input check
+// E0563, // cannot determine a type for this `impl Trait` removed in 6383de15
+// E0564, // only named lifetimes are allowed in `impl Trait`,
+ // but `{}` was found in the type `{}`
+// E0598, // lifetime of {} is too short to guarantee its contents can be...
+// E0611, // merged into E0616
+// E0612, // merged into E0609
+// E0613, // Removed (merged with E0609)
+// E0629, // missing 'feature' (rustc_const_unstable)
+// E0630, // rustc_const_unstable attribute must be paired with stable/unstable
+ // attribute
+ E0640, // infer outlives requirements, internal error code
+// E0645, // trait aliases not finished
+// E0694, // an unknown tool name found in scoped attributes
+// E0702, // replaced with a generic attribute input check
+// E0707, // multiple elided lifetimes used in arguments of `async fn`
+// E0709, // multiple different lifetimes used in arguments of `async fn`
+ E0711, // a feature has been declared with conflicting stability attributes, internal error code
+ E0717, // rustc_promotable without stability attribute, internal error code
+// E0721, // `await` keyword
+// E0723, // unstable feature in `const` context
+// E0738, // Removed; errored on `#[track_caller] fn`s in `extern "Rust" { ... }`.
+ E0789, // rustc_allowed_through_unstable_modules without stability attribute
+}
diff --git a/compiler/rustc_error_codes/src/error_codes/E0001.md b/compiler/rustc_error_codes/src/error_codes/E0001.md
new file mode 100644
index 000000000..90756780d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0001.md
@@ -0,0 +1,24 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+This error suggests that the match arm corresponding to the noted pattern
+will never be reached: for every possible value of the expression being
+matched, one of the preceding patterns will already have matched.
+
+This means that perhaps some of the preceding patterns are too general, that
+this one is too specific, or that the ordering is incorrect.
+
+For example, the following `match` block has too many arms:
+
+```
+match Some(0) {
+ Some(bar) => {/* ... */}
+ x => {/* ... */} // This handles the `None` case
+ _ => {/* ... */} // All possible cases have already been handled
+}
+```
+
+`match` blocks have their patterns matched in order, so, for example, putting
+a wildcard arm above a more specific arm will make the latter arm irrelevant.
+
+Ensure the ordering of the match arms is correct and remove any superfluous
+arms.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0002.md b/compiler/rustc_error_codes/src/error_codes/E0002.md
new file mode 100644
index 000000000..5cb59da10
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0002.md
@@ -0,0 +1,29 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+This error indicates that an empty match expression is invalid because the type
+it is matching on is non-empty (there exist values of this type). In safe code
+it is impossible to create an instance of an empty type, so empty match
+expressions are almost never desired. This error is typically fixed by adding
+one or more cases to the match expression.
+
+An example of an empty type is `enum Empty { }`. So, the following will work:
+
+```
+enum Empty {}
+
+fn foo(x: Empty) {
+ match x {
+ // empty
+ }
+}
+```
+
+However, this won't:
+
+```compile_fail
+fn foo(x: Option<String>) {
+ match x {
+ // empty
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0004.md b/compiler/rustc_error_codes/src/error_codes/E0004.md
new file mode 100644
index 000000000..b79ef5fd8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0004.md
@@ -0,0 +1,46 @@
+This error indicates that the compiler cannot guarantee a matching pattern for
+one or more possible inputs to a match expression. Guaranteed matches are
+required in order to assign values to match expressions or, alternatively, to
+determine the flow of execution.
+
+Erroneous code example:
+
+```compile_fail,E0004
+enum Terminator {
+ HastaLaVistaBaby,
+ TalkToMyHand,
+}
+
+let x = Terminator::HastaLaVistaBaby;
+
+match x { // error: non-exhaustive patterns: `HastaLaVistaBaby` not covered
+ Terminator::TalkToMyHand => {}
+}
+```
+
+If you encounter this error you must alter your patterns so that every possible
+value of the input type is matched. For types with a small number of variants
+(like enums) you should probably cover all cases explicitly. Alternatively, the
+underscore `_` wildcard pattern can be added after all other patterns to match
+"anything else". Example:
+
+```
+enum Terminator {
+ HastaLaVistaBaby,
+ TalkToMyHand,
+}
+
+let x = Terminator::HastaLaVistaBaby;
+
+match x {
+ Terminator::TalkToMyHand => {}
+ Terminator::HastaLaVistaBaby => {}
+}
+
+// or:
+
+match x {
+ Terminator::TalkToMyHand => {}
+ _ => {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0005.md b/compiler/rustc_error_codes/src/error_codes/E0005.md
new file mode 100644
index 000000000..e2e7db508
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0005.md
@@ -0,0 +1,30 @@
+Patterns used to bind names must be irrefutable, that is, they must guarantee
+that a name will be extracted in all cases.
+
+Erroneous code example:
+
+```compile_fail,E0005
+let x = Some(1);
+let Some(y) = x;
+// error: refutable pattern in local binding: `None` not covered
+```
+
+If you encounter this error you probably need to use a `match` or `if let` to
+deal with the possibility of failure. Example:
+
+```
+let x = Some(1);
+
+match x {
+ Some(y) => {
+ // do something
+ },
+ None => {}
+}
+
+// or:
+
+if let Some(y) = x {
+ // do something
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0007.md b/compiler/rustc_error_codes/src/error_codes/E0007.md
new file mode 100644
index 000000000..2c22b86af
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0007.md
@@ -0,0 +1,22 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+This error indicates that the bindings in a match arm would require a value to
+be moved into more than one location, thus violating unique ownership. Code
+like the following is invalid as it requires the entire `Option<String>` to be
+moved into a variable called `op_string` while simultaneously requiring the
+inner `String` to be moved into a variable called `s`.
+
+Erroneous code example:
+
+```compile_fail,E0382
+#![feature(bindings_after_at)]
+
+let x = Some("s".to_string());
+
+match x {
+ op_string @ Some(s) => {}, // error: use of moved value
+ None => {},
+}
+```
+
+See also the error E0303.
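+
+An illustrative fix, assuming both bindings may be taken by reference, is to
+bind with `ref` at both levels so that nothing is moved:
+
+```
+let x = Some("s".to_string());
+
+match x {
+    // Both bindings borrow, so the `String` is not moved twice.
+    ref op_string @ Some(ref s) => {},
+    None => {},
+}
+```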
diff --git a/compiler/rustc_error_codes/src/error_codes/E0009.md b/compiler/rustc_error_codes/src/error_codes/E0009.md
new file mode 100644
index 000000000..aaabba043
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0009.md
@@ -0,0 +1,52 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+In a pattern, all values that don't implement the `Copy` trait have to be bound
+the same way. The goal here is to avoid binding simultaneously by-move and
+by-ref.
+
+This limitation may be removed in a future version of Rust.
+
+Erroneous code example:
+
+```
+#![feature(move_ref_pattern)]
+
+struct X { x: (), }
+
+let x = Some((X { x: () }, X { x: () }));
+match x {
+ Some((y, ref z)) => {}, // error: cannot bind by-move and by-ref in the
+ // same pattern
+ None => panic!()
+}
+```
+
+You have two solutions:
+
+Solution #1: Bind the pattern's values the same way.
+
+```
+struct X { x: (), }
+
+let x = Some((X { x: () }, X { x: () }));
+match x {
+ Some((ref y, ref z)) => {},
+ // or Some((y, z)) => {}
+ None => panic!()
+}
+```
+
+Solution #2: Implement the `Copy` trait for the `X` structure.
+
+However, please keep in mind that the first solution should be preferred.
+
+```
+#[derive(Clone, Copy)]
+struct X { x: (), }
+
+let x = Some((X { x: () }, X { x: () }));
+match x {
+ Some((y, ref z)) => {},
+ None => panic!()
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0010.md b/compiler/rustc_error_codes/src/error_codes/E0010.md
new file mode 100644
index 000000000..71c790e10
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0010.md
@@ -0,0 +1,11 @@
+The value of statics and constants must be known at compile time, and they live
+for the entire lifetime of a program. Creating a boxed value allocates memory on
+the heap at runtime, and therefore cannot be done at compile time.
+
+Erroneous code example:
+
+```compile_fail,E0010
+#![feature(box_syntax)]
+
+const CON : Box<i32> = box 0;
+```
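+
+A common workaround (an illustrative sketch, not part of the error's original
+text) is to keep the constant unboxed and allocate at runtime:
+
+```
+const CON: i32 = 0;
+
+fn main() {
+    // The heap allocation happens at runtime, outside the constant.
+    let boxed: Box<i32> = Box::new(CON);
+}
+```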
diff --git a/compiler/rustc_error_codes/src/error_codes/E0013.md b/compiler/rustc_error_codes/src/error_codes/E0013.md
new file mode 100644
index 000000000..560530277
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0013.md
@@ -0,0 +1,18 @@
+Static and const variables can refer to other const variables, but a const
+variable cannot refer to a static variable.
+
+Erroneous code example:
+
+```compile_fail,E0013
+static X: i32 = 42;
+const Y: i32 = X;
+```
+
+In this example, `Y` cannot refer to `X`. To fix this, the value can be
+extracted as a const and then used:
+
+```
+const A: i32 = 42;
+static X: i32 = A;
+const Y: i32 = A;
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0014.md b/compiler/rustc_error_codes/src/error_codes/E0014.md
new file mode 100644
index 000000000..2c69957e9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0014.md
@@ -0,0 +1,20 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Constants can only be initialized by a constant value or, in a future
+version of Rust, a call to a const function. This error indicates the use
+of a path (like `a::b` or `x`) denoting something other than one of these
+allowed items.
+
+Erroneous code example:
+
+```
+const FOO: i32 = { let x = 0; x }; // 'x' isn't a constant nor a function!
+```
+
+To fix it, replace the non-constant value with a constant expression:
+
+```
+const FOO: i32 = { const X: i32 = 0; X };
+// or even:
+const FOO2: i32 = { 0 }; // the braces are unnecessary here
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0015.md b/compiler/rustc_error_codes/src/error_codes/E0015.md
new file mode 100644
index 000000000..021a0219d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0015.md
@@ -0,0 +1,33 @@
+A constant item was initialized with something that is not a constant
+expression.
+
+Erroneous code example:
+
+```compile_fail,E0015
+fn create_some() -> Option<u8> {
+ Some(1)
+}
+
+const FOO: Option<u8> = create_some(); // error!
+```
+
+The only functions that can be called in static or constant expressions are
+`const` functions, and struct/enum constructors.
+
+To fix this error, you can declare `create_some` as a constant function:
+
+```
+const fn create_some() -> Option<u8> { // declared as a const function
+ Some(1)
+}
+
+const FOO: Option<u8> = create_some(); // ok!
+
+// These are also working:
+struct Bar {
+ x: u8,
+}
+
+const OTHER_FOO: Option<u8> = Some(1);
+const BAR: Bar = Bar {x: 1};
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0023.md b/compiler/rustc_error_codes/src/error_codes/E0023.md
new file mode 100644
index 000000000..c1d85705d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0023.md
@@ -0,0 +1,57 @@
+A pattern attempted to extract an incorrect number of fields from a variant.
+
+Erroneous code example:
+
+```compile_fail,E0023
+enum Fruit {
+ Apple(String, String),
+ Pear(u32),
+}
+
+let x = Fruit::Apple(String::new(), String::new());
+
+match x {
+ Fruit::Apple(a) => {}, // error!
+ _ => {}
+}
+```
+
+A pattern used to match against an enum variant must provide a sub-pattern for
+each field of the enum variant.
+
+Here the `Apple` variant has two fields, and should be matched against like so:
+
+```
+enum Fruit {
+ Apple(String, String),
+ Pear(u32),
+}
+
+let x = Fruit::Apple(String::new(), String::new());
+
+// Correct.
+match x {
+ Fruit::Apple(a, b) => {},
+ _ => {}
+}
+```
+
+Matching with the wrong number of fields has no sensible interpretation:
+
+```compile_fail,E0023
+enum Fruit {
+ Apple(String, String),
+ Pear(u32),
+}
+
+let x = Fruit::Apple(String::new(), String::new());
+
+// Incorrect.
+match x {
+ Fruit::Apple(a) => {},
+ Fruit::Apple(a, b, c) => {},
+}
+```
+
+Check how many fields the enum was declared with and ensure that your pattern
+uses the same number.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0025.md b/compiler/rustc_error_codes/src/error_codes/E0025.md
new file mode 100644
index 000000000..a85dc8c19
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0025.md
@@ -0,0 +1,34 @@
+Each field of a struct can only be bound once in a pattern.
+
+Erroneous code example:
+
+```compile_fail,E0025
+struct Foo {
+ a: u8,
+ b: u8,
+}
+
+fn main(){
+ let x = Foo { a:1, b:2 };
+
+ let Foo { a: x, a: y } = x;
+ // error: field `a` bound multiple times in the pattern
+}
+```
+
+Each occurrence of a field name binds the value of that field, so to fix this
+error you will have to remove or alter the duplicate uses of the field name.
+Perhaps you misspelled another field name? Example:
+
+```
+struct Foo {
+ a: u8,
+ b: u8,
+}
+
+fn main(){
+ let x = Foo { a:1, b:2 };
+
+ let Foo { a: x, b: y } = x; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0026.md b/compiler/rustc_error_codes/src/error_codes/E0026.md
new file mode 100644
index 000000000..72c575aab
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0026.md
@@ -0,0 +1,34 @@
+A struct pattern attempted to extract a non-existent field from a struct.
+
+Erroneous code example:
+
+```compile_fail,E0026
+struct Thing {
+ x: u32,
+ y: u32,
+}
+
+let thing = Thing { x: 0, y: 0 };
+
+match thing {
+ Thing { x, z } => {} // error: `Thing::z` field doesn't exist
+}
+```
+
+If you are using shorthand field patterns but want to refer to the struct field
+by a different name, you should rename it explicitly. Struct fields are
+identified by the name used before the colon `:`, so struct patterns should
+resemble the declaration of the struct type being matched.
+
+```
+struct Thing {
+ x: u32,
+ y: u32,
+}
+
+let thing = Thing { x: 0, y: 0 };
+
+match thing {
+ Thing { x, y: z } => {} // we renamed `y` to `z`
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0027.md b/compiler/rustc_error_codes/src/error_codes/E0027.md
new file mode 100644
index 000000000..a8b1340ca
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0027.md
@@ -0,0 +1,39 @@
+A pattern for a struct fails to specify a sub-pattern for every one of the
+struct's fields.
+
+Erroneous code example:
+
+```compile_fail,E0027
+struct Dog {
+ name: String,
+ age: u32,
+}
+
+let d = Dog { name: "Rusty".to_string(), age: 8 };
+
+// This is incorrect.
+match d {
+ Dog { age: x } => {}
+}
+```
+
+To fix this error, ensure that each field from the struct's definition is
+mentioned in the pattern, or use `..` to ignore unwanted fields. Example:
+
+```
+struct Dog {
+ name: String,
+ age: u32,
+}
+
+let d = Dog { name: "Rusty".to_string(), age: 8 };
+
+match d {
+ Dog { name: ref n, age: x } => {}
+}
+
+// This is also correct (ignore unused fields).
+match d {
+ Dog { age: x, .. } => {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0029.md b/compiler/rustc_error_codes/src/error_codes/E0029.md
new file mode 100644
index 000000000..d12d85b9b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0029.md
@@ -0,0 +1,26 @@
+Something other than numbers and characters has been used for a range.
+
+Erroneous code example:
+
+```compile_fail,E0029
+let string = "salutations !";
+
+// The ordering relation for strings cannot be evaluated at compile time,
+// so this doesn't work:
+match string {
+ "hello" ..= "world" => {}
+ _ => {}
+}
+
+// This is a more general version, using a guard:
+match string {
+ s if s >= "hello" && s <= "world" => {}
+ _ => {}
+}
+```
+
+In a match expression, only numbers and characters can be matched against a
+range. This is because the compiler checks that the range is non-empty at
+compile-time, and is unable to evaluate arbitrary comparison functions. If you
+want to capture values of an orderable type between two end-points, you can use
+a guard.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0030.md b/compiler/rustc_error_codes/src/error_codes/E0030.md
new file mode 100644
index 000000000..67d496da5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0030.md
@@ -0,0 +1,15 @@
+When matching against a range, the compiler verifies that the range is
+non-empty. Range patterns include both end-points, so this is equivalent to
+requiring the start of the range to be less than or equal to the end of the
+range.
+
+Erroneous code example:
+
+```compile_fail,E0030
+match 5u32 {
+ // This range is ok, albeit pointless.
+ 1 ..= 1 => {}
+ // This range is empty, and the compiler can tell.
+ 1000 ..= 5 => {}
+}
+```
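+
+To fix an empty range pattern, make its start less than or equal to its end:
+
+```
+match 5u32 {
+    1 ..= 1 => {}
+    // Non-empty: the start is less than or equal to the end.
+    5 ..= 1000 => {}
+    _ => {}
+}
+```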
diff --git a/compiler/rustc_error_codes/src/error_codes/E0033.md b/compiler/rustc_error_codes/src/error_codes/E0033.md
new file mode 100644
index 000000000..735a2d1f3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0033.md
@@ -0,0 +1,27 @@
+A trait type has been dereferenced.
+
+Erroneous code example:
+
+```compile_fail,E0033
+# trait SomeTrait { fn method_one(&self){} fn method_two(&self){} }
+# impl<T> SomeTrait for T {}
+let trait_obj: &SomeTrait = &"some_value";
+
+// This tries to implicitly dereference to create an unsized local variable.
+let &invalid = trait_obj;
+
+// You can call methods without binding to the value being pointed at.
+trait_obj.method_one();
+trait_obj.method_two();
+```
+
+A pointer to a trait type cannot be implicitly dereferenced by a pattern. Every
+trait defines a type, but because the size of trait implementers isn't fixed,
+this type has no compile-time size. Therefore, all accesses to trait types must
+be through pointers. If you encounter this error you should try to avoid
+dereferencing the pointer.
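+
+For example, a sketch of the fix is to bind the reference itself rather than
+the value it points to:
+
+```
+# trait SomeTrait { fn method_one(&self){} fn method_two(&self){} }
+# impl<T> SomeTrait for T {}
+let trait_obj: &dyn SomeTrait = &"some_value";
+
+// Bind the reference itself; no dereference, no unsized local.
+let valid = trait_obj;
+valid.method_one();
+```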
+
+You can read more about trait objects in the [Trait Objects] section of the
+Reference.
+
+[Trait Objects]: https://doc.rust-lang.org/reference/types.html#trait-objects
diff --git a/compiler/rustc_error_codes/src/error_codes/E0034.md b/compiler/rustc_error_codes/src/error_codes/E0034.md
new file mode 100644
index 000000000..2a21f3441
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0034.md
@@ -0,0 +1,86 @@
+The compiler doesn't know what method to call because more than one method
+has the same prototype.
+
+Erroneous code example:
+
+```compile_fail,E0034
+struct Test;
+
+trait Trait1 {
+ fn foo();
+}
+
+trait Trait2 {
+ fn foo();
+}
+
+impl Trait1 for Test { fn foo() {} }
+impl Trait2 for Test { fn foo() {} }
+
+fn main() {
+ Test::foo() // error, which foo() to call?
+}
+```
+
+To avoid this error, you can keep only one of the implementations and remove
+the others. Let's take our example and fix it:
+
+```
+struct Test;
+
+trait Trait1 {
+ fn foo();
+}
+
+impl Trait1 for Test { fn foo() {} }
+
+fn main() {
+ Test::foo() // and now that's good!
+}
+```
+
+However, a better solution is to use fully qualified syntax, naming both the
+type and the trait explicitly:
+
+```
+struct Test;
+
+trait Trait1 {
+ fn foo();
+}
+
+trait Trait2 {
+ fn foo();
+}
+
+impl Trait1 for Test { fn foo() {} }
+impl Trait2 for Test { fn foo() {} }
+
+fn main() {
+ <Test as Trait1>::foo()
+}
+```
+
+One last example:
+
+```
+trait F {
+ fn m(&self);
+}
+
+trait G {
+ fn m(&self);
+}
+
+struct X;
+
+impl F for X { fn m(&self) { println!("I am F"); } }
+impl G for X { fn m(&self) { println!("I am G"); } }
+
+fn main() {
+ let f = X;
+
+ F::m(&f); // it displays "I am F"
+ G::m(&f); // it displays "I am G"
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0038.md b/compiler/rustc_error_codes/src/error_codes/E0038.md
new file mode 100644
index 000000000..584b78554
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0038.md
@@ -0,0 +1,324 @@
+For any given trait `Trait` there may be a related _type_ called the _trait
+object type_ which is typically written as `dyn Trait`. In earlier editions of
+Rust, trait object types were written as plain `Trait` (just the name of the
+trait, written in type positions) but this was a bit too confusing, so we now
+write `dyn Trait`.
+
+Some traits are not allowed to be used as trait object types. The traits that
+are allowed to be used as trait object types are called "object-safe" traits.
+Attempting to use a trait object type for a trait that is not object-safe will
+trigger error E0038.
+
+Two general aspects of trait object types give rise to the restrictions:
+
+ 1. Trait object types are dynamically sized types (DSTs), and trait objects of
+ these types can only be accessed through pointers, such as `&dyn Trait` or
+ `Box<dyn Trait>`. The size of such a pointer is known, but the size of the
+ `dyn Trait` object pointed-to by the pointer is _opaque_ to code working
+ with it, and different trait objects with the same trait object type may
+ have different sizes.
+
+ 2. The pointer used to access a trait object is paired with an extra pointer
+ to a "virtual method table" or "vtable", which is used to implement dynamic
+ dispatch to the object's implementations of the trait's methods. There is a
+ single such vtable for each trait implementation, but different trait
+ objects with the same trait object type may point to vtables from different
+ implementations.
+
+The specific conditions that violate object-safety follow, most of which relate
+to missing size information and vtable polymorphism arising from these aspects.
+
+### The trait requires `Self: Sized`
+
+Traits that are declared as `Trait: Sized` or which otherwise inherit a
+constraint of `Self: Sized` are not object-safe.
+
+The reasoning behind this is somewhat subtle. It derives from the fact that Rust
+requires (and defines) that every trait object type `dyn Trait` automatically
+implements `Trait`. Rust does this to simplify error reporting and ease
+interoperation between static and dynamic polymorphism. For example, this code
+works:
+
+```
+trait Trait {
+}
+
+fn static_foo<T:Trait + ?Sized>(b: &T) {
+}
+
+fn dynamic_bar(a: &dyn Trait) {
+ static_foo(a)
+}
+```
+
+This code works because `dyn Trait`, if it exists, always implements `Trait`.
+
+However, as we know, any `dyn Trait` is also unsized, and so it can never
+implement a sized trait like `Trait: Sized`. So, rather than allow an exception
+to the rule that `dyn Trait` always implements `Trait`, Rust chooses to prohibit
+such a `dyn Trait` from existing at all.
+
+Only unsized traits are considered object-safe.
+
+Generally, `Self: Sized` is used to indicate that the trait should not be used
+as a trait object. If the trait comes from your own crate, consider removing
+this restriction.
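+
+For illustration, a sketch with a hypothetical trait `Queue` that inherits
+`Sized` and therefore cannot be made into an object:
+
+```compile_fail,E0038
+trait Queue: Sized {
+    fn push(&mut self, x: i32);
+}
+
+// error: the trait `Queue` cannot be made into an object
+fn take_queue(q: Box<dyn Queue>) {}
+```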
+
+### Method references the `Self` type in its parameters or return type
+
+This happens when a trait has a method like the following:
+
+```
+trait Trait {
+ fn foo(&self) -> Self;
+}
+
+impl Trait for String {
+ fn foo(&self) -> Self {
+ "hi".to_owned()
+ }
+}
+
+impl Trait for u8 {
+ fn foo(&self) -> Self {
+ 1
+ }
+}
+```
+
+(Note that `&self` and `&mut self` are okay, it's additional `Self` types which
+cause this problem.)
+
+In such a case, the compiler cannot predict the return type of `foo()` in a
+situation like the following:
+
+```compile_fail,E0038
+trait Trait {
+ fn foo(&self) -> Self;
+}
+
+fn call_foo(x: Box<dyn Trait>) {
+ let y = x.foo(); // What type is y?
+ // ...
+}
+```
+
+If only some methods aren't object-safe, you can add a `where Self: Sized` bound
+on them to mark them as explicitly unavailable to trait objects. The
+functionality will still be available to all other implementers, including
+`Box<dyn Trait>` which is itself sized (assuming you `impl Trait for Box<dyn
+Trait>`).
+
+```
+trait Trait {
+ fn foo(&self) -> Self where Self: Sized;
+ // more functions
+}
+```
+
+Now, `foo()` can no longer be called on a trait object, but you will now be
+allowed to make a trait object, and that will be able to call any object-safe
+methods. With such a bound, one can still call `foo()` on types implementing
+that trait that aren't behind trait objects.
+
+### Method has generic type parameters
+
+As mentioned before, trait objects contain pointers to method tables. So, if we
+have:
+
+```
+trait Trait {
+ fn foo(&self);
+}
+
+impl Trait for String {
+ fn foo(&self) {
+ // implementation 1
+ }
+}
+
+impl Trait for u8 {
+ fn foo(&self) {
+ // implementation 2
+ }
+}
+// ...
+```
+
+At compile time each implementation of `Trait` will produce a table containing
+the various methods (and other items) related to the implementation, which will
+be used as the virtual method table for a `dyn Trait` object derived from that
+implementation.
+
+This works fine, but when the method gains generic parameters, we can have a
+problem.
+
+Usually, generic parameters get _monomorphized_. For example, if I have
+
+```
+fn foo<T>(x: T) {
+ // ...
+}
+```
+
+The machine code for `foo::<u8>()`, `foo::<bool>()`, `foo::<String>()`, or any
+other type substitution is different. Hence the compiler generates the
+implementation on-demand. If you call `foo()` with a `bool` parameter, the
+compiler will only generate code for `foo::<bool>()`. When we have additional
+type parameters, the number of monomorphized implementations the compiler
+generates does not grow drastically, since the compiler will only generate an
+implementation if the function is called with unparameterized substitutions
+(i.e., substitutions where none of the substituted types are themselves
+parameterized).
+
+However, with trait objects we have to make a table containing _every_ object
+that implements the trait. Now, if it has type parameters, we need to add
+implementations for every type that implements the trait, and there could
+theoretically be an infinite number of types.
+
+For example, with:
+
+```
+trait Trait {
+ fn foo<T>(&self, on: T);
+ // more methods
+}
+
+impl Trait for String {
+ fn foo<T>(&self, on: T) {
+ // implementation 1
+ }
+}
+
+impl Trait for u8 {
+ fn foo<T>(&self, on: T) {
+ // implementation 2
+ }
+}
+
+// 8 more implementations
+```
+
+Now, if we have the following code:
+
+```compile_fail,E0038
+# trait Trait { fn foo<T>(&self, on: T); }
+# impl Trait for String { fn foo<T>(&self, on: T) {} }
+# impl Trait for u8 { fn foo<T>(&self, on: T) {} }
+# impl Trait for bool { fn foo<T>(&self, on: T) {} }
+# // etc.
+fn call_foo(thing: Box<dyn Trait>) {
+ thing.foo(true); // this could be any one of the 8 types above
+ thing.foo(1);
+ thing.foo("hello");
+}
+```
+
+We don't just need to create a table of all implementations of all methods of
+`Trait`, we need to create such a table, for each different type fed to
+`foo()`. In this case this turns out to be (10 types implementing `Trait`)\*(3
+types being fed to `foo()`) = 30 implementations!
+
+With real world traits these numbers can grow drastically.
+
+To fix this, it is suggested to use a `where Self: Sized` bound similar to the
+fix for the sub-error above if you do not intend to call the method with type
+parameters:
+
+```
+trait Trait {
+ fn foo<T>(&self, on: T) where Self: Sized;
+ // more methods
+}
+```
+
+If this is not an option, consider replacing the type parameter with another
+trait object (e.g., if `T: OtherTrait`, use `on: Box<dyn OtherTrait>`). If the
+number of types you intend to feed to this method is limited, consider manually
+listing out the methods of different types.
+
+### Method has no receiver
+
+Methods that do not take a `self` parameter can't be called since there won't be
+a way to get a pointer to the method table for them.
+
+```
+trait Foo {
+ fn foo() -> u8;
+}
+```
+
+This could be called as `<Foo as Foo>::foo()`, which would not be able to pick
+an implementation.
+
+Adding a `Self: Sized` bound to these methods will generally make this compile.
+
+```
+trait Foo {
+ fn foo() -> u8 where Self: Sized;
+}
+```
+
+### Trait contains associated constants
+
+Just like static functions, associated constants aren't stored on the method
+table. If the trait or any subtrait contain an associated constant, they cannot
+be made into an object.
+
+```compile_fail,E0038
+trait Foo {
+ const X: i32;
+}
+
+impl Foo {}
+```
+
+A simple workaround is to use a helper method instead:
+
+```
+trait Foo {
+ fn x(&self) -> i32;
+}
+```
+
+### Trait uses `Self` as a type parameter in the supertrait listing
+
+This is similar to the second sub-error, but subtler. It happens in situations
+like the following:
+
+```compile_fail,E0038
+trait Super<A: ?Sized> {}
+
+trait Trait: Super<Self> {
+}
+
+struct Foo;
+
+impl Super<Foo> for Foo{}
+
+impl Trait for Foo {}
+
+fn main() {
+ let x: Box<dyn Trait>;
+}
+```
+
+Here, the supertrait might have methods as follows:
+
+```
+trait Super<A: ?Sized> {
+ fn get_a(&self) -> &A; // note that this is object safe!
+}
+```
+
+If the trait `Trait` derived from something like `Super<String>` or
+`Super<T>` (where `Foo` itself is `Foo<T>`), this would be okay, because given
+a concrete type, `get_a()` would definitely return an object of that type.
+
+However, if it derives from `Super<Self>`, then even though `Super` is object
+safe, the method `get_a()` would return an object of unknown type when called
+on a trait object. Unconstrained `Self` type parameters make otherwise
+object-safe traits unsafe to use as objects, so they are forbidden when
+specifying supertraits.
+
+There's no easy fix for this. Generally, code will need to be refactored so that
+you no longer need to derive from `Super<Self>`.
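+
+One possible refactor, shown here only as an illustrative sketch, is to drop
+the supertrait relationship and require `Super<Self>` only where static
+dispatch is used:
+
+```
+trait Super<A: ?Sized> {
+    fn get_a(&self) -> &A;
+}
+
+trait Trait {
+    fn do_it(&self);
+}
+
+// Require the bound only in generic, statically dispatched code.
+fn use_both<T: Trait + Super<T>>(x: &T) {
+    let _a: &T = x.get_a();
+    x.do_it();
+}
+
+// `Trait` alone is now object-safe.
+fn dynamic(x: &dyn Trait) {
+    x.do_it();
+}
+```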
diff --git a/compiler/rustc_error_codes/src/error_codes/E0040.md b/compiler/rustc_error_codes/src/error_codes/E0040.md
new file mode 100644
index 000000000..1373f8340
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0040.md
@@ -0,0 +1,39 @@
+It is not allowed to manually call destructors in Rust.
+
+Erroneous code example:
+
+```compile_fail,E0040
+struct Foo {
+ x: i32,
+}
+
+impl Drop for Foo {
+ fn drop(&mut self) {
+ println!("kaboom");
+ }
+}
+
+fn main() {
+ let mut x = Foo { x: -7 };
+ x.drop(); // error: explicit use of destructor method
+}
+```
+
+It is unnecessary to do this since `drop` is called automatically whenever a
+value goes out of scope. However, if you really need to drop a value by hand,
+you can use the `std::mem::drop` function:
+
+```
+struct Foo {
+ x: i32,
+}
+impl Drop for Foo {
+ fn drop(&mut self) {
+ println!("kaboom");
+ }
+}
+fn main() {
+ let mut x = Foo { x: -7 };
+ drop(x); // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0044.md b/compiler/rustc_error_codes/src/error_codes/E0044.md
new file mode 100644
index 000000000..ed7daf8dd
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0044.md
@@ -0,0 +1,15 @@
+You cannot use type or const parameters on foreign items.
+
+Example of erroneous code:
+
+```compile_fail,E0044
+extern "C" { fn some_func<T>(x: T); }
+```
+
+To fix this, replace the generic parameter with the specializations that you
+need:
+
+```
+extern "C" { fn some_func_i32(x: i32); }
+extern "C" { fn some_func_i64(x: i64); }
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0045.md b/compiler/rustc_error_codes/src/error_codes/E0045.md
new file mode 100644
index 000000000..143c693bf
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0045.md
@@ -0,0 +1,21 @@
+Variadic parameters have been used on a non-C ABI function.
+
+Erroneous code example:
+
+```compile_fail,E0045
+#![feature(unboxed_closures)]
+
+extern "rust-call" {
+ fn foo(x: u8, ...); // error!
+}
+```
+
+Rust only supports variadic parameters for interoperability with C code in its
+FFI. As such, variadic parameters can only be used with functions that use the
+C ABI. To fix such code, declare the function inside an `extern "C"` block:
+
+```
+extern "C" {
+ fn foo (x: u8, ...);
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0046.md b/compiler/rustc_error_codes/src/error_codes/E0046.md
new file mode 100644
index 000000000..d8f95330c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0046.md
@@ -0,0 +1,31 @@
+Items are missing in a trait implementation.
+
+Erroneous code example:
+
+```compile_fail,E0046
+trait Foo {
+ fn foo();
+}
+
+struct Bar;
+
+impl Foo for Bar {}
+// error: not all trait items implemented, missing: `foo`
+```
+
+When trying to make some type implement a trait `Foo`, you must, at minimum,
+provide implementations for all of `Foo`'s required methods (meaning the
+methods that do not have default implementations), as well as any required
+trait items like associated types or constants. Example:
+
+```
+trait Foo {
+ fn foo();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn foo() {} // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0049.md b/compiler/rustc_error_codes/src/error_codes/E0049.md
new file mode 100644
index 000000000..a2034a342
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0049.md
@@ -0,0 +1,36 @@
+An attempted implementation of a trait method has the wrong number of type or
+const parameters.
+
+Erroneous code example:
+
+```compile_fail,E0049
+trait Foo {
+ fn foo<T: Default>(x: T) -> Self;
+}
+
+struct Bar;
+
+// error: method `foo` has 0 type parameters but its trait declaration has 1
+// type parameter
+impl Foo for Bar {
+ fn foo(x: bool) -> Self { Bar }
+}
+```
+
+For example, the `Foo` trait has a method `foo` with a type parameter `T`,
+but the implementation of `foo` for the type `Bar` is missing this parameter.
+To fix this error, they must have the same type parameters:
+
+```
+trait Foo {
+ fn foo<T: Default>(x: T) -> Self;
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn foo<T: Default>(x: T) -> Self { // ok!
+ Bar
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0050.md b/compiler/rustc_error_codes/src/error_codes/E0050.md
new file mode 100644
index 000000000..7b84c4800
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0050.md
@@ -0,0 +1,36 @@
+An attempted implementation of a trait method has the wrong number of function
+parameters.
+
+Erroneous code example:
+
+```compile_fail,E0050
+trait Foo {
+ fn foo(&self, x: u8) -> bool;
+}
+
+struct Bar;
+
+// error: method `foo` has 1 parameter but the declaration in trait `Foo::foo`
+// has 2
+impl Foo for Bar {
+ fn foo(&self) -> bool { true }
+}
+```
+
+For example, the `Foo` trait has a method `foo` with two function parameters
+(`&self` and `u8`), but the implementation of `foo` for the type `Bar` omits
+the `u8` parameter. To fix this error, they must have the same parameters:
+
+```
+trait Foo {
+ fn foo(&self, x: u8) -> bool;
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn foo(&self, x: u8) -> bool { // ok!
+ true
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0053.md b/compiler/rustc_error_codes/src/error_codes/E0053.md
new file mode 100644
index 000000000..cb2a8638a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0053.md
@@ -0,0 +1,21 @@
+The parameters of any trait method must match between a trait implementation
+and the trait definition.
+
+Erroneous code example:
+
+```compile_fail,E0053
+trait Foo {
+ fn foo(x: u16);
+ fn bar(&self);
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ // error, expected u16, found i16
+ fn foo(x: i16) { }
+
+ // error, types differ in mutability
+ fn bar(&mut self) { }
+}
+```
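+
+A conforming implementation matches both the parameter types and the
+receiver's mutability:
+
+```
+trait Foo {
+    fn foo(x: u16);
+    fn bar(&self);
+}
+
+struct Bar;
+
+impl Foo for Bar {
+    fn foo(x: u16) { }  // ok: `u16` matches the trait declaration
+    fn bar(&self) { }   // ok: the receiver is `&self`, as declared
+}
+```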
diff --git a/compiler/rustc_error_codes/src/error_codes/E0054.md b/compiler/rustc_error_codes/src/error_codes/E0054.md
new file mode 100644
index 000000000..c3eb375fb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0054.md
@@ -0,0 +1,20 @@
+It is not allowed to cast to a bool.
+
+Erroneous code example:
+
+```compile_fail,E0054
+let x = 5;
+
+// Not allowed, won't compile
+let x_is_nonzero = x as bool;
+```
+
+If you are trying to cast a numeric type to a bool, you can compare it with
+zero instead:
+
+```
+let x = 5;
+
+// Ok
+let x_is_nonzero = x != 0;
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0055.md b/compiler/rustc_error_codes/src/error_codes/E0055.md
new file mode 100644
index 000000000..223ba4000
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0055.md
@@ -0,0 +1,28 @@
+During a method call, a value is automatically dereferenced as many times as
+needed to make the value's type match the method's receiver. The catch is that
+the compiler will only attempt to dereference a number of times up to the
+recursion limit (which can be set via the `recursion_limit` attribute).
+
+For a somewhat artificial example:
+
+```compile_fail,E0055
+#![recursion_limit="4"]
+
+struct Foo;
+
+impl Foo {
+ fn foo(&self) {}
+}
+
+fn main() {
+ let foo = Foo;
+ let ref_foo = &&&&&Foo;
+
+ // error, reached the recursion limit while auto-dereferencing `&&&&&Foo`
+ ref_foo.foo();
+}
+```
+
+One fix may be to increase the recursion limit. Note that it is possible to
+create an infinite recursion of dereferencing, in which case the only fix is to
+somehow break the recursion.
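+
+For the example above, raising the limit lets the call compile:
+
+```
+#![recursion_limit = "8"]
+
+struct Foo;
+
+impl Foo {
+    fn foo(&self) {}
+}
+
+fn main() {
+    let ref_foo = &&&&&Foo;
+    ref_foo.foo(); // ok: five auto-derefs fit within the limit of 8
+}
+```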
diff --git a/compiler/rustc_error_codes/src/error_codes/E0057.md b/compiler/rustc_error_codes/src/error_codes/E0057.md
new file mode 100644
index 000000000..bb5e4b48d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0057.md
@@ -0,0 +1,22 @@
+An invalid number of arguments was given when calling a closure.
+
+Erroneous code example:
+
+```compile_fail,E0057
+let f = |x| x * 3;
+let a = f(); // invalid, too few parameters
+let b = f(4); // this works!
+let c = f(2, 3); // invalid, too many parameters
+```
+
+When invoking closures or other implementations of the function traits `Fn`,
+`FnMut` or `FnOnce` using call notation, the number of parameters passed to the
+function must match its definition.
+
+A generic function must be treated similarly:
+
+```
+fn foo<F: Fn()>(f: F) {
+ f(); // this is valid, but f(3) would not work
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0059.md b/compiler/rustc_error_codes/src/error_codes/E0059.md
new file mode 100644
index 000000000..b22edead2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0059.md
@@ -0,0 +1,25 @@
+The built-in function traits are generic over a tuple of the function arguments.
+If one uses angle-bracket notation (`Fn<(T,), Output=U>`) instead of parentheses
+(`Fn(T) -> U`) to denote the function trait, the type parameter should be a
+tuple. Otherwise function call notation cannot be used and the trait will not be
+implemented by closures.
+
+The most likely source of this error is using angle-bracket notation without
+wrapping the function argument type into a tuple, for example:
+
+```compile_fail,E0059
+#![feature(unboxed_closures)]
+
+fn foo<F: Fn<i32>>(f: F) -> F::Output { f(3) }
+```
+
+It can be fixed by adjusting the trait bound like this:
+
+```
+#![feature(unboxed_closures)]
+
+fn foo<F: Fn<(i32,)>>(f: F) -> F::Output { f(3) }
+```
+
+Note that `(T,)` always denotes the type of a 1-tuple containing an element of
+type `T`. The comma is necessary for syntactic disambiguation.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0060.md b/compiler/rustc_error_codes/src/error_codes/E0060.md
new file mode 100644
index 000000000..54b10c886
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0060.md
@@ -0,0 +1,38 @@
+External C functions are allowed to be variadic. However, a variadic function
+takes a minimum number of arguments. For example, consider C's variadic `printf`
+function:
+
+```compile_fail,E0060
+use std::os::raw::{c_char, c_int};
+
+extern "C" {
+ fn printf(_: *const c_char, ...) -> c_int;
+}
+
+unsafe { printf(); } // error!
+```
+
+Using this declaration, `printf` must be called with at least one argument, so
+simply calling `printf()` is invalid. But the following uses are allowed:
+
+```
+# use std::os::raw::{c_char, c_int};
+# #[cfg_attr(all(windows, target_env = "msvc"),
+# link(name = "legacy_stdio_definitions",
+# kind = "static", modifiers = "-bundle"))]
+# extern "C" { fn printf(_: *const c_char, ...) -> c_int; }
+# fn main() {
+unsafe {
+ use std::ffi::CString;
+
+ let fmt = CString::new("test\n").unwrap();
+ printf(fmt.as_ptr());
+
+ let fmt = CString::new("number = %d\n").unwrap();
+ printf(fmt.as_ptr(), 3);
+
+ let fmt = CString::new("%d, %d\n").unwrap();
+ printf(fmt.as_ptr(), 10, 5);
+}
+# }
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0061.md b/compiler/rustc_error_codes/src/error_codes/E0061.md
new file mode 100644
index 000000000..143251c13
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0061.md
@@ -0,0 +1,23 @@
+An invalid number of arguments was passed when calling a function.
+
+Erroneous code example:
+
+```compile_fail,E0061
+fn f(u: i32) {}
+
+f(); // error!
+```
+
+The number of arguments passed to a function must match the number of arguments
+specified in the function signature.
+
+For example, a function like:
+
+```
+fn f(a: u16, b: &str) {}
+```
+
+Must always be called with exactly two arguments, e.g., `f(2, "test")`.
+
+Note that Rust does not have a notion of optional function arguments or
+variadic functions (except for its C-FFI).
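+
+A common idiom for emulating optional arguments (an illustration, not a
+language feature) is to take an `Option`:
+
+```
+fn f(a: u16, b: Option<&str>) {
+    if let Some(b) = b {
+        println!("{}: {}", a, b);
+    }
+}
+
+f(2, Some("test"));
+f(3, None);
+```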
diff --git a/compiler/rustc_error_codes/src/error_codes/E0062.md b/compiler/rustc_error_codes/src/error_codes/E0062.md
new file mode 100644
index 000000000..64fc027b8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0062.md
@@ -0,0 +1,30 @@
+A struct's or struct-like enum variant's field was specified more than once.
+
+Erroneous code example:
+
+```compile_fail,E0062
+struct Foo {
+ x: i32,
+}
+
+fn main() {
+ let x = Foo {
+ x: 0,
+ x: 0, // error: field `x` specified more than once
+ };
+}
+```
+
+This error indicates that during an attempt to build a struct or struct-like
+enum variant, one of the fields was specified more than once. Each field should
+be specified exactly one time. Example:
+
+```
+struct Foo {
+ x: i32,
+}
+
+fn main() {
+ let x = Foo { x: 0 }; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0063.md b/compiler/rustc_error_codes/src/error_codes/E0063.md
new file mode 100644
index 000000000..0e611deac
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0063.md
@@ -0,0 +1,27 @@
+A struct's or struct-like enum variant's field was not provided.
+
+Erroneous code example:
+
+```compile_fail,E0063
+struct Foo {
+ x: i32,
+ y: i32,
+}
+
+fn main() {
+ let x = Foo { x: 0 }; // error: missing field: `y`
+}
+```
+
+Each field should be specified exactly once. Example:
+
+```
+struct Foo {
+ x: i32,
+ y: i32,
+}
+
+fn main() {
+ let x = Foo { x: 0, y: 0 }; // ok!
+}
+```
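+
+When the remaining fields have sensible defaults, struct update syntax with
+`Default` (shown as an illustrative alternative) also provides every field:
+
+```
+#[derive(Default)]
+struct Foo {
+    x: i32,
+    y: i32,
+}
+
+fn main() {
+    let x = Foo { x: 1, ..Default::default() }; // `y` comes from the default
+}
+```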
diff --git a/compiler/rustc_error_codes/src/error_codes/E0067.md b/compiler/rustc_error_codes/src/error_codes/E0067.md
new file mode 100644
index 000000000..11041bb53
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0067.md
@@ -0,0 +1,15 @@
+An invalid left-hand side expression was used on an assignment operation.
+
+Erroneous code example:
+
+```compile_fail,E0067
+12 += 1; // error!
+```
+
+You need a place expression on the left-hand side to be able to assign
+something to it. For example:
+
+```
+let mut x: i8 = 12;
+x += 1; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0069.md b/compiler/rustc_error_codes/src/error_codes/E0069.md
new file mode 100644
index 000000000..7367a5c09
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0069.md
@@ -0,0 +1,14 @@
+The compiler found a function whose body contains a `return;` statement but
+whose return type is not `()`.
+
+Erroneous code example:
+
+```compile_fail,E0069
+// error
+fn foo() -> u8 {
+ return;
+}
+```
+
+Since `return;` is just like `return ();`, there is a mismatch between the
+function's return type and the value being returned.
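+
+To fix it, either return a value of the declared type or change the return
+type to `()`:
+
+```
+fn foo() -> u8 {
+    return 0; // ok: the value matches the return type
+}
+```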
diff --git a/compiler/rustc_error_codes/src/error_codes/E0070.md b/compiler/rustc_error_codes/src/error_codes/E0070.md
new file mode 100644
index 000000000..97522af3d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0070.md
@@ -0,0 +1,49 @@
+An assignment operator was used on a non-place expression.
+
+Erroneous code examples:
+
+```compile_fail,E0070
+struct SomeStruct {
+ x: i32,
+ y: i32,
+}
+
+const SOME_CONST: i32 = 12;
+
+fn some_other_func() {}
+
+fn some_function() {
+ SOME_CONST = 14; // error: a constant value cannot be changed!
+ 1 = 3; // error: 1 isn't a valid place!
+    some_other_func() = 4; // error: cannot assign a value to a function call!
+    SomeStruct::x = 12; // error: `SomeStruct` is a structure name but it is
+                        // used like a variable!
+}
+```
+
+The left-hand side of an assignment operator must be a place expression. A
+place expression represents a memory location and can be a variable (with
+optional namespacing), a dereference, an indexing expression or a field
+reference.
+
+More details can be found in the [Expressions] section of the Reference.
+
+[Expressions]: https://doc.rust-lang.org/reference/expressions.html#places-rvalues-and-temporaries
+
+Here are some working examples:
+
+```
+struct SomeStruct {
+ x: i32,
+ y: i32,
+}
+let mut s = SomeStruct { x: 0, y: 0 };
+
+s.x = 3; // that's good!
+
+// ...
+
+fn some_func(x: &mut i32) {
+    *x = 12; // that's good!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0071.md b/compiler/rustc_error_codes/src/error_codes/E0071.md
new file mode 100644
index 000000000..a6d6d1976
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0071.md
@@ -0,0 +1,27 @@
+Structure-literal syntax was used to create an item that is not a structure
+or enum variant.
+
+Erroneous code example:
+
+```compile_fail,E0071
+type U32 = u32;
+let t = U32 { value: 4 }; // error: expected struct, variant or union type,
+ // found builtin type `u32`
+```
+
+To fix this, ensure that the name was correctly spelled, and that the correct
+form of initializer was used.
+
+For example, the code above can be fixed to:
+
+```
+type U32 = u32;
+let t: U32 = 4;
+```
+
+or:
+
+```
+struct U32 { value: u32 }
+let t = U32 { value: 4 };
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0072.md b/compiler/rustc_error_codes/src/error_codes/E0072.md
new file mode 100644
index 000000000..8f7749aba
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0072.md
@@ -0,0 +1,37 @@
+A recursive type has infinite size because it lacks indirection.
+
+Erroneous code example:
+
+```compile_fail,E0072
+struct ListNode {
+ head: u8,
+ tail: Option<ListNode>, // error: no indirection here so impossible to
+ // compute the type's size
+}
+```
+
+When defining a recursive struct or enum, any use of the type being defined
+from inside the definition must occur behind a pointer (like `Box`, `&` or
+`Rc`). This is because structs and enums must have a well-defined size, and
+without the pointer, the size of the type would need to be unbounded.
+
+In the example, the type cannot have a well-defined size, because it needs to be
+arbitrarily large (since we would be able to nest `ListNode`s to any depth).
+Specifically,
+
+```plain
+size of `ListNode` = 1 byte for `head`
+ + 1 byte for the discriminant of the `Option`
+ + size of `ListNode`
+```
+
+One way to fix this is by wrapping `ListNode` in a `Box`, like so:
+
+```
+struct ListNode {
+ head: u8,
+ tail: Option<Box<ListNode>>,
+}
+```
+
+This works because `Box` is a pointer, so its size is well-known.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0073.md b/compiler/rustc_error_codes/src/error_codes/E0073.md
new file mode 100644
index 000000000..a5aea86ff
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0073.md
@@ -0,0 +1,19 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+You cannot define a struct (or enum) `Foo` that requires an instance of `Foo`
+in order to make a new `Foo` value. This is because there would be no way a
+first instance of `Foo` could be made to initialize another instance!
+
+Here's an example of a struct that has this problem:
+
+```
+struct Foo { x: Box<Foo> } // error
+```
+
+One fix is to use `Option`, like so:
+
+```
+struct Foo { x: Option<Box<Foo>> }
+```
+
+Now it's possible to create at least one instance of `Foo`: `Foo { x: None }`.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0074.md b/compiler/rustc_error_codes/src/error_codes/E0074.md
new file mode 100644
index 000000000..785d6de22
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0074.md
@@ -0,0 +1,24 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+When using the `#[simd]` attribute on a tuple struct, the components of the
+tuple struct must all be of a concrete, nongeneric type so the compiler can
+reason about how to use SIMD with them. This error will occur if the types
+are generic.
+
+This will cause an error:
+
+```
+#![feature(repr_simd)]
+
+#[repr(simd)]
+struct Bad<T>(T, T, T, T);
+```
+
+This will not:
+
+```
+#![feature(repr_simd)]
+
+#[repr(simd)]
+struct Good(u32, u32, u32, u32);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0075.md b/compiler/rustc_error_codes/src/error_codes/E0075.md
new file mode 100644
index 000000000..969c1ee71
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0075.md
@@ -0,0 +1,23 @@
+A `#[simd]` attribute was applied to an empty tuple struct.
+
+Erroneous code example:
+
+```compile_fail,E0075
+#![feature(repr_simd)]
+
+#[repr(simd)]
+struct Bad; // error!
+```
+
+The `#[simd]` attribute can only be applied to non-empty tuple structs, because
+it doesn't make sense to try to use SIMD operations when there are no values to
+operate on.
+
+Fixed example:
+
+```
+#![feature(repr_simd)]
+
+#[repr(simd)]
+struct Good(u32); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0076.md b/compiler/rustc_error_codes/src/error_codes/E0076.md
new file mode 100644
index 000000000..1da8caa95
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0076.md
@@ -0,0 +1,24 @@
+The fields of a tuple struct using the `#[simd]` attribute do not all have
+the same type.
+
+Erroneous code example:
+
+```compile_fail,E0076
+#![feature(repr_simd)]
+
+#[repr(simd)]
+struct Bad(u16, u32, u32, u32); // error!
+```
+
+When using the `#[simd]` attribute to automatically use SIMD operations in a
+tuple struct, the fields in the struct must all be of the same type, or the
+compiler will trigger this error.
+
+Fixed example:
+
+```
+#![feature(repr_simd)]
+
+#[repr(simd)]
+struct Good(u32, u32, u32, u32); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0077.md b/compiler/rustc_error_codes/src/error_codes/E0077.md
new file mode 100644
index 000000000..91aa24d1f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0077.md
@@ -0,0 +1,23 @@
+A tuple struct's element isn't a machine type when using the `#[simd]`
+attribute.
+
+Erroneous code example:
+
+```compile_fail,E0077
+#![feature(repr_simd)]
+
+#[repr(simd)]
+struct Bad(String); // error!
+```
+
+When using the `#[simd]` attribute on a tuple struct, the elements in the tuple
+must be machine types so SIMD operations can be applied to them.
+
+Fixed example:
+
+```
+#![feature(repr_simd)]
+
+#[repr(simd)]
+struct Good(u32, u32, u32, u32); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0080.md b/compiler/rustc_error_codes/src/error_codes/E0080.md
new file mode 100644
index 000000000..7b1bbde61
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0080.md
@@ -0,0 +1,32 @@
+A constant value failed to be evaluated.
+
+Erroneous code example:
+
+```compile_fail,E0080
+enum Enum {
+ X = (1 << 500),
+ Y = (1 / 0),
+}
+```
+
+This error indicates that the compiler was unable to sensibly evaluate a
+constant expression that had to be evaluated. Attempting to divide by 0
+or causing an integer overflow are two ways to induce this error.
+
+Ensure that the expressions given can be evaluated as the desired integer type.
+
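+For instance, discriminants that evaluate without overflow are fine:
+
+```
+enum Enum {
+    X = 5,
+    Y = 200, // ok!
+}
+```
+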
+See the [Custom Discriminants][custom-discriminants] section of the Reference
+for more information about setting custom integer types on fieldless enums
+using the [`repr` attribute][repr-attribute].
+
+[custom-discriminants]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-field-less-enumerations
+[repr-attribute]: https://doc.rust-lang.org/reference/type-layout.html#reprc-enums
diff --git a/compiler/rustc_error_codes/src/error_codes/E0081.md b/compiler/rustc_error_codes/src/error_codes/E0081.md
new file mode 100644
index 000000000..b834a734c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0081.md
@@ -0,0 +1,37 @@
+A discriminant value is present more than once.
+
+Erroneous code example:
+
+```compile_fail,E0081
+enum Enum {
+ P = 3,
+ X = 3, // error!
+ Y = 5,
+}
+```
+
+Enum discriminants are used to differentiate enum variants stored in memory.
+This error indicates that the same value was used for two or more variants,
+making it impossible to distinguish them.
+
+```
+enum Enum {
+ P,
+ X = 3, // ok!
+ Y = 5,
+}
+```
+
+Note that variants without a manually specified discriminant are numbered from
+top to bottom starting from 0, so clashes can occur with seemingly unrelated
+variants.
+
+```compile_fail,E0081
+enum Bad {
+ X,
+ Y = 0, // error!
+}
+```
+
+Here `X` will have already been assigned the discriminant 0 by the time `Y` is
+encountered, so a conflict occurs.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0084.md b/compiler/rustc_error_codes/src/error_codes/E0084.md
new file mode 100644
index 000000000..38ce9b43d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0084.md
@@ -0,0 +1,27 @@
+An unsupported representation was attempted on a zero-variant enum.
+
+Erroneous code example:
+
+```compile_fail,E0084
+#[repr(i32)]
+enum NightsWatch {} // error: unsupported representation for zero-variant enum
+```
+
+It is impossible to define an integer type to represent zero-variant enum
+values because there are no such values: there is no way to construct an
+instance of such a type using only safe code. There are two solutions. Either
+you add variants to your enum:
+
+```
+#[repr(i32)]
+enum NightsWatch {
+ JonSnow,
+ Commander,
+}
+```
+
+or you remove the integer representation of your enum:
+
+```
+enum NightsWatch {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0087.md b/compiler/rustc_error_codes/src/error_codes/E0087.md
new file mode 100644
index 000000000..9d292186f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0087.md
@@ -0,0 +1,15 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Too many type arguments were supplied for a function. For example:
+
+```compile_fail,E0107
+fn foo<T>() {}
+
+fn main() {
+ foo::<f64, bool>(); // error: wrong number of type arguments:
+ // expected 1, found 2
+}
+```
+
+The number of supplied arguments must exactly match the number of defined type
+parameters.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0088.md b/compiler/rustc_error_codes/src/error_codes/E0088.md
new file mode 100644
index 000000000..7780ad5b5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0088.md
@@ -0,0 +1,45 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+You gave too many lifetime arguments. Erroneous code example:
+
+```compile_fail,E0107
+fn f() {}
+
+fn main() {
+ f::<'static>() // error: wrong number of lifetime arguments:
+ // expected 0, found 1
+}
+```
+
+Please check that you gave the right number of lifetime arguments. Example:
+
+```
+fn f() {}
+
+fn main() {
+ f() // ok!
+}
+```
+
+It's also important to note that the Rust compiler can generally infer
+lifetimes by itself. Example:
+
+```
+struct Foo {
+ value: String
+}
+
+impl Foo {
+ // it can be written like this
+ fn get_value<'a>(&'a self) -> &'a str { &self.value }
+ // but the compiler works fine with this too:
+ fn without_lifetime(&self) -> &str { &self.value }
+}
+
+fn main() {
+ let f = Foo { value: "hello".to_owned() };
+
+ println!("{}", f.get_value());
+ println!("{}", f.without_lifetime());
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0089.md b/compiler/rustc_error_codes/src/error_codes/E0089.md
new file mode 100644
index 000000000..504fbc7b9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0089.md
@@ -0,0 +1,25 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Too few type arguments were supplied for a function. For example:
+
+```compile_fail,E0107
+fn foo<T, U>() {}
+
+fn main() {
+ foo::<f64>(); // error: wrong number of type arguments: expected 2, found 1
+}
+```
+
+Note that if a function takes multiple type arguments but you want the compiler
+to infer some of them, you can use type placeholders:
+
+```compile_fail,E0107
+fn foo<T, U>(x: T) {}
+
+fn main() {
+ let x: bool = true;
+ foo::<f64>(x); // error: wrong number of type arguments:
+ // expected 2, found 1
+ foo::<_, f64>(x); // same as `foo::<bool, f64>(x)`
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0090.md b/compiler/rustc_error_codes/src/error_codes/E0090.md
new file mode 100644
index 000000000..e091bb6c9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0090.md
@@ -0,0 +1,22 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+You gave too few lifetime arguments. Example:
+
+```compile_fail,E0107
+fn foo<'a: 'b, 'b: 'a>() {}
+
+fn main() {
+ foo::<'static>(); // error: wrong number of lifetime arguments:
+ // expected 2, found 1
+}
+```
+
+Please check that you gave the right number of lifetime arguments. Example:
+
+```
+fn foo<'a: 'b, 'b: 'a>() {}
+
+fn main() {
+ foo::<'static, 'static>();
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0091.md b/compiler/rustc_error_codes/src/error_codes/E0091.md
new file mode 100644
index 000000000..03cb32803
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0091.md
@@ -0,0 +1,16 @@
+An unnecessary type or const parameter was given in a type alias.
+
+Erroneous code example:
+
+```compile_fail,E0091
+type Foo<T> = u32; // error: type parameter `T` is unused
+// or:
+type Foo2<A,B> = Box<A>; // error: type parameter `B` is unused
+```
+
+Please check you didn't write too many parameters. Example:
+
+```
+type Foo = u32; // ok!
+type Foo2<A> = Box<A>; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0092.md b/compiler/rustc_error_codes/src/error_codes/E0092.md
new file mode 100644
index 000000000..496174b28
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0092.md
@@ -0,0 +1,24 @@
+An undefined atomic operation function was declared.
+
+Erroneous code example:
+
+```compile_fail,E0092
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn atomic_foo(); // error: unrecognized atomic operation
+ // function
+}
+```
+
+Please check you didn't make a mistake in the function's name. All intrinsic
+functions are defined in `compiler/rustc_codegen_llvm/src/intrinsic.rs` and in
+`library/core/src/intrinsics.rs` in the Rust source code. Example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn atomic_fence(); // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0093.md b/compiler/rustc_error_codes/src/error_codes/E0093.md
new file mode 100644
index 000000000..b1683cf4f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0093.md
@@ -0,0 +1,35 @@
+An unknown intrinsic function was declared.
+
+Erroneous code example:
+
+```compile_fail,E0093
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn foo(); // error: unrecognized intrinsic function: `foo`
+}
+
+fn main() {
+ unsafe {
+ foo();
+ }
+}
+```
+
+Please check you didn't make a mistake in the function's name. All intrinsic
+functions are defined in `compiler/rustc_codegen_llvm/src/intrinsic.rs` and in
+`library/core/src/intrinsics.rs` in the Rust source code. Example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn atomic_fence_seqcst(); // ok!
+}
+
+fn main() {
+ unsafe {
+ atomic_fence_seqcst();
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0094.md b/compiler/rustc_error_codes/src/error_codes/E0094.md
new file mode 100644
index 000000000..ec86ec44e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0094.md
@@ -0,0 +1,24 @@
+An invalid number of generic parameters was passed to an intrinsic function.
+
+Erroneous code example:
+
+```compile_fail,E0094
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn size_of<T, U>() -> usize; // error: intrinsic has wrong number
+ // of type parameters
+}
+```
+
+Please check that you provided the right number of type parameters
+and verify with the function declaration in the Rust source code.
+Example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn size_of<T>() -> usize; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0106.md b/compiler/rustc_error_codes/src/error_codes/E0106.md
new file mode 100644
index 000000000..60ca1ddc2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0106.md
@@ -0,0 +1,67 @@
+This error indicates that a lifetime is missing from a type. If it is an error
+inside a function signature, the problem may be a failure to adhere to the
+lifetime elision rules (see below).
+
+Erroneous code examples:
+
+```compile_fail,E0106
+struct Foo1 { x: &bool }
+ // ^ expected lifetime parameter
+struct Foo2<'a> { x: &'a bool } // correct
+
+struct Bar1 { x: Foo2 }
+ // ^^^^ expected lifetime parameter
+struct Bar2<'a> { x: Foo2<'a> } // correct
+
+enum Baz1 { A(u8), B(&bool), }
+ // ^ expected lifetime parameter
+enum Baz2<'a> { A(u8), B(&'a bool), } // correct
+
+type MyStr1 = &str;
+ // ^ expected lifetime parameter
+type MyStr2<'a> = &'a str; // correct
+```
+
+Lifetime elision is a special, limited kind of inference for lifetimes in
+function signatures which allows you to leave out lifetimes in certain cases.
+For more background on lifetime elision see [the book][book-le].
+
+The lifetime elision rules require that any function signature with an elided
+output lifetime must either have:
+
+ - exactly one input lifetime
+ - or, multiple input lifetimes, but the function must also be a method with a
+ `&self` or `&mut self` receiver
+
+In the first case, the output lifetime is inferred to be the same as the unique
+input lifetime. In the second case, the lifetime is instead inferred to be the
+same as the lifetime on `&self` or `&mut self`.
+
+Here are some examples of elision errors:
+
+```compile_fail,E0106
+// error, no input lifetimes
+fn foo() -> &str { }
+
+// error, `x` and `y` have distinct lifetimes inferred
+fn bar(x: &str, y: &str) -> &str { }
+
+// error, `y`'s lifetime is inferred to be distinct from `x`'s
+fn baz<'a>(x: &'a str, y: &str) -> &str { }
+```
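+
+For contrast, here are signatures that satisfy the rules, one for each case:
+
+```
+// ok: one input lifetime, so the output borrows from `x`
+fn first_word(x: &str) -> &str { x }
+
+struct Buf { data: String }
+
+impl Buf {
+    // ok: `&self` receiver, so the output borrows from `self`
+    fn get(&self, _key: &str) -> &str { &self.data }
+}
+```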
+
+[book-le]: https://doc.rust-lang.org/book/ch10-03-lifetime-syntax.html#lifetime-elision
diff --git a/compiler/rustc_error_codes/src/error_codes/E0107.md b/compiler/rustc_error_codes/src/error_codes/E0107.md
new file mode 100644
index 000000000..4e37695a5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0107.md
@@ -0,0 +1,44 @@
+An incorrect number of generic arguments was provided.
+
+Erroneous code example:
+
+```compile_fail,E0107
+struct Foo<T> { x: T }
+
+struct Bar { x: Foo } // error: wrong number of type arguments:
+ // expected 1, found 0
+struct Baz<S, T> { x: Foo<S, T> } // error: wrong number of type arguments:
+ // expected 1, found 2
+
+fn foo<T, U>(x: T, y: U) {}
+fn f() {}
+
+fn main() {
+ let x: bool = true;
+ foo::<bool>(x); // error: wrong number of type arguments:
+ // expected 2, found 1
+ foo::<bool, i32, i32>(x, 2, 4); // error: wrong number of type arguments:
+ // expected 2, found 3
+    f::<'static>(); // error: wrong number of lifetime arguments:
+ // expected 0, found 1
+}
+```
+
+When using/declaring an item with generic arguments, you must provide the exact
+same number:
+
+```
+struct Foo<T> { x: T }
+
+struct Bar<T> { x: Foo<T> } // ok!
+struct Baz<S, T> { x: Foo<S>, y: Foo<T> } // ok!
+
+fn foo<T, U>(x: T, y: U) {}
+fn f() {}
+
+fn main() {
+ let x: bool = true;
+ foo::<bool, u32>(x, 12); // ok!
+ f(); // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0109.md b/compiler/rustc_error_codes/src/error_codes/E0109.md
new file mode 100644
index 000000000..2eab9725a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0109.md
@@ -0,0 +1,22 @@
+You tried to provide a generic argument to a type which doesn't need it.
+
+Erroneous code example:
+
+```compile_fail,E0109
+type X = u32<i32>; // error: type arguments are not allowed for this type
+type Y = bool<'static>; // error: lifetime parameters are not allowed on
+ // this type
+```
+
+Check that you used the correct argument and that the definition is correct.
+
+Example:
+
+```
+type X = u32; // ok!
+type Y = bool; // ok!
+```
+
+Note that generic arguments for enum variant constructors go after the variant,
+not after the enum. For example, you would write `Option::None::<u32>`,
+rather than `Option::<u32>::None`.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0110.md b/compiler/rustc_error_codes/src/error_codes/E0110.md
new file mode 100644
index 000000000..b9fe406ff
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0110.md
@@ -0,0 +1,4 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+You tried to provide a lifetime to a type which doesn't need it.
+See `E0109` for more details.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0116.md b/compiler/rustc_error_codes/src/error_codes/E0116.md
new file mode 100644
index 000000000..653be6029
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0116.md
@@ -0,0 +1,37 @@
+An inherent implementation was defined for a type outside the current crate.
+
+Erroneous code example:
+
+```compile_fail,E0116
+impl Vec<u8> { } // error
+```
+
+You can only define an inherent implementation for a type in the same crate
+where the type was defined. For example, an `impl` block as above is not allowed
+since `Vec` is defined in the standard library.
+
+To fix this problem, you can either:
+
+ - define a trait that has the desired associated functions/types/constants and
+ implement the trait for the type in question
+ - define a new type wrapping the type and define an implementation on the
+   new type, as in the sketch below
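+
+The wrapper approach might look like this:
+
+```
+struct Bytes(Vec<u8>);
+
+impl Bytes { // ok: inherent impl on a locally defined type
+    fn len(&self) -> usize { self.0.len() }
+}
+```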
+
+Note that using the `type` keyword does not work here because `type` only
+introduces a type alias:
+
+```compile_fail,E0116
+type Bytes = Vec<u8>;
+
+impl Bytes { } // error, same as above
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0117.md b/compiler/rustc_error_codes/src/error_codes/E0117.md
new file mode 100644
index 000000000..0544667cc
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0117.md
@@ -0,0 +1,50 @@
+Only traits defined in the current crate can be implemented for arbitrary types.
+
+Erroneous code example:
+
+```compile_fail,E0117
+impl Drop for u32 {}
+```
+
+This error indicates a violation of one of Rust's orphan rules for trait
+implementations. The rule prohibits any implementation of a foreign trait (a
+trait defined in another crate) where
+
+ - the type that is implementing the trait is foreign
+ - all of the parameters being passed to the trait (if there are any) are also
+ foreign.
+
+To avoid this kind of error, ensure that at least one local type is referenced
+by the `impl`:
+
+```
+pub struct Foo; // you define your type in your crate
+
+impl Drop for Foo { // and you can implement the trait on it!
+ // code of trait implementation here
+# fn drop(&mut self) { }
+}
+
+impl From<Foo> for i32 { // or you use a type from your crate as
+ // a type parameter
+ fn from(i: Foo) -> i32 {
+ 0
+ }
+}
+```
+
+Alternatively, define a trait locally and implement that instead:
+
+```
+trait Bar {
+ fn get(&self) -> usize;
+}
+
+impl Bar for u32 {
+ fn get(&self) -> usize { 0 }
+}
+```
+
+For information on the design of the orphan rules, see [RFC 1023].
+
+[RFC 1023]: https://github.com/rust-lang/rfcs/blob/master/text/1023-rebalancing-coherence.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0118.md b/compiler/rustc_error_codes/src/error_codes/E0118.md
new file mode 100644
index 000000000..cfabae1a6
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0118.md
@@ -0,0 +1,43 @@
+An inherent implementation was defined for something which isn't a struct,
+enum, union, or trait object.
+
+Erroneous code example:
+
+```compile_fail,E0118
+impl<T> T { // error: no nominal type found for inherent implementation
+ fn get_state(&self) -> String {
+ // ...
+ }
+}
+```
+
+To fix this error, please implement a trait on the type or wrap it in a struct.
+Example:
+
+```
+// we create a trait here
+trait LiveLongAndProsper {
+ fn get_state(&self) -> String;
+}
+
+// and now you can implement it on T
+impl<T> LiveLongAndProsper for T {
+ fn get_state(&self) -> String {
+ "He's dead, Jim!".to_owned()
+ }
+}
+```
+
+Alternatively, you can create a newtype. A newtype is a wrapping tuple-struct.
+For example, `NewType` is a newtype over `Foo` in `struct NewType(Foo)`.
+Example:
+
+```
+struct TypeWrapper<T>(T);
+
+impl<T> TypeWrapper<T> {
+ fn get_state(&self) -> String {
+ "Fascinating!".to_owned()
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0119.md b/compiler/rustc_error_codes/src/error_codes/E0119.md
new file mode 100644
index 000000000..e596349e5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0119.md
@@ -0,0 +1,59 @@
+There are conflicting trait implementations for the same type.
+
+Erroneous code example:
+
+```compile_fail,E0119
+trait MyTrait {
+ fn get(&self) -> usize;
+}
+
+impl<T> MyTrait for T {
+ fn get(&self) -> usize { 0 }
+}
+
+struct Foo {
+ value: usize
+}
+
+impl MyTrait for Foo { // error: conflicting implementations of trait
+ // `MyTrait` for type `Foo`
+ fn get(&self) -> usize { self.value }
+}
+```
+
+When looking for an implementation of the trait, the compiler finds both the
+blanket `impl<T> MyTrait for T`, which covers all types, and the specific
+`impl MyTrait for Foo`. Since a trait cannot be implemented multiple times
+for the same type, this is an error. So, when you write:
+
+```
+trait MyTrait {
+ fn get(&self) -> usize;
+}
+
+impl<T> MyTrait for T {
+ fn get(&self) -> usize { 0 }
+}
+```
+
+This implements the trait for all types in scope. So if you try to
+implement it for a specific type as well, the two implementations will
+conflict. Example:
+
+```
+trait MyTrait {
+ fn get(&self) -> usize;
+}
+
+impl<T> MyTrait for T {
+ fn get(&self) -> usize { 0 }
+}
+
+struct Foo;
+
+fn main() {
+ let f = Foo;
+
+ f.get(); // the trait is implemented so we can use it
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0120.md b/compiler/rustc_error_codes/src/error_codes/E0120.md
new file mode 100644
index 000000000..dc7258d87
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0120.md
@@ -0,0 +1,37 @@
+`Drop` was implemented on a trait, which is not allowed: only structs and
+enums can implement `Drop`.
+
+Erroneous code example:
+
+```compile_fail,E0120
+trait MyTrait {}
+
+impl Drop for MyTrait {
+ fn drop(&mut self) {}
+}
+```
+
+A workaround for this problem is to wrap the trait in a struct, and implement
+`Drop` on that:
+
+```
+trait MyTrait {}
+struct MyWrapper<T: MyTrait> { foo: T }
+
+impl <T: MyTrait> Drop for MyWrapper<T> {
+ fn drop(&mut self) {}
+}
+```
+
+Alternatively, you can wrap a trait object instead of a generic parameter:
+
+```
+trait MyTrait {}
+
+// or Box<dyn MyTrait>, if you wanted an owned trait object
+struct MyWrapper<'a> { foo: &'a dyn MyTrait }
+
+impl <'a> Drop for MyWrapper<'a> {
+ fn drop(&mut self) {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0121.md b/compiler/rustc_error_codes/src/error_codes/E0121.md
new file mode 100644
index 000000000..06fe396d5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0121.md
@@ -0,0 +1,24 @@
+The type placeholder `_` was used within a type on an item's signature.
+
+Erroneous code example:
+
+```compile_fail,E0121
+fn foo() -> _ { 5 } // error
+
+static BAR: _ = "test"; // error
+```
+
+In those cases, you need to provide the type explicitly:
+
+```
+fn foo() -> i32 { 5 } // ok!
+
+static BAR: &str = "test"; // ok!
+```
+
+The type placeholder `_` can be used outside an item's signature as follows:
+
+```
+let x = "a4a".split('4')
+    .collect::<Vec<_>>(); // No need to spell out the `Vec`'s element type.
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0124.md b/compiler/rustc_error_codes/src/error_codes/E0124.md
new file mode 100644
index 000000000..8af7cb819
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0124.md
@@ -0,0 +1,19 @@
+A struct was declared with two fields having the same name.
+
+Erroneous code example:
+
+```compile_fail,E0124
+struct Foo {
+ field1: i32,
+ field1: i32, // error: field is already declared
+}
+```
+
+Please verify that the field names have been correctly spelled. Example:
+
+```
+struct Foo {
+ field1: i32,
+ field2: i32, // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0128.md b/compiler/rustc_error_codes/src/error_codes/E0128.md
new file mode 100644
index 000000000..2ea8ae68e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0128.md
@@ -0,0 +1,25 @@
+A type parameter with a default value is using a forward-declared identifier.
+
+Erroneous code example:
+
+```compile_fail,E0128
+struct Foo<T = U, U = ()> {
+ field1: T,
+ field2: U,
+}
+// error: generic parameters with a default cannot use forward declared
+// identifiers
+```
+
+Type parameter defaults can only use parameters that occur before them. Since
+type parameters are evaluated in-order, this issue could be fixed by doing:
+
+```
+struct Foo<U = (), T = U> {
+ field1: T,
+ field2: U,
+}
+```
+
+Please also verify that this wasn't because of a name-clash and rename the type
+parameter if so.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0130.md b/compiler/rustc_error_codes/src/error_codes/E0130.md
new file mode 100644
index 000000000..2cd27b5ec
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0130.md
@@ -0,0 +1,31 @@
+A pattern was declared as an argument in a foreign function declaration.
+
+Erroneous code example:
+
+```compile_fail,E0130
+extern "C" {
+ fn foo((a, b): (u32, u32)); // error: patterns aren't allowed in foreign
+ // function declarations
+}
+```
+
+To fix this error, replace the pattern argument with a regular one. Example:
+
+```
+struct SomeStruct {
+ a: u32,
+ b: u32,
+}
+
+extern "C" {
+ fn foo(s: SomeStruct); // ok!
+}
+```
+
+Or:
+
+```
+extern "C" {
+ fn foo(a: (u32, u32)); // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0131.md b/compiler/rustc_error_codes/src/error_codes/E0131.md
new file mode 100644
index 000000000..ed798d4f8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0131.md
@@ -0,0 +1,17 @@
+The `main` function was defined with generic parameters.
+
+Erroneous code example:
+
+```compile_fail,E0131
+fn main<T>() { // error: main function is not allowed to have generic parameters
+}
+```
+
+It is not possible to define the `main` function with generic parameters.
+It must not take any arguments.
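+
+A valid `main` takes no arguments and has no generic parameters:
+
+```
+fn main() {} // ok!
+```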
diff --git a/compiler/rustc_error_codes/src/error_codes/E0132.md b/compiler/rustc_error_codes/src/error_codes/E0132.md
new file mode 100644
index 000000000..a23cc988b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0132.md
@@ -0,0 +1,32 @@
+A function with the `start` attribute was declared with type parameters.
+
+Erroneous code example:
+
+```compile_fail,E0132
+#![feature(start)]
+
+#[start]
+fn f<T>() {}
+```
+
+It is not possible to declare type parameters on a function that has the `start`
+attribute. Such a function must have the following type signature (for more
+information, view [the unstable book][1]):
+
+[1]: https://doc.rust-lang.org/unstable-book/language-features/lang-items.html#writing-an-executable-without-stdlib
+
+```
+# let _:
+fn(isize, *const *const u8) -> isize;
+```
+
+Example:
+
+```
+#![feature(start)]
+
+#[start]
+fn my_start(argc: isize, argv: *const *const u8) -> isize {
+ 0
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0133.md b/compiler/rustc_error_codes/src/error_codes/E0133.md
new file mode 100644
index 000000000..1adbcc313
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0133.md
@@ -0,0 +1,33 @@
+Unsafe code was used outside of an unsafe function or block.
+
+Erroneous code example:
+
+```compile_fail,E0133
+unsafe fn f() { return; } // This is the unsafe code
+
+fn main() {
+ f(); // error: call to unsafe function requires unsafe function or block
+}
+```
+
+Using unsafe functionality is potentially dangerous and disallowed by safety
+checks. Examples:
+
+* Dereferencing raw pointers
+* Calling functions via FFI
+* Calling functions marked unsafe
+
+These safety checks can be relaxed for a section of the code by wrapping the
+unsafe instructions with an `unsafe` block. For instance:
+
+```
+unsafe fn f() { return; }
+
+fn main() {
+ unsafe { f(); } // ok!
+}
+```
+
+See the [unsafe section][unsafe-section] of the Book for more details.
+
+[unsafe-section]: https://doc.rust-lang.org/book/ch19-01-unsafe-rust.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0136.md b/compiler/rustc_error_codes/src/error_codes/E0136.md
new file mode 100644
index 000000000..15cf09a18
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0136.md
@@ -0,0 +1,21 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+More than one `main` function was found.
+
+Erroneous code example:
+
+```compile_fail
+fn main() {
+ // ...
+}
+
+// ...
+
+fn main() { // error!
+ // ...
+}
+```
+
+A binary can only have one entry point, and by default that entry point is the
+`main()` function. If there are multiple instances of this function, please
+rename one of them.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0137.md b/compiler/rustc_error_codes/src/error_codes/E0137.md
new file mode 100644
index 000000000..d4e19170f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0137.md
@@ -0,0 +1,26 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+More than one function was declared with the `#[main]` attribute.
+
+Erroneous code example:
+
+```compile_fail
+#![feature(main)]
+
+#[main]
+fn foo() {}
+
+#[main]
+fn f() {} // error: multiple functions with a `#[main]` attribute
+```
+
+This error indicates that the compiler found multiple functions with the
+`#[main]` attribute. This is an error because there must be a unique entry
+point into a Rust program. Example:
+
+```compile_fail
+#![feature(main)]
+
+#[main]
+fn f() {} // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0138.md b/compiler/rustc_error_codes/src/error_codes/E0138.md
new file mode 100644
index 000000000..3f5eaea9f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0138.md
@@ -0,0 +1,25 @@
+More than one function was declared with the `#[start]` attribute.
+
+Erroneous code example:
+
+```compile_fail,E0138
+#![feature(start)]
+
+#[start]
+fn foo(argc: isize, argv: *const *const u8) -> isize {}
+
+#[start]
+fn f(argc: isize, argv: *const *const u8) -> isize {}
+// error: multiple 'start' functions
+```
+
+This error indicates that the compiler found multiple functions with the
+`#[start]` attribute. This is an error because there must be a unique entry
+point into a Rust program. Example:
+
+```
+#![feature(start)]
+
+#[start]
+fn foo(argc: isize, argv: *const *const u8) -> isize { 0 } // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0139.md b/compiler/rustc_error_codes/src/error_codes/E0139.md
new file mode 100644
index 000000000..a116cf293
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0139.md
@@ -0,0 +1,84 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+There are various restrictions on transmuting between types in Rust; for example
+types being transmuted must have the same size. To apply all these restrictions,
+the compiler must know the exact types that may be transmuted. When type
+parameters are involved, this cannot always be done.
+
+So, for example, the following is not allowed:
+
+```
+use std::mem::transmute;
+
+struct Foo<T>(Vec<T>);
+
+fn foo<T>(x: Vec<T>) {
+    // we are transmuting between Vec<T> and Foo<T> here
+ let y: Foo<T> = unsafe { transmute(x) };
+ // do something with y
+}
+```
+
+In this specific case there's a good chance that the transmute is harmless (but
+this is not guaranteed by Rust). However, when alignment and enum optimizations
+come into the picture, it's quite likely that the sizes may or may not match
+with different type parameter substitutions. It's not possible to check this
+for _all_ possible types, so `transmute()` simply rejects types with
+unsubstituted type parameters.
+
+If you need this, there's a good chance you're doing something wrong. Keep in
+mind that Rust doesn't guarantee much about the layout of different structs
+(even two structs with identical declarations may have different layouts). If
+there is a solution that avoids the transmute entirely, try it instead.
+
+If it's possible, hand-monomorphize the code by writing the function for each
+possible type substitution. It's possible to use traits to do this cleanly,
+for example:
+
+```
+use std::mem::transmute;
+
+struct Foo<T>(Vec<T>);
+
+trait MyTransmutableType: Sized {
+ fn transmute(_: Vec<Self>) -> Foo<Self>;
+}
+
+impl MyTransmutableType for u8 {
+ fn transmute(x: Vec<u8>) -> Foo<u8> {
+ unsafe { transmute(x) }
+ }
+}
+
+impl MyTransmutableType for String {
+ fn transmute(x: Vec<String>) -> Foo<String> {
+ unsafe { transmute(x) }
+ }
+}
+
+// ... more impls for the types you intend to transmute
+
+fn foo<T: MyTransmutableType>(x: Vec<T>) {
+ let y: Foo<T> = <T as MyTransmutableType>::transmute(x);
+ // do something with y
+}
+```
+
+Each impl will be checked for a size match in the transmute as usual, and since
+there are no unbound type parameters involved, this should compile unless there
+is a size mismatch in one of the impls.
+
+It is also possible to manually transmute:
+
+```
+# use std::ptr;
+# let v = Some("value");
+# type SomeType = &'static [u8];
+unsafe {
+ ptr::read(&v as *const _ as *const SomeType) // `v` transmuted to `SomeType`
+}
+# ;
+```
+
+Note that this does not move `v` (unlike `transmute`), and may need a
+call to `mem::forget(v)` in case you want to avoid destructors being called.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0152.md b/compiler/rustc_error_codes/src/error_codes/E0152.md
new file mode 100644
index 000000000..ef17b8b4c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0152.md
@@ -0,0 +1,25 @@
+A lang item was redefined.
+
+Erroneous code example:
+
+```compile_fail,E0152
+#![feature(lang_items)]
+
+#[lang = "owned_box"]
+struct Foo<T>(T); // error: duplicate lang item found: `owned_box`
+```
+
+Lang items are already implemented in the standard library. Unless you are
+writing a free-standing application (e.g., a kernel), you do not need to provide
+them yourself.
+
+You can build a free-standing crate by adding `#![no_std]` to the crate
+attributes:
+
+```ignore (only-for-syntax-highlight)
+#![no_std]
+```
+
+See also the [unstable book][1].
+
+[1]: https://doc.rust-lang.org/unstable-book/language-features/lang-items.html#writing-an-executable-without-stdlib
diff --git a/compiler/rustc_error_codes/src/error_codes/E0154.md b/compiler/rustc_error_codes/src/error_codes/E0154.md
new file mode 100644
index 000000000..e437a7189
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0154.md
@@ -0,0 +1,34 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Imports (`use` statements) are not allowed after non-item statements, such as
+variable declarations and expression statements.
+
+Here is an example that demonstrates the error:
+
+```
+fn f() {
+ // Variable declaration before import
+ let x = 0;
+ use std::io::Read;
+ // ...
+}
+```
+
+The solution is to declare the imports at the top of the block, function, or
+file.
+
+Here is the previous example again, with the correct order:
+
+```
+fn f() {
+ use std::io::Read;
+ let x = 0;
+ // ...
+}
+```
+
+See the [Declaration Statements][declaration-statements] section of the
+reference for more information about what constitutes an item declaration
+and what does not.
+
+[declaration-statements]: https://doc.rust-lang.org/reference/statements.html#declaration-statements
diff --git a/compiler/rustc_error_codes/src/error_codes/E0158.md b/compiler/rustc_error_codes/src/error_codes/E0158.md
new file mode 100644
index 000000000..0a9ef9c39
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0158.md
@@ -0,0 +1,59 @@
+An associated const has been referenced in a pattern.
+
+Erroneous code example:
+
+```compile_fail,E0158
+enum EFoo { A, B, C, D }
+
+trait Foo {
+ const X: EFoo;
+}
+
+fn test<A: Foo>(arg: EFoo) {
+ match arg {
+ A::X => { // error!
+ println!("A::X");
+ }
+ }
+}
+```
+
+`const` and `static` mean different things. A `const` is a compile-time
+constant, an alias for a literal value. This property means you can match it
+directly within a pattern.
+
+The `static` keyword, on the other hand, guarantees a fixed location in memory.
+This does not always mean that the value is constant. For example, a global
+mutex can be declared `static` as well.
+
+If you want to match against a `static`, consider using a guard instead:
+
+```
+static FORTY_TWO: i32 = 42;
+
+match Some(42) {
+ Some(x) if x == FORTY_TWO => {}
+ _ => {}
+}
+```
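+
+The same guard technique works for the associated `const` from the erroneous
+example; a sketch of a fixed `test` function:
+
+```
+#[derive(PartialEq)]
+enum EFoo { A, B, C, D }
+
+trait Foo {
+    const X: EFoo;
+}
+
+fn test<A: Foo>(arg: EFoo) {
+    match arg {
+        x if x == A::X => { // ok: compared in a guard
+            println!("A::X");
+        }
+        _ => {}
+    }
+}
+```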
diff --git a/compiler/rustc_error_codes/src/error_codes/E0161.md b/compiler/rustc_error_codes/src/error_codes/E0161.md
new file mode 100644
index 000000000..ebd2c9769
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0161.md
@@ -0,0 +1,45 @@
+A value was moved whose size was not known at compile time.
+
+Erroneous code example:
+
+```compile_fail,E0161
+#![feature(box_syntax)]
+trait Bar {
+ fn f(self);
+}
+
+impl Bar for i32 {
+ fn f(self) {}
+}
+
+fn main() {
+ let b: Box<dyn Bar> = box (0 as i32);
+ b.f();
+ // error: cannot move a value of type dyn Bar: the size of dyn Bar cannot
+ // be statically determined
+}
+```
+
+In Rust, you can only move a value when its size is known at compile time.
+
+To work around this restriction, consider "hiding" the value behind a reference:
+either `&x` or `&mut x`. Since a reference has a fixed size, this lets you move
+it around as usual. Example:
+
+```
+#![feature(box_syntax)]
+
+trait Bar {
+ fn f(&self);
+}
+
+impl Bar for i32 {
+ fn f(&self) {}
+}
+
+fn main() {
+ let b: Box<dyn Bar> = box (0 as i32);
+ b.f();
+ // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0162.md b/compiler/rustc_error_codes/src/error_codes/E0162.md
new file mode 100644
index 000000000..0161c9325
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0162.md
@@ -0,0 +1,26 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+An `if let` pattern attempts to match the pattern, and enters the body if the
+match was successful. If the match is irrefutable (when it cannot fail to
+match), use a regular `let`-binding instead. For instance:
+
+```
+struct Irrefutable(i32);
+let irr = Irrefutable(0);
+
+// The pattern is irrefutable, so this `if let` is useless.
+if let Irrefutable(x) = irr {
+ // This body will always be executed.
+ // ...
+}
+```
+
+Try this instead:
+
+```
+struct Irrefutable(i32);
+let irr = Irrefutable(0);
+
+let Irrefutable(x) = irr;
+println!("{}", x);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0164.md b/compiler/rustc_error_codes/src/error_codes/E0164.md
new file mode 100644
index 000000000..48bb6f4b3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0164.md
@@ -0,0 +1,44 @@
+Something which is neither a tuple struct nor a tuple variant was used as a
+pattern.
+
+Erroneous code example:
+
+```compile_fail,E0164
+enum A {
+ B,
+ C,
+}
+
+impl A {
+ fn new() {}
+}
+
+fn bar(foo: A) {
+ match foo {
+ A::new() => (), // error!
+ _ => {}
+ }
+}
+```
+
+This error means that an attempt was made to match something which is neither
+a tuple struct nor a tuple variant. Only those two kinds of items can be used
+in a call-like pattern:
+
+```
+enum A {
+ B,
+ C,
+}
+
+impl A {
+ fn new() {}
+}
+
+fn bar(foo: A) {
+ match foo {
+ A::B => (), // ok!
+ _ => {}
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0165.md b/compiler/rustc_error_codes/src/error_codes/E0165.md
new file mode 100644
index 000000000..7bcd6c0cb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0165.md
@@ -0,0 +1,27 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+A `while let` pattern attempts to match the pattern, and enters the body if the
+match was successful. If the match is irrefutable (when it cannot fail to
+match), use a regular `let`-binding inside a `loop` instead. For instance:
+
+```no_run
+struct Irrefutable(i32);
+let irr = Irrefutable(0);
+
+// The pattern is irrefutable, so this loop never exits.
+while let Irrefutable(x) = irr {
+ // ...
+}
+```
+
+Try this instead:
+
+```no_run
+struct Irrefutable(i32);
+let irr = Irrefutable(0);
+
+loop {
+ let Irrefutable(x) = irr;
+ // ...
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0170.md b/compiler/rustc_error_codes/src/error_codes/E0170.md
new file mode 100644
index 000000000..9678cd173
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0170.md
@@ -0,0 +1,67 @@
+A pattern binding is using the same name as one of the variants of a type.
+
+Erroneous code example:
+
+```compile_fail,E0170
+# #![deny(warnings)]
+enum Method {
+ GET,
+ POST,
+}
+
+fn is_empty(s: Method) -> bool {
+ match s {
+ GET => true,
+ _ => false
+ }
+}
+
+fn main() {}
+```
+
+Enum variants are qualified by default. For example, given this type:
+
+```
+enum Method {
+ GET,
+ POST,
+}
+```
+
+You would match it using:
+
+```
+enum Method {
+ GET,
+ POST,
+}
+
+let m = Method::GET;
+
+match m {
+ Method::GET => {},
+ Method::POST => {},
+}
+```
+
+If you don't qualify the names, the code will bind new variables named "GET" and
+"POST" instead. This behavior is likely not what you want, so `rustc` warns when
+that happens.
+
+Qualified names are good practice, and most code works well with them. But if
+you prefer them unqualified, you can import the variants into scope:
+
+```
+use Method::*;
+enum Method { GET, POST }
+# fn main() {}
+```
+
+If you want others to be able to import variants from your module directly, use
+`pub use`:
+
+```
+pub use Method::*;
+pub enum Method { GET, POST }
+# fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0178.md b/compiler/rustc_error_codes/src/error_codes/E0178.md
new file mode 100644
index 000000000..0c6f91863
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0178.md
@@ -0,0 +1,30 @@
+The `+` type operator was used in an ambiguous context.
+
+Erroneous code example:
+
+```compile_fail,E0178
+trait Foo {}
+
+struct Bar<'a> {
+ x: &'a Foo + 'a, // error!
+ y: &'a mut Foo + 'a, // error!
+ z: fn() -> Foo + 'a, // error!
+}
+```
+
+In types, the `+` type operator has low precedence, so it is often necessary
+to use parentheses:
+
+```
+trait Foo {}
+
+struct Bar<'a> {
+ x: &'a (Foo + 'a), // ok!
+ y: &'a mut (Foo + 'a), // ok!
+ z: fn() -> (Foo + 'a), // ok!
+}
+```
+
+More details can be found in [RFC 438].
+
+[RFC 438]: https://github.com/rust-lang/rfcs/pull/438
diff --git a/compiler/rustc_error_codes/src/error_codes/E0183.md b/compiler/rustc_error_codes/src/error_codes/E0183.md
new file mode 100644
index 000000000..92fa4c7c2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0183.md
@@ -0,0 +1,39 @@
+A `Fn*` trait was implemented manually.
+
+Erroneous code example:
+
+```compile_fail,E0183
+struct MyClosure {
+ foo: i32
+}
+
+impl FnOnce<()> for MyClosure { // error
+ type Output = ();
+ extern "rust-call" fn call_once(self, args: ()) -> Self::Output {
+ println!("{}", self.foo);
+ }
+}
+```
+
+Manually implementing `Fn`, `FnMut` or `FnOnce` is unstable
+and requires `#![feature(fn_traits, unboxed_closures)]`.
+
+```
+#![feature(fn_traits, unboxed_closures)]
+
+struct MyClosure {
+ foo: i32
+}
+
+impl FnOnce<()> for MyClosure { // ok!
+ type Output = ();
+ extern "rust-call" fn call_once(self, args: ()) -> Self::Output {
+ println!("{}", self.foo);
+ }
+}
+```
+
+The arguments must be a tuple representing the argument list.
+For more info, see the [tracking issue][iss29625].
+
+[iss29625]: https://github.com/rust-lang/rust/issues/29625
diff --git a/compiler/rustc_error_codes/src/error_codes/E0184.md b/compiler/rustc_error_codes/src/error_codes/E0184.md
new file mode 100644
index 000000000..4624f9e5b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0184.md
@@ -0,0 +1,27 @@
+The `Copy` trait was implemented on a type with a `Drop` implementation.
+
+Erroneous code example:
+
+```compile_fail,E0184
+#[derive(Copy)]
+struct Foo; // error!
+
+impl Drop for Foo {
+ fn drop(&mut self) {
+ }
+}
+```
+
+Explicitly implementing both `Drop` and `Copy` trait on a type is currently
+disallowed. This feature can make some sense in theory, but the current
+implementation is incorrect and can lead to memory unsafety (see
+[issue #20126][iss20126]), so it has been disabled for now.
+
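+A fixed version simply does not implement `Drop` for the `Copy` type:
+
+```
+#[derive(Copy, Clone)]
+struct Foo; // ok!
+```
+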
+[iss20126]: https://github.com/rust-lang/rust/issues/20126
diff --git a/compiler/rustc_error_codes/src/error_codes/E0185.md b/compiler/rustc_error_codes/src/error_codes/E0185.md
new file mode 100644
index 000000000..944a93ed1
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0185.md
@@ -0,0 +1,35 @@
+An associated function for a trait was defined to be static, but an
+implementation of the trait declared the same function to be a method (i.e., to
+take a `self` parameter).
+
+Erroneous code example:
+
+```compile_fail,E0185
+trait Foo {
+ fn foo();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ // error, method `foo` has a `&self` declaration in the impl, but not in
+ // the trait
+ fn foo(&self) {}
+}
+```
+
+When a type implements a trait's associated function, it has to use the same
+signature. So in this case, since `Foo::foo` does not take any argument and
+does not return anything, its implementation on `Bar` should be the same:
+
+```
+trait Foo {
+ fn foo();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn foo() {} // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0186.md b/compiler/rustc_error_codes/src/error_codes/E0186.md
new file mode 100644
index 000000000..7db1e8433
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0186.md
@@ -0,0 +1,35 @@
+An associated function for a trait was defined to be a method (i.e., to take a
+`self` parameter), but an implementation of the trait declared the same function
+to be static.
+
+Erroneous code example:
+
+```compile_fail,E0186
+trait Foo {
+ fn foo(&self);
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ // error, method `foo` has a `&self` declaration in the trait, but not in
+ // the impl
+ fn foo() {}
+}
+```
+
+When a type implements a trait's associated function, it has to use the same
+signature. So in this case, since `Foo::foo` takes `self` as argument and
+does not return anything, its implementation on `Bar` should be the same:
+
+```
+trait Foo {
+ fn foo(&self);
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn foo(&self) {} // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0191.md b/compiler/rustc_error_codes/src/error_codes/E0191.md
new file mode 100644
index 000000000..46b773bdc
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0191.md
@@ -0,0 +1,24 @@
+An associated type wasn't specified for a trait object.
+
+Erroneous code example:
+
+```compile_fail,E0191
+trait Trait {
+ type Bar;
+}
+
+type Foo = Trait; // error: the value of the associated type `Bar` (from
+ // the trait `Trait`) must be specified
+```
+
+Trait objects need to have all associated types specified. Please verify that
+all associated types of the trait were specified and the correct trait was used.
+Example:
+
+```
+trait Trait {
+ type Bar;
+}
+
+type Foo = Trait<Bar=i32>; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0192.md b/compiler/rustc_error_codes/src/error_codes/E0192.md
new file mode 100644
index 000000000..deca042a9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0192.md
@@ -0,0 +1,22 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+A negative impl was added on a trait implementation.
+
+Erroneous code example:
+
+```compile_fail
+trait Trait {
+ type Bar;
+}
+
+struct Foo;
+
+impl !Trait for Foo { } //~ ERROR
+
+fn main() {}
+```
+
+Negative impls are only allowed for auto traits. For more
+information see the [opt-in builtin traits RFC][RFC 19].
+
+[RFC 19]: https://github.com/rust-lang/rfcs/blob/master/text/0019-opt-in-builtin-traits.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0193.md b/compiler/rustc_error_codes/src/error_codes/E0193.md
new file mode 100644
index 000000000..e29a949ff
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0193.md
@@ -0,0 +1,44 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+`where` clauses must use generic type parameters: it does not make sense to use
+them otherwise. An example causing this error:
+
+```
+trait Foo {
+ fn bar(&self);
+}
+
+#[derive(Copy,Clone)]
+struct Wrapper<T> {
+    wrapped: T
+}
+
+impl Foo for Wrapper<u32> where Wrapper<u32>: Clone {
+ fn bar(&self) { }
+}
+```
+
+This use of a `where` clause is strange; a more common usage would look
+something like the following:
+
+```
+trait Foo {
+ fn bar(&self);
+}
+
+#[derive(Copy,Clone)]
+struct Wrapper<T> {
+    wrapped: T
+}
+impl <T> Foo for Wrapper<T> where Wrapper<T>: Clone {
+ fn bar(&self) { }
+}
+```
+
+Here, we're saying that the implementation exists on `Wrapper` only when the
+wrapped type `T` implements `Clone`. The `where` clause is important because
+some types will not implement `Clone`, and thus will not get this method.
+
+In our erroneous example, however, we're referencing a single concrete type.
+Since we know for certain that `Wrapper<u32>` implements `Clone`, there's no
+reason to also specify it in a `where` clause.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0195.md b/compiler/rustc_error_codes/src/error_codes/E0195.md
new file mode 100644
index 000000000..b8c313d41
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0195.md
@@ -0,0 +1,35 @@
+The lifetime parameters of the method do not match the trait declaration.
+
+Erroneous code example:
+
+```compile_fail,E0195
+trait Trait {
+ fn bar<'a,'b:'a>(x: &'a str, y: &'b str);
+}
+
+struct Foo;
+
+impl Trait for Foo {
+ fn bar<'a,'b>(x: &'a str, y: &'b str) {
+ // error: lifetime parameters or bounds on method `bar`
+ // do not match the trait declaration
+ }
+}
+```
+
+The lifetime constraint `'b` in the `bar()` implementation does not match the
+trait declaration. Lifetime declarations must match exactly in both the trait
+declaration and the implementation. Example:
+
+```
+trait Trait {
+ fn t<'a,'b:'a>(x: &'a str, y: &'b str);
+}
+
+struct Foo;
+
+impl Trait for Foo {
+ fn t<'a,'b:'a>(x: &'a str, y: &'b str) { // ok!
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0197.md b/compiler/rustc_error_codes/src/error_codes/E0197.md
new file mode 100644
index 000000000..c142b8f36
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0197.md
@@ -0,0 +1,20 @@
+An inherent implementation was marked unsafe.
+
+Erroneous code example:
+
+```compile_fail,E0197
+struct Foo;
+
+unsafe impl Foo { } // error!
+```
+
+Inherent implementations (ones that do not implement a trait but provide
+methods associated with a type) are always safe because they are not
+implementing an unsafe trait. Removing the `unsafe` keyword from the inherent
+implementation will resolve this error.
+
+```
+struct Foo;
+
+impl Foo { } // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0198.md b/compiler/rustc_error_codes/src/error_codes/E0198.md
new file mode 100644
index 000000000..1238165cb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0198.md
@@ -0,0 +1,28 @@
+A negative implementation was marked as unsafe.
+
+Erroneous code example:
+
+```compile_fail,E0198
+struct Foo;
+
+unsafe impl !Clone for Foo { } // error!
+```
+
+A negative implementation is one that excludes a type from implementing a
+particular trait. Not being able to use a trait is always a safe operation,
+so negative implementations are always safe and never need to be marked as
+unsafe.
+
+This will compile:
+
+```ignore (ignore auto_trait future compatibility warning)
+#![feature(auto_traits)]
+
+struct Foo;
+
+auto trait Enterprise {}
+
+impl !Enterprise for Foo { }
+```
+
+Please note that negative impls are only allowed for auto traits.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0199.md b/compiler/rustc_error_codes/src/error_codes/E0199.md
new file mode 100644
index 000000000..88130e8e5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0199.md
@@ -0,0 +1,23 @@
+A trait implementation was marked as unsafe while the trait is safe.
+
+Erroneous code example:
+
+```compile_fail,E0199
+struct Foo;
+
+trait Bar { }
+
+unsafe impl Bar for Foo { } // error!
+```
+
+Safe traits should not have unsafe implementations; therefore, marking an
+implementation of a safe trait as unsafe will cause a compiler error. Removing
+the `unsafe` marker from the implementation noted in the error will resolve
+this problem:
+
+```
+struct Foo;
+
+trait Bar { }
+
+impl Bar for Foo { } // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0200.md b/compiler/rustc_error_codes/src/error_codes/E0200.md
new file mode 100644
index 000000000..7245bb59c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0200.md
@@ -0,0 +1,23 @@
+An unsafe trait was implemented without an unsafe implementation.
+
+Erroneous code example:
+
+```compile_fail,E0200
+struct Foo;
+
+unsafe trait Bar { }
+
+impl Bar for Foo { } // error!
+```
+
+Unsafe traits must have unsafe implementations. This error occurs when an
+implementation of an unsafe trait isn't marked as unsafe. This may be resolved
+by marking the implementation as unsafe.
+
+```
+struct Foo;
+
+unsafe trait Bar { }
+
+unsafe impl Bar for Foo { } // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0201.md b/compiler/rustc_error_codes/src/error_codes/E0201.md
new file mode 100644
index 000000000..0e1a7b7b7
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0201.md
@@ -0,0 +1,45 @@
+Two associated items (like methods, associated types, associated functions,
+etc.) were defined with the same identifier.
+
+Erroneous code example:
+
+```compile_fail,E0201
+struct Foo(u8);
+
+impl Foo {
+ fn bar(&self) -> bool { self.0 > 5 }
+ fn bar() {} // error: duplicate associated function
+}
+
+trait Baz {
+ type Quux;
+ fn baz(&self) -> bool;
+}
+
+impl Baz for Foo {
+ type Quux = u32;
+
+ fn baz(&self) -> bool { true }
+
+ // error: duplicate method
+ fn baz(&self) -> bool { self.0 > 5 }
+
+ // error: duplicate associated type
+ type Quux = u32;
+}
+```
+
+Note, however, that items with the same name are allowed for inherent `impl`
+blocks that don't overlap:
+
+```
+struct Foo<T>(T);
+
+impl Foo<u8> {
+ fn bar(&self) -> bool { self.0 > 5 }
+}
+
+impl Foo<bool> {
+ fn bar(&self) -> bool { self.0 }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0203.md b/compiler/rustc_error_codes/src/error_codes/E0203.md
new file mode 100644
index 000000000..1edb51927
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0203.md
@@ -0,0 +1,18 @@
+Having multiple relaxed default bounds is unsupported.
+
+Erroneous code example:
+
+```compile_fail,E0203
+struct Bad<T: ?Sized + ?Send> {
+ inner: T
+}
+```
+
+Here the type `T` cannot have a relaxed bound for multiple default traits
+(`Sized` and `Send`). This can be fixed by only using one relaxed bound.
+
+```
+struct Good<T: ?Sized> {
+ inner: T
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0204.md b/compiler/rustc_error_codes/src/error_codes/E0204.md
new file mode 100644
index 000000000..96e44758b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0204.md
@@ -0,0 +1,28 @@
+The `Copy` trait was implemented on a type which contains a field that doesn't
+implement the `Copy` trait.
+
+Erroneous code example:
+
+```compile_fail,E0204
+struct Foo {
+ foo: Vec<u32>,
+}
+
+impl Copy for Foo { } // error!
+```
+
+The `Copy` trait can only be implemented on a type if all of its fields
+implement `Copy` themselves. If your type only contains primitive types,
+you'll be able to implement `Copy` on it. Otherwise, it won't be possible.
+
+Here's another example that will fail:
+
+```compile_fail,E0204
+#[derive(Copy)] // error!
+struct Foo<'a> {
+ ty: &'a mut bool,
+}
+```
+
+This fails because `&mut T` is not `Copy`, even when `T` is `Copy` (this
+differs from the behavior for `&T`, which is always `Copy`).
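+
+For reference, a sketch that compiles because every field implements `Copy`:
+
+```
+#[derive(Copy, Clone)] // ok!
+struct Foo {
+    foo: u32,
+}
+```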
diff --git a/compiler/rustc_error_codes/src/error_codes/E0205.md b/compiler/rustc_error_codes/src/error_codes/E0205.md
new file mode 100644
index 000000000..7916f53ad
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0205.md
@@ -0,0 +1,29 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+An attempt to implement the `Copy` trait for an enum failed because one of the
+variants does not implement `Copy`. To fix this, the data held by the
+mentioned variant must implement `Copy`. Note that this may not be possible,
+as in the following example:
+
+```compile_fail,E0204
+enum Foo {
+ Bar(Vec<u32>),
+ Baz,
+}
+
+impl Copy for Foo { }
+```
+
+This fails because `Vec<T>` does not implement `Copy` for any `T`.
+
+Here's another example that will fail:
+
+```compile_fail,E0204
+#[derive(Copy)]
+enum Foo<'a> {
+ Bar(&'a mut bool),
+ Baz,
+}
+```
+
+This fails because `&mut T` is not `Copy`, even when `T` is `Copy` (this
+differs from the behavior for `&T`, which is always `Copy`).
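+
+For reference, a sketch that compiles because every variant only holds `Copy`
+data:
+
+```
+#[derive(Copy, Clone)] // ok!
+enum Foo {
+    Bar(u32),
+    Baz,
+}
+```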
diff --git a/compiler/rustc_error_codes/src/error_codes/E0206.md b/compiler/rustc_error_codes/src/error_codes/E0206.md
new file mode 100644
index 000000000..4405a2149
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0206.md
@@ -0,0 +1,15 @@
+The `Copy` trait was implemented on a type which is neither a struct nor an
+enum.
+
+Erroneous code example:
+
+```compile_fail,E0206
+#[derive(Copy, Clone)]
+struct Bar;
+
+impl Copy for &'static mut Bar { } // error!
+```
+
+You can only implement `Copy` for a struct or an enum.
+The previous example will fail because `&'static mut Bar`
+is not a struct or enum.
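+
+For reference, a sketch where `Copy` is implemented on the struct itself:
+
+```
+#[derive(Clone)]
+struct Bar;
+
+impl Copy for Bar { } // ok!
+```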
diff --git a/compiler/rustc_error_codes/src/error_codes/E0207.md b/compiler/rustc_error_codes/src/error_codes/E0207.md
new file mode 100644
index 000000000..8a7923ac9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0207.md
@@ -0,0 +1,135 @@
+A type parameter specified for an `impl` is not constrained by it.
+
+Erroneous code example:
+
+```compile_fail,E0207
+struct Foo;
+
+impl<T: Default> Foo {
+ // error: the type parameter `T` is not constrained by the impl trait, self
+ // type, or predicates [E0207]
+ fn get(&self) -> T {
+ <T as Default>::default()
+ }
+}
+```
+
+Any type parameter of an `impl` must meet at least one of
+the following criteria:
+
+ - it appears in the _implementing type_ of the impl, e.g. `impl<T> Foo<T>`
+ - for a trait impl, it appears in the _implemented trait_, e.g.
+ `impl<T> SomeTrait<T> for Foo`
+ - it is bound as an associated type, e.g. `impl<T, U> SomeTrait for T
+ where T: AnotherTrait<AssocType=U>`
+
+### Error example 1
+
+Suppose we have a struct `Foo` for which we would like to define some methods,
+as in the erroneous code example at the start. The problem there is that the
+parameter `T` does not appear in the implementing type (`Foo`) of the impl. In
+this case, we can fix the error by moving the type parameter from the `impl`
+to the method `get`:
+
+```
+struct Foo;
+
+// Move the type parameter from the impl to the method
+impl Foo {
+ fn get<T: Default>(&self) -> T {
+ <T as Default>::default()
+ }
+}
+```
+
+### Error example 2
+
+As another example, suppose we have a `Maker` trait and want to establish a
+type `FooMaker` that makes `Foo`s:
+
+```compile_fail,E0207
+trait Maker {
+ type Item;
+ fn make(&mut self) -> Self::Item;
+}
+
+struct Foo<T> {
+ foo: T
+}
+
+struct FooMaker;
+
+impl<T: Default> Maker for FooMaker {
+// error: the type parameter `T` is not constrained by the impl trait, self
+// type, or predicates [E0207]
+ type Item = Foo<T>;
+
+ fn make(&mut self) -> Foo<T> {
+ Foo { foo: <T as Default>::default() }
+ }
+}
+```
+
+This fails to compile because `T` does not appear in the trait or in the
+implementing type.
+
+One way to work around this is to introduce a phantom type parameter into
+`FooMaker`, like so:
+
+```
+use std::marker::PhantomData;
+
+trait Maker {
+ type Item;
+ fn make(&mut self) -> Self::Item;
+}
+
+struct Foo<T> {
+ foo: T
+}
+
+// Add a type parameter to `FooMaker`
+struct FooMaker<T> {
+ phantom: PhantomData<T>,
+}
+
+impl<T: Default> Maker for FooMaker<T> {
+ type Item = Foo<T>;
+
+ fn make(&mut self) -> Foo<T> {
+ Foo {
+ foo: <T as Default>::default(),
+ }
+ }
+}
+```
+
+Another way is to do away with the associated type in `Maker` and use an input
+type parameter instead:
+
+```
+// Use a type parameter instead of an associated type here
+trait Maker<Item> {
+ fn make(&mut self) -> Item;
+}
+
+struct Foo<T> {
+ foo: T
+}
+
+struct FooMaker;
+
+impl<T: Default> Maker<Foo<T>> for FooMaker {
+ fn make(&mut self) -> Foo<T> {
+ Foo { foo: <T as Default>::default() }
+ }
+}
+```
+
+### Additional information
+
+For more information, please see [RFC 447].
+
+[RFC 447]: https://github.com/rust-lang/rfcs/blob/master/text/0447-no-unused-impl-parameters.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0210.md b/compiler/rustc_error_codes/src/error_codes/E0210.md
new file mode 100644
index 000000000..dc2fd9b0c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0210.md
@@ -0,0 +1,82 @@
+This error indicates a violation of one of Rust's orphan rules for trait
+implementations. The rule concerns the use of type parameters in an
+implementation of a foreign trait (a trait defined in another crate), and
+states that type parameters must be "covered" by a local type.
+
+When implementing a foreign trait for a foreign type,
+the trait must have one or more type parameters.
+A type local to your crate must appear before any use of any type parameters.
+
+To understand what this means, it is perhaps easier to consider a few examples.
+
+If `ForeignTrait` is a trait defined in some external crate `foo`, then the
+following trait `impl` is an error:
+
+```compile_fail,E0210
+# #[cfg(for_demonstration_only)]
+extern crate foo;
+# #[cfg(for_demonstration_only)]
+use foo::ForeignTrait;
+# use std::panic::UnwindSafe as ForeignTrait;
+
+impl<T> ForeignTrait for T { } // error
+# fn main() {}
+```
+
+To work around this, it can be covered with a local type, `MyType`:
+
+```
+# use std::panic::UnwindSafe as ForeignTrait;
+struct MyType<T>(T);
+impl<T> ForeignTrait for MyType<T> { } // Ok
+```
+
+Please note that a type alias is not sufficient.
+
+For another example of an error, suppose there's another trait defined in `foo`
+named `ForeignTrait2` that takes two type parameters. Then this `impl` results
+in the same rule violation:
+
+```ignore (cannot-doctest-multicrate-project)
+struct MyType2;
+impl<T> ForeignTrait2<T, MyType<T>> for MyType2 { } // error
+```
+
+The reason for this is that there are two appearances of type parameter `T` in
+the `impl` header, both as parameters for `ForeignTrait2`. The first appearance
+is uncovered, and so runs afoul of the orphan rule.
+
+Consider one more example:
+
+```ignore (cannot-doctest-multicrate-project)
+impl<T> ForeignTrait2<MyType<T>, T> for MyType2 { } // Ok
+```
+
+This only differs from the previous `impl` in that the parameters `T` and
+`MyType<T>` for `ForeignTrait2` have been swapped. This example does *not*
+violate the orphan rule; it is permitted.
+
+To see why that last example was allowed, you need to understand the general
+rule. Unfortunately this rule is a bit tricky to state. Consider an `impl`:
+
+```ignore (only-for-syntax-highlight)
+impl<P1, ..., Pm> ForeignTrait<T1, ..., Tn> for T0 { ... }
+```
+
+where `P1, ..., Pm` are the type parameters of the `impl` and `T0, ..., Tn`
+are types. One of the types `T0, ..., Tn` must be a local type (this is another
+orphan rule, see the explanation for E0117).
+
+Both of the following must be true:
+
+1. At least one of the types `T0..=Tn` must be a local type.
+   Let `Ti` be the first such type.
+2. No uncovered type parameters `P1..=Pm` may appear in `T0..Ti`
+   (excluding `Ti`).
+
+For information on the design of the orphan rules,
+see [RFC 2451] and [RFC 1023].
+
+[RFC 2451]: https://rust-lang.github.io/rfcs/2451-re-rebalancing-coherence.html
+[RFC 1023]: https://github.com/rust-lang/rfcs/blob/master/text/1023-rebalancing-coherence.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0211.md b/compiler/rustc_error_codes/src/error_codes/E0211.md
new file mode 100644
index 000000000..77289f019
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0211.md
@@ -0,0 +1,79 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+You used a function or type which doesn't fit the requirements for where it was
+used. Erroneous code examples:
+
+```compile_fail
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn size_of<T>(); // error: intrinsic has wrong type
+}
+
+// or:
+
+fn main() -> i32 { 0 }
+// error: main function expects type: `fn() {main}`: expected (), found i32
+
+// or:
+
+let x = 1u8;
+match x {
+ 0u8..=3i8 => (),
+ // error: mismatched types in range: expected u8, found i8
+ _ => ()
+}
+
+// or:
+
+use std::rc::Rc;
+struct Foo;
+
+impl Foo {
+ fn x(self: Rc<Foo>) {}
+ // error: mismatched self type: expected `Foo`: expected struct
+ // `Foo`, found struct `alloc::rc::Rc`
+}
+```
+
+For the first code example, please check the function definition. Example:
+
+```
+#![feature(intrinsics)]
+
+extern "rust-intrinsic" {
+ fn size_of<T>() -> usize; // ok!
+}
+```
+
+The second example is a special case: the `main` function must always
+have this definition:
+
+```compile_fail
+fn main();
+```
+
+It never takes arguments and never returns a value.
+
+For the third example, when you match, all patterns must have the same type
+as the type you're matching on. Example:
+
+```
+let x = 1u8;
+
+match x {
+ 0u8..=3u8 => (), // ok!
+ _ => ()
+}
+```
+
+And finally, for the last example, only `Box<Self>`, `&Self`, `Self`,
+or `&mut Self` work as explicit self parameters. Example:
+
+```
+struct Foo;
+
+impl Foo {
+ fn x(self: Box<Foo>) {} // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0212.md b/compiler/rustc_error_codes/src/error_codes/E0212.md
new file mode 100644
index 000000000..174654146
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0212.md
@@ -0,0 +1,35 @@
+Cannot use the associated type of
+a trait with uninferred generic parameters.
+
+Erroneous code example:
+
+```compile_fail,E0212
+pub trait Foo<T> {
+ type A;
+
+ fn get(&self, t: T) -> Self::A;
+}
+
+fn foo2<I : for<'x> Foo<&'x isize>>(
+ field: I::A) {} // error!
+```
+
+In this example, we have to instantiate `'x`, and
+we don't know what lifetime to instantiate it with.
+To fix this, spell out the precise lifetimes involved.
+Example:
+
+```
+pub trait Foo<T> {
+ type A;
+
+ fn get(&self, t: T) -> Self::A;
+}
+
+fn foo3<I : for<'x> Foo<&'x isize>>(
+ x: <I as Foo<&isize>>::A) {} // ok!
+
+fn foo4<'a, I : for<'x> Foo<&'x isize>>(
+ x: <I as Foo<&'a isize>>::A) {} // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0214.md b/compiler/rustc_error_codes/src/error_codes/E0214.md
new file mode 100644
index 000000000..b64ee80e2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0214.md
@@ -0,0 +1,17 @@
+A generic type was described using parentheses rather than angle brackets.
+
+Erroneous code example:
+
+```compile_fail,E0214
+let v: Vec(&str) = vec!["foo"];
+```
+
+This is not currently supported: `v` should be defined as `Vec<&str>`.
+Parentheses are currently only used with generic types when defining parameters
+for `Fn`-family traits.
+
+The previous code example fixed:
+
+```
+let v: Vec<&str> = vec!["foo"];
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0220.md b/compiler/rustc_error_codes/src/error_codes/E0220.md
new file mode 100644
index 000000000..ddc54007c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0220.md
@@ -0,0 +1,42 @@
+The associated type used was not defined in the trait.
+
+Erroneous code example:
+
+```compile_fail,E0220
+trait T1 {
+ type Bar;
+}
+
+type Foo = T1<F=i32>; // error: associated type `F` not found for `T1`
+
+// or:
+
+trait T2 {
+ type Bar;
+
+ // error: Baz is used but not declared
+ fn return_bool(&self, _: &Self::Bar, _: &Self::Baz) -> bool;
+}
+```
+
+Make sure that you have defined the associated type in the trait body.
+Also, verify that you used the right trait or you didn't misspell the
+associated type name. Example:
+
+```
+trait T1 {
+ type Bar;
+}
+
+type Foo = T1<Bar=i32>; // ok!
+
+// or:
+
+trait T2 {
+ type Bar;
+ type Baz; // we declare `Baz` in our trait.
+
+ // and now we can use it here:
+ fn return_bool(&self, _: &Self::Bar, _: &Self::Baz) -> bool;
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0221.md b/compiler/rustc_error_codes/src/error_codes/E0221.md
new file mode 100644
index 000000000..26111ca42
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0221.md
@@ -0,0 +1,44 @@
+An attempt was made to retrieve an associated type, but the type was ambiguous.
+
+Erroneous code example:
+
+```compile_fail,E0221
+trait T1 {}
+trait T2 {}
+
+trait Foo {
+ type A: T1;
+}
+
+trait Bar : Foo {
+ type A: T2;
+ fn do_something() {
+ let _: Self::A;
+ }
+}
+```
+
+In this example, `Foo` defines an associated type `A`. `Bar` inherits that type
+from `Foo`, and defines another associated type of the same name. As a result,
+when we attempt to use `Self::A`, it's ambiguous whether we mean the `A` defined
+by `Foo` or the one defined by `Bar`.
+
+There are two options to work around this issue. The first is simply to rename
+one of the types. Alternatively, one can specify the intended type using the
+following syntax:
+
+```
+trait T1 {}
+trait T2 {}
+
+trait Foo {
+ type A: T1;
+}
+
+trait Bar : Foo {
+ type A: T2;
+ fn do_something() {
+ let _: <Self as Bar>::A;
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0222.md b/compiler/rustc_error_codes/src/error_codes/E0222.md
new file mode 100644
index 000000000..f929f219a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0222.md
@@ -0,0 +1,52 @@
+An attempt was made to constrain an associated type.
+
+Erroneous code example:
+
+```compile_fail,E0222
+pub trait Vehicle {
+ type Color;
+}
+
+pub trait Box {
+ type Color;
+}
+
+pub trait BoxCar : Box + Vehicle {}
+
+fn dent_object<COLOR>(c: dyn BoxCar<Color=COLOR>) {} // Invalid constraint
+```
+
+In this example, `BoxCar` has two supertraits: `Vehicle` and `Box`. Both of
+these traits define an associated type `Color`. `BoxCar` inherits two types
+with that name from both supertraits. Because of this, we need to use the
+fully qualified path syntax to refer to the appropriate `Color` associated
+type, either `<BoxCar as Vehicle>::Color` or `<BoxCar as Box>::Color`, but
+this syntax cannot be used in a function signature.
+
+In order to encode this kind of constraint, a `where` clause and a new type
+parameter are needed:
+
+```
+pub trait Vehicle {
+ type Color;
+}
+
+pub trait Box {
+ type Color;
+}
+
+pub trait BoxCar : Box + Vehicle {}
+
+// Introduce a new `CAR` type parameter
+fn foo<CAR, COLOR>(
+ c: CAR,
+) where
+ // Bind the type parameter `CAR` to the trait `BoxCar`
+ CAR: BoxCar,
+ // Further restrict `<BoxCar as Vehicle>::Color` to be the same as the
+ // type parameter `COLOR`
+ CAR: Vehicle<Color = COLOR>,
+ // We can also simultaneously restrict the other trait's associated type
+ CAR: Box<Color = COLOR>
+{}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0223.md b/compiler/rustc_error_codes/src/error_codes/E0223.md
new file mode 100644
index 000000000..0d49f514c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0223.md
@@ -0,0 +1,33 @@
+An attempt was made to retrieve an associated type, but the type was ambiguous.
+
+Erroneous code example:
+
+```compile_fail,E0223
+trait MyTrait { type X; }
+
+fn main() {
+ let foo: MyTrait::X;
+}
+```
+
+The problem here is that we're attempting to take the type of `X` from
+`MyTrait`. Unfortunately, the type of `X` is not defined, because it's only
+made concrete in implementations of the trait. A working version of this code
+might look like:
+
+```
+trait MyTrait { type X; }
+struct MyStruct;
+
+impl MyTrait for MyStruct {
+ type X = u32;
+}
+
+fn main() {
+ let foo: <MyStruct as MyTrait>::X;
+}
+```
+
+This syntax specifies that we want the `X` type from `MyTrait`, as made
+concrete in `MyStruct`. The reason that we cannot simply use `MyStruct::X` is
+that `MyStruct` might implement two different traits with identically-named
+associated types. This syntax allows disambiguation between the two.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0224.md b/compiler/rustc_error_codes/src/error_codes/E0224.md
new file mode 100644
index 000000000..628488575
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0224.md
@@ -0,0 +1,15 @@
+A trait object was declared with no traits.
+
+Erroneous code example:
+
+```compile_fail,E0224
+type Foo = dyn 'static +;
+```
+
+Rust does not currently support this.
+
+To fix this, ensure that the trait object has at least one trait:
+
+```
+type Foo = dyn 'static + Copy;
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0225.md b/compiler/rustc_error_codes/src/error_codes/E0225.md
new file mode 100644
index 000000000..c306e7100
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0225.md
@@ -0,0 +1,21 @@
+Multiple types were used as bounds for a closure or trait object.
+
+Erroneous code example:
+
+```compile_fail,E0225
+fn main() {
+ let _: Box<dyn std::io::Read + std::io::Write>;
+}
+```
+
+Rust does not currently support this.
+
+Auto traits such as `Send` and `Sync` are an exception to this rule:
+it's possible to have bounds of one non-builtin trait, plus any number of
+auto traits. For example, the following compiles correctly:
+
+```
+fn main() {
+ let _: Box<dyn std::io::Read + Send + Sync>;
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0226.md b/compiler/rustc_error_codes/src/error_codes/E0226.md
new file mode 100644
index 000000000..4e65132ff
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0226.md
@@ -0,0 +1,21 @@
+More than one explicit lifetime bound was used on a trait object.
+
+Example of erroneous code:
+
+```compile_fail,E0226
+trait Foo {}
+
+type T<'a, 'b> = dyn Foo + 'a + 'b; // error: trait object has two
+                                    // lifetime bounds, `'a` and `'b`
+```
+
+Here `T` is a trait object with two explicit lifetime bounds, `'a` and `'b`.
+
+Only a single explicit lifetime bound is permitted on trait objects.
+To fix this error, consider removing one of the lifetime bounds:
+
+```
+trait Foo {}
+
+type T<'a> = dyn Foo + 'a;
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0227.md b/compiler/rustc_error_codes/src/error_codes/E0227.md
new file mode 100644
index 000000000..f68614723
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0227.md
@@ -0,0 +1,33 @@
+This error indicates that the compiler is unable to determine whether there is
+exactly one unique region in the set of derived region bounds.
+
+Example of erroneous code:
+
+```compile_fail,E0227
+trait Foo<'foo>: 'foo {}
+trait Bar<'bar>: 'bar {}
+
+trait FooBar<'foo, 'bar>: Foo<'foo> + Bar<'bar> {}
+
+struct Baz<'foo, 'bar> {
+ baz: dyn FooBar<'foo, 'bar>,
+}
+```
+
+Here, `baz` can have either the `'foo` or the `'bar` lifetime.
+
+To resolve this error, provide an explicit lifetime:
+
+```
+trait Foo<'foo>: 'foo {}
+trait Bar<'bar>: 'bar {}
+
+trait FooBar<'foo, 'bar>: Foo<'foo> + Bar<'bar> {}
+
+struct Baz<'foo, 'bar, 'baz>
+where
+ 'baz: 'foo + 'bar,
+{
+ obj: dyn FooBar<'foo, 'bar> + 'baz,
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0228.md b/compiler/rustc_error_codes/src/error_codes/E0228.md
new file mode 100644
index 000000000..3443a5ae8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0228.md
@@ -0,0 +1,40 @@
+The lifetime bound for this object type cannot be deduced from context and must
+be specified.
+
+Erroneous code example:
+
+```compile_fail,E0228
+trait Trait { }
+
+struct TwoBounds<'a, 'b, T: Sized + 'a + 'b> {
+ x: &'a i32,
+ y: &'b i32,
+ z: T,
+}
+
+type Foo<'a, 'b> = TwoBounds<'a, 'b, dyn Trait>;
+```
+
+When a trait object is used as a type argument of a generic type, Rust will try
+to infer its lifetime if unspecified. However, this isn't possible when the
+containing type has more than one lifetime bound.
+
+The above example can be resolved by either reducing the number of lifetime
+bounds to one or by making the trait object lifetime explicit, like so:
+
+```
+trait Trait { }
+
+struct TwoBounds<'a, 'b, T: Sized + 'a + 'b> {
+ x: &'a i32,
+ y: &'b i32,
+ z: T,
+}
+
+type Foo<'a, 'b> = TwoBounds<'a, 'b, dyn Trait + 'b>;
+```
+
+For more information, see [RFC 599] and its amendment [RFC 1156].
+
+[RFC 599]: https://github.com/rust-lang/rfcs/blob/master/text/0599-default-object-bound.md
+[RFC 1156]: https://github.com/rust-lang/rfcs/blob/master/text/1156-adjust-default-object-bounds.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0229.md b/compiler/rustc_error_codes/src/error_codes/E0229.md
new file mode 100644
index 000000000..a8fab057d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0229.md
@@ -0,0 +1,38 @@
+An associated type binding was done outside of the type parameter declaration
+and `where` clause.
+
+Erroneous code example:
+
+```compile_fail,E0229
+pub trait Foo {
+ type A;
+ fn boo(&self) -> <Self as Foo>::A;
+}
+
+struct Bar;
+
+impl Foo for isize {
+ type A = usize;
+ fn boo(&self) -> usize { 42 }
+}
+
+fn baz<I>(x: &<I as Foo<A=Bar>>::A) {}
+// error: associated type bindings are not allowed here
+```
+
+To solve this error, please move the type bindings into the type parameter
+declaration:
+
+```
+# struct Bar;
+# trait Foo { type A; }
+fn baz<I: Foo<A=Bar>>(x: &<I as Foo>::A) {} // ok!
+```
+
+Or in the `where` clause:
+
+```
+# struct Bar;
+# trait Foo { type A; }
+fn baz<I>(x: &<I as Foo>::A) where I: Foo<A=Bar> {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0230.md b/compiler/rustc_error_codes/src/error_codes/E0230.md
new file mode 100644
index 000000000..cfb72e743
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0230.md
@@ -0,0 +1,25 @@
+The `#[rustc_on_unimplemented]` attribute lets you specify a custom error
+message for when a particular trait isn't implemented on a type placed in a
+position that needs that trait. For example, when the following code is
+compiled:
+
+```compile_fail,E0230
+#![feature(rustc_attrs)]
+
+#[rustc_on_unimplemented = "error on `{Self}` with params `<{A},{B}>`"] // error
+trait BadAnnotation<A> {}
+```
+
+the compiler rejects the attribute, because `{B}` does not name a type
+parameter of `BadAnnotation`.
+
+Inside the message, you can specify type parameters in curly braces for
+substitution with the actual types (using the regular format string syntax).
+Furthermore, `{Self}` will substitute to the type the trait was expected to
+be implemented on. For instance, the standard library uses this attribute on
+the `Index` trait, so that indexing a `bool` with a `u8` produces an error
+about `bool` not implementing `Index<u8>`, followed by a note saying "the
+type `bool` cannot be indexed by `u8`".
+
+This error appears when the curly braces contain an identifier which doesn't
+match with any of the type parameters or the string `Self`. This might happen
+if you misspelled a type parameter, or if you intended to use literal curly
+braces. If it is the latter, escape the curly braces with a second curly brace
+of the same type; e.g., a literal `{` is `{{`.
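+
+As a sketch of a valid annotation (the trait name is illustrative), every
+placeholder below names an actual type parameter or `Self`:
+
+```
+#![feature(rustc_attrs)]
+
+#[rustc_on_unimplemented = "error on `{Self}` with param `{A}`"] // ok!
+trait GoodAnnotation<A> {}
+```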
diff --git a/compiler/rustc_error_codes/src/error_codes/E0231.md b/compiler/rustc_error_codes/src/error_codes/E0231.md
new file mode 100644
index 000000000..23a0a88ec
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0231.md
@@ -0,0 +1,23 @@
+The `#[rustc_on_unimplemented]` attribute lets you specify a custom error
+message for when a particular trait isn't implemented on a type placed in a
+position that needs that trait. For example, when the following code is
+compiled:
+
+```compile_fail,E0231
+#![feature(rustc_attrs)]
+
+#[rustc_on_unimplemented = "error on `{Self}` with params `<{A},{}>`"] // error!
+trait BadAnnotation<A> {}
+```
+
+the compiler rejects the attribute, because the curly braces contain no
+identifier to substitute.
+
+Inside the message, you can specify type parameters in curly braces for
+substitution with the actual types (using the regular format string syntax).
+Furthermore, `{Self}` will substitute to the type the trait was expected to
+be implemented on. For instance, the standard library uses this attribute on
+the `Index` trait, so that indexing a `bool` with a `u8` produces an error
+about `bool` not implementing `Index<u8>`, followed by a note saying "the
+type `bool` cannot be indexed by `u8`".
+
+This error appears when the curly braces do not contain an identifier. Please
+add one of the same name as a type parameter. If you intended to use literal
+braces, use `{{` and `}}` to escape them.
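+
+As a sketch of a valid annotation (the trait name is illustrative), the curly
+braces below contain an identifier naming a type parameter:
+
+```
+#![feature(rustc_attrs)]
+
+#[rustc_on_unimplemented = "error on `{Self}` with param `{A}`"] // ok!
+trait GoodAnnotation<A> {}
+```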
diff --git a/compiler/rustc_error_codes/src/error_codes/E0232.md b/compiler/rustc_error_codes/src/error_codes/E0232.md
new file mode 100644
index 000000000..b310caefa
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0232.md
@@ -0,0 +1,18 @@
+The `#[rustc_on_unimplemented]` attribute lets you specify a custom error
+message for when a particular trait isn't implemented on a type placed in a
+position that needs that trait. For example, when the following code is
+compiled:
+
+```compile_fail,E0232
+#![feature(rustc_attrs)]
+
+#[rustc_on_unimplemented(lorem="")] // error!
+trait BadAnnotation {}
+```
+
+the compiler rejects the attribute, because `lorem=""` does not provide a
+valid value.
+
+For this to work, some note must be specified. An empty attribute will not do
+anything; please remove the attribute or add some helpful note for users of
+the trait.
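+
+As a sketch of one valid form, using the attribute's `message` option (the
+trait name and message are illustrative):
+
+```
+#![feature(rustc_attrs)]
+
+#[rustc_on_unimplemented(message = "my custom error message")] // ok!
+trait GoodAnnotation {}
+```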
diff --git a/compiler/rustc_error_codes/src/error_codes/E0243.md b/compiler/rustc_error_codes/src/error_codes/E0243.md
new file mode 100644
index 000000000..5d3d1828b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0243.md
@@ -0,0 +1,13 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+This error indicates that not enough type parameters were found in a type or
+trait.
+
+For example, the `Foo` struct below is defined to be generic in `T`, but the
+type parameter is missing in the definition of `Bar`:
+
+```compile_fail,E0107
+struct Foo<T> { x: T }
+
+struct Bar { x: Foo }
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0244.md b/compiler/rustc_error_codes/src/error_codes/E0244.md
new file mode 100644
index 000000000..5187b7b05
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0244.md
@@ -0,0 +1,13 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+This error indicates that too many type parameters were found in a type or
+trait.
+
+For example, the `Foo` struct below has no type parameters, but is supplied
+with two in the definition of `Bar`:
+
+```compile_fail,E0107
+struct Foo { x: bool }
+
+struct Bar<S, T> { x: Foo<S, T> }
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0251.md b/compiler/rustc_error_codes/src/error_codes/E0251.md
new file mode 100644
index 000000000..4121dd278
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0251.md
@@ -0,0 +1,21 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Two items of the same name cannot be imported without rebinding one of the
+items under a new local name.
+
+An example of this error:
+
+```
+use foo::baz;
+use bar::*; // error, do `use foo::baz as quux` instead on the previous line
+
+fn main() {}
+
+mod foo {
+ pub struct baz;
+}
+
+mod bar {
+ pub mod baz {}
+}
+```
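+
+As the comment suggests, a sketch of a fix that rebinds one of the items under
+a new local name:
+
+```
+use foo::baz as quux; // ok!
+use bar::*;
+
+fn main() {}
+
+mod foo {
+    pub struct baz;
+}
+
+mod bar {
+    pub mod baz {}
+}
+```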
diff --git a/compiler/rustc_error_codes/src/error_codes/E0252.md b/compiler/rustc_error_codes/src/error_codes/E0252.md
new file mode 100644
index 000000000..e67894465
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0252.md
@@ -0,0 +1,54 @@
+Two items of the same name cannot be imported without rebinding one of the
+items under a new local name.
+
+Erroneous code example:
+
+```compile_fail,E0252
+use foo::baz;
+use bar::baz; // error, do `use bar::baz as quux` instead
+
+fn main() {}
+
+mod foo {
+ pub struct baz;
+}
+
+mod bar {
+ pub mod baz {}
+}
+```
+
+You can use aliases in order to fix this error. Example:
+
+```
+use foo::baz as foo_baz;
+use bar::baz; // ok!
+
+fn main() {}
+
+mod foo {
+ pub struct baz;
+}
+
+mod bar {
+ pub mod baz {}
+}
+```
+
+Or you can refer to the item through its parent:
+
+```
+use bar::baz;
+
+fn main() {
+ let x = foo::baz; // ok!
+}
+
+mod foo {
+ pub struct baz;
+}
+
+mod bar {
+ pub mod baz {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0253.md b/compiler/rustc_error_codes/src/error_codes/E0253.md
new file mode 100644
index 000000000..aea51d402
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0253.md
@@ -0,0 +1,19 @@
+An attempt was made to import an unimportable value. This can happen when
+trying to import a method from a trait.
+
+Erroneous code example:
+
+```compile_fail,E0253
+mod foo {
+ pub trait MyTrait {
+ fn do_something();
+ }
+}
+
+use foo::MyTrait::do_something;
+// error: `do_something` is not directly importable
+
+fn main() {}
+```
+
+It's invalid to directly import methods belonging to a trait or concrete type.
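+
+A sketch of a working alternative: import the trait itself instead of one of
+its methods:
+
+```
+mod foo {
+    pub trait MyTrait {
+        fn do_something();
+    }
+}
+
+use foo::MyTrait; // ok!
+
+fn main() {}
+```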
diff --git a/compiler/rustc_error_codes/src/error_codes/E0254.md b/compiler/rustc_error_codes/src/error_codes/E0254.md
new file mode 100644
index 000000000..44383ed6e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0254.md
@@ -0,0 +1,36 @@
+An attempt was made to import an item, but an extern crate with the same name
+has already been imported.
+
+Erroneous code example:
+
+```compile_fail,E0254
+extern crate core;
+
+mod foo {
+ pub trait core {
+ fn do_something();
+ }
+}
+
+use foo::core; // error: an extern crate named `core` has already
+ // been imported in this module
+
+fn main() {}
+```
+
+To fix this issue, you have to rename at least one of the two imports.
+Example:
+
+```
+extern crate core as libcore; // ok!
+
+mod foo {
+ pub trait core {
+ fn do_something();
+ }
+}
+
+use foo::core;
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0255.md b/compiler/rustc_error_codes/src/error_codes/E0255.md
new file mode 100644
index 000000000..83f5ec3dd
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0255.md
@@ -0,0 +1,44 @@
+You can't import a value whose name is the same as another value defined in the
+module.
+
+Erroneous code example:
+
+```compile_fail,E0255
+use bar::foo; // error: an item named `foo` is already in scope
+
+fn foo() {}
+
+mod bar {
+ pub fn foo() {}
+}
+
+fn main() {}
+```
+
+You can use aliases in order to fix this error. Example:
+
+```
+use bar::foo as bar_foo; // ok!
+
+fn foo() {}
+
+mod bar {
+ pub fn foo() {}
+}
+
+fn main() {}
+```
+
+Or you can refer to the item through its parent:
+
+```
+fn foo() {}
+
+mod bar {
+ pub fn foo() {}
+}
+
+fn main() {
+ bar::foo(); // we get the item by referring to its parent
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0256.md b/compiler/rustc_error_codes/src/error_codes/E0256.md
new file mode 100644
index 000000000..385376cda
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0256.md
@@ -0,0 +1,18 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+You can't import a type or module when the name of the item being imported is
+the same as another type or submodule defined in the module.
+
+An example of this error:
+
+```compile_fail
+use foo::Bar; // error
+
+type Bar = u32;
+
+mod foo {
+ pub mod Bar { }
+}
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0259.md b/compiler/rustc_error_codes/src/error_codes/E0259.md
new file mode 100644
index 000000000..8d8f93db3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0259.md
@@ -0,0 +1,23 @@
+The name chosen for an external crate conflicts with another external crate
+that has been imported into the current module.
+
+Erroneous code example:
+
+```compile_fail,E0259
+extern crate core;
+extern crate std as core;
+
+fn main() {}
+```
+
+The solution is to choose a different name that doesn't conflict with any
+external crate imported into the current module.
+
+Correct example:
+
+```
+extern crate core;
+extern crate std as other_name;
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0260.md b/compiler/rustc_error_codes/src/error_codes/E0260.md
new file mode 100644
index 000000000..b8bdb81fc
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0260.md
@@ -0,0 +1,35 @@
+The name for an item declaration conflicts with an external crate's name.
+
+Erroneous code example:
+
+```compile_fail,E0260
+extern crate core;
+
+struct core;
+
+fn main() {}
+```
+
+There are two possible solutions:
+
+Solution #1: Rename the item.
+
+```
+extern crate core;
+
+struct xyz;
+```
+
+Solution #2: Import the crate with a different name.
+
+```
+extern crate core as xyz;
+
+struct abc;
+```
+
+See the [Declaration Statements][declaration-statements] section of the
+reference for more information about what constitutes an item declaration
+and what does not.
+
+[declaration-statements]: https://doc.rust-lang.org/reference/statements.html#declaration-statements
diff --git a/compiler/rustc_error_codes/src/error_codes/E0261.md b/compiler/rustc_error_codes/src/error_codes/E0261.md
new file mode 100644
index 000000000..e32684373
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0261.md
@@ -0,0 +1,52 @@
+An undeclared lifetime was used.
+
+Erroneous code example:
+
+```compile_fail,E0261
+// error, use of undeclared lifetime name `'a`
+fn foo(x: &'a str) { }
+
+struct Foo {
+ // error, use of undeclared lifetime name `'a`
+ x: &'a str,
+}
+```
+
+These can be fixed by declaring lifetime parameters:
+
+```
+struct Foo<'a> {
+ x: &'a str,
+}
+
+fn foo<'a>(x: &'a str) {}
+```
+
+Impl blocks declare lifetime parameters separately. You need to add lifetime
+parameters to an impl block if you're implementing a type that has a lifetime
+parameter of its own.
+For example:
+
+```compile_fail,E0261
+struct Foo<'a> {
+ x: &'a str,
+}
+
+// error, use of undeclared lifetime name `'a`
+impl Foo<'a> {
+ fn foo<'a>(x: &'a str) {}
+}
+```
+
+This is fixed by declaring the impl block like this:
+
+```
+struct Foo<'a> {
+ x: &'a str,
+}
+
+// correct
+impl<'a> Foo<'a> {
+ fn foo(x: &'a str) {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0262.md b/compiler/rustc_error_codes/src/error_codes/E0262.md
new file mode 100644
index 000000000..67419d53e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0262.md
@@ -0,0 +1,12 @@
+An invalid name was used for a lifetime parameter.
+
+Erroneous code example:
+
+```compile_fail,E0262
+// error, invalid lifetime parameter name `'static`
+fn foo<'static>(x: &'static str) { }
+```
+
+Declaring certain lifetime names in parameters is disallowed. For example,
+the `'static` lifetime is a special built-in lifetime name denoting the
+lifetime of the entire program, so declaring it as a lifetime parameter, as
+in the example above, is an error.
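+
+A sketch that compiles, using a lifetime name that is not reserved:
+
+```
+fn foo<'a>(x: &'a str) { } // ok!
+```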
diff --git a/compiler/rustc_error_codes/src/error_codes/E0263.md b/compiler/rustc_error_codes/src/error_codes/E0263.md
new file mode 100644
index 000000000..2d1ac4026
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0263.md
@@ -0,0 +1,18 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+A lifetime was declared more than once in the same scope.
+
+Erroneous code example:
+
+```compile_fail,E0403
+fn foo<'a, 'b, 'a>(x: &'a str, y: &'b str, z: &'a str) { // error!
+}
+```
+
+Two lifetimes cannot have the same name. To fix this example, change
+the second `'a` lifetime into something else (`'c` for example):
+
+```
+fn foo<'a, 'b, 'c>(x: &'a str, y: &'b str, z: &'c str) { // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0264.md b/compiler/rustc_error_codes/src/error_codes/E0264.md
new file mode 100644
index 000000000..e2a27f7b1
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0264.md
@@ -0,0 +1,24 @@
+An unknown external lang item was used.
+
+Erroneous code example:
+
+```compile_fail,E0264
+#![feature(lang_items)]
+
+extern "C" {
+ #[lang = "cake"] // error: unknown external lang item: `cake`
+ fn cake();
+}
+```
+
+A list of available external lang items can be found in
+`compiler/rustc_middle/src/middle/weak_lang_items.rs`. Example:
+
+```
+#![feature(lang_items)]
+
+extern "C" {
+ #[lang = "panic_impl"] // ok!
+ fn cake();
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0267.md b/compiler/rustc_error_codes/src/error_codes/E0267.md
new file mode 100644
index 000000000..951490df8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0267.md
@@ -0,0 +1,22 @@
+A loop keyword (`break` or `continue`) was used inside a closure but outside of
+any loop.
+
+Erroneous code example:
+
+```compile_fail,E0267
+let w = || { break; }; // error: `break` inside of a closure
+```
+
+The `break` and `continue` keywords can be used as normal inside closures as
+long as they are also contained within a loop. To halt the execution of a
+closure you should instead use a `return` statement. Example:
+
+```
+let w = || {
+ for _ in 0..10 {
+ break;
+ }
+};
+
+w();
+```
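+
+A sketch of halting a closure with `return` instead:
+
+```
+let w = || {
+    return; // ok: stops executing the closure
+};
+
+w();
+```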
diff --git a/compiler/rustc_error_codes/src/error_codes/E0268.md b/compiler/rustc_error_codes/src/error_codes/E0268.md
new file mode 100644
index 000000000..436aef79f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0268.md
@@ -0,0 +1,20 @@
+A loop keyword (`break` or `continue`) was used outside of a loop.
+
+Erroneous code example:
+
+```compile_fail,E0268
+fn some_func() {
+ break; // error: `break` outside of a loop
+}
+```
+
+Without a loop to break out of or continue in, no sensible action can be taken.
+Please verify that you are using `break` and `continue` only in loops. Example:
+
+```
+fn some_func() {
+ for _ in 0..10 {
+ break; // ok!
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0271.md b/compiler/rustc_error_codes/src/error_codes/E0271.md
new file mode 100644
index 000000000..ddd245b1a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0271.md
@@ -0,0 +1,65 @@
+A type did not match the associated type of a trait.
+
+Erroneous code example:
+
+```compile_fail,E0271
+trait Trait { type AssociatedType; }
+
+fn foo<T>(t: T) where T: Trait<AssociatedType=u32> {
+// ~~~~~~~~ ~~~~~~~~~~~~~~~~~~
+// | |
+// This says `foo` can |
+// only be used with |
+// some type that |
+// implements `Trait`. |
+// |
+// This says not only must
+// `T` be an impl of `Trait`
+// but also that the impl
+// must assign the type `u32`
+// to the associated type.
+ println!("in foo");
+}
+
+impl Trait for i8 { type AssociatedType = &'static str; }
+//~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+// | |
+// `i8` does have |
+// implementation |
+// of `Trait`... |
+// ... but it is an implementation
+// that assigns `&'static str` to
+// the associated type.
+
+foo(3_i8);
+// Here, we invoke `foo` with an `i8`, which does not satisfy
+// the constraint `<i8 as Trait>::AssociatedType=u32`, and
+// therefore the type-checker complains with this error code.
+```
+
+The issue can be resolved by changing the associated type:
+1) in the `where` clause of `foo`:
+```
+trait Trait { type AssociatedType; }
+
+fn foo<T>(t: T) where T: Trait<AssociatedType = &'static str> {
+ println!("in foo");
+}
+
+impl Trait for i8 { type AssociatedType = &'static str; }
+
+foo(3_i8);
+```
+
+2) in the `Trait` implementation for `i8`:
+```
+trait Trait { type AssociatedType; }
+
+fn foo<T>(t: T) where T: Trait<AssociatedType = u32> {
+ println!("in foo");
+}
+
+impl Trait for i8 { type AssociatedType = u32; }
+
+foo(3_i8);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0275.md b/compiler/rustc_error_codes/src/error_codes/E0275.md
new file mode 100644
index 000000000..2d12fcea4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0275.md
@@ -0,0 +1,22 @@
+An evaluation of a trait requirement overflowed.
+
+Erroneous code example:
+
+```compile_fail,E0275
+trait Foo {}
+
+struct Bar<T>(T);
+
+impl<T> Foo for T where Bar<T>: Foo {}
+```
+
+This error occurs when there was a recursive trait requirement that overflowed
+before it could be evaluated. This often means that there is an unbounded
+recursion in resolving some type bounds.
+
+To determine if a `T` is `Foo`, we need to check if `Bar<T>` is `Foo`. However,
+to do this check, we need to determine that `Bar<Bar<T>>` is `Foo`. To
+determine this, we check if `Bar<Bar<Bar<T>>>` is `Foo`, and so on. This is
+clearly a recursive requirement that can't be resolved directly.
+
+Consider changing your trait bounds so that they're less self-referential.
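+
+For instance, a sketch of a bound that bottoms out instead of recursing:
+
+```
+trait Foo {}
+
+struct Bar<T>(T);
+
+// ok: `Bar<T>` is `Foo` only when `T` itself already is
+impl<T: Foo> Foo for Bar<T> {}
+```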
diff --git a/compiler/rustc_error_codes/src/error_codes/E0276.md b/compiler/rustc_error_codes/src/error_codes/E0276.md
new file mode 100644
index 000000000..ad76968c5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0276.md
@@ -0,0 +1,20 @@
+A trait implementation has stricter requirements than the trait definition.
+
+Erroneous code example:
+
+```compile_fail,E0276
+trait Foo {
+ fn foo<T>(x: T);
+}
+
+impl Foo for bool {
+ fn foo<T>(x: T) where T: Copy {}
+}
+```
+
+Here, all types implementing `Foo` must have a method `foo<T>(x: T)` which can
+take any type `T`. However, in the `impl` for `bool`, we have added an extra
+bound that `T` is `Copy`, which isn't compatible with the original trait.
+
+Consider removing the bound from the method or adding the bound to the original
+method definition in the trait.
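+
+A sketch of the first fix, removing the extra bound from the method:
+
+```
+trait Foo {
+    fn foo<T>(x: T);
+}
+
+impl Foo for bool {
+    fn foo<T>(x: T) {} // ok!
+}
+```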
diff --git a/compiler/rustc_error_codes/src/error_codes/E0277.md b/compiler/rustc_error_codes/src/error_codes/E0277.md
new file mode 100644
index 000000000..5f05b59d5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0277.md
@@ -0,0 +1,87 @@
+You tried to use a type which doesn't implement some trait in a place which
+expected that trait.
+
+Erroneous code example:
+
+```compile_fail,E0277
+// here we declare the Foo trait with a bar method
+trait Foo {
+ fn bar(&self);
+}
+
+// we now declare a function which takes an object implementing the Foo trait
+fn some_func<T: Foo>(foo: T) {
+ foo.bar();
+}
+
+fn main() {
+ // we now call the method with the i32 type, which doesn't implement
+ // the Foo trait
+    some_func(5i32); // error: the trait bound `i32: Foo` is not satisfied
+}
+```
+
+In order to fix this error, verify that the type you're using does implement
+the trait. Example:
+
+```
+trait Foo {
+ fn bar(&self);
+}
+
+// we implement the trait on the i32 type
+impl Foo for i32 {
+ fn bar(&self) {}
+}
+
+fn some_func<T: Foo>(foo: T) {
+ foo.bar(); // we can now use this method since i32 implements the
+ // Foo trait
+}
+
+fn main() {
+ some_func(5i32); // ok!
+}
+```
+
+Or in a generic context, an erroneous code example would look like:
+
+```compile_fail,E0277
+fn some_func<T>(foo: T) {
+ println!("{:?}", foo); // error: the trait `core::fmt::Debug` is not
+ // implemented for the type `T`
+}
+
+fn main() {
+ // We now call the method with the i32 type,
+ // which *does* implement the Debug trait.
+ some_func(5i32);
+}
+```
+
+Note that the error here is in the definition of the generic function. Although
+we only call it with a parameter that does implement `Debug`, the compiler
+still rejects the function. It must work with all possible input types. In
+order to make this example compile, we need to restrict the generic type we're
+accepting:
+
+```
+use std::fmt;
+
+// Restrict the input type to types that implement Debug.
+fn some_func<T: fmt::Debug>(foo: T) {
+ println!("{:?}", foo);
+}
+
+fn main() {
+ // Calling the method is still fine, as i32 implements Debug.
+ some_func(5i32);
+
+ // This would fail to compile now:
+ // struct WithoutDebug;
+ // some_func(WithoutDebug);
+}
+```
+
+Rust only looks at the signature of the called function; as such, it must
+already specify all requirements that will be used for every type parameter.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0281.md b/compiler/rustc_error_codes/src/error_codes/E0281.md
new file mode 100644
index 000000000..1d7904b67
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0281.md
@@ -0,0 +1,20 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+You tried to supply a type which doesn't implement some trait in a location
+which expected that trait. This error typically occurs when working with
+`Fn`-based types. Erroneous code example:
+
+```compile_fail
+fn foo<F: Fn(usize)>(x: F) { }
+
+fn main() {
+ // type mismatch: ... implements the trait `core::ops::Fn<(String,)>`,
+ // but the trait `core::ops::Fn<(usize,)>` is required
+ // [E0281]
+ foo(|y: String| { });
+}
+```
+
+The issue in this case is that `foo` is defined as accepting a `Fn` with one
+argument of type `usize`, but the closure we attempted to pass to it takes
+one argument of type `String`.
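+
+A sketch of the call with a closure whose argument type matches:
+
+```
+fn foo<F: Fn(usize)>(x: F) { }
+
+fn main() {
+    foo(|y: usize| { }); // ok!
+}
+```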
diff --git a/compiler/rustc_error_codes/src/error_codes/E0282.md b/compiler/rustc_error_codes/src/error_codes/E0282.md
new file mode 100644
index 000000000..49d2205f9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0282.md
@@ -0,0 +1,69 @@
+The compiler could not infer a type and asked for a type annotation.
+
+Erroneous code example:
+
+```compile_fail,E0282
+let x = "hello".chars().rev().collect();
+```
+
+This error indicates that type inference did not result in one unique possible
+type, and extra information is required. In most cases this can be provided
+by adding a type annotation. Sometimes you need to specify a generic type
+parameter manually.
+
+A common example is the `collect` method on `Iterator`. It has a generic type
+parameter with a `FromIterator` bound, which for a `char` iterator is
+implemented by `Vec` and `String` among others. The first code example above
+reverses the characters of a string, and the compiler cannot infer what the
+type of `x` should be: `Vec<char>` and `String` are both suitable candidates.
+To specify which type to use, you can use a type annotation on `x`:
+
+```
+let x: Vec<char> = "hello".chars().rev().collect();
+```
+
+It is not necessary to annotate the full type. Once the ambiguity is resolved,
+the compiler can infer the rest:
+
+```
+let x: Vec<_> = "hello".chars().rev().collect();
+```
+
+Another way to provide the compiler with enough information, is to specify the
+generic type parameter:
+
+```
+let x = "hello".chars().rev().collect::<Vec<char>>();
+```
+
+Again, you need not specify the full type if the compiler can infer it:
+
+```
+let x = "hello".chars().rev().collect::<Vec<_>>();
+```
+
+Apart from a method or function with a generic type parameter, this error can
+occur when a type parameter of a struct or trait cannot be inferred. In that
+case it is not always possible to use a type annotation, because all candidates
+have the same return type. For instance:
+
+```compile_fail,E0282
+struct Foo<T> {
+ num: T,
+}
+
+impl<T> Foo<T> {
+ fn bar() -> i32 {
+ 0
+ }
+
+ fn baz() {
+ let number = Foo::bar();
+ }
+}
+```
+
+This will fail because the compiler does not know which instance of `Foo` to
+call `bar` on. Change `Foo::bar()` to `Foo::<T>::bar()` to resolve the error.
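+
+A sketch of that fix:
+
+```
+struct Foo<T> {
+    num: T,
+}
+
+impl<T> Foo<T> {
+    fn bar() -> i32 {
+        0
+    }
+
+    fn baz() {
+        let number = Foo::<T>::bar(); // ok!
+    }
+}
+```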
diff --git a/compiler/rustc_error_codes/src/error_codes/E0283.md b/compiler/rustc_error_codes/src/error_codes/E0283.md
new file mode 100644
index 000000000..79d2c8204
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0283.md
@@ -0,0 +1,29 @@
+An implementation cannot be chosen unambiguously due to a lack of information.
+
+Erroneous code example:
+
+```compile_fail,E0283
+struct Foo;
+
+impl Into<u32> for Foo {
+ fn into(self) -> u32 { 1 }
+}
+
+let foo = Foo;
+let bar: u32 = foo.into() * 1u32;
+```
+
+This error can be solved by adding type annotations that provide the missing
+information to the compiler. In this case, the solution is to specify the
+trait's type parameter:
+
+```
+struct Foo;
+
+impl Into<u32> for Foo {
+ fn into(self) -> u32 { 1 }
+}
+
+let foo = Foo;
+let bar: u32 = Into::<u32>::into(foo) * 1u32;
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0284.md b/compiler/rustc_error_codes/src/error_codes/E0284.md
new file mode 100644
index 000000000..5a92f8352
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0284.md
@@ -0,0 +1,32 @@
+This error occurs when the compiler is unable to unambiguously infer the
+return type of a function or method which is generic on return type, such
+as the `collect` method for `Iterator`s.
+
+For example:
+
+```compile_fail,E0284
+fn main() {
+ let n: u32 = 1;
+ let mut d: u64 = 2;
+ d = d + n.into();
+}
+```
+
+Here we have an addition of `d` and `n.into()`. For the addition to
+typecheck, `n.into()` must return some type `T` such that `u64: Add<T>`. On
+the other hand, the `into` method can return any type `T` where
+`u32: Into<T>`.
+
+The author of this code probably wants `into()` to return a `u64`, but the
+compiler can't be sure that there isn't another type `T` where both
+`u32: Into<T>` and `u64: Add<T>`.
+
+To resolve this error, use a concrete type for the intermediate expression:
+
+```
+fn main() {
+ let n: u32 = 1;
+ let mut d: u64 = 2;
+ let m: u64 = n.into();
+ d = d + m;
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0297.md b/compiler/rustc_error_codes/src/error_codes/E0297.md
new file mode 100644
index 000000000..66c31376d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0297.md
@@ -0,0 +1,40 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Patterns used to bind names must be irrefutable. That is, they must guarantee
+that a name will be extracted in all cases. Instead of pattern matching the
+loop variable, consider using a `match` or `if let` inside the loop body. For
+instance:
+
+```compile_fail,E0005
+let xs : Vec<Option<i32>> = vec![Some(1), None];
+
+// This fails because `None` is not covered.
+for Some(x) in xs {
+ // ...
+}
+```
+
+Match inside the loop instead:
+
+```
+let xs : Vec<Option<i32>> = vec![Some(1), None];
+
+for item in xs {
+ match item {
+ Some(x) => {},
+ None => {},
+ }
+}
+```
+
+Or use `if let`:
+
+```
+let xs : Vec<Option<i32>> = vec![Some(1), None];
+
+for item in xs {
+ if let Some(x) = item {
+ // ...
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0301.md b/compiler/rustc_error_codes/src/error_codes/E0301.md
new file mode 100644
index 000000000..485e19fbb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0301.md
@@ -0,0 +1,17 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Mutable borrows are not allowed in pattern guards, because matching cannot have
+side effects. Side effects could alter the matched object or the environment
+on which the match depends in such a way, that the match would not be
+exhaustive. For instance, the following would not match any arm if mutable
+borrows were allowed:
+
+```compile_fail,E0596
+match Some(()) {
+ None => { },
+ option if option.take().is_none() => {
+ /* impossible, option is `Some` */
+ },
+ Some(_) => { } // When the previous match failed, the option became `None`.
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0302.md b/compiler/rustc_error_codes/src/error_codes/E0302.md
new file mode 100644
index 000000000..e6ac9d590
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0302.md
@@ -0,0 +1,15 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Assignments are not allowed in pattern guards, because matching cannot have
+side effects. Side effects could alter the matched object or the environment
+on which the match depends in such a way that the match would not be
+exhaustive. For instance, the following would not match any arm if assignments
+were allowed:
+
+```compile_fail,E0594
+match Some(()) {
+ None => { },
+ option if { option = None; false } => { },
+ Some(_) => { } // When the previous match failed, the option became `None`.
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0303.md b/compiler/rustc_error_codes/src/error_codes/E0303.md
new file mode 100644
index 000000000..459906047
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0303.md
@@ -0,0 +1,38 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Sub-bindings, e.g. `ref x @ Some(ref y)`, are now allowed under
+`#![feature(bindings_after_at)]` and checked to make sure that
+memory safety is upheld.
+
+--------------
+
+In certain cases it is possible for sub-bindings to violate memory safety.
+Updates to the borrow checker in a future version of Rust may remove this
+restriction, but for now patterns must be rewritten without sub-bindings.
+
+Before:
+
+```compile_fail
+match Some("hi".to_string()) {
+ ref op_string_ref @ Some(s) => {},
+ None => {},
+}
+```
+
+After:
+
+```
+match Some("hi".to_string()) {
+ Some(ref s) => {
+ let op_string_ref = &Some(s);
+ // ...
+ },
+ None => {},
+}
+```
+
+The `op_string_ref` binding has type `&Option<&String>` in both cases.
+
+See also [Issue 14587][issue-14587].
+
+[issue-14587]: https://github.com/rust-lang/rust/issues/14587
diff --git a/compiler/rustc_error_codes/src/error_codes/E0307.md b/compiler/rustc_error_codes/src/error_codes/E0307.md
new file mode 100644
index 000000000..0d29d56ea
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0307.md
@@ -0,0 +1,92 @@
+The `self` parameter in a method has an invalid "receiver type".
+
+Erroneous code example:
+
+```compile_fail,E0307
+struct Foo;
+struct Bar;
+
+trait Trait {
+ fn foo(&self);
+}
+
+impl Trait for Foo {
+ fn foo(self: &Bar) {}
+}
+```
+
+Methods take a special first parameter, of which there are three variants:
+`self`, `&self`, and `&mut self`. These are syntactic sugar for
+`self: Self`, `self: &Self`, and `self: &mut Self` respectively.
+
+```
+# struct Foo;
+trait Trait {
+ fn foo(&self);
+// ^^^^^ `self` here is a reference to the receiver object
+}
+
+impl Trait for Foo {
+ fn foo(&self) {}
+// ^^^^^ the receiver type is `&Foo`
+}
+```
+
+The type `Self` acts as an alias for the type of the current trait
+implementer, or "receiver type". Besides the already mentioned `Self`,
+`&Self`, and `&mut Self` receiver types, the following are also valid:
+`self: Box<Self>`, `self: Rc<Self>`, `self: Arc<Self>`, and `self: Pin<P>`
+(where `P` is one of the previous types except `Self`). Note that `Self` can
+also be the underlying implementing type, like `Foo` in the following
+example:
+
+```
+# struct Foo;
+# trait Trait {
+# fn foo(&self);
+# }
+impl Trait for Foo {
+ fn foo(self: &Foo) {}
+}
+```
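+
+A minimal sketch (not part of the original example set) of one of the
+smart-pointer receivers listed above, `self: Rc<Self>`:
+
+```
+use std::rc::Rc;
+
+struct Foo;
+
+impl Foo {
+    // `Rc<Self>` is an accepted receiver type
+    fn foo(self: Rc<Self>) {}
+}
+
+fn main() {
+    Rc::new(Foo).foo();
+}
+```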
+
+This error will be emitted by the compiler when using an invalid receiver type,
+like in the following example:
+
+```compile_fail,E0307
+# struct Foo;
+# struct Bar;
+# trait Trait {
+# fn foo(&self);
+# }
+impl Trait for Foo {
+ fn foo(self: &Bar) {}
+}
+```
+
+The nightly feature [Arbitrary self types][AST] extends the accepted
+set of receiver types to also include any type that can dereference to
+`Self`:
+
+```
+#![feature(arbitrary_self_types)]
+
+struct Foo;
+struct Bar;
+
+// Because you can dereference `Bar` into `Foo`...
+impl std::ops::Deref for Bar {
+ type Target = Foo;
+
+ fn deref(&self) -> &Foo {
+ &Foo
+ }
+}
+
+impl Foo {
+ fn foo(self: Bar) {}
+// ^^^^^^^^^ ...it can be used as the receiver type
+}
+```
+
+[AST]: https://doc.rust-lang.org/unstable-book/language-features/arbitrary-self-types.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0308.md b/compiler/rustc_error_codes/src/error_codes/E0308.md
new file mode 100644
index 000000000..decee6309
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0308.md
@@ -0,0 +1,26 @@
+Expected type did not match the received type.
+
+Erroneous code examples:
+
+```compile_fail,E0308
+fn plus_one(x: i32) -> i32 {
+ x + 1
+}
+
+plus_one("Not a number");
+// ^^^^^^^^^^^^^^ expected `i32`, found `&str`
+
+if "Not a bool" {
+// ^^^^^^^^^^^^ expected `bool`, found `&str`
+}
+
+let x: f32 = "Not a float";
+// --- ^^^^^^^^^^^^^ expected `f32`, found `&str`
+// |
+// expected due to this
+```
+
+This error occurs when an expression was used in a place where the compiler
+expected an expression of a different type. It can occur in several cases, the
+most common being when calling a function and passing an argument whose type
+differs from that of the corresponding parameter in the function declaration.
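+
+For instance, a sketch of the first example above with the argument corrected:
+
+```
+fn plus_one(x: i32) -> i32 {
+    x + 1
+}
+
+fn main() {
+    let two = plus_one(1); // ok: the argument is an `i32`, as declared
+    assert_eq!(two, 2);
+}
+```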
diff --git a/compiler/rustc_error_codes/src/error_codes/E0309.md b/compiler/rustc_error_codes/src/error_codes/E0309.md
new file mode 100644
index 000000000..c36a56b00
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0309.md
@@ -0,0 +1,54 @@
+A parameter type is missing an explicit lifetime bound and may not live long
+enough.
+
+Erroneous code example:
+
+```compile_fail,E0309
+// This won't compile because the applicable impl of
+// `SomeTrait` (below) requires that `T: 'a`, but the struct does
+// not have a matching where-clause.
+struct Foo<'a, T> {
+ foo: <T as SomeTrait<'a>>::Output,
+}
+
+trait SomeTrait<'a> {
+ type Output;
+}
+
+impl<'a, T> SomeTrait<'a> for T
+where
+ T: 'a,
+{
+ type Output = u32;
+}
+```
+
+The type definition contains some field whose type requires an outlives
+annotation. Outlives annotations (e.g., `T: 'a`) are used to guarantee that all
+the data in `T` is valid for at least the lifetime `'a`. This scenario most
+commonly arises when the type contains an associated type reference like
+`<T as SomeTrait<'a>>::Output`, as shown in the previous code.
+
+There, the where-clause `T: 'a` that appears on the impl is not known to be
+satisfied on the struct. To make this example compile, you have to add a
+where-clause like `T: 'a` to the struct definition:
+
+```
+struct Foo<'a, T>
+where
+ T: 'a,
+{
+ foo: <T as SomeTrait<'a>>::Output
+}
+
+trait SomeTrait<'a> {
+ type Output;
+}
+
+impl<'a, T> SomeTrait<'a> for T
+where
+ T: 'a,
+{
+ type Output = u32;
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0310.md b/compiler/rustc_error_codes/src/error_codes/E0310.md
new file mode 100644
index 000000000..8d4311d01
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0310.md
@@ -0,0 +1,25 @@
+A parameter type is missing a lifetime constraint or has a lifetime that
+does not live long enough.
+
+Erroneous code example:
+
+```compile_fail,E0310
+// This won't compile because T is not constrained to the 'static lifetime
+// the reference needs
+struct Foo<T> {
+ foo: &'static T
+}
+```
+
+Type parameters in type definitions have lifetimes associated with them that
+represent how long the data stored within them is guaranteed to live. This
+lifetime must be as long as the data needs to be alive; omitting the
+constraint that denotes this will cause this error.
+
+This will compile, because it has the constraint on the type parameter:
+
+```
+struct Foo<T: 'static> {
+ foo: &'static T
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0312.md b/compiler/rustc_error_codes/src/error_codes/E0312.md
new file mode 100644
index 000000000..c5f7cf2e3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0312.md
@@ -0,0 +1,32 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+The lifetime of a reference's borrowed content doesn't match the expected
+lifetime.
+
+Erroneous code example:
+
+```compile_fail
+pub fn opt_str<'a>(maybestr: &'a Option<String>) -> &'static str {
+ if maybestr.is_none() {
+ "(none)"
+ } else {
+ let s: &'a str = maybestr.as_ref().unwrap();
+ s // Invalid lifetime!
+ }
+}
+```
+
+To fix this error, either lessen the expected lifetime or find a way to avoid
+using this reference outside of its current scope (for example, by running the
+code directly in the same block):
+
+```
+// In this case, we can fix the issue by switching from the 'static lifetime to 'a
+pub fn opt_str<'a>(maybestr: &'a Option<String>) -> &'a str {
+ if maybestr.is_none() {
+ "(none)"
+ } else {
+ let s: &'a str = maybestr.as_ref().unwrap();
+ s // Ok!
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0316.md b/compiler/rustc_error_codes/src/error_codes/E0316.md
new file mode 100644
index 000000000..4368c3217
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0316.md
@@ -0,0 +1,32 @@
+A `where` clause contains a nested quantification over lifetimes.
+
+Erroneous code example:
+
+```compile_fail,E0316
+trait Tr<'a, 'b> {}
+
+fn foo<T>(t: T)
+where
+ for<'a> &'a T: for<'b> Tr<'a, 'b>, // error: nested quantification
+{
+}
+```
+
+Rust syntax allows lifetime quantifications in two places within
+`where` clauses: quantifying over the trait bound only (as in
+`Ty: for<'l> Trait<'l>`) and quantifying over the whole clause
+(as in `for<'l> &'l Ty: Trait<'l>`). Using both in the same clause
+leads to a nested lifetime quantification, which is not supported.
+
+The following example compiles, because the clause with the nested
+quantification has been rewritten to use only one `for<>`:
+
+```
+trait Tr<'a, 'b> {}
+
+fn foo<T>(t: T)
+where
+ for<'a, 'b> &'a T: Tr<'a, 'b>, // ok
+{
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0317.md b/compiler/rustc_error_codes/src/error_codes/E0317.md
new file mode 100644
index 000000000..230911c20
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0317.md
@@ -0,0 +1,30 @@
+An `if` expression is missing an `else` block.
+
+Erroneous code example:
+
+```compile_fail,E0317
+let x = 5;
+let a = if x == 5 {
+ 1
+};
+```
+
+This error occurs when an `if` expression without an `else` block is used in a
+context where a type other than `()` is expected. In the previous code example,
+the `let` binding expected a value, but since there was no `else` branch, no
+value was supplied.
+
+An `if` expression without an `else` block has the type `()`, so this is a type
+error. To resolve it, add an `else` block having the same type as the `if`
+block.
+
+So to fix the previous code example:
+
+```
+let x = 5;
+let a = if x == 5 {
+ 1
+} else {
+ 2
+};
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0321.md b/compiler/rustc_error_codes/src/error_codes/E0321.md
new file mode 100644
index 000000000..bcfc12897
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0321.md
@@ -0,0 +1,21 @@
+A cross-crate opt-out trait was implemented on something which wasn't a struct
+or enum type.
+
+Erroneous code example:
+
+```compile_fail,E0321
+#![feature(auto_traits)]
+
+struct Foo;
+
+impl !Sync for Foo {}
+
+unsafe impl Send for &'static Foo {}
+// error: cross-crate traits with a default impl, like `core::marker::Send`,
+// can only be implemented for a struct/enum type, not
+// `&'static Foo`
+```
+
+Only structs and enums are permitted to impl `Send`, `Sync`, and other opt-out
+traits, and the struct or enum must be local to the current crate. So, for
+example, `unsafe impl Send for Rc<Foo>` is not allowed.
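+
+By contrast, here is a sketch (with a hypothetical raw-pointer wrapper) of an
+accepted case, since `Foo` is a struct local to the crate:
+
+```
+struct Foo(*mut u8);
+
+// `Foo` is a struct defined in the current crate, so opting in to
+// `Send`/`Sync` is allowed.
+unsafe impl Send for Foo {}
+unsafe impl Sync for Foo {}
+```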
diff --git a/compiler/rustc_error_codes/src/error_codes/E0322.md b/compiler/rustc_error_codes/src/error_codes/E0322.md
new file mode 100644
index 000000000..ccef8681d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0322.md
@@ -0,0 +1,14 @@
+The `Sized` trait was implemented explicitly.
+
+Erroneous code example:
+
+```compile_fail,E0322
+struct Foo;
+
+impl Sized for Foo {} // error!
+```
+
+The `Sized` trait is a special trait built into the compiler for types with a
+constant size known at compile time. This trait is automatically implemented
+for types as needed by the compiler, and explicitly implementing it for a
+type is currently disallowed.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0323.md b/compiler/rustc_error_codes/src/error_codes/E0323.md
new file mode 100644
index 000000000..0bf42d17e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0323.md
@@ -0,0 +1,46 @@
+An associated const was implemented when another trait item was expected.
+
+Erroneous code example:
+
+```compile_fail,E0323
+trait Foo {
+ type N;
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ const N : u32 = 0;
+ // error: item `N` is an associated const, which doesn't match its
+ // trait `<Bar as Foo>`
+}
+```
+
+Please verify that the associated const wasn't misspelled and the correct trait
+was implemented. Example:
+
+```
+struct Bar;
+
+trait Foo {
+ type N;
+}
+
+impl Foo for Bar {
+ type N = u32; // ok!
+}
+```
+
+Or:
+
+```
+struct Bar;
+
+trait Foo {
+ const N : u32;
+}
+
+impl Foo for Bar {
+ const N : u32 = 0; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0324.md b/compiler/rustc_error_codes/src/error_codes/E0324.md
new file mode 100644
index 000000000..1442cb77d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0324.md
@@ -0,0 +1,38 @@
+A method was implemented when another trait item was expected.
+
+Erroneous code example:
+
+```compile_fail,E0324
+struct Bar;
+
+trait Foo {
+ const N : u32;
+
+ fn M();
+}
+
+impl Foo for Bar {
+ fn N() {}
+ // error: item `N` is an associated method, which doesn't match its
+ // trait `<Bar as Foo>`
+}
+```
+
+To fix this error, please verify that the method name wasn't misspelled and
+that you are indeed implementing the correct trait items. Example:
+
+```
+struct Bar;
+
+trait Foo {
+ const N : u32;
+
+ fn M();
+}
+
+impl Foo for Bar {
+ const N : u32 = 0;
+
+ fn M() {} // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0325.md b/compiler/rustc_error_codes/src/error_codes/E0325.md
new file mode 100644
index 000000000..656fd1ec8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0325.md
@@ -0,0 +1,46 @@
+An associated type was implemented when another trait item was expected.
+
+Erroneous code example:
+
+```compile_fail,E0325
+struct Bar;
+
+trait Foo {
+ const N : u32;
+}
+
+impl Foo for Bar {
+ type N = u32;
+ // error: item `N` is an associated type, which doesn't match its
+ // trait `<Bar as Foo>`
+}
+```
+
+Please verify that the associated type name wasn't misspelled and your
+implementation corresponds to the trait definition. Example:
+
+```
+struct Bar;
+
+trait Foo {
+ type N;
+}
+
+impl Foo for Bar {
+ type N = u32; // ok!
+}
+```
+
+Or:
+
+```
+struct Bar;
+
+trait Foo {
+ const N : u32;
+}
+
+impl Foo for Bar {
+ const N : u32 = 0; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0326.md b/compiler/rustc_error_codes/src/error_codes/E0326.md
new file mode 100644
index 000000000..bc522e9cf
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0326.md
@@ -0,0 +1,18 @@
+An implementation of a trait doesn't match the type constraint.
+
+Erroneous code example:
+
+```compile_fail,E0326
+trait Foo {
+ const BAR: bool;
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ const BAR: u32 = 5; // error, expected bool, found u32
+}
+```
+
+The types of any associated constants in a trait implementation must match the
+types in the trait definition.
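+
+A sketch of the corrected implementation, using the type declared in the
+trait:
+
+```
+trait Foo {
+    const BAR: bool;
+}
+
+struct Bar;
+
+impl Foo for Bar {
+    const BAR: bool = true; // ok: matches the type in the trait definition
+}
+```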
diff --git a/compiler/rustc_error_codes/src/error_codes/E0328.md b/compiler/rustc_error_codes/src/error_codes/E0328.md
new file mode 100644
index 000000000..839092354
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0328.md
@@ -0,0 +1,34 @@
+The `Unsize` trait should not be implemented directly. All implementations of
+`Unsize` are provided automatically by the compiler.
+
+Erroneous code example:
+
+```compile_fail,E0328
+#![feature(unsize)]
+
+use std::marker::Unsize;
+
+pub struct MyType;
+
+impl<T> Unsize<T> for MyType {}
+```
+
+If you are defining your own smart pointer type and would like to enable
+conversion from a sized to an unsized type with the
+[DST coercion system][RFC 982], use [`CoerceUnsized`] instead.
+
+```
+#![feature(coerce_unsized)]
+
+use std::ops::CoerceUnsized;
+
+pub struct MyType<T: ?Sized> {
+ field_with_unsized_type: T,
+}
+
+impl<T, U> CoerceUnsized<MyType<U>> for MyType<T>
+ where T: CoerceUnsized<U> {}
+```
+
+[RFC 982]: https://github.com/rust-lang/rfcs/blob/master/text/0982-dst-coercion.md
+[`CoerceUnsized`]: https://doc.rust-lang.org/std/ops/trait.CoerceUnsized.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0329.md b/compiler/rustc_error_codes/src/error_codes/E0329.md
new file mode 100644
index 000000000..37d84a1a8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0329.md
@@ -0,0 +1,40 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+An attempt was made to access an associated constant through either a generic
+type parameter or `Self`, which earlier versions of Rust did not support. An
+example that used to cause this error is shown below:
+
+```
+trait Foo {
+ const BAR: f64;
+}
+
+struct MyStruct;
+
+impl Foo for MyStruct {
+ const BAR: f64 = 0f64;
+}
+
+fn get_bar_bad<F: Foo>(t: F) -> f64 {
+ F::BAR
+}
+```
+
+Previously, the value of `BAR` for a particular type could only be accessed
+through a concrete type, as shown below:
+
+```
+trait Foo {
+ const BAR: f64;
+}
+
+struct MyStruct;
+
+impl Foo for MyStruct {
+ const BAR: f64 = 0f64;
+}
+
+fn get_bar_good() -> f64 {
+ <MyStruct as Foo>::BAR
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0364.md b/compiler/rustc_error_codes/src/error_codes/E0364.md
new file mode 100644
index 000000000..d01fb0c9c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0364.md
@@ -0,0 +1,32 @@
+Private items cannot be publicly re-exported. This error indicates that you
+attempted to `pub use` a type or value that was not itself public.
+
+Erroneous code example:
+
+```compile_fail,E0364
+mod a {
+ fn foo() {}
+
+ mod a {
+ pub use super::foo; // error!
+ }
+}
+```
+
+The solution to this problem is to ensure that the items that you are
+re-exporting are themselves marked with `pub`:
+
+```
+mod a {
+ pub fn foo() {} // ok!
+
+ mod a {
+ pub use super::foo;
+ }
+}
+```
+
+See the [Use Declarations][use-declarations] section of the reference for
+more information on this topic.
+
+[use-declarations]: https://doc.rust-lang.org/reference/items/use-declarations.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0365.md b/compiler/rustc_error_codes/src/error_codes/E0365.md
new file mode 100644
index 000000000..e3d417a7d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0365.md
@@ -0,0 +1,32 @@
+Private modules cannot be publicly re-exported. This error indicates that you
+attempted to `pub use` a module that was not itself public.
+
+Erroneous code example:
+
+```compile_fail,E0365
+mod foo {
+ pub const X: u32 = 1;
+}
+
+pub use foo as foo2;
+
+fn main() {}
+```
+
+The solution to this problem is to ensure that the module that you are
+re-exporting is itself marked with `pub`:
+
+```
+pub mod foo {
+ pub const X: u32 = 1;
+}
+
+pub use foo as foo2;
+
+fn main() {}
+```
+
+See the [Use Declarations][use-declarations] section of the reference for
+more information on this topic.
+
+[use-declarations]: https://doc.rust-lang.org/reference/items/use-declarations.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0366.md b/compiler/rustc_error_codes/src/error_codes/E0366.md
new file mode 100644
index 000000000..e6f8e6189
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0366.md
@@ -0,0 +1,30 @@
+An attempt was made to implement `Drop` on a concrete specialization of a
+generic type. An example is shown below:
+
+```compile_fail,E0366
+struct Foo<T> {
+ t: T
+}
+
+impl Drop for Foo<u32> {
+ fn drop(&mut self) {}
+}
+```
+
+This code is not legal: it is not possible to specialize `Drop` to a subset of
+implementations of a generic type. One workaround for this is to wrap the
+generic type, as shown below:
+
+```
+struct Foo<T> {
+ t: T
+}
+
+struct Bar {
+ t: Foo<u32>
+}
+
+impl Drop for Bar {
+ fn drop(&mut self) {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0367.md b/compiler/rustc_error_codes/src/error_codes/E0367.md
new file mode 100644
index 000000000..cfebeada2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0367.md
@@ -0,0 +1,36 @@
+An attempt was made to implement `Drop` on a specialization of a generic type.
+
+Erroneous code example:
+
+```compile_fail,E0367
+trait Foo {}
+
+struct MyStruct<T> {
+ t: T
+}
+
+impl<T: Foo> Drop for MyStruct<T> {
+ fn drop(&mut self) {}
+}
+```
+
+This code is not legal: it is not possible to specialize `Drop` to a subset of
+implementations of a generic type. In order for this code to work, `MyStruct`
+must also require that `T` implements `Foo`. Alternatively, you can wrap the
+generic type in another type that specializes appropriately:
+
+```
+trait Foo {}
+
+struct MyStruct<T> {
+ t: T
+}
+
+struct MyStructWrapper<T: Foo> {
+ t: MyStruct<T>
+}
+
+impl<T: Foo> Drop for MyStructWrapper<T> {
+ fn drop(&mut self) {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0368.md b/compiler/rustc_error_codes/src/error_codes/E0368.md
new file mode 100644
index 000000000..7b9d93348
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0368.md
@@ -0,0 +1,49 @@
+A binary assignment operator like `+=` or `^=` was applied to a type that
+doesn't support it.
+
+Erroneous code example:
+
+```compile_fail,E0368
+let mut x = 12f32; // error: binary assignment operation `<<=` cannot be
+                   // applied to type `f32`
+
+x <<= 2;
+```
+
+To fix this error, please check that this type implements this binary
+operation. Example:
+
+```
+let mut x = 12u32; // the `u32` type does implement the `ShlAssign` trait
+
+x <<= 2; // ok!
+```
+
+It is also possible to overload most operators for your own type by
+implementing the `[OP]Assign` traits from `std::ops`.
+
+Another problem you might be facing is this: suppose you've overloaded the `+`
+operator for some type `Foo` by implementing the `std::ops::Add` trait for
+`Foo`, but you find that using `+=` does not work, as in this example:
+
+```compile_fail,E0368
+use std::ops::Add;
+
+struct Foo(u32);
+
+impl Add for Foo {
+ type Output = Foo;
+
+ fn add(self, rhs: Foo) -> Foo {
+ Foo(self.0 + rhs.0)
+ }
+}
+
+fn main() {
+ let mut x: Foo = Foo(5);
+ x += Foo(7); // error: `+=` cannot be applied to the type `Foo`
+}
+```
+
+This is because `AddAssign` is not automatically implemented, so you need to
+manually implement it for your type.
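+
+A minimal sketch of such a manual `AddAssign` implementation for the `Foo`
+type above:
+
+```
+use std::ops::AddAssign;
+
+struct Foo(u32);
+
+impl AddAssign for Foo {
+    fn add_assign(&mut self, rhs: Foo) {
+        self.0 += rhs.0;
+    }
+}
+
+fn main() {
+    let mut x = Foo(5);
+    x += Foo(7); // ok!
+}
+```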
diff --git a/compiler/rustc_error_codes/src/error_codes/E0369.md b/compiler/rustc_error_codes/src/error_codes/E0369.md
new file mode 100644
index 000000000..ab0f4b408
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0369.md
@@ -0,0 +1,29 @@
+A binary operation was attempted on a type which doesn't support it.
+
+Erroneous code example:
+
+```compile_fail,E0369
+let x = 12f32; // error: binary operation `<<` cannot be applied to
+ // type `f32`
+
+x << 2;
+```
+
+To fix this error, please check that this type implements this binary
+operation. Example:
+
+```
+let x = 12u32; // the `u32` type does implement it:
+ // https://doc.rust-lang.org/stable/std/ops/trait.Shl.html
+
+x << 2; // ok!
+```
+
+It is also possible to overload most operators for your own type by
+implementing traits from `std::ops`.
+
+String concatenation appends the string on the right to the string on the
+left and may require reallocation. This requires ownership of the string
+on the left. If something should be added to a string literal, move the
+literal to the heap by allocating it with `to_owned()` like in
+`"Your text".to_owned()`.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0370.md b/compiler/rustc_error_codes/src/error_codes/E0370.md
new file mode 100644
index 000000000..14e954722
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0370.md
@@ -0,0 +1,35 @@
+The maximum value of an enum discriminant was reached, so the next enum
+variant cannot be assigned a value automatically.
+
+Erroneous code example:
+
+```compile_fail,E0370
+#[repr(i64)]
+enum Foo {
+ X = 0x7fffffffffffffff,
+ Y, // error: enum discriminant overflowed on value after
+ // 9223372036854775807: i64; set explicitly via
+ // Y = -9223372036854775808 if that is desired outcome
+}
+```
+
+To fix this, manually set the next discriminant value, or put the enum
+variant with the maximum value at the end of the enum. Examples:
+
+```
+#[repr(i64)]
+enum Foo {
+ X = 0x7fffffffffffffff,
+ Y = 0, // ok!
+}
+```
+
+Or:
+
+```
+#[repr(i64)]
+enum Foo {
+ Y = 0, // ok!
+ X = 0x7fffffffffffffff,
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0371.md b/compiler/rustc_error_codes/src/error_codes/E0371.md
new file mode 100644
index 000000000..a44721346
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0371.md
@@ -0,0 +1,19 @@
+A trait was implemented on another trait that already implements it by
+definition.
+
+Erroneous code examples:
+
+```compile_fail,E0371
+trait Foo { fn foo(&self) { } }
+trait Bar: Foo { }
+trait Baz: Bar { }
+
+impl Bar for Baz { } // error, `Baz` implements `Bar` by definition
+impl Foo for Baz { } // error, `Baz` implements `Bar` which implements `Foo`
+impl Baz for Baz { } // error, `Baz` (trivially) implements `Baz`
+impl Baz for Bar { } // Note: This is OK
+```
+
+When `Trait2` is a subtrait of `Trait1` (for example, when `Trait2` has a
+definition like `trait Trait2: Trait1 { ... }`), it is not allowed to implement
+`Trait1` for `Trait2`. This is because `Trait2` already implements `Trait1` by
+definition, so it is not useful to do this.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0373.md b/compiler/rustc_error_codes/src/error_codes/E0373.md
new file mode 100644
index 000000000..effa597aa
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0373.md
@@ -0,0 +1,73 @@
+A captured variable in a closure may not live long enough.
+
+Erroneous code example:
+
+```compile_fail,E0373
+fn foo() -> Box<dyn Fn(u32) -> u32> {
+ let x = 0u32;
+ Box::new(|y| x + y)
+}
+```
+
+This error occurs when an attempt is made to use data captured by a closure,
+when that data may no longer exist. It's most commonly seen when attempting to
+return a closure as shown in the previous code example.
+
+Notice that `x` is stack-allocated by `foo()`. By default, Rust captures
+closed-over data by reference. This means that once `foo()` returns, `x` no
+longer exists. An attempt to access `x` within the closure would thus be
+unsafe.
+
+Another situation where this might be encountered is when spawning threads:
+
+```compile_fail,E0373
+fn foo() {
+ let x = 0u32;
+ let y = 1u32;
+
+ let thr = std::thread::spawn(|| {
+ x + y
+ });
+}
+```
+
+Since our new thread runs in parallel, the stack frame containing `x` and `y`
+may well have disappeared by the time we try to use them. Even if we call
+`thr.join()` within `foo` (which blocks until `thr` has completed, ensuring the
+stack frame won't disappear), we will not succeed: the compiler cannot prove
+that this behavior is safe, and so won't let us do it.
+
+The solution to this problem is usually to switch to using a `move` closure.
+This approach moves (or copies, where possible) data into the closure, rather
+than taking references to it. For example:
+
+```
+fn foo() -> Box<dyn Fn(u32) -> u32> {
+ let x = 0u32;
+ Box::new(move |y| x + y)
+}
+```
+
+Now that the closure has its own copy of the data, there's no need to worry
+about safety.
+
+This error may also be encountered while using `async` blocks:
+
+```compile_fail,E0373,edition2018
+use std::future::Future;
+
+async fn f() {
+ let v = vec![1, 2, 3i32];
+ spawn(async { //~ ERROR E0373
+ println!("{:?}", v)
+ });
+}
+
+fn spawn<F: Future + Send + 'static>(future: F) {
+ unimplemented!()
+}
+```
+
+Similarly to closures, `async` blocks are not executed immediately and may
+capture closed-over data by reference. For more information, see
+https://rust-lang.github.io/async-book/03_async_await/01_chapter.html.
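+
+As with closures, a sketch of a fix is to use an `async move` block so that
+the block takes ownership of `v`:
+
+```edition2018
+use std::future::Future;
+
+async fn f() {
+    let v = vec![1, 2, 3i32];
+    spawn(async move { // the block now owns `v`
+        println!("{:?}", v)
+    });
+}
+
+fn spawn<F: Future + Send + 'static>(future: F) {
+    unimplemented!()
+}
+```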
diff --git a/compiler/rustc_error_codes/src/error_codes/E0374.md b/compiler/rustc_error_codes/src/error_codes/E0374.md
new file mode 100644
index 000000000..6d7dc8882
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0374.md
@@ -0,0 +1,57 @@
+`CoerceUnsized` was implemented on a struct which does not contain a field with
+an unsized type.
+
+Example of erroneous code:
+
+```compile_fail,E0374
+#![feature(coerce_unsized)]
+use std::ops::CoerceUnsized;
+
+struct Foo<T: ?Sized> {
+ a: i32,
+}
+
+// error: Struct `Foo` has no unsized fields that need `CoerceUnsized`.
+impl<T, U> CoerceUnsized<Foo<U>> for Foo<T>
+ where T: CoerceUnsized<U> {}
+```
+
+An [unsized type][1] is any type whose size or alignment the compiler does
+not know at compile time. Any struct containing an unsized type is also
+unsized.
+
+[1]: https://doc.rust-lang.org/book/ch19-04-advanced-types.html#dynamically-sized-types-and-the-sized-trait
+
+`CoerceUnsized` is used to coerce one struct containing an unsized type
+into another struct containing a different unsized type. If the struct
+doesn't have any fields of unsized types, then you don't need explicit
+coercion to get the types you want. To fix this, either do not implement
+`CoerceUnsized` at all, or add an unsized field to the struct.
+
+Example:
+
+```
+#![feature(coerce_unsized)]
+use std::ops::CoerceUnsized;
+
+// We don't need to impl `CoerceUnsized` here.
+struct Foo {
+ a: i32,
+}
+
+// We add the unsized type field to the struct.
+struct Bar<T: ?Sized> {
+ a: i32,
+ b: T,
+}
+
+// The struct has an unsized field so we can implement
+// `CoerceUnsized` for it.
+impl<T, U> CoerceUnsized<Bar<U>> for Bar<T>
+ where T: CoerceUnsized<U> {}
+```
+
+Note that `CoerceUnsized` is mainly used by smart pointers like `Box`, `Rc`,
+and `Arc` to mark that the unsized types they point at can be coerced.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0375.md b/compiler/rustc_error_codes/src/error_codes/E0375.md
new file mode 100644
index 000000000..71e530571
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0375.md
@@ -0,0 +1,55 @@
+`CoerceUnsized` was implemented on a struct which contains more than one field
+with an unsized type.
+
+Erroneous code example:
+
+```compile_fail,E0375
+#![feature(coerce_unsized)]
+use std::ops::CoerceUnsized;
+
+struct Foo<T: ?Sized, U: ?Sized> {
+ a: i32,
+ b: T,
+ c: U,
+}
+
+// error: Struct `Foo` has more than one unsized field.
+impl<T, U> CoerceUnsized<Foo<U, T>> for Foo<T, U> {}
+```
+
+A struct with more than one field containing an unsized type cannot implement
+`CoerceUnsized`. This only occurs when you are trying to coerce one of the
+types in your struct to another type in the struct. In this case we try to
+impl `CoerceUnsized` from `T` to `U`, which are both types that the struct
+takes. An [unsized type][1] is any type whose size or alignment the compiler
+doesn't know at compile time. Any struct containing an unsized type is also
+unsized.
+
+`CoerceUnsized` only allows coercion from a struct with a single unsized
+field to another struct with a single unsized field. In fact, Rust only
+allows a struct to have at most one unsized field, and that field must be
+the last one in the struct, so having two unsized fields in a single struct
+is rejected by the compiler. To fix this, keep only one unsized field in the
+struct and use multiple structs to manage each unsized type you need.
+
+Example:
+
+```
+#![feature(coerce_unsized)]
+use std::ops::CoerceUnsized;
+
+struct Foo<T: ?Sized> {
+ a: i32,
+ b: T,
+}
+
+impl <T, U> CoerceUnsized<Foo<U>> for Foo<T>
+ where T: CoerceUnsized<U> {}
+
+fn coerce_foo<T: CoerceUnsized<U>, U>(t: T) -> Foo<U> {
+ Foo { a: 12i32, b: t } // we use coercion to get the `Foo<U>` type we need
+}
+```
+
+[1]: https://doc.rust-lang.org/book/ch19-04-advanced-types.html#dynamically-sized-types-and-the-sized-trait
diff --git a/compiler/rustc_error_codes/src/error_codes/E0376.md b/compiler/rustc_error_codes/src/error_codes/E0376.md
new file mode 100644
index 000000000..50de15bd3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0376.md
@@ -0,0 +1,46 @@
+`CoerceUnsized` was implemented on something that isn't a struct.
+
+Erroneous code example:
+
+```compile_fail,E0376
+#![feature(coerce_unsized)]
+use std::ops::CoerceUnsized;
+
+struct Foo<T: ?Sized> {
+ a: T,
+}
+
+// error: The type `U` is not a struct
+impl<T, U> CoerceUnsized<U> for Foo<T> {}
+```
+
+`CoerceUnsized` can only be implemented for a struct. Unsized types can
+already be coerced without an implementation of `CoerceUnsized`, whereas a
+struct containing an unsized type needs to know that the unsized field it
+contains can be coerced. An [unsized type][1] is any type whose size or
+alignment the compiler doesn't know at compile time. Any struct containing
+an unsized type is also unsized.
+
+[1]: https://doc.rust-lang.org/book/ch19-04-advanced-types.html#dynamically-sized-types-and-the-sized-trait
+
+The `CoerceUnsized` trait takes a struct type. Make sure the type you are
+providing to `CoerceUnsized` is a struct with only the last field containing an
+unsized type.
+
+Example:
+
+```
+#![feature(coerce_unsized)]
+use std::ops::CoerceUnsized;
+
+struct Foo<T> {
+ a: T,
+}
+
+// The `Foo<U>` is a struct so `CoerceUnsized` can be implemented
+impl<T, U> CoerceUnsized<Foo<U>> for Foo<T> where T: CoerceUnsized<U> {}
+```
+
+Note that in Rust, structs can only contain an unsized type if the field
+containing the unsized type is the last field and the only unsized field in
+the struct.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0378.md b/compiler/rustc_error_codes/src/error_codes/E0378.md
new file mode 100644
index 000000000..c6fe997f3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0378.md
@@ -0,0 +1,59 @@
+The `DispatchFromDyn` trait was implemented on something which is not a pointer
+or a newtype wrapper around a pointer.
+
+Erroneous code example:
+
+```compile_fail,E0378
+#![feature(dispatch_from_dyn)]
+use std::ops::DispatchFromDyn;
+
+struct WrapperExtraField<T> {
+ ptr: T,
+ extra_stuff: i32,
+}
+
+impl<T, U> DispatchFromDyn<WrapperExtraField<U>> for WrapperExtraField<T>
+where
+ T: DispatchFromDyn<U>,
+{}
+```
+
+The `DispatchFromDyn` trait can currently only be implemented for builtin
+pointer types and for structs that are newtype wrappers around them; that is,
+the struct must have exactly one field (apart from `PhantomData`), and that
+field must itself implement `DispatchFromDyn`.
+
+```
+#![feature(dispatch_from_dyn, unsize)]
+use std::{
+ marker::Unsize,
+ ops::DispatchFromDyn,
+};
+
+struct Ptr<T: ?Sized>(*const T);
+
+impl<T: ?Sized, U: ?Sized> DispatchFromDyn<Ptr<U>> for Ptr<T>
+where
+ T: Unsize<U>,
+{}
+```
+
+Another example:
+
+```
+#![feature(dispatch_from_dyn)]
+use std::{
+ ops::DispatchFromDyn,
+ marker::PhantomData,
+};
+
+struct Wrapper<T> {
+ ptr: T,
+ _phantom: PhantomData<()>,
+}
+
+impl<T, U> DispatchFromDyn<Wrapper<U>> for Wrapper<T>
+where
+ T: DispatchFromDyn<U>,
+{}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0379.md b/compiler/rustc_error_codes/src/error_codes/E0379.md
new file mode 100644
index 000000000..ab438e414
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0379.md
@@ -0,0 +1,14 @@
+A trait method was declared `const`.
+
+Erroneous code example:
+
+```compile_fail,E0379
+trait Foo {
+ const fn bar() -> u32; // error!
+}
+```
+
+Trait methods cannot be declared `const` by design. For more information, see
+[RFC 911].
+
+[RFC 911]: https://github.com/rust-lang/rfcs/pull/911
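+
+A sketch of the accepted form simply drops the `const` qualifier:
+
+```
+trait Foo {
+    fn bar() -> u32; // ok!
+}
+```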
diff --git a/compiler/rustc_error_codes/src/error_codes/E0380.md b/compiler/rustc_error_codes/src/error_codes/E0380.md
new file mode 100644
index 000000000..638f0c8ec
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0380.md
@@ -0,0 +1,14 @@
+An auto trait was declared with a method or an associated item.
+
+Erroneous code example:
+
+```compile_fail,E0380
+unsafe auto trait Trait {
+ type Output; // error!
+}
+```
+
+Auto traits cannot have methods or associated items. For more information see
+the [opt-in builtin traits RFC][RFC 19].
+
+[RFC 19]: https://github.com/rust-lang/rfcs/blob/master/text/0019-opt-in-builtin-traits.md
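+
+A sketch of the accepted form, with no methods or associated items:
+
+```
+#![feature(auto_traits)]
+
+unsafe auto trait Trait {} // ok!
+```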
diff --git a/compiler/rustc_error_codes/src/error_codes/E0381.md b/compiler/rustc_error_codes/src/error_codes/E0381.md
new file mode 100644
index 000000000..976780099
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0381.md
@@ -0,0 +1,20 @@
+It is not allowed to use or capture an uninitialized variable.
+
+Erroneous code example:
+
+```compile_fail,E0381
+fn main() {
+ let x: i32;
+ let y = x; // error, use of possibly-uninitialized variable
+}
+```
+
+To fix this, ensure that any declared variables are initialized before being
+used. Example:
+
+```
+fn main() {
+ let x: i32 = 0;
+ let y = x; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0382.md b/compiler/rustc_error_codes/src/error_codes/E0382.md
new file mode 100644
index 000000000..d1408a062
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0382.md
@@ -0,0 +1,109 @@
+A variable was used after its contents had been moved elsewhere.
+
+Erroneous code example:
+
+```compile_fail,E0382
+struct MyStruct { s: u32 }
+
+fn main() {
+ let mut x = MyStruct{ s: 5u32 };
+ let y = x;
+ x.s = 6;
+ println!("{}", x.s);
+}
+```
+
+Since `MyStruct` is a type that is not marked `Copy`, the data gets moved out
+of `x` when we set `y`. This is fundamental to Rust's ownership system: outside
+of workarounds like `Rc`, a value cannot be owned by more than one variable.
+
+Sometimes we don't need to move the value. Using a reference, we can let another
+function borrow the value without changing its ownership. In the example below,
+we don't actually have to move our string to `calculate_length`; we can give
+it a reference to it with `&` instead.
+
+```
+fn main() {
+ let s1 = String::from("hello");
+
+ let len = calculate_length(&s1);
+
+ println!("The length of '{}' is {}.", s1, len);
+}
+
+fn calculate_length(s: &String) -> usize {
+ s.len()
+}
+```
+
+A mutable reference can be created with `&mut`.
+
+Sometimes we don't want a reference, but a duplicate. All types marked `Clone`
+can be duplicated by calling `.clone()`. Subsequent changes to a clone do not
+affect the original variable.
+
+Most types in the standard library are marked `Clone`. The example below
+demonstrates using `clone()` on a string. `s1` is first set to "many", and then
+copied to `s2`. Then the first character of `s1` is removed, without affecting
+`s2`. "any many" is printed to the console.
+
+```
+fn main() {
+ let mut s1 = String::from("many");
+ let s2 = s1.clone();
+ s1.remove(0);
+ println!("{} {}", s1, s2);
+}
+```
+
+If we control the definition of a type, we can implement `Clone` on it ourselves
+with `#[derive(Clone)]`.
+
+Some types have no ownership semantics at all and are trivial to duplicate. An
+example is `i32` and the other number types. We don't have to call `.clone()` to
+clone them, because they are marked `Copy` in addition to `Clone`. Implicit
+cloning is more convenient in this case. We can mark our own types `Copy` if
+all their members are also marked `Copy`.
+
+In the example below, we implement a `Point` type. Because it only stores two
+integers, we opt out of ownership semantics with `Copy`. Then we can
+`let p2 = p1` without `p1` being moved.
+
+```
+#[derive(Copy, Clone)]
+struct Point { x: i32, y: i32 }
+
+fn main() {
+ let mut p1 = Point{ x: -1, y: 2 };
+ let p2 = p1;
+ p1.x = 1;
+ println!("p1: {}, {}", p1.x, p1.y);
+ println!("p2: {}, {}", p2.x, p2.y);
+}
+```
+
+Alternatively, if we don't control the struct's definition, or mutable shared
+ownership is truly required, we can use `Rc` and `RefCell`:
+
+```
+use std::cell::RefCell;
+use std::rc::Rc;
+
+struct MyStruct { s: u32 }
+
+fn main() {
+ let x = Rc::new(RefCell::new(MyStruct{ s: 5u32 }));
+ let y = x.clone();
+ x.borrow_mut().s = 6;
+ println!("{}", x.borrow().s);
+}
+```
+
+With this approach, `x` and `y` share ownership of the data via the `Rc`
+(reference-counted) type. `RefCell` essentially performs runtime borrow
+checking, ensuring that either one writer or multiple readers can access the
+data at any one time.
+
+If you wish to learn more about ownership in Rust, start with the
+[Understanding Ownership][understanding-ownership] chapter in the Book.
+
+[understanding-ownership]: https://doc.rust-lang.org/book/ch04-00-understanding-ownership.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0383.md b/compiler/rustc_error_codes/src/error_codes/E0383.md
new file mode 100644
index 000000000..fd2b0b08f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0383.md
@@ -0,0 +1,34 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+This error occurs when an attempt is made to partially reinitialize a
+structure that is currently uninitialized.
+
+For example, this can happen when a drop has taken place:
+
+```compile_fail
+struct Foo {
+ a: u32,
+}
+impl Drop for Foo {
+ fn drop(&mut self) { /* ... */ }
+}
+
+let mut x = Foo { a: 1 };
+drop(x); // `x` is now uninitialized
+x.a = 2; // error, partial reinitialization of uninitialized structure `x`
+```
+
+This error can be fixed by fully reinitializing the structure in question:
+
+```
+struct Foo {
+ a: u32,
+}
+impl Drop for Foo {
+ fn drop(&mut self) { /* ... */ }
+}
+
+let mut x = Foo { a: 1 };
+drop(x);
+x = Foo { a: 2 };
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0384.md b/compiler/rustc_error_codes/src/error_codes/E0384.md
new file mode 100644
index 000000000..e21fac079
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0384.md
@@ -0,0 +1,20 @@
+An immutable variable was reassigned.
+
+Erroneous code example:
+
+```compile_fail,E0384
+fn main() {
+ let x = 3;
+ x = 5; // error, reassignment of immutable variable
+}
+```
+
+By default, variables in Rust are immutable. To fix this error, add the keyword
+`mut` after the keyword `let` when declaring the variable. For example:
+
+```
+fn main() {
+ let mut x = 3;
+ x = 5;
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0386.md b/compiler/rustc_error_codes/src/error_codes/E0386.md
new file mode 100644
index 000000000..de3b468b6
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0386.md
@@ -0,0 +1,31 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+This error occurs when an attempt is made to mutate the target of a mutable
+reference stored inside an immutable container.
+
+For example, this can happen when storing a `&mut` inside an immutable `Box`:
+
+```
+let mut x: i64 = 1;
+let y: Box<_> = Box::new(&mut x);
+**y = 2; // error, cannot assign to data in an immutable container
+```
+
+This error can be fixed by making the container mutable:
+
+```
+let mut x: i64 = 1;
+let mut y: Box<_> = Box::new(&mut x);
+**y = 2;
+```
+
+It can also be fixed by using a type with interior mutability, such as `Cell`
+or `RefCell`:
+
+```
+use std::cell::Cell;
+
+let x: i64 = 1;
+let y: Box<Cell<_>> = Box::new(Cell::new(x));
+y.set(2);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0387.md b/compiler/rustc_error_codes/src/error_codes/E0387.md
new file mode 100644
index 000000000..38ad19bd6
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0387.md
@@ -0,0 +1,57 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+This error occurs when an attempt is made to mutate or mutably reference data
+that a closure has captured immutably.
+
+Erroneous code example:
+
+```compile_fail
+// Accepts a function or a closure that captures its environment immutably.
+// Closures passed to foo will not be able to mutate their closed-over state.
+fn foo<F: Fn()>(f: F) { }
+
+// Attempts to mutate closed-over data. Error message reads:
+// `cannot assign to data in a captured outer variable...`
+fn mutable() {
+ let mut x = 0u32;
+ foo(|| x = 2);
+}
+
+// Attempts to take a mutable reference to closed-over data. Error message
+// reads: `cannot borrow data mutably in a captured outer variable...`
+fn mut_addr() {
+ let mut x = 0u32;
+ foo(|| { let y = &mut x; });
+}
+```
+
+The problem here is that `foo` is defined as accepting a parameter of type
+`Fn`. Closures passed into `foo` will thus be inferred to be of type `Fn`,
+meaning that they capture their context immutably.
+
+If the definition of `foo` is under your control, the simplest solution is to
+capture the data mutably. This can be done by defining `foo` to take `FnMut`
+rather than `Fn`:
+
+```
+fn foo<F: FnMut()>(f: F) { }
+```
+
+Alternatively, we can consider using the `Cell` and `RefCell` types to achieve
+interior mutability through a shared reference. Our example's `mutable`
+function could be redefined as below:
+
+```
+use std::cell::Cell;
+
+fn foo<F: Fn()>(f: F) { }
+
+fn mutable() {
+ let x = Cell::new(0u32);
+ foo(|| x.set(2));
+}
+```
+
+You can read more in the API documentation for [Cell][std-cell].
+
+[std-cell]: https://doc.rust-lang.org/std/cell/
diff --git a/compiler/rustc_error_codes/src/error_codes/E0388.md b/compiler/rustc_error_codes/src/error_codes/E0388.md
new file mode 100644
index 000000000..512fb42e6
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0388.md
@@ -0,0 +1 @@
+#### Note: this error code is no longer emitted by the compiler.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0389.md b/compiler/rustc_error_codes/src/error_codes/E0389.md
new file mode 100644
index 000000000..9f064e44c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0389.md
@@ -0,0 +1,63 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+An attempt was made to mutate data using a non-mutable reference. This
+commonly occurs when attempting to assign to a non-mutable reference of a
+mutable reference (`&(&mut T)`).
+
+Erroneous code example:
+
+```compile_fail
+struct FancyNum {
+ num: u8,
+}
+
+fn main() {
+ let mut fancy = FancyNum{ num: 5 };
+ let fancy_ref = &(&mut fancy);
+ fancy_ref.num = 6; // error: cannot assign to data in a `&` reference
+ println!("{}", fancy_ref.num);
+}
+```
+
+Here, `&mut fancy` is mutable, but `&(&mut fancy)` is not. Creating an
+immutable reference to a value borrows it immutably. There can be multiple
+references of type `&(&mut T)` that point to the same value, so they must be
+immutable to prevent multiple mutable references to the same value.
+
+To fix this, either remove the outer reference:
+
+```
+struct FancyNum {
+ num: u8,
+}
+
+fn main() {
+ let mut fancy = FancyNum{ num: 5 };
+
+ let fancy_ref = &mut fancy;
+ // `fancy_ref` is now &mut FancyNum, rather than &(&mut FancyNum)
+
+ fancy_ref.num = 6; // No error!
+
+ println!("{}", fancy_ref.num);
+}
+```
+
+Or make the outer reference mutable:
+
+```
+struct FancyNum {
+ num: u8
+}
+
+fn main() {
+ let mut fancy = FancyNum{ num: 5 };
+
+ let fancy_ref = &mut (&mut fancy);
+ // `fancy_ref` is now &mut(&mut FancyNum), rather than &(&mut FancyNum)
+
+ fancy_ref.num = 6; // No error!
+
+ println!("{}", fancy_ref.num);
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0390.md b/compiler/rustc_error_codes/src/error_codes/E0390.md
new file mode 100644
index 000000000..26a9dd331
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0390.md
@@ -0,0 +1,51 @@
+A method or constant was implemented on a primitive type.
+
+Erroneous code example:
+
+```compile_fail,E0390
+struct Foo {
+ x: i32
+}
+
+impl *mut Foo {}
+// error: cannot define inherent `impl` for primitive types
+```
+
+This isn't allowed, but using a trait to implement a method or constant is a
+good solution. Example:
+
+```
+struct Foo {
+ x: i32
+}
+
+trait Bar {
+ fn bar();
+}
+
+impl Bar for *mut Foo {
+ fn bar() {} // ok!
+}
+```
+
+Instead of defining an inherent implementation on a reference, you could also
+move the reference inside the implementation:
+
+```compile_fail,E0390
+struct Foo;
+
+impl &Foo { // error: no nominal type found for inherent implementation
+ fn bar(self, other: Self) {}
+}
+```
+
+becomes
+
+```
+struct Foo;
+
+impl Foo {
+ fn bar(&self, other: &Self) {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0391.md b/compiler/rustc_error_codes/src/error_codes/E0391.md
new file mode 100644
index 000000000..dff50ccaa
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0391.md
@@ -0,0 +1,16 @@
+A type dependency cycle has been encountered.
+
+Erroneous code example:
+
+```compile_fail,E0391
+trait FirstTrait: SecondTrait {}
+
+trait SecondTrait: FirstTrait {}
+```
+
+The previous example contains a circular dependency between two traits:
+`FirstTrait` depends on `SecondTrait` which itself depends on `FirstTrait`.
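+
+One way to fix this (a sketch) is to break the cycle so that the dependency
+runs in one direction only:
+
+```
+trait FirstTrait {}
+
+trait SecondTrait: FirstTrait {} // ok: `FirstTrait` no longer depends on
+                                 // `SecondTrait`
+```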
diff --git a/compiler/rustc_error_codes/src/error_codes/E0392.md b/compiler/rustc_error_codes/src/error_codes/E0392.md
new file mode 100644
index 000000000..f373d8945
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0392.md
@@ -0,0 +1,58 @@
+A type or lifetime parameter has been declared but is not actually used.
+
+Erroneous code example:
+
+```compile_fail,E0392
+enum Foo<T> {
+ Bar,
+}
+```
+
+If the type parameter was included by mistake, this error can be fixed
+by simply removing the type parameter, as shown below:
+
+```
+enum Foo {
+ Bar,
+}
+```
+
+Alternatively, if the type parameter was intentionally inserted, it must be
+used. A simple fix is shown below:
+
+```
+enum Foo<T> {
+ Bar(T),
+}
+```
+
+This error may also commonly be found when working with unsafe code. For
+example, when using raw pointers one may wish to specify the lifetime for
+which the pointed-at data is valid. An initial attempt (below) causes this
+error:
+
+```compile_fail,E0392
+struct Foo<'a, T> {
+ x: *const T,
+}
+```
+
+We want to express the constraint that `Foo` should not outlive `'a`, because
+the data pointed to by `T` is only valid for that lifetime. The problem is
+that there are no actual uses of `'a`. It's possible to work around this
+by adding a `PhantomData` field to the struct, using it to tell the compiler
+to act as if the struct contained a borrowed reference `&'a T`:
+
+```
+use std::marker::PhantomData;
+
+struct Foo<'a, T: 'a> {
+ x: *const T,
+ phantom: PhantomData<&'a T>
+}
+```
+
+[PhantomData] can also be used to express information about unused type
+parameters.
+
+[PhantomData]: https://doc.rust-lang.org/std/marker/struct.PhantomData.html
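+
+For instance, a sketch (with hypothetical names) of a unit-tagged quantity
+whose type parameter exists only for type checking:
+
+```
+use std::marker::PhantomData;
+
+struct Meters;
+
+// `Unit` is never stored, but `PhantomData` marks it as used, keeping
+// `Distance<Meters>` distinct from other `Distance<..>` types.
+struct Distance<Unit> {
+    value: f64,
+    unit: PhantomData<Unit>,
+}
+
+fn main() {
+    let d: Distance<Meters> = Distance { value: 5.0, unit: PhantomData };
+    let _ = d.value;
+}
+```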
diff --git a/compiler/rustc_error_codes/src/error_codes/E0393.md b/compiler/rustc_error_codes/src/error_codes/E0393.md
new file mode 100644
index 000000000..3e853cf1b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0393.md
@@ -0,0 +1,29 @@
+A type parameter which references `Self` in its default value was not specified.
+
+Erroneous code example:
+
+```compile_fail,E0393
+trait A<T=Self> {}
+
+fn together_we_will_rule_the_galaxy(son: &A) {}
+// error: the type parameter `T` must be explicitly specified in an
+// object type because its default value `Self` references the
+// type `Self`
+```
+
+A trait object is defined over a single, fully-defined trait. With a regular
+default parameter, this parameter can just be substituted in. However, if the
+default parameter is `Self`, the trait changes for each concrete type; i.e.
+`i32` will be expected to implement `A<i32>`, `bool` will be expected to
+implement `A<bool>`, etc. These types will not share an implementation of a
+fully-defined trait; instead they share implementations of a trait with
+different parameters substituted in for each implementation. This is
+irreconcilable with what we need to make a trait object work, and is thus
+disallowed. Making the trait concrete by explicitly specifying the value of the
+defaulted parameter will fix this issue. Fixed example:
+
+```
+trait A<T=Self> {}
+
+fn together_we_will_rule_the_galaxy(son: &dyn A<i32>) {} // Ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0398.md b/compiler/rustc_error_codes/src/error_codes/E0398.md
new file mode 100644
index 000000000..75d86979e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0398.md
@@ -0,0 +1,35 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+In Rust 1.3, the default object lifetime bounds are expected to change, as
+described in [RFC 1156]. You are getting a warning because the compiler
+thinks it is possible that this change will cause a compilation error in your
+code. It is possible, though unlikely, that this is a false alarm.
+
+The heart of the change is that where `&'a Box<SomeTrait>` used to default to
+`&'a Box<SomeTrait+'a>`, it now defaults to `&'a Box<SomeTrait+'static>` (here,
+`SomeTrait` is the name of some trait type). Note that the only types which are
+affected are references to boxes, like `&Box<SomeTrait>` or
+`&[Box<SomeTrait>]`. More common types like `&SomeTrait` or `Box<SomeTrait>`
+are unaffected.
+
+To silence this warning, edit your code to use an explicit bound. Most of the
+time, this means that you will want to change the signature of a function that
+you are calling. For example, if the error is reported on a call like `foo(x)`,
+and `foo` is defined as follows:
+
+```
+# trait SomeTrait {}
+fn foo(arg: &Box<SomeTrait>) { /* ... */ }
+```
+
+You might change it to:
+
+```
+# trait SomeTrait {}
+fn foo<'a>(arg: &'a Box<SomeTrait+'a>) { /* ... */ }
+```
+
+This explicitly states that you expect the trait object `SomeTrait` to contain
+references (with a maximum lifetime of `'a`).
+
+[RFC 1156]: https://github.com/rust-lang/rfcs/blob/master/text/1156-adjust-default-object-bounds.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0399.md b/compiler/rustc_error_codes/src/error_codes/E0399.md
new file mode 100644
index 000000000..6ea6054b4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0399.md
@@ -0,0 +1,37 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+You implemented a trait, overriding one or more of its associated types, but
+did not reimplement its default methods.
+
+Example of erroneous code:
+
+```
+#![feature(associated_type_defaults)]
+
+pub trait Foo {
+ type Assoc = u8;
+ fn bar(&self) {}
+}
+
+impl Foo for i32 {
+ // error - the following trait items need to be reimplemented as
+ // `Assoc` was overridden: `bar`
+ type Assoc = i32;
+}
+```
+
+To fix this, add an implementation for each default method from the trait:
+
+```
+#![feature(associated_type_defaults)]
+
+pub trait Foo {
+ type Assoc = u8;
+ fn bar(&self) {}
+}
+
+impl Foo for i32 {
+ type Assoc = i32;
+ fn bar(&self) {} // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0401.md b/compiler/rustc_error_codes/src/error_codes/E0401.md
new file mode 100644
index 000000000..4c93053d5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0401.md
@@ -0,0 +1,106 @@
+Inner items do not inherit type or const parameters from the functions
+they are embedded in.
+
+Erroneous code example:
+
+```compile_fail,E0401
+fn foo<T>(x: T) {
+ fn bar(y: T) { // T is defined in the "outer" function
+ // ..
+ }
+ bar(x);
+}
+```
+
+This won't compile either:
+
+```compile_fail,E0401
+fn foo<T>(x: T) {
+ type MaybeT = Option<T>;
+ // ...
+}
+```
+
+Or this:
+
+```compile_fail,E0401
+fn foo<T>(x: T) {
+ struct Foo {
+ x: T,
+ }
+ // ...
+}
+```
+
+Items inside functions are basically just like top-level items, except
+that they can only be used from the function they are in.
+
+There are a couple of solutions for this.
+
+If the item is a function, you may use a closure:
+
+```
+fn foo<T>(x: T) {
+ let bar = |y: T| { // explicit type annotation may not be necessary
+ // ..
+ };
+ bar(x);
+}
+```
+
+For a generic item, you can copy over the parameters:
+
+```
+fn foo<T>(x: T) {
+ fn bar<T>(y: T) {
+ // ..
+ }
+ bar(x);
+}
+```
+
+```
+fn foo<T>(x: T) {
+ type MaybeT<T> = Option<T>;
+}
+```
+
+Be sure to copy over any bounds as well:
+
+```
+fn foo<T: Copy>(x: T) {
+ fn bar<T: Copy>(y: T) {
+ // ..
+ }
+ bar(x);
+}
+```
+
+```
+fn foo<T: Copy>(x: T) {
+ struct Foo<T: Copy> {
+ x: T,
+ }
+}
+```
+
+This may require additional type hints in the function body.
+
+In case the item is a function inside an `impl`, defining a private helper
+function might be easier:
+
+```
+# struct Foo<T>(T);
+impl<T> Foo<T> {
+ pub fn foo(&self, x: T) {
+ self.bar(x);
+ }
+
+ fn bar(&self, y: T) {
+ // ..
+ }
+}
+```
+
+For default impls in traits, the private helper solution won't work; however,
+closures or copying the parameters should still work, as sketched below.
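+
+For example, a sketch of the closure approach inside a default method of a
+hypothetical trait:
+
+```
+trait Foo<T: Copy> {
+    fn foo(&self, x: T) {
+        let bar = |y: T| {
+            let _ = y;
+        };
+        bar(x);
+    }
+}
+```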
diff --git a/compiler/rustc_error_codes/src/error_codes/E0403.md b/compiler/rustc_error_codes/src/error_codes/E0403.md
new file mode 100644
index 000000000..5f4a40ce9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0403.md
@@ -0,0 +1,25 @@
+Some type parameters have the same name.
+
+Erroneous code example:
+
+```compile_fail,E0403
+fn f<T, T>(s: T, u: T) {} // error: the name `T` is already used for a generic
+ // parameter in this item's generic parameters
+```
+
+Please verify that none of the type parameters are misspelled, and rename any
+clashing parameters. Example:
+
+```
+fn f<T, Y>(s: T, u: Y) {} // ok!
+```
+
+Type parameters in an associated item also cannot shadow parameters from the
+containing item:
+
+```compile_fail,E0403
+trait Foo<T> {
+ fn do_something(&self) -> T;
+ fn do_something_else<T: Clone>(&self, bar: T);
+}
+```
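+
+Renaming the inner parameter (a sketch) resolves the clash:
+
+```
+trait Foo<T> {
+    fn do_something(&self) -> T;
+    fn do_something_else<U: Clone>(&self, bar: U); // ok!
+}
+```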
diff --git a/compiler/rustc_error_codes/src/error_codes/E0404.md b/compiler/rustc_error_codes/src/error_codes/E0404.md
new file mode 100644
index 000000000..d6fa51e61
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0404.md
@@ -0,0 +1,57 @@
+A type that is not a trait was used in a trait position, such as a bound
+or `impl`.
+
+Erroneous code example:
+
+```compile_fail,E0404
+struct Foo;
+struct Bar;
+
+impl Foo for Bar {} // error: `Foo` is not a trait
+fn baz<T: Foo>(t: T) {} // error: `Foo` is not a trait
+```
+
+Another erroneous code example:
+
+```compile_fail,E0404
+type Foo = Iterator<Item=String>;
+
+fn bar<T: Foo>(t: T) {} // error: `Foo` is a type alias
+```
+
+Please verify that the trait's name was not misspelled and that the right
+identifier was used. Example:
+
+```
+trait Foo {
+ // some functions
+}
+struct Bar;
+
+impl Foo for Bar { // ok!
+ // functions implementation
+}
+
+fn baz<T: Foo>(t: T) {} // ok!
+```
+
+Alternatively, you could introduce a new trait with your desired restrictions
+as a supertrait:
+
+```
+# trait Foo {}
+# struct Bar;
+# impl Foo for Bar {}
+trait Qux: Foo {} // Anything that implements Qux also needs to implement Foo
+fn baz<T: Qux>(t: T) {} // also ok!
+```
+
+Finally, if you are on nightly and want to use a trait alias
+instead of a type alias, you should use `#![feature(trait_alias)]`:
+
+```
+#![feature(trait_alias)]
+trait Foo = Iterator<Item=String>;
+
+fn bar<T: Foo>(t: T) {} // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0405.md b/compiler/rustc_error_codes/src/error_codes/E0405.md
new file mode 100644
index 000000000..ff1e8c0be
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0405.md
@@ -0,0 +1,29 @@
+The code refers to a trait that is not in scope.
+
+Erroneous code example:
+
+```compile_fail,E0405
+struct Foo;
+
+impl SomeTrait for Foo {} // error: trait `SomeTrait` is not in scope
+```
+
+Please verify that the name of the trait wasn't misspelled and ensure that it
+was imported. Example:
+
+```
+# #[cfg(for_demonstration_only)]
+// solution 1:
+use some_file::SomeTrait;
+
+// solution 2:
+trait SomeTrait {
+ // some functions
+}
+
+struct Foo;
+
+impl SomeTrait for Foo { // ok!
+ // implements functions
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0407.md b/compiler/rustc_error_codes/src/error_codes/E0407.md
new file mode 100644
index 000000000..fa26c77a1
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0407.md
@@ -0,0 +1,52 @@
+A definition of a method not in the implemented trait was given in a trait
+implementation.
+
+Erroneous code example:
+
+```compile_fail,E0407
+trait Foo {
+ fn a();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn a() {}
+ fn b() {} // error: method `b` is not a member of trait `Foo`
+}
+```
+
+Please verify that you didn't misspell the method name and that you used the
+correct trait. First example:
+
+```
+trait Foo {
+ fn a();
+ fn b();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn a() {}
+ fn b() {} // ok!
+}
+```
+
+Second example:
+
+```
+trait Foo {
+ fn a();
+}
+
+struct Bar;
+
+impl Foo for Bar {
+ fn a() {}
+}
+
+impl Bar {
+ fn b() {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0408.md b/compiler/rustc_error_codes/src/error_codes/E0408.md
new file mode 100644
index 000000000..3acdbb740
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0408.md
@@ -0,0 +1,41 @@
+An "or" pattern was used where the variable bindings are not consistently bound
+across patterns.
+
+Erroneous code example:
+
+```compile_fail,E0408
+match x {
+ Some(y) | None => { /* use y */ } // error: variable `y` from pattern #1 is
+ // not bound in pattern #2
+ _ => ()
+}
+```
+
+Here, `y` is bound to the contents of the `Some` and can be used within the
+block corresponding to the match arm. However, in case `x` is `None`, we have
+not specified what `y` is, and the block will use a nonexistent variable.
+
+To fix this error, either split into multiple match arms:
+
+```
+let x = Some(1);
+match x {
+ Some(y) => { /* use y */ }
+ None => { /* ... */ }
+}
+```
+
+or, bind the variable to a field of the same type in all sub-patterns of the
+or pattern:
+
+```
+let x = (0, 2);
+match x {
+ (0, y) | (y, 0) => { /* use y */}
+ _ => {}
+}
+```
+
+In this example, if `x` matches the pattern `(0, _)`, the second field is
+bound to `y`. If it matches `(_, 0)`, the first field is bound to `y`; so in
+all cases `y` is bound to some value.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0409.md b/compiler/rustc_error_codes/src/error_codes/E0409.md
new file mode 100644
index 000000000..53eb0fd05
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0409.md
@@ -0,0 +1,38 @@
+An "or" pattern was used where the variable bindings are not consistently bound
+across patterns.
+
+Erroneous code example:
+
+```compile_fail,E0409
+let x = (0, 2);
+match x {
+ (0, ref y) | (y, 0) => { /* use y */} // error: variable `y` is bound with
+ // different mode in pattern #2
+ // than in pattern #1
+ _ => ()
+}
+```
+
+Here, `y` is bound by-value in one case and by-reference in the other.
+
+To fix this error, just use the same mode in both cases.
+Generally using `ref` or `ref mut` where not already used will fix this:
+
+```
+let x = (0, 2);
+match x {
+ (0, ref y) | (ref y, 0) => { /* use y */}
+ _ => ()
+}
+```
+
+Alternatively, split the pattern:
+
+```
+let x = (0, 2);
+match x {
+ (y, 0) => { /* use y */ }
+ (0, ref y) => { /* use y */}
+ _ => ()
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0411.md b/compiler/rustc_error_codes/src/error_codes/E0411.md
new file mode 100644
index 000000000..d82171533
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0411.md
@@ -0,0 +1,56 @@
+The `Self` keyword was used outside an impl, trait, or type definition.
+
+Erroneous code example:
+
+```compile_fail,E0411
+<Self>::foo; // error: use of `Self` outside of an impl, trait, or type
+ // definition
+```
+
+The `Self` keyword represents the current type, which explains why it can only
+be used inside an impl, trait, or type definition. It gives access to the
+associated items of a type:
+
+```
+trait Foo {
+ type Bar;
+}
+
+trait Baz : Foo {
+ fn bar() -> Self::Bar; // like this
+}
+```
+
+However, be careful when two types have a common associated type:
+
+```compile_fail
+trait Foo {
+ type Bar;
+}
+
+trait Foo2 {
+ type Bar;
+}
+
+trait Baz : Foo + Foo2 {
+ fn bar() -> Self::Bar;
+ // error: ambiguous associated type `Bar` in bounds of `Self`
+}
+```
+
+This problem can be solved by specifying from which trait we want to use the
+`Bar` type:
+
+```
+trait Foo {
+ type Bar;
+}
+
+trait Foo2 {
+ type Bar;
+}
+
+trait Baz : Foo + Foo2 {
+ fn bar() -> <Self as Foo>::Bar; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0412.md b/compiler/rustc_error_codes/src/error_codes/E0412.md
new file mode 100644
index 000000000..d9ebc852b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0412.md
@@ -0,0 +1,64 @@
+A used type name is not in scope.
+
+Erroneous code examples:
+
+```compile_fail,E0412
+impl Something {} // error: type name `Something` is not in scope
+
+// or:
+
+trait Foo {
+ fn bar(N); // error: type name `N` is not in scope
+}
+
+// or:
+
+fn foo(x: T) {} // type name `T` is not in scope
+```
+
+To fix this error, please verify that you didn't misspell the type name and
+that you declared or imported it into scope. Examples:
+
+```
+struct Something;
+
+impl Something {} // ok!
+
+// or:
+
+trait Foo {
+ type N;
+
+ fn bar(_: Self::N); // ok!
+}
+
+// or:
+
+fn foo<T>(x: T) {} // ok!
+```
+
+Another case that causes this error is when a type is imported only into a
+parent module. To fix this, you can follow the compiler's suggestion and use
+`File` directly, or write `use super::File;`, which will import the type from
+the parent module. An example that causes this error is below:
+
+```compile_fail,E0412
+use std::fs::File;
+
+mod foo {
+ fn some_function(f: File) {}
+}
+```
+
+```
+use std::fs::File;
+
+mod foo {
+ // either
+ use super::File;
+ // or
+ // use std::fs::File;
+ fn foo(f: File) {}
+}
+# fn main() {} // don't insert it for us; that'll break imports
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0415.md b/compiler/rustc_error_codes/src/error_codes/E0415.md
new file mode 100644
index 000000000..97d733a7b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0415.md
@@ -0,0 +1,14 @@
+More than one function parameter has the same name.
+
+Erroneous code example:
+
+```compile_fail,E0415
+fn foo(f: i32, f: i32) {} // error: identifier `f` is bound more than
+ // once in this parameter list
+```
+
+Please verify you didn't misspell the parameters' names. Example:
+
+```
+fn foo(f: i32, g: i32) {} // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0416.md b/compiler/rustc_error_codes/src/error_codes/E0416.md
new file mode 100644
index 000000000..7bc316daf
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0416.md
@@ -0,0 +1,28 @@
+An identifier is bound more than once in a pattern.
+
+Erroneous code example:
+
+```compile_fail,E0416
+match (1, 2) {
+ (x, x) => {} // error: identifier `x` is bound more than once in the
+ // same pattern
+}
+```
+
+Please verify you didn't misspell the identifiers' names. Example:
+
+```
+match (1, 2) {
+ (x, y) => {} // ok!
+}
+```
+
+Or perhaps you meant to check that the two values are equal? Consider using
+a match guard:
+
+```
+# let (A, B, C) = (1, 2, 3);
+match (A, B, C) {
+ (x, x2, see) if x == x2 => { /* A and B are equal, do one thing */ }
+ (y, z, see) => { /* A and B unequal; do another thing */ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0422.md b/compiler/rustc_error_codes/src/error_codes/E0422.md
new file mode 100644
index 000000000..828a52e73
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0422.md
@@ -0,0 +1,22 @@
+An identifier that is neither defined nor a struct was used.
+
+Erroneous code example:
+
+```compile_fail,E0422
+fn main () {
+ let x = Foo { x: 1, y: 2 };
+}
+```
+
+In this case, `Foo` is undefined, so it inherently isn't anything, and
+definitely not a struct.
+
+```compile_fail
+fn main () {
+ let foo = 1;
+ let x = foo { x: 1, y: 2 };
+}
+```
+
+In this case, `foo` is defined, but is not a struct, so Rust can't use it as
+one.
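+
+Defining `Foo` as a struct fixes the first example:
+
+```
+struct Foo {
+    x: i32,
+    y: i32,
+}
+
+fn main() {
+    let x = Foo { x: 1, y: 2 }; // ok!
+}
+```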
diff --git a/compiler/rustc_error_codes/src/error_codes/E0423.md b/compiler/rustc_error_codes/src/error_codes/E0423.md
new file mode 100644
index 000000000..a98ada17a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0423.md
@@ -0,0 +1,46 @@
+An identifier was used like a function name, or a value was expected, and the
+identifier exists but belongs to a different namespace.
+
+Erroneous code example:
+
+```compile_fail,E0423
+struct Foo { a: bool };
+
+let f = Foo();
+// error: expected function, tuple struct or tuple variant, found `Foo`
+// `Foo` is a struct name, but this expression uses it like a function name
+```
+
+Please verify you didn't misspell the name of what you actually wanted to use
+here. Example:
+
+```
+fn Foo() -> u32 { 0 }
+
+let f = Foo(); // ok!
+```
+
+It is common to forget the trailing `!` on macro invocations, which would also
+yield this error:
+
+```compile_fail,E0423
+println("");
+// error: expected function, tuple struct or tuple variant,
+// found macro `println`
+// did you mean `println!(...)`? (notice the trailing `!`)
+```
+
+Another case where this error is emitted is when a value is expected, but
+something else is found:
+
+```compile_fail,E0423
+pub mod a {
+ pub const I: i32 = 1;
+}
+
+fn h1() -> i32 {
+ a.I
+ //~^ ERROR expected value, found module `a`
+ // did you mean `a::I`?
+}
+```
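+
+Following the suggestion, use a path to the constant:
+
+```
+pub mod a {
+    pub const I: i32 = 1;
+}
+
+fn h1() -> i32 {
+    a::I // ok!
+}
+# fn main() {}
+```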
diff --git a/compiler/rustc_error_codes/src/error_codes/E0424.md b/compiler/rustc_error_codes/src/error_codes/E0424.md
new file mode 100644
index 000000000..a58c16b59
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0424.md
@@ -0,0 +1,40 @@
+The `self` keyword was used inside of an associated function without a "`self`
+receiver" parameter.
+
+Erroneous code example:
+
+```compile_fail,E0424
+struct Foo;
+
+impl Foo {
+ // `bar` is a method, because it has a receiver parameter.
+ fn bar(&self) {}
+
+ // `foo` is not a method, because it has no receiver parameter.
+ fn foo() {
+ self.bar(); // error: `self` value is a keyword only available in
+ // methods with a `self` parameter
+ }
+}
+```
+
+The `self` keyword can only be used inside methods, which are associated
+functions (functions defined inside of a `trait` or `impl` block) that have a
+`self` receiver as their first parameter, like `self`, `&self`, `&mut self` or
+`self: &mut Pin<Self>` (this last one is an example of an ["arbitrary `self`
+type"](https://github.com/rust-lang/rust/issues/44874)).
+
+Check if the associated function's parameter list should have contained a `self`
+receiver for it to be a method, and add it if so. Example:
+
+```
+struct Foo;
+
+impl Foo {
+ fn bar(&self) {}
+
+ fn foo(self) { // `foo` is now a method.
+ self.bar(); // ok!
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0425.md b/compiler/rustc_error_codes/src/error_codes/E0425.md
new file mode 100644
index 000000000..13e71b850
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0425.md
@@ -0,0 +1,60 @@
+An unresolved name was used.
+
+Erroneous code examples:
+
+```compile_fail,E0425
+something_that_doesnt_exist::foo;
+// error: unresolved name `something_that_doesnt_exist::foo`
+
+// or:
+
+trait Foo {
+ fn bar() {
+ Self; // error: unresolved name `Self`
+ }
+}
+
+// or:
+
+let x = unknown_variable; // error: unresolved name `unknown_variable`
+```
+
+Please verify that the name wasn't misspelled and ensure that the
+identifier being referred to is valid for the given situation. Example:
+
+```
+enum something_that_does_exist {
+ Foo,
+}
+```
+
+Or:
+
+```
+mod something_that_does_exist {
+ pub static foo: i32 = 0i32;
+}
+
+something_that_does_exist::foo; // ok!
+```
+
+Or:
+
+```
+let unknown_variable = 12u32;
+let x = unknown_variable; // ok!
+```
+
+If the item is not defined in the current module, it must be imported using a
+`use` statement, like so:
+
+```
+# mod foo { pub fn bar() {} }
+# fn main() {
+use foo::bar;
+bar();
+# }
+```
+
+If the item you are importing is not defined in some super-module of the
+current module, then it must also be declared as public (e.g., `pub fn`).
diff --git a/compiler/rustc_error_codes/src/error_codes/E0426.md b/compiler/rustc_error_codes/src/error_codes/E0426.md
new file mode 100644
index 000000000..275a83e60
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0426.md
@@ -0,0 +1,17 @@
+An undeclared label was used.
+
+Erroneous code example:
+
+```compile_fail,E0426
+loop {
+ break 'a; // error: use of undeclared label `'a`
+}
+```
+
+Please verify you spelled or declared the label correctly. Example:
+
+```
+'a: loop {
+ break 'a; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0428.md b/compiler/rustc_error_codes/src/error_codes/E0428.md
new file mode 100644
index 000000000..38db0b4bc
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0428.md
@@ -0,0 +1,16 @@
+A type or module has been defined more than once.
+
+Erroneous code example:
+
+```compile_fail,E0428
+struct Bar;
+struct Bar; // error: duplicate definition of value `Bar`
+```
+
+Please verify that you didn't misspell the type/module's name, or
+remove/rename the duplicated one. Example:
+
+```
+struct Bar;
+struct Bar2; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0429.md b/compiler/rustc_error_codes/src/error_codes/E0429.md
new file mode 100644
index 000000000..8c5fd8624
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0429.md
@@ -0,0 +1,21 @@
+The `self` keyword cannot appear alone as the last segment in a `use`
+declaration.
+
+Erroneous code example:
+
+```compile_fail,E0429
+use std::fmt::self; // error: `self` imports are only allowed within a { } list
+```
+
+To use a namespace itself in addition to some of its members, `self` may appear
+as part of a brace-enclosed list of imports:
+
+```
+use std::fmt::{self, Debug};
+```
+
+If you only want to import the namespace, do so directly:
+
+```
+use std::fmt;
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0430.md b/compiler/rustc_error_codes/src/error_codes/E0430.md
new file mode 100644
index 000000000..8cca0f21e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0430.md
@@ -0,0 +1,18 @@
+The `self` import appears more than once in the list.
+
+Erroneous code example:
+
+```compile_fail,E0430
+use something::{self, self}; // error: `self` import can only appear once in
+ // the list
+```
+
+Please verify you didn't misspell the import name or remove the duplicated
+`self` import. Example:
+
+```
+# mod something {}
+# fn main() {
+use something::{self}; // ok!
+# }
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0431.md b/compiler/rustc_error_codes/src/error_codes/E0431.md
new file mode 100644
index 000000000..1b70f5f1d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0431.md
@@ -0,0 +1,11 @@
+An invalid `self` import was made.
+
+Erroneous code example:
+
+```compile_fail,E0431
+use {self}; // error: `self` import can only appear in an import list with a
+ // non-empty prefix
+```
+
+You cannot import the current module into itself; please remove this import
+or verify you didn't misspell it.
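+
+If you meant to import a module itself, give the import a non-empty prefix
+(the path below is only an illustration):
+
+```
+use std::fmt::{self}; // ok: `self` here refers to `std::fmt`
+```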
diff --git a/compiler/rustc_error_codes/src/error_codes/E0432.md b/compiler/rustc_error_codes/src/error_codes/E0432.md
new file mode 100644
index 000000000..2920e2623
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0432.md
@@ -0,0 +1,47 @@
+An import was unresolved.
+
+Erroneous code example:
+
+```compile_fail,E0432
+use something::Foo; // error: unresolved import `something::Foo`.
+```
+
+In Rust 2015, paths in `use` statements are relative to the crate root. To
+import items relative to the current and parent modules, use the `self::` and
+`super::` prefixes, respectively.
+
+In Rust 2018 or later, paths in `use` statements are relative to the current
+module unless they begin with the name of a crate or a literal `crate::`, in
+which case they start from the crate root. As in Rust 2015 code, the `self::`
+and `super::` prefixes refer to the current and parent modules respectively.
+
+Also verify that you didn't misspell the import name and that the import exists
+in the module from where you tried to import it. Example:
+
+```
+use self::something::Foo; // Ok.
+
+mod something {
+ pub struct Foo;
+}
+# fn main() {}
+```
+
+If you tried to use a module from an external crate and are using Rust 2015,
+you may have missed the `extern crate` declaration (which is usually placed in
+the crate root):
+
+```edition2015
+extern crate core; // Required to use the `core` crate in Rust 2015.
+
+use core::any;
+# fn main() {}
+```
+
+Since Rust 2018 the `extern crate` declaration is not required and
+you can instead just `use` it:
+
+```edition2018
+use core::any; // No extern crate required in Rust 2018.
+# fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0433.md b/compiler/rustc_error_codes/src/error_codes/E0433.md
new file mode 100644
index 000000000..5a64c13c9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0433.md
@@ -0,0 +1,27 @@
+An undeclared crate, module, or type was used.
+
+Erroneous code example:
+
+```compile_fail,E0433
+let map = HashMap::new();
+// error: failed to resolve: use of undeclared type `HashMap`
+```
+
+Please verify that you didn't misspell the type/module's name and that you
+didn't forget to import it:
+
+```
+use std::collections::HashMap; // HashMap has been imported.
+let map: HashMap<u32, u32> = HashMap::new(); // So it can be used!
+```
+
+If you expected to use a crate name:
+
+```compile_fail
+use ferris_wheel::BigO;
+// error: failed to resolve: use of undeclared crate or module `ferris_wheel`
+```
+
+Make sure the crate has been added as a dependency in `Cargo.toml`.
+
+To use a module from your current crate, add the `crate::` prefix to the path.
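+
+A small sketch of the `crate::` prefix (the module name is illustrative):
+
+```
+mod utils {
+    pub fn helper() {}
+}
+
+fn main() {
+    crate::utils::helper(); // resolved from the crate root
+}
+```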
diff --git a/compiler/rustc_error_codes/src/error_codes/E0434.md b/compiler/rustc_error_codes/src/error_codes/E0434.md
new file mode 100644
index 000000000..8fd60412b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0434.md
@@ -0,0 +1,40 @@
+A variable used inside an inner function comes from a dynamic environment.
+
+Erroneous code example:
+
+```compile_fail,E0434
+fn foo() {
+ let y = 5;
+ fn bar() -> u32 {
+ y // error: can't capture dynamic environment in a fn item; use the
+ // || { ... } closure form instead.
+ }
+}
+```
+
+Inner functions do not have access to their containing environment. To fix this
+error, you can replace the function with a closure:
+
+```
+fn foo() {
+ let y = 5;
+ let bar = || {
+ y
+ };
+}
+```
+
+Or replace the captured variable with a constant or a static item:
+
+```
+fn foo() {
+ static mut X: u32 = 4;
+ const Y: u32 = 5;
+ fn bar() -> u32 {
+ unsafe {
+ X = 3;
+ }
+ Y
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0435.md b/compiler/rustc_error_codes/src/error_codes/E0435.md
new file mode 100644
index 000000000..798a20d48
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0435.md
@@ -0,0 +1,27 @@
+A non-constant value was used in a constant expression.
+
+Erroneous code example:
+
+```compile_fail,E0435
+let foo = 42;
+let a: [u8; foo]; // error: attempt to use a non-constant value in a constant
+```
+
+'constant' means 'a compile-time value'.
+
+More details can be found in the [Variables and Mutability] section of the book.
+
+[Variables and Mutability]: https://doc.rust-lang.org/book/ch03-01-variables-and-mutability.html#differences-between-variables-and-constants
+
+To fix this error, please replace the value with a constant. Example:
+
+```
+let a: [u8; 42]; // ok!
+```
+
+Or:
+
+```
+const FOO: usize = 42;
+let a: [u8; FOO]; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0436.md b/compiler/rustc_error_codes/src/error_codes/E0436.md
new file mode 100644
index 000000000..48ecc49e9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0436.md
@@ -0,0 +1,48 @@
+The functional record update syntax was used on something other than a struct.
+
+Erroneous code example:
+
+```compile_fail,E0436
+enum PublicationFrequency {
+ Weekly,
+ SemiMonthly { days: (u8, u8), annual_special: bool },
+}
+
+fn one_up_competitor(competitor_frequency: PublicationFrequency)
+ -> PublicationFrequency {
+ match competitor_frequency {
+ PublicationFrequency::Weekly => PublicationFrequency::SemiMonthly {
+ days: (1, 15), annual_special: false
+ },
+ c @ PublicationFrequency::SemiMonthly{ .. } =>
+ PublicationFrequency::SemiMonthly {
+ annual_special: true, ..c // error: functional record update
+ // syntax requires a struct
+ }
+ }
+}
+```
+
+The functional record update syntax is only allowed for structs (struct-like
+enum variants don't qualify, for example). To fix the previous code, rewrite the
+expression without functional record update syntax:
+
+```
+enum PublicationFrequency {
+ Weekly,
+ SemiMonthly { days: (u8, u8), annual_special: bool },
+}
+
+fn one_up_competitor(competitor_frequency: PublicationFrequency)
+ -> PublicationFrequency {
+ match competitor_frequency {
+ PublicationFrequency::Weekly => PublicationFrequency::SemiMonthly {
+ days: (1, 15), annual_special: false
+ },
+ PublicationFrequency::SemiMonthly{ days, .. } =>
+ PublicationFrequency::SemiMonthly {
+ days, annual_special: true // ok!
+ }
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0437.md b/compiler/rustc_error_codes/src/error_codes/E0437.md
new file mode 100644
index 000000000..0f924ba69
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0437.md
@@ -0,0 +1,23 @@
+An associated type whose name does not match any of the associated types
+in the trait was used when implementing the trait.
+
+Erroneous code example:
+
+```compile_fail,E0437
+trait Foo {}
+
+impl Foo for i32 {
+ type Bar = bool;
+}
+```
+
+Trait implementations can only implement associated types that are members of
+the trait in question.
+
+The solution to this problem is to remove the extraneous associated type:
+
+```
+trait Foo {}
+
+impl Foo for i32 {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0438.md b/compiler/rustc_error_codes/src/error_codes/E0438.md
new file mode 100644
index 000000000..13723bc30
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0438.md
@@ -0,0 +1,23 @@
+An associated constant whose name does not match any of the associated constants
+in the trait was used when implementing the trait.
+
+Erroneous code example:
+
+```compile_fail,E0438
+trait Foo {}
+
+impl Foo for i32 {
+ const BAR: bool = true;
+}
+```
+
+Trait implementations can only implement associated constants that are
+members of the trait in question.
+
+The solution to this problem is to remove the extraneous associated constant:
+
+```
+trait Foo {}
+
+impl Foo for i32 {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0439.md b/compiler/rustc_error_codes/src/error_codes/E0439.md
new file mode 100644
index 000000000..24268aef2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0439.md
@@ -0,0 +1,25 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+The length of the platform-intrinsic function `simd_shuffle` wasn't specified.
+
+Erroneous code example:
+
+```ignore (no longer emitted)
+#![feature(platform_intrinsics)]
+
+extern "platform-intrinsic" {
+ fn simd_shuffle<A,B>(a: A, b: A, c: [u32; 8]) -> B;
+ // error: invalid `simd_shuffle`, needs length: `simd_shuffle`
+}
+```
+
+The `simd_shuffle` function needs the length of the array passed as its last
+parameter to be encoded in its name. Example:
+
+```
+#![feature(platform_intrinsics)]
+
+extern "platform-intrinsic" {
+ fn simd_shuffle8<A,B>(a: A, b: A, c: [u32; 8]) -> B;
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0445.md b/compiler/rustc_error_codes/src/error_codes/E0445.md
new file mode 100644
index 000000000..e6a28a9c2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0445.md
@@ -0,0 +1,33 @@
+A private trait was used on a public type parameter bound.
+
+Erroneous code examples:
+
+```compile_fail,E0445
+#![deny(private_in_public)]
+
+trait Foo {
+ fn dummy(&self) { }
+}
+
+pub trait Bar : Foo {} // error: private trait in public interface
+pub struct Bar2<T: Foo>(pub T); // same error
+pub fn foo<T: Foo> (t: T) {} // same error
+
+fn main() {}
+```
+
+To solve this error, please ensure that the trait is also public. The trait
+can be made inaccessible if necessary by placing it into a private inner
+module, but it still has to be marked with `pub`. Example:
+
+```
+pub trait Foo { // we set the Foo trait public
+ fn dummy(&self) { }
+}
+
+pub trait Bar : Foo {} // ok!
+pub struct Bar2<T: Foo>(pub T); // ok!
+pub fn foo<T: Foo> (t: T) {} // ok!
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0446.md b/compiler/rustc_error_codes/src/error_codes/E0446.md
new file mode 100644
index 000000000..6ec47c496
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0446.md
@@ -0,0 +1,50 @@
+A private type was used in a public type signature.
+
+Erroneous code example:
+
+```compile_fail,E0446
+#![deny(private_in_public)]
+struct Bar(u32);
+
+mod foo {
+ use crate::Bar;
+ pub fn bar() -> Bar { // error: private type in public interface
+ Bar(0)
+ }
+}
+
+fn main() {}
+```
+
+There are two ways to solve this error. The first is to restrict the
+visibility of the public signature to a module that also has access to the
+private type. This is done by using `pub(crate)` or
+`pub(in crate::my_mod::etc)`. Example:
+
+```
+struct Bar(u32);
+
+mod foo {
+ use crate::Bar;
+ pub(crate) fn bar() -> Bar { // only public to crate root
+ Bar(0)
+ }
+}
+
+fn main() {}
+```
+
+The other way to solve this error is to make the private type public.
+Example:
+
+```
+pub struct Bar(u32); // we set the Bar type public
+mod foo {
+ use crate::Bar;
+ pub fn bar() -> Bar { // ok!
+ Bar(0)
+ }
+}
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0447.md b/compiler/rustc_error_codes/src/error_codes/E0447.md
new file mode 100644
index 000000000..af8cd8d6d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0447.md
@@ -0,0 +1,15 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+The `pub` keyword was used inside a function.
+
+Erroneous code example:
+
+```
+fn foo() {
+ pub struct Bar; // error: visibility has no effect inside functions
+}
+```
+
+Since we cannot access items defined inside a function, the visibility of its
+items does not impact outer code. So using the `pub` keyword in this context
+is invalid.
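+
+Declare the item without a visibility qualifier instead:
+
+```
+fn foo() {
+    struct Bar; // ok: visible to the rest of `foo` only
+}
+```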
diff --git a/compiler/rustc_error_codes/src/error_codes/E0448.md b/compiler/rustc_error_codes/src/error_codes/E0448.md
new file mode 100644
index 000000000..ba096f9e9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0448.md
@@ -0,0 +1,28 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+The `pub` keyword was used inside a public enum.
+
+Erroneous code example:
+
+```compile_fail
+pub enum Foo {
+ pub Bar, // error: unnecessary `pub` visibility
+}
+```
+
+Since the enum is already public, adding `pub` on one of its elements is
+unnecessary. Example:
+
+```compile_fail
+enum Foo {
+ pub Bar, // not ok!
+}
+```
+
+This is the correct syntax:
+
+```
+pub enum Foo {
+ Bar, // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0449.md b/compiler/rustc_error_codes/src/error_codes/E0449.md
new file mode 100644
index 000000000..9afc67689
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0449.md
@@ -0,0 +1,38 @@
+A visibility qualifier was used when it was unnecessary.
+
+Erroneous code examples:
+
+```compile_fail,E0449
+struct Bar;
+
+trait Foo {
+ fn foo();
+}
+
+pub impl Bar {} // error: unnecessary visibility qualifier
+
+pub impl Foo for Bar { // error: unnecessary visibility qualifier
+ pub fn foo() {} // error: unnecessary visibility qualifier
+}
+```
+
+To fix this error, please remove the visibility qualifier when it is not
+required. Example:
+
+```
+struct Bar;
+
+trait Foo {
+ fn foo();
+}
+
+// Directly implemented methods share the visibility of the type itself,
+// so `pub` is unnecessary here
+impl Bar {}
+
+// Trait methods share the visibility of the trait, so `pub` is
+// unnecessary in either case
+impl Foo for Bar {
+ fn foo() {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0451.md b/compiler/rustc_error_codes/src/error_codes/E0451.md
new file mode 100644
index 000000000..a12378a20
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0451.md
@@ -0,0 +1,48 @@
+A struct constructor with private fields was invoked.
+
+Erroneous code example:
+
+```compile_fail,E0451
+mod bar {
+ pub struct Foo {
+ pub a: isize,
+ b: isize,
+ }
+}
+
+let f = bar::Foo{ a: 0, b: 0 }; // error: field `b` of struct `bar::Foo`
+ // is private
+```
+
+To fix this error, please ensure that all the fields of the struct are public,
+or implement a function for easy instantiation. Examples:
+
+```
+mod bar {
+ pub struct Foo {
+ pub a: isize,
+ pub b: isize, // we set `b` field public
+ }
+}
+
+let f = bar::Foo{ a: 0, b: 0 }; // ok!
+```
+
+Or:
+
+```
+mod bar {
+ pub struct Foo {
+ pub a: isize,
+ b: isize, // still private
+ }
+
+ impl Foo {
+ pub fn new() -> Foo { // we create a method to instantiate `Foo`
+ Foo { a: 0, b: 0 }
+ }
+ }
+}
+
+let f = bar::Foo::new(); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0452.md b/compiler/rustc_error_codes/src/error_codes/E0452.md
new file mode 100644
index 000000000..429813a7c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0452.md
@@ -0,0 +1,16 @@
+An invalid lint attribute has been given.
+
+Erroneous code example:
+
+```compile_fail,E0452
+#![allow(foo = "")] // error: malformed lint attribute
+```
+
+Lint attributes only accept a list of identifiers (where each identifier is a
+lint name). Ensure the attribute is of this form:
+
+```
+#![allow(foo)] // ok!
+// or:
+#![allow(foo, foo2)] // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0453.md b/compiler/rustc_error_codes/src/error_codes/E0453.md
new file mode 100644
index 000000000..11789db8f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0453.md
@@ -0,0 +1,42 @@
+A lint check attribute was overruled by a `forbid` directive set as an
+attribute on an enclosing scope, or on the command line with the `-F` option.
+
+Example of erroneous code:
+
+```compile_fail,E0453
+#![forbid(non_snake_case)]
+
+#[allow(non_snake_case)]
+fn main() {
+ let MyNumber = 2; // error: allow(non_snake_case) overruled by outer
+ // forbid(non_snake_case)
+}
+```
+
+The `forbid` lint setting, like `deny`, turns the corresponding compiler
+warning into a hard error. Unlike `deny`, `forbid` prevents itself from being
+overridden by inner attributes.
+
+If you're sure you want to override the lint check, you can change `forbid` to
+`deny` (or use `-D` instead of `-F` if the `forbid` setting was given as a
+command-line option) to allow the inner lint check attribute:
+
+```
+#![deny(non_snake_case)]
+
+#[allow(non_snake_case)]
+fn main() {
+ let MyNumber = 2; // ok!
+}
+```
+
+Otherwise, edit the code to pass the lint check, and remove the overruled
+attribute:
+
+```
+#![forbid(non_snake_case)]
+
+fn main() {
+ let my_number = 2;
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0454.md b/compiler/rustc_error_codes/src/error_codes/E0454.md
new file mode 100644
index 000000000..95a22b92e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0454.md
@@ -0,0 +1,15 @@
+A `#[link]` attribute was given with an empty name.
+
+Erroneous code example:
+
+```compile_fail,E0454
+#[link(name = "")] extern "C" {}
+// error: `#[link(name = "")]` given with empty name
+```
+
+The Rust compiler cannot link to an external library if you don't give it its
+name. Example:
+
+```no_run
+#[link(name = "some_lib")] extern "C" {} // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0455.md b/compiler/rustc_error_codes/src/error_codes/E0455.md
new file mode 100644
index 000000000..437dacaff
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0455.md
@@ -0,0 +1,26 @@
+Some linking kinds are target-specific and not supported on all platforms.
+
+Linking with `kind=framework` is only supported when targeting macOS,
+as frameworks are specific to that operating system.
+
+Similarly, `kind=raw-dylib` is only supported when targeting Windows-like
+platforms.
+
+Erroneous code example:
+
+```ignore (should-compile_fail-but-cannot-doctest-conditionally-without-macos)
+#[link(name = "FooCoreServices", kind = "framework")] extern "C" {}
+// OS used to compile is Linux for example
+```
+
+To solve this error you can use conditional compilation:
+
+```
+#[cfg_attr(target_os = "macos",
+           link(name = "FooCoreServices", kind = "framework"))]
+extern "C" {}
+```
+
+Learn more in the [Conditional Compilation][conditional-compilation] section
+of the Reference.
+
+[conditional-compilation]: https://doc.rust-lang.org/reference/attributes.html#conditional-compilation
diff --git a/compiler/rustc_error_codes/src/error_codes/E0458.md b/compiler/rustc_error_codes/src/error_codes/E0458.md
new file mode 100644
index 000000000..1b280cba4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0458.md
@@ -0,0 +1,15 @@
+An unknown "kind" was specified for a link attribute.
+
+Erroneous code example:
+
+```compile_fail,E0458
+#[link(kind = "wonderful_unicorn")] extern "C" {}
+// error: unknown kind: `wonderful_unicorn`
+```
+
+Please specify a valid "kind" value, from one of the following:
+
+* static
+* dylib
+* framework
+* raw-dylib
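+
+For example, with a valid kind (the library name is illustrative):
+
+```no_run
+#[link(kind = "dylib", name = "some_lib")] extern "C" {} // ok!
+```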
diff --git a/compiler/rustc_error_codes/src/error_codes/E0459.md b/compiler/rustc_error_codes/src/error_codes/E0459.md
new file mode 100644
index 000000000..4a49a7654
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0459.md
@@ -0,0 +1,15 @@
+A link was used without a name parameter.
+
+Erroneous code example:
+
+```compile_fail,E0459
+#[link(kind = "dylib")] extern "C" {}
+// error: `#[link(...)]` specified without `name = "foo"`
+```
+
+Please add the name parameter to allow the Rust compiler to find the library
+you want. Example:
+
+```no_run
+#[link(kind = "dylib", name = "some_lib")] extern "C" {} // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0463.md b/compiler/rustc_error_codes/src/error_codes/E0463.md
new file mode 100644
index 000000000..d0cd1b1dc
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0463.md
@@ -0,0 +1,34 @@
+A plugin/crate was declared but cannot be found.
+
+Erroneous code example:
+
+```compile_fail,E0463
+#![feature(plugin)]
+#![plugin(cookie_monster)] // error: can't find crate for `cookie_monster`
+extern crate cake_is_a_lie; // error: can't find crate for `cake_is_a_lie`
+```
+
+You need to link your code to the relevant crate in order to be able to use it
+(through Cargo or the `-L` option of rustc, for example). Plugins are crates as
+well, and you link to them the same way.
+
+## Common causes
+
+- The crate is not present at all. If using Cargo, add it to `[dependencies]`
+ in Cargo.toml.
+- The crate is present, but under a different name. If using Cargo, look for
+ `package = ` under `[dependencies]` in Cargo.toml.
+
+## Common causes for missing `std` or `core`
+
+- You are cross-compiling for a target which doesn't have `std` prepackaged.
+ Consider one of the following:
+ + Adding a pre-compiled version of std with `rustup target add`
+ + Building std from source with `cargo build -Z build-std`
+ + Using `#![no_std]` at the crate root, so you won't need `std` in the first
+ place.
+- You are developing the compiler itself and haven't built libstd from source.
+ You can usually build it with `x.py build library/std`. More information
+ about x.py is available in the [rustc-dev-guide].
+
+[rustc-dev-guide]: https://rustc-dev-guide.rust-lang.org/building/how-to-build-and-run.html#building-the-compiler
diff --git a/compiler/rustc_error_codes/src/error_codes/E0464.md b/compiler/rustc_error_codes/src/error_codes/E0464.md
new file mode 100644
index 000000000..9108d856c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0464.md
@@ -0,0 +1,6 @@
+The compiler found multiple library files with the requested crate name.
+
+This error can occur in several different cases -- for example, when using
+`extern crate` or passing `--extern` options without crate paths. It can also be
+caused by caching issues with the build directory, in which case `cargo clean`
+may help.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0466.md b/compiler/rustc_error_codes/src/error_codes/E0466.md
new file mode 100644
index 000000000..7aefedbc0
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0466.md
@@ -0,0 +1,34 @@
+Macro import declaration was malformed.
+
+Erroneous code examples:
+
+```compile_fail,E0466
+#[macro_use(a_macro(another_macro))] // error: invalid import declaration
+extern crate core as some_crate;
+
+#[macro_use(i_want = "some_macros")] // error: invalid import declaration
+extern crate core as another_crate;
+```
+
+This is a syntax error at the level of attribute declarations. The proper
+syntax for macro imports is the following:
+
+```ignore (cannot-doctest-multicrate-project)
+// In some_crate:
+#[macro_export]
+macro_rules! get_tacos {
+ ...
+}
+
+#[macro_export]
+macro_rules! get_pimientos {
+ ...
+}
+
+// In your crate:
+#[macro_use(get_tacos, get_pimientos)] // It imports `get_tacos` and
+extern crate some_crate; // `get_pimientos` macros from some_crate
+```
+
+If you would like to import all exported macros, write `#[macro_use]` with no
+arguments.
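+
+For example:
+
+```ignore (cannot-doctest-multicrate-project)
+#[macro_use] // imports all exported macros from `some_crate`
+extern crate some_crate;
+```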
diff --git a/compiler/rustc_error_codes/src/error_codes/E0468.md b/compiler/rustc_error_codes/src/error_codes/E0468.md
new file mode 100644
index 000000000..cf8664718
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0468.md
@@ -0,0 +1,27 @@
+A non-root module tried to import macros from another crate.
+
+Example of erroneous code:
+
+```compile_fail,E0468
+mod foo {
+ #[macro_use(debug_assert)] // error: must be at crate root to import
+ extern crate core; // macros from another crate
+ fn run_macro() { debug_assert!(true); }
+}
+```
+
+Only `extern crate` imports at the crate root level are allowed to import
+macros.
+
+Either move the macro import to the crate root, or do without the foreign
+macros. This will work:
+
+```
+#[macro_use(debug_assert)] // ok!
+extern crate core;
+
+mod foo {
+ fn run_macro() { debug_assert!(true); }
+}
+# fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0469.md b/compiler/rustc_error_codes/src/error_codes/E0469.md
new file mode 100644
index 000000000..22db976aa
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0469.md
@@ -0,0 +1,39 @@
+A macro listed for import was not found.
+
+Erroneous code example:
+
+```compile_fail,E0469
+#[macro_use(drink, be_merry)] // error: imported macro not found
+extern crate alloc;
+
+fn main() {
+ // ...
+}
+```
+
+Either the listed macro is not contained in the imported crate, or it is not
+exported from the given crate.
+
+This could be caused by a typo: double-check the names of the macros listed
+for import, and verify that the crate in question exports them.
+
+A working version would be:
+
+```ignore (cannot-doctest-multicrate-project)
+// In some_crate crate:
+#[macro_export]
+macro_rules! eat {
+ ...
+}
+
+#[macro_export]
+macro_rules! drink {
+ ...
+}
+
+// In your crate:
+#[macro_use(eat, drink)]
+extern crate some_crate; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0477.md b/compiler/rustc_error_codes/src/error_codes/E0477.md
new file mode 100644
index 000000000..c6be8dc70
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0477.md
@@ -0,0 +1,46 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+The type does not fulfill the required lifetime.
+
+Erroneous code example:
+
+```compile_fail
+use std::sync::Mutex;
+
+struct MyString<'a> {
+ data: &'a str,
+}
+
+fn i_want_static_closure<F>(a: F)
+ where F: Fn() + 'static {}
+
+fn print_string<'a>(s: Mutex<MyString<'a>>) {
+
+ i_want_static_closure(move || { // error: this closure has lifetime 'a
+ // rather than 'static
+ println!("{}", s.lock().unwrap().data);
+ });
+}
+```
+
+In this example, the closure does not satisfy the `'static` lifetime constraint.
+To fix this error, you need to double-check the lifetime of the type. Here, we
+can fix this problem by giving `s` a static lifetime:
+
+```
+use std::sync::Mutex;
+
+struct MyString<'a> {
+ data: &'a str,
+}
+
+fn i_want_static_closure<F>(a: F)
+ where F: Fn() + 'static {}
+
+fn print_string(s: Mutex<MyString<'static>>) {
+
+ i_want_static_closure(move || { // ok!
+ println!("{}", s.lock().unwrap().data);
+ });
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0478.md b/compiler/rustc_error_codes/src/error_codes/E0478.md
new file mode 100644
index 000000000..4bc5fde2e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0478.md
@@ -0,0 +1,28 @@
+A lifetime bound was not satisfied.
+
+Erroneous code example:
+
+```compile_fail,E0478
+// Check that the explicit lifetime bound (`'SnowWhite`, in this example) must
+// outlive all the superbounds from the trait (`'kiss`, in this example).
+
+trait Wedding<'t>: 't { }
+
+struct Prince<'kiss, 'SnowWhite> {
+ child: Box<Wedding<'kiss> + 'SnowWhite>,
+ // error: lifetime bound not satisfied
+}
+```
+
+In this example, the `'SnowWhite` lifetime is supposed to outlive the `'kiss`
+lifetime, but the declaration of the `Prince` struct doesn't enforce it. To fix
+this issue, you need to specify it:
+
+```
+trait Wedding<'t>: 't { }
+
+struct Prince<'kiss, 'SnowWhite: 'kiss> { // You say here that 'SnowWhite
+ // must live longer than 'kiss.
+ child: Box<Wedding<'kiss> + 'SnowWhite>, // And now it's all good!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0482.md b/compiler/rustc_error_codes/src/error_codes/E0482.md
new file mode 100644
index 000000000..ad363816e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0482.md
@@ -0,0 +1,75 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+A lifetime of a returned value does not outlive the function call.
+
+Erroneous code example:
+
+```compile_fail,E0700
+fn prefix<'a>(
+ words: impl Iterator<Item = &'a str>
+) -> impl Iterator<Item = String> { // error!
+ words.map(|v| format!("foo-{}", v))
+}
+```
+
+To fix this error, make the lifetime of the returned value explicit:
+
+```
+fn prefix<'a>(
+ words: impl Iterator<Item = &'a str> + 'a
+) -> impl Iterator<Item = String> + 'a { // ok!
+ words.map(|v| format!("foo-{}", v))
+}
+```
+
+The [`impl Trait`] feature in this example uses an implicit `'static` lifetime
+restriction in the returned type. However, the type implementing the
+`Iterator` passed to the function lives just as long as `'a`, which is not
+long enough.
+
+The solution involves adding a lifetime bound to both the function argument
+and the return value to make sure that the values inside the iterator
+are not dropped when the function goes out of scope.
+
+An alternative solution would be to guarantee that the `Item` references
+in the iterator are alive for the whole lifetime of the program.
+
+```
+fn prefix(
+ words: impl Iterator<Item = &'static str>
+) -> impl Iterator<Item = String> { // ok!
+ words.map(|v| format!("foo-{}", v))
+}
+```
+
+A similar lifetime problem might arise when returning closures:
+
+```compile_fail,E0700
+fn foo(
+ x: &mut Vec<i32>
+) -> impl FnMut(&mut Vec<i32>) -> &[i32] { // error!
+ |y| {
+ y.append(x);
+ y
+ }
+}
+```
+
+Analogously, a solution here is to use an explicit return lifetime
+and move ownership of the variable into the closure.
+
+```
+fn foo<'a>(
+ x: &'a mut Vec<i32>
+) -> impl FnMut(&mut Vec<i32>) -> &[i32] + 'a { // ok!
+ move |y| {
+ y.append(x);
+ y
+ }
+}
+```
+
+To better understand how lifetimes are treated with [`impl Trait`],
+please see [RFC 1951].
+
+[`impl Trait`]: https://doc.rust-lang.org/reference/types/impl-trait.html
+[RFC 1951]: https://rust-lang.github.io/rfcs/1951-expand-impl-trait.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0491.md b/compiler/rustc_error_codes/src/error_codes/E0491.md
new file mode 100644
index 000000000..d45663f3a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0491.md
@@ -0,0 +1,36 @@
+A reference has a longer lifetime than the data it references.
+
+Erroneous code example:
+
+```compile_fail,E0491
+struct Foo<'a> {
+ x: fn(&'a i32),
+}
+
+trait Trait<'a, 'b> {
+ type Out;
+}
+
+impl<'a, 'b> Trait<'a, 'b> for usize {
+ type Out = &'a Foo<'b>; // error!
+}
+```
+
+Here, the problem is that the compiler cannot be sure that the `'b` lifetime
+will live longer than `'a`, which is required to guarantee that `Trait::Out`
+will always be a reference pointing to existing data. So in this case, we
+just need to tell the compiler that `'b` must outlive `'a`:
+
+```
+struct Foo<'a> {
+ x: fn(&'a i32),
+}
+
+trait Trait<'a, 'b> {
+ type Out;
+}
+
+impl<'a, 'b: 'a> Trait<'a, 'b> for usize { // we added the lifetime enforcement
+ type Out = &'a Foo<'b>; // it now works!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0492.md b/compiler/rustc_error_codes/src/error_codes/E0492.md
new file mode 100644
index 000000000..79e7c069a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0492.md
@@ -0,0 +1,71 @@
+A borrow of a constant containing interior mutability was attempted.
+
+Erroneous code example:
+
+```compile_fail,E0492
+use std::sync::atomic::AtomicUsize;
+
+const A: AtomicUsize = AtomicUsize::new(0);
+const B: &'static AtomicUsize = &A;
+// error: cannot borrow a constant which may contain interior mutability,
+// create a static instead
+```
+
+A `const` represents a constant value that should never change. If one takes
+a `&` reference to the constant, then one is taking a pointer to some memory
+location containing the value. Normally this is perfectly fine: most values
+can't be changed via a shared `&` pointer, but interior mutability would allow
+it. That is, a constant value could be mutated. On the other hand, a `static` is
+explicitly a single memory location, which can be mutated at will.
+
+So, in order to solve this error, use statics which are `Sync`:
+
+```
+use std::sync::atomic::AtomicUsize;
+
+static A: AtomicUsize = AtomicUsize::new(0);
+static B: &'static AtomicUsize = &A; // ok!
+```
+
+You can also have this error while using a cell type:
+
+```compile_fail,E0492
+use std::cell::Cell;
+
+const A: Cell<usize> = Cell::new(1);
+const B: &Cell<usize> = &A;
+// error: cannot borrow a constant which may contain interior mutability,
+// create a static instead
+
+// or:
+struct C { a: Cell<usize> }
+
+const D: C = C { a: Cell::new(1) };
+const E: &Cell<usize> = &D.a; // error
+
+// or:
+const F: &C = &D; // error
+```
+
+This is because cell types perform operations that are not thread-safe. Due to
+this, they don't implement `Sync` and thus can't be placed in statics.
+
+However, if you still wish to use these types, you can achieve this by an unsafe
+wrapper:
+
+```
+use std::cell::Cell;
+use std::marker::Sync;
+
+struct NotThreadSafe<T> {
+ value: Cell<T>,
+}
+
+unsafe impl<T> Sync for NotThreadSafe<T> {}
+
+static A: NotThreadSafe<usize> = NotThreadSafe { value : Cell::new(1) };
+static B: &'static NotThreadSafe<usize> = &A; // ok!
+```
+
+Remember this solution is unsafe! You will have to ensure that accesses to the
+cell are synchronized.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0493.md b/compiler/rustc_error_codes/src/error_codes/E0493.md
new file mode 100644
index 000000000..e891129ef
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0493.md
@@ -0,0 +1,42 @@
+A value with a custom `Drop` implementation may be dropped during const-eval.
+
+Erroneous code example:
+
+```compile_fail,E0493
+enum DropType {
+ A,
+}
+
+impl Drop for DropType {
+ fn drop(&mut self) {}
+}
+
+struct Foo {
+ field1: DropType,
+}
+
+static FOO: Foo = Foo { field1: (DropType::A, DropType::A).1 }; // error!
+```
+
+The problem here is that if the given type or one of its fields implements the
+`Drop` trait, this `Drop` implementation cannot be called within a const
+context since it may run arbitrary, non-const-checked code. To prevent this
+issue, ensure all values with a custom `Drop` implementation escape the
+initializer.
+
+```
+enum DropType {
+ A,
+}
+
+impl Drop for DropType {
+ fn drop(&mut self) {}
+}
+
+struct Foo {
+ field1: DropType,
+}
+
+static FOO: Foo = Foo { field1: DropType::A }; // We initialize all fields
+ // by hand.
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0495.md b/compiler/rustc_error_codes/src/error_codes/E0495.md
new file mode 100644
index 000000000..cd10e7193
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0495.md
@@ -0,0 +1,40 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+A lifetime cannot be determined in the given situation.
+
+Erroneous code example:
+
+```compile_fail
+fn transmute_lifetime<'a, 'b, T>(t: &'a (T,)) -> &'b T {
+ match (&t,) { // error!
+ ((u,),) => u,
+ }
+}
+
+let y = Box::new((42,));
+let x = transmute_lifetime(&y);
+```
+
+In this code, you have two ways to solve this issue:
+ 1. Enforce that `'a` lives at least as long as `'b`.
+ 2. Use the same lifetime requirement for both input and output values.
+
+For the first solution, replace `'a` with `'a: 'b`:
+
+```
+fn transmute_lifetime<'a: 'b, 'b, T>(t: &'a (T,)) -> &'b T {
+ match (&t,) { // ok!
+ ((u,),) => u,
+ }
+}
+```
+
+For the second, simply remove `'b` so that both use `'a`:
+
+```
+fn transmute_lifetime<'a, T>(t: &'a (T,)) -> &'a T {
+ match (&t,) { // ok!
+ ((u,),) => u,
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0496.md b/compiler/rustc_error_codes/src/error_codes/E0496.md
new file mode 100644
index 000000000..83d65cd3e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0496.md
@@ -0,0 +1,31 @@
+A lifetime name is shadowing another lifetime name.
+
+Erroneous code example:
+
+```compile_fail,E0496
+struct Foo<'a> {
+ a: &'a i32,
+}
+
+impl<'a> Foo<'a> {
+ fn f<'a>(x: &'a i32) { // error: lifetime name `'a` shadows a lifetime
+ // name that is already in scope
+ }
+}
+```
+
+Please change the name of one of the lifetimes to remove this error. Example:
+
+```
+struct Foo<'a> {
+ a: &'a i32,
+}
+
+impl<'a> Foo<'a> {
+ fn f<'b>(x: &'b i32) { // ok!
+ }
+}
+
+fn main() {
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0497.md b/compiler/rustc_error_codes/src/error_codes/E0497.md
new file mode 100644
index 000000000..ef2882415
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0497.md
@@ -0,0 +1,14 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+A stability attribute was used outside of the standard library.
+
+Erroneous code example:
+
+```compile_fail
+#[stable] // error: stability attributes may not be used outside of the
+ // standard library
+fn foo() {}
+```
+
+It is not possible to use stability attributes outside of the standard library.
+Also, for now, it is not possible to write deprecation messages either.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0498.md b/compiler/rustc_error_codes/src/error_codes/E0498.md
new file mode 100644
index 000000000..c9ea4a794
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0498.md
@@ -0,0 +1,22 @@
+The `plugin` attribute was malformed.
+
+Erroneous code example:
+
+```compile_fail,E0498
+#![feature(plugin)]
+#![plugin(foo(args))] // error: invalid argument
+#![plugin(bar="test")] // error: invalid argument
+```
+
+The `#[plugin]` attribute should take a single argument: the name of the plugin.
+
+For example, for the plugin `foo`:
+
+```ignore (requires external plugin crate)
+#![feature(plugin)]
+#![plugin(foo)] // ok!
+```
+
+See the [`plugin` feature] section of the Unstable book for more details.
+
+[`plugin` feature]: https://doc.rust-lang.org/nightly/unstable-book/language-features/plugin.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0499.md b/compiler/rustc_error_codes/src/error_codes/E0499.md
new file mode 100644
index 000000000..a07e8eb3b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0499.md
@@ -0,0 +1,32 @@
+A variable was borrowed as mutable more than once.
+
+Erroneous code example:
+
+```compile_fail,E0499
+let mut i = 0;
+let mut x = &mut i;
+let mut a = &mut i;
+x;
+// error: cannot borrow `i` as mutable more than once at a time
+```
+
+Please note that in Rust, you can either have many immutable references, or one
+mutable reference. For more details you may want to read the
+[References & Borrowing][references-and-borrowing] section of the Book.
+
+[references-and-borrowing]: https://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html
+
+Example:
+
+```
+let mut i = 0;
+let mut x = &mut i; // ok!
+
+// or:
+let mut i = 0;
+let a = &i; // ok!
+let b = &i; // still ok!
+let c = &i; // still ok!
+b;
+a;
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0500.md b/compiler/rustc_error_codes/src/error_codes/E0500.md
new file mode 100644
index 000000000..fde31d2c7
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0500.md
@@ -0,0 +1,41 @@
+A borrowed variable was used by a closure.
+
+Erroneous code example:
+
+```compile_fail,E0500
+fn you_know_nothing(jon_snow: &mut i32) {
+ let nights_watch = &jon_snow;
+ let starks = || {
+ *jon_snow = 3; // error: closure requires unique access to `jon_snow`
+ // but it is already borrowed
+ };
+ println!("{}", nights_watch);
+}
+```
+
+Here, `jon_snow` is already borrowed by the `nights_watch` reference, so it
+cannot be borrowed by the `starks` closure at the same time. To fix this issue,
+you can create the closure after the borrow has ended:
+
+```
+fn you_know_nothing(jon_snow: &mut i32) {
+ let nights_watch = &jon_snow;
+ println!("{}", nights_watch);
+ let starks = || {
+ *jon_snow = 3;
+ };
+}
+```
+
+Or, if the type implements the `Clone` trait, you can clone it between
+closures:
+
+```
+fn you_know_nothing(jon_snow: &mut i32) {
+ let mut jon_copy = jon_snow.clone();
+ let starks = || {
+ *jon_snow = 3;
+ };
+ println!("{}", jon_copy);
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0501.md b/compiler/rustc_error_codes/src/error_codes/E0501.md
new file mode 100644
index 000000000..ffdbc4439
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0501.md
@@ -0,0 +1,79 @@
+A mutable variable is used but it is already captured by a closure.
+
+Erroneous code example:
+
+```compile_fail,E0501
+fn inside_closure(x: &mut i32) {
+ // Actions which require unique access
+}
+
+fn outside_closure(x: &mut i32) {
+ // Actions which require unique access
+}
+
+fn foo(a: &mut i32) {
+ let mut bar = || {
+ inside_closure(a)
+ };
+ outside_closure(a); // error: cannot borrow `*a` as mutable because previous
+ // closure requires unique access.
+ bar();
+}
+```
+
+This error indicates that a mutable variable is used while it is still
+captured by a closure. Because the closure has borrowed the variable, the
+variable is not available again until the closure's last use.
+
+Note that a capture will either move or borrow a variable, but in this
+situation, the closure is borrowing the variable. Take a look at the chapter
+on [Capturing][capturing] in Rust By Example for more information.
+
+[capturing]: https://doc.rust-lang.org/stable/rust-by-example/fn/closures/capture.html
+
+To fix this error, you can finish using the closure before using the captured
+variable:
+
+```
+fn inside_closure(x: &mut i32) {}
+fn outside_closure(x: &mut i32) {}
+
+fn foo(a: &mut i32) {
+ let mut bar = || {
+ inside_closure(a)
+ };
+ bar();
+ // borrow on `a` ends.
+ outside_closure(a); // ok!
+}
+```
+
+Or you can pass the variable as a parameter to the closure:
+
+```
+fn inside_closure(x: &mut i32) {}
+fn outside_closure(x: &mut i32) {}
+
+fn foo(a: &mut i32) {
+ let mut bar = |s: &mut i32| {
+ inside_closure(s)
+ };
+ outside_closure(a);
+ bar(a);
+}
+```
+
+It may be possible to define the closure later:
+
+```
+fn inside_closure(x: &mut i32) {}
+fn outside_closure(x: &mut i32) {}
+
+fn foo(a: &mut i32) {
+ outside_closure(a);
+ let mut bar = || {
+ inside_closure(a)
+ };
+ bar();
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0502.md b/compiler/rustc_error_codes/src/error_codes/E0502.md
new file mode 100644
index 000000000..dc3ffdfdd
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0502.md
@@ -0,0 +1,30 @@
+A variable already borrowed as immutable was borrowed as mutable.
+
+Erroneous code example:
+
+```compile_fail,E0502
+fn bar(x: &mut i32) {}
+fn foo(a: &mut i32) {
+ let y = &a; // a is borrowed as immutable.
+ bar(a); // error: cannot borrow `*a` as mutable because `a` is also borrowed
+ // as immutable
+ println!("{}", y);
+}
+```
+
+To fix this error, ensure that you don't have any other references to the
+variable before trying to access it mutably:
+
+```
+fn bar(x: &mut i32) {}
+fn foo(a: &mut i32) {
+ bar(a);
+ let y = &a; // ok!
+ println!("{}", y);
+}
+```
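+
+If reordering is not an option, a scoped block can end the immutable borrow
+before the mutable access; a minimal sketch:
+
+```
+fn bar(x: &mut i32) {}
+fn foo(a: &mut i32) {
+    {
+        let y = &a;
+        println!("{}", y);
+    } // the immutable borrow of `a` ends here
+    bar(a); // ok!
+}
+```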
+
+For more information on Rust's ownership system, take a look at the
+[References & Borrowing][references-and-borrowing] section of the Book.
+
+[references-and-borrowing]: https://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0503.md b/compiler/rustc_error_codes/src/error_codes/E0503.md
new file mode 100644
index 000000000..c52525fee
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0503.md
@@ -0,0 +1,54 @@
+A value was used after it was mutably borrowed.
+
+Erroneous code example:
+
+```compile_fail,E0503
+fn main() {
+ let mut value = 3;
+ // Create a mutable borrow of `value`.
+ let borrow = &mut value;
+ let _sum = value + 1; // error: cannot use `value` because
+ // it was mutably borrowed
+ println!("{}", borrow);
+}
+```
+
+In this example, `value` is mutably borrowed by `borrow` and therefore cannot
+be used to calculate `_sum`: reading a value while it is exclusively borrowed
+would violate Rust's mutability rules.
+
+You can fix this error by finishing using the borrow before the next use of
+the value:
+
+```
+fn main() {
+ let mut value = 3;
+ let borrow = &mut value;
+ println!("{}", borrow);
+ // The block has ended and with it the borrow.
+ // You can now use `value` again.
+ let _sum = value + 1;
+}
+```
+
+Or by cloning `value` before borrowing it:
+
+```
+fn main() {
+ let mut value = 3;
+ // We clone `value`, creating a copy.
+ let value_cloned = value.clone();
+ // The mutable borrow is a reference to `value` and
+ // not to `value_cloned`...
+ let borrow = &mut value;
+ // ... which means we can still use `value_cloned`,
+ let _sum = value_cloned + 1;
+ // even though the borrow only ends here.
+ println!("{}", borrow);
+}
+```
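+
+A scoped block also works: end the mutable borrow before the value is read
+again; a minimal sketch:
+
+```
+fn main() {
+    let mut value = 3;
+    {
+        let borrow = &mut value;
+        *borrow += 1;
+    } // the mutable borrow ends here
+    let _sum = value + 1; // ok!
+}
+```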
+
+For more information on Rust's ownership system, take a look at the
+[References & Borrowing][references-and-borrowing] section of the Book.
+
+[references-and-borrowing]: https://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0504.md b/compiler/rustc_error_codes/src/error_codes/E0504.md
new file mode 100644
index 000000000..bcbd00a86
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0504.md
@@ -0,0 +1,103 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+This error occurs when an attempt is made to move a borrowed variable into a
+closure.
+
+Erroneous code example:
+
+```compile_fail
+struct FancyNum {
+ num: u8,
+}
+
+fn main() {
+ let fancy_num = FancyNum { num: 5 };
+ let fancy_ref = &fancy_num;
+
+ let x = move || {
+ println!("child function: {}", fancy_num.num);
+ // error: cannot move `fancy_num` into closure because it is borrowed
+ };
+
+ x();
+ println!("main function: {}", fancy_ref.num);
+}
+```
+
+Here, `fancy_num` is borrowed by `fancy_ref` and so cannot be moved into
+the closure `x`. There is no way to move a value into a closure while it is
+borrowed, as that would invalidate the borrow.
+
+If the closure can't outlive the value being moved, try using a reference
+rather than moving:
+
+```
+struct FancyNum {
+ num: u8,
+}
+
+fn main() {
+ let fancy_num = FancyNum { num: 5 };
+ let fancy_ref = &fancy_num;
+
+ let x = move || {
+ // fancy_ref is usable here because it doesn't move `fancy_num`
+ println!("child function: {}", fancy_ref.num);
+ };
+
+ x();
+
+ println!("main function: {}", fancy_num.num);
+}
+```
+
+If the value has to be borrowed and then moved, try limiting the lifetime of
+the borrow using a scoped block:
+
+```
+struct FancyNum {
+ num: u8,
+}
+
+fn main() {
+ let fancy_num = FancyNum { num: 5 };
+
+ {
+ let fancy_ref = &fancy_num;
+ println!("main function: {}", fancy_ref.num);
+ // `fancy_ref` goes out of scope here
+ }
+
+ let x = move || {
+ // `fancy_num` can be moved now (no more references exist)
+ println!("child function: {}", fancy_num.num);
+ };
+
+ x();
+}
+```
+
+If the lifetime of a reference isn't enough, such as in the case of threading,
+consider using an `Arc` to create a reference-counted value:
+
+```
+use std::sync::Arc;
+use std::thread;
+
+struct FancyNum {
+ num: u8,
+}
+
+fn main() {
+ let fancy_ref1 = Arc::new(FancyNum { num: 5 });
+ let fancy_ref2 = fancy_ref1.clone();
+
+ let x = thread::spawn(move || {
+ // `fancy_ref1` can be moved and has a `'static` lifetime
+ println!("child thread: {}", fancy_ref1.num);
+ });
+
+ x.join().expect("child thread should finish");
+ println!("main thread: {}", fancy_ref2.num);
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0505.md b/compiler/rustc_error_codes/src/error_codes/E0505.md
new file mode 100644
index 000000000..b11e3c0e9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0505.md
@@ -0,0 +1,87 @@
+A value was moved out while it was still borrowed.
+
+Erroneous code example:
+
+```compile_fail,E0505
+struct Value {}
+
+fn borrow(val: &Value) {}
+
+fn eat(val: Value) {}
+
+fn main() {
+ let x = Value{};
+ let _ref_to_val: &Value = &x;
+ eat(x);
+ borrow(_ref_to_val);
+}
+```
+
+Here, the function `eat` takes ownership of `x`. However, `x` cannot be
+moved because the borrow held by `_ref_to_val` must remain valid until the
+call to `borrow`. To fix this, you can do a few different things:
+
+* Try to avoid moving the variable.
+* Release the borrow before the move.
+* Implement the `Copy` trait on the type.
+
+Examples:
+
+```
+struct Value {}
+
+fn borrow(val: &Value) {}
+
+fn eat(val: &Value) {}
+
+fn main() {
+ let x = Value{};
+
+ let ref_to_val: &Value = &x;
+ eat(&x); // pass by reference, if it's possible
+ borrow(ref_to_val);
+}
+```
+
+Or:
+
+```
+struct Value {}
+
+fn borrow(val: &Value) {}
+
+fn eat(val: Value) {}
+
+fn main() {
+ let x = Value{};
+
+ let ref_to_val: &Value = &x;
+ borrow(ref_to_val);
+ // ref_to_val is no longer used.
+ eat(x);
+}
+```
+
+Or:
+
+```
+#[derive(Clone, Copy)] // implement Copy trait
+struct Value {}
+
+fn borrow(val: &Value) {}
+
+fn eat(val: Value) {}
+
+fn main() {
+ let x = Value{};
+ let ref_to_val: &Value = &x;
+ eat(x); // it will be copied here.
+ borrow(ref_to_val);
+}
+```
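+
+If the type implements (or can derive) `Clone`, moving a clone is a further
+option; a minimal sketch:
+
+```
+#[derive(Clone)]
+struct Value {}
+
+fn borrow(val: &Value) {}
+
+fn eat(val: Value) {}
+
+fn main() {
+    let x = Value{};
+    let ref_to_val: &Value = &x;
+    eat(x.clone()); // a clone is moved instead, `x` stays borrowed
+    borrow(ref_to_val);
+}
+```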
+
+For more information on Rust's ownership system, take a look at the
+[References & Borrowing][references-and-borrowing] section of the Book.
+
+[references-and-borrowing]: https://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0506.md b/compiler/rustc_error_codes/src/error_codes/E0506.md
new file mode 100644
index 000000000..c312a0460
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0506.md
@@ -0,0 +1,73 @@
+An attempt was made to assign to a borrowed value.
+
+Erroneous code example:
+
+```compile_fail,E0506
+struct FancyNum {
+ num: u8,
+}
+
+let mut fancy_num = FancyNum { num: 5 };
+let fancy_ref = &fancy_num;
+fancy_num = FancyNum { num: 6 };
+// error: cannot assign to `fancy_num` because it is borrowed
+
+println!("Num: {}, Ref: {}", fancy_num.num, fancy_ref.num);
+```
+
+Because `fancy_ref` still holds a reference to `fancy_num`, `fancy_num` can't
+be assigned to a new value as it would invalidate the reference.
+
+One fix is to move out of `fancy_num` into a second variable before assigning:
+
+```
+struct FancyNum {
+ num: u8,
+}
+
+let mut fancy_num = FancyNum { num: 5 };
+let moved_num = fancy_num;
+fancy_num = FancyNum { num: 6 };
+
+println!("Num: {}, Moved num: {}", fancy_num.num, moved_num.num);
+```
+
+If the value has to be borrowed, try limiting the lifetime of the borrow using
+a scoped block:
+
+```
+struct FancyNum {
+ num: u8,
+}
+
+let mut fancy_num = FancyNum { num: 5 };
+
+{
+ let fancy_ref = &fancy_num;
+ println!("Ref: {}", fancy_ref.num);
+}
+
+// Works because `fancy_ref` is no longer in scope
+fancy_num = FancyNum { num: 6 };
+println!("Num: {}", fancy_num.num);
+```
+
+Or by moving the reference into a function:
+
+```
+struct FancyNum {
+ num: u8,
+}
+
+fn print_fancy_ref(fancy_ref: &FancyNum){
+ println!("Ref: {}", fancy_ref.num);
+}
+
+let mut fancy_num = FancyNum { num: 5 };
+
+print_fancy_ref(&fancy_num);
+
+// Works because function borrow has ended
+fancy_num = FancyNum { num: 6 };
+println!("Num: {}", fancy_num.num);
+```
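+
+If the point of the borrow is to update the value, assigning through a
+mutable reference avoids the conflict entirely; a minimal sketch:
+
+```
+struct FancyNum {
+    num: u8,
+}
+
+let mut fancy_num = FancyNum { num: 5 };
+let fancy_ref = &mut fancy_num;
+*fancy_ref = FancyNum { num: 6 }; // ok: assignment goes through the borrow
+println!("Num: {}", fancy_ref.num);
+```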
diff --git a/compiler/rustc_error_codes/src/error_codes/E0507.md b/compiler/rustc_error_codes/src/error_codes/E0507.md
new file mode 100644
index 000000000..254751fc4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0507.md
@@ -0,0 +1,133 @@
+A borrowed value was moved out.
+
+Erroneous code example:
+
+```compile_fail,E0507
+use std::cell::RefCell;
+
+struct TheDarkKnight;
+
+impl TheDarkKnight {
+ fn nothing_is_true(self) {}
+}
+
+fn main() {
+ let x = RefCell::new(TheDarkKnight);
+
+ x.borrow().nothing_is_true(); // error: cannot move out of borrowed content
+}
+```
+
+Here, the `nothing_is_true` method takes ownership of `self`. However,
+`self` cannot be moved because `.borrow()` only provides an `&TheDarkKnight`,
+which is a borrow of the content owned by the `RefCell`. To fix this error,
+you have three choices:
+
+* Try to avoid moving the variable.
+* Somehow reclaim the ownership.
+* Implement the `Copy` trait on the type.
+
+This can also happen when using a type implementing `Fn` or `FnMut`, as
+neither allows moving out of the captured state (such closures can usually be
+called more than once). Much of the advice below applies equally well to
+non-`FnOnce` closure bodies.
+
+Examples:
+
+```
+use std::cell::RefCell;
+
+struct TheDarkKnight;
+
+impl TheDarkKnight {
+ fn nothing_is_true(&self) {} // First case, we don't take ownership
+}
+
+fn main() {
+ let x = RefCell::new(TheDarkKnight);
+
+ x.borrow().nothing_is_true(); // ok!
+}
+```
+
+Or:
+
+```
+use std::cell::RefCell;
+
+struct TheDarkKnight;
+
+impl TheDarkKnight {
+ fn nothing_is_true(self) {}
+}
+
+fn main() {
+ let x = RefCell::new(TheDarkKnight);
+ let x = x.into_inner(); // we get back ownership
+
+ x.nothing_is_true(); // ok!
+}
+```
+
+Or:
+
+```
+use std::cell::RefCell;
+
+#[derive(Clone, Copy)] // we implement the Copy trait
+struct TheDarkKnight;
+
+impl TheDarkKnight {
+ fn nothing_is_true(self) {}
+}
+
+fn main() {
+ let x = RefCell::new(TheDarkKnight);
+
+ x.borrow().nothing_is_true(); // ok!
+}
+```
+
+Moving a member out of a mutably borrowed struct will also cause an E0507
+error:
+
+```compile_fail,E0507
+struct TheDarkKnight;
+
+impl TheDarkKnight {
+ fn nothing_is_true(self) {}
+}
+
+struct Batcave {
+ knight: TheDarkKnight
+}
+
+fn main() {
+ let mut cave = Batcave {
+ knight: TheDarkKnight
+ };
+ let borrowed = &mut cave;
+
+ borrowed.knight.nothing_is_true(); // E0507
+}
+```
+
+Moving out is fine only if you put something back in its place;
+`mem::replace` can be used for that:
+
+```
+# struct TheDarkKnight;
+# impl TheDarkKnight { fn nothing_is_true(self) {} }
+# struct Batcave { knight: TheDarkKnight }
+use std::mem;
+
+let mut cave = Batcave {
+ knight: TheDarkKnight
+};
+let borrowed = &mut cave;
+
+mem::replace(&mut borrowed.knight, TheDarkKnight).nothing_is_true(); // ok!
+```
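+
+When there is no cheap replacement value to swap in, wrapping the field in an
+`Option` and calling `take` is a common alternative; a minimal sketch (the
+`Option` wrapper is an assumption layered on the example above):
+
+```
+struct TheDarkKnight;
+impl TheDarkKnight { fn nothing_is_true(self) {} }
+
+struct Batcave {
+    knight: Option<TheDarkKnight>, // assumed wrapper for illustration
+}
+
+let mut cave = Batcave { knight: Some(TheDarkKnight) };
+let borrowed = &mut cave;
+
+if let Some(knight) = borrowed.knight.take() { // leaves `None` behind
+    knight.nothing_is_true(); // ok!
+}
+```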
+
+For more information on Rust's ownership system, take a look at the
+[References & Borrowing][references-and-borrowing] section of the Book.
+
+[references-and-borrowing]: https://doc.rust-lang.org/book/ch04-02-references-and-borrowing.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0508.md b/compiler/rustc_error_codes/src/error_codes/E0508.md
new file mode 100644
index 000000000..91865907b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0508.md
@@ -0,0 +1,54 @@
+A value was moved out of a non-copy fixed-size array.
+
+Erroneous code example:
+
+```compile_fail,E0508
+struct NonCopy;
+
+fn main() {
+ let array = [NonCopy; 1];
+ let _value = array[0]; // error: cannot move out of type `[NonCopy; 1]`,
+ // a non-copy fixed-size array
+}
+```
+
+The first element was moved out of the array, but this is not
+possible because `NonCopy` does not implement the `Copy` trait.
+
+Consider borrowing the element instead of moving it:
+
+```
+struct NonCopy;
+
+fn main() {
+ let array = [NonCopy; 1];
+ let _value = &array[0]; // Borrowing is allowed, unlike moving.
+}
+```
+
+Alternatively, if your type implements `Clone` and you need to own the value,
+consider borrowing and then cloning:
+
+```
+#[derive(Clone)]
+struct NonCopy;
+
+fn main() {
+ let array = [NonCopy; 1];
+ // Now you can clone the array element.
+ let _value = array[0].clone();
+}
+```
+
+If you really want to move the value out, you can use a destructuring array
+pattern to move it:
+
+```
+struct NonCopy;
+
+fn main() {
+ let array = [NonCopy; 1];
+ // Destructuring the array
+ let [_value] = array;
+}
+```
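+
+If you control the container, a `Vec` lets you move elements out at runtime;
+a minimal sketch:
+
+```
+struct NonCopy;
+
+fn main() {
+    let mut v = vec![NonCopy];
+    let _value = v.remove(0); // moving out of a `Vec` is allowed
+}
+```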
diff --git a/compiler/rustc_error_codes/src/error_codes/E0509.md b/compiler/rustc_error_codes/src/error_codes/E0509.md
new file mode 100644
index 000000000..9cbd7d695
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0509.md
@@ -0,0 +1,92 @@
+This error occurs when an attempt is made to move out of a value whose type
+implements the `Drop` trait.
+
+Erroneous code example:
+
+```compile_fail,E0509
+struct FancyNum {
+ num: usize
+}
+
+struct DropStruct {
+ fancy: FancyNum
+}
+
+impl Drop for DropStruct {
+ fn drop(&mut self) {
+ // Destruct DropStruct, possibly using FancyNum
+ }
+}
+
+fn main() {
+ let drop_struct = DropStruct{fancy: FancyNum{num: 5}};
+ let fancy_field = drop_struct.fancy; // Error E0509
+ println!("Fancy: {}", fancy_field.num);
+ // implicit call to `drop_struct.drop()` as drop_struct goes out of scope
+}
+```
+
+Here, we tried to move a field out of a struct of type `DropStruct` which
+implements the `Drop` trait. However, a struct cannot be dropped if one or
+more of its fields have been moved.
+
+Structs implementing the `Drop` trait have an implicit destructor that gets
+called when they go out of scope. This destructor may use the fields of the
+struct, so moving out of the struct could make it impossible to run the
+destructor. Therefore, we must think of all values whose type implements the
+`Drop` trait as single units whose fields cannot be moved.
+
+This error can be fixed by creating a reference to the fields of a struct,
+enum, or tuple using the `ref` keyword:
+
+```
+struct FancyNum {
+ num: usize
+}
+
+struct DropStruct {
+ fancy: FancyNum
+}
+
+impl Drop for DropStruct {
+ fn drop(&mut self) {
+ // Destruct DropStruct, possibly using FancyNum
+ }
+}
+
+fn main() {
+ let drop_struct = DropStruct{fancy: FancyNum{num: 5}};
+ let ref fancy_field = drop_struct.fancy; // No more errors!
+ println!("Fancy: {}", fancy_field.num);
+ // implicit call to `drop_struct.drop()` as drop_struct goes out of scope
+}
+```
+
+Note that this technique can also be used in the arms of a match expression:
+
+```
+struct FancyNum {
+ num: usize
+}
+
+enum DropEnum {
+ Fancy(FancyNum)
+}
+
+impl Drop for DropEnum {
+ fn drop(&mut self) {
+ // Destruct DropEnum, possibly using FancyNum
+ }
+}
+
+fn main() {
+    // Creates an enum of type `DropEnum`, which implements `Drop`
+ let drop_enum = DropEnum::Fancy(FancyNum{num: 10});
+ match drop_enum {
+ // Creates a reference to the inside of `DropEnum::Fancy`
+ DropEnum::Fancy(ref fancy_field) => // No error!
+ println!("It was fancy-- {}!", fancy_field.num),
+ }
+ // implicit call to `drop_enum.drop()` as drop_enum goes out of scope
+}
+```
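+
+Another common workaround, assuming you can change the struct definition, is
+to store the field in an `Option` and move it out with `take`; a sketch:
+
+```
+struct FancyNum {
+    num: usize
+}
+
+struct DropStruct {
+    fancy: Option<FancyNum> // assumed wrapper for illustration
+}
+
+impl Drop for DropStruct {
+    fn drop(&mut self) {
+        // Destruct DropStruct, `self.fancy` may already be `None`
+    }
+}
+
+fn main() {
+    let mut drop_struct = DropStruct{fancy: Some(FancyNum{num: 5})};
+    let fancy_field = drop_struct.fancy.take(); // leaves `None` behind
+    if let Some(fancy) = fancy_field {
+        println!("Fancy: {}", fancy.num);
+    }
+}
+```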
diff --git a/compiler/rustc_error_codes/src/error_codes/E0510.md b/compiler/rustc_error_codes/src/error_codes/E0510.md
new file mode 100644
index 000000000..e045e04bd
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0510.md
@@ -0,0 +1,29 @@
+The matched value was assigned in a match guard.
+
+Erroneous code example:
+
+```compile_fail,E0510
+let mut x = Some(0);
+match x {
+ None => {}
+ Some(_) if { x = None; false } => {} // error!
+ Some(_) => {}
+}
+```
+
+The value being matched on cannot be mutated in the match guards, as this
+could make the match non-exhaustive.
+
+Here, executing `x = None` would modify the value being matched and require
+us to go "back in time" to the `None` arm. To fix this, change the value in
+the match arm instead:
+
+```
+let mut x = Some(0);
+match x {
+ None => {}
+ Some(_) => {
+ x = None; // ok!
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0511.md b/compiler/rustc_error_codes/src/error_codes/E0511.md
new file mode 100644
index 000000000..5351a685e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0511.md
@@ -0,0 +1,33 @@
+Invalid monomorphization of an intrinsic function was used.
+
+Erroneous code example:
+
+```compile_fail,E0511
+#![feature(platform_intrinsics)]
+
+extern "platform-intrinsic" {
+ fn simd_add<T>(a: T, b: T) -> T;
+}
+
+fn main() {
+ unsafe { simd_add(0, 1); }
+ // error: invalid monomorphization of `simd_add` intrinsic
+}
+```
+
+The generic type has to be a SIMD type. Example:
+
+```
+#![feature(repr_simd)]
+#![feature(platform_intrinsics)]
+
+#[repr(simd)]
+#[derive(Copy, Clone)]
+struct i32x2(i32, i32);
+
+extern "platform-intrinsic" {
+ fn simd_add<T>(a: T, b: T) -> T;
+}
+
+unsafe { simd_add(i32x2(0, 0), i32x2(1, 2)); } // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0512.md b/compiler/rustc_error_codes/src/error_codes/E0512.md
new file mode 100644
index 000000000..00c096122
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0512.md
@@ -0,0 +1,25 @@
+Transmute with two differently sized types was attempted.
+
+Erroneous code example:
+
+```compile_fail,E0512
+fn takes_u8(_: u8) {}
+
+fn main() {
+ unsafe { takes_u8(::std::mem::transmute(0u16)); }
+ // error: cannot transmute between types of different sizes,
+ // or dependently-sized types
+}
+```
+
+Use types with the same size, or use the expected type directly. Example:
+
+```
+fn takes_u8(_: u8) {}
+
+fn main() {
+ unsafe { takes_u8(::std::mem::transmute(0i8)); } // ok!
+ // or:
+ unsafe { takes_u8(0u8); } // ok!
+}
+```
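+
+When the sizes genuinely differ, an explicit conversion usually expresses the
+intent better than `transmute`; a minimal sketch:
+
+```
+fn takes_u8(_: u8) {}
+
+fn main() {
+    takes_u8(0u16 as u8); // ok: an explicit, truncating cast
+}
+```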
diff --git a/compiler/rustc_error_codes/src/error_codes/E0515.md b/compiler/rustc_error_codes/src/error_codes/E0515.md
new file mode 100644
index 000000000..0f4fbf672
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0515.md
@@ -0,0 +1,37 @@
+A reference to a local variable was returned.
+
+Erroneous code example:
+
+```compile_fail,E0515
+fn get_dangling_reference() -> &'static i32 {
+ let x = 0;
+ &x
+}
+```
+
+```compile_fail,E0515
+use std::slice::Iter;
+fn get_dangling_iterator<'a>() -> Iter<'a, i32> {
+ let v = vec![1, 2, 3];
+ v.iter()
+}
+```
+
+Local variables, function parameters and temporaries are all dropped at the
+end of the function body, so a reference to them cannot be returned.
+
+Consider returning an owned value instead:
+
+```
+use std::vec::IntoIter;
+
+fn get_integer() -> i32 {
+ let x = 0;
+ x
+}
+
+fn get_owned_iterator() -> IntoIter<i32> {
+ let v = vec![1, 2, 3];
+ v.into_iter()
+}
+```
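+
+Returning a reference is fine when it borrows from the caller's data rather
+than from a local; a minimal sketch:
+
+```
+fn first_element(v: &[i32]) -> &i32 {
+    &v[0] // ok: the returned borrow lives as long as the input borrow
+}
+```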
diff --git a/compiler/rustc_error_codes/src/error_codes/E0516.md b/compiler/rustc_error_codes/src/error_codes/E0516.md
new file mode 100644
index 000000000..935c31bba
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0516.md
@@ -0,0 +1,17 @@
+The `typeof` keyword is currently reserved but unimplemented.
+
+Erroneous code example:
+
+```compile_fail,E0516
+fn main() {
+ let x: typeof(92) = 92;
+}
+```
+
+Try using type inference instead. Example:
+
+```
+fn main() {
+ let x = 92;
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0517.md b/compiler/rustc_error_codes/src/error_codes/E0517.md
new file mode 100644
index 000000000..ae802245b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0517.md
@@ -0,0 +1,44 @@
+A `#[repr(..)]` attribute was placed on an unsupported item.
+
+Examples of erroneous code:
+
+```compile_fail,E0517
+#[repr(C)]
+type Foo = u8;
+
+#[repr(packed)]
+enum Foo2 {Bar, Baz}
+
+#[repr(u8)]
+struct Foo3 {bar: bool, baz: bool}
+
+#[repr(C)]
+impl Foo3 {
+    // ...
+}
+```
+
+* The `#[repr(C)]` attribute can only be placed on structs and enums.
+* The `#[repr(packed)]` and `#[repr(simd)]` attributes only work on structs.
+* The `#[repr(u8)]`, `#[repr(i16)]`, etc. attributes only work on enums.
+
+These attributes do not work on typedefs, since typedefs are just aliases.
+
+Representations like `#[repr(u8)]`, `#[repr(i64)]` are for selecting the
+discriminant size for enums with no data fields on any of the variants, e.g.
+`enum Color {Red, Blue, Green}`, effectively setting the size of the enum to
+the size of the provided type. Such an enum can be cast to a value of the same
+type as well. In short, `#[repr(u8)]` makes the enum behave like an integer
+with a constrained set of allowed values.
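+
+For example, with a field-less enum such a cast is straightforward; a minimal
+sketch:
+
+```
+#[repr(u8)]
+enum Color { Red, Blue, Green }
+
+let b = Color::Blue as u8; // the discriminant is stored in a `u8`
+assert_eq!(b, 1);
+```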
+
+Only field-less enums can be cast to numerical primitives, so this attribute
+will not apply to structs.
+
+`#[repr(packed)]` reduces padding to make the struct size smaller. The
+representation of enums isn't strictly defined in Rust, and this attribute
+won't work on enums.
+
+`#[repr(simd)]` will give a struct consisting of a homogeneous series of machine
+types (i.e., `u8`, `i32`, etc) a representation that permits vectorization via
+SIMD. This doesn't make much sense for enums since they don't consist of a
+single list of data.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0518.md b/compiler/rustc_error_codes/src/error_codes/E0518.md
new file mode 100644
index 000000000..f04329bc4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0518.md
@@ -0,0 +1,23 @@
+An `#[inline(..)]` attribute was incorrectly placed on something other than a
+function or method.
+
+Example of erroneous code:
+
+```compile_fail,E0518
+#[inline(always)]
+struct Foo;
+
+#[inline(never)]
+impl Foo {
+ // ...
+}
+```
+
+`#[inline]` gives the compiler a hint about whether to attempt to inline a
+method or function. By default, the compiler does a pretty good job of
+figuring this out itself, but if you feel the need for annotations,
+`#[inline(always)]` and `#[inline(never)]` can override or force its decision.
+
+If you wish to apply this attribute to all methods in an impl, manually annotate
+each method; it is not possible to annotate the entire impl with an `#[inline]`
+attribute.
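+
+For contrast, a minimal sketch of valid placements:
+
+```
+#[inline(always)]
+fn quick() {}
+
+struct Foo;
+
+impl Foo {
+    #[inline(never)] // ok: annotate the method, not the impl
+    fn slow(&self) {}
+}
+```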
diff --git a/compiler/rustc_error_codes/src/error_codes/E0520.md b/compiler/rustc_error_codes/src/error_codes/E0520.md
new file mode 100644
index 000000000..f9d7e02e5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0520.md
@@ -0,0 +1,61 @@
+A non-default implementation was already made on this type so it cannot be
+specialized further.
+
+Erroneous code example:
+
+```compile_fail,E0520
+#![feature(specialization)]
+
+trait SpaceLlama {
+ fn fly(&self);
+}
+
+// applies to all T
+impl<T> SpaceLlama for T {
+ default fn fly(&self) {}
+}
+
+// non-default impl
+// applies to all `Clone` T and overrides the previous impl
+impl<T: Clone> SpaceLlama for T {
+ fn fly(&self) {}
+}
+
+// since `i32` is `Clone`, this conflicts with the previous implementation
+impl SpaceLlama for i32 {
+ default fn fly(&self) {}
+ // error: item `fly` is provided by an `impl` that specializes
+ // another, but the item in the parent `impl` is not marked
+ // `default` and so it cannot be specialized.
+}
+```
+
+Specialization only allows you to override `default` functions in
+implementations.
+
+To fix this error, you need to mark all the parent implementations as
+`default`.
+Example:
+
+```
+#![feature(specialization)]
+
+trait SpaceLlama {
+ fn fly(&self);
+}
+
+// applies to all T
+impl<T> SpaceLlama for T {
+ default fn fly(&self) {} // This is a parent implementation.
+}
+
+// applies to all `Clone` T; overrides the previous impl
+impl<T: Clone> SpaceLlama for T {
+ default fn fly(&self) {} // This is a parent implementation but was
+ // previously not a default one, causing the error
+}
+
+// applies to i32, overrides the previous two impls
+impl SpaceLlama for i32 {
+ fn fly(&self) {} // And now that's ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0521.md b/compiler/rustc_error_codes/src/error_codes/E0521.md
new file mode 100644
index 000000000..fedf6365f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0521.md
@@ -0,0 +1,28 @@
+Borrowed data escaped outside of a closure.
+
+Erroneous code example:
+
+```compile_fail,E0521
+let mut list: Vec<&str> = Vec::new();
+
+let _add = |el: &str| {
+ list.push(el); // error: `el` escapes the closure body here
+};
+```
+
+A type annotation on a closure parameter introduces a fresh lifetime.
+Consider dropping the annotation; the compiler can reliably infer the
+lifetimes.
+
+```
+let mut list: Vec<&str> = Vec::new();
+
+let _add = |el| {
+ list.push(el);
+};
+```
+
+See the [Closure type inference and annotation][closure-inference-annotation]
+and [Lifetime elision][lifetime-elision] sections of the Book for more
+details.
+
+[closure-inference-annotation]: https://doc.rust-lang.org/book/ch13-01-closures.html#closure-type-inference-and-annotation
+[lifetime-elision]: https://doc.rust-lang.org/reference/lifetime-elision.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0522.md b/compiler/rustc_error_codes/src/error_codes/E0522.md
new file mode 100644
index 000000000..83272314a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0522.md
@@ -0,0 +1,17 @@
+The lang attribute was used in an invalid context.
+
+Erroneous code example:
+
+```compile_fail,E0522
+#![feature(lang_items)]
+
+#[lang = "cookie"]
+fn cookie() -> ! { // error: definition of an unknown language item: `cookie`
+ loop {}
+}
+```
+
+The lang attribute is intended for marking special items that are built-in to
+Rust itself. This includes special traits (like `Copy` and `Sized`) that affect
+how the compiler behaves, as well as special functions that may be automatically
+invoked (such as the handler for out-of-bounds accesses when indexing a slice).
diff --git a/compiler/rustc_error_codes/src/error_codes/E0524.md b/compiler/rustc_error_codes/src/error_codes/E0524.md
new file mode 100644
index 000000000..bab241b5a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0524.md
@@ -0,0 +1,60 @@
+A variable which requires unique access is being used in more than one closure
+at the same time.
+
+Erroneous code example:
+
+```compile_fail,E0524
+fn set(x: &mut isize) {
+ *x += 4;
+}
+
+fn dragoooon(x: &mut isize) {
+ let mut c1 = || set(x);
+ let mut c2 = || set(x); // error!
+
+ c2();
+ c1();
+}
+```
+
+Several solutions are available to fix this issue. First, ask whether the
+variable really needs to be used in more than one closure at a time. If so,
+use reference-counted types such as `Rc` (or `Arc` if the closures run
+concurrently):
+
+```
+use std::rc::Rc;
+use std::cell::RefCell;
+
+fn set(x: &mut isize) {
+ *x += 4;
+}
+
+fn dragoooon(x: &mut isize) {
+ let x = Rc::new(RefCell::new(x));
+ let y = Rc::clone(&x);
+ let mut c1 = || { let mut x2 = x.borrow_mut(); set(&mut x2); };
+ let mut c2 = || { let mut x2 = y.borrow_mut(); set(&mut x2); }; // ok!
+
+ c2();
+ c1();
+}
+```
+
+If not, run the closures one at a time:
+
+```
+fn set(x: &mut isize) {
+ *x += 4;
+}
+
+fn dragoooon(x: &mut isize) {
+    { // With non-lexical lifetimes, this block isn't strictly necessary;
+      // it just makes the scopes clearer.
+ let mut c1 = || set(&mut *x);
+ c1();
+ } // `c1` has been dropped here so we're free to use `x` again!
+ let mut c2 = || set(&mut *x);
+ c2();
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0525.md b/compiler/rustc_error_codes/src/error_codes/E0525.md
new file mode 100644
index 000000000..a769440ca
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0525.md
@@ -0,0 +1,42 @@
+A closure was used but didn't implement the expected trait.
+
+Erroneous code example:
+
+```compile_fail,E0525
+struct X;
+
+fn foo<T>(_: T) {}
+fn bar<T: Fn(u32)>(_: T) {}
+
+fn main() {
+ let x = X;
+ let closure = |_| foo(x); // error: expected a closure that implements
+ // the `Fn` trait, but this closure only
+ // implements `FnOnce`
+ bar(closure);
+}
+```
+
+In the example above, `closure` is an `FnOnce` closure whereas the `bar`
+function expects an `Fn` closure. In this case, the fix is simple: derive the
+`Copy` and `Clone` traits for `struct X`:
+
+```
+#[derive(Clone, Copy)] // We implement `Clone` and `Copy` traits.
+struct X;
+
+fn foo<T>(_: T) {}
+fn bar<T: Fn(u32)>(_: T) {}
+
+fn main() {
+ let x = X;
+ let closure = |_| foo(x);
+ bar(closure); // ok!
+}
+```
+
+To better understand how these work in Rust, read the [Closures][closures]
+chapter of the Book.
+
+[closures]: https://doc.rust-lang.org/book/ch13-01-closures.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0527.md b/compiler/rustc_error_codes/src/error_codes/E0527.md
new file mode 100644
index 000000000..97ea31269
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0527.md
@@ -0,0 +1,26 @@
+The number of elements in an array or slice pattern differed from the number of
+elements in the array being matched.
+
+Example of erroneous code:
+
+```compile_fail,E0527
+let r = &[1, 2, 3, 4];
+match r {
+ &[a, b] => { // error: pattern requires 2 elements but array
+ // has 4
+ println!("a={}, b={}", a, b);
+ }
+}
+```
+
+Ensure that the pattern is consistent with the size of the matched
+array. Additional elements can be matched with `..`:
+
+```
+let r = &[1, 2, 3, 4];
+match r {
+ &[a, b, ..] => { // ok!
+ println!("a={}, b={}", a, b);
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0528.md b/compiler/rustc_error_codes/src/error_codes/E0528.md
new file mode 100644
index 000000000..54c2c4d4e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0528.md
@@ -0,0 +1,27 @@
+An array or slice pattern required more elements than were present in the
+matched array.
+
+Example of erroneous code:
+
+```compile_fail,E0528
+let r = &[1, 2];
+match r {
+ &[a, b, c, rest @ ..] => { // error: pattern requires at least 3
+ // elements but array has 2
+ println!("a={}, b={}, c={} rest={:?}", a, b, c, rest);
+ }
+}
+```
+
+Ensure that the matched array has at least as many elements as the pattern
+requires. You can match an arbitrary number of remaining elements with `..`:
+
+```
+let r = &[1, 2, 3, 4, 5];
+match r {
+ &[a, b, c, rest @ ..] => { // ok!
+ // prints `a=1, b=2, c=3 rest=[4, 5]`
+ println!("a={}, b={}, c={} rest={:?}", a, b, c, rest);
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0529.md b/compiler/rustc_error_codes/src/error_codes/E0529.md
new file mode 100644
index 000000000..013f438ba
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0529.md
@@ -0,0 +1,24 @@
+An array or slice pattern was matched against some other type.
+
+Example of erroneous code:
+
+```compile_fail,E0529
+let r: f32 = 1.0;
+match r {
+ [a, b] => { // error: expected an array or slice, found `f32`
+ println!("a={}, b={}", a, b);
+ }
+}
+```
+
+Ensure that the pattern and the expression being matched on are of consistent
+types:
+
+```
+let r = [1.0, 2.0];
+match r {
+ [a, b] => { // ok!
+ println!("a={}, b={}", a, b);
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0530.md b/compiler/rustc_error_codes/src/error_codes/E0530.md
new file mode 100644
index 000000000..60fa711cb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0530.md
@@ -0,0 +1,57 @@
+A binding shadowed something it shouldn't have.
+
+A match binding or a variable has a name that is already used by
+something else, e.g.:
+
+* struct name
+* enum variant
+* static
+* associated constant
+
+This error may also happen when an enum variant *with fields* is used
+in a pattern, but without its fields.
+
+```compile_fail
+enum Enum {
+ WithField(i32)
+}
+
+use Enum::*;
+match WithField(1) {
+ WithField => {} // error: missing (_)
+}
+```
+
+Match bindings cannot shadow statics:
+
+```compile_fail,E0530
+static TEST: i32 = 0;
+
+let r = 123;
+match r {
+ TEST => {} // error: name of a static
+}
+```
+
+Fixed examples:
+
+```
+static TEST: i32 = 0;
+
+let r = 123;
+match r {
+ some_value => {} // ok!
+}
+```
+
+or
+
+```
+const TEST: i32 = 0; // const, not static
+
+let r = 123;
+match r {
+ TEST => {} // const is ok!
+ other_values => {}
+}
+```
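+
+If you meant to compare against the static rather than bind a new name, a
+match guard does the comparison explicitly; a minimal sketch:
+
+```
+static TEST: i32 = 0;
+
+let r = 123;
+match r {
+    x if x == TEST => {} // compare against the static in a guard
+    _ => {}
+}
+```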
diff --git a/compiler/rustc_error_codes/src/error_codes/E0531.md b/compiler/rustc_error_codes/src/error_codes/E0531.md
new file mode 100644
index 000000000..2814046fb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0531.md
@@ -0,0 +1,42 @@
+An unknown tuple struct/variant has been used.
+
+Erroneous code example:
+
+```compile_fail,E0531
+let Type(x) = Type(12); // error!
+match Bar(12) {
+ Bar(x) => {} // error!
+ _ => {}
+}
+```
+
+In most cases, it's either a forgotten import or a typo. However, let's look at
+how you can have such a type:
+
+```edition2018
+struct Type(u32); // this is a tuple struct
+
+enum Foo {
+ Bar(u32), // this is a tuple variant
+}
+
+use Foo::*; // To use Foo's variant directly, we need to import them in
+ // the scope.
+```
+
+Either way, it should work fine with our previous code:
+
+```edition2018
+struct Type(u32);
+
+enum Foo {
+ Bar(u32),
+}
+use Foo::*;
+
+let Type(x) = Type(12); // ok!
+match Type(12) {
+ Type(x) => {} // ok!
+ _ => {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0532.md b/compiler/rustc_error_codes/src/error_codes/E0532.md
new file mode 100644
index 000000000..6fb315a37
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0532.md
@@ -0,0 +1,38 @@
+A pattern in a match arm did not match the expected kind of item.
+
+Erroneous code example:
+
+```compile_fail,E0532
+enum State {
+ Succeeded,
+ Failed(String),
+}
+
+fn print_on_failure(state: &State) {
+ match *state {
+ // error: expected unit struct, unit variant or constant, found tuple
+ // variant `State::Failed`
+ State::Failed => println!("Failed"),
+ _ => ()
+ }
+}
+```
+
+To fix this error, ensure that the pattern has the same kind as the item it
+refers to; here, the tuple variant must be matched together with its field.
+
+Fixed example:
+
+```
+enum State {
+ Succeeded,
+ Failed(String),
+}
+
+fn print_on_failure(state: &State) {
+ match *state {
+ State::Failed(ref msg) => println!("Failed with {}", msg),
+ _ => ()
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0533.md b/compiler/rustc_error_codes/src/error_codes/E0533.md
new file mode 100644
index 000000000..279d728ca
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0533.md
@@ -0,0 +1,34 @@
+An item which is neither a unit struct, a unit variant, nor a constant has
+been used as a match pattern.
+
+Erroneous code example:
+
+```compile_fail,E0533
+struct Tortoise;
+
+impl Tortoise {
+ fn turtle(&self) -> u32 { 0 }
+}
+
+match 0u32 {
+ Tortoise::turtle => {} // Error!
+ _ => {}
+}
+if let Tortoise::turtle = 0u32 {} // Same error!
+```
+
+If you want to match against a value returned by a method, you need to bind the
+value first:
+
+```
+struct Tortoise;
+
+impl Tortoise {
+ fn turtle(&self) -> u32 { 0 }
+}
+
+match 0u32 {
+    x if x == Tortoise.turtle() => {} // Bound to `x`, then we compare it!
+ _ => {}
+}
+```
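+
+A constant, by contrast, is a valid pattern; a minimal sketch:
+
+```
+const TURTLE: u32 = 0;
+
+match 0u32 {
+    TURTLE => {} // ok: constants can be matched directly
+    _ => {}
+}
+```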
diff --git a/compiler/rustc_error_codes/src/error_codes/E0534.md b/compiler/rustc_error_codes/src/error_codes/E0534.md
new file mode 100644
index 000000000..1ca9411b8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0534.md
@@ -0,0 +1,37 @@
+The `inline` attribute was malformed.
+
+Erroneous code example:
+
+```compile_fail,E0534
+#[inline()] // error: expected one argument
+pub fn something() {}
+
+fn main() {}
+```
+
+The parenthesized `inline` attribute requires an argument to be specified:
+
+```
+#[inline(always)]
+fn something() {}
+```
+
+or:
+
+```
+#[inline(never)]
+fn something() {}
+```
+
+Alternatively, the parenthesis-less form of the attribute may be used to hint
+to the compiler that inlining might be beneficial:
+
+```
+#[inline]
+fn something() {}
+```
+
+For more information see the [`inline` attribute][inline-attribute] section
+of the Reference.
+
+[inline-attribute]: https://doc.rust-lang.org/reference/attributes/codegen.html#the-inline-attribute
diff --git a/compiler/rustc_error_codes/src/error_codes/E0535.md b/compiler/rustc_error_codes/src/error_codes/E0535.md
new file mode 100644
index 000000000..0cf3118b0
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0535.md
@@ -0,0 +1,30 @@
+An unknown argument was given to the `inline` attribute.
+
+Erroneous code example:
+
+```compile_fail,E0535
+#[inline(unknown)] // error: invalid argument
+pub fn something() {}
+
+fn main() {}
+```
+
+The `inline` attribute only supports two arguments:
+
+ * always
+ * never
+
+Any other argument given to the `inline` attribute will trigger this error.
+Example:
+
+```
+#[inline(never)] // ok!
+pub fn something() {}
+
+fn main() {}
+```
+
+For more information see the [`inline` Attribute][inline-attribute] section
+of the Reference.
+
+[inline-attribute]: https://doc.rust-lang.org/reference/attributes/codegen.html#the-inline-attribute
diff --git a/compiler/rustc_error_codes/src/error_codes/E0536.md b/compiler/rustc_error_codes/src/error_codes/E0536.md
new file mode 100644
index 000000000..c081a3d9c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0536.md
@@ -0,0 +1,24 @@
+The `not` cfg-predicate was malformed.
+
+Erroneous code example:
+
+```compile_fail,E0536
+#[cfg(not())] // error: expected 1 cfg-pattern
+pub fn something() {}
+
+pub fn main() {}
+```
+
+The `not` predicate expects one cfg-pattern. Example:
+
+```
+#[cfg(not(target_os = "linux"))] // ok!
+pub fn something() {}
+
+pub fn main() {}
+```
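+
+`not` composes with the other predicates, so more complex conditions can wrap
+`any` or `all`; a minimal sketch:
+
+```
+#[cfg(not(any(target_os = "linux", target_os = "macos")))] // ok!
+pub fn something() {}
+
+pub fn main() {}
+```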
+
+For more information about the `cfg` attribute, read the section on
+[Conditional Compilation][conditional-compilation] in the Reference.
+
+[conditional-compilation]: https://doc.rust-lang.org/reference/conditional-compilation.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0537.md b/compiler/rustc_error_codes/src/error_codes/E0537.md
new file mode 100644
index 000000000..123efd4f5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0537.md
@@ -0,0 +1,30 @@
+An unknown predicate was used inside the `cfg` attribute.
+
+Erroneous code example:
+
+```compile_fail,E0537
+#[cfg(unknown())] // error: invalid predicate `unknown`
+pub fn something() {}
+
+pub fn main() {}
+```
+
+The `cfg` attribute supports only three kinds of predicates:
+
+ * any
+ * all
+ * not
+
+Example:
+
+```
+#[cfg(not(target_os = "linux"))] // ok!
+pub fn something() {}
+
+pub fn main() {}
+```
+
+For more information about the `cfg` attribute, read the section on
+[Conditional Compilation][conditional-compilation] in the Reference.
+
+[conditional-compilation]: https://doc.rust-lang.org/reference/conditional-compilation.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0538.md b/compiler/rustc_error_codes/src/error_codes/E0538.md
new file mode 100644
index 000000000..5858771ce
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0538.md
@@ -0,0 +1,27 @@
+An attribute contained the same meta item more than once.
+
+Erroneous code example:
+
+```compile_fail,E0538
+#[deprecated(
+ since="1.0.0",
+ note="First deprecation note.",
+ note="Second deprecation note." // error: multiple same meta item
+)]
+fn deprecated_function() {}
+```
+
+Meta items are the key-value pairs inside of an attribute. Each key may only be
+used once in each attribute.
+
+To fix the problem, remove all but one of the meta items with the same key.
+
+Example:
+
+```
+#[deprecated(
+ since="1.0.0",
+ note="First deprecation note."
+)]
+fn deprecated_function() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0539.md b/compiler/rustc_error_codes/src/error_codes/E0539.md
new file mode 100644
index 000000000..c53d60a5f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0539.md
@@ -0,0 +1,48 @@
+An invalid meta-item was used inside an attribute.
+
+Erroneous code example:
+
+```compile_fail,E0539
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[deprecated(note)] // error!
+#[unstable(feature = "deprecated_fn", issue = "123")]
+fn deprecated() {}
+
+#[unstable(feature = "unstable_struct", issue)] // error!
+struct Unstable;
+
+#[rustc_const_unstable(feature)] // error!
+const fn unstable_fn() {}
+
+#[stable(feature = "stable_struct", since)] // error!
+struct Stable;
+
+#[rustc_const_stable(feature)] // error!
+const fn stable_fn() {}
+```
+
+Meta items are the key-value pairs inside of an attribute.
+To fix these issues, you need to give the required key-value pairs.
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[deprecated(since = "1.39.0", note = "reason")] // ok!
+#[unstable(feature = "deprecated_fn", issue = "123")]
+fn deprecated() {}
+
+#[unstable(feature = "unstable_struct", issue = "123")] // ok!
+struct Unstable;
+
+#[rustc_const_unstable(feature = "unstable_fn", issue = "124")] // ok!
+const fn unstable_fn() {}
+
+#[stable(feature = "stable_struct", since = "1.39.0")] // ok!
+struct Stable;
+
+#[rustc_const_stable(feature = "stable_fn", since = "1.39.0")] // ok!
+const fn stable_fn() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0541.md b/compiler/rustc_error_codes/src/error_codes/E0541.md
new file mode 100644
index 000000000..96334088f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0541.md
@@ -0,0 +1,29 @@
+An unknown meta item was used.
+
+Erroneous code example:
+
+```compile_fail,E0541
+#[deprecated(
+ since="1.0.0",
+ // error: unknown meta item
+ reason="Example invalid meta item. Should be 'note'")
+]
+fn deprecated_function() {}
+```
+
+Meta items are the key-value pairs inside of an attribute. The keys provided
+must be one of the valid keys for the specified attribute.
+
+To fix the problem, either remove the unknown meta item, or rename it if you
+provided the wrong name.
+
+In the erroneous code example above, the wrong name was provided, so changing
+it to a correct one will fix the error. Example:
+
+```
+#[deprecated(
+ since="1.0.0",
+ note="This is a valid meta item for the deprecated attribute."
+)]
+fn deprecated_function() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0542.md b/compiler/rustc_error_codes/src/error_codes/E0542.md
new file mode 100644
index 000000000..c69e57417
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0542.md
@@ -0,0 +1,47 @@
+The `since` value is missing in a stability attribute.
+
+Erroneous code example:
+
+```compile_fail,E0542
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(feature = "_stable_fn")] // invalid
+fn _stable_fn() {}
+
+#[rustc_const_stable(feature = "_stable_const_fn")] // invalid
+const fn _stable_const_fn() {}
+
+#[stable(feature = "_deprecated_fn", since = "0.1.0")]
+#[deprecated(
+ note = "explanation for deprecation"
+)] // invalid
+fn _deprecated_fn() {}
+```
+
+To fix this issue, you need to provide the `since` field. Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(feature = "_stable_fn", since = "1.0.0")] // ok!
+fn _stable_fn() {}
+
+#[rustc_const_stable(feature = "_stable_const_fn", since = "1.0.0")] // ok!
+const fn _stable_const_fn() {}
+
+#[stable(feature = "_deprecated_fn", since = "0.1.0")]
+#[deprecated(
+ since = "1.0.0",
+ note = "explanation for deprecation"
+)] // ok!
+fn _deprecated_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0543.md b/compiler/rustc_error_codes/src/error_codes/E0543.md
new file mode 100644
index 000000000..d0b2e2f7a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0543.md
@@ -0,0 +1,35 @@
+The `note` value is missing in a stability attribute.
+
+Erroneous code example:
+
+```compile_fail,E0543
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(since = "0.1.0", feature = "_deprecated_fn")]
+#[deprecated(
+ since = "1.0.0"
+)] // invalid
+fn _deprecated_fn() {}
+```
+
+To fix this issue, you need to provide the `note` field. Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(since = "0.1.0", feature = "_deprecated_fn")]
+#[deprecated(
+ since = "1.0.0",
+ note = "explanation for deprecation"
+)] // ok!
+fn _deprecated_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0544.md b/compiler/rustc_error_codes/src/error_codes/E0544.md
new file mode 100644
index 000000000..2227e2a06
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0544.md
@@ -0,0 +1,29 @@
+Multiple stability attributes were declared on the same item.
+
+Erroneous code example:
+
+```compile_fail,E0544
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "rust1")]
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[stable(feature = "test", since = "2.0.0")] // invalid
+fn foo() {}
+```
+
+To fix this issue, ensure that each item has at most one stability attribute.
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "rust1")]
+
+#[stable(feature = "test", since = "2.0.0")] // ok!
+fn foo() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0545.md b/compiler/rustc_error_codes/src/error_codes/E0545.md
new file mode 100644
index 000000000..7aba084f4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0545.md
@@ -0,0 +1,35 @@
+The `issue` value is incorrect in a stability attribute.
+
+Erroneous code example:
+
+```compile_fail,E0545
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(feature = "_unstable_fn", issue = "0")] // invalid
+fn _unstable_fn() {}
+
+#[rustc_const_unstable(feature = "_unstable_const_fn", issue = "0")] // invalid
+const fn _unstable_const_fn() {}
+```
+
+To fix this issue, you need to provide a correct value in the `issue` field.
+Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(feature = "_unstable_fn", issue = "none")] // ok!
+fn _unstable_fn() {}
+
+#[rustc_const_unstable(feature = "_unstable_const_fn", issue = "1")] // ok!
+const fn _unstable_const_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0546.md b/compiler/rustc_error_codes/src/error_codes/E0546.md
new file mode 100644
index 000000000..a33dcb7a9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0546.md
@@ -0,0 +1,34 @@
+The `feature` value is missing in a stability attribute.
+
+Erroneous code example:
+
+```compile_fail,E0546
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(issue = "none")] // invalid
+fn unstable_fn() {}
+
+#[stable(since = "1.0.0")] // invalid
+fn stable_fn() {}
+```
+
+To fix this issue, you need to provide the `feature` field. Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(feature = "unstable_fn", issue = "none")] // ok!
+fn unstable_fn() {}
+
+#[stable(feature = "stable_fn", since = "1.0.0")] // ok!
+fn stable_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0547.md b/compiler/rustc_error_codes/src/error_codes/E0547.md
new file mode 100644
index 000000000..4950325df
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0547.md
@@ -0,0 +1,37 @@
+The `issue` value is missing in a stability attribute.
+
+Erroneous code example:
+
+```compile_fail,E0547
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(feature = "_unstable_fn")] // invalid
+fn _unstable_fn() {}
+
+#[rustc_const_unstable(feature = "_unstable_const_fn")] // invalid
+const fn _unstable_const_fn() {}
+```
+
+To fix this issue, you need to provide the `issue` field. Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[unstable(feature = "_unstable_fn", issue = "none")] // ok!
+fn _unstable_fn() {}
+
+#[rustc_const_unstable(
+ feature = "_unstable_const_fn",
+ issue = "none"
+)] // ok!
+const fn _unstable_const_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0549.md b/compiler/rustc_error_codes/src/error_codes/E0549.md
new file mode 100644
index 000000000..70e458a98
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0549.md
@@ -0,0 +1,37 @@
+A `deprecated` attribute wasn't paired with a `stable`/`unstable` attribute
+while `#![feature(staged_api)]` was enabled.
+
+Erroneous code example:
+
+```compile_fail,E0549
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[deprecated(
+ since = "1.0.1",
+ note = "explanation for deprecation"
+)] // invalid
+fn _deprecated_fn() {}
+```
+
+To fix this issue, you also need to add a `stable` or `unstable` attribute.
+Example:
+
+```
+#![feature(staged_api)]
+#![stable(since = "1.0.0", feature = "test")]
+
+#[stable(since = "1.0.0", feature = "test")]
+#[deprecated(
+ since = "1.0.1",
+ note = "explanation for deprecation"
+)] // ok!
+fn _deprecated_fn() {}
+```
+
+See the [How Rust is Made and “Nightly Rust”][how-rust-made-nightly] appendix
+of the Book and the [Stability attributes][stability-attributes] section of the
+Rustc Dev Guide for more details.
+
+[how-rust-made-nightly]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+[stability-attributes]: https://rustc-dev-guide.rust-lang.org/stability.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0550.md b/compiler/rustc_error_codes/src/error_codes/E0550.md
new file mode 100644
index 000000000..6aac5c969
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0550.md
@@ -0,0 +1,18 @@
+#### Note: this error code is no longer emitted by the compiler
+
+More than one `deprecated` attribute has been put on an item.
+
+Erroneous code example:
+
+```compile_fail
+#[deprecated(note = "because why not?")]
+#[deprecated(note = "right?")] // error!
+fn the_banished() {}
+```
+
+The `deprecated` attribute can only be present **once** on an item.
+
+```
+#[deprecated(note = "because why not, right?")]
+fn the_banished() {} // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0551.md b/compiler/rustc_error_codes/src/error_codes/E0551.md
new file mode 100644
index 000000000..53db559a4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0551.md
@@ -0,0 +1,16 @@
+An invalid meta-item was used inside an attribute.
+
+Erroneous code example:
+
+```compile_fail,E0551
+#[deprecated(note)] // error!
+fn i_am_deprecated() {}
+```
+
+Meta items are the key-value pairs inside of an attribute. To fix this issue,
+you need to give a value to the `note` key. Example:
+
+```
+#[deprecated(note = "because")] // ok!
+fn i_am_deprecated() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0552.md b/compiler/rustc_error_codes/src/error_codes/E0552.md
new file mode 100644
index 000000000..0fbc861fb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0552.md
@@ -0,0 +1,27 @@
+An unrecognized representation attribute was used.
+
+Erroneous code example:
+
+```compile_fail,E0552
+#[repr(D)] // error: unrecognized representation hint
+struct MyStruct {
+ my_field: usize
+}
+```
+
+You can use a `repr` attribute to tell the compiler how you want a struct or
+enum to be laid out in memory.
+
+Make sure you're using one of the supported options:
+
+```
+#[repr(C)] // ok!
+struct MyStruct {
+ my_field: usize
+}
+```
+
+For more information about specifying representations, see the ["Alternative
+Representations" section] of the Rustonomicon.
+
+["Alternative Representations" section]: https://doc.rust-lang.org/nomicon/other-reprs.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0554.md b/compiler/rustc_error_codes/src/error_codes/E0554.md
new file mode 100644
index 000000000..3178bf219
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0554.md
@@ -0,0 +1,12 @@
+Feature attributes are only allowed on the nightly release channel. Stable
+and beta compilers reject them.
+
+Erroneous code example:
+
+```ignore (depends on release channel)
+#![feature(lang_items)] // error: `#![feature]` may not be used on the
+ // stable release channel
+```
+
+If you need the feature, make sure to use a nightly release of the compiler
+(but be warned that the feature may be removed or altered in the future).
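+
+If you are unsure which channel your compiler comes from, you can check it on
+the command line; nightly builds report a version string containing `-nightly`:
+
+```sh
+rustc --version
+```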
diff --git a/compiler/rustc_error_codes/src/error_codes/E0556.md b/compiler/rustc_error_codes/src/error_codes/E0556.md
new file mode 100644
index 000000000..2aac8240d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0556.md
@@ -0,0 +1,16 @@
+The `feature` attribute was badly formed.
+
+Erroneous code example:
+
+```compile_fail,E0556
+#![feature(foo_bar_baz, foo(bar), foo = "baz", foo)] // error!
+#![feature] // error!
+#![feature = "foo"] // error!
+```
+
+The `feature` attribute only accepts a "feature flag" and can only be used on
+nightly. Example:
+
+```ignore (only works in nightly)
+#![feature(flag)]
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0557.md b/compiler/rustc_error_codes/src/error_codes/E0557.md
new file mode 100644
index 000000000..f330efe59
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0557.md
@@ -0,0 +1,9 @@
+A feature attribute named a feature that has been removed.
+
+Erroneous code example:
+
+```compile_fail,E0557
+#![feature(managed_boxes)] // error: feature has been removed
+```
+
+Delete the offending feature attribute.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0559.md b/compiler/rustc_error_codes/src/error_codes/E0559.md
new file mode 100644
index 000000000..b9f7b6508
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0559.md
@@ -0,0 +1,22 @@
+An unknown field was specified in an enum's struct variant.
+
+Erroneous code example:
+
+```compile_fail,E0559
+enum Field {
+ Fool { x: u32 },
+}
+
+let s = Field::Fool { joke: 0 };
+// error: struct variant `Field::Fool` has no field named `joke`
+```
+
+Verify that you didn't misspell the field's name and that it exists. Example:
+
+```
+enum Field {
+ Fool { joke: u32 },
+}
+
+let s = Field::Fool { joke: 0 }; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0560.md b/compiler/rustc_error_codes/src/error_codes/E0560.md
new file mode 100644
index 000000000..7185bf38c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0560.md
@@ -0,0 +1,23 @@
+An unknown field was specified in a structure.
+
+Erroneous code example:
+
+```compile_fail,E0560
+struct Simba {
+ mother: u32,
+}
+
+let s = Simba { mother: 1, father: 0 };
+// error: structure `Simba` has no field named `father`
+```
+
+Verify that you didn't misspell the field's name and that it exists. Example:
+
+```
+struct Simba {
+ mother: u32,
+ father: u32,
+}
+
+let s = Simba { mother: 1, father: 0 }; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0561.md b/compiler/rustc_error_codes/src/error_codes/E0561.md
new file mode 100644
index 000000000..529001890
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0561.md
@@ -0,0 +1,25 @@
+A non-ident or non-wildcard pattern has been used as a parameter of a function
+pointer type.
+
+Erroneous code example:
+
+```compile_fail,E0561
+type A1 = fn(mut param: u8); // error!
+type A2 = fn(&param: u32); // error!
+```
+
+In a type alias of a function pointer type, parameters may only be plain
+identifiers or `_`; you cannot, for example, mark a parameter as mutable.
+
+To fix the issue, remove patterns (`_` is allowed though). Example:
+
+```
+type A1 = fn(param: u8); // ok!
+type A2 = fn(_: u32); // ok!
+```
+
+You can also omit the parameter name:
+
+```
+type A3 = fn(i16); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0562.md b/compiler/rustc_error_codes/src/error_codes/E0562.md
new file mode 100644
index 000000000..95f038df5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0562.md
@@ -0,0 +1,33 @@
+Abstract return types (written `impl Trait` for some trait `Trait`) are only
+allowed as function and inherent impl return types.
+
+Erroneous code example:
+
+```compile_fail,E0562
+fn main() {
+ let count_to_ten: impl Iterator<Item=usize> = 0..10;
+ // error: `impl Trait` not allowed outside of function and inherent method
+ // return types
+ for i in count_to_ten {
+ println!("{}", i);
+ }
+}
+```
+
+Make sure `impl Trait` only appears in return-type position.
+
+```
+fn count_to_n(n: usize) -> impl Iterator<Item=usize> {
+ 0..n
+}
+
+fn main() {
+ for i in count_to_n(10) { // ok!
+ println!("{}", i);
+ }
+}
+```
+
+See [RFC 1522] for more details.
+
+[RFC 1522]: https://github.com/rust-lang/rfcs/blob/master/text/1522-conservative-impl-trait.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0565.md b/compiler/rustc_error_codes/src/error_codes/E0565.md
new file mode 100644
index 000000000..d5bba941c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0565.md
@@ -0,0 +1,21 @@
+A literal was used in a built-in attribute that doesn't support literals.
+
+Erroneous code example:
+
+```compile_fail,E0565
+#[repr("C")] // error: meta item in `repr` must be an identifier
+struct Repr {}
+
+fn main() {}
+```
+
+Literals in attributes are new and largely unsupported in built-in attributes.
+Work to support literals where appropriate is ongoing. Try using an unquoted
+name instead:
+
+```
+#[repr(C)] // ok!
+struct Repr {}
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0566.md b/compiler/rustc_error_codes/src/error_codes/E0566.md
new file mode 100644
index 000000000..3dcd801a2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0566.md
@@ -0,0 +1,18 @@
+Conflicting representation hints have been used on the same item.
+
+Erroneous code example:
+
+```compile_fail,E0566
+#[repr(u32, u64)]
+enum Repr { A }
+```
+
+In most cases (if not all), using just one representation hint is more than
+enough. If you want a representation hint that depends on the compilation
+target, use `cfg_attr`. Example:
+
+```
+#[cfg_attr(target_os = "linux", repr(u32))]
+#[cfg_attr(not(target_os = "linux"), repr(u64))]
+enum Repr { A }
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0567.md b/compiler/rustc_error_codes/src/error_codes/E0567.md
new file mode 100644
index 000000000..bc13ee4c0
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0567.md
@@ -0,0 +1,23 @@
+Generics have been used on an auto trait.
+
+Erroneous code example:
+
+```compile_fail,E0567
+#![feature(auto_traits)]
+
+auto trait Generic<T> {} // error!
+# fn main() {}
+```
+
+Since an auto trait is implemented on all existing types, the
+compiler would not be able to infer the types of the trait's generic
+parameters.
+
+To fix this issue, just remove the generics:
+
+```
+#![feature(auto_traits)]
+
+auto trait Generic {} // ok!
+# fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0568.md b/compiler/rustc_error_codes/src/error_codes/E0568.md
new file mode 100644
index 000000000..17b3f5e31
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0568.md
@@ -0,0 +1,26 @@
+A super trait has been added to an auto trait.
+
+Erroneous code example:
+
+```compile_fail,E0568
+#![feature(auto_traits)]
+
+auto trait Bound : Copy {} // error!
+
+fn main() {}
+```
+
+Since an auto trait is implemented on all existing types, adding a super trait
+would filter out many of those types. In this example, most existing types
+could not implement `Bound` because relatively few of them implement the
+`Copy` trait.
+
+To fix this issue, just remove the super trait:
+
+```
+#![feature(auto_traits)]
+
+auto trait Bound {} // ok!
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0569.md b/compiler/rustc_error_codes/src/error_codes/E0569.md
new file mode 100644
index 000000000..2ca2b57ec
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0569.md
@@ -0,0 +1,30 @@
+If an impl has a generic parameter with the `#[may_dangle]` attribute, then
+that impl must be declared as an `unsafe impl`.
+
+Erroneous code example:
+
+```compile_fail,E0569
+#![feature(dropck_eyepatch)]
+
+struct Foo<X>(X);
+impl<#[may_dangle] X> Drop for Foo<X> {
+ fn drop(&mut self) { }
+}
+```
+
+In this example, we are asserting that the destructor for `Foo` will not
+access any data of type `X`, and require this assertion to be true for
+overall safety in our program. The compiler does not currently attempt to
+verify this assertion; therefore we must tag this `impl` as unsafe.
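+
+One way to satisfy this requirement (a sketch of the fix) is to declare the
+impl as `unsafe`:
+
+```
+#![feature(dropck_eyepatch)]
+
+struct Foo<X>(X);
+unsafe impl<#[may_dangle] X> Drop for Foo<X> {
+    fn drop(&mut self) { }
+}
+```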
diff --git a/compiler/rustc_error_codes/src/error_codes/E0570.md b/compiler/rustc_error_codes/src/error_codes/E0570.md
new file mode 100644
index 000000000..355e71ffb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0570.md
@@ -0,0 +1,15 @@
+The requested ABI is unsupported by the current target.
+
+The Rust compiler maintains, for each target, a list of ABIs unsupported on
+that target. If an ABI is present in such a list, this usually means that the
+target / ABI combination is currently unsupported by LLVM.
+
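+For example (a sketch; whether this is accepted depends entirely on the
+target), the `win64` ABI is only supported on x86-64, so a 32-bit x86 target
+rejects the following declaration:
+
+```ignore (depends on the target)
+extern "win64" fn func() {} // error on targets without `win64` support
+```
+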
+If necessary, you can circumvent this check using custom target specifications.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0571.md b/compiler/rustc_error_codes/src/error_codes/E0571.md
new file mode 100644
index 000000000..eadae05aa
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0571.md
@@ -0,0 +1,31 @@
+A `break` statement with an argument appeared in a non-`loop` loop.
+
+Erroneous code example:
+
+```compile_fail,E0571
+# let mut i = 1;
+# fn satisfied(n: usize) -> bool { n % 23 == 0 }
+let result = while true {
+ if satisfied(i) {
+ break 2 * i; // error: `break` with value from a `while` loop
+ }
+ i += 1;
+};
+```
+
+The `break` statement can take an argument (which will be the value of the loop
+expression if the `break` statement is executed) in `loop` loops, but not
+`for`, `while`, or `while let` loops.
+
+Make sure `break value;` statements only occur in `loop` loops:
+
+```
+# let mut i = 1;
+# fn satisfied(n: usize) -> bool { n % 23 == 0 }
+let result = loop { // This is now a "loop" loop.
+ if satisfied(i) {
+ break 2 * i; // ok!
+ }
+ i += 1;
+};
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0572.md b/compiler/rustc_error_codes/src/error_codes/E0572.md
new file mode 100644
index 000000000..b2660650f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0572.md
@@ -0,0 +1,24 @@
+A return statement was found outside of a function body.
+
+Erroneous code example:
+
+```compile_fail,E0572
+const FOO: u32 = return 0; // error: return statement outside of function body
+
+fn main() {}
+```
+
+To fix this issue, just remove the `return` keyword or move the expression
+into a function. Example:
+
+```
+const FOO: u32 = 0;
+
+fn some_fn() -> u32 {
+ return FOO;
+}
+
+fn main() {
+ some_fn();
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0573.md b/compiler/rustc_error_codes/src/error_codes/E0573.md
new file mode 100644
index 000000000..6021ed0ef
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0573.md
@@ -0,0 +1,71 @@
+Something other than a type has been used when one was expected.
+
+Erroneous code examples:
+
+```compile_fail,E0573
+enum Dragon {
+ Born,
+}
+
+fn oblivion() -> Dragon::Born { // error!
+ Dragon::Born
+}
+
+const HOBBIT: u32 = 2;
+impl HOBBIT {} // error!
+
+enum Wizard {
+ Gandalf,
+ Saruman,
+}
+
+trait Isengard {
+ fn wizard(_: Wizard::Saruman); // error!
+}
+```
+
+In all these errors, a type was expected. For example, in the first error, if
+we want to return the `Born` variant from the `Dragon` enum, we must set the
+function to return the enum and not its variant:
+
+```
+enum Dragon {
+ Born,
+}
+
+fn oblivion() -> Dragon { // ok!
+ Dragon::Born
+}
+```
+
+In the second error, you can't implement something on a constant, only on a
+type. We would need to create a new type if we wanted to do something similar:
+
+```
+struct Hobbit(u32); // we create a new type
+
+const HOBBIT: Hobbit = Hobbit(2);
+impl Hobbit {} // ok!
+```
+
+In the third case, we tried to accept only one variant of the `Wizard` enum,
+which is not possible. To make this work, we need to use pattern matching
+over the `Wizard` enum:
+
+```
+enum Wizard {
+ Gandalf,
+ Saruman,
+}
+
+trait Isengard {
+ fn wizard(w: Wizard) { // ok!
+ match w {
+ Wizard::Saruman => {
+ // do something
+ }
+ _ => {} // ignore everything else
+ }
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0574.md b/compiler/rustc_error_codes/src/error_codes/E0574.md
new file mode 100644
index 000000000..4881f61d0
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0574.md
@@ -0,0 +1,47 @@
+Something other than a struct, variant or union has been used when one was
+expected.
+
+Erroneous code example:
+
+```compile_fail,E0574
+mod mordor {}
+
+let sauron = mordor { x: () }; // error!
+
+enum Jak {
+ Daxter { i: isize },
+}
+
+let eco = Jak::Daxter { i: 1 };
+match eco {
+ Jak { i } => {} // error!
+}
+```
+
+In these errors, a struct, variant or union was expected. For example, in the
+first error, we tried to instantiate the `mordor` module, which is impossible.
+If you want to instantiate a type inside a module, you can do it as follows:
+
+```
+mod mordor {
+ pub struct TheRing {
+ pub x: usize,
+ }
+}
+
+let sauron = mordor::TheRing { x: 1 }; // ok!
+```
+
+In the second error, we tried to match on the `Jak` enum directly, which is
+not possible: you can only match on one of its variants. To do so:
+
+```
+enum Jak {
+ Daxter { i: isize },
+}
+
+let eco = Jak::Daxter { i: 1 };
+match eco {
+ Jak::Daxter { i } => {} // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0575.md b/compiler/rustc_error_codes/src/error_codes/E0575.md
new file mode 100644
index 000000000..903939a9a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0575.md
@@ -0,0 +1,50 @@
+Something other than a type or an associated type was given.
+
+Erroneous code example:
+
+```compile_fail,E0575
+enum Rick { Morty }
+
+let _: <u8 as Rick>::Morty; // error!
+
+trait Age {
+ type Empire;
+ fn Mythology() {}
+}
+
+impl Age for u8 {
+ type Empire = u16;
+}
+
+let _: <u8 as Age>::Mythology; // error!
+```
+
+In both cases, we're declaring a variable (called `_`) and we're giving it a
+type. However, `<u8 as Rick>::Morty` and `<u8 as Age>::Mythology` aren't types,
+therefore the compiler throws an error.
+
+`<u8 as Rick>::Morty` is an enum variant; you cannot use a variant as a type.
+You have to use the enum directly:
+
+```
+enum Rick { Morty }
+
+let _: Rick; // ok!
+```
+
+`<u8 as Age>::Mythology` is a trait method, which is definitely not a type.
+However, the `Age` trait provides an associated type `Empire` which can be
+used as a type:
+
+```
+trait Age {
+ type Empire;
+ fn Mythology() {}
+}
+
+impl Age for u8 {
+ type Empire = u16;
+}
+
+let _: <u8 as Age>::Empire; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0576.md b/compiler/rustc_error_codes/src/error_codes/E0576.md
new file mode 100644
index 000000000..8eead4e7e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0576.md
@@ -0,0 +1,22 @@
+An associated item wasn't found in the given type.
+
+Erroneous code example:
+
+```compile_fail,E0576
+trait Hello {
+ type Who;
+
+ fn hello() -> <Self as Hello>::You; // error!
+}
+```
+
+In this example, we tried to use the non-existent associated type `You` of the
+`Hello` trait. To fix this error, use an existing associated type:
+
+```
+trait Hello {
+ type Who;
+
+ fn hello() -> <Self as Hello>::Who; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0577.md b/compiler/rustc_error_codes/src/error_codes/E0577.md
new file mode 100644
index 000000000..eba2d3b14
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0577.md
@@ -0,0 +1,24 @@
+Something other than a module was found in visibility scope.
+
+Erroneous code example:
+
+```compile_fail,E0577,edition2018
+pub struct Sea;
+
+pub (in crate::Sea) struct Shark; // error!
+
+fn main() {}
+```
+
+`Sea` is not a module, therefore it is invalid to use it in a visibility path.
+To fix this error, we need to ensure that `sea` is a module.
+
+Please note that the visibility scope can only be applied to ancestor modules!
+
+```edition2018
+pub mod sea {
+ pub (in crate::sea) struct Shark; // ok!
+}
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0578.md b/compiler/rustc_error_codes/src/error_codes/E0578.md
new file mode 100644
index 000000000..fca897572
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0578.md
@@ -0,0 +1,25 @@
+A module cannot be found and therefore, the visibility cannot be determined.
+
+Erroneous code example:
+
+```compile_fail,E0578,edition2018
+foo!();
+
+pub (in ::Sea) struct Shark; // error!
+
+fn main() {}
+```
+
+Because of the call to the `foo` macro, the compiler guesses that the missing
+module could be inside it and fails because the macro definition cannot be
+found.
+
+To fix this error, please be sure that the module is in scope:
+
+```edition2018
+pub mod sea {
+    pub (in crate::sea) struct Shark;
+}
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0579.md b/compiler/rustc_error_codes/src/error_codes/E0579.md
new file mode 100644
index 000000000..f554242a3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0579.md
@@ -0,0 +1,35 @@
+The lower bound of a range pattern wasn't less than its upper bound.
+
+Erroneous code example:
+
+```compile_fail,E0579
+#![feature(exclusive_range_pattern)]
+
+fn main() {
+ match 5u32 {
+ // This range is ok, albeit pointless.
+ 1 .. 2 => {}
+ // This range is empty, and the compiler can tell.
+ 5 .. 5 => {} // error!
+ }
+}
+```
+
+When matching against an exclusive range, the compiler verifies that the range
+is non-empty. Exclusive range patterns include the start point but not the end
+point, so this is equivalent to requiring the start of the range to be less
+than the end of the range.
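+
+A corrected version (a sketch) makes every range non-empty, for example:
+
+```
+#![feature(exclusive_range_pattern)]
+
+fn main() {
+    match 5u32 {
+        1 .. 2 => {}
+        4 .. 6 => {} // ok! this range contains `4` and `5`
+        _ => {}
+    }
+}
+```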
diff --git a/compiler/rustc_error_codes/src/error_codes/E0580.md b/compiler/rustc_error_codes/src/error_codes/E0580.md
new file mode 100644
index 000000000..260575d5d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0580.md
@@ -0,0 +1,21 @@
+The `main` function was incorrectly declared.
+
+Erroneous code example:
+
+```compile_fail,E0580
+fn main(x: i32) { // error: main function has wrong type
+ println!("{}", x);
+}
+```
+
+The `main` function prototype should never take arguments.
+Example:
+
+```
+fn main() {
+ // your code
+}
+```
+
+If you want to get command-line arguments, use `std::env::args`. To exit with a
+specified exit code, use `std::process::exit`.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0581.md b/compiler/rustc_error_codes/src/error_codes/E0581.md
new file mode 100644
index 000000000..02468dd94
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0581.md
@@ -0,0 +1,31 @@
+In a `fn` type, a lifetime appears only in the return type
+and not in the argument types.
+
+Erroneous code example:
+
+```compile_fail,E0581
+fn main() {
+ // Here, `'a` appears only in the return type:
+ let x: for<'a> fn() -> &'a i32;
+}
+```
+
+The problem here is that the lifetime isn't constrained by any of the arguments,
+making it impossible to determine how long it's supposed to live.
+
+To fix this issue, either use the lifetime in the arguments, or use the
+`'static` lifetime. Example:
+
+```
+fn main() {
+ // Here, `'a` appears only in the return type:
+ let x: for<'a> fn(&'a i32) -> &'a i32;
+ let y: fn() -> &'static i32;
+}
+```
+
+Note: The examples above used to be (erroneously) accepted by the
+compiler, but this was since corrected. See [issue #33685] for more
+details.
+
+[issue #33685]: https://github.com/rust-lang/rust/issues/33685
diff --git a/compiler/rustc_error_codes/src/error_codes/E0582.md b/compiler/rustc_error_codes/src/error_codes/E0582.md
new file mode 100644
index 000000000..e50cc60ea
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0582.md
@@ -0,0 +1,34 @@
+A lifetime is only present in an associated-type binding, and not in the input
+types to the trait.
+
+Erroneous code example:
+
+```compile_fail,E0582
+fn bar<F>(t: F)
+ // No type can satisfy this requirement, since `'a` does not
+ // appear in any of the input types (here, `i32`):
+ where F: for<'a> Fn(i32) -> Option<&'a i32>
+{
+}
+
+fn main() { }
+```
+
+To fix this issue, either use the lifetime in the inputs, or use
+`'static`. Example:
+
+```
+fn bar<F, G>(t: F, u: G)
+ where F: for<'a> Fn(&'a i32) -> Option<&'a i32>,
+ G: Fn(i32) -> Option<&'static i32>,
+{
+}
+
+fn main() { }
+```
+
+Note: The examples above used to be (erroneously) accepted by the
+compiler, but this was since corrected. See [issue #33685] for more
+details.
+
+[issue #33685]: https://github.com/rust-lang/rust/issues/33685
diff --git a/compiler/rustc_error_codes/src/error_codes/E0583.md b/compiler/rustc_error_codes/src/error_codes/E0583.md
new file mode 100644
index 000000000..701900bb0
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0583.md
@@ -0,0 +1,23 @@
+A file wasn't found for an out-of-line module.
+
+Erroneous code example:
+
+```compile_fail,E0583
+mod file_that_doesnt_exist; // error: file not found for module
+
+fn main() {}
+```
+
+Please be sure that a file corresponding to the module exists. If you
+want to use a module named `file_that_doesnt_exist`, you need to have a file
+named `file_that_doesnt_exist.rs` or `file_that_doesnt_exist/mod.rs` in the
+same directory.
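+
+For example, a layout like the following (a sketch, assuming the module is
+declared in the crate root `src/main.rs`) satisfies the declaration:
+
+```text
+src/
+├── main.rs                   // contains `mod file_that_doesnt_exist;`
+└── file_that_doesnt_exist.rs // the module's content lives here
+```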
diff --git a/compiler/rustc_error_codes/src/error_codes/E0584.md b/compiler/rustc_error_codes/src/error_codes/E0584.md
new file mode 100644
index 000000000..8b00655ee
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0584.md
@@ -0,0 +1,24 @@
+A doc comment that is not attached to anything has been encountered.
+
+Erroneous code example:
+
+```compile_fail,E0584
+trait Island {
+ fn lost();
+
+ /// I'm lost!
+}
+```
+
+A little reminder: a doc comment has to be placed before the item it's supposed
+to document. So if you want to document the `Island` trait, you need to put a
+doc comment before it, not inside it. Same goes for the `lost` method: the doc
+comment needs to be before it:
+
+```
+/// I'm THE island!
+trait Island {
+ /// I'm lost!
+ fn lost();
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0585.md b/compiler/rustc_error_codes/src/error_codes/E0585.md
new file mode 100644
index 000000000..de27cc19d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0585.md
@@ -0,0 +1,21 @@
+A documentation comment that doesn't document anything was found.
+
+Erroneous code example:
+
+```compile_fail,E0585
+fn main() {
+ // The following doc comment will fail:
+ /// This is a useless doc comment!
+}
+```
+
+Documentation comments need to be followed by items, including functions,
+types, modules, etc. Examples:
+
+```
+/// I'm documenting the following struct:
+struct Foo;
+
+/// I'm documenting the following function:
+fn foo() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0586.md b/compiler/rustc_error_codes/src/error_codes/E0586.md
new file mode 100644
index 000000000..bc6572eca
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0586.md
@@ -0,0 +1,29 @@
+An inclusive range was used with no end.
+
+Erroneous code example:
+
+```compile_fail,E0586
+fn main() {
+ let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1];
+ let x = &tmp[1..=]; // error: inclusive range was used with no end
+}
+```
+
+An inclusive range needs an end in order to *include* it. If you just need a
+start and no end, use a non-inclusive range (with `..`):
+
+```
+fn main() {
+ let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1];
+ let x = &tmp[1..]; // ok!
+}
+```
+
+Or put an end to your inclusive range:
+
+```
+fn main() {
+ let tmp = vec![0, 1, 2, 3, 4, 4, 3, 3, 2, 1];
+ let x = &tmp[1..=3]; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0587.md b/compiler/rustc_error_codes/src/error_codes/E0587.md
new file mode 100644
index 000000000..ee9031dc3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0587.md
@@ -0,0 +1,24 @@
+A type has both `packed` and `align` representation hints.
+
+Erroneous code example:
+
+```compile_fail,E0587
+#[repr(packed, align(8))] // error!
+struct Umbrella(i32);
+```
+
+You cannot use both the `packed` and `align` hints on the same type. If you
+want a packed type, drop the `align` hint:
+
+```
+#[repr(packed)] // ok!
+struct Umbrella(i32);
+```
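+
+If you also need to cap a packed type's alignment, note that `packed` itself
+accepts an argument (a sketch; `packed(N)` limits the alignment to at most `N`):
+
+```
+#[repr(packed(2))] // ok!
+struct Umbrella(i32);
+```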
diff --git a/compiler/rustc_error_codes/src/error_codes/E0588.md b/compiler/rustc_error_codes/src/error_codes/E0588.md
new file mode 100644
index 000000000..040c7a02e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0588.md
@@ -0,0 +1,24 @@
+A type with `packed` representation hint has a field with `align`
+representation hint.
+
+Erroneous code example:
+
+```compile_fail,E0588
+#[repr(align(16))]
+struct Aligned(i32);
+
+#[repr(packed)] // error!
+struct Packed(Aligned);
+```
+
+Just like you cannot have both `align` and `packed` representation hints on
+the same type, a `packed` type cannot contain another type with the `align`
+representation hint. However, you can do the opposite:
+
+```
+#[repr(packed)]
+struct Packed(i32);
+
+#[repr(align(16))] // ok!
+struct Aligned(Packed);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0589.md b/compiler/rustc_error_codes/src/error_codes/E0589.md
new file mode 100644
index 000000000..8a4f8d217
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0589.md
@@ -0,0 +1,20 @@
+The value of `N` that was specified for `repr(align(N))` was not a power
+of two, or was greater than 2^29.
+
+Erroneous code example:
+
+```compile_fail,E0589
+#[repr(align(15))] // error: invalid `repr(align)` attribute: not a power of two
+enum Foo {
+ Bar(u64),
+}
+```
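+
+To fix it, use a power of two no greater than 2^29, for example:
+
+```
+#[repr(align(16))] // ok!
+enum Foo {
+    Bar(u64),
+}
+```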
diff --git a/compiler/rustc_error_codes/src/error_codes/E0590.md b/compiler/rustc_error_codes/src/error_codes/E0590.md
new file mode 100644
index 000000000..11005b833
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0590.md
@@ -0,0 +1,17 @@
+A `break` or `continue` keyword was used in the condition of a `while` loop
+without a label.
+
+Erroneous code example:
+
+```compile_fail,E0590
+while break {}
+```
+
+`break` or `continue` must include a label when used in the condition of a
+`while` loop.
+
+To fix this, add a label specifying which loop is being broken out of:
+
+```
+'foo: while break 'foo {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0591.md b/compiler/rustc_error_codes/src/error_codes/E0591.md
new file mode 100644
index 000000000..f49805d9b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0591.md
@@ -0,0 +1,81 @@
+Per [RFC 401][rfc401], if you have a function declaration `foo`:
+
+```
+struct S;
+
+// For the purposes of this explanation, all of these
+// different kinds of `fn` declarations are equivalent:
+
+fn foo(x: S) { /* ... */ }
+# #[cfg(for_demonstration_only)]
+extern "C" {
+ fn foo(x: S);
+}
+# #[cfg(for_demonstration_only)]
+impl S {
+ fn foo(self) { /* ... */ }
+}
+```
+
+the type of `foo` is **not** `fn(S)`, as one might expect.
+Rather, it is a unique, zero-sized marker type written here as `typeof(foo)`.
+However, `typeof(foo)` can be _coerced_ to a function pointer `fn(S)`,
+so you rarely notice this:
+
+```
+# struct S;
+# fn foo(_: S) {}
+let x: fn(S) = foo; // OK, coerces
+```
+
+The reason that this matters is that the type `fn(S)` is not specific to
+any particular function: it's a function _pointer_. So calling `x()` results
+in a virtual call, whereas `foo()` is statically dispatched, because the type
+of `foo` tells us precisely what function is being called.
+
+As noted above, coercions mean that most code doesn't have to be
+concerned with this distinction. However, you can tell the difference
+when using **transmute** to convert a fn item into a fn pointer.
+
+This is sometimes done as part of an FFI:
+
+```compile_fail,E0591
+extern "C" fn foo(userdata: Box<i32>) {
+ /* ... */
+}
+
+# fn callback(_: extern "C" fn(*mut i32)) {}
+# use std::mem::transmute;
+unsafe {
+ let f: extern "C" fn(*mut i32) = transmute(foo);
+ callback(f);
+}
+```
+
+Here, transmute is being used to convert the types of the fn arguments.
+This pattern is incorrect because the type of `foo` is a function
+**item** (`typeof(foo)`), which is zero-sized, and the target type (`fn()`)
+is a function pointer, which is not zero-sized.
+This pattern should be rewritten. There are a few possible ways to do this:
+
+- change the original fn declaration to match the expected signature,
+ and do the cast in the fn body (the preferred option)
+- cast the fn item to a fn pointer before calling transmute, as shown here:
+
+ ```
+ # extern "C" fn foo(_: Box<i32>) {}
+ # use std::mem::transmute;
+ # unsafe {
+ let f: extern "C" fn(*mut i32) = transmute(foo as extern "C" fn(_));
+ let f: extern "C" fn(*mut i32) = transmute(foo as usize); // works too
+ # }
+ ```
+
+The same applies to transmutes to `*mut fn()`, which were observed in practice.
+Note though that use of this type is generally incorrect.
+The intention is typically to describe a function pointer, but just `fn()`
+alone suffices for that. `*mut fn()` is a pointer to a fn pointer.
+(Since these values are typically just passed to C code, however, this rarely
+makes a difference in practice.)
+
+[rfc401]: https://github.com/rust-lang/rfcs/blob/master/text/0401-coercions.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0592.md b/compiler/rustc_error_codes/src/error_codes/E0592.md
new file mode 100644
index 000000000..06959b5d7
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0592.md
@@ -0,0 +1,31 @@
+This error occurs when you defined methods or associated functions with the
+same name.
+
+Erroneous code example:
+
+```compile_fail,E0592
+struct Foo;
+
+impl Foo {
+ fn bar() {} // previous definition here
+}
+
+impl Foo {
+ fn bar() {} // duplicate definition here
+}
+```
+
+A similar error is E0201; the difference is whether the duplicates share one
+declaration block. To avoid this error, you must give each `fn` a unique name.
+
+```
+struct Foo;
+
+impl Foo {
+ fn bar() {}
+}
+
+impl Foo {
+ fn baz() {} // define with different name
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0593.md b/compiler/rustc_error_codes/src/error_codes/E0593.md
new file mode 100644
index 000000000..1902d73f4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0593.md
@@ -0,0 +1,24 @@
+You tried to supply an `Fn`-based type with a different number of arguments
+than what was expected.
+
+Erroneous code example:
+
+```compile_fail,E0593
+fn foo<F: Fn()>(x: F) { }
+
+fn main() {
+ // [E0593] closure takes 1 argument but 0 arguments are required
+ foo(|y| { });
+}
+```
+
+You have to provide the same number of arguments as expected by the `Fn`-based
+type. So to fix the previous example, we need to remove the `y` argument:
+
+```
+fn foo<F: Fn()>(x: F) { }
+
+fn main() {
+ foo(|| { }); // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0594.md b/compiler/rustc_error_codes/src/error_codes/E0594.md
new file mode 100644
index 000000000..ad8eb631e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0594.md
@@ -0,0 +1,23 @@
+A value was assigned to a place that is not mutable.
+
+Erroneous code example:
+
+```compile_fail,E0594
+struct SolarSystem {
+ earth: i32,
+}
+
+let ss = SolarSystem { earth: 3 };
+ss.earth = 2; // error!
+```
+
+To fix this error, declare `ss` as mutable by using the `mut` keyword:
+
+```
+struct SolarSystem {
+ earth: i32,
+}
+
+let mut ss = SolarSystem { earth: 3 }; // declaring `ss` as mutable
+ss.earth = 2; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0595.md b/compiler/rustc_error_codes/src/error_codes/E0595.md
new file mode 100644
index 000000000..e67290132
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0595.md
@@ -0,0 +1,17 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Closures cannot mutate immutable captured variables.
+
+Erroneous code example:
+
+```compile_fail,E0594
+let x = 3; // error: closure cannot assign to immutable local variable `x`
+let mut c = || { x += 1 };
+```
+
+Make the variable binding mutable:
+
+```
+let mut x = 3; // ok!
+let mut c = || { x += 1 };
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0596.md b/compiler/rustc_error_codes/src/error_codes/E0596.md
new file mode 100644
index 000000000..95669309b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0596.md
@@ -0,0 +1,16 @@
+This error occurs because you tried to mutably borrow a non-mutable variable.
+
+Erroneous code example:
+
+```compile_fail,E0596
+let x = 1;
+let y = &mut x; // error: cannot borrow mutably
+```
+
+Here, `x` isn't mutable, so when we try to mutably borrow it in `y`, it
+fails. To fix this error, you need to make `x` mutable:
+
+```
+let mut x = 1;
+let y = &mut x; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0597.md b/compiler/rustc_error_codes/src/error_codes/E0597.md
new file mode 100644
index 000000000..f6e0b62e1
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0597.md
@@ -0,0 +1,33 @@
+This error occurs because a value was dropped while it was still borrowed.
+
+Erroneous code example:
+
+```compile_fail,E0597
+struct Foo<'a> {
+ x: Option<&'a u32>,
+}
+
+let mut x = Foo { x: None };
+{
+ let y = 0;
+ x.x = Some(&y); // error: `y` does not live long enough
+}
+println!("{:?}", x.x);
+```
+
+Here, `y` is dropped at the end of the inner scope, but it is borrowed by
+`x` until the `println!`. To fix the previous example, just remove the scope
+so that `y` isn't dropped until after the `println!`:
+
+```
+struct Foo<'a> {
+ x: Option<&'a u32>,
+}
+
+let mut x = Foo { x: None };
+
+let y = 0;
+x.x = Some(&y);
+
+println!("{:?}", x.x);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0599.md b/compiler/rustc_error_codes/src/error_codes/E0599.md
new file mode 100644
index 000000000..5b1590b29
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0599.md
@@ -0,0 +1,26 @@
+This error occurs when a method is used on a type which doesn't implement it.
+
+Erroneous code example:
+
+```compile_fail,E0599
+struct Mouth;
+
+let x = Mouth;
+x.chocolate(); // error: no method named `chocolate` found for type `Mouth`
+ // in the current scope
+```
+
+In this case, you need to implement the `chocolate` method to fix the error:
+
+```
+struct Mouth;
+
+impl Mouth {
+ fn chocolate(&self) { // We implement the `chocolate` method here.
+ println!("Hmmm! I love chocolate!");
+ }
+}
+
+let x = Mouth;
+x.chocolate(); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0600.md b/compiler/rustc_error_codes/src/error_codes/E0600.md
new file mode 100644
index 000000000..356006c72
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0600.md
@@ -0,0 +1,40 @@
+A unary operator was used on a type which doesn't implement it.
+
+Erroneous code example:
+
+```compile_fail,E0600
+enum Question {
+ Yes,
+ No,
+}
+
+!Question::Yes; // error: cannot apply unary operator `!` to type `Question`
+```
+
+In this case, `Question` would need to implement the `std::ops::Not` trait in
+order to be able to use `!` on it. Let's implement it:
+
+```
+use std::ops::Not;
+
+enum Question {
+ Yes,
+ No,
+}
+
+// We implement the `Not` trait on the enum.
+impl Not for Question {
+ type Output = bool;
+
+ fn not(self) -> bool {
+ match self {
+ Question::Yes => false, // If the `Question` is `Yes`, then it
+ // returns false.
+ Question::No => true, // And here we do the opposite.
+ }
+ }
+}
+
+assert_eq!(!Question::Yes, false);
+assert_eq!(!Question::No, true);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0601.md b/compiler/rustc_error_codes/src/error_codes/E0601.md
new file mode 100644
index 000000000..7194b7971
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0601.md
@@ -0,0 +1,15 @@
+No `main` function was found in a binary crate.
+
+To fix this error, add a `main` function:
+
+```
+fn main() {
+ // Your program will start here.
+ println!("Hello world!");
+}
+```
+
+If you don't know the basics of Rust, you can look at the
+[Rust Book][rust-book] to get started.
+
+[rust-book]: https://doc.rust-lang.org/book/
diff --git a/compiler/rustc_error_codes/src/error_codes/E0602.md b/compiler/rustc_error_codes/src/error_codes/E0602.md
new file mode 100644
index 000000000..7980b704c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0602.md
@@ -0,0 +1,17 @@
+An unknown or invalid lint was used on the command line.
+
+Erroneous code example:
+
+```sh
+rustc -D bogus rust_file.rs
+```
+
+Maybe you just misspelled the lint name or the lint doesn't exist anymore.
+Either way, try to update/remove it in order to fix the error.
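+
+For comparison, passing the name of an existing lint (here the built-in
+`unused_variables` lint) is accepted:
+
+```sh
+rustc -D unused_variables rust_file.rs
+```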
diff --git a/compiler/rustc_error_codes/src/error_codes/E0603.md b/compiler/rustc_error_codes/src/error_codes/E0603.md
new file mode 100644
index 000000000..eb293118a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0603.md
@@ -0,0 +1,26 @@
+A private item was used outside its scope.
+
+Erroneous code example:
+
+```compile_fail,E0603
+mod foo {
+ const PRIVATE: u32 = 0x_a_bad_1dea_u32; // This const is private, so we
+ // can't use it outside of the
+ // `foo` module.
+}
+
+println!("const value: {}", foo::PRIVATE); // error: constant `PRIVATE`
+ // is private
+```
+
+In order to fix this error, you need to make the item public by using the `pub`
+keyword. Example:
+
+```
+mod foo {
+ pub const PRIVATE: u32 = 0x_a_bad_1dea_u32; // We set it public by using the
+ // `pub` keyword.
+}
+
+println!("const value: {}", foo::PRIVATE); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0604.md b/compiler/rustc_error_codes/src/error_codes/E0604.md
new file mode 100644
index 000000000..806f0001c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0604.md
@@ -0,0 +1,24 @@
+A cast to `char` was attempted on a type other than `u8`.
+
+Erroneous code example:
+
+```compile_fail,E0604
+0u32 as char; // error: only `u8` can be cast as `char`, not `u32`
+```
+
+`char` is a Unicode Scalar Value, an integer value from 0 to 0xD7FF or from
+0xE000 to 0x10FFFF. (The gap is reserved for surrogate code points.) Only
+`u8` always fits in those ranges, so only `u8` may be cast to `char`.
+
+To allow larger values, use `char::from_u32`, which checks the value is valid.
+
+```
+assert_eq!(86u8 as char, 'V'); // ok!
+assert_eq!(char::from_u32(0x3B1), Some('α')); // ok!
+assert_eq!(char::from_u32(0xD800), None); // not a USV.
+```
+
+For more information about casts, take a look at the Type cast section in
+[The Reference Book][1].
+
+[1]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions
diff --git a/compiler/rustc_error_codes/src/error_codes/E0605.md b/compiler/rustc_error_codes/src/error_codes/E0605.md
new file mode 100644
index 000000000..f3cc65dd8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0605.md
@@ -0,0 +1,28 @@
+An invalid cast was attempted.
+
+Erroneous code examples:
+
+```compile_fail,E0605
+let x = 0u8;
+x as Vec<u8>; // error: non-primitive cast: `u8` as `std::vec::Vec<u8>`
+
+// Another example
+
+let v = core::ptr::null::<u8>(); // So here, `v` is a `*const u8`.
+v as &u8; // error: non-primitive cast: `*const u8` as `&u8`
+```
+
+Only primitive types can be cast into each other. Examples:
+
+```
+let x = 0u8;
+x as u32; // ok!
+
+let v = core::ptr::null::<u8>();
+v as *const i8; // ok!
+```
+
+For more information about casts, take a look at the Type cast section in
+[The Reference Book][1].
+
+[1]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions
diff --git a/compiler/rustc_error_codes/src/error_codes/E0606.md b/compiler/rustc_error_codes/src/error_codes/E0606.md
new file mode 100644
index 000000000..06ee7497f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0606.md
@@ -0,0 +1,21 @@
+An incompatible cast was attempted.
+
+Erroneous code example:
+
+```compile_fail,E0606
+let x = &0u8; // Here, `x` is a `&u8`.
+let y: u32 = x as u32; // error: casting `&u8` as `u32` is invalid
+```
+
+When casting, keep in mind that only primitive types can be cast into each
+other. Example:
+
+```
+let x = &0u8;
+let y: u32 = *x as u32; // We dereference it first and then cast it.
+```
+
+For more information about casts, take a look at the Type cast section in
+[The Reference Book][1].
+
+[1]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions
diff --git a/compiler/rustc_error_codes/src/error_codes/E0607.md b/compiler/rustc_error_codes/src/error_codes/E0607.md
new file mode 100644
index 000000000..054524692
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0607.md
@@ -0,0 +1,36 @@
+A cast between a thin and a fat pointer was attempted.
+
+Erroneous code example:
+
+```compile_fail,E0607
+let v = core::ptr::null::<u8>();
+v as *const [u8];
+```
+
+First: what are thin and fat pointers?
+
+Thin pointers are "simple" pointers: they are purely a reference to a memory
+address.
+
+Fat pointers are pointers referencing Dynamically Sized Types (also called
+DSTs). DSTs don't have a statically known size, therefore they can only exist
+behind some kind of pointer that contains additional information. For example,
+slices and trait objects are DSTs. In the case of slices, the additional
+information the fat pointer holds is their length (the number of elements).
+
+To fix this error, don't try to cast directly between thin and fat pointers.
+
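+If you really need a fat pointer, construct it together with its metadata
+instead of casting; for slices, `core::ptr::slice_from_raw_parts` supplies the
+missing length. A sketch:
+
+```
+let v = core::ptr::null::<u8>();
+// A `*const [u8]` carries a length, so provide one explicitly:
+let s: *const [u8] = core::ptr::slice_from_raw_parts(v, 0);
+```
+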
+For more information about type casts, take a look at the section of the
+[The Rust Reference][1] on type cast expressions.
+
+[1]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions
diff --git a/compiler/rustc_error_codes/src/error_codes/E0608.md b/compiler/rustc_error_codes/src/error_codes/E0608.md
new file mode 100644
index 000000000..d0ebc3a26
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0608.md
@@ -0,0 +1,18 @@
+An attempt was made to index a value whose type doesn't implement the
+`std::ops::Index` trait.
+
+Erroneous code example:
+
+```compile_fail,E0608
+0u8[2]; // error: cannot index into a value of type `u8`
+```
+
+To be able to index into a type it needs to implement the `std::ops::Index`
+trait. Example:
+
+```
+let v: Vec<u8> = vec![0, 1, 2, 3];
+
+// The `Vec` type implements the `Index` trait so you can do:
+println!("{}", v[2]);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0609.md b/compiler/rustc_error_codes/src/error_codes/E0609.md
new file mode 100644
index 000000000..a9db34f47
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0609.md
@@ -0,0 +1,24 @@
+Attempted to access a non-existent field in a struct.
+
+Erroneous code example:
+
+```compile_fail,E0609
+struct StructWithFields {
+ x: u32,
+}
+
+let s = StructWithFields { x: 0 };
+println!("{}", s.foo); // error: no field `foo` on type `StructWithFields`
+```
+
+To fix this error, check that you didn't misspell the field's name or that the
+field actually exists. Example:
+
+```
+struct StructWithFields {
+ x: u32,
+}
+
+let s = StructWithFields { x: 0 };
+println!("{}", s.x); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0610.md b/compiler/rustc_error_codes/src/error_codes/E0610.md
new file mode 100644
index 000000000..c737bd618
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0610.md
@@ -0,0 +1,30 @@
+Attempted to access a field on a primitive type.
+
+Erroneous code example:
+
+```compile_fail,E0610
+let x: u32 = 0;
+println!("{}", x.foo); // error: `{integer}` is a primitive type, therefore
+ // doesn't have fields
+```
+
+Primitive types are the most basic types available in Rust and don't have
+fields. To access data via named fields, struct types are used. Example:
+
+```
+// We declare a struct called `Foo` containing two fields:
+struct Foo {
+ x: u32,
+ y: i64,
+}
+
+// We create an instance of this struct:
+let variable = Foo { x: 0, y: -12 };
+// And we can now access its fields:
+println!("x: {}, y: {}", variable.x, variable.y);
+```
+
+For more information about [primitives] and [structs], take a look at the Book.
+
+[primitives]: https://doc.rust-lang.org/book/ch03-02-data-types.html
+[structs]: https://doc.rust-lang.org/book/ch05-00-structs.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0614.md b/compiler/rustc_error_codes/src/error_codes/E0614.md
new file mode 100644
index 000000000..d0ed06ebc
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0614.md
@@ -0,0 +1,18 @@
+Attempted to dereference a variable which cannot be dereferenced.
+
+Erroneous code example:
+
+```compile_fail,E0614
+let y = 0u32;
+*y; // error: type `u32` cannot be dereferenced
+```
+
+Only types implementing `std::ops::Deref` can be dereferenced (such as `&T`).
+Example:
+
+```
+let y = 0u32;
+let x = &y;
+// So here, `x` is a `&u32`, so we can dereference it:
+*x; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0615.md b/compiler/rustc_error_codes/src/error_codes/E0615.md
new file mode 100644
index 000000000..f513d0596
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0615.md
@@ -0,0 +1,35 @@
+Attempted to access a method like a field.
+
+Erroneous code example:
+
+```compile_fail,E0615
+struct Foo {
+ x: u32,
+}
+
+impl Foo {
+ fn method(&self) {}
+}
+
+let f = Foo { x: 0 };
+f.method; // error: attempted to take value of method `method` on type `Foo`
+```
+
+If you want to use a method, add `()` after it:
+
+```
+# struct Foo { x: u32 }
+# impl Foo { fn method(&self) {} }
+# let f = Foo { x: 0 };
+f.method();
+```
+
+However, if you wanted to access a field of a struct, check that the field
+name is spelled correctly. Example:
+
+```
+# struct Foo { x: u32 }
+# impl Foo { fn method(&self) {} }
+# let f = Foo { x: 0 };
+println!("{}", f.x);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0616.md b/compiler/rustc_error_codes/src/error_codes/E0616.md
new file mode 100644
index 000000000..5acbc6e44
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0616.md
@@ -0,0 +1,57 @@
+Attempted to access a private field on a struct.
+
+Erroneous code example:
+
+```compile_fail,E0616
+mod some_module {
+ pub struct Foo {
+ x: u32, // So `x` is private in here.
+ }
+
+ impl Foo {
+ pub fn new() -> Foo { Foo { x: 0 } }
+ }
+}
+
+let f = some_module::Foo::new();
+println!("{}", f.x); // error: field `x` of struct `some_module::Foo` is private
+```
+
+If you want to access this field, you have two options:
+
+1) Make the field public:
+
+```
+mod some_module {
+ pub struct Foo {
+ pub x: u32, // `x` is now public.
+ }
+
+ impl Foo {
+ pub fn new() -> Foo { Foo { x: 0 } }
+ }
+}
+
+let f = some_module::Foo::new();
+println!("{}", f.x); // ok!
+```
+
+2) Add a getter function:
+
+```
+mod some_module {
+ pub struct Foo {
+ x: u32, // So `x` is still private in here.
+ }
+
+ impl Foo {
+ pub fn new() -> Foo { Foo { x: 0 } }
+
+ // We create the getter function here:
+ pub fn get_x(&self) -> &u32 { &self.x }
+ }
+}
+
+let f = some_module::Foo::new();
+println!("{}", f.get_x()); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0617.md b/compiler/rustc_error_codes/src/error_codes/E0617.md
new file mode 100644
index 000000000..eed384b49
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0617.md
@@ -0,0 +1,33 @@
+Attempted to pass an invalid type of variable into a variadic function.
+
+Erroneous code example:
+
+```compile_fail,E0617
+# use std::os::raw::{c_char, c_int};
+extern "C" {
+ fn printf(format: *const c_char, ...) -> c_int;
+}
+
+unsafe {
+ printf("%f\n\0".as_ptr() as _, 0f32);
+ // error: cannot pass an `f32` to variadic function, cast to `c_double`
+}
+```
+
+Certain Rust types must be cast before passing them to a variadic function,
+because of arcane ABI rules dictated by the C standard. To fix the error,
+cast the value to the type specified by the error message (which you may need
+to import from `std::os::raw`).
+
+In this case, `c_double` has the same size as `f64` so we can use it directly:
+
+```no_run
+# use std::os::raw::{c_char, c_int};
+# extern "C" {
+# fn printf(format: *const c_char, ...) -> c_int;
+# }
+
+unsafe {
+ printf("%f\n\0".as_ptr() as _, 0f64); // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0618.md b/compiler/rustc_error_codes/src/error_codes/E0618.md
new file mode 100644
index 000000000..c8dc9040c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0618.md
@@ -0,0 +1,26 @@
+Attempted to call something which is neither a function nor a method.
+
+Erroneous code examples:
+
+```compile_fail,E0618
+enum X {
+ Entry,
+}
+
+X::Entry(); // error: expected function, tuple struct or tuple variant,
+ // found `X::Entry`
+
+// Or even simpler:
+let x = 0i32;
+x(); // error: expected function, tuple struct or tuple variant, found `i32`
+```
+
+Only functions and methods can be called using `()`. Example:
+
+```
+// We declare a function:
+fn i_am_a_function() {}
+
+// And we call it:
+i_am_a_function();
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0619.md b/compiler/rustc_error_codes/src/error_codes/E0619.md
new file mode 100644
index 000000000..f516de430
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0619.md
@@ -0,0 +1,39 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+The type-checker needed to know the type of an expression, but that type had not
+yet been inferred.
+
+Erroneous code example:
+
+```compile_fail
+let mut x = vec![];
+match x.pop() {
+ Some(v) => {
+ // Here, the type of `v` is not (yet) known, so we
+ // cannot resolve this method call:
+ v.to_uppercase(); // error: the type of this value must be known in
+ // this context
+ }
+ None => {}
+}
+```
+
+Type inference typically proceeds from the top of the function to the bottom,
+figuring out types as it goes. In some cases -- notably method calls and
+overloadable operators like `*` -- the type checker may not have enough
+information *yet* to make progress. This can be true even if the rest of the
+function provides enough context (because the type-checker hasn't looked that
+far ahead yet). In this case, type annotations can be used to help it along.
+
+To fix this error, just specify the type of the variable. Example:
+
+```
+let mut x: Vec<String> = vec![]; // We specify the type of the vec's elements.
+match x.pop() {
+ Some(v) => {
+ v.to_uppercase(); // Since rustc now knows the type of the vec elements,
+ // we can use `v`'s methods.
+ }
+ None => {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0620.md b/compiler/rustc_error_codes/src/error_codes/E0620.md
new file mode 100644
index 000000000..f8e442807
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0620.md
@@ -0,0 +1,18 @@
+A cast to an unsized type was attempted.
+
+Erroneous code example:
+
+```compile_fail,E0620
+let x = &[1_usize, 2] as [usize]; // error: cast to unsized type: `&[usize; 2]`
+ // as `[usize]`
+```
+
+In Rust, some types don't have a known size at compile-time. For example, in a
+slice type like `[u32]`, the number of elements is not known at compile-time and
+hence the overall size cannot be computed. As a result, such types can only be
+manipulated through a reference (e.g., `&T` or `&mut T`) or other pointer-type
+(e.g., `Box` or `Rc`). Try casting to a reference instead:
+
+```
+let x = &[1_usize, 2] as &[usize]; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0621.md b/compiler/rustc_error_codes/src/error_codes/E0621.md
new file mode 100644
index 000000000..7c0878df2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0621.md
@@ -0,0 +1,34 @@
+This error code indicates a mismatch between the lifetimes appearing in the
+function signature (i.e., the parameter types and the return type) and the
+data-flow found in the function body.
+
+Erroneous code example:
+
+```compile_fail,E0621
+fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 { // error: explicit lifetime
+ // required in the type of
+ // `y`
+ if x > y { x } else { y }
+}
+```
+
+In the code above, the function is returning data borrowed from either `x` or
+`y`, but the `'a` annotation indicates that it is returning data only from `x`.
+To fix the error, the signature and the body must be made to match. Typically,
+this is done by updating the function signature. So, in this case, we change
+the type of `y` to `&'a i32`, like so:
+
+```
+fn foo<'a>(x: &'a i32, y: &'a i32) -> &'a i32 {
+ if x > y { x } else { y }
+}
+```
+
+Now the signature indicates that the function returns data borrowed from `x`
+or `y`. Alternatively, you could change the body to not return data from `y`:
+
+```
+fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 {
+ x
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0622.md b/compiler/rustc_error_codes/src/error_codes/E0622.md
new file mode 100644
index 000000000..990a25494
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0622.md
@@ -0,0 +1,25 @@
+An intrinsic was declared without being a function.
+
+Erroneous code example:
+
+```compile_fail,E0622
+#![feature(intrinsics)]
+extern "rust-intrinsic" {
+ pub static breakpoint : fn(); // error: intrinsic must be a function
+}
+
+fn main() { unsafe { breakpoint(); } }
+```
+
+An intrinsic is a function available for use in a given programming language
+whose implementation is handled specially by the compiler. In order to fix this
+error, just declare a function. Example:
+
+```no_run
+#![feature(intrinsics)]
+extern "rust-intrinsic" {
+ pub fn breakpoint(); // ok!
+}
+
+fn main() { unsafe { breakpoint(); } }
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0623.md b/compiler/rustc_error_codes/src/error_codes/E0623.md
new file mode 100644
index 000000000..34db641bb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0623.md
@@ -0,0 +1,72 @@
+A lifetime didn't match what was expected.
+
+Erroneous code example:
+
+```compile_fail,E0623
+struct Foo<'a, 'b, T>(std::marker::PhantomData<(&'a (), &'b (), T)>)
+where
+ T: Convert<'a, 'b>;
+
+trait Convert<'a, 'b>: Sized {
+ fn cast(&'a self) -> &'b Self;
+}
+impl<'long: 'short, 'short, T> Convert<'long, 'short> for T {
+ fn cast(&'long self) -> &'short T {
+ self
+ }
+}
+// error
+fn badboi<'in_, 'out, T>(
+ x: Foo<'in_, 'out, T>,
+ sadness: &'in_ T
+) -> &'out T {
+ sadness.cast()
+}
+```
+
+In this example, we tried to return a value whose lifetime is incompatible
+with the expected one (`'in_` is unrelated to `'out`). We can solve this issue
+in two different ways:
+
+Either we make `'in_` live at least as long as `'out`:
+
+```
+struct Foo<'a, 'b, T>(std::marker::PhantomData<(&'a (), &'b (), T)>)
+where
+ T: Convert<'a, 'b>;
+
+trait Convert<'a, 'b>: Sized {
+ fn cast(&'a self) -> &'b Self;
+}
+impl<'long: 'short, 'short, T> Convert<'long, 'short> for T {
+ fn cast(&'long self) -> &'short T {
+ self
+ }
+}
+fn badboi<'in_: 'out, 'out, T>(
+ x: Foo<'in_, 'out, T>,
+ sadness: &'in_ T
+) -> &'out T {
+ sadness.cast()
+}
+```
+
+Or we use only one lifetime:
+
+```
+struct Foo<'a, 'b, T>(std::marker::PhantomData<(&'a (), &'b (), T)>)
+where
+ T: Convert<'a, 'b>;
+
+trait Convert<'a, 'b>: Sized {
+ fn cast(&'a self) -> &'b Self;
+}
+impl<'long: 'short, 'short, T> Convert<'long, 'short> for T {
+ fn cast(&'long self) -> &'short T {
+ self
+ }
+}
+fn badboi<'out, T>(x: Foo<'out, 'out, T>, sadness: &'out T) -> &'out T {
+ sadness.cast()
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0624.md b/compiler/rustc_error_codes/src/error_codes/E0624.md
new file mode 100644
index 000000000..0fd21c44b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0624.md
@@ -0,0 +1,53 @@
+A private item was used outside of its scope.
+
+Erroneous code example:
+
+```compile_fail,E0624
+mod inner {
+ pub struct Foo;
+
+ impl Foo {
+ fn method(&self) {}
+ }
+}
+
+let foo = inner::Foo;
+foo.method(); // error: method `method` is private
+```
+
+Two possibilities are available to solve this issue:
+
+1. Only use the item in the scope where it has been defined:
+
+```
+mod inner {
+ pub struct Foo;
+
+ impl Foo {
+ fn method(&self) {}
+ }
+
+ pub fn call_method(foo: &Foo) { // We create a public function.
+ foo.method(); // Which calls the item.
+ }
+}
+
+let foo = inner::Foo;
+inner::call_method(&foo); // And since the function is public, we can call the
+ // method through it.
+```
+
+2. Make the item public:
+
+```
+mod inner {
+ pub struct Foo;
+
+ impl Foo {
+ pub fn method(&self) {} // It's now public.
+ }
+}
+
+let foo = inner::Foo;
+foo.method(); // Ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0625.md b/compiler/rustc_error_codes/src/error_codes/E0625.md
new file mode 100644
index 000000000..7db857723
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0625.md
@@ -0,0 +1,28 @@
+A compile-time const variable is referring to a thread-local static variable.
+
+Erroneous code example:
+
+```compile_fail,E0625
+#![feature(thread_local)]
+
+#[thread_local]
+static X: usize = 12;
+
+const Y: usize = 2 * X;
+```
+
+Static and const variables can refer to other const variables but a const
+variable cannot refer to a thread-local static variable. In this example,
+`Y` cannot refer to `X`. To fix this, the value can be extracted as a const
+and then used:
+
+```
+#![feature(thread_local)]
+
+const C: usize = 12;
+
+#[thread_local]
+static X: usize = C;
+
+const Y: usize = 2 * C;
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0626.md b/compiler/rustc_error_codes/src/error_codes/E0626.md
new file mode 100644
index 000000000..cc6e03d1c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0626.md
@@ -0,0 +1,90 @@
+This error occurs because a borrow in a generator persists across a
+yield point.
+
+Erroneous code example:
+
+```compile_fail,E0626
+# #![feature(generators, generator_trait, pin)]
+# use std::ops::Generator;
+# use std::pin::Pin;
+let mut b = || {
+ let a = &String::new(); // <-- This borrow...
+ yield (); // ...is still in scope here, when the yield occurs.
+ println!("{}", a);
+};
+Pin::new(&mut b).resume(());
+```
+
+At present, it is not permitted to have a yield that occurs while a
+borrow is still in scope. To resolve this error, the borrow must
+either be "contained" to a smaller scope that does not overlap the
+yield or else eliminated in another way. So, for example, we might
+resolve the previous example by removing the borrow and just storing
+the integer by value:
+
+```
+# #![feature(generators, generator_trait, pin)]
+# use std::ops::Generator;
+# use std::pin::Pin;
+let mut b = || {
+ let a = 3;
+ yield ();
+ println!("{}", a);
+};
+Pin::new(&mut b).resume(());
+```
+
+This is a very simple case, of course. In more complex cases, we may
+wish to have more than one reference to the value that was borrowed --
+in those cases, something like the `Rc` or `Arc` types may be useful.
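+
+For instance, a sketch of that approach, using `Rc` to keep two owned handles
+to the same data across the yield instead of a borrow:
+
+```
+# #![feature(generators, generator_trait, pin)]
+# use std::ops::Generator;
+# use std::pin::Pin;
+use std::rc::Rc;
+
+let mut b = || {
+    let first = Rc::new(String::new());
+    let second = Rc::clone(&first); // an owned handle, not a borrow
+    yield ();
+    println!("{} {}", first, second);
+};
+Pin::new(&mut b).resume(());
+```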
+
+This error also frequently arises with iteration:
+
+```compile_fail,E0626
+# #![feature(generators, generator_trait, pin)]
+# use std::ops::Generator;
+# use std::pin::Pin;
+let mut b = || {
+ let v = vec![1,2,3];
+ for &x in &v { // <-- borrow of `v` is still in scope...
+ yield x; // ...when this yield occurs.
+ }
+};
+Pin::new(&mut b).resume(());
+```
+
+Such cases can sometimes be resolved by iterating "by value" (or using
+`into_iter()`) to avoid borrowing:
+
+```
+# #![feature(generators, generator_trait, pin)]
+# use std::ops::Generator;
+# use std::pin::Pin;
+let mut b = || {
+ let v = vec![1,2,3];
+ for x in v { // <-- Take ownership of the values instead!
+ yield x; // <-- Now yield is OK.
+ }
+};
+Pin::new(&mut b).resume(());
+```
+
+If taking ownership is not an option, using indices can work too:
+
+```
+# #![feature(generators, generator_trait, pin)]
+# use std::ops::Generator;
+# use std::pin::Pin;
+let mut b = || {
+ let v = vec![1,2,3];
+ let len = v.len(); // (*)
+ for i in 0..len {
+ let x = v[i]; // (*)
+ yield x; // <-- Now yield is OK.
+ }
+};
+Pin::new(&mut b).resume(());
+
+// (*) -- Unfortunately, these temporaries are currently required.
+// See <https://github.com/rust-lang/rust/issues/43122>.
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0627.md b/compiler/rustc_error_codes/src/error_codes/E0627.md
new file mode 100644
index 000000000..21358e1e5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0627.md
@@ -0,0 +1,30 @@
+A yield expression was used outside of the generator literal.
+
+Erroneous code example:
+
+```compile_fail,E0627
+#![feature(generators, generator_trait)]
+
+fn fake_generator() -> &'static str {
+ yield 1;
+ return "foo"
+}
+
+fn main() {
+ let mut generator = fake_generator;
+}
+```
+
+The error occurs because the `yield` keyword can only be used inside a
+generator literal. This can be fixed by constructing the generator correctly.
+
+```
+#![feature(generators, generator_trait)]
+
+fn main() {
+ let mut generator = || {
+ yield 1;
+ return "foo"
+ };
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0628.md b/compiler/rustc_error_codes/src/error_codes/E0628.md
new file mode 100644
index 000000000..40040c9a5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0628.md
@@ -0,0 +1,30 @@
+More than one parameter was used for a generator.
+
+Erroneous code example:
+
+```compile_fail,E0628
+#![feature(generators, generator_trait)]
+
+fn main() {
+ let generator = |a: i32, b: i32| {
+ // error: too many parameters for a generator
+ // (only 0 or 1 parameter is allowed)
+ yield a;
+ };
+}
+```
+
+At present, it is not permitted to pass more than one explicit
+parameter to a generator. This can be fixed by using
+at most one parameter for the generator. For example, we might resolve
+the previous example by passing only one parameter.
+
+```
+#![feature(generators, generator_trait)]
+
+fn main() {
+ let generator = |a: i32| {
+ yield a;
+ };
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0631.md b/compiler/rustc_error_codes/src/error_codes/E0631.md
new file mode 100644
index 000000000..6188d5f61
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0631.md
@@ -0,0 +1,27 @@
+This error indicates a type mismatch in closure arguments.
+
+Erroneous code example:
+
+```compile_fail,E0631
+fn foo<F: Fn(i32)>(f: F) {
+}
+
+fn main() {
+ foo(|x: &str| {});
+}
+```
+
+The error occurs because `foo` accepts a closure that takes an `i32` argument,
+but in `main`, it is passed a closure with a `&str` argument.
+
+This can be resolved by changing the type annotation or removing it entirely
+if it can be inferred.
+
+```
+fn foo<F: Fn(i32)>(f: F) {
+}
+
+fn main() {
+ foo(|x: i32| {});
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0632.md b/compiler/rustc_error_codes/src/error_codes/E0632.md
new file mode 100644
index 000000000..7e0a5c71f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0632.md
@@ -0,0 +1,27 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+An explicit generic argument was provided when calling a function that
+uses `impl Trait` in argument position.
+
+Erroneous code example:
+
+```ignore (no longer an error)
+fn foo<T: Copy>(a: T, b: impl Clone) {}
+
+foo::<i32>(0i32, "abc".to_string());
+```
+
+Either all generic arguments should be inferred at the call site, or
+the function definition should use an explicit generic type parameter
+instead of `impl Trait`. Example:
+
+```
+fn foo<T: Copy>(a: T, b: impl Clone) {}
+fn bar<T: Copy, U: Clone>(a: T, b: U) {}
+
+foo(0i32, "abc".to_string());
+
+bar::<i32, String>(0i32, "abc".to_string());
+bar::<_, _>(0i32, "abc".to_string());
+bar(0i32, "abc".to_string());
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0633.md b/compiler/rustc_error_codes/src/error_codes/E0633.md
new file mode 100644
index 000000000..5b6c15c82
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0633.md
@@ -0,0 +1,27 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+The `unwind` attribute was malformed.
+
+Erroneous code example:
+
+```compile_fail
+#![feature(unwind_attributes)]
+
+#[unwind()] // error: expected one argument
+pub extern "C" fn something() {}
+
+fn main() {}
+```
+
+The `#[unwind]` attribute should be used as follows:
+
+- `#[unwind(aborts)]` -- specifies that a non-Rust ABI function
+  should abort the process if it attempts to unwind. This is the safer
+  and preferred option.
+
+- `#[unwind(allowed)]` -- specifies that a non-Rust ABI function
+ should be allowed to unwind. This can easily result in Undefined
+ Behavior (UB), so be careful.
+
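+For example, the erroneous code above could have been written as follows
+(unchecked, since the `unwind_attributes` feature has been removed):
+
+```ignore (the unwind_attributes feature was removed)
+#![feature(unwind_attributes)]
+
+#[unwind(aborts)] // ok: abort instead of unwinding across the C ABI
+pub extern "C" fn something() {}
+
+fn main() {}
+```
+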
+NB. The default behavior here is "allowed", but this is unspecified
+and likely to change in the future.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0634.md b/compiler/rustc_error_codes/src/error_codes/E0634.md
new file mode 100644
index 000000000..0c4ed2596
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0634.md
@@ -0,0 +1,20 @@
+A type has conflicting `packed` representation hints.
+
+Erroneous code examples:
+
+```compile_fail,E0634
+#[repr(packed, packed(2))] // error!
+struct Company(i32);
+
+#[repr(packed(2))] // error!
+#[repr(packed)]
+struct Company2(i32);
+```
+
+You cannot use conflicting `packed` hints on the same type. If you want to
+pack a type to a given size, you should provide the size as an argument to
+`packed`:
+
+```
+#[repr(packed)] // ok!
+struct Company(i32);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0635.md b/compiler/rustc_error_codes/src/error_codes/E0635.md
new file mode 100644
index 000000000..a39d2be4f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0635.md
@@ -0,0 +1,7 @@
+The `#![feature]` attribute specified an unknown feature.
+
+Erroneous code example:
+
+```compile_fail,E0635
+#![feature(nonexistent_rust_feature)] // error: unknown feature
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0636.md b/compiler/rustc_error_codes/src/error_codes/E0636.md
new file mode 100644
index 000000000..57cf72db5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0636.md
@@ -0,0 +1,9 @@
+A `#![feature]` attribute was declared multiple times.
+
+Erroneous code example:
+
+```compile_fail,E0636
+#![allow(stable_features)]
+#![feature(rust1)]
+#![feature(rust1)] // error: the feature `rust1` has already been declared
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0637.md b/compiler/rustc_error_codes/src/error_codes/E0637.md
new file mode 100644
index 000000000..62d5565df
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0637.md
@@ -0,0 +1,51 @@
+The `'_` lifetime name or `&T` without an explicit lifetime name has been
+used in an illegal place.
+
+Erroneous code example:
+
+```compile_fail,E0106,E0637
+fn underscore_lifetime<'_>(str1: &'_ str, str2: &'_ str) -> &'_ str {
+ //^^ `'_` is a reserved lifetime name
+ if str1.len() > str2.len() {
+ str1
+ } else {
+ str2
+ }
+}
+
+fn and_without_explicit_lifetime<T>()
+where
+ T: Into<&u32>,
+ //^ `&` without an explicit lifetime name
+{
+}
+```
+
+First, `'_` cannot be used as a lifetime identifier in some places
+because it is reserved for the anonymous lifetime. Second, `&T`
+without an explicit lifetime name also cannot be used in some places.
+To fix these errors, use a lowercase letter such as `'a`, or a series
+of lowercase letters such as `'foo`. For more information about lifetime
+identifiers, see [the book][bk-no]. For more information on using
+the anonymous lifetime in Rust 2018, see [the Rust 2018 blog post][blog-al].
+
+Corrected example:
+
+```
+fn underscore_lifetime<'a>(str1: &'a str, str2: &'a str) -> &'a str {
+ if str1.len() > str2.len() {
+ str1
+ } else {
+ str2
+ }
+}
+
+fn and_without_explicit_lifetime<'foo, T>()
+where
+ T: Into<&'foo u32>,
+{
+}
+```
+
+[bk-no]: https://doc.rust-lang.org/book/appendix-02-operators.html#non-operator-symbols
+[blog-al]: https://blog.rust-lang.org/2018/12/06/Rust-1.31-and-rust-2018.html#more-lifetime-elision-rules
diff --git a/compiler/rustc_error_codes/src/error_codes/E0638.md b/compiler/rustc_error_codes/src/error_codes/E0638.md
new file mode 100644
index 000000000..14cd31502
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0638.md
@@ -0,0 +1,47 @@
+This error indicates that the struct, enum or enum variant must be matched
+non-exhaustively as it has been marked as `non_exhaustive`.
+
+When applied within a crate, downstream users of the crate will need to use the
+`_` pattern when matching enums and use the `..` pattern when matching structs.
+Downstream crates cannot match against non-exhaustive enum variants.
+
+In the example below, since the enum is marked as `non_exhaustive`,
+downstream crates are required to match non-exhaustively on it.
+
+```rust,ignore (pseudo-Rust)
+#[non_exhaustive]
+pub enum Error {
+ Message(String),
+ Other,
+}
+
+impl Display for Error {
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ // This will not error, despite being marked as non_exhaustive, as this
+ // enum is defined within the current crate, it can be matched
+ // exhaustively.
+ let display = match self {
+ Message(s) => s,
+ Other => "other or unknown error",
+ };
+ formatter.write_str(display)
+ }
+}
+```
+
+An example of matching non-exhaustively on the above enum is provided below:
+
+```rust,ignore (pseudo-Rust)
+use mycrate::Error;
+
+// This will not error as the non_exhaustive Error enum has been matched with a
+// wildcard.
+match error {
+ Message(s) => ...,
+ Other => ...,
+ _ => ...,
+}
+```
+
+Similarly, for structs, match with `..` to avoid this error.
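+
+For instance, a sketch of such a non-exhaustive struct match
+(`mycrate::Config` is a hypothetical struct from another crate):
+
+```rust,ignore (pseudo-Rust)
+use mycrate::Config;
+
+// `..` is required here, even though all current fields are listed,
+// because `Config` is marked as non_exhaustive.
+let Config { width, height, .. } = config;
+```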
diff --git a/compiler/rustc_error_codes/src/error_codes/E0639.md b/compiler/rustc_error_codes/src/error_codes/E0639.md
new file mode 100644
index 000000000..4646e37fb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0639.md
@@ -0,0 +1,19 @@
+This error indicates that the struct, enum or enum variant cannot be
+instantiated from outside of the defining crate, as it has been marked
+as `non_exhaustive` and as such more fields/variants may be added in the
+future, which could cause adverse side effects for this code.
+
+Erroneous code example:
+
+```ignore (it only works cross-crate)
+#[non_exhaustive]
+pub struct NormalStruct {
+ pub first_field: u16,
+ pub second_field: u16,
+}
+
+let ns = NormalStruct { first_field: 640, second_field: 480 }; // error!
+```
+
+It is recommended that you look for a `new` function or equivalent in the
+crate's documentation.
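+
+For instance, assuming the defining crate provides a constructor (the `new`
+function below is hypothetical), the fix would look like:
+
+```ignore (it only works cross-crate)
+let ns = NormalStruct::new(640, 480); // ok!
+```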
diff --git a/compiler/rustc_error_codes/src/error_codes/E0641.md b/compiler/rustc_error_codes/src/error_codes/E0641.md
new file mode 100644
index 000000000..5848e9b5c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0641.md
@@ -0,0 +1,19 @@
+Attempted to cast to/from a pointer with an unknown kind.
+
+Erroneous code example:
+
+```compile_fail,E0641
+let b = 0 as *const _; // error
+```
+
+Type information must be provided when a pointer is cast from/into another
+type and the type cannot be inferred:
+
+```
+// Creating a pointer from a reference: the type can be inferred
+let a = &(String::from("Hello world!")) as *const _; // ok!
+
+let b = 0 as *const i32; // ok!
+
+let c: *const i32 = 0 as *const _; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0642.md b/compiler/rustc_error_codes/src/error_codes/E0642.md
new file mode 100644
index 000000000..c790aa154
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0642.md
@@ -0,0 +1,18 @@
+Trait methods currently cannot take patterns as arguments.
+
+Erroneous code example:
+
+```compile_fail,E0642
+trait Foo {
+ fn foo((x, y): (i32, i32)); // error: patterns aren't allowed
+ // in trait methods
+}
+```
+
+You can instead use a single name for the argument:
+
+```
+trait Foo {
+ fn foo(x_and_y: (i32, i32)); // ok!
+}
+```
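+
+If you need the individual components, a sketch of one option is to
+destructure inside a method body instead of in the signature:
+
+```
+trait Foo {
+    fn foo(x_and_y: (i32, i32)) {
+        let (x, y) = x_and_y; // the pattern moves into the body
+        let _ = x + y;
+    }
+}
+```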
diff --git a/compiler/rustc_error_codes/src/error_codes/E0643.md b/compiler/rustc_error_codes/src/error_codes/E0643.md
new file mode 100644
index 000000000..53919607d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0643.md
@@ -0,0 +1,12 @@
+This error indicates that there is a mismatch between generic parameters and
+`impl Trait` parameters in a trait declaration versus its impl.
+
+Erroneous code example:
+
+```compile_fail,E0643
+trait Foo {
+ fn foo(&self, _: &impl Iterator);
+}
+impl Foo for () {
+ fn foo<U: Iterator>(&self, _: &U) { } // error method `foo` has incompatible
+ // signature for trait
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0644.md b/compiler/rustc_error_codes/src/error_codes/E0644.md
new file mode 100644
index 000000000..8c68da3b2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0644.md
@@ -0,0 +1,29 @@
+A closure or generator was constructed that references its own type.
+
+Erroneous code example:
+
+```compile_fail,E0644
+fn fix<F>(f: &F)
+ where F: Fn(&F)
+{
+ f(&f);
+}
+
+fn main() {
+ fix(&|y| {
+ // Here, when the closure is called, the parameter `y` refers to the closure itself.
+ });
+}
+```
+
+Rust does not permit a closure to directly reference its own type,
+either through an argument (as in the example above) or by capturing
+itself through its environment. This restriction helps keep closure
+inference tractable.
+
+The easiest fix is to rewrite your closure into a top-level function,
+or into a method. In some cases, you may also be able to have your
+closure call itself by capturing a `&Fn()` object or `fn()` pointer
+that refers to itself. That is permitted, since the closure would be
+invoking itself via a virtual call, and hence does not directly
+reference its own *type*.
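+
+For instance, a sketch of the first suggestion: a named, top-level function
+may freely refer to itself where a closure cannot:
+
+```
+fn countdown(n: u32) {
+    if n > 0 {
+        countdown(n - 1); // a named function can call itself
+    }
+}
+
+fn main() {
+    countdown(3);
+}
+```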
diff --git a/compiler/rustc_error_codes/src/error_codes/E0646.md b/compiler/rustc_error_codes/src/error_codes/E0646.md
new file mode 100644
index 000000000..1e9ec7d43
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0646.md
@@ -0,0 +1,9 @@
+It is not possible to define `main` with a where clause.
+
+Erroneous code example:
+
+```compile_fail,E0646
+fn main() where i32: Copy { // error: main function is not allowed to have
+ // a where clause
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0647.md b/compiler/rustc_error_codes/src/error_codes/E0647.md
new file mode 100644
index 000000000..8ca6e777f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0647.md
@@ -0,0 +1,13 @@
+The `start` function was defined with a where clause.
+
+Erroneous code example:
+
+```compile_fail,E0647
+#![feature(start)]
+
+#[start]
+fn start(_: isize, _: *const *const u8) -> isize where (): Copy {
+ //^ error: start function is not allowed to have a where clause
+ 0
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0648.md b/compiler/rustc_error_codes/src/error_codes/E0648.md
new file mode 100644
index 000000000..d99dc1950
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0648.md
@@ -0,0 +1,15 @@
+An `export_name` attribute contains null characters (`\0`).
+
+Erroneous code example:
+
+```compile_fail,E0648
+#[export_name="\0foo"] // error: `export_name` may not contain null characters
+pub fn bar() {}
+```
+
+To fix this error, remove the null characters:
+
+```
+#[export_name="foo"] // ok!
+pub fn bar() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0657.md b/compiler/rustc_error_codes/src/error_codes/E0657.md
new file mode 100644
index 000000000..7fe48c511
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0657.md
@@ -0,0 +1,57 @@
+A lifetime bound on a trait implementation was captured at an incorrect place.
+
+Erroneous code example:
+
+```compile_fail,E0657
+trait Id<T> {}
+trait Lt<'a> {}
+
+impl<'a> Lt<'a> for () {}
+impl<T> Id<T> for T {}
+
+fn free_fn_capture_hrtb_in_impl_trait()
+ -> Box<for<'a> Id<impl Lt<'a>>> // error!
+{
+ Box::new(())
+}
+
+struct Foo;
+impl Foo {
+ fn impl_fn_capture_hrtb_in_impl_trait()
+ -> Box<for<'a> Id<impl Lt<'a>>> // error!
+ {
+ Box::new(())
+ }
+}
+```
+
+Here, an inappropriate lifetime was used in the `impl Trait`:
+`impl Trait` can only capture lifetimes bound at the fn or impl
+level.
+
+To fix this, we have to define the lifetime at the function or impl
+level and use that lifetime in the `impl Trait`. For example, you can
+define the lifetime at the function level:
+
+```
+trait Id<T> {}
+trait Lt<'a> {}
+
+impl<'a> Lt<'a> for () {}
+impl<T> Id<T> for T {}
+
+fn free_fn_capture_hrtb_in_impl_trait<'b>()
+ -> Box<for<'a> Id<impl Lt<'b>>> // ok!
+{
+ Box::new(())
+}
+
+struct Foo;
+impl Foo {
+ fn impl_fn_capture_hrtb_in_impl_trait<'b>()
+ -> Box<for<'a> Id<impl Lt<'b>>> // ok!
+ {
+ Box::new(())
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0658.md b/compiler/rustc_error_codes/src/error_codes/E0658.md
new file mode 100644
index 000000000..24245a38a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0658.md
@@ -0,0 +1,28 @@
+An unstable feature was used.
+
+Erroneous code example:
+
+```compile_fail,E0658
+#[repr(u128)] // error: use of unstable library feature 'repr128'
+enum Foo {
+ Bar(u64),
+}
+```
+
+If you're using a stable or a beta version of rustc, you won't be able to use
+any unstable features. In order to do so, please switch to a nightly version of
+rustc (by using [rustup]).
+
+If you're using a nightly version of rustc, just add the corresponding feature
+to be able to use it:
+
+```
+#![feature(repr128)]
+
+#[repr(u128)] // ok!
+enum Foo {
+ Bar(u64),
+}
+```
+
+[rustup]: https://rust-lang.github.io/rustup/concepts/channels.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0659.md b/compiler/rustc_error_codes/src/error_codes/E0659.md
new file mode 100644
index 000000000..e2c7e25cc
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0659.md
@@ -0,0 +1,50 @@
+An item usage is ambiguous.
+
+Erroneous code example:
+
+```compile_fail,edition2018,E0659
+pub mod moon {
+ pub fn foo() {}
+}
+
+pub mod earth {
+ pub fn foo() {}
+}
+
+mod collider {
+ pub use crate::moon::*;
+ pub use crate::earth::*;
+}
+
+fn main() {
+ crate::collider::foo(); // ERROR: `foo` is ambiguous
+}
+```
+
+This error generally appears when two items with the same name are imported into
+a module. Here, the `foo` functions are imported and reexported from the
+`collider` module and therefore, when we're using `collider::foo()`, both
+functions collide.
+
+To solve this error, the best solution is generally to keep the path before the
+item when using it. Example:
+
+```edition2018
+pub mod moon {
+ pub fn foo() {}
+}
+
+pub mod earth {
+ pub fn foo() {}
+}
+
+mod collider {
+ pub use crate::moon;
+ pub use crate::earth;
+}
+
+fn main() {
+ crate::collider::moon::foo(); // ok!
+ crate::collider::earth::foo(); // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0660.md b/compiler/rustc_error_codes/src/error_codes/E0660.md
new file mode 100644
index 000000000..abf902759
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0660.md
@@ -0,0 +1,9 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+The argument to the `llvm_asm` macro is not well-formed.
+
+Erroneous code example:
+
+```ignore (no longer emitted)
+llvm_asm!("nop" "nop");
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0661.md b/compiler/rustc_error_codes/src/error_codes/E0661.md
new file mode 100644
index 000000000..245f755cd
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0661.md
@@ -0,0 +1,10 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+An invalid syntax was passed to the second argument of an `llvm_asm` macro line.
+
+Erroneous code example:
+
+```ignore (no longer emitted)
+let a;
+llvm_asm!("nop" : "r"(a));
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0662.md b/compiler/rustc_error_codes/src/error_codes/E0662.md
new file mode 100644
index 000000000..ffb716f99
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0662.md
@@ -0,0 +1,13 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+An invalid input operand constraint was passed to the `llvm_asm` macro
+(third line).
+
+Erroneous code example:
+
+```ignore (no longer emitted)
+llvm_asm!("xor %eax, %eax"
+ :
+ : "=test"("a")
+ );
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0663.md b/compiler/rustc_error_codes/src/error_codes/E0663.md
new file mode 100644
index 000000000..351cfaca2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0663.md
@@ -0,0 +1,13 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+An invalid input operand constraint was passed to the `llvm_asm` macro
+(third line).
+
+Erroneous code example:
+
+```ignore (no longer emitted)
+llvm_asm!("xor %eax, %eax"
+ :
+ : "+test"("a")
+ );
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0664.md b/compiler/rustc_error_codes/src/error_codes/E0664.md
new file mode 100644
index 000000000..34135d5db
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0664.md
@@ -0,0 +1,13 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+A clobber was surrounded by braces in the `llvm_asm` macro.
+
+Erroneous code example:
+
+```ignore (no longer emitted)
+llvm_asm!("mov $$0x200, %eax"
+ :
+ :
+ : "{eax}"
+ );
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0665.md b/compiler/rustc_error_codes/src/error_codes/E0665.md
new file mode 100644
index 000000000..ae54d6d15
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0665.md
@@ -0,0 +1,33 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+The `Default` trait was derived on an enum.
+
+Erroneous code example:
+
+```compile_fail
+#[derive(Default)]
+enum Food {
+ Sweet,
+ Salty,
+}
+```
+
+`Default` cannot be derived on an enum for the simple reason that the
+compiler doesn't know which variant to pick by default, whereas it can for a
+struct, as long as all its fields implement the `Default` trait as well.
+
+If you still want to implement `Default` on your enum, you'll have to do it "by
+hand":
+
+```
+enum Food {
+ Sweet,
+ Salty,
+}
+
+impl Default for Food {
+ fn default() -> Food {
+ Food::Sweet
+ }
+}
+```
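+
+Note that more recent versions of Rust can derive `Default` on an enum when
+one variant is marked with the `#[default]` attribute. A sketch:
+
+```
+#[derive(Default)]
+enum Food {
+    #[default]
+    Sweet,
+    Salty,
+}
+```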
diff --git a/compiler/rustc_error_codes/src/error_codes/E0666.md b/compiler/rustc_error_codes/src/error_codes/E0666.md
new file mode 100644
index 000000000..1a0dc5a52
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0666.md
@@ -0,0 +1,25 @@
+`impl Trait` types cannot appear nested in the generic arguments of other
+`impl Trait` types.
+
+Erroneous code example:
+
+```compile_fail,E0666
+trait MyGenericTrait<T> {}
+trait MyInnerTrait {}
+
+fn foo(
+ bar: impl MyGenericTrait<impl MyInnerTrait>, // error!
+) {}
+```
+
+Type parameters for `impl Trait` types must be explicitly defined as named
+generic parameters:
+
+```
+trait MyGenericTrait<T> {}
+trait MyInnerTrait {}
+
+fn foo<T: MyInnerTrait>(
+ bar: impl MyGenericTrait<T>, // ok!
+) {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0667.md b/compiler/rustc_error_codes/src/error_codes/E0667.md
new file mode 100644
index 000000000..0709a24c4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0667.md
@@ -0,0 +1,18 @@
+`impl Trait` is not allowed in path parameters.
+
+Erroneous code example:
+
+```compile_fail,E0667
+fn some_fn(mut x: impl Iterator) -> <impl Iterator>::Item { // error!
+ x.next().unwrap()
+}
+```
+
+You cannot use `impl Trait` in path parameters. If you want something
+equivalent, you can do this instead:
+
+```
+fn some_fn<T: Iterator>(mut x: T) -> T::Item { // ok!
+ x.next().unwrap()
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0668.md b/compiler/rustc_error_codes/src/error_codes/E0668.md
new file mode 100644
index 000000000..393aabe28
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0668.md
@@ -0,0 +1,24 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Malformed inline assembly rejected by LLVM.
+
+Erroneous code example:
+
+```ignore (no longer emitted)
+#![feature(llvm_asm)]
+
+fn main() {
+ let rax: u64;
+ unsafe {
+ llvm_asm!("" :"={rax"(rax));
+ println!("Accumulator is: {}", rax);
+ }
+}
+```
+
+LLVM checks the validity of the constraints and the assembly string passed to
+it. This error implies that LLVM sees something wrong with the inline
+assembly call.
+
+In particular, it can happen if you forgot the closing bracket of a register
+constraint (see issue #51430), like in the previous code example.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0669.md b/compiler/rustc_error_codes/src/error_codes/E0669.md
new file mode 100644
index 000000000..2be8f04ed
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0669.md
@@ -0,0 +1,22 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Cannot convert inline assembly operand to a single LLVM value.
+
+Erroneous code example:
+
+```ignore (no longer emitted)
+#![feature(llvm_asm)]
+
+fn main() {
+ unsafe {
+ llvm_asm!("" :: "r"("")); // error!
+ }
+}
+```
+
+This error usually happens when trying to pass in a value to an input inline
+assembly operand that is actually a pair of values. In particular, this can
+happen when trying to pass in a slice, for instance a `&str`. In Rust, these
+values are represented internally as a pair of values, the pointer and its
+length. When passed as an input operand, this pair of values cannot be
+coerced into a register and thus we must fail with an error.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0670.md b/compiler/rustc_error_codes/src/error_codes/E0670.md
new file mode 100644
index 000000000..74c1af06c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0670.md
@@ -0,0 +1,9 @@
+Rust 2015 does not permit the use of `async fn`.
+
+Erroneous code example:
+
+```compile_fail,E0670
+async fn foo() {}
+```
+
+Switch to the Rust 2018 edition to use `async fn`.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0671.md b/compiler/rustc_error_codes/src/error_codes/E0671.md
new file mode 100644
index 000000000..d4dbfb7a5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0671.md
@@ -0,0 +1,10 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Const parameters cannot depend on type parameters.
+The following is therefore invalid:
+
+```compile_fail,E0770
+fn const_id<T, const N: T>() -> T { // error
+ N
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0687.md b/compiler/rustc_error_codes/src/error_codes/E0687.md
new file mode 100644
index 000000000..05c491553
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0687.md
@@ -0,0 +1,38 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+In-band lifetimes cannot be used in `fn`/`Fn` syntax.
+
+Erroneous code examples:
+
+```ignore (feature got removed)
+#![feature(in_band_lifetimes)]
+
+fn foo(x: fn(&'a u32)) {} // error!
+
+fn bar(x: &Fn(&'a u32)) {} // error!
+
+fn baz(x: fn(&'a u32), y: &'a u32) {} // error!
+
+struct Foo<'a> { x: &'a u32 }
+
+impl Foo<'a> {
+ fn bar(&self, x: fn(&'a u32)) {} // error!
+}
+```
+
+Lifetimes used in `fn` or `Fn` syntax must be explicitly
+declared using `<...>` binders. For example:
+
+```
+fn foo<'a>(x: fn(&'a u32)) {} // ok!
+
+fn bar<'a>(x: &Fn(&'a u32)) {} // ok!
+
+fn baz<'a>(x: fn(&'a u32), y: &'a u32) {} // ok!
+
+struct Foo<'a> { x: &'a u32 }
+
+impl<'a> Foo<'a> {
+ fn bar(&self, x: fn(&'a u32)) {} // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0688.md b/compiler/rustc_error_codes/src/error_codes/E0688.md
new file mode 100644
index 000000000..44e641a2a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0688.md
@@ -0,0 +1,38 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+In-band lifetimes were mixed with explicit lifetime binders.
+
+Erroneous code example:
+
+```ignore (feature got removed)
+#![feature(in_band_lifetimes)]
+
+fn foo<'a>(x: &'a u32, y: &'b u32) {} // error!
+
+struct Foo<'a> { x: &'a u32 }
+
+impl Foo<'a> {
+ fn bar<'b>(x: &'a u32, y: &'b u32, z: &'c u32) {} // error!
+}
+
+impl<'b> Foo<'a> { // error!
+ fn baz() {}
+}
+```
+
+In-band lifetimes cannot be mixed with explicit lifetime binders.
+For example:
+
+```
+fn foo<'a, 'b>(x: &'a u32, y: &'b u32) {} // ok!
+
+struct Foo<'a> { x: &'a u32 }
+
+impl<'a> Foo<'a> {
+ fn bar<'b,'c>(x: &'a u32, y: &'b u32, z: &'c u32) {} // ok!
+}
+
+impl<'a> Foo<'a> { // ok!
+ fn baz() {}
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0689.md b/compiler/rustc_error_codes/src/error_codes/E0689.md
new file mode 100644
index 000000000..a680a2042
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0689.md
@@ -0,0 +1,29 @@
+A method was called on an ambiguous numeric type.
+
+Erroneous code example:
+
+```compile_fail,E0689
+2.0.neg(); // error!
+```
+
+This error indicates that the method being called does exist for numeric
+values, but the concrete type of the numeric value or binding could not be
+identified.
+
+The error happens on numeric literals and on numeric bindings without an
+identified concrete type:
+
+```compile_fail,E0689
+let x = 2.0;
+x.neg(); // same error as above
+```
+
+Because of this, you must give the numeric literal or binding a type:
+
+```
+use std::ops::Neg;
+
+let _ = 2.0_f32.neg(); // ok!
+let x: f32 = 2.0;
+let _ = x.neg(); // ok!
+let _ = (2.0 as f32).neg(); // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0690.md b/compiler/rustc_error_codes/src/error_codes/E0690.md
new file mode 100644
index 000000000..ba706ad2b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0690.md
@@ -0,0 +1,33 @@
+A struct with the representation hint `repr(transparent)` had two or more fields
+that were not guaranteed to be zero-sized.
+
+Erroneous code example:
+
+```compile_fail,E0690
+#[repr(transparent)]
+struct LengthWithUnit<U> { // error: transparent struct needs at most one
+ value: f32, // non-zero-sized field, but has 2
+ unit: U,
+}
+```
+
+Because transparent structs are represented exactly like one of their fields at
+run time, said field must be uniquely determined. If there are multiple fields,
+it is not clear how the struct should be represented.
+Note that fields of zero-sized types (e.g., `PhantomData`) can also exist
+alongside the field that contains the actual data; they do not count for this
+error. When generic types are involved (as in the above example), an error is
+reported because the type parameter could be non-zero-sized.
+
+To combine `repr(transparent)` with type parameters, `PhantomData` may be
+useful:
+
+```
+use std::marker::PhantomData;
+
+#[repr(transparent)]
+struct LengthWithUnit<U> {
+ value: f32,
+ unit: PhantomData<U>,
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0691.md b/compiler/rustc_error_codes/src/error_codes/E0691.md
new file mode 100644
index 000000000..60060cacb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0691.md
@@ -0,0 +1,48 @@
+A struct, enum, or union with the `repr(transparent)` representation hint
+contains a zero-sized field that requires non-trivial alignment.
+
+Erroneous code example:
+
+```compile_fail,E0691
+#![feature(repr_align)]
+
+#[repr(align(32))]
+struct ForceAlign32;
+
+#[repr(transparent)]
+struct Wrapper(f32, ForceAlign32); // error: zero-sized field in transparent
+ // struct has alignment larger than 1
+```
+
+A transparent struct, enum, or union is supposed to be represented exactly like
+the piece of data it contains. Zero-sized fields with different alignment
+requirements potentially conflict with this property. In the example above,
+`Wrapper` would have to be aligned to 32 bytes even though `f32` has a smaller
+alignment requirement.
+
+Consider removing the over-aligned zero-sized field:
+
+```
+#[repr(transparent)]
+struct Wrapper(f32);
+```
+
+Alternatively, `PhantomData<T>` has alignment 1 for all `T`, so you can use it
+if you need to keep the field for some reason:
+
+```
+#![feature(repr_align)]
+
+use std::marker::PhantomData;
+
+#[repr(align(32))]
+struct ForceAlign32;
+
+#[repr(transparent)]
+struct Wrapper(f32, PhantomData<ForceAlign32>);
+```
+
+Note that empty arrays `[T; 0]` have the same alignment requirement as the
+element type `T`. Also note that the error is conservatively reported even when
+the alignment of the zero-sized type is less than or equal to the data field's
+alignment.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0692.md b/compiler/rustc_error_codes/src/error_codes/E0692.md
new file mode 100644
index 000000000..596cb1e77
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0692.md
@@ -0,0 +1,49 @@
+A `repr(transparent)` type was also annotated with other, incompatible
+representation hints.
+
+Erroneous code example:
+
+```compile_fail,E0692
+#[repr(transparent, C)] // error: incompatible representation hints
+struct Grams(f32);
+```
+
+A type annotated as `repr(transparent)` delegates all representation concerns to
+another type, so adding more representation hints is contradictory. Remove
+either the `transparent` hint or the other hints, like this:
+
+```
+#[repr(transparent)]
+struct Grams(f32);
+```
+
+Alternatively, move the other attributes to the contained type:
+
+```
+#[repr(C)]
+struct Foo {
+ x: i32,
+ // ...
+}
+
+#[repr(transparent)]
+struct FooWrapper(Foo);
+```
+
+Note that introducing another `struct` just to have a place for the other
+attributes may have unintended side effects on the representation:
+
+```
+#[repr(transparent)]
+struct Grams(f32);
+
+#[repr(C)]
+struct Float(f32);
+
+#[repr(transparent)]
+struct Grams2(Float); // this is not equivalent to `Grams` above
+```
+
+Here, `Grams2` is not equivalent to `Grams` -- the former transparently wraps
+a (non-transparent) struct containing a single float, while `Grams` is a
+transparent wrapper around a float. This can make a difference for the ABI.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0693.md b/compiler/rustc_error_codes/src/error_codes/E0693.md
new file mode 100644
index 000000000..43e9d1797
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0693.md
@@ -0,0 +1,19 @@
+The `align` representation hint was incorrectly declared.
+
+Erroneous code examples:
+
+```compile_fail,E0693
+#[repr(align=8)] // error!
+struct Align8(i8);
+
+#[repr(align="8")] // error!
+struct Align8Too(i8);
+```
+
+This is a syntax error at the level of attribute declarations. The proper
+syntax for the `align` representation hint is the following:
+
+```
+#[repr(align(8))] // ok!
+struct Align8(i8);
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0695.md b/compiler/rustc_error_codes/src/error_codes/E0695.md
new file mode 100644
index 000000000..5013e83ca
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0695.md
@@ -0,0 +1,35 @@
+A `break` statement without a label appeared inside a labeled block.
+
+Erroneous code example:
+
+```compile_fail,E0695
+# #![feature(label_break_value)]
+loop {
+ 'a: {
+ break;
+ }
+}
+```
+
+Make sure to always label the `break`:
+
+```
+# #![feature(label_break_value)]
+'l: loop {
+ 'a: {
+ break 'l;
+ }
+}
+```
+
+Or if you want to `break` the labeled block:
+
+```
+# #![feature(label_break_value)]
+loop {
+ 'a: {
+ break 'a;
+ }
+ break;
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0696.md b/compiler/rustc_error_codes/src/error_codes/E0696.md
new file mode 100644
index 000000000..fc32d1cc5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0696.md
@@ -0,0 +1,49 @@
+A function is using the `continue` keyword incorrectly.
+
+Erroneous code example:
+
+```compile_fail,E0696
+fn continue_simple() {
+ 'b: {
+ continue; // error!
+ }
+}
+fn continue_labeled() {
+ 'b: {
+ continue 'b; // error!
+ }
+}
+fn continue_crossing() {
+ loop {
+ 'b: {
+ continue; // error!
+ }
+ }
+}
+```
+
+Here, we have used the `continue` keyword incorrectly: as the examples
+above show, `continue` cannot target a labeled block, only a loop.
+
+To fix this, target a loop with `continue` instead of a labeled block.
+For example:
+
+```
+fn continue_simple() {
+ 'b: loop {
+ continue; // ok!
+ }
+}
+fn continue_labeled() {
+ 'b: loop {
+ continue 'b; // ok!
+ }
+}
+fn continue_crossing() {
+ loop {
+ 'b: loop {
+ continue; // ok!
+ }
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0697.md b/compiler/rustc_error_codes/src/error_codes/E0697.md
new file mode 100644
index 000000000..ab63d2e73
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0697.md
@@ -0,0 +1,15 @@
+A closure has been used as `static`.
+
+Erroneous code example:
+
+```compile_fail,E0697
+fn main() {
+ static || {}; // used as `static`
+}
+```
+
+Closures cannot be used as `static` items. A closure captures ("saves")
+its environment, so a `static` closure could only capture a static
+environment, consisting solely of variables with a `'static` lifetime.
+Given this, it would be better to use a proper function. The easiest fix
+is to remove the `static` keyword.
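+
+A sketch of that fix:
+
+```
+fn main() {
+    let closure = || {}; // ok: an ordinary, non-`static` closure
+    closure();
+}
+```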
diff --git a/compiler/rustc_error_codes/src/error_codes/E0698.md b/compiler/rustc_error_codes/src/error_codes/E0698.md
new file mode 100644
index 000000000..3ba992a84
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0698.md
@@ -0,0 +1,25 @@
+When using generators (or async), all type variables must be bound so that a
+generator can be constructed.
+
+Erroneous code example:
+
+```edition2018,compile_fail,E0698
+async fn bar<T>() -> () {}
+
+async fn foo() {
+ bar().await; // error: cannot infer type for `T`
+}
+```
+
+In the above example, the compiler cannot infer `T`.
+To fix this, you must bind `T` to a concrete type such as `String`
+so that a generator can then be constructed:
+
+```edition2018
+async fn bar<T>() -> () {}
+
+async fn foo() {
+ bar::<String>().await;
+ // ^^^^^^^^ specify type explicitly
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0699.md b/compiler/rustc_error_codes/src/error_codes/E0699.md
new file mode 100644
index 000000000..454d2507e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0699.md
@@ -0,0 +1,46 @@
+A method was called on a raw pointer whose inner type wasn't completely known.
+
+Erroneous code example:
+
+```compile_fail,edition2018,E0699
+# #![deny(warnings)]
+# fn main() {
+let foo = &1;
+let bar = foo as *const _;
+if bar.is_null() {
+ // ...
+}
+# }
+```
+
+Here, the type of `bar` isn't known; it could be a pointer to anything. Instead,
+specify a type for the pointer (preferably something that makes sense for the
+thing you're pointing to):
+
+```
+let foo = &1;
+let bar = foo as *const i32;
+if bar.is_null() {
+ // ...
+}
+```
+
+Even though `is_null()` exists as a method on any raw pointer, Rust shows this
+error because Rust allows for `self` to have arbitrary types (behind the
+`arbitrary_self_types` feature flag).
+
+This means that someone can specify such a function:
+
+```ignore (cannot-doctest-feature-doesnt-exist-yet)
+impl Foo {
+ fn is_null(self: *const Self) -> bool {
+ // do something else
+ }
+}
+```
+
+and now when you call `.is_null()` on a raw pointer to `Foo`, there's ambiguity.
+
+Given that we don't know what type the pointer is, and there's potential
+ambiguity for some types, we disallow calling methods on raw pointers when
+the type is unknown.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0700.md b/compiler/rustc_error_codes/src/error_codes/E0700.md
new file mode 100644
index 000000000..b1eb8b66a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0700.md
@@ -0,0 +1,44 @@
+The `impl Trait` return type captures lifetime parameters that do not
+appear within the `impl Trait` itself.
+
+Erroneous code example:
+
+```compile_fail,E0700
+use std::cell::Cell;
+
+trait Trait<'a> { }
+
+impl<'a, 'b> Trait<'b> for Cell<&'a u32> { }
+
+fn foo<'x, 'y>(x: Cell<&'x u32>) -> impl Trait<'y>
+where 'x: 'y
+{
+ x
+}
+```
+
+Here, the function `foo` returns a value of type `Cell<&'x u32>`,
+which references the lifetime `'x`. However, the return type is
+declared as `impl Trait<'y>` -- this indicates that `foo` returns
+"some type that implements `Trait<'y>`", but it also indicates that
+the return type **only captures data referencing the lifetime `'y`**.
+In this case, though, we are referencing data with lifetime `'x`, so
+this function is in error.
+
+To fix this, you must reference the lifetime `'x` from the return
+type. For example, changing the return type to `impl Trait<'y> + 'x`
+would work:
+
+```
+use std::cell::Cell;
+
+trait Trait<'a> { }
+
+impl<'a,'b> Trait<'b> for Cell<&'a u32> { }
+
+fn foo<'x, 'y>(x: Cell<&'x u32>) -> impl Trait<'y> + 'x
+where 'x: 'y
+{
+ x
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0701.md b/compiler/rustc_error_codes/src/error_codes/E0701.md
new file mode 100644
index 000000000..4965e6431
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0701.md
@@ -0,0 +1,9 @@
+This error indicates that a `#[non_exhaustive]` attribute was incorrectly placed
+on something other than a struct or enum.
+
+Erroneous code example:
+
+```compile_fail,E0701
+#[non_exhaustive]
+trait Foo { }
+```
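+
+The attribute is accepted only on `struct` and `enum` definitions (and enum
+variants). For reference, a minimal sketch of a valid placement:
+
+```
+#[non_exhaustive]
+pub struct Foo {
+    pub field: i32,
+}
+```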
diff --git a/compiler/rustc_error_codes/src/error_codes/E0703.md b/compiler/rustc_error_codes/src/error_codes/E0703.md
new file mode 100644
index 000000000..b42677d52
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0703.md
@@ -0,0 +1,17 @@
+Invalid ABI (Application Binary Interface) used in the code.
+
+Erroneous code example:
+
+```compile_fail,E0703
+extern "invalid" fn foo() {} // error!
+# fn main() {}
+```
+
+At present, only a few predefined ABIs (like `Rust`, `C`, `system`, etc.)
+can be used in Rust. Verify that the ABI is one of them. For example, you
+can replace the invalid ABI with `Rust`:
+
+```
+extern "Rust" fn foo() {} // ok!
+# fn main() { }
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0704.md b/compiler/rustc_error_codes/src/error_codes/E0704.md
new file mode 100644
index 000000000..c22b274fb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0704.md
@@ -0,0 +1,27 @@
+An incorrect visibility restriction was specified.
+
+Erroneous code example:
+
+```compile_fail,E0704
+mod foo {
+ pub(foo) struct Bar {
+ x: i32
+ }
+}
+```
+
+To make the struct `Bar` visible only in module `foo`, the `in` keyword
+should be used:
+
+```
+mod foo {
+ pub(in crate::foo) struct Bar {
+ x: i32
+ }
+}
+# fn main() {}
+```
+
+For more information see the Rust Reference on [Visibility].
+
+[Visibility]: https://doc.rust-lang.org/reference/visibility-and-privacy.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0705.md b/compiler/rustc_error_codes/src/error_codes/E0705.md
new file mode 100644
index 000000000..eb76d1836
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0705.md
@@ -0,0 +1,11 @@
+A `#![feature]` attribute was declared for a feature that is stable in the
+current edition, but not in all editions.
+
+Erroneous code example:
+
+```rust2018,compile_fail,E0705
+#![feature(rust_2018_preview)]
+#![feature(test_2018_feature)] // error: the feature
+ // `test_2018_feature` is
+ // included in the Rust 2018 edition
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0706.md b/compiler/rustc_error_codes/src/error_codes/E0706.md
new file mode 100644
index 000000000..d379b8a23
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0706.md
@@ -0,0 +1,59 @@
+`async fn`s are not yet supported in traits in Rust.
+
+Erroneous code example:
+
+```compile_fail,edition2018
+trait T {
+ // Neither case is currently supported.
+ async fn foo() {}
+ async fn bar(&self) {}
+}
+```
+
+`async fn`s return an `impl Future`, making the following two examples
+equivalent:
+
+```edition2018,ignore (example-of-desugaring-equivalence)
+async fn foo() -> User {
+ unimplemented!()
+}
+// The async fn above gets desugared as follows:
+fn foo() -> impl Future<Output = User> {
+ unimplemented!()
+}
+```
+
+But when it comes to supporting this in traits, there are [a few implementation
+issues][async-is-hard]. One of them is that returning `impl Trait` in traits
+is not supported, as it would require [Generic Associated Types] to be
+supported:
+
+```edition2018,ignore (example-of-desugaring-equivalence)
+impl MyDatabase {
+ async fn get_user(&self) -> User {
+ unimplemented!()
+ }
+}
+
+impl MyDatabase {
+ fn get_user(&self) -> impl Future<Output = User> + '_ {
+ unimplemented!()
+ }
+}
+```
+
+Until these issues are resolved, you can use the [`async-trait` crate], allowing
+you to use `async fn` in traits by desugaring to "boxed futures"
+(`Pin<Box<dyn Future + Send + 'async_trait>>`).
+
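+A sketch of what using that crate looks like (the `#[async_trait]` attribute
+comes from the crate; `Database` and `User` are illustrative names):
+
+```edition2018,ignore (requires the async-trait crate)
+use async_trait::async_trait;
+
+#[async_trait]
+trait Database {
+    async fn get_user(&self) -> User;
+}
+```
+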
+Note that using these trait methods will result in a heap allocation
+per-function-call. This is not a significant cost for the vast majority of
+applications, but should be considered when deciding whether to use this
+functionality in the public API of a low-level function that is expected to be
+called millions of times a second.
+
+You might be interested in visiting the [async book] for further information.
+
+[`async-trait` crate]: https://crates.io/crates/async-trait
+[async-is-hard]: https://smallcultfollowing.com/babysteps/blog/2019/10/26/async-fn-in-traits-are-hard/
+[Generic Associated Types]: https://github.com/rust-lang/rust/issues/44265
+[async book]: https://rust-lang.github.io/async-book/07_workarounds/06_async_in_traits.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0708.md b/compiler/rustc_error_codes/src/error_codes/E0708.md
new file mode 100644
index 000000000..9287fc803
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0708.md
@@ -0,0 +1,26 @@
+`async` non-`move` closures with parameters are currently not supported.
+
+Erroneous code example:
+
+```compile_fail,edition2018,E0708
+#![feature(async_closure)]
+
+fn main() {
+ let add_one = async |num: u8| { // error!
+ num + 1
+ };
+}
+```
+
+`async` non-`move` closures with parameters are not supported in the
+current version; you can make this work by using the `move` keyword:
+
+```edition2018
+#![feature(async_closure)]
+
+fn main() {
+ let add_one = async move |num: u8| { // ok!
+ num + 1
+ };
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0710.md b/compiler/rustc_error_codes/src/error_codes/E0710.md
new file mode 100644
index 000000000..b7037ea61
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0710.md
@@ -0,0 +1,34 @@
+An unknown tool name was found in a scoped lint.
+
+Erroneous code examples:
+
+```compile_fail,E0710
+#[allow(clipp::filter_map)] // error!
+fn main() {
+ // business logic
+}
+```
+
+```compile_fail,E0710
+#[warn(clipp::filter_map)] // error!
+fn main() {
+ // business logic
+}
+```
+
+Please verify that you didn't misspell the tool's name and that you didn't
+forget to import it in your project:
+
+```
+#[allow(clippy::filter_map)] // ok!
+fn main() {
+ // business logic
+}
+```
+
+```
+#[warn(clippy::filter_map)] // ok!
+fn main() {
+ // business logic
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0712.md b/compiler/rustc_error_codes/src/error_codes/E0712.md
new file mode 100644
index 000000000..7e09210e7
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0712.md
@@ -0,0 +1,19 @@
+A thread-local variable was borrowed inside a function, and the borrow
+outlived the function.
+
+Erroneous code example:
+
+```compile_fail,E0712
+#![feature(thread_local)]
+
+#[thread_local]
+static FOO: u8 = 3;
+
+fn main() {
+ let a = &FOO; // error: thread-local variable borrowed past end of function
+
+ std::thread::spawn(move || {
+ println!("{}", a);
+ });
+}
+```
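+
+Since the thread-local here has a `Copy` type, one way to fix this (a sketch)
+is to copy the value out instead of borrowing it:
+
+```
+#![feature(thread_local)]
+
+#[thread_local]
+static FOO: u8 = 3;
+
+fn main() {
+    let a = FOO; // copy the value; no borrow outlives the function
+
+    std::thread::spawn(move || {
+        println!("{}", a);
+    });
+}
+```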
diff --git a/compiler/rustc_error_codes/src/error_codes/E0713.md b/compiler/rustc_error_codes/src/error_codes/E0713.md
new file mode 100644
index 000000000..9b1b77f3b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0713.md
@@ -0,0 +1,49 @@
+This error occurs when an attempt is made to borrow state past the end of the
+lifetime of a type that implements the `Drop` trait.
+
+Erroneous code example:
+
+```compile_fail,E0713
+pub struct S<'a> { data: &'a mut String }
+
+impl<'a> Drop for S<'a> {
+ fn drop(&mut self) { self.data.push_str("being dropped"); }
+}
+
+fn demo<'a>(s: S<'a>) -> &'a mut String { let p = &mut *s.data; p }
+```
+
+Here, `demo` tries to borrow the string data held within its
+argument `s` and then return that borrow. However, `S` is
+declared as implementing `Drop`.
+
+Structs implementing the `Drop` trait have an implicit destructor that
+gets called when they go out of scope. This destructor gets exclusive
+access to the fields of the struct when it runs.
+
+This means that when `s` reaches the end of `demo`, its destructor
+gets exclusive access to its `&mut`-borrowed string data. Allowing
+another borrow of that string data (`p`) to exist across the drop of
+`s` would be a violation of the principle that `&mut`-borrows have
+exclusive, unaliased access to their referenced data.
+
+This error can be fixed by changing `demo` so that the destructor does
+not run while the string-data is borrowed; for example by taking `S`
+by reference:
+
+```
+pub struct S<'a> { data: &'a mut String }
+
+impl<'a> Drop for S<'a> {
+ fn drop(&mut self) { self.data.push_str("being dropped"); }
+}
+
+fn demo<'a>(s: &'a mut S<'a>) -> &'a mut String { let p = &mut *(*s).data; p }
+```
+
+Note that this approach needs a reference to `S` with lifetime `'a`.
+Nothing shorter than `'a` will suffice: a shorter lifetime would imply
+that after `demo` finishes executing, something else (such as the
+destructor!) could access `s.data` after the end of that shorter
+lifetime, which would again violate the `&mut`-borrow's exclusive
+access.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0714.md b/compiler/rustc_error_codes/src/error_codes/E0714.md
new file mode 100644
index 000000000..45d1cafa6
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0714.md
@@ -0,0 +1,19 @@
+A `#[marker]` trait contained an associated item.
+
+Erroneous code example:
+
+```compile_fail,E0714
+#![feature(marker_trait_attr)]
+#![feature(associated_type_defaults)]
+
+#[marker]
+trait MarkerConst {
+ const N: usize; // error!
+}
+
+fn main() {}
+```
+
+The items of marker traits cannot be overridden, so there's no need to have them
+when they cannot be changed per-type anyway. If you wanted them for ergonomic
+reasons, consider making an extension trait instead.
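+
+For instance, a sketch of such an extension trait, keeping the marker trait
+itself empty:
+
+```
+#![feature(marker_trait_attr)]
+
+#[marker]
+trait Marker {}
+
+trait MarkerExt: Marker {
+    fn describe() -> &'static str {
+        "marked" // items live here, not on the marker trait
+    }
+}
+```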
diff --git a/compiler/rustc_error_codes/src/error_codes/E0715.md b/compiler/rustc_error_codes/src/error_codes/E0715.md
new file mode 100644
index 000000000..b27702b3c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0715.md
@@ -0,0 +1,23 @@
+An `impl` for a `#[marker]` trait tried to override an associated item.
+
+Erroneous code example:
+
+```compile_fail,E0715
+#![feature(marker_trait_attr)]
+
+#[marker]
+trait Marker {
+ const N: usize = 0;
+ fn do_something() {}
+}
+
+struct OverrideConst;
+impl Marker for OverrideConst { // error!
+ const N: usize = 1;
+}
+# fn main() {}
+```
+
+Because marker traits are allowed to have multiple implementations for the same
+type, it's not allowed to override anything in those implementations, as it
+would be ambiguous which override should actually be used.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0716.md b/compiler/rustc_error_codes/src/error_codes/E0716.md
new file mode 100644
index 000000000..c3546cd74
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0716.md
@@ -0,0 +1,79 @@
+A temporary value is being dropped while a borrow is still in active use.
+
+Erroneous code example:
+
+```compile_fail,E0716
+fn foo() -> i32 { 22 }
+fn bar(x: &i32) -> &i32 { x }
+let p = bar(&foo());
+ // ------ creates a temporary
+let q = *p;
+```
+
+Here, the expression `&foo()` is borrowing the expression `foo()`. As `foo()` is
+a call to a function, and not the name of a variable, this creates a
+**temporary** -- that temporary stores the return value from `foo()` so that it
+can be borrowed. You could imagine that `let p = bar(&foo());` is equivalent to
+the following, which uses an explicit temporary variable.
+
+Erroneous code example:
+
+```compile_fail,E0597
+# fn foo() -> i32 { 22 }
+# fn bar(x: &i32) -> &i32 { x }
+let p = {
+ let tmp = foo(); // the temporary
+ bar(&tmp) // error: `tmp` does not live long enough
+}; // <-- tmp is freed as we exit this block
+let q = p;
+```
+
+Whenever a temporary is created, it is automatically dropped (freed) according
+to fixed rules. Ordinarily, the temporary is dropped at the end of the enclosing
+statement -- in this case, after the `let`. This is illustrated in the example
+above by showing that `tmp` would be freed as we exit the block.
+
+To fix this problem, you need to create a local variable to store the value in
+rather than relying on a temporary. For example, you might change the original
+program to the following:
+
+```
+fn foo() -> i32 { 22 }
+fn bar(x: &i32) -> &i32 { x }
+let value = foo(); // dropped at the end of the enclosing block
+let p = bar(&value);
+let q = *p;
+```
+
+By introducing the explicit `let value`, we allocate storage that will last
+until the end of the enclosing block (when `value` goes out of scope). When we
+borrow `&value`, we are borrowing a local variable that already exists, and
+hence no temporary is created.
+
+Temporaries are not always dropped at the end of the enclosing statement. In
+simple cases where the `&` expression is immediately stored into a variable, the
+compiler will automatically extend the lifetime of the temporary until the end
+of the enclosing block. Therefore, an alternative way to fix the original
+program is to write `let tmp = &foo()` and not `let tmp = foo()`:
+
+```
+fn foo() -> i32 { 22 }
+fn bar(x: &i32) -> &i32 { x }
+let value = &foo();
+let p = bar(value);
+let q = *p;
+```
+
+Here, we are still borrowing `foo()`, but as the borrow is assigned directly
+into a variable, the temporary will not be dropped until the end of the
+enclosing block. Similar rules apply when temporaries are stored into aggregate
+structures like a tuple or struct:
+
+```
+// Here, two temporaries are created, but
+// as they are stored directly into `value`,
+// they are not dropped until the end of the
+// enclosing block.
+fn foo() -> i32 { 22 }
+let value = (&foo(), &foo());
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0718.md b/compiler/rustc_error_codes/src/error_codes/E0718.md
new file mode 100644
index 000000000..1fe62ecf1
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0718.md
@@ -0,0 +1,10 @@
+A `#[lang = ".."]` attribute was placed on the wrong item type.
+
+Erroneous code example:
+
+```compile_fail,E0718
+#![feature(lang_items)]
+
+#[lang = "owned_box"]
+static X: u32 = 42;
+```
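+
+The `owned_box` lang item, for instance, is expected on a struct. The sketch
+below is not compiled here, because a crate cannot redefine a lang item that
+`std` already provides; it only illustrates the expected item kind:
+
+```ignore (duplicate lang item with `std`)
+#![feature(lang_items)]
+#![no_std]
+
+#[lang = "owned_box"]
+struct MyBox<T>(*mut T);
+```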
diff --git a/compiler/rustc_error_codes/src/error_codes/E0719.md b/compiler/rustc_error_codes/src/error_codes/E0719.md
new file mode 100644
index 000000000..057a0b164
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0719.md
@@ -0,0 +1,35 @@
+An associated type value was specified more than once.
+
+Erroneous code example:
+
+```compile_fail,E0719
+#![feature(associated_type_bounds)]
+
+trait FooTrait {}
+trait BarTrait {}
+
+// error: associated type `Item` in trait `Iterator` is specified twice
+struct Foo<T: Iterator<Item: FooTrait, Item: BarTrait>> { f: T }
+```
+
+`Item` in trait `Iterator` cannot be specified multiple times for struct `Foo`.
+To fix this, create a new trait that is a combination of the desired traits and
+specify the associated type with the new trait.
+
+Corrected example:
+
+```
+#![feature(associated_type_bounds)]
+
+trait FooTrait {}
+trait BarTrait {}
+trait FooBarTrait: FooTrait + BarTrait {}
+
+struct Foo<T: Iterator<Item: FooBarTrait>> { f: T } // ok!
+```
+
+For more information about associated types, see [the book][bk-at]. For more
+information on associated type bounds, see [RFC 2289][rfc-2289].
+
+[bk-at]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#specifying-placeholder-types-in-trait-definitions-with-associated-types
+[rfc-2289]: https://rust-lang.github.io/rfcs/2289-associated-type-bounds.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0720.md b/compiler/rustc_error_codes/src/error_codes/E0720.md
new file mode 100644
index 000000000..40dfa484d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0720.md
@@ -0,0 +1,13 @@
+An `impl Trait` type expands to a recursive type.
+
+Erroneous code example:
+
+```compile_fail,E0720
+fn make_recursive_type() -> impl Sized {
+ [make_recursive_type(), make_recursive_type()]
+}
+```
+
+An `impl Trait` type must be expandable to a concrete type that contains no
+`impl Trait` types. The erroneous code above tries to create an `impl Trait`
+type `T` that would be equal to `[T, T]`.
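+
+One way out, sketched below on the assumption that the recursion is
+intentional, is to name a concrete type and introduce indirection with `Box`
+so the expansion terminates:
+
+```
+enum Tree {
+    Leaf,
+    Node(Box<Tree>, Box<Tree>),
+}
+
+fn make_recursive_type() -> Tree { // ok! the type is concrete
+    Tree::Node(Box::new(Tree::Leaf), Box::new(Tree::Leaf))
+}
+```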
diff --git a/compiler/rustc_error_codes/src/error_codes/E0722.md b/compiler/rustc_error_codes/src/error_codes/E0722.md
new file mode 100644
index 000000000..570717a92
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0722.md
@@ -0,0 +1,31 @@
+The `optimize` attribute was malformed.
+
+Erroneous code example:
+
+```compile_fail,E0722
+#![feature(optimize_attribute)]
+
+#[optimize(something)] // error: invalid argument
+pub fn something() {}
+```
+
+The `#[optimize]` attribute should be used as follows:
+
+- `#[optimize(size)]` -- instructs the optimization pipeline to generate code
+ that's smaller rather than faster
+
+- `#[optimize(speed)]` -- instructs the optimization pipeline to generate code
+ that's faster rather than smaller
+
+For example:
+
+```
+#![feature(optimize_attribute)]
+
+#[optimize(size)]
+pub fn something() {}
+```
+
+See [RFC 2412] for more details.
+
+[RFC 2412]: https://rust-lang.github.io/rfcs/2412-optimize-attr.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0724.md b/compiler/rustc_error_codes/src/error_codes/E0724.md
new file mode 100644
index 000000000..70578acbe
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0724.md
@@ -0,0 +1,25 @@
+`#[ffi_returns_twice]` was used on something other than a foreign function
+declaration.
+
+Erroneous code example:
+
+```compile_fail,E0724
+#![feature(ffi_returns_twice)]
+#![crate_type = "lib"]
+
+#[ffi_returns_twice] // error!
+pub fn foo() {}
+```
+
+`#[ffi_returns_twice]` can only be used on foreign function declarations.
+For example, we might correct the previous example by declaring
+the function inside of an `extern` block.
+
+```
+#![feature(ffi_returns_twice)]
+
+extern "C" {
+ #[ffi_returns_twice] // ok!
+ pub fn foo();
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0725.md b/compiler/rustc_error_codes/src/error_codes/E0725.md
new file mode 100644
index 000000000..9bd321e5f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0725.md
@@ -0,0 +1,12 @@
+A feature attribute named a feature that was disallowed in the compiler
+command line flags.
+
+Erroneous code example:
+
+```ignore (can't specify compiler flags from doctests)
+#![feature(never_type)] // error: the feature `never_type` is not in
+ // the list of allowed features
+```
+
+Delete the offending feature attribute, or add it to the list of allowed
+features in the `-Z allow_features` flag.
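+
+For instance, assuming the crate is compiled directly with `rustc`, the
+allow-list might be passed like this (a sketch; adapt to your build system):
+
+```ignore (can't specify compiler flags from doctests)
+// Compiled with: rustc -Z allow_features=never_type main.rs
+#![feature(never_type)] // now in the allowed list
+```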
diff --git a/compiler/rustc_error_codes/src/error_codes/E0726.md b/compiler/rustc_error_codes/src/error_codes/E0726.md
new file mode 100644
index 000000000..e3794327f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0726.md
@@ -0,0 +1,46 @@
+An argument lifetime was elided in an async function.
+
+When a struct or a type is declared with a lifetime, the Rust compiler needs
+to know, at each place it is used, how long its borrowed contents live. When
+the lifetime is not explicitly specified and the compiler cannot determine
+it, the following error occurs.
+
+Erroneous code example:
+
+```compile_fail,E0726
+use futures::executor::block_on;
+struct Content<'a> {
+ title: &'a str,
+ body: &'a str,
+}
+async fn create(content: Content) { // error: implicit elided
+ // lifetime not allowed here
+ println!("title: {}", content.title);
+ println!("body: {}", content.body);
+}
+let content = Content { title: "Rust", body: "is great!" };
+let future = create(content);
+block_on(future);
+```
+
+Specify the desired lifetime of the parameter `content`, or indicate the
+anonymous lifetime like `content: Content<'_>`. The anonymous lifetime tells
+the Rust compiler that `content` is only needed until the `create` function
+finishes executing.
+
+Implicit elision, that is, omitting the lifetime instead of writing the
+suggested `pub async fn create<'a>(content: Content<'a>) {}`, is not allowed
+here because the lifetime of `content` can differ from the current context:
+
+```ignore (needs futures dependency)
+async fn create(content: Content<'_>) { // ok!
+ println!("title: {}", content.title);
+ println!("body: {}", content.body);
+}
+```
+
+You can learn more about lifetime elision in [this chapter][lifetime-elision];
+a general chapter on lifetimes can be found [here][lifetimes].
+
+[lifetime-elision]: https://doc.rust-lang.org/book/ch10-03-lifetime-syntax.html#lifetime-elision
+[lifetimes]: https://doc.rust-lang.org/rust-by-example/scope/lifetime.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0727.md b/compiler/rustc_error_codes/src/error_codes/E0727.md
new file mode 100644
index 000000000..386daea0c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0727.md
@@ -0,0 +1,30 @@
+A `yield` expression was used in an `async` context.
+
+Erroneous code example:
+
+```compile_fail,E0727,edition2018
+#![feature(generators)]
+
+fn main() {
+ let generator = || {
+ async {
+ yield;
+ }
+ };
+}
+```
+
+Here, the `yield` keyword is used in an `async` block,
+which is not yet supported.
+
+To fix this error, you have to move `yield` out of the `async` block:
+
+```edition2018
+#![feature(generators)]
+
+fn main() {
+ let generator = || {
+ yield;
+ };
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0728.md b/compiler/rustc_error_codes/src/error_codes/E0728.md
new file mode 100644
index 000000000..f4968a4f0
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0728.md
@@ -0,0 +1,75 @@
+[`await`] was used outside of an [`async`] function or [`async`] block.
+
+Erroneous code example:
+
+```edition2018,compile_fail,E0728
+# use std::pin::Pin;
+# use std::future::Future;
+# use std::task::{Context, Poll};
+#
+# struct WakeOnceThenComplete(bool);
+#
+# fn wake_and_yield_once() -> WakeOnceThenComplete {
+# WakeOnceThenComplete(false)
+# }
+#
+# impl Future for WakeOnceThenComplete {
+# type Output = ();
+# fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+# if self.0 {
+# Poll::Ready(())
+# } else {
+# cx.waker().wake_by_ref();
+# self.0 = true;
+# Poll::Pending
+# }
+# }
+# }
+#
+fn foo() {
+ wake_and_yield_once().await // `await` is used outside `async` context
+}
+```
+
+[`await`] is used to suspend the current computation until the given
+future is ready to produce a value. So it is legal only within
+an [`async`] context, like an `async` function or an `async` block.
+
+```edition2018
+# use std::pin::Pin;
+# use std::future::Future;
+# use std::task::{Context, Poll};
+#
+# struct WakeOnceThenComplete(bool);
+#
+# fn wake_and_yield_once() -> WakeOnceThenComplete {
+# WakeOnceThenComplete(false)
+# }
+#
+# impl Future for WakeOnceThenComplete {
+# type Output = ();
+# fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
+# if self.0 {
+# Poll::Ready(())
+# } else {
+# cx.waker().wake_by_ref();
+# self.0 = true;
+# Poll::Pending
+# }
+# }
+# }
+#
+async fn foo() {
+ wake_and_yield_once().await // `await` is used within `async` function
+}
+
+fn bar(x: u8) -> impl Future<Output = u8> {
+ async move {
+ wake_and_yield_once().await; // `await` is used within `async` block
+ x
+ }
+}
+```
+
+[`async`]: https://doc.rust-lang.org/std/keyword.async.html
+[`await`]: https://doc.rust-lang.org/std/keyword.await.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0729.md b/compiler/rustc_error_codes/src/error_codes/E0729.md
new file mode 100644
index 000000000..74f89080b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0729.md
@@ -0,0 +1,30 @@
+Support for Non-Lexical Lifetimes (NLL) has been included in the Rust compiler
+since 1.31, and has been enabled on the 2015 edition since 1.36. The new borrow
+checker for NLL uncovered some bugs in the old borrow checker, which in some
+cases allowed unsound code to compile, resulting in memory safety issues.
+
+### What do I do?
+
+Change your code so the warning no longer triggers. For backwards
+compatibility, this unsound code may still compile (with a warning) right now.
+However, at some point in the future, the compiler will no longer accept this
+code and will throw a hard error.
+
+### Shouldn't you fix the old borrow checker?
+
+The old borrow checker has known soundness issues that are basically impossible
+to fix. The new NLL-based borrow checker is the fix.
+
+### Can I turn these warnings into errors by denying a lint?
+
+No.
+
+### When are these warnings going to turn into errors?
+
+No formal timeline for turning the warnings into errors has been set. See
+[GitHub issue 58781](https://github.com/rust-lang/rust/issues/58781) for more
+information.
+
+### Why do I get this message with code that doesn't involve borrowing?
+
+There are some known bugs that trigger this message.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0730.md b/compiler/rustc_error_codes/src/error_codes/E0730.md
new file mode 100644
index 000000000..56d0e6afa
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0730.md
@@ -0,0 +1,39 @@
+An array without a fixed length was pattern-matched.
+
+Erroneous code example:
+
+```compile_fail,E0730
+fn is_123<const N: usize>(x: [u32; N]) -> bool {
+ match x {
+ [1, 2, ..] => true, // error: cannot pattern-match on an
+ // array without a fixed length
+ _ => false
+ }
+}
+```
+
+To fix this error, you have two solutions:
+ 1. Use an array with a fixed length.
+ 2. Use a slice.
+
+Example with an array with a fixed length:
+
+```
+fn is_123(x: [u32; 3]) -> bool { // We use an array with a fixed size
+ match x {
+ [1, 2, ..] => true, // ok!
+ _ => false
+ }
+}
+```
+
+Example with a slice:
+
+```
+fn is_123(x: &[u32]) -> bool { // We use a slice
+ match x {
+ [1, 2, ..] => true, // ok!
+ _ => false
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0731.md b/compiler/rustc_error_codes/src/error_codes/E0731.md
new file mode 100644
index 000000000..096c053fe
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0731.md
@@ -0,0 +1,17 @@
+An enum with the representation hint `repr(transparent)` had zero variants or
+more than one variant.
+
+Erroneous code example:
+
+```compile_fail,E0731
+#[repr(transparent)]
+enum Status { // error: transparent enum needs exactly one variant, but has 2
+ Errno(u32),
+ Ok,
+}
+```
+
+Because transparent enums are represented exactly like one of their variants at
+run time, said variant must be uniquely determined. If there is no variant, or
+if there are multiple variants, it is not clear how the enum should be
+represented.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0732.md b/compiler/rustc_error_codes/src/error_codes/E0732.md
new file mode 100644
index 000000000..7347e6654
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0732.md
@@ -0,0 +1,44 @@
+An `enum` with a discriminant must specify a `#[repr(inttype)]`.
+
+Erroneous code example:
+
+```compile_fail,E0732
+#![feature(arbitrary_enum_discriminant)]
+
+enum Enum { // error!
+ Unit = 1,
+ Tuple() = 2,
+ Struct{} = 3,
+}
+# fn main() {}
+```
+
+A `#[repr(inttype)]` must be provided on an `enum` if it has a non-unit
+variant with a discriminant, or where there are both unit variants with
+discriminants and non-unit variants. This restriction ensures that there
+is a well-defined way to extract a variant's discriminant from a value;
+for instance:
+
+```
+#![feature(arbitrary_enum_discriminant)]
+
+#[repr(u8)]
+enum Enum {
+ Unit = 3,
+ Tuple(u16) = 2,
+ Struct {
+ a: u8,
+ b: u16,
+ } = 1,
+}
+
+fn discriminant(v: &Enum) -> u8 {
+ unsafe { *(v as *const Enum as *const u8) }
+}
+
+fn main() {
+ assert_eq!(3, discriminant(&Enum::Unit));
+ assert_eq!(2, discriminant(&Enum::Tuple(5)));
+ assert_eq!(1, discriminant(&Enum::Struct{a: 7, b: 11}));
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0733.md b/compiler/rustc_error_codes/src/error_codes/E0733.md
new file mode 100644
index 000000000..051b75148
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0733.md
@@ -0,0 +1,44 @@
+An [`async`] function used recursion without boxing.
+
+Erroneous code example:
+
+```edition2018,compile_fail,E0733
+async fn foo(n: usize) {
+ if n > 0 {
+ foo(n - 1).await;
+ }
+}
+```
+
+To perform async recursion, the `async fn` needs to be desugared such that the
+`Future` is explicit in the return type:
+
+```edition2018,compile_fail,E0720
+use std::future::Future;
+fn foo_desugared(n: usize) -> impl Future<Output = ()> {
+ async move {
+ if n > 0 {
+ foo_desugared(n - 1).await;
+ }
+ }
+}
+```
+
+Finally, the future is wrapped in a pinned box:
+
+```edition2018
+use std::future::Future;
+use std::pin::Pin;
+fn foo_recursive(n: usize) -> Pin<Box<dyn Future<Output = ()>>> {
+ Box::pin(async move {
+ if n > 0 {
+ foo_recursive(n - 1).await;
+ }
+ })
+}
+```
+
+The `Box<...>` ensures that the result is of known size, and the pin is
+required to keep it in the same place in memory.
+
+[`async`]: https://doc.rust-lang.org/std/keyword.async.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0734.md b/compiler/rustc_error_codes/src/error_codes/E0734.md
new file mode 100644
index 000000000..b912061ec
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0734.md
@@ -0,0 +1,12 @@
+A stability attribute has been used outside of the standard library.
+
+Erroneous code example:
+
+```compile_fail,E0734
+#[stable(feature = "a", since = "b")] // invalid
+#[unstable(feature = "b", issue = "none")] // invalid
+fn foo(){}
+```
+
+These attributes are meant to only be used by the standard library and are
+rejected in your own crates.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0735.md b/compiler/rustc_error_codes/src/error_codes/E0735.md
new file mode 100644
index 000000000..e8268a583
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0735.md
@@ -0,0 +1,11 @@
+Type parameter defaults cannot use `Self` on structs, enums, or unions.
+
+Erroneous code example:
+
+```compile_fail,E0735
+struct Foo<X = Box<Self>> {
+ field1: Option<X>,
+ field2: Option<X>,
+}
+// error: type parameters cannot use `Self` in their defaults.
+```
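+
+To fix it, replace `Self` with a concrete type. A minimal sketch, where
+`Box<u32>` is a stand-in for whatever default the API actually wants:
+
+```
+struct Foo<X = Box<u32>> { // ok!
+    field1: Option<X>,
+    field2: Option<X>,
+}
+```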
diff --git a/compiler/rustc_error_codes/src/error_codes/E0736.md b/compiler/rustc_error_codes/src/error_codes/E0736.md
new file mode 100644
index 000000000..0f3d41ba6
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0736.md
@@ -0,0 +1,14 @@
+`#[track_caller]` and `#[naked]` cannot both be applied to the same function.
+
+Erroneous code example:
+
+```compile_fail,E0736
+#[naked]
+#[track_caller]
+fn foo() {}
+```
+
+This is primarily due to ABI incompatibilities between the two attributes.
+See [RFC 2091] for details on this and other limitations.
+
+[RFC 2091]: https://github.com/rust-lang/rfcs/blob/master/text/2091-inline-semantic.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0737.md b/compiler/rustc_error_codes/src/error_codes/E0737.md
new file mode 100644
index 000000000..ab5e60692
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0737.md
@@ -0,0 +1,12 @@
+`#[track_caller]` requires functions to have the `"Rust"` ABI for implicitly
+receiving caller location. See [RFC 2091] for details on this and other
+restrictions.
+
+Erroneous code example:
+
+```compile_fail,E0737
+#[track_caller]
+extern "C" fn foo() {}
+```
+
+[RFC 2091]: https://github.com/rust-lang/rfcs/blob/master/text/2091-inline-semantic.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0739.md b/compiler/rustc_error_codes/src/error_codes/E0739.md
new file mode 100644
index 000000000..8d9039bef
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0739.md
@@ -0,0 +1,12 @@
+`#[track_caller]` cannot be applied to a struct.
+
+Erroneous code example:
+
+```compile_fail,E0739
+#[track_caller]
+struct Bar {
+ a: u8,
+}
+```
+
+See [RFC 2091] for details on this and other restrictions.
+
+[RFC 2091]: https://github.com/rust-lang/rfcs/blob/master/text/2091-inline-semantic.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0740.md b/compiler/rustc_error_codes/src/error_codes/E0740.md
new file mode 100644
index 000000000..6240099a9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0740.md
@@ -0,0 +1,18 @@
+A `union` was declared with fields that have destructors.
+
+Erroneous code example:
+
+```compile_fail,E0740
+union Test {
+ a: A, // error!
+}
+
+#[derive(Debug)]
+struct A(i32);
+
+impl Drop for A {
+ fn drop(&mut self) { println!("A"); }
+}
+```
+
+A `union` cannot have fields with destructors.
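+
+If the field type genuinely needs a destructor, one workaround (a sketch; it
+makes dropping the contents your responsibility) is to wrap the field in
+`std::mem::ManuallyDrop`, which itself has no destructor:
+
+```
+use std::mem::ManuallyDrop;
+
+union Test {
+    a: ManuallyDrop<A>, // ok!
+}
+
+struct A(i32);
+
+impl Drop for A {
+    fn drop(&mut self) { println!("A"); }
+}
+```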
diff --git a/compiler/rustc_error_codes/src/error_codes/E0741.md b/compiler/rustc_error_codes/src/error_codes/E0741.md
new file mode 100644
index 000000000..70d963cd4
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0741.md
@@ -0,0 +1,25 @@
+A non-structural-match type was used as the type of a const generic parameter.
+
+Erroneous code example:
+
+```compile_fail,E0741
+#![feature(adt_const_params)]
+
+struct A;
+
+struct B<const X: A>; // error!
+```
+
+Only structural-match types (that is, types that derive `PartialEq` and `Eq`)
+may be used as the types of const generic parameters.
+
+To fix the previous code example, we derive `PartialEq` and `Eq`:
+
+```
+#![feature(adt_const_params)]
+
+#[derive(PartialEq, Eq)] // We derive both traits here.
+struct A;
+
+struct B<const X: A>; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0742.md b/compiler/rustc_error_codes/src/error_codes/E0742.md
new file mode 100644
index 000000000..e10c1639d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0742.md
@@ -0,0 +1,35 @@
+Visibility is restricted to a module which isn't an ancestor of the current
+item.
+
+Erroneous code example:
+
+```compile_fail,E0742,edition2018
+pub mod sea {}
+
+pub (in crate::sea) struct Shark; // error!
+
+fn main() {}
+```
+
+To fix this error, we need to move the `Shark` struct inside the `sea` module:
+
+```edition2018
+pub mod sea {
+ pub (in crate::sea) struct Shark; // ok!
+}
+
+fn main() {}
+```
+
+Of course, you can do it as long as the module you're referring to is an
+ancestor:
+
+```edition2018
+pub mod earth {
+ pub mod sea {
+ pub (in crate::earth) struct Shark; // ok!
+ }
+}
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0743.md b/compiler/rustc_error_codes/src/error_codes/E0743.md
new file mode 100644
index 000000000..ddd3136df
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0743.md
@@ -0,0 +1,16 @@
+The C-variadic type `...` has been nested inside another type.
+
+Erroneous code example:
+
+```compile_fail,E0743
+#![feature(c_variadic)]
+
+fn foo2(x: u8, y: &...) {} // error!
+```
+
+Only foreign functions can use the C-variadic type (`...`). In such functions,
+`...` may only occur non-nested. That is, `y: &'a ...` is not allowed.
+
+A C-variadic type is used to give an undefined number of parameters to a given
+function (like `printf` in C). The equivalent in Rust would be to use macros
+directly (like `println!` for example).
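+
+For reference, a valid use declares the variadic parameter, non-nested, in a
+foreign function. A minimal sketch of such a declaration (the `*const u8`
+parameter type is simplified for illustration):
+
+```
+extern "C" {
+    fn printf(format: *const u8, ...) -> i32; // ok!
+}
+```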
diff --git a/compiler/rustc_error_codes/src/error_codes/E0744.md b/compiler/rustc_error_codes/src/error_codes/E0744.md
new file mode 100644
index 000000000..9a8ef3b84
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0744.md
@@ -0,0 +1,16 @@
+An unsupported expression was used inside a const context.
+
+Erroneous code example:
+
+```compile_fail,edition2018,E0744
+const _: i32 = {
+ async { 0 }.await
+};
+```
+
+At the moment, `.await` is forbidden inside a `const`, `static`, or `const fn`.
+
+This may be allowed at some point in the future, but the implementation is not
+yet complete. See the tracking issue for [`async`] in `const fn`.
+
+[`async`]: https://github.com/rust-lang/rust/issues/69431
diff --git a/compiler/rustc_error_codes/src/error_codes/E0745.md b/compiler/rustc_error_codes/src/error_codes/E0745.md
new file mode 100644
index 000000000..23ee7af30
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0745.md
@@ -0,0 +1,23 @@
+The address of a temporary value was taken.
+
+Erroneous code example:
+
+```compile_fail,E0745
+# #![feature(raw_ref_op)]
+fn temp_address() {
+ let ptr = &raw const 2; // error!
+}
+```
+
+In this example, `2` is destroyed right after the assignment, which means that
+`ptr` now points to an unavailable location.
+
+To avoid this error, first bind the temporary to a named local variable:
+
+```
+# #![feature(raw_ref_op)]
+fn temp_address() {
+ let val = 2;
+ let ptr = &raw const val; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0746.md b/compiler/rustc_error_codes/src/error_codes/E0746.md
new file mode 100644
index 000000000..90755d47f
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0746.md
@@ -0,0 +1,139 @@
+An unboxed trait object was used as a return value.
+
+Erroneous code example:
+
+```compile_fail,E0746
+trait T {
+ fn bar(&self);
+}
+struct S(usize);
+impl T for S {
+ fn bar(&self) {}
+}
+
+// Having the trait `T` as return type is invalid because
+// unboxed trait objects do not have a statically known size:
+fn foo() -> dyn T { // error!
+ S(42)
+}
+```
+
+Return types cannot be `dyn Trait`s as they must be `Sized`.
+
+To avoid the error there are a couple of options.
+
+If there is a single type involved, you can use [`impl Trait`]:
+
+```
+# trait T {
+# fn bar(&self);
+# }
+# struct S(usize);
+# impl T for S {
+# fn bar(&self) {}
+# }
+// The compiler will select `S(usize)` as the materialized return type of this
+// function, but callers will only know that the return type implements `T`.
+fn foo() -> impl T { // ok!
+ S(42)
+}
+```
+
+If there are multiple types involved, the only way you care to interact with
+them is through the trait's interface, and relying on dynamic dispatch is
+acceptable, then you can use [trait objects] with `Box`, or other container
+types like `Rc` or `Arc`:
+
+```
+# trait T {
+# fn bar(&self);
+# }
+# struct S(usize);
+# impl T for S {
+# fn bar(&self) {}
+# }
+struct O(&'static str);
+impl T for O {
+ fn bar(&self) {}
+}
+
+// This now returns a "trait object" and callers are only able to access
+// associated items from `T`.
+fn foo(x: bool) -> Box<dyn T> { // ok!
+ if x {
+ Box::new(S(42))
+ } else {
+ Box::new(O("val"))
+ }
+}
+```
+
+Finally, if you wish to still be able to access the original type, you can
+create a new `enum` with a variant for each type:
+
+```
+# trait T {
+# fn bar(&self);
+# }
+# struct S(usize);
+# impl T for S {
+# fn bar(&self) {}
+# }
+# struct O(&'static str);
+# impl T for O {
+# fn bar(&self) {}
+# }
+enum E {
+ S(S),
+ O(O),
+}
+
+// The caller can access the original types directly, but it needs to match on
+// the returned `enum E`.
+fn foo(x: bool) -> E {
+ if x {
+ E::S(S(42))
+ } else {
+ E::O(O("val"))
+ }
+}
+```
+
+You can even implement the `trait` on the returned `enum` so the callers
+*don't* have to match on the returned value to invoke the associated items:
+
+```
+# trait T {
+# fn bar(&self);
+# }
+# struct S(usize);
+# impl T for S {
+# fn bar(&self) {}
+# }
+# struct O(&'static str);
+# impl T for O {
+# fn bar(&self) {}
+# }
+# enum E {
+# S(S),
+# O(O),
+# }
+impl T for E {
+ fn bar(&self) {
+ match self {
+ E::S(s) => s.bar(),
+ E::O(o) => o.bar(),
+ }
+ }
+}
+```
+
+If you decide to use trait objects, be aware that these rely on
+[dynamic dispatch], which has performance implications, as the compiler needs
+to emit code that will figure out which method to call *at runtime* instead of
+during compilation. With trait objects we are trading performance for
+flexibility.
+
+[`impl Trait`]: https://doc.rust-lang.org/book/ch10-02-traits.html#returning-types-that-implement-traits
+[trait objects]: https://doc.rust-lang.org/book/ch17-02-trait-objects.html#using-trait-objects-that-allow-for-values-of-different-types
+[dynamic dispatch]: https://doc.rust-lang.org/book/ch17-02-trait-objects.html#trait-objects-perform-dynamic-dispatch
diff --git a/compiler/rustc_error_codes/src/error_codes/E0747.md b/compiler/rustc_error_codes/src/error_codes/E0747.md
new file mode 100644
index 000000000..caf7e0fba
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0747.md
@@ -0,0 +1,20 @@
+Generic arguments were not provided in the same order as the corresponding
+generic parameters are declared.
+
+Erroneous code example:
+
+```compile_fail,E0747
+struct S<'a, T>(&'a T);
+
+type X = S<(), 'static>; // error: the type argument is provided before the
+ // lifetime argument
+```
+
+The argument order should be changed to match the parameter declaration
+order, as in the following:
+
+```
+struct S<'a, T>(&'a T);
+
+type X = S<'static, ()>; // ok
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0748.md b/compiler/rustc_error_codes/src/error_codes/E0748.md
new file mode 100644
index 000000000..69f1c0261
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0748.md
@@ -0,0 +1,16 @@
+A raw string isn't correctly terminated because the trailing `#` count doesn't
+match its leading `#` count.
+
+Erroneous code example:
+
+```compile_fail,E0748
+let dolphins = r##"Dolphins!"#; // error!
+```
+
+To terminate a raw string, you have to have the same number of `#` at the end
+as at the beginning. Example:
+
+```
+let dolphins = r#"Dolphins!"#; // One `#` at the beginning, one at the end so
+ // all good!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0749.md b/compiler/rustc_error_codes/src/error_codes/E0749.md
new file mode 100644
index 000000000..dfe90ae89
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0749.md
@@ -0,0 +1,29 @@
+An item was added on a negative impl.
+
+Erroneous code example:
+
+```compile_fail,E0749
+# #![feature(negative_impls)]
+trait MyTrait {
+ type Foo;
+}
+
+impl !MyTrait for u32 {
+ type Foo = i32; // error!
+}
+```
+
+Negative impls are not allowed to have any items. Negative impls declare that a
+trait is **not** implemented (and never will be) and hence there is no need to
+specify the values for trait methods or other items.
+
+One way to fix this is to remove the items in negative impls:
+
+```
+# #![feature(negative_impls)]
+trait MyTrait {
+ type Foo;
+}
+
+impl !MyTrait for u32 {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0750.md b/compiler/rustc_error_codes/src/error_codes/E0750.md
new file mode 100644
index 000000000..905e852f8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0750.md
@@ -0,0 +1,18 @@
+A negative impl was made a default impl.
+
+Erroneous code example:
+
+```compile_fail,E0750
+# #![feature(negative_impls)]
+# #![feature(specialization)]
+trait MyTrait {
+ type Foo;
+}
+
+default impl !MyTrait for u32 {} // error!
+# fn main() {}
+```
+
+Negative impls cannot be default impls. A default impl supplies default values
+for the items within to be used by other impls, whereas a negative impl declares
+that there are no other impls. Combining the two does not make sense.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0751.md b/compiler/rustc_error_codes/src/error_codes/E0751.md
new file mode 100644
index 000000000..8794f7868
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0751.md
@@ -0,0 +1,12 @@
+There are both a positive and negative trait implementation for the same type.
+
+Erroneous code example:
+
+```compile_fail,E0751
+trait MyTrait {}
+impl MyTrait for i32 { }
+impl !MyTrait for i32 { } // error!
+```
+
+Negative implementations are a promise that the trait will never be implemented
+for the given types. Therefore, the two cannot exist at the same time.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0752.md b/compiler/rustc_error_codes/src/error_codes/E0752.md
new file mode 100644
index 000000000..9736da80c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0752.md
@@ -0,0 +1,19 @@
+The entry point of the program was marked as `async`.
+
+Erroneous code example:
+
+```compile_fail,E0752
+async fn main() -> Result<(), ()> { // error!
+ Ok(())
+}
+```
+
+`fn main()` or the specified start function is not allowed to be `async`. A
+missing or misconfigured async runtime setup can also surface as this error.
+To fix it, declare the entry point without `async`:
+
+```
+fn main() -> Result<(), ()> { // ok!
+ Ok(())
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0753.md b/compiler/rustc_error_codes/src/error_codes/E0753.md
new file mode 100644
index 000000000..a69da964a
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0753.md
@@ -0,0 +1,31 @@
+An inner doc comment was used in an invalid context.
+
+Erroneous code example:
+
+```compile_fail,E0753
+fn foo() {}
+//! foo
+// ^ error!
+fn main() {}
+```
+
+Inner doc comments can only be used before items. For example:
+
+```
+//! A working comment applied to the module!
+fn foo() {
+ //! Another working comment!
+}
+fn main() {}
+```
+
+If you want to document the item following the doc comment, use an outer doc
+comment instead:
+
+```
+/// I am an outer doc comment
+#[doc = "I am also an outer doc comment!"]
+fn foo() {
+ // ...
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0754.md b/compiler/rustc_error_codes/src/error_codes/E0754.md
new file mode 100644
index 000000000..acddb69aa
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0754.md
@@ -0,0 +1,24 @@
+A non-ASCII identifier was used in an invalid context.
+
+Erroneous code examples:
+
+```compile_fail,E0754
+
+mod řųśť; // error!
+
+#[no_mangle]
+fn řųśť() {} // error!
+
+fn main() {}
+```
+
+Non-ASCII identifiers can be used as module names if the module is inline or
+if a `#[path]` attribute is specified. For example:
+
+```
+mod řųśť { // ok!
+ const IS_GREAT: bool = true;
+}
+
+fn main() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0755.md b/compiler/rustc_error_codes/src/error_codes/E0755.md
new file mode 100644
index 000000000..88b7f4849
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0755.md
@@ -0,0 +1,28 @@
+The `ffi_pure` attribute was used on a non-foreign function.
+
+Erroneous code example:
+
+```compile_fail,E0755
+#![feature(ffi_pure)]
+
+#[ffi_pure] // error!
+pub fn foo() {}
+# fn main() {}
+```
+
+The `ffi_pure` attribute can only be used on foreign functions which do not have
+side effects or infinite loops:
+
+```
+#![feature(ffi_pure)]
+
+extern "C" {
+ #[ffi_pure] // ok!
+ pub fn strlen(s: *const i8) -> isize;
+}
+# fn main() {}
+```
+
+You can find more information about it in the [unstable Rust Book].
+
+[unstable Rust Book]: https://doc.rust-lang.org/unstable-book/language-features/ffi-pure.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0756.md b/compiler/rustc_error_codes/src/error_codes/E0756.md
new file mode 100644
index 000000000..ffdc421aa
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0756.md
@@ -0,0 +1,29 @@
+The `ffi_const` attribute was used on something other than a foreign function
+declaration.
+
+Erroneous code example:
+
+```compile_fail,E0756
+#![feature(ffi_const)]
+
+#[ffi_const] // error!
+pub fn foo() {}
+# fn main() {}
+```
+
+The `ffi_const` attribute can only be used on foreign function declarations
+which have no side effects except for their return value:
+
+```
+#![feature(ffi_const)]
+
+extern "C" {
+ #[ffi_const] // ok!
+ pub fn strlen(s: *const i8) -> i32;
+}
+# fn main() {}
+```
+
+You can get more information about it in the [unstable Rust Book].
+
+[unstable Rust Book]: https://doc.rust-lang.org/nightly/unstable-book/language-features/ffi-const.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0757.md b/compiler/rustc_error_codes/src/error_codes/E0757.md
new file mode 100644
index 000000000..41b06b23c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0757.md
@@ -0,0 +1,33 @@
+A function was given both the `ffi_const` and `ffi_pure` attributes.
+
+Erroneous code example:
+
+```compile_fail,E0757
+#![feature(ffi_const, ffi_pure)]
+
+extern "C" {
+ #[ffi_const]
+ #[ffi_pure] // error: `#[ffi_const]` function cannot be `#[ffi_pure]`
+ pub fn square(num: i32) -> i32;
+}
+```
+
+As `ffi_const` provides stronger guarantees than `ffi_pure`, remove the
+`ffi_pure` attribute:
+
+```
+#![feature(ffi_const)]
+
+extern "C" {
+ #[ffi_const]
+ pub fn square(num: i32) -> i32;
+}
+```
+
+You can get more information about `const` and `pure` in the [GCC documentation
+on Common Function Attributes]. The unstable Rust Book has more information
+about [`ffi_const`] and [`ffi_pure`].
+
+[GCC documentation on Common Function Attributes]: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html
+[`ffi_const`]: https://doc.rust-lang.org/nightly/unstable-book/language-features/ffi-const.html
+[`ffi_pure`]: https://doc.rust-lang.org/nightly/unstable-book/language-features/ffi-pure.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0758.md b/compiler/rustc_error_codes/src/error_codes/E0758.md
new file mode 100644
index 000000000..ddca4b3d7
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0758.md
@@ -0,0 +1,20 @@
+A multi-line (doc-)comment is unterminated.
+
+Erroneous code example:
+
+```compile_fail,E0758
+/* I am not terminated!
+```
+
+The same goes for doc comments:
+
+```compile_fail,E0758
+/*! I am not terminated!
+```
+
+You need to end your multi-line comment with `*/` in order to fix this error:
+
+```
+/* I am terminated! */
+/*! I am also terminated! */
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0759.md b/compiler/rustc_error_codes/src/error_codes/E0759.md
new file mode 100644
index 000000000..ce5d42b3c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0759.md
@@ -0,0 +1,63 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+Return type involving a trait did not require `'static` lifetime.
+
+Erroneous code examples:
+
+```compile_fail
+use std::fmt::Debug;
+
+fn foo(x: &i32) -> impl Debug { // error!
+ x
+}
+
+fn bar(x: &i32) -> Box<dyn Debug> { // error!
+ Box::new(x)
+}
+```
+
+Add `'static` requirement to fix them:
+
+```
+# use std::fmt::Debug;
+fn foo(x: &'static i32) -> impl Debug + 'static { // ok!
+ x
+}
+
+fn bar(x: &'static i32) -> Box<dyn Debug + 'static> { // ok!
+ Box::new(x)
+}
+```
+
+Both [`dyn Trait`] and [`impl Trait`] in return types have an implicit
+`'static` requirement, meaning that the returned value implementing them must
+be either a `'static` borrow or an owned value.
+
+In order to change the requirement from `'static` to a lifetime derived from
+its arguments, you can add an explicit bound, either to an anonymous lifetime
+`'_` or to some appropriate named lifetime.
+
+```
+# use std::fmt::Debug;
+fn foo(x: &i32) -> impl Debug + '_ {
+ x
+}
+fn bar(x: &i32) -> Box<dyn Debug + '_> {
+ Box::new(x)
+}
+```
+
+These are equivalent to the following explicit lifetime annotations:
+
+```
+# use std::fmt::Debug;
+fn foo<'a>(x: &'a i32) -> impl Debug + 'a {
+ x
+}
+fn bar<'a>(x: &'a i32) -> Box<dyn Debug + 'a> {
+ Box::new(x)
+}
+```
+
+[`dyn Trait`]: https://doc.rust-lang.org/book/ch17-02-trait-objects.html#using-trait-objects-that-allow-for-values-of-different-types
+[`impl Trait`]: https://doc.rust-lang.org/book/ch10-02-traits.html#returning-types-that-implement-traits
diff --git a/compiler/rustc_error_codes/src/error_codes/E0760.md b/compiler/rustc_error_codes/src/error_codes/E0760.md
new file mode 100644
index 000000000..e1dcfefeb
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0760.md
@@ -0,0 +1,32 @@
+`async fn`/`impl trait` return type cannot contain a projection
+or `Self` that references lifetimes from a parent scope.
+
+Erroneous code example:
+
+```compile_fail,E0760,edition2018
+struct S<'a>(&'a i32);
+
+impl<'a> S<'a> {
+ async fn new(i: &'a i32) -> Self {
+ S(&22)
+ }
+}
+```
+
+To fix this error, we need to spell out `Self` as `S<'a>`:
+
+```edition2018
+struct S<'a>(&'a i32);
+
+impl<'a> S<'a> {
+ async fn new(i: &'a i32) -> S<'a> {
+ S(&22)
+ }
+}
+```
+
+This will be allowed at some point in the future, but the implementation is
+not yet complete. See [issue-61949] for more details on this limitation.
+
+[issue-61949]: https://github.com/rust-lang/rust/issues/61949
diff --git a/compiler/rustc_error_codes/src/error_codes/E0761.md b/compiler/rustc_error_codes/src/error_codes/E0761.md
new file mode 100644
index 000000000..760c58976
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0761.md
@@ -0,0 +1,21 @@
+Multiple candidate files were found for an out-of-line module.
+
+Erroneous code example:
+
+```ignore (Multiple source files are required for compile_fail.)
+// file: ambiguous_module/mod.rs
+
+fn foo() {}
+
+// file: ambiguous_module.rs
+
+fn foo() {}
+
+// file: lib.rs
+
+mod ambiguous_module; // error: file for module `ambiguous_module`
+ // found at both ambiguous_module.rs and
+                      // ambiguous_module/mod.rs
+```
+
+Please remove this ambiguity by deleting/renaming one of the candidate files.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0762.md b/compiler/rustc_error_codes/src/error_codes/E0762.md
new file mode 100644
index 000000000..b01ded4a8
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0762.md
@@ -0,0 +1,13 @@
+A character literal wasn't ended with a quote.
+
+Erroneous code example:
+
+```compile_fail,E0762
+static C: char = '●; // error!
+```
+
+To fix this error, add the missing quote:
+
+```
+static C: char = '●'; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0763.md b/compiler/rustc_error_codes/src/error_codes/E0763.md
new file mode 100644
index 000000000..095b779f3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0763.md
@@ -0,0 +1,13 @@
+A byte literal wasn't correctly terminated.
+
+Erroneous code example:
+
+```compile_fail,E0763
+let c = b'a; // error!
+```
+
+To fix this error, add the missing quote:
+
+```
+let c = b'a'; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0764.md b/compiler/rustc_error_codes/src/error_codes/E0764.md
new file mode 100644
index 000000000..152627cf6
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0764.md
@@ -0,0 +1,41 @@
+A mutable reference was used in a constant.
+
+Erroneous code example:
+
+```compile_fail,E0764
+#![feature(const_mut_refs)]
+
+fn main() {
+ const OH_NO: &'static mut usize = &mut 1; // error!
+}
+```
+
+Mutable references (`&mut`) can only be used in constant functions, not statics
+or constants. This limitation exists to prevent the creation of constants that
+have a mutable reference in their final value. If you had a constant of
+`&mut i32` type, you could modify the value through that reference, making the
+constant essentially mutable.
+
+While there could be a more fine-grained scheme in the future that allows
+mutable references if they are not "leaked" to the final value, a more
+conservative approach was chosen for now. `const fn` do not have this problem,
+as the borrow checker will prevent the `const fn` from returning new mutable
+references.
+
+Remember: you cannot create a mutable reference in the final value of a
+constant or static. However, you can freely use mutable references inside
+constant functions:
+
+```
+#![feature(const_mut_refs)]
+
+const fn foo(x: usize) -> usize {
+ let mut y = 1;
+ let z = &mut y;
+ *z += x;
+ y
+}
+
+fn main() {
+ const FOO: usize = foo(10); // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0765.md b/compiler/rustc_error_codes/src/error_codes/E0765.md
new file mode 100644
index 000000000..456e3f3e9
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0765.md
@@ -0,0 +1,13 @@
+A double quote string (`"`) was not terminated.
+
+Erroneous code example:
+
+```compile_fail,E0765
+let s = "; // error!
+```
+
+To fix this error, add the missing double quote at the end of the string:
+
+```
+let s = ""; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0766.md b/compiler/rustc_error_codes/src/error_codes/E0766.md
new file mode 100644
index 000000000..4e775df2c
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0766.md
@@ -0,0 +1,13 @@
+A double quote byte string (`b"`) was not terminated.
+
+Erroneous code example:
+
+```compile_fail,E0766
+let s = b"; // error!
+```
+
+To fix this error, add the missing double quote at the end of the string:
+
+```
+let s = b""; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0767.md b/compiler/rustc_error_codes/src/error_codes/E0767.md
new file mode 100644
index 000000000..679fe7e41
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0767.md
@@ -0,0 +1,20 @@
+An unreachable label was used.
+
+Erroneous code example:
+
+```compile_fail,E0767
+'a: loop {
+ || {
+ loop { break 'a } // error: use of unreachable label `'a`
+ };
+}
+```
+
+Ensure that the label is within scope. Labels are not reachable through
+functions, closures, async blocks or modules. Example:
+
+```
+'a: loop {
+ break 'a; // ok!
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0768.md b/compiler/rustc_error_codes/src/error_codes/E0768.md
new file mode 100644
index 000000000..24169ef51
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0768.md
@@ -0,0 +1,13 @@
+A number in a non-decimal base has no digits.
+
+Erroneous code example:
+
+```compile_fail,E0768
+let s: i32 = 0b; // error!
+```
+
+To fix this error, add the missing digits:
+
+```
+let s: i32 = 0b1; // ok!
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0769.md b/compiler/rustc_error_codes/src/error_codes/E0769.md
new file mode 100644
index 000000000..4a3b674b0
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0769.md
@@ -0,0 +1,47 @@
+A tuple struct or tuple variant was used in a pattern as if it were a struct or
+struct variant.
+
+Erroneous code example:
+
+```compile_fail,E0769
+enum E {
+ A(i32),
+}
+
+let e = E::A(42);
+
+match e {
+ E::A { number } => { // error!
+ println!("{}", number);
+ }
+}
+```
+
+To fix this error, you can use the tuple pattern:
+
+```
+# enum E {
+# A(i32),
+# }
+# let e = E::A(42);
+match e {
+ E::A(number) => { // ok!
+ println!("{}", number);
+ }
+}
+```
+
+Alternatively, you can use the struct pattern with the correct field names
+(the tuple indices) and bind them to new identifiers:
+
+```
+# enum E {
+# A(i32),
+# }
+# let e = E::A(42);
+match e {
+ E::A { 0: number } => { // ok!
+ println!("{}", number);
+ }
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0770.md b/compiler/rustc_error_codes/src/error_codes/E0770.md
new file mode 100644
index 000000000..cd8fc481b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0770.md
@@ -0,0 +1,13 @@
+The type of a const parameter references other generic parameters.
+
+Erroneous code example:
+
+```compile_fail,E0770
+fn foo<T, const N: T>() {} // error!
+```
+
+To fix this error, use a concrete type for the const parameter:
+
+```
+fn foo<T, const N: usize>() {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0771.md b/compiler/rustc_error_codes/src/error_codes/E0771.md
new file mode 100644
index 000000000..a2a1a20f2
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0771.md
@@ -0,0 +1,23 @@
+A non-`'static` lifetime was used in a const generic. This is currently not
+allowed.
+
+Erroneous code example:
+
+```compile_fail,E0771
+#![feature(adt_const_params)]
+
+fn function_with_str<'a, const STRING: &'a str>() {} // error!
+```
+
+To fix this issue, the lifetime in the const generic needs to be changed to
+`'static`:
+
+```
+#![feature(adt_const_params)]
+
+fn function_with_str<const STRING: &'static str>() {} // ok!
+```
+
+For more information, see [GitHub issue #74052].
+
+[GitHub issue #74052]: https://github.com/rust-lang/rust/issues/74052
diff --git a/compiler/rustc_error_codes/src/error_codes/E0772.md b/compiler/rustc_error_codes/src/error_codes/E0772.md
new file mode 100644
index 000000000..5ffffd511
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0772.md
@@ -0,0 +1,91 @@
+#### Note: this error code is no longer emitted by the compiler.
+
+A trait object has some specific lifetime `'1`, but it was used in a way that
+requires it to have a `'static` lifetime.
+
+Example of erroneous code:
+
+```compile_fail
+trait BooleanLike {}
+trait Person {}
+
+impl BooleanLike for bool {}
+
+impl dyn Person {
+ fn is_cool(&self) -> bool {
+ // hey you, you're pretty cool
+ true
+ }
+}
+
+fn get_is_cool<'p>(person: &'p dyn Person) -> impl BooleanLike {
+ // error: `person` has an anonymous lifetime `'p` but calling
+    //        `is_cool` introduces an implicit `'static` lifetime
+ // requirement
+ person.is_cool()
+}
+```
+
+The trait object `person` in the function `get_is_cool`, while already being
+behind a reference with lifetime `'p`, also has its own implicit lifetime,
+`'2`.
+
+Lifetime `'2` represents the data the trait object might hold inside, for
+example:
+
+```
+trait MyTrait {}
+
+struct MyStruct<'a>(&'a i32);
+
+impl<'a> MyTrait for MyStruct<'a> {}
+```
+
+In this scenario, if a trait object of `dyn MyTrait + '2` is made from
+`MyStruct<'a>`, `'a` must live at least as long as `'2`. This allows the
+trait object's internal data to be accessed safely from any trait methods. This
+rule also goes for any lifetime any struct made into a trait object may have.
+
+In the implementation for `dyn Person`, the `'2` lifetime representing the
+internal data was omitted, meaning that the compiler inferred the lifetime
+`'static`. As a result, the implementation's `is_cool` is inferred by the
+compiler to look like this:
+
+```
+# trait Person {}
+#
+# impl dyn Person {
+fn is_cool<'a>(self: &'a (dyn Person + 'static)) -> bool {unimplemented!()}
+# }
+```
+
+While the `get_is_cool` function is inferred to look like this:
+
+```
+# trait Person {}
+# trait BooleanLike {}
+#
+fn get_is_cool<'p, R: BooleanLike>(person: &'p (dyn Person + 'p)) -> R {
+ unimplemented!()
+}
+```
+
+Which brings us to the core of the problem; the assignment of type
+`&'_ (dyn Person + '_)` to type `&'_ (dyn Person + 'static)` is impossible.
+
+Fixing it is as simple as being generic over lifetime `'2`, so as to prevent
+the compiler from inferring it as `'static`:
+
+```
+# trait Person {}
+#
+impl<'d> dyn Person + 'd {/* ... */}
+
+// This works too, and is more elegant:
+//impl dyn Person + '_ {/* ... */}
+```
+
+See the [Rust Reference on Trait Object Lifetime
+Bounds][trait-object-lifetime-bounds] for more information on trait object
+lifetimes.
+
+[trait-object-lifetime-bounds]: https://doc.rust-lang.org/reference/types/trait-object.html#trait-object-lifetime-bounds
diff --git a/compiler/rustc_error_codes/src/error_codes/E0773.md b/compiler/rustc_error_codes/src/error_codes/E0773.md
new file mode 100644
index 000000000..b19a58bf3
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0773.md
@@ -0,0 +1,38 @@
+A builtin-macro was defined more than once.
+
+Erroneous code example:
+
+```compile_fail,E0773
+#![feature(decl_macro)]
+#![feature(rustc_attrs)]
+
+#[rustc_builtin_macro]
+pub macro test($item:item) {
+ /* compiler built-in */
+}
+
+mod inner {
+ #[rustc_builtin_macro]
+ pub macro test($item:item) {
+ /* compiler built-in */
+ }
+}
+```
+
+To fix the issue, remove the duplicate declaration:
+
+```
+#![feature(decl_macro)]
+#![feature(rustc_attrs)]
+
+#[rustc_builtin_macro]
+pub macro test($item:item) {
+ /* compiler built-in */
+}
+```
+
+In very rare edge cases, this may happen when loading `core` or `std` twice,
+once with `check` metadata and once with `build` metadata.
+For more information, see [#75176].
+
+[#75176]: https://github.com/rust-lang/rust/pull/75176#issuecomment-683234468
diff --git a/compiler/rustc_error_codes/src/error_codes/E0774.md b/compiler/rustc_error_codes/src/error_codes/E0774.md
new file mode 100644
index 000000000..79793ba9d
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0774.md
@@ -0,0 +1,24 @@
+`derive` was applied on something which is not a struct, a union or an enum.
+
+Erroneous code example:
+
+```compile_fail,E0774
+trait Foo {
+ #[derive(Clone)] // error!
+ type Bar;
+}
+```
+
+As stated above, the `derive` attribute is only allowed on structs, unions, or
+enums:
+
+```
+#[derive(Clone)] // ok!
+struct Bar {
+ field: u32,
+}
+```
+
+You can find more information about `derive` in the [Rust Book].
+
+[Rust Book]: https://doc.rust-lang.org/book/appendix-03-derivable-traits.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0775.md b/compiler/rustc_error_codes/src/error_codes/E0775.md
new file mode 100644
index 000000000..9bafd52f7
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0775.md
@@ -0,0 +1,17 @@
+`#[cmse_nonsecure_entry]` is only valid for targets with the TrustZone-M
+extension.
+
+Erroneous code example:
+
+```compile_fail,E0775
+#![feature(cmse_nonsecure_entry)]
+
+#[cmse_nonsecure_entry]
+pub extern "C" fn entry_function() {}
+```
+
+To fix this error, compile your code for a Rust target that supports the
+TrustZone-M extension. The current possible targets are:
+* `thumbv8m.main-none-eabi`
+* `thumbv8m.main-none-eabihf`
+* `thumbv8m.base-none-eabi`
diff --git a/compiler/rustc_error_codes/src/error_codes/E0776.md b/compiler/rustc_error_codes/src/error_codes/E0776.md
new file mode 100644
index 000000000..d65beebe0
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0776.md
@@ -0,0 +1,13 @@
+`#[cmse_nonsecure_entry]` functions require a C ABI.
+
+Erroneous code example:
+
+```compile_fail,E0776
+#![feature(cmse_nonsecure_entry)]
+
+#[no_mangle]
+#[cmse_nonsecure_entry]
+pub fn entry_function(input: Vec<u32>) {}
+```
+
+To fix this error, declare your entry function with a C ABI, using `extern "C"`.
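+
+A corrected sketch (not compiled here, since the attribute additionally
+requires a TrustZone-M target; see E0775):
+
+```ignore (requires a TrustZone-M target)
+#![feature(cmse_nonsecure_entry)]
+
+#[no_mangle]
+#[cmse_nonsecure_entry]
+pub extern "C" fn entry_function(input: u32) -> u32 {
+    input + 1
+}
+```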
diff --git a/compiler/rustc_error_codes/src/error_codes/E0777.md b/compiler/rustc_error_codes/src/error_codes/E0777.md
new file mode 100644
index 000000000..8c5c6e28b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0777.md
@@ -0,0 +1,19 @@
+A literal value was used inside `#[derive]`.
+
+Erroneous code example:
+
+```compile_fail,E0777
+#[derive("Clone")] // error!
+struct Foo;
+```
+
+Only paths to traits are allowed as arguments inside `#[derive]`:
+
+```
+#[derive(Clone)] // ok!
+struct Foo;
+```
+
+You can find more information about the `#[derive]` attribute in the
+[Rust Book].
+
+[Rust Book]: https://doc.rust-lang.org/book/appendix-03-derivable-traits.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0778.md b/compiler/rustc_error_codes/src/error_codes/E0778.md
new file mode 100644
index 000000000..467362dca
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0778.md
@@ -0,0 +1,35 @@
+The `instruction_set` attribute was malformed.
+
+Erroneous code example:
+
+```compile_fail,E0778
+#![feature(isa_attribute)]
+
+#[instruction_set()] // error: expected one argument
+pub fn something() {}
+fn main() {}
+```
+
+The `instruction_set` attribute requires its argument to be specified:
+
+```
+#![feature(isa_attribute)]
+
+#[cfg_attr(target_arch="arm", instruction_set(arm::a32))]
+fn something() {}
+```
+
+or:
+
+```
+#![feature(isa_attribute)]
+
+#[cfg_attr(target_arch="arm", instruction_set(arm::t32))]
+fn something() {}
+```
+
+For more information see the [`instruction_set` attribute][isa-attribute]
+section of the Reference.
+
+[isa-attribute]: https://doc.rust-lang.org/reference/attributes/codegen.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0779.md b/compiler/rustc_error_codes/src/error_codes/E0779.md
new file mode 100644
index 000000000..146e20c26
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0779.md
@@ -0,0 +1,32 @@
+An unknown argument was given to the `instruction_set` attribute.
+
+Erroneous code example:
+
+```compile_fail,E0779
+#![feature(isa_attribute)]
+
+#[instruction_set(intel::x64)] // error: invalid argument
+pub fn something() {}
+fn main() {}
+```
+
+The `instruction_set` attribute only supports two arguments currently:
+
+ * `arm::a32`
+ * `arm::t32`
+
+Any other argument given to the `instruction_set` attribute will produce this
+error. Example:
+
+```
+#![feature(isa_attribute)]
+
+#[cfg_attr(target_arch="arm", instruction_set(arm::a32))] // ok!
+pub fn something() {}
+fn main() {}
+```
+
+For more information see the [`instruction_set` attribute][isa-attribute]
+section of the Reference.
+
+[isa-attribute]: https://doc.rust-lang.org/reference/attributes/codegen.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0780.md b/compiler/rustc_error_codes/src/error_codes/E0780.md
new file mode 100644
index 000000000..704b4ae18
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0780.md
@@ -0,0 +1,19 @@
+Cannot use `doc(inline)` with anonymous imports
+
+Erroneous code example:
+
+```ignore (cannot-doctest-multicrate-project)
+
+#[doc(inline)] // error: invalid doc argument
+pub use foo::Foo as _;
+```
+
+Anonymous imports are always rendered with `#[doc(no_inline)]`. To fix this
+error, remove the `#[doc(inline)]` attribute.
+
+Example:
+
+```ignore (cannot-doctest-multicrate-project)
+
+pub use foo::Foo as _;
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0781.md b/compiler/rustc_error_codes/src/error_codes/E0781.md
new file mode 100644
index 000000000..7641acfb5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0781.md
@@ -0,0 +1,12 @@
+The `C-cmse-nonsecure-call` ABI can only be used with function pointers.
+
+Erroneous code example:
+
+```compile_fail,E0781
+#![feature(abi_c_cmse_nonsecure_call)]
+
+pub extern "C-cmse-nonsecure-call" fn test() {}
+```
+
+The `C-cmse-nonsecure-call` ABI should be used by casting function pointers to
+specific addresses.
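+
+For illustration, a sketch of such a cast (not compiled here, since it also
+requires a TrustZone-M target); the address `0x1000_0000` is a made-up
+placeholder:
+
+```ignore (requires a TrustZone-M target)
+#![feature(abi_c_cmse_nonsecure_call)]
+
+fn call_nonsecure() -> u32 {
+    let f: extern "C-cmse-nonsecure-call" fn() -> u32 =
+        unsafe { core::mem::transmute(0x1000_0000usize) };
+    f()
+}
+```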
diff --git a/compiler/rustc_error_codes/src/error_codes/E0782.md b/compiler/rustc_error_codes/src/error_codes/E0782.md
new file mode 100644
index 000000000..0f3253c05
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0782.md
@@ -0,0 +1,26 @@
+Trait objects must include the `dyn` keyword.
+
+Erroneous code example:
+
+```edition2021,compile_fail,E0782
+trait Foo {}
+fn test(arg: Box<Foo>) {} // error!
+```
+
+Trait objects are a way to call methods on types that are not known until
+runtime but conform to some trait.
+
+Trait objects should be formed with `Box<dyn Foo>`, but in the code above
+`dyn` is left off.
+
+This makes it harder to see that `arg` is a trait object and not
+simply a heap-allocated type called `Foo`.
+
+To fix this issue, add `dyn` before the trait name.
+
+```edition2021
+trait Foo {}
+fn test(arg: Box<dyn Foo>) {} // ok!
+```
+
+This used to be allowed before edition 2021, but is now an error.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0783.md b/compiler/rustc_error_codes/src/error_codes/E0783.md
new file mode 100644
index 000000000..73981e59e
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0783.md
@@ -0,0 +1,22 @@
+The range pattern `...` is no longer allowed.
+
+Erroneous code example:
+
+```edition2021,compile_fail,E0783
+match 2u8 {
+ 0...9 => println!("Got a number less than 10"), // error!
+ _ => println!("Got a number 10 or more"),
+}
+```
+
+Older Rust code using previous editions allowed `...` to stand for inclusive
+ranges, which are now written using `..=`.
+
+To make this code compile, replace the `...` with `..=`.
+
+```edition2021
+match 2u8 {
+ 0..=9 => println!("Got a number less than 10"), // ok!
+ _ => println!("Got a number 10 or more"),
+}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0784.md b/compiler/rustc_error_codes/src/error_codes/E0784.md
new file mode 100644
index 000000000..b20b7039b
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0784.md
@@ -0,0 +1,32 @@
+A union expression does not have exactly one field.
+
+Erroneous code example:
+
+```compile_fail,E0784
+union Bird {
+ pigeon: u8,
+ turtledove: u16,
+}
+
+let bird = Bird {}; // error
+let bird = Bird { pigeon: 0, turtledove: 1 }; // error
+```
+
+The key property of unions is that all fields of a union share common storage.
+As a result, writes to one field of a union can overwrite its other fields, and
+the size of a union is determined by the size of its largest field.
+
+You can find more information about union types in the [Rust reference].
+
+Working example:
+
+```
+union Bird {
+ pigeon: u8,
+ turtledove: u16,
+}
+
+let bird = Bird { pigeon: 0 }; // OK
+```
+
+[Rust reference]: https://doc.rust-lang.org/reference/items/unions.html
diff --git a/compiler/rustc_error_codes/src/error_codes/E0785.md b/compiler/rustc_error_codes/src/error_codes/E0785.md
new file mode 100644
index 000000000..373320539
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0785.md
@@ -0,0 +1,30 @@
+An inherent `impl` was written on a dyn auto trait.
+
+Erroneous code example:
+
+```compile_fail,E0785
+#![feature(auto_traits)]
+
+auto trait AutoTrait {}
+
+impl dyn AutoTrait {}
+```
+
+Dyn objects allow any number of auto traits, plus at most one non-auto trait.
+The non-auto trait becomes the "principal trait".
+
+When checking if an impl on a dyn trait is coherent, the principal trait is
+normally the only one considered. Since the erroneous code has no principal
+trait, it cannot be implemented at all.
+
+Working example:
+
+```
+#![feature(auto_traits)]
+
+trait PrincipalTrait {}
+
+auto trait AutoTrait {}
+
+impl dyn PrincipalTrait + AutoTrait + Send {}
+```
diff --git a/compiler/rustc_error_codes/src/error_codes/E0786.md b/compiler/rustc_error_codes/src/error_codes/E0786.md
new file mode 100644
index 000000000..4a9635bf5
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0786.md
@@ -0,0 +1,14 @@
+A metadata file was invalid.
+
+Erroneous code example:
+
+```ignore (needs extern files)
+use ::foo; // error: found invalid metadata files for crate `foo`
+```
+
+When loading crates, each crate must have a valid metadata file.
+Invalid files could be caused by filesystem corruption,
+an IO error while reading the file, or (rarely) a bug in the compiler itself.
+
+Consider deleting the file and recreating it,
+or reporting a bug against the compiler.
diff --git a/compiler/rustc_error_codes/src/error_codes/E0787.md b/compiler/rustc_error_codes/src/error_codes/E0787.md
new file mode 100644
index 000000000..cee508292
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0787.md
@@ -0,0 +1,45 @@
+An unsupported naked function was defined.
+
+Erroneous code example:
+
+```compile_fail,E0787
+#![feature(naked_functions)]
+
+#[naked]
+pub extern "C" fn f() -> u32 {
+ 42
+}
+```
+
+Naked functions must be defined using a single inline assembly
+block.
+
+Execution must never fall through past the end of the assembly
+code, so the block must use the `noreturn` option. The asm block can also
+use the `att_syntax` and `raw` options, but other options are not allowed.
+
+The asm block must not contain any operands other than `const` and
+`sym`.
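+
+Below is a minimal sketch of a conforming definition, assuming an x86-64
+target; the assembly itself is illustrative:
+
+```ignore (requires x86_64 inline assembly)
+#![feature(naked_functions)]
+
+use std::arch::asm;
+
+#[naked]
+pub extern "C" fn f() -> u32 {
+    unsafe {
+        // The entire body is one `asm!` block that never falls through.
+        asm!("mov eax, 42", "ret", options(noreturn));
+    }
+}
+```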
+
+### Additional information
+
+For more information, please see [RFC 2972].
+
+[RFC 2972]: https://github.com/rust-lang/rfcs/blob/master/text/2972-constrained-naked.md
diff --git a/compiler/rustc_error_codes/src/error_codes/E0788.md b/compiler/rustc_error_codes/src/error_codes/E0788.md
new file mode 100644
index 000000000..d26f9b594
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0788.md
@@ -0,0 +1,36 @@
+A `#[no_coverage]` attribute was applied to something which does not show up
+in code coverage, or is too granular to be excluded from the coverage report.
+
+For now, this attribute can only be applied to function, method, and closure
+definitions. In the future, it may be added to statements, blocks, and
+expressions, and for the time being, using this attribute in those places
+will just emit an `unused_attributes` lint instead of this error.
+
+Example of erroneous code:
+
+```compile_fail,E0788
+#[no_coverage]
+struct Foo;
+
+#[no_coverage]
+const FOO: Foo = Foo;
+```
+
+`#[no_coverage]` tells the compiler not to generate coverage instrumentation
+for a piece of code when the `-C instrument-coverage` flag is passed. Things
+like structs and consts are not coverable code, so this attribute has no
+effect on them.
+
+If you wish to apply this attribute to all methods in an impl or module,
+manually annotate each method; it is not possible to annotate the entire impl
+with a `#[no_coverage]` attribute.
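+
+A working sketch, assuming the `no_coverage` feature gate, applies the
+attribute directly to a function definition:
+
+```
+#![feature(no_coverage)]
+
+#[no_coverage]
+fn foo() {}
+```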
diff --git a/compiler/rustc_error_codes/src/error_codes/E0790.md b/compiler/rustc_error_codes/src/error_codes/E0790.md
new file mode 100644
index 000000000..2aee9dfbd
--- /dev/null
+++ b/compiler/rustc_error_codes/src/error_codes/E0790.md
@@ -0,0 +1,47 @@
+You need to specify a concrete implementation of the trait in order to call
+the method.
+
+Erroneous code example:
+
+```compile_fail,E0790
+trait Generator {
+ fn create() -> u32;
+}
+
+struct Impl;
+
+impl Generator for Impl {
+ fn create() -> u32 { 1 }
+}
+
+struct AnotherImpl;
+
+impl Generator for AnotherImpl {
+ fn create() -> u32 { 2 }
+}
+
+let cont: u32 = Generator::create();
+// error: impossible to choose one of the `Generator` trait implementations
+// Should it be Impl or AnotherImpl, maybe something else?
+```
+
+This error can be solved by adding type annotations that provide the missing
+information to the compiler. In this case, the solution is to use a concrete
+type:
+
+```
+trait Generator {
+ fn create() -> u32;
+}
+
+struct AnotherImpl;
+
+impl Generator for AnotherImpl {
+ fn create() -> u32 { 2 }
+}
+
+let gen1 = AnotherImpl::create();
+
+// if there are multiple methods with the same name (from different traits)
+let gen2 = <AnotherImpl as Generator>::create();
+```
diff --git a/compiler/rustc_error_codes/src/lib.rs b/compiler/rustc_error_codes/src/lib.rs
new file mode 100644
index 000000000..f2432f616
--- /dev/null
+++ b/compiler/rustc_error_codes/src/lib.rs
@@ -0,0 +1,25 @@
+#![deny(rustdoc::invalid_codeblock_attributes)]
+//! This library is used to gather all error codes into one place,
+//! the goal being to make their maintenance easier.
+
+macro_rules! register_diagnostics {
+ ($($ecode:ident: $message:expr,)* ; $($code:ident,)*) => (
+ pub static DIAGNOSTICS: &[(&str, Option<&str>)] = &[
+ $( (stringify!($ecode), Some($message)), )*
+ $( (stringify!($code), None), )*
+ ];
+ )
+}
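+
+// A hedged sketch of how `error_codes.rs` is expected to invoke this macro;
+// the codes and the split around `;` (documented codes before, undocumented
+// codes after) are illustrative:
+//
+// register_diagnostics! {
+//     E0778: include_str!("./error_codes/E0778.md"),
+//     ;
+//     E0789,
+// }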
+
+mod error_codes;
+pub use error_codes::DIAGNOSTICS;
diff --git a/compiler/rustc_error_messages/Cargo.toml b/compiler/rustc_error_messages/Cargo.toml
new file mode 100644
index 000000000..fc84c7c86
--- /dev/null
+++ b/compiler/rustc_error_messages/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "rustc_error_messages"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+fluent-bundle = "0.15.2"
+fluent-syntax = "0.11"
+intl-memoizer = "0.5.1"
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_span = { path = "../rustc_span" }
+rustc_macros = { path = "../rustc_macros" }
+tracing = "0.1"
+unic-langid = { version = "0.9.0", features = ["macros"] }
diff --git a/compiler/rustc_error_messages/locales/en-US/borrowck.ftl b/compiler/rustc_error_messages/locales/en-US/borrowck.ftl
new file mode 100644
index 000000000..645673ef4
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/borrowck.ftl
@@ -0,0 +1,18 @@
+borrowck-move-unsized =
+ cannot move a value of type `{$ty}`
+ .label = the size of `{$ty}` cannot be statically determined
+
+borrowck-higher-ranked-lifetime-error =
+ higher-ranked lifetime error
+
+borrowck-could-not-prove =
+ could not prove `{$predicate}`
+
+borrowck-could-not-normalize =
+ could not normalize `{$value}`
+
+borrowck-higher-ranked-subtype-error =
+ higher-ranked subtype error
+
+generic-does-not-live-long-enough =
+    `{$kind}` does not live long enough
\ No newline at end of file
diff --git a/compiler/rustc_error_messages/locales/en-US/builtin_macros.ftl b/compiler/rustc_error_messages/locales/en-US/builtin_macros.ftl
new file mode 100644
index 000000000..1d3e33c81
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/builtin_macros.ftl
@@ -0,0 +1,5 @@
+builtin-macros-requires-cfg-pattern =
+ macro requires a cfg-pattern as an argument
+ .label = cfg-pattern required
+
+builtin-macros-expected-one-cfg-pattern = expected 1 cfg-pattern
diff --git a/compiler/rustc_error_messages/locales/en-US/const_eval.ftl b/compiler/rustc_error_messages/locales/en-US/const_eval.ftl
new file mode 100644
index 000000000..3f2ff8610
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/const_eval.ftl
@@ -0,0 +1,31 @@
+const-eval-unstable-in-stable =
+ const-stable function cannot use `#[feature({$gate})]`
+ .unstable-sugg = if it is not part of the public API, make this function unstably const
+ .bypass-sugg = otherwise `#[rustc_allow_const_fn_unstable]` can be used to bypass stability checks
+
+const-eval-thread-local-access =
+ thread-local statics cannot be accessed at compile-time
+
+const-eval-static-access =
+ {$kind}s cannot refer to statics
+ .help = consider extracting the value of the `static` to a `const`, and referring to that
+ .teach-note = `static` and `const` variables can refer to other `const` variables. A `const` variable, however, cannot refer to a `static` variable.
+ .teach-help = To fix this, the value can be extracted to a `const` and then used.
+
+const-eval-raw-ptr-to-int =
+ pointers cannot be cast to integers during const eval
+ .note = at compile-time, pointers do not have an integer value
+ .note2 = avoiding this restriction via `transmute`, `union`, or raw pointers leads to compile-time undefined behavior
+
+const-eval-raw-ptr-comparison =
+ pointers cannot be reliably compared during const eval
+ .note = see issue #53020 <https://github.com/rust-lang/rust/issues/53020> for more information
+
+const-eval-panic-non-str = argument to `panic!()` in a const context must have type `&str`
+
+const-eval-mut-deref =
+ mutation through a reference is not allowed in {$kind}s
+
+const-eval-transient-mut-borrow = mutable references are not allowed in {$kind}s
+
+const-eval-transient-mut-borrow-raw = raw mutable references are not allowed in {$kind}s
diff --git a/compiler/rustc_error_messages/locales/en-US/expand.ftl b/compiler/rustc_error_messages/locales/en-US/expand.ftl
new file mode 100644
index 000000000..8d506a3ea
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/expand.ftl
@@ -0,0 +1,5 @@
+expand-explain-doc-comment-outer =
+ outer doc comments expand to `#[doc = "..."]`, which is what this macro attempted to match
+
+expand-explain-doc-comment-inner =
+ inner doc comments expand to `#![doc = "..."]`, which is what this macro attempted to match
diff --git a/compiler/rustc_error_messages/locales/en-US/lint.ftl b/compiler/rustc_error_messages/locales/en-US/lint.ftl
new file mode 100644
index 000000000..55e96e58e
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/lint.ftl
@@ -0,0 +1,395 @@
+lint-array-into-iter =
+    this method call resolves to `<&{$target} as IntoIterator>::into_iter` (due to backwards compatibility), but will resolve to `<{$target} as IntoIterator>::into_iter` in Rust 2021
+ .use-iter-suggestion = use `.iter()` instead of `.into_iter()` to avoid ambiguity
+ .remove-into-iter-suggestion = or remove `.into_iter()` to iterate by value
+ .use-explicit-into-iter-suggestion =
+ or use `IntoIterator::into_iter(..)` instead of `.into_iter()` to explicitly iterate by value
+
+lint-enum-intrinsics-mem-discriminant =
+ the return value of `mem::discriminant` is unspecified when called with a non-enum type
+ .note = the argument to `discriminant` should be a reference to an enum, but it was passed a reference to a `{$ty_param}`, which is not an enum.
+
+lint-enum-intrinsics-mem-variant =
+ the return value of `mem::variant_count` is unspecified when called with a non-enum type
+ .note = the type parameter of `variant_count` should be an enum, but it was instantiated with the type `{$ty_param}`, which is not an enum.
+
+lint-expectation = this lint expectation is unfulfilled
+ .note = the `unfulfilled_lint_expectations` lint can't be expected and will always produce this message
+
+lint-hidden-unicode-codepoints = unicode codepoint changing visible direction of text present in {$label}
+ .label = this {$label} contains {$count ->
+ [one] an invisible
+ *[other] invisible
+ } unicode text flow control {$count ->
+ [one] codepoint
+ *[other] codepoints
+ }
+    .note = these kinds of unicode codepoints change the way text flows in applications that support them, but can cause confusion because they change the order of characters on the screen
+ .suggestion-remove = if their presence wasn't intentional, you can remove them
+ .suggestion-escape = if you want to keep them but make them visible in your source code, you can escape them
+ .no-suggestion-note-escape = if you want to keep them but make them visible in your source code, you can escape them: {$escaped}
+
+lint-default-hash-types = prefer `{$preferred}` over `{$used}`, it has better performance
+ .note = a `use rustc_data_structures::fx::{$preferred}` may be necessary
+
+lint-query-instability = using `{$query}` can result in unstable query results
+ .note = if you believe this case to be fine, allow this lint and add a comment explaining your rationale
+
+lint-tykind-kind = usage of `ty::TyKind::<kind>`
+ .suggestion = try using `ty::<kind>` directly
+
+lint-tykind = usage of `ty::TyKind`
+ .help = try using `Ty` instead
+
+lint-ty-qualified = usage of qualified `ty::{$ty}`
+ .suggestion = try importing it and using it unqualified
+
+lint-lintpass-by-hand = implementing `LintPass` by hand
+ .help = try using `declare_lint_pass!` or `impl_lint_pass!` instead
+
+lint-non-existant-doc-keyword = found non-existent keyword `{$keyword}` used in `#[doc(keyword = \"...\")]`
+ .help = only existing keywords are allowed in core/std
+
+lint-diag-out-of-impl =
+ diagnostics should only be created in `SessionDiagnostic`/`AddSubdiagnostic` impls
+
+lint-untranslatable-diag = diagnostics should be created using translatable messages
+
+lint-cstring-ptr = getting the inner pointer of a temporary `CString`
+ .as-ptr-label = this pointer will be invalid
+ .unwrap-label = this `CString` is deallocated at the end of the statement, bind it to a variable to extend its lifetime
+ .note = pointers do not have a lifetime; when calling `as_ptr` the `CString` will be deallocated at the end of the statement because nothing is referencing it as far as the type system is concerned
+ .help = for more information, see https://doc.rust-lang.org/reference/destructors.html
+
+lint-identifier-non-ascii-char = identifier contains non-ASCII characters
+
+lint-identifier-uncommon-codepoints = identifier contains uncommon Unicode codepoints
+
+lint-confusable-identifier-pair = identifier pair considered confusable between `{$existing_sym}` and `{$sym}`
+ .label = this is where the previous identifier occurred
+
+lint-mixed-script-confusables =
+ the usage of Script Group `{$set}` in this crate consists solely of mixed script confusables
+ .includes-note = the usage includes {$includes}
+ .note = please recheck to make sure their usages are indeed what you want
+
+lint-non-fmt-panic = panic message is not a string literal
+ .note = this usage of `{$name}!()` is deprecated; it will be a hard error in Rust 2021
+ .more-info-note = for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/panic-macro-consistency.html>
+ .supports-fmt-note = the `{$name}!()` macro supports formatting, so there's no need for the `format!()` macro here
+ .supports-fmt-suggestion = remove the `format!(..)` macro call
+ .display-suggestion = add a "{"{"}{"}"}" format string to `Display` the message
+ .debug-suggestion =
+ add a "{"{"}:?{"}"}" format string to use the `Debug` implementation of `{$ty}`
+ .panic-suggestion = {$already_suggested ->
+ [true] or use
+ *[false] use
+ } std::panic::panic_any instead
+
+lint-non-fmt-panic-unused =
+ panic message contains {$count ->
+ [one] an unused
+ *[other] unused
+ } formatting {$count ->
+ [one] placeholder
+ *[other] placeholders
+ }
+ .note = this message is not used as a format string when given without arguments, but will be in Rust 2021
+ .add-args-suggestion = add the missing {$count ->
+ [one] argument
+ *[other] arguments
+ }
+ .add-fmt-suggestion = or add a "{"{"}{"}"}" format string to use the message literally
+
+lint-non-fmt-panic-braces =
+ panic message contains {$count ->
+ [one] a brace
+ *[other] braces
+ }
+ .note = this message is not used as a format string, but will be in Rust 2021
+ .suggestion = add a "{"{"}{"}"}" format string to use the message literally
+
+lint-non-camel-case-type = {$sort} `{$name}` should have an upper camel case name
+ .suggestion = convert the identifier to upper camel case
+ .label = should have an UpperCamelCase name
+
+lint-non-snake-case = {$sort} `{$name}` should have a snake case name
+ .rename-or-convert-suggestion = rename the identifier or convert it to a snake case raw identifier
+ .cannot-convert-note = `{$sc}` cannot be used as a raw identifier
+ .rename-suggestion = rename the identifier
+ .convert-suggestion = convert the identifier to snake case
+ .help = convert the identifier to snake case: `{$sc}`
+ .label = should have a snake_case name
+
+lint-non-upper_case-global = {$sort} `{$name}` should have an upper case name
+ .suggestion = convert the identifier to upper case
+ .label = should have an UPPER_CASE name
+
+lint-noop-method-call = call to `.{$method}()` on a reference in this situation does nothing
+ .label = unnecessary method call
+ .note = the type `{$receiver_ty}` which `{$method}` is being called on is the same as the type returned from `{$method}`, so the method call does not do anything and can be removed
+
+lint-pass-by-value = passing `{$ty}` by reference
+ .suggestion = try passing by value
+
+lint-redundant-semicolons =
+ unnecessary trailing {$multiple ->
+ [true] semicolons
+ *[false] semicolon
+ }
+ .suggestion = remove {$multiple ->
+ [true] these semicolons
+ *[false] this semicolon
+ }
+
+lint-drop-trait-constraints =
+ bounds on `{$predicate}` are most likely incorrect, consider instead using `{$needs_drop}` to detect whether a type can be trivially dropped
+
+lint-drop-glue =
+ types that do not implement `Drop` can still have drop glue, consider instead using `{$needs_drop}` to detect whether a type is trivially dropped
+
+lint-range-endpoint-out-of-range = range endpoint is out of range for `{$ty}`
+ .suggestion = use an inclusive range instead
+
+lint-overflowing-bin-hex = literal out of range for `{$ty}`
+ .negative-note = the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}`
+ .negative-becomes-note = and the value `-{$lit}` will become `{$actually}{$ty}`
+ .positive-note = the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}` and will become `{$actually}{$ty}`
+ .suggestion = consider using the type `{$suggestion_ty}` instead
+ .help = consider using the type `{$suggestion_ty}` instead
+
+lint-overflowing-int = literal out of range for `{$ty}`
+ .note = the literal `{$lit}` does not fit into the type `{$ty}` whose range is `{$min}..={$max}`
+ .help = consider using the type `{$suggestion_ty}` instead
+
+lint-only-cast-u8-to-char = only `u8` can be cast into `char`
+ .suggestion = use a `char` literal instead
+
+lint-overflowing-uint = literal out of range for `{$ty}`
+ .note = the literal `{$lit}` does not fit into the type `{$ty}` whose range is `{$min}..={$max}`
+
+lint-overflowing-literal = literal out of range for `{$ty}`
+ .note = the literal `{$lit}` does not fit into the type `{$ty}` and will be converted to `{$ty}::INFINITY`
+
+lint-unused-comparisons = comparison is useless due to type limits
+
+lint-improper-ctypes = `extern` {$desc} uses type `{$ty}`, which is not FFI-safe
+ .label = not FFI-safe
+ .note = the type is defined here
+
+lint-improper-ctypes-opaque = opaque types have no C equivalent
+
+lint-improper-ctypes-fnptr-reason = this function pointer has Rust-specific calling convention
+lint-improper-ctypes-fnptr-help = consider using an `extern fn(...) -> ...` function pointer instead
+
+lint-improper-ctypes-tuple-reason = tuples have unspecified layout
+lint-improper-ctypes-tuple-help = consider using a struct instead
+
+lint-improper-ctypes-str-reason = string slices have no C equivalent
+lint-improper-ctypes-str-help = consider using `*const u8` and a length instead
+
+lint-improper-ctypes-dyn = trait objects have no C equivalent
+
+lint-improper-ctypes-slice-reason = slices have no C equivalent
+lint-improper-ctypes-slice-help = consider using a raw pointer instead
+
+lint-improper-ctypes-128bit = 128-bit integers don't currently have a known stable ABI
+
+lint-improper-ctypes-char-reason = the `char` type has no C equivalent
+lint-improper-ctypes-char-help = consider using `u32` or `libc::wchar_t` instead
+
+lint-improper-ctypes-non-exhaustive = this enum is non-exhaustive
+lint-improper-ctypes-non-exhaustive-variant = this enum has non-exhaustive variants
+
+lint-improper-ctypes-enum-repr-reason = enum has no representation hint
+lint-improper-ctypes-enum-repr-help =
+ consider adding a `#[repr(C)]`, `#[repr(transparent)]`, or integer `#[repr(...)]` attribute to this enum
+
+lint-improper-ctypes-struct-fieldless-reason = this struct has no fields
+lint-improper-ctypes-struct-fieldless-help = consider adding a member to this struct
+
+lint-improper-ctypes-union-fieldless-reason = this union has no fields
+lint-improper-ctypes-union-fieldless-help = consider adding a member to this union
+
+lint-improper-ctypes-struct-non-exhaustive = this struct is non-exhaustive
+lint-improper-ctypes-union-non-exhaustive = this union is non-exhaustive
+
+lint-improper-ctypes-struct-layout-reason = this struct has unspecified layout
+lint-improper-ctypes-struct-layout-help = consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this struct
+
+lint-improper-ctypes-union-layout-reason = this union has unspecified layout
+lint-improper-ctypes-union-layout-help = consider adding a `#[repr(C)]` or `#[repr(transparent)]` attribute to this union
+
+lint-improper-ctypes-box = box cannot be represented as a single pointer
+
+lint-improper-ctypes-enum-phantomdata = this enum contains a PhantomData field
+
+lint-improper-ctypes-struct-zst = this struct contains only zero-sized fields
+
+lint-improper-ctypes-array-reason = passing raw arrays by value is not FFI-safe
+lint-improper-ctypes-array-help = consider passing a pointer to the array
+
+lint-improper-ctypes-only-phantomdata = composed only of `PhantomData`
+
+lint-variant-size-differences =
+ enum variant is more than three times larger ({$largest} bytes) than the next largest
+
+lint-atomic-ordering-load = atomic loads cannot have `Release` or `AcqRel` ordering
+ .help = consider using ordering modes `Acquire`, `SeqCst` or `Relaxed`
+
+lint-atomic-ordering-store = atomic stores cannot have `Acquire` or `AcqRel` ordering
+ .help = consider using ordering modes `Release`, `SeqCst` or `Relaxed`
+
+lint-atomic-ordering-fence = memory fences cannot have `Relaxed` ordering
+ .help = consider using ordering modes `Acquire`, `Release`, `AcqRel` or `SeqCst`
+
+lint-atomic-ordering-invalid = `{$method}`'s failure ordering may not be `Release` or `AcqRel`, since a failed `{$method}` does not result in a write
+ .label = invalid failure ordering
+ .help = consider using `Acquire` or `Relaxed` failure ordering instead
+
+lint-unused-op = unused {$op} that must be used
+ .label = the {$op} produces a value
+ .suggestion = use `let _ = ...` to ignore the resulting value
+
+lint-unused-result = unused result of type `{$ty}`
+
+lint-unused-closure =
+ unused {$pre}{$count ->
+ [one] closure
+ *[other] closures
+ }{$post} that must be used
+ .note = closures are lazy and do nothing unless called
+
+lint-unused-generator =
+ unused {$pre}{$count ->
+ [one] generator
+    *[other] generators
+ }{$post} that must be used
+ .note = generators are lazy and do nothing unless resumed
+
+lint-unused-def = unused {$pre}`{$def}`{$post} that must be used
+
+lint-path-statement-drop = path statement drops value
+ .suggestion = use `drop` to clarify the intent
+
+lint-path-statement-no-effect = path statement with no effect
+
+lint-unused-delim = unnecessary {$delim} around {$item}
+ .suggestion = remove these {$delim}
+
+lint-unused-import-braces = braces around {$node} are unnecessary
+
+lint-unused-allocation = unnecessary allocation, use `&` instead
+lint-unused-allocation-mut = unnecessary allocation, use `&mut` instead
+
+lint-builtin-while-true = denote infinite loops with `loop {"{"} ... {"}"}`
+ .suggestion = use `loop`
+
+lint-builtin-box-pointers = type uses owned (Box type) pointers: {$ty}
+
+lint-builtin-non-shorthand-field-patterns = the `{$ident}:` in this pattern is redundant
+ .suggestion = use shorthand field pattern
+
+lint-builtin-overridden-symbol-name =
+ the linker's behavior with multiple libraries exporting duplicate symbol names is undefined and Rust cannot provide guarantees when you manually override them
+
+lint-builtin-overridden-symbol-section =
+ the program's behavior with overridden link sections on items is unpredictable and Rust cannot provide guarantees when you manually override them
+
+lint-builtin-allow-internal-unsafe =
+ `allow_internal_unsafe` allows defining macros using unsafe without triggering the `unsafe_code` lint at their call site
+
+lint-builtin-unsafe-block = usage of an `unsafe` block
+
+lint-builtin-unsafe-trait = declaration of an `unsafe` trait
+
+lint-builtin-unsafe-impl = implementation of an `unsafe` trait
+
+lint-builtin-no-mangle-fn = declaration of a `no_mangle` function
+lint-builtin-export-name-fn = declaration of a function with `export_name`
+lint-builtin-link-section-fn = declaration of a function with `link_section`
+
+lint-builtin-no-mangle-static = declaration of a `no_mangle` static
+lint-builtin-export-name-static = declaration of a static with `export_name`
+lint-builtin-link-section-static = declaration of a static with `link_section`
+
+lint-builtin-no-mangle-method = declaration of a `no_mangle` method
+lint-builtin-export-name-method = declaration of a method with `export_name`
+
+lint-builtin-decl-unsafe-fn = declaration of an `unsafe` function
+lint-builtin-decl-unsafe-method = declaration of an `unsafe` method
+lint-builtin-impl-unsafe-method = implementation of an `unsafe` method
+
+lint-builtin-missing-doc = missing documentation for {$article} {$desc}
+
+lint-builtin-missing-copy-impl = type could implement `Copy`; consider adding `impl Copy`
+
+lint-builtin-missing-debug-impl =
+ type does not implement `{$debug}`; consider adding `#[derive(Debug)]` or a manual implementation
+
+lint-builtin-anonymous-params = anonymous parameters are deprecated and will be removed in the next edition
+ .suggestion = try naming the parameter or explicitly ignoring it
+
+lint-builtin-deprecated-attr-link = use of deprecated attribute `{$name}`: {$reason}. See {$link}
+lint-builtin-deprecated-attr-used = use of deprecated attribute `{$name}`: no longer used.
+lint-builtin-deprecated-attr-default-suggestion = remove this attribute
+
+lint-builtin-unused-doc-comment = unused doc comment
+ .label = rustdoc does not generate documentation for {$kind}
+ .plain-help = use `//` for a plain comment
+ .block-help = use `/* */` for a plain comment
+
+lint-builtin-no-mangle-generic = functions generic over types or consts must be mangled
+ .suggestion = remove this attribute
+
+lint-builtin-const-no-mangle = const items should never be `#[no_mangle]`
+ .suggestion = try a static value
+
+lint-builtin-mutable-transmutes =
+    transmuting &T to &mut T is undefined behavior, even if the reference is unused; consider using an `UnsafeCell` instead
+
+lint-builtin-unstable-features = unstable feature
+
+lint-builtin-unreachable-pub = unreachable `pub` {$what}
+ .suggestion = consider restricting its visibility
+ .help = or consider exporting it for use by other crates
+
+lint-builtin-type-alias-bounds-help = use fully disambiguated paths (i.e., `<T as Trait>::Assoc`) to refer to associated types in type aliases
+
+lint-builtin-type-alias-where-clause = where clauses are not enforced in type aliases
+ .suggestion = the clause will not be checked when the type alias is used, and should be removed
+
+lint-builtin-type-alias-generic-bounds = bounds on generic parameters are not enforced in type aliases
+ .suggestion = the bound will not be checked when the type alias is used, and should be removed
+
+lint-builtin-trivial-bounds = {$predicate_kind_name} bound {$predicate} does not depend on any type or lifetime parameters
+
+lint-builtin-ellipsis-inclusive-range-patterns = `...` range patterns are deprecated
+ .suggestion = use `..=` for an inclusive range
+
+lint-builtin-unnameable-test-items = cannot test inner items
+
+lint-builtin-keyword-idents = `{$kw}` is a keyword in the {$next} edition
+ .suggestion = you can use a raw identifier to stay compatible
+
+lint-builtin-explicit-outlives = outlives requirements can be inferred
+ .suggestion = remove {$count ->
+ [one] this bound
+ *[other] these bounds
+ }
+
+lint-builtin-incomplete-features = the feature `{$name}` is incomplete and may not be safe to use and/or cause compiler crashes
+ .note = see issue #{$n} <https://github.com/rust-lang/rust/issues/{$n}> for more information
+ .help = consider using `min_{$name}` instead, which is more stable and complete
+
+lint-builtin-clashing-extern-same-name = `{$this_fi}` redeclared with a different signature
+ .previous-decl-label = `{$orig}` previously declared here
+ .mismatch-label = this signature doesn't match the previous declaration
+lint-builtin-clashing-extern-diff-name = `{$this_fi}` redeclares `{$orig}` with a different signature
+ .previous-decl-label = `{$orig}` previously declared here
+ .mismatch-label = this signature doesn't match the previous declaration
+
+lint-builtin-deref-nullptr = dereferencing a null pointer
+ .label = this code causes undefined behavior when executed
+
+lint-builtin-asm-labels = avoid using named labels in inline assembly
diff --git a/compiler/rustc_error_messages/locales/en-US/parser.ftl b/compiler/rustc_error_messages/locales/en-US/parser.ftl
new file mode 100644
index 000000000..076b1b1ca
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/parser.ftl
@@ -0,0 +1,34 @@
+parser-struct-literal-body-without-path =
+ struct literal body without path
+ .suggestion = you might have forgotten to add the struct literal inside the block
+
+parser-maybe-report-ambiguous-plus =
+ ambiguous `+` in a type
+ .suggestion = use parentheses to disambiguate
+
+parser-maybe-recover-from-bad-type-plus =
+ expected a path on the left-hand side of `+`, not `{$ty}`
+
+parser-add-paren = try adding parentheses
+
+parser-forgot-paren = perhaps you forgot parentheses?
+
+parser-expect-path = expected a path
+
+parser-maybe-recover-from-bad-qpath-stage-2 =
+ missing angle brackets in associated item path
+ .suggestion = try: `{$ty}`
+
+parser-incorrect-semicolon =
+ expected item, found `;`
+ .suggestion = remove this semicolon
+ .help = {$name} declarations are not followed by a semicolon
+
+parser-incorrect-use-of-await =
+ incorrect use of `await`
+ .parentheses-suggestion = `await` is not a method call, remove the parentheses
+ .postfix-suggestion = `await` is a postfix operation
+
+parser-in-in-typo =
+ expected iterable, found keyword `in`
+ .suggestion = remove the duplicated `in`
diff --git a/compiler/rustc_error_messages/locales/en-US/passes.ftl b/compiler/rustc_error_messages/locales/en-US/passes.ftl
new file mode 100644
index 000000000..b17eb9c2d
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/passes.ftl
@@ -0,0 +1,264 @@
+-passes-previously-accepted =
+ this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+
+-passes-see-issue =
+ see issue #{$issue} <https://github.com/rust-lang/rust/issues/{$issue}> for more information
+
+passes-outer-crate-level-attr =
+ crate-level attribute should be an inner attribute: add an exclamation mark: `#![foo]`
+
+passes-inner-crate-level-attr =
+ crate-level attribute should be in the root module
+
+passes-ignored-attr-with-macro = `#[{$sym}]` is ignored on struct fields, match arms and macro defs
+ .warn = {-passes-previously-accepted}
+ .note = {-passes-see-issue(issue: "80564")}
+
+passes-ignored-attr = `#[{$sym}]` is ignored on struct fields and match arms
+ .warn = {-passes-previously-accepted}
+ .note = {-passes-see-issue(issue: "80564")}
+
+passes-inline-ignored-function-prototype = `#[inline]` is ignored on function prototypes
+
+passes-inline-ignored-constants = `#[inline]` is ignored on constants
+ .warn = {-passes-previously-accepted}
+ .note = {-passes-see-issue(issue: "65833")}
+
+passes-inline-not-fn-or-closure = attribute should be applied to function or closure
+ .label = not a function or closure
+
+passes-no-coverage-ignored-function-prototype = `#[no_coverage]` is ignored on function prototypes
+
+passes-no-coverage-propagate =
+ `#[no_coverage]` does not propagate into items and must be applied to the contained functions directly
+
+passes-no-coverage-fn-defn = `#[no_coverage]` may only be applied to function definitions
+
+passes-no-coverage-not-coverable = `#[no_coverage]` must be applied to coverable code
+ .label = not coverable code
+
+passes-should-be-applied-to-fn = attribute should be applied to a function definition
+ .label = not a function definition
+
+passes-naked-tracked-caller = cannot use `#[track_caller]` with `#[naked]`
+
+passes-should-be-applied-to-struct-enum = attribute should be applied to a struct or enum
+ .label = not a struct or enum
+
+passes-should-be-applied-to-trait = attribute should be applied to a trait
+ .label = not a trait
+
+passes-target-feature-on-statement = {passes-should-be-applied-to-fn}
+ .warn = {-passes-previously-accepted}
+ .label = {passes-should-be-applied-to-fn.label}
+
+passes-should-be-applied-to-static = attribute should be applied to a static
+ .label = not a static
+
+passes-doc-expect-str = doc {$attr_name} attribute expects a string: #[doc({$attr_name} = "a")]
+
+passes-doc-alias-empty = {$attr_str} attribute cannot have empty value
+
+passes-doc-alias-bad-char = {$char_} character isn't allowed in {$attr_str}
+
+passes-doc-alias-start-end = {$attr_str} cannot start or end with ' '
+
+passes-doc-alias-bad-location = {$attr_str} isn't allowed on {$location}
+
+passes-doc-alias-not-an-alias = {$attr_str} is the same as the item's name
+
+passes-doc-alias-duplicated = doc alias is duplicated
+ .label = first defined here
+
+passes-doc-alias-not-string-literal = `#[doc(alias("a"))]` expects string literals
+
+passes-doc-alias-malformed =
+ doc alias attribute expects a string `#[doc(alias = "a")]` or a list of strings `#[doc(alias("a", "b"))]`
+
+passes-doc-keyword-empty-mod = `#[doc(keyword = "...")]` should be used on empty modules
+
+passes-doc-keyword-not-mod = `#[doc(keyword = "...")]` should be used on modules
+
+passes-doc-keyword-invalid-ident = `{$doc_keyword}` is not a valid identifier
+
+passes-doc-fake-variadic-not-valid =
+ `#[doc(fake_variadic)]` must be used on the first of a set of tuple or fn pointer trait impls with varying arity
+
+passes-doc-keyword-only-impl = `#[doc(keyword = "...")]` should be used on impl blocks
+
+passes-doc-inline-conflict-first = this attribute...
+passes-doc-inline-conflict-second = ...conflicts with this attribute
+passes-doc-inline-conflict = conflicting doc inlining attributes
+ .help = remove one of the conflicting attributes
+
+passes-doc-inline-only-use = this attribute can only be applied to a `use` item
+ .label = only applicable on `use` items
+ .not-a-use-item-label = not a `use` item
+ .note = read <https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#inline-and-no_inline> for more information
+
+passes-doc-attr-not-crate-level =
+ `#![doc({$attr_name} = "...")]` isn't allowed as a crate-level attribute
+
+passes-attr-crate-level = this attribute can only be applied at the crate level
+ .suggestion = to apply to the crate, use an inner attribute
+ .help = to apply to the crate, use an inner attribute
+ .note = read <https://doc.rust-lang.org/nightly/rustdoc/the-doc-attribute.html#at-the-crate-level> for more information
+
+passes-doc-test-unknown = unknown `doc(test)` attribute `{$path}`
+
+passes-doc-test-takes-list = `#[doc(test(...))]` takes a list of attributes
+
+passes-doc-primitive = `doc(primitive)` should never have been stable
+
+passes-doc-test-unknown-any = unknown `doc` attribute `{$path}`
+
+passes-doc-test-unknown-spotlight = unknown `doc` attribute `{$path}`
+ .note = `doc(spotlight)` was renamed to `doc(notable_trait)`
+ .suggestion = use `notable_trait` instead
+ .no-op-note = `doc(spotlight)` is now a no-op
+
+passes-doc-test-unknown-include = unknown `doc` attribute `{$path}`
+ .suggestion = use `doc = include_str!` instead
+
+passes-doc-invalid = invalid `doc` attribute
+
+passes-pass-by-value = `pass_by_value` attribute should be applied to a struct, enum or type alias
+ .label = is not a struct, enum or type alias
+
+passes-allow-incoherent-impl =
+ `rustc_allow_incoherent_impl` attribute should be applied to impl items.
+ .label = the only currently supported targets are inherent methods
+
+passes-has-incoherent-inherent-impl =
+ `rustc_has_incoherent_inherent_impls` attribute should be applied to types or traits.
+    .label = only ADTs, extern types and traits are supported
+
+passes-must-use-async =
+ `must_use` attribute on `async` functions applies to the anonymous `Future` returned by the function, not the value within
+ .label = this attribute does nothing, the `Future`s returned by async functions are already `must_use`
+
+passes-must-use-no-effect = `#[must_use]` has no effect when applied to {$article} {$target}
+
+passes-must-not-suspend = `must_not_suspend` attribute should be applied to a struct, enum, or trait
+ .label = is not a struct, enum, or trait
+
+passes-cold = {passes-should-be-applied-to-fn}
+ .warn = {-passes-previously-accepted}
+ .label = {passes-should-be-applied-to-fn.label}
+
+passes-link = attribute should be applied to an `extern` block with non-Rust ABI
+ .warn = {-passes-previously-accepted}
+ .label = not an `extern` block
+
+passes-link-name = attribute should be applied to a foreign function or static
+ .warn = {-passes-previously-accepted}
+ .label = not a foreign function or static
+ .help = try `#[link(name = "{$value}")]` instead
+
+passes-no-link = attribute should be applied to an `extern crate` item
+ .label = not an `extern crate` item
+
+passes-export-name = attribute should be applied to a free function, impl method or static
+ .label = not a free function, impl method or static
+
+passes-rustc-layout-scalar-valid-range-not-struct = attribute should be applied to a struct
+ .label = not a struct
+
+passes-rustc-layout-scalar-valid-range-arg = expected exactly one integer literal argument
+
+passes-rustc-legacy-const-generics-only = #[rustc_legacy_const_generics] functions must only have const generics
+ .label = non-const generic parameter
+
+passes-rustc-legacy-const-generics-index = #[rustc_legacy_const_generics] must have one index for each generic parameter
+ .label = generic parameters
+
+passes-rustc-legacy-const-generics-index-exceed = index exceeds number of arguments
+ .label = there {$arg_count ->
+ [one] is
+ *[other] are
+ } only {$arg_count} {$arg_count ->
+ [one] argument
+ *[other] arguments
+ }
+
+passes-rustc-legacy-const-generics-index-negative = arguments should be non-negative integers
+
+passes-rustc-dirty-clean = attribute requires -Z query-dep-graph to be enabled
+
+passes-link-section = attribute should be applied to a function or static
+ .warn = {-passes-previously-accepted}
+ .label = not a function or static
+
+passes-no-mangle-foreign = `#[no_mangle]` has no effect on a foreign {$foreign_item_kind}
+ .warn = {-passes-previously-accepted}
+ .label = foreign {$foreign_item_kind}
+ .note = symbol names in extern blocks are not mangled
+ .suggestion = remove this attribute
+
+passes-no-mangle = attribute should be applied to a free function, impl method or static
+ .warn = {-passes-previously-accepted}
+ .label = not a free function, impl method or static
+
+passes-repr-ident = meta item in `repr` must be an identifier
+
+passes-repr-conflicting = conflicting representation hints
+
+passes-used-static = attribute must be applied to a `static` variable
+
+passes-used-compiler-linker = `used(compiler)` and `used(linker)` can't be used together
+
+passes-allow-internal-unstable = attribute should be applied to a macro
+ .label = not a macro
+
+passes-debug-visualizer-placement = attribute should be applied to a module
+
+passes-debug-visualizer-invalid = invalid argument
+ .note-1 = expected: `natvis_file = "..."`
+ .note-2 = OR
+ .note-3 = expected: `gdb_script_file = "..."`
+
+passes-rustc-allow-const-fn-unstable = attribute should be applied to `const fn`
+ .label = not a `const fn`
+
+passes-rustc-std-internal-symbol = attribute should be applied to functions or statics
+ .label = not a function or static
+
+passes-const-trait = attribute should be applied to a trait
+
+passes-stability-promotable = attribute cannot be applied to an expression
+
+passes-deprecated = attribute is ignored here
+
+passes-macro-use = `#[{$name}]` only has an effect on `extern crate` and modules
+
+passes-macro-export = `#[macro_export]` only has an effect on macro definitions
+
+passes-plugin-registrar = `#[plugin_registrar]` only has an effect on functions
+
+passes-unused-empty-lints-note = attribute `{$name}` with an empty list has no effect
+
+passes-unused-no-lints-note = attribute `{$name}` without any lints has no effect
+
+passes-unused-default-method-body-const-note =
+ `default_method_body_is_const` has been replaced with `#[const_trait]` on traits
+
+passes-unused = unused attribute
+ .suggestion = remove this attribute
+
+passes-non-exported-macro-invalid-attrs = attribute should be applied to function or closure
+ .label = not a function or closure
+
+passes-unused-duplicate = unused attribute
+ .suggestion = remove this attribute
+ .note = attribute also specified here
+ .warn = {-passes-previously-accepted}
+
+passes-unused-multiple = multiple `{$name}` attributes
+ .suggestion = remove this attribute
+ .note = attribute also specified here
+
+passes-rustc-lint-opt-ty = `#[rustc_lint_opt_ty]` should be applied to a struct
+ .label = not a struct
+
+passes-rustc-lint-opt-deny-field-access = `#[rustc_lint_opt_deny_field_access]` should be applied to a field
+ .label = not a field
diff --git a/compiler/rustc_error_messages/locales/en-US/privacy.ftl b/compiler/rustc_error_messages/locales/en-US/privacy.ftl
new file mode 100644
index 000000000..f8a750da9
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/privacy.ftl
@@ -0,0 +1,21 @@
+privacy-field-is-private = field `{$field_name}` of {$variant_descr} `{$def_path_str}` is private
+privacy-field-is-private-is-update-syntax-label = field `{$field_name}` is private
+privacy-field-is-private-label = private field
+
+privacy-item-is-private = {$kind} `{$descr}` is private
+ .label = private {$kind}
+privacy-unnamed-item-is-private = {$kind} is private
+ .label = private {$kind}
+
+privacy-in-public-interface = {$vis_descr} {$kind} `{$descr}` in public interface
+ .label = can't leak {$vis_descr} {$kind}
+ .visibility-label = `{$descr}` declared as {$vis_descr}
+
+privacy-from-private-dep-in-public-interface =
+ {$kind} `{$descr}` from private dependency '{$krate}' in public interface
+
+private-in-public-lint =
+ {$vis_descr} {$kind} `{$descr}` in public interface (error {$kind ->
+ [trait] E0445
+ *[other] E0446
+ })
diff --git a/compiler/rustc_error_messages/locales/en-US/typeck.ftl b/compiler/rustc_error_messages/locales/en-US/typeck.ftl
new file mode 100644
index 000000000..c61735a57
--- /dev/null
+++ b/compiler/rustc_error_messages/locales/en-US/typeck.ftl
@@ -0,0 +1,125 @@
+typeck-field-multiply-specified-in-initializer =
+ field `{$ident}` specified more than once
+ .label = used more than once
+ .previous-use-label = first use of `{$ident}`
+
+typeck-unrecognized-atomic-operation =
+ unrecognized atomic operation function: `{$op}`
+ .label = unrecognized atomic operation
+
+typeck-wrong-number-of-generic-arguments-to-intrinsic =
+ intrinsic has wrong number of {$descr} parameters: found {$found}, expected {$expected}
+ .label = expected {$expected} {$descr} {$expected ->
+ [one] parameter
+ *[other] parameters
+ }
+
+typeck-unrecognized-intrinsic-function =
+ unrecognized intrinsic function: `{$name}`
+ .label = unrecognized intrinsic
+
+typeck-lifetimes-or-bounds-mismatch-on-trait =
+ lifetime parameters or bounds on {$item_kind} `{$ident}` do not match the trait declaration
+ .label = lifetimes do not match {$item_kind} in trait
+ .generics-label = lifetimes in impl do not match this {$item_kind} in trait
+
+typeck-drop-impl-on-wrong-item =
+ the `Drop` trait may only be implemented for structs, enums, and unions
+ .label = must be a struct, enum, or union
+
+typeck-field-already-declared =
+ field `{$field_name}` is already declared
+ .label = field already declared
+ .previous-decl-label = `{$field_name}` first declared here
+
+typeck-copy-impl-on-type-with-dtor =
+ the trait `Copy` may not be implemented for this type; the type has a destructor
+ .label = `Copy` not allowed on types with destructors
+
+typeck-multiple-relaxed-default-bounds =
+ type parameter has more than one relaxed default bound, only one is supported
+
+typeck-copy-impl-on-non-adt =
+ the trait `Copy` may not be implemented for this type
+ .label = type is not a structure or enumeration
+
+typeck-trait-object-declared-with-no-traits =
+ at least one trait is required for an object type
+ .alias-span = this alias does not contain a trait
+
+typeck-ambiguous-lifetime-bound =
+ ambiguous lifetime bound, explicit lifetime bound required
+
+typeck-assoc-type-binding-not-allowed =
+ associated type bindings are not allowed here
+ .label = associated type not allowed here
+
+typeck-functional-record-update-on-non-struct =
+ functional record update syntax requires a struct
+
+typeck-typeof-reserved-keyword-used =
+ `typeof` is a reserved keyword but unimplemented
+ .suggestion = consider replacing `typeof(...)` with an actual type
+ .label = reserved keyword
+
+typeck-return-stmt-outside-of-fn-body =
+ return statement outside of function body
+ .encl-body-label = the return is part of this body...
+ .encl-fn-label = ...not the enclosing function body
+
+typeck-yield-expr-outside-of-generator =
+ yield expression outside of generator literal
+
+typeck-struct-expr-non-exhaustive =
+ cannot create non-exhaustive {$what} using struct expression
+
+typeck-method-call-on-unknown-type =
+    the type of this value must be known in order to call a method on it through a raw pointer
+
+typeck-value-of-associated-struct-already-specified =
+ the value of the associated type `{$item_name}` (from trait `{$def_path}`) is already specified
+ .label = re-bound here
+ .previous-bound-label = `{$item_name}` bound here first
+
+typeck-address-of-temporary-taken = cannot take address of a temporary
+ .label = temporary value
+
+typeck-add-return-type-add = try adding a return type
+
+typeck-add-return-type-missing-here = a return type might be missing here
+
+typeck-expected-default-return-type = expected `()` because of default return type
+
+typeck-expected-return-type = expected `{$expected}` because of return type
+
+typeck-unconstrained-opaque-type = unconstrained opaque type
+ .note = `{$name}` must be used in combination with a concrete type within the same module
+
+typeck-missing-type-params =
+ the type {$parameterCount ->
+ [one] parameter
+ *[other] parameters
+ } {$parameters} must be explicitly specified
+ .label = type {$parameterCount ->
+ [one] parameter
+ *[other] parameters
+ } {$parameters} must be specified for this
+ .suggestion = set the type {$parameterCount ->
+ [one] parameter
+ *[other] parameters
+ } to the desired {$parameterCount ->
+ [one] type
+ *[other] types
+ }
+ .no-suggestion-label = missing {$parameterCount ->
+ [one] reference
+ *[other] references
+ } to {$parameters}
+ .note = because of the default `Self` reference, type parameters must be specified on object types
+
+typeck-manual-implementation =
+ manual implementations of `{$trait_name}` are experimental
+ .label = manual implementations of `{$trait_name}` are experimental
+ .help = add `#![feature(unboxed_closures)]` to the crate attributes to enable
+
+typeck-substs-on-overridden-impl = could not resolve substs on overridden impl
diff --git a/compiler/rustc_error_messages/src/lib.rs b/compiler/rustc_error_messages/src/lib.rs
new file mode 100644
index 000000000..02bb04d98
--- /dev/null
+++ b/compiler/rustc_error_messages/src/lib.rs
@@ -0,0 +1,501 @@
+#![feature(let_chains)]
+#![feature(once_cell)]
+#![feature(rustc_attrs)]
+#![feature(type_alias_impl_trait)]
+
+use fluent_bundle::FluentResource;
+use fluent_syntax::parser::ParserError;
+use rustc_data_structures::sync::Lrc;
+use rustc_macros::{fluent_messages, Decodable, Encodable};
+use rustc_span::Span;
+use std::borrow::Cow;
+use std::error::Error;
+use std::fmt;
+use std::fs;
+use std::io;
+use std::path::{Path, PathBuf};
+use tracing::{instrument, trace};
+
+#[cfg(not(parallel_compiler))]
+use std::cell::LazyCell as Lazy;
+#[cfg(parallel_compiler)]
+use std::sync::LazyLock as Lazy;
+
+#[cfg(parallel_compiler)]
+use intl_memoizer::concurrent::IntlLangMemoizer;
+#[cfg(not(parallel_compiler))]
+use intl_memoizer::IntlLangMemoizer;
+
+pub use fluent_bundle::{FluentArgs, FluentError, FluentValue};
+pub use unic_langid::{langid, LanguageIdentifier};
+
+// Generates `DEFAULT_LOCALE_RESOURCES` static and `fluent_generated` module.
+fluent_messages! {
+ borrowck => "../locales/en-US/borrowck.ftl",
+ builtin_macros => "../locales/en-US/builtin_macros.ftl",
+ const_eval => "../locales/en-US/const_eval.ftl",
+ expand => "../locales/en-US/expand.ftl",
+ lint => "../locales/en-US/lint.ftl",
+ parser => "../locales/en-US/parser.ftl",
+ passes => "../locales/en-US/passes.ftl",
+ privacy => "../locales/en-US/privacy.ftl",
+ typeck => "../locales/en-US/typeck.ftl",
+}
+
+pub use fluent_generated::{self as fluent, DEFAULT_LOCALE_RESOURCES};
+
+pub type FluentBundle = fluent_bundle::bundle::FluentBundle<FluentResource, IntlLangMemoizer>;
+
+#[cfg(parallel_compiler)]
+fn new_bundle(locales: Vec<LanguageIdentifier>) -> FluentBundle {
+ FluentBundle::new_concurrent(locales)
+}
+
+#[cfg(not(parallel_compiler))]
+fn new_bundle(locales: Vec<LanguageIdentifier>) -> FluentBundle {
+ FluentBundle::new(locales)
+}
+
+#[derive(Debug)]
+pub enum TranslationBundleError {
+ /// Failed to read from `.ftl` file.
+ ReadFtl(io::Error),
+ /// Failed to parse contents of `.ftl` file.
+ ParseFtl(ParserError),
+ /// Failed to add `FluentResource` to `FluentBundle`.
+ AddResource(FluentError),
+ /// `$sysroot/share/locale/$locale` does not exist.
+ MissingLocale,
+ /// Cannot read directory entries of `$sysroot/share/locale/$locale`.
+ ReadLocalesDir(io::Error),
+ /// Cannot read directory entry of `$sysroot/share/locale/$locale`.
+ ReadLocalesDirEntry(io::Error),
+ /// `$sysroot/share/locale/$locale` is not a directory.
+ LocaleIsNotDir,
+}
+
+impl fmt::Display for TranslationBundleError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ TranslationBundleError::ReadFtl(e) => write!(f, "could not read ftl file: {}", e),
+ TranslationBundleError::ParseFtl(e) => {
+ write!(f, "could not parse ftl file: {}", e)
+ }
+ TranslationBundleError::AddResource(e) => write!(f, "failed to add resource: {}", e),
+ TranslationBundleError::MissingLocale => write!(f, "missing locale directory"),
+ TranslationBundleError::ReadLocalesDir(e) => {
+ write!(f, "could not read locales dir: {}", e)
+ }
+ TranslationBundleError::ReadLocalesDirEntry(e) => {
+ write!(f, "could not read locales dir entry: {}", e)
+ }
+ TranslationBundleError::LocaleIsNotDir => {
+                write!(f, "`$sysroot/share/locale/$locale` is not a directory")
+ }
+ }
+ }
+}
+
+impl Error for TranslationBundleError {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ match self {
+ TranslationBundleError::ReadFtl(e) => Some(e),
+ TranslationBundleError::ParseFtl(e) => Some(e),
+ TranslationBundleError::AddResource(e) => Some(e),
+ TranslationBundleError::MissingLocale => None,
+ TranslationBundleError::ReadLocalesDir(e) => Some(e),
+ TranslationBundleError::ReadLocalesDirEntry(e) => Some(e),
+ TranslationBundleError::LocaleIsNotDir => None,
+ }
+ }
+}
+
+impl From<(FluentResource, Vec<ParserError>)> for TranslationBundleError {
+ fn from((_, mut errs): (FluentResource, Vec<ParserError>)) -> Self {
+ TranslationBundleError::ParseFtl(errs.pop().expect("failed ftl parse with no errors"))
+ }
+}
+
+impl From<Vec<FluentError>> for TranslationBundleError {
+ fn from(mut errs: Vec<FluentError>) -> Self {
+ TranslationBundleError::AddResource(
+ errs.pop().expect("failed adding resource to bundle with no errors"),
+ )
+ }
+}
+
+/// Returns Fluent bundle with the user's locale resources from
+/// `$sysroot/share/locale/$requested_locale/*.ftl`.
+///
+/// If `-Z additional-ftl-path` was provided, load that resource and add it to the bundle
+/// (overriding any conflicting messages).
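+///
+/// A hedged usage sketch; the argument values here are illustrative:
+///
+/// ```ignore (illustrative)
+/// let bundle = fluent_bundle(
+///     /* user_provided_sysroot */ None,
+///     /* sysroot_candidates */ vec![],
+///     Some(langid!("fr")),
+///     /* additional_ftl_path */ None,
+///     /* with_directionality_markers */ false,
+/// )?;
+/// ```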
+#[instrument(level = "trace")]
+pub fn fluent_bundle(
+ mut user_provided_sysroot: Option<PathBuf>,
+ mut sysroot_candidates: Vec<PathBuf>,
+ requested_locale: Option<LanguageIdentifier>,
+ additional_ftl_path: Option<&Path>,
+ with_directionality_markers: bool,
+) -> Result<Option<Lrc<FluentBundle>>, TranslationBundleError> {
+ if requested_locale.is_none() && additional_ftl_path.is_none() {
+ return Ok(None);
+ }
+
+ let fallback_locale = langid!("en-US");
+ let requested_fallback_locale = requested_locale.as_ref() == Some(&fallback_locale);
+
+ // If there is only `-Z additional-ftl-path`, assume locale is "en-US", otherwise use user
+ // provided locale.
+ let locale = requested_locale.clone().unwrap_or(fallback_locale);
+ trace!(?locale);
+ let mut bundle = new_bundle(vec![locale]);
+
+ // Fluent diagnostics can insert directionality isolation markers around interpolated variables
+ // indicating that there may be a shift from right-to-left to left-to-right text (or
+ // vice-versa). These are disabled because they are sometimes visible in the error output, but
+ // may be worth investigating in future (for example: if type names are left-to-right and the
+ // surrounding diagnostic messages are right-to-left, then these might be helpful).
+ bundle.set_use_isolating(with_directionality_markers);
+
+ // If the user requests the default locale then don't try to load anything.
+ if !requested_fallback_locale && let Some(requested_locale) = requested_locale {
+ let mut found_resources = false;
+ for sysroot in user_provided_sysroot.iter_mut().chain(sysroot_candidates.iter_mut()) {
+ sysroot.push("share");
+ sysroot.push("locale");
+ sysroot.push(requested_locale.to_string());
+ trace!(?sysroot);
+
+ if !sysroot.exists() {
+ trace!("skipping");
+ continue;
+ }
+
+ if !sysroot.is_dir() {
+ return Err(TranslationBundleError::LocaleIsNotDir);
+ }
+
+ for entry in sysroot.read_dir().map_err(TranslationBundleError::ReadLocalesDir)? {
+ let entry = entry.map_err(TranslationBundleError::ReadLocalesDirEntry)?;
+ let path = entry.path();
+ trace!(?path);
+ if path.extension().and_then(|s| s.to_str()) != Some("ftl") {
+ trace!("skipping");
+ continue;
+ }
+
+ let resource_str =
+ fs::read_to_string(path).map_err(TranslationBundleError::ReadFtl)?;
+ let resource =
+ FluentResource::try_new(resource_str).map_err(TranslationBundleError::from)?;
+ trace!(?resource);
+ bundle.add_resource(resource).map_err(TranslationBundleError::from)?;
+ found_resources = true;
+ }
+ }
+
+ if !found_resources {
+ return Err(TranslationBundleError::MissingLocale);
+ }
+ }
+
+ if let Some(additional_ftl_path) = additional_ftl_path {
+ let resource_str =
+ fs::read_to_string(additional_ftl_path).map_err(TranslationBundleError::ReadFtl)?;
+ let resource =
+ FluentResource::try_new(resource_str).map_err(TranslationBundleError::from)?;
+ trace!(?resource);
+ bundle.add_resource_overriding(resource);
+ }
+
+ let bundle = Lrc::new(bundle);
+ Ok(Some(bundle))
+}
+
+/// Type alias for the result of `fallback_fluent_bundle` - a reference-counted pointer to a lazily
+/// evaluated Fluent bundle.
+pub type LazyFallbackBundle = Lrc<Lazy<FluentBundle, impl FnOnce() -> FluentBundle>>;
+
+/// Returns the default `FluentBundle` with standard "en-US" diagnostic messages.
+#[instrument(level = "trace")]
+pub fn fallback_fluent_bundle(
+ resources: &'static [&'static str],
+ with_directionality_markers: bool,
+) -> LazyFallbackBundle {
+ Lrc::new(Lazy::new(move || {
+ let mut fallback_bundle = new_bundle(vec![langid!("en-US")]);
+ // See comment in `fluent_bundle`.
+ fallback_bundle.set_use_isolating(with_directionality_markers);
+
+ for resource in resources {
+ let resource = FluentResource::try_new(resource.to_string())
+ .expect("failed to parse fallback fluent resource");
+ trace!(?resource);
+ fallback_bundle.add_resource_overriding(resource);
+ }
+
+ fallback_bundle
+ }))
+}
+
+/// Identifier for the Fluent message/attribute corresponding to a diagnostic message.
+type FluentId = Cow<'static, str>;
+
+/// Abstraction over a message in a subdiagnostic (i.e. label, note, help, etc.) to support both
+/// translatable and non-translatable diagnostic messages.
+///
+/// Translatable messages for subdiagnostics are typically attributes attached to a larger Fluent
+/// message, so messages of this type must be combined with a `DiagnosticMessage` (using
+/// `DiagnosticMessage::with_subdiagnostic_message`) before rendering. However, subdiagnostics from
+/// the `SessionSubdiagnostic` derive refer to Fluent identifiers directly.
+#[rustc_diagnostic_item = "SubdiagnosticMessage"]
+pub enum SubdiagnosticMessage {
+ /// Non-translatable diagnostic message.
+ // FIXME(davidtwco): can a `Cow<'static, str>` be used here?
+ Str(String),
+ /// Identifier of a Fluent message. Instances of this variant are generated by the
+ /// `SessionSubdiagnostic` derive.
+ FluentIdentifier(FluentId),
+ /// Attribute of a Fluent message. Needs to be combined with a Fluent identifier to produce an
+ /// actual translated message. Instances of this variant are generated by the `fluent_messages`
+ /// macro.
+ ///
+ /// <https://projectfluent.org/fluent/guide/attributes.html>
+ FluentAttr(FluentId),
+}
+
+/// `From` impl that enables existing diagnostic calls to functions which now take
+/// `impl Into<SubdiagnosticMessage>` to continue to work as before.
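+///
+/// A sketch (the message text is illustrative):
+///
+/// ```ignore (illustrative sketch)
+/// let msg: SubdiagnosticMessage = "expected `;`".into();
+/// assert!(matches!(msg, SubdiagnosticMessage::Str(_)));
+/// ```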
+impl<S: Into<String>> From<S> for SubdiagnosticMessage {
+ fn from(s: S) -> Self {
+ SubdiagnosticMessage::Str(s.into())
+ }
+}
+
+/// Abstraction over a message in a diagnostic to support both translatable and non-translatable
+/// diagnostic messages.
+///
+/// Intended to be removed once diagnostics are entirely translatable.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
+#[rustc_diagnostic_item = "DiagnosticMessage"]
+pub enum DiagnosticMessage {
+ /// Non-translatable diagnostic message.
+ // FIXME(davidtwco): can a `Cow<'static, str>` be used here?
+ Str(String),
+ /// Identifier for a Fluent message (with optional attribute) corresponding to the diagnostic
+ /// message.
+ ///
+ /// <https://projectfluent.org/fluent/guide/hello.html>
+ /// <https://projectfluent.org/fluent/guide/attributes.html>
+ FluentIdentifier(FluentId, Option<FluentId>),
+}
+
+impl DiagnosticMessage {
+ /// Given a `SubdiagnosticMessage` which may contain a Fluent attribute, create a new
+ /// `DiagnosticMessage` that combines that attribute with the Fluent identifier of `self`.
+ ///
+ /// - If the `SubdiagnosticMessage` is non-translatable then return the message as a
+ /// `DiagnosticMessage`.
+ /// - If `self` is non-translatable then return `self`'s message.
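+    ///
+    /// A sketch of the combination (the Fluent identifier and attribute names are illustrative):
+    ///
+    /// ```ignore (illustrative sketch)
+    /// // self: FluentIdentifier("parser-expected-token", None)
+    /// // sub:  FluentAttr("label")
+    /// // out:  FluentIdentifier("parser-expected-token", Some("label"))
+    /// ```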
+ pub fn with_subdiagnostic_message(&self, sub: SubdiagnosticMessage) -> Self {
+ let attr = match sub {
+ SubdiagnosticMessage::Str(s) => return DiagnosticMessage::Str(s),
+ SubdiagnosticMessage::FluentIdentifier(id) => {
+ return DiagnosticMessage::FluentIdentifier(id, None);
+ }
+ SubdiagnosticMessage::FluentAttr(attr) => attr,
+ };
+
+ match self {
+ DiagnosticMessage::Str(s) => DiagnosticMessage::Str(s.clone()),
+ DiagnosticMessage::FluentIdentifier(id, _) => {
+ DiagnosticMessage::FluentIdentifier(id.clone(), Some(attr))
+ }
+ }
+ }
+
+ /// Returns the `String` contained within the `DiagnosticMessage::Str` variant, assuming that
+ /// this diagnostic message is of the legacy, non-translatable variety. Panics if this
+ /// assumption does not hold.
+ ///
+ /// Don't use this - it exists to support some places that do comparison with diagnostic
+ /// strings.
+ pub fn expect_str(&self) -> &str {
+ match self {
+ DiagnosticMessage::Str(s) => s,
+ _ => panic!("expected non-translatable diagnostic message"),
+ }
+ }
+}
+
+/// `From` impl that enables existing diagnostic calls to functions which now take
+/// `impl Into<DiagnosticMessage>` to continue to work as before.
+impl<S: Into<String>> From<S> for DiagnosticMessage {
+ fn from(s: S) -> Self {
+ DiagnosticMessage::Str(s.into())
+ }
+}
+
+/// Translating *into* a subdiagnostic message from a diagnostic message is a little strange - but
+/// the subdiagnostic functions (e.g. `span_label`) take a `SubdiagnosticMessage` and the
+/// subdiagnostic derive refers to typed identifiers that are `DiagnosticMessage`s, so we need to
+/// be able to convert between these, even though they'll eventually be converted back into
+/// `DiagnosticMessage`s using `with_subdiagnostic_message`. Don't use this other than for the
+/// derive.
+impl Into<SubdiagnosticMessage> for DiagnosticMessage {
+ fn into(self) -> SubdiagnosticMessage {
+ match self {
+ DiagnosticMessage::Str(s) => SubdiagnosticMessage::Str(s),
+ DiagnosticMessage::FluentIdentifier(id, None) => {
+ SubdiagnosticMessage::FluentIdentifier(id)
+ }
+ // There isn't really a sensible behaviour for this because it loses information but
+ // this is the most sensible of the behaviours.
+ DiagnosticMessage::FluentIdentifier(_, Some(attr)) => {
+ SubdiagnosticMessage::FluentAttr(attr)
+ }
+ }
+ }
+}
+
+/// A span together with some additional data.
+#[derive(Clone, Debug)]
+pub struct SpanLabel {
+ /// The span we are going to include in the final snippet.
+ pub span: Span,
+
+ /// Is this a primary span? This is the "locus" of the message,
+ /// and is indicated with a `^^^^` underline, versus `----`.
+ pub is_primary: bool,
+
+ /// What label should we attach to this span (if any)?
+ pub label: Option<DiagnosticMessage>,
+}
+
+/// A collection of `Span`s.
+///
+/// Spans have two orthogonal attributes:
+///
+/// - They can be *primary spans*. In this case they are the locus of
+/// the error, and would be rendered with `^^^`.
+/// - They can have a *label*. In this case, the label is written next
+/// to the mark in the snippet when we render.
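+///
+/// A sketch of typical construction (the spans are illustrative):
+///
+/// ```ignore (illustrative sketch)
+/// let mut msp = MultiSpan::from_span(primary_span);
+/// msp.push_span_label(other_span, "value moved here");
+/// ```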
+#[derive(Clone, Debug, Hash, PartialEq, Eq, Encodable, Decodable)]
+pub struct MultiSpan {
+ primary_spans: Vec<Span>,
+ span_labels: Vec<(Span, DiagnosticMessage)>,
+}
+
+impl MultiSpan {
+ #[inline]
+ pub fn new() -> MultiSpan {
+ MultiSpan { primary_spans: vec![], span_labels: vec![] }
+ }
+
+ pub fn from_span(primary_span: Span) -> MultiSpan {
+ MultiSpan { primary_spans: vec![primary_span], span_labels: vec![] }
+ }
+
+ pub fn from_spans(mut vec: Vec<Span>) -> MultiSpan {
+ vec.sort();
+ MultiSpan { primary_spans: vec, span_labels: vec![] }
+ }
+
+ pub fn push_span_label(&mut self, span: Span, label: impl Into<DiagnosticMessage>) {
+ self.span_labels.push((span, label.into()));
+ }
+
+ /// Selects the first primary span (if any).
+ pub fn primary_span(&self) -> Option<Span> {
+ self.primary_spans.first().cloned()
+ }
+
+ /// Returns all primary spans.
+ pub fn primary_spans(&self) -> &[Span] {
+ &self.primary_spans
+ }
+
+ /// Returns `true` if any of the primary spans are displayable.
+ pub fn has_primary_spans(&self) -> bool {
+ !self.is_dummy()
+ }
+
+    /// Returns `true` if all of the primary spans are dummy spans (with any hygienic context).
+ pub fn is_dummy(&self) -> bool {
+ self.primary_spans.iter().all(|sp| sp.is_dummy())
+ }
+
+ /// Replaces all occurrences of one Span with another. Used to move `Span`s in areas that don't
+ /// display well (like std macros). Returns whether replacements occurred.
+ pub fn replace(&mut self, before: Span, after: Span) -> bool {
+ let mut replacements_occurred = false;
+ for primary_span in &mut self.primary_spans {
+ if *primary_span == before {
+ *primary_span = after;
+ replacements_occurred = true;
+ }
+ }
+ for span_label in &mut self.span_labels {
+ if span_label.0 == before {
+ span_label.0 = after;
+ replacements_occurred = true;
+ }
+ }
+ replacements_occurred
+ }
+
+ /// Returns the strings to highlight. We always ensure that there
+ /// is an entry for each of the primary spans -- for each primary
+ /// span `P`, if there is at least one label with span `P`, we return
+ /// those labels (marked as primary). But otherwise we return
+ /// `SpanLabel` instances with empty labels.
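+    ///
+    /// A sketch (spans `a` and `b` are illustrative):
+    ///
+    /// ```ignore (illustrative sketch)
+    /// let mut msp = MultiSpan::from_spans(vec![a, b]);
+    /// msp.push_span_label(a, "label for a");
+    /// // span_labels() returns a labelled, primary `SpanLabel` for `a` and an
+    /// // empty-label, primary `SpanLabel` for `b`.
+    /// ```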
+ pub fn span_labels(&self) -> Vec<SpanLabel> {
+ let is_primary = |span| self.primary_spans.contains(&span);
+
+ let mut span_labels = self
+ .span_labels
+ .iter()
+ .map(|&(span, ref label)| SpanLabel {
+ span,
+ is_primary: is_primary(span),
+ label: Some(label.clone()),
+ })
+ .collect::<Vec<_>>();
+
+ for &span in &self.primary_spans {
+ if !span_labels.iter().any(|sl| sl.span == span) {
+ span_labels.push(SpanLabel { span, is_primary: true, label: None });
+ }
+ }
+
+ span_labels
+ }
+
+ /// Returns `true` if any of the span labels is displayable.
+ pub fn has_span_labels(&self) -> bool {
+ self.span_labels.iter().any(|(sp, _)| !sp.is_dummy())
+ }
+}
+
+impl From<Span> for MultiSpan {
+ fn from(span: Span) -> MultiSpan {
+ MultiSpan::from_span(span)
+ }
+}
+
+impl From<Vec<Span>> for MultiSpan {
+ fn from(spans: Vec<Span>) -> MultiSpan {
+ MultiSpan::from_spans(spans)
+ }
+}
diff --git a/compiler/rustc_errors/Cargo.toml b/compiler/rustc_errors/Cargo.toml
new file mode 100644
index 000000000..7d7e92c52
--- /dev/null
+++ b/compiler/rustc_errors/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "rustc_errors"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+tracing = "0.1"
+rustc_error_messages = { path = "../rustc_error_messages" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_span = { path = "../rustc_span" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_lint_defs = { path = "../rustc_lint_defs" }
+unicode-width = "0.1.4"
+atty = "0.2"
+termcolor = "1.0"
+annotate-snippets = "0.8.0"
+termize = "0.1.1"
+serde = { version = "1.0.125", features = ["derive"] }
+serde_json = "1.0.59"
+
+[target.'cfg(windows)'.dependencies]
+winapi = { version = "0.3", features = ["handleapi", "synchapi", "winbase"] }
diff --git a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
new file mode 100644
index 000000000..0fcd61d1e
--- /dev/null
+++ b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs
@@ -0,0 +1,215 @@
+//! Emit diagnostics using the `annotate-snippets` library
+//!
+//! This is the equivalent of `./emitter.rs` but making use of the
+//! [`annotate-snippets`][annotate_snippets] library instead of building the output ourselves.
+//!
+//! [annotate_snippets]: https://docs.rs/crate/annotate-snippets/
+
+use crate::emitter::FileWithAnnotatedLines;
+use crate::snippet::Line;
+use crate::{
+ CodeSuggestion, Diagnostic, DiagnosticId, DiagnosticMessage, Emitter, FluentBundle,
+ LazyFallbackBundle, Level, MultiSpan, Style, SubDiagnostic,
+};
+use annotate_snippets::display_list::{DisplayList, FormatOptions};
+use annotate_snippets::snippet::*;
+use rustc_data_structures::sync::Lrc;
+use rustc_error_messages::FluentArgs;
+use rustc_span::source_map::SourceMap;
+use rustc_span::SourceFile;
+
+/// Generates diagnostics using the `annotate-snippets` library
+pub struct AnnotateSnippetEmitterWriter {
+ source_map: Option<Lrc<SourceMap>>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+
+ /// If true, hides the longer explanation text
+ short_message: bool,
+ /// If true, will normalize line numbers with `LL` to prevent noise in UI test diffs.
+ ui_testing: bool,
+
+ macro_backtrace: bool,
+}
+
+impl Emitter for AnnotateSnippetEmitterWriter {
+    /// The entry point for diagnostic generation.
+ fn emit_diagnostic(&mut self, diag: &Diagnostic) {
+ let fluent_args = self.to_fluent_args(diag.args());
+
+ let mut children = diag.children.clone();
+ let (mut primary_span, suggestions) = self.primary_span_formatted(&diag, &fluent_args);
+
+ self.fix_multispans_in_extern_macros_and_render_macro_backtrace(
+ &self.source_map,
+ &mut primary_span,
+ &mut children,
+ &diag.level,
+ self.macro_backtrace,
+ );
+
+ self.emit_messages_default(
+ &diag.level,
+ &diag.message,
+ &fluent_args,
+ &diag.code,
+ &primary_span,
+ &children,
+ &suggestions,
+ );
+ }
+
+ fn source_map(&self) -> Option<&Lrc<SourceMap>> {
+ self.source_map.as_ref()
+ }
+
+ fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
+ self.fluent_bundle.as_ref()
+ }
+
+ fn fallback_fluent_bundle(&self) -> &FluentBundle {
+ &**self.fallback_bundle
+ }
+
+ fn should_show_explain(&self) -> bool {
+ !self.short_message
+ }
+}
+
+/// Provides the source string for the given `line` of `file`
+fn source_string(file: Lrc<SourceFile>, line: &Line) -> String {
+ file.get_line(line.line_index - 1).map(|a| a.to_string()).unwrap_or_default()
+}
+
+/// Maps `Diagnostic::Level` to `snippet::AnnotationType`
+fn annotation_type_for_level(level: Level) -> AnnotationType {
+ match level {
+ Level::Bug | Level::DelayedBug | Level::Fatal | Level::Error { .. } => {
+ AnnotationType::Error
+ }
+ Level::Warning(_) => AnnotationType::Warning,
+ Level::Note | Level::OnceNote => AnnotationType::Note,
+ Level::Help => AnnotationType::Help,
+ // FIXME(#59346): Not sure how to map this level
+ Level::FailureNote => AnnotationType::Error,
+ Level::Allow => panic!("Should not call with Allow"),
+ Level::Expect(_) => panic!("Should not call with Expect"),
+ }
+}
+
+impl AnnotateSnippetEmitterWriter {
+ pub fn new(
+ source_map: Option<Lrc<SourceMap>>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ short_message: bool,
+ macro_backtrace: bool,
+ ) -> Self {
+ Self {
+ source_map,
+ fluent_bundle,
+ fallback_bundle,
+ short_message,
+ ui_testing: false,
+ macro_backtrace,
+ }
+ }
+
+    /// Allows modifying `Self` to enable or disable the `ui_testing` flag.
+ ///
+ /// If this is set to true, line numbers will be normalized as `LL` in the output.
+ pub fn ui_testing(mut self, ui_testing: bool) -> Self {
+ self.ui_testing = ui_testing;
+ self
+ }
+
+ fn emit_messages_default(
+ &mut self,
+ level: &Level,
+ messages: &[(DiagnosticMessage, Style)],
+ args: &FluentArgs<'_>,
+ code: &Option<DiagnosticId>,
+ msp: &MultiSpan,
+ _children: &[SubDiagnostic],
+ _suggestions: &[CodeSuggestion],
+ ) {
+ let message = self.translate_messages(messages, args);
+ if let Some(source_map) = &self.source_map {
+ // Make sure our primary file comes first
+ let primary_lo = if let Some(ref primary_span) = msp.primary_span().as_ref() {
+ if primary_span.is_dummy() {
+ // FIXME(#59346): Not sure when this is the case and what
+ // should be done if it happens
+ return;
+ } else {
+ source_map.lookup_char_pos(primary_span.lo())
+ }
+ } else {
+ // FIXME(#59346): Not sure when this is the case and what
+ // should be done if it happens
+ return;
+ };
+ let mut annotated_files = FileWithAnnotatedLines::collect_annotations(self, args, msp);
+ if let Ok(pos) =
+ annotated_files.binary_search_by(|x| x.file.name.cmp(&primary_lo.file.name))
+ {
+ annotated_files.swap(0, pos);
+ }
+ // owned: line source, line index, annotations
+ type Owned = (String, usize, Vec<crate::snippet::Annotation>);
+ let filename = source_map.filename_for_diagnostics(&primary_lo.file.name);
+ let origin = filename.to_string_lossy();
+ let annotated_files: Vec<Owned> = annotated_files
+ .into_iter()
+ .flat_map(|annotated_file| {
+ let file = annotated_file.file;
+ annotated_file
+ .lines
+ .into_iter()
+ .map(|line| {
+ (source_string(file.clone(), &line), line.line_index, line.annotations)
+ })
+ .collect::<Vec<Owned>>()
+ })
+ .collect();
+ let snippet = Snippet {
+ title: Some(Annotation {
+ label: Some(&message),
+ id: code.as_ref().map(|c| match c {
+ DiagnosticId::Error(val) | DiagnosticId::Lint { name: val, .. } => {
+ val.as_str()
+ }
+ }),
+ annotation_type: annotation_type_for_level(*level),
+ }),
+ footer: vec![],
+ opt: FormatOptions { color: true, anonymized_line_numbers: self.ui_testing },
+ slices: annotated_files
+ .iter()
+ .map(|(source, line_index, annotations)| {
+ Slice {
+ source,
+ line_start: *line_index,
+ origin: Some(&origin),
+ // FIXME(#59346): Not really sure when `fold` should be true or false
+ fold: false,
+ annotations: annotations
+ .iter()
+ .map(|annotation| SourceAnnotation {
+ range: (annotation.start_col, annotation.end_col),
+ label: annotation.label.as_deref().unwrap_or_default(),
+ annotation_type: annotation_type_for_level(*level),
+ })
+ .collect(),
+ }
+ })
+ .collect(),
+ };
+ // FIXME(#59346): Figure out if we can _always_ print to stderr or not.
+ // `emitter.rs` has the `Destination` enum that lists various possible output
+ // destinations.
+ eprintln!("{}", DisplayList::from(snippet))
+ }
+ // FIXME(#59346): Is it ok to return None if there's no source_map?
+ }
+}
diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs
new file mode 100644
index 000000000..17e6c9e95
--- /dev/null
+++ b/compiler/rustc_errors/src/diagnostic.rs
@@ -0,0 +1,1016 @@
+use crate::snippet::Style;
+use crate::{
+ CodeSuggestion, DiagnosticMessage, EmissionGuarantee, Level, LintDiagnosticBuilder, MultiSpan,
+ SubdiagnosticMessage, Substitution, SubstitutionPart, SuggestionStyle,
+};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_error_messages::FluentValue;
+use rustc_hir as hir;
+use rustc_lint_defs::{Applicability, LintExpectationId};
+use rustc_span::edition::LATEST_STABLE_EDITION;
+use rustc_span::symbol::{Ident, Symbol};
+use rustc_span::{edition::Edition, Span, DUMMY_SP};
+use std::borrow::Cow;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+
+/// Error type for `Diagnostic`'s `suggestions` field, indicating that
+/// `.disable_suggestions()` was called on the `Diagnostic`.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
+pub struct SuggestionsDisabled;
+
+/// Simplified version of `FluentArg` that can implement `Encodable` and `Decodable`. Collections
+/// of `DiagnosticArg` are converted to `FluentArgs` (consuming the collection) at the start of
+/// diagnostic emission.
+pub type DiagnosticArg<'source> = (Cow<'source, str>, DiagnosticArgValue<'source>);
+
+/// Simplified version of `FluentValue` that can implement `Encodable` and `Decodable`. Converted
+/// to a `FluentValue` by the emitter to be used in diagnostic translation.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
+pub enum DiagnosticArgValue<'source> {
+ Str(Cow<'source, str>),
+ Number(usize),
+}
+
+/// Converts a value of a type into a `DiagnosticArg` (typically a field of a `SessionDiagnostic`
+/// struct). Implemented as a custom trait rather than `From` so that it is implemented on the type
+/// being converted rather than on `DiagnosticArgValue`, which enables types from other `rustc_*`
+/// crates to implement this.
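+///
+/// A sketch of a manual impl for an illustrative type `MyAbi` (not defined in this crate):
+///
+/// ```ignore (illustrative sketch)
+/// impl IntoDiagnosticArg for MyAbi {
+///     fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+///         DiagnosticArgValue::Str(Cow::Owned(self.to_string()))
+///     }
+/// }
+/// ```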
+pub trait IntoDiagnosticArg {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static>;
+}
+
+pub struct DiagnosticArgFromDisplay<'a>(pub &'a dyn fmt::Display);
+
+impl IntoDiagnosticArg for DiagnosticArgFromDisplay<'_> {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ self.0.to_string().into_diagnostic_arg()
+ }
+}
+
+impl<'a> From<&'a dyn fmt::Display> for DiagnosticArgFromDisplay<'a> {
+ fn from(t: &'a dyn fmt::Display) -> Self {
+ DiagnosticArgFromDisplay(t)
+ }
+}
+
+impl<'a, T: fmt::Display> From<&'a T> for DiagnosticArgFromDisplay<'a> {
+ fn from(t: &'a T) -> Self {
+ DiagnosticArgFromDisplay(t)
+ }
+}
+
+macro_rules! into_diagnostic_arg_using_display {
+ ($( $ty:ty ),+ $(,)?) => {
+ $(
+ impl IntoDiagnosticArg for $ty {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ self.to_string().into_diagnostic_arg()
+ }
+ }
+ )+
+ }
+}
+
+into_diagnostic_arg_using_display!(
+ i8,
+ u8,
+ i16,
+ u16,
+ i32,
+ u32,
+ i64,
+ u64,
+ i128,
+ u128,
+ std::num::NonZeroU32,
+ hir::Target,
+ Edition,
+ Ident,
+);
+
+impl IntoDiagnosticArg for bool {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ if self {
+ DiagnosticArgValue::Str(Cow::Borrowed("true"))
+ } else {
+ DiagnosticArgValue::Str(Cow::Borrowed("false"))
+ }
+ }
+}
+
+impl IntoDiagnosticArg for char {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(format!("{:?}", self)))
+ }
+}
+
+impl IntoDiagnosticArg for Symbol {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ self.to_ident_string().into_diagnostic_arg()
+ }
+}
+
+impl<'a> IntoDiagnosticArg for &'a str {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ self.to_string().into_diagnostic_arg()
+ }
+}
+
+impl IntoDiagnosticArg for String {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Owned(self))
+ }
+}
+
+impl IntoDiagnosticArg for usize {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Number(self)
+ }
+}
+
+impl<'source> Into<FluentValue<'source>> for DiagnosticArgValue<'source> {
+ fn into(self) -> FluentValue<'source> {
+ match self {
+ DiagnosticArgValue::Str(s) => From::from(s),
+ DiagnosticArgValue::Number(n) => From::from(n),
+ }
+ }
+}
+
+impl IntoDiagnosticArg for hir::ConstContext {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ DiagnosticArgValue::Str(Cow::Borrowed(match self {
+ hir::ConstContext::ConstFn => "constant function",
+ hir::ConstContext::Static(_) => "static",
+ hir::ConstContext::Const => "constant",
+ }))
+ }
+}
+
+/// Trait implemented by subdiagnostic types. This should not be implemented manually. Instead, use
+/// `#[derive(SessionSubdiagnostic)]` -- see [rustc_macros::SessionSubdiagnostic].
+#[rustc_diagnostic_item = "AddSubdiagnostic"]
+pub trait AddSubdiagnostic {
+ /// Add a subdiagnostic to an existing diagnostic.
+ fn add_to_diagnostic(self, diag: &mut Diagnostic);
+}
+
+/// Trait implemented by lint types. This should not be implemented manually. Instead, use
+/// `#[derive(LintDiagnostic)]` -- see [rustc_macros::LintDiagnostic].
+#[rustc_diagnostic_item = "DecorateLint"]
+pub trait DecorateLint<'a, G: EmissionGuarantee> {
+ /// Decorate and emit a lint.
+ fn decorate_lint(self, diag: LintDiagnosticBuilder<'a, G>);
+}
+
+#[must_use]
+#[derive(Clone, Debug, Encodable, Decodable)]
+pub struct Diagnostic {
+ // NOTE(eddyb) this is private to disallow arbitrary after-the-fact changes,
+ // outside of what methods in this crate themselves allow.
+ pub(crate) level: Level,
+
+ pub message: Vec<(DiagnosticMessage, Style)>,
+ pub code: Option<DiagnosticId>,
+ pub span: MultiSpan,
+ pub children: Vec<SubDiagnostic>,
+ pub suggestions: Result<Vec<CodeSuggestion>, SuggestionsDisabled>,
+ args: Vec<DiagnosticArg<'static>>,
+
+ /// This is not used for highlighting or rendering any error message. Rather, it can be used
+ /// as a sort key to sort a buffer of diagnostics. By default, it is the primary span of
+ /// `span` if there is one. Otherwise, it is `DUMMY_SP`.
+ pub sort_span: Span,
+
+    /// If the diagnostic is from a lint, the custom hash function ignores notes;
+    /// otherwise the hash is based on all of the fields.
+ pub is_lint: bool,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
+pub enum DiagnosticId {
+ Error(String),
+ Lint { name: String, has_future_breakage: bool, is_force_warn: bool },
+}
+
+/// A "sub"-diagnostic attached to a parent diagnostic.
+/// For example, a note attached to an error.
+#[derive(Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
+pub struct SubDiagnostic {
+ pub level: Level,
+ pub message: Vec<(DiagnosticMessage, Style)>,
+ pub span: MultiSpan,
+ pub render_span: Option<MultiSpan>,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub struct DiagnosticStyledString(pub Vec<StringPart>);
+
+impl DiagnosticStyledString {
+ pub fn new() -> DiagnosticStyledString {
+ DiagnosticStyledString(vec![])
+ }
+ pub fn push_normal<S: Into<String>>(&mut self, t: S) {
+ self.0.push(StringPart::Normal(t.into()));
+ }
+ pub fn push_highlighted<S: Into<String>>(&mut self, t: S) {
+ self.0.push(StringPart::Highlighted(t.into()));
+ }
+ pub fn push<S: Into<String>>(&mut self, t: S, highlight: bool) {
+ if highlight {
+ self.push_highlighted(t);
+ } else {
+ self.push_normal(t);
+ }
+ }
+ pub fn normal<S: Into<String>>(t: S) -> DiagnosticStyledString {
+ DiagnosticStyledString(vec![StringPart::Normal(t.into())])
+ }
+
+ pub fn highlighted<S: Into<String>>(t: S) -> DiagnosticStyledString {
+ DiagnosticStyledString(vec![StringPart::Highlighted(t.into())])
+ }
+
+ pub fn content(&self) -> String {
+ self.0.iter().map(|x| x.content()).collect::<String>()
+ }
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum StringPart {
+ Normal(String),
+ Highlighted(String),
+}
+
+impl StringPart {
+ pub fn content(&self) -> &str {
+ match self {
+ &StringPart::Normal(ref s) | &StringPart::Highlighted(ref s) => s,
+ }
+ }
+}
+
+impl Diagnostic {
+ pub fn new<M: Into<DiagnosticMessage>>(level: Level, message: M) -> Self {
+ Diagnostic::new_with_code(level, None, message)
+ }
+
+ pub fn new_with_code<M: Into<DiagnosticMessage>>(
+ level: Level,
+ code: Option<DiagnosticId>,
+ message: M,
+ ) -> Self {
+ Diagnostic {
+ level,
+ message: vec![(message.into(), Style::NoStyle)],
+ code,
+ span: MultiSpan::new(),
+ children: vec![],
+ suggestions: Ok(vec![]),
+ args: vec![],
+ sort_span: DUMMY_SP,
+ is_lint: false,
+ }
+ }
+
+ #[inline(always)]
+ pub fn level(&self) -> Level {
+ self.level
+ }
+
+ pub fn is_error(&self) -> bool {
+ match self.level {
+ Level::Bug
+ | Level::DelayedBug
+ | Level::Fatal
+ | Level::Error { .. }
+ | Level::FailureNote => true,
+
+ Level::Warning(_)
+ | Level::Note
+ | Level::OnceNote
+ | Level::Help
+ | Level::Allow
+ | Level::Expect(_) => false,
+ }
+ }
+
+ pub fn update_unstable_expectation_id(
+ &mut self,
+ unstable_to_stable: &FxHashMap<LintExpectationId, LintExpectationId>,
+ ) {
+ if let Level::Expect(expectation_id) | Level::Warning(Some(expectation_id)) =
+ &mut self.level
+ {
+ if expectation_id.is_stable() {
+ return;
+ }
+
+ // The unstable to stable map only maps the unstable `AttrId` to a stable `HirId` with an attribute index.
+ // The lint index inside the attribute is manually transferred here.
+ let lint_index = expectation_id.get_lint_index();
+ expectation_id.set_lint_index(None);
+ let mut stable_id = *unstable_to_stable
+ .get(&expectation_id)
+ .expect("each unstable `LintExpectationId` must have a matching stable id");
+
+ stable_id.set_lint_index(lint_index);
+ *expectation_id = stable_id;
+ }
+ }
+
+ pub fn has_future_breakage(&self) -> bool {
+ match self.code {
+ Some(DiagnosticId::Lint { has_future_breakage, .. }) => has_future_breakage,
+ _ => false,
+ }
+ }
+
+ pub fn is_force_warn(&self) -> bool {
+ match self.code {
+ Some(DiagnosticId::Lint { is_force_warn, .. }) => is_force_warn,
+ _ => false,
+ }
+ }
+
+ /// Delay emission of this diagnostic as a bug.
+ ///
+ /// This can be useful in contexts where an error indicates a bug but
+ /// typically this only happens when other compilation errors have already
+ /// happened. In those cases this can be used to defer emission of this
+ /// diagnostic as a bug in the compiler only if no other errors have been
+ /// emitted.
+ ///
+ /// In the meantime, though, callsites are required to deal with the "bug"
+ /// locally in whichever way makes the most sense.
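+    ///
+    /// A sketch (the surrounding recovery context is illustrative):
+    ///
+    /// ```ignore (illustrative sketch)
+    /// // Only reachable after another error was already reported, so defer
+    /// // this diagnostic as a compiler bug instead of emitting it directly.
+    /// diag.downgrade_to_delayed_bug();
+    /// ```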
+ #[track_caller]
+ pub fn downgrade_to_delayed_bug(&mut self) -> &mut Self {
+ assert!(
+ self.is_error(),
+ "downgrade_to_delayed_bug: cannot downgrade {:?} to DelayedBug: not an error",
+ self.level
+ );
+ self.level = Level::DelayedBug;
+
+ self
+ }
+
+ /// Adds a span/label to be included in the resulting snippet.
+ ///
+ /// This is pushed onto the [`MultiSpan`] that was created when the diagnostic
+ /// was first built. That means it will be shown together with the original
+ /// span/label, *not* a span added by one of the `span_{note,warn,help,suggestions}` methods.
+ ///
+ /// This span is *not* considered a ["primary span"][`MultiSpan`]; only
+ /// the `Span` supplied when creating the diagnostic is primary.
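+    ///
+    /// A sketch (the span and label text are illustrative):
+    ///
+    /// ```ignore (illustrative sketch)
+    /// diag.span_label(sp, "expected `u32` here");
+    /// ```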
+ #[rustc_lint_diagnostics]
+ pub fn span_label(&mut self, span: Span, label: impl Into<SubdiagnosticMessage>) -> &mut Self {
+ self.span.push_span_label(span, self.subdiagnostic_message_to_diagnostic_message(label));
+ self
+ }
+
+ /// Labels all the given spans with the provided label.
+ /// See [`Self::span_label()`] for more information.
+ pub fn span_labels(
+ &mut self,
+ spans: impl IntoIterator<Item = Span>,
+ label: impl AsRef<str>,
+ ) -> &mut Self {
+ let label = label.as_ref();
+ for span in spans {
+ self.span_label(span, label);
+ }
+ self
+ }
+
+ pub fn replace_span_with(&mut self, after: Span) -> &mut Self {
+ let before = self.span.clone();
+ self.set_span(after);
+ for span_label in before.span_labels() {
+ if let Some(label) = span_label.label {
+ self.span.push_span_label(after, label);
+ }
+ }
+ self
+ }
+
+ pub fn note_expected_found(
+ &mut self,
+ expected_label: &dyn fmt::Display,
+ expected: DiagnosticStyledString,
+ found_label: &dyn fmt::Display,
+ found: DiagnosticStyledString,
+ ) -> &mut Self {
+ self.note_expected_found_extra(expected_label, expected, found_label, found, &"", &"")
+ }
+
+ pub fn note_unsuccessful_coercion(
+ &mut self,
+ expected: DiagnosticStyledString,
+ found: DiagnosticStyledString,
+ ) -> &mut Self {
+ let mut msg: Vec<_> = vec![("required when trying to coerce from type `", Style::NoStyle)];
+ msg.extend(expected.0.iter().map(|x| match *x {
+ StringPart::Normal(ref s) => (s.as_str(), Style::NoStyle),
+ StringPart::Highlighted(ref s) => (s.as_str(), Style::Highlight),
+ }));
+        msg.push(("` to type `", Style::NoStyle));
+ msg.extend(found.0.iter().map(|x| match *x {
+ StringPart::Normal(ref s) => (s.as_str(), Style::NoStyle),
+ StringPart::Highlighted(ref s) => (s.as_str(), Style::Highlight),
+ }));
+ msg.push(("`", Style::NoStyle));
+
+ // For now, just attach these as notes
+ self.highlighted_note(msg);
+ self
+ }
+
+ pub fn note_expected_found_extra(
+ &mut self,
+ expected_label: &dyn fmt::Display,
+ expected: DiagnosticStyledString,
+ found_label: &dyn fmt::Display,
+ found: DiagnosticStyledString,
+ expected_extra: &dyn fmt::Display,
+ found_extra: &dyn fmt::Display,
+ ) -> &mut Self {
+ let expected_label = expected_label.to_string();
+ let expected_label = if expected_label.is_empty() {
+ "expected".to_string()
+ } else {
+ format!("expected {}", expected_label)
+ };
+ let found_label = found_label.to_string();
+ let found_label = if found_label.is_empty() {
+ "found".to_string()
+ } else {
+ format!("found {}", found_label)
+ };
+ let (found_padding, expected_padding) = if expected_label.len() > found_label.len() {
+ (expected_label.len() - found_label.len(), 0)
+ } else {
+ (0, found_label.len() - expected_label.len())
+ };
+ let mut msg: Vec<_> =
+ vec![(format!("{}{} `", " ".repeat(expected_padding), expected_label), Style::NoStyle)];
+ msg.extend(expected.0.iter().map(|x| match *x {
+ StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
+ StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
+ }));
+ msg.push((format!("`{}\n", expected_extra), Style::NoStyle));
+ msg.push((format!("{}{} `", " ".repeat(found_padding), found_label), Style::NoStyle));
+ msg.extend(found.0.iter().map(|x| match *x {
+ StringPart::Normal(ref s) => (s.to_owned(), Style::NoStyle),
+ StringPart::Highlighted(ref s) => (s.to_owned(), Style::Highlight),
+ }));
+ msg.push((format!("`{}", found_extra), Style::NoStyle));
+
+ // For now, just attach these as notes.
+ self.highlighted_note(msg);
+ self
+ }
+
+ pub fn note_trait_signature(&mut self, name: Symbol, signature: String) -> &mut Self {
+ self.highlighted_note(vec![
+ (format!("`{}` from trait: `", name), Style::NoStyle),
+ (signature, Style::Highlight),
+ ("`".to_string(), Style::NoStyle),
+ ]);
+ self
+ }
+
+ /// Add a note attached to this diagnostic.
+ #[rustc_lint_diagnostics]
+ pub fn note(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self {
+ self.sub(Level::Note, msg, MultiSpan::new(), None);
+ self
+ }
+
+ pub fn highlighted_note<M: Into<SubdiagnosticMessage>>(
+ &mut self,
+ msg: Vec<(M, Style)>,
+ ) -> &mut Self {
+ self.sub_with_highlights(Level::Note, msg, MultiSpan::new(), None);
+ self
+ }
+
+    /// Add a note attached to this diagnostic that is only emitted once.
+    /// This is like [`Diagnostic::note()`], but the note will not be repeated.
+ pub fn note_once(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self {
+ self.sub(Level::OnceNote, msg, MultiSpan::new(), None);
+ self
+ }
+
+ /// Prints the span with a note above it.
+ /// This is like [`Diagnostic::note()`], but it gets its own span.
+ #[rustc_lint_diagnostics]
+ pub fn span_note<S: Into<MultiSpan>>(
+ &mut self,
+ sp: S,
+ msg: impl Into<SubdiagnosticMessage>,
+ ) -> &mut Self {
+ self.sub(Level::Note, msg, sp.into(), None);
+ self
+ }
+
+    /// Prints the span with a note above it, emitting the note only once.
+    /// This is like [`Diagnostic::span_note()`], but the note is deduplicated.
+ pub fn span_note_once<S: Into<MultiSpan>>(
+ &mut self,
+ sp: S,
+ msg: impl Into<SubdiagnosticMessage>,
+ ) -> &mut Self {
+ self.sub(Level::OnceNote, msg, sp.into(), None);
+ self
+ }
+
+ /// Add a warning attached to this diagnostic.
+ #[rustc_lint_diagnostics]
+ pub fn warn(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self {
+ self.sub(Level::Warning(None), msg, MultiSpan::new(), None);
+ self
+ }
+
+ /// Prints the span with a warning above it.
+ /// This is like [`Diagnostic::warn()`], but it gets its own span.
+ #[rustc_lint_diagnostics]
+ pub fn span_warn<S: Into<MultiSpan>>(
+ &mut self,
+ sp: S,
+ msg: impl Into<SubdiagnosticMessage>,
+ ) -> &mut Self {
+ self.sub(Level::Warning(None), msg, sp.into(), None);
+ self
+ }
+
+ /// Add a help message attached to this diagnostic.
+ #[rustc_lint_diagnostics]
+ pub fn help(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self {
+ self.sub(Level::Help, msg, MultiSpan::new(), None);
+ self
+ }
+
+ /// Add a help message attached to this diagnostic with a customizable highlighted message.
+ pub fn highlighted_help(&mut self, msg: Vec<(String, Style)>) -> &mut Self {
+ self.sub_with_highlights(Level::Help, msg, MultiSpan::new(), None);
+ self
+ }
+
+ /// Prints the span with some help above it.
+ /// This is like [`Diagnostic::help()`], but it gets its own span.
+ #[rustc_lint_diagnostics]
+ pub fn span_help<S: Into<MultiSpan>>(
+ &mut self,
+ sp: S,
+ msg: impl Into<SubdiagnosticMessage>,
+ ) -> &mut Self {
+ self.sub(Level::Help, msg, sp.into(), None);
+ self
+ }
+
+ /// Help the user upgrade to the latest edition.
+ /// This is factored out to make sure it does the right thing with `Cargo.toml`.
+ pub fn help_use_latest_edition(&mut self) -> &mut Self {
+ if std::env::var_os("CARGO").is_some() {
+ self.help(&format!("set `edition = \"{}\"` in `Cargo.toml`", LATEST_STABLE_EDITION));
+ } else {
+ self.help(&format!("pass `--edition {}` to `rustc`", LATEST_STABLE_EDITION));
+ }
+ self.note("for more on editions, read https://doc.rust-lang.org/edition-guide");
+ self
+ }
+
+    /// Disallow attaching suggestions to this diagnostic.
+ /// Any suggestions attached e.g. with the `span_suggestion_*` methods
+ /// (before and after the call to `disable_suggestions`) will be ignored.
+ pub fn disable_suggestions(&mut self) -> &mut Self {
+ self.suggestions = Err(SuggestionsDisabled);
+ self
+ }
+
+ /// Clear any existing suggestions.
+ pub fn clear_suggestions(&mut self) -> &mut Self {
+ if let Ok(suggestions) = &mut self.suggestions {
+ suggestions.clear();
+ }
+ self
+ }
+
+    /// Helper for pushing to `self.suggestions`, if available (not disabled).
+ fn push_suggestion(&mut self, suggestion: CodeSuggestion) {
+ if let Ok(suggestions) = &mut self.suggestions {
+ suggestions.push(suggestion);
+ }
+ }
+
+ /// Show a suggestion that has multiple parts to it.
+ /// In other words, multiple changes need to be applied as part of this suggestion.
+ pub fn multipart_suggestion(
+ &mut self,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: Vec<(Span, String)>,
+ applicability: Applicability,
+ ) -> &mut Self {
+ self.multipart_suggestion_with_style(
+ msg,
+ suggestion,
+ applicability,
+ SuggestionStyle::ShowCode,
+ )
+ }
+
+    /// Show a suggestion that has multiple parts to it, always shown as its own subdiagnostic.
+ /// In other words, multiple changes need to be applied as part of this suggestion.
+ pub fn multipart_suggestion_verbose(
+ &mut self,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: Vec<(Span, String)>,
+ applicability: Applicability,
+ ) -> &mut Self {
+ self.multipart_suggestion_with_style(
+ msg,
+ suggestion,
+ applicability,
+ SuggestionStyle::ShowAlways,
+ )
+ }
+
+    /// [`Diagnostic::multipart_suggestion()`] but you can set the [`SuggestionStyle`].
+ pub fn multipart_suggestion_with_style(
+ &mut self,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: Vec<(Span, String)>,
+ applicability: Applicability,
+ style: SuggestionStyle,
+ ) -> &mut Self {
+ assert!(!suggestion.is_empty());
+ self.push_suggestion(CodeSuggestion {
+ substitutions: vec![Substitution {
+ parts: suggestion
+ .into_iter()
+ .map(|(span, snippet)| SubstitutionPart { snippet, span })
+ .collect(),
+ }],
+ msg: self.subdiagnostic_message_to_diagnostic_message(msg),
+ style,
+ applicability,
+ });
+ self
+ }
+
+    /// Prints out a message for a multipart suggestion without showing the suggested code.
+    ///
+    /// This is intended to be used for suggestions where the required changes are obvious from
+    /// the message, showing the span label inline would be visually unpleasant (marginally
+    /// overlapping spans or multiline spans), and showing the snippet window wouldn't improve
+    /// understandability.
+ pub fn tool_only_multipart_suggestion(
+ &mut self,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: Vec<(Span, String)>,
+ applicability: Applicability,
+ ) -> &mut Self {
+ assert!(!suggestion.is_empty());
+ self.push_suggestion(CodeSuggestion {
+ substitutions: vec![Substitution {
+ parts: suggestion
+ .into_iter()
+ .map(|(span, snippet)| SubstitutionPart { snippet, span })
+ .collect(),
+ }],
+ msg: self.subdiagnostic_message_to_diagnostic_message(msg),
+ style: SuggestionStyle::CompletelyHidden,
+ applicability,
+ });
+ self
+ }
+
+ /// Prints out a message with a suggested edit of the code.
+ ///
+ /// In case of short messages and a simple suggestion, rustc displays it as a label:
+ ///
+ /// ```text
+ /// try adding parentheses: `(tup.0).1`
+ /// ```
+ ///
+ /// The message
+ ///
+ /// * should not end in any punctuation (a `:` is added automatically)
+ /// * should not be a question (avoid language like "did you mean")
+ /// * should not contain any phrases like "the following", "as shown", etc.
+ /// * may look like "to do xyz, use" or "to do xyz, use abc"
+ /// * may contain a name of a function, variable, or type, but not whole expressions
+ ///
+ /// See `CodeSuggestion` for more information.
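+    ///
+    /// A sketch of a call (the span, snippet, and applicability are illustrative):
+    ///
+    /// ```ignore (illustrative sketch)
+    /// diag.span_suggestion(
+    ///     sp,
+    ///     "try adding parentheses",
+    ///     format!("({})", snippet),
+    ///     Applicability::MachineApplicable,
+    /// );
+    /// ```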
+ pub fn span_suggestion(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self {
+ self.span_suggestion_with_style(
+ sp,
+ msg,
+ suggestion,
+ applicability,
+ SuggestionStyle::ShowCode,
+ );
+ self
+ }
+
+ /// [`Diagnostic::span_suggestion()`] but you can set the [`SuggestionStyle`].
+ pub fn span_suggestion_with_style(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ style: SuggestionStyle,
+ ) -> &mut Self {
+ self.push_suggestion(CodeSuggestion {
+ substitutions: vec![Substitution {
+ parts: vec![SubstitutionPart { snippet: suggestion.to_string(), span: sp }],
+ }],
+ msg: self.subdiagnostic_message_to_diagnostic_message(msg),
+ style,
+ applicability,
+ });
+ self
+ }
+
+ /// Always show the suggested change.
+ pub fn span_suggestion_verbose(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self {
+ self.span_suggestion_with_style(
+ sp,
+ msg,
+ suggestion,
+ applicability,
+ SuggestionStyle::ShowAlways,
+ );
+ self
+ }
+
+ /// Prints out a message with multiple suggested edits of the code.
+ /// See also [`Diagnostic::span_suggestion()`].
+ pub fn span_suggestions(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestions: impl Iterator<Item = String>,
+ applicability: Applicability,
+ ) -> &mut Self {
+ let mut suggestions: Vec<_> = suggestions.collect();
+ suggestions.sort();
+ let substitutions = suggestions
+ .into_iter()
+ .map(|snippet| Substitution { parts: vec![SubstitutionPart { snippet, span: sp }] })
+ .collect();
+ self.push_suggestion(CodeSuggestion {
+ substitutions,
+ msg: self.subdiagnostic_message_to_diagnostic_message(msg),
+ style: SuggestionStyle::ShowCode,
+ applicability,
+ });
+ self
+ }
+
+ /// Prints out a message with multiple suggested edits of the code.
+ /// See also [`Diagnostic::span_suggestion()`].
+ pub fn multipart_suggestions(
+ &mut self,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestions: impl Iterator<Item = Vec<(Span, String)>>,
+ applicability: Applicability,
+ ) -> &mut Self {
+ self.push_suggestion(CodeSuggestion {
+ substitutions: suggestions
+ .map(|sugg| Substitution {
+ parts: sugg
+ .into_iter()
+ .map(|(span, snippet)| SubstitutionPart { snippet, span })
+ .collect(),
+ })
+ .collect(),
+ msg: self.subdiagnostic_message_to_diagnostic_message(msg),
+ style: SuggestionStyle::ShowCode,
+ applicability,
+ });
+ self
+ }
+
+    /// Prints out a message with a suggested edit of the code. If the suggestion is presented
+ /// inline, it will only show the message and not the suggestion.
+ ///
+ /// See `CodeSuggestion` for more information.
+ pub fn span_suggestion_short(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self {
+ self.span_suggestion_with_style(
+ sp,
+ msg,
+ suggestion,
+ applicability,
+ SuggestionStyle::HideCodeInline,
+ );
+ self
+ }
+
+ /// Prints out a message for a suggestion without showing the suggested code.
+ ///
+    /// This is intended to be used for suggestions where the required changes are obvious from
+    /// the message, showing the span label inline would be visually unpleasant (marginally
+    /// overlapping spans or multiline spans), and showing the snippet window wouldn't improve
+    /// understandability.
+ pub fn span_suggestion_hidden(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self {
+ self.span_suggestion_with_style(
+ sp,
+ msg,
+ suggestion,
+ applicability,
+ SuggestionStyle::HideCodeAlways,
+ );
+ self
+ }
+
+ /// Adds a suggestion to the JSON output that will not be shown in the CLI.
+ ///
+ /// This is intended to be used for suggestions that are *very* obvious in what the changes
+ /// need to be from the message, but we still want other tools to be able to apply them.
+ pub fn tool_only_span_suggestion(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self {
+ self.span_suggestion_with_style(
+ sp,
+ msg,
+ suggestion,
+ applicability,
+ SuggestionStyle::CompletelyHidden,
+ );
+ self
+ }
+
+ /// Add a subdiagnostic from a type that implements `SessionSubdiagnostic` - see
+ /// [rustc_macros::SessionSubdiagnostic].
+ pub fn subdiagnostic(&mut self, subdiagnostic: impl AddSubdiagnostic) -> &mut Self {
+ subdiagnostic.add_to_diagnostic(self);
+ self
+ }
+
+ pub fn set_span<S: Into<MultiSpan>>(&mut self, sp: S) -> &mut Self {
+ self.span = sp.into();
+ if let Some(span) = self.span.primary_span() {
+ self.sort_span = span;
+ }
+ self
+ }
+
+ pub fn set_is_lint(&mut self) -> &mut Self {
+ self.is_lint = true;
+ self
+ }
+
+ pub fn code(&mut self, s: DiagnosticId) -> &mut Self {
+ self.code = Some(s);
+ self
+ }
+
+ pub fn clear_code(&mut self) -> &mut Self {
+ self.code = None;
+ self
+ }
+
+ pub fn get_code(&self) -> Option<DiagnosticId> {
+ self.code.clone()
+ }
+
+ pub fn set_primary_message(&mut self, msg: impl Into<DiagnosticMessage>) -> &mut Self {
+ self.message[0] = (msg.into(), Style::NoStyle);
+ self
+ }
+
+ pub fn args(&self) -> &[DiagnosticArg<'static>] {
+ &self.args
+ }
+
+ pub fn set_arg(
+ &mut self,
+ name: impl Into<Cow<'static, str>>,
+ arg: impl IntoDiagnosticArg,
+ ) -> &mut Self {
+ self.args.push((name.into(), arg.into_diagnostic_arg()));
+ self
+ }
+
+ pub fn styled_message(&self) -> &[(DiagnosticMessage, Style)] {
+ &self.message
+ }
+
+ /// Helper function that takes a `SubdiagnosticMessage` and returns a `DiagnosticMessage` by
+ /// combining it with the primary message of the diagnostic (if translatable, otherwise it just
+ /// passes the user's string along).
+ fn subdiagnostic_message_to_diagnostic_message(
+ &self,
+ attr: impl Into<SubdiagnosticMessage>,
+ ) -> DiagnosticMessage {
+ let msg =
+ self.message.iter().map(|(msg, _)| msg).next().expect("diagnostic with no messages");
+ msg.with_subdiagnostic_message(attr.into())
+ }
+
+ /// Convenience function for internal use, clients should use one of the
+ /// public methods above.
+ ///
+ /// Used by `proc_macro_server` for implementing `server::Diagnostic`.
+ pub fn sub(
+ &mut self,
+ level: Level,
+ message: impl Into<SubdiagnosticMessage>,
+ span: MultiSpan,
+ render_span: Option<MultiSpan>,
+ ) {
+ let sub = SubDiagnostic {
+ level,
+ message: vec![(
+ self.subdiagnostic_message_to_diagnostic_message(message),
+ Style::NoStyle,
+ )],
+ span,
+ render_span,
+ };
+ self.children.push(sub);
+ }
+
+ /// Convenience function for internal use, clients should use one of the
+ /// public methods above.
+ fn sub_with_highlights<M: Into<SubdiagnosticMessage>>(
+ &mut self,
+ level: Level,
+ mut message: Vec<(M, Style)>,
+ span: MultiSpan,
+ render_span: Option<MultiSpan>,
+ ) {
+ let message = message
+ .drain(..)
+ .map(|m| (self.subdiagnostic_message_to_diagnostic_message(m.0), m.1))
+ .collect();
+ let sub = SubDiagnostic { level, message, span, render_span };
+ self.children.push(sub);
+ }
+
+    /// Fields used for the `Hash` and `PartialEq` impls.
+ fn keys(
+ &self,
+ ) -> (
+ &Level,
+ &[(DiagnosticMessage, Style)],
+ &Option<DiagnosticId>,
+ &MultiSpan,
+ &Result<Vec<CodeSuggestion>, SuggestionsDisabled>,
+ Option<&[SubDiagnostic]>,
+ ) {
+ (
+ &self.level,
+ &self.message,
+ &self.code,
+ &self.span,
+ &self.suggestions,
+ (if self.is_lint { None } else { Some(&self.children) }),
+ )
+ }
+}
+
+impl Hash for Diagnostic {
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: Hasher,
+ {
+ self.keys().hash(state);
+ }
+}
+
+impl PartialEq for Diagnostic {
+ fn eq(&self, other: &Self) -> bool {
+ self.keys() == other.keys()
+ }
+}
diff --git a/compiler/rustc_errors/src/diagnostic_builder.rs b/compiler/rustc_errors/src/diagnostic_builder.rs
new file mode 100644
index 000000000..9e68ee282
--- /dev/null
+++ b/compiler/rustc_errors/src/diagnostic_builder.rs
@@ -0,0 +1,616 @@
+use crate::diagnostic::IntoDiagnosticArg;
+use crate::{
+ Diagnostic, DiagnosticId, DiagnosticMessage, DiagnosticStyledString, ErrorGuaranteed,
+ SubdiagnosticMessage,
+};
+use crate::{Handler, Level, MultiSpan, StashKey};
+use rustc_lint_defs::Applicability;
+
+use rustc_span::Span;
+use std::borrow::Cow;
+use std::fmt::{self, Debug};
+use std::marker::PhantomData;
+use std::ops::{Deref, DerefMut};
+use std::thread::panicking;
+use tracing::debug;
+
+/// Used for emitting structured error messages and other diagnostic information.
+///
+/// If there is some state in a downstream crate you would like to
+/// access in the methods of `DiagnosticBuilder` here, consider
+/// extending `HandlerFlags`, accessed via `self.handler.flags`.
+#[must_use]
+#[derive(Clone)]
+pub struct DiagnosticBuilder<'a, G: EmissionGuarantee> {
+ inner: DiagnosticBuilderInner<'a>,
+ _marker: PhantomData<G>,
+}
+
+/// This type exists only for `DiagnosticBuilder::forget_guarantee`, because it:
+/// 1. lacks the `G` parameter and therefore `DiagnosticBuilder<G1>` can be
+/// converted into `DiagnosticBuilder<G2>` while reusing the `inner` field
+/// 2. can implement the `Drop` "bomb" instead of `DiagnosticBuilder`, as it
+/// contains all of the data (`state` + `diagnostic`) of `DiagnosticBuilder`
+///
+/// The `diagnostic` field is not `Copy` and can't be moved out of whichever
+/// type implements the `Drop` "bomb", but because of the above two facts, that
+/// never needs to happen - instead, the whole `inner: DiagnosticBuilderInner`
+/// can be moved out of a `DiagnosticBuilder` and into another.
+#[must_use]
+#[derive(Clone)]
+struct DiagnosticBuilderInner<'a> {
+ state: DiagnosticBuilderState<'a>,
+
+ /// `Diagnostic` is a large type, and `DiagnosticBuilder` is often used as a
+ /// return value, especially within the frequently-used `PResult` type.
+ /// In theory, return value optimization (RVO) should avoid unnecessary
+ /// copying. In practice, it does not (at the time of writing).
+ diagnostic: Box<Diagnostic>,
+}
+
+#[derive(Clone)]
+enum DiagnosticBuilderState<'a> {
+ /// Initial state of a `DiagnosticBuilder`, before `.emit()` or `.cancel()`.
+ ///
+ /// The `Diagnostic` will be emitted through this `Handler`.
+ Emittable(&'a Handler),
+
+ /// State of a `DiagnosticBuilder`, after `.emit()` or *during* `.cancel()`.
+ ///
+ /// The `Diagnostic` will be ignored when calling `.emit()`, and it can be
+ /// assumed that `.emit()` was previously called, to end up in this state.
+ ///
+ /// While this is also used by `.cancel()`, this state is only observed by
+ /// the `Drop` `impl` of `DiagnosticBuilderInner`, as `.cancel()` takes
+ /// `self` by-value specifically to prevent any attempts to `.emit()`.
+ ///
+ // FIXME(eddyb) currently this doesn't prevent extending the `Diagnostic`,
+ // despite that being potentially lossy, if important information is added
+ // *after* the original `.emit()` call.
+ AlreadyEmittedOrDuringCancellation,
+}
+
+// `DiagnosticBuilderState` should be pointer-sized.
+rustc_data_structures::static_assert_size!(
+ DiagnosticBuilderState<'_>,
+ std::mem::size_of::<&Handler>()
+);
+
+/// Trait for types that `DiagnosticBuilder::emit` can return as a "guarantee"
+/// (or "proof") token that the emission happened.
+pub trait EmissionGuarantee: Sized {
+ /// Implementation of `DiagnosticBuilder::emit`, fully controlled by each
+ /// `impl` of `EmissionGuarantee`, to make it impossible to create a value
+ /// of `Self` without actually performing the emission.
+ #[track_caller]
+ fn diagnostic_builder_emit_producing_guarantee(db: &mut DiagnosticBuilder<'_, Self>) -> Self;
+}
+
+/// Private module for sealing the `IsError` helper trait.
+mod sealed_level_is_error {
+ use crate::Level;
+
+ /// Sealed helper trait for statically checking that a `Level` is an error.
+ pub(crate) trait IsError<const L: Level> {}
+
+ impl IsError<{ Level::Bug }> for () {}
+ impl IsError<{ Level::DelayedBug }> for () {}
+ impl IsError<{ Level::Fatal }> for () {}
+ // NOTE(eddyb) `Level::Error { lint: true }` is also an error, but lints
+ // don't need error guarantees, as their levels are always dynamic.
+ impl IsError<{ Level::Error { lint: false } }> for () {}
+}
+
+impl<'a> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ /// Convenience function for internal use, clients should use one of the
+ /// `struct_*` methods on [`Handler`].
+ pub(crate) fn new_guaranteeing_error<M: Into<DiagnosticMessage>, const L: Level>(
+ handler: &'a Handler,
+ message: M,
+ ) -> Self
+ where
+ (): sealed_level_is_error::IsError<L>,
+ {
+ Self {
+ inner: DiagnosticBuilderInner {
+ state: DiagnosticBuilderState::Emittable(handler),
+ diagnostic: Box::new(Diagnostic::new_with_code(L, None, message)),
+ },
+ _marker: PhantomData,
+ }
+ }
+
+ /// Discard the guarantee `.emit()` would return, in favor of having the
+ /// type `DiagnosticBuilder<'a, ()>`. This may be necessary whenever there
+ /// is a common codepath handling both errors and warnings.
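+    ///
+    /// A sketch (`handler`, `is_error`, and `msg` are illustrative):
+    ///
+    /// ```ignore (illustrative sketch)
+    /// let mut db: DiagnosticBuilder<'_, ()> = if is_error {
+    ///     handler.struct_err(msg).forget_guarantee()
+    /// } else {
+    ///     handler.struct_warn(msg)
+    /// };
+    /// ```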
+ pub fn forget_guarantee(self) -> DiagnosticBuilder<'a, ()> {
+ DiagnosticBuilder { inner: self.inner, _marker: PhantomData }
+ }
+}
+
+// FIXME(eddyb) make `ErrorGuaranteed` impossible to create outside `.emit()`.
+impl EmissionGuarantee for ErrorGuaranteed {
+ fn diagnostic_builder_emit_producing_guarantee(db: &mut DiagnosticBuilder<'_, Self>) -> Self {
+ match db.inner.state {
+ // First `.emit()` call, the `&Handler` is still available.
+ DiagnosticBuilderState::Emittable(handler) => {
+ db.inner.state = DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation;
+
+ let guar = handler.emit_diagnostic(&mut db.inner.diagnostic);
+
+ // Only allow a guarantee if the `level` wasn't switched to a
+ // non-error - the field isn't `pub`, but the whole `Diagnostic`
+ // can be overwritten with a new one, thanks to `DerefMut`.
+ assert!(
+ db.inner.diagnostic.is_error(),
+ "emitted non-error ({:?}) diagnostic \
+ from `DiagnosticBuilder<ErrorGuaranteed>`",
+ db.inner.diagnostic.level,
+ );
+ guar.unwrap()
+ }
+ // `.emit()` was previously called, disallowed from repeating it,
+ // but can take advantage of the previous `.emit()`'s guarantee
+ // still being applicable (i.e. as a form of idempotency).
+ DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation => {
+ // Only allow a guarantee if the `level` wasn't switched to a
+ // non-error - the field isn't `pub`, but the whole `Diagnostic`
+ // can be overwritten with a new one, thanks to `DerefMut`.
+ assert!(
+ db.inner.diagnostic.is_error(),
+ "`DiagnosticBuilder<ErrorGuaranteed>`'s diagnostic \
+ became non-error ({:?}), after original `.emit()`",
+ db.inner.diagnostic.level,
+ );
+ ErrorGuaranteed::unchecked_claim_error_was_emitted()
+ }
+ }
+ }
+}
+
+impl<'a> DiagnosticBuilder<'a, ()> {
+ /// Convenience function for internal use, clients should use one of the
+ /// `struct_*` methods on [`Handler`].
+ pub(crate) fn new<M: Into<DiagnosticMessage>>(
+ handler: &'a Handler,
+ level: Level,
+ message: M,
+ ) -> Self {
+ let diagnostic = Diagnostic::new_with_code(level, None, message);
+ Self::new_diagnostic(handler, diagnostic)
+ }
+
+ /// Creates a new `DiagnosticBuilder` with an already constructed
+ /// diagnostic.
+ pub(crate) fn new_diagnostic(handler: &'a Handler, diagnostic: Diagnostic) -> Self {
+ debug!("Created new diagnostic");
+ Self {
+ inner: DiagnosticBuilderInner {
+ state: DiagnosticBuilderState::Emittable(handler),
+ diagnostic: Box::new(diagnostic),
+ },
+ _marker: PhantomData,
+ }
+ }
+}
+
+// FIXME(eddyb) should there be a `Option<ErrorGuaranteed>` impl as well?
+impl EmissionGuarantee for () {
+ fn diagnostic_builder_emit_producing_guarantee(db: &mut DiagnosticBuilder<'_, Self>) -> Self {
+ match db.inner.state {
+ // First `.emit()` call, the `&Handler` is still available.
+ DiagnosticBuilderState::Emittable(handler) => {
+ db.inner.state = DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation;
+
+ handler.emit_diagnostic(&mut db.inner.diagnostic);
+ }
+            // `.emit()` was previously called; repeating it is disallowed.
+ DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation => {}
+ }
+ }
+}
+
+impl<'a> DiagnosticBuilder<'a, !> {
+    /// Convenience function for internal use; clients should use one of the
+    /// `struct_*` methods on [`Handler`].
+ pub(crate) fn new_fatal(handler: &'a Handler, message: impl Into<DiagnosticMessage>) -> Self {
+ let diagnostic = Diagnostic::new_with_code(Level::Fatal, None, message);
+ Self::new_diagnostic_fatal(handler, diagnostic)
+ }
+
+ /// Creates a new `DiagnosticBuilder` with an already constructed
+ /// diagnostic.
+ pub(crate) fn new_diagnostic_fatal(handler: &'a Handler, diagnostic: Diagnostic) -> Self {
+ debug!("Created new diagnostic");
+ Self {
+ inner: DiagnosticBuilderInner {
+ state: DiagnosticBuilderState::Emittable(handler),
+ diagnostic: Box::new(diagnostic),
+ },
+ _marker: PhantomData,
+ }
+ }
+}
+
+impl EmissionGuarantee for ! {
+ fn diagnostic_builder_emit_producing_guarantee(db: &mut DiagnosticBuilder<'_, Self>) -> Self {
+ match db.inner.state {
+ // First `.emit()` call, the `&Handler` is still available.
+ DiagnosticBuilderState::Emittable(handler) => {
+ db.inner.state = DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation;
+
+ handler.emit_diagnostic(&mut db.inner.diagnostic);
+ }
+            // `.emit()` was previously called; repeating it is disallowed.
+ DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation => {}
+ }
+ // Then fatally error, returning `!`
+ crate::FatalError.raise()
+ }
+}
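+
+// Taken together, the three `EmissionGuarantee` impls above surface to callers
+// like this (sketch, assuming the usual `struct_*` constructors on `Handler`):
+//
+//     handler.struct_err("...").emit();   // -> ErrorGuaranteed
+//     handler.struct_warn("...").emit();  // -> ()
+//     handler.struct_fatal("...").emit(); // -> ! (raises `FatalError`)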
+
+/// In general, the `DiagnosticBuilder` uses deref to allow access to
+/// the fields and methods of the embedded `diagnostic` in a
+/// transparent way. *However,* many of the methods are intended to
+/// be used in a chained way, and hence ought to return `self`. In
+/// that case, we can't just naively forward to the method on the
+/// `diagnostic`, because the return type would be a `&Diagnostic`
+/// instead of a `&DiagnosticBuilder<'a>`. This `forward!` macro makes
+/// it easy to declare such methods on the builder.
+macro_rules! forward {
+ // Forward pattern for &mut self -> &mut Self
+ (
+ $(#[$attrs:meta])*
+ pub fn $n:ident(&mut self, $($name:ident: $ty:ty),* $(,)?) -> &mut Self
+ ) => {
+ $(#[$attrs])*
+ #[doc = concat!("See [`Diagnostic::", stringify!($n), "()`].")]
+ pub fn $n(&mut self, $($name: $ty),*) -> &mut Self {
+ self.inner.diagnostic.$n($($name),*);
+ self
+ }
+ };
+}
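+
+// For example,
+// `forward!(pub fn note(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self)`
+// expands (modulo the generated doc link) to roughly:
+//
+//     pub fn note(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self {
+//         self.inner.diagnostic.note(msg);
+//         self
+//     }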
+
+impl<G: EmissionGuarantee> Deref for DiagnosticBuilder<'_, G> {
+ type Target = Diagnostic;
+
+ fn deref(&self) -> &Diagnostic {
+ &self.inner.diagnostic
+ }
+}
+
+impl<G: EmissionGuarantee> DerefMut for DiagnosticBuilder<'_, G> {
+ fn deref_mut(&mut self) -> &mut Diagnostic {
+ &mut self.inner.diagnostic
+ }
+}
+
+impl<'a, G: EmissionGuarantee> DiagnosticBuilder<'a, G> {
+ /// Emit the diagnostic.
+ #[track_caller]
+ pub fn emit(&mut self) -> G {
+ G::diagnostic_builder_emit_producing_guarantee(self)
+ }
+
+ /// Emit the diagnostic unless `delay` is true,
+ /// in which case the emission will be delayed as a bug.
+ ///
+ /// See `emit` and `delay_as_bug` for details.
+ #[track_caller]
+ pub fn emit_unless(&mut self, delay: bool) -> G {
+ if delay {
+ self.downgrade_to_delayed_bug();
+ }
+ self.emit()
+ }
+
+ /// Cancel the diagnostic (a structured diagnostic must either be emitted or
+ /// cancelled or it will panic when dropped).
+ ///
+ /// This method takes `self` by-value to disallow calling `.emit()` on it,
+ /// which may be expected to *guarantee* the emission of an error, either
+ /// at the time of the call, or through a prior `.emit()` call.
+ pub fn cancel(mut self) {
+ self.inner.state = DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation;
+ drop(self);
+ }
+
+    /// Stashes the diagnostic for possible later improvement in a different,
+    /// later stage of the compiler. The diagnostic can be accessed with
+    /// the provided `span` and `key` through [`Handler::steal_diagnostic()`].
+    ///
+    /// As with `buffer`, if the handler has disabled such buffering the
+    /// diagnostic is emitted immediately instead.
+ pub fn stash(self, span: Span, key: StashKey) {
+ if let Some((diag, handler)) = self.into_diagnostic() {
+ handler.stash_diagnostic(span, key, diag);
+ }
+ }
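+
+    // Illustrative flow (sketch; the `StashKey` variant and the exact shape of
+    // `steal_diagnostic` are abbreviated here):
+    //
+    //     err.stash(span, StashKey::ItemNoType);
+    //     // ... later, in a different pass:
+    //     if let Some(diag) = handler.steal_diagnostic(span, StashKey::ItemNoType) {
+    //         // improve and emit the stolen diagnostic
+    //     }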
+
+    /// Converts the builder to a `Diagnostic` for later emission,
+    /// unless the handler has disabled such buffering, or `.emit()` was called.
+ pub fn into_diagnostic(mut self) -> Option<(Diagnostic, &'a Handler)> {
+ let handler = match self.inner.state {
+ // No `.emit()` calls, the `&Handler` is still available.
+ DiagnosticBuilderState::Emittable(handler) => handler,
+ // `.emit()` was previously called, nothing we can do.
+ DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation => {
+ return None;
+ }
+ };
+
+ if handler.flags.dont_buffer_diagnostics || handler.flags.treat_err_as_bug.is_some() {
+ self.emit();
+ return None;
+ }
+
+ // Take the `Diagnostic` by replacing it with a dummy.
+ let dummy = Diagnostic::new(Level::Allow, DiagnosticMessage::Str("".to_string()));
+ let diagnostic = std::mem::replace(&mut *self.inner.diagnostic, dummy);
+
+ // Disable the ICE on `Drop`.
+ self.cancel();
+
+ // Logging here is useful to help track down where in logs an error was
+ // actually emitted.
+ debug!("buffer: diagnostic={:?}", diagnostic);
+
+ Some((diagnostic, handler))
+ }
+
+    /// Buffers the diagnostic for later emission,
+    /// unless the handler has disabled such buffering.
+ pub fn buffer(self, buffered_diagnostics: &mut Vec<Diagnostic>) {
+ buffered_diagnostics.extend(self.into_diagnostic().map(|(diag, _)| diag));
+ }
+
+ /// Delay emission of this diagnostic as a bug.
+ ///
+    /// This can be useful in contexts where an error indicates a bug, but one
+    /// that typically only occurs when other compilation errors have already
+    /// happened. In those cases this can be used to defer emission of this
+    /// diagnostic as a bug in the compiler, emitted only if no other errors
+    /// have been emitted.
+ ///
+ /// In the meantime, though, callsites are required to deal with the "bug"
+ /// locally in whichever way makes the most sense.
+ #[track_caller]
+ pub fn delay_as_bug(&mut self) {
+ self.downgrade_to_delayed_bug();
+ self.emit();
+ }
+
+ forward!(
+ #[track_caller]
+ pub fn downgrade_to_delayed_bug(&mut self,) -> &mut Self
+ );
+
+ forward!(
+ /// Appends a labeled span to the diagnostic.
+ ///
+ /// Labels are used to convey additional context for the diagnostic's primary span. They will
+ /// be shown together with the original diagnostic's span, *not* with spans added by
+ /// `span_note`, `span_help`, etc. Therefore, if the primary span is not displayable (because
+ /// the span is `DUMMY_SP` or the source code isn't found), labels will not be displayed
+ /// either.
+ ///
+ /// Implementation-wise, the label span is pushed onto the [`MultiSpan`] that was created when
+ /// the diagnostic was constructed. However, the label span is *not* considered a
+ /// ["primary span"][`MultiSpan`]; only the `Span` supplied when creating the diagnostic is
+ /// primary.
+ pub fn span_label(&mut self, span: Span, label: impl Into<SubdiagnosticMessage>) -> &mut Self);
+
+ forward!(
+ /// Labels all the given spans with the provided label.
+ /// See [`Diagnostic::span_label()`] for more information.
+ pub fn span_labels(
+ &mut self,
+ spans: impl IntoIterator<Item = Span>,
+ label: impl AsRef<str>,
+ ) -> &mut Self);
+
+ forward!(pub fn note_expected_found(
+ &mut self,
+ expected_label: &dyn fmt::Display,
+ expected: DiagnosticStyledString,
+ found_label: &dyn fmt::Display,
+ found: DiagnosticStyledString,
+ ) -> &mut Self);
+
+ forward!(pub fn note_expected_found_extra(
+ &mut self,
+ expected_label: &dyn fmt::Display,
+ expected: DiagnosticStyledString,
+ found_label: &dyn fmt::Display,
+ found: DiagnosticStyledString,
+ expected_extra: &dyn fmt::Display,
+ found_extra: &dyn fmt::Display,
+ ) -> &mut Self);
+
+ forward!(pub fn note_unsuccessful_coercion(
+ &mut self,
+ expected: DiagnosticStyledString,
+ found: DiagnosticStyledString,
+ ) -> &mut Self);
+
+ forward!(pub fn note(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self);
+ forward!(pub fn note_once(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self);
+ forward!(pub fn span_note(
+ &mut self,
+ sp: impl Into<MultiSpan>,
+ msg: impl Into<SubdiagnosticMessage>,
+ ) -> &mut Self);
+ forward!(pub fn span_note_once(
+ &mut self,
+ sp: impl Into<MultiSpan>,
+ msg: impl Into<SubdiagnosticMessage>,
+ ) -> &mut Self);
+ forward!(pub fn warn(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self);
+ forward!(pub fn span_warn(
+ &mut self,
+ sp: impl Into<MultiSpan>,
+ msg: impl Into<SubdiagnosticMessage>,
+ ) -> &mut Self);
+ forward!(pub fn help(&mut self, msg: impl Into<SubdiagnosticMessage>) -> &mut Self);
+ forward!(pub fn span_help(
+ &mut self,
+ sp: impl Into<MultiSpan>,
+ msg: impl Into<SubdiagnosticMessage>,
+ ) -> &mut Self);
+ forward!(pub fn help_use_latest_edition(&mut self,) -> &mut Self);
+ forward!(pub fn set_is_lint(&mut self,) -> &mut Self);
+
+ forward!(pub fn disable_suggestions(&mut self,) -> &mut Self);
+ forward!(pub fn clear_suggestions(&mut self,) -> &mut Self);
+
+ forward!(pub fn multipart_suggestion(
+ &mut self,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: Vec<(Span, String)>,
+ applicability: Applicability,
+ ) -> &mut Self);
+ forward!(pub fn multipart_suggestion_verbose(
+ &mut self,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: Vec<(Span, String)>,
+ applicability: Applicability,
+ ) -> &mut Self);
+ forward!(pub fn tool_only_multipart_suggestion(
+ &mut self,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: Vec<(Span, String)>,
+ applicability: Applicability,
+ ) -> &mut Self);
+ forward!(pub fn span_suggestion(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self);
+ forward!(pub fn span_suggestions(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestions: impl Iterator<Item = String>,
+ applicability: Applicability,
+ ) -> &mut Self);
+ forward!(pub fn multipart_suggestions(
+ &mut self,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestions: impl Iterator<Item = Vec<(Span, String)>>,
+ applicability: Applicability,
+ ) -> &mut Self);
+ forward!(pub fn span_suggestion_short(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self);
+ forward!(pub fn span_suggestion_verbose(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self);
+ forward!(pub fn span_suggestion_hidden(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self);
+ forward!(pub fn tool_only_span_suggestion(
+ &mut self,
+ sp: Span,
+ msg: impl Into<SubdiagnosticMessage>,
+ suggestion: impl ToString,
+ applicability: Applicability,
+ ) -> &mut Self);
+
+ forward!(pub fn set_primary_message(&mut self, msg: impl Into<DiagnosticMessage>) -> &mut Self);
+ forward!(pub fn set_span(&mut self, sp: impl Into<MultiSpan>) -> &mut Self);
+ forward!(pub fn code(&mut self, s: DiagnosticId) -> &mut Self);
+ forward!(pub fn set_arg(
+ &mut self,
+ name: impl Into<Cow<'static, str>>,
+ arg: impl IntoDiagnosticArg,
+ ) -> &mut Self);
+
+ forward!(pub fn subdiagnostic(
+ &mut self,
+ subdiagnostic: impl crate::AddSubdiagnostic
+ ) -> &mut Self);
+}
+
+impl<G: EmissionGuarantee> Debug for DiagnosticBuilder<'_, G> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.inner.diagnostic.fmt(f)
+ }
+}
+
+/// Destructor bomb - a `DiagnosticBuilder` must be either emitted or cancelled
+/// or we emit a bug.
+impl Drop for DiagnosticBuilderInner<'_> {
+ fn drop(&mut self) {
+ match self.state {
+ // No `.emit()` or `.cancel()` calls.
+ DiagnosticBuilderState::Emittable(handler) => {
+ if !panicking() {
+ handler.emit_diagnostic(&mut Diagnostic::new(
+ Level::Bug,
+ DiagnosticMessage::Str(
+ "the following error was constructed but not emitted".to_string(),
+ ),
+ ));
+ handler.emit_diagnostic(&mut self.diagnostic);
+ panic!();
+ }
+ }
+ // `.emit()` was previously called, or maybe we're during `.cancel()`.
+ DiagnosticBuilderState::AlreadyEmittedOrDuringCancellation => {}
+ }
+ }
+}
+
+#[macro_export]
+macro_rules! struct_span_err {
+ ($session:expr, $span:expr, $code:ident, $($message:tt)*) => ({
+ $session.struct_span_err_with_code(
+ $span,
+ &format!($($message)*),
+ $crate::error_code!($code),
+ )
+ })
+}
+
+#[macro_export]
+macro_rules! error_code {
+ ($code:ident) => {{ $crate::DiagnosticId::Error(stringify!($code).to_owned()) }};
+}
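+
+// For example (sketch, with a made-up error code):
+// `struct_span_err!(sess, span, E0999, "mismatched {}", what)` expands to
+// `sess.struct_span_err_with_code(span, &format!("mismatched {}", what),
+// error_code!(E0999))`, and `error_code!(E0999)` in turn becomes
+// `DiagnosticId::Error("E0999".to_owned())`.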
+
+/// Wrapper around a `DiagnosticBuilder` for creating lints.
+pub struct LintDiagnosticBuilder<'a, G: EmissionGuarantee>(DiagnosticBuilder<'a, G>);
+
+impl<'a, G: EmissionGuarantee> LintDiagnosticBuilder<'a, G> {
+    /// Return the inner `DiagnosticBuilder`, first setting the primary message to `msg`.
+    #[rustc_lint_diagnostics]
+ pub fn build(mut self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'a, G> {
+ self.0.set_primary_message(msg);
+ self.0.set_is_lint();
+ self.0
+ }
+
+ /// Create a `LintDiagnosticBuilder` from some existing `DiagnosticBuilder`.
+ pub fn new(err: DiagnosticBuilder<'a, G>) -> LintDiagnosticBuilder<'a, G> {
+ LintDiagnosticBuilder(err)
+ }
+}
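+
+// Typical use from a lint callback (sketch; `SOME_LINT` is a placeholder and
+// the surrounding `struct_span_lint` machinery lives elsewhere):
+//
+//     cx.struct_span_lint(SOME_LINT, span, |lint| {
+//         lint.build("some message").emit();
+//     });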
+
+impl<'a> LintDiagnosticBuilder<'a, ErrorGuaranteed> {
+ pub fn forget_guarantee(self) -> LintDiagnosticBuilder<'a, ()> {
+ LintDiagnosticBuilder(self.0.forget_guarantee())
+ }
+}
diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs
new file mode 100644
index 000000000..61d953cd6
--- /dev/null
+++ b/compiler/rustc_errors/src/emitter.rs
@@ -0,0 +1,2687 @@
+//! The current rustc diagnostics emitter.
+//!
+//! An `Emitter` takes care of generating the output from a `DiagnosticBuilder` struct.
+//!
+//! There are various `Emitter` implementations that generate different output formats such as
+//! JSON and human readable output.
+//!
+//! The output types are defined in `rustc_session::config::ErrorOutputType`.
+
+use Destination::*;
+
+use rustc_span::source_map::SourceMap;
+use rustc_span::{FileLines, SourceFile, Span};
+
+use crate::snippet::{Annotation, AnnotationType, Line, MultilineAnnotation, Style, StyledString};
+use crate::styled_buffer::StyledBuffer;
+use crate::{
+ CodeSuggestion, Diagnostic, DiagnosticArg, DiagnosticId, DiagnosticMessage, FluentBundle,
+ Handler, LazyFallbackBundle, Level, MultiSpan, SubDiagnostic, SubstitutionHighlight,
+ SuggestionStyle,
+};
+
+use rustc_lint_defs::pluralize;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lrc;
+use rustc_error_messages::FluentArgs;
+use rustc_span::hygiene::{ExpnKind, MacroKind};
+use std::borrow::Cow;
+use std::cmp::{max, min, Reverse};
+use std::io;
+use std::io::prelude::*;
+use std::iter;
+use std::path::Path;
+use termcolor::{Ansi, BufferWriter, ColorChoice, ColorSpec, StandardStream};
+use termcolor::{Buffer, Color, WriteColor};
+use tracing::*;
+
+/// Default column width, used in tests and when terminal dimensions cannot be determined.
+const DEFAULT_COLUMN_WIDTH: usize = 140;
+
+/// Describes the way the content of the `rendered` field of the JSON output is generated.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum HumanReadableErrorType {
+ Default(ColorConfig),
+ AnnotateSnippet(ColorConfig),
+ Short(ColorConfig),
+}
+
+impl HumanReadableErrorType {
+ /// Returns a (`short`, `color`) tuple
+ pub fn unzip(self) -> (bool, ColorConfig) {
+ match self {
+ HumanReadableErrorType::Default(cc) => (false, cc),
+ HumanReadableErrorType::Short(cc) => (true, cc),
+ HumanReadableErrorType::AnnotateSnippet(cc) => (false, cc),
+ }
+ }
+ pub fn new_emitter(
+ self,
+ dst: Box<dyn Write + Send>,
+ source_map: Option<Lrc<SourceMap>>,
+ bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ teach: bool,
+ diagnostic_width: Option<usize>,
+ macro_backtrace: bool,
+ ) -> EmitterWriter {
+ let (short, color_config) = self.unzip();
+ let color = color_config.suggests_using_colors();
+ EmitterWriter::new(
+ dst,
+ source_map,
+ bundle,
+ fallback_bundle,
+ short,
+ teach,
+ color,
+ diagnostic_width,
+ macro_backtrace,
+ )
+ }
+}
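+
+// For example (sketch): `HumanReadableErrorType::Short(ColorConfig::Auto).unzip()`
+// yields `(true, ColorConfig::Auto)`, so `new_emitter` constructs an
+// `EmitterWriter` whose `short_message` flag is set.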
+
+#[derive(Clone, Copy, Debug)]
+struct Margin {
+    /// The available whitespace on the left that can be consumed when centering.
+    pub whitespace_left: usize,
+    /// The column of the beginning of the left-most span.
+    pub span_left: usize,
+    /// The column of the end of the right-most span.
+    pub span_right: usize,
+ /// The beginning of the line to be displayed.
+ pub computed_left: usize,
+ /// The end of the line to be displayed.
+ pub computed_right: usize,
+    /// The current width of the terminal. Uses the value of the `DEFAULT_COLUMN_WIDTH`
+    /// constant by default and in tests.
+ pub column_width: usize,
+    /// The end column of a span label, including the span. Doesn't account for labels not on
+    /// the same line as the span.
+ pub label_right: usize,
+}
+
+impl Margin {
+ fn new(
+ whitespace_left: usize,
+ span_left: usize,
+ span_right: usize,
+ label_right: usize,
+ column_width: usize,
+ max_line_len: usize,
+ ) -> Self {
+ // The 6 is padding to give a bit of room for `...` when displaying:
+ // ```
+ // error: message
+ // --> file.rs:16:58
+ // |
+ // 16 | ... fn foo(self) -> Self::Bar {
+ // | ^^^^^^^^^
+ // ```
+
+ let mut m = Margin {
+ whitespace_left: whitespace_left.saturating_sub(6),
+ span_left: span_left.saturating_sub(6),
+ span_right: span_right + 6,
+ computed_left: 0,
+ computed_right: 0,
+ column_width,
+ label_right: label_right + 6,
+ };
+ m.compute(max_line_len);
+ m
+ }
+
+ fn was_cut_left(&self) -> bool {
+ self.computed_left > 0
+ }
+
+ fn was_cut_right(&self, line_len: usize) -> bool {
+ let right =
+ if self.computed_right == self.span_right || self.computed_right == self.label_right {
+ // Account for the "..." padding given above. Otherwise we end up with code lines that
+ // do fit but end in "..." as if they were trimmed.
+ self.computed_right - 6
+ } else {
+ self.computed_right
+ };
+ right < line_len && self.computed_left + self.column_width < line_len
+ }
+
+ fn compute(&mut self, max_line_len: usize) {
+ // When there's a lot of whitespace (>20), we want to trim it as it is useless.
+ self.computed_left = if self.whitespace_left > 20 {
+ self.whitespace_left - 16 // We want some padding.
+ } else {
+ 0
+ };
+ // We want to show as much as possible, max_line_len is the right-most boundary for the
+ // relevant code.
+ self.computed_right = max(max_line_len, self.computed_left);
+
+ if self.computed_right - self.computed_left > self.column_width {
+ // Trimming only whitespace isn't enough, let's get craftier.
+ if self.label_right - self.whitespace_left <= self.column_width {
+ // Attempt to fit the code window only trimming whitespace.
+ self.computed_left = self.whitespace_left;
+ self.computed_right = self.computed_left + self.column_width;
+ } else if self.label_right - self.span_left <= self.column_width {
+ // Attempt to fit the code window considering only the spans and labels.
+ let padding_left = (self.column_width - (self.label_right - self.span_left)) / 2;
+ self.computed_left = self.span_left.saturating_sub(padding_left);
+ self.computed_right = self.computed_left + self.column_width;
+ } else if self.span_right - self.span_left <= self.column_width {
+ // Attempt to fit the code window considering the spans and labels plus padding.
+ let padding_left = (self.column_width - (self.span_right - self.span_left)) / 5 * 2;
+ self.computed_left = self.span_left.saturating_sub(padding_left);
+ self.computed_right = self.computed_left + self.column_width;
+ } else {
+ // Mostly give up but still don't show the full line.
+ self.computed_left = self.span_left;
+ self.computed_right = self.span_right;
+ }
+ }
+ }
+
+ fn left(&self, line_len: usize) -> usize {
+ min(self.computed_left, line_len)
+ }
+
+ fn right(&self, line_len: usize) -> usize {
+ if line_len.saturating_sub(self.computed_left) <= self.column_width {
+ line_len
+ } else {
+ min(line_len, self.computed_right)
+ }
+ }
+}
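+
+// Worked example (a sketch with made-up numbers): for
+// `Margin::new(30, 35, 60, 80, 140, 200)` the stored fields become
+// `whitespace_left = 24`, `span_left = 29`, `span_right = 66` and
+// `label_right = 86` after the +/-6 padding. `compute(200)` first trims the
+// leading whitespace (`computed_left = 24 - 16 = 8`); the window is still too
+// wide (`200 - 8 > 140`), and since `label_right - whitespace_left = 62 <= 140`
+// it settles on `computed_left = 24`, `computed_right = 24 + 140 = 164`, so
+// columns 24..164 of the line are displayed.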
+
+const ANONYMIZED_LINE_NUM: &str = "LL";
+
+/// Emitter trait for emitting errors.
+pub trait Emitter {
+ /// Emit a structured diagnostic.
+ fn emit_diagnostic(&mut self, diag: &Diagnostic);
+
+ /// Emit a notification that an artifact has been output.
+ /// This is currently only supported for the JSON format,
+ /// other formats can, and will, simply ignore it.
+ fn emit_artifact_notification(&mut self, _path: &Path, _artifact_type: &str) {}
+
+ fn emit_future_breakage_report(&mut self, _diags: Vec<Diagnostic>) {}
+
+ /// Emit list of unused externs
+ fn emit_unused_externs(
+ &mut self,
+ _lint_level: rustc_lint_defs::Level,
+ _unused_externs: &[&str],
+ ) {
+ }
+
+    /// Checks whether we should show explanations about "rustc --explain".
+ fn should_show_explain(&self) -> bool {
+ true
+ }
+
+ /// Checks if we can use colors in the current output stream.
+ fn supports_color(&self) -> bool {
+ false
+ }
+
+ fn source_map(&self) -> Option<&Lrc<SourceMap>>;
+
+ /// Return `FluentBundle` with localized diagnostics for the locale requested by the user. If no
+ /// language was requested by the user then this will be `None` and `fallback_fluent_bundle`
+ /// should be used.
+ fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>>;
+
+ /// Return `FluentBundle` with localized diagnostics for the default locale of the compiler.
+ /// Used when the user has not requested a specific language or when a localized diagnostic is
+ /// unavailable for the requested locale.
+ fn fallback_fluent_bundle(&self) -> &FluentBundle;
+
+ /// Convert diagnostic arguments (a rustc internal type that exists to implement
+ /// `Encodable`/`Decodable`) into `FluentArgs` which is necessary to perform translation.
+ ///
+ /// Typically performed once for each diagnostic at the start of `emit_diagnostic` and then
+ /// passed around as a reference thereafter.
+ fn to_fluent_args<'arg>(&self, args: &[DiagnosticArg<'arg>]) -> FluentArgs<'arg> {
+ FromIterator::from_iter(args.to_vec().drain(..))
+ }
+
+ /// Convert `DiagnosticMessage`s to a string, performing translation if necessary.
+ fn translate_messages(
+ &self,
+ messages: &[(DiagnosticMessage, Style)],
+ args: &FluentArgs<'_>,
+ ) -> Cow<'_, str> {
+ Cow::Owned(
+ messages.iter().map(|(m, _)| self.translate_message(m, args)).collect::<String>(),
+ )
+ }
+
+ /// Convert a `DiagnosticMessage` to a string, performing translation if necessary.
+ fn translate_message<'a>(
+ &'a self,
+ message: &'a DiagnosticMessage,
+ args: &'a FluentArgs<'_>,
+ ) -> Cow<'_, str> {
+ trace!(?message, ?args);
+ let (identifier, attr) = match message {
+ DiagnosticMessage::Str(msg) => return Cow::Borrowed(&msg),
+ DiagnosticMessage::FluentIdentifier(identifier, attr) => (identifier, attr),
+ };
+
+ let bundle = match self.fluent_bundle() {
+ Some(bundle) if bundle.has_message(&identifier) => bundle,
+ _ => self.fallback_fluent_bundle(),
+ };
+
+ let message = bundle.get_message(&identifier).expect("missing diagnostic in fluent bundle");
+ let value = match attr {
+ Some(attr) => {
+ if let Some(attr) = message.get_attribute(attr) {
+ attr.value()
+ } else {
+ panic!("missing attribute `{attr}` in fluent message `{identifier}`")
+ }
+ }
+ None => {
+ if let Some(value) = message.value() {
+ value
+ } else {
+ panic!("missing value in fluent message `{identifier}`")
+ }
+ }
+ };
+
+ let mut err = vec![];
+ let translated = bundle.format_pattern(value, Some(&args), &mut err);
+ trace!(?translated, ?err);
+ debug_assert!(
+ err.is_empty(),
+ "identifier: {:?}, args: {:?}, errors: {:?}",
+ identifier,
+ args,
+ err
+ );
+ translated
+ }
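+
+    // For example (sketch): assuming some loaded bundle defines
+    // `parser-expected-identifier = expected identifier`, translating a
+    // `DiagnosticMessage::FluentIdentifier` with that identifier looks up the
+    // message and formats its value, while a `DiagnosticMessage::Str` is
+    // returned verbatim without consulting Fluent.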
+
+ /// Formats the substitutions of the primary_span
+ ///
+ /// There are a lot of conditions to this method, but in short:
+ ///
+ /// * If the current `Diagnostic` has only one visible `CodeSuggestion`,
+ /// we format the `help` suggestion depending on the content of the
+ /// substitutions. In that case, we return the modified span only.
+ ///
+ /// * If the current `Diagnostic` has multiple suggestions,
+ /// we return the original `primary_span` and the original suggestions.
+ fn primary_span_formatted<'a>(
+ &mut self,
+ diag: &'a Diagnostic,
+ fluent_args: &FluentArgs<'_>,
+ ) -> (MultiSpan, &'a [CodeSuggestion]) {
+ let mut primary_span = diag.span.clone();
+ let suggestions = diag.suggestions.as_ref().map_or(&[][..], |suggestions| &suggestions[..]);
+ if let Some((sugg, rest)) = suggestions.split_first() {
+ let msg = self.translate_message(&sugg.msg, fluent_args);
+ if rest.is_empty() &&
+ // ^ if there is only one suggestion
+ // don't display multi-suggestions as labels
+ sugg.substitutions.len() == 1 &&
+ // don't display multipart suggestions as labels
+ sugg.substitutions[0].parts.len() == 1 &&
+ // don't display long messages as labels
+ msg.split_whitespace().count() < 10 &&
+ // don't display multiline suggestions as labels
+ !sugg.substitutions[0].parts[0].snippet.contains('\n') &&
+ ![
+ // when this style is set we want the suggestion to be a message, not inline
+ SuggestionStyle::HideCodeAlways,
+ // trivial suggestion for tooling's sake, never shown
+ SuggestionStyle::CompletelyHidden,
+ // subtle suggestion, never shown inline
+ SuggestionStyle::ShowAlways,
+ ].contains(&sugg.style)
+ {
+ let substitution = &sugg.substitutions[0].parts[0].snippet.trim();
+ let msg = if substitution.is_empty() || sugg.style.hide_inline() {
+ // This substitution is only removal OR we explicitly don't want to show the
+ // code inline (`hide_inline`). Therefore, we don't show the substitution.
+ format!("help: {}", &msg)
+ } else {
+ // Show the default suggestion text with the substitution
+ format!(
+ "help: {}{}: `{}`",
+ &msg,
+ if self
+ .source_map()
+ .map(|sm| is_case_difference(
+ &**sm,
+ substitution,
+ sugg.substitutions[0].parts[0].span,
+ ))
+ .unwrap_or(false)
+ {
+ " (notice the capitalization)"
+ } else {
+ ""
+ },
+ substitution,
+ )
+ };
+ primary_span.push_span_label(sugg.substitutions[0].parts[0].span, msg);
+
+ // We return only the modified primary_span
+ (primary_span, &[])
+ } else {
+ // if there are multiple suggestions, print them all in full
+ // to be consistent. We could try to figure out if we can
+ // make one (or the first one) inline, but that would give
+ // undue importance to a semi-random suggestion
+ (primary_span, suggestions)
+ }
+ } else {
+ (primary_span, suggestions)
+ }
+ }
+
+ fn fix_multispans_in_extern_macros_and_render_macro_backtrace(
+ &self,
+ source_map: &Option<Lrc<SourceMap>>,
+ span: &mut MultiSpan,
+ children: &mut Vec<SubDiagnostic>,
+ level: &Level,
+ backtrace: bool,
+ ) {
+ // Check for spans in macros, before `fix_multispans_in_extern_macros`
+ // has a chance to replace them.
+ let has_macro_spans: Vec<_> = iter::once(&*span)
+ .chain(children.iter().map(|child| &child.span))
+ .flat_map(|span| span.primary_spans())
+ .flat_map(|sp| sp.macro_backtrace())
+ .filter_map(|expn_data| {
+ match expn_data.kind {
+ ExpnKind::Root => None,
+
+ // Skip past non-macro entries, just in case there
+ // are some which do actually involve macros.
+ ExpnKind::Inlined | ExpnKind::Desugaring(..) | ExpnKind::AstPass(..) => None,
+
+ ExpnKind::Macro(macro_kind, name) => Some((macro_kind, name)),
+ }
+ })
+ .collect();
+
+ if !backtrace {
+ self.fix_multispans_in_extern_macros(source_map, span, children);
+ }
+
+ self.render_multispans_macro_backtrace(span, children, backtrace);
+
+ if !backtrace {
+ if let Some((macro_kind, name)) = has_macro_spans.first() {
+ // Mark the actual macro this originates from
+ let and_then = if let Some((macro_kind, last_name)) = has_macro_spans.last()
+ && last_name != name
+ {
+ let descr = macro_kind.descr();
+ format!(
+ " which comes from the expansion of the {descr} `{last_name}`",
+ )
+ } else {
+ "".to_string()
+ };
+
+ let descr = macro_kind.descr();
+ let msg = format!(
+ "this {level} originates in the {descr} `{name}`{and_then} \
+ (in Nightly builds, run with -Z macro-backtrace for more info)",
+ );
+
+ children.push(SubDiagnostic {
+ level: Level::Note,
+ message: vec![(DiagnosticMessage::Str(msg), Style::NoStyle)],
+ span: MultiSpan::new(),
+ render_span: None,
+ });
+ }
+ }
+ }
+
+ fn render_multispans_macro_backtrace(
+ &self,
+ span: &mut MultiSpan,
+ children: &mut Vec<SubDiagnostic>,
+ backtrace: bool,
+ ) {
+ for span in iter::once(span).chain(children.iter_mut().map(|child| &mut child.span)) {
+ self.render_multispan_macro_backtrace(span, backtrace);
+ }
+ }
+
+ fn render_multispan_macro_backtrace(&self, span: &mut MultiSpan, always_backtrace: bool) {
+ let mut new_labels: Vec<(Span, String)> = vec![];
+
+ for &sp in span.primary_spans() {
+ if sp.is_dummy() {
+ continue;
+ }
+
+ // FIXME(eddyb) use `retain` on `macro_backtrace` to remove all the
+ // entries we don't want to print, to make sure the indices being
+ // printed are contiguous (or omitted if there's only one entry).
+ let macro_backtrace: Vec<_> = sp.macro_backtrace().collect();
+ for (i, trace) in macro_backtrace.iter().rev().enumerate() {
+ if trace.def_site.is_dummy() {
+ continue;
+ }
+
+ if always_backtrace && !matches!(trace.kind, ExpnKind::Inlined) {
+ new_labels.push((
+ trace.def_site,
+ format!(
+ "in this expansion of `{}`{}",
+ trace.kind.descr(),
+ if macro_backtrace.len() > 1 {
+ // if macro_backtrace.len() == 1 it'll be
+ // pointed at by "in this macro invocation"
+ format!(" (#{})", i + 1)
+ } else {
+ String::new()
+ },
+ ),
+ ));
+ }
+
+ // Don't add a label on the call site if the diagnostic itself
+ // already points to (a part of) that call site, as the label
+ // is meant for showing the relevant invocation when the actual
+ // diagnostic is pointing to some part of macro definition.
+ //
+ // This also handles the case where an external span got replaced
+ // with the call site span by `fix_multispans_in_extern_macros`.
+ //
+ // NB: `-Zmacro-backtrace` overrides this, for uniformity, as the
+ // "in this expansion of" label above is always added in that mode,
+ // and it needs an "in this macro invocation" label to match that.
+ let redundant_span = trace.call_site.contains(sp);
+
+ if !redundant_span || always_backtrace {
+ let msg: Cow<'static, _> = match trace.kind {
+ ExpnKind::Macro(MacroKind::Attr, _) => {
+ "this procedural macro expansion".into()
+ }
+ ExpnKind::Macro(MacroKind::Derive, _) => {
+ "this derive macro expansion".into()
+ }
+ ExpnKind::Macro(MacroKind::Bang, _) => "this macro invocation".into(),
+ ExpnKind::Inlined => "this inlined function call".into(),
+ ExpnKind::Root => "the crate root".into(),
+ ExpnKind::AstPass(kind) => kind.descr().into(),
+ ExpnKind::Desugaring(kind) => {
+ format!("this {} desugaring", kind.descr()).into()
+ }
+ };
+ new_labels.push((
+ trace.call_site,
+ format!(
+ "in {}{}",
+ msg,
+ if macro_backtrace.len() > 1 && always_backtrace {
+ // only specify order when the macro
+ // backtrace is multiple levels deep
+ format!(" (#{})", i + 1)
+ } else {
+ String::new()
+ },
+ ),
+ ));
+ }
+ if !always_backtrace {
+ break;
+ }
+ }
+ }
+
+ for (label_span, label_text) in new_labels {
+ span.push_span_label(label_span, label_text);
+ }
+ }
+
+ // This does a small "fix" for multispans by looking to see if it can find any that
+ // point directly at external macros. Since these are often difficult to read,
+ // this will change the span to point at the use site.
+ fn fix_multispans_in_extern_macros(
+ &self,
+ source_map: &Option<Lrc<SourceMap>>,
+ span: &mut MultiSpan,
+ children: &mut Vec<SubDiagnostic>,
+ ) {
+ let Some(source_map) = source_map else { return };
+ debug!("fix_multispans_in_extern_macros: before: span={:?} children={:?}", span, children);
+ self.fix_multispan_in_extern_macros(source_map, span);
+ for child in children.iter_mut() {
+ self.fix_multispan_in_extern_macros(source_map, &mut child.span);
+ }
+ debug!("fix_multispans_in_extern_macros: after: span={:?} children={:?}", span, children);
+ }
+
+ // This "fixes" MultiSpans that contain `Span`s pointing to locations inside of external macros.
+ // Since these locations are often difficult to read,
+ // we move these spans from the external macros to their corresponding use site.
+ fn fix_multispan_in_extern_macros(&self, source_map: &Lrc<SourceMap>, span: &mut MultiSpan) {
+ // First, find all the spans in external macros and point instead at their use site.
+ let replacements: Vec<(Span, Span)> = span
+ .primary_spans()
+ .iter()
+ .copied()
+ .chain(span.span_labels().iter().map(|sp_label| sp_label.span))
+ .filter_map(|sp| {
+ if !sp.is_dummy() && source_map.is_imported(sp) {
+ let maybe_callsite = sp.source_callsite();
+ if sp != maybe_callsite {
+ return Some((sp, maybe_callsite));
+ }
+ }
+ None
+ })
+ .collect();
+
+ // After we have them, make sure we replace these 'bad' def sites with their use sites.
+ for (from, to) in replacements {
+ span.replace(from, to);
+ }
+ }
+}
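+
+// For example (sketch): if a label span points into the body of `vec!` as
+// defined in the standard library (an external macro),
+// `fix_multispan_in_extern_macros` rewrites it to the `vec![...]` call site in
+// the user's crate, which is the span a reader can actually act on.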
+
+impl Emitter for EmitterWriter {
+ fn source_map(&self) -> Option<&Lrc<SourceMap>> {
+ self.sm.as_ref()
+ }
+
+ fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
+ self.fluent_bundle.as_ref()
+ }
+
+ fn fallback_fluent_bundle(&self) -> &FluentBundle {
+ &**self.fallback_bundle
+ }
+
+ fn emit_diagnostic(&mut self, diag: &Diagnostic) {
+ let fluent_args = self.to_fluent_args(diag.args());
+
+ let mut children = diag.children.clone();
+ let (mut primary_span, suggestions) = self.primary_span_formatted(&diag, &fluent_args);
+ debug!("emit_diagnostic: suggestions={:?}", suggestions);
+
+ self.fix_multispans_in_extern_macros_and_render_macro_backtrace(
+ &self.sm,
+ &mut primary_span,
+ &mut children,
+ &diag.level,
+ self.macro_backtrace,
+ );
+
+ self.emit_messages_default(
+ &diag.level,
+ &diag.message,
+ &fluent_args,
+ &diag.code,
+ &primary_span,
+ &children,
+ &suggestions,
+ );
+ }
+
+ fn should_show_explain(&self) -> bool {
+ !self.short_message
+ }
+
+ fn supports_color(&self) -> bool {
+ self.dst.supports_color()
+ }
+}
+
+/// An emitter that does nothing when emitting a non-fatal diagnostic.
+/// Fatal diagnostics are forwarded to `fatal_handler` to avoid silent
+/// failures of rustc, as witnessed e.g. in issue #89358.
+pub struct SilentEmitter {
+ pub fatal_handler: Handler,
+ pub fatal_note: Option<String>,
+}
+
+impl Emitter for SilentEmitter {
+ fn source_map(&self) -> Option<&Lrc<SourceMap>> {
+ None
+ }
+
+ fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
+ None
+ }
+
+ fn fallback_fluent_bundle(&self) -> &FluentBundle {
+ panic!("silent emitter attempted to translate message")
+ }
+
+ fn emit_diagnostic(&mut self, d: &Diagnostic) {
+ if d.level == Level::Fatal {
+ let mut d = d.clone();
+ if let Some(ref note) = self.fatal_note {
+ d.note(note);
+ }
+ self.fatal_handler.emit_diagnostic(&mut d);
+ }
+ }
+}
+
+/// Maximum number of suggestions to be shown
+///
+/// Arbitrary, but taken from trait import suggestion limit
+pub const MAX_SUGGESTIONS: usize = 4;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum ColorConfig {
+ Auto,
+ Always,
+ Never,
+}
+
+impl ColorConfig {
+ fn to_color_choice(self) -> ColorChoice {
+ match self {
+ ColorConfig::Always => {
+ if atty::is(atty::Stream::Stderr) {
+ ColorChoice::Always
+ } else {
+ ColorChoice::AlwaysAnsi
+ }
+ }
+ ColorConfig::Never => ColorChoice::Never,
+ ColorConfig::Auto if atty::is(atty::Stream::Stderr) => ColorChoice::Auto,
+ ColorConfig::Auto => ColorChoice::Never,
+ }
+ }
+ fn suggests_using_colors(self) -> bool {
+ match self {
+ ColorConfig::Always | ColorConfig::Auto => true,
+ ColorConfig::Never => false,
+ }
+ }
+}
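+
+// Note the `ColorConfig::Always` branch above: when stderr is not a terminal
+// it maps to `ColorChoice::AlwaysAnsi` (ANSI escapes are emitted even when
+// piped), whereas `ColorConfig::Auto` degrades to `ColorChoice::Never` in
+// that case.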
+
+/// Handles the writing of `HumanReadableErrorType::Default` and `HumanReadableErrorType::Short`
+pub struct EmitterWriter {
+ dst: Destination,
+ sm: Option<Lrc<SourceMap>>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ short_message: bool,
+ teach: bool,
+ ui_testing: bool,
+ diagnostic_width: Option<usize>,
+
+ macro_backtrace: bool,
+}
+
+#[derive(Debug)]
+pub struct FileWithAnnotatedLines {
+ pub file: Lrc<SourceFile>,
+ pub lines: Vec<Line>,
+ multiline_depth: usize,
+}
+
+impl EmitterWriter {
+ pub fn stderr(
+ color_config: ColorConfig,
+ source_map: Option<Lrc<SourceMap>>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ short_message: bool,
+ teach: bool,
+ diagnostic_width: Option<usize>,
+ macro_backtrace: bool,
+ ) -> EmitterWriter {
+ let dst = Destination::from_stderr(color_config);
+ EmitterWriter {
+ dst,
+ sm: source_map,
+ fluent_bundle,
+ fallback_bundle,
+ short_message,
+ teach,
+ ui_testing: false,
+ diagnostic_width,
+ macro_backtrace,
+ }
+ }
+
+ pub fn new(
+ dst: Box<dyn Write + Send>,
+ source_map: Option<Lrc<SourceMap>>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ short_message: bool,
+ teach: bool,
+ colored: bool,
+ diagnostic_width: Option<usize>,
+ macro_backtrace: bool,
+ ) -> EmitterWriter {
+ EmitterWriter {
+ dst: Raw(dst, colored),
+ sm: source_map,
+ fluent_bundle,
+ fallback_bundle,
+ short_message,
+ teach,
+ ui_testing: false,
+ diagnostic_width,
+ macro_backtrace,
+ }
+ }
+
+ pub fn ui_testing(mut self, ui_testing: bool) -> Self {
+ self.ui_testing = ui_testing;
+ self
+ }
+
+ fn maybe_anonymized(&self, line_num: usize) -> Cow<'static, str> {
+ if self.ui_testing {
+ Cow::Borrowed(ANONYMIZED_LINE_NUM)
+ } else {
+ Cow::Owned(line_num.to_string())
+ }
+ }
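+
+    // E.g. with `ui_testing` set (as under `-Z ui-testing` in UI tests),
+    // `maybe_anonymized(16)` renders as `LL` rather than `16`, keeping test
+    // expectations stable when line numbers shift.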
+
+ fn draw_line(
+ &self,
+ buffer: &mut StyledBuffer,
+ source_string: &str,
+ line_index: usize,
+ line_offset: usize,
+ width_offset: usize,
+ code_offset: usize,
+ margin: Margin,
+ ) {
+ // Tabs are assumed to have been replaced by spaces in calling code.
+ debug_assert!(!source_string.contains('\t'));
+ let line_len = source_string.len();
+ // Create the source line we will highlight.
+ let left = margin.left(line_len);
+ let right = margin.right(line_len);
+ // On long lines, we strip the source line, accounting for unicode.
+ let mut taken = 0;
+ let code: String = source_string
+ .chars()
+ .skip(left)
+ .take_while(|ch| {
+ // Make sure that the trimming on the right will fall within the terminal width.
+ // FIXME: `unicode_width` sometimes disagrees with terminals on how wide a `char` is.
+ // For now, just accept that sometimes the code line will be longer than desired.
+ let next = unicode_width::UnicodeWidthChar::width(*ch).unwrap_or(1);
+ if taken + next > right - left {
+ return false;
+ }
+ taken += next;
+ true
+ })
+ .collect();
+ buffer.puts(line_offset, code_offset, &code, Style::Quotation);
+ if margin.was_cut_left() {
+ // We have stripped some code/whitespace from the beginning, make it clear.
+ buffer.puts(line_offset, code_offset, "...", Style::LineNumber);
+ }
+ if margin.was_cut_right(line_len) {
+ // We have stripped some code after the right-most span end, make it clear we did so.
+ buffer.puts(line_offset, code_offset + taken - 3, "...", Style::LineNumber);
+ }
+ buffer.puts(line_offset, 0, &self.maybe_anonymized(line_index), Style::LineNumber);
+
+ draw_col_separator_no_space(buffer, line_offset, width_offset - 2);
+ }
+
+ fn render_source_line(
+ &self,
+ buffer: &mut StyledBuffer,
+ file: Lrc<SourceFile>,
+ line: &Line,
+ width_offset: usize,
+ code_offset: usize,
+ margin: Margin,
+ ) -> Vec<(usize, Style)> {
+ // Draw:
+ //
+ // LL | ... code ...
+ // | ^^-^ span label
+ // | |
+ // | secondary span label
+ //
+ // ^^ ^ ^^^ ^^^^ ^^^ we don't care about code too far to the right of a span, we trim it
+ // | | | |
+ // | | | actual code found in your source code and the spans we use to mark it
+ // | | when there's too much wasted space to the left, trim it
+ // | vertical divider between the column number and the code
+ // column number
+
+ if line.line_index == 0 {
+ return Vec::new();
+ }
+
+ let source_string = match file.get_line(line.line_index - 1) {
+ Some(s) => normalize_whitespace(&*s),
+ None => return Vec::new(),
+ };
+
+ let line_offset = buffer.num_lines();
+
+ // Left trim
+ let left = margin.left(source_string.len());
+
+        // Account for unicode characters of width != 1 that were removed.
+ let left = source_string
+ .chars()
+ .take(left)
+ .map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1))
+ .sum();
+
+ self.draw_line(
+ buffer,
+ &source_string,
+ line.line_index,
+ line_offset,
+ width_offset,
+ code_offset,
+ margin,
+ );
+
+        // Special case: when there's only one annotation involved, it is the start of a multiline
+        // span, and there's no text at the beginning of the code line. Instead of doing the whole
+        // graph:
+ //
+ // 2 | fn foo() {
+ // | _^
+ // 3 | |
+ // 4 | | }
+ // | |_^ test
+ //
+ // we simplify the output to:
+ //
+ // 2 | / fn foo() {
+ // 3 | |
+ // 4 | | }
+ // | |_^ test
+ if let [ann] = &line.annotations[..] {
+ if let AnnotationType::MultilineStart(depth) = ann.annotation_type {
+ if source_string.chars().take(ann.start_col).all(|c| c.is_whitespace()) {
+ let style = if ann.is_primary {
+ Style::UnderlinePrimary
+ } else {
+ Style::UnderlineSecondary
+ };
+ buffer.putc(line_offset, width_offset + depth - 1, '/', style);
+ return vec![(depth, style)];
+ }
+ }
+ }
+
+ // We want to display like this:
+ //
+ // vec.push(vec.pop().unwrap());
+ // --- ^^^ - previous borrow ends here
+ // | |
+ // | error occurs here
+ // previous borrow of `vec` occurs here
+ //
+ // But there are some weird edge cases to be aware of:
+ //
+ // vec.push(vec.pop().unwrap());
+ // -------- - previous borrow ends here
+ // ||
+ // |this makes no sense
+ // previous borrow of `vec` occurs here
+ //
+ // For this reason, we group the lines into "highlight lines"
+ // and "annotations lines", where the highlight lines have the `^`.
+
+        // Sort the annotations by (start, end col).
+        // The annotations are sorted with a reversed key, i.e. in decreasing order.
+        // Consider a list of annotations (A1, A2, C1, C2, B1, B2) where
+        // the letter signifies the span. Here we are only sorting by the
+        // span and hence, the order of the elements with the same span will
+        // not change. On reversing the ordering (i.e. `|a, b| b.cmp(a)`), you get
+ // (C1, C2, B1, B2, A1, A2). All the elements with the same span are
+ // still ordered first to last, but all the elements with different
+ // spans are ordered by their spans in last to first order. Last to
+ // first order is important, because the jiggly lines and | are on
+ // the left, so the rightmost span needs to be rendered first,
+ // otherwise the lines would end up needing to go over a message.
+
+ let mut annotations = line.annotations.clone();
+ annotations.sort_by_key(|a| Reverse(a.start_col));
+
+ // First, figure out where each label will be positioned.
+ //
+ // In the case where you have the following annotations:
+ //
+ // vec.push(vec.pop().unwrap());
+ // -------- - previous borrow ends here [C]
+ // ||
+ // |this makes no sense [B]
+ // previous borrow of `vec` occurs here [A]
+ //
+ // `annotations_position` will hold [(2, A), (1, B), (0, C)].
+ //
+ // We try, when possible, to stick the rightmost annotation at the end
+ // of the highlight line:
+ //
+ // vec.push(vec.pop().unwrap());
+ // --- --- - previous borrow ends here
+ //
+ // But sometimes that's not possible because one of the other
+ // annotations overlaps it. For example, from the test
+ // `span_overlap_label`, we have the following annotations
+ // (written on distinct lines for clarity):
+ //
+ // fn foo(x: u32) {
+ // --------------
+ // -
+ //
+        // In this case, we can't stick the rightmost label on
+ // the highlight line, or we would get:
+ //
+ // fn foo(x: u32) {
+ // -------- x_span
+ // |
+ // fn_span
+ //
+ // which is totally weird. Instead we want:
+ //
+ // fn foo(x: u32) {
+ // --------------
+ // | |
+ // | x_span
+ // fn_span
+ //
+ // which is...less weird, at least. In fact, in general, if
+ // the rightmost span overlaps with any other span, we should
+ // use the "hang below" version, so we can at least make it
+ // clear where the span *starts*. There's an exception for this
+ // logic, when the labels do not have a message:
+ //
+ // fn foo(x: u32) {
+ // --------------
+ // |
+ // x_span
+ //
+ // instead of:
+ //
+ // fn foo(x: u32) {
+ // --------------
+ // | |
+ // | x_span
+ // <EMPTY LINE>
+ //
+ let mut annotations_position = vec![];
+ let mut line_len = 0;
+ let mut p = 0;
+ for (i, annotation) in annotations.iter().enumerate() {
+ for (j, next) in annotations.iter().enumerate() {
+ if overlaps(next, annotation, 0) // This label overlaps with another one and both
+ && annotation.has_label() // take space (they have text and are not
+ && j > i // multiline lines).
+ && p == 0
+ // We're currently on the first line, move the label one line down
+ {
+ // If we're overlapping with an un-labelled annotation with the same span
+ // we can just merge them in the output
+ if next.start_col == annotation.start_col
+ && next.end_col == annotation.end_col
+ && !next.has_label()
+ {
+ continue;
+ }
+
+ // This annotation needs a new line in the output.
+ p += 1;
+ break;
+ }
+ }
+ annotations_position.push((p, annotation));
+ for (j, next) in annotations.iter().enumerate() {
+ if j > i {
+ let l = next.label.as_ref().map_or(0, |label| label.len() + 2);
+ if (overlaps(next, annotation, l) // Do not allow two labels to be in the same
+ // line if they overlap including padding, to
+ // avoid situations like:
+ //
+ // fn foo(x: u32) {
+ // -------^------
+ // | |
+ // fn_spanx_span
+ //
+ && annotation.has_label() // Both labels must have some text, otherwise
+ && next.has_label()) // they are not overlapping.
+ // Do not add a new line if this annotation
+ // or the next are vertical line placeholders.
+ || (annotation.takes_space() // If either this or the next annotation is
+ && next.has_label()) // multiline start/end, move it to a new line
+ || (annotation.has_label() // so as not to overlap the horizontal lines.
+ && next.takes_space())
+ || (annotation.takes_space() && next.takes_space())
+ || (overlaps(next, annotation, l)
+ && next.end_col <= annotation.end_col
+ && next.has_label()
+ && p == 0)
+ // Avoid #42595.
+ {
+ // This annotation needs a new line in the output.
+ p += 1;
+ break;
+ }
+ }
+ }
+ line_len = max(line_len, p);
+ }
+
+ if line_len != 0 {
+ line_len += 1;
+ }
+
+ // If there are no annotations or the only annotations on this line are
+ // MultilineLine, then there's only code being shown, stop processing.
+ if line.annotations.iter().all(|a| a.is_line()) {
+ return vec![];
+ }
+
+ // Write the column separator.
+ //
+ // After this we will have:
+ //
+ // 2 | fn foo() {
+ // |
+ // |
+ // |
+ // 3 |
+ // 4 | }
+ // |
+ for pos in 0..=line_len {
+ draw_col_separator(buffer, line_offset + pos + 1, width_offset - 2);
+ }
+
+ // Write the horizontal lines for multiline annotations
+ // (only the first and last lines need this).
+ //
+ // After this we will have:
+ //
+ // 2 | fn foo() {
+ // | __________
+ // |
+ // |
+ // 3 |
+ // 4 | }
+ // | _
+ for &(pos, annotation) in &annotations_position {
+ let style = if annotation.is_primary {
+ Style::UnderlinePrimary
+ } else {
+ Style::UnderlineSecondary
+ };
+ let pos = pos + 1;
+ match annotation.annotation_type {
+ AnnotationType::MultilineStart(depth) | AnnotationType::MultilineEnd(depth) => {
+ draw_range(
+ buffer,
+ '_',
+ line_offset + pos,
+ width_offset + depth,
+ (code_offset + annotation.start_col).saturating_sub(left),
+ style,
+ );
+ }
+ _ if self.teach => {
+ buffer.set_style_range(
+ line_offset,
+ (code_offset + annotation.start_col).saturating_sub(left),
+ (code_offset + annotation.end_col).saturating_sub(left),
+ style,
+ annotation.is_primary,
+ );
+ }
+ _ => {}
+ }
+ }
+
+ // Write the vertical lines for labels that are on a different line as the underline.
+ //
+ // After this we will have:
+ //
+ // 2 | fn foo() {
+ // | __________
+ // | | |
+ // | |
+ // 3 | |
+ // 4 | | }
+ // | |_
+ for &(pos, annotation) in &annotations_position {
+ let style = if annotation.is_primary {
+ Style::UnderlinePrimary
+ } else {
+ Style::UnderlineSecondary
+ };
+ let pos = pos + 1;
+
+ if pos > 1 && (annotation.has_label() || annotation.takes_space()) {
+ for p in line_offset + 1..=line_offset + pos {
+ buffer.putc(
+ p,
+ (code_offset + annotation.start_col).saturating_sub(left),
+ '|',
+ style,
+ );
+ }
+ }
+ match annotation.annotation_type {
+ AnnotationType::MultilineStart(depth) => {
+ for p in line_offset + pos + 1..line_offset + line_len + 2 {
+ buffer.putc(p, width_offset + depth - 1, '|', style);
+ }
+ }
+ AnnotationType::MultilineEnd(depth) => {
+ for p in line_offset..=line_offset + pos {
+ buffer.putc(p, width_offset + depth - 1, '|', style);
+ }
+ }
+ _ => (),
+ }
+ }
+
+ // Write the labels on the annotations that actually have a label.
+ //
+ // After this we will have:
+ //
+ // 2 | fn foo() {
+ // | __________
+ // | |
+ // | something about `foo`
+ // 3 |
+ // 4 | }
+ // | _ test
+ for &(pos, annotation) in &annotations_position {
+ let style =
+ if annotation.is_primary { Style::LabelPrimary } else { Style::LabelSecondary };
+ let (pos, col) = if pos == 0 {
+ (pos + 1, (annotation.end_col + 1).saturating_sub(left))
+ } else {
+ (pos + 2, annotation.start_col.saturating_sub(left))
+ };
+ if let Some(ref label) = annotation.label {
+ buffer.puts(line_offset + pos, code_offset + col, &label, style);
+ }
+ }
+
+ // Sort from biggest span to smallest span so that smaller spans are
+ // represented in the output:
+ //
+ // x | fn foo()
+ // | ^^^---^^
+ // | | |
+ // | | something about `foo`
+ // | something about `fn foo()`
+ annotations_position.sort_by_key(|(_, ann)| {
+ // Decreasing order. When annotations share the same length, prefer `Primary`.
+ (Reverse(ann.len()), ann.is_primary)
+ });
+
+ // Write the underlines.
+ //
+ // After this we will have:
+ //
+ // 2 | fn foo() {
+ // | ____-_____^
+ // | |
+ // | something about `foo`
+ // 3 |
+ // 4 | }
+ // | _^ test
+ for &(_, annotation) in &annotations_position {
+ let (underline, style) = if annotation.is_primary {
+ ('^', Style::UnderlinePrimary)
+ } else {
+ ('-', Style::UnderlineSecondary)
+ };
+ for p in annotation.start_col..annotation.end_col {
+ buffer.putc(
+ line_offset + 1,
+ (code_offset + p).saturating_sub(left),
+ underline,
+ style,
+ );
+ }
+ }
+ annotations_position
+ .iter()
+ .filter_map(|&(_, annotation)| match annotation.annotation_type {
+ AnnotationType::MultilineStart(p) | AnnotationType::MultilineEnd(p) => {
+ let style = if annotation.is_primary {
+ Style::LabelPrimary
+ } else {
+ Style::LabelSecondary
+ };
+ Some((p, style))
+ }
+ _ => None,
+ })
+ .collect::<Vec<_>>()
+ }
+
+ fn get_multispan_max_line_num(&mut self, msp: &MultiSpan) -> usize {
+ let Some(ref sm) = self.sm else {
+ return 0;
+ };
+
+ let will_be_emitted = |span: Span| {
+ !span.is_dummy() && {
+ let file = sm.lookup_source_file(span.hi());
+ sm.ensure_source_file_source_present(file)
+ }
+ };
+
+ let mut max = 0;
+ for primary_span in msp.primary_spans() {
+ if will_be_emitted(*primary_span) {
+ let hi = sm.lookup_char_pos(primary_span.hi());
+ max = (hi.line).max(max);
+ }
+ }
+ if !self.short_message {
+ for span_label in msp.span_labels() {
+ if will_be_emitted(span_label.span) {
+ let hi = sm.lookup_char_pos(span_label.span.hi());
+ max = (hi.line).max(max);
+ }
+ }
+ }
+
+ max
+ }
+
+ fn get_max_line_num(&mut self, span: &MultiSpan, children: &[SubDiagnostic]) -> usize {
+ let primary = self.get_multispan_max_line_num(span);
+ children
+ .iter()
+ .map(|sub| self.get_multispan_max_line_num(&sub.span))
+ .max()
+ .unwrap_or(0)
+ .max(primary)
+ }
+
+ /// Adds a left margin to every line but the first, given a padding length and the label being
+ /// displayed, keeping the provided highlighting.
+ fn msg_to_buffer(
+ &self,
+ buffer: &mut StyledBuffer,
+ msg: &[(DiagnosticMessage, Style)],
+ args: &FluentArgs<'_>,
+ padding: usize,
+ label: &str,
+ override_style: Option<Style>,
+ ) {
+ // The extra 5 ` ` is padding that's always needed to align to the `note: `:
+ //
+ // error: message
+ // --> file.rs:13:20
+ // |
+ // 13 | <CODE>
+ // | ^^^^
+ // |
+ // = note: multiline
+ // message
+ // ++^^^----xx
+ // | | | |
+ // | | | magic `2`
+ // | | length of label
+ // | magic `3`
+ // `max_line_num_len`
+ let padding = " ".repeat(padding + label.len() + 5);
+
+ /// Returns `override` if it is present and `style` is `NoStyle` or `style` otherwise
+ fn style_or_override(style: Style, override_: Option<Style>) -> Style {
+ match (style, override_) {
+ (Style::NoStyle, Some(override_)) => override_,
+ _ => style,
+ }
+ }
+
+ let mut line_number = 0;
+
+ // Provided the following diagnostic message:
+ //
+ // let msg = vec![
+ // ("highlighted multiline\nstring to\nsee how it ", Style::NoStyle),
+ // ("looks", Style::Highlight),
+ // ("with\nvery ", Style::NoStyle),
+ // ("weird", Style::Highlight),
+ // (" formats\n", Style::NoStyle),
+ // ("see?", Style::Highlight),
+ // ];
+ //
+ // the expected output on a note is (* surround the highlighted text)
+ //
+ // = note: highlighted multiline
+ // string to
+ // see how it *looks* with
+ // very *weird* formats
+ // see?
+ for &(ref text, ref style) in msg.iter() {
+ let text = self.translate_message(text, args);
+ let lines = text.split('\n').collect::<Vec<_>>();
+ if lines.len() > 1 {
+ for (i, line) in lines.iter().enumerate() {
+ if i != 0 {
+ line_number += 1;
+ buffer.append(line_number, &padding, Style::NoStyle);
+ }
+ buffer.append(line_number, line, style_or_override(*style, override_style));
+ }
+ } else {
+ buffer.append(line_number, &text, style_or_override(*style, override_style));
+ }
+ }
+ }
+
+ fn emit_message_default(
+ &mut self,
+ msp: &MultiSpan,
+ msg: &[(DiagnosticMessage, Style)],
+ args: &FluentArgs<'_>,
+ code: &Option<DiagnosticId>,
+ level: &Level,
+ max_line_num_len: usize,
+ is_secondary: bool,
+ ) -> io::Result<()> {
+ let mut buffer = StyledBuffer::new();
+
+ if !msp.has_primary_spans() && !msp.has_span_labels() && is_secondary && !self.short_message
+ {
+ // This is a secondary message with no span info
+ for _ in 0..max_line_num_len {
+ buffer.prepend(0, " ", Style::NoStyle);
+ }
+ draw_note_separator(&mut buffer, 0, max_line_num_len + 1);
+ if *level != Level::FailureNote {
+ buffer.append(0, level.to_str(), Style::MainHeaderMsg);
+ buffer.append(0, ": ", Style::NoStyle);
+ }
+ self.msg_to_buffer(&mut buffer, msg, args, max_line_num_len, "note", None);
+ } else {
+ let mut label_width = 0;
+ // The failure note level itself does not provide any useful diagnostic information
+ if *level != Level::FailureNote {
+ buffer.append(0, level.to_str(), Style::Level(*level));
+ label_width += level.to_str().len();
+ }
+ // only render error codes, not lint codes
+ if let Some(DiagnosticId::Error(ref code)) = *code {
+ buffer.append(0, "[", Style::Level(*level));
+ buffer.append(0, &code, Style::Level(*level));
+ buffer.append(0, "]", Style::Level(*level));
+ label_width += 2 + code.len();
+ }
+ let header_style = if is_secondary { Style::HeaderMsg } else { Style::MainHeaderMsg };
+ if *level != Level::FailureNote {
+ buffer.append(0, ": ", header_style);
+ label_width += 2;
+ }
+ for &(ref text, _) in msg.iter() {
+ let text = self.translate_message(text, args);
+ // Account for newlines to align output to its label.
+ for (line, text) in normalize_whitespace(&text).lines().enumerate() {
+ buffer.append(
+ 0 + line,
+ &format!(
+ "{}{}",
+ if line == 0 { String::new() } else { " ".repeat(label_width) },
+ text
+ ),
+ header_style,
+ );
+ }
+ }
+ }
+
+ let mut annotated_files = FileWithAnnotatedLines::collect_annotations(self, args, msp);
+
+ // Make sure our primary file comes first
+ let (primary_lo, sm) = if let (Some(sm), Some(ref primary_span)) =
+ (self.sm.as_ref(), msp.primary_span().as_ref())
+ {
+ if !primary_span.is_dummy() {
+ (sm.lookup_char_pos(primary_span.lo()), sm)
+ } else {
+ emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?;
+ return Ok(());
+ }
+ } else {
+ // If we don't have span information, emit and exit
+ emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?;
+ return Ok(());
+ };
+ if let Ok(pos) =
+ annotated_files.binary_search_by(|x| x.file.name.cmp(&primary_lo.file.name))
+ {
+ annotated_files.swap(0, pos);
+ }
+
+ // Print out the annotate source lines that correspond with the error
+ for annotated_file in annotated_files {
+ // we can't annotate anything if the source is unavailable.
+ if !sm.ensure_source_file_source_present(annotated_file.file.clone()) {
+ continue;
+ }
+
+ // print out the span location and spacer before we print the annotated source
+ // to do this, we need to know if this span will be primary
+ let is_primary = primary_lo.file.name == annotated_file.file.name;
+ if is_primary {
+ let loc = primary_lo.clone();
+ if !self.short_message {
+ // remember where we are in the output buffer for easy reference
+ let buffer_msg_line_offset = buffer.num_lines();
+
+ buffer.prepend(buffer_msg_line_offset, "--> ", Style::LineNumber);
+ buffer.append(
+ buffer_msg_line_offset,
+ &format!(
+ "{}:{}:{}",
+ sm.filename_for_diagnostics(&loc.file.name),
+ sm.doctest_offset_line(&loc.file.name, loc.line),
+ loc.col.0 + 1,
+ ),
+ Style::LineAndColumn,
+ );
+ for _ in 0..max_line_num_len {
+ buffer.prepend(buffer_msg_line_offset, " ", Style::NoStyle);
+ }
+ } else {
+ buffer.prepend(
+ 0,
+ &format!(
+ "{}:{}:{}: ",
+ sm.filename_for_diagnostics(&loc.file.name),
+ sm.doctest_offset_line(&loc.file.name, loc.line),
+ loc.col.0 + 1,
+ ),
+ Style::LineAndColumn,
+ );
+ }
+ } else if !self.short_message {
+ // remember where we are in the output buffer for easy reference
+ let buffer_msg_line_offset = buffer.num_lines();
+
+ // Add spacing line
+ draw_col_separator_no_space(
+ &mut buffer,
+ buffer_msg_line_offset,
+ max_line_num_len + 1,
+ );
+
+ // Then, the secondary file indicator
+ buffer.prepend(buffer_msg_line_offset + 1, "::: ", Style::LineNumber);
+ let loc = if let Some(first_line) = annotated_file.lines.first() {
+ let col = if let Some(first_annotation) = first_line.annotations.first() {
+ format!(":{}", first_annotation.start_col + 1)
+ } else {
+ String::new()
+ };
+ format!(
+ "{}:{}{}",
+ sm.filename_for_diagnostics(&annotated_file.file.name),
+ sm.doctest_offset_line(&annotated_file.file.name, first_line.line_index),
+ col
+ )
+ } else {
+ format!("{}", sm.filename_for_diagnostics(&annotated_file.file.name))
+ };
+ buffer.append(buffer_msg_line_offset + 1, &loc, Style::LineAndColumn);
+ for _ in 0..max_line_num_len {
+ buffer.prepend(buffer_msg_line_offset + 1, " ", Style::NoStyle);
+ }
+ }
+
+ if !self.short_message {
+ // Put in the spacer between the location and annotated source
+ let buffer_msg_line_offset = buffer.num_lines();
+ draw_col_separator_no_space(
+ &mut buffer,
+ buffer_msg_line_offset,
+ max_line_num_len + 1,
+ );
+
+ // Contains the vertical lines' positions for active multiline annotations
+ let mut multilines = FxHashMap::default();
+
+ // Get the left-side margin to remove it
+ let mut whitespace_margin = usize::MAX;
+ for line_idx in 0..annotated_file.lines.len() {
+ let file = annotated_file.file.clone();
+ let line = &annotated_file.lines[line_idx];
+ if let Some(source_string) = file.get_line(line.line_index - 1) {
+ let leading_whitespace = source_string
+ .chars()
+ .take_while(|c| c.is_whitespace())
+ .map(|c| {
+ match c {
+ // Tabs are displayed as 4 spaces
+ '\t' => 4,
+ _ => 1,
+ }
+ })
+ .sum();
+ if source_string.chars().any(|c| !c.is_whitespace()) {
+ whitespace_margin = min(whitespace_margin, leading_whitespace);
+ }
+ }
+ }
+ if whitespace_margin == usize::MAX {
+ whitespace_margin = 0;
+ }
+
+ // Left-most column any visible span points at.
+ let mut span_left_margin = usize::MAX;
+ for line in &annotated_file.lines {
+ for ann in &line.annotations {
+ span_left_margin = min(span_left_margin, ann.start_col);
+ span_left_margin = min(span_left_margin, ann.end_col);
+ }
+ }
+ if span_left_margin == usize::MAX {
+ span_left_margin = 0;
+ }
+
+ // Right-most column any visible span points at.
+ let mut span_right_margin = 0;
+ let mut label_right_margin = 0;
+ let mut max_line_len = 0;
+ for line in &annotated_file.lines {
+ max_line_len = max(
+ max_line_len,
+ annotated_file.file.get_line(line.line_index - 1).map_or(0, |s| s.len()),
+ );
+ for ann in &line.annotations {
+ span_right_margin = max(span_right_margin, ann.start_col);
+ span_right_margin = max(span_right_margin, ann.end_col);
+ // FIXME: account for labels not in the same line
+ let label_right = ann.label.as_ref().map_or(0, |l| l.len() + 1);
+ label_right_margin = max(label_right_margin, ann.end_col + label_right);
+ }
+ }
+
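+                // `width_offset` accounts for the line number gutter plus the ` | `
+                // separator; `code_offset` additionally reserves room for the vertical
+                // bars of any multiline annotations to the left of the code.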
+ let width_offset = 3 + max_line_num_len;
+ let code_offset = if annotated_file.multiline_depth == 0 {
+ width_offset
+ } else {
+ width_offset + annotated_file.multiline_depth + 1
+ };
+
+ let column_width = if let Some(width) = self.diagnostic_width {
+ width.saturating_sub(code_offset)
+ } else if self.ui_testing {
+ DEFAULT_COLUMN_WIDTH
+ } else {
+ termize::dimensions()
+ .map(|(w, _)| w.saturating_sub(code_offset))
+ .unwrap_or(DEFAULT_COLUMN_WIDTH)
+ };
+
+ let margin = Margin::new(
+ whitespace_margin,
+ span_left_margin,
+ span_right_margin,
+ label_right_margin,
+ column_width,
+ max_line_len,
+ );
+
+            // Next, output the annotated source for this file
+ for line_idx in 0..annotated_file.lines.len() {
+ let previous_buffer_line = buffer.num_lines();
+
+ let depths = self.render_source_line(
+ &mut buffer,
+ annotated_file.file.clone(),
+ &annotated_file.lines[line_idx],
+ width_offset,
+ code_offset,
+ margin,
+ );
+
+ let mut to_add = FxHashMap::default();
+
+ for (depth, style) in depths {
+ if multilines.remove(&depth).is_none() {
+ to_add.insert(depth, style);
+ }
+ }
+
+ // Set the multiline annotation vertical lines to the left of
+ // the code in this line.
+ for (depth, style) in &multilines {
+ for line in previous_buffer_line..buffer.num_lines() {
+ draw_multiline_line(&mut buffer, line, width_offset, *depth, *style);
+ }
+ }
+ // check to see if we need to print out or elide lines that come between
+ // this annotated line and the next one.
+ if line_idx < (annotated_file.lines.len() - 1) {
+ let line_idx_delta = annotated_file.lines[line_idx + 1].line_index
+ - annotated_file.lines[line_idx].line_index;
+ if line_idx_delta > 2 {
+ let last_buffer_line_num = buffer.num_lines();
+ buffer.puts(last_buffer_line_num, 0, "...", Style::LineNumber);
+
+ // Set the multiline annotation vertical lines on `...` bridging line.
+ for (depth, style) in &multilines {
+ draw_multiline_line(
+ &mut buffer,
+ last_buffer_line_num,
+ width_offset,
+ *depth,
+ *style,
+ );
+ }
+ } else if line_idx_delta == 2 {
+ let unannotated_line = annotated_file
+ .file
+ .get_line(annotated_file.lines[line_idx].line_index)
+ .unwrap_or_else(|| Cow::from(""));
+
+ let last_buffer_line_num = buffer.num_lines();
+
+ self.draw_line(
+ &mut buffer,
+ &normalize_whitespace(&unannotated_line),
+ annotated_file.lines[line_idx + 1].line_index - 1,
+ last_buffer_line_num,
+ width_offset,
+ code_offset,
+ margin,
+ );
+
+ for (depth, style) in &multilines {
+ draw_multiline_line(
+ &mut buffer,
+ last_buffer_line_num,
+ width_offset,
+ *depth,
+ *style,
+ );
+ }
+ }
+ }
+
+ multilines.extend(&to_add);
+ }
+ }
+ }
+
+ // final step: take our styled buffer, render it, then output it
+ emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?;
+
+ Ok(())
+ }
+
+ fn emit_suggestion_default(
+ &mut self,
+ span: &MultiSpan,
+ suggestion: &CodeSuggestion,
+ args: &FluentArgs<'_>,
+ level: &Level,
+ max_line_num_len: usize,
+ ) -> io::Result<()> {
+ let Some(ref sm) = self.sm else {
+ return Ok(());
+ };
+
+ // Render the replacements for each suggestion
+ let suggestions = suggestion.splice_lines(&**sm);
+ debug!("emit_suggestion_default: suggestions={:?}", suggestions);
+
+ if suggestions.is_empty() {
+            // Suggestions coming from macros can have malformed spans. This is a heavy-handed
+            // approach that avoids ICEs by ignoring the suggestion outright.
+ return Ok(());
+ }
+
+ let mut buffer = StyledBuffer::new();
+
+ // Render the suggestion message
+ buffer.append(0, level.to_str(), Style::Level(*level));
+ buffer.append(0, ": ", Style::HeaderMsg);
+
+ self.msg_to_buffer(
+ &mut buffer,
+ &[(suggestion.msg.to_owned(), Style::NoStyle)],
+ args,
+ max_line_num_len,
+ "suggestion",
+ Some(Style::HeaderMsg),
+ );
+
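+        // Row 0 holds the suggestion message and row 1 the column separator, so
+        // (for a single-line message) the first rendered code line is row 2.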
+ let mut row_num = 2;
+ draw_col_separator_no_space(&mut buffer, 1, max_line_num_len + 1);
+ let mut notice_capitalization = false;
+ for (complete, parts, highlights, only_capitalization) in
+ suggestions.iter().take(MAX_SUGGESTIONS)
+ {
+ notice_capitalization |= only_capitalization;
+
+ let has_deletion = parts.iter().any(|p| p.is_deletion());
+ let is_multiline = complete.lines().count() > 1;
+
+ if let Some(span) = span.primary_span() {
+ // Compare the primary span of the diagnostic with the span of the suggestion
+ // being emitted. If they belong to the same file, we don't *need* to show the
+                // file name, saving verbosity, but if they *don't* we do need it; otherwise we'd
+                // be telling users to make a change without clarifying *where*.
+ let loc = sm.lookup_char_pos(parts[0].span.lo());
+ if loc.file.name != sm.span_to_filename(span) && loc.file.name.is_real() {
+ buffer.puts(row_num - 1, 0, "--> ", Style::LineNumber);
+ buffer.append(
+ row_num - 1,
+ &format!(
+ "{}:{}:{}",
+ sm.filename_for_diagnostics(&loc.file.name),
+ sm.doctest_offset_line(&loc.file.name, loc.line),
+ loc.col.0 + 1,
+ ),
+ Style::LineAndColumn,
+ );
+ for _ in 0..max_line_num_len {
+ buffer.prepend(row_num - 1, " ", Style::NoStyle);
+ }
+ row_num += 1;
+ }
+ }
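+            // Decide how to display the suggestion: `Diff` renders the removed line
+            // above the suggested one with `-`/`+` markers, `Underline` marks the
+            // changed columns beneath the suggestion, and `None` shows it verbatim.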
+ let show_code_change = if has_deletion && !is_multiline {
+ DisplaySuggestion::Diff
+ } else if (parts.len() != 1 || parts[0].snippet.trim() != complete.trim())
+ && !is_multiline
+ {
+ DisplaySuggestion::Underline
+ } else {
+ DisplaySuggestion::None
+ };
+
+ if let DisplaySuggestion::Diff = show_code_change {
+ row_num += 1;
+ }
+
+ let file_lines = sm
+ .span_to_lines(parts[0].span)
+ .expect("span_to_lines failed when emitting suggestion");
+
+ assert!(!file_lines.lines.is_empty() || parts[0].span.is_dummy());
+
+ let line_start = sm.lookup_char_pos(parts[0].span.lo()).line;
+ draw_col_separator_no_space(&mut buffer, row_num - 1, max_line_num_len + 1);
+ let mut lines = complete.lines();
+ if lines.clone().next().is_none() {
+                // Account for a suggestion to completely remove one or more whitespace-only lines (#94192).
+ let line_end = sm.lookup_char_pos(parts[0].span.hi()).line;
+ for line in line_start..=line_end {
+ buffer.puts(
+ row_num - 1 + line - line_start,
+ 0,
+ &self.maybe_anonymized(line),
+ Style::LineNumber,
+ );
+ buffer.puts(
+ row_num - 1 + line - line_start,
+ max_line_num_len + 1,
+ "- ",
+ Style::Removal,
+ );
+ buffer.puts(
+ row_num - 1 + line - line_start,
+ max_line_num_len + 3,
+ &normalize_whitespace(&*file_lines.file.get_line(line - 1).unwrap()),
+ Style::Removal,
+ );
+ }
+ row_num += line_end - line_start;
+ }
+ let mut unhighlighted_lines = Vec::new();
+ for (line_pos, (line, highlight_parts)) in lines.by_ref().zip(highlights).enumerate() {
+ debug!(%line_pos, %line, ?highlight_parts);
+
+ // Remember lines that are not highlighted to hide them if needed
+ if highlight_parts.is_empty() {
+ unhighlighted_lines.push((line_pos, line));
+ continue;
+ }
+
+ match unhighlighted_lines.len() {
+ 0 => (),
+                    // Since we show the first line, a "..." line, and the last line,
+                    // there is no reason to hide anything if there are 3 or fewer lines
+                    // (we would just be replacing a line with "...", which is
+                    // not helpful).
+ n if n <= 3 => unhighlighted_lines.drain(..).for_each(|(p, l)| {
+ self.draw_code_line(
+ &mut buffer,
+ &mut row_num,
+ &Vec::new(),
+ p,
+ l,
+ line_start,
+ show_code_change,
+ max_line_num_len,
+ &file_lines,
+ is_multiline,
+ )
+ }),
+ // Print first unhighlighted line, "..." and last unhighlighted line, like so:
+ //
+ // LL | this line was highlighted
+ // LL | this line is just for context
+ // ...
+ // LL | this line is just for context
+ // LL | this line was highlighted
+ _ => {
+ let last_line = unhighlighted_lines.pop();
+ let first_line = unhighlighted_lines.drain(..).next();
+
+ first_line.map(|(p, l)| {
+ self.draw_code_line(
+ &mut buffer,
+ &mut row_num,
+ &Vec::new(),
+ p,
+ l,
+ line_start,
+ show_code_change,
+ max_line_num_len,
+ &file_lines,
+ is_multiline,
+ )
+ });
+
+ buffer.puts(row_num, max_line_num_len - 1, "...", Style::LineNumber);
+ row_num += 1;
+
+ last_line.map(|(p, l)| {
+ self.draw_code_line(
+ &mut buffer,
+ &mut row_num,
+ &Vec::new(),
+ p,
+ l,
+ line_start,
+ show_code_change,
+ max_line_num_len,
+ &file_lines,
+ is_multiline,
+ )
+ });
+ }
+ }
+
+ self.draw_code_line(
+ &mut buffer,
+ &mut row_num,
+ highlight_parts,
+ line_pos,
+ line,
+ line_start,
+ show_code_change,
+ max_line_num_len,
+ &file_lines,
+ is_multiline,
+ )
+ }
+
+ // This offset and the ones below need to be signed to account for replacement code
+ // that is shorter than the original code.
+ let mut offsets: Vec<(usize, isize)> = Vec::new();
+ // Only show an underline in the suggestions if the suggestion is not the
+ // entirety of the code being shown and the displayed code is not multiline.
+ if let DisplaySuggestion::Diff | DisplaySuggestion::Underline = show_code_change {
+ draw_col_separator_no_space(&mut buffer, row_num, max_line_num_len + 1);
+ for part in parts {
+ let span_start_pos = sm.lookup_char_pos(part.span.lo()).col_display;
+ let span_end_pos = sm.lookup_char_pos(part.span.hi()).col_display;
+
+ // Do not underline the leading...
+ let start = part.snippet.len().saturating_sub(part.snippet.trim_start().len());
+ // ...or trailing spaces. Account for substitutions containing unicode
+ // characters.
+ let sub_len: usize = part
+ .snippet
+ .trim()
+ .chars()
+ .map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1))
+ .sum();
+
+ let offset: isize = offsets
+ .iter()
+ .filter_map(
+ |(start, v)| if span_start_pos <= *start { None } else { Some(v) },
+ )
+ .sum();
+ let underline_start = (span_start_pos + start) as isize + offset;
+ let underline_end = (span_start_pos + start + sub_len) as isize + offset;
+ assert!(underline_start >= 0 && underline_end >= 0);
+ let padding: usize = max_line_num_len + 3;
+ for p in underline_start..underline_end {
+ if let DisplaySuggestion::Underline = show_code_change {
+                        // If this is an addition, underline with `+`; if it is a
+                        // replacement, underline with `~`.
+ buffer.putc(
+ row_num,
+ (padding as isize + p) as usize,
+ if part.is_addition(&sm) { '+' } else { '~' },
+ Style::Addition,
+ );
+ }
+ }
+ if let DisplaySuggestion::Diff = show_code_change {
+ // Colorize removal with red in diff format.
+ buffer.set_style_range(
+ row_num - 2,
+ (padding as isize + span_start_pos as isize) as usize,
+ (padding as isize + span_end_pos as isize) as usize,
+ Style::Removal,
+ true,
+ );
+ }
+
+ // length of the code after substitution
+ let full_sub_len = part
+ .snippet
+ .chars()
+ .map(|ch| unicode_width::UnicodeWidthChar::width(ch).unwrap_or(1))
+ .sum::<usize>() as isize;
+
+ // length of the code to be substituted
+ let snippet_len = span_end_pos as isize - span_start_pos as isize;
+                    // For multiple substitutions, record each part's end position and length
+                    // delta; a later part only applies a delta when it starts strictly after
+                    // the end of the part that produced it.
+ offsets.push((span_end_pos, full_sub_len - snippet_len));
+ }
+ row_num += 1;
+ }
+
+ // if we elided some lines, add an ellipsis
+ if lines.next().is_some() {
+ buffer.puts(row_num, max_line_num_len - 1, "...", Style::LineNumber);
+ } else if let DisplaySuggestion::None = show_code_change {
+ draw_col_separator_no_space(&mut buffer, row_num, max_line_num_len + 1);
+ row_num += 1;
+ }
+ }
+ if suggestions.len() > MAX_SUGGESTIONS {
+ let others = suggestions.len() - MAX_SUGGESTIONS;
+ let msg = format!("and {} other candidate{}", others, pluralize!(others));
+ buffer.puts(row_num, max_line_num_len + 3, &msg, Style::NoStyle);
+ } else if notice_capitalization {
+ let msg = "notice the capitalization difference";
+ buffer.puts(row_num, max_line_num_len + 3, &msg, Style::NoStyle);
+ }
+ emit_to_destination(&buffer.render(), level, &mut self.dst, self.short_message)?;
+ Ok(())
+ }
+
+ fn emit_messages_default(
+ &mut self,
+ level: &Level,
+ message: &[(DiagnosticMessage, Style)],
+ args: &FluentArgs<'_>,
+ code: &Option<DiagnosticId>,
+ span: &MultiSpan,
+ children: &[SubDiagnostic],
+ suggestions: &[CodeSuggestion],
+ ) {
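+        // The gutter width is dictated by the widest line number we will print;
+        // under UI testing, line numbers are anonymized to a fixed-width
+        // placeholder so test output stays stable.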
+ let max_line_num_len = if self.ui_testing {
+ ANONYMIZED_LINE_NUM.len()
+ } else {
+ let n = self.get_max_line_num(span, children);
+ num_decimal_digits(n)
+ };
+
+ match self.emit_message_default(span, message, args, code, level, max_line_num_len, false) {
+ Ok(()) => {
+ if !children.is_empty()
+ || suggestions.iter().any(|s| s.style != SuggestionStyle::CompletelyHidden)
+ {
+ let mut buffer = StyledBuffer::new();
+ if !self.short_message {
+ draw_col_separator_no_space(&mut buffer, 0, max_line_num_len + 1);
+ }
+ if let Err(e) = emit_to_destination(
+ &buffer.render(),
+ level,
+ &mut self.dst,
+ self.short_message,
+ ) {
+ panic!("failed to emit error: {}", e)
+ }
+ }
+ if !self.short_message {
+ for child in children {
+ let span = child.render_span.as_ref().unwrap_or(&child.span);
+ if let Err(err) = self.emit_message_default(
+ &span,
+ &child.message,
+ args,
+ &None,
+ &child.level,
+ max_line_num_len,
+ true,
+ ) {
+ panic!("failed to emit error: {}", err);
+ }
+ }
+ for sugg in suggestions {
+ if sugg.style == SuggestionStyle::CompletelyHidden {
+ // do not display this suggestion, it is meant only for tools
+ } else if sugg.style == SuggestionStyle::HideCodeAlways {
+ if let Err(e) = self.emit_message_default(
+ &MultiSpan::new(),
+ &[(sugg.msg.to_owned(), Style::HeaderMsg)],
+ args,
+ &None,
+ &Level::Help,
+ max_line_num_len,
+ true,
+ ) {
+ panic!("failed to emit error: {}", e);
+ }
+ } else if let Err(e) = self.emit_suggestion_default(
+ span,
+ sugg,
+ args,
+ &Level::Help,
+ max_line_num_len,
+ ) {
+ panic!("failed to emit error: {}", e);
+ };
+ }
+ }
+ }
+ Err(e) => panic!("failed to emit error: {}", e),
+ }
+
+ let mut dst = self.dst.writable();
+ match writeln!(dst) {
+ Err(e) => panic!("failed to emit error: {}", e),
+ _ => {
+ if let Err(e) = dst.flush() {
+ panic!("failed to emit error: {}", e)
+ }
+ }
+ }
+ }
+
+ fn draw_code_line(
+ &self,
+ buffer: &mut StyledBuffer,
+ row_num: &mut usize,
+ highlight_parts: &Vec<SubstitutionHighlight>,
+ line_pos: usize,
+ line: &str,
+ line_start: usize,
+ show_code_change: DisplaySuggestion,
+ max_line_num_len: usize,
+ file_lines: &FileLines,
+ is_multiline: bool,
+ ) {
+ // Print the span column to avoid confusion
+ buffer.puts(*row_num, 0, &self.maybe_anonymized(line_start + line_pos), Style::LineNumber);
+ if let DisplaySuggestion::Diff = show_code_change {
+ // Add the line number for both addition and removal to drive the point home.
+ //
+ // N - fn foo<A: T>(bar: A) {
+ // N + fn foo(bar: impl T) {
+ buffer.puts(
+ *row_num - 1,
+ 0,
+ &self.maybe_anonymized(line_start + line_pos),
+ Style::LineNumber,
+ );
+ buffer.puts(*row_num - 1, max_line_num_len + 1, "- ", Style::Removal);
+ buffer.puts(
+ *row_num - 1,
+ max_line_num_len + 3,
+ &normalize_whitespace(
+ &*file_lines.file.get_line(file_lines.lines[line_pos].line_index).unwrap(),
+ ),
+ Style::NoStyle,
+ );
+ buffer.puts(*row_num, max_line_num_len + 1, "+ ", Style::Addition);
+ } else if is_multiline {
+ match &highlight_parts[..] {
+ [SubstitutionHighlight { start: 0, end }] if *end == line.len() => {
+ buffer.puts(*row_num, max_line_num_len + 1, "+ ", Style::Addition);
+ }
+ [] => {
+ draw_col_separator(buffer, *row_num, max_line_num_len + 1);
+ }
+ _ => {
+ buffer.puts(*row_num, max_line_num_len + 1, "~ ", Style::Addition);
+ }
+ }
+ } else {
+ draw_col_separator(buffer, *row_num, max_line_num_len + 1);
+ }
+
+ // print the suggestion
+ buffer.append(*row_num, &normalize_whitespace(line), Style::NoStyle);
+
+ // Colorize addition/replacements with green.
+ for &SubstitutionHighlight { start, end } in highlight_parts {
+            // Account for tabs when highlighting (#87972): each tab is rendered as
+            // four spaces, widening the line by three extra columns.
+ let tabs: usize = line
+ .chars()
+ .take(start)
+ .map(|ch| match ch {
+ '\t' => 3,
+ _ => 0,
+ })
+ .sum();
+ buffer.set_style_range(
+ *row_num,
+ max_line_num_len + 3 + start + tabs,
+ max_line_num_len + 3 + end + tabs,
+ Style::Addition,
+ true,
+ );
+ }
+ *row_num += 1;
+ }
+}
+
+#[derive(Clone, Copy)]
+enum DisplaySuggestion {
+ Underline,
+ Diff,
+ None,
+}
+
+impl FileWithAnnotatedLines {
+ /// Preprocess all the annotations so that they are grouped by file and by line number
+ /// This helps us quickly iterate over the whole message (including secondary file spans)
+ pub fn collect_annotations(
+ emitter: &dyn Emitter,
+ args: &FluentArgs<'_>,
+ msp: &MultiSpan,
+ ) -> Vec<FileWithAnnotatedLines> {
+ fn add_annotation_to_file(
+ file_vec: &mut Vec<FileWithAnnotatedLines>,
+ file: Lrc<SourceFile>,
+ line_index: usize,
+ ann: Annotation,
+ ) {
+ for slot in file_vec.iter_mut() {
+ // Look through each of our files for the one we're adding to
+ if slot.file.name == file.name {
+ // See if we already have a line for it
+ for line_slot in &mut slot.lines {
+ if line_slot.line_index == line_index {
+ line_slot.annotations.push(ann);
+ return;
+ }
+ }
+ // We don't have a line yet, create one
+ slot.lines.push(Line { line_index, annotations: vec![ann] });
+ slot.lines.sort();
+ return;
+ }
+ }
+ // This is the first time we're seeing the file
+ file_vec.push(FileWithAnnotatedLines {
+ file,
+ lines: vec![Line { line_index, annotations: vec![ann] }],
+ multiline_depth: 0,
+ });
+ }
+
+ let mut output = vec![];
+ let mut multiline_annotations = vec![];
+
+ if let Some(ref sm) = emitter.source_map() {
+ for span_label in msp.span_labels() {
+ if span_label.span.is_dummy() {
+ continue;
+ }
+
+ let lo = sm.lookup_char_pos(span_label.span.lo());
+ let mut hi = sm.lookup_char_pos(span_label.span.hi());
+
+ // Watch out for "empty spans". If we get a span like 6..6, we
+ // want to just display a `^` at 6, so convert that to
+ // 6..7. This is degenerate input, but it's best to degrade
+ // gracefully -- and the parser likes to supply a span like
+ // that for EOF, in particular.
+
+ if lo.col_display == hi.col_display && lo.line == hi.line {
+ hi.col_display += 1;
+ }
+
+ if lo.line != hi.line {
+ let ml = MultilineAnnotation {
+ depth: 1,
+ line_start: lo.line,
+ line_end: hi.line,
+ start_col: lo.col_display,
+ end_col: hi.col_display,
+ is_primary: span_label.is_primary,
+ label: span_label
+ .label
+ .as_ref()
+ .map(|m| emitter.translate_message(m, args).to_string()),
+ overlaps_exactly: false,
+ };
+ multiline_annotations.push((lo.file, ml));
+ } else {
+ let ann = Annotation {
+ start_col: lo.col_display,
+ end_col: hi.col_display,
+ is_primary: span_label.is_primary,
+ label: span_label
+ .label
+ .as_ref()
+ .map(|m| emitter.translate_message(m, args).to_string()),
+ annotation_type: AnnotationType::Singleline,
+ };
+ add_annotation_to_file(&mut output, lo.file, lo.line, ann);
+ };
+ }
+ }
+
+ // Find overlapping multiline annotations, put them at different depths
+ multiline_annotations.sort_by_key(|&(_, ref ml)| (ml.line_start, ml.line_end));
+ for (_, ann) in multiline_annotations.clone() {
+ for (_, a) in multiline_annotations.iter_mut() {
+ // Move all other multiline annotations overlapping with this one
+ // one level to the right.
+ if !(ann.same_span(a))
+ && num_overlap(ann.line_start, ann.line_end, a.line_start, a.line_end, true)
+ {
+ a.increase_depth();
+ } else if ann.same_span(a) && &ann != a {
+ a.overlaps_exactly = true;
+ } else {
+ break;
+ }
+ }
+ }
+
+ let mut max_depth = 0; // max overlapping multiline spans
+ for (file, ann) in multiline_annotations {
+ max_depth = max(max_depth, ann.depth);
+ let mut end_ann = ann.as_end();
+ if !ann.overlaps_exactly {
+ // avoid output like
+ //
+ // | foo(
+ // | _____^
+ // | |_____|
+ // | || bar,
+ // | || );
+ // | || ^
+ // | ||______|
+ // | |______foo
+ // | baz
+ //
+ // and instead get
+ //
+ // | foo(
+ // | _____^
+ // | | bar,
+ // | | );
+ // | | ^
+ // | | |
+ // | |______foo
+ // | baz
+ add_annotation_to_file(&mut output, file.clone(), ann.line_start, ann.as_start());
+ // 4 is the minimum vertical length of a multiline span when presented: two lines
+ // of code and two lines of underline. This is not true for the special case where
+ // the beginning doesn't have an underline, but the current logic seems to be
+ // working correctly.
+ let middle = min(ann.line_start + 4, ann.line_end);
+ for line in ann.line_start + 1..middle {
+ // Every `|` that joins the beginning of the span (`___^`) to the end (`|__^`).
+ add_annotation_to_file(&mut output, file.clone(), line, ann.as_line());
+ }
+ let line_end = ann.line_end - 1;
+ if middle < line_end {
+ add_annotation_to_file(&mut output, file.clone(), line_end, ann.as_line());
+ }
+ } else {
+ end_ann.annotation_type = AnnotationType::Singleline;
+ }
+ add_annotation_to_file(&mut output, file, ann.line_end, end_ann);
+ }
+ for file_vec in output.iter_mut() {
+ file_vec.multiline_depth = max_depth;
+ }
+ output
+ }
+}
+
+// Instead of taking the string's length or repeatedly dividing by 10 while > 0, we multiply a
+// limit by 10 until it exceeds `num`. If the loop isn't exited by the `return`, the last
+// multiplication will wrap, which is OK: once we can no longer fit a higher power of 10 in a
+// `usize`, the loop will end anyway. This is also why we need the maximum number of decimal
+// digits that fit in a `usize`.
+fn num_decimal_digits(num: usize) -> usize {
+ #[cfg(target_pointer_width = "64")]
+ const MAX_DIGITS: usize = 20;
+
+ #[cfg(target_pointer_width = "32")]
+ const MAX_DIGITS: usize = 10;
+
+ #[cfg(target_pointer_width = "16")]
+ const MAX_DIGITS: usize = 5;
+
+ let mut lim = 10;
+ for num_digits in 1..MAX_DIGITS {
+ if num < lim {
+ return num_digits;
+ }
+ lim = lim.wrapping_mul(10);
+ }
+ MAX_DIGITS
+}
+
+// We replace some characters so the CLI output is always consistent and underlines stay aligned.
+const OUTPUT_REPLACEMENTS: &[(char, &str)] = &[
+ ('\t', " "), // We do our own tab replacement
+ ('\u{200D}', ""), // Replace ZWJ with nothing for consistent terminal output of grapheme clusters.
+ ('\u{202A}', ""), // The following unicode text flow control characters are inconsistently
+ ('\u{202B}', ""), // supported across CLIs and can cause confusion due to the bytes on disk
+ ('\u{202D}', ""), // not corresponding to the visible source code, so we replace them always.
+ ('\u{202E}', ""),
+ ('\u{2066}', ""),
+ ('\u{2067}', ""),
+ ('\u{2068}', ""),
+ ('\u{202C}', ""),
+ ('\u{2069}', ""),
+];
+
+fn normalize_whitespace(str: &str) -> String {
+ let mut s = str.to_string();
+ for (c, replacement) in OUTPUT_REPLACEMENTS {
+ s = s.replace(*c, replacement);
+ }
+ s
+}
+
+fn draw_col_separator(buffer: &mut StyledBuffer, line: usize, col: usize) {
+ buffer.puts(line, col, "| ", Style::LineNumber);
+}
+
+fn draw_col_separator_no_space(buffer: &mut StyledBuffer, line: usize, col: usize) {
+ draw_col_separator_no_space_with_style(buffer, line, col, Style::LineNumber);
+}
+
+fn draw_col_separator_no_space_with_style(
+ buffer: &mut StyledBuffer,
+ line: usize,
+ col: usize,
+ style: Style,
+) {
+ buffer.putc(line, col, '|', style);
+}
+
+fn draw_range(
+ buffer: &mut StyledBuffer,
+ symbol: char,
+ line: usize,
+ col_from: usize,
+ col_to: usize,
+ style: Style,
+) {
+ for col in col_from..col_to {
+ buffer.putc(line, col, symbol, style);
+ }
+}
+
+fn draw_note_separator(buffer: &mut StyledBuffer, line: usize, col: usize) {
+ buffer.puts(line, col, "= ", Style::LineNumber);
+}
+
+fn draw_multiline_line(
+ buffer: &mut StyledBuffer,
+ line: usize,
+ offset: usize,
+ depth: usize,
+ style: Style,
+) {
+ buffer.putc(line, offset + depth - 1, '|', style);
+}
+
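+// Whether the ranges `[a_start, a_end)` and `[b_start, b_end)` overlap;
+// `inclusive` additionally counts ranges that merely touch at an endpoint.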
+fn num_overlap(
+ a_start: usize,
+ a_end: usize,
+ b_start: usize,
+ b_end: usize,
+ inclusive: bool,
+) -> bool {
+ let extra = if inclusive { 1 } else { 0 };
+ (b_start..b_end + extra).contains(&a_start) || (a_start..a_end + extra).contains(&b_start)
+}
+fn overlaps(a1: &Annotation, a2: &Annotation, padding: usize) -> bool {
+ num_overlap(a1.start_col, a1.end_col + padding, a2.start_col, a2.end_col, false)
+}
+
+fn emit_to_destination(
+ rendered_buffer: &[Vec<StyledString>],
+ lvl: &Level,
+ dst: &mut Destination,
+ short_message: bool,
+) -> io::Result<()> {
+ use crate::lock;
+
+ let mut dst = dst.writable();
+
+ // In order to prevent error message interleaving, where multiple error lines get intermixed
+    // when multiple compiler processes emit errors simultaneously, we take additional
+ // steps.
+ //
+ // On Unix systems, we write into a buffered terminal rather than directly to a terminal. When
+    // .flush() is called, we take the buffer created from the buffered writes and write it in
+    // one shot. Because Unix systems use ANSI for colors, which is a text-based styling
+ // scheme, this buffered approach works and maintains the styling.
+ //
+ // On Windows, styling happens through calls to a terminal API. This prevents us from using the
+ // same buffering approach. Instead, we use a global Windows mutex, which we acquire long
+ // enough to output the full error message, then we release.
+ let _buffer_lock = lock::acquire_global_lock("rustc_errors");
+ for (pos, line) in rendered_buffer.iter().enumerate() {
+ for part in line {
+ dst.apply_style(*lvl, part.style)?;
+ write!(dst, "{}", part.text)?;
+ dst.reset()?;
+ }
+ if !short_message && (!lvl.is_failure_note() || pos != rendered_buffer.len() - 1) {
+ writeln!(dst)?;
+ }
+ }
+ dst.flush()?;
+ Ok(())
+}
+
+pub enum Destination {
+ Terminal(StandardStream),
+ Buffered(BufferWriter),
+    // The bool denotes whether we should be emitting ANSI color codes or not
+ Raw(Box<(dyn Write + Send)>, bool),
+}
+
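+// A borrowed view of a `Destination` used while emitting a single message;
+// the `Buffered` variant flushes its accumulated buffer when dropped.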
+pub enum WritableDst<'a> {
+ Terminal(&'a mut StandardStream),
+ Buffered(&'a mut BufferWriter, Buffer),
+ Raw(&'a mut (dyn Write + Send)),
+ ColoredRaw(Ansi<&'a mut (dyn Write + Send)>),
+}
+
+impl Destination {
+ fn from_stderr(color: ColorConfig) -> Destination {
+ let choice = color.to_color_choice();
+ // On Windows we'll be performing global synchronization on the entire
+ // system for emitting rustc errors, so there's no need to buffer
+ // anything.
+ //
+ // On non-Windows we rely on the atomicity of `write` to ensure errors
+ // don't get all jumbled up.
+ if cfg!(windows) {
+ Terminal(StandardStream::stderr(choice))
+ } else {
+ Buffered(BufferWriter::stderr(choice))
+ }
+ }
+
+ fn writable(&mut self) -> WritableDst<'_> {
+ match *self {
+ Destination::Terminal(ref mut t) => WritableDst::Terminal(t),
+ Destination::Buffered(ref mut t) => {
+ let buf = t.buffer();
+ WritableDst::Buffered(t, buf)
+ }
+ Destination::Raw(ref mut t, false) => WritableDst::Raw(t),
+ Destination::Raw(ref mut t, true) => WritableDst::ColoredRaw(Ansi::new(t)),
+ }
+ }
+
+ fn supports_color(&self) -> bool {
+ match *self {
+ Self::Terminal(ref stream) => stream.supports_color(),
+ Self::Buffered(ref buffer) => buffer.buffer().supports_color(),
+ Self::Raw(_, supports_color) => supports_color,
+ }
+ }
+}
+
+impl<'a> WritableDst<'a> {
+ fn apply_style(&mut self, lvl: Level, style: Style) -> io::Result<()> {
+ let mut spec = ColorSpec::new();
+ match style {
+ Style::Addition => {
+ spec.set_fg(Some(Color::Green)).set_intense(true);
+ }
+ Style::Removal => {
+ spec.set_fg(Some(Color::Red)).set_intense(true);
+ }
+ Style::LineAndColumn => {}
+ Style::LineNumber => {
+ spec.set_bold(true);
+ spec.set_intense(true);
+ if cfg!(windows) {
+ spec.set_fg(Some(Color::Cyan));
+ } else {
+ spec.set_fg(Some(Color::Blue));
+ }
+ }
+ Style::Quotation => {}
+ Style::MainHeaderMsg => {
+ spec.set_bold(true);
+ if cfg!(windows) {
+ spec.set_intense(true).set_fg(Some(Color::White));
+ }
+ }
+ Style::UnderlinePrimary | Style::LabelPrimary => {
+ spec = lvl.color();
+ spec.set_bold(true);
+ }
+ Style::UnderlineSecondary | Style::LabelSecondary => {
+ spec.set_bold(true).set_intense(true);
+ if cfg!(windows) {
+ spec.set_fg(Some(Color::Cyan));
+ } else {
+ spec.set_fg(Some(Color::Blue));
+ }
+ }
+ Style::HeaderMsg | Style::NoStyle => {}
+ Style::Level(lvl) => {
+ spec = lvl.color();
+ spec.set_bold(true);
+ }
+ Style::Highlight => {
+ spec.set_bold(true);
+ }
+ }
+ self.set_color(&spec)
+ }
+
+ fn set_color(&mut self, color: &ColorSpec) -> io::Result<()> {
+ match *self {
+ WritableDst::Terminal(ref mut t) => t.set_color(color),
+ WritableDst::Buffered(_, ref mut t) => t.set_color(color),
+ WritableDst::ColoredRaw(ref mut t) => t.set_color(color),
+ WritableDst::Raw(_) => Ok(()),
+ }
+ }
+
+ fn reset(&mut self) -> io::Result<()> {
+ match *self {
+ WritableDst::Terminal(ref mut t) => t.reset(),
+ WritableDst::Buffered(_, ref mut t) => t.reset(),
+ WritableDst::ColoredRaw(ref mut t) => t.reset(),
+ WritableDst::Raw(_) => Ok(()),
+ }
+ }
+}
+
+impl<'a> Write for WritableDst<'a> {
+ fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
+ match *self {
+ WritableDst::Terminal(ref mut t) => t.write(bytes),
+ WritableDst::Buffered(_, ref mut buf) => buf.write(bytes),
+ WritableDst::Raw(ref mut w) => w.write(bytes),
+ WritableDst::ColoredRaw(ref mut t) => t.write(bytes),
+ }
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ match *self {
+ WritableDst::Terminal(ref mut t) => t.flush(),
+ WritableDst::Buffered(_, ref mut buf) => buf.flush(),
+ WritableDst::Raw(ref mut w) => w.flush(),
+ WritableDst::ColoredRaw(ref mut w) => w.flush(),
+ }
+ }
+}
+
+impl<'a> Drop for WritableDst<'a> {
+ fn drop(&mut self) {
+ if let WritableDst::Buffered(ref mut dst, ref mut buf) = self {
+ drop(dst.print(buf));
+ }
+ }
+}
+
+/// Whether the original and suggested code are visually similar enough to warrant extra wording.
+pub fn is_case_difference(sm: &SourceMap, suggested: &str, sp: Span) -> bool {
+ // FIXME: this should probably be extended to also account for `FO0` → `FOO` and unicode.
+ let found = match sm.span_to_snippet(sp) {
+ Ok(snippet) => snippet,
+ Err(e) => {
+ warn!(error = ?e, "Invalid span {:?}", sp);
+ return false;
+ }
+ };
+ let ascii_confusables = &['c', 'f', 'i', 'k', 'o', 's', 'u', 'v', 'w', 'x', 'y', 'z'];
+ // All the chars that differ in capitalization are confusable (above):
+ let confusable = iter::zip(found.chars(), suggested.chars())
+ .filter(|(f, s)| f != s)
+ .all(|(f, s)| (ascii_confusables.contains(&f) || ascii_confusables.contains(&s)));
+ confusable && found.to_lowercase() == suggested.to_lowercase()
+ // FIXME: We sometimes suggest the same thing we already have, which is a
+ // bug, but be defensive against that here.
+ && found != suggested
+}
diff --git a/compiler/rustc_errors/src/json.rs b/compiler/rustc_errors/src/json.rs
new file mode 100644
index 000000000..b8cd334b4
--- /dev/null
+++ b/compiler/rustc_errors/src/json.rs
@@ -0,0 +1,561 @@
+//! A JSON emitter for errors.
+//!
+//! This works by converting errors to a simplified structural format (see the
+//! structs below) and then serializing them. These should
+//! contain as much information about the error as possible.
+//!
+//! The format of the JSON output should be considered *unstable*. For now the
+//! structs at the end of this file (Diagnostic*) specify the error format.
+
+// FIXME: spec the JSON output properly.
+
+use rustc_span::source_map::{FilePathMapping, SourceMap};
+
+use crate::emitter::{Emitter, HumanReadableErrorType};
+use crate::registry::Registry;
+use crate::DiagnosticId;
+use crate::{
+ CodeSuggestion, FluentBundle, LazyFallbackBundle, MultiSpan, SpanLabel, SubDiagnostic,
+};
+use rustc_lint_defs::Applicability;
+
+use rustc_data_structures::sync::Lrc;
+use rustc_error_messages::FluentArgs;
+use rustc_span::hygiene::ExpnData;
+use rustc_span::Span;
+use std::io::{self, Write};
+use std::path::Path;
+use std::sync::{Arc, Mutex};
+use std::vec;
+
+use serde::Serialize;
+
+#[cfg(test)]
+mod tests;
+
+pub struct JsonEmitter {
+ dst: Box<dyn Write + Send>,
+ registry: Option<Registry>,
+ sm: Lrc<SourceMap>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ pretty: bool,
+ ui_testing: bool,
+ json_rendered: HumanReadableErrorType,
+ diagnostic_width: Option<usize>,
+ macro_backtrace: bool,
+}
+
+impl JsonEmitter {
+ pub fn stderr(
+ registry: Option<Registry>,
+ source_map: Lrc<SourceMap>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ pretty: bool,
+ json_rendered: HumanReadableErrorType,
+ diagnostic_width: Option<usize>,
+ macro_backtrace: bool,
+ ) -> JsonEmitter {
+ JsonEmitter {
+ dst: Box::new(io::BufWriter::new(io::stderr())),
+ registry,
+ sm: source_map,
+ fluent_bundle,
+ fallback_bundle,
+ pretty,
+ ui_testing: false,
+ json_rendered,
+ diagnostic_width,
+ macro_backtrace,
+ }
+ }
+
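+    /// Like `stderr`, but uses a fresh, empty `SourceMap` rather than a
+    /// caller-provided one.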
+ pub fn basic(
+ pretty: bool,
+ json_rendered: HumanReadableErrorType,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ diagnostic_width: Option<usize>,
+ macro_backtrace: bool,
+ ) -> JsonEmitter {
+ let file_path_mapping = FilePathMapping::empty();
+ JsonEmitter::stderr(
+ None,
+ Lrc::new(SourceMap::new(file_path_mapping)),
+ fluent_bundle,
+ fallback_bundle,
+ pretty,
+ json_rendered,
+ diagnostic_width,
+ macro_backtrace,
+ )
+ }
+
+ pub fn new(
+ dst: Box<dyn Write + Send>,
+ registry: Option<Registry>,
+ source_map: Lrc<SourceMap>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ pretty: bool,
+ json_rendered: HumanReadableErrorType,
+ diagnostic_width: Option<usize>,
+ macro_backtrace: bool,
+ ) -> JsonEmitter {
+ JsonEmitter {
+ dst,
+ registry,
+ sm: source_map,
+ fluent_bundle,
+ fallback_bundle,
+ pretty,
+ ui_testing: false,
+ json_rendered,
+ diagnostic_width,
+ macro_backtrace,
+ }
+ }
+
+ pub fn ui_testing(self, ui_testing: bool) -> Self {
+ Self { ui_testing, ..self }
+ }
+}
+
+impl Emitter for JsonEmitter {
+ fn emit_diagnostic(&mut self, diag: &crate::Diagnostic) {
+ let data = Diagnostic::from_errors_diagnostic(diag, self);
+ let result = if self.pretty {
+ writeln!(&mut self.dst, "{}", serde_json::to_string_pretty(&data).unwrap())
+ } else {
+ writeln!(&mut self.dst, "{}", serde_json::to_string(&data).unwrap())
+ }
+ .and_then(|_| self.dst.flush());
+ if let Err(e) = result {
+ panic!("failed to print diagnostics: {:?}", e);
+ }
+ }
+
+ fn emit_artifact_notification(&mut self, path: &Path, artifact_type: &str) {
+ let data = ArtifactNotification { artifact: path, emit: artifact_type };
+ let result = if self.pretty {
+ writeln!(&mut self.dst, "{}", serde_json::to_string_pretty(&data).unwrap())
+ } else {
+ writeln!(&mut self.dst, "{}", serde_json::to_string(&data).unwrap())
+ }
+ .and_then(|_| self.dst.flush());
+ if let Err(e) = result {
+ panic!("failed to print notification: {:?}", e);
+ }
+ }
+
+ fn emit_future_breakage_report(&mut self, diags: Vec<crate::Diagnostic>) {
+ let data: Vec<FutureBreakageItem> = diags
+ .into_iter()
+ .map(|mut diag| {
+ if diag.level == crate::Level::Allow {
+ diag.level = crate::Level::Warning(None);
+ }
+ FutureBreakageItem { diagnostic: Diagnostic::from_errors_diagnostic(&diag, self) }
+ })
+ .collect();
+ let report = FutureIncompatReport { future_incompat_report: data };
+ let result = if self.pretty {
+ writeln!(&mut self.dst, "{}", serde_json::to_string_pretty(&report).unwrap())
+ } else {
+ writeln!(&mut self.dst, "{}", serde_json::to_string(&report).unwrap())
+ }
+ .and_then(|_| self.dst.flush());
+ if let Err(e) = result {
+ panic!("failed to print future breakage report: {:?}", e);
+ }
+ }
+
+ fn emit_unused_externs(&mut self, lint_level: rustc_lint_defs::Level, unused_externs: &[&str]) {
+ let lint_level = lint_level.as_str();
+ let data = UnusedExterns { lint_level, unused_extern_names: unused_externs };
+ let result = if self.pretty {
+ writeln!(&mut self.dst, "{}", serde_json::to_string_pretty(&data).unwrap())
+ } else {
+ writeln!(&mut self.dst, "{}", serde_json::to_string(&data).unwrap())
+ }
+ .and_then(|_| self.dst.flush());
+ if let Err(e) = result {
+ panic!("failed to print unused externs: {:?}", e);
+ }
+ }
+
+ fn source_map(&self) -> Option<&Lrc<SourceMap>> {
+ Some(&self.sm)
+ }
+
+ fn fluent_bundle(&self) -> Option<&Lrc<FluentBundle>> {
+ self.fluent_bundle.as_ref()
+ }
+
+ fn fallback_fluent_bundle(&self) -> &FluentBundle {
+ &**self.fallback_bundle
+ }
+
+ fn should_show_explain(&self) -> bool {
+ !matches!(self.json_rendered, HumanReadableErrorType::Short(_))
+ }
+}
+
+// The following data types are provided just for serialization.
+
+#[derive(Serialize)]
+struct Diagnostic {
+ /// The primary error message.
+ message: String,
+ code: Option<DiagnosticCode>,
+ /// "error: internal compiler error", "error", "warning", "note", "help".
+ level: &'static str,
+ spans: Vec<DiagnosticSpan>,
+ /// Associated diagnostic messages.
+ children: Vec<Diagnostic>,
+ /// The message as rustc would render it.
+ rendered: Option<String>,
+}
+
+#[derive(Serialize)]
+struct DiagnosticSpan {
+ file_name: String,
+ byte_start: u32,
+ byte_end: u32,
+ /// 1-based.
+ line_start: usize,
+ line_end: usize,
+ /// 1-based, character offset.
+ column_start: usize,
+ column_end: usize,
+ /// Is this a "primary" span -- meaning the point, or one of the points,
+ /// where the error occurred?
+ is_primary: bool,
+ /// Source text from the start of line_start to the end of line_end.
+ text: Vec<DiagnosticSpanLine>,
+ /// Label that should be placed at this location (if any)
+ label: Option<String>,
+ /// If we are suggesting a replacement, this will contain text
+    /// that should be spliced in atop this span.
+ suggested_replacement: Option<String>,
+ /// If the suggestion is approximate
+ suggestion_applicability: Option<Applicability>,
+ /// Macro invocations that created the code at this span, if any.
+ expansion: Option<Box<DiagnosticSpanMacroExpansion>>,
+}
+
+#[derive(Serialize)]
+struct DiagnosticSpanLine {
+ text: String,
+
+ /// 1-based, character offset in self.text.
+ highlight_start: usize,
+
+ highlight_end: usize,
+}
+
+#[derive(Serialize)]
+struct DiagnosticSpanMacroExpansion {
+ /// span where macro was applied to generate this code; note that
+ /// this may itself derive from a macro (if
+ /// `span.expansion.is_some()`)
+ span: DiagnosticSpan,
+
+ /// name of macro that was applied (e.g., "foo!" or "#[derive(Eq)]")
+ macro_decl_name: String,
+
+ /// span where macro was defined (if known)
+ def_site_span: DiagnosticSpan,
+}
+
+#[derive(Serialize)]
+struct DiagnosticCode {
+ /// The code itself.
+ code: String,
+ /// An explanation for the code.
+ explanation: Option<&'static str>,
+}
+
+#[derive(Serialize)]
+struct ArtifactNotification<'a> {
+ /// The path of the artifact.
+ artifact: &'a Path,
+ /// What kind of artifact we're emitting.
+ emit: &'a str,
+}
+
+#[derive(Serialize)]
+struct FutureBreakageItem {
+ diagnostic: Diagnostic,
+}
+
+#[derive(Serialize)]
+struct FutureIncompatReport {
+ future_incompat_report: Vec<FutureBreakageItem>,
+}
+
+// NOTE: Keep this in sync with the equivalent structs in rustdoc's
+// doctest component (as well as cargo).
+// We could unify this struct with the one in rustdoc, but they have different
+// ownership semantics, so doing so would create wasteful allocations.
+#[derive(Serialize)]
+struct UnusedExterns<'a, 'b, 'c> {
+ /// The severity level of the unused dependencies lint
+ lint_level: &'a str,
+ /// List of unused externs by their names.
+ unused_extern_names: &'b [&'c str],
+}
+
+impl Diagnostic {
+ fn from_errors_diagnostic(diag: &crate::Diagnostic, je: &JsonEmitter) -> Diagnostic {
+ let args = je.to_fluent_args(diag.args());
+ let sugg = diag.suggestions.iter().flatten().map(|sugg| {
+ let translated_message = je.translate_message(&sugg.msg, &args);
+ Diagnostic {
+ message: translated_message.to_string(),
+ code: None,
+ level: "help",
+ spans: DiagnosticSpan::from_suggestion(sugg, &args, je),
+ children: vec![],
+ rendered: None,
+ }
+ });
+
+ // generate regular command line output and store it in the json
+
+ // A threadsafe buffer for writing.
+ #[derive(Default, Clone)]
+ struct BufWriter(Arc<Mutex<Vec<u8>>>);
+
+ impl Write for BufWriter {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.0.lock().unwrap().write(buf)
+ }
+ fn flush(&mut self) -> io::Result<()> {
+ self.0.lock().unwrap().flush()
+ }
+ }
+ let buf = BufWriter::default();
+ let output = buf.clone();
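+        // The emitter consumes `buf`; `output` keeps a second handle to the same
+        // shared buffer so the rendered text can be extracted afterwards.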
+ je.json_rendered
+ .new_emitter(
+ Box::new(buf),
+ Some(je.sm.clone()),
+ je.fluent_bundle.clone(),
+ je.fallback_bundle.clone(),
+ false,
+ je.diagnostic_width,
+ je.macro_backtrace,
+ )
+ .ui_testing(je.ui_testing)
+ .emit_diagnostic(diag);
+ let output = Arc::try_unwrap(output.0).unwrap().into_inner().unwrap();
+ let output = String::from_utf8(output).unwrap();
+
+ let translated_message = je.translate_messages(&diag.message, &args);
+ Diagnostic {
+ message: translated_message.to_string(),
+ code: DiagnosticCode::map_opt_string(diag.code.clone(), je),
+ level: diag.level.to_str(),
+ spans: DiagnosticSpan::from_multispan(&diag.span, &args, je),
+ children: diag
+ .children
+ .iter()
+ .map(|c| Diagnostic::from_sub_diagnostic(c, &args, je))
+ .chain(sugg)
+ .collect(),
+ rendered: Some(output),
+ }
+ }
+
+ fn from_sub_diagnostic(
+ diag: &SubDiagnostic,
+ args: &FluentArgs<'_>,
+ je: &JsonEmitter,
+ ) -> Diagnostic {
+ let translated_message = je.translate_messages(&diag.message, args);
+ Diagnostic {
+ message: translated_message.to_string(),
+ code: None,
+ level: diag.level.to_str(),
+ spans: diag
+ .render_span
+ .as_ref()
+ .map(|sp| DiagnosticSpan::from_multispan(sp, args, je))
+ .unwrap_or_else(|| DiagnosticSpan::from_multispan(&diag.span, args, je)),
+ children: vec![],
+ rendered: None,
+ }
+ }
+}
+
+impl DiagnosticSpan {
+ fn from_span_label(
+ span: SpanLabel,
+ suggestion: Option<(&String, Applicability)>,
+ args: &FluentArgs<'_>,
+ je: &JsonEmitter,
+ ) -> DiagnosticSpan {
+ Self::from_span_etc(
+ span.span,
+ span.is_primary,
+ span.label.as_ref().map(|m| je.translate_message(m, args)).map(|m| m.to_string()),
+ suggestion,
+ je,
+ )
+ }
+
+ fn from_span_etc(
+ span: Span,
+ is_primary: bool,
+ label: Option<String>,
+ suggestion: Option<(&String, Applicability)>,
+ je: &JsonEmitter,
+ ) -> DiagnosticSpan {
+ // obtain the full backtrace from the `macro_backtrace`
+ // helper; in some ways, it'd be better to expand the
+ // backtrace ourselves, but the `macro_backtrace` helper makes
+        // some decisions, such as dropping some frames, and I don't
+ // want to duplicate that logic here.
+ let backtrace = span.macro_backtrace();
+ DiagnosticSpan::from_span_full(span, is_primary, label, suggestion, backtrace, je)
+ }
+
+ fn from_span_full(
+ span: Span,
+ is_primary: bool,
+ label: Option<String>,
+ suggestion: Option<(&String, Applicability)>,
+ mut backtrace: impl Iterator<Item = ExpnData>,
+ je: &JsonEmitter,
+ ) -> DiagnosticSpan {
+ let start = je.sm.lookup_char_pos(span.lo());
+ let end = je.sm.lookup_char_pos(span.hi());
+ let backtrace_step = backtrace.next().map(|bt| {
+ let call_site = Self::from_span_full(bt.call_site, false, None, None, backtrace, je);
+ let def_site_span = Self::from_span_full(
+ je.sm.guess_head_span(bt.def_site),
+ false,
+ None,
+ None,
+ [].into_iter(),
+ je,
+ );
+ Box::new(DiagnosticSpanMacroExpansion {
+ span: call_site,
+ macro_decl_name: bt.kind.descr(),
+ def_site_span,
+ })
+ });
+
+ DiagnosticSpan {
+ file_name: je.sm.filename_for_diagnostics(&start.file.name).to_string(),
+ byte_start: start.file.original_relative_byte_pos(span.lo()).0,
+ byte_end: start.file.original_relative_byte_pos(span.hi()).0,
+ line_start: start.line,
+ line_end: end.line,
+ column_start: start.col.0 + 1,
+ column_end: end.col.0 + 1,
+ is_primary,
+ text: DiagnosticSpanLine::from_span(span, je),
+ suggested_replacement: suggestion.map(|x| x.0.clone()),
+ suggestion_applicability: suggestion.map(|x| x.1),
+ expansion: backtrace_step,
+ label,
+ }
+ }
+
+ fn from_multispan(
+ msp: &MultiSpan,
+ args: &FluentArgs<'_>,
+ je: &JsonEmitter,
+ ) -> Vec<DiagnosticSpan> {
+ msp.span_labels()
+ .into_iter()
+ .map(|span_str| Self::from_span_label(span_str, None, args, je))
+ .collect()
+ }
+
+ fn from_suggestion(
+ suggestion: &CodeSuggestion,
+ args: &FluentArgs<'_>,
+ je: &JsonEmitter,
+ ) -> Vec<DiagnosticSpan> {
+ suggestion
+ .substitutions
+ .iter()
+ .flat_map(|substitution| {
+ substitution.parts.iter().map(move |suggestion_inner| {
+ let span_label =
+ SpanLabel { span: suggestion_inner.span, is_primary: true, label: None };
+ DiagnosticSpan::from_span_label(
+ span_label,
+ Some((&suggestion_inner.snippet, suggestion.applicability)),
+ args,
+ je,
+ )
+ })
+ })
+ .collect()
+ }
+}
+
+impl DiagnosticSpanLine {
+ fn line_from_source_file(
+ sf: &rustc_span::SourceFile,
+ index: usize,
+ h_start: usize,
+ h_end: usize,
+ ) -> DiagnosticSpanLine {
+ DiagnosticSpanLine {
+ text: sf.get_line(index).map_or_else(String::new, |l| l.into_owned()),
+ highlight_start: h_start,
+ highlight_end: h_end,
+ }
+ }
+
+ /// Creates a list of DiagnosticSpanLines from span - each line with any part
+ /// of `span` gets a DiagnosticSpanLine, with the highlight indicating the
+ /// `span` within the line.
+ fn from_span(span: Span, je: &JsonEmitter) -> Vec<DiagnosticSpanLine> {
+ je.sm
+ .span_to_lines(span)
+ .map(|lines| {
+ // We can't get any lines if the source is unavailable.
+ if !je.sm.ensure_source_file_source_present(lines.file.clone()) {
+ return vec![];
+ }
+
+ let sf = &*lines.file;
+ lines
+ .lines
+ .iter()
+ .map(|line| {
+ DiagnosticSpanLine::line_from_source_file(
+ sf,
+ line.line_index,
+ line.start_col.0 + 1,
+ line.end_col.0 + 1,
+ )
+ })
+ .collect()
+ })
+ .unwrap_or_else(|_| vec![])
+ }
+}
+
+impl DiagnosticCode {
+ fn map_opt_string(s: Option<DiagnosticId>, je: &JsonEmitter) -> Option<DiagnosticCode> {
+ s.map(|s| {
+ let s = match s {
+ DiagnosticId::Error(s) => s,
+ DiagnosticId::Lint { name, .. } => name,
+ };
+ let je_result =
+ je.registry.as_ref().map(|registry| registry.try_find_description(&s)).unwrap();
+
+ DiagnosticCode { code: s, explanation: je_result.unwrap_or(None) }
+ })
+ }
+}
diff --git a/compiler/rustc_errors/src/json/tests.rs b/compiler/rustc_errors/src/json/tests.rs
new file mode 100644
index 000000000..d940d14e1
--- /dev/null
+++ b/compiler/rustc_errors/src/json/tests.rs
@@ -0,0 +1,204 @@
+use super::*;
+
+use crate::json::JsonEmitter;
+use rustc_span::source_map::{FilePathMapping, SourceMap};
+
+use crate::emitter::{ColorConfig, HumanReadableErrorType};
+use crate::Handler;
+use rustc_span::{BytePos, Span};
+
+use std::str;
+
+use serde::Deserialize;
+
+#[derive(Deserialize, Debug, PartialEq, Eq)]
+struct TestData {
+ spans: Vec<SpanTestData>,
+}
+
+#[derive(Deserialize, Debug, PartialEq, Eq)]
+struct SpanTestData {
+ pub byte_start: u32,
+ pub byte_end: u32,
+ pub line_start: u32,
+ pub column_start: u32,
+ pub line_end: u32,
+ pub column_end: u32,
+}
+
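+/// A `Write` sink backed by a shared buffer, letting the test read back what
+/// the emitter wrote.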
+struct Shared<T> {
+ data: Arc<Mutex<T>>,
+}
+
+impl<T: Write> Write for Shared<T> {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.data.lock().unwrap().write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.data.lock().unwrap().flush()
+ }
+}
+
+/// Test that the span yields correct positions in JSON.
+fn test_positions(code: &str, span: (u32, u32), expected_output: SpanTestData) {
+ rustc_span::create_default_session_globals_then(|| {
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ sm.new_source_file(Path::new("test.rs").to_owned().into(), code.to_owned());
+ let fallback_bundle =
+ crate::fallback_fluent_bundle(rustc_error_messages::DEFAULT_LOCALE_RESOURCES, false);
+
+ let output = Arc::new(Mutex::new(Vec::new()));
+ let je = JsonEmitter::new(
+ Box::new(Shared { data: output.clone() }),
+ None,
+ sm,
+ None,
+ fallback_bundle,
+ true,
+ HumanReadableErrorType::Short(ColorConfig::Never),
+ None,
+ false,
+ );
+
+ let span = Span::with_root_ctxt(BytePos(span.0), BytePos(span.1));
+ let handler = Handler::with_emitter(true, None, Box::new(je));
+ handler.span_err(span, "foo");
+
+ let bytes = output.lock().unwrap();
+ let actual_output = str::from_utf8(&bytes).unwrap();
+ let actual_output: TestData = serde_json::from_str(actual_output).unwrap();
+ let spans = actual_output.spans;
+ assert_eq!(spans.len(), 1);
+
+ assert_eq!(expected_output, spans[0])
+ })
+}
+
+#[test]
+fn empty() {
+ test_positions(
+ " ",
+ (0, 1),
+ SpanTestData {
+ byte_start: 0,
+ byte_end: 1,
+ line_start: 1,
+ column_start: 1,
+ line_end: 1,
+ column_end: 2,
+ },
+ )
+}
+
+#[test]
+fn bom() {
+ test_positions(
+ "\u{feff} ",
+ (0, 1),
+ SpanTestData {
+ byte_start: 3,
+ byte_end: 4,
+ line_start: 1,
+ column_start: 1,
+ line_end: 1,
+ column_end: 2,
+ },
+ )
+}
+
+#[test]
+fn lf_newlines() {
+ test_positions(
+ "\nmod foo;\nmod bar;\n",
+ (5, 12),
+ SpanTestData {
+ byte_start: 5,
+ byte_end: 12,
+ line_start: 2,
+ column_start: 5,
+ line_end: 3,
+ column_end: 3,
+ },
+ )
+}
+
+#[test]
+fn crlf_newlines() {
+ test_positions(
+ "\r\nmod foo;\r\nmod bar;\r\n",
+ (5, 12),
+ SpanTestData {
+ byte_start: 6,
+ byte_end: 14,
+ line_start: 2,
+ column_start: 5,
+ line_end: 3,
+ column_end: 3,
+ },
+ )
+}
+
+#[test]
+fn crlf_newlines_with_bom() {
+ test_positions(
+ "\u{feff}\r\nmod foo;\r\nmod bar;\r\n",
+ (5, 12),
+ SpanTestData {
+ byte_start: 9,
+ byte_end: 17,
+ line_start: 2,
+ column_start: 5,
+ line_end: 3,
+ column_end: 3,
+ },
+ )
+}
+
+#[test]
+fn span_before_crlf() {
+ test_positions(
+ "foo\r\nbar",
+ (2, 3),
+ SpanTestData {
+ byte_start: 2,
+ byte_end: 3,
+ line_start: 1,
+ column_start: 3,
+ line_end: 1,
+ column_end: 4,
+ },
+ )
+}
+
+#[test]
+fn span_on_crlf() {
+ test_positions(
+ "foo\r\nbar",
+ (3, 4),
+ SpanTestData {
+ byte_start: 3,
+ byte_end: 5,
+ line_start: 1,
+ column_start: 4,
+ line_end: 2,
+ column_end: 1,
+ },
+ )
+}
+
+#[test]
+fn span_after_crlf() {
+ test_positions(
+ "foo\r\nbar",
+ (4, 5),
+ SpanTestData {
+ byte_start: 5,
+ byte_end: 6,
+ line_start: 2,
+ column_start: 1,
+ line_end: 2,
+ column_end: 2,
+ },
+ )
+}
diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs
new file mode 100644
index 000000000..2409c0b5a
--- /dev/null
+++ b/compiler/rustc_errors/src/lib.rs
@@ -0,0 +1,1589 @@
+//! Diagnostics creation and emission for `rustc`.
+//!
+//! This module contains the code for creating and emitting diagnostics.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(drain_filter)]
+#![feature(backtrace)]
+#![feature(if_let_guard)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(never_type)]
+#![feature(adt_const_params)]
+#![feature(rustc_attrs)]
+#![allow(incomplete_features)]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate rustc_macros;
+
+#[macro_use]
+extern crate tracing;
+
+pub use emitter::ColorConfig;
+
+use rustc_lint_defs::LintExpectationId;
+use Level::*;
+
+use emitter::{is_case_difference, Emitter, EmitterWriter};
+use registry::Registry;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
+use rustc_data_structures::stable_hasher::StableHasher;
+use rustc_data_structures::sync::{self, Lock, Lrc};
+use rustc_data_structures::AtomicRef;
+pub use rustc_error_messages::{
+ fallback_fluent_bundle, fluent, fluent_bundle, DiagnosticMessage, FluentBundle,
+ LanguageIdentifier, LazyFallbackBundle, MultiSpan, SpanLabel, SubdiagnosticMessage,
+ DEFAULT_LOCALE_RESOURCES,
+};
+pub use rustc_lint_defs::{pluralize, Applicability};
+use rustc_span::source_map::SourceMap;
+use rustc_span::HashStableContext;
+use rustc_span::{Loc, Span};
+
+use std::borrow::Cow;
+use std::hash::Hash;
+use std::num::NonZeroUsize;
+use std::panic;
+use std::path::Path;
+use std::{error, fmt};
+
+use termcolor::{Color, ColorSpec};
+
+pub mod annotate_snippet_emitter_writer;
+mod diagnostic;
+mod diagnostic_builder;
+pub mod emitter;
+pub mod json;
+mod lock;
+pub mod registry;
+mod snippet;
+mod styled_buffer;
+
+pub use snippet::Style;
+
+pub type PResult<'a, T> = Result<T, DiagnosticBuilder<'a, ErrorGuaranteed>>;
+
+// `PResult` is used a lot. Make sure it doesn't unintentionally get bigger.
+// (See also the comment on `DiagnosticBuilder`'s `diagnostic` field.)
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(PResult<'_, ()>, 16);
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(PResult<'_, bool>, 24);
+
+#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash, Encodable, Decodable)]
+pub enum SuggestionStyle {
+ /// Hide the suggested code when displaying this suggestion inline.
+ HideCodeInline,
+ /// Always hide the suggested code but display the message.
+ HideCodeAlways,
+ /// Do not display this suggestion in the cli output, it is only meant for tools.
+ CompletelyHidden,
+ /// Always show the suggested code.
+ /// This will *not* show the code if the suggestion is inline *and* the suggested code is
+ /// empty.
+ ShowCode,
+ /// Always show the suggested code independently.
+ ShowAlways,
+}
+
+impl SuggestionStyle {
+ fn hide_inline(&self) -> bool {
+ !matches!(*self, SuggestionStyle::ShowCode)
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
+pub struct CodeSuggestion {
+    /// Each substitution can have multiple variants due to multiple
+    /// applicable suggestions.
+ ///
+ /// `foo.bar` might be replaced with `a.b` or `x.y` by replacing
+ /// `foo` and `bar` on their own:
+ ///
+ /// ```ignore (illustrative)
+ /// vec![
+ /// Substitution { parts: vec![(0..3, "a"), (4..7, "b")] },
+ /// Substitution { parts: vec![(0..3, "x"), (4..7, "y")] },
+ /// ]
+ /// ```
+ ///
+ /// or by replacing the entire span:
+ ///
+ /// ```ignore (illustrative)
+ /// vec![
+ /// Substitution { parts: vec![(0..7, "a.b")] },
+ /// Substitution { parts: vec![(0..7, "x.y")] },
+ /// ]
+ /// ```
+ pub substitutions: Vec<Substitution>,
+ pub msg: DiagnosticMessage,
+ /// Visual representation of this suggestion.
+ pub style: SuggestionStyle,
+    /// Whether or not the suggestion is approximate.
+    ///
+    /// Sometimes we may show suggestions with placeholders,
+    /// which are useful for users but not useful for
+    /// tools like rustfix.
+ pub applicability: Applicability,
+}
+
+#[derive(Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
+/// See the docs on `CodeSuggestion::substitutions`
+pub struct Substitution {
+ pub parts: Vec<SubstitutionPart>,
+}
+
+#[derive(Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
+pub struct SubstitutionPart {
+ pub span: Span,
+ pub snippet: String,
+}
+
+/// Used to translate between `Span`s and byte positions within a single output line
+/// of the highlighted code of structured suggestions.
+#[derive(Debug, Clone, Copy)]
+pub struct SubstitutionHighlight {
+ start: usize,
+ end: usize,
+}
+
+impl SubstitutionPart {
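+    /// Whether this part adds new code: its snippet is non-empty while the
+    /// span it applies to contains no code (it is empty or covers only
+    /// whitespace).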
+ pub fn is_addition(&self, sm: &SourceMap) -> bool {
+ !self.snippet.is_empty()
+ && sm
+ .span_to_snippet(self.span)
+ .map_or(self.span.is_empty(), |snippet| snippet.trim().is_empty())
+ }
+
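+    /// Whether this part deletes code: its replacement snippet is empty or
+    /// whitespace-only.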
+ pub fn is_deletion(&self) -> bool {
+ self.snippet.trim().is_empty()
+ }
+
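+    /// Whether this part replaces code: both its snippet and the existing
+    /// source it covers are non-empty.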
+ pub fn is_replacement(&self, sm: &SourceMap) -> bool {
+ !self.snippet.is_empty()
+ && sm
+ .span_to_snippet(self.span)
+ .map_or(!self.span.is_empty(), |snippet| !snippet.trim().is_empty())
+ }
+}
+
+impl CodeSuggestion {
+ /// Returns the assembled code suggestions, whether they should be shown with an underline
+ /// and whether the substitution only differs in capitalization.
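+    ///
+    /// Illustrative sketch of consuming the result, one entry per substitution:
+    ///
+    /// ```ignore (illustrative)
+    /// for (snippet, parts, highlights, only_capitalization) in
+    ///     suggestion.splice_lines(&sm)
+    /// {
+    ///     // `snippet` is the assembled suggestion text; `highlights` holds
+    ///     // one `Vec<SubstitutionHighlight>` per line of that text.
+    /// }
+    /// ```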
+ pub fn splice_lines(
+ &self,
+ sm: &SourceMap,
+ ) -> Vec<(String, Vec<SubstitutionPart>, Vec<Vec<SubstitutionHighlight>>, bool)> {
+ // For the `Vec<Vec<SubstitutionHighlight>>` value, the first level of the vector
+ // corresponds to the output snippet's lines, while the second level corresponds to the
+ // substrings within that line that should be highlighted.
+
+ use rustc_span::{CharPos, Pos};
+
+ /// Append to a buffer the remainder of the line of existing source code, and return the
+ /// count of lines that have been added for accurate highlighting.
+ fn push_trailing(
+ buf: &mut String,
+ line_opt: Option<&Cow<'_, str>>,
+ lo: &Loc,
+ hi_opt: Option<&Loc>,
+ ) -> usize {
+ let mut line_count = 0;
+ let (lo, hi_opt) = (lo.col.to_usize(), hi_opt.map(|hi| hi.col.to_usize()));
+ if let Some(line) = line_opt {
+ if let Some(lo) = line.char_indices().map(|(i, _)| i).nth(lo) {
+ let hi_opt = hi_opt.and_then(|hi| line.char_indices().map(|(i, _)| i).nth(hi));
+ match hi_opt {
+ Some(hi) if hi > lo => {
+ line_count = line[lo..hi].matches('\n').count();
+ buf.push_str(&line[lo..hi])
+ }
+ Some(_) => (),
+ None => {
+ line_count = line[lo..].matches('\n').count();
+ buf.push_str(&line[lo..])
+ }
+ }
+ }
+ if hi_opt.is_none() {
+ buf.push('\n');
+ }
+ }
+ line_count
+ }
+
+ assert!(!self.substitutions.is_empty());
+
+ self.substitutions
+ .iter()
+ .filter(|subst| {
+                // Suggestions coming from macros can have malformed spans. This is a
+                // heavy-handed approach to avoid ICEs by ignoring the suggestion outright.
+ let invalid = subst.parts.iter().any(|item| sm.is_valid_span(item.span).is_err());
+ if invalid {
+ debug!("splice_lines: suggestion contains an invalid span: {:?}", subst);
+ }
+ !invalid
+ })
+ .cloned()
+ .filter_map(|mut substitution| {
+ // Assumption: all spans are in the same file, and all spans
+ // are disjoint. Sort in ascending order.
+ substitution.parts.sort_by_key(|part| part.span.lo());
+
+ // Find the bounding span.
+ let lo = substitution.parts.iter().map(|part| part.span.lo()).min()?;
+ let hi = substitution.parts.iter().map(|part| part.span.hi()).max()?;
+ let bounding_span = Span::with_root_ctxt(lo, hi);
+                // The different spans might belong to different contexts; if so, ignore the suggestion.
+ let lines = sm.span_to_lines(bounding_span).ok()?;
+ assert!(!lines.lines.is_empty() || bounding_span.is_dummy());
+
+ // We can't splice anything if the source is unavailable.
+ if !sm.ensure_source_file_source_present(lines.file.clone()) {
+ return None;
+ }
+
+ let mut highlights = vec![];
+ // To build up the result, we do this for each span:
+ // - push the line segment trailing the previous span
+ // (at the beginning a "phantom" span pointing at the start of the line)
+ // - push lines between the previous and current span (if any)
+ // - if the previous and current span are not on the same line
+ // push the line segment leading up to the current span
+ // - splice in the span substitution
+ //
+ // Finally push the trailing line segment of the last span
+ let sf = &lines.file;
+ let mut prev_hi = sm.lookup_char_pos(bounding_span.lo());
+ prev_hi.col = CharPos::from_usize(0);
+ let mut prev_line =
+ lines.lines.get(0).and_then(|line0| sf.get_line(line0.line_index));
+ let mut buf = String::new();
+
+ let mut line_highlight = vec![];
+ // We need to keep track of the difference between the existing code and the added
+ // or deleted code in order to point at the correct column *after* substitution.
+ let mut acc = 0;
+ for part in &substitution.parts {
+ let cur_lo = sm.lookup_char_pos(part.span.lo());
+ if prev_hi.line == cur_lo.line {
+ let mut count =
+ push_trailing(&mut buf, prev_line.as_ref(), &prev_hi, Some(&cur_lo));
+ while count > 0 {
+ highlights.push(std::mem::take(&mut line_highlight));
+ acc = 0;
+ count -= 1;
+ }
+ } else {
+ acc = 0;
+ highlights.push(std::mem::take(&mut line_highlight));
+ let mut count = push_trailing(&mut buf, prev_line.as_ref(), &prev_hi, None);
+ while count > 0 {
+ highlights.push(std::mem::take(&mut line_highlight));
+ count -= 1;
+ }
+ // push lines between the previous and current span (if any)
+ for idx in prev_hi.line..(cur_lo.line - 1) {
+ if let Some(line) = sf.get_line(idx) {
+ buf.push_str(line.as_ref());
+ buf.push('\n');
+ highlights.push(std::mem::take(&mut line_highlight));
+ }
+ }
+ if let Some(cur_line) = sf.get_line(cur_lo.line - 1) {
+ let end = match cur_line.char_indices().nth(cur_lo.col.to_usize()) {
+ Some((i, _)) => i,
+ None => cur_line.len(),
+ };
+ buf.push_str(&cur_line[..end]);
+ }
+ }
+ // Add a whole line highlight per line in the snippet.
+ let len: isize = part
+ .snippet
+ .split('\n')
+ .next()
+ .unwrap_or(&part.snippet)
+ .chars()
+ .map(|c| match c {
+ '\t' => 4,
+ _ => 1,
+ })
+ .sum();
+ line_highlight.push(SubstitutionHighlight {
+ start: (cur_lo.col.0 as isize + acc) as usize,
+ end: (cur_lo.col.0 as isize + acc + len) as usize,
+ });
+ buf.push_str(&part.snippet);
+ let cur_hi = sm.lookup_char_pos(part.span.hi());
+ if prev_hi.line == cur_lo.line && cur_hi.line == cur_lo.line {
+ // Account for the difference between the width of the current code and the
+ // snippet being suggested, so that the *later* suggestions are correctly
+ // aligned on the screen.
+ acc += len as isize - (cur_hi.col.0 - cur_lo.col.0) as isize;
+ }
+ prev_hi = cur_hi;
+ prev_line = sf.get_line(prev_hi.line - 1);
+ for line in part.snippet.split('\n').skip(1) {
+ acc = 0;
+ highlights.push(std::mem::take(&mut line_highlight));
+ let end: usize = line
+ .chars()
+ .map(|c| match c {
+ '\t' => 4,
+ _ => 1,
+ })
+ .sum();
+ line_highlight.push(SubstitutionHighlight { start: 0, end });
+ }
+ }
+ highlights.push(std::mem::take(&mut line_highlight));
+ let only_capitalization = is_case_difference(sm, &buf, bounding_span);
+ // if the replacement already ends with a newline, don't print the next line
+ if !buf.ends_with('\n') {
+ push_trailing(&mut buf, prev_line.as_ref(), &prev_hi, None);
+ }
+ // remove trailing newlines
+ while buf.ends_with('\n') {
+ buf.pop();
+ }
+ Some((buf, substitution.parts, highlights, only_capitalization))
+ })
+ .collect()
+ }
+}
+
+pub use rustc_span::fatal_error::{FatalError, FatalErrorMarker};
+
+/// Signifies that the compiler died with an explicit call to `.bug`
+/// or `.span_bug` rather than a failed assertion, etc.
+#[derive(Copy, Clone, Debug)]
+pub struct ExplicitBug;
+
+impl fmt::Display for ExplicitBug {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "parser internal bug")
+ }
+}
+
+impl error::Error for ExplicitBug {}
+
+pub use diagnostic::{
+ AddSubdiagnostic, DecorateLint, Diagnostic, DiagnosticArg, DiagnosticArgFromDisplay,
+ DiagnosticArgValue, DiagnosticId, DiagnosticStyledString, IntoDiagnosticArg, SubDiagnostic,
+};
+pub use diagnostic_builder::{DiagnosticBuilder, EmissionGuarantee, LintDiagnosticBuilder};
+use std::backtrace::Backtrace;
+
+/// A handler deals with errors and other compiler output.
+/// Certain errors (fatal, bug, unimpl) may cause immediate exit,
+/// while others are logged for later reporting.
+pub struct Handler {
+ flags: HandlerFlags,
+ inner: Lock<HandlerInner>,
+}
+
+/// This inner struct exists to keep it all behind a single lock;
+/// this is done to prevent possible deadlocks in a multi-threaded compiler,
+/// as well as inconsistent state observation.
+struct HandlerInner {
+ flags: HandlerFlags,
+ /// The number of lint errors that have been emitted.
+ lint_err_count: usize,
+ /// The number of errors that have been emitted, including duplicates.
+ ///
+ /// This is not necessarily the count that's reported to the user once
+ /// compilation ends.
+ err_count: usize,
+ warn_count: usize,
+ deduplicated_err_count: usize,
+ emitter: Box<dyn Emitter + sync::Send>,
+ delayed_span_bugs: Vec<Diagnostic>,
+ delayed_good_path_bugs: Vec<DelayedDiagnostic>,
+ /// This flag indicates that an expected diagnostic was emitted and suppressed.
+ /// This is used for the `delayed_good_path_bugs` check.
+ suppressed_expected_diag: bool,
+
+ /// This set contains the `DiagnosticId` of all emitted diagnostics to avoid
+ /// emitting the same diagnostic with extended help (`--teach`) twice, which
+ /// would be unnecessary repetition.
+ taught_diagnostics: FxHashSet<DiagnosticId>,
+
+    /// Used to suggest `rustc --explain <error code>`.
+ emitted_diagnostic_codes: FxHashSet<DiagnosticId>,
+
+ /// This set contains a hash of every diagnostic that has been emitted by
+    /// this handler. These hashes are used to avoid emitting the same error
+ /// twice.
+ emitted_diagnostics: FxHashSet<u128>,
+
+ /// Stashed diagnostics emitted in one stage of the compiler that may be
+ /// stolen by other stages (e.g. to improve them and add more information).
+ /// The stashed diagnostics count towards the total error count.
+ /// When `.abort_if_errors()` is called, these are also emitted.
+ stashed_diagnostics: FxIndexMap<(Span, StashKey), Diagnostic>,
+
+ /// The warning count, used for a recap upon finishing
+ deduplicated_warn_count: usize,
+
+ future_breakage_diagnostics: Vec<Diagnostic>,
+
+ /// The [`Self::unstable_expect_diagnostics`] should be empty when this struct is
+ /// dropped. However, it can have values if the compilation is stopped early
+    /// or is only partially executed. To avoid ICEs, like in rust#94953, we only
+    /// check that [`Self::unstable_expect_diagnostics`] is empty if the expectation
+    /// ids have been converted.
+ check_unstable_expect_diagnostics: bool,
+
+ /// Expected [`Diagnostic`]s store a [`LintExpectationId`] as part of
+ /// the lint level. [`LintExpectationId`]s created early during the compilation
+ /// (before `HirId`s have been defined) are not stable and can therefore not be
+ /// stored on disk. This buffer stores these diagnostics until the ID has been
+    /// replaced by a stable [`LintExpectationId`]. The [`Diagnostic`]s are then
+ /// submitted for storage and added to the list of fulfilled expectations.
+ unstable_expect_diagnostics: Vec<Diagnostic>,
+
+    /// An expected diagnostic (see [RFC-2383]) will have the level `Expect`,
+    /// which additionally carries the [`LintExpectationId`] of the expectation
+    /// that can be marked as fulfilled. This is a collection of all
+    /// [`LintExpectationId`]s that have been marked as fulfilled this way.
+ ///
+ /// [RFC-2383]: https://rust-lang.github.io/rfcs/2383-lint-reasons.html
+ fulfilled_expectations: FxHashSet<LintExpectationId>,
+}
+
+/// A key denoting where a diagnostic was stashed from.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub enum StashKey {
+ ItemNoType,
+}
+
+fn default_track_diagnostic(_: &Diagnostic) {}
+
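+/// Hook through which every diagnostic is passed before emission; tools can
+/// swap in their own function via this `AtomicRef` to observe diagnostics
+/// (the default is a no-op).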
+pub static TRACK_DIAGNOSTICS: AtomicRef<fn(&Diagnostic)> =
+ AtomicRef::new(&(default_track_diagnostic as fn(&_)));
+
+#[derive(Copy, Clone, Default)]
+pub struct HandlerFlags {
+ /// If false, warning-level lints are suppressed.
+ /// (rustc: see `--allow warnings` and `--cap-lints`)
+ pub can_emit_warnings: bool,
+ /// If true, error-level diagnostics are upgraded to bug-level.
+ /// (rustc: see `-Z treat-err-as-bug`)
+ pub treat_err_as_bug: Option<NonZeroUsize>,
+ /// If true, immediately emit diagnostics that would otherwise be buffered.
+ /// (rustc: see `-Z dont-buffer-diagnostics` and `-Z treat-err-as-bug`)
+ pub dont_buffer_diagnostics: bool,
+ /// If true, immediately print bugs registered with `delay_span_bug`.
+ /// (rustc: see `-Z report-delayed-bugs`)
+ pub report_delayed_bugs: bool,
+ /// Show macro backtraces.
+ /// (rustc: see `-Z macro-backtrace`)
+ pub macro_backtrace: bool,
+ /// If true, identical diagnostics are reported only once.
+ pub deduplicate_diagnostics: bool,
+}
+
+impl Drop for HandlerInner {
+ fn drop(&mut self) {
+ self.emit_stashed_diagnostics();
+
+ if !self.has_errors() {
+ let bugs = std::mem::replace(&mut self.delayed_span_bugs, Vec::new());
+ self.flush_delayed(bugs, "no errors encountered even though `delay_span_bug` issued");
+ }
+
+ // FIXME(eddyb) this explains what `delayed_good_path_bugs` are!
+ // They're `delayed_span_bugs` but for "require some diagnostic happened"
+ // instead of "require some error happened". Sadly that isn't ideal, as
+ // lints can be `#[allow]`'d, potentially leading to this triggering.
+ // Also, "good path" should be replaced with a better naming.
+ if !self.has_any_message() && !self.suppressed_expected_diag {
+ let bugs = std::mem::replace(&mut self.delayed_good_path_bugs, Vec::new());
+ self.flush_delayed(
+ bugs.into_iter().map(DelayedDiagnostic::decorate),
+ "no warnings or errors encountered even though `delayed_good_path_bugs` issued",
+ );
+ }
+
+ if self.check_unstable_expect_diagnostics {
+ assert!(
+ self.unstable_expect_diagnostics.is_empty(),
+ "all diagnostics with unstable expectations should have been converted",
+ );
+ }
+ }
+}
+
+impl Handler {
+ pub fn with_tty_emitter(
+ color_config: ColorConfig,
+ can_emit_warnings: bool,
+ treat_err_as_bug: Option<NonZeroUsize>,
+ sm: Option<Lrc<SourceMap>>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ ) -> Self {
+ Self::with_tty_emitter_and_flags(
+ color_config,
+ sm,
+ fluent_bundle,
+ fallback_bundle,
+ HandlerFlags { can_emit_warnings, treat_err_as_bug, ..Default::default() },
+ )
+ }
+
+ pub fn with_tty_emitter_and_flags(
+ color_config: ColorConfig,
+ sm: Option<Lrc<SourceMap>>,
+ fluent_bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ flags: HandlerFlags,
+ ) -> Self {
+ let emitter = Box::new(EmitterWriter::stderr(
+ color_config,
+ sm,
+ fluent_bundle,
+ fallback_bundle,
+ false,
+ false,
+ None,
+ flags.macro_backtrace,
+ ));
+ Self::with_emitter_and_flags(emitter, flags)
+ }
+
+ pub fn with_emitter(
+ can_emit_warnings: bool,
+ treat_err_as_bug: Option<NonZeroUsize>,
+ emitter: Box<dyn Emitter + sync::Send>,
+ ) -> Self {
+ Handler::with_emitter_and_flags(
+ emitter,
+ HandlerFlags { can_emit_warnings, treat_err_as_bug, ..Default::default() },
+ )
+ }
+
+ pub fn with_emitter_and_flags(
+ emitter: Box<dyn Emitter + sync::Send>,
+ flags: HandlerFlags,
+ ) -> Self {
+ Self {
+ flags,
+ inner: Lock::new(HandlerInner {
+ flags,
+ lint_err_count: 0,
+ err_count: 0,
+ warn_count: 0,
+ deduplicated_err_count: 0,
+ deduplicated_warn_count: 0,
+ emitter,
+ delayed_span_bugs: Vec::new(),
+ delayed_good_path_bugs: Vec::new(),
+ suppressed_expected_diag: false,
+ taught_diagnostics: Default::default(),
+ emitted_diagnostic_codes: Default::default(),
+ emitted_diagnostics: Default::default(),
+ stashed_diagnostics: Default::default(),
+ future_breakage_diagnostics: Vec::new(),
+ check_unstable_expect_diagnostics: false,
+ unstable_expect_diagnostics: Vec::new(),
+ fulfilled_expectations: Default::default(),
+ }),
+ }
+ }
+
+ // This is here to not allow mutation of flags;
+ // as of this writing it's only used in tests in librustc_middle.
+ pub fn can_emit_warnings(&self) -> bool {
+ self.flags.can_emit_warnings
+ }
+
+ /// Resets the diagnostic error count as well as the cached emitted diagnostics.
+ ///
+ /// NOTE: *do not* call this function from rustc. It is only meant to be called from external
+    /// tools that want to reuse a `Parser`, cleaning the previously emitted diagnostics as well as
+ /// the overall count of emitted error diagnostics.
+ pub fn reset_err_count(&self) {
+ let mut inner = self.inner.borrow_mut();
+ inner.err_count = 0;
+ inner.warn_count = 0;
+ inner.deduplicated_err_count = 0;
+ inner.deduplicated_warn_count = 0;
+
+ // actually free the underlying memory (which `clear` would not do)
+ inner.delayed_span_bugs = Default::default();
+ inner.delayed_good_path_bugs = Default::default();
+ inner.taught_diagnostics = Default::default();
+ inner.emitted_diagnostic_codes = Default::default();
+ inner.emitted_diagnostics = Default::default();
+ inner.stashed_diagnostics = Default::default();
+ }
+
+ /// Stash a given diagnostic with the given `Span` and `StashKey` as the key for later stealing.
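+    ///
+    /// Illustrative sketch of the stash/steal workflow:
+    ///
+    /// ```ignore (illustrative)
+    /// handler.stash_diagnostic(span, StashKey::ItemNoType, diag);
+    /// // ... later, another pass may steal, improve, and emit it:
+    /// if let Some(mut db) = handler.steal_diagnostic(span, StashKey::ItemNoType) {
+    ///     db.emit();
+    /// }
+    /// ```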
+ pub fn stash_diagnostic(&self, span: Span, key: StashKey, diag: Diagnostic) {
+ let mut inner = self.inner.borrow_mut();
+ // FIXME(Centril, #69537): Consider reintroducing panic on overwriting a stashed diagnostic
+ // if/when we have a more robust macro-friendly replacement for `(span, key)` as a key.
+ // See the PR for a discussion.
+ inner.stashed_diagnostics.insert((span, key), diag);
+ }
+
+ /// Steal a previously stashed diagnostic with the given `Span` and `StashKey` as the key.
+ pub fn steal_diagnostic(&self, span: Span, key: StashKey) -> Option<DiagnosticBuilder<'_, ()>> {
+ self.inner
+ .borrow_mut()
+ .stashed_diagnostics
+ .remove(&(span, key))
+ .map(|diag| DiagnosticBuilder::new_diagnostic(self, diag))
+ }
+
+ /// Emit all stashed diagnostics.
+ pub fn emit_stashed_diagnostics(&self) -> Option<ErrorGuaranteed> {
+ self.inner.borrow_mut().emit_stashed_diagnostics()
+ }
+
+ /// Construct a builder at the `Warning` level at the given `span` and with the `msg`.
+ ///
+ /// Attempting to `.emit()` the builder will only emit if either:
+ /// * `can_emit_warnings` is `true`
+ /// * `is_force_warn` was set in `DiagnosticId::Lint`
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_warn(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ()> {
+ let mut result = self.struct_warn(msg);
+ result.set_span(span);
+ result
+ }
+
+ /// Construct a builder at the `Warning` level at the given `span` and with the `msg`.
+ /// The `id` is used for lint emissions which should also fulfill a lint expectation.
+ ///
+ /// Attempting to `.emit()` the builder will only emit if either:
+ /// * `can_emit_warnings` is `true`
+ /// * `is_force_warn` was set in `DiagnosticId::Lint`
+ pub fn struct_span_warn_with_expectation(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ id: LintExpectationId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ let mut result = self.struct_warn_with_expectation(msg, id);
+ result.set_span(span);
+ result
+ }
+
+ /// Construct a builder at the `Allow` level at the given `span` and with the `msg`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_allow(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ()> {
+ let mut result = self.struct_allow(msg);
+ result.set_span(span);
+ result
+ }
+
+ /// Construct a builder at the `Warning` level at the given `span` and with the `msg`.
+ /// Also include a code.
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_warn_with_code(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ let mut result = self.struct_span_warn(span, msg);
+ result.code(code);
+ result
+ }
+
+ /// Construct a builder at the `Warning` level with the `msg`.
+ ///
+ /// Attempting to `.emit()` the builder will only emit if either:
+ /// * `can_emit_warnings` is `true`
+ /// * `is_force_warn` was set in `DiagnosticId::Lint`
+ #[rustc_lint_diagnostics]
+ pub fn struct_warn(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, ()> {
+ DiagnosticBuilder::new(self, Level::Warning(None), msg)
+ }
+
+ /// Construct a builder at the `Warning` level with the `msg`. The `id` is used for
+ /// lint emissions which should also fulfill a lint expectation.
+ ///
+ /// Attempting to `.emit()` the builder will only emit if either:
+ /// * `can_emit_warnings` is `true`
+ /// * `is_force_warn` was set in `DiagnosticId::Lint`
+ pub fn struct_warn_with_expectation(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ id: LintExpectationId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ DiagnosticBuilder::new(self, Level::Warning(Some(id)), msg)
+ }
+
+ /// Construct a builder at the `Allow` level with the `msg`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_allow(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, ()> {
+ DiagnosticBuilder::new(self, Level::Allow, msg)
+ }
+
+ /// Construct a builder at the `Expect` level with the `msg`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_expect(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ id: LintExpectationId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ DiagnosticBuilder::new(self, Level::Expect(id), msg)
+ }
+
+ /// Construct a builder at the `Error` level at the given `span` and with the `msg`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_err(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut result = self.struct_err(msg);
+ result.set_span(span);
+ result
+ }
+
+ /// Construct a builder at the `Error` level at the given `span`, with the `msg`, and `code`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_err_with_code(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut result = self.struct_span_err(span, msg);
+ result.code(code);
+ result
+ }
+
+ /// Construct a builder at the `Error` level with the `msg`.
+ // FIXME: This method should be removed (every error should have an associated error code).
+ #[rustc_lint_diagnostics]
+ pub fn struct_err(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ DiagnosticBuilder::new_guaranteeing_error::<_, { Level::Error { lint: false } }>(self, msg)
+ }
+
+ /// This should only be used by `rustc_middle::lint::struct_lint_level`. Do not use it for hard errors.
+ #[doc(hidden)]
+ pub fn struct_err_lint(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, ()> {
+ DiagnosticBuilder::new(self, Level::Error { lint: true }, msg)
+ }
+
+ /// Construct a builder at the `Error` level with the `msg` and the `code`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_err_with_code(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut result = self.struct_err(msg);
+ result.code(code);
+ result
+ }
+
+ /// Construct a builder at the `Warn` level with the `msg` and the `code`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_warn_with_code(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ let mut result = self.struct_warn(msg);
+ result.code(code);
+ result
+ }
+
+ /// Construct a builder at the `Fatal` level at the given `span` and with the `msg`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_fatal(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, !> {
+ let mut result = self.struct_fatal(msg);
+ result.set_span(span);
+ result
+ }
+
+ /// Construct a builder at the `Fatal` level at the given `span`, with the `msg`, and `code`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_fatal_with_code(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, !> {
+ let mut result = self.struct_span_fatal(span, msg);
+ result.code(code);
+ result
+ }
+
+    /// Construct a builder at the `Fatal` level with the `msg`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_fatal(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, !> {
+ DiagnosticBuilder::new_fatal(self, msg)
+ }
+
+ /// Construct a builder at the `Help` level with the `msg`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_help(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, ()> {
+ DiagnosticBuilder::new(self, Level::Help, msg)
+ }
+
+ /// Construct a builder at the `Note` level with the `msg`.
+ #[rustc_lint_diagnostics]
+ pub fn struct_note_without_error(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ()> {
+ DiagnosticBuilder::new(self, Level::Note, msg)
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn span_fatal(&self, span: impl Into<MultiSpan>, msg: impl Into<DiagnosticMessage>) -> ! {
+ self.emit_diag_at_span(Diagnostic::new(Fatal, msg), span);
+ FatalError.raise()
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn span_fatal_with_code(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> ! {
+ self.emit_diag_at_span(Diagnostic::new_with_code(Fatal, Some(code), msg), span);
+ FatalError.raise()
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn span_err(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> ErrorGuaranteed {
+ self.emit_diag_at_span(Diagnostic::new(Error { lint: false }, msg), span).unwrap()
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn span_err_with_code(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) {
+ self.emit_diag_at_span(
+ Diagnostic::new_with_code(Error { lint: false }, Some(code), msg),
+ span,
+ );
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn span_warn(&self, span: impl Into<MultiSpan>, msg: impl Into<DiagnosticMessage>) {
+ self.emit_diag_at_span(Diagnostic::new(Warning(None), msg), span);
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn span_warn_with_code(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) {
+ self.emit_diag_at_span(Diagnostic::new_with_code(Warning(None), Some(code), msg), span);
+ }
+
+ pub fn span_bug(&self, span: impl Into<MultiSpan>, msg: impl Into<DiagnosticMessage>) -> ! {
+ self.inner.borrow_mut().span_bug(span, msg)
+ }
+
+ #[track_caller]
+ pub fn delay_span_bug(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> ErrorGuaranteed {
+ self.inner.borrow_mut().delay_span_bug(span, msg)
+ }
+
+ // FIXME(eddyb) note the comment inside `impl Drop for HandlerInner`, that's
+ // where the explanation of what "good path" is (also, it should be renamed).
+ pub fn delay_good_path_bug(&self, msg: impl Into<DiagnosticMessage>) {
+ self.inner.borrow_mut().delay_good_path_bug(msg)
+ }
+
+ pub fn span_bug_no_panic(&self, span: impl Into<MultiSpan>, msg: impl Into<DiagnosticMessage>) {
+ self.emit_diag_at_span(Diagnostic::new(Bug, msg), span);
+ }
+
+ pub fn span_note_without_error(
+ &self,
+ span: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ ) {
+ self.emit_diag_at_span(Diagnostic::new(Note, msg), span);
+ }
+
+ pub fn span_note_diag(
+ &self,
+ span: Span,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ()> {
+ let mut db = DiagnosticBuilder::new(self, Note, msg);
+ db.set_span(span);
+ db
+ }
+
+ // NOTE: intentionally doesn't raise an error so rustc_codegen_ssa only reports fatal errors in the main thread
+ pub fn fatal(&self, msg: impl Into<DiagnosticMessage>) -> FatalError {
+ self.inner.borrow_mut().fatal(msg)
+ }
+
+ pub fn err(&self, msg: impl Into<DiagnosticMessage>) -> ErrorGuaranteed {
+ self.inner.borrow_mut().err(msg)
+ }
+
+ pub fn warn(&self, msg: impl Into<DiagnosticMessage>) {
+ let mut db = DiagnosticBuilder::new(self, Warning(None), msg);
+ db.emit();
+ }
+
+ pub fn note_without_error(&self, msg: impl Into<DiagnosticMessage>) {
+ DiagnosticBuilder::new(self, Note, msg).emit();
+ }
+
+ pub fn bug(&self, msg: impl Into<DiagnosticMessage>) -> ! {
+ self.inner.borrow_mut().bug(msg)
+ }
+
+ #[inline]
+ pub fn err_count(&self) -> usize {
+ self.inner.borrow().err_count()
+ }
+
+ pub fn has_errors(&self) -> Option<ErrorGuaranteed> {
+ if self.inner.borrow().has_errors() { Some(ErrorGuaranteed(())) } else { None }
+ }
+ pub fn has_errors_or_lint_errors(&self) -> Option<ErrorGuaranteed> {
+ if self.inner.borrow().has_errors_or_lint_errors() {
+ Some(ErrorGuaranteed(()))
+ } else {
+ None
+ }
+ }
+ pub fn has_errors_or_delayed_span_bugs(&self) -> bool {
+ self.inner.borrow().has_errors_or_delayed_span_bugs()
+ }
+
+ pub fn print_error_count(&self, registry: &Registry) {
+ self.inner.borrow_mut().print_error_count(registry)
+ }
+
+ pub fn take_future_breakage_diagnostics(&self) -> Vec<Diagnostic> {
+ std::mem::take(&mut self.inner.borrow_mut().future_breakage_diagnostics)
+ }
+
+ pub fn abort_if_errors(&self) {
+ self.inner.borrow_mut().abort_if_errors()
+ }
+
+ /// `true` if we haven't taught a diagnostic with this code already.
+ /// The caller must then teach the user about such a diagnostic.
+ ///
+    /// Used to suppress emitting the same error multiple times with extended explanation when
+    /// `-Zteach` is passed.
+ pub fn must_teach(&self, code: &DiagnosticId) -> bool {
+ self.inner.borrow_mut().must_teach(code)
+ }
+
+ pub fn force_print_diagnostic(&self, db: Diagnostic) {
+ self.inner.borrow_mut().force_print_diagnostic(db)
+ }
+
+ pub fn emit_diagnostic(&self, diagnostic: &mut Diagnostic) -> Option<ErrorGuaranteed> {
+ self.inner.borrow_mut().emit_diagnostic(diagnostic)
+ }
+
+ fn emit_diag_at_span(
+ &self,
+ mut diag: Diagnostic,
+ sp: impl Into<MultiSpan>,
+ ) -> Option<ErrorGuaranteed> {
+ let mut inner = self.inner.borrow_mut();
+ inner.emit_diagnostic(diag.set_span(sp))
+ }
+
+ pub fn emit_artifact_notification(&self, path: &Path, artifact_type: &str) {
+ self.inner.borrow_mut().emit_artifact_notification(path, artifact_type)
+ }
+
+ pub fn emit_future_breakage_report(&self, diags: Vec<Diagnostic>) {
+ self.inner.borrow_mut().emitter.emit_future_breakage_report(diags)
+ }
+
+ pub fn emit_unused_externs(
+ &self,
+ lint_level: rustc_lint_defs::Level,
+ loud: bool,
+ unused_externs: &[&str],
+ ) {
+ let mut inner = self.inner.borrow_mut();
+
+ if loud && lint_level.is_error() {
+ inner.bump_err_count();
+ }
+
+ inner.emit_unused_externs(lint_level, unused_externs)
+ }
+
+ pub fn update_unstable_expectation_id(
+ &self,
+ unstable_to_stable: &FxHashMap<LintExpectationId, LintExpectationId>,
+ ) {
+ let mut inner = self.inner.borrow_mut();
+ let diags = std::mem::take(&mut inner.unstable_expect_diagnostics);
+ inner.check_unstable_expect_diagnostics = true;
+
+ if !diags.is_empty() {
+ inner.suppressed_expected_diag = true;
+ for mut diag in diags.into_iter() {
+ diag.update_unstable_expectation_id(unstable_to_stable);
+
+ // Here the diagnostic is given back to `emit_diagnostic` where it was first
+ // intercepted. Now it should be processed as usual, since the unstable expectation
+ // id is now stable.
+ inner.emit_diagnostic(&mut diag);
+ }
+ }
+
+ inner
+ .stashed_diagnostics
+ .values_mut()
+ .for_each(|diag| diag.update_unstable_expectation_id(unstable_to_stable));
+ inner
+ .future_breakage_diagnostics
+ .iter_mut()
+ .for_each(|diag| diag.update_unstable_expectation_id(unstable_to_stable));
+ }
+
+    /// This method steals all [`LintExpectationId`]s that are stored inside
+ /// [`HandlerInner`] and indicate that the linked expectation has been fulfilled.
+ #[must_use]
+ pub fn steal_fulfilled_expectation_ids(&self) -> FxHashSet<LintExpectationId> {
+ assert!(
+ self.inner.borrow().unstable_expect_diagnostics.is_empty(),
+ "`HandlerInner::unstable_expect_diagnostics` should be empty at this point",
+ );
+ std::mem::take(&mut self.inner.borrow_mut().fulfilled_expectations)
+ }
+}
+
+impl HandlerInner {
+ fn must_teach(&mut self, code: &DiagnosticId) -> bool {
+ self.taught_diagnostics.insert(code.clone())
+ }
+
+ fn force_print_diagnostic(&mut self, mut db: Diagnostic) {
+ self.emitter.emit_diagnostic(&mut db);
+ }
+
+ /// Emit all stashed diagnostics.
+ fn emit_stashed_diagnostics(&mut self) -> Option<ErrorGuaranteed> {
+ let diags = self.stashed_diagnostics.drain(..).map(|x| x.1).collect::<Vec<_>>();
+ let mut reported = None;
+ for mut diag in diags {
+ if diag.is_error() {
+ reported = Some(ErrorGuaranteed(()));
+ }
+ self.emit_diagnostic(&mut diag);
+ }
+ reported
+ }
+
+ // FIXME(eddyb) this should ideally take `diagnostic` by value.
+ fn emit_diagnostic(&mut self, diagnostic: &mut Diagnostic) -> Option<ErrorGuaranteed> {
+ // The `LintExpectationId` can be stable or unstable depending on when it was created.
+        // Diagnostics created before the definition of `HirId`s are unstable and cannot yet
+ // be stored. Instead, they are buffered until the `LintExpectationId` is replaced by
+ // a stable one by the `LintLevelsBuilder`.
+ if let Some(LintExpectationId::Unstable { .. }) = diagnostic.level.get_expectation_id() {
+ self.unstable_expect_diagnostics.push(diagnostic.clone());
+ return None;
+ }
+
+ if diagnostic.level == Level::DelayedBug {
+ // FIXME(eddyb) this should check for `has_errors` and stop pushing
+ // once *any* errors were emitted (and truncate `delayed_span_bugs`
+ // when an error is first emitted, also), but maybe there's a case
+ // in which that's not sound? otherwise this is really inefficient.
+ self.delayed_span_bugs.push(diagnostic.clone());
+
+ if !self.flags.report_delayed_bugs {
+ return Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
+ }
+ }
+
+ if diagnostic.has_future_breakage() {
+ self.future_breakage_diagnostics.push(diagnostic.clone());
+ }
+
+ if let Some(expectation_id) = diagnostic.level.get_expectation_id() {
+ self.suppressed_expected_diag = true;
+ self.fulfilled_expectations.insert(expectation_id);
+ }
+
+ if matches!(diagnostic.level, Warning(_))
+ && !self.flags.can_emit_warnings
+ && !diagnostic.is_force_warn()
+ {
+ if diagnostic.has_future_breakage() {
+ (*TRACK_DIAGNOSTICS)(diagnostic);
+ }
+ return None;
+ }
+
+ (*TRACK_DIAGNOSTICS)(diagnostic);
+
+ if matches!(diagnostic.level, Level::Expect(_) | Level::Allow) {
+ return None;
+ }
+
+ if let Some(ref code) = diagnostic.code {
+ self.emitted_diagnostic_codes.insert(code.clone());
+ }
+
+ let already_emitted = |this: &mut Self| {
+ let mut hasher = StableHasher::new();
+ diagnostic.hash(&mut hasher);
+ let diagnostic_hash = hasher.finish();
+ !this.emitted_diagnostics.insert(diagnostic_hash)
+ };
+
+        // Only emit the diagnostic if we haven't been asked to deduplicate or
+        // haven't already emitted an equivalent diagnostic.
+ if !(self.flags.deduplicate_diagnostics && already_emitted(self)) {
+ debug!(?diagnostic);
+ debug!(?self.emitted_diagnostics);
+ let already_emitted_sub = |sub: &mut SubDiagnostic| {
+ debug!(?sub);
+ if sub.level != Level::OnceNote {
+ return false;
+ }
+ let mut hasher = StableHasher::new();
+ sub.hash(&mut hasher);
+ let diagnostic_hash = hasher.finish();
+ debug!(?diagnostic_hash);
+ !self.emitted_diagnostics.insert(diagnostic_hash)
+ };
+
+ diagnostic.children.drain_filter(already_emitted_sub).for_each(|_| {});
+
+ self.emitter.emit_diagnostic(&diagnostic);
+ if diagnostic.is_error() {
+ self.deduplicated_err_count += 1;
+ } else if let Warning(_) = diagnostic.level {
+ self.deduplicated_warn_count += 1;
+ }
+ }
+ if diagnostic.is_error() {
+ if matches!(diagnostic.level, Level::Error { lint: true }) {
+ self.bump_lint_err_count();
+ } else {
+ self.bump_err_count();
+ }
+
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted())
+ } else {
+ self.bump_warn_count();
+
+ None
+ }
+ }
+
+ fn emit_artifact_notification(&mut self, path: &Path, artifact_type: &str) {
+ self.emitter.emit_artifact_notification(path, artifact_type);
+ }
+
+ fn emit_unused_externs(&mut self, lint_level: rustc_lint_defs::Level, unused_externs: &[&str]) {
+ self.emitter.emit_unused_externs(lint_level, unused_externs);
+ }
+
+ fn treat_err_as_bug(&self) -> bool {
+ self.flags
+ .treat_err_as_bug
+ .map_or(false, |c| self.err_count() + self.lint_err_count >= c.get())
+ }
+
+ fn print_error_count(&mut self, registry: &Registry) {
+ self.emit_stashed_diagnostics();
+
+ let warnings = match self.deduplicated_warn_count {
+ 0 => String::new(),
+ 1 => "1 warning emitted".to_string(),
+ count => format!("{count} warnings emitted"),
+ };
+ let errors = match self.deduplicated_err_count {
+ 0 => String::new(),
+ 1 => "aborting due to previous error".to_string(),
+ count => format!("aborting due to {count} previous errors"),
+ };
+ if self.treat_err_as_bug() {
+ return;
+ }
+
+ match (errors.len(), warnings.len()) {
+ (0, 0) => return,
+ (0, _) => self.emitter.emit_diagnostic(&Diagnostic::new(
+ Level::Warning(None),
+ DiagnosticMessage::Str(warnings),
+ )),
+ (_, 0) => {
+ let _ = self.fatal(&errors);
+ }
+ (_, _) => {
+ let _ = self.fatal(&format!("{}; {}", &errors, &warnings));
+ }
+ }
+
+ let can_show_explain = self.emitter.should_show_explain();
+ let are_there_diagnostics = !self.emitted_diagnostic_codes.is_empty();
+ if can_show_explain && are_there_diagnostics {
+ let mut error_codes = self
+ .emitted_diagnostic_codes
+ .iter()
+ .filter_map(|x| match &x {
+ DiagnosticId::Error(s)
+ if registry.try_find_description(s).map_or(false, |o| o.is_some()) =>
+ {
+ Some(s.clone())
+ }
+ _ => None,
+ })
+ .collect::<Vec<_>>();
+ if !error_codes.is_empty() {
+ error_codes.sort();
+ if error_codes.len() > 1 {
+ let limit = if error_codes.len() > 9 { 9 } else { error_codes.len() };
+ self.failure(&format!(
+ "Some errors have detailed explanations: {}{}",
+ error_codes[..limit].join(", "),
+ if error_codes.len() > 9 { "..." } else { "." }
+ ));
+ self.failure(&format!(
+ "For more information about an error, try \
+ `rustc --explain {}`.",
+ &error_codes[0]
+ ));
+ } else {
+ self.failure(&format!(
+ "For more information about this error, try \
+ `rustc --explain {}`.",
+ &error_codes[0]
+ ));
+ }
+ }
+ }
+ }
+
+ #[inline]
+ fn err_count(&self) -> usize {
+ self.err_count + self.stashed_diagnostics.len()
+ }
+
+ fn has_errors(&self) -> bool {
+ self.err_count() > 0
+ }
+ fn has_errors_or_lint_errors(&self) -> bool {
+ self.has_errors() || self.lint_err_count > 0
+ }
+ fn has_errors_or_delayed_span_bugs(&self) -> bool {
+ self.has_errors() || !self.delayed_span_bugs.is_empty()
+ }
+ fn has_any_message(&self) -> bool {
+ self.err_count() > 0 || self.lint_err_count > 0 || self.warn_count > 0
+ }
+
+ fn abort_if_errors(&mut self) {
+ self.emit_stashed_diagnostics();
+
+ if self.has_errors() {
+ FatalError.raise();
+ }
+ }
+
+ fn span_bug(&mut self, sp: impl Into<MultiSpan>, msg: impl Into<DiagnosticMessage>) -> ! {
+ self.emit_diag_at_span(Diagnostic::new(Bug, msg), sp);
+ panic::panic_any(ExplicitBug);
+ }
+
+ fn emit_diag_at_span(&mut self, mut diag: Diagnostic, sp: impl Into<MultiSpan>) {
+ self.emit_diagnostic(diag.set_span(sp));
+ }
+
+ #[track_caller]
+ fn delay_span_bug(
+ &mut self,
+ sp: impl Into<MultiSpan>,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> ErrorGuaranteed {
+ // This is technically `self.treat_err_as_bug()` but `delay_span_bug` is called before
+        // incrementing `err_count` by one, so we need to add 1 to the comparison.
+ // FIXME: Would be nice to increment err_count in a more coherent way.
+ if self.flags.treat_err_as_bug.map_or(false, |c| self.err_count() + 1 >= c.get()) {
+ // FIXME: don't abort here if report_delayed_bugs is off
+ self.span_bug(sp, msg);
+ }
+ let mut diagnostic = Diagnostic::new(Level::DelayedBug, msg);
+ diagnostic.set_span(sp.into());
+ diagnostic.note(&format!("delayed at {}", std::panic::Location::caller()));
+ self.emit_diagnostic(&mut diagnostic).unwrap()
+ }
+
+ // FIXME(eddyb) note the comment inside `impl Drop for HandlerInner`, that's
+ // where the explanation of what "good path" is (also, it should be renamed).
+ fn delay_good_path_bug(&mut self, msg: impl Into<DiagnosticMessage>) {
+ let mut diagnostic = Diagnostic::new(Level::DelayedBug, msg);
+ if self.flags.report_delayed_bugs {
+ self.emit_diagnostic(&mut diagnostic);
+ }
+ let backtrace = std::backtrace::Backtrace::force_capture();
+ self.delayed_good_path_bugs.push(DelayedDiagnostic::with_backtrace(diagnostic, backtrace));
+ }
+
+ fn failure(&mut self, msg: impl Into<DiagnosticMessage>) {
+ self.emit_diagnostic(&mut Diagnostic::new(FailureNote, msg));
+ }
+
+ fn fatal(&mut self, msg: impl Into<DiagnosticMessage>) -> FatalError {
+ self.emit(Fatal, msg);
+ FatalError
+ }
+
+ fn err(&mut self, msg: impl Into<DiagnosticMessage>) -> ErrorGuaranteed {
+ self.emit(Error { lint: false }, msg)
+ }
+
+ /// Emit an error; level should be `Error` or `Fatal`.
+ fn emit(&mut self, level: Level, msg: impl Into<DiagnosticMessage>) -> ErrorGuaranteed {
+ if self.treat_err_as_bug() {
+ self.bug(msg);
+ }
+ self.emit_diagnostic(&mut Diagnostic::new(level, msg)).unwrap()
+ }
+
+ fn bug(&mut self, msg: impl Into<DiagnosticMessage>) -> ! {
+ self.emit_diagnostic(&mut Diagnostic::new(Bug, msg));
+ panic::panic_any(ExplicitBug);
+ }
+
+ fn flush_delayed(
+ &mut self,
+ bugs: impl IntoIterator<Item = Diagnostic>,
+ explanation: impl Into<DiagnosticMessage> + Copy,
+ ) {
+ let mut no_bugs = true;
+ for mut bug in bugs {
+ if no_bugs {
+ // Put the overall explanation before the `DelayedBug`s, to
+ // frame them better (e.g. separate warnings from them).
+ self.emit_diagnostic(&mut Diagnostic::new(Bug, explanation));
+ no_bugs = false;
+ }
+
+ // "Undelay" the `DelayedBug`s (into plain `Bug`s).
+ if bug.level != Level::DelayedBug {
+ // NOTE(eddyb) not panicking here because we're already producing
+ // an ICE, and the more information the merrier.
+ bug.note(&format!(
+ "`flushed_delayed` got diagnostic with level {:?}, \
+ instead of the expected `DelayedBug`",
+ bug.level,
+ ));
+ }
+ bug.level = Level::Bug;
+
+ self.emit_diagnostic(&mut bug);
+ }
+
+ // Panic with `ExplicitBug` to avoid "unexpected panic" messages.
+ if !no_bugs {
+ panic::panic_any(ExplicitBug);
+ }
+ }
+
+ fn bump_lint_err_count(&mut self) {
+ self.lint_err_count += 1;
+ self.panic_if_treat_err_as_bug();
+ }
+
+ fn bump_err_count(&mut self) {
+ self.err_count += 1;
+ self.panic_if_treat_err_as_bug();
+ }
+
+ fn bump_warn_count(&mut self) {
+ self.warn_count += 1;
+ }
+
+ fn panic_if_treat_err_as_bug(&self) {
+ if self.treat_err_as_bug() {
+ match (
+ self.err_count() + self.lint_err_count,
+ self.flags.treat_err_as_bug.map(|c| c.get()).unwrap_or(0),
+ ) {
+ (1, 1) => panic!("aborting due to `-Z treat-err-as-bug=1`"),
+ (0 | 1, _) => {}
+ (count, as_bug) => panic!(
+ "aborting after {} errors due to `-Z treat-err-as-bug={}`",
+ count, as_bug,
+ ),
+ }
+ }
+ }
+}
+
+struct DelayedDiagnostic {
+ inner: Diagnostic,
+ note: Backtrace,
+}
+
+impl DelayedDiagnostic {
+ fn with_backtrace(diagnostic: Diagnostic, backtrace: Backtrace) -> Self {
+ DelayedDiagnostic { inner: diagnostic, note: backtrace }
+ }
+
+ fn decorate(mut self) -> Diagnostic {
+ self.inner.note(&format!("delayed at {}", self.note));
+ self.inner
+ }
+}
+
+#[derive(Copy, PartialEq, Eq, Clone, Hash, Debug, Encodable, Decodable)]
+pub enum Level {
+ Bug,
+ DelayedBug,
+ Fatal,
+ Error {
+        /// If this error comes from a lint, don't abort compilation even when `abort_if_errors()` is called.
+ lint: bool,
+ },
+ /// This [`LintExpectationId`] is used for expected lint diagnostics, which should
+ /// also emit a warning due to the `force-warn` flag. In all other cases this should
+ /// be `None`.
+ Warning(Option<LintExpectationId>),
+ Note,
+ /// A note that is only emitted once.
+ OnceNote,
+ Help,
+ FailureNote,
+ Allow,
+ Expect(LintExpectationId),
+}
+
+impl fmt::Display for Level {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.to_str().fmt(f)
+ }
+}
+
+impl Level {
+ fn color(self) -> ColorSpec {
+ let mut spec = ColorSpec::new();
+ match self {
+ Bug | DelayedBug | Fatal | Error { .. } => {
+ spec.set_fg(Some(Color::Red)).set_intense(true);
+ }
+ Warning(_) => {
+ spec.set_fg(Some(Color::Yellow)).set_intense(cfg!(windows));
+ }
+ Note | OnceNote => {
+ spec.set_fg(Some(Color::Green)).set_intense(true);
+ }
+ Help => {
+ spec.set_fg(Some(Color::Cyan)).set_intense(true);
+ }
+ FailureNote => {}
+ Allow | Expect(_) => unreachable!(),
+ }
+ spec
+ }
+
+ pub fn to_str(self) -> &'static str {
+ match self {
+ Bug | DelayedBug => "error: internal compiler error",
+ Fatal | Error { .. } => "error",
+ Warning(_) => "warning",
+ Note | OnceNote => "note",
+ Help => "help",
+ FailureNote => "failure-note",
+ Allow => panic!("Shouldn't call on allowed error"),
+ Expect(_) => panic!("Shouldn't call on expected error"),
+ }
+ }
+
+ pub fn is_failure_note(&self) -> bool {
+ matches!(*self, FailureNote)
+ }
+
+ pub fn get_expectation_id(&self) -> Option<LintExpectationId> {
+ match self {
+ Level::Expect(id) | Level::Warning(Some(id)) => Some(*id),
+ _ => None,
+ }
+ }
+}
+
+// FIXME(eddyb) this doesn't belong here AFAICT, should be moved to callsite.
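+/// Suggests how to indicate the `n` elided lifetimes in a path by inserting
+/// `'_` placeholders at `insertion_span`: wrapped in angle brackets (`<'_>`)
+/// when `incl_angl_brckt` is set, or as a leading list item (`'_, `) otherwise.
+/// Nothing is suggested for spans generated by a proc-macro.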
+pub fn add_elided_lifetime_in_path_suggestion(
+ source_map: &SourceMap,
+ diag: &mut Diagnostic,
+ n: usize,
+ path_span: Span,
+ incl_angl_brckt: bool,
+ insertion_span: Span,
+) {
+ diag.span_label(path_span, format!("expected lifetime parameter{}", pluralize!(n)));
+ if !source_map.is_span_accessible(insertion_span) {
+ // Do not try to suggest anything if generated by a proc-macro.
+ return;
+ }
+ let anon_lts = vec!["'_"; n].join(", ");
+ let suggestion =
+ if incl_angl_brckt { format!("<{}>", anon_lts) } else { format!("{}, ", anon_lts) };
+ diag.span_suggestion_verbose(
+ insertion_span.shrink_to_hi(),
+ &format!("indicate the anonymous lifetime{}", pluralize!(n)),
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+}
+
+/// Useful type to use with `Result<>` to indicate that an error has already
+/// been reported to the user, so there is no need to continue checking.
+#[derive(Clone, Copy, Debug, Encodable, Decodable, Hash, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(HashStable_Generic)]
+pub struct ErrorGuaranteed(());
+
+impl ErrorGuaranteed {
+ /// To be used only if you really know what you are doing... ideally, we would find a way to
+ /// eliminate all calls to this method.
+ pub fn unchecked_claim_error_was_emitted() -> Self {
+ ErrorGuaranteed(())
+ }
+}
diff --git a/compiler/rustc_errors/src/lock.rs b/compiler/rustc_errors/src/lock.rs
new file mode 100644
index 000000000..a73472021
--- /dev/null
+++ b/compiler/rustc_errors/src/lock.rs
@@ -0,0 +1,93 @@
+//! Bindings to acquire a global named lock.
+//!
+//! This is intended to be used to synchronize multiple compiler processes to
+//! ensure that we can output complete errors without interleaving on Windows.
+//! Note that this is currently only needed for allowing only one 32-bit MSVC
+//! linker to execute at once on MSVC hosts, so this is only implemented for
+//! `cfg(windows)`. Also note that this may not always be used on Windows,
+//! only when targeting 32-bit MSVC.
+//!
+//! For more information about why this is necessary, see where this is called.
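+//!
+//! Illustrative usage, with a hypothetical lock name; the returned guard
+//! releases the mutex when dropped:
+//!
+//! ```ignore (illustrative)
+//! let _guard = acquire_global_lock("rustc-msvc-linker");
+//! // ... run the 32-bit MSVC linker ...
+//! // `_guard` is dropped here, releasing the named mutex
+//! ```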
+
+use std::any::Any;
+
+#[cfg(windows)]
+pub fn acquire_global_lock(name: &str) -> Box<dyn Any> {
+ use std::ffi::CString;
+ use std::io;
+
+ use winapi::shared::ntdef::HANDLE;
+ use winapi::um::handleapi::CloseHandle;
+ use winapi::um::synchapi::{CreateMutexA, ReleaseMutex, WaitForSingleObject};
+ use winapi::um::winbase::{INFINITE, WAIT_ABANDONED, WAIT_OBJECT_0};
+
+ struct Handle(HANDLE);
+
+ impl Drop for Handle {
+ fn drop(&mut self) {
+ unsafe {
+ CloseHandle(self.0);
+ }
+ }
+ }
+
+ struct Guard(Handle);
+
+ impl Drop for Guard {
+ fn drop(&mut self) {
+ unsafe {
+ ReleaseMutex((self.0).0);
+ }
+ }
+ }
+
+ let cname = CString::new(name).unwrap();
+ unsafe {
+ // Create a named mutex, with no security attributes and also not
+ // acquired when we create it.
+ //
+ // This will silently create one if it doesn't already exist, or it'll
+ // open up a handle to one if it already exists.
+ let mutex = CreateMutexA(std::ptr::null_mut(), 0, cname.as_ptr());
+ if mutex.is_null() {
+ panic!(
+ "failed to create global mutex named `{}`: {}",
+ name,
+ io::Error::last_os_error()
+ );
+ }
+ let mutex = Handle(mutex);
+
+ // Acquire the lock through `WaitForSingleObject`.
+ //
+ // A return value of `WAIT_OBJECT_0` means we successfully acquired it.
+ //
+        // A return value of `WAIT_ABANDONED` means that the thread previously
+        // holding the mutex exited without calling `ReleaseMutex`. This can happen,
+ // for example, when the compiler crashes or is interrupted via ctrl-c
+ // or the like. In this case, however, we are still transferred
+ // ownership of the lock so we continue.
+ //
+        // If an error happens... well... that's surprising!
+ match WaitForSingleObject(mutex.0, INFINITE) {
+ WAIT_OBJECT_0 | WAIT_ABANDONED => {}
+ code => {
+ panic!(
+ "WaitForSingleObject failed on global mutex named \
+ `{}`: {} (ret={:x})",
+ name,
+ io::Error::last_os_error(),
+ code
+ );
+ }
+ }
+
+ // Return a guard which will call `ReleaseMutex` when dropped.
+ Box::new(Guard(mutex))
+ }
+}
+
+#[cfg(not(windows))]
+pub fn acquire_global_lock(_name: &str) -> Box<dyn Any> {
+ Box::new(())
+}
diff --git a/compiler/rustc_errors/src/registry.rs b/compiler/rustc_errors/src/registry.rs
new file mode 100644
index 000000000..da764d993
--- /dev/null
+++ b/compiler/rustc_errors/src/registry.rs
@@ -0,0 +1,25 @@
+use rustc_data_structures::fx::FxHashMap;
+
+#[derive(Debug)]
+pub struct InvalidErrorCode;
+
+#[derive(Clone)]
+pub struct Registry {
+ long_descriptions: FxHashMap<&'static str, Option<&'static str>>,
+}
+
+impl Registry {
+ pub fn new(long_descriptions: &[(&'static str, Option<&'static str>)]) -> Registry {
+ Registry { long_descriptions: long_descriptions.iter().copied().collect() }
+ }
+
+ /// Returns `InvalidErrorCode` if the code requested does not exist in the
+ /// registry. Otherwise, returns an `Option` where `None` means the error
+ /// code is valid but has no extended information.
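+    ///
+    /// Illustrative sketch with a hypothetical two-entry registry:
+    ///
+    /// ```ignore (illustrative)
+    /// let registry = Registry::new(&[
+    ///     ("E0001", Some("extended description...")),
+    ///     ("E0002", None),
+    /// ]);
+    /// assert!(matches!(registry.try_find_description("E0001"), Ok(Some(_))));
+    /// assert!(matches!(registry.try_find_description("E0002"), Ok(None)));
+    /// assert!(registry.try_find_description("E9999").is_err());
+    /// ```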
+ pub fn try_find_description(
+ &self,
+ code: &str,
+ ) -> Result<Option<&'static str>, InvalidErrorCode> {
+ self.long_descriptions.get(code).copied().ok_or(InvalidErrorCode)
+ }
+}
diff --git a/compiler/rustc_errors/src/snippet.rs b/compiler/rustc_errors/src/snippet.rs
new file mode 100644
index 000000000..e4cc44c41
--- /dev/null
+++ b/compiler/rustc_errors/src/snippet.rs
@@ -0,0 +1,179 @@
+// Code for annotating snippets.
+
+use crate::Level;
+
+#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
+pub struct Line {
+ pub line_index: usize,
+ pub annotations: Vec<Annotation>,
+}
+
+#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
+pub struct MultilineAnnotation {
+ pub depth: usize,
+ pub line_start: usize,
+ pub line_end: usize,
+ pub start_col: usize,
+ pub end_col: usize,
+ pub is_primary: bool,
+ pub label: Option<String>,
+ pub overlaps_exactly: bool,
+}
+
+impl MultilineAnnotation {
+ pub fn increase_depth(&mut self) {
+ self.depth += 1;
+ }
+
+ /// Compare two `MultilineAnnotation`s considering only the `Span` they cover.
+ pub fn same_span(&self, other: &MultilineAnnotation) -> bool {
+ self.line_start == other.line_start
+ && self.line_end == other.line_end
+ && self.start_col == other.start_col
+ && self.end_col == other.end_col
+ }
+
+ pub fn as_start(&self) -> Annotation {
+ Annotation {
+ start_col: self.start_col,
+ end_col: self.start_col + 1,
+ is_primary: self.is_primary,
+ label: None,
+ annotation_type: AnnotationType::MultilineStart(self.depth),
+ }
+ }
+
+ pub fn as_end(&self) -> Annotation {
+ Annotation {
+ start_col: self.end_col.saturating_sub(1),
+ end_col: self.end_col,
+ is_primary: self.is_primary,
+ label: self.label.clone(),
+ annotation_type: AnnotationType::MultilineEnd(self.depth),
+ }
+ }
+
+ pub fn as_line(&self) -> Annotation {
+ Annotation {
+ start_col: 0,
+ end_col: 0,
+ is_primary: self.is_primary,
+ label: None,
+ annotation_type: AnnotationType::MultilineLine(self.depth),
+ }
+ }
+}
+
+#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
+pub enum AnnotationType {
+ /// Annotation under a single line of code
+ Singleline,
+
+ // The Multiline type above is replaced with the following three in order
+ // to reuse the current label drawing code.
+ //
+ // Each of these corresponds to one part of the following diagram:
+ //
+ // x | foo(1 + bar(x,
+ // | _________^ < MultilineStart
+ // x | | y), < MultilineLine
+ // | |______________^ label < MultilineEnd
+ // x | z);
+ /// Annotation marking the first character of a fully shown multiline span
+ MultilineStart(usize),
+ /// Annotation marking the last character of a fully shown multiline span
+ MultilineEnd(usize),
+ /// Line at the left enclosing the lines of a fully shown multiline span
+ // Just a placeholder for the drawing algorithm, to know that it shouldn't skip the first 4
+ // and last 2 lines of code. The actual line is drawn in `emit_message_default` and not in
+ // `draw_multiline_line`.
+ MultilineLine(usize),
+}
+
+#[derive(Clone, Debug, PartialOrd, Ord, PartialEq, Eq)]
+pub struct Annotation {
+ /// Start column, 0-based indexing -- counting *characters*, not
+    /// UTF-8 bytes. Note that it is important that this field goes
+ /// first, so that when we sort, we sort orderings by start
+ /// column.
+ pub start_col: usize,
+
+ /// End column within the line (exclusive)
+ pub end_col: usize,
+
+    /// Is this annotation derived from the primary span?
+ pub is_primary: bool,
+
+ /// Optional label to display adjacent to the annotation.
+ pub label: Option<String>,
+
+    /// Is this a single-line annotation, a multiline annotation, or a multiline
+    /// span minimized down to a smaller span?
+ pub annotation_type: AnnotationType,
+}
+
+impl Annotation {
+ /// Whether this annotation is a vertical line placeholder.
+ pub fn is_line(&self) -> bool {
+ matches!(self.annotation_type, AnnotationType::MultilineLine(_))
+ }
+
+ pub fn len(&self) -> usize {
+ // Account for usize underflows
+ if self.end_col > self.start_col {
+ self.end_col - self.start_col
+ } else {
+ self.start_col - self.end_col
+ }
+ }
+
+ pub fn has_label(&self) -> bool {
+ if let Some(ref label) = self.label {
+ // Consider labels with no text as effectively not being there
+ // to avoid weird output with unnecessary vertical lines, like:
+ //
+ // X | fn foo(x: u32) {
+ // | -------^------
+ // | | |
+ // | |
+ // |
+ //
+ // Note that this would be the complete output users would see.
+ !label.is_empty()
+ } else {
+ false
+ }
+ }
+
+ pub fn takes_space(&self) -> bool {
+ // Multiline annotations always have to keep vertical space.
+ matches!(
+ self.annotation_type,
+ AnnotationType::MultilineStart(_) | AnnotationType::MultilineEnd(_)
+ )
+ }
+}
+
+#[derive(Debug)]
+pub struct StyledString {
+ pub text: String,
+ pub style: Style,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Encodable, Decodable)]
+pub enum Style {
+ MainHeaderMsg,
+ HeaderMsg,
+ LineAndColumn,
+ LineNumber,
+ Quotation,
+ UnderlinePrimary,
+ UnderlineSecondary,
+ LabelPrimary,
+ LabelSecondary,
+ NoStyle,
+ Level(Level),
+ Highlight,
+ Addition,
+ Removal,
+}
diff --git a/compiler/rustc_errors/src/styled_buffer.rs b/compiler/rustc_errors/src/styled_buffer.rs
new file mode 100644
index 000000000..9abdb5fc9
--- /dev/null
+++ b/compiler/rustc_errors/src/styled_buffer.rs
@@ -0,0 +1,151 @@
+// Code for creating styled buffers
+
+use crate::snippet::{Style, StyledString};
+
+#[derive(Debug)]
+pub struct StyledBuffer {
+ lines: Vec<Vec<StyledChar>>,
+}
+
+#[derive(Debug, Clone)]
+struct StyledChar {
+ chr: char,
+ style: Style,
+}
+
+impl StyledChar {
+ const SPACE: Self = StyledChar::new(' ', Style::NoStyle);
+
+ const fn new(chr: char, style: Style) -> Self {
+ StyledChar { chr, style }
+ }
+}
+
+impl StyledBuffer {
+ pub fn new() -> StyledBuffer {
+ StyledBuffer { lines: vec![] }
+ }
+
+ /// Returns content of `StyledBuffer` split by lines and line styles
+    /// Returns the contents of the `StyledBuffer`, split into lines, with each
+    /// line split into runs of uniformly styled text.
+ // Tabs are assumed to have been replaced by spaces in calling code.
+ debug_assert!(self.lines.iter().all(|r| !r.iter().any(|sc| sc.chr == '\t')));
+
+ let mut output: Vec<Vec<StyledString>> = vec![];
+ let mut styled_vec: Vec<StyledString> = vec![];
+
+ for styled_line in &self.lines {
+ let mut current_style = Style::NoStyle;
+ let mut current_text = String::new();
+
+ for sc in styled_line {
+ if sc.style != current_style {
+ if !current_text.is_empty() {
+ styled_vec.push(StyledString { text: current_text, style: current_style });
+ }
+ current_style = sc.style;
+ current_text = String::new();
+ }
+ current_text.push(sc.chr);
+ }
+ if !current_text.is_empty() {
+ styled_vec.push(StyledString { text: current_text, style: current_style });
+ }
+
+            // We're done with this row; push it and keep going.
+ output.push(styled_vec);
+
+ styled_vec = vec![];
+ }
+
+ output
+ }
+
+ fn ensure_lines(&mut self, line: usize) {
+ if line >= self.lines.len() {
+ self.lines.resize(line + 1, Vec::new());
+ }
+ }
+
+ /// Sets `chr` with `style` for given `line`, `col`.
+    /// If `line` does not exist in our buffer, adds empty lines up to the given
+    /// line and fills the last line with unstyled whitespace.
+ pub fn putc(&mut self, line: usize, col: usize, chr: char, style: Style) {
+ self.ensure_lines(line);
+ if col >= self.lines[line].len() {
+ self.lines[line].resize(col + 1, StyledChar::SPACE);
+ }
+ self.lines[line][col] = StyledChar::new(chr, style);
+ }
+
+ /// Sets `string` with `style` for given `line`, starting from `col`.
+    /// If `line` does not exist in our buffer, adds empty lines up to the given
+    /// line and fills the last line with unstyled whitespace.
+ pub fn puts(&mut self, line: usize, col: usize, string: &str, style: Style) {
+ let mut n = col;
+ for c in string.chars() {
+ self.putc(line, n, c, style);
+ n += 1;
+ }
+ }
+
+    /// For the given `line`, inserts `string` with `style` before the old content
+    /// of that line, adding new lines if needed.
+ pub fn prepend(&mut self, line: usize, string: &str, style: Style) {
+ self.ensure_lines(line);
+ let string_len = string.chars().count();
+
+ if !self.lines[line].is_empty() {
+ // Push the old content over to make room for new content
+ for _ in 0..string_len {
+ self.lines[line].insert(0, StyledChar::SPACE);
+ }
+ }
+
+ self.puts(line, 0, string, style);
+ }
+
+    /// For the given `line`, inserts `string` with `style` after the old content
+    /// of that line, adding new lines if needed.
+ pub fn append(&mut self, line: usize, string: &str, style: Style) {
+ if line >= self.lines.len() {
+ self.puts(line, 0, string, style);
+ } else {
+ let col = self.lines[line].len();
+ self.puts(line, col, string, style);
+ }
+ }
+
+ pub fn num_lines(&self) -> usize {
+ self.lines.len()
+ }
+
+ /// Set `style` for `line`, `col_start..col_end` range if:
+ /// 1. That line and column range exist in `StyledBuffer`
+ /// 2. `overwrite` is `true` or existing style is `Style::NoStyle` or `Style::Quotation`
+ pub fn set_style_range(
+ &mut self,
+ line: usize,
+ col_start: usize,
+ col_end: usize,
+ style: Style,
+ overwrite: bool,
+ ) {
+ for col in col_start..col_end {
+ self.set_style(line, col, style, overwrite);
+ }
+ }
+
+ /// Set `style` for `line`, `col` if:
+ /// 1. That line and column exist in `StyledBuffer`
+ /// 2. `overwrite` is `true` or existing style is `Style::NoStyle` or `Style::Quotation`
+ pub fn set_style(&mut self, line: usize, col: usize, style: Style, overwrite: bool) {
+ if let Some(ref mut line) = self.lines.get_mut(line) {
+ if let Some(StyledChar { style: s, .. }) = line.get_mut(col) {
+ if overwrite || *s == Style::NoStyle || *s == Style::Quotation {
+ *s = style;
+ }
+ }
+ }
+ }
+}
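+
+// Illustrative sketch (not part of this file's API surface): a typical caller
+// writes characters and strings top-down, then renders once at the end;
+// `render` merges adjacent characters of equal style into one run each:
+//
+//     let mut buffer = StyledBuffer::new();
+//     buffer.puts(0, 0, "error", Style::MainHeaderMsg);
+//     buffer.append(0, ": mismatched types", Style::NoStyle);
+//     buffer.putc(1, 0, '|', Style::LineNumber);
+//     let rendered: Vec<Vec<StyledString>> = buffer.render();
+//     // rendered[0] holds two runs: "error" and ": mismatched types".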
diff --git a/compiler/rustc_expand/Cargo.toml b/compiler/rustc_expand/Cargo.toml
new file mode 100644
index 000000000..4ee7b6c42
--- /dev/null
+++ b/compiler/rustc_expand/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "rustc_expand"
+version = "0.0.0"
+edition = "2021"
+build = false
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_serialize = { path = "../rustc_serialize" }
+tracing = "0.1"
+rustc_span = { path = "../rustc_span" }
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_ast_passes = { path = "../rustc_ast_passes" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_lint_defs = { path = "../rustc_lint_defs" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_lexer = { path = "../rustc_lexer" }
+rustc_parse = { path = "../rustc_parse" }
+rustc_session = { path = "../rustc_session" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_ast = { path = "../rustc_ast" }
+crossbeam-channel = "0.5.0"
diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs
new file mode 100644
index 000000000..6e093811f
--- /dev/null
+++ b/compiler/rustc_expand/src/base.rs
@@ -0,0 +1,1444 @@
+use crate::expand::{self, AstFragment, Invocation};
+use crate::module::DirOwnership;
+
+use rustc_ast::attr::MarkedAttrs;
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Nonterminal};
+use rustc_ast::tokenstream::TokenStream;
+use rustc_ast::visit::{AssocCtxt, Visitor};
+use rustc_ast::{self as ast, Attribute, HasAttrs, Item, NodeId, PatKind};
+use rustc_attr::{self as attr, Deprecation, Stability};
+use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
+use rustc_data_structures::sync::{self, Lrc};
+use rustc_errors::{Applicability, DiagnosticBuilder, ErrorGuaranteed, MultiSpan, PResult};
+use rustc_lint_defs::builtin::PROC_MACRO_BACK_COMPAT;
+use rustc_lint_defs::{BufferedEarlyLint, BuiltinLintDiagnostics};
+use rustc_parse::{self, parser, MACRO_ARGUMENTS};
+use rustc_session::{parse::ParseSess, Limit, Session, SessionDiagnostic};
+use rustc_span::def_id::{CrateNum, DefId, LocalDefId};
+use rustc_span::edition::Edition;
+use rustc_span::hygiene::{AstPass, ExpnData, ExpnKind, LocalExpnId};
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{FileName, Span, DUMMY_SP};
+use smallvec::{smallvec, SmallVec};
+
+use std::default::Default;
+use std::iter;
+use std::path::PathBuf;
+use std::rc::Rc;
+
+pub(crate) use rustc_span::hygiene::MacroKind;
+
+// When adding new variants, make sure to
+// adjust the `visit_*` / `flat_map_*` calls in `InvocationCollector`
+// to use `assign_id!`
+#[derive(Debug, Clone)]
+pub enum Annotatable {
+ Item(P<ast::Item>),
+ TraitItem(P<ast::AssocItem>),
+ ImplItem(P<ast::AssocItem>),
+ ForeignItem(P<ast::ForeignItem>),
+ Stmt(P<ast::Stmt>),
+ Expr(P<ast::Expr>),
+ Arm(ast::Arm),
+ ExprField(ast::ExprField),
+ PatField(ast::PatField),
+ GenericParam(ast::GenericParam),
+ Param(ast::Param),
+ FieldDef(ast::FieldDef),
+ Variant(ast::Variant),
+ Crate(ast::Crate),
+}
+
+impl Annotatable {
+ pub fn span(&self) -> Span {
+ match *self {
+ Annotatable::Item(ref item) => item.span,
+ Annotatable::TraitItem(ref trait_item) => trait_item.span,
+ Annotatable::ImplItem(ref impl_item) => impl_item.span,
+ Annotatable::ForeignItem(ref foreign_item) => foreign_item.span,
+ Annotatable::Stmt(ref stmt) => stmt.span,
+ Annotatable::Expr(ref expr) => expr.span,
+ Annotatable::Arm(ref arm) => arm.span,
+ Annotatable::ExprField(ref field) => field.span,
+ Annotatable::PatField(ref fp) => fp.pat.span,
+ Annotatable::GenericParam(ref gp) => gp.ident.span,
+ Annotatable::Param(ref p) => p.span,
+ Annotatable::FieldDef(ref sf) => sf.span,
+ Annotatable::Variant(ref v) => v.span,
+ Annotatable::Crate(ref c) => c.spans.inner_span,
+ }
+ }
+
+ pub fn visit_attrs(&mut self, f: impl FnOnce(&mut Vec<Attribute>)) {
+ match self {
+ Annotatable::Item(item) => item.visit_attrs(f),
+ Annotatable::TraitItem(trait_item) => trait_item.visit_attrs(f),
+ Annotatable::ImplItem(impl_item) => impl_item.visit_attrs(f),
+ Annotatable::ForeignItem(foreign_item) => foreign_item.visit_attrs(f),
+ Annotatable::Stmt(stmt) => stmt.visit_attrs(f),
+ Annotatable::Expr(expr) => expr.visit_attrs(f),
+ Annotatable::Arm(arm) => arm.visit_attrs(f),
+ Annotatable::ExprField(field) => field.visit_attrs(f),
+ Annotatable::PatField(fp) => fp.visit_attrs(f),
+ Annotatable::GenericParam(gp) => gp.visit_attrs(f),
+ Annotatable::Param(p) => p.visit_attrs(f),
+ Annotatable::FieldDef(sf) => sf.visit_attrs(f),
+ Annotatable::Variant(v) => v.visit_attrs(f),
+ Annotatable::Crate(c) => c.visit_attrs(f),
+ }
+ }
+
+ pub fn visit_with<'a, V: Visitor<'a>>(&'a self, visitor: &mut V) {
+ match self {
+ Annotatable::Item(item) => visitor.visit_item(item),
+ Annotatable::TraitItem(item) => visitor.visit_assoc_item(item, AssocCtxt::Trait),
+ Annotatable::ImplItem(item) => visitor.visit_assoc_item(item, AssocCtxt::Impl),
+ Annotatable::ForeignItem(foreign_item) => visitor.visit_foreign_item(foreign_item),
+ Annotatable::Stmt(stmt) => visitor.visit_stmt(stmt),
+ Annotatable::Expr(expr) => visitor.visit_expr(expr),
+ Annotatable::Arm(arm) => visitor.visit_arm(arm),
+ Annotatable::ExprField(field) => visitor.visit_expr_field(field),
+ Annotatable::PatField(fp) => visitor.visit_pat_field(fp),
+ Annotatable::GenericParam(gp) => visitor.visit_generic_param(gp),
+ Annotatable::Param(p) => visitor.visit_param(p),
+ Annotatable::FieldDef(sf) => visitor.visit_field_def(sf),
+ Annotatable::Variant(v) => visitor.visit_variant(v),
+ Annotatable::Crate(c) => visitor.visit_crate(c),
+ }
+ }
+
+ pub fn to_tokens(&self) -> TokenStream {
+ match self {
+ Annotatable::Item(node) => TokenStream::from_ast(node),
+ Annotatable::TraitItem(node) | Annotatable::ImplItem(node) => {
+ TokenStream::from_ast(node)
+ }
+ Annotatable::ForeignItem(node) => TokenStream::from_ast(node),
+ Annotatable::Stmt(node) => {
+ assert!(!matches!(node.kind, ast::StmtKind::Empty));
+ TokenStream::from_ast(node)
+ }
+ Annotatable::Expr(node) => TokenStream::from_ast(node),
+ Annotatable::Arm(..)
+ | Annotatable::ExprField(..)
+ | Annotatable::PatField(..)
+ | Annotatable::GenericParam(..)
+ | Annotatable::Param(..)
+ | Annotatable::FieldDef(..)
+ | Annotatable::Variant(..)
+ | Annotatable::Crate(..) => panic!("unexpected annotatable"),
+ }
+ }
+
+ pub fn expect_item(self) -> P<ast::Item> {
+ match self {
+ Annotatable::Item(i) => i,
+ _ => panic!("expected Item"),
+ }
+ }
+
+ pub fn expect_trait_item(self) -> P<ast::AssocItem> {
+ match self {
+ Annotatable::TraitItem(i) => i,
+ _ => panic!("expected Item"),
+ }
+ }
+
+ pub fn expect_impl_item(self) -> P<ast::AssocItem> {
+ match self {
+ Annotatable::ImplItem(i) => i,
+ _ => panic!("expected Item"),
+ }
+ }
+
+ pub fn expect_foreign_item(self) -> P<ast::ForeignItem> {
+ match self {
+ Annotatable::ForeignItem(i) => i,
+ _ => panic!("expected foreign item"),
+ }
+ }
+
+ pub fn expect_stmt(self) -> ast::Stmt {
+ match self {
+ Annotatable::Stmt(stmt) => stmt.into_inner(),
+ _ => panic!("expected statement"),
+ }
+ }
+
+ pub fn expect_expr(self) -> P<ast::Expr> {
+ match self {
+ Annotatable::Expr(expr) => expr,
+ _ => panic!("expected expression"),
+ }
+ }
+
+ pub fn expect_arm(self) -> ast::Arm {
+ match self {
+ Annotatable::Arm(arm) => arm,
+ _ => panic!("expected match arm"),
+ }
+ }
+
+ pub fn expect_expr_field(self) -> ast::ExprField {
+ match self {
+ Annotatable::ExprField(field) => field,
+ _ => panic!("expected field"),
+ }
+ }
+
+ pub fn expect_pat_field(self) -> ast::PatField {
+ match self {
+ Annotatable::PatField(fp) => fp,
+ _ => panic!("expected field pattern"),
+ }
+ }
+
+ pub fn expect_generic_param(self) -> ast::GenericParam {
+ match self {
+ Annotatable::GenericParam(gp) => gp,
+ _ => panic!("expected generic parameter"),
+ }
+ }
+
+ pub fn expect_param(self) -> ast::Param {
+ match self {
+ Annotatable::Param(param) => param,
+ _ => panic!("expected parameter"),
+ }
+ }
+
+ pub fn expect_field_def(self) -> ast::FieldDef {
+ match self {
+ Annotatable::FieldDef(sf) => sf,
+ _ => panic!("expected struct field"),
+ }
+ }
+
+ pub fn expect_variant(self) -> ast::Variant {
+ match self {
+ Annotatable::Variant(v) => v,
+ _ => panic!("expected variant"),
+ }
+ }
+
+ pub fn expect_crate(self) -> ast::Crate {
+ match self {
+ Annotatable::Crate(krate) => krate,
+ _ => panic!("expected krate"),
+ }
+ }
+}
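+
+// Illustrative sketch: callers that support only one fragment kind use the
+// `expect_*` accessors above, which panic when the macro was attached to
+// anything else:
+//
+//     fn expand_on_item(ann: Annotatable) -> P<ast::Item> {
+//         ann.expect_item() // panics with "expected Item" otherwise
+//     }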
+
+/// Result of an expansion that may need to be retried.
+/// Consider using this for non-`MultiItemModifier` expanders as well.
+pub enum ExpandResult<T, U> {
+ /// Expansion produced a result (possibly dummy).
+ Ready(T),
+ /// Expansion could not produce a result and needs to be retried.
+ Retry(U),
+}
+
+// `meta_item` is the attribute, and `item` is the item being modified.
+pub trait MultiItemModifier {
+ fn expand(
+ &self,
+ ecx: &mut ExtCtxt<'_>,
+ span: Span,
+ meta_item: &ast::MetaItem,
+ item: Annotatable,
+ ) -> ExpandResult<Vec<Annotatable>, Annotatable>;
+}
+
+impl<F> MultiItemModifier for F
+where
+ F: Fn(&mut ExtCtxt<'_>, Span, &ast::MetaItem, Annotatable) -> Vec<Annotatable>,
+{
+ fn expand(
+ &self,
+ ecx: &mut ExtCtxt<'_>,
+ span: Span,
+ meta_item: &ast::MetaItem,
+ item: Annotatable,
+ ) -> ExpandResult<Vec<Annotatable>, Annotatable> {
+ ExpandResult::Ready(self(ecx, span, meta_item, item))
+ }
+}
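+
+// Illustrative sketch: thanks to the blanket impl above, a plain function of
+// the right shape already is a `MultiItemModifier`, no manual impl needed:
+//
+//     fn noop_modifier(
+//         _ecx: &mut ExtCtxt<'_>,
+//         _span: Span,
+//         _meta_item: &ast::MetaItem,
+//         item: Annotatable,
+//     ) -> Vec<Annotatable> {
+//         vec![item]
+//     }
+//     let _expander: Box<dyn MultiItemModifier> = Box::new(noop_modifier);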
+
+pub trait BangProcMacro {
+ fn expand<'cx>(
+ &self,
+ ecx: &'cx mut ExtCtxt<'_>,
+ span: Span,
+ ts: TokenStream,
+ ) -> Result<TokenStream, ErrorGuaranteed>;
+}
+
+impl<F> BangProcMacro for F
+where
+ F: Fn(TokenStream) -> TokenStream,
+{
+ fn expand<'cx>(
+ &self,
+ _ecx: &'cx mut ExtCtxt<'_>,
+ _span: Span,
+ ts: TokenStream,
+ ) -> Result<TokenStream, ErrorGuaranteed> {
+        // FIXME: set up the implicit context in TLS before calling self.
+ Ok(self(ts))
+ }
+}
+
+pub trait AttrProcMacro {
+ fn expand<'cx>(
+ &self,
+ ecx: &'cx mut ExtCtxt<'_>,
+ span: Span,
+ annotation: TokenStream,
+ annotated: TokenStream,
+ ) -> Result<TokenStream, ErrorGuaranteed>;
+}
+
+impl<F> AttrProcMacro for F
+where
+ F: Fn(TokenStream, TokenStream) -> TokenStream,
+{
+ fn expand<'cx>(
+ &self,
+ _ecx: &'cx mut ExtCtxt<'_>,
+ _span: Span,
+ annotation: TokenStream,
+ annotated: TokenStream,
+ ) -> Result<TokenStream, ErrorGuaranteed> {
+        // FIXME: set up the implicit context in TLS before calling self.
+ Ok(self(annotation, annotated))
+ }
+}
+
+/// Represents an expander that maps token trees to macro results.
+pub trait TTMacroExpander {
+ fn expand<'cx>(
+ &self,
+ ecx: &'cx mut ExtCtxt<'_>,
+ span: Span,
+ input: TokenStream,
+ ) -> Box<dyn MacResult + 'cx>;
+}
+
+pub type MacroExpanderFn =
+ for<'cx> fn(&'cx mut ExtCtxt<'_>, Span, TokenStream) -> Box<dyn MacResult + 'cx>;
+
+impl<F> TTMacroExpander for F
+where
+ F: for<'cx> Fn(&'cx mut ExtCtxt<'_>, Span, TokenStream) -> Box<dyn MacResult + 'cx>,
+{
+ fn expand<'cx>(
+ &self,
+ ecx: &'cx mut ExtCtxt<'_>,
+ span: Span,
+ input: TokenStream,
+ ) -> Box<dyn MacResult + 'cx> {
+ self(ecx, span, input)
+ }
+}
+
+// Use a macro because forwarding to a simple function has type system issues
+macro_rules! make_stmts_default {
+ ($me:expr) => {
+ $me.make_expr().map(|e| {
+ smallvec![ast::Stmt {
+ id: ast::DUMMY_NODE_ID,
+ span: e.span,
+ kind: ast::StmtKind::Expr(e),
+ }]
+ })
+ };
+}
+
+/// The result of a macro expansion. The return values of the various
+/// methods are spliced into the AST at the call site of the macro.
+pub trait MacResult {
+ /// Creates an expression.
+ fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> {
+ None
+ }
+
+ /// Creates zero or more items.
+ fn make_items(self: Box<Self>) -> Option<SmallVec<[P<ast::Item>; 1]>> {
+ None
+ }
+
+ /// Creates zero or more impl items.
+ fn make_impl_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> {
+ None
+ }
+
+ /// Creates zero or more trait items.
+ fn make_trait_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> {
+ None
+ }
+
+ /// Creates zero or more items in an `extern {}` block
+ fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[P<ast::ForeignItem>; 1]>> {
+ None
+ }
+
+ /// Creates a pattern.
+ fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> {
+ None
+ }
+
+ /// Creates zero or more statements.
+ ///
+ /// By default this attempts to create an expression statement,
+    /// returning `None` if that fails.
+ fn make_stmts(self: Box<Self>) -> Option<SmallVec<[ast::Stmt; 1]>> {
+ make_stmts_default!(self)
+ }
+
+ fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> {
+ None
+ }
+
+ fn make_arms(self: Box<Self>) -> Option<SmallVec<[ast::Arm; 1]>> {
+ None
+ }
+
+ fn make_expr_fields(self: Box<Self>) -> Option<SmallVec<[ast::ExprField; 1]>> {
+ None
+ }
+
+ fn make_pat_fields(self: Box<Self>) -> Option<SmallVec<[ast::PatField; 1]>> {
+ None
+ }
+
+ fn make_generic_params(self: Box<Self>) -> Option<SmallVec<[ast::GenericParam; 1]>> {
+ None
+ }
+
+ fn make_params(self: Box<Self>) -> Option<SmallVec<[ast::Param; 1]>> {
+ None
+ }
+
+ fn make_field_defs(self: Box<Self>) -> Option<SmallVec<[ast::FieldDef; 1]>> {
+ None
+ }
+
+ fn make_variants(self: Box<Self>) -> Option<SmallVec<[ast::Variant; 1]>> {
+ None
+ }
+
+ fn make_crate(self: Box<Self>) -> Option<ast::Crate> {
+ // Fn-like macros cannot produce a crate.
+ unreachable!()
+ }
+}
+
+macro_rules! make_MacEager {
+ ( $( $fld:ident: $t:ty, )* ) => {
+ /// `MacResult` implementation for the common case where you've already
+ /// built each form of AST that you might return.
+ #[derive(Default)]
+ pub struct MacEager {
+ $(
+ pub $fld: Option<$t>,
+ )*
+ }
+
+ impl MacEager {
+ $(
+ pub fn $fld(v: $t) -> Box<dyn MacResult> {
+ Box::new(MacEager {
+ $fld: Some(v),
+ ..Default::default()
+ })
+ }
+ )*
+ }
+ }
+}
+
+make_MacEager! {
+ expr: P<ast::Expr>,
+ pat: P<ast::Pat>,
+ items: SmallVec<[P<ast::Item>; 1]>,
+ impl_items: SmallVec<[P<ast::AssocItem>; 1]>,
+ trait_items: SmallVec<[P<ast::AssocItem>; 1]>,
+ foreign_items: SmallVec<[P<ast::ForeignItem>; 1]>,
+ stmts: SmallVec<[ast::Stmt; 1]>,
+ ty: P<ast::Ty>,
+}
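+
+// Illustrative sketch: the invocation above generates one optional field and
+// one constructor per fragment kind, so a bang macro that has already built
+// an expression can simply return:
+//
+//     fn done(ecx: &ExtCtxt<'_>, sp: Span) -> Box<dyn MacResult> {
+//         MacEager::expr(ecx.expr_bool(sp, true))
+//     }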
+
+impl MacResult for MacEager {
+ fn make_expr(self: Box<Self>) -> Option<P<ast::Expr>> {
+ self.expr
+ }
+
+ fn make_items(self: Box<Self>) -> Option<SmallVec<[P<ast::Item>; 1]>> {
+ self.items
+ }
+
+ fn make_impl_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> {
+ self.impl_items
+ }
+
+ fn make_trait_items(self: Box<Self>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> {
+ self.trait_items
+ }
+
+ fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[P<ast::ForeignItem>; 1]>> {
+ self.foreign_items
+ }
+
+ fn make_stmts(self: Box<Self>) -> Option<SmallVec<[ast::Stmt; 1]>> {
+ match self.stmts.as_ref().map_or(0, |s| s.len()) {
+ 0 => make_stmts_default!(self),
+ _ => self.stmts,
+ }
+ }
+
+ fn make_pat(self: Box<Self>) -> Option<P<ast::Pat>> {
+ if let Some(p) = self.pat {
+ return Some(p);
+ }
+ if let Some(e) = self.expr {
+ if let ast::ExprKind::Lit(_) = e.kind {
+ return Some(P(ast::Pat {
+ id: ast::DUMMY_NODE_ID,
+ span: e.span,
+ kind: PatKind::Lit(e),
+ tokens: None,
+ }));
+ }
+ }
+ None
+ }
+
+ fn make_ty(self: Box<Self>) -> Option<P<ast::Ty>> {
+ self.ty
+ }
+}
+
+/// A fill-in macro expansion result, used to allow compilation to continue
+/// after hitting errors.
+#[derive(Copy, Clone)]
+pub struct DummyResult {
+ is_error: bool,
+ span: Span,
+}
+
+impl DummyResult {
+    /// Creates a default `MacResult` that can be anything.
+ ///
+ /// Use this as a return value after hitting any errors and
+ /// calling `span_err`.
+ pub fn any(span: Span) -> Box<dyn MacResult + 'static> {
+ Box::new(DummyResult { is_error: true, span })
+ }
+
+    /// Same as `any`, but must be a valid fragment, not an error.
+ pub fn any_valid(span: Span) -> Box<dyn MacResult + 'static> {
+ Box::new(DummyResult { is_error: false, span })
+ }
+
+ /// A plain dummy expression.
+ pub fn raw_expr(sp: Span, is_error: bool) -> P<ast::Expr> {
+ P(ast::Expr {
+ id: ast::DUMMY_NODE_ID,
+ kind: if is_error { ast::ExprKind::Err } else { ast::ExprKind::Tup(Vec::new()) },
+ span: sp,
+ attrs: ast::AttrVec::new(),
+ tokens: None,
+ })
+ }
+
+ /// A plain dummy pattern.
+ pub fn raw_pat(sp: Span) -> ast::Pat {
+ ast::Pat { id: ast::DUMMY_NODE_ID, kind: PatKind::Wild, span: sp, tokens: None }
+ }
+
+ /// A plain dummy type.
+ pub fn raw_ty(sp: Span, is_error: bool) -> P<ast::Ty> {
+ P(ast::Ty {
+ id: ast::DUMMY_NODE_ID,
+ kind: if is_error { ast::TyKind::Err } else { ast::TyKind::Tup(Vec::new()) },
+ span: sp,
+ tokens: None,
+ })
+ }
+}
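+
+// Illustrative sketch of the intended error-recovery pattern: emit a
+// diagnostic, then hand back a dummy result so expansion can keep going:
+//
+//     fn expand_failed(cx: &mut ExtCtxt<'_>, sp: Span) -> Box<dyn MacResult + 'static> {
+//         cx.span_err(sp, "something went wrong");
+//         DummyResult::any(sp)
+//     }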
+
+impl MacResult for DummyResult {
+ fn make_expr(self: Box<DummyResult>) -> Option<P<ast::Expr>> {
+ Some(DummyResult::raw_expr(self.span, self.is_error))
+ }
+
+ fn make_pat(self: Box<DummyResult>) -> Option<P<ast::Pat>> {
+ Some(P(DummyResult::raw_pat(self.span)))
+ }
+
+ fn make_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::Item>; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_impl_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_trait_items(self: Box<DummyResult>) -> Option<SmallVec<[P<ast::AssocItem>; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_foreign_items(self: Box<Self>) -> Option<SmallVec<[P<ast::ForeignItem>; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_stmts(self: Box<DummyResult>) -> Option<SmallVec<[ast::Stmt; 1]>> {
+ Some(smallvec![ast::Stmt {
+ id: ast::DUMMY_NODE_ID,
+ kind: ast::StmtKind::Expr(DummyResult::raw_expr(self.span, self.is_error)),
+ span: self.span,
+ }])
+ }
+
+ fn make_ty(self: Box<DummyResult>) -> Option<P<ast::Ty>> {
+ Some(DummyResult::raw_ty(self.span, self.is_error))
+ }
+
+ fn make_arms(self: Box<DummyResult>) -> Option<SmallVec<[ast::Arm; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_expr_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::ExprField; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_pat_fields(self: Box<DummyResult>) -> Option<SmallVec<[ast::PatField; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_generic_params(self: Box<DummyResult>) -> Option<SmallVec<[ast::GenericParam; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_params(self: Box<DummyResult>) -> Option<SmallVec<[ast::Param; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_field_defs(self: Box<DummyResult>) -> Option<SmallVec<[ast::FieldDef; 1]>> {
+ Some(SmallVec::new())
+ }
+
+ fn make_variants(self: Box<DummyResult>) -> Option<SmallVec<[ast::Variant; 1]>> {
+ Some(SmallVec::new())
+ }
+}
+
+/// A syntax extension kind.
+pub enum SyntaxExtensionKind {
+ /// A token-based function-like macro.
+ Bang(
+ /// An expander with signature TokenStream -> TokenStream.
+ Box<dyn BangProcMacro + sync::Sync + sync::Send>,
+ ),
+
+ /// An AST-based function-like macro.
+ LegacyBang(
+ /// An expander with signature TokenStream -> AST.
+ Box<dyn TTMacroExpander + sync::Sync + sync::Send>,
+ ),
+
+ /// A token-based attribute macro.
+ Attr(
+ /// An expander with signature (TokenStream, TokenStream) -> TokenStream.
+        /// The first TokenStream is the attribute itself, the second is the annotated item.
+        /// The produced TokenStream replaces the input TokenStream.
+ Box<dyn AttrProcMacro + sync::Sync + sync::Send>,
+ ),
+
+ /// An AST-based attribute macro.
+ LegacyAttr(
+ /// An expander with signature (AST, AST) -> AST.
+ /// The first AST fragment is the attribute itself, the second is the annotated item.
+ /// The produced AST fragment replaces the input AST fragment.
+ Box<dyn MultiItemModifier + sync::Sync + sync::Send>,
+ ),
+
+ /// A trivial attribute "macro" that does nothing,
+ /// only keeps the attribute and marks it as inert,
+ /// thus making it ineligible for further expansion.
+ NonMacroAttr,
+
+ /// A token-based derive macro.
+ Derive(
+ /// An expander with signature TokenStream -> TokenStream (not yet).
+        /// The produced TokenStream is appended to the input TokenStream.
+ Box<dyn MultiItemModifier + sync::Sync + sync::Send>,
+ ),
+
+ /// An AST-based derive macro.
+ LegacyDerive(
+ /// An expander with signature AST -> AST.
+ /// The produced AST fragment is appended to the input AST fragment.
+ Box<dyn MultiItemModifier + sync::Sync + sync::Send>,
+ ),
+}
+
+/// A struct representing a macro definition in "lowered" form ready for expansion.
+pub struct SyntaxExtension {
+ /// A syntax extension kind.
+ pub kind: SyntaxExtensionKind,
+ /// Span of the macro definition.
+ pub span: Span,
+ /// List of unstable features that are treated as stable inside this macro.
+ pub allow_internal_unstable: Option<Lrc<[Symbol]>>,
+ /// Suppresses the `unsafe_code` lint for code produced by this macro.
+ pub allow_internal_unsafe: bool,
+ /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`) for this macro.
+ pub local_inner_macros: bool,
+ /// The macro's stability info.
+ pub stability: Option<Stability>,
+ /// The macro's deprecation info.
+ pub deprecation: Option<Deprecation>,
+ /// Names of helper attributes registered by this macro.
+ pub helper_attrs: Vec<Symbol>,
+ /// Edition of the crate in which this macro is defined.
+ pub edition: Edition,
+ /// Built-in macros have a couple of special properties like availability
+ /// in `#[no_implicit_prelude]` modules, so we have to keep this flag.
+ pub builtin_name: Option<Symbol>,
+}
+
+impl SyntaxExtension {
+    /// Returns which kind of macro call this syntax extension handles.
+ pub fn macro_kind(&self) -> MacroKind {
+ match self.kind {
+ SyntaxExtensionKind::Bang(..) | SyntaxExtensionKind::LegacyBang(..) => MacroKind::Bang,
+ SyntaxExtensionKind::Attr(..)
+ | SyntaxExtensionKind::LegacyAttr(..)
+ | SyntaxExtensionKind::NonMacroAttr => MacroKind::Attr,
+ SyntaxExtensionKind::Derive(..) | SyntaxExtensionKind::LegacyDerive(..) => {
+ MacroKind::Derive
+ }
+ }
+ }
+
+ /// Constructs a syntax extension with default properties.
+ pub fn default(kind: SyntaxExtensionKind, edition: Edition) -> SyntaxExtension {
+ SyntaxExtension {
+ span: DUMMY_SP,
+ allow_internal_unstable: None,
+ allow_internal_unsafe: false,
+ local_inner_macros: false,
+ stability: None,
+ deprecation: None,
+ helper_attrs: Vec::new(),
+ edition,
+ builtin_name: None,
+ kind,
+ }
+ }
+
+ /// Constructs a syntax extension with the given properties
+ /// and other properties converted from attributes.
+ pub fn new(
+ sess: &Session,
+ kind: SyntaxExtensionKind,
+ span: Span,
+ helper_attrs: Vec<Symbol>,
+ edition: Edition,
+ name: Symbol,
+ attrs: &[ast::Attribute],
+ ) -> SyntaxExtension {
+ let allow_internal_unstable =
+ attr::allow_internal_unstable(sess, &attrs).collect::<Vec<Symbol>>();
+
+ let mut local_inner_macros = false;
+ if let Some(macro_export) = sess.find_by_name(attrs, sym::macro_export) {
+ if let Some(l) = macro_export.meta_item_list() {
+ local_inner_macros = attr::list_contains_name(&l, sym::local_inner_macros);
+ }
+ }
+
+ let (builtin_name, helper_attrs) = sess
+ .find_by_name(attrs, sym::rustc_builtin_macro)
+ .map(|attr| {
+                // Override `helper_attrs` passed above if it's a built-in macro;
+                // marking `proc_macro_derive` macros as built-in is not a realistic use case.
+ parse_macro_name_and_helper_attrs(sess.diagnostic(), attr, "built-in").map_or_else(
+ || (Some(name), Vec::new()),
+ |(name, helper_attrs)| (Some(name), helper_attrs),
+ )
+ })
+ .unwrap_or_else(|| (None, helper_attrs));
+ let (stability, const_stability) = attr::find_stability(&sess, attrs, span);
+ if let Some((_, sp)) = const_stability {
+ sess.parse_sess
+ .span_diagnostic
+ .struct_span_err(sp, "macros cannot have const stability attributes")
+ .span_label(sp, "invalid const stability attribute")
+ .span_label(
+ sess.source_map().guess_head_span(span),
+ "const stability attribute affects this macro",
+ )
+ .emit();
+ }
+
+ SyntaxExtension {
+ kind,
+ span,
+ allow_internal_unstable: (!allow_internal_unstable.is_empty())
+ .then(|| allow_internal_unstable.into()),
+ allow_internal_unsafe: sess.contains_name(attrs, sym::allow_internal_unsafe),
+ local_inner_macros,
+ stability: stability.map(|(s, _)| s),
+ deprecation: attr::find_deprecation(&sess, attrs).map(|(d, _)| d),
+ helper_attrs,
+ edition,
+ builtin_name,
+ }
+ }
+
+ pub fn dummy_bang(edition: Edition) -> SyntaxExtension {
+ fn expander<'cx>(
+ _: &'cx mut ExtCtxt<'_>,
+ span: Span,
+ _: TokenStream,
+ ) -> Box<dyn MacResult + 'cx> {
+ DummyResult::any(span)
+ }
+ SyntaxExtension::default(SyntaxExtensionKind::LegacyBang(Box::new(expander)), edition)
+ }
+
+ pub fn dummy_derive(edition: Edition) -> SyntaxExtension {
+ fn expander(
+ _: &mut ExtCtxt<'_>,
+ _: Span,
+ _: &ast::MetaItem,
+ _: Annotatable,
+ ) -> Vec<Annotatable> {
+ Vec::new()
+ }
+ SyntaxExtension::default(SyntaxExtensionKind::Derive(Box::new(expander)), edition)
+ }
+
+ pub fn non_macro_attr(edition: Edition) -> SyntaxExtension {
+ SyntaxExtension::default(SyntaxExtensionKind::NonMacroAttr, edition)
+ }
+
+ pub fn expn_data(
+ &self,
+ parent: LocalExpnId,
+ call_site: Span,
+ descr: Symbol,
+ macro_def_id: Option<DefId>,
+ parent_module: Option<DefId>,
+ ) -> ExpnData {
+ ExpnData::new(
+ ExpnKind::Macro(self.macro_kind(), descr),
+ parent.to_expn_id(),
+ call_site,
+ self.span,
+ self.allow_internal_unstable.clone(),
+ self.allow_internal_unsafe,
+ self.local_inner_macros,
+ self.edition,
+ macro_def_id,
+ parent_module,
+ )
+ }
+}
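+
+// Illustrative sketch: the `dummy_*` constructors above stand in for macros
+// whose real definition failed to resolve or load:
+//
+//     let ext = SyntaxExtension::dummy_bang(Edition::Edition2021);
+//     assert_eq!(ext.macro_kind(), MacroKind::Bang);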
+
+/// Error type that denotes indeterminacy.
+pub struct Indeterminate;
+
+pub type DeriveResolutions = Vec<(ast::Path, Annotatable, Option<Lrc<SyntaxExtension>>)>;
+
+pub trait ResolverExpand {
+ fn next_node_id(&mut self) -> NodeId;
+ fn invocation_parent(&self, id: LocalExpnId) -> LocalDefId;
+
+ fn resolve_dollar_crates(&mut self);
+ fn visit_ast_fragment_with_placeholders(
+ &mut self,
+ expn_id: LocalExpnId,
+ fragment: &AstFragment,
+ );
+ fn register_builtin_macro(&mut self, name: Symbol, ext: SyntaxExtensionKind);
+
+ fn expansion_for_ast_pass(
+ &mut self,
+ call_site: Span,
+ pass: AstPass,
+ features: &[Symbol],
+ parent_module_id: Option<NodeId>,
+ ) -> LocalExpnId;
+
+ fn resolve_imports(&mut self);
+
+ fn resolve_macro_invocation(
+ &mut self,
+ invoc: &Invocation,
+ eager_expansion_root: LocalExpnId,
+ force: bool,
+ ) -> Result<Lrc<SyntaxExtension>, Indeterminate>;
+
+ fn record_macro_rule_usage(&mut self, mac_id: NodeId, rule_index: usize);
+
+ fn check_unused_macros(&mut self);
+
+ // Resolver interfaces for specific built-in macros.
+ /// Does `#[derive(...)]` attribute with the given `ExpnId` have built-in `Copy` inside it?
+ fn has_derive_copy(&self, expn_id: LocalExpnId) -> bool;
+ /// Resolve paths inside the `#[derive(...)]` attribute with the given `ExpnId`.
+ fn resolve_derives(
+ &mut self,
+ expn_id: LocalExpnId,
+ force: bool,
+ derive_paths: &dyn Fn() -> DeriveResolutions,
+ ) -> Result<(), Indeterminate>;
+ /// Take resolutions for paths inside the `#[derive(...)]` attribute with the given `ExpnId`
+ /// back from resolver.
+ fn take_derive_resolutions(&mut self, expn_id: LocalExpnId) -> Option<DeriveResolutions>;
+ /// Path resolution logic for `#[cfg_accessible(path)]`.
+ fn cfg_accessible(
+ &mut self,
+ expn_id: LocalExpnId,
+ path: &ast::Path,
+ ) -> Result<bool, Indeterminate>;
+
+ /// Decodes the proc-macro quoted span in the specified crate, with the specified id.
+ /// No caching is performed.
+ fn get_proc_macro_quoted_span(&self, krate: CrateNum, id: usize) -> Span;
+
+ /// The order of items in the HIR is unrelated to the order of
+ /// items in the AST. However, we generate proc macro harnesses
+ /// based on the AST order, and later refer to these harnesses
+    /// from the HIR. This method records the order in which
+    /// we generated proc macro harnesses, so that we can map
+    /// HIR proc macro items back to their harness items.
+ fn declare_proc_macro(&mut self, id: NodeId);
+
+ /// Tools registered with `#![register_tool]` and used by tool attributes and lints.
+ fn registered_tools(&self) -> &FxHashSet<Ident>;
+}
+
+pub trait LintStoreExpand {
+ fn pre_expansion_lint(
+ &self,
+ sess: &Session,
+ registered_tools: &FxHashSet<Ident>,
+ node_id: NodeId,
+ attrs: &[Attribute],
+ items: &[P<Item>],
+ name: &str,
+ );
+}
+
+type LintStoreExpandDyn<'a> = Option<&'a (dyn LintStoreExpand + 'a)>;
+
+#[derive(Clone, Default)]
+pub struct ModuleData {
+ /// Path to the module starting from the crate name, like `my_crate::foo::bar`.
+ pub mod_path: Vec<Ident>,
+ /// Stack of paths to files loaded by out-of-line module items,
+ /// used to detect and report recursive module inclusions.
+ pub file_path_stack: Vec<PathBuf>,
+ /// Directory to search child module files in,
+ /// often (but not necessarily) the parent of the top file path on the `file_path_stack`.
+ pub dir_path: PathBuf,
+}
+
+impl ModuleData {
+ pub fn with_dir_path(&self, dir_path: PathBuf) -> ModuleData {
+ ModuleData {
+ mod_path: self.mod_path.clone(),
+ file_path_stack: self.file_path_stack.clone(),
+ dir_path,
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct ExpansionData {
+ pub id: LocalExpnId,
+ pub depth: usize,
+ pub module: Rc<ModuleData>,
+ pub dir_ownership: DirOwnership,
+ pub prior_type_ascription: Option<(Span, bool)>,
+ /// Some parent node that is close to this macro call
+ pub lint_node_id: NodeId,
+ pub is_trailing_mac: bool,
+}
+
+/// One of these is made during expansion and incrementally updated as we go;
+/// when a macro expansion occurs, the resulting nodes have the expansion data
+/// (`backtrace() -> expn_data`) of their expansion context stored into their spans.
+pub struct ExtCtxt<'a> {
+ pub sess: &'a Session,
+ pub ecfg: expand::ExpansionConfig<'a>,
+ pub reduced_recursion_limit: Option<Limit>,
+ pub root_path: PathBuf,
+ pub resolver: &'a mut dyn ResolverExpand,
+ pub current_expansion: ExpansionData,
+ /// Error recovery mode entered when expansion is stuck
+ /// (or during eager expansion, but that's a hack).
+ pub force_mode: bool,
+ pub expansions: FxIndexMap<Span, Vec<String>>,
+ /// Used for running pre-expansion lints on freshly loaded modules.
+ pub(super) lint_store: LintStoreExpandDyn<'a>,
+ /// Used for storing lints generated during expansion, like `NAMED_ARGUMENTS_USED_POSITIONALLY`
+ pub buffered_early_lint: Vec<BufferedEarlyLint>,
+ /// When we 'expand' an inert attribute, we leave it
+ /// in the AST, but insert it here so that we know
+ /// not to expand it again.
+ pub(super) expanded_inert_attrs: MarkedAttrs,
+}
+
+impl<'a> ExtCtxt<'a> {
+ pub fn new(
+ sess: &'a Session,
+ ecfg: expand::ExpansionConfig<'a>,
+ resolver: &'a mut dyn ResolverExpand,
+ lint_store: LintStoreExpandDyn<'a>,
+ ) -> ExtCtxt<'a> {
+ ExtCtxt {
+ sess,
+ ecfg,
+ reduced_recursion_limit: None,
+ resolver,
+ lint_store,
+ root_path: PathBuf::new(),
+ current_expansion: ExpansionData {
+ id: LocalExpnId::ROOT,
+ depth: 0,
+ module: Default::default(),
+ dir_ownership: DirOwnership::Owned { relative: None },
+ prior_type_ascription: None,
+ lint_node_id: ast::CRATE_NODE_ID,
+ is_trailing_mac: false,
+ },
+ force_mode: false,
+ expansions: FxIndexMap::default(),
+ expanded_inert_attrs: MarkedAttrs::new(),
+ buffered_early_lint: vec![],
+ }
+ }
+
+ /// Returns a `Folder` for deeply expanding all macros in an AST node.
+ pub fn expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> {
+ expand::MacroExpander::new(self, false)
+ }
+
+ /// Returns a `Folder` that deeply expands all macros and assigns all `NodeId`s in an AST node.
+ /// Once `NodeId`s are assigned, the node may not be expanded, removed, or otherwise modified.
+ pub fn monotonic_expander<'b>(&'b mut self) -> expand::MacroExpander<'b, 'a> {
+ expand::MacroExpander::new(self, true)
+ }
+ pub fn new_parser_from_tts(&self, stream: TokenStream) -> parser::Parser<'a> {
+ rustc_parse::stream_to_parser(&self.sess.parse_sess, stream, MACRO_ARGUMENTS)
+ }
+ pub fn source_map(&self) -> &'a SourceMap {
+ self.sess.parse_sess.source_map()
+ }
+ pub fn parse_sess(&self) -> &'a ParseSess {
+ &self.sess.parse_sess
+ }
+ pub fn call_site(&self) -> Span {
+ self.current_expansion.id.expn_data().call_site
+ }
+
+ /// Returns the current expansion kind's description.
+ pub(crate) fn expansion_descr(&self) -> String {
+ let expn_data = self.current_expansion.id.expn_data();
+ expn_data.kind.descr()
+ }
+
+ /// Equivalent of `Span::def_site` from the proc macro API,
+ /// except that the location is taken from the span passed as an argument.
+ pub fn with_def_site_ctxt(&self, span: Span) -> Span {
+ span.with_def_site_ctxt(self.current_expansion.id.to_expn_id())
+ }
+
+ /// Equivalent of `Span::call_site` from the proc macro API,
+ /// except that the location is taken from the span passed as an argument.
+ pub fn with_call_site_ctxt(&self, span: Span) -> Span {
+ span.with_call_site_ctxt(self.current_expansion.id.to_expn_id())
+ }
+
+ /// Equivalent of `Span::mixed_site` from the proc macro API,
+ /// except that the location is taken from the span passed as an argument.
+ pub fn with_mixed_site_ctxt(&self, span: Span) -> Span {
+ span.with_mixed_site_ctxt(self.current_expansion.id.to_expn_id())
+ }
+
+ /// Returns span for the macro which originally caused the current expansion to happen.
+ ///
+ /// Stops backtracing at include! boundary.
+ pub fn expansion_cause(&self) -> Option<Span> {
+ self.current_expansion.id.expansion_cause()
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_err<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: &str,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ self.sess.parse_sess.span_diagnostic.struct_span_err(sp, msg)
+ }
+
+ pub fn create_err(
+ &self,
+ err: impl SessionDiagnostic<'a>,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ self.sess.create_err(err)
+ }
+
+ pub fn emit_err(&self, err: impl SessionDiagnostic<'a>) -> ErrorGuaranteed {
+ self.sess.emit_err(err)
+ }
+
+ /// Emit `msg` attached to `sp`, without immediately stopping
+ /// compilation.
+ ///
+ /// Compilation will be stopped in the near future (at the end of
+ /// the macro expansion phase).
+ #[rustc_lint_diagnostics]
+ pub fn span_err<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
+ self.sess.parse_sess.span_diagnostic.span_err(sp, msg);
+ }
+ #[rustc_lint_diagnostics]
+ pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: &str) {
+ self.sess.parse_sess.span_diagnostic.span_warn(sp, msg);
+ }
+ pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, msg: &str) -> ! {
+ self.sess.parse_sess.span_diagnostic.span_bug(sp, msg);
+ }
+ pub fn trace_macros_diag(&mut self) {
+ for (sp, notes) in self.expansions.iter() {
+ let mut db = self.sess.parse_sess.span_diagnostic.span_note_diag(*sp, "trace_macro");
+ for note in notes {
+ db.note(note);
+ }
+ db.emit();
+ }
+        // FIXME: does this result in errors?
+ self.expansions.clear();
+ }
+ pub fn bug(&self, msg: &str) -> ! {
+ self.sess.parse_sess.span_diagnostic.bug(msg);
+ }
+ pub fn trace_macros(&self) -> bool {
+ self.ecfg.trace_mac
+ }
+ pub fn set_trace_macros(&mut self, x: bool) {
+ self.ecfg.trace_mac = x
+ }
+ pub fn std_path(&self, components: &[Symbol]) -> Vec<Ident> {
+ let def_site = self.with_def_site_ctxt(DUMMY_SP);
+ iter::once(Ident::new(kw::DollarCrate, def_site))
+ .chain(components.iter().map(|&s| Ident::with_dummy_span(s)))
+ .collect()
+ }
+ pub fn def_site_path(&self, components: &[Symbol]) -> Vec<Ident> {
+ let def_site = self.with_def_site_ctxt(DUMMY_SP);
+ components.iter().map(|&s| Ident::new(s, def_site)).collect()
+ }
+
+ pub fn check_unused_macros(&mut self) {
+ self.resolver.check_unused_macros();
+ }
+}
+
+/// Resolves a `path` mentioned inside Rust code, returning an absolute path.
+///
+/// This unifies the logic used for resolving `include_X!`.
+pub fn resolve_path(
+ parse_sess: &ParseSess,
+ path: impl Into<PathBuf>,
+ span: Span,
+) -> PResult<'_, PathBuf> {
+ let path = path.into();
+
+ // Relative paths are resolved relative to the file in which they are found
+ // after macro expansion (that is, they are unhygienic).
+ if !path.is_absolute() {
+ let callsite = span.source_callsite();
+ let mut result = match parse_sess.source_map().span_to_filename(callsite) {
+ FileName::Real(name) => name
+ .into_local_path()
+ .expect("attempting to resolve a file path in an external file"),
+ FileName::DocTest(path, _) => path,
+ other => {
+ return Err(parse_sess.span_diagnostic.struct_span_err(
+ span,
+ &format!(
+ "cannot resolve relative path in non-file source `{}`",
+ parse_sess.source_map().filename_for_diagnostics(&other)
+ ),
+ ));
+ }
+ };
+ result.pop();
+ result.push(path);
+ Ok(result)
+ } else {
+ Ok(path)
+ }
+}
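+
+// Illustrative sketch of the relative-path rule above: a path mentioned in a
+// macro resolves against the directory of the file containing the call site.
+//
+//     // include!("foo/bar.rs") expanded inside src/lib.rs yields:
+//     // resolve_path(parse_sess, "foo/bar.rs", span) == Ok("src/foo/bar.rs".into())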
+
+/// Extracts a string literal from the macro-expanded version of `expr`,
+/// returning a diagnostic with the message `err_msg` if `expr` is not a string
+/// literal. The returned bool indicates whether an applicable suggestion has
+/// already been added to the diagnostic, to avoid emitting multiple
+/// suggestions. `Err(None)` indicates that an AST error was encountered.
+pub fn expr_to_spanned_string<'a>(
+ cx: &'a mut ExtCtxt<'_>,
+ expr: P<ast::Expr>,
+ err_msg: &str,
+) -> Result<(Symbol, ast::StrStyle, Span), Option<(DiagnosticBuilder<'a, ErrorGuaranteed>, bool)>> {
+ // Perform eager expansion on the expression.
+ // We want to be able to handle e.g., `concat!("foo", "bar")`.
+ let expr = cx.expander().fully_expand_fragment(AstFragment::Expr(expr)).make_expr();
+
+ Err(match expr.kind {
+ ast::ExprKind::Lit(ref l) => match l.kind {
+ ast::LitKind::Str(s, style) => return Ok((s, style, expr.span)),
+ ast::LitKind::ByteStr(_) => {
+ let mut err = cx.struct_span_err(l.span, err_msg);
+ err.span_suggestion(
+ expr.span.shrink_to_lo(),
+ "consider removing the leading `b`",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ Some((err, true))
+ }
+ ast::LitKind::Err(_) => None,
+ _ => Some((cx.struct_span_err(l.span, err_msg), false)),
+ },
+ ast::ExprKind::Err => None,
+ _ => Some((cx.struct_span_err(expr.span, err_msg), false)),
+ })
+}
+
+/// Extracts a string literal from the macro-expanded version of `expr`,
+/// emitting `err_msg` if `expr` is not a string literal. This does not stop
+/// compilation on error; it merely emits a non-fatal error and returns `None`.
+pub fn expr_to_string(
+ cx: &mut ExtCtxt<'_>,
+ expr: P<ast::Expr>,
+ err_msg: &str,
+) -> Option<(Symbol, ast::StrStyle)> {
+ expr_to_spanned_string(cx, expr, err_msg)
+ .map_err(|err| {
+ err.map(|(mut err, _)| {
+ err.emit();
+ })
+ })
+ .ok()
+ .map(|(symbol, style, _)| (symbol, style))
+}
+
+/// Non-fatally assert that `tts` is empty. Note that this function
+/// returns even when `tts` is non-empty; macros that *need* to stop
+/// compilation should call
+/// `cx.parse_sess.span_diagnostic.abort_if_errors()` (this should be
+/// done as rarely as possible).
+pub fn check_zero_tts(cx: &ExtCtxt<'_>, sp: Span, tts: TokenStream, name: &str) {
+ if !tts.is_empty() {
+ cx.span_err(sp, &format!("{} takes no arguments", name));
+ }
+}
+
+/// Parses an expression. On error, emits the diagnostic, advances to `Eof`, and returns `None`.
+pub fn parse_expr(p: &mut parser::Parser<'_>) -> Option<P<ast::Expr>> {
+ match p.parse_expr() {
+ Ok(e) => return Some(e),
+ Err(mut err) => {
+ err.emit();
+ }
+ }
+ while p.token != token::Eof {
+ p.bump();
+ }
+ None
+}
+
+/// Interpreting `tts` as a comma-separated sequence of expressions,
+/// expects exactly one string literal, or emits an error and returns `None`.
+pub fn get_single_str_from_tts(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+ name: &str,
+) -> Option<Symbol> {
+ let mut p = cx.new_parser_from_tts(tts);
+ if p.token == token::Eof {
+ cx.span_err(sp, &format!("{} takes 1 argument", name));
+ return None;
+ }
+ let ret = parse_expr(&mut p)?;
+ let _ = p.eat(&token::Comma);
+
+ if p.token != token::Eof {
+ cx.span_err(sp, &format!("{} takes 1 argument", name));
+ }
+ expr_to_string(cx, ret, "argument must be a string literal").map(|(s, _)| s)
+}
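+
+// Illustrative sketch: for a hypothetical built-in macro `m!`, the helper
+// above accepts `m!("s")` and `m!("s",)` (one expression, optional trailing
+// comma) and reports "m takes 1 argument" for `m!()` or `m!("a", "b")`:
+//
+//     let arg: Option<Symbol> = get_single_str_from_tts(cx, sp, tts, "m");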
+
+/// Extracts comma-separated expressions from `tts`.
+/// On error, emits the diagnostic and returns `None`.
+pub fn get_exprs_from_tts(
+ cx: &mut ExtCtxt<'_>,
+ sp: Span,
+ tts: TokenStream,
+) -> Option<Vec<P<ast::Expr>>> {
+ let mut p = cx.new_parser_from_tts(tts);
+ let mut es = Vec::new();
+ while p.token != token::Eof {
+ let expr = parse_expr(&mut p)?;
+
+ // Perform eager expansion on the expression.
+ // We want to be able to handle e.g., `concat!("foo", "bar")`.
+ let expr = cx.expander().fully_expand_fragment(AstFragment::Expr(expr)).make_expr();
+
+ es.push(expr);
+ if p.eat(&token::Comma) {
+ continue;
+ }
+ if p.token != token::Eof {
+ cx.span_err(sp, "expected token: `,`");
+ return None;
+ }
+ }
+ Some(es)
+}
+
+pub fn parse_macro_name_and_helper_attrs(
+ diag: &rustc_errors::Handler,
+ attr: &Attribute,
+ descr: &str,
+) -> Option<(Symbol, Vec<Symbol>)> {
+ // Once we've located the `#[proc_macro_derive]` attribute, verify
+ // that it's of the form `#[proc_macro_derive(Foo)]` or
+ // `#[proc_macro_derive(Foo, attributes(A, ..))]`
+ let list = attr.meta_item_list()?;
+ if list.len() != 1 && list.len() != 2 {
+ diag.span_err(attr.span, "attribute must have either one or two arguments");
+ return None;
+ }
+ let Some(trait_attr) = list[0].meta_item() else {
+ diag.span_err(list[0].span(), "not a meta item");
+ return None;
+ };
+ let trait_ident = match trait_attr.ident() {
+ Some(trait_ident) if trait_attr.is_word() => trait_ident,
+ _ => {
+ diag.span_err(trait_attr.span, "must only be one word");
+ return None;
+ }
+ };
+
+ if !trait_ident.name.can_be_raw() {
+ diag.span_err(
+ trait_attr.span,
+ &format!("`{}` cannot be a name of {} macro", trait_ident, descr),
+ );
+ }
+
+ let attributes_attr = list.get(1);
+ let proc_attrs: Vec<_> = if let Some(attr) = attributes_attr {
+ if !attr.has_name(sym::attributes) {
+ diag.span_err(attr.span(), "second argument must be `attributes`");
+ }
+ attr.meta_item_list()
+ .unwrap_or_else(|| {
+ diag.span_err(attr.span(), "attribute must be of form: `attributes(foo, bar)`");
+ &[]
+ })
+ .iter()
+ .filter_map(|attr| {
+ let Some(attr) = attr.meta_item() else {
+ diag.span_err(attr.span(), "not a meta item");
+ return None;
+ };
+
+ let ident = match attr.ident() {
+ Some(ident) if attr.is_word() => ident,
+ _ => {
+ diag.span_err(attr.span, "must only be one word");
+ return None;
+ }
+ };
+ if !ident.name.can_be_raw() {
+ diag.span_err(
+ attr.span,
+ &format!("`{}` cannot be a name of derive helper attribute", ident),
+ );
+ }
+
+ Some(ident.name)
+ })
+ .collect()
+ } else {
+ Vec::new()
+ };
+
+ Some((trait_ident.name, proc_attrs))
+}
+
+/// Checks whether the given item resembles one of the specific enums from the
+/// `proc-macro-hack` and `procedural-masquerade` crates.
+/// We need to maintain some special pretty-printing behavior for them due to incorrect
+/// asserts in old versions of those crates and their wide use in the ecosystem.
+/// See issue #73345 for more details.
+/// FIXME(#73933): Remove this eventually.
+fn pretty_printing_compatibility_hack(item: &Item, sess: &ParseSess) -> bool {
+ let name = item.ident.name;
+ if name == sym::ProceduralMasqueradeDummyType {
+ if let ast::ItemKind::Enum(enum_def, _) = &item.kind {
+ if let [variant] = &*enum_def.variants {
+ if variant.ident.name == sym::Input {
+ sess.buffer_lint_with_diagnostic(
+ &PROC_MACRO_BACK_COMPAT,
+ item.ident.span,
+ ast::CRATE_NODE_ID,
+ "using `procedural-masquerade` crate",
+ BuiltinLintDiagnostics::ProcMacroBackCompat(
+ "The `procedural-masquerade` crate has been unnecessary since Rust 1.30.0. \
+ Versions of this crate below 0.1.7 will eventually stop compiling.".to_string())
+ );
+ return true;
+ }
+ }
+ }
+ }
+ false
+}
+
+pub(crate) fn ann_pretty_printing_compatibility_hack(ann: &Annotatable, sess: &ParseSess) -> bool {
+ let item = match ann {
+ Annotatable::Item(item) => item,
+ Annotatable::Stmt(stmt) => match &stmt.kind {
+ ast::StmtKind::Item(item) => item,
+ _ => return false,
+ },
+ _ => return false,
+ };
+ pretty_printing_compatibility_hack(item, sess)
+}
+
+pub(crate) fn nt_pretty_printing_compatibility_hack(nt: &Nonterminal, sess: &ParseSess) -> bool {
+ let item = match nt {
+ Nonterminal::NtItem(item) => item,
+ Nonterminal::NtStmt(stmt) => match &stmt.kind {
+ ast::StmtKind::Item(item) => item,
+ _ => return false,
+ },
+ _ => return false,
+ };
+ pretty_printing_compatibility_hack(item, sess)
+}
diff --git a/compiler/rustc_expand/src/build.rs b/compiler/rustc_expand/src/build.rs
new file mode 100644
index 000000000..fa3e2a4a5
--- /dev/null
+++ b/compiler/rustc_expand/src/build.rs
@@ -0,0 +1,616 @@
+use crate::base::ExtCtxt;
+
+use rustc_ast::attr;
+use rustc_ast::ptr::P;
+use rustc_ast::{self as ast, AttrVec, BlockCheckMode, Expr, LocalKind, PatKind, UnOp};
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+
+use rustc_span::Span;
+
+impl<'a> ExtCtxt<'a> {
+ pub fn path(&self, span: Span, strs: Vec<Ident>) -> ast::Path {
+ self.path_all(span, false, strs, vec![])
+ }
+ pub fn path_ident(&self, span: Span, id: Ident) -> ast::Path {
+ self.path(span, vec![id])
+ }
+ pub fn path_global(&self, span: Span, strs: Vec<Ident>) -> ast::Path {
+ self.path_all(span, true, strs, vec![])
+ }
+ pub fn path_all(
+ &self,
+ span: Span,
+ global: bool,
+ mut idents: Vec<Ident>,
+ args: Vec<ast::GenericArg>,
+ ) -> ast::Path {
+ assert!(!idents.is_empty());
+ let add_root = global && !idents[0].is_path_segment_keyword();
+ let mut segments = Vec::with_capacity(idents.len() + add_root as usize);
+ if add_root {
+ segments.push(ast::PathSegment::path_root(span));
+ }
+ let last_ident = idents.pop().unwrap();
+ segments.extend(
+ idents.into_iter().map(|ident| ast::PathSegment::from_ident(ident.with_span_pos(span))),
+ );
+ let args = if !args.is_empty() {
+ let args = args.into_iter().map(ast::AngleBracketedArg::Arg).collect();
+ ast::AngleBracketedArgs { args, span }.into()
+ } else {
+ None
+ };
+ segments.push(ast::PathSegment {
+ ident: last_ident.with_span_pos(span),
+ id: ast::DUMMY_NODE_ID,
+ args,
+ });
+ ast::Path { span, segments, tokens: None }
+ }
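+
+    // Illustrative sketch: `path_global` routes through `path_all` with
+    // `global: true`, which prepends a `::` path root segment:
+    //
+    //     let p = cx.path_global(sp, vec![
+    //         Ident::new(sym::std, sp),
+    //         Ident::new(sym::option, sp),
+    //         Ident::new(sym::Option, sp),
+    //     ]);
+    //     // pretty-prints as `::std::option::Option`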
+
+ pub fn ty_mt(&self, ty: P<ast::Ty>, mutbl: ast::Mutability) -> ast::MutTy {
+ ast::MutTy { ty, mutbl }
+ }
+
+ pub fn ty(&self, span: Span, kind: ast::TyKind) -> P<ast::Ty> {
+ P(ast::Ty { id: ast::DUMMY_NODE_ID, span, kind, tokens: None })
+ }
+
+ pub fn ty_infer(&self, span: Span) -> P<ast::Ty> {
+ self.ty(span, ast::TyKind::Infer)
+ }
+
+ pub fn ty_path(&self, path: ast::Path) -> P<ast::Ty> {
+ self.ty(path.span, ast::TyKind::Path(None, path))
+ }
+
+ // Might need to take bounds as an argument in the future, if you ever want
+ // to generate a bounded existential trait type.
+ pub fn ty_ident(&self, span: Span, ident: Ident) -> P<ast::Ty> {
+ self.ty_path(self.path_ident(span, ident))
+ }
+
+ pub fn anon_const(&self, span: Span, kind: ast::ExprKind) -> ast::AnonConst {
+ ast::AnonConst {
+ id: ast::DUMMY_NODE_ID,
+ value: P(ast::Expr {
+ id: ast::DUMMY_NODE_ID,
+ kind,
+ span,
+ attrs: AttrVec::new(),
+ tokens: None,
+ }),
+ }
+ }
+
+ pub fn const_ident(&self, span: Span, ident: Ident) -> ast::AnonConst {
+ self.anon_const(span, ast::ExprKind::Path(None, self.path_ident(span, ident)))
+ }
+
+ pub fn ty_rptr(
+ &self,
+ span: Span,
+ ty: P<ast::Ty>,
+ lifetime: Option<ast::Lifetime>,
+ mutbl: ast::Mutability,
+ ) -> P<ast::Ty> {
+ self.ty(span, ast::TyKind::Rptr(lifetime, self.ty_mt(ty, mutbl)))
+ }
+
+ pub fn ty_ptr(&self, span: Span, ty: P<ast::Ty>, mutbl: ast::Mutability) -> P<ast::Ty> {
+ self.ty(span, ast::TyKind::Ptr(self.ty_mt(ty, mutbl)))
+ }
+
+ pub fn typaram(
+ &self,
+ span: Span,
+ ident: Ident,
+ attrs: Vec<ast::Attribute>,
+ bounds: ast::GenericBounds,
+ default: Option<P<ast::Ty>>,
+ ) -> ast::GenericParam {
+ ast::GenericParam {
+ ident: ident.with_span_pos(span),
+ id: ast::DUMMY_NODE_ID,
+ attrs: attrs.into(),
+ bounds,
+ kind: ast::GenericParamKind::Type { default },
+ is_placeholder: false,
+ colon_span: None,
+ }
+ }
+
+ pub fn trait_ref(&self, path: ast::Path) -> ast::TraitRef {
+ ast::TraitRef { path, ref_id: ast::DUMMY_NODE_ID }
+ }
+
+ pub fn poly_trait_ref(&self, span: Span, path: ast::Path) -> ast::PolyTraitRef {
+ ast::PolyTraitRef {
+ bound_generic_params: Vec::new(),
+ trait_ref: self.trait_ref(path),
+ span,
+ }
+ }
+
+ pub fn trait_bound(&self, path: ast::Path) -> ast::GenericBound {
+ ast::GenericBound::Trait(
+ self.poly_trait_ref(path.span, path),
+ ast::TraitBoundModifier::None,
+ )
+ }
+
+ pub fn lifetime(&self, span: Span, ident: Ident) -> ast::Lifetime {
+ ast::Lifetime { id: ast::DUMMY_NODE_ID, ident: ident.with_span_pos(span) }
+ }
+
+ pub fn lifetime_static(&self, span: Span) -> ast::Lifetime {
+ self.lifetime(span, Ident::new(kw::StaticLifetime, span))
+ }
+
+ pub fn stmt_expr(&self, expr: P<ast::Expr>) -> ast::Stmt {
+ ast::Stmt { id: ast::DUMMY_NODE_ID, span: expr.span, kind: ast::StmtKind::Expr(expr) }
+ }
+
+ pub fn stmt_let_pat(&self, sp: Span, pat: P<ast::Pat>, ex: P<ast::Expr>) -> ast::Stmt {
+ let local = P(ast::Local {
+ pat,
+ ty: None,
+ id: ast::DUMMY_NODE_ID,
+ kind: LocalKind::Init(ex),
+ span: sp,
+ attrs: AttrVec::new(),
+ tokens: None,
+ });
+ self.stmt_local(local, sp)
+ }
+
+ pub fn stmt_let(&self, sp: Span, mutbl: bool, ident: Ident, ex: P<ast::Expr>) -> ast::Stmt {
+ self.stmt_let_ty(sp, mutbl, ident, None, ex)
+ }
+
+ pub fn stmt_let_ty(
+ &self,
+ sp: Span,
+ mutbl: bool,
+ ident: Ident,
+ ty: Option<P<ast::Ty>>,
+ ex: P<ast::Expr>,
+ ) -> ast::Stmt {
+ let pat = if mutbl {
+ let binding_mode = ast::BindingMode::ByValue(ast::Mutability::Mut);
+ self.pat_ident_binding_mode(sp, ident, binding_mode)
+ } else {
+ self.pat_ident(sp, ident)
+ };
+ let local = P(ast::Local {
+ pat,
+ ty,
+ id: ast::DUMMY_NODE_ID,
+ kind: LocalKind::Init(ex),
+ span: sp,
+ attrs: AttrVec::new(),
+ tokens: None,
+ });
+ self.stmt_local(local, sp)
+ }
+
+ // Generates `let _: Type;`, which is usually used for type assertions.
+ pub fn stmt_let_type_only(&self, span: Span, ty: P<ast::Ty>) -> ast::Stmt {
+ let local = P(ast::Local {
+ pat: self.pat_wild(span),
+ ty: Some(ty),
+ id: ast::DUMMY_NODE_ID,
+ kind: LocalKind::Decl,
+ span,
+ attrs: AttrVec::new(),
+ tokens: None,
+ });
+ self.stmt_local(local, span)
+ }
+
+ pub fn stmt_local(&self, local: P<ast::Local>, span: Span) -> ast::Stmt {
+ ast::Stmt { id: ast::DUMMY_NODE_ID, kind: ast::StmtKind::Local(local), span }
+ }
+
+ pub fn stmt_item(&self, sp: Span, item: P<ast::Item>) -> ast::Stmt {
+ ast::Stmt { id: ast::DUMMY_NODE_ID, kind: ast::StmtKind::Item(item), span: sp }
+ }
+
+ pub fn block_expr(&self, expr: P<ast::Expr>) -> P<ast::Block> {
+ self.block(
+ expr.span,
+ vec![ast::Stmt {
+ id: ast::DUMMY_NODE_ID,
+ span: expr.span,
+ kind: ast::StmtKind::Expr(expr),
+ }],
+ )
+ }
+ pub fn block(&self, span: Span, stmts: Vec<ast::Stmt>) -> P<ast::Block> {
+ P(ast::Block {
+ stmts,
+ id: ast::DUMMY_NODE_ID,
+ rules: BlockCheckMode::Default,
+ span,
+ tokens: None,
+ could_be_bare_literal: false,
+ })
+ }
+
+ pub fn expr(&self, span: Span, kind: ast::ExprKind) -> P<ast::Expr> {
+ P(ast::Expr { id: ast::DUMMY_NODE_ID, kind, span, attrs: AttrVec::new(), tokens: None })
+ }
+
+ pub fn expr_path(&self, path: ast::Path) -> P<ast::Expr> {
+ self.expr(path.span, ast::ExprKind::Path(None, path))
+ }
+
+ pub fn expr_ident(&self, span: Span, id: Ident) -> P<ast::Expr> {
+ self.expr_path(self.path_ident(span, id))
+ }
+ pub fn expr_self(&self, span: Span) -> P<ast::Expr> {
+ self.expr_ident(span, Ident::with_dummy_span(kw::SelfLower))
+ }
+
+ pub fn expr_binary(
+ &self,
+ sp: Span,
+ op: ast::BinOpKind,
+ lhs: P<ast::Expr>,
+ rhs: P<ast::Expr>,
+ ) -> P<ast::Expr> {
+ self.expr(sp, ast::ExprKind::Binary(Spanned { node: op, span: sp }, lhs, rhs))
+ }
+
+ pub fn expr_deref(&self, sp: Span, e: P<ast::Expr>) -> P<ast::Expr> {
+ self.expr(sp, ast::ExprKind::Unary(UnOp::Deref, e))
+ }
+
+ pub fn expr_addr_of(&self, sp: Span, e: P<ast::Expr>) -> P<ast::Expr> {
+ self.expr(sp, ast::ExprKind::AddrOf(ast::BorrowKind::Ref, ast::Mutability::Not, e))
+ }
+
+ pub fn expr_call(
+ &self,
+ span: Span,
+ expr: P<ast::Expr>,
+ args: Vec<P<ast::Expr>>,
+ ) -> P<ast::Expr> {
+ self.expr(span, ast::ExprKind::Call(expr, args))
+ }
+ pub fn expr_call_ident(&self, span: Span, id: Ident, args: Vec<P<ast::Expr>>) -> P<ast::Expr> {
+ self.expr(span, ast::ExprKind::Call(self.expr_ident(span, id), args))
+ }
+ pub fn expr_call_global(
+ &self,
+ sp: Span,
+ fn_path: Vec<Ident>,
+ args: Vec<P<ast::Expr>>,
+ ) -> P<ast::Expr> {
+ let pathexpr = self.expr_path(self.path_global(sp, fn_path));
+ self.expr_call(sp, pathexpr, args)
+ }
+ pub fn expr_block(&self, b: P<ast::Block>) -> P<ast::Expr> {
+ self.expr(b.span, ast::ExprKind::Block(b, None))
+ }
+ pub fn field_imm(&self, span: Span, ident: Ident, e: P<ast::Expr>) -> ast::ExprField {
+ ast::ExprField {
+ ident: ident.with_span_pos(span),
+ expr: e,
+ span,
+ is_shorthand: false,
+ attrs: AttrVec::new(),
+ id: ast::DUMMY_NODE_ID,
+ is_placeholder: false,
+ }
+ }
+ pub fn expr_struct(
+ &self,
+ span: Span,
+ path: ast::Path,
+ fields: Vec<ast::ExprField>,
+ ) -> P<ast::Expr> {
+ self.expr(
+ span,
+ ast::ExprKind::Struct(P(ast::StructExpr {
+ qself: None,
+ path,
+ fields,
+ rest: ast::StructRest::None,
+ })),
+ )
+ }
+ pub fn expr_struct_ident(
+ &self,
+ span: Span,
+ id: Ident,
+ fields: Vec<ast::ExprField>,
+ ) -> P<ast::Expr> {
+ self.expr_struct(span, self.path_ident(span, id), fields)
+ }
+
+ pub fn expr_lit(&self, span: Span, lit_kind: ast::LitKind) -> P<ast::Expr> {
+ let lit = ast::Lit::from_lit_kind(lit_kind, span);
+ self.expr(span, ast::ExprKind::Lit(lit))
+ }
+ pub fn expr_usize(&self, span: Span, i: usize) -> P<ast::Expr> {
+ self.expr_lit(
+ span,
+ ast::LitKind::Int(i as u128, ast::LitIntType::Unsigned(ast::UintTy::Usize)),
+ )
+ }
+ pub fn expr_u32(&self, sp: Span, u: u32) -> P<ast::Expr> {
+ self.expr_lit(sp, ast::LitKind::Int(u as u128, ast::LitIntType::Unsigned(ast::UintTy::U32)))
+ }
+ pub fn expr_bool(&self, sp: Span, value: bool) -> P<ast::Expr> {
+ self.expr_lit(sp, ast::LitKind::Bool(value))
+ }
+
+ /// `[expr1, expr2, ...]`
+ pub fn expr_array(&self, sp: Span, exprs: Vec<P<ast::Expr>>) -> P<ast::Expr> {
+ self.expr(sp, ast::ExprKind::Array(exprs))
+ }
+
+ /// `&[expr1, expr2, ...]`
+ pub fn expr_array_ref(&self, sp: Span, exprs: Vec<P<ast::Expr>>) -> P<ast::Expr> {
+ self.expr_addr_of(sp, self.expr_array(sp, exprs))
+ }
+
+ pub fn expr_str(&self, sp: Span, s: Symbol) -> P<ast::Expr> {
+ self.expr_lit(sp, ast::LitKind::Str(s, ast::StrStyle::Cooked))
+ }
+
+ pub fn expr_cast(&self, sp: Span, expr: P<ast::Expr>, ty: P<ast::Ty>) -> P<ast::Expr> {
+ self.expr(sp, ast::ExprKind::Cast(expr, ty))
+ }
+
+ pub fn expr_some(&self, sp: Span, expr: P<ast::Expr>) -> P<ast::Expr> {
+ let some = self.std_path(&[sym::option, sym::Option, sym::Some]);
+ self.expr_call_global(sp, some, vec![expr])
+ }
+
+ pub fn expr_none(&self, sp: Span) -> P<ast::Expr> {
+ let none = self.std_path(&[sym::option, sym::Option, sym::None]);
+ self.expr_path(self.path_global(sp, none))
+ }
+ pub fn expr_tuple(&self, sp: Span, exprs: Vec<P<ast::Expr>>) -> P<ast::Expr> {
+ self.expr(sp, ast::ExprKind::Tup(exprs))
+ }
+
+ pub fn expr_fail(&self, span: Span, msg: Symbol) -> P<ast::Expr> {
+ self.expr_call_global(
+ span,
+ [sym::std, sym::rt, sym::begin_panic].iter().map(|s| Ident::new(*s, span)).collect(),
+ vec![self.expr_str(span, msg)],
+ )
+ }
+
+ pub fn expr_unreachable(&self, span: Span) -> P<ast::Expr> {
+ self.expr_fail(span, Symbol::intern("internal error: entered unreachable code"))
+ }
+
+ pub fn expr_ok(&self, sp: Span, expr: P<ast::Expr>) -> P<ast::Expr> {
+ let ok = self.std_path(&[sym::result, sym::Result, sym::Ok]);
+ self.expr_call_global(sp, ok, vec![expr])
+ }
+
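+ // Sketch of the output: `expr_try(sp, head)` builds the `?`-style desugaring
+ //
+ //     match head {
+ //         Ok(__try_var) => __try_var,
+ //         Err(__try_var) => return Err(__try_var),
+ //     }
+ //
+ // as assembled step by step in the body below.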
+ pub fn expr_try(&self, sp: Span, head: P<ast::Expr>) -> P<ast::Expr> {
+ let ok = self.std_path(&[sym::result, sym::Result, sym::Ok]);
+ let ok_path = self.path_global(sp, ok);
+ let err = self.std_path(&[sym::result, sym::Result, sym::Err]);
+ let err_path = self.path_global(sp, err);
+
+ let binding_variable = Ident::new(sym::__try_var, sp);
+ let binding_pat = self.pat_ident(sp, binding_variable);
+ let binding_expr = self.expr_ident(sp, binding_variable);
+
+ // `Ok(__try_var)` pattern
+ let ok_pat = self.pat_tuple_struct(sp, ok_path, vec![binding_pat.clone()]);
+
+ // `Err(__try_var)` (pattern and expression respectively)
+ let err_pat = self.pat_tuple_struct(sp, err_path.clone(), vec![binding_pat]);
+ let err_inner_expr =
+ self.expr_call(sp, self.expr_path(err_path), vec![binding_expr.clone()]);
+ // `return Err(__try_var)`
+ let err_expr = self.expr(sp, ast::ExprKind::Ret(Some(err_inner_expr)));
+
+ // `Ok(__try_var) => __try_var`
+ let ok_arm = self.arm(sp, ok_pat, binding_expr);
+ // `Err(__try_var) => return Err(__try_var)`
+ let err_arm = self.arm(sp, err_pat, err_expr);
+
+ // `match head { Ok() => ..., Err() => ... }`
+ self.expr_match(sp, head, vec![ok_arm, err_arm])
+ }
+
+ pub fn pat(&self, span: Span, kind: PatKind) -> P<ast::Pat> {
+ P(ast::Pat { id: ast::DUMMY_NODE_ID, kind, span, tokens: None })
+ }
+ pub fn pat_wild(&self, span: Span) -> P<ast::Pat> {
+ self.pat(span, PatKind::Wild)
+ }
+ pub fn pat_lit(&self, span: Span, expr: P<ast::Expr>) -> P<ast::Pat> {
+ self.pat(span, PatKind::Lit(expr))
+ }
+ pub fn pat_ident(&self, span: Span, ident: Ident) -> P<ast::Pat> {
+ let binding_mode = ast::BindingMode::ByValue(ast::Mutability::Not);
+ self.pat_ident_binding_mode(span, ident, binding_mode)
+ }
+
+ pub fn pat_ident_binding_mode(
+ &self,
+ span: Span,
+ ident: Ident,
+ bm: ast::BindingMode,
+ ) -> P<ast::Pat> {
+ let pat = PatKind::Ident(bm, ident.with_span_pos(span), None);
+ self.pat(span, pat)
+ }
+ pub fn pat_path(&self, span: Span, path: ast::Path) -> P<ast::Pat> {
+ self.pat(span, PatKind::Path(None, path))
+ }
+ pub fn pat_tuple_struct(
+ &self,
+ span: Span,
+ path: ast::Path,
+ subpats: Vec<P<ast::Pat>>,
+ ) -> P<ast::Pat> {
+ self.pat(span, PatKind::TupleStruct(None, path, subpats))
+ }
+ pub fn pat_struct(
+ &self,
+ span: Span,
+ path: ast::Path,
+ field_pats: Vec<ast::PatField>,
+ ) -> P<ast::Pat> {
+ self.pat(span, PatKind::Struct(None, path, field_pats, false))
+ }
+ pub fn pat_tuple(&self, span: Span, pats: Vec<P<ast::Pat>>) -> P<ast::Pat> {
+ self.pat(span, PatKind::Tuple(pats))
+ }
+
+ pub fn pat_some(&self, span: Span, pat: P<ast::Pat>) -> P<ast::Pat> {
+ let some = self.std_path(&[sym::option, sym::Option, sym::Some]);
+ let path = self.path_global(span, some);
+ self.pat_tuple_struct(span, path, vec![pat])
+ }
+
+ pub fn arm(&self, span: Span, pat: P<ast::Pat>, expr: P<ast::Expr>) -> ast::Arm {
+ ast::Arm {
+ attrs: AttrVec::new(),
+ pat,
+ guard: None,
+ body: expr,
+ span,
+ id: ast::DUMMY_NODE_ID,
+ is_placeholder: false,
+ }
+ }
+
+ pub fn arm_unreachable(&self, span: Span) -> ast::Arm {
+ self.arm(span, self.pat_wild(span), self.expr_unreachable(span))
+ }
+
+ pub fn expr_match(&self, span: Span, arg: P<ast::Expr>, arms: Vec<ast::Arm>) -> P<Expr> {
+ self.expr(span, ast::ExprKind::Match(arg, arms))
+ }
+
+ pub fn expr_if(
+ &self,
+ span: Span,
+ cond: P<ast::Expr>,
+ then: P<ast::Expr>,
+ els: Option<P<ast::Expr>>,
+ ) -> P<ast::Expr> {
+ let els = els.map(|x| self.expr_block(self.block_expr(x)));
+ self.expr(span, ast::ExprKind::If(cond, self.block_expr(then), els))
+ }
+
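+ // For example (illustrative), `lambda(span, vec![x], body)` builds the
+ // closure `|x| body`, leaving each parameter's type to inference.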
+ pub fn lambda(&self, span: Span, ids: Vec<Ident>, body: P<ast::Expr>) -> P<ast::Expr> {
+ let fn_decl = self.fn_decl(
+ ids.iter().map(|id| self.param(span, *id, self.ty(span, ast::TyKind::Infer))).collect(),
+ ast::FnRetTy::Default(span),
+ );
+
+ // FIXME -- We are using `span` as the span of the `|...|`
+ // part of the lambda, but it probably (maybe?) corresponds to
+ // the entire lambda body. Probably we should extend the API
+ // here, but that's not entirely clear.
+ self.expr(
+ span,
+ ast::ExprKind::Closure(
+ ast::ClosureBinder::NotPresent,
+ ast::CaptureBy::Ref,
+ ast::Async::No,
+ ast::Movability::Movable,
+ fn_decl,
+ body,
+ span,
+ ),
+ )
+ }
+
+ pub fn lambda0(&self, span: Span, body: P<ast::Expr>) -> P<ast::Expr> {
+ self.lambda(span, Vec::new(), body)
+ }
+
+ pub fn lambda1(&self, span: Span, body: P<ast::Expr>, ident: Ident) -> P<ast::Expr> {
+ self.lambda(span, vec![ident], body)
+ }
+
+ pub fn lambda_stmts_1(&self, span: Span, stmts: Vec<ast::Stmt>, ident: Ident) -> P<ast::Expr> {
+ self.lambda1(span, self.expr_block(self.block(span, stmts)), ident)
+ }
+
+ pub fn param(&self, span: Span, ident: Ident, ty: P<ast::Ty>) -> ast::Param {
+ let arg_pat = self.pat_ident(span, ident);
+ ast::Param {
+ attrs: AttrVec::default(),
+ id: ast::DUMMY_NODE_ID,
+ pat: arg_pat,
+ span,
+ ty,
+ is_placeholder: false,
+ }
+ }
+
+ // `self` is unused, but we keep this as a method for convenience.
+ pub fn fn_decl(&self, inputs: Vec<ast::Param>, output: ast::FnRetTy) -> P<ast::FnDecl> {
+ P(ast::FnDecl { inputs, output })
+ }
+
+ pub fn item(
+ &self,
+ span: Span,
+ name: Ident,
+ attrs: Vec<ast::Attribute>,
+ kind: ast::ItemKind,
+ ) -> P<ast::Item> {
+ // FIXME: Would be nice if our generated code didn't violate
+ // Rust coding conventions
+ P(ast::Item {
+ ident: name,
+ attrs,
+ id: ast::DUMMY_NODE_ID,
+ kind,
+ vis: ast::Visibility {
+ span: span.shrink_to_lo(),
+ kind: ast::VisibilityKind::Inherited,
+ tokens: None,
+ },
+ span,
+ tokens: None,
+ })
+ }
+
+ pub fn item_static(
+ &self,
+ span: Span,
+ name: Ident,
+ ty: P<ast::Ty>,
+ mutbl: ast::Mutability,
+ expr: P<ast::Expr>,
+ ) -> P<ast::Item> {
+ self.item(span, name, Vec::new(), ast::ItemKind::Static(ty, mutbl, Some(expr)))
+ }
+
+ pub fn item_const(
+ &self,
+ span: Span,
+ name: Ident,
+ ty: P<ast::Ty>,
+ expr: P<ast::Expr>,
+ ) -> P<ast::Item> {
+ let def = ast::Defaultness::Final;
+ self.item(span, name, Vec::new(), ast::ItemKind::Const(def, ty, Some(expr)))
+ }
+
+ pub fn attribute(&self, mi: ast::MetaItem) -> ast::Attribute {
+ attr::mk_attr_outer(mi)
+ }
+
+ pub fn meta_word(&self, sp: Span, w: Symbol) -> ast::MetaItem {
+ attr::mk_word_item(Ident::new(w, sp))
+ }
+}
diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs
new file mode 100644
index 000000000..3e1acf438
--- /dev/null
+++ b/compiler/rustc_expand/src/config.rs
@@ -0,0 +1,535 @@
+//! Conditional compilation stripping.
+
+use rustc_ast::ptr::P;
+use rustc_ast::token::{Delimiter, Token, TokenKind};
+use rustc_ast::tokenstream::{AttrAnnotatedTokenStream, AttrAnnotatedTokenTree};
+use rustc_ast::tokenstream::{DelimSpan, Spacing};
+use rustc_ast::tokenstream::{LazyTokenStream, TokenTree};
+use rustc_ast::NodeId;
+use rustc_ast::{self as ast, AttrStyle, Attribute, HasAttrs, HasTokens, MetaItem};
+use rustc_attr as attr;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::map_in_place::MapInPlace;
+use rustc_errors::{error_code, struct_span_err, Applicability, Handler};
+use rustc_feature::{Feature, Features, State as FeatureState};
+use rustc_feature::{
+ ACCEPTED_FEATURES, ACTIVE_FEATURES, REMOVED_FEATURES, STABLE_REMOVED_FEATURES,
+};
+use rustc_parse::validate_attr;
+use rustc_session::parse::feature_err;
+use rustc_session::Session;
+use rustc_span::edition::{Edition, ALL_EDITIONS};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+
+/// A folder that strips out items that do not belong in the current configuration.
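+///
+/// For example (illustrative), when the `feature = "foo"` predicate is false,
+/// an item annotated with `#[cfg(feature = "foo")]` is removed entirely.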
+pub struct StripUnconfigured<'a> {
+ pub sess: &'a Session,
+ pub features: Option<&'a Features>,
+ /// If `true`, perform cfg-stripping on attached tokens.
+ /// This is only used for the input to derive macros,
+ /// which needs eager expansion of `cfg` and `cfg_attr`.
+ pub config_tokens: bool,
+ pub lint_node_id: NodeId,
+}
+
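+// Computes the crate's `Features` from its `#![feature(...)]` attributes. For
+// example (illustrative), `#![feature(box_patterns)]` enables the matching
+// active feature and is recorded in `declared_lang_features`, while a name
+// matching no known lang feature falls through to `declared_lib_features`.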
+fn get_features(
+ sess: &Session,
+ span_handler: &Handler,
+ krate_attrs: &[ast::Attribute],
+) -> Features {
+ fn feature_removed(span_handler: &Handler, span: Span, reason: Option<&str>) {
+ let mut err = struct_span_err!(span_handler, span, E0557, "feature has been removed");
+ err.span_label(span, "feature has been removed");
+ if let Some(reason) = reason {
+ err.note(reason);
+ }
+ err.emit();
+ }
+
+ fn active_features_up_to(edition: Edition) -> impl Iterator<Item = &'static Feature> {
+ ACTIVE_FEATURES.iter().filter(move |feature| {
+ if let Some(feature_edition) = feature.edition {
+ feature_edition <= edition
+ } else {
+ false
+ }
+ })
+ }
+
+ let mut features = Features::default();
+ let mut edition_enabled_features = FxHashMap::default();
+ let crate_edition = sess.edition();
+
+ for &edition in ALL_EDITIONS {
+ if edition <= crate_edition {
+ // The `crate_edition` implies its respective umbrella feature-gate
+ // (i.e., `#![feature(rust_20XX_preview)]` isn't needed on edition 20XX).
+ edition_enabled_features.insert(edition.feature_name(), edition);
+ }
+ }
+
+ for feature in active_features_up_to(crate_edition) {
+ feature.set(&mut features, DUMMY_SP);
+ edition_enabled_features.insert(feature.name, crate_edition);
+ }
+
+ // Process the edition umbrella feature-gates first, to ensure
+ // `edition_enabled_features` is completed before it's queried.
+ for attr in krate_attrs {
+ if !attr.has_name(sym::feature) {
+ continue;
+ }
+
+ let Some(list) = attr.meta_item_list() else {
+ continue;
+ };
+
+ for mi in list {
+ if !mi.is_word() {
+ continue;
+ }
+
+ let name = mi.name_or_empty();
+
+ let edition = ALL_EDITIONS.iter().find(|e| name == e.feature_name()).copied();
+ if let Some(edition) = edition {
+ if edition <= crate_edition {
+ continue;
+ }
+
+ for feature in active_features_up_to(edition) {
+ // FIXME(Manishearth) there is currently no way to set
+ // lib features by edition
+ feature.set(&mut features, DUMMY_SP);
+ edition_enabled_features.insert(feature.name, edition);
+ }
+ }
+ }
+ }
+
+ for attr in krate_attrs {
+ if !attr.has_name(sym::feature) {
+ continue;
+ }
+
+ let Some(list) = attr.meta_item_list() else {
+ continue;
+ };
+
+ let bad_input = |span| {
+ struct_span_err!(span_handler, span, E0556, "malformed `feature` attribute input")
+ };
+
+ for mi in list {
+ let name = match mi.ident() {
+ Some(ident) if mi.is_word() => ident.name,
+ Some(ident) => {
+ bad_input(mi.span())
+ .span_suggestion(
+ mi.span(),
+ "expected just one word",
+ ident.name,
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ continue;
+ }
+ None => {
+ bad_input(mi.span()).span_label(mi.span(), "expected just one word").emit();
+ continue;
+ }
+ };
+
+ if let Some(edition) = edition_enabled_features.get(&name) {
+ let msg =
+ &format!("the feature `{}` is included in the Rust {} edition", name, edition);
+ span_handler.struct_span_warn_with_code(mi.span(), msg, error_code!(E0705)).emit();
+ continue;
+ }
+
+ if ALL_EDITIONS.iter().any(|e| name == e.feature_name()) {
+ // Handled in the separate loop above.
+ continue;
+ }
+
+ let removed = REMOVED_FEATURES.iter().find(|f| name == f.name);
+ let stable_removed = STABLE_REMOVED_FEATURES.iter().find(|f| name == f.name);
+ if let Some(Feature { state, .. }) = removed.or(stable_removed) {
+ if let FeatureState::Removed { reason } | FeatureState::Stabilized { reason } =
+ state
+ {
+ feature_removed(span_handler, mi.span(), *reason);
+ continue;
+ }
+ }
+
+ if let Some(Feature { since, .. }) = ACCEPTED_FEATURES.iter().find(|f| name == f.name) {
+ let since = Some(Symbol::intern(since));
+ features.declared_lang_features.push((name, mi.span(), since));
+ features.active_features.insert(name);
+ continue;
+ }
+
+ if let Some(allowed) = sess.opts.unstable_opts.allow_features.as_ref() {
+ if allowed.iter().all(|f| name.as_str() != f) {
+ struct_span_err!(
+ span_handler,
+ mi.span(),
+ E0725,
+ "the feature `{}` is not in the list of allowed features",
+ name
+ )
+ .emit();
+ continue;
+ }
+ }
+
+ if let Some(f) = ACTIVE_FEATURES.iter().find(|f| name == f.name) {
+ f.set(&mut features, mi.span());
+ features.declared_lang_features.push((name, mi.span(), None));
+ features.active_features.insert(name);
+ continue;
+ }
+
+ features.declared_lib_features.push((name, mi.span()));
+ features.active_features.insert(name);
+ }
+ }
+
+ features
+}
+
+// `cfg_attr`-process the crate's attributes and compute the crate's features.
+pub fn features(
+ sess: &Session,
+ mut krate: ast::Crate,
+ lint_node_id: NodeId,
+) -> (ast::Crate, Features) {
+ let mut strip_unconfigured =
+ StripUnconfigured { sess, features: None, config_tokens: false, lint_node_id };
+
+ let unconfigured_attrs = krate.attrs.clone();
+ let diag = &sess.parse_sess.span_diagnostic;
+ let err_count = diag.err_count();
+ let features = match strip_unconfigured.configure_krate_attrs(krate.attrs) {
+ None => {
+ // The entire crate is unconfigured.
+ krate.attrs = Vec::new();
+ krate.items = Vec::new();
+ Features::default()
+ }
+ Some(attrs) => {
+ krate.attrs = attrs;
+ let features = get_features(sess, diag, &krate.attrs);
+ if err_count == diag.err_count() {
+ // Avoid reconfiguring malformed `cfg_attr`s.
+ strip_unconfigured.features = Some(&features);
+ // Run configuration again, this time with features available
+ // so that we can perform feature-gating.
+ strip_unconfigured.configure_krate_attrs(unconfigured_attrs);
+ }
+ features
+ }
+ };
+ (krate, features)
+}
+
+#[macro_export]
+macro_rules! configure {
+ ($this:ident, $node:ident) => {
+ match $this.configure($node) {
+ Some(node) => node,
+ None => return Default::default(),
+ }
+ };
+}
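+// Usage sketch (hypothetical caller): inside a visitor method,
+// `let node = configure!(self, node);` unwraps the configured node, or makes
+// the enclosing function return `Default::default()` when the node is
+// stripped by the current configuration.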
+
+impl<'a> StripUnconfigured<'a> {
+ pub fn configure<T: HasAttrs + HasTokens>(&self, mut node: T) -> Option<T> {
+ self.process_cfg_attrs(&mut node);
+ if self.in_cfg(node.attrs()) {
+ self.try_configure_tokens(&mut node);
+ Some(node)
+ } else {
+ None
+ }
+ }
+
+ fn try_configure_tokens<T: HasTokens>(&self, node: &mut T) {
+ if self.config_tokens {
+ if let Some(Some(tokens)) = node.tokens_mut() {
+ let attr_annotated_tokens = tokens.create_token_stream();
+ *tokens = LazyTokenStream::new(self.configure_tokens(&attr_annotated_tokens));
+ }
+ }
+ }
+
+ fn configure_krate_attrs(&self, mut attrs: Vec<ast::Attribute>) -> Option<Vec<ast::Attribute>> {
+ attrs.flat_map_in_place(|attr| self.process_cfg_attr(attr));
+ if self.in_cfg(&attrs) { Some(attrs) } else { None }
+ }
+
+ /// Performs cfg-expansion on `stream`, producing a new `AttrAnnotatedTokenStream`.
+ /// This is only used during the invocation of `derive` proc-macros,
+ /// which require that we cfg-expand their entire input.
+ /// Normal cfg-expansion operates on parsed AST nodes via the `configure` method.
+ fn configure_tokens(&self, stream: &AttrAnnotatedTokenStream) -> AttrAnnotatedTokenStream {
+ fn can_skip(stream: &AttrAnnotatedTokenStream) -> bool {
+ stream.0.iter().all(|(tree, _spacing)| match tree {
+ AttrAnnotatedTokenTree::Attributes(_) => false,
+ AttrAnnotatedTokenTree::Token(_) => true,
+ AttrAnnotatedTokenTree::Delimited(_, _, inner) => can_skip(inner),
+ })
+ }
+
+ if can_skip(stream) {
+ return stream.clone();
+ }
+
+ let trees: Vec<_> = stream
+ .0
+ .iter()
+ .flat_map(|(tree, spacing)| match tree.clone() {
+ AttrAnnotatedTokenTree::Attributes(mut data) => {
+ let mut attrs: Vec<_> = std::mem::take(&mut data.attrs).into();
+ attrs.flat_map_in_place(|attr| self.process_cfg_attr(attr));
+ data.attrs = attrs.into();
+
+ if self.in_cfg(&data.attrs) {
+ data.tokens = LazyTokenStream::new(
+ self.configure_tokens(&data.tokens.create_token_stream()),
+ );
+ Some((AttrAnnotatedTokenTree::Attributes(data), *spacing)).into_iter()
+ } else {
+ None.into_iter()
+ }
+ }
+ AttrAnnotatedTokenTree::Delimited(sp, delim, mut inner) => {
+ inner = self.configure_tokens(&inner);
+ Some((AttrAnnotatedTokenTree::Delimited(sp, delim, inner), *spacing))
+ .into_iter()
+ }
+ AttrAnnotatedTokenTree::Token(ref token) if let TokenKind::Interpolated(ref nt) = token.kind => {
+ panic!(
+ "Nonterminal should have been flattened at {:?}: {:?}",
+ token.span, nt
+ );
+ }
+ AttrAnnotatedTokenTree::Token(token) => {
+ Some((AttrAnnotatedTokenTree::Token(token), *spacing)).into_iter()
+ }
+ })
+ .collect();
+ AttrAnnotatedTokenStream::new(trees)
+ }
+
+ /// Parse and expand all `cfg_attr` attributes into a list of attributes
+ /// that are within each `cfg_attr` that has a true configuration predicate.
+ ///
+ /// Gives compiler warnings if any `cfg_attr` does not contain any
+ /// attributes and is in the original source code. Gives compiler errors if
+ /// the syntax of any `cfg_attr` is incorrect.
+ fn process_cfg_attrs<T: HasAttrs>(&self, node: &mut T) {
+ node.visit_attrs(|attrs| {
+ attrs.flat_map_in_place(|attr| self.process_cfg_attr(attr));
+ });
+ }
+
+ fn process_cfg_attr(&self, attr: Attribute) -> Vec<Attribute> {
+ if attr.has_name(sym::cfg_attr) { self.expand_cfg_attr(attr, true) } else { vec![attr] }
+ }
+
+ /// Parse and expand a single `cfg_attr` attribute into a list of attributes
+ /// when the configuration predicate is true, or otherwise expand into an
+ /// empty list of attributes.
+ ///
+ /// Gives a compiler warning when the `cfg_attr` contains no attributes and
+ /// is in the original source file. Gives a compiler error if the syntax of
+ /// the attribute is incorrect.
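+ ///
+ /// For example (illustrative), `#[cfg_attr(unix, allow(dead_code))]` expands
+ /// to `#[allow(dead_code)]` when `unix` is set, and to nothing otherwise.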
+ pub(crate) fn expand_cfg_attr(&self, attr: Attribute, recursive: bool) -> Vec<Attribute> {
+ let Some((cfg_predicate, expanded_attrs)) =
+ rustc_parse::parse_cfg_attr(&attr, &self.sess.parse_sess) else {
+ return vec![];
+ };
+
+ // Lint on zero attributes in source.
+ if expanded_attrs.is_empty() {
+ self.sess.parse_sess.buffer_lint(
+ rustc_lint_defs::builtin::UNUSED_ATTRIBUTES,
+ attr.span,
+ ast::CRATE_NODE_ID,
+ "`#[cfg_attr]` does not expand to any attributes",
+ );
+ }
+
+ if !attr::cfg_matches(
+ &cfg_predicate,
+ &self.sess.parse_sess,
+ self.lint_node_id,
+ self.features,
+ ) {
+ return vec![];
+ }
+
+ if recursive {
+ // We call `process_cfg_attr` recursively in case there's a
+ // `cfg_attr` inside of another `cfg_attr`. E.g.
+ // `#[cfg_attr(false, cfg_attr(true, some_attr))]`.
+ expanded_attrs
+ .into_iter()
+ .flat_map(|item| self.process_cfg_attr(self.expand_cfg_attr_item(&attr, item)))
+ .collect()
+ } else {
+ expanded_attrs.into_iter().map(|item| self.expand_cfg_attr_item(&attr, item)).collect()
+ }
+ }
+
+ fn expand_cfg_attr_item(
+ &self,
+ attr: &Attribute,
+ (item, item_span): (ast::AttrItem, Span),
+ ) -> Attribute {
+ let orig_tokens = attr.tokens().to_tokenstream();
+
+ // We are taking an attribute of the form `#[cfg_attr(pred, attr)]`
+ // and producing an attribute of the form `#[attr]`. We
+ // have captured tokens for `attr` itself, but we need to
+ // synthesize tokens for the wrapper `#` and `[]`, which
+ // we do below.
+
+ // Use the `#` in `#[cfg_attr(pred, attr)]` as the `#` token
+ // for `attr` when we expand it to `#[attr]`
+ let mut orig_trees = orig_tokens.into_trees();
+ let TokenTree::Token(pound_token @ Token { kind: TokenKind::Pound, .. }, _) = orig_trees.next().unwrap() else {
+ panic!("Bad tokens for attribute {:?}", attr);
+ };
+ let pound_span = pound_token.span;
+
+ let mut trees = vec![(AttrAnnotatedTokenTree::Token(pound_token), Spacing::Alone)];
+ if attr.style == AttrStyle::Inner {
+ // For inner attributes, we do the same thing for the `!` in `#![some_attr]`
+ let TokenTree::Token(bang_token @ Token { kind: TokenKind::Not, .. }, _) = orig_trees.next().unwrap() else {
+ panic!("Bad tokens for attribute {:?}", attr);
+ };
+ trees.push((AttrAnnotatedTokenTree::Token(bang_token), Spacing::Alone));
+ }
+ // We don't really have a good span to use for the synthesized `[]`
+ // in `#[attr]`, so just use the span of the `#` token.
+ let bracket_group = AttrAnnotatedTokenTree::Delimited(
+ DelimSpan::from_single(pound_span),
+ Delimiter::Bracket,
+ item.tokens
+ .as_ref()
+ .unwrap_or_else(|| panic!("Missing tokens for {:?}", item))
+ .create_token_stream(),
+ );
+ trees.push((bracket_group, Spacing::Alone));
+ let tokens = Some(LazyTokenStream::new(AttrAnnotatedTokenStream::new(trees)));
+ let attr = attr::mk_attr_from_item(item, tokens, attr.style, item_span);
+ if attr.has_name(sym::crate_type) {
+ self.sess.parse_sess.buffer_lint(
+ rustc_lint_defs::builtin::DEPRECATED_CFG_ATTR_CRATE_TYPE_NAME,
+ attr.span,
+ ast::CRATE_NODE_ID,
+ "`crate_type` within an `#![cfg_attr]` attribute is deprecated",
+ );
+ }
+ if attr.has_name(sym::crate_name) {
+ self.sess.parse_sess.buffer_lint(
+ rustc_lint_defs::builtin::DEPRECATED_CFG_ATTR_CRATE_TYPE_NAME,
+ attr.span,
+ ast::CRATE_NODE_ID,
+ "`crate_name` within an `#![cfg_attr]` attribute is deprecated",
+ );
+ }
+ attr
+ }
+
+ /// Determines if a node with the given attributes should be included in this configuration.
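+ /// That is, every `#[cfg(...)]` attribute present must evaluate to true.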
+ fn in_cfg(&self, attrs: &[Attribute]) -> bool {
+ attrs.iter().all(|attr| !is_cfg(attr) || self.cfg_true(attr))
+ }
+
+ pub(crate) fn cfg_true(&self, attr: &Attribute) -> bool {
+ let meta_item = match validate_attr::parse_meta(&self.sess.parse_sess, attr) {
+ Ok(meta_item) => meta_item,
+ Err(mut err) => {
+ err.emit();
+ return true;
+ }
+ };
+ parse_cfg(&meta_item, &self.sess).map_or(true, |meta_item| {
+ attr::cfg_matches(&meta_item, &self.sess.parse_sess, self.lint_node_id, self.features)
+ })
+ }
+
+ /// If attributes are not allowed on expressions, emit an error for `attr`
+ pub(crate) fn maybe_emit_expr_attr_err(&self, attr: &Attribute) {
+ if !self.features.map_or(true, |features| features.stmt_expr_attributes) {
+ let mut err = feature_err(
+ &self.sess.parse_sess,
+ sym::stmt_expr_attributes,
+ attr.span,
+ "attributes on expressions are experimental",
+ );
+
+ if attr.is_doc_comment() {
+ err.help("`///` is for documentation comments. For a plain comment, use `//`.");
+ }
+
+ err.emit();
+ }
+ }
+
+ pub fn configure_expr(&self, expr: &mut P<ast::Expr>) {
+ for attr in expr.attrs.iter() {
+ self.maybe_emit_expr_attr_err(attr);
+ }
+
+ // If an expr is valid to cfg away it will have been removed by the
+ // outer stmt or expression folder before descending in here.
+ // Anything else is always required, and thus has to error out
+ // in case of a cfg attr.
+ //
+ // N.B., this is intentionally not part of the visit_expr() function
+ // in order for filter_map_expr() to be able to avoid this check
+ if let Some(attr) = expr.attrs().iter().find(|a| is_cfg(*a)) {
+ let msg = "removing an expression is not supported in this position";
+ self.sess.parse_sess.span_diagnostic.span_err(attr.span, msg);
+ }
+
+ self.process_cfg_attrs(expr);
+ self.try_configure_tokens(&mut *expr);
+ }
+}
+
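+/// Extracts the single predicate from a `cfg(...)` meta item, emitting an
+/// error and returning `None` for malformed forms: a bare `cfg`, an empty
+/// `cfg()`, multiple predicates, or a literal predicate key.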
+pub fn parse_cfg<'a>(meta_item: &'a MetaItem, sess: &Session) -> Option<&'a MetaItem> {
+ let error = |span, msg, suggestion: &str| {
+ let mut err = sess.parse_sess.span_diagnostic.struct_span_err(span, msg);
+ if !suggestion.is_empty() {
+ err.span_suggestion(
+ span,
+ "expected syntax is",
+ suggestion,
+ Applicability::HasPlaceholders,
+ );
+ }
+ err.emit();
+ None
+ };
+ let span = meta_item.span;
+ match meta_item.meta_item_list() {
+ None => error(span, "`cfg` is not followed by parentheses", "cfg(/* predicate */)"),
+ Some([]) => error(span, "`cfg` predicate is not specified", ""),
+ Some([_, .., l]) => error(l.span(), "multiple `cfg` predicates are specified", ""),
+ Some([single]) => match single.meta_item() {
+ Some(meta_item) => Some(meta_item),
+ None => error(single.span(), "`cfg` predicate key cannot be a literal", ""),
+ },
+ }
+}
+
+fn is_cfg(attr: &Attribute) -> bool {
+ attr.has_name(sym::cfg)
+}
diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs
new file mode 100644
index 000000000..93eeca5b2
--- /dev/null
+++ b/compiler/rustc_expand/src/expand.rs
@@ -0,0 +1,1888 @@
+use crate::base::*;
+use crate::config::StripUnconfigured;
+use crate::hygiene::SyntaxContext;
+use crate::mbe::macro_rules::annotate_err_with_kind;
+use crate::module::{mod_dir_path, parse_external_mod, DirOwnership, ParsedExternalMod};
+use crate::placeholders::{placeholder, PlaceholderExpander};
+
+use rustc_ast as ast;
+use rustc_ast::mut_visit::*;
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter};
+use rustc_ast::tokenstream::TokenStream;
+use rustc_ast::visit::{self, AssocCtxt, Visitor};
+use rustc_ast::{AssocItemKind, AstNodeWrapper, AttrStyle, ExprKind, ForeignItemKind};
+use rustc_ast::{HasAttrs, HasNodeId};
+use rustc_ast::{Inline, ItemKind, MacArgs, MacStmtStyle, MetaItemKind, ModKind};
+use rustc_ast::{NestedMetaItem, NodeId, PatKind, StmtKind, TyKind};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::map_in_place::MapInPlace;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Applicability, PResult};
+use rustc_feature::Features;
+use rustc_parse::parser::{
+ AttemptLocalParseRecovery, CommaRecoveryMode, ForceCollect, Parser, RecoverColon, RecoverComma,
+};
+use rustc_parse::validate_attr;
+use rustc_session::lint::builtin::{UNUSED_ATTRIBUTES, UNUSED_DOC_COMMENTS};
+use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_session::parse::{feature_err, ParseSess};
+use rustc_session::Limit;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::{FileName, LocalExpnId, Span};
+
+use smallvec::SmallVec;
+use std::ops::Deref;
+use std::path::PathBuf;
+use std::rc::Rc;
+use std::{iter, mem};
+
+macro_rules! ast_fragments {
+ (
+ $($Kind:ident($AstTy:ty) {
+ $kind_name:expr;
+ $(one fn $mut_visit_ast:ident; fn $visit_ast:ident;)?
+ $(many fn $flat_map_ast_elt:ident; fn $visit_ast_elt:ident($($args:tt)*);)?
+ fn $make_ast:ident;
+ })*
+ ) => {
+ /// A fragment of AST that can be produced by a single macro expansion.
+ /// Can also serve as an input and intermediate result for macro expansion operations.
+ pub enum AstFragment {
+ OptExpr(Option<P<ast::Expr>>),
+ $($Kind($AstTy),)*
+ }
+
+ /// "Discriminant" of an AST fragment.
+ #[derive(Copy, Clone, PartialEq, Eq)]
+ pub enum AstFragmentKind {
+ OptExpr,
+ $($Kind,)*
+ }
+
+ impl AstFragmentKind {
+ pub fn name(self) -> &'static str {
+ match self {
+ AstFragmentKind::OptExpr => "expression",
+ $(AstFragmentKind::$Kind => $kind_name,)*
+ }
+ }
+
+ fn make_from<'a>(self, result: Box<dyn MacResult + 'a>) -> Option<AstFragment> {
+ match self {
+ AstFragmentKind::OptExpr =>
+ result.make_expr().map(Some).map(AstFragment::OptExpr),
+ $(AstFragmentKind::$Kind => result.$make_ast().map(AstFragment::$Kind),)*
+ }
+ }
+ }
+
+ impl AstFragment {
+ pub fn add_placeholders(&mut self, placeholders: &[NodeId]) {
+ if placeholders.is_empty() {
+ return;
+ }
+ match self {
+ $($(AstFragment::$Kind(ast) => ast.extend(placeholders.iter().flat_map(|id| {
+ ${ignore(flat_map_ast_elt)}
+ placeholder(AstFragmentKind::$Kind, *id, None).$make_ast()
+ })),)?)*
+ _ => panic!("unexpected AST fragment kind")
+ }
+ }
+
+ pub fn make_opt_expr(self) -> Option<P<ast::Expr>> {
+ match self {
+ AstFragment::OptExpr(expr) => expr,
+ _ => panic!("AstFragment::make_* called on the wrong kind of fragment"),
+ }
+ }
+
+ $(pub fn $make_ast(self) -> $AstTy {
+ match self {
+ AstFragment::$Kind(ast) => ast,
+ _ => panic!("AstFragment::make_* called on the wrong kind of fragment"),
+ }
+ })*
+
+ fn make_ast<T: InvocationCollectorNode>(self) -> T::OutputTy {
+ T::fragment_to_output(self)
+ }
+
+ pub fn mut_visit_with<F: MutVisitor>(&mut self, vis: &mut F) {
+ match self {
+ AstFragment::OptExpr(opt_expr) => {
+ visit_clobber(opt_expr, |opt_expr| {
+ if let Some(expr) = opt_expr {
+ vis.filter_map_expr(expr)
+ } else {
+ None
+ }
+ });
+ }
+ $($(AstFragment::$Kind(ast) => vis.$mut_visit_ast(ast),)?)*
+ $($(AstFragment::$Kind(ast) =>
+ ast.flat_map_in_place(|ast| vis.$flat_map_ast_elt(ast)),)?)*
+ }
+ }
+
+ pub fn visit_with<'a, V: Visitor<'a>>(&'a self, visitor: &mut V) {
+ match *self {
+ AstFragment::OptExpr(Some(ref expr)) => visitor.visit_expr(expr),
+ AstFragment::OptExpr(None) => {}
+ $($(AstFragment::$Kind(ref ast) => visitor.$visit_ast(ast),)?)*
+ $($(AstFragment::$Kind(ref ast) => for ast_elt in &ast[..] {
+ visitor.$visit_ast_elt(ast_elt, $($args)*);
+ })?)*
+ }
+ }
+ }
+
+ impl<'a> MacResult for crate::mbe::macro_rules::ParserAnyMacro<'a> {
+ $(fn $make_ast(self: Box<crate::mbe::macro_rules::ParserAnyMacro<'a>>)
+ -> Option<$AstTy> {
+ Some(self.make(AstFragmentKind::$Kind).$make_ast())
+ })*
+ }
+ }
+}
+
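+// The invocation below generates, for each fragment kind, an `AstFragment`
+// variant, a `make_*` accessor, and the visitor plumbing; e.g. the `Expr` line
+// yields `AstFragment::Expr` and `make_expr` (see the macro definition above).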
+ast_fragments! {
+ Expr(P<ast::Expr>) { "expression"; one fn visit_expr; fn visit_expr; fn make_expr; }
+ Pat(P<ast::Pat>) { "pattern"; one fn visit_pat; fn visit_pat; fn make_pat; }
+ Ty(P<ast::Ty>) { "type"; one fn visit_ty; fn visit_ty; fn make_ty; }
+ Stmts(SmallVec<[ast::Stmt; 1]>) {
+ "statement"; many fn flat_map_stmt; fn visit_stmt(); fn make_stmts;
+ }
+ Items(SmallVec<[P<ast::Item>; 1]>) {
+ "item"; many fn flat_map_item; fn visit_item(); fn make_items;
+ }
+ TraitItems(SmallVec<[P<ast::AssocItem>; 1]>) {
+ "trait item";
+ many fn flat_map_trait_item;
+ fn visit_assoc_item(AssocCtxt::Trait);
+ fn make_trait_items;
+ }
+ ImplItems(SmallVec<[P<ast::AssocItem>; 1]>) {
+ "impl item";
+ many fn flat_map_impl_item;
+ fn visit_assoc_item(AssocCtxt::Impl);
+ fn make_impl_items;
+ }
+ ForeignItems(SmallVec<[P<ast::ForeignItem>; 1]>) {
+ "foreign item";
+ many fn flat_map_foreign_item;
+ fn visit_foreign_item();
+ fn make_foreign_items;
+ }
+ Arms(SmallVec<[ast::Arm; 1]>) {
+ "match arm"; many fn flat_map_arm; fn visit_arm(); fn make_arms;
+ }
+ ExprFields(SmallVec<[ast::ExprField; 1]>) {
+ "field expression"; many fn flat_map_expr_field; fn visit_expr_field(); fn make_expr_fields;
+ }
+ PatFields(SmallVec<[ast::PatField; 1]>) {
+ "field pattern";
+ many fn flat_map_pat_field;
+ fn visit_pat_field();
+ fn make_pat_fields;
+ }
+ GenericParams(SmallVec<[ast::GenericParam; 1]>) {
+ "generic parameter";
+ many fn flat_map_generic_param;
+ fn visit_generic_param();
+ fn make_generic_params;
+ }
+ Params(SmallVec<[ast::Param; 1]>) {
+ "function parameter"; many fn flat_map_param; fn visit_param(); fn make_params;
+ }
+ FieldDefs(SmallVec<[ast::FieldDef; 1]>) {
+ "field";
+ many fn flat_map_field_def;
+ fn visit_field_def();
+ fn make_field_defs;
+ }
+ Variants(SmallVec<[ast::Variant; 1]>) {
+ "variant"; many fn flat_map_variant; fn visit_variant(); fn make_variants;
+ }
+ Crate(ast::Crate) { "crate"; one fn visit_crate; fn visit_crate; fn make_crate; }
+}
+
+pub enum SupportsMacroExpansion {
+ No,
+ Yes { supports_inner_attrs: bool },
+}
+
+impl AstFragmentKind {
+ pub(crate) fn dummy(self, span: Span) -> AstFragment {
+ self.make_from(DummyResult::any(span)).expect("couldn't create a dummy AST fragment")
+ }
+
+ pub fn supports_macro_expansion(self) -> SupportsMacroExpansion {
+ match self {
+ AstFragmentKind::OptExpr
+ | AstFragmentKind::Expr
+ | AstFragmentKind::Stmts
+ | AstFragmentKind::Ty
+ | AstFragmentKind::Pat => SupportsMacroExpansion::Yes { supports_inner_attrs: false },
+ AstFragmentKind::Items
+ | AstFragmentKind::TraitItems
+ | AstFragmentKind::ImplItems
+ | AstFragmentKind::ForeignItems
+ | AstFragmentKind::Crate => SupportsMacroExpansion::Yes { supports_inner_attrs: true },
+ AstFragmentKind::Arms
+ | AstFragmentKind::ExprFields
+ | AstFragmentKind::PatFields
+ | AstFragmentKind::GenericParams
+ | AstFragmentKind::Params
+ | AstFragmentKind::FieldDefs
+ | AstFragmentKind::Variants => SupportsMacroExpansion::No,
+ }
+ }
+
+ fn expect_from_annotatables<I: IntoIterator<Item = Annotatable>>(
+ self,
+ items: I,
+ ) -> AstFragment {
+ let mut items = items.into_iter();
+ match self {
+ AstFragmentKind::Arms => {
+ AstFragment::Arms(items.map(Annotatable::expect_arm).collect())
+ }
+ AstFragmentKind::ExprFields => {
+ AstFragment::ExprFields(items.map(Annotatable::expect_expr_field).collect())
+ }
+ AstFragmentKind::PatFields => {
+ AstFragment::PatFields(items.map(Annotatable::expect_pat_field).collect())
+ }
+ AstFragmentKind::GenericParams => {
+ AstFragment::GenericParams(items.map(Annotatable::expect_generic_param).collect())
+ }
+ AstFragmentKind::Params => {
+ AstFragment::Params(items.map(Annotatable::expect_param).collect())
+ }
+ AstFragmentKind::FieldDefs => {
+ AstFragment::FieldDefs(items.map(Annotatable::expect_field_def).collect())
+ }
+ AstFragmentKind::Variants => {
+ AstFragment::Variants(items.map(Annotatable::expect_variant).collect())
+ }
+ AstFragmentKind::Items => {
+ AstFragment::Items(items.map(Annotatable::expect_item).collect())
+ }
+ AstFragmentKind::ImplItems => {
+ AstFragment::ImplItems(items.map(Annotatable::expect_impl_item).collect())
+ }
+ AstFragmentKind::TraitItems => {
+ AstFragment::TraitItems(items.map(Annotatable::expect_trait_item).collect())
+ }
+ AstFragmentKind::ForeignItems => {
+ AstFragment::ForeignItems(items.map(Annotatable::expect_foreign_item).collect())
+ }
+ AstFragmentKind::Stmts => {
+ AstFragment::Stmts(items.map(Annotatable::expect_stmt).collect())
+ }
+ AstFragmentKind::Expr => AstFragment::Expr(
+ items.next().expect("expected exactly one expression").expect_expr(),
+ ),
+ AstFragmentKind::OptExpr => {
+ AstFragment::OptExpr(items.next().map(Annotatable::expect_expr))
+ }
+ AstFragmentKind::Crate => {
+ AstFragment::Crate(items.next().expect("expected exactly one crate").expect_crate())
+ }
+ AstFragmentKind::Pat | AstFragmentKind::Ty => {
+ panic!("patterns and types aren't annotatable")
+ }
+ }
+ }
+}
+
+pub struct Invocation {
+ pub kind: InvocationKind,
+ pub fragment_kind: AstFragmentKind,
+ pub expansion_data: ExpansionData,
+}
+
+pub enum InvocationKind {
+ Bang {
+ mac: ast::MacCall,
+ span: Span,
+ },
+ Attr {
+ attr: ast::Attribute,
+ // Re-insertion position for inert attributes.
+ pos: usize,
+ item: Annotatable,
+ // Required for resolving derive helper attributes.
+ derives: Vec<ast::Path>,
+ },
+ Derive {
+ path: ast::Path,
+ item: Annotatable,
+ },
+}
+
+impl InvocationKind {
+ fn placeholder_visibility(&self) -> Option<ast::Visibility> {
+ // HACK: For unnamed fields placeholders should have the same visibility as the actual
+ // fields because for tuple structs/variants resolve determines visibilities of their
+ // constructor using these field visibilities before attributes on them are expanded.
+ // The assumption is that the attribute expansion cannot change field visibilities,
+ // and it holds because only inert attributes are supported in this position.
+ match self {
+ InvocationKind::Attr { item: Annotatable::FieldDef(field), .. }
+ | InvocationKind::Derive { item: Annotatable::FieldDef(field), .. }
+ if field.ident.is_none() =>
+ {
+ Some(field.vis.clone())
+ }
+ _ => None,
+ }
+ }
+}
+
+impl Invocation {
+ pub fn span(&self) -> Span {
+ match &self.kind {
+ InvocationKind::Bang { span, .. } => *span,
+ InvocationKind::Attr { attr, .. } => attr.span,
+ InvocationKind::Derive { path, .. } => path.span,
+ }
+ }
+}
+
+pub struct MacroExpander<'a, 'b> {
+ pub cx: &'a mut ExtCtxt<'b>,
+ monotonic: bool, // cf. `cx.monotonic_expander()`
+}
+
+impl<'a, 'b> MacroExpander<'a, 'b> {
+ pub fn new(cx: &'a mut ExtCtxt<'b>, monotonic: bool) -> Self {
+ MacroExpander { cx, monotonic }
+ }
+
+ pub fn expand_crate(&mut self, krate: ast::Crate) -> ast::Crate {
+ let file_path = match self.cx.source_map().span_to_filename(krate.spans.inner_span) {
+ FileName::Real(name) => name
+ .into_local_path()
+ .expect("attempting to resolve a file path in an external file"),
+ other => PathBuf::from(other.prefer_local().to_string()),
+ };
+ let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
+ self.cx.root_path = dir_path.clone();
+ self.cx.current_expansion.module = Rc::new(ModuleData {
+ mod_path: vec![Ident::from_str(&self.cx.ecfg.crate_name)],
+ file_path_stack: vec![file_path],
+ dir_path,
+ });
+ let krate = self.fully_expand_fragment(AstFragment::Crate(krate)).make_crate();
+ assert_eq!(krate.id, ast::CRATE_NODE_ID);
+ self.cx.trace_macros_diag();
+ krate
+ }
+
+ // Recursively expand all macro invocations in this AST fragment.
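+ // The worklist loop below repeatedly pops invocations, re-queues those that
+ // cannot be resolved or expanded yet, and switches to "force" mode once a
+ // full pass makes no progress.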
+ pub fn fully_expand_fragment(&mut self, input_fragment: AstFragment) -> AstFragment {
+ let orig_expansion_data = self.cx.current_expansion.clone();
+ let orig_force_mode = self.cx.force_mode;
+
+ // Collect all macro invocations and replace them with placeholders.
+ let (mut fragment_with_placeholders, mut invocations) =
+ self.collect_invocations(input_fragment, &[]);
+
+ // Optimization: if we resolve all imports now,
+ // we'll be able to immediately resolve most of imported macros.
+ self.resolve_imports();
+
+ // Resolve paths in all invocations and produce output expanded fragments for them, but
+ // do not insert them into our input AST fragment yet, only store in `expanded_fragments`.
+ // The output fragments also go through expansion recursively until no invocations are left.
+ // Unresolved macros produce dummy outputs as a recovery measure.
+ invocations.reverse();
+ let mut expanded_fragments = Vec::new();
+ let mut undetermined_invocations = Vec::new();
+ let (mut progress, mut force) = (false, !self.monotonic);
+ loop {
+ let Some((invoc, ext)) = invocations.pop() else {
+ self.resolve_imports();
+ if undetermined_invocations.is_empty() {
+ break;
+ }
+ invocations = mem::take(&mut undetermined_invocations);
+ force = !mem::replace(&mut progress, false);
+ if force && self.monotonic {
+ self.cx.sess.delay_span_bug(
+ invocations.last().unwrap().0.span(),
+ "expansion entered force mode without producing any errors",
+ );
+ }
+ continue;
+ };
+
+ let ext = match ext {
+ Some(ext) => ext,
+ None => {
+ let eager_expansion_root = if self.monotonic {
+ invoc.expansion_data.id
+ } else {
+ orig_expansion_data.id
+ };
+ match self.cx.resolver.resolve_macro_invocation(
+ &invoc,
+ eager_expansion_root,
+ force,
+ ) {
+ Ok(ext) => ext,
+ Err(Indeterminate) => {
+ // Cannot resolve, will retry this invocation later.
+ undetermined_invocations.push((invoc, None));
+ continue;
+ }
+ }
+ }
+ };
+
+ let ExpansionData { depth, id: expn_id, .. } = invoc.expansion_data;
+ let depth = depth - orig_expansion_data.depth;
+ self.cx.current_expansion = invoc.expansion_data.clone();
+ self.cx.force_mode = force;
+
+ let fragment_kind = invoc.fragment_kind;
+ let (expanded_fragment, new_invocations) = match self.expand_invoc(invoc, &ext.kind) {
+ ExpandResult::Ready(fragment) => {
+ let mut derive_invocations = Vec::new();
+ let derive_placeholders = self
+ .cx
+ .resolver
+ .take_derive_resolutions(expn_id)
+ .map(|derives| {
+ derive_invocations.reserve(derives.len());
+ derives
+ .into_iter()
+ .map(|(path, item, _exts)| {
+ // FIXME: Consider using the derive resolutions (`_exts`)
+ // instead of enqueuing the derives to be resolved again later.
+ let expn_id = LocalExpnId::fresh_empty();
+ derive_invocations.push((
+ Invocation {
+ kind: InvocationKind::Derive { path, item },
+ fragment_kind,
+ expansion_data: ExpansionData {
+ id: expn_id,
+ ..self.cx.current_expansion.clone()
+ },
+ },
+ None,
+ ));
+ NodeId::placeholder_from_expn_id(expn_id)
+ })
+ .collect::<Vec<_>>()
+ })
+ .unwrap_or_default();
+
+ let (fragment, collected_invocations) =
+ self.collect_invocations(fragment, &derive_placeholders);
+ // We choose to expand any derive invocations associated with this macro invocation
+ // *before* any macro invocations collected from the output fragment.
+ derive_invocations.extend(collected_invocations);
+ (fragment, derive_invocations)
+ }
+ ExpandResult::Retry(invoc) => {
+ if force {
+ self.cx.span_bug(
+ invoc.span(),
+ "expansion entered force mode but is still stuck",
+ );
+ } else {
+ // Cannot expand, will retry this invocation later.
+ undetermined_invocations.push((invoc, Some(ext)));
+ continue;
+ }
+ }
+ };
+
+ progress = true;
+ if expanded_fragments.len() < depth {
+ expanded_fragments.push(Vec::new());
+ }
+ expanded_fragments[depth - 1].push((expn_id, expanded_fragment));
+ invocations.extend(new_invocations.into_iter().rev());
+ }
+
+ self.cx.current_expansion = orig_expansion_data;
+ self.cx.force_mode = orig_force_mode;
+
+ // Finally incorporate all the expanded macros into the input AST fragment.
+ let mut placeholder_expander = PlaceholderExpander::default();
+ while let Some(expanded_fragments) = expanded_fragments.pop() {
+ for (expn_id, expanded_fragment) in expanded_fragments.into_iter().rev() {
+ placeholder_expander
+ .add(NodeId::placeholder_from_expn_id(expn_id), expanded_fragment);
+ }
+ }
+ fragment_with_placeholders.mut_visit_with(&mut placeholder_expander);
+ fragment_with_placeholders
+ }
+
+ fn resolve_imports(&mut self) {
+ if self.monotonic {
+ self.cx.resolver.resolve_imports();
+ }
+ }
+
+ /// Collects all macro invocations reachable at this time in this AST fragment, and replaces
+ /// them with "placeholders" - dummy macro invocations with specially crafted `NodeId`s.
+ /// Then calls into the resolver, which builds a skeleton ("reduced graph") of the fragment
+ /// and prepares data for resolving paths of macro invocations.
+ fn collect_invocations(
+ &mut self,
+ mut fragment: AstFragment,
+ extra_placeholders: &[NodeId],
+ ) -> (AstFragment, Vec<(Invocation, Option<Lrc<SyntaxExtension>>)>) {
+ // Resolve `$crate`s in the fragment for pretty-printing.
+ self.cx.resolver.resolve_dollar_crates();
+
+ let mut invocations = {
+ let mut collector = InvocationCollector {
+ // Non-derive macro invocations cannot see the results of cfg expansion - they
+ // will either be removed along with the item, or invoked before the cfg/cfg_attr
+ // attribute is expanded. Therefore, we don't need to configure the tokens.
+ // Derive macros *can* see the results of cfg-expansion - they are handled
+ // specially in `fully_expand_fragment`.
+ cx: self.cx,
+ invocations: Vec::new(),
+ monotonic: self.monotonic,
+ };
+ fragment.mut_visit_with(&mut collector);
+ fragment.add_placeholders(extra_placeholders);
+ collector.invocations
+ };
+
+ if self.monotonic {
+ self.cx
+ .resolver
+ .visit_ast_fragment_with_placeholders(self.cx.current_expansion.id, &fragment);
+
+ if self.cx.sess.opts.unstable_opts.incremental_relative_spans {
+ for (invoc, _) in invocations.iter_mut() {
+ let expn_id = invoc.expansion_data.id;
+ let parent_def = self.cx.resolver.invocation_parent(expn_id);
+ let span = match &mut invoc.kind {
+ InvocationKind::Bang { ref mut span, .. } => span,
+ InvocationKind::Attr { attr, .. } => &mut attr.span,
+ InvocationKind::Derive { path, .. } => &mut path.span,
+ };
+ *span = span.with_parent(Some(parent_def));
+ }
+ }
+ }
+
+ (fragment, invocations)
+ }
+
+ fn error_recursion_limit_reached(&mut self) {
+ let expn_data = self.cx.current_expansion.id.expn_data();
+ let suggested_limit = match self.cx.ecfg.recursion_limit {
+ Limit(0) => Limit(2),
+ limit => limit * 2,
+ };
+ self.cx
+ .struct_span_err(
+ expn_data.call_site,
+ &format!("recursion limit reached while expanding `{}`", expn_data.kind.descr()),
+ )
+ .help(&format!(
+ "consider increasing the recursion limit by adding a \
+ `#![recursion_limit = \"{}\"]` attribute to your crate (`{}`)",
+ suggested_limit, self.cx.ecfg.crate_name,
+ ))
+ .emit();
+ self.cx.trace_macros_diag();
+ }
+
+ /// A macro's expansion does not fit in this fragment kind.
+ /// For example, a non-type macro in a type position.
+ fn error_wrong_fragment_kind(&mut self, kind: AstFragmentKind, mac: &ast::MacCall, span: Span) {
+ let msg = format!(
+ "non-{kind} macro in {kind} position: {path}",
+ kind = kind.name(),
+ path = pprust::path_to_string(&mac.path),
+ );
+ self.cx.span_err(span, &msg);
+ self.cx.trace_macros_diag();
+ }
+
+ fn expand_invoc(
+ &mut self,
+ invoc: Invocation,
+ ext: &SyntaxExtensionKind,
+ ) -> ExpandResult<AstFragment, Invocation> {
+ let recursion_limit =
+ self.cx.reduced_recursion_limit.unwrap_or(self.cx.ecfg.recursion_limit);
+ if !recursion_limit.value_within_limit(self.cx.current_expansion.depth) {
+ if self.cx.reduced_recursion_limit.is_none() {
+ self.error_recursion_limit_reached();
+ }
+
+ // Reduce the recursion limit by half each time it triggers.
+ self.cx.reduced_recursion_limit = Some(recursion_limit / 2);
+
+ return ExpandResult::Ready(invoc.fragment_kind.dummy(invoc.span()));
+ }
+
+ let (fragment_kind, span) = (invoc.fragment_kind, invoc.span());
+ ExpandResult::Ready(match invoc.kind {
+ InvocationKind::Bang { mac, .. } => match ext {
+ SyntaxExtensionKind::Bang(expander) => {
+ let Ok(tok_result) = expander.expand(self.cx, span, mac.args.inner_tokens()) else {
+ return ExpandResult::Ready(fragment_kind.dummy(span));
+ };
+ self.parse_ast_fragment(tok_result, fragment_kind, &mac.path, span)
+ }
+ SyntaxExtensionKind::LegacyBang(expander) => {
+ let prev = self.cx.current_expansion.prior_type_ascription;
+ self.cx.current_expansion.prior_type_ascription = mac.prior_type_ascription;
+ let tok_result = expander.expand(self.cx, span, mac.args.inner_tokens());
+ let result = if let Some(result) = fragment_kind.make_from(tok_result) {
+ result
+ } else {
+ self.error_wrong_fragment_kind(fragment_kind, &mac, span);
+ fragment_kind.dummy(span)
+ };
+ self.cx.current_expansion.prior_type_ascription = prev;
+ result
+ }
+ _ => unreachable!(),
+ },
+ InvocationKind::Attr { attr, pos, mut item, derives } => match ext {
+ SyntaxExtensionKind::Attr(expander) => {
+ self.gate_proc_macro_input(&item);
+ self.gate_proc_macro_attr_item(span, &item);
+ let tokens = match &item {
+ // FIXME: Collect tokens and use them instead of generating
+ // fake ones. These are unstable, so it needs to be
+ // fixed prior to stabilization.
+ // Fake tokens when we are invoking an inner attribute, and
+ // we are invoking it on an out-of-line module or crate.
+ Annotatable::Crate(krate) => rustc_parse::fake_token_stream_for_crate(
+ &self.cx.sess.parse_sess,
+ krate,
+ ),
+ Annotatable::Item(item_inner)
+ if matches!(attr.style, AttrStyle::Inner)
+ && matches!(
+ item_inner.kind,
+ ItemKind::Mod(
+ _,
+ ModKind::Unloaded | ModKind::Loaded(_, Inline::No, _),
+ )
+ ) =>
+ {
+ rustc_parse::fake_token_stream_for_item(
+ &self.cx.sess.parse_sess,
+ item_inner,
+ )
+ }
+ _ => item.to_tokens(),
+ };
+ let attr_item = attr.unwrap_normal_item();
+ if let MacArgs::Eq(..) = attr_item.args {
+ self.cx.span_err(span, "key-value macro attributes are not supported");
+ }
+ let inner_tokens = attr_item.args.inner_tokens();
+ let Ok(tok_result) = expander.expand(self.cx, span, inner_tokens, tokens) else {
+ return ExpandResult::Ready(fragment_kind.dummy(span));
+ };
+ self.parse_ast_fragment(tok_result, fragment_kind, &attr_item.path, span)
+ }
+ SyntaxExtensionKind::LegacyAttr(expander) => {
+ match validate_attr::parse_meta(&self.cx.sess.parse_sess, &attr) {
+ Ok(meta) => {
+ let items = match expander.expand(self.cx, span, &meta, item) {
+ ExpandResult::Ready(items) => items,
+ ExpandResult::Retry(item) => {
+ // Reassemble the original invocation for retrying.
+ return ExpandResult::Retry(Invocation {
+ kind: InvocationKind::Attr { attr, pos, item, derives },
+ ..invoc
+ });
+ }
+ };
+ if fragment_kind == AstFragmentKind::Expr && items.is_empty() {
+ let msg =
+ "removing an expression is not supported in this position";
+ self.cx.span_err(span, msg);
+ fragment_kind.dummy(span)
+ } else {
+ fragment_kind.expect_from_annotatables(items)
+ }
+ }
+ Err(mut err) => {
+ err.emit();
+ fragment_kind.dummy(span)
+ }
+ }
+ }
+ SyntaxExtensionKind::NonMacroAttr => {
+ self.cx.expanded_inert_attrs.mark(&attr);
+ item.visit_attrs(|attrs| attrs.insert(pos, attr));
+ fragment_kind.expect_from_annotatables(iter::once(item))
+ }
+ _ => unreachable!(),
+ },
+ InvocationKind::Derive { path, item } => match ext {
+ SyntaxExtensionKind::Derive(expander)
+ | SyntaxExtensionKind::LegacyDerive(expander) => {
+ if let SyntaxExtensionKind::Derive(..) = ext {
+ self.gate_proc_macro_input(&item);
+ }
+ let meta = ast::MetaItem { kind: MetaItemKind::Word, span, path };
+ let items = match expander.expand(self.cx, span, &meta, item) {
+ ExpandResult::Ready(items) => items,
+ ExpandResult::Retry(item) => {
+ // Reassemble the original invocation for retrying.
+ return ExpandResult::Retry(Invocation {
+ kind: InvocationKind::Derive { path: meta.path, item },
+ ..invoc
+ });
+ }
+ };
+ fragment_kind.expect_from_annotatables(items)
+ }
+ _ => unreachable!(),
+ },
+ })
+ }
+
+ fn gate_proc_macro_attr_item(&self, span: Span, item: &Annotatable) {
+ let kind = match item {
+ Annotatable::Item(_)
+ | Annotatable::TraitItem(_)
+ | Annotatable::ImplItem(_)
+ | Annotatable::ForeignItem(_)
+ | Annotatable::Crate(..) => return,
+ Annotatable::Stmt(stmt) => {
+ // Attributes are stable on item statements,
+ // but unstable on all other kinds of statements
+ if stmt.is_item() {
+ return;
+ }
+ "statements"
+ }
+ Annotatable::Expr(_) => "expressions",
+ Annotatable::Arm(..)
+ | Annotatable::ExprField(..)
+ | Annotatable::PatField(..)
+ | Annotatable::GenericParam(..)
+ | Annotatable::Param(..)
+ | Annotatable::FieldDef(..)
+ | Annotatable::Variant(..) => panic!("unexpected annotatable"),
+ };
+ if self.cx.ecfg.proc_macro_hygiene() {
+ return;
+ }
+ feature_err(
+ &self.cx.sess.parse_sess,
+ sym::proc_macro_hygiene,
+ span,
+ &format!("custom attributes cannot be applied to {}", kind),
+ )
+ .emit();
+ }
+
+ fn gate_proc_macro_input(&self, annotatable: &Annotatable) {
+ struct GateProcMacroInput<'a> {
+ parse_sess: &'a ParseSess,
+ }
+
+ impl<'ast, 'a> Visitor<'ast> for GateProcMacroInput<'a> {
+ fn visit_item(&mut self, item: &'ast ast::Item) {
+ match &item.kind {
+ ItemKind::Mod(_, mod_kind)
+ if !matches!(mod_kind, ModKind::Loaded(_, Inline::Yes, _)) =>
+ {
+ feature_err(
+ self.parse_sess,
+ sym::proc_macro_hygiene,
+ item.span,
+ "non-inline modules in proc macro input are unstable",
+ )
+ .emit();
+ }
+ _ => {}
+ }
+
+ visit::walk_item(self, item);
+ }
+ }
+
+ if !self.cx.ecfg.proc_macro_hygiene() {
+ annotatable
+ .visit_with(&mut GateProcMacroInput { parse_sess: &self.cx.sess.parse_sess });
+ }
+ }
+
+ fn parse_ast_fragment(
+ &mut self,
+ toks: TokenStream,
+ kind: AstFragmentKind,
+ path: &ast::Path,
+ span: Span,
+ ) -> AstFragment {
+ let mut parser = self.cx.new_parser_from_tts(toks);
+ match parse_ast_fragment(&mut parser, kind) {
+ Ok(fragment) => {
+ ensure_complete_parse(&mut parser, path, kind.name(), span);
+ fragment
+ }
+ Err(mut err) => {
+ if err.span.is_dummy() {
+ err.set_span(span);
+ }
+ annotate_err_with_kind(&mut err, kind, span);
+ err.emit();
+ self.cx.trace_macros_diag();
+ kind.dummy(span)
+ }
+ }
+ }
+}
+
+pub fn parse_ast_fragment<'a>(
+ this: &mut Parser<'a>,
+ kind: AstFragmentKind,
+) -> PResult<'a, AstFragment> {
+ Ok(match kind {
+ AstFragmentKind::Items => {
+ let mut items = SmallVec::new();
+ while let Some(item) = this.parse_item(ForceCollect::No)? {
+ items.push(item);
+ }
+ AstFragment::Items(items)
+ }
+ AstFragmentKind::TraitItems => {
+ let mut items = SmallVec::new();
+ while let Some(item) = this.parse_trait_item(ForceCollect::No)? {
+ items.extend(item);
+ }
+ AstFragment::TraitItems(items)
+ }
+ AstFragmentKind::ImplItems => {
+ let mut items = SmallVec::new();
+ while let Some(item) = this.parse_impl_item(ForceCollect::No)? {
+ items.extend(item);
+ }
+ AstFragment::ImplItems(items)
+ }
+ AstFragmentKind::ForeignItems => {
+ let mut items = SmallVec::new();
+ while let Some(item) = this.parse_foreign_item(ForceCollect::No)? {
+ items.extend(item);
+ }
+ AstFragment::ForeignItems(items)
+ }
+ AstFragmentKind::Stmts => {
+ let mut stmts = SmallVec::new();
+ // Won't make progress on a `}`.
+ while this.token != token::Eof && this.token != token::CloseDelim(Delimiter::Brace) {
+ if let Some(stmt) = this.parse_full_stmt(AttemptLocalParseRecovery::Yes)? {
+ stmts.push(stmt);
+ }
+ }
+ AstFragment::Stmts(stmts)
+ }
+ AstFragmentKind::Expr => AstFragment::Expr(this.parse_expr()?),
+ AstFragmentKind::OptExpr => {
+ if this.token != token::Eof {
+ AstFragment::OptExpr(Some(this.parse_expr()?))
+ } else {
+ AstFragment::OptExpr(None)
+ }
+ }
+ AstFragmentKind::Ty => AstFragment::Ty(this.parse_ty()?),
+ AstFragmentKind::Pat => AstFragment::Pat(this.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::No,
+ RecoverColon::Yes,
+ CommaRecoveryMode::LikelyTuple,
+ )?),
+ AstFragmentKind::Crate => AstFragment::Crate(this.parse_crate_mod()?),
+ AstFragmentKind::Arms
+ | AstFragmentKind::ExprFields
+ | AstFragmentKind::PatFields
+ | AstFragmentKind::GenericParams
+ | AstFragmentKind::Params
+ | AstFragmentKind::FieldDefs
+ | AstFragmentKind::Variants => panic!("unexpected AST fragment kind"),
+ })
+}
+
+pub fn ensure_complete_parse<'a>(
+ this: &mut Parser<'a>,
+ macro_path: &ast::Path,
+ kind_name: &str,
+ span: Span,
+) {
+ if this.token != token::Eof {
+ let token = pprust::token_to_string(&this.token);
+ let msg = format!("macro expansion ignores token `{}` and any following", token);
+ // Avoid emitting backtrace info twice.
+ let def_site_span = this.token.span.with_ctxt(SyntaxContext::root());
+ let mut err = this.struct_span_err(def_site_span, &msg);
+ err.span_label(span, "caused by the macro expansion here");
+ let msg = format!(
+ "the usage of `{}!` is likely invalid in {} context",
+ pprust::path_to_string(macro_path),
+ kind_name,
+ );
+ err.note(&msg);
+ let semi_span = this.sess.source_map().next_point(span);
+
+ let semi_full_span = semi_span.to(this.sess.source_map().next_point(semi_span));
+ match this.sess.source_map().span_to_snippet(semi_full_span) {
+ Ok(ref snippet) if &snippet[..] != ";" && kind_name == "expression" => {
+ err.span_suggestion(
+ semi_span,
+ "you might be missing a semicolon here",
+ ";",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ }
+ err.emit();
+ }
+}
+
+/// Wraps a call to `noop_visit_*` / `noop_flat_map_*`
+/// for an AST node that supports attributes
+/// (see the `Annotatable` enum).
+/// This method assigns a `NodeId`, and sets that `NodeId`
+/// as our current 'lint node id'. If a macro call is found
+/// inside this AST node, we will use this AST node's `NodeId`
+/// to emit lints associated with that macro (allowing
+/// `#[allow]` / `#[deny]` to be applied close to
+/// the macro invocation).
+///
+/// Do *not* call this for a macro AST node
+/// (e.g. `ExprKind::MacCall`) - we cannot emit lints
+/// at these AST nodes, since they are removed and
+/// replaced with the result of macro expansion.
+///
+/// All other `NodeId`s are assigned by `visit_id`.
+/// * `self` is the 'self' parameter for the current method,
+/// * `id` is a mutable reference to the `NodeId` field
+/// of the current AST node.
+/// * `closure` is a closure that executes the
+/// `noop_visit_*` / `noop_flat_map_*` method
+/// for the current AST node.
+macro_rules! assign_id {
+ ($self:ident, $id:expr, $closure:expr) => {{
+ let old_id = $self.cx.current_expansion.lint_node_id;
+ if $self.monotonic {
+ debug_assert_eq!(*$id, ast::DUMMY_NODE_ID);
+ let new_id = $self.cx.resolver.next_node_id();
+ *$id = new_id;
+ $self.cx.current_expansion.lint_node_id = new_id;
+ }
+ let ret = ($closure)();
+ $self.cx.current_expansion.lint_node_id = old_id;
+ ret
+ }};
+}
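+
+// Illustrative sketch (not from the original source; `m!` and the lint name
+// are hypothetical): with the scheme above, a lint buffered while expanding
+// the macro call in
+//
+// #[allow(some_macro_lint)]
+// fn f() { m!(); }
+//
+// is attached to `f`'s freshly assigned `NodeId`, so the `#[allow]` placed on
+// `f` can silence it.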
+
+enum AddSemicolon {
+ Yes,
+ No,
+}
+
+/// A trait implemented for all `AstFragment` nodes and providing all pieces
+/// of functionality used by `InvocationCollector`.
+trait InvocationCollectorNode: HasAttrs + HasNodeId + Sized {
+ type OutputTy = SmallVec<[Self; 1]>;
+ type AttrsTy: Deref<Target = [ast::Attribute]> = Vec<ast::Attribute>;
+ const KIND: AstFragmentKind;
+ fn to_annotatable(self) -> Annotatable;
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy;
+ fn descr() -> &'static str {
+ unreachable!()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, _visitor: &mut V) -> Self::OutputTy {
+ unreachable!()
+ }
+ fn noop_visit<V: MutVisitor>(&mut self, _visitor: &mut V) {
+ unreachable!()
+ }
+ fn is_mac_call(&self) -> bool {
+ false
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ unreachable!()
+ }
+ fn pre_flat_map_node_collect_attr(_cfg: &StripUnconfigured<'_>, _attr: &ast::Attribute) {}
+ fn post_flat_map_node_collect_bang(_output: &mut Self::OutputTy, _add_semicolon: AddSemicolon) {
+ }
+ fn wrap_flat_map_node_noop_flat_map(
+ node: Self,
+ collector: &mut InvocationCollector<'_, '_>,
+ noop_flat_map: impl FnOnce(Self, &mut InvocationCollector<'_, '_>) -> Self::OutputTy,
+ ) -> Result<Self::OutputTy, Self> {
+ Ok(noop_flat_map(node, collector))
+ }
+}
+
+impl InvocationCollectorNode for P<ast::Item> {
+ const KIND: AstFragmentKind = AstFragmentKind::Items;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::Item(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_items()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_item(self, visitor)
+ }
+ fn is_mac_call(&self) -> bool {
+ matches!(self.kind, ItemKind::MacCall(..))
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ let node = self.into_inner();
+ match node.kind {
+ ItemKind::MacCall(mac) => (mac, node.attrs, AddSemicolon::No),
+ _ => unreachable!(),
+ }
+ }
+ fn wrap_flat_map_node_noop_flat_map(
+ mut node: Self,
+ collector: &mut InvocationCollector<'_, '_>,
+ noop_flat_map: impl FnOnce(Self, &mut InvocationCollector<'_, '_>) -> Self::OutputTy,
+ ) -> Result<Self::OutputTy, Self> {
+ if !matches!(node.kind, ItemKind::Mod(..)) {
+ return Ok(noop_flat_map(node, collector));
+ }
+
+ // Work around borrow checker not seeing through `P`'s deref.
+ let (ident, span, mut attrs) = (node.ident, node.span, mem::take(&mut node.attrs));
+ let ItemKind::Mod(_, mod_kind) = &mut node.kind else {
+ unreachable!()
+ };
+
+ let ecx = &mut collector.cx;
+ let (file_path, dir_path, dir_ownership) = match mod_kind {
+ ModKind::Loaded(_, inline, _) => {
+ // Inline `mod foo { ... }`, but we still need to push directories.
+ let (dir_path, dir_ownership) = mod_dir_path(
+ &ecx.sess,
+ ident,
+ &attrs,
+ &ecx.current_expansion.module,
+ ecx.current_expansion.dir_ownership,
+ *inline,
+ );
+ node.attrs = attrs;
+ (None, dir_path, dir_ownership)
+ }
+ ModKind::Unloaded => {
+ // We have an outline `mod foo;` so we need to parse the file.
+ let old_attrs_len = attrs.len();
+ let ParsedExternalMod { items, spans, file_path, dir_path, dir_ownership } =
+ parse_external_mod(
+ &ecx.sess,
+ ident,
+ span,
+ &ecx.current_expansion.module,
+ ecx.current_expansion.dir_ownership,
+ &mut attrs,
+ );
+
+ if let Some(lint_store) = ecx.lint_store {
+ lint_store.pre_expansion_lint(
+ ecx.sess,
+ ecx.resolver.registered_tools(),
+ ecx.current_expansion.lint_node_id,
+ &attrs,
+ &items,
+ ident.name.as_str(),
+ );
+ }
+
+ *mod_kind = ModKind::Loaded(items, Inline::No, spans);
+ node.attrs = attrs;
+ if node.attrs.len() > old_attrs_len {
+ // If we loaded an out-of-line module and added some inner attributes,
+ // then we need to re-configure it and re-collect attributes for
+ // resolution and expansion.
+ return Err(node);
+ }
+ (Some(file_path), dir_path, dir_ownership)
+ }
+ };
+
+ // Set the module info before we flat map.
+ let mut module = ecx.current_expansion.module.with_dir_path(dir_path);
+ module.mod_path.push(ident);
+ if let Some(file_path) = file_path {
+ module.file_path_stack.push(file_path);
+ }
+
+ let orig_module = mem::replace(&mut ecx.current_expansion.module, Rc::new(module));
+ let orig_dir_ownership =
+ mem::replace(&mut ecx.current_expansion.dir_ownership, dir_ownership);
+
+ let res = Ok(noop_flat_map(node, collector));
+
+ collector.cx.current_expansion.dir_ownership = orig_dir_ownership;
+ collector.cx.current_expansion.module = orig_module;
+ res
+ }
+}
+
+struct TraitItemTag;
+impl InvocationCollectorNode for AstNodeWrapper<P<ast::AssocItem>, TraitItemTag> {
+ type OutputTy = SmallVec<[P<ast::AssocItem>; 1]>;
+ const KIND: AstFragmentKind = AstFragmentKind::TraitItems;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::TraitItem(self.wrapped)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_trait_items()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_assoc_item(self.wrapped, visitor)
+ }
+ fn is_mac_call(&self) -> bool {
+ matches!(self.wrapped.kind, AssocItemKind::MacCall(..))
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ let item = self.wrapped.into_inner();
+ match item.kind {
+ AssocItemKind::MacCall(mac) => (mac, item.attrs, AddSemicolon::No),
+ _ => unreachable!(),
+ }
+ }
+}
+
+struct ImplItemTag;
+impl InvocationCollectorNode for AstNodeWrapper<P<ast::AssocItem>, ImplItemTag> {
+ type OutputTy = SmallVec<[P<ast::AssocItem>; 1]>;
+ const KIND: AstFragmentKind = AstFragmentKind::ImplItems;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::ImplItem(self.wrapped)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_impl_items()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_assoc_item(self.wrapped, visitor)
+ }
+ fn is_mac_call(&self) -> bool {
+ matches!(self.wrapped.kind, AssocItemKind::MacCall(..))
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ let item = self.wrapped.into_inner();
+ match item.kind {
+ AssocItemKind::MacCall(mac) => (mac, item.attrs, AddSemicolon::No),
+ _ => unreachable!(),
+ }
+ }
+}
+
+impl InvocationCollectorNode for P<ast::ForeignItem> {
+ const KIND: AstFragmentKind = AstFragmentKind::ForeignItems;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::ForeignItem(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_foreign_items()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_foreign_item(self, visitor)
+ }
+ fn is_mac_call(&self) -> bool {
+ matches!(self.kind, ForeignItemKind::MacCall(..))
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ let node = self.into_inner();
+ match node.kind {
+ ForeignItemKind::MacCall(mac) => (mac, node.attrs, AddSemicolon::No),
+ _ => unreachable!(),
+ }
+ }
+}
+
+impl InvocationCollectorNode for ast::Variant {
+ const KIND: AstFragmentKind = AstFragmentKind::Variants;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::Variant(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_variants()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_variant(self, visitor)
+ }
+}
+
+impl InvocationCollectorNode for ast::FieldDef {
+ const KIND: AstFragmentKind = AstFragmentKind::FieldDefs;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::FieldDef(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_field_defs()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_field_def(self, visitor)
+ }
+}
+
+impl InvocationCollectorNode for ast::PatField {
+ const KIND: AstFragmentKind = AstFragmentKind::PatFields;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::PatField(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_pat_fields()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_pat_field(self, visitor)
+ }
+}
+
+impl InvocationCollectorNode for ast::ExprField {
+ const KIND: AstFragmentKind = AstFragmentKind::ExprFields;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::ExprField(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_expr_fields()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_expr_field(self, visitor)
+ }
+}
+
+impl InvocationCollectorNode for ast::Param {
+ const KIND: AstFragmentKind = AstFragmentKind::Params;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::Param(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_params()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_param(self, visitor)
+ }
+}
+
+impl InvocationCollectorNode for ast::GenericParam {
+ const KIND: AstFragmentKind = AstFragmentKind::GenericParams;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::GenericParam(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_generic_params()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_generic_param(self, visitor)
+ }
+}
+
+impl InvocationCollectorNode for ast::Arm {
+ const KIND: AstFragmentKind = AstFragmentKind::Arms;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::Arm(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_arms()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_arm(self, visitor)
+ }
+}
+
+impl InvocationCollectorNode for ast::Stmt {
+ type AttrsTy = ast::AttrVec;
+ const KIND: AstFragmentKind = AstFragmentKind::Stmts;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::Stmt(P(self))
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_stmts()
+ }
+ fn noop_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy {
+ noop_flat_map_stmt(self, visitor)
+ }
+ fn is_mac_call(&self) -> bool {
+ match &self.kind {
+ StmtKind::MacCall(..) => true,
+ StmtKind::Item(item) => matches!(item.kind, ItemKind::MacCall(..)),
+ StmtKind::Semi(expr) => matches!(expr.kind, ExprKind::MacCall(..)),
+ StmtKind::Expr(..) => unreachable!(),
+ StmtKind::Local(..) | StmtKind::Empty => false,
+ }
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ // We pull macro invocations (both attributes and fn-like macro calls) out of their
+ // `StmtKind`s and treat them as statement macro invocations, not as items or expressions.
+ let (add_semicolon, mac, attrs) = match self.kind {
+ StmtKind::MacCall(mac) => {
+ let ast::MacCallStmt { mac, style, attrs, .. } = mac.into_inner();
+ (style == MacStmtStyle::Semicolon, mac, attrs)
+ }
+ StmtKind::Item(item) => match item.into_inner() {
+ ast::Item { kind: ItemKind::MacCall(mac), attrs, .. } => {
+ (mac.args.need_semicolon(), mac, attrs.into())
+ }
+ _ => unreachable!(),
+ },
+ StmtKind::Semi(expr) => match expr.into_inner() {
+ ast::Expr { kind: ExprKind::MacCall(mac), attrs, .. } => {
+ (mac.args.need_semicolon(), mac, attrs)
+ }
+ _ => unreachable!(),
+ },
+ _ => unreachable!(),
+ };
+ (mac, attrs, if add_semicolon { AddSemicolon::Yes } else { AddSemicolon::No })
+ }
+ fn post_flat_map_node_collect_bang(stmts: &mut Self::OutputTy, add_semicolon: AddSemicolon) {
+ // If this is a macro invocation with a semicolon, then apply that
+ // semicolon to the final statement produced by expansion.
+ if matches!(add_semicolon, AddSemicolon::Yes) {
+ if let Some(stmt) = stmts.pop() {
+ stmts.push(stmt.add_trailing_semicolon());
+ }
+ }
+ }
+}
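+
+// Illustrative (not from the original source): for a statement `m!();` where
+// `m!` expands to `a(); b()`, the `MacStmtStyle::Semicolon` above becomes
+// `AddSemicolon::Yes`, and `post_flat_map_node_collect_bang` turns the
+// trailing `b()` into `b();` after expansion.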
+
+impl InvocationCollectorNode for ast::Crate {
+ type OutputTy = ast::Crate;
+ const KIND: AstFragmentKind = AstFragmentKind::Crate;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::Crate(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_crate()
+ }
+ fn noop_visit<V: MutVisitor>(&mut self, visitor: &mut V) {
+ noop_visit_crate(self, visitor)
+ }
+}
+
+impl InvocationCollectorNode for P<ast::Ty> {
+ type OutputTy = P<ast::Ty>;
+ const KIND: AstFragmentKind = AstFragmentKind::Ty;
+ fn to_annotatable(self) -> Annotatable {
+ unreachable!()
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_ty()
+ }
+ fn noop_visit<V: MutVisitor>(&mut self, visitor: &mut V) {
+ noop_visit_ty(self, visitor)
+ }
+ fn is_mac_call(&self) -> bool {
+ matches!(self.kind, ast::TyKind::MacCall(..))
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ let node = self.into_inner();
+ match node.kind {
+ TyKind::MacCall(mac) => (mac, Vec::new(), AddSemicolon::No),
+ _ => unreachable!(),
+ }
+ }
+}
+
+impl InvocationCollectorNode for P<ast::Pat> {
+ type OutputTy = P<ast::Pat>;
+ const KIND: AstFragmentKind = AstFragmentKind::Pat;
+ fn to_annotatable(self) -> Annotatable {
+ unreachable!()
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_pat()
+ }
+ fn noop_visit<V: MutVisitor>(&mut self, visitor: &mut V) {
+ noop_visit_pat(self, visitor)
+ }
+ fn is_mac_call(&self) -> bool {
+ matches!(self.kind, PatKind::MacCall(..))
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ let node = self.into_inner();
+ match node.kind {
+ PatKind::MacCall(mac) => (mac, Vec::new(), AddSemicolon::No),
+ _ => unreachable!(),
+ }
+ }
+}
+
+impl InvocationCollectorNode for P<ast::Expr> {
+ type OutputTy = P<ast::Expr>;
+ type AttrsTy = ast::AttrVec;
+ const KIND: AstFragmentKind = AstFragmentKind::Expr;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::Expr(self)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_expr()
+ }
+ fn descr() -> &'static str {
+ "an expression"
+ }
+ fn noop_visit<V: MutVisitor>(&mut self, visitor: &mut V) {
+ noop_visit_expr(self, visitor)
+ }
+ fn is_mac_call(&self) -> bool {
+ matches!(self.kind, ExprKind::MacCall(..))
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ let node = self.into_inner();
+ match node.kind {
+ ExprKind::MacCall(mac) => (mac, node.attrs, AddSemicolon::No),
+ _ => unreachable!(),
+ }
+ }
+}
+
+struct OptExprTag;
+impl InvocationCollectorNode for AstNodeWrapper<P<ast::Expr>, OptExprTag> {
+ type OutputTy = Option<P<ast::Expr>>;
+ type AttrsTy = ast::AttrVec;
+ const KIND: AstFragmentKind = AstFragmentKind::OptExpr;
+ fn to_annotatable(self) -> Annotatable {
+ Annotatable::Expr(self.wrapped)
+ }
+ fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy {
+ fragment.make_opt_expr()
+ }
+ fn noop_flat_map<V: MutVisitor>(mut self, visitor: &mut V) -> Self::OutputTy {
+ noop_visit_expr(&mut self.wrapped, visitor);
+ Some(self.wrapped)
+ }
+ fn is_mac_call(&self) -> bool {
+ matches!(self.wrapped.kind, ast::ExprKind::MacCall(..))
+ }
+ fn take_mac_call(self) -> (ast::MacCall, Self::AttrsTy, AddSemicolon) {
+ let node = self.wrapped.into_inner();
+ match node.kind {
+ ExprKind::MacCall(mac) => (mac, node.attrs, AddSemicolon::No),
+ _ => unreachable!(),
+ }
+ }
+ fn pre_flat_map_node_collect_attr(cfg: &StripUnconfigured<'_>, attr: &ast::Attribute) {
+ cfg.maybe_emit_expr_attr_err(&attr);
+ }
+}
+
+struct InvocationCollector<'a, 'b> {
+ cx: &'a mut ExtCtxt<'b>,
+ invocations: Vec<(Invocation, Option<Lrc<SyntaxExtension>>)>,
+ monotonic: bool,
+}
+
+impl<'a, 'b> InvocationCollector<'a, 'b> {
+ fn cfg(&self) -> StripUnconfigured<'_> {
+ StripUnconfigured {
+ sess: &self.cx.sess,
+ features: self.cx.ecfg.features,
+ config_tokens: false,
+ lint_node_id: self.cx.current_expansion.lint_node_id,
+ }
+ }
+
+ fn collect(&mut self, fragment_kind: AstFragmentKind, kind: InvocationKind) -> AstFragment {
+ let expn_id = LocalExpnId::fresh_empty();
+ let vis = kind.placeholder_visibility();
+ self.invocations.push((
+ Invocation {
+ kind,
+ fragment_kind,
+ expansion_data: ExpansionData {
+ id: expn_id,
+ depth: self.cx.current_expansion.depth + 1,
+ ..self.cx.current_expansion.clone()
+ },
+ },
+ None,
+ ));
+ placeholder(fragment_kind, NodeId::placeholder_from_expn_id(expn_id), vis)
+ }
+
+ fn collect_bang(&mut self, mac: ast::MacCall, kind: AstFragmentKind) -> AstFragment {
+        // Cache the macro call span so that it can be
+        // easily adjusted for incremental compilation.
+ let span = mac.span();
+ self.collect(kind, InvocationKind::Bang { mac, span })
+ }
+
+ fn collect_attr(
+ &mut self,
+ (attr, pos, derives): (ast::Attribute, usize, Vec<ast::Path>),
+ item: Annotatable,
+ kind: AstFragmentKind,
+ ) -> AstFragment {
+ self.collect(kind, InvocationKind::Attr { attr, pos, item, derives })
+ }
+
+ /// If `item` is an attribute invocation, remove the attribute and return it together with
+ /// its position and derives following it. We have to collect the derives in order to resolve
+ /// legacy derive helpers (helpers written before derives that introduce them).
+ fn take_first_attr(
+ &self,
+ item: &mut impl HasAttrs,
+ ) -> Option<(ast::Attribute, usize, Vec<ast::Path>)> {
+ let mut attr = None;
+
+ let mut cfg_pos = None;
+ let mut attr_pos = None;
+ for (pos, attr) in item.attrs().iter().enumerate() {
+ if !attr.is_doc_comment() && !self.cx.expanded_inert_attrs.is_marked(attr) {
+ let name = attr.ident().map(|ident| ident.name);
+ if name == Some(sym::cfg) || name == Some(sym::cfg_attr) {
+ cfg_pos = Some(pos); // a cfg attr found, no need to search anymore
+ break;
+ } else if attr_pos.is_none()
+ && !name.map_or(false, rustc_feature::is_builtin_attr_name)
+ {
+ attr_pos = Some(pos); // a non-cfg attr found, still may find a cfg attr
+ }
+ }
+ }
+
+ item.visit_attrs(|attrs| {
+ attr = Some(match (cfg_pos, attr_pos) {
+ (Some(pos), _) => (attrs.remove(pos), pos, Vec::new()),
+ (_, Some(pos)) => {
+ let attr = attrs.remove(pos);
+ let following_derives = attrs[pos..]
+ .iter()
+ .filter(|a| a.has_name(sym::derive))
+ .flat_map(|a| a.meta_item_list().unwrap_or_default())
+ .filter_map(|nested_meta| match nested_meta {
+ NestedMetaItem::MetaItem(ast::MetaItem {
+ kind: MetaItemKind::Word,
+ path,
+ ..
+ }) => Some(path),
+ _ => None,
+ })
+ .collect();
+
+ (attr, pos, following_derives)
+ }
+ _ => return,
+ });
+ });
+
+ attr
+ }
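+
+    // Illustrative (attribute names assumed, not from the original source): for
+    // `#[my_attr] #[derive(Foo)] struct S;` with a non-builtin `my_attr`, this
+    // returns the removed `my_attr` at position 0 together with the `Foo` path
+    // collected from the derive that follows it; a `#[cfg]` or `#[cfg_attr]`
+    // anywhere in the list would take priority instead.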
+
+ // Detect use of feature-gated or invalid attributes on macro invocations
+ // since they will not be detected after macro expansion.
+ fn check_attributes(&self, attrs: &[ast::Attribute], call: &ast::MacCall) {
+ let features = self.cx.ecfg.features.unwrap();
+ let mut attrs = attrs.iter().peekable();
+ let mut span: Option<Span> = None;
+ while let Some(attr) = attrs.next() {
+ rustc_ast_passes::feature_gate::check_attribute(attr, self.cx.sess, features);
+ validate_attr::check_meta(&self.cx.sess.parse_sess, attr);
+
+ let current_span = if let Some(sp) = span { sp.to(attr.span) } else { attr.span };
+ span = Some(current_span);
+
+ if attrs.peek().map_or(false, |next_attr| next_attr.doc_str().is_some()) {
+ continue;
+ }
+
+ if attr.is_doc_comment() {
+ self.cx.sess.parse_sess.buffer_lint_with_diagnostic(
+ &UNUSED_DOC_COMMENTS,
+ current_span,
+ self.cx.current_expansion.lint_node_id,
+ "unused doc comment",
+ BuiltinLintDiagnostics::UnusedDocComment(attr.span),
+ );
+ } else if rustc_attr::is_builtin_attr(attr) {
+ let attr_name = attr.ident().unwrap().name;
+ // `#[cfg]` and `#[cfg_attr]` are special - they are
+ // eagerly evaluated.
+ if attr_name != sym::cfg && attr_name != sym::cfg_attr {
+ self.cx.sess.parse_sess.buffer_lint_with_diagnostic(
+ &UNUSED_ATTRIBUTES,
+ attr.span,
+ self.cx.current_expansion.lint_node_id,
+ &format!("unused attribute `{}`", attr_name),
+ BuiltinLintDiagnostics::UnusedBuiltinAttribute {
+ attr_name,
+ macro_name: pprust::path_to_string(&call.path),
+ invoc_span: call.path.span,
+ },
+ );
+ }
+ }
+ }
+ }
+
+ fn expand_cfg_true(
+ &mut self,
+ node: &mut impl HasAttrs,
+ attr: ast::Attribute,
+ pos: usize,
+ ) -> bool {
+ let res = self.cfg().cfg_true(&attr);
+ if res {
+ // FIXME: `cfg(TRUE)` attributes do not currently remove themselves during expansion,
+ // and some tools like rustdoc and clippy rely on that. Find a way to remove them
+ // while keeping the tools working.
+ self.cx.expanded_inert_attrs.mark(&attr);
+ node.visit_attrs(|attrs| attrs.insert(pos, attr));
+ }
+ res
+ }
+
+ fn expand_cfg_attr(&self, node: &mut impl HasAttrs, attr: ast::Attribute, pos: usize) {
+ node.visit_attrs(|attrs| {
+ attrs.splice(pos..pos, self.cfg().expand_cfg_attr(attr, false));
+ });
+ }
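+
+    // Illustrative (not from the original source; `my_attr` is hypothetical):
+    // for `#[cfg_attr(unix, my_attr)]` removed by `take_first_attr`, this
+    // splices `#[my_attr]` back in at the same position when `unix` is active,
+    // or nothing otherwise; the caller then loops and examines the node again.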
+
+ fn flat_map_node<Node: InvocationCollectorNode<OutputTy: Default>>(
+ &mut self,
+ mut node: Node,
+ ) -> Node::OutputTy {
+ loop {
+ return match self.take_first_attr(&mut node) {
+ Some((attr, pos, derives)) => match attr.name_or_empty() {
+ sym::cfg => {
+ if self.expand_cfg_true(&mut node, attr, pos) {
+ continue;
+ }
+ Default::default()
+ }
+ sym::cfg_attr => {
+ self.expand_cfg_attr(&mut node, attr, pos);
+ continue;
+ }
+ _ => {
+ Node::pre_flat_map_node_collect_attr(&self.cfg(), &attr);
+ self.collect_attr((attr, pos, derives), node.to_annotatable(), Node::KIND)
+ .make_ast::<Node>()
+ }
+ },
+ None if node.is_mac_call() => {
+ let (mac, attrs, add_semicolon) = node.take_mac_call();
+ self.check_attributes(&attrs, &mac);
+ let mut res = self.collect_bang(mac, Node::KIND).make_ast::<Node>();
+ Node::post_flat_map_node_collect_bang(&mut res, add_semicolon);
+ res
+ }
+ None => {
+ match Node::wrap_flat_map_node_noop_flat_map(node, self, |mut node, this| {
+ assign_id!(this, node.node_id_mut(), || node.noop_flat_map(this))
+ }) {
+ Ok(output) => output,
+ Err(returned_node) => {
+ node = returned_node;
+ continue;
+ }
+ }
+ }
+ };
+ }
+ }
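+
+    // Illustrative walk-through (not from the original source; `my_attr` is
+    // hypothetical): for an item carrying `#[cfg_attr(unix, my_attr)]`, one
+    // iteration splices the `cfg_attr` and retries; the next iteration takes
+    // the non-builtin `my_attr` and replaces the item with a placeholder
+    // fragment via `collect_attr`, to be expanded later.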
+
+ fn visit_node<Node: InvocationCollectorNode<OutputTy = Node> + DummyAstNode>(
+ &mut self,
+ node: &mut Node,
+ ) {
+ loop {
+ return match self.take_first_attr(node) {
+ Some((attr, pos, derives)) => match attr.name_or_empty() {
+ sym::cfg => {
+ let span = attr.span;
+ if self.expand_cfg_true(node, attr, pos) {
+ continue;
+ }
+ let msg =
+ format!("removing {} is not supported in this position", Node::descr());
+ self.cx.span_err(span, &msg);
+ continue;
+ }
+ sym::cfg_attr => {
+ self.expand_cfg_attr(node, attr, pos);
+ continue;
+ }
+ _ => visit_clobber(node, |node| {
+ self.collect_attr((attr, pos, derives), node.to_annotatable(), Node::KIND)
+ .make_ast::<Node>()
+ }),
+ },
+ None if node.is_mac_call() => {
+ visit_clobber(node, |node| {
+ // Do not clobber unless it's actually a macro (uncommon case).
+ let (mac, attrs, _) = node.take_mac_call();
+ self.check_attributes(&attrs, &mac);
+ self.collect_bang(mac, Node::KIND).make_ast::<Node>()
+ })
+ }
+ None => {
+ assign_id!(self, node.node_id_mut(), || node.noop_visit(self))
+ }
+ };
+ }
+ }
+}
+
+impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> {
+ fn flat_map_item(&mut self, node: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
+ self.flat_map_node(node)
+ }
+
+ fn flat_map_trait_item(&mut self, node: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
+ self.flat_map_node(AstNodeWrapper::new(node, TraitItemTag))
+ }
+
+ fn flat_map_impl_item(&mut self, node: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
+ self.flat_map_node(AstNodeWrapper::new(node, ImplItemTag))
+ }
+
+ fn flat_map_foreign_item(
+ &mut self,
+ node: P<ast::ForeignItem>,
+ ) -> SmallVec<[P<ast::ForeignItem>; 1]> {
+ self.flat_map_node(node)
+ }
+
+ fn flat_map_variant(&mut self, node: ast::Variant) -> SmallVec<[ast::Variant; 1]> {
+ self.flat_map_node(node)
+ }
+
+ fn flat_map_field_def(&mut self, node: ast::FieldDef) -> SmallVec<[ast::FieldDef; 1]> {
+ self.flat_map_node(node)
+ }
+
+ fn flat_map_pat_field(&mut self, node: ast::PatField) -> SmallVec<[ast::PatField; 1]> {
+ self.flat_map_node(node)
+ }
+
+ fn flat_map_expr_field(&mut self, node: ast::ExprField) -> SmallVec<[ast::ExprField; 1]> {
+ self.flat_map_node(node)
+ }
+
+ fn flat_map_param(&mut self, node: ast::Param) -> SmallVec<[ast::Param; 1]> {
+ self.flat_map_node(node)
+ }
+
+ fn flat_map_generic_param(
+ &mut self,
+ node: ast::GenericParam,
+ ) -> SmallVec<[ast::GenericParam; 1]> {
+ self.flat_map_node(node)
+ }
+
+ fn flat_map_arm(&mut self, node: ast::Arm) -> SmallVec<[ast::Arm; 1]> {
+ self.flat_map_node(node)
+ }
+
+ fn flat_map_stmt(&mut self, node: ast::Stmt) -> SmallVec<[ast::Stmt; 1]> {
+        // FIXME: invocations in semicolon-less expression positions are expanded as expressions,
+        // changing that requires some compatibility measures.
+ if node.is_expr() {
+            // The only way that we can end up with a `MacCall` expression statement
+            // (as opposed to a `StmtKind::MacCall`) is if we have a macro as the
+ // trailing expression in a block (e.g. `fn foo() { my_macro!() }`).
+ // Record this information, so that we can report a more specific
+ // `SEMICOLON_IN_EXPRESSIONS_FROM_MACROS` lint if needed.
+ // See #78991 for an investigation of treating macros in this position
+ // as statements, rather than expressions, during parsing.
+ return match &node.kind {
+ StmtKind::Expr(expr)
+ if matches!(**expr, ast::Expr { kind: ExprKind::MacCall(..), .. }) =>
+ {
+ self.cx.current_expansion.is_trailing_mac = true;
+ // Don't use `assign_id` for this statement - it may get removed
+ // entirely due to a `#[cfg]` on the contained expression
+ let res = noop_flat_map_stmt(node, self);
+ self.cx.current_expansion.is_trailing_mac = false;
+ res
+ }
+ _ => noop_flat_map_stmt(node, self),
+ };
+ }
+
+ self.flat_map_node(node)
+ }
+
+ fn visit_crate(&mut self, node: &mut ast::Crate) {
+ self.visit_node(node)
+ }
+
+ fn visit_ty(&mut self, node: &mut P<ast::Ty>) {
+ self.visit_node(node)
+ }
+
+ fn visit_pat(&mut self, node: &mut P<ast::Pat>) {
+ self.visit_node(node)
+ }
+
+ fn visit_expr(&mut self, node: &mut P<ast::Expr>) {
+ // FIXME: Feature gating is performed inconsistently between `Expr` and `OptExpr`.
+ if let Some(attr) = node.attrs.first() {
+ self.cfg().maybe_emit_expr_attr_err(attr);
+ }
+ self.visit_node(node)
+ }
+
+ fn filter_map_expr(&mut self, node: P<ast::Expr>) -> Option<P<ast::Expr>> {
+ self.flat_map_node(AstNodeWrapper::new(node, OptExprTag))
+ }
+
+ fn visit_block(&mut self, node: &mut P<ast::Block>) {
+ let orig_dir_ownership = mem::replace(
+ &mut self.cx.current_expansion.dir_ownership,
+ DirOwnership::UnownedViaBlock,
+ );
+ noop_visit_block(node, self);
+ self.cx.current_expansion.dir_ownership = orig_dir_ownership;
+ }
+
+ fn visit_id(&mut self, id: &mut NodeId) {
+ // We may have already assigned a `NodeId`
+ // by calling `assign_id`
+ if self.monotonic && *id == ast::DUMMY_NODE_ID {
+ *id = self.cx.resolver.next_node_id();
+ }
+ }
+}
+
+pub struct ExpansionConfig<'feat> {
+ pub crate_name: String,
+ pub features: Option<&'feat Features>,
+ pub recursion_limit: Limit,
+ pub trace_mac: bool,
+ pub should_test: bool, // If false, strip `#[test]` nodes
+ pub span_debug: bool, // If true, use verbose debugging for `proc_macro::Span`
+ pub proc_macro_backtrace: bool, // If true, show backtraces for proc-macro panics
+}
+
+impl<'feat> ExpansionConfig<'feat> {
+ pub fn default(crate_name: String) -> ExpansionConfig<'static> {
+ ExpansionConfig {
+ crate_name,
+ features: None,
+ recursion_limit: Limit::new(1024),
+ trace_mac: false,
+ should_test: false,
+ span_debug: false,
+ proc_macro_backtrace: false,
+ }
+ }
+
+ fn proc_macro_hygiene(&self) -> bool {
+ self.features.map_or(false, |features| features.proc_macro_hygiene)
+ }
+}
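+
+// A hedged usage sketch (driver code assumed, not part of this file):
+//
+// let ecfg = ExpansionConfig::default("my_crate".to_string());
+// // features: None, recursion_limit: 1024, all boolean flags off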
diff --git a/compiler/rustc_expand/src/lib.rs b/compiler/rustc_expand/src/lib.rs
new file mode 100644
index 000000000..9d0232822
--- /dev/null
+++ b/compiler/rustc_expand/src/lib.rs
@@ -0,0 +1,53 @@
+#![feature(array_windows)]
+#![feature(associated_type_bounds)]
+#![feature(associated_type_defaults)]
+#![feature(if_let_guard)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(macro_metavar_expr)]
+#![feature(proc_macro_diagnostic)]
+#![feature(proc_macro_internals)]
+#![feature(proc_macro_span)]
+#![feature(rustc_attrs)]
+#![feature(try_blocks)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate rustc_macros;
+
+extern crate proc_macro as pm;
+
+mod placeholders;
+mod proc_macro_server;
+
+pub use mbe::macro_rules::compile_declarative_macro;
+pub(crate) use rustc_span::hygiene;
+pub mod base;
+pub mod build;
+#[macro_use]
+pub mod config;
+pub mod expand;
+pub mod module;
+pub mod proc_macro;
+
+pub(crate) mod mbe;
+
+// HACK(Centril, #64197): These shouldn't really be here.
+// Rather, they should be with their respective modules which are defined in other crates.
+// However, since for now constructing a `ParseSess` sorta requires `config` from this crate,
+// these tests will need to live here in the interim.
+
+#[cfg(test)]
+mod tests;
+#[cfg(test)]
+mod parse {
+ mod tests;
+}
+#[cfg(test)]
+mod tokenstream {
+ mod tests;
+}
+#[cfg(test)]
+mod mut_visit {
+ mod tests;
+}
diff --git a/compiler/rustc_expand/src/mbe.rs b/compiler/rustc_expand/src/mbe.rs
new file mode 100644
index 000000000..f42576b16
--- /dev/null
+++ b/compiler/rustc_expand/src/mbe.rs
@@ -0,0 +1,110 @@
+//! This module implements declarative macros: old `macro_rules` and the newer
+//! `macro`. Declarative macros are also known as "macro by example", and that's
+//! why we call this module `mbe`. For external documentation, prefer the
+//! official terminology: "declarative macros".
+
+pub(crate) mod macro_check;
+pub(crate) mod macro_parser;
+pub(crate) mod macro_rules;
+pub(crate) mod metavar_expr;
+pub(crate) mod quoted;
+pub(crate) mod transcribe;
+
+use metavar_expr::MetaVarExpr;
+use rustc_ast::token::{Delimiter, NonterminalKind, Token, TokenKind};
+use rustc_ast::tokenstream::DelimSpan;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+/// Contains the sub-token-trees of a "delimited" token tree such as `(a b c)`.
+/// The delimiters are not represented explicitly in the `tts` vector.
+#[derive(PartialEq, Encodable, Decodable, Debug)]
+struct Delimited {
+ delim: Delimiter,
+ /// FIXME: #67062 has details about why this is sub-optimal.
+ tts: Vec<TokenTree>,
+}
+
+#[derive(PartialEq, Encodable, Decodable, Debug)]
+struct SequenceRepetition {
+ /// The sequence of token trees
+ tts: Vec<TokenTree>,
+ /// The optional separator
+ separator: Option<Token>,
+    /// Whether the sequence can be repeated zero or more times (`*`),
+    /// one or more times (`+`), or zero or one time (`?`)
+ kleene: KleeneToken,
+ /// The number of `Match`s that appear in the sequence (and subsequences)
+ num_captures: usize,
+}
+
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
+struct KleeneToken {
+ span: Span,
+ op: KleeneOp,
+}
+
+impl KleeneToken {
+ fn new(op: KleeneOp, span: Span) -> KleeneToken {
+ KleeneToken { span, op }
+ }
+}
+
+/// A Kleene-style [repetition operator](https://en.wikipedia.org/wiki/Kleene_star)
+/// for token sequences.
+#[derive(Clone, PartialEq, Encodable, Decodable, Debug, Copy)]
+enum KleeneOp {
+ /// Kleene star (`*`) for zero or more repetitions
+ ZeroOrMore,
+ /// Kleene plus (`+`) for one or more repetitions
+ OneOrMore,
+ /// Kleene optional (`?`) for zero or one repetitions
+ ZeroOrOne,
+}
+
+/// Similar to `tokenstream::TokenTree`, except that `Sequence`, `MetaVar`, `MetaVarDecl`, and
+/// `MetaVarExpr` are "first-class" token trees. Useful for parsing macros.
+#[derive(Debug, PartialEq, Encodable, Decodable)]
+enum TokenTree {
+ Token(Token),
+ /// A delimited sequence, e.g. `($e:expr)` (RHS) or `{ $e }` (LHS).
+ Delimited(DelimSpan, Delimited),
+    /// A Kleene-style repetition sequence, e.g. `$($e:expr)*` (RHS) or `$($e),*` (LHS).
+ Sequence(DelimSpan, SequenceRepetition),
+ /// e.g., `$var`.
+ MetaVar(Span, Ident),
+ /// e.g., `$var:expr`. Only appears on the LHS.
+ MetaVarDecl(Span, Ident /* name to bind */, Option<NonterminalKind>),
+ /// A meta-variable expression inside `${...}`.
+ MetaVarExpr(DelimSpan, MetaVarExpr),
+}
+
+impl TokenTree {
+ /// Returns `true` if the given token tree is delimited.
+ fn is_delimited(&self) -> bool {
+ matches!(*self, TokenTree::Delimited(..))
+ }
+
+ /// Returns `true` if the given token tree is a token of the given kind.
+ fn is_token(&self, expected_kind: &TokenKind) -> bool {
+ match self {
+ TokenTree::Token(Token { kind: actual_kind, .. }) => actual_kind == expected_kind,
+ _ => false,
+ }
+ }
+
+ /// Retrieves the `TokenTree`'s span.
+ fn span(&self) -> Span {
+ match *self {
+ TokenTree::Token(Token { span, .. })
+ | TokenTree::MetaVar(span, _)
+ | TokenTree::MetaVarDecl(span, _, _) => span,
+ TokenTree::Delimited(span, _)
+ | TokenTree::MetaVarExpr(span, _)
+ | TokenTree::Sequence(span, _) => span.entire(),
+ }
+ }
+
+ fn token(kind: TokenKind, span: Span) -> TokenTree {
+ TokenTree::Token(Token::new(kind, span))
+ }
+}
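+
+// Illustrative mapping (not from the original source): the matcher
+// `$($e:expr),*` parses into a `Sequence` whose `SequenceRepetition` holds a
+// single `MetaVarDecl` for `e` in `tts`, a `,` token as `separator`,
+// `KleeneOp::ZeroOrMore` as the operator, and `num_captures == 1`.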
diff --git a/compiler/rustc_expand/src/mbe/macro_check.rs b/compiler/rustc_expand/src/mbe/macro_check.rs
new file mode 100644
index 000000000..8994a2f78
--- /dev/null
+++ b/compiler/rustc_expand/src/mbe/macro_check.rs
@@ -0,0 +1,652 @@
+//! Checks that meta-variables in macro definition are correctly declared and used.
+//!
+//! # What is checked
+//!
+//! ## Meta-variables must not be bound twice
+//!
+//! ```compile_fail
+//! macro_rules! foo { ($x:tt $x:tt) => { $x }; }
+//! ```
+//!
+//! This check is sound (no false-negative) and complete (no false-positive).
+//!
+//! ## Meta-variables must not be free
+//!
+//! ```
+//! macro_rules! foo { () => { $x }; }
+//! ```
+//!
+//! This check is also done at macro instantiation but only if the branch is taken.
+//!
+//! ## Meta-variables must repeat at least as many times as their binder
+//!
+//! ```
+//! macro_rules! foo { ($($x:tt)*) => { $x }; }
+//! ```
+//!
+//! This check is also done at macro instantiation but only if the branch is taken.
+//!
+//! ## Meta-variables must repeat with the same Kleene operators as their binder
+//!
+//! ```
+//! macro_rules! foo { ($($x:tt)+) => { $($x)* }; }
+//! ```
+//!
+//! This check is not done at macro instantiation.
+//!
+//! # Disclaimer
+//!
+//! In the presence of nested macros (a macro defined in a macro), those checks may have false
+//! positives and false negatives. We try to detect those cases by recognizing potential macro
+//! definitions in RHSes, but nested macros may be hidden through the use of particular values of
+//! meta-variables.
+//!
+//! ## Examples of false positive
+//!
+//! False positives can come from cases where we don't recognize a nested macro, because it depends
+//! on particular values of meta-variables. In the following example, we think both instances of
+//! `$x` are free, which is a correct statement if `$name` is anything but `macro_rules`. But when
+//! `$name` is `macro_rules`, like in the instantiation below, then `$x:tt` is actually a binder of
+//! the nested macro and `$x` is bound to it.
+//!
+//! ```
+//! macro_rules! foo { ($name:ident) => { $name! bar { ($x:tt) => { $x }; } }; }
+//! foo!(macro_rules);
+//! ```
+//!
+//! False positives can also come from cases where we think there is a nested macro while there
+//! isn't. In the following example, we think `$x` is free, which is incorrect because `bar` is not
+//! a nested macro since it is not evaluated as code by `stringify!`.
+//!
+//! ```
+//! macro_rules! foo { () => { stringify!(macro_rules! bar { () => { $x }; }) }; }
+//! ```
+//!
+//! ## Examples of false negative
+//!
+//! False negatives can come from cases where we don't recognize a meta-variable, because it depends
+//! on particular values of meta-variables. In the following examples, we don't see that if `$d` is
+//! instantiated with `$` then `$d z` becomes `$z` in the nested macro definition and is thus a free
+//! meta-variable. Note however, that if `foo` is instantiated, then we would check the definition
+//! of `bar` and would see the issue.
+//!
+//! ```
+//! macro_rules! foo { ($d:tt) => { macro_rules! bar { ($y:tt) => { $d z }; } }; }
+//! ```
+//!
+//! # How it is checked
+//!
+//! There are 3 main functions: `check_binders`, `check_occurrences`, and `check_nested_macro`. They
+//! all need some kind of environment.
+//!
+//! ## Environments
+//!
+//! Environments are used to pass information.
+//!
+//! ### From LHS to RHS
+//!
+//! When checking a LHS with `check_binders`, we produce (and use) an environment for binders,
+//! namely `Binders`. This is a mapping from binder name to information about that binder: the span
+//! of the binder for error messages and the stack of Kleene operators under which it was bound in
+//! the LHS.
+//!
+//! This environment is used by both the LHS and RHS. The LHS uses it to detect duplicate binders.
+//! The RHS uses it to detect the other errors.
+//!
+//! ### From outer macro to inner macro
+//!
+//! When checking the RHS of an outer macro and we detect a nested macro definition, we push the
+//! current state, namely `MacroState`, to an environment of nested macro definitions. Each state
+//! stores the LHS binders when entering the macro definition as well as the stack of Kleene
+//! operators under which the inner macro is defined in the RHS.
+//!
+//! This environment is a stack representing the nesting of macro definitions. As such, the stack of
+//! Kleene operators under which a meta-variable is repeating is the concatenation of the stacks
+//! stored when entering a macro definition starting from the state in which the meta-variable is
+//! bound.
+use crate::mbe::{KleeneToken, TokenTree};
+
+use rustc_ast::token::{Delimiter, Token, TokenKind};
+use rustc_ast::{NodeId, DUMMY_NODE_ID};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::MultiSpan;
+use rustc_session::lint::builtin::{META_VARIABLE_MISUSE, MISSING_FRAGMENT_SPECIFIER};
+use rustc_session::parse::ParseSess;
+use rustc_span::symbol::kw;
+use rustc_span::{symbol::MacroRulesNormalizedIdent, Span};
+
+use smallvec::SmallVec;
+
+use std::iter;
+
+/// A stack represented as a linked list.
+///
+/// These are used for environments because they grow incrementally and are not mutable.
+enum Stack<'a, T> {
+ /// Empty stack.
+ Empty,
+ /// A non-empty stack.
+ Push {
+ /// The top element.
+ top: T,
+ /// The previous elements.
+ prev: &'a Stack<'a, T>,
+ },
+}
+
+impl<'a, T> Stack<'a, T> {
+ /// Returns whether a stack is empty.
+ fn is_empty(&self) -> bool {
+ matches!(*self, Stack::Empty)
+ }
+
+    /// Returns a new stack with an element on top.
+ fn push(&'a self, top: T) -> Stack<'a, T> {
+ Stack::Push { top, prev: self }
+ }
+}
+
+impl<'a, T> Iterator for &'a Stack<'a, T> {
+ type Item = &'a T;
+
+ // Iterates from top to bottom of the stack.
+ fn next(&mut self) -> Option<&'a T> {
+ match *self {
+ Stack::Empty => None,
+ Stack::Push { ref top, ref prev } => {
+ *self = prev;
+ Some(top)
+ }
+ }
+ }
+}
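+
+// A minimal usage sketch (illustrative, not from the original source): each
+// `push` borrows the previous stack, so frames live on the call stack while
+// recursing through token trees, and iteration runs top to bottom:
+//
+// let empty: Stack<'_, u8> = Stack::Empty;
+// let one = empty.push(1);
+// let two = one.push(2);
+// let mut it = &two;
+// assert_eq!(it.next(), Some(&2));
+// assert_eq!(it.next(), Some(&1));
+// assert_eq!(it.next(), None);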
+
+impl From<&Stack<'_, KleeneToken>> for SmallVec<[KleeneToken; 1]> {
+ fn from(ops: &Stack<'_, KleeneToken>) -> SmallVec<[KleeneToken; 1]> {
+ let mut ops: SmallVec<[KleeneToken; 1]> = ops.cloned().collect();
+ // The stack is innermost on top. We want outermost first.
+ ops.reverse();
+ ops
+ }
+}
+
+/// Information attached to a meta-variable binder in LHS.
+struct BinderInfo {
+ /// The span of the meta-variable in LHS.
+ span: Span,
+ /// The stack of Kleene operators (outermost first).
+ ops: SmallVec<[KleeneToken; 1]>,
+}
+
+/// An environment of meta-variables to their binder information.
+type Binders = FxHashMap<MacroRulesNormalizedIdent, BinderInfo>;
+
+/// The state at which we entered a macro definition in the RHS of another macro definition.
+struct MacroState<'a> {
+ /// The binders of the branch where we entered the macro definition.
+ binders: &'a Binders,
+ /// The stack of Kleene operators (outermost first) where we entered the macro definition.
+ ops: SmallVec<[KleeneToken; 1]>,
+}
+
+/// Checks that meta-variables are used correctly in a macro definition.
+///
+/// Arguments:
+/// - `sess` is used to emit diagnostics and lints
+/// - `node_id` is used to emit lints
+/// - `span` is used when no spans are available
+/// - `lhses` and `rhses` should have the same length and represent the macro definition
+pub(super) fn check_meta_variables(
+ sess: &ParseSess,
+ node_id: NodeId,
+ span: Span,
+ lhses: &[TokenTree],
+ rhses: &[TokenTree],
+) -> bool {
+ if lhses.len() != rhses.len() {
+ sess.span_diagnostic.span_bug(span, "length mismatch between LHSes and RHSes")
+ }
+ let mut valid = true;
+ for (lhs, rhs) in iter::zip(lhses, rhses) {
+ let mut binders = Binders::default();
+ check_binders(sess, node_id, lhs, &Stack::Empty, &mut binders, &Stack::Empty, &mut valid);
+ check_occurrences(sess, node_id, rhs, &Stack::Empty, &binders, &Stack::Empty, &mut valid);
+ }
+ valid
+}
+
+/// Checks `lhs` as part of the LHS of a macro definition, extends `binders` with new binders, and
+/// sets `valid` to false in case of errors.
+///
+/// Arguments:
+/// - `sess` is used to emit diagnostics and lints
+/// - `node_id` is used to emit lints
+/// - `lhs` is checked as part of a LHS
+/// - `macros` is the stack of possible outer macros
+/// - `binders` contains the binders of the LHS
+/// - `ops` is the stack of Kleene operators from the LHS
+/// - `valid` is set in case of errors
+fn check_binders(
+ sess: &ParseSess,
+ node_id: NodeId,
+ lhs: &TokenTree,
+ macros: &Stack<'_, MacroState<'_>>,
+ binders: &mut Binders,
+ ops: &Stack<'_, KleeneToken>,
+ valid: &mut bool,
+) {
+ match *lhs {
+ TokenTree::Token(..) => {}
+ // This can only happen when checking a nested macro because this LHS is then in the RHS of
+ // the outer macro. See ui/macros/macro-of-higher-order.rs where $y:$fragment in the
+ // LHS of the nested macro (and RHS of the outer macro) is parsed as MetaVar(y) Colon
+ // MetaVar(fragment) and not as MetaVarDecl(y, fragment).
+ TokenTree::MetaVar(span, name) => {
+ if macros.is_empty() {
+ sess.span_diagnostic.span_bug(span, "unexpected MetaVar in lhs");
+ }
+ let name = MacroRulesNormalizedIdent::new(name);
+ // There are 3 possibilities:
+ if let Some(prev_info) = binders.get(&name) {
+ // 1. The meta-variable is already bound in the current LHS: This is an error.
+ let mut span = MultiSpan::from_span(span);
+ span.push_span_label(prev_info.span, "previous declaration");
+ buffer_lint(sess, span, node_id, "duplicate matcher binding");
+ } else if get_binder_info(macros, binders, name).is_none() {
+ // 2. The meta-variable is free: This is a binder.
+ binders.insert(name, BinderInfo { span, ops: ops.into() });
+ } else {
+ // 3. The meta-variable is bound: This is an occurrence.
+ check_occurrences(sess, node_id, lhs, macros, binders, ops, valid);
+ }
+ }
+ // Similarly, this can only happen when checking a toplevel macro.
+ TokenTree::MetaVarDecl(span, name, kind) => {
+ if kind.is_none() && node_id != DUMMY_NODE_ID {
+ // FIXME: Report this as a hard error eventually and remove equivalent errors from
+ // `parse_tt_inner` and `nameize`. Until then the error may be reported twice, once
+ // as a hard error and then once as a buffered lint.
+ sess.buffer_lint(
+ MISSING_FRAGMENT_SPECIFIER,
+ span,
+ node_id,
+ "missing fragment specifier",
+ );
+ }
+ if !macros.is_empty() {
+ sess.span_diagnostic.span_bug(span, "unexpected MetaVarDecl in nested lhs");
+ }
+ let name = MacroRulesNormalizedIdent::new(name);
+ if let Some(prev_info) = get_binder_info(macros, binders, name) {
+ // Duplicate binders at the top-level macro definition are errors. The lint is only
+ // for nested macro definitions.
+ sess.span_diagnostic
+ .struct_span_err(span, "duplicate matcher binding")
+ .span_label(span, "duplicate binding")
+ .span_label(prev_info.span, "previous binding")
+ .emit();
+ *valid = false;
+ } else {
+ binders.insert(name, BinderInfo { span, ops: ops.into() });
+ }
+ }
+ // `MetaVarExpr` can not appear in the LHS of a macro arm
+ TokenTree::MetaVarExpr(..) => {}
+ TokenTree::Delimited(_, ref del) => {
+ for tt in &del.tts {
+ check_binders(sess, node_id, tt, macros, binders, ops, valid);
+ }
+ }
+ TokenTree::Sequence(_, ref seq) => {
+ let ops = ops.push(seq.kleene);
+ for tt in &seq.tts {
+ check_binders(sess, node_id, tt, macros, binders, &ops, valid);
+ }
+ }
+ }
+}
+
+/// Returns the binder information of a meta-variable.
+///
+/// Arguments:
+/// - `macros` is the stack of possible outer macros
+/// - `binders` contains the current binders
+/// - `name` is the name of the meta-variable we are looking for
+fn get_binder_info<'a>(
+ mut macros: &'a Stack<'a, MacroState<'a>>,
+ binders: &'a Binders,
+ name: MacroRulesNormalizedIdent,
+) -> Option<&'a BinderInfo> {
+ binders.get(&name).or_else(|| macros.find_map(|state| state.binders.get(&name)))
+}
+
+/// Checks `rhs` as part of the RHS of a macro definition and sets `valid` to false in case of
+/// errors.
+///
+/// Arguments:
+/// - `sess` is used to emit diagnostics and lints
+/// - `node_id` is used to emit lints
+/// - `rhs` is checked as part of a RHS
+/// - `macros` is the stack of possible outer macros
+/// - `binders` contains the binders of the associated LHS
+/// - `ops` is the stack of Kleene operators from the RHS
+/// - `valid` is set in case of errors
+fn check_occurrences(
+ sess: &ParseSess,
+ node_id: NodeId,
+ rhs: &TokenTree,
+ macros: &Stack<'_, MacroState<'_>>,
+ binders: &Binders,
+ ops: &Stack<'_, KleeneToken>,
+ valid: &mut bool,
+) {
+ match *rhs {
+ TokenTree::Token(..) => {}
+ TokenTree::MetaVarDecl(span, _name, _kind) => {
+ sess.span_diagnostic.span_bug(span, "unexpected MetaVarDecl in rhs")
+ }
+ TokenTree::MetaVar(span, name) => {
+ let name = MacroRulesNormalizedIdent::new(name);
+ check_ops_is_prefix(sess, node_id, macros, binders, ops, span, name);
+ }
+ TokenTree::MetaVarExpr(dl, ref mve) => {
+ let Some(name) = mve.ident().map(MacroRulesNormalizedIdent::new) else {
+ return;
+ };
+ check_ops_is_prefix(sess, node_id, macros, binders, ops, dl.entire(), name);
+ }
+ TokenTree::Delimited(_, ref del) => {
+ check_nested_occurrences(sess, node_id, &del.tts, macros, binders, ops, valid);
+ }
+ TokenTree::Sequence(_, ref seq) => {
+ let ops = ops.push(seq.kleene);
+ check_nested_occurrences(sess, node_id, &seq.tts, macros, binders, &ops, valid);
+ }
+ }
+}
+
+/// Represents the processed prefix of a nested macro.
+#[derive(Clone, Copy, PartialEq, Eq)]
+enum NestedMacroState {
+ /// Nothing that matches a nested macro definition was processed yet.
+ Empty,
+ /// The token `macro_rules` was processed.
+ MacroRules,
+ /// The tokens `macro_rules!` were processed.
+ MacroRulesNot,
+ /// The tokens `macro_rules!` followed by a name were processed. The name may be either directly
+ /// an identifier or a meta-variable (that hopefully would be instantiated by an identifier).
+ MacroRulesNotName,
+ /// The keyword `macro` was processed.
+ Macro,
+ /// The keyword `macro` followed by a name was processed.
+ MacroName,
+ /// The keyword `macro` followed by a name and a token delimited by parentheses was processed.
+ MacroNameParen,
+}
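+
+// Illustrative trace (not from the original source): scanning the RHS token
+// trees of `macro_rules! bar { ($y:tt) => { $y } }` walks the states
+// Empty -> MacroRules -> MacroRulesNot -> MacroRulesNotName, and the final
+// brace-delimited group is then handed to `check_nested_macro`.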
+
+/// Checks `tts` as part of the RHS of a macro definition, tries to recognize nested macro
+/// definitions, and sets `valid` to false in case of errors.
+///
+/// Arguments:
+/// - `sess` is used to emit diagnostics and lints
+/// - `node_id` is used to emit lints
+/// - `tts` is checked as part of a RHS and may contain macro definitions
+/// - `macros` is the stack of possible outer macros
+/// - `binders` contains the binders of the associated LHS
+/// - `ops` is the stack of Kleene operators from the RHS
+/// - `valid` is set in case of errors
+fn check_nested_occurrences(
+ sess: &ParseSess,
+ node_id: NodeId,
+ tts: &[TokenTree],
+ macros: &Stack<'_, MacroState<'_>>,
+ binders: &Binders,
+ ops: &Stack<'_, KleeneToken>,
+ valid: &mut bool,
+) {
+ let mut state = NestedMacroState::Empty;
+ let nested_macros = macros.push(MacroState { binders, ops: ops.into() });
+ let mut nested_binders = Binders::default();
+ for tt in tts {
+ match (state, tt) {
+ (
+ NestedMacroState::Empty,
+ &TokenTree::Token(Token { kind: TokenKind::Ident(name, false), .. }),
+ ) => {
+ if name == kw::MacroRules {
+ state = NestedMacroState::MacroRules;
+ } else if name == kw::Macro {
+ state = NestedMacroState::Macro;
+ }
+ }
+ (
+ NestedMacroState::MacroRules,
+ &TokenTree::Token(Token { kind: TokenKind::Not, .. }),
+ ) => {
+ state = NestedMacroState::MacroRulesNot;
+ }
+ (
+ NestedMacroState::MacroRulesNot,
+ &TokenTree::Token(Token { kind: TokenKind::Ident(..), .. }),
+ ) => {
+ state = NestedMacroState::MacroRulesNotName;
+ }
+ (NestedMacroState::MacroRulesNot, &TokenTree::MetaVar(..)) => {
+ state = NestedMacroState::MacroRulesNotName;
+ // We check that the meta-variable is correctly used.
+ check_occurrences(sess, node_id, tt, macros, binders, ops, valid);
+ }
+ (NestedMacroState::MacroRulesNotName, &TokenTree::Delimited(_, ref del))
+ | (NestedMacroState::MacroName, &TokenTree::Delimited(_, ref del))
+ if del.delim == Delimiter::Brace =>
+ {
+ let macro_rules = state == NestedMacroState::MacroRulesNotName;
+ state = NestedMacroState::Empty;
+ let rest =
+ check_nested_macro(sess, node_id, macro_rules, &del.tts, &nested_macros, valid);
+ // If we did not check the whole macro definition, then check the rest as if outside
+ // the macro definition.
+ check_nested_occurrences(
+ sess,
+ node_id,
+ &del.tts[rest..],
+ macros,
+ binders,
+ ops,
+ valid,
+ );
+ }
+ (
+ NestedMacroState::Macro,
+ &TokenTree::Token(Token { kind: TokenKind::Ident(..), .. }),
+ ) => {
+ state = NestedMacroState::MacroName;
+ }
+ (NestedMacroState::Macro, &TokenTree::MetaVar(..)) => {
+ state = NestedMacroState::MacroName;
+ // We check that the meta-variable is correctly used.
+ check_occurrences(sess, node_id, tt, macros, binders, ops, valid);
+ }
+ (NestedMacroState::MacroName, &TokenTree::Delimited(_, ref del))
+ if del.delim == Delimiter::Parenthesis =>
+ {
+ state = NestedMacroState::MacroNameParen;
+ nested_binders = Binders::default();
+ check_binders(
+ sess,
+ node_id,
+ tt,
+ &nested_macros,
+ &mut nested_binders,
+ &Stack::Empty,
+ valid,
+ );
+ }
+ (NestedMacroState::MacroNameParen, &TokenTree::Delimited(_, ref del))
+ if del.delim == Delimiter::Brace =>
+ {
+ state = NestedMacroState::Empty;
+ check_occurrences(
+ sess,
+ node_id,
+ tt,
+ &nested_macros,
+ &nested_binders,
+ &Stack::Empty,
+ valid,
+ );
+ }
+ (_, ref tt) => {
+ state = NestedMacroState::Empty;
+ check_occurrences(sess, node_id, tt, macros, binders, ops, valid);
+ }
+ }
+ }
+}
+
+/// Checks the body of a nested macro, returns where the check stopped, and sets `valid` to false
+/// in case of errors.
+///
+/// The token trees are checked as long as they look like a list of (LHS) => {RHS} token trees. This
+/// check is a best-effort attempt to detect a macro definition. It returns the position in `tts`
+/// where we stopped checking because we detected we were not in a macro definition anymore.
+///
+/// Arguments:
+/// - `sess` is used to emit diagnostics and lints
+/// - `node_id` is used to emit lints
+/// - `macro_rules` specifies whether the macro is `macro_rules`
+/// - `tts` is checked as a list of (LHS) => {RHS}
+/// - `macros` is the stack of outer macros
+/// - `valid` is set in case of errors
+fn check_nested_macro(
+ sess: &ParseSess,
+ node_id: NodeId,
+ macro_rules: bool,
+ tts: &[TokenTree],
+ macros: &Stack<'_, MacroState<'_>>,
+ valid: &mut bool,
+) -> usize {
+ let n = tts.len();
+ let mut i = 0;
+ let separator = if macro_rules { TokenKind::Semi } else { TokenKind::Comma };
+ loop {
+ // We expect 3 token trees: `(LHS) => {RHS}`. The separator is checked after.
+ if i + 2 >= n
+ || !tts[i].is_delimited()
+ || !tts[i + 1].is_token(&TokenKind::FatArrow)
+ || !tts[i + 2].is_delimited()
+ {
+ break;
+ }
+ let lhs = &tts[i];
+ let rhs = &tts[i + 2];
+ let mut binders = Binders::default();
+ check_binders(sess, node_id, lhs, macros, &mut binders, &Stack::Empty, valid);
+ check_occurrences(sess, node_id, rhs, macros, &binders, &Stack::Empty, valid);
+        // Since the last separator is optional for `macro_rules` macros and `macro`
+        // (decl_macro) rules are not terminated by one, we increment our checked
+        // position by the number of token trees we just checked (the 3 above) before
+        // checking for the separator.
+ i += 3;
+ if i == n || !tts[i].is_token(&separator) {
+ break;
+ }
+ // We increment our checked position for the semicolon.
+ i += 1;
+ }
+ i
+}
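+
+// Illustrative (not from the original source): given the token trees of
+// `(a) => {b}; (c) => {d} trailing`, the loop above checks both rules and
+// stops at `trailing` (index 7), returning that position so the caller can
+// re-check the remainder as ordinary RHS tokens.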
+
+/// Checks that a meta-variable occurrence is valid.
+///
+/// Arguments:
+/// - `sess` is used to emit diagnostics and lints
+/// - `node_id` is used to emit lints
+/// - `macros` is the stack of possible outer macros
+/// - `binders` contains the binders of the associated LHS
+/// - `ops` is the stack of Kleene operators from the RHS
+/// - `span` is the span of the meta-variable to check
+/// - `name` is the name of the meta-variable to check
+fn check_ops_is_prefix(
+ sess: &ParseSess,
+ node_id: NodeId,
+ macros: &Stack<'_, MacroState<'_>>,
+ binders: &Binders,
+ ops: &Stack<'_, KleeneToken>,
+ span: Span,
+ name: MacroRulesNormalizedIdent,
+) {
+ let macros = macros.push(MacroState { binders, ops: ops.into() });
+    // Accumulates the stacks of operators of each state until (and including when) the
+    // meta-variable is found. The innermost stack comes first.
+ let mut acc: SmallVec<[&SmallVec<[KleeneToken; 1]>; 1]> = SmallVec::new();
+ for state in &macros {
+ acc.push(&state.ops);
+ if let Some(binder) = state.binders.get(&name) {
+ // This variable concatenates the stack of operators from the RHS of the LHS where the
+ // meta-variable was defined to where it is used (in possibly nested macros). The
+ // outermost operator is first.
+ let mut occurrence_ops: SmallVec<[KleeneToken; 2]> = SmallVec::new();
+ // We need to iterate from the end to start with outermost stack.
+ for ops in acc.iter().rev() {
+ occurrence_ops.extend_from_slice(ops);
+ }
+ ops_is_prefix(sess, node_id, span, name, &binder.ops, &occurrence_ops);
+ return;
+ }
+ }
+ buffer_lint(sess, span.into(), node_id, &format!("unknown macro variable `{}`", name));
+}
+
+/// Returns whether `binder_ops` is a prefix of `occurrence_ops`.
+///
+/// The stack of Kleene operators of a meta-variable occurrence just needs to have the stack of
+/// Kleene operators of its binder as a prefix.
+///
+/// Consider $i in the following example:
+/// ```ignore (illustrative)
+/// ( $( $i:ident = $($j:ident),+ );* ) => { $($( $i += $j; )+)* }
+/// ```
+/// It occurs under the Kleene stack ["*", "+"] and is bound under ["*"] only.
+///
+/// Arguments:
+/// - `sess` is used to emit diagnostics and lints
+/// - `node_id` is used to emit lints
+/// - `span` is the span of the meta-variable being checked
+/// - `name` is the name of the meta-variable being checked
+/// - `binder_ops` is the stack of Kleene operators for the binder
+/// - `occurrence_ops` is the stack of Kleene operators for the occurrence
+fn ops_is_prefix(
+ sess: &ParseSess,
+ node_id: NodeId,
+ span: Span,
+ name: MacroRulesNormalizedIdent,
+ binder_ops: &[KleeneToken],
+ occurrence_ops: &[KleeneToken],
+) {
+ for (i, binder) in binder_ops.iter().enumerate() {
+ if i >= occurrence_ops.len() {
+ let mut span = MultiSpan::from_span(span);
+ span.push_span_label(binder.span, "expected repetition");
+ let message = &format!("variable '{}' is still repeating at this depth", name);
+ buffer_lint(sess, span, node_id, message);
+ return;
+ }
+ let occurrence = &occurrence_ops[i];
+ if occurrence.op != binder.op {
+ let mut span = MultiSpan::from_span(span);
+ span.push_span_label(binder.span, "expected repetition");
+ span.push_span_label(occurrence.span, "conflicting repetition");
+ let message = "meta-variable repeats with different Kleene operator";
+ buffer_lint(sess, span, node_id, message);
+ return;
+ }
+ }
+}
+
+fn buffer_lint(sess: &ParseSess, span: MultiSpan, node_id: NodeId, message: &str) {
+ // Macros loaded from other crates have dummy node ids.
+ if node_id != DUMMY_NODE_ID {
+ sess.buffer_lint(&META_VARIABLE_MISUSE, span, node_id, message);
+ }
+}
diff --git a/compiler/rustc_expand/src/mbe/macro_parser.rs b/compiler/rustc_expand/src/mbe/macro_parser.rs
new file mode 100644
index 000000000..4fa91dfea
--- /dev/null
+++ b/compiler/rustc_expand/src/mbe/macro_parser.rs
@@ -0,0 +1,704 @@
+//! This is an NFA-based parser, which calls out to the main Rust parser for named non-terminals
+//! (which it commits to fully when it hits one in a grammar). There's a set of current NFA threads
+//! and a set of next ones. Instead of NTs, we have a special case for Kleene star. The big-O, in
+//! pathological cases, is worse than traditional use of NFA or Earley parsing, but it's an easier
+//! fit for Macro-by-Example-style rules.
+//!
+//! (In order to prevent the pathological case, we'd need to lazily construct the resulting
+//! `NamedMatch`es at the very end. It'd be a pain, and require more memory to keep around old
+//! matcher positions, but it would also save overhead)
+//!
+//! We don't describe this parser as an Earley parser, because that would be unnecessarily
+//! inaccurate: the macro parser restricts itself to the features of finite state automata,
+//! whereas Earley parsers extend NFAs with completion rules, prediction rules, and recursion.
+//!
+//! Quick intro to how the parser works:
+//!
+//! A "matcher position" (a.k.a. "position" or "mp") is a dot in the middle of a matcher, usually
+//! written as a `·`. For example `· a $( a )* a b` is one, as is `a $( · a )* a b`.
+//!
+//! The parser walks through the input a token at a time, maintaining a list
+//! of threads consistent with the current position in the input string: `cur_mps`.
+//!
+//! As it processes them, it fills up `eof_mps` with threads that would be valid if
+//! the macro invocation is now over, `bb_mps` with threads that are waiting on
+//! a Rust non-terminal like `$e:expr`, and `next_mps` with threads that are waiting
+//! on a particular token. Most of the logic concerns moving the · through the
+//! repetitions indicated by Kleene stars. The rules for moving the · without
+//! consuming any input are called epsilon transitions. It only advances or calls
+//! out to the real Rust parser when no `cur_mps` threads remain.
+//!
+//! Example:
+//!
+//! ```text, ignore
+//! Start parsing a a a a b against [· a $( a )* a b].
+//!
+//! Remaining input: a a a a b
+//! next: [· a $( a )* a b]
+//!
+//! - - - Advance over an a. - - -
+//!
+//! Remaining input: a a a b
+//! cur: [a · $( a )* a b]
+//! Descend/Skip (first position).
+//! next: [a $( · a )* a b] [a $( a )* · a b].
+//!
+//! - - - Advance over an a. - - -
+//!
+//! Remaining input: a a b
+//! cur: [a $( a · )* a b] [a $( a )* a · b]
+//! Follow epsilon transition: Finish/Repeat (first position)
+//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
+//!
+//! - - - Advance over an a. - - - (this looks exactly like the last step)
+//!
+//! Remaining input: a b
+//! cur: [a $( a · )* a b] [a $( a )* a · b]
+//! Follow epsilon transition: Finish/Repeat (first position)
+//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
+//!
+//! - - - Advance over an a. - - - (this looks exactly like the last step)
+//!
+//! Remaining input: b
+//! cur: [a $( a · )* a b] [a $( a )* a · b]
+//! Follow epsilon transition: Finish/Repeat (first position)
+//! next: [a $( a )* · a b] [a $( · a )* a b] [a $( a )* a · b]
+//!
+//! - - - Advance over a b. - - -
+//!
+//! Remaining input: ''
+//! eof: [a $( a )* a b ·]
+//! ```
+
+pub(crate) use NamedMatch::*;
+pub(crate) use ParseResult::*;
+
+use crate::mbe::{KleeneOp, TokenTree};
+
+use rustc_ast::token::{self, DocComment, Nonterminal, NonterminalKind, Token};
+use rustc_lint_defs::pluralize;
+use rustc_parse::parser::{NtOrTt, Parser};
+use rustc_span::symbol::MacroRulesNormalizedIdent;
+use rustc_span::Span;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lrc;
+use rustc_span::symbol::Ident;
+use std::borrow::Cow;
+use std::collections::hash_map::Entry::{Occupied, Vacant};
+
+/// A unit within a matcher that a `MatcherPos` can refer to. Similar to (and derived from)
+/// `mbe::TokenTree`, but designed specifically for fast and easy traversal during matching.
+/// Notable differences to `mbe::TokenTree`:
+/// - It is non-recursive, i.e. there is no nesting.
+/// - The end pieces of each sequence (the separator, if present, and the Kleene op) are
+/// represented explicitly, as is the very end of the matcher.
+///
+/// This means a matcher can be represented by `&[MatcherLoc]`, and traversal mostly involves
+/// simply incrementing the current matcher position index by one.
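+///
+/// As an illustrative example, the matcher `$( $i:ident ),*` is flattened (by
+/// `compute_locs` below) into:
+/// ```text
+/// [ Sequence, MetaVarDecl($i:ident), SequenceSep(,), SequenceKleeneOpAfterSep, Eof ]
+/// ```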
+pub(super) enum MatcherLoc {
+ Token {
+ token: Token,
+ },
+ Delimited,
+ Sequence {
+ op: KleeneOp,
+ num_metavar_decls: usize,
+ idx_first_after: usize,
+ next_metavar: usize,
+ seq_depth: usize,
+ },
+ SequenceKleeneOpNoSep {
+ op: KleeneOp,
+ idx_first: usize,
+ },
+ SequenceSep {
+ separator: Token,
+ },
+ SequenceKleeneOpAfterSep {
+ idx_first: usize,
+ },
+ MetaVarDecl {
+ span: Span,
+ bind: Ident,
+ kind: Option<NonterminalKind>,
+ next_metavar: usize,
+ seq_depth: usize,
+ },
+ Eof,
+}
+
+pub(super) fn compute_locs(matcher: &[TokenTree]) -> Vec<MatcherLoc> {
+ fn inner(
+ tts: &[TokenTree],
+ locs: &mut Vec<MatcherLoc>,
+ next_metavar: &mut usize,
+ seq_depth: usize,
+ ) {
+ for tt in tts {
+ match tt {
+ TokenTree::Token(token) => {
+ locs.push(MatcherLoc::Token { token: token.clone() });
+ }
+ TokenTree::Delimited(span, delimited) => {
+ let open_token = Token::new(token::OpenDelim(delimited.delim), span.open);
+ let close_token = Token::new(token::CloseDelim(delimited.delim), span.close);
+
+ locs.push(MatcherLoc::Delimited);
+ locs.push(MatcherLoc::Token { token: open_token });
+ inner(&delimited.tts, locs, next_metavar, seq_depth);
+ locs.push(MatcherLoc::Token { token: close_token });
+ }
+ TokenTree::Sequence(_, seq) => {
+ // We can't determine `idx_first_after` and construct the final
+ // `MatcherLoc::Sequence` until after `inner()` is called and the sequence end
+ // pieces are processed. So we push a dummy value (`Eof` is cheapest to
+ // construct) now, and overwrite it with the proper value below.
+ let dummy = MatcherLoc::Eof;
+ locs.push(dummy);
+
+ let next_metavar_orig = *next_metavar;
+ let op = seq.kleene.op;
+ let idx_first = locs.len();
+ let idx_seq = idx_first - 1;
+ inner(&seq.tts, locs, next_metavar, seq_depth + 1);
+
+ if let Some(separator) = &seq.separator {
+ locs.push(MatcherLoc::SequenceSep { separator: separator.clone() });
+ locs.push(MatcherLoc::SequenceKleeneOpAfterSep { idx_first });
+ } else {
+ locs.push(MatcherLoc::SequenceKleeneOpNoSep { op, idx_first });
+ }
+
+ // Overwrite the dummy value pushed above with the proper value.
+ locs[idx_seq] = MatcherLoc::Sequence {
+ op,
+ num_metavar_decls: seq.num_captures,
+ idx_first_after: locs.len(),
+ next_metavar: next_metavar_orig,
+ seq_depth,
+ };
+ }
+ &TokenTree::MetaVarDecl(span, bind, kind) => {
+ locs.push(MatcherLoc::MetaVarDecl {
+ span,
+ bind,
+ kind,
+ next_metavar: *next_metavar,
+ seq_depth,
+ });
+ *next_metavar += 1;
+ }
+ TokenTree::MetaVar(..) | TokenTree::MetaVarExpr(..) => unreachable!(),
+ }
+ }
+ }
+
+ let mut locs = vec![];
+ let mut next_metavar = 0;
+ inner(matcher, &mut locs, &mut next_metavar, /* seq_depth */ 0);
+
+ // A final entry is needed for eof.
+ locs.push(MatcherLoc::Eof);
+
+ locs
+}
+
+/// A single matcher position, representing the state of matching.
+struct MatcherPos {
+ /// The index into `TtParser::locs`, which represents the "dot".
+ idx: usize,
+
+ /// The matches made against metavar decls so far. On a successful match, this vector ends up
+ /// with one element per metavar decl in the matcher. Each element records token trees matched
+ /// against the relevant metavar by the black box parser. An element will be a `MatchedSeq` if
+ /// the corresponding metavar decl is within a sequence.
+ ///
+ /// It is critical to performance that this is an `Lrc`, because it gets cloned frequently when
+ /// processing sequences. Mostly for sequence-ending possibilities that must be tried but end
+ /// up failing.
+ matches: Lrc<Vec<NamedMatch>>,
+}
+
+// This type is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(MatcherPos, 16);
+
+impl MatcherPos {
+ /// Adds `m` as a named match for the `metavar_idx`-th metavar. There are only two call sites,
+ /// and both are hot enough to be always worth inlining.
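+ ///
+ /// For example (illustrative): with `seq_depth == 2`, `matches[metavar_idx]` must be
+ /// a `MatchedSeq` whose last element is itself a `MatchedSeq`, and `m` is appended to
+ /// that innermost sequence.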
+ #[inline(always)]
+ fn push_match(&mut self, metavar_idx: usize, seq_depth: usize, m: NamedMatch) {
+ let matches = Lrc::make_mut(&mut self.matches);
+ match seq_depth {
+ 0 => {
+ // We are not within a sequence. Just append `m`.
+ assert_eq!(metavar_idx, matches.len());
+ matches.push(m);
+ }
+ _ => {
+ // We are within a sequence. Find the final `MatchedSeq` at the appropriate depth
+ // and append `m` to its vector.
+ let mut curr = &mut matches[metavar_idx];
+ for _ in 0..seq_depth - 1 {
+ match curr {
+ MatchedSeq(seq) => curr = seq.last_mut().unwrap(),
+ _ => unreachable!(),
+ }
+ }
+ match curr {
+ MatchedSeq(seq) => seq.push(m),
+ _ => unreachable!(),
+ }
+ }
+ }
+ }
+}
+
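+/// Tracks how many matcher positions would be valid if the input ended now: zero,
+/// exactly one (kept so its matches can be nameized), or multiple, which is reported
+/// as an ambiguity error in `parse_tt_inner`.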
+enum EofMatcherPositions {
+ None,
+ One(MatcherPos),
+ Multiple,
+}
+
+/// Represents the possible results of an attempted parse.
+pub(crate) enum ParseResult<T> {
+ /// Parsed successfully.
+ Success(T),
+ /// Arm failed to match. If the `Token`'s kind is `token::Eof`, it indicates an unexpected
+ /// end of macro invocation. Otherwise, it indicates that no rules expected the given token.
+ Failure(Token, &'static str),
+ /// Fatal error (malformed macro?). Abort compilation.
+ Error(rustc_span::Span, String),
+ ErrorReported,
+}
+
+/// A `ParseResult` where the `Success` variant contains a mapping of
+/// `MacroRulesNormalizedIdent`s to `NamedMatch`es. This represents the mapping
+/// of metavars to the token trees they bind to.
+pub(crate) type NamedParseResult = ParseResult<FxHashMap<MacroRulesNormalizedIdent, NamedMatch>>;
+
+/// Count how many metavar declarations are in `matcher`.
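+///
+/// For example (illustrative), `$a:ident $( $b:ident ),*` contains two such
+/// declarations: `$a` at the top level and `$b` within the sequence.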
+pub(super) fn count_metavar_decls(matcher: &[TokenTree]) -> usize {
+ matcher
+ .iter()
+ .map(|tt| match tt {
+ TokenTree::MetaVarDecl(..) => 1,
+ TokenTree::Sequence(_, seq) => seq.num_captures,
+ TokenTree::Delimited(_, delim) => count_metavar_decls(&delim.tts),
+ TokenTree::Token(..) => 0,
+ TokenTree::MetaVar(..) | TokenTree::MetaVarExpr(..) => unreachable!(),
+ })
+ .sum()
+}
+
+/// `NamedMatch` is a pattern-match result for a single metavar. All
+/// `MatchedNonterminal`s in the `NamedMatch` have the same non-terminal type
+/// (expr, item, etc).
+///
+/// The in-memory structure of a particular `NamedMatch` represents the match
+/// that occurred when a particular subset of a matcher was applied to a
+/// particular token tree.
+///
+/// The width of each `MatchedSeq` in the `NamedMatch`, and the identity of
+/// the `MatchedNonterminal`s, will depend on the token tree it was applied
+/// to: each `MatchedSeq` corresponds to a single repetition in the originating
+/// token tree. The depth of the `NamedMatch` structure will therefore depend
+/// only on the nesting depth of repetitions in the originating token tree it
+/// was derived from.
+///
+/// In layperson's terms: `NamedMatch` will form a tree representing nested matches of a particular
+/// meta variable. For example, if we are matching the following macro against the following
+/// invocation...
+///
+/// ```rust
+/// macro_rules! foo {
+/// ($($($x:ident),+);+) => {}
+/// }
+///
+/// foo!(a, b, c, d; a, b, c, d, e);
+/// ```
+///
+/// Then, the tree will have the following shape:
+///
+/// ```ignore (private-internal)
+/// # use NamedMatch::*;
+/// MatchedSeq([
+/// MatchedSeq([
+/// MatchedNonterminal(a),
+/// MatchedNonterminal(b),
+/// MatchedNonterminal(c),
+/// MatchedNonterminal(d),
+/// ]),
+/// MatchedSeq([
+/// MatchedNonterminal(a),
+/// MatchedNonterminal(b),
+/// MatchedNonterminal(c),
+/// MatchedNonterminal(d),
+/// MatchedNonterminal(e),
+/// ])
+/// ])
+/// ```
+#[derive(Debug, Clone)]
+pub(crate) enum NamedMatch {
+ MatchedSeq(Vec<NamedMatch>),
+
+ // A metavar match of type `tt`.
+ MatchedTokenTree(rustc_ast::tokenstream::TokenTree),
+
+ // A metavar match of any type other than `tt`.
+ MatchedNonterminal(Lrc<Nonterminal>),
+}
+
+/// Performs a token equality check, ignoring syntax context (that is, an unhygienic comparison)
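+///
+/// For example (illustrative), two `x` identifier tokens coming from different macro
+/// expansions compare equal here because their syntax contexts are ignored, while `x`
+/// and `r#x` differ, since their rawness is compared.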
+fn token_name_eq(t1: &Token, t2: &Token) -> bool {
+ if let (Some((ident1, is_raw1)), Some((ident2, is_raw2))) = (t1.ident(), t2.ident()) {
+ ident1.name == ident2.name && is_raw1 == is_raw2
+ } else if let (Some(ident1), Some(ident2)) = (t1.lifetime(), t2.lifetime()) {
+ ident1.name == ident2.name
+ } else {
+ t1.kind == t2.kind
+ }
+}
+
+// Note: the vectors could be created and dropped within `parse_tt`, but to avoid excess
+// allocations we have a single vector for each kind that is cleared and reused repeatedly.
+pub struct TtParser {
+ macro_name: Ident,
+
+ /// The set of current mps to be processed. This should be empty by the end of a successful
+ /// execution of `parse_tt_inner`.
+ cur_mps: Vec<MatcherPos>,
+
+ /// The set of newly generated mps. These are used to replenish `cur_mps` in the function
+ /// `parse_tt`.
+ next_mps: Vec<MatcherPos>,
+
+ /// The set of mps that are waiting for the black-box parser.
+ bb_mps: Vec<MatcherPos>,
+
+ /// Pre-allocate an empty match array, so it can be cloned cheaply for macros with many rules
+ /// that have no metavars.
+ empty_matches: Lrc<Vec<NamedMatch>>,
+}
+
+impl TtParser {
+ pub(super) fn new(macro_name: Ident) -> TtParser {
+ TtParser {
+ macro_name,
+ cur_mps: vec![],
+ next_mps: vec![],
+ bb_mps: vec![],
+ empty_matches: Lrc::new(vec![]),
+ }
+ }
+
+ /// Process the matcher positions of `cur_mps` until it is empty. In the process, this will
+ /// produce more mps in `next_mps` and `bb_mps`.
+ ///
+ /// # Returns
+ ///
+ /// `Some(result)` if everything is finished, `None` otherwise. Note that matches are
+ /// tracked through the generated mps.
+ fn parse_tt_inner(
+ &mut self,
+ matcher: &[MatcherLoc],
+ token: &Token,
+ ) -> Option<NamedParseResult> {
+ // Matcher positions that would be valid if the macro invocation was over now. Only
+ // modified if `token == Eof`.
+ let mut eof_mps = EofMatcherPositions::None;
+
+ while let Some(mut mp) = self.cur_mps.pop() {
+ match &matcher[mp.idx] {
+ MatcherLoc::Token { token: t } => {
+ // If it's a doc comment, we just ignore it and move on to the next tt in the
+ // matcher. This is a bug, but #95267 showed that existing programs rely on
+ // this behaviour, and changing it would require some care and a transition
+ // period.
+ //
+ // If the token matches, we can just advance the parser.
+ //
+ // Otherwise, this match has failed, there is nothing to do, and hopefully
+ // another mp in `cur_mps` will match.
+ if matches!(t, Token { kind: DocComment(..), .. }) {
+ mp.idx += 1;
+ self.cur_mps.push(mp);
+ } else if token_name_eq(&t, token) {
+ mp.idx += 1;
+ self.next_mps.push(mp);
+ }
+ }
+ MatcherLoc::Delimited => {
+ // Entering the delimiter is trivial.
+ mp.idx += 1;
+ self.cur_mps.push(mp);
+ }
+ &MatcherLoc::Sequence {
+ op,
+ num_metavar_decls,
+ idx_first_after,
+ next_metavar,
+ seq_depth,
+ } => {
+ // Install an empty vec for each metavar within the sequence.
+ for metavar_idx in next_metavar..next_metavar + num_metavar_decls {
+ mp.push_match(metavar_idx, seq_depth, MatchedSeq(vec![]));
+ }
+
+ if op == KleeneOp::ZeroOrMore || op == KleeneOp::ZeroOrOne {
+ // Try zero matches of this sequence, by skipping over it.
+ self.cur_mps.push(MatcherPos {
+ idx: idx_first_after,
+ matches: mp.matches.clone(), // a cheap clone
+ });
+ }
+
+ // Try one or more matches of this sequence, by entering it.
+ mp.idx += 1;
+ self.cur_mps.push(mp);
+ }
+ &MatcherLoc::SequenceKleeneOpNoSep { op, idx_first } => {
+ // We are past the end of a sequence with no separator. Try ending the
+ // sequence. If that's not possible, `ending_mp` will fail quietly when it is
+ // processed next time around the loop.
+ let ending_mp = MatcherPos {
+ idx: mp.idx + 1, // +1 skips the Kleene op
+ matches: mp.matches.clone(), // a cheap clone
+ };
+ self.cur_mps.push(ending_mp);
+
+ if op != KleeneOp::ZeroOrOne {
+ // Try another repetition.
+ mp.idx = idx_first;
+ self.cur_mps.push(mp);
+ }
+ }
+ MatcherLoc::SequenceSep { separator } => {
+ // We are past the end of a sequence with a separator but we haven't seen the
+ // separator yet. Try ending the sequence. If that's not possible, `ending_mp`
+ // will fail quietly when it is processed next time around the loop.
+ let ending_mp = MatcherPos {
+ idx: mp.idx + 2, // +2 skips the separator and the Kleene op
+ matches: mp.matches.clone(), // a cheap clone
+ };
+ self.cur_mps.push(ending_mp);
+
+ if token_name_eq(token, separator) {
+ // The separator matches the current token. Advance past it.
+ mp.idx += 1;
+ self.next_mps.push(mp);
+ }
+ }
+ &MatcherLoc::SequenceKleeneOpAfterSep { idx_first } => {
+ // We are past the sequence separator. This can't be a `?` Kleene op, because
+ // `?` does not permit separators. Try another repetition.
+ mp.idx = idx_first;
+ self.cur_mps.push(mp);
+ }
+ &MatcherLoc::MetaVarDecl { span, kind, .. } => {
+ // Built-in nonterminals never start with these tokens, so we can eliminate
+ // them from consideration. We use the span of the metavariable declaration
+ // to determine any edition-specific matching behavior for non-terminals.
+ if let Some(kind) = kind {
+ if Parser::nonterminal_may_begin_with(kind, token) {
+ self.bb_mps.push(mp);
+ }
+ } else {
+ // E.g. `$e` instead of `$e:expr`, reported as a hard error if actually used.
+ // Both this check and the one in `nameize` are necessary, surprisingly.
+ return Some(Error(span, "missing fragment specifier".to_string()));
+ }
+ }
+ MatcherLoc::Eof => {
+ // We are past the matcher's end, and not in a sequence. Try to end things.
+ debug_assert_eq!(mp.idx, matcher.len() - 1);
+ if *token == token::Eof {
+ eof_mps = match eof_mps {
+ EofMatcherPositions::None => EofMatcherPositions::One(mp),
+ EofMatcherPositions::One(_) | EofMatcherPositions::Multiple => {
+ EofMatcherPositions::Multiple
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // If we reached the end of input, check that there is EXACTLY ONE possible matcher.
+ // Otherwise, either the parse is ambiguous (which is an error) or there is a syntax error.
+ if *token == token::Eof {
+ Some(match eof_mps {
+ EofMatcherPositions::One(mut eof_mp) => {
+ // Need to take ownership of the matches from within the `Lrc`.
+ Lrc::make_mut(&mut eof_mp.matches);
+ let matches = Lrc::try_unwrap(eof_mp.matches).unwrap().into_iter();
+ self.nameize(matcher, matches)
+ }
+ EofMatcherPositions::Multiple => {
+ Error(token.span, "ambiguity: multiple successful parses".to_string())
+ }
+ EofMatcherPositions::None => Failure(
+ Token::new(
+ token::Eof,
+ if token.span.is_dummy() { token.span } else { token.span.shrink_to_hi() },
+ ),
+ "missing tokens in macro arguments",
+ ),
+ })
+ } else {
+ None
+ }
+ }
+
+ /// Match the token stream from `parser` against `matcher`.
+ pub(super) fn parse_tt(
+ &mut self,
+ parser: &mut Cow<'_, Parser<'_>>,
+ matcher: &[MatcherLoc],
+ ) -> NamedParseResult {
+ // A queue of possible matcher positions. We initialize it with the matcher position in
+ // which the "dot" is before the first token of the first token tree in `matcher`.
+ // `parse_tt_inner` then processes all of these possible matcher positions and produces
+ // possible next positions into `next_mps`. After some post-processing, the contents of
+ // `next_mps` replenish `cur_mps` and we start over again.
+ self.cur_mps.clear();
+ self.cur_mps.push(MatcherPos { idx: 0, matches: self.empty_matches.clone() });
+
+ loop {
+ self.next_mps.clear();
+ self.bb_mps.clear();
+
+ // Process `cur_mps` until either we have finished the input or we need to get some
+ // parsing from the black-box parser done.
+ if let Some(res) = self.parse_tt_inner(matcher, &parser.token) {
+ return res;
+ }
+
+ // `parse_tt_inner` handled all of `cur_mps`, so it's empty.
+ assert!(self.cur_mps.is_empty());
+
+ // Error messages here could be improved with links to original rules.
+ match (self.next_mps.len(), self.bb_mps.len()) {
+ (0, 0) => {
+ // There are no possible next positions AND we aren't waiting for the black-box
+ // parser: syntax error.
+ return Failure(
+ parser.token.clone(),
+ "no rules expected this token in macro call",
+ );
+ }
+
+ (_, 0) => {
+ // Dump all possible `next_mps` into `cur_mps` for the next iteration. Then
+ // process the next token.
+ self.cur_mps.append(&mut self.next_mps);
+ parser.to_mut().bump();
+ }
+
+ (0, 1) => {
+ // We need to call the black-box parser to get some nonterminal.
+ let mut mp = self.bb_mps.pop().unwrap();
+ let loc = &matcher[mp.idx];
+ if let &MatcherLoc::MetaVarDecl {
+ span,
+ kind: Some(kind),
+ next_metavar,
+ seq_depth,
+ ..
+ } = loc
+ {
+ // We use the span of the metavariable declaration to determine any
+ // edition-specific matching behavior for non-terminals.
+ let nt = match parser.to_mut().parse_nonterminal(kind) {
+ Err(mut err) => {
+ err.span_label(
+ span,
+ format!(
+ "while parsing argument for this `{kind}` macro fragment"
+ ),
+ )
+ .emit();
+ return ErrorReported;
+ }
+ Ok(nt) => nt,
+ };
+ let m = match nt {
+ NtOrTt::Nt(nt) => MatchedNonterminal(Lrc::new(nt)),
+ NtOrTt::Tt(tt) => MatchedTokenTree(tt),
+ };
+ mp.push_match(next_metavar, seq_depth, m);
+ mp.idx += 1;
+ } else {
+ unreachable!()
+ }
+ self.cur_mps.push(mp);
+ }
+
+ (_, _) => {
+ // Too many possibilities!
+ return self.ambiguity_error(matcher, parser.token.span);
+ }
+ }
+
+ assert!(!self.cur_mps.is_empty());
+ }
+ }
+
+ fn ambiguity_error(
+ &self,
+ matcher: &[MatcherLoc],
+ token_span: rustc_span::Span,
+ ) -> NamedParseResult {
+ let nts = self
+ .bb_mps
+ .iter()
+ .map(|mp| match &matcher[mp.idx] {
+ MatcherLoc::MetaVarDecl { bind, kind: Some(kind), .. } => {
+ format!("{} ('{}')", kind, bind)
+ }
+ _ => unreachable!(),
+ })
+ .collect::<Vec<String>>()
+ .join(" or ");
+
+ Error(
+ token_span,
+ format!(
+ "local ambiguity when calling macro `{}`: multiple parsing options: {}",
+ self.macro_name,
+ match self.next_mps.len() {
+ 0 => format!("built-in NTs {}.", nts),
+ n => format!("built-in NTs {} or {n} other option{s}.", nts, s = pluralize!(n)),
+ }
+ ),
+ )
+ }
+
+ fn nameize<I: Iterator<Item = NamedMatch>>(
+ &self,
+ matcher: &[MatcherLoc],
+ mut res: I,
+ ) -> NamedParseResult {
+ // Make sure that each metavar has _exactly one_ binding. If so, insert the binding into the
+ // `NamedParseResult`. Otherwise, it's an error.
+ let mut ret_val = FxHashMap::default();
+ for loc in matcher {
+ if let &MatcherLoc::MetaVarDecl { span, bind, kind, .. } = loc {
+ if kind.is_some() {
+ match ret_val.entry(MacroRulesNormalizedIdent::new(bind)) {
+ Vacant(spot) => spot.insert(res.next().unwrap()),
+ Occupied(..) => {
+ return Error(span, format!("duplicated bind name: {}", bind));
+ }
+ };
+ } else {
+ // E.g. `$e` instead of `$e:expr`, reported as a hard error if actually used.
+ // Both this check and the one in `parse_tt_inner` are necessary, surprisingly.
+ return Error(span, "missing fragment specifier".to_string());
+ }
+ }
+ }
+ Success(ret_val)
+ }
+}
diff --git a/compiler/rustc_expand/src/mbe/macro_rules.rs b/compiler/rustc_expand/src/mbe/macro_rules.rs
new file mode 100644
index 000000000..f7e1575af
--- /dev/null
+++ b/compiler/rustc_expand/src/mbe/macro_rules.rs
@@ -0,0 +1,1420 @@
+use crate::base::{DummyResult, ExtCtxt, MacResult, TTMacroExpander};
+use crate::base::{SyntaxExtension, SyntaxExtensionKind};
+use crate::expand::{ensure_complete_parse, parse_ast_fragment, AstFragment, AstFragmentKind};
+use crate::mbe;
+use crate::mbe::macro_check;
+use crate::mbe::macro_parser::{Error, ErrorReported, Failure, Success, TtParser};
+use crate::mbe::macro_parser::{MatchedSeq, MatchedTokenTree, MatcherLoc};
+use crate::mbe::transcribe::transcribe;
+
+use rustc_ast as ast;
+use rustc_ast::token::{self, Delimiter, NonterminalKind, Token, TokenKind, TokenKind::*};
+use rustc_ast::tokenstream::{DelimSpan, TokenStream};
+use rustc_ast::{NodeId, DUMMY_NODE_ID};
+use rustc_ast_pretty::pprust;
+use rustc_attr::{self as attr, TransparencyError};
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_feature::Features;
+use rustc_lint_defs::builtin::{
+ RUST_2021_INCOMPATIBLE_OR_PATTERNS, SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
+};
+use rustc_lint_defs::BuiltinLintDiagnostics;
+use rustc_parse::parser::Parser;
+use rustc_session::parse::ParseSess;
+use rustc_session::Session;
+use rustc_span::edition::Edition;
+use rustc_span::hygiene::Transparency;
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::{kw, sym, Ident, MacroRulesNormalizedIdent};
+use rustc_span::Span;
+
+use std::borrow::Cow;
+use std::collections::hash_map::Entry;
+use std::{mem, slice};
+use tracing::debug;
+
+pub(crate) struct ParserAnyMacro<'a> {
+ parser: Parser<'a>,
+
+ /// Span of the expansion site of the macro this parser is for
+ site_span: Span,
+ /// The ident of the macro we're parsing
+ macro_ident: Ident,
+ lint_node_id: NodeId,
+ is_trailing_mac: bool,
+ arm_span: Span,
+ /// Whether or not this macro is defined in the current crate
+ is_local: bool,
+}
+
+pub(crate) fn annotate_err_with_kind(err: &mut Diagnostic, kind: AstFragmentKind, span: Span) {
+ match kind {
+ AstFragmentKind::Ty => {
+ err.span_label(span, "this macro call doesn't expand to a type");
+ }
+ AstFragmentKind::Pat => {
+ err.span_label(span, "this macro call doesn't expand to a pattern");
+ }
+ _ => {}
+ };
+}
+
+fn emit_frag_parse_err(
+ mut e: DiagnosticBuilder<'_, rustc_errors::ErrorGuaranteed>,
+ parser: &Parser<'_>,
+ orig_parser: &mut Parser<'_>,
+ site_span: Span,
+ arm_span: Span,
+ kind: AstFragmentKind,
+) {
+ // FIXME(davidtwco): avoid depending on the error message text
+ if parser.token == token::Eof && e.message[0].0.expect_str().ends_with(", found `<eof>`") {
+ if !e.span.is_dummy() {
+ // early end of macro arm (#52866)
+ e.replace_span_with(parser.sess.source_map().next_point(parser.token.span));
+ }
+ let msg = &e.message[0];
+ e.message[0] = (
+ rustc_errors::DiagnosticMessage::Str(format!(
+ "macro expansion ends with an incomplete expression: {}",
+ msg.0.expect_str().replace(", found `<eof>`", ""),
+ )),
+ msg.1,
+ );
+ }
+ if e.span.is_dummy() {
+ // Get around lack of span in error (#30128)
+ e.replace_span_with(site_span);
+ if !parser.sess.source_map().is_imported(arm_span) {
+ e.span_label(arm_span, "in this macro arm");
+ }
+ } else if parser.sess.source_map().is_imported(parser.token.span) {
+ e.span_label(site_span, "in this macro invocation");
+ }
+ match kind {
+ // Try a statement if an expression is wanted but failed and suggest adding `;` to call.
+ AstFragmentKind::Expr => match parse_ast_fragment(orig_parser, AstFragmentKind::Stmts) {
+ Err(err) => err.cancel(),
+ Ok(_) => {
+ e.note(
+ "the macro call doesn't expand to an expression, but it can expand to a statement",
+ );
+ e.span_suggestion_verbose(
+ site_span.shrink_to_hi(),
+ "add `;` to interpret the expansion as a statement",
+ ";",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ },
+ _ => annotate_err_with_kind(&mut e, kind, site_span),
+ };
+ e.emit();
+}
+
+impl<'a> ParserAnyMacro<'a> {
+ pub(crate) fn make(mut self: Box<ParserAnyMacro<'a>>, kind: AstFragmentKind) -> AstFragment {
+ let ParserAnyMacro {
+ site_span,
+ macro_ident,
+ ref mut parser,
+ lint_node_id,
+ arm_span,
+ is_trailing_mac,
+ is_local,
+ } = *self;
+ let snapshot = &mut parser.create_snapshot_for_diagnostic();
+ let fragment = match parse_ast_fragment(parser, kind) {
+ Ok(f) => f,
+ Err(err) => {
+ emit_frag_parse_err(err, parser, snapshot, site_span, arm_span, kind);
+ return kind.dummy(site_span);
+ }
+ };
+
+ // We allow semicolons at the end of expressions -- e.g., the semicolon in
+ // `macro_rules! m { () => { panic!(); } }` isn't parsed by `.parse_expr()`,
+ // but `m!()` is allowed in expression positions (cf. issue #34706).
+ if kind == AstFragmentKind::Expr && parser.token == token::Semi {
+ if is_local {
+ parser.sess.buffer_lint_with_diagnostic(
+ SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
+ parser.token.span,
+ lint_node_id,
+ "trailing semicolon in macro used in expression position",
+ BuiltinLintDiagnostics::TrailingMacro(is_trailing_mac, macro_ident),
+ );
+ }
+ parser.bump();
+ }
+
+ // Make sure we don't have any tokens left to parse so we don't silently drop anything.
+ let path = ast::Path::from_ident(macro_ident.with_span_pos(site_span));
+ ensure_complete_parse(parser, &path, kind.name(), site_span);
+ fragment
+ }
+}
+
+struct MacroRulesMacroExpander {
+ node_id: NodeId,
+ name: Ident,
+ span: Span,
+ transparency: Transparency,
+ lhses: Vec<Vec<MatcherLoc>>,
+ rhses: Vec<mbe::TokenTree>,
+ valid: bool,
+}
+
+impl TTMacroExpander for MacroRulesMacroExpander {
+ fn expand<'cx>(
+ &self,
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ input: TokenStream,
+ ) -> Box<dyn MacResult + 'cx> {
+ if !self.valid {
+ return DummyResult::any(sp);
+ }
+ expand_macro(
+ cx,
+ sp,
+ self.span,
+ self.node_id,
+ self.name,
+ self.transparency,
+ input,
+ &self.lhses,
+ &self.rhses,
+ )
+ }
+}
+
+fn macro_rules_dummy_expander<'cx>(
+ _: &'cx mut ExtCtxt<'_>,
+ span: Span,
+ _: TokenStream,
+) -> Box<dyn MacResult + 'cx> {
+ DummyResult::any(span)
+}
+
+fn trace_macros_note(cx_expansions: &mut FxIndexMap<Span, Vec<String>>, sp: Span, message: String) {
+ let sp = sp.macro_backtrace().last().map_or(sp, |trace| trace.call_site);
+ cx_expansions.entry(sp).or_default().push(message);
+}
+
+/// Expands the rules-based macro defined by `lhses` and `rhses` for a given
+/// input `arg`.
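+///
+/// Illustratively, given `macro_rules! m { (a) => { 1 }; ($e:expr) => { $e } }`,
+/// the call `m!(a)` matches the first arm (arms are tried in definition order),
+/// while `m!(2 + 2)` only matches the second.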
+fn expand_macro<'cx>(
+ cx: &'cx mut ExtCtxt<'_>,
+ sp: Span,
+ def_span: Span,
+ node_id: NodeId,
+ name: Ident,
+ transparency: Transparency,
+ arg: TokenStream,
+ lhses: &[Vec<MatcherLoc>],
+ rhses: &[mbe::TokenTree],
+) -> Box<dyn MacResult + 'cx> {
+ let sess = &cx.sess.parse_sess;
+ // Macros defined in the current crate have a real node id,
+ // whereas macros from an external crate have a dummy id.
+ let is_local = node_id != DUMMY_NODE_ID;
+
+ if cx.trace_macros() {
+ let msg = format!("expanding `{}! {{ {} }}`", name, pprust::tts_to_string(&arg));
+ trace_macros_note(&mut cx.expansions, sp, msg);
+ }
+
+ // Which arm's failure should we report? (the one furthest along)
+ let mut best_failure: Option<(Token, &str)> = None;
+
+ // We create a base parser that can be used for the "black box" parts.
+ // Every iteration needs a fresh copy of that parser. However, the parser
+ // is not mutated on many of the iterations, particularly when dealing with
+ // macros like this:
+ //
+ // macro_rules! foo {
+ // ("a") => (A);
+ // ("b") => (B);
+ // ("c") => (C);
+ // // ... etc. (maybe hundreds more)
+ // }
+ //
+ // as seen in the `html5ever` benchmark. We use a `Cow` so that the base
+ // parser is only cloned when necessary (upon mutation). Furthermore, we
+ // reinitialize the `Cow` with the base parser at the start of every
+ // iteration, so that any mutated parsers are not reused. This is all quite
+ // hacky, but speeds up the `html5ever` benchmark significantly. (Issue
+ // 68836 suggests a more comprehensive but more complex change to deal with
+ // this situation.)
+ let parser = parser_from_cx(sess, arg.clone());
+
+ // Try each arm's matchers.
+ let mut tt_parser = TtParser::new(name);
+ for (i, lhs) in lhses.iter().enumerate() {
+ // Take a snapshot of the state of pre-expansion gating at this point.
+ // This is used so that if a matcher is not `Success(..)`ful,
+ // then the spans which became gated when parsing the unsuccessful matcher
+ // are not recorded. On the first `Success(..)`ful matcher, the spans are merged.
+ let mut gated_spans_snapshot = mem::take(&mut *sess.gated_spans.spans.borrow_mut());
+
+ match tt_parser.parse_tt(&mut Cow::Borrowed(&parser), lhs) {
+ Success(named_matches) => {
+ // The matcher was `Success(..)`ful.
+ // Merge the gated spans from parsing the matcher with the pre-existing ones.
+ sess.gated_spans.merge(gated_spans_snapshot);
+
+ let (rhs, rhs_span): (&mbe::Delimited, DelimSpan) = match &rhses[i] {
+ mbe::TokenTree::Delimited(span, delimited) => (&delimited, *span),
+ _ => cx.span_bug(sp, "malformed macro rhs"),
+ };
+ let arm_span = rhses[i].span();
+
+ let rhs_spans = rhs.tts.iter().map(|t| t.span()).collect::<Vec<_>>();
+ // The RHS has holes (`$id` and `$(...)`) that need to be filled in.
+ let mut tts = match transcribe(cx, &named_matches, &rhs, rhs_span, transparency) {
+ Ok(tts) => tts,
+ Err(mut err) => {
+ err.emit();
+ return DummyResult::any(arm_span);
+ }
+ };
+
+ // Replace all the tokens for the corresponding positions in the macro, to maintain
+ // proper positions in error reporting, while maintaining the macro_backtrace.
+ if rhs_spans.len() == tts.len() {
+ tts = tts.map_enumerated(|i, tt| {
+ let mut tt = tt.clone();
+ let mut sp = rhs_spans[i];
+ sp = sp.with_ctxt(tt.span().ctxt());
+ tt.set_span(sp);
+ tt
+ });
+ }
+
+ if cx.trace_macros() {
+ let msg = format!("to `{}`", pprust::tts_to_string(&tts));
+ trace_macros_note(&mut cx.expansions, sp, msg);
+ }
+
+ let mut p = Parser::new(sess, tts, false, None);
+ p.last_type_ascription = cx.current_expansion.prior_type_ascription;
+
+ if is_local {
+ cx.resolver.record_macro_rule_usage(node_id, i);
+ }
+
+ // Let the context choose how to interpret the result.
+ // Weird, but useful for X-macros.
+ return Box::new(ParserAnyMacro {
+ parser: p,
+
+ // Pass along the original expansion site and the name of the macro
+ // so we can print a useful error message if the parse of the expanded
+ // macro leaves unparsed tokens.
+ site_span: sp,
+ macro_ident: name,
+ lint_node_id: cx.current_expansion.lint_node_id,
+ is_trailing_mac: cx.current_expansion.is_trailing_mac,
+ arm_span,
+ is_local,
+ });
+ }
+ Failure(token, msg) => match best_failure {
+ Some((ref best_token, _)) if best_token.span.lo() >= token.span.lo() => {}
+ _ => best_failure = Some((token, msg)),
+ },
+ Error(err_sp, ref msg) => {
+ let span = err_sp.substitute_dummy(sp);
+ cx.struct_span_err(span, &msg).emit();
+ return DummyResult::any(span);
+ }
+ ErrorReported => return DummyResult::any(sp),
+ }
+
+ // The matcher was not `Success(..)`ful.
+ // Restore to the state before snapshotting and maybe try again.
+ mem::swap(&mut gated_spans_snapshot, &mut sess.gated_spans.spans.borrow_mut());
+ }
+ drop(parser);
+
+ let (token, label) = best_failure.expect("ran no matchers");
+ let span = token.span.substitute_dummy(sp);
+ let mut err = cx.struct_span_err(span, &parse_failure_msg(&token));
+ err.span_label(span, label);
+ if !def_span.is_dummy() && !cx.source_map().is_imported(def_span) {
+ err.span_label(cx.source_map().guess_head_span(def_span), "when calling this macro");
+ }
+ annotate_doc_comment(&mut err, sess.source_map(), span);
+ // Check whether there's a missing comma in this macro call, like `println!("{}" a);`
+ if let Some((arg, comma_span)) = arg.add_comma() {
+ for lhs in lhses {
+ let parser = parser_from_cx(sess, arg.clone());
+ if let Success(_) = tt_parser.parse_tt(&mut Cow::Borrowed(&parser), lhs) {
+ if comma_span.is_dummy() {
+ err.note("you might be missing a comma");
+ } else {
+ err.span_suggestion_short(
+ comma_span,
+ "missing comma here",
+ ", ",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ }
+ err.emit();
+ cx.trace_macros_diag();
+ DummyResult::any(sp)
+}
+
+// Note that macro-by-example's input is also matched against a token tree:
+// $( $lhs:tt => $rhs:tt );+
+//
+// Holy self-referential!
+
+/// Converts a macro item into a syntax extension.
+pub fn compile_declarative_macro(
+ sess: &Session,
+ features: &Features,
+ def: &ast::Item,
+ edition: Edition,
+) -> (SyntaxExtension, Vec<(usize, Span)>) {
+ debug!("compile_declarative_macro: {:?}", def);
+ let mk_syn_ext = |expander| {
+ SyntaxExtension::new(
+ sess,
+ SyntaxExtensionKind::LegacyBang(expander),
+ def.span,
+ Vec::new(),
+ edition,
+ def.ident.name,
+ &def.attrs,
+ )
+ };
+ let dummy_syn_ext = || (mk_syn_ext(Box::new(macro_rules_dummy_expander)), Vec::new());
+
+ let diag = &sess.parse_sess.span_diagnostic;
+ let lhs_nm = Ident::new(sym::lhs, def.span);
+ let rhs_nm = Ident::new(sym::rhs, def.span);
+ let tt_spec = Some(NonterminalKind::TT);
+
+ // Parse the macro_rules! invocation
+ let (macro_rules, body) = match &def.kind {
+ ast::ItemKind::MacroDef(def) => (def.macro_rules, def.body.inner_tokens()),
+ _ => unreachable!(),
+ };
+
+ // The pattern that macro_rules matches.
+ // The grammar for macro_rules! is:
+ // $( $lhs:tt => $rhs:tt );+
+ // ...quasiquoting this would be nice.
+ // These spans won't matter, anyway
+ let argument_gram = vec![
+ mbe::TokenTree::Sequence(
+ DelimSpan::dummy(),
+ mbe::SequenceRepetition {
+ tts: vec![
+ mbe::TokenTree::MetaVarDecl(def.span, lhs_nm, tt_spec),
+ mbe::TokenTree::token(token::FatArrow, def.span),
+ mbe::TokenTree::MetaVarDecl(def.span, rhs_nm, tt_spec),
+ ],
+ separator: Some(Token::new(
+ if macro_rules { token::Semi } else { token::Comma },
+ def.span,
+ )),
+ kleene: mbe::KleeneToken::new(mbe::KleeneOp::OneOrMore, def.span),
+ num_captures: 2,
+ },
+ ),
+ // to phase into semicolon-termination instead of semicolon-separation
+ mbe::TokenTree::Sequence(
+ DelimSpan::dummy(),
+ mbe::SequenceRepetition {
+ tts: vec![mbe::TokenTree::token(
+ if macro_rules { token::Semi } else { token::Comma },
+ def.span,
+ )],
+ separator: None,
+ kleene: mbe::KleeneToken::new(mbe::KleeneOp::ZeroOrMore, def.span),
+ num_captures: 0,
+ },
+ ),
+ ];
+ // Convert it into `MatcherLoc` form.
+ let argument_gram = mbe::macro_parser::compute_locs(&argument_gram);
+
+ let parser = Parser::new(&sess.parse_sess, body, true, rustc_parse::MACRO_ARGUMENTS);
+ let mut tt_parser =
+ TtParser::new(Ident::with_dummy_span(if macro_rules { kw::MacroRules } else { kw::Macro }));
+ let argument_map = match tt_parser.parse_tt(&mut Cow::Borrowed(&parser), &argument_gram) {
+ Success(m) => m,
+ Failure(token, msg) => {
+ let s = parse_failure_msg(&token);
+ let sp = token.span.substitute_dummy(def.span);
+ let mut err = sess.parse_sess.span_diagnostic.struct_span_err(sp, &s);
+ err.span_label(sp, msg);
+ annotate_doc_comment(&mut err, sess.source_map(), sp);
+ err.emit();
+ return dummy_syn_ext();
+ }
+ Error(sp, msg) => {
+ sess.parse_sess
+ .span_diagnostic
+ .struct_span_err(sp.substitute_dummy(def.span), &msg)
+ .emit();
+ return dummy_syn_ext();
+ }
+ ErrorReported => {
+ return dummy_syn_ext();
+ }
+ };
+
+ let mut valid = true;
+
+ // Extract the arguments:
+ let lhses = match argument_map[&MacroRulesNormalizedIdent::new(lhs_nm)] {
+ MatchedSeq(ref s) => s
+ .iter()
+ .map(|m| {
+ if let MatchedTokenTree(ref tt) = *m {
+ let tt = mbe::quoted::parse(
+ TokenStream::new(vec![tt.clone()]),
+ true,
+ &sess.parse_sess,
+ def.id,
+ features,
+ edition,
+ )
+ .pop()
+ .unwrap();
+ valid &= check_lhs_nt_follows(&sess.parse_sess, &def, &tt);
+ return tt;
+ }
+ sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
+ })
+ .collect::<Vec<mbe::TokenTree>>(),
+ _ => sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs"),
+ };
+
+ let rhses = match argument_map[&MacroRulesNormalizedIdent::new(rhs_nm)] {
+ MatchedSeq(ref s) => s
+ .iter()
+ .map(|m| {
+ if let MatchedTokenTree(ref tt) = *m {
+ return mbe::quoted::parse(
+ TokenStream::new(vec![tt.clone()]),
+ false,
+ &sess.parse_sess,
+ def.id,
+ features,
+ edition,
+ )
+ .pop()
+ .unwrap();
+ }
+ sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured lhs")
+ })
+ .collect::<Vec<mbe::TokenTree>>(),
+ _ => sess.parse_sess.span_diagnostic.span_bug(def.span, "wrong-structured rhs"),
+ };
+
+ for rhs in &rhses {
+ valid &= check_rhs(&sess.parse_sess, rhs);
+ }
+
+ // don't abort iteration early, so that errors for multiple lhses can be reported
+ for lhs in &lhses {
+ valid &= check_lhs_no_empty_seq(&sess.parse_sess, slice::from_ref(lhs));
+ }
+
+ valid &= macro_check::check_meta_variables(&sess.parse_sess, def.id, def.span, &lhses, &rhses);
+
+ let (transparency, transparency_error) = attr::find_transparency(&def.attrs, macro_rules);
+ match transparency_error {
+ Some(TransparencyError::UnknownTransparency(value, span)) => {
+ diag.span_err(span, &format!("unknown macro transparency: `{}`", value));
+ }
+ Some(TransparencyError::MultipleTransparencyAttrs(old_span, new_span)) => {
+ diag.span_err(vec![old_span, new_span], "multiple macro transparency attributes");
+ }
+ None => {}
+ }
+
+ // Compute the spans of the macro rules for unused rule linting.
+ // To avoid warning noise, only consider the rules of this
+ // macro for the lint if all rules are valid.
+ // Also, we are only interested in non-foreign macros.
+ let rule_spans = if valid && def.id != DUMMY_NODE_ID {
+ lhses
+ .iter()
+ .zip(rhses.iter())
+ .enumerate()
+ // If the rhs contains an invocation like compile_error!,
+ // don't consider the rule for the unused rule lint.
+ .filter(|(_idx, (_lhs, rhs))| !has_compile_error_macro(rhs))
+ // We only take the span of the lhs here,
+ // so that the spans of created warnings are smaller.
+ .map(|(idx, (lhs, _rhs))| (idx, lhs.span()))
+ .collect::<Vec<_>>()
+ } else {
+ Vec::new()
+ };
+
+ // Convert the lhses into `MatcherLoc` form, which is better for doing the
+ // actual matching. Unless the matcher is invalid.
+ let lhses = if valid {
+ lhses
+ .iter()
+ .map(|lhs| {
+ // Ignore the delimiters around the matcher.
+ match lhs {
+ mbe::TokenTree::Delimited(_, delimited) => {
+ mbe::macro_parser::compute_locs(&delimited.tts)
+ }
+ _ => sess.parse_sess.span_diagnostic.span_bug(def.span, "malformed macro lhs"),
+ }
+ })
+ .collect()
+ } else {
+ vec![]
+ };
+
+ let expander = Box::new(MacroRulesMacroExpander {
+ name: def.ident,
+ span: def.span,
+ node_id: def.id,
+ transparency,
+ lhses,
+ rhses,
+ valid,
+ });
+ (mk_syn_ext(expander), rule_spans)
+}
+
+#[derive(SessionSubdiagnostic)]
+enum ExplainDocComment {
+ #[label(expand::explain_doc_comment_inner)]
+ Inner {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(expand::explain_doc_comment_outer)]
+ Outer {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+fn annotate_doc_comment(
+ err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ sm: &SourceMap,
+ span: Span,
+) {
+ if let Ok(src) = sm.span_to_snippet(span) {
+ if src.starts_with("///") || src.starts_with("/**") {
+ err.subdiagnostic(ExplainDocComment::Outer { span });
+ } else if src.starts_with("//!") || src.starts_with("/*!") {
+ err.subdiagnostic(ExplainDocComment::Inner { span });
+ }
+ }
+}
+
+fn check_lhs_nt_follows(sess: &ParseSess, def: &ast::Item, lhs: &mbe::TokenTree) -> bool {
+ // lhs is going to be like TokenTree::Delimited(...), where the
+ // entire lhs is those tts. Or, it can be a "bare sequence", not wrapped in parens.
+ if let mbe::TokenTree::Delimited(_, delimited) = lhs {
+ check_matcher(sess, def, &delimited.tts)
+ } else {
+ let msg = "invalid macro matcher; matchers must be contained in balanced delimiters";
+ sess.span_diagnostic.span_err(lhs.span(), msg);
+ false
+ }
+ // We don't abort on rejection; the driver will do that for us after
+ // parsing/expansion. This way we can report every error in every macro.
+}
+
+/// Checks that the lhs contains no repetition which could match an empty token
+/// tree, because then the matcher would hang indefinitely.
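+///
+/// For example (illustrative), `$( $( a )* )*` is rejected: the inner repetition can
+/// match zero tokens, so the outer repetition could loop forever without consuming
+/// any input.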
+fn check_lhs_no_empty_seq(sess: &ParseSess, tts: &[mbe::TokenTree]) -> bool {
+ use mbe::TokenTree;
+ for tt in tts {
+ match *tt {
+ TokenTree::Token(..)
+ | TokenTree::MetaVar(..)
+ | TokenTree::MetaVarDecl(..)
+ | TokenTree::MetaVarExpr(..) => (),
+ TokenTree::Delimited(_, ref del) => {
+ if !check_lhs_no_empty_seq(sess, &del.tts) {
+ return false;
+ }
+ }
+ TokenTree::Sequence(span, ref seq) => {
+ if seq.separator.is_none()
+ && seq.tts.iter().all(|seq_tt| match *seq_tt {
+ TokenTree::MetaVarDecl(_, _, Some(NonterminalKind::Vis)) => true,
+ TokenTree::Sequence(_, ref sub_seq) => {
+ sub_seq.kleene.op == mbe::KleeneOp::ZeroOrMore
+ || sub_seq.kleene.op == mbe::KleeneOp::ZeroOrOne
+ }
+ _ => false,
+ })
+ {
+ let sp = span.entire();
+ sess.span_diagnostic.span_err(sp, "repetition matches empty token tree");
+ return false;
+ }
+ if !check_lhs_no_empty_seq(sess, &seq.tts) {
+ return false;
+ }
+ }
+ }
+ }
+
+ true
+}
+
+fn check_rhs(sess: &ParseSess, rhs: &mbe::TokenTree) -> bool {
+ match *rhs {
+ mbe::TokenTree::Delimited(..) => return true,
+ _ => {
+ sess.span_diagnostic.span_err(rhs.span(), "macro rhs must be delimited");
+ }
+ }
+ false
+}
+
+fn check_matcher(sess: &ParseSess, def: &ast::Item, matcher: &[mbe::TokenTree]) -> bool {
+ let first_sets = FirstSets::new(matcher);
+ let empty_suffix = TokenSet::empty();
+ let err = sess.span_diagnostic.err_count();
+ check_matcher_core(sess, def, &first_sets, matcher, &empty_suffix);
+ err == sess.span_diagnostic.err_count()
+}
+
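+/// Returns whether `rhs` contains a `compile_error!` invocation, detected by scanning
+/// every three-token window for `compile_error`, `!`, and a (visibly) delimited
+/// argument group. Used above to exclude such rules from the unused-rule lint.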
+fn has_compile_error_macro(rhs: &mbe::TokenTree) -> bool {
+ match rhs {
+ mbe::TokenTree::Delimited(_sp, d) => {
+ let has_compile_error = d.tts.array_windows::<3>().any(|[ident, bang, args]| {
+ if let mbe::TokenTree::Token(ident) = ident &&
+ let TokenKind::Ident(ident, _) = ident.kind &&
+ ident == sym::compile_error &&
+ let mbe::TokenTree::Token(bang) = bang &&
+ let TokenKind::Not = bang.kind &&
+ let mbe::TokenTree::Delimited(_, del) = args &&
+ del.delim != Delimiter::Invisible
+ {
+ true
+ } else {
+ false
+ }
+ });
+ if has_compile_error { true } else { d.tts.iter().any(has_compile_error_macro) }
+ }
+ _ => false,
+ }
+}
+
+// The `FirstSets` for a matcher is a mapping from subsequences in the
+// matcher to the FIRST set for that subsequence.
+//
+// This mapping is partially precomputed via a backwards scan over the
+// token trees of the matcher, which provides a mapping from each
+// repetition sequence to its *first* set.
+//
+// (Hypothetically, sequences should be uniquely identifiable via their
+// spans, though perhaps that is false, e.g., for macro-generated macros
+// that do not try to inject artificial span information. My plan is
+// to try to catch such cases ahead of time and not include them in
+// the precomputed mapping.)
+struct FirstSets<'tt> {
+ // this maps each TokenTree::Sequence `$(tt ...) SEP OP` that is uniquely identified by its
+ // span in the original matcher to the First set for the inner sequence `tt ...`.
+ //
+ // If two sequences have the same span in a matcher, then map that
+ // span to None (invalidating the mapping here and forcing the code to
+ // use a slow path).
+ first: FxHashMap<Span, Option<TokenSet<'tt>>>,
+}
+
+impl<'tt> FirstSets<'tt> {
+ fn new(tts: &'tt [mbe::TokenTree]) -> FirstSets<'tt> {
+ use mbe::TokenTree;
+
+ let mut sets = FirstSets { first: FxHashMap::default() };
+ build_recur(&mut sets, tts);
+ return sets;
+
+ // walks backward over `tts`, returning the FIRST for `tts`
+ // and updating `sets` at the same time for all sequence
+ // substructure we find within `tts`.
+ fn build_recur<'tt>(sets: &mut FirstSets<'tt>, tts: &'tt [TokenTree]) -> TokenSet<'tt> {
+ let mut first = TokenSet::empty();
+ for tt in tts.iter().rev() {
+ match *tt {
+ TokenTree::Token(..)
+ | TokenTree::MetaVar(..)
+ | TokenTree::MetaVarDecl(..)
+ | TokenTree::MetaVarExpr(..) => {
+ first.replace_with(TtHandle::TtRef(tt));
+ }
+ TokenTree::Delimited(span, ref delimited) => {
+ build_recur(sets, &delimited.tts);
+ first.replace_with(TtHandle::from_token_kind(
+ token::OpenDelim(delimited.delim),
+ span.open,
+ ));
+ }
+ TokenTree::Sequence(sp, ref seq_rep) => {
+ let subfirst = build_recur(sets, &seq_rep.tts);
+
+ match sets.first.entry(sp.entire()) {
+ Entry::Vacant(vac) => {
+ vac.insert(Some(subfirst.clone()));
+ }
+ Entry::Occupied(mut occ) => {
+ // if there is already an entry, then a span must have collided.
+ // This should not happen with typical macro_rules macros,
+ // but syntax extensions need not maintain distinct spans,
+ // so distinct syntax trees can be assigned the same span.
+ // In such a case, the map cannot be trusted; so mark this
+ // entry as unusable.
+ occ.insert(None);
+ }
+ }
+
+ // If the sequence contents can be empty, then the first
+ // token could be the separator token itself.
+
+ if let (Some(sep), true) = (&seq_rep.separator, subfirst.maybe_empty) {
+ first.add_one_maybe(TtHandle::from_token(sep.clone()));
+ }
+
+ // Reverse scan: Sequence comes before `first`.
+ if subfirst.maybe_empty
+ || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrMore
+ || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrOne
+ {
+ // If sequence is potentially empty, then
+ // union them (preserving first emptiness).
+ first.add_all(&TokenSet { maybe_empty: true, ..subfirst });
+ } else {
+ // Otherwise, sequence guaranteed
+ // non-empty; replace first.
+ first = subfirst;
+ }
+ }
+ }
+ }
+
+ first
+ }
+ }
+
+ // walks forward over `tts` until all potential FIRST tokens are
+ // identified.
+ fn first(&self, tts: &'tt [mbe::TokenTree]) -> TokenSet<'tt> {
+ use mbe::TokenTree;
+
+ let mut first = TokenSet::empty();
+ for tt in tts.iter() {
+ assert!(first.maybe_empty);
+ match *tt {
+ TokenTree::Token(..)
+ | TokenTree::MetaVar(..)
+ | TokenTree::MetaVarDecl(..)
+ | TokenTree::MetaVarExpr(..) => {
+ first.add_one(TtHandle::TtRef(tt));
+ return first;
+ }
+ TokenTree::Delimited(span, ref delimited) => {
+ first.add_one(TtHandle::from_token_kind(
+ token::OpenDelim(delimited.delim),
+ span.open,
+ ));
+ return first;
+ }
+ TokenTree::Sequence(sp, ref seq_rep) => {
+ let subfirst_owned;
+ let subfirst = match self.first.get(&sp.entire()) {
+ Some(&Some(ref subfirst)) => subfirst,
+ Some(&None) => {
+ subfirst_owned = self.first(&seq_rep.tts);
+ &subfirst_owned
+ }
+ None => {
+ panic!("We missed a sequence during FirstSets construction");
+ }
+ };
+
+ // If the sequence contents can be empty, then the first
+ // token could be the separator token itself.
+ if let (Some(sep), true) = (&seq_rep.separator, subfirst.maybe_empty) {
+ first.add_one_maybe(TtHandle::from_token(sep.clone()));
+ }
+
+ assert!(first.maybe_empty);
+ first.add_all(subfirst);
+ if subfirst.maybe_empty
+ || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrMore
+ || seq_rep.kleene.op == mbe::KleeneOp::ZeroOrOne
+ {
+ // Continue scanning for more first
+ // tokens, but also make sure we
+ // restore empty-tracking state.
+ first.maybe_empty = true;
+ continue;
+ } else {
+ return first;
+ }
+ }
+ }
+ }
+
+ // we only exit the loop if `tts` was empty or if every
+ // element of `tts` matches the empty sequence.
+ assert!(first.maybe_empty);
+ first
+ }
+}
+
+// Most `mbe::TokenTree`s are pre-existing in the matcher, but some are defined
+// implicitly, such as opening/closing delimiters and sequence repetition ops.
+// This type encapsulates both kinds. It implements `Clone` while avoiding the
+// need for `mbe::TokenTree` to implement `Clone`.
+#[derive(Debug)]
+enum TtHandle<'tt> {
+ /// This is used in most cases.
+ TtRef(&'tt mbe::TokenTree),
+
+ /// This is only used for implicit token trees. The `mbe::TokenTree` *must*
+ /// be `mbe::TokenTree::Token`. No other variants are allowed. We store an
+ /// `mbe::TokenTree` rather than a `Token` so that `get()` can return a
+ /// `&mbe::TokenTree`.
+ Token(mbe::TokenTree),
+}
+
+impl<'tt> TtHandle<'tt> {
+ fn from_token(tok: Token) -> Self {
+ TtHandle::Token(mbe::TokenTree::Token(tok))
+ }
+
+ fn from_token_kind(kind: TokenKind, span: Span) -> Self {
+ TtHandle::from_token(Token::new(kind, span))
+ }
+
+ // Get a reference to a token tree.
+ fn get(&'tt self) -> &'tt mbe::TokenTree {
+ match self {
+ TtHandle::TtRef(tt) => tt,
+ TtHandle::Token(token_tt) => &token_tt,
+ }
+ }
+}
+
+impl<'tt> PartialEq for TtHandle<'tt> {
+ fn eq(&self, other: &TtHandle<'tt>) -> bool {
+ self.get() == other.get()
+ }
+}
+
+impl<'tt> Clone for TtHandle<'tt> {
+ fn clone(&self) -> Self {
+ match self {
+ TtHandle::TtRef(tt) => TtHandle::TtRef(tt),
+
+ // This variant *must* contain a `mbe::TokenTree::Token`, and not
+ // any other variant of `mbe::TokenTree`.
+ TtHandle::Token(mbe::TokenTree::Token(tok)) => {
+ TtHandle::Token(mbe::TokenTree::Token(tok.clone()))
+ }
+
+ _ => unreachable!(),
+ }
+ }
+}
+
+// A set of `mbe::TokenTree`s, which may include `TokenTree::Match`s
+// (for macro-by-example syntactic variables). It also carries the
+// `maybe_empty` flag; that is true if and only if the matcher can
+// match an empty token sequence.
+//
+// The First set is computed on submatchers like `$($a:expr b),* $(c)* d`,
+// which has corresponding FIRST = {$a:expr, c, d}.
+// Likewise, `$($a:expr b),* $(c)+ d` has FIRST = {$a:expr, c}.
+//
+// (Notably, we must allow for *-op to occur zero times.)
+#[derive(Clone, Debug)]
+struct TokenSet<'tt> {
+ tokens: Vec<TtHandle<'tt>>,
+ maybe_empty: bool,
+}
+
+impl<'tt> TokenSet<'tt> {
+ // Returns a set for the empty sequence.
+ fn empty() -> Self {
+ TokenSet { tokens: Vec::new(), maybe_empty: true }
+ }
+
+ // Returns the set `{ tok }` for the single-token (and thus
+ // non-empty) sequence [tok].
+ fn singleton(tt: TtHandle<'tt>) -> Self {
+ TokenSet { tokens: vec![tt], maybe_empty: false }
+ }
+
+ // Changes self to be the set `{ tok }`.
+ // Since `tok` is always present, marks self as non-empty.
+ fn replace_with(&mut self, tt: TtHandle<'tt>) {
+ self.tokens.clear();
+ self.tokens.push(tt);
+ self.maybe_empty = false;
+ }
+
+ // Changes self to be the empty set `{}`; meant for use when
+ // the particular token does not matter, but we want to
+ // record that it occurs.
+ fn replace_with_irrelevant(&mut self) {
+ self.tokens.clear();
+ self.maybe_empty = false;
+ }
+
+ // Adds `tok` to the set for `self`, marking sequence as non-empty.
+ fn add_one(&mut self, tt: TtHandle<'tt>) {
+ if !self.tokens.contains(&tt) {
+ self.tokens.push(tt);
+ }
+ self.maybe_empty = false;
+ }
+
+ // Adds `tok` to the set for `self`. (Leaves `maybe_empty` flag alone.)
+ fn add_one_maybe(&mut self, tt: TtHandle<'tt>) {
+ if !self.tokens.contains(&tt) {
+ self.tokens.push(tt);
+ }
+ }
+
+ // Adds all elements of `other` to this.
+ //
+ // (Since this is a set, we filter out duplicates.)
+ //
+ // If `other` is potentially empty, then preserves the previous
+ // setting of the empty flag of `self`. If `other` is guaranteed
+ // non-empty, then `self` is marked non-empty.
+ fn add_all(&mut self, other: &Self) {
+ for tt in &other.tokens {
+ if !self.tokens.contains(tt) {
+ self.tokens.push(tt.clone());
+ }
+ }
+ if !other.maybe_empty {
+ self.maybe_empty = false;
+ }
+ }
+}
+
+// Checks that `matcher` is internally consistent and that it
+// can legally be followed by a token `N`, for all `N` in `follow`.
+// (If `follow` is empty, then it imposes no constraint on
+// the `matcher`.)
+//
+// Returns the set of NT tokens that could possibly come last in
+// `matcher`. (If `matcher` matches the empty sequence, then
+// `maybe_empty` will be set to true.)
+//
+// Requires that `first_sets` is pre-computed for `matcher`;
+// see `FirstSets::new`.
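+//
+// As an illustrative example, checking `$e:expr + $f:expr` reports an error:
+// `$e:expr` may be followed only by `=>`, `,`, or `;` (the FOLLOW set of `expr`),
+// so the `+` is rejected, whereas `$e:expr , $f:expr` is accepted.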
+fn check_matcher_core<'tt>(
+ sess: &ParseSess,
+ def: &ast::Item,
+ first_sets: &FirstSets<'tt>,
+ matcher: &'tt [mbe::TokenTree],
+ follow: &TokenSet<'tt>,
+) -> TokenSet<'tt> {
+ use mbe::TokenTree;
+
+ let mut last = TokenSet::empty();
+
+ // 2. For each token and suffix [T, SUFFIX] in M:
+ // ensure that T can be followed by SUFFIX, and if SUFFIX may be empty,
+ // then ensure T can also be followed by any element of FOLLOW.
+ 'each_token: for i in 0..matcher.len() {
+ let token = &matcher[i];
+ let suffix = &matcher[i + 1..];
+
+ let build_suffix_first = || {
+ let mut s = first_sets.first(suffix);
+ if s.maybe_empty {
+ s.add_all(follow);
+ }
+ s
+ };
+
+ // (we build `suffix_first` on demand below; you can tell
+ // which cases are supposed to fall through by looking for the
+ // initialization of this variable.)
+ let suffix_first;
+
+ // First, update `last` so that it corresponds to the set
+ // of NT tokens that might end the sequence `... token`.
+ match *token {
+ TokenTree::Token(..)
+ | TokenTree::MetaVar(..)
+ | TokenTree::MetaVarDecl(..)
+ | TokenTree::MetaVarExpr(..) => {
+ if token_can_be_followed_by_any(token) {
+ // don't need to track tokens that work with any,
+ last.replace_with_irrelevant();
+ // ... and don't need to check tokens that can be
+ // followed by anything against SUFFIX.
+ continue 'each_token;
+ } else {
+ last.replace_with(TtHandle::TtRef(token));
+ suffix_first = build_suffix_first();
+ }
+ }
+ TokenTree::Delimited(span, ref d) => {
+ let my_suffix = TokenSet::singleton(TtHandle::from_token_kind(
+ token::CloseDelim(d.delim),
+ span.close,
+ ));
+ check_matcher_core(sess, def, first_sets, &d.tts, &my_suffix);
+ // don't track non-NT tokens
+ last.replace_with_irrelevant();
+
+ // also, we don't need to check delimited sequences
+ // against SUFFIX
+ continue 'each_token;
+ }
+ TokenTree::Sequence(_, ref seq_rep) => {
+ suffix_first = build_suffix_first();
+ // The trick here: when we check the interior, we want
+ // to include the separator (if any) as a potential
+ // (but not guaranteed) element of FOLLOW. So in that
+ // case, we make a temp copy of suffix and stuff
+ // delimiter in there.
+ //
+ // FIXME: Should I first scan suffix_first to see if
+ // delimiter is already in it before I go through the
+ // work of cloning it? But then again, this way I may
+ // get a "tighter" span?
+ let mut new;
+ let my_suffix = if let Some(sep) = &seq_rep.separator {
+ new = suffix_first.clone();
+ new.add_one_maybe(TtHandle::from_token(sep.clone()));
+ &new
+ } else {
+ &suffix_first
+ };
+
+ // At this point, `suffix_first` is built, and
+ // `my_suffix` is some TokenSet that we can use
+ // for checking the interior of `seq_rep`.
+ let next = check_matcher_core(sess, def, first_sets, &seq_rep.tts, my_suffix);
+ if next.maybe_empty {
+ last.add_all(&next);
+ } else {
+ last = next;
+ }
+
+ // the recursive call to check_matcher_core already ran the
+ // check below, so we can just keep going forward here.
+ continue 'each_token;
+ }
+ }
+
+ // (`suffix_first` guaranteed initialized once reaching here.)
+
+ // Now `last` holds the complete set of NT tokens that could
+ // end the sequence before SUFFIX. Check that every one works with `suffix`.
+ for tt in &last.tokens {
+ if let &TokenTree::MetaVarDecl(span, name, Some(kind)) = tt.get() {
+ for next_token in &suffix_first.tokens {
+ let next_token = next_token.get();
+
+ // Check if the old pat is used and the next token is `|`
+ // to warn about incompatibility with Rust 2021.
+ // We only emit this lint if we're parsing the original
+ // definition of this macro_rules, not while (re)parsing
+ // the macro when compiling another crate that is using the
+ // macro. (See #86567.)
+ // Macros defined in the current crate have a real node id,
+ // whereas macros from an external crate have a dummy id.
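+ // For example (illustrative): a matcher such as `$x:pat | $y:pat`
+ // triggers this lint, because in Rust 2021 `pat` also matches
+ // top-level or-patterns and would consume the `|`.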
+ if def.id != DUMMY_NODE_ID
+ && matches!(kind, NonterminalKind::PatParam { inferred: true })
+ && matches!(next_token, TokenTree::Token(token) if token.kind == BinOp(token::BinOpToken::Or))
+ {
+ // Suggest using `pat_param` instead, for example: `$x:pat` -> `$x:pat_param`.
+ let suggestion = quoted_tt_to_string(&TokenTree::MetaVarDecl(
+ span,
+ name,
+ Some(NonterminalKind::PatParam { inferred: false }),
+ ));
+ sess.buffer_lint_with_diagnostic(
+ &RUST_2021_INCOMPATIBLE_OR_PATTERNS,
+ span,
+ ast::CRATE_NODE_ID,
+ "the meaning of the `pat` fragment specifier is changing in Rust 2021, which may affect this macro",
+ BuiltinLintDiagnostics::OrPatternsBackCompat(span, suggestion),
+ );
+ }
+ match is_in_follow(next_token, kind) {
+ IsInFollow::Yes => {}
+ IsInFollow::No(possible) => {
+ let may_be = if last.tokens.len() == 1 && suffix_first.tokens.len() == 1
+ {
+ "is"
+ } else {
+ "may be"
+ };
+
+ let sp = next_token.span();
+ let mut err = sess.span_diagnostic.struct_span_err(
+ sp,
+ &format!(
+ "`${name}:{frag}` {may_be} followed by `{next}`, which \
+ is not allowed for `{frag}` fragments",
+ name = name,
+ frag = kind,
+ next = quoted_tt_to_string(next_token),
+ may_be = may_be
+ ),
+ );
+ err.span_label(sp, format!("not allowed after `{}` fragments", kind));
+
+ if kind == NonterminalKind::PatWithOr
+ && sess.edition.rust_2021()
+ && next_token.is_token(&BinOp(token::BinOpToken::Or))
+ {
+ let suggestion = quoted_tt_to_string(&TokenTree::MetaVarDecl(
+ span,
+ name,
+ Some(NonterminalKind::PatParam { inferred: false }),
+ ));
+ err.span_suggestion(
+ span,
+ "try a `pat_param` fragment specifier instead",
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ let msg = "allowed there are: ";
+ match possible {
+ &[] => {}
+ &[t] => {
+ err.note(&format!(
+ "only {} is allowed after `{}` fragments",
+ t, kind,
+ ));
+ }
+ ts => {
+ err.note(&format!(
+ "{}{} or {}",
+ msg,
+ ts[..ts.len() - 1]
+ .iter()
+ .copied()
+ .collect::<Vec<_>>()
+ .join(", "),
+ ts[ts.len() - 1],
+ ));
+ }
+ }
+ err.emit();
+ }
+ }
+ }
+ }
+ }
+ }
+ last
+}
+
+fn token_can_be_followed_by_any(tok: &mbe::TokenTree) -> bool {
+ if let mbe::TokenTree::MetaVarDecl(_, _, Some(kind)) = *tok {
+ frag_can_be_followed_by_any(kind)
+ } else {
+ // (Non-NTs can always be followed by anything in matchers.)
+ true
+ }
+}
+
+/// Returns `true` if a fragment of kind `kind` can be followed by any sort of
+/// token. We use this (among other things) as a useful approximation
+/// for when `kind` can be followed by a repetition like `$(...)*` or
+/// `$(...)+`. In general, these can be a bit tricky to reason about,
+/// so we adopt a conservative position that says that any fragment
+/// specifier which consumes at most one token tree can be followed by
+/// a fragment specifier (indeed, these fragments can be followed by
+/// ANYTHING without fear of future compatibility hazards).
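+///
+/// For example (illustrative): `$t:tt` may be followed by anything, since
+/// `tt` consumes exactly one token tree, whereas whatever follows `$e:expr`
+/// must still be checked against the follow set of `expr`.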
+fn frag_can_be_followed_by_any(kind: NonterminalKind) -> bool {
+ matches!(
+ kind,
+ NonterminalKind::Item // always terminated by `}` or `;`
+ | NonterminalKind::Block // exactly one token tree
+ | NonterminalKind::Ident // exactly one token tree
+ | NonterminalKind::Literal // exactly one token tree
+ | NonterminalKind::Meta // exactly one token tree
+ | NonterminalKind::Lifetime // exactly one token tree
+ | NonterminalKind::TT // exactly one token tree
+ )
+}
+
+enum IsInFollow {
+ Yes,
+ No(&'static [&'static str]),
+}
+
+/// Returns whether a fragment of kind `kind` can legally be followed by the token
+/// `tok`. For fragments that can consume an unbounded number of tokens, `tok`
+/// must be within a well-defined follow set. This is intended to
+/// guarantee future compatibility: for example, without this rule, if
+/// we expanded `expr` to include a new binary operator, we might
+/// break macros that were relying on that binary operator as a
+/// separator.
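+///
+/// For example (illustrative): `macro_rules! m { ($e:expr + $f:expr) => {} }`
+/// is rejected at definition time because `+` is not in the follow set of
+/// `expr` (only `=>`, `,` and `;` are), while `($e:expr, $f:expr)` is accepted.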
+// when changing this do not forget to update doc/book/macros.md!
+fn is_in_follow(tok: &mbe::TokenTree, kind: NonterminalKind) -> IsInFollow {
+ use mbe::TokenTree;
+
+ if let TokenTree::Token(Token { kind: token::CloseDelim(_), .. }) = *tok {
+ // closing a token tree can never be matched by any fragment;
+ // iow, we always require that `(` and `)` match, etc.
+ IsInFollow::Yes
+ } else {
+ match kind {
+ NonterminalKind::Item => {
+ // since items *must* be followed by either a `;` or a `}`, we can
+ // accept anything after them
+ IsInFollow::Yes
+ }
+ NonterminalKind::Block => {
+ // anything can follow block, the braces provide an easy boundary to
+ // maintain
+ IsInFollow::Yes
+ }
+ NonterminalKind::Stmt | NonterminalKind::Expr => {
+ const TOKENS: &[&str] = &["`=>`", "`,`", "`;`"];
+ match tok {
+ TokenTree::Token(token) => match token.kind {
+ FatArrow | Comma | Semi => IsInFollow::Yes,
+ _ => IsInFollow::No(TOKENS),
+ },
+ _ => IsInFollow::No(TOKENS),
+ }
+ }
+ NonterminalKind::PatParam { .. } => {
+ const TOKENS: &[&str] = &["`=>`", "`,`", "`=`", "`|`", "`if`", "`in`"];
+ match tok {
+ TokenTree::Token(token) => match token.kind {
+ FatArrow | Comma | Eq | BinOp(token::Or) => IsInFollow::Yes,
+ Ident(name, false) if name == kw::If || name == kw::In => IsInFollow::Yes,
+ _ => IsInFollow::No(TOKENS),
+ },
+ _ => IsInFollow::No(TOKENS),
+ }
+ }
+ NonterminalKind::PatWithOr { .. } => {
+ const TOKENS: &[&str] = &["`=>`", "`,`", "`=`", "`if`", "`in`"];
+ match tok {
+ TokenTree::Token(token) => match token.kind {
+ FatArrow | Comma | Eq => IsInFollow::Yes,
+ Ident(name, false) if name == kw::If || name == kw::In => IsInFollow::Yes,
+ _ => IsInFollow::No(TOKENS),
+ },
+ _ => IsInFollow::No(TOKENS),
+ }
+ }
+ NonterminalKind::Path | NonterminalKind::Ty => {
+ const TOKENS: &[&str] = &[
+ "`{`", "`[`", "`=>`", "`,`", "`>`", "`=`", "`:`", "`;`", "`|`", "`as`",
+ "`where`",
+ ];
+ match tok {
+ TokenTree::Token(token) => match token.kind {
+ OpenDelim(Delimiter::Brace)
+ | OpenDelim(Delimiter::Bracket)
+ | Comma
+ | FatArrow
+ | Colon
+ | Eq
+ | Gt
+ | BinOp(token::Shr)
+ | Semi
+ | BinOp(token::Or) => IsInFollow::Yes,
+ Ident(name, false) if name == kw::As || name == kw::Where => {
+ IsInFollow::Yes
+ }
+ _ => IsInFollow::No(TOKENS),
+ },
+ TokenTree::MetaVarDecl(_, _, Some(NonterminalKind::Block)) => IsInFollow::Yes,
+ _ => IsInFollow::No(TOKENS),
+ }
+ }
+ NonterminalKind::Ident | NonterminalKind::Lifetime => {
+ // being a single token, idents and lifetimes are harmless
+ IsInFollow::Yes
+ }
+ NonterminalKind::Literal => {
+ // literals may be of a single token, or two tokens (negative numbers)
+ IsInFollow::Yes
+ }
+ NonterminalKind::Meta | NonterminalKind::TT => {
+ // being either a single token or a delimited sequence, tt is
+ // harmless
+ IsInFollow::Yes
+ }
+ NonterminalKind::Vis => {
+ // Explicitly disallow `priv`, on the off chance it comes back.
+ const TOKENS: &[&str] = &["`,`", "an ident", "a type"];
+ match tok {
+ TokenTree::Token(token) => match token.kind {
+ Comma => IsInFollow::Yes,
+ Ident(name, is_raw) if is_raw || name != kw::Priv => IsInFollow::Yes,
+ _ => {
+ if token.can_begin_type() {
+ IsInFollow::Yes
+ } else {
+ IsInFollow::No(TOKENS)
+ }
+ }
+ },
+ TokenTree::MetaVarDecl(
+ _,
+ _,
+ Some(NonterminalKind::Ident | NonterminalKind::Ty | NonterminalKind::Path),
+ ) => IsInFollow::Yes,
+ _ => IsInFollow::No(TOKENS),
+ }
+ }
+ }
+ }
+}
+
+fn quoted_tt_to_string(tt: &mbe::TokenTree) -> String {
+ match *tt {
+ mbe::TokenTree::Token(ref token) => pprust::token_to_string(&token).into(),
+ mbe::TokenTree::MetaVar(_, name) => format!("${}", name),
+ mbe::TokenTree::MetaVarDecl(_, name, Some(kind)) => format!("${}:{}", name, kind),
+ mbe::TokenTree::MetaVarDecl(_, name, None) => format!("${}:", name),
+ _ => panic!(
+ "{}",
+ "unexpected mbe::TokenTree::{Sequence or Delimited} \
+ in follow set checker"
+ ),
+ }
+}
+
+fn parser_from_cx(sess: &ParseSess, tts: TokenStream) -> Parser<'_> {
+ Parser::new(sess, tts, true, rustc_parse::MACRO_ARGUMENTS)
+}
+
+/// Generates an appropriate parsing failure message. For EOF, this is "unexpected end...". For
+/// other tokens, this is "no rules expected the token...".
+fn parse_failure_msg(tok: &Token) -> String {
+ match tok.kind {
+ token::Eof => "unexpected end of macro invocation".to_string(),
+ _ => format!("no rules expected the token `{}`", pprust::token_to_string(tok),),
+ }
+}
diff --git a/compiler/rustc_expand/src/mbe/metavar_expr.rs b/compiler/rustc_expand/src/mbe/metavar_expr.rs
new file mode 100644
index 000000000..fc808401a
--- /dev/null
+++ b/compiler/rustc_expand/src/mbe/metavar_expr.rs
@@ -0,0 +1,161 @@
+use rustc_ast::token::{self, Delimiter};
+use rustc_ast::tokenstream::{CursorRef, TokenStream, TokenTree};
+use rustc_ast::{LitIntType, LitKind};
+use rustc_ast_pretty::pprust;
+use rustc_errors::{Applicability, PResult};
+use rustc_session::parse::ParseSess;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+/// A meta-variable expression, for expansions based on properties of meta-variables.
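+///
+/// For example (illustrative): with a matcher `$( $i:ident ),*`, a
+/// transcriber may use `${count(i)}` for the number of repetitions,
+/// `${index()}` and `${length()}` for the current repetition index and
+/// length, and `${ignore(i)}` to repeat without expanding `$i`.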
+#[derive(Debug, Clone, PartialEq, Encodable, Decodable)]
+pub(crate) enum MetaVarExpr {
+ /// The number of repetitions of an identifier, optionally limited to a number
+ /// of outer-most repetition depths. If the depth limit is `None` then the depth is unlimited.
+ Count(Ident, Option<usize>),
+
+ /// Ignore a meta-variable for repetition without expansion.
+ Ignore(Ident),
+
+ /// The index of the repetition at a particular depth, where 0 is the inner-most
+ /// repetition. The `usize` is the depth.
+ Index(usize),
+
+ /// The length of the repetition at a particular depth, where 0 is the inner-most
+ /// repetition. The `usize` is the depth.
+ Length(usize),
+}
+
+impl MetaVarExpr {
+ /// Attempt to parse a meta-variable expression from a token stream.
+ pub(crate) fn parse<'sess>(
+ input: &TokenStream,
+ outer_span: Span,
+ sess: &'sess ParseSess,
+ ) -> PResult<'sess, MetaVarExpr> {
+ let mut tts = input.trees();
+ let ident = parse_ident(&mut tts, sess, outer_span)?;
+ let Some(TokenTree::Delimited(_, Delimiter::Parenthesis, args)) = tts.next() else {
+ let msg = "meta-variable expression parameter must be wrapped in parentheses";
+ return Err(sess.span_diagnostic.struct_span_err(ident.span, msg));
+ };
+ check_trailing_token(&mut tts, sess)?;
+ let mut iter = args.trees();
+ let rslt = match &*ident.as_str() {
+ "count" => parse_count(&mut iter, sess, ident.span)?,
+ "ignore" => MetaVarExpr::Ignore(parse_ident(&mut iter, sess, ident.span)?),
+ "index" => MetaVarExpr::Index(parse_depth(&mut iter, sess, ident.span)?),
+ "length" => MetaVarExpr::Length(parse_depth(&mut iter, sess, ident.span)?),
+ _ => {
+ let err_msg = "unrecognized meta-variable expression";
+ let mut err = sess.span_diagnostic.struct_span_err(ident.span, err_msg);
+ err.span_suggestion(
+ ident.span,
+ "supported expressions are count, ignore, index and length",
+ "",
+ Applicability::MachineApplicable,
+ );
+ return Err(err);
+ }
+ };
+ check_trailing_token(&mut iter, sess)?;
+ Ok(rslt)
+ }
+
+ pub(crate) fn ident(&self) -> Option<Ident> {
+ match *self {
+ MetaVarExpr::Count(ident, _) | MetaVarExpr::Ignore(ident) => Some(ident),
+ MetaVarExpr::Index(..) | MetaVarExpr::Length(..) => None,
+ }
+ }
+}
+
+// Checks if there are any remaining tokens. For example, `${ignore(ident ... a b c ...)}`
+fn check_trailing_token<'sess>(
+ iter: &mut CursorRef<'_>,
+ sess: &'sess ParseSess,
+) -> PResult<'sess, ()> {
+ if let Some(tt) = iter.next() {
+ let mut diag = sess
+ .span_diagnostic
+ .struct_span_err(tt.span(), &format!("unexpected token: {}", pprust::tt_to_string(tt)));
+ diag.span_note(tt.span(), "meta-variable expression must not have trailing tokens");
+ Err(diag)
+ } else {
+ Ok(())
+ }
+}
+
+/// Parse a meta-variable `count` expression: `count(ident[, depth])`
+fn parse_count<'sess>(
+ iter: &mut CursorRef<'_>,
+ sess: &'sess ParseSess,
+ span: Span,
+) -> PResult<'sess, MetaVarExpr> {
+ let ident = parse_ident(iter, sess, span)?;
+ let depth = if try_eat_comma(iter) { Some(parse_depth(iter, sess, span)?) } else { None };
+ Ok(MetaVarExpr::Count(ident, depth))
+}
+
+/// Parses the depth used by `index(depth)` and `length(depth)`.
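+///
+/// For example (illustrative): in `${index(1)}` the parsed depth is `1`; if
+/// the argument is omitted, as in `${index()}`, the depth defaults to `0`.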
+fn parse_depth<'sess>(
+ iter: &mut CursorRef<'_>,
+ sess: &'sess ParseSess,
+ span: Span,
+) -> PResult<'sess, usize> {
+ let Some(tt) = iter.next() else { return Ok(0) };
+ let TokenTree::Token(token::Token {
+ kind: token::TokenKind::Literal(lit), ..
+ }, _) = tt else {
+ return Err(sess.span_diagnostic.struct_span_err(
+ span,
+ "meta-variable expression depth must be a literal"
+ ));
+ };
+ if let Ok(lit_kind) = LitKind::from_lit_token(*lit)
+ && let LitKind::Int(n_u128, LitIntType::Unsuffixed) = lit_kind
+ && let Ok(n_usize) = usize::try_from(n_u128)
+ {
+ Ok(n_usize)
+ }
+ else {
+ let msg = "only unsuffixes integer literals are supported in meta-variable expressions";
+ Err(sess.span_diagnostic.struct_span_err(span, msg))
+ }
+}
+
+/// Parses a generic ident
+fn parse_ident<'sess>(
+ iter: &mut CursorRef<'_>,
+ sess: &'sess ParseSess,
+ span: Span,
+) -> PResult<'sess, Ident> {
+ if let Some(tt) = iter.next() && let TokenTree::Token(token, _) = tt {
+ if let Some((elem, false)) = token.ident() {
+ return Ok(elem);
+ }
+ let token_str = pprust::token_to_string(token);
+ let mut err = sess.span_diagnostic.struct_span_err(
+ span,
+ &format!("expected identifier, found `{}`", &token_str)
+ );
+ err.span_suggestion(
+ token.span,
+ &format!("try removing `{}`", &token_str),
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ return Err(err);
+ }
+ Err(sess.span_diagnostic.struct_span_err(span, "expected identifier"))
+}
+
+/// Tries to move the iterator forward, returning `true` if there is a comma. If not, then the
+/// iterator is not modified and the result is `false`.
+fn try_eat_comma(iter: &mut CursorRef<'_>) -> bool {
+ if let Some(TokenTree::Token(token::Token { kind: token::Comma, .. }, _)) = iter.look_ahead(0) {
+ let _ = iter.next();
+ return true;
+ }
+ false
+}
diff --git a/compiler/rustc_expand/src/mbe/quoted.rs b/compiler/rustc_expand/src/mbe/quoted.rs
new file mode 100644
index 000000000..ee17d54f6
--- /dev/null
+++ b/compiler/rustc_expand/src/mbe/quoted.rs
@@ -0,0 +1,366 @@
+use crate::mbe::macro_parser::count_metavar_decls;
+use crate::mbe::{Delimited, KleeneOp, KleeneToken, MetaVarExpr, SequenceRepetition, TokenTree};
+
+use rustc_ast::token::{self, Delimiter, Token};
+use rustc_ast::{tokenstream, NodeId};
+use rustc_ast_pretty::pprust;
+use rustc_feature::Features;
+use rustc_session::parse::{feature_err, ParseSess};
+use rustc_span::symbol::{kw, sym, Ident};
+
+use rustc_span::edition::Edition;
+use rustc_span::{Span, SyntaxContext};
+
+const VALID_FRAGMENT_NAMES_MSG: &str = "valid fragment specifiers are \
+ `ident`, `block`, `stmt`, `expr`, `pat`, `ty`, `lifetime`, \
+ `literal`, `path`, `meta`, `tt`, `item` and `vis`";
+
+/// Takes a `tokenstream::TokenStream` and returns a `Vec<self::TokenTree>`. Specifically, this
+/// takes a generic `TokenStream`, such as is used in the rest of the compiler, and returns a
+/// collection of `TokenTree` for use in parsing a macro.
+///
+/// # Parameters
+///
+/// - `input`: a token stream to read from, the contents of which we are parsing.
+/// - `parsing_patterns`: `parse` can be used to parse either the "patterns" or the "body" of a
+/// macro. Both take roughly the same form _except_ that:
+/// - In a pattern, metavars are declared with their "matcher" type. For example `$var:expr` or
+/// `$id:ident`. In this example, `expr` and `ident` are "matchers". They are not present in the
+/// body of a macro rule -- just in the pattern.
+/// - Metavariable expressions are only valid in the "body", not the "pattern".
+/// - `sess`: the parsing session. Any errors will be emitted to this session.
+/// - `node_id`: the NodeId of the macro we are parsing.
+/// - `features`: language features so we can do feature gating.
+///
+/// # Returns
+///
+/// A collection of `self::TokenTree`. There may also be some errors emitted to `sess`.
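+///
+/// For example (illustrative): when `parsing_patterns` is true, `$x:expr` is
+/// parsed into a single `TokenTree::MetaVarDecl` with kind `expr`; in a macro
+/// body the same `$x` is parsed into a plain `TokenTree::MetaVar`.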
+pub(super) fn parse(
+ input: tokenstream::TokenStream,
+ parsing_patterns: bool,
+ sess: &ParseSess,
+ node_id: NodeId,
+ features: &Features,
+ edition: Edition,
+) -> Vec<TokenTree> {
+ // Will contain the final collection of `self::TokenTree`
+ let mut result = Vec::new();
+
+ // For each token tree in `input`, parse the token into a `self::TokenTree`, consuming
+ // additional trees if need be.
+ let mut trees = input.into_trees();
+ while let Some(tree) = trees.next() {
+ // Given the parsed tree, if there is a metavar and we are expecting matchers, actually
+ // parse out the matcher (i.e., in `$id:ident` this would parse the `:` and `ident`).
+ let tree = parse_tree(tree, &mut trees, parsing_patterns, sess, node_id, features, edition);
+ match tree {
+ TokenTree::MetaVar(start_sp, ident) if parsing_patterns => {
+ let span = match trees.next() {
+ Some(tokenstream::TokenTree::Token(Token { kind: token::Colon, span }, _)) => {
+ match trees.next() {
+ Some(tokenstream::TokenTree::Token(token, _)) => match token.ident() {
+ Some((frag, _)) => {
+ let span = token.span.with_lo(start_sp.lo());
+
+ let kind =
+ token::NonterminalKind::from_symbol(frag.name, || {
+ // FIXME(#85708) - once we properly decode a foreign
+ // crate's `SyntaxContext::root`, then we can replace
+ // this with just `span.edition()`. A
+ // `SyntaxContext::root()` from the current crate will
+ // have the edition of the current crate, and a
+ // `SyntaxContext::root()` from a foreign crate will
+ // have the edition of that crate (which we manually
+ // retrieve via the `edition` parameter).
+ if span.ctxt() == SyntaxContext::root() {
+ edition
+ } else {
+ span.edition()
+ }
+ })
+ .unwrap_or_else(
+ || {
+ let msg = format!(
+ "invalid fragment specifier `{}`",
+ frag.name
+ );
+ sess.span_diagnostic
+ .struct_span_err(span, &msg)
+ .help(VALID_FRAGMENT_NAMES_MSG)
+ .emit();
+ token::NonterminalKind::Ident
+ },
+ );
+ result.push(TokenTree::MetaVarDecl(span, ident, Some(kind)));
+ continue;
+ }
+ _ => token.span,
+ },
+ tree => tree.as_ref().map_or(span, tokenstream::TokenTree::span),
+ }
+ }
+ tree => tree.as_ref().map_or(start_sp, tokenstream::TokenTree::span),
+ };
+
+ result.push(TokenTree::MetaVarDecl(span, ident, None));
+ }
+
+ // Not a metavar or no matchers allowed, so just return the tree
+ _ => result.push(tree),
+ }
+ }
+ result
+}
+
+/// Emits a feature-gate error if the `macro_metavar_expr` feature is not already enabled
+fn maybe_emit_macro_metavar_expr_feature(features: &Features, sess: &ParseSess, span: Span) {
+ if !features.macro_metavar_expr {
+ let msg = "meta-variable expressions are unstable";
+ feature_err(&sess, sym::macro_metavar_expr, span, msg).emit();
+ }
+}
+
+/// Takes a `tokenstream::TokenTree` and returns a `self::TokenTree`. Specifically, this takes a
+/// generic `TokenTree`, such as is used in the rest of the compiler, and returns a `TokenTree`
+/// for use in parsing a macro.
+///
+/// Converting the given tree may involve reading more tokens.
+///
+/// # Parameters
+///
+/// - `tree`: the tree we wish to convert.
+/// - `outer_trees`: an iterator over trees. We may need to read more tokens from it in order to finish
+/// converting `tree`
+/// - `parsing_patterns`: same as [parse].
+/// - `sess`: the parsing session. Any errors will be emitted to this session.
+/// - `features`: language features so we can do feature gating.
+fn parse_tree(
+ tree: tokenstream::TokenTree,
+ outer_trees: &mut impl Iterator<Item = tokenstream::TokenTree>,
+ parsing_patterns: bool,
+ sess: &ParseSess,
+ node_id: NodeId,
+ features: &Features,
+ edition: Edition,
+) -> TokenTree {
+ // Depending on what `tree` is, we could be parsing different parts of a macro
+ match tree {
+ // `tree` is a `$` token. Look at the next token in `trees`
+ tokenstream::TokenTree::Token(Token { kind: token::Dollar, span }, _) => {
+ // FIXME: Handle `Invisible`-delimited groups in a more systematic way
+ // during parsing.
+ let mut next = outer_trees.next();
+ let mut trees: Box<dyn Iterator<Item = tokenstream::TokenTree>>;
+ if let Some(tokenstream::TokenTree::Delimited(_, Delimiter::Invisible, tts)) = next {
+ trees = Box::new(tts.into_trees());
+ next = trees.next();
+ } else {
+ trees = Box::new(outer_trees);
+ }
+
+ match next {
+ // `tree` is followed by a delimited set of token trees.
+ Some(tokenstream::TokenTree::Delimited(delim_span, delim, tts)) => {
+ if parsing_patterns {
+ if delim != Delimiter::Parenthesis {
+ span_dollar_dollar_or_metavar_in_the_lhs_err(
+ sess,
+ &Token { kind: token::OpenDelim(delim), span: delim_span.entire() },
+ );
+ }
+ } else {
+ match delim {
+ Delimiter::Brace => {
+ // The delimiter is `{`. This indicates the beginning
+ // of a meta-variable expression (e.g. `${count(ident)}`).
+ // Try to parse the meta-variable expression.
+ match MetaVarExpr::parse(&tts, delim_span.entire(), sess) {
+ Err(mut err) => {
+ err.emit();
+ // Return the already-read `$` early, to avoid emitting
+ // unrelated diagnostics afterwards.
+ return TokenTree::token(token::Dollar, span);
+ }
+ Ok(elem) => {
+ maybe_emit_macro_metavar_expr_feature(
+ features,
+ sess,
+ delim_span.entire(),
+ );
+ return TokenTree::MetaVarExpr(delim_span, elem);
+ }
+ }
+ }
+ Delimiter::Parenthesis => {}
+ _ => {
+ let tok = pprust::token_kind_to_string(&token::OpenDelim(delim));
+ let msg = format!("expected `(` or `{{`, found `{}`", tok);
+ sess.span_diagnostic.span_err(delim_span.entire(), &msg);
+ }
+ }
+ }
+ // If we didn't find a metavar expression above, then we must have a
+ // repetition sequence in the macro (e.g. `$(pat)*`). Parse the
+ // contents of the sequence itself
+ let sequence = parse(tts, parsing_patterns, sess, node_id, features, edition);
+ // Get the Kleene operator and optional separator
+ let (separator, kleene) =
+ parse_sep_and_kleene_op(&mut trees, delim_span.entire(), sess);
+ // Count the number of captured "names" (i.e., named metavars)
+ let num_captures =
+ if parsing_patterns { count_metavar_decls(&sequence) } else { 0 };
+ TokenTree::Sequence(
+ delim_span,
+ SequenceRepetition { tts: sequence, separator, kleene, num_captures },
+ )
+ }
+
+ // `tree` is followed by an `ident`. This could be `$meta_var` or the `$crate`
+ // special metavariable that names the crate of the invocation.
+ Some(tokenstream::TokenTree::Token(token, _)) if token.is_ident() => {
+ let (ident, is_raw) = token.ident().unwrap();
+ let span = ident.span.with_lo(span.lo());
+ if ident.name == kw::Crate && !is_raw {
+ TokenTree::token(token::Ident(kw::DollarCrate, is_raw), span)
+ } else {
+ TokenTree::MetaVar(span, ident)
+ }
+ }
+
+ // `tree` is followed by another `$`. This is an escaped `$`.
+ Some(tokenstream::TokenTree::Token(Token { kind: token::Dollar, span }, _)) => {
+ if parsing_patterns {
+ span_dollar_dollar_or_metavar_in_the_lhs_err(
+ sess,
+ &Token { kind: token::Dollar, span },
+ );
+ } else {
+ maybe_emit_macro_metavar_expr_feature(features, sess, span);
+ }
+ TokenTree::token(token::Dollar, span)
+ }
+
+ // `tree` is followed by some other token. This is an error.
+ Some(tokenstream::TokenTree::Token(token, _)) => {
+ let msg = format!(
+ "expected identifier, found `{}`",
+ pprust::token_to_string(&token),
+ );
+ sess.span_diagnostic.span_err(token.span, &msg);
+ TokenTree::MetaVar(token.span, Ident::empty())
+ }
+
+ // There are no more tokens. Just return the `$` we already have.
+ None => TokenTree::token(token::Dollar, span),
+ }
+ }
+
+ // `tree` is an arbitrary token. Keep it.
+ tokenstream::TokenTree::Token(token, _) => TokenTree::Token(token),
+
+ // `tree` is the beginning of a delimited set of tokens (e.g., `(` or `{`). We need to
+ // descend into the delimited set and further parse it.
+ tokenstream::TokenTree::Delimited(span, delim, tts) => TokenTree::Delimited(
+ span,
+ Delimited {
+ delim,
+ tts: parse(tts, parsing_patterns, sess, node_id, features, edition),
+ },
+ ),
+ }
+}
+
+/// Takes a token and returns `Some(KleeneOp)` if the token is `+`, `*`, or `?`. Otherwise, returns
+/// `None`.
+fn kleene_op(token: &Token) -> Option<KleeneOp> {
+ match token.kind {
+ token::BinOp(token::Star) => Some(KleeneOp::ZeroOrMore),
+ token::BinOp(token::Plus) => Some(KleeneOp::OneOrMore),
+ token::Question => Some(KleeneOp::ZeroOrOne),
+ _ => None,
+ }
+}
+
+/// Parse the next token tree of the input looking for a KleeneOp. Returns
+///
+/// - Ok(Ok((op, span))) if the next token tree is a KleeneOp
+/// - Ok(Err(token)) if the next token tree is a token but not a KleeneOp
+/// - Err(span) if the next token tree is not a token
+fn parse_kleene_op(
+ input: &mut impl Iterator<Item = tokenstream::TokenTree>,
+ span: Span,
+) -> Result<Result<(KleeneOp, Span), Token>, Span> {
+ match input.next() {
+ Some(tokenstream::TokenTree::Token(token, _)) => match kleene_op(&token) {
+ Some(op) => Ok(Ok((op, token.span))),
+ None => Ok(Err(token)),
+ },
+ tree => Err(tree.as_ref().map_or(span, tokenstream::TokenTree::span)),
+ }
+}
+
+/// Attempt to parse a single Kleene star, possibly with a separator.
+///
+/// For example, in a pattern such as `$(a),*`, `a` is the pattern to be repeated, `,` is the
+/// separator, and `*` is the Kleene operator. This function is specifically concerned with parsing
+/// the last two tokens of such a pattern: namely, the optional separator and the Kleene operator
+/// itself. Note that here we are parsing the _macro_ itself, rather than trying to match some
+/// stream of tokens in an invocation of a macro.
+///
+/// This function will take some input iterator `input` corresponding to `span` and a parsing
+/// session `sess`. If the next one (or possibly two) tokens in `input` correspond to a Kleene
+/// operator and separator, then a tuple with `(separator, KleeneOp)` is returned. Otherwise, an
+/// error with the appropriate span is emitted to `sess` and a dummy value is returned.
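+///
+/// For example (illustrative): `$(a),*` yields the separator `,` with
+/// `KleeneOp::ZeroOrMore`, `$(a)+` yields no separator with
+/// `KleeneOp::OneOrMore`, and `$(a),?` emits an error because the `?`
+/// operator does not accept a separator.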
+fn parse_sep_and_kleene_op(
+ input: &mut impl Iterator<Item = tokenstream::TokenTree>,
+ span: Span,
+ sess: &ParseSess,
+) -> (Option<Token>, KleeneToken) {
+ // We basically look at two token trees here, denoted as #1 and #2 below
+ let span = match parse_kleene_op(input, span) {
+ // #1 is a `?`, `+`, or `*` KleeneOp
+ Ok(Ok((op, span))) => return (None, KleeneToken::new(op, span)),
+
+ // #1 is a separator followed by #2, a KleeneOp
+ Ok(Err(token)) => match parse_kleene_op(input, token.span) {
+ // #2 is the `?` Kleene op, which does not take a separator (error)
+ Ok(Ok((KleeneOp::ZeroOrOne, span))) => {
+ // Error!
+ sess.span_diagnostic.span_err(
+ token.span,
+ "the `?` macro repetition operator does not take a separator",
+ );
+
+ // Return a dummy
+ return (None, KleeneToken::new(KleeneOp::ZeroOrMore, span));
+ }
+
+ // #2 is a KleeneOp :D
+ Ok(Ok((op, span))) => return (Some(token), KleeneToken::new(op, span)),
+
+ // #2 is a random token or not a token at all :(
+ Ok(Err(Token { span, .. })) | Err(span) => span,
+ },
+
+ // #1 is not a token
+ Err(span) => span,
+ };
+
+ // If we ever get to this point, we have experienced an "unexpected token" error
+ sess.span_diagnostic.span_err(span, "expected one of: `*`, `+`, or `?`");
+
+ // Return a dummy
+ (None, KleeneToken::new(KleeneOp::ZeroOrMore, span))
+}
+
+// `$$` or a meta-variable appeared in the LHS of a macro, which is not allowed.
+//
+// For example, `macro_rules! foo { ( ${length()} ) => {} }`
+fn span_dollar_dollar_or_metavar_in_the_lhs_err<'sess>(sess: &'sess ParseSess, token: &Token) {
+ sess.span_diagnostic
+ .span_err(token.span, &format!("unexpected token: {}", pprust::token_to_string(token)));
+ sess.span_diagnostic.span_note_without_error(
+ token.span,
+ "`$$` and meta-variable expressions are not allowed inside macro parameter definitions",
+ );
+}
diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs
new file mode 100644
index 000000000..e47ea83ac
--- /dev/null
+++ b/compiler/rustc_expand/src/mbe/transcribe.rs
@@ -0,0 +1,580 @@
+use crate::base::ExtCtxt;
+use crate::mbe::macro_parser::{MatchedNonterminal, MatchedSeq, MatchedTokenTree, NamedMatch};
+use crate::mbe::{self, MetaVarExpr};
+use rustc_ast::mut_visit::{self, MutVisitor};
+use rustc_ast::token::{self, Delimiter, Token, TokenKind};
+use rustc_ast::tokenstream::{DelimSpan, Spacing, TokenStream, TokenTree};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{pluralize, PResult};
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
+use rustc_span::hygiene::{LocalExpnId, Transparency};
+use rustc_span::symbol::{sym, Ident, MacroRulesNormalizedIdent};
+use rustc_span::Span;
+
+use smallvec::{smallvec, SmallVec};
+use std::mem;
+
+// A Marker adds the given mark to the syntax context.
+struct Marker(LocalExpnId, Transparency);
+
+impl MutVisitor for Marker {
+ const VISIT_TOKENS: bool = true;
+
+ fn visit_span(&mut self, span: &mut Span) {
+ *span = span.apply_mark(self.0.to_expn_id(), self.1)
+ }
+}
+
+/// An iterator over the token trees in a delimited token tree (`{ ... }`) or a sequence (`$(...)`).
+enum Frame<'a> {
+ Delimited { tts: &'a [mbe::TokenTree], idx: usize, delim: Delimiter, span: DelimSpan },
+ Sequence { tts: &'a [mbe::TokenTree], idx: usize, sep: Option<Token> },
+}
+
+impl<'a> Frame<'a> {
+ /// Construct a new frame around the delimited set of tokens.
+ fn new(src: &'a mbe::Delimited, span: DelimSpan) -> Frame<'a> {
+ Frame::Delimited { tts: &src.tts, idx: 0, delim: src.delim, span }
+ }
+}
+
+impl<'a> Iterator for Frame<'a> {
+ type Item = &'a mbe::TokenTree;
+
+ fn next(&mut self) -> Option<&'a mbe::TokenTree> {
+ match self {
+ Frame::Delimited { tts, ref mut idx, .. }
+ | Frame::Sequence { tts, ref mut idx, .. } => {
+ let res = tts.get(*idx);
+ *idx += 1;
+ res
+ }
+ }
+ }
+}
+
+/// This can do Macro-By-Example transcription.
+/// - `interp` is a map of meta-variables to the tokens (non-terminals) they matched in the
+/// invocation. We are assuming we already know there is a match.
+/// - `src` is the RHS of the MBE, that is, the "example" we are filling in.
+///
+/// For example,
+///
+/// ```rust
+/// macro_rules! foo {
+/// ($id:ident) => { println!("{}", stringify!($id)); }
+/// }
+///
+/// foo!(bar);
+/// ```
+///
+/// `interp` would contain `$id => bar` and `src` would contain `println!("{}", stringify!($id));`.
+///
+/// `transcribe` would return a `TokenStream` containing `println!("{}", stringify!(bar));`.
+///
+/// Along the way, we do some additional error checking.
+pub(super) fn transcribe<'a>(
+ cx: &ExtCtxt<'a>,
+ interp: &FxHashMap<MacroRulesNormalizedIdent, NamedMatch>,
+ src: &mbe::Delimited,
+ src_span: DelimSpan,
+ transparency: Transparency,
+) -> PResult<'a, TokenStream> {
+ // Nothing for us to transcribe...
+ if src.tts.is_empty() {
+ return Ok(TokenStream::default());
+ }
+
+ // We descend into the RHS (`src`), expanding things as we go. This stack contains the things
+ // we have yet to expand/are still expanding. We start the stack off with the whole RHS.
+ let mut stack: SmallVec<[Frame<'_>; 1]> = smallvec![Frame::new(&src, src_span)];
+
+ // As we descend in the RHS, we will need to be able to match nested sequences of matchers.
+ // `repeats` keeps track of where we are in matching at each level, with the last element being
+ // the most deeply nested sequence. This is used as a stack.
+ let mut repeats = Vec::new();
+
+ // `result` contains resulting token stream from the TokenTree we just finished processing. At
+ // the end, this will contain the full result of transcription, but at arbitrary points during
+ // `transcribe`, `result` will contain subsets of the final result.
+ //
+ // Specifically, as we descend into each TokenTree, we will push the existing results onto the
+ // `result_stack` and clear `results`. We will then produce the results of transcribing the
+ // TokenTree into `results`. Then, as we unwind back out of the `TokenTree`, we will pop the
+ // `result_stack` and append `results` to it to produce the new `results` up to that point.
+ //
+ // Thus, if we try to pop the `result_stack` and it is empty, we have reached the top-level
+ // again, and we are done transcribing.
+ let mut result: Vec<TokenTree> = Vec::new();
+ let mut result_stack = Vec::new();
+ let mut marker = Marker(cx.current_expansion.id, transparency);
+
+ loop {
+ // Look at the last frame on the stack.
+ // If it still has a TokenTree we have not looked at yet, use that tree.
+ let Some(tree) = stack.last_mut().unwrap().next() else {
+ // This else-case never produces a value for `tree` (it `continue`s or `return`s).
+
+ // Otherwise, if we have just reached the end of a sequence and we can keep repeating,
+ // go back to the beginning of the sequence.
+ if let Frame::Sequence { idx, sep, .. } = stack.last_mut().unwrap() {
+ let (repeat_idx, repeat_len) = repeats.last_mut().unwrap();
+ *repeat_idx += 1;
+ if repeat_idx < repeat_len {
+ *idx = 0;
+ if let Some(sep) = sep {
+ result.push(TokenTree::Token(sep.clone(), Spacing::Alone));
+ }
+ continue;
+ }
+ }
+
+ // We are done with the top of the stack. Pop it. Depending on what it was, we do
+ // different things. Note that the outermost item must be the delimited, wrapped RHS
+ // that was passed in originally to `transcribe`.
+ match stack.pop().unwrap() {
+ // Done with a sequence. Pop from repeats.
+ Frame::Sequence { .. } => {
+ repeats.pop();
+ }
+
+ // We are done processing a Delimited. If this is the top-level delimited, we are
+ // done. Otherwise, we unwind the result_stack to append what we have produced to
+ // any previous results.
+ Frame::Delimited { delim, span, .. } => {
+ if result_stack.is_empty() {
+ // No results left to compute! We are back at the top-level.
+ return Ok(TokenStream::new(result));
+ }
+
+ // Step back into the parent Delimited.
+ let tree = TokenTree::Delimited(span, delim, TokenStream::new(result));
+ result = result_stack.pop().unwrap();
+ result.push(tree);
+ }
+ }
+ continue;
+ };
+
+ // At this point, we know we are in the middle of a TokenTree (the last one on `stack`).
+ // `tree` contains the next `TokenTree` to be processed.
+ match tree {
+ // We are descending into a sequence. We first make sure that the matchers in the RHS
+ // and the matches in `interp` have the same shape. Otherwise, either the caller or the
+ // macro writer has made a mistake.
+ seq @ mbe::TokenTree::Sequence(_, delimited) => {
+ match lockstep_iter_size(&seq, interp, &repeats) {
+ LockstepIterSize::Unconstrained => {
+ return Err(cx.struct_span_err(
+ seq.span(), /* blame macro writer */
+ "attempted to repeat an expression containing no syntax variables \
+ matched as repeating at this depth",
+ ));
+ }
+
+ LockstepIterSize::Contradiction(msg) => {
+ // FIXME: this really ought to be caught at macro definition time... It
+ // happens when two meta-variables are used in the same repetition in a
+ // sequence, but they come from different sequence matchers and repeat
+ // different amounts.
+ return Err(cx.struct_span_err(seq.span(), &msg));
+ }
+
+ LockstepIterSize::Constraint(len, _) => {
+ // We do this to avoid an extra clone above. We know that this is a
+ // sequence already.
+ let mbe::TokenTree::Sequence(sp, seq) = seq else {
+ unreachable!()
+ };
+
+ // Is the repetition empty?
+ if len == 0 {
+ if seq.kleene.op == mbe::KleeneOp::OneOrMore {
+ // FIXME: this really ought to be caught at macro definition
+ // time... It happens when the Kleene operator in the matcher and
+ // the body for the same meta-variable do not match.
+ return Err(cx.struct_span_err(
+ sp.entire(),
+ "this must repeat at least once",
+ ));
+ }
+ } else {
+ // 0 is the initial counter (we have done 0 repetitions so far). `len`
+ // is the total number of repetitions we should generate.
+ repeats.push((0, len));
+
+ // The first time we encounter the sequence we push it to the stack. It
+ // then gets reused (see the beginning of the loop) until we are done
+ // repeating.
+ stack.push(Frame::Sequence {
+ idx: 0,
+ sep: seq.separator.clone(),
+ tts: &delimited.tts,
+ });
+ }
+ }
+ }
+ }
+
+ // Replace the meta-var with the matched token tree from the invocation.
+ mbe::TokenTree::MetaVar(mut sp, mut original_ident) => {
+ // Find the matched nonterminal from the macro invocation, and use it to replace
+ // the meta-var.
+ let ident = MacroRulesNormalizedIdent::new(original_ident);
+ if let Some(cur_matched) = lookup_cur_matched(ident, interp, &repeats) {
+ match cur_matched {
+ MatchedTokenTree(ref tt) => {
+ // `tt`s are emitted into the output stream directly as "raw tokens",
+ // without wrapping them into groups.
+ let token = tt.clone();
+ result.push(token);
+ }
+ MatchedNonterminal(ref nt) => {
+ // Other variables are emitted into the output stream as groups with
+ // `Delimiter::Invisible` to maintain parsing priorities.
+ // `Interpolated` is currently used for such groups in rustc parser.
+ marker.visit_span(&mut sp);
+ let token = TokenTree::token_alone(token::Interpolated(nt.clone()), sp);
+ result.push(token);
+ }
+ MatchedSeq(..) => {
+ // We were unable to descend far enough. This is an error.
+ return Err(cx.struct_span_err(
+ sp, /* blame the macro writer */
+ &format!("variable '{}' is still repeating at this depth", ident),
+ ));
+ }
+ }
+ } else {
+ // If we aren't able to match the meta-var, we push it back into the result but
+ // with modified syntax context. (I believe this supports nested macros).
+ marker.visit_span(&mut sp);
+ marker.visit_ident(&mut original_ident);
+ result.push(TokenTree::token_alone(token::Dollar, sp));
+ result.push(TokenTree::Token(
+ Token::from_ast_ident(original_ident),
+ Spacing::Alone,
+ ));
+ }
+ }
+
+ // Replace meta-variable expressions with the result of their expansion.
+ mbe::TokenTree::MetaVarExpr(sp, expr) => {
+ transcribe_metavar_expr(cx, expr, interp, &mut marker, &repeats, &mut result, &sp)?;
+ }
+
+ // If we are entering a new delimiter, we push its contents to the `stack` to be
+ // processed, and we push all of the currently produced results to the `result_stack`.
+ // We will produce all of the results of the inside of the `Delimited` and then we will
+ // jump back out of the Delimited, pop the result_stack and add the new results back to
+ // the previous results (from outside the Delimited).
+ mbe::TokenTree::Delimited(mut span, delimited) => {
+ mut_visit::visit_delim_span(&mut span, &mut marker);
+ stack.push(Frame::Delimited {
+ tts: &delimited.tts,
+ delim: delimited.delim,
+ idx: 0,
+ span,
+ });
+ result_stack.push(mem::take(&mut result));
+ }
+
+ // Nothing much to do here. Just push the token to the result, being careful to
+ // preserve syntax context.
+ mbe::TokenTree::Token(token) => {
+ let mut token = token.clone();
+ mut_visit::visit_token(&mut token, &mut marker);
+ let tt = TokenTree::Token(token, Spacing::Alone);
+ result.push(tt);
+ }
+
+ // There should be no meta-var declarations in the invocation of a macro.
+ mbe::TokenTree::MetaVarDecl(..) => panic!("unexpected `TokenTree::MetaVarDecl`"),
+ }
+ }
+}
+
+/// Lookup the meta-var named `ident` and return the matched token tree from the invocation using
+/// the set of matches `interpolations`.
+///
+/// See the definition of `repeats` in the `transcribe` function. `repeats` is used to descend
+/// into the right place in nested matchers. If we attempt to descend too far, the macro writer has
+/// made a mistake, and we return `None`.
+fn lookup_cur_matched<'a>(
+ ident: MacroRulesNormalizedIdent,
+ interpolations: &'a FxHashMap<MacroRulesNormalizedIdent, NamedMatch>,
+ repeats: &[(usize, usize)],
+) -> Option<&'a NamedMatch> {
+ interpolations.get(&ident).map(|matched| {
+ let mut matched = matched;
+ for &(idx, _) in repeats {
+ match matched {
+ MatchedTokenTree(_) | MatchedNonterminal(_) => break,
+ MatchedSeq(ref ads) => matched = ads.get(idx).unwrap(),
+ }
+ }
+
+ matched
+ })
+}
+
+/// An accumulator over a TokenTree to be used with `fold`. During transcription, we need to make
+/// sure that the size of each sequence and all of its nested sequences are the same as the sizes
+/// of all the matched (nested) sequences in the macro invocation. If they don't match, somebody
+/// has made a mistake (either the macro writer or caller).
+#[derive(Clone)]
+enum LockstepIterSize {
+ /// No constraints on length of matcher. This is true for any TokenTree variants except a
+ /// `MetaVar` with an actual `MatchedSeq` (as opposed to a `MatchedNonterminal`).
+ Unconstrained,
+
+ /// A `MetaVar` with an actual `MatchedSeq`. The length of the match and the name of the
+ /// meta-var are returned.
+ Constraint(usize, MacroRulesNormalizedIdent),
+
+ /// Two `Constraint`s on the same sequence had different lengths. This is an error.
+ Contradiction(String),
+}
+
+impl LockstepIterSize {
+ /// Find incompatibilities in matcher/invocation sizes.
+ /// - `Unconstrained` is compatible with everything.
+ /// - `Contradiction` is incompatible with everything.
+ /// - `Constraint(len)` is only compatible with other constraints of the same length.
+ fn with(self, other: LockstepIterSize) -> LockstepIterSize {
+ match self {
+ LockstepIterSize::Unconstrained => other,
+ LockstepIterSize::Contradiction(_) => self,
+ LockstepIterSize::Constraint(l_len, ref l_id) => match other {
+ LockstepIterSize::Unconstrained => self,
+ LockstepIterSize::Contradiction(_) => other,
+ LockstepIterSize::Constraint(r_len, _) if l_len == r_len => self,
+ LockstepIterSize::Constraint(r_len, r_id) => {
+ let msg = format!(
+ "meta-variable `{}` repeats {} time{}, but `{}` repeats {} time{}",
+ l_id,
+ l_len,
+ pluralize!(l_len),
+ r_id,
+ r_len,
+ pluralize!(r_len),
+ );
+ LockstepIterSize::Contradiction(msg)
+ }
+ },
+ }
+ }
+}
+
+/// Given a `tree`, make sure that all sequences have the same length as the matches for the
+/// appropriate meta-vars in `interpolations`.
+///
+/// Note that if `repeats` does not match the exact correct depth of a meta-var,
+/// `lookup_cur_matched` will return `None`, which is why this still works even in the presence of
+/// multiple nested matcher sequences.
+///
+/// Example: `$($($x $y)+*);+` -- we need to make sure that `x` and `y` repeat the same amount as
+/// each other at the given depth when the macro was invoked. If they don't it might mean they were
+/// declared at unequal depths or there was a compile bug. For example, if we have 3 repetitions of
+/// the outer sequence and 4 repetitions of the inner sequence for `x`, we should have the same for
+/// `y`; otherwise, we can't transcribe them both at the given depth.
+fn lockstep_iter_size(
+ tree: &mbe::TokenTree,
+ interpolations: &FxHashMap<MacroRulesNormalizedIdent, NamedMatch>,
+ repeats: &[(usize, usize)],
+) -> LockstepIterSize {
+ use mbe::TokenTree;
+ match *tree {
+ TokenTree::Delimited(_, ref delimited) => {
+ delimited.tts.iter().fold(LockstepIterSize::Unconstrained, |size, tt| {
+ size.with(lockstep_iter_size(tt, interpolations, repeats))
+ })
+ }
+ TokenTree::Sequence(_, ref seq) => {
+ seq.tts.iter().fold(LockstepIterSize::Unconstrained, |size, tt| {
+ size.with(lockstep_iter_size(tt, interpolations, repeats))
+ })
+ }
+ TokenTree::MetaVar(_, name) | TokenTree::MetaVarDecl(_, name, _) => {
+ let name = MacroRulesNormalizedIdent::new(name);
+ match lookup_cur_matched(name, interpolations, repeats) {
+ Some(matched) => match matched {
+ MatchedTokenTree(_) | MatchedNonterminal(_) => LockstepIterSize::Unconstrained,
+ MatchedSeq(ref ads) => LockstepIterSize::Constraint(ads.len(), name),
+ },
+ _ => LockstepIterSize::Unconstrained,
+ }
+ }
+ TokenTree::MetaVarExpr(_, ref expr) => {
+ let default_rslt = LockstepIterSize::Unconstrained;
+ let Some(ident) = expr.ident() else { return default_rslt; };
+ let name = MacroRulesNormalizedIdent::new(ident);
+ match lookup_cur_matched(name, interpolations, repeats) {
+ Some(MatchedSeq(ref ads)) => {
+ default_rslt.with(LockstepIterSize::Constraint(ads.len(), name))
+ }
+ _ => default_rslt,
+ }
+ }
+ TokenTree::Token(..) => LockstepIterSize::Unconstrained,
+ }
+}
+
+/// Used solely by the `count` meta-variable expression, counts the outer-most repetitions at a
+/// given optional nested depth.
+///
+/// For example, a macro parameter of `$( { $( $foo:ident ),* } )*` called with `{ a, b } { c }`:
+///
+/// * `[ $( ${count(foo)} ),* ]` will return `[2, 1]`: `{ a, b }` has two identifiers and `{ c }` has one
+/// * `[ $( ${count(foo, 0)} ),* ]` will be the same as `[ $( ${count(foo)} ),* ]`
+/// * `[ $( ${count(foo, 1)} ),* ]` will return an error because `${count(foo, 1)}` is
+/// declared inside a single repetition and the index `1` implies two nested repetitions.
+fn count_repetitions<'a>(
+ cx: &ExtCtxt<'a>,
+ depth_opt: Option<usize>,
+ mut matched: &NamedMatch,
+ repeats: &[(usize, usize)],
+ sp: &DelimSpan,
+) -> PResult<'a, usize> {
+ // Recursively count the number of matches in `matched` at given depth
+ // (or at the top-level of `matched` if no depth is given).
+ fn count<'a>(
+ cx: &ExtCtxt<'a>,
+ declared_lhs_depth: usize,
+ depth_opt: Option<usize>,
+ matched: &NamedMatch,
+ sp: &DelimSpan,
+ ) -> PResult<'a, usize> {
+ match matched {
+ MatchedTokenTree(_) | MatchedNonterminal(_) => {
+ if declared_lhs_depth == 0 {
+ return Err(cx.struct_span_err(
+ sp.entire(),
+ "`count` can not be placed inside the inner-most repetition",
+ ));
+ }
+ match depth_opt {
+ None => Ok(1),
+ Some(_) => Err(out_of_bounds_err(cx, declared_lhs_depth, sp.entire(), "count")),
+ }
+ }
+ MatchedSeq(ref named_matches) => {
+ let new_declared_lhs_depth = declared_lhs_depth + 1;
+ match depth_opt {
+ None => named_matches
+ .iter()
+ .map(|elem| count(cx, new_declared_lhs_depth, None, elem, sp))
+ .sum(),
+ Some(0) => Ok(named_matches.len()),
+ Some(depth) => named_matches
+ .iter()
+ .map(|elem| count(cx, new_declared_lhs_depth, Some(depth - 1), elem, sp))
+ .sum(),
+ }
+ }
+ }
+ }
+ // `repeats` records all of the nested levels at which we are currently
+ // matching meta-variables. The meta-var-expr `count($x)` only counts
+ // matches that occur in this "subtree" of the `NamedMatch` where we
+ // are currently transcribing, so we need to descend to that subtree
+ // before we start counting. `matched` contains the various levels of the
+ // tree as we descend, and its final value is the subtree we are currently at.
+ for &(idx, _) in repeats {
+ if let MatchedSeq(ref ads) = matched {
+ matched = &ads[idx];
+ }
+ }
+ count(cx, 0, depth_opt, matched, sp)
+}
+
+/// Returns a `NamedMatch` item declared on the LHS given an arbitrary [Ident]
+fn matched_from_ident<'ctx, 'interp, 'rslt>(
+ cx: &ExtCtxt<'ctx>,
+ ident: Ident,
+ interp: &'interp FxHashMap<MacroRulesNormalizedIdent, NamedMatch>,
+) -> PResult<'ctx, &'rslt NamedMatch>
+where
+ 'interp: 'rslt,
+{
+ let span = ident.span;
+ let key = MacroRulesNormalizedIdent::new(ident);
+ interp.get(&key).ok_or_else(|| {
+ cx.struct_span_err(
+ span,
+ &format!("variable `{}` is not recognized in meta-variable expression", key),
+ )
+ })
+}
+
+/// Used by meta-variable expressions when a user input is out of the actual declared bounds. For
+/// example, `index(999999)` in a repetition of only three elements.
+fn out_of_bounds_err<'a>(
+ cx: &ExtCtxt<'a>,
+ max: usize,
+ span: Span,
+ ty: &str,
+) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let msg = if max == 0 {
+ format!(
+ "meta-variable expression `{ty}` with depth parameter \
+ must be called inside of a macro repetition"
+ )
+ } else {
+ format!(
+ "depth parameter on meta-variable expression `{ty}` \
+ must be less than {max}"
+ )
+ };
+ cx.struct_span_err(span, &msg)
+}
+
+fn transcribe_metavar_expr<'a>(
+ cx: &ExtCtxt<'a>,
+ expr: &MetaVarExpr,
+ interp: &FxHashMap<MacroRulesNormalizedIdent, NamedMatch>,
+ marker: &mut Marker,
+ repeats: &[(usize, usize)],
+ result: &mut Vec<TokenTree>,
+ sp: &DelimSpan,
+) -> PResult<'a, ()> {
+ let mut visited_span = || {
+ let mut span = sp.entire();
+ marker.visit_span(&mut span);
+ span
+ };
+ match *expr {
+ MetaVarExpr::Count(original_ident, depth_opt) => {
+ let matched = matched_from_ident(cx, original_ident, interp)?;
+ let count = count_repetitions(cx, depth_opt, matched, &repeats, sp)?;
+ let tt = TokenTree::token_alone(
+ TokenKind::lit(token::Integer, sym::integer(count), None),
+ visited_span(),
+ );
+ result.push(tt);
+ }
+ MetaVarExpr::Ignore(original_ident) => {
+ // Used to ensure that `original_ident` is present in the LHS
+ let _ = matched_from_ident(cx, original_ident, interp)?;
+ }
+ MetaVarExpr::Index(depth) => match repeats.iter().nth_back(depth) {
+ Some((index, _)) => {
+ result.push(TokenTree::token_alone(
+ TokenKind::lit(token::Integer, sym::integer(*index), None),
+ visited_span(),
+ ));
+ }
+ None => return Err(out_of_bounds_err(cx, repeats.len(), sp.entire(), "index")),
+ },
+ MetaVarExpr::Length(depth) => match repeats.iter().nth_back(depth) {
+ Some((_, length)) => {
+ result.push(TokenTree::token_alone(
+ TokenKind::lit(token::Integer, sym::integer(*length), None),
+ visited_span(),
+ ));
+ }
+ None => return Err(out_of_bounds_err(cx, repeats.len(), sp.entire(), "length")),
+ },
+ }
+ Ok(())
+}
diff --git a/compiler/rustc_expand/src/module.rs b/compiler/rustc_expand/src/module.rs
new file mode 100644
index 000000000..0315d1163
--- /dev/null
+++ b/compiler/rustc_expand/src/module.rs
@@ -0,0 +1,298 @@
+use crate::base::ModuleData;
+use rustc_ast::ptr::P;
+use rustc_ast::{token, Attribute, Inline, Item, ModSpans};
+use rustc_errors::{struct_span_err, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_parse::new_parser_from_file;
+use rustc_parse::validate_attr;
+use rustc_session::parse::ParseSess;
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+
+use std::path::{self, Path, PathBuf};
+
+#[derive(Copy, Clone)]
+pub enum DirOwnership {
+ Owned {
+ // None if `mod.rs`, `Some("foo")` if we're in `foo.rs`.
+ relative: Option<Ident>,
+ },
+ UnownedViaBlock,
+}
+
+// Public for rustfmt usage.
+pub struct ModulePathSuccess {
+ pub file_path: PathBuf,
+ pub dir_ownership: DirOwnership,
+}
+
+pub(crate) struct ParsedExternalMod {
+ pub items: Vec<P<Item>>,
+ pub spans: ModSpans,
+ pub file_path: PathBuf,
+ pub dir_path: PathBuf,
+ pub dir_ownership: DirOwnership,
+}
+
+pub enum ModError<'a> {
+ CircularInclusion(Vec<PathBuf>),
+ ModInBlock(Option<Ident>),
+ FileNotFound(Ident, PathBuf, PathBuf),
+ MultipleCandidates(Ident, PathBuf, PathBuf),
+ ParserError(DiagnosticBuilder<'a, ErrorGuaranteed>),
+}
+
+pub(crate) fn parse_external_mod(
+ sess: &Session,
+ ident: Ident,
+ span: Span, // The span to blame on errors.
+ module: &ModuleData,
+ mut dir_ownership: DirOwnership,
+ attrs: &mut Vec<Attribute>,
+) -> ParsedExternalMod {
+ // We bail on the first error, but that error does not cause a fatal error... (1)
+ let result: Result<_, ModError<'_>> = try {
+ // Extract the file path and the new ownership.
+ let mp = mod_file_path(sess, ident, &attrs, &module.dir_path, dir_ownership)?;
+ dir_ownership = mp.dir_ownership;
+
+ // Ensure file paths are acyclic.
+ if let Some(pos) = module.file_path_stack.iter().position(|p| p == &mp.file_path) {
+ Err(ModError::CircularInclusion(module.file_path_stack[pos..].to_vec()))?;
+ }
+
+ // Actually parse the external file as a module.
+ let mut parser = new_parser_from_file(&sess.parse_sess, &mp.file_path, Some(span));
+ let (mut inner_attrs, items, inner_span) =
+ parser.parse_mod(&token::Eof).map_err(|err| ModError::ParserError(err))?;
+ attrs.append(&mut inner_attrs);
+ (items, inner_span, mp.file_path)
+ };
+ // (1) ...instead, we return a dummy module.
+ let (items, spans, file_path) =
+ result.map_err(|err| err.report(sess, span)).unwrap_or_default();
+
+ // Extract the directory path for submodules of the module.
+ let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
+
+ ParsedExternalMod { items, spans, file_path, dir_path, dir_ownership }
+}
+
+pub(crate) fn mod_dir_path(
+ sess: &Session,
+ ident: Ident,
+ attrs: &[Attribute],
+ module: &ModuleData,
+ mut dir_ownership: DirOwnership,
+ inline: Inline,
+) -> (PathBuf, DirOwnership) {
+ match inline {
+ Inline::Yes if let Some(file_path) = mod_file_path_from_attr(sess, attrs, &module.dir_path) => {
+ // For inline modules, the file path from `#[path]` is actually the directory path
+ // for historical reasons, so we don't pop the last segment here.
+ (file_path, DirOwnership::Owned { relative: None })
+ }
+ Inline::Yes => {
+ // We have to push on the current module name in the case of relative
+ // paths in order to ensure that any additional module paths from inline
+ // `mod x { ... }` come after the relative extension.
+ //
+ // For example, a `mod z { ... }` inside `x/y.rs` should set the current
+ // directory path to `/x/y/z`, not `/x/z` with a relative offset of `y`.
+ let mut dir_path = module.dir_path.clone();
+ if let DirOwnership::Owned { relative } = &mut dir_ownership {
+ if let Some(ident) = relative.take() {
+ // Remove the relative offset.
+ dir_path.push(ident.as_str());
+ }
+ }
+ dir_path.push(ident.as_str());
+
+ (dir_path, dir_ownership)
+ }
+ Inline::No => {
+ // FIXME: This is a subset of `parse_external_mod` without actual parsing,
+ // check whether the logic for unloaded, loaded and inline modules can be unified.
+ let file_path = mod_file_path(sess, ident, &attrs, &module.dir_path, dir_ownership)
+ .map(|mp| {
+ dir_ownership = mp.dir_ownership;
+ mp.file_path
+ })
+ .unwrap_or_default();
+
+ // Extract the directory path for submodules of the module.
+ let dir_path = file_path.parent().unwrap_or(&file_path).to_owned();
+
+ (dir_path, dir_ownership)
+ }
+ }
+}
+
+fn mod_file_path<'a>(
+ sess: &'a Session,
+ ident: Ident,
+ attrs: &[Attribute],
+ dir_path: &Path,
+ dir_ownership: DirOwnership,
+) -> Result<ModulePathSuccess, ModError<'a>> {
+ if let Some(file_path) = mod_file_path_from_attr(sess, attrs, dir_path) {
+ // All `#[path]` files are treated as though they are a `mod.rs` file.
+ // This means that `mod foo;` declarations inside `#[path]`-included
+ // files are siblings.
+ //
+ // Note that this will produce weirdness when a file named `foo.rs` is
+ // `#[path]` included and contains a `mod foo;` declaration.
+ // If you encounter this, it's your own darn fault :P
+ let dir_ownership = DirOwnership::Owned { relative: None };
+ return Ok(ModulePathSuccess { file_path, dir_ownership });
+ }
+
+ let relative = match dir_ownership {
+ DirOwnership::Owned { relative } => relative,
+ DirOwnership::UnownedViaBlock => None,
+ };
+ let result = default_submod_path(&sess.parse_sess, ident, relative, dir_path);
+ match dir_ownership {
+ DirOwnership::Owned { .. } => result,
+ DirOwnership::UnownedViaBlock => Err(ModError::ModInBlock(match result {
+ Ok(_) | Err(ModError::MultipleCandidates(..)) => Some(ident),
+ _ => None,
+ })),
+ }
+}
+
+/// Derive a submodule path from the first found `#[path = "path_string"]`.
+/// The provided `dir_path` is joined with the `path_string`.
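+///
+/// For illustration (not an exhaustive spec), a declaration such as
+///
+/// ```ignore (illustrative)
+/// #[path = "alternate/location.rs"]
+/// mod foo;
+/// ```
+///
+/// resolves to `<dir_path>/alternate/location.rs`.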
+fn mod_file_path_from_attr(
+ sess: &Session,
+ attrs: &[Attribute],
+ dir_path: &Path,
+) -> Option<PathBuf> {
+ // Extract path string from first `#[path = "path_string"]` attribute.
+ let first_path = attrs.iter().find(|at| at.has_name(sym::path))?;
+ let Some(path_sym) = first_path.value_str() else {
+        // This check is here mainly to catch attempts to use a macro, such as
+        // `#[path = concat!(...)]`. This isn't currently supported because the
+        // InvocationCollector would otherwise need to defer loading a module
+        // until the `#[path]` attribute was expanded, and it doesn't support
+        // that (doing so would likely add a fair bit of complexity). Bad forms
+        // are usually checked in AstValidator (via `check_builtin_attribute`),
+        // but by the time that check runs the macro has already been expanded,
+        // so no error is reported there.
+ validate_attr::emit_fatal_malformed_builtin_attribute(
+ &sess.parse_sess,
+ first_path,
+ sym::path,
+ );
+ };
+
+ let path_str = path_sym.as_str();
+
+ // On windows, the base path might have the form
+ // `\\?\foo\bar` in which case it does not tolerate
+ // mixed `/` and `\` separators, so canonicalize
+ // `/` to `\`.
+ #[cfg(windows)]
+ let path_str = path_str.replace("/", "\\");
+
+ Some(dir_path.join(path_str))
+}
+
+/// Returns a path to a module.
+// Public for rustfmt usage.
+pub fn default_submod_path<'a>(
+ sess: &'a ParseSess,
+ ident: Ident,
+ relative: Option<Ident>,
+ dir_path: &Path,
+) -> Result<ModulePathSuccess, ModError<'a>> {
+ // If we're in a foo.rs file instead of a mod.rs file,
+ // we need to look for submodules in
+ // `./foo/<ident>.rs` and `./foo/<ident>/mod.rs` rather than
+ // `./<ident>.rs` and `./<ident>/mod.rs`.
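+    //
+    // For example (illustrative), `mod bar;` declared in `src/foo.rs` arrives
+    // here with `relative == Some(`foo`)` and `dir_path == "src"`, so the
+    // candidates checked below are `src/foo/bar.rs` and `src/foo/bar/mod.rs`.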
+ let relative_prefix_string;
+ let relative_prefix = if let Some(ident) = relative {
+ relative_prefix_string = format!("{}{}", ident.name, path::MAIN_SEPARATOR);
+ &relative_prefix_string
+ } else {
+ ""
+ };
+
+ let default_path_str = format!("{}{}.rs", relative_prefix, ident.name);
+ let secondary_path_str =
+ format!("{}{}{}mod.rs", relative_prefix, ident.name, path::MAIN_SEPARATOR);
+ let default_path = dir_path.join(&default_path_str);
+ let secondary_path = dir_path.join(&secondary_path_str);
+ let default_exists = sess.source_map().file_exists(&default_path);
+ let secondary_exists = sess.source_map().file_exists(&secondary_path);
+
+ match (default_exists, secondary_exists) {
+ (true, false) => Ok(ModulePathSuccess {
+ file_path: default_path,
+ dir_ownership: DirOwnership::Owned { relative: Some(ident) },
+ }),
+ (false, true) => Ok(ModulePathSuccess {
+ file_path: secondary_path,
+ dir_ownership: DirOwnership::Owned { relative: None },
+ }),
+ (false, false) => Err(ModError::FileNotFound(ident, default_path, secondary_path)),
+ (true, true) => Err(ModError::MultipleCandidates(ident, default_path, secondary_path)),
+ }
+}
+
+impl ModError<'_> {
+ fn report(self, sess: &Session, span: Span) -> ErrorGuaranteed {
+ let diag = &sess.parse_sess.span_diagnostic;
+ match self {
+ ModError::CircularInclusion(file_paths) => {
+ let mut msg = String::from("circular modules: ");
+ for file_path in &file_paths {
+ msg.push_str(&file_path.display().to_string());
+ msg.push_str(" -> ");
+ }
+ msg.push_str(&file_paths[0].display().to_string());
+ diag.struct_span_err(span, &msg)
+ }
+ ModError::ModInBlock(ident) => {
+ let msg = "cannot declare a non-inline module inside a block unless it has a path attribute";
+ let mut err = diag.struct_span_err(span, msg);
+ if let Some(ident) = ident {
+ let note =
+ format!("maybe `use` the module `{}` instead of redeclaring it", ident);
+ err.span_note(span, &note);
+ }
+ err
+ }
+ ModError::FileNotFound(ident, default_path, secondary_path) => {
+ let mut err = struct_span_err!(
+ diag,
+ span,
+ E0583,
+ "file not found for module `{}`",
+ ident,
+ );
+ err.help(&format!(
+ "to create the module `{}`, create file \"{}\" or \"{}\"",
+ ident,
+ default_path.display(),
+ secondary_path.display(),
+ ));
+ err
+ }
+ ModError::MultipleCandidates(ident, default_path, secondary_path) => {
+ let mut err = struct_span_err!(
+ diag,
+ span,
+ E0761,
+ "file for module `{}` found at both \"{}\" and \"{}\"",
+ ident,
+ default_path.display(),
+ secondary_path.display(),
+ );
+ err.help("delete or rename one of them to remove the ambiguity");
+ err
+ }
+ ModError::ParserError(err) => err,
+ }.emit()
+ }
+}
diff --git a/compiler/rustc_expand/src/mut_visit/tests.rs b/compiler/rustc_expand/src/mut_visit/tests.rs
new file mode 100644
index 000000000..8974d45b4
--- /dev/null
+++ b/compiler/rustc_expand/src/mut_visit/tests.rs
@@ -0,0 +1,72 @@
+use crate::tests::{matches_codepattern, string_to_crate};
+
+use rustc_ast as ast;
+use rustc_ast::mut_visit::MutVisitor;
+use rustc_ast_pretty::pprust;
+use rustc_span::create_default_session_globals_then;
+use rustc_span::symbol::Ident;
+
+// This version doesn't bother to include comments or doc-strings in the output.
+fn print_crate_items(krate: &ast::Crate) -> String {
+ krate.items.iter().map(|i| pprust::item_to_string(i)).collect::<Vec<_>>().join(" ")
+}
+
+// Change every identifier to "zz".
+struct ToZzIdentMutVisitor;
+
+impl MutVisitor for ToZzIdentMutVisitor {
+ const VISIT_TOKENS: bool = true;
+
+ fn visit_ident(&mut self, ident: &mut Ident) {
+ *ident = Ident::from_str("zz");
+ }
+}
+
+// Maybe add to `expand.rs`.
+macro_rules! assert_pred {
+    ($pred:expr, $predname:expr, $a:expr, $b:expr) => {{
+ let pred_val = $pred;
+ let a_val = $a;
+ let b_val = $b;
+ if !(pred_val(&a_val, &b_val)) {
+ panic!("expected args satisfying {}, got {} and {}", $predname, a_val, b_val);
+ }
+ }};
+}
+
+// Make sure idents get transformed everywhere.
+#[test]
+fn ident_transformation() {
+ create_default_session_globals_then(|| {
+ let mut zz_visitor = ToZzIdentMutVisitor;
+ let mut krate =
+ string_to_crate("#[a] mod b {fn c (d : e, f : g) {h!(i,j,k);l;m}}".to_string());
+ zz_visitor.visit_crate(&mut krate);
+ assert_pred!(
+ matches_codepattern,
+ "matches_codepattern",
+ print_crate_items(&krate),
+ "#[zz]mod zz{fn zz(zz:zz,zz:zz){zz!(zz,zz,zz);zz;zz}}".to_string()
+ );
+ })
+}
+
+// Make sure idents get transformed even inside macro defs.
+#[test]
+fn ident_transformation_in_defs() {
+ create_default_session_globals_then(|| {
+ let mut zz_visitor = ToZzIdentMutVisitor;
+ let mut krate = string_to_crate(
+ "macro_rules! a {(b $c:expr $(d $e:token)f+ => \
+ (g $(d $d $e)+))} "
+ .to_string(),
+ );
+ zz_visitor.visit_crate(&mut krate);
+ assert_pred!(
+ matches_codepattern,
+ "matches_codepattern",
+ print_crate_items(&krate),
+ "macro_rules! zz{(zz$zz:zz$(zz $zz:zz)zz+=>(zz$(zz$zz$zz)+))}".to_string()
+ );
+ })
+}
diff --git a/compiler/rustc_expand/src/parse/tests.rs b/compiler/rustc_expand/src/parse/tests.rs
new file mode 100644
index 000000000..a3c631d33
--- /dev/null
+++ b/compiler/rustc_expand/src/parse/tests.rs
@@ -0,0 +1,358 @@
+use crate::tests::{matches_codepattern, string_to_stream, with_error_checking_parse};
+
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter, Token};
+use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree};
+use rustc_ast::visit;
+use rustc_ast::{self as ast, PatKind};
+use rustc_ast_pretty::pprust::item_to_string;
+use rustc_errors::PResult;
+use rustc_parse::new_parser_from_source_str;
+use rustc_parse::parser::ForceCollect;
+use rustc_session::parse::ParseSess;
+use rustc_span::create_default_session_globals_then;
+use rustc_span::source_map::FilePathMapping;
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::{BytePos, FileName, Pos, Span};
+
+use std::path::PathBuf;
+
+fn sess() -> ParseSess {
+ ParseSess::new(FilePathMapping::empty())
+}
+
+/// Parses an item.
+///
+/// Returns `Ok(Some(item))` when successful, `Ok(None)` when no item was found, and `Err`
+/// when a syntax error occurred.
+fn parse_item_from_source_str(
+ name: FileName,
+ source: String,
+ sess: &ParseSess,
+) -> PResult<'_, Option<P<ast::Item>>> {
+ new_parser_from_source_str(sess, name, source).parse_item(ForceCollect::No)
+}
+
+// Produces a `rustc_span::Span`.
+fn sp(a: u32, b: u32) -> Span {
+ Span::with_root_ctxt(BytePos(a), BytePos(b))
+}
+
+/// Parses a string, returns an expression.
+fn string_to_expr(source_str: String) -> P<ast::Expr> {
+ with_error_checking_parse(source_str, &sess(), |p| p.parse_expr())
+}
+
+/// Parses a string, returns an item.
+fn string_to_item(source_str: String) -> Option<P<ast::Item>> {
+ with_error_checking_parse(source_str, &sess(), |p| p.parse_item(ForceCollect::No))
+}
+
+#[should_panic]
+#[test]
+fn bad_path_expr_1() {
+ create_default_session_globals_then(|| {
+ string_to_expr("::abc::def::return".to_string());
+ })
+}
+
+// Checks the token-tree-ization of macros.
+#[test]
+fn string_to_tts_macro() {
+ create_default_session_globals_then(|| {
+ let tts: Vec<_> =
+ string_to_stream("macro_rules! zip (($a)=>($a))".to_string()).into_trees().collect();
+ let tts: &[TokenTree] = &tts[..];
+
+ match tts {
+ [
+ TokenTree::Token(Token { kind: token::Ident(name_macro_rules, false), .. }, _),
+ TokenTree::Token(Token { kind: token::Not, .. }, _),
+ TokenTree::Token(Token { kind: token::Ident(name_zip, false), .. }, _),
+ TokenTree::Delimited(_, macro_delim, macro_tts),
+ ] if name_macro_rules == &kw::MacroRules && name_zip.as_str() == "zip" => {
+ let tts = &macro_tts.trees().collect::<Vec<_>>();
+ match &tts[..] {
+ [
+ TokenTree::Delimited(_, first_delim, first_tts),
+ TokenTree::Token(Token { kind: token::FatArrow, .. }, _),
+ TokenTree::Delimited(_, second_delim, second_tts),
+ ] if macro_delim == &Delimiter::Parenthesis => {
+ let tts = &first_tts.trees().collect::<Vec<_>>();
+ match &tts[..] {
+ [
+ TokenTree::Token(Token { kind: token::Dollar, .. }, _),
+ TokenTree::Token(Token { kind: token::Ident(name, false), .. }, _),
+ ] if first_delim == &Delimiter::Parenthesis && name.as_str() == "a" => {
+ }
+ _ => panic!("value 3: {:?} {:?}", first_delim, first_tts),
+ }
+ let tts = &second_tts.trees().collect::<Vec<_>>();
+ match &tts[..] {
+ [
+ TokenTree::Token(Token { kind: token::Dollar, .. }, _),
+ TokenTree::Token(Token { kind: token::Ident(name, false), .. }, _),
+ ] if second_delim == &Delimiter::Parenthesis
+ && name.as_str() == "a" => {}
+ _ => panic!("value 4: {:?} {:?}", second_delim, second_tts),
+ }
+ }
+ _ => panic!("value 2: {:?} {:?}", macro_delim, macro_tts),
+ }
+ }
+ _ => panic!("value: {:?}", tts),
+ }
+ })
+}
+
+#[test]
+fn string_to_tts_1() {
+ create_default_session_globals_then(|| {
+ let tts = string_to_stream("fn a (b : i32) { b; }".to_string());
+
+ let expected = TokenStream::new(vec![
+ TokenTree::token_alone(token::Ident(kw::Fn, false), sp(0, 2)),
+ TokenTree::token_alone(token::Ident(Symbol::intern("a"), false), sp(3, 4)),
+ TokenTree::Delimited(
+ DelimSpan::from_pair(sp(5, 6), sp(13, 14)),
+ Delimiter::Parenthesis,
+ TokenStream::new(vec![
+ TokenTree::token_alone(token::Ident(Symbol::intern("b"), false), sp(6, 7)),
+ TokenTree::token_alone(token::Colon, sp(8, 9)),
+ TokenTree::token_alone(token::Ident(sym::i32, false), sp(10, 13)),
+ ])
+ .into(),
+ ),
+ TokenTree::Delimited(
+ DelimSpan::from_pair(sp(15, 16), sp(20, 21)),
+ Delimiter::Brace,
+ TokenStream::new(vec![
+ TokenTree::token_joint(token::Ident(Symbol::intern("b"), false), sp(17, 18)),
+ TokenTree::token_alone(token::Semi, sp(18, 19)),
+ ])
+ .into(),
+ ),
+ ]);
+
+ assert_eq!(tts, expected);
+ })
+}
+
+#[test]
+fn parse_use() {
+ create_default_session_globals_then(|| {
+ let use_s = "use foo::bar::baz;";
+ let vitem = string_to_item(use_s.to_string()).unwrap();
+ let vitem_s = item_to_string(&vitem);
+ assert_eq!(&vitem_s[..], use_s);
+
+ let use_s = "use foo::bar as baz;";
+ let vitem = string_to_item(use_s.to_string()).unwrap();
+ let vitem_s = item_to_string(&vitem);
+ assert_eq!(&vitem_s[..], use_s);
+ })
+}
+
+#[test]
+fn parse_extern_crate() {
+ create_default_session_globals_then(|| {
+ let ex_s = "extern crate foo;";
+ let vitem = string_to_item(ex_s.to_string()).unwrap();
+ let vitem_s = item_to_string(&vitem);
+ assert_eq!(&vitem_s[..], ex_s);
+
+ let ex_s = "extern crate foo as bar;";
+ let vitem = string_to_item(ex_s.to_string()).unwrap();
+ let vitem_s = item_to_string(&vitem);
+ assert_eq!(&vitem_s[..], ex_s);
+ })
+}
+
+fn get_spans_of_pat_idents(src: &str) -> Vec<Span> {
+ let item = string_to_item(src.to_string()).unwrap();
+
+ struct PatIdentVisitor {
+ spans: Vec<Span>,
+ }
+ impl<'a> visit::Visitor<'a> for PatIdentVisitor {
+ fn visit_pat(&mut self, p: &'a ast::Pat) {
+ match p.kind {
+ PatKind::Ident(_, ref ident, _) => {
+                    self.spans.push(ident.span);
+ }
+ _ => {
+ visit::walk_pat(self, p);
+ }
+ }
+ }
+ }
+ let mut v = PatIdentVisitor { spans: Vec::new() };
+ visit::walk_item(&mut v, &item);
+    v.spans
+}
+
+#[test]
+fn span_of_self_arg_pat_idents_are_correct() {
+ create_default_session_globals_then(|| {
+ let srcs = [
+ "impl z { fn a (&self, &myarg: i32) {} }",
+ "impl z { fn a (&mut self, &myarg: i32) {} }",
+ "impl z { fn a (&'a self, &myarg: i32) {} }",
+ "impl z { fn a (self, &myarg: i32) {} }",
+ "impl z { fn a (self: Foo, &myarg: i32) {} }",
+ ];
+
+ for src in srcs {
+ let spans = get_spans_of_pat_idents(src);
+ let (lo, hi) = (spans[0].lo(), spans[0].hi());
+ assert!(
+ "self" == &src[lo.to_usize()..hi.to_usize()],
+ "\"{}\" != \"self\". src=\"{}\"",
+ &src[lo.to_usize()..hi.to_usize()],
+ src
+ )
+ }
+ })
+}
+
+#[test]
+fn parse_exprs() {
+ create_default_session_globals_then(|| {
+        // Just make sure that they parse.
+ string_to_expr("3 + 4".to_string());
+ string_to_expr("a::z.froob(b,&(987+3))".to_string());
+ })
+}
+
+#[test]
+fn attrs_fix_bug() {
+ create_default_session_globals_then(|| {
+ string_to_item(
+ "pub fn mk_file_writer(path: &Path, flags: &[FileFlag])
+ -> Result<Box<Writer>, String> {
+#[cfg(windows)]
+fn wb() -> c_int {
+ (O_WRONLY | libc::consts::os::extra::O_BINARY) as c_int
+}
+
+#[cfg(unix)]
+fn wb() -> c_int { O_WRONLY as c_int }
+
+let mut fflags: c_int = wb();
+}"
+ .to_string(),
+ );
+ })
+}
+
+#[test]
+fn crlf_doc_comments() {
+ create_default_session_globals_then(|| {
+ let sess = sess();
+
+ let name_1 = FileName::Custom("crlf_source_1".to_string());
+ let source = "/// doc comment\r\nfn foo() {}".to_string();
+ let item = parse_item_from_source_str(name_1, source, &sess).unwrap().unwrap();
+ let doc = item.attrs.iter().filter_map(|at| at.doc_str()).next().unwrap();
+ assert_eq!(doc.as_str(), " doc comment");
+
+ let name_2 = FileName::Custom("crlf_source_2".to_string());
+ let source = "/// doc comment\r\n/// line 2\r\nfn foo() {}".to_string();
+ let item = parse_item_from_source_str(name_2, source, &sess).unwrap().unwrap();
+ let docs = item.attrs.iter().filter_map(|at| at.doc_str()).collect::<Vec<_>>();
+ let b: &[_] = &[Symbol::intern(" doc comment"), Symbol::intern(" line 2")];
+ assert_eq!(&docs[..], b);
+
+ let name_3 = FileName::Custom("clrf_source_3".to_string());
+ let source = "/** doc comment\r\n * with CRLF */\r\nfn foo() {}".to_string();
+ let item = parse_item_from_source_str(name_3, source, &sess).unwrap().unwrap();
+ let doc = item.attrs.iter().filter_map(|at| at.doc_str()).next().unwrap();
+ assert_eq!(doc.as_str(), " doc comment\n * with CRLF ");
+ });
+}
+
+#[test]
+fn ttdelim_span() {
+ fn parse_expr_from_source_str(
+ name: FileName,
+ source: String,
+ sess: &ParseSess,
+ ) -> PResult<'_, P<ast::Expr>> {
+ new_parser_from_source_str(sess, name, source).parse_expr()
+ }
+
+ create_default_session_globals_then(|| {
+ let sess = sess();
+ let expr = parse_expr_from_source_str(
+ PathBuf::from("foo").into(),
+ "foo!( fn main() { body } )".to_string(),
+ &sess,
+ )
+ .unwrap();
+
+ let tts: Vec<_> = match expr.kind {
+ ast::ExprKind::MacCall(ref mac) => mac.args.inner_tokens().into_trees().collect(),
+ _ => panic!("not a macro"),
+ };
+
+ let span = tts.iter().rev().next().unwrap().span();
+
+ match sess.source_map().span_to_snippet(span) {
+ Ok(s) => assert_eq!(&s[..], "{ body }"),
+ Err(_) => panic!("could not get snippet"),
+ }
+ });
+}
+
+// This tests that when parsing a string (rather than a file) we don't try
+// to read in a file for a module declaration and instead just parse a stub.
+// See `recurse_into_file_modules` in the parser.
+#[test]
+fn out_of_line_mod() {
+ create_default_session_globals_then(|| {
+ let item = parse_item_from_source_str(
+ PathBuf::from("foo").into(),
+ "mod foo { struct S; mod this_does_not_exist; }".to_owned(),
+ &sess(),
+ )
+ .unwrap()
+ .unwrap();
+
+ if let ast::ItemKind::Mod(_, ref mod_kind) = item.kind {
+ assert!(matches!(mod_kind, ast::ModKind::Loaded(items, ..) if items.len() == 2));
+ } else {
+ panic!();
+ }
+ });
+}
+
+#[test]
+fn eqmodws() {
+ assert_eq!(matches_codepattern("", ""), true);
+ assert_eq!(matches_codepattern("", "a"), false);
+ assert_eq!(matches_codepattern("a", ""), false);
+ assert_eq!(matches_codepattern("a", "a"), true);
+ assert_eq!(matches_codepattern("a b", "a \n\t\r b"), true);
+ assert_eq!(matches_codepattern("a b ", "a \n\t\r b"), true);
+ assert_eq!(matches_codepattern("a b", "a \n\t\r b "), false);
+ assert_eq!(matches_codepattern("a b", "a b"), true);
+ assert_eq!(matches_codepattern("ab", "a b"), false);
+ assert_eq!(matches_codepattern("a b", "ab"), true);
+ assert_eq!(matches_codepattern(" a b", "ab"), true);
+}
+
+#[test]
+fn pattern_whitespace() {
+ assert_eq!(matches_codepattern("", "\x0C"), false);
+ assert_eq!(matches_codepattern("a b ", "a \u{0085}\n\t\r b"), true);
+ assert_eq!(matches_codepattern("a b", "a \u{0085}\n\t\r b "), false);
+}
+
+#[test]
+fn non_pattern_whitespace() {
+ // These have the property 'White_Space' but not 'Pattern_White_Space'
+ assert_eq!(matches_codepattern("a b", "a\u{2002}b"), false);
+ assert_eq!(matches_codepattern("a b", "a\u{2002}b"), false);
+ assert_eq!(matches_codepattern("\u{205F}a b", "ab"), false);
+ assert_eq!(matches_codepattern("a \u{3000}b", "ab"), false);
+}
diff --git a/compiler/rustc_expand/src/placeholders.rs b/compiler/rustc_expand/src/placeholders.rs
new file mode 100644
index 000000000..0d5d6ee07
--- /dev/null
+++ b/compiler/rustc_expand/src/placeholders.rs
@@ -0,0 +1,373 @@
+use crate::expand::{AstFragment, AstFragmentKind};
+
+use rustc_ast as ast;
+use rustc_ast::mut_visit::*;
+use rustc_ast::ptr::P;
+use rustc_span::source_map::DUMMY_SP;
+use rustc_span::symbol::Ident;
+
+use smallvec::{smallvec, SmallVec};
+
+use rustc_data_structures::fx::FxHashMap;
+
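+/// Creates a dummy AST fragment of the given `kind`, standing in for a macro
+/// invocation that has not been expanded yet. Each fragment wraps a synthetic
+/// `MacCall` and carries only the `id` (plus an optional visibility), so that
+/// the `PlaceholderExpander` below can later find it by `NodeId` and swap in
+/// the real expansion.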
+pub fn placeholder(
+ kind: AstFragmentKind,
+ id: ast::NodeId,
+ vis: Option<ast::Visibility>,
+) -> AstFragment {
+ fn mac_placeholder() -> ast::MacCall {
+ ast::MacCall {
+ path: ast::Path { span: DUMMY_SP, segments: Vec::new(), tokens: None },
+ args: P(ast::MacArgs::Empty),
+ prior_type_ascription: None,
+ }
+ }
+
+ let ident = Ident::empty();
+ let attrs = Vec::new();
+ let vis = vis.unwrap_or(ast::Visibility {
+ span: DUMMY_SP,
+ kind: ast::VisibilityKind::Inherited,
+ tokens: None,
+ });
+ let span = DUMMY_SP;
+ let expr_placeholder = || {
+ P(ast::Expr {
+ id,
+ span,
+ attrs: ast::AttrVec::new(),
+ kind: ast::ExprKind::MacCall(mac_placeholder()),
+ tokens: None,
+ })
+ };
+ let ty =
+ || P(ast::Ty { id, kind: ast::TyKind::MacCall(mac_placeholder()), span, tokens: None });
+ let pat =
+ || P(ast::Pat { id, kind: ast::PatKind::MacCall(mac_placeholder()), span, tokens: None });
+
+ match kind {
+ AstFragmentKind::Crate => AstFragment::Crate(ast::Crate {
+ attrs: Default::default(),
+ items: Default::default(),
+ spans: ast::ModSpans { inner_span: span, ..Default::default() },
+ id,
+ is_placeholder: true,
+ }),
+ AstFragmentKind::Expr => AstFragment::Expr(expr_placeholder()),
+ AstFragmentKind::OptExpr => AstFragment::OptExpr(Some(expr_placeholder())),
+ AstFragmentKind::Items => AstFragment::Items(smallvec![P(ast::Item {
+ id,
+ span,
+ ident,
+ vis,
+ attrs,
+ kind: ast::ItemKind::MacCall(mac_placeholder()),
+ tokens: None,
+ })]),
+ AstFragmentKind::TraitItems => AstFragment::TraitItems(smallvec![P(ast::AssocItem {
+ id,
+ span,
+ ident,
+ vis,
+ attrs,
+ kind: ast::AssocItemKind::MacCall(mac_placeholder()),
+ tokens: None,
+ })]),
+ AstFragmentKind::ImplItems => AstFragment::ImplItems(smallvec![P(ast::AssocItem {
+ id,
+ span,
+ ident,
+ vis,
+ attrs,
+ kind: ast::AssocItemKind::MacCall(mac_placeholder()),
+ tokens: None,
+ })]),
+ AstFragmentKind::ForeignItems => {
+ AstFragment::ForeignItems(smallvec![P(ast::ForeignItem {
+ id,
+ span,
+ ident,
+ vis,
+ attrs,
+ kind: ast::ForeignItemKind::MacCall(mac_placeholder()),
+ tokens: None,
+ })])
+ }
+ AstFragmentKind::Pat => AstFragment::Pat(P(ast::Pat {
+ id,
+ span,
+ kind: ast::PatKind::MacCall(mac_placeholder()),
+ tokens: None,
+ })),
+ AstFragmentKind::Ty => AstFragment::Ty(P(ast::Ty {
+ id,
+ span,
+ kind: ast::TyKind::MacCall(mac_placeholder()),
+ tokens: None,
+ })),
+ AstFragmentKind::Stmts => AstFragment::Stmts(smallvec![{
+ let mac = P(ast::MacCallStmt {
+ mac: mac_placeholder(),
+ style: ast::MacStmtStyle::Braces,
+ attrs: ast::AttrVec::new(),
+ tokens: None,
+ });
+ ast::Stmt { id, span, kind: ast::StmtKind::MacCall(mac) }
+ }]),
+ AstFragmentKind::Arms => AstFragment::Arms(smallvec![ast::Arm {
+ attrs: Default::default(),
+ body: expr_placeholder(),
+ guard: None,
+ id,
+ pat: pat(),
+ span,
+ is_placeholder: true,
+ }]),
+ AstFragmentKind::ExprFields => AstFragment::ExprFields(smallvec![ast::ExprField {
+ attrs: Default::default(),
+ expr: expr_placeholder(),
+ id,
+ ident,
+ is_shorthand: false,
+ span,
+ is_placeholder: true,
+ }]),
+ AstFragmentKind::PatFields => AstFragment::PatFields(smallvec![ast::PatField {
+ attrs: Default::default(),
+ id,
+ ident,
+ is_shorthand: false,
+ pat: pat(),
+ span,
+ is_placeholder: true,
+ }]),
+ AstFragmentKind::GenericParams => AstFragment::GenericParams(smallvec![{
+ ast::GenericParam {
+ attrs: Default::default(),
+ bounds: Default::default(),
+ id,
+ ident,
+ is_placeholder: true,
+ kind: ast::GenericParamKind::Lifetime,
+ colon_span: None,
+ }
+ }]),
+ AstFragmentKind::Params => AstFragment::Params(smallvec![ast::Param {
+ attrs: Default::default(),
+ id,
+ pat: pat(),
+ span,
+ ty: ty(),
+ is_placeholder: true,
+ }]),
+ AstFragmentKind::FieldDefs => AstFragment::FieldDefs(smallvec![ast::FieldDef {
+ attrs: Default::default(),
+ id,
+ ident: None,
+ span,
+ ty: ty(),
+ vis,
+ is_placeholder: true,
+ }]),
+ AstFragmentKind::Variants => AstFragment::Variants(smallvec![ast::Variant {
+ attrs: Default::default(),
+ data: ast::VariantData::Struct(Default::default(), false),
+ disr_expr: None,
+ id,
+ ident,
+ span,
+ vis,
+ is_placeholder: true,
+ }]),
+ }
+}
+
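+/// Replaces the placeholders inserted by `placeholder` above with the AST
+/// fragments they expanded to, looked up by `NodeId`.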
+#[derive(Default)]
+pub struct PlaceholderExpander {
+ expanded_fragments: FxHashMap<ast::NodeId, AstFragment>,
+}
+
+impl PlaceholderExpander {
+ pub fn add(&mut self, id: ast::NodeId, mut fragment: AstFragment) {
+ fragment.mut_visit_with(self);
+ self.expanded_fragments.insert(id, fragment);
+ }
+
+ fn remove(&mut self, id: ast::NodeId) -> AstFragment {
+ self.expanded_fragments.remove(&id).unwrap()
+ }
+}
+
+impl MutVisitor for PlaceholderExpander {
+ fn flat_map_arm(&mut self, arm: ast::Arm) -> SmallVec<[ast::Arm; 1]> {
+ if arm.is_placeholder {
+ self.remove(arm.id).make_arms()
+ } else {
+ noop_flat_map_arm(arm, self)
+ }
+ }
+
+ fn flat_map_expr_field(&mut self, field: ast::ExprField) -> SmallVec<[ast::ExprField; 1]> {
+ if field.is_placeholder {
+ self.remove(field.id).make_expr_fields()
+ } else {
+ noop_flat_map_expr_field(field, self)
+ }
+ }
+
+ fn flat_map_pat_field(&mut self, fp: ast::PatField) -> SmallVec<[ast::PatField; 1]> {
+ if fp.is_placeholder {
+ self.remove(fp.id).make_pat_fields()
+ } else {
+ noop_flat_map_pat_field(fp, self)
+ }
+ }
+
+ fn flat_map_generic_param(
+ &mut self,
+ param: ast::GenericParam,
+ ) -> SmallVec<[ast::GenericParam; 1]> {
+ if param.is_placeholder {
+ self.remove(param.id).make_generic_params()
+ } else {
+ noop_flat_map_generic_param(param, self)
+ }
+ }
+
+ fn flat_map_param(&mut self, p: ast::Param) -> SmallVec<[ast::Param; 1]> {
+ if p.is_placeholder {
+ self.remove(p.id).make_params()
+ } else {
+ noop_flat_map_param(p, self)
+ }
+ }
+
+ fn flat_map_field_def(&mut self, sf: ast::FieldDef) -> SmallVec<[ast::FieldDef; 1]> {
+ if sf.is_placeholder {
+ self.remove(sf.id).make_field_defs()
+ } else {
+ noop_flat_map_field_def(sf, self)
+ }
+ }
+
+ fn flat_map_variant(&mut self, variant: ast::Variant) -> SmallVec<[ast::Variant; 1]> {
+ if variant.is_placeholder {
+ self.remove(variant.id).make_variants()
+ } else {
+ noop_flat_map_variant(variant, self)
+ }
+ }
+
+ fn flat_map_item(&mut self, item: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> {
+ match item.kind {
+ ast::ItemKind::MacCall(_) => self.remove(item.id).make_items(),
+ _ => noop_flat_map_item(item, self),
+ }
+ }
+
+ fn flat_map_trait_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
+ match item.kind {
+ ast::AssocItemKind::MacCall(_) => self.remove(item.id).make_trait_items(),
+ _ => noop_flat_map_assoc_item(item, self),
+ }
+ }
+
+ fn flat_map_impl_item(&mut self, item: P<ast::AssocItem>) -> SmallVec<[P<ast::AssocItem>; 1]> {
+ match item.kind {
+ ast::AssocItemKind::MacCall(_) => self.remove(item.id).make_impl_items(),
+ _ => noop_flat_map_assoc_item(item, self),
+ }
+ }
+
+ fn flat_map_foreign_item(
+ &mut self,
+ item: P<ast::ForeignItem>,
+ ) -> SmallVec<[P<ast::ForeignItem>; 1]> {
+ match item.kind {
+ ast::ForeignItemKind::MacCall(_) => self.remove(item.id).make_foreign_items(),
+ _ => noop_flat_map_foreign_item(item, self),
+ }
+ }
+
+ fn visit_expr(&mut self, expr: &mut P<ast::Expr>) {
+ match expr.kind {
+ ast::ExprKind::MacCall(_) => *expr = self.remove(expr.id).make_expr(),
+ _ => noop_visit_expr(expr, self),
+ }
+ }
+
+ fn filter_map_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
+ match expr.kind {
+ ast::ExprKind::MacCall(_) => self.remove(expr.id).make_opt_expr(),
+ _ => noop_filter_map_expr(expr, self),
+ }
+ }
+
+ fn flat_map_stmt(&mut self, stmt: ast::Stmt) -> SmallVec<[ast::Stmt; 1]> {
+ let (style, mut stmts) = match stmt.kind {
+ ast::StmtKind::MacCall(mac) => (mac.style, self.remove(stmt.id).make_stmts()),
+ _ => return noop_flat_map_stmt(stmt, self),
+ };
+
+ if style == ast::MacStmtStyle::Semicolon {
+ // Implement the proposal described in
+ // https://github.com/rust-lang/rust/issues/61733#issuecomment-509626449
+ //
+ // The macro invocation expands to the list of statements. If the
+ // list of statements is empty, then 'parse' the trailing semicolon
+ // on the original invocation as an empty statement. That is:
+ //
+ // `empty();` is parsed as a single `StmtKind::Empty`
+ //
+ // If the list of statements is non-empty, see if the final
+ // statement already has a trailing semicolon.
+ //
+ // If it doesn't have a semicolon, then 'parse' the trailing
+ // semicolon from the invocation as part of the final statement,
+ // using `stmt.add_trailing_semicolon()`
+ //
+ // If it does have a semicolon, then 'parse' the trailing semicolon
+ // from the invocation as a new StmtKind::Empty
+
+ // FIXME: We will need to preserve the original semicolon token and
+ // span as part of #15701
+ let empty_stmt =
+ ast::Stmt { id: ast::DUMMY_NODE_ID, kind: ast::StmtKind::Empty, span: DUMMY_SP };
+
+ if let Some(stmt) = stmts.pop() {
+ if stmt.has_trailing_semicolon() {
+ stmts.push(stmt);
+ stmts.push(empty_stmt);
+ } else {
+ stmts.push(stmt.add_trailing_semicolon());
+ }
+ } else {
+ stmts.push(empty_stmt);
+ }
+ }
+
+ stmts
+ }
+
+ fn visit_pat(&mut self, pat: &mut P<ast::Pat>) {
+ match pat.kind {
+ ast::PatKind::MacCall(_) => *pat = self.remove(pat.id).make_pat(),
+ _ => noop_visit_pat(pat, self),
+ }
+ }
+
+ fn visit_ty(&mut self, ty: &mut P<ast::Ty>) {
+ match ty.kind {
+ ast::TyKind::MacCall(_) => *ty = self.remove(ty.id).make_ty(),
+ _ => noop_visit_ty(ty, self),
+ }
+ }
+
+ fn visit_crate(&mut self, krate: &mut ast::Crate) {
+ if krate.is_placeholder {
+ *krate = self.remove(krate.id).make_crate();
+ } else {
+ noop_visit_crate(krate, self)
+ }
+ }
+}
diff --git a/compiler/rustc_expand/src/proc_macro.rs b/compiler/rustc_expand/src/proc_macro.rs
new file mode 100644
index 000000000..1a2ab9d19
--- /dev/null
+++ b/compiler/rustc_expand/src/proc_macro.rs
@@ -0,0 +1,181 @@
+use crate::base::{self, *};
+use crate::proc_macro_server;
+
+use rustc_ast as ast;
+use rustc_ast::ptr::P;
+use rustc_ast::token;
+use rustc_ast::tokenstream::TokenStream;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::ErrorGuaranteed;
+use rustc_parse::parser::ForceCollect;
+use rustc_session::config::ProcMacroExecutionStrategy;
+use rustc_span::profiling::SpannedEventArgRecorder;
+use rustc_span::{Span, DUMMY_SP};
+
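+// A pair of bounded crossbeam channels forming a bidirectional pipe: `new`
+// crosses the sender/receiver halves so that a message sent on one endpoint
+// is received on the other. This is used to talk to a proc macro running on
+// a separate thread under the cross-thread execution strategy.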
+struct CrossbeamMessagePipe<T> {
+ tx: crossbeam_channel::Sender<T>,
+ rx: crossbeam_channel::Receiver<T>,
+}
+
+impl<T> pm::bridge::server::MessagePipe<T> for CrossbeamMessagePipe<T> {
+ fn new() -> (Self, Self) {
+ let (tx1, rx1) = crossbeam_channel::bounded(1);
+ let (tx2, rx2) = crossbeam_channel::bounded(1);
+ (CrossbeamMessagePipe { tx: tx1, rx: rx2 }, CrossbeamMessagePipe { tx: tx2, rx: rx1 })
+ }
+
+ fn send(&mut self, value: T) {
+ self.tx.send(value).unwrap();
+ }
+
+ fn recv(&mut self) -> Option<T> {
+ self.rx.recv().ok()
+ }
+}
+
+fn exec_strategy(ecx: &ExtCtxt<'_>) -> impl pm::bridge::server::ExecutionStrategy {
+ pm::bridge::server::MaybeCrossThread::<CrossbeamMessagePipe<_>>::new(
+ ecx.sess.opts.unstable_opts.proc_macro_execution_strategy
+ == ProcMacroExecutionStrategy::CrossThread,
+ )
+}
+
+pub struct BangProcMacro {
+ pub client: pm::bridge::client::Client<pm::TokenStream, pm::TokenStream>,
+}
+
+impl base::BangProcMacro for BangProcMacro {
+ fn expand<'cx>(
+ &self,
+ ecx: &'cx mut ExtCtxt<'_>,
+ span: Span,
+ input: TokenStream,
+ ) -> Result<TokenStream, ErrorGuaranteed> {
+ let _timer =
+ ecx.sess.prof.generic_activity_with_arg_recorder("expand_proc_macro", |recorder| {
+ recorder.record_arg_with_span(ecx.expansion_descr(), span);
+ });
+
+ let proc_macro_backtrace = ecx.ecfg.proc_macro_backtrace;
+ let strategy = exec_strategy(ecx);
+ let server = proc_macro_server::Rustc::new(ecx);
+ self.client.run(&strategy, server, input, proc_macro_backtrace).map_err(|e| {
+ let mut err = ecx.struct_span_err(span, "proc macro panicked");
+ if let Some(s) = e.as_str() {
+ err.help(&format!("message: {}", s));
+ }
+ err.emit()
+ })
+ }
+}
+
+pub struct AttrProcMacro {
+ pub client: pm::bridge::client::Client<(pm::TokenStream, pm::TokenStream), pm::TokenStream>,
+}
+
+impl base::AttrProcMacro for AttrProcMacro {
+ fn expand<'cx>(
+ &self,
+ ecx: &'cx mut ExtCtxt<'_>,
+ span: Span,
+ annotation: TokenStream,
+ annotated: TokenStream,
+ ) -> Result<TokenStream, ErrorGuaranteed> {
+ let _timer =
+ ecx.sess.prof.generic_activity_with_arg_recorder("expand_proc_macro", |recorder| {
+ recorder.record_arg_with_span(ecx.expansion_descr(), span);
+ });
+
+ let proc_macro_backtrace = ecx.ecfg.proc_macro_backtrace;
+ let strategy = exec_strategy(ecx);
+ let server = proc_macro_server::Rustc::new(ecx);
+ self.client.run(&strategy, server, annotation, annotated, proc_macro_backtrace).map_err(
+ |e| {
+ let mut err = ecx.struct_span_err(span, "custom attribute panicked");
+ if let Some(s) = e.as_str() {
+ err.help(&format!("message: {}", s));
+ }
+ err.emit()
+ },
+ )
+ }
+}
+
+pub struct DeriveProcMacro {
+ pub client: pm::bridge::client::Client<pm::TokenStream, pm::TokenStream>,
+}
+
+impl MultiItemModifier for DeriveProcMacro {
+ fn expand(
+ &self,
+ ecx: &mut ExtCtxt<'_>,
+ span: Span,
+ _meta_item: &ast::MetaItem,
+ item: Annotatable,
+ ) -> ExpandResult<Vec<Annotatable>, Annotatable> {
+ // We need special handling for statement items
+ // (e.g. `fn foo() { #[derive(Debug)] struct Bar; }`)
+ let is_stmt = matches!(item, Annotatable::Stmt(..));
+ let hack = crate::base::ann_pretty_printing_compatibility_hack(&item, &ecx.sess.parse_sess);
+ let input = if hack {
+ let nt = match item {
+ Annotatable::Item(item) => token::NtItem(item),
+ Annotatable::Stmt(stmt) => token::NtStmt(stmt),
+ _ => unreachable!(),
+ };
+ TokenStream::token_alone(token::Interpolated(Lrc::new(nt)), DUMMY_SP)
+ } else {
+ item.to_tokens()
+ };
+
+ let stream = {
+ let _timer =
+ ecx.sess.prof.generic_activity_with_arg_recorder("expand_proc_macro", |recorder| {
+ recorder.record_arg_with_span(ecx.expansion_descr(), span);
+ });
+ let proc_macro_backtrace = ecx.ecfg.proc_macro_backtrace;
+ let strategy = exec_strategy(ecx);
+ let server = proc_macro_server::Rustc::new(ecx);
+ match self.client.run(&strategy, server, input, proc_macro_backtrace) {
+ Ok(stream) => stream,
+ Err(e) => {
+ let mut err = ecx.struct_span_err(span, "proc-macro derive panicked");
+ if let Some(s) = e.as_str() {
+ err.help(&format!("message: {}", s));
+ }
+ err.emit();
+ return ExpandResult::Ready(vec![]);
+ }
+ }
+ };
+
+ let error_count_before = ecx.sess.parse_sess.span_diagnostic.err_count();
+ let mut parser =
+ rustc_parse::stream_to_parser(&ecx.sess.parse_sess, stream, Some("proc-macro derive"));
+ let mut items = vec![];
+
+ loop {
+ match parser.parse_item(ForceCollect::No) {
+ Ok(None) => break,
+ Ok(Some(item)) => {
+ if is_stmt {
+ items.push(Annotatable::Stmt(P(ecx.stmt_item(span, item))));
+ } else {
+ items.push(Annotatable::Item(item));
+ }
+ }
+ Err(mut err) => {
+ err.emit();
+ break;
+ }
+ }
+ }
+
+        // Fail if any errors were emitted.
+ if ecx.sess.parse_sess.span_diagnostic.err_count() > error_count_before {
+ ecx.struct_span_err(span, "proc-macro derive produced unparseable tokens").emit();
+ }
+
+ ExpandResult::Ready(items)
+ }
+}
diff --git a/compiler/rustc_expand/src/proc_macro_server.rs b/compiler/rustc_expand/src/proc_macro_server.rs
new file mode 100644
index 000000000..7d9a4aed0
--- /dev/null
+++ b/compiler/rustc_expand/src/proc_macro_server.rs
@@ -0,0 +1,766 @@
+use crate::base::ExtCtxt;
+
+use rustc_ast as ast;
+use rustc_ast::token;
+use rustc_ast::tokenstream::{self, Spacing::*, TokenStream};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Diagnostic, MultiSpan, PResult};
+use rustc_parse::lexer::nfc_normalize;
+use rustc_parse::parse_stream_from_source_str;
+use rustc_session::parse::ParseSess;
+use rustc_span::def_id::CrateNum;
+use rustc_span::symbol::{self, sym, Symbol};
+use rustc_span::{BytePos, FileName, Pos, SourceFile, Span};
+
+use pm::bridge::{
+ server, DelimSpan, ExpnGlobals, Group, Ident, LitKind, Literal, Punct, TokenTree,
+};
+use pm::{Delimiter, Level, LineColumn};
+use std::ops::Bound;
+
+trait FromInternal<T> {
+ fn from_internal(x: T) -> Self;
+}
+
+trait ToInternal<T> {
+ fn to_internal(self) -> T;
+}
+
+impl FromInternal<token::Delimiter> for Delimiter {
+ fn from_internal(delim: token::Delimiter) -> Delimiter {
+ match delim {
+ token::Delimiter::Parenthesis => Delimiter::Parenthesis,
+ token::Delimiter::Brace => Delimiter::Brace,
+ token::Delimiter::Bracket => Delimiter::Bracket,
+ token::Delimiter::Invisible => Delimiter::None,
+ }
+ }
+}
+
+impl ToInternal<token::Delimiter> for Delimiter {
+ fn to_internal(self) -> token::Delimiter {
+ match self {
+ Delimiter::Parenthesis => token::Delimiter::Parenthesis,
+ Delimiter::Brace => token::Delimiter::Brace,
+ Delimiter::Bracket => token::Delimiter::Bracket,
+ Delimiter::None => token::Delimiter::Invisible,
+ }
+ }
+}
+
+impl FromInternal<token::LitKind> for LitKind {
+ fn from_internal(kind: token::LitKind) -> Self {
+ match kind {
+ token::Byte => LitKind::Byte,
+ token::Char => LitKind::Char,
+ token::Integer => LitKind::Integer,
+ token::Float => LitKind::Float,
+ token::Str => LitKind::Str,
+ token::StrRaw(n) => LitKind::StrRaw(n),
+ token::ByteStr => LitKind::ByteStr,
+ token::ByteStrRaw(n) => LitKind::ByteStrRaw(n),
+ token::Err => LitKind::Err,
+ token::Bool => unreachable!(),
+ }
+ }
+}
+
+impl ToInternal<token::LitKind> for LitKind {
+ fn to_internal(self) -> token::LitKind {
+ match self {
+ LitKind::Byte => token::Byte,
+ LitKind::Char => token::Char,
+ LitKind::Integer => token::Integer,
+ LitKind::Float => token::Float,
+ LitKind::Str => token::Str,
+ LitKind::StrRaw(n) => token::StrRaw(n),
+ LitKind::ByteStr => token::ByteStr,
+ LitKind::ByteStrRaw(n) => token::ByteStrRaw(n),
+ LitKind::Err => token::Err,
+ }
+ }
+}
+
+impl FromInternal<(TokenStream, &mut Rustc<'_, '_>)> for Vec<TokenTree<TokenStream, Span, Symbol>> {
+ fn from_internal((stream, rustc): (TokenStream, &mut Rustc<'_, '_>)) -> Self {
+ use rustc_ast::token::*;
+
+ // Estimate the capacity as `stream.len()` rounded up to the next power
+ // of two to limit the number of required reallocations.
+ let mut trees = Vec::with_capacity(stream.len().next_power_of_two());
+ let mut cursor = stream.into_trees();
+
+ while let Some(tree) = cursor.next() {
+ let (Token { kind, span }, joint) = match tree {
+ tokenstream::TokenTree::Delimited(span, delim, tts) => {
+ let delimiter = pm::Delimiter::from_internal(delim);
+ trees.push(TokenTree::Group(Group {
+ delimiter,
+ stream: Some(tts),
+ span: DelimSpan {
+ open: span.open,
+ close: span.close,
+ entire: span.entire(),
+ },
+ }));
+ continue;
+ }
+ tokenstream::TokenTree::Token(token, spacing) => (token, spacing == Joint),
+ };
+
+ let mut op = |s: &str| {
+ assert!(s.is_ascii());
+ trees.extend(s.as_bytes().iter().enumerate().map(|(idx, &ch)| {
+ TokenTree::Punct(Punct { ch, joint: joint || idx != s.len() - 1, span })
+ }));
+ };
+
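+            // E.g. (illustrative) `<<=` (`BinOpEq(Shl)`) is emitted as the three
+            // `Punct`s `<`, `<`, `=`, joint on all but the last, whose jointness
+            // comes from the original token's spacing.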
+ match kind {
+ Eq => op("="),
+ Lt => op("<"),
+ Le => op("<="),
+ EqEq => op("=="),
+ Ne => op("!="),
+ Ge => op(">="),
+ Gt => op(">"),
+ AndAnd => op("&&"),
+ OrOr => op("||"),
+ Not => op("!"),
+ Tilde => op("~"),
+ BinOp(Plus) => op("+"),
+ BinOp(Minus) => op("-"),
+ BinOp(Star) => op("*"),
+ BinOp(Slash) => op("/"),
+ BinOp(Percent) => op("%"),
+ BinOp(Caret) => op("^"),
+ BinOp(And) => op("&"),
+ BinOp(Or) => op("|"),
+ BinOp(Shl) => op("<<"),
+ BinOp(Shr) => op(">>"),
+ BinOpEq(Plus) => op("+="),
+ BinOpEq(Minus) => op("-="),
+ BinOpEq(Star) => op("*="),
+ BinOpEq(Slash) => op("/="),
+ BinOpEq(Percent) => op("%="),
+ BinOpEq(Caret) => op("^="),
+ BinOpEq(And) => op("&="),
+ BinOpEq(Or) => op("|="),
+ BinOpEq(Shl) => op("<<="),
+ BinOpEq(Shr) => op(">>="),
+ At => op("@"),
+ Dot => op("."),
+ DotDot => op(".."),
+ DotDotDot => op("..."),
+ DotDotEq => op("..="),
+ Comma => op(","),
+ Semi => op(";"),
+ Colon => op(":"),
+ ModSep => op("::"),
+ RArrow => op("->"),
+ LArrow => op("<-"),
+ FatArrow => op("=>"),
+ Pound => op("#"),
+ Dollar => op("$"),
+ Question => op("?"),
+ SingleQuote => op("'"),
+
+ Ident(sym, is_raw) => trees.push(TokenTree::Ident(Ident { sym, is_raw, span })),
+ Lifetime(name) => {
+ let ident = symbol::Ident::new(name, span).without_first_quote();
+ trees.extend([
+ TokenTree::Punct(Punct { ch: b'\'', joint: true, span }),
+ TokenTree::Ident(Ident { sym: ident.name, is_raw: false, span }),
+ ]);
+ }
+ Literal(token::Lit { kind, symbol, suffix }) => {
+ trees.push(TokenTree::Literal(self::Literal {
+ kind: FromInternal::from_internal(kind),
+ symbol,
+ suffix,
+ span,
+ }));
+ }
+ DocComment(_, attr_style, data) => {
+ let mut escaped = String::new();
+ for ch in data.as_str().chars() {
+ escaped.extend(ch.escape_debug());
+ }
+ let stream = [
+ Ident(sym::doc, false),
+ Eq,
+ TokenKind::lit(token::Str, Symbol::intern(&escaped), None),
+ ]
+ .into_iter()
+ .map(|kind| tokenstream::TokenTree::token_alone(kind, span))
+ .collect();
+ trees.push(TokenTree::Punct(Punct { ch: b'#', joint: false, span }));
+ if attr_style == ast::AttrStyle::Inner {
+ trees.push(TokenTree::Punct(Punct { ch: b'!', joint: false, span }));
+ }
+ trees.push(TokenTree::Group(Group {
+ delimiter: pm::Delimiter::Bracket,
+ stream: Some(stream),
+ span: DelimSpan::from_single(span),
+ }));
+ }
+
+ Interpolated(nt) if let NtIdent(ident, is_raw) = *nt => {
+ trees.push(TokenTree::Ident(Ident { sym: ident.name, is_raw, span: ident.span }))
+ }
+
+ Interpolated(nt) => {
+ let stream = TokenStream::from_nonterminal_ast(&nt);
+ // A hack used to pass AST fragments to attribute and derive
+                    // stream. Such a token needs to be "unwrapped" rather than
+                    // represented as a delimited group.
+ // represented as a delimited group.
+ // FIXME: It needs to be removed, but there are some
+ // compatibility issues (see #73345).
+ if crate::base::nt_pretty_printing_compatibility_hack(&nt, rustc.sess()) {
+ trees.extend(Self::from_internal((stream, rustc)));
+ } else {
+ trees.push(TokenTree::Group(Group {
+ delimiter: pm::Delimiter::None,
+ stream: Some(stream),
+ span: DelimSpan::from_single(span),
+ }))
+ }
+ }
+
+ OpenDelim(..) | CloseDelim(..) => unreachable!(),
+ Eof => unreachable!(),
+ }
+ }
+ trees
+ }
+}
+
+impl ToInternal<TokenStream> for (TokenTree<TokenStream, Span, Symbol>, &mut Rustc<'_, '_>) {
+ fn to_internal(self) -> TokenStream {
+ use rustc_ast::token::*;
+
+ let (tree, rustc) = self;
+ let (ch, joint, span) = match tree {
+ TokenTree::Punct(Punct { ch, joint, span }) => (ch, joint, span),
+ TokenTree::Group(Group { delimiter, stream, span: DelimSpan { open, close, .. } }) => {
+ return tokenstream::TokenStream::delimited(
+ tokenstream::DelimSpan { open, close },
+ delimiter.to_internal(),
+ stream.unwrap_or_default(),
+ );
+ }
+ TokenTree::Ident(self::Ident { sym, is_raw, span }) => {
+ rustc.sess().symbol_gallery.insert(sym, span);
+ return tokenstream::TokenStream::token_alone(Ident(sym, is_raw), span);
+ }
+ TokenTree::Literal(self::Literal {
+ kind: self::LitKind::Integer,
+ symbol,
+ suffix,
+ span,
+ }) if symbol.as_str().starts_with('-') => {
+ let minus = BinOp(BinOpToken::Minus);
+ let symbol = Symbol::intern(&symbol.as_str()[1..]);
+ let integer = TokenKind::lit(token::Integer, symbol, suffix);
+ let a = tokenstream::TokenTree::token_alone(minus, span);
+ let b = tokenstream::TokenTree::token_alone(integer, span);
+ return [a, b].into_iter().collect();
+ }
+ TokenTree::Literal(self::Literal {
+ kind: self::LitKind::Float,
+ symbol,
+ suffix,
+ span,
+ }) if symbol.as_str().starts_with('-') => {
+ let minus = BinOp(BinOpToken::Minus);
+ let symbol = Symbol::intern(&symbol.as_str()[1..]);
+ let float = TokenKind::lit(token::Float, symbol, suffix);
+ let a = tokenstream::TokenTree::token_alone(minus, span);
+ let b = tokenstream::TokenTree::token_alone(float, span);
+ return [a, b].into_iter().collect();
+ }
+ TokenTree::Literal(self::Literal { kind, symbol, suffix, span }) => {
+ return tokenstream::TokenStream::token_alone(
+ TokenKind::lit(kind.to_internal(), symbol, suffix),
+ span,
+ );
+ }
+ };
+
+ let kind = match ch {
+ b'=' => Eq,
+ b'<' => Lt,
+ b'>' => Gt,
+ b'!' => Not,
+ b'~' => Tilde,
+ b'+' => BinOp(Plus),
+ b'-' => BinOp(Minus),
+ b'*' => BinOp(Star),
+ b'/' => BinOp(Slash),
+ b'%' => BinOp(Percent),
+ b'^' => BinOp(Caret),
+ b'&' => BinOp(And),
+ b'|' => BinOp(Or),
+ b'@' => At,
+ b'.' => Dot,
+ b',' => Comma,
+ b';' => Semi,
+ b':' => Colon,
+ b'#' => Pound,
+ b'$' => Dollar,
+ b'?' => Question,
+ b'\'' => SingleQuote,
+ _ => unreachable!(),
+ };
+
+ if joint {
+ tokenstream::TokenStream::token_joint(kind, span)
+ } else {
+ tokenstream::TokenStream::token_alone(kind, span)
+ }
+ }
+}
+
+impl ToInternal<rustc_errors::Level> for Level {
+ fn to_internal(self) -> rustc_errors::Level {
+ match self {
+ Level::Error => rustc_errors::Level::Error { lint: false },
+ Level::Warning => rustc_errors::Level::Warning(None),
+ Level::Note => rustc_errors::Level::Note,
+ Level::Help => rustc_errors::Level::Help,
+ _ => unreachable!("unknown proc_macro::Level variant: {:?}", self),
+ }
+ }
+}
+
+pub struct FreeFunctions;
+
+pub(crate) struct Rustc<'a, 'b> {
+ ecx: &'a mut ExtCtxt<'b>,
+ def_site: Span,
+ call_site: Span,
+ mixed_site: Span,
+ krate: CrateNum,
+ rebased_spans: FxHashMap<usize, Span>,
+}
+
+impl<'a, 'b> Rustc<'a, 'b> {
+ pub fn new(ecx: &'a mut ExtCtxt<'b>) -> Self {
+ let expn_data = ecx.current_expansion.id.expn_data();
+ Rustc {
+ def_site: ecx.with_def_site_ctxt(expn_data.def_site),
+ call_site: ecx.with_call_site_ctxt(expn_data.call_site),
+ mixed_site: ecx.with_mixed_site_ctxt(expn_data.call_site),
+ krate: expn_data.macro_def_id.unwrap().krate,
+ rebased_spans: FxHashMap::default(),
+ ecx,
+ }
+ }
+
+ fn sess(&self) -> &ParseSess {
+ self.ecx.parse_sess()
+ }
+}
+
+impl server::Types for Rustc<'_, '_> {
+ type FreeFunctions = FreeFunctions;
+ type TokenStream = TokenStream;
+ type SourceFile = Lrc<SourceFile>;
+ type MultiSpan = Vec<Span>;
+ type Diagnostic = Diagnostic;
+ type Span = Span;
+ type Symbol = Symbol;
+}
+
+impl server::FreeFunctions for Rustc<'_, '_> {
+ fn track_env_var(&mut self, var: &str, value: Option<&str>) {
+ self.sess()
+ .env_depinfo
+ .borrow_mut()
+ .insert((Symbol::intern(var), value.map(Symbol::intern)));
+ }
+
+ fn track_path(&mut self, path: &str) {
+ self.sess().file_depinfo.borrow_mut().insert(Symbol::intern(path));
+ }
+
+ fn literal_from_str(&mut self, s: &str) -> Result<Literal<Self::Span, Self::Symbol>, ()> {
+ let name = FileName::proc_macro_source_code(s);
+ let mut parser = rustc_parse::new_parser_from_source_str(self.sess(), name, s.to_owned());
+
+ let first_span = parser.token.span.data();
+ let minus_present = parser.eat(&token::BinOp(token::Minus));
+
+ let lit_span = parser.token.span.data();
+ let token::Literal(mut lit) = parser.token.kind else {
+ return Err(());
+ };
+
+        // Check that no comment or whitespace surrounds the (possibly
+        // negative) literal, and that there are no more tokens after it.
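+        // E.g. (illustrative) `" 1"`, `"1 "`, `"1 /*c*/"`, and `"1 2"` all fail
+        // this length check and are rejected.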
+ if (lit_span.hi.0 - first_span.lo.0) as usize != s.len() {
+ return Err(());
+ }
+
+ if minus_present {
+ // If minus is present, check no comment or whitespace in between it
+ // and the literal token.
+ if first_span.hi.0 != lit_span.lo.0 {
+ return Err(());
+ }
+
+ // Check literal is a kind we allow to be negated in a proc macro token.
+ match lit.kind {
+ token::LitKind::Bool
+ | token::LitKind::Byte
+ | token::LitKind::Char
+ | token::LitKind::Str
+ | token::LitKind::StrRaw(_)
+ | token::LitKind::ByteStr
+ | token::LitKind::ByteStrRaw(_)
+ | token::LitKind::Err => return Err(()),
+ token::LitKind::Integer | token::LitKind::Float => {}
+ }
+
+ // Synthesize a new symbol that includes the minus sign.
+ let symbol = Symbol::intern(&s[..1 + lit.symbol.as_str().len()]);
+ lit = token::Lit::new(lit.kind, symbol, lit.suffix);
+ }
+ let token::Lit { kind, symbol, suffix } = lit;
+ Ok(Literal {
+ kind: FromInternal::from_internal(kind),
+ symbol,
+ suffix,
+ span: self.call_site,
+ })
+ }
+}
+
+impl server::TokenStream for Rustc<'_, '_> {
+ fn is_empty(&mut self, stream: &Self::TokenStream) -> bool {
+ stream.is_empty()
+ }
+
+ fn from_str(&mut self, src: &str) -> Self::TokenStream {
+ parse_stream_from_source_str(
+ FileName::proc_macro_source_code(src),
+ src.to_string(),
+ self.sess(),
+ Some(self.call_site),
+ )
+ }
+
+ fn to_string(&mut self, stream: &Self::TokenStream) -> String {
+ pprust::tts_to_string(stream)
+ }
+
+ fn expand_expr(&mut self, stream: &Self::TokenStream) -> Result<Self::TokenStream, ()> {
+ // Parse the expression from our tokenstream.
+ let expr: PResult<'_, _> = try {
+ let mut p = rustc_parse::stream_to_parser(
+ self.sess(),
+ stream.clone(),
+ Some("proc_macro expand expr"),
+ );
+ let expr = p.parse_expr()?;
+ if p.token != token::Eof {
+ p.unexpected()?;
+ }
+ expr
+ };
+ let expr = expr.map_err(|mut err| {
+ err.emit();
+ })?;
+
+ // Perform eager expansion on the expression.
+ let expr = self
+ .ecx
+ .expander()
+ .fully_expand_fragment(crate::expand::AstFragment::Expr(expr))
+ .make_expr();
+
+ // NOTE: For now, limit `expand_expr` to exclusively expand to literals.
+ // This may be relaxed in the future.
+ // We don't use `TokenStream::from_ast` as the tokenstream currently cannot
+ // be recovered in the general case.
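+        // E.g. (illustrative) expanding `concat!("a", "b")` yields the single
+        // string literal token `"ab"`, while a block or arithmetic expression
+        // is rejected with `Err(())`.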
+ match &expr.kind {
+ ast::ExprKind::Lit(l) if l.token.kind == token::Bool => Ok(
+ tokenstream::TokenStream::token_alone(token::Ident(l.token.symbol, false), l.span),
+ ),
+ ast::ExprKind::Lit(l) => {
+ Ok(tokenstream::TokenStream::token_alone(token::Literal(l.token), l.span))
+ }
+ ast::ExprKind::Unary(ast::UnOp::Neg, e) => match &e.kind {
+ ast::ExprKind::Lit(l) => match l.token {
+ token::Lit { kind: token::Integer | token::Float, .. } => {
+ Ok(Self::TokenStream::from_iter([
+ // FIXME: The span of the `-` token is lost when
+ // parsing, so we cannot faithfully recover it here.
+ tokenstream::TokenTree::token_alone(token::BinOp(token::Minus), e.span),
+ tokenstream::TokenTree::token_alone(token::Literal(l.token), l.span),
+ ]))
+ }
+ _ => Err(()),
+ },
+ _ => Err(()),
+ },
+ _ => Err(()),
+ }
+ }
+
+ fn from_token_tree(
+ &mut self,
+ tree: TokenTree<Self::TokenStream, Self::Span, Self::Symbol>,
+ ) -> Self::TokenStream {
+ (tree, &mut *self).to_internal()
+ }
+
+ fn concat_trees(
+ &mut self,
+ base: Option<Self::TokenStream>,
+ trees: Vec<TokenTree<Self::TokenStream, Self::Span, Self::Symbol>>,
+ ) -> Self::TokenStream {
+ let mut builder = tokenstream::TokenStreamBuilder::new();
+ if let Some(base) = base {
+ builder.push(base);
+ }
+ for tree in trees {
+ builder.push((tree, &mut *self).to_internal());
+ }
+ builder.build()
+ }
+
+ fn concat_streams(
+ &mut self,
+ base: Option<Self::TokenStream>,
+ streams: Vec<Self::TokenStream>,
+ ) -> Self::TokenStream {
+ let mut builder = tokenstream::TokenStreamBuilder::new();
+ if let Some(base) = base {
+ builder.push(base);
+ }
+ for stream in streams {
+ builder.push(stream);
+ }
+ builder.build()
+ }
+
+ fn into_trees(
+ &mut self,
+ stream: Self::TokenStream,
+ ) -> Vec<TokenTree<Self::TokenStream, Self::Span, Self::Symbol>> {
+ FromInternal::from_internal((stream, self))
+ }
+}
+
+impl server::SourceFile for Rustc<'_, '_> {
+ fn eq(&mut self, file1: &Self::SourceFile, file2: &Self::SourceFile) -> bool {
+ Lrc::ptr_eq(file1, file2)
+ }
+
+ fn path(&mut self, file: &Self::SourceFile) -> String {
+ match file.name {
+ FileName::Real(ref name) => name
+ .local_path()
+ .expect("attempting to get a file path in an imported file in `proc_macro::SourceFile::path`")
+ .to_str()
+ .expect("non-UTF8 file path in `proc_macro::SourceFile::path`")
+ .to_string(),
+ _ => file.name.prefer_local().to_string(),
+ }
+ }
+
+ fn is_real(&mut self, file: &Self::SourceFile) -> bool {
+ file.is_real_file()
+ }
+}
+
+impl server::MultiSpan for Rustc<'_, '_> {
+ fn new(&mut self) -> Self::MultiSpan {
+ vec![]
+ }
+
+ fn push(&mut self, spans: &mut Self::MultiSpan, span: Self::Span) {
+ spans.push(span)
+ }
+}
+
+impl server::Diagnostic for Rustc<'_, '_> {
+ fn new(&mut self, level: Level, msg: &str, spans: Self::MultiSpan) -> Self::Diagnostic {
+ let mut diag = Diagnostic::new(level.to_internal(), msg);
+ diag.set_span(MultiSpan::from_spans(spans));
+ diag
+ }
+
+ fn sub(
+ &mut self,
+ diag: &mut Self::Diagnostic,
+ level: Level,
+ msg: &str,
+ spans: Self::MultiSpan,
+ ) {
+ diag.sub(level.to_internal(), msg, MultiSpan::from_spans(spans), None);
+ }
+
+ fn emit(&mut self, mut diag: Self::Diagnostic) {
+ self.sess().span_diagnostic.emit_diagnostic(&mut diag);
+ }
+}
+
+impl server::Span for Rustc<'_, '_> {
+ fn debug(&mut self, span: Self::Span) -> String {
+ if self.ecx.ecfg.span_debug {
+ format!("{:?}", span)
+ } else {
+ format!("{:?} bytes({}..{})", span.ctxt(), span.lo().0, span.hi().0)
+ }
+ }
+
+ fn source_file(&mut self, span: Self::Span) -> Self::SourceFile {
+ self.sess().source_map().lookup_char_pos(span.lo()).file
+ }
+
+ fn parent(&mut self, span: Self::Span) -> Option<Self::Span> {
+ span.parent_callsite()
+ }
+
+ fn source(&mut self, span: Self::Span) -> Self::Span {
+ span.source_callsite()
+ }
+
+ fn start(&mut self, span: Self::Span) -> LineColumn {
+ let loc = self.sess().source_map().lookup_char_pos(span.lo());
+ LineColumn { line: loc.line, column: loc.col.to_usize() }
+ }
+
+ fn end(&mut self, span: Self::Span) -> LineColumn {
+ let loc = self.sess().source_map().lookup_char_pos(span.hi());
+ LineColumn { line: loc.line, column: loc.col.to_usize() }
+ }
+
+ fn before(&mut self, span: Self::Span) -> Self::Span {
+ span.shrink_to_lo()
+ }
+
+ fn after(&mut self, span: Self::Span) -> Self::Span {
+ span.shrink_to_hi()
+ }
+
+ fn join(&mut self, first: Self::Span, second: Self::Span) -> Option<Self::Span> {
+ let self_loc = self.sess().source_map().lookup_char_pos(first.lo());
+ let other_loc = self.sess().source_map().lookup_char_pos(second.lo());
+
+ if self_loc.file.name != other_loc.file.name {
+ return None;
+ }
+
+ Some(first.to(second))
+ }
+
+ fn subspan(
+ &mut self,
+ span: Self::Span,
+ start: Bound<usize>,
+ end: Bound<usize>,
+ ) -> Option<Self::Span> {
+ let length = span.hi().to_usize() - span.lo().to_usize();
+
+ let start = match start {
+ Bound::Included(lo) => lo,
+ Bound::Excluded(lo) => lo.checked_add(1)?,
+ Bound::Unbounded => 0,
+ };
+
+ let end = match end {
+ Bound::Included(hi) => hi.checked_add(1)?,
+ Bound::Excluded(hi) => hi,
+ Bound::Unbounded => length,
+ };
+
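+        // E.g. (illustrative) on a 5-byte span, `(Bound::Included(1),
+        // Bound::Excluded(3))` selects relative bytes `1..3`.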
+ // Bounds check the values, preventing addition overflow and OOB spans.
+ if start > u32::MAX as usize
+ || end > u32::MAX as usize
+ || (u32::MAX - start as u32) < span.lo().to_u32()
+ || (u32::MAX - end as u32) < span.lo().to_u32()
+ || start >= end
+ || end > length
+ {
+ return None;
+ }
+
+ let new_lo = span.lo() + BytePos::from_usize(start);
+ let new_hi = span.lo() + BytePos::from_usize(end);
+ Some(span.with_lo(new_lo).with_hi(new_hi))
+ }
+
+ fn resolved_at(&mut self, span: Self::Span, at: Self::Span) -> Self::Span {
+ span.with_ctxt(at.ctxt())
+ }
+
+ fn source_text(&mut self, span: Self::Span) -> Option<String> {
+ self.sess().source_map().span_to_snippet(span).ok()
+ }
+
+    /// Saves the provided span into the metadata of
+ /// *the crate we are currently compiling*, which must
+ /// be a proc-macro crate. This id can be passed to
+ /// `recover_proc_macro_span` when our current crate
+ /// is *run* as a proc-macro.
+ ///
+ /// Let's suppose that we have two crates - `my_client`
+ /// and `my_proc_macro`. The `my_proc_macro` crate
+ /// contains a procedural macro `my_macro`, which
+ /// is implemented as: `quote! { "hello" }`
+ ///
+ /// When we *compile* `my_proc_macro`, we will execute
+ /// the `quote` proc-macro. This will save the span of
+ /// "hello" into the metadata of `my_proc_macro`. As a result,
+ /// the body of `my_proc_macro` (after expansion) will end
+ /// up containing a call that looks like this:
+ /// `proc_macro::Ident::new("hello", proc_macro::Span::recover_proc_macro_span(0))`
+ ///
+ /// where `0` is the id returned by this function.
+ /// When `my_proc_macro` *executes* (during the compilation of `my_client`),
+ /// the call to `recover_proc_macro_span` will load the corresponding
+ /// span from the metadata of `my_proc_macro` (which we have access to,
+ /// since we've loaded `my_proc_macro` from disk in order to execute it).
+    /// In this way, we have obtained a span pointing into `my_proc_macro`.
+ fn save_span(&mut self, span: Self::Span) -> usize {
+ self.sess().save_proc_macro_span(span)
+ }
+
+ fn recover_proc_macro_span(&mut self, id: usize) -> Self::Span {
+ let (resolver, krate, def_site) = (&*self.ecx.resolver, self.krate, self.def_site);
+ *self.rebased_spans.entry(id).or_insert_with(|| {
+ // FIXME: `SyntaxContext` for spans from proc macro crates is lost during encoding,
+ // replace it with a def-site context until we are encoding it properly.
+ resolver.get_proc_macro_quoted_span(krate, id).with_ctxt(def_site.ctxt())
+ })
+ }
+}
+
+impl server::Symbol for Rustc<'_, '_> {
+ fn normalize_and_validate_ident(&mut self, string: &str) -> Result<Self::Symbol, ()> {
+ let sym = nfc_normalize(string);
+ if rustc_lexer::is_ident(sym.as_str()) { Ok(sym) } else { Err(()) }
+ }
+}
+
+impl server::Server for Rustc<'_, '_> {
+ fn globals(&mut self) -> ExpnGlobals<Self::Span> {
+ ExpnGlobals {
+ def_site: self.def_site,
+ call_site: self.call_site,
+ mixed_site: self.mixed_site,
+ }
+ }
+
+ fn intern_symbol(string: &str) -> Self::Symbol {
+ Symbol::intern(string)
+ }
+
+ fn with_symbol_string(symbol: &Self::Symbol, f: impl FnOnce(&str)) {
+ f(&symbol.as_str())
+ }
+}
diff --git a/compiler/rustc_expand/src/tests.rs b/compiler/rustc_expand/src/tests.rs
new file mode 100644
index 000000000..e44f06081
--- /dev/null
+++ b/compiler/rustc_expand/src/tests.rs
@@ -0,0 +1,1016 @@
+use rustc_ast as ast;
+use rustc_ast::tokenstream::TokenStream;
+use rustc_parse::{new_parser_from_source_str, parser::Parser, source_file_to_stream};
+use rustc_session::parse::ParseSess;
+use rustc_span::create_default_session_if_not_set_then;
+use rustc_span::source_map::{FilePathMapping, SourceMap};
+use rustc_span::{BytePos, Span};
+
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::emitter::EmitterWriter;
+use rustc_errors::{Handler, MultiSpan, PResult};
+
+use std::io;
+use std::io::prelude::*;
+use std::iter::Peekable;
+use std::path::{Path, PathBuf};
+use std::str;
+use std::sync::{Arc, Mutex};
+
+/// Maps a string to a parser (via token trees).
+fn string_to_parser(ps: &ParseSess, source_str: String) -> Parser<'_> {
+ new_parser_from_source_str(ps, PathBuf::from("bogofile").into(), source_str)
+}
+
+pub(crate) fn with_error_checking_parse<'a, T, F>(s: String, ps: &'a ParseSess, f: F) -> T
+where
+ F: FnOnce(&mut Parser<'a>) -> PResult<'a, T>,
+{
+ let mut p = string_to_parser(&ps, s);
+ let x = f(&mut p).unwrap();
+ p.sess.span_diagnostic.abort_if_errors();
+ x
+}
+
+/// Maps a string to tts, using a made-up filename.
+pub(crate) fn string_to_stream(source_str: String) -> TokenStream {
+ let ps = ParseSess::new(FilePathMapping::empty());
+ source_file_to_stream(
+ &ps,
+ ps.source_map().new_source_file(PathBuf::from("bogofile").into(), source_str),
+ None,
+ )
+ .0
+}
+
+/// Parses a string, returns a crate.
+pub(crate) fn string_to_crate(source_str: String) -> ast::Crate {
+ let ps = ParseSess::new(FilePathMapping::empty());
+ with_error_checking_parse(source_str, &ps, |p| p.parse_crate_mod())
+}
+
+/// Does the given string match the pattern? Whitespace in the first string
+/// may be deleted or replaced with other whitespace to match the pattern.
+/// This function is relatively Unicode-ignorant; fortunately, the careful design
+/// of UTF-8 mitigates this ignorance. It does not perform NFKC normalization.
+pub(crate) fn matches_codepattern(a: &str, b: &str) -> bool {
+ let mut a_iter = a.chars().peekable();
+ let mut b_iter = b.chars().peekable();
+
+ loop {
+ let (a, b) = match (a_iter.peek(), b_iter.peek()) {
+ (None, None) => return true,
+ (None, _) => return false,
+ (Some(&a), None) => {
+ if rustc_lexer::is_whitespace(a) {
+ break; // Trailing whitespace check is out of loop for borrowck.
+ } else {
+ return false;
+ }
+ }
+ (Some(&a), Some(&b)) => (a, b),
+ };
+
+ if rustc_lexer::is_whitespace(a) && rustc_lexer::is_whitespace(b) {
+ // Skip whitespace for `a` and `b`.
+ scan_for_non_ws_or_end(&mut a_iter);
+ scan_for_non_ws_or_end(&mut b_iter);
+ } else if rustc_lexer::is_whitespace(a) {
+ // Skip whitespace for `a`.
+ scan_for_non_ws_or_end(&mut a_iter);
+ } else if a == b {
+ a_iter.next();
+ b_iter.next();
+ } else {
+ return false;
+ }
+ }
+
+ // Check if a has *only* trailing whitespace.
+ a_iter.all(rustc_lexer::is_whitespace)
+}
+
+/// Advances the given peekable `Iterator` until it reaches a non-whitespace character.
+fn scan_for_non_ws_or_end<I: Iterator<Item = char>>(iter: &mut Peekable<I>) {
+ while iter.peek().copied().map(rustc_lexer::is_whitespace) == Some(true) {
+ iter.next();
+ }
+}
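A few illustrative cases of the whitespace rules above (hypothetical assertions, not part of the test suite):

```rust
// Whitespace in the input may be collapsed, changed, or dropped entirely...
assert!(matches_codepattern("fn  foo ( )", "fn foo()"));
// ...and trailing whitespace in the input is ignored,
assert!(matches_codepattern("fn foo()  ", "fn foo()"));
// but the input cannot omit a token boundary the pattern requires.
assert!(!matches_codepattern("fnfoo()", "fn foo()"));
```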
+
+/// Identifies a position in the text by the n'th occurrence of a string.
+struct Position {
+ string: &'static str,
+ count: usize,
+}
+
+struct SpanLabel {
+ start: Position,
+ end: Position,
+ label: &'static str,
+}
+
+pub(crate) struct Shared<T: Write> {
+ pub data: Arc<Mutex<T>>,
+}
+
+impl<T: Write> Write for Shared<T> {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.data.lock().unwrap().write(buf)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.data.lock().unwrap().flush()
+ }
+}
+
+fn test_harness(file_text: &str, span_labels: Vec<SpanLabel>, expected_output: &str) {
+ create_default_session_if_not_set_then(|_| {
+ let output = Arc::new(Mutex::new(Vec::new()));
+
+ let fallback_bundle =
+ rustc_errors::fallback_fluent_bundle(rustc_errors::DEFAULT_LOCALE_RESOURCES, false);
+ let source_map = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ source_map.new_source_file(Path::new("test.rs").to_owned().into(), file_text.to_owned());
+
+ let primary_span = make_span(&file_text, &span_labels[0].start, &span_labels[0].end);
+ let mut msp = MultiSpan::from_span(primary_span);
+ for span_label in span_labels {
+ let span = make_span(&file_text, &span_label.start, &span_label.end);
+ msp.push_span_label(span, span_label.label);
+ println!("span: {:?} label: {:?}", span, span_label.label);
+ println!("text: {:?}", source_map.span_to_snippet(span));
+ }
+
+ let emitter = EmitterWriter::new(
+ Box::new(Shared { data: output.clone() }),
+ Some(source_map.clone()),
+ None,
+ fallback_bundle,
+ false,
+ false,
+ false,
+ None,
+ false,
+ );
+ let handler = Handler::with_emitter(true, None, Box::new(emitter));
+ handler.span_err(msp, "foo");
+
+ assert!(
+ expected_output.chars().next() == Some('\n'),
+ "expected output should begin with newline"
+ );
+ let expected_output = &expected_output[1..];
+
+ let bytes = output.lock().unwrap();
+ let actual_output = str::from_utf8(&bytes).unwrap();
+ println!("expected output:\n------\n{}------", expected_output);
+ println!("actual output:\n------\n{}------", actual_output);
+
+ assert!(expected_output == actual_output)
+ })
+}
+
+fn make_span(file_text: &str, start: &Position, end: &Position) -> Span {
+ let start = make_pos(file_text, start);
+ let end = make_pos(file_text, end) + end.string.len(); // just after the matched string ends
+ assert!(start <= end);
+ Span::with_root_ctxt(BytePos(start as u32), BytePos(end as u32))
+}
+
+fn make_pos(file_text: &str, pos: &Position) -> usize {
+ let mut remainder = file_text;
+ let mut offset = 0;
+ for i in 0..pos.count {
+ if let Some(n) = remainder.find(&pos.string) {
+ // Each non-final match advances `remainder` one byte past the match start,
+ // so account for that byte to keep `offset` absolute within `file_text`.
+ offset += n + if i + 1 < pos.count { 1 } else { 0 };
+ remainder = &remainder[n + 1..];
+ } else {
+ panic!("failed to find {} instances of {:?} in {:?}", pos.count, pos.string, file_text);
+ }
+ }
+ offset
+}
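For example (hypothetical values), the second occurrence of `"ab"` in `"ab ab"` starts at byte offset 3, and `make_span` then extends the result to just past the matched string:

```rust
let pos = Position { string: "ab", count: 2 };
assert_eq!(make_pos("ab ab", &pos), 3);
// A span ending at this Position would cover up to 3 + "ab".len() == 5.
```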
+
+#[test]
+fn ends_on_col0() {
+ test_harness(
+ r#"
+fn foo() {
+}
+"#,
+ vec![SpanLabel {
+ start: Position { string: "{", count: 1 },
+ end: Position { string: "}", count: 1 },
+ label: "test",
+ }],
+ r#"
+error: foo
+ --> test.rs:2:10
+ |
+2 | fn foo() {
+ | __________^
+3 | | }
+ | |_^ test
+
+"#,
+ );
+}
+
+#[test]
+fn ends_on_col2() {
+ test_harness(
+ r#"
+fn foo() {
+
+
+ }
+"#,
+ vec![SpanLabel {
+ start: Position { string: "{", count: 1 },
+ end: Position { string: "}", count: 1 },
+ label: "test",
+ }],
+ r#"
+error: foo
+ --> test.rs:2:10
+ |
+2 | fn foo() {
+ | __________^
+3 | |
+4 | |
+5 | | }
+ | |___^ test
+
+"#,
+ );
+}
+#[test]
+fn non_nested() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0
+ X1 Y1
+ X2 Y2
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "X0", count: 1 },
+ end: Position { string: "X2", count: 1 },
+ label: "`X` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "Y0", count: 1 },
+ end: Position { string: "Y2", count: 1 },
+ label: "`Y` is a good letter too",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | X0 Y0
+ | ____^__-
+ | | ___|
+ | ||
+4 | || X1 Y1
+5 | || X2 Y2
+ | ||____^__- `Y` is a good letter too
+ | |____|
+ | `X` is a good letter
+
+"#,
+ );
+}
+
+#[test]
+fn nested() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0
+ Y1 X1
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "X0", count: 1 },
+ end: Position { string: "X1", count: 1 },
+ label: "`X` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "Y0", count: 1 },
+ end: Position { string: "Y1", count: 1 },
+ label: "`Y` is a good letter too",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | X0 Y0
+ | ____^__-
+ | | ___|
+ | ||
+4 | || Y1 X1
+ | ||____-__^ `X` is a good letter
+ | |_____|
+ | `Y` is a good letter too
+
+"#,
+ );
+}
+
+#[test]
+fn different_overlap() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0 Z0
+ X1 Y1 Z1
+ X2 Y2 Z2
+ X3 Y3 Z3
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "Y0", count: 1 },
+ end: Position { string: "X2", count: 1 },
+ label: "`X` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "Z1", count: 1 },
+ end: Position { string: "X3", count: 1 },
+ label: "`Y` is a good letter too",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:6
+ |
+3 | X0 Y0 Z0
+ | ______^
+4 | | X1 Y1 Z1
+ | |_________-
+5 | || X2 Y2 Z2
+ | ||____^ `X` is a good letter
+6 | | X3 Y3 Z3
+ | |_____- `Y` is a good letter too
+
+"#,
+ );
+}
+
+#[test]
+fn triple_overlap() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0 Z0
+ X1 Y1 Z1
+ X2 Y2 Z2
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "X0", count: 1 },
+ end: Position { string: "X2", count: 1 },
+ label: "`X` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "Y0", count: 1 },
+ end: Position { string: "Y2", count: 1 },
+ label: "`Y` is a good letter too",
+ },
+ SpanLabel {
+ start: Position { string: "Z0", count: 1 },
+ end: Position { string: "Z2", count: 1 },
+ label: "`Z` label",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | X0 Y0 Z0
+ | _____^__-__-
+ | | ____|__|
+ | || ___|
+ | |||
+4 | ||| X1 Y1 Z1
+5 | ||| X2 Y2 Z2
+ | |||____^__-__- `Z` label
+ | ||____|__|
+ | |____| `Y` is a good letter too
+ | `X` is a good letter
+
+"#,
+ );
+}
+
+#[test]
+fn triple_exact_overlap() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0 Z0
+ X1 Y1 Z1
+ X2 Y2 Z2
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "X0", count: 1 },
+ end: Position { string: "X2", count: 1 },
+ label: "`X` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "X0", count: 1 },
+ end: Position { string: "X2", count: 1 },
+ label: "`Y` is a good letter too",
+ },
+ SpanLabel {
+ start: Position { string: "X0", count: 1 },
+ end: Position { string: "X2", count: 1 },
+ label: "`Z` label",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | / X0 Y0 Z0
+4 | | X1 Y1 Z1
+5 | | X2 Y2 Z2
+ | | ^
+ | | |
+ | | `X` is a good letter
+ | |____`Y` is a good letter too
+ | `Z` label
+
+"#,
+ );
+}
+
+#[test]
+fn minimum_depth() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0 Z0
+ X1 Y1 Z1
+ X2 Y2 Z2
+ X3 Y3 Z3
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "Y0", count: 1 },
+ end: Position { string: "X1", count: 1 },
+ label: "`X` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "Y1", count: 1 },
+ end: Position { string: "Z2", count: 1 },
+ label: "`Y` is a good letter too",
+ },
+ SpanLabel {
+ start: Position { string: "X2", count: 1 },
+ end: Position { string: "Y3", count: 1 },
+ label: "`Z`",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:6
+ |
+3 | X0 Y0 Z0
+ | ______^
+4 | | X1 Y1 Z1
+ | |____^_-
+ | ||____|
+ | | `X` is a good letter
+5 | | X2 Y2 Z2
+ | |____-______- `Y` is a good letter too
+ | ____|
+ | |
+6 | | X3 Y3 Z3
+ | |________- `Z`
+
+"#,
+ );
+}
+
+#[test]
+fn non_overlapping() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0 Z0
+ X1 Y1 Z1
+ X2 Y2 Z2
+ X3 Y3 Z3
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "X0", count: 1 },
+ end: Position { string: "X1", count: 1 },
+ label: "`X` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "Y2", count: 1 },
+ end: Position { string: "Z3", count: 1 },
+ label: "`Y` is a good letter too",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | / X0 Y0 Z0
+4 | | X1 Y1 Z1
+ | |____^ `X` is a good letter
+5 | X2 Y2 Z2
+ | ______-
+6 | | X3 Y3 Z3
+ | |__________- `Y` is a good letter too
+
+"#,
+ );
+}
+
+#[test]
+fn overlapping_start_and_end() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0 Z0
+ X1 Y1 Z1
+ X2 Y2 Z2
+ X3 Y3 Z3
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "Y0", count: 1 },
+ end: Position { string: "X1", count: 1 },
+ label: "`X` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "Z1", count: 1 },
+ end: Position { string: "Z3", count: 1 },
+ label: "`Y` is a good letter too",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:6
+ |
+3 | X0 Y0 Z0
+ | ______^
+4 | | X1 Y1 Z1
+ | |____^____-
+ | ||____|
+ | | `X` is a good letter
+5 | | X2 Y2 Z2
+6 | | X3 Y3 Z3
+ | |___________- `Y` is a good letter too
+
+"#,
+ );
+}
+
+#[test]
+fn multiple_labels_primary_without_message() {
+ test_harness(
+ r#"
+fn foo() {
+ a { b { c } d }
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "b", count: 1 },
+ end: Position { string: "}", count: 1 },
+ label: "",
+ },
+ SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "`a` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "c", count: 1 },
+ end: Position { string: "c", count: 1 },
+ label: "",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:7
+ |
+3 | a { b { c } d }
+ | ----^^^^-^^-- `a` is a good letter
+
+"#,
+ );
+}
+
+#[test]
+fn multiple_labels_secondary_without_message() {
+ test_harness(
+ r#"
+fn foo() {
+ a { b { c } d }
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "`a` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "b", count: 1 },
+ end: Position { string: "}", count: 1 },
+ label: "",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | a { b { c } d }
+ | ^^^^-------^^ `a` is a good letter
+
+"#,
+ );
+}
+
+#[test]
+fn multiple_labels_primary_without_message_2() {
+ test_harness(
+ r#"
+fn foo() {
+ a { b { c } d }
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "b", count: 1 },
+ end: Position { string: "}", count: 1 },
+ label: "`b` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "",
+ },
+ SpanLabel {
+ start: Position { string: "c", count: 1 },
+ end: Position { string: "c", count: 1 },
+ label: "",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:7
+ |
+3 | a { b { c } d }
+ | ----^^^^-^^--
+ | |
+ | `b` is a good letter
+
+"#,
+ );
+}
+
+#[test]
+fn multiple_labels_secondary_without_message_2() {
+ test_harness(
+ r#"
+fn foo() {
+ a { b { c } d }
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "",
+ },
+ SpanLabel {
+ start: Position { string: "b", count: 1 },
+ end: Position { string: "}", count: 1 },
+ label: "`b` is a good letter",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | a { b { c } d }
+ | ^^^^-------^^
+ | |
+ | `b` is a good letter
+
+"#,
+ );
+}
+
+#[test]
+fn multiple_labels_secondary_without_message_3() {
+ test_harness(
+ r#"
+fn foo() {
+ a bc d
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "b", count: 1 },
+ label: "`a` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "c", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | a bc d
+ | ^^^^----
+ | |
+ | `a` is a good letter
+
+"#,
+ );
+}
+
+#[test]
+fn multiple_labels_without_message() {
+ test_harness(
+ r#"
+fn foo() {
+ a { b { c } d }
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "",
+ },
+ SpanLabel {
+ start: Position { string: "b", count: 1 },
+ end: Position { string: "}", count: 1 },
+ label: "",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | a { b { c } d }
+ | ^^^^-------^^
+
+"#,
+ );
+}
+
+#[test]
+fn multiple_labels_without_message_2() {
+ test_harness(
+ r#"
+fn foo() {
+ a { b { c } d }
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "b", count: 1 },
+ end: Position { string: "}", count: 1 },
+ label: "",
+ },
+ SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "",
+ },
+ SpanLabel {
+ start: Position { string: "c", count: 1 },
+ end: Position { string: "c", count: 1 },
+ label: "",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:7
+ |
+3 | a { b { c } d }
+ | ----^^^^-^^--
+
+"#,
+ );
+}
+
+#[test]
+fn multiple_labels_with_message() {
+ test_harness(
+ r#"
+fn foo() {
+ a { b { c } d }
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "`a` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "b", count: 1 },
+ end: Position { string: "}", count: 1 },
+ label: "`b` is a good letter",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | a { b { c } d }
+ | ^^^^-------^^
+ | | |
+ | | `b` is a good letter
+ | `a` is a good letter
+
+"#,
+ );
+}
+
+#[test]
+fn single_label_with_message() {
+ test_harness(
+ r#"
+fn foo() {
+ a { b { c } d }
+}
+"#,
+ vec![SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "`a` is a good letter",
+ }],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | a { b { c } d }
+ | ^^^^^^^^^^^^^ `a` is a good letter
+
+"#,
+ );
+}
+
+#[test]
+fn single_label_without_message() {
+ test_harness(
+ r#"
+fn foo() {
+ a { b { c } d }
+}
+"#,
+ vec![SpanLabel {
+ start: Position { string: "a", count: 1 },
+ end: Position { string: "d", count: 1 },
+ label: "",
+ }],
+ r#"
+error: foo
+ --> test.rs:3:3
+ |
+3 | a { b { c } d }
+ | ^^^^^^^^^^^^^
+
+"#,
+ );
+}
+
+#[test]
+fn long_snippet() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0 Z0
+ X1 Y1 Z1
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+ X2 Y2 Z2
+ X3 Y3 Z3
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "Y0", count: 1 },
+ end: Position { string: "X1", count: 1 },
+ label: "`X` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "Z1", count: 1 },
+ end: Position { string: "Z3", count: 1 },
+ label: "`Y` is a good letter too",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:6
+ |
+3 | X0 Y0 Z0
+ | ______^
+4 | | X1 Y1 Z1
+ | |____^____-
+ | ||____|
+ | | `X` is a good letter
+5 | | 1
+6 | | 2
+7 | | 3
+... |
+15 | | X2 Y2 Z2
+16 | | X3 Y3 Z3
+ | |___________- `Y` is a good letter too
+
+"#,
+ );
+}
+
+#[test]
+fn long_snippet_multiple_spans() {
+ test_harness(
+ r#"
+fn foo() {
+ X0 Y0 Z0
+1
+2
+3
+ X1 Y1 Z1
+4
+5
+6
+ X2 Y2 Z2
+7
+8
+9
+10
+ X3 Y3 Z3
+}
+"#,
+ vec![
+ SpanLabel {
+ start: Position { string: "Y0", count: 1 },
+ end: Position { string: "Y3", count: 1 },
+ label: "`Y` is a good letter",
+ },
+ SpanLabel {
+ start: Position { string: "Z1", count: 1 },
+ end: Position { string: "Z2", count: 1 },
+ label: "`Z` is a good letter too",
+ },
+ ],
+ r#"
+error: foo
+ --> test.rs:3:6
+ |
+3 | X0 Y0 Z0
+ | ______^
+4 | | 1
+5 | | 2
+6 | | 3
+7 | | X1 Y1 Z1
+ | |_________-
+8 | || 4
+9 | || 5
+10 | || 6
+11 | || X2 Y2 Z2
+ | ||__________- `Z` is a good letter too
+... |
+15 | | 10
+16 | | X3 Y3 Z3
+ | |_______^ `Y` is a good letter
+
+"#,
+ );
+}
diff --git a/compiler/rustc_expand/src/tokenstream/tests.rs b/compiler/rustc_expand/src/tokenstream/tests.rs
new file mode 100644
index 000000000..eed696810
--- /dev/null
+++ b/compiler/rustc_expand/src/tokenstream/tests.rs
@@ -0,0 +1,110 @@
+use crate::tests::string_to_stream;
+
+use rustc_ast::token;
+use rustc_ast::tokenstream::{TokenStream, TokenStreamBuilder};
+use rustc_span::create_default_session_globals_then;
+use rustc_span::{BytePos, Span, Symbol};
+
+fn string_to_ts(string: &str) -> TokenStream {
+ string_to_stream(string.to_owned())
+}
+
+fn sp(a: u32, b: u32) -> Span {
+ Span::with_root_ctxt(BytePos(a), BytePos(b))
+}
+
+#[test]
+fn test_concat() {
+ create_default_session_globals_then(|| {
+ let test_res = string_to_ts("foo::bar::baz");
+ let test_fst = string_to_ts("foo::bar");
+ let test_snd = string_to_ts("::baz");
+ let mut builder = TokenStreamBuilder::new();
+ builder.push(test_fst);
+ builder.push(test_snd);
+ let eq_res = builder.build();
+ assert_eq!(test_res.trees().count(), 5);
+ assert_eq!(eq_res.trees().count(), 5);
+ assert!(test_res.eq_unspanned(&eq_res));
+ })
+}
+
+#[test]
+fn test_to_from_bijection() {
+ create_default_session_globals_then(|| {
+ let test_start = string_to_ts("foo::bar(baz)");
+ let test_end = test_start.trees().cloned().collect();
+ assert_eq!(test_start, test_end)
+ })
+}
+
+#[test]
+fn test_eq_0() {
+ create_default_session_globals_then(|| {
+ let test_res = string_to_ts("foo");
+ let test_eqs = string_to_ts("foo");
+ assert_eq!(test_res, test_eqs)
+ })
+}
+
+#[test]
+fn test_eq_1() {
+ create_default_session_globals_then(|| {
+ let test_res = string_to_ts("::bar::baz");
+ let test_eqs = string_to_ts("::bar::baz");
+ assert_eq!(test_res, test_eqs)
+ })
+}
+
+#[test]
+fn test_eq_3() {
+ create_default_session_globals_then(|| {
+ let test_res = string_to_ts("");
+ let test_eqs = string_to_ts("");
+ assert_eq!(test_res, test_eqs)
+ })
+}
+
+#[test]
+fn test_diseq_0() {
+ create_default_session_globals_then(|| {
+ let test_res = string_to_ts("::bar::baz");
+ let test_eqs = string_to_ts("bar::baz");
+ assert_ne!(test_res, test_eqs)
+ })
+}
+
+#[test]
+fn test_diseq_1() {
+ create_default_session_globals_then(|| {
+ let test_res = string_to_ts("(bar,baz)");
+ let test_eqs = string_to_ts("bar,baz");
+ assert_ne!(test_res, test_eqs)
+ })
+}
+
+#[test]
+fn test_is_empty() {
+ create_default_session_globals_then(|| {
+ let test0 = TokenStream::default();
+ let test1 = TokenStream::token_alone(token::Ident(Symbol::intern("a"), false), sp(0, 1));
+ let test2 = string_to_ts("foo(bar::baz)");
+
+ assert!(test0.is_empty());
+ assert!(!test1.is_empty());
+ assert!(!test2.is_empty());
+ })
+}
+
+#[test]
+fn test_dotdotdot() {
+ create_default_session_globals_then(|| {
+ let mut builder = TokenStreamBuilder::new();
+ builder.push(TokenStream::token_joint(token::Dot, sp(0, 1)));
+ builder.push(TokenStream::token_joint(token::Dot, sp(1, 2)));
+ builder.push(TokenStream::token_alone(token::Dot, sp(2, 3)));
+ let stream = builder.build();
+ assert!(stream.eq_unspanned(&string_to_ts("...")));
+ assert_eq!(stream.trees().count(), 1);
+ })
+}
diff --git a/compiler/rustc_feature/Cargo.toml b/compiler/rustc_feature/Cargo.toml
new file mode 100644
index 000000000..3d8d0db20
--- /dev/null
+++ b/compiler/rustc_feature/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "rustc_feature"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs
new file mode 100644
index 000000000..099c40b21
--- /dev/null
+++ b/compiler/rustc_feature/src/accepted.rs
@@ -0,0 +1,344 @@
+//! List of the accepted feature gates.
+
+use super::{to_nonzero, Feature, State};
+use rustc_span::symbol::sym;
+
+macro_rules! declare_features {
+ ($(
+ $(#[doc = $doc:tt])* (accepted, $feature:ident, $ver:expr, $issue:expr, None),
+ )+) => {
+ /// These language features have since been accepted (they were once active).
+ pub const ACCEPTED_FEATURES: &[Feature] = &[
+ $(
+ Feature {
+ state: State::Accepted,
+ name: sym::$feature,
+ since: $ver,
+ issue: to_nonzero($issue),
+ edition: None,
+ }
+ ),+
+ ];
+ }
+}
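For example, a single entry such as `(accepted, async_await, "1.39.0", Some(50547), None)` contributes one element to `ACCEPTED_FEATURES`, roughly (an illustrative expansion of the macro above):

```rust
Feature {
    state: State::Accepted,
    name: sym::async_await,
    since: "1.39.0",
    issue: to_nonzero(Some(50547)),
    edition: None,
}
```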
+
+#[rustfmt::skip]
+declare_features! (
+ // -------------------------------------------------------------------------
+ // feature-group-start: for testing purposes
+ // -------------------------------------------------------------------------
+
+ /// A temporary feature gate used to enable parser extensions needed
+ /// to bootstrap the fix for #5723.
+ (accepted, issue_5723_bootstrap, "1.0.0", None, None),
+ /// These are used to test this portion of the compiler;
+ /// they don't actually mean anything.
+ (accepted, test_accepted_feature, "1.0.0", None, None),
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+ // Features are listed in alphabetical order. Tidy will fail if you don't keep it this way.
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+
+ // -------------------------------------------------------------------------
+ // feature-group-end: for testing purposes
+ // -------------------------------------------------------------------------
+
+ // -------------------------------------------------------------------------
+ // feature-group-start: accepted features
+ // -------------------------------------------------------------------------
+
+ /// Allows `#[target_feature(...)]` on aarch64 platforms
+ (accepted, aarch64_target_feature, "1.61.0", Some(44839), None),
+ /// Allows the sysV64 ABI to be specified on all platforms
+ /// instead of just the platforms on which it is the C ABI.
+ (accepted, abi_sysv64, "1.24.0", Some(36167), None),
+ /// Allows using ADX intrinsics from `core::arch::{x86, x86_64}`.
+ (accepted, adx_target_feature, "1.61.0", Some(44839), None),
+ /// Allows the definition of associated constants in `trait` or `impl` blocks.
+ (accepted, associated_consts, "1.20.0", Some(29646), None),
+ /// Allows using associated `type`s in `trait`s.
+ (accepted, associated_types, "1.0.0", None, None),
+ /// Allows free and inherent `async fn`s, `async` blocks, and `<expr>.await` expressions.
+ (accepted, async_await, "1.39.0", Some(50547), None),
+ /// Allows all literals in attribute lists and values of key-value pairs.
+ (accepted, attr_literals, "1.30.0", Some(34981), None),
+ /// Allows overloading augmented assignment operations like `a += b`.
+ (accepted, augmented_assignments, "1.8.0", Some(28235), None),
+ /// Allows mixing bind-by-move in patterns and references to those identifiers in guards.
+ (accepted, bind_by_move_pattern_guards, "1.39.0", Some(15287), None),
+ /// Allows bindings in the subpattern of a binding pattern.
+ /// For example, you can write `x @ Some(y)`.
+ (accepted, bindings_after_at, "1.56.0", Some(65490), None),
+ /// Allows empty structs and enum variants with braces.
+ (accepted, braced_empty_structs, "1.8.0", Some(29720), None),
+ /// Allows `#[cfg_attr(predicate, multiple, attributes, here)]`.
+ (accepted, cfg_attr_multi, "1.33.0", Some(54881), None),
+ /// Allows the use of `#[cfg(doctest)]`, set when rustdoc is collecting doctests.
+ (accepted, cfg_doctest, "1.40.0", Some(62210), None),
+ /// Enables `#[cfg(panic = "...")]` config key.
+ (accepted, cfg_panic, "1.60.0", Some(77443), None),
+ /// Allows `cfg(target_feature = "...")`.
+ (accepted, cfg_target_feature, "1.27.0", Some(29717), None),
+ /// Allows `cfg(target_vendor = "...")`.
+ (accepted, cfg_target_vendor, "1.33.0", Some(29718), None),
+ /// Allows implementing `Clone` for closures where possible (RFC 2132).
+ (accepted, clone_closures, "1.26.0", Some(44490), None),
+ /// Allows coercing non-capturing closures to function pointers.
+ (accepted, closure_to_fn_coercion, "1.19.0", Some(39817), None),
+ /// Allows usage of the `compile_error!` macro.
+ (accepted, compile_error, "1.20.0", Some(40872), None),
+ /// Allows `impl Trait` in function return types.
+ (accepted, conservative_impl_trait, "1.26.0", Some(34511), None),
+ /// Allows calling constructor functions in `const fn`.
+ (accepted, const_constructor, "1.40.0", Some(61456), None),
+ /// Allows using and casting function pointers in a `const fn`.
+ (accepted, const_fn_fn_ptr_basics, "1.61.0", Some(57563), None),
+ /// Allows trait bounds in `const fn`.
+ (accepted, const_fn_trait_bound, "1.61.0", Some(93706), None),
+ /// Allows calling `transmute` in const fn
+ (accepted, const_fn_transmute, "1.56.0", Some(53605), None),
+ /// Allows accessing fields of unions inside `const` functions.
+ (accepted, const_fn_union, "1.56.0", Some(51909), None),
+ /// Allows unsizing coercions in `const fn`.
+ (accepted, const_fn_unsize, "1.54.0", Some(64992), None),
+ /// Allows const generics to have default values (e.g. `struct Foo<const N: usize = 3>(...);`).
+ (accepted, const_generics_defaults, "1.59.0", Some(44580), None),
+ /// Allows the use of `if` and `match` in constants.
+ (accepted, const_if_match, "1.46.0", Some(49146), None),
+ /// Allows argument and return position `impl Trait` in a `const fn`.
+ (accepted, const_impl_trait, "1.61.0", Some(77463), None),
+ /// Allows indexing into constant arrays.
+ (accepted, const_indexing, "1.26.0", Some(29947), None),
+ /// Allows let bindings, assignments and destructuring in `const` functions and constants.
+ /// As long as control flow is not implemented in const eval, `&&` and `||` may not be used
+ /// at the same time as let bindings.
+ (accepted, const_let, "1.33.0", Some(48821), None),
+ /// Allows the use of `loop` and `while` in constants.
+ (accepted, const_loop, "1.46.0", Some(52000), None),
+ /// Allows panicking during const eval (producing compile-time errors).
+ (accepted, const_panic, "1.57.0", Some(51999), None),
+ /// Allows dereferencing raw pointers during const eval.
+ (accepted, const_raw_ptr_deref, "1.58.0", Some(51911), None),
+ /// Allows implementing `Copy` for closures where possible (RFC 2132).
+ (accepted, copy_closures, "1.26.0", Some(44490), None),
+ /// Allows `crate` in paths.
+ (accepted, crate_in_paths, "1.30.0", Some(45477), None),
+ /// Allows assigning a default type to type parameters in algebraic data type definitions.
+ (accepted, default_type_params, "1.0.0", None, None),
+ /// Allows `#[deprecated]` attribute.
+ (accepted, deprecated, "1.9.0", Some(29935), None),
+ /// Allows `#[derive(Default)]` and `#[default]` on enums.
+ (accepted, derive_default_enum, "1.62.0", Some(86985), None),
+ /// Allows the use of destructuring assignments.
+ (accepted, destructuring_assignment, "1.59.0", Some(71126), None),
+ /// Allows `#[doc(alias = "...")]`.
+ (accepted, doc_alias, "1.48.0", Some(50146), None),
+ /// Allows `..` in tuple (struct) patterns.
+ (accepted, dotdot_in_tuple_patterns, "1.14.0", Some(33627), None),
+ /// Allows `..=` in patterns (RFC 1192).
+ (accepted, dotdoteq_in_patterns, "1.26.0", Some(28237), None),
+ /// Allows `Drop` types in constants (RFC 1440).
+ (accepted, drop_types_in_const, "1.22.0", Some(33156), None),
+ /// Allows using `dyn Trait` as a syntax for trait objects.
+ (accepted, dyn_trait, "1.27.0", Some(44662), None),
+ /// Allows integer match exhaustiveness checking (RFC 2591).
+ (accepted, exhaustive_integer_patterns, "1.33.0", Some(50907), None),
+ /// Allows explicit generic arguments specification with `impl Trait` present.
+ (accepted, explicit_generic_args_with_impl_trait, "1.63.0", Some(83701), None),
+ /// Allows arbitrary expressions in key-value attributes at parse time.
+ (accepted, extended_key_value_attributes, "1.54.0", Some(78835), None),
+ /// Allows resolving absolute paths as paths from other crates.
+ (accepted, extern_absolute_paths, "1.30.0", Some(44660), None),
+ /// Allows `extern crate foo as bar;`. This puts `bar` into extern prelude.
+ (accepted, extern_crate_item_prelude, "1.31.0", Some(55599), None),
+ /// Allows `extern crate self as foo;`.
+ /// This puts local crate root into extern prelude under name `foo`.
+ (accepted, extern_crate_self, "1.34.0", Some(56409), None),
+ /// Allows access to crate names passed via `--extern` through prelude.
+ (accepted, extern_prelude, "1.30.0", Some(44660), None),
+ /// Allows field shorthands (`x` meaning `x: x`) in struct literal expressions.
+ (accepted, field_init_shorthand, "1.17.0", Some(37340), None),
+ /// Allows `#[must_use]` on functions, and introduces must-use operators (RFC 1940).
+ (accepted, fn_must_use, "1.27.0", Some(43302), None),
+ /// Allows capturing variables in scope using format_args!
+ (accepted, format_args_capture, "1.58.0", Some(67984), None),
+ /// Allows attributes on lifetime/type formal parameters in generics (RFC 1327).
+ (accepted, generic_param_attrs, "1.27.0", Some(48848), None),
+ /// Allows the `#[global_allocator]` attribute.
+ (accepted, global_allocator, "1.28.0", Some(27389), None),
+ // FIXME: explain `globs`.
+ (accepted, globs, "1.0.0", None, None),
+ /// Allows using the `u128` and `i128` types.
+ (accepted, i128_type, "1.26.0", Some(35118), None),
+ /// Allows the use of `if let` expressions.
+ (accepted, if_let, "1.0.0", None, None),
+ /// Allows top level or-patterns (`p | q`) in `if let` and `while let`.
+ (accepted, if_while_or_patterns, "1.33.0", Some(48215), None),
+ /// Allows lifetime elision in `impl` headers. For example:
+ /// + `impl<I:Iterator> Iterator for &mut Iterator`
+ /// + `impl Debug for Foo<'_>`
+ (accepted, impl_header_lifetime_elision, "1.31.0", Some(15872), None),
+ /// Allows using `a..=b` and `..=b` as inclusive range syntaxes.
+ (accepted, inclusive_range_syntax, "1.26.0", Some(28237), None),
+ /// Allows inferring outlives requirements (RFC 2093).
+ (accepted, infer_outlives_requirements, "1.30.0", Some(44493), None),
+ /// Allows irrefutable patterns in `if let` and `while let` statements (RFC 2086).
+ (accepted, irrefutable_let_patterns, "1.33.0", Some(44495), None),
+ /// Allows some increased flexibility in the name resolution rules,
+ /// especially around globs and shadowing (RFC 1560).
+ (accepted, item_like_imports, "1.15.0", Some(35120), None),
+ /// Allows `break {expr}` with a value inside `loop`s.
+ (accepted, loop_break_value, "1.19.0", Some(37339), None),
+ /// Allows use of `?` as the Kleene "at most one" operator in macros.
+ (accepted, macro_at_most_once_rep, "1.32.0", Some(48075), None),
+ /// Allows macro attributes to observe output of `#[derive]`.
+ (accepted, macro_attributes_in_derive_output, "1.57.0", Some(81119), None),
+ /// Allows use of the `:lifetime` macro fragment specifier.
+ (accepted, macro_lifetime_matcher, "1.27.0", Some(34303), None),
+ /// Allows use of the `:literal` macro fragment specifier (RFC 1576).
+ (accepted, macro_literal_matcher, "1.32.0", Some(35625), None),
+ /// Allows `macro_rules!` items.
+ (accepted, macro_rules, "1.0.0", None, None),
+ /// Allows use of the `:vis` macro fragment specifier
+ (accepted, macro_vis_matcher, "1.30.0", Some(41022), None),
+ /// Allows macro invocations in `extern {}` blocks.
+ (accepted, macros_in_extern, "1.40.0", Some(49476), None),
+ /// Allows '|' at beginning of match arms (RFC 1925).
+ (accepted, match_beginning_vert, "1.25.0", Some(44101), None),
+ /// Allows default match binding modes (RFC 2005).
+ (accepted, match_default_bindings, "1.26.0", Some(42640), None),
+ /// Allows `impl Trait` with multiple unrelated lifetimes.
+ (accepted, member_constraints, "1.54.0", Some(61997), None),
+ /// Allows the definition of `const fn` functions.
+ (accepted, min_const_fn, "1.31.0", Some(53555), None),
+ /// The smallest useful subset of const generics.
+ (accepted, min_const_generics, "1.51.0", Some(74878), None),
+ /// Allows calling `const unsafe fn` inside `unsafe` blocks in `const fn` functions.
+ (accepted, min_const_unsafe_fn, "1.33.0", Some(55607), None),
+ /// Allows using `Self` and associated types in struct expressions and patterns.
+ (accepted, more_struct_aliases, "1.16.0", Some(37544), None),
+ /// Allows patterns with concurrent by-move and by-ref bindings.
+ /// For example, you can write `Foo(a, ref b)` where `a` is by-move and `b` is by-ref.
+ (accepted, move_ref_pattern, "1.49.0", Some(68354), None),
+ /// Allows specifying modifiers in the link attribute: `#[link(modifiers = "...")]`
+ (accepted, native_link_modifiers, "1.61.0", Some(81490), None),
+ /// Allows specifying the bundle link modifier
+ (accepted, native_link_modifiers_bundle, "1.63.0", Some(81490), None),
+ /// Allows specifying the whole-archive link modifier
+ (accepted, native_link_modifiers_whole_archive, "1.61.0", Some(81490), None),
+ /// Allows using non-lexical lifetimes (RFC 2094).
+ (accepted, nll, "1.63.0", Some(43234), None),
+ /// Allows using `#![no_std]`.
+ (accepted, no_std, "1.6.0", None, None),
+ /// Allows defining identifiers beyond ASCII.
+ (accepted, non_ascii_idents, "1.53.0", Some(55467), None),
+ /// Allows future-proofing enums/structs with the `#[non_exhaustive]` attribute (RFC 2008).
+ (accepted, non_exhaustive, "1.40.0", Some(44109), None),
+ /// Allows `foo.rs` as an alternative to `foo/mod.rs`.
+ (accepted, non_modrs_mods, "1.30.0", Some(44660), None),
+ /// Allows the use of or-patterns (e.g., `0 | 1`).
+ (accepted, or_patterns, "1.53.0", Some(54883), None),
+ /// Allows annotating functions conforming to `fn(&PanicInfo) -> !` with `#[panic_handler]`.
+ /// This defines the behavior of panics.
+ (accepted, panic_handler, "1.30.0", Some(44489), None),
+ /// Allows attributes in formal function parameters.
+ (accepted, param_attrs, "1.39.0", Some(60406), None),
+ /// Allows parentheses in patterns.
+ (accepted, pattern_parentheses, "1.31.0", Some(51087), None),
+ /// Allows procedural macros in `proc-macro` crates.
+ (accepted, proc_macro, "1.29.0", Some(38356), None),
+ /// Allows multi-segment paths in attributes and derives.
+ (accepted, proc_macro_path_invoc, "1.30.0", Some(38356), None),
+ /// Allows `pub(restricted)` visibilities (RFC 1422).
+ (accepted, pub_restricted, "1.18.0", Some(32409), None),
+ /// Allows use of the postfix `?` operator in expressions.
+ (accepted, question_mark, "1.13.0", Some(31436), None),
+ /// Allows keywords to be escaped for use as identifiers.
+ (accepted, raw_identifiers, "1.30.0", Some(48589), None),
+ /// Allows relaxing the coherence rules such that
+ /// `impl<T> ForeignTrait<LocalType> for ForeignType<T>` is permitted.
+ (accepted, re_rebalance_coherence, "1.41.0", Some(55437), None),
+ /// Allows numeric fields in struct expressions and patterns.
+ (accepted, relaxed_adts, "1.19.0", Some(35626), None),
+ /// Lessens the requirements for structs to implement `Unsize`.
+ (accepted, relaxed_struct_unsize, "1.58.0", Some(81793), None),
+ /// Allows `repr(align(16))` struct attribute (RFC 1358).
+ (accepted, repr_align, "1.25.0", Some(33626), None),
+ /// Allows using `#[repr(align(X))]` on enums with equivalent semantics
+ /// to wrapping an enum in a wrapper struct with `#[repr(align(X))]`.
+ (accepted, repr_align_enum, "1.37.0", Some(57996), None),
+ /// Allows `#[repr(packed(N))]` attribute on structs.
+ (accepted, repr_packed, "1.33.0", Some(33158), None),
+ /// Allows `#[repr(transparent)]` attribute on newtype structs.
+ (accepted, repr_transparent, "1.28.0", Some(43036), None),
+ /// Allows code like `let x: &'static u32 = &42` to work (RFC 1414).
+ (accepted, rvalue_static_promotion, "1.21.0", Some(38865), None),
+ /// Allows `Self` in type definitions (RFC 2300).
+ (accepted, self_in_typedefs, "1.32.0", Some(49303), None),
+ /// Allows `Self` struct constructor (RFC 2302).
+ (accepted, self_struct_ctor, "1.32.0", Some(51994), None),
+ /// Allows using subslice patterns, `[a, .., b]` and `[a, xs @ .., b]`.
+ (accepted, slice_patterns, "1.42.0", Some(62254), None),
+ /// Allows use of `&foo[a..b]` as a slicing syntax.
+ (accepted, slicing_syntax, "1.0.0", None, None),
+ /// Allows elision of `'static` lifetimes in `static`s and `const`s.
+ (accepted, static_in_const, "1.17.0", Some(35897), None),
+ /// Allows the definition of recursive static items.
+ (accepted, static_recursion, "1.17.0", Some(29719), None),
+ /// Allows attributes on struct literal fields.
+ (accepted, struct_field_attributes, "1.20.0", Some(38814), None),
+ /// Allows struct variants `Foo { baz: u8, .. }` in enums (RFC 418).
+ (accepted, struct_variant, "1.0.0", None, None),
+ /// Allows `#[target_feature(...)]`.
+ (accepted, target_feature, "1.27.0", None, None),
+ /// Allows `fn main()` with return types which implements `Termination` (RFC 1937).
+ (accepted, termination_trait, "1.26.0", Some(43301), None),
+ /// Allows `#[test]` functions where the return type implements `Termination` (RFC 1937).
+ (accepted, termination_trait_test, "1.27.0", Some(48854), None),
+ /// Allows attributes scoped to tools.
+ (accepted, tool_attributes, "1.30.0", Some(44690), None),
+ /// Allows scoped lints.
+ (accepted, tool_lints, "1.31.0", Some(44690), None),
+ /// Allows `#[track_caller]` to be used which provides
+ /// accurate caller location reporting during panic (RFC 2091).
+ (accepted, track_caller, "1.46.0", Some(47809), None),
+ /// Allows #[repr(transparent)] on univariant enums (RFC 2645).
+ (accepted, transparent_enums, "1.42.0", Some(60405), None),
+ /// Allows indexing tuples.
+ (accepted, tuple_indexing, "1.0.0", None, None),
+ /// Allows paths to enum variants on type aliases including `Self`.
+ (accepted, type_alias_enum_variants, "1.37.0", Some(49683), None),
+ /// Allows macros to appear in the type position.
+ (accepted, type_macros, "1.13.0", Some(27245), None),
+ /// Allows `const _: TYPE = VALUE`.
+ (accepted, underscore_const_names, "1.37.0", Some(54912), None),
+ /// Allows `use path as _;` and `extern crate c as _;`.
+ (accepted, underscore_imports, "1.33.0", Some(48216), None),
+ /// Allows `'_` placeholder lifetimes.
+ (accepted, underscore_lifetimes, "1.26.0", Some(44524), None),
+ /// Allows `use x::y;` to search `x` in the current scope.
+ (accepted, uniform_paths, "1.32.0", Some(53130), None),
+ /// Allows `impl Trait` in function arguments.
+ (accepted, universal_impl_trait, "1.26.0", Some(34511), None),
+ /// Allows arbitrary delimited token streams in non-macro attributes.
+ (accepted, unrestricted_attribute_tokens, "1.34.0", Some(55208), None),
+ /// The `unsafe_op_in_unsafe_fn` lint (allowed by default): no longer treats the body of an `unsafe fn` as an implicit `unsafe` block.
+ (accepted, unsafe_block_in_unsafe_fn, "1.52.0", Some(71668), None),
+ /// Allows importing and reexporting macros with `use`,
+ /// enables macro modularization in general.
+ (accepted, use_extern_macros, "1.30.0", Some(35896), None),
+ /// Allows nested groups in `use` items (RFC 2128).
+ (accepted, use_nested_groups, "1.25.0", Some(44494), None),
+ /// Allows `#[used]` to preserve symbols (see llvm.compiler.used).
+ (accepted, used, "1.30.0", Some(40289), None),
+ /// Allows the use of `while let` expressions.
+ (accepted, while_let, "1.0.0", None, None),
+ /// Allows `#![windows_subsystem]`.
+ (accepted, windows_subsystem, "1.18.0", Some(37499), None),
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+ // Features are listed in alphabetical order. Tidy will fail if you don't keep it this way.
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+
+ // -------------------------------------------------------------------------
+ // feature-group-end: accepted features
+ // -------------------------------------------------------------------------
+);
diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs
new file mode 100644
index 000000000..ef4a17564
--- /dev/null
+++ b/compiler/rustc_feature/src/active.rs
@@ -0,0 +1,552 @@
+//! List of the active feature gates.
+
+use super::{to_nonzero, Feature, State};
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_span::edition::Edition;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+
+macro_rules! set {
+ ($field: ident) => {{
+ fn f(features: &mut Features, _: Span) {
+ features.$field = true;
+ }
+ f as fn(&mut Features, Span)
+ }};
+}
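For instance, `set!(box_patterns)` expands to roughly the following block expression (a sketch of the macro output):

```rust
{
    fn f(features: &mut Features, _: Span) {
        features.box_patterns = true;
    }
    f as fn(&mut Features, Span)
}
```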
+
+macro_rules! declare_features {
+ (__status_to_bool active) => {
+ false
+ };
+ (__status_to_bool incomplete) => {
+ true
+ };
+ ($(
+ $(#[doc = $doc:tt])* ($status:ident, $feature:ident, $ver:expr, $issue:expr, $edition:expr),
+ )+) => {
+ /// Represents active features that are currently being implemented or
+ /// currently being considered for addition/removal.
+ pub const ACTIVE_FEATURES:
+ &[Feature] =
+ &[$(
+ // (sym::$feature, $ver, $issue, $edition, set!($feature))
+ Feature {
+ state: State::Active { set: set!($feature) },
+ name: sym::$feature,
+ since: $ver,
+ issue: to_nonzero($issue),
+ edition: $edition,
+ }
+ ),+];
+
+ /// A set of features to be used by later passes.
+ #[derive(Clone, Default, Debug)]
+ pub struct Features {
+ /// `#![feature]` attrs for language features, for error reporting.
+ pub declared_lang_features: Vec<(Symbol, Span, Option<Symbol>)>,
+ /// `#![feature]` attrs for non-language (library) features.
+ pub declared_lib_features: Vec<(Symbol, Span)>,
+ /// Features enabled for this crate.
+ pub active_features: FxHashSet<Symbol>,
+ $(
+ $(#[doc = $doc])*
+ pub $feature: bool
+ ),+
+ }
+
+ impl Features {
+ pub fn walk_feature_fields(&self, mut f: impl FnMut(&str, bool)) {
+ $(f(stringify!($feature), self.$feature);)+
+ }
+
+ /// Is the given feature active?
+ pub fn active(&self, feature: Symbol) -> bool {
+ self.active_features.contains(&feature)
+ }
+
+ /// Is the given feature enabled?
+ ///
+ /// Panics if the symbol doesn't correspond to a declared feature.
+ pub fn enabled(&self, feature: Symbol) -> bool {
+ match feature {
+ $( sym::$feature => self.$feature, )*
+
+ _ => panic!("`{}` was not listed in `declare_features`", feature),
+ }
+ }
+
+ /// Some features are known to be incomplete and using them is likely to have
+ /// unanticipated results, such as compiler crashes. We warn the user when such
+ /// a feature is enabled.
+ pub fn incomplete(&self, feature: Symbol) -> bool {
+ match feature {
+ $(
+ sym::$feature => declare_features!(__status_to_bool $status),
+ )*
+ // accepted and removed features aren't in this file but are never incomplete
+ _ if self.declared_lang_features.iter().any(|f| f.0 == feature) => false,
+ _ if self.declared_lib_features.iter().any(|f| f.0 == feature) => false,
+ _ => panic!("`{}` was not listed in `declare_features`", feature),
+ }
+ }
+ }
+ };
+}
+
+impl Feature {
+ /// Sets this feature in `Features`. Panics if called on a non-active feature.
+ pub fn set(&self, features: &mut Features, span: Span) {
+ match self.state {
+ State::Active { set } => set(features, span),
+ _ => panic!("called `set` on feature `{}` which is not `active`", self.name),
+ }
+ }
+}
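A hypothetical caller-side sketch of how these pieces fit together when a crate declares `#![feature(box_patterns)]`; the names below are illustrative, not the actual gate-checking code:

```rust
fn enable_feature(features: &mut Features, name: Symbol, span: Span) {
    if let Some(feature) = ACTIVE_FEATURES.iter().find(|f| f.name == name) {
        // `Feature::set` flips the corresponding bool field, e.g.
        // `features.box_patterns = true`.
        feature.set(features, span);
        // Record it so `Features::active(name)` returns true later.
        features.active_features.insert(name);
    }
    // Otherwise the feature may be accepted, removed, or a library feature;
    // those cases are handled elsewhere.
}
```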
+
+// See https://rustc-dev-guide.rust-lang.org/feature-gates.html#feature-gates for more
+// documentation about handling feature gates.
+//
+// If you change this, please modify `src/doc/unstable-book` as well.
+//
+// Don't ever remove anything from this list; move them to `accepted.rs` if
+// accepted or `removed.rs` if removed.
+//
+// The version numbers here correspond to the version in which the current status
+// was set. This is most important for knowing when a particular feature became
+// stable (active).
+//
+// Note that the features are grouped into internal/user-facing and then
+// sorted by version inside those groups. This is enforced with tidy.
+//
+// N.B., `tools/tidy/src/features.rs` parses this information directly out of the
+// source, so take care when modifying it.
+
+#[rustfmt::skip]
+declare_features! (
+ // -------------------------------------------------------------------------
+ // feature-group-start: internal feature gates (no tracking issue)
+ // -------------------------------------------------------------------------
+ // no-tracking-issue-start
+
+ /// Allows using the `thiscall` ABI.
+ (active, abi_thiscall, "1.19.0", None, None),
+ /// Allows using the `unadjusted` ABI; perma-unstable.
+ (active, abi_unadjusted, "1.16.0", None, None),
+ /// Allows using the `vectorcall` ABI.
+ (active, abi_vectorcall, "1.7.0", None, None),
+ /// Allows using `#![needs_allocator]`, an implementation detail of `#[global_allocator]`.
+ (active, allocator_internals, "1.20.0", None, None),
+ /// Allows using `#[allow_internal_unsafe]`. This is an
+ /// attribute on `macro_rules!` and can't use the attribute handling
+ /// below (it has to be checked before expansion possibly makes
+ /// macros disappear).
+ (active, allow_internal_unsafe, "1.0.0", None, None),
+ /// Allows using `#[allow_internal_unstable]`. This is an
+ /// attribute on `macro_rules!` and can't use the attribute handling
+ /// below (it has to be checked before expansion possibly makes
+ /// macros disappear).
+ (active, allow_internal_unstable, "1.0.0", None, None),
+ /// Allows using anonymous lifetimes in argument-position impl-trait.
+ (active, anonymous_lifetime_in_impl_trait, "1.63.0", None, None),
+ /// Allows identifying the `compiler_builtins` crate.
+ (active, compiler_builtins, "1.13.0", None, None),
+ /// Outputs useful `assert!` messages
+ (active, generic_assert, "1.63.0", None, None),
+ /// Allows using the `rust-intrinsic`'s "ABI".
+ (active, intrinsics, "1.0.0", None, None),
+ /// Allows using `#[lang = ".."]` attribute for linking items to special compiler logic.
+ (active, lang_items, "1.0.0", None, None),
+ /// Allows using `#[omit_gdb_pretty_printer_section]`.
+ (active, omit_gdb_pretty_printer_section, "1.5.0", None, None),
+ /// Allows using `#[prelude_import]` on glob `use` items.
+ (active, prelude_import, "1.2.0", None, None),
+ /// Used to identify crates that contain the profiler runtime.
+ (active, profiler_runtime, "1.18.0", None, None),
+ /// Allows using `rustc_*` attributes (RFC 572).
+ (active, rustc_attrs, "1.0.0", None, None),
+ /// Allows using the `#[stable]` and `#[unstable]` attributes.
+ (active, staged_api, "1.0.0", None, None),
+ /// Added for testing E0705; perma-unstable.
+ (active, test_2018_feature, "1.31.0", None, Some(Edition::Edition2018)),
+ /// Added for testing unstable lints; perma-unstable.
+ (active, test_unstable_lint, "1.60.0", None, None),
+ /// Allows non-`unsafe` (and thus unsound) access to `Pin` constructions.
+ /// Marked `incomplete` since perma-unstable and unsound.
+ (incomplete, unsafe_pin_internals, "1.60.0", None, None),
+ /// Use for stable + negative coherence and strict coherence depending on trait's
+ /// rustc_strict_coherence value.
+ (active, with_negative_coherence, "1.60.0", None, None),
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+ // Features are listed in alphabetical order. Tidy will fail if you don't keep it this way.
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+
+ // no-tracking-issue-end
+ // -------------------------------------------------------------------------
+ // feature-group-end: internal feature gates (no tracking issue)
+ // -------------------------------------------------------------------------
+
+ // -------------------------------------------------------------------------
+ // feature-group-start: internal feature gates
+ // -------------------------------------------------------------------------
+
+ /// Allows features specific to auto traits.
+ /// Renamed from `optin_builtin_traits`.
+ (active, auto_traits, "1.50.0", Some(13231), None),
+ /// Allows using `box` in patterns (RFC 469).
+ (active, box_patterns, "1.0.0", Some(29641), None),
+ /// Allows using the `box $expr` syntax.
+ (active, box_syntax, "1.0.0", Some(49733), None),
+ /// Allows `#[doc(notable_trait)]`.
+ /// Renamed from `doc_spotlight`.
+ (active, doc_notable_trait, "1.52.0", Some(45040), None),
+ /// Allows using the `may_dangle` attribute (RFC 1327).
+ (active, dropck_eyepatch, "1.10.0", Some(34761), None),
+ /// Allows using the `#[fundamental]` attribute.
+ (active, fundamental, "1.0.0", Some(29635), None),
+ /// Allows using `#[link_name="llvm.*"]`.
+ (active, link_llvm_intrinsics, "1.0.0", Some(29602), None),
+ /// Allows using the `#[linkage = ".."]` attribute.
+ (active, linkage, "1.0.0", Some(29603), None),
+ /// Allows declaring with `#![needs_panic_runtime]` that a panic runtime is needed.
+ (active, needs_panic_runtime, "1.10.0", Some(32837), None),
+ /// Allows using the `#![panic_runtime]` attribute.
+ (active, panic_runtime, "1.10.0", Some(32837), None),
+ /// Allows using `#[rustc_allow_const_fn_unstable]`.
+ /// This is an attribute on `const fn` for the same
+ /// purpose as `#[allow_internal_unstable]`.
+ (active, rustc_allow_const_fn_unstable, "1.49.0", Some(69399), None),
+ /// Allows using compiler's own crates.
+ (active, rustc_private, "1.0.0", Some(27812), None),
+ /// Allows using internal rustdoc features like `doc(primitive)` or `doc(keyword)`.
+ (active, rustdoc_internals, "1.58.0", Some(90418), None),
+ /// Allows using `#[start]` on a function indicating that it is the program entrypoint.
+ (active, start, "1.0.0", Some(29633), None),
+ /// Allows using `#[structural_match]` which indicates that a type is structurally matchable.
+ /// FIXME: Subsumed by trait `StructuralPartialEq`, cannot move to removed until a library
+ /// feature with the same name exists.
+ (active, structural_match, "1.8.0", Some(31434), None),
+ /// Allows using the `rust-call` ABI.
+ (active, unboxed_closures, "1.0.0", Some(29625), None),
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+ // Features are listed in alphabetical order. Tidy will fail if you don't keep it this way.
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+
+ // -------------------------------------------------------------------------
+ // feature-group-end: internal feature gates
+ // -------------------------------------------------------------------------
+
+ // -------------------------------------------------------------------------
+ // feature-group-start: actual feature gates (target features)
+ // -------------------------------------------------------------------------
+
+ // FIXME: Document these and merge with the list below.
+
+ // Unstable `#[target_feature]` directives.
+ (active, aarch64_ver_target_feature, "1.27.0", Some(44839), None),
+ (active, arm_target_feature, "1.27.0", Some(44839), None),
+ (active, avx512_target_feature, "1.27.0", Some(44839), None),
+ (active, bpf_target_feature, "1.54.0", Some(44839), None),
+ (active, cmpxchg16b_target_feature, "1.32.0", Some(44839), None),
+ (active, ermsb_target_feature, "1.49.0", Some(44839), None),
+ (active, f16c_target_feature, "1.36.0", Some(44839), None),
+ (active, hexagon_target_feature, "1.27.0", Some(44839), None),
+ (active, mips_target_feature, "1.27.0", Some(44839), None),
+ (active, movbe_target_feature, "1.34.0", Some(44839), None),
+ (active, powerpc_target_feature, "1.27.0", Some(44839), None),
+ (active, riscv_target_feature, "1.45.0", Some(44839), None),
+ (active, rtm_target_feature, "1.35.0", Some(44839), None),
+ (active, sse4a_target_feature, "1.27.0", Some(44839), None),
+ (active, tbm_target_feature, "1.27.0", Some(44839), None),
+ (active, wasm_target_feature, "1.30.0", Some(44839), None),
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+ // Features are listed in alphabetical order. Tidy will fail if you don't keep it this way.
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+
+ // -------------------------------------------------------------------------
+ // feature-group-end: actual feature gates (target features)
+ // -------------------------------------------------------------------------
+
+ // -------------------------------------------------------------------------
+ // feature-group-start: actual feature gates
+ // -------------------------------------------------------------------------
+
+ /// Allows using the `amdgpu-kernel` ABI.
+ (active, abi_amdgpu_kernel, "1.29.0", Some(51575), None),
+ /// Allows `extern "avr-interrupt" fn()` and `extern "avr-non-blocking-interrupt" fn()`.
+ (active, abi_avr_interrupt, "1.45.0", Some(69664), None),
+ /// Allows `extern "C-cmse-nonsecure-call" fn()`.
+ (active, abi_c_cmse_nonsecure_call, "1.51.0", Some(81391), None),
+ /// Allows using the `efiapi` ABI.
+ (active, abi_efiapi, "1.40.0", Some(65815), None),
+ /// Allows `extern "msp430-interrupt" fn()`.
+ (active, abi_msp430_interrupt, "1.16.0", Some(38487), None),
+ /// Allows `extern "ptx-*" fn()`.
+ (active, abi_ptx, "1.15.0", Some(38788), None),
+ /// Allows `extern "x86-interrupt" fn()`.
+ (active, abi_x86_interrupt, "1.17.0", Some(40180), None),
+ /// Allows additional const parameter types, such as `&'static str` or user defined types
+ (incomplete, adt_const_params, "1.56.0", Some(95174), None),
+ /// Allows defining an `#[alloc_error_handler]`.
+ (active, alloc_error_handler, "1.29.0", Some(51540), None),
+ /// Allows explicit discriminants on non-unit enum variants.
+ (active, arbitrary_enum_discriminant, "1.37.0", Some(60553), None),
+ /// Allows trait methods with arbitrary self types.
+ (active, arbitrary_self_types, "1.23.0", Some(44874), None),
+ /// Allows using `const` operands in inline assembly.
+ (active, asm_const, "1.58.0", Some(93332), None),
+ /// Enables experimental inline assembly support for additional architectures.
+ (active, asm_experimental_arch, "1.58.0", Some(93335), None),
+ /// Allows using `sym` operands in inline assembly.
+ (active, asm_sym, "1.58.0", Some(93333), None),
+ /// Allows the `may_unwind` option in inline assembly.
+ (active, asm_unwind, "1.58.0", Some(93334), None),
+ /// Allows users to enforce equality of associated constants `TraitImpl<AssocConst=3>`.
+ (active, associated_const_equality, "1.58.0", Some(92827), None),
+ /// Allows the use of associated type bounds.
+ (active, associated_type_bounds, "1.34.0", Some(52662), None),
+ /// Allows associated type defaults.
+ (active, associated_type_defaults, "1.2.0", Some(29661), None),
+ /// Allows `async || body` closures.
+ (active, async_closure, "1.37.0", Some(62290), None),
+ /// Allows `extern "C-unwind" fn` to enable unwinding across ABI boundaries.
+ (active, c_unwind, "1.52.0", Some(74990), None),
+ /// Allows using C-variadics.
+ (active, c_variadic, "1.34.0", Some(44930), None),
+ /// Allows capturing disjoint fields in a closure/generator (RFC 2229).
+ (incomplete, capture_disjoint_fields, "1.49.0", Some(53488), None),
+ /// Allows the use of `#[cfg(sanitize = "option")]`; set when -Zsanitizer is used.
+ (active, cfg_sanitize, "1.41.0", Some(39699), None),
+ /// Allows `cfg(target_abi = "...")`.
+ (active, cfg_target_abi, "1.55.0", Some(80970), None),
+ /// Allows `cfg(target(abi = "..."))`.
+ (active, cfg_target_compact, "1.63.0", Some(96901), None),
+ /// Allows `cfg(target_has_atomic_load_store = "...")`.
+ (active, cfg_target_has_atomic, "1.60.0", Some(94039), None),
+ /// Allows `cfg(target_has_atomic_equal_alignment = "...")`.
+ (active, cfg_target_has_atomic_equal_alignment, "1.60.0", Some(93822), None),
+ /// Allows `cfg(target_thread_local)`.
+ (active, cfg_target_thread_local, "1.7.0", Some(29594), None),
+ /// Allows conditional compilation depending on the Rust version.
+ (active, cfg_version, "1.45.0", Some(64796), None),
+ /// Allows `for<...>` on closures and generators.
+ (active, closure_lifetime_binder, "1.64.0", Some(97362), None),
+ /// Allows `#[track_caller]` on closures and generators.
+ (active, closure_track_caller, "1.57.0", Some(87417), None),
+ /// Allows using the `#[cmse_nonsecure_entry]` attribute.
+ (active, cmse_nonsecure_entry, "1.48.0", Some(75835), None),
+ /// Allows `async {}` expressions in const contexts.
+ (active, const_async_blocks, "1.53.0", Some(85368), None),
+ /// Allows limiting the evaluation steps of const expressions.
+ (active, const_eval_limit, "1.43.0", Some(67217), None),
+ /// Allows the definition of `const extern fn` and `const unsafe extern fn`.
+ (active, const_extern_fn, "1.40.0", Some(64926), None),
+ /// Allows basic arithmetic on floating point types in a `const fn`.
+ (active, const_fn_floating_point_arithmetic, "1.48.0", Some(57241), None),
+ /// Allows `for _ in _` loops in const contexts.
+ (active, const_for, "1.56.0", Some(87575), None),
+ /// Allows using `&mut` in constant functions.
+ (active, const_mut_refs, "1.41.0", Some(57349), None),
+ /// Be more precise when looking for live drops in a const context.
+ (active, const_precise_live_drops, "1.46.0", Some(73255), None),
+ /// Allows references to types with interior mutability within constants
+ (active, const_refs_to_cell, "1.51.0", Some(80384), None),
+ /// Allows `impl const Trait for T` syntax.
+ (active, const_trait_impl, "1.42.0", Some(67792), None),
+ /// Allows the `?` operator in const contexts.
+ (active, const_try, "1.56.0", Some(74935), None),
+ /// Allows non-builtin attributes in inner attribute position.
+ (active, custom_inner_attributes, "1.30.0", Some(54726), None),
+ /// Allows custom test frameworks with `#![test_runner]` and `#[test_case]`.
+ (active, custom_test_frameworks, "1.30.0", Some(50297), None),
+ /// Allows using `#[debugger_visualizer]`.
+ (active, debugger_visualizer, "1.62.0", Some(95939), None),
+ /// Allows declarative macros 2.0 (`macro`).
+ (active, decl_macro, "1.17.0", Some(39412), None),
+ /// Allows rustc to inject a default `alloc_error_handler`.
+ (active, default_alloc_error_handler, "1.48.0", Some(66741), None),
+ /// Allows default type parameters to influence type inference.
+ (active, default_type_parameter_fallback, "1.3.0", Some(27336), None),
+ /// Allows using `#[deprecated_safe]` to deprecate the safety of a function or trait.
+ (active, deprecated_safe, "1.61.0", Some(94978), None),
+ /// Allows using `suggestion` in the `#[deprecated]` attribute.
+ (active, deprecated_suggestion, "1.61.0", Some(94785), None),
+ /// Tells rustdoc to automatically generate `#[doc(cfg(...))]`.
+ (active, doc_auto_cfg, "1.58.0", Some(43781), None),
+ /// Allows `#[doc(cfg(...))]`.
+ (active, doc_cfg, "1.21.0", Some(43781), None),
+ /// Allows `#[doc(cfg_hide(...))]`.
+ (active, doc_cfg_hide, "1.57.0", Some(43781), None),
+ /// Allows `#[doc(masked)]`.
+ (active, doc_masked, "1.21.0", Some(44027), None),
+ /// Allows `X..Y` patterns.
+ (active, exclusive_range_pattern, "1.11.0", Some(37854), None),
+ /// Allows exhaustive pattern matching on types that contain uninhabited types.
+ (active, exhaustive_patterns, "1.13.0", Some(51085), None),
+ /// Allows defining `extern type`s.
+ (active, extern_types, "1.23.0", Some(43467), None),
+ /// Allows the use of `#[ffi_const]` on foreign functions.
+ (active, ffi_const, "1.45.0", Some(58328), None),
+ /// Allows the use of `#[ffi_pure]` on foreign functions.
+ (active, ffi_pure, "1.45.0", Some(58329), None),
+ /// Allows using `#[ffi_returns_twice]` on foreign functions.
+ (active, ffi_returns_twice, "1.34.0", Some(58314), None),
+ /// Allows using `#[repr(align(...))]` on function items
+ (active, fn_align, "1.53.0", Some(82232), None),
+ /// Allows defining generators.
+ (active, generators, "1.21.0", Some(43122), None),
+ /// Infer generic args for both consts and types.
+ (active, generic_arg_infer, "1.55.0", Some(85077), None),
+ /// Allows associated types to be generic, e.g., `type Foo<T>;` (RFC 1598).
+ (active, generic_associated_types, "1.23.0", Some(44265), None),
+ /// An extension to the `generic_associated_types` feature, allowing incomplete features.
+ (incomplete, generic_associated_types_extended, "1.61.0", Some(95451), None),
+ /// Allows non-trivial generic constants, whose well-formedness has to be manually propagated to callers.
+ (incomplete, generic_const_exprs, "1.56.0", Some(76560), None),
+ /// Allows using `..X`, `..=X`, `...X`, and `X..` as a pattern.
+ (active, half_open_range_patterns, "1.41.0", Some(67264), None),
+ /// Allows `if let` guard in match arms.
+ (active, if_let_guard, "1.47.0", Some(51114), None),
+ /// Allows using an imported `main` function.
+ (active, imported_main, "1.53.0", Some(28937), None),
+ /// Allows associated types in inherent impls.
+ (incomplete, inherent_associated_types, "1.52.0", Some(8995), None),
+ /// Allows anonymous constants from an inline `const` block.
+ (active, inline_const, "1.49.0", Some(76001), None),
+ /// Allows anonymous constants from an inline `const` block in pattern position.
+ (incomplete, inline_const_pat, "1.58.0", Some(76001), None),
+ /// Allows using `pointer` and `reference` in intra-doc links
+ (active, intra_doc_pointers, "1.51.0", Some(80896), None),
+ /// Allows the `#[instruction_set(_)]` attribute.
+ (active, isa_attribute, "1.48.0", Some(74727), None),
+ /// Allows `'a: { break 'a; }`.
+ (active, label_break_value, "1.28.0", Some(48594), None),
+ /// Allows setting the threshold for the `large_assignments` lint.
+ (active, large_assignments, "1.52.0", Some(83518), None),
+ /// Allows `if/while p && let q = r && ...` chains.
+ (active, let_chains, "1.37.0", Some(53667), None),
+ /// Allows `let...else` statements.
+ (active, let_else, "1.56.0", Some(87335), None),
+ /// Allows `#[link(..., cfg(..))]`.
+ (active, link_cfg, "1.14.0", Some(37406), None),
+ /// Allows using `reason` in lint attributes and the `#[expect(lint)]` lint check.
+ (active, lint_reasons, "1.31.0", Some(54503), None),
+ /// Gives access to additional metadata about declarative macro meta-variables.
+ (active, macro_metavar_expr, "1.61.0", Some(83527), None),
+ /// Allows `#[marker]` on certain traits allowing overlapping implementations.
+ (active, marker_trait_attr, "1.30.0", Some(29864), None),
+ /// A minimal, sound subset of specialization intended to be used by the
+ /// standard library until the soundness issues with specialization
+ /// are fixed.
+ (active, min_specialization, "1.7.0", Some(31844), None),
+ /// Allows qualified paths in struct expressions, struct patterns and tuple struct patterns.
+ (active, more_qualified_paths, "1.54.0", Some(86935), None),
+ /// Allows the `#[must_not_suspend]` attribute.
+ (active, must_not_suspend, "1.57.0", Some(83310), None),
+ /// Allows using `#[naked]` on functions.
+ (active, naked_functions, "1.9.0", Some(32408), None),
+ /// Allows specifying the `as-needed` link modifier.
+ (active, native_link_modifiers_as_needed, "1.53.0", Some(81490), None),
+ /// Allows specifying the `verbatim` link modifier.
+ (active, native_link_modifiers_verbatim, "1.53.0", Some(81490), None),
+ /// Allows negative trait implementations.
+ (active, negative_impls, "1.44.0", Some(68318), None),
+ /// Allows the `!` type. Does not imply `exhaustive_patterns` (above) any more.
+ (active, never_type, "1.13.0", Some(35121), None),
+ /// Allows diverging expressions to fall back to `!` rather than `()`.
+ (active, never_type_fallback, "1.41.0", Some(65992), None),
+ /// Allows `#![no_core]`.
+ (active, no_core, "1.3.0", Some(29639), None),
+ /// Allows the function attribute `#[no_coverage]`, to bypass coverage
+ /// instrumentation of that function.
+ (active, no_coverage, "1.53.0", Some(84605), None),
+ /// Allows the use of `no_sanitize` attribute.
+ (active, no_sanitize, "1.42.0", Some(39699), None),
+ /// Allows using the `non_exhaustive_omitted_patterns` lint.
+ (active, non_exhaustive_omitted_patterns_lint, "1.57.0", Some(89554), None),
+ /// Allows making `dyn Trait` well-formed even if `Trait` is not object safe.
+ /// In that case, `dyn Trait: Trait` does not hold. Moreover, coercions and
+ /// casts in safe Rust to `dyn Trait` for such a `Trait` are also forbidden.
+ (active, object_safe_for_dispatch, "1.40.0", Some(43561), None),
+ /// Allows using `#[optimize(X)]`.
+ (active, optimize_attribute, "1.34.0", Some(54882), None),
+ /// Allows `extern "platform-intrinsic" { ... }`.
+ (active, platform_intrinsics, "1.4.0", Some(27731), None),
+ /// Allows using `#![plugin(myplugin)]`.
+ (active, plugin, "1.0.0", Some(29597), None),
+ /// Allows exhaustive integer pattern matching on `usize` and `isize`.
+ (active, precise_pointer_size_matching, "1.32.0", Some(56354), None),
+ /// Allows macro attributes on expressions, statements and non-inline modules.
+ (active, proc_macro_hygiene, "1.30.0", Some(54727), None),
+ /// Allows the use of raw-dylibs (RFC 2627).
+ (incomplete, raw_dylib, "1.40.0", Some(58713), None),
+ /// Allows `&raw const $place_expr` and `&raw mut $place_expr` expressions.
+ (active, raw_ref_op, "1.41.0", Some(64490), None),
+ /// Allows using the `#[register_attr]` attribute.
+ (active, register_attr, "1.41.0", Some(66080), None),
+ /// Allows using the `#[register_tool]` attribute.
+ (active, register_tool, "1.41.0", Some(66079), None),
+ /// Allows the `#[repr(i128)]` attribute for enums.
+ (incomplete, repr128, "1.16.0", Some(56071), None),
+ /// Allows `repr(simd)` and importing the various simd intrinsics.
+ (active, repr_simd, "1.4.0", Some(27731), None),
+ /// Allows `extern "rust-cold"`.
+ (active, rust_cold_cc, "1.63.0", Some(97544), None),
+ /// Allows the use of SIMD types in functions declared in `extern` blocks.
+ (active, simd_ffi, "1.0.0", Some(27731), None),
+ /// Allows specialization of implementations (RFC 1210).
+ (incomplete, specialization, "1.7.0", Some(31844), None),
+ /// Allows attributes on expressions and non-item statements.
+ (active, stmt_expr_attributes, "1.6.0", Some(15701), None),
+ /// Allows lints that are part of the strict provenance effort.
+ (active, strict_provenance, "1.61.0", Some(95228), None),
+ /// Allows the use of `#[target_feature]` on safe functions.
+ (active, target_feature_11, "1.45.0", Some(69098), None),
+ /// Allows using `#[thread_local]` on `static` items.
+ (active, thread_local, "1.0.0", Some(29594), None),
+ /// Allows defining `trait X = A + B;` alias items.
+ (active, trait_alias, "1.24.0", Some(41517), None),
+ /// Allows upcasting trait objects via supertraits.
+ /// Trait upcasting is casting, e.g., `dyn Foo -> dyn Bar` where `Foo: Bar`.
+ (incomplete, trait_upcasting, "1.56.0", Some(65991), None),
+ /// Allows `#[repr(transparent)]` on unions (RFC 2645).
+ (active, transparent_unions, "1.37.0", Some(60405), None),
+ /// Allows inconsistent bounds in where clauses.
+ (active, trivial_bounds, "1.28.0", Some(48214), None),
+ /// Allows using `try {...}` expressions.
+ (active, try_blocks, "1.29.0", Some(31436), None),
+ /// Allows `impl Trait` to be used inside type aliases (RFC 2515).
+ (active, type_alias_impl_trait, "1.38.0", Some(63063), None),
+ /// Allows the use of type ascription in expressions.
+ (active, type_ascription, "1.6.0", Some(23416), None),
+ /// Allows creation of instances of a struct by moving fields that have
+ /// not changed from prior instances of the same struct (RFC #2528)
+ (active, type_changing_struct_update, "1.58.0", Some(86555), None),
+ /// Allows unsized fn parameters.
+ (active, unsized_fn_params, "1.49.0", Some(48055), None),
+ /// Allows unsized rvalues in arguments and parameters.
+ (incomplete, unsized_locals, "1.30.0", Some(48055), None),
+ /// Allows unsized tuple coercion.
+ (active, unsized_tuple_coercion, "1.20.0", Some(42877), None),
+ /// Allows using the `#[used(linker)]` (or `#[used(compiler)]`) attribute.
+ (active, used_with_arg, "1.60.0", Some(93798), None),
+ /// Allows `extern "wasm" fn`
+ (active, wasm_abi, "1.53.0", Some(83788), None),
+ /// Allows `do yeet` expressions
+ (active, yeet_expr, "1.62.0", Some(96373), None),
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+ // Features are listed in alphabetical order. Tidy will fail if you don't keep it this way.
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+
+ // -------------------------------------------------------------------------
+ // feature-group-end: actual feature gates
+ // -------------------------------------------------------------------------
+);
+
+/// Some features are not allowed to be used together at the same time, if
+/// the two are present, produce an error.
+///
+/// Currently empty, but we will probably need this again in the future,
+/// so let's keep it in for now.
+pub const INCOMPATIBLE_FEATURES: &[(Symbol, Symbol)] = &[];
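+// Illustrative only (not part of the original source): if two gates `foo` and
+// `bar` ever needed to be mutually exclusive, an entry would be written as
+// `(sym::foo, sym::bar)`, and the checker would then error when both appear in
+// a crate's `#![feature(...)]` lists.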
diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs
new file mode 100644
index 000000000..0e73d8fd7
--- /dev/null
+++ b/compiler/rustc_feature/src/builtin_attrs.rs
@@ -0,0 +1,833 @@
+//! Built-in attributes and `cfg` flag gating.
+
+use AttributeDuplicates::*;
+use AttributeGate::*;
+use AttributeType::*;
+
+use crate::{Features, Stability};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_span::symbol::{sym, Symbol};
+
+use std::sync::LazyLock;
+
+type GateFn = fn(&Features) -> bool;
+
+macro_rules! cfg_fn {
+ ($field: ident) => {
+ (|features| features.$field) as GateFn
+ };
+}
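+// Illustrative expansion (not in the original source): `cfg_fn!(cfg_version)`
+// expands to `(|features| features.cfg_version) as GateFn`, i.e. a function
+// pointer that reads the matching boolean field out of `Features`.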
+
+pub type GatedCfg = (Symbol, Symbol, GateFn);
+
+/// `cfg(...)` names that are feature gated.
+const GATED_CFGS: &[GatedCfg] = &[
+ // (name in cfg, feature, function to check if the feature is enabled)
+ (sym::target_abi, sym::cfg_target_abi, cfg_fn!(cfg_target_abi)),
+ (sym::target_thread_local, sym::cfg_target_thread_local, cfg_fn!(cfg_target_thread_local)),
+ (
+ sym::target_has_atomic_equal_alignment,
+ sym::cfg_target_has_atomic_equal_alignment,
+ cfg_fn!(cfg_target_has_atomic_equal_alignment),
+ ),
+ (sym::target_has_atomic_load_store, sym::cfg_target_has_atomic, cfg_fn!(cfg_target_has_atomic)),
+ (sym::sanitize, sym::cfg_sanitize, cfg_fn!(cfg_sanitize)),
+ (sym::version, sym::cfg_version, cfg_fn!(cfg_version)),
+];
+
+/// Finds a gated cfg determined by the predicate `pred`, which is given the cfg's name.
+pub fn find_gated_cfg(pred: impl Fn(Symbol) -> bool) -> Option<&'static GatedCfg> {
+ GATED_CFGS.iter().find(|(cfg_sym, ..)| pred(*cfg_sym))
+}
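+// Usage sketch (illustrative): `find_gated_cfg(|s| s == sym::version)` returns
+// the `(sym::version, sym::cfg_version, ...)` entry above, mapping a gated
+// `cfg(...)` name back to the feature that unlocks it.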
+
+// If you change this, please modify `src/doc/unstable-book` as well. You must
+// move that documentation into the relevant place in the other docs, and
+// remove the chapter on the flag.
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum AttributeType {
+ /// Normal, builtin attribute that is consumed
+ /// by the compiler before the unused_attribute check
+ Normal,
+
+ /// Builtin attribute that is only allowed at the crate level
+ CrateLevel,
+}
+
+#[derive(Clone, Copy)]
+pub enum AttributeGate {
+ /// Is gated by a given feature gate, reason
+ /// and function to check if enabled
+ Gated(Stability, Symbol, &'static str, fn(&Features) -> bool),
+
+ /// Ungated attribute, can be used on all release channels
+ Ungated,
+}
+
+// fn() is not Debug
+impl std::fmt::Debug for AttributeGate {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match *self {
+ Self::Gated(ref stab, name, expl, _) => {
+ write!(fmt, "Gated({:?}, {}, {})", stab, name, expl)
+ }
+ Self::Ungated => write!(fmt, "Ungated"),
+ }
+ }
+}
+
+impl AttributeGate {
+ fn is_deprecated(&self) -> bool {
+ matches!(*self, Self::Gated(Stability::Deprecated(_, _), ..))
+ }
+}
+
+/// A template that the attribute input must match.
+/// Only the top-level shape (`#[attr]` vs `#[attr(...)]` vs `#[attr = ...]`) is considered for now.
+#[derive(Clone, Copy, Default)]
+pub struct AttributeTemplate {
+ /// If `true`, the attribute is allowed to be a bare word like `#[test]`.
+ pub word: bool,
+ /// If `Some`, the attribute is allowed to take a list of items like `#[allow(..)]`.
+ pub list: Option<&'static str>,
+ /// If `Some`, the attribute is allowed to be a name/value pair where the
+ /// value is a string, like `#[must_use = "reason"]`.
+ pub name_value_str: Option<&'static str>,
+}
+
+/// How to handle multiple duplicate attributes on the same item.
+#[derive(Clone, Copy, Default)]
+pub enum AttributeDuplicates {
+ /// Duplicates of this attribute are allowed.
+ ///
+ /// This should only be used with attributes where duplicates have semantic
+ /// meaning, or some kind of "additive" behavior. For example, `#[warn(..)]`
+ /// can be specified multiple times, and it combines all the entries. Or use
+ /// this if there is validation done elsewhere.
+ #[default]
+ DuplicatesOk,
+ /// Duplicates after the first attribute will be an unused_attribute warning.
+ ///
+ /// This is usually used for "word" attributes, where they are used as a
+ /// boolean marker, like `#[used]`. It is not necessarily wrong that there
+ /// are duplicates, but the others should probably be removed.
+ WarnFollowing,
+ /// Same as `WarnFollowing`, but only issues warnings for word-style attributes.
+ ///
+ /// This is only for special cases, for example multiple `#[macro_use]` can
+ /// be warned, but multiple `#[macro_use(...)]` should not because the list
+ /// form has different meaning from the word form.
+ WarnFollowingWordOnly,
+ /// Duplicates after the first attribute will be an error.
+ ///
+ /// This should be used where duplicates would be ignored, but carry extra
+ /// meaning that could cause confusion. For example, given `#[stable(since="1.0")]
+ /// #[stable(since="2.0")]`, which version should be used for `stable`?
+ ErrorFollowing,
+ /// Duplicates preceding the last instance of the attribute will be an error.
+ ///
+ /// This is the same as `ErrorFollowing`, except the last attribute is the
+ /// one that is "used". This is typically used in cases like codegen
+ /// attributes which usually only honor the last attribute.
+ ErrorPreceding,
+ /// Duplicates after the first attribute will be an unused_attribute warning
+ /// with a note that this will be an error in the future.
+ ///
+ /// This should be used for attributes that should be `ErrorFollowing`, but
+ /// because older versions of rustc silently accepted (and ignored) the
+ /// attributes, this is used to transition.
+ FutureWarnFollowing,
+ /// Duplicates preceding the last instance of the attribute will be a
+ /// warning, with a note that this will be an error in the future.
+ ///
+ /// This is the same as `FutureWarnFollowing`, except the last attribute is
+ /// the one that is "used". Ideally these can eventually migrate to
+ /// `ErrorPreceding`.
+ FutureWarnPreceding,
+}
+
+/// A convenience macro to deal with `$($expr)?`.
+macro_rules! or_default {
+ ($default:expr,) => {
+ $default
+ };
+ ($default:expr, $next:expr) => {
+ $next
+ };
+}
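+// Illustrative (not in the original source): `or_default!(false,)` expands to
+// `false` (no override given), while `or_default!(false, true)` expands to
+// `true`. The attribute macros below use this to default `only_local` when the
+// `@only_local` argument is omitted.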
+
+/// A convenience macro for constructing attribute templates.
+/// E.g., `template!(Word, List: "description")` means that the attribute
+/// supports forms `#[attr]` and `#[attr(description)]`.
+macro_rules! template {
+ (Word) => { template!(@ true, None, None) };
+ (List: $descr: expr) => { template!(@ false, Some($descr), None) };
+ (NameValueStr: $descr: expr) => { template!(@ false, None, Some($descr)) };
+ (Word, List: $descr: expr) => { template!(@ true, Some($descr), None) };
+ (Word, NameValueStr: $descr: expr) => { template!(@ true, None, Some($descr)) };
+ (List: $descr1: expr, NameValueStr: $descr2: expr) => {
+ template!(@ false, Some($descr1), Some($descr2))
+ };
+ (Word, List: $descr1: expr, NameValueStr: $descr2: expr) => {
+ template!(@ true, Some($descr1), Some($descr2))
+ };
+ (@ $word: expr, $list: expr, $name_value_str: expr) => { AttributeTemplate {
+ word: $word, list: $list, name_value_str: $name_value_str
+ } };
+}
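+// Illustrative expansion (not in the original source): `template!(Word, List: "name")`
+// produces `AttributeTemplate { word: true, list: Some("name"), name_value_str: None }`,
+// i.e. the attribute accepts `#[attr]` and `#[attr(name)]` but not `#[attr = "..."]`.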
+
+macro_rules! ungated {
+ ($attr:ident, $typ:expr, $tpl:expr, $duplicates:expr $(, @only_local: $only_local:expr)? $(,)?) => {
+ BuiltinAttribute {
+ name: sym::$attr,
+ only_local: or_default!(false, $($only_local)?),
+ type_: $typ,
+ template: $tpl,
+ gate: Ungated,
+ duplicates: $duplicates,
+ }
+ };
+}
+
+macro_rules! gated {
+ ($attr:ident, $typ:expr, $tpl:expr, $duplicates:expr $(, @only_local: $only_local:expr)?, $gate:ident, $msg:expr $(,)?) => {
+ BuiltinAttribute {
+ name: sym::$attr,
+ only_local: or_default!(false, $($only_local)?),
+ type_: $typ,
+ template: $tpl,
+ duplicates: $duplicates,
+ gate: Gated(Stability::Unstable, sym::$gate, $msg, cfg_fn!($gate)),
+ }
+ };
+ ($attr:ident, $typ:expr, $tpl:expr, $duplicates:expr $(, @only_local: $only_local:expr)?, $msg:expr $(,)?) => {
+ BuiltinAttribute {
+ name: sym::$attr,
+ only_local: or_default!(false, $($only_local)?),
+ type_: $typ,
+ template: $tpl,
+ duplicates: $duplicates,
+ gate: Gated(Stability::Unstable, sym::$attr, $msg, cfg_fn!($attr)),
+ }
+ };
+}
+
+macro_rules! rustc_attr {
+ (TEST, $attr:ident, $typ:expr, $tpl:expr, $duplicate:expr $(, @only_local: $only_local:expr)? $(,)?) => {
+ rustc_attr!(
+ $attr,
+ $typ,
+ $tpl,
+ $duplicate,
+ $(@only_local: $only_local,)?
+ concat!(
+ "the `#[",
+ stringify!($attr),
+ "]` attribute is just used for rustc unit tests \
+ and will never be stable",
+ ),
+ )
+ };
+ ($attr:ident, $typ:expr, $tpl:expr, $duplicates:expr $(, @only_local: $only_local:expr)?, $msg:expr $(,)?) => {
+ BuiltinAttribute {
+ name: sym::$attr,
+ only_local: or_default!(false, $($only_local)?),
+ type_: $typ,
+ template: $tpl,
+ duplicates: $duplicates,
+ gate: Gated(Stability::Unstable, sym::rustc_attrs, $msg, cfg_fn!(rustc_attrs)),
+ }
+ };
+}
+
+macro_rules! experimental {
+ ($attr:ident) => {
+ concat!("the `#[", stringify!($attr), "]` attribute is an experimental feature")
+ };
+}
+
+const IMPL_DETAIL: &str = "internal implementation detail";
+const INTERNAL_UNSTABLE: &str = "this is an internal attribute that will never be stable";
+
+pub struct BuiltinAttribute {
+ pub name: Symbol,
+ /// Whether this attribute is only used in the local crate.
+ ///
+ /// If so, it is not encoded in the crate metadata.
+ pub only_local: bool,
+ pub type_: AttributeType,
+ pub template: AttributeTemplate,
+ pub duplicates: AttributeDuplicates,
+ pub gate: AttributeGate,
+}
+
+/// Attributes that have a special meaning to rustc or rustdoc.
+#[rustfmt::skip]
+pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
+ // ==========================================================================
+ // Stable attributes:
+ // ==========================================================================
+
+ // Conditional compilation:
+ ungated!(cfg, Normal, template!(List: "predicate"), DuplicatesOk),
+ ungated!(cfg_attr, Normal, template!(List: "predicate, attr1, attr2, ..."), DuplicatesOk),
+
+ // Testing:
+ ungated!(ignore, Normal, template!(Word, NameValueStr: "reason"), WarnFollowing),
+ ungated!(
+ should_panic, Normal,
+ template!(Word, List: r#"expected = "reason"#, NameValueStr: "reason"), FutureWarnFollowing,
+ ),
+ // FIXME(Centril): This can be used on stable but shouldn't.
+ ungated!(reexport_test_harness_main, CrateLevel, template!(NameValueStr: "name"), ErrorFollowing),
+
+ // Macros:
+ ungated!(automatically_derived, Normal, template!(Word), WarnFollowing),
+ ungated!(macro_use, Normal, template!(Word, List: "name1, name2, ..."), WarnFollowingWordOnly),
+ ungated!(macro_escape, Normal, template!(Word), WarnFollowing), // Deprecated synonym for `macro_use`.
+ ungated!(macro_export, Normal, template!(Word, List: "local_inner_macros"), WarnFollowing),
+ ungated!(proc_macro, Normal, template!(Word), ErrorFollowing),
+ ungated!(
+ proc_macro_derive, Normal,
+ template!(List: "TraitName, /*opt*/ attributes(name1, name2, ...)"), ErrorFollowing,
+ ),
+ ungated!(proc_macro_attribute, Normal, template!(Word), ErrorFollowing),
+
+ // Lints:
+ ungated!(
+ warn, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk
+ ),
+ ungated!(
+ allow, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk
+ ),
+ gated!(
+ expect, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk,
+ lint_reasons, experimental!(expect)
+ ),
+ ungated!(
+ forbid, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk
+ ),
+ ungated!(
+ deny, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#), DuplicatesOk
+ ),
+ ungated!(must_use, Normal, template!(Word, NameValueStr: "reason"), FutureWarnFollowing),
+ gated!(
+ must_not_suspend, Normal, template!(Word, NameValueStr: "reason"), WarnFollowing,
+ experimental!(must_not_suspend)
+ ),
+ ungated!(
+ deprecated, Normal,
+ template!(
+ Word,
+ List: r#"/*opt*/ since = "version", /*opt*/ note = "reason""#,
+ NameValueStr: "reason"
+ ),
+ ErrorFollowing
+ ),
+
+ // Crate properties:
+ ungated!(crate_name, CrateLevel, template!(NameValueStr: "name"), FutureWarnFollowing),
+ ungated!(crate_type, CrateLevel, template!(NameValueStr: "bin|lib|..."), DuplicatesOk),
+ // crate_id is deprecated
+ ungated!(crate_id, CrateLevel, template!(NameValueStr: "ignored"), FutureWarnFollowing),
+
+ // ABI, linking, symbols, and FFI
+ ungated!(
+ link, Normal,
+ template!(List: r#"name = "...", /*opt*/ kind = "dylib|static|...", /*opt*/ wasm_import_module = "...""#),
+ DuplicatesOk,
+ ),
+ ungated!(link_name, Normal, template!(NameValueStr: "name"), FutureWarnPreceding),
+ ungated!(no_link, Normal, template!(Word), WarnFollowing),
+ ungated!(repr, Normal, template!(List: "C"), DuplicatesOk),
+ ungated!(export_name, Normal, template!(NameValueStr: "name"), FutureWarnPreceding),
+ ungated!(link_section, Normal, template!(NameValueStr: "name"), FutureWarnPreceding),
+ ungated!(no_mangle, Normal, template!(Word), WarnFollowing, @only_local: true),
+ ungated!(used, Normal, template!(Word, List: "compiler|linker"), WarnFollowing, @only_local: true),
+
+ // Limits:
+ ungated!(recursion_limit, CrateLevel, template!(NameValueStr: "N"), FutureWarnFollowing),
+ ungated!(type_length_limit, CrateLevel, template!(NameValueStr: "N"), FutureWarnFollowing),
+ gated!(
+ const_eval_limit, CrateLevel, template!(NameValueStr: "N"), ErrorFollowing,
+ const_eval_limit, experimental!(const_eval_limit)
+ ),
+ gated!(
+ move_size_limit, CrateLevel, template!(NameValueStr: "N"), ErrorFollowing,
+ large_assignments, experimental!(move_size_limit)
+ ),
+
+ // Entry point:
+ ungated!(start, Normal, template!(Word), WarnFollowing),
+ ungated!(no_start, CrateLevel, template!(Word), WarnFollowing),
+ ungated!(no_main, CrateLevel, template!(Word), WarnFollowing),
+
+ // Modules, prelude, and resolution:
+ ungated!(path, Normal, template!(NameValueStr: "file"), FutureWarnFollowing),
+ ungated!(no_std, CrateLevel, template!(Word), WarnFollowing),
+ ungated!(no_implicit_prelude, Normal, template!(Word), WarnFollowing),
+ ungated!(non_exhaustive, Normal, template!(Word), WarnFollowing),
+
+ // Runtime
+ ungated!(
+ windows_subsystem, CrateLevel,
+ template!(NameValueStr: "windows|console"), FutureWarnFollowing
+ ),
+ ungated!(panic_handler, Normal, template!(Word), WarnFollowing), // RFC 2070
+
+ // Code generation:
+ ungated!(inline, Normal, template!(Word, List: "always|never"), FutureWarnFollowing, @only_local: true),
+ ungated!(cold, Normal, template!(Word), WarnFollowing, @only_local: true),
+ ungated!(no_builtins, CrateLevel, template!(Word), WarnFollowing),
+ ungated!(target_feature, Normal, template!(List: r#"enable = "name""#), DuplicatesOk),
+ ungated!(track_caller, Normal, template!(Word), WarnFollowing),
+ gated!(
+ no_sanitize, Normal,
+ template!(List: "address, memory, thread"), DuplicatesOk,
+ experimental!(no_sanitize)
+ ),
+ gated!(no_coverage, Normal, template!(Word), WarnFollowing, experimental!(no_coverage)),
+
+ ungated!(
+ doc, Normal, template!(List: "hidden|inline|...", NameValueStr: "string"), DuplicatesOk
+ ),
+
+ // ==========================================================================
+ // Unstable attributes:
+ // ==========================================================================
+
+ // RFC #3191: #[debugger_visualizer] support
+ gated!(
+ debugger_visualizer, Normal, template!(List: r#"natvis_file = "...", gdb_script_file = "...""#),
+ DuplicatesOk, experimental!(debugger_visualizer)
+ ),
+
+ // Linking:
+ gated!(naked, Normal, template!(Word), WarnFollowing, @only_local: true, naked_functions, experimental!(naked)),
+ gated!(
+ link_ordinal, Normal, template!(List: "ordinal"), ErrorPreceding, raw_dylib,
+ experimental!(link_ordinal)
+ ),
+
+ // Plugins:
+ BuiltinAttribute {
+ name: sym::plugin,
+ only_local: false,
+ type_: CrateLevel,
+ template: template!(List: "name"),
+ duplicates: DuplicatesOk,
+ gate: Gated(
+ Stability::Deprecated(
+ "https://github.com/rust-lang/rust/pull/64675",
+ Some("may be removed in a future compiler version"),
+ ),
+ sym::plugin,
+ "compiler plugins are deprecated",
+ cfg_fn!(plugin)
+ ),
+ },
+
+ // Testing:
+ gated!(
+ test_runner, CrateLevel, template!(List: "path"), ErrorFollowing, custom_test_frameworks,
+ "custom test frameworks are an unstable feature",
+ ),
+ // RFC #1268
+ gated!(
+ marker, Normal, template!(Word), WarnFollowing, marker_trait_attr, experimental!(marker)
+ ),
+ gated!(
+ thread_local, Normal, template!(Word), WarnFollowing,
+ "`#[thread_local]` is an experimental feature, and does not currently handle destructors",
+ ),
+ gated!(no_core, CrateLevel, template!(Word), WarnFollowing, experimental!(no_core)),
+ // RFC 2412
+ gated!(
+ optimize, Normal, template!(List: "size|speed"), ErrorPreceding, optimize_attribute,
+ experimental!(optimize),
+ ),
+ // RFC 2867
+ gated!(
+ instruction_set, Normal, template!(List: "set"), ErrorPreceding,
+ isa_attribute, experimental!(instruction_set)
+ ),
+
+ gated!(
+ ffi_returns_twice, Normal, template!(Word), WarnFollowing, experimental!(ffi_returns_twice)
+ ),
+ gated!(ffi_pure, Normal, template!(Word), WarnFollowing, experimental!(ffi_pure)),
+ gated!(ffi_const, Normal, template!(Word), WarnFollowing, experimental!(ffi_const)),
+ gated!(
+ register_attr, CrateLevel, template!(List: "attr1, attr2, ..."), DuplicatesOk,
+ experimental!(register_attr),
+ ),
+ gated!(
+ register_tool, CrateLevel, template!(List: "tool1, tool2, ..."), DuplicatesOk,
+ experimental!(register_tool),
+ ),
+
+ gated!(
+ cmse_nonsecure_entry, Normal, template!(Word), WarnFollowing,
+ experimental!(cmse_nonsecure_entry)
+ ),
+ // RFC 2632
+ gated!(
+ const_trait, Normal, template!(Word), WarnFollowing, const_trait_impl,
+ "`const` is a temporary placeholder for marking a trait that is suitable for `const` \
+ `impls` and all default bodies as `const`, which may be removed or renamed in the \
+ future."
+ ),
+ // lang-team MCP 147
+ gated!(
+ deprecated_safe, Normal, template!(List: r#"since = "version", note = "...""#), ErrorFollowing,
+ experimental!(deprecated_safe),
+ ),
+
+ // ==========================================================================
+ // Internal attributes: Stability, deprecation, and unsafe:
+ // ==========================================================================
+
+ ungated!(feature, CrateLevel, template!(List: "name1, name2, ..."), DuplicatesOk),
+ // DuplicatesOk since it has its own validation
+ ungated!(
+ stable, Normal, template!(List: r#"feature = "name", since = "version""#), DuplicatesOk,
+ ),
+ ungated!(
+ unstable, Normal,
+ template!(List: r#"feature = "name", reason = "...", issue = "N""#), DuplicatesOk,
+ ),
+ ungated!(rustc_const_unstable, Normal, template!(List: r#"feature = "name""#), DuplicatesOk),
+ ungated!(rustc_const_stable, Normal, template!(List: r#"feature = "name""#), DuplicatesOk),
+ gated!(
+ allow_internal_unstable, Normal, template!(Word, List: "feat1, feat2, ..."), DuplicatesOk,
+ "allow_internal_unstable side-steps feature gating and stability checks",
+ ),
+ gated!(
+ rustc_allow_const_fn_unstable, Normal,
+ template!(Word, List: "feat1, feat2, ..."), DuplicatesOk,
+ "rustc_allow_const_fn_unstable side-steps feature gating and stability checks"
+ ),
+ gated!(
+ allow_internal_unsafe, Normal, template!(Word), WarnFollowing,
+ "allow_internal_unsafe side-steps the unsafe_code lint",
+ ),
+ rustc_attr!(rustc_allowed_through_unstable_modules, Normal, template!(Word), WarnFollowing,
+ "rustc_allowed_through_unstable_modules special cases accidental stabilizations of stable items \
+ through unstable paths"),
+
+ // ==========================================================================
+ // Internal attributes: Type system related:
+ // ==========================================================================
+
+ gated!(fundamental, Normal, template!(Word), WarnFollowing, experimental!(fundamental)),
+ gated!(
+ may_dangle, Normal, template!(Word), WarnFollowing, dropck_eyepatch,
+ "`may_dangle` has unstable semantics and may be removed in the future",
+ ),
+
+ // ==========================================================================
+ // Internal attributes: Runtime related:
+ // ==========================================================================
+
+ rustc_attr!(rustc_allocator, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
+ rustc_attr!(rustc_allocator_nounwind, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
+ rustc_attr!(rustc_reallocator, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
+ rustc_attr!(rustc_deallocator, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
+ rustc_attr!(rustc_allocator_zeroed, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
+ gated!(
+ alloc_error_handler, Normal, template!(Word), WarnFollowing,
+ experimental!(alloc_error_handler)
+ ),
+ gated!(
+ default_lib_allocator, Normal, template!(Word), WarnFollowing, allocator_internals,
+ experimental!(default_lib_allocator),
+ ),
+ gated!(
+ needs_allocator, Normal, template!(Word), WarnFollowing, allocator_internals,
+ experimental!(needs_allocator),
+ ),
+ gated!(panic_runtime, Normal, template!(Word), WarnFollowing, experimental!(panic_runtime)),
+ gated!(
+ needs_panic_runtime, Normal, template!(Word), WarnFollowing,
+ experimental!(needs_panic_runtime)
+ ),
+ gated!(
+ compiler_builtins, Normal, template!(Word), WarnFollowing,
+ "the `#[compiler_builtins]` attribute is used to identify the `compiler_builtins` crate \
+ which contains compiler-rt intrinsics and will never be stable",
+ ),
+ gated!(
+ profiler_runtime, Normal, template!(Word), WarnFollowing,
+ "the `#[profiler_runtime]` attribute is used to identify the `profiler_builtins` crate \
+ which contains the profiler runtime and will never be stable",
+ ),
+
+ // ==========================================================================
+ // Internal attributes, Linkage:
+ // ==========================================================================
+
+ gated!(
+ linkage, Normal, template!(NameValueStr: "external|internal|..."), ErrorPreceding, @only_local: true,
+ "the `linkage` attribute is experimental and not portable across platforms",
+ ),
+ rustc_attr!(
+ rustc_std_internal_symbol, Normal, template!(Word), WarnFollowing, @only_local: true, INTERNAL_UNSTABLE
+ ),
+
+ // ==========================================================================
+ // Internal attributes, Macro related:
+ // ==========================================================================
+
+ rustc_attr!(
+ rustc_builtin_macro, Normal,
+ template!(Word, List: "name, /*opt*/ attributes(name1, name2, ...)"), ErrorFollowing,
+ IMPL_DETAIL,
+ ),
+ rustc_attr!(rustc_proc_macro_decls, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE),
+ rustc_attr!(
+ rustc_macro_transparency, Normal,
+ template!(NameValueStr: "transparent|semitransparent|opaque"), ErrorFollowing,
+ "used internally for testing macro hygiene",
+ ),
+
+ // ==========================================================================
+ // Internal attributes, Diagnostics related:
+ // ==========================================================================
+
+ rustc_attr!(
+ rustc_on_unimplemented, Normal,
+ template!(
+ List: r#"/*opt*/ message = "...", /*opt*/ label = "...", /*opt*/ note = "...""#,
+ NameValueStr: "message"
+ ),
+ ErrorFollowing,
+ INTERNAL_UNSTABLE
+ ),
+ // Enumerates "identity-like" conversion methods to suggest on type mismatch.
+ rustc_attr!(
+ rustc_conversion_suggestion, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE
+ ),
+ // Prevents field reads in the marked trait or method from being considered
+ // during dead code analysis.
+ rustc_attr!(
+ rustc_trivial_field_reads, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE
+ ),
+ // Used by the `rustc::potential_query_instability` lint to warn about methods which
+ // might not be stable during incremental compilation.
+ rustc_attr!(rustc_lint_query_instability, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE),
+ // Used by the `rustc::untranslatable_diagnostic` and `rustc::diagnostic_outside_of_impl` lints
+ // to assist in changes to diagnostic APIs.
+ rustc_attr!(rustc_lint_diagnostics, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE),
+ // Used by the `rustc::bad_opt_access` lint to identify `DebuggingOptions` and `CodegenOptions`
+ // types (as well as any others in future).
+ rustc_attr!(rustc_lint_opt_ty, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE),
+ // Used by the `rustc::bad_opt_access` lint on fields of those option
+ // types (as well as any others in future).
+ rustc_attr!(rustc_lint_opt_deny_field_access, Normal, template!(List: "message"), WarnFollowing, INTERNAL_UNSTABLE),
+
+ // ==========================================================================
+ // Internal attributes, Const related:
+ // ==========================================================================
+
+ rustc_attr!(rustc_promotable, Normal, template!(Word), WarnFollowing, IMPL_DETAIL),
+ rustc_attr!(
+ rustc_legacy_const_generics, Normal, template!(List: "N"), ErrorFollowing,
+ INTERNAL_UNSTABLE
+ ),
+ // Do not const-check this function's body. It will always get replaced during CTFE.
+ rustc_attr!(
+ rustc_do_not_const_check, Normal, template!(Word), WarnFollowing, INTERNAL_UNSTABLE
+ ),
+
+ // ==========================================================================
+ // Internal attributes, Layout related:
+ // ==========================================================================
+
+ rustc_attr!(
+ rustc_layout_scalar_valid_range_start, Normal, template!(List: "value"), ErrorFollowing,
+ "the `#[rustc_layout_scalar_valid_range_start]` attribute is just used to enable \
+ niche optimizations in libcore and libstd and will never be stable",
+ ),
+ rustc_attr!(
+ rustc_layout_scalar_valid_range_end, Normal, template!(List: "value"), ErrorFollowing,
+ "the `#[rustc_layout_scalar_valid_range_end]` attribute is just used to enable \
+ niche optimizations in libcore and libstd and will never be stable",
+ ),
+ rustc_attr!(
+ rustc_nonnull_optimization_guaranteed, Normal, template!(Word), WarnFollowing,
+ "the `#[rustc_nonnull_optimization_guaranteed]` attribute is just used to enable \
+ niche optimizations in libcore and libstd and will never be stable",
+ ),
+
+ // ==========================================================================
+ // Internal attributes, Misc:
+ // ==========================================================================
+ gated!(
+ lang, Normal, template!(NameValueStr: "name"), DuplicatesOk, @only_local: true, lang_items,
+ "language items are subject to change",
+ ),
+ rustc_attr!(
+ rustc_pass_by_value, Normal,
+ template!(Word), ErrorFollowing,
+ "#[rustc_pass_by_value] is used to mark types that must be passed by value instead of reference."
+ ),
+ rustc_attr!(
+ rustc_coherence_is_core, AttributeType::CrateLevel, template!(Word), ErrorFollowing, @only_local: true,
+ "#![rustc_coherence_is_core] allows inherent methods on builtin types, only intended to be used in `core`."
+ ),
+ rustc_attr!(
+ rustc_allow_incoherent_impl, AttributeType::Normal, template!(Word), ErrorFollowing, @only_local: true,
+ "#[rustc_allow_incoherent_impl] has to be added to all impl items of an incoherent inherent impl."
+ ),
+ rustc_attr!(
+ rustc_has_incoherent_inherent_impls, AttributeType::Normal, template!(Word), ErrorFollowing,
+ "#[rustc_has_incoherent_inherent_impls] allows the addition of incoherent inherent impls for \
+ the given type by annotating all impl items with #[rustc_allow_incoherent_impl]."
+ ),
+ rustc_attr!(
+ rustc_box, AttributeType::Normal, template!(Word), ErrorFollowing,
+ "#[rustc_box] allows creating boxes \
+ and it is only intended to be used in `alloc`."
+ ),
+
+ BuiltinAttribute {
+ name: sym::rustc_diagnostic_item,
+ // FIXME: This can be `true` once we always use `tcx.is_diagnostic_item`.
+ only_local: false,
+ type_: Normal,
+ template: template!(NameValueStr: "name"),
+ duplicates: ErrorFollowing,
+ gate: Gated(
+ Stability::Unstable,
+ sym::rustc_attrs,
+ "diagnostic items compiler internal support for linting",
+ cfg_fn!(rustc_attrs),
+ ),
+ },
+ gated!(
+ // Used in resolve:
+ prelude_import, Normal, template!(Word), WarnFollowing,
+ "`#[prelude_import]` is for use by rustc only",
+ ),
+ gated!(
+ rustc_paren_sugar, Normal, template!(Word), WarnFollowing, unboxed_closures,
+ "unboxed_closures are still evolving",
+ ),
+ rustc_attr!(
+ rustc_inherit_overflow_checks, Normal, template!(Word), WarnFollowing, @only_local: true,
+ "the `#[rustc_inherit_overflow_checks]` attribute is just used to control \
+ overflow checking behavior of several libcore functions that are inlined \
+ across crates and will never be stable",
+ ),
+ rustc_attr!(
+ rustc_reservation_impl, Normal,
+ template!(NameValueStr: "reservation message"), ErrorFollowing,
+ "the `#[rustc_reservation_impl]` attribute is internally used \
+ for reserving the `for<T> From<!> for T` impl"
+ ),
+ rustc_attr!(
+ rustc_test_marker, Normal, template!(Word), WarnFollowing,
+ "the `#[rustc_test_marker]` attribute is used internally to track tests",
+ ),
+ rustc_attr!(
+ rustc_unsafe_specialization_marker, Normal, template!(Word), WarnFollowing,
+ "the `#[rustc_unsafe_specialization_marker]` attribute is used to check specializations"
+ ),
+ rustc_attr!(
+ rustc_specialization_trait, Normal, template!(Word), WarnFollowing,
+ "the `#[rustc_specialization_trait]` attribute is used to check specializations"
+ ),
+ rustc_attr!(
+ rustc_main, Normal, template!(Word), WarnFollowing,
+ "the `#[rustc_main]` attribute is used internally to specify test entry point function",
+ ),
+ rustc_attr!(
+ rustc_skip_array_during_method_dispatch, Normal, template!(Word), WarnFollowing,
+ "the `#[rustc_skip_array_during_method_dispatch]` attribute is used to exclude a trait \
+ from method dispatch when the receiver is an array, for compatibility in editions < 2021."
+ ),
+ rustc_attr!(
+ rustc_must_implement_one_of, Normal, template!(List: "function1, function2, ..."), ErrorFollowing,
+ "the `#[rustc_must_implement_one_of]` attribute is used to change minimal complete \
+ definition of a trait, it's currently in experimental form and should be changed before \
+ being exposed outside of the std"
+ ),
+
+ // ==========================================================================
+ // Internal attributes, Testing:
+ // ==========================================================================
+
+ rustc_attr!(TEST, rustc_outlives, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_capture_analysis, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_insignificant_dtor, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_strict_coherence, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_variance, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_layout, Normal, template!(List: "field1, field2, ..."), WarnFollowing),
+ rustc_attr!(TEST, rustc_regions, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(
+ TEST, rustc_error, Normal,
+ template!(Word, List: "delay_span_bug_from_inside_query"), WarnFollowingWordOnly
+ ),
+ rustc_attr!(TEST, rustc_dump_user_substs, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_evaluate_where_clauses, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(
+ TEST, rustc_if_this_changed, Normal, template!(Word, List: "DepNode"), DuplicatesOk
+ ),
+ rustc_attr!(
+ TEST, rustc_then_this_would_need, Normal, template!(List: "DepNode"), DuplicatesOk
+ ),
+ rustc_attr!(
+ TEST, rustc_clean, Normal,
+ template!(List: r#"cfg = "...", /*opt*/ label = "...", /*opt*/ except = "...""#),
+ DuplicatesOk,
+ ),
+ rustc_attr!(
+ TEST, rustc_partition_reused, Normal,
+ template!(List: r#"cfg = "...", module = "...""#), DuplicatesOk,
+ ),
+ rustc_attr!(
+ TEST, rustc_partition_codegened, Normal,
+ template!(List: r#"cfg = "...", module = "...""#), DuplicatesOk,
+ ),
+ rustc_attr!(
+ TEST, rustc_expected_cgu_reuse, Normal,
+ template!(List: r#"cfg = "...", module = "...", kind = "...""#), DuplicatesOk,
+ ),
+ rustc_attr!(TEST, rustc_symbol_name, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_polymorphize_error, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_def_path, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_mir, Normal, template!(List: "arg1, arg2, ..."), DuplicatesOk),
+ rustc_attr!(TEST, rustc_dump_program_clauses, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_dump_env_program_clauses, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_object_lifetime_default, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_dump_vtable, Normal, template!(Word), WarnFollowing),
+ rustc_attr!(TEST, rustc_dummy, Normal, template!(Word /* doesn't matter*/), DuplicatesOk),
+ gated!(
+ omit_gdb_pretty_printer_section, Normal, template!(Word), WarnFollowing,
+ "the `#[omit_gdb_pretty_printer_section]` attribute is just used for the Rust test suite",
+ ),
+];
+
+pub fn deprecated_attributes() -> Vec<&'static BuiltinAttribute> {
+ BUILTIN_ATTRIBUTES.iter().filter(|attr| attr.gate.is_deprecated()).collect()
+}
+
+pub fn is_builtin_attr_name(name: Symbol) -> bool {
+ BUILTIN_ATTRIBUTE_MAP.get(&name).is_some()
+}
+
+pub fn is_builtin_only_local(name: Symbol) -> bool {
+ BUILTIN_ATTRIBUTE_MAP.get(&name).map_or(false, |attr| attr.only_local)
+}
+
+pub static BUILTIN_ATTRIBUTE_MAP: LazyLock<FxHashMap<Symbol, &BuiltinAttribute>> =
+ LazyLock::new(|| {
+ let mut map = FxHashMap::default();
+ for attr in BUILTIN_ATTRIBUTES.iter() {
+ if map.insert(attr.name, attr).is_some() {
+ panic!("duplicate builtin attribute `{}`", attr.name);
+ }
+ }
+ map
+ });
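+// Usage sketch (illustrative): `BUILTIN_ATTRIBUTE_MAP.get(&sym::inline)` yields
+// the `BuiltinAttribute` describing `#[inline]`; `is_builtin_attr_name` and
+// `is_builtin_only_local` above are thin wrappers over exactly this lookup.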
diff --git a/compiler/rustc_feature/src/lib.rs b/compiler/rustc_feature/src/lib.rs
new file mode 100644
index 000000000..efb830527
--- /dev/null
+++ b/compiler/rustc_feature/src/lib.rs
@@ -0,0 +1,155 @@
+//! # Feature gates
+//!
+//! This crate declares the set of past and present unstable features in the compiler.
+//! Feature gate checking itself is done in `rustc_ast_passes/src/feature_gate.rs`
+//! at the moment.
+//!
+//! Features are enabled in programs via the crate-level attributes of
+//! `#![feature(...)]` with a comma-separated list of features.
+//!
+//! For the purpose of future feature-tracking, once a feature gate is added,
+//! even if it is stabilized or removed, *do not remove it*. Instead, move the
+//! symbol to the `accepted` or `removed` modules respectively.
+
+#![feature(once_cell)]
+
+mod accepted;
+mod active;
+mod builtin_attrs;
+mod removed;
+
+#[cfg(test)]
+mod tests;
+
+use rustc_span::{edition::Edition, symbol::Symbol, Span};
+use std::fmt;
+use std::num::NonZeroU32;
+
+#[derive(Clone, Copy)]
+pub enum State {
+ Accepted,
+ Active { set: fn(&mut Features, Span) },
+ Removed { reason: Option<&'static str> },
+ Stabilized { reason: Option<&'static str> },
+}
+
+impl fmt::Debug for State {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ State::Accepted { .. } => write!(f, "accepted"),
+ State::Active { .. } => write!(f, "active"),
+ State::Removed { .. } => write!(f, "removed"),
+ State::Stabilized { .. } => write!(f, "stabilized"),
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct Feature {
+ pub state: State,
+ pub name: Symbol,
+ pub since: &'static str,
+ issue: Option<NonZeroU32>,
+ pub edition: Option<Edition>,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum Stability {
+ Unstable,
+ // First argument is tracking issue link; second argument is an optional
+ // help message, which defaults to "remove this attribute".
+ Deprecated(&'static str, Option<&'static str>),
+}
+
+#[derive(Clone, Copy, Debug, Hash)]
+pub enum UnstableFeatures {
+ /// Hard errors for unstable features are active, as on beta/stable channels.
+ Disallow,
+ /// Allow features to be activated, as on nightly.
+ Allow,
+ /// Errors are bypassed for bootstrapping. This is required any time
+ /// during the build that feature-related lints are set to warn or above
+ /// because the build turns on warnings-as-errors and uses lots of unstable
+ /// features. As a result, this is always required for building Rust itself.
+ Cheat,
+}
+
+impl UnstableFeatures {
+ /// This takes into account `RUSTC_BOOTSTRAP`.
+ ///
+ /// If `krate` is [`Some`], then setting `RUSTC_BOOTSTRAP=krate` will enable the nightly features.
+ /// Otherwise, only `RUSTC_BOOTSTRAP=1` will work.
+ pub fn from_environment(krate: Option<&str>) -> Self {
+ // `true` if this is a feature-staged build, i.e., on the beta or stable channel.
+ let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
+ // Returns whether `krate` should be counted as unstable
+ let is_unstable_crate = |var: &str| {
+ krate.map_or(false, |name| var.split(',').any(|new_krate| new_krate == name))
+ };
+ // `true` if we should enable unstable features for bootstrapping.
+ let bootstrap = std::env::var("RUSTC_BOOTSTRAP")
+ .map_or(false, |var| var == "1" || is_unstable_crate(&var));
+ match (disable_unstable_features, bootstrap) {
+ (_, true) => UnstableFeatures::Cheat,
+ (true, _) => UnstableFeatures::Disallow,
+ (false, _) => UnstableFeatures::Allow,
+ }
+ }
+
+ pub fn is_nightly_build(&self) -> bool {
+ match *self {
+ UnstableFeatures::Allow | UnstableFeatures::Cheat => true,
+ UnstableFeatures::Disallow => false,
+ }
+ }
+}
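+// Illustrative behavior (not in the original source): with `RUSTC_BOOTSTRAP=1`
+// set, `UnstableFeatures::from_environment(None)` returns `Cheat`; on a
+// feature-staged (beta/stable) build without it, `Disallow`; otherwise `Allow`.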
+
+fn find_lang_feature_issue(feature: Symbol) -> Option<NonZeroU32> {
+ if let Some(info) = ACTIVE_FEATURES.iter().find(|t| t.name == feature) {
+ // FIXME (#28244): enforce that active features have issue numbers
+ // assert!(info.issue.is_some())
+ info.issue
+ } else {
+ // search in Accepted, Removed, or Stable Removed features
+ let found = ACCEPTED_FEATURES
+ .iter()
+ .chain(REMOVED_FEATURES)
+ .chain(STABLE_REMOVED_FEATURES)
+ .find(|t| t.name == feature);
+ match found {
+ Some(found) => found.issue,
+ None => panic!("feature `{}` is not declared anywhere", feature),
+ }
+ }
+}
+
+const fn to_nonzero(n: Option<u32>) -> Option<NonZeroU32> {
+ // Can be replaced with `n.and_then(NonZeroU32::new)` if that is ever usable
+ // in const context. Requires https://github.com/rust-lang/rfcs/pull/2632.
+ match n {
+ None => None,
+ Some(n) => NonZeroU32::new(n),
+ }
+}
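+// Illustrative: `to_nonzero(Some(0))` is `None` (zero is not a valid
+// `NonZeroU32`), while `to_nonzero(Some(44874))` is `Some(44874)` as a `NonZeroU32`.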
+
+pub enum GateIssue {
+ Language,
+ Library(Option<NonZeroU32>),
+}
+
+pub fn find_feature_issue(feature: Symbol, issue: GateIssue) -> Option<NonZeroU32> {
+ match issue {
+ GateIssue::Language => find_lang_feature_issue(feature),
+ GateIssue::Library(lib) => lib,
+ }
+}
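+// Usage sketch (illustrative): `find_feature_issue(sym::let_else, GateIssue::Language)`
+// searches the active/accepted/removed tables and returns `Some(87335)`, the
+// tracking issue recorded for `let_else` in `active.rs`.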
+
+pub use accepted::ACCEPTED_FEATURES;
+pub use active::{Features, ACTIVE_FEATURES, INCOMPATIBLE_FEATURES};
+pub use builtin_attrs::AttributeDuplicates;
+pub use builtin_attrs::{
+ deprecated_attributes, find_gated_cfg, is_builtin_attr_name, is_builtin_only_local,
+ AttributeGate, AttributeTemplate, AttributeType, BuiltinAttribute, GatedCfg,
+ BUILTIN_ATTRIBUTES, BUILTIN_ATTRIBUTE_MAP,
+};
+pub use removed::{REMOVED_FEATURES, STABLE_REMOVED_FEATURES};
diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs
new file mode 100644
index 000000000..2ddaf9201
--- /dev/null
+++ b/compiler/rustc_feature/src/removed.rs
@@ -0,0 +1,203 @@
+//! List of the removed feature gates.
+
+use super::{to_nonzero, Feature, State};
+use rustc_span::symbol::sym;
+
+macro_rules! declare_features {
+ ($(
+ $(#[doc = $doc:tt])* (removed, $feature:ident, $ver:expr, $issue:expr, None, $reason:expr),
+ )+) => {
+ /// Represents unstable features which have since been removed (they were once `Active`).
+ pub const REMOVED_FEATURES: &[Feature] = &[
+ $(
+ Feature {
+ state: State::Removed { reason: $reason },
+ name: sym::$feature,
+ since: $ver,
+ issue: to_nonzero($issue),
+ edition: None,
+ }
+ ),+
+ ];
+ };
+
+ ($(
+ $(#[doc = $doc:tt])* (stable_removed, $feature:ident, $ver:expr, $issue:expr, None),
+ )+) => {
+ /// Represents stable features which have since been removed (they were once `Accepted`).
+ pub const STABLE_REMOVED_FEATURES: &[Feature] = &[
+ $(
+ Feature {
+ state: State::Stabilized { reason: None },
+ name: sym::$feature,
+ since: $ver,
+ issue: to_nonzero($issue),
+ edition: None,
+ }
+ ),+
+ ];
+ };
+}
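+// Illustrative expansion (not in the original source): an entry such as
+// `(removed, allow_fail, "1.19.0", Some(46488), None, Some("..."))` becomes a
+// `Feature` with `state: State::Removed { reason }`, `since: "1.19.0"`, and
+// `issue: to_nonzero(Some(46488))` inside `REMOVED_FEATURES`.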
+
+#[rustfmt::skip]
+declare_features! (
+ // -------------------------------------------------------------------------
+ // feature-group-start: removed features
+ // -------------------------------------------------------------------------
+
+ (removed, advanced_slice_patterns, "1.0.0", Some(62254), None,
+ Some("merged into `#![feature(slice_patterns)]`")),
+ (removed, allocator, "1.0.0", None, None, None),
+ /// Allows a test to fail without failing the whole suite.
+ (removed, allow_fail, "1.19.0", Some(46488), None, Some("removed due to no clear use cases")),
+ (removed, await_macro, "1.38.0", Some(50547), None,
+ Some("subsumed by `.await` syntax")),
+ /// Allows comparing raw pointers during const eval.
+ (removed, const_compare_raw_pointers, "1.46.0", Some(53020), None,
+ Some("cannot be allowed in const eval in any meaningful way")),
+ /// Allows non-trivial generic constants which have to be manually propagated upwards.
+ (removed, const_evaluatable_checked, "1.48.0", Some(76560), None, Some("renamed to `generic_const_exprs`")),
+ /// Allows the definition of `const` functions with some advanced features.
+ (removed, const_fn, "1.54.0", Some(57563), None,
+ Some("split into finer-grained feature gates")),
+ /// Allows const generic types (e.g. `struct Foo<const N: usize>(...);`).
+ (removed, const_generics, "1.34.0", Some(44580), None,
+ Some("removed in favor of `#![feature(adt_const_params)]` and `#![feature(generic_const_exprs)]`")),
+ /// Allows `[x; N]` where `x` is a constant (RFC 2203).
+ (removed, const_in_array_repeat_expressions, "1.37.0", Some(49147), None,
+ Some("removed due to causing promotable bugs")),
+ /// Allows casting raw pointers to `usize` during const eval.
+ (removed, const_raw_ptr_to_usize_cast, "1.55.0", Some(51910), None,
+ Some("at compile-time, pointers do not have an integer value, so these casts cannot be properly supported")),
+ /// Allows `T: ?const Trait` syntax in bounds.
+ (removed, const_trait_bound_opt_out, "1.42.0", Some(67794), None,
+ Some("Removed in favor of `~const` bound in #![feature(const_trait_impl)]")),
+ /// Allows using `crate` as visibility modifier, synonymous with `pub(crate)`.
+ (removed, crate_visibility_modifier, "1.63.0", Some(53120), None, Some("removed in favor of `pub(crate)`")),
+ /// Allows using custom attributes (RFC 572).
+ (removed, custom_attribute, "1.0.0", Some(29642), None,
+ Some("removed in favor of `#![register_tool]` and `#![register_attr]`")),
+ /// Allows the use of `#[derive(Anything)]` as sugar for `#[derive_Anything]`.
+ (removed, custom_derive, "1.32.0", Some(29644), None,
+ Some("subsumed by `#[proc_macro_derive]`")),
+ /// Allows using `#[doc(keyword = "...")]`.
+ (removed, doc_keyword, "1.28.0", Some(51315), None,
+ Some("merged into `#![feature(rustdoc_internals)]`")),
+ /// Allows using `doc(primitive)` without a future-incompat warning.
+ (removed, doc_primitive, "1.56.0", Some(88070), None,
+ Some("merged into `#![feature(rustdoc_internals)]`")),
+ /// Allows `#[doc(spotlight)]`.
+ /// The attribute was renamed to `#[doc(notable_trait)]`
+ /// and the feature to `doc_notable_trait`.
+ (removed, doc_spotlight, "1.22.0", Some(45040), None,
+ Some("renamed to `doc_notable_trait`")),
+ /// Allows using `#[unsafe_destructor_blind_to_params]` (RFC 1238).
+ (removed, dropck_parametricity, "1.38.0", Some(28498), None, None),
+ /// Allows defining `existential type`s.
+ (removed, existential_type, "1.38.0", Some(63063), None,
+ Some("removed in favor of `#![feature(type_alias_impl_trait)]`")),
+ /// Paths of the form: `extern::foo::bar`
+ (removed, extern_in_paths, "1.33.0", Some(55600), None,
+ Some("subsumed by `::foo::bar` paths")),
+ /// Allows `#[doc(include = "some-file")]`.
+ (removed, external_doc, "1.54.0", Some(44732), None,
+ Some("use #[doc = include_str!(\"filename\")] instead, which handles macro invocations")),
+ /// Allows `impl Trait` in bindings (`let`, `const`, `static`).
+ (removed, impl_trait_in_bindings, "1.55.0", Some(63065), None,
+ Some("the implementation was not maintainable, the feature may get reintroduced once the current refactorings are done")),
+ (removed, import_shadowing, "1.0.0", None, None, None),
+ /// Allows in-band quantification of lifetime bindings (e.g., `fn foo(x: &'a u8) -> &'a u8`).
+ (removed, in_band_lifetimes, "1.23.0", Some(44524), None,
+ Some("removed due to unsolved ergonomic questions and added lifetime resolution complexity")),
+ /// Allows inferring `'static` outlives requirements (RFC 2093).
+ (removed, infer_static_outlives_requirements, "1.63.0", Some(54185), None,
+ Some("removed as it caused some confusion and discussion was inactive for years")),
+ /// Lazily evaluate constants. This allows constants to depend on type parameters.
+ (removed, lazy_normalization_consts, "1.46.0", Some(72219), None, Some("superseded by `generic_const_exprs`")),
+ /// Allows using the `#[link_args]` attribute.
+ (removed, link_args, "1.53.0", Some(29596), None,
+ Some("removed in favor of using `-C link-arg=ARG` on command line, \
+ which is available from cargo build scripts with `cargo:rustc-link-arg` now")),
+ (removed, macro_reexport, "1.0.0", Some(29638), None,
+ Some("subsumed by `pub use`")),
+ /// Allows using `#[main]` to replace the entrypoint `#[lang = "start"]` calls.
+ (removed, main, "1.53.0", Some(29634), None, None),
+ (removed, managed_boxes, "1.0.0", None, None, None),
+ /// Allows the use of type alias impl trait in function return positions
+ (removed, min_type_alias_impl_trait, "1.56.0", Some(63063), None,
+ Some("removed in favor of full type_alias_impl_trait")),
+ (removed, needs_allocator, "1.4.0", Some(27389), None,
+ Some("subsumed by `#![feature(allocator_internals)]`")),
+    /// Allows use of unary negate on unsigned integers, e.g., `-e` for `e: u8`.
+ (removed, negate_unsigned, "1.0.0", Some(29645), None, None),
+ /// Allows `#[no_debug]`.
+ (removed, no_debug, "1.43.0", Some(29721), None, Some("removed due to lack of demand")),
+ /// Allows using `#[on_unimplemented(..)]` on traits.
+ /// (Moved to `rustc_attrs`.)
+ (removed, on_unimplemented, "1.40.0", None, None, None),
+ /// A way to temporarily opt out of opt in copy. This will *never* be accepted.
+ (removed, opt_out_copy, "1.0.0", None, None, None),
+ /// Allows features specific to OIBIT (now called auto traits).
+ /// Renamed to `auto_traits`.
+ (removed, optin_builtin_traits, "1.0.0", Some(13231), None,
+ Some("renamed to `auto_traits`")),
+ /// Allows overlapping impls of marker traits.
+ (removed, overlapping_marker_traits, "1.42.0", Some(29864), None,
+ Some("removed in favor of `#![feature(marker_trait_attr)]`")),
+ (removed, panic_implementation, "1.28.0", Some(44489), None,
+ Some("subsumed by `#[panic_handler]`")),
+ /// Allows using `#[plugin_registrar]` on functions.
+ (removed, plugin_registrar, "1.54.0", Some(29597), None,
+ Some("a __rustc_plugin_registrar symbol must now be defined instead")),
+ (removed, proc_macro_expr, "1.27.0", Some(54727), None,
+ Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
+ (removed, proc_macro_gen, "1.27.0", Some(54727), None,
+ Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
+ (removed, proc_macro_mod, "1.27.0", Some(54727), None,
+ Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
+ (removed, proc_macro_non_items, "1.27.0", Some(54727), None,
+ Some("subsumed by `#![feature(proc_macro_hygiene)]`")),
+ (removed, pub_macro_rules, "1.53.0", Some(78855), None,
+ Some("removed due to being incomplete, in particular it does not work across crates")),
+ (removed, pushpop_unsafe, "1.2.0", None, None, None),
+ (removed, quad_precision_float, "1.0.0", None, None, None),
+ (removed, quote, "1.33.0", Some(29601), None, None),
+ (removed, reflect, "1.0.0", Some(27749), None, None),
+ /// Allows using the macros:
+ /// + `__diagnostic_used`
+ /// + `__register_diagnostic`
+    /// + `__build_diagnostic_array`
+ (removed, rustc_diagnostic_macros, "1.38.0", None, None, None),
+ /// Allows identifying crates that contain sanitizer runtimes.
+ (removed, sanitizer_runtime, "1.17.0", None, None, None),
+ (removed, simd, "1.0.0", Some(27731), None,
+ Some("removed in favor of `#[repr(simd)]`")),
+ /// Allows `#[link(kind = "static-nobundle", ...)]`.
+ (removed, static_nobundle, "1.16.0", Some(37403), None,
+ Some(r#"subsumed by `#[link(kind = "static", modifiers = "-bundle", ...)]`"#)),
+ (removed, struct_inherit, "1.0.0", None, None, None),
+ (removed, test_removed_feature, "1.0.0", None, None, None),
+ /// Allows using items which are missing stability attributes
+ (removed, unmarked_api, "1.0.0", None, None, None),
+ (removed, unsafe_no_drop_flag, "1.0.0", None, None, None),
+ /// Allows `union` fields that don't implement `Copy` as long as they don't have any drop glue.
+ (removed, untagged_unions, "1.13.0", Some(55149), None,
+ Some("unions with `Copy` and `ManuallyDrop` fields are stable; there is no intent to stabilize more")),
+ /// Allows `#[unwind(..)]`.
+ ///
+ /// Permits specifying whether a function should permit unwinding or abort on unwind.
+ (removed, unwind_attributes, "1.56.0", Some(58760), None, Some("use the C-unwind ABI instead")),
+ (removed, visible_private_types, "1.0.0", None, None, None),
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+ // Features are listed in alphabetical order. Tidy will fail if you don't keep it this way.
+ // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!!
+
+ // -------------------------------------------------------------------------
+ // feature-group-end: removed features
+ // -------------------------------------------------------------------------
+);
+
+#[rustfmt::skip]
+declare_features! (
+ (stable_removed, no_stack_check, "1.0.0", None, None),
+);
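For orientation, a minimal sketch of consuming these tables from outside the crate, assuming `Feature`, `State`, and `REMOVED_FEATURES` are exported as declared above (nightly-only via `rustc_private`; the helper name is hypothetical):

```rust
#![feature(rustc_private)]
extern crate rustc_feature;
extern crate rustc_span;

use rustc_feature::{State, REMOVED_FEATURES};
use rustc_span::Symbol;

/// Hypothetical helper: the recorded reason, if any, for a feature's removal.
fn removal_reason(name: Symbol) -> Option<&'static str> {
    REMOVED_FEATURES.iter().find(|f| f.name == name).and_then(|f| match &f.state {
        State::Removed { reason } => *reason,
        _ => None,
    })
}
```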
diff --git a/compiler/rustc_feature/src/tests.rs b/compiler/rustc_feature/src/tests.rs
new file mode 100644
index 000000000..50433e44b
--- /dev/null
+++ b/compiler/rustc_feature/src/tests.rs
@@ -0,0 +1,23 @@
+use super::UnstableFeatures;
+
+#[test]
+fn rustc_bootstrap_parsing() {
+ let is_bootstrap = |env, krate| {
+ std::env::set_var("RUSTC_BOOTSTRAP", env);
+ matches!(UnstableFeatures::from_environment(krate), UnstableFeatures::Cheat)
+ };
+ assert!(is_bootstrap("1", None));
+ assert!(is_bootstrap("1", Some("x")));
+ // RUSTC_BOOTSTRAP allows specifying a specific crate
+ assert!(is_bootstrap("x", Some("x")));
+ // RUSTC_BOOTSTRAP allows multiple comma-delimited crates
+ assert!(is_bootstrap("x,y,z", Some("x")));
+ assert!(is_bootstrap("x,y,z", Some("y")));
+    // Crates that aren't specified do not get unstable features
+ assert!(!is_bootstrap("x", Some("a")));
+ assert!(!is_bootstrap("x,y,z", Some("a")));
+ assert!(!is_bootstrap("x,y,z", None));
+
+    // This is technically a breaking change, but there are no stability guarantees for RUSTC_BOOTSTRAP.
+ assert!(!is_bootstrap("0", None));
+}
diff --git a/compiler/rustc_fs_util/Cargo.toml b/compiler/rustc_fs_util/Cargo.toml
new file mode 100644
index 000000000..34c3fe2a0
--- /dev/null
+++ b/compiler/rustc_fs_util/Cargo.toml
@@ -0,0 +1,4 @@
+[package]
+name = "rustc_fs_util"
+version = "0.0.0"
+edition = "2021"
diff --git a/compiler/rustc_fs_util/src/lib.rs b/compiler/rustc_fs_util/src/lib.rs
new file mode 100644
index 000000000..87e97c746
--- /dev/null
+++ b/compiler/rustc_fs_util/src/lib.rs
@@ -0,0 +1,90 @@
+use std::ffi::CString;
+use std::fs;
+use std::io;
+use std::path::{Path, PathBuf};
+
+// Unfortunately, on Windows, it looks like msvcrt.dll is silently translating
+// verbatim paths under the hood to non-verbatim paths! This manifests itself as
+// gcc looking like it cannot accept paths of the form `\\?\C:\...`, but the
+// real bug seems to lie in msvcrt.dll.
+//
+// Verbatim paths are generally pretty rare, but the implementation of
+// `fs::canonicalize` currently generates paths of this form, meaning that we're
+// going to be passing quite a few of these down to gcc, so we need to deal with
+// this case.
+//
+// For now we just strip the "verbatim prefix" of `\\?\` from the path. This
+// will probably lose information in some cases, but there's not a whole lot
+// more we can do with a buggy msvcrt...
+//
+// For some more information, see this comment:
+// https://github.com/rust-lang/rust/issues/25505#issuecomment-102876737
+#[cfg(windows)]
+pub fn fix_windows_verbatim_for_gcc(p: &Path) -> PathBuf {
+ use std::ffi::OsString;
+ use std::path;
+ let mut components = p.components();
+ let prefix = match components.next() {
+ Some(path::Component::Prefix(p)) => p,
+ _ => return p.to_path_buf(),
+ };
+ match prefix.kind() {
+ path::Prefix::VerbatimDisk(disk) => {
+ let mut base = OsString::from(format!("{}:", disk as char));
+ base.push(components.as_path());
+ PathBuf::from(base)
+ }
+ path::Prefix::VerbatimUNC(server, share) => {
+ let mut base = OsString::from(r"\\");
+ base.push(server);
+ base.push(r"\");
+ base.push(share);
+ base.push(components.as_path());
+ PathBuf::from(base)
+ }
+ _ => p.to_path_buf(),
+ }
+}
+
+#[cfg(not(windows))]
+pub fn fix_windows_verbatim_for_gcc(p: &Path) -> PathBuf {
+ p.to_path_buf()
+}
+
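A sketch of the intended effect as a test against the function above (hypothetical paths; only meaningful on Windows):

```rust
#[cfg(all(test, windows))]
#[test]
fn strips_verbatim_prefixes() {
    use std::path::Path;
    // The verbatim disk prefix `\\?\C:\` collapses to a plain drive path.
    assert_eq!(
        fix_windows_verbatim_for_gcc(Path::new(r"\\?\C:\build\out.o")),
        Path::new(r"C:\build\out.o")
    );
    // The verbatim UNC prefix `\\?\UNC\server\share` collapses to `\\server\share`.
    assert_eq!(
        fix_windows_verbatim_for_gcc(Path::new(r"\\?\UNC\server\share\out.o")),
        Path::new(r"\\server\share\out.o")
    );
}
```
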
+pub enum LinkOrCopy {
+ Link,
+ Copy,
+}
+
+/// Copies `p` into `q`, preferring to use hard-linking if possible. If
+/// `q` already exists, it is removed first.
+/// The result indicates which of the two operations has been performed.
+pub fn link_or_copy<P: AsRef<Path>, Q: AsRef<Path>>(p: P, q: Q) -> io::Result<LinkOrCopy> {
+ let p = p.as_ref();
+ let q = q.as_ref();
+ match fs::remove_file(&q) {
+ Ok(()) => (),
+ Err(err) if err.kind() == io::ErrorKind::NotFound => (),
+ Err(err) => return Err(err),
+ }
+
+ match fs::hard_link(p, q) {
+ Ok(()) => Ok(LinkOrCopy::Link),
+ Err(_) => match fs::copy(p, q) {
+ Ok(_) => Ok(LinkOrCopy::Copy),
+ Err(e) => Err(e),
+ },
+ }
+}
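A minimal usage sketch (hypothetical paths), showing how a caller can react to which of the two operations was performed:

```rust
fn stage_artifact() -> std::io::Result<()> {
    // Hard-linking only succeeds within the same filesystem; otherwise the
    // function silently falls back to a byte-for-byte copy.
    match link_or_copy("build/libfoo.rlib", "staging/libfoo.rlib")? {
        LinkOrCopy::Link => println!("hard-linked (no extra disk space used)"),
        LinkOrCopy::Copy => println!("fell back to a full copy"),
    }
    Ok(())
}
```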
+
+#[cfg(unix)]
+pub fn path_to_c_string(p: &Path) -> CString {
+ use std::ffi::OsStr;
+ use std::os::unix::ffi::OsStrExt;
+ let p: &OsStr = p.as_ref();
+ CString::new(p.as_bytes()).unwrap()
+}
+#[cfg(windows)]
+pub fn path_to_c_string(p: &Path) -> CString {
+ CString::new(p.to_str().unwrap()).unwrap()
+}
diff --git a/compiler/rustc_graphviz/Cargo.toml b/compiler/rustc_graphviz/Cargo.toml
new file mode 100644
index 000000000..d657fdb1a
--- /dev/null
+++ b/compiler/rustc_graphviz/Cargo.toml
@@ -0,0 +1,4 @@
+[package]
+name = "rustc_graphviz"
+version = "0.0.0"
+edition = "2021"
diff --git a/compiler/rustc_graphviz/src/lib.rs b/compiler/rustc_graphviz/src/lib.rs
new file mode 100644
index 000000000..6eaff5c2f
--- /dev/null
+++ b/compiler/rustc_graphviz/src/lib.rs
@@ -0,0 +1,693 @@
+//! Generate files suitable for use with [Graphviz](https://www.graphviz.org/)
+//!
+//! The `render` function generates output (e.g., an `output.dot` file) for
+//! use with [Graphviz](https://www.graphviz.org/) by walking a labeled
+//! graph. (Graphviz can then automatically lay out the nodes and edges
+//! of the graph, and also optionally render the graph as an image or
+//! other [output formats](https://www.graphviz.org/docs/outputs), such as SVG.)
+//!
+//! Rather than impose some particular graph data structure on clients,
+//! this library exposes two traits that clients can implement on their
+//! own structs before handing them over to the rendering function.
+//!
+//! Note: This library does not yet provide access to the full
+//! expressiveness of the [DOT language](https://www.graphviz.org/doc/info/lang.html).
+//! For example, there are many [attributes](https://www.graphviz.org/doc/info/attrs.html)
+//! related to providing layout hints (e.g., left-to-right versus top-down, which
+//! algorithm to use, etc). The current intention of this library is to
+//! emit a human-readable .dot file with very regular structure suitable
+//! for easy post-processing.
+//!
+//! # Examples
+//!
+//! The first example uses a very simple graph representation: a list of
+//! pairs of ints, representing the edges (the node set is implicit).
+//! Each node label is derived directly from the int representing the node,
+//! while the edge labels are all empty strings.
+//!
+//! This example also illustrates how to use `Cow<[T]>` to return
+//! an owned vector or a borrowed slice as appropriate: we construct the
+//! node vector from scratch, but borrow the edge list (rather than
+//! constructing a copy of all the edges from scratch).
+//!
+//! The output from this example renders five nodes, with the first four
+//! forming a diamond-shaped acyclic graph and then pointing to the fifth
+//! which is cyclic.
+//!
+//! ```rust
+//! #![feature(rustc_private)]
+//!
+//! use std::io::Write;
+//! use rustc_graphviz as dot;
+//!
+//! type Nd = isize;
+//! type Ed = (isize,isize);
+//! struct Edges(Vec<Ed>);
+//!
+//! pub fn render_to<W: Write>(output: &mut W) {
+//! let edges = Edges(vec![(0,1), (0,2), (1,3), (2,3), (3,4), (4,4)]);
+//! dot::render(&edges, output).unwrap()
+//! }
+//!
+//! impl<'a> dot::Labeller<'a> for Edges {
+//! type Node = Nd;
+//! type Edge = Ed;
+//! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example1").unwrap() }
+//!
+//! fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
+//! dot::Id::new(format!("N{}", *n)).unwrap()
+//! }
+//! }
+//!
+//! impl<'a> dot::GraphWalk<'a> for Edges {
+//! type Node = Nd;
+//! type Edge = Ed;
+//! fn nodes(&self) -> dot::Nodes<'a,Nd> {
+//! // (assumes that |N| \approxeq |E|)
+//! let &Edges(ref v) = self;
+//! let mut nodes = Vec::with_capacity(v.len());
+//! for &(s,t) in v {
+//! nodes.push(s); nodes.push(t);
+//! }
+//! nodes.sort();
+//! nodes.dedup();
+//! nodes.into()
+//! }
+//!
+//! fn edges(&'a self) -> dot::Edges<'a,Ed> {
+//! let &Edges(ref edges) = self;
+//! (&edges[..]).into()
+//! }
+//!
+//! fn source(&self, e: &Ed) -> Nd { let &(s,_) = e; s }
+//!
+//! fn target(&self, e: &Ed) -> Nd { let &(_,t) = e; t }
+//! }
+//!
+//! # pub fn main() { render_to(&mut Vec::new()) }
+//! ```
+//!
+//! ```no_run
+//! # pub fn render_to<W:std::io::Write>(output: &mut W) { unimplemented!() }
+//! pub fn main() {
+//! use std::fs::File;
+//! let mut f = File::create("example1.dot").unwrap();
+//! render_to(&mut f)
+//! }
+//! ```
+//!
+//! Output from first example (in `example1.dot`):
+//!
+//! ```dot
+//! digraph example1 {
+//! N0[label="N0"];
+//! N1[label="N1"];
+//! N2[label="N2"];
+//! N3[label="N3"];
+//! N4[label="N4"];
+//! N0 -> N1[label=""];
+//! N0 -> N2[label=""];
+//! N1 -> N3[label=""];
+//! N2 -> N3[label=""];
+//! N3 -> N4[label=""];
+//! N4 -> N4[label=""];
+//! }
+//! ```
+//!
+//! The second example illustrates using `node_label` and `edge_label` to
+//! add labels to the nodes and edges in the rendered graph. The graph
+//! here carries both `nodes` (the label text to use for rendering a
+//! particular node), and `edges` (again a list of `(source,target)`
+//! indices).
+//!
+//! This example also illustrates how to use a type (in this case the edge
+//! type) that shares substructure with the graph: the edge type here is a
+//! direct reference to the `(source,target)` pair stored in the graph's
+//! internal vector (rather than passing around a copy of the pair
+//! itself). Note that this implies that `fn edges(&'a self)` must
+//! construct a fresh `Vec<&'a (usize,usize)>` from the `Vec<(usize,usize)>`
+//! edges stored in `self`.
+//!
+//! Since both the set of nodes and the set of edges are always
+//! constructed from scratch via iterators, we use the `collect()` method
+//! from the `Iterator` trait to collect the nodes and edges into freshly
+//! constructed growable `Vec` values (rather than using `Cow` as in the
+//! first example above).
+//!
+//! The output from this example renders four nodes that make up the
+//! Hasse-diagram for the subsets of the set `{x, y}`. Each edge is
+//! labeled with the &sube; character (specified using the HTML character
+//! entity `&sube;`).
+//!
+//! ```rust
+//! #![feature(rustc_private)]
+//!
+//! use std::io::Write;
+//! use rustc_graphviz as dot;
+//!
+//! type Nd = usize;
+//! type Ed<'a> = &'a (usize, usize);
+//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(usize,usize)> }
+//!
+//! pub fn render_to<W: Write>(output: &mut W) {
+//! let nodes = vec!["{x,y}","{x}","{y}","{}"];
+//! let edges = vec![(0,1), (0,2), (1,3), (2,3)];
+//! let graph = Graph { nodes: nodes, edges: edges };
+//!
+//! dot::render(&graph, output).unwrap()
+//! }
+//!
+//! impl<'a> dot::Labeller<'a> for Graph {
+//! type Node = Nd;
+//! type Edge = Ed<'a>;
+//! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example2").unwrap() }
+//! fn node_id(&'a self, n: &Nd) -> dot::Id<'a> {
+//! dot::Id::new(format!("N{}", n)).unwrap()
+//! }
+//! fn node_label<'b>(&'b self, n: &Nd) -> dot::LabelText<'b> {
+//! dot::LabelText::LabelStr(self.nodes[*n].into())
+//! }
+//! fn edge_label<'b>(&'b self, _: &Ed) -> dot::LabelText<'b> {
+//! dot::LabelText::LabelStr("&sube;".into())
+//! }
+//! }
+//!
+//! impl<'a> dot::GraphWalk<'a> for Graph {
+//! type Node = Nd;
+//! type Edge = Ed<'a>;
+//! fn nodes(&self) -> dot::Nodes<'a,Nd> { (0..self.nodes.len()).collect() }
+//! fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { self.edges.iter().collect() }
+//! fn source(&self, e: &Ed) -> Nd { let & &(s,_) = e; s }
+//! fn target(&self, e: &Ed) -> Nd { let & &(_,t) = e; t }
+//! }
+//!
+//! # pub fn main() { render_to(&mut Vec::new()) }
+//! ```
+//!
+//! ```no_run
+//! # pub fn render_to<W:std::io::Write>(output: &mut W) { unimplemented!() }
+//! pub fn main() {
+//! use std::fs::File;
+//! let mut f = File::create("example2.dot").unwrap();
+//! render_to(&mut f)
+//! }
+//! ```
+//!
+//! The third example is similar to the second, except that each node and
+//! edge now carries a reference to the string label for each node as well
+//! as that node's index. (This is another illustration of how to share
+//! structure with the graph itself, and why one might want to do so.)
+//!
+//! The output from this example is the same as the second example: the
+//! Hasse-diagram for the subsets of the set `{x, y}`.
+//!
+//! ```rust
+//! #![feature(rustc_private)]
+//!
+//! use std::io::Write;
+//! use rustc_graphviz as dot;
+//!
+//! type Nd<'a> = (usize, &'a str);
+//! type Ed<'a> = (Nd<'a>, Nd<'a>);
+//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(usize,usize)> }
+//!
+//! pub fn render_to<W: Write>(output: &mut W) {
+//! let nodes = vec!["{x,y}","{x}","{y}","{}"];
+//! let edges = vec![(0,1), (0,2), (1,3), (2,3)];
+//! let graph = Graph { nodes: nodes, edges: edges };
+//!
+//! dot::render(&graph, output).unwrap()
+//! }
+//!
+//! impl<'a> dot::Labeller<'a> for Graph {
+//! type Node = Nd<'a>;
+//! type Edge = Ed<'a>;
+//! fn graph_id(&'a self) -> dot::Id<'a> { dot::Id::new("example3").unwrap() }
+//! fn node_id(&'a self, n: &Nd<'a>) -> dot::Id<'a> {
+//! dot::Id::new(format!("N{}", n.0)).unwrap()
+//! }
+//! fn node_label<'b>(&'b self, n: &Nd<'b>) -> dot::LabelText<'b> {
+//! let &(i, _) = n;
+//! dot::LabelText::LabelStr(self.nodes[i].into())
+//! }
+//! fn edge_label<'b>(&'b self, _: &Ed<'b>) -> dot::LabelText<'b> {
+//! dot::LabelText::LabelStr("&sube;".into())
+//! }
+//! }
+//!
+//! impl<'a> dot::GraphWalk<'a> for Graph {
+//! type Node = Nd<'a>;
+//! type Edge = Ed<'a>;
+//! fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> {
+//! self.nodes.iter().map(|s| &s[..]).enumerate().collect()
+//! }
+//! fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> {
+//! self.edges.iter()
+//! .map(|&(i,j)|((i, &self.nodes[i][..]),
+//! (j, &self.nodes[j][..])))
+//! .collect()
+//! }
+//! fn source(&self, e: &Ed<'a>) -> Nd<'a> { let &(s,_) = e; s }
+//! fn target(&self, e: &Ed<'a>) -> Nd<'a> { let &(_,t) = e; t }
+//! }
+//!
+//! # pub fn main() { render_to(&mut Vec::new()) }
+//! ```
+//!
+//! ```no_run
+//! # pub fn render_to<W:std::io::Write>(output: &mut W) { unimplemented!() }
+//! pub fn main() {
+//! use std::fs::File;
+//! let mut f = File::create("example3.dot").unwrap();
+//! render_to(&mut f)
+//! }
+//! ```
+//!
+//! # References
+//!
+//! * [Graphviz](https://www.graphviz.org/)
+//!
+//! * [DOT language](https://www.graphviz.org/doc/info/lang.html)
+
+#![doc(
+ html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
+ test(attr(allow(unused_variables), deny(warnings)))
+)]
+
+use LabelText::*;
+
+use std::borrow::Cow;
+use std::io;
+use std::io::prelude::*;
+
+/// The text for a graphviz label on a node or edge.
+pub enum LabelText<'a> {
+ /// This kind of label preserves the text directly as is.
+ ///
+ /// Occurrences of backslashes (`\`) are escaped, and thus appear
+ /// as backslashes in the rendered label.
+ LabelStr(Cow<'a, str>),
+
+ /// This kind of label uses the graphviz label escString type:
+ /// <https://www.graphviz.org/docs/attr-types/escString>
+ ///
+ /// Occurrences of backslashes (`\`) are not escaped; instead they
+ /// are interpreted as initiating an escString escape sequence.
+ ///
+ /// Escape sequences of particular interest: in addition to `\n`
+ /// to break a line (centering the line preceding the `\n`), there
+ /// are also the escape sequences `\l` which left-justifies the
+ /// preceding line and `\r` which right-justifies it.
+ EscStr(Cow<'a, str>),
+
+ /// This uses a graphviz [HTML string label][html]. The string is
+ /// printed exactly as given, but between `<` and `>`. **No
+ /// escaping is performed.**
+ ///
+ /// [html]: https://www.graphviz.org/doc/info/shapes.html#html
+ HtmlStr(Cow<'a, str>),
+}
+
+/// The style for a node or edge.
+/// See <https://www.graphviz.org/docs/attr-types/style/> for descriptions.
+/// Note that some of these are not valid for edges.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum Style {
+ None,
+ Solid,
+ Dashed,
+ Dotted,
+ Bold,
+ Rounded,
+ Diagonals,
+ Filled,
+ Striped,
+ Wedged,
+}
+
+impl Style {
+ pub fn as_slice(self) -> &'static str {
+ match self {
+ Style::None => "",
+ Style::Solid => "solid",
+ Style::Dashed => "dashed",
+ Style::Dotted => "dotted",
+ Style::Bold => "bold",
+ Style::Rounded => "rounded",
+ Style::Diagonals => "diagonals",
+ Style::Filled => "filled",
+ Style::Striped => "striped",
+ Style::Wedged => "wedged",
+ }
+ }
+}
+
+// There is a tension in the design of the labelling API.
+//
+// For example, I considered making a `Labeller<T>` trait that
+// provides labels for `T`, and then making the graph type `G`
+// implement `Labeller<Node>` and `Labeller<Edge>`. However, this is
+// not possible without functional dependencies. (One could work
+// around that, but I did not explore that avenue heavily.)
+//
+// Another approach that I actually used for a while was to make a
+// `Label<Context>` trait that is implemented by the client-specific
+// Node and Edge types (as well as an implementation on Graph itself
+// for the overall name for the graph). The main disadvantage of this
+// second approach (compared to having the `G` type parameter
+// implement a Labelling service) that I have encountered is that it
+// makes it impossible to use types outside of the current crate
+// directly as Nodes/Edges; you need to wrap them in newtype'd
+// structs. See e.g., the `Nd` and `Ed` types in the examples. (In
+// practice clients using a graph in some other crate would need to
+// provide some sort of adapter shim over the graph anyway to
+// interface with this library).
+//
+// Another approach would be to make a single `Labeller<N,E>` trait
+// that provides three methods (graph_label, node_label, edge_label),
+// and then make `G` implement `Labeller<N,E>`. At first this did not
+// appeal to me, since I had thought I would need separate methods on
+// each data variant for dot-internal identifiers versus user-visible
+// labels. However, the identifier/label distinction only arises for
+// nodes; graphs themselves only have identifiers, and edges only have
+// labels.
+//
+// So in the end I decided to use the third approach described above.
+
+/// `Id` is a Graphviz `ID`.
+pub struct Id<'a> {
+ name: Cow<'a, str>,
+}
+
+impl<'a> Id<'a> {
+ /// Creates an `Id` named `name`.
+ ///
+ /// The caller must ensure that the input conforms to an
+ /// identifier format: it must be a non-empty string made up of
+ /// alphanumeric or underscore characters, not beginning with a
+ /// digit (i.e., the regular expression `[a-zA-Z_][a-zA-Z_0-9]*`).
+ ///
+ /// (Note: this format is a strict subset of the `ID` format
+ /// defined by the DOT language. This function may change in the
+ /// future to accept a broader subset, or the entirety, of DOT's
+ /// `ID` format.)
+ ///
+ /// Passing an invalid string (containing spaces, brackets,
+ /// quotes, ...) will return an empty `Err` value.
+ pub fn new<Name: Into<Cow<'a, str>>>(name: Name) -> Result<Id<'a>, ()> {
+ let name = name.into();
+ match name.chars().next() {
+ Some(c) if c.is_ascii_alphabetic() || c == '_' => {}
+ _ => return Err(()),
+ }
+ if !name.chars().all(|c| c.is_ascii_alphanumeric() || c == '_') {
+ return Err(());
+ }
+
+ Ok(Id { name })
+ }
+
+ pub fn as_slice(&'a self) -> &'a str {
+ &*self.name
+ }
+}
+
+/// Each node and edge handled by a `Labeller` maps to a unique
+/// identifier with respect to that `Labeller`, which is used to identify
+/// it in the generated .dot file. Implementors can also provide more
+/// elaborate (and non-unique) label text that is used in the graphviz
+/// rendered output.
+///
+/// The graph instance is responsible for providing the DOT compatible
+/// identifiers for the nodes and (optionally) rendered labels for the nodes and
+/// edges, as well as an identifier for the graph itself.
+pub trait Labeller<'a> {
+ type Node;
+ type Edge;
+
+ /// Must return a DOT compatible identifier naming the graph.
+ fn graph_id(&'a self) -> Id<'a>;
+
+ /// Maps `n` to a unique identifier with respect to `self`. The
+ /// implementor is responsible for ensuring that the returned name
+ /// is a valid DOT identifier.
+ fn node_id(&'a self, n: &Self::Node) -> Id<'a>;
+
+ /// Maps `n` to one of the [graphviz `shape` names][1]. If `None`
+ /// is returned, no `shape` attribute is specified.
+ ///
+ /// [1]: https://www.graphviz.org/doc/info/shapes.html
+ fn node_shape(&'a self, _node: &Self::Node) -> Option<LabelText<'a>> {
+ None
+ }
+
+ /// Maps `n` to a label that will be used in the rendered output.
+ /// The label need not be unique, and may be the empty string; the
+ /// default is just the output from `node_id`.
+ fn node_label(&'a self, n: &Self::Node) -> LabelText<'a> {
+ LabelStr(self.node_id(n).name)
+ }
+
+ /// Maps `e` to a label that will be used in the rendered output.
+ /// The label need not be unique, and may be the empty string; the
+ /// default is in fact the empty string.
+ fn edge_label(&'a self, _e: &Self::Edge) -> LabelText<'a> {
+ LabelStr("".into())
+ }
+
+ /// Maps `n` to a style that will be used in the rendered output.
+ fn node_style(&'a self, _n: &Self::Node) -> Style {
+ Style::None
+ }
+
+ /// Maps `e` to a style that will be used in the rendered output.
+ fn edge_style(&'a self, _e: &Self::Edge) -> Style {
+ Style::None
+ }
+}
+
+/// Escapes `&`, `"`, `<`, and `>` in `s` so that the result is suitable
+/// for inclusion in a Graphviz HTML label.
+pub fn escape_html(s: &str) -> String {
+ s.replace('&', "&amp;").replace('\"', "&quot;").replace('<', "&lt;").replace('>', "&gt;")
+}
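A behavior sketch of the function above; note that `&` is replaced first, so the `&` characters introduced by the later replacements are not double-escaped:

```rust
#[test]
fn escape_html_behavior() {
    assert_eq!(escape_html("<b>&</b>"), "&lt;b&gt;&amp;&lt;/b&gt;");
    assert_eq!(escape_html(r#"a "quote""#), "a &quot;quote&quot;");
}
```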
+
+impl<'a> LabelText<'a> {
+ pub fn label<S: Into<Cow<'a, str>>>(s: S) -> LabelText<'a> {
+ LabelStr(s.into())
+ }
+
+ pub fn html<S: Into<Cow<'a, str>>>(s: S) -> LabelText<'a> {
+ HtmlStr(s.into())
+ }
+
+ fn escape_char<F>(c: char, mut f: F)
+ where
+ F: FnMut(char),
+ {
+ match c {
+ // not escaping \\, since Graphviz escString needs to
+ // interpret backslashes; see EscStr above.
+ '\\' => f(c),
+ _ => {
+ for c in c.escape_default() {
+ f(c)
+ }
+ }
+ }
+ }
+ fn escape_str(s: &str) -> String {
+ let mut out = String::with_capacity(s.len());
+ for c in s.chars() {
+ LabelText::escape_char(c, |c| out.push(c));
+ }
+ out
+ }
+
+    /// Renders text as a string suitable for a label in a .dot file.
+ /// This includes quotes or suitable delimiters.
+ pub fn to_dot_string(&self) -> String {
+ match *self {
+ LabelStr(ref s) => format!("\"{}\"", s.escape_default()),
+ EscStr(ref s) => format!("\"{}\"", LabelText::escape_str(&s)),
+ HtmlStr(ref s) => format!("<{}>", s),
+ }
+ }
+
+    /// Decomposes content into a string suitable for making an `EscStr`
+    /// that yields the same content as `self`. The result obeys the law
+    /// `render(lt) == render(EscStr(lt.pre_escaped_content()))` for
+    /// all `lt: LabelText`.
+ fn pre_escaped_content(self) -> Cow<'a, str> {
+ match self {
+ EscStr(s) => s,
+ LabelStr(s) => {
+ if s.contains('\\') {
+ (&*s).escape_default().to_string().into()
+ } else {
+ s
+ }
+ }
+ HtmlStr(s) => s,
+ }
+ }
+
+ /// Puts `suffix` on a line below this label, with a blank line separator.
+ pub fn suffix_line(self, suffix: LabelText<'_>) -> LabelText<'static> {
+ let mut prefix = self.pre_escaped_content().into_owned();
+ let suffix = suffix.pre_escaped_content();
+ prefix.push_str(r"\n\n");
+ prefix.push_str(&suffix);
+ EscStr(prefix.into())
+ }
+}
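A behavior sketch of `to_dot_string` across the three variants, matching the escaping rules documented on `LabelText`:

```rust
#[test]
fn to_dot_string_behavior() {
    // LabelStr escapes via `escape_default`: the newline becomes a literal `\n`.
    assert_eq!(LabelText::label("line1\nline2").to_dot_string(), r#""line1\nline2""#);
    // EscStr passes escString sequences such as `\l` through unchanged.
    assert_eq!(EscStr(r"left\l".into()).to_dot_string(), r#""left\l""#);
    // HtmlStr is emitted verbatim between angle brackets.
    assert_eq!(LabelText::html("<b>hi</b>").to_dot_string(), "<<b>hi</b>>");
}
```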
+
+pub type Nodes<'a, N> = Cow<'a, [N]>;
+pub type Edges<'a, E> = Cow<'a, [E]>;
+
+// (Historical note: `Node` and `Edge` used to be type parameters on
+// `GraphWalk`; they are associated types now.)
+
+/// GraphWalk is an abstraction over a directed graph = (nodes, edges)
+/// made up of node handles `N` and edge handles `E`, where each `E`
+/// can be mapped to its source and target nodes.
+///
+/// The lifetime parameter `'a` is exposed in this trait (rather than
+/// introduced as a generic parameter on each method declaration) so
+/// that a client impl can choose `N` and `E` that have substructure
+/// that is bound by the self lifetime `'a`.
+///
+/// The `nodes` and `edges` methods each return instantiations of
+/// `Cow<[T]>` to leave implementors the freedom to create
+/// entirely new vectors or to pass back slices into internally owned
+/// vectors.
+pub trait GraphWalk<'a> {
+ type Node: Clone;
+ type Edge: Clone;
+
+ /// Returns all the nodes in this graph.
+ fn nodes(&'a self) -> Nodes<'a, Self::Node>;
+ /// Returns all of the edges in this graph.
+ fn edges(&'a self) -> Edges<'a, Self::Edge>;
+ /// The source node for `edge`.
+ fn source(&'a self, edge: &Self::Edge) -> Self::Node;
+ /// The target node for `edge`.
+ fn target(&'a self, edge: &Self::Edge) -> Self::Node;
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum RenderOption {
+ NoEdgeLabels,
+ NoNodeLabels,
+ NoEdgeStyles,
+ NoNodeStyles,
+
+ Fontname(String),
+ DarkTheme,
+}
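A hedged usage sketch (the wrapper name is hypothetical), mirroring the signature of `render_opts` below:

```rust
fn render_dark<'a, N, E, G, W>(g: &'a G, out: &mut W) -> std::io::Result<()>
where
    N: Clone + 'a,
    E: Clone + 'a,
    G: Labeller<'a, Node = N, Edge = E> + GraphWalk<'a, Node = N, Edge = E>,
    W: std::io::Write,
{
    // Drop the edge labels and emit white-on-black styling.
    render_opts(g, out, &[RenderOption::NoEdgeLabels, RenderOption::DarkTheme])
}
```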
+
+/// Renders directed graph `g` into the writer `w` in DOT syntax.
+/// (Simple wrapper around `render_opts` that passes a default set of options.)
+pub fn render<'a, N, E, G, W>(g: &'a G, w: &mut W) -> io::Result<()>
+where
+ N: Clone + 'a,
+ E: Clone + 'a,
+ G: Labeller<'a, Node = N, Edge = E> + GraphWalk<'a, Node = N, Edge = E>,
+ W: Write,
+{
+ render_opts(g, w, &[])
+}
+
+/// Renders directed graph `g` into the writer `w` in DOT syntax.
+/// (Main entry point for the library.)
+pub fn render_opts<'a, N, E, G, W>(g: &'a G, w: &mut W, options: &[RenderOption]) -> io::Result<()>
+where
+ N: Clone + 'a,
+ E: Clone + 'a,
+ G: Labeller<'a, Node = N, Edge = E> + GraphWalk<'a, Node = N, Edge = E>,
+ W: Write,
+{
+ writeln!(w, "digraph {} {{", g.graph_id().as_slice())?;
+
+ // Global graph properties
+ let mut graph_attrs = Vec::new();
+ let mut content_attrs = Vec::new();
+ let font;
+ if let Some(fontname) = options.iter().find_map(|option| {
+ if let RenderOption::Fontname(fontname) = option { Some(fontname) } else { None }
+ }) {
+ font = format!(r#"fontname="{}""#, fontname);
+ graph_attrs.push(&font[..]);
+ content_attrs.push(&font[..]);
+ }
+ if options.contains(&RenderOption::DarkTheme) {
+ graph_attrs.push(r#"bgcolor="black""#);
+ graph_attrs.push(r#"fontcolor="white""#);
+ content_attrs.push(r#"color="white""#);
+ content_attrs.push(r#"fontcolor="white""#);
+ }
+ if !(graph_attrs.is_empty() && content_attrs.is_empty()) {
+ writeln!(w, r#" graph[{}];"#, graph_attrs.join(" "))?;
+ let content_attrs_str = content_attrs.join(" ");
+ writeln!(w, r#" node[{}];"#, content_attrs_str)?;
+ writeln!(w, r#" edge[{}];"#, content_attrs_str)?;
+ }
+
+ let mut text = Vec::new();
+ for n in g.nodes().iter() {
+ write!(w, " ")?;
+ let id = g.node_id(n);
+
+ let escaped = &g.node_label(n).to_dot_string();
+
+ write!(text, "{}", id.as_slice()).unwrap();
+
+ if !options.contains(&RenderOption::NoNodeLabels) {
+ write!(text, "[label={}]", escaped).unwrap();
+ }
+
+ let style = g.node_style(n);
+ if !options.contains(&RenderOption::NoNodeStyles) && style != Style::None {
+ write!(text, "[style=\"{}\"]", style.as_slice()).unwrap();
+ }
+
+ if let Some(s) = g.node_shape(n) {
+ write!(text, "[shape={}]", &s.to_dot_string()).unwrap();
+ }
+
+ writeln!(text, ";").unwrap();
+ w.write_all(&text)?;
+
+ text.clear();
+ }
+
+ for e in g.edges().iter() {
+ let escaped_label = &g.edge_label(e).to_dot_string();
+ write!(w, " ")?;
+ let source = g.source(e);
+ let target = g.target(e);
+ let source_id = g.node_id(&source);
+ let target_id = g.node_id(&target);
+
+ write!(text, "{} -> {}", source_id.as_slice(), target_id.as_slice()).unwrap();
+
+ if !options.contains(&RenderOption::NoEdgeLabels) {
+ write!(text, "[label={}]", escaped_label).unwrap();
+ }
+
+ let style = g.edge_style(e);
+ if !options.contains(&RenderOption::NoEdgeStyles) && style != Style::None {
+ write!(text, "[style=\"{}\"]", style.as_slice()).unwrap();
+ }
+
+ writeln!(text, ";").unwrap();
+ w.write_all(&text)?;
+
+ text.clear();
+ }
+
+ writeln!(w, "}}")
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_graphviz/src/tests.rs b/compiler/rustc_graphviz/src/tests.rs
new file mode 100644
index 000000000..154bae4cb
--- /dev/null
+++ b/compiler/rustc_graphviz/src/tests.rs
@@ -0,0 +1,408 @@
+use super::LabelText::{self, EscStr, HtmlStr, LabelStr};
+use super::{render, Edges, GraphWalk, Id, Labeller, Nodes, Style};
+use std::io;
+use std::io::prelude::*;
+use NodeLabels::*;
+
+/// Each node is an index into a vector in the graph.
+type Node = usize;
+struct Edge {
+ from: usize,
+ to: usize,
+ label: &'static str,
+ style: Style,
+}
+
+fn edge(from: usize, to: usize, label: &'static str, style: Style) -> Edge {
+ Edge { from, to, label, style }
+}
+
+struct LabelledGraph {
+    /// The name for this graph. Used for labelling the generated `digraph`.
+ name: &'static str,
+
+ /// Each node is an index into `node_labels`; these labels are
+ /// used as the label text for each node. (The node *names*,
+ /// which are unique identifiers, are derived from their index
+ /// in this array.)
+ ///
+    /// If a node maps to `None` here, its name is used as its
+    /// text.
+ node_labels: Vec<Option<&'static str>>,
+
+ node_styles: Vec<Style>,
+
+ /// Each edge relates a from-index to a to-index along with a
+ /// label; `edges` collects them.
+ edges: Vec<Edge>,
+}
+
+// A simple wrapper around LabelledGraph that forces the labels to
+// be emitted as EscStr.
+struct LabelledGraphWithEscStrs {
+ graph: LabelledGraph,
+}
+
+enum NodeLabels<L> {
+ AllNodesLabelled(Vec<L>),
+ UnlabelledNodes(usize),
+ SomeNodesLabelled(Vec<Option<L>>),
+}
+
+type Trivial = NodeLabels<&'static str>;
+
+impl NodeLabels<&'static str> {
+ fn to_opt_strs(self) -> Vec<Option<&'static str>> {
+ match self {
+ UnlabelledNodes(len) => vec![None; len],
+ AllNodesLabelled(lbls) => lbls.into_iter().map(Some).collect(),
+ SomeNodesLabelled(lbls) => lbls,
+ }
+ }
+
+ fn len(&self) -> usize {
+ match self {
+ &UnlabelledNodes(len) => len,
+ &AllNodesLabelled(ref lbls) => lbls.len(),
+ &SomeNodesLabelled(ref lbls) => lbls.len(),
+ }
+ }
+}
+
+impl LabelledGraph {
+ fn new(
+ name: &'static str,
+ node_labels: Trivial,
+ edges: Vec<Edge>,
+ node_styles: Option<Vec<Style>>,
+ ) -> LabelledGraph {
+ let count = node_labels.len();
+ LabelledGraph {
+ name,
+ node_labels: node_labels.to_opt_strs(),
+ edges,
+ node_styles: match node_styles {
+ Some(nodes) => nodes,
+ None => vec![Style::None; count],
+ },
+ }
+ }
+}
+
+impl LabelledGraphWithEscStrs {
+ fn new(name: &'static str, node_labels: Trivial, edges: Vec<Edge>) -> LabelledGraphWithEscStrs {
+ LabelledGraphWithEscStrs { graph: LabelledGraph::new(name, node_labels, edges, None) }
+ }
+}
+
+fn id_name<'a>(n: &Node) -> Id<'a> {
+ Id::new(format!("N{}", *n)).unwrap()
+}
+
+impl<'a> Labeller<'a> for LabelledGraph {
+ type Node = Node;
+ type Edge = &'a Edge;
+ fn graph_id(&'a self) -> Id<'a> {
+ Id::new(self.name).unwrap()
+ }
+ fn node_id(&'a self, n: &Node) -> Id<'a> {
+ id_name(n)
+ }
+ fn node_label(&'a self, n: &Node) -> LabelText<'a> {
+ match self.node_labels[*n] {
+ Some(l) => LabelStr(l.into()),
+ None => LabelStr(id_name(n).name),
+ }
+ }
+ fn edge_label(&'a self, e: &&'a Edge) -> LabelText<'a> {
+ LabelStr(e.label.into())
+ }
+ fn node_style(&'a self, n: &Node) -> Style {
+ self.node_styles[*n]
+ }
+ fn edge_style(&'a self, e: &&'a Edge) -> Style {
+ e.style
+ }
+}
+
+impl<'a> Labeller<'a> for LabelledGraphWithEscStrs {
+ type Node = Node;
+ type Edge = &'a Edge;
+ fn graph_id(&'a self) -> Id<'a> {
+ self.graph.graph_id()
+ }
+ fn node_id(&'a self, n: &Node) -> Id<'a> {
+ self.graph.node_id(n)
+ }
+ fn node_label(&'a self, n: &Node) -> LabelText<'a> {
+ match self.graph.node_label(n) {
+ LabelStr(s) | EscStr(s) | HtmlStr(s) => EscStr(s),
+ }
+ }
+ fn edge_label(&'a self, e: &&'a Edge) -> LabelText<'a> {
+ match self.graph.edge_label(e) {
+ LabelStr(s) | EscStr(s) | HtmlStr(s) => EscStr(s),
+ }
+ }
+}
+
+impl<'a> GraphWalk<'a> for LabelledGraph {
+ type Node = Node;
+ type Edge = &'a Edge;
+ fn nodes(&'a self) -> Nodes<'a, Node> {
+ (0..self.node_labels.len()).collect()
+ }
+ fn edges(&'a self) -> Edges<'a, &'a Edge> {
+ self.edges.iter().collect()
+ }
+ fn source(&'a self, edge: &&'a Edge) -> Node {
+ edge.from
+ }
+ fn target(&'a self, edge: &&'a Edge) -> Node {
+ edge.to
+ }
+}
+
+impl<'a> GraphWalk<'a> for LabelledGraphWithEscStrs {
+ type Node = Node;
+ type Edge = &'a Edge;
+ fn nodes(&'a self) -> Nodes<'a, Node> {
+ self.graph.nodes()
+ }
+ fn edges(&'a self) -> Edges<'a, &'a Edge> {
+ self.graph.edges()
+ }
+ fn source(&'a self, edge: &&'a Edge) -> Node {
+ edge.from
+ }
+ fn target(&'a self, edge: &&'a Edge) -> Node {
+ edge.to
+ }
+}
+
+fn test_input(g: LabelledGraph) -> io::Result<String> {
+ let mut writer = Vec::new();
+ render(&g, &mut writer).unwrap();
+ let mut s = String::new();
+ Read::read_to_string(&mut &*writer, &mut s)?;
+ Ok(s)
+}
+
+// All of the tests use raw-strings as the format for the expected outputs,
+// so that you can cut-and-paste the content into a .dot file yourself to
+// see what the graphviz visualizer would produce.
+
+#[test]
+fn empty_graph() {
+ let labels: Trivial = UnlabelledNodes(0);
+ let r = test_input(LabelledGraph::new("empty_graph", labels, vec![], None));
+ assert_eq!(
+ r.unwrap(),
+ r#"digraph empty_graph {
+}
+"#
+ );
+}
+
+#[test]
+fn single_node() {
+ let labels: Trivial = UnlabelledNodes(1);
+ let r = test_input(LabelledGraph::new("single_node", labels, vec![], None));
+ assert_eq!(
+ r.unwrap(),
+ r#"digraph single_node {
+ N0[label="N0"];
+}
+"#
+ );
+}
+
+#[test]
+fn single_node_with_style() {
+ let labels: Trivial = UnlabelledNodes(1);
+ let styles = Some(vec![Style::Dashed]);
+ let r = test_input(LabelledGraph::new("single_node", labels, vec![], styles));
+ assert_eq!(
+ r.unwrap(),
+ r#"digraph single_node {
+ N0[label="N0"][style="dashed"];
+}
+"#
+ );
+}
+
+#[test]
+fn single_edge() {
+ let labels: Trivial = UnlabelledNodes(2);
+ let result = test_input(LabelledGraph::new(
+ "single_edge",
+ labels,
+ vec![edge(0, 1, "E", Style::None)],
+ None,
+ ));
+ assert_eq!(
+ result.unwrap(),
+ r#"digraph single_edge {
+ N0[label="N0"];
+ N1[label="N1"];
+ N0 -> N1[label="E"];
+}
+"#
+ );
+}
+
+#[test]
+fn single_edge_with_style() {
+ let labels: Trivial = UnlabelledNodes(2);
+ let result = test_input(LabelledGraph::new(
+ "single_edge",
+ labels,
+ vec![edge(0, 1, "E", Style::Bold)],
+ None,
+ ));
+ assert_eq!(
+ result.unwrap(),
+ r#"digraph single_edge {
+ N0[label="N0"];
+ N1[label="N1"];
+ N0 -> N1[label="E"][style="bold"];
+}
+"#
+ );
+}
+
+#[test]
+fn test_some_labelled() {
+ let labels: Trivial = SomeNodesLabelled(vec![Some("A"), None]);
+ let styles = Some(vec![Style::None, Style::Dotted]);
+ let result = test_input(LabelledGraph::new(
+ "test_some_labelled",
+ labels,
+ vec![edge(0, 1, "A-1", Style::None)],
+ styles,
+ ));
+ assert_eq!(
+ result.unwrap(),
+ r#"digraph test_some_labelled {
+ N0[label="A"];
+ N1[label="N1"][style="dotted"];
+ N0 -> N1[label="A-1"];
+}
+"#
+ );
+}
+
+#[test]
+fn single_cyclic_node() {
+ let labels: Trivial = UnlabelledNodes(1);
+ let r = test_input(LabelledGraph::new(
+ "single_cyclic_node",
+ labels,
+ vec![edge(0, 0, "E", Style::None)],
+ None,
+ ));
+ assert_eq!(
+ r.unwrap(),
+ r#"digraph single_cyclic_node {
+ N0[label="N0"];
+ N0 -> N0[label="E"];
+}
+"#
+ );
+}
+
+#[test]
+fn hasse_diagram() {
+ let labels = AllNodesLabelled(vec!["{x,y}", "{x}", "{y}", "{}"]);
+ let r = test_input(LabelledGraph::new(
+ "hasse_diagram",
+ labels,
+ vec![
+ edge(0, 1, "", Style::None),
+ edge(0, 2, "", Style::None),
+ edge(1, 3, "", Style::None),
+ edge(2, 3, "", Style::None),
+ ],
+ None,
+ ));
+ assert_eq!(
+ r.unwrap(),
+ r#"digraph hasse_diagram {
+ N0[label="{x,y}"];
+ N1[label="{x}"];
+ N2[label="{y}"];
+ N3[label="{}"];
+ N0 -> N1[label=""];
+ N0 -> N2[label=""];
+ N1 -> N3[label=""];
+ N2 -> N3[label=""];
+}
+"#
+ );
+}
+
+#[test]
+fn left_aligned_text() {
+ let labels = AllNodesLabelled(vec![
+ "if test {\
+ \\l branch1\
+ \\l} else {\
+ \\l branch2\
+ \\l}\
+ \\lafterward\
+ \\l",
+ "branch1",
+ "branch2",
+ "afterward",
+ ]);
+
+ let mut writer = Vec::new();
+
+ let g = LabelledGraphWithEscStrs::new(
+ "syntax_tree",
+ labels,
+ vec![
+ edge(0, 1, "then", Style::None),
+ edge(0, 2, "else", Style::None),
+ edge(1, 3, ";", Style::None),
+ edge(2, 3, ";", Style::None),
+ ],
+ );
+
+ render(&g, &mut writer).unwrap();
+ let mut r = String::new();
+ Read::read_to_string(&mut &*writer, &mut r).unwrap();
+
+ assert_eq!(
+ r,
+ r#"digraph syntax_tree {
+ N0[label="if test {\l branch1\l} else {\l branch2\l}\lafterward\l"];
+ N1[label="branch1"];
+ N2[label="branch2"];
+ N3[label="afterward"];
+ N0 -> N1[label="then"];
+ N0 -> N2[label="else"];
+ N1 -> N3[label=";"];
+ N2 -> N3[label=";"];
+}
+"#
+ );
+}
+
+#[test]
+fn simple_id_construction() {
+ let id1 = Id::new("hello");
+ match id1 {
+ Ok(_) => {}
+ Err(..) => panic!("'hello' is not a valid value for id anymore"),
+ }
+}
+
+#[test]
+fn badly_formatted_id() {
+ let id2 = Id::new("Weird { struct : ure } !!!");
+ match id2 {
+ Ok(_) => panic!("graphviz id suddenly allows spaces, brackets and stuff"),
+ Err(..) => {}
+ }
+}
diff --git a/compiler/rustc_hir/Cargo.toml b/compiler/rustc_hir/Cargo.toml
new file mode 100644
index 000000000..69ad623b7
--- /dev/null
+++ b/compiler/rustc_hir/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "rustc_hir"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_arena = { path = "../rustc_arena" }
+rustc_target = { path = "../rustc_target" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_error_messages = { path = "../rustc_error_messages" }
+rustc_index = { path = "../rustc_index" }
+rustc_span = { path = "../rustc_span" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_ast = { path = "../rustc_ast" }
+tracing = "0.1"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+odht = { version = "0.3.1", features = ["nightly"] }
diff --git a/compiler/rustc_hir/src/arena.rs b/compiler/rustc_hir/src/arena.rs
new file mode 100644
index 000000000..44335b7f4
--- /dev/null
+++ b/compiler/rustc_hir/src/arena.rs
@@ -0,0 +1,55 @@
+/// This higher-order macro declares a list of types which can be allocated by `Arena`.
+///
+/// Specifying the `decode` modifier will add decode impls for `&T` and `&[T]`,
+/// where `T` is the type listed. These impls will appear in the implement_ty_decoder! macro.
+#[macro_export]
+macro_rules! arena_types {
+ ($macro:path) => (
+ $macro!([
+ // HIR types
+ [] hir_krate: rustc_hir::Crate<'tcx>,
+ [] arm: rustc_hir::Arm<'tcx>,
+ [] asm_operand: (rustc_hir::InlineAsmOperand<'tcx>, rustc_span::Span),
+ [] asm_template: rustc_ast::InlineAsmTemplatePiece,
+ [] attribute: rustc_ast::Attribute,
+ [] closure: rustc_hir::Closure<'tcx>,
+ [] block: rustc_hir::Block<'tcx>,
+ [] bare_fn_ty: rustc_hir::BareFnTy<'tcx>,
+ [] body: rustc_hir::Body<'tcx>,
+ [] generics: rustc_hir::Generics<'tcx>,
+ [] generic_arg: rustc_hir::GenericArg<'tcx>,
+ [] generic_args: rustc_hir::GenericArgs<'tcx>,
+ [] generic_bound: rustc_hir::GenericBound<'tcx>,
+ [] generic_param: rustc_hir::GenericParam<'tcx>,
+ [] expr: rustc_hir::Expr<'tcx>,
+ [] impl_: rustc_hir::Impl<'tcx>,
+ [] let_expr: rustc_hir::Let<'tcx>,
+ [] expr_field: rustc_hir::ExprField<'tcx>,
+ [] pat_field: rustc_hir::PatField<'tcx>,
+ [] fn_decl: rustc_hir::FnDecl<'tcx>,
+ [] foreign_item: rustc_hir::ForeignItem<'tcx>,
+ [] foreign_item_ref: rustc_hir::ForeignItemRef,
+ [] impl_item: rustc_hir::ImplItem<'tcx>,
+ [] impl_item_ref: rustc_hir::ImplItemRef,
+ [] item: rustc_hir::Item<'tcx>,
+ [] inline_asm: rustc_hir::InlineAsm<'tcx>,
+ [] local: rustc_hir::Local<'tcx>,
+ [] mod_: rustc_hir::Mod<'tcx>,
+ [] owner_info: rustc_hir::OwnerInfo<'tcx>,
+ [] param: rustc_hir::Param<'tcx>,
+ [] pat: rustc_hir::Pat<'tcx>,
+ [] path: rustc_hir::Path<'tcx>,
+ [] path_segment: rustc_hir::PathSegment<'tcx>,
+ [] poly_trait_ref: rustc_hir::PolyTraitRef<'tcx>,
+ [] qpath: rustc_hir::QPath<'tcx>,
+ [] stmt: rustc_hir::Stmt<'tcx>,
+ [] field_def: rustc_hir::FieldDef<'tcx>,
+ [] trait_item: rustc_hir::TraitItem<'tcx>,
+ [] trait_item_ref: rustc_hir::TraitItemRef,
+ [] ty: rustc_hir::Ty<'tcx>,
+ [] type_binding: rustc_hir::TypeBinding<'tcx>,
+ [] variant: rustc_hir::Variant<'tcx>,
+ [] where_predicate: rustc_hir::WherePredicate<'tcx>,
+ ]);
+ )
+}
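A minimal, self-contained sketch of the same higher-order pattern with plain types (toy macros, not part of the compiler):

```rust
macro_rules! toy_types {
    ($macro:path) => (
        $macro!([
            [] number: u32,
            [] text: String,
        ]);
    )
}

macro_rules! declare_enum {
    ([ $( [$($mods:tt)*] $name:ident : $ty:ty, )* ]) => {
        #[allow(dead_code, non_camel_case_types)]
        enum Allocatable { $( $name($ty) ),* }
    };
}

toy_types!(declare_enum); // expands to `enum Allocatable { number(u32), text(String) }`
```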
diff --git a/compiler/rustc_hir/src/def.rs b/compiler/rustc_hir/src/def.rs
new file mode 100644
index 000000000..be5b7eccb
--- /dev/null
+++ b/compiler/rustc_hir/src/def.rs
@@ -0,0 +1,749 @@
+use crate::hir;
+
+use rustc_ast as ast;
+use rustc_ast::NodeId;
+use rustc_macros::HashStable_Generic;
+use rustc_span::def_id::{DefId, LocalDefId};
+use rustc_span::hygiene::MacroKind;
+use rustc_span::Symbol;
+
+use std::array::IntoIter;
+use std::fmt::Debug;
+
+/// Encodes whether a `DefKind::Ctor` is the constructor of an enum variant or a struct.
+#[derive(Clone, Copy, PartialEq, Eq, Encodable, Decodable, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum CtorOf {
+ /// This `DefKind::Ctor` is a synthesized constructor of a tuple or unit struct.
+ Struct,
+ /// This `DefKind::Ctor` is a synthesized constructor of a tuple or unit variant.
+ Variant,
+}
+
+/// What kind of constructor something is.
+#[derive(Clone, Copy, PartialEq, Eq, Encodable, Decodable, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum CtorKind {
+ /// Constructor function automatically created by a tuple struct/variant.
+ Fn,
+ /// Constructor constant automatically created by a unit struct/variant.
+ Const,
+ /// Unusable name in value namespace created by a struct variant.
+ Fictive,
+}
+
+/// An attribute that is not a macro; e.g., `#[inline]` or `#[rustfmt::skip]`.
+#[derive(Clone, Copy, PartialEq, Eq, Encodable, Decodable, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum NonMacroAttrKind {
+ /// Single-segment attribute defined by the language (`#[inline]`)
+ Builtin(Symbol),
+ /// Multi-segment custom attribute living in a "tool module" (`#[rustfmt::skip]`).
+ Tool,
+ /// Single-segment custom attribute registered by a derive macro (`#[serde(default)]`).
+ DeriveHelper,
+ /// Single-segment custom attribute registered by a derive macro
+ /// but used before that derive macro was expanded (deprecated).
+ DeriveHelperCompat,
+ /// Single-segment custom attribute registered with `#[register_attr]`.
+ Registered,
+}
+
+/// What kind of definition something is; e.g., `mod` vs `struct`.
+#[derive(Clone, Copy, PartialEq, Eq, Encodable, Decodable, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum DefKind {
+ // Type namespace
+ Mod,
+ /// Refers to the struct itself, [`DefKind::Ctor`] refers to its constructor if it exists.
+ Struct,
+ Union,
+ Enum,
+ /// Refers to the variant itself, [`DefKind::Ctor`] refers to its constructor if it exists.
+ Variant,
+ Trait,
+ /// Type alias: `type Foo = Bar;`
+ TyAlias,
+ /// Type from an `extern` block.
+ ForeignTy,
+ /// Trait alias: `trait IntIterator = Iterator<Item = i32>;`
+ TraitAlias,
+ /// Associated type: `trait MyTrait { type Assoc; }`
+ AssocTy,
+ /// Type parameter: the `T` in `struct Vec<T> { ... }`
+ TyParam,
+
+ // Value namespace
+ Fn,
+ Const,
+ /// Constant generic parameter: `struct Foo<const N: usize> { ... }`
+ ConstParam,
+ Static(ast::Mutability),
+ /// Refers to the struct or enum variant's constructor.
+ ///
+ /// The reason `Ctor` exists in addition to [`DefKind::Struct`] and
+ /// [`DefKind::Variant`] is because structs and enum variants exist
+ /// in the *type* namespace, whereas struct and enum variant *constructors*
+ /// exist in the *value* namespace.
+ ///
+ /// You may wonder why enum variants exist in the type namespace as opposed
+ /// to the value namespace. Check out [RFC 2593] for intuition on why that is.
+ ///
+ /// [RFC 2593]: https://github.com/rust-lang/rfcs/pull/2593
+ Ctor(CtorOf, CtorKind),
+ /// Associated function: `impl MyStruct { fn associated() {} }`
+ /// or `trait Foo { fn associated() {} }`
+ AssocFn,
+ /// Associated constant: `trait MyTrait { const ASSOC: usize; }`
+ AssocConst,
+
+ // Macro namespace
+ Macro(MacroKind),
+
+ // Not namespaced (or they are, but we don't treat them so)
+ ExternCrate,
+ Use,
+ /// An `extern` block.
+ ForeignMod,
+ /// Anonymous constant, e.g. the `1 + 2` in `[u8; 1 + 2]`
+ AnonConst,
+ /// An inline constant, e.g. `const { 1 + 2 }`
+ InlineConst,
+ /// Opaque type, aka `impl Trait`.
+ OpaqueTy,
+ Field,
+ /// Lifetime parameter: the `'a` in `struct Foo<'a> { ... }`
+ LifetimeParam,
+ /// A use of `global_asm!`.
+ GlobalAsm,
+ Impl,
+ Closure,
+ Generator,
+}
+
+impl DefKind {
+ pub fn descr(self, def_id: DefId) -> &'static str {
+ match self {
+ DefKind::Fn => "function",
+ DefKind::Mod if def_id.is_crate_root() && !def_id.is_local() => "crate",
+ DefKind::Mod => "module",
+ DefKind::Static(..) => "static",
+ DefKind::Enum => "enum",
+ DefKind::Variant => "variant",
+ DefKind::Ctor(CtorOf::Variant, CtorKind::Fn) => "tuple variant",
+ DefKind::Ctor(CtorOf::Variant, CtorKind::Const) => "unit variant",
+ DefKind::Ctor(CtorOf::Variant, CtorKind::Fictive) => "struct variant",
+ DefKind::Struct => "struct",
+ DefKind::Ctor(CtorOf::Struct, CtorKind::Fn) => "tuple struct",
+ DefKind::Ctor(CtorOf::Struct, CtorKind::Const) => "unit struct",
+ DefKind::Ctor(CtorOf::Struct, CtorKind::Fictive) => {
+ panic!("impossible struct constructor")
+ }
+ DefKind::OpaqueTy => "opaque type",
+ DefKind::TyAlias => "type alias",
+ DefKind::TraitAlias => "trait alias",
+ DefKind::AssocTy => "associated type",
+ DefKind::Union => "union",
+ DefKind::Trait => "trait",
+ DefKind::ForeignTy => "foreign type",
+ DefKind::AssocFn => "associated function",
+ DefKind::Const => "constant",
+ DefKind::AssocConst => "associated constant",
+ DefKind::TyParam => "type parameter",
+ DefKind::ConstParam => "const parameter",
+ DefKind::Macro(macro_kind) => macro_kind.descr(),
+ DefKind::LifetimeParam => "lifetime parameter",
+ DefKind::Use => "import",
+ DefKind::ForeignMod => "foreign module",
+ DefKind::AnonConst => "constant expression",
+ DefKind::InlineConst => "inline constant",
+ DefKind::Field => "field",
+ DefKind::Impl => "implementation",
+ DefKind::Closure => "closure",
+ DefKind::Generator => "generator",
+ DefKind::ExternCrate => "extern crate",
+ DefKind::GlobalAsm => "global assembly block",
+ }
+ }
+
+ /// Gets an English article for the definition.
+ pub fn article(&self) -> &'static str {
+ match *self {
+ DefKind::AssocTy
+ | DefKind::AssocConst
+ | DefKind::AssocFn
+ | DefKind::Enum
+ | DefKind::OpaqueTy
+ | DefKind::Impl
+ | DefKind::Use
+ | DefKind::InlineConst
+ | DefKind::ExternCrate => "an",
+ DefKind::Macro(macro_kind) => macro_kind.article(),
+ _ => "a",
+ }
+ }
+
+ pub fn ns(&self) -> Option<Namespace> {
+ match self {
+ DefKind::Mod
+ | DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Trait
+ | DefKind::OpaqueTy
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy
+ | DefKind::TyParam => Some(Namespace::TypeNS),
+
+ DefKind::Fn
+ | DefKind::Const
+ | DefKind::ConstParam
+ | DefKind::Static(..)
+ | DefKind::Ctor(..)
+ | DefKind::AssocFn
+ | DefKind::AssocConst => Some(Namespace::ValueNS),
+
+ DefKind::Macro(..) => Some(Namespace::MacroNS),
+
+ // Not namespaced.
+ DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::Field
+ | DefKind::LifetimeParam
+ | DefKind::ExternCrate
+ | DefKind::Closure
+ | DefKind::Generator
+ | DefKind::Use
+ | DefKind::ForeignMod
+ | DefKind::GlobalAsm
+ | DefKind::Impl => None,
+ }
+ }
+
+ #[inline]
+ pub fn is_fn_like(self) -> bool {
+ match self {
+ DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Generator => true,
+ _ => false,
+ }
+ }
+
+    /// Whether the `get_codegen_attrs` query should be used with this definition.
+ pub fn has_codegen_attrs(self) -> bool {
+ match self {
+ DefKind::Fn
+ | DefKind::AssocFn
+ | DefKind::Ctor(..)
+ | DefKind::Closure
+ | DefKind::Generator
+ | DefKind::Static(_) => true,
+ DefKind::Mod
+ | DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Trait
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy
+ | DefKind::Const
+ | DefKind::AssocConst
+ | DefKind::Macro(..)
+ | DefKind::Use
+ | DefKind::ForeignMod
+ | DefKind::OpaqueTy
+ | DefKind::Impl
+ | DefKind::Field
+ | DefKind::TyParam
+ | DefKind::ConstParam
+ | DefKind::LifetimeParam
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::GlobalAsm
+ | DefKind::ExternCrate => false,
+ }
+ }
+}
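A sketch of how these helpers compose in diagnostics (the helper is hypothetical; `DefId` comes from the imports at the top of this file):

```rust
/// E.g. "a function", "an enum", "a tuple struct", ...
fn phrase(kind: DefKind, def_id: DefId) -> String {
    format!("{} {}", kind.article(), kind.descr(def_id))
}
```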
+
+/// The resolution of a path or export.
+///
+/// For every path or identifier in Rust, the compiler must determine
+/// what the path refers to. This process is called name resolution,
+/// and `Res` is the primary result of name resolution.
+///
+/// For example, everything prefixed with `/* Res */` in this example has
+/// an associated `Res`:
+///
+/// ```
+/// fn str_to_string(s: & /* Res */ str) -> /* Res */ String {
+/// /* Res */ String::from(/* Res */ s)
+/// }
+///
+/// /* Res */ str_to_string("hello");
+/// ```
+///
+/// The associated `Res`s will be:
+///
+/// - `str` will resolve to [`Res::PrimTy`];
+/// - `String` will resolve to [`Res::Def`], and the `Res` will include the [`DefId`]
+/// for `String` as defined in the standard library;
+/// - `String::from` will also resolve to [`Res::Def`], with the [`DefId`]
+/// pointing to `String::from`;
+/// - `s` will resolve to [`Res::Local`];
+/// - the call to `str_to_string` will resolve to [`Res::Def`], with the [`DefId`]
+/// pointing to the definition of `str_to_string` in the current crate.
+//
+#[derive(Clone, Copy, PartialEq, Eq, Encodable, Decodable, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum Res<Id = hir::HirId> {
+ /// Definition having a unique ID (`DefId`), corresponds to something defined in user code.
+ ///
+ /// **Not bound to a specific namespace.**
+ Def(DefKind, DefId),
+
+ // Type namespace
+ /// A primitive type such as `i32` or `str`.
+ ///
+ /// **Belongs to the type namespace.**
+ PrimTy(hir::PrimTy),
+ /// The `Self` type, optionally with the [`DefId`] of the trait it belongs to and
+ /// optionally with the [`DefId`] of the item introducing the `Self` type alias.
+ ///
+ /// **Belongs to the type namespace.**
+ ///
+ /// Examples:
+ /// ```
+ /// struct Bar(Box<Self>);
+ /// // `Res::SelfTy { trait_: None, alias_to: Some(Bar) }`
+ ///
+ /// trait Foo {
+ /// fn foo() -> Box<Self>;
+ /// // `Res::SelfTy { trait_: Some(Foo), alias_to: None }`
+ /// }
+ ///
+ /// impl Bar {
+ /// fn blah() {
+ /// let _: Self;
+ /// // `Res::SelfTy { trait_: None, alias_to: Some(::{impl#0}) }`
+ /// }
+ /// }
+ ///
+ /// impl Foo for Bar {
+ /// fn foo() -> Box<Self> {
+ /// // `Res::SelfTy { trait_: Some(Foo), alias_to: Some(::{impl#1}) }`
+ /// let _: Self;
+ /// // `Res::SelfTy { trait_: Some(Foo), alias_to: Some(::{impl#1}) }`
+ ///
+ /// todo!()
+ /// }
+ /// }
+ /// ```
+ ///
+ /// *See also [`Res::SelfCtor`].*
+ ///
+ /// -----
+ ///
+ /// HACK(min_const_generics): self types also have an optional requirement to **not** mention
+ /// any generic parameters to allow the following with `min_const_generics`:
+ /// ```
+ /// # struct Foo;
+ /// impl Foo { fn test() -> [u8; std::mem::size_of::<Self>()] { todo!() } }
+ ///
+ /// struct Bar([u8; baz::<Self>()]);
+ /// const fn baz<T>() -> usize { 10 }
+ /// ```
+ /// We do, however, allow `Self` in repeat expressions even when it is generic, so as not to
+ /// break code that already works on stable; such code triggers the
+ /// `const_evaluatable_unchecked` future-compatibility lint:
+ /// ```
+ /// fn foo<T>() {
+ /// let _bar = [1_u8; std::mem::size_of::<*mut T>()];
+ /// }
+ /// ```
+ // FIXME(generic_const_exprs): Remove this bodge once that feature is stable.
+ SelfTy {
+ /// The trait this `Self` is a generic arg for.
+ trait_: Option<DefId>,
+ /// The item introducing the `Self` type alias. Can be used in the `type_of` query
+ /// to get the underlying type. The `bool` additionally records whether the `Self`
+ /// type is disallowed from mentioning generics (i.e. when used in an anonymous constant).
+ alias_to: Option<(DefId, bool)>,
+ },
+ /// A tool attribute module; e.g., the `rustfmt` in `#[rustfmt::skip]`.
+ ///
+ /// **Belongs to the type namespace.**
+ ToolMod,
+
+ // Value namespace
+ /// The `Self` constructor, along with the [`DefId`]
+ /// of the impl it is associated with.
+ ///
+ /// **Belongs to the value namespace.**
+ ///
+ /// *See also [`Res::SelfTy`].*
+ SelfCtor(DefId),
+ /// A local variable or function parameter.
+ ///
+ /// **Belongs to the value namespace.**
+ Local(Id),
+
+ // Macro namespace
+ /// An attribute that is *not* implemented via a macro.
+ /// E.g., `#[inline]` and `#[rustfmt::skip]`, which are essentially directives,
+ /// as opposed to `#[test]`, which is a builtin macro.
+ ///
+ /// **Belongs to the macro namespace.**
+ NonMacroAttr(NonMacroAttrKind), // e.g., `#[inline]` or `#[rustfmt::skip]`
+
+ // All namespaces
+ /// Name resolution failed. We use a dummy `Res` variant so later phases
+ /// of the compiler won't crash and can instead report more errors.
+ ///
+ /// **Not bound to a specific namespace.**
+ Err,
+}
+
+/// The result of resolving a path before lowering to HIR,
+/// with "module" segments resolved and associated item
+/// segments deferred to type checking.
+/// `base_res` is the resolution of the resolved part of the
+/// path; `unresolved_segments` is the number of unresolved
+/// segments.
+///
+/// ```text
+/// module::Type::AssocX::AssocY::MethodOrAssocType
+/// ^~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+/// base_res unresolved_segments = 3
+///
+/// <T as Trait>::AssocX::AssocY::MethodOrAssocType
+/// ^~~~~~~~~~~~~~ ^~~~~~~~~~~~~~~~~~~~~~~~~
+/// base_res unresolved_segments = 2
+/// ```
+#[derive(Copy, Clone, Debug)]
+pub struct PartialRes {
+ base_res: Res<NodeId>,
+ unresolved_segments: usize,
+}
+
+impl PartialRes {
+ #[inline]
+ pub fn new(base_res: Res<NodeId>) -> Self {
+ PartialRes { base_res, unresolved_segments: 0 }
+ }
+
+ #[inline]
+ pub fn with_unresolved_segments(base_res: Res<NodeId>, mut unresolved_segments: usize) -> Self {
+ if base_res == Res::Err {
+ unresolved_segments = 0
+ }
+ PartialRes { base_res, unresolved_segments }
+ }
+
+ #[inline]
+ pub fn base_res(&self) -> Res<NodeId> {
+ self.base_res
+ }
+
+ #[inline]
+ pub fn unresolved_segments(&self) -> usize {
+ self.unresolved_segments
+ }
+}
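+
+// Illustrative sketch (not from upstream): how a partially resolved path
+// such as `module::Type::AssocX::AssocY` would be represented, assuming
+// `ty_res` is the `Res<NodeId>` already obtained for `module::Type`:
+//
+//     let partial = PartialRes::with_unresolved_segments(ty_res, 2);
+//     assert_eq!(partial.unresolved_segments(), 2); // `AssocX::AssocY`
+//
+// If `ty_res` were `Res::Err`, the constructor would reset the count to
+// zero, since nothing meaningful is left to defer to type checking.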
+
+/// Different kinds of symbols can coexist even if they share the same textual name.
+/// Therefore, they each have a separate universe (known as a "namespace").
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub enum Namespace {
+ /// The type namespace includes `struct`s, `enum`s, `union`s, `trait`s, and `mod`s
+ /// (and, by extension, crates).
+ ///
+ /// Note that the type namespace includes other items; this is not an
+ /// exhaustive list.
+ TypeNS,
+ /// The value namespace includes `fn`s, `const`s, `static`s, and local variables (including function arguments).
+ ValueNS,
+ /// The macro namespace includes `macro_rules!` macros, declarative `macro`s,
+ /// procedural macros, attribute macros, `derive` macros, and non-macro attributes
+ /// like `#[inline]` and `#[rustfmt::skip]`.
+ MacroNS,
+}
+
+impl Namespace {
+ /// The English description of the namespace.
+ pub fn descr(self) -> &'static str {
+ match self {
+ Self::TypeNS => "type",
+ Self::ValueNS => "value",
+ Self::MacroNS => "macro",
+ }
+ }
+}
+
+/// Just a helper: a structure holding one value of type `T` per namespace.
+#[derive(Copy, Clone, Default, Debug)]
+pub struct PerNS<T> {
+ pub value_ns: T,
+ pub type_ns: T,
+ pub macro_ns: T,
+}
+
+impl<T> PerNS<T> {
+ pub fn map<U, F: FnMut(T) -> U>(self, mut f: F) -> PerNS<U> {
+ PerNS { value_ns: f(self.value_ns), type_ns: f(self.type_ns), macro_ns: f(self.macro_ns) }
+ }
+
+ pub fn into_iter(self) -> IntoIter<T, 3> {
+ [self.value_ns, self.type_ns, self.macro_ns].into_iter()
+ }
+
+ pub fn iter(&self) -> IntoIter<&T, 3> {
+ [&self.value_ns, &self.type_ns, &self.macro_ns].into_iter()
+ }
+}
+
+impl<T> ::std::ops::Index<Namespace> for PerNS<T> {
+ type Output = T;
+
+ fn index(&self, ns: Namespace) -> &T {
+ match ns {
+ Namespace::ValueNS => &self.value_ns,
+ Namespace::TypeNS => &self.type_ns,
+ Namespace::MacroNS => &self.macro_ns,
+ }
+ }
+}
+
+impl<T> ::std::ops::IndexMut<Namespace> for PerNS<T> {
+ fn index_mut(&mut self, ns: Namespace) -> &mut T {
+ match ns {
+ Namespace::ValueNS => &mut self.value_ns,
+ Namespace::TypeNS => &mut self.type_ns,
+ Namespace::MacroNS => &mut self.macro_ns,
+ }
+ }
+}
+
+impl<T> PerNS<Option<T>> {
+ /// Returns `true` if all the items in this collection are `None`.
+ pub fn is_empty(&self) -> bool {
+ self.type_ns.is_none() && self.value_ns.is_none() && self.macro_ns.is_none()
+ }
+
+ /// Returns an iterator over the items which are `Some`.
+ pub fn present_items(self) -> impl Iterator<Item = T> {
+ [self.type_ns, self.value_ns, self.macro_ns].into_iter().flatten()
+ }
+}
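+
+// Illustrative sketch (not from upstream): `PerNS` is usually filled and
+// read through the `Index`/`IndexMut` impls above, e.g.:
+//
+//     let mut found: PerNS<Option<&'static str>> = PerNS::default();
+//     found[Namespace::TypeNS] = Some("struct");
+//     assert!(!found.is_empty());
+//     assert_eq!(found.present_items().count(), 1);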
+
+impl CtorKind {
+ pub fn from_ast(vdata: &ast::VariantData) -> CtorKind {
+ match *vdata {
+ ast::VariantData::Tuple(..) => CtorKind::Fn,
+ ast::VariantData::Unit(..) => CtorKind::Const,
+ ast::VariantData::Struct(..) => CtorKind::Fictive,
+ }
+ }
+
+ pub fn from_hir(vdata: &hir::VariantData<'_>) -> CtorKind {
+ match *vdata {
+ hir::VariantData::Tuple(..) => CtorKind::Fn,
+ hir::VariantData::Unit(..) => CtorKind::Const,
+ hir::VariantData::Struct(..) => CtorKind::Fictive,
+ }
+ }
+}
+
+impl NonMacroAttrKind {
+ pub fn descr(self) -> &'static str {
+ match self {
+ NonMacroAttrKind::Builtin(..) => "built-in attribute",
+ NonMacroAttrKind::Tool => "tool attribute",
+ NonMacroAttrKind::DeriveHelper | NonMacroAttrKind::DeriveHelperCompat => {
+ "derive helper attribute"
+ }
+ NonMacroAttrKind::Registered => "explicitly registered attribute",
+ }
+ }
+
+ pub fn article(self) -> &'static str {
+ match self {
+ NonMacroAttrKind::Registered => "an",
+ _ => "a",
+ }
+ }
+
+ /// Users of some attributes cannot mark them as used, so they are considered always used.
+ pub fn is_used(self) -> bool {
+ match self {
+ NonMacroAttrKind::Tool
+ | NonMacroAttrKind::DeriveHelper
+ | NonMacroAttrKind::DeriveHelperCompat => true,
+ NonMacroAttrKind::Builtin(..) | NonMacroAttrKind::Registered => false,
+ }
+ }
+}
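+
+// Illustrative sketch (not from upstream): `article` and `descr` are meant
+// to compose in diagnostics; the helper name `describe` is hypothetical.
+//
+//     fn describe(kind: NonMacroAttrKind) -> String {
+//         format!("{} {}", kind.article(), kind.descr())
+//     }
+//
+// This yields e.g. "a tool attribute" for `NonMacroAttrKind::Tool` and
+// "an explicitly registered attribute" for `NonMacroAttrKind::Registered`.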
+
+impl<Id> Res<Id> {
+ /// Return the `DefId` of this `Res` if it has an ID, else panic.
+ pub fn def_id(&self) -> DefId
+ where
+ Id: Debug,
+ {
+ self.opt_def_id()
+ .unwrap_or_else(|| panic!("attempted .def_id() on invalid res: {:?}", self))
+ }
+
+ /// Return `Some(..)` with the `DefId` of this `Res` if it has an ID, else `None`.
+ pub fn opt_def_id(&self) -> Option<DefId> {
+ match *self {
+ Res::Def(_, id) => Some(id),
+
+ Res::Local(..)
+ | Res::PrimTy(..)
+ | Res::SelfTy { .. }
+ | Res::SelfCtor(..)
+ | Res::ToolMod
+ | Res::NonMacroAttr(..)
+ | Res::Err => None,
+ }
+ }
+
+ /// Return the `DefId` of this `Res` if it represents a module.
+ pub fn mod_def_id(&self) -> Option<DefId> {
+ match *self {
+ Res::Def(DefKind::Mod, id) => Some(id),
+ _ => None,
+ }
+ }
+
+ /// A human-readable name for the res kind ("function", "module", etc.).
+ pub fn descr(&self) -> &'static str {
+ match *self {
+ Res::Def(kind, def_id) => kind.descr(def_id),
+ Res::SelfCtor(..) => "self constructor",
+ Res::PrimTy(..) => "builtin type",
+ Res::Local(..) => "local variable",
+ Res::SelfTy { .. } => "self type",
+ Res::ToolMod => "tool module",
+ Res::NonMacroAttr(attr_kind) => attr_kind.descr(),
+ Res::Err => "unresolved item",
+ }
+ }
+
+ /// Gets an English article for the `Res`.
+ pub fn article(&self) -> &'static str {
+ match *self {
+ Res::Def(kind, _) => kind.article(),
+ Res::NonMacroAttr(kind) => kind.article(),
+ Res::Err => "an",
+ _ => "a",
+ }
+ }
+
+ pub fn map_id<R>(self, mut map: impl FnMut(Id) -> R) -> Res<R> {
+ match self {
+ Res::Def(kind, id) => Res::Def(kind, id),
+ Res::SelfCtor(id) => Res::SelfCtor(id),
+ Res::PrimTy(id) => Res::PrimTy(id),
+ Res::Local(id) => Res::Local(map(id)),
+ Res::SelfTy { trait_, alias_to } => Res::SelfTy { trait_, alias_to },
+ Res::ToolMod => Res::ToolMod,
+ Res::NonMacroAttr(attr_kind) => Res::NonMacroAttr(attr_kind),
+ Res::Err => Res::Err,
+ }
+ }
+
+ pub fn apply_id<R, E>(self, mut map: impl FnMut(Id) -> Result<R, E>) -> Result<Res<R>, E> {
+ Ok(match self {
+ Res::Def(kind, id) => Res::Def(kind, id),
+ Res::SelfCtor(id) => Res::SelfCtor(id),
+ Res::PrimTy(id) => Res::PrimTy(id),
+ Res::Local(id) => Res::Local(map(id)?),
+ Res::SelfTy { trait_, alias_to } => Res::SelfTy { trait_, alias_to },
+ Res::ToolMod => Res::ToolMod,
+ Res::NonMacroAttr(attr_kind) => Res::NonMacroAttr(attr_kind),
+ Res::Err => Res::Err,
+ })
+ }
+
+ #[track_caller]
+ pub fn expect_non_local<OtherId>(self) -> Res<OtherId> {
+ self.map_id(
+ #[track_caller]
+ |_| panic!("unexpected `Res::Local`"),
+ )
+ }
+
+ pub fn macro_kind(self) -> Option<MacroKind> {
+ match self {
+ Res::Def(DefKind::Macro(kind), _) => Some(kind),
+ Res::NonMacroAttr(..) => Some(MacroKind::Attr),
+ _ => None,
+ }
+ }
+
+ /// Returns `None` if this is `Res::Err`
+ pub fn ns(&self) -> Option<Namespace> {
+ match self {
+ Res::Def(kind, ..) => kind.ns(),
+ Res::PrimTy(..) | Res::SelfTy { .. } | Res::ToolMod => Some(Namespace::TypeNS),
+ Res::SelfCtor(..) | Res::Local(..) => Some(Namespace::ValueNS),
+ Res::NonMacroAttr(..) => Some(Namespace::MacroNS),
+ Res::Err => None,
+ }
+ }
+
+ /// Always returns `true` if `self` is `Res::Err`
+ pub fn matches_ns(&self, ns: Namespace) -> bool {
+ self.ns().map_or(true, |actual_ns| actual_ns == ns)
+ }
+
+ /// Returns whether such a resolved path can occur in a tuple struct/variant pattern
+ pub fn expected_in_tuple_struct_pat(&self) -> bool {
+ matches!(self, Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) | Res::SelfCtor(..))
+ }
+
+ /// Returns whether such a resolved path can occur in a unit struct/variant pattern
+ pub fn expected_in_unit_struct_pat(&self) -> bool {
+ matches!(self, Res::Def(DefKind::Ctor(_, CtorKind::Const), _) | Res::SelfCtor(..))
+ }
+}
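+
+// Illustrative sketch (not from upstream): `map_id` is what lets the
+// resolver hand its `Res<NodeId>` values to HIR lowering, which wants
+// `Res<hir::HirId>`. Assuming an `ast_res: Res<NodeId>` and a lowering
+// context `ctx` with a hypothetical `node_id_to_hir_id` method:
+//
+//     let hir_res: Res = ast_res.map_id(|id| ctx.node_id_to_hir_id(id));
+//
+// Only `Res::Local` actually carries an `Id`; every other variant is
+// passed through unchanged.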
+
+/// Resolution for a lifetime appearing in a type.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum LifetimeRes {
+ /// Successfully linked the lifetime to a generic parameter.
+ Param {
+ /// Id of the generic parameter that introduced it.
+ param: LocalDefId,
+ /// Id of the introducing place. That can be:
+ /// - an item's id, for the item's generic parameters;
+ /// - a TraitRef's ref_id, identifying the `for<...>` binder;
+ /// - a BareFn type's id.
+ ///
+ /// This information is used for impl-trait lifetime captures, to know whether or not
+ /// to capture any given lifetime.
+ binder: NodeId,
+ },
+ /// Created a generic parameter for an anonymous lifetime.
+ Fresh {
+ /// Id of the generic parameter that introduced it.
+ ///
+ /// Creating the associated `LocalDefId` is the responsibility of lowering.
+ param: NodeId,
+ /// Id of the introducing place. See `Param`.
+ binder: NodeId,
+ },
+ /// This variant is used for anonymous lifetimes that we did not resolve during
+ /// late resolution. Those lifetimes will be inferred by typechecking.
+ Infer,
+ /// Explicit `'static` lifetime.
+ Static,
+ /// Resolution failure.
+ Error,
+ /// HACK: This is used to recover the NodeId of an elided lifetime.
+ ElidedAnchor { start: NodeId, end: NodeId },
+}
diff --git a/compiler/rustc_hir/src/def_path_hash_map.rs b/compiler/rustc_hir/src/def_path_hash_map.rs
new file mode 100644
index 000000000..8bfb47af2
--- /dev/null
+++ b/compiler/rustc_hir/src/def_path_hash_map.rs
@@ -0,0 +1,37 @@
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_span::def_id::{DefIndex, DefPathHash};
+
+#[derive(Clone, Default)]
+pub struct Config;
+
+impl odht::Config for Config {
+ type Key = DefPathHash;
+ type Value = DefIndex;
+
+ type EncodedKey = [u8; 16];
+ type EncodedValue = [u8; 4];
+
+ type H = odht::UnHashFn;
+
+ #[inline]
+ fn encode_key(k: &DefPathHash) -> [u8; 16] {
+ k.0.to_le_bytes()
+ }
+
+ #[inline]
+ fn encode_value(v: &DefIndex) -> [u8; 4] {
+ v.as_u32().to_le_bytes()
+ }
+
+ #[inline]
+ fn decode_key(k: &[u8; 16]) -> DefPathHash {
+ DefPathHash(Fingerprint::from_le_bytes(*k))
+ }
+
+ #[inline]
+ fn decode_value(v: &[u8; 4]) -> DefIndex {
+ DefIndex::from_u32(u32::from_le_bytes(*v))
+ }
+}
+
+pub type DefPathHashMap = odht::HashTableOwned<Config>;
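+
+// Illustrative sketch (not from upstream): the encode/decode pairs above
+// are inverses, which is what lets the on-disk hash table round-trip
+// entries losslessly. With the `odht::Config` trait in scope:
+//
+//     let v = DefIndex::from_u32(42);
+//     assert_eq!(Config::decode_value(&Config::encode_value(&v)), v);
+//
+// and likewise `decode_key(encode_key(k)) == k` for any `DefPathHash`.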
diff --git a/compiler/rustc_hir/src/definitions.rs b/compiler/rustc_hir/src/definitions.rs
new file mode 100644
index 000000000..c2c551e78
--- /dev/null
+++ b/compiler/rustc_hir/src/definitions.rs
@@ -0,0 +1,440 @@
+//! For each definition, we track the following data. A definition
+//! here is defined somewhat circularly as "something with a `DefId`",
+//! but it generally corresponds to things like structs, enums, etc.
+//! There are also some rather random cases (like const initializer
+//! expressions) that are mostly just leftovers.
+
+pub use crate::def_id::DefPathHash;
+use crate::def_id::{CrateNum, DefIndex, LocalDefId, StableCrateId, CRATE_DEF_INDEX, LOCAL_CRATE};
+use crate::def_path_hash_map::DefPathHashMap;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::StableHasher;
+use rustc_index::vec::IndexVec;
+use rustc_span::symbol::{kw, sym, Symbol};
+
+use std::fmt::{self, Write};
+use std::hash::Hash;
+use tracing::debug;
+
+/// The `DefPathTable` maps `DefIndex`es to `DefKey`s and vice versa.
+/// Internally the `DefPathTable` holds a tree of `DefKey`s, where each `DefKey`
+/// stores the `DefIndex` of its parent.
+/// There is one `DefPathTable` for each crate.
+#[derive(Clone, Default, Debug)]
+pub struct DefPathTable {
+ index_to_key: IndexVec<DefIndex, DefKey>,
+ def_path_hashes: IndexVec<DefIndex, DefPathHash>,
+ def_path_hash_to_index: DefPathHashMap,
+}
+
+impl DefPathTable {
+ fn allocate(&mut self, key: DefKey, def_path_hash: DefPathHash) -> DefIndex {
+ let index = {
+ let index = DefIndex::from(self.index_to_key.len());
+ debug!("DefPathTable::insert() - {:?} <-> {:?}", key, index);
+ self.index_to_key.push(key);
+ index
+ };
+ self.def_path_hashes.push(def_path_hash);
+ debug_assert!(self.def_path_hashes.len() == self.index_to_key.len());
+
+ // Check for hash collisions of DefPathHashes. These should be
+ // exceedingly rare.
+ if let Some(existing) = self.def_path_hash_to_index.insert(&def_path_hash, &index) {
+ let def_path1 = DefPath::make(LOCAL_CRATE, existing, |idx| self.def_key(idx));
+ let def_path2 = DefPath::make(LOCAL_CRATE, index, |idx| self.def_key(idx));
+
+ // Continuing with colliding DefPathHashes can lead to correctness
+ // issues. We must abort compilation.
+ //
+ // The likelihood of such a collision is very small, so actually
+ // running into one could be indicative of a poor hash function
+ // being used.
+ //
+ // See the documentation for DefPathHash for more information.
+ panic!(
+ "found DefPathHash collision between {:?} and {:?}. \
+ Compilation cannot continue.",
+ def_path1, def_path2
+ );
+ }
+
+ // Assert that all DefPathHashes correctly contain the local crate's
+ // StableCrateId
+ #[cfg(debug_assertions)]
+ if let Some(root) = self.def_path_hashes.get(CRATE_DEF_INDEX) {
+ assert!(def_path_hash.stable_crate_id() == root.stable_crate_id());
+ }
+
+ index
+ }
+
+ #[inline(always)]
+ pub fn def_key(&self, index: DefIndex) -> DefKey {
+ self.index_to_key[index]
+ }
+
+ #[inline(always)]
+ pub fn def_path_hash(&self, index: DefIndex) -> DefPathHash {
+ let hash = self.def_path_hashes[index];
+ debug!("def_path_hash({:?}) = {:?}", index, hash);
+ hash
+ }
+
+ pub fn enumerated_keys_and_path_hashes(
+ &self,
+ ) -> impl Iterator<Item = (DefIndex, &DefKey, &DefPathHash)> + ExactSizeIterator + '_ {
+ self.index_to_key
+ .iter_enumerated()
+ .map(move |(index, key)| (index, key, &self.def_path_hashes[index]))
+ }
+}
+
+/// The definition table containing node definitions.
+/// It holds the `DefPathTable` for `LocalDefId`s/`DefPath`s.
+/// It also stores mappings to convert `LocalDefId`s to/from `HirId`s.
+#[derive(Clone, Debug)]
+pub struct Definitions {
+ table: DefPathTable,
+ next_disambiguator: FxHashMap<(LocalDefId, DefPathData), u32>,
+
+ /// The [StableCrateId] of the local crate.
+ stable_crate_id: StableCrateId,
+}
+
+/// A unique identifier that we can use to lookup a definition
+/// precisely. It combines the index of the definition's parent (if
+/// any) with a `DisambiguatedDefPathData`.
+#[derive(Copy, Clone, PartialEq, Debug, Encodable, Decodable)]
+pub struct DefKey {
+ /// The parent path.
+ pub parent: Option<DefIndex>,
+
+ /// The identifier of this node.
+ pub disambiguated_data: DisambiguatedDefPathData,
+}
+
+impl DefKey {
+ pub(crate) fn compute_stable_hash(&self, parent: DefPathHash) -> DefPathHash {
+ let mut hasher = StableHasher::new();
+
+ parent.hash(&mut hasher);
+
+ let DisambiguatedDefPathData { ref data, disambiguator } = self.disambiguated_data;
+
+ std::mem::discriminant(data).hash(&mut hasher);
+ if let Some(name) = data.get_opt_name() {
+ // Get a stable hash by considering the symbol chars rather than
+ // the symbol index.
+ name.as_str().hash(&mut hasher);
+ }
+
+ disambiguator.hash(&mut hasher);
+
+ let local_hash: u64 = hasher.finish();
+
+ // Construct the new DefPathHash, making sure that the `crate_id`
+ // portion of the hash is properly copied from the parent. This way the
+ // `crate_id` part will be recursively propagated from the root to all
+ // DefPathHashes in this DefPathTable.
+ DefPathHash::new(parent.stable_crate_id(), local_hash)
+ }
+
+ #[inline]
+ pub fn get_opt_name(&self) -> Option<Symbol> {
+ self.disambiguated_data.data.get_opt_name()
+ }
+}
+
+/// A pair of `DefPathData` and an integer disambiguator. The integer is
+/// normally `0`, but in the event that there are multiple defs with the
+/// same `parent` and `data`, we use this field to disambiguate
+/// between them. This introduces some artificial ordering dependency
+/// but means that if you have, e.g., two impls for the same type in
+/// the same module, they do get distinct `DefId`s.
+#[derive(Copy, Clone, PartialEq, Debug, Encodable, Decodable)]
+pub struct DisambiguatedDefPathData {
+ pub data: DefPathData,
+ pub disambiguator: u32,
+}
+
+impl DisambiguatedDefPathData {
+ pub fn fmt_maybe_verbose(&self, writer: &mut impl Write, verbose: bool) -> fmt::Result {
+ match self.data.name() {
+ DefPathDataName::Named(name) => {
+ if verbose && self.disambiguator != 0 {
+ write!(writer, "{}#{}", name, self.disambiguator)
+ } else {
+ writer.write_str(name.as_str())
+ }
+ }
+ DefPathDataName::Anon { namespace } => {
+ write!(writer, "{{{}#{}}}", namespace, self.disambiguator)
+ }
+ }
+ }
+}
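+
+// Illustrative sketch (not from upstream): given some
+// `data: DisambiguatedDefPathData` and a `String` buffer `s`:
+//
+//     data.fmt_maybe_verbose(&mut s, true).unwrap();  // named: "foo#2", anon: "{impl#1}"
+//     data.fmt_maybe_verbose(&mut s, false).unwrap(); // named: "foo",   anon: "{impl#1}"
+//
+// i.e. the `#N` suffix is shown for named data only when `verbose` is set
+// and the disambiguator is nonzero, but is always shown for anonymous data
+// such as impls.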
+
+impl fmt::Display for DisambiguatedDefPathData {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.fmt_maybe_verbose(f, true)
+ }
+}
+
+#[derive(Clone, Debug, Encodable, Decodable)]
+pub struct DefPath {
+ /// The path leading from the crate root to the item.
+ pub data: Vec<DisambiguatedDefPathData>,
+
+ /// The crate root this path is relative to.
+ pub krate: CrateNum,
+}
+
+impl DefPath {
+ pub fn make<FN>(krate: CrateNum, start_index: DefIndex, mut get_key: FN) -> DefPath
+ where
+ FN: FnMut(DefIndex) -> DefKey,
+ {
+ let mut data = vec![];
+ let mut index = Some(start_index);
+ loop {
+ debug!("DefPath::make: krate={:?} index={:?}", krate, index);
+ let p = index.unwrap();
+ let key = get_key(p);
+ debug!("DefPath::make: key={:?}", key);
+ match key.disambiguated_data.data {
+ DefPathData::CrateRoot => {
+ assert!(key.parent.is_none());
+ break;
+ }
+ _ => {
+ data.push(key.disambiguated_data);
+ index = key.parent;
+ }
+ }
+ }
+ data.reverse();
+ DefPath { data, krate }
+ }
+
+ /// Returns a string representation of the `DefPath` without
+ /// the crate-prefix. This method is useful if you don't have
+ /// a `TyCtxt` available.
+ pub fn to_string_no_crate_verbose(&self) -> String {
+ let mut s = String::with_capacity(self.data.len() * 16);
+
+ for component in &self.data {
+ write!(s, "::{}", component).unwrap();
+ }
+
+ s
+ }
+
+ /// Returns a filename-friendly string of the `DefPath`, without
+ /// the crate-prefix. This method is useful if you don't have
+ /// a `TyCtxt` available.
+ pub fn to_filename_friendly_no_crate(&self) -> String {
+ let mut s = String::with_capacity(self.data.len() * 16);
+
+ let mut opt_delimiter = None;
+ for component in &self.data {
+ s.extend(opt_delimiter);
+ opt_delimiter = Some('-');
+ write!(s, "{}", component).unwrap();
+ }
+
+ s
+ }
+}
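+
+// Illustrative sketch (not from upstream): `DefPath::make` follows parent
+// links until it reaches `CrateRoot`. For a table where index 2 is `foo`
+// (parent: root) and index 5 is `bar` (parent: 2), with `five` being the
+// `DefIndex` for 5 (names hypothetical):
+//
+//     let path = DefPath::make(LOCAL_CRATE, five, |idx| table.def_key(idx));
+//     assert_eq!(path.to_string_no_crate_verbose(), "::foo::bar");
+//
+// The components are collected leaf-first and then reversed, so `path.data`
+// ends up ordered root-to-leaf.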
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
+pub enum DefPathData {
+ // Root: these should only be used for the root nodes, because
+ // they are treated specially by the `def_path` function.
+ /// The crate root (marker).
+ CrateRoot,
+
+ // Different kinds of items and item-like things:
+ /// An impl.
+ Impl,
+ /// An `extern` block.
+ ForeignMod,
+ /// A `use` item.
+ Use,
+ /// A global asm item.
+ GlobalAsm,
+ /// Something in the type namespace.
+ TypeNs(Symbol),
+ /// Something in the value namespace.
+ ValueNs(Symbol),
+ /// Something in the macro namespace.
+ MacroNs(Symbol),
+ /// Something in the lifetime namespace.
+ LifetimeNs(Symbol),
+ /// A closure expression.
+ ClosureExpr,
+
+ // Subportions of items:
+ /// Implicit constructor for a unit or tuple-like struct or enum variant.
+ Ctor,
+ /// A constant expression (see `{ast,hir}::AnonConst`).
+ AnonConst,
+ /// An `impl Trait` type node.
+ ImplTrait,
+}
+
+impl Definitions {
+ pub fn def_path_table(&self) -> &DefPathTable {
+ &self.table
+ }
+
+ /// Gets the number of definitions.
+ pub fn def_index_count(&self) -> usize {
+ self.table.index_to_key.len()
+ }
+
+ #[inline]
+ pub fn def_key(&self, id: LocalDefId) -> DefKey {
+ self.table.def_key(id.local_def_index)
+ }
+
+ #[inline(always)]
+ pub fn def_path_hash(&self, id: LocalDefId) -> DefPathHash {
+ self.table.def_path_hash(id.local_def_index)
+ }
+
+ /// Returns the path from the crate root to `index`. The root
+ /// nodes are not included in the path (i.e., this will be an
+ /// empty vector for the crate root). For an inlined item, this
+ /// will be the path of the item in the external crate (but the
+ /// path will begin with the path to the external crate).
+ pub fn def_path(&self, id: LocalDefId) -> DefPath {
+ DefPath::make(LOCAL_CRATE, id.local_def_index, |index| {
+ self.def_key(LocalDefId { local_def_index: index })
+ })
+ }
+
+ /// Adds a root definition (no parent) and a few other reserved definitions.
+ pub fn new(stable_crate_id: StableCrateId) -> Definitions {
+ let key = DefKey {
+ parent: None,
+ disambiguated_data: DisambiguatedDefPathData {
+ data: DefPathData::CrateRoot,
+ disambiguator: 0,
+ },
+ };
+
+ let parent_hash = DefPathHash::new(stable_crate_id, 0);
+ let def_path_hash = key.compute_stable_hash(parent_hash);
+
+ // Create the root definition.
+ let mut table = DefPathTable::default();
+ let root = LocalDefId { local_def_index: table.allocate(key, def_path_hash) };
+ assert_eq!(root.local_def_index, CRATE_DEF_INDEX);
+
+ Definitions { table, next_disambiguator: Default::default(), stable_crate_id }
+ }
+
+ /// Adds a definition with a parent definition.
+ pub fn create_def(&mut self, parent: LocalDefId, data: DefPathData) -> LocalDefId {
+ // We can't use `Debug` implementation for `LocalDefId` here, since it tries to acquire a
+ // reference to `Definitions` and we're already holding a mutable reference.
+ debug!(
+ "create_def(parent={}, data={data:?})",
+ self.def_path(parent).to_string_no_crate_verbose(),
+ );
+
+ // The root node must be created by `Definitions::new()`, never here.
+ assert!(data != DefPathData::CrateRoot);
+
+ // Find the next free disambiguator for this key.
+ let disambiguator = {
+ let next_disamb = self.next_disambiguator.entry((parent, data)).or_insert(0);
+ let disambiguator = *next_disamb;
+ *next_disamb = next_disamb.checked_add(1).expect("disambiguator overflow");
+ disambiguator
+ };
+ let key = DefKey {
+ parent: Some(parent.local_def_index),
+ disambiguated_data: DisambiguatedDefPathData { data, disambiguator },
+ };
+
+ let parent_hash = self.table.def_path_hash(parent.local_def_index);
+ let def_path_hash = key.compute_stable_hash(parent_hash);
+
+ debug!("create_def: after disambiguation, key = {:?}", key);
+
+ // Create the definition.
+ LocalDefId { local_def_index: self.table.allocate(key, def_path_hash) }
+ }
+
+ pub fn iter_local_def_id(&self) -> impl Iterator<Item = LocalDefId> + '_ {
+ self.table.def_path_hashes.indices().map(|local_def_index| LocalDefId { local_def_index })
+ }
+
+ #[inline(always)]
+ pub fn local_def_path_hash_to_def_id(
+ &self,
+ hash: DefPathHash,
+ err: &mut dyn FnMut() -> !,
+ ) -> LocalDefId {
+ debug_assert!(hash.stable_crate_id() == self.stable_crate_id);
+ self.table
+ .def_path_hash_to_index
+ .get(&hash)
+ .map(|local_def_index| LocalDefId { local_def_index })
+ .unwrap_or_else(|| err())
+ }
+
+ pub fn def_path_hash_to_def_index_map(&self) -> &DefPathHashMap {
+ &self.table.def_path_hash_to_index
+ }
+}
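+
+// Illustrative sketch (not from upstream): typical construction, assuming
+// a `stable_crate_id: StableCrateId` is at hand:
+//
+//     let mut defs = Definitions::new(stable_crate_id);
+//     let root = LocalDefId { local_def_index: CRATE_DEF_INDEX };
+//     let foo = defs.create_def(root, DefPathData::TypeNs(Symbol::intern("Foo")));
+//     assert_eq!(defs.def_key(foo).parent, Some(CRATE_DEF_INDEX));
+//
+// A second `create_def` with the same parent and data would receive
+// disambiguator 1, keeping the two definitions (and their hashes) distinct.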
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum DefPathDataName {
+ Named(Symbol),
+ Anon { namespace: Symbol },
+}
+
+impl DefPathData {
+ pub fn get_opt_name(&self) -> Option<Symbol> {
+ use self::DefPathData::*;
+ match *self {
+ TypeNs(name) | ValueNs(name) | MacroNs(name) | LifetimeNs(name) => Some(name),
+
+ Impl | ForeignMod | CrateRoot | Use | GlobalAsm | ClosureExpr | Ctor | AnonConst
+ | ImplTrait => None,
+ }
+ }
+
+ pub fn name(&self) -> DefPathDataName {
+ use self::DefPathData::*;
+ match *self {
+ TypeNs(name) | ValueNs(name) | MacroNs(name) | LifetimeNs(name) => {
+ DefPathDataName::Named(name)
+ }
+ // Note that this does not show up in user print-outs.
+ CrateRoot => DefPathDataName::Anon { namespace: kw::Crate },
+ Impl => DefPathDataName::Anon { namespace: kw::Impl },
+ ForeignMod => DefPathDataName::Anon { namespace: kw::Extern },
+ Use => DefPathDataName::Anon { namespace: kw::Use },
+ GlobalAsm => DefPathDataName::Anon { namespace: sym::global_asm },
+ ClosureExpr => DefPathDataName::Anon { namespace: sym::closure },
+ Ctor => DefPathDataName::Anon { namespace: sym::constructor },
+ AnonConst => DefPathDataName::Anon { namespace: sym::constant },
+ ImplTrait => DefPathDataName::Anon { namespace: sym::opaque },
+ }
+ }
+}
+
+impl fmt::Display for DefPathData {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.name() {
+ DefPathDataName::Named(name) => f.write_str(name.as_str()),
+ // FIXME(#70334): this will generate legacy {{closure}}, {{impl}}, etc
+ DefPathDataName::Anon { namespace } => write!(f, "{{{{{}}}}}", namespace),
+ }
+ }
+}
diff --git a/compiler/rustc_hir/src/diagnostic_items.rs b/compiler/rustc_hir/src/diagnostic_items.rs
new file mode 100644
index 000000000..243014b00
--- /dev/null
+++ b/compiler/rustc_hir/src/diagnostic_items.rs
@@ -0,0 +1,17 @@
+use crate::def_id::DefId;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_span::Symbol;
+
+#[derive(Debug, Default)]
+pub struct DiagnosticItems {
+ pub id_to_name: FxHashMap<DefId, Symbol>,
+ pub name_to_id: FxHashMap<Symbol, DefId>,
+}
+
+impl<CTX: crate::HashStableContext> HashStable<CTX> for DiagnosticItems {
+ #[inline]
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.name_to_id.hash_stable(ctx, hasher);
+ }
+}
diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs
new file mode 100644
index 000000000..617433a98
--- /dev/null
+++ b/compiler/rustc_hir/src/hir.rs
@@ -0,0 +1,3506 @@
+use crate::def::{CtorKind, DefKind, Res};
+use crate::def_id::DefId;
+pub(crate) use crate::hir_id::{HirId, ItemLocalId};
+use crate::intravisit::FnKind;
+use crate::LangItem;
+
+use rustc_ast as ast;
+use rustc_ast::util::parser::ExprPrecedence;
+use rustc_ast::{Attribute, FloatTy, IntTy, Label, LitKind, TraitObjectSyntax, UintTy};
+pub use rustc_ast::{BorrowKind, ImplPolarity, IsAuto};
+pub use rustc_ast::{CaptureBy, Movability, Mutability};
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_error_messages::MultiSpan;
+use rustc_index::vec::IndexVec;
+use rustc_macros::HashStable_Generic;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{def_id::LocalDefId, BytePos, Span, DUMMY_SP};
+use rustc_target::asm::InlineAsmRegOrRegClass;
+use rustc_target::spec::abi::Abi;
+
+use smallvec::SmallVec;
+use std::fmt;
+
+#[derive(Debug, Copy, Clone, Encodable, HashStable_Generic)]
+pub struct Lifetime {
+ pub hir_id: HirId,
+ pub span: Span,
+
+ /// Either "`'a`", referring to a named lifetime definition,
+ /// or "``" (i.e., `kw::Empty`), for elision placeholders.
+ ///
+ /// HIR lowering inserts these placeholders in type paths that
+ /// refer to type definitions needing lifetime parameters,
+ /// `&T` and `&mut T`, and trait objects without `... + 'a`.
+ pub name: LifetimeName,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Encodable, Hash, Copy)]
+#[derive(HashStable_Generic)]
+pub enum ParamName {
+ /// Some user-given name like `T` or `'x`.
+ Plain(Ident),
+
+ /// Synthetic name generated when user elided a lifetime in an impl header.
+ ///
+ /// E.g., the lifetimes in cases like these:
+ /// ```ignore (fragment)
+ /// impl Foo for &u32
+ /// impl Foo<'_> for u32
+ /// ```
+ /// in that case, we rewrite to
+ /// ```ignore (fragment)
+ /// impl<'f> Foo for &'f u32
+ /// impl<'f> Foo<'f> for u32
+ /// ```
+ /// where `'f` is a fresh, compiler-generated name (`ParamName::Fresh`),
+ /// unique within the impl.
+ Fresh,
+
+ /// Indicates an illegal name was given and an error has been
+ /// reported (so we should squelch other derived errors). Occurs
+ /// when, e.g., `'_` is used in the wrong place.
+ Error,
+}
+
+impl ParamName {
+ pub fn ident(&self) -> Ident {
+ match *self {
+ ParamName::Plain(ident) => ident,
+ ParamName::Fresh | ParamName::Error => Ident::with_dummy_span(kw::UnderscoreLifetime),
+ }
+ }
+
+ pub fn normalize_to_macros_2_0(&self) -> ParamName {
+ match *self {
+ ParamName::Plain(ident) => ParamName::Plain(ident.normalize_to_macros_2_0()),
+ param_name => param_name,
+ }
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Encodable, Hash, Copy)]
+#[derive(HashStable_Generic)]
+pub enum LifetimeName {
+ /// User-given names or fresh (synthetic) names.
+ Param(LocalDefId, ParamName),
+
+ /// Implicit lifetime in a context like `dyn Foo`. This is
+ /// distinguished from implicit lifetimes elsewhere because the
+ /// lifetime that they default to must appear elsewhere within the
+ /// enclosing type. This means that, in an `impl Trait` context, we
+ /// don't have to create a parameter for them. That is, `impl
+ /// Trait<Item = &u32>` expands to an opaque type like `type
+ /// Foo<'a> = impl Trait<Item = &'a u32>`, but `impl Trait<Item =
+ /// dyn Bar>` expands to `type Foo = impl Trait<Item = dyn Bar +
+ /// 'static>`. The latter uses `ImplicitObjectLifetimeDefault` so
+ /// that surrounding code knows not to create a lifetime
+ /// parameter.
+ ImplicitObjectLifetimeDefault,
+
+ /// Indicates an error during lowering (usually `'_` in wrong place)
+ /// that was already reported.
+ Error,
+
+ /// User wrote an anonymous lifetime, either `'_` or nothing.
+ /// The semantics of this lifetime should be inferred by typechecking code.
+ Infer,
+
+ /// User wrote `'static`.
+ Static,
+}
+
+impl LifetimeName {
+ pub fn ident(&self) -> Ident {
+ match *self {
+ LifetimeName::ImplicitObjectLifetimeDefault | LifetimeName::Error => Ident::empty(),
+ LifetimeName::Infer => Ident::with_dummy_span(kw::UnderscoreLifetime),
+ LifetimeName::Static => Ident::with_dummy_span(kw::StaticLifetime),
+ LifetimeName::Param(_, param_name) => param_name.ident(),
+ }
+ }
+
+ pub fn is_anonymous(&self) -> bool {
+ match *self {
+ LifetimeName::ImplicitObjectLifetimeDefault
+ | LifetimeName::Infer
+ | LifetimeName::Param(_, ParamName::Fresh)
+ | LifetimeName::Error => true,
+ LifetimeName::Static | LifetimeName::Param(..) => false,
+ }
+ }
+
+ pub fn is_elided(&self) -> bool {
+ match self {
+ LifetimeName::ImplicitObjectLifetimeDefault | LifetimeName::Infer => true,
+
+ // It might seem surprising that `Fresh` counts as
+ // *not* elided -- but this is because, as far as the code
+ // in the compiler is concerned -- `Fresh` variants act
+ // equivalently to "some fresh name". They correspond to
+ // early-bound regions on an impl, in other words.
+ LifetimeName::Error | LifetimeName::Param(..) | LifetimeName::Static => false,
+ }
+ }
+
+ fn is_static(&self) -> bool {
+ self == &LifetimeName::Static
+ }
+
+ pub fn normalize_to_macros_2_0(&self) -> LifetimeName {
+ match *self {
+ LifetimeName::Param(def_id, param_name) => {
+ LifetimeName::Param(def_id, param_name.normalize_to_macros_2_0())
+ }
+ lifetime_name => lifetime_name,
+ }
+ }
+}
+
+impl fmt::Display for Lifetime {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.name.ident().fmt(f)
+ }
+}
+
+impl Lifetime {
+ pub fn is_elided(&self) -> bool {
+ self.name.is_elided()
+ }
+
+ pub fn is_static(&self) -> bool {
+ self.name.is_static()
+ }
+}
+
+/// A `Path` is essentially Rust's notion of a name; for instance,
+/// `std::cmp::PartialEq`. It's represented as a sequence of identifiers,
+/// along with a bunch of supporting information.
+#[derive(Debug, HashStable_Generic)]
+pub struct Path<'hir> {
+ pub span: Span,
+ /// The resolution for the path.
+ pub res: Res,
+ /// The segments in the path: the things separated by `::`.
+ pub segments: &'hir [PathSegment<'hir>],
+}
+
+impl Path<'_> {
+ pub fn is_global(&self) -> bool {
+ !self.segments.is_empty() && self.segments[0].ident.name == kw::PathRoot
+ }
+}
+
+/// A segment of a path: an identifier, an optional lifetime, and a set of
+/// types.
+#[derive(Debug, HashStable_Generic)]
+pub struct PathSegment<'hir> {
+ /// The identifier portion of this path segment.
+ pub ident: Ident,
+ // `id` and `res` are optional. We currently only use these in save-analysis,
+ // any path segments without these will not have save-analysis info and
+ // therefore will not have 'jump to def' in IDEs, but otherwise will not be
+ // affected. (In general, we don't bother to get the defs for synthesized
+ // segments, only for segments which have come from the AST).
+ pub hir_id: Option<HirId>,
+ pub res: Option<Res>,
+
+ /// Type/lifetime parameters attached to this path. They come in
+ /// two flavors: `Path<A,B,C>` and `Path(A,B) -> C`. Note that
+ /// this is more than just simple syntactic sugar; the use of
+ /// parens affects the region binding rules, so we preserve the
+ /// distinction.
+ pub args: Option<&'hir GenericArgs<'hir>>,
+
+ /// Whether to infer remaining type parameters, if any.
+ /// This only applies to expression and pattern paths, and
+ /// out of those only the segments with no type parameters
+ /// to begin with, e.g., `Vec::new` is `<Vec<..>>::new::<..>`.
+ pub infer_args: bool,
+}
+
+impl<'hir> PathSegment<'hir> {
+ /// Converts an identifier to the corresponding segment.
+ pub fn from_ident(ident: Ident) -> PathSegment<'hir> {
+ PathSegment { ident, hir_id: None, res: None, infer_args: true, args: None }
+ }
+
+ pub fn invalid() -> Self {
+ Self::from_ident(Ident::empty())
+ }
+
+ pub fn args(&self) -> &GenericArgs<'hir> {
+ if let Some(ref args) = self.args {
+ args
+ } else {
+ const DUMMY: &GenericArgs<'_> = &GenericArgs::none();
+ DUMMY
+ }
+ }
+}
+
+#[derive(Encodable, Debug, HashStable_Generic)]
+pub struct ConstArg {
+ pub value: AnonConst,
+ pub span: Span,
+}
+
+#[derive(Encodable, Debug, HashStable_Generic)]
+pub struct InferArg {
+ pub hir_id: HirId,
+ pub span: Span,
+}
+
+impl InferArg {
+ pub fn to_ty(&self) -> Ty<'_> {
+ Ty { kind: TyKind::Infer, span: self.span, hir_id: self.hir_id }
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub enum GenericArg<'hir> {
+ Lifetime(Lifetime),
+ Type(Ty<'hir>),
+ Const(ConstArg),
+ Infer(InferArg),
+}
+
+impl GenericArg<'_> {
+ pub fn span(&self) -> Span {
+ match self {
+ GenericArg::Lifetime(l) => l.span,
+ GenericArg::Type(t) => t.span,
+ GenericArg::Const(c) => c.span,
+ GenericArg::Infer(i) => i.span,
+ }
+ }
+
+ pub fn id(&self) -> HirId {
+ match self {
+ GenericArg::Lifetime(l) => l.hir_id,
+ GenericArg::Type(t) => t.hir_id,
+ GenericArg::Const(c) => c.value.hir_id,
+ GenericArg::Infer(i) => i.hir_id,
+ }
+ }
+
+ pub fn is_synthetic(&self) -> bool {
+ matches!(self, GenericArg::Lifetime(lifetime) if lifetime.name.ident() == Ident::empty())
+ }
+
+ pub fn descr(&self) -> &'static str {
+ match self {
+ GenericArg::Lifetime(_) => "lifetime",
+ GenericArg::Type(_) => "type",
+ GenericArg::Const(_) => "constant",
+ GenericArg::Infer(_) => "inferred",
+ }
+ }
+
+ pub fn to_ord(&self) -> ast::ParamKindOrd {
+ match self {
+ GenericArg::Lifetime(_) => ast::ParamKindOrd::Lifetime,
+ GenericArg::Type(_) => ast::ParamKindOrd::Type,
+ GenericArg::Const(_) => ast::ParamKindOrd::Const,
+ GenericArg::Infer(_) => ast::ParamKindOrd::Infer,
+ }
+ }
+
+ pub fn is_ty_or_const(&self) -> bool {
+ match self {
+ GenericArg::Lifetime(_) => false,
+ GenericArg::Type(_) | GenericArg::Const(_) | GenericArg::Infer(_) => true,
+ }
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct GenericArgs<'hir> {
+ /// The generic arguments for this path segment.
+ pub args: &'hir [GenericArg<'hir>],
+ /// Bindings (equality constraints) on associated types, if present.
+ /// E.g., `Foo<A = Bar>`.
+ pub bindings: &'hir [TypeBinding<'hir>],
+ /// Were arguments written in parenthesized form `Fn(T) -> U`?
+ /// This is required mostly for pretty-printing and diagnostics,
+ /// but also for changing lifetime elision rules to be "function-like".
+ pub parenthesized: bool,
+ /// The span encompassing arguments and the surrounding brackets `<>` or `()`
+ /// Foo<A, B, AssocTy = D> Fn(T, U, V) -> W
+ /// ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^
+ /// Note that this may be:
+ /// - empty, if there are no generic brackets (but there may be hidden lifetimes)
+ /// - dummy, if this was generated while desugaring
+ pub span_ext: Span,
+}
+
+impl<'hir> GenericArgs<'hir> {
+ pub const fn none() -> Self {
+ Self { args: &[], bindings: &[], parenthesized: false, span_ext: DUMMY_SP }
+ }
+
+ pub fn inputs(&self) -> &[Ty<'hir>] {
+ if self.parenthesized {
+ for arg in self.args {
+ match arg {
+ GenericArg::Lifetime(_) => {}
+ GenericArg::Type(ref ty) => {
+ if let TyKind::Tup(ref tys) = ty.kind {
+ return tys;
+ }
+ break;
+ }
+ GenericArg::Const(_) => {}
+ GenericArg::Infer(_) => {}
+ }
+ }
+ }
+ panic!("GenericArgs::inputs: not a `Fn(T) -> U`");
+ }
+
+ #[inline]
+ pub fn has_type_params(&self) -> bool {
+ self.args.iter().any(|arg| matches!(arg, GenericArg::Type(_)))
+ }
+
+ pub fn has_err(&self) -> bool {
+ self.args.iter().any(|arg| match arg {
+ GenericArg::Type(ty) => matches!(ty.kind, TyKind::Err),
+ _ => false,
+ }) || self.bindings.iter().any(|arg| match arg.kind {
+ TypeBindingKind::Equality { term: Term::Ty(ty) } => matches!(ty.kind, TyKind::Err),
+ _ => false,
+ })
+ }
+
+ #[inline]
+ pub fn num_type_params(&self) -> usize {
+ self.args.iter().filter(|arg| matches!(arg, GenericArg::Type(_))).count()
+ }
+
+ #[inline]
+ pub fn num_lifetime_params(&self) -> usize {
+ self.args.iter().filter(|arg| matches!(arg, GenericArg::Lifetime(_))).count()
+ }
+
+ #[inline]
+ pub fn has_lifetime_params(&self) -> bool {
+ self.args.iter().any(|arg| matches!(arg, GenericArg::Lifetime(_)))
+ }
+
+ #[inline]
+ pub fn num_generic_params(&self) -> usize {
+ self.args.iter().filter(|arg| !matches!(arg, GenericArg::Lifetime(_))).count()
+ }
+
+ /// The span encompassing the text inside the surrounding brackets.
+ /// It will also include bindings if they aren't in the form `-> Ret`
+ /// Returns `None` if the span is empty (e.g. no brackets) or dummy
+ pub fn span(&self) -> Option<Span> {
+ let span_ext = self.span_ext()?;
+ Some(span_ext.with_lo(span_ext.lo() + BytePos(1)).with_hi(span_ext.hi() - BytePos(1)))
+ }
+
+ /// Returns span encompassing arguments and their surrounding `<>` or `()`
+ pub fn span_ext(&self) -> Option<Span> {
+ Some(self.span_ext).filter(|span| !span.is_empty())
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.args.is_empty()
+ }
+}
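+
+// Illustrative sketch (not from upstream): the relationship between the
+// two span accessors, for a segment written as `Foo<A, B>`:
+//
+//     args.span_ext() // covers "<A, B>", brackets included
+//     args.span()     // covers "A, B", shrunk by one byte on each side
+//
+// Both return `None` for `GenericArgs::none()`, whose `span_ext` is the
+// empty `DUMMY_SP`.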
+
+/// A modifier on a bound; currently this is only used for `?Sized`, where the
+/// modifier is `Maybe`. Negative bounds should also be handled here.
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum TraitBoundModifier {
+ None,
+ Maybe,
+ MaybeConst,
+}
+
+/// The AST represents all type param bounds as types.
+/// `typeck::collect::compute_bounds` matches these against
+/// the "special" built-in traits (see `middle::lang_items`) and
+/// detects `Copy`, `Send` and `Sync`.
+#[derive(Clone, Debug, HashStable_Generic)]
+pub enum GenericBound<'hir> {
+ Trait(PolyTraitRef<'hir>, TraitBoundModifier),
+ // FIXME(davidtwco): Introduce `PolyTraitRef::LangItem`
+ LangItemTrait(LangItem, Span, HirId, &'hir GenericArgs<'hir>),
+ Outlives(Lifetime),
+}
+
+impl GenericBound<'_> {
+ pub fn trait_ref(&self) -> Option<&TraitRef<'_>> {
+ match self {
+ GenericBound::Trait(data, _) => Some(&data.trait_ref),
+ _ => None,
+ }
+ }
+
+ pub fn span(&self) -> Span {
+ match self {
+ GenericBound::Trait(t, ..) => t.span,
+ GenericBound::LangItemTrait(_, span, ..) => *span,
+ GenericBound::Outlives(l) => l.span,
+ }
+ }
+}
+
+pub type GenericBounds<'hir> = &'hir [GenericBound<'hir>];
+
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Debug, HashStable_Generic)]
+pub enum LifetimeParamKind {
+ // Indicates that the lifetime definition was explicitly declared (e.g., in
+ // `fn foo<'a>(x: &'a u8) -> &'a u8 { x }`).
+ Explicit,
+
+ // Indication that the lifetime was elided (e.g., in both cases in
+ // `fn foo(x: &u8) -> &'_ u8 { x }`).
+ Elided,
+
+ // Indication that the lifetime name was somehow in error.
+ Error,
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub enum GenericParamKind<'hir> {
+ /// A lifetime definition (e.g., `'a: 'b + 'c + 'd`).
+ Lifetime {
+ kind: LifetimeParamKind,
+ },
+ Type {
+ default: Option<&'hir Ty<'hir>>,
+ synthetic: bool,
+ },
+ Const {
+ ty: &'hir Ty<'hir>,
+ /// Optional default value for the const generic param
+ default: Option<AnonConst>,
+ },
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct GenericParam<'hir> {
+ pub hir_id: HirId,
+ pub name: ParamName,
+ pub span: Span,
+ pub pure_wrt_drop: bool,
+ pub kind: GenericParamKind<'hir>,
+ pub colon_span: Option<Span>,
+}
+
+impl<'hir> GenericParam<'hir> {
+ /// Synthetic type parameters are inserted after normal ones.
+ /// In order for normal parameters to be able to refer to synthetic ones,
+ /// the synthetic ones are scanned first.
+ pub fn is_impl_trait(&self) -> bool {
+ matches!(self.kind, GenericParamKind::Type { synthetic: true, .. })
+ }
+
+ /// This can happen for `async fn`, e.g. `async fn f<'_>(&'_ self)`.
+ ///
+ /// See `lifetime_to_generic_param` in `rustc_ast_lowering` for more information.
+ pub fn is_elided_lifetime(&self) -> bool {
+ matches!(self.kind, GenericParamKind::Lifetime { kind: LifetimeParamKind::Elided })
+ }
+}
+
+#[derive(Default)]
+pub struct GenericParamCount {
+ pub lifetimes: usize,
+ pub types: usize,
+ pub consts: usize,
+ pub infer: usize,
+}
+
+/// Represents lifetimes and type parameters attached to a declaration
+/// of a function, enum, trait, etc.
+#[derive(Debug, HashStable_Generic)]
+pub struct Generics<'hir> {
+ pub params: &'hir [GenericParam<'hir>],
+ pub predicates: &'hir [WherePredicate<'hir>],
+ pub has_where_clause_predicates: bool,
+ pub where_clause_span: Span,
+ pub span: Span,
+}
+
+impl<'hir> Generics<'hir> {
+ pub const fn empty() -> &'hir Generics<'hir> {
+ const NOPE: Generics<'_> = Generics {
+ params: &[],
+ predicates: &[],
+ has_where_clause_predicates: false,
+ where_clause_span: DUMMY_SP,
+ span: DUMMY_SP,
+ };
+ &NOPE
+ }
+
+ pub fn get_named(&self, name: Symbol) -> Option<&GenericParam<'hir>> {
+ for param in self.params {
+ if name == param.name.ident().name {
+ return Some(param);
+ }
+ }
+ None
+ }
+
+ pub fn spans(&self) -> MultiSpan {
+ if self.params.is_empty() {
+ self.span.into()
+ } else {
+ self.params.iter().map(|p| p.span).collect::<Vec<Span>>().into()
+ }
+ }
+
+ /// If there are generic parameters, return where to introduce a new one.
+ pub fn span_for_param_suggestion(&self) -> Option<Span> {
+ if self.params.iter().any(|p| self.span.contains(p.span)) {
+ // `fn foo<A>(t: impl Trait)`
+ // ^ suggest `, T: Trait` here
+ let span = self.span.with_lo(self.span.hi() - BytePos(1)).shrink_to_lo();
+ Some(span)
+ } else {
+ None
+ }
+ }
+
+ /// `Span` where further predicates would be suggested, accounting for trailing commas, like
+ /// in `fn foo<T>(t: T) where T: Foo,` so we don't suggest two trailing commas.
+ pub fn tail_span_for_predicate_suggestion(&self) -> Span {
+ let end = self.where_clause_span.shrink_to_hi();
+ if self.has_where_clause_predicates {
+ self.predicates
+ .iter()
+ .filter(|p| p.in_where_clause())
+ .last()
+ .map_or(end, |p| p.span())
+ .shrink_to_hi()
+ .to(end)
+ } else {
+ end
+ }
+ }
+
+ pub fn add_where_or_trailing_comma(&self) -> &'static str {
+ if self.has_where_clause_predicates {
+ ","
+ } else if self.where_clause_span.is_empty() {
+ " where"
+ } else {
+ // No where-clause predicates, but we do have a `where` token.
+ ""
+ }
+ }
+
+ pub fn bounds_for_param(
+ &self,
+ param_def_id: LocalDefId,
+ ) -> impl Iterator<Item = &WhereBoundPredicate<'hir>> {
+ self.predicates.iter().filter_map(move |pred| match pred {
+ WherePredicate::BoundPredicate(bp) if bp.is_param_bound(param_def_id.to_def_id()) => {
+ Some(bp)
+ }
+ _ => None,
+ })
+ }
+
+ pub fn outlives_for_param(
+ &self,
+ param_def_id: LocalDefId,
+ ) -> impl Iterator<Item = &WhereRegionPredicate<'_>> {
+ self.predicates.iter().filter_map(move |pred| match pred {
+ WherePredicate::RegionPredicate(rp) if rp.is_param_bound(param_def_id) => Some(rp),
+ _ => None,
+ })
+ }
+
+ pub fn bounds_span_for_suggestions(&self, param_def_id: LocalDefId) -> Option<Span> {
+ self.bounds_for_param(param_def_id).flat_map(|bp| bp.bounds.iter().rev()).find_map(
+ |bound| {
+ // We include bounds that come from a `#[derive(_)]` but point at the user's code,
+ // as we use this method to get a span appropriate for suggestions.
+ let bs = bound.span();
+ if bs.can_be_used_for_suggestions() { Some(bs.shrink_to_hi()) } else { None }
+ },
+ )
+ }
+
+ pub fn span_for_predicate_removal(&self, pos: usize) -> Span {
+ let predicate = &self.predicates[pos];
+ let span = predicate.span();
+
+ if !predicate.in_where_clause() {
+ // <T: ?Sized, U>
+ // ^^^^^^^^
+ return span;
+ }
+
+ // We need to find out which comma to remove.
+ if pos < self.predicates.len() - 1 {
+ let next_pred = &self.predicates[pos + 1];
+ if next_pred.in_where_clause() {
+ // where T: ?Sized, Foo: Bar,
+ // ^^^^^^^^^^^
+ return span.until(next_pred.span());
+ }
+ }
+
+ if pos > 0 {
+ let prev_pred = &self.predicates[pos - 1];
+ if prev_pred.in_where_clause() {
+ // where Foo: Bar, T: ?Sized,
+ // ^^^^^^^^^^^
+ return prev_pred.span().shrink_to_hi().to(span);
+ }
+ }
+
+ // This is the only predicate in the where clause.
+ // where T: ?Sized
+ // ^^^^^^^^^^^^^^^
+ self.where_clause_span
+ }
+
+ pub fn span_for_bound_removal(&self, predicate_pos: usize, bound_pos: usize) -> Span {
+ let predicate = &self.predicates[predicate_pos];
+ let bounds = predicate.bounds();
+
+ if bounds.len() == 1 {
+ return self.span_for_predicate_removal(predicate_pos);
+ }
+
+ let span = bounds[bound_pos].span();
+ if bound_pos == 0 {
+ // where T: ?Sized + Bar, Foo: Bar,
+ // ^^^^^^^^^
+ span.to(bounds[1].span().shrink_to_lo())
+ } else {
+ // where T: Bar + ?Sized, Foo: Bar,
+ // ^^^^^^^^^
+ bounds[bound_pos - 1].span().shrink_to_hi().to(span)
+ }
+ }
+}
+
+/// A single predicate in a where-clause.
+#[derive(Debug, HashStable_Generic)]
+pub enum WherePredicate<'hir> {
+ /// A type binding (e.g., `for<'c> Foo: Send + Clone + 'c`).
+ BoundPredicate(WhereBoundPredicate<'hir>),
+ /// A lifetime predicate (e.g., `'a: 'b + 'c`).
+ RegionPredicate(WhereRegionPredicate<'hir>),
+ /// An equality predicate (unsupported).
+ EqPredicate(WhereEqPredicate<'hir>),
+}
+
+impl<'hir> WherePredicate<'hir> {
+ pub fn span(&self) -> Span {
+ match self {
+ WherePredicate::BoundPredicate(p) => p.span,
+ WherePredicate::RegionPredicate(p) => p.span,
+ WherePredicate::EqPredicate(p) => p.span,
+ }
+ }
+
+ pub fn in_where_clause(&self) -> bool {
+ match self {
+ WherePredicate::BoundPredicate(p) => p.origin == PredicateOrigin::WhereClause,
+ WherePredicate::RegionPredicate(p) => p.in_where_clause,
+ WherePredicate::EqPredicate(_) => false,
+ }
+ }
+
+ pub fn bounds(&self) -> GenericBounds<'hir> {
+ match self {
+ WherePredicate::BoundPredicate(p) => p.bounds,
+ WherePredicate::RegionPredicate(p) => p.bounds,
+ WherePredicate::EqPredicate(_) => &[],
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable_Generic, PartialEq, Eq)]
+pub enum PredicateOrigin {
+ WhereClause,
+ GenericParam,
+ ImplTrait,
+}
+
+/// A type bound (e.g., `for<'c> Foo: Send + Clone + 'c`).
+#[derive(Debug, HashStable_Generic)]
+pub struct WhereBoundPredicate<'hir> {
+ pub span: Span,
+ /// Origin of the predicate.
+ pub origin: PredicateOrigin,
+ /// Any generics from a `for` binding.
+ pub bound_generic_params: &'hir [GenericParam<'hir>],
+ /// The type being bounded.
+ pub bounded_ty: &'hir Ty<'hir>,
+ /// Trait and lifetime bounds (e.g., `Clone + Send + 'static`).
+ pub bounds: GenericBounds<'hir>,
+}
+
+impl<'hir> WhereBoundPredicate<'hir> {
+ /// Returns `true` if `param_def_id` matches the `bounded_ty` of this predicate.
+ pub fn is_param_bound(&self, param_def_id: DefId) -> bool {
+ self.bounded_ty.as_generic_param().map_or(false, |(def_id, _)| def_id == param_def_id)
+ }
+}
+
+/// A lifetime predicate (e.g., `'a: 'b + 'c`).
+#[derive(Debug, HashStable_Generic)]
+pub struct WhereRegionPredicate<'hir> {
+ pub span: Span,
+ pub in_where_clause: bool,
+ pub lifetime: Lifetime,
+ pub bounds: GenericBounds<'hir>,
+}
+
+impl<'hir> WhereRegionPredicate<'hir> {
+ /// Returns `true` if `param_def_id` matches the `lifetime` of this predicate.
+ pub fn is_param_bound(&self, param_def_id: LocalDefId) -> bool {
+ match self.lifetime.name {
+ LifetimeName::Param(id, _) => id == param_def_id,
+ _ => false,
+ }
+ }
+}
+
+/// An equality predicate (e.g., `T = int`); currently unsupported.
+#[derive(Debug, HashStable_Generic)]
+pub struct WhereEqPredicate<'hir> {
+ pub hir_id: HirId,
+ pub span: Span,
+ pub lhs_ty: &'hir Ty<'hir>,
+ pub rhs_ty: &'hir Ty<'hir>,
+}
+
+/// HIR node coupled with its parent's id in the same HIR owner.
+///
+/// The stored parent is meaningless (and must not be used) when the node is itself a HIR owner.
+#[derive(Clone, Debug)]
+pub struct ParentedNode<'tcx> {
+ pub parent: ItemLocalId,
+ pub node: Node<'tcx>,
+}
+
+/// Attributes owned by a HIR owner.
+#[derive(Debug)]
+pub struct AttributeMap<'tcx> {
+ pub map: SortedMap<ItemLocalId, &'tcx [Attribute]>,
+ pub hash: Fingerprint,
+}
+
+impl<'tcx> AttributeMap<'tcx> {
+ pub const EMPTY: &'static AttributeMap<'static> =
+ &AttributeMap { map: SortedMap::new(), hash: Fingerprint::ZERO };
+
+ #[inline]
+ pub fn get(&self, id: ItemLocalId) -> &'tcx [Attribute] {
+ self.map.get(&id).copied().unwrap_or(&[])
+ }
+}
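+
+// Illustrative sketch (not from upstream): `get` never fails, which spares
+// callers an `Option` dance when a node simply has no attributes:
+//
+//     let attrs = AttributeMap::EMPTY.get(ItemLocalId::from_u32(7));
+//     assert!(attrs.is_empty()); // missing entries decay to `&[]`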
+
+/// Map of all HIR nodes inside the current owner.
+/// These nodes are mapped by `ItemLocalId` alongside the index of their parent node.
+/// The HIR tree, including bodies, is pre-hashed.
+pub struct OwnerNodes<'tcx> {
+ /// Pre-computed hash of the full HIR.
+ pub hash_including_bodies: Fingerprint,
+ /// Pre-computed hash of the item signature, without recursing into the body.
+ pub hash_without_bodies: Fingerprint,
+ /// Full HIR for the current owner.
+ // The zeroth node's parent should never be accessed: the owner's parent is computed by the
+ // hir_owner_parent query. It is set to `ItemLocalId::INVALID` to force an ICE if accidentally
+ // used.
+ pub nodes: IndexVec<ItemLocalId, Option<ParentedNode<'tcx>>>,
+ /// Content of local bodies.
+ pub bodies: SortedMap<ItemLocalId, &'tcx Body<'tcx>>,
+ /// Non-owning definitions contained in this owner.
+ pub local_id_to_def_id: SortedMap<ItemLocalId, LocalDefId>,
+}
+
+impl<'tcx> OwnerNodes<'tcx> {
+ pub fn node(&self) -> OwnerNode<'tcx> {
+ use rustc_index::vec::Idx;
+ let node = self.nodes[ItemLocalId::new(0)].as_ref().unwrap().node;
+ let node = node.as_owner().unwrap(); // Indexing must ensure it is an OwnerNode.
+ node
+ }
+}
+
+impl fmt::Debug for OwnerNodes<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OwnerNodes")
+ .field("node", &self.nodes[ItemLocalId::from_u32(0)])
+ .field("bodies", &self.bodies)
+ .field("local_id_to_def_id", &self.local_id_to_def_id)
+ .field("hash_without_bodies", &self.hash_without_bodies)
+ .field("hash_including_bodies", &self.hash_including_bodies)
+ .finish()
+ }
+}
+
+/// Full information resulting from lowering an AST node.
+#[derive(Debug, HashStable_Generic)]
+pub struct OwnerInfo<'hir> {
+ /// Contents of the HIR.
+ pub nodes: OwnerNodes<'hir>,
+ /// Map from each nested owner to its parent's local id.
+ pub parenting: FxHashMap<LocalDefId, ItemLocalId>,
+ /// Collected attributes of the HIR nodes.
+ pub attrs: AttributeMap<'hir>,
+ /// Map indicating what traits are in scope for places where this
+ /// is relevant; generated by resolve.
+ pub trait_map: FxHashMap<ItemLocalId, Box<[TraitCandidate]>>,
+}
+
+impl<'tcx> OwnerInfo<'tcx> {
+ #[inline]
+ pub fn node(&self) -> OwnerNode<'tcx> {
+ self.nodes.node()
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable_Generic)]
+pub enum MaybeOwner<T> {
+ Owner(T),
+ NonOwner(HirId),
+ /// Used as a placeholder for an unused `LocalDefId`.
+ Phantom,
+}
+
+impl<T> MaybeOwner<T> {
+ pub fn as_owner(self) -> Option<T> {
+ match self {
+ MaybeOwner::Owner(i) => Some(i),
+ MaybeOwner::NonOwner(_) | MaybeOwner::Phantom => None,
+ }
+ }
+
+ pub fn map<U>(self, f: impl FnOnce(T) -> U) -> MaybeOwner<U> {
+ match self {
+ MaybeOwner::Owner(i) => MaybeOwner::Owner(f(i)),
+ MaybeOwner::NonOwner(hir_id) => MaybeOwner::NonOwner(hir_id),
+ MaybeOwner::Phantom => MaybeOwner::Phantom,
+ }
+ }
+
+ pub fn unwrap(self) -> T {
+ match self {
+ MaybeOwner::Owner(i) => i,
+ MaybeOwner::NonOwner(_) | MaybeOwner::Phantom => panic!("Not a HIR owner"),
+ }
+ }
+}
+
+/// The top-level data structure that stores the entire contents of
+/// the crate currently being compiled.
+///
+/// For more details, see the [rustc dev guide].
+///
+/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/hir.html
+#[derive(Debug)]
+pub struct Crate<'hir> {
+ pub owners: IndexVec<LocalDefId, MaybeOwner<&'hir OwnerInfo<'hir>>>,
+ pub hir_hash: Fingerprint,
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct Closure<'hir> {
+ pub binder: ClosureBinder,
+ pub capture_clause: CaptureBy,
+ pub bound_generic_params: &'hir [GenericParam<'hir>],
+ pub fn_decl: &'hir FnDecl<'hir>,
+ pub body: BodyId,
+ pub fn_decl_span: Span,
+ pub movability: Option<Movability>,
+}
+
+/// A block of statements `{ .. }`, which may have a label (in this case the
+/// `targeted_by_break` field will be `true`) and may be `unsafe` by means of
+/// the `rules` being anything but `DefaultBlock`.
+#[derive(Debug, HashStable_Generic)]
+pub struct Block<'hir> {
+ /// Statements in a block.
+ pub stmts: &'hir [Stmt<'hir>],
+ /// An expression at the end of the block
+ /// without a semicolon, if any.
+ pub expr: Option<&'hir Expr<'hir>>,
+ #[stable_hasher(ignore)]
+ pub hir_id: HirId,
+ /// Distinguishes between `unsafe { ... }` and `{ ... }`.
+ pub rules: BlockCheckMode,
+ pub span: Span,
+ /// If true, then there may exist `break 'a` values that aim to
+ /// break out of this block early.
+ /// Used by `'label: {}` blocks and by `try {}` blocks.
+ pub targeted_by_break: bool,
+}
+
+impl<'hir> Block<'hir> {
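+ /// Returns the innermost block, peeling off any trailing block
+ /// expressions; e.g. for the HIR of `{ { { 1 } } }` this returns the
+ /// block containing only `1`.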
+ pub fn innermost_block(&self) -> &Block<'hir> {
+ let mut block = self;
+ while let Some(Expr { kind: ExprKind::Block(inner_block, _), .. }) = block.expr {
+ block = inner_block;
+ }
+ block
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct Pat<'hir> {
+ #[stable_hasher(ignore)]
+ pub hir_id: HirId,
+ pub kind: PatKind<'hir>,
+ pub span: Span,
+ // Whether to use default binding modes.
+ // At present, this is false only for destructuring assignment.
+ pub default_binding_modes: bool,
+}
+
+impl<'hir> Pat<'hir> {
+ // FIXME(#19596) this is a workaround, but there should be a better way
+ fn walk_short_(&self, it: &mut impl FnMut(&Pat<'hir>) -> bool) -> bool {
+ if !it(self) {
+ return false;
+ }
+
+ use PatKind::*;
+ match self.kind {
+ Wild | Lit(_) | Range(..) | Binding(.., None) | Path(_) => true,
+ Box(s) | Ref(s, _) | Binding(.., Some(s)) => s.walk_short_(it),
+ Struct(_, fields, _) => fields.iter().all(|field| field.pat.walk_short_(it)),
+ TupleStruct(_, s, _) | Tuple(s, _) | Or(s) => s.iter().all(|p| p.walk_short_(it)),
+ Slice(before, slice, after) => {
+ before.iter().chain(slice).chain(after.iter()).all(|p| p.walk_short_(it))
+ }
+ }
+ }
+
+ /// Walk the pattern in left-to-right order,
+ /// short circuiting (with `.all(..)`) if `false` is returned.
+ ///
+ /// Note that when visiting e.g. `Tuple(ps)`,
+ /// if visiting `ps[0]` returns `false`,
+ /// then `ps[1]` will not be visited.
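+ ///
+ /// An illustrative use (the surrounding code is hypothetical, not part
+ /// of the compiler):
+ ///
+ /// ```ignore (illustrative)
+ /// // `true` iff no sub-pattern of `pat` is a wildcard `_`.
+ /// let no_wildcards = pat.walk_short(|p| !matches!(p.kind, PatKind::Wild));
+ /// ```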
+ pub fn walk_short(&self, mut it: impl FnMut(&Pat<'hir>) -> bool) -> bool {
+ self.walk_short_(&mut it)
+ }
+
+ // FIXME(#19596) this is a workaround, but there should be a better way
+ fn walk_(&self, it: &mut impl FnMut(&Pat<'hir>) -> bool) {
+ if !it(self) {
+ return;
+ }
+
+ use PatKind::*;
+ match self.kind {
+ Wild | Lit(_) | Range(..) | Binding(.., None) | Path(_) => {}
+ Box(s) | Ref(s, _) | Binding(.., Some(s)) => s.walk_(it),
+ Struct(_, fields, _) => fields.iter().for_each(|field| field.pat.walk_(it)),
+ TupleStruct(_, s, _) | Tuple(s, _) | Or(s) => s.iter().for_each(|p| p.walk_(it)),
+ Slice(before, slice, after) => {
+ before.iter().chain(slice).chain(after.iter()).for_each(|p| p.walk_(it))
+ }
+ }
+ }
+
+ /// Walk the pattern in left-to-right order.
+ ///
+ /// If `it(pat)` returns `false`, the children are not visited.
+ pub fn walk(&self, mut it: impl FnMut(&Pat<'hir>) -> bool) {
+ self.walk_(&mut it)
+ }
+
+ /// Walk the pattern in left-to-right order.
+ ///
+ /// If you always want to recurse, prefer this method over `walk`.
+ pub fn walk_always(&self, mut it: impl FnMut(&Pat<'_>)) {
+ self.walk(|p| {
+ it(p);
+ true
+ })
+ }
+}
+
+/// A single field in a struct pattern.
+///
+/// Patterns like the fields of `Foo { x, ref y, ref mut z }`
+/// are treated the same as `x: x, y: ref y, z: ref mut z`,
+/// except `is_shorthand` is true.
+#[derive(Debug, HashStable_Generic)]
+pub struct PatField<'hir> {
+ #[stable_hasher(ignore)]
+ pub hir_id: HirId,
+ /// The identifier for the field.
+ pub ident: Ident,
+ /// The pattern the field is destructured to.
+ pub pat: &'hir Pat<'hir>,
+ pub is_shorthand: bool,
+ pub span: Span,
+}
+
+/// Explicit binding annotations given in the HIR for a binding. Note
+/// that this is not the final binding *mode* that we infer after type
+/// inference.
+#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
+pub enum BindingAnnotation {
+ /// No binding annotation given: this means that the final binding mode
+ /// will depend on whether we have skipped through a `&` reference
+ /// when matching. For example, the `x` in `Some(x)` will have binding
+ /// mode `None`; if you do `let Some(x) = &Some(22)`, it will
+ /// ultimately be inferred to be by-reference.
+ ///
+ /// Note that implicit reference skipping is not implemented yet (#42640).
+ Unannotated,
+
+ /// Annotated with `mut x` -- could be either ref or not, similar to `None`.
+ Mutable,
+
+ /// Annotated as `ref`, like `ref x`.
+ Ref,
+
+ /// Annotated as `ref mut x`.
+ RefMut,
+}
+
+#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
+pub enum RangeEnd {
+ Included,
+ Excluded,
+}
+
+impl fmt::Display for RangeEnd {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match self {
+ RangeEnd::Included => "..=",
+ RangeEnd::Excluded => "..",
+ })
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub enum PatKind<'hir> {
+ /// Represents a wildcard pattern (i.e., `_`).
+ Wild,
+
+ /// A fresh binding `ref mut binding @ OPT_SUBPATTERN`.
+ /// The `HirId` is the canonical ID for the variable being bound,
+ /// (e.g., in `Ok(x) | Err(x)`, both `x` use the same canonical ID),
+ /// which is the pattern ID of the first `x`.
+ Binding(BindingAnnotation, HirId, Ident, Option<&'hir Pat<'hir>>),
+
+ /// A struct or struct variant pattern (e.g., `Variant {x, y, ..}`).
+ /// The `bool` is `true` in the presence of a `..`.
+ Struct(QPath<'hir>, &'hir [PatField<'hir>], bool),
+
+ /// A tuple struct/variant pattern `Variant(x, y, .., z)`.
+ /// If the `..` pattern fragment is present, then `Option<usize>` denotes its position.
+ /// `0 <= position <= subpats.len()`
+ TupleStruct(QPath<'hir>, &'hir [Pat<'hir>], Option<usize>),
+
+ /// An or-pattern `A | B | C`.
+ /// Invariant: `pats.len() >= 2`.
+ Or(&'hir [Pat<'hir>]),
+
+ /// A path pattern for a unit struct/variant or a (maybe-associated) constant.
+ Path(QPath<'hir>),
+
+ /// A tuple pattern (e.g., `(a, b)`).
+ /// If the `..` pattern fragment is present, then `Option<usize>` denotes its position.
+ /// `0 <= position <= subpats.len()`
+ Tuple(&'hir [Pat<'hir>], Option<usize>),
+
+ /// A `box` pattern.
+ Box(&'hir Pat<'hir>),
+
+ /// A reference pattern (e.g., `&mut (a, b)`).
+ Ref(&'hir Pat<'hir>, Mutability),
+
+ /// A literal.
+ Lit(&'hir Expr<'hir>),
+
+ /// A range pattern (e.g., `1..=2` or `1..2`).
+ Range(Option<&'hir Expr<'hir>>, Option<&'hir Expr<'hir>>, RangeEnd),
+
+ /// A slice pattern, `[before_0, ..., before_n, (slice, after_0, ..., after_n)?]`.
+ ///
+ /// Here, `slice` is lowered from the syntax `($binding_mode $ident @)? ..`.
+ /// If `slice` exists, then `after` can be non-empty.
+ ///
+ /// The representation for e.g., `[a, b, .., c, d]` is:
+ /// ```ignore (illustrative)
+ /// PatKind::Slice([Binding(a), Binding(b)], Some(Wild), [Binding(c), Binding(d)])
+ /// ```
+ Slice(&'hir [Pat<'hir>], Option<&'hir Pat<'hir>>, &'hir [Pat<'hir>]),
+}
+
+#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
+pub enum BinOpKind {
+ /// The `+` operator (addition).
+ Add,
+ /// The `-` operator (subtraction).
+ Sub,
+ /// The `*` operator (multiplication).
+ Mul,
+ /// The `/` operator (division).
+ Div,
+ /// The `%` operator (modulus).
+ Rem,
+ /// The `&&` operator (logical and).
+ And,
+ /// The `||` operator (logical or).
+ Or,
+ /// The `^` operator (bitwise xor).
+ BitXor,
+ /// The `&` operator (bitwise and).
+ BitAnd,
+ /// The `|` operator (bitwise or).
+ BitOr,
+ /// The `<<` operator (shift left).
+ Shl,
+ /// The `>>` operator (shift right).
+ Shr,
+ /// The `==` operator (equality).
+ Eq,
+ /// The `<` operator (less than).
+ Lt,
+ /// The `<=` operator (less than or equal to).
+ Le,
+ /// The `!=` operator (not equal to).
+ Ne,
+ /// The `>=` operator (greater than or equal to).
+ Ge,
+ /// The `>` operator (greater than).
+ Gt,
+}
+
+impl BinOpKind {
+ pub fn as_str(self) -> &'static str {
+ match self {
+ BinOpKind::Add => "+",
+ BinOpKind::Sub => "-",
+ BinOpKind::Mul => "*",
+ BinOpKind::Div => "/",
+ BinOpKind::Rem => "%",
+ BinOpKind::And => "&&",
+ BinOpKind::Or => "||",
+ BinOpKind::BitXor => "^",
+ BinOpKind::BitAnd => "&",
+ BinOpKind::BitOr => "|",
+ BinOpKind::Shl => "<<",
+ BinOpKind::Shr => ">>",
+ BinOpKind::Eq => "==",
+ BinOpKind::Lt => "<",
+ BinOpKind::Le => "<=",
+ BinOpKind::Ne => "!=",
+ BinOpKind::Ge => ">=",
+ BinOpKind::Gt => ">",
+ }
+ }
+
+ pub fn is_lazy(self) -> bool {
+ matches!(self, BinOpKind::And | BinOpKind::Or)
+ }
+
+ pub fn is_shift(self) -> bool {
+ matches!(self, BinOpKind::Shl | BinOpKind::Shr)
+ }
+
+ pub fn is_comparison(self) -> bool {
+ match self {
+ BinOpKind::Eq
+ | BinOpKind::Lt
+ | BinOpKind::Le
+ | BinOpKind::Ne
+ | BinOpKind::Gt
+ | BinOpKind::Ge => true,
+ BinOpKind::And
+ | BinOpKind::Or
+ | BinOpKind::Add
+ | BinOpKind::Sub
+ | BinOpKind::Mul
+ | BinOpKind::Div
+ | BinOpKind::Rem
+ | BinOpKind::BitXor
+ | BinOpKind::BitAnd
+ | BinOpKind::BitOr
+ | BinOpKind::Shl
+ | BinOpKind::Shr => false,
+ }
+ }
+
+ /// Returns `true` if the binary operator takes its arguments by value.
+ pub fn is_by_value(self) -> bool {
+ !self.is_comparison()
+ }
+}
+
+impl Into<ast::BinOpKind> for BinOpKind {
+ fn into(self) -> ast::BinOpKind {
+ match self {
+ BinOpKind::Add => ast::BinOpKind::Add,
+ BinOpKind::Sub => ast::BinOpKind::Sub,
+ BinOpKind::Mul => ast::BinOpKind::Mul,
+ BinOpKind::Div => ast::BinOpKind::Div,
+ BinOpKind::Rem => ast::BinOpKind::Rem,
+ BinOpKind::And => ast::BinOpKind::And,
+ BinOpKind::Or => ast::BinOpKind::Or,
+ BinOpKind::BitXor => ast::BinOpKind::BitXor,
+ BinOpKind::BitAnd => ast::BinOpKind::BitAnd,
+ BinOpKind::BitOr => ast::BinOpKind::BitOr,
+ BinOpKind::Shl => ast::BinOpKind::Shl,
+ BinOpKind::Shr => ast::BinOpKind::Shr,
+ BinOpKind::Eq => ast::BinOpKind::Eq,
+ BinOpKind::Lt => ast::BinOpKind::Lt,
+ BinOpKind::Le => ast::BinOpKind::Le,
+ BinOpKind::Ne => ast::BinOpKind::Ne,
+ BinOpKind::Ge => ast::BinOpKind::Ge,
+ BinOpKind::Gt => ast::BinOpKind::Gt,
+ }
+ }
+}
+
+pub type BinOp = Spanned<BinOpKind>;
+
+#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
+pub enum UnOp {
+ /// The `*` operator (dereferencing).
+ Deref,
+ /// The `!` operator (logical negation).
+ Not,
+ /// The `-` operator (negation).
+ Neg,
+}
+
+impl UnOp {
+ pub fn as_str(self) -> &'static str {
+ match self {
+ Self::Deref => "*",
+ Self::Not => "!",
+ Self::Neg => "-",
+ }
+ }
+
+ /// Returns `true` if the unary operator takes its argument by value.
+ pub fn is_by_value(self) -> bool {
+ matches!(self, Self::Neg | Self::Not)
+ }
+}
+
+/// A statement.
+#[derive(Debug, HashStable_Generic)]
+pub struct Stmt<'hir> {
+ pub hir_id: HirId,
+ pub kind: StmtKind<'hir>,
+ pub span: Span,
+}
+
+/// The contents of a statement.
+#[derive(Debug, HashStable_Generic)]
+pub enum StmtKind<'hir> {
+ /// A local (`let`) binding.
+ Local(&'hir Local<'hir>),
+
+ /// An item binding.
+ Item(ItemId),
+
+ /// An expression without a trailing semi-colon (must have unit type).
+ Expr(&'hir Expr<'hir>),
+
+ /// An expression with a trailing semi-colon (may have any type).
+ Semi(&'hir Expr<'hir>),
+}
+
+/// Represents a `let` statement (i.e., `let <pat>:<ty> = <expr>;`).
+#[derive(Debug, HashStable_Generic)]
+pub struct Local<'hir> {
+ pub pat: &'hir Pat<'hir>,
+ /// Type annotation, if any (otherwise the type will be inferred).
+ pub ty: Option<&'hir Ty<'hir>>,
+ /// Initializer expression to set the value, if any.
+ pub init: Option<&'hir Expr<'hir>>,
+ /// Else block for a `let...else` binding.
+ pub els: Option<&'hir Block<'hir>>,
+ pub hir_id: HirId,
+ pub span: Span,
+ /// Can be `ForLoopDesugar` if the `let` statement is part of a `for` loop
+ /// desugaring. Otherwise will be `Normal`.
+ pub source: LocalSource,
+}
+
+/// Represents a single arm of a `match` expression, e.g.
+/// `<pat> (if <guard>) => <body>`.
+#[derive(Debug, HashStable_Generic)]
+pub struct Arm<'hir> {
+ #[stable_hasher(ignore)]
+ pub hir_id: HirId,
+ pub span: Span,
+ /// If this pattern and the optional guard matches, then `body` is evaluated.
+ pub pat: &'hir Pat<'hir>,
+ /// Optional guard clause.
+ pub guard: Option<Guard<'hir>>,
+ /// The expression the arm evaluates to if this arm matches.
+ pub body: &'hir Expr<'hir>,
+}
+
+/// Represents a `let <pat>[: <ty>] = <expr>` expression (not a Local), occurring in an `if-let` or
+/// `let-else`, evaluating to a boolean. Typically the pattern is refutable.
+///
+/// In an if-let, imagine it as `if (let <pat> = <expr>) { ... }`; in a let-else, it is part of the
+/// desugaring to if-let. Only let-else supports the type annotation at present.
+#[derive(Debug, HashStable_Generic)]
+pub struct Let<'hir> {
+ pub hir_id: HirId,
+ pub span: Span,
+ pub pat: &'hir Pat<'hir>,
+ pub ty: Option<&'hir Ty<'hir>>,
+ pub init: &'hir Expr<'hir>,
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub enum Guard<'hir> {
+ If(&'hir Expr<'hir>),
+ IfLet(&'hir Let<'hir>),
+}
+
+impl<'hir> Guard<'hir> {
+ /// Returns the body of the guard
+ ///
+ /// In other words, returns the e in either of the following:
+ ///
+ /// - `if e`
+ /// - `if let x = e`
+ pub fn body(&self) -> &'hir Expr<'hir> {
+ match self {
+ Guard::If(e) | Guard::IfLet(Let { init: e, .. }) => e,
+ }
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct ExprField<'hir> {
+ #[stable_hasher(ignore)]
+ pub hir_id: HirId,
+ pub ident: Ident,
+ pub expr: &'hir Expr<'hir>,
+ pub span: Span,
+ pub is_shorthand: bool,
+}
+
+#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
+pub enum BlockCheckMode {
+ DefaultBlock,
+ UnsafeBlock(UnsafeSource),
+}
+
+#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
+pub enum UnsafeSource {
+ CompilerGenerated,
+ UserProvided,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Hash, Debug)]
+pub struct BodyId {
+ pub hir_id: HirId,
+}
+
+/// The body of a function, closure, or constant value. In the case of
+/// a function, the body contains not only the function body itself
+/// (which is an expression), but also the argument patterns, since
+/// those are something that the caller doesn't really care about.
+///
+/// # Examples
+///
+/// ```
+/// fn foo((x, y): (u32, u32)) -> u32 {
+/// x + y
+/// }
+/// ```
+///
+/// Here, the `Body` associated with `foo()` would contain:
+///
+/// - a `params` array containing the `(x, y)` pattern
+/// - a `value` containing the `x + y` expression (maybe wrapped in a block)
+/// - `generator_kind` would be `None`
+///
+/// All bodies have an **owner**, which can be accessed via the HIR
+/// map using `body_owner_def_id()`.
+#[derive(Debug, HashStable_Generic)]
+pub struct Body<'hir> {
+ pub params: &'hir [Param<'hir>],
+ pub value: Expr<'hir>,
+ pub generator_kind: Option<GeneratorKind>,
+}
+
+impl<'hir> Body<'hir> {
+ pub fn id(&self) -> BodyId {
+ BodyId { hir_id: self.value.hir_id }
+ }
+
+ pub fn generator_kind(&self) -> Option<GeneratorKind> {
+ self.generator_kind
+ }
+}
+
+/// The type of source expression that caused this generator to be created.
+#[derive(Clone, PartialEq, PartialOrd, Eq, Hash, Debug, Copy)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub enum GeneratorKind {
+ /// An explicit `async` block or the body of an async function.
+ Async(AsyncGeneratorKind),
+
+ /// A generator literal created via a `yield` inside a closure.
+ Gen,
+}
+
+impl fmt::Display for GeneratorKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ GeneratorKind::Async(k) => fmt::Display::fmt(k, f),
+ GeneratorKind::Gen => f.write_str("generator"),
+ }
+ }
+}
+
+impl GeneratorKind {
+ pub fn descr(&self) -> &'static str {
+ match self {
+ GeneratorKind::Async(ask) => ask.descr(),
+ GeneratorKind::Gen => "generator",
+ }
+ }
+}
+
+/// In the case of a generator created as part of an async construct,
+/// which kind of async construct caused it to be created?
+///
+/// This helps error messages but is also used to drive coercions in
+/// type-checking (see #60424).
+#[derive(Clone, PartialEq, PartialOrd, Eq, Hash, Debug, Copy)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub enum AsyncGeneratorKind {
+ /// An explicit `async` block written by the user.
+ Block,
+
+ /// An explicit `async` closure written by the user.
+ Closure,
+
+ /// The `async` block generated as the body of an async function.
+ Fn,
+}
+
+impl fmt::Display for AsyncGeneratorKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match self {
+ AsyncGeneratorKind::Block => "`async` block",
+ AsyncGeneratorKind::Closure => "`async` closure body",
+ AsyncGeneratorKind::Fn => "`async fn` body",
+ })
+ }
+}
+
+impl AsyncGeneratorKind {
+ pub fn descr(&self) -> &'static str {
+ match self {
+ AsyncGeneratorKind::Block => "`async` block",
+ AsyncGeneratorKind::Closure => "`async` closure body",
+ AsyncGeneratorKind::Fn => "`async fn` body",
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum BodyOwnerKind {
+ /// Functions and methods.
+ Fn,
+
+ /// Closures
+ Closure,
+
+ /// Constants and associated constants.
+ Const,
+
+ /// Initializer of a `static` item.
+ Static(Mutability),
+}
+
+impl BodyOwnerKind {
+ pub fn is_fn_or_closure(self) -> bool {
+ match self {
+ BodyOwnerKind::Fn | BodyOwnerKind::Closure => true,
+ BodyOwnerKind::Const | BodyOwnerKind::Static(_) => false,
+ }
+ }
+}
+
+/// The kind of an item that requires const-checking.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum ConstContext {
+ /// A `const fn`.
+ ConstFn,
+
+ /// A `static` or `static mut`.
+ Static(Mutability),
+
+ /// A `const`, associated `const`, or other const context.
+ ///
+ /// Other contexts include:
+ /// - Array length expressions
+ /// - Enum discriminants
+ /// - Const generics
+ ///
+ /// For the most part, other contexts are treated just like a regular `const`, so they are
+ /// lumped into the same category.
+ Const,
+}
+
+impl ConstContext {
+ /// A description of this const context that can appear between backticks in an error message.
+ ///
+ /// E.g. `const` or `static mut`.
+ pub fn keyword_name(self) -> &'static str {
+ match self {
+ Self::Const => "const",
+ Self::Static(Mutability::Not) => "static",
+ Self::Static(Mutability::Mut) => "static mut",
+ Self::ConstFn => "const fn",
+ }
+ }
+}
+
+/// A colloquial, trivially pluralizable description of this const context for use in error
+/// messages.
+impl fmt::Display for ConstContext {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Self::Const => write!(f, "constant"),
+ Self::Static(_) => write!(f, "static"),
+ Self::ConstFn => write!(f, "constant function"),
+ }
+ }
+}
+
+// NOTE: `IntoDiagnosticArg` impl for `ConstContext` lives in `rustc_errors`
+// due to a cyclical dependency between hir and that crate.
+
+/// A literal.
+pub type Lit = Spanned<LitKind>;
+
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Debug, HashStable_Generic)]
+pub enum ArrayLen {
+ Infer(HirId, Span),
+ Body(AnonConst),
+}
+
+impl ArrayLen {
+ pub fn hir_id(&self) -> HirId {
+ match self {
+ &ArrayLen::Infer(hir_id, _) | &ArrayLen::Body(AnonConst { hir_id, body: _ }) => hir_id,
+ }
+ }
+}
+
+/// A constant (expression) that's not an item or associated item,
+/// but needs its own `DefId` for type-checking, const-eval, etc.
+/// These are usually found nested inside types (e.g., array lengths)
+/// or expressions (e.g., repeat counts), and also used to define
+/// explicit discriminant values for enum variants.
+///
+/// You can check if this anon const is a default in a const param
+/// `const N: usize = { ... }` with `tcx.hir().opt_const_param_default_param_hir_id(..)`
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Debug, HashStable_Generic)]
+pub struct AnonConst {
+ pub hir_id: HirId,
+ pub body: BodyId,
+}
+
+/// An expression.
+#[derive(Debug)]
+pub struct Expr<'hir> {
+ pub hir_id: HirId,
+ pub kind: ExprKind<'hir>,
+ pub span: Span,
+}
+
+impl Expr<'_> {
+ pub fn precedence(&self) -> ExprPrecedence {
+ match self.kind {
+ ExprKind::Box(_) => ExprPrecedence::Box,
+ ExprKind::ConstBlock(_) => ExprPrecedence::ConstBlock,
+ ExprKind::Array(_) => ExprPrecedence::Array,
+ ExprKind::Call(..) => ExprPrecedence::Call,
+ ExprKind::MethodCall(..) => ExprPrecedence::MethodCall,
+ ExprKind::Tup(_) => ExprPrecedence::Tup,
+ ExprKind::Binary(op, ..) => ExprPrecedence::Binary(op.node.into()),
+ ExprKind::Unary(..) => ExprPrecedence::Unary,
+ ExprKind::Lit(_) => ExprPrecedence::Lit,
+ ExprKind::Type(..) | ExprKind::Cast(..) => ExprPrecedence::Cast,
+ ExprKind::DropTemps(ref expr, ..) => expr.precedence(),
+ ExprKind::If(..) => ExprPrecedence::If,
+ ExprKind::Let(..) => ExprPrecedence::Let,
+ ExprKind::Loop(..) => ExprPrecedence::Loop,
+ ExprKind::Match(..) => ExprPrecedence::Match,
+ ExprKind::Closure { .. } => ExprPrecedence::Closure,
+ ExprKind::Block(..) => ExprPrecedence::Block,
+ ExprKind::Assign(..) => ExprPrecedence::Assign,
+ ExprKind::AssignOp(..) => ExprPrecedence::AssignOp,
+ ExprKind::Field(..) => ExprPrecedence::Field,
+ ExprKind::Index(..) => ExprPrecedence::Index,
+ ExprKind::Path(..) => ExprPrecedence::Path,
+ ExprKind::AddrOf(..) => ExprPrecedence::AddrOf,
+ ExprKind::Break(..) => ExprPrecedence::Break,
+ ExprKind::Continue(..) => ExprPrecedence::Continue,
+ ExprKind::Ret(..) => ExprPrecedence::Ret,
+ ExprKind::InlineAsm(..) => ExprPrecedence::InlineAsm,
+ ExprKind::Struct(..) => ExprPrecedence::Struct,
+ ExprKind::Repeat(..) => ExprPrecedence::Repeat,
+ ExprKind::Yield(..) => ExprPrecedence::Yield,
+ ExprKind::Err => ExprPrecedence::Err,
+ }
+ }
+
+ // Whether this looks like a place expr, without checking for deref
+ // adjustments.
+ // This will return `true` in some potentially surprising cases such as
+ // `CONSTANT.field`.
+ pub fn is_syntactic_place_expr(&self) -> bool {
+ self.is_place_expr(|_| true)
+ }
+
+ /// Whether this is a place expression.
+ ///
+ /// `allow_projections_from` should return `true` if indexing a field or index expression based
+ /// on the given expression should be considered a place expression.
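+ ///
+ /// For instance, passing `|_| true` (as `is_syntactic_place_expr` does)
+ /// treats every field access and index projection as a place.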
+ pub fn is_place_expr(&self, mut allow_projections_from: impl FnMut(&Self) -> bool) -> bool {
+ match self.kind {
+ ExprKind::Path(QPath::Resolved(_, ref path)) => {
+ matches!(path.res, Res::Local(..) | Res::Def(DefKind::Static(_), _) | Res::Err)
+ }
+
+ // Type ascription inherits its place expression kind from its
+ // operand. See:
+ // https://github.com/rust-lang/rfcs/blob/master/text/0803-type-ascription.md#type-ascription-and-temporaries
+ ExprKind::Type(ref e, _) => e.is_place_expr(allow_projections_from),
+
+ ExprKind::Unary(UnOp::Deref, _) => true,
+
+ ExprKind::Field(ref base, _) | ExprKind::Index(ref base, _) => {
+ allow_projections_from(base) || base.is_place_expr(allow_projections_from)
+ }
+
+ // Lang item paths cannot currently be local variables or statics.
+ ExprKind::Path(QPath::LangItem(..)) => false,
+
+ // Partially qualified paths in expressions can only legally
+ // refer to associated items which are always rvalues.
+ ExprKind::Path(QPath::TypeRelative(..))
+ | ExprKind::Call(..)
+ | ExprKind::MethodCall(..)
+ | ExprKind::Struct(..)
+ | ExprKind::Tup(..)
+ | ExprKind::If(..)
+ | ExprKind::Match(..)
+ | ExprKind::Closure { .. }
+ | ExprKind::Block(..)
+ | ExprKind::Repeat(..)
+ | ExprKind::Array(..)
+ | ExprKind::Break(..)
+ | ExprKind::Continue(..)
+ | ExprKind::Ret(..)
+ | ExprKind::Let(..)
+ | ExprKind::Loop(..)
+ | ExprKind::Assign(..)
+ | ExprKind::InlineAsm(..)
+ | ExprKind::AssignOp(..)
+ | ExprKind::Lit(_)
+ | ExprKind::ConstBlock(..)
+ | ExprKind::Unary(..)
+ | ExprKind::Box(..)
+ | ExprKind::AddrOf(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Yield(..)
+ | ExprKind::Cast(..)
+ | ExprKind::DropTemps(..)
+ | ExprKind::Err => false,
+ }
+ }
+
+ /// If `Self.kind` is `ExprKind::DropTemps(expr)`, drill down until we get a non-`DropTemps`
+ /// `Expr`. This is used in suggestions to ignore this `ExprKind` as it is semantically
+ /// silent, only signaling the ownership system. By doing this, suggestions that check the
+ /// `ExprKind` of any given `Expr` for presentation don't have to care about `DropTemps`
+ /// beyond remembering to call this function before doing analysis on it.
+ pub fn peel_drop_temps(&self) -> &Self {
+ let mut expr = self;
+ while let ExprKind::DropTemps(inner) = &expr.kind {
+ expr = inner;
+ }
+ expr
+ }
+
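+ /// Peels off any number of outer blocks that end in a trailing
+ /// expression, e.g. returns the `1` expression for the HIR of `{ { 1 } }`.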
+ pub fn peel_blocks(&self) -> &Self {
+ let mut expr = self;
+ while let ExprKind::Block(Block { expr: Some(inner), .. }, _) = &expr.kind {
+ expr = inner;
+ }
+ expr
+ }
+
+ pub fn can_have_side_effects(&self) -> bool {
+ match self.peel_drop_temps().kind {
+ ExprKind::Path(_) | ExprKind::Lit(_) => false,
+ ExprKind::Type(base, _)
+ | ExprKind::Unary(_, base)
+ | ExprKind::Field(base, _)
+ | ExprKind::Index(base, _)
+ | ExprKind::AddrOf(.., base)
+ | ExprKind::Cast(base, _) => {
+ // This isn't exactly true for `Index` and all `Unary`, but we are using this
+ // method exclusively for diagnostics and there's a *cultural* pressure against
+ // them being used only for their side-effects.
+ base.can_have_side_effects()
+ }
+ ExprKind::Struct(_, fields, init) => fields
+ .iter()
+ .map(|field| field.expr)
+ .chain(init.into_iter())
+ .all(|e| e.can_have_side_effects()),
+
+ ExprKind::Array(args)
+ | ExprKind::Tup(args)
+ | ExprKind::Call(
+ Expr {
+ kind:
+ ExprKind::Path(QPath::Resolved(
+ None,
+ Path { res: Res::Def(DefKind::Ctor(_, CtorKind::Fn), _), .. },
+ )),
+ ..
+ },
+ args,
+ ) => args.iter().all(|arg| arg.can_have_side_effects()),
+ ExprKind::If(..)
+ | ExprKind::Match(..)
+ | ExprKind::MethodCall(..)
+ | ExprKind::Call(..)
+ | ExprKind::Closure { .. }
+ | ExprKind::Block(..)
+ | ExprKind::Repeat(..)
+ | ExprKind::Break(..)
+ | ExprKind::Continue(..)
+ | ExprKind::Ret(..)
+ | ExprKind::Let(..)
+ | ExprKind::Loop(..)
+ | ExprKind::Assign(..)
+ | ExprKind::InlineAsm(..)
+ | ExprKind::AssignOp(..)
+ | ExprKind::ConstBlock(..)
+ | ExprKind::Box(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Yield(..)
+ | ExprKind::DropTemps(..)
+ | ExprKind::Err => true,
+ }
+ }
+
+ // To a first-order approximation, is this a pattern?
+ pub fn is_approximately_pattern(&self) -> bool {
+ match &self.kind {
+ ExprKind::Box(_)
+ | ExprKind::Array(_)
+ | ExprKind::Call(..)
+ | ExprKind::Tup(_)
+ | ExprKind::Lit(_)
+ | ExprKind::Path(_)
+ | ExprKind::Struct(..) => true,
+ _ => false,
+ }
+ }
+
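+ /// Returns the identifier of the method being called, looking through
+ /// unary operators and borrows; e.g. `foo` for each of `x.foo()`,
+ /// `!x.foo()` and `&x.foo()`.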
+ pub fn method_ident(&self) -> Option<Ident> {
+ match self.kind {
+ ExprKind::MethodCall(receiver_method, ..) => Some(receiver_method.ident),
+ ExprKind::Unary(_, expr) | ExprKind::AddrOf(.., expr) => expr.method_ident(),
+ _ => None,
+ }
+ }
+}
+
+/// Checks if the specified expression is a built-in range literal.
+/// (See: `LoweringContext::lower_expr()`).
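+///
+/// Illustratively, `1..10` is lowered to a `Struct` expression whose path is
+/// the `Range` lang item and is recognized here, while `1..=10` is lowered to
+/// a call to `RangeInclusive::new` and matched by the `Call` arm below.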
+pub fn is_range_literal(expr: &Expr<'_>) -> bool {
+ match expr.kind {
+ // All built-in range literals but `..=` and `..` desugar to `Struct`s.
+ ExprKind::Struct(ref qpath, _, _) => matches!(
+ **qpath,
+ QPath::LangItem(
+ LangItem::Range
+ | LangItem::RangeTo
+ | LangItem::RangeFrom
+ | LangItem::RangeFull
+ | LangItem::RangeToInclusive,
+ ..
+ )
+ ),
+
+ // `..=` desugars into `::std::ops::RangeInclusive::new(...)`.
+ ExprKind::Call(ref func, _) => {
+ matches!(func.kind, ExprKind::Path(QPath::LangItem(LangItem::RangeInclusiveNew, ..)))
+ }
+
+ _ => false,
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub enum ExprKind<'hir> {
+ /// A `box x` expression.
+ Box(&'hir Expr<'hir>),
+ /// An anonymous constant from an inline `const` block (e.g., `const { 1 + 2 }`).
+ ConstBlock(AnonConst),
+ /// An array (e.g., `[a, b, c, d]`).
+ Array(&'hir [Expr<'hir>]),
+ /// A function call.
+ ///
+ /// The first field resolves to the function itself (usually an `ExprKind::Path`),
+ /// and the second field is the list of arguments.
+ /// This also represents calling the constructor of
+ /// tuple-like ADTs such as tuple structs and enum variants.
+ Call(&'hir Expr<'hir>, &'hir [Expr<'hir>]),
+ /// A method call (e.g., `x.foo::<'static, Bar, Baz>(a, b, c, d)`).
+ ///
+ /// The `PathSegment` represents the method name and its generic arguments
+ /// (within the angle brackets).
+ /// The first element of the `&[Expr]` is the expression that evaluates
+ /// to the object on which the method is being called on (the receiver),
+ /// and the remaining elements are the rest of the arguments.
+ /// Thus, `x.foo::<Bar, Baz>(a, b, c, d)` is represented as
+ /// `ExprKind::MethodCall(PathSegment { foo, [Bar, Baz] }, [x, a, b, c, d], span)`.
+ /// The final `Span` represents the span of the function and arguments
+ /// (e.g. `foo::<Bar, Baz>(a, b, c, d)` in `x.foo::<Bar, Baz>(a, b, c, d)`).
+ ///
+ /// To resolve the called method to a `DefId`, call [`type_dependent_def_id`] with
+ /// the `hir_id` of the `MethodCall` node itself.
+ ///
+ /// [`type_dependent_def_id`]: ../../rustc_middle/ty/struct.TypeckResults.html#method.type_dependent_def_id
+ MethodCall(&'hir PathSegment<'hir>, &'hir [Expr<'hir>], Span),
+ /// A tuple (e.g., `(a, b, c, d)`).
+ Tup(&'hir [Expr<'hir>]),
+ /// A binary operation (e.g., `a + b`, `a * b`).
+ Binary(BinOp, &'hir Expr<'hir>, &'hir Expr<'hir>),
+ /// A unary operation (e.g., `!x`, `*x`).
+ Unary(UnOp, &'hir Expr<'hir>),
+ /// A literal (e.g., `1`, `"foo"`).
+ Lit(Lit),
+ /// A cast (e.g., `foo as f64`).
+ Cast(&'hir Expr<'hir>, &'hir Ty<'hir>),
+ /// A type reference (e.g., `Foo`).
+ Type(&'hir Expr<'hir>, &'hir Ty<'hir>),
+ /// Wraps the expression in a terminating scope.
+ /// This makes it semantically equivalent to `{ let _t = expr; _t }`.
+ ///
+ /// This construct only exists to tweak the drop order in HIR lowering.
+ /// An example of that is the desugaring of `for` loops.
+ DropTemps(&'hir Expr<'hir>),
+ /// A `let $pat = $expr` expression.
+ ///
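+/// The payload of `ExprKind::Closure`: a closure (or generator) expression
+/// such as `move |a, b| a + b`.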
+ /// These are not `Local` and only occur as expressions.
+ /// The `let Some(x) = foo()` in `if let Some(x) = foo()` is an example of `Let(..)`.
+ Let(&'hir Let<'hir>),
+ /// An `if` block, with an optional else block.
+ ///
+ /// I.e., `if <expr> { <expr> } else { <expr> }`.
+ If(&'hir Expr<'hir>, &'hir Expr<'hir>, Option<&'hir Expr<'hir>>),
+ /// A conditionless loop (can be exited with `break`, `continue`, or `return`).
+ ///
+ /// I.e., `'label: loop { <block> }`.
+ ///
+ /// The `Span` is the loop header (`for x in y`/`while let pat = expr`).
+ Loop(&'hir Block<'hir>, Option<Label>, LoopSource, Span),
+ /// A `match` block, with a source that indicates whether or not it is
+ /// the result of a desugaring, and if so, which kind.
+ Match(&'hir Expr<'hir>, &'hir [Arm<'hir>], MatchSource),
+ /// A closure (e.g., `move |a, b, c| {a + b + c}`).
+ ///
+ /// The `Span` is the argument block `|...|`.
+ ///
+ /// This may also be a generator literal or an `async block` as indicated by the
+ /// `Option<Movability>`.
+ Closure(&'hir Closure<'hir>),
+ /// A block (e.g., `'label: { ... }`).
+ Block(&'hir Block<'hir>, Option<Label>),
+
+ /// An assignment (e.g., `a = foo()`).
+ Assign(&'hir Expr<'hir>, &'hir Expr<'hir>, Span),
+ /// An assignment with an operator.
+ ///
+ /// E.g., `a += 1`.
+ AssignOp(BinOp, &'hir Expr<'hir>, &'hir Expr<'hir>),
+ /// Access of a named (e.g., `obj.foo`) or unnamed (e.g., `obj.0`) struct or tuple field.
+ Field(&'hir Expr<'hir>, Ident),
+ /// An indexing operation (`foo[2]`).
+ Index(&'hir Expr<'hir>, &'hir Expr<'hir>),
+
+ /// Path to a definition, possibly containing lifetime or type parameters.
+ Path(QPath<'hir>),
+
+ /// A referencing operation (i.e., `&a` or `&mut a`).
+ AddrOf(BorrowKind, Mutability, &'hir Expr<'hir>),
+ /// A `break`, with an optional label to break.
+ Break(Destination, Option<&'hir Expr<'hir>>),
+ /// A `continue`, with an optional label.
+ Continue(Destination),
+ /// A `return`, with an optional value to be returned.
+ Ret(Option<&'hir Expr<'hir>>),
+
+ /// Inline assembly (from `asm!`), with its outputs and inputs.
+ InlineAsm(&'hir InlineAsm<'hir>),
+
+ /// A struct or struct-like variant literal expression.
+ ///
+ /// E.g., `Foo {x: 1, y: 2}`, or `Foo {x: 1, .. base}`,
+ /// where `base` is the `Option<Expr>`.
+ Struct(&'hir QPath<'hir>, &'hir [ExprField<'hir>], Option<&'hir Expr<'hir>>),
+
+ /// An array literal constructed from one repeated element.
+ ///
+ /// E.g., `[1; 5]`. The first expression is the element
+ /// to be repeated; the second is the number of times to repeat it.
+ Repeat(&'hir Expr<'hir>, ArrayLen),
+
+ /// A suspension point for generators (i.e., `yield <expr>`).
+ Yield(&'hir Expr<'hir>, YieldSource),
+
+ /// A placeholder for an expression that wasn't syntactically well formed in some way.
+ Err,
+}
+
+/// Represents an optionally `Self`-qualified value/type path or associated extension.
+///
+/// To resolve the path to a `DefId`, call [`qpath_res`].
+///
+/// [`qpath_res`]: ../../rustc_middle/ty/struct.TypeckResults.html#method.qpath_res
+#[derive(Debug, HashStable_Generic)]
+pub enum QPath<'hir> {
+ /// Path to a definition, optionally "fully-qualified" with a `Self`
+ /// type, if the path points to an associated item in a trait.
+ ///
+ /// E.g., an unqualified path like `Clone::clone` has `None` for `Self`,
+ /// while `<Vec<T> as Clone>::clone` has `Some(Vec<T>)` for `Self`,
+ /// even though they both have the same two-segment `Clone::clone` `Path`.
+ Resolved(Option<&'hir Ty<'hir>>, &'hir Path<'hir>),
+
+ /// Type-related paths (e.g., `<T>::default` or `<T>::Output`).
+ /// Will be resolved by type-checking to an associated item.
+ ///
+ /// UFCS source paths can desugar into this, with `Vec::new` turning into
+ /// `<Vec>::new`, and `T::X::Y::method` into `<<<T>::X>::Y>::method`,
+ /// the `X` and `Y` nodes each being a `TyKind::Path(QPath::TypeRelative(..))`.
+ TypeRelative(&'hir Ty<'hir>, &'hir PathSegment<'hir>),
+
+ /// Reference to a `#[lang = "foo"]` item. `HirId` of the inner expr.
+ LangItem(LangItem, Span, Option<HirId>),
+}
+
+impl<'hir> QPath<'hir> {
+ /// Returns the span of this `QPath`.
+ pub fn span(&self) -> Span {
+ match *self {
+ QPath::Resolved(_, path) => path.span,
+ QPath::TypeRelative(qself, ps) => qself.span.to(ps.ident.span),
+ QPath::LangItem(_, span, _) => span,
+ }
+ }
+
+ /// Returns the span of the qself of this `QPath`. For example, `()` in
+ /// `<() as Trait>::method`.
+ pub fn qself_span(&self) -> Span {
+ match *self {
+ QPath::Resolved(_, path) => path.span,
+ QPath::TypeRelative(qself, _) => qself.span,
+ QPath::LangItem(_, span, _) => span,
+ }
+ }
+
+ /// Returns the span of the last segment of this `QPath`. For example, `method` in
+ /// `<() as Trait>::method`.
+ pub fn last_segment_span(&self) -> Span {
+ match *self {
+ QPath::Resolved(_, path) => path.segments.last().unwrap().ident.span,
+ QPath::TypeRelative(_, segment) => segment.ident.span,
+ QPath::LangItem(_, span, _) => span,
+ }
+ }
+}
+
+/// Hints at the original code for a let statement.
+#[derive(Copy, Clone, Encodable, Debug, HashStable_Generic)]
+pub enum LocalSource {
+ /// A `match _ { .. }`.
+ Normal,
+ /// When lowering async functions, we create locals within the `async move` so that
+ /// all parameters are dropped after the future is polled.
+ ///
+ /// ```ignore (pseudo-Rust)
+ /// async fn foo(<pattern> @ x: Type) {
+ /// async move {
+ /// let <pattern> = x;
+ /// }
+ /// }
+ /// ```
+ AsyncFn,
+ /// A desugared `<expr>.await`.
+ AwaitDesugar,
+ /// A desugared `expr = expr`, where the LHS is a tuple, struct or array.
+ /// The span is that of the `=` sign.
+ AssignDesugar(Span),
+}
+
+/// Hints at the original code for a `match _ { .. }`.
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum MatchSource {
+ /// A `match _ { .. }`.
+ Normal,
+ /// A desugared `for _ in _ { .. }` loop.
+ ForLoopDesugar,
+ /// A desugared `?` operator.
+ TryDesugar,
+ /// A desugared `<expr>.await`.
+ AwaitDesugar,
+}
+
+impl MatchSource {
+ #[inline]
+ pub const fn name(self) -> &'static str {
+ use MatchSource::*;
+ match self {
+ Normal => "match",
+ ForLoopDesugar => "for",
+ TryDesugar => "?",
+ AwaitDesugar => ".await",
+ }
+ }
+}
+
+/// The loop type that yielded an `ExprKind::Loop`.
+#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
+pub enum LoopSource {
+ /// A `loop { .. }` loop.
+ Loop,
+ /// A `while _ { .. }` loop.
+ While,
+ /// A `for _ in _ { .. }` loop.
+ ForLoop,
+}
+
+impl LoopSource {
+ pub fn name(self) -> &'static str {
+ match self {
+ LoopSource::Loop => "loop",
+ LoopSource::While => "while",
+ LoopSource::ForLoop => "for",
+ }
+ }
+}
+
+#[derive(Copy, Clone, Encodable, Debug, HashStable_Generic)]
+pub enum LoopIdError {
+ OutsideLoopScope,
+ UnlabeledCfInWhileCondition,
+ UnresolvedLabel,
+}
+
+impl fmt::Display for LoopIdError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match self {
+ LoopIdError::OutsideLoopScope => "not inside loop scope",
+ LoopIdError::UnlabeledCfInWhileCondition => {
+ "unlabeled control flow (break or continue) in while condition"
+ }
+ LoopIdError::UnresolvedLabel => "label not found",
+ })
+ }
+}
+
+#[derive(Copy, Clone, Encodable, Debug, HashStable_Generic)]
+pub struct Destination {
+ // This is `Some(_)` iff there is an explicit user-specified `label`.
+ pub label: Option<Label>,
+
+ // These errors are caught and then reported during the diagnostics pass in
+ // librustc_passes/loops.rs
+ pub target_id: Result<HirId, LoopIdError>,
+}
+
+/// The yield kind that caused an `ExprKind::Yield`.
+#[derive(Copy, Clone, PartialEq, Eq, Debug, Encodable, Decodable, HashStable_Generic)]
+pub enum YieldSource {
+ /// An `<expr>.await`.
+ Await { expr: Option<HirId> },
+ /// A plain `yield`.
+ Yield,
+}
+
+impl YieldSource {
+ pub fn is_await(&self) -> bool {
+ matches!(self, YieldSource::Await { .. })
+ }
+}
+
+impl fmt::Display for YieldSource {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match self {
+ YieldSource::Await { .. } => "`await`",
+ YieldSource::Yield => "`yield`",
+ })
+ }
+}
+
+impl From<GeneratorKind> for YieldSource {
+ fn from(kind: GeneratorKind) -> Self {
+ match kind {
+ // Guess based on the kind of the current generator.
+ GeneratorKind::Gen => Self::Yield,
+ GeneratorKind::Async(_) => Self::Await { expr: None },
+ }
+ }
+}
+
+// N.B., if you change this, you'll probably want to change the corresponding
+// type structure in middle/ty.rs as well.
+#[derive(Debug, HashStable_Generic)]
+pub struct MutTy<'hir> {
+ pub ty: &'hir Ty<'hir>,
+ pub mutbl: Mutability,
+}
+
+/// Represents a function's signature in a trait declaration,
+/// trait implementation, or a free function.
+#[derive(Debug, HashStable_Generic)]
+pub struct FnSig<'hir> {
+ pub header: FnHeader,
+ pub decl: &'hir FnDecl<'hir>,
+ pub span: Span,
+}
+
+// The bodies for items are stored "out of line", in a separate
+// hashmap in the `Crate`. Here we just record the hir-id of the item
+// so it can be fetched later.
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub struct TraitItemId {
+ pub def_id: LocalDefId,
+}
+
+impl TraitItemId {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+}
+
+/// Represents an item declaration within a trait declaration,
+/// possibly including a default implementation. A trait item is
+/// either required (meaning it doesn't have an implementation, just a
+/// signature) or provided (meaning it has a default implementation).
+#[derive(Debug, HashStable_Generic)]
+pub struct TraitItem<'hir> {
+ pub ident: Ident,
+ pub def_id: LocalDefId,
+ pub generics: &'hir Generics<'hir>,
+ pub kind: TraitItemKind<'hir>,
+ pub span: Span,
+ pub defaultness: Defaultness,
+}
+
+impl TraitItem<'_> {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+
+ pub fn trait_item_id(&self) -> TraitItemId {
+ TraitItemId { def_id: self.def_id }
+ }
+}
+
+/// Represents a trait method's body (or just argument names).
+#[derive(Encodable, Debug, HashStable_Generic)]
+pub enum TraitFn<'hir> {
+ /// No default body in the trait, just a signature.
+ Required(&'hir [Ident]),
+
+ /// Both signature and body are provided in the trait.
+ Provided(BodyId),
+}
+
+/// Represents a trait method or associated constant or type
+#[derive(Debug, HashStable_Generic)]
+pub enum TraitItemKind<'hir> {
+ /// An associated constant with an optional value (otherwise `impl`s must contain a value).
+ Const(&'hir Ty<'hir>, Option<BodyId>),
+ /// An associated function with an optional body.
+ Fn(FnSig<'hir>, TraitFn<'hir>),
+ /// An associated type with (possibly empty) bounds and optional concrete
+ /// type.
+ Type(GenericBounds<'hir>, Option<&'hir Ty<'hir>>),
+}
+
+// The bodies for items are stored "out of line", in a separate
+// hashmap in the `Crate`. Here we just record the hir-id of the item
+// so it can be fetched later.
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub struct ImplItemId {
+ pub def_id: LocalDefId,
+}
+
+impl ImplItemId {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+}
+
+/// Represents anything within an `impl` block.
+#[derive(Debug, HashStable_Generic)]
+pub struct ImplItem<'hir> {
+ pub ident: Ident,
+ pub def_id: LocalDefId,
+ pub generics: &'hir Generics<'hir>,
+ pub kind: ImplItemKind<'hir>,
+ pub defaultness: Defaultness,
+ pub span: Span,
+ pub vis_span: Span,
+}
+
+impl ImplItem<'_> {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+
+ pub fn impl_item_id(&self) -> ImplItemId {
+ ImplItemId { def_id: self.def_id }
+ }
+}
+
+/// Represents various kinds of content within an `impl`.
+#[derive(Debug, HashStable_Generic)]
+pub enum ImplItemKind<'hir> {
+ /// An associated constant of the given type, set to the constant result
+ /// of the expression.
+ Const(&'hir Ty<'hir>, BodyId),
+ /// An associated function implementation with the given signature and body.
+ Fn(FnSig<'hir>, BodyId),
+ /// An associated type.
+ TyAlias(&'hir Ty<'hir>),
+}
+
+// The name of the associated type for `Fn` return types.
+pub const FN_OUTPUT_NAME: Symbol = sym::Output;
+
+/// Bind a type to an associated type (i.e., `A = Foo`).
+///
+/// Bindings like `A: Debug` are represented as a special type `A =
+/// $::Debug` that is understood by the astconv code.
+///
+/// FIXME(alexreg): why have a separate type for the binding case,
+/// wouldn't it be better to make the `ty` field an enum like the
+/// following?
+///
+/// ```ignore (pseudo-rust)
+/// enum TypeBindingKind {
+/// Equals(...),
+/// Binding(...),
+/// }
+/// ```
+#[derive(Debug, HashStable_Generic)]
+pub struct TypeBinding<'hir> {
+ pub hir_id: HirId,
+ pub ident: Ident,
+ pub gen_args: &'hir GenericArgs<'hir>,
+ pub kind: TypeBindingKind<'hir>,
+ pub span: Span,
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub enum Term<'hir> {
+ Ty(&'hir Ty<'hir>),
+ Const(AnonConst),
+}
+
+impl<'hir> From<&'hir Ty<'hir>> for Term<'hir> {
+ fn from(ty: &'hir Ty<'hir>) -> Self {
+ Term::Ty(ty)
+ }
+}
+
+impl<'hir> From<AnonConst> for Term<'hir> {
+ fn from(c: AnonConst) -> Self {
+ Term::Const(c)
+ }
+}
+
+// Represents the two kinds of type bindings.
+#[derive(Debug, HashStable_Generic)]
+pub enum TypeBindingKind<'hir> {
+ /// E.g., `Foo<Bar: Send>`.
+ Constraint { bounds: &'hir [GenericBound<'hir>] },
+ /// E.g., `Foo<Bar = ()>` (a type) or `Foo<N = 3>` (a const).
+ Equality { term: Term<'hir> },
+}
+
+impl TypeBinding<'_> {
+ pub fn ty(&self) -> &Ty<'_> {
+ match self.kind {
+ TypeBindingKind::Equality { term: Term::Ty(ref ty) } => ty,
+ _ => panic!("expected equality type binding for parenthesized generic args"),
+ }
+ }
+ pub fn opt_const(&self) -> Option<&'_ AnonConst> {
+ match self.kind {
+ TypeBindingKind::Equality { term: Term::Const(ref c) } => Some(c),
+ _ => None,
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct Ty<'hir> {
+ pub hir_id: HirId,
+ pub kind: TyKind<'hir>,
+ pub span: Span,
+}
+
+impl<'hir> Ty<'hir> {
+ /// Returns the `DefId` and `Ident` of the generic type parameter
+ /// (including `Self` in a trait) that this single-segment path resolves to, if any.
+ pub fn as_generic_param(&self) -> Option<(DefId, Ident)> {
+ let TyKind::Path(QPath::Resolved(None, path)) = self.kind else {
+ return None;
+ };
+ let [segment] = &path.segments else {
+ return None;
+ };
+ match path.res {
+ Res::Def(DefKind::TyParam, def_id)
+ | Res::SelfTy { trait_: Some(def_id), alias_to: None } => Some((def_id, segment.ident)),
+ _ => None,
+ }
+ }
+}
+
+/// Not represented directly in the AST; referred to by name through a `ty_path`.
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum PrimTy {
+ Int(IntTy),
+ Uint(UintTy),
+ Float(FloatTy),
+ Str,
+ Bool,
+ Char,
+}
+
+impl PrimTy {
+ /// All of the primitive types
+ pub const ALL: [Self; 17] = [
+ // any changes here should also be reflected in `PrimTy::from_name`
+ Self::Int(IntTy::I8),
+ Self::Int(IntTy::I16),
+ Self::Int(IntTy::I32),
+ Self::Int(IntTy::I64),
+ Self::Int(IntTy::I128),
+ Self::Int(IntTy::Isize),
+ Self::Uint(UintTy::U8),
+ Self::Uint(UintTy::U16),
+ Self::Uint(UintTy::U32),
+ Self::Uint(UintTy::U64),
+ Self::Uint(UintTy::U128),
+ Self::Uint(UintTy::Usize),
+ Self::Float(FloatTy::F32),
+ Self::Float(FloatTy::F64),
+ Self::Bool,
+ Self::Char,
+ Self::Str,
+ ];
+
+ /// Like [`PrimTy::name`], but returns a &str instead of a symbol.
+ ///
+ /// Used by clippy.
+ pub fn name_str(self) -> &'static str {
+ match self {
+ PrimTy::Int(i) => i.name_str(),
+ PrimTy::Uint(u) => u.name_str(),
+ PrimTy::Float(f) => f.name_str(),
+ PrimTy::Str => "str",
+ PrimTy::Bool => "bool",
+ PrimTy::Char => "char",
+ }
+ }
+
+ pub fn name(self) -> Symbol {
+ match self {
+ PrimTy::Int(i) => i.name(),
+ PrimTy::Uint(u) => u.name(),
+ PrimTy::Float(f) => f.name(),
+ PrimTy::Str => sym::str,
+ PrimTy::Bool => sym::bool,
+ PrimTy::Char => sym::char,
+ }
+ }
+
+ /// Returns the matching `PrimTy` for a `Symbol` such as "str" or "i32".
+ /// Returns `None` if no matching type is found.
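+ ///
+ /// An illustrative round-trip (a doc sketch, not a compiled test):
+ ///
+ /// ```ignore (illustrative)
+ /// assert_eq!(PrimTy::from_name(sym::u8), Some(PrimTy::Uint(UintTy::U8)));
+ /// assert_eq!(PrimTy::from_name(sym::main), None);
+ /// ```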
+ pub fn from_name(name: Symbol) -> Option<Self> {
+ let ty = match name {
+ // any changes here should also be reflected in `PrimTy::ALL`
+ sym::i8 => Self::Int(IntTy::I8),
+ sym::i16 => Self::Int(IntTy::I16),
+ sym::i32 => Self::Int(IntTy::I32),
+ sym::i64 => Self::Int(IntTy::I64),
+ sym::i128 => Self::Int(IntTy::I128),
+ sym::isize => Self::Int(IntTy::Isize),
+ sym::u8 => Self::Uint(UintTy::U8),
+ sym::u16 => Self::Uint(UintTy::U16),
+ sym::u32 => Self::Uint(UintTy::U32),
+ sym::u64 => Self::Uint(UintTy::U64),
+ sym::u128 => Self::Uint(UintTy::U128),
+ sym::usize => Self::Uint(UintTy::Usize),
+ sym::f32 => Self::Float(FloatTy::F32),
+ sym::f64 => Self::Float(FloatTy::F64),
+ sym::bool => Self::Bool,
+ sym::char => Self::Char,
+ sym::str => Self::Str,
+ _ => return None,
+ };
+ Some(ty)
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct BareFnTy<'hir> {
+ pub unsafety: Unsafety,
+ pub abi: Abi,
+ pub generic_params: &'hir [GenericParam<'hir>],
+ pub decl: &'hir FnDecl<'hir>,
+ pub param_names: &'hir [Ident],
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct OpaqueTy<'hir> {
+ pub generics: &'hir Generics<'hir>,
+ pub bounds: GenericBounds<'hir>,
+ pub origin: OpaqueTyOrigin,
+}
+
+/// From whence the opaque type came.
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum OpaqueTyOrigin {
+ /// `-> impl Trait`
+ FnReturn(LocalDefId),
+ /// `async fn`
+ AsyncFn(LocalDefId),
+ /// type aliases: `type Foo = impl Trait;`
+ TyAlias,
+}
+
+/// The various kinds of types recognized by the compiler.
+#[derive(Debug, HashStable_Generic)]
+pub enum TyKind<'hir> {
+ /// A variable length slice (i.e., `[T]`).
+ Slice(&'hir Ty<'hir>),
+ /// A fixed length array (i.e., `[T; n]`).
+ Array(&'hir Ty<'hir>, ArrayLen),
+ /// A raw pointer (i.e., `*const T` or `*mut T`).
+ Ptr(MutTy<'hir>),
+ /// A reference (i.e., `&'a T` or `&'a mut T`).
+ Rptr(Lifetime, MutTy<'hir>),
+ /// A bare function (e.g., `fn(usize) -> bool`).
+ BareFn(&'hir BareFnTy<'hir>),
+ /// The never type (`!`).
+ Never,
+ /// A tuple (`(A, B, C, D, ...)`).
+ Tup(&'hir [Ty<'hir>]),
+ /// A path to a type definition (`module::module::...::Type`), or an
+ /// associated type (e.g., `<Vec<T> as Trait>::Type` or `<T>::Target`).
+ ///
+ /// Type parameters may be stored in each `PathSegment`.
+ Path(QPath<'hir>),
+ /// An opaque type definition itself. This is only used for `impl Trait`.
+ ///
+ /// The generic argument list contains the lifetimes (and in the future
+ /// possibly parameters) that are actually bound on the `impl Trait`.
+ OpaqueDef(ItemId, &'hir [GenericArg<'hir>]),
+ /// A trait object type `Bound1 + Bound2 + Bound3`
+ /// where `Bound` is a trait or a lifetime.
+ TraitObject(&'hir [PolyTraitRef<'hir>], Lifetime, TraitObjectSyntax),
+ /// Unused for now.
+ Typeof(AnonConst),
+ /// `TyKind::Infer` means the type should be inferred instead of it having been
+ /// specified. This can appear anywhere in a type.
+ Infer,
+ /// Placeholder for a type that has failed to be defined.
+ Err,
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub enum InlineAsmOperand<'hir> {
+ In {
+ reg: InlineAsmRegOrRegClass,
+ expr: Expr<'hir>,
+ },
+ Out {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ expr: Option<Expr<'hir>>,
+ },
+ InOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ expr: Expr<'hir>,
+ },
+ SplitInOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ in_expr: Expr<'hir>,
+ out_expr: Option<Expr<'hir>>,
+ },
+ Const {
+ anon_const: AnonConst,
+ },
+ SymFn {
+ anon_const: AnonConst,
+ },
+ SymStatic {
+ path: QPath<'hir>,
+ def_id: DefId,
+ },
+}
+
+impl<'hir> InlineAsmOperand<'hir> {
+ pub fn reg(&self) -> Option<InlineAsmRegOrRegClass> {
+ match *self {
+ Self::In { reg, .. }
+ | Self::Out { reg, .. }
+ | Self::InOut { reg, .. }
+ | Self::SplitInOut { reg, .. } => Some(reg),
+ Self::Const { .. } | Self::SymFn { .. } | Self::SymStatic { .. } => None,
+ }
+ }
+
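+ /// Returns `true` if this operand is a bare clobber: an explicit-register
+ /// output with nowhere to store the result, as in `out("eax") _`.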
+ pub fn is_clobber(&self) -> bool {
+ matches!(
+ self,
+ InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(_), late: _, expr: None }
+ )
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct InlineAsm<'hir> {
+ pub template: &'hir [InlineAsmTemplatePiece],
+ pub template_strs: &'hir [(Symbol, Option<Symbol>, Span)],
+ pub operands: &'hir [(InlineAsmOperand<'hir>, Span)],
+ pub options: InlineAsmOptions,
+ pub line_spans: &'hir [Span],
+}
+
+/// Represents a parameter in a function header.
+#[derive(Debug, HashStable_Generic)]
+pub struct Param<'hir> {
+ pub hir_id: HirId,
+ pub pat: &'hir Pat<'hir>,
+ pub ty_span: Span,
+ pub span: Span,
+}
+
+/// Represents the header (not the body) of a function declaration.
+#[derive(Debug, HashStable_Generic)]
+pub struct FnDecl<'hir> {
+ /// The types of the function's parameters.
+ ///
+ /// Additional argument data is stored in the function's [body](Body::params).
+ pub inputs: &'hir [Ty<'hir>],
+ pub output: FnRetTy<'hir>,
+ pub c_variadic: bool,
+ /// Does the function have an implicit self?
+ pub implicit_self: ImplicitSelfKind,
+}
+
+/// Represents what type of implicit self a function has, if any.
+#[derive(Copy, Clone, Encodable, Decodable, Debug, HashStable_Generic)]
+pub enum ImplicitSelfKind {
+ /// Represents a `fn x(self);`.
+ Imm,
+ /// Represents a `fn x(mut self);`.
+ Mut,
+ /// Represents a `fn x(&self);`.
+ ImmRef,
+ /// Represents a `fn x(&mut self);`.
+ MutRef,
+ /// Represents when a function does not have a self argument or
+ /// when a function has a `self: X` argument.
+ None,
+}
+
+impl ImplicitSelfKind {
+ /// Does this represent an implicit self?
+ pub fn has_implicit_self(&self) -> bool {
+ !matches!(*self, ImplicitSelfKind::None)
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Encodable, Decodable, Debug)]
+#[derive(HashStable_Generic)]
+pub enum IsAsync {
+ Async,
+ NotAsync,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable, HashStable_Generic)]
+pub enum Defaultness {
+ Default { has_value: bool },
+ Final,
+}
+
+impl Defaultness {
+ pub fn has_value(&self) -> bool {
+ match *self {
+ Defaultness::Default { has_value } => has_value,
+ Defaultness::Final => true,
+ }
+ }
+
+ pub fn is_final(&self) -> bool {
+ *self == Defaultness::Final
+ }
+
+ pub fn is_default(&self) -> bool {
+ matches!(*self, Defaultness::Default { .. })
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub enum FnRetTy<'hir> {
+ /// Return type is not specified.
+ ///
+ /// Functions default to `()` and
+ /// closures default to inference. Span points to where return
+ /// type would be inserted.
+ DefaultReturn(Span),
+ /// Everything else.
+ Return(&'hir Ty<'hir>),
+}
+
+impl FnRetTy<'_> {
+ #[inline]
+ pub fn span(&self) -> Span {
+ match *self {
+ Self::DefaultReturn(span) => span,
+ Self::Return(ref ty) => ty.span,
+ }
+ }
+}
+
+/// Represents `for<...>` binder before a closure
+#[derive(Copy, Clone, Debug, HashStable_Generic)]
+pub enum ClosureBinder {
+ /// Binder is not specified.
+ Default,
+ /// Binder is specified.
+ ///
+ /// Span points to the whole `for<...>`.
+ For { span: Span },
+}
+
+#[derive(Encodable, Debug, HashStable_Generic)]
+pub struct Mod<'hir> {
+ pub spans: ModSpans,
+ pub item_ids: &'hir [ItemId],
+}
+
+#[derive(Copy, Clone, Debug, HashStable_Generic, Encodable)]
+pub struct ModSpans {
+ /// A span from the first token past `{` to the last token before `}`.
+ /// For `mod foo;`, the inner span ranges from the first token
+ /// to the last token in the external file.
+ pub inner_span: Span,
+ pub inject_use_span: Span,
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct EnumDef<'hir> {
+ pub variants: &'hir [Variant<'hir>],
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct Variant<'hir> {
+ /// Name of the variant.
+ pub ident: Ident,
+ /// Id of the variant (not the constructor, see `VariantData::ctor_hir_id()`).
+ pub id: HirId,
+ /// Fields and constructor id of the variant.
+ pub data: VariantData<'hir>,
+ /// Explicit discriminant (e.g., `Foo = 1`).
+ pub disr_expr: Option<AnonConst>,
+ /// Span
+ pub span: Span,
+}
+
+#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
+pub enum UseKind {
+ /// One import, e.g., `use foo::bar` or `use foo::bar as baz`.
+ /// Also produced for each element of a list `use`, e.g.
+ /// `use foo::{a, b}` lowers to `use foo::a; use foo::b;`.
+ Single,
+
+ /// Glob import, e.g., `use foo::*`.
+ Glob,
+
+ /// Degenerate list import, e.g., `use foo::{a, b}` produces
+ /// an additional `use foo::{}` for performing checks such as
+ /// unstable feature gating. May be removed in the future.
+ ListStem,
+}
+
+/// References to traits in impls.
+///
+/// `resolve` maps each `TraitRef`'s `ref_id` to its defining trait; that's all
+/// that the `ref_id` is for. Note that `ref_id`'s value is not the `HirId` of the
+/// trait being referred to but just a unique `HirId` that serves as a key
+/// within the resolution map.
+#[derive(Clone, Debug, HashStable_Generic)]
+pub struct TraitRef<'hir> {
+ pub path: &'hir Path<'hir>,
+ // Don't hash the `ref_id`. It is tracked via the thing it is used to access.
+ #[stable_hasher(ignore)]
+ pub hir_ref_id: HirId,
+}
+
+impl TraitRef<'_> {
+ /// Gets the `DefId` of the referenced trait. It _must_ actually be a trait or trait alias.
+ pub fn trait_def_id(&self) -> Option<DefId> {
+ match self.path.res {
+ Res::Def(DefKind::Trait | DefKind::TraitAlias, did) => Some(did),
+ Res::Err => None,
+ _ => unreachable!(),
+ }
+ }
+}
+
+#[derive(Clone, Debug, HashStable_Generic)]
+pub struct PolyTraitRef<'hir> {
+ /// The `'a` in `for<'a> Foo<&'a T>`.
+ pub bound_generic_params: &'hir [GenericParam<'hir>],
+
+ /// The `Foo<&'a T>` in `for<'a> Foo<&'a T>`.
+ pub trait_ref: TraitRef<'hir>,
+
+ pub span: Span,
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct FieldDef<'hir> {
+ pub span: Span,
+ pub vis_span: Span,
+ pub ident: Ident,
+ pub hir_id: HirId,
+ pub ty: &'hir Ty<'hir>,
+}
+
+impl FieldDef<'_> {
+ // Still necessary in a couple of places
+ pub fn is_positional(&self) -> bool {
+ let first = self.ident.as_str().as_bytes()[0];
+ (b'0'..=b'9').contains(&first)
+ }
+}
+
+/// Fields and constructor IDs of enum variants and structs.
+#[derive(Debug, HashStable_Generic)]
+pub enum VariantData<'hir> {
+ /// A struct variant.
+ ///
+ /// E.g., `Bar { .. }` as in `enum Foo { Bar { .. } }`.
+ Struct(&'hir [FieldDef<'hir>], /* recovered */ bool),
+ /// A tuple variant.
+ ///
+ /// E.g., `Bar(..)` as in `enum Foo { Bar(..) }`.
+ Tuple(&'hir [FieldDef<'hir>], HirId),
+ /// A unit variant.
+ ///
+ /// E.g., `Bar = ..` as in `enum Foo { Bar = .. }`.
+ Unit(HirId),
+}
+
+impl<'hir> VariantData<'hir> {
+ /// Return the fields of this variant.
+ pub fn fields(&self) -> &'hir [FieldDef<'hir>] {
+ match *self {
+ VariantData::Struct(ref fields, ..) | VariantData::Tuple(ref fields, ..) => fields,
+ _ => &[],
+ }
+ }
+
+ /// Return the `HirId` of this variant's constructor, if it has one.
+ pub fn ctor_hir_id(&self) -> Option<HirId> {
+ match *self {
+ VariantData::Struct(_, _) => None,
+ VariantData::Tuple(_, hir_id) | VariantData::Unit(hir_id) => Some(hir_id),
+ }
+ }
+}
+
+// The bodies for items are stored "out of line", in a separate
+// hashmap in the `Crate`. Here we just record the hir-id of the item
+// so it can be fetched later.
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, Hash, HashStable_Generic)]
+pub struct ItemId {
+ pub def_id: LocalDefId,
+}
+
+impl ItemId {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+}
+
+/// An item.
+///
+/// The name might be a dummy name in case of anonymous items.
+#[derive(Debug, HashStable_Generic)]
+pub struct Item<'hir> {
+ pub ident: Ident,
+ pub def_id: LocalDefId,
+ pub kind: ItemKind<'hir>,
+ pub span: Span,
+ pub vis_span: Span,
+}
+
+impl Item<'_> {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+
+ pub fn item_id(&self) -> ItemId {
+ ItemId { def_id: self.def_id }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+pub enum Unsafety {
+ Unsafe,
+ Normal,
+}
+
+impl Unsafety {
+ pub fn prefix_str(&self) -> &'static str {
+ match self {
+ Self::Unsafe => "unsafe ",
+ Self::Normal => "",
+ }
+ }
+}
+
+impl fmt::Display for Unsafety {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match *self {
+ Self::Unsafe => "unsafe",
+ Self::Normal => "normal",
+ })
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+pub enum Constness {
+ Const,
+ NotConst,
+}
+
+impl fmt::Display for Constness {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match *self {
+ Self::Const => "const",
+ Self::NotConst => "non-const",
+ })
+ }
+}
+
+#[derive(Copy, Clone, Encodable, Debug, HashStable_Generic)]
+pub struct FnHeader {
+ pub unsafety: Unsafety,
+ pub constness: Constness,
+ pub asyncness: IsAsync,
+ pub abi: Abi,
+}
+
+impl FnHeader {
+ pub fn is_async(&self) -> bool {
+ matches!(&self.asyncness, IsAsync::Async)
+ }
+
+ pub fn is_const(&self) -> bool {
+ matches!(&self.constness, Constness::Const)
+ }
+
+ pub fn is_unsafe(&self) -> bool {
+ matches!(&self.unsafety, Unsafety::Unsafe)
+ }
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub enum ItemKind<'hir> {
+ /// An `extern crate` item, with optional *original* crate name if the crate was renamed.
+ ///
+ /// E.g., `extern crate foo` or `extern crate foo_bar as foo`.
+ ExternCrate(Option<Symbol>),
+
+ /// `use foo::bar::*;` or `use foo::bar::baz as quux;`
+ ///
+ /// or just
+ ///
+ /// `use foo::bar::baz;` (with `as baz` implicitly on the right).
+ Use(&'hir Path<'hir>, UseKind),
+
+ /// A `static` item.
+ Static(&'hir Ty<'hir>, Mutability, BodyId),
+ /// A `const` item.
+ Const(&'hir Ty<'hir>, BodyId),
+ /// A function declaration.
+ Fn(FnSig<'hir>, &'hir Generics<'hir>, BodyId),
+ /// A MBE macro definition (`macro_rules!` or `macro`).
+ Macro(ast::MacroDef, MacroKind),
+ /// A module.
+ Mod(Mod<'hir>),
+ /// An external module, e.g. `extern { .. }`.
+ ForeignMod { abi: Abi, items: &'hir [ForeignItemRef] },
+ /// Module-level inline assembly (from `global_asm!`).
+ GlobalAsm(&'hir InlineAsm<'hir>),
+ /// A type alias, e.g., `type Foo = Bar<u8>`.
+ TyAlias(&'hir Ty<'hir>, &'hir Generics<'hir>),
+ /// An opaque `impl Trait` type alias, e.g., `type Foo = impl Bar;`.
+ OpaqueTy(OpaqueTy<'hir>),
+ /// An enum definition, e.g., `enum Foo<A, B> { C(A), D(B) }`.
+ Enum(EnumDef<'hir>, &'hir Generics<'hir>),
+ /// A struct definition, e.g., `struct Foo<A> {x: A}`.
+ Struct(VariantData<'hir>, &'hir Generics<'hir>),
+ /// A union definition, e.g., `union Foo<A, B> {x: A, y: B}`.
+ Union(VariantData<'hir>, &'hir Generics<'hir>),
+ /// A trait definition.
+ Trait(IsAuto, Unsafety, &'hir Generics<'hir>, GenericBounds<'hir>, &'hir [TraitItemRef]),
+ /// A trait alias.
+ TraitAlias(&'hir Generics<'hir>, GenericBounds<'hir>),
+
+ /// An implementation, e.g., `impl<A> Trait for Foo { .. }`.
+ Impl(&'hir Impl<'hir>),
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct Impl<'hir> {
+ pub unsafety: Unsafety,
+ pub polarity: ImplPolarity,
+ pub defaultness: Defaultness,
+ // We do not put a `Span` in `Defaultness` because it breaks foreign crate metadata
+ // decoding as `Span`s cannot be decoded when a `Session` is not available.
+ pub defaultness_span: Option<Span>,
+ pub constness: Constness,
+ pub generics: &'hir Generics<'hir>,
+
+ /// The trait being implemented, if any.
+ pub of_trait: Option<TraitRef<'hir>>,
+
+ pub self_ty: &'hir Ty<'hir>,
+ pub items: &'hir [ImplItemRef],
+}
+
+impl ItemKind<'_> {
+ pub fn generics(&self) -> Option<&Generics<'_>> {
+ Some(match *self {
+ ItemKind::Fn(_, ref generics, _)
+ | ItemKind::TyAlias(_, ref generics)
+ | ItemKind::OpaqueTy(OpaqueTy { ref generics, .. })
+ | ItemKind::Enum(_, ref generics)
+ | ItemKind::Struct(_, ref generics)
+ | ItemKind::Union(_, ref generics)
+ | ItemKind::Trait(_, _, ref generics, _, _)
+ | ItemKind::TraitAlias(ref generics, _)
+ | ItemKind::Impl(Impl { ref generics, .. }) => generics,
+ _ => return None,
+ })
+ }
+
+ pub fn descr(&self) -> &'static str {
+ match self {
+ ItemKind::ExternCrate(..) => "extern crate",
+ ItemKind::Use(..) => "`use` import",
+ ItemKind::Static(..) => "static item",
+ ItemKind::Const(..) => "constant item",
+ ItemKind::Fn(..) => "function",
+ ItemKind::Macro(..) => "macro",
+ ItemKind::Mod(..) => "module",
+ ItemKind::ForeignMod { .. } => "extern block",
+ ItemKind::GlobalAsm(..) => "global asm item",
+ ItemKind::TyAlias(..) => "type alias",
+ ItemKind::OpaqueTy(..) => "opaque type",
+ ItemKind::Enum(..) => "enum",
+ ItemKind::Struct(..) => "struct",
+ ItemKind::Union(..) => "union",
+ ItemKind::Trait(..) => "trait",
+ ItemKind::TraitAlias(..) => "trait alias",
+ ItemKind::Impl(..) => "implementation",
+ }
+ }
+}
+
+/// A reference from a trait to one of its associated items. This
+/// contains the item's ID, naturally, but also the item's name and
+/// some other high-level details (like whether it is an associated
+/// type or method, and whether it is public). This allows other
+/// passes to find the trait item they want without loading the item
+/// itself (which means fewer edges in the incremental compilation graph).
+#[derive(Encodable, Debug, HashStable_Generic)]
+pub struct TraitItemRef {
+ pub id: TraitItemId,
+ pub ident: Ident,
+ pub kind: AssocItemKind,
+ pub span: Span,
+}
+
+/// A reference from an impl to one of its associated items. This
+/// contains the item's ID, naturally, but also the item's name and
+/// some other high-level details (like whether it is an associated
+/// type or method, and whether it is public). This allows other
+/// passes to find the impl item they want without loading the item
+/// itself (which means fewer edges in the incremental compilation graph).
+#[derive(Debug, HashStable_Generic)]
+pub struct ImplItemRef {
+ pub id: ImplItemId,
+ pub ident: Ident,
+ pub kind: AssocItemKind,
+ pub span: Span,
+ /// When we are in a trait impl, link to the trait-item's id.
+ pub trait_item_def_id: Option<DefId>,
+}
+
+#[derive(Copy, Clone, PartialEq, Encodable, Debug, HashStable_Generic)]
+pub enum AssocItemKind {
+ Const,
+ Fn { has_self: bool },
+ Type,
+}
+
+// The bodies for items are stored "out of line", in a separate
+// hashmap in the `Crate`. Here we just record the hir-id of the item
+// so it can be fetched later.
+#[derive(Copy, Clone, PartialEq, Eq, Encodable, Decodable, Debug, HashStable_Generic)]
+pub struct ForeignItemId {
+ pub def_id: LocalDefId,
+}
+
+impl ForeignItemId {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+}
+
+/// A reference from a foreign block to one of its items. This
+/// contains the item's ID, naturally, but also the item's name and
+/// some other high-level details (like whether it is an associated
+/// type or method, and whether it is public). This allows other
+/// passes to find the item they want without loading the item itself
+/// (which means fewer edges in the incremental compilation graph).
+#[derive(Debug, HashStable_Generic)]
+pub struct ForeignItemRef {
+ pub id: ForeignItemId,
+ pub ident: Ident,
+ pub span: Span,
+}
+
+#[derive(Debug, HashStable_Generic)]
+pub struct ForeignItem<'hir> {
+ pub ident: Ident,
+ pub kind: ForeignItemKind<'hir>,
+ pub def_id: LocalDefId,
+ pub span: Span,
+ pub vis_span: Span,
+}
+
+impl ForeignItem<'_> {
+ #[inline]
+ pub fn hir_id(&self) -> HirId {
+ // Items are always HIR owners.
+ HirId::make_owner(self.def_id)
+ }
+
+ pub fn foreign_item_id(&self) -> ForeignItemId {
+ ForeignItemId { def_id: self.def_id }
+ }
+}
+
+/// An item within an `extern` block.
+#[derive(Debug, HashStable_Generic)]
+pub enum ForeignItemKind<'hir> {
+ /// A foreign function.
+ Fn(&'hir FnDecl<'hir>, &'hir [Ident], &'hir Generics<'hir>),
+ /// A foreign static item (`static ext: u8`).
+ Static(&'hir Ty<'hir>, Mutability),
+ /// A foreign type.
+ Type,
+}
+
+/// A variable captured by a closure.
+#[derive(Debug, Copy, Clone, Encodable, HashStable_Generic)]
+pub struct Upvar {
+ // First span where it is accessed (there can be multiple).
+ pub span: Span,
+}
+
+// A `TraitCandidate`'s `import_ids` is empty if the trait is defined in the same module, and
+// has length > 0 if the trait is found through a chain of imports, starting with the
+// import/use statement in the scope where the trait is used.
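+//
+// For example (illustrative; `other_mod` and `MyTrait` are hypothetical):
+//
+//     use other_mod::MyTrait;  // this use's LocalDefId lands in import_ids
+//     value.my_trait_method(); // trait method reached through the import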
+#[derive(Encodable, Decodable, Clone, Debug, HashStable_Generic)]
+pub struct TraitCandidate {
+ pub def_id: DefId,
+ pub import_ids: SmallVec<[LocalDefId; 1]>,
+}
+
+#[derive(Copy, Clone, Debug, HashStable_Generic)]
+pub enum OwnerNode<'hir> {
+ Item(&'hir Item<'hir>),
+ ForeignItem(&'hir ForeignItem<'hir>),
+ TraitItem(&'hir TraitItem<'hir>),
+ ImplItem(&'hir ImplItem<'hir>),
+ Crate(&'hir Mod<'hir>),
+}
+
+impl<'hir> OwnerNode<'hir> {
+ pub fn ident(&self) -> Option<Ident> {
+ match self {
+ OwnerNode::Item(Item { ident, .. })
+ | OwnerNode::ForeignItem(ForeignItem { ident, .. })
+ | OwnerNode::ImplItem(ImplItem { ident, .. })
+ | OwnerNode::TraitItem(TraitItem { ident, .. }) => Some(*ident),
+ OwnerNode::Crate(..) => None,
+ }
+ }
+
+ pub fn span(&self) -> Span {
+ match self {
+ OwnerNode::Item(Item { span, .. })
+ | OwnerNode::ForeignItem(ForeignItem { span, .. })
+ | OwnerNode::ImplItem(ImplItem { span, .. })
+ | OwnerNode::TraitItem(TraitItem { span, .. }) => *span,
+ OwnerNode::Crate(Mod { spans: ModSpans { inner_span, .. }, .. }) => *inner_span,
+ }
+ }
+
+ pub fn fn_decl(&self) -> Option<&FnDecl<'hir>> {
+ match self {
+ OwnerNode::TraitItem(TraitItem { kind: TraitItemKind::Fn(fn_sig, _), .. })
+ | OwnerNode::ImplItem(ImplItem { kind: ImplItemKind::Fn(fn_sig, _), .. })
+ | OwnerNode::Item(Item { kind: ItemKind::Fn(fn_sig, _, _), .. }) => Some(fn_sig.decl),
+ OwnerNode::ForeignItem(ForeignItem {
+ kind: ForeignItemKind::Fn(fn_decl, _, _),
+ ..
+ }) => Some(fn_decl),
+ _ => None,
+ }
+ }
+
+ pub fn body_id(&self) -> Option<BodyId> {
+ match self {
+ OwnerNode::TraitItem(TraitItem {
+ kind: TraitItemKind::Fn(_, TraitFn::Provided(body_id)),
+ ..
+ })
+ | OwnerNode::ImplItem(ImplItem { kind: ImplItemKind::Fn(_, body_id), .. })
+ | OwnerNode::Item(Item { kind: ItemKind::Fn(.., body_id), .. }) => Some(*body_id),
+ _ => None,
+ }
+ }
+
+ pub fn generics(self) -> Option<&'hir Generics<'hir>> {
+ Node::generics(self.into())
+ }
+
+ pub fn def_id(self) -> LocalDefId {
+ match self {
+ OwnerNode::Item(Item { def_id, .. })
+ | OwnerNode::TraitItem(TraitItem { def_id, .. })
+ | OwnerNode::ImplItem(ImplItem { def_id, .. })
+ | OwnerNode::ForeignItem(ForeignItem { def_id, .. }) => *def_id,
+ OwnerNode::Crate(..) => crate::CRATE_HIR_ID.owner,
+ }
+ }
+
+ pub fn expect_item(self) -> &'hir Item<'hir> {
+ match self {
+ OwnerNode::Item(n) => n,
+ _ => panic!(),
+ }
+ }
+
+ pub fn expect_foreign_item(self) -> &'hir ForeignItem<'hir> {
+ match self {
+ OwnerNode::ForeignItem(n) => n,
+ _ => panic!(),
+ }
+ }
+
+ pub fn expect_impl_item(self) -> &'hir ImplItem<'hir> {
+ match self {
+ OwnerNode::ImplItem(n) => n,
+ _ => panic!(),
+ }
+ }
+
+ pub fn expect_trait_item(self) -> &'hir TraitItem<'hir> {
+ match self {
+ OwnerNode::TraitItem(n) => n,
+ _ => panic!(),
+ }
+ }
+}
+
+impl<'hir> Into<OwnerNode<'hir>> for &'hir Item<'hir> {
+ fn into(self) -> OwnerNode<'hir> {
+ OwnerNode::Item(self)
+ }
+}
+
+impl<'hir> Into<OwnerNode<'hir>> for &'hir ForeignItem<'hir> {
+ fn into(self) -> OwnerNode<'hir> {
+ OwnerNode::ForeignItem(self)
+ }
+}
+
+impl<'hir> Into<OwnerNode<'hir>> for &'hir ImplItem<'hir> {
+ fn into(self) -> OwnerNode<'hir> {
+ OwnerNode::ImplItem(self)
+ }
+}
+
+impl<'hir> Into<OwnerNode<'hir>> for &'hir TraitItem<'hir> {
+ fn into(self) -> OwnerNode<'hir> {
+ OwnerNode::TraitItem(self)
+ }
+}
+
+impl<'hir> Into<Node<'hir>> for OwnerNode<'hir> {
+ fn into(self) -> Node<'hir> {
+ match self {
+ OwnerNode::Item(n) => Node::Item(n),
+ OwnerNode::ForeignItem(n) => Node::ForeignItem(n),
+ OwnerNode::ImplItem(n) => Node::ImplItem(n),
+ OwnerNode::TraitItem(n) => Node::TraitItem(n),
+ OwnerNode::Crate(n) => Node::Crate(n),
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable_Generic)]
+pub enum Node<'hir> {
+ Param(&'hir Param<'hir>),
+ Item(&'hir Item<'hir>),
+ ForeignItem(&'hir ForeignItem<'hir>),
+ TraitItem(&'hir TraitItem<'hir>),
+ ImplItem(&'hir ImplItem<'hir>),
+ Variant(&'hir Variant<'hir>),
+ Field(&'hir FieldDef<'hir>),
+ AnonConst(&'hir AnonConst),
+ Expr(&'hir Expr<'hir>),
+ Stmt(&'hir Stmt<'hir>),
+ PathSegment(&'hir PathSegment<'hir>),
+ Ty(&'hir Ty<'hir>),
+ TypeBinding(&'hir TypeBinding<'hir>),
+ TraitRef(&'hir TraitRef<'hir>),
+ Pat(&'hir Pat<'hir>),
+ Arm(&'hir Arm<'hir>),
+ Block(&'hir Block<'hir>),
+ Local(&'hir Local<'hir>),
+
+ /// `Ctor` refers to the constructor of an enum variant or struct. Only tuple and unit
+ /// variants have synthesized constructors.
+ Ctor(&'hir VariantData<'hir>),
+
+ Lifetime(&'hir Lifetime),
+ GenericParam(&'hir GenericParam<'hir>),
+
+ Crate(&'hir Mod<'hir>),
+
+ Infer(&'hir InferArg),
+}
+
+impl<'hir> Node<'hir> {
+ /// Get the identifier of this `Node`, if applicable.
+ ///
+ /// # Edge cases
+ ///
+ /// Calling `.ident()` on a [`Node::Ctor`] will return `None`
+ /// because `Ctor`s do not have identifiers themselves.
+ /// Instead, call `.ident()` on the parent struct/variant, like so:
+ ///
+ /// ```ignore (illustrative)
+ /// ctor
+ /// .ctor_hir_id()
+ /// .and_then(|ctor_id| tcx.hir().find(tcx.hir().get_parent_node(ctor_id)))
+ /// .and_then(|parent| parent.ident())
+ /// ```
+ pub fn ident(&self) -> Option<Ident> {
+ match self {
+ Node::TraitItem(TraitItem { ident, .. })
+ | Node::ImplItem(ImplItem { ident, .. })
+ | Node::ForeignItem(ForeignItem { ident, .. })
+ | Node::Field(FieldDef { ident, .. })
+ | Node::Variant(Variant { ident, .. })
+ | Node::Item(Item { ident, .. })
+ | Node::PathSegment(PathSegment { ident, .. }) => Some(*ident),
+ Node::Lifetime(lt) => Some(lt.name.ident()),
+ Node::GenericParam(p) => Some(p.name.ident()),
+ Node::TypeBinding(b) => Some(b.ident),
+ Node::Param(..)
+ | Node::AnonConst(..)
+ | Node::Expr(..)
+ | Node::Stmt(..)
+ | Node::Block(..)
+ | Node::Ctor(..)
+ | Node::Pat(..)
+ | Node::Arm(..)
+ | Node::Local(..)
+ | Node::Crate(..)
+ | Node::Ty(..)
+ | Node::TraitRef(..)
+ | Node::Infer(..) => None,
+ }
+ }
+
+ pub fn fn_decl(&self) -> Option<&'hir FnDecl<'hir>> {
+ match self {
+ Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(fn_sig, _), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(fn_sig, _), .. })
+ | Node::Item(Item { kind: ItemKind::Fn(fn_sig, _, _), .. }) => Some(fn_sig.decl),
+ Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(fn_decl, _, _), .. }) => {
+ Some(fn_decl)
+ }
+ _ => None,
+ }
+ }
+
+ pub fn fn_sig(&self) -> Option<&'hir FnSig<'hir>> {
+ match self {
+ Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(fn_sig, _), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(fn_sig, _), .. })
+ | Node::Item(Item { kind: ItemKind::Fn(fn_sig, _, _), .. }) => Some(fn_sig),
+ _ => None,
+ }
+ }
+
+ pub fn body_id(&self) -> Option<BodyId> {
+ match self {
+ Node::TraitItem(TraitItem {
+ kind: TraitItemKind::Fn(_, TraitFn::Provided(body_id)),
+ ..
+ })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(_, body_id), .. })
+ | Node::Item(Item { kind: ItemKind::Fn(.., body_id), .. }) => Some(*body_id),
+ _ => None,
+ }
+ }
+
+ pub fn generics(self) -> Option<&'hir Generics<'hir>> {
+ match self {
+ Node::ForeignItem(ForeignItem {
+ kind: ForeignItemKind::Fn(_, _, generics), ..
+ })
+ | Node::TraitItem(TraitItem { generics, .. })
+ | Node::ImplItem(ImplItem { generics, .. }) => Some(generics),
+ Node::Item(item) => item.kind.generics(),
+ _ => None,
+ }
+ }
+
+ pub fn as_owner(self) -> Option<OwnerNode<'hir>> {
+ match self {
+ Node::Item(i) => Some(OwnerNode::Item(i)),
+ Node::ForeignItem(i) => Some(OwnerNode::ForeignItem(i)),
+ Node::TraitItem(i) => Some(OwnerNode::TraitItem(i)),
+ Node::ImplItem(i) => Some(OwnerNode::ImplItem(i)),
+ Node::Crate(i) => Some(OwnerNode::Crate(i)),
+ _ => None,
+ }
+ }
+
+ pub fn fn_kind(self) -> Option<FnKind<'hir>> {
+ match self {
+ Node::Item(i) => match i.kind {
+ ItemKind::Fn(ref sig, ref generics, _) => {
+ Some(FnKind::ItemFn(i.ident, generics, sig.header))
+ }
+ _ => None,
+ },
+ Node::TraitItem(ti) => match ti.kind {
+ TraitItemKind::Fn(ref sig, TraitFn::Provided(_)) => {
+ Some(FnKind::Method(ti.ident, sig))
+ }
+ _ => None,
+ },
+ Node::ImplItem(ii) => match ii.kind {
+ ImplItemKind::Fn(ref sig, _) => Some(FnKind::Method(ii.ident, sig)),
+ _ => None,
+ },
+ Node::Expr(e) => match e.kind {
+ ExprKind::Closure { .. } => Some(FnKind::Closure),
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+
+ /// Get the fields of the tuple constructor,
+ /// if this node is a tuple constructor; otherwise `None`.
+ pub fn tuple_fields(&self) -> Option<&'hir [FieldDef<'hir>]> {
+ if let Node::Ctor(&VariantData::Tuple(fields, _)) = self { Some(fields) } else { None }
+ }
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ // These are in alphabetical order, which is easy to maintain.
+ rustc_data_structures::static_assert_size!(Block<'static>, 48);
+ rustc_data_structures::static_assert_size!(Expr<'static>, 56);
+ rustc_data_structures::static_assert_size!(ForeignItem<'static>, 72);
+ rustc_data_structures::static_assert_size!(GenericBound<'_>, 48);
+ rustc_data_structures::static_assert_size!(Generics<'static>, 56);
+ rustc_data_structures::static_assert_size!(Impl<'static>, 80);
+ rustc_data_structures::static_assert_size!(ImplItem<'static>, 88);
+ rustc_data_structures::static_assert_size!(Item<'static>, 80);
+ rustc_data_structures::static_assert_size!(Pat<'static>, 88);
+ rustc_data_structures::static_assert_size!(QPath<'static>, 24);
+ rustc_data_structures::static_assert_size!(TraitItem<'static>, 96);
+ rustc_data_structures::static_assert_size!(Ty<'static>, 72);
+}
diff --git a/compiler/rustc_hir/src/hir_id.rs b/compiler/rustc_hir/src/hir_id.rs
new file mode 100644
index 000000000..346ac9e96
--- /dev/null
+++ b/compiler/rustc_hir/src/hir_id.rs
@@ -0,0 +1,89 @@
+use crate::def_id::{LocalDefId, CRATE_DEF_ID};
+use std::fmt;
+
+/// Uniquely identifies a node in the HIR of the current crate. It is
+/// composed of the `owner`, which is the `LocalDefId` of the directly enclosing
+/// `hir::Item`, `hir::TraitItem`, or `hir::ImplItem` (i.e., the closest "item-like"),
+/// and the `local_id` which is unique within the given owner.
+///
+/// This two-level structure makes for more stable values: One can move an item
+/// around within the source code, or add or remove stuff before it, without
+/// the `local_id` part of the `HirId` changing, which is a very useful property in
+/// incremental compilation where we have to persist things through changes to
+/// the code base.
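+///
+/// For example (illustrative only; real ids are assigned during lowering):
+///
+/// ```ignore (illustrative)
+/// // Two nodes inside the same function share `owner`; only `local_id`
+/// // differs, so edits to other items leave both ids unchanged.
+/// let first = HirId { owner: fn_def_id, local_id: ItemLocalId::from_u32(1) };
+/// let second = HirId { owner: fn_def_id, local_id: ItemLocalId::from_u32(2) };
+/// ```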
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(Encodable, Decodable, HashStable_Generic)]
+#[rustc_pass_by_value]
+pub struct HirId {
+ pub owner: LocalDefId,
+ pub local_id: ItemLocalId,
+}
+
+impl HirId {
+ #[inline]
+ pub fn expect_owner(self) -> LocalDefId {
+ assert_eq!(self.local_id.index(), 0);
+ self.owner
+ }
+
+ #[inline]
+ pub fn as_owner(self) -> Option<LocalDefId> {
+ if self.local_id.index() == 0 { Some(self.owner) } else { None }
+ }
+
+ #[inline]
+ pub fn is_owner(self) -> bool {
+ self.local_id.index() == 0
+ }
+
+ #[inline]
+ pub fn make_owner(owner: LocalDefId) -> Self {
+ Self { owner, local_id: ItemLocalId::from_u32(0) }
+ }
+
+ pub fn index(self) -> (usize, usize) {
+ (rustc_index::vec::Idx::index(self.owner), rustc_index::vec::Idx::index(self.local_id))
+ }
+}
+
+impl fmt::Display for HirId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+
+impl Ord for HirId {
+ fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+ (self.index()).cmp(&(other.index()))
+ }
+}
+
+impl PartialOrd for HirId {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+ Some(self.cmp(&other))
+ }
+}
+
+rustc_data_structures::define_id_collections!(HirIdMap, HirIdSet, HirId);
+rustc_data_structures::define_id_collections!(ItemLocalMap, ItemLocalSet, ItemLocalId);
+
+rustc_index::newtype_index! {
+ /// An `ItemLocalId` uniquely identifies something within a given "item-like";
+ /// that is, within a `hir::Item`, `hir::TraitItem`, or `hir::ImplItem`. There is no
+ /// guarantee that the numerical value of a given `ItemLocalId` corresponds to
+ /// the node's position within the owning item in any way, but there is a
+ /// guarantee that the `LocalItemId`s within an owner occupy a dense range of
+ /// integers starting at zero, so a mapping that maps all or most nodes within
+ /// an "item-like" to something else can be implemented by a `Vec` instead of a
+ /// tree or hash map.
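+ ///
+ /// For example (illustrative; `owner_len` is a hypothetical node count),
+ /// the dense range makes a `Vec`-backed side table possible:
+ ///
+ /// ```ignore (illustrative)
+ /// let mut side_table: Vec<Option<Span>> = vec![None; owner_len];
+ /// side_table[hir_id.local_id.index()] = Some(span);
+ /// ```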
+ #[derive(HashStable_Generic)]
+ pub struct ItemLocalId { .. }
+}
+
+impl ItemLocalId {
+ /// A sentinel local id that should never be used.
+ pub const INVALID: ItemLocalId = ItemLocalId::MAX;
+}
+
+/// The `HirId` corresponding to `CRATE_NODE_ID` and `CRATE_DEF_ID`.
+pub const CRATE_HIR_ID: HirId = HirId { owner: CRATE_DEF_ID, local_id: ItemLocalId::from_u32(0) };
diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs
new file mode 100644
index 000000000..e676acebe
--- /dev/null
+++ b/compiler/rustc_hir/src/intravisit.rs
@@ -0,0 +1,1232 @@
+//! HIR walker for walking the contents of nodes.
+//!
+//! Here are the three available patterns for the visitor strategy,
+//! in roughly the order of desirability:
+//!
+//! 1. **Shallow visit**: Get a simple callback for every item (or item-like thing) in the HIR.
+//! - Example: find all items with a `#[foo]` attribute on them.
+//! - How: Use the `hir_crate_items` or `hir_module_items` query to traverse over item-like ids
+//! (`ItemId`, `TraitItemId`, etc.) and use `tcx.def_kind` and `tcx.hir().item*(id)` to filter
+//! and access the actual item-like things, respectively.
+//! - Pro: Efficient; just walks the lists of item ids and gives users control over whether to
+//! access the hir_owners themselves or not.
+//! - Con: Don't get information about nesting
+//! - Con: Don't have methods for specific bits of HIR, like "on
+//! every expr, do this".
+//! 2. **Deep visit**: Want to scan for specific kinds of HIR nodes within
+//! an item, but don't care about how item-like things are nested
+//! within one another.
+//! - Example: Examine each expression to look for its type and do some check or other.
+//! - How: Implement `intravisit::Visitor` and override the `NestedFilter` type to
+//! `nested_filter::OnlyBodies` (and implement `nested_visit_map`), and use
+//! `tcx.hir().visit_all_item_likes_in_crate(&mut visitor)`. Within your
+//! `intravisit::Visitor` impl, implement methods like `visit_expr()` (don't forget to invoke
+//! `intravisit::walk_expr()` to keep walking the subparts).
+//! - Pro: Visitor methods for any kind of HIR node, not just item-like things.
+//! - Pro: Integrates well into dependency tracking.
+//! - Con: Don't get information about nesting between items
+//! 3. **Nested visit**: Want to visit the whole HIR and you care about the nesting between
+//! item-like things.
+//! - Example: Lifetime resolution, which wants to bring lifetimes declared on the
+//! impl into scope while visiting the impl-items, and then back out again.
+//! - How: Implement `intravisit::Visitor` and override the `NestedFilter` type to
+//! `nested_filter::All` (and implement `nested_visit_map`). Walk your crate with
+//! `tcx.hir().walk_toplevel_module(visitor)`.
+//! - Pro: Visitor methods for any kind of HIR node, not just item-like things.
+//! - Pro: Preserves nesting information
+//! - Con: Does not integrate well into dependency tracking.
+//!
+//! If you have decided to use this visitor, here are some general
+//! notes on how to do so:
+//!
+//! Each overridden visit method has full control over what
+//! happens with its node: it can do its own traversal of the node's children,
+//! call `intravisit::walk_*` to apply the default traversal algorithm, or prevent
+//! deeper traversal by doing nothing.
+//!
+//! When visiting the HIR, the contents of nested items are NOT visited
+//! by default. This is different from the AST visitor, which does a deep walk.
+//! Hence this module is called `intravisit`; see the method `visit_nested_item`
+//! for more details.
+//!
+//! Note: it is an important invariant that the default visitor walks
+//! the body of a function in "execution order" - more concretely, if
+//! we consider the reverse post-order (RPO) of the CFG implied by the HIR,
+//! then a pre-order traversal of the HIR is consistent with the CFG RPO
+//! on the *initial CFG point* of each HIR node, while a post-order traversal
+//! of the HIR is consistent with the CFG RPO on each *final CFG point* of
+//! each CFG node.
+//!
+//! One thing that follows is that if HIR node A always starts/ends executing
+//! before HIR node B, then A appears in traversal pre/postorder before B,
+//! respectively. (This follows from RPO respecting CFG domination).
+//!
+//! This order consistency is required in a few places in rustc, for
+//! example generator inference, and possibly also HIR borrowck.
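+//!
+//! As a minimal sketch of the "deep visit" pattern (illustrative only; the
+//! `ClosureCounter` type is hypothetical and `tcx` comes from the caller):
+//!
+//! ```ignore (illustrative)
+//! struct ClosureCounter<'tcx> {
+//!     tcx: TyCtxt<'tcx>,
+//!     count: usize,
+//! }
+//!
+//! impl<'tcx> intravisit::Visitor<'tcx> for ClosureCounter<'tcx> {
+//!     type NestedFilter = nested_filter::OnlyBodies;
+//!
+//!     fn nested_visit_map(&mut self) -> Self::Map {
+//!         self.tcx.hir()
+//!     }
+//!
+//!     fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
+//!         if let ExprKind::Closure { .. } = ex.kind {
+//!             self.count += 1;
+//!         }
+//!         // Keep walking so nested closures are counted too.
+//!         intravisit::walk_expr(self, ex);
+//!     }
+//! }
+//!
+//! let mut visitor = ClosureCounter { tcx, count: 0 };
+//! tcx.hir().visit_all_item_likes_in_crate(&mut visitor);
+//! ```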
+
+use crate::hir::*;
+use rustc_ast::walk_list;
+use rustc_ast::{Attribute, Label};
+use rustc_span::symbol::{Ident, Symbol};
+use rustc_span::Span;
+
+pub trait IntoVisitor<'hir> {
+ type Visitor: Visitor<'hir>;
+ fn into_visitor(&self) -> Self::Visitor;
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum FnKind<'a> {
+ /// `#[xxx] pub async/const/extern "Abi" fn foo()`
+ ItemFn(Ident, &'a Generics<'a>, FnHeader),
+
+ /// `fn foo(&self)`
+ Method(Ident, &'a FnSig<'a>),
+
+ /// `|x, y| {}`
+ Closure,
+}
+
+impl<'a> FnKind<'a> {
+ pub fn header(&self) -> Option<&FnHeader> {
+ match *self {
+ FnKind::ItemFn(_, _, ref header) => Some(header),
+ FnKind::Method(_, ref sig) => Some(&sig.header),
+ FnKind::Closure => None,
+ }
+ }
+
+ pub fn constness(self) -> Constness {
+ self.header().map_or(Constness::NotConst, |header| header.constness)
+ }
+
+ pub fn asyncness(self) -> IsAsync {
+ self.header().map_or(IsAsync::NotAsync, |header| header.asyncness)
+ }
+}
+
+/// An abstract representation of the HIR `rustc_middle::hir::map::Map`.
+pub trait Map<'hir> {
+ /// Retrieves the `Node` corresponding to `id`, returning `None` if it cannot be found.
+ fn find(&self, hir_id: HirId) -> Option<Node<'hir>>;
+ fn body(&self, id: BodyId) -> &'hir Body<'hir>;
+ fn item(&self, id: ItemId) -> &'hir Item<'hir>;
+ fn trait_item(&self, id: TraitItemId) -> &'hir TraitItem<'hir>;
+ fn impl_item(&self, id: ImplItemId) -> &'hir ImplItem<'hir>;
+ fn foreign_item(&self, id: ForeignItemId) -> &'hir ForeignItem<'hir>;
+}
+
+// Used when no map is actually available, forcing manual implementation of nested visitors.
+impl<'hir> Map<'hir> for ! {
+ fn find(&self, _: HirId) -> Option<Node<'hir>> {
+ *self;
+ }
+ fn body(&self, _: BodyId) -> &'hir Body<'hir> {
+ *self;
+ }
+ fn item(&self, _: ItemId) -> &'hir Item<'hir> {
+ *self;
+ }
+ fn trait_item(&self, _: TraitItemId) -> &'hir TraitItem<'hir> {
+ *self;
+ }
+ fn impl_item(&self, _: ImplItemId) -> &'hir ImplItem<'hir> {
+ *self;
+ }
+ fn foreign_item(&self, _: ForeignItemId) -> &'hir ForeignItem<'hir> {
+ *self;
+ }
+}
+
+pub mod nested_filter {
+ use super::Map;
+
+ /// Specifies what nested things a visitor wants to visit. By "nested
+ /// things", we are referring to bits of HIR that are not directly embedded
+ /// within one another but rather indirectly, through a table in the crate.
+ /// This is done to control dependencies during incremental compilation: the
+ /// non-inline bits of HIR can be tracked and hashed separately.
+ ///
+ /// The most common choice is `OnlyBodies`, which will cause the visitor to
+ /// visit fn bodies for fns that it encounters, and closure bodies, but
+ /// skip over nested item-like things.
+ ///
+ /// See the comments on `ItemLikeVisitor` for more details on the overall
+ /// visit strategy.
+ pub trait NestedFilter<'hir> {
+ type Map: Map<'hir>;
+
+ /// Whether the visitor visits nested "item-like" things.
+ /// E.g., item, impl-item.
+ const INTER: bool;
+ /// Whether the visitor visits "intra item-like" things.
+ /// E.g., function body, closure, `AnonConst`.
+ const INTRA: bool;
+ }
+
+ /// Do not visit any nested things. When you add a new
+ /// "non-nested" thing, you will want to audit such uses to see if
+ /// they remain valid.
+ ///
+ /// Use this if you are only walking some particular kind of tree
+ /// (e.g., a type or fn signature) and you don't want to thread a
+ /// HIR map around.
+ pub struct None(());
+ impl NestedFilter<'_> for None {
+ type Map = !;
+ const INTER: bool = false;
+ const INTRA: bool = false;
+ }
+}
+
+use nested_filter::NestedFilter;
+
+/// Each method of the Visitor trait is a hook to be potentially
+/// overridden. Each method's default implementation recursively visits
+/// the substructure of the input via the corresponding `walk` method;
+/// e.g., the `visit_mod` method by default calls `intravisit::walk_mod`.
+///
+/// Note that this visitor does NOT visit nested items by default
+/// (this is why the module is called `intravisit`, to distinguish it
+/// from the AST's `visit` module, which acts differently). If you
+/// simply want to visit all items in the crate in some order, you
+/// should call `tcx.hir().visit_all_item_likes_in_crate`. Otherwise, see the comment
+/// on `visit_nested_item` for details on how to visit nested items.
+///
+/// If you want to ensure that your code handles every variant
+/// explicitly, you need to override each method. (And you also need
+/// to monitor future changes to `Visitor` in case a new method with a
+/// new default implementation gets introduced.)
+pub trait Visitor<'v>: Sized {
+ // This type should not be overridden; it exists for convenient usage as `Self::Map`.
+ type Map: Map<'v> = <Self::NestedFilter as NestedFilter<'v>>::Map;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Nested items.
+
+ /// Override this type to control which nested HIR are visited; see
+ /// [`NestedFilter`] for details. If you override this type, you
+ /// must also override [`nested_visit_map`](Self::nested_visit_map).
+ ///
+ /// **If for some reason you want the nested behavior, but don't
+ /// have a `Map` at your disposal:** then override the
+ /// `visit_nested_XXX` methods. If a new `visit_nested_XXX` variant is
+ /// added in the future, it will cause a panic which can be detected
+ /// and fixed appropriately.
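+ ///
+ /// A typical override is a single line (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// type NestedFilter = nested_filter::OnlyBodies;
+ /// ```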
+ type NestedFilter: NestedFilter<'v> = nested_filter::None;
+
+ /// If `type NestedFilter` is set to visit nested items, this method
+ /// must also be overridden to provide a map to retrieve nested items.
+ fn nested_visit_map(&mut self) -> Self::Map {
+ panic!(
+ "nested_visit_map must be implemented or consider using \
+ `type NestedFilter = nested_filter::None` (the default)"
+ );
+ }
+
+ /// Invoked when a nested item is encountered. By default, when
+ /// `Self::NestedFilter` is `nested_filter::None`, this method does
+ /// nothing. **You probably don't want to override this method** --
+ /// instead, override [`Self::NestedFilter`] or use the "shallow" or
+ /// "deep" visit patterns described on
+ /// `itemlikevisit::ItemLikeVisitor`. The only reason to override
+ /// this method is if you want a nested pattern but cannot supply a
+ /// [`Map`]; see `nested_visit_map` for advice.
+ fn visit_nested_item(&mut self, id: ItemId) {
+ if Self::NestedFilter::INTER {
+ let item = self.nested_visit_map().item(id);
+ self.visit_item(item);
+ }
+ }
+
+ /// Like `visit_nested_item()`, but for trait items. See
+ /// `visit_nested_item()` for advice on when to override this
+ /// method.
+ fn visit_nested_trait_item(&mut self, id: TraitItemId) {
+ if Self::NestedFilter::INTER {
+ let item = self.nested_visit_map().trait_item(id);
+ self.visit_trait_item(item);
+ }
+ }
+
+ /// Like `visit_nested_item()`, but for impl items. See
+ /// `visit_nested_item()` for advice on when to override this
+ /// method.
+ fn visit_nested_impl_item(&mut self, id: ImplItemId) {
+ if Self::NestedFilter::INTER {
+ let item = self.nested_visit_map().impl_item(id);
+ self.visit_impl_item(item);
+ }
+ }
+
+ /// Like `visit_nested_item()`, but for foreign items. See
+ /// `visit_nested_item()` for advice on when to override this
+ /// method.
+ fn visit_nested_foreign_item(&mut self, id: ForeignItemId) {
+ if Self::NestedFilter::INTER {
+ let item = self.nested_visit_map().foreign_item(id);
+ self.visit_foreign_item(item);
+ }
+ }
+
+ /// Invoked to visit the body of a function, method or closure. Like
+ /// `visit_nested_item`, does nothing by default unless you override
+ /// `Self::NestedFilter`.
+ fn visit_nested_body(&mut self, id: BodyId) {
+ if Self::NestedFilter::INTRA {
+ let body = self.nested_visit_map().body(id);
+ self.visit_body(body);
+ }
+ }
+
+ fn visit_param(&mut self, param: &'v Param<'v>) {
+ walk_param(self, param)
+ }
+
+ /// Visits the top-level item and (optionally) nested items / impl items. See
+ /// `visit_nested_item` for details.
+ fn visit_item(&mut self, i: &'v Item<'v>) {
+ walk_item(self, i)
+ }
+
+ fn visit_body(&mut self, b: &'v Body<'v>) {
+ walk_body(self, b);
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+
+ fn visit_id(&mut self, _hir_id: HirId) {
+ // Nothing to do.
+ }
+ fn visit_name(&mut self, _span: Span, _name: Symbol) {
+ // Nothing to do.
+ }
+ fn visit_ident(&mut self, ident: Ident) {
+ walk_ident(self, ident)
+ }
+ fn visit_mod(&mut self, m: &'v Mod<'v>, _s: Span, n: HirId) {
+ walk_mod(self, m, n)
+ }
+ fn visit_foreign_item(&mut self, i: &'v ForeignItem<'v>) {
+ walk_foreign_item(self, i)
+ }
+ fn visit_local(&mut self, l: &'v Local<'v>) {
+ walk_local(self, l)
+ }
+ fn visit_block(&mut self, b: &'v Block<'v>) {
+ walk_block(self, b)
+ }
+ fn visit_stmt(&mut self, s: &'v Stmt<'v>) {
+ walk_stmt(self, s)
+ }
+ fn visit_arm(&mut self, a: &'v Arm<'v>) {
+ walk_arm(self, a)
+ }
+ fn visit_pat(&mut self, p: &'v Pat<'v>) {
+ walk_pat(self, p)
+ }
+ fn visit_array_length(&mut self, len: &'v ArrayLen) {
+ walk_array_len(self, len)
+ }
+ fn visit_anon_const(&mut self, c: &'v AnonConst) {
+ walk_anon_const(self, c)
+ }
+ fn visit_expr(&mut self, ex: &'v Expr<'v>) {
+ walk_expr(self, ex)
+ }
+ fn visit_let_expr(&mut self, lex: &'v Let<'v>) {
+ walk_let_expr(self, lex)
+ }
+ fn visit_ty(&mut self, t: &'v Ty<'v>) {
+ walk_ty(self, t)
+ }
+ fn visit_generic_param(&mut self, p: &'v GenericParam<'v>) {
+ walk_generic_param(self, p)
+ }
+ fn visit_const_param_default(&mut self, _param: HirId, ct: &'v AnonConst) {
+ walk_const_param_default(self, ct)
+ }
+ fn visit_generics(&mut self, g: &'v Generics<'v>) {
+ walk_generics(self, g)
+ }
+ fn visit_where_predicate(&mut self, predicate: &'v WherePredicate<'v>) {
+ walk_where_predicate(self, predicate)
+ }
+ fn visit_fn_decl(&mut self, fd: &'v FnDecl<'v>) {
+ walk_fn_decl(self, fd)
+ }
+ fn visit_fn(&mut self, fk: FnKind<'v>, fd: &'v FnDecl<'v>, b: BodyId, s: Span, id: HirId) {
+ walk_fn(self, fk, fd, b, s, id)
+ }
+ fn visit_use(&mut self, path: &'v Path<'v>, hir_id: HirId) {
+ walk_use(self, path, hir_id)
+ }
+ fn visit_trait_item(&mut self, ti: &'v TraitItem<'v>) {
+ walk_trait_item(self, ti)
+ }
+ fn visit_trait_item_ref(&mut self, ii: &'v TraitItemRef) {
+ walk_trait_item_ref(self, ii)
+ }
+ fn visit_impl_item(&mut self, ii: &'v ImplItem<'v>) {
+ walk_impl_item(self, ii)
+ }
+ fn visit_foreign_item_ref(&mut self, ii: &'v ForeignItemRef) {
+ walk_foreign_item_ref(self, ii)
+ }
+ fn visit_impl_item_ref(&mut self, ii: &'v ImplItemRef) {
+ walk_impl_item_ref(self, ii)
+ }
+ fn visit_trait_ref(&mut self, t: &'v TraitRef<'v>) {
+ walk_trait_ref(self, t)
+ }
+ fn visit_param_bound(&mut self, bounds: &'v GenericBound<'v>) {
+ walk_param_bound(self, bounds)
+ }
+ fn visit_poly_trait_ref(&mut self, t: &'v PolyTraitRef<'v>, m: TraitBoundModifier) {
+ walk_poly_trait_ref(self, t, m)
+ }
+ fn visit_variant_data(
+ &mut self,
+ s: &'v VariantData<'v>,
+ _: Symbol,
+ _: &'v Generics<'v>,
+ _parent_id: HirId,
+ _: Span,
+ ) {
+ walk_struct_def(self, s)
+ }
+ fn visit_field_def(&mut self, s: &'v FieldDef<'v>) {
+ walk_field_def(self, s)
+ }
+ fn visit_enum_def(
+ &mut self,
+ enum_definition: &'v EnumDef<'v>,
+ generics: &'v Generics<'v>,
+ item_id: HirId,
+ _: Span,
+ ) {
+ walk_enum_def(self, enum_definition, generics, item_id)
+ }
+ fn visit_variant(&mut self, v: &'v Variant<'v>, g: &'v Generics<'v>, item_id: HirId) {
+ walk_variant(self, v, g, item_id)
+ }
+ fn visit_label(&mut self, label: &'v Label) {
+ walk_label(self, label)
+ }
+ fn visit_infer(&mut self, inf: &'v InferArg) {
+ walk_inf(self, inf);
+ }
+ fn visit_generic_arg(&mut self, generic_arg: &'v GenericArg<'v>) {
+ match generic_arg {
+ GenericArg::Lifetime(lt) => self.visit_lifetime(lt),
+ GenericArg::Type(ty) => self.visit_ty(ty),
+ GenericArg::Const(ct) => self.visit_anon_const(&ct.value),
+ GenericArg::Infer(inf) => self.visit_infer(inf),
+ }
+ }
+ fn visit_lifetime(&mut self, lifetime: &'v Lifetime) {
+ walk_lifetime(self, lifetime)
+ }
+ fn visit_qpath(&mut self, qpath: &'v QPath<'v>, id: HirId, span: Span) {
+ walk_qpath(self, qpath, id, span)
+ }
+ fn visit_path(&mut self, path: &'v Path<'v>, _id: HirId) {
+ walk_path(self, path)
+ }
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v PathSegment<'v>) {
+ walk_path_segment(self, path_span, path_segment)
+ }
+ fn visit_generic_args(&mut self, path_span: Span, generic_args: &'v GenericArgs<'v>) {
+ walk_generic_args(self, path_span, generic_args)
+ }
+ fn visit_assoc_type_binding(&mut self, type_binding: &'v TypeBinding<'v>) {
+ walk_assoc_type_binding(self, type_binding)
+ }
+ fn visit_attribute(&mut self, _attr: &'v Attribute) {}
+ fn visit_associated_item_kind(&mut self, kind: &'v AssocItemKind) {
+ walk_associated_item_kind(self, kind);
+ }
+ fn visit_defaultness(&mut self, defaultness: &'v Defaultness) {
+ walk_defaultness(self, defaultness);
+ }
+ fn visit_inline_asm(&mut self, asm: &'v InlineAsm<'v>, id: HirId) {
+ walk_inline_asm(self, asm, id);
+ }
+}
+
+pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod<'v>, mod_hir_id: HirId) {
+ visitor.visit_id(mod_hir_id);
+ for &item_id in module.item_ids {
+ visitor.visit_nested_item(item_id);
+ }
+}
+
+pub fn walk_body<'v, V: Visitor<'v>>(visitor: &mut V, body: &'v Body<'v>) {
+ walk_list!(visitor, visit_param, body.params);
+ visitor.visit_expr(&body.value);
+}
+
+pub fn walk_local<'v, V: Visitor<'v>>(visitor: &mut V, local: &'v Local<'v>) {
+ // Intentionally visiting the expr first - the initialization expr
+ // dominates the local's definition.
+ walk_list!(visitor, visit_expr, &local.init);
+ visitor.visit_id(local.hir_id);
+ visitor.visit_pat(&local.pat);
+ if let Some(els) = local.els {
+ visitor.visit_block(els);
+ }
+ walk_list!(visitor, visit_ty, &local.ty);
+}
+
+pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, ident: Ident) {
+ visitor.visit_name(ident.span, ident.name);
+}
+
+pub fn walk_label<'v, V: Visitor<'v>>(visitor: &mut V, label: &'v Label) {
+ visitor.visit_ident(label.ident);
+}
+
+pub fn walk_lifetime<'v, V: Visitor<'v>>(visitor: &mut V, lifetime: &'v Lifetime) {
+ visitor.visit_id(lifetime.hir_id);
+ match lifetime.name {
+ LifetimeName::Param(_, ParamName::Plain(ident)) => {
+ visitor.visit_ident(ident);
+ }
+ LifetimeName::Param(_, ParamName::Fresh)
+ | LifetimeName::Param(_, ParamName::Error)
+ | LifetimeName::Static
+ | LifetimeName::Error
+ | LifetimeName::ImplicitObjectLifetimeDefault
+ | LifetimeName::Infer => {}
+ }
+}
+
+pub fn walk_poly_trait_ref<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ trait_ref: &'v PolyTraitRef<'v>,
+ _modifier: TraitBoundModifier,
+) {
+ walk_list!(visitor, visit_generic_param, trait_ref.bound_generic_params);
+ visitor.visit_trait_ref(&trait_ref.trait_ref);
+}
+
+pub fn walk_trait_ref<'v, V: Visitor<'v>>(visitor: &mut V, trait_ref: &'v TraitRef<'v>) {
+ visitor.visit_id(trait_ref.hir_ref_id);
+ visitor.visit_path(&trait_ref.path, trait_ref.hir_ref_id)
+}
+
+pub fn walk_param<'v, V: Visitor<'v>>(visitor: &mut V, param: &'v Param<'v>) {
+ visitor.visit_id(param.hir_id);
+ visitor.visit_pat(&param.pat);
+}
+
+pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) {
+ visitor.visit_ident(item.ident);
+ match item.kind {
+ ItemKind::ExternCrate(orig_name) => {
+ visitor.visit_id(item.hir_id());
+ if let Some(orig_name) = orig_name {
+ visitor.visit_name(item.span, orig_name);
+ }
+ }
+ ItemKind::Use(ref path, _) => {
+ visitor.visit_use(path, item.hir_id());
+ }
+ ItemKind::Static(ref typ, _, body) | ItemKind::Const(ref typ, body) => {
+ visitor.visit_id(item.hir_id());
+ visitor.visit_ty(typ);
+ visitor.visit_nested_body(body);
+ }
+ ItemKind::Fn(ref sig, ref generics, body_id) => visitor.visit_fn(
+ FnKind::ItemFn(item.ident, generics, sig.header),
+ &sig.decl,
+ body_id,
+ item.span,
+ item.hir_id(),
+ ),
+ ItemKind::Macro(..) => {
+ visitor.visit_id(item.hir_id());
+ }
+ ItemKind::Mod(ref module) => {
+ // `visit_mod()` takes care of visiting the `Item`'s `HirId`.
+ visitor.visit_mod(module, item.span, item.hir_id())
+ }
+ ItemKind::ForeignMod { abi: _, items } => {
+ visitor.visit_id(item.hir_id());
+ walk_list!(visitor, visit_foreign_item_ref, items);
+ }
+ ItemKind::GlobalAsm(asm) => {
+ visitor.visit_id(item.hir_id());
+ visitor.visit_inline_asm(asm, item.hir_id());
+ }
+ ItemKind::TyAlias(ref ty, ref generics) => {
+ visitor.visit_id(item.hir_id());
+ visitor.visit_ty(ty);
+ visitor.visit_generics(generics)
+ }
+ ItemKind::OpaqueTy(OpaqueTy { ref generics, bounds, .. }) => {
+ visitor.visit_id(item.hir_id());
+ walk_generics(visitor, generics);
+ walk_list!(visitor, visit_param_bound, bounds);
+ }
+ ItemKind::Enum(ref enum_definition, ref generics) => {
+ visitor.visit_generics(generics);
+ // `visit_enum_def()` takes care of visiting the `Item`'s `HirId`.
+ visitor.visit_enum_def(enum_definition, generics, item.hir_id(), item.span)
+ }
+ ItemKind::Impl(Impl {
+ unsafety: _,
+ defaultness: _,
+ polarity: _,
+ constness: _,
+ defaultness_span: _,
+ ref generics,
+ ref of_trait,
+ ref self_ty,
+ items,
+ }) => {
+ visitor.visit_id(item.hir_id());
+ visitor.visit_generics(generics);
+ walk_list!(visitor, visit_trait_ref, of_trait);
+ visitor.visit_ty(self_ty);
+ walk_list!(visitor, visit_impl_item_ref, *items);
+ }
+ ItemKind::Struct(ref struct_definition, ref generics)
+ | ItemKind::Union(ref struct_definition, ref generics) => {
+ visitor.visit_generics(generics);
+ visitor.visit_id(item.hir_id());
+ visitor.visit_variant_data(
+ struct_definition,
+ item.ident.name,
+ generics,
+ item.hir_id(),
+ item.span,
+ );
+ }
+ ItemKind::Trait(.., ref generics, bounds, trait_item_refs) => {
+ visitor.visit_id(item.hir_id());
+ visitor.visit_generics(generics);
+ walk_list!(visitor, visit_param_bound, bounds);
+ walk_list!(visitor, visit_trait_item_ref, trait_item_refs);
+ }
+ ItemKind::TraitAlias(ref generics, bounds) => {
+ visitor.visit_id(item.hir_id());
+ visitor.visit_generics(generics);
+ walk_list!(visitor, visit_param_bound, bounds);
+ }
+ }
+}
+
+pub fn walk_inline_asm<'v, V: Visitor<'v>>(visitor: &mut V, asm: &'v InlineAsm<'v>, id: HirId) {
+ for (op, op_sp) in asm.operands {
+ match op {
+ InlineAsmOperand::In { expr, .. } | InlineAsmOperand::InOut { expr, .. } => {
+ visitor.visit_expr(expr)
+ }
+ InlineAsmOperand::Out { expr, .. } => {
+ if let Some(expr) = expr {
+ visitor.visit_expr(expr);
+ }
+ }
+ InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ visitor.visit_expr(in_expr);
+ if let Some(out_expr) = out_expr {
+ visitor.visit_expr(out_expr);
+ }
+ }
+ InlineAsmOperand::Const { anon_const, .. }
+ | InlineAsmOperand::SymFn { anon_const, .. } => visitor.visit_anon_const(anon_const),
+ InlineAsmOperand::SymStatic { path, .. } => visitor.visit_qpath(path, id, *op_sp),
+ }
+ }
+}
+
+pub fn walk_use<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path<'v>, hir_id: HirId) {
+ visitor.visit_id(hir_id);
+ visitor.visit_path(path, hir_id);
+}
+
+pub fn walk_enum_def<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ enum_definition: &'v EnumDef<'v>,
+ generics: &'v Generics<'v>,
+ item_id: HirId,
+) {
+ visitor.visit_id(item_id);
+ walk_list!(visitor, visit_variant, enum_definition.variants, generics, item_id);
+}
+
+pub fn walk_variant<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ variant: &'v Variant<'v>,
+ generics: &'v Generics<'v>,
+ parent_item_id: HirId,
+) {
+ visitor.visit_ident(variant.ident);
+ visitor.visit_id(variant.id);
+ visitor.visit_variant_data(
+ &variant.data,
+ variant.ident.name,
+ generics,
+ parent_item_id,
+ variant.span,
+ );
+ walk_list!(visitor, visit_anon_const, &variant.disr_expr);
+}
+
+pub fn walk_ty<'v, V: Visitor<'v>>(visitor: &mut V, typ: &'v Ty<'v>) {
+ visitor.visit_id(typ.hir_id);
+
+ match typ.kind {
+ TyKind::Slice(ref ty) => visitor.visit_ty(ty),
+ TyKind::Ptr(ref mutable_type) => visitor.visit_ty(&mutable_type.ty),
+ TyKind::Rptr(ref lifetime, ref mutable_type) => {
+ visitor.visit_lifetime(lifetime);
+ visitor.visit_ty(&mutable_type.ty)
+ }
+ TyKind::Never => {}
+ TyKind::Tup(tuple_element_types) => {
+ walk_list!(visitor, visit_ty, tuple_element_types);
+ }
+ TyKind::BareFn(ref function_declaration) => {
+ walk_list!(visitor, visit_generic_param, function_declaration.generic_params);
+ visitor.visit_fn_decl(&function_declaration.decl);
+ }
+ TyKind::Path(ref qpath) => {
+ visitor.visit_qpath(qpath, typ.hir_id, typ.span);
+ }
+ TyKind::OpaqueDef(item_id, lifetimes) => {
+ visitor.visit_nested_item(item_id);
+ walk_list!(visitor, visit_generic_arg, lifetimes);
+ }
+ TyKind::Array(ref ty, ref length) => {
+ visitor.visit_ty(ty);
+ visitor.visit_array_length(length)
+ }
+ TyKind::TraitObject(bounds, ref lifetime, _syntax) => {
+ for bound in bounds {
+ visitor.visit_poly_trait_ref(bound, TraitBoundModifier::None);
+ }
+ visitor.visit_lifetime(lifetime);
+ }
+ TyKind::Typeof(ref expression) => visitor.visit_anon_const(expression),
+ TyKind::Infer | TyKind::Err => {}
+ }
+}
+
+pub fn walk_inf<'v, V: Visitor<'v>>(visitor: &mut V, inf: &'v InferArg) {
+ visitor.visit_id(inf.hir_id);
+}
+
+pub fn walk_qpath<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ qpath: &'v QPath<'v>,
+ id: HirId,
+ span: Span,
+) {
+ match *qpath {
+ QPath::Resolved(ref maybe_qself, ref path) => {
+ walk_list!(visitor, visit_ty, maybe_qself);
+ visitor.visit_path(path, id)
+ }
+ QPath::TypeRelative(ref qself, ref segment) => {
+ visitor.visit_ty(qself);
+ visitor.visit_path_segment(span, segment);
+ }
+ QPath::LangItem(..) => {}
+ }
+}
+
+pub fn walk_path<'v, V: Visitor<'v>>(visitor: &mut V, path: &'v Path<'v>) {
+ for segment in path.segments {
+ visitor.visit_path_segment(path.span, segment);
+ }
+}
+
+pub fn walk_path_segment<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ path_span: Span,
+ segment: &'v PathSegment<'v>,
+) {
+ visitor.visit_ident(segment.ident);
+ walk_list!(visitor, visit_id, segment.hir_id);
+ if let Some(ref args) = segment.args {
+ visitor.visit_generic_args(path_span, args);
+ }
+}
+
+pub fn walk_generic_args<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ _path_span: Span,
+ generic_args: &'v GenericArgs<'v>,
+) {
+ walk_list!(visitor, visit_generic_arg, generic_args.args);
+ walk_list!(visitor, visit_assoc_type_binding, generic_args.bindings);
+}
+
+pub fn walk_assoc_type_binding<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ type_binding: &'v TypeBinding<'v>,
+) {
+ visitor.visit_id(type_binding.hir_id);
+ visitor.visit_ident(type_binding.ident);
+ visitor.visit_generic_args(type_binding.span, type_binding.gen_args);
+ match type_binding.kind {
+ TypeBindingKind::Equality { ref term } => match term {
+ Term::Ty(ref ty) => visitor.visit_ty(ty),
+ Term::Const(ref c) => visitor.visit_anon_const(c),
+ },
+ TypeBindingKind::Constraint { bounds } => walk_list!(visitor, visit_param_bound, bounds),
+ }
+}
+
+pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat<'v>) {
+ visitor.visit_id(pattern.hir_id);
+ match pattern.kind {
+ PatKind::TupleStruct(ref qpath, children, _) => {
+ visitor.visit_qpath(qpath, pattern.hir_id, pattern.span);
+ walk_list!(visitor, visit_pat, children);
+ }
+ PatKind::Path(ref qpath) => {
+ visitor.visit_qpath(qpath, pattern.hir_id, pattern.span);
+ }
+ PatKind::Struct(ref qpath, fields, _) => {
+ visitor.visit_qpath(qpath, pattern.hir_id, pattern.span);
+ for field in fields {
+ visitor.visit_id(field.hir_id);
+ visitor.visit_ident(field.ident);
+ visitor.visit_pat(&field.pat)
+ }
+ }
+ PatKind::Or(pats) => walk_list!(visitor, visit_pat, pats),
+ PatKind::Tuple(tuple_elements, _) => {
+ walk_list!(visitor, visit_pat, tuple_elements);
+ }
+ PatKind::Box(ref subpattern) | PatKind::Ref(ref subpattern, _) => {
+ visitor.visit_pat(subpattern)
+ }
+ PatKind::Binding(_, _hir_id, ident, ref optional_subpattern) => {
+ visitor.visit_ident(ident);
+ walk_list!(visitor, visit_pat, optional_subpattern);
+ }
+ PatKind::Lit(ref expression) => visitor.visit_expr(expression),
+ PatKind::Range(ref lower_bound, ref upper_bound, _) => {
+ walk_list!(visitor, visit_expr, lower_bound);
+ walk_list!(visitor, visit_expr, upper_bound);
+ }
+ PatKind::Wild => (),
+ PatKind::Slice(prepatterns, ref slice_pattern, postpatterns) => {
+ walk_list!(visitor, visit_pat, prepatterns);
+ walk_list!(visitor, visit_pat, slice_pattern);
+ walk_list!(visitor, visit_pat, postpatterns);
+ }
+ }
+}
+
+pub fn walk_foreign_item<'v, V: Visitor<'v>>(visitor: &mut V, foreign_item: &'v ForeignItem<'v>) {
+ visitor.visit_id(foreign_item.hir_id());
+ visitor.visit_ident(foreign_item.ident);
+
+ match foreign_item.kind {
+ ForeignItemKind::Fn(ref function_declaration, param_names, ref generics) => {
+ visitor.visit_generics(generics);
+ visitor.visit_fn_decl(function_declaration);
+ for &param_name in param_names {
+ visitor.visit_ident(param_name);
+ }
+ }
+ ForeignItemKind::Static(ref typ, _) => visitor.visit_ty(typ),
+ ForeignItemKind::Type => (),
+ }
+}
+
+pub fn walk_param_bound<'v, V: Visitor<'v>>(visitor: &mut V, bound: &'v GenericBound<'v>) {
+ match *bound {
+ GenericBound::Trait(ref typ, modifier) => {
+ visitor.visit_poly_trait_ref(typ, modifier);
+ }
+ GenericBound::LangItemTrait(_, span, hir_id, args) => {
+ visitor.visit_id(hir_id);
+ visitor.visit_generic_args(span, args);
+ }
+ GenericBound::Outlives(ref lifetime) => visitor.visit_lifetime(lifetime),
+ }
+}
+
+pub fn walk_generic_param<'v, V: Visitor<'v>>(visitor: &mut V, param: &'v GenericParam<'v>) {
+ visitor.visit_id(param.hir_id);
+ match param.name {
+ ParamName::Plain(ident) => visitor.visit_ident(ident),
+ ParamName::Error | ParamName::Fresh => {}
+ }
+ match param.kind {
+ GenericParamKind::Lifetime { .. } => {}
+ GenericParamKind::Type { ref default, .. } => walk_list!(visitor, visit_ty, default),
+ GenericParamKind::Const { ref ty, ref default } => {
+ visitor.visit_ty(ty);
+ if let Some(ref default) = default {
+ visitor.visit_const_param_default(param.hir_id, default);
+ }
+ }
+ }
+}
+
+pub fn walk_const_param_default<'v, V: Visitor<'v>>(visitor: &mut V, ct: &'v AnonConst) {
+ visitor.visit_anon_const(ct)
+}
+
+pub fn walk_generics<'v, V: Visitor<'v>>(visitor: &mut V, generics: &'v Generics<'v>) {
+ walk_list!(visitor, visit_generic_param, generics.params);
+ walk_list!(visitor, visit_where_predicate, generics.predicates);
+}
+
+pub fn walk_where_predicate<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ predicate: &'v WherePredicate<'v>,
+) {
+ match *predicate {
+ WherePredicate::BoundPredicate(WhereBoundPredicate {
+ ref bounded_ty,
+ bounds,
+ bound_generic_params,
+ ..
+ }) => {
+ visitor.visit_ty(bounded_ty);
+ walk_list!(visitor, visit_param_bound, bounds);
+ walk_list!(visitor, visit_generic_param, bound_generic_params);
+ }
+ WherePredicate::RegionPredicate(WhereRegionPredicate { ref lifetime, bounds, .. }) => {
+ visitor.visit_lifetime(lifetime);
+ walk_list!(visitor, visit_param_bound, bounds);
+ }
+ WherePredicate::EqPredicate(WhereEqPredicate {
+ hir_id, ref lhs_ty, ref rhs_ty, ..
+ }) => {
+ visitor.visit_id(hir_id);
+ visitor.visit_ty(lhs_ty);
+ visitor.visit_ty(rhs_ty);
+ }
+ }
+}
+
+pub fn walk_fn_ret_ty<'v, V: Visitor<'v>>(visitor: &mut V, ret_ty: &'v FnRetTy<'v>) {
+ if let FnRetTy::Return(ref output_ty) = *ret_ty {
+ visitor.visit_ty(output_ty)
+ }
+}
+
+pub fn walk_fn_decl<'v, V: Visitor<'v>>(visitor: &mut V, function_declaration: &'v FnDecl<'v>) {
+ for ty in function_declaration.inputs {
+ visitor.visit_ty(ty)
+ }
+ walk_fn_ret_ty(visitor, &function_declaration.output)
+}
+
+pub fn walk_fn_kind<'v, V: Visitor<'v>>(visitor: &mut V, function_kind: FnKind<'v>) {
+ match function_kind {
+ FnKind::ItemFn(_, generics, ..) => {
+ visitor.visit_generics(generics);
+ }
+ FnKind::Closure | FnKind::Method(..) => {}
+ }
+}
+
+pub fn walk_fn<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ function_kind: FnKind<'v>,
+ function_declaration: &'v FnDecl<'v>,
+ body_id: BodyId,
+ _span: Span,
+ id: HirId,
+) {
+ visitor.visit_id(id);
+ visitor.visit_fn_decl(function_declaration);
+ walk_fn_kind(visitor, function_kind);
+ visitor.visit_nested_body(body_id)
+}
+
+pub fn walk_trait_item<'v, V: Visitor<'v>>(visitor: &mut V, trait_item: &'v TraitItem<'v>) {
+ // N.B., deliberately force a compilation error if/when new fields are added.
+ let TraitItem { ident, generics, ref defaultness, ref kind, span, def_id: _ } = *trait_item;
+ let hir_id = trait_item.hir_id();
+ visitor.visit_ident(ident);
+ visitor.visit_generics(&generics);
+ visitor.visit_defaultness(&defaultness);
+ match *kind {
+ TraitItemKind::Const(ref ty, default) => {
+ visitor.visit_id(hir_id);
+ visitor.visit_ty(ty);
+ walk_list!(visitor, visit_nested_body, default);
+ }
+ TraitItemKind::Fn(ref sig, TraitFn::Required(param_names)) => {
+ visitor.visit_id(hir_id);
+ visitor.visit_fn_decl(&sig.decl);
+ for &param_name in param_names {
+ visitor.visit_ident(param_name);
+ }
+ }
+ TraitItemKind::Fn(ref sig, TraitFn::Provided(body_id)) => {
+ visitor.visit_fn(FnKind::Method(ident, sig), &sig.decl, body_id, span, hir_id);
+ }
+ TraitItemKind::Type(bounds, ref default) => {
+ visitor.visit_id(hir_id);
+ walk_list!(visitor, visit_param_bound, bounds);
+ walk_list!(visitor, visit_ty, default);
+ }
+ }
+}
+
+pub fn walk_trait_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, trait_item_ref: &'v TraitItemRef) {
+ // N.B., deliberately force a compilation error if/when new fields are added.
+ let TraitItemRef { id, ident, ref kind, span: _ } = *trait_item_ref;
+ visitor.visit_nested_trait_item(id);
+ visitor.visit_ident(ident);
+ visitor.visit_associated_item_kind(kind);
+}
+
+pub fn walk_impl_item<'v, V: Visitor<'v>>(visitor: &mut V, impl_item: &'v ImplItem<'v>) {
+ // N.B., deliberately force a compilation error if/when new fields are added.
+ let ImplItem {
+ def_id: _,
+ ident,
+ ref generics,
+ ref kind,
+ ref defaultness,
+ span: _,
+ vis_span: _,
+ } = *impl_item;
+
+ visitor.visit_ident(ident);
+ visitor.visit_generics(generics);
+ visitor.visit_defaultness(defaultness);
+ match *kind {
+ ImplItemKind::Const(ref ty, body) => {
+ visitor.visit_id(impl_item.hir_id());
+ visitor.visit_ty(ty);
+ visitor.visit_nested_body(body);
+ }
+ ImplItemKind::Fn(ref sig, body_id) => {
+ visitor.visit_fn(
+ FnKind::Method(impl_item.ident, sig),
+ &sig.decl,
+ body_id,
+ impl_item.span,
+ impl_item.hir_id(),
+ );
+ }
+ ImplItemKind::TyAlias(ref ty) => {
+ visitor.visit_id(impl_item.hir_id());
+ visitor.visit_ty(ty);
+ }
+ }
+}
+
+pub fn walk_foreign_item_ref<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ foreign_item_ref: &'v ForeignItemRef,
+) {
+ // N.B., deliberately force a compilation error if/when new fields are added.
+ let ForeignItemRef { id, ident, span: _ } = *foreign_item_ref;
+ visitor.visit_nested_foreign_item(id);
+ visitor.visit_ident(ident);
+}
+
+pub fn walk_impl_item_ref<'v, V: Visitor<'v>>(visitor: &mut V, impl_item_ref: &'v ImplItemRef) {
+ // N.B., deliberately force a compilation error if/when new fields are added.
+ let ImplItemRef { id, ident, ref kind, span: _, trait_item_def_id: _ } = *impl_item_ref;
+ visitor.visit_nested_impl_item(id);
+ visitor.visit_ident(ident);
+ visitor.visit_associated_item_kind(kind);
+}
+
+pub fn walk_struct_def<'v, V: Visitor<'v>>(
+ visitor: &mut V,
+ struct_definition: &'v VariantData<'v>,
+) {
+ walk_list!(visitor, visit_id, struct_definition.ctor_hir_id());
+ walk_list!(visitor, visit_field_def, struct_definition.fields());
+}
+
+pub fn walk_field_def<'v, V: Visitor<'v>>(visitor: &mut V, field: &'v FieldDef<'v>) {
+ visitor.visit_id(field.hir_id);
+ visitor.visit_ident(field.ident);
+ visitor.visit_ty(&field.ty);
+}
+
+pub fn walk_block<'v, V: Visitor<'v>>(visitor: &mut V, block: &'v Block<'v>) {
+ visitor.visit_id(block.hir_id);
+ walk_list!(visitor, visit_stmt, block.stmts);
+ walk_list!(visitor, visit_expr, &block.expr);
+}
+
+pub fn walk_stmt<'v, V: Visitor<'v>>(visitor: &mut V, statement: &'v Stmt<'v>) {
+ visitor.visit_id(statement.hir_id);
+ match statement.kind {
+ StmtKind::Local(ref local) => visitor.visit_local(local),
+ StmtKind::Item(item) => visitor.visit_nested_item(item),
+ StmtKind::Expr(ref expression) | StmtKind::Semi(ref expression) => {
+ visitor.visit_expr(expression)
+ }
+ }
+}
+
+pub fn walk_array_len<'v, V: Visitor<'v>>(visitor: &mut V, len: &'v ArrayLen) {
+ match len {
+ &ArrayLen::Infer(hir_id, _span) => visitor.visit_id(hir_id),
+ ArrayLen::Body(c) => visitor.visit_anon_const(c),
+ }
+}
+
+pub fn walk_anon_const<'v, V: Visitor<'v>>(visitor: &mut V, constant: &'v AnonConst) {
+ visitor.visit_id(constant.hir_id);
+ visitor.visit_nested_body(constant.body);
+}
+
+pub fn walk_let_expr<'v, V: Visitor<'v>>(visitor: &mut V, let_expr: &'v Let<'v>) {
+ // match the visit order in walk_local
+ visitor.visit_expr(let_expr.init);
+ visitor.visit_id(let_expr.hir_id);
+ visitor.visit_pat(let_expr.pat);
+ walk_list!(visitor, visit_ty, let_expr.ty);
+}
+
+pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>) {
+ visitor.visit_id(expression.hir_id);
+ match expression.kind {
+ ExprKind::Box(ref subexpression) => visitor.visit_expr(subexpression),
+ ExprKind::Array(subexpressions) => {
+ walk_list!(visitor, visit_expr, subexpressions);
+ }
+ ExprKind::ConstBlock(ref anon_const) => visitor.visit_anon_const(anon_const),
+ ExprKind::Repeat(ref element, ref count) => {
+ visitor.visit_expr(element);
+ visitor.visit_array_length(count)
+ }
+ ExprKind::Struct(ref qpath, fields, ref optional_base) => {
+ visitor.visit_qpath(qpath, expression.hir_id, expression.span);
+ for field in fields {
+ visitor.visit_id(field.hir_id);
+ visitor.visit_ident(field.ident);
+ visitor.visit_expr(&field.expr)
+ }
+ walk_list!(visitor, visit_expr, optional_base);
+ }
+ ExprKind::Tup(subexpressions) => {
+ walk_list!(visitor, visit_expr, subexpressions);
+ }
+ ExprKind::Call(ref callee_expression, arguments) => {
+ visitor.visit_expr(callee_expression);
+ walk_list!(visitor, visit_expr, arguments);
+ }
+ ExprKind::MethodCall(ref segment, arguments, _) => {
+ visitor.visit_path_segment(expression.span, segment);
+ walk_list!(visitor, visit_expr, arguments);
+ }
+ ExprKind::Binary(_, ref left_expression, ref right_expression) => {
+ visitor.visit_expr(left_expression);
+ visitor.visit_expr(right_expression)
+ }
+ ExprKind::AddrOf(_, _, ref subexpression) | ExprKind::Unary(_, ref subexpression) => {
+ visitor.visit_expr(subexpression)
+ }
+ ExprKind::Cast(ref subexpression, ref typ) | ExprKind::Type(ref subexpression, ref typ) => {
+ visitor.visit_expr(subexpression);
+ visitor.visit_ty(typ)
+ }
+ ExprKind::DropTemps(ref subexpression) => {
+ visitor.visit_expr(subexpression);
+ }
+ ExprKind::Let(ref let_expr) => visitor.visit_let_expr(let_expr),
+ ExprKind::If(ref cond, ref then, ref else_opt) => {
+ visitor.visit_expr(cond);
+ visitor.visit_expr(then);
+ walk_list!(visitor, visit_expr, else_opt);
+ }
+ ExprKind::Loop(ref block, ref opt_label, _, _) => {
+ walk_list!(visitor, visit_label, opt_label);
+ visitor.visit_block(block);
+ }
+ ExprKind::Match(ref subexpression, arms, _) => {
+ visitor.visit_expr(subexpression);
+ walk_list!(visitor, visit_arm, arms);
+ }
+ ExprKind::Closure(&Closure {
+ binder: _,
+ bound_generic_params,
+ fn_decl,
+ body,
+ capture_clause: _,
+ fn_decl_span: _,
+ movability: _,
+ }) => {
+ walk_list!(visitor, visit_generic_param, bound_generic_params);
+ visitor.visit_fn(FnKind::Closure, fn_decl, body, expression.span, expression.hir_id)
+ }
+ ExprKind::Block(ref block, ref opt_label) => {
+ walk_list!(visitor, visit_label, opt_label);
+ visitor.visit_block(block);
+ }
+ ExprKind::Assign(ref lhs, ref rhs, _) => {
+ visitor.visit_expr(rhs);
+ visitor.visit_expr(lhs)
+ }
+ ExprKind::AssignOp(_, ref left_expression, ref right_expression) => {
+ visitor.visit_expr(right_expression);
+ visitor.visit_expr(left_expression);
+ }
+ ExprKind::Field(ref subexpression, ident) => {
+ visitor.visit_expr(subexpression);
+ visitor.visit_ident(ident);
+ }
+ ExprKind::Index(ref main_expression, ref index_expression) => {
+ visitor.visit_expr(main_expression);
+ visitor.visit_expr(index_expression)
+ }
+ ExprKind::Path(ref qpath) => {
+ visitor.visit_qpath(qpath, expression.hir_id, expression.span);
+ }
+ ExprKind::Break(ref destination, ref opt_expr) => {
+ walk_list!(visitor, visit_label, &destination.label);
+ walk_list!(visitor, visit_expr, opt_expr);
+ }
+ ExprKind::Continue(ref destination) => {
+ walk_list!(visitor, visit_label, &destination.label);
+ }
+ ExprKind::Ret(ref optional_expression) => {
+ walk_list!(visitor, visit_expr, optional_expression);
+ }
+ ExprKind::InlineAsm(ref asm) => {
+ visitor.visit_inline_asm(asm, expression.hir_id);
+ }
+ ExprKind::Yield(ref subexpression, _) => {
+ visitor.visit_expr(subexpression);
+ }
+ ExprKind::Lit(_) | ExprKind::Err => {}
+ }
+}
+
+pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm<'v>) {
+ visitor.visit_id(arm.hir_id);
+ visitor.visit_pat(&arm.pat);
+ if let Some(ref g) = arm.guard {
+ match g {
+ Guard::If(ref e) => visitor.visit_expr(e),
+ Guard::IfLet(ref l) => {
+ visitor.visit_let_expr(l);
+ }
+ }
+ }
+ visitor.visit_expr(&arm.body);
+}
+
+pub fn walk_associated_item_kind<'v, V: Visitor<'v>>(_: &mut V, _: &'v AssocItemKind) {
+    // No visitable content here: this fn exists so you can call it wherever
+    // walking this node would be the right thing to do, should visitable
+    // content be added in the future.
+}
+
+pub fn walk_defaultness<'v, V: Visitor<'v>>(_: &mut V, _: &'v Defaultness) {
+    // No visitable content here: this fn exists so you can call it wherever
+    // walking this node would be the right thing to do, should visitable
+    // content be added in the future.
+}
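+
+// Illustrative sketch, not part of upstream: a visitor typically overrides a
+// single `visit_*` method and then calls the matching free `walk_*` function
+// to keep recursing into children. `ClosureCounter` is a hypothetical type:
+//
+//     struct ClosureCounter { count: usize }
+//
+//     impl<'v> Visitor<'v> for ClosureCounter {
+//         fn visit_expr(&mut self, e: &'v Expr<'v>) {
+//             if let ExprKind::Closure(..) = e.kind {
+//                 self.count += 1;
+//             }
+//             walk_expr(self, e); // recurse into subexpressions
+//         }
+//     }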
diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs
new file mode 100644
index 000000000..c337be12a
--- /dev/null
+++ b/compiler/rustc_hir/src/lang_items.rs
@@ -0,0 +1,339 @@
+//! Defines language items.
+//!
+//! Language items are items that represent concepts intrinsic to the language
+//! itself. Examples are:
+//!
+//! * Traits that specify "kinds"; e.g., `Sync`, `Send`.
+//! * Traits that represent operators; e.g., `Add`, `Sub`, `Index`.
+//! * Functions called by the compiler itself.
+
+use crate::def_id::DefId;
+use crate::{MethodKind, Target};
+
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_macros::HashStable_Generic;
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::Span;
+
+use std::sync::LazyLock;
+
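+/// A coarse grouping of lang items, used to look up all the items in one
+/// group at once: the operator traits (e.g. `Add`, `Index`) belong to `Op`,
+/// and the closure-call traits `Fn`/`FnMut`/`FnOnce` belong to `Fn`.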
+pub enum LangItemGroup {
+ Op,
+ Fn,
+}
+
+const NUM_GROUPS: usize = 2;
+
+macro_rules! expand_group {
+ () => {
+ None
+ };
+ ($group:expr) => {
+ Some($group)
+ };
+}
+
+// The actual lang items defined come at the end of this file in one handy table.
+// So you probably just want to nip down to the end.
+macro_rules! language_item_table {
+ (
+ $( $(#[$attr:meta])* $variant:ident $($group:expr)?, $module:ident :: $name:ident, $method:ident, $target:expr, $generics:expr; )*
+ ) => {
+
+ enum_from_u32! {
+ /// A representation of all the valid language items in Rust.
+ #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, Encodable, Decodable)]
+ pub enum LangItem {
+ $(
+ #[doc = concat!("The `", stringify!($name), "` lang item.")]
+ ///
+ $(#[$attr])*
+ $variant,
+ )*
+ }
+ }
+
+ impl LangItem {
+ /// Returns the `name` symbol in `#[lang = "$name"]`.
+ /// For example, [`LangItem::PartialEq`]`.name()`
+ /// would result in [`sym::eq`] since it is `#[lang = "eq"]`.
+ pub fn name(self) -> Symbol {
+ match self {
+ $( LangItem::$variant => $module::$name, )*
+ }
+ }
+
+ /// The [group](LangItemGroup) that this lang item belongs to,
+ /// or `None` if it doesn't belong to a group.
+ pub fn group(self) -> Option<LangItemGroup> {
+ use LangItemGroup::*;
+ match self {
+ $( LangItem::$variant => expand_group!($($group)*), )*
+ }
+ }
+
+ pub fn required_generics(&self) -> GenericRequirement {
+ match self {
+ $( LangItem::$variant => $generics, )*
+ }
+ }
+ }
+
+ /// All of the language items, defined or not.
+ /// Defined lang items can come from the current crate or its dependencies.
+ #[derive(HashStable_Generic, Debug)]
+ pub struct LanguageItems {
+ /// Mappings from lang items to their possibly found [`DefId`]s.
+ /// The index corresponds to the order in [`LangItem`].
+ pub items: Vec<Option<DefId>>,
+ /// Lang items that were not found during collection.
+ pub missing: Vec<LangItem>,
+ /// Mapping from [`LangItemGroup`] discriminants to all
+ /// [`DefId`]s of lang items in that group.
+ pub groups: [Vec<DefId>; NUM_GROUPS],
+ }
+
+ impl LanguageItems {
+ /// Construct an empty collection of lang items and no missing ones.
+ pub fn new() -> Self {
+ fn init_none(_: LangItem) -> Option<DefId> { None }
+ const EMPTY: Vec<DefId> = Vec::new();
+
+ Self {
+ items: vec![$(init_none(LangItem::$variant)),*],
+ missing: Vec::new(),
+ groups: [EMPTY; NUM_GROUPS],
+ }
+ }
+
+ /// Returns the mappings to the possibly found `DefId`s for each lang item.
+ pub fn items(&self) -> &[Option<DefId>] {
+ &*self.items
+ }
+
+ /// Requires that a given `LangItem` was bound and returns the corresponding `DefId`.
+ /// If it wasn't bound, e.g. due to a missing `#[lang = "<it.name()>"]`,
+ /// returns an error message as a string.
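+        ///
+        /// Illustrative use (the `tcx` accessor is an assumption, not defined
+        /// here): `tcx.lang_items().require(LangItem::Sized)` yields the
+        /// `DefId` of the `Sized` trait, or an error string if no
+        /// `#[lang = "sized"]` item was found.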
+ pub fn require(&self, it: LangItem) -> Result<DefId, String> {
+ self.items[it as usize].ok_or_else(|| format!("requires `{}` lang_item", it.name()))
+ }
+
+ /// Returns the [`DefId`]s of all lang items in a group.
+ pub fn group(&self, group: LangItemGroup) -> &[DefId] {
+ self.groups[group as usize].as_ref()
+ }
+
+ $(
+ #[doc = concat!("Returns the [`DefId`] of the `", stringify!($name), "` lang item if it is defined.")]
+ pub fn $method(&self) -> Option<DefId> {
+ self.items[LangItem::$variant as usize]
+ }
+ )*
+ }
+
+    /// A mapping from the name of each lang item to its index in [`LangItem`]
+    /// and the target form it must be attached to.
+ pub static ITEM_REFS: LazyLock<FxIndexMap<Symbol, (usize, Target)>> = LazyLock::new(|| {
+ let mut item_refs = FxIndexMap::default();
+ $( item_refs.insert($module::$name, (LangItem::$variant as usize, $target)); )*
+ item_refs
+ });
+
+// End of the macro
+ }
+}
+
+impl<CTX> HashStable<CTX> for LangItem {
+ fn hash_stable(&self, _: &mut CTX, hasher: &mut StableHasher) {
+ ::std::hash::Hash::hash(self, hasher);
+ }
+}
+
+/// Extracts the first `lang = "$name"` out of a list of attributes.
+/// The attributes `#[panic_handler]` and `#[alloc_error_handler]`
+/// are also extracted out when found.
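+/// For example, `#[lang = "sized"]` yields `(sym::sized, attr.span)`, and a
+/// bare `#[panic_handler]` yields `(sym::panic_impl, attr.span)`.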
+pub fn extract(attrs: &[ast::Attribute]) -> Option<(Symbol, Span)> {
+ attrs.iter().find_map(|attr| {
+ Some(match attr {
+ _ if attr.has_name(sym::lang) => (attr.value_str()?, attr.span),
+ _ if attr.has_name(sym::panic_handler) => (sym::panic_impl, attr.span),
+ _ if attr.has_name(sym::alloc_error_handler) => (sym::oom, attr.span),
+ _ => return None,
+ })
+ })
+}
+
+language_item_table! {
+//  Variant name,            Name,                      Method name,             Target,                  Generic requirements;
+ Sized, sym::sized, sized_trait, Target::Trait, GenericRequirement::Exact(0);
+ Unsize, sym::unsize, unsize_trait, Target::Trait, GenericRequirement::Minimum(1);
+    /// Trait injected by `#[derive(PartialEq)]` (i.e. "Partial EQ").
+ StructuralPeq, sym::structural_peq, structural_peq_trait, Target::Trait, GenericRequirement::None;
+    /// Trait injected by `#[derive(Eq)]` (i.e. "Total EQ"; no, I will not apologize).
+ StructuralTeq, sym::structural_teq, structural_teq_trait, Target::Trait, GenericRequirement::None;
+ Copy, sym::copy, copy_trait, Target::Trait, GenericRequirement::Exact(0);
+ Clone, sym::clone, clone_trait, Target::Trait, GenericRequirement::None;
+ Sync, sym::sync, sync_trait, Target::Trait, GenericRequirement::Exact(0);
+ DiscriminantKind, sym::discriminant_kind, discriminant_kind_trait, Target::Trait, GenericRequirement::None;
+ /// The associated item of the [`DiscriminantKind`] trait.
+ Discriminant, sym::discriminant_type, discriminant_type, Target::AssocTy, GenericRequirement::None;
+
+ PointeeTrait, sym::pointee_trait, pointee_trait, Target::Trait, GenericRequirement::None;
+ Metadata, sym::metadata_type, metadata_type, Target::AssocTy, GenericRequirement::None;
+ DynMetadata, sym::dyn_metadata, dyn_metadata, Target::Struct, GenericRequirement::None;
+
+ Freeze, sym::freeze, freeze_trait, Target::Trait, GenericRequirement::Exact(0);
+
+ Drop, sym::drop, drop_trait, Target::Trait, GenericRequirement::None;
+ Destruct, sym::destruct, destruct_trait, Target::Trait, GenericRequirement::None;
+
+ CoerceUnsized, sym::coerce_unsized, coerce_unsized_trait, Target::Trait, GenericRequirement::Minimum(1);
+ DispatchFromDyn, sym::dispatch_from_dyn, dispatch_from_dyn_trait, Target::Trait, GenericRequirement::Minimum(1);
+
+ // language items relating to transmutability
+ TransmuteTrait, sym::transmute_trait, transmute_trait, Target::Trait, GenericRequirement::Exact(6);
+
+ Add(Op), sym::add, add_trait, Target::Trait, GenericRequirement::Exact(1);
+ Sub(Op), sym::sub, sub_trait, Target::Trait, GenericRequirement::Exact(1);
+ Mul(Op), sym::mul, mul_trait, Target::Trait, GenericRequirement::Exact(1);
+ Div(Op), sym::div, div_trait, Target::Trait, GenericRequirement::Exact(1);
+ Rem(Op), sym::rem, rem_trait, Target::Trait, GenericRequirement::Exact(1);
+ Neg(Op), sym::neg, neg_trait, Target::Trait, GenericRequirement::Exact(0);
+ Not(Op), sym::not, not_trait, Target::Trait, GenericRequirement::Exact(0);
+ BitXor(Op), sym::bitxor, bitxor_trait, Target::Trait, GenericRequirement::Exact(1);
+ BitAnd(Op), sym::bitand, bitand_trait, Target::Trait, GenericRequirement::Exact(1);
+ BitOr(Op), sym::bitor, bitor_trait, Target::Trait, GenericRequirement::Exact(1);
+ Shl(Op), sym::shl, shl_trait, Target::Trait, GenericRequirement::Exact(1);
+ Shr(Op), sym::shr, shr_trait, Target::Trait, GenericRequirement::Exact(1);
+ AddAssign(Op), sym::add_assign, add_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ SubAssign(Op), sym::sub_assign, sub_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ MulAssign(Op), sym::mul_assign, mul_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ DivAssign(Op), sym::div_assign, div_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ RemAssign(Op), sym::rem_assign, rem_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ BitXorAssign(Op), sym::bitxor_assign, bitxor_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ BitAndAssign(Op), sym::bitand_assign, bitand_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ BitOrAssign(Op), sym::bitor_assign, bitor_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ ShlAssign(Op), sym::shl_assign, shl_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ ShrAssign(Op), sym::shr_assign, shr_assign_trait, Target::Trait, GenericRequirement::Exact(1);
+ Index(Op), sym::index, index_trait, Target::Trait, GenericRequirement::Exact(1);
+ IndexMut(Op), sym::index_mut, index_mut_trait, Target::Trait, GenericRequirement::Exact(1);
+
+ UnsafeCell, sym::unsafe_cell, unsafe_cell_type, Target::Struct, GenericRequirement::None;
+ VaList, sym::va_list, va_list, Target::Struct, GenericRequirement::None;
+
+ Deref, sym::deref, deref_trait, Target::Trait, GenericRequirement::Exact(0);
+ DerefMut, sym::deref_mut, deref_mut_trait, Target::Trait, GenericRequirement::Exact(0);
+ DerefTarget, sym::deref_target, deref_target, Target::AssocTy, GenericRequirement::None;
+ Receiver, sym::receiver, receiver_trait, Target::Trait, GenericRequirement::None;
+
+ Fn(Fn), kw::Fn, fn_trait, Target::Trait, GenericRequirement::Exact(1);
+ FnMut(Fn), sym::fn_mut, fn_mut_trait, Target::Trait, GenericRequirement::Exact(1);
+ FnOnce(Fn), sym::fn_once, fn_once_trait, Target::Trait, GenericRequirement::Exact(1);
+
+ FnOnceOutput, sym::fn_once_output, fn_once_output, Target::AssocTy, GenericRequirement::None;
+
+ Future, sym::future_trait, future_trait, Target::Trait, GenericRequirement::Exact(0);
+ GeneratorState, sym::generator_state, gen_state, Target::Enum, GenericRequirement::None;
+ Generator, sym::generator, gen_trait, Target::Trait, GenericRequirement::Minimum(1);
+ GeneratorReturn, sym::generator_return, generator_return, Target::AssocTy, GenericRequirement::None;
+ Unpin, sym::unpin, unpin_trait, Target::Trait, GenericRequirement::None;
+ Pin, sym::pin, pin_type, Target::Struct, GenericRequirement::None;
+
+ PartialEq(Op), sym::eq, eq_trait, Target::Trait, GenericRequirement::Exact(1);
+ PartialOrd(Op), sym::partial_ord, partial_ord_trait, Target::Trait, GenericRequirement::Exact(1);
+
+ // A number of panic-related lang items. The `panic` item corresponds to divide-by-zero and
+ // various panic cases with `match`. The `panic_bounds_check` item is for indexing arrays.
+ //
+ // The `begin_unwind` lang item has a predefined symbol name and is sort of a "weak lang item"
+ // in the sense that a crate is not required to have it defined to use it, but a final product
+ // is required to define it somewhere. Additionally, there are restrictions on crates that use
+ // a weak lang item, but do not have it defined.
+ Panic, sym::panic, panic_fn, Target::Fn, GenericRequirement::Exact(0);
+ PanicFmt, sym::panic_fmt, panic_fmt, Target::Fn, GenericRequirement::None;
+ PanicDisplay, sym::panic_display, panic_display, Target::Fn, GenericRequirement::None;
+ ConstPanicFmt, sym::const_panic_fmt, const_panic_fmt, Target::Fn, GenericRequirement::None;
+ PanicBoundsCheck, sym::panic_bounds_check, panic_bounds_check_fn, Target::Fn, GenericRequirement::Exact(0);
+ PanicInfo, sym::panic_info, panic_info, Target::Struct, GenericRequirement::None;
+ PanicLocation, sym::panic_location, panic_location, Target::Struct, GenericRequirement::None;
+ PanicImpl, sym::panic_impl, panic_impl, Target::Fn, GenericRequirement::None;
+ PanicNoUnwind, sym::panic_no_unwind, panic_no_unwind, Target::Fn, GenericRequirement::Exact(0);
+ /// libstd panic entry point. Necessary for const eval to be able to catch it
+ BeginPanic, sym::begin_panic, begin_panic_fn, Target::Fn, GenericRequirement::None;
+
+ ExchangeMalloc, sym::exchange_malloc, exchange_malloc_fn, Target::Fn, GenericRequirement::None;
+ BoxFree, sym::box_free, box_free_fn, Target::Fn, GenericRequirement::Minimum(1);
+ DropInPlace, sym::drop_in_place, drop_in_place_fn, Target::Fn, GenericRequirement::Minimum(1);
+ Oom, sym::oom, oom, Target::Fn, GenericRequirement::None;
+ AllocLayout, sym::alloc_layout, alloc_layout, Target::Struct, GenericRequirement::None;
+ ConstEvalSelect, sym::const_eval_select, const_eval_select, Target::Fn, GenericRequirement::Exact(4);
+    ConstConstEvalSelect, sym::const_eval_select_ct, const_eval_select_ct, Target::Fn, GenericRequirement::Exact(4);
+
+ Start, sym::start, start_fn, Target::Fn, GenericRequirement::Exact(1);
+
+ EhPersonality, sym::eh_personality, eh_personality, Target::Fn, GenericRequirement::None;
+ EhCatchTypeinfo, sym::eh_catch_typeinfo, eh_catch_typeinfo, Target::Static, GenericRequirement::None;
+
+ OwnedBox, sym::owned_box, owned_box, Target::Struct, GenericRequirement::Minimum(1);
+
+ PhantomData, sym::phantom_data, phantom_data, Target::Struct, GenericRequirement::Exact(1);
+
+ ManuallyDrop, sym::manually_drop, manually_drop, Target::Struct, GenericRequirement::None;
+
+ MaybeUninit, sym::maybe_uninit, maybe_uninit, Target::Union, GenericRequirement::None;
+
+ /// Align offset for stride != 1; must not panic.
+ AlignOffset, sym::align_offset, align_offset_fn, Target::Fn, GenericRequirement::None;
+
+ Termination, sym::termination, termination, Target::Trait, GenericRequirement::None;
+
+ Try, sym::Try, try_trait, Target::Trait, GenericRequirement::None;
+
+ SliceLen, sym::slice_len_fn, slice_len_fn, Target::Method(MethodKind::Inherent), GenericRequirement::None;
+
+ // Language items from AST lowering
+ TryTraitFromResidual, sym::from_residual, from_residual_fn, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::None;
+ TryTraitFromOutput, sym::from_output, from_output_fn, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::None;
+ TryTraitBranch, sym::branch, branch_fn, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::None;
+ TryTraitFromYeet, sym::from_yeet, from_yeet_fn, Target::Fn, GenericRequirement::None;
+
+ PollReady, sym::Ready, poll_ready_variant, Target::Variant, GenericRequirement::None;
+ PollPending, sym::Pending, poll_pending_variant, Target::Variant, GenericRequirement::None;
+
+ FromGenerator, sym::from_generator, from_generator_fn, Target::Fn, GenericRequirement::None;
+ GetContext, sym::get_context, get_context_fn, Target::Fn, GenericRequirement::None;
+
+ FuturePoll, sym::poll, future_poll_fn, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::None;
+
+ FromFrom, sym::from, from_fn, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::None;
+
+ OptionSome, sym::Some, option_some_variant, Target::Variant, GenericRequirement::None;
+ OptionNone, sym::None, option_none_variant, Target::Variant, GenericRequirement::None;
+
+ ResultOk, sym::Ok, result_ok_variant, Target::Variant, GenericRequirement::None;
+ ResultErr, sym::Err, result_err_variant, Target::Variant, GenericRequirement::None;
+
+ ControlFlowContinue, sym::Continue, cf_continue_variant, Target::Variant, GenericRequirement::None;
+ ControlFlowBreak, sym::Break, cf_break_variant, Target::Variant, GenericRequirement::None;
+
+ IntoFutureIntoFuture, sym::into_future, into_future_fn, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::None;
+ IntoIterIntoIter, sym::into_iter, into_iter_fn, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::None;
+    IteratorNext, sym::next, next_fn, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::None;
+
+ PinNewUnchecked, sym::new_unchecked, new_unchecked_fn, Target::Method(MethodKind::Inherent), GenericRequirement::None;
+
+ RangeFrom, sym::RangeFrom, range_from_struct, Target::Struct, GenericRequirement::None;
+ RangeFull, sym::RangeFull, range_full_struct, Target::Struct, GenericRequirement::None;
+ RangeInclusiveStruct, sym::RangeInclusive, range_inclusive_struct, Target::Struct, GenericRequirement::None;
+ RangeInclusiveNew, sym::range_inclusive_new, range_inclusive_new_method, Target::Method(MethodKind::Inherent), GenericRequirement::None;
+ Range, sym::Range, range_struct, Target::Struct, GenericRequirement::None;
+ RangeToInclusive, sym::RangeToInclusive, range_to_inclusive_struct, Target::Struct, GenericRequirement::None;
+ RangeTo, sym::RangeTo, range_to_struct, Target::Struct, GenericRequirement::None;
+}
+
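+/// How many generic parameters the item bound to a lang item must declare.
+/// In the table above, `Add` requires `Exact(1)` (the right-hand-side type
+/// parameter) and `Sized` requires `Exact(0)`, while `Minimum(n)` accepts
+/// any count of at least `n`.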
+pub enum GenericRequirement {
+ None,
+ Minimum(usize),
+ Exact(usize),
+}
diff --git a/compiler/rustc_hir/src/lib.rs b/compiler/rustc_hir/src/lib.rs
new file mode 100644
index 000000000..0f9e6fa7b
--- /dev/null
+++ b/compiler/rustc_hir/src/lib.rs
@@ -0,0 +1,47 @@
+//! HIR datatypes. See the [rustc dev guide] for more info.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/hir.html
+
+#![feature(associated_type_defaults)]
+#![feature(closure_track_caller)]
+#![feature(const_btree_new)]
+#![feature(let_else)]
+#![feature(once_cell)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(rustc_attrs)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate rustc_macros;
+
+#[macro_use]
+extern crate rustc_data_structures;
+
+extern crate self as rustc_hir;
+
+mod arena;
+pub mod def;
+pub mod def_path_hash_map;
+pub mod definitions;
+pub mod diagnostic_items;
+pub use rustc_span::def_id;
+mod hir;
+pub mod hir_id;
+pub mod intravisit;
+pub mod lang_items;
+pub mod pat_util;
+mod stable_hash_impls;
+mod target;
+pub mod weak_lang_items;
+
+#[cfg(test)]
+mod tests;
+
+pub use hir::*;
+pub use hir_id::*;
+pub use lang_items::{LangItem, LanguageItems};
+pub use stable_hash_impls::HashStableContext;
+pub use target::{MethodKind, Target};
+
+arena_types!(rustc_arena::declare_arena);
diff --git a/compiler/rustc_hir/src/pat_util.rs b/compiler/rustc_hir/src/pat_util.rs
new file mode 100644
index 000000000..93112199b
--- /dev/null
+++ b/compiler/rustc_hir/src/pat_util.rs
@@ -0,0 +1,157 @@
+use crate::def::{CtorOf, DefKind, Res};
+use crate::def_id::DefId;
+use crate::hir::{self, HirId, PatKind};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+use std::iter::{Enumerate, ExactSizeIterator};
+
+pub struct EnumerateAndAdjust<I> {
+ enumerate: Enumerate<I>,
+ gap_pos: usize,
+ gap_len: usize,
+}
+
+impl<I> Iterator for EnumerateAndAdjust<I>
+where
+ I: Iterator,
+{
+ type Item = (usize, <I as Iterator>::Item);
+
+ fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
+ self.enumerate
+ .next()
+ .map(|(i, elem)| (if i < self.gap_pos { i } else { i + self.gap_len }, elem))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.enumerate.size_hint()
+ }
+}
+
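+/// Like `Iterator::enumerate`, but shifts the indices of elements that fall
+/// after a `..` gap in a pattern so that they line up with the expected arity.
+///
+/// Illustrative sketch, not upstream documentation: matching `(a, .., y, z)`
+/// against a 5-tuple has `expected_len = 5` and a gap at position 1, so the
+/// three subpatterns are numbered 0, 3 and 4.
+///
+/// ```ignore
+/// let idx: Vec<usize> = vec!["a", "y", "z"]
+///     .into_iter()
+///     .enumerate_and_adjust(5, Some(1))
+///     .map(|(i, _)| i)
+///     .collect();
+/// assert_eq!(idx, [0, 3, 4]);
+/// ```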
+pub trait EnumerateAndAdjustIterator {
+ fn enumerate_and_adjust(
+ self,
+ expected_len: usize,
+ gap_pos: Option<usize>,
+ ) -> EnumerateAndAdjust<Self>
+ where
+ Self: Sized;
+}
+
+impl<T: ExactSizeIterator> EnumerateAndAdjustIterator for T {
+ fn enumerate_and_adjust(
+ self,
+ expected_len: usize,
+ gap_pos: Option<usize>,
+ ) -> EnumerateAndAdjust<Self>
+ where
+ Self: Sized,
+ {
+ let actual_len = self.len();
+ EnumerateAndAdjust {
+ enumerate: self.enumerate(),
+ gap_pos: gap_pos.unwrap_or(expected_len),
+ gap_len: expected_len - actual_len,
+ }
+ }
+}
+
+impl hir::Pat<'_> {
+ /// Call `f` on every "binding" in a pattern, e.g., on `a` in
+ /// `match foo() { Some(a) => (), None => () }`
+ pub fn each_binding(&self, mut f: impl FnMut(hir::BindingAnnotation, HirId, Span, Ident)) {
+ self.walk_always(|p| {
+ if let PatKind::Binding(binding_mode, _, ident, _) = p.kind {
+ f(binding_mode, p.hir_id, p.span, ident);
+ }
+ });
+ }
+
+ /// Call `f` on every "binding" in a pattern, e.g., on `a` in
+ /// `match foo() { Some(a) => (), None => () }`.
+ ///
+ /// When encountering an or-pattern `p_0 | ... | p_n` only `p_0` will be visited.
+ pub fn each_binding_or_first(
+ &self,
+ f: &mut impl FnMut(hir::BindingAnnotation, HirId, Span, Ident),
+ ) {
+ self.walk(|p| match &p.kind {
+ PatKind::Or(ps) => {
+ ps[0].each_binding_or_first(f);
+ false
+ }
+ PatKind::Binding(bm, _, ident, _) => {
+ f(*bm, p.hir_id, p.span, *ident);
+ true
+ }
+ _ => true,
+ })
+ }
+
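+    /// Returns the identifier of a plain `x` or `mut x` binding pattern, and
+    /// `None` for anything else (`ref` bindings, bindings with a subpattern
+    /// such as `x @ Some(_)`, or non-binding patterns).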
+ pub fn simple_ident(&self) -> Option<Ident> {
+ match self.kind {
+ PatKind::Binding(
+ hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
+ _,
+ ident,
+ None,
+ ) => Some(ident),
+ _ => None,
+ }
+ }
+
+    /// Returns the variants that must exist for the pattern to match.
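+    /// For example, `Ok(_)` requires `Result::Ok` to exist; the subpatterns
+    /// of an or-pattern `p | q` are deliberately not considered.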
+ pub fn necessary_variants(&self) -> Vec<DefId> {
+ let mut variants = vec![];
+ self.walk(|p| match &p.kind {
+ PatKind::Or(_) => false,
+ PatKind::Path(hir::QPath::Resolved(_, path))
+ | PatKind::TupleStruct(hir::QPath::Resolved(_, path), ..)
+ | PatKind::Struct(hir::QPath::Resolved(_, path), ..) => {
+ if let Res::Def(DefKind::Variant | DefKind::Ctor(CtorOf::Variant, ..), id) =
+ path.res
+ {
+ variants.push(id);
+ }
+ true
+ }
+ _ => true,
+ });
+        // We remove duplicates by inserting into a `FxHashSet` to avoid re-ordering
+        // the variants.
+ let mut duplicates = FxHashSet::default();
+ variants.retain(|def_id| duplicates.insert(*def_id));
+ variants
+ }
+
+    /// Checks if the pattern contains any `ref` or `ref mut` bindings, and if
+    /// so, whether it contains mutable or just immutable ones.
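+    ///
+    /// For example, `ref x` yields `Some(Mutability::Not)`, `ref mut x`
+    /// yields `Some(Mutability::Mut)` (which wins over any plain `ref`), and
+    /// `x` alone contributes nothing.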
+ //
+ // FIXME(tschottdorf): this is problematic as the HIR is being scraped, but
+    // ref bindings can be implicit after #42640 (default match binding modes). See issue #44848.
+ pub fn contains_explicit_ref_binding(&self) -> Option<hir::Mutability> {
+ let mut result = None;
+ self.each_binding(|annotation, _, _, _| match annotation {
+ hir::BindingAnnotation::Ref => match result {
+ None | Some(hir::Mutability::Not) => result = Some(hir::Mutability::Not),
+ _ => {}
+ },
+ hir::BindingAnnotation::RefMut => result = Some(hir::Mutability::Mut),
+ _ => {}
+ });
+ result
+ }
+
+ /// If the pattern is `Some(<pat>)` from a desugared for loop, returns the inner pattern
+ pub fn for_loop_some(&self) -> Option<&Self> {
+ if self.span.desugaring_kind() == Some(DesugaringKind::ForLoop) {
+ if let hir::PatKind::Struct(_, [pat_field], _) = self.kind {
+ return Some(pat_field.pat);
+ }
+ }
+ None
+ }
+}
diff --git a/compiler/rustc_hir/src/stable_hash_impls.rs b/compiler/rustc_hir/src/stable_hash_impls.rs
new file mode 100644
index 000000000..8ccd59e8e
--- /dev/null
+++ b/compiler/rustc_hir/src/stable_hash_impls.rs
@@ -0,0 +1,143 @@
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+
+use crate::hir::{
+ AttributeMap, BodyId, Crate, Expr, ForeignItemId, ImplItemId, ItemId, OwnerNodes, TraitItemId,
+ Ty,
+};
+use crate::hir_id::{HirId, ItemLocalId};
+use rustc_span::def_id::DefPathHash;
+
+/// Requirements for a `StableHashingContext` to be used in this crate.
+/// This is a hack to allow using the `HashStable_Generic` derive macro
+/// instead of implementing everything in `rustc_middle`.
+pub trait HashStableContext:
+ rustc_ast::HashStableContext + rustc_target::HashStableContext
+{
+ fn hash_body_id(&mut self, _: BodyId, hasher: &mut StableHasher);
+ fn hash_hir_expr(&mut self, _: &Expr<'_>, hasher: &mut StableHasher);
+ fn hash_hir_ty(&mut self, _: &Ty<'_>, hasher: &mut StableHasher);
+}
+
+impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for HirId {
+ type KeyType = (DefPathHash, ItemLocalId);
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> (DefPathHash, ItemLocalId) {
+ let def_path_hash = self.owner.to_stable_hash_key(hcx);
+ (def_path_hash, self.local_id)
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ItemLocalId {
+ type KeyType = ItemLocalId;
+
+ #[inline]
+ fn to_stable_hash_key(&self, _: &HirCtx) -> ItemLocalId {
+ *self
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for BodyId {
+ type KeyType = (DefPathHash, ItemLocalId);
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> (DefPathHash, ItemLocalId) {
+ let BodyId { hir_id } = *self;
+ hir_id.to_stable_hash_key(hcx)
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ItemId {
+ type KeyType = DefPathHash;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
+ self.def_id.to_stable_hash_key(hcx)
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for TraitItemId {
+ type KeyType = DefPathHash;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
+ self.def_id.to_stable_hash_key(hcx)
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ImplItemId {
+ type KeyType = DefPathHash;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
+ self.def_id.to_stable_hash_key(hcx)
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> ToStableHashKey<HirCtx> for ForeignItemId {
+ type KeyType = DefPathHash;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &HirCtx) -> DefPathHash {
+ self.def_id.to_stable_hash_key(hcx)
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for BodyId {
+ fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
+ hcx.hash_body_id(*self, hasher)
+ }
+}
+
+// The following implementations of HashStable for `ItemId`, `TraitItemId`, and
+// `ImplItemId` deserve special attention. Normally we do not hash `NodeId`s within
+// the HIR, since they just signify a HIR node's own path. But `ItemId` et al
+// are used when another item in the HIR is *referenced* and we certainly
+// want to pick up on a reference changing its target, so we hash the NodeIds
+// in "DefPath Mode".
+
+impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for Expr<'_> {
+ fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
+ hcx.hash_hir_expr(self, hasher)
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for Ty<'_> {
+ fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
+ hcx.hash_hir_ty(self, hasher)
+ }
+}
+
+impl<'tcx, HirCtx: crate::HashStableContext> HashStable<HirCtx> for OwnerNodes<'tcx> {
+ fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
+ // We ignore the `nodes` and `bodies` fields since these refer to information included in
+ // `hash` which is hashed in the collector and used for the crate hash.
+        // `local_id_to_def_id` is also ignored because it is dependent on the body, so hashing
+        // the body alone is enough to guarantee that two different nodes have different
+        // `hash_stable` results.
+ let OwnerNodes {
+ hash_including_bodies,
+ hash_without_bodies: _,
+ nodes: _,
+ bodies: _,
+ local_id_to_def_id: _,
+ } = *self;
+ hash_including_bodies.hash_stable(hcx, hasher);
+ }
+}
+
+impl<'tcx, HirCtx: crate::HashStableContext> HashStable<HirCtx> for AttributeMap<'tcx> {
+ fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
+ // We ignore the `map` since it refers to information included in `hash` which is hashed in
+ // the collector and used for the crate hash.
+ let AttributeMap { hash, map: _ } = *self;
+ hash.hash_stable(hcx, hasher);
+ }
+}
+
+impl<HirCtx: crate::HashStableContext> HashStable<HirCtx> for Crate<'_> {
+ fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) {
+ let Crate { owners: _, hir_hash } = self;
+ hir_hash.hash_stable(hcx, hasher)
+ }
+}
diff --git a/compiler/rustc_hir/src/target.rs b/compiler/rustc_hir/src/target.rs
new file mode 100644
index 000000000..6236dea10
--- /dev/null
+++ b/compiler/rustc_hir/src/target.rs
@@ -0,0 +1,188 @@
+//! This module implements some validity checks for attributes.
+//! In particular it verifies that `#[inline]` and `#[repr]` attributes are
+//! attached to items that actually support them, and it detects conflicts
+//! between multiple such attributes attached to the same item.
+
+use crate::hir;
+use crate::{Item, ItemKind, TraitItem, TraitItemKind};
+
+use crate::def::DefKind;
+use std::fmt::{self, Display};
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum GenericParamKind {
+ Type,
+ Lifetime,
+ Const,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum MethodKind {
+ Trait { body: bool },
+ Inherent,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum Target {
+ ExternCrate,
+ Use,
+ Static,
+ Const,
+ Fn,
+ Closure,
+ Mod,
+ ForeignMod,
+ GlobalAsm,
+ TyAlias,
+ OpaqueTy,
+ Enum,
+ Variant,
+ Struct,
+ Field,
+ Union,
+ Trait,
+ TraitAlias,
+ Impl,
+ Expression,
+ Statement,
+ Arm,
+ AssocConst,
+ Method(MethodKind),
+ AssocTy,
+ ForeignFn,
+ ForeignStatic,
+ ForeignTy,
+ GenericParam(GenericParamKind),
+ MacroDef,
+ Param,
+}
+
+impl Display for Target {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", Self::name(*self))
+ }
+}
+
+impl Target {
+ pub fn from_item(item: &Item<'_>) -> Target {
+ match item.kind {
+ ItemKind::ExternCrate(..) => Target::ExternCrate,
+ ItemKind::Use(..) => Target::Use,
+ ItemKind::Static(..) => Target::Static,
+ ItemKind::Const(..) => Target::Const,
+ ItemKind::Fn(..) => Target::Fn,
+ ItemKind::Macro(..) => Target::MacroDef,
+ ItemKind::Mod(..) => Target::Mod,
+ ItemKind::ForeignMod { .. } => Target::ForeignMod,
+ ItemKind::GlobalAsm(..) => Target::GlobalAsm,
+ ItemKind::TyAlias(..) => Target::TyAlias,
+ ItemKind::OpaqueTy(..) => Target::OpaqueTy,
+ ItemKind::Enum(..) => Target::Enum,
+ ItemKind::Struct(..) => Target::Struct,
+ ItemKind::Union(..) => Target::Union,
+ ItemKind::Trait(..) => Target::Trait,
+ ItemKind::TraitAlias(..) => Target::TraitAlias,
+ ItemKind::Impl { .. } => Target::Impl,
+ }
+ }
+
+ // FIXME: For now, should only be used with def_kinds from ItemIds
+ pub fn from_def_kind(def_kind: DefKind) -> Target {
+ match def_kind {
+ DefKind::ExternCrate => Target::ExternCrate,
+ DefKind::Use => Target::Use,
+ DefKind::Static(..) => Target::Static,
+ DefKind::Const => Target::Const,
+ DefKind::Fn => Target::Fn,
+ DefKind::Macro(..) => Target::MacroDef,
+ DefKind::Mod => Target::Mod,
+ DefKind::ForeignMod => Target::ForeignMod,
+ DefKind::GlobalAsm => Target::GlobalAsm,
+ DefKind::TyAlias => Target::TyAlias,
+ DefKind::OpaqueTy => Target::OpaqueTy,
+ DefKind::Enum => Target::Enum,
+ DefKind::Struct => Target::Struct,
+ DefKind::Union => Target::Union,
+ DefKind::Trait => Target::Trait,
+ DefKind::TraitAlias => Target::TraitAlias,
+ DefKind::Impl => Target::Impl,
+ _ => panic!("impossible case reached"),
+ }
+ }
+
+ pub fn from_trait_item(trait_item: &TraitItem<'_>) -> Target {
+ match trait_item.kind {
+ TraitItemKind::Const(..) => Target::AssocConst,
+ TraitItemKind::Fn(_, hir::TraitFn::Required(_)) => {
+ Target::Method(MethodKind::Trait { body: false })
+ }
+ TraitItemKind::Fn(_, hir::TraitFn::Provided(_)) => {
+ Target::Method(MethodKind::Trait { body: true })
+ }
+ TraitItemKind::Type(..) => Target::AssocTy,
+ }
+ }
+
+ pub fn from_foreign_item(foreign_item: &hir::ForeignItem<'_>) -> Target {
+ match foreign_item.kind {
+ hir::ForeignItemKind::Fn(..) => Target::ForeignFn,
+ hir::ForeignItemKind::Static(..) => Target::ForeignStatic,
+ hir::ForeignItemKind::Type => Target::ForeignTy,
+ }
+ }
+
+ pub fn from_generic_param(generic_param: &hir::GenericParam<'_>) -> Target {
+ match generic_param.kind {
+ hir::GenericParamKind::Type { .. } => Target::GenericParam(GenericParamKind::Type),
+ hir::GenericParamKind::Lifetime { .. } => {
+ Target::GenericParam(GenericParamKind::Lifetime)
+ }
+ hir::GenericParamKind::Const { .. } => Target::GenericParam(GenericParamKind::Const),
+ }
+ }
+
+ pub fn name(self) -> &'static str {
+ match self {
+ Target::ExternCrate => "extern crate",
+ Target::Use => "use",
+ Target::Static => "static item",
+ Target::Const => "constant item",
+ Target::Fn => "function",
+ Target::Closure => "closure",
+ Target::Mod => "module",
+ Target::ForeignMod => "foreign module",
+ Target::GlobalAsm => "global asm",
+ Target::TyAlias => "type alias",
+ Target::OpaqueTy => "opaque type",
+ Target::Enum => "enum",
+ Target::Variant => "enum variant",
+ Target::Struct => "struct",
+ Target::Field => "struct field",
+ Target::Union => "union",
+ Target::Trait => "trait",
+ Target::TraitAlias => "trait alias",
+ Target::Impl => "implementation block",
+ Target::Expression => "expression",
+ Target::Statement => "statement",
+ Target::Arm => "match arm",
+ Target::AssocConst => "associated const",
+ Target::Method(kind) => match kind {
+ MethodKind::Inherent => "inherent method",
+ MethodKind::Trait { body: false } => "required trait method",
+ MethodKind::Trait { body: true } => "provided trait method",
+ },
+ Target::AssocTy => "associated type",
+ Target::ForeignFn => "foreign function",
+ Target::ForeignStatic => "foreign static item",
+ Target::ForeignTy => "foreign type",
+ Target::GenericParam(kind) => match kind {
+ GenericParamKind::Type => "type parameter",
+ GenericParamKind::Lifetime => "lifetime parameter",
+ GenericParamKind::Const => "const parameter",
+ },
+ Target::MacroDef => "macro def",
+ Target::Param => "function param",
+ }
+ }
+}
diff --git a/compiler/rustc_hir/src/tests.rs b/compiler/rustc_hir/src/tests.rs
new file mode 100644
index 000000000..4636d5152
--- /dev/null
+++ b/compiler/rustc_hir/src/tests.rs
@@ -0,0 +1,36 @@
+use crate::definitions::{DefKey, DefPathData, DisambiguatedDefPathData};
+use rustc_span::def_id::{DefPathHash, StableCrateId};
+
+#[test]
+fn def_path_hash_depends_on_crate_id() {
+ // This test makes sure that *both* halves of a DefPathHash depend on
+ // the crate-id of the defining crate. This is a desirable property
+ // because the crate-id can be more easily changed than the DefPath
+ // of an item, so, in the case of a crate-local DefPathHash collision,
+    // the user can simply "roll the dice again" for all DefPathHashes in
+ // the crate by changing the crate disambiguator (e.g. via bumping the
+ // crate's version number).
+
+ let id0 = StableCrateId::new("foo", false, vec!["1".to_string()]);
+ let id1 = StableCrateId::new("foo", false, vec!["2".to_string()]);
+
+ let h0 = mk_test_hash(id0);
+ let h1 = mk_test_hash(id1);
+
+ assert_ne!(h0.stable_crate_id(), h1.stable_crate_id());
+ assert_ne!(h0.local_hash(), h1.local_hash());
+
+ fn mk_test_hash(stable_crate_id: StableCrateId) -> DefPathHash {
+ let parent_hash = DefPathHash::new(stable_crate_id, 0);
+
+ let key = DefKey {
+ parent: None,
+ disambiguated_data: DisambiguatedDefPathData {
+ data: DefPathData::CrateRoot,
+ disambiguator: 0,
+ },
+ };
+
+ key.compute_stable_hash(parent_hash)
+ }
+}
diff --git a/compiler/rustc_hir/src/weak_lang_items.rs b/compiler/rustc_hir/src/weak_lang_items.rs
new file mode 100644
index 000000000..b6a85c047
--- /dev/null
+++ b/compiler/rustc_hir/src/weak_lang_items.rs
@@ -0,0 +1,47 @@
+//! Validity checking for weak lang items
+
+use crate::def_id::DefId;
+use crate::{lang_items, LangItem, LanguageItems};
+
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_span::symbol::{sym, Symbol};
+
+use std::sync::LazyLock;
+
+macro_rules! weak_lang_items {
+ ($($name:ident, $item:ident, $sym:ident;)*) => (
+
+pub static WEAK_ITEMS_REFS: LazyLock<FxIndexMap<Symbol, LangItem>> = LazyLock::new(|| {
+ let mut map = FxIndexMap::default();
+ $(map.insert(sym::$name, LangItem::$item);)*
+ map
+});
+
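+/// Returns the predefined link name for a weak lang item attribute: e.g.
+/// `#[lang = "panic_impl"]` maps to `Some(sym::rust_begin_unwind)`, and
+/// unrelated attributes map to `None`.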
+pub fn link_name(attrs: &[ast::Attribute]) -> Option<Symbol> {
+ lang_items::extract(attrs).and_then(|(name, _)| {
+ $(if name == sym::$name {
+ Some(sym::$sym)
+ } else)* {
+ None
+ }
+ })
+}
+
+impl LanguageItems {
+ pub fn is_weak_lang_item(&self, item_def_id: DefId) -> bool {
+ let did = Some(item_def_id);
+
+ $(self.$name() == did)||*
+ }
+}
+
+) }
+
+weak_lang_items! {
+ panic_impl, PanicImpl, rust_begin_unwind;
+ eh_personality, EhPersonality, rust_eh_personality;
+ eh_catch_typeinfo, EhCatchTypeinfo, rust_eh_catch_typeinfo;
+ oom, Oom, rust_oom;
+}
diff --git a/compiler/rustc_hir_pretty/Cargo.toml b/compiler/rustc_hir_pretty/Cargo.toml
new file mode 100644
index 000000000..46a8e7dee
--- /dev/null
+++ b/compiler/rustc_hir_pretty/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "rustc_hir_pretty"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_target = { path = "../rustc_target" }
+rustc_span = { path = "../rustc_span" }
+rustc_ast = { path = "../rustc_ast" }
diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs
new file mode 100644
index 000000000..e0179bd3e
--- /dev/null
+++ b/compiler/rustc_hir_pretty/src/lib.rs
@@ -0,0 +1,2413 @@
+#![recursion_limit = "256"]
+
+use rustc_ast as ast;
+use rustc_ast::util::parser::{self, AssocOp, Fixity};
+use rustc_ast_pretty::pp::Breaks::{Consistent, Inconsistent};
+use rustc_ast_pretty::pp::{self, Breaks};
+use rustc_ast_pretty::pprust::{Comments, PrintState};
+use rustc_hir as hir;
+use rustc_hir::LifetimeParamKind;
+use rustc_hir::{GenericArg, GenericParam, GenericParamKind, Node, Term};
+use rustc_hir::{GenericBound, PatKind, RangeEnd, TraitBoundModifier};
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::{kw, Ident, IdentPrinter, Symbol};
+use rustc_span::{self, FileName};
+use rustc_target::spec::abi::Abi;
+
+use std::cell::Cell;
+use std::vec;
+
+pub fn id_to_string(map: &dyn rustc_hir::intravisit::Map<'_>, hir_id: hir::HirId) -> String {
+ to_string(&map, |s| s.print_node(map.find(hir_id).unwrap()))
+}
+
+pub enum AnnNode<'a> {
+ Name(&'a Symbol),
+ Block(&'a hir::Block<'a>),
+ Item(&'a hir::Item<'a>),
+ SubItem(hir::HirId),
+ Expr(&'a hir::Expr<'a>),
+ Pat(&'a hir::Pat<'a>),
+ Arm(&'a hir::Arm<'a>),
+}
+
+pub enum Nested {
+ Item(hir::ItemId),
+ TraitItem(hir::TraitItemId),
+ ImplItem(hir::ImplItemId),
+ ForeignItem(hir::ForeignItemId),
+ Body(hir::BodyId),
+ BodyParamPat(hir::BodyId, usize),
+}
+
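+/// Hooks for annotating the pretty-printer's output: `pre` and `post` fire
+/// around printed nodes, while `nested` asks the implementor to resolve and
+/// print a nested HIR entity (item, body, ...) that the printer only knows
+/// by id.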
+pub trait PpAnn {
+ fn nested(&self, _state: &mut State<'_>, _nested: Nested) {}
+ fn pre(&self, _state: &mut State<'_>, _node: AnnNode<'_>) {}
+ fn post(&self, _state: &mut State<'_>, _node: AnnNode<'_>) {}
+}
+
+pub struct NoAnn;
+impl PpAnn for NoAnn {}
+pub const NO_ANN: &dyn PpAnn = &NoAnn;
+
+/// Identical to the `PpAnn` implementation for `hir::Crate`,
+/// except it avoids creating a dependency on the whole crate.
+impl PpAnn for &dyn rustc_hir::intravisit::Map<'_> {
+ fn nested(&self, state: &mut State<'_>, nested: Nested) {
+ match nested {
+ Nested::Item(id) => state.print_item(self.item(id)),
+ Nested::TraitItem(id) => state.print_trait_item(self.trait_item(id)),
+ Nested::ImplItem(id) => state.print_impl_item(self.impl_item(id)),
+ Nested::ForeignItem(id) => state.print_foreign_item(self.foreign_item(id)),
+ Nested::Body(id) => state.print_expr(&self.body(id).value),
+ Nested::BodyParamPat(id, i) => state.print_pat(self.body(id).params[i].pat),
+ }
+ }
+}
+
+pub struct State<'a> {
+ pub s: pp::Printer,
+ comments: Option<Comments<'a>>,
+ attrs: &'a dyn Fn(hir::HirId) -> &'a [ast::Attribute],
+ ann: &'a (dyn PpAnn + 'a),
+}
+
+impl<'a> State<'a> {
+ pub fn print_node(&mut self, node: Node<'_>) {
+ match node {
+ Node::Param(a) => self.print_param(a),
+ Node::Item(a) => self.print_item(a),
+ Node::ForeignItem(a) => self.print_foreign_item(a),
+ Node::TraitItem(a) => self.print_trait_item(a),
+ Node::ImplItem(a) => self.print_impl_item(a),
+ Node::Variant(a) => self.print_variant(a),
+ Node::AnonConst(a) => self.print_anon_const(a),
+ Node::Expr(a) => self.print_expr(a),
+ Node::Stmt(a) => self.print_stmt(a),
+ Node::PathSegment(a) => self.print_path_segment(a),
+ Node::Ty(a) => self.print_type(a),
+ Node::TypeBinding(a) => self.print_type_binding(a),
+ Node::TraitRef(a) => self.print_trait_ref(a),
+ Node::Pat(a) => self.print_pat(a),
+ Node::Arm(a) => self.print_arm(a),
+ Node::Infer(_) => self.word("_"),
+ Node::Block(a) => {
+ // Containing cbox, will be closed by print-block at `}`.
+ self.cbox(INDENT_UNIT);
+ // Head-ibox, will be closed by print-block after `{`.
+ self.ibox(0);
+ self.print_block(a);
+ }
+ Node::Lifetime(a) => self.print_lifetime(a),
+ Node::GenericParam(_) => panic!("cannot print Node::GenericParam"),
+ Node::Field(_) => panic!("cannot print Node::Field"),
+ // These cases do not carry enough information in the
+ // `hir_map` to reconstruct their full structure for pretty
+ // printing.
+ Node::Ctor(..) => panic!("cannot print isolated Ctor"),
+ Node::Local(a) => self.print_local_decl(a),
+ Node::Crate(..) => panic!("cannot print Crate"),
+ }
+ }
+}
+
+impl std::ops::Deref for State<'_> {
+ type Target = pp::Printer;
+ fn deref(&self) -> &Self::Target {
+ &self.s
+ }
+}
+
+impl std::ops::DerefMut for State<'_> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.s
+ }
+}
+
+impl<'a> PrintState<'a> for State<'a> {
+ fn comments(&mut self) -> &mut Option<Comments<'a>> {
+ &mut self.comments
+ }
+
+ fn print_ident(&mut self, ident: Ident) {
+ self.word(IdentPrinter::for_ast_ident(ident, ident.is_raw_guess()).to_string());
+ self.ann.post(self, AnnNode::Name(&ident.name))
+ }
+
+ fn print_generic_args(&mut self, _: &ast::GenericArgs, _colons_before_params: bool) {
+ panic!("AST generic args printed by HIR pretty-printer");
+ }
+}
+
+pub const INDENT_UNIT: isize = 4;
+
+/// Requires you to pass the input filename and its source text so that
+/// it can scan the input for comments to copy forward.
+pub fn print_crate<'a>(
+ sm: &'a SourceMap,
+ krate: &hir::Mod<'_>,
+ filename: FileName,
+ input: String,
+ attrs: &'a dyn Fn(hir::HirId) -> &'a [ast::Attribute],
+ ann: &'a dyn PpAnn,
+) -> String {
+ let mut s = State::new_from_input(sm, filename, input, attrs, ann);
+
+ // When printing the AST, we sometimes need to inject `#[no_std]` here.
+ // Since you can't compile the HIR, it's not necessary.
+
+ s.print_mod(krate, (*attrs)(hir::CRATE_HIR_ID));
+ s.print_remaining_comments();
+ s.s.eof()
+}
+
+impl<'a> State<'a> {
+ pub fn new_from_input(
+ sm: &'a SourceMap,
+ filename: FileName,
+ input: String,
+ attrs: &'a dyn Fn(hir::HirId) -> &'a [ast::Attribute],
+ ann: &'a dyn PpAnn,
+ ) -> State<'a> {
+ State {
+ s: pp::Printer::new(),
+ comments: Some(Comments::new(sm, filename, input)),
+ attrs,
+ ann,
+ }
+ }
+
+ fn attrs(&self, id: hir::HirId) -> &'a [ast::Attribute] {
+ (self.attrs)(id)
+ }
+}
+
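+/// Helper behind the `*_to_string` functions below: runs `f` against a fresh
+/// printer and returns the rendered text, e.g.
+/// `to_string(NO_ANN, |s| s.print_type(ty))`.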
+pub fn to_string<F>(ann: &dyn PpAnn, f: F) -> String
+where
+ F: FnOnce(&mut State<'_>),
+{
+ let mut printer = State { s: pp::Printer::new(), comments: None, attrs: &|_| &[], ann };
+ f(&mut printer);
+ printer.s.eof()
+}
+
+pub fn generic_params_to_string(generic_params: &[GenericParam<'_>]) -> String {
+ to_string(NO_ANN, |s| s.print_generic_params(generic_params))
+}
+
+pub fn bounds_to_string<'b>(bounds: impl IntoIterator<Item = &'b hir::GenericBound<'b>>) -> String {
+ to_string(NO_ANN, |s| s.print_bounds("", bounds))
+}
+
+pub fn ty_to_string(ty: &hir::Ty<'_>) -> String {
+ to_string(NO_ANN, |s| s.print_type(ty))
+}
+
+pub fn path_segment_to_string(segment: &hir::PathSegment<'_>) -> String {
+ to_string(NO_ANN, |s| s.print_path_segment(segment))
+}
+
+pub fn path_to_string(segment: &hir::Path<'_>) -> String {
+ to_string(NO_ANN, |s| s.print_path(segment, false))
+}
+
+pub fn qpath_to_string(segment: &hir::QPath<'_>) -> String {
+ to_string(NO_ANN, |s| s.print_qpath(segment, false))
+}
+
+pub fn fn_to_string(
+ decl: &hir::FnDecl<'_>,
+ header: hir::FnHeader,
+ name: Option<Symbol>,
+ generics: &hir::Generics<'_>,
+ arg_names: &[Ident],
+ body_id: Option<hir::BodyId>,
+) -> String {
+ to_string(NO_ANN, |s| s.print_fn(decl, header, name, generics, arg_names, body_id))
+}
+
+pub fn enum_def_to_string(
+ enum_definition: &hir::EnumDef<'_>,
+ generics: &hir::Generics<'_>,
+ name: Symbol,
+ span: rustc_span::Span,
+) -> String {
+ to_string(NO_ANN, |s| s.print_enum_def(enum_definition, generics, name, span))
+}
+
+impl<'a> State<'a> {
+ pub fn bclose_maybe_open(&mut self, span: rustc_span::Span, close_box: bool) {
+ self.maybe_print_comment(span.hi());
+ self.break_offset_if_not_bol(1, -(INDENT_UNIT as isize));
+ self.word("}");
+ if close_box {
+ self.end(); // close the outer-box
+ }
+ }
+
+ pub fn bclose(&mut self, span: rustc_span::Span) {
+ self.bclose_maybe_open(span, true)
+ }
+
+ pub fn commasep_cmnt<T, F, G>(&mut self, b: Breaks, elts: &[T], mut op: F, mut get_span: G)
+ where
+ F: FnMut(&mut State<'_>, &T),
+ G: FnMut(&T) -> rustc_span::Span,
+ {
+ self.rbox(0, b);
+ let len = elts.len();
+ let mut i = 0;
+ for elt in elts {
+ self.maybe_print_comment(get_span(elt).hi());
+ op(self, elt);
+ i += 1;
+ if i < len {
+ self.word(",");
+ self.maybe_print_trailing_comment(get_span(elt), Some(get_span(&elts[i]).hi()));
+ self.space_if_not_bol();
+ }
+ }
+ self.end();
+ }
+
+ pub fn commasep_exprs(&mut self, b: Breaks, exprs: &[hir::Expr<'_>]) {
+ self.commasep_cmnt(b, exprs, |s, e| s.print_expr(e), |e| e.span);
+ }
+
+ pub fn print_mod(&mut self, _mod: &hir::Mod<'_>, attrs: &[ast::Attribute]) {
+ self.print_inner_attributes(attrs);
+ for &item_id in _mod.item_ids {
+ self.ann.nested(self, Nested::Item(item_id));
+ }
+ }
+
+ pub fn print_opt_lifetime(&mut self, lifetime: &hir::Lifetime) {
+ if !lifetime.is_elided() {
+ self.print_lifetime(lifetime);
+ self.nbsp();
+ }
+ }
+
+ pub fn print_type(&mut self, ty: &hir::Ty<'_>) {
+ self.maybe_print_comment(ty.span.lo());
+ self.ibox(0);
+ match ty.kind {
+ hir::TyKind::Slice(ty) => {
+ self.word("[");
+ self.print_type(ty);
+ self.word("]");
+ }
+ hir::TyKind::Ptr(ref mt) => {
+ self.word("*");
+ self.print_mt(mt, true);
+ }
+ hir::TyKind::Rptr(ref lifetime, ref mt) => {
+ self.word("&");
+ self.print_opt_lifetime(lifetime);
+ self.print_mt(mt, false);
+ }
+ hir::TyKind::Never => {
+ self.word("!");
+ }
+ hir::TyKind::Tup(elts) => {
+ self.popen();
+ self.commasep(Inconsistent, elts, |s, ty| s.print_type(ty));
+ if elts.len() == 1 {
+ self.word(",");
+ }
+ self.pclose();
+ }
+ hir::TyKind::BareFn(f) => {
+ self.print_ty_fn(f.abi, f.unsafety, f.decl, None, f.generic_params, f.param_names);
+ }
+ hir::TyKind::OpaqueDef(..) => self.word("/*impl Trait*/"),
+ hir::TyKind::Path(ref qpath) => self.print_qpath(qpath, false),
+ hir::TyKind::TraitObject(bounds, ref lifetime, syntax) => {
+ if syntax == ast::TraitObjectSyntax::Dyn {
+ self.word_space("dyn");
+ }
+ let mut first = true;
+ for bound in bounds {
+ if first {
+ first = false;
+ } else {
+ self.nbsp();
+ self.word_space("+");
+ }
+ self.print_poly_trait_ref(bound);
+ }
+ if !lifetime.is_elided() {
+ self.nbsp();
+ self.word_space("+");
+ self.print_lifetime(lifetime);
+ }
+ }
+ hir::TyKind::Array(ty, ref length) => {
+ self.word("[");
+ self.print_type(ty);
+ self.word("; ");
+ self.print_array_length(length);
+ self.word("]");
+ }
+ hir::TyKind::Typeof(ref e) => {
+ self.word("typeof(");
+ self.print_anon_const(e);
+ self.word(")");
+ }
+ hir::TyKind::Err => {
+ self.popen();
+ self.word("/*ERROR*/");
+ self.pclose();
+ }
+ hir::TyKind::Infer => {
+ self.word("_");
+ }
+ }
+ self.end()
+ }
+
+ pub fn print_foreign_item(&mut self, item: &hir::ForeignItem<'_>) {
+ self.hardbreak_if_not_bol();
+ self.maybe_print_comment(item.span.lo());
+ self.print_outer_attributes(self.attrs(item.hir_id()));
+ match item.kind {
+ hir::ForeignItemKind::Fn(decl, arg_names, generics) => {
+ self.head("");
+ self.print_fn(
+ decl,
+ hir::FnHeader {
+ unsafety: hir::Unsafety::Normal,
+ constness: hir::Constness::NotConst,
+ abi: Abi::Rust,
+ asyncness: hir::IsAsync::NotAsync,
+ },
+ Some(item.ident.name),
+ generics,
+ arg_names,
+ None,
+ );
+ self.end(); // end head-ibox
+ self.word(";");
+ self.end() // end the outer fn box
+ }
+ hir::ForeignItemKind::Static(t, m) => {
+ self.head("static");
+ if m == hir::Mutability::Mut {
+ self.word_space("mut");
+ }
+ self.print_ident(item.ident);
+ self.word_space(":");
+ self.print_type(t);
+ self.word(";");
+ self.end(); // end the head-ibox
+ self.end() // end the outer cbox
+ }
+ hir::ForeignItemKind::Type => {
+ self.head("type");
+ self.print_ident(item.ident);
+ self.word(";");
+ self.end(); // end the head-ibox
+ self.end() // end the outer cbox
+ }
+ }
+ }
+
+ fn print_associated_const(
+ &mut self,
+ ident: Ident,
+ ty: &hir::Ty<'_>,
+ default: Option<hir::BodyId>,
+ ) {
+ self.head("");
+ self.word_space("const");
+ self.print_ident(ident);
+ self.word_space(":");
+ self.print_type(ty);
+ if let Some(expr) = default {
+ self.space();
+ self.word_space("=");
+ self.ann.nested(self, Nested::Body(expr));
+ }
+ self.word(";")
+ }
+
+ fn print_associated_type(
+ &mut self,
+ ident: Ident,
+ generics: &hir::Generics<'_>,
+ bounds: Option<hir::GenericBounds<'_>>,
+ ty: Option<&hir::Ty<'_>>,
+ ) {
+ self.word_space("type");
+ self.print_ident(ident);
+ self.print_generic_params(generics.params);
+ if let Some(bounds) = bounds {
+ self.print_bounds(":", bounds);
+ }
+ self.print_where_clause(generics);
+ if let Some(ty) = ty {
+ self.space();
+ self.word_space("=");
+ self.print_type(ty);
+ }
+ self.word(";")
+ }
+
+ fn print_item_type(
+ &mut self,
+ item: &hir::Item<'_>,
+ generics: &hir::Generics<'_>,
+ inner: impl Fn(&mut Self),
+ ) {
+ self.head("type");
+ self.print_ident(item.ident);
+ self.print_generic_params(generics.params);
+ self.end(); // end the inner ibox
+
+ self.print_where_clause(generics);
+ self.space();
+ inner(self);
+ self.word(";");
+ self.end(); // end the outer ibox
+ }
+
+ /// Pretty-print an item
+ pub fn print_item(&mut self, item: &hir::Item<'_>) {
+ self.hardbreak_if_not_bol();
+ self.maybe_print_comment(item.span.lo());
+ let attrs = self.attrs(item.hir_id());
+ self.print_outer_attributes(attrs);
+ self.ann.pre(self, AnnNode::Item(item));
+ match item.kind {
+ hir::ItemKind::ExternCrate(orig_name) => {
+ self.head("extern crate");
+ if let Some(orig_name) = orig_name {
+ self.print_name(orig_name);
+ self.space();
+ self.word("as");
+ self.space();
+ }
+ self.print_ident(item.ident);
+ self.word(";");
+ self.end(); // end inner head-block
+ self.end(); // end outer head-block
+ }
+ hir::ItemKind::Use(path, kind) => {
+ self.head("use");
+ self.print_path(path, false);
+
+ match kind {
+ hir::UseKind::Single => {
+ if path.segments.last().unwrap().ident != item.ident {
+ self.space();
+ self.word_space("as");
+ self.print_ident(item.ident);
+ }
+ self.word(";");
+ }
+ hir::UseKind::Glob => self.word("::*;"),
+ hir::UseKind::ListStem => self.word("::{};"),
+ }
+ self.end(); // end inner head-block
+ self.end(); // end outer head-block
+ }
+ hir::ItemKind::Static(ty, m, expr) => {
+ self.head("static");
+ if m == hir::Mutability::Mut {
+ self.word_space("mut");
+ }
+ self.print_ident(item.ident);
+ self.word_space(":");
+ self.print_type(ty);
+ self.space();
+ self.end(); // end the head-ibox
+
+ self.word_space("=");
+ self.ann.nested(self, Nested::Body(expr));
+ self.word(";");
+ self.end(); // end the outer cbox
+ }
+ hir::ItemKind::Const(ty, expr) => {
+ self.head("const");
+ self.print_ident(item.ident);
+ self.word_space(":");
+ self.print_type(ty);
+ self.space();
+ self.end(); // end the head-ibox
+
+ self.word_space("=");
+ self.ann.nested(self, Nested::Body(expr));
+ self.word(";");
+ self.end(); // end the outer cbox
+ }
+ hir::ItemKind::Fn(ref sig, param_names, body) => {
+ self.head("");
+ self.print_fn(
+ sig.decl,
+ sig.header,
+ Some(item.ident.name),
+ param_names,
+ &[],
+ Some(body),
+ );
+ self.word(" ");
+ self.end(); // need to close a box
+ self.end(); // need to close a box
+ self.ann.nested(self, Nested::Body(body));
+ }
+ hir::ItemKind::Macro(ref macro_def, _) => {
+ self.print_mac_def(macro_def, &item.ident, item.span, |_| {});
+ }
+ hir::ItemKind::Mod(ref _mod) => {
+ self.head("mod");
+ self.print_ident(item.ident);
+ self.nbsp();
+ self.bopen();
+ self.print_mod(_mod, attrs);
+ self.bclose(item.span);
+ }
+ hir::ItemKind::ForeignMod { abi, items } => {
+ self.head("extern");
+ self.word_nbsp(abi.to_string());
+ self.bopen();
+ self.print_inner_attributes(self.attrs(item.hir_id()));
+ for item in items {
+ self.ann.nested(self, Nested::ForeignItem(item.id));
+ }
+ self.bclose(item.span);
+ }
+ hir::ItemKind::GlobalAsm(asm) => {
+ self.head("global_asm!");
+ self.print_inline_asm(asm);
+ self.end()
+ }
+ hir::ItemKind::TyAlias(ty, generics) => {
+ self.print_item_type(item, generics, |state| {
+ state.word_space("=");
+ state.print_type(ty);
+ });
+ }
+ hir::ItemKind::OpaqueTy(ref opaque_ty) => {
+ self.print_item_type(item, opaque_ty.generics, |state| {
+ let mut real_bounds = Vec::with_capacity(opaque_ty.bounds.len());
+ for b in opaque_ty.bounds {
+ if let GenericBound::Trait(ptr, hir::TraitBoundModifier::Maybe) = b {
+ state.space();
+ state.word_space("for ?");
+ state.print_trait_ref(&ptr.trait_ref);
+ } else {
+ real_bounds.push(b);
+ }
+ }
+ state.print_bounds("= impl", real_bounds);
+ });
+ }
+ hir::ItemKind::Enum(ref enum_definition, params) => {
+ self.print_enum_def(enum_definition, params, item.ident.name, item.span);
+ }
+ hir::ItemKind::Struct(ref struct_def, generics) => {
+ self.head("struct");
+ self.print_struct(struct_def, generics, item.ident.name, item.span, true);
+ }
+ hir::ItemKind::Union(ref struct_def, generics) => {
+ self.head("union");
+ self.print_struct(struct_def, generics, item.ident.name, item.span, true);
+ }
+ hir::ItemKind::Impl(&hir::Impl {
+ unsafety,
+ polarity,
+ defaultness,
+ constness,
+ defaultness_span: _,
+ generics,
+ ref of_trait,
+ self_ty,
+ items,
+ }) => {
+ self.head("");
+ self.print_defaultness(defaultness);
+ self.print_unsafety(unsafety);
+ self.word_nbsp("impl");
+
+ if !generics.params.is_empty() {
+ self.print_generic_params(generics.params);
+ self.space();
+ }
+
+ if constness == hir::Constness::Const {
+ self.word_nbsp("const");
+ }
+
+ if let hir::ImplPolarity::Negative(_) = polarity {
+ self.word("!");
+ }
+
+ if let Some(t) = of_trait {
+ self.print_trait_ref(t);
+ self.space();
+ self.word_space("for");
+ }
+
+ self.print_type(self_ty);
+ self.print_where_clause(generics);
+
+ self.space();
+ self.bopen();
+ self.print_inner_attributes(attrs);
+ for impl_item in items {
+ self.ann.nested(self, Nested::ImplItem(impl_item.id));
+ }
+ self.bclose(item.span);
+ }
+ hir::ItemKind::Trait(is_auto, unsafety, generics, bounds, trait_items) => {
+ self.head("");
+ self.print_is_auto(is_auto);
+ self.print_unsafety(unsafety);
+ self.word_nbsp("trait");
+ self.print_ident(item.ident);
+ self.print_generic_params(generics.params);
+ let mut real_bounds = Vec::with_capacity(bounds.len());
+ for b in bounds {
+ if let GenericBound::Trait(ptr, hir::TraitBoundModifier::Maybe) = b {
+ self.space();
+ self.word_space("for ?");
+ self.print_trait_ref(&ptr.trait_ref);
+ } else {
+ real_bounds.push(b);
+ }
+ }
+ self.print_bounds(":", real_bounds);
+ self.print_where_clause(generics);
+ self.word(" ");
+ self.bopen();
+ for trait_item in trait_items {
+ self.ann.nested(self, Nested::TraitItem(trait_item.id));
+ }
+ self.bclose(item.span);
+ }
+ hir::ItemKind::TraitAlias(generics, bounds) => {
+ self.head("trait");
+ self.print_ident(item.ident);
+ self.print_generic_params(generics.params);
+ let mut real_bounds = Vec::with_capacity(bounds.len());
+ // FIXME(durka): this seems to be quite outdated syntax
+ for b in bounds {
+ if let GenericBound::Trait(ptr, hir::TraitBoundModifier::Maybe) = b {
+ self.space();
+ self.word_space("for ?");
+ self.print_trait_ref(&ptr.trait_ref);
+ } else {
+ real_bounds.push(b);
+ }
+ }
+ self.nbsp();
+ self.print_bounds("=", real_bounds);
+ self.print_where_clause(generics);
+ self.word(";");
+ self.end(); // end inner head-block
+ self.end(); // end outer head-block
+ }
+ }
+ self.ann.post(self, AnnNode::Item(item))
+ }
+
+ pub fn print_trait_ref(&mut self, t: &hir::TraitRef<'_>) {
+ self.print_path(t.path, false);
+ }
+
+ fn print_formal_generic_params(&mut self, generic_params: &[hir::GenericParam<'_>]) {
+ if !generic_params.is_empty() {
+ self.word("for");
+ self.print_generic_params(generic_params);
+ self.nbsp();
+ }
+ }
+
+ fn print_poly_trait_ref(&mut self, t: &hir::PolyTraitRef<'_>) {
+ self.print_formal_generic_params(t.bound_generic_params);
+ self.print_trait_ref(&t.trait_ref);
+ }
+
+ pub fn print_enum_def(
+ &mut self,
+ enum_definition: &hir::EnumDef<'_>,
+ generics: &hir::Generics<'_>,
+ name: Symbol,
+ span: rustc_span::Span,
+ ) {
+ self.head("enum");
+ self.print_name(name);
+ self.print_generic_params(generics.params);
+ self.print_where_clause(generics);
+ self.space();
+ self.print_variants(enum_definition.variants, span);
+ }
+
+ pub fn print_variants(&mut self, variants: &[hir::Variant<'_>], span: rustc_span::Span) {
+ self.bopen();
+ for v in variants {
+ self.space_if_not_bol();
+ self.maybe_print_comment(v.span.lo());
+ self.print_outer_attributes(self.attrs(v.id));
+ self.ibox(INDENT_UNIT);
+ self.print_variant(v);
+ self.word(",");
+ self.end();
+ self.maybe_print_trailing_comment(v.span, None);
+ }
+ self.bclose(span)
+ }
+
+ pub fn print_defaultness(&mut self, defaultness: hir::Defaultness) {
+ match defaultness {
+ hir::Defaultness::Default { .. } => self.word_nbsp("default"),
+ hir::Defaultness::Final => (),
+ }
+ }
+
+ pub fn print_struct(
+ &mut self,
+ struct_def: &hir::VariantData<'_>,
+ generics: &hir::Generics<'_>,
+ name: Symbol,
+ span: rustc_span::Span,
+ print_finalizer: bool,
+ ) {
+ self.print_name(name);
+ self.print_generic_params(generics.params);
+ match struct_def {
+ hir::VariantData::Tuple(..) | hir::VariantData::Unit(..) => {
+ if let hir::VariantData::Tuple(..) = struct_def {
+ self.popen();
+ self.commasep(Inconsistent, struct_def.fields(), |s, field| {
+ s.maybe_print_comment(field.span.lo());
+ s.print_outer_attributes(s.attrs(field.hir_id));
+ s.print_type(field.ty);
+ });
+ self.pclose();
+ }
+ self.print_where_clause(generics);
+ if print_finalizer {
+ self.word(";");
+ }
+ self.end();
+ self.end() // close the outer-box
+ }
+ hir::VariantData::Struct(..) => {
+ self.print_where_clause(generics);
+ self.nbsp();
+ self.bopen();
+ self.hardbreak_if_not_bol();
+
+ for field in struct_def.fields() {
+ self.hardbreak_if_not_bol();
+ self.maybe_print_comment(field.span.lo());
+ self.print_outer_attributes(self.attrs(field.hir_id));
+ self.print_ident(field.ident);
+ self.word_nbsp(":");
+ self.print_type(field.ty);
+ self.word(",");
+ }
+
+ self.bclose(span)
+ }
+ }
+ }
+
+ pub fn print_variant(&mut self, v: &hir::Variant<'_>) {
+ self.head("");
+ let generics = hir::Generics::empty();
+ self.print_struct(&v.data, generics, v.ident.name, v.span, false);
+ if let Some(ref d) = v.disr_expr {
+ self.space();
+ self.word_space("=");
+ self.print_anon_const(d);
+ }
+ }
+ pub fn print_method_sig(
+ &mut self,
+ ident: Ident,
+ m: &hir::FnSig<'_>,
+ generics: &hir::Generics<'_>,
+ arg_names: &[Ident],
+ body_id: Option<hir::BodyId>,
+ ) {
+ self.print_fn(m.decl, m.header, Some(ident.name), generics, arg_names, body_id);
+ }
+
+ pub fn print_trait_item(&mut self, ti: &hir::TraitItem<'_>) {
+ self.ann.pre(self, AnnNode::SubItem(ti.hir_id()));
+ self.hardbreak_if_not_bol();
+ self.maybe_print_comment(ti.span.lo());
+ self.print_outer_attributes(self.attrs(ti.hir_id()));
+ match ti.kind {
+ hir::TraitItemKind::Const(ty, default) => {
+ self.print_associated_const(ti.ident, ty, default);
+ }
+ hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Required(arg_names)) => {
+ self.print_method_sig(ti.ident, sig, ti.generics, arg_names, None);
+ self.word(";");
+ }
+ hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => {
+ self.head("");
+ self.print_method_sig(ti.ident, sig, ti.generics, &[], Some(body));
+ self.nbsp();
+ self.end(); // need to close a box
+ self.end(); // need to close a box
+ self.ann.nested(self, Nested::Body(body));
+ }
+ hir::TraitItemKind::Type(bounds, default) => {
+ self.print_associated_type(ti.ident, ti.generics, Some(bounds), default);
+ }
+ }
+ self.ann.post(self, AnnNode::SubItem(ti.hir_id()))
+ }
+
+ pub fn print_impl_item(&mut self, ii: &hir::ImplItem<'_>) {
+ self.ann.pre(self, AnnNode::SubItem(ii.hir_id()));
+ self.hardbreak_if_not_bol();
+ self.maybe_print_comment(ii.span.lo());
+ self.print_outer_attributes(self.attrs(ii.hir_id()));
+
+ match ii.kind {
+ hir::ImplItemKind::Const(ty, expr) => {
+ self.print_associated_const(ii.ident, ty, Some(expr));
+ }
+ hir::ImplItemKind::Fn(ref sig, body) => {
+ self.head("");
+ self.print_method_sig(ii.ident, sig, ii.generics, &[], Some(body));
+ self.nbsp();
+ self.end(); // need to close a box
+ self.end(); // need to close a box
+ self.ann.nested(self, Nested::Body(body));
+ }
+ hir::ImplItemKind::TyAlias(ty) => {
+ self.print_associated_type(ii.ident, ii.generics, None, Some(ty));
+ }
+ }
+ self.ann.post(self, AnnNode::SubItem(ii.hir_id()))
+ }
+
+ pub fn print_local(
+ &mut self,
+ init: Option<&hir::Expr<'_>>,
+ els: Option<&hir::Block<'_>>,
+ decl: impl Fn(&mut Self),
+ ) {
+ self.space_if_not_bol();
+ self.ibox(INDENT_UNIT);
+ self.word_nbsp("let");
+
+ self.ibox(INDENT_UNIT);
+ decl(self);
+ self.end();
+
+ if let Some(init) = init {
+ self.nbsp();
+ self.word_space("=");
+ self.print_expr(init);
+ }
+
+ if let Some(els) = els {
+ self.nbsp();
+ self.word_space("else");
+ self.print_block(els);
+ }
+
+ self.end()
+ }
+
+ pub fn print_stmt(&mut self, st: &hir::Stmt<'_>) {
+ self.maybe_print_comment(st.span.lo());
+ match st.kind {
+ hir::StmtKind::Local(loc) => {
+ self.print_local(loc.init, loc.els, |this| this.print_local_decl(loc));
+ }
+ hir::StmtKind::Item(item) => self.ann.nested(self, Nested::Item(item)),
+ hir::StmtKind::Expr(expr) => {
+ self.space_if_not_bol();
+ self.print_expr(expr);
+ }
+ hir::StmtKind::Semi(expr) => {
+ self.space_if_not_bol();
+ self.print_expr(expr);
+ self.word(";");
+ }
+ }
+ if stmt_ends_with_semi(&st.kind) {
+ self.word(";");
+ }
+ self.maybe_print_trailing_comment(st.span, None)
+ }
+
+ pub fn print_block(&mut self, blk: &hir::Block<'_>) {
+ self.print_block_with_attrs(blk, &[])
+ }
+
+ pub fn print_block_unclosed(&mut self, blk: &hir::Block<'_>) {
+ self.print_block_maybe_unclosed(blk, &[], false)
+ }
+
+ pub fn print_block_with_attrs(&mut self, blk: &hir::Block<'_>, attrs: &[ast::Attribute]) {
+ self.print_block_maybe_unclosed(blk, attrs, true)
+ }
+
+ pub fn print_block_maybe_unclosed(
+ &mut self,
+ blk: &hir::Block<'_>,
+ attrs: &[ast::Attribute],
+ close_box: bool,
+ ) {
+ match blk.rules {
+ hir::BlockCheckMode::UnsafeBlock(..) => self.word_space("unsafe"),
+ hir::BlockCheckMode::DefaultBlock => (),
+ }
+ self.maybe_print_comment(blk.span.lo());
+ self.ann.pre(self, AnnNode::Block(blk));
+ self.bopen();
+
+ self.print_inner_attributes(attrs);
+
+ for st in blk.stmts {
+ self.print_stmt(st);
+ }
+ if let Some(expr) = blk.expr {
+ self.space_if_not_bol();
+ self.print_expr(expr);
+ self.maybe_print_trailing_comment(expr.span, Some(blk.span.hi()));
+ }
+ self.bclose_maybe_open(blk.span, close_box);
+ self.ann.post(self, AnnNode::Block(blk))
+ }
+
+ fn print_else(&mut self, els: Option<&hir::Expr<'_>>) {
+ if let Some(els_inner) = els {
+ match els_inner.kind {
+ // Another `else if` block.
+ hir::ExprKind::If(i, then, e) => {
+ self.cbox(INDENT_UNIT - 1);
+ self.ibox(0);
+ self.word(" else if ");
+ self.print_expr_as_cond(i);
+ self.space();
+ self.print_expr(then);
+ self.print_else(e);
+ }
+ // Final `else` block.
+ hir::ExprKind::Block(b, _) => {
+ self.cbox(INDENT_UNIT - 1);
+ self.ibox(0);
+ self.word(" else ");
+ self.print_block(b);
+ }
+ // Constraints would be great here!
+ _ => {
+ panic!("print_if saw if with weird alternative");
+ }
+ }
+ }
+ }
+
+ pub fn print_if(
+ &mut self,
+ test: &hir::Expr<'_>,
+ blk: &hir::Expr<'_>,
+ elseopt: Option<&hir::Expr<'_>>,
+ ) {
+ self.head("if");
+ self.print_expr_as_cond(test);
+ self.space();
+ self.print_expr(blk);
+ self.print_else(elseopt)
+ }
+
+ pub fn print_array_length(&mut self, len: &hir::ArrayLen) {
+ match len {
+ hir::ArrayLen::Infer(_, _) => self.word("_"),
+ hir::ArrayLen::Body(ct) => self.print_anon_const(ct),
+ }
+ }
+
+ pub fn print_anon_const(&mut self, constant: &hir::AnonConst) {
+ self.ann.nested(self, Nested::Body(constant.body))
+ }
+
+ fn print_call_post(&mut self, args: &[hir::Expr<'_>]) {
+ self.popen();
+ self.commasep_exprs(Inconsistent, args);
+ self.pclose()
+ }
+
+ fn print_expr_maybe_paren(&mut self, expr: &hir::Expr<'_>, prec: i8) {
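+ // Parenthesize `expr` when it binds more loosely than the context
+ // requires, i.e. when its precedence is below `prec`.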
+ self.print_expr_cond_paren(expr, expr.precedence().order() < prec)
+ }
+
+ /// Prints an expr using syntax that's acceptable in a condition position, such as the `cond` in
+ /// `if cond { ... }`.
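+ ///
+ /// For example (an illustrative note, not from the original source): an
+ /// exterior struct literal such as `x == X { y: 1 }` would be printed
+ /// parenthesized here, as `(x == X { y: 1 })`, since the bare form is
+ /// ambiguous with the block that follows the condition.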
+ pub fn print_expr_as_cond(&mut self, expr: &hir::Expr<'_>) {
+ self.print_expr_cond_paren(expr, Self::cond_needs_par(expr))
+ }
+
+ /// Prints `expr` or `(expr)` when `needs_par` holds.
+ fn print_expr_cond_paren(&mut self, expr: &hir::Expr<'_>, needs_par: bool) {
+ if needs_par {
+ self.popen();
+ }
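+ // `DropTemps` is an invisible wrapper introduced during HIR lowering (it
+ // only adjusts when temporaries are dropped), so print the inner
+ // expression to match the original source.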
+ if let hir::ExprKind::DropTemps(actual_expr) = expr.kind {
+ self.print_expr(actual_expr);
+ } else {
+ self.print_expr(expr);
+ }
+ if needs_par {
+ self.pclose();
+ }
+ }
+
+ /// Print a `let pat = expr` expression.
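+ ///
+ /// For example (illustrative), the scrutinee of `if let Some(x) = f() { .. }`
+ /// is printed by this method as `let Some(x) = f()`, parenthesizing `f()`
+ /// only when `needs_par_as_let_scrutinee` says its precedence requires it.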
+ fn print_let(&mut self, pat: &hir::Pat<'_>, ty: Option<&hir::Ty<'_>>, init: &hir::Expr<'_>) {
+ self.word_space("let");
+ self.print_pat(pat);
+ if let Some(ty) = ty {
+ self.word_space(":");
+ self.print_type(ty);
+ }
+ self.space();
+ self.word_space("=");
+ let npals = || parser::needs_par_as_let_scrutinee(init.precedence().order());
+ self.print_expr_cond_paren(init, Self::cond_needs_par(init) || npals())
+ }
+
+ // Does `expr` need parentheses when printed in a condition position?
+ //
+ // These cases need parens due to the parse error observed in #26461: `if return {}`
+ // parses as the erroneous construct `if (return {})`, not `if (return) {}`.
+ fn cond_needs_par(expr: &hir::Expr<'_>) -> bool {
+ match expr.kind {
+ hir::ExprKind::Break(..) | hir::ExprKind::Closure { .. } | hir::ExprKind::Ret(..) => {
+ true
+ }
+ _ => contains_exterior_struct_lit(expr),
+ }
+ }
+
+ fn print_expr_vec(&mut self, exprs: &[hir::Expr<'_>]) {
+ self.ibox(INDENT_UNIT);
+ self.word("[");
+ self.commasep_exprs(Inconsistent, exprs);
+ self.word("]");
+ self.end()
+ }
+
+ fn print_expr_anon_const(&mut self, anon_const: &hir::AnonConst) {
+ self.ibox(INDENT_UNIT);
+ self.word_space("const");
+ self.print_anon_const(anon_const);
+ self.end()
+ }
+
+ fn print_expr_repeat(&mut self, element: &hir::Expr<'_>, count: &hir::ArrayLen) {
+ self.ibox(INDENT_UNIT);
+ self.word("[");
+ self.print_expr(element);
+ self.word_space(";");
+ self.print_array_length(count);
+ self.word("]");
+ self.end()
+ }
+
+ fn print_expr_struct(
+ &mut self,
+ qpath: &hir::QPath<'_>,
+ fields: &[hir::ExprField<'_>],
+ wth: Option<&hir::Expr<'_>>,
+ ) {
+ self.print_qpath(qpath, true);
+ self.word("{");
+ self.commasep_cmnt(
+ Consistent,
+ fields,
+ |s, field| {
+ s.ibox(INDENT_UNIT);
+ if !field.is_shorthand {
+ s.print_ident(field.ident);
+ s.word_space(":");
+ }
+ s.print_expr(field.expr);
+ s.end()
+ },
+ |f| f.span,
+ );
+ if let Some(expr) = wth {
+ self.ibox(INDENT_UNIT);
+ if !fields.is_empty() {
+ self.word(",");
+ self.space();
+ }
+ self.word("..");
+ self.print_expr(expr);
+ self.end();
+ } else if !fields.is_empty() {
+ self.word(",");
+ }
+
+ self.word("}");
+ }
+
+ fn print_expr_tup(&mut self, exprs: &[hir::Expr<'_>]) {
+ self.popen();
+ self.commasep_exprs(Inconsistent, exprs);
+ if exprs.len() == 1 {
+ self.word(",");
+ }
+ self.pclose()
+ }
+
+ fn print_expr_call(&mut self, func: &hir::Expr<'_>, args: &[hir::Expr<'_>]) {
+ let prec = match func.kind {
+ hir::ExprKind::Field(..) => parser::PREC_FORCE_PAREN,
+ _ => parser::PREC_POSTFIX,
+ };
+
+ self.print_expr_maybe_paren(func, prec);
+ self.print_call_post(args)
+ }
+
+ fn print_expr_method_call(&mut self, segment: &hir::PathSegment<'_>, args: &[hir::Expr<'_>]) {
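+ // In this HIR representation the receiver is `args[0]`; the remaining
+ // elements are the actual call arguments.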
+ let base_args = &args[1..];
+ self.print_expr_maybe_paren(&args[0], parser::PREC_POSTFIX);
+ self.word(".");
+ self.print_ident(segment.ident);
+
+ let generic_args = segment.args();
+ if !generic_args.args.is_empty() || !generic_args.bindings.is_empty() {
+ self.print_generic_args(generic_args, segment.infer_args, true);
+ }
+
+ self.print_call_post(base_args)
+ }
+
+ fn print_expr_binary(&mut self, op: hir::BinOp, lhs: &hir::Expr<'_>, rhs: &hir::Expr<'_>) {
+ let assoc_op = bin_op_to_assoc_op(op.node);
+ let prec = assoc_op.precedence() as i8;
+ let fixity = assoc_op.fixity();
+
+ let (left_prec, right_prec) = match fixity {
+ Fixity::Left => (prec, prec + 1),
+ Fixity::Right => (prec + 1, prec),
+ Fixity::None => (prec + 1, prec + 1),
+ };
+
+ let left_prec = match (&lhs.kind, op.node) {
+ // These cases need parens: `x as i32 < y` has the parser thinking that `i32 < y` is
+ // the beginning of a path type. It starts trying to parse `x as (i32 < y ...` instead
+ // of `(x as i32) < ...`. We need to convince it _not_ to do that.
+ (&hir::ExprKind::Cast { .. }, hir::BinOpKind::Lt | hir::BinOpKind::Shl) => {
+ parser::PREC_FORCE_PAREN
+ }
+ (&hir::ExprKind::Let { .. }, _) if !parser::needs_par_as_let_scrutinee(prec) => {
+ parser::PREC_FORCE_PAREN
+ }
+ _ => left_prec,
+ };
+
+ self.print_expr_maybe_paren(lhs, left_prec);
+ self.space();
+ self.word_space(op.node.as_str());
+ self.print_expr_maybe_paren(rhs, right_prec)
+ }
+
+ fn print_expr_unary(&mut self, op: hir::UnOp, expr: &hir::Expr<'_>) {
+ self.word(op.as_str());
+ self.print_expr_maybe_paren(expr, parser::PREC_PREFIX)
+ }
+
+ fn print_expr_addr_of(
+ &mut self,
+ kind: hir::BorrowKind,
+ mutability: hir::Mutability,
+ expr: &hir::Expr<'_>,
+ ) {
+ self.word("&");
+ match kind {
+ hir::BorrowKind::Ref => self.print_mutability(mutability, false),
+ hir::BorrowKind::Raw => {
+ self.word_nbsp("raw");
+ self.print_mutability(mutability, true);
+ }
+ }
+ self.print_expr_maybe_paren(expr, parser::PREC_PREFIX)
+ }
+
+ fn print_literal(&mut self, lit: &hir::Lit) {
+ self.maybe_print_comment(lit.span.lo());
+ self.word(lit.node.to_lit_token().to_string())
+ }
+
+ fn print_inline_asm(&mut self, asm: &hir::InlineAsm<'_>) {
+ enum AsmArg<'a> {
+ Template(String),
+ Operand(&'a hir::InlineAsmOperand<'a>),
+ Options(ast::InlineAsmOptions),
+ }
+
+ let mut args = vec![AsmArg::Template(ast::InlineAsmTemplatePiece::to_string(asm.template))];
+ args.extend(asm.operands.iter().map(|(o, _)| AsmArg::Operand(o)));
+ if !asm.options.is_empty() {
+ args.push(AsmArg::Options(asm.options));
+ }
+
+ self.popen();
+ self.commasep(Consistent, &args, |s, arg| match *arg {
+ AsmArg::Template(ref template) => s.print_string(template, ast::StrStyle::Cooked),
+ AsmArg::Operand(op) => match *op {
+ hir::InlineAsmOperand::In { reg, ref expr } => {
+ s.word("in");
+ s.popen();
+ s.word(format!("{}", reg));
+ s.pclose();
+ s.space();
+ s.print_expr(expr);
+ }
+ hir::InlineAsmOperand::Out { reg, late, ref expr } => {
+ s.word(if late { "lateout" } else { "out" });
+ s.popen();
+ s.word(format!("{}", reg));
+ s.pclose();
+ s.space();
+ match expr {
+ Some(expr) => s.print_expr(expr),
+ None => s.word("_"),
+ }
+ }
+ hir::InlineAsmOperand::InOut { reg, late, ref expr } => {
+ s.word(if late { "inlateout" } else { "inout" });
+ s.popen();
+ s.word(format!("{}", reg));
+ s.pclose();
+ s.space();
+ s.print_expr(expr);
+ }
+ hir::InlineAsmOperand::SplitInOut { reg, late, ref in_expr, ref out_expr } => {
+ s.word(if late { "inlateout" } else { "inout" });
+ s.popen();
+ s.word(format!("{}", reg));
+ s.pclose();
+ s.space();
+ s.print_expr(in_expr);
+ s.space();
+ s.word_space("=>");
+ match out_expr {
+ Some(out_expr) => s.print_expr(out_expr),
+ None => s.word("_"),
+ }
+ }
+ hir::InlineAsmOperand::Const { ref anon_const } => {
+ s.word("const");
+ s.space();
+ s.print_anon_const(anon_const);
+ }
+ hir::InlineAsmOperand::SymFn { ref anon_const } => {
+ s.word("sym_fn");
+ s.space();
+ s.print_anon_const(anon_const);
+ }
+ hir::InlineAsmOperand::SymStatic { ref path, def_id: _ } => {
+ s.word("sym_static");
+ s.space();
+ s.print_qpath(path, true);
+ }
+ },
+ AsmArg::Options(opts) => {
+ s.word("options");
+ s.popen();
+ let mut options = vec![];
+ if opts.contains(ast::InlineAsmOptions::PURE) {
+ options.push("pure");
+ }
+ if opts.contains(ast::InlineAsmOptions::NOMEM) {
+ options.push("nomem");
+ }
+ if opts.contains(ast::InlineAsmOptions::READONLY) {
+ options.push("readonly");
+ }
+ if opts.contains(ast::InlineAsmOptions::PRESERVES_FLAGS) {
+ options.push("preserves_flags");
+ }
+ if opts.contains(ast::InlineAsmOptions::NORETURN) {
+ options.push("noreturn");
+ }
+ if opts.contains(ast::InlineAsmOptions::NOSTACK) {
+ options.push("nostack");
+ }
+ if opts.contains(ast::InlineAsmOptions::ATT_SYNTAX) {
+ options.push("att_syntax");
+ }
+ if opts.contains(ast::InlineAsmOptions::RAW) {
+ options.push("raw");
+ }
+ if opts.contains(ast::InlineAsmOptions::MAY_UNWIND) {
+ options.push("may_unwind");
+ }
+ s.commasep(Inconsistent, &options, |s, &opt| {
+ s.word(opt);
+ });
+ s.pclose();
+ }
+ });
+ self.pclose();
+ }
+
+ pub fn print_expr(&mut self, expr: &hir::Expr<'_>) {
+ self.maybe_print_comment(expr.span.lo());
+ self.print_outer_attributes(self.attrs(expr.hir_id));
+ self.ibox(INDENT_UNIT);
+ self.ann.pre(self, AnnNode::Expr(expr));
+ match expr.kind {
+ hir::ExprKind::Box(expr) => {
+ self.word_space("box");
+ self.print_expr_maybe_paren(expr, parser::PREC_PREFIX);
+ }
+ hir::ExprKind::Array(exprs) => {
+ self.print_expr_vec(exprs);
+ }
+ hir::ExprKind::ConstBlock(ref anon_const) => {
+ self.print_expr_anon_const(anon_const);
+ }
+ hir::ExprKind::Repeat(element, ref count) => {
+ self.print_expr_repeat(element, count);
+ }
+ hir::ExprKind::Struct(qpath, fields, wth) => {
+ self.print_expr_struct(qpath, fields, wth);
+ }
+ hir::ExprKind::Tup(exprs) => {
+ self.print_expr_tup(exprs);
+ }
+ hir::ExprKind::Call(func, args) => {
+ self.print_expr_call(func, args);
+ }
+ hir::ExprKind::MethodCall(segment, args, _) => {
+ self.print_expr_method_call(segment, args);
+ }
+ hir::ExprKind::Binary(op, lhs, rhs) => {
+ self.print_expr_binary(op, lhs, rhs);
+ }
+ hir::ExprKind::Unary(op, expr) => {
+ self.print_expr_unary(op, expr);
+ }
+ hir::ExprKind::AddrOf(k, m, expr) => {
+ self.print_expr_addr_of(k, m, expr);
+ }
+ hir::ExprKind::Lit(ref lit) => {
+ self.print_literal(lit);
+ }
+ hir::ExprKind::Cast(expr, ty) => {
+ let prec = AssocOp::As.precedence() as i8;
+ self.print_expr_maybe_paren(expr, prec);
+ self.space();
+ self.word_space("as");
+ self.print_type(ty);
+ }
+ hir::ExprKind::Type(expr, ty) => {
+ let prec = AssocOp::Colon.precedence() as i8;
+ self.print_expr_maybe_paren(expr, prec);
+ self.word_space(":");
+ self.print_type(ty);
+ }
+ hir::ExprKind::DropTemps(init) => {
+ // Print `{`:
+ self.cbox(INDENT_UNIT);
+ self.ibox(0);
+ self.bopen();
+
+ // Print `let _t = $init;`:
+ let temp = Ident::from_str("_t");
+ self.print_local(Some(init), None, |this| this.print_ident(temp));
+ self.word(";");
+
+ // Print `_t`:
+ self.space_if_not_bol();
+ self.print_ident(temp);
+
+ // Print `}`:
+ self.bclose_maybe_open(expr.span, true);
+ }
+ hir::ExprKind::Let(&hir::Let { pat, ty, init, .. }) => {
+ self.print_let(pat, ty, init);
+ }
+ hir::ExprKind::If(test, blk, elseopt) => {
+ self.print_if(test, blk, elseopt);
+ }
+ hir::ExprKind::Loop(blk, opt_label, _, _) => {
+ if let Some(label) = opt_label {
+ self.print_ident(label.ident);
+ self.word_space(":");
+ }
+ self.head("loop");
+ self.print_block(blk);
+ }
+ hir::ExprKind::Match(expr, arms, _) => {
+ self.cbox(INDENT_UNIT);
+ self.ibox(INDENT_UNIT);
+ self.word_nbsp("match");
+ self.print_expr_as_cond(expr);
+ self.space();
+ self.bopen();
+ for arm in arms {
+ self.print_arm(arm);
+ }
+ self.bclose(expr.span);
+ }
+ hir::ExprKind::Closure(&hir::Closure {
+ binder,
+ capture_clause,
+ bound_generic_params,
+ fn_decl,
+ body,
+ fn_decl_span: _,
+ movability: _,
+ }) => {
+ self.print_closure_binder(binder, bound_generic_params);
+ self.print_capture_clause(capture_clause);
+
+ self.print_closure_params(fn_decl, body);
+ self.space();
+
+ // This is a bare expression.
+ self.ann.nested(self, Nested::Body(body));
+ self.end(); // need to close a box
+
+ // A box will be closed by `print_expr`, but we didn't want an overall
+ // wrapper, so we closed the corresponding opening. Create an
+ // empty box to satisfy the close.
+ self.ibox(0);
+ }
+ hir::ExprKind::Block(blk, opt_label) => {
+ if let Some(label) = opt_label {
+ self.print_ident(label.ident);
+ self.word_space(":");
+ }
+ // containing cbox, will be closed by print-block at `}`
+ self.cbox(INDENT_UNIT);
+ // head-box, will be closed by print-block after `{`
+ self.ibox(0);
+ self.print_block(blk);
+ }
+ hir::ExprKind::Assign(lhs, rhs, _) => {
+ let prec = AssocOp::Assign.precedence() as i8;
+ self.print_expr_maybe_paren(lhs, prec + 1);
+ self.space();
+ self.word_space("=");
+ self.print_expr_maybe_paren(rhs, prec);
+ }
+ hir::ExprKind::AssignOp(op, lhs, rhs) => {
+ let prec = AssocOp::Assign.precedence() as i8;
+ self.print_expr_maybe_paren(lhs, prec + 1);
+ self.space();
+ self.word(op.node.as_str());
+ self.word_space("=");
+ self.print_expr_maybe_paren(rhs, prec);
+ }
+ hir::ExprKind::Field(expr, ident) => {
+ self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX);
+ self.word(".");
+ self.print_ident(ident);
+ }
+ hir::ExprKind::Index(expr, index) => {
+ self.print_expr_maybe_paren(expr, parser::PREC_POSTFIX);
+ self.word("[");
+ self.print_expr(index);
+ self.word("]");
+ }
+ hir::ExprKind::Path(ref qpath) => self.print_qpath(qpath, true),
+ hir::ExprKind::Break(destination, opt_expr) => {
+ self.word("break");
+ if let Some(label) = destination.label {
+ self.space();
+ self.print_ident(label.ident);
+ }
+ if let Some(expr) = opt_expr {
+ self.space();
+ self.print_expr_maybe_paren(expr, parser::PREC_JUMP);
+ }
+ }
+ hir::ExprKind::Continue(destination) => {
+ self.word("continue");
+ if let Some(label) = destination.label {
+ self.space();
+ self.print_ident(label.ident);
+ }
+ }
+ hir::ExprKind::Ret(result) => {
+ self.word("return");
+ if let Some(expr) = result {
+ self.word(" ");
+ self.print_expr_maybe_paren(expr, parser::PREC_JUMP);
+ }
+ }
+ hir::ExprKind::InlineAsm(asm) => {
+ self.word("asm!");
+ self.print_inline_asm(asm);
+ }
+ hir::ExprKind::Yield(expr, _) => {
+ self.word_space("yield");
+ self.print_expr_maybe_paren(expr, parser::PREC_JUMP);
+ }
+ hir::ExprKind::Err => {
+ self.popen();
+ self.word("/*ERROR*/");
+ self.pclose();
+ }
+ }
+ self.ann.post(self, AnnNode::Expr(expr));
+ self.end()
+ }
+
+ pub fn print_local_decl(&mut self, loc: &hir::Local<'_>) {
+ self.print_pat(loc.pat);
+ if let Some(ty) = loc.ty {
+ self.word_space(":");
+ self.print_type(ty);
+ }
+ }
+
+ pub fn print_name(&mut self, name: Symbol) {
+ self.print_ident(Ident::with_dummy_span(name))
+ }
+
+ pub fn print_path(&mut self, path: &hir::Path<'_>, colons_before_params: bool) {
+ self.maybe_print_comment(path.span.lo());
+
+ for (i, segment) in path.segments.iter().enumerate() {
+ if i > 0 {
+ self.word("::")
+ }
+ if segment.ident.name != kw::PathRoot {
+ self.print_ident(segment.ident);
+ self.print_generic_args(segment.args(), segment.infer_args, colons_before_params);
+ }
+ }
+ }
+
+ pub fn print_path_segment(&mut self, segment: &hir::PathSegment<'_>) {
+ if segment.ident.name != kw::PathRoot {
+ self.print_ident(segment.ident);
+ self.print_generic_args(segment.args(), segment.infer_args, false);
+ }
+ }
+
+ pub fn print_qpath(&mut self, qpath: &hir::QPath<'_>, colons_before_params: bool) {
+ match *qpath {
+ hir::QPath::Resolved(None, path) => self.print_path(path, colons_before_params),
+ hir::QPath::Resolved(Some(qself), path) => {
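+ // Fully qualified form, e.g. `<Vec<T> as IntoIterator>::into_iter`
+ // (example for illustration): the self type and the trait path are
+ // printed between `<` and `>`.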
+ self.word("<");
+ self.print_type(qself);
+ self.space();
+ self.word_space("as");
+
+ for (i, segment) in path.segments[..path.segments.len() - 1].iter().enumerate() {
+ if i > 0 {
+ self.word("::")
+ }
+ if segment.ident.name != kw::PathRoot {
+ self.print_ident(segment.ident);
+ self.print_generic_args(
+ segment.args(),
+ segment.infer_args,
+ colons_before_params,
+ );
+ }
+ }
+
+ self.word(">");
+ self.word("::");
+ let item_segment = path.segments.last().unwrap();
+ self.print_ident(item_segment.ident);
+ self.print_generic_args(
+ item_segment.args(),
+ item_segment.infer_args,
+ colons_before_params,
+ )
+ }
+ hir::QPath::TypeRelative(qself, item_segment) => {
+ // If we've got a compound-qualified-path, let's push an additional pair of angle
+ // brackets, so that we pretty-print `<<A::B>::C>` as `<A::B>::C`, instead of just
+ // `A::B::C` (since the latter could be ambiguous to the user)
+ if let hir::TyKind::Path(hir::QPath::Resolved(None, _)) = qself.kind {
+ self.print_type(qself);
+ } else {
+ self.word("<");
+ self.print_type(qself);
+ self.word(">");
+ }
+
+ self.word("::");
+ self.print_ident(item_segment.ident);
+ self.print_generic_args(
+ item_segment.args(),
+ item_segment.infer_args,
+ colons_before_params,
+ )
+ }
+ hir::QPath::LangItem(lang_item, span, _) => {
+ self.word("#[lang = \"");
+ self.print_ident(Ident::new(lang_item.name(), span));
+ self.word("\"]");
+ }
+ }
+ }
+
+ fn print_generic_args(
+ &mut self,
+ generic_args: &hir::GenericArgs<'_>,
+ infer_args: bool,
+ colons_before_params: bool,
+ ) {
+ if generic_args.parenthesized {
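+ // Parenthesized (`Fn`-sugar) arguments, e.g. `Fn(u32) -> bool`: print
+ // the inputs in parens, then `->` and the single output binding.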
+ self.word("(");
+ self.commasep(Inconsistent, generic_args.inputs(), |s, ty| s.print_type(ty));
+ self.word(")");
+
+ self.space_if_not_bol();
+ self.word_space("->");
+ self.print_type(generic_args.bindings[0].ty());
+ } else {
+ let start = if colons_before_params { "::<" } else { "<" };
+ let empty = Cell::new(true);
+ let start_or_comma = |this: &mut Self| {
+ if empty.get() {
+ empty.set(false);
+ this.word(start)
+ } else {
+ this.word_space(",")
+ }
+ };
+
+ let mut nonelided_generic_args: bool = false;
+ let elide_lifetimes = generic_args.args.iter().all(|arg| match arg {
+ GenericArg::Lifetime(lt) => lt.is_elided(),
+ _ => {
+ nonelided_generic_args = true;
+ true
+ }
+ });
+
+ if nonelided_generic_args {
+ start_or_comma(self);
+ self.commasep(
+ Inconsistent,
+ generic_args.args,
+ |s, generic_arg| match generic_arg {
+ GenericArg::Lifetime(lt) if !elide_lifetimes => s.print_lifetime(lt),
+ GenericArg::Lifetime(_) => {}
+ GenericArg::Type(ty) => s.print_type(ty),
+ GenericArg::Const(ct) => s.print_anon_const(&ct.value),
+ GenericArg::Infer(_inf) => s.word("_"),
+ },
+ );
+ }
+
+ // FIXME(eddyb): this would leak into error messages (e.g.,
+ // "non-exhaustive patterns: `Some::<..>(_)` not covered").
+ if infer_args && false {
+ start_or_comma(self);
+ self.word("..");
+ }
+
+ for binding in generic_args.bindings {
+ start_or_comma(self);
+ self.print_type_binding(binding);
+ }
+
+ if !empty.get() {
+ self.word(">")
+ }
+ }
+ }
+
+ pub fn print_type_binding(&mut self, binding: &hir::TypeBinding<'_>) {
+ self.print_ident(binding.ident);
+ self.print_generic_args(binding.gen_args, false, false);
+ self.space();
+ match binding.kind {
+ hir::TypeBindingKind::Equality { ref term } => {
+ self.word_space("=");
+ match term {
+ Term::Ty(ty) => self.print_type(ty),
+ Term::Const(ref c) => self.print_anon_const(c),
+ }
+ }
+ hir::TypeBindingKind::Constraint { bounds } => {
+ self.print_bounds(":", bounds);
+ }
+ }
+ }
+
+ pub fn print_pat(&mut self, pat: &hir::Pat<'_>) {
+ self.maybe_print_comment(pat.span.lo());
+ self.ann.pre(self, AnnNode::Pat(pat));
+ // Pat isn't normalized, but the beauty of it
+ // is that it doesn't matter
+ match pat.kind {
+ PatKind::Wild => self.word("_"),
+ PatKind::Binding(binding_mode, _, ident, sub) => {
+ match binding_mode {
+ hir::BindingAnnotation::Ref => {
+ self.word_nbsp("ref");
+ self.print_mutability(hir::Mutability::Not, false);
+ }
+ hir::BindingAnnotation::RefMut => {
+ self.word_nbsp("ref");
+ self.print_mutability(hir::Mutability::Mut, false);
+ }
+ hir::BindingAnnotation::Unannotated => {}
+ hir::BindingAnnotation::Mutable => {
+ self.word_nbsp("mut");
+ }
+ }
+ self.print_ident(ident);
+ if let Some(p) = sub {
+ self.word("@");
+ self.print_pat(p);
+ }
+ }
+ PatKind::TupleStruct(ref qpath, elts, ddpos) => {
+ self.print_qpath(qpath, true);
+ self.popen();
+ if let Some(ddpos) = ddpos {
+ self.commasep(Inconsistent, &elts[..ddpos], |s, p| s.print_pat(p));
+ if ddpos != 0 {
+ self.word_space(",");
+ }
+ self.word("..");
+ if ddpos != elts.len() {
+ self.word(",");
+ self.commasep(Inconsistent, &elts[ddpos..], |s, p| s.print_pat(p));
+ }
+ } else {
+ self.commasep(Inconsistent, elts, |s, p| s.print_pat(p));
+ }
+ self.pclose();
+ }
+ PatKind::Path(ref qpath) => {
+ self.print_qpath(qpath, true);
+ }
+ PatKind::Struct(ref qpath, fields, etc) => {
+ self.print_qpath(qpath, true);
+ self.nbsp();
+ self.word("{");
+ let empty = fields.is_empty() && !etc;
+ if !empty {
+ self.space();
+ }
+ self.commasep_cmnt(
+ Consistent,
+ fields,
+ |s, f| {
+ s.cbox(INDENT_UNIT);
+ if !f.is_shorthand {
+ s.print_ident(f.ident);
+ s.word_nbsp(":");
+ }
+ s.print_pat(f.pat);
+ s.end()
+ },
+ |f| f.pat.span,
+ );
+ if etc {
+ if !fields.is_empty() {
+ self.word_space(",");
+ }
+ self.word("..");
+ }
+ if !empty {
+ self.space();
+ }
+ self.word("}");
+ }
+ PatKind::Or(pats) => {
+ self.strsep("|", true, Inconsistent, pats, |s, p| s.print_pat(p));
+ }
+ PatKind::Tuple(elts, ddpos) => {
+ self.popen();
+ if let Some(ddpos) = ddpos {
+ self.commasep(Inconsistent, &elts[..ddpos], |s, p| s.print_pat(p));
+ if ddpos != 0 {
+ self.word_space(",");
+ }
+ self.word("..");
+ if ddpos != elts.len() {
+ self.word(",");
+ self.commasep(Inconsistent, &elts[ddpos..], |s, p| s.print_pat(p));
+ }
+ } else {
+ self.commasep(Inconsistent, elts, |s, p| s.print_pat(p));
+ if elts.len() == 1 {
+ self.word(",");
+ }
+ }
+ self.pclose();
+ }
+ PatKind::Box(inner) => {
+ let is_range_inner = matches!(inner.kind, PatKind::Range(..));
+ self.word("box ");
+ if is_range_inner {
+ self.popen();
+ }
+ self.print_pat(inner);
+ if is_range_inner {
+ self.pclose();
+ }
+ }
+ PatKind::Ref(inner, mutbl) => {
+ let is_range_inner = matches!(inner.kind, PatKind::Range(..));
+ self.word("&");
+ self.word(mutbl.prefix_str());
+ if is_range_inner {
+ self.popen();
+ }
+ self.print_pat(inner);
+ if is_range_inner {
+ self.pclose();
+ }
+ }
+ PatKind::Lit(e) => self.print_expr(e),
+ PatKind::Range(begin, end, end_kind) => {
+ if let Some(expr) = begin {
+ self.print_expr(expr);
+ }
+ match end_kind {
+ RangeEnd::Included => self.word("..."),
+ RangeEnd::Excluded => self.word(".."),
+ }
+ if let Some(expr) = end {
+ self.print_expr(expr);
+ }
+ }
+ PatKind::Slice(before, slice, after) => {
+ self.word("[");
+ self.commasep(Inconsistent, before, |s, p| s.print_pat(p));
+ if let Some(p) = slice {
+ if !before.is_empty() {
+ self.word_space(",");
+ }
+ if let PatKind::Wild = p.kind {
+ // Print nothing.
+ } else {
+ self.print_pat(p);
+ }
+ self.word("..");
+ if !after.is_empty() {
+ self.word_space(",");
+ }
+ }
+ self.commasep(Inconsistent, after, |s, p| s.print_pat(p));
+ self.word("]");
+ }
+ }
+ self.ann.post(self, AnnNode::Pat(pat))
+ }
+
+ pub fn print_param(&mut self, arg: &hir::Param<'_>) {
+ self.print_outer_attributes(self.attrs(arg.hir_id));
+ self.print_pat(arg.pat);
+ }
+
+ pub fn print_arm(&mut self, arm: &hir::Arm<'_>) {
+ // I have no idea why this check is necessary, but here it
+ // is :(
+ if self.attrs(arm.hir_id).is_empty() {
+ self.space();
+ }
+ self.cbox(INDENT_UNIT);
+ self.ann.pre(self, AnnNode::Arm(arm));
+ self.ibox(0);
+ self.print_outer_attributes(self.attrs(arm.hir_id));
+ self.print_pat(arm.pat);
+ self.space();
+ if let Some(ref g) = arm.guard {
+ match *g {
+ hir::Guard::If(e) => {
+ self.word_space("if");
+ self.print_expr(e);
+ self.space();
+ }
+ hir::Guard::IfLet(&hir::Let { pat, ty, init, .. }) => {
+ self.word_nbsp("if");
+ self.print_let(pat, ty, init);
+ }
+ }
+ }
+ self.word_space("=>");
+
+ match arm.body.kind {
+ hir::ExprKind::Block(blk, opt_label) => {
+ if let Some(label) = opt_label {
+ self.print_ident(label.ident);
+ self.word_space(":");
+ }
+ // the block will close the pattern's ibox
+ self.print_block_unclosed(blk);
+
+ // If it is a user-provided unsafe block, print a comma after it
+ if let hir::BlockCheckMode::UnsafeBlock(hir::UnsafeSource::UserProvided) = blk.rules
+ {
+ self.word(",");
+ }
+ }
+ _ => {
+ self.end(); // close the ibox for the pattern
+ self.print_expr(arm.body);
+ self.word(",");
+ }
+ }
+ self.ann.post(self, AnnNode::Arm(arm));
+ self.end() // close enclosing cbox
+ }
+
+ pub fn print_fn(
+ &mut self,
+ decl: &hir::FnDecl<'_>,
+ header: hir::FnHeader,
+ name: Option<Symbol>,
+ generics: &hir::Generics<'_>,
+ arg_names: &[Ident],
+ body_id: Option<hir::BodyId>,
+ ) {
+ self.print_fn_header_info(header);
+
+ if let Some(name) = name {
+ self.nbsp();
+ self.print_name(name);
+ }
+ self.print_generic_params(generics.params);
+
+ self.popen();
+ let mut i = 0;
+ // Make sure we aren't supplied *both* `arg_names` and `body_id`.
+ assert!(arg_names.is_empty() || body_id.is_none());
+ self.commasep(Inconsistent, decl.inputs, |s, ty| {
+ s.ibox(INDENT_UNIT);
+ if let Some(arg_name) = arg_names.get(i) {
+ s.word(arg_name.to_string());
+ s.word(":");
+ s.space();
+ } else if let Some(body_id) = body_id {
+ s.ann.nested(s, Nested::BodyParamPat(body_id, i));
+ s.word(":");
+ s.space();
+ }
+ i += 1;
+ s.print_type(ty);
+ s.end()
+ });
+ if decl.c_variadic {
+ self.word(", ...");
+ }
+ self.pclose();
+
+ self.print_fn_output(decl);
+ self.print_where_clause(generics)
+ }
+
+ fn print_closure_params(&mut self, decl: &hir::FnDecl<'_>, body_id: hir::BodyId) {
+ self.word("|");
+ let mut i = 0;
+ self.commasep(Inconsistent, decl.inputs, |s, ty| {
+ s.ibox(INDENT_UNIT);
+
+ s.ann.nested(s, Nested::BodyParamPat(body_id, i));
+ i += 1;
+
+ if let hir::TyKind::Infer = ty.kind {
+ // Print nothing.
+ } else {
+ s.word(":");
+ s.space();
+ s.print_type(ty);
+ }
+ s.end();
+ });
+ self.word("|");
+
+ if let hir::FnRetTy::DefaultReturn(..) = decl.output {
+ return;
+ }
+
+ self.space_if_not_bol();
+ self.word_space("->");
+ match decl.output {
+ hir::FnRetTy::Return(ty) => {
+ self.print_type(ty);
+ self.maybe_print_comment(ty.span.lo());
+ }
+ hir::FnRetTy::DefaultReturn(..) => unreachable!(),
+ }
+ }
+
+ pub fn print_capture_clause(&mut self, capture_clause: hir::CaptureBy) {
+ match capture_clause {
+ hir::CaptureBy::Value => self.word_space("move"),
+ hir::CaptureBy::Ref => {}
+ }
+ }
+
+ pub fn print_closure_binder(
+ &mut self,
+ binder: hir::ClosureBinder,
+ generic_params: &[GenericParam<'_>],
+ ) {
+ let generic_params = generic_params
+ .iter()
+ .filter(|p| {
+ matches!(
+ p,
+ GenericParam {
+ kind: GenericParamKind::Lifetime { kind: LifetimeParamKind::Explicit },
+ ..
+ }
+ )
+ })
+ .collect::<Vec<_>>();
+
+ match binder {
+ hir::ClosureBinder::Default => {}
+ // we need to distinguish `|...| {}` from `for<> |...| {}` as `for<>` adds additional restrictions
+ hir::ClosureBinder::For { .. } if generic_params.is_empty() => self.word("for<>"),
+ hir::ClosureBinder::For { .. } => {
+ self.word("for");
+ self.word("<");
+
+ self.commasep(Inconsistent, &generic_params, |s, param| {
+ s.print_generic_param(param)
+ });
+
+ self.word(">");
+ self.nbsp();
+ }
+ }
+ }
+
+ pub fn print_bounds<'b>(
+ &mut self,
+ prefix: &'static str,
+ bounds: impl IntoIterator<Item = &'b hir::GenericBound<'b>>,
+ ) {
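+ // Emit `prefix` before the first bound only; subsequent bounds are
+ // separated by ` + `.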
+ let mut first = true;
+ for bound in bounds {
+ if first {
+ self.word(prefix);
+ }
+ if !(first && prefix.is_empty()) {
+ self.nbsp();
+ }
+ if first {
+ first = false;
+ } else {
+ self.word_space("+");
+ }
+
+ match bound {
+ GenericBound::Trait(tref, modifier) => {
+ if modifier == &TraitBoundModifier::Maybe {
+ self.word("?");
+ }
+ self.print_poly_trait_ref(tref);
+ }
+ GenericBound::LangItemTrait(lang_item, span, ..) => {
+ self.word("#[lang = \"");
+ self.print_ident(Ident::new(lang_item.name(), *span));
+ self.word("\"]");
+ }
+ GenericBound::Outlives(lt) => {
+ self.print_lifetime(lt);
+ }
+ }
+ }
+ }
+
+ pub fn print_generic_params(&mut self, generic_params: &[GenericParam<'_>]) {
+ if !generic_params.is_empty() {
+ self.word("<");
+
+ self.commasep(Inconsistent, generic_params, |s, param| s.print_generic_param(param));
+
+ self.word(">");
+ }
+ }
+
+ pub fn print_generic_param(&mut self, param: &GenericParam<'_>) {
+ if let GenericParamKind::Const { .. } = param.kind {
+ self.word_space("const");
+ }
+
+ self.print_ident(param.name.ident());
+
+ match param.kind {
+ GenericParamKind::Lifetime { .. } => {}
+ GenericParamKind::Type { default, .. } => {
+ if let Some(default) = default {
+ self.space();
+ self.word_space("=");
+ self.print_type(default);
+ }
+ }
+ GenericParamKind::Const { ty, ref default } => {
+ self.word_space(":");
+ self.print_type(ty);
+ if let Some(default) = default {
+ self.space();
+ self.word_space("=");
+ self.print_anon_const(default);
+ }
+ }
+ }
+ }
+
+ pub fn print_lifetime(&mut self, lifetime: &hir::Lifetime) {
+ self.print_ident(lifetime.name.ident())
+ }
+
+ pub fn print_where_clause(&mut self, generics: &hir::Generics<'_>) {
+ if generics.predicates.is_empty() {
+ return;
+ }
+
+ self.space();
+ self.word_space("where");
+
+ for (i, predicate) in generics.predicates.iter().enumerate() {
+ if i != 0 {
+ self.word_space(",");
+ }
+
+ match *predicate {
+ hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ bound_generic_params,
+ bounded_ty,
+ bounds,
+ ..
+ }) => {
+ self.print_formal_generic_params(bound_generic_params);
+ self.print_type(bounded_ty);
+ self.print_bounds(":", bounds);
+ }
+ hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+ ref lifetime,
+ bounds,
+ ..
+ }) => {
+ self.print_lifetime(lifetime);
+ self.word(":");
+
+ for (i, bound) in bounds.iter().enumerate() {
+ match bound {
+ GenericBound::Outlives(lt) => {
+ self.print_lifetime(lt);
+ }
+ _ => panic!(),
+ }
+
+ if i != 0 {
+ self.word(":");
+ }
+ }
+ }
+ hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
+ lhs_ty, rhs_ty, ..
+ }) => {
+ self.print_type(lhs_ty);
+ self.space();
+ self.word_space("=");
+ self.print_type(rhs_ty);
+ }
+ }
+ }
+ }
+
+ pub fn print_mutability(&mut self, mutbl: hir::Mutability, print_const: bool) {
+ match mutbl {
+ hir::Mutability::Mut => self.word_nbsp("mut"),
+ hir::Mutability::Not => {
+ if print_const {
+ self.word_nbsp("const")
+ }
+ }
+ }
+ }
+
+ pub fn print_mt(&mut self, mt: &hir::MutTy<'_>, print_const: bool) {
+ self.print_mutability(mt.mutbl, print_const);
+ self.print_type(mt.ty);
+ }
+
+ pub fn print_fn_output(&mut self, decl: &hir::FnDecl<'_>) {
+ if let hir::FnRetTy::DefaultReturn(..) = decl.output {
+ return;
+ }
+
+ self.space_if_not_bol();
+ self.ibox(INDENT_UNIT);
+ self.word_space("->");
+ match decl.output {
+ hir::FnRetTy::DefaultReturn(..) => unreachable!(),
+ hir::FnRetTy::Return(ty) => self.print_type(ty),
+ }
+ self.end();
+
+ if let hir::FnRetTy::Return(output) = decl.output {
+ self.maybe_print_comment(output.span.lo());
+ }
+ }
+
+ pub fn print_ty_fn(
+ &mut self,
+ abi: Abi,
+ unsafety: hir::Unsafety,
+ decl: &hir::FnDecl<'_>,
+ name: Option<Symbol>,
+ generic_params: &[hir::GenericParam<'_>],
+ arg_names: &[Ident],
+ ) {
+ self.ibox(INDENT_UNIT);
+ self.print_formal_generic_params(generic_params);
+ let generics = hir::Generics::empty();
+ self.print_fn(
+ decl,
+ hir::FnHeader {
+ unsafety,
+ abi,
+ constness: hir::Constness::NotConst,
+ asyncness: hir::IsAsync::NotAsync,
+ },
+ name,
+ generics,
+ arg_names,
+ None,
+ );
+ self.end();
+ }
+
+ pub fn print_fn_header_info(&mut self, header: hir::FnHeader) {
+ match header.constness {
+ hir::Constness::NotConst => {}
+ hir::Constness::Const => self.word_nbsp("const"),
+ }
+
+ match header.asyncness {
+ hir::IsAsync::NotAsync => {}
+ hir::IsAsync::Async => self.word_nbsp("async"),
+ }
+
+ self.print_unsafety(header.unsafety);
+
+ if header.abi != Abi::Rust {
+ self.word_nbsp("extern");
+ self.word_nbsp(header.abi.to_string());
+ }
+
+ self.word("fn")
+ }
+
+ pub fn print_unsafety(&mut self, s: hir::Unsafety) {
+ match s {
+ hir::Unsafety::Normal => {}
+ hir::Unsafety::Unsafe => self.word_nbsp("unsafe"),
+ }
+ }
+
+ pub fn print_is_auto(&mut self, s: hir::IsAuto) {
+ match s {
+ hir::IsAuto::Yes => self.word_nbsp("auto"),
+ hir::IsAuto::No => {}
+ }
+ }
+}
+
+/// Does this expression require a semicolon to be treated
+/// as a statement? The negation of this (can this expression
+/// be used as a statement without a semicolon?) is used
+/// as an early bail-out in the parser so that, for instance,
+/// `if true {...} else {...}` followed by `|x| 5`
+/// isn't parsed as `(if true {...} else {...} | x) | 5`.
+//
+// Duplicated from `parse::classify`, but adapted for the HIR.
+fn expr_requires_semi_to_be_stmt(e: &hir::Expr<'_>) -> bool {
+ !matches!(
+ e.kind,
+ hir::ExprKind::If(..)
+ | hir::ExprKind::Match(..)
+ | hir::ExprKind::Block(..)
+ | hir::ExprKind::Loop(..)
+ )
+}
+
+/// Returns `true` if this statement requires a semicolon after it.
+/// Note that in one case (`StmtKind::Semi`) we've already
+/// seen the semicolon, and thus don't need another.
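+///
+/// For example (illustrative), `x.f()` in statement position needs a
+/// trailing `;`, while a trailing `if c { .. }` does not.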
+fn stmt_ends_with_semi(stmt: &hir::StmtKind<'_>) -> bool {
+ match *stmt {
+ hir::StmtKind::Local(_) => true,
+ hir::StmtKind::Item(_) => false,
+ hir::StmtKind::Expr(e) => expr_requires_semi_to_be_stmt(e),
+ hir::StmtKind::Semi(..) => false,
+ }
+}
+
+fn bin_op_to_assoc_op(op: hir::BinOpKind) -> AssocOp {
+ use crate::hir::BinOpKind::*;
+ match op {
+ Add => AssocOp::Add,
+ Sub => AssocOp::Subtract,
+ Mul => AssocOp::Multiply,
+ Div => AssocOp::Divide,
+ Rem => AssocOp::Modulus,
+
+ And => AssocOp::LAnd,
+ Or => AssocOp::LOr,
+
+ BitXor => AssocOp::BitXor,
+ BitAnd => AssocOp::BitAnd,
+ BitOr => AssocOp::BitOr,
+ Shl => AssocOp::ShiftLeft,
+ Shr => AssocOp::ShiftRight,
+
+ Eq => AssocOp::Equal,
+ Lt => AssocOp::Less,
+ Le => AssocOp::LessEqual,
+ Ne => AssocOp::NotEqual,
+ Ge => AssocOp::GreaterEqual,
+ Gt => AssocOp::Greater,
+ }
+}
+
+/// Expressions that syntactically contain an "exterior" struct literal, i.e., not surrounded by any
+/// parens or other delimiters, e.g., `X { y: 1 }`, `X { y: 1 }.method()`, `foo == X { y: 1 }` and
+/// `X { y: 1 } == foo` all do, but `(X { y: 1 }) == foo` does not.
+fn contains_exterior_struct_lit(value: &hir::Expr<'_>) -> bool {
+ match value.kind {
+ hir::ExprKind::Struct(..) => true,
+
+ hir::ExprKind::Assign(lhs, rhs, _)
+ | hir::ExprKind::AssignOp(_, lhs, rhs)
+ | hir::ExprKind::Binary(_, lhs, rhs) => {
+ // `X { y: 1 } + X { y: 2 }`
+ contains_exterior_struct_lit(lhs) || contains_exterior_struct_lit(rhs)
+ }
+ hir::ExprKind::Unary(_, x)
+ | hir::ExprKind::Cast(x, _)
+ | hir::ExprKind::Type(x, _)
+ | hir::ExprKind::Field(x, _)
+ | hir::ExprKind::Index(x, _) => {
+ // `&X { y: 1 }, X { y: 1 }.y`
+ contains_exterior_struct_lit(x)
+ }
+
+ hir::ExprKind::MethodCall(.., exprs, _) => {
+ // `X { y: 1 }.bar(...)`
+ contains_exterior_struct_lit(&exprs[0])
+ }
+
+ _ => false,
+ }
+}
diff --git a/compiler/rustc_incremental/Cargo.toml b/compiler/rustc_incremental/Cargo.toml
new file mode 100644
index 000000000..d3c425a07
--- /dev/null
+++ b/compiler/rustc_incremental/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "rustc_incremental"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_graphviz = { path = "../rustc_graphviz" }
+tracing = "0.1"
+rand = "0.8.4"
+rustc_middle = { path = "../rustc_middle" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_span = { path = "../rustc_span" }
+rustc_fs_util = { path = "../rustc_fs_util" }
+rustc_session = { path = "../rustc_session" }
+rustc_errors = { path = "../rustc_errors" }
diff --git a/compiler/rustc_incremental/src/assert_dep_graph.rs b/compiler/rustc_incremental/src/assert_dep_graph.rs
new file mode 100644
index 000000000..69e482ce8
--- /dev/null
+++ b/compiler/rustc_incremental/src/assert_dep_graph.rs
@@ -0,0 +1,446 @@
+//! This pass is only used for the UNIT TESTS and DEBUGGING NEEDS
+//! around dependency graph construction. It serves two purposes: it
+//! will dump graphs in graphviz form to disk, and it searches for
+//! `#[rustc_if_this_changed]` and `#[rustc_then_this_would_need]`
+//! annotations. These annotations can be used to test whether paths
+//! exist in the graph. These checks run after codegen, so they view
+//! the final state of the dependency graph. Note that there are
+//! similar assertions found in `persist::dirty_clean` which check the
+//! **initial** state of the dependency graph, just after it has been
+//! loaded from disk.
+//!
+//! In this code, we report errors on each `rustc_then_this_would_need`
+//! annotation. If a path exists from the source to the target, we
+//! report "OK"; otherwise, we report "no path from `foo` to `bar`" for
+//! each case where no path exists. `ui` tests can then be
+//! used to check when paths exist or do not.
+//!
+//! The full form of the `rustc_if_this_changed` annotation is
+//! `#[rustc_if_this_changed("foo")]`, which will report a
+//! source node of `foo(def_id)`. The `"foo"` is optional and
+//! defaults to `"Hir"` if omitted.
+//!
+//! Example:
+//!
+//! ```ignore (needs flags)
+//! #[rustc_if_this_changed(Hir)]
+//! fn foo() { }
+//!
+//! #[rustc_then_this_would_need(codegen)] //~ ERROR no path from `foo`
+//! fn bar() { }
+//!
+//! #[rustc_then_this_would_need(codegen)] //~ ERROR OK
+//! fn baz() { foo(); }
+//! ```
+
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::graph::implementation::{Direction, NodeIndex, INCOMING, OUTGOING};
+use rustc_graphviz as dot;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_middle::dep_graph::{
+ DepGraphQuery, DepKind, DepNode, DepNodeExt, DepNodeFilter, EdgeFilter,
+};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+
+use std::env;
+use std::fs::{self, File};
+use std::io::{BufWriter, Write};
+
+#[allow(missing_docs)]
+pub fn assert_dep_graph(tcx: TyCtxt<'_>) {
+ tcx.dep_graph.with_ignore(|| {
+ if tcx.sess.opts.unstable_opts.dump_dep_graph {
+ tcx.dep_graph.with_query(dump_graph);
+ }
+
+ if !tcx.sess.opts.unstable_opts.query_dep_graph {
+ return;
+ }
+
+ // if the `rustc_attrs` feature is not enabled, then the
+ // attributes we are interested in cannot be present anyway, so
+ // skip the walk.
+ if !tcx.features().rustc_attrs {
+ return;
+ }
+
+ // Find annotations supplied by user (if any).
+ let (if_this_changed, then_this_would_need) = {
+ let mut visitor =
+ IfThisChanged { tcx, if_this_changed: vec![], then_this_would_need: vec![] };
+ visitor.process_attrs(hir::CRATE_HIR_ID);
+ tcx.hir().visit_all_item_likes_in_crate(&mut visitor);
+ (visitor.if_this_changed, visitor.then_this_would_need)
+ };
+
+ if !if_this_changed.is_empty() || !then_this_would_need.is_empty() {
+ assert!(
+ tcx.sess.opts.unstable_opts.query_dep_graph,
+ "cannot use the `#[{}]` or `#[{}]` annotations \
+ without supplying `-Z query-dep-graph`",
+ sym::rustc_if_this_changed,
+ sym::rustc_then_this_would_need
+ );
+ }
+
+ // Check paths.
+ check_paths(tcx, &if_this_changed, &then_this_would_need);
+ })
+}
+
+type Sources = Vec<(Span, DefId, DepNode)>;
+type Targets = Vec<(Span, Symbol, hir::HirId, DepNode)>;
+
+struct IfThisChanged<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ if_this_changed: Sources,
+ then_this_would_need: Targets,
+}
+
+impl<'tcx> IfThisChanged<'tcx> {
+ fn argument(&self, attr: &ast::Attribute) -> Option<Symbol> {
+ let mut value = None;
+ for list_item in attr.meta_item_list().unwrap_or_default() {
+ match list_item.ident() {
+ Some(ident) if list_item.is_word() && value.is_none() => value = Some(ident.name),
+ _ =>
+ // FIXME better-encapsulate meta_item (don't directly access `node`)
+ {
+ span_bug!(list_item.span(), "unexpected meta-item {:?}", list_item)
+ }
+ }
+ }
+ value
+ }
+
+ fn process_attrs(&mut self, hir_id: hir::HirId) {
+ let def_id = self.tcx.hir().local_def_id(hir_id);
+ let def_path_hash = self.tcx.def_path_hash(def_id.to_def_id());
+ let attrs = self.tcx.hir().attrs(hir_id);
+ for attr in attrs {
+ if attr.has_name(sym::rustc_if_this_changed) {
+ let dep_node_interned = self.argument(attr);
+ let dep_node = match dep_node_interned {
+ None => {
+ DepNode::from_def_path_hash(self.tcx, def_path_hash, DepKind::hir_owner)
+ }
+ Some(n) => {
+ match DepNode::from_label_string(self.tcx, n.as_str(), def_path_hash) {
+ Ok(n) => n,
+ Err(()) => {
+ self.tcx.sess.span_fatal(
+ attr.span,
+ &format!("unrecognized DepNode variant {:?}", n),
+ );
+ }
+ }
+ }
+ };
+ self.if_this_changed.push((attr.span, def_id.to_def_id(), dep_node));
+ } else if attr.has_name(sym::rustc_then_this_would_need) {
+ let dep_node_interned = self.argument(attr);
+ let dep_node = match dep_node_interned {
+ Some(n) => {
+ match DepNode::from_label_string(self.tcx, n.as_str(), def_path_hash) {
+ Ok(n) => n,
+ Err(()) => {
+ self.tcx.sess.span_fatal(
+ attr.span,
+ &format!("unrecognized DepNode variant {:?}", n),
+ );
+ }
+ }
+ }
+ None => {
+ self.tcx.sess.span_fatal(attr.span, "missing DepNode variant");
+ }
+ };
+ self.then_this_would_need.push((
+ attr.span,
+ dep_node_interned.unwrap(),
+ hir_id,
+ dep_node,
+ ));
+ }
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for IfThisChanged<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ self.process_attrs(item.hir_id());
+ intravisit::walk_item(self, item);
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
+ self.process_attrs(trait_item.hir_id());
+ intravisit::walk_trait_item(self, trait_item);
+ }
+
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ self.process_attrs(impl_item.hir_id());
+ intravisit::walk_impl_item(self, impl_item);
+ }
+
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
+ self.process_attrs(s.hir_id);
+ intravisit::walk_field_def(self, s);
+ }
+}
+
+fn check_paths<'tcx>(tcx: TyCtxt<'tcx>, if_this_changed: &Sources, then_this_would_need: &Targets) {
+ // Return early here so as not to construct the query, which is not cheap.
+ if if_this_changed.is_empty() {
+ for &(target_span, _, _, _) in then_this_would_need {
+ tcx.sess.span_err(target_span, "no `#[rustc_if_this_changed]` annotation detected");
+ }
+ return;
+ }
+ tcx.dep_graph.with_query(|query| {
+ for &(_, source_def_id, ref source_dep_node) in if_this_changed {
+ let dependents = query.transitive_predecessors(source_dep_node);
+ for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need {
+ if !dependents.contains(&target_dep_node) {
+ tcx.sess.span_err(
+ target_span,
+ &format!(
+ "no path from `{}` to `{}`",
+ tcx.def_path_str(source_def_id),
+ target_pass
+ ),
+ );
+ } else {
+ tcx.sess.span_err(target_span, "OK");
+ }
+ }
+ }
+ });
+}
+
+fn dump_graph(query: &DepGraphQuery) {
+ let path: String = env::var("RUST_DEP_GRAPH").unwrap_or_else(|_| "dep_graph".to_string());
+
+ let nodes = match env::var("RUST_DEP_GRAPH_FILTER") {
+ Ok(string) => {
+ // Expect one of: "-> target", "source -> target", or "source ->".
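+ // A hypothetical example: RUST_DEP_GRAPH_FILTER="typeck -> optimized_mir"
+ // would restrict the dump to paths from nodes whose labels contain
+ // "typeck" to nodes whose labels contain "optimized_mir" (the substring
+ // matching is done by `node_set`/`filter_nodes` below).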
+ let edge_filter =
+ EdgeFilter::new(&string).unwrap_or_else(|e| bug!("invalid filter: {}", e));
+ let sources = node_set(&query, &edge_filter.source);
+ let targets = node_set(&query, &edge_filter.target);
+ filter_nodes(&query, &sources, &targets)
+ }
+ Err(_) => query.nodes().into_iter().map(|n| n.kind).collect(),
+ };
+ let edges = filter_edges(&query, &nodes);
+
+ {
+ // dump a .txt file with just the edges:
+ let txt_path = format!("{}.txt", path);
+ let mut file = BufWriter::new(File::create(&txt_path).unwrap());
+ for &(ref source, ref target) in &edges {
+ write!(file, "{:?} -> {:?}\n", source, target).unwrap();
+ }
+ }
+
+ {
+ // dump a .dot file in graphviz format:
+ let dot_path = format!("{}.dot", path);
+ let mut v = Vec::new();
+ dot::render(&GraphvizDepGraph(nodes, edges), &mut v).unwrap();
+ fs::write(dot_path, v).unwrap();
+ }
+}
+
+#[allow(missing_docs)]
+pub struct GraphvizDepGraph(FxHashSet<DepKind>, Vec<(DepKind, DepKind)>);
+
+impl<'a> dot::GraphWalk<'a> for GraphvizDepGraph {
+ type Node = DepKind;
+ type Edge = (DepKind, DepKind);
+ fn nodes(&self) -> dot::Nodes<'_, DepKind> {
+ let nodes: Vec<_> = self.0.iter().cloned().collect();
+ nodes.into()
+ }
+ fn edges(&self) -> dot::Edges<'_, (DepKind, DepKind)> {
+ self.1[..].into()
+ }
+ fn source(&self, edge: &(DepKind, DepKind)) -> DepKind {
+ edge.0
+ }
+ fn target(&self, edge: &(DepKind, DepKind)) -> DepKind {
+ edge.1
+ }
+}
+
+impl<'a> dot::Labeller<'a> for GraphvizDepGraph {
+ type Node = DepKind;
+ type Edge = (DepKind, DepKind);
+ fn graph_id(&self) -> dot::Id<'_> {
+ dot::Id::new("DependencyGraph").unwrap()
+ }
+ fn node_id(&self, n: &DepKind) -> dot::Id<'_> {
+ let s: String = format!("{:?}", n)
+ .chars()
+ .map(|c| if c == '_' || c.is_alphanumeric() { c } else { '_' })
+ .collect();
+ debug!("n={:?} s={:?}", n, s);
+ dot::Id::new(s).unwrap()
+ }
+ fn node_label(&self, n: &DepKind) -> dot::LabelText<'_> {
+ dot::LabelText::label(format!("{:?}", n))
+ }
+}
+
+// Given an optional filter like `"x,y,z"`, returns either `None` (no
+// filter) or the set of nodes whose labels contain all of those
+// substrings.
+fn node_set<'q>(
+ query: &'q DepGraphQuery,
+ filter: &DepNodeFilter,
+) -> Option<FxHashSet<&'q DepNode>> {
+ debug!("node_set(filter={:?})", filter);
+
+ if filter.accepts_all() {
+ return None;
+ }
+
+ Some(query.nodes().into_iter().filter(|n| filter.test(n)).collect())
+}
+
+fn filter_nodes<'q>(
+ query: &'q DepGraphQuery,
+ sources: &Option<FxHashSet<&'q DepNode>>,
+ targets: &Option<FxHashSet<&'q DepNode>>,
+) -> FxHashSet<DepKind> {
+ if let Some(sources) = sources {
+ if let Some(targets) = targets {
+ walk_between(query, sources, targets)
+ } else {
+ walk_nodes(query, sources, OUTGOING)
+ }
+ } else if let Some(targets) = targets {
+ walk_nodes(query, targets, INCOMING)
+ } else {
+ query.nodes().into_iter().map(|n| n.kind).collect()
+ }
+}
+
+fn walk_nodes<'q>(
+ query: &'q DepGraphQuery,
+ starts: &FxHashSet<&'q DepNode>,
+ direction: Direction,
+) -> FxHashSet<DepKind> {
+ let mut set = FxHashSet::default();
+ for &start in starts {
+ debug!("walk_nodes: start={:?} outgoing?={:?}", start, direction == OUTGOING);
+ if set.insert(start.kind) {
+ let mut stack = vec![query.indices[start]];
+ while let Some(index) = stack.pop() {
+ for (_, edge) in query.graph.adjacent_edges(index, direction) {
+ let neighbor_index = edge.source_or_target(direction);
+ let neighbor = query.graph.node_data(neighbor_index);
+ if set.insert(neighbor.kind) {
+ stack.push(neighbor_index);
+ }
+ }
+ }
+ }
+ }
+ set
+}
+
+fn walk_between<'q>(
+ query: &'q DepGraphQuery,
+ sources: &FxHashSet<&'q DepNode>,
+ targets: &FxHashSet<&'q DepNode>,
+) -> FxHashSet<DepKind> {
+ // This is a bit tricky. We want to include a node only if it is:
+ // (a) reachable from a source and (b) will reach a target. And we
+ // have to be careful about cycles etc. Luckily efficiency is not
+ // a big concern!
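+ //
+ // Concretely, `recurse` below does a DFS from each source: a node ends
+ // up `Included` if some successor is already `Included` (initially just
+ // the targets), while `Deciding` marks nodes on the current DFS stack so
+ // that back-edges return `false` instead of recursing forever.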
+
+ #[derive(Copy, Clone, PartialEq)]
+ enum State {
+ Undecided,
+ Deciding,
+ Included,
+ Excluded,
+ }
+
+ let mut node_states = vec![State::Undecided; query.graph.len_nodes()];
+
+ for &target in targets {
+ node_states[query.indices[target].0] = State::Included;
+ }
+
+ for source in sources.iter().map(|&n| query.indices[n]) {
+ recurse(query, &mut node_states, source);
+ }
+
+ return query
+ .nodes()
+ .into_iter()
+ .filter(|&n| {
+ let index = query.indices[n];
+ node_states[index.0] == State::Included
+ })
+ .map(|n| n.kind)
+ .collect();
+
+ fn recurse(query: &DepGraphQuery, node_states: &mut [State], node: NodeIndex) -> bool {
+ match node_states[node.0] {
+ // known to reach a target
+ State::Included => return true,
+
+ // known not to reach a target
+ State::Excluded => return false,
+
+ // backedge, not yet known, say false
+ State::Deciding => return false,
+
+ State::Undecided => {}
+ }
+
+ node_states[node.0] = State::Deciding;
+
+ for neighbor_index in query.graph.successor_nodes(node) {
+ if recurse(query, node_states, neighbor_index) {
+ node_states[node.0] = State::Included;
+ }
+ }
+
+ // if we didn't find a path to target, then set to excluded
+ if node_states[node.0] == State::Deciding {
+ node_states[node.0] = State::Excluded;
+ false
+ } else {
+ assert!(node_states[node.0] == State::Included);
+ true
+ }
+ }
+}
+
+fn filter_edges<'q>(
+ query: &'q DepGraphQuery,
+ nodes: &FxHashSet<DepKind>,
+) -> Vec<(DepKind, DepKind)> {
+ let uniq: FxHashSet<_> = query
+ .edges()
+ .into_iter()
+ .map(|(s, t)| (s.kind, t.kind))
+ .filter(|(source, target)| nodes.contains(source) && nodes.contains(target))
+ .collect();
+ uniq.into_iter().collect()
+}
diff --git a/compiler/rustc_incremental/src/assert_module_sources.rs b/compiler/rustc_incremental/src/assert_module_sources.rs
new file mode 100644
index 000000000..89d419bc8
--- /dev/null
+++ b/compiler/rustc_incremental/src/assert_module_sources.rs
@@ -0,0 +1,178 @@
+//! This pass is only used for UNIT TESTS related to incremental
+//! compilation. It tests whether a particular `.o` file will be re-used
+//! from a previous compilation or whether it must be regenerated.
+//!
+//! The user adds annotations to the crate of the following form:
+//!
+//! ```
+//! # #![feature(rustc_attrs)]
+//! #![rustc_partition_reused(module="spike", cfg="rpass2")]
+//! #![rustc_partition_codegened(module="spike-x", cfg="rpass2")]
+//! ```
+//!
+//! The first indicates (in the cfg `rpass2`) that `spike.o` will be
+//! reused, the second that `spike-x.o` will be recreated. If these
+//! annotations are inaccurate, errors are reported.
+//!
+//! The reason that we use `cfg=...` and not `#[cfg_attr]` is so that
+//! the HIR doesn't change as a result of the annotations, which might
+//! perturb the reuse results.
+//!
+//! `#![rustc_expected_cgu_reuse(module="spike", cfg="rpass2", kind="post-lto")]`
+//! allows for a more fine-grained check of whether pre- or post-LTO data
+//! was reused.
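+//!
+//! For example, mirroring the attributes above, one could assert that
+//! `spike.o` is reused specifically from post-LTO artifacts:
+//!
+//! ```
+//! # #![feature(rustc_attrs)]
+//! #![rustc_expected_cgu_reuse(module="spike", cfg="rpass2", kind="post-lto")]
+//! ```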
+
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::mir::mono::CodegenUnitNameBuilder;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::cgu_reuse_tracker::*;
+use rustc_span::symbol::{sym, Symbol};
+
+#[allow(missing_docs)]
+pub fn assert_module_sources(tcx: TyCtxt<'_>) {
+ tcx.dep_graph.with_ignore(|| {
+ if tcx.sess.opts.incremental.is_none() {
+ return;
+ }
+
+ let available_cgus =
+ tcx.collect_and_partition_mono_items(()).1.iter().map(|cgu| cgu.name()).collect();
+
+ let ams = AssertModuleSource { tcx, available_cgus };
+
+ for attr in tcx.hir().attrs(rustc_hir::CRATE_HIR_ID) {
+ ams.check_attr(attr);
+ }
+ })
+}
+
+struct AssertModuleSource<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ available_cgus: FxHashSet<Symbol>,
+}
+
+impl<'tcx> AssertModuleSource<'tcx> {
+ fn check_attr(&self, attr: &ast::Attribute) {
+ let (expected_reuse, comp_kind) = if attr.has_name(sym::rustc_partition_reused) {
+ (CguReuse::PreLto, ComparisonKind::AtLeast)
+ } else if attr.has_name(sym::rustc_partition_codegened) {
+ (CguReuse::No, ComparisonKind::Exact)
+ } else if attr.has_name(sym::rustc_expected_cgu_reuse) {
+ match self.field(attr, sym::kind) {
+ sym::no => (CguReuse::No, ComparisonKind::Exact),
+ sym::pre_dash_lto => (CguReuse::PreLto, ComparisonKind::Exact),
+ sym::post_dash_lto => (CguReuse::PostLto, ComparisonKind::Exact),
+ sym::any => (CguReuse::PreLto, ComparisonKind::AtLeast),
+ other => {
+ self.tcx.sess.span_fatal(
+ attr.span,
+ &format!("unknown cgu-reuse-kind `{}` specified", other),
+ );
+ }
+ }
+ } else {
+ return;
+ };
+
+ if !self.tcx.sess.opts.unstable_opts.query_dep_graph {
+ self.tcx.sess.span_fatal(
+ attr.span,
+ "found CGU-reuse attribute but `-Zquery-dep-graph` was not specified",
+ );
+ }
+
+ if !self.check_config(attr) {
+ debug!("check_attr: config does not match, ignoring attr");
+ return;
+ }
+
+ let user_path = self.field(attr, sym::module).to_string();
+ let crate_name = self.tcx.crate_name(LOCAL_CRATE).to_string();
+
+ if !user_path.starts_with(&crate_name) {
+ let msg = format!(
+ "Found malformed codegen unit name `{}`. \
+ Codegen unit names must always start with the name of the \
+ crate (`{}` in this case).",
+ user_path, crate_name
+ );
+ self.tcx.sess.span_fatal(attr.span, &msg);
+ }
+
+ // Split off the "special suffix" if there is one.
+ let (user_path, cgu_special_suffix) = if let Some(index) = user_path.rfind('.') {
+ (&user_path[..index], Some(&user_path[index + 1..]))
+ } else {
+ (&user_path[..], None)
+ };
+
+ let mut iter = user_path.split('-');
+
+ // Remove the crate name
+ assert_eq!(iter.next().unwrap(), crate_name);
+
+ let cgu_path_components = iter.collect::<Vec<_>>();
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(self.tcx);
+ let cgu_name =
+ cgu_name_builder.build_cgu_name(LOCAL_CRATE, cgu_path_components, cgu_special_suffix);
+
+ debug!("mapping '{}' to cgu name '{}'", self.field(attr, sym::module), cgu_name);
+
+ if !self.available_cgus.contains(&cgu_name) {
+ let mut cgu_names: Vec<&str> =
+ self.available_cgus.iter().map(|cgu| cgu.as_str()).collect();
+ cgu_names.sort();
+ self.tcx.sess.span_err(
+ attr.span,
+ &format!(
+ "no module named `{}` (mangled: {}). Available modules: {}",
+ user_path,
+ cgu_name,
+ cgu_names.join(", ")
+ ),
+ );
+ }
+
+ self.tcx.sess.cgu_reuse_tracker.set_expectation(
+ cgu_name,
+ &user_path,
+ attr.span,
+ expected_reuse,
+ comp_kind,
+ );
+ }
+
+ fn field(&self, attr: &ast::Attribute, name: Symbol) -> Symbol {
+ for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
+ if item.has_name(name) {
+ if let Some(value) = item.value_str() {
+ return value;
+ } else {
+ self.tcx.sess.span_fatal(
+ item.span(),
+ &format!("associated value expected for `{}`", name),
+ );
+ }
+ }
+ }
+
+ self.tcx.sess.span_fatal(attr.span, &format!("no field `{}`", name));
+ }
+
+ /// Scan for a `cfg="foo"` attribute and check whether we have a
+ /// cfg flag called `foo`.
+ fn check_config(&self, attr: &ast::Attribute) -> bool {
+ let config = &self.tcx.sess.parse_sess.config;
+ let value = self.field(attr, sym::cfg);
+ debug!("check_config(config={:?}, value={:?})", config, value);
+ if config.iter().any(|&(name, _)| name == value) {
+ debug!("check_config: matched");
+ return true;
+ }
+ debug!("check_config: no match found");
+ false
+ }
+}
diff --git a/compiler/rustc_incremental/src/lib.rs b/compiler/rustc_incremental/src/lib.rs
new file mode 100644
index 000000000..1e88e8091
--- /dev/null
+++ b/compiler/rustc_incremental/src/lib.rs
@@ -0,0 +1,30 @@
+//! Support for serializing the dep-graph and reloading it.
+
+#![deny(missing_docs)]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(let_else)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate rustc_middle;
+#[macro_use]
+extern crate tracing;
+
+mod assert_dep_graph;
+pub mod assert_module_sources;
+mod persist;
+
+use assert_dep_graph::assert_dep_graph;
+pub use persist::copy_cgu_workproduct_to_incr_comp_cache_dir;
+pub use persist::delete_workproduct_files;
+pub use persist::finalize_session_directory;
+pub use persist::garbage_collect_session_directories;
+pub use persist::in_incr_comp_dir;
+pub use persist::in_incr_comp_dir_sess;
+pub use persist::load_query_result_cache;
+pub use persist::prepare_session_directory;
+pub use persist::save_dep_graph;
+pub use persist::save_work_product_index;
+pub use persist::LoadResult;
+pub use persist::{build_dep_graph, load_dep_graph, DepGraphFuture};
diff --git a/compiler/rustc_incremental/src/persist/README.md b/compiler/rustc_incremental/src/persist/README.md
new file mode 100644
index 000000000..b01fe219e
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/README.md
@@ -0,0 +1,3 @@
+For info on how the incremental compilation works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html
diff --git a/compiler/rustc_incremental/src/persist/data.rs b/compiler/rustc_incremental/src/persist/data.rs
new file mode 100644
index 000000000..81e541097
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/data.rs
@@ -0,0 +1,13 @@
+//! The data that we will serialize and deserialize.
+
+use rustc_macros::{Decodable, Encodable};
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+
+#[derive(Debug, Encodable, Decodable)]
+pub struct SerializedWorkProduct {
+ /// node that produced the work-product
+ pub id: WorkProductId,
+
+ /// work-product data itself
+ pub work_product: WorkProduct,
+}
diff --git a/compiler/rustc_incremental/src/persist/dirty_clean.rs b/compiler/rustc_incremental/src/persist/dirty_clean.rs
new file mode 100644
index 000000000..710c4a01b
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/dirty_clean.rs
@@ -0,0 +1,480 @@
+//! Debugging code to test fingerprints computed for query results. For each node marked with
+//! `#[rustc_clean]` we will compare the fingerprint from the current and from the previous
+//! compilation session as appropriate:
+//!
+//! - `#[rustc_clean(cfg="rev2", except="typeck")]` if we are
+//! in `#[cfg(rev2)]`, then the fingerprints associated with
+//! `DepNode::typeck(X)` must be DIFFERENT (`X` is the `DefId` of the
+//! current node).
+//! - `#[rustc_clean(cfg="rev2")]` same as above, except that the
+//! fingerprints must be the SAME (along with all other fingerprints).
+//!
+//! - `#[rustc_clean(cfg="rev2", loaded_from_disk="typeck")]` asserts that
+//! the query result for `DepNode::typeck(X)` was actually
+//! loaded from disk (not just marked green). This can be useful
+//! to ensure that a test is actually exercising the deserialization
+//! logic for a particular query result. This can be combined with
+//! `except`.
+//!
+//! Errors are reported if we are in the suitable configuration but
+//! the required condition is not met.
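+//!
+//! For example, a hypothetical test item combining the forms above:
+//!
+//! ```ignore
+//! #[rustc_clean(cfg = "rev2", except = "typeck", loaded_from_disk = "optimized_mir")]
+//! pub fn some_function() {}
+//! ```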
+
+use rustc_ast::{self as ast, Attribute, NestedMetaItem};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::intravisit;
+use rustc_hir::Node as HirNode;
+use rustc_hir::{ImplItemKind, ItemKind as HirItem, TraitItemKind};
+use rustc_middle::dep_graph::{label_strs, DepNode, DepNodeExt};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+use std::iter::FromIterator;
+use std::vec::Vec;
+
+const LOADED_FROM_DISK: Symbol = sym::loaded_from_disk;
+const EXCEPT: Symbol = sym::except;
+const CFG: Symbol = sym::cfg;
+
+// Base and Extra labels to build up the labels
+
+/// For typedefs, constants, and statics
+const BASE_CONST: &[&str] = &[label_strs::type_of];
+
+/// DepNodes for functions + methods
+const BASE_FN: &[&str] = &[
+ // Callers will depend on the signature of these items, so we better test
+ label_strs::fn_sig,
+ label_strs::generics_of,
+ label_strs::predicates_of,
+ label_strs::type_of,
+ // And a big part of compilation (that we eventually want to cache) is type inference
+ // information:
+ label_strs::typeck,
+];
+
+/// DepNodes for Hir, which is pretty much everything
+const BASE_HIR: &[&str] = &[
+ // hir_owner and hir_owner_nodes should be computed for all nodes
+ label_strs::hir_owner,
+ label_strs::hir_owner_nodes,
+];
+
+/// `impl` implementation of struct/trait
+const BASE_IMPL: &[&str] =
+ &[label_strs::associated_item_def_ids, label_strs::generics_of, label_strs::impl_trait_ref];
+
+/// DepNodes for mir_built/Optimized, which is relevant in "executable"
+/// code, i.e., functions+methods
+const BASE_MIR: &[&str] = &[label_strs::optimized_mir, label_strs::promoted_mir];
+
+/// Struct, Enum and Union DepNodes
+///
+/// Note that changing the type of a field does not change the type of the struct or enum, but
+/// adding/removing fields or changing a field's name or visibility does.
+const BASE_STRUCT: &[&str] =
+ &[label_strs::generics_of, label_strs::predicates_of, label_strs::type_of];
+
+/// Extra `DepNode`s for functions and methods.
+const EXTRA_ASSOCIATED: &[&str] = &[label_strs::associated_item];
+
+/// Trait definition `DepNode`s.
+const EXTRA_TRAIT: &[&str] = &[];
+
+// Fully Built Labels
+
+const LABELS_CONST: &[&[&str]] = &[BASE_HIR, BASE_CONST];
+
+/// Constant/Typedef in an impl
+const LABELS_CONST_IN_IMPL: &[&[&str]] = &[BASE_HIR, BASE_CONST, EXTRA_ASSOCIATED];
+
+/// Trait-Const/Typedef DepNodes
+const LABELS_CONST_IN_TRAIT: &[&[&str]] = &[BASE_HIR, BASE_CONST, EXTRA_ASSOCIATED, EXTRA_TRAIT];
+
+/// Function `DepNode`s.
+const LABELS_FN: &[&[&str]] = &[BASE_HIR, BASE_MIR, BASE_FN];
+
+/// Method `DepNode`s.
+const LABELS_FN_IN_IMPL: &[&[&str]] = &[BASE_HIR, BASE_MIR, BASE_FN, EXTRA_ASSOCIATED];
+
+/// Trait method `DepNode`s.
+const LABELS_FN_IN_TRAIT: &[&[&str]] =
+ &[BASE_HIR, BASE_MIR, BASE_FN, EXTRA_ASSOCIATED, EXTRA_TRAIT];
+
+/// For generic cases like inline-assembly, modules, etc.
+const LABELS_HIR_ONLY: &[&[&str]] = &[BASE_HIR];
+
+/// Impl `DepNode`s.
+const LABELS_TRAIT: &[&[&str]] = &[
+ BASE_HIR,
+ &[label_strs::associated_item_def_ids, label_strs::predicates_of, label_strs::generics_of],
+];
+
+/// Impl `DepNode`s.
+const LABELS_IMPL: &[&[&str]] = &[BASE_HIR, BASE_IMPL];
+
+/// Abstract data type (struct, enum, union) `DepNode`s.
+const LABELS_ADT: &[&[&str]] = &[BASE_HIR, BASE_STRUCT];
+
+// FIXME: Struct/Enum/Unions Fields (there is currently no way to attach these)
+//
+// Fields are kind of separate from their containers, as they can change independently from
+// them. We should at least check
+//
+// type_of for these.
+
+type Labels = FxHashSet<String>;
+
+/// Represents the configuration requested by `rustc_clean`/`dirty`
+struct Assertion {
+ clean: Labels,
+ dirty: Labels,
+ loaded_from_disk: Labels,
+}
+
+pub fn check_dirty_clean_annotations(tcx: TyCtxt<'_>) {
+ if !tcx.sess.opts.unstable_opts.query_dep_graph {
+ return;
+ }
+
+ // can't add `#[rustc_clean]` etc without opting in to this feature
+ if !tcx.features().rustc_attrs {
+ return;
+ }
+
+ tcx.dep_graph.with_ignore(|| {
+ let mut dirty_clean_visitor = DirtyCleanVisitor { tcx, checked_attrs: Default::default() };
+
+ let crate_items = tcx.hir_crate_items(());
+
+ for id in crate_items.items() {
+ dirty_clean_visitor.check_item(id.def_id);
+ }
+
+ for id in crate_items.trait_items() {
+ dirty_clean_visitor.check_item(id.def_id);
+ }
+
+ for id in crate_items.impl_items() {
+ dirty_clean_visitor.check_item(id.def_id);
+ }
+
+ for id in crate_items.foreign_items() {
+ dirty_clean_visitor.check_item(id.def_id);
+ }
+
+ let mut all_attrs = FindAllAttrs { tcx, found_attrs: vec![] };
+ tcx.hir().walk_attributes(&mut all_attrs);
+
+ // Note that we cannot use the existing "unused attribute"-infrastructure
+ // here, since that is running before codegen. This is also the reason why
+ // all codegen-specific attributes are `AssumedUsed` in rustc_ast::feature_gate.
+ all_attrs.report_unchecked_attrs(dirty_clean_visitor.checked_attrs);
+ })
+}
+
+pub struct DirtyCleanVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ checked_attrs: FxHashSet<ast::AttrId>,
+}
+
+impl<'tcx> DirtyCleanVisitor<'tcx> {
+ /// Possibly "deserialize" the attribute into a clean/dirty assertion
+ fn assertion_maybe(&mut self, item_id: LocalDefId, attr: &Attribute) -> Option<Assertion> {
+ assert!(attr.has_name(sym::rustc_clean));
+ if !check_config(self.tcx, attr) {
+ // skip: not the correct `cfg=`
+ return None;
+ }
+ let assertion = self.assertion_auto(item_id, attr);
+ Some(assertion)
+ }
+
+ /// Gets the "auto" assertion on pre-validated attr, along with the `except` labels.
+ fn assertion_auto(&mut self, item_id: LocalDefId, attr: &Attribute) -> Assertion {
+ let (name, mut auto) = self.auto_labels(item_id, attr);
+ let except = self.except(attr);
+ let loaded_from_disk = self.loaded_from_disk(attr);
+ for e in except.iter() {
+ if !auto.remove(e) {
+ let msg = format!(
+ "`except` specified DepNodes that can not be affected for \"{}\": \"{}\"",
+ name, e
+ );
+ self.tcx.sess.span_fatal(attr.span, &msg);
+ }
+ }
+ Assertion { clean: auto, dirty: except, loaded_from_disk }
+ }
+
+ /// `loaded_from_disk=` attribute value
+ fn loaded_from_disk(&self, attr: &Attribute) -> Labels {
+ for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
+ if item.has_name(LOADED_FROM_DISK) {
+ let value = expect_associated_value(self.tcx, &item);
+ return self.resolve_labels(&item, value);
+ }
+ }
+ // If `loaded_from_disk=` is not specified, don't assert anything
+ Labels::default()
+ }
+
+ /// `except=` attribute value
+ fn except(&self, attr: &Attribute) -> Labels {
+ for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
+ if item.has_name(EXCEPT) {
+ let value = expect_associated_value(self.tcx, &item);
+ return self.resolve_labels(&item, value);
+ }
+ }
+ // if no `label` or `except` is given, only the node's group is asserted
+ Labels::default()
+ }
+
+ /// Returns all DepNode labels that should be asserted for this item.
+ /// The first element of the returned tuple is the "name" used for error
+ /// messages.
+ fn auto_labels(&mut self, item_id: LocalDefId, attr: &Attribute) -> (&'static str, Labels) {
+ let node = self.tcx.hir().get_by_def_id(item_id);
+ let (name, labels) = match node {
+ HirNode::Item(item) => {
+ match item.kind {
+ // note: these are in the same order as hir::Item_;
+ // FIXME(michaelwoerister): do commented out ones
+
+ // // An `extern crate` item, with optional original crate name,
+ // HirItem::ExternCrate(..), // intentionally no assertions
+
+ // // `use foo::bar::*;` or `use foo::bar::baz as quux;`
+ // HirItem::Use(..), // intentionally no assertions
+
+ // A `static` item
+ HirItem::Static(..) => ("ItemStatic", LABELS_CONST),
+
+ // A `const` item
+ HirItem::Const(..) => ("ItemConst", LABELS_CONST),
+
+ // A function declaration
+ HirItem::Fn(..) => ("ItemFn", LABELS_FN),
+
+ // // A module
+ HirItem::Mod(..) => ("ItemMod", LABELS_HIR_ONLY),
+
+ // // An external module
+ HirItem::ForeignMod { .. } => ("ItemForeignMod", LABELS_HIR_ONLY),
+
+ // Module-level inline assembly (from global_asm!)
+ HirItem::GlobalAsm(..) => ("ItemGlobalAsm", LABELS_HIR_ONLY),
+
+ // A type alias, e.g., `type Foo = Bar<u8>`
+ HirItem::TyAlias(..) => ("ItemTy", LABELS_HIR_ONLY),
+
+ // An enum definition, e.g., `enum Foo<A, B> {C<A>, D<B>}`
+ HirItem::Enum(..) => ("ItemEnum", LABELS_ADT),
+
+ // A struct definition, e.g., `struct Foo<A> {x: A}`
+ HirItem::Struct(..) => ("ItemStruct", LABELS_ADT),
+
+ // A union definition, e.g., `union Foo<A, B> {x: A, y: B}`
+ HirItem::Union(..) => ("ItemUnion", LABELS_ADT),
+
+ // Represents a Trait Declaration
+ HirItem::Trait(..) => ("ItemTrait", LABELS_TRAIT),
+
+ // An implementation, eg `impl<A> Trait for Foo { .. }`
+ HirItem::Impl { .. } => ("ItemKind::Impl", LABELS_IMPL),
+
+ _ => self.tcx.sess.span_fatal(
+ attr.span,
+ &format!(
+ "clean/dirty auto-assertions not yet defined \
+ for Node::Item.node={:?}",
+ item.kind
+ ),
+ ),
+ }
+ }
+ HirNode::TraitItem(item) => match item.kind {
+ TraitItemKind::Fn(..) => ("Node::TraitItem", LABELS_FN_IN_TRAIT),
+ TraitItemKind::Const(..) => ("NodeTraitConst", LABELS_CONST_IN_TRAIT),
+ TraitItemKind::Type(..) => ("NodeTraitType", LABELS_CONST_IN_TRAIT),
+ },
+ HirNode::ImplItem(item) => match item.kind {
+ ImplItemKind::Fn(..) => ("Node::ImplItem", LABELS_FN_IN_IMPL),
+ ImplItemKind::Const(..) => ("NodeImplConst", LABELS_CONST_IN_IMPL),
+ ImplItemKind::TyAlias(..) => ("NodeImplType", LABELS_CONST_IN_IMPL),
+ },
+ _ => self.tcx.sess.span_fatal(
+ attr.span,
+ &format!("clean/dirty auto-assertions not yet defined for {:?}", node),
+ ),
+ };
+ let labels =
+ Labels::from_iter(labels.iter().flat_map(|s| s.iter().map(|l| (*l).to_string())));
+ (name, labels)
+ }
+
+ fn resolve_labels(&self, item: &NestedMetaItem, value: Symbol) -> Labels {
+ let mut out = Labels::default();
+ for label in value.as_str().split(',') {
+ let label = label.trim();
+ if DepNode::has_label_string(label) {
+ if out.contains(label) {
+ self.tcx.sess.span_fatal(
+ item.span(),
+ &format!("dep-node label `{}` is repeated", label),
+ );
+ }
+ out.insert(label.to_string());
+ } else {
+ self.tcx
+ .sess
+ .span_fatal(item.span(), &format!("dep-node label `{}` not recognized", label));
+ }
+ }
+ out
+ }
+
+ fn dep_node_str(&self, dep_node: &DepNode) -> String {
+ if let Some(def_id) = dep_node.extract_def_id(self.tcx) {
+ format!("{:?}({})", dep_node.kind, self.tcx.def_path_str(def_id))
+ } else {
+ format!("{:?}({:?})", dep_node.kind, dep_node.hash)
+ }
+ }
+
+ fn assert_dirty(&self, item_span: Span, dep_node: DepNode) {
+ debug!("assert_dirty({:?})", dep_node);
+
+ if self.tcx.dep_graph.is_green(&dep_node) {
+ let dep_node_str = self.dep_node_str(&dep_node);
+ self.tcx
+ .sess
+ .span_err(item_span, &format!("`{}` should be dirty but is not", dep_node_str));
+ }
+ }
+
+ fn assert_clean(&self, item_span: Span, dep_node: DepNode) {
+ debug!("assert_clean({:?})", dep_node);
+
+ if self.tcx.dep_graph.is_red(&dep_node) {
+ let dep_node_str = self.dep_node_str(&dep_node);
+ self.tcx
+ .sess
+ .span_err(item_span, &format!("`{}` should be clean but is not", dep_node_str));
+ }
+ }
+
+ fn assert_loaded_from_disk(&self, item_span: Span, dep_node: DepNode) {
+ debug!("assert_loaded_from_disk({:?})", dep_node);
+
+ if !self.tcx.dep_graph.debug_was_loaded_from_disk(dep_node) {
+ let dep_node_str = self.dep_node_str(&dep_node);
+ self.tcx.sess.span_err(
+ item_span,
+ &format!("`{}` should have been loaded from disk but it was not", dep_node_str),
+ );
+ }
+ }
+
+ fn check_item(&mut self, item_id: LocalDefId) {
+ let item_span = self.tcx.def_span(item_id.to_def_id());
+ let def_path_hash = self.tcx.def_path_hash(item_id.to_def_id());
+ for attr in self.tcx.get_attrs(item_id.to_def_id(), sym::rustc_clean) {
+ let Some(assertion) = self.assertion_maybe(item_id, attr) else {
+ continue;
+ };
+ self.checked_attrs.insert(attr.id);
+ for label in assertion.clean {
+ let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
+ self.assert_clean(item_span, dep_node);
+ }
+ for label in assertion.dirty {
+ let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
+ self.assert_dirty(item_span, dep_node);
+ }
+ for label in assertion.loaded_from_disk {
+ let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
+ self.assert_loaded_from_disk(item_span, dep_node);
+ }
+ }
+ }
+}
+
+/// Given a `#[rustc_clean]` attribute, scan for a `cfg="foo"` attribute and check whether we have
+/// a cfg flag called `foo`.
+fn check_config(tcx: TyCtxt<'_>, attr: &Attribute) -> bool {
+ debug!("check_config(attr={:?})", attr);
+ let config = &tcx.sess.parse_sess.config;
+ debug!("check_config: config={:?}", config);
+ let mut cfg = None;
+ for item in attr.meta_item_list().unwrap_or_else(Vec::new) {
+ if item.has_name(CFG) {
+ let value = expect_associated_value(tcx, &item);
+ debug!("check_config: searching for cfg {:?}", value);
+ cfg = Some(config.contains(&(value, None)));
+ } else if !(item.has_name(EXCEPT) || item.has_name(LOADED_FROM_DISK)) {
+ tcx.sess.span_err(attr.span, &format!("unknown item `{}`", item.name_or_empty()));
+ }
+ }
+
+ match cfg {
+ None => tcx.sess.span_fatal(attr.span, "no cfg attribute"),
+ Some(c) => c,
+ }
+}
+
+fn expect_associated_value(tcx: TyCtxt<'_>, item: &NestedMetaItem) -> Symbol {
+ if let Some(value) = item.value_str() {
+ value
+ } else {
+ let msg = if let Some(ident) = item.ident() {
+ format!("associated value expected for `{}`", ident)
+ } else {
+ "expected an associated value".to_string()
+ };
+
+ tcx.sess.span_fatal(item.span(), &msg);
+ }
+}
+
+// A visitor that collects all #[rustc_clean] attributes from
+// the HIR. It is used to verify that we really ran checks for all annotated
+// nodes.
+pub struct FindAllAttrs<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ found_attrs: Vec<&'tcx Attribute>,
+}
+
+impl<'tcx> FindAllAttrs<'tcx> {
+ fn is_active_attr(&mut self, attr: &Attribute) -> bool {
+ if attr.has_name(sym::rustc_clean) && check_config(self.tcx, attr) {
+ return true;
+ }
+
+ false
+ }
+
+ fn report_unchecked_attrs(&self, mut checked_attrs: FxHashSet<ast::AttrId>) {
+ for attr in &self.found_attrs {
+ if !checked_attrs.contains(&attr.id) {
+ self.tcx.sess.span_err(attr.span, "found unchecked `#[rustc_clean]` attribute");
+ checked_attrs.insert(attr.id);
+ }
+ }
+ }
+}
+
+impl<'tcx> intravisit::Visitor<'tcx> for FindAllAttrs<'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_attribute(&mut self, attr: &'tcx Attribute) {
+ if self.is_active_attr(attr) {
+ self.found_attrs.push(attr);
+ }
+ }
+}
diff --git a/compiler/rustc_incremental/src/persist/file_format.rs b/compiler/rustc_incremental/src/persist/file_format.rs
new file mode 100644
index 000000000..2dbd4b6bc
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/file_format.rs
@@ -0,0 +1,195 @@
+//! This module defines a generic file format that allows checking whether a given
+//! file generated by incremental compilation was generated by a compatible
+//! compiler version. This file format is used for the on-disk version of the
+//! dependency graph and the exported metadata hashes.
+//!
+//! In practice "compatible compiler version" means "exactly the same compiler
+//! version", since the header encodes the git commit hash of the compiler.
+//! Since we can always just ignore the incremental compilation cache and
+//! compiler versions don't change frequently for the typical user, being
+//! conservative here practically has no downside.
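+//!
+//! Concretely, the header written by `write_file_header` below is laid out
+//! as follows (a sketch derived from the code in this module):
+//!
+//! ```text
+//! [ 4 bytes  FILE_MAGIC ("RSIC")                  ]
+//! [ 2 bytes  HEADER_FORMAT_VERSION, little-endian ]
+//! [ 1 byte   length n of the version string       ]
+//! [ n bytes  RUSTC_VERSION string                 ]
+//! [ ...      payload, e.g. the dep-graph          ]
+//! ```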
+
+use std::env;
+use std::fs;
+use std::io::{self, Read};
+use std::path::{Path, PathBuf};
+
+use rustc_data_structures::memmap::Mmap;
+use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
+use rustc_serialize::Encoder;
+use rustc_session::Session;
+
+/// The first few bytes of files generated by incremental compilation.
+const FILE_MAGIC: &[u8] = b"RSIC";
+
+/// Change this if the header format changes.
+const HEADER_FORMAT_VERSION: u16 = 0;
+
+/// A version string that hopefully is always different for compiler versions
+/// with different encodings of incremental compilation artifacts. Contains
+/// the Git commit hash.
+const RUSTC_VERSION: Option<&str> = option_env!("CFG_VERSION");
+
+pub(crate) fn write_file_header(stream: &mut FileEncoder, nightly_build: bool) {
+ stream.emit_raw_bytes(FILE_MAGIC);
+ stream
+ .emit_raw_bytes(&[(HEADER_FORMAT_VERSION >> 0) as u8, (HEADER_FORMAT_VERSION >> 8) as u8]);
+
+ let rustc_version = rustc_version(nightly_build);
+ assert_eq!(rustc_version.len(), (rustc_version.len() as u8) as usize);
+ stream.emit_raw_bytes(&[rustc_version.len() as u8]);
+ stream.emit_raw_bytes(rustc_version.as_bytes());
+}
+
+pub(crate) fn save_in<F>(sess: &Session, path_buf: PathBuf, name: &str, encode: F)
+where
+ F: FnOnce(FileEncoder) -> FileEncodeResult,
+{
+ debug!("save: storing data in {}", path_buf.display());
+
+ // Delete the old file, if any.
+ // Note: It's important that we actually delete the old file and not just
+ // truncate and overwrite it, since it might be a shared hard-link, the
+ // underlying data of which we don't want to modify.
+ //
+ // We have to ensure we have dropped the memory maps to this file
+ // before performing this removal.
+ match fs::remove_file(&path_buf) {
+ Ok(()) => {
+ debug!("save: remove old file");
+ }
+ Err(err) if err.kind() == io::ErrorKind::NotFound => (),
+ Err(err) => {
+ sess.err(&format!(
+ "unable to delete old {} at `{}`: {}",
+ name,
+ path_buf.display(),
+ err
+ ));
+ return;
+ }
+ }
+
+ let mut encoder = match FileEncoder::new(&path_buf) {
+ Ok(encoder) => encoder,
+ Err(err) => {
+ sess.err(&format!("failed to create {} at `{}`: {}", name, path_buf.display(), err));
+ return;
+ }
+ };
+
+ write_file_header(&mut encoder, sess.is_nightly_build());
+
+ match encode(encoder) {
+ Ok(position) => {
+ sess.prof.artifact_size(
+ &name.replace(' ', "_"),
+ path_buf.file_name().unwrap().to_string_lossy(),
+ position as u64,
+ );
+ debug!("save: data written to disk successfully");
+ }
+ Err(err) => {
+ sess.err(&format!("failed to write {} to `{}`: {}", name, path_buf.display(), err));
+ }
+ }
+}
+
+/// Reads the contents of a file with a file header as defined in this module.
+///
+/// - Returns `Ok(Some(data, pos))` if the file existed and was generated by a
+/// compatible compiler version. `data` is the entire contents of the file
+/// and `pos` points to the first byte after the header.
+/// - Returns `Ok(None)` if the file did not exist or was generated by an
+/// incompatible version of the compiler.
+/// - Returns `Err(..)` if some kind of IO error occurred while reading the
+/// file.
+pub fn read_file(
+ report_incremental_info: bool,
+ path: &Path,
+ nightly_build: bool,
+) -> io::Result<Option<(Mmap, usize)>> {
+ let file = match fs::File::open(path) {
+ Ok(file) => file,
+ Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(None),
+ Err(err) => return Err(err),
+ };
+ // SAFETY: This process must not modify nor remove the backing file while the memory map lives.
+ // For the dep-graph and the work product index, it is as soon as the decoding is done.
+ // For the query result cache, the memory map is dropped in save_dep_graph before calling
+ // save_in and trying to remove the backing file.
+ //
+ // There is no way to prevent another process from modifying this file.
+ let mmap = unsafe { Mmap::map(file) }?;
+
+ let mut file = io::Cursor::new(&*mmap);
+
+ // Check FILE_MAGIC
+ {
+ debug_assert!(FILE_MAGIC.len() == 4);
+ let mut file_magic = [0u8; 4];
+ file.read_exact(&mut file_magic)?;
+ if file_magic != FILE_MAGIC {
+ report_format_mismatch(report_incremental_info, path, "Wrong FILE_MAGIC");
+ return Ok(None);
+ }
+ }
+
+ // Check HEADER_FORMAT_VERSION
+ {
+ debug_assert!(::std::mem::size_of_val(&HEADER_FORMAT_VERSION) == 2);
+ let mut header_format_version = [0u8; 2];
+ file.read_exact(&mut header_format_version)?;
+ let header_format_version =
+ (header_format_version[0] as u16) | ((header_format_version[1] as u16) << 8);
+
+ if header_format_version != HEADER_FORMAT_VERSION {
+ report_format_mismatch(report_incremental_info, path, "Wrong HEADER_FORMAT_VERSION");
+ return Ok(None);
+ }
+ }
+
+ // Check RUSTC_VERSION
+ {
+ let mut rustc_version_str_len = [0u8; 1];
+ file.read_exact(&mut rustc_version_str_len)?;
+ let rustc_version_str_len = rustc_version_str_len[0] as usize;
+ let mut buffer = vec![0; rustc_version_str_len];
+ file.read_exact(&mut buffer)?;
+
+ if buffer != rustc_version(nightly_build).as_bytes() {
+ report_format_mismatch(report_incremental_info, path, "Different compiler version");
+ return Ok(None);
+ }
+ }
+
+ let post_header_start_pos = file.position() as usize;
+ Ok(Some((mmap, post_header_start_pos)))
+}
+
+fn report_format_mismatch(report_incremental_info: bool, file: &Path, message: &str) {
+ debug!("read_file: {}", message);
+
+ if report_incremental_info {
+ eprintln!(
+ "[incremental] ignoring cache artifact `{}`: {}",
+ file.file_name().unwrap().to_string_lossy(),
+ message
+ );
+ }
+}
+
+fn rustc_version(nightly_build: bool) -> String {
+ if nightly_build {
+ if let Some(val) = env::var_os("RUSTC_FORCE_RUSTC_VERSION") {
+ return val.to_string_lossy().into_owned();
+ }
+ }
+
+ RUSTC_VERSION
+ .expect(
+ "Cannot use rustc without explicit version for \
+ incremental compilation",
+ )
+ .to_string()
+}
diff --git a/compiler/rustc_incremental/src/persist/fs.rs b/compiler/rustc_incremental/src/persist/fs.rs
new file mode 100644
index 000000000..25c1b2e1c
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/fs.rs
@@ -0,0 +1,984 @@
+//! This module manages how the incremental compilation cache is represented in
+//! the file system.
+//!
+//! Incremental compilation caches are managed according to a copy-on-write
+//! strategy: Once a complete, consistent cache version is finalized, it is
+//! never modified. Instead, when a subsequent compilation session is started,
+//! the compiler will allocate a new version of the cache that starts out as
+//! a copy of the previous version. Then only this new copy is modified and it
+//! will not be visible to other processes until it is finalized. This ensures
+//! that multiple compiler processes can be executed concurrently for the same
+//! crate without interfering with each other or blocking each other.
+//!
+//! More concretely this is implemented via the following protocol:
+//!
+//! 1. For a newly started compilation session, the compiler allocates a
+//! new `session` directory within the incremental compilation directory.
+//! This session directory will have a unique name that ends with the suffix
+//! "-working" and that contains a creation timestamp.
+//! 2. Next, the compiler looks for the newest finalized session directory,
+//! that is, a session directory from a previous compilation session that
+//! has been marked as valid and consistent. A session directory is
+//! considered finalized if the "-working" suffix in the directory name has
+//! been replaced by the SVH of the crate.
+//! 3. Once the compiler has found a valid, finalized session directory, it will
+//! hard-link/copy its contents into the new "-working" directory. If all
+//! goes well, it will have its own, private copy of the source directory and
+//! subsequently not have to worry about synchronizing with other compiler
+//! processes.
+//! 4. Now the compiler can do its normal compilation process, which involves
+//! reading and updating its private session directory.
+//! 5. When compilation finishes without errors, the private session directory
+//! will be in a state where it can be used as input for other compilation
+//! sessions. That is, it will contain a dependency graph and cache artifacts
+//! that are consistent with the state of the source code it was compiled
+//! from, with no need to change them ever again. At this point, the compiler
+//! finalizes and "publishes" its private session directory by renaming it
+//! from "s-{timestamp}-{random}-working" to "s-{timestamp}-{SVH}".
+//! 6. At this point the "old" session directory that we copied our data from
+//! at the beginning of the session has become obsolete because we have just
+//! published a more current version. Thus the compiler will delete it.
+//!
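+//! On disk, the incremental compilation directory for a crate therefore
+//! looks roughly like this (an illustrative sketch; the exact names come
+//! from `generate_session_dir_path` and `finalize_session_directory` below):
+//!
+//! ```text
+//! {incr-comp-dir}/{crate-name-and-disambiguator}/
+//!     s-{timestamp}-{random}-working/  <- private session dir, in progress
+//!     s-{timestamp}-{random}-{svh}/    <- a finalized session dir
+//!     s-{timestamp}-{random}.lock      <- lock file (same name before and
+//!                                         after finalization)
+//! ```
+//!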
+//! ## Garbage Collection
+//!
+//! Naively following the above protocol might lead to old session directories
+//! piling up if a compiler instance crashes for some reason before it's able to
+//! remove its private session directory. In order to avoid wasting disk space,
+//! the compiler also does some garbage collection each time it is started in
+//! incremental compilation mode. Specifically, it will scan the incremental
+//! compilation directory for private session directories that are not in use
+//! any more and will delete those. It will also delete any finalized session
+//! directories for a given crate except for the most recent one.
+//!
+//! ## Synchronization
+//!
+//! There is some synchronization needed in order for the compiler to be able to
+//! determine whether a given private session directory is no longer in use.
+//! This is done by creating a lock file for each session directory and
+//! locking it while the directory is still being used. Since file locks have
+//! operating system support, we can rely on the lock being released if the
+//! compiler process dies for some unexpected reason. Thus, when garbage
+//! collecting private session directories, the collecting process can determine
+//! whether the directory is still in use by trying to acquire a lock on the
+//! file. If locking the file fails, the original process must still be alive.
+//! If locking the file succeeds, we know that the owning process is not alive
+//! any more and we can safely delete the directory.
+//! There is still a small time window between the original process creating the
+//! lock file and actually locking it. In order to minimize the chance that
+//! another process tries to acquire the lock in just that instance, only
+//! session directories that are older than a few seconds are considered for
+//! garbage collection.
+//!
+//! Another case that has to be considered is what happens if one process
+//! deletes a finalized session directory that another process is currently
+//! trying to copy from. This case is also handled via the lock file. Before
+//! a process starts copying a finalized session directory, it will acquire a
+//! shared lock on the directory's lock file. Any garbage collecting process,
+//! on the other hand, will acquire an exclusive lock on the lock file.
+//! Thus, if a directory is being collected, any reader process will fail
+//! acquiring the shared lock and will leave the directory alone. Conversely,
+//! if a collecting process can't acquire the exclusive lock because the
+//! directory is currently being read from, it will leave collecting that
+//! directory to another process at a later point in time.
+//! The exact same scheme is also used when reading the metadata hashes file
+//! from an extern crate. When a crate is compiled, the hash values of its
+//! metadata are stored in a file in its session directory. When the
+//! compilation session of another crate imports the first crate's metadata,
+//! it also has to read in the accompanying metadata hashes. It thus will access
+//! the finalized session directory of all crates it links to and while doing
+//! so, it will also place a read lock on the respective session directory
+//! so that it won't be deleted while the metadata hashes are loaded.
+//!
+//! ## Preconditions
+//!
+//! This system relies on two features being available in the file system in
+//! order to work really well: file locking and hard linking.
+//! If hard linking is not available (like on FAT) the data in the cache
+//! actually has to be copied at the beginning of each session.
+//! If file locking does not work reliably (like on NFS), some of the
+//! synchronization will go haywire.
+//! In both cases we recommend locating the incremental compilation directory
+//! on a file system that supports both.
+//! It might be a good idea though to try and detect whether we are on an
+//! unsupported file system and emit a warning in that case. This is not yet
+//! implemented.
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::{base_n, flock};
+use rustc_errors::ErrorGuaranteed;
+use rustc_fs_util::{link_or_copy, LinkOrCopy};
+use rustc_session::{Session, StableCrateId};
+
+use std::fs as std_fs;
+use std::io::{self, ErrorKind};
+use std::mem;
+use std::path::{Path, PathBuf};
+use std::time::{Duration, SystemTime, UNIX_EPOCH};
+
+use rand::{thread_rng, RngCore};
+
+#[cfg(test)]
+mod tests;
+
+const LOCK_FILE_EXT: &str = ".lock";
+const DEP_GRAPH_FILENAME: &str = "dep-graph.bin";
+const STAGING_DEP_GRAPH_FILENAME: &str = "dep-graph.part.bin";
+const WORK_PRODUCTS_FILENAME: &str = "work-products.bin";
+const QUERY_CACHE_FILENAME: &str = "query-cache.bin";
+
+// We encode integers using the following base, so they are shorter than decimal
+// or hexadecimal numbers (we want short file and directory names). Since these
+// numbers will be used in file names, we choose an encoding that is not
+// case-sensitive (as opposed to base64, for example).
+const INT_ENCODE_BASE: usize = base_n::CASE_INSENSITIVE;
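+// For example, assuming `base_n::CASE_INSENSITIVE` is base 36 with digits
+// `0-9a-z`, `base_n::encode(12345, INT_ENCODE_BASE)` would yield "9ix" --
+// three characters instead of the five decimal digits of 12345.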
+
+/// Returns the path to a session's dependency graph.
+pub fn dep_graph_path(sess: &Session) -> PathBuf {
+ in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME)
+}
+/// Returns the path to a session's staging dependency graph.
+///
+/// On the difference between dep-graph and staging dep-graph,
+/// see `build_dep_graph`.
+pub fn staging_dep_graph_path(sess: &Session) -> PathBuf {
+ in_incr_comp_dir_sess(sess, STAGING_DEP_GRAPH_FILENAME)
+}
+pub fn work_products_path(sess: &Session) -> PathBuf {
+ in_incr_comp_dir_sess(sess, WORK_PRODUCTS_FILENAME)
+}
+/// Returns the path to a session's query cache.
+pub fn query_cache_path(sess: &Session) -> PathBuf {
+ in_incr_comp_dir_sess(sess, QUERY_CACHE_FILENAME)
+}
+
+/// Returns the path of the lock file for a given session directory.
+pub fn lock_file_path(session_dir: &Path) -> PathBuf {
+ let crate_dir = session_dir.parent().unwrap();
+
+ let directory_name = session_dir.file_name().unwrap().to_string_lossy();
+ assert_no_characters_lost(&directory_name);
+
+ let dash_indices: Vec<_> = directory_name.match_indices('-').map(|(idx, _)| idx).collect();
+ if dash_indices.len() != 3 {
+ bug!(
+ "Encountered incremental compilation session directory with \
+ malformed name: {}",
+ session_dir.display()
+ )
+ }
+
+ crate_dir.join(&directory_name[0..dash_indices[2]]).with_extension(&LOCK_FILE_EXT[1..])
+}
+
+/// Returns the path for a given filename within the incremental compilation directory
+/// in the current session.
+pub fn in_incr_comp_dir_sess(sess: &Session, file_name: &str) -> PathBuf {
+ in_incr_comp_dir(&sess.incr_comp_session_dir(), file_name)
+}
+
+/// Returns the path for a given filename within the incremental compilation directory,
+/// not necessarily from the current session.
+///
+/// To ensure the file is part of the current session, use [`in_incr_comp_dir_sess`].
+pub fn in_incr_comp_dir(incr_comp_session_dir: &Path, file_name: &str) -> PathBuf {
+ incr_comp_session_dir.join(file_name)
+}
+
+/// Allocates the private session directory.
+///
+/// If the result of this function is `Ok`, we have a valid incremental
+/// compilation session directory. A valid session
+/// directory is one that contains a locked lock file. It may or may not contain
+/// a dep-graph and work products from a previous session.
+///
+/// This always attempts to load a dep-graph from the directory.
+/// If loading fails for some reason, we fall back to a disabled `DepGraph`.
+/// See [`rustc_interface::queries::dep_graph`].
+///
+/// If this function returns an error, it may leave behind an invalid session directory.
+/// The garbage collection will take care of it.
+///
+/// [`rustc_interface::queries::dep_graph`]: ../../rustc_interface/struct.Queries.html#structfield.dep_graph
+pub fn prepare_session_directory(
+ sess: &Session,
+ crate_name: &str,
+ stable_crate_id: StableCrateId,
+) -> Result<(), ErrorGuaranteed> {
+ if sess.opts.incremental.is_none() {
+ return Ok(());
+ }
+
+ let _timer = sess.timer("incr_comp_prepare_session_directory");
+
+ debug!("prepare_session_directory");
+
+ // {incr-comp-dir}/{crate-name-and-disambiguator}
+ let crate_dir = crate_path(sess, crate_name, stable_crate_id);
+ debug!("crate-dir: {}", crate_dir.display());
+ create_dir(sess, &crate_dir, "crate")?;
+
+ // Hack: canonicalize the path *after creating the directory*
+ // because, on Windows, long paths can cause problems;
+ // canonicalization inserts this weird prefix that makes Windows
+ // tolerate long paths.
+ let crate_dir = match crate_dir.canonicalize() {
+ Ok(v) => v,
+ Err(err) => {
+ let reported = sess.err(&format!(
+ "incremental compilation: error canonicalizing path `{}`: {}",
+ crate_dir.display(),
+ err
+ ));
+ return Err(reported);
+ }
+ };
+
+ let mut source_directories_already_tried = FxHashSet::default();
+
+ loop {
+ // Generate a session directory of the form:
+ //
+ // {incr-comp-dir}/{crate-name-and-disambiguator}/s-{timestamp}-{random}-working
+ let session_dir = generate_session_dir_path(&crate_dir);
+ debug!("session-dir: {}", session_dir.display());
+
+ // Lock the new session directory. If this fails, return an
+ // error without retrying
+ let (directory_lock, lock_file_path) = lock_directory(sess, &session_dir)?;
+
+ // Now that we have the lock, we can actually create the session
+ // directory
+ create_dir(sess, &session_dir, "session")?;
+
+ // Find a suitable source directory to copy from. Ignore those that we
+ // have already tried before.
+ let source_directory = find_source_directory(&crate_dir, &source_directories_already_tried);
+
+ let Some(source_directory) = source_directory else {
+ // There's nowhere to copy from, we're done
+ debug!(
+ "no source directory found. Continuing with empty session \
+ directory."
+ );
+
+ sess.init_incr_comp_session(session_dir, directory_lock, false);
+ return Ok(());
+ };
+
+ debug!("attempting to copy data from source: {}", source_directory.display());
+
+ // Try copying over all files from the source directory
+ if let Ok(allows_links) = copy_files(sess, &session_dir, &source_directory) {
+ debug!("successfully copied data from: {}", source_directory.display());
+
+ if !allows_links {
+ sess.warn(&format!(
+ "Hard linking files in the incremental \
+ compilation cache failed. Copying files \
+ instead. Consider moving the cache \
+ directory to a file system which supports \
+ hard linking in session dir `{}`",
+ session_dir.display()
+ ));
+ }
+
+ sess.init_incr_comp_session(session_dir, directory_lock, true);
+ return Ok(());
+ } else {
+ debug!("copying failed - trying next directory");
+
+ // Something went wrong while trying to copy/link files from the
+ // source directory. Try again with a different one.
+ source_directories_already_tried.insert(source_directory);
+
+ // Try to remove the session directory we just allocated. We don't
+ // know if there's any garbage in it from the failed copy action.
+ if let Err(err) = safe_remove_dir_all(&session_dir) {
+ sess.warn(&format!(
+ "Failed to delete partly initialized \
+ session dir `{}`: {}",
+ session_dir.display(),
+ err
+ ));
+ }
+
+ delete_session_dir_lock_file(sess, &lock_file_path);
+ mem::drop(directory_lock);
+ }
+ }
+}
+
+/// This function finalizes and thus 'publishes' the session directory by
+/// renaming it to `s-{timestamp}-{svh}` and releasing the file lock.
+/// If there have been compilation errors, however, this function will just
+/// delete the presumably invalid session directory.
+pub fn finalize_session_directory(sess: &Session, svh: Svh) {
+ if sess.opts.incremental.is_none() {
+ return;
+ }
+
+ let _timer = sess.timer("incr_comp_finalize_session_directory");
+
+ let incr_comp_session_dir: PathBuf = sess.incr_comp_session_dir().clone();
+
+ if sess.has_errors_or_delayed_span_bugs() {
+ // If there have been any errors during compilation, we don't want to
+ // publish this session directory. Rather, we'll just delete it.
+
+ debug!(
+ "finalize_session_directory() - invalidating session directory: {}",
+ incr_comp_session_dir.display()
+ );
+
+ if let Err(err) = safe_remove_dir_all(&*incr_comp_session_dir) {
+ sess.warn(&format!(
+ "Error deleting incremental compilation \
+ session directory `{}`: {}",
+ incr_comp_session_dir.display(),
+ err
+ ));
+ }
+
+ let lock_file_path = lock_file_path(&*incr_comp_session_dir);
+ delete_session_dir_lock_file(sess, &lock_file_path);
+ sess.mark_incr_comp_session_as_invalid();
+ }
+
+ debug!("finalize_session_directory() - session directory: {}", incr_comp_session_dir.display());
+
+ let old_sub_dir_name = incr_comp_session_dir.file_name().unwrap().to_string_lossy();
+ assert_no_characters_lost(&old_sub_dir_name);
+
+ // Keep the 's-{timestamp}-{random-number}' prefix, but replace the
+ // '-working' part with the SVH of the crate
+ let dash_indices: Vec<_> = old_sub_dir_name.match_indices('-').map(|(idx, _)| idx).collect();
+ if dash_indices.len() != 3 {
+ bug!(
+ "Encountered incremental compilation session directory with \
+ malformed name: {}",
+ incr_comp_session_dir.display()
+ )
+ }
+
+ // State: "s-{timestamp}-{random-number}-"
+ let mut new_sub_dir_name = String::from(&old_sub_dir_name[..=dash_indices[2]]);
+
+ // Append the svh
+ base_n::push_str(svh.as_u64() as u128, INT_ENCODE_BASE, &mut new_sub_dir_name);
+
+ // Create the full path
+ let new_path = incr_comp_session_dir.parent().unwrap().join(new_sub_dir_name);
+ debug!("finalize_session_directory() - new path: {}", new_path.display());
+
+ match rename_path_with_retry(&*incr_comp_session_dir, &new_path, 3) {
+ Ok(_) => {
+ debug!("finalize_session_directory() - directory renamed successfully");
+
+ // This unlocks the directory
+ sess.finalize_incr_comp_session(new_path);
+ }
+ Err(e) => {
+ // Warn about the error. However, no need to abort compilation now.
+ sess.warn(&format!(
+ "Error finalizing incremental compilation \
+ session directory `{}`: {}",
+ incr_comp_session_dir.display(),
+ e
+ ));
+
+ debug!("finalize_session_directory() - error, marking as invalid");
+ // Drop the file lock, so we can garbage collect
+ sess.mark_incr_comp_session_as_invalid();
+ }
+ }
+
+ let _ = garbage_collect_session_directories(sess);
+}
+
+pub fn delete_all_session_dir_contents(sess: &Session) -> io::Result<()> {
+ let sess_dir_iterator = sess.incr_comp_session_dir().read_dir()?;
+ for entry in sess_dir_iterator {
+ let entry = entry?;
+ safe_remove_file(&entry.path())?
+ }
+ Ok(())
+}
+
+fn copy_files(sess: &Session, target_dir: &Path, source_dir: &Path) -> Result<bool, ()> {
+ // We acquire a shared lock on the lock file of the directory, so that
+ // nobody deletes it out from under us while we are reading from it.
+ let lock_file_path = lock_file_path(source_dir);
+
+ // not exclusive
+ let Ok(_lock) = flock::Lock::new(
+ &lock_file_path,
+ false, // don't wait,
+ false, // don't create
+ false,
+ ) else {
+ // Could not acquire the lock, don't try to copy from here
+ return Err(());
+ };
+
+ let Ok(source_dir_iterator) = source_dir.read_dir() else {
+ return Err(());
+ };
+
+ let mut files_linked = 0;
+ let mut files_copied = 0;
+
+ for entry in source_dir_iterator {
+ match entry {
+ Ok(entry) => {
+ let file_name = entry.file_name();
+
+ let target_file_path = target_dir.join(file_name);
+ let source_path = entry.path();
+
+ debug!("copying into session dir: {}", source_path.display());
+ match link_or_copy(source_path, target_file_path) {
+ Ok(LinkOrCopy::Link) => files_linked += 1,
+ Ok(LinkOrCopy::Copy) => files_copied += 1,
+ Err(_) => return Err(()),
+ }
+ }
+ Err(_) => return Err(()),
+ }
+ }
+
+ if sess.opts.unstable_opts.incremental_info {
+ eprintln!(
+ "[incremental] session directory: \
+ {} files hard-linked",
+ files_linked
+ );
+ eprintln!(
+ "[incremental] session directory: \
+ {} files copied",
+ files_copied
+ );
+ }
+
+ Ok(files_linked > 0 || files_copied == 0)
+}
+
+/// Generates a unique directory path of the form:
+/// {crate_dir}/s-{timestamp}-{random-number}-working
+fn generate_session_dir_path(crate_dir: &Path) -> PathBuf {
+ let timestamp = timestamp_to_string(SystemTime::now());
+ debug!("generate_session_dir_path: timestamp = {}", timestamp);
+ let random_number = thread_rng().next_u32();
+ debug!("generate_session_dir_path: random_number = {}", random_number);
+
+ let directory_name = format!(
+ "s-{}-{}-working",
+ timestamp,
+ base_n::encode(random_number as u128, INT_ENCODE_BASE)
+ );
+ debug!("generate_session_dir_path: directory_name = {}", directory_name);
+ let directory_path = crate_dir.join(directory_name);
+ debug!("generate_session_dir_path: directory_path = {}", directory_path.display());
+ directory_path
+}
+
+fn create_dir(sess: &Session, path: &Path, dir_tag: &str) -> Result<(), ErrorGuaranteed> {
+ match std_fs::create_dir_all(path) {
+ Ok(()) => {
+ debug!("{} directory created successfully", dir_tag);
+ Ok(())
+ }
+ Err(err) => {
+ let reported = sess.err(&format!(
+ "Could not create incremental compilation {} \
+ directory `{}`: {}",
+ dir_tag,
+ path.display(),
+ err
+ ));
+ Err(reported)
+ }
+ }
+}
+
+/// Allocate the lock-file and lock it.
+fn lock_directory(
+ sess: &Session,
+ session_dir: &Path,
+) -> Result<(flock::Lock, PathBuf), ErrorGuaranteed> {
+ let lock_file_path = lock_file_path(session_dir);
+ debug!("lock_directory() - lock_file: {}", lock_file_path.display());
+
+ match flock::Lock::new(
+ &lock_file_path,
+ false, // don't wait
+ true, // create the lock file
+ true,
+ ) {
+ // the lock should be exclusive
+ Ok(lock) => Ok((lock, lock_file_path)),
+ Err(lock_err) => {
+ let mut err = sess.struct_err(&format!(
+ "incremental compilation: could not create \
+ session directory lock file: {}",
+ lock_err
+ ));
+ if flock::Lock::error_unsupported(&lock_err) {
+ err.note(&format!(
+ "the filesystem for the incremental path at {} \
+ does not appear to support locking, consider changing the \
+ incremental path to a filesystem that supports locking \
+ or disable incremental compilation",
+ session_dir.display()
+ ));
+ if std::env::var_os("CARGO").is_some() {
+ err.help(
+ "incremental compilation can be disabled by setting the \
+ environment variable CARGO_INCREMENTAL=0 (see \
+ https://doc.rust-lang.org/cargo/reference/profiles.html#incremental)",
+ );
+ err.help(
+ "the entire build directory can be changed to a different \
+ filesystem by setting the environment variable CARGO_TARGET_DIR \
+ to a different path (see \
+ https://doc.rust-lang.org/cargo/reference/config.html#buildtarget-dir)",
+ );
+ }
+ }
+ Err(err.emit())
+ }
+ }
+}
+
+fn delete_session_dir_lock_file(sess: &Session, lock_file_path: &Path) {
+ if let Err(err) = safe_remove_file(&lock_file_path) {
+ sess.warn(&format!(
+ "Error deleting lock file for incremental \
+ compilation session directory `{}`: {}",
+ lock_file_path.display(),
+ err
+ ));
+ }
+}
+
+/// Finds the most recent published session directory that is not in the
+/// ignore-list.
+fn find_source_directory(
+ crate_dir: &Path,
+ source_directories_already_tried: &FxHashSet<PathBuf>,
+) -> Option<PathBuf> {
+ let iter = crate_dir
+ .read_dir()
+ .unwrap() // FIXME
+ .filter_map(|e| e.ok().map(|e| e.path()));
+
+ find_source_directory_in_iter(iter, source_directories_already_tried)
+}
+
+fn find_source_directory_in_iter<I>(
+ iter: I,
+ source_directories_already_tried: &FxHashSet<PathBuf>,
+) -> Option<PathBuf>
+where
+ I: Iterator<Item = PathBuf>,
+{
+ let mut best_candidate = (UNIX_EPOCH, None);
+
+ for session_dir in iter {
+ debug!("find_source_directory_in_iter - inspecting `{}`", session_dir.display());
+
+ let directory_name = session_dir.file_name().unwrap().to_string_lossy();
+ assert_no_characters_lost(&directory_name);
+
+ if source_directories_already_tried.contains(&session_dir)
+ || !is_session_directory(&directory_name)
+ || !is_finalized(&directory_name)
+ {
+ debug!("find_source_directory_in_iter - ignoring");
+ continue;
+ }
+
+ let timestamp = extract_timestamp_from_session_dir(&directory_name).unwrap_or_else(|_| {
+ bug!("unexpected incr-comp session dir: {}", session_dir.display())
+ });
+
+ if timestamp > best_candidate.0 {
+ best_candidate = (timestamp, Some(session_dir.clone()));
+ }
+ }
+
+ best_candidate.1
+}
+
+fn is_finalized(directory_name: &str) -> bool {
+ !directory_name.ends_with("-working")
+}
+
+fn is_session_directory(directory_name: &str) -> bool {
+ directory_name.starts_with("s-") && !directory_name.ends_with(LOCK_FILE_EXT)
+}
+
+fn is_session_directory_lock_file(file_name: &str) -> bool {
+ file_name.starts_with("s-") && file_name.ends_with(LOCK_FILE_EXT)
+}
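+
+// Illustrative (hypothetical) names, assuming `LOCK_FILE_EXT` is an extension
+// like ".lock": "s-1a2b3c-9xyz-working" and "s-1a2b3c-9xyz-4def" are session
+// directories, while "s-1a2b3c-9xyz-4def.lock" is a session directory lock
+// file.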
+
+fn extract_timestamp_from_session_dir(directory_name: &str) -> Result<SystemTime, ()> {
+ if !is_session_directory(directory_name) {
+ return Err(());
+ }
+
+ let dash_indices: Vec<_> = directory_name.match_indices('-').map(|(idx, _)| idx).collect();
+ if dash_indices.len() != 3 {
+ return Err(());
+ }
+
+ string_to_timestamp(&directory_name[dash_indices[0] + 1..dash_indices[1]])
+}
+
+fn timestamp_to_string(timestamp: SystemTime) -> String {
+ let duration = timestamp.duration_since(UNIX_EPOCH).unwrap();
+ let micros = duration.as_secs() * 1_000_000 + (duration.subsec_nanos() as u64) / 1000;
+ base_n::encode(micros as u128, INT_ENCODE_BASE)
+}
+
+fn string_to_timestamp(s: &str) -> Result<SystemTime, ()> {
+ let micros_since_unix_epoch = u64::from_str_radix(s, INT_ENCODE_BASE as u32);
+
+ if micros_since_unix_epoch.is_err() {
+ return Err(());
+ }
+
+ let micros_since_unix_epoch = micros_since_unix_epoch.unwrap();
+
+ let duration = Duration::new(
+ micros_since_unix_epoch / 1_000_000,
+ 1000 * (micros_since_unix_epoch % 1_000_000) as u32,
+ );
+ Ok(UNIX_EPOCH + duration)
+}
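+
+// `string_to_timestamp` inverts `timestamp_to_string` at microsecond
+// granularity; the `test_timestamp_serialization` test in `tests.rs`
+// exercises this round-trip.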
+
+fn crate_path(sess: &Session, crate_name: &str, stable_crate_id: StableCrateId) -> PathBuf {
+ let incr_dir = sess.opts.incremental.as_ref().unwrap().clone();
+
+ let stable_crate_id = base_n::encode(stable_crate_id.to_u64() as u128, INT_ENCODE_BASE);
+
+ let crate_name = format!("{}-{}", crate_name, stable_crate_id);
+ incr_dir.join(crate_name)
+}
+
+fn assert_no_characters_lost(s: &str) {
+ if s.contains('\u{FFFD}') {
+ bug!("Could not losslessly convert '{}'.", s)
+ }
+}
+
+fn is_old_enough_to_be_collected(timestamp: SystemTime) -> bool {
+ timestamp < SystemTime::now() - Duration::from_secs(10)
+}
+
+/// Runs garbage collection for the current session.
+pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
+ debug!("garbage_collect_session_directories() - begin");
+
+ let session_directory = sess.incr_comp_session_dir();
+ debug!(
+ "garbage_collect_session_directories() - session directory: {}",
+ session_directory.display()
+ );
+
+ let crate_directory = session_directory.parent().unwrap();
+ debug!(
+ "garbage_collect_session_directories() - crate directory: {}",
+ crate_directory.display()
+ );
+
+ // First do a pass over the crate directory, collecting lock files and
+ // session directories
+ let mut session_directories = FxHashSet::default();
+ let mut lock_files = FxHashSet::default();
+
+ for dir_entry in crate_directory.read_dir()? {
+ let Ok(dir_entry) = dir_entry else {
+ // Ignore any errors
+ continue;
+ };
+
+ let entry_name = dir_entry.file_name();
+ let entry_name = entry_name.to_string_lossy();
+
+ if is_session_directory_lock_file(&entry_name) {
+ assert_no_characters_lost(&entry_name);
+ lock_files.insert(entry_name.into_owned());
+ } else if is_session_directory(&entry_name) {
+ assert_no_characters_lost(&entry_name);
+ session_directories.insert(entry_name.into_owned());
+ } else {
+            // This is something we don't know; leave it alone.
+ }
+ }
+
+ // Now map from lock files to session directories
+ let lock_file_to_session_dir: FxHashMap<String, Option<String>> = lock_files
+ .into_iter()
+ .map(|lock_file_name| {
+ assert!(lock_file_name.ends_with(LOCK_FILE_EXT));
+ let dir_prefix_end = lock_file_name.len() - LOCK_FILE_EXT.len();
+ let session_dir = {
+ let dir_prefix = &lock_file_name[0..dir_prefix_end];
+ session_directories.iter().find(|dir_name| dir_name.starts_with(dir_prefix))
+ };
+ (lock_file_name, session_dir.map(String::clone))
+ })
+ .collect();
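+    // E.g. (hypothetical): "s-1a2b3c-9xyz-4def.lock" maps to
+    // Some("s-1a2b3c-9xyz-4def") if such a directory was found above, and to
+    // None if the lock file is a leftover with no matching directory.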
+
+    // Delete all lock files that don't have an associated directory; they
+    // must be some kind of leftover.
+ for (lock_file_name, directory_name) in &lock_file_to_session_dir {
+ if directory_name.is_none() {
+ let Ok(timestamp) = extract_timestamp_from_session_dir(lock_file_name) else {
+ debug!(
+ "found lock-file with malformed timestamp: {}",
+ crate_directory.join(&lock_file_name).display()
+ );
+ // Ignore it
+ continue;
+ };
+
+ let lock_file_path = crate_directory.join(&**lock_file_name);
+
+ if is_old_enough_to_be_collected(timestamp) {
+ debug!(
+ "garbage_collect_session_directories() - deleting \
+ garbage lock file: {}",
+ lock_file_path.display()
+ );
+ delete_session_dir_lock_file(sess, &lock_file_path);
+ } else {
+ debug!(
+ "garbage_collect_session_directories() - lock file with \
+ no session dir not old enough to be collected: {}",
+ lock_file_path.display()
+ );
+ }
+ }
+ }
+
+ // Filter out `None` directories
+ let lock_file_to_session_dir: FxHashMap<String, String> = lock_file_to_session_dir
+ .into_iter()
+ .filter_map(|(lock_file_name, directory_name)| directory_name.map(|n| (lock_file_name, n)))
+ .collect();
+
+ // Delete all session directories that don't have a lock file.
+ for directory_name in session_directories {
+ if !lock_file_to_session_dir.values().any(|dir| *dir == directory_name) {
+ let path = crate_directory.join(directory_name);
+ if let Err(err) = safe_remove_dir_all(&path) {
+ sess.warn(&format!(
+ "Failed to garbage collect invalid incremental \
+ compilation session directory `{}`: {}",
+ path.display(),
+ err
+ ));
+ }
+ }
+ }
+
+ // Now garbage collect the valid session directories.
+ let mut deletion_candidates = vec![];
+
+ for (lock_file_name, directory_name) in &lock_file_to_session_dir {
+ debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
+
+ let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
+ debug!(
+ "found session-dir with malformed timestamp: {}",
+ crate_directory.join(directory_name).display()
+ );
+ // Ignore it
+ continue;
+ };
+
+ if is_finalized(directory_name) {
+ let lock_file_path = crate_directory.join(lock_file_name);
+ match flock::Lock::new(
+ &lock_file_path,
+ false, // don't wait
+ false, // don't create the lock-file
+ true,
+ ) {
+ // get an exclusive lock
+ Ok(lock) => {
+ debug!(
+ "garbage_collect_session_directories() - \
+ successfully acquired lock"
+ );
+ debug!(
+ "garbage_collect_session_directories() - adding \
+ deletion candidate: {}",
+ directory_name
+ );
+
+ // Note that we are holding on to the lock
+ deletion_candidates.push((
+ timestamp,
+ crate_directory.join(directory_name),
+ Some(lock),
+ ));
+ }
+ Err(_) => {
+ debug!(
+ "garbage_collect_session_directories() - \
+ not collecting, still in use"
+ );
+ }
+ }
+ } else if is_old_enough_to_be_collected(timestamp) {
+ // When cleaning out "-working" session directories, i.e.
+ // session directories that might still be in use by another
+            // compiler instance, we only look at directories that are
+ // at least ten seconds old. This is supposed to reduce the
+ // chance of deleting a directory in the time window where
+ // the process has allocated the directory but has not yet
+ // acquired the file-lock on it.
+
+ // Try to acquire the directory lock. If we can't, it
+ // means that the owning process is still alive and we
+ // leave this directory alone.
+ let lock_file_path = crate_directory.join(lock_file_name);
+ match flock::Lock::new(
+ &lock_file_path,
+ false, // don't wait
+ false, // don't create the lock-file
+ true,
+ ) {
+ // get an exclusive lock
+ Ok(lock) => {
+ debug!(
+ "garbage_collect_session_directories() - \
+ successfully acquired lock"
+ );
+
+ delete_old(sess, &crate_directory.join(directory_name));
+
+ // Let's make it explicit that the file lock is released at this point,
+ // or rather, that we held on to it until here
+ mem::drop(lock);
+ }
+ Err(_) => {
+ debug!(
+ "garbage_collect_session_directories() - \
+ not collecting, still in use"
+ );
+ }
+ }
+ } else {
+ debug!(
+ "garbage_collect_session_directories() - not finalized, not \
+ old enough"
+ );
+ }
+ }
+
+ // Delete all but the most recent of the candidates
+ for (path, lock) in all_except_most_recent(deletion_candidates) {
+ debug!("garbage_collect_session_directories() - deleting `{}`", path.display());
+
+ if let Err(err) = safe_remove_dir_all(&path) {
+ sess.warn(&format!(
+ "Failed to garbage collect finalized incremental \
+ compilation session directory `{}`: {}",
+ path.display(),
+ err
+ ));
+ } else {
+ delete_session_dir_lock_file(sess, &lock_file_path(&path));
+ }
+
+ // Let's make it explicit that the file lock is released at this point,
+ // or rather, that we held on to it until here
+ mem::drop(lock);
+ }
+
+ Ok(())
+}
+
+fn delete_old(sess: &Session, path: &Path) {
+ debug!("garbage_collect_session_directories() - deleting `{}`", path.display());
+
+ if let Err(err) = safe_remove_dir_all(&path) {
+ sess.warn(&format!(
+ "Failed to garbage collect incremental compilation session directory `{}`: {}",
+ path.display(),
+ err
+ ));
+ } else {
+ delete_session_dir_lock_file(sess, &lock_file_path(&path));
+ }
+}
+
+fn all_except_most_recent(
+ deletion_candidates: Vec<(SystemTime, PathBuf, Option<flock::Lock>)>,
+) -> FxHashMap<PathBuf, Option<flock::Lock>> {
+ let most_recent = deletion_candidates.iter().map(|&(timestamp, ..)| timestamp).max();
+
+ if let Some(most_recent) = most_recent {
+ deletion_candidates
+ .into_iter()
+ .filter(|&(timestamp, ..)| timestamp != most_recent)
+ .map(|(_, path, lock)| (path, lock))
+ .collect()
+ } else {
+ FxHashMap::default()
+ }
+}
+
+/// Since paths of artifacts within session directories can get quite long, we
+/// need to support deleting files with very long paths. The regular
+/// WinApi functions only support paths up to 260 characters, however. In order
+/// to circumvent this limitation, we canonicalize the path of the directory
+/// before passing it to `std::fs::remove_dir_all()`. This will convert the path
+/// into the '\\?\' format, which supports much longer paths.
+fn safe_remove_dir_all(p: &Path) -> io::Result<()> {
+ let canonicalized = match std_fs::canonicalize(p) {
+ Ok(canonicalized) => canonicalized,
+ Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(()),
+ Err(err) => return Err(err),
+ };
+
+ std_fs::remove_dir_all(canonicalized)
+}
+
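+/// Like `safe_remove_dir_all`, this canonicalizes first (to get long-path
+/// support on Windows) and treats an already-missing file as success, since
+/// the goal of the call is just for the file to be gone.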
+fn safe_remove_file(p: &Path) -> io::Result<()> {
+ let canonicalized = match std_fs::canonicalize(p) {
+ Ok(canonicalized) => canonicalized,
+ Err(err) if err.kind() == io::ErrorKind::NotFound => return Ok(()),
+ Err(err) => return Err(err),
+ };
+
+ match std_fs::remove_file(canonicalized) {
+ Err(err) if err.kind() == io::ErrorKind::NotFound => Ok(()),
+ result => result,
+ }
+}
+
+// On Windows the compiler would sometimes fail to rename the session directory because
+// the OS thought something was still being accessed in it. So we retry a few times to give
+// the OS time to catch up.
+// See https://github.com/rust-lang/rust/issues/86929.
+fn rename_path_with_retry(from: &Path, to: &Path, mut retries_left: usize) -> std::io::Result<()> {
+ loop {
+ match std_fs::rename(from, to) {
+ Ok(()) => return Ok(()),
+ Err(e) => {
+ if retries_left > 0 && e.kind() == ErrorKind::PermissionDenied {
+ // Try again after a short waiting period.
+ std::thread::sleep(Duration::from_millis(50));
+ retries_left -= 1;
+ } else {
+ return Err(e);
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_incremental/src/persist/fs/tests.rs b/compiler/rustc_incremental/src/persist/fs/tests.rs
new file mode 100644
index 000000000..184796948
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/fs/tests.rs
@@ -0,0 +1,84 @@
+use super::*;
+
+#[test]
+fn test_all_except_most_recent() {
+ assert_eq!(
+ all_except_most_recent(vec![
+ (UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4"), None),
+ (UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1"), None),
+ (UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5"), None),
+ (UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3"), None),
+ (UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2"), None),
+ ])
+ .keys()
+ .cloned()
+ .collect::<FxHashSet<PathBuf>>(),
+ [PathBuf::from("1"), PathBuf::from("2"), PathBuf::from("3"), PathBuf::from("4"),]
+ .into_iter()
+ .collect::<FxHashSet<PathBuf>>()
+ );
+
+ assert_eq!(
+ all_except_most_recent(vec![]).keys().cloned().collect::<FxHashSet<PathBuf>>(),
+ FxHashSet::default()
+ );
+}
+
+#[test]
+fn test_timestamp_serialization() {
+ for i in 0..1_000u64 {
+ let time = UNIX_EPOCH + Duration::new(i * 1_434_578, (i as u32) * 239_000);
+ let s = timestamp_to_string(time);
+ assert_eq!(Ok(time), string_to_timestamp(&s));
+ }
+}
+
+#[test]
+fn test_find_source_directory_in_iter() {
+ let already_visited = FxHashSet::default();
+
+ // Find newest
+ assert_eq!(
+ find_source_directory_in_iter(
+ [
+ PathBuf::from("crate-dir/s-3234-0000-svh"),
+ PathBuf::from("crate-dir/s-2234-0000-svh"),
+ PathBuf::from("crate-dir/s-1234-0000-svh")
+ ]
+ .into_iter(),
+ &already_visited
+ ),
+ Some(PathBuf::from("crate-dir/s-3234-0000-svh"))
+ );
+
+ // Filter out "-working"
+ assert_eq!(
+ find_source_directory_in_iter(
+ [
+ PathBuf::from("crate-dir/s-3234-0000-working"),
+ PathBuf::from("crate-dir/s-2234-0000-svh"),
+ PathBuf::from("crate-dir/s-1234-0000-svh")
+ ]
+ .into_iter(),
+ &already_visited
+ ),
+ Some(PathBuf::from("crate-dir/s-2234-0000-svh"))
+ );
+
+ // Handle empty
+ assert_eq!(find_source_directory_in_iter([].into_iter(), &already_visited), None);
+
+ // Handle only working
+ assert_eq!(
+ find_source_directory_in_iter(
+ [
+ PathBuf::from("crate-dir/s-3234-0000-working"),
+ PathBuf::from("crate-dir/s-2234-0000-working"),
+ PathBuf::from("crate-dir/s-1234-0000-working")
+ ]
+ .into_iter(),
+ &already_visited
+ ),
+ None
+ );
+}
diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs
new file mode 100644
index 000000000..1c5fd9169
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/load.rs
@@ -0,0 +1,235 @@
+//! Code to save/load the dep-graph from files.
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::memmap::Mmap;
+use rustc_middle::dep_graph::{SerializedDepGraph, WorkProduct, WorkProductId};
+use rustc_middle::ty::OnDiskCache;
+use rustc_serialize::opaque::MemDecoder;
+use rustc_serialize::Decodable;
+use rustc_session::config::IncrementalStateAssertion;
+use rustc_session::Session;
+use std::path::Path;
+
+use super::data::*;
+use super::file_format;
+use super::fs::*;
+use super::work_product;
+
+type WorkProductMap = FxHashMap<WorkProductId, WorkProduct>;
+
+/// Represents the result of an attempt to load incremental compilation data.
+#[derive(Debug)]
+pub enum LoadResult<T> {
+ /// Loading was successful.
+ Ok {
+ #[allow(missing_docs)]
+ data: T,
+ },
+ /// The file either didn't exist or was produced by an incompatible compiler version.
+ DataOutOfDate,
+ /// An error occurred.
+ Error {
+ #[allow(missing_docs)]
+ message: String,
+ },
+}
+
+impl<T: Default> LoadResult<T> {
+ /// Accesses the data returned in [`LoadResult::Ok`].
+ pub fn open(self, sess: &Session) -> T {
+ // Check for errors when using `-Zassert-incremental-state`
+ match (sess.opts.assert_incr_state, &self) {
+ (Some(IncrementalStateAssertion::NotLoaded), LoadResult::Ok { .. }) => {
+ sess.fatal(
+ "We asserted that the incremental cache should not be loaded, \
+ but it was loaded.",
+ );
+ }
+ (
+ Some(IncrementalStateAssertion::Loaded),
+ LoadResult::Error { .. } | LoadResult::DataOutOfDate,
+ ) => {
+ sess.fatal(
+ "We asserted that an existing incremental cache directory should \
+ be successfully loaded, but it was not.",
+ );
+ }
+ _ => {}
+ };
+
+ match self {
+ LoadResult::Error { message } => {
+ sess.warn(&message);
+ Default::default()
+ }
+ LoadResult::DataOutOfDate => {
+ if let Err(err) = delete_all_session_dir_contents(sess) {
+ sess.err(&format!(
+ "Failed to delete invalidated or incompatible \
+ incremental compilation session directory contents `{}`: {}.",
+ dep_graph_path(sess).display(),
+ err
+ ));
+ }
+ Default::default()
+ }
+ LoadResult::Ok { data } => data,
+ }
+ }
+}
+
+fn load_data(
+ report_incremental_info: bool,
+ path: &Path,
+ nightly_build: bool,
+) -> LoadResult<(Mmap, usize)> {
+ match file_format::read_file(report_incremental_info, path, nightly_build) {
+ Ok(Some(data_and_pos)) => LoadResult::Ok { data: data_and_pos },
+ Ok(None) => {
+ // The file either didn't exist or was produced by an incompatible
+ // compiler version. Neither is an error.
+ LoadResult::DataOutOfDate
+ }
+ Err(err) => LoadResult::Error {
+ message: format!("could not load dep-graph from `{}`: {}", path.display(), err),
+ },
+ }
+}
+
+fn delete_dirty_work_product(sess: &Session, swp: SerializedWorkProduct) {
+ debug!("delete_dirty_work_product({:?})", swp);
+ work_product::delete_workproduct_files(sess, &swp.work_product);
+}
+
+/// Either a result that has already been computed or a
+/// handle that will let us wait until it is computed
+/// by a background thread.
+pub enum MaybeAsync<T> {
+ Sync(T),
+ Async(std::thread::JoinHandle<T>),
+}
+
+impl<T> MaybeAsync<LoadResult<T>> {
+ /// Accesses the data returned in [`LoadResult::Ok`] in an asynchronous way if possible.
+ pub fn open(self) -> LoadResult<T> {
+ match self {
+ MaybeAsync::Sync(result) => result,
+ MaybeAsync::Async(handle) => handle.join().unwrap_or_else(|e| LoadResult::Error {
+ message: format!("could not decode incremental cache: {:?}", e),
+ }),
+ }
+ }
+}
+
+/// An asynchronous type for computing the dependency graph.
+pub type DepGraphFuture = MaybeAsync<LoadResult<(SerializedDepGraph, WorkProductMap)>>;
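+
+// Sketch of the intended call pattern (hypothetical caller, `sess: &Session`):
+// the future is created early so decoding can overlap with other start-up
+// work, and the two `open` calls unwrap the async layer and the `LoadResult`
+// in turn:
+//
+//     let future = load_dep_graph(sess);
+//     // ... other start-up work ...
+//     let (prev_graph, prev_work_products) = future.open().open(sess);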
+
+/// Launch a thread and load the dependency graph in the background.
+pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
+ // Since `sess` isn't `Sync`, we perform all accesses to `sess`
+ // before we fire the background thread.
+
+ let prof = sess.prof.clone();
+
+ if sess.opts.incremental.is_none() {
+ // No incremental compilation.
+ return MaybeAsync::Sync(LoadResult::Ok { data: Default::default() });
+ }
+
+ let _timer = sess.prof.generic_activity("incr_comp_prepare_load_dep_graph");
+
+ // Calling `sess.incr_comp_session_dir()` will panic if `sess.opts.incremental.is_none()`.
+ // Fortunately, we just checked that this isn't the case.
+ let path = dep_graph_path(&sess);
+ let report_incremental_info = sess.opts.unstable_opts.incremental_info;
+ let expected_hash = sess.opts.dep_tracking_hash(false);
+
+ let mut prev_work_products = FxHashMap::default();
+ let nightly_build = sess.is_nightly_build();
+
+ // If we are only building with -Zquery-dep-graph but without an actual
+ // incr. comp. session directory, we skip this. Otherwise we'd fail
+ // when trying to load work products.
+ if sess.incr_comp_session_dir_opt().is_some() {
+ let work_products_path = work_products_path(sess);
+ let load_result = load_data(report_incremental_info, &work_products_path, nightly_build);
+
+ if let LoadResult::Ok { data: (work_products_data, start_pos) } = load_result {
+ // Decode the list of work_products
+ let mut work_product_decoder = MemDecoder::new(&work_products_data[..], start_pos);
+ let work_products: Vec<SerializedWorkProduct> =
+ Decodable::decode(&mut work_product_decoder);
+
+ for swp in work_products {
+ let all_files_exist = swp.work_product.saved_files.iter().all(|(_, path)| {
+ let exists = in_incr_comp_dir_sess(sess, path).exists();
+ if !exists && sess.opts.unstable_opts.incremental_info {
+ eprintln!("incremental: could not find file for work product: {path}",);
+ }
+ exists
+ });
+
+ if all_files_exist {
+ debug!("reconcile_work_products: all files for {:?} exist", swp);
+ prev_work_products.insert(swp.id, swp.work_product);
+ } else {
+ debug!("reconcile_work_products: some file for {:?} does not exist", swp);
+ delete_dirty_work_product(sess, swp);
+ }
+ }
+ }
+ }
+
+ MaybeAsync::Async(std::thread::spawn(move || {
+ let _prof_timer = prof.generic_activity("incr_comp_load_dep_graph");
+
+ match load_data(report_incremental_info, &path, nightly_build) {
+ LoadResult::DataOutOfDate => LoadResult::DataOutOfDate,
+ LoadResult::Error { message } => LoadResult::Error { message },
+ LoadResult::Ok { data: (bytes, start_pos) } => {
+ let mut decoder = MemDecoder::new(&bytes, start_pos);
+ let prev_commandline_args_hash = u64::decode(&mut decoder);
+
+ if prev_commandline_args_hash != expected_hash {
+ if report_incremental_info {
+ eprintln!(
+ "[incremental] completely ignoring cache because of \
+ differing commandline arguments"
+ );
+ }
+ // We can't reuse the cache, purge it.
+ debug!("load_dep_graph_new: differing commandline arg hashes");
+
+ // No need to do any further work
+ return LoadResult::DataOutOfDate;
+ }
+
+ let dep_graph = SerializedDepGraph::decode(&mut decoder);
+
+ LoadResult::Ok { data: (dep_graph, prev_work_products) }
+ }
+ }
+ }))
+}
+
+/// Attempts to load the query result cache from disk
+///
+/// If we are not in incremental compilation mode, returns `None`.
+/// Otherwise, tries to load the query result cache from disk,
+/// creating an empty cache if it could not be loaded.
+pub fn load_query_result_cache<'a, C: OnDiskCache<'a>>(sess: &'a Session) -> Option<C> {
+ if sess.opts.incremental.is_none() {
+ return None;
+ }
+
+ let _prof_timer = sess.prof.generic_activity("incr_comp_load_query_result_cache");
+
+ match load_data(
+ sess.opts.unstable_opts.incremental_info,
+ &query_cache_path(sess),
+ sess.is_nightly_build(),
+ ) {
+ LoadResult::Ok { data: (bytes, start_pos) } => Some(C::new(sess, bytes, start_pos)),
+ _ => Some(C::new_empty(sess.source_map())),
+ }
+}
diff --git a/compiler/rustc_incremental/src/persist/mod.rs b/compiler/rustc_incremental/src/persist/mod.rs
new file mode 100644
index 000000000..1336189bc
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/mod.rs
@@ -0,0 +1,25 @@
+//! When in incremental mode, this pass dumps out the dependency graph
+//! into the given directory. At the same time, it also hashes the
+//! various HIR nodes.
+
+mod data;
+mod dirty_clean;
+mod file_format;
+mod fs;
+mod load;
+mod save;
+mod work_product;
+
+pub use fs::finalize_session_directory;
+pub use fs::garbage_collect_session_directories;
+pub use fs::in_incr_comp_dir;
+pub use fs::in_incr_comp_dir_sess;
+pub use fs::prepare_session_directory;
+pub use load::load_query_result_cache;
+pub use load::LoadResult;
+pub use load::{load_dep_graph, DepGraphFuture};
+pub use save::build_dep_graph;
+pub use save::save_dep_graph;
+pub use save::save_work_product_index;
+pub use work_product::copy_cgu_workproduct_to_incr_comp_cache_dir;
+pub use work_product::delete_workproduct_files;
diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs
new file mode 100644
index 000000000..710350314
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/save.rs
@@ -0,0 +1,188 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::join;
+use rustc_middle::dep_graph::{DepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
+use rustc_middle::ty::TyCtxt;
+use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
+use rustc_serialize::Encodable as RustcEncodable;
+use rustc_session::Session;
+use std::fs;
+
+use super::data::*;
+use super::dirty_clean;
+use super::file_format;
+use super::fs::*;
+use super::work_product;
+
+/// Saves and writes the [`DepGraph`] to the file system.
+///
+/// This function saves both the dep-graph and the query result cache,
+/// and drops the result cache.
+///
+/// This function should only run after all queries have completed.
+/// Trying to execute a query afterwards would attempt to read the result cache we just dropped.
+pub fn save_dep_graph(tcx: TyCtxt<'_>) {
+ debug!("save_dep_graph()");
+ tcx.dep_graph.with_ignore(|| {
+ let sess = tcx.sess;
+ if sess.opts.incremental.is_none() {
+ return;
+ }
+ // This is going to be deleted in finalize_session_directory, so let's not create it
+ if sess.has_errors_or_delayed_span_bugs() {
+ return;
+ }
+
+ let query_cache_path = query_cache_path(sess);
+ let dep_graph_path = dep_graph_path(sess);
+ let staging_dep_graph_path = staging_dep_graph_path(sess);
+
+ sess.time("assert_dep_graph", || crate::assert_dep_graph(tcx));
+ sess.time("check_dirty_clean", || dirty_clean::check_dirty_clean_annotations(tcx));
+
+ if sess.opts.unstable_opts.incremental_info {
+ tcx.dep_graph.print_incremental_info()
+ }
+
+ join(
+ move || {
+ sess.time("incr_comp_persist_result_cache", || {
+ // Drop the memory map so that we can remove the file and write to it.
+ if let Some(odc) = &tcx.on_disk_cache {
+ odc.drop_serialized_data(tcx);
+ }
+
+ file_format::save_in(sess, query_cache_path, "query cache", |e| {
+ encode_query_cache(tcx, e)
+ });
+ });
+ },
+ move || {
+ sess.time("incr_comp_persist_dep_graph", || {
+ if let Err(err) = tcx.dep_graph.encode(&tcx.sess.prof) {
+ sess.err(&format!(
+ "failed to write dependency graph to `{}`: {}",
+ staging_dep_graph_path.display(),
+ err
+ ));
+ }
+ if let Err(err) = fs::rename(&staging_dep_graph_path, &dep_graph_path) {
+ sess.err(&format!(
+ "failed to move dependency graph from `{}` to `{}`: {}",
+ staging_dep_graph_path.display(),
+ dep_graph_path.display(),
+ err
+ ));
+ }
+ });
+ },
+ );
+ })
+}
+
+/// Saves the work product index.
+pub fn save_work_product_index(
+ sess: &Session,
+ dep_graph: &DepGraph,
+ new_work_products: FxHashMap<WorkProductId, WorkProduct>,
+) {
+ if sess.opts.incremental.is_none() {
+ return;
+ }
+ // This is going to be deleted in finalize_session_directory, so let's not create it
+ if sess.has_errors_or_delayed_span_bugs() {
+ return;
+ }
+
+ debug!("save_work_product_index()");
+ dep_graph.assert_ignored();
+ let path = work_products_path(sess);
+ file_format::save_in(sess, path, "work product index", |mut e| {
+ encode_work_product_index(&new_work_products, &mut e);
+ e.finish()
+ });
+
+ // We also need to clean out old work-products, as not all of them are
+ // deleted during invalidation. Some object files don't change their
+    // content; they are just not needed anymore.
+ let previous_work_products = dep_graph.previous_work_products();
+ for (id, wp) in previous_work_products.iter() {
+ if !new_work_products.contains_key(id) {
+ work_product::delete_workproduct_files(sess, wp);
+ debug_assert!(
+ !wp.saved_files.iter().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
+ );
+ }
+ }
+
+ // Check that we did not delete one of the current work-products:
+ debug_assert!({
+ new_work_products.iter().all(|(_, wp)| {
+ wp.saved_files.iter().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
+ })
+ });
+}
+
+fn encode_work_product_index(
+ work_products: &FxHashMap<WorkProductId, WorkProduct>,
+ encoder: &mut FileEncoder,
+) {
+ let serialized_products: Vec<_> = work_products
+ .iter()
+ .map(|(id, work_product)| SerializedWorkProduct {
+ id: *id,
+ work_product: work_product.clone(),
+ })
+ .collect();
+
+ serialized_products.encode(encoder)
+}
+
+fn encode_query_cache(tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult {
+ tcx.sess.time("incr_comp_serialize_result_cache", || tcx.serialize_query_result_cache(encoder))
+}
+
+/// Builds the dependency graph.
+///
+/// This function creates the *staging dep-graph*. When the dep-graph is modified by a query
+/// execution, the new dependency information is not kept in memory but directly
+/// output to this file. `save_dep_graph` then finalizes the staging dep-graph
+/// and moves it to the permanent dep-graph path.
+pub fn build_dep_graph(
+ sess: &Session,
+ prev_graph: SerializedDepGraph,
+ prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
+) -> Option<DepGraph> {
+ if sess.opts.incremental.is_none() {
+ // No incremental compilation.
+ return None;
+ }
+
+ // Stream the dep-graph to an alternate file, to avoid overwriting anything in case of errors.
+ let path_buf = staging_dep_graph_path(sess);
+
+ let mut encoder = match FileEncoder::new(&path_buf) {
+ Ok(encoder) => encoder,
+ Err(err) => {
+ sess.err(&format!(
+ "failed to create dependency graph at `{}`: {}",
+ path_buf.display(),
+ err
+ ));
+ return None;
+ }
+ };
+
+ file_format::write_file_header(&mut encoder, sess.is_nightly_build());
+
+ // First encode the commandline arguments hash
+ sess.opts.dep_tracking_hash(false).encode(&mut encoder);
+
+ Some(DepGraph::new(
+ &sess.prof,
+ prev_graph,
+ prev_work_products,
+ encoder,
+ sess.opts.unstable_opts.query_dep_graph,
+ sess.opts.unstable_opts.incremental_info,
+ ))
+}
diff --git a/compiler/rustc_incremental/src/persist/work_product.rs b/compiler/rustc_incremental/src/persist/work_product.rs
new file mode 100644
index 000000000..2f1853c44
--- /dev/null
+++ b/compiler/rustc_incremental/src/persist/work_product.rs
@@ -0,0 +1,59 @@
+//! Functions for saving and removing intermediate [work products].
+//!
+//! [work products]: WorkProduct
+
+use crate::persist::fs::*;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_fs_util::link_or_copy;
+use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
+use rustc_session::Session;
+use std::fs as std_fs;
+use std::path::Path;
+
+/// Copies a CGU work product to the incremental compilation directory, so the next compilation can find and reuse it.
+pub fn copy_cgu_workproduct_to_incr_comp_cache_dir(
+ sess: &Session,
+ cgu_name: &str,
+ files: &[(&'static str, &Path)],
+) -> Option<(WorkProductId, WorkProduct)> {
+ debug!(?cgu_name, ?files);
+ sess.opts.incremental.as_ref()?;
+
+ let mut saved_files = FxHashMap::default();
+ for (ext, path) in files {
+ let file_name = format!("{cgu_name}.{ext}");
+ let path_in_incr_dir = in_incr_comp_dir_sess(sess, &file_name);
+ match link_or_copy(path, &path_in_incr_dir) {
+ Ok(_) => {
+ let _ = saved_files.insert(ext.to_string(), file_name);
+ }
+ Err(err) => {
+ sess.warn(&format!(
+ "error copying object file `{}` to incremental directory as `{}`: {}",
+ path.display(),
+ path_in_incr_dir.display(),
+ err
+ ));
+ }
+ }
+ }
+
+ let work_product = WorkProduct { cgu_name: cgu_name.to_string(), saved_files };
+ debug!(?work_product);
+ let work_product_id = WorkProductId::from_cgu_name(cgu_name);
+ Some((work_product_id, work_product))
+}
+
+/// Removes files for a given work product.
+pub fn delete_workproduct_files(sess: &Session, work_product: &WorkProduct) {
+ for (_, path) in &work_product.saved_files {
+ let path = in_incr_comp_dir_sess(sess, path);
+ if let Err(err) = std_fs::remove_file(&path) {
+ sess.warn(&format!(
+ "file-system error deleting outdated file `{}`: {}",
+ path.display(),
+ err
+ ));
+ }
+ }
+}
diff --git a/compiler/rustc_index/Cargo.toml b/compiler/rustc_index/Cargo.toml
new file mode 100644
index 000000000..8a81a93a9
--- /dev/null
+++ b/compiler/rustc_index/Cargo.toml
@@ -0,0 +1,13 @@
+[package]
+name = "rustc_index"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+arrayvec = { version = "0.7", default-features = false }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_macros = { path = "../rustc_macros" }
+smallvec = "1.8.1"
diff --git a/compiler/rustc_index/src/bit_set.rs b/compiler/rustc_index/src/bit_set.rs
new file mode 100644
index 000000000..777112442
--- /dev/null
+++ b/compiler/rustc_index/src/bit_set.rs
@@ -0,0 +1,2098 @@
+use crate::vec::{Idx, IndexVec};
+use arrayvec::ArrayVec;
+use std::fmt;
+use std::iter;
+use std::marker::PhantomData;
+use std::mem;
+use std::ops::{BitAnd, BitAndAssign, BitOrAssign, Bound, Not, Range, RangeBounds, Shl};
+use std::rc::Rc;
+use std::slice;
+
+use rustc_macros::{Decodable, Encodable};
+
+use Chunk::*;
+
+#[cfg(test)]
+mod tests;
+
+type Word = u64;
+const WORD_BYTES: usize = mem::size_of::<Word>();
+const WORD_BITS: usize = WORD_BYTES * 8;
+
+// The choice of chunk size has some trade-offs.
+//
+// A big chunk size tends to favour cases where many large `ChunkedBitSet`s are
+// present, because they require fewer `Chunk`s, reducing the number of
+// allocations and reducing peak memory usage. Also, fewer chunk operations are
+// required, though more of them might be `Mixed`.
+//
+// A small chunk size tends to favour cases where many small `ChunkedBitSet`s
+// are present, because less space is wasted at the end of the final chunk (if
+// it's not full).
+const CHUNK_WORDS: usize = 32;
+const CHUNK_BITS: usize = CHUNK_WORDS * WORD_BITS; // 2048 bits
+
+/// ChunkSize is small to keep `Chunk` small. The static assertion ensures it's
+/// not too small.
+type ChunkSize = u16;
+const _: () = assert!(CHUNK_BITS <= ChunkSize::MAX as usize);
+
+pub trait BitRelations<Rhs> {
+ fn union(&mut self, other: &Rhs) -> bool;
+ fn subtract(&mut self, other: &Rhs) -> bool;
+ fn intersect(&mut self, other: &Rhs) -> bool;
+}
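+
+// For example (illustrative): if `a = {1, 2}` and `b = {2, 3}` over the same
+// domain, `a.union(&b)` leaves `a = {1, 2, 3}` and returns `true`, while a
+// second `a.union(&b)` returns `false` because nothing changed.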
+
+#[inline]
+fn inclusive_start_end<T: Idx>(
+ range: impl RangeBounds<T>,
+ domain: usize,
+) -> Option<(usize, usize)> {
+ // Both start and end are inclusive.
+ let start = match range.start_bound().cloned() {
+ Bound::Included(start) => start.index(),
+ Bound::Excluded(start) => start.index() + 1,
+ Bound::Unbounded => 0,
+ };
+ let end = match range.end_bound().cloned() {
+ Bound::Included(end) => end.index(),
+ Bound::Excluded(end) => end.index().checked_sub(1)?,
+ Bound::Unbounded => domain - 1,
+ };
+ assert!(end < domain);
+ if start > end {
+ return None;
+ }
+ Some((start, end))
+}
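+
+// For example, with `domain = 10`: the range `3..7` yields `Some((3, 6))`,
+// `..` yields `Some((0, 9))`, and the empty range `5..5` yields `None`.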
+
+macro_rules! bit_relations_inherent_impls {
+ () => {
+ /// Sets `self = self | other` and returns `true` if `self` changed
+ /// (i.e., if new bits were added).
+ pub fn union<Rhs>(&mut self, other: &Rhs) -> bool
+ where
+ Self: BitRelations<Rhs>,
+ {
+ <Self as BitRelations<Rhs>>::union(self, other)
+ }
+
+        /// Sets `self = self - other` and returns `true` if `self` changed
+        /// (i.e., if any bits were removed).
+ pub fn subtract<Rhs>(&mut self, other: &Rhs) -> bool
+ where
+ Self: BitRelations<Rhs>,
+ {
+ <Self as BitRelations<Rhs>>::subtract(self, other)
+ }
+
+ /// Sets `self = self & other` and return `true` if `self` changed.
+ /// (i.e., if any bits were removed).
+ pub fn intersect<Rhs>(&mut self, other: &Rhs) -> bool
+ where
+ Self: BitRelations<Rhs>,
+ {
+ <Self as BitRelations<Rhs>>::intersect(self, other)
+ }
+ };
+}
+
+/// A fixed-size bitset type with a dense representation.
+///
+/// NOTE: Use [`GrowableBitSet`] if you need support for resizing after creation.
+///
+/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
+/// just be `usize`.
+///
+/// All operations that involve an element will panic if the element is equal
+/// to or greater than the domain size. All operations that involve two bitsets
+/// will panic if the bitsets have differing domain sizes.
+#[derive(Eq, PartialEq, Hash, Decodable, Encodable)]
+pub struct BitSet<T> {
+ domain_size: usize,
+ words: Vec<Word>,
+ marker: PhantomData<T>,
+}
+
+impl<T> BitSet<T> {
+ /// Gets the domain size.
+ pub fn domain_size(&self) -> usize {
+ self.domain_size
+ }
+}
+
+impl<T: Idx> BitSet<T> {
+ /// Creates a new, empty bitset with a given `domain_size`.
+ #[inline]
+ pub fn new_empty(domain_size: usize) -> BitSet<T> {
+ let num_words = num_words(domain_size);
+ BitSet { domain_size, words: vec![0; num_words], marker: PhantomData }
+ }
+
+ /// Creates a new, filled bitset with a given `domain_size`.
+ #[inline]
+ pub fn new_filled(domain_size: usize) -> BitSet<T> {
+ let num_words = num_words(domain_size);
+ let mut result = BitSet { domain_size, words: vec![!0; num_words], marker: PhantomData };
+ result.clear_excess_bits();
+ result
+ }
+
+ /// Clear all elements.
+ #[inline]
+ pub fn clear(&mut self) {
+ self.words.fill(0);
+ }
+
+ /// Clear excess bits in the final word.
+ fn clear_excess_bits(&mut self) {
+ clear_excess_bits_in_final_word(self.domain_size, &mut self.words);
+ }
+
+ /// Count the number of set bits in the set.
+ pub fn count(&self) -> usize {
+ self.words.iter().map(|e| e.count_ones() as usize).sum()
+ }
+
+ /// Returns `true` if `self` contains `elem`.
+ #[inline]
+ pub fn contains(&self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let (word_index, mask) = word_index_and_mask(elem);
+ (self.words[word_index] & mask) != 0
+ }
+
+    /// Is `self` a (non-strict) superset of `other`?
+ #[inline]
+ pub fn superset(&self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ self.words.iter().zip(&other.words).all(|(a, b)| (a & b) == *b)
+ }
+
+ /// Is the set empty?
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.words.iter().all(|a| *a == 0)
+ }
+
+ /// Insert `elem`. Returns whether the set has changed.
+ #[inline]
+ pub fn insert(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let (word_index, mask) = word_index_and_mask(elem);
+ let word_ref = &mut self.words[word_index];
+ let word = *word_ref;
+ let new_word = word | mask;
+ *word_ref = new_word;
+ new_word != word
+ }
+
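+    /// Inserts all elements of `elems` into the set. For example, with
+    /// `T = usize` and `domain_size` 100, `insert_range(10..20)` sets bits 10
+    /// through 19 inclusive.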
+ #[inline]
+ pub fn insert_range(&mut self, elems: impl RangeBounds<T>) {
+ let Some((start, end)) = inclusive_start_end(elems, self.domain_size) else {
+ return;
+ };
+
+ let (start_word_index, start_mask) = word_index_and_mask(start);
+ let (end_word_index, end_mask) = word_index_and_mask(end);
+
+ // Set all words in between start and end (exclusively of both).
+ for word_index in (start_word_index + 1)..end_word_index {
+ self.words[word_index] = !0;
+ }
+
+ if start_word_index != end_word_index {
+ // Start and end are in different words, so we handle each in turn.
+ //
+ // We set all leading bits. This includes the start_mask bit.
+ self.words[start_word_index] |= !(start_mask - 1);
+ // And all trailing bits (i.e. from 0..=end) in the end word,
+ // including the end.
+            self.words[end_word_index] |= end_mask | (end_mask - 1);
+ } else {
+ self.words[start_word_index] |= end_mask | (end_mask - start_mask);
+ }
+ }
+
+ /// Sets all bits to true.
+ pub fn insert_all(&mut self) {
+ self.words.fill(!0);
+ self.clear_excess_bits();
+ }
+
+ /// Returns `true` if the set has changed.
+ #[inline]
+ pub fn remove(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let (word_index, mask) = word_index_and_mask(elem);
+ let word_ref = &mut self.words[word_index];
+ let word = *word_ref;
+ let new_word = word & !mask;
+ *word_ref = new_word;
+ new_word != word
+ }
+
+ /// Gets a slice of the underlying words.
+ pub fn words(&self) -> &[Word] {
+ &self.words
+ }
+
+ /// Iterates over the indices of set bits in a sorted order.
+ #[inline]
+ pub fn iter(&self) -> BitIter<'_, T> {
+ BitIter::new(&self.words)
+ }
+
+ /// Duplicates the set as a hybrid set.
+ pub fn to_hybrid(&self) -> HybridBitSet<T> {
+ // Note: we currently don't bother trying to make a Sparse set.
+ HybridBitSet::Dense(self.to_owned())
+ }
+
+    /// Sets `self = self | other`. In contrast to `union`, this returns `true`
+    /// if the set contains at least one bit that is not in `other` (i.e.
+    /// `other` is not a superset of `self`).
+ /// This is an optimization for union of a hybrid bitset.
+ fn reverse_union_sparse(&mut self, sparse: &SparseBitSet<T>) -> bool {
+ assert!(sparse.domain_size == self.domain_size);
+ self.clear_excess_bits();
+
+ let mut not_already = false;
+ // Index of the current word not yet merged.
+ let mut current_index = 0;
+ // Mask of bits that came from the sparse set in the current word.
+ let mut new_bit_mask = 0;
+ for (word_index, mask) in sparse.iter().map(|x| word_index_and_mask(*x)) {
+ // Next bit is in a word not inspected yet.
+ if word_index > current_index {
+ self.words[current_index] |= new_bit_mask;
+ // Were there any bits in the old word that did not occur in the sparse set?
+ not_already |= (self.words[current_index] ^ new_bit_mask) != 0;
+ // Check all words we skipped for any set bit.
+ not_already |= self.words[current_index + 1..word_index].iter().any(|&x| x != 0);
+ // Update next word.
+ current_index = word_index;
+ // Reset bit mask, no bits have been merged yet.
+ new_bit_mask = 0;
+ }
+ // Add bit and mark it as coming from the sparse set.
+ // self.words[word_index] |= mask;
+ new_bit_mask |= mask;
+ }
+ self.words[current_index] |= new_bit_mask;
+ // Any bits in the last inspected word that were not in the sparse set?
+ not_already |= (self.words[current_index] ^ new_bit_mask) != 0;
+ // Any bits in the tail? Note `clear_excess_bits` before.
+ not_already |= self.words[current_index + 1..].iter().any(|&x| x != 0);
+
+ not_already
+ }
+
+ fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
+ let (start, end) = inclusive_start_end(range, self.domain_size)?;
+ let (start_word_index, _) = word_index_and_mask(start);
+ let (end_word_index, end_mask) = word_index_and_mask(end);
+
+ let end_word = self.words[end_word_index] & (end_mask | (end_mask - 1));
+ if end_word != 0 {
+ let pos = max_bit(end_word) + WORD_BITS * end_word_index;
+ if start <= pos {
+ return Some(T::new(pos));
+ }
+ }
+
+ // We exclude end_word_index from the range here, because we don't want
+        // to limit ourselves to *just* the last word: the bits set in it may be
+ // after `end`, so it may not work out.
+ if let Some(offset) =
+ self.words[start_word_index..end_word_index].iter().rposition(|&w| w != 0)
+ {
+ let word_idx = start_word_index + offset;
+ let start_word = self.words[word_idx];
+ let pos = max_bit(start_word) + WORD_BITS * word_idx;
+ if start <= pos {
+ return Some(T::new(pos));
+ }
+ }
+
+ None
+ }
+
+ bit_relations_inherent_impls! {}
+}
+
+// dense REL dense
+impl<T: Idx> BitRelations<BitSet<T>> for BitSet<T> {
+ fn union(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ bitwise(&mut self.words, &other.words, |a, b| a | b)
+ }
+
+ fn subtract(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ bitwise(&mut self.words, &other.words, |a, b| a & !b)
+ }
+
+ fn intersect(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ bitwise(&mut self.words, &other.words, |a, b| a & b)
+ }
+}
+
+impl<T: Idx> From<GrowableBitSet<T>> for BitSet<T> {
+ fn from(bit_set: GrowableBitSet<T>) -> Self {
+ bit_set.bit_set
+ }
+}
+
+/// A fixed-size bitset type with a partially dense, partially sparse
+/// representation. The bitset is broken into chunks, and chunks that are all
+/// zeros or all ones are represented and handled very efficiently.
+///
+/// This type is especially efficient for sets that typically have a large
+/// `domain_size` with significant stretches of all zeros or all ones, and also
+/// some stretches with lots of 0s and 1s mixed in a way that causes trouble
+/// for `IntervalSet`.
+///
+/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
+/// just be `usize`.
+///
+/// All operations that involve an element will panic if the element is equal
+/// to or greater than the domain size. All operations that involve two bitsets
+/// will panic if the bitsets have differing domain sizes.
+#[derive(Debug, PartialEq, Eq)]
+pub struct ChunkedBitSet<T> {
+ domain_size: usize,
+
+ /// The chunks. Each one contains exactly CHUNK_BITS values, except the
+ /// last one which contains 1..=CHUNK_BITS values.
+ chunks: Box<[Chunk]>,
+
+ marker: PhantomData<T>,
+}
+
+// Note: the chunk domain size is duplicated in each variant. This is a bit
+// inconvenient, but it allows the type size to be smaller than if we had an
+// outer struct containing a chunk domain size plus the `Chunk`, because the
+// compiler can place the chunk domain size after the tag.
+#[derive(Clone, Debug, PartialEq, Eq)]
+enum Chunk {
+ /// A chunk that is all zeros; we don't represent the zeros explicitly.
+ Zeros(ChunkSize),
+
+ /// A chunk that is all ones; we don't represent the ones explicitly.
+ Ones(ChunkSize),
+
+ /// A chunk that has a mix of zeros and ones, which are represented
+ /// explicitly and densely. It never has all zeros or all ones.
+ ///
+ /// If this is the final chunk there may be excess, unused words. This
+ /// turns out to be both simpler and have better performance than
+ /// allocating the minimum number of words, largely because we avoid having
+ /// to store the length, which would make this type larger. These excess
+    /// words are always zero, as are any excess bits in the final in-use
+ /// word.
+ ///
+ /// The second field is the count of 1s set in the chunk, and must satisfy
+ /// `0 < count < chunk_domain_size`.
+ ///
+ /// The words are within an `Rc` because it's surprisingly common to
+ /// duplicate an entire chunk, e.g. in `ChunkedBitSet::clone_from()`, or
+ /// when a `Mixed` chunk is union'd into a `Zeros` chunk. When we do need
+ /// to modify a chunk we use `Rc::make_mut`.
+ Mixed(ChunkSize, ChunkSize, Rc<[Word; CHUNK_WORDS]>),
+}
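+
+// Size arithmetic for the assertion below (on 64-bit targets): the `Rc`
+// pointer in `Mixed` is 8 bytes and the two `ChunkSize` fields are 2 bytes
+// each, so the whole enum, discriminant and padding included, fits in 16
+// bytes.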
+
+// This type is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+crate::static_assert_size!(Chunk, 16);
+
+impl<T> ChunkedBitSet<T> {
+ pub fn domain_size(&self) -> usize {
+ self.domain_size
+ }
+
+ #[cfg(test)]
+ fn assert_valid(&self) {
+ if self.domain_size == 0 {
+ assert!(self.chunks.is_empty());
+ return;
+ }
+
+ assert!((self.chunks.len() - 1) * CHUNK_BITS <= self.domain_size);
+ assert!(self.chunks.len() * CHUNK_BITS >= self.domain_size);
+ for chunk in self.chunks.iter() {
+ chunk.assert_valid();
+ }
+ }
+}
+
+impl<T: Idx> ChunkedBitSet<T> {
+ /// Creates a new bitset with a given `domain_size` and chunk kind.
+ fn new(domain_size: usize, is_empty: bool) -> Self {
+ let chunks = if domain_size == 0 {
+ Box::new([])
+ } else {
+ // All the chunks have a chunk_domain_size of `CHUNK_BITS` except
+ // the final one.
+ let final_chunk_domain_size = {
+ let n = domain_size % CHUNK_BITS;
+ if n == 0 { CHUNK_BITS } else { n }
+ };
+ let mut chunks =
+ vec![Chunk::new(CHUNK_BITS, is_empty); num_chunks(domain_size)].into_boxed_slice();
+ *chunks.last_mut().unwrap() = Chunk::new(final_chunk_domain_size, is_empty);
+ chunks
+ };
+ ChunkedBitSet { domain_size, chunks, marker: PhantomData }
+ }
+
+ /// Creates a new, empty bitset with a given `domain_size`.
+ #[inline]
+ pub fn new_empty(domain_size: usize) -> Self {
+ ChunkedBitSet::new(domain_size, /* is_empty */ true)
+ }
+
+ /// Creates a new, filled bitset with a given `domain_size`.
+ #[inline]
+ pub fn new_filled(domain_size: usize) -> Self {
+ ChunkedBitSet::new(domain_size, /* is_empty */ false)
+ }
+
+ #[cfg(test)]
+ fn chunks(&self) -> &[Chunk] {
+ &self.chunks
+ }
+
+ /// Count the number of bits in the set.
+ pub fn count(&self) -> usize {
+ self.chunks.iter().map(|chunk| chunk.count()).sum()
+ }
+
+ /// Returns `true` if `self` contains `elem`.
+ #[inline]
+ pub fn contains(&self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let chunk = &self.chunks[chunk_index(elem)];
+ match &chunk {
+ Zeros(_) => false,
+ Ones(_) => true,
+ Mixed(_, _, words) => {
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ (words[word_index] & mask) != 0
+ }
+ }
+ }
+
+ #[inline]
+ pub fn iter(&self) -> ChunkedBitIter<'_, T> {
+ ChunkedBitIter::new(self)
+ }
+
+ /// Insert `elem`. Returns whether the set has changed.
+ pub fn insert(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let chunk_index = chunk_index(elem);
+ let chunk = &mut self.chunks[chunk_index];
+ match *chunk {
+ Zeros(chunk_domain_size) => {
+ if chunk_domain_size > 1 {
+ // We take some effort to avoid copying the words.
+ let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
+ // SAFETY: `words` can safely be all zeroes.
+ let mut words = unsafe { words.assume_init() };
+ let words_ref = Rc::get_mut(&mut words).unwrap();
+
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ words_ref[word_index] |= mask;
+ *chunk = Mixed(chunk_domain_size, 1, words);
+ } else {
+ *chunk = Ones(chunk_domain_size);
+ }
+ true
+ }
+ Ones(_) => false,
+ Mixed(chunk_domain_size, ref mut count, ref mut words) => {
+ // We skip all the work if the bit is already set.
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ if (words[word_index] & mask) == 0 {
+ *count += 1;
+ if *count < chunk_domain_size {
+ let words = Rc::make_mut(words);
+ words[word_index] |= mask;
+ } else {
+ *chunk = Ones(chunk_domain_size);
+ }
+ true
+ } else {
+ false
+ }
+ }
+ }
+ }
+
+ /// Sets all bits to true.
+ pub fn insert_all(&mut self) {
+ for chunk in self.chunks.iter_mut() {
+ *chunk = match *chunk {
+ Zeros(chunk_domain_size)
+ | Ones(chunk_domain_size)
+ | Mixed(chunk_domain_size, ..) => Ones(chunk_domain_size),
+ }
+ }
+ }
+
+ /// Returns `true` if the set has changed.
+ pub fn remove(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let chunk_index = chunk_index(elem);
+ let chunk = &mut self.chunks[chunk_index];
+ match *chunk {
+ Zeros(_) => false,
+ Ones(chunk_domain_size) => {
+ if chunk_domain_size > 1 {
+ // We take some effort to avoid copying the words.
+ let words = Rc::<[Word; CHUNK_WORDS]>::new_zeroed();
+ // SAFETY: `words` can safely be all zeroes.
+ let mut words = unsafe { words.assume_init() };
+ let words_ref = Rc::get_mut(&mut words).unwrap();
+
+ // Set only the bits in use.
+ let num_words = num_words(chunk_domain_size as usize);
+ words_ref[..num_words].fill(!0);
+ clear_excess_bits_in_final_word(
+ chunk_domain_size as usize,
+ &mut words_ref[..num_words],
+ );
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ words_ref[word_index] &= !mask;
+ *chunk = Mixed(chunk_domain_size, chunk_domain_size - 1, words);
+ } else {
+ *chunk = Zeros(chunk_domain_size);
+ }
+ true
+ }
+ Mixed(chunk_domain_size, ref mut count, ref mut words) => {
+ // We skip all the work if the bit is already clear.
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ if (words[word_index] & mask) != 0 {
+ *count -= 1;
+ if *count > 0 {
+ let words = Rc::make_mut(words);
+ words[word_index] &= !mask;
+ } else {
+ *chunk = Zeros(chunk_domain_size);
+ }
+ true
+ } else {
+ false
+ }
+ }
+ }
+ }
+
+ bit_relations_inherent_impls! {}
+}
+
+impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> {
+ fn union(&mut self, other: &ChunkedBitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size);
+ debug_assert_eq!(self.chunks.len(), other.chunks.len());
+
+ let mut changed = false;
+ for (mut self_chunk, other_chunk) in self.chunks.iter_mut().zip(other.chunks.iter()) {
+ match (&mut self_chunk, &other_chunk) {
+ (_, Zeros(_)) | (Ones(_), _) => {}
+ (Zeros(self_chunk_domain_size), Ones(other_chunk_domain_size))
+ | (Mixed(self_chunk_domain_size, ..), Ones(other_chunk_domain_size))
+ | (Zeros(self_chunk_domain_size), Mixed(other_chunk_domain_size, ..)) => {
+ // `other_chunk` fully overwrites `self_chunk`
+ debug_assert_eq!(self_chunk_domain_size, other_chunk_domain_size);
+ *self_chunk = other_chunk.clone();
+ changed = true;
+ }
+ (
+ Mixed(
+ self_chunk_domain_size,
+ ref mut self_chunk_count,
+ ref mut self_chunk_words,
+ ),
+ Mixed(_other_chunk_domain_size, _other_chunk_count, other_chunk_words),
+ ) => {
+ // First check if the operation would change
+ // `self_chunk.words`. If not, we can avoid allocating some
+ // words, and this happens often enough that it's a
+ // performance win. Also, we only need to operate on the
+ // in-use words, hence the slicing.
+ let op = |a, b| a | b;
+ let num_words = num_words(*self_chunk_domain_size as usize);
+ if bitwise_changes(
+ &self_chunk_words[0..num_words],
+ &other_chunk_words[0..num_words],
+ op,
+ ) {
+ let self_chunk_words = Rc::make_mut(self_chunk_words);
+ let has_changed = bitwise(
+ &mut self_chunk_words[0..num_words],
+ &other_chunk_words[0..num_words],
+ op,
+ );
+ debug_assert!(has_changed);
+ *self_chunk_count = self_chunk_words[0..num_words]
+ .iter()
+ .map(|w| w.count_ones() as ChunkSize)
+ .sum();
+ if *self_chunk_count == *self_chunk_domain_size {
+ *self_chunk = Ones(*self_chunk_domain_size);
+ }
+ changed = true;
+ }
+ }
+ }
+ }
+ changed
+ }
+
+ fn subtract(&mut self, _other: &ChunkedBitSet<T>) -> bool {
+ unimplemented!("implement if/when necessary");
+ }
+
+ fn intersect(&mut self, _other: &ChunkedBitSet<T>) -> bool {
+ unimplemented!("implement if/when necessary");
+ }
+}
+
+impl<T: Idx> BitRelations<HybridBitSet<T>> for ChunkedBitSet<T> {
+ fn union(&mut self, other: &HybridBitSet<T>) -> bool {
+ // FIXME: This is slow if `other` is dense, but it hasn't been a problem
+ // in practice so far.
+ // If a faster implementation of this operation is required, consider
+ // reopening https://github.com/rust-lang/rust/pull/94625
+ assert_eq!(self.domain_size, other.domain_size());
+ sequential_update(|elem| self.insert(elem), other.iter())
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
+ // FIXME: This is slow if `other` is dense, but it hasn't been a problem
+ // in practice so far.
+ // If a faster implementation of this operation is required, consider
+ // reopening https://github.com/rust-lang/rust/pull/94625
+ assert_eq!(self.domain_size, other.domain_size());
+ sequential_update(|elem| self.remove(elem), other.iter())
+ }
+
+ fn intersect(&mut self, _other: &HybridBitSet<T>) -> bool {
+ unimplemented!("implement if/when necessary");
+ }
+}
+
+impl<T: Idx> BitRelations<ChunkedBitSet<T>> for BitSet<T> {
+ fn union(&mut self, other: &ChunkedBitSet<T>) -> bool {
+ sequential_update(|elem| self.insert(elem), other.iter())
+ }
+
+ fn subtract(&mut self, _other: &ChunkedBitSet<T>) -> bool {
+ unimplemented!("implement if/when necessary");
+ }
+
+ fn intersect(&mut self, other: &ChunkedBitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size);
+ let mut changed = false;
+ for (i, chunk) in other.chunks.iter().enumerate() {
+ let mut words = &mut self.words[i * CHUNK_WORDS..];
+ if words.len() > CHUNK_WORDS {
+ words = &mut words[..CHUNK_WORDS];
+ }
+ match chunk {
+ Chunk::Zeros(..) => {
+ for word in words {
+ if *word != 0 {
+ changed = true;
+ *word = 0;
+ }
+ }
+ }
+ Chunk::Ones(..) => (),
+ Chunk::Mixed(_, _, data) => {
+ for (i, word) in words.iter_mut().enumerate() {
+ let new_val = *word & data[i];
+ if new_val != *word {
+ changed = true;
+ *word = new_val;
+ }
+ }
+ }
+ }
+ }
+ changed
+ }
+}
+
+impl<T> Clone for ChunkedBitSet<T> {
+ fn clone(&self) -> Self {
+ ChunkedBitSet {
+ domain_size: self.domain_size,
+ chunks: self.chunks.clone(),
+ marker: PhantomData,
+ }
+ }
+
+ /// WARNING: this implementation of clone_from will panic if the two
+ /// bitsets have different domain sizes. This constraint is not inherent to
+ /// `clone_from`, but it works with the existing call sites and allows a
+ /// faster implementation, which is important because this function is hot.
+ fn clone_from(&mut self, from: &Self) {
+ assert_eq!(self.domain_size, from.domain_size);
+ debug_assert_eq!(self.chunks.len(), from.chunks.len());
+
+ self.chunks.clone_from(&from.chunks)
+ }
+}
+
+pub struct ChunkedBitIter<'a, T: Idx> {
+ index: usize,
+ bitset: &'a ChunkedBitSet<T>,
+}
+
+impl<'a, T: Idx> ChunkedBitIter<'a, T> {
+ #[inline]
+ fn new(bitset: &'a ChunkedBitSet<T>) -> ChunkedBitIter<'a, T> {
+ ChunkedBitIter { index: 0, bitset }
+ }
+}
+
+impl<'a, T: Idx> Iterator for ChunkedBitIter<'a, T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ while self.index < self.bitset.domain_size() {
+ let elem = T::new(self.index);
+ let chunk = &self.bitset.chunks[chunk_index(elem)];
+ match &chunk {
+ Zeros(chunk_domain_size) => {
+ self.index += *chunk_domain_size as usize;
+ }
+ Ones(_chunk_domain_size) => {
+ self.index += 1;
+ return Some(elem);
+ }
+ Mixed(_chunk_domain_size, _, words) => loop {
+ let elem = T::new(self.index);
+ self.index += 1;
+ let (word_index, mask) = chunk_word_index_and_mask(elem);
+ if (words[word_index] & mask) != 0 {
+ return Some(elem);
+ }
+ if self.index % CHUNK_BITS == 0 {
+ break;
+ }
+ },
+ }
+ }
+ None
+ }
+
+ fn fold<B, F>(mut self, mut init: B, mut f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+        // If `next` has already been called, we may not be at the start of a chunk, so we first
+        // advance the iterator to the start of the next chunk before proceeding in chunk-sized
+        // steps.
+ while self.index % CHUNK_BITS != 0 {
+ let Some(item) = self.next() else {
+ return init
+ };
+ init = f(init, item);
+ }
+ let start_chunk = self.index / CHUNK_BITS;
+ let chunks = &self.bitset.chunks[start_chunk..];
+ for (i, chunk) in chunks.iter().enumerate() {
+ let base = (start_chunk + i) * CHUNK_BITS;
+ match chunk {
+ Chunk::Zeros(_) => (),
+ Chunk::Ones(limit) => {
+ for j in 0..(*limit as usize) {
+ init = f(init, T::new(base + j));
+ }
+ }
+ Chunk::Mixed(_, _, words) => {
+ init = BitIter::new(&**words).fold(init, |val, mut item: T| {
+ item.increment_by(base);
+ f(val, item)
+ });
+ }
+ }
+ }
+ init
+ }
+}
+
+impl Chunk {
+ #[cfg(test)]
+ fn assert_valid(&self) {
+ match *self {
+ Zeros(chunk_domain_size) | Ones(chunk_domain_size) => {
+ assert!(chunk_domain_size as usize <= CHUNK_BITS);
+ }
+ Mixed(chunk_domain_size, count, ref words) => {
+ assert!(chunk_domain_size as usize <= CHUNK_BITS);
+ assert!(0 < count && count < chunk_domain_size);
+
+ // Check the number of set bits matches `count`.
+ assert_eq!(
+ words.iter().map(|w| w.count_ones() as ChunkSize).sum::<ChunkSize>(),
+ count
+ );
+
+ // Check the not-in-use words are all zeroed.
+ let num_words = num_words(chunk_domain_size as usize);
+ if num_words < CHUNK_WORDS {
+ assert_eq!(
+ words[num_words..]
+ .iter()
+ .map(|w| w.count_ones() as ChunkSize)
+ .sum::<ChunkSize>(),
+ 0
+ );
+ }
+ }
+ }
+ }
+
+ fn new(chunk_domain_size: usize, is_empty: bool) -> Self {
+ debug_assert!(chunk_domain_size <= CHUNK_BITS);
+ let chunk_domain_size = chunk_domain_size as ChunkSize;
+ if is_empty { Zeros(chunk_domain_size) } else { Ones(chunk_domain_size) }
+ }
+
+ /// Count the number of 1s in the chunk.
+ fn count(&self) -> usize {
+ match *self {
+ Zeros(_) => 0,
+ Ones(chunk_domain_size) => chunk_domain_size as usize,
+ Mixed(_, count, _) => count as usize,
+ }
+ }
+}
+
+// Applies a function to mutate a bitset, and returns true if any of the
+// applications returned true.
+fn sequential_update<T: Idx>(
+ mut self_update: impl FnMut(T) -> bool,
+ it: impl Iterator<Item = T>,
+) -> bool {
+ it.fold(false, |changed, elem| self_update(elem) | changed)
+}
+
+// Optimization of intersection for SparseBitSet that's generic
+// over the RHS
+fn sparse_intersect<T: Idx>(
+ set: &mut SparseBitSet<T>,
+ other_contains: impl Fn(&T) -> bool,
+) -> bool {
+ let size = set.elems.len();
+ set.elems.retain(|elem| other_contains(elem));
+ set.elems.len() != size
+}
+
+// Optimization of dense/sparse intersection. The resulting set is
+// guaranteed to be at most the size of the sparse set, and hence can be
+// represented as a sparse set. Therefore the sparse set is copied and filtered,
+// then returned as the new set.
+fn dense_sparse_intersect<T: Idx>(
+ dense: &BitSet<T>,
+ sparse: &SparseBitSet<T>,
+) -> (SparseBitSet<T>, bool) {
+ let mut sparse_copy = sparse.clone();
+ sparse_intersect(&mut sparse_copy, |el| dense.contains(*el));
+ let n = sparse_copy.len();
+ (sparse_copy, n != dense.count())
+}
+
+// hybrid REL dense
+impl<T: Idx> BitRelations<BitSet<T>> for HybridBitSet<T> {
+ fn union(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size);
+ match self {
+ HybridBitSet::Sparse(sparse) => {
+                // `self` is sparse and `other` is dense. To
+                // merge them, we have two available strategies:
+                // * Densify `self`, then merge `other` in.
+                // * Clone `other`, then integrate the bits of `self`.
+                // The second strategy requires a dedicated method
+                // (`reverse_union_sparse`), since the usual `union`
+                // returns the wrong result. In that case the
+                // computation is slightly faster if the bits of the
+                // sparse bitset map to only a few words of the dense
+                // representation, i.e. if the indices are near each
+                // other.
+                //
+                // Benchmarking seems to suggest that the second
+                // option is worth it.
+ let mut new_dense = other.clone();
+ let changed = new_dense.reverse_union_sparse(sparse);
+ *self = HybridBitSet::Dense(new_dense);
+ changed
+ }
+
+ HybridBitSet::Dense(dense) => dense.union(other),
+ }
+ }
+
+ fn subtract(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size);
+ match self {
+ HybridBitSet::Sparse(sparse) => {
+ sequential_update(|elem| sparse.remove(elem), other.iter())
+ }
+ HybridBitSet::Dense(dense) => dense.subtract(other),
+ }
+ }
+
+ fn intersect(&mut self, other: &BitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size);
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse_intersect(sparse, |elem| other.contains(*elem)),
+ HybridBitSet::Dense(dense) => dense.intersect(other),
+ }
+ }
+}
+
+// dense REL hybrid
+impl<T: Idx> BitRelations<HybridBitSet<T>> for BitSet<T> {
+ fn union(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size());
+ match other {
+ HybridBitSet::Sparse(sparse) => {
+ sequential_update(|elem| self.insert(elem), sparse.iter().cloned())
+ }
+ HybridBitSet::Dense(dense) => self.union(dense),
+ }
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size());
+ match other {
+ HybridBitSet::Sparse(sparse) => {
+ sequential_update(|elem| self.remove(elem), sparse.iter().cloned())
+ }
+ HybridBitSet::Dense(dense) => self.subtract(dense),
+ }
+ }
+
+ fn intersect(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size, other.domain_size());
+ match other {
+ HybridBitSet::Sparse(sparse) => {
+ let (updated, changed) = dense_sparse_intersect(self, sparse);
+
+ // We can't directly assign the SparseBitSet to the BitSet, and
+ // doing `*self = updated.to_dense()` would cause a drop / reallocation. Instead,
+ // the BitSet is cleared and `updated` is copied into `self`.
+ self.clear();
+ for elem in updated.iter() {
+ self.insert(*elem);
+ }
+ changed
+ }
+ HybridBitSet::Dense(dense) => self.intersect(dense),
+ }
+ }
+}
+
+// hybrid REL hybrid
+impl<T: Idx> BitRelations<HybridBitSet<T>> for HybridBitSet<T> {
+ fn union(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size());
+ match self {
+ HybridBitSet::Sparse(_) => {
+ match other {
+ HybridBitSet::Sparse(other_sparse) => {
+ // Both sets are sparse. Add the elements in
+ // `other_sparse` to `self` one at a time. This
+ // may or may not cause `self` to be densified.
+ let mut changed = false;
+ for elem in other_sparse.iter() {
+ changed |= self.insert(*elem);
+ }
+ changed
+ }
+
+ HybridBitSet::Dense(other_dense) => self.union(other_dense),
+ }
+ }
+
+ HybridBitSet::Dense(self_dense) => self_dense.union(other),
+ }
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size());
+ match self {
+ HybridBitSet::Sparse(self_sparse) => {
+ sequential_update(|elem| self_sparse.remove(elem), other.iter())
+ }
+ HybridBitSet::Dense(self_dense) => self_dense.subtract(other),
+ }
+ }
+
+ fn intersect(&mut self, other: &HybridBitSet<T>) -> bool {
+ assert_eq!(self.domain_size(), other.domain_size());
+ match self {
+ HybridBitSet::Sparse(self_sparse) => {
+ sparse_intersect(self_sparse, |elem| other.contains(*elem))
+ }
+ HybridBitSet::Dense(self_dense) => match other {
+ HybridBitSet::Sparse(other_sparse) => {
+ let (updated, changed) = dense_sparse_intersect(self_dense, other_sparse);
+ *self = HybridBitSet::Sparse(updated);
+ changed
+ }
+ HybridBitSet::Dense(other_dense) => self_dense.intersect(other_dense),
+ },
+ }
+ }
+}
+
+impl<T> Clone for BitSet<T> {
+ fn clone(&self) -> Self {
+ BitSet { domain_size: self.domain_size, words: self.words.clone(), marker: PhantomData }
+ }
+
+ fn clone_from(&mut self, from: &Self) {
+ self.domain_size = from.domain_size;
+ self.words.clone_from(&from.words);
+ }
+}
+
+impl<T: Idx> fmt::Debug for BitSet<T> {
+ fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+ w.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<T: Idx> ToString for BitSet<T> {
+ fn to_string(&self) -> String {
+ let mut result = String::new();
+ let mut sep = '[';
+
+ // Note: this is a little endian printout of bytes.
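+        // For example (illustrative), a 10-bit set containing {0, 8} prints
+        // as "[01-01]": byte 0 is 0x01 (bit 0), byte 1 is 0x01 (bit 8), and
+        // only the low 2 bits of the final byte lie within the domain.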
+
+ // i tracks how many bits we have printed so far.
+ let mut i = 0;
+ for word in &self.words {
+ let mut word = *word;
+ for _ in 0..WORD_BYTES {
+ // for each byte in `word`:
+ let remain = self.domain_size - i;
+ // If less than a byte remains, then mask just that many bits.
+ let mask = if remain <= 8 { (1 << remain) - 1 } else { 0xFF };
+ assert!(mask <= 0xFF);
+ let byte = word & mask;
+
+ result.push_str(&format!("{}{:02x}", sep, byte));
+
+ if remain <= 8 {
+ break;
+ }
+ word >>= 8;
+ i += 8;
+ sep = '-';
+ }
+ sep = '|';
+ }
+ result.push(']');
+
+ result
+ }
+}
+
+pub struct BitIter<'a, T: Idx> {
+ /// A copy of the current word, but with any already-visited bits cleared.
+ /// (This lets us use `trailing_zeros()` to find the next set bit.) When it
+ /// is reduced to 0, we move onto the next word.
+ word: Word,
+
+ /// The offset (measured in bits) of the current word.
+ offset: usize,
+
+ /// Underlying iterator over the words.
+ iter: slice::Iter<'a, Word>,
+
+ marker: PhantomData<T>,
+}
+
+impl<'a, T: Idx> BitIter<'a, T> {
+ #[inline]
+ fn new(words: &'a [Word]) -> BitIter<'a, T> {
+ // We initialize `word` and `offset` to degenerate values. On the first
+ // call to `next()` we will fall through to getting the first word from
+ // `iter`, which sets `word` to the first word (if there is one) and
+ // `offset` to 0. Doing it this way saves us from having to maintain
+ // additional state about whether we have started.
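+        // (Illustrative: with `WORD_BITS == 64`, `offset` starts at
+        // `usize::MAX - 63`, so the first `wrapping_add(WORD_BITS)` in
+        // `next()` wraps it around to exactly 0.)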
+ BitIter {
+ word: 0,
+ offset: usize::MAX - (WORD_BITS - 1),
+ iter: words.iter(),
+ marker: PhantomData,
+ }
+ }
+}
+
+impl<'a, T: Idx> Iterator for BitIter<'a, T> {
+ type Item = T;
+ fn next(&mut self) -> Option<T> {
+ loop {
+ if self.word != 0 {
+ // Get the position of the next set bit in the current word,
+ // then clear the bit.
+ let bit_pos = self.word.trailing_zeros() as usize;
+ let bit = 1 << bit_pos;
+ self.word ^= bit;
+ return Some(T::new(bit_pos + self.offset));
+ }
+
+ // Move onto the next word. `wrapping_add()` is needed to handle
+ // the degenerate initial value given to `offset` in `new()`.
+ let word = self.iter.next()?;
+ self.word = *word;
+ self.offset = self.offset.wrapping_add(WORD_BITS);
+ }
+ }
+}
+
+#[inline]
+fn bitwise<Op>(out_vec: &mut [Word], in_vec: &[Word], op: Op) -> bool
+where
+ Op: Fn(Word, Word) -> Word,
+{
+ assert_eq!(out_vec.len(), in_vec.len());
+ let mut changed = 0;
+ for (out_elem, in_elem) in iter::zip(out_vec, in_vec) {
+ let old_val = *out_elem;
+ let new_val = op(old_val, *in_elem);
+ *out_elem = new_val;
+ // This is essentially equivalent to a != with changed being a bool, but
+ // in practice this code gets auto-vectorized by the compiler for most
+ // operators. Using != here causes us to generate quite poor code as the
+ // compiler tries to go back to a boolean on each loop iteration.
+ changed |= old_val ^ new_val;
+ }
+ changed != 0
+}
+
+/// Does this bitwise operation change `out_vec`?
+#[inline]
+fn bitwise_changes<Op>(out_vec: &[Word], in_vec: &[Word], op: Op) -> bool
+where
+ Op: Fn(Word, Word) -> Word,
+{
+ assert_eq!(out_vec.len(), in_vec.len());
+ for (out_elem, in_elem) in iter::zip(out_vec, in_vec) {
+ let old_val = *out_elem;
+ let new_val = op(old_val, *in_elem);
+ if old_val != new_val {
+ return true;
+ }
+ }
+ false
+}
+
+const SPARSE_MAX: usize = 8;
+
+/// A fixed-size bitset type with a sparse representation and a maximum of
+/// `SPARSE_MAX` elements. The elements are stored as a sorted `ArrayVec` with
+/// no duplicates.
+///
+/// This type is used by `HybridBitSet`; do not use directly.
+#[derive(Clone, Debug)]
+pub struct SparseBitSet<T> {
+ domain_size: usize,
+ elems: ArrayVec<T, SPARSE_MAX>,
+}
+
+impl<T: Idx> SparseBitSet<T> {
+ fn new_empty(domain_size: usize) -> Self {
+ SparseBitSet { domain_size, elems: ArrayVec::new() }
+ }
+
+ fn len(&self) -> usize {
+ self.elems.len()
+ }
+
+ fn is_empty(&self) -> bool {
+ self.elems.len() == 0
+ }
+
+ fn contains(&self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ self.elems.contains(&elem)
+ }
+
+ fn insert(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ let changed = if let Some(i) = self.elems.iter().position(|&e| e.index() >= elem.index()) {
+ if self.elems[i] == elem {
+ // `elem` is already in the set.
+ false
+ } else {
+ // `elem` is smaller than one or more existing elements.
+ self.elems.insert(i, elem);
+ true
+ }
+ } else {
+ // `elem` is larger than all existing elements.
+ self.elems.push(elem);
+ true
+ };
+ assert!(self.len() <= SPARSE_MAX);
+ changed
+ }
+
+ fn remove(&mut self, elem: T) -> bool {
+ assert!(elem.index() < self.domain_size);
+ if let Some(i) = self.elems.iter().position(|&e| e == elem) {
+ self.elems.remove(i);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn to_dense(&self) -> BitSet<T> {
+ let mut dense = BitSet::new_empty(self.domain_size);
+ for elem in self.elems.iter() {
+ dense.insert(*elem);
+ }
+ dense
+ }
+
+ fn iter(&self) -> slice::Iter<'_, T> {
+ self.elems.iter()
+ }
+
+ bit_relations_inherent_impls! {}
+}
+
+impl<T: Idx + Ord> SparseBitSet<T> {
+ fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T> {
+ let mut last_leq = None;
+ for e in self.iter() {
+ if range.contains(e) {
+ last_leq = Some(*e);
+ }
+ }
+ last_leq
+ }
+}
+
+/// A fixed-size bitset type with a hybrid representation: sparse when there
+/// are up to a `SPARSE_MAX` elements in the set, but dense when there are more
+/// than `SPARSE_MAX`.
+///
+/// This type is especially efficient for sets that typically have a small
+/// number of elements, but a large `domain_size`, and are cleared frequently.
+///
+/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
+/// just be `usize`.
+///
+/// All operations that involve an element will panic if the element is equal
+/// to or greater than the domain size. All operations that involve two bitsets
+/// will panic if the bitsets have differing domain sizes.
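+///
+/// # Example
+///
+/// A minimal usage sketch (illustrative only, with `usize` as the index
+/// type); the representation switches from sparse to dense once the number
+/// of elements exceeds `SPARSE_MAX`:
+///
+/// ```ignore (illustrative example)
+/// let mut set: HybridBitSet<usize> = HybridBitSet::new_empty(1000);
+/// for i in 0..SPARSE_MAX {
+///     set.insert(i * 100); // stored in the sorted sparse array
+/// }
+/// assert!(matches!(set, HybridBitSet::Sparse(_)));
+/// set.insert(999); // one element too many: converts to the dense form
+/// assert!(matches!(set, HybridBitSet::Dense(_)));
+/// assert!(set.contains(700) && set.contains(999));
+/// ```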
+#[derive(Clone)]
+pub enum HybridBitSet<T> {
+ Sparse(SparseBitSet<T>),
+ Dense(BitSet<T>),
+}
+
+impl<T: Idx> fmt::Debug for HybridBitSet<T> {
+ fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Sparse(b) => b.fmt(w),
+ Self::Dense(b) => b.fmt(w),
+ }
+ }
+}
+
+impl<T: Idx> HybridBitSet<T> {
+ pub fn new_empty(domain_size: usize) -> Self {
+ HybridBitSet::Sparse(SparseBitSet::new_empty(domain_size))
+ }
+
+ pub fn domain_size(&self) -> usize {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.domain_size,
+ HybridBitSet::Dense(dense) => dense.domain_size,
+ }
+ }
+
+ pub fn clear(&mut self) {
+ let domain_size = self.domain_size();
+ *self = HybridBitSet::new_empty(domain_size);
+ }
+
+ pub fn contains(&self, elem: T) -> bool {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.contains(elem),
+ HybridBitSet::Dense(dense) => dense.contains(elem),
+ }
+ }
+
+ pub fn superset(&self, other: &HybridBitSet<T>) -> bool {
+ match (self, other) {
+ (HybridBitSet::Dense(self_dense), HybridBitSet::Dense(other_dense)) => {
+ self_dense.superset(other_dense)
+ }
+ _ => {
+ assert!(self.domain_size() == other.domain_size());
+ other.iter().all(|elem| self.contains(elem))
+ }
+ }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.is_empty(),
+ HybridBitSet::Dense(dense) => dense.is_empty(),
+ }
+ }
+
+    /// Returns the largest element present in the bitset that is contained
+    /// in `range`, if any. In particular, if `elem` is in the bitset,
+    /// `last_set_in(..=elem)` returns `Some(elem)`.
+ pub fn last_set_in(&self, range: impl RangeBounds<T>) -> Option<T>
+ where
+ T: Ord,
+ {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.last_set_in(range),
+ HybridBitSet::Dense(dense) => dense.last_set_in(range),
+ }
+ }
+
+ pub fn insert(&mut self, elem: T) -> bool {
+ // No need to check `elem` against `self.domain_size` here because all
+ // the match cases check it, one way or another.
+ match self {
+ HybridBitSet::Sparse(sparse) if sparse.len() < SPARSE_MAX => {
+ // The set is sparse and has space for `elem`.
+ sparse.insert(elem)
+ }
+ HybridBitSet::Sparse(sparse) if sparse.contains(elem) => {
+ // The set is sparse and does not have space for `elem`, but
+ // that doesn't matter because `elem` is already present.
+ false
+ }
+ HybridBitSet::Sparse(sparse) => {
+ // The set is sparse and full. Convert to a dense set.
+ let mut dense = sparse.to_dense();
+ let changed = dense.insert(elem);
+ assert!(changed);
+ *self = HybridBitSet::Dense(dense);
+ changed
+ }
+ HybridBitSet::Dense(dense) => dense.insert(elem),
+ }
+ }
+
+ pub fn insert_range(&mut self, elems: impl RangeBounds<T>) {
+        // No need to check the bounds of `elems` against `self.domain_size`
+        // here because all the match cases check them, one way or another.
+ let start = match elems.start_bound().cloned() {
+ Bound::Included(start) => start.index(),
+ Bound::Excluded(start) => start.index() + 1,
+ Bound::Unbounded => 0,
+ };
+        let end = match elems.end_bound().cloned() {
+            Bound::Included(end) => end.index() + 1,
+            Bound::Excluded(end) => end.index(),
+            // `end` is an exclusive bound, so an unbounded range extends to
+            // the full domain size.
+            Bound::Unbounded => self.domain_size(),
+        };
+ let Some(len) = end.checked_sub(start) else { return };
+ match self {
+ HybridBitSet::Sparse(sparse) if sparse.len() + len < SPARSE_MAX => {
+ // The set is sparse and has space for `elems`.
+ for elem in start..end {
+ sparse.insert(T::new(elem));
+ }
+ }
+ HybridBitSet::Sparse(sparse) => {
+ // The set is sparse and full. Convert to a dense set.
+ let mut dense = sparse.to_dense();
+ dense.insert_range(elems);
+ *self = HybridBitSet::Dense(dense);
+ }
+ HybridBitSet::Dense(dense) => dense.insert_range(elems),
+ }
+ }
+
+ pub fn insert_all(&mut self) {
+ let domain_size = self.domain_size();
+ match self {
+ HybridBitSet::Sparse(_) => {
+ *self = HybridBitSet::Dense(BitSet::new_filled(domain_size));
+ }
+ HybridBitSet::Dense(dense) => dense.insert_all(),
+ }
+ }
+
+ pub fn remove(&mut self, elem: T) -> bool {
+ // Note: we currently don't bother going from Dense back to Sparse.
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.remove(elem),
+ HybridBitSet::Dense(dense) => dense.remove(elem),
+ }
+ }
+
+ /// Converts to a dense set, consuming itself in the process.
+ pub fn to_dense(self) -> BitSet<T> {
+ match self {
+ HybridBitSet::Sparse(sparse) => sparse.to_dense(),
+ HybridBitSet::Dense(dense) => dense,
+ }
+ }
+
+ pub fn iter(&self) -> HybridIter<'_, T> {
+ match self {
+ HybridBitSet::Sparse(sparse) => HybridIter::Sparse(sparse.iter()),
+ HybridBitSet::Dense(dense) => HybridIter::Dense(dense.iter()),
+ }
+ }
+
+ bit_relations_inherent_impls! {}
+}
+
+pub enum HybridIter<'a, T: Idx> {
+ Sparse(slice::Iter<'a, T>),
+ Dense(BitIter<'a, T>),
+}
+
+impl<'a, T: Idx> Iterator for HybridIter<'a, T> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ match self {
+ HybridIter::Sparse(sparse) => sparse.next().copied(),
+ HybridIter::Dense(dense) => dense.next(),
+ }
+ }
+}
+
+/// A resizable bitset type with a dense representation.
+///
+/// `T` is an index type, typically a newtyped `usize` wrapper, but it can also
+/// just be `usize`.
+///
+/// Unlike `BitSet`, the domain grows on demand: `insert` and `remove` resize
+/// the set as needed, and `contains` simply returns `false` for elements
+/// beyond the current domain size.
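+///
+/// # Example
+///
+/// A minimal sketch (illustrative only):
+///
+/// ```ignore (illustrative example)
+/// let mut set: GrowableBitSet<usize> = GrowableBitSet::new_empty();
+/// set.insert(128); // grows the underlying `BitSet` to hold 129 elements
+/// assert!(set.contains(128));
+/// assert!(!set.contains(1000)); // out of range: simply `false`, no panic
+/// ```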
+#[derive(Clone, Debug, PartialEq)]
+pub struct GrowableBitSet<T: Idx> {
+ bit_set: BitSet<T>,
+}
+
+impl<T: Idx> Default for GrowableBitSet<T> {
+ fn default() -> Self {
+ GrowableBitSet::new_empty()
+ }
+}
+
+impl<T: Idx> GrowableBitSet<T> {
+ /// Ensure that the set can hold at least `min_domain_size` elements.
+ pub fn ensure(&mut self, min_domain_size: usize) {
+ if self.bit_set.domain_size < min_domain_size {
+ self.bit_set.domain_size = min_domain_size;
+ }
+
+ let min_num_words = num_words(min_domain_size);
+ if self.bit_set.words.len() < min_num_words {
+ self.bit_set.words.resize(min_num_words, 0)
+ }
+ }
+
+ pub fn new_empty() -> GrowableBitSet<T> {
+ GrowableBitSet { bit_set: BitSet::new_empty(0) }
+ }
+
+ pub fn with_capacity(capacity: usize) -> GrowableBitSet<T> {
+ GrowableBitSet { bit_set: BitSet::new_empty(capacity) }
+ }
+
+ /// Returns `true` if the set has changed.
+ #[inline]
+ pub fn insert(&mut self, elem: T) -> bool {
+ self.ensure(elem.index() + 1);
+ self.bit_set.insert(elem)
+ }
+
+ /// Returns `true` if the set has changed.
+ #[inline]
+ pub fn remove(&mut self, elem: T) -> bool {
+ self.ensure(elem.index() + 1);
+ self.bit_set.remove(elem)
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.bit_set.is_empty()
+ }
+
+ #[inline]
+ pub fn contains(&self, elem: T) -> bool {
+ let (word_index, mask) = word_index_and_mask(elem);
+ self.bit_set.words.get(word_index).map_or(false, |word| (word & mask) != 0)
+ }
+
+ #[inline]
+ pub fn iter(&self) -> BitIter<'_, T> {
+ self.bit_set.iter()
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.bit_set.count()
+ }
+}
+
+impl<T: Idx> From<BitSet<T>> for GrowableBitSet<T> {
+ fn from(bit_set: BitSet<T>) -> Self {
+ Self { bit_set }
+ }
+}
+
+/// A fixed-size 2D bit matrix type with a dense representation.
+///
+/// `R` and `C` are index types used to identify rows and columns respectively;
+/// typically newtyped `usize` wrappers, but they can also just be `usize`.
+///
+/// All operations that involve a row and/or column index will panic if the
+/// index exceeds the relevant bound.
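+///
+/// # Example
+///
+/// A minimal sketch of the row/column API (illustrative only):
+///
+/// ```ignore (illustrative example)
+/// let mut matrix: BitMatrix<usize, usize> = BitMatrix::new(10, 64);
+/// matrix.insert(2, 3); // set cell (row 2, column 3)
+/// matrix.union_rows(2, 5); // add row 2's bits into row 5
+/// assert!(matrix.contains(5, 3));
+/// assert_eq!(matrix.iter(5).collect::<Vec<_>>(), [3]);
+/// ```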
+#[derive(Clone, Eq, PartialEq, Hash, Decodable, Encodable)]
+pub struct BitMatrix<R: Idx, C: Idx> {
+ num_rows: usize,
+ num_columns: usize,
+ words: Vec<Word>,
+ marker: PhantomData<(R, C)>,
+}
+
+impl<R: Idx, C: Idx> BitMatrix<R, C> {
+ /// Creates a new `rows x columns` matrix, initially empty.
+ pub fn new(num_rows: usize, num_columns: usize) -> BitMatrix<R, C> {
+        // Every row needs one bit per column, rounded up to a whole
+        // number of words.
+ let words_per_row = num_words(num_columns);
+ BitMatrix {
+ num_rows,
+ num_columns,
+ words: vec![0; num_rows * words_per_row],
+ marker: PhantomData,
+ }
+ }
+
+ /// Creates a new matrix, with `row` used as the value for every row.
+ pub fn from_row_n(row: &BitSet<C>, num_rows: usize) -> BitMatrix<R, C> {
+ let num_columns = row.domain_size();
+ let words_per_row = num_words(num_columns);
+ assert_eq!(words_per_row, row.words().len());
+ BitMatrix {
+ num_rows,
+ num_columns,
+ words: iter::repeat(row.words()).take(num_rows).flatten().cloned().collect(),
+ marker: PhantomData,
+ }
+ }
+
+ pub fn rows(&self) -> impl Iterator<Item = R> {
+ (0..self.num_rows).map(R::new)
+ }
+
+ /// The range of bits for a given row.
+ fn range(&self, row: R) -> (usize, usize) {
+ let words_per_row = num_words(self.num_columns);
+ let start = row.index() * words_per_row;
+ (start, start + words_per_row)
+ }
+
+ /// Sets the cell at `(row, column)` to true. Put another way, insert
+ /// `column` to the bitset for `row`.
+ ///
+ /// Returns `true` if this changed the matrix.
+ pub fn insert(&mut self, row: R, column: C) -> bool {
+ assert!(row.index() < self.num_rows && column.index() < self.num_columns);
+ let (start, _) = self.range(row);
+ let (word_index, mask) = word_index_and_mask(column);
+ let words = &mut self.words[..];
+ let word = words[start + word_index];
+ let new_word = word | mask;
+ words[start + word_index] = new_word;
+ word != new_word
+ }
+
+ /// Do the bits from `row` contain `column`? Put another way, is
+ /// the matrix cell at `(row, column)` true? Put yet another way,
+ /// if the matrix represents (transitive) reachability, can
+ /// `row` reach `column`?
+ pub fn contains(&self, row: R, column: C) -> bool {
+ assert!(row.index() < self.num_rows && column.index() < self.num_columns);
+ let (start, _) = self.range(row);
+ let (word_index, mask) = word_index_and_mask(column);
+ (self.words[start + word_index] & mask) != 0
+ }
+
+    /// Returns those indices that are true in both `row1` and `row2`. This
+    /// is an *O*(*n*) operation where *n* is the number of columns
+    /// (largely independent of the actual size of the intersection, in
+    /// particular).
+ pub fn intersect_rows(&self, row1: R, row2: R) -> Vec<C> {
+ assert!(row1.index() < self.num_rows && row2.index() < self.num_rows);
+ let (row1_start, row1_end) = self.range(row1);
+ let (row2_start, row2_end) = self.range(row2);
+ let mut result = Vec::with_capacity(self.num_columns);
+ for (base, (i, j)) in (row1_start..row1_end).zip(row2_start..row2_end).enumerate() {
+ let mut v = self.words[i] & self.words[j];
+ for bit in 0..WORD_BITS {
+ if v == 0 {
+ break;
+ }
+ if v & 0x1 != 0 {
+ result.push(C::new(base * WORD_BITS + bit));
+ }
+ v >>= 1;
+ }
+ }
+ result
+ }
+
+ /// Adds the bits from row `read` to the bits from row `write`, and
+ /// returns `true` if anything changed.
+ ///
+    /// This is used when computing transitive reachability: if there is
+    /// an edge `write -> read`, then `write` can reach everything that
+    /// `read` can (and potentially more).
+ pub fn union_rows(&mut self, read: R, write: R) -> bool {
+ assert!(read.index() < self.num_rows && write.index() < self.num_rows);
+ let (read_start, read_end) = self.range(read);
+ let (write_start, write_end) = self.range(write);
+ let words = &mut self.words[..];
+ let mut changed = false;
+ for (read_index, write_index) in iter::zip(read_start..read_end, write_start..write_end) {
+ let word = words[write_index];
+ let new_word = word | words[read_index];
+ words[write_index] = new_word;
+ changed |= word != new_word;
+ }
+ changed
+ }
+
+ /// Adds the bits from `with` to the bits from row `write`, and
+ /// returns `true` if anything changed.
+ pub fn union_row_with(&mut self, with: &BitSet<C>, write: R) -> bool {
+ assert!(write.index() < self.num_rows);
+ assert_eq!(with.domain_size(), self.num_columns);
+ let (write_start, write_end) = self.range(write);
+ let mut changed = false;
+ for (read_index, write_index) in iter::zip(0..with.words().len(), write_start..write_end) {
+ let word = self.words[write_index];
+ let new_word = word | with.words()[read_index];
+ self.words[write_index] = new_word;
+ changed |= word != new_word;
+ }
+ changed
+ }
+
+ /// Sets every cell in `row` to true.
+ pub fn insert_all_into_row(&mut self, row: R) {
+ assert!(row.index() < self.num_rows);
+ let (start, end) = self.range(row);
+ let words = &mut self.words[..];
+ for index in start..end {
+ words[index] = !0;
+ }
+ clear_excess_bits_in_final_word(self.num_columns, &mut self.words[..end]);
+ }
+
+ /// Gets a slice of the underlying words.
+ pub fn words(&self) -> &[Word] {
+ &self.words
+ }
+
+ /// Iterates through all the columns set to true in a given row of
+ /// the matrix.
+ pub fn iter(&self, row: R) -> BitIter<'_, C> {
+ assert!(row.index() < self.num_rows);
+ let (start, end) = self.range(row);
+ BitIter::new(&self.words[start..end])
+ }
+
+ /// Returns the number of elements in `row`.
+ pub fn count(&self, row: R) -> usize {
+ let (start, end) = self.range(row);
+ self.words[start..end].iter().map(|e| e.count_ones() as usize).sum()
+ }
+}
+
+impl<R: Idx, C: Idx> fmt::Debug for BitMatrix<R, C> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// Forces its contents to print in regular mode instead of alternate mode.
+ struct OneLinePrinter<T>(T);
+ impl<T: fmt::Debug> fmt::Debug for OneLinePrinter<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{:?}", self.0)
+ }
+ }
+
+ write!(fmt, "BitMatrix({}x{}) ", self.num_rows, self.num_columns)?;
+ let items = self.rows().flat_map(|r| self.iter(r).map(move |c| (r, c)));
+ fmt.debug_set().entries(items.map(OneLinePrinter)).finish()
+ }
+}
+
+/// A fixed-column-size, variable-row-size 2D bit matrix with a moderately
+/// sparse representation.
+///
+/// Initially, every row has no explicit representation. If any bit within a
+/// row is set, the entire row is instantiated as `Some(<HybridBitSet>)`.
+/// Furthermore, any previously uninstantiated rows prior to it will be
+/// instantiated as `None`. Those prior rows may themselves become fully
+/// instantiated later on if any of their bits are set.
+///
+/// `R` and `C` are index types used to identify rows and columns respectively;
+/// typically newtyped `usize` wrappers, but they can also just be `usize`.
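+///
+/// # Example
+///
+/// A minimal sketch of the lazy row instantiation (illustrative only):
+///
+/// ```ignore (illustrative example)
+/// let mut matrix: SparseBitMatrix<usize, usize> = SparseBitMatrix::new(100);
+/// matrix.insert(3, 22); // instantiates rows 0..=3; rows 0..3 stay `None`
+/// assert!(matrix.row(0).is_none());
+/// assert!(matrix.contains(3, 22));
+/// assert!(!matrix.contains(7, 22)); // row 7 was never instantiated
+/// ```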
+#[derive(Clone, Debug)]
+pub struct SparseBitMatrix<R, C>
+where
+ R: Idx,
+ C: Idx,
+{
+ num_columns: usize,
+ rows: IndexVec<R, Option<HybridBitSet<C>>>,
+}
+
+impl<R: Idx, C: Idx> SparseBitMatrix<R, C> {
+ /// Creates a new empty sparse bit matrix with no rows or columns.
+ pub fn new(num_columns: usize) -> Self {
+ Self { num_columns, rows: IndexVec::new() }
+ }
+
+ fn ensure_row(&mut self, row: R) -> &mut HybridBitSet<C> {
+        // Instantiate any missing rows up to and including row `row` as `None`,
+        // then ensure row `row` itself holds an (initially empty) `HybridBitSet`.
+ self.rows.get_or_insert_with(row, || HybridBitSet::new_empty(self.num_columns))
+ }
+
+ /// Sets the cell at `(row, column)` to true. Put another way, insert
+ /// `column` to the bitset for `row`.
+ ///
+ /// Returns `true` if this changed the matrix.
+ pub fn insert(&mut self, row: R, column: C) -> bool {
+ self.ensure_row(row).insert(column)
+ }
+
+ /// Sets the cell at `(row, column)` to false. Put another way, delete
+ /// `column` from the bitset for `row`. Has no effect if `row` does not
+ /// exist.
+ ///
+ /// Returns `true` if this changed the matrix.
+ pub fn remove(&mut self, row: R, column: C) -> bool {
+ match self.rows.get_mut(row) {
+ Some(Some(row)) => row.remove(column),
+ _ => false,
+ }
+ }
+
+ /// Sets all columns at `row` to false. Has no effect if `row` does
+ /// not exist.
+ pub fn clear(&mut self, row: R) {
+ if let Some(Some(row)) = self.rows.get_mut(row) {
+ row.clear();
+ }
+ }
+
+ /// Do the bits from `row` contain `column`? Put another way, is
+ /// the matrix cell at `(row, column)` true? Put yet another way,
+ /// if the matrix represents (transitive) reachability, can
+ /// `row` reach `column`?
+ pub fn contains(&self, row: R, column: C) -> bool {
+ self.row(row).map_or(false, |r| r.contains(column))
+ }
+
+ /// Adds the bits from row `read` to the bits from row `write`, and
+ /// returns `true` if anything changed.
+ ///
+    /// This is used when computing transitive reachability: if there is
+    /// an edge `write -> read`, then `write` can reach everything that
+    /// `read` can (and potentially more).
+ pub fn union_rows(&mut self, read: R, write: R) -> bool {
+ if read == write || self.row(read).is_none() {
+ return false;
+ }
+
+ self.ensure_row(write);
+ if let (Some(read_row), Some(write_row)) = self.rows.pick2_mut(read, write) {
+ write_row.union(read_row)
+ } else {
+ unreachable!()
+ }
+ }
+
+ /// Insert all bits in the given row.
+ pub fn insert_all_into_row(&mut self, row: R) {
+ self.ensure_row(row).insert_all();
+ }
+
+ pub fn rows(&self) -> impl Iterator<Item = R> {
+ self.rows.indices()
+ }
+
+ /// Iterates through all the columns set to true in a given row of
+ /// the matrix.
+ pub fn iter<'a>(&'a self, row: R) -> impl Iterator<Item = C> + 'a {
+ self.row(row).into_iter().flat_map(|r| r.iter())
+ }
+
+ pub fn row(&self, row: R) -> Option<&HybridBitSet<C>> {
+ self.rows.get(row)?.as_ref()
+ }
+
+ /// Intersects `row` with `set`. `set` can be either `BitSet` or
+ /// `HybridBitSet`. Has no effect if `row` does not exist.
+ ///
+ /// Returns true if the row was changed.
+ pub fn intersect_row<Set>(&mut self, row: R, set: &Set) -> bool
+ where
+ HybridBitSet<C>: BitRelations<Set>,
+ {
+ match self.rows.get_mut(row) {
+ Some(Some(row)) => row.intersect(set),
+ _ => false,
+ }
+ }
+
+    /// Subtracts `set` from `row`. `set` can be either `BitSet` or
+    /// `HybridBitSet`. Has no effect if `row` does not exist.
+ ///
+ /// Returns true if the row was changed.
+ pub fn subtract_row<Set>(&mut self, row: R, set: &Set) -> bool
+ where
+ HybridBitSet<C>: BitRelations<Set>,
+ {
+ match self.rows.get_mut(row) {
+ Some(Some(row)) => row.subtract(set),
+ _ => false,
+ }
+ }
+
+ /// Unions `row` with `set`. `set` can be either `BitSet` or
+ /// `HybridBitSet`.
+ ///
+ /// Returns true if the row was changed.
+ pub fn union_row<Set>(&mut self, row: R, set: &Set) -> bool
+ where
+ HybridBitSet<C>: BitRelations<Set>,
+ {
+ self.ensure_row(row).union(set)
+ }
+}
+
+#[inline]
+fn num_words<T: Idx>(domain_size: T) -> usize {
+ (domain_size.index() + WORD_BITS - 1) / WORD_BITS
+}
+
+#[inline]
+fn num_chunks<T: Idx>(domain_size: T) -> usize {
+ assert!(domain_size.index() > 0);
+ (domain_size.index() + CHUNK_BITS - 1) / CHUNK_BITS
+}
+
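+// Illustrative: with `WORD_BITS == 64`, element 68 lives in word 1 at bit 4,
+// i.e. `word_index_and_mask(68) == (1, 1 << 4)`.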
+#[inline]
+fn word_index_and_mask<T: Idx>(elem: T) -> (usize, Word) {
+ let elem = elem.index();
+ let word_index = elem / WORD_BITS;
+ let mask = 1 << (elem % WORD_BITS);
+ (word_index, mask)
+}
+
+#[inline]
+fn chunk_index<T: Idx>(elem: T) -> usize {
+ elem.index() / CHUNK_BITS
+}
+
+#[inline]
+fn chunk_word_index_and_mask<T: Idx>(elem: T) -> (usize, Word) {
+ let chunk_elem = elem.index() % CHUNK_BITS;
+ word_index_and_mask(chunk_elem)
+}
+
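+// Illustrative: with `WORD_BITS == 64` and `domain_size == 68`, only the low
+// 4 bits of the final word are in the domain, so it is masked with 0b1111.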
+fn clear_excess_bits_in_final_word(domain_size: usize, words: &mut [Word]) {
+ let num_bits_in_final_word = domain_size % WORD_BITS;
+ if num_bits_in_final_word > 0 {
+ let mask = (1 << num_bits_in_final_word) - 1;
+ words[words.len() - 1] &= mask;
+ }
+}
+
+#[inline]
+fn max_bit(word: Word) -> usize {
+ WORD_BITS - 1 - word.leading_zeros() as usize
+}
+
+/// Integral type used to represent the bit set.
+pub trait FiniteBitSetTy:
+ BitAnd<Output = Self>
+ + BitAndAssign
+ + BitOrAssign
+ + Clone
+ + Copy
+ + Shl
+ + Not<Output = Self>
+ + PartialEq
+ + Sized
+{
+ /// Size of the domain representable by this type, e.g. 64 for `u64`.
+ const DOMAIN_SIZE: u32;
+
+ /// Value which represents the `FiniteBitSet` having every bit set.
+ const FILLED: Self;
+ /// Value which represents the `FiniteBitSet` having no bits set.
+ const EMPTY: Self;
+
+ /// Value for one as the integral type.
+ const ONE: Self;
+ /// Value for zero as the integral type.
+ const ZERO: Self;
+
+ /// Perform a checked left shift on the integral type.
+ fn checked_shl(self, rhs: u32) -> Option<Self>;
+ /// Perform a checked right shift on the integral type.
+ fn checked_shr(self, rhs: u32) -> Option<Self>;
+}
+
+impl FiniteBitSetTy for u32 {
+ const DOMAIN_SIZE: u32 = 32;
+
+ const FILLED: Self = Self::MAX;
+ const EMPTY: Self = Self::MIN;
+
+ const ONE: Self = 1u32;
+ const ZERO: Self = 0u32;
+
+ fn checked_shl(self, rhs: u32) -> Option<Self> {
+ self.checked_shl(rhs)
+ }
+
+ fn checked_shr(self, rhs: u32) -> Option<Self> {
+ self.checked_shr(rhs)
+ }
+}
+
+impl std::fmt::Debug for FiniteBitSet<u32> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:032b}", self.0)
+ }
+}
+
+impl FiniteBitSetTy for u64 {
+ const DOMAIN_SIZE: u32 = 64;
+
+ const FILLED: Self = Self::MAX;
+ const EMPTY: Self = Self::MIN;
+
+ const ONE: Self = 1u64;
+ const ZERO: Self = 0u64;
+
+ fn checked_shl(self, rhs: u32) -> Option<Self> {
+ self.checked_shl(rhs)
+ }
+
+ fn checked_shr(self, rhs: u32) -> Option<Self> {
+ self.checked_shr(rhs)
+ }
+}
+
+impl std::fmt::Debug for FiniteBitSet<u64> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:064b}", self.0)
+ }
+}
+
+impl FiniteBitSetTy for u128 {
+ const DOMAIN_SIZE: u32 = 128;
+
+ const FILLED: Self = Self::MAX;
+ const EMPTY: Self = Self::MIN;
+
+ const ONE: Self = 1u128;
+ const ZERO: Self = 0u128;
+
+ fn checked_shl(self, rhs: u32) -> Option<Self> {
+ self.checked_shl(rhs)
+ }
+
+ fn checked_shr(self, rhs: u32) -> Option<Self> {
+ self.checked_shr(rhs)
+ }
+}
+
+impl std::fmt::Debug for FiniteBitSet<u128> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:0128b}", self.0)
+ }
+}
+
+/// A fixed-sized bitset type represented by an integer type. Indices outwith
+/// the range representable by `T` are considered set.
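+///
+/// # Example
+///
+/// A minimal sketch (illustrative only):
+///
+/// ```ignore (illustrative example)
+/// let mut set: FiniteBitSet<u32> = FiniteBitSet::new_empty();
+/// set.set(3);
+/// set.set_range(8..12); // sets bits 8, 9, 10 and 11
+/// assert_eq!(set.contains(3), Some(true));
+/// assert_eq!(set.contains(4), Some(false));
+/// assert_eq!(set.contains(40), None); // outside the `u32` domain
+/// ```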
+#[derive(Copy, Clone, Eq, PartialEq, Decodable, Encodable)]
+pub struct FiniteBitSet<T: FiniteBitSetTy>(pub T);
+
+impl<T: FiniteBitSetTy> FiniteBitSet<T> {
+ /// Creates a new, empty bitset.
+ pub fn new_empty() -> Self {
+ Self(T::EMPTY)
+ }
+
+ /// Sets the `index`th bit.
+ pub fn set(&mut self, index: u32) {
+ self.0 |= T::ONE.checked_shl(index).unwrap_or(T::ZERO);
+ }
+
+ /// Unsets the `index`th bit.
+ pub fn clear(&mut self, index: u32) {
+ self.0 &= !T::ONE.checked_shl(index).unwrap_or(T::ZERO);
+ }
+
+    /// Sets the bits in `range`, which is half-open: `range.start` is
+    /// included and `range.end` is excluded.
+ pub fn set_range(&mut self, range: Range<u32>) {
+ let bits = T::FILLED
+ .checked_shl(range.end - range.start)
+ .unwrap_or(T::ZERO)
+ .not()
+ .checked_shl(range.start)
+ .unwrap_or(T::ZERO);
+ self.0 |= bits;
+ }
+
+ /// Is the set empty?
+ pub fn is_empty(&self) -> bool {
+ self.0 == T::EMPTY
+ }
+
+    /// Returns `true` if `index` is within the domain representable by `T`.
+ pub fn within_domain(&self, index: u32) -> bool {
+ index < T::DOMAIN_SIZE
+ }
+
+    /// Returns whether the `index`th bit is set, or `None` if `index` is
+    /// outside the domain.
+ pub fn contains(&self, index: u32) -> Option<bool> {
+ self.within_domain(index)
+ .then(|| ((self.0.checked_shr(index).unwrap_or(T::ONE)) & T::ONE) == T::ONE)
+ }
+}
+
+impl<T: FiniteBitSetTy> Default for FiniteBitSet<T> {
+ fn default() -> Self {
+ Self::new_empty()
+ }
+}
diff --git a/compiler/rustc_index/src/bit_set/tests.rs b/compiler/rustc_index/src/bit_set/tests.rs
new file mode 100644
index 000000000..351d62fee
--- /dev/null
+++ b/compiler/rustc_index/src/bit_set/tests.rs
@@ -0,0 +1,873 @@
+use super::*;
+
+extern crate test;
+use std::hint::black_box;
+use test::Bencher;
+
+#[test]
+fn test_new_filled() {
+ for i in 0..128 {
+ let idx_buf = BitSet::new_filled(i);
+ let elems: Vec<usize> = idx_buf.iter().collect();
+ let expected: Vec<usize> = (0..i).collect();
+ assert_eq!(elems, expected);
+ }
+}
+
+#[test]
+fn bitset_iter_works() {
+ let mut bitset: BitSet<usize> = BitSet::new_empty(100);
+ bitset.insert(1);
+ bitset.insert(10);
+ bitset.insert(19);
+ bitset.insert(62);
+ bitset.insert(63);
+ bitset.insert(64);
+ bitset.insert(65);
+ bitset.insert(66);
+ bitset.insert(99);
+ assert_eq!(bitset.iter().collect::<Vec<_>>(), [1, 10, 19, 62, 63, 64, 65, 66, 99]);
+}
+
+#[test]
+fn bitset_iter_works_2() {
+ let mut bitset: BitSet<usize> = BitSet::new_empty(320);
+ bitset.insert(0);
+ bitset.insert(127);
+ bitset.insert(191);
+ bitset.insert(255);
+ bitset.insert(319);
+ assert_eq!(bitset.iter().collect::<Vec<_>>(), [0, 127, 191, 255, 319]);
+}
+
+#[test]
+fn bitset_clone_from() {
+ let mut a: BitSet<usize> = BitSet::new_empty(10);
+ a.insert(4);
+ a.insert(7);
+ a.insert(9);
+
+ let mut b = BitSet::new_empty(2);
+ b.clone_from(&a);
+ assert_eq!(b.domain_size(), 10);
+ assert_eq!(b.iter().collect::<Vec<_>>(), [4, 7, 9]);
+
+ b.clone_from(&BitSet::new_empty(40));
+ assert_eq!(b.domain_size(), 40);
+ assert_eq!(b.iter().collect::<Vec<_>>(), []);
+}
+
+#[test]
+fn union_two_sets() {
+ let mut set1: BitSet<usize> = BitSet::new_empty(65);
+ let mut set2: BitSet<usize> = BitSet::new_empty(65);
+ assert!(set1.insert(3));
+ assert!(!set1.insert(3));
+ assert!(set2.insert(5));
+ assert!(set2.insert(64));
+ assert!(set1.union(&set2));
+ assert!(!set1.union(&set2));
+ assert!(set1.contains(3));
+ assert!(!set1.contains(4));
+ assert!(set1.contains(5));
+ assert!(!set1.contains(63));
+ assert!(set1.contains(64));
+}
+
+#[test]
+fn hybrid_bitset() {
+ let mut sparse038: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ assert!(sparse038.is_empty());
+ assert!(sparse038.insert(0));
+ assert!(sparse038.insert(1));
+ assert!(sparse038.insert(8));
+ assert!(sparse038.insert(3));
+ assert!(!sparse038.insert(3));
+ assert!(sparse038.remove(1));
+ assert!(!sparse038.is_empty());
+ assert_eq!(sparse038.iter().collect::<Vec<_>>(), [0, 3, 8]);
+
+ for i in 0..256 {
+ if i == 0 || i == 3 || i == 8 {
+ assert!(sparse038.contains(i));
+ } else {
+ assert!(!sparse038.contains(i));
+ }
+ }
+
+ let mut sparse01358 = sparse038.clone();
+ assert!(sparse01358.insert(1));
+ assert!(sparse01358.insert(5));
+ assert_eq!(sparse01358.iter().collect::<Vec<_>>(), [0, 1, 3, 5, 8]);
+
+ let mut dense10 = HybridBitSet::new_empty(256);
+ for i in 0..10 {
+ assert!(dense10.insert(i));
+ }
+ assert!(!dense10.is_empty());
+ assert_eq!(dense10.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut dense256 = HybridBitSet::new_empty(256);
+ assert!(dense256.is_empty());
+ dense256.insert_all();
+ assert!(!dense256.is_empty());
+ for i in 0..256 {
+ assert!(dense256.contains(i));
+ }
+
+ assert!(sparse038.superset(&sparse038)); // sparse + sparse (self)
+ assert!(sparse01358.superset(&sparse038)); // sparse + sparse
+ assert!(dense10.superset(&sparse038)); // dense + sparse
+ assert!(dense10.superset(&dense10)); // dense + dense (self)
+ assert!(dense256.superset(&dense10)); // dense + dense
+
+ let mut hybrid = sparse038.clone();
+ assert!(!sparse01358.union(&hybrid)); // no change
+ assert!(hybrid.union(&sparse01358));
+ assert!(hybrid.superset(&sparse01358) && sparse01358.superset(&hybrid));
+ assert!(!dense256.union(&dense10));
+
+ // dense / sparse where dense superset sparse
+ assert!(!dense10.clone().union(&sparse01358));
+ assert!(sparse01358.clone().union(&dense10));
+ assert!(dense10.clone().intersect(&sparse01358));
+ assert!(!sparse01358.clone().intersect(&dense10));
+ assert!(dense10.clone().subtract(&sparse01358));
+ assert!(sparse01358.clone().subtract(&dense10));
+
+ // dense / sparse where sparse superset dense
+ let dense038 = sparse038.to_dense();
+ assert!(!sparse01358.clone().union(&dense038));
+ assert!(dense038.clone().union(&sparse01358));
+ assert!(sparse01358.clone().intersect(&dense038));
+ assert!(!dense038.clone().intersect(&sparse01358));
+ assert!(sparse01358.clone().subtract(&dense038));
+ assert!(dense038.clone().subtract(&sparse01358));
+
+ let mut dense = dense10.clone();
+ assert!(dense.union(&dense256));
+ assert!(dense.superset(&dense256) && dense256.superset(&dense));
+ assert!(hybrid.union(&dense256));
+ assert!(hybrid.superset(&dense256) && dense256.superset(&hybrid));
+
+ assert!(!dense10.clone().intersect(&dense256));
+ assert!(dense256.clone().intersect(&dense10));
+ assert!(dense10.clone().subtract(&dense256));
+ assert!(dense256.clone().subtract(&dense10));
+
+ assert_eq!(dense256.iter().count(), 256);
+ let mut dense0 = dense256;
+ for i in 0..256 {
+ assert!(dense0.remove(i));
+ }
+ assert!(!dense0.remove(0));
+ assert!(dense0.is_empty());
+}
+
+#[test]
+fn chunked_bitset() {
+ let mut b0 = ChunkedBitSet::<usize>::new_empty(0);
+ let b0b = b0.clone();
+ assert_eq!(b0, ChunkedBitSet { domain_size: 0, chunks: Box::new([]), marker: PhantomData });
+
+ // There are no valid insert/remove/contains operations on a 0-domain
+ // bitset, but we can test `union`.
+ b0.assert_valid();
+ assert!(!b0.union(&b0b));
+ assert_eq!(b0.chunks(), vec![]);
+ assert_eq!(b0.count(), 0);
+ b0.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b1 = ChunkedBitSet::<usize>::new_empty(1);
+ assert_eq!(
+ b1,
+ ChunkedBitSet { domain_size: 1, chunks: Box::new([Zeros(1)]), marker: PhantomData }
+ );
+
+ b1.assert_valid();
+ assert!(!b1.contains(0));
+ assert_eq!(b1.count(), 0);
+ assert!(b1.insert(0));
+ assert!(b1.contains(0));
+ assert_eq!(b1.count(), 1);
+ assert_eq!(b1.chunks(), [Ones(1)]);
+ assert!(!b1.insert(0));
+ assert!(b1.remove(0));
+ assert!(!b1.contains(0));
+ assert_eq!(b1.count(), 0);
+ assert_eq!(b1.chunks(), [Zeros(1)]);
+ b1.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b100 = ChunkedBitSet::<usize>::new_filled(100);
+ assert_eq!(
+ b100,
+ ChunkedBitSet { domain_size: 100, chunks: Box::new([Ones(100)]), marker: PhantomData }
+ );
+
+ b100.assert_valid();
+ for i in 0..100 {
+ assert!(b100.contains(i));
+ }
+ assert_eq!(b100.count(), 100);
+ assert!(b100.remove(3));
+ assert!(b100.insert(3));
+ assert_eq!(b100.chunks(), vec![Ones(100)]);
+ assert!(
+ b100.remove(20) && b100.remove(30) && b100.remove(40) && b100.remove(99) && b100.insert(30)
+ );
+ assert_eq!(b100.count(), 97);
+ assert!(!b100.contains(20) && b100.contains(30) && !b100.contains(99) && b100.contains(50));
+ assert_eq!(
+ b100.chunks(),
+ vec![Mixed(
+ 100,
+ 97,
+ #[rustfmt::skip]
+ Rc::new([
+ 0b11111111_11111111_11111110_11111111_11111111_11101111_11111111_11111111,
+ 0b00000000_00000000_00000000_00000111_11111111_11111111_11111111_11111111,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0,
+ ])
+ )],
+ );
+ b100.assert_valid();
+ let mut num_removed = 0;
+ for i in 0..100 {
+ if b100.remove(i) {
+ num_removed += 1;
+ }
+ }
+ assert_eq!(num_removed, 97);
+ assert_eq!(b100.chunks(), vec![Zeros(100)]);
+ b100.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b2548 = ChunkedBitSet::<usize>::new_empty(2548);
+ assert_eq!(
+ b2548,
+ ChunkedBitSet {
+ domain_size: 2548,
+ chunks: Box::new([Zeros(2048), Zeros(500)]),
+ marker: PhantomData,
+ }
+ );
+
+ b2548.assert_valid();
+ b2548.insert(14);
+ b2548.remove(14);
+ assert_eq!(b2548.chunks(), vec![Zeros(2048), Zeros(500)]);
+ b2548.insert_all();
+ for i in 0..2548 {
+ assert!(b2548.contains(i));
+ }
+ assert_eq!(b2548.count(), 2548);
+ assert_eq!(b2548.chunks(), vec![Ones(2048), Ones(500)]);
+ b2548.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b4096 = ChunkedBitSet::<usize>::new_empty(4096);
+ assert_eq!(
+ b4096,
+ ChunkedBitSet {
+ domain_size: 4096,
+ chunks: Box::new([Zeros(2048), Zeros(2048)]),
+ marker: PhantomData,
+ }
+ );
+
+ b4096.assert_valid();
+ for i in 0..4096 {
+ assert!(!b4096.contains(i));
+ }
+ assert!(b4096.insert(0) && b4096.insert(4095) && !b4096.insert(4095));
+ assert!(
+ b4096.contains(0) && !b4096.contains(2047) && !b4096.contains(2048) && b4096.contains(4095)
+ );
+ assert_eq!(
+ b4096.chunks(),
+ #[rustfmt::skip]
+ vec![
+ Mixed(2048, 1, Rc::new([
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+ ])),
+ Mixed(2048, 1, Rc::new([
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x8000_0000_0000_0000
+ ])),
+ ],
+ );
+ assert_eq!(b4096.count(), 2);
+ b4096.assert_valid();
+
+ //-----------------------------------------------------------------------
+
+ let mut b10000 = ChunkedBitSet::<usize>::new_empty(10000);
+ assert_eq!(
+ b10000,
+ ChunkedBitSet {
+ domain_size: 10000,
+ chunks: Box::new([Zeros(2048), Zeros(2048), Zeros(2048), Zeros(2048), Zeros(1808),]),
+ marker: PhantomData,
+ }
+ );
+
+ b10000.assert_valid();
+ assert!(b10000.insert(3000) && b10000.insert(5000));
+ assert_eq!(
+ b10000.chunks(),
+ #[rustfmt::skip]
+ vec![
+ Zeros(2048),
+ Mixed(2048, 1, Rc::new([
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0100_0000_0000_0000, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ])),
+ Mixed(2048, 1, Rc::new([
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0100, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ ])),
+ Zeros(2048),
+ Zeros(1808),
+ ],
+ );
+ let mut b10000b = ChunkedBitSet::<usize>::new_empty(10000);
+ b10000b.clone_from(&b10000);
+ assert_eq!(b10000, b10000b);
+ for i in 6000..7000 {
+ b10000b.insert(i);
+ }
+ assert_eq!(b10000b.count(), 1002);
+ b10000b.assert_valid();
+ b10000b.clone_from(&b10000);
+ assert_eq!(b10000b.count(), 2);
+ for i in 2000..8000 {
+ b10000b.insert(i);
+ }
+ b10000.union(&b10000b);
+ assert_eq!(b10000.count(), 6000);
+ b10000.union(&b10000b);
+ assert_eq!(b10000.count(), 6000);
+ b10000.assert_valid();
+ b10000b.assert_valid();
+}
+
+fn with_elements_chunked(elements: &[usize], domain_size: usize) -> ChunkedBitSet<usize> {
+ let mut s = ChunkedBitSet::new_empty(domain_size);
+ for &e in elements {
+ assert!(s.insert(e));
+ }
+ s
+}
+
+fn with_elements_standard(elements: &[usize], domain_size: usize) -> BitSet<usize> {
+ let mut s = BitSet::new_empty(domain_size);
+ for &e in elements {
+ assert!(s.insert(e));
+ }
+ s
+}
+
+#[test]
+fn chunked_bitset_into_bitset_operations() {
+ let a = vec![1, 5, 7, 11, 15, 2000, 3000];
+ let b = vec![3, 4, 11, 3000, 4000];
+ let aub = vec![1, 3, 4, 5, 7, 11, 15, 2000, 3000, 4000];
+ let aib = vec![11, 3000];
+
+ let b = with_elements_chunked(&b, 9876);
+
+ let mut union = with_elements_standard(&a, 9876);
+ assert!(union.union(&b));
+ assert!(!union.union(&b));
+ assert!(union.iter().eq(aub.iter().copied()));
+
+ let mut intersection = with_elements_standard(&a, 9876);
+ assert!(intersection.intersect(&b));
+ assert!(!intersection.intersect(&b));
+ assert!(intersection.iter().eq(aib.iter().copied()));
+}
+
+#[test]
+fn chunked_bitset_iter() {
+ fn check_iter(bit: &ChunkedBitSet<usize>, vec: &Vec<usize>) {
+ // Test collecting via both `.next()` and `.fold()` calls, to make sure both are correct
+ let mut collect_next = Vec::new();
+ let mut bit_iter = bit.iter();
+ while let Some(item) = bit_iter.next() {
+ collect_next.push(item);
+ }
+ assert_eq!(vec, &collect_next);
+
+ let collect_fold = bit.iter().fold(Vec::new(), |mut v, item| {
+ v.push(item);
+ v
+ });
+ assert_eq!(vec, &collect_fold);
+ }
+
+ // Empty
+ let vec: Vec<usize> = Vec::new();
+ let bit = with_elements_chunked(&vec, 9000);
+ check_iter(&bit, &vec);
+
+ // Filled
+ let n = 10000;
+ let vec: Vec<usize> = (0..n).collect();
+ let bit = with_elements_chunked(&vec, n);
+ check_iter(&bit, &vec);
+
+ // Filled with trailing zeros
+ let n = 10000;
+ let vec: Vec<usize> = (0..n).collect();
+ let bit = with_elements_chunked(&vec, 2 * n);
+ check_iter(&bit, &vec);
+
+ // Mixed
+ let n = 12345;
+ let vec: Vec<usize> = vec![0, 1, 2, 2010, 2047, 2099, 6000, 6002, 6004];
+ let bit = with_elements_chunked(&vec, n);
+ check_iter(&bit, &vec);
+}
+
+#[test]
+fn grow() {
+ let mut set: GrowableBitSet<usize> = GrowableBitSet::with_capacity(65);
+ for index in 0..65 {
+ assert!(set.insert(index));
+ assert!(!set.insert(index));
+ }
+ set.ensure(128);
+
+ // Check if the bits set before growing are still set
+ for index in 0..65 {
+ assert!(set.contains(index));
+ }
+
+ // Check if the new bits are all un-set
+ for index in 65..128 {
+ assert!(!set.contains(index));
+ }
+
+ // Check that we can set all new bits without running out of bounds
+ for index in 65..128 {
+ assert!(set.insert(index));
+ assert!(!set.insert(index));
+ }
+}
+
+#[test]
+fn matrix_intersection() {
+ let mut matrix: BitMatrix<usize, usize> = BitMatrix::new(200, 200);
+
+ // (*) Elements reachable from both 2 and 65.
+
+ matrix.insert(2, 3);
+ matrix.insert(2, 6);
+ matrix.insert(2, 10); // (*)
+ matrix.insert(2, 64); // (*)
+ matrix.insert(2, 65);
+ matrix.insert(2, 130);
+ matrix.insert(2, 160); // (*)
+
+ matrix.insert(64, 133);
+
+ matrix.insert(65, 2);
+ matrix.insert(65, 8);
+ matrix.insert(65, 10); // (*)
+ matrix.insert(65, 64); // (*)
+ matrix.insert(65, 68);
+ matrix.insert(65, 133);
+ matrix.insert(65, 160); // (*)
+
+ let intersection = matrix.intersect_rows(2, 64);
+ assert!(intersection.is_empty());
+
+ let intersection = matrix.intersect_rows(2, 65);
+ assert_eq!(intersection, &[10, 64, 160]);
+}
+
+#[test]
+fn matrix_iter() {
+ let mut matrix: BitMatrix<usize, usize> = BitMatrix::new(64, 100);
+ matrix.insert(3, 22);
+ matrix.insert(3, 75);
+ matrix.insert(2, 99);
+ matrix.insert(4, 0);
+ matrix.union_rows(3, 5);
+ matrix.insert_all_into_row(6);
+
+ let expected = [99];
+ let mut iter = expected.iter();
+ for i in matrix.iter(2) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [22, 75];
+ let mut iter = expected.iter();
+ assert_eq!(matrix.count(3), expected.len());
+ for i in matrix.iter(3) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [0];
+ let mut iter = expected.iter();
+ assert_eq!(matrix.count(4), expected.len());
+ for i in matrix.iter(4) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [22, 75];
+ let mut iter = expected.iter();
+ assert_eq!(matrix.count(5), expected.len());
+ for i in matrix.iter(5) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ assert_eq!(matrix.count(6), 100);
+ let mut count = 0;
+ for (idx, i) in matrix.iter(6).enumerate() {
+ assert_eq!(idx, i);
+ count += 1;
+ }
+ assert_eq!(count, 100);
+
+ if let Some(i) = matrix.iter(7).next() {
+ panic!("expected no elements in row, but contains element {:?}", i);
+ }
+}
+
+#[test]
+fn sparse_matrix_iter() {
+ let mut matrix: SparseBitMatrix<usize, usize> = SparseBitMatrix::new(100);
+ matrix.insert(3, 22);
+ matrix.insert(3, 75);
+ matrix.insert(2, 99);
+ matrix.insert(4, 0);
+ matrix.union_rows(3, 5);
+
+ let expected = [99];
+ let mut iter = expected.iter();
+ for i in matrix.iter(2) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [22, 75];
+ let mut iter = expected.iter();
+ for i in matrix.iter(3) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [0];
+ let mut iter = expected.iter();
+ for i in matrix.iter(4) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+
+ let expected = [22, 75];
+ let mut iter = expected.iter();
+ for i in matrix.iter(5) {
+ let j = *iter.next().unwrap();
+ assert_eq!(i, j);
+ }
+ assert!(iter.next().is_none());
+}
+
+#[test]
+fn sparse_matrix_operations() {
+ let mut matrix: SparseBitMatrix<usize, usize> = SparseBitMatrix::new(100);
+ matrix.insert(3, 22);
+ matrix.insert(3, 75);
+ matrix.insert(2, 99);
+ matrix.insert(4, 0);
+
+ let mut disjoint: HybridBitSet<usize> = HybridBitSet::new_empty(100);
+ disjoint.insert(33);
+
+ let mut superset = HybridBitSet::new_empty(100);
+ superset.insert(22);
+ superset.insert(75);
+ superset.insert(33);
+
+ let mut subset = HybridBitSet::new_empty(100);
+ subset.insert(22);
+
+ // SparseBitMatrix::remove
+ {
+ let mut matrix = matrix.clone();
+ matrix.remove(3, 22);
+ assert!(!matrix.row(3).unwrap().contains(22));
+ matrix.remove(0, 0);
+ assert!(matrix.row(0).is_none());
+ }
+
+ // SparseBitMatrix::clear
+ {
+ let mut matrix = matrix.clone();
+ matrix.clear(3);
+ assert!(!matrix.row(3).unwrap().contains(75));
+ matrix.clear(0);
+ assert!(matrix.row(0).is_none());
+ }
+
+ // SparseBitMatrix::intersect_row
+ {
+ let mut matrix = matrix.clone();
+ assert!(!matrix.intersect_row(3, &superset));
+ assert!(matrix.intersect_row(3, &subset));
+ matrix.intersect_row(0, &disjoint);
+ assert!(matrix.row(0).is_none());
+ }
+
+ // SparseBitMatrix::subtract_row
+ {
+ let mut matrix = matrix.clone();
+ assert!(!matrix.subtract_row(3, &disjoint));
+ assert!(matrix.subtract_row(3, &subset));
+ assert!(matrix.subtract_row(3, &superset));
+ matrix.intersect_row(0, &disjoint);
+ assert!(matrix.row(0).is_none());
+ }
+
+ // SparseBitMatrix::union_row
+ {
+ let mut matrix = matrix.clone();
+ assert!(!matrix.union_row(3, &subset));
+ assert!(matrix.union_row(3, &disjoint));
+ matrix.union_row(0, &disjoint);
+ assert!(matrix.row(0).is_some());
+ }
+}
+
+#[test]
+fn dense_insert_range() {
+ #[track_caller]
+ fn check<R>(domain: usize, range: R)
+ where
+ R: RangeBounds<usize> + Clone + IntoIterator<Item = usize> + std::fmt::Debug,
+ {
+ let mut set = BitSet::new_empty(domain);
+ set.insert_range(range.clone());
+ for i in set.iter() {
+ assert!(range.contains(&i));
+ }
+ for i in range.clone() {
+ assert!(set.contains(i), "{} in {:?}, inserted {:?}", i, set, range);
+ }
+ }
+ check(300, 10..10);
+ check(300, WORD_BITS..WORD_BITS * 2);
+ check(300, WORD_BITS - 1..WORD_BITS * 2);
+ check(300, WORD_BITS - 1..WORD_BITS);
+ check(300, 10..100);
+ check(300, 10..30);
+ check(300, 0..5);
+ check(300, 0..250);
+ check(300, 200..250);
+
+ check(300, 10..=10);
+ check(300, WORD_BITS..=WORD_BITS * 2);
+ check(300, WORD_BITS - 1..=WORD_BITS * 2);
+ check(300, WORD_BITS - 1..=WORD_BITS);
+ check(300, 10..=100);
+ check(300, 10..=30);
+ check(300, 0..=5);
+ check(300, 0..=250);
+ check(300, 200..=250);
+
+ for i in 0..WORD_BITS * 2 {
+ for j in i..WORD_BITS * 2 {
+ check(WORD_BITS * 2, i..j);
+ check(WORD_BITS * 2, i..=j);
+ check(300, i..j);
+ check(300, i..=j);
+ }
+ }
+}
+
+#[test]
+fn dense_last_set_before() {
+ fn easy(set: &BitSet<usize>, needle: impl RangeBounds<usize>) -> Option<usize> {
+ let mut last_leq = None;
+ for e in set.iter() {
+ if needle.contains(&e) {
+ last_leq = Some(e);
+ }
+ }
+ last_leq
+ }
+
+ #[track_caller]
+ fn cmp(set: &BitSet<usize>, needle: impl RangeBounds<usize> + Clone + std::fmt::Debug) {
+ assert_eq!(
+ set.last_set_in(needle.clone()),
+ easy(set, needle.clone()),
+ "{:?} in {:?}",
+ needle,
+ set
+ );
+ }
+ let mut set = BitSet::new_empty(300);
+ cmp(&set, 50..=50);
+ set.insert(WORD_BITS);
+ cmp(&set, WORD_BITS..=WORD_BITS);
+ set.insert(WORD_BITS - 1);
+ cmp(&set, 0..=WORD_BITS - 1);
+ cmp(&set, 0..=5);
+ cmp(&set, 10..100);
+ set.insert(100);
+ cmp(&set, 100..110);
+ cmp(&set, 99..100);
+ cmp(&set, 99..=100);
+
+ for i in 0..=WORD_BITS * 2 {
+ for j in i..=WORD_BITS * 2 {
+ for k in 0..WORD_BITS * 2 {
+ let mut set = BitSet::new_empty(300);
+ cmp(&set, i..j);
+ cmp(&set, i..=j);
+ set.insert(k);
+ cmp(&set, i..j);
+ cmp(&set, i..=j);
+ }
+ }
+ }
+}
+
+/// Merge dense hybrid set into empty sparse hybrid set.
+#[bench]
+fn union_hybrid_sparse_empty_to_dense(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ for i in 0..10 {
+ assert!(pre_dense.insert(i));
+ }
+ let pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+/// Merge dense hybrid set into full hybrid set with same indices.
+#[bench]
+fn union_hybrid_sparse_full_to_dense(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ for i in 0..10 {
+ assert!(pre_dense.insert(i));
+ }
+ let mut pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(256);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_sparse.insert(i));
+ }
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+/// Merge dense hybrid set into full hybrid set with indices over the whole domain.
+#[bench]
+fn union_hybrid_sparse_domain_to_dense(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX * 64);
+ for i in 0..10 {
+ assert!(pre_dense.insert(i));
+ }
+ let mut pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX * 64);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_sparse.insert(i * 64));
+ }
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+/// Merge dense hybrid set into empty hybrid set where the domain is very small.
+#[bench]
+fn union_hybrid_sparse_empty_small_domain(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_dense.insert(i));
+ }
+ let pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX);
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+/// Merge dense hybrid set into full hybrid set where the domain is very small.
+#[bench]
+fn union_hybrid_sparse_full_small_domain(b: &mut Bencher) {
+ let mut pre_dense: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_dense.insert(i));
+ }
+ let mut pre_sparse: HybridBitSet<usize> = HybridBitSet::new_empty(SPARSE_MAX);
+ for i in 0..SPARSE_MAX {
+ assert!(pre_sparse.insert(i));
+ }
+ b.iter(|| {
+ let dense = pre_dense.clone();
+ let mut sparse = pre_sparse.clone();
+ sparse.union(&dense);
+ })
+}
+
+#[bench]
+fn bench_insert(b: &mut Bencher) {
+ let mut bs = BitSet::new_filled(99999usize);
+ b.iter(|| {
+ black_box(bs.insert(black_box(100u32)));
+ });
+}
+
+#[bench]
+fn bench_remove(b: &mut Bencher) {
+ let mut bs = BitSet::new_filled(99999usize);
+ b.iter(|| {
+ black_box(bs.remove(black_box(100u32)));
+ });
+}
+
+#[bench]
+fn bench_iter(b: &mut Bencher) {
+ let bs = BitSet::new_filled(99999usize);
+ b.iter(|| {
+ bs.iter().map(|b: usize| black_box(b)).for_each(drop);
+ });
+}
+
+#[bench]
+fn bench_intersect(b: &mut Bencher) {
+ let mut ba: BitSet<u32> = BitSet::new_filled(99999usize);
+ let bb = BitSet::new_filled(99999usize);
+ b.iter(|| {
+ ba.intersect(black_box(&bb));
+ });
+}
diff --git a/compiler/rustc_index/src/interval.rs b/compiler/rustc_index/src/interval.rs
new file mode 100644
index 000000000..3592fb330
--- /dev/null
+++ b/compiler/rustc_index/src/interval.rs
@@ -0,0 +1,305 @@
+use std::iter::Step;
+use std::marker::PhantomData;
+use std::ops::{Bound, Range, RangeBounds};
+
+use crate::vec::{Idx, IndexVec};
+use smallvec::SmallVec;
+
+#[cfg(test)]
+mod tests;
+
+/// Stores a set of intervals on the indices.
+///
+/// The elements in `map` are sorted and non-adjacent, which means
+/// the second value (inclusive end) of each element is at least two
+/// less than the first value (start) of the following element.
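+///
+/// For example (an illustrative sketch of what the invariant implies),
+/// inserting two adjacent ranges collapses them into one stored interval:
+///
+/// ```ignore (illustrative)
+/// let mut set = IntervalSet::<u32>::new(10);
+/// set.insert_range(0..=2u32);
+/// set.insert_range(3..=5);
+/// assert_eq!(set.iter_intervals().collect::<Vec<_>>(), [0..6]);
+/// ```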
+#[derive(Debug, Clone)]
+pub struct IntervalSet<I> {
+ // Start, end
+ map: SmallVec<[(u32, u32); 4]>,
+ domain: usize,
+ _data: PhantomData<I>,
+}
+
+#[inline]
+fn inclusive_start<T: Idx>(range: impl RangeBounds<T>) -> u32 {
+ match range.start_bound() {
+ Bound::Included(start) => start.index() as u32,
+ Bound::Excluded(start) => start.index() as u32 + 1,
+ Bound::Unbounded => 0,
+ }
+}
+
+#[inline]
+fn inclusive_end<T: Idx>(domain: usize, range: impl RangeBounds<T>) -> Option<u32> {
+ let end = match range.end_bound() {
+ Bound::Included(end) => end.index() as u32,
+ Bound::Excluded(end) => end.index().checked_sub(1)? as u32,
+ Bound::Unbounded => domain.checked_sub(1)? as u32,
+ };
+ Some(end)
+}
+
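+// A worked example of the two helpers above (illustrative): with
+// `domain = 300`, the range `10..20` yields `inclusive_start = 10` and
+// `inclusive_end = Some(19)`; `10..=20` yields `Some(20)`; and the empty
+// range `5..5` yields `Some(4)`, which callers reject via the
+// `start > end` check.
+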
+impl<I: Idx> IntervalSet<I> {
+ pub fn new(domain: usize) -> IntervalSet<I> {
+ IntervalSet { map: SmallVec::new(), domain, _data: PhantomData }
+ }
+
+ pub fn clear(&mut self) {
+ self.map.clear();
+ }
+
+ pub fn iter(&self) -> impl Iterator<Item = I> + '_
+ where
+ I: Step,
+ {
+ self.iter_intervals().flatten()
+ }
+
+ /// Iterates through intervals stored in the set, in order.
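+ ///
+ /// Each interval is yielded as a half-open `Range<I>`; e.g. (sketch)
+ /// a stored pair `(2, 5)` is yielded as `2..6`.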
+ pub fn iter_intervals(&self) -> impl Iterator<Item = std::ops::Range<I>> + '_
+ where
+ I: Step,
+ {
+ self.map.iter().map(|&(start, end)| I::new(start as usize)..I::new(end as usize + 1))
+ }
+
+ /// Returns true if we increased the number of elements present.
+ pub fn insert(&mut self, point: I) -> bool {
+ self.insert_range(point..=point)
+ }
+
+ /// Returns true if we increased the number of elements present.
+ pub fn insert_range(&mut self, range: impl RangeBounds<I> + Clone) -> bool {
+ let start = inclusive_start(range.clone());
+ let Some(end) = inclusive_end(self.domain, range) else {
+ // empty range
+ return false;
+ };
+ if start > end {
+ return false;
+ }
+
+ // This condition looks a bit weird, but actually makes sense.
+ //
+ // if r.0 == end + 1, then we're actually adjacent, so we want to
+ // continue to the next range. We're looking here for the first
+ // range which starts *non-adjacently* to our end.
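+ //
+ // Worked example (illustrative): with `map = [(0, 3), (10, 13)]`,
+ // inserting `5..=8` gives `end + 1 = 9`; only `(0, 3)` starts at or
+ // before 9, so `next = 1` and we examine `(0, 3)` on the left.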
+ let next = self.map.partition_point(|r| r.0 <= end + 1);
+ let result = if let Some(right) = next.checked_sub(1) {
+ let (prev_start, prev_end) = self.map[right];
+ if prev_end + 1 >= start {
+ // If the start for the inserted range is adjacent to the
+ // end of the previous, we can extend the previous range.
+ if start < prev_start {
+ // The first range which ends *non-adjacently* to our start.
+ // And we can ensure that left <= right.
+ let left = self.map.partition_point(|l| l.1 + 1 < start);
+ let min = std::cmp::min(self.map[left].0, start);
+ let max = std::cmp::max(prev_end, end);
+ self.map[right] = (min, max);
+ if left != right {
+ self.map.drain(left..right);
+ }
+ true
+ } else {
+ // We overlap with the previous range, increase it to
+ // include us.
+ //
+ // Make sure we're actually going to *increase* it though --
+ // it may be that end is just inside the previously existing
+ // set.
+ if end > prev_end {
+ self.map[right].1 = end;
+ true
+ } else {
+ false
+ }
+ }
+ } else {
+ // Otherwise, we don't overlap, so just insert
+ self.map.insert(right + 1, (start, end));
+ true
+ }
+ } else {
+ if self.map.is_empty() {
+ // Quite common in practice, and expensive to call memcpy
+ // with length zero.
+ self.map.push((start, end));
+ } else {
+ self.map.insert(next, (start, end));
+ }
+ true
+ };
+ debug_assert!(
+ self.check_invariants(),
+ "wrong intervals after insert {:?}..={:?} to {:?}",
+ start,
+ end,
+ self
+ );
+ result
+ }
+
+ pub fn contains(&self, needle: I) -> bool {
+ let needle = needle.index() as u32;
+ let Some(last) = self.map.partition_point(|r| r.0 <= needle).checked_sub(1) else {
+ // All ranges in the map start after the needle
+ return false;
+ };
+ let (_, prev_end) = &self.map[last];
+ needle <= *prev_end
+ }
+
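+ /// Returns true if `self` contains every element of `other`. For
+ /// example (an illustrative sketch), a set containing `0..=9` is a
+ /// superset of one containing `2..=3` and `7..=8`.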
+ pub fn superset(&self, other: &IntervalSet<I>) -> bool
+ where
+ I: Step,
+ {
+ let mut sup_iter = self.iter_intervals();
+ let mut current = None;
+ let contains = |sup: Range<I>, sub: Range<I>, current: &mut Option<Range<I>>| {
+ if sup.end < sub.start {
+ // if `sup.end == sub.start`, the next sup doesn't contain `sub.start`
+ None // continue to the next sup
+ } else if sup.end >= sub.end && sup.start <= sub.start {
+ *current = Some(sup); // save the current sup
+ Some(true)
+ } else {
+ Some(false)
+ }
+ };
+ other.iter_intervals().all(|sub| {
+ current
+ .take()
+ .and_then(|sup| contains(sup, sub.clone(), &mut current))
+ .or_else(|| sup_iter.find_map(|sup| contains(sup, sub.clone(), &mut current)))
+ .unwrap_or(false)
+ })
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.map.is_empty()
+ }
+
+ /// Returns the maximum (last) element present in the set from `range`.
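+ ///
+ /// For example (a sketch): if the set contains exactly `3..=5`, then
+ /// `last_set_in(0..=4)` is `Some(4)` and `last_set_in(6..10)` is `None`.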
+ pub fn last_set_in(&self, range: impl RangeBounds<I> + Clone) -> Option<I> {
+ let start = inclusive_start(range.clone());
+ let Some(end) = inclusive_end(self.domain, range) else {
+ // empty range
+ return None;
+ };
+ if start > end {
+ return None;
+ }
+ let Some(last) = self.map.partition_point(|r| r.0 <= end).checked_sub(1) else {
+ // All ranges in the map start after the query range's end
+ return None;
+ };
+ let (_, prev_end) = &self.map[last];
+ if start <= *prev_end { Some(I::new(std::cmp::min(*prev_end, end) as usize)) } else { None }
+ }
+
+ pub fn insert_all(&mut self) {
+ self.clear();
+ if let Some(end) = self.domain.checked_sub(1) {
+ self.map.push((0, end.try_into().unwrap()));
+ }
+ debug_assert!(self.check_invariants());
+ }
+
+ pub fn union(&mut self, other: &IntervalSet<I>) -> bool
+ where
+ I: Step,
+ {
+ assert_eq!(self.domain, other.domain);
+ let mut did_insert = false;
+ for range in other.iter_intervals() {
+ did_insert |= self.insert_range(range);
+ }
+ debug_assert!(self.check_invariants());
+ did_insert
+ }
+
+ // Check that the intervals are valid: sorted, non-adjacent, and
+ // within the domain.
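+ //
+ // E.g. (illustrative): `[(1, 3), (6, 9)]` is valid, while
+ // `[(1, 3), (4, 9)]` fails the adjacency check (3 + 1 >= 4).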
+ fn check_invariants(&self) -> bool {
+ let mut current: Option<u32> = None;
+ for (start, end) in &self.map {
+ if start > end || current.map_or(false, |x| x + 1 >= *start) {
+ return false;
+ }
+ current = Some(*end);
+ }
+ current.map_or(true, |x| x < self.domain as u32)
+ }
+}
+
+/// This data structure optimizes for cases where the stored bits in each row
+/// are expected to be highly contiguous (long ranges of 1s or 0s), in contrast
+/// to BitMatrix and SparseBitMatrix which are optimized for
+/// "random"/non-contiguous bits and cheap(er) point queries at the expense of
+/// memory usage.
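+///
+/// A brief sketch of the row API (illustrative):
+///
+/// ```ignore (illustrative)
+/// let mut m: SparseIntervalMatrix<usize, usize> = SparseIntervalMatrix::new(100);
+/// m.insert_range(0, 10..20usize);
+/// assert!(m.contains(0, 15));
+/// assert!(!m.contains(0, 20));
+/// ```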
+#[derive(Clone)]
+pub struct SparseIntervalMatrix<R, C>
+where
+ R: Idx,
+ C: Idx,
+{
+ rows: IndexVec<R, IntervalSet<C>>,
+ column_size: usize,
+}
+
+impl<R: Idx, C: Step + Idx> SparseIntervalMatrix<R, C> {
+ pub fn new(column_size: usize) -> SparseIntervalMatrix<R, C> {
+ SparseIntervalMatrix { rows: IndexVec::new(), column_size }
+ }
+
+ pub fn rows(&self) -> impl Iterator<Item = R> {
+ self.rows.indices()
+ }
+
+ pub fn row(&self, row: R) -> Option<&IntervalSet<C>> {
+ self.rows.get(row)
+ }
+
+ fn ensure_row(&mut self, row: R) -> &mut IntervalSet<C> {
+ self.rows.ensure_contains_elem(row, || IntervalSet::new(self.column_size));
+ &mut self.rows[row]
+ }
+
+ pub fn union_row(&mut self, row: R, from: &IntervalSet<C>) -> bool
+ where
+ C: Step,
+ {
+ self.ensure_row(row).union(from)
+ }
+
+ pub fn union_rows(&mut self, read: R, write: R) -> bool
+ where
+ C: Step,
+ {
+ if read == write || self.rows.get(read).is_none() {
+ return false;
+ }
+ self.ensure_row(write);
+ let (read_row, write_row) = self.rows.pick2_mut(read, write);
+ write_row.union(read_row)
+ }
+
+ pub fn insert_all_into_row(&mut self, row: R) {
+ self.ensure_row(row).insert_all();
+ }
+
+ pub fn insert_range(&mut self, row: R, range: impl RangeBounds<C> + Clone) {
+ self.ensure_row(row).insert_range(range);
+ }
+
+ pub fn insert(&mut self, row: R, point: C) -> bool {
+ self.ensure_row(row).insert(point)
+ }
+
+ pub fn contains(&self, row: R, point: C) -> bool {
+ self.row(row).map_or(false, |r| r.contains(point))
+ }
+}
diff --git a/compiler/rustc_index/src/interval/tests.rs b/compiler/rustc_index/src/interval/tests.rs
new file mode 100644
index 000000000..375af60f6
--- /dev/null
+++ b/compiler/rustc_index/src/interval/tests.rs
@@ -0,0 +1,199 @@
+use super::*;
+
+#[test]
+fn insert_collapses() {
+ let mut set = IntervalSet::<u32>::new(10000);
+ set.insert_range(9831..=9837);
+ set.insert_range(43..=9830);
+ assert_eq!(set.iter_intervals().collect::<Vec<_>>(), [43..9838]);
+}
+
+#[test]
+fn contains() {
+ let mut set = IntervalSet::new(300);
+ set.insert(0u32);
+ assert!(set.contains(0));
+ set.insert_range(0..10);
+ assert!(set.contains(9));
+ assert!(!set.contains(10));
+ set.insert_range(10..11);
+ assert!(set.contains(10));
+}
+
+#[test]
+fn insert() {
+ for i in 0..30usize {
+ let mut set = IntervalSet::new(300);
+ for j in i..30usize {
+ set.insert(j);
+ for k in i..j {
+ assert!(set.contains(k));
+ }
+ }
+ }
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..1u32);
+ assert!(set.contains(0), "{:?}", set.map);
+ assert!(!set.contains(1));
+ set.insert_range(1..1);
+ assert!(set.contains(0));
+ assert!(!set.contains(1));
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(4..5u32);
+ set.insert_range(5..10);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [4, 5, 6, 7, 8, 9]);
+ set.insert_range(3..7);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(3..5);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(0..3);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(0..10);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(5..10);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+
+ let mut set = IntervalSet::new(300);
+ set.insert_range(0..10u32);
+ set.insert_range(5..13);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
+}
+
+#[test]
+fn insert_range() {
+ #[track_caller]
+ fn check<R>(range: R)
+ where
+ R: RangeBounds<usize> + Clone + IntoIterator<Item = usize> + std::fmt::Debug,
+ {
+ let mut set = IntervalSet::new(300);
+ set.insert_range(range.clone());
+ for i in set.iter() {
+ assert!(range.contains(&i));
+ }
+ for i in range.clone() {
+ assert!(set.contains(i), "A: {} in {:?}, inserted {:?}", i, set, range);
+ }
+ set.insert_range(range.clone());
+ for i in set.iter() {
+ assert!(range.contains(&i), "{} in {:?}", i, set);
+ }
+ for i in range.clone() {
+ assert!(set.contains(i), "B: {} in {:?}, inserted {:?}", i, set, range);
+ }
+ }
+ check(10..10);
+ check(10..100);
+ check(10..30);
+ check(0..5);
+ check(0..250);
+ check(200..250);
+
+ check(10..=10);
+ check(10..=100);
+ check(10..=30);
+ check(0..=5);
+ check(0..=250);
+ check(200..=250);
+
+ for i in 0..30 {
+ for j in i..30 {
+ check(i..j);
+ check(i..=j);
+ }
+ }
+}
+
+#[test]
+fn insert_range_dual() {
+ let mut set = IntervalSet::<u32>::new(300);
+ set.insert_range(0..3);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2]);
+ set.insert_range(5..7);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 5, 6]);
+ set.insert_range(3..4);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 5, 6]);
+ set.insert_range(3..5);
+ assert_eq!(set.iter().collect::<Vec<_>>(), [0, 1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn last_set_before_adjacent() {
+ let mut set = IntervalSet::<u32>::new(300);
+ set.insert_range(0..3);
+ set.insert_range(3..5);
+ assert_eq!(set.last_set_in(0..3), Some(2));
+ assert_eq!(set.last_set_in(0..5), Some(4));
+ assert_eq!(set.last_set_in(3..5), Some(4));
+ set.insert_range(2..5);
+ assert_eq!(set.last_set_in(0..3), Some(2));
+ assert_eq!(set.last_set_in(0..5), Some(4));
+ assert_eq!(set.last_set_in(3..5), Some(4));
+}
+
+#[test]
+fn last_set_in() {
+ fn easy(set: &IntervalSet<usize>, needle: impl RangeBounds<usize>) -> Option<usize> {
+ let mut last_leq = None;
+ for e in set.iter() {
+ if needle.contains(&e) {
+ last_leq = Some(e);
+ }
+ }
+ last_leq
+ }
+
+ #[track_caller]
+ fn cmp(set: &IntervalSet<usize>, needle: impl RangeBounds<usize> + Clone + std::fmt::Debug) {
+ assert_eq!(
+ set.last_set_in(needle.clone()),
+ easy(set, needle.clone()),
+ "{:?} in {:?}",
+ needle,
+ set
+ );
+ }
+ let mut set = IntervalSet::new(300);
+ cmp(&set, 50..=50);
+ set.insert(64);
+ cmp(&set, 64..=64);
+ set.insert(64 - 1);
+ cmp(&set, 0..=64 - 1);
+ cmp(&set, 0..=5);
+ cmp(&set, 10..100);
+ set.insert(100);
+ cmp(&set, 100..110);
+ cmp(&set, 99..100);
+ cmp(&set, 99..=100);
+
+ for i in 0..=30 {
+ for j in i..=30 {
+ for k in 0..30 {
+ let mut set = IntervalSet::new(100);
+ cmp(&set, ..j);
+ cmp(&set, i..);
+ cmp(&set, i..j);
+ cmp(&set, i..=j);
+ set.insert(k);
+ cmp(&set, ..j);
+ cmp(&set, i..);
+ cmp(&set, i..j);
+ cmp(&set, i..=j);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs
new file mode 100644
index 000000000..33c3c536f
--- /dev/null
+++ b/compiler/rustc_index/src/lib.rs
@@ -0,0 +1,23 @@
+#![feature(allow_internal_unstable)]
+#![feature(bench_black_box)]
+#![feature(extend_one)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(new_uninit)]
+#![feature(step_trait)]
+#![feature(stmt_expr_attributes)]
+#![feature(test)]
+
+pub mod bit_set;
+pub mod interval;
+pub mod vec;
+
+pub use rustc_macros::newtype_index;
+
+/// Type size assertion. The first argument is a type and the second argument is its expected size.
+#[macro_export]
+macro_rules! static_assert_size {
+ ($ty:ty, $size:expr) => {
+ const _: [(); $size] = [(); ::std::mem::size_of::<$ty>()];
+ };
+}
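+
+// Example use (an illustrative sketch): the emitted constant fails to
+// compile if the size ever changes, e.g.
+//
+// static_assert_size!(u64, 8);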
diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs
new file mode 100644
index 000000000..30ff36421
--- /dev/null
+++ b/compiler/rustc_index/src/vec.rs
@@ -0,0 +1,409 @@
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+use std::fmt;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::iter::FromIterator;
+use std::marker::PhantomData;
+use std::ops::{Index, IndexMut, RangeBounds};
+use std::slice;
+use std::vec;
+
+/// Represents some newtyped `usize` wrapper.
+///
+/// Purpose: avoid mixing indexes for different bitvector domains.
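+///
+/// A hand-rolled example (illustrative; in practice the `newtype_index!`
+/// macro generates such impls):
+///
+/// ```ignore (illustrative)
+/// #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+/// struct ItemId(u32);
+///
+/// impl Idx for ItemId {
+///     fn new(idx: usize) -> Self {
+///         assert!(idx <= u32::MAX as usize);
+///         ItemId(idx as u32)
+///     }
+///     fn index(self) -> usize {
+///         self.0 as usize
+///     }
+/// }
+/// ```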
+pub trait Idx: Copy + 'static + Eq + PartialEq + Debug + Hash {
+ fn new(idx: usize) -> Self;
+
+ fn index(self) -> usize;
+
+ fn increment_by(&mut self, amount: usize) {
+ *self = self.plus(amount);
+ }
+
+ fn plus(self, amount: usize) -> Self {
+ Self::new(self.index() + amount)
+ }
+}
+
+impl Idx for usize {
+ #[inline]
+ fn new(idx: usize) -> Self {
+ idx
+ }
+ #[inline]
+ fn index(self) -> usize {
+ self
+ }
+}
+
+impl Idx for u32 {
+ #[inline]
+ fn new(idx: usize) -> Self {
+ assert!(idx <= u32::MAX as usize);
+ idx as u32
+ }
+ #[inline]
+ fn index(self) -> usize {
+ self as usize
+ }
+}
+
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct IndexVec<I: Idx, T> {
+ pub raw: Vec<T>,
+ _marker: PhantomData<fn(&I)>,
+}
+
+// Whether `IndexVec` is `Send` depends only on the data,
+// not the phantom data.
+unsafe impl<I: Idx, T> Send for IndexVec<I, T> where T: Send {}
+
+impl<S: Encoder, I: Idx, T: Encodable<S>> Encodable<S> for IndexVec<I, T> {
+ fn encode(&self, s: &mut S) {
+ Encodable::encode(&self.raw, s);
+ }
+}
+
+impl<D: Decoder, I: Idx, T: Decodable<D>> Decodable<D> for IndexVec<I, T> {
+ fn decode(d: &mut D) -> Self {
+ IndexVec { raw: Decodable::decode(d), _marker: PhantomData }
+ }
+}
+
+impl<I: Idx, T: fmt::Debug> fmt::Debug for IndexVec<I, T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.raw, fmt)
+ }
+}
+
+impl<I: Idx, T> IndexVec<I, T> {
+ #[inline]
+ pub fn new() -> Self {
+ IndexVec { raw: Vec::new(), _marker: PhantomData }
+ }
+
+ #[inline]
+ pub fn from_raw(raw: Vec<T>) -> Self {
+ IndexVec { raw, _marker: PhantomData }
+ }
+
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> Self {
+ IndexVec { raw: Vec::with_capacity(capacity), _marker: PhantomData }
+ }
+
+ #[inline]
+ pub fn from_elem<S>(elem: T, universe: &IndexVec<I, S>) -> Self
+ where
+ T: Clone,
+ {
+ IndexVec { raw: vec![elem; universe.len()], _marker: PhantomData }
+ }
+
+ #[inline]
+ pub fn from_elem_n(elem: T, n: usize) -> Self
+ where
+ T: Clone,
+ {
+ IndexVec { raw: vec![elem; n], _marker: PhantomData }
+ }
+
+ /// Create an `IndexVec` with `n` elements, where the value of each
+ /// element is the result of `func(i)`. (The underlying vector will
+ /// be allocated only once, with a capacity of at least `n`.)
+ #[inline]
+ pub fn from_fn_n(func: impl FnMut(I) -> T, n: usize) -> Self {
+ let indices = (0..n).map(I::new);
+ Self::from_raw(indices.map(func).collect())
+ }
+
+ #[inline]
+ pub fn push(&mut self, d: T) -> I {
+ let idx = I::new(self.len());
+ self.raw.push(d);
+ idx
+ }
+
+ #[inline]
+ pub fn pop(&mut self) -> Option<T> {
+ self.raw.pop()
+ }
+
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.raw.len()
+ }
+
+ /// Gives the next index that will be assigned when `push` is
+ /// called.
+ #[inline]
+ pub fn next_index(&self) -> I {
+ I::new(self.len())
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.raw.is_empty()
+ }
+
+ #[inline]
+ pub fn into_iter(self) -> vec::IntoIter<T> {
+ self.raw.into_iter()
+ }
+
+ #[inline]
+ pub fn into_iter_enumerated(
+ self,
+ ) -> impl DoubleEndedIterator<Item = (I, T)> + ExactSizeIterator {
+ self.raw.into_iter().enumerate().map(|(n, t)| (I::new(n), t))
+ }
+
+ #[inline]
+ pub fn iter(&self) -> slice::Iter<'_, T> {
+ self.raw.iter()
+ }
+
+ #[inline]
+ pub fn iter_enumerated(
+ &self,
+ ) -> impl DoubleEndedIterator<Item = (I, &T)> + ExactSizeIterator + '_ {
+ self.raw.iter().enumerate().map(|(n, t)| (I::new(n), t))
+ }
+
+ #[inline]
+ pub fn indices(&self) -> impl DoubleEndedIterator<Item = I> + ExactSizeIterator + 'static {
+ (0..self.len()).map(|n| I::new(n))
+ }
+
+ #[inline]
+ pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> {
+ self.raw.iter_mut()
+ }
+
+ #[inline]
+ pub fn iter_enumerated_mut(
+ &mut self,
+ ) -> impl DoubleEndedIterator<Item = (I, &mut T)> + ExactSizeIterator + '_ {
+ self.raw.iter_mut().enumerate().map(|(n, t)| (I::new(n), t))
+ }
+
+ #[inline]
+ pub fn drain<'a, R: RangeBounds<usize>>(
+ &'a mut self,
+ range: R,
+ ) -> impl Iterator<Item = T> + 'a {
+ self.raw.drain(range)
+ }
+
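+ /// Note (as currently written): the yielded indices restart at
+ /// `I::new(0)` regardless of where `range` starts; they enumerate
+ /// the drained elements, not their original positions.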
+ #[inline]
+ pub fn drain_enumerated<'a, R: RangeBounds<usize>>(
+ &'a mut self,
+ range: R,
+ ) -> impl Iterator<Item = (I, T)> + 'a {
+ self.raw.drain(range).enumerate().map(|(n, t)| (I::new(n), t))
+ }
+
+ #[inline]
+ pub fn last(&self) -> Option<I> {
+ self.len().checked_sub(1).map(I::new)
+ }
+
+ #[inline]
+ pub fn shrink_to_fit(&mut self) {
+ self.raw.shrink_to_fit()
+ }
+
+ #[inline]
+ pub fn swap(&mut self, a: I, b: I) {
+ self.raw.swap(a.index(), b.index())
+ }
+
+ #[inline]
+ pub fn truncate(&mut self, a: usize) {
+ self.raw.truncate(a)
+ }
+
+ #[inline]
+ pub fn get(&self, index: I) -> Option<&T> {
+ self.raw.get(index.index())
+ }
+
+ #[inline]
+ pub fn get_mut(&mut self, index: I) -> Option<&mut T> {
+ self.raw.get_mut(index.index())
+ }
+
+ /// Returns mutable references to two distinct elements, `a` and `b`.
+ ///
+ /// Panics if `a == b`.
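+ ///
+ /// A sketch of typical use (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// let mut v: IndexVec<usize, i32> = IndexVec::from_raw(vec![1, 2, 3]);
+ /// let (a, b) = v.pick2_mut(0, 2);
+ /// std::mem::swap(a, b);
+ /// assert_eq!(v.raw, [3, 2, 1]);
+ /// ```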
+ #[inline]
+ pub fn pick2_mut(&mut self, a: I, b: I) -> (&mut T, &mut T) {
+ let (ai, bi) = (a.index(), b.index());
+ assert!(ai != bi);
+
+ if ai < bi {
+ let (c1, c2) = self.raw.split_at_mut(bi);
+ (&mut c1[ai], &mut c2[0])
+ } else {
+ let (c2, c1) = self.pick2_mut(b, a);
+ (c1, c2)
+ }
+ }
+
+ /// Returns mutable references to three distinct elements.
+ ///
+ /// Panics if the elements are not distinct.
+ #[inline]
+ pub fn pick3_mut(&mut self, a: I, b: I, c: I) -> (&mut T, &mut T, &mut T) {
+ let (ai, bi, ci) = (a.index(), b.index(), c.index());
+ assert!(ai != bi && bi != ci && ci != ai);
+ let len = self.raw.len();
+ assert!(ai < len && bi < len && ci < len);
+ let ptr = self.raw.as_mut_ptr();
+ unsafe { (&mut *ptr.add(ai), &mut *ptr.add(bi), &mut *ptr.add(ci)) }
+ }
+
+ pub fn convert_index_type<Ix: Idx>(self) -> IndexVec<Ix, T> {
+ IndexVec { raw: self.raw, _marker: PhantomData }
+ }
+
+ /// Grows the index vector so that it contains an entry for
+ /// `elem`; if one is already present, this has no effect.
+ /// Otherwise, it inserts new values as needed by invoking
+ /// `fill_value`.
+ #[inline]
+ pub fn ensure_contains_elem(&mut self, elem: I, fill_value: impl FnMut() -> T) {
+ let min_new_len = elem.index() + 1;
+ if self.len() < min_new_len {
+ self.raw.resize_with(min_new_len, fill_value);
+ }
+ }
+
+ #[inline]
+ pub fn resize_to_elem(&mut self, elem: I, fill_value: impl FnMut() -> T) {
+ let min_new_len = elem.index() + 1;
+ self.raw.resize_with(min_new_len, fill_value);
+ }
+}
+
+/// `IndexVec` is often used as a map, so it provides some map-like APIs.
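+///
+/// For example (a sketch), `insert` auto-grows the vector with `None`s:
+///
+/// ```ignore (illustrative)
+/// let mut map: IndexVec<usize, Option<&str>> = IndexVec::new();
+/// assert_eq!(map.insert(3, "x"), None); // grows to len 4
+/// assert_eq!(map.remove(3), Some("x"));
+/// ```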
+impl<I: Idx, T> IndexVec<I, Option<T>> {
+ #[inline]
+ pub fn insert(&mut self, index: I, value: T) -> Option<T> {
+ self.ensure_contains_elem(index, || None);
+ self[index].replace(value)
+ }
+
+ #[inline]
+ pub fn get_or_insert_with(&mut self, index: I, value: impl FnOnce() -> T) -> &mut T {
+ self.ensure_contains_elem(index, || None);
+ self[index].get_or_insert_with(value)
+ }
+
+ #[inline]
+ pub fn remove(&mut self, index: I) -> Option<T> {
+ self.ensure_contains_elem(index, || None);
+ self[index].take()
+ }
+}
+
+impl<I: Idx, T: Clone> IndexVec<I, T> {
+ #[inline]
+ pub fn resize(&mut self, new_len: usize, value: T) {
+ self.raw.resize(new_len, value)
+ }
+}
+
+impl<I: Idx, T: Ord> IndexVec<I, T> {
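+ /// Binary searches the underlying sorted `Vec`, converting the raw
+ /// positions into typed indices: `Ok(i)` if the value is found at
+ /// index `i`, `Err(i)` with the would-be insertion point otherwise.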
+ #[inline]
+ pub fn binary_search(&self, value: &T) -> Result<I, I> {
+ match self.raw.binary_search(value) {
+ Ok(i) => Ok(Idx::new(i)),
+ Err(i) => Err(Idx::new(i)),
+ }
+ }
+}
+
+impl<I: Idx, T> Index<I> for IndexVec<I, T> {
+ type Output = T;
+
+ #[inline]
+ fn index(&self, index: I) -> &T {
+ &self.raw[index.index()]
+ }
+}
+
+impl<I: Idx, T> IndexMut<I> for IndexVec<I, T> {
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut T {
+ &mut self.raw[index.index()]
+ }
+}
+
+impl<I: Idx, T> Default for IndexVec<I, T> {
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<I: Idx, T> Extend<T> for IndexVec<I, T> {
+ #[inline]
+ fn extend<J: IntoIterator<Item = T>>(&mut self, iter: J) {
+ self.raw.extend(iter);
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.raw.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.raw.reserve(additional);
+ }
+}
+
+impl<I: Idx, T> FromIterator<T> for IndexVec<I, T> {
+ #[inline]
+ fn from_iter<J>(iter: J) -> Self
+ where
+ J: IntoIterator<Item = T>,
+ {
+ IndexVec { raw: FromIterator::from_iter(iter), _marker: PhantomData }
+ }
+}
+
+impl<I: Idx, T> IntoIterator for IndexVec<I, T> {
+ type Item = T;
+ type IntoIter = vec::IntoIter<T>;
+
+ #[inline]
+ fn into_iter(self) -> vec::IntoIter<T> {
+ self.raw.into_iter()
+ }
+}
+
+impl<'a, I: Idx, T> IntoIterator for &'a IndexVec<I, T> {
+ type Item = &'a T;
+ type IntoIter = slice::Iter<'a, T>;
+
+ #[inline]
+ fn into_iter(self) -> slice::Iter<'a, T> {
+ self.raw.iter()
+ }
+}
+
+impl<'a, I: Idx, T> IntoIterator for &'a mut IndexVec<I, T> {
+ type Item = &'a mut T;
+ type IntoIter = slice::IterMut<'a, T>;
+
+ #[inline]
+ fn into_iter(self) -> slice::IterMut<'a, T> {
+ self.raw.iter_mut()
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_index/src/vec/tests.rs b/compiler/rustc_index/src/vec/tests.rs
new file mode 100644
index 000000000..915d2e8bc
--- /dev/null
+++ b/compiler/rustc_index/src/vec/tests.rs
@@ -0,0 +1,55 @@
+#![allow(dead_code)]
+
+// Allows the macro invocation below to work
+use crate as rustc_index;
+
+rustc_macros::newtype_index!(struct MyIdx { MAX = 0xFFFF_FFFA });
+
+#[test]
+fn index_size_is_optimized() {
+ use std::mem::size_of;
+
+ assert_eq!(size_of::<MyIdx>(), 4);
+ // Uses 0xFFFF_FFFB
+ assert_eq!(size_of::<Option<MyIdx>>(), 4);
+ // Uses 0xFFFF_FFFC
+ assert_eq!(size_of::<Option<Option<MyIdx>>>(), 4);
+ // Uses 0xFFFF_FFFD
+ assert_eq!(size_of::<Option<Option<Option<MyIdx>>>>(), 4);
+ // Uses 0xFFFF_FFFE
+ assert_eq!(size_of::<Option<Option<Option<Option<MyIdx>>>>>(), 4);
+ // Uses 0xFFFF_FFFF
+ assert_eq!(size_of::<Option<Option<Option<Option<Option<MyIdx>>>>>>(), 4);
+ // Uses a tag
+ assert_eq!(size_of::<Option<Option<Option<Option<Option<Option<MyIdx>>>>>>>(), 8);
+}
+
+#[test]
+fn range_iterator_iterates_forwards() {
+ let range = MyIdx::from_u32(1)..MyIdx::from_u32(4);
+ assert_eq!(
+ range.collect::<Vec<_>>(),
+ [MyIdx::from_u32(1), MyIdx::from_u32(2), MyIdx::from_u32(3)]
+ );
+}
+
+#[test]
+fn range_iterator_iterates_backwards() {
+ let range = MyIdx::from_u32(1)..MyIdx::from_u32(4);
+ assert_eq!(
+ range.rev().collect::<Vec<_>>(),
+ [MyIdx::from_u32(3), MyIdx::from_u32(2), MyIdx::from_u32(1)]
+ );
+}
+
+#[test]
+fn range_count_is_correct() {
+ let range = MyIdx::from_u32(1)..MyIdx::from_u32(4);
+ assert_eq!(range.count(), 3);
+}
+
+#[test]
+fn range_size_hint_is_correct() {
+ let range = MyIdx::from_u32(1)..MyIdx::from_u32(4);
+ assert_eq!(range.size_hint(), (3, Some(3)));
+}
diff --git a/compiler/rustc_infer/Cargo.toml b/compiler/rustc_infer/Cargo.toml
new file mode 100644
index 000000000..02ac83a5e
--- /dev/null
+++ b/compiler/rustc_infer/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "rustc_infer"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+tracing = "0.1"
+rustc_middle = { path = "../rustc_middle" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_infer/src/infer/at.rs b/compiler/rustc_infer/src/infer/at.rs
new file mode 100644
index 000000000..130214a65
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/at.rs
@@ -0,0 +1,429 @@
+//! A nice interface for working with the infcx. The basic idea is to
+//! do `infcx.at(cause, param_env)`, which sets the "cause" of the
+//! operation as well as the surrounding parameter environment. Then
+//! you can do something like `.sub(a, b)` or `.eq(a, b)` to create a
+//! subtype or equality relationship respectively. The first argument
+//! is always the "expected" output from the POV of diagnostics.
+//!
+//! Examples:
+//! ```ignore (fragment)
+//! infcx.at(cause, param_env).sub(a, b)
+//! // requires that `a <: b`, with `a` considered the "expected" type
+//!
+//! infcx.at(cause, param_env).sup(a, b)
+//! // requires that `b <: a`, with `a` considered the "expected" type
+//!
+//! infcx.at(cause, param_env).eq(a, b)
+//! // requires that `a == b`, with `a` considered the "expected" type
+//! ```
+//! For finer-grained control, you can also use `trace`:
+//! ```ignore (fragment)
+//! infcx.at(...).trace(a, b).sub(&c, &d)
+//! ```
+//! This will set `a` and `b` as the "root" values for
+//! error-reporting, but actually operate on `c` and `d`. This is
+//! sometimes useful when the types of `c` and `d` are not traceable
+//! things. (That system should probably be refactored.)
+
+use super::*;
+
+use rustc_middle::ty::relate::{Relate, TypeRelation};
+use rustc_middle::ty::{Const, ImplSubject};
+
+pub struct At<'a, 'tcx> {
+ pub infcx: &'a InferCtxt<'a, 'tcx>,
+ pub cause: &'a ObligationCause<'tcx>,
+ pub param_env: ty::ParamEnv<'tcx>,
+ /// Whether we should define opaque types
+ /// or just treat them opaquely.
+ /// Currently only used to prevent predicate
+ /// matching from matching anything against opaque
+ /// types.
+ pub define_opaque_types: bool,
+}
+
+pub struct Trace<'a, 'tcx> {
+ at: At<'a, 'tcx>,
+ a_is_expected: bool,
+ trace: TypeTrace<'tcx>,
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ #[inline]
+ pub fn at(
+ &'a self,
+ cause: &'a ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> At<'a, 'tcx> {
+ At { infcx: self, cause, param_env, define_opaque_types: true }
+ }
+
+ /// Forks the inference context, creating a new inference context with the same inference
+ /// variables in the same state. This can be used to "branch off" many tests from the same
+ /// common state. Used in coherence.
+ pub fn fork(&self) -> Self {
+ Self {
+ tcx: self.tcx,
+ defining_use_anchor: self.defining_use_anchor,
+ considering_regions: self.considering_regions,
+ in_progress_typeck_results: self.in_progress_typeck_results,
+ inner: self.inner.clone(),
+ skip_leak_check: self.skip_leak_check.clone(),
+ lexical_region_resolutions: self.lexical_region_resolutions.clone(),
+ selection_cache: self.selection_cache.clone(),
+ evaluation_cache: self.evaluation_cache.clone(),
+ reported_trait_errors: self.reported_trait_errors.clone(),
+ reported_closure_mismatch: self.reported_closure_mismatch.clone(),
+ tainted_by_errors_flag: self.tainted_by_errors_flag.clone(),
+ err_count_on_creation: self.err_count_on_creation,
+ in_snapshot: self.in_snapshot.clone(),
+ universe: self.universe.clone(),
+ }
+ }
+}
+
+pub trait ToTrace<'tcx>: Relate<'tcx> + Copy {
+ fn to_trace(
+ tcx: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx>;
+}
+
+impl<'a, 'tcx> At<'a, 'tcx> {
+ pub fn define_opaque_types(self, define_opaque_types: bool) -> Self {
+ Self { define_opaque_types, ..self }
+ }
+
+ /// Hacky routine for equating two impl headers in coherence.
+ pub fn eq_impl_headers(
+ self,
+ expected: &ty::ImplHeader<'tcx>,
+ actual: &ty::ImplHeader<'tcx>,
+ ) -> InferResult<'tcx, ()> {
+ debug!("eq_impl_header({:?} = {:?})", expected, actual);
+ match (expected.trait_ref, actual.trait_ref) {
+ (Some(a_ref), Some(b_ref)) => self.eq(a_ref, b_ref),
+ (None, None) => self.eq(expected.self_ty, actual.self_ty),
+ _ => bug!("mk_eq_impl_headers given mismatched impl kinds"),
+ }
+ }
+
+ /// Makes `a <: b`, where `a` may or may not be expected.
+ ///
+ /// See [`At::trace_exp`] and [`Trace::sub`] for a version of
+ /// this method that only requires `T: Relate<'tcx>`
+ pub fn sub_exp<T>(self, a_is_expected: bool, a: T, b: T) -> InferResult<'tcx, ()>
+ where
+ T: ToTrace<'tcx>,
+ {
+ self.trace_exp(a_is_expected, a, b).sub(a, b)
+ }
+
+ /// Makes `actual <: expected`. For example, if type-checking a
+ /// call like `foo(x)`, where `foo: fn(i32)`, you might have
+ /// `sup(i32, x)`, since the "expected" type is the type that
+ /// appears in the signature.
+ ///
+ /// See [`At::trace`] and [`Trace::sub`] for a version of
+ /// this method that only requires `T: Relate<'tcx>`
+ pub fn sup<T>(self, expected: T, actual: T) -> InferResult<'tcx, ()>
+ where
+ T: ToTrace<'tcx>,
+ {
+ self.sub_exp(false, actual, expected)
+ }
+
+ /// Makes `expected <: actual`.
+ ///
+ /// See [`At::trace`] and [`Trace::sub`] for a version of
+ /// this method that only requires `T: Relate<'tcx>`
+ pub fn sub<T>(self, expected: T, actual: T) -> InferResult<'tcx, ()>
+ where
+ T: ToTrace<'tcx>,
+ {
+ self.sub_exp(true, expected, actual)
+ }
+
+ /// Makes `a == b`, where `a` may or may not be expected (if
+ /// `a_is_expected` is true, then `a` is expected).
+ ///
+ /// See [`At::trace_exp`] and [`Trace::eq`] for a version of
+ /// this method that only requires `T: Relate<'tcx>`
+ pub fn eq_exp<T>(self, a_is_expected: bool, a: T, b: T) -> InferResult<'tcx, ()>
+ where
+ T: ToTrace<'tcx>,
+ {
+ self.trace_exp(a_is_expected, a, b).eq(a, b)
+ }
+
+ /// Makes `expected == actual`.
+ ///
+ /// See [`At::trace`] and [`Trace::eq`] for a version of
+ /// this method that only requires `T: Relate<'tcx>`
+ pub fn eq<T>(self, expected: T, actual: T) -> InferResult<'tcx, ()>
+ where
+ T: ToTrace<'tcx>,
+ {
+ self.trace(expected, actual).eq(expected, actual)
+ }
+
+ pub fn relate<T>(self, expected: T, variance: ty::Variance, actual: T) -> InferResult<'tcx, ()>
+ where
+ T: ToTrace<'tcx>,
+ {
+ match variance {
+ ty::Variance::Covariant => self.sub(expected, actual),
+ ty::Variance::Invariant => self.eq(expected, actual),
+ ty::Variance::Contravariant => self.sup(expected, actual),
+
+ // We could make this make sense but it's not readily
+ // exposed and I don't feel like dealing with it. Note
+ // that bivariance in general does a bit more than just
+ // *nothing*: it checks that the types are the same
+ // "modulo variance", basically.
+ ty::Variance::Bivariant => panic!("Bivariant given to `relate()`"),
+ }
+ }
+
+ /// Computes the least-upper-bound, or mutual supertype, of two
+ /// values. The order of the arguments doesn't matter, but since
+ /// this can result in an error (e.g., if asked to compute LUB of
+ /// u32 and i32), it is meaningful to call one of them the
+ /// "expected type".
+ ///
+ /// See [`At::trace`] and [`Trace::lub`] for a version of
+ /// this method that only requires `T: Relate<'tcx>`
+ pub fn lub<T>(self, expected: T, actual: T) -> InferResult<'tcx, T>
+ where
+ T: ToTrace<'tcx>,
+ {
+ self.trace(expected, actual).lub(expected, actual)
+ }
+
+ /// Computes the greatest-lower-bound, or mutual subtype, of two
+ /// values. As with `lub` order doesn't matter, except for error
+ /// cases.
+ ///
+ /// See [`At::trace`] and [`Trace::glb`] for a version of
+ /// this method that only requires `T: Relate<'tcx>`
+ pub fn glb<T>(self, expected: T, actual: T) -> InferResult<'tcx, T>
+ where
+ T: ToTrace<'tcx>,
+ {
+ self.trace(expected, actual).glb(expected, actual)
+ }
+
+ /// Sets the "trace" values that will be used for
+ /// error-reporting, but doesn't actually perform any operation
+ /// yet (this is useful when you want to set the trace using
+ /// distinct values from those you wish to operate upon).
+ pub fn trace<T>(self, expected: T, actual: T) -> Trace<'a, 'tcx>
+ where
+ T: ToTrace<'tcx>,
+ {
+ self.trace_exp(true, expected, actual)
+ }
+
+ /// Like `trace`, but the expected value is determined by the
+ /// boolean argument (if true, then the first argument `a` is the
+ /// "expected" value).
+ pub fn trace_exp<T>(self, a_is_expected: bool, a: T, b: T) -> Trace<'a, 'tcx>
+ where
+ T: ToTrace<'tcx>,
+ {
+ let trace = ToTrace::to_trace(self.infcx.tcx, self.cause, a_is_expected, a, b);
+ Trace { at: self, trace, a_is_expected }
+ }
+}
+
+impl<'a, 'tcx> Trace<'a, 'tcx> {
+ /// Makes `a <: b` where `a` may or may not be expected (if
+ /// `a_is_expected` is true, then `a` is expected).
+ #[instrument(skip(self), level = "debug")]
+ pub fn sub<T>(self, a: T, b: T) -> InferResult<'tcx, ()>
+ where
+ T: Relate<'tcx>,
+ {
+ let Trace { at, trace, a_is_expected } = self;
+ at.infcx.commit_if_ok(|_| {
+ let mut fields = at.infcx.combine_fields(trace, at.param_env, at.define_opaque_types);
+ fields
+ .sub(a_is_expected)
+ .relate(a, b)
+ .map(move |_| InferOk { value: (), obligations: fields.obligations })
+ })
+ }
+
+ /// Makes `a == b`; the expectation is set by the call to
+ /// `trace()`.
+ #[instrument(skip(self), level = "debug")]
+ pub fn eq<T>(self, a: T, b: T) -> InferResult<'tcx, ()>
+ where
+ T: Relate<'tcx>,
+ {
+ let Trace { at, trace, a_is_expected } = self;
+ at.infcx.commit_if_ok(|_| {
+ let mut fields = at.infcx.combine_fields(trace, at.param_env, at.define_opaque_types);
+ fields
+ .equate(a_is_expected)
+ .relate(a, b)
+ .map(move |_| InferOk { value: (), obligations: fields.obligations })
+ })
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn lub<T>(self, a: T, b: T) -> InferResult<'tcx, T>
+ where
+ T: Relate<'tcx>,
+ {
+ let Trace { at, trace, a_is_expected } = self;
+ at.infcx.commit_if_ok(|_| {
+ let mut fields = at.infcx.combine_fields(trace, at.param_env, at.define_opaque_types);
+ fields
+ .lub(a_is_expected)
+ .relate(a, b)
+ .map(move |t| InferOk { value: t, obligations: fields.obligations })
+ })
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn glb<T>(self, a: T, b: T) -> InferResult<'tcx, T>
+ where
+ T: Relate<'tcx>,
+ {
+ let Trace { at, trace, a_is_expected } = self;
+ at.infcx.commit_if_ok(|_| {
+ let mut fields = at.infcx.combine_fields(trace, at.param_env, at.define_opaque_types);
+ fields
+ .glb(a_is_expected)
+ .relate(a, b)
+ .map(move |t| InferOk { value: t, obligations: fields.obligations })
+ })
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for ImplSubject<'tcx> {
+ fn to_trace(
+ tcx: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ match (a, b) {
+ (ImplSubject::Trait(trait_ref_a), ImplSubject::Trait(trait_ref_b)) => {
+ ToTrace::to_trace(tcx, cause, a_is_expected, trait_ref_a, trait_ref_b)
+ }
+ (ImplSubject::Inherent(ty_a), ImplSubject::Inherent(ty_b)) => {
+ ToTrace::to_trace(tcx, cause, a_is_expected, ty_a, ty_b)
+ }
+ (ImplSubject::Trait(_), ImplSubject::Inherent(_))
+ | (ImplSubject::Inherent(_), ImplSubject::Trait(_)) => {
+ bug!("can not trace TraitRef and Ty");
+ }
+ }
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for Ty<'tcx> {
+ fn to_trace(
+ _: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: Terms(ExpectedFound::new(a_is_expected, a.into(), b.into())),
+ }
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for ty::Region<'tcx> {
+ fn to_trace(
+ _: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace { cause: cause.clone(), values: Regions(ExpectedFound::new(a_is_expected, a, b)) }
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for Const<'tcx> {
+ fn to_trace(
+ _: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: Terms(ExpectedFound::new(a_is_expected, a.into(), b.into())),
+ }
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for ty::Term<'tcx> {
+ fn to_trace(
+ _: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace { cause: cause.clone(), values: Terms(ExpectedFound::new(a_is_expected, a, b)) }
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for ty::TraitRef<'tcx> {
+ fn to_trace(
+ _: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: TraitRefs(ExpectedFound::new(a_is_expected, a, b)),
+ }
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for ty::PolyTraitRef<'tcx> {
+ fn to_trace(
+ _: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: PolyTraitRefs(ExpectedFound::new(a_is_expected, a, b)),
+ }
+ }
+}
+
+impl<'tcx> ToTrace<'tcx> for ty::ProjectionTy<'tcx> {
+ fn to_trace(
+ tcx: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Self,
+ b: Self,
+ ) -> TypeTrace<'tcx> {
+ let a_ty = tcx.mk_projection(a.item_def_id, a.substs);
+ let b_ty = tcx.mk_projection(b.item_def_id, b.substs);
+ TypeTrace {
+ cause: cause.clone(),
+ values: Terms(ExpectedFound::new(a_is_expected, a_ty.into(), b_ty.into())),
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
new file mode 100644
index 000000000..ca7862c9d
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/canonical/canonicalizer.rs
@@ -0,0 +1,791 @@
+//! This module contains the "canonicalizer" itself.
+//!
+//! For an overview of what canonicalization is and how it fits into
+//! rustc, check out the [chapter in the rustc dev guide][c].
+//!
+//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
+
+use crate::infer::canonical::{
+ Canonical, CanonicalTyVarKind, CanonicalVarInfo, CanonicalVarKind, Canonicalized,
+ OriginalQueryValues,
+};
+use crate::infer::InferCtxt;
+use rustc_middle::ty::flags::FlagComputation;
+use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::{self, BoundVar, InferConst, List, Ty, TyCtxt, TypeFlags};
+use std::sync::atomic::Ordering;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::Idx;
+use smallvec::SmallVec;
+
+impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
+ /// Canonicalizes a query value `V`. When we canonicalize a query,
+ /// we not only canonicalize unbound inference variables, but we
+ /// *also* replace all free regions whatsoever. So for example a
+ /// query like `T: Trait<'static>` would be canonicalized to
+ ///
+ /// ```text
+ /// T: Trait<'?0>
+ /// ```
+ ///
+ /// with a mapping M that maps `'?0` to `'static`.
+ ///
+ /// To get a good understanding of what is happening here, check
+ /// out the [chapter in the rustc dev guide][c].
+ ///
+ /// [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html#canonicalizing-the-query
+ pub fn canonicalize_query<V>(
+ &self,
+ value: V,
+ query_state: &mut OriginalQueryValues<'tcx>,
+ ) -> Canonicalized<'tcx, V>
+ where
+ V: TypeFoldable<'tcx>,
+ {
+ self.tcx.sess.perf_stats.queries_canonicalized.fetch_add(1, Ordering::Relaxed);
+
+ Canonicalizer::canonicalize(value, self, self.tcx, &CanonicalizeAllFreeRegions, query_state)
+ }
+
+ /// Like [Self::canonicalize_query], but preserves distinct universes. For
+ /// example, `&'?0: Trait<'?1>`, where `'?0` is in `U1` and `'?1` is in
+ /// `U3`, would be canonicalized to have `'?0` in `U1` and `'?1` in `U2`.
+ ///
+ /// This is used for Chalk integration.
+ pub fn canonicalize_query_preserving_universes<V>(
+ &self,
+ value: V,
+ query_state: &mut OriginalQueryValues<'tcx>,
+ ) -> Canonicalized<'tcx, V>
+ where
+ V: TypeFoldable<'tcx>,
+ {
+ self.tcx.sess.perf_stats.queries_canonicalized.fetch_add(1, Ordering::Relaxed);
+
+ Canonicalizer::canonicalize(
+ value,
+ self,
+ self.tcx,
+ &CanonicalizeAllFreeRegionsPreservingUniverses,
+ query_state,
+ )
+ }
+
+ /// Canonicalizes a query *response* `V`. When we canonicalize a
+ /// query response, we only canonicalize unbound inference
+ /// variables, and we leave other free regions alone. So,
+ /// continuing with the example from `canonicalize_query`, if
+ /// there was an input query `T: Trait<'static>`, it would have
+ /// been canonicalized to
+ ///
+ /// ```text
+ /// T: Trait<'?0>
+ /// ```
+ ///
+ /// with a mapping M that maps `'?0` to `'static`. But if we found that there
+ /// exists only one possible impl of `Trait`, and it looks like
+ /// ```ignore (illustrative)
+ /// impl<T> Trait<'static> for T { .. }
+ /// ```
+ /// then we would prepare a query result R that (among other
+ /// things) includes a mapping to `'?0 := 'static`. When
+ /// canonicalizing this query result R, we would leave this
+ /// reference to `'static` alone.
+ ///
+ /// To get a good understanding of what is happening here, check
+ /// out the [chapter in the rustc dev guide][c].
+ ///
+ /// [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html#canonicalizing-the-query-result
+ pub fn canonicalize_response<V>(&self, value: V) -> Canonicalized<'tcx, V>
+ where
+ V: TypeFoldable<'tcx>,
+ {
+ let mut query_state = OriginalQueryValues::default();
+ Canonicalizer::canonicalize(
+ value,
+ self,
+ self.tcx,
+ &CanonicalizeQueryResponse,
+ &mut query_state,
+ )
+ }
+
+ pub fn canonicalize_user_type_annotation<V>(&self, value: V) -> Canonicalized<'tcx, V>
+ where
+ V: TypeFoldable<'tcx>,
+ {
+ let mut query_state = OriginalQueryValues::default();
+ Canonicalizer::canonicalize(
+ value,
+ self,
+ self.tcx,
+ &CanonicalizeUserTypeAnnotation,
+ &mut query_state,
+ )
+ }
+
+ /// A variant of `canonicalize_query` that does not
+ /// canonicalize `'static`. This is useful when
+ /// the query implementation can perform more efficient
+ /// handling of `'static` regions (e.g. trait evaluation).
+ pub fn canonicalize_query_keep_static<V>(
+ &self,
+ value: V,
+ query_state: &mut OriginalQueryValues<'tcx>,
+ ) -> Canonicalized<'tcx, V>
+ where
+ V: TypeFoldable<'tcx>,
+ {
+ self.tcx.sess.perf_stats.queries_canonicalized.fetch_add(1, Ordering::Relaxed);
+
+ Canonicalizer::canonicalize(
+ value,
+ self,
+ self.tcx,
+ &CanonicalizeFreeRegionsOtherThanStatic,
+ query_state,
+ )
+ }
+}
+
+/// Controls how we canonicalize "free regions" that are not inference
+/// variables. This depends on what we are canonicalizing *for* --
+/// e.g., if we are canonicalizing to create a query, we want to
+/// replace those with inference variables, since we want to make a
+/// maximally general query. But if we are canonicalizing a *query
+/// response*, then we don't typically replace free regions, as they
+/// must have been introduced from other parts of the system.
+trait CanonicalizeMode {
+ fn canonicalize_free_region<'tcx>(
+ &self,
+ canonicalizer: &mut Canonicalizer<'_, 'tcx>,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx>;
+
+ fn any(&self) -> bool;
+
+ // Whether we preserve the universes of variables.
+ fn preserve_universes(&self) -> bool;
+}
+
+struct CanonicalizeQueryResponse;
+
+impl CanonicalizeMode for CanonicalizeQueryResponse {
+ fn canonicalize_free_region<'tcx>(
+ &self,
+ canonicalizer: &mut Canonicalizer<'_, 'tcx>,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReFree(_)
+ | ty::ReErased
+ | ty::ReStatic
+ | ty::ReEmpty(ty::UniverseIndex::ROOT)
+ | ty::ReEarlyBound(..) => r,
+
+ ty::RePlaceholder(placeholder) => canonicalizer.canonical_var_for_region(
+ CanonicalVarInfo { kind: CanonicalVarKind::PlaceholderRegion(placeholder) },
+ r,
+ ),
+
+ ty::ReVar(vid) => {
+ let universe = canonicalizer.region_var_universe(vid);
+ canonicalizer.canonical_var_for_region(
+ CanonicalVarInfo { kind: CanonicalVarKind::Region(universe) },
+ r,
+ )
+ }
+
+ ty::ReEmpty(ui) => {
+ bug!("canonicalizing 'empty in universe {:?}", ui) // FIXME
+ }
+
+ _ => {
+ // Other than `'static` or `'empty`, the query
+ // response should be executing in a fully
+ // canonicalized environment, so there shouldn't be
+ // any other region names for it to come up with.
+ //
+ // rust-lang/rust#57464: `impl Trait` can leak local
+ // scopes (in a manner violating typeck). Therefore, use
+ // `delay_span_bug` to allow a type error rather than an ICE.
+ ty::tls::with(|tcx| {
+ tcx.sess.delay_span_bug(
+ rustc_span::DUMMY_SP,
+ &format!("unexpected region in query response: `{:?}`", r),
+ );
+ });
+ r
+ }
+ }
+ }
+
+ fn any(&self) -> bool {
+ false
+ }
+
+ fn preserve_universes(&self) -> bool {
+ true
+ }
+}
+
+struct CanonicalizeUserTypeAnnotation;
+
+impl CanonicalizeMode for CanonicalizeUserTypeAnnotation {
+ fn canonicalize_free_region<'tcx>(
+ &self,
+ canonicalizer: &mut Canonicalizer<'_, 'tcx>,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReErased | ty::ReStatic => r,
+ ty::ReVar(_) => canonicalizer.canonical_var_for_region_in_root_universe(r),
+ _ => {
+ // We only expect region names that the user can type.
+ bug!("unexpected region in query response: `{:?}`", r)
+ }
+ }
+ }
+
+ fn any(&self) -> bool {
+ false
+ }
+
+ fn preserve_universes(&self) -> bool {
+ false
+ }
+}
+
+struct CanonicalizeAllFreeRegions;
+
+impl CanonicalizeMode for CanonicalizeAllFreeRegions {
+ fn canonicalize_free_region<'tcx>(
+ &self,
+ canonicalizer: &mut Canonicalizer<'_, 'tcx>,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ canonicalizer.canonical_var_for_region_in_root_universe(r)
+ }
+
+ fn any(&self) -> bool {
+ true
+ }
+
+ fn preserve_universes(&self) -> bool {
+ false
+ }
+}
+
+struct CanonicalizeAllFreeRegionsPreservingUniverses;
+
+impl CanonicalizeMode for CanonicalizeAllFreeRegionsPreservingUniverses {
+ fn canonicalize_free_region<'tcx>(
+ &self,
+ canonicalizer: &mut Canonicalizer<'_, 'tcx>,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ let universe = canonicalizer.infcx.universe_of_region(r);
+ canonicalizer.canonical_var_for_region(
+ CanonicalVarInfo { kind: CanonicalVarKind::Region(universe) },
+ r,
+ )
+ }
+
+ fn any(&self) -> bool {
+ true
+ }
+
+ fn preserve_universes(&self) -> bool {
+ true
+ }
+}
+
+struct CanonicalizeFreeRegionsOtherThanStatic;
+
+impl CanonicalizeMode for CanonicalizeFreeRegionsOtherThanStatic {
+ fn canonicalize_free_region<'tcx>(
+ &self,
+ canonicalizer: &mut Canonicalizer<'_, 'tcx>,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ if r.is_static() { r } else { canonicalizer.canonical_var_for_region_in_root_universe(r) }
+ }
+
+ fn any(&self) -> bool {
+ true
+ }
+
+ fn preserve_universes(&self) -> bool {
+ false
+ }
+}
+
+struct Canonicalizer<'cx, 'tcx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ tcx: TyCtxt<'tcx>,
+ variables: SmallVec<[CanonicalVarInfo<'tcx>; 8]>,
+ query_state: &'cx mut OriginalQueryValues<'tcx>,
+ // Note that `indices` is only used once `var_values` is big enough
+ // to be heap-allocated.
+ indices: FxHashMap<GenericArg<'tcx>, BoundVar>,
+ canonicalize_mode: &'cx dyn CanonicalizeMode,
+ needs_canonical_flags: TypeFlags,
+
+ binder_index: ty::DebruijnIndex,
+}
+
+impl<'cx, 'tcx> TypeFolder<'tcx> for Canonicalizer<'cx, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T>(&mut self, t: ty::Binder<'tcx, T>) -> ty::Binder<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.binder_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.binder_index.shift_out(1);
+ t
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(index, ..) => {
+ if index >= self.binder_index {
+ bug!("escaping late-bound region during canonicalization");
+ } else {
+ r
+ }
+ }
+
+ ty::ReVar(vid) => {
+ let resolved_vid = self
+ .infcx
+ .inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .opportunistic_resolve_var(vid);
+ debug!(
+ "canonical: region var found with vid {:?}, \
+ opportunistically resolved to {:?}",
+                    vid, resolved_vid
+ );
+ let r = self.tcx.reuse_or_mk_region(r, ty::ReVar(resolved_vid));
+ self.canonicalize_mode.canonicalize_free_region(self, r)
+ }
+
+ ty::ReStatic
+ | ty::ReEarlyBound(..)
+ | ty::ReFree(_)
+ | ty::ReEmpty(_)
+ | ty::RePlaceholder(..)
+ | ty::ReErased => self.canonicalize_mode.canonicalize_free_region(self, r),
+ }
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match *t.kind() {
+ ty::Infer(ty::TyVar(vid)) => {
+ debug!("canonical: type var found with vid {:?}", vid);
+ match self.infcx.probe_ty_var(vid) {
+ // `t` could be a float / int variable; canonicalize that instead.
+ Ok(t) => {
+ debug!("(resolved to {:?})", t);
+ self.fold_ty(t)
+ }
+
+ // `TyVar(vid)` is unresolved, track its universe index in the canonicalized
+ // result.
+ Err(mut ui) => {
+ if !self.canonicalize_mode.preserve_universes() {
+ // FIXME: perf problem described in #55921.
+ ui = ty::UniverseIndex::ROOT;
+ }
+ self.canonicalize_ty_var(
+ CanonicalVarInfo {
+ kind: CanonicalVarKind::Ty(CanonicalTyVarKind::General(ui)),
+ },
+ t,
+ )
+ }
+ }
+ }
+
+ ty::Infer(ty::IntVar(_)) => self.canonicalize_ty_var(
+ CanonicalVarInfo { kind: CanonicalVarKind::Ty(CanonicalTyVarKind::Int) },
+ t,
+ ),
+
+ ty::Infer(ty::FloatVar(_)) => self.canonicalize_ty_var(
+ CanonicalVarInfo { kind: CanonicalVarKind::Ty(CanonicalTyVarKind::Float) },
+ t,
+ ),
+
+ ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("encountered a fresh type during canonicalization")
+ }
+
+ ty::Placeholder(placeholder) => self.canonicalize_ty_var(
+ CanonicalVarInfo { kind: CanonicalVarKind::PlaceholderTy(placeholder) },
+ t,
+ ),
+
+ ty::Bound(debruijn, _) => {
+ if debruijn >= self.binder_index {
+ bug!("escaping bound type during canonicalization")
+ } else {
+ t
+ }
+ }
+
+ ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::Adt(..)
+ | ty::Str
+ | ty::Error(_)
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Dynamic(..)
+ | ty::Never
+ | ty::Tuple(..)
+ | ty::Projection(..)
+ | ty::Foreign(..)
+ | ty::Param(..)
+ | ty::Opaque(..) => {
+ if t.flags().intersects(self.needs_canonical_flags) {
+ t.super_fold_with(self)
+ } else {
+ t
+ }
+ }
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ match ct.kind() {
+ ty::ConstKind::Infer(InferConst::Var(vid)) => {
+ debug!("canonical: const var found with vid {:?}", vid);
+ match self.infcx.probe_const_var(vid) {
+ Ok(c) => {
+ debug!("(resolved to {:?})", c);
+ return self.fold_const(c);
+ }
+
+ // `ConstVar(vid)` is unresolved, track its universe index in the
+ // canonicalized result
+ Err(mut ui) => {
+ if !self.canonicalize_mode.preserve_universes() {
+ // FIXME: perf problem described in #55921.
+ ui = ty::UniverseIndex::ROOT;
+ }
+ return self.canonicalize_const_var(
+ CanonicalVarInfo { kind: CanonicalVarKind::Const(ui, ct.ty()) },
+ ct,
+ );
+ }
+ }
+ }
+ ty::ConstKind::Infer(InferConst::Fresh(_)) => {
+ bug!("encountered a fresh const during canonicalization")
+ }
+ ty::ConstKind::Bound(debruijn, _) => {
+ if debruijn >= self.binder_index {
+                    bug!("escaping bound const during canonicalization")
+ } else {
+ return ct;
+ }
+ }
+ ty::ConstKind::Placeholder(placeholder) => {
+ return self.canonicalize_const_var(
+ CanonicalVarInfo {
+ kind: CanonicalVarKind::PlaceholderConst(placeholder, ct.ty()),
+ },
+ ct,
+ );
+ }
+ _ => {}
+ }
+
+ let flags = FlagComputation::for_const(ct);
+ if flags.intersects(self.needs_canonical_flags) { ct.super_fold_with(self) } else { ct }
+ }
+}
+
+impl<'cx, 'tcx> Canonicalizer<'cx, 'tcx> {
+ /// The main `canonicalize` method, shared impl of
+ /// `canonicalize_query` and `canonicalize_response`.
+ fn canonicalize<V>(
+ value: V,
+ infcx: &InferCtxt<'_, 'tcx>,
+ tcx: TyCtxt<'tcx>,
+ canonicalize_region_mode: &dyn CanonicalizeMode,
+ query_state: &mut OriginalQueryValues<'tcx>,
+ ) -> Canonicalized<'tcx, V>
+ where
+ V: TypeFoldable<'tcx>,
+ {
+ let needs_canonical_flags = if canonicalize_region_mode.any() {
+ TypeFlags::NEEDS_INFER |
+ TypeFlags::HAS_FREE_REGIONS | // `HAS_RE_PLACEHOLDER` implies `HAS_FREE_REGIONS`
+ TypeFlags::HAS_TY_PLACEHOLDER |
+ TypeFlags::HAS_CT_PLACEHOLDER
+ } else {
+ TypeFlags::NEEDS_INFER
+ | TypeFlags::HAS_RE_PLACEHOLDER
+ | TypeFlags::HAS_TY_PLACEHOLDER
+ | TypeFlags::HAS_CT_PLACEHOLDER
+ };
+
+ // Fast path: nothing that needs to be canonicalized.
+ if !value.has_type_flags(needs_canonical_flags) {
+ let canon_value = Canonical {
+ max_universe: ty::UniverseIndex::ROOT,
+ variables: List::empty(),
+ value,
+ };
+ return canon_value;
+ }
+
+ let mut canonicalizer = Canonicalizer {
+ infcx,
+ tcx,
+ canonicalize_mode: canonicalize_region_mode,
+ needs_canonical_flags,
+ variables: SmallVec::new(),
+ query_state,
+ indices: FxHashMap::default(),
+ binder_index: ty::INNERMOST,
+ };
+ let out_value = value.fold_with(&mut canonicalizer);
+
+ // Once we have canonicalized `out_value`, it should not
+ // contain anything that ties it to this inference context
+ // anymore.
+ debug_assert!(!out_value.needs_infer() && !out_value.has_placeholders());
+
+ let canonical_variables =
+ tcx.intern_canonical_var_infos(&canonicalizer.universe_canonicalized_variables());
+
+ let max_universe = canonical_variables
+ .iter()
+ .map(|cvar| cvar.universe())
+ .max()
+ .unwrap_or(ty::UniverseIndex::ROOT);
+
+ Canonical { max_universe, variables: canonical_variables, value: out_value }
+ }
+
+ /// Creates a canonical variable replacing `kind` from the input,
+ /// or returns an existing variable if `kind` has already been
+ /// seen. `kind` is expected to be an unbound variable (or
+ /// potentially a free region).
+ fn canonical_var(&mut self, info: CanonicalVarInfo<'tcx>, kind: GenericArg<'tcx>) -> BoundVar {
+ let Canonicalizer { variables, query_state, indices, .. } = self;
+
+ let var_values = &mut query_state.var_values;
+
+ let universe = info.universe();
+ if universe != ty::UniverseIndex::ROOT {
+ assert!(self.canonicalize_mode.preserve_universes());
+
+ // Insert universe into the universe map. To preserve the order of the
+ // universes in the value being canonicalized, we don't update the
+ // universe in `info` until we have finished canonicalizing.
+ match query_state.universe_map.binary_search(&universe) {
+ Err(idx) => query_state.universe_map.insert(idx, universe),
+ Ok(_) => {}
+ }
+ }
+
+ // This code is hot. `variables` and `var_values` are usually small
+ // (fewer than 8 elements ~95% of the time). They are SmallVec's to
+ // avoid allocations in those cases. We also don't use `indices` to
+ // determine if a kind has been seen before until the limit of 8 has
+ // been exceeded, to also avoid allocations for `indices`.
+ if !var_values.spilled() {
+ // `var_values` is stack-allocated. `indices` isn't used yet. Do a
+ // direct linear search of `var_values`.
+ if let Some(idx) = var_values.iter().position(|&k| k == kind) {
+ // `kind` is already present in `var_values`.
+ BoundVar::new(idx)
+ } else {
+ // `kind` isn't present in `var_values`. Append it. Likewise
+ // for `info` and `variables`.
+ variables.push(info);
+ var_values.push(kind);
+ assert_eq!(variables.len(), var_values.len());
+
+ // If `var_values` has become big enough to be heap-allocated,
+ // fill up `indices` to facilitate subsequent lookups.
+ if var_values.spilled() {
+ assert!(indices.is_empty());
+ *indices = var_values
+ .iter()
+ .enumerate()
+ .map(|(i, &kind)| (kind, BoundVar::new(i)))
+ .collect();
+ }
+                // The new canonical variable is the index of the appended element.
+ BoundVar::new(var_values.len() - 1)
+ }
+ } else {
+ // `var_values` is large. Do a hashmap search via `indices`.
+ *indices.entry(kind).or_insert_with(|| {
+ variables.push(info);
+ var_values.push(kind);
+ assert_eq!(variables.len(), var_values.len());
+ BoundVar::new(variables.len() - 1)
+ })
+ }
+ }
+
+ /// Replaces the universe indexes used in `var_values` with their index in
+ /// `query_state.universe_map`. This minimizes the maximum universe used in
+ /// the canonicalized value.
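+    ///
+    /// For example, if the variables mention only universes `U0`, `U3`,
+    /// and `U7`, then `universe_map` is `[U0, U3, U7]` and those
+    /// universes are renumbered to `U0`, `U1`, and `U2` respectively.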
+ fn universe_canonicalized_variables(self) -> SmallVec<[CanonicalVarInfo<'tcx>; 8]> {
+ if self.query_state.universe_map.len() == 1 {
+ return self.variables;
+ }
+
+ let reverse_universe_map: FxHashMap<ty::UniverseIndex, ty::UniverseIndex> = self
+ .query_state
+ .universe_map
+ .iter()
+ .enumerate()
+ .map(|(idx, universe)| (*universe, ty::UniverseIndex::from_usize(idx)))
+ .collect();
+
+ self.variables
+ .iter()
+ .map(|v| CanonicalVarInfo {
+ kind: match v.kind {
+ CanonicalVarKind::Ty(CanonicalTyVarKind::Int | CanonicalTyVarKind::Float) => {
+ return *v;
+ }
+ CanonicalVarKind::Ty(CanonicalTyVarKind::General(u)) => {
+ CanonicalVarKind::Ty(CanonicalTyVarKind::General(reverse_universe_map[&u]))
+ }
+ CanonicalVarKind::Region(u) => {
+ CanonicalVarKind::Region(reverse_universe_map[&u])
+ }
+ CanonicalVarKind::Const(u, t) => {
+ CanonicalVarKind::Const(reverse_universe_map[&u], t)
+ }
+ CanonicalVarKind::PlaceholderTy(placeholder) => {
+ CanonicalVarKind::PlaceholderTy(ty::Placeholder {
+ universe: reverse_universe_map[&placeholder.universe],
+ ..placeholder
+ })
+ }
+ CanonicalVarKind::PlaceholderRegion(placeholder) => {
+ CanonicalVarKind::PlaceholderRegion(ty::Placeholder {
+ universe: reverse_universe_map[&placeholder.universe],
+ ..placeholder
+ })
+ }
+ CanonicalVarKind::PlaceholderConst(placeholder, t) => {
+ CanonicalVarKind::PlaceholderConst(
+ ty::Placeholder {
+ universe: reverse_universe_map[&placeholder.universe],
+ ..placeholder
+ },
+ t,
+ )
+ }
+ },
+ })
+ .collect()
+ }
+
+ /// Shorthand helper that creates a canonical region variable for
+ /// `r` (always in the root universe). The reason that we always
+ /// put these variables into the root universe is because this
+ /// method is used during **query construction:** in that case, we
+ /// are taking all the regions and just putting them into the most
+ /// generic context we can. This may generate solutions that don't
+ /// fit (e.g., that equate some region variable with a placeholder
+ /// it can't name) on the caller side, but that's ok, the caller
+ /// can figure that out. In the meantime, it maximizes our
+ /// caching.
+ ///
+ /// (This works because unification never fails -- and hence trait
+ /// selection is never affected -- due to a universe mismatch.)
+ fn canonical_var_for_region_in_root_universe(
+ &mut self,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ self.canonical_var_for_region(
+ CanonicalVarInfo { kind: CanonicalVarKind::Region(ty::UniverseIndex::ROOT) },
+ r,
+ )
+ }
+
+ /// Returns the universe in which `vid` is defined.
+ fn region_var_universe(&self, vid: ty::RegionVid) -> ty::UniverseIndex {
+ self.infcx.inner.borrow_mut().unwrap_region_constraints().var_universe(vid)
+ }
+
+ /// Creates a canonical variable (with the given `info`)
+    /// representing the region `r`, and returns a region referencing it.
+ fn canonical_var_for_region(
+ &mut self,
+ info: CanonicalVarInfo<'tcx>,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ let var = self.canonical_var(info, r.into());
+ let br = ty::BoundRegion { var, kind: ty::BrAnon(var.as_u32()) };
+ let region = ty::ReLateBound(self.binder_index, br);
+ self.tcx().mk_region(region)
+ }
+
+ /// Given a type variable `ty_var` of the given kind, first check
+ /// if `ty_var` is bound to anything; if so, canonicalize
+ /// *that*. Otherwise, create a new canonical variable for
+ /// `ty_var`.
+ fn canonicalize_ty_var(&mut self, info: CanonicalVarInfo<'tcx>, ty_var: Ty<'tcx>) -> Ty<'tcx> {
+ let infcx = self.infcx;
+ let bound_to = infcx.shallow_resolve(ty_var);
+ if bound_to != ty_var {
+ self.fold_ty(bound_to)
+ } else {
+ let var = self.canonical_var(info, ty_var.into());
+ self.tcx().mk_ty(ty::Bound(self.binder_index, var.into()))
+ }
+ }
+
+    /// Given a const variable `const_var` of the given kind, first check
+ /// if `const_var` is bound to anything; if so, canonicalize
+ /// *that*. Otherwise, create a new canonical variable for
+ /// `const_var`.
+ fn canonicalize_const_var(
+ &mut self,
+ info: CanonicalVarInfo<'tcx>,
+ const_var: ty::Const<'tcx>,
+ ) -> ty::Const<'tcx> {
+ let infcx = self.infcx;
+ let bound_to = infcx.shallow_resolve(const_var);
+ if bound_to != const_var {
+ self.fold_const(bound_to)
+ } else {
+ let var = self.canonical_var(info, const_var.into());
+ self.tcx().mk_const(ty::ConstS {
+ kind: ty::ConstKind::Bound(self.binder_index, var),
+ ty: self.fold_ty(const_var.ty()),
+ })
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/canonical/mod.rs b/compiler/rustc_infer/src/infer/canonical/mod.rs
new file mode 100644
index 000000000..a9294a85e
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/canonical/mod.rs
@@ -0,0 +1,159 @@
+//! **Canonicalization** is the key to constructing a query in the
+//! middle of type inference. Ordinarily, it is not possible to store
+//! types from type inference in query keys, because they contain
+//! references to inference variables whose lifetimes are too short
+//! and so forth. Canonicalizing a value T1 using `canonicalize_query`
+//! produces two things:
+//!
+//! - a value T2 where each unbound inference variable has been
+//! replaced with a **canonical variable**;
+//! - a map M (of type `CanonicalVarValues`) from those canonical
+//! variables back to the original.
+//!
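+//! For example, canonicalizing `T1 = Vec<?A>` (where `?A` is an
+//! unbound inference variable) produces `T2 = Vec<?0>` together with
+//! the map `M = { ?0 -> ?A }`.
+//!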
+//! We can then do queries using T2. These will give back constraints
+//! on the canonical variables which can be translated, using the map
+//! M, into constraints in our source context. This process of
+//! translating the results back is done by the
+//! `instantiate_query_result` method.
+//!
+//! For a more detailed look at what is happening here, check
+//! out the [chapter in the rustc dev guide][c].
+//!
+//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
+
+use crate::infer::{ConstVariableOrigin, ConstVariableOriginKind};
+use crate::infer::{InferCtxt, RegionVariableOrigin, TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::{self, BoundVar, List};
+use rustc_span::source_map::Span;
+
+pub use rustc_middle::infer::canonical::*;
+use substitute::CanonicalExt;
+
+mod canonicalizer;
+pub mod query_response;
+mod substitute;
+
+impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
+ /// Creates a substitution S for the canonical value with fresh
+ /// inference variables and applies it to the canonical value.
+ /// Returns both the instantiated result *and* the substitution S.
+ ///
+ /// This is only meant to be invoked as part of constructing an
+ /// inference context at the start of a query (see
+ /// `InferCtxtBuilder::enter_with_canonical`). It basically
+ /// brings the canonical value "into scope" within your new infcx.
+ ///
+ /// At the end of processing, the substitution S (once
+ /// canonicalized) then represents the values that you computed
+ /// for each of the canonical inputs to your query.
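+    ///
+    /// For example (illustrative): instantiating the canonical value
+    /// `Vec<?0>` creates a fresh inference variable `?T` and returns
+    /// `(Vec<?T>, S)`, where the substitution `S` maps `?0` to `?T`.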
+ pub fn instantiate_canonical_with_fresh_inference_vars<T>(
+ &self,
+ span: Span,
+ canonical: &Canonical<'tcx, T>,
+ ) -> (T, CanonicalVarValues<'tcx>)
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ // For each universe that is referred to in the incoming
+ // query, create a universe in our local inference context. In
+ // practice, as of this writing, all queries have no universes
+ // in them, so this code has no effect, but it is looking
+ // forward to the day when we *do* want to carry universes
+ // through into queries.
+ let universes: IndexVec<ty::UniverseIndex, _> = std::iter::once(ty::UniverseIndex::ROOT)
+ .chain((0..canonical.max_universe.as_u32()).map(|_| self.create_next_universe()))
+ .collect();
+
+ let canonical_inference_vars =
+ self.instantiate_canonical_vars(span, canonical.variables, |ui| universes[ui]);
+ let result = canonical.substitute(self.tcx, &canonical_inference_vars);
+ (result, canonical_inference_vars)
+ }
+
+ /// Given the "infos" about the canonical variables from some
+    /// canonical value, creates fresh variables with the same
+ /// characteristics (see `instantiate_canonical_var` for
+ /// details). You can then use `substitute` to instantiate the
+ /// canonical variable with these inference variables.
+ fn instantiate_canonical_vars(
+ &self,
+ span: Span,
+ variables: &List<CanonicalVarInfo<'tcx>>,
+ universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
+ ) -> CanonicalVarValues<'tcx> {
+ let var_values: IndexVec<BoundVar, GenericArg<'tcx>> = variables
+ .iter()
+ .map(|info| self.instantiate_canonical_var(span, info, &universe_map))
+ .collect();
+
+ CanonicalVarValues { var_values }
+ }
+
+ /// Given the "info" about a canonical variable, creates a fresh
+ /// variable for it. If this is an existentially quantified
+ /// variable, then you'll get a new inference variable; if it is a
+ /// universally quantified variable, you get a placeholder.
+ fn instantiate_canonical_var(
+ &self,
+ span: Span,
+ cv_info: CanonicalVarInfo<'tcx>,
+ universe_map: impl Fn(ty::UniverseIndex) -> ty::UniverseIndex,
+ ) -> GenericArg<'tcx> {
+ match cv_info.kind {
+ CanonicalVarKind::Ty(ty_kind) => {
+ let ty = match ty_kind {
+ CanonicalTyVarKind::General(ui) => self.next_ty_var_in_universe(
+ TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span },
+ universe_map(ui),
+ ),
+
+ CanonicalTyVarKind::Int => self.next_int_var(),
+
+ CanonicalTyVarKind::Float => self.next_float_var(),
+ };
+ ty.into()
+ }
+
+ CanonicalVarKind::PlaceholderTy(ty::PlaceholderType { universe, name }) => {
+ let universe_mapped = universe_map(universe);
+ let placeholder_mapped = ty::PlaceholderType { universe: universe_mapped, name };
+ self.tcx.mk_ty(ty::Placeholder(placeholder_mapped)).into()
+ }
+
+ CanonicalVarKind::Region(ui) => self
+ .next_region_var_in_universe(
+ RegionVariableOrigin::MiscVariable(span),
+ universe_map(ui),
+ )
+ .into(),
+
+ CanonicalVarKind::PlaceholderRegion(ty::PlaceholderRegion { universe, name }) => {
+ let universe_mapped = universe_map(universe);
+ let placeholder_mapped = ty::PlaceholderRegion { universe: universe_mapped, name };
+ self.tcx.mk_region(ty::RePlaceholder(placeholder_mapped)).into()
+ }
+
+ CanonicalVarKind::Const(ui, ty) => self
+ .next_const_var_in_universe(
+ ty,
+ ConstVariableOrigin { kind: ConstVariableOriginKind::MiscVariable, span },
+ universe_map(ui),
+ )
+ .into(),
+
+ CanonicalVarKind::PlaceholderConst(ty::PlaceholderConst { universe, name }, ty) => {
+ let universe_mapped = universe_map(universe);
+ let placeholder_mapped = ty::PlaceholderConst { universe: universe_mapped, name };
+ self.tcx
+ .mk_const(ty::ConstS {
+ kind: ty::ConstKind::Placeholder(placeholder_mapped),
+ ty,
+ })
+ .into()
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/canonical/query_response.rs b/compiler/rustc_infer/src/infer/canonical/query_response.rs
new file mode 100644
index 000000000..8dc20544f
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/canonical/query_response.rs
@@ -0,0 +1,741 @@
+//! This module contains the code to instantiate a "query result", and
+//! in particular to extract the resulting region obligations and
+//! encode them into the query response.
+//!
+//! For an overview of what canonicalization is and how it fits into
+//! rustc, check out the [chapter in the rustc dev guide][c].
+//!
+//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
+
+use crate::infer::canonical::substitute::{substitute_value, CanonicalExt};
+use crate::infer::canonical::{
+ Canonical, CanonicalVarValues, CanonicalizedQueryResponse, Certainty, OriginalQueryValues,
+ QueryOutlivesConstraint, QueryRegionConstraints, QueryResponse,
+};
+use crate::infer::nll_relate::{NormalizationStrategy, TypeRelating, TypeRelatingDelegate};
+use crate::infer::region_constraints::{Constraint, RegionConstraintData};
+use crate::infer::{InferCtxt, InferOk, InferResult, NllRegionVariableOrigin};
+use crate::traits::query::{Fallible, NoSolution};
+use crate::traits::TraitEngine;
+use crate::traits::{Obligation, ObligationCause, PredicateObligation};
+use rustc_data_structures::captures::Captures;
+use rustc_index::vec::Idx;
+use rustc_index::vec::IndexVec;
+use rustc_middle::arena::ArenaAllocatable;
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::relate::TypeRelation;
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
+use rustc_middle::ty::{self, BoundVar, Const, ToPredicate, Ty, TyCtxt};
+use rustc_span::Span;
+use std::fmt::Debug;
+use std::iter;
+
+impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
+ /// This method is meant to be invoked as the final step of a canonical query
+ /// implementation. It is given:
+ ///
+ /// - the instantiated variables `inference_vars` created from the query key
+ /// - the result `answer` of the query
+ /// - a fulfillment context `fulfill_cx` that may contain various obligations which
+ /// have yet to be proven.
+ ///
+ /// Given this, the function will process the obligations pending
+ /// in `fulfill_cx`:
+ ///
+ /// - If all the obligations can be proven successfully, it will
+ /// package up any resulting region obligations (extracted from
+ /// `infcx`) along with the fully resolved value `answer` into a
+ /// query result (which is then itself canonicalized).
+ /// - If some obligations can be neither proven nor disproven, then
+ /// the same thing happens, but the resulting query is marked as ambiguous.
+ /// - Finally, if any of the obligations result in a hard error,
+ /// then `Err(NoSolution)` is returned.
+ #[instrument(skip(self, inference_vars, answer, fulfill_cx), level = "trace")]
+ pub fn make_canonicalized_query_response<T>(
+ &self,
+ inference_vars: CanonicalVarValues<'tcx>,
+ answer: T,
+ fulfill_cx: &mut dyn TraitEngine<'tcx>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, T>>
+ where
+ T: Debug + TypeFoldable<'tcx>,
+ Canonical<'tcx, QueryResponse<'tcx, T>>: ArenaAllocatable<'tcx>,
+ {
+ let query_response = self.make_query_response(inference_vars, answer, fulfill_cx)?;
+ let canonical_result = self.canonicalize_response(query_response);
+
+ debug!("canonical_result = {:#?}", canonical_result);
+
+ Ok(self.tcx.arena.alloc(canonical_result))
+ }
+
+ /// A version of `make_canonicalized_query_response` that does
+ /// not pack in obligations, for contexts that want to drop
+ /// pending obligations instead of treating them as an ambiguity (e.g.
+ /// typeck "probing" contexts).
+ ///
+ /// If you DO want to keep track of pending obligations (which
+ /// include all region obligations, so this includes all cases
+ /// that care about regions) with this function, you have to
+ /// do it yourself, by e.g., having them be a part of the answer.
+ pub fn make_query_response_ignoring_pending_obligations<T>(
+ &self,
+ inference_vars: CanonicalVarValues<'tcx>,
+ answer: T,
+ ) -> Canonical<'tcx, QueryResponse<'tcx, T>>
+ where
+ T: Debug + TypeFoldable<'tcx>,
+ {
+ self.canonicalize_response(QueryResponse {
+ var_values: inference_vars,
+ region_constraints: QueryRegionConstraints::default(),
+            certainty: Certainty::Proven, // Ambiguities are OK; the caller chose to ignore them.
+ opaque_types: vec![],
+ value: answer,
+ })
+ }
+
+ /// Helper for `make_canonicalized_query_response` that does
+ /// everything up until the final canonicalization.
+ #[instrument(skip(self, fulfill_cx), level = "debug")]
+ fn make_query_response<T>(
+ &self,
+ inference_vars: CanonicalVarValues<'tcx>,
+ answer: T,
+ fulfill_cx: &mut dyn TraitEngine<'tcx>,
+ ) -> Result<QueryResponse<'tcx, T>, NoSolution>
+ where
+ T: Debug + TypeFoldable<'tcx>,
+ {
+ let tcx = self.tcx;
+
+ // Select everything, returning errors.
+ let true_errors = fulfill_cx.select_where_possible(self);
+ debug!("true_errors = {:#?}", true_errors);
+
+ if !true_errors.is_empty() {
+ // FIXME -- we don't indicate *why* we failed to solve
+ debug!("make_query_response: true_errors={:#?}", true_errors);
+ return Err(NoSolution);
+ }
+
+ // Anything left unselected *now* must be an ambiguity.
+ let ambig_errors = fulfill_cx.select_all_or_error(self);
+ debug!("ambig_errors = {:#?}", ambig_errors);
+
+ let region_obligations = self.take_registered_region_obligations();
+ let region_constraints = self.with_region_constraints(|region_constraints| {
+ make_query_region_constraints(
+ tcx,
+ region_obligations.iter().map(|r_o| (r_o.sup_type, r_o.sub_region)),
+ region_constraints,
+ )
+ });
+
+ let certainty =
+ if ambig_errors.is_empty() { Certainty::Proven } else { Certainty::Ambiguous };
+
+ let opaque_types = self.take_opaque_types_for_query_response();
+
+ Ok(QueryResponse {
+ var_values: inference_vars,
+ region_constraints,
+ certainty,
+ value: answer,
+ opaque_types,
+ })
+ }
+
+ fn take_opaque_types_for_query_response(&self) -> Vec<(Ty<'tcx>, Ty<'tcx>)> {
+ self.inner
+ .borrow_mut()
+ .opaque_type_storage
+ .take_opaque_types()
+ .into_iter()
+ .map(|(k, v)| (self.tcx.mk_opaque(k.def_id.to_def_id(), k.substs), v.hidden_type.ty))
+ .collect()
+ }
+
+ /// Given the (canonicalized) result to a canonical query,
+ /// instantiates the result so it can be used, plugging in the
+ /// values from the canonical query. (Note that the result may
+ /// have been ambiguous; you should check the certainty level of
+ /// the query before applying this function.)
+ ///
+ /// To get a good understanding of what is happening here, check
+ /// out the [chapter in the rustc dev guide][c].
+ ///
+ /// [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html#processing-the-canonicalized-query-result
+ pub fn instantiate_query_response_and_region_obligations<R>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ original_values: &OriginalQueryValues<'tcx>,
+ query_response: &Canonical<'tcx, QueryResponse<'tcx, R>>,
+ ) -> InferResult<'tcx, R>
+ where
+ R: Debug + TypeFoldable<'tcx>,
+ {
+ let InferOk { value: result_subst, mut obligations } =
+ self.query_response_substitution(cause, param_env, original_values, query_response)?;
+
+ obligations.extend(self.query_outlives_constraints_into_obligations(
+ cause,
+ param_env,
+ &query_response.value.region_constraints.outlives,
+ &result_subst,
+ ));
+
+ let user_result: R =
+ query_response.substitute_projected(self.tcx, &result_subst, |q_r| q_r.value.clone());
+
+ Ok(InferOk { value: user_result, obligations })
+ }
+
+ /// An alternative to
+ /// `instantiate_query_response_and_region_obligations` that is more
+ /// efficient for NLL. NLL is a bit more advanced in the
+ /// "transition to chalk" than the rest of the compiler. During
+ /// the NLL type check, all of the "processing" of types and
+ /// things happens in queries -- the NLL checker itself is only
+ /// interested in the region obligations (`'a: 'b` or `T: 'b`)
+ /// that come out of these queries, which it wants to convert into
+ /// MIR-based constraints and solve. Therefore, it is most
+ /// convenient for the NLL Type Checker to **directly consume**
+ /// the `QueryOutlivesConstraint` values that arise from doing a
+    /// query. This is in contrast to other parts of the compiler, which
+ /// would prefer for those `QueryOutlivesConstraint` to be converted
+ /// into the older infcx-style constraints (e.g., calls to
+ /// `sub_regions` or `register_region_obligation`).
+ ///
+ /// Therefore, `instantiate_nll_query_response_and_region_obligations` performs the same
+ /// basic operations as `instantiate_query_response_and_region_obligations` but
+ /// it returns its result differently:
+ ///
+ /// - It creates a substitution `S` that maps from the original
+ /// query variables to the values computed in the query
+ /// result. If any errors arise, they are propagated back as an
+ /// `Err` result.
+ /// - In the case of a successful substitution, we will append
+ /// `QueryOutlivesConstraint` values onto the
+ /// `output_query_region_constraints` vector for the solver to
+ /// use (if an error arises, some values may also be pushed, but
+ /// they should be ignored).
+ /// - It **can happen** (though it rarely does currently) that
+ /// equating types and things will give rise to subobligations
+ /// that must be processed. In this case, those subobligations
+ /// are propagated back in the return value.
+ /// - Finally, the query result (of type `R`) is propagated back,
+ /// after applying the substitution `S`.
+ pub fn instantiate_nll_query_response_and_region_obligations<R>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ original_values: &OriginalQueryValues<'tcx>,
+ query_response: &Canonical<'tcx, QueryResponse<'tcx, R>>,
+ output_query_region_constraints: &mut QueryRegionConstraints<'tcx>,
+ ) -> InferResult<'tcx, R>
+ where
+ R: Debug + TypeFoldable<'tcx>,
+ {
+ let InferOk { value: result_subst, mut obligations } = self
+ .query_response_substitution_guess(cause, param_env, original_values, query_response)?;
+
+ // Compute `QueryOutlivesConstraint` values that unify each of
+ // the original values `v_o` that was canonicalized into a
+ // variable...
+
+ for (index, original_value) in original_values.var_values.iter().enumerate() {
+ // ...with the value `v_r` of that variable from the query.
+ let result_value = query_response.substitute_projected(self.tcx, &result_subst, |v| {
+ v.var_values[BoundVar::new(index)]
+ });
+ match (original_value.unpack(), result_value.unpack()) {
+ (GenericArgKind::Lifetime(re1), GenericArgKind::Lifetime(re2))
+ if re1.is_erased() && re2.is_erased() =>
+ {
+ // No action needed.
+ }
+
+ (GenericArgKind::Lifetime(v_o), GenericArgKind::Lifetime(v_r)) => {
+ // To make `v_o = v_r`, we emit `v_o: v_r` and `v_r: v_o`.
+ if v_o != v_r {
+ output_query_region_constraints
+ .outlives
+ .push(ty::Binder::dummy(ty::OutlivesPredicate(v_o.into(), v_r)));
+ output_query_region_constraints
+ .outlives
+ .push(ty::Binder::dummy(ty::OutlivesPredicate(v_r.into(), v_o)));
+ }
+ }
+
+ (GenericArgKind::Type(v1), GenericArgKind::Type(v2)) => {
+ TypeRelating::new(
+ self,
+ QueryTypeRelatingDelegate {
+ infcx: self,
+ param_env,
+ cause,
+ obligations: &mut obligations,
+ },
+ ty::Variance::Invariant,
+ )
+ .relate(v1, v2)?;
+ }
+
+ (GenericArgKind::Const(v1), GenericArgKind::Const(v2)) => {
+ TypeRelating::new(
+ self,
+ QueryTypeRelatingDelegate {
+ infcx: self,
+ param_env,
+ cause,
+ obligations: &mut obligations,
+ },
+ ty::Variance::Invariant,
+ )
+ .relate(v1, v2)?;
+ }
+
+ _ => {
+ bug!("kind mismatch, cannot unify {:?} and {:?}", original_value, result_value);
+ }
+ }
+ }
+
+ // ...also include the other query region constraints from the query.
+ output_query_region_constraints.outlives.extend(
+ query_response.value.region_constraints.outlives.iter().filter_map(|&r_c| {
+ let r_c = substitute_value(self.tcx, &result_subst, r_c);
+
+ // Screen out `'a: 'a` cases -- we skip the binder here but
+ // only compare the inner values to one another, so they are still at
+ // consistent binding levels.
+ let ty::OutlivesPredicate(k1, r2) = r_c.skip_binder();
+ if k1 != r2.into() { Some(r_c) } else { None }
+ }),
+ );
+
+ // ...also include the query member constraints.
+ output_query_region_constraints.member_constraints.extend(
+ query_response
+ .value
+ .region_constraints
+ .member_constraints
+ .iter()
+ .map(|p_c| substitute_value(self.tcx, &result_subst, p_c.clone())),
+ );
+
+ let user_result: R =
+ query_response.substitute_projected(self.tcx, &result_subst, |q_r| q_r.value.clone());
+
+ Ok(InferOk { value: user_result, obligations })
+ }
+
+ /// Given the original values and the (canonicalized) result from
+ /// computing a query, returns a substitution that can be applied
+ /// to the query result to convert the result back into the
+ /// original namespace.
+ ///
+ /// The substitution also comes accompanied with subobligations
+ /// that arose from unification; these might occur if (for
+ /// example) we are doing lazy normalization and the value
+ /// assigned to a type variable is unified with an unnormalized
+ /// projection.
+ fn query_response_substitution<R>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ original_values: &OriginalQueryValues<'tcx>,
+ query_response: &Canonical<'tcx, QueryResponse<'tcx, R>>,
+ ) -> InferResult<'tcx, CanonicalVarValues<'tcx>>
+ where
+ R: Debug + TypeFoldable<'tcx>,
+ {
+ debug!(
+ "query_response_substitution(original_values={:#?}, query_response={:#?})",
+ original_values, query_response,
+ );
+
+ let mut value = self.query_response_substitution_guess(
+ cause,
+ param_env,
+ original_values,
+ query_response,
+ )?;
+
+ value.obligations.extend(
+ self.unify_query_response_substitution_guess(
+ cause,
+ param_env,
+ original_values,
+ &value.value,
+ query_response,
+ )?
+ .into_obligations(),
+ );
+
+ Ok(value)
+ }
+
+ /// Given the original values and the (canonicalized) result from
+ /// computing a query, returns a **guess** at a substitution that
+ /// can be applied to the query result to convert the result back
+ /// into the original namespace. This is called a **guess**
+ /// because it uses a quick heuristic to find the values for each
+ /// canonical variable; if that quick heuristic fails, then we
+ /// will instantiate fresh inference variables for each canonical
+    /// variable instead. Therefore, the result of this method must be
+    /// properly unified with the actual query result afterwards; see
+    /// `unify_query_response_substitution_guess`.
+ fn query_response_substitution_guess<R>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ original_values: &OriginalQueryValues<'tcx>,
+ query_response: &Canonical<'tcx, QueryResponse<'tcx, R>>,
+ ) -> InferResult<'tcx, CanonicalVarValues<'tcx>>
+ where
+ R: Debug + TypeFoldable<'tcx>,
+ {
+ debug!(
+ "query_response_substitution_guess(original_values={:#?}, query_response={:#?})",
+ original_values, query_response,
+ );
+
+ // For each new universe created in the query result that did
+ // not appear in the original query, create a local
+ // superuniverse.
+ let mut universe_map = original_values.universe_map.clone();
+ let num_universes_in_query = original_values.universe_map.len();
+ let num_universes_in_response = query_response.max_universe.as_usize() + 1;
+ for _ in num_universes_in_query..num_universes_in_response {
+ universe_map.push(self.create_next_universe());
+ }
+ assert!(!universe_map.is_empty()); // always have the root universe
+ assert_eq!(universe_map[ty::UniverseIndex::ROOT.as_usize()], ty::UniverseIndex::ROOT);
+
+ // Every canonical query result includes values for each of
+ // the inputs to the query. Therefore, we begin by unifying
+ // these values with the original inputs that were
+ // canonicalized.
+ let result_values = &query_response.value.var_values;
+ assert_eq!(original_values.var_values.len(), result_values.len());
+
+ // Quickly try to find initial values for the canonical
+ // variables in the result in terms of the query. We do this
+ // by iterating down the values that the query gave to each of
+ // the canonical inputs. If we find that one of those values
+ // is directly equal to one of the canonical variables in the
+        // result, then we can use the corresponding value from the
+        // input. See the example below.
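+        //
+        // Example (illustrative): suppose the original query values
+        // were `[?A, 'static, ?B]` and the query answered with the
+        // values `[Vec<?0>, '?1, ?0]` for its own canonical variables.
+        // Then `'?1` is directly equal to a canonical variable and
+        // stands for `'static`, and `?0` stands for `?B`; `Vec<?0>` is
+        // not itself a variable, so `?A` must be recovered by the
+        // unification step that follows.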
+ let mut opt_values: IndexVec<BoundVar, Option<GenericArg<'tcx>>> =
+ IndexVec::from_elem_n(None, query_response.variables.len());
+
+ // In terms of our example above, we are iterating over pairs like:
+ // [(?A, Vec<?0>), ('static, '?1), (?B, ?0)]
+ for (original_value, result_value) in iter::zip(&original_values.var_values, result_values)
+ {
+ match result_value.unpack() {
+ GenericArgKind::Type(result_value) => {
+ // e.g., here `result_value` might be `?0` in the example above...
+ if let ty::Bound(debruijn, b) = *result_value.kind() {
+                        // ...in which case we would set `opt_values[0]` to `Some(?B)`.
+
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(debruijn, ty::INNERMOST);
+ opt_values[b.var] = Some(*original_value);
+ }
+ }
+ GenericArgKind::Lifetime(result_value) => {
+ // e.g., here `result_value` might be `'?1` in the example above...
+ if let ty::ReLateBound(debruijn, br) = *result_value {
+                        // ... in which case we would set `opt_values[1]` to `Some('static)`.
+
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(debruijn, ty::INNERMOST);
+ opt_values[br.var] = Some(*original_value);
+ }
+ }
+ GenericArgKind::Const(result_value) => {
+                    if let ty::ConstKind::Bound(debruijn, b) = result_value.kind() {
+                        // ...in which case we would set the corresponding
+                        // `opt_values` entry to the original const.
+
+                        // We only allow a `ty::INNERMOST` index in substitutions.
+                        assert_eq!(debruijn, ty::INNERMOST);
+ opt_values[b] = Some(*original_value);
+ }
+ }
+ }
+ }
+
+ // Create a result substitution: if we found a value for a
+ // given variable in the loop above, use that. Otherwise, use
+ // a fresh inference variable.
+ let result_subst = CanonicalVarValues {
+ var_values: query_response
+ .variables
+ .iter()
+ .enumerate()
+ .map(|(index, info)| {
+ if info.is_existential() {
+ match opt_values[BoundVar::new(index)] {
+ Some(k) => k,
+ None => self.instantiate_canonical_var(cause.span, info, |u| {
+ universe_map[u.as_usize()]
+ }),
+ }
+ } else {
+ self.instantiate_canonical_var(cause.span, info, |u| {
+ universe_map[u.as_usize()]
+ })
+ }
+ })
+ .collect(),
+ };
+
+ let mut obligations = vec![];
+
+ // Carry all newly resolved opaque types to the caller's scope
+ for &(a, b) in &query_response.value.opaque_types {
+ let a = substitute_value(self.tcx, &result_subst, a);
+ let b = substitute_value(self.tcx, &result_subst, b);
+ obligations.extend(self.handle_opaque_type(a, b, true, cause, param_env)?.obligations);
+ }
+
+ Ok(InferOk { value: result_subst, obligations })
+ }
+
+ /// Given a "guess" at the values for the canonical variables in
+ /// the input, try to unify with the *actual* values found in the
+ /// query result. Often, but not always, this is a no-op, because
+ /// we already found the mapping in the "guessing" step.
+ ///
+ /// See also: `query_response_substitution_guess`
+ fn unify_query_response_substitution_guess<R>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ original_values: &OriginalQueryValues<'tcx>,
+ result_subst: &CanonicalVarValues<'tcx>,
+ query_response: &Canonical<'tcx, QueryResponse<'tcx, R>>,
+ ) -> InferResult<'tcx, ()>
+ where
+ R: Debug + TypeFoldable<'tcx>,
+ {
+ // A closure that yields the result value for the given
+ // canonical variable; this is taken from
+ // `query_response.var_values` after applying the substitution
+ // `result_subst`.
+ let substituted_query_response = |index: BoundVar| -> GenericArg<'tcx> {
+ query_response.substitute_projected(self.tcx, &result_subst, |v| v.var_values[index])
+ };
+
+ // Unify the original value for each variable with the value
+ // taken from `query_response` (after applying `result_subst`).
+ self.unify_canonical_vars(cause, param_env, original_values, substituted_query_response)
+ }
+
+ /// Converts the region constraints resulting from a query into an
+ /// iterator of obligations.
+ fn query_outlives_constraints_into_obligations<'a>(
+ &'a self,
+ cause: &'a ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ unsubstituted_region_constraints: &'a [QueryOutlivesConstraint<'tcx>],
+ result_subst: &'a CanonicalVarValues<'tcx>,
+ ) -> impl Iterator<Item = PredicateObligation<'tcx>> + 'a + Captures<'tcx> {
+ unsubstituted_region_constraints.iter().map(move |&constraint| {
+ let predicate = substitute_value(self.tcx, result_subst, constraint);
+ self.query_outlives_constraint_to_obligation(predicate, cause.clone(), param_env)
+ })
+ }
+
+ pub fn query_outlives_constraint_to_obligation(
+ &self,
+ predicate: QueryOutlivesConstraint<'tcx>,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Obligation<'tcx, ty::Predicate<'tcx>> {
+ let ty::OutlivesPredicate(k1, r2) = predicate.skip_binder();
+
+ let atom = match k1.unpack() {
+ GenericArgKind::Lifetime(r1) => {
+ ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(r1, r2))
+ }
+ GenericArgKind::Type(t1) => {
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(t1, r2))
+ }
+ GenericArgKind::Const(..) => {
+ // Consts cannot outlive one another, so we don't expect to
+ // encounter this branch.
+ span_bug!(cause.span, "unexpected const outlives {:?}", predicate);
+ }
+ };
+ let predicate = predicate.rebind(atom).to_predicate(self.tcx);
+
+ Obligation::new(cause, param_env, predicate)
+ }
+
+ /// Given two sets of values for the same set of canonical variables, unify them.
+ /// The second set is produced lazily by supplying indices from the first set.
+ fn unify_canonical_vars(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ variables1: &OriginalQueryValues<'tcx>,
+ variables2: impl Fn(BoundVar) -> GenericArg<'tcx>,
+ ) -> InferResult<'tcx, ()> {
+ self.commit_if_ok(|_| {
+ let mut obligations = vec![];
+ for (index, value1) in variables1.var_values.iter().enumerate() {
+ let value2 = variables2(BoundVar::new(index));
+
+ match (value1.unpack(), value2.unpack()) {
+ (GenericArgKind::Type(v1), GenericArgKind::Type(v2)) => {
+ obligations
+ .extend(self.at(cause, param_env).eq(v1, v2)?.into_obligations());
+ }
+ (GenericArgKind::Lifetime(re1), GenericArgKind::Lifetime(re2))
+ if re1.is_erased() && re2.is_erased() =>
+ {
+ // no action needed
+ }
+ (GenericArgKind::Lifetime(v1), GenericArgKind::Lifetime(v2)) => {
+ obligations
+ .extend(self.at(cause, param_env).eq(v1, v2)?.into_obligations());
+ }
+ (GenericArgKind::Const(v1), GenericArgKind::Const(v2)) => {
+ let ok = self.at(cause, param_env).eq(v1, v2)?;
+ obligations.extend(ok.into_obligations());
+ }
+ _ => {
+                        bug!("kind mismatch, cannot unify {:?} and {:?}", value1, value2);
+ }
+ }
+ }
+ Ok(InferOk { value: (), obligations })
+ })
+ }
+}
+
+/// Given the region obligations and constraints scraped from the infcx,
+/// creates query region constraints.
+pub fn make_query_region_constraints<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ outlives_obligations: impl Iterator<Item = (Ty<'tcx>, ty::Region<'tcx>)>,
+ region_constraints: &RegionConstraintData<'tcx>,
+) -> QueryRegionConstraints<'tcx> {
+ let RegionConstraintData { constraints, verifys, givens, member_constraints } =
+ region_constraints;
+
+ assert!(verifys.is_empty());
+ assert!(givens.is_empty());
+
+ let outlives: Vec<_> = constraints
+ .iter()
+ .map(|(k, _)| match *k {
+ // Swap regions because we are going from sub (<=) to outlives
+ // (>=).
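+            // For example, the sub-region constraint `'?1 <= '?2`
+            // becomes the outlives predicate `'?2: '?1`.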
+ Constraint::VarSubVar(v1, v2) => ty::OutlivesPredicate(
+ tcx.mk_region(ty::ReVar(v2)).into(),
+ tcx.mk_region(ty::ReVar(v1)),
+ ),
+ Constraint::VarSubReg(v1, r2) => {
+ ty::OutlivesPredicate(r2.into(), tcx.mk_region(ty::ReVar(v1)))
+ }
+ Constraint::RegSubVar(r1, v2) => {
+ ty::OutlivesPredicate(tcx.mk_region(ty::ReVar(v2)).into(), r1)
+ }
+ Constraint::RegSubReg(r1, r2) => ty::OutlivesPredicate(r2.into(), r1),
+ })
+ .map(ty::Binder::dummy) // no bound vars in the code above
+ .chain(
+ outlives_obligations
+ .map(|(ty, r)| ty::OutlivesPredicate(ty.into(), r))
+ .map(ty::Binder::dummy), // no bound vars in the code above
+ )
+ .collect();
+
+ QueryRegionConstraints { outlives, member_constraints: member_constraints.clone() }
+}
+
+struct QueryTypeRelatingDelegate<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ obligations: &'a mut Vec<PredicateObligation<'tcx>>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: &'a ObligationCause<'tcx>,
+}
+
+impl<'tcx> TypeRelatingDelegate<'tcx> for QueryTypeRelatingDelegate<'_, 'tcx> {
+ fn span(&self) -> Span {
+ self.cause.span
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+
+ fn create_next_universe(&mut self) -> ty::UniverseIndex {
+ self.infcx.create_next_universe()
+ }
+
+ fn next_existential_region_var(&mut self, from_forall: bool) -> ty::Region<'tcx> {
+ let origin = NllRegionVariableOrigin::Existential { from_forall };
+ self.infcx.next_nll_region_var(origin)
+ }
+
+ fn next_placeholder_region(&mut self, placeholder: ty::PlaceholderRegion) -> ty::Region<'tcx> {
+ self.infcx.tcx.mk_region(ty::RePlaceholder(placeholder))
+ }
+
+ fn generalize_existential(&mut self, universe: ty::UniverseIndex) -> ty::Region<'tcx> {
+ self.infcx.next_nll_region_var_in_universe(
+ NllRegionVariableOrigin::Existential { from_forall: false },
+ universe,
+ )
+ }
+
+ fn push_outlives(
+ &mut self,
+ sup: ty::Region<'tcx>,
+ sub: ty::Region<'tcx>,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ ) {
+ self.obligations.push(Obligation {
+ cause: self.cause.clone(),
+ param_env: self.param_env,
+ predicate: ty::Binder::dummy(ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(
+ sup, sub,
+ )))
+ .to_predicate(self.infcx.tcx),
+ recursion_depth: 0,
+ });
+ }
+
+ fn const_equate(&mut self, _a: Const<'tcx>, _b: Const<'tcx>) {
+ span_bug!(self.cause.span(), "generic_const_exprs: unreachable `const_equate`");
+ }
+
+ fn normalization() -> NormalizationStrategy {
+ NormalizationStrategy::Eager
+ }
+
+ fn forbid_inference_vars() -> bool {
+ true
+ }
+
+ fn register_opaque_type(
+ &mut self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ a_is_expected: bool,
+ ) -> Result<(), TypeError<'tcx>> {
+ self.obligations.extend(
+ self.infcx
+ .handle_opaque_type(a, b, a_is_expected, &self.cause, self.param_env)?
+ .obligations,
+ );
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/canonical/substitute.rs b/compiler/rustc_infer/src/infer/canonical/substitute.rs
new file mode 100644
index 000000000..34b611342
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/canonical/substitute.rs
@@ -0,0 +1,91 @@
+//! This module contains code to substitute new values into a
+//! `Canonical<'tcx, T>`.
+//!
+//! For an overview of what canonicalization is and how it fits into
+//! rustc, check out the [chapter in the rustc dev guide][c].
+//!
+//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
+
+use crate::infer::canonical::{Canonical, CanonicalVarValues};
+use rustc_middle::ty::fold::{FnMutDelegate, TypeFoldable};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::{self, TyCtxt};
+
+pub(super) trait CanonicalExt<'tcx, V> {
+    /// Instantiate the wrapped value, replacing each canonical variable
+ /// with the value given in `var_values`.
+ fn substitute(&self, tcx: TyCtxt<'tcx>, var_values: &CanonicalVarValues<'tcx>) -> V
+ where
+ V: TypeFoldable<'tcx>;
+
+    /// Allows one to apply a substitution to some subset of
+    /// `self.value`. Invoke `projection_fn` with `self.value` to get
+    /// a value V that is expressed in terms of the same canonical
+    /// variables bound in `self` (usually this extracts a subset
+    /// of `self`). Apply the substitution `var_values` to this value
+ /// V, replacing each of the canonical variables.
+ fn substitute_projected<T>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ var_values: &CanonicalVarValues<'tcx>,
+ projection_fn: impl FnOnce(&V) -> T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>;
+}
+
+impl<'tcx, V> CanonicalExt<'tcx, V> for Canonical<'tcx, V> {
+ fn substitute(&self, tcx: TyCtxt<'tcx>, var_values: &CanonicalVarValues<'tcx>) -> V
+ where
+ V: TypeFoldable<'tcx>,
+ {
+ self.substitute_projected(tcx, var_values, |value| value.clone())
+ }
+
+ fn substitute_projected<T>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ var_values: &CanonicalVarValues<'tcx>,
+ projection_fn: impl FnOnce(&V) -> T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ assert_eq!(self.variables.len(), var_values.len());
+ let value = projection_fn(&self.value);
+ substitute_value(tcx, var_values, value)
+ }
+}
+
+/// Substitute the values from `var_values` into `value`. `var_values`
+/// must be values for the set of canonical variables that appear in
+/// `value`.
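+///
+/// For example (illustrative), substituting `var_values = [u32, 'static]`
+/// into the value `&'^1 [^0]`, where `^i` denotes the bound variable
+/// with index `i`, yields `&'static [u32]`.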
+pub(super) fn substitute_value<'tcx, T>(
+ tcx: TyCtxt<'tcx>,
+ var_values: &CanonicalVarValues<'tcx>,
+ value: T,
+) -> T
+where
+ T: TypeFoldable<'tcx>,
+{
+ if var_values.var_values.is_empty() {
+ value
+ } else {
+ let delegate = FnMutDelegate {
+ regions: |br: ty::BoundRegion| match var_values.var_values[br.var].unpack() {
+ GenericArgKind::Lifetime(l) => l,
+ r => bug!("{:?} is a region but value is {:?}", br, r),
+ },
+ types: |bound_ty: ty::BoundTy| match var_values.var_values[bound_ty.var].unpack() {
+ GenericArgKind::Type(ty) => ty,
+ r => bug!("{:?} is a type but value is {:?}", bound_ty, r),
+ },
+ consts: |bound_ct: ty::BoundVar, _| match var_values.var_values[bound_ct].unpack() {
+ GenericArgKind::Const(ct) => ct,
+ c => bug!("{:?} is a const but value is {:?}", bound_ct, c),
+ },
+ };
+
+ tcx.replace_escaping_bound_vars_uncached(value, delegate)
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/combine.rs b/compiler/rustc_infer/src/infer/combine.rs
new file mode 100644
index 000000000..8bf1de34a
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/combine.rs
@@ -0,0 +1,1001 @@
+//! There are four type combiners: [Equate], [Sub], [Lub], and [Glb].
+//! Each implements the trait [TypeRelation] and contains methods for
+//! combining two instances of various things and yielding a new instance.
+//! These combiner methods always yield a `Result<T>`. To relate two
+//! types, you can use `infcx.at(cause, param_env)` which then allows
+//! you to use the relevant methods of [At](super::at::At).
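+//!
+//! For example (an illustrative sketch; `infcx`, `cause`, `param_env`,
+//! and the two types are assumed to be in scope):
+//!
+//! ```ignore (illustrative)
+//! let InferOk { value: (), obligations } =
+//!     infcx.at(&cause, param_env).eq(expected_ty, actual_ty)?;
+//! // The returned obligations must be registered with a fulfillment
+//! // context to be proven.
+//! ```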
+//!
+//! Combiners mostly do their specific behavior and then hand off the
+//! bulk of the work to [InferCtxt::super_combine_tys] and
+//! [InferCtxt::super_combine_consts].
+//!
+//! Combining two types may have side-effects on the inference context,
+//! which can be undone by using snapshots.
+//! either [InferCtxt::commit_if_ok] or [InferCtxt::probe].
+//!
+//! On success, the LUB/GLB operations return the appropriate bound. The
+//! return value of `Equate` or `Sub` shouldn't really be used.
+//!
+//! ## Contravariance
+//!
+//! We explicitly track which argument is expected using
+//! [TypeRelation::a_is_expected], so when dealing with contravariance
+//! this should be correctly updated.
+
+use super::equate::Equate;
+use super::glb::Glb;
+use super::lub::Lub;
+use super::sub::Sub;
+use super::type_variable::TypeVariableValue;
+use super::{InferCtxt, MiscVariable, TypeTrace};
+use crate::traits::{Obligation, PredicateObligations};
+use rustc_data_structures::sso::SsoHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue};
+use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
+use rustc_middle::traits::ObligationCause;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::relate::{self, Relate, RelateResult, TypeRelation};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, InferConst, ToPredicate, Ty, TyCtxt, TypeVisitable};
+use rustc_middle::ty::{IntType, UintType};
+use rustc_span::{Span, DUMMY_SP};
+
+#[derive(Clone)]
+pub struct CombineFields<'infcx, 'tcx> {
+ pub infcx: &'infcx InferCtxt<'infcx, 'tcx>,
+ pub trace: TypeTrace<'tcx>,
+ pub cause: Option<ty::relate::Cause>,
+ pub param_env: ty::ParamEnv<'tcx>,
+ pub obligations: PredicateObligations<'tcx>,
+ /// Whether we should define opaque types
+ /// or just treat them opaquely.
+ /// Currently only used to prevent predicate
+ /// matching from matching anything against opaque
+ /// types.
+ pub define_opaque_types: bool,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum RelationDir {
+ SubtypeOf,
+ SupertypeOf,
+ EqTo,
+}
+
+impl<'infcx, 'tcx> InferCtxt<'infcx, 'tcx> {
+ pub fn super_combine_tys<R>(
+ &self,
+ relation: &mut R,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ ) -> RelateResult<'tcx, Ty<'tcx>>
+ where
+ R: TypeRelation<'tcx>,
+ {
+ let a_is_expected = relation.a_is_expected();
+
+ match (a.kind(), b.kind()) {
+ // Relate integral variables to other types
+ (&ty::Infer(ty::IntVar(a_id)), &ty::Infer(ty::IntVar(b_id))) => {
+ self.inner
+ .borrow_mut()
+ .int_unification_table()
+ .unify_var_var(a_id, b_id)
+ .map_err(|e| int_unification_error(a_is_expected, e))?;
+ Ok(a)
+ }
+ (&ty::Infer(ty::IntVar(v_id)), &ty::Int(v)) => {
+ self.unify_integral_variable(a_is_expected, v_id, IntType(v))
+ }
+ (&ty::Int(v), &ty::Infer(ty::IntVar(v_id))) => {
+ self.unify_integral_variable(!a_is_expected, v_id, IntType(v))
+ }
+ (&ty::Infer(ty::IntVar(v_id)), &ty::Uint(v)) => {
+ self.unify_integral_variable(a_is_expected, v_id, UintType(v))
+ }
+ (&ty::Uint(v), &ty::Infer(ty::IntVar(v_id))) => {
+ self.unify_integral_variable(!a_is_expected, v_id, UintType(v))
+ }
+
+ // Relate floating-point variables to other types
+ (&ty::Infer(ty::FloatVar(a_id)), &ty::Infer(ty::FloatVar(b_id))) => {
+ self.inner
+ .borrow_mut()
+ .float_unification_table()
+ .unify_var_var(a_id, b_id)
+ .map_err(|e| float_unification_error(relation.a_is_expected(), e))?;
+ Ok(a)
+ }
+ (&ty::Infer(ty::FloatVar(v_id)), &ty::Float(v)) => {
+ self.unify_float_variable(a_is_expected, v_id, v)
+ }
+ (&ty::Float(v), &ty::Infer(ty::FloatVar(v_id))) => {
+ self.unify_float_variable(!a_is_expected, v_id, v)
+ }
+
+ // All other cases of inference are errors
+ (&ty::Infer(_), _) | (_, &ty::Infer(_)) => {
+ Err(TypeError::Sorts(ty::relate::expected_found(relation, a, b)))
+ }
+
+ _ => ty::relate::super_relate_tys(relation, a, b),
+ }
+ }
+
+ pub fn super_combine_consts<R>(
+ &self,
+ relation: &mut R,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>>
+ where
+ R: ConstEquateRelation<'tcx>,
+ {
+ debug!("{}.consts({:?}, {:?})", relation.tag(), a, b);
+ if a == b {
+ return Ok(a);
+ }
+
+ let a = self.shallow_resolve(a);
+ let b = self.shallow_resolve(b);
+
+ let a_is_expected = relation.a_is_expected();
+
+ match (a.kind(), b.kind()) {
+ (
+ ty::ConstKind::Infer(InferConst::Var(a_vid)),
+ ty::ConstKind::Infer(InferConst::Var(b_vid)),
+ ) => {
+ self.inner
+ .borrow_mut()
+ .const_unification_table()
+ .unify_var_var(a_vid, b_vid)
+ .map_err(|e| const_unification_error(a_is_expected, e))?;
+ return Ok(a);
+ }
+
+ // All other cases of inference with other variables are errors.
+ (ty::ConstKind::Infer(InferConst::Var(_)), ty::ConstKind::Infer(_))
+ | (ty::ConstKind::Infer(_), ty::ConstKind::Infer(InferConst::Var(_))) => {
+ bug!("tried to combine ConstKind::Infer/ConstKind::Infer(InferConst::Var)")
+ }
+
+ (ty::ConstKind::Infer(InferConst::Var(vid)), _) => {
+ return self.unify_const_variable(relation.param_env(), vid, b, a_is_expected);
+ }
+
+ (_, ty::ConstKind::Infer(InferConst::Var(vid))) => {
+ return self.unify_const_variable(relation.param_env(), vid, a, !a_is_expected);
+ }
+ (ty::ConstKind::Unevaluated(..), _) if self.tcx.lazy_normalization() => {
+ // FIXME(#59490): Need to remove the leak check to accommodate
+ // escaping bound variables here.
+ if !a.has_escaping_bound_vars() && !b.has_escaping_bound_vars() {
+ relation.const_equate_obligation(a, b);
+ }
+ return Ok(b);
+ }
+ (_, ty::ConstKind::Unevaluated(..)) if self.tcx.lazy_normalization() => {
+ // FIXME(#59490): Need to remove the leak check to accommodate
+ // escaping bound variables here.
+ if !a.has_escaping_bound_vars() && !b.has_escaping_bound_vars() {
+ relation.const_equate_obligation(a, b);
+ }
+ return Ok(a);
+ }
+ _ => {}
+ }
+
+ ty::relate::super_relate_consts(relation, a, b)
+ }
+
+ /// Unifies the const variable `target_vid` with the given constant.
+ ///
+ /// This also tests if the given const `ct` contains an inference variable which was previously
+ /// unioned with `target_vid`. If this is the case, inferring `target_vid` to `ct`
+ /// would result in an infinite type as we continuously replace an inference variable
+ /// in `ct` with `ct` itself.
+ ///
+    /// This is especially important as unevaluated consts use their parent's generics.
+ /// They therefore often contain unused substs, making these errors far more likely.
+ ///
+ /// A good example of this is the following:
+ ///
+ /// ```compile_fail,E0308
+ /// #![feature(generic_const_exprs)]
+ ///
+ /// fn bind<const N: usize>(value: [u8; N]) -> [u8; 3 + 4] {
+ /// todo!()
+ /// }
+ ///
+ /// fn main() {
+ /// let mut arr = Default::default();
+ /// arr = bind(arr);
+ /// }
+ /// ```
+ ///
+ /// Here `3 + 4` ends up as `ConstKind::Unevaluated` which uses the generics
+ /// of `fn bind` (meaning that its substs contain `N`).
+ ///
+ /// `bind(arr)` now infers that the type of `arr` must be `[u8; N]`.
+ /// The assignment `arr = bind(arr)` now tries to equate `N` with `3 + 4`.
+ ///
+ /// As `3 + 4` contains `N` in its substs, this must not succeed.
+ ///
+ /// See `src/test/ui/const-generics/occurs-check/` for more examples where this is relevant.
+ #[instrument(level = "debug", skip(self))]
+ fn unify_const_variable(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ target_vid: ty::ConstVid<'tcx>,
+ ct: ty::Const<'tcx>,
+ vid_is_expected: bool,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ let (for_universe, span) = {
+ let mut inner = self.inner.borrow_mut();
+ let variable_table = &mut inner.const_unification_table();
+ let var_value = variable_table.probe_value(target_vid);
+ match var_value.val {
+ ConstVariableValue::Known { value } => {
+ bug!("instantiating {:?} which has a known value {:?}", target_vid, value)
+ }
+ ConstVariableValue::Unknown { universe } => (universe, var_value.origin.span),
+ }
+ };
+ let value = ConstInferUnifier { infcx: self, span, param_env, for_universe, target_vid }
+ .relate(ct, ct)?;
+
+ self.inner
+ .borrow_mut()
+ .const_unification_table()
+ .unify_var_value(
+ target_vid,
+ ConstVarValue {
+ origin: ConstVariableOrigin {
+ kind: ConstVariableOriginKind::ConstInference,
+ span: DUMMY_SP,
+ },
+ val: ConstVariableValue::Known { value },
+ },
+ )
+ .map(|()| value)
+ .map_err(|e| const_unification_error(vid_is_expected, e))
+ }
+
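+    /// As a rough surface-level illustration, this is the mechanism that
+    /// resolves an unsuffixed integer literal against a concrete integer
+    /// type:
+    ///
+    /// ```ignore (illustrative)
+    /// let x = 1;      // `x` gets an integer inference variable `{integer}`
+    /// let y: u8 = x;  // unifying `{integer}` with `u8` resolves `x: u8`
+    /// ```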
+ fn unify_integral_variable(
+ &self,
+ vid_is_expected: bool,
+ vid: ty::IntVid,
+ val: ty::IntVarValue,
+ ) -> RelateResult<'tcx, Ty<'tcx>> {
+ self.inner
+ .borrow_mut()
+ .int_unification_table()
+ .unify_var_value(vid, Some(val))
+ .map_err(|e| int_unification_error(vid_is_expected, e))?;
+ match val {
+ IntType(v) => Ok(self.tcx.mk_mach_int(v)),
+ UintType(v) => Ok(self.tcx.mk_mach_uint(v)),
+ }
+ }
+
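+    /// The floating-point analogue of `unify_integral_variable`: e.g. an
+    /// unsuffixed `1.0` starts as a `{float}` inference variable and is
+    /// resolved once unified with a concrete `f32` or `f64`.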
+ fn unify_float_variable(
+ &self,
+ vid_is_expected: bool,
+ vid: ty::FloatVid,
+ val: ty::FloatTy,
+ ) -> RelateResult<'tcx, Ty<'tcx>> {
+ self.inner
+ .borrow_mut()
+ .float_unification_table()
+ .unify_var_value(vid, Some(ty::FloatVarValue(val)))
+ .map_err(|e| float_unification_error(vid_is_expected, e))?;
+ Ok(self.tcx.mk_mach_float(val))
+ }
+}
+
+impl<'infcx, 'tcx> CombineFields<'infcx, 'tcx> {
+ pub fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ pub fn equate<'a>(&'a mut self, a_is_expected: bool) -> Equate<'a, 'infcx, 'tcx> {
+ Equate::new(self, a_is_expected)
+ }
+
+ pub fn sub<'a>(&'a mut self, a_is_expected: bool) -> Sub<'a, 'infcx, 'tcx> {
+ Sub::new(self, a_is_expected)
+ }
+
+ pub fn lub<'a>(&'a mut self, a_is_expected: bool) -> Lub<'a, 'infcx, 'tcx> {
+ Lub::new(self, a_is_expected)
+ }
+
+ pub fn glb<'a>(&'a mut self, a_is_expected: bool) -> Glb<'a, 'infcx, 'tcx> {
+ Glb::new(self, a_is_expected)
+ }
+
+ /// Here, `dir` is either `EqTo`, `SubtypeOf`, or `SupertypeOf`.
+ /// The idea is that we should ensure that the type `a_ty` is equal
+ /// to, a subtype of, or a supertype of (respectively) the type
+ /// to which `b_vid` is bound.
+ ///
+ /// Since `b_vid` has not yet been instantiated with a type, we
+ /// will first instantiate `b_vid` with a *generalized* version
+ /// of `a_ty`. Generalization introduces other inference
+ /// variables wherever subtyping could occur.
+ #[instrument(skip(self), level = "debug")]
+ pub fn instantiate(
+ &mut self,
+ a_ty: Ty<'tcx>,
+ dir: RelationDir,
+ b_vid: ty::TyVid,
+ a_is_expected: bool,
+ ) -> RelateResult<'tcx, ()> {
+ use self::RelationDir::*;
+
+        // `b_vid` must not have been instantiated with a type yet.
+ debug_assert!(self.infcx.inner.borrow_mut().type_variables().probe(b_vid).is_unknown());
+
+ // Generalize type of `a_ty` appropriately depending on the
+ // direction. As an example, assume:
+ //
+ // - `a_ty == &'x ?1`, where `'x` is some free region and `?1` is an
+ // inference variable,
+ // - and `dir` == `SubtypeOf`.
+ //
+ // Then the generalized form `b_ty` would be `&'?2 ?3`, where
+ // `'?2` and `?3` are fresh region/type inference
+ // variables. (Down below, we will relate `a_ty <: b_ty`,
+ // adding constraints like `'x: '?2` and `?1 <: ?3`.)
+ let Generalization { ty: b_ty, needs_wf } = self.generalize(a_ty, b_vid, dir)?;
+ debug!(?b_ty);
+ self.infcx.inner.borrow_mut().type_variables().instantiate(b_vid, b_ty);
+
+ if needs_wf {
+ self.obligations.push(Obligation::new(
+ self.trace.cause.clone(),
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(b_ty.into()))
+ .to_predicate(self.infcx.tcx),
+ ));
+ }
+
+ // Finally, relate `b_ty` to `a_ty`, as described in previous comment.
+ //
+ // FIXME(#16847): This code is non-ideal because all these subtype
+ // relations wind up attributed to the same spans. We need
+ // to associate causes/spans with each of the relations in
+ // the stack to get this right.
+ match dir {
+ EqTo => self.equate(a_is_expected).relate(a_ty, b_ty),
+ SubtypeOf => self.sub(a_is_expected).relate(a_ty, b_ty),
+ SupertypeOf => self.sub(a_is_expected).relate_with_variance(
+ ty::Contravariant,
+ ty::VarianceDiagInfo::default(),
+ a_ty,
+ b_ty,
+ ),
+ }?;
+
+ Ok(())
+ }
+
+ /// Attempts to generalize `ty` for the type variable `for_vid`.
+    /// This checks for cycles -- that is, whether the type `ty`
+    /// references `for_vid`. The `dir` is the "direction" in which we
+    /// are performing the generalization (i.e., are we producing a type
+    /// that can be used as a supertype, etc.).
+ ///
+ /// Preconditions:
+ ///
+ /// - `for_vid` is a "root vid"
+ #[instrument(skip(self), level = "trace")]
+ fn generalize(
+ &self,
+ ty: Ty<'tcx>,
+ for_vid: ty::TyVid,
+ dir: RelationDir,
+ ) -> RelateResult<'tcx, Generalization<'tcx>> {
+ // Determine the ambient variance within which `ty` appears.
+ // The surrounding equation is:
+ //
+ // ty [op] ty2
+ //
+ // where `op` is either `==`, `<:`, or `:>`. This maps quite
+ // naturally.
+ let ambient_variance = match dir {
+ RelationDir::EqTo => ty::Invariant,
+ RelationDir::SubtypeOf => ty::Covariant,
+ RelationDir::SupertypeOf => ty::Contravariant,
+ };
+
+ trace!(?ambient_variance);
+
+ let for_universe = match self.infcx.inner.borrow_mut().type_variables().probe(for_vid) {
+ v @ TypeVariableValue::Known { .. } => {
+ bug!("instantiating {:?} which has a known value {:?}", for_vid, v,)
+ }
+ TypeVariableValue::Unknown { universe } => universe,
+ };
+
+ trace!(?for_universe);
+ trace!(?self.trace);
+
+ let mut generalize = Generalizer {
+ infcx: self.infcx,
+ cause: &self.trace.cause,
+ for_vid_sub_root: self.infcx.inner.borrow_mut().type_variables().sub_root_var(for_vid),
+ for_universe,
+ ambient_variance,
+ needs_wf: false,
+ root_ty: ty,
+ param_env: self.param_env,
+ cache: SsoHashMap::new(),
+ };
+
+ let ty = match generalize.relate(ty, ty) {
+ Ok(ty) => ty,
+ Err(e) => {
+ debug!(?e, "failure");
+ return Err(e);
+ }
+ };
+ let needs_wf = generalize.needs_wf;
+ trace!(?ty, ?needs_wf, "success");
+ Ok(Generalization { ty, needs_wf })
+ }
+
+ pub fn add_const_equate_obligation(
+ &mut self,
+ a_is_expected: bool,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) {
+ let predicate = if a_is_expected {
+ ty::PredicateKind::ConstEquate(a, b)
+ } else {
+ ty::PredicateKind::ConstEquate(b, a)
+ };
+ self.obligations.push(Obligation::new(
+ self.trace.cause.clone(),
+ self.param_env,
+ ty::Binder::dummy(predicate).to_predicate(self.tcx()),
+ ));
+ }
+}
+
+struct Generalizer<'cx, 'tcx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+
+ /// The span, used when creating new type variables and things.
+ cause: &'cx ObligationCause<'tcx>,
+
+ /// The vid of the type variable that is in the process of being
+ /// instantiated; if we find this within the type we are folding,
+ /// that means we would have created a cyclic type.
+ for_vid_sub_root: ty::TyVid,
+
+ /// The universe of the type variable that is in the process of
+ /// being instantiated. Any fresh variables that we create in this
+ /// process should be in that same universe.
+ for_universe: ty::UniverseIndex,
+
+ /// Track the variance as we descend into the type.
+ ambient_variance: ty::Variance,
+
+ /// See the field `needs_wf` in `Generalization`.
+ needs_wf: bool,
+
+ /// The root type that we are generalizing. Used when reporting cycles.
+ root_ty: Ty<'tcx>,
+
+ param_env: ty::ParamEnv<'tcx>,
+
+ cache: SsoHashMap<Ty<'tcx>, RelateResult<'tcx, Ty<'tcx>>>,
+}
+
+/// Result from a generalization operation. This includes
+/// not only the generalized type, but also a bool flag
+/// indicating whether further WF checks are needed.
+struct Generalization<'tcx> {
+ ty: Ty<'tcx>,
+
+ /// If true, then the generalized type may not be well-formed,
+ /// even if the source type is well-formed, so we should add an
+ /// additional check to enforce that it is. This arises in
+ /// particular around 'bivariant' type parameters that are only
+ /// constrained by a where-clause. As an example, imagine a type:
+ ///
+    /// ```text
+    /// struct Foo<A, B> where A: Iterator<Item = B> {
+    ///     data: A
+    /// }
+    /// ```
+ ///
+ /// here, `A` will be covariant, but `B` is
+ /// unconstrained. However, whatever it is, for `Foo` to be WF, it
+ /// must be equal to `A::Item`. If we have an input `Foo<?A, ?B>`,
+ /// then after generalization we will wind up with a type like
+ /// `Foo<?C, ?D>`. When we enforce that `Foo<?A, ?B> <: Foo<?C,
+ /// ?D>` (or `>:`), we will wind up with the requirement that `?A
+ /// <: ?C`, but no particular relationship between `?B` and `?D`
+ /// (after all, we do not know the variance of the normalized form
+ /// of `A::Item` with respect to `A`). If we do nothing else, this
+ /// may mean that `?D` goes unconstrained (as in #41677). So, in
+ /// this scenario where we create a new type variable in a
+ /// bivariant context, we set the `needs_wf` flag to true. This
+ /// will force the calling code to check that `WF(Foo<?C, ?D>)`
+ /// holds, which in turn implies that `?C::Item == ?D`. So once
+ /// `?C` is constrained, that should suffice to restrict `?D`.
+ needs_wf: bool,
+}
+
+impl<'tcx> TypeRelation<'tcx> for Generalizer<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+
+ fn tag(&self) -> &'static str {
+ "Generalizer"
+ }
+
+ fn a_is_expected(&self) -> bool {
+ true
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ Ok(a.rebind(self.relate(a.skip_binder(), b.skip_binder())?))
+ }
+
+ fn relate_item_substs(
+ &mut self,
+ item_def_id: DefId,
+ a_subst: SubstsRef<'tcx>,
+ b_subst: SubstsRef<'tcx>,
+ ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ if self.ambient_variance == ty::Variance::Invariant {
+ // Avoid fetching the variance if we are in an invariant
+ // context; no need, and it can induce dependency cycles
+ // (e.g., #41849).
+ relate::relate_substs(self, a_subst, b_subst)
+ } else {
+ let tcx = self.tcx();
+ let opt_variances = tcx.variances_of(item_def_id);
+ relate::relate_substs_with_variances(
+ self,
+ item_def_id,
+ &opt_variances,
+ a_subst,
+ b_subst,
+ )
+ }
+ }
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ variance: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ let old_ambient_variance = self.ambient_variance;
+ self.ambient_variance = self.ambient_variance.xform(variance);
+
+ let result = self.relate(a, b);
+ self.ambient_variance = old_ambient_variance;
+ result
+ }
+
+ fn tys(&mut self, t: Ty<'tcx>, t2: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ assert_eq!(t, t2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
+
+ if let Some(result) = self.cache.get(&t) {
+ return result.clone();
+ }
+ debug!("generalize: t={:?}", t);
+
+ // Check to see whether the type we are generalizing references
+ // any other type variable related to `vid` via
+ // subtyping. This is basically our "occurs check", preventing
+ // us from creating infinitely sized types.
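+        // For example (roughly), `let mut x = None; x = Some(x);` would
+        // require `?X == Option<?X>`; the check below reports `CyclicTy`
+        // rather than constructing that infinite type.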
+ let result = match *t.kind() {
+ ty::Infer(ty::TyVar(vid)) => {
+ let vid = self.infcx.inner.borrow_mut().type_variables().root_var(vid);
+ let sub_vid = self.infcx.inner.borrow_mut().type_variables().sub_root_var(vid);
+ if sub_vid == self.for_vid_sub_root {
+ // If sub-roots are equal, then `for_vid` and
+ // `vid` are related via subtyping.
+ Err(TypeError::CyclicTy(self.root_ty))
+ } else {
+ let probe = self.infcx.inner.borrow_mut().type_variables().probe(vid);
+ match probe {
+ TypeVariableValue::Known { value: u } => {
+ debug!("generalize: known value {:?}", u);
+ self.relate(u, u)
+ }
+ TypeVariableValue::Unknown { universe } => {
+ match self.ambient_variance {
+ // Invariant: no need to make a fresh type variable.
+ ty::Invariant => {
+ if self.for_universe.can_name(universe) {
+ return Ok(t);
+ }
+ }
+
+ // Bivariant: make a fresh var, but we
+ // may need a WF predicate. See
+ // comment on `needs_wf` field for
+ // more info.
+ ty::Bivariant => self.needs_wf = true,
+
+ // Co/contravariant: this will be
+ // sufficiently constrained later on.
+ ty::Covariant | ty::Contravariant => (),
+ }
+
+ let origin =
+ *self.infcx.inner.borrow_mut().type_variables().var_origin(vid);
+ let new_var_id = self
+ .infcx
+ .inner
+ .borrow_mut()
+ .type_variables()
+ .new_var(self.for_universe, origin);
+ let u = self.tcx().mk_ty_var(new_var_id);
+
+ // Record that we replaced `vid` with `new_var_id` as part of a generalization
+ // operation. This is needed to detect cyclic types. To see why, see the
+ // docs in the `type_variables` module.
+ self.infcx.inner.borrow_mut().type_variables().sub(vid, new_var_id);
+ debug!("generalize: replacing original vid={:?} with new={:?}", vid, u);
+ Ok(u)
+ }
+ }
+ }
+ }
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) => {
+ // No matter what mode we are in,
+ // integer/floating-point types must be equal to be
+ // relatable.
+ Ok(t)
+ }
+ _ => relate::super_relate_tys(self, t, t),
+ };
+
+ self.cache.insert(t, result.clone());
+ return result;
+ }
+
+ fn regions(
+ &mut self,
+ r: ty::Region<'tcx>,
+ r2: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ assert_eq!(r, r2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
+
+ debug!("generalize: regions r={:?}", r);
+
+ match *r {
+ // Never make variables for regions bound within the type itself,
+ // nor for erased regions.
+ ty::ReLateBound(..) | ty::ReErased => {
+ return Ok(r);
+ }
+
+ ty::RePlaceholder(..)
+ | ty::ReVar(..)
+ | ty::ReEmpty(_)
+ | ty::ReStatic
+ | ty::ReEarlyBound(..)
+ | ty::ReFree(..) => {
+ // see common code below
+ }
+ }
+
+ // If we are in an invariant context, we can re-use the region
+ // as is, unless it happens to be in some universe that we
+ // can't name. (In the case of a region *variable*, we could
+ // use it if we promoted it into our universe, but we don't
+ // bother.)
+ if let ty::Invariant = self.ambient_variance {
+ let r_universe = self.infcx.universe_of_region(r);
+ if self.for_universe.can_name(r_universe) {
+ return Ok(r);
+ }
+ }
+
+ // FIXME: This is non-ideal because we don't give a
+ // very descriptive origin for this region variable.
+ Ok(self.infcx.next_region_var_in_universe(MiscVariable(self.cause.span), self.for_universe))
+ }
+
+ fn consts(
+ &mut self,
+ c: ty::Const<'tcx>,
+ c2: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ assert_eq!(c, c2); // we are abusing TypeRelation here; both LHS and RHS ought to be ==
+
+ match c.kind() {
+ ty::ConstKind::Infer(InferConst::Var(vid)) => {
+ let mut inner = self.infcx.inner.borrow_mut();
+ let variable_table = &mut inner.const_unification_table();
+ let var_value = variable_table.probe_value(vid);
+ match var_value.val {
+ ConstVariableValue::Known { value: u } => {
+ drop(inner);
+ self.relate(u, u)
+ }
+ ConstVariableValue::Unknown { universe } => {
+ if self.for_universe.can_name(universe) {
+ Ok(c)
+ } else {
+ let new_var_id = variable_table.new_key(ConstVarValue {
+ origin: var_value.origin,
+ val: ConstVariableValue::Unknown { universe: self.for_universe },
+ });
+ Ok(self.tcx().mk_const_var(new_var_id, c.ty()))
+ }
+ }
+ }
+ }
+ ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
+ if self.tcx().lazy_normalization() =>
+ {
+ assert_eq!(promoted, None);
+ let substs = self.relate_with_variance(
+ ty::Variance::Invariant,
+ ty::VarianceDiagInfo::default(),
+ substs,
+ substs,
+ )?;
+ Ok(self.tcx().mk_const(ty::ConstS {
+ ty: c.ty(),
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted }),
+ }))
+ }
+ _ => relate::super_relate_consts(self, c, c),
+ }
+ }
+}
+
+pub trait ConstEquateRelation<'tcx>: TypeRelation<'tcx> {
+ /// Register an obligation that both constants must be equal to each other.
+ ///
+ /// If they aren't equal then the relation doesn't hold.
+ fn const_equate_obligation(&mut self, a: ty::Const<'tcx>, b: ty::Const<'tcx>);
+}
+
+pub fn const_unification_error<'tcx>(
+ a_is_expected: bool,
+ (a, b): (ty::Const<'tcx>, ty::Const<'tcx>),
+) -> TypeError<'tcx> {
+ TypeError::ConstMismatch(ExpectedFound::new(a_is_expected, a, b))
+}
+
+fn int_unification_error<'tcx>(
+ a_is_expected: bool,
+ v: (ty::IntVarValue, ty::IntVarValue),
+) -> TypeError<'tcx> {
+ let (a, b) = v;
+ TypeError::IntMismatch(ExpectedFound::new(a_is_expected, a, b))
+}
+
+fn float_unification_error<'tcx>(
+ a_is_expected: bool,
+ v: (ty::FloatVarValue, ty::FloatVarValue),
+) -> TypeError<'tcx> {
+ let (ty::FloatVarValue(a), ty::FloatVarValue(b)) = v;
+ TypeError::FloatMismatch(ExpectedFound::new(a_is_expected, a, b))
+}
+
+struct ConstInferUnifier<'cx, 'tcx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+
+ span: Span,
+
+ param_env: ty::ParamEnv<'tcx>,
+
+ for_universe: ty::UniverseIndex,
+
+ /// The vid of the const variable that is in the process of being
+ /// instantiated; if we find this within the const we are folding,
+ /// that means we would have created a cyclic const.
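+    /// (See the doc comment on `unify_const_variable` above for a worked
+    /// example of the cycle this guards against.)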
+ target_vid: ty::ConstVid<'tcx>,
+}
+
+// We use `TypeRelation` here to propagate `RelateResult` upwards.
+//
+// Both inputs are expected to be the same.
+impl<'tcx> TypeRelation<'tcx> for ConstInferUnifier<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+
+ fn tag(&self) -> &'static str {
+ "ConstInferUnifier"
+ }
+
+ fn a_is_expected(&self) -> bool {
+ true
+ }
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ _variance: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ // We don't care about variance here.
+ self.relate(a, b)
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ Ok(a.rebind(self.relate(a.skip_binder(), b.skip_binder())?))
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn tys(&mut self, t: Ty<'tcx>, _t: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ debug_assert_eq!(t, _t);
+ debug!("ConstInferUnifier: t={:?}", t);
+
+ match t.kind() {
+ &ty::Infer(ty::TyVar(vid)) => {
+ let vid = self.infcx.inner.borrow_mut().type_variables().root_var(vid);
+ let probe = self.infcx.inner.borrow_mut().type_variables().probe(vid);
+ match probe {
+ TypeVariableValue::Known { value: u } => {
+                    debug!("ConstInferUnifier: known value {:?}", u);
+ self.tys(u, u)
+ }
+ TypeVariableValue::Unknown { universe } => {
+ if self.for_universe.can_name(universe) {
+ return Ok(t);
+ }
+
+ let origin =
+ *self.infcx.inner.borrow_mut().type_variables().var_origin(vid);
+ let new_var_id = self
+ .infcx
+ .inner
+ .borrow_mut()
+ .type_variables()
+ .new_var(self.for_universe, origin);
+ let u = self.tcx().mk_ty_var(new_var_id);
+ debug!(
+ "ConstInferUnifier: replacing original vid={:?} with new={:?}",
+ vid, u
+ );
+ Ok(u)
+ }
+ }
+ }
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) => Ok(t),
+ _ => relate::super_relate_tys(self, t, t),
+ }
+ }
+
+ fn regions(
+ &mut self,
+ r: ty::Region<'tcx>,
+ _r: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug_assert_eq!(r, _r);
+ debug!("ConstInferUnifier: r={:?}", r);
+
+ match *r {
+ // Never make variables for regions bound within the type itself,
+ // nor for erased regions.
+ ty::ReLateBound(..) | ty::ReErased => {
+ return Ok(r);
+ }
+
+ ty::RePlaceholder(..)
+ | ty::ReVar(..)
+ | ty::ReEmpty(_)
+ | ty::ReStatic
+ | ty::ReEarlyBound(..)
+ | ty::ReFree(..) => {
+ // see common code below
+ }
+ }
+
+ let r_universe = self.infcx.universe_of_region(r);
+ if self.for_universe.can_name(r_universe) {
+ return Ok(r);
+ } else {
+ // FIXME: This is non-ideal because we don't give a
+ // very descriptive origin for this region variable.
+ Ok(self.infcx.next_region_var_in_universe(MiscVariable(self.span), self.for_universe))
+ }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn consts(
+ &mut self,
+ c: ty::Const<'tcx>,
+ _c: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ debug_assert_eq!(c, _c);
+ debug!("ConstInferUnifier: c={:?}", c);
+
+ match c.kind() {
+ ty::ConstKind::Infer(InferConst::Var(vid)) => {
+ // Check if the current unification would end up
+ // unifying `target_vid` with a const which contains
+ // an inference variable which is unioned with `target_vid`.
+ //
+ // Not doing so can easily result in stack overflows.
+ if self
+ .infcx
+ .inner
+ .borrow_mut()
+ .const_unification_table()
+ .unioned(self.target_vid, vid)
+ {
+ return Err(TypeError::CyclicConst(c));
+ }
+
+ let var_value =
+ self.infcx.inner.borrow_mut().const_unification_table().probe_value(vid);
+ match var_value.val {
+ ConstVariableValue::Known { value: u } => self.consts(u, u),
+ ConstVariableValue::Unknown { universe } => {
+ if self.for_universe.can_name(universe) {
+ Ok(c)
+ } else {
+ let new_var_id =
+ self.infcx.inner.borrow_mut().const_unification_table().new_key(
+ ConstVarValue {
+ origin: var_value.origin,
+ val: ConstVariableValue::Unknown {
+ universe: self.for_universe,
+ },
+ },
+ );
+ Ok(self.tcx().mk_const_var(new_var_id, c.ty()))
+ }
+ }
+ }
+ }
+ ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted })
+ if self.tcx().lazy_normalization() =>
+ {
+ assert_eq!(promoted, None);
+ let substs = self.relate_with_variance(
+ ty::Variance::Invariant,
+ ty::VarianceDiagInfo::default(),
+ substs,
+ substs,
+ )?;
+ Ok(self.tcx().mk_const(ty::ConstS {
+ ty: c.ty(),
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted }),
+ }))
+ }
+ _ => relate::super_relate_consts(self, c, c),
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/equate.rs b/compiler/rustc_infer/src/infer/equate.rs
new file mode 100644
index 000000000..3b1798ca7
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/equate.rs
@@ -0,0 +1,169 @@
+use super::combine::{CombineFields, ConstEquateRelation, RelationDir};
+use super::Subtype;
+
+use rustc_middle::ty::relate::{self, Relate, RelateResult, TypeRelation};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::TyVar;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+use rustc_hir::def_id::DefId;
+
+/// Ensures `a` is made equal to `b`. Returns `a` on success.
+pub struct Equate<'combine, 'infcx, 'tcx> {
+ fields: &'combine mut CombineFields<'infcx, 'tcx>,
+ a_is_expected: bool,
+}
+
+impl<'combine, 'infcx, 'tcx> Equate<'combine, 'infcx, 'tcx> {
+ pub fn new(
+ fields: &'combine mut CombineFields<'infcx, 'tcx>,
+ a_is_expected: bool,
+ ) -> Equate<'combine, 'infcx, 'tcx> {
+ Equate { fields, a_is_expected }
+ }
+}
+
+impl<'tcx> TypeRelation<'tcx> for Equate<'_, '_, 'tcx> {
+ fn tag(&self) -> &'static str {
+ "Equate"
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.fields.tcx()
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.fields.param_env
+ }
+
+ fn a_is_expected(&self) -> bool {
+ self.a_is_expected
+ }
+
+ fn relate_item_substs(
+ &mut self,
+ _item_def_id: DefId,
+ a_subst: SubstsRef<'tcx>,
+ b_subst: SubstsRef<'tcx>,
+ ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ // N.B., once we are equating types, we don't care about
+        // variance, so don't try to look up the variance here. This
+ // also avoids some cycles (e.g., #41849) since looking up
+ // variance requires computing types which can require
+ // performing trait matching (which then performs equality
+ // unification).
+
+ relate::relate_substs(self, a_subst, b_subst)
+ }
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ _: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ self.relate(a, b)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ if a == b {
+ return Ok(a);
+ }
+
+ trace!(a = ?a.kind(), b = ?b.kind());
+
+ let infcx = self.fields.infcx;
+
+ let a = infcx.inner.borrow_mut().type_variables().replace_if_possible(a);
+ let b = infcx.inner.borrow_mut().type_variables().replace_if_possible(b);
+
+ match (a.kind(), b.kind()) {
+ (&ty::Infer(TyVar(a_id)), &ty::Infer(TyVar(b_id))) => {
+ infcx.inner.borrow_mut().type_variables().equate(a_id, b_id);
+ }
+
+ (&ty::Infer(TyVar(a_id)), _) => {
+ self.fields.instantiate(b, RelationDir::EqTo, a_id, self.a_is_expected)?;
+ }
+
+ (_, &ty::Infer(TyVar(b_id))) => {
+ self.fields.instantiate(a, RelationDir::EqTo, b_id, self.a_is_expected)?;
+ }
+
+ (&ty::Opaque(a_def_id, _), &ty::Opaque(b_def_id, _)) if a_def_id == b_def_id => {
+ self.fields.infcx.super_combine_tys(self, a, b)?;
+ }
+ (&ty::Opaque(did, ..), _) | (_, &ty::Opaque(did, ..))
+ if self.fields.define_opaque_types && did.is_local() =>
+ {
+ self.fields.obligations.extend(
+ infcx
+ .handle_opaque_type(
+ a,
+ b,
+ self.a_is_expected(),
+ &self.fields.trace.cause,
+ self.param_env(),
+ )?
+ .obligations,
+ );
+ }
+
+ _ => {
+ self.fields.infcx.super_combine_tys(self, a, b)?;
+ }
+ }
+
+ Ok(a)
+ }
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug!("{}.regions({:?}, {:?})", self.tag(), a, b);
+ let origin = Subtype(Box::new(self.fields.trace.clone()));
+ self.fields
+ .infcx
+ .inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .make_eqregion(origin, a, b);
+ Ok(a)
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ self.fields.infcx.super_combine_consts(self, a, b)
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ if a.skip_binder().has_escaping_bound_vars() || b.skip_binder().has_escaping_bound_vars() {
+ self.fields.higher_ranked_sub(a, b, self.a_is_expected)?;
+ self.fields.higher_ranked_sub(b, a, self.a_is_expected)?;
+ } else {
+ // Fast path for the common case.
+ self.relate(a.skip_binder(), b.skip_binder())?;
+ }
+ Ok(a)
+ }
+}
+
+impl<'tcx> ConstEquateRelation<'tcx> for Equate<'_, '_, 'tcx> {
+ fn const_equate_obligation(&mut self, a: ty::Const<'tcx>, b: ty::Const<'tcx>) {
+ self.fields.add_const_equate_obligation(self.a_is_expected, a, b);
+ }
+}
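+
+// Illustrative note: callers in this crate reach `Equate` through
+// `CombineFields`; for example, `instantiate` in `combine.rs` performs an
+// `EqTo` relation roughly as:
+//
+//     fields.equate(a_is_expected).relate(a_ty, b_ty)?;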
diff --git a/compiler/rustc_infer/src/infer/error_reporting/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
new file mode 100644
index 000000000..20864c657
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/mod.rs
@@ -0,0 +1,3121 @@
+//! Error Reporting Code for the inference engine
+//!
+//! Because of the way inference, and in particular region inference,
+//! works, it often happens that errors are not detected until far after
+//! the relevant line of code has been type-checked. Therefore, there is
+//! an elaborate system to track why a particular constraint in the
+//! inference graph arose so that we can explain to the user what gave
+//! rise to a particular error.
+//!
+//! The system is based around a set of "origin" types. An "origin" is the
+//! reason that a constraint or inference variable arose. There are
+//! different "origin" enums for different kinds of constraints/variables
+//! (e.g., `TypeOrigin`, `RegionVariableOrigin`). An origin always has
+//! a span, but also more information so that we can generate a meaningful
+//! error message.
+//!
+//! Having a catalog of all the different reasons an error can arise is
+//! also useful for other reasons, like cross-referencing FAQs etc, though
+//! we are not really taking advantage of this yet.
+//!
+//! # Region Inference
+//!
+//! Region inference is particularly tricky because it always succeeds "in
+//! the moment" and simply registers a constraint. Then, at the end, we
+//! can compute the full graph and report errors, so we need to be able to
+//! store and later report what gave rise to the conflicting constraints.
+//!
+//! # Subtype Trace
+//!
+//! Determining whether `T1 <: T2` often involves a number of subtypes and
+//! subconstraints along the way. A "TypeTrace" is an extended version
+//! of an origin that traces the types and other values that were being
+//! compared. It is not necessarily comprehensive (in fact, at the time of
+//! this writing it only tracks the root values being compared) but I'd
+//! like to extend it to include significant "waypoints". For example, if
+//! you are comparing `(T1, T2) <: (T3, T4)`, and the problem is that `T2
+//! <: T4` fails, I'd like the trace to include enough information to say
+//! "in the 2nd element of the tuple". Similarly, failures when comparing
+//! arguments or return types in fn types should be able to cite the
+//! specific position, etc.
+//!
+//! # Reality vs plan
+//!
+//! Of course, there is still a LOT of code in typeck that has yet to be
+//! ported to this system, and which relies on string concatenation at the
+//! time of error detection.
+
+use super::lexical_region_resolve::RegionResolutionError;
+use super::region_constraints::GenericKind;
+use super::{InferCtxt, RegionVariableOrigin, SubregionOrigin, TypeTrace, ValuePairs};
+
+use crate::infer;
+use crate::infer::error_reporting::nice_region_error::find_anon_type::find_anon_type;
+use crate::traits::error_reporting::report_object_safety_error;
+use crate::traits::{
+ IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode,
+ StatementAsExpression,
+};
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{pluralize, struct_span_err, Diagnostic, ErrorGuaranteed};
+use rustc_errors::{Applicability, DiagnosticBuilder, DiagnosticStyledString, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::Node;
+use rustc_middle::dep_graph::DepContext;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{
+ self, error::TypeError, Binder, List, Region, Subst, Ty, TyCtxt, TypeFoldable,
+ TypeSuperVisitable, TypeVisitable,
+};
+use rustc_span::{sym, symbol::kw, BytePos, DesugaringKind, Pos, Span};
+use rustc_target::spec::abi;
+use std::ops::ControlFlow;
+use std::{cmp, fmt, iter};
+
+mod note;
+
+mod need_type_info;
+pub use need_type_info::TypeAnnotationNeeded;
+
+pub mod nice_region_error;
+
+pub(super) fn note_and_explain_region<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ err: &mut Diagnostic,
+ prefix: &str,
+ region: ty::Region<'tcx>,
+ suffix: &str,
+ alt_span: Option<Span>,
+) {
+ let (description, span) = match *region {
+ ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReStatic => {
+ msg_span_from_free_region(tcx, region, alt_span)
+ }
+
+ ty::ReEmpty(ty::UniverseIndex::ROOT) => ("the empty lifetime".to_owned(), alt_span),
+
+ // uh oh, hope no user ever sees THIS
+ ty::ReEmpty(ui) => (format!("the empty lifetime in universe {:?}", ui), alt_span),
+
+ ty::RePlaceholder(_) => return,
+
+ // FIXME(#13998) RePlaceholder should probably print like
+ // ReFree rather than dumping Debug output on the user.
+ //
+ // We shouldn't really be having unification failures with ReVar
+ // and ReLateBound though.
+ ty::ReVar(_) | ty::ReLateBound(..) | ty::ReErased => {
+ (format!("lifetime {:?}", region), alt_span)
+ }
+ };
+
+ emit_msg_span(err, prefix, description, span, suffix);
+}
+
+fn explain_free_region<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ err: &mut Diagnostic,
+ prefix: &str,
+ region: ty::Region<'tcx>,
+ suffix: &str,
+) {
+ let (description, span) = msg_span_from_free_region(tcx, region, None);
+
+ label_msg_span(err, prefix, description, span, suffix);
+}
+
+fn msg_span_from_free_region<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ region: ty::Region<'tcx>,
+ alt_span: Option<Span>,
+) -> (String, Option<Span>) {
+ match *region {
+ ty::ReEarlyBound(_) | ty::ReFree(_) => {
+ let (msg, span) = msg_span_from_early_bound_and_free_regions(tcx, region);
+ (msg, Some(span))
+ }
+ ty::ReStatic => ("the static lifetime".to_owned(), alt_span),
+ ty::ReEmpty(ty::UniverseIndex::ROOT) => ("an empty lifetime".to_owned(), alt_span),
+ ty::ReEmpty(ui) => (format!("an empty lifetime in universe {:?}", ui), alt_span),
+ _ => bug!("{:?}", region),
+ }
+}
+
+fn msg_span_from_early_bound_and_free_regions<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ region: ty::Region<'tcx>,
+) -> (String, Span) {
+ let scope = region.free_region_binding_scope(tcx).expect_local();
+ match *region {
+ ty::ReEarlyBound(ref br) => {
+ let mut sp = tcx.def_span(scope);
+ if let Some(param) =
+ tcx.hir().get_generics(scope).and_then(|generics| generics.get_named(br.name))
+ {
+ sp = param.span;
+ }
+ let text = if br.has_name() {
+ format!("the lifetime `{}` as defined here", br.name)
+ } else {
+                "the anonymous lifetime as defined here".to_string()
+ };
+ (text, sp)
+ }
+ ty::ReFree(ref fr) => {
+ if !fr.bound_region.is_named()
+ && let Some((ty, _)) = find_anon_type(tcx, region, &fr.bound_region)
+ {
+ ("the anonymous lifetime defined here".to_string(), ty.span)
+ } else {
+ match fr.bound_region {
+ ty::BoundRegionKind::BrNamed(_, name) => {
+ let mut sp = tcx.def_span(scope);
+ if let Some(param) =
+ tcx.hir().get_generics(scope).and_then(|generics| generics.get_named(name))
+ {
+ sp = param.span;
+ }
+ let text = if name == kw::UnderscoreLifetime {
+                            "the anonymous lifetime as defined here".to_string()
+ } else {
+ format!("the lifetime `{}` as defined here", name)
+ };
+ (text, sp)
+ }
+ ty::BrAnon(idx) => (
+ format!("the anonymous lifetime #{} defined here", idx + 1),
+ tcx.def_span(scope)
+ ),
+ _ => (
+ format!("the lifetime `{}` as defined here", region),
+ tcx.def_span(scope),
+ ),
+ }
+ }
+ }
+ _ => bug!(),
+ }
+}
+
+fn emit_msg_span(
+ err: &mut Diagnostic,
+ prefix: &str,
+ description: String,
+ span: Option<Span>,
+ suffix: &str,
+) {
+ let message = format!("{}{}{}", prefix, description, suffix);
+
+ if let Some(span) = span {
+ err.span_note(span, &message);
+ } else {
+ err.note(&message);
+ }
+}
+
+fn label_msg_span(
+ err: &mut Diagnostic,
+ prefix: &str,
+ description: String,
+ span: Option<Span>,
+ suffix: &str,
+) {
+ let message = format!("{}{}{}", prefix, description, suffix);
+
+ if let Some(span) = span {
+ err.span_label(span, &message);
+ } else {
+ err.note(&message);
+ }
+}
+
+pub fn unexpected_hidden_region_diagnostic<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ hidden_ty: Ty<'tcx>,
+ hidden_region: ty::Region<'tcx>,
+ opaque_ty: ty::OpaqueTypeKey<'tcx>,
+) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let opaque_ty = tcx.mk_opaque(opaque_ty.def_id.to_def_id(), opaque_ty.substs);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0700,
+ "hidden type for `{opaque_ty}` captures lifetime that does not appear in bounds",
+ );
+
+ // Explain the region we are capturing.
+ match *hidden_region {
+ ty::ReEmpty(ty::UniverseIndex::ROOT) => {
+ // All lifetimes shorter than the function body are `empty` in
+ // lexical region resolution. The default explanation of "an empty
+ // lifetime" isn't really accurate here.
+ let message = format!(
+ "hidden type `{}` captures lifetime smaller than the function body",
+ hidden_ty
+ );
+ err.span_note(span, &message);
+ }
+ ty::ReEarlyBound(_) | ty::ReFree(_) | ty::ReStatic | ty::ReEmpty(_) => {
+ // Assuming regionck succeeded (*), we ought to always be
+ // capturing *some* region from the fn header, and hence it
+ // ought to be free. So under normal circumstances, we will go
+ // down this path which gives a decent human readable
+ // explanation.
+ //
+ // (*) if not, the `tainted_by_errors` field would be set to
+ // `Some(ErrorGuaranteed)` in any case, so we wouldn't be here at all.
+ explain_free_region(
+ tcx,
+ &mut err,
+ &format!("hidden type `{}` captures ", hidden_ty),
+ hidden_region,
+ "",
+ );
+ if let Some(reg_info) = tcx.is_suitable_region(hidden_region) {
+ let fn_returns = tcx.return_type_impl_or_dyn_traits(reg_info.def_id);
+ nice_region_error::suggest_new_region_bound(
+ tcx,
+ &mut err,
+ fn_returns,
+ hidden_region.to_string(),
+ None,
+ format!("captures `{}`", hidden_region),
+ None,
+ )
+ }
+ }
+ _ => {
+ // Ugh. This is a painful case: the hidden region is not one
+ // that we can easily summarize or explain. This can happen
+ // in a case like
+ // `src/test/ui/multiple-lifetimes/ordinary-bounds-unsuited.rs`:
+ //
+ // ```
+ // fn upper_bounds<'a, 'b>(a: Ordinary<'a>, b: Ordinary<'b>) -> impl Trait<'a, 'b> {
+ // if condition() { a } else { b }
+ // }
+ // ```
+ //
+ // Here the captured lifetime is the intersection of `'a` and
+ // `'b`, which we can't quite express.
+
+ // We can at least report a really cryptic error for now.
+ note_and_explain_region(
+ tcx,
+ &mut err,
+ &format!("hidden type `{}` captures ", hidden_ty),
+ hidden_region,
+ "",
+ None,
+ );
+ }
+ }
+
+ err
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ pub fn report_region_errors(
+ &self,
+ generic_param_scope: LocalDefId,
+ errors: &[RegionResolutionError<'tcx>],
+ ) {
+ debug!("report_region_errors(): {} errors to start", errors.len());
+
+ // try to pre-process the errors, which will group some of them
+ // together into a `ProcessedErrors` group:
+ let errors = self.process_errors(errors);
+
+ debug!("report_region_errors: {} errors after preprocessing", errors.len());
+
+ for error in errors {
+ debug!("report_region_errors: error = {:?}", error);
+
+ if !self.try_report_nice_region_error(&error) {
+ match error.clone() {
+ // These errors could indicate all manner of different
+ // problems with many different solutions. Rather
+ // than generate a "one size fits all" error, what we
+ // attempt to do is go through a number of specific
+ // scenarios and try to find the best way to present
+                    // the error. If all of these fail, we fall back to a rather
+ // general bit of code that displays the error information
+ RegionResolutionError::ConcreteFailure(origin, sub, sup) => {
+ if sub.is_placeholder() || sup.is_placeholder() {
+ self.report_placeholder_failure(origin, sub, sup).emit();
+ } else {
+ self.report_concrete_failure(origin, sub, sup).emit();
+ }
+ }
+
+ RegionResolutionError::GenericBoundFailure(origin, param_ty, sub) => {
+ self.report_generic_bound_failure(
+ generic_param_scope,
+ origin.span(),
+ Some(origin),
+ param_ty,
+ sub,
+ );
+ }
+
+ RegionResolutionError::SubSupConflict(
+ _,
+ var_origin,
+ sub_origin,
+ sub_r,
+ sup_origin,
+ sup_r,
+ _,
+ ) => {
+ if sub_r.is_placeholder() {
+ self.report_placeholder_failure(sub_origin, sub_r, sup_r).emit();
+ } else if sup_r.is_placeholder() {
+ self.report_placeholder_failure(sup_origin, sub_r, sup_r).emit();
+ } else {
+ self.report_sub_sup_conflict(
+ var_origin, sub_origin, sub_r, sup_origin, sup_r,
+ );
+ }
+ }
+
+ RegionResolutionError::UpperBoundUniverseConflict(
+ _,
+ _,
+ var_universe,
+ sup_origin,
+ sup_r,
+ ) => {
+ assert!(sup_r.is_placeholder());
+
+ // Make a dummy value for the "sub region" --
+ // this is the initial value of the
+ // placeholder. In practice, we expect more
+ // tailored errors that don't really use this
+ // value.
+ let sub_r = self.tcx.mk_region(ty::ReEmpty(var_universe));
+
+ self.report_placeholder_failure(sup_origin, sub_r, sup_r).emit();
+ }
+ }
+ }
+ }
+ }
+
+    // This method goes through all the errors and tries to group certain types
+ // of error together, for the purpose of suggesting explicit lifetime
+ // parameters to the user. This is done so that we can have a more
+ // complete view of what lifetimes should be the same.
+ // If the return value is an empty vector, it means that processing
+ // failed (so the return value of this method should not be used).
+ //
+ // The method also attempts to weed out messages that seem like
+ // duplicates that will be unhelpful to the end-user. But
+ // obviously it never weeds out ALL errors.
+ fn process_errors(
+ &self,
+ errors: &[RegionResolutionError<'tcx>],
+ ) -> Vec<RegionResolutionError<'tcx>> {
+ debug!("process_errors()");
+
+ // We want to avoid reporting generic-bound failures if we can
+ // avoid it: these have a very high rate of being unhelpful in
+ // practice. This is because they are basically secondary
+ // checks that test the state of the region graph after the
+ // rest of inference is done, and the other kinds of errors
+ // indicate that the region constraint graph is internally
+ // inconsistent, so these test results are likely to be
+ // meaningless.
+ //
+ // Therefore, we filter them out of the list unless they are
+ // the only thing in the list.
+
+ let is_bound_failure = |e: &RegionResolutionError<'tcx>| match *e {
+ RegionResolutionError::GenericBoundFailure(..) => true,
+ RegionResolutionError::ConcreteFailure(..)
+ | RegionResolutionError::SubSupConflict(..)
+ | RegionResolutionError::UpperBoundUniverseConflict(..) => false,
+ };
+
+ let mut errors = if errors.iter().all(|e| is_bound_failure(e)) {
+ errors.to_owned()
+ } else {
+ errors.iter().filter(|&e| !is_bound_failure(e)).cloned().collect()
+ };
+
+ // sort the errors by span, for better error message stability.
+ errors.sort_by_key(|u| match *u {
+ RegionResolutionError::ConcreteFailure(ref sro, _, _) => sro.span(),
+ RegionResolutionError::GenericBoundFailure(ref sro, _, _) => sro.span(),
+ RegionResolutionError::SubSupConflict(_, ref rvo, _, _, _, _, _) => rvo.span(),
+ RegionResolutionError::UpperBoundUniverseConflict(_, ref rvo, _, _, _) => rvo.span(),
+ });
+ errors
+ }
+
+ /// Adds a note if the types come from similarly named crates
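+    ///
+    /// For example (roughly), when two versions of crate `foo` end up in the
+    /// dependency graph, an expected `foo::Bar` and a found `foo::Bar` print
+    /// identically; the added note, "perhaps two different versions of crate
+    /// `foo` are being used?", explains the otherwise baffling mismatch.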
+ fn check_and_note_conflicting_crates(&self, err: &mut Diagnostic, terr: &TypeError<'tcx>) {
+ use hir::def_id::CrateNum;
+ use rustc_hir::definitions::DisambiguatedDefPathData;
+ use ty::print::Printer;
+ use ty::subst::GenericArg;
+
+ struct AbsolutePathPrinter<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ }
+
+ struct NonTrivialPath;
+
+ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
+ type Error = NonTrivialPath;
+
+ type Path = Vec<String>;
+ type Region = !;
+ type Type = !;
+ type DynExistential = !;
+ type Const = !;
+
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
+ Err(NonTrivialPath)
+ }
+
+ fn print_type(self, _ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ Err(NonTrivialPath)
+ }
+
+ fn print_dyn_existential(
+ self,
+ _predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ Err(NonTrivialPath)
+ }
+
+ fn print_const(self, _ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+ Err(NonTrivialPath)
+ }
+
+ fn path_crate(self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+ Ok(vec![self.tcx.crate_name(cnum).to_string()])
+ }
+ fn path_qualified(
+ self,
+ _self_ty: Ty<'tcx>,
+ _trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ Err(NonTrivialPath)
+ }
+
+ fn path_append_impl(
+ self,
+ _print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _disambiguated_data: &DisambiguatedDefPathData,
+ _self_ty: Ty<'tcx>,
+ _trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ Err(NonTrivialPath)
+ }
+ fn path_append(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error> {
+ let mut path = print_prefix(self)?;
+ path.push(disambiguated_data.to_string());
+ Ok(path)
+ }
+ fn path_generic_args(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ print_prefix(self)
+ }
+ }
+
+ let report_path_match = |err: &mut Diagnostic, did1: DefId, did2: DefId| {
+ // Only external crates, if either is from a local
+ // module we could have false positives
+ if !(did1.is_local() || did2.is_local()) && did1.krate != did2.krate {
+ let abs_path =
+ |def_id| AbsolutePathPrinter { tcx: self.tcx }.print_def_path(def_id, &[]);
+
+ // We compare strings because DefPath can be different
+ // for imported and non-imported crates
+ let same_path = || -> Result<_, NonTrivialPath> {
+ Ok(self.tcx.def_path_str(did1) == self.tcx.def_path_str(did2)
+ || abs_path(did1)? == abs_path(did2)?)
+ };
+ if same_path().unwrap_or(false) {
+ let crate_name = self.tcx.crate_name(did1.krate);
+ err.note(&format!(
+ "perhaps two different versions of crate `{}` are being used?",
+ crate_name
+ ));
+ }
+ }
+ };
+ match *terr {
+ TypeError::Sorts(ref exp_found) => {
+ // if they are both "path types", there's a chance of ambiguity
+ // due to different versions of the same crate
+ if let (&ty::Adt(exp_adt, _), &ty::Adt(found_adt, _)) =
+ (exp_found.expected.kind(), exp_found.found.kind())
+ {
+ report_path_match(err, exp_adt.did(), found_adt.did());
+ }
+ }
+ TypeError::Traits(ref exp_found) => {
+ report_path_match(err, exp_found.expected, exp_found.found);
+ }
+ _ => (), // FIXME(#22750) handle traits and stuff
+ }
+ }
+
+ fn note_error_origin(
+ &self,
+ err: &mut Diagnostic,
+ cause: &ObligationCause<'tcx>,
+ exp_found: Option<ty::error::ExpectedFound<Ty<'tcx>>>,
+ terr: &TypeError<'tcx>,
+ ) {
+ match *cause.code() {
+ ObligationCauseCode::Pattern { origin_expr: true, span: Some(span), root_ty } => {
+ let ty = self.resolve_vars_if_possible(root_ty);
+ if !matches!(ty.kind(), ty::Infer(ty::InferTy::TyVar(_) | ty::InferTy::FreshTy(_)))
+ {
+ // don't show type `_`
+ if span.desugaring_kind() == Some(DesugaringKind::ForLoop)
+ && let ty::Adt(def, substs) = ty.kind()
+ && Some(def.did()) == self.tcx.get_diagnostic_item(sym::Option)
+ {
+ err.span_label(span, format!("this is an iterator with items of type `{}`", substs.type_at(0)));
+ } else {
+ err.span_label(span, format!("this expression has type `{}`", ty));
+ }
+ }
+ if let Some(ty::error::ExpectedFound { found, .. }) = exp_found
+ && ty.is_box() && ty.boxed_ty() == found
+ && let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span)
+ {
+ err.span_suggestion(
+ span,
+ "consider dereferencing the boxed value",
+ format!("*{}", snippet),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ ObligationCauseCode::Pattern { origin_expr: false, span: Some(span), .. } => {
+ err.span_label(span, "expected due to this");
+ }
+ ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
+ arm_block_id,
+ arm_span,
+ arm_ty,
+ prior_arm_block_id,
+ prior_arm_span,
+ prior_arm_ty,
+ source,
+ ref prior_arms,
+ scrut_hir_id,
+ opt_suggest_box_span,
+ scrut_span,
+ ..
+ }) => match source {
+ hir::MatchSource::TryDesugar => {
+ if let Some(ty::error::ExpectedFound { expected, .. }) = exp_found {
+ let scrut_expr = self.tcx.hir().expect_expr(scrut_hir_id);
+ let scrut_ty = if let hir::ExprKind::Call(_, args) = &scrut_expr.kind {
+ let arg_expr = args.first().expect("try desugaring call w/out arg");
+ self.in_progress_typeck_results.and_then(|typeck_results| {
+ typeck_results.borrow().expr_ty_opt(arg_expr)
+ })
+ } else {
+ bug!("try desugaring w/out call expr as scrutinee");
+ };
+
+ match scrut_ty {
+ Some(ty) if expected == ty => {
+ let source_map = self.tcx.sess.source_map();
+ err.span_suggestion(
+ source_map.end_point(cause.span),
+ "try removing this `?`",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ _ => {}
+ }
+ }
+ }
+ _ => {
+ // `prior_arm_ty` can be `!`, `expected` will have better info when present.
+ let t = self.resolve_vars_if_possible(match exp_found {
+ Some(ty::error::ExpectedFound { expected, .. }) => expected,
+ _ => prior_arm_ty,
+ });
+ let source_map = self.tcx.sess.source_map();
+ let mut any_multiline_arm = source_map.is_multiline(arm_span);
+ if prior_arms.len() <= 4 {
+ for sp in prior_arms {
+ any_multiline_arm |= source_map.is_multiline(*sp);
+ err.span_label(*sp, format!("this is found to be of type `{}`", t));
+ }
+ } else if let Some(sp) = prior_arms.last() {
+ any_multiline_arm |= source_map.is_multiline(*sp);
+ err.span_label(
+ *sp,
+ format!("this and all prior arms are found to be of type `{}`", t),
+ );
+ }
+ let outer_error_span = if any_multiline_arm {
+ // Cover just `match` and the scrutinee expression, not
+ // the entire match body, to reduce diagram noise.
+ cause.span.shrink_to_lo().to(scrut_span)
+ } else {
+ cause.span
+ };
+ let msg = "`match` arms have incompatible types";
+ err.span_label(outer_error_span, msg);
+ self.suggest_remove_semi_or_return_binding(
+ err,
+ prior_arm_block_id,
+ prior_arm_ty,
+ prior_arm_span,
+ arm_block_id,
+ arm_ty,
+ arm_span,
+ );
+ if let Some(ret_sp) = opt_suggest_box_span {
+ // Get return type span and point to it.
+ self.suggest_boxing_for_return_impl_trait(
+ err,
+ ret_sp,
+ prior_arms.iter().chain(std::iter::once(&arm_span)).map(|s| *s),
+ );
+ }
+ }
+ },
+ ObligationCauseCode::IfExpression(box IfExpressionCause {
+ then_id,
+ else_id,
+ then_ty,
+ else_ty,
+ outer_span,
+ opt_suggest_box_span,
+ }) => {
+ let then_span = self.find_block_span_from_hir_id(then_id);
+ let else_span = self.find_block_span_from_hir_id(else_id);
+ err.span_label(then_span, "expected because of this");
+ if let Some(sp) = outer_span {
+ err.span_label(sp, "`if` and `else` have incompatible types");
+ }
+ self.suggest_remove_semi_or_return_binding(
+ err,
+ Some(then_id),
+ then_ty,
+ then_span,
+ Some(else_id),
+ else_ty,
+ else_span,
+ );
+ if let Some(ret_sp) = opt_suggest_box_span {
+ self.suggest_boxing_for_return_impl_trait(
+ err,
+ ret_sp,
+ [then_span, else_span].into_iter(),
+ );
+ }
+ }
+ ObligationCauseCode::LetElse => {
+ err.help("try adding a diverging expression, such as `return` or `panic!(..)`");
+ err.help("...or use `match` instead of `let...else`");
+ }
+ _ => {
+ if let ObligationCauseCode::BindingObligation(_, binding_span) =
+ cause.code().peel_derives()
+ {
+ if matches!(terr, TypeError::RegionsPlaceholderMismatch) {
+ err.span_note(*binding_span, "the lifetime requirement is introduced here");
+ }
+ }
+ }
+ }
+ }
+
+ fn suggest_remove_semi_or_return_binding(
+ &self,
+ err: &mut Diagnostic,
+ first_id: Option<hir::HirId>,
+ first_ty: Ty<'tcx>,
+ first_span: Span,
+ second_id: Option<hir::HirId>,
+ second_ty: Ty<'tcx>,
+ second_span: Span,
+ ) {
+ let remove_semicolon = [
+ (first_id, self.resolve_vars_if_possible(second_ty)),
+ (second_id, self.resolve_vars_if_possible(first_ty)),
+ ]
+ .into_iter()
+ .find_map(|(id, ty)| {
+ let hir::Node::Block(blk) = self.tcx.hir().get(id?) else { return None };
+ self.could_remove_semicolon(blk, ty)
+ });
+ match remove_semicolon {
+ Some((sp, StatementAsExpression::NeedsBoxing)) => {
+ err.multipart_suggestion(
+ "consider removing this semicolon and boxing the expressions",
+ vec![
+ (first_span.shrink_to_lo(), "Box::new(".to_string()),
+ (first_span.shrink_to_hi(), ")".to_string()),
+ (second_span.shrink_to_lo(), "Box::new(".to_string()),
+ (second_span.shrink_to_hi(), ")".to_string()),
+ (sp, String::new()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ Some((sp, StatementAsExpression::CorrectType)) => {
+ err.span_suggestion_short(
+ sp,
+ "consider removing this semicolon",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ None => {
+ for (id, ty) in [(first_id, second_ty), (second_id, first_ty)] {
+ if let Some(id) = id
+ && let hir::Node::Block(blk) = self.tcx.hir().get(id)
+ && self.consider_returning_binding(blk, ty, err)
+ {
+ break;
+ }
+ }
+ }
+ }
+ }
+
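+    /// Suggests changing an `impl Trait` return type into a boxed trait
+    /// object and boxing each returned expression. A rough sketch of the
+    /// edit it proposes:
+    ///
+    /// ```ignore (illustrative)
+    /// fn f(b: bool) -> impl std::fmt::Display { if b { 1 } else { 2.0 } }
+    /// // is rewritten towards:
+    /// fn f(b: bool) -> Box<dyn std::fmt::Display> {
+    ///     if b { Box::new(1) } else { Box::new(2.0) }
+    /// }
+    /// ```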
+ fn suggest_boxing_for_return_impl_trait(
+ &self,
+ err: &mut Diagnostic,
+ return_sp: Span,
+ arm_spans: impl Iterator<Item = Span>,
+ ) {
+ err.multipart_suggestion(
+ "you could change the return type to be a boxed trait object",
+ vec![
+ (return_sp.with_hi(return_sp.lo() + BytePos(4)), "Box<dyn".to_string()),
+ (return_sp.shrink_to_hi(), ">".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ let sugg = arm_spans
+ .flat_map(|sp| {
+ [(sp.shrink_to_lo(), "Box::new(".to_string()), (sp.shrink_to_hi(), ")".to_string())]
+ .into_iter()
+ })
+ .collect::<Vec<_>>();
+ err.multipart_suggestion(
+ "if you change the return type to expect trait objects, box the returned expressions",
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ /// Given that `other_ty` is the same as a type argument for `name` in `sub`, populate `value`
+ /// highlighting `name` and every type argument that isn't at `pos` (which is `other_ty`), and
+ /// populate `other_value` with `other_ty`.
+ ///
+ /// ```text
+ /// Foo<Bar<Qux>>
+ /// ^^^^--------^ this is highlighted
+ /// | |
+ /// | this type argument is exactly the same as the other type, not highlighted
+ /// this is highlighted
+ /// Bar<Qux>
+ /// -------- this type is the same as a type argument in the other type, not highlighted
+ /// ```
+ fn highlight_outer(
+ &self,
+ value: &mut DiagnosticStyledString,
+ other_value: &mut DiagnosticStyledString,
+ name: String,
+ sub: ty::subst::SubstsRef<'tcx>,
+ pos: usize,
+ other_ty: Ty<'tcx>,
+ ) {
+        // `value` and `other_value` hold two incomplete type representations for display.
+ // `name` is the path of both types being compared. `sub`
+ value.push_highlighted(name);
+ let len = sub.len();
+ if len > 0 {
+ value.push_highlighted("<");
+ }
+
+ // Output the lifetimes for the first type
+ let lifetimes = sub
+ .regions()
+ .map(|lifetime| {
+ let s = lifetime.to_string();
+ if s.is_empty() { "'_".to_string() } else { s }
+ })
+ .collect::<Vec<_>>()
+ .join(", ");
+ if !lifetimes.is_empty() {
+ if sub.regions().count() < len {
+ value.push_normal(lifetimes + ", ");
+ } else {
+ value.push_normal(lifetimes);
+ }
+ }
+
+ // Highlight all the type arguments that aren't at `pos` and compare the type argument at
+ // `pos` and `other_ty`.
+ for (i, type_arg) in sub.types().enumerate() {
+ if i == pos {
+ let values = self.cmp(type_arg, other_ty);
+ value.0.extend((values.0).0);
+ other_value.0.extend((values.1).0);
+ } else {
+ value.push_highlighted(type_arg.to_string());
+ }
+
+ if len > 0 && i != len - 1 {
+ value.push_normal(", ");
+ }
+ }
+ if len > 0 {
+ value.push_highlighted(">");
+ }
+ }
+
+ /// If `other_ty` is the same as a type argument present in `sub`, highlight `path` in `t1_out`,
+ /// as that is the difference to the other type.
+ ///
+ /// For the following code:
+ ///
+ /// ```ignore (illustrative)
+ /// let x: Foo<Bar<Qux>> = foo::<Bar<Qux>>();
+ /// ```
+ ///
+ /// The type error output will behave in the following way:
+ ///
+ /// ```text
+ /// Foo<Bar<Qux>>
+ /// ^^^^--------^ this is highlighted
+ /// | |
+ /// | this type argument is exactly the same as the other type, not highlighted
+ /// this is highlighted
+ /// Bar<Qux>
+ /// -------- this type is the same as a type argument in the other type, not highlighted
+ /// ```
+ fn cmp_type_arg(
+ &self,
+ mut t1_out: &mut DiagnosticStyledString,
+ mut t2_out: &mut DiagnosticStyledString,
+ path: String,
+ sub: &'tcx [ty::GenericArg<'tcx>],
+ other_path: String,
+ other_ty: Ty<'tcx>,
+ ) -> Option<()> {
+ // FIXME/HACK: Go back to `SubstsRef` to use its inherent methods,
+ // ideally that shouldn't be necessary.
+ let sub = self.tcx.intern_substs(sub);
+ for (i, ta) in sub.types().enumerate() {
+ if ta == other_ty {
+ self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, other_ty);
+ return Some(());
+ }
+ if let ty::Adt(def, _) = ta.kind() {
+ let path_ = self.tcx.def_path_str(def.did());
+ if path_ == other_path {
+ self.highlight_outer(&mut t1_out, &mut t2_out, path, sub, i, other_ty);
+ return Some(());
+ }
+ }
+ }
+ None
+ }
+
+ /// Adds a `,` to the type representation only if it is appropriate.
+ fn push_comma(
+ &self,
+ value: &mut DiagnosticStyledString,
+ other_value: &mut DiagnosticStyledString,
+ len: usize,
+ pos: usize,
+ ) {
+ if len > 0 && pos != len - 1 {
+ value.push_normal(", ");
+ other_value.push_normal(", ");
+ }
+ }
+
+    /// Given two `fn` signatures, highlight only the sub-parts that differ.
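+    ///
+    /// A rough sketch of the intended output (illustrative; identical parameter types are
+    /// elided as `_` by `cmp`, and the exact rendering depends on the diagnostic emitter):
+    ///
+    /// ```text
+    /// unsafe fn(_) -> u8
+    /// ^^^^^^          ^^ highlighted: differs from the other signature
+    /// fn(_) -> i32
+    ///          ^^^
+    /// ```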
+ fn cmp_fn_sig(
+ &self,
+ sig1: &ty::PolyFnSig<'tcx>,
+ sig2: &ty::PolyFnSig<'tcx>,
+ ) -> (DiagnosticStyledString, DiagnosticStyledString) {
+ let get_lifetimes = |sig| {
+ use rustc_hir::def::Namespace;
+ let (_, sig, reg) = ty::print::FmtPrinter::new(self.tcx, Namespace::TypeNS)
+ .name_all_regions(sig)
+ .unwrap();
+ let lts: Vec<String> = reg.into_iter().map(|(_, kind)| kind.to_string()).collect();
+ (if lts.is_empty() { String::new() } else { format!("for<{}> ", lts.join(", ")) }, sig)
+ };
+
+ let (lt1, sig1) = get_lifetimes(sig1);
+ let (lt2, sig2) = get_lifetimes(sig2);
+
+ // unsafe extern "C" for<'a> fn(&'a T) -> &'a T
+ let mut values = (
+ DiagnosticStyledString::normal("".to_string()),
+ DiagnosticStyledString::normal("".to_string()),
+ );
+
+ // unsafe extern "C" for<'a> fn(&'a T) -> &'a T
+ // ^^^^^^
+ values.0.push(sig1.unsafety.prefix_str(), sig1.unsafety != sig2.unsafety);
+ values.1.push(sig2.unsafety.prefix_str(), sig1.unsafety != sig2.unsafety);
+
+ // unsafe extern "C" for<'a> fn(&'a T) -> &'a T
+ // ^^^^^^^^^^
+ if sig1.abi != abi::Abi::Rust {
+ values.0.push(format!("extern {} ", sig1.abi), sig1.abi != sig2.abi);
+ }
+ if sig2.abi != abi::Abi::Rust {
+ values.1.push(format!("extern {} ", sig2.abi), sig1.abi != sig2.abi);
+ }
+
+ // unsafe extern "C" for<'a> fn(&'a T) -> &'a T
+ // ^^^^^^^^
+ let lifetime_diff = lt1 != lt2;
+ values.0.push(lt1, lifetime_diff);
+ values.1.push(lt2, lifetime_diff);
+
+ // unsafe extern "C" for<'a> fn(&'a T) -> &'a T
+ // ^^^
+ values.0.push_normal("fn(");
+ values.1.push_normal("fn(");
+
+ // unsafe extern "C" for<'a> fn(&'a T) -> &'a T
+ // ^^^^^
+ let len1 = sig1.inputs().len();
+ let len2 = sig2.inputs().len();
+ if len1 == len2 {
+ for (i, (l, r)) in iter::zip(sig1.inputs(), sig2.inputs()).enumerate() {
+ let (x1, x2) = self.cmp(*l, *r);
+ (values.0).0.extend(x1.0);
+ (values.1).0.extend(x2.0);
+ self.push_comma(&mut values.0, &mut values.1, len1, i);
+ }
+ } else {
+ for (i, l) in sig1.inputs().iter().enumerate() {
+ values.0.push_highlighted(l.to_string());
+ if i != len1 - 1 {
+ values.0.push_highlighted(", ");
+ }
+ }
+ for (i, r) in sig2.inputs().iter().enumerate() {
+ values.1.push_highlighted(r.to_string());
+ if i != len2 - 1 {
+ values.1.push_highlighted(", ");
+ }
+ }
+ }
+
+ if sig1.c_variadic {
+ if len1 > 0 {
+ values.0.push_normal(", ");
+ }
+ values.0.push("...", !sig2.c_variadic);
+ }
+ if sig2.c_variadic {
+ if len2 > 0 {
+ values.1.push_normal(", ");
+ }
+ values.1.push("...", !sig1.c_variadic);
+ }
+
+ // unsafe extern "C" for<'a> fn(&'a T) -> &'a T
+ // ^
+ values.0.push_normal(")");
+ values.1.push_normal(")");
+
+ // unsafe extern "C" for<'a> fn(&'a T) -> &'a T
+ // ^^^^^^^^
+ let output1 = sig1.output();
+ let output2 = sig2.output();
+ let (x1, x2) = self.cmp(output1, output2);
+ if !output1.is_unit() {
+ values.0.push_normal(" -> ");
+ (values.0).0.extend(x1.0);
+ }
+ if !output2.is_unit() {
+ values.1.push_normal(" -> ");
+ (values.1).0.extend(x2.0);
+ }
+ values
+ }
+
+    /// Compares two given types, eliding parts that are the same between them and highlighting
+    /// relevant differences, and returns two representations of those types for highlighted printing.
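+    ///
+    /// For example (illustrative), comparing `Foo<i32, u32>` with `Foo<i32, i64>` produces
+    /// roughly:
+    ///
+    /// ```text
+    /// Foo<_, u32>
+    /// Foo<_, i64>
+    ///        ^^^ highlighted: the differing type argument; identical ones are elided as `_`
+    /// ```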
+ pub fn cmp(
+ &self,
+ t1: Ty<'tcx>,
+ t2: Ty<'tcx>,
+ ) -> (DiagnosticStyledString, DiagnosticStyledString) {
+ debug!("cmp(t1={}, t1.kind={:?}, t2={}, t2.kind={:?})", t1, t1.kind(), t2, t2.kind());
+
+ // helper functions
+ fn equals<'tcx>(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
+ match (a.kind(), b.kind()) {
+ (a, b) if *a == *b => true,
+ (&ty::Int(_), &ty::Infer(ty::InferTy::IntVar(_)))
+ | (
+ &ty::Infer(ty::InferTy::IntVar(_)),
+ &ty::Int(_) | &ty::Infer(ty::InferTy::IntVar(_)),
+ )
+ | (&ty::Float(_), &ty::Infer(ty::InferTy::FloatVar(_)))
+ | (
+ &ty::Infer(ty::InferTy::FloatVar(_)),
+ &ty::Float(_) | &ty::Infer(ty::InferTy::FloatVar(_)),
+ ) => true,
+ _ => false,
+ }
+ }
+
+ fn push_ty_ref<'tcx>(
+ region: ty::Region<'tcx>,
+ ty: Ty<'tcx>,
+ mutbl: hir::Mutability,
+ s: &mut DiagnosticStyledString,
+ ) {
+ let mut r = region.to_string();
+ if r == "'_" {
+ r.clear();
+ } else {
+ r.push(' ');
+ }
+ s.push_highlighted(format!("&{}{}", r, mutbl.prefix_str()));
+ s.push_normal(ty.to_string());
+ }
+
+ // process starts here
+ match (t1.kind(), t2.kind()) {
+ (&ty::Adt(def1, sub1), &ty::Adt(def2, sub2)) => {
+ let did1 = def1.did();
+ let did2 = def2.did();
+ let sub_no_defaults_1 =
+ self.tcx.generics_of(did1).own_substs_no_defaults(self.tcx, sub1);
+ let sub_no_defaults_2 =
+ self.tcx.generics_of(did2).own_substs_no_defaults(self.tcx, sub2);
+ let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
+ let path1 = self.tcx.def_path_str(did1);
+ let path2 = self.tcx.def_path_str(did2);
+ if did1 == did2 {
+ // Easy case. Replace same types with `_` to shorten the output and highlight
+ // the differing ones.
+ // let x: Foo<Bar, Qux> = y::<Foo<Quz, Qux>>();
+ // Foo<Bar, _>
+ // Foo<Quz, _>
+ // --- ^ type argument elided
+ // |
+ // highlighted in output
+ values.0.push_normal(path1);
+ values.1.push_normal(path2);
+
+ // Avoid printing out default generic parameters that are common to both
+ // types.
+ let len1 = sub_no_defaults_1.len();
+ let len2 = sub_no_defaults_2.len();
+ let common_len = cmp::min(len1, len2);
+ let remainder1: Vec<_> = sub1.types().skip(common_len).collect();
+ let remainder2: Vec<_> = sub2.types().skip(common_len).collect();
+ let common_default_params =
+ iter::zip(remainder1.iter().rev(), remainder2.iter().rev())
+ .filter(|(a, b)| a == b)
+ .count();
+ let len = sub1.len() - common_default_params;
+ let consts_offset = len - sub1.consts().count();
+
+ // Only draw `<...>` if there are lifetime/type arguments.
+ if len > 0 {
+ values.0.push_normal("<");
+ values.1.push_normal("<");
+ }
+
+ fn lifetime_display(lifetime: Region<'_>) -> String {
+ let s = lifetime.to_string();
+ if s.is_empty() { "'_".to_string() } else { s }
+ }
+                    // At some point we'd like to elide all lifetimes here, as they are
+                    // irrelevant for all diagnostics that use this output
+ //
+ // Foo<'x, '_, Bar>
+ // Foo<'y, '_, Qux>
+ // ^^ ^^ --- type arguments are not elided
+ // | |
+ // | elided as they were the same
+ // not elided, they were different, but irrelevant
+ //
+ // For bound lifetimes, keep the names of the lifetimes,
+ // even if they are the same so that it's clear what's happening
+ // if we have something like
+ //
+ // for<'r, 's> fn(Inv<'r>, Inv<'s>)
+ // for<'r> fn(Inv<'r>, Inv<'r>)
+ let lifetimes = sub1.regions().zip(sub2.regions());
+ for (i, lifetimes) in lifetimes.enumerate() {
+ let l1 = lifetime_display(lifetimes.0);
+ let l2 = lifetime_display(lifetimes.1);
+ if lifetimes.0 != lifetimes.1 {
+ values.0.push_highlighted(l1);
+ values.1.push_highlighted(l2);
+ } else if lifetimes.0.is_late_bound() {
+ values.0.push_normal(l1);
+ values.1.push_normal(l2);
+ } else {
+ values.0.push_normal("'_");
+ values.1.push_normal("'_");
+ }
+ self.push_comma(&mut values.0, &mut values.1, len, i);
+ }
+
+ // We're comparing two types with the same path, so we compare the type
+ // arguments for both. If they are the same, do not highlight and elide from the
+ // output.
+ // Foo<_, Bar>
+ // Foo<_, Qux>
+ // ^ elided type as this type argument was the same in both sides
+ let type_arguments = sub1.types().zip(sub2.types());
+ let regions_len = sub1.regions().count();
+ let num_display_types = consts_offset - regions_len;
+ for (i, (ta1, ta2)) in type_arguments.take(num_display_types).enumerate() {
+ let i = i + regions_len;
+ if ta1 == ta2 {
+ values.0.push_normal("_");
+ values.1.push_normal("_");
+ } else {
+ let (x1, x2) = self.cmp(ta1, ta2);
+ (values.0).0.extend(x1.0);
+ (values.1).0.extend(x2.0);
+ }
+ self.push_comma(&mut values.0, &mut values.1, len, i);
+ }
+
+ // Do the same for const arguments, if they are equal, do not highlight and
+ // elide them from the output.
+ let const_arguments = sub1.consts().zip(sub2.consts());
+ for (i, (ca1, ca2)) in const_arguments.enumerate() {
+ let i = i + consts_offset;
+ if ca1 == ca2 {
+ values.0.push_normal("_");
+ values.1.push_normal("_");
+ } else {
+ values.0.push_highlighted(ca1.to_string());
+ values.1.push_highlighted(ca2.to_string());
+ }
+ self.push_comma(&mut values.0, &mut values.1, len, i);
+ }
+
+ // Close the type argument bracket.
+ // Only draw `<...>` if there are lifetime/type arguments.
+ if len > 0 {
+ values.0.push_normal(">");
+ values.1.push_normal(">");
+ }
+ values
+ } else {
+ // Check for case:
+                    // let x: Foo<Bar<Qux>> = foo::<Bar<Qux>>();
+                    // Foo<Bar<Qux>>
+                    //     -------- this type argument is exactly the same as the other type
+ // Bar<Qux>
+ if self
+ .cmp_type_arg(
+ &mut values.0,
+ &mut values.1,
+ path1.clone(),
+ sub_no_defaults_1,
+ path2.clone(),
+ t2,
+ )
+ .is_some()
+ {
+ return values;
+ }
+ // Check for case:
+                    // let x: Bar<Qux> = y::<Foo<Bar<Qux>>>();
+ // Bar<Qux>
+ // Foo<Bar<Qux>>
+                    //     -------- this type argument is exactly the same as the other type
+ if self
+ .cmp_type_arg(
+ &mut values.1,
+ &mut values.0,
+ path2,
+ sub_no_defaults_2,
+ path1,
+ t1,
+ )
+ .is_some()
+ {
+ return values;
+ }
+
+ // We can't find anything in common, highlight relevant part of type path.
+                    // let x: foo::bar::Baz<Qux> = y::<foo::bar::Bar<Zar>>();
+ // foo::bar::Baz<Qux>
+ // foo::bar::Bar<Zar>
+ // -------- this part of the path is different
+
+ let t1_str = t1.to_string();
+ let t2_str = t2.to_string();
+ let min_len = t1_str.len().min(t2_str.len());
+
+ const SEPARATOR: &str = "::";
+ let separator_len = SEPARATOR.len();
+ let split_idx: usize =
+ iter::zip(t1_str.split(SEPARATOR), t2_str.split(SEPARATOR))
+ .take_while(|(mod1_str, mod2_str)| mod1_str == mod2_str)
+ .map(|(mod_str, _)| mod_str.len() + separator_len)
+ .sum();
+
+ debug!(
+ "cmp: separator_len={}, split_idx={}, min_len={}",
+ separator_len, split_idx, min_len
+ );
+
+ if split_idx >= min_len {
+ // paths are identical, highlight everything
+ (
+ DiagnosticStyledString::highlighted(t1_str),
+ DiagnosticStyledString::highlighted(t2_str),
+ )
+ } else {
+ let (common, uniq1) = t1_str.split_at(split_idx);
+ let (_, uniq2) = t2_str.split_at(split_idx);
+ debug!("cmp: common={}, uniq1={}, uniq2={}", common, uniq1, uniq2);
+
+ values.0.push_normal(common);
+ values.0.push_highlighted(uniq1);
+ values.1.push_normal(common);
+ values.1.push_highlighted(uniq2);
+
+ values
+ }
+ }
+ }
+
+ // When finding T != &T, highlight only the borrow
+ (&ty::Ref(r1, ref_ty1, mutbl1), _) if equals(ref_ty1, t2) => {
+ let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
+ push_ty_ref(r1, ref_ty1, mutbl1, &mut values.0);
+ values.1.push_normal(t2.to_string());
+ values
+ }
+ (_, &ty::Ref(r2, ref_ty2, mutbl2)) if equals(t1, ref_ty2) => {
+ let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
+ values.0.push_normal(t1.to_string());
+ push_ty_ref(r2, ref_ty2, mutbl2, &mut values.1);
+ values
+ }
+
+ // When encountering &T != &mut T, highlight only the borrow
+ (&ty::Ref(r1, ref_ty1, mutbl1), &ty::Ref(r2, ref_ty2, mutbl2))
+ if equals(ref_ty1, ref_ty2) =>
+ {
+ let mut values = (DiagnosticStyledString::new(), DiagnosticStyledString::new());
+ push_ty_ref(r1, ref_ty1, mutbl1, &mut values.0);
+ push_ty_ref(r2, ref_ty2, mutbl2, &mut values.1);
+ values
+ }
+
+ // When encountering tuples of the same size, highlight only the differing types
+ (&ty::Tuple(substs1), &ty::Tuple(substs2)) if substs1.len() == substs2.len() => {
+ let mut values =
+ (DiagnosticStyledString::normal("("), DiagnosticStyledString::normal("("));
+ let len = substs1.len();
+ for (i, (left, right)) in substs1.iter().zip(substs2).enumerate() {
+ let (x1, x2) = self.cmp(left, right);
+ (values.0).0.extend(x1.0);
+ (values.1).0.extend(x2.0);
+ self.push_comma(&mut values.0, &mut values.1, len, i);
+ }
+ if len == 1 {
+ // Keep the output for single element tuples as `(ty,)`.
+ values.0.push_normal(",");
+ values.1.push_normal(",");
+ }
+ values.0.push_normal(")");
+ values.1.push_normal(")");
+ values
+ }
+
+ (ty::FnDef(did1, substs1), ty::FnDef(did2, substs2)) => {
+ let sig1 = self.tcx.bound_fn_sig(*did1).subst(self.tcx, substs1);
+ let sig2 = self.tcx.bound_fn_sig(*did2).subst(self.tcx, substs2);
+ let mut values = self.cmp_fn_sig(&sig1, &sig2);
+ let path1 = format!(" {{{}}}", self.tcx.def_path_str_with_substs(*did1, substs1));
+ let path2 = format!(" {{{}}}", self.tcx.def_path_str_with_substs(*did2, substs2));
+ let same_path = path1 == path2;
+ values.0.push(path1, !same_path);
+ values.1.push(path2, !same_path);
+ values
+ }
+
+ (ty::FnDef(did1, substs1), ty::FnPtr(sig2)) => {
+ let sig1 = self.tcx.bound_fn_sig(*did1).subst(self.tcx, substs1);
+ let mut values = self.cmp_fn_sig(&sig1, sig2);
+ values.0.push_highlighted(format!(
+ " {{{}}}",
+ self.tcx.def_path_str_with_substs(*did1, substs1)
+ ));
+ values
+ }
+
+ (ty::FnPtr(sig1), ty::FnDef(did2, substs2)) => {
+ let sig2 = self.tcx.bound_fn_sig(*did2).subst(self.tcx, substs2);
+ let mut values = self.cmp_fn_sig(sig1, &sig2);
+ values.1.push_normal(format!(
+ " {{{}}}",
+ self.tcx.def_path_str_with_substs(*did2, substs2)
+ ));
+ values
+ }
+
+ (ty::FnPtr(sig1), ty::FnPtr(sig2)) => self.cmp_fn_sig(sig1, sig2),
+
+ _ => {
+ if t1 == t2 {
+ // The two types are the same, elide and don't highlight.
+ (DiagnosticStyledString::normal("_"), DiagnosticStyledString::normal("_"))
+ } else {
+ // We couldn't find anything in common, highlight everything.
+ (
+ DiagnosticStyledString::highlighted(t1.to_string()),
+ DiagnosticStyledString::highlighted(t2.to_string()),
+ )
+ }
+ }
+ }
+ }
+
+ /// Extend a type error with extra labels pointing at "non-trivial" types, like closures and
+ /// the return type of `async fn`s.
+ ///
+ /// `secondary_span` gives the caller the opportunity to expand `diag` with a `span_label`.
+ ///
+    /// `swap_secondary_and_primary` is used to make projection errors in particular nicer by
+    /// using the message in `secondary_span` as the primary label, and applying the message that
+    /// would otherwise be used for the primary label on the `secondary_span` `Span`. This applies
+    /// to E0271 errors, as in `src/test/ui/issues/issue-39970.stderr`.
+ #[tracing::instrument(
+ level = "debug",
+ skip(self, diag, secondary_span, swap_secondary_and_primary, force_label)
+ )]
+ pub fn note_type_err(
+ &self,
+ diag: &mut Diagnostic,
+ cause: &ObligationCause<'tcx>,
+ secondary_span: Option<(Span, String)>,
+ mut values: Option<ValuePairs<'tcx>>,
+ terr: &TypeError<'tcx>,
+ swap_secondary_and_primary: bool,
+ force_label: bool,
+ ) {
+ let span = cause.span();
+
+ // For some types of errors, expected-found does not make
+ // sense, so just ignore the values we were given.
+ if let TypeError::CyclicTy(_) = terr {
+ values = None;
+ }
+ struct OpaqueTypesVisitor<'tcx> {
+ types: FxHashMap<TyCategory, FxHashSet<Span>>,
+ expected: FxHashMap<TyCategory, FxHashSet<Span>>,
+ found: FxHashMap<TyCategory, FxHashSet<Span>>,
+ ignore_span: Span,
+ tcx: TyCtxt<'tcx>,
+ }
+
+ impl<'tcx> OpaqueTypesVisitor<'tcx> {
+ fn visit_expected_found(
+ tcx: TyCtxt<'tcx>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ignore_span: Span,
+ ) -> Self {
+ let mut types_visitor = OpaqueTypesVisitor {
+ types: Default::default(),
+ expected: Default::default(),
+ found: Default::default(),
+ ignore_span,
+ tcx,
+ };
+ // The visitor puts all the relevant encountered types in `self.types`, but in
+ // here we want to visit two separate types with no relation to each other, so we
+ // move the results from `types` to `expected` or `found` as appropriate.
+ expected.visit_with(&mut types_visitor);
+ std::mem::swap(&mut types_visitor.expected, &mut types_visitor.types);
+ found.visit_with(&mut types_visitor);
+ std::mem::swap(&mut types_visitor.found, &mut types_visitor.types);
+ types_visitor
+ }
+
+ fn report(&self, err: &mut Diagnostic) {
+ self.add_labels_for_types(err, "expected", &self.expected);
+ self.add_labels_for_types(err, "found", &self.found);
+ }
+
+ fn add_labels_for_types(
+ &self,
+ err: &mut Diagnostic,
+ target: &str,
+ types: &FxHashMap<TyCategory, FxHashSet<Span>>,
+ ) {
+ for (key, values) in types.iter() {
+ let count = values.len();
+ let kind = key.descr();
+ let mut returned_async_output_error = false;
+ for &sp in values {
+ if sp.is_desugaring(DesugaringKind::Async) && !returned_async_output_error {
+ if [sp] != err.span.primary_spans() {
+ let mut span: MultiSpan = sp.into();
+ span.push_span_label(
+ sp,
+ format!(
+ "checked the `Output` of this `async fn`, {}{} {}{}",
+ if count > 1 { "one of the " } else { "" },
+ target,
+ kind,
+ pluralize!(count),
+ ),
+ );
+ err.span_note(
+ span,
+ "while checking the return type of the `async fn`",
+ );
+ } else {
+ err.span_label(
+ sp,
+ format!(
+ "checked the `Output` of this `async fn`, {}{} {}{}",
+ if count > 1 { "one of the " } else { "" },
+ target,
+ kind,
+ pluralize!(count),
+ ),
+ );
+ err.note("while checking the return type of the `async fn`");
+ }
+ returned_async_output_error = true;
+ } else {
+ err.span_label(
+ sp,
+ format!(
+ "{}{} {}{}",
+ if count == 1 { "the " } else { "one of the " },
+ target,
+ kind,
+ pluralize!(count),
+ ),
+ );
+ }
+ }
+ }
+ }
+ }
+
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for OpaqueTypesVisitor<'tcx> {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let Some((kind, def_id)) = TyCategory::from_ty(self.tcx, t) {
+ let span = self.tcx.def_span(def_id);
+ // Avoid cluttering the output when the "found" and error span overlap:
+ //
+ // error[E0308]: mismatched types
+ // --> $DIR/issue-20862.rs:2:5
+ // |
+ // LL | |y| x + y
+ // | ^^^^^^^^^
+ // | |
+ // | the found closure
+ // | expected `()`, found closure
+ // |
+ // = note: expected unit type `()`
+ // found closure `[closure@$DIR/issue-20862.rs:2:5: 2:14 x:_]`
+ if !self.ignore_span.overlaps(span) {
+ self.types.entry(kind).or_default().insert(span);
+ }
+ }
+ t.super_visit_with(self)
+ }
+ }
+
+ debug!("note_type_err(diag={:?})", diag);
+ enum Mismatch<'a> {
+ Variable(ty::error::ExpectedFound<Ty<'a>>),
+ Fixed(&'static str),
+ }
+ let (expected_found, exp_found, is_simple_error, values) = match values {
+ None => (None, Mismatch::Fixed("type"), false, None),
+ Some(values) => {
+ let values = self.resolve_vars_if_possible(values);
+ let (is_simple_error, exp_found) = match values {
+ ValuePairs::Terms(infer::ExpectedFound {
+ expected: ty::Term::Ty(expected),
+ found: ty::Term::Ty(found),
+ }) => {
+ let is_simple_err = expected.is_simple_text() && found.is_simple_text();
+ OpaqueTypesVisitor::visit_expected_found(self.tcx, expected, found, span)
+ .report(diag);
+
+ (
+ is_simple_err,
+ Mismatch::Variable(infer::ExpectedFound { expected, found }),
+ )
+ }
+ ValuePairs::TraitRefs(_) | ValuePairs::PolyTraitRefs(_) => {
+ (false, Mismatch::Fixed("trait"))
+ }
+ _ => (false, Mismatch::Fixed("type")),
+ };
+ let vals = match self.values_str(values) {
+ Some((expected, found)) => Some((expected, found)),
+ None => {
+ // Derived error. Cancel the emitter.
+ // NOTE(eddyb) this was `.cancel()`, but `diag`
+ // is borrowed, so we can't fully defuse it.
+ diag.downgrade_to_delayed_bug();
+ return;
+ }
+ };
+ (vals, exp_found, is_simple_error, Some(values))
+ }
+ };
+
+ match terr {
+ // Ignore msg for object safe coercion
+ // since E0038 message will be printed
+ TypeError::ObjectUnsafeCoercion(_) => {}
+ _ => {
+ let mut label_or_note = |span: Span, msg: &str| {
+ if force_label || &[span] == diag.span.primary_spans() {
+ diag.span_label(span, msg);
+ } else {
+ diag.span_note(span, msg);
+ }
+ };
+ if let Some((sp, msg)) = secondary_span {
+ if swap_secondary_and_primary {
+ let terr = if let Some(infer::ValuePairs::Terms(infer::ExpectedFound {
+ expected,
+ ..
+ })) = values
+ {
+ format!("expected this to be `{}`", expected)
+ } else {
+ terr.to_string()
+ };
+ label_or_note(sp, &terr);
+ label_or_note(span, &msg);
+ } else {
+ label_or_note(span, &terr.to_string());
+ label_or_note(sp, &msg);
+ }
+ } else {
+ label_or_note(span, &terr.to_string());
+ }
+ }
+ };
+ if let Some((expected, found)) = expected_found {
+ let (expected_label, found_label, exp_found) = match exp_found {
+ Mismatch::Variable(ef) => (
+ ef.expected.prefix_string(self.tcx),
+ ef.found.prefix_string(self.tcx),
+ Some(ef),
+ ),
+ Mismatch::Fixed(s) => (s.into(), s.into(), None),
+ };
+ match (&terr, expected == found) {
+ (TypeError::Sorts(values), extra) => {
+ let sort_string = |ty: Ty<'tcx>| match (extra, ty.kind()) {
+ (true, ty::Opaque(def_id, _)) => {
+ let sm = self.tcx.sess.source_map();
+ let pos = sm.lookup_char_pos(self.tcx.def_span(*def_id).lo());
+ format!(
+ " (opaque type at <{}:{}:{}>)",
+ sm.filename_for_diagnostics(&pos.file.name),
+ pos.line,
+ pos.col.to_usize() + 1,
+ )
+ }
+ (true, _) => format!(" ({})", ty.sort_string(self.tcx)),
+ (false, _) => "".to_string(),
+ };
+ if !(values.expected.is_simple_text() && values.found.is_simple_text())
+ || (exp_found.map_or(false, |ef| {
+ // This happens when the type error is a subset of the expectation,
+ // like when you have two references but one is `usize` and the other
+ // is `f32`. In those cases we still want to show the `note`. If the
+ // value from `ef` is `Infer(_)`, then we ignore it.
+ if !ef.expected.is_ty_infer() {
+ ef.expected != values.expected
+ } else if !ef.found.is_ty_infer() {
+ ef.found != values.found
+ } else {
+ false
+ }
+ }))
+ {
+ diag.note_expected_found_extra(
+ &expected_label,
+ expected,
+ &found_label,
+ found,
+ &sort_string(values.expected),
+ &sort_string(values.found),
+ );
+ }
+ }
+ (TypeError::ObjectUnsafeCoercion(_), _) => {
+ diag.note_unsuccessful_coercion(found, expected);
+ }
+ (_, _) => {
+ debug!(
+ "note_type_err: exp_found={:?}, expected={:?} found={:?}",
+ exp_found, expected, found
+ );
+ if !is_simple_error || terr.must_include_note() {
+ diag.note_expected_found(&expected_label, expected, &found_label, found);
+ }
+ }
+ }
+ }
+ let exp_found = match exp_found {
+ Mismatch::Variable(exp_found) => Some(exp_found),
+ Mismatch::Fixed(_) => None,
+ };
+ let exp_found = match terr {
+ // `terr` has more accurate type information than `exp_found` in match expressions.
+ ty::error::TypeError::Sorts(terr)
+ if exp_found.map_or(false, |ef| terr.found == ef.found) =>
+ {
+ Some(*terr)
+ }
+ _ => exp_found,
+ };
+ debug!("exp_found {:?} terr {:?} cause.code {:?}", exp_found, terr, cause.code());
+ if let Some(exp_found) = exp_found {
+ let should_suggest_fixes =
+ if let ObligationCauseCode::Pattern { root_ty, .. } = cause.code() {
+ // Skip if the root_ty of the pattern is not the same as the expected_ty.
+ // If these types aren't equal then we've probably peeled off a layer of arrays.
+ self.same_type_modulo_infer(*root_ty, exp_found.expected)
+ } else {
+ true
+ };
+
+ if should_suggest_fixes {
+ self.suggest_tuple_pattern(cause, &exp_found, diag);
+ self.suggest_as_ref_where_appropriate(span, &exp_found, diag);
+ self.suggest_accessing_field_where_appropriate(cause, &exp_found, diag);
+ self.suggest_await_on_expect_found(cause, span, &exp_found, diag);
+ }
+ }
+
+        // In some (most?) cases `cause.body_id` points to an actual body, but in some cases
+        // it's the definition itself. According to the comments (e.g. in
+        // librustc_typeck/check/compare_method.rs:compare_predicate_entailment) the latter
+        // is relied upon by some other code. This might (or might not) need cleanup.
+ let body_owner_def_id =
+ self.tcx.hir().opt_local_def_id(cause.body_id).unwrap_or_else(|| {
+ self.tcx.hir().body_owner_def_id(hir::BodyId { hir_id: cause.body_id })
+ });
+ self.check_and_note_conflicting_crates(diag, terr);
+ self.tcx.note_and_explain_type_err(diag, terr, cause, span, body_owner_def_id.to_def_id());
+
+ if let Some(ValuePairs::PolyTraitRefs(exp_found)) = values
+ && let ty::Closure(def_id, _) = exp_found.expected.skip_binder().self_ty().kind()
+ && let Some(def_id) = def_id.as_local()
+ {
+ let span = self.tcx.def_span(def_id);
+ diag.span_note(span, "this closure does not fulfill the lifetime requirements");
+ }
+
+ // It reads better to have the error origin as the final
+ // thing.
+ self.note_error_origin(diag, cause, exp_found, terr);
+
+ debug!(?diag);
+ }
+
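+    /// Suggests wrapping the pattern in the single-field tuple variant of the expected enum
+    /// when that variant's field type matches the found type. An illustrative case that should
+    /// trigger this suggestion:
+    ///
+    /// ```ignore (illustrative)
+    /// fn foo(x: Option<i32>) {
+    ///     match x {
+    ///         1 => {} // expected `Option<i32>`, found integer; suggests the pattern `Some(1)`
+    ///         _ => {}
+    ///     }
+    /// }
+    /// ```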
+ fn suggest_tuple_pattern(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
+ diag: &mut Diagnostic,
+ ) {
+ // Heavily inspired by `FnCtxt::suggest_compatible_variants`, with
+ // some modifications due to that being in typeck and this being in infer.
+ if let ObligationCauseCode::Pattern { .. } = cause.code() {
+ if let ty::Adt(expected_adt, substs) = exp_found.expected.kind() {
+ let compatible_variants: Vec<_> = expected_adt
+ .variants()
+ .iter()
+ .filter(|variant| {
+ variant.fields.len() == 1 && variant.ctor_kind == hir::def::CtorKind::Fn
+ })
+ .filter_map(|variant| {
+ let sole_field = &variant.fields[0];
+ let sole_field_ty = sole_field.ty(self.tcx, substs);
+ if self.same_type_modulo_infer(sole_field_ty, exp_found.found) {
+ let variant_path =
+ with_no_trimmed_paths!(self.tcx.def_path_str(variant.def_id));
+ // FIXME #56861: DRYer prelude filtering
+ if let Some(path) = variant_path.strip_prefix("std::prelude::") {
+ if let Some((_, path)) = path.split_once("::") {
+ return Some(path.to_string());
+ }
+ }
+ Some(variant_path)
+ } else {
+ None
+ }
+ })
+ .collect();
+ match &compatible_variants[..] {
+ [] => {}
+ [variant] => {
+ diag.multipart_suggestion_verbose(
+ &format!("try wrapping the pattern in `{}`", variant),
+ vec![
+ (cause.span.shrink_to_lo(), format!("{}(", variant)),
+ (cause.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {
+ // More than one matching variant.
+ diag.multipart_suggestions(
+ &format!(
+ "try wrapping the pattern in a variant of `{}`",
+ self.tcx.def_path_str(expected_adt.did())
+ ),
+ compatible_variants.into_iter().map(|variant| {
+ vec![
+ (cause.span.shrink_to_lo(), format!("{}(", variant)),
+ (cause.span.shrink_to_hi(), ")".to_string()),
+ ]
+ }),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+ }
+
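+    /// Returns the `Output` type promised by the `Future` bound of an opaque type, if any.
+    /// An illustrative case:
+    ///
+    /// ```ignore (illustrative)
+    /// async fn make_u32() -> u32 { 22 }
+    /// // The return type above is an opaque `impl Future<Output = u32>`;
+    /// // for that type, this function returns `Some(u32)` (in its binder).
+    /// ```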
+ pub fn get_impl_future_output_ty(&self, ty: Ty<'tcx>) -> Option<Binder<'tcx, Ty<'tcx>>> {
+ if let ty::Opaque(def_id, substs) = ty.kind() {
+ let future_trait = self.tcx.require_lang_item(LangItem::Future, None);
+ // Future::Output
+ let item_def_id = self.tcx.associated_item_def_ids(future_trait)[0];
+
+ let bounds = self.tcx.bound_explicit_item_bounds(*def_id);
+
+ for predicate in bounds.transpose_iter().map(|e| e.map_bound(|(p, _)| *p)) {
+ let predicate = predicate.subst(self.tcx, substs);
+ let output = predicate
+ .kind()
+ .map_bound(|kind| match kind {
+ ty::PredicateKind::Projection(projection_predicate)
+ if projection_predicate.projection_ty.item_def_id == item_def_id =>
+ {
+ projection_predicate.term.ty()
+ }
+ _ => None,
+ })
+ .transpose();
+ if output.is_some() {
+ // We don't account for multiple `Future::Output = Ty` constraints.
+ return output;
+ }
+ }
+ }
+ None
+ }
+
+ /// A possible error is to forget to add `.await` when using futures:
+ ///
+ /// ```compile_fail,E0308
+ /// async fn make_u32() -> u32 {
+ /// 22
+ /// }
+ ///
+ /// fn take_u32(x: u32) {}
+ ///
+ /// async fn foo() {
+ /// let x = make_u32();
+ /// take_u32(x);
+ /// }
+ /// ```
+ ///
+ /// This routine checks if the found type `T` implements `Future<Output=U>` where `U` is the
+ /// expected type. If this is the case, and we are inside of an async body, it suggests adding
+ /// `.await` to the tail of the expression.
+ fn suggest_await_on_expect_found(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ exp_span: Span,
+ exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
+ diag: &mut Diagnostic,
+ ) {
+ debug!(
+ "suggest_await_on_expect_found: exp_span={:?}, expected_ty={:?}, found_ty={:?}",
+ exp_span, exp_found.expected, exp_found.found,
+ );
+
+ if let ObligationCauseCode::CompareImplItemObligation { .. } = cause.code() {
+ return;
+ }
+
+ match (
+ self.get_impl_future_output_ty(exp_found.expected).map(Binder::skip_binder),
+ self.get_impl_future_output_ty(exp_found.found).map(Binder::skip_binder),
+ ) {
+ (Some(exp), Some(found)) if self.same_type_modulo_infer(exp, found) => match cause
+ .code()
+ {
+ ObligationCauseCode::IfExpression(box IfExpressionCause { then_id, .. }) => {
+ let then_span = self.find_block_span_from_hir_id(*then_id);
+ diag.multipart_suggestion(
+ "consider `await`ing on both `Future`s",
+ vec![
+ (then_span.shrink_to_hi(), ".await".to_string()),
+ (exp_span.shrink_to_hi(), ".await".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
+ prior_arms,
+ ..
+ }) => {
+ if let [.., arm_span] = &prior_arms[..] {
+ diag.multipart_suggestion(
+ "consider `await`ing on both `Future`s",
+ vec![
+ (arm_span.shrink_to_hi(), ".await".to_string()),
+ (exp_span.shrink_to_hi(), ".await".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ diag.help("consider `await`ing on both `Future`s");
+ }
+ }
+ _ => {
+ diag.help("consider `await`ing on both `Future`s");
+ }
+ },
+ (_, Some(ty)) if self.same_type_modulo_infer(exp_found.expected, ty) => {
+ diag.span_suggestion_verbose(
+ exp_span.shrink_to_hi(),
+ "consider `await`ing on the `Future`",
+ ".await",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ (Some(ty), _) if self.same_type_modulo_infer(ty, exp_found.found) => match cause.code()
+ {
+ ObligationCauseCode::Pattern { span: Some(then_span), .. } => {
+ diag.span_suggestion_verbose(
+ then_span.shrink_to_hi(),
+ "consider `await`ing on the `Future`",
+ ".await",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ ObligationCauseCode::IfExpression(box IfExpressionCause { then_id, .. }) => {
+ let then_span = self.find_block_span_from_hir_id(*then_id);
+ diag.span_suggestion_verbose(
+ then_span.shrink_to_hi(),
+ "consider `await`ing on the `Future`",
+ ".await",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
+ ref prior_arms,
+ ..
+ }) => {
+ diag.multipart_suggestion_verbose(
+ "consider `await`ing on the `Future`",
+ prior_arms
+ .iter()
+ .map(|arm| (arm.shrink_to_hi(), ".await".to_string()))
+ .collect(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ }
+
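+    /// When the scrutinee of a pattern is a struct with a field whose type matches the
+    /// pattern's type, suggests accessing that field. An illustrative case that should trigger
+    /// this suggestion:
+    ///
+    /// ```ignore (illustrative)
+    /// struct A { b: bool }
+    /// fn foo(a: A) {
+    ///     if let true = a {} // expected `A`, found `bool`; suggests matching on `a.b`
+    /// }
+    /// ```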
+ fn suggest_accessing_field_where_appropriate(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
+ diag: &mut Diagnostic,
+ ) {
+ debug!(
+ "suggest_accessing_field_where_appropriate(cause={:?}, exp_found={:?})",
+ cause, exp_found
+ );
+ if let ty::Adt(expected_def, expected_substs) = exp_found.expected.kind() {
+ if expected_def.is_enum() {
+ return;
+ }
+
+ if let Some((name, ty)) = expected_def
+ .non_enum_variant()
+ .fields
+ .iter()
+ .filter(|field| field.vis.is_accessible_from(field.did, self.tcx))
+ .map(|field| (field.name, field.ty(self.tcx, expected_substs)))
+ .find(|(_, ty)| self.same_type_modulo_infer(*ty, exp_found.found))
+ {
+ if let ObligationCauseCode::Pattern { span: Some(span), .. } = *cause.code() {
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ let suggestion = if expected_def.is_struct() {
+ format!("{}.{}", snippet, name)
+ } else if expected_def.is_union() {
+ format!("unsafe {{ {}.{} }}", snippet, name)
+ } else {
+ return;
+ };
+ diag.span_suggestion(
+ span,
+ &format!(
+ "you might have meant to use field `{}` whose type is `{}`",
+ name, ty
+ ),
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+ }
+
+ /// When encountering a case where `.as_ref()` on a `Result` or `Option` would be appropriate,
+ /// suggests it.
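+    ///
+    /// An illustrative case that should trigger this suggestion:
+    ///
+    /// ```ignore (illustrative)
+    /// fn foo(opt: &Option<String>) -> Option<&String> {
+    ///     opt // expected `Option<&String>`, found `&Option<String>`; suggests `opt.as_ref()`
+    /// }
+    /// ```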
+ fn suggest_as_ref_where_appropriate(
+ &self,
+ span: Span,
+ exp_found: &ty::error::ExpectedFound<Ty<'tcx>>,
+ diag: &mut Diagnostic,
+ ) {
+ if let (ty::Adt(exp_def, exp_substs), ty::Ref(_, found_ty, _)) =
+ (exp_found.expected.kind(), exp_found.found.kind())
+ {
+ if let ty::Adt(found_def, found_substs) = *found_ty.kind() {
+ let path_str = format!("{:?}", exp_def);
+ if exp_def == &found_def {
+ let opt_msg = "you can convert from `&Option<T>` to `Option<&T>` using \
+ `.as_ref()`";
+ let result_msg = "you can convert from `&Result<T, E>` to \
+ `Result<&T, &E>` using `.as_ref()`";
+ let have_as_ref = &[
+ ("std::option::Option", opt_msg),
+ ("core::option::Option", opt_msg),
+ ("std::result::Result", result_msg),
+ ("core::result::Result", result_msg),
+ ];
+ if let Some(msg) = have_as_ref
+ .iter()
+ .find_map(|(path, msg)| (&path_str == path).then_some(msg))
+ {
+ let mut show_suggestion = true;
+ for (exp_ty, found_ty) in
+ iter::zip(exp_substs.types(), found_substs.types())
+ {
+ match *exp_ty.kind() {
+ ty::Ref(_, exp_ty, _) => {
+ match (exp_ty.kind(), found_ty.kind()) {
+ (_, ty::Param(_))
+ | (_, ty::Infer(_))
+ | (ty::Param(_), _)
+ | (ty::Infer(_), _) => {}
+ _ if self.same_type_modulo_infer(exp_ty, found_ty) => {}
+ _ => show_suggestion = false,
+ };
+ }
+ ty::Param(_) | ty::Infer(_) => {}
+ _ => show_suggestion = false,
+ }
+ }
+ if let (Ok(snippet), true) =
+ (self.tcx.sess.source_map().span_to_snippet(span), show_suggestion)
+ {
+ diag.span_suggestion(
+ span,
+ *msg,
+ format!("{}.as_ref()", snippet),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ pub fn report_and_explain_type_error(
+ &self,
+ trace: TypeTrace<'tcx>,
+ terr: &TypeError<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ use crate::traits::ObligationCauseCode::MatchExpressionArm;
+
+ debug!("report_and_explain_type_error(trace={:?}, terr={:?})", trace, terr);
+
+ let span = trace.cause.span();
+ let failure_code = trace.cause.as_failure_code(terr);
+ let mut diag = match failure_code {
+ FailureCode::Error0038(did) => {
+ let violations = self.tcx.object_safety_violations(did);
+ report_object_safety_error(self.tcx, span, did, violations)
+ }
+ FailureCode::Error0317(failure_str) => {
+ struct_span_err!(self.tcx.sess, span, E0317, "{}", failure_str)
+ }
+ FailureCode::Error0580(failure_str) => {
+ struct_span_err!(self.tcx.sess, span, E0580, "{}", failure_str)
+ }
+ FailureCode::Error0308(failure_str) => {
+ let mut err = struct_span_err!(self.tcx.sess, span, E0308, "{}", failure_str);
+ if let Some((expected, found)) = trace.values.ty() {
+ match (expected.kind(), found.kind()) {
+ (ty::Tuple(_), ty::Tuple(_)) => {}
+ // If a tuple of length one was expected and the found expression has
+ // parentheses around it, perhaps the user meant to write `(expr,)` to
+ // build a tuple (issue #86100)
+ (ty::Tuple(fields), _) => {
+ self.emit_tuple_wrap_err(&mut err, span, found, fields)
+ }
+ // If a character was expected and the found expression is a string literal
+ // containing a single character, perhaps the user meant to write `'c'` to
+ // specify a character literal (issue #92479)
+ (ty::Char, ty::Ref(_, r, _)) if r.is_str() => {
+ if let Ok(code) = self.tcx.sess().source_map().span_to_snippet(span)
+ && let Some(code) = code.strip_prefix('"').and_then(|s| s.strip_suffix('"'))
+ && code.chars().count() == 1
+ {
+ err.span_suggestion(
+ span,
+ "if you meant to write a `char` literal, use single quotes",
+ format!("'{}'", code),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ // If a string was expected and the found expression is a character literal,
+ // perhaps the user meant to write `"s"` to specify a string literal.
+ (ty::Ref(_, r, _), ty::Char) if r.is_str() => {
+ if let Ok(code) = self.tcx.sess().source_map().span_to_snippet(span) {
+ if let Some(code) =
+ code.strip_prefix('\'').and_then(|s| s.strip_suffix('\''))
+ {
+ err.span_suggestion(
+ span,
+ "if you meant to write a `str` literal, use double quotes",
+ format!("\"{}\"", code),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ let code = trace.cause.code();
+ if let &MatchExpressionArm(box MatchExpressionArmCause { source, .. }) = code
+ && let hir::MatchSource::TryDesugar = source
+ && let Some((expected_ty, found_ty)) = self.values_str(trace.values)
+ {
+ err.note(&format!(
+ "`?` operator cannot convert from `{}` to `{}`",
+ found_ty.content(),
+ expected_ty.content(),
+ ));
+ }
+ err
+ }
+ FailureCode::Error0644(failure_str) => {
+ struct_span_err!(self.tcx.sess, span, E0644, "{}", failure_str)
+ }
+ };
+ self.note_type_err(&mut diag, &trace.cause, None, Some(trace.values), terr, false, false);
+ diag
+ }
+
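+    /// Suggests using a trailing comma when a one-element tuple is expected and the found
+    /// expression is already parenthesized (issue #86100). An illustrative case:
+    ///
+    /// ```ignore (illustrative)
+    /// fn foo() -> (i32,) {
+    ///     (5) // expected `(i32,)`, found `i32`; suggests writing `(5,)`
+    /// }
+    /// ```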
+ fn emit_tuple_wrap_err(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ found: Ty<'tcx>,
+ expected_fields: &List<Ty<'tcx>>,
+ ) {
+ let [expected_tup_elem] = expected_fields[..] else { return };
+
+ if !self.same_type_modulo_infer(expected_tup_elem, found) {
+ return;
+ }
+
+ let Ok(code) = self.tcx.sess().source_map().span_to_snippet(span)
+ else { return };
+
+ let msg = "use a trailing comma to create a tuple with one element";
+ if code.starts_with('(') && code.ends_with(')') {
+ let before_close = span.hi() - BytePos::from_u32(1);
+ err.span_suggestion(
+ span.with_hi(before_close).shrink_to_hi(),
+ msg,
+ ",",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.multipart_suggestion(
+ msg,
+ vec![(span.shrink_to_lo(), "(".into()), (span.shrink_to_hi(), ",)".into())],
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+
+ fn values_str(
+ &self,
+ values: ValuePairs<'tcx>,
+ ) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> {
+ match values {
+ infer::Regions(exp_found) => self.expected_found_str(exp_found),
+ infer::Terms(exp_found) => self.expected_found_str_term(exp_found),
+ infer::TraitRefs(exp_found) => {
+ let pretty_exp_found = ty::error::ExpectedFound {
+ expected: exp_found.expected.print_only_trait_path(),
+ found: exp_found.found.print_only_trait_path(),
+ };
+ match self.expected_found_str(pretty_exp_found) {
+ Some((expected, found)) if expected == found => {
+ self.expected_found_str(exp_found)
+ }
+ ret => ret,
+ }
+ }
+ infer::PolyTraitRefs(exp_found) => {
+ let pretty_exp_found = ty::error::ExpectedFound {
+ expected: exp_found.expected.print_only_trait_path(),
+ found: exp_found.found.print_only_trait_path(),
+ };
+ match self.expected_found_str(pretty_exp_found) {
+ Some((expected, found)) if expected == found => {
+ self.expected_found_str(exp_found)
+ }
+ ret => ret,
+ }
+ }
+ }
+ }
+
+ fn expected_found_str_term(
+ &self,
+ exp_found: ty::error::ExpectedFound<ty::Term<'tcx>>,
+ ) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> {
+ let exp_found = self.resolve_vars_if_possible(exp_found);
+ if exp_found.references_error() {
+ return None;
+ }
+
+ Some(match (exp_found.expected, exp_found.found) {
+ (ty::Term::Ty(expected), ty::Term::Ty(found)) => self.cmp(expected, found),
+ (expected, found) => (
+ DiagnosticStyledString::highlighted(expected.to_string()),
+ DiagnosticStyledString::highlighted(found.to_string()),
+ ),
+ })
+ }
+
+ /// Returns a string of the form "expected `{}`, found `{}`".
+ fn expected_found_str<T: fmt::Display + TypeFoldable<'tcx>>(
+ &self,
+ exp_found: ty::error::ExpectedFound<T>,
+ ) -> Option<(DiagnosticStyledString, DiagnosticStyledString)> {
+ let exp_found = self.resolve_vars_if_possible(exp_found);
+ if exp_found.references_error() {
+ return None;
+ }
+
+ Some((
+ DiagnosticStyledString::highlighted(exp_found.expected.to_string()),
+ DiagnosticStyledString::highlighted(exp_found.found.to_string()),
+ ))
+ }
+
+ pub fn report_generic_bound_failure(
+ &self,
+ generic_param_scope: LocalDefId,
+ span: Span,
+ origin: Option<SubregionOrigin<'tcx>>,
+ bound_kind: GenericKind<'tcx>,
+ sub: Region<'tcx>,
+ ) {
+ self.construct_generic_bound_failure(generic_param_scope, span, origin, bound_kind, sub)
+ .emit();
+ }
+
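+    /// Builds the `E0309`/`E0310`/`E0311` "may not live long enough" diagnostics. For example
+    /// (illustrative), the following should report E0310 with the suggestion `T: 'static`:
+    ///
+    /// ```ignore (illustrative)
+    /// fn leak<T>(x: T) -> &'static T {
+    ///     Box::leak(Box::new(x))
+    /// }
+    /// ```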
+ pub fn construct_generic_bound_failure(
+ &self,
+ generic_param_scope: LocalDefId,
+ span: Span,
+ origin: Option<SubregionOrigin<'tcx>>,
+ bound_kind: GenericKind<'tcx>,
+ sub: Region<'tcx>,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ // Attempt to obtain the span of the parameter so we can
+ // suggest adding an explicit lifetime bound to it.
+ let generics = self.tcx.generics_of(generic_param_scope);
+ // type_param_span is (span, has_bounds)
+ let type_param_span = match bound_kind {
+ GenericKind::Param(ref param) => {
+ // Account for the case where `param` corresponds to `Self`,
+ // which doesn't have the expected type argument.
+ if !(generics.has_self && param.index == 0) {
+ let type_param = generics.type_param(param, self.tcx);
+ type_param.def_id.as_local().map(|def_id| {
+ // Get the `hir::Param` to verify whether it already has any bounds.
+                        // We do this to avoid suggesting code that ends up as `T: 'a'b`;
+ // instead we suggest `T: 'a + 'b` in that case.
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let ast_generics = self.tcx.hir().get_generics(hir_id.owner);
+ let bounds =
+ ast_generics.and_then(|g| g.bounds_span_for_suggestions(def_id));
+                        // `sp` only covers `T`; change it so that it covers
+ // `T:` when appropriate
+ if let Some(span) = bounds {
+ (span, true)
+ } else {
+ let sp = self.tcx.def_span(def_id);
+ (sp.shrink_to_hi(), false)
+ }
+ })
+ } else {
+ None
+ }
+ }
+ _ => None,
+ };
+
+ let new_lt = {
+ let mut possible = (b'a'..=b'z').map(|c| format!("'{}", c as char));
+ let lts_names =
+ iter::successors(Some(generics), |g| g.parent.map(|p| self.tcx.generics_of(p)))
+ .flat_map(|g| &g.params)
+ .filter(|p| matches!(p.kind, ty::GenericParamDefKind::Lifetime))
+ .map(|p| p.name.as_str())
+ .collect::<Vec<_>>();
+ possible
+ .find(|candidate| !lts_names.contains(&&candidate[..]))
+ .unwrap_or("'lt".to_string())
+ };
+
+ let add_lt_sugg = generics
+ .params
+ .first()
+ .and_then(|param| param.def_id.as_local())
+ .map(|def_id| (self.tcx.def_span(def_id).shrink_to_lo(), format!("{}, ", new_lt)));
+
+ let labeled_user_string = match bound_kind {
+ GenericKind::Param(ref p) => format!("the parameter type `{}`", p),
+ GenericKind::Projection(ref p) => format!("the associated type `{}`", p),
+ };
+
+ if let Some(SubregionOrigin::CompareImplItemObligation {
+ span,
+ impl_item_def_id,
+ trait_item_def_id,
+ }) = origin
+ {
+ return self.report_extra_impl_obligation(
+ span,
+ impl_item_def_id,
+ trait_item_def_id,
+ &format!("`{}: {}`", bound_kind, sub),
+ );
+ }
+
+ fn binding_suggestion<'tcx, S: fmt::Display>(
+ err: &mut Diagnostic,
+ type_param_span: Option<(Span, bool)>,
+ bound_kind: GenericKind<'tcx>,
+ sub: S,
+ ) {
+ let msg = "consider adding an explicit lifetime bound";
+ if let Some((sp, has_lifetimes)) = type_param_span {
+ let suggestion =
+ if has_lifetimes { format!(" + {}", sub) } else { format!(": {}", sub) };
+ err.span_suggestion_verbose(
+ sp,
+ &format!("{}...", msg),
+ suggestion,
+ Applicability::MaybeIncorrect, // Issue #41966
+ );
+ } else {
+ let consider = format!("{} `{}: {}`...", msg, bound_kind, sub,);
+ err.help(&consider);
+ }
+ }
+
+ let new_binding_suggestion =
+ |err: &mut Diagnostic, type_param_span: Option<(Span, bool)>| {
+ let msg = "consider introducing an explicit lifetime bound";
+ if let Some((sp, has_lifetimes)) = type_param_span {
+ let suggestion = if has_lifetimes {
+ format!(" + {}", new_lt)
+ } else {
+ format!(": {}", new_lt)
+ };
+ let mut sugg =
+ vec![(sp, suggestion), (span.shrink_to_hi(), format!(" + {}", new_lt))];
+ if let Some(lt) = add_lt_sugg {
+ sugg.push(lt);
+ sugg.rotate_right(1);
+ }
+ // `MaybeIncorrect` due to issue #41966.
+ err.multipart_suggestion(msg, sugg, Applicability::MaybeIncorrect);
+ }
+ };
+
+ #[derive(Debug)]
+ enum SubOrigin<'hir> {
+ GAT(&'hir hir::Generics<'hir>),
+ Impl,
+ Trait,
+ Fn,
+ Unknown,
+ }
+ let sub_origin = 'origin: {
+ match *sub {
+ ty::ReEarlyBound(ty::EarlyBoundRegion { def_id, .. }) => {
+ let node = self.tcx.hir().get_if_local(def_id).unwrap();
+ match node {
+ Node::GenericParam(param) => {
+ for h in self.tcx.hir().parent_iter(param.hir_id) {
+ break 'origin match h.1 {
+ Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::TyAlias(..),
+ generics,
+ ..
+ })
+ | Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Type(..),
+ generics,
+ ..
+ }) => SubOrigin::GAT(generics),
+ Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(..),
+ ..
+ })
+ | Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(..),
+ ..
+ })
+ | Node::Item(hir::Item {
+ kind: hir::ItemKind::Fn(..), ..
+ }) => SubOrigin::Fn,
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(..),
+ ..
+ }) => SubOrigin::Trait,
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(..), ..
+ }) => SubOrigin::Impl,
+ _ => continue,
+ };
+ }
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+ SubOrigin::Unknown
+ };
+ debug!(?sub_origin);
+
+ let mut err = match (*sub, sub_origin) {
+        // In the case of GATs, we have to be careful. If we have a type parameter `T` on an impl,
+ // but a lifetime `'a` on an associated type, then we might need to suggest adding
+ // `where T: 'a`. Importantly, this is on the GAT span, not on the `T` declaration.
+ (ty::ReEarlyBound(ty::EarlyBoundRegion { name: _, .. }), SubOrigin::GAT(generics)) => {
+ // Does the required lifetime have a nice name we can print?
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0309,
+ "{} may not live long enough",
+ labeled_user_string
+ );
+ let pred = format!("{}: {}", bound_kind, sub);
+ let suggestion = format!("{} {}", generics.add_where_or_trailing_comma(), pred,);
+ err.span_suggestion(
+ generics.tail_span_for_predicate_suggestion(),
+ "consider adding a where clause",
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ err
+ }
+ (
+ ty::ReEarlyBound(ty::EarlyBoundRegion { name, .. })
+ | ty::ReFree(ty::FreeRegion { bound_region: ty::BrNamed(_, name), .. }),
+ _,
+ ) if name != kw::UnderscoreLifetime => {
+ // Does the required lifetime have a nice name we can print?
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0309,
+ "{} may not live long enough",
+ labeled_user_string
+ );
+ // Explicitly use the name instead of `sub`'s `Display` impl. The `Display` impl
+ // for the bound is not suitable for suggestions when `-Zverbose` is set because it
+ // uses `Debug` output, so we handle it specially here so that suggestions are
+ // always correct.
+ binding_suggestion(&mut err, type_param_span, bound_kind, name);
+ err
+ }
+
+ (ty::ReStatic, _) => {
+ // Does the required lifetime have a nice name we can print?
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0310,
+ "{} may not live long enough",
+ labeled_user_string
+ );
+ binding_suggestion(&mut err, type_param_span, bound_kind, "'static");
+ err
+ }
+
+ _ => {
+ // If not, be less specific.
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0311,
+ "{} may not live long enough",
+ labeled_user_string
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ &format!("{} must be valid for ", labeled_user_string),
+ sub,
+ "...",
+ None,
+ );
+ if let Some(infer::RelateParamBound(_, t, _)) = origin {
+ let return_impl_trait =
+ self.tcx.return_type_impl_trait(generic_param_scope).is_some();
+ let t = self.resolve_vars_if_possible(t);
+ match t.kind() {
+ // We've got:
+ // fn get_later<G, T>(g: G, dest: &mut T) -> impl FnOnce() + '_
+ // suggest:
+ // fn get_later<'a, G: 'a, T>(g: G, dest: &mut T) -> impl FnOnce() + '_ + 'a
+ ty::Closure(_, _substs) | ty::Opaque(_, _substs) if return_impl_trait => {
+ new_binding_suggestion(&mut err, type_param_span);
+ }
+ _ => {
+ binding_suggestion(&mut err, type_param_span, bound_kind, new_lt);
+ }
+ }
+ }
+ err
+ }
+ };
+
+ if let Some(origin) = origin {
+ self.note_region_origin(&mut err, &origin);
+ }
+ err
+ }
+
+ fn report_sub_sup_conflict(
+ &self,
+ var_origin: RegionVariableOrigin,
+ sub_origin: SubregionOrigin<'tcx>,
+ sub_region: Region<'tcx>,
+ sup_origin: SubregionOrigin<'tcx>,
+ sup_region: Region<'tcx>,
+ ) {
+ let mut err = self.report_inference_failure(var_origin);
+
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "first, the lifetime cannot outlive ",
+ sup_region,
+ "...",
+ None,
+ );
+
+ debug!("report_sub_sup_conflict: var_origin={:?}", var_origin);
+ debug!("report_sub_sup_conflict: sub_region={:?}", sub_region);
+ debug!("report_sub_sup_conflict: sub_origin={:?}", sub_origin);
+ debug!("report_sub_sup_conflict: sup_region={:?}", sup_region);
+ debug!("report_sub_sup_conflict: sup_origin={:?}", sup_origin);
+
+ if let (&infer::Subtype(ref sup_trace), &infer::Subtype(ref sub_trace)) =
+ (&sup_origin, &sub_origin)
+ {
+ debug!("report_sub_sup_conflict: sup_trace={:?}", sup_trace);
+ debug!("report_sub_sup_conflict: sub_trace={:?}", sub_trace);
+ debug!("report_sub_sup_conflict: sup_trace.values={:?}", sup_trace.values);
+ debug!("report_sub_sup_conflict: sub_trace.values={:?}", sub_trace.values);
+
+ if let (Some((sup_expected, sup_found)), Some((sub_expected, sub_found))) =
+ (self.values_str(sup_trace.values), self.values_str(sub_trace.values))
+ {
+ if sub_expected == sup_expected && sub_found == sup_found {
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "...but the lifetime must also be valid for ",
+ sub_region,
+ "...",
+ None,
+ );
+ err.span_note(
+ sup_trace.cause.span,
+ &format!("...so that the {}", sup_trace.cause.as_requirement_str()),
+ );
+
+ err.note_expected_found(&"", sup_expected, &"", sup_found);
+ err.emit();
+ return;
+ }
+ }
+ }
+
+ self.note_region_origin(&mut err, &sup_origin);
+
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "but, the lifetime must be valid for ",
+ sub_region,
+ "...",
+ None,
+ );
+
+ self.note_region_origin(&mut err, &sub_origin);
+ err.emit();
+ }
+
+ /// Determine whether an error associated with the given span and definition
+ /// should be treated as being caused by the implicit `From` conversion
+ /// within `?` desugaring.
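+    ///
+    /// An illustrative case: the `?` below desugars to a `From::from` call on the
+    /// `ParseIntError`, and since `String: From<ParseIntError>` does not hold, the resulting
+    /// error is attributed to that implicit conversion:
+    ///
+    /// ```ignore (illustrative)
+    /// fn parse() -> Result<i32, String> {
+    ///     Ok("3x".parse::<i32>()?)
+    /// }
+    /// ```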
+ pub fn is_try_conversion(&self, span: Span, trait_def_id: DefId) -> bool {
+ span.is_desugaring(DesugaringKind::QuestionMark)
+ && self.tcx.is_diagnostic_item(sym::From, trait_def_id)
+ }
+
+ /// Structurally compares two types, modulo any inference variables.
+ ///
+ /// Returns `true` if two types are equal, or if one type is an inference variable compatible
+    /// with the other type. A TyVar inference type is compatible with any type, and IntVar or
+    /// FloatVar inference types are compatible with themselves or their concrete types (Int and
+ /// Float types, respectively). When comparing two ADTs, these rules apply recursively.
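+    ///
+    /// Illustrative examples (`{integer}` stands for an `IntVar`, `_` for a `TyVar`):
+    ///
+    /// ```text
+    /// i32        vs {integer}  => true  (IntVar is compatible with any `Int`)
+    /// Vec<_>     vs Vec<u32>   => true  (TyVar is compatible with any type)
+    /// &i32       vs &mut i32   => false (mutability differs)
+    /// (i32, f32) vs (i32, f64) => false (second element differs)
+    /// ```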
+ pub fn same_type_modulo_infer(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
+ let (a, b) = self.resolve_vars_if_possible((a, b));
+ match (a.kind(), b.kind()) {
+ (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b)) => {
+ if def_a != def_b {
+ return false;
+ }
+
+ substs_a
+ .types()
+ .zip(substs_b.types())
+ .all(|(a, b)| self.same_type_modulo_infer(a, b))
+ }
+ (&ty::FnDef(did_a, substs_a), &ty::FnDef(did_b, substs_b)) => {
+ if did_a != did_b {
+ return false;
+ }
+
+ substs_a
+ .types()
+ .zip(substs_b.types())
+ .all(|(a, b)| self.same_type_modulo_infer(a, b))
+ }
+ (&ty::Int(_) | &ty::Uint(_), &ty::Infer(ty::InferTy::IntVar(_)))
+ | (
+ &ty::Infer(ty::InferTy::IntVar(_)),
+ &ty::Int(_) | &ty::Uint(_) | &ty::Infer(ty::InferTy::IntVar(_)),
+ )
+ | (&ty::Float(_), &ty::Infer(ty::InferTy::FloatVar(_)))
+ | (
+ &ty::Infer(ty::InferTy::FloatVar(_)),
+ &ty::Float(_) | &ty::Infer(ty::InferTy::FloatVar(_)),
+ )
+ | (&ty::Infer(ty::InferTy::TyVar(_)), _)
+ | (_, &ty::Infer(ty::InferTy::TyVar(_))) => true,
+ (&ty::Ref(_, ty_a, mut_a), &ty::Ref(_, ty_b, mut_b)) => {
+ mut_a == mut_b && self.same_type_modulo_infer(ty_a, ty_b)
+ }
+ (&ty::RawPtr(a), &ty::RawPtr(b)) => {
+ a.mutbl == b.mutbl && self.same_type_modulo_infer(a.ty, b.ty)
+ }
+ (&ty::Slice(a), &ty::Slice(b)) => self.same_type_modulo_infer(a, b),
+ (&ty::Array(a_ty, a_ct), &ty::Array(b_ty, b_ct)) => {
+ self.same_type_modulo_infer(a_ty, b_ty) && a_ct == b_ct
+ }
+ (&ty::Tuple(a), &ty::Tuple(b)) => {
+ if a.len() != b.len() {
+ return false;
+ }
+ std::iter::zip(a.iter(), b.iter()).all(|(a, b)| self.same_type_modulo_infer(a, b))
+ }
+ (&ty::FnPtr(a), &ty::FnPtr(b)) => {
+ let a = a.skip_binder().inputs_and_output;
+ let b = b.skip_binder().inputs_and_output;
+ if a.len() != b.len() {
+ return false;
+ }
+ std::iter::zip(a.iter(), b.iter()).all(|(a, b)| self.same_type_modulo_infer(a, b))
+ }
+ // FIXME(compiler-errors): This needs to be generalized more
+ _ => a == b,
+ }
+ }
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ fn report_inference_failure(
+ &self,
+ var_origin: RegionVariableOrigin,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let br_string = |br: ty::BoundRegionKind| {
+ let mut s = match br {
+ ty::BrNamed(_, name) => name.to_string(),
+ _ => String::new(),
+ };
+ if !s.is_empty() {
+ s.push(' ');
+ }
+ s
+ };
+ let var_description = match var_origin {
+ infer::MiscVariable(_) => String::new(),
+ infer::PatternRegion(_) => " for pattern".to_string(),
+ infer::AddrOfRegion(_) => " for borrow expression".to_string(),
+ infer::Autoref(_) => " for autoref".to_string(),
+ infer::Coercion(_) => " for automatic coercion".to_string(),
+ infer::LateBoundRegion(_, br, infer::FnCall) => {
+ format!(" for lifetime parameter {}in function call", br_string(br))
+ }
+ infer::LateBoundRegion(_, br, infer::HigherRankedType) => {
+ format!(" for lifetime parameter {}in generic type", br_string(br))
+ }
+ infer::LateBoundRegion(_, br, infer::AssocTypeProjection(def_id)) => format!(
+ " for lifetime parameter {}in trait containing associated type `{}`",
+ br_string(br),
+ self.tcx.associated_item(def_id).name
+ ),
+ infer::EarlyBoundRegion(_, name) => format!(" for lifetime parameter `{}`", name),
+ infer::UpvarRegion(ref upvar_id, _) => {
+ let var_name = self.tcx.hir().name(upvar_id.var_path.hir_id);
+ format!(" for capture of `{}` by closure", var_name)
+ }
+ infer::Nll(..) => bug!("NLL variable found in lexical phase"),
+ };
+
+ struct_span_err!(
+ self.tcx.sess,
+ var_origin.span(),
+ E0495,
+ "cannot infer an appropriate lifetime{} due to conflicting requirements",
+ var_description
+ )
+ }
+}
+
+pub enum FailureCode {
+ Error0038(DefId),
+ Error0317(&'static str),
+ Error0580(&'static str),
+ Error0308(&'static str),
+ Error0644(&'static str),
+}
+
+pub trait ObligationCauseExt<'tcx> {
+ fn as_failure_code(&self, terr: &TypeError<'tcx>) -> FailureCode;
+ fn as_requirement_str(&self) -> &'static str;
+}
+
+impl<'tcx> ObligationCauseExt<'tcx> for ObligationCause<'tcx> {
+ fn as_failure_code(&self, terr: &TypeError<'tcx>) -> FailureCode {
+ use self::FailureCode::*;
+ use crate::traits::ObligationCauseCode::*;
+ match self.code() {
+ CompareImplItemObligation { kind: ty::AssocKind::Fn, .. } => {
+ Error0308("method not compatible with trait")
+ }
+ CompareImplItemObligation { kind: ty::AssocKind::Type, .. } => {
+ Error0308("type not compatible with trait")
+ }
+ CompareImplItemObligation { kind: ty::AssocKind::Const, .. } => {
+ Error0308("const not compatible with trait")
+ }
+ MatchExpressionArm(box MatchExpressionArmCause { source, .. }) => {
+ Error0308(match source {
+ hir::MatchSource::TryDesugar => "`?` operator has incompatible types",
+ _ => "`match` arms have incompatible types",
+ })
+ }
+ IfExpression { .. } => Error0308("`if` and `else` have incompatible types"),
+ IfExpressionWithNoElse => Error0317("`if` may be missing an `else` clause"),
+ LetElse => Error0308("`else` clause of `let...else` does not diverge"),
+ MainFunctionType => Error0580("`main` function has wrong type"),
+ StartFunctionType => Error0308("`#[start]` function has wrong type"),
+ IntrinsicType => Error0308("intrinsic has wrong type"),
+ MethodReceiver => Error0308("mismatched `self` parameter type"),
+
+ // In the case where we have no more specific thing to
+ // say, also take a look at the error code, maybe we can
+ // tailor to that.
+ _ => match terr {
+ TypeError::CyclicTy(ty) if ty.is_closure() || ty.is_generator() => {
+ Error0644("closure/generator type that references itself")
+ }
+ TypeError::IntrinsicCast => {
+ Error0308("cannot coerce intrinsics to function pointers")
+ }
+ TypeError::ObjectUnsafeCoercion(did) => Error0038(*did),
+ _ => Error0308("mismatched types"),
+ },
+ }
+ }
+
+ fn as_requirement_str(&self) -> &'static str {
+ use crate::traits::ObligationCauseCode::*;
+ match self.code() {
+ CompareImplItemObligation { kind: ty::AssocKind::Fn, .. } => {
+ "method type is compatible with trait"
+ }
+ CompareImplItemObligation { kind: ty::AssocKind::Type, .. } => {
+ "associated type is compatible with trait"
+ }
+ CompareImplItemObligation { kind: ty::AssocKind::Const, .. } => {
+ "const is compatible with trait"
+ }
+ ExprAssignable => "expression is assignable",
+ IfExpression { .. } => "`if` and `else` have incompatible types",
+ IfExpressionWithNoElse => "`if` missing an `else` returns `()`",
+ MainFunctionType => "`main` function has the correct type",
+ StartFunctionType => "`#[start]` function has the correct type",
+ IntrinsicType => "intrinsic has the correct type",
+ MethodReceiver => "method receiver has the correct type",
+ _ => "types are compatible",
+ }
+ }
+}
+
+/// This is a bare signal of what kind of type we're dealing with. `ty::TyKind` tracks
+/// extra information about each type, but we only care about the category.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub enum TyCategory {
+ Closure,
+ Opaque,
+ Generator(hir::GeneratorKind),
+ Foreign,
+}
+
+impl TyCategory {
+ fn descr(&self) -> &'static str {
+ match self {
+ Self::Closure => "closure",
+ Self::Opaque => "opaque type",
+ Self::Generator(gk) => gk.descr(),
+ Self::Foreign => "foreign type",
+ }
+ }
+
+ pub fn from_ty(tcx: TyCtxt<'_>, ty: Ty<'_>) -> Option<(Self, DefId)> {
+ match *ty.kind() {
+ ty::Closure(def_id, _) => Some((Self::Closure, def_id)),
+ ty::Opaque(def_id, _) => Some((Self::Opaque, def_id)),
+ ty::Generator(def_id, ..) => {
+ Some((Self::Generator(tcx.generator_kind(def_id).unwrap()), def_id))
+ }
+ ty::Foreign(def_id) => Some((Self::Foreign, def_id)),
+ _ => None,
+ }
+ }
+}
+
+impl<'tcx> InferCtxt<'_, 'tcx> {
+ /// Given a [`hir::Block`], get the span of its last expression or
+ /// statement, peeling off any inner blocks.
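+ ///
+ /// For example (a sketch), given `{ { let x = 1; x } }` this peels the
+ /// outer block and returns the span of the inner tail expression `x`.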
+ pub fn find_block_span(&self, block: &'tcx hir::Block<'tcx>) -> Span {
+ let block = block.innermost_block();
+ if let Some(expr) = &block.expr {
+ expr.span
+ } else if let Some(stmt) = block.stmts.last() {
+ // possibly incorrect trailing `;` in the else arm
+ stmt.span
+ } else {
+ // empty block; point at its entirety
+ block.span
+ }
+ }
+
+ /// Given a [`hir::HirId`] for a block, get the span of its last expression
+ /// or statement, peeling off any inner blocks.
+ pub fn find_block_span_from_hir_id(&self, hir_id: hir::HirId) -> Span {
+ match self.tcx.hir().get(hir_id) {
+ hir::Node::Block(blk) => self.find_block_span(blk),
+ // The parser was in a weird state if either of these happens, but
+ // it's better not to panic.
+ hir::Node::Expr(e) => e.span,
+ _ => rustc_span::DUMMY_SP,
+ }
+ }
+
+ /// Be helpful when the user wrote `{... expr; }` and taking the `;` off
+ /// is enough to fix the error.
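+ ///
+ /// A sketch of the shape of code this targets (names are illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// fn foo() -> i32 {
+ ///     42; // removing this `;` makes `42` the tail expression
+ /// }
+ /// ```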
+ pub fn could_remove_semicolon(
+ &self,
+ blk: &'tcx hir::Block<'tcx>,
+ expected_ty: Ty<'tcx>,
+ ) -> Option<(Span, StatementAsExpression)> {
+ let blk = blk.innermost_block();
+ // Do not suggest if we have a tail expr.
+ if blk.expr.is_some() {
+ return None;
+ }
+ let last_stmt = blk.stmts.last()?;
+ let hir::StmtKind::Semi(ref last_expr) = last_stmt.kind else {
+ return None;
+ };
+ let last_expr_ty = self.in_progress_typeck_results?.borrow().expr_ty_opt(*last_expr)?;
+ let needs_box = match (last_expr_ty.kind(), expected_ty.kind()) {
+ _ if last_expr_ty.references_error() => return None,
+ _ if self.same_type_modulo_infer(last_expr_ty, expected_ty) => {
+ StatementAsExpression::CorrectType
+ }
+ (ty::Opaque(last_def_id, _), ty::Opaque(exp_def_id, _))
+ if last_def_id == exp_def_id =>
+ {
+ StatementAsExpression::CorrectType
+ }
+ (ty::Opaque(last_def_id, last_bounds), ty::Opaque(exp_def_id, exp_bounds)) => {
+ debug!(
+ "both opaque, likely future {:?} {:?} {:?} {:?}",
+ last_def_id, last_bounds, exp_def_id, exp_bounds
+ );
+
+ let last_local_id = last_def_id.as_local()?;
+ let exp_local_id = exp_def_id.as_local()?;
+
+ match (
+ &self.tcx.hir().expect_item(last_local_id).kind,
+ &self.tcx.hir().expect_item(exp_local_id).kind,
+ ) {
+ (
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds: last_bounds, .. }),
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds: exp_bounds, .. }),
+ ) if iter::zip(*last_bounds, *exp_bounds).all(|(left, right)| {
+ match (left, right) {
+ (
+ hir::GenericBound::Trait(tl, ml),
+ hir::GenericBound::Trait(tr, mr),
+ ) if tl.trait_ref.trait_def_id() == tr.trait_ref.trait_def_id()
+ && ml == mr =>
+ {
+ true
+ }
+ (
+ hir::GenericBound::LangItemTrait(langl, _, _, argsl),
+ hir::GenericBound::LangItemTrait(langr, _, _, argsr),
+ ) if langl == langr => {
+ // FIXME: consider the bounds!
+ debug!("{:?} {:?}", argsl, argsr);
+ true
+ }
+ _ => false,
+ }
+ }) =>
+ {
+ StatementAsExpression::NeedsBoxing
+ }
+ _ => StatementAsExpression::CorrectType,
+ }
+ }
+ _ => return None,
+ };
+ let span = if last_stmt.span.from_expansion() {
+ let mac_call = rustc_span::source_map::original_sp(last_stmt.span, blk.span);
+ self.tcx.sess.source_map().mac_call_stmt_semi_span(mac_call)?
+ } else {
+ last_stmt.span.with_lo(last_stmt.span.hi() - BytePos(1))
+ };
+ Some((span, needs_box))
+ }
+
+ /// Suggest returning a local binding with a compatible type if the block
+ /// has no return expression.
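+ ///
+ /// A sketch of the case this handles (names are illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// fn foo() -> i32 {
+ ///     let x = 42;
+ ///     // the suggestion adds `x` as the tail expression here
+ /// }
+ /// ```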
+ pub fn consider_returning_binding(
+ &self,
+ blk: &'tcx hir::Block<'tcx>,
+ expected_ty: Ty<'tcx>,
+ err: &mut Diagnostic,
+ ) -> bool {
+ let blk = blk.innermost_block();
+ // Do not suggest if we have a tail expr.
+ if blk.expr.is_some() {
+ return false;
+ }
+ let mut shadowed = FxHashSet::default();
+ let mut candidate_idents = vec![];
+ let mut find_compatible_candidates = |pat: &hir::Pat<'_>| {
+ if let hir::PatKind::Binding(_, hir_id, ident, _) = &pat.kind
+ && let Some(pat_ty) = self
+ .in_progress_typeck_results
+ .and_then(|typeck_results| typeck_results.borrow().node_type_opt(*hir_id))
+ {
+ let pat_ty = self.resolve_vars_if_possible(pat_ty);
+ if self.same_type_modulo_infer(pat_ty, expected_ty)
+ && !(pat_ty, expected_ty).references_error()
+ && shadowed.insert(ident.name)
+ {
+ candidate_idents.push((*ident, pat_ty));
+ }
+ }
+ true
+ };
+
+ let hir = self.tcx.hir();
+ for stmt in blk.stmts.iter().rev() {
+ let hir::StmtKind::Local(local) = &stmt.kind else { continue; };
+ local.pat.walk(&mut find_compatible_candidates);
+ }
+ match hir.find(hir.get_parent_node(blk.hir_id)) {
+ Some(hir::Node::Expr(hir::Expr { hir_id, .. })) => {
+ match hir.find(hir.get_parent_node(*hir_id)) {
+ Some(hir::Node::Arm(hir::Arm { pat, .. })) => {
+ pat.walk(&mut find_compatible_candidates);
+ }
+ Some(
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, _, body), .. })
+ | hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(_, body),
+ ..
+ })
+ | hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(body)),
+ ..
+ })
+ | hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(hir::Closure { body, .. }),
+ ..
+ }),
+ ) => {
+ for param in hir.body(*body).params {
+ param.pat.walk(&mut find_compatible_candidates);
+ }
+ }
+ Some(hir::Node::Expr(hir::Expr {
+ kind:
+ hir::ExprKind::If(
+ hir::Expr { kind: hir::ExprKind::Let(let_), .. },
+ then_block,
+ _,
+ ),
+ ..
+ })) if then_block.hir_id == *hir_id => {
+ let_.pat.walk(&mut find_compatible_candidates);
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+
+ match &candidate_idents[..] {
+ [(ident, _ty)] => {
+ let sm = self.tcx.sess.source_map();
+ if let Some(stmt) = blk.stmts.last() {
+ let stmt_span = sm.stmt_span(stmt.span, blk.span);
+ let sugg = if sm.is_multiline(blk.span)
+ && let Some(spacing) = sm.indentation_before(stmt_span)
+ {
+ format!("\n{spacing}{ident}")
+ } else {
+ format!(" {ident}")
+ };
+ err.span_suggestion_verbose(
+ stmt_span.shrink_to_hi(),
+ format!("consider returning the local binding `{ident}`"),
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ let sugg = if sm.is_multiline(blk.span)
+ && let Some(spacing) = sm.indentation_before(blk.span.shrink_to_lo())
+ {
+ format!("\n{spacing} {ident}\n{spacing}")
+ } else {
+ format!(" {ident} ")
+ };
+ let left_span = sm.span_through_char(blk.span, '{').shrink_to_hi();
+ err.span_suggestion_verbose(
+ sm.span_extend_while(left_span, |c| c.is_whitespace()).unwrap_or(left_span),
+ format!("consider returning the local binding `{ident}`"),
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ true
+ }
+ values if (1..3).contains(&values.len()) => {
+ let spans = values.iter().map(|(ident, _)| ident.span).collect::<Vec<_>>();
+ err.span_note(spans, "consider returning one of these bindings");
+ true
+ }
+ _ => false,
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
new file mode 100644
index 000000000..561d1354e
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/need_type_info.rs
@@ -0,0 +1,1134 @@
+use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use crate::infer::InferCtxt;
+use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::def::{CtorOf, DefKind, Namespace};
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Body, Closure, Expr, ExprKind, FnRetTy, HirId, Local, LocalSource};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, AutoBorrow, AutoBorrowMutability};
+use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Print, Printer};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst, SubstsRef};
+use rustc_middle::ty::{self, DefIdTree, InferConst};
+use rustc_middle::ty::{IsSuggestable, Ty, TyCtxt, TypeckResults};
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::{BytePos, Span};
+use std::borrow::Cow;
+use std::iter;
+
+pub enum TypeAnnotationNeeded {
+ /// ```compile_fail,E0282
+ /// let x = "hello".chars().rev().collect();
+ /// ```
+ E0282,
+ /// An implementation cannot be chosen unambiguously because of a lack of information.
+ /// ```compile_fail,E0283
+ /// let _ = Default::default();
+ /// ```
+ E0283,
+ /// ```compile_fail,E0284
+ /// let mut d: u64 = 2;
+ /// d = d % 1u32.into();
+ /// ```
+ E0284,
+}
+
+impl Into<rustc_errors::DiagnosticId> for TypeAnnotationNeeded {
+ fn into(self) -> rustc_errors::DiagnosticId {
+ match self {
+ Self::E0282 => rustc_errors::error_code!(E0282),
+ Self::E0283 => rustc_errors::error_code!(E0283),
+ Self::E0284 => rustc_errors::error_code!(E0284),
+ }
+ }
+}
+
+/// Information about a constant or a type containing inference variables.
+pub struct InferenceDiagnosticsData {
+ pub name: String,
+ pub span: Option<Span>,
+ pub kind: UnderspecifiedArgKind,
+ pub parent: Option<InferenceDiagnosticsParentData>,
+}
+
+/// Data on the parent definition where a generic argument was declared.
+pub struct InferenceDiagnosticsParentData {
+ prefix: &'static str,
+ name: String,
+}
+
+pub enum UnderspecifiedArgKind {
+ Type { prefix: Cow<'static, str> },
+ Const { is_parameter: bool },
+}
+
+impl InferenceDiagnosticsData {
+ /// Generate a label for a generic argument which can't be inferred. When not
+ /// much is known about the argument, the label falls back to a generic
+ /// "cannot infer type" message.
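+ ///
+ /// For example, for a type parameter `T` declared on a function `foo`, this
+ /// produces "cannot infer type for type parameter `T` declared on the
+ /// function `foo`".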
+ fn cannot_infer_msg(&self) -> String {
+ if self.name == "_" && matches!(self.kind, UnderspecifiedArgKind::Type { .. }) {
+ return "cannot infer type".to_string();
+ }
+
+ let suffix = match &self.parent {
+ Some(parent) => parent.suffix_string(),
+ None => String::new(),
+ };
+
+ // For example: "cannot infer type for type parameter `T`"
+ format!("cannot infer {} `{}`{}", self.kind.prefix_string(), self.name, suffix)
+ }
+
+ fn where_x_is_specified(&self, in_type: Ty<'_>) -> String {
+ if in_type.is_ty_infer() {
+ String::new()
+ } else if self.name == "_" {
+ // FIXME: Consider specializing this message if there is a single `_`
+ // in the type.
+ ", where the placeholders `_` are specified".to_string()
+ } else {
+ format!(", where the {} `{}` is specified", self.kind.prefix_string(), self.name)
+ }
+ }
+}
+
+impl InferenceDiagnosticsParentData {
+ fn for_parent_def_id(
+ tcx: TyCtxt<'_>,
+ parent_def_id: DefId,
+ ) -> Option<InferenceDiagnosticsParentData> {
+ let parent_name =
+ tcx.def_key(parent_def_id).disambiguated_data.data.get_opt_name()?.to_string();
+
+ Some(InferenceDiagnosticsParentData {
+ prefix: tcx.def_kind(parent_def_id).descr(parent_def_id),
+ name: parent_name,
+ })
+ }
+
+ fn for_def_id(tcx: TyCtxt<'_>, def_id: DefId) -> Option<InferenceDiagnosticsParentData> {
+ Self::for_parent_def_id(tcx, tcx.parent(def_id))
+ }
+
+ fn suffix_string(&self) -> String {
+ format!(" declared on the {} `{}`", self.prefix, self.name)
+ }
+}
+
+impl UnderspecifiedArgKind {
+ fn prefix_string(&self) -> Cow<'static, str> {
+ match self {
+ Self::Type { prefix } => format!("type for {}", prefix).into(),
+ Self::Const { is_parameter: true } => "the value of const parameter".into(),
+ Self::Const { is_parameter: false } => "the value of the constant".into(),
+ }
+ }
+}
+
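+/// Builds a printer that renders unresolved type and const inference variables
+/// by the name of the generic parameter they were created for (e.g. `T`),
+/// where such an origin is known.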
+fn fmt_printer<'a, 'tcx>(infcx: &'a InferCtxt<'_, 'tcx>, ns: Namespace) -> FmtPrinter<'a, 'tcx> {
+ let mut printer = FmtPrinter::new(infcx.tcx, ns);
+ let ty_getter = move |ty_vid| {
+ if infcx.probe_ty_var(ty_vid).is_ok() {
+ warn!("resolved ty var in error message");
+ }
+ if let TypeVariableOriginKind::TypeParameterDefinition(name, _) =
+ infcx.inner.borrow_mut().type_variables().var_origin(ty_vid).kind
+ {
+ Some(name)
+ } else {
+ None
+ }
+ };
+ printer.ty_infer_name_resolver = Some(Box::new(ty_getter));
+ let const_getter = move |ct_vid| {
+ if infcx.probe_const_var(ct_vid).is_ok() {
+ warn!("resolved const var in error message");
+ }
+ if let ConstVariableOriginKind::ConstParameterDefinition(name, _) =
+ infcx.inner.borrow_mut().const_unification_table().probe_value(ct_vid).origin.kind
+ {
+ Some(name)
+ } else {
+ None
+ }
+ };
+ printer.const_infer_name_resolver = Some(Box::new(const_getter));
+ printer
+}
+
+fn ty_to_string<'tcx>(infcx: &InferCtxt<'_, 'tcx>, ty: Ty<'tcx>) -> String {
+ let printer = fmt_printer(infcx, Namespace::TypeNS);
+ let ty = infcx.resolve_vars_if_possible(ty);
+ match ty.kind() {
+ // We don't want the regular output for `fn`s because it includes its path in
+ // invalid pseudo-syntax; we want the `fn`-pointer output instead.
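+ // E.g. for a function `fn foo(x: u32)`, the `FnDef` type prints as
+ // `fn(u32) {foo}` by default; we print the plain signature `fn(u32)` instead.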
+ ty::FnDef(..) => ty.fn_sig(infcx.tcx).print(printer).unwrap().into_buffer(),
+ // FIXME: The same thing for closures, but this only works when the closure
+ // does not capture anything.
+ //
+ // We do have to hide the `extern "rust-call"` ABI in that case though,
+ // which is too much of a bother for now.
+ _ => ty.print(printer).unwrap().into_buffer(),
+ }
+}
+
+/// We don't want to directly use `ty_to_string` for closures as their type isn't really
+/// something users are familiar with. Directly printing the `fn_sig` of closures also
+/// doesn't work as they actually use the "rust-call" ABI.
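+///
+/// For example (a sketch), a closure like `|x: u32| x + 1` is rendered as
+/// `fn(u32) -> u32`; captures are not shown.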
+fn closure_as_fn_str<'tcx>(infcx: &InferCtxt<'_, 'tcx>, ty: Ty<'tcx>) -> String {
+ let ty::Closure(_, substs) = ty.kind() else { unreachable!() };
+ let fn_sig = substs.as_closure().sig();
+ let args = fn_sig
+ .inputs()
+ .skip_binder()
+ .iter()
+ .next()
+ .map(|args| {
+ args.tuple_fields()
+ .iter()
+ .map(|arg| ty_to_string(infcx, arg))
+ .collect::<Vec<_>>()
+ .join(", ")
+ })
+ .unwrap_or_default();
+ let ret = if fn_sig.output().skip_binder().is_unit() {
+ String::new()
+ } else {
+ format!(" -> {}", ty_to_string(infcx, fn_sig.output().skip_binder()))
+ };
+ format!("fn({}){}", args, ret)
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ /// Extracts data used by diagnostics for either types or constants
+ /// that were stuck during inference.
+ pub fn extract_inference_diagnostics_data(
+ &self,
+ arg: GenericArg<'tcx>,
+ highlight: Option<ty::print::RegionHighlightMode<'tcx>>,
+ ) -> InferenceDiagnosticsData {
+ match arg.unpack() {
+ GenericArgKind::Type(ty) => {
+ if let ty::Infer(ty::TyVar(ty_vid)) = *ty.kind() {
+ let mut inner = self.inner.borrow_mut();
+ let ty_vars = &inner.type_variables();
+ let var_origin = ty_vars.var_origin(ty_vid);
+ if let TypeVariableOriginKind::TypeParameterDefinition(name, def_id) =
+ var_origin.kind
+ {
+ if name != kw::SelfUpper {
+ return InferenceDiagnosticsData {
+ name: name.to_string(),
+ span: Some(var_origin.span),
+ kind: UnderspecifiedArgKind::Type {
+ prefix: "type parameter".into(),
+ },
+ parent: def_id.and_then(|def_id| {
+ InferenceDiagnosticsParentData::for_def_id(self.tcx, def_id)
+ }),
+ };
+ }
+ }
+ }
+
+ let mut printer = ty::print::FmtPrinter::new(self.tcx, Namespace::TypeNS);
+ if let Some(highlight) = highlight {
+ printer.region_highlight_mode = highlight;
+ }
+ InferenceDiagnosticsData {
+ name: ty.print(printer).unwrap().into_buffer(),
+ span: None,
+ kind: UnderspecifiedArgKind::Type { prefix: ty.prefix_string(self.tcx) },
+ parent: None,
+ }
+ }
+ GenericArgKind::Const(ct) => {
+ if let ty::ConstKind::Infer(InferConst::Var(vid)) = ct.kind() {
+ let origin =
+ self.inner.borrow_mut().const_unification_table().probe_value(vid).origin;
+ if let ConstVariableOriginKind::ConstParameterDefinition(name, def_id) =
+ origin.kind
+ {
+ return InferenceDiagnosticsData {
+ name: name.to_string(),
+ span: Some(origin.span),
+ kind: UnderspecifiedArgKind::Const { is_parameter: true },
+ parent: InferenceDiagnosticsParentData::for_def_id(self.tcx, def_id),
+ };
+ }
+
+ debug_assert!(!origin.span.is_dummy());
+ let mut printer = ty::print::FmtPrinter::new(self.tcx, Namespace::ValueNS);
+ if let Some(highlight) = highlight {
+ printer.region_highlight_mode = highlight;
+ }
+ InferenceDiagnosticsData {
+ name: ct.print(printer).unwrap().into_buffer(),
+ span: Some(origin.span),
+ kind: UnderspecifiedArgKind::Const { is_parameter: false },
+ parent: None,
+ }
+ } else {
+ // If we end up here the `FindInferSourceVisitor`
+ // won't work, as its expected argument isn't an inference variable.
+ //
+ // FIXME: Ideally we should look into the generic constant
+ // to figure out which inference var is actually unresolved so that
+ // this path is unreachable.
+ let mut printer = ty::print::FmtPrinter::new(self.tcx, Namespace::ValueNS);
+ if let Some(highlight) = highlight {
+ printer.region_highlight_mode = highlight;
+ }
+ InferenceDiagnosticsData {
+ name: ct.print(printer).unwrap().into_buffer(),
+ span: None,
+ kind: UnderspecifiedArgKind::Const { is_parameter: false },
+ parent: None,
+ }
+ }
+ }
+ GenericArgKind::Lifetime(_) => bug!("unexpected lifetime"),
+ }
+ }
+
+ /// Used as a fallback in [InferCtxt::emit_inference_failure_err]
+ /// in case we weren't able to get a better error.
+ fn bad_inference_failure_err(
+ &self,
+ span: Span,
+ arg_data: InferenceDiagnosticsData,
+ error_code: TypeAnnotationNeeded,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let error_code = error_code.into();
+ let mut err =
+ self.tcx.sess.struct_span_err_with_code(span, "type annotations needed", error_code);
+ err.span_label(span, arg_data.cannot_infer_msg());
+ err
+ }
+
+ pub fn emit_inference_failure_err(
+ &self,
+ body_id: Option<hir::BodyId>,
+ failure_span: Span,
+ arg: GenericArg<'tcx>,
+ error_code: TypeAnnotationNeeded,
+ should_label_span: bool,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let arg = self.resolve_vars_if_possible(arg);
+ let arg_data = self.extract_inference_diagnostics_data(arg, None);
+
+ let Some(typeck_results) = self.in_progress_typeck_results else {
+ // If we don't have any typeck results we're outside
+ // of a body, so we won't be able to get better info
+ // here.
+ return self.bad_inference_failure_err(failure_span, arg_data, error_code);
+ };
+ let typeck_results = typeck_results.borrow();
+ let typeck_results = &typeck_results;
+
+ let mut local_visitor = FindInferSourceVisitor::new(&self, typeck_results, arg);
+ if let Some(body_id) = body_id {
+ let expr = self.tcx.hir().expect_expr(body_id.hir_id);
+ local_visitor.visit_expr(expr);
+ }
+
+ let Some(InferSource { span, kind }) = local_visitor.infer_source else {
+ return self.bad_inference_failure_err(failure_span, arg_data, error_code)
+ };
+
+ let error_code = error_code.into();
+ let mut err = self.tcx.sess.struct_span_err_with_code(
+ span,
+ &format!("type annotations needed{}", kind.ty_msg(self)),
+ error_code,
+ );
+
+ if should_label_span && !failure_span.overlaps(span) {
+ err.span_label(failure_span, "type must be known at this point");
+ }
+
+ match kind {
+ InferSourceKind::LetBinding { insert_span, pattern_name, ty } => {
+ let suggestion_msg = if let Some(name) = pattern_name {
+ format!(
+ "consider giving `{}` an explicit type{}",
+ name,
+ arg_data.where_x_is_specified(ty)
+ )
+ } else {
+ format!(
+ "consider giving this pattern a type{}",
+ arg_data.where_x_is_specified(ty)
+ )
+ };
+ err.span_suggestion_verbose(
+ insert_span,
+ &suggestion_msg,
+ format!(": {}", ty_to_string(self, ty)),
+ Applicability::HasPlaceholders,
+ );
+ }
+ InferSourceKind::ClosureArg { insert_span, ty } => {
+ err.span_suggestion_verbose(
+ insert_span,
+ &format!(
+ "consider giving this closure parameter an explicit type{}",
+ arg_data.where_x_is_specified(ty)
+ ),
+ format!(": {}", ty_to_string(self, ty)),
+ Applicability::HasPlaceholders,
+ );
+ }
+ InferSourceKind::GenericArg {
+ insert_span,
+ argument_index,
+ generics_def_id,
+ def_id: _,
+ generic_args,
+ } => {
+ let generics = self.tcx.generics_of(generics_def_id);
+ let is_type = matches!(arg.unpack(), GenericArgKind::Type(_));
+
+ let cannot_infer_msg = format!(
+ "cannot infer {} of the {} parameter `{}`{}",
+ if is_type { "type" } else { "the value" },
+ if is_type { "type" } else { "const" },
+ generics.params[argument_index].name,
+ // We use the `generics_def_id` here, as even when suggesting `None::<T>`,
+ // the type parameter `T` was still declared on the enum, not on the
+ // variant.
+ InferenceDiagnosticsParentData::for_parent_def_id(self.tcx, generics_def_id)
+ .map_or(String::new(), |parent| parent.suffix_string()),
+ );
+
+ err.span_label(span, cannot_infer_msg);
+
+ let args = fmt_printer(self, Namespace::TypeNS)
+ .comma_sep(generic_args.iter().copied().map(|arg| {
+ if arg.is_suggestable(self.tcx, true) {
+ return arg;
+ }
+
+ match arg.unpack() {
+ GenericArgKind::Lifetime(_) => bug!("unexpected lifetime"),
+ GenericArgKind::Type(_) => self
+ .next_ty_var(TypeVariableOrigin {
+ span: rustc_span::DUMMY_SP,
+ kind: TypeVariableOriginKind::MiscVariable,
+ })
+ .into(),
+ GenericArgKind::Const(arg) => self
+ .next_const_var(
+ arg.ty(),
+ ConstVariableOrigin {
+ span: rustc_span::DUMMY_SP,
+ kind: ConstVariableOriginKind::MiscVariable,
+ },
+ )
+ .into(),
+ }
+ }))
+ .unwrap()
+ .into_buffer();
+
+ err.span_suggestion_verbose(
+ insert_span,
+ &format!(
+ "consider specifying the generic argument{}",
+ pluralize!(generic_args.len()),
+ ),
+ format!("::<{}>", args),
+ Applicability::HasPlaceholders,
+ );
+ }
+ InferSourceKind::FullyQualifiedMethodCall { receiver, successor, substs, def_id } => {
+ let printer = fmt_printer(self, Namespace::ValueNS);
+ let def_path = printer.print_def_path(def_id, substs).unwrap().into_buffer();
+
+ // We only care about whether we have to add `&` or `&mut ` for now.
+ // This is the case if the last adjustment is a borrow and the
+ // first adjustment was not a builtin deref.
+ let adjustment = match typeck_results.expr_adjustments(receiver) {
+ [
+ Adjustment { kind: Adjust::Deref(None), target: _ },
+ ..,
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), target: _ },
+ ] => "",
+ [
+ ..,
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mut_)), target: _ },
+ ] => match mut_ {
+ AutoBorrowMutability::Mut { .. } => "&mut ",
+ AutoBorrowMutability::Not => "&",
+ },
+ _ => "",
+ };
+
+ let suggestion = vec![
+ (receiver.span.shrink_to_lo(), format!("{def_path}({adjustment}")),
+ (receiver.span.shrink_to_hi().with_hi(successor.1), successor.0.to_string()),
+ ];
+ err.multipart_suggestion_verbose(
+ "try using a fully qualified path to specify the expected types",
+ suggestion,
+ Applicability::HasPlaceholders,
+ );
+ }
+ InferSourceKind::ClosureReturn { ty, data, should_wrap_expr } => {
+ let ret = ty_to_string(self, ty);
+ let (arrow, post) = match data {
+ FnRetTy::DefaultReturn(_) => ("-> ", " "),
+ _ => ("", ""),
+ };
+ let suggestion = match should_wrap_expr {
+ Some(end_span) => vec![
+ (data.span(), format!("{}{}{}{{ ", arrow, ret, post)),
+ (end_span, " }".to_string()),
+ ],
+ None => vec![(data.span(), format!("{}{}{}", arrow, ret, post))],
+ };
+ err.multipart_suggestion_verbose(
+ "try giving this closure an explicit return type",
+ suggestion,
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ err
+ }
+
+ pub fn need_type_info_err_in_generator(
+ &self,
+ kind: hir::GeneratorKind,
+ span: Span,
+ ty: Ty<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let ty = self.resolve_vars_if_possible(ty);
+ let data = self.extract_inference_diagnostics_data(ty.into(), None);
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0698,
+ "type inside {} must be known in this context",
+ kind,
+ );
+ err.span_label(span, data.cannot_infer_msg());
+ err
+ }
+}
+
+#[derive(Debug)]
+struct InferSource<'tcx> {
+ span: Span,
+ kind: InferSourceKind<'tcx>,
+}
+
+#[derive(Debug)]
+enum InferSourceKind<'tcx> {
+ LetBinding {
+ insert_span: Span,
+ pattern_name: Option<Ident>,
+ ty: Ty<'tcx>,
+ },
+ ClosureArg {
+ insert_span: Span,
+ ty: Ty<'tcx>,
+ },
+ GenericArg {
+ insert_span: Span,
+ argument_index: usize,
+ generics_def_id: DefId,
+ def_id: DefId,
+ generic_args: &'tcx [GenericArg<'tcx>],
+ },
+ FullyQualifiedMethodCall {
+ receiver: &'tcx Expr<'tcx>,
+ /// If the method has other arguments, this is ", " and the start of the first argument,
+ /// while for methods without arguments this is ")" and the end of the method call.
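+ /// For example (a sketch), rewriting `x.foo(y)` into the fully qualified
+ /// `Trait::foo(x, y)` uses `", "` together with the position where `y` starts.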
+ successor: (&'static str, BytePos),
+ substs: SubstsRef<'tcx>,
+ def_id: DefId,
+ },
+ ClosureReturn {
+ ty: Ty<'tcx>,
+ data: &'tcx FnRetTy<'tcx>,
+ should_wrap_expr: Option<Span>,
+ },
+}
+
+impl<'tcx> InferSource<'tcx> {
+ fn from_expansion(&self) -> bool {
+ let source_from_expansion = match self.kind {
+ InferSourceKind::LetBinding { insert_span, .. }
+ | InferSourceKind::ClosureArg { insert_span, .. }
+ | InferSourceKind::GenericArg { insert_span, .. } => insert_span.from_expansion(),
+ InferSourceKind::FullyQualifiedMethodCall { receiver, .. } => {
+ receiver.span.from_expansion()
+ }
+ InferSourceKind::ClosureReturn { data, should_wrap_expr, .. } => {
+ data.span().from_expansion() || should_wrap_expr.map_or(false, Span::from_expansion)
+ }
+ };
+ source_from_expansion || self.span.from_expansion()
+ }
+}
+
+impl<'tcx> InferSourceKind<'tcx> {
+ fn ty_msg(&self, infcx: &InferCtxt<'_, 'tcx>) -> String {
+ match *self {
+ InferSourceKind::LetBinding { ty, .. }
+ | InferSourceKind::ClosureArg { ty, .. }
+ | InferSourceKind::ClosureReturn { ty, .. } => {
+ if ty.is_closure() {
+ format!(" for the closure `{}`", closure_as_fn_str(infcx, ty))
+ } else if !ty.is_ty_infer() {
+ format!(" for `{}`", ty_to_string(infcx, ty))
+ } else {
+ String::new()
+ }
+ }
+ // FIXME: We should be able to add some additional info here.
+ InferSourceKind::GenericArg { .. }
+ | InferSourceKind::FullyQualifiedMethodCall { .. } => String::new(),
+ }
+ }
+}
+
+#[derive(Debug)]
+struct InsertableGenericArgs<'tcx> {
+ insert_span: Span,
+ substs: SubstsRef<'tcx>,
+ generics_def_id: DefId,
+ def_id: DefId,
+}
+
+/// A visitor which searches for the "best" spot to use in the inference error.
+///
+/// For this it walks over the hir body and tries to check all places where
+/// inference variables could be bound.
+///
+/// While doing so, the current best spot is stored in `infer_source`.
+/// For details on how we rank spots, see [Self::source_cost].
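+///
+/// As an illustrative example, for `let x = Vec::new();` both the `let`
+/// binding and the `Vec::new()` call could carry an annotation; the cheaper
+/// binding spot wins, e.g. suggesting `let x: Vec<T> = Vec::new();`.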
+struct FindInferSourceVisitor<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ typeck_results: &'a TypeckResults<'tcx>,
+
+ target: GenericArg<'tcx>,
+
+ attempt: usize,
+ infer_source_cost: usize,
+ infer_source: Option<InferSource<'tcx>>,
+}
+
+impl<'a, 'tcx> FindInferSourceVisitor<'a, 'tcx> {
+ fn new(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ typeck_results: &'a TypeckResults<'tcx>,
+ target: GenericArg<'tcx>,
+ ) -> Self {
+ FindInferSourceVisitor {
+ infcx,
+ typeck_results,
+
+ target,
+
+ attempt: 0,
+ infer_source_cost: usize::MAX,
+ infer_source: None,
+ }
+ }
+
+ /// Computes cost for the given source.
+ ///
+ /// Sources with a small cost are preferred and should result
+ /// in a clearer and more idiomatic suggestion.
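+ ///
+ /// For example, under the costs below a `let` binding of an ADT such as
+ /// `Vec<_>` costs 5, while a generic-argument suggestion starts at 10,
+ /// so the `let` binding is preferred.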
+ fn source_cost(&self, source: &InferSource<'tcx>) -> usize {
+ #[derive(Clone, Copy)]
+ struct CostCtxt<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ }
+ impl<'tcx> CostCtxt<'tcx> {
+ fn arg_cost(self, arg: GenericArg<'tcx>) -> usize {
+ match arg.unpack() {
+ GenericArgKind::Lifetime(_) => 0, // erased
+ GenericArgKind::Type(ty) => self.ty_cost(ty),
+ GenericArgKind::Const(_) => 3, // some non-zero value
+ }
+ }
+ fn ty_cost(self, ty: Ty<'tcx>) -> usize {
+ match *ty.kind() {
+ ty::Closure(..) => 1000,
+ ty::FnDef(..) => 150,
+ ty::FnPtr(..) => 30,
+ ty::Adt(def, substs) => {
+ 5 + self
+ .tcx
+ .generics_of(def.did())
+ .own_substs_no_defaults(self.tcx, substs)
+ .iter()
+ .map(|&arg| self.arg_cost(arg))
+ .sum::<usize>()
+ }
+ ty::Tuple(args) => 5 + args.iter().map(|arg| self.ty_cost(arg)).sum::<usize>(),
+ ty::Ref(_, ty, _) => 2 + self.ty_cost(ty),
+ ty::Infer(..) => 0,
+ _ => 1,
+ }
+ }
+ }
+
+ // The sources are listed in order of preference here.
+ let tcx = self.infcx.tcx;
+ let ctx = CostCtxt { tcx };
+ let base_cost = match source.kind {
+ InferSourceKind::LetBinding { ty, .. } => ctx.ty_cost(ty),
+ InferSourceKind::ClosureArg { ty, .. } => ctx.ty_cost(ty),
+ InferSourceKind::GenericArg { def_id, generic_args, .. } => {
+ let variant_cost = match tcx.def_kind(def_id) {
+ // `None::<u32>` and friends are ugly.
+ DefKind::Variant | DefKind::Ctor(CtorOf::Variant, _) => 15,
+ _ => 10,
+ };
+ variant_cost + generic_args.iter().map(|&arg| ctx.arg_cost(arg)).sum::<usize>()
+ }
+ InferSourceKind::FullyQualifiedMethodCall { substs, .. } => {
+ 20 + substs.iter().map(|arg| ctx.arg_cost(arg)).sum::<usize>()
+ }
+ InferSourceKind::ClosureReturn { ty, should_wrap_expr, .. } => {
+ 30 + ctx.ty_cost(ty) + if should_wrap_expr.is_some() { 10 } else { 0 }
+ }
+ };
+
+ let suggestion_may_apply = if source.from_expansion() { 10000 } else { 0 };
+
+ base_cost + suggestion_may_apply
+ }
+
+ /// Uses `fn source_cost` to determine whether this inference source is preferable to
+ /// previous sources. We generally prefer earlier sources.
+ #[instrument(level = "debug", skip(self))]
+ fn update_infer_source(&mut self, new_source: InferSource<'tcx>) {
+ let cost = self.source_cost(&new_source) + self.attempt;
+ debug!(?cost);
+ self.attempt += 1;
+ if cost < self.infer_source_cost {
+ self.infer_source_cost = cost;
+ self.infer_source = Some(new_source);
+ }
+ }
+
+ fn node_substs_opt(&self, hir_id: HirId) -> Option<SubstsRef<'tcx>> {
+ let substs = self.typeck_results.node_substs_opt(hir_id);
+ self.infcx.resolve_vars_if_possible(substs)
+ }
+
+ fn opt_node_type(&self, hir_id: HirId) -> Option<Ty<'tcx>> {
+ let ty = self.typeck_results.node_type_opt(hir_id);
+ self.infcx.resolve_vars_if_possible(ty)
+ }
+
+ // Check whether this generic argument is the inference variable we
+ // are looking for.
+ fn generic_arg_is_target(&self, arg: GenericArg<'tcx>) -> bool {
+ if arg == self.target {
+ return true;
+ }
+
+ match (arg.unpack(), self.target.unpack()) {
+ (GenericArgKind::Type(inner_ty), GenericArgKind::Type(target_ty)) => {
+ use ty::{Infer, TyVar};
+ match (inner_ty.kind(), target_ty.kind()) {
+ (&Infer(TyVar(a_vid)), &Infer(TyVar(b_vid))) => {
+ self.infcx.inner.borrow_mut().type_variables().sub_unified(a_vid, b_vid)
+ }
+ _ => false,
+ }
+ }
+ (GenericArgKind::Const(inner_ct), GenericArgKind::Const(target_ct)) => {
+ use ty::InferConst::*;
+ match (inner_ct.kind(), target_ct.kind()) {
+ (ty::ConstKind::Infer(Var(a_vid)), ty::ConstKind::Infer(Var(b_vid))) => self
+ .infcx
+ .inner
+ .borrow_mut()
+ .const_unification_table()
+ .unioned(a_vid, b_vid),
+ _ => false,
+ }
+ }
+ _ => false,
+ }
+ }
+
+ /// Does this generic argument contain our target inference variable
+ /// in a way which can be written by the user?
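+ ///
+ /// E.g. a target variable inside `Vec<_>` counts, but one that only appears
+ /// inside a closure type does not, as closure generic arguments can't be
+ /// named by the user.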
+ fn generic_arg_contains_target(&self, arg: GenericArg<'tcx>) -> bool {
+ let mut walker = arg.walk();
+ while let Some(inner) = walker.next() {
+ if self.generic_arg_is_target(inner) {
+ return true;
+ }
+ match inner.unpack() {
+ GenericArgKind::Lifetime(_) => {}
+ GenericArgKind::Type(ty) => {
+ if matches!(ty.kind(), ty::Opaque(..) | ty::Closure(..) | ty::Generator(..)) {
+ // Opaque types can't be named by the user right now.
+ //
+ // Both the generic arguments of closures and generators can
+ // also not be named. We may want to only look into the closure
+ // signature in case it has no captures, as that can be represented
+ // using `fn(T) -> R`.
+
+ // FIXME(type_alias_impl_trait): These opaque types
+ // can actually be named, so it would make sense to
+ // adjust this case and add a test for it.
+ walker.skip_current_subtree();
+ }
+ }
+ GenericArgKind::Const(ct) => {
+ if matches!(ct.kind(), ty::ConstKind::Unevaluated(..)) {
+ // You can't write the generic arguments for
+ // unevaluated constants.
+ walker.skip_current_subtree();
+ }
+ }
+ }
+ }
+ false
+ }
+
+ fn expr_inferred_subst_iter(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Box<dyn Iterator<Item = InsertableGenericArgs<'tcx>> + 'a> {
+ let tcx = self.infcx.tcx;
+ match expr.kind {
+ hir::ExprKind::Path(ref path) => {
+ if let Some(substs) = self.node_substs_opt(expr.hir_id) {
+ return self.path_inferred_subst_iter(expr.hir_id, substs, path);
+ }
+ }
+ // FIXME(#98711): Ideally we would also deal with type relative
+ // paths here, even if that is quite rare.
+ //
+ // See the `need_type_info/expr-struct-type-relative-gat.rs` test
+ // for an example where that would be needed.
+ //
+ // However, the `type_dependent_def_id` for `Self::Output` in an
+ // impl is currently the `DefId` of `Output` in the trait definition
+ // which makes this somewhat difficult and prevents us from just
+ // using `self.path_inferred_subst_iter` here.
+ hir::ExprKind::Struct(&hir::QPath::Resolved(_self_ty, path), _, _) => {
+ if let Some(ty) = self.opt_node_type(expr.hir_id) {
+ if let ty::Adt(_, substs) = ty.kind() {
+ return Box::new(self.resolved_path_inferred_subst_iter(path, substs));
+ }
+ }
+ }
+ hir::ExprKind::MethodCall(segment, _, _) => {
+ if let Some(def_id) = self.typeck_results.type_dependent_def_id(expr.hir_id) {
+ let generics = tcx.generics_of(def_id);
+ let insertable: Option<_> = try {
+ if generics.has_impl_trait() {
+ None?
+ }
+ let substs = self.node_substs_opt(expr.hir_id)?;
+ let span = tcx.hir().span(segment.hir_id?);
+ let insert_span = segment.ident.span.shrink_to_hi().with_hi(span.hi());
+ InsertableGenericArgs {
+ insert_span,
+ substs,
+ generics_def_id: def_id,
+ def_id,
+ }
+ };
+ return Box::new(insertable.into_iter());
+ }
+ }
+ _ => {}
+ }
+
+ Box::new(iter::empty())
+ }
+
+ fn resolved_path_inferred_subst_iter(
+ &self,
+ path: &'tcx hir::Path<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> impl Iterator<Item = InsertableGenericArgs<'tcx>> + 'a {
+ let tcx = self.infcx.tcx;
+ // The last segment of a path often has `Res::Err` and the
+ // correct `Res` is that of the whole path.
+ //
+ // FIXME: We deal with that one separately for now,
+ // would be good to remove this special case.
+ let last_segment_using_path_data: Option<_> = try {
+ let generics_def_id = tcx.res_generics_def_id(path.res)?;
+ let generics = tcx.generics_of(generics_def_id);
+ if generics.has_impl_trait() {
+ None?
+ }
+ let insert_span =
+ path.segments.last().unwrap().ident.span.shrink_to_hi().with_hi(path.span.hi());
+ InsertableGenericArgs {
+ insert_span,
+ substs,
+ generics_def_id,
+ def_id: path.res.def_id(),
+ }
+ };
+
+ path.segments
+ .iter()
+ .filter_map(move |segment| {
+ let res = segment.res?;
+ let generics_def_id = tcx.res_generics_def_id(res)?;
+ let generics = tcx.generics_of(generics_def_id);
+ if generics.has_impl_trait() {
+ return None;
+ }
+ let span = tcx.hir().span(segment.hir_id?);
+ let insert_span = segment.ident.span.shrink_to_hi().with_hi(span.hi());
+ Some(InsertableGenericArgs {
+ insert_span,
+ substs,
+ generics_def_id,
+ def_id: res.def_id(),
+ })
+ })
+ .chain(last_segment_using_path_data)
+ }
+
+ fn path_inferred_subst_iter(
+ &self,
+ hir_id: HirId,
+ substs: SubstsRef<'tcx>,
+ qpath: &'tcx hir::QPath<'tcx>,
+ ) -> Box<dyn Iterator<Item = InsertableGenericArgs<'tcx>> + 'a> {
+ let tcx = self.infcx.tcx;
+ match qpath {
+ hir::QPath::Resolved(_self_ty, path) => {
+ Box::new(self.resolved_path_inferred_subst_iter(path, substs))
+ }
+ hir::QPath::TypeRelative(ty, segment) => {
+ let Some(def_id) = self.typeck_results.type_dependent_def_id(hir_id) else {
+ return Box::new(iter::empty());
+ };
+
+ let generics = tcx.generics_of(def_id);
+ let segment: Option<_> = try {
+ if !segment.infer_args || generics.has_impl_trait() {
+ None?;
+ }
+ let span = tcx.hir().span(segment.hir_id?);
+ let insert_span = segment.ident.span.shrink_to_hi().with_hi(span.hi());
+ InsertableGenericArgs { insert_span, substs, generics_def_id: def_id, def_id }
+ };
+
+ let parent_def_id = generics.parent.unwrap();
+ if tcx.def_kind(parent_def_id) == DefKind::Impl {
+ let parent_ty = tcx.bound_type_of(parent_def_id).subst(tcx, substs);
+ match (parent_ty.kind(), &ty.kind) {
+ (
+ ty::Adt(def, substs),
+ hir::TyKind::Path(hir::QPath::Resolved(_self_ty, path)),
+ ) => {
+ if tcx.res_generics_def_id(path.res) != Some(def.did()) {
+ match path.res {
+ Res::Def(DefKind::TyAlias, _) => {
+ // FIXME: Ideally we should support this. For that
+ // we have to map back from the self type to the
+ // type alias though. That's difficult.
+ //
+ // See the `need_type_info/type-alias.rs` test for
+ // some examples.
+ }
+ // There cannot be inference variables in the self type,
+ // so there's nothing for us to do here.
+ Res::SelfTy { .. } => {}
+ _ => warn!(
+ "unexpected path: def={:?} substs={:?} path={:?}",
+ def, substs, path,
+ ),
+ }
+ } else {
+ return Box::new(
+ self.resolved_path_inferred_subst_iter(path, substs)
+ .chain(segment),
+ );
+ }
+ }
+ _ => (),
+ }
+ }
+
+ Box::new(segment.into_iter())
+ }
+ hir::QPath::LangItem(_, _, _) => Box::new(iter::empty()),
+ }
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.infcx.tcx.hir()
+ }
+
+ fn visit_local(&mut self, local: &'tcx Local<'tcx>) {
+ intravisit::walk_local(self, local);
+
+ if let Some(ty) = self.opt_node_type(local.hir_id) {
+ if self.generic_arg_contains_target(ty.into()) {
+ match local.source {
+ LocalSource::Normal if local.ty.is_none() => {
+ self.update_infer_source(InferSource {
+ span: local.pat.span,
+ kind: InferSourceKind::LetBinding {
+ insert_span: local.pat.span.shrink_to_hi(),
+ pattern_name: local.pat.simple_ident(),
+ ty,
+ },
+ })
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+
+ /// For closures, we first visit the parameters and then the body,
+ /// as we prefer suggesting annotations on the parameters.
+ fn visit_body(&mut self, body: &'tcx Body<'tcx>) {
+ for param in body.params {
+ debug!(
+ "param: span {:?}, ty_span {:?}, pat.span {:?}",
+ param.span, param.ty_span, param.pat.span
+ );
+ if param.ty_span != param.pat.span {
+ debug!("skipping param: has explicit type");
+ continue;
+ }
+
+ let Some(param_ty) = self.opt_node_type(param.hir_id) else {
+ continue
+ };
+
+ if self.generic_arg_contains_target(param_ty.into()) {
+ self.update_infer_source(InferSource {
+ span: param.pat.span,
+ kind: InferSourceKind::ClosureArg {
+ insert_span: param.pat.span.shrink_to_hi(),
+ ty: param_ty,
+ },
+ })
+ }
+ }
+ intravisit::walk_body(self, body);
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ let tcx = self.infcx.tcx;
+ match expr.kind {
+ // When encountering `func(arg)` first look into `arg` and then `func`,
+ // as `arg` is "more specific".
+ ExprKind::Call(func, args) => {
+ for arg in args {
+ self.visit_expr(arg);
+ }
+ self.visit_expr(func);
+ }
+ _ => intravisit::walk_expr(self, expr),
+ }
+
+ for args in self.expr_inferred_subst_iter(expr) {
+ debug!(?args);
+ let InsertableGenericArgs { insert_span, substs, generics_def_id, def_id } = args;
+ let generics = tcx.generics_of(generics_def_id);
+ if let Some(argument_index) = generics
+ .own_substs(substs)
+ .iter()
+ .position(|&arg| self.generic_arg_contains_target(arg))
+ {
+ let substs = self.infcx.resolve_vars_if_possible(substs);
+ let generic_args = &generics.own_substs_no_defaults(tcx, substs)
+ [generics.own_counts().lifetimes..];
+ let span = match expr.kind {
+ ExprKind::MethodCall(path, _, _) => path.ident.span,
+ _ => expr.span,
+ };
+
+ self.update_infer_source(InferSource {
+ span,
+ kind: InferSourceKind::GenericArg {
+ insert_span,
+ argument_index,
+ generics_def_id,
+ def_id,
+ generic_args,
+ },
+ });
+ }
+ }
+
+ if let Some(node_ty) = self.opt_node_type(expr.hir_id) {
+ if let (
+ &ExprKind::Closure(&Closure { fn_decl, body, fn_decl_span, .. }),
+ ty::Closure(_, substs),
+ ) = (&expr.kind, node_ty.kind())
+ {
+ let output = substs.as_closure().sig().output().skip_binder();
+ if self.generic_arg_contains_target(output.into()) {
+ let body = self.infcx.tcx.hir().body(body);
+ let should_wrap_expr = if matches!(body.value.kind, ExprKind::Block(..)) {
+ None
+ } else {
+ Some(body.value.span.shrink_to_hi())
+ };
+ self.update_infer_source(InferSource {
+ span: fn_decl_span,
+ kind: InferSourceKind::ClosureReturn {
+ ty: output,
+ data: &fn_decl.output,
+ should_wrap_expr,
+ },
+ })
+ }
+ }
+ }
+
+ let has_impl_trait = |def_id| {
+ iter::successors(Some(tcx.generics_of(def_id)), |generics| {
+ generics.parent.map(|def_id| tcx.generics_of(def_id))
+ })
+ .any(|generics| generics.has_impl_trait())
+ };
+ if let ExprKind::MethodCall(path, args, span) = expr.kind
+ && let Some(substs) = self.node_substs_opt(expr.hir_id)
+ && substs.iter().any(|arg| self.generic_arg_contains_target(arg))
+ && let Some(def_id) = self.typeck_results.type_dependent_def_id(expr.hir_id)
+ && self.infcx.tcx.trait_of_item(def_id).is_some()
+ && !has_impl_trait(def_id)
+ {
+ let successor =
+ args.get(1).map_or_else(|| (")", span.hi()), |arg| (", ", arg.span.lo()));
+ let substs = self.infcx.resolve_vars_if_possible(substs);
+ self.update_infer_source(InferSource {
+ span: path.ident.span,
+ kind: InferSourceKind::FullyQualifiedMethodCall {
+ receiver: args.first().unwrap(),
+ successor,
+ substs,
+ def_id,
+ }
+ })
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
new file mode 100644
index 000000000..9a2ab3e32
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/different_lifetimes.rs
@@ -0,0 +1,234 @@
+//! Error reporting for anonymous region lifetime errors
+//! where both regions are anonymous.
+
+use crate::infer::error_reporting::nice_region_error::find_anon_type::find_anon_type;
+use crate::infer::error_reporting::nice_region_error::util::AnonymousParamInfo;
+use crate::infer::error_reporting::nice_region_error::NiceRegionError;
+use crate::infer::lexical_region_resolve::RegionResolutionError;
+use crate::infer::SubregionOrigin;
+use crate::infer::TyCtxt;
+
+use rustc_errors::{struct_span_err, Applicability, Diagnostic, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::{GenericParamKind, Ty};
+use rustc_middle::ty::Region;
+use rustc_span::symbol::kw;
+
+impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
+ /// Print the error message for lifetime errors when both the concerned regions are anonymous.
+ ///
+ /// Consider a case where we have
+ ///
+ /// ```compile_fail,E0623
+ /// fn foo(x: &mut Vec<&u8>, y: &u8) {
+ /// x.push(y);
+ /// }
+ /// ```
+ ///
+ /// The example gives
+ ///
+ /// ```text
+ /// fn foo(x: &mut Vec<&u8>, y: &u8) {
+ /// --- --- these references are declared with different lifetimes...
+ /// x.push(y);
+ /// ^ ...but data from `y` flows into `x` here
+ /// ```
+ ///
+ /// It has been extended for the case of structs too.
+ ///
+ /// Consider the example
+ ///
+ /// ```no_run
+ /// struct Ref<'a> { x: &'a u32 }
+ /// ```
+ ///
+ /// ```text
+ /// fn foo(mut x: Vec<Ref>, y: Ref) {
+ /// --- --- these structs are declared with different lifetimes...
+ /// x.push(y);
+ /// ^ ...but data from `y` flows into `x` here
+ /// }
+ /// ```
+ ///
+ /// It will later be extended to trait objects.
+ pub(super) fn try_report_anon_anon_conflict(&self) -> Option<ErrorGuaranteed> {
+ let (span, sub, sup) = self.regions()?;
+
+ if let Some(RegionResolutionError::ConcreteFailure(
+ SubregionOrigin::ReferenceOutlivesReferent(..),
+ ..,
+ )) = self.error
+ {
+ // This error doesn't make much sense in this case.
+ return None;
+ }
+
+ // Determine whether both the sub and sup regions are anonymous (elided).
+ let anon_reg_sup = self.tcx().is_suitable_region(sup)?;
+
+ let anon_reg_sub = self.tcx().is_suitable_region(sub)?;
+ let scope_def_id_sup = anon_reg_sup.def_id;
+ let bregion_sup = anon_reg_sup.boundregion;
+ let scope_def_id_sub = anon_reg_sub.def_id;
+ let bregion_sub = anon_reg_sub.boundregion;
+
+ let ty_sup = find_anon_type(self.tcx(), sup, &bregion_sup)?;
+
+ let ty_sub = find_anon_type(self.tcx(), sub, &bregion_sub)?;
+
+ debug!(
+ "try_report_anon_anon_conflict: found_param1={:?} sup={:?} br1={:?}",
+ ty_sup, sup, bregion_sup
+ );
+ debug!(
+ "try_report_anon_anon_conflict: found_param2={:?} sub={:?} br2={:?}",
+ ty_sub, sub, bregion_sub
+ );
+
+ let (ty_sup, ty_fndecl_sup) = ty_sup;
+ let (ty_sub, ty_fndecl_sub) = ty_sub;
+
+ let AnonymousParamInfo { param: anon_param_sup, .. } =
+ self.find_param_with_region(sup, sup)?;
+ let AnonymousParamInfo { param: anon_param_sub, .. } =
+ self.find_param_with_region(sub, sub)?;
+
+ let sup_is_ret_type =
+ self.is_return_type_anon(scope_def_id_sup, bregion_sup, ty_fndecl_sup);
+ let sub_is_ret_type =
+ self.is_return_type_anon(scope_def_id_sub, bregion_sub, ty_fndecl_sub);
+
+ let span_label_var1 = match anon_param_sup.pat.simple_ident() {
+ Some(simple_ident) => format!(" from `{}`", simple_ident),
+ None => String::new(),
+ };
+
+ let span_label_var2 = match anon_param_sub.pat.simple_ident() {
+ Some(simple_ident) => format!(" into `{}`", simple_ident),
+ None => String::new(),
+ };
+
+ debug!(
+ "try_report_anon_anon_conflict: sub_is_ret_type={:?} sup_is_ret_type={:?}",
+ sub_is_ret_type, sup_is_ret_type
+ );
+
+ let mut err = struct_span_err!(self.tcx().sess, span, E0623, "lifetime mismatch");
+
+ match (sup_is_ret_type, sub_is_ret_type) {
+ (ret_capture @ Some(ret_span), _) | (_, ret_capture @ Some(ret_span)) => {
+ let param_span =
+ if sup_is_ret_type == ret_capture { ty_sub.span } else { ty_sup.span };
+
+ err.span_label(
+ param_span,
+ "this parameter and the return type are declared with different lifetimes...",
+ );
+ err.span_label(ret_span, "");
+ err.span_label(span, format!("...but data{} is returned here", span_label_var1));
+ }
+
+ (None, None) => {
+ if ty_sup.hir_id == ty_sub.hir_id {
+ err.span_label(ty_sup.span, "this type is declared with multiple lifetimes...");
+ err.span_label(ty_sub.span, "");
+ err.span_label(span, "...but data with one lifetime flows into the other here");
+ } else {
+ err.span_label(
+ ty_sup.span,
+ "these two types are declared with different lifetimes...",
+ );
+ err.span_label(ty_sub.span, "");
+ err.span_label(
+ span,
+ format!("...but data{} flows{} here", span_label_var1, span_label_var2),
+ );
+ }
+ }
+ }
+
+ if suggest_adding_lifetime_params(self.tcx(), sub, ty_sup, ty_sub, &mut err) {
+ err.note("each elided lifetime in input position becomes a distinct lifetime");
+ }
+
+ let reported = err.emit();
+ Some(reported)
+ }
+}
+
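+/// Suggests naming the elided lifetimes with a single named parameter, e.g.
+/// (a sketch) turning `fn foo(x: &u8, y: &u8)` into
+/// `fn foo<'a>(x: &'a u8, y: &'a u8)`.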
+pub fn suggest_adding_lifetime_params<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sub: Region<'tcx>,
+ ty_sup: &Ty<'_>,
+ ty_sub: &Ty<'_>,
+ err: &mut Diagnostic,
+) -> bool {
+ let (
+ hir::Ty { kind: hir::TyKind::Rptr(lifetime_sub, _), .. },
+ hir::Ty { kind: hir::TyKind::Rptr(lifetime_sup, _), .. },
+ ) = (ty_sub, ty_sup) else {
+ return false;
+ };
+
+ if !lifetime_sub.name.is_anonymous() || !lifetime_sup.name.is_anonymous() {
+ return false;
+ };
+
+ let Some(anon_reg) = tcx.is_suitable_region(sub) else {
+ return false;
+ };
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(anon_reg.def_id);
+
+ let node = tcx.hir().get(hir_id);
+ let is_impl = matches!(&node, hir::Node::ImplItem(_));
+ let generics = match node {
+ hir::Node::Item(&hir::Item { kind: hir::ItemKind::Fn(_, ref generics, ..), .. })
+ | hir::Node::TraitItem(&hir::TraitItem { ref generics, .. })
+ | hir::Node::ImplItem(&hir::ImplItem { ref generics, .. }) => generics,
+ _ => return false,
+ };
+
+ let suggestion_param_name = generics
+ .params
+ .iter()
+ .filter(|p| matches!(p.kind, GenericParamKind::Lifetime { .. }))
+ .map(|p| p.name.ident().name)
+ .find(|i| *i != kw::UnderscoreLifetime);
+ let introduce_new = suggestion_param_name.is_none();
+ let suggestion_param_name =
+ suggestion_param_name.map(|n| n.to_string()).unwrap_or_else(|| "'a".to_owned());
+
+ debug!(?lifetime_sup.span);
+ debug!(?lifetime_sub.span);
+ let make_suggestion = |span: rustc_span::Span| {
+ if span.is_empty() {
+ (span, format!("{}, ", suggestion_param_name))
+ } else if let Ok("&") = tcx.sess.source_map().span_to_snippet(span).as_deref() {
+ (span.shrink_to_hi(), format!("{} ", suggestion_param_name))
+ } else {
+ (span, suggestion_param_name.clone())
+ }
+ };
+ let mut suggestions =
+ vec![make_suggestion(lifetime_sub.span), make_suggestion(lifetime_sup.span)];
+
+ if introduce_new {
+ let new_param_suggestion =
+ if let Some(first) = generics.params.iter().find(|p| !p.name.ident().span.is_empty()) {
+ (first.span.shrink_to_lo(), format!("{}, ", suggestion_param_name))
+ } else {
+ (generics.span, format!("<{}>", suggestion_param_name))
+ };
+
+ suggestions.push(new_param_suggestion);
+ }
+
+ let mut sugg = String::from("consider introducing a named lifetime parameter");
+ if is_impl {
+ sugg.push_str(" and update trait if needed");
+ }
+ err.multipart_suggestion(sugg, suggestions, Applicability::MaybeIncorrect);
+
+ true
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs
new file mode 100644
index 000000000..c1b201da6
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/find_anon_type.rs
@@ -0,0 +1,234 @@
+use rustc_hir as hir;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_middle::hir::map::Map;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::resolve_lifetime as rl;
+use rustc_middle::ty::{self, Region, TyCtxt};
+
+/// This function calls the `visit_ty` method for the parameters
+/// corresponding to the anonymous regions. The visitor's `found_type`
+/// field ends up holding the anonymous type.
+///
+/// # Arguments
+/// region - the anonymous region corresponding to the anon_anon conflict
+/// br - the bound region corresponding to the above region which is of type `BrAnon(_)`
+///
+/// # Example
+/// ```compile_fail,E0623
+/// fn foo(x: &mut Vec<&u8>, y: &u8)
+/// { x.push(y); }
+/// ```
+/// The function returns the nested type corresponding to the anonymous region
+/// for e.g., `&u8` and `Vec<&u8>`.
+pub fn find_anon_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ region: Region<'tcx>,
+ br: &ty::BoundRegionKind,
+) -> Option<(&'tcx hir::Ty<'tcx>, &'tcx hir::FnSig<'tcx>)> {
+ let anon_reg = tcx.is_suitable_region(region)?;
+ let hir_id = tcx.hir().local_def_id_to_hir_id(anon_reg.def_id);
+ let fn_sig = tcx.hir().get(hir_id).fn_sig()?;
+
+ fn_sig
+ .decl
+ .inputs
+ .iter()
+ .find_map(|arg| find_component_for_bound_region(tcx, arg, br))
+ .map(|ty| (ty, fn_sig))
+}
+
+// This function creates a `FindNestedTypeVisitor` which returns the type
+// corresponding to the anonymous region.
+fn find_component_for_bound_region<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ arg: &'tcx hir::Ty<'tcx>,
+ br: &ty::BoundRegionKind,
+) -> Option<&'tcx hir::Ty<'tcx>> {
+ let mut nested_visitor = FindNestedTypeVisitor {
+ tcx,
+ bound_region: *br,
+ found_type: None,
+ current_index: ty::INNERMOST,
+ };
+ nested_visitor.visit_ty(arg);
+ nested_visitor.found_type
+}
+
+// The FindNestedTypeVisitor captures the corresponding `hir::Ty` of the
+// anonymous region. The example above would lead to a conflict between
+// the two anonymous lifetimes for &u8 in x and y respectively. This visitor
+// would be invoked twice, once for each lifetime, and would
+// walk the types like `&mut Vec<&u8>` and `&u8` looking for the HIR
+// where that lifetime appears. This allows us to highlight the
+// specific part of the type in the error message.
+struct FindNestedTypeVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ // The bound_region corresponding to the `ReFree(FreeRegion)`
+ // associated with the anonymous region we are looking for.
+ bound_region: ty::BoundRegionKind,
+ // The type where the anonymous lifetime appears
+ // for e.g., Vec<`&u8`> and <`&u8`>
+ found_type: Option<&'tcx hir::Ty<'tcx>>,
+ current_index: ty::DebruijnIndex,
+}
+
+impl<'tcx> Visitor<'tcx> for FindNestedTypeVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) {
+ match arg.kind {
+ hir::TyKind::BareFn(_) => {
+ self.current_index.shift_in(1);
+ intravisit::walk_ty(self, arg);
+ self.current_index.shift_out(1);
+ return;
+ }
+
+ hir::TyKind::TraitObject(bounds, ..) => {
+ for bound in bounds {
+ self.current_index.shift_in(1);
+ self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
+ self.current_index.shift_out(1);
+ }
+ }
+
+ hir::TyKind::Rptr(ref lifetime, _) => {
+ // the lifetime of the TyRptr
+ let hir_id = lifetime.hir_id;
+ match (self.tcx.named_region(hir_id), self.bound_region) {
+ // Find the index of the named region that was part of the
+ // error. We will then search the function parameters for a bound
+ // region at the right depth with the same index
+ (Some(rl::Region::EarlyBound(_, id)), ty::BrNamed(def_id, _)) => {
+ debug!("EarlyBound id={:?} def_id={:?}", id, def_id);
+ if id == def_id {
+ self.found_type = Some(arg);
+ return; // we can stop visiting now
+ }
+ }
+
+ // Find the index of the named region that was part of the
+ // error. We will then search the function parameters for a bound
+ // region at the right depth with the same index
+ (
+ Some(rl::Region::LateBound(debruijn_index, _, id)),
+ ty::BrNamed(def_id, _),
+ ) => {
+ debug!(
+ "FindNestedTypeVisitor::visit_ty: LateBound depth = {:?}",
+ debruijn_index
+ );
+ debug!("LateBound id={:?} def_id={:?}", id, def_id);
+ if debruijn_index == self.current_index && id == def_id {
+ self.found_type = Some(arg);
+ return; // we can stop visiting now
+ }
+ }
+
+ (
+ Some(
+ rl::Region::Static
+ | rl::Region::Free(_, _)
+ | rl::Region::EarlyBound(_, _)
+ | rl::Region::LateBound(_, _, _),
+ )
+ | None,
+ _,
+ ) => {
+ debug!("no arg found");
+ }
+ }
+ }
+ // Checks if it is of type `hir::TyKind::Path` which corresponds to a struct.
+ hir::TyKind::Path(_) => {
+ let subvisitor = &mut TyPathVisitor {
+ tcx: self.tcx,
+ found_it: false,
+ bound_region: self.bound_region,
+ current_index: self.current_index,
+ };
+ // Call `walk_ty`; as `visit_ty` is empty,
+ // this will visit only the outermost type.
+ intravisit::walk_ty(subvisitor, arg);
+ if subvisitor.found_it {
+ self.found_type = Some(arg);
+ }
+ }
+ _ => {}
+ }
+ // walk the embedded contents: e.g., if we are visiting `Vec<&Foo>`,
+ // go on to visit `&Foo`
+ intravisit::walk_ty(self, arg);
+ }
+}
+
+// The visitor captures the corresponding `hir::Ty` of the anonymous region
+// in the case of structs, i.e., `hir::TyKind::Path`.
+// This visitor would be invoked for each lifetime corresponding to a struct,
+// and would walk the types like Vec<Ref> in the above example and Ref looking for the HIR
+// where that lifetime appears. This allows us to highlight the
+// specific part of the type in the error message.
+struct TyPathVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ found_it: bool,
+ bound_region: ty::BoundRegionKind,
+ current_index: ty::DebruijnIndex,
+}
+
+impl<'tcx> Visitor<'tcx> for TyPathVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Map<'tcx> {
+ self.tcx.hir()
+ }
+
+ fn visit_lifetime(&mut self, lifetime: &hir::Lifetime) {
+ match (self.tcx.named_region(lifetime.hir_id), self.bound_region) {
+ // the lifetime of the TyPath!
+ (Some(rl::Region::EarlyBound(_, id)), ty::BrNamed(def_id, _)) => {
+ debug!("EarlyBound id={:?} def_id={:?}", id, def_id);
+ if id == def_id {
+ self.found_it = true;
+ return; // we can stop visiting now
+ }
+ }
+
+ (Some(rl::Region::LateBound(debruijn_index, _, id)), ty::BrNamed(def_id, _)) => {
+ debug!("FindNestedTypeVisitor::visit_ty: LateBound depth = {:?}", debruijn_index,);
+ debug!("id={:?}", id);
+ debug!("def_id={:?}", def_id);
+ if debruijn_index == self.current_index && id == def_id {
+ self.found_it = true;
+ return; // we can stop visiting now
+ }
+ }
+
+ (
+ Some(
+ rl::Region::Static
+ | rl::Region::EarlyBound(_, _)
+ | rl::Region::LateBound(_, _, _)
+ | rl::Region::Free(_, _),
+ )
+ | None,
+ _,
+ ) => {
+ debug!("no arg found");
+ }
+ }
+ }
+
+ fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) {
+ // ignore nested types
+ //
+ // If you have a type like `Foo<'a, &Ty>` we
+ // are only interested in the immediate lifetimes ('a).
+ //
+ // Making `visit_ty` empty will ignore the `&Ty` embedded
+ // inside, it will get reached by the outer visitor.
+ debug!("`Ty` corresponding to a struct is {:?}", arg);
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs
new file mode 100644
index 000000000..893ca3cf7
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mismatched_static_lifetime.rs
@@ -0,0 +1,102 @@
+//! Error Reporting for when the lifetime for a type doesn't match the `impl` selected for a predicate
+//! to hold.
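+//!
+//! A sketch of the shape of code this targets (illustrative only; the names and the
+//! exact reporting path are assumptions, not taken from this file):
+//!
+//! ```text
+//! trait Parse {}
+//! impl Parse for dyn std::fmt::Debug {} // really `dyn Debug + 'static`
+//!
+//! fn assert_parse<T: Parse + ?Sized>() {}
+//!
+//! fn f<'a>() {
+//!     // Selecting the `impl` above forces `'a == 'static`: the lifetime on the
+//!     // type doesn't match the `impl` chosen for the `T: Parse` predicate.
+//!     assert_parse::<dyn std::fmt::Debug + 'a>();
+//! }
+//! ```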
+
+use crate::infer::error_reporting::nice_region_error::NiceRegionError;
+use crate::infer::error_reporting::note_and_explain_region;
+use crate::infer::lexical_region_resolve::RegionResolutionError;
+use crate::infer::{SubregionOrigin, TypeTrace};
+use crate::traits::ObligationCauseCode;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::intravisit::Visitor;
+use rustc_middle::ty::TypeVisitor;
+
+impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
+ pub(super) fn try_report_mismatched_static_lifetime(&self) -> Option<ErrorGuaranteed> {
+ let error = self.error.as_ref()?;
+ debug!("try_report_mismatched_static_lifetime {:?}", error);
+
+ let RegionResolutionError::ConcreteFailure(origin, sub, sup) = error.clone() else {
+ return None;
+ };
+ if !sub.is_static() {
+ return None;
+ }
+ let SubregionOrigin::Subtype(box TypeTrace { ref cause, .. }) = origin else {
+ return None;
+ };
+ // If we added a "points at argument expression" obligation, we remove it here, we care
+ // about the original obligation only.
+ let code = match cause.code() {
+ ObligationCauseCode::FunctionArgumentObligation { parent_code, .. } => &*parent_code,
+ code => code,
+ };
+ let ObligationCauseCode::MatchImpl(parent, impl_def_id) = code else {
+ return None;
+ };
+ let ObligationCauseCode::BindingObligation(_def_id, binding_span) = *parent.code() else {
+ return None;
+ };
+ let mut err = self.tcx().sess.struct_span_err(cause.span, "incompatible lifetime on type");
+ // FIXME: we should point at the lifetime
+ let mut multi_span: MultiSpan = vec![binding_span].into();
+ multi_span.push_span_label(binding_span, "introduces a `'static` lifetime requirement");
+ err.span_note(multi_span, "because this has an unmet lifetime requirement");
+ note_and_explain_region(self.tcx(), &mut err, "", sup, "...", Some(binding_span));
+ if let Some(impl_node) = self.tcx().hir().get_if_local(*impl_def_id) {
+ // If an impl is local, then maybe this isn't what they want. Try to
+ // be as helpful as possible with implicit lifetimes.
+
+ // First, let's get the hir self type of the impl
+ let hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { self_ty: impl_self_ty, .. }),
+ ..
+ }) = impl_node else {
+ bug!("Node not an impl.");
+ };
+
+ // Next, let's figure out the set of trait objects with implicit static bounds
+ let ty = self.tcx().type_of(*impl_def_id);
+ let mut v = super::static_impl_trait::TraitObjectVisitor(FxHashSet::default());
+ v.visit_ty(ty);
+ let mut traits = vec![];
+ for matching_def_id in v.0 {
+ let mut hir_v =
+ super::static_impl_trait::HirTraitObjectVisitor(&mut traits, matching_def_id);
+ hir_v.visit_ty(&impl_self_ty);
+ }
+
+ if traits.is_empty() {
+ // If there are no trait object traits to point at, either because
+ // there aren't trait objects or because none are implicit, then just
+ // write a single note on the impl itself.
+
+ let impl_span = self.tcx().def_span(*impl_def_id);
+ err.span_note(impl_span, "...does not necessarily outlive the static lifetime introduced by the compatible `impl`");
+ } else {
+ // Otherwise, point at all implicit static lifetimes
+
+ err.note("...does not necessarily outlive the static lifetime introduced by the compatible `impl`");
+ for span in &traits {
+ err.span_note(*span, "this has an implicit `'static` lifetime requirement");
+ // It would be nice to put this immediately under the above note, but they get
+ // pushed to the end.
+ err.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ "consider relaxing the implicit `'static` requirement",
+ " + '_",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ } else {
+ // Otherwise just point out the impl.
+
+ let impl_span = self.tcx().def_span(*impl_def_id);
+ err.span_note(impl_span, "...does not necessarily outlive the static lifetime introduced by the compatible `impl`");
+ }
+ let reported = err.emit();
+ Some(reported)
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs
new file mode 100644
index 000000000..53d9acf7d
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/mod.rs
@@ -0,0 +1,77 @@
+use crate::infer::lexical_region_resolve::RegionResolutionError;
+use crate::infer::lexical_region_resolve::RegionResolutionError::*;
+use crate::infer::InferCtxt;
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::source_map::Span;
+
+mod different_lifetimes;
+pub mod find_anon_type;
+mod mismatched_static_lifetime;
+mod named_anon_conflict;
+mod placeholder_error;
+mod static_impl_trait;
+mod trait_impl_difference;
+mod util;
+
+pub use different_lifetimes::suggest_adding_lifetime_params;
+pub use find_anon_type::find_anon_type;
+pub use static_impl_trait::{suggest_new_region_bound, HirTraitObjectVisitor, TraitObjectVisitor};
+pub use util::find_param_with_region;
+
+impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
+ pub fn try_report_nice_region_error(&self, error: &RegionResolutionError<'tcx>) -> bool {
+ NiceRegionError::new(self, error.clone()).try_report().is_some()
+ }
+}
+
+pub struct NiceRegionError<'cx, 'tcx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ error: Option<RegionResolutionError<'tcx>>,
+ regions: Option<(Span, ty::Region<'tcx>, ty::Region<'tcx>)>,
+}
+
+impl<'cx, 'tcx> NiceRegionError<'cx, 'tcx> {
+ pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>, error: RegionResolutionError<'tcx>) -> Self {
+ Self { infcx, error: Some(error), regions: None }
+ }
+
+ pub fn new_from_span(
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ span: Span,
+ sub: ty::Region<'tcx>,
+ sup: ty::Region<'tcx>,
+ ) -> Self {
+ Self { infcx, error: None, regions: Some((span, sub, sup)) }
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ pub fn try_report_from_nll(&self) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ // Due to the improved diagnostics returned by the MIR borrow checker, only a subset of
+ // the nice region errors are required when running under the MIR borrow checker.
+ self.try_report_named_anon_conflict().or_else(|| self.try_report_placeholder_conflict())
+ }
+
+ pub fn try_report(&self) -> Option<ErrorGuaranteed> {
+ self.try_report_from_nll()
+ .map(|mut diag| diag.emit())
+ .or_else(|| self.try_report_impl_not_conforming_to_trait())
+ .or_else(|| self.try_report_anon_anon_conflict())
+ .or_else(|| self.try_report_static_impl_trait())
+ .or_else(|| self.try_report_mismatched_static_lifetime())
+ }
+
+ pub(super) fn regions(&self) -> Option<(Span, ty::Region<'tcx>, ty::Region<'tcx>)> {
+ match (&self.error, self.regions) {
+ (Some(ConcreteFailure(origin, sub, sup)), None) => Some((origin.span(), *sub, *sup)),
+ (Some(SubSupConflict(_, _, origin, sub, _, sup, _)), None) => {
+ Some((origin.span(), *sub, *sup))
+ }
+ (None, Some((span, sub, sup))) => Some((span, sub, sup)),
+ _ => None,
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
new file mode 100644
index 000000000..76cb76d9f
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/named_anon_conflict.rs
@@ -0,0 +1,116 @@
+//! Error Reporting for Anonymous Region Lifetime Errors
+//! where one region is named and the other is anonymous.
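+//!
+//! For example, the following is expected to trigger E0621 (illustrative, not part of
+//! this file): `y` has an elided lifetime but is returned as `&'a i32`.
+//!
+//! ```compile_fail,E0621
+//! fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32 {
+//!     if *x > *y { x } else { y }
+//! }
+//! ```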
+use crate::infer::error_reporting::nice_region_error::find_anon_type::find_anon_type;
+use crate::infer::error_reporting::nice_region_error::NiceRegionError;
+use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_middle::ty;
+use rustc_span::symbol::kw;
+
+impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
+    /// When given a `ConcreteFailure` for a function with parameters containing a named region
+    /// and an anonymous region, emit a descriptive diagnostic error.
+ pub(super) fn try_report_named_anon_conflict(
+ &self,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ let (span, sub, sup) = self.regions()?;
+
+ debug!(
+ "try_report_named_anon_conflict(sub={:?}, sup={:?}, error={:?})",
+ sub, sup, self.error,
+ );
+
+        // Determine whether `sub` and `sup` consist of one named region (`'a`)
+        // and one anonymous (elided) region. If so, find the parameter where
+        // the anonymous region appears (there must always be one; we only
+        // introduce anonymous regions in parameters) as well as `new_ty`, a
+        // version of its type where the anonymous region is replaced with
+        // the named one.
+ let (named, anon, anon_param_info, region_info) = if sub.has_name()
+ && self.tcx().is_suitable_region(sup).is_some()
+ && self.find_param_with_region(sup, sub).is_some()
+ {
+ (
+ sub,
+ sup,
+ self.find_param_with_region(sup, sub).unwrap(),
+ self.tcx().is_suitable_region(sup).unwrap(),
+ )
+ } else if sup.has_name()
+ && self.tcx().is_suitable_region(sub).is_some()
+ && self.find_param_with_region(sub, sup).is_some()
+ {
+ (
+ sup,
+ sub,
+ self.find_param_with_region(sub, sup).unwrap(),
+ self.tcx().is_suitable_region(sub).unwrap(),
+ )
+ } else {
+ return None; // inapplicable
+ };
+
+ // Suggesting to add a `'static` lifetime to a parameter is nearly always incorrect,
+ // and can steer users down the wrong path.
+ if named.is_static() {
+ return None;
+ }
+
+ debug!("try_report_named_anon_conflict: named = {:?}", named);
+ debug!("try_report_named_anon_conflict: anon_param_info = {:?}", anon_param_info);
+ debug!("try_report_named_anon_conflict: region_info = {:?}", region_info);
+
+ let param = anon_param_info.param;
+ let new_ty = anon_param_info.param_ty;
+ let new_ty_span = anon_param_info.param_ty_span;
+ let br = anon_param_info.bound_region;
+ let is_first = anon_param_info.is_first;
+ let scope_def_id = region_info.def_id;
+ let is_impl_item = region_info.is_impl_item;
+
+ match br {
+ ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(_) => {}
+ _ => {
+ /* not an anonymous region */
+ debug!("try_report_named_anon_conflict: not an anonymous region");
+ return None;
+ }
+ }
+
+ if is_impl_item {
+ debug!("try_report_named_anon_conflict: impl item, bail out");
+ return None;
+ }
+
+ if find_anon_type(self.tcx(), anon, &br).is_some()
+ && self.is_self_anon(is_first, scope_def_id)
+ {
+ return None;
+ }
+
+ let (error_var, span_label_var) = match param.pat.simple_ident() {
+ Some(simple_ident) => (
+ format!("the type of `{}`", simple_ident),
+ format!("the type of `{}`", simple_ident),
+ ),
+ None => ("parameter type".to_owned(), "type".to_owned()),
+ };
+
+ let mut diag = struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0621,
+ "explicit lifetime required in {}",
+ error_var
+ );
+
+ diag.span_label(span, format!("lifetime `{}` required", named));
+ diag.span_suggestion(
+ new_ty_span,
+ &format!("add explicit lifetime `{}` to {}", named, span_label_var),
+ new_ty,
+ Applicability::Unspecified,
+ );
+
+ Some(diag)
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
new file mode 100644
index 000000000..998699158
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/placeholder_error.rs
@@ -0,0 +1,501 @@
+use crate::infer::error_reporting::nice_region_error::NiceRegionError;
+use crate::infer::lexical_region_resolve::RegionResolutionError;
+use crate::infer::ValuePairs;
+use crate::infer::{SubregionOrigin, TypeTrace};
+use crate::traits::{ObligationCause, ObligationCauseCode};
+use rustc_data_structures::intern::Interned;
+use rustc_errors::{Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir::def::Namespace;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::error::ExpectedFound;
+use rustc_middle::ty::print::{FmtPrinter, Print, RegionHighlightMode};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, RePlaceholder, ReVar, Region, TyCtxt};
+
+use std::fmt::{self, Write};
+
+impl<'tcx> NiceRegionError<'_, 'tcx> {
+    /// When given a region resolution error involving placeholder regions (e.g., from a
+    /// higher-ranked trait bound), emit a descriptive diagnostic error.
+ pub(super) fn try_report_placeholder_conflict(
+ &self,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ match &self.error {
+ ///////////////////////////////////////////////////////////////////////////
+ // NB. The ordering of cases in this match is very
+ // sensitive, because we are often matching against
+ // specific cases and then using an `_` to match all
+ // others.
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Check for errors from comparing trait failures -- first
+ // with two placeholders, then with one.
+ Some(RegionResolutionError::SubSupConflict(
+ vid,
+ _,
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
+ sub_placeholder @ Region(Interned(RePlaceholder(_), _)),
+ _,
+ sup_placeholder @ Region(Interned(RePlaceholder(_), _)),
+ _,
+ )) => self.try_report_trait_placeholder_mismatch(
+ Some(self.tcx().mk_region(ReVar(*vid))),
+ cause,
+ Some(*sub_placeholder),
+ Some(*sup_placeholder),
+ values,
+ ),
+
+ Some(RegionResolutionError::SubSupConflict(
+ vid,
+ _,
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
+ sub_placeholder @ Region(Interned(RePlaceholder(_), _)),
+ _,
+ _,
+ _,
+ )) => self.try_report_trait_placeholder_mismatch(
+ Some(self.tcx().mk_region(ReVar(*vid))),
+ cause,
+ Some(*sub_placeholder),
+ None,
+ values,
+ ),
+
+ Some(RegionResolutionError::SubSupConflict(
+ vid,
+ _,
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
+ _,
+ _,
+ sup_placeholder @ Region(Interned(RePlaceholder(_), _)),
+ _,
+ )) => self.try_report_trait_placeholder_mismatch(
+ Some(self.tcx().mk_region(ReVar(*vid))),
+ cause,
+ None,
+ Some(*sup_placeholder),
+ values,
+ ),
+
+ Some(RegionResolutionError::SubSupConflict(
+ vid,
+ _,
+ _,
+ _,
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
+ sup_placeholder @ Region(Interned(RePlaceholder(_), _)),
+ _,
+ )) => self.try_report_trait_placeholder_mismatch(
+ Some(self.tcx().mk_region(ReVar(*vid))),
+ cause,
+ None,
+ Some(*sup_placeholder),
+ values,
+ ),
+
+ Some(RegionResolutionError::UpperBoundUniverseConflict(
+ vid,
+ _,
+ _,
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
+ sup_placeholder @ Region(Interned(RePlaceholder(_), _)),
+ )) => self.try_report_trait_placeholder_mismatch(
+ Some(self.tcx().mk_region(ReVar(*vid))),
+ cause,
+ None,
+ Some(*sup_placeholder),
+ values,
+ ),
+
+ Some(RegionResolutionError::ConcreteFailure(
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
+ sub_region @ Region(Interned(RePlaceholder(_), _)),
+ sup_region @ Region(Interned(RePlaceholder(_), _)),
+ )) => self.try_report_trait_placeholder_mismatch(
+ None,
+ cause,
+ Some(*sub_region),
+ Some(*sup_region),
+ values,
+ ),
+
+ Some(RegionResolutionError::ConcreteFailure(
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
+ sub_region @ Region(Interned(RePlaceholder(_), _)),
+ sup_region,
+ )) => self.try_report_trait_placeholder_mismatch(
+ (!sup_region.has_name()).then_some(*sup_region),
+ cause,
+ Some(*sub_region),
+ None,
+ values,
+ ),
+
+ Some(RegionResolutionError::ConcreteFailure(
+ SubregionOrigin::Subtype(box TypeTrace { cause, values }),
+ sub_region,
+ sup_region @ Region(Interned(RePlaceholder(_), _)),
+ )) => self.try_report_trait_placeholder_mismatch(
+ (!sub_region.has_name()).then_some(*sub_region),
+ cause,
+ None,
+ Some(*sup_region),
+ values,
+ ),
+
+ _ => None,
+ }
+ }
+
+ fn try_report_trait_placeholder_mismatch(
+ &self,
+ vid: Option<Region<'tcx>>,
+ cause: &ObligationCause<'tcx>,
+ sub_placeholder: Option<Region<'tcx>>,
+ sup_placeholder: Option<Region<'tcx>>,
+ value_pairs: &ValuePairs<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ let (expected_substs, found_substs, trait_def_id) = match value_pairs {
+ ValuePairs::TraitRefs(ExpectedFound { expected, found })
+ if expected.def_id == found.def_id =>
+ {
+ (expected.substs, found.substs, expected.def_id)
+ }
+ ValuePairs::PolyTraitRefs(ExpectedFound { expected, found })
+ if expected.def_id() == found.def_id() =>
+ {
+ // It's possible that the placeholders come from a binder
+ // outside of this value pair. Use `no_bound_vars` as a
+ // simple heuristic for that.
+ (expected.no_bound_vars()?.substs, found.no_bound_vars()?.substs, expected.def_id())
+ }
+ _ => return None,
+ };
+
+ Some(self.report_trait_placeholder_mismatch(
+ vid,
+ cause,
+ sub_placeholder,
+ sup_placeholder,
+ trait_def_id,
+ expected_substs,
+ found_substs,
+ ))
+ }
+
+    // error: implementation of `Foo` is not general enough
+    //   --> /home/nmatsakis/tmp/foo.rs:12:5
+    //    |
+    // 12 |     all::<&'static u32>();
+    //    |     ^^^^^^^^^^^^^^^^^^^ doesn't satisfy where-clause
+ // |
+ // = note: Due to a where-clause on the function `all`,
+ // = note: `T` must implement `...` for any two lifetimes `'1` and `'2`.
+ // = note: However, the type `T` only implements `...` for some specific lifetime `'2`.
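+    //
+    // A sketch of code that can produce such an error (illustrative only; the names
+    // are made up):
+    //
+    //     trait Foo<'a> {}
+    //     impl Foo<'static> for u32 {}
+    //     fn all<T: for<'a> Foo<'a>>() {}
+    //     fn main() { all::<u32>(); }
+    //
+    // Here `u32` must implement `Foo<'a>` for *any* lifetime `'a`, but the only
+    // available impl applies to the specific lifetime `'static`.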
+ #[instrument(level = "debug", skip(self))]
+ fn report_trait_placeholder_mismatch(
+ &self,
+ vid: Option<Region<'tcx>>,
+ cause: &ObligationCause<'tcx>,
+ sub_placeholder: Option<Region<'tcx>>,
+ sup_placeholder: Option<Region<'tcx>>,
+ trait_def_id: DefId,
+ expected_substs: SubstsRef<'tcx>,
+ actual_substs: SubstsRef<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let span = cause.span();
+ let msg = format!(
+ "implementation of `{}` is not general enough",
+ self.tcx().def_path_str(trait_def_id),
+ );
+ let mut err = self.tcx().sess.struct_span_err(span, &msg);
+
+ let leading_ellipsis = if let ObligationCauseCode::ItemObligation(def_id) = *cause.code() {
+ err.span_label(span, "doesn't satisfy where-clause");
+ err.span_label(
+ self.tcx().def_span(def_id),
+ &format!("due to a where-clause on `{}`...", self.tcx().def_path_str(def_id)),
+ );
+ true
+ } else {
+ err.span_label(span, &msg);
+ false
+ };
+
+ let expected_trait_ref = self.infcx.resolve_vars_if_possible(ty::TraitRef {
+ def_id: trait_def_id,
+ substs: expected_substs,
+ });
+ let actual_trait_ref = self
+ .infcx
+ .resolve_vars_if_possible(ty::TraitRef { def_id: trait_def_id, substs: actual_substs });
+
+ // Search the expected and actual trait references to see (a)
+ // whether the sub/sup placeholders appear in them (sometimes
+ // you have a trait ref like `T: Foo<fn(&u8)>`, where the
+ // placeholder was created as part of an inner type) and (b)
+        // whether the inference variable appears. If either does,
+        // assign it a counter value.
+ let mut counter = 0;
+ let mut has_sub = None;
+ let mut has_sup = None;
+
+ let mut actual_has_vid = None;
+ let mut expected_has_vid = None;
+
+ self.tcx().for_each_free_region(&expected_trait_ref, |r| {
+ if Some(r) == sub_placeholder && has_sub.is_none() {
+ has_sub = Some(counter);
+ counter += 1;
+ } else if Some(r) == sup_placeholder && has_sup.is_none() {
+ has_sup = Some(counter);
+ counter += 1;
+ }
+
+ if Some(r) == vid && expected_has_vid.is_none() {
+ expected_has_vid = Some(counter);
+ counter += 1;
+ }
+ });
+
+ self.tcx().for_each_free_region(&actual_trait_ref, |r| {
+ if Some(r) == vid && actual_has_vid.is_none() {
+ actual_has_vid = Some(counter);
+ counter += 1;
+ }
+ });
+
+ let actual_self_ty_has_vid =
+ self.tcx().any_free_region_meets(&actual_trait_ref.self_ty(), |r| Some(r) == vid);
+
+ let expected_self_ty_has_vid =
+ self.tcx().any_free_region_meets(&expected_trait_ref.self_ty(), |r| Some(r) == vid);
+
+ let any_self_ty_has_vid = actual_self_ty_has_vid || expected_self_ty_has_vid;
+
+ debug!(
+ ?actual_has_vid,
+ ?expected_has_vid,
+ ?has_sub,
+ ?has_sup,
+ ?actual_self_ty_has_vid,
+ ?expected_self_ty_has_vid,
+ );
+
+ self.explain_actual_impl_that_was_found(
+ &mut err,
+ sub_placeholder,
+ sup_placeholder,
+ has_sub,
+ has_sup,
+ expected_trait_ref,
+ actual_trait_ref,
+ vid,
+ expected_has_vid,
+ actual_has_vid,
+ any_self_ty_has_vid,
+ leading_ellipsis,
+ );
+
+ err
+ }
+
+    /// Add notes with details about the expected and actual trait refs, with attention to cases
+    /// when placeholder regions are involved: either the trait or the self type containing
+    /// them needs to be mentioned closest to the placeholders.
+    /// This makes the error messages read better, at the cost of some complexity
+    /// due to the number of combinations we have to deal with.
+ fn explain_actual_impl_that_was_found(
+ &self,
+ err: &mut Diagnostic,
+ sub_placeholder: Option<Region<'tcx>>,
+ sup_placeholder: Option<Region<'tcx>>,
+ has_sub: Option<usize>,
+ has_sup: Option<usize>,
+ expected_trait_ref: ty::TraitRef<'tcx>,
+ actual_trait_ref: ty::TraitRef<'tcx>,
+ vid: Option<Region<'tcx>>,
+ expected_has_vid: Option<usize>,
+ actual_has_vid: Option<usize>,
+ any_self_ty_has_vid: bool,
+ leading_ellipsis: bool,
+ ) {
+        // HACK(eddyb) maybe move this to a more central location.
+ #[derive(Copy, Clone)]
+ struct Highlighted<'tcx, T> {
+ tcx: TyCtxt<'tcx>,
+ highlight: RegionHighlightMode<'tcx>,
+ value: T,
+ }
+
+ impl<'tcx, T> Highlighted<'tcx, T> {
+ fn map<U>(self, f: impl FnOnce(T) -> U) -> Highlighted<'tcx, U> {
+ Highlighted { tcx: self.tcx, highlight: self.highlight, value: f(self.value) }
+ }
+ }
+
+ impl<'tcx, T> fmt::Display for Highlighted<'tcx, T>
+ where
+ T: for<'a> Print<
+ 'tcx,
+ FmtPrinter<'a, 'tcx>,
+ Error = fmt::Error,
+ Output = FmtPrinter<'a, 'tcx>,
+ >,
+ {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut printer = ty::print::FmtPrinter::new(self.tcx, Namespace::TypeNS);
+ printer.region_highlight_mode = self.highlight;
+
+ let s = self.value.print(printer)?.into_buffer();
+ f.write_str(&s)
+ }
+ }
+
+        // The interplay between the `maybe_highlighting_region` calls and
+        // the match inside works like this:
+        //
+        // - The match checks whether the given things (placeholders, etc.)
+        //   appear in the types we are about to print.
+        // - Meanwhile, the `maybe_highlighting_region` calls set up
+        //   highlights so that, if they do appear, we will replace them
+        //   with `'0` and so on. (This replacement takes place inside the
+        //   closure given to `maybe_highlighting_region`.)
+        //
+        // There is some duplication between the calls -- e.g.,
+        // `maybe_highlighting_region` checks whether `has_sub` is `None`,
+        // and then we check again inside the closure -- but this setup
+        // keeps the number of calls to a minimum.
+
+ let highlight_trait_ref = |trait_ref| Highlighted {
+ tcx: self.tcx(),
+ highlight: RegionHighlightMode::new(self.tcx()),
+ value: trait_ref,
+ };
+
+ let same_self_type = actual_trait_ref.self_ty() == expected_trait_ref.self_ty();
+
+ let mut expected_trait_ref = highlight_trait_ref(expected_trait_ref);
+ expected_trait_ref.highlight.maybe_highlighting_region(sub_placeholder, has_sub);
+ expected_trait_ref.highlight.maybe_highlighting_region(sup_placeholder, has_sup);
+ err.note(&{
+ let passive_voice = match (has_sub, has_sup) {
+ (Some(_), _) | (_, Some(_)) => any_self_ty_has_vid,
+ (None, None) => {
+ expected_trait_ref.highlight.maybe_highlighting_region(vid, expected_has_vid);
+ match expected_has_vid {
+ Some(_) => true,
+ None => any_self_ty_has_vid,
+ }
+ }
+ };
+
+ let mut note = if same_self_type {
+ let mut self_ty = expected_trait_ref.map(|tr| tr.self_ty());
+ self_ty.highlight.maybe_highlighting_region(vid, actual_has_vid);
+
+ if self_ty.value.is_closure()
+ && self
+ .tcx()
+ .fn_trait_kind_from_lang_item(expected_trait_ref.value.def_id)
+ .is_some()
+ {
+ let closure_sig = self_ty.map(|closure| {
+ if let ty::Closure(_, substs) = closure.kind() {
+ self.tcx().signature_unclosure(
+ substs.as_closure().sig(),
+ rustc_hir::Unsafety::Normal,
+ )
+ } else {
+ bug!("type is not longer closure");
+ }
+ });
+
+ format!(
+ "{}closure with signature `{}` must implement `{}`",
+ if leading_ellipsis { "..." } else { "" },
+ closure_sig,
+ expected_trait_ref.map(|tr| tr.print_only_trait_path()),
+ )
+ } else {
+ format!(
+ "{}`{}` must implement `{}`",
+ if leading_ellipsis { "..." } else { "" },
+ self_ty,
+ expected_trait_ref.map(|tr| tr.print_only_trait_path()),
+ )
+ }
+ } else if passive_voice {
+ format!(
+ "{}`{}` would have to be implemented for the type `{}`",
+ if leading_ellipsis { "..." } else { "" },
+ expected_trait_ref.map(|tr| tr.print_only_trait_path()),
+ expected_trait_ref.map(|tr| tr.self_ty()),
+ )
+ } else {
+ format!(
+ "{}`{}` must implement `{}`",
+ if leading_ellipsis { "..." } else { "" },
+ expected_trait_ref.map(|tr| tr.self_ty()),
+ expected_trait_ref.map(|tr| tr.print_only_trait_path()),
+ )
+ };
+
+ match (has_sub, has_sup) {
+ (Some(n1), Some(n2)) => {
+ let _ = write!(
+ note,
+ ", for any two lifetimes `'{}` and `'{}`...",
+ std::cmp::min(n1, n2),
+ std::cmp::max(n1, n2),
+ );
+ }
+ (Some(n), _) | (_, Some(n)) => {
+ let _ = write!(note, ", for any lifetime `'{}`...", n,);
+ }
+ (None, None) => {
+ if let Some(n) = expected_has_vid {
+ let _ = write!(note, ", for some specific lifetime `'{}`...", n,);
+ }
+ }
+ }
+
+ note
+ });
+
+ let mut actual_trait_ref = highlight_trait_ref(actual_trait_ref);
+ actual_trait_ref.highlight.maybe_highlighting_region(vid, actual_has_vid);
+ err.note(&{
+ let passive_voice = match actual_has_vid {
+ Some(_) => any_self_ty_has_vid,
+ None => true,
+ };
+
+ let mut note = if same_self_type {
+ format!(
+ "...but it actually implements `{}`",
+ actual_trait_ref.map(|tr| tr.print_only_trait_path()),
+ )
+ } else if passive_voice {
+ format!(
+ "...but `{}` is actually implemented for the type `{}`",
+ actual_trait_ref.map(|tr| tr.print_only_trait_path()),
+ actual_trait_ref.map(|tr| tr.self_ty()),
+ )
+ } else {
+ format!(
+ "...but `{}` actually implements `{}`",
+ actual_trait_ref.map(|tr| tr.self_ty()),
+ actual_trait_ref.map(|tr| tr.print_only_trait_path()),
+ )
+ };
+
+ if let Some(n) = actual_has_vid {
+ let _ = write!(note, ", for some specific lifetime `'{}`", n);
+ }
+
+ note
+ });
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
new file mode 100644
index 000000000..9886c572a
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/static_impl_trait.rs
@@ -0,0 +1,577 @@
+//! Error Reporting for static impl Traits.
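+//!
+//! A canonical instance (illustrative, not part of this file) is a borrowed argument
+//! escaping through a return type that defaults to `'static`; the compiler reports that
+//! `x` must satisfy a `'static` lifetime requirement and suggests
+//! `-> impl std::fmt::Debug + '_`:
+//!
+//! ```compile_fail
+//! fn make_debug(x: &u32) -> impl std::fmt::Debug {
+//!     x
+//! }
+//! ```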
+
+use crate::infer::error_reporting::nice_region_error::NiceRegionError;
+use crate::infer::lexical_region_resolve::RegionResolutionError;
+use crate::infer::{SubregionOrigin, TypeTrace};
+use crate::traits::{ObligationCauseCode, UnifyReceiverContext};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{struct_span_err, Applicability, Diagnostic, ErrorGuaranteed, MultiSpan};
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::{walk_ty, Visitor};
+use rustc_hir::{self as hir, GenericBound, Item, ItemKind, Lifetime, LifetimeName, Node, TyKind};
+use rustc_middle::ty::{
+ self, AssocItemContainer, StaticLifetimeVisitor, Ty, TyCtxt, TypeSuperVisitable, TypeVisitor,
+};
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+use std::ops::ControlFlow;
+
+impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
+ /// Print the error message for lifetime errors when the return type is a static `impl Trait`,
+ /// `dyn Trait` or if a method call on a trait object introduces a static requirement.
+ pub(super) fn try_report_static_impl_trait(&self) -> Option<ErrorGuaranteed> {
+ debug!("try_report_static_impl_trait(error={:?})", self.error);
+ let tcx = self.tcx();
+ let (var_origin, sub_origin, sub_r, sup_origin, sup_r, spans) = match self.error.as_ref()? {
+ RegionResolutionError::SubSupConflict(
+ _,
+ var_origin,
+ sub_origin,
+ sub_r,
+ sup_origin,
+ sup_r,
+ spans,
+ ) if sub_r.is_static() => (var_origin, sub_origin, sub_r, sup_origin, sup_r, spans),
+ RegionResolutionError::ConcreteFailure(
+ SubregionOrigin::Subtype(box TypeTrace { cause, .. }),
+ sub_r,
+ sup_r,
+ ) if sub_r.is_static() => {
+ // This is for an implicit `'static` requirement coming from `impl dyn Trait {}`.
+ if let ObligationCauseCode::UnifyReceiver(ctxt) = cause.code() {
+                    // This may involve a closure, which would cause an ICE
+                    // through `find_param_with_region` (#78262).
+ let anon_reg_sup = tcx.is_suitable_region(*sup_r)?;
+ let fn_returns = tcx.return_type_impl_or_dyn_traits(anon_reg_sup.def_id);
+ if fn_returns.is_empty() {
+ return None;
+ }
+
+ let param = self.find_param_with_region(*sup_r, *sub_r)?;
+ let lifetime = if sup_r.has_name() {
+ format!("lifetime `{}`", sup_r)
+ } else {
+ "an anonymous lifetime `'_`".to_string()
+ };
+ let mut err = struct_span_err!(
+ tcx.sess,
+ cause.span,
+ E0772,
+ "{} has {} but calling `{}` introduces an implicit `'static` lifetime \
+ requirement",
+ param
+ .param
+ .pat
+ .simple_ident()
+ .map(|s| format!("`{}`", s))
+ .unwrap_or_else(|| "`fn` parameter".to_string()),
+ lifetime,
+ ctxt.assoc_item.name,
+ );
+ err.span_label(param.param_ty_span, &format!("this data with {}...", lifetime));
+ err.span_label(
+ cause.span,
+ &format!(
+ "...is used and required to live as long as `'static` here \
+ because of an implicit lifetime bound on the {}",
+ match ctxt.assoc_item.container {
+ AssocItemContainer::TraitContainer => {
+ let id = ctxt.assoc_item.container_id(tcx);
+ format!("`impl` of `{}`", tcx.def_path_str(id))
+ }
+ AssocItemContainer::ImplContainer => "inherent `impl`".to_string(),
+ },
+ ),
+ );
+ if self.find_impl_on_dyn_trait(&mut err, param.param_ty, &ctxt) {
+ let reported = err.emit();
+ return Some(reported);
+ } else {
+ err.cancel();
+ }
+ }
+ return None;
+ }
+ _ => return None,
+ };
+ debug!(
+ "try_report_static_impl_trait(var={:?}, sub={:?} {:?} sup={:?} {:?})",
+ var_origin, sub_origin, sub_r, sup_origin, sup_r
+ );
+ let anon_reg_sup = tcx.is_suitable_region(*sup_r)?;
+ debug!("try_report_static_impl_trait: anon_reg_sup={:?}", anon_reg_sup);
+ let sp = var_origin.span();
+ let return_sp = sub_origin.span();
+ let param = self.find_param_with_region(*sup_r, *sub_r)?;
+ let (lifetime_name, lifetime) = if sup_r.has_name() {
+ (sup_r.to_string(), format!("lifetime `{}`", sup_r))
+ } else {
+ ("'_".to_owned(), "an anonymous lifetime `'_`".to_string())
+ };
+ let param_name = param
+ .param
+ .pat
+ .simple_ident()
+ .map(|s| format!("`{}`", s))
+ .unwrap_or_else(|| "`fn` parameter".to_string());
+ let mut err = struct_span_err!(
+ tcx.sess,
+ sp,
+ E0759,
+ "{} has {} but it needs to satisfy a `'static` lifetime requirement",
+ param_name,
+ lifetime,
+ );
+
+ let (mention_influencer, influencer_point) =
+ if sup_origin.span().overlaps(param.param_ty_span) {
+                // Account for `async fn` like in `async-await/issues/issue-62097.rs`.
+                // The desugaring of `async fn`s causes `sup_origin` and `param` to point at the
+                // same place (but with different `ctxt`, hence `overlaps` instead of `==` above).
+ //
+ // This avoids the following:
+ //
+ // LL | pub async fn run_dummy_fn(&self) {
+ // | ^^^^^
+ // | |
+ // | this data with an anonymous lifetime `'_`...
+ // | ...is captured here...
+ (false, sup_origin.span())
+ } else {
+ (!sup_origin.span().overlaps(return_sp), param.param_ty_span)
+ };
+ err.span_label(influencer_point, &format!("this data with {}...", lifetime));
+
+ debug!("try_report_static_impl_trait: param_info={:?}", param);
+
+ let mut spans = spans.clone();
+
+ if mention_influencer {
+ spans.push(sup_origin.span());
+ }
+ // We dedup the spans *ignoring* expansion context.
+ spans.sort();
+ spans.dedup_by_key(|span| (span.lo(), span.hi()));
+
+ // We try to make the output have fewer overlapping spans if possible.
+ let require_msg = if spans.is_empty() {
+ "...is used and required to live as long as `'static` here"
+ } else {
+ "...and is required to live as long as `'static` here"
+ };
+ let require_span =
+ if sup_origin.span().overlaps(return_sp) { sup_origin.span() } else { return_sp };
+
+ for span in &spans {
+ err.span_label(*span, "...is used here...");
+ }
+
+ if spans.iter().any(|sp| sp.overlaps(return_sp) || *sp > return_sp) {
+ // If any of the "captured here" labels appears on the same line or after
+ // `require_span`, we put it on a note to ensure the text flows by appearing
+ // always at the end.
+ err.span_note(require_span, require_msg);
+ } else {
+ // We don't need a note, it's already at the end, it can be shown as a `span_label`.
+ err.span_label(require_span, require_msg);
+ }
+
+ if let SubregionOrigin::RelateParamBound(_, _, Some(bound)) = sub_origin {
+ err.span_note(*bound, "`'static` lifetime requirement introduced by this bound");
+ }
+ if let SubregionOrigin::Subtype(box TypeTrace { cause, .. }) = sub_origin {
+ if let ObligationCauseCode::ReturnValue(hir_id)
+ | ObligationCauseCode::BlockTailExpression(hir_id) = cause.code()
+ {
+ let parent_id = tcx.hir().get_parent_item(*hir_id);
+ let parent_id = tcx.hir().local_def_id_to_hir_id(parent_id);
+ if let Some(fn_decl) = tcx.hir().fn_decl_by_hir_id(parent_id) {
+ let mut span: MultiSpan = fn_decl.output.span().into();
+ let mut add_label = true;
+ if let hir::FnRetTy::Return(ty) = fn_decl.output {
+ let mut v = StaticLifetimeVisitor(vec![], tcx.hir());
+ v.visit_ty(ty);
+ if !v.0.is_empty() {
+ span = v.0.clone().into();
+ for sp in v.0 {
+ span.push_span_label(sp, "`'static` requirement introduced here");
+ }
+ add_label = false;
+ }
+ }
+ if add_label {
+ span.push_span_label(
+ fn_decl.output.span(),
+ "requirement introduced by this return type",
+ );
+ }
+ span.push_span_label(cause.span, "because of this returned expression");
+ err.span_note(
+ span,
+ "`'static` lifetime requirement introduced by the return type",
+ );
+ }
+ }
+ }
+
+ let fn_returns = tcx.return_type_impl_or_dyn_traits(anon_reg_sup.def_id);
+
+ let mut override_error_code = None;
+ if let SubregionOrigin::Subtype(box TypeTrace { cause, .. }) = &sup_origin
+ && let ObligationCauseCode::UnifyReceiver(ctxt) = cause.code()
+ // Handle case of `impl Foo for dyn Bar { fn qux(&self) {} }` introducing a
+ // `'static` lifetime when called as a method on a binding: `bar.qux()`.
+ && self.find_impl_on_dyn_trait(&mut err, param.param_ty, &ctxt)
+ {
+ override_error_code = Some(ctxt.assoc_item.name);
+ }
+
+ if let SubregionOrigin::Subtype(box TypeTrace { cause, .. }) = &sub_origin
+ && let code = match cause.code() {
+ ObligationCauseCode::MatchImpl(parent, ..) => parent.code(),
+ _ => cause.code(),
+ }
+ && let (&ObligationCauseCode::ItemObligation(item_def_id), None) = (code, override_error_code)
+ {
+ // Same case of `impl Foo for dyn Bar { fn qux(&self) {} }` introducing a `'static`
+ // lifetime as above, but called using a fully-qualified path to the method:
+ // `Foo::qux(bar)`.
+ let mut v = TraitObjectVisitor(FxHashSet::default());
+ v.visit_ty(param.param_ty);
+ if let Some((ident, self_ty)) =
+ self.get_impl_ident_and_self_ty_from_trait(item_def_id, &v.0)
+ && self.suggest_constrain_dyn_trait_in_impl(&mut err, &v.0, ident, self_ty)
+ {
+ override_error_code = Some(ident.name);
+ }
+ }
+ if let (Some(ident), true) = (override_error_code, fn_returns.is_empty()) {
+ // Provide a more targeted error code and description.
+ err.code(rustc_errors::error_code!(E0772));
+ err.set_primary_message(&format!(
+ "{} has {} but calling `{}` introduces an implicit `'static` lifetime \
+ requirement",
+ param_name, lifetime, ident,
+ ));
+ }
+
+ let arg = match param.param.pat.simple_ident() {
+ Some(simple_ident) => format!("argument `{}`", simple_ident),
+ None => "the argument".to_string(),
+ };
+ let captures = format!("captures data from {}", arg);
+ suggest_new_region_bound(
+ tcx,
+ &mut err,
+ fn_returns,
+ lifetime_name,
+ Some(arg),
+ captures,
+ Some((param.param_ty_span, param.param_ty.to_string())),
+ );
+
+ let reported = err.emit();
+ Some(reported)
+ }
+}
+
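+/// Given the `hir::Ty` of each `impl Trait` or `dyn Trait` return type, suggest a bound tying
+/// the return type to the borrowed data instead of the implicit `'static`.
+///
+/// A sketch of the kind of rewrite the suggestions aim at (illustrative; the exact wording is
+/// built in the body below):
+///
+/// ```text
+/// fn f(x: &u32) -> Box<dyn std::fmt::Display>       // implicit `+ 'static`, rejected
+/// fn f(x: &u32) -> Box<dyn std::fmt::Display + '_>  // suggested: captures data from `x`
+/// ```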
+pub fn suggest_new_region_bound(
+ tcx: TyCtxt<'_>,
+ err: &mut Diagnostic,
+ fn_returns: Vec<&rustc_hir::Ty<'_>>,
+ lifetime_name: String,
+ arg: Option<String>,
+ captures: String,
+ param: Option<(Span, String)>,
+) {
+ debug!("try_report_static_impl_trait: fn_return={:?}", fn_returns);
+ // FIXME: account for the need of parens in `&(dyn Trait + '_)`
+ let consider = "consider changing the";
+ let declare = "to declare that the";
+ let explicit = format!("you can add an explicit `{}` lifetime bound", lifetime_name);
+ let explicit_static =
+ arg.map(|arg| format!("explicit `'static` bound to the lifetime of {}", arg));
+ let add_static_bound = "alternatively, add an explicit `'static` bound to this reference";
+ let plus_lt = format!(" + {}", lifetime_name);
+ for fn_return in fn_returns {
+ if fn_return.span.desugaring_kind().is_some() {
+ // Skip `async` desugaring `impl Future`.
+ continue;
+ }
+ match fn_return.kind {
+ TyKind::OpaqueDef(item_id, _) => {
+ let item = tcx.hir().item(item_id);
+ let ItemKind::OpaqueTy(opaque) = &item.kind else {
+ return;
+ };
+
+ if let Some(span) = opaque
+ .bounds
+ .iter()
+ .filter_map(|arg| match arg {
+ GenericBound::Outlives(Lifetime {
+ name: LifetimeName::Static,
+ span,
+ ..
+ }) => Some(*span),
+ _ => None,
+ })
+ .next()
+ {
+ if let Some(explicit_static) = &explicit_static {
+ err.span_suggestion_verbose(
+ span,
+ &format!("{} `impl Trait`'s {}", consider, explicit_static),
+ &lifetime_name,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ if let Some((param_span, param_ty)) = param.clone() {
+ err.span_suggestion_verbose(
+ param_span,
+ add_static_bound,
+ param_ty,
+ Applicability::MaybeIncorrect,
+ );
+ }
+                } else if !opaque.bounds.iter().any(|arg| {
+                    // A bound with this exact lifetime already exists; suggesting
+                    // it again would be redundant.
+                    matches!(
+                        arg,
+                        GenericBound::Outlives(Lifetime { name, .. })
+                            if name.ident().to_string() == lifetime_name
+                    )
+                }) {
+                    err.span_suggestion_verbose(
+                        fn_return.span.shrink_to_hi(),
+                        &format!(
+                            "{declare} `impl Trait` {captures}, {explicit}",
+                            declare = declare,
+                            captures = captures,
+                            explicit = explicit,
+                        ),
+                        &plus_lt,
+                        Applicability::MaybeIncorrect,
+                    );
+                }
+ }
+ TyKind::TraitObject(_, lt, _) => match lt.name {
+ LifetimeName::ImplicitObjectLifetimeDefault => {
+ err.span_suggestion_verbose(
+ fn_return.span.shrink_to_hi(),
+ &format!(
+ "{declare} trait object {captures}, {explicit}",
+ declare = declare,
+ captures = captures,
+ explicit = explicit,
+ ),
+ &plus_lt,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ name if name.ident().to_string() != lifetime_name => {
+                    // With this check we avoid suggesting redundant bounds. This
+                    // would happen if there are nested impl/dyn traits and only
+                    // one of them already has the bound we'd suggest, as in
+                    // `impl Foo<X = dyn Bar> + '_`.
+ if let Some(explicit_static) = &explicit_static {
+ err.span_suggestion_verbose(
+ lt.span,
+ &format!("{} trait object's {}", consider, explicit_static),
+ &lifetime_name,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ if let Some((param_span, param_ty)) = param.clone() {
+ err.span_suggestion_verbose(
+ param_span,
+ add_static_bound,
+ param_ty,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ }
+}
+
+impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
+ fn get_impl_ident_and_self_ty_from_trait(
+ &self,
+ def_id: DefId,
+ trait_objects: &FxHashSet<DefId>,
+ ) -> Option<(Ident, &'tcx hir::Ty<'tcx>)> {
+ let tcx = self.tcx();
+ match tcx.hir().get_if_local(def_id) {
+ Some(Node::ImplItem(impl_item)) => {
+ match tcx.hir().find_by_def_id(tcx.hir().get_parent_item(impl_item.hir_id())) {
+ Some(Node::Item(Item {
+ kind: ItemKind::Impl(hir::Impl { self_ty, .. }),
+ ..
+ })) => Some((impl_item.ident, self_ty)),
+ _ => None,
+ }
+ }
+ Some(Node::TraitItem(trait_item)) => {
+ let trait_did = tcx.hir().get_parent_item(trait_item.hir_id());
+ match tcx.hir().find_by_def_id(trait_did) {
+ Some(Node::Item(Item { kind: ItemKind::Trait(..), .. })) => {
+ // The method being called is defined in the `trait`, but the `'static`
+ // obligation comes from the `impl`. Find that `impl` so that we can point
+ // at it in the suggestion.
+ let trait_did = trait_did.to_def_id();
+ match tcx
+ .hir()
+ .trait_impls(trait_did)
+ .iter()
+ .filter_map(|&impl_did| {
+ match tcx.hir().get_if_local(impl_did.to_def_id()) {
+ Some(Node::Item(Item {
+ kind: ItemKind::Impl(hir::Impl { self_ty, .. }),
+ ..
+ })) if trait_objects.iter().all(|did| {
+ // FIXME: we should check `self_ty` against the receiver
+ // type in the `UnifyReceiver` context, but for now, use
+ // this imperfect proxy. This will fail if there are
+ // multiple `impl`s for the same trait like
+ // `impl Foo for Box<dyn Bar>` and `impl Foo for dyn Bar`.
+ // In that case, only the first one will get suggestions.
+ let mut traits = vec![];
+ let mut hir_v = HirTraitObjectVisitor(&mut traits, *did);
+ hir_v.visit_ty(self_ty);
+ !traits.is_empty()
+ }) =>
+ {
+ Some(self_ty)
+ }
+ _ => None,
+ }
+ })
+ .next()
+ {
+ Some(self_ty) => Some((trait_item.ident, self_ty)),
+ _ => None,
+ }
+ }
+ _ => None,
+ }
+ }
+ _ => None,
+ }
+ }
+
+ /// When we call a method coming from an `impl Foo for dyn Bar`, `dyn Bar` introduces a default
+ /// `'static` obligation. Suggest relaxing that implicit bound.
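+    ///
+    /// A sketch of the pattern in question (illustrative, not taken from this file):
+    ///
+    /// ```text
+    /// trait Greet { fn hello(&self); }
+    /// impl Greet for dyn std::fmt::Display { fn hello(&self) {} } // `dyn Display + 'static`
+    ///
+    /// fn call(x: &dyn std::fmt::Display) {
+    ///     x.hello(); // calling `hello` requires `*x` to live for `'static`
+    /// }
+    /// ```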
+ fn find_impl_on_dyn_trait(
+ &self,
+ err: &mut Diagnostic,
+ ty: Ty<'_>,
+ ctxt: &UnifyReceiverContext<'tcx>,
+ ) -> bool {
+ let tcx = self.tcx();
+
+ // Find the method being called.
+ let Ok(Some(instance)) = ty::Instance::resolve(
+ tcx,
+ ctxt.param_env,
+ ctxt.assoc_item.def_id,
+ self.infcx.resolve_vars_if_possible(ctxt.substs),
+ ) else {
+ return false;
+ };
+
+ let mut v = TraitObjectVisitor(FxHashSet::default());
+ v.visit_ty(ty);
+
+ // Get the `Ident` of the method being called and the corresponding `impl` (to point at
+ // `Bar` in `impl Foo for dyn Bar {}` and the definition of the method being called).
+ let Some((ident, self_ty)) = self.get_impl_ident_and_self_ty_from_trait(instance.def_id(), &v.0) else {
+ return false;
+ };
+
+ // Find the trait object types in the argument, so we point at *only* the trait object.
+ self.suggest_constrain_dyn_trait_in_impl(err, &v.0, ident, self_ty)
+ }
+
+ fn suggest_constrain_dyn_trait_in_impl(
+ &self,
+ err: &mut Diagnostic,
+ found_dids: &FxHashSet<DefId>,
+ ident: Ident,
+ self_ty: &hir::Ty<'_>,
+ ) -> bool {
+ let mut suggested = false;
+ for found_did in found_dids {
+ let mut traits = vec![];
+ let mut hir_v = HirTraitObjectVisitor(&mut traits, *found_did);
+ hir_v.visit_ty(&self_ty);
+ for span in &traits {
+ let mut multi_span: MultiSpan = vec![*span].into();
+ multi_span
+ .push_span_label(*span, "this has an implicit `'static` lifetime requirement");
+ multi_span.push_span_label(
+ ident.span,
+ "calling this method introduces the `impl`'s 'static` requirement",
+ );
+ err.span_note(multi_span, "the used `impl` has a `'static` requirement");
+ err.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ "consider relaxing the implicit `'static` requirement",
+ " + '_",
+ Applicability::MaybeIncorrect,
+ );
+ suggested = true;
+ }
+ }
+ suggested
+ }
+}
+
+/// Collect all the trait objects in a type that could have received an implicit `'static` lifetime.
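+///
+/// For example (illustrative), given `Box<dyn Foo>` -- which defaults to
+/// `Box<dyn Foo + 'static>` -- this records the `DefId` of `Foo`.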
+pub struct TraitObjectVisitor(pub FxHashSet<DefId>);
+
+impl<'tcx> TypeVisitor<'tcx> for TraitObjectVisitor {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match t.kind() {
+ ty::Dynamic(preds, re) if re.is_static() => {
+ if let Some(def_id) = preds.principal_def_id() {
+ self.0.insert(def_id);
+ }
+ ControlFlow::CONTINUE
+ }
+ _ => t.super_visit_with(self),
+ }
+ }
+}
+
+/// Collect all `hir::Ty<'_>` `Span`s for trait objects with an implicit lifetime.
+pub struct HirTraitObjectVisitor<'a>(pub &'a mut Vec<Span>, pub DefId);
+
+impl<'a, 'tcx> Visitor<'tcx> for HirTraitObjectVisitor<'a> {
+ fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx>) {
+ if let TyKind::TraitObject(
+ poly_trait_refs,
+ Lifetime { name: LifetimeName::ImplicitObjectLifetimeDefault, .. },
+ _,
+ ) = t.kind
+ {
+ for ptr in poly_trait_refs {
+ if Some(self.1) == ptr.trait_ref.trait_def_id() {
+ self.0.push(ptr.span);
+ }
+ }
+ }
+ walk_ty(self, t);
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs
new file mode 100644
index 000000000..da465a764
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/trait_impl_difference.rs
@@ -0,0 +1,176 @@
+//! Error Reporting for `impl` items that do not match the obligations from their `trait`.
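+//!
+//! A representative case (illustrative, not part of this file): lifetime elision in the
+//! `trait` ties the output to `&self`, while the `impl` ties it to the other input, so
+//! the signatures disagree:
+//!
+//! ```compile_fail
+//! trait Trait {
+//!     fn get(&self, x: &u32) -> &u32;
+//! }
+//! impl Trait for () {
+//!     fn get<'a>(&self, x: &'a u32) -> &'a u32 { x }
+//! }
+//! ```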
+
+use crate::infer::error_reporting::nice_region_error::NiceRegionError;
+use crate::infer::lexical_region_resolve::RegionResolutionError;
+use crate::infer::Subtype;
+use crate::traits::ObligationCauseCode::CompareImplItemObligation;
+use rustc_errors::{ErrorGuaranteed, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::Visitor;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::print::RegionHighlightMode;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitor};
+use rustc_span::Span;
+
+use std::ops::ControlFlow;
+
+impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
+ /// Print the error message for lifetime errors when the `impl` doesn't conform to the `trait`.
+ pub(super) fn try_report_impl_not_conforming_to_trait(&self) -> Option<ErrorGuaranteed> {
+ let error = self.error.as_ref()?;
+ debug!("try_report_impl_not_conforming_to_trait {:?}", error);
+ if let RegionResolutionError::SubSupConflict(
+ _,
+ var_origin,
+ sub_origin,
+ _sub,
+ sup_origin,
+ _sup,
+ _,
+ ) = error.clone()
+ && let (Subtype(sup_trace), Subtype(sub_trace)) = (&sup_origin, &sub_origin)
+ && let sub_expected_found @ Some((sub_expected, sub_found)) = sub_trace.values.ty()
+ && let sup_expected_found @ Some(_) = sup_trace.values.ty()
+ && let CompareImplItemObligation { trait_item_def_id, .. } = sub_trace.cause.code()
+ && sup_expected_found == sub_expected_found
+ {
+ let guar =
+ self.emit_err(var_origin.span(), sub_expected, sub_found, *trait_item_def_id);
+ return Some(guar);
+ }
+ None
+ }
+
+ fn emit_err(
+ &self,
+ sp: Span,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ trait_def_id: DefId,
+ ) -> ErrorGuaranteed {
+ let trait_sp = self.tcx().def_span(trait_def_id);
+ let mut err = self
+ .tcx()
+ .sess
+ .struct_span_err(sp, "`impl` item signature doesn't match `trait` item signature");
+
+ // Mark all unnamed regions in the type with a number.
+ // This diagnostic is called in response to lifetime errors, so be informative.
+ struct HighlightBuilder<'tcx> {
+ highlight: RegionHighlightMode<'tcx>,
+ counter: usize,
+ }
+
+ impl<'tcx> HighlightBuilder<'tcx> {
+ fn build(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> RegionHighlightMode<'tcx> {
+ let mut builder =
+ HighlightBuilder { highlight: RegionHighlightMode::new(tcx), counter: 1 };
+ builder.visit_ty(ty);
+ builder.highlight
+ }
+ }
+
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for HighlightBuilder<'tcx> {
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if !r.has_name() && self.counter <= 3 {
+ self.highlight.highlighting_region(r, self.counter);
+ self.counter += 1;
+ }
+ r.super_visit_with(self)
+ }
+ }
+
+ let expected_highlight = HighlightBuilder::build(self.tcx(), expected);
+ let expected = self
+ .infcx
+ .extract_inference_diagnostics_data(expected.into(), Some(expected_highlight))
+ .name;
+ let found_highlight = HighlightBuilder::build(self.tcx(), found);
+ let found =
+ self.infcx.extract_inference_diagnostics_data(found.into(), Some(found_highlight)).name;
+
+ err.span_label(sp, &format!("found `{}`", found));
+ err.span_label(trait_sp, &format!("expected `{}`", expected));
+
+ // Get the span of all the used type parameters in the method.
+ let assoc_item = self.tcx().associated_item(trait_def_id);
+ let mut visitor = TypeParamSpanVisitor { tcx: self.tcx(), types: vec![] };
+ match assoc_item.kind {
+ ty::AssocKind::Fn => {
+ let hir = self.tcx().hir();
+ if let Some(hir_id) =
+ assoc_item.def_id.as_local().map(|id| hir.local_def_id_to_hir_id(id))
+ {
+ if let Some(decl) = hir.fn_decl_by_hir_id(hir_id) {
+ visitor.visit_fn_decl(decl);
+ }
+ }
+ }
+ _ => {}
+ }
+ let mut type_param_span: MultiSpan = visitor.types.to_vec().into();
+ for &span in &visitor.types {
+ type_param_span
+ .push_span_label(span, "consider borrowing this type parameter in the trait");
+ }
+
+ err.note(&format!("expected `{}`\n found `{}`", expected, found));
+
+ err.span_help(
+ type_param_span,
+ "the lifetime requirements from the `impl` do not correspond to the requirements in \
+ the `trait`",
+ );
+ if visitor.types.is_empty() {
+ err.help(
+ "verify the lifetime relationships in the `trait` and `impl` between the `self` \
+ argument, the other inputs and its output",
+ );
+ }
+ err.emit()
+ }
+}
+
+struct TypeParamSpanVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ types: Vec<Span>,
+}
+
+impl<'tcx> Visitor<'tcx> for TypeParamSpanVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) {
+ match arg.kind {
+ hir::TyKind::Rptr(_, ref mut_ty) => {
+ // We don't want to suggest looking into borrowing `&T` or `&Self`.
+ hir::intravisit::walk_ty(self, mut_ty.ty);
+ return;
+ }
+ hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments {
+ [segment]
+ if segment
+ .res
+ .map(|res| {
+ matches!(
+ res,
+ Res::SelfTy { trait_: _, alias_to: _ }
+ | Res::Def(hir::def::DefKind::TyParam, _)
+ )
+ })
+ .unwrap_or(false) =>
+ {
+ self.types.push(path.span);
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ hir::intravisit::walk_ty(self, arg);
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
new file mode 100644
index 000000000..3e9d491af
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/nice_region_error/util.rs
@@ -0,0 +1,167 @@
+//! Helper functions corresponding to lifetime errors due to
+//! anonymous regions.
+
+use crate::infer::error_reporting::nice_region_error::NiceRegionError;
+use crate::infer::TyCtxt;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::{self, Binder, DefIdTree, Region, Ty, TypeVisitable};
+use rustc_span::Span;
+
+/// Information about the anonymous region we are searching for.
+#[derive(Debug)]
+pub struct AnonymousParamInfo<'tcx> {
+ /// The parameter corresponding to the anonymous region.
+ pub param: &'tcx hir::Param<'tcx>,
+ /// The type corresponding to the anonymous region parameter.
+ pub param_ty: Ty<'tcx>,
+    /// The `ty::BoundRegionKind` corresponding to the anonymous region.
+ pub bound_region: ty::BoundRegionKind,
+ /// The `Span` of the parameter type.
+ pub param_ty_span: Span,
+ /// Signals that the argument is the first parameter in the declaration.
+ pub is_first: bool,
+}
+
+// This function walks the types of the function parameters using
+// `fold_regions()` and returns the `&hir::Param` of the parameter
+// corresponding to the anonymous region, together with the `Ty` in which
+// the anonymous region has been replaced by the named one.
+// Currently only the case where the function declaration consists of
+// one named region and one anonymous region is handled.
+// Consider the example `fn foo<'a>(x: &'a i32, y: &i32) -> &'a i32`.
+// Here, we would return the `hir::Param` for `y`, the type `&'a i32`
+// (the type of `y` with the anonymous region replaced by `'a`), the
+// corresponding bound region, and `is_first`, which is true if the
+// `hir::Param` is the first parameter in the function declaration.
+#[instrument(skip(tcx), level = "debug")]
+pub fn find_param_with_region<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ anon_region: Region<'tcx>,
+ replace_region: Region<'tcx>,
+) -> Option<AnonymousParamInfo<'tcx>> {
+ let (id, bound_region) = match *anon_region {
+ ty::ReFree(ref free_region) => (free_region.scope, free_region.bound_region),
+ ty::ReEarlyBound(ebr) => {
+ (tcx.parent(ebr.def_id), ty::BoundRegionKind::BrNamed(ebr.def_id, ebr.name))
+ }
+ _ => return None, // not a free region
+ };
+
+ let hir = &tcx.hir();
+ let def_id = id.as_local()?;
+ let hir_id = hir.local_def_id_to_hir_id(def_id);
+
+ // FIXME: use def_kind
+ // Don't perform this on closures
+ match hir.get(hir_id) {
+ hir::Node::Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => {
+ return None;
+ }
+ _ => {}
+ }
+
+ let body_id = hir.maybe_body_owned_by(def_id)?;
+
+ let owner_id = hir.body_owner(body_id);
+ let fn_decl = hir.fn_decl_by_hir_id(owner_id).unwrap();
+ let poly_fn_sig = tcx.fn_sig(id);
+
+ let fn_sig = tcx.liberate_late_bound_regions(id, poly_fn_sig);
+ let body = hir.body(body_id);
+ body.params
+ .iter()
+ .take(if fn_sig.c_variadic {
+ fn_sig.inputs().len()
+ } else {
+ assert_eq!(fn_sig.inputs().len(), body.params.len());
+ body.params.len()
+ })
+ .enumerate()
+ .find_map(|(index, param)| {
+ // May return None; sometimes the tables are not yet populated.
+ let ty = fn_sig.inputs()[index];
+ let mut found_anon_region = false;
+ let new_param_ty = tcx.fold_regions(ty, |r, _| {
+ if r == anon_region {
+ found_anon_region = true;
+ replace_region
+ } else {
+ r
+ }
+ });
+ if found_anon_region {
+ let ty_hir_id = fn_decl.inputs[index].hir_id;
+ let param_ty_span = hir.span(ty_hir_id);
+ let is_first = index == 0;
+ Some(AnonymousParamInfo {
+ param,
+ param_ty: new_param_ty,
+ param_ty_span,
+ bound_region,
+ is_first,
+ })
+ } else {
+ None
+ }
+ })
+}
+
+impl<'a, 'tcx> NiceRegionError<'a, 'tcx> {
+ pub(super) fn find_param_with_region(
+ &self,
+ anon_region: Region<'tcx>,
+ replace_region: Region<'tcx>,
+ ) -> Option<AnonymousParamInfo<'tcx>> {
+ find_param_with_region(self.tcx(), anon_region, replace_region)
+ }
+
+ // Here, we check for the case where the anonymous region
+ // is in the return type as written by the user.
+ // FIXME(#42703) - Need to handle certain cases here.
+ pub(super) fn is_return_type_anon(
+ &self,
+ scope_def_id: LocalDefId,
+ br: ty::BoundRegionKind,
+ hir_sig: &hir::FnSig<'_>,
+ ) -> Option<Span> {
+ let fn_ty = self.tcx().type_of(scope_def_id);
+ if let ty::FnDef(_, _) = fn_ty.kind() {
+ let ret_ty = fn_ty.fn_sig(self.tcx()).output();
+ let span = hir_sig.decl.output.span();
+ let future_output = if hir_sig.header.is_async() {
+ ret_ty.map_bound(|ty| self.infcx.get_impl_future_output_ty(ty)).transpose()
+ } else {
+ None
+ };
+ return match future_output {
+ Some(output) if self.includes_region(output, br) => Some(span),
+ None if self.includes_region(ret_ty, br) => Some(span),
+ _ => None,
+ };
+ }
+ None
+ }
+
+ fn includes_region(
+ &self,
+ ty: Binder<'tcx, impl TypeVisitable<'tcx>>,
+ region: ty::BoundRegionKind,
+ ) -> bool {
+ let late_bound_regions = self.tcx().collect_referenced_late_bound_regions(&ty);
+ late_bound_regions.iter().any(|r| *r == region)
+ }
+
+    // Here we check for the case where the anonymous region
+    // corresponds to `self` and, if so, we display E0312.
+    // FIXME(#42700) - Need to format `self` properly to
+    // enable E0621 for it.
+ pub(super) fn is_self_anon(&self, is_first: bool, scope_def_id: LocalDefId) -> bool {
+ is_first
+ && self
+ .tcx()
+ .opt_associated_item(scope_def_id.to_def_id())
+ .map(|i| i.fn_has_self_parameter)
+ == Some(true)
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/error_reporting/note.rs b/compiler/rustc_infer/src/infer/error_reporting/note.rs
new file mode 100644
index 000000000..c1940c5c0
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/error_reporting/note.rs
@@ -0,0 +1,414 @@
+use crate::infer::error_reporting::{note_and_explain_region, ObligationCauseExt};
+use crate::infer::{self, InferCtxt, SubregionOrigin};
+use rustc_errors::{struct_span_err, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_middle::traits::ObligationCauseCode;
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::{self, Region};
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ pub(super) fn note_region_origin(&self, err: &mut Diagnostic, origin: &SubregionOrigin<'tcx>) {
+ let mut label_or_note = |span, msg: &str| {
+ let sub_count = err.children.iter().filter(|d| d.span.is_dummy()).count();
+ let expanded_sub_count = err.children.iter().filter(|d| !d.span.is_dummy()).count();
+ let span_is_primary = err.span.primary_spans().iter().all(|&sp| sp == span);
+ if span_is_primary && sub_count == 0 && expanded_sub_count == 0 {
+ err.span_label(span, msg);
+ } else if span_is_primary && expanded_sub_count == 0 {
+ err.note(msg);
+ } else {
+ err.span_note(span, msg);
+ }
+ };
+ match *origin {
+ infer::Subtype(ref trace) => {
+ if let Some((expected, found)) = self.values_str(trace.values) {
+ label_or_note(
+ trace.cause.span,
+ &format!("...so that the {}", trace.cause.as_requirement_str()),
+ );
+
+ err.note_expected_found(&"", expected, &"", found);
+ } else {
+ // FIXME: this really should be handled at some earlier stage. Our
+ // handling of region checking when type errors are present is
+ // *terrible*.
+
+ label_or_note(
+ trace.cause.span,
+ &format!("...so that {}", trace.cause.as_requirement_str()),
+ );
+ }
+ }
+ infer::Reborrow(span) => {
+ label_or_note(span, "...so that reference does not outlive borrowed content");
+ }
+ infer::ReborrowUpvar(span, ref upvar_id) => {
+ let var_name = self.tcx.hir().name(upvar_id.var_path.hir_id);
+ label_or_note(span, &format!("...so that closure can access `{}`", var_name));
+ }
+ infer::RelateObjectBound(span) => {
+ label_or_note(span, "...so that it can be closed over into an object");
+ }
+ infer::DataBorrowed(ty, span) => {
+ label_or_note(
+ span,
+ &format!(
+ "...so that the type `{}` is not borrowed for too long",
+ self.ty_to_string(ty)
+ ),
+ );
+ }
+ infer::ReferenceOutlivesReferent(ty, span) => {
+ label_or_note(
+ span,
+ &format!(
+ "...so that the reference type `{}` does not outlive the data it points at",
+ self.ty_to_string(ty)
+ ),
+ );
+ }
+ infer::RelateParamBound(span, t, opt_span) => {
+ label_or_note(
+ span,
+ &format!(
+ "...so that the type `{}` will meet its required lifetime bounds{}",
+ self.ty_to_string(t),
+ if opt_span.is_some() { "..." } else { "" },
+ ),
+ );
+ if let Some(span) = opt_span {
+ err.span_note(span, "...that is required by this bound");
+ }
+ }
+ infer::RelateRegionParamBound(span) => {
+ label_or_note(
+ span,
+ "...so that the declared lifetime parameter bounds are satisfied",
+ );
+ }
+ infer::CompareImplItemObligation { span, .. } => {
+ label_or_note(
+ span,
+ "...so that the definition in impl matches the definition from the trait",
+ );
+ }
+ infer::CheckAssociatedTypeBounds { ref parent, .. } => {
+ self.note_region_origin(err, &parent);
+ }
+ }
+ }
+
+ pub(super) fn report_concrete_failure(
+ &self,
+ origin: SubregionOrigin<'tcx>,
+ sub: Region<'tcx>,
+ sup: Region<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ match origin {
+ infer::Subtype(box trace) => {
+ let terr = TypeError::RegionsDoesNotOutlive(sup, sub);
+ let mut err = self.report_and_explain_type_error(trace, &terr);
+ match (*sub, *sup) {
+ (ty::RePlaceholder(_), ty::RePlaceholder(_)) => {}
+ (ty::RePlaceholder(_), _) => {
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "",
+ sup,
+ " doesn't meet the lifetime requirements",
+ None,
+ );
+ }
+ (_, ty::RePlaceholder(_)) => {
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "the required lifetime does not necessarily outlive ",
+ sub,
+ "",
+ None,
+ );
+ }
+ _ => {
+ note_and_explain_region(self.tcx, &mut err, "", sup, "...", None);
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "...does not necessarily outlive ",
+ sub,
+ "",
+ None,
+ );
+ }
+ }
+ err
+ }
+ infer::Reborrow(span) => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0312,
+ "lifetime of reference outlives lifetime of borrowed content..."
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "...the reference is valid for ",
+ sub,
+ "...",
+ None,
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "...but the borrowed content is only valid for ",
+ sup,
+ "",
+ None,
+ );
+ err
+ }
+ infer::ReborrowUpvar(span, ref upvar_id) => {
+ let var_name = self.tcx.hir().name(upvar_id.var_path.hir_id);
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0313,
+ "lifetime of borrowed pointer outlives lifetime of captured variable `{}`...",
+ var_name
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "...the borrowed pointer is valid for ",
+ sub,
+ "...",
+ None,
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ &format!("...but `{}` is only valid for ", var_name),
+ sup,
+ "",
+ None,
+ );
+ err
+ }
+ infer::RelateObjectBound(span) => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0476,
+ "lifetime of the source pointer does not outlive lifetime bound of the \
+ object type"
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "object type is valid for ",
+ sub,
+ "",
+ None,
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "source pointer is only valid for ",
+ sup,
+ "",
+ None,
+ );
+ err
+ }
+ infer::RelateParamBound(span, ty, opt_span) => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0477,
+ "the type `{}` does not fulfill the required lifetime",
+ self.ty_to_string(ty)
+ );
+ match *sub {
+ ty::ReStatic => note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "type must satisfy ",
+ sub,
+ if opt_span.is_some() { " as required by this binding" } else { "" },
+ opt_span,
+ ),
+ _ => note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "type must outlive ",
+ sub,
+ if opt_span.is_some() { " as required by this binding" } else { "" },
+ opt_span,
+ ),
+ }
+ err
+ }
+ infer::RelateRegionParamBound(span) => {
+ let mut err =
+ struct_span_err!(self.tcx.sess, span, E0478, "lifetime bound not satisfied");
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "lifetime parameter instantiated with ",
+ sup,
+ "",
+ None,
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "but lifetime parameter must outlive ",
+ sub,
+ "",
+ None,
+ );
+ err
+ }
+ infer::DataBorrowed(ty, span) => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0490,
+ "a value of type `{}` is borrowed for too long",
+ self.ty_to_string(ty)
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "the type is valid for ",
+ sub,
+ "",
+ None,
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "but the borrow lasts for ",
+ sup,
+ "",
+ None,
+ );
+ err
+ }
+ infer::ReferenceOutlivesReferent(ty, span) => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0491,
+ "in type `{}`, reference has a longer lifetime than the data it references",
+ self.ty_to_string(ty)
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "the pointer is valid for ",
+ sub,
+ "",
+ None,
+ );
+ note_and_explain_region(
+ self.tcx,
+ &mut err,
+ "but the referenced data is only valid for ",
+ sup,
+ "",
+ None,
+ );
+ err
+ }
+ infer::CompareImplItemObligation { span, impl_item_def_id, trait_item_def_id } => self
+ .report_extra_impl_obligation(
+ span,
+ impl_item_def_id,
+ trait_item_def_id,
+ &format!("`{}: {}`", sup, sub),
+ ),
+ infer::CheckAssociatedTypeBounds { impl_item_def_id, trait_item_def_id, parent } => {
+ let mut err = self.report_concrete_failure(*parent, sub, sup);
+
+ let trait_item_span = self.tcx.def_span(trait_item_def_id);
+ let item_name = self.tcx.item_name(impl_item_def_id.to_def_id());
+ err.span_label(
+ trait_item_span,
+ format!("definition of `{}` from trait", item_name),
+ );
+
+ let trait_predicates = self.tcx.explicit_predicates_of(trait_item_def_id);
+ let impl_predicates = self.tcx.explicit_predicates_of(impl_item_def_id);
+
+ let impl_predicates: rustc_data_structures::fx::FxHashSet<_> =
+ impl_predicates.predicates.into_iter().map(|(pred, _)| pred).collect();
+ let clauses: Vec<_> = trait_predicates
+ .predicates
+ .into_iter()
+ .filter(|&(pred, _)| !impl_predicates.contains(pred))
+ .map(|(pred, _)| format!("{}", pred))
+ .collect();
+
+ if !clauses.is_empty() {
+ let generics = self.tcx.hir().get_generics(impl_item_def_id).unwrap();
+ let where_clause_span = generics.tail_span_for_predicate_suggestion();
+
+ let suggestion = format!(
+ "{} {}",
+ generics.add_where_or_trailing_comma(),
+ clauses.join(", "),
+ );
+ err.span_suggestion(
+ where_clause_span,
+ &format!(
+ "try copying {} from the trait",
+ if clauses.len() > 1 { "these clauses" } else { "this clause" }
+ ),
+ suggestion,
+ rustc_errors::Applicability::MaybeIncorrect,
+ );
+ }
+
+ err
+ }
+ }
+ }
+
+ pub(super) fn report_placeholder_failure(
+ &self,
+ placeholder_origin: SubregionOrigin<'tcx>,
+ sub: Region<'tcx>,
+ sup: Region<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ // I can't think how to do better than this right now. -nikomatsakis
+ debug!(?placeholder_origin, ?sub, ?sup, "report_placeholder_failure");
+ match placeholder_origin {
+ infer::Subtype(box ref trace)
+ if matches!(
+ &trace.cause.code().peel_derives(),
+ ObligationCauseCode::BindingObligation(..)
+ ) =>
+ {
+ // Hack to get around the borrow checker because trace.cause has an `Rc`.
+ if let ObligationCauseCode::BindingObligation(_, span) =
+ &trace.cause.code().peel_derives()
+ {
+ let span = *span;
+ let mut err = self.report_concrete_failure(placeholder_origin, sub, sup);
+ err.span_note(span, "the lifetime requirement is introduced here");
+ err
+ } else {
+ unreachable!()
+ }
+ }
+ infer::Subtype(box trace) => {
+ let terr = TypeError::RegionsPlaceholderMismatch;
+ return self.report_and_explain_type_error(trace, &terr);
+ }
+ _ => return self.report_concrete_failure(placeholder_origin, sub, sup),
+ }
+ }
+}
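
The `label_or_note` closure at the top of this file encodes one small decision:
attach the message as a span label only when the span is already the
diagnostic's primary span and no sub-diagnostics would repeat it. A standalone
restatement of just that decision table (toy enum, not rustc_errors):

#[derive(Debug, PartialEq)]
enum Emit {
    SpanLabel,
    Note,
    SpanNote,
}

fn label_or_note(span_is_primary: bool, sub_count: usize, expanded_sub_count: usize) -> Emit {
    if span_is_primary && sub_count == 0 && expanded_sub_count == 0 {
        Emit::SpanLabel
    } else if span_is_primary && expanded_sub_count == 0 {
        Emit::Note
    } else {
        Emit::SpanNote
    }
}

fn main() {
    assert_eq!(label_or_note(true, 0, 0), Emit::SpanLabel);
    assert_eq!(label_or_note(true, 2, 0), Emit::Note);
    assert_eq!(label_or_note(false, 0, 1), Emit::SpanNote);
}
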
diff --git a/compiler/rustc_infer/src/infer/free_regions.rs b/compiler/rustc_infer/src/infer/free_regions.rs
new file mode 100644
index 000000000..d566634a4
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/free_regions.rs
@@ -0,0 +1,119 @@
+//! This module handles the relationships between "free regions", i.e., lifetime parameters.
+//! Ordinarily, free regions are unrelated to one another, but they can be related via implied
+//! or explicit bounds. In that case, we track the bounds using the `TransitiveRelation` type,
+//! and use that to decide when one free region outlives another, and so forth.
+
+use rustc_data_structures::transitive_relation::TransitiveRelation;
+use rustc_middle::ty::{Lift, Region, TyCtxt};
+
+/// Combines a `FreeRegionMap` and a `TyCtxt`.
+///
+/// This stuff is a bit convoluted and should be refactored, but as we
+/// transition to NLL, it'll all go away anyhow.
+pub(crate) struct RegionRelations<'a, 'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+
+ /// Free-region relationships.
+ pub free_regions: &'a FreeRegionMap<'tcx>,
+}
+
+impl<'a, 'tcx> RegionRelations<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, free_regions: &'a FreeRegionMap<'tcx>) -> Self {
+ Self { tcx, free_regions }
+ }
+
+ pub fn lub_free_regions(&self, r_a: Region<'tcx>, r_b: Region<'tcx>) -> Region<'tcx> {
+ self.free_regions.lub_free_regions(self.tcx, r_a, r_b)
+ }
+}
+
+#[derive(Clone, Debug, Default)]
+pub struct FreeRegionMap<'tcx> {
+ // Stores the relation `a < b`, where `a` and `b` are regions.
+ //
+ // Invariant: only free regions like `'x` or `'static` are stored
+ // in this relation, not scopes.
+ relation: TransitiveRelation<Region<'tcx>>,
+}
+
+impl<'tcx> FreeRegionMap<'tcx> {
+ pub fn elements(&self) -> impl Iterator<Item = Region<'tcx>> + '_ {
+ self.relation.elements().copied()
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.relation.is_empty()
+ }
+
+ // Record that `'sup:'sub`. Or, put another way, `'sub <= 'sup`.
+ // (with the exception that `'static: 'x` is not notable)
+ pub fn relate_regions(&mut self, sub: Region<'tcx>, sup: Region<'tcx>) {
+ debug!("relate_regions(sub={:?}, sup={:?})", sub, sup);
+ if sub.is_free_or_static() && sup.is_free() {
+ self.relation.add(sub, sup)
+ }
+ }
+
+ /// Tests whether `r_a <= r_b`.
+ ///
+ /// Both regions must meet `is_free_or_static`.
+ ///
+ /// Subtle: one tricky case that this code gets correct is as
+ /// follows. If we know that `r_b: 'static`, then this function
+ /// will return true, even though we don't know anything that
+ /// directly relates `r_a` and `r_b`.
+ pub fn sub_free_regions(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ r_a: Region<'tcx>,
+ r_b: Region<'tcx>,
+ ) -> bool {
+ assert!(r_a.is_free_or_static() && r_b.is_free_or_static());
+ let re_static = tcx.lifetimes.re_static;
+ if self.check_relation(re_static, r_b) {
+ // `'a <= 'static` is always true, and not stored in the
+ // relation explicitly, so check if `'b` is `'static` (or
+ // equivalent to it)
+ true
+ } else {
+ self.check_relation(r_a, r_b)
+ }
+ }
+
+ /// Check whether `r_a <= r_b` is found in the relation.
+ fn check_relation(&self, r_a: Region<'tcx>, r_b: Region<'tcx>) -> bool {
+ r_a == r_b || self.relation.contains(r_a, r_b)
+ }
+
+ /// Computes the least-upper-bound of two free regions. In some
+ /// cases, this is more conservative than necessary, in order to
+ /// avoid making arbitrary choices. See
+ /// `TransitiveRelation::postdom_upper_bound` for more details.
+ pub fn lub_free_regions(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ r_a: Region<'tcx>,
+ r_b: Region<'tcx>,
+ ) -> Region<'tcx> {
+ debug!("lub_free_regions(r_a={:?}, r_b={:?})", r_a, r_b);
+ assert!(r_a.is_free());
+ assert!(r_b.is_free());
+ let result = if r_a == r_b {
+ r_a
+ } else {
+ match self.relation.postdom_upper_bound(r_a, r_b) {
+ None => tcx.lifetimes.re_static,
+ Some(r) => r,
+ }
+ };
+ debug!("lub_free_regions(r_a={:?}, r_b={:?}) = {:?}", r_a, r_b, result);
+ result
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for FreeRegionMap<'a> {
+ type Lifted = FreeRegionMap<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<FreeRegionMap<'tcx>> {
+ self.relation.maybe_map(|fr| tcx.lift(fr)).map(|relation| FreeRegionMap { relation })
+ }
+}
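
A std-only toy with the same observable behaviour as `sub_free_regions` above
(illustrative names; rustc's `TransitiveRelation` is more efficient and also
answers least-upper-bound queries):

use std::collections::{HashMap, HashSet};

const STATIC: &str = "'static";

#[derive(Default)]
struct ToyFreeRegionMap<'a> {
    outlived_by: HashMap<&'a str, HashSet<&'a str>>, // sub -> declared sups
}

impl<'a> ToyFreeRegionMap<'a> {
    // Record `'sup: 'sub`, i.e. `'sub <= 'sup`.
    fn relate_regions(&mut self, sub: &'a str, sup: &'a str) {
        self.outlived_by.entry(sub).or_default().insert(sup);
    }

    // Is `to` reachable from `from` through declared bounds?
    fn reachable(&self, from: &str, to: &str) -> bool {
        if from == to {
            return true;
        }
        let (mut stack, mut seen) = (vec![from], HashSet::new());
        while let Some(r) = stack.pop() {
            if !seen.insert(r) {
                continue;
            }
            if let Some(sups) = self.outlived_by.get(r) {
                for &s in sups {
                    if s == to {
                        return true;
                    }
                    stack.push(s);
                }
            }
        }
        false
    }

    // `r_a <= r_b`, with the special case that everything is below 'static:
    // if `r_b` is 'static (or declared to outlive it), the answer is yes.
    fn sub_free_regions(&self, r_a: &str, r_b: &str) -> bool {
        self.reachable(STATIC, r_b) || self.reachable(r_a, r_b)
    }
}

fn main() {
    let mut map = ToyFreeRegionMap::default();
    map.relate_regions("'a", "'b"); // 'b: 'a
    map.relate_regions("'b", "'c"); // 'c: 'b
    assert!(map.sub_free_regions("'a", "'c")); // transitively, 'c: 'a
    assert!(map.sub_free_regions("'anything", STATIC));
}
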
diff --git a/compiler/rustc_infer/src/infer/freshen.rs b/compiler/rustc_infer/src/infer/freshen.rs
new file mode 100644
index 000000000..84004d2b2
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/freshen.rs
@@ -0,0 +1,255 @@
+//! Freshening is the process of replacing unknown variables with fresh types. The idea is that
+//! the type, after freshening, contains no inference variables but instead contains either a
+//! value for each variable or fresh "arbitrary" types wherever a variable would have been.
+//!
+//! Freshening is used primarily to get a good type for inserting into a cache. The result
+//! summarizes what the type inferencer knows "so far". The primary place it is used right now is
+//! in the trait matching algorithm, which needs to be able to cache whether an `impl` self type
+//! matches some other type X -- *without* affecting `X`. That means that if the type `X` is in
+//! fact an unbound type variable, we want the match to be regarded as ambiguous, because depending
+//! on what type that type variable is ultimately assigned, the match may or may not succeed.
+//!
+//! To handle closures, freshened types also have to contain the signature and kind of any
+//! closure in the local inference context, as otherwise the cache key might be invalidated.
+//! The way this is done is somewhat hacky - the closure signature is appended to the substs,
+//! as well as the closure kind "encoded" as a type. Also, special handling is needed when
+//! the closure signature contains a reference to the original closure.
+//!
+//! Note that you should be careful not to allow the output of freshening to leak to the user in
+//! error messages or in any other form. Freshening is only really useful as an internal detail.
+//!
+//! Because of the manipulation required to handle closures, doing arbitrary operations on
+//! freshened types is not recommended. However, in addition to doing equality/hash
+//! comparisons (for caching), it is possible to do a `ty::_match` operation between
+//! two freshened types - this works even with the closure encoding.
+//!
+//! __An important detail concerning regions.__ The freshener also replaces *all* free regions with
+//! 'erased. The reason behind this is that, in general, we do not take region relationships into
+//! account when making type-overloaded decisions. This is important because of the design of the
+//! region inferencer, which is not based on unification but rather on accumulating and then
+//! solving a set of constraints. In contrast, the type inferencer assigns a value to each type
+//! variable only once, and it does so as soon as it can, so it is reasonable to ask what the type
+//! inferencer knows "so far".
+
+use super::InferCtxt;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::infer::unify_key::ToType;
+use rustc_middle::ty::fold::TypeFolder;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVisitable};
+use std::collections::hash_map::Entry;
+
+pub struct TypeFreshener<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ ty_freshen_count: u32,
+ const_freshen_count: u32,
+ ty_freshen_map: FxHashMap<ty::InferTy, Ty<'tcx>>,
+ const_freshen_map: FxHashMap<ty::InferConst<'tcx>, ty::Const<'tcx>>,
+ keep_static: bool,
+}
+
+impl<'a, 'tcx> TypeFreshener<'a, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'a, 'tcx>, keep_static: bool) -> TypeFreshener<'a, 'tcx> {
+ TypeFreshener {
+ infcx,
+ ty_freshen_count: 0,
+ const_freshen_count: 0,
+ ty_freshen_map: Default::default(),
+ const_freshen_map: Default::default(),
+ keep_static,
+ }
+ }
+
+ fn freshen_ty<F>(
+ &mut self,
+ opt_ty: Option<Ty<'tcx>>,
+ key: ty::InferTy,
+ freshener: F,
+ ) -> Ty<'tcx>
+ where
+ F: FnOnce(u32) -> ty::InferTy,
+ {
+ if let Some(ty) = opt_ty {
+ return ty.fold_with(self);
+ }
+
+ match self.ty_freshen_map.entry(key) {
+ Entry::Occupied(entry) => *entry.get(),
+ Entry::Vacant(entry) => {
+ let index = self.ty_freshen_count;
+ self.ty_freshen_count += 1;
+ let t = self.infcx.tcx.mk_ty_infer(freshener(index));
+ entry.insert(t);
+ t
+ }
+ }
+ }
+
+ fn freshen_const<F>(
+ &mut self,
+ opt_ct: Option<ty::Const<'tcx>>,
+ key: ty::InferConst<'tcx>,
+ freshener: F,
+ ty: Ty<'tcx>,
+ ) -> ty::Const<'tcx>
+ where
+ F: FnOnce(u32) -> ty::InferConst<'tcx>,
+ {
+ if let Some(ct) = opt_ct {
+ return ct.fold_with(self);
+ }
+
+ match self.const_freshen_map.entry(key) {
+ Entry::Occupied(entry) => *entry.get(),
+ Entry::Vacant(entry) => {
+ let index = self.const_freshen_count;
+ self.const_freshen_count += 1;
+ let ct = self.infcx.tcx.mk_const_infer(freshener(index), ty);
+ entry.insert(ct);
+ ct
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for TypeFreshener<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(..) => {
+ // leave bound regions alone
+ r
+ }
+
+ ty::ReEarlyBound(..)
+ | ty::ReFree(_)
+ | ty::ReVar(_)
+ | ty::RePlaceholder(..)
+ | ty::ReEmpty(_)
+ | ty::ReErased => {
+ // replace all free regions with 'erased
+ self.tcx().lifetimes.re_erased
+ }
+ ty::ReStatic => {
+ if self.keep_static {
+ r
+ } else {
+ self.tcx().lifetimes.re_erased
+ }
+ }
+ }
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ if !t.needs_infer() && !t.has_erasable_regions() {
+ return t;
+ }
+
+ let tcx = self.infcx.tcx;
+
+ match *t.kind() {
+ ty::Infer(ty::TyVar(v)) => {
+ let opt_ty = self.infcx.inner.borrow_mut().type_variables().probe(v).known();
+ self.freshen_ty(opt_ty, ty::TyVar(v), ty::FreshTy)
+ }
+
+ ty::Infer(ty::IntVar(v)) => self.freshen_ty(
+ self.infcx
+ .inner
+ .borrow_mut()
+ .int_unification_table()
+ .probe_value(v)
+ .map(|v| v.to_type(tcx)),
+ ty::IntVar(v),
+ ty::FreshIntTy,
+ ),
+
+ ty::Infer(ty::FloatVar(v)) => self.freshen_ty(
+ self.infcx
+ .inner
+ .borrow_mut()
+ .float_unification_table()
+ .probe_value(v)
+ .map(|v| v.to_type(tcx)),
+ ty::FloatVar(v),
+ ty::FreshFloatTy,
+ ),
+
+ ty::Infer(ty::FreshTy(ct) | ty::FreshIntTy(ct) | ty::FreshFloatTy(ct)) => {
+ if ct >= self.ty_freshen_count {
+ bug!(
+ "Encountered a freshend type with id {} \
+ but our counter is only at {}",
+ ct,
+ self.ty_freshen_count
+ );
+ }
+ t
+ }
+
+ ty::Generator(..)
+ | ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::Adt(..)
+ | ty::Str
+ | ty::Error(_)
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Dynamic(..)
+ | ty::Never
+ | ty::Tuple(..)
+ | ty::Projection(..)
+ | ty::Foreign(..)
+ | ty::Param(..)
+ | ty::Closure(..)
+ | ty::GeneratorWitness(..)
+ | ty::Opaque(..) => t.super_fold_with(self),
+
+ ty::Placeholder(..) | ty::Bound(..) => bug!("unexpected type {:?}", t),
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ match ct.kind() {
+ ty::ConstKind::Infer(ty::InferConst::Var(v)) => {
+ let opt_ct = self
+ .infcx
+ .inner
+ .borrow_mut()
+ .const_unification_table()
+ .probe_value(v)
+ .val
+ .known();
+ self.freshen_const(opt_ct, ty::InferConst::Var(v), ty::InferConst::Fresh, ct.ty())
+ }
+ ty::ConstKind::Infer(ty::InferConst::Fresh(i)) => {
+ if i >= self.const_freshen_count {
+ bug!(
+ "Encountered a freshend const with id {} \
+ but our counter is only at {}",
+ i,
+ self.const_freshen_count,
+ );
+ }
+ ct
+ }
+
+ ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => {
+ bug!("unexpected const {:?}", ct)
+ }
+
+ ty::ConstKind::Param(_)
+ | ty::ConstKind::Value(_)
+ | ty::ConstKind::Unevaluated(..)
+ | ty::ConstKind::Error(_) => ct.super_fold_with(self),
+ }
+ }
+}
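
The invariant `TypeFreshener::freshen_ty` maintains is that each distinct
inference variable receives the same fresh index on every encounter, so
structurally equal types freshen to equal cache keys. A minimal sketch on a toy
type (std only, not rustc's folder machinery):

use std::collections::HashMap;

#[derive(Debug, PartialEq)]
enum Ty {
    Int,
    Var(u32),   // unresolved inference variable
    Fresh(u32), // freshened stand-in
    Tuple(Vec<Ty>),
}

#[derive(Default)]
struct Freshener {
    count: u32,
    map: HashMap<u32, u32>, // inference var -> fresh index
}

impl Freshener {
    fn fold(&mut self, ty: &Ty) -> Ty {
        match ty {
            Ty::Int => Ty::Int,
            Ty::Fresh(i) => Ty::Fresh(*i),
            Ty::Var(v) => {
                // Memoize: a repeated variable reuses its fresh index.
                let count = &mut self.count;
                let idx = *self.map.entry(*v).or_insert_with(|| {
                    let i = *count;
                    *count += 1;
                    i
                });
                Ty::Fresh(idx)
            }
            Ty::Tuple(tys) => Ty::Tuple(tys.iter().map(|t| self.fold(t)).collect()),
        }
    }
}

fn main() {
    let mut f = Freshener::default();
    let ty = Ty::Tuple(vec![Ty::Var(3), Ty::Var(3), Ty::Var(9)]);
    // ?3 appears twice and must freshen to the same index both times.
    assert_eq!(
        f.fold(&ty),
        Ty::Tuple(vec![Ty::Fresh(0), Ty::Fresh(0), Ty::Fresh(1)])
    );
}
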
diff --git a/compiler/rustc_infer/src/infer/fudge.rs b/compiler/rustc_infer/src/infer/fudge.rs
new file mode 100644
index 000000000..2f0eadce6
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/fudge.rs
@@ -0,0 +1,246 @@
+use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::{self, ConstVid, FloatVid, IntVid, RegionVid, Ty, TyCtxt, TyVid};
+
+use super::type_variable::TypeVariableOrigin;
+use super::InferCtxt;
+use super::{ConstVariableOrigin, RegionVariableOrigin, UnificationTable};
+
+use rustc_data_structures::snapshot_vec as sv;
+use rustc_data_structures::unify as ut;
+use ut::UnifyKey;
+
+use std::ops::Range;
+
+fn vars_since_snapshot<'tcx, T>(
+ table: &mut UnificationTable<'_, 'tcx, T>,
+ snapshot_var_len: usize,
+) -> Range<T>
+where
+ T: UnifyKey,
+ super::UndoLog<'tcx>: From<sv::UndoLog<ut::Delegate<T>>>,
+{
+ T::from_index(snapshot_var_len as u32)..T::from_index(table.len() as u32)
+}
+
+fn const_vars_since_snapshot<'tcx>(
+ table: &mut UnificationTable<'_, 'tcx, ConstVid<'tcx>>,
+ snapshot_var_len: usize,
+) -> (Range<ConstVid<'tcx>>, Vec<ConstVariableOrigin>) {
+ let range = vars_since_snapshot(table, snapshot_var_len);
+ (
+ range.start..range.end,
+ (range.start.index..range.end.index)
+ .map(|index| table.probe_value(ConstVid::from_index(index)).origin)
+ .collect(),
+ )
+}
+
+struct VariableLengths {
+ type_var_len: usize,
+ const_var_len: usize,
+ int_var_len: usize,
+ float_var_len: usize,
+ region_constraints_len: usize,
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ fn variable_lengths(&self) -> VariableLengths {
+ let mut inner = self.inner.borrow_mut();
+ VariableLengths {
+ type_var_len: inner.type_variables().num_vars(),
+ const_var_len: inner.const_unification_table().len(),
+ int_var_len: inner.int_unification_table().len(),
+ float_var_len: inner.float_unification_table().len(),
+ region_constraints_len: inner.unwrap_region_constraints().num_region_vars(),
+ }
+ }
+
+ /// This rather funky routine is used while processing expected
+ /// types. What happens here is that we want to propagate a
+ /// coercion through the return type of a fn to its
+ /// argument. Consider the type of `Option::Some`, which is
+ /// basically `for<T> fn(T) -> Option<T>`. So if we have an
+ /// expression `Some(&[1, 2, 3])`, and that has the expected type
+ /// `Option<&[u32]>`, we would like to type check `&[1, 2, 3]`
+ /// with the expectation of `&[u32]`. This will cause us to coerce
+ /// from `&[u32; 3]` to `&[u32]` and make the user's life more
+ /// pleasant.
+ ///
+ /// The way we do this is using `fudge_inference_if_ok`. What the
+ /// routine actually does is to start a snapshot and execute the
+ /// closure `f`. In our example above, what this closure will do
+ /// is to unify the expectation (`Option<&[u32]>`) with the actual
+ /// return type (`Option<?T>`, where `?T` represents the variable
+ /// instantiated for `T`). This will cause `?T` to be unified
+ /// with `&?a [u32]`, where `?a` is a fresh lifetime variable. The
+ /// input type (`?T`) is then returned by `f()`.
+ ///
+ /// At this point, `fudge_inference_if_ok` will normalize all type
+ /// variables, converting `?T` to `&?a [u32]` and end the
+ /// snapshot. The problem is that we can't just return this type
+ /// out, because it references the region variable `?a`, and that
+ /// region variable was popped when we popped the snapshot.
+ ///
+ /// So what we do is to keep a list (`region_vars`, in the code below)
+ /// of region variables created during the snapshot (here, `?a`). We
+ /// fold the return value and replace any such regions with a *new*
+ /// region variable (e.g., `?b`) and return the result (`&?b [u32]`).
+ /// This can then be used as the expectation for the fn argument.
+ ///
+ /// The important point here is that, for soundness purposes, the
+ /// regions in question are not particularly important. We will
+ /// use the expected types to guide coercions, but we will still
+ /// type-check the resulting types from those coercions against
+ /// the actual types (`?T`, `Option<?T>`) -- and remember that
+ /// after the snapshot is popped, the variable `?T` is no longer
+ /// unified.
+ #[instrument(skip(self, f), level = "debug")]
+ pub fn fudge_inference_if_ok<T, E, F>(&self, f: F) -> Result<T, E>
+ where
+ F: FnOnce() -> Result<T, E>,
+ T: TypeFoldable<'tcx>,
+ {
+ let variable_lengths = self.variable_lengths();
+ let (mut fudger, value) = self.probe(|_| {
+ match f() {
+ Ok(value) => {
+ let value = self.resolve_vars_if_possible(value);
+
+ // At this point, `value` could in principle refer
+ // to inference variables that have been created during
+ // the snapshot. Once we exit `probe()`, those are
+ // going to be popped, so we will have to
+ // eliminate any references to them.
+
+ let mut inner = self.inner.borrow_mut();
+ let type_vars =
+ inner.type_variables().vars_since_snapshot(variable_lengths.type_var_len);
+ let int_vars = vars_since_snapshot(
+ &mut inner.int_unification_table(),
+ variable_lengths.int_var_len,
+ );
+ let float_vars = vars_since_snapshot(
+ &mut inner.float_unification_table(),
+ variable_lengths.float_var_len,
+ );
+ let region_vars = inner
+ .unwrap_region_constraints()
+ .vars_since_snapshot(variable_lengths.region_constraints_len);
+ let const_vars = const_vars_since_snapshot(
+ &mut inner.const_unification_table(),
+ variable_lengths.const_var_len,
+ );
+
+ let fudger = InferenceFudger {
+ infcx: self,
+ type_vars,
+ int_vars,
+ float_vars,
+ region_vars,
+ const_vars,
+ };
+
+ Ok((fudger, value))
+ }
+ Err(e) => Err(e),
+ }
+ })?;
+
+ // At this point, we need to replace any of the now-popped
+ // type/region variables that appear in `value` with a fresh
+ // variable of the appropriate kind. We can't do this during
+ // the probe because they would just get popped then too. =)
+
+ // Micro-optimization: if no variables have been created, then
+ // `value` can't refer to any of them. =) So we can just return it.
+ if fudger.type_vars.0.is_empty()
+ && fudger.int_vars.is_empty()
+ && fudger.float_vars.is_empty()
+ && fudger.region_vars.0.is_empty()
+ && fudger.const_vars.0.is_empty()
+ {
+ Ok(value)
+ } else {
+ Ok(value.fold_with(&mut fudger))
+ }
+ }
+}
+
+pub struct InferenceFudger<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ type_vars: (Range<TyVid>, Vec<TypeVariableOrigin>),
+ int_vars: Range<IntVid>,
+ float_vars: Range<FloatVid>,
+ region_vars: (Range<RegionVid>, Vec<RegionVariableOrigin>),
+ const_vars: (Range<ConstVid<'tcx>>, Vec<ConstVariableOrigin>),
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for InferenceFudger<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match *ty.kind() {
+ ty::Infer(ty::InferTy::TyVar(vid)) => {
+ if self.type_vars.0.contains(&vid) {
+ // This variable was created during the fudging.
+ // Recreate it with a fresh variable here.
+ let idx = (vid.as_usize() - self.type_vars.0.start.as_usize()) as usize;
+ let origin = self.type_vars.1[idx];
+ self.infcx.next_ty_var(origin)
+ } else {
+ // This variable was created before the
+ // "fudging". Since we refresh all type
+ // variables to their binding anyhow, we know
+ // that it is unbound, so we can just return
+ // it.
+ debug_assert!(
+ self.infcx.inner.borrow_mut().type_variables().probe(vid).is_unknown()
+ );
+ ty
+ }
+ }
+ ty::Infer(ty::InferTy::IntVar(vid)) => {
+ if self.int_vars.contains(&vid) {
+ self.infcx.next_int_var()
+ } else {
+ ty
+ }
+ }
+ ty::Infer(ty::InferTy::FloatVar(vid)) => {
+ if self.float_vars.contains(&vid) {
+ self.infcx.next_float_var()
+ } else {
+ ty
+ }
+ }
+ _ => ty.super_fold_with(self),
+ }
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ if let ty::ReVar(vid) = *r && self.region_vars.0.contains(&vid) {
+ let idx = vid.index() - self.region_vars.0.start.index();
+ let origin = self.region_vars.1[idx];
+ return self.infcx.next_region_var(origin);
+ }
+ r
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ if let ty::ConstKind::Infer(ty::InferConst::Var(vid)) = ct.kind() {
+ if self.const_vars.0.contains(&vid) {
+ // This variable was created during the fudging.
+ // Recreate it with a fresh variable here.
+ let idx = (vid.index - self.const_vars.0.start.index) as usize;
+ let origin = self.const_vars.1[idx];
+ self.infcx.next_const_var(ct.ty(), origin)
+ } else {
+ ct
+ }
+ } else {
+ ct.super_fold_with(self)
+ }
+ }
+}
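
Stripped of the unification tables, the trick in `fudge_inference_if_ok` is:
record how many variables existed at the snapshot, and afterwards remap any
variable whose index falls in the "created during the snapshot" range to a
brand-new one. A toy, std-only sketch of that bookkeeping:

use std::ops::Range;

struct ToyInferCtxt {
    next_var: u32,
}

impl ToyInferCtxt {
    fn next_ty_var(&mut self) -> u32 {
        let v = self.next_var;
        self.next_var += 1;
        v
    }
}

fn main() {
    let mut infcx = ToyInferCtxt { next_var: 0 };
    let pre_existing = infcx.next_ty_var(); // ?0, created before the snapshot

    // --- snapshot starts: remember the current variable count ---
    let snapshot_len = infcx.next_var;
    let during = infcx.next_ty_var(); // ?1, popped together with the snapshot
    let vars_since_snapshot: Range<u32> = snapshot_len..infcx.next_var;
    // --- snapshot popped: ?1 no longer exists ---

    // The "fudger": variables in the recorded range are recreated fresh,
    // everything older is left alone.
    let mut fudge = |v: u32| -> u32 {
        if vars_since_snapshot.contains(&v) { infcx.next_ty_var() } else { v }
    };
    assert_eq!(fudge(pre_existing), 0); // untouched
    assert_eq!(fudge(during), 2);       // remapped to a fresh variable
}
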
diff --git a/compiler/rustc_infer/src/infer/glb.rs b/compiler/rustc_infer/src/infer/glb.rs
new file mode 100644
index 000000000..1570a08f3
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/glb.rs
@@ -0,0 +1,144 @@
+//! Greatest lower bound. See [`lattice`].
+
+use super::combine::CombineFields;
+use super::lattice::{self, LatticeDir};
+use super::InferCtxt;
+use super::Subtype;
+
+use crate::infer::combine::ConstEquateRelation;
+use crate::traits::{ObligationCause, PredicateObligation};
+use rustc_middle::ty::relate::{Relate, RelateResult, TypeRelation};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+/// "Greatest lower bound" (common subtype)
+pub struct Glb<'combine, 'infcx, 'tcx> {
+ fields: &'combine mut CombineFields<'infcx, 'tcx>,
+ a_is_expected: bool,
+}
+
+impl<'combine, 'infcx, 'tcx> Glb<'combine, 'infcx, 'tcx> {
+ pub fn new(
+ fields: &'combine mut CombineFields<'infcx, 'tcx>,
+ a_is_expected: bool,
+ ) -> Glb<'combine, 'infcx, 'tcx> {
+ Glb { fields, a_is_expected }
+ }
+}
+
+impl<'tcx> TypeRelation<'tcx> for Glb<'_, '_, 'tcx> {
+ fn tag(&self) -> &'static str {
+ "Glb"
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.fields.tcx()
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.fields.param_env
+ }
+
+ fn a_is_expected(&self) -> bool {
+ self.a_is_expected
+ }
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ variance: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ match variance {
+ ty::Invariant => self.fields.equate(self.a_is_expected).relate(a, b),
+ ty::Covariant => self.relate(a, b),
+ // FIXME(#41044) -- not correct, need test
+ ty::Bivariant => Ok(a),
+ ty::Contravariant => self.fields.lub(self.a_is_expected).relate(a, b),
+ }
+ }
+
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ lattice::super_lattice_tys(self, a, b)
+ }
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug!("{}.regions({:?}, {:?})", self.tag(), a, b);
+
+ let origin = Subtype(Box::new(self.fields.trace.clone()));
+ Ok(self.fields.infcx.inner.borrow_mut().unwrap_region_constraints().glb_regions(
+ self.tcx(),
+ origin,
+ a,
+ b,
+ ))
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ self.fields.infcx.super_combine_consts(self, a, b)
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ debug!("binders(a={:?}, b={:?})", a, b);
+ if a.skip_binder().has_escaping_bound_vars() || b.skip_binder().has_escaping_bound_vars() {
+ // When higher-ranked types are involved, computing the GLB is
+ // very challenging, switch to invariance. This is obviously
+ // overly conservative but works ok in practice.
+ self.relate_with_variance(
+ ty::Variance::Invariant,
+ ty::VarianceDiagInfo::default(),
+ a,
+ b,
+ )?;
+ Ok(a)
+ } else {
+ Ok(ty::Binder::dummy(self.relate(a.skip_binder(), b.skip_binder())?))
+ }
+ }
+}
+
+impl<'combine, 'infcx, 'tcx> LatticeDir<'infcx, 'tcx> for Glb<'combine, 'infcx, 'tcx> {
+ fn infcx(&self) -> &'infcx InferCtxt<'infcx, 'tcx> {
+ self.fields.infcx
+ }
+
+ fn cause(&self) -> &ObligationCause<'tcx> {
+ &self.fields.trace.cause
+ }
+
+ fn add_obligations(&mut self, obligations: Vec<PredicateObligation<'tcx>>) {
+ self.fields.obligations.extend(obligations)
+ }
+
+ fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> {
+ let mut sub = self.fields.sub(self.a_is_expected);
+ sub.relate(v, a)?;
+ sub.relate(v, b)?;
+ Ok(())
+ }
+
+ fn define_opaque_types(&self) -> bool {
+ self.fields.define_opaque_types
+ }
+}
+
+impl<'tcx> ConstEquateRelation<'tcx> for Glb<'_, '_, 'tcx> {
+ fn const_equate_obligation(&mut self, a: ty::Const<'tcx>, b: ty::Const<'tcx>) {
+ self.fields.add_const_equate_obligation(self.a_is_expected, a, b);
+ }
+}
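
For intuition about the two lattice operations, a toy lattice where the
ordering is set inclusion (std only; the lattice that Glb and Lub actually
operate on is the subtyping order, not sets):

use std::collections::BTreeSet;

// Greatest lower bound under inclusion: the intersection.
fn glb(a: &BTreeSet<u8>, b: &BTreeSet<u8>) -> BTreeSet<u8> {
    a.intersection(b).copied().collect()
}

// Least upper bound under inclusion: the union.
fn lub(a: &BTreeSet<u8>, b: &BTreeSet<u8>) -> BTreeSet<u8> {
    a.union(b).copied().collect()
}

fn main() {
    let a = BTreeSet::from([1, 2, 3]);
    let b = BTreeSet::from([2, 3, 4]);
    // The GLB sits below both inputs...
    let g = glb(&a, &b);
    assert!(g.is_subset(&a) && g.is_subset(&b));
    // ...and the LUB sits above both.
    let l = lub(&a, &b);
    assert!(a.is_subset(&l) && b.is_subset(&l));
}
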
diff --git a/compiler/rustc_infer/src/infer/higher_ranked/README.md b/compiler/rustc_infer/src/infer/higher_ranked/README.md
new file mode 100644
index 000000000..533d0ef7e
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/higher_ranked/README.md
@@ -0,0 +1,8 @@
+To learn more about how higher-ranked trait bounds work in the _old_ trait
+solver, see [this chapter][oldhrtb] of the rustc-dev-guide.
+
+To learn more about how they work in the _new_ trait solver, see [this
+chapter][newhrtb].
+
+[oldhrtb]: https://rustc-dev-guide.rust-lang.org/traits/hrtb.html
+[newhrtb]: https://rustc-dev-guide.rust-lang.org/borrow_check/region_inference.html#placeholders-and-universes
diff --git a/compiler/rustc_infer/src/infer/higher_ranked/mod.rs b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs
new file mode 100644
index 000000000..d0d9efe15
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/higher_ranked/mod.rs
@@ -0,0 +1,137 @@
+//! Helper routines for higher-ranked things. See the README in this
+//! directory for pointers to the relevant rustc-dev-guide chapters.
+
+use super::combine::CombineFields;
+use super::{HigherRankedType, InferCtxt};
+use crate::infer::CombinedSnapshot;
+use rustc_middle::ty::fold::FnMutDelegate;
+use rustc_middle::ty::relate::{Relate, RelateResult, TypeRelation};
+use rustc_middle::ty::{self, Binder, TypeFoldable};
+
+impl<'a, 'tcx> CombineFields<'a, 'tcx> {
+ /// Checks whether `for<..> sub <: for<..> sup` holds.
+ ///
+ /// For this to hold, **all** instantiations of the super type
+ /// have to be a super type of **at least one** instantiation of
+ /// the subtype.
+ ///
+ /// This is implemented by first entering a new universe.
+ /// We then replace all bound variables in `sup` with placeholders,
+ /// and all bound variables in `sub` with inference vars.
+ /// We can then just relate the two resulting types as normal.
+ ///
+ /// Note: this is a subtle algorithm. For a full explanation, please see
+ /// the [rustc dev guide][rd].
+ ///
+ /// [rd]: https://rustc-dev-guide.rust-lang.org/borrow_check/region_inference/placeholders_and_universes.html
+ #[instrument(skip(self), level = "debug")]
+ pub fn higher_ranked_sub<T>(
+ &mut self,
+ sub: Binder<'tcx, T>,
+ sup: Binder<'tcx, T>,
+ sub_is_expected: bool,
+ ) -> RelateResult<'tcx, ()>
+ where
+ T: Relate<'tcx>,
+ {
+ let span = self.trace.cause.span;
+ // First, we instantiate each bound region in the supertype with a
+ // fresh placeholder region. Note that this automatically creates
+ // a new universe if needed.
+ let sup_prime = self.infcx.replace_bound_vars_with_placeholders(sup);
+
+ // Next, we instantiate each bound region in the subtype
+ // with a fresh region variable. These region variables --
+ // but no other pre-existing region variables -- can name
+ // the placeholders.
+ let sub_prime = self.infcx.replace_bound_vars_with_fresh_vars(span, HigherRankedType, sub);
+
+ debug!("a_prime={:?}", sub_prime);
+ debug!("b_prime={:?}", sup_prime);
+
+ // Compare types now that bound regions have been replaced.
+ let result = self.sub(sub_is_expected).relate(sub_prime, sup_prime)?;
+
+ debug!("OK result={result:?}");
+ // NOTE: returning the result here would be dangerous as it contains
+ // placeholders which **must not** be named afterwards.
+ Ok(())
+ }
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ /// Replaces all bound variables (lifetimes, types, and constants) bound by
+ /// `binder` with placeholder variables in a new universe. This means that the
+ /// new placeholders can only be named by inference variables created after
+ /// this method has been called.
+ ///
+ /// This is the first step of checking subtyping when higher-ranked things are involved.
+ /// For more details visit the relevant sections of the [rustc dev guide].
+ ///
+ /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/hrtb.html
+ #[instrument(level = "debug", skip(self))]
+ pub fn replace_bound_vars_with_placeholders<T>(&self, binder: ty::Binder<'tcx, T>) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ if let Some(inner) = binder.no_bound_vars() {
+ return inner;
+ }
+
+ let next_universe = self.create_next_universe();
+
+ let delegate = FnMutDelegate {
+ regions: |br: ty::BoundRegion| {
+ self.tcx.mk_region(ty::RePlaceholder(ty::PlaceholderRegion {
+ universe: next_universe,
+ name: br.kind,
+ }))
+ },
+ types: |bound_ty: ty::BoundTy| {
+ self.tcx.mk_ty(ty::Placeholder(ty::PlaceholderType {
+ universe: next_universe,
+ name: bound_ty.var,
+ }))
+ },
+ consts: |bound_var: ty::BoundVar, ty| {
+ self.tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Placeholder(ty::PlaceholderConst {
+ universe: next_universe,
+ name: bound_var,
+ }),
+ ty,
+ })
+ },
+ };
+
+ let result = self.tcx.replace_bound_vars_uncached(binder, delegate);
+ debug!(?next_universe, ?result);
+ result
+ }
+
+ /// See [RegionConstraintCollector::leak_check][1].
+ ///
+ /// [1]: crate::infer::region_constraints::RegionConstraintCollector::leak_check
+ pub fn leak_check(
+ &self,
+ overly_polymorphic: bool,
+ snapshot: &CombinedSnapshot<'_, 'tcx>,
+ ) -> RelateResult<'tcx, ()> {
+ // If the user gave `-Zno-leak-check`, or we have been
+ // configured to skip the leak check, then skip the leak check
+ // completely. The leak check is deprecated. Any legitimate
+ // subtyping errors that it would have caught will now be
+ // caught later on, during region checking. However, we
+ // continue to use it for a transition period.
+ if self.tcx.sess.opts.unstable_opts.no_leak_check || self.skip_leak_check.get() {
+ return Ok(());
+ }
+
+ self.inner.borrow_mut().unwrap_region_constraints().leak_check(
+ self.tcx,
+ overly_polymorphic,
+ self.universe(),
+ snapshot,
+ )
+ }
+}
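
One way to see the placeholder/variable split in `higher_ranked_sub`: the
supertype's binder yields rigid placeholders ("must hold for every
instantiation"), the subtype's yields flexible inference variables ("some
instantiation must exist"), and relating can only succeed where the flexible
side absorbs the rigid one. A toy sketch (illustrative enum, not rustc's
`ty::Region`):

#[derive(Clone, Copy, Debug, PartialEq)]
enum Region {
    Placeholder(u32), // from the supertype binder: rigid
    Var(u32),         // from the subtype binder: flexible
    Static,
}

// Can these two instantiated regions be reconciled? A variable unifies
// with anything; everything else must match exactly.
fn reconcilable(a: Region, b: Region) -> bool {
    match (a, b) {
        (Region::Var(_), _) | (_, Region::Var(_)) => true,
        (a, b) => a == b,
    }
}

fn main() {
    // `for<'a> fn(&'a ()) <: for<'b> fn(&'b ())`: 'b becomes a placeholder,
    // 'a becomes a variable, and the variable can absorb the placeholder.
    assert!(reconcilable(Region::Var(0), Region::Placeholder(0)));

    // `fn(&'static ()) <: for<'b> fn(&'b ())` must fail: 'static is rigid
    // and cannot stand for an arbitrary placeholder.
    assert!(!reconcilable(Region::Static, Region::Placeholder(0)));
}
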
diff --git a/compiler/rustc_infer/src/infer/lattice.rs b/compiler/rustc_infer/src/infer/lattice.rs
new file mode 100644
index 000000000..1e3293efa
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/lattice.rs
@@ -0,0 +1,124 @@
+//! # Lattice variables
+//!
+//! Generic code for operating on [lattices] of inference variables
+//! that are characterized by an upper- and lower-bound.
+//!
+//! The code is defined quite generically so that it can be
+//! applied both to type variables, which represent types being inferred,
+//! and fn variables, which represent function types being inferred.
+//! (It may eventually be applied to their types as well.)
+//! In some cases, the functions are also generic with respect to the
+//! operation on the lattice (GLB vs LUB).
+//!
+//! ## Note
+//!
+//! Although all the functions are generic, for simplicity, comments in the source code
+//! generally refer to type variables and the LUB operation.
+//!
+//! [lattices]: https://en.wikipedia.org/wiki/Lattice_(order)
+
+use super::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use super::InferCtxt;
+
+use crate::traits::{ObligationCause, PredicateObligation};
+use rustc_middle::ty::relate::{RelateResult, TypeRelation};
+use rustc_middle::ty::TyVar;
+use rustc_middle::ty::{self, Ty};
+
+/// Trait for returning data about a lattice, and for abstracting
+/// over the "direction" of the lattice operation (LUB/GLB).
+///
+/// GLB moves "down" the lattice (to smaller values); LUB moves
+/// "up" the lattice (to bigger values).
+pub trait LatticeDir<'f, 'tcx>: TypeRelation<'tcx> {
+ fn infcx(&self) -> &'f InferCtxt<'f, 'tcx>;
+
+ fn cause(&self) -> &ObligationCause<'tcx>;
+
+ fn add_obligations(&mut self, obligations: Vec<PredicateObligation<'tcx>>);
+
+ fn define_opaque_types(&self) -> bool;
+
+ // Relates the type `v` to `a` and `b` such that `v` represents
+ // the LUB/GLB of `a` and `b` as appropriate.
+ //
+ // Subtle hack: ordering *may* be significant here. This method
+ // relates `v` to `a` first, which may help us to avoid unnecessary
+ // type variable obligations. See caller for details.
+ fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()>;
+}
+
+/// Relates two types using a given lattice.
+#[instrument(skip(this), level = "debug")]
+pub fn super_lattice_tys<'a, 'tcx: 'a, L>(
+ this: &mut L,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+) -> RelateResult<'tcx, Ty<'tcx>>
+where
+ L: LatticeDir<'a, 'tcx>,
+{
+ debug!("{}", this.tag());
+
+ if a == b {
+ return Ok(a);
+ }
+
+ let infcx = this.infcx();
+
+ let a = infcx.inner.borrow_mut().type_variables().replace_if_possible(a);
+ let b = infcx.inner.borrow_mut().type_variables().replace_if_possible(b);
+
+ match (a.kind(), b.kind()) {
+ // If one side is known to be a variable and one is not,
+ // create a variable (`v`) to represent the LUB. Make sure to
+ // relate `v` to the non-type-variable first (by passing it
+ // first to `relate_bound`). Otherwise, we would produce a
+ // subtype obligation that must then be processed.
+ //
+ // Example: if the LHS is a type variable, and RHS is
+ // `Box<i32>`, then we currently compare `v` to the RHS first,
+ // which will instantiate `v` with `Box<i32>`. Then when `v`
+ // is compared to the LHS, we instantiate LHS with `Box<i32>`.
+ // But if we did this in the reverse order, we would create a `v <:
+ // LHS` (or vice versa) constraint and then instantiate
+ // `v`. This would require further processing to achieve the same
+ // end-result; in particular, this screws up some of the logic
+ // in coercion, which expects LUB to figure out that the LHS
+ // is (e.g.) `Box<i32>`. A more obvious solution might be to
+ // iterate on the subtype obligations that are returned, but I
+ // think this suffices. -nmatsakis
+ (&ty::Infer(TyVar(..)), _) => {
+ let v = infcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::LatticeVariable,
+ span: this.cause().span,
+ });
+ this.relate_bound(v, b, a)?;
+ Ok(v)
+ }
+ (_, &ty::Infer(TyVar(..))) => {
+ let v = infcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::LatticeVariable,
+ span: this.cause().span,
+ });
+ this.relate_bound(v, a, b)?;
+ Ok(v)
+ }
+
+ (&ty::Opaque(a_def_id, _), &ty::Opaque(b_def_id, _)) if a_def_id == b_def_id => {
+ infcx.super_combine_tys(this, a, b)
+ }
+ (&ty::Opaque(did, ..), _) | (_, &ty::Opaque(did, ..))
+ if this.define_opaque_types() && did.is_local() =>
+ {
+ this.add_obligations(
+ infcx
+ .handle_opaque_type(a, b, this.a_is_expected(), this.cause(), this.param_env())?
+ .obligations,
+ );
+ Ok(a)
+ }
+
+ _ => infcx.super_combine_tys(this, a, b),
+ }
+}
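
The ordering subtlety documented above can be shown with a tiny unifier:
relating the fresh `v` to the non-variable side first instantiates `v`
immediately, instead of leaving a variable-to-variable constraint to process
later. A std-only sketch (toy types, not rustc's inference tables):

use std::collections::HashMap;

#[derive(Clone, Debug, PartialEq)]
enum Ty {
    Var(u32),
    Int,
}

#[derive(Default)]
struct Unifier {
    bindings: HashMap<u32, Ty>,
    next_var: u32,
}

impl Unifier {
    fn fresh(&mut self) -> Ty {
        let v = self.next_var;
        self.next_var += 1;
        Ty::Var(v)
    }

    fn resolve(&self, ty: &Ty) -> Ty {
        match ty {
            Ty::Var(v) => self.bindings.get(v).cloned().unwrap_or(ty.clone()),
            _ => ty.clone(),
        }
    }

    // Bind `var` to whatever `to` currently resolves to, if `var` is
    // still an unbound variable.
    fn relate(&mut self, var: &Ty, to: &Ty) {
        if let (Ty::Var(v), bound) = (self.resolve(var), self.resolve(to)) {
            self.bindings.insert(v, bound);
        }
    }

    // LUB when `a` is an unconstrained variable and `b` is concrete.
    fn lub(&mut self, a: &Ty, b: &Ty) -> Ty {
        let v = self.fresh();
        // Non-variable side first: `v` is instantiated right away...
        self.relate(&v, b);
        // ...so relating the variable side just copies the binding.
        self.relate(a, &v);
        v
    }
}

fn main() {
    let mut u = Unifier::default();
    let a = u.fresh(); // the type-variable side
    let v = u.lub(&a, &Ty::Int);
    assert_eq!(u.resolve(&v), Ty::Int);
    assert_eq!(u.resolve(&a), Ty::Int);
}
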
diff --git a/compiler/rustc_infer/src/infer/lexical_region_resolve/README.md b/compiler/rustc_infer/src/infer/lexical_region_resolve/README.md
new file mode 100644
index 000000000..0a7da8c80
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/lexical_region_resolve/README.md
@@ -0,0 +1,6 @@
+Lexical Region Resolution was removed in https://github.com/rust-lang/rust/pull/64790.
+
+Rust now uses non-lexical lifetimes. For more info, please see the [borrowck
+chapter][bc] in the rustc-dev-guide.
+
+[bc]: https://rustc-dev-guide.rust-lang.org/borrow_check/region_inference.html
diff --git a/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
new file mode 100644
index 000000000..3783cfb4c
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/lexical_region_resolve/mod.rs
@@ -0,0 +1,891 @@
+//! Lexical region resolution.
+
+use crate::infer::region_constraints::Constraint;
+use crate::infer::region_constraints::GenericKind;
+use crate::infer::region_constraints::RegionConstraintData;
+use crate::infer::region_constraints::VarInfos;
+use crate::infer::region_constraints::VerifyBound;
+use crate::infer::RegionRelations;
+use crate::infer::RegionVariableOrigin;
+use crate::infer::SubregionOrigin;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::graph::implementation::{
+ Direction, Graph, NodeIndex, INCOMING, OUTGOING,
+};
+use rustc_data_structures::intern::Interned;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{ReEarlyBound, ReEmpty, ReErased, ReFree, ReStatic};
+use rustc_middle::ty::{ReLateBound, RePlaceholder, ReVar};
+use rustc_middle::ty::{Region, RegionVid};
+use rustc_span::Span;
+use std::fmt;
+
+use super::outlives::test_type_match;
+
+/// This function performs lexical region resolution given a complete
+/// set of constraints and variable origins. It performs a fixed-point
+/// iteration to find region values which satisfy all constraints,
+/// assuming such values can be found. It returns the final values of
+/// all the variables as well as a set of errors that must be reported.
+#[instrument(level = "debug", skip(region_rels, var_infos, data))]
+pub(crate) fn resolve<'tcx>(
+ param_env: ty::ParamEnv<'tcx>,
+ region_rels: &RegionRelations<'_, 'tcx>,
+ var_infos: VarInfos,
+ data: RegionConstraintData<'tcx>,
+) -> (LexicalRegionResolutions<'tcx>, Vec<RegionResolutionError<'tcx>>) {
+ let mut errors = vec![];
+ let mut resolver = LexicalResolver { param_env, region_rels, var_infos, data };
+ let values = resolver.infer_variable_values(&mut errors);
+ (values, errors)
+}
+
+/// Contains the result of lexical region resolution. Offers methods
+/// to look up the final value of a region variable.
+#[derive(Clone)]
+pub struct LexicalRegionResolutions<'tcx> {
+ pub(crate) values: IndexVec<RegionVid, VarValue<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum VarValue<'tcx> {
+ Value(Region<'tcx>),
+ ErrorValue,
+}
+
+#[derive(Clone, Debug)]
+pub enum RegionResolutionError<'tcx> {
+ /// `ConcreteFailure(o, a, b)`:
+ ///
+ /// `o` requires that `a <= b`, but this does not hold
+ ConcreteFailure(SubregionOrigin<'tcx>, Region<'tcx>, Region<'tcx>),
+
+ /// `GenericBoundFailure(p, s, a)`
+ ///
+ /// The parameter/associated-type `p` must be known to outlive the lifetime
+ /// `a` (but none of the known bounds are sufficient).
+ GenericBoundFailure(SubregionOrigin<'tcx>, GenericKind<'tcx>, Region<'tcx>),
+
+ /// `SubSupConflict(v, v_origin, sub_origin, sub_r, sup_origin, sup_r)`:
+ ///
+ /// Could not infer a value for `v` (which has origin `v_origin`)
+ /// because `sub_r <= v` (due to `sub_origin`) but `v <= sup_r` (due to `sup_origin`) and
+ /// `sub_r <= sup_r` does not hold.
+ SubSupConflict(
+ RegionVid,
+ RegionVariableOrigin,
+ SubregionOrigin<'tcx>,
+ Region<'tcx>,
+ SubregionOrigin<'tcx>,
+ Region<'tcx>,
+ Vec<Span>, // All the influences on a given value that didn't meet its constraints.
+ ),
+
+ /// Indicates a `'b: 'a` constraint where `'a` is in a universe that
+ /// cannot name the placeholder `'b`.
+ UpperBoundUniverseConflict(
+ RegionVid,
+ RegionVariableOrigin,
+ ty::UniverseIndex, // the universe index of the region variable
+ SubregionOrigin<'tcx>, // cause of the constraint
+ Region<'tcx>, // the placeholder `'b`
+ ),
+}
+
+struct RegionAndOrigin<'tcx> {
+ region: Region<'tcx>,
+ origin: SubregionOrigin<'tcx>,
+}
+
+type RegionGraph<'tcx> = Graph<(), Constraint<'tcx>>;
+
+struct LexicalResolver<'cx, 'tcx> {
+ param_env: ty::ParamEnv<'tcx>,
+ region_rels: &'cx RegionRelations<'cx, 'tcx>,
+ var_infos: VarInfos,
+ data: RegionConstraintData<'tcx>,
+}
+
+impl<'cx, 'tcx> LexicalResolver<'cx, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.region_rels.tcx
+ }
+
+ fn infer_variable_values(
+ &mut self,
+ errors: &mut Vec<RegionResolutionError<'tcx>>,
+ ) -> LexicalRegionResolutions<'tcx> {
+ let mut var_data = self.construct_var_data(self.tcx());
+
+ if cfg!(debug_assertions) {
+ self.dump_constraints();
+ }
+
+ let graph = self.construct_graph();
+ self.expand_givens(&graph);
+ self.expansion(&mut var_data);
+ self.collect_errors(&mut var_data, errors);
+ self.collect_var_errors(&var_data, &graph, errors);
+ var_data
+ }
+
+ fn num_vars(&self) -> usize {
+ self.var_infos.len()
+ }
+
+ /// Initially, the value for all variables is set to `'empty`, the
+ /// empty region. The `expansion` phase will grow this larger.
+ fn construct_var_data(&self, tcx: TyCtxt<'tcx>) -> LexicalRegionResolutions<'tcx> {
+ LexicalRegionResolutions {
+ values: IndexVec::from_fn_n(
+ |vid| {
+ let vid_universe = self.var_infos[vid].universe;
+ let re_empty = tcx.mk_region(ty::ReEmpty(vid_universe));
+ VarValue::Value(re_empty)
+ },
+ self.num_vars(),
+ ),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn dump_constraints(&self) {
+ for (idx, (constraint, _)) in self.data.constraints.iter().enumerate() {
+ debug!("Constraint {} => {:?}", idx, constraint);
+ }
+ }
+
+ fn expand_givens(&mut self, graph: &RegionGraph<'_>) {
+ // Givens are a kind of horrible hack to account for
+ // constraints like 'c <= '0 that are known to hold due to
+ // closure signatures (see the comment above on the `givens`
+ // field). They should go away. But until they do, the role
+ // of this fn is to account for the transitive nature:
+ //
+ // Given 'c <= '0
+ // and '0 <= '1
+ // then 'c <= '1
+
+ let seeds: Vec<_> = self.data.givens.iter().cloned().collect();
+ for (r, vid) in seeds {
+ // While all things transitively reachable in the graph
+ // from the variable (`'0` in the example above).
+ let seed_index = NodeIndex(vid.index() as usize);
+ for succ_index in graph.depth_traverse(seed_index, OUTGOING) {
+ let succ_index = succ_index.0;
+
+ // The first N nodes correspond to the region
+ // variables. Other nodes correspond to constant
+ // regions.
+ if succ_index < self.num_vars() {
+ let succ_vid = RegionVid::new(succ_index);
+
+ // Add `'c <= '1`.
+ self.data.givens.insert((r, succ_vid));
+ }
+ }
+ }
+ }
+
+ fn expansion(&self, var_values: &mut LexicalRegionResolutions<'tcx>) {
+ let mut constraints = IndexVec::from_elem_n(Vec::new(), var_values.values.len());
+ let mut changes = Vec::new();
+ for constraint in self.data.constraints.keys() {
+ let (a_vid, a_region, b_vid, b_data) = match *constraint {
+ Constraint::RegSubVar(a_region, b_vid) => {
+ let b_data = var_values.value_mut(b_vid);
+ (None, a_region, b_vid, b_data)
+ }
+ Constraint::VarSubVar(a_vid, b_vid) => match *var_values.value(a_vid) {
+ VarValue::ErrorValue => continue,
+ VarValue::Value(a_region) => {
+ let b_data = var_values.value_mut(b_vid);
+ (Some(a_vid), a_region, b_vid, b_data)
+ }
+ },
+ Constraint::RegSubReg(..) | Constraint::VarSubReg(..) => {
+ // These constraints are checked after expansion
+ // is done, in `collect_errors`.
+ continue;
+ }
+ };
+ if self.expand_node(a_region, b_vid, b_data) {
+ changes.push(b_vid);
+ }
+ if let Some(a_vid) = a_vid {
+ match b_data {
+ VarValue::Value(Region(Interned(ReStatic, _))) | VarValue::ErrorValue => (),
+ _ => {
+ constraints[a_vid].push((a_vid, b_vid));
+ constraints[b_vid].push((a_vid, b_vid));
+ }
+ }
+ }
+ }
+
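+ // The loop below is a plain fixed point: every variable starts at
+ // `'empty`, `expand_node` only ever grows a value (via
+ // `lub_concrete_regions`, toward `'static`), and a grown variable
+ // re-queues exactly the edges recorded for it above. Edges whose
+ // target has already reached `'static` or an error are dropped, so
+ // each edge is retained only while it can still cause growth and the
+ // iteration terminates.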
+ while let Some(vid) = changes.pop() {
+ constraints[vid].retain(|&(a_vid, b_vid)| {
+ let VarValue::Value(a_region) = *var_values.value(a_vid) else {
+ return false;
+ };
+ let b_data = var_values.value_mut(b_vid);
+ if self.expand_node(a_region, b_vid, b_data) {
+ changes.push(b_vid);
+ }
+ !matches!(
+ b_data,
+ VarValue::Value(Region(Interned(ReStatic, _))) | VarValue::ErrorValue
+ )
+ });
+ }
+ }
+
+ fn expand_node(
+ &self,
+ a_region: Region<'tcx>,
+ b_vid: RegionVid,
+ b_data: &mut VarValue<'tcx>,
+ ) -> bool {
+ debug!("expand_node({:?}, {:?} == {:?})", a_region, b_vid, b_data);
+
+ match *a_region {
+ // Check if this relationship is implied by a given.
+ ty::ReEarlyBound(_) | ty::ReFree(_) => {
+ if self.data.givens.contains(&(a_region, b_vid)) {
+ debug!("given");
+ return false;
+ }
+ }
+
+ _ => {}
+ }
+
+ match *b_data {
+ VarValue::Value(cur_region) => {
+ // This is a specialized version of the `lub_concrete_regions`
+ // check below for a common case, here purely as an
+ // optimization.
+ let b_universe = self.var_infos[b_vid].universe;
+ if let ReEmpty(a_universe) = *a_region && a_universe == b_universe {
+ return false;
+ }
+
+ let mut lub = self.lub_concrete_regions(a_region, cur_region);
+ if lub == cur_region {
+ return false;
+ }
+
+ // Watch out for `'b: !1` relationships, where the
+ // universe of `'b` can't name the placeholder `!1`. In
+ // that case, we have to grow `'b` to be `'static` for the
+ // relationship to hold. This is obviously a kind of sub-optimal
+ // choice -- in the future, when we incorporate a knowledge
+ // of the parameter environment, we might be able to find a
+ // tighter bound than `'static`.
+ //
+ // (This might e.g. arise from being asked to prove `for<'a> { 'b: 'a }`.)
+ if let ty::RePlaceholder(p) = *lub && b_universe.cannot_name(p.universe) {
+ lub = self.tcx().lifetimes.re_static;
+ }
+
+ debug!("Expanding value of {:?} from {:?} to {:?}", b_vid, cur_region, lub);
+
+ *b_data = VarValue::Value(lub);
+ true
+ }
+
+ VarValue::ErrorValue => false,
+ }
+ }
+
+ /// True if `a <= b`, but not defined over inference variables.
+ #[instrument(level = "trace", skip(self))]
+ fn sub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> bool {
+ let tcx = self.tcx();
+ let sub_free_regions = |r1, r2| self.region_rels.free_regions.sub_free_regions(tcx, r1, r2);
+
+ // Check for the case where we know that `'b: 'static` -- in that case,
+ // `a <= b` for all `a`.
+ let b_free_or_static = b.is_free_or_static();
+ if b_free_or_static && sub_free_regions(tcx.lifetimes.re_static, b) {
+ return true;
+ }
+
+ // If both `a` and `b` are free, consult the declared
+ // relationships. Note that this can be more precise than the
+ // `lub` relationship defined below, since sometimes the "lub"
+ // is actually the `postdom_upper_bound` (see
+ // `TransitiveRelation` for more details).
+ let a_free_or_static = a.is_free_or_static();
+ if a_free_or_static && b_free_or_static {
+ return sub_free_regions(a, b);
+ }
+
+ // For other cases, leverage the LUB code to find the LUB and
+ // check if it is equal to `b`.
+ self.lub_concrete_regions(a, b) == b
+ }
+
+ /// Returns the least-upper-bound of `a` and `b`; i.e., the
+ /// smallest region `c` such that `a <= c` and `b <= c`.
+ ///
+ /// Neither `a` nor `b` may be an inference variable (hence the
+ /// term "concrete regions").
+ #[instrument(level = "trace", skip(self))]
+ fn lub_concrete_regions(&self, a: Region<'tcx>, b: Region<'tcx>) -> Region<'tcx> {
+ let r = match (*a, *b) {
+ (ReLateBound(..), _) | (_, ReLateBound(..)) | (ReErased, _) | (_, ReErased) => {
+ bug!("cannot relate region: LUB({:?}, {:?})", a, b);
+ }
+
+ (ReVar(v_id), _) | (_, ReVar(v_id)) => {
+ span_bug!(
+ self.var_infos[v_id].origin.span(),
+ "lub_concrete_regions invoked with non-concrete \
+ regions: {:?}, {:?}",
+ a,
+ b
+ );
+ }
+
+ (ReStatic, _) | (_, ReStatic) => {
+ // nothing lives longer than `'static`
+ self.tcx().lifetimes.re_static
+ }
+
+ (ReEmpty(_), ReEarlyBound(_) | ReFree(_)) => {
+ // All empty regions are less than early-bound, free,
+ // and scope regions.
+ b
+ }
+
+ (ReEarlyBound(_) | ReFree(_), ReEmpty(_)) => {
+ // All empty regions are less than early-bound, free,
+ // and scope regions.
+ a
+ }
+
+ (ReEmpty(a_ui), ReEmpty(b_ui)) => {
+ // Empty regions are ordered according to the universe
+ // they are associated with.
+ let ui = a_ui.min(b_ui);
+ self.tcx().mk_region(ReEmpty(ui))
+ }
+
+ (ReEmpty(empty_ui), RePlaceholder(placeholder))
+ | (RePlaceholder(placeholder), ReEmpty(empty_ui)) => {
+ // If this empty region is from a universe that can
+ // name the placeholder, then the placeholder is
+ // larger; otherwise, the only ancestor is `'static`.
+ if empty_ui.can_name(placeholder.universe) {
+ self.tcx().mk_region(RePlaceholder(placeholder))
+ } else {
+ self.tcx().lifetimes.re_static
+ }
+ }
+
+ (ReEarlyBound(_) | ReFree(_), ReEarlyBound(_) | ReFree(_)) => {
+ self.region_rels.lub_free_regions(a, b)
+ }
+
+ // For these types, we cannot define any additional
+ // relationship:
+ (RePlaceholder(..), _) | (_, RePlaceholder(..)) => {
+ if a == b {
+ a
+ } else {
+ self.tcx().lifetimes.re_static
+ }
+ }
+ };
+
+ debug!("lub_concrete_regions({:?}, {:?}) = {:?}", a, b, r);
+
+ r
+ }
+
+ /// After expansion is complete, go and check upper bounds (i.e.,
+ /// cases where the region cannot grow larger than a fixed point)
+ /// and check that they are satisfied.
+ #[instrument(skip(self, var_data, errors))]
+ fn collect_errors(
+ &self,
+ var_data: &mut LexicalRegionResolutions<'tcx>,
+ errors: &mut Vec<RegionResolutionError<'tcx>>,
+ ) {
+ for (constraint, origin) in &self.data.constraints {
+ debug!(?constraint, ?origin);
+ match *constraint {
+ Constraint::RegSubVar(..) | Constraint::VarSubVar(..) => {
+ // Expansion will ensure that these constraints hold. Ignore.
+ }
+
+ Constraint::RegSubReg(sub, sup) => {
+ if self.sub_concrete_regions(sub, sup) {
+ continue;
+ }
+
+ debug!(
+ "region error at {:?}: \
+ cannot verify that {:?} <= {:?}",
+ origin, sub, sup
+ );
+
+ errors.push(RegionResolutionError::ConcreteFailure(
+ (*origin).clone(),
+ sub,
+ sup,
+ ));
+ }
+
+ Constraint::VarSubReg(a_vid, b_region) => {
+ let a_data = var_data.value_mut(a_vid);
+ debug!("contraction: {:?} == {:?}, {:?}", a_vid, a_data, b_region);
+
+ let VarValue::Value(a_region) = *a_data else {
+ continue;
+ };
+
+ // Do not report these errors immediately:
+ // instead, set the variable value to error and
+ // collect them later.
+ if !self.sub_concrete_regions(a_region, b_region) {
+ debug!(
+ "region error at {:?}: \
+ cannot verify that {:?}={:?} <= {:?}",
+ origin, a_vid, a_region, b_region
+ );
+ *a_data = VarValue::ErrorValue;
+ }
+ }
+ }
+ }
+
+ for verify in &self.data.verifys {
+ debug!("collect_errors: verify={:?}", verify);
+ let sub = var_data.normalize(self.tcx(), verify.region);
+
+ let verify_kind_ty = verify.kind.to_ty(self.tcx());
+ let verify_kind_ty = var_data.normalize(self.tcx(), verify_kind_ty);
+ if self.bound_is_met(&verify.bound, var_data, verify_kind_ty, sub) {
+ continue;
+ }
+
+ debug!(
+ "collect_errors: region error at {:?}: \
+ cannot verify that {:?} <= {:?}",
+ verify.origin, verify.region, verify.bound
+ );
+
+ errors.push(RegionResolutionError::GenericBoundFailure(
+ verify.origin.clone(),
+ verify.kind,
+ sub,
+ ));
+ }
+ }
+
+ /// Go over the variables that were declared to be error variables
+ /// and create a `RegionResolutionError` for each of them.
+ fn collect_var_errors(
+ &self,
+ var_data: &LexicalRegionResolutions<'tcx>,
+ graph: &RegionGraph<'tcx>,
+ errors: &mut Vec<RegionResolutionError<'tcx>>,
+ ) {
+ debug!("collect_var_errors, var_data = {:#?}", var_data.values);
+
+ // This is the best way that I have found to suppress
+ // duplicate and related errors. Basically we keep a set of
+ // flags for every node. Whenever an error occurs, we will
+ // walk some portion of the graph looking to find pairs of
+ // conflicting regions to report to the user. As we walk, we
+ // trip the flags from false to true, and if we find that
+ // we've already reported an error involving any particular
+ // node we just stop and don't report the current error. The
+ // idea is to report errors that derive from independent
+ // regions of the graph, but not those that derive from
+ // overlapping locations.
+ let mut dup_vec = IndexVec::from_elem_n(None, self.num_vars());
+
+ for (node_vid, value) in var_data.values.iter_enumerated() {
+ match *value {
+ VarValue::Value(_) => { /* Inference successful */ }
+ VarValue::ErrorValue => {
+ // Inference impossible: this value contains
+ // inconsistent constraints.
+ //
+ // I think that in this case we should report an
+ // error now -- unlike the case above, we can't
+ // wait to see whether the user needs the result
+ // of this variable. The reason is that the mere
+ // existence of this variable implies that the
+ // region graph is inconsistent, whether or not it
+ // is used.
+ //
+ // For example, we may have created a region
+ // variable that is the GLB of two other regions
+ // which do not have a GLB. Even if that variable
+ // is not used, it implies that those two regions
+ // *should* have a GLB.
+ //
+ // At least I think this is true. It may be that
+ // the mere existence of a conflict in a region
+ // variable that is not used is not a problem, so
+ // if this rule starts to create problems we'll
+ // have to revisit this portion of the code and
+ // think hard about it. =) -- nikomatsakis
+
+ // Obtain the spans for all the places that can
+ // influence the constraints on this value for
+ // richer diagnostics in `static_impl_trait`.
+ let influences: Vec<Span> = self
+ .data
+ .constraints
+ .iter()
+ .filter_map(|(constraint, origin)| match (constraint, origin) {
+ (
+ Constraint::VarSubVar(_, sup),
+ SubregionOrigin::DataBorrowed(_, sp),
+ ) if sup == &node_vid => Some(*sp),
+ _ => None,
+ })
+ .collect();
+
+ self.collect_error_for_expanding_node(
+ graph,
+ &mut dup_vec,
+ node_vid,
+ errors,
+ influences,
+ );
+ }
+ }
+ }
+ }
+
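+    /// Builds the constraint graph used for error reporting: one node per
+    /// region variable, an edge per `VarSubVar` constraint, plus two dummy
+    /// nodes anchoring edges to and from concrete regions.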
+ fn construct_graph(&self) -> RegionGraph<'tcx> {
+ let num_vars = self.num_vars();
+
+ let mut graph = Graph::new();
+
+ for _ in 0..num_vars {
+ graph.add_node(());
+ }
+
+ // Issue #30438: two distinct dummy nodes, one for incoming
+ // edges (dummy_source) and another for outgoing edges
+ // (dummy_sink). In `dummy -> a -> b -> dummy`, using one
+ // dummy node leads one to think (erroneously) there exists a
+        // path from `b` to `a`. Two dummy nodes sidestep the issue.
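+        // With two dummies, the example becomes `dummy_source -> a -> b ->
+        // dummy_sink`, which contains no spurious path from `b` back to `a`.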
+ let dummy_source = graph.add_node(());
+ let dummy_sink = graph.add_node(());
+
+ for constraint in self.data.constraints.keys() {
+ match *constraint {
+ Constraint::VarSubVar(a_id, b_id) => {
+ graph.add_edge(
+ NodeIndex(a_id.index() as usize),
+ NodeIndex(b_id.index() as usize),
+ *constraint,
+ );
+ }
+ Constraint::RegSubVar(_, b_id) => {
+ graph.add_edge(dummy_source, NodeIndex(b_id.index() as usize), *constraint);
+ }
+ Constraint::VarSubReg(a_id, _) => {
+ graph.add_edge(NodeIndex(a_id.index() as usize), dummy_sink, *constraint);
+ }
+ Constraint::RegSubReg(..) => {
+ // this would be an edge from `dummy_source` to
+ // `dummy_sink`; just ignore it.
+ }
+ }
+ }
+
+ graph
+ }
+
+ fn collect_error_for_expanding_node(
+ &self,
+ graph: &RegionGraph<'tcx>,
+ dup_vec: &mut IndexVec<RegionVid, Option<RegionVid>>,
+ node_idx: RegionVid,
+ errors: &mut Vec<RegionResolutionError<'tcx>>,
+ influences: Vec<Span>,
+ ) {
+ // Errors in expanding nodes result from a lower-bound that is
+ // not contained by an upper-bound.
+ let (mut lower_bounds, lower_vid_bounds, lower_dup) =
+ self.collect_bounding_regions(graph, node_idx, INCOMING, Some(dup_vec));
+ let (mut upper_bounds, _, upper_dup) =
+ self.collect_bounding_regions(graph, node_idx, OUTGOING, Some(dup_vec));
+
+ if lower_dup || upper_dup {
+ return;
+ }
+
+ // We place free regions first because we are special casing
+        // SubSupConflict(ReFree, ReFree) when reporting errors, and so
+        // the user is more likely to get a specific suggestion.
+ fn region_order_key(x: &RegionAndOrigin<'_>) -> u8 {
+ match *x.region {
+ ReEarlyBound(_) => 0,
+ ReFree(_) => 1,
+ _ => 2,
+ }
+ }
+ lower_bounds.sort_by_key(region_order_key);
+ upper_bounds.sort_by_key(region_order_key);
+
+ let node_universe = self.var_infos[node_idx].universe;
+
+ for lower_bound in &lower_bounds {
+ let effective_lower_bound = if let ty::RePlaceholder(p) = *lower_bound.region {
+ if node_universe.cannot_name(p.universe) {
+ self.tcx().lifetimes.re_static
+ } else {
+ lower_bound.region
+ }
+ } else {
+ lower_bound.region
+ };
+
+ for upper_bound in &upper_bounds {
+ if !self.sub_concrete_regions(effective_lower_bound, upper_bound.region) {
+ let origin = self.var_infos[node_idx].origin;
+ debug!(
+ "region inference error at {:?} for {:?}: SubSupConflict sub: {:?} \
+ sup: {:?}",
+ origin, node_idx, lower_bound.region, upper_bound.region
+ );
+
+ errors.push(RegionResolutionError::SubSupConflict(
+ node_idx,
+ origin,
+ lower_bound.origin.clone(),
+ lower_bound.region,
+ upper_bound.origin.clone(),
+ upper_bound.region,
+ influences,
+ ));
+ return;
+ }
+ }
+ }
+
+ // If we have a scenario like `exists<'a> { forall<'b> { 'b:
+ // 'a } }`, we wind up without any lower-bound -- all we have
+ // are placeholders as upper bounds, but the universe of the
+ // variable `'a`, or some variable that `'a` has to outlive, doesn't
+ // permit those placeholders.
+ let min_universe = lower_vid_bounds
+ .into_iter()
+ .map(|vid| self.var_infos[vid].universe)
+ .min()
+ .expect("lower_vid_bounds should at least include `node_idx`");
+
+ for upper_bound in &upper_bounds {
+ if let ty::RePlaceholder(p) = *upper_bound.region {
+ if min_universe.cannot_name(p.universe) {
+ let origin = self.var_infos[node_idx].origin;
+ errors.push(RegionResolutionError::UpperBoundUniverseConflict(
+ node_idx,
+ origin,
+ min_universe,
+ upper_bound.origin.clone(),
+ upper_bound.region,
+ ));
+ return;
+ }
+ }
+ }
+
+ // Errors in earlier passes can yield error variables without
+ // resolution errors here; delay ICE in favor of those errors.
+ self.tcx().sess.delay_span_bug(
+ self.var_infos[node_idx].origin.span(),
+ &format!(
+ "collect_error_for_expanding_node() could not find \
+ error for var {:?} in universe {:?}, lower_bounds={:#?}, \
+ upper_bounds={:#?}",
+ node_idx, node_universe, lower_bounds, upper_bounds
+ ),
+ );
+ }
+
+ /// Collects all regions that "bound" the variable `orig_node_idx` in the
+ /// given direction.
+ ///
+ /// If `dup_vec` is `Some` it's used to track duplicates between successive
+ /// calls of this function.
+ ///
+ /// The return tuple fields are:
+ /// - a list of all concrete regions bounding the given region.
+ /// - the set of all region variables bounding the given region.
+ /// - a `bool` that's true if the returned region variables overlap with
+ /// those returned by a previous call for another region.
+ fn collect_bounding_regions(
+ &self,
+ graph: &RegionGraph<'tcx>,
+ orig_node_idx: RegionVid,
+ dir: Direction,
+ mut dup_vec: Option<&mut IndexVec<RegionVid, Option<RegionVid>>>,
+ ) -> (Vec<RegionAndOrigin<'tcx>>, FxHashSet<RegionVid>, bool) {
+ struct WalkState<'tcx> {
+ set: FxHashSet<RegionVid>,
+ stack: Vec<RegionVid>,
+ result: Vec<RegionAndOrigin<'tcx>>,
+ dup_found: bool,
+ }
+ let mut state = WalkState {
+ set: Default::default(),
+ stack: vec![orig_node_idx],
+ result: Vec::new(),
+ dup_found: false,
+ };
+ state.set.insert(orig_node_idx);
+
+ // to start off the process, walk the source node in the
+ // direction specified
+ process_edges(&self.data, &mut state, graph, orig_node_idx, dir);
+
+ while let Some(node_idx) = state.stack.pop() {
+ // check whether we've visited this node on some previous walk
+ if let Some(dup_vec) = &mut dup_vec {
+ if dup_vec[node_idx].is_none() {
+ dup_vec[node_idx] = Some(orig_node_idx);
+ } else if dup_vec[node_idx] != Some(orig_node_idx) {
+ state.dup_found = true;
+ }
+
+ debug!(
+ "collect_concrete_regions(orig_node_idx={:?}, node_idx={:?})",
+ orig_node_idx, node_idx
+ );
+ }
+
+ process_edges(&self.data, &mut state, graph, node_idx, dir);
+ }
+
+ let WalkState { result, dup_found, set, .. } = state;
+ return (result, set, dup_found);
+
+ fn process_edges<'tcx>(
+ this: &RegionConstraintData<'tcx>,
+ state: &mut WalkState<'tcx>,
+ graph: &RegionGraph<'tcx>,
+ source_vid: RegionVid,
+ dir: Direction,
+ ) {
+ debug!("process_edges(source_vid={:?}, dir={:?})", source_vid, dir);
+
+ let source_node_index = NodeIndex(source_vid.index() as usize);
+ for (_, edge) in graph.adjacent_edges(source_node_index, dir) {
+ match edge.data {
+ Constraint::VarSubVar(from_vid, to_vid) => {
+ let opp_vid = if from_vid == source_vid { to_vid } else { from_vid };
+ if state.set.insert(opp_vid) {
+ state.stack.push(opp_vid);
+ }
+ }
+
+ Constraint::RegSubVar(region, _) | Constraint::VarSubReg(_, region) => {
+ state.result.push(RegionAndOrigin {
+ region,
+ origin: this.constraints.get(&edge.data).unwrap().clone(),
+ });
+ }
+
+ Constraint::RegSubReg(..) => panic!(
+ "cannot reach reg-sub-reg edge in region inference \
+ post-processing"
+ ),
+ }
+ }
+ }
+ }
+
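+    /// Checks whether the region `min` satisfies `bound` (e.g., for
+    /// `OutlivedBy(r)`, whether `min <= r`), recursing through
+    /// `AnyBound` and `AllBounds`.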
+ fn bound_is_met(
+ &self,
+ bound: &VerifyBound<'tcx>,
+ var_values: &LexicalRegionResolutions<'tcx>,
+ generic_ty: Ty<'tcx>,
+ min: ty::Region<'tcx>,
+ ) -> bool {
+ match bound {
+ VerifyBound::IfEq(verify_if_eq_b) => {
+ let verify_if_eq_b = var_values.normalize(self.region_rels.tcx, *verify_if_eq_b);
+ match test_type_match::extract_verify_if_eq(
+ self.tcx(),
+ self.param_env,
+ &verify_if_eq_b,
+ generic_ty,
+ ) {
+ Some(r) => {
+ self.bound_is_met(&VerifyBound::OutlivedBy(r), var_values, generic_ty, min)
+ }
+
+ None => false,
+ }
+ }
+
+ VerifyBound::OutlivedBy(r) => {
+ self.sub_concrete_regions(min, var_values.normalize(self.tcx(), *r))
+ }
+
+ VerifyBound::IsEmpty => {
+ matches!(*min, ty::ReEmpty(_))
+ }
+
+ VerifyBound::AnyBound(bs) => {
+ bs.iter().any(|b| self.bound_is_met(b, var_values, generic_ty, min))
+ }
+
+ VerifyBound::AllBounds(bs) => {
+ bs.iter().all(|b| self.bound_is_met(b, var_values, generic_ty, min))
+ }
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for RegionAndOrigin<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "RegionAndOrigin({:?},{:?})", self.region, self.origin)
+ }
+}
+
+impl<'tcx> LexicalRegionResolutions<'tcx> {
+ fn normalize<T>(&self, tcx: TyCtxt<'tcx>, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ tcx.fold_regions(value, |r, _db| self.resolve_region(tcx, r))
+ }
+
+ fn value(&self, rid: RegionVid) -> &VarValue<'tcx> {
+ &self.values[rid]
+ }
+
+ fn value_mut(&mut self, rid: RegionVid) -> &mut VarValue<'tcx> {
+ &mut self.values[rid]
+ }
+
+ pub(crate) fn resolve_region(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ r: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ let result = match *r {
+ ty::ReVar(rid) => match self.values[rid] {
+ VarValue::Value(r) => r,
+ VarValue::ErrorValue => tcx.lifetimes.re_static,
+ },
+ _ => r,
+ };
+ debug!("resolve_region({:?}) = {:?}", r, result);
+ result
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/lub.rs b/compiler/rustc_infer/src/infer/lub.rs
new file mode 100644
index 000000000..9f96d52c8
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/lub.rs
@@ -0,0 +1,144 @@
+//! Least upper bound. See [`lattice`].
+
+use super::combine::CombineFields;
+use super::lattice::{self, LatticeDir};
+use super::InferCtxt;
+use super::Subtype;
+
+use crate::infer::combine::ConstEquateRelation;
+use crate::traits::{ObligationCause, PredicateObligation};
+use rustc_middle::ty::relate::{Relate, RelateResult, TypeRelation};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+/// "Least upper bound" (common supertype)
+pub struct Lub<'combine, 'infcx, 'tcx> {
+ fields: &'combine mut CombineFields<'infcx, 'tcx>,
+ a_is_expected: bool,
+}
+
+impl<'combine, 'infcx, 'tcx> Lub<'combine, 'infcx, 'tcx> {
+ pub fn new(
+ fields: &'combine mut CombineFields<'infcx, 'tcx>,
+ a_is_expected: bool,
+ ) -> Lub<'combine, 'infcx, 'tcx> {
+ Lub { fields, a_is_expected }
+ }
+}
+
+impl<'tcx> TypeRelation<'tcx> for Lub<'_, '_, 'tcx> {
+ fn tag(&self) -> &'static str {
+ "Lub"
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.fields.tcx()
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.fields.param_env
+ }
+
+ fn a_is_expected(&self) -> bool {
+ self.a_is_expected
+ }
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ variance: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ match variance {
+ ty::Invariant => self.fields.equate(self.a_is_expected).relate(a, b),
+ ty::Covariant => self.relate(a, b),
+ // FIXME(#41044) -- not correct, need test
+ ty::Bivariant => Ok(a),
+ ty::Contravariant => self.fields.glb(self.a_is_expected).relate(a, b),
+ }
+ }
+
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ lattice::super_lattice_tys(self, a, b)
+ }
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug!("{}.regions({:?}, {:?})", self.tag(), a, b);
+
+ let origin = Subtype(Box::new(self.fields.trace.clone()));
+ Ok(self.fields.infcx.inner.borrow_mut().unwrap_region_constraints().lub_regions(
+ self.tcx(),
+ origin,
+ a,
+ b,
+ ))
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ self.fields.infcx.super_combine_consts(self, a, b)
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ debug!("binders(a={:?}, b={:?})", a, b);
+ if a.skip_binder().has_escaping_bound_vars() || b.skip_binder().has_escaping_bound_vars() {
+ // When higher-ranked types are involved, computing the LUB is
+            // very challenging, so we switch to invariance instead. This is
+            // obviously overly conservative, but it works ok in practice.
+ self.relate_with_variance(
+ ty::Variance::Invariant,
+ ty::VarianceDiagInfo::default(),
+ a,
+ b,
+ )?;
+ Ok(a)
+ } else {
+ Ok(ty::Binder::dummy(self.relate(a.skip_binder(), b.skip_binder())?))
+ }
+ }
+}
+
+impl<'tcx> ConstEquateRelation<'tcx> for Lub<'_, '_, 'tcx> {
+ fn const_equate_obligation(&mut self, a: ty::Const<'tcx>, b: ty::Const<'tcx>) {
+ self.fields.add_const_equate_obligation(self.a_is_expected, a, b);
+ }
+}
+
+impl<'combine, 'infcx, 'tcx> LatticeDir<'infcx, 'tcx> for Lub<'combine, 'infcx, 'tcx> {
+ fn infcx(&self) -> &'infcx InferCtxt<'infcx, 'tcx> {
+ self.fields.infcx
+ }
+
+ fn cause(&self) -> &ObligationCause<'tcx> {
+ &self.fields.trace.cause
+ }
+
+ fn add_obligations(&mut self, obligations: Vec<PredicateObligation<'tcx>>) {
+ self.fields.obligations.extend(obligations)
+ }
+
+ fn relate_bound(&mut self, v: Ty<'tcx>, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, ()> {
+ let mut sub = self.fields.sub(self.a_is_expected);
+ sub.relate(a, v)?;
+ sub.relate(b, v)?;
+ Ok(())
+ }
+
+ fn define_opaque_types(&self) -> bool {
+ self.fields.define_opaque_types
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs
new file mode 100644
index 000000000..d7d1b5fa2
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/mod.rs
@@ -0,0 +1,2066 @@
+pub use self::freshen::TypeFreshener;
+pub use self::lexical_region_resolve::RegionResolutionError;
+pub use self::LateBoundRegionConversionTime::*;
+pub use self::RegionVariableOrigin::*;
+pub use self::SubregionOrigin::*;
+pub use self::ValuePairs::*;
+
+use self::opaque_types::OpaqueTypeStorage;
+pub(crate) use self::undo_log::{InferCtxtUndoLogs, Snapshot, UndoLog};
+
+use crate::traits::{self, ObligationCause, PredicateObligations, TraitEngine, TraitEngineExt};
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::undo_log::Rollback;
+use rustc_data_structures::unify as ut;
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::infer::canonical::{Canonical, CanonicalVarValues};
+use rustc_middle::infer::unify_key::{ConstVarValue, ConstVariableValue};
+use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind, ToType};
+use rustc_middle::mir::interpret::{ErrorHandled, EvalToValTreeResult};
+use rustc_middle::traits::select;
+use rustc_middle::ty::abstract_const::{AbstractConst, FailureKind};
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::fold::BoundVarReplacerDelegate;
+use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::relate::RelateResult;
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, SubstsRef};
+use rustc_middle::ty::visit::TypeVisitable;
+pub use rustc_middle::ty::IntVarValue;
+use rustc_middle::ty::{self, GenericParamDefKind, InferConst, Ty, TyCtxt};
+use rustc_middle::ty::{ConstVid, FloatVid, IntVid, TyVid};
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+
+use std::cell::{Cell, Ref, RefCell};
+use std::fmt;
+
+use self::combine::CombineFields;
+use self::free_regions::RegionRelations;
+use self::lexical_region_resolve::LexicalRegionResolutions;
+use self::outlives::env::OutlivesEnvironment;
+use self::region_constraints::{GenericKind, RegionConstraintData, VarInfos, VerifyBound};
+use self::region_constraints::{
+ RegionConstraintCollector, RegionConstraintStorage, RegionSnapshot,
+};
+use self::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+
+pub mod at;
+pub mod canonical;
+mod combine;
+mod equate;
+pub mod error_reporting;
+pub mod free_regions;
+mod freshen;
+mod fudge;
+mod glb;
+mod higher_ranked;
+pub mod lattice;
+mod lexical_region_resolve;
+mod lub;
+pub mod nll_relate;
+pub mod opaque_types;
+pub mod outlives;
+mod projection;
+pub mod region_constraints;
+pub mod resolve;
+mod sub;
+pub mod type_variable;
+mod undo_log;
+
+#[must_use]
+#[derive(Debug)]
+pub struct InferOk<'tcx, T> {
+ pub value: T,
+ pub obligations: PredicateObligations<'tcx>,
+}
+pub type InferResult<'tcx, T> = Result<InferOk<'tcx, T>, TypeError<'tcx>>;
+
+pub type Bound<T> = Option<T>;
+pub type UnitResult<'tcx> = RelateResult<'tcx, ()>; // "unify result"
+pub type FixupResult<'tcx, T> = Result<T, FixupError<'tcx>>; // "fixup result"
+
+pub(crate) type UnificationTable<'a, 'tcx, T> = ut::UnificationTable<
+ ut::InPlace<T, &'a mut ut::UnificationStorage<T>, &'a mut InferCtxtUndoLogs<'tcx>>,
+>;
+
+/// This type contains all the things within `InferCtxt` that sit within a
+/// `RefCell` and are involved with taking/rolling back snapshots. Snapshot
+/// operations are hot enough that we want only one call to `borrow_mut` per
+/// call to `start_snapshot` and `rollback_to`.
+#[derive(Clone)]
+pub struct InferCtxtInner<'tcx> {
+ /// Cache for projections. This cache is snapshotted along with the infcx.
+ ///
+ /// Public so that `traits::project` can use it.
+ pub projection_cache: traits::ProjectionCacheStorage<'tcx>,
+
+ /// We instantiate `UnificationTable` with `bounds<Ty>` because the types
+ /// that might instantiate a general type variable have an order,
+ /// represented by its upper and lower bounds.
+ type_variable_storage: type_variable::TypeVariableStorage<'tcx>,
+
+ /// Map from const parameter variable to the kind of const it represents.
+ const_unification_storage: ut::UnificationTableStorage<ty::ConstVid<'tcx>>,
+
+ /// Map from integral variable to the kind of integer it represents.
+ int_unification_storage: ut::UnificationTableStorage<ty::IntVid>,
+
+ /// Map from floating variable to the kind of float it represents.
+ float_unification_storage: ut::UnificationTableStorage<ty::FloatVid>,
+
+ /// Tracks the set of region variables and the constraints between them.
+ /// This is initially `Some(_)` but when
+ /// `resolve_regions_and_report_errors` is invoked, this gets set to `None`
+ /// -- further attempts to perform unification, etc., may fail if new
+ /// region constraints would've been added.
+ region_constraint_storage: Option<RegionConstraintStorage<'tcx>>,
+
+ /// A set of constraints that regionck must validate. Each
+ /// constraint has the form `T:'a`, meaning "some type `T` must
+    /// outlive the lifetime `'a`". These constraints derive from
+ /// instantiated type parameters. So if you had a struct defined
+ /// like
+ /// ```ignore (illustrative)
+ /// struct Foo<T:'static> { ... }
+ /// ```
+ /// then in some expression `let x = Foo { ... }` it will
+ /// instantiate the type parameter `T` with a fresh type `$0`. At
+ /// the same time, it will record a region obligation of
+ /// `$0:'static`. This will get checked later by regionck. (We
+ /// can't generally check these things right away because we have
+ /// to wait until types are resolved.)
+ ///
+ /// These are stored in a map keyed to the id of the innermost
+ /// enclosing fn body / static initializer expression. This is
+ /// because the location where the obligation was incurred can be
+ /// relevant with respect to which sublifetime assumptions are in
+ /// place. The reason that we store under the fn-id, and not
+ /// something more fine-grained, is so that it is easier for
+ /// regionck to be sure that it has found *all* the region
+ /// obligations (otherwise, it's easy to fail to walk to a
+ /// particular node-id).
+ ///
+ /// Before running `resolve_regions_and_report_errors`, the creator
+ /// of the inference context is expected to invoke
+ /// [`InferCtxt::process_registered_region_obligations`]
+ /// for each body-id in this map, which will process the
+ /// obligations within. This is expected to be done 'late enough'
+ /// that all type inference variables have been bound and so forth.
+ region_obligations: Vec<RegionObligation<'tcx>>,
+
+ undo_log: InferCtxtUndoLogs<'tcx>,
+
+ /// Caches for opaque type inference.
+ pub opaque_type_storage: OpaqueTypeStorage<'tcx>,
+}
+
+impl<'tcx> InferCtxtInner<'tcx> {
+ fn new() -> InferCtxtInner<'tcx> {
+ InferCtxtInner {
+ projection_cache: Default::default(),
+ type_variable_storage: type_variable::TypeVariableStorage::new(),
+ undo_log: InferCtxtUndoLogs::default(),
+ const_unification_storage: ut::UnificationTableStorage::new(),
+ int_unification_storage: ut::UnificationTableStorage::new(),
+ float_unification_storage: ut::UnificationTableStorage::new(),
+ region_constraint_storage: Some(RegionConstraintStorage::new()),
+ region_obligations: vec![],
+ opaque_type_storage: Default::default(),
+ }
+ }
+
+ #[inline]
+ pub fn region_obligations(&self) -> &[RegionObligation<'tcx>] {
+ &self.region_obligations
+ }
+
+ #[inline]
+ pub fn projection_cache(&mut self) -> traits::ProjectionCache<'_, 'tcx> {
+ self.projection_cache.with_log(&mut self.undo_log)
+ }
+
+ #[inline]
+ fn type_variables(&mut self) -> type_variable::TypeVariableTable<'_, 'tcx> {
+ self.type_variable_storage.with_log(&mut self.undo_log)
+ }
+
+ #[inline]
+ pub fn opaque_types(&mut self) -> opaque_types::OpaqueTypeTable<'_, 'tcx> {
+ self.opaque_type_storage.with_log(&mut self.undo_log)
+ }
+
+ #[inline]
+ fn int_unification_table(
+ &mut self,
+ ) -> ut::UnificationTable<
+ ut::InPlace<
+ ty::IntVid,
+ &mut ut::UnificationStorage<ty::IntVid>,
+ &mut InferCtxtUndoLogs<'tcx>,
+ >,
+ > {
+ self.int_unification_storage.with_log(&mut self.undo_log)
+ }
+
+ #[inline]
+ fn float_unification_table(
+ &mut self,
+ ) -> ut::UnificationTable<
+ ut::InPlace<
+ ty::FloatVid,
+ &mut ut::UnificationStorage<ty::FloatVid>,
+ &mut InferCtxtUndoLogs<'tcx>,
+ >,
+ > {
+ self.float_unification_storage.with_log(&mut self.undo_log)
+ }
+
+ #[inline]
+ fn const_unification_table(
+ &mut self,
+ ) -> ut::UnificationTable<
+ ut::InPlace<
+ ty::ConstVid<'tcx>,
+ &mut ut::UnificationStorage<ty::ConstVid<'tcx>>,
+ &mut InferCtxtUndoLogs<'tcx>,
+ >,
+ > {
+ self.const_unification_storage.with_log(&mut self.undo_log)
+ }
+
+ #[inline]
+ pub fn unwrap_region_constraints(&mut self) -> RegionConstraintCollector<'_, 'tcx> {
+ self.region_constraint_storage
+ .as_mut()
+ .expect("region constraints already solved")
+ .with_log(&mut self.undo_log)
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum DefiningAnchor {
+ /// `DefId` of the item.
+ Bind(LocalDefId),
+    /// When opaque types are not resolved, we `Bubble` up, meaning we return the
+    /// opaque/hidden type pair from the query for the caller of the query to handle.
+ Bubble,
+ /// Used to catch type mismatch errors when handling opaque types.
+ Error,
+}
+
+pub struct InferCtxt<'a, 'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+
+ /// The `DefId` of the item in whose context we are performing inference or typeck.
+ /// It is used to check whether an opaque type use is a defining use.
+ ///
+ /// If it is `DefiningAnchor::Bubble`, we can't resolve opaque types here and need to bubble up
+    /// the obligation. This frequently happens for short-lived `InferCtxt`s
+    /// within queries: the opaque type obligations are forwarded outward until
+    /// they end up in an `InferCtxt` for typeck or borrowck.
+    ///
+    /// Its default value is `DefiningAnchor::Error`; this way it is easier to catch
+    /// errors that might come up during inference or typeck.
+ pub defining_use_anchor: DefiningAnchor,
+
+ /// Whether this inference context should care about region obligations in
+ /// the root universe. Most notably, this is used during hir typeck as region
+ /// solving is left to borrowck instead.
+ pub considering_regions: bool,
+
+ /// During type-checking/inference of a body, `in_progress_typeck_results`
+ /// contains a reference to the typeck results being built up, which are
+ /// used for reading closure kinds/signatures as they are inferred,
+ /// and for error reporting logic to read arbitrary node types.
+ pub in_progress_typeck_results: Option<&'a RefCell<ty::TypeckResults<'tcx>>>,
+
+ pub inner: RefCell<InferCtxtInner<'tcx>>,
+
+ /// If set, this flag causes us to skip the 'leak check' during
+ /// higher-ranked subtyping operations. This flag is a temporary one used
+ /// to manage the removal of the leak-check: for the time being, we still run the
+ /// leak-check, but we issue warnings. This flag can only be set to true
+ /// when entering a snapshot.
+ skip_leak_check: Cell<bool>,
+
+ /// Once region inference is done, the values for each variable.
+ lexical_region_resolutions: RefCell<Option<LexicalRegionResolutions<'tcx>>>,
+
+ /// Caches the results of trait selection. This cache is used
+ /// for things that have to do with the parameters in scope.
+ pub selection_cache: select::SelectionCache<'tcx>,
+
+ /// Caches the results of trait evaluation.
+ pub evaluation_cache: select::EvaluationCache<'tcx>,
+
+ /// the set of predicates on which errors have been reported, to
+ /// avoid reporting the same error twice.
+ pub reported_trait_errors: RefCell<FxHashMap<Span, Vec<ty::Predicate<'tcx>>>>,
+
+ pub reported_closure_mismatch: RefCell<FxHashSet<(Span, Option<Span>)>>,
+
+ /// When an error occurs, we want to avoid reporting "derived"
+ /// errors that are due to this original failure. Normally, we
+ /// handle this with the `err_count_on_creation` count, which
+ /// basically just tracks how many errors were reported when we
+ /// started type-checking a fn and checks to see if any new errors
+ /// have been reported since then. Not great, but it works.
+ ///
+ /// However, when errors originated in other passes -- notably
+ /// resolve -- this heuristic breaks down. Therefore, we have this
+ /// auxiliary flag that one can set whenever one creates a
+ /// type-error that is due to an error in a prior pass.
+ ///
+ /// Don't read this flag directly, call `is_tainted_by_errors()`
+ /// and `set_tainted_by_errors()`.
+ tainted_by_errors_flag: Cell<bool>,
+
+    /// Track how many errors were reported when this infcx was created.
+    /// If the number of errors increases, that's also a sign (like
+    /// `tainted_by_errors`) to avoid reporting certain kinds of errors.
+ // FIXME(matthewjasper) Merge into `tainted_by_errors_flag`
+ err_count_on_creation: usize,
+
+ /// This flag is true while there is an active snapshot.
+ in_snapshot: Cell<bool>,
+
+ /// What is the innermost universe we have created? Starts out as
+ /// `UniverseIndex::root()` but grows from there as we enter
+ /// universal quantifiers.
+ ///
+ /// N.B., at present, we exclude the universal quantifiers on the
+ /// item we are type-checking, and just consider those names as
+ /// part of the root universe. So this would only get incremented
+ /// when we enter into a higher-ranked (`for<..>`) type or trait
+ /// bound.
+ universe: Cell<ty::UniverseIndex>,
+}
+
+/// See the `error_reporting` module for more details.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable)]
+pub enum ValuePairs<'tcx> {
+ Regions(ExpectedFound<ty::Region<'tcx>>),
+ Terms(ExpectedFound<ty::Term<'tcx>>),
+ TraitRefs(ExpectedFound<ty::TraitRef<'tcx>>),
+ PolyTraitRefs(ExpectedFound<ty::PolyTraitRef<'tcx>>),
+}
+
+impl<'tcx> ValuePairs<'tcx> {
+ pub fn ty(&self) -> Option<(Ty<'tcx>, Ty<'tcx>)> {
+ if let ValuePairs::Terms(ExpectedFound {
+ expected: ty::Term::Ty(expected),
+ found: ty::Term::Ty(found),
+ }) = self
+ {
+ Some((*expected, *found))
+ } else {
+ None
+ }
+ }
+}
+
+/// The trace designates the path through inference that we took to
+/// encounter an error or subtyping constraint.
+///
+/// See the `error_reporting` module for more details.
+#[derive(Clone, Debug)]
+pub struct TypeTrace<'tcx> {
+ pub cause: ObligationCause<'tcx>,
+ pub values: ValuePairs<'tcx>,
+}
+
+/// The origin of a `r1 <= r2` constraint.
+///
+/// See `error_reporting` module for more details
+#[derive(Clone, Debug)]
+pub enum SubregionOrigin<'tcx> {
+ /// Arose from a subtyping relation
+ Subtype(Box<TypeTrace<'tcx>>),
+
+ /// When casting `&'a T` to an `&'b Trait` object,
+ /// relating `'a` to `'b`
+ RelateObjectBound(Span),
+
+ /// Some type parameter was instantiated with the given type,
+ /// and that type must outlive some region.
+ RelateParamBound(Span, Ty<'tcx>, Option<Span>),
+
+ /// The given region parameter was instantiated with a region
+ /// that must outlive some other region.
+ RelateRegionParamBound(Span),
+
+ /// Creating a pointer `b` to contents of another reference
+ Reborrow(Span),
+
+ /// Creating a pointer `b` to contents of an upvar
+ ReborrowUpvar(Span, ty::UpvarId),
+
+ /// Data with type `Ty<'tcx>` was borrowed
+ DataBorrowed(Ty<'tcx>, Span),
+
+ /// (&'a &'b T) where a >= b
+ ReferenceOutlivesReferent(Ty<'tcx>, Span),
+
+ /// Comparing the signature and requirements of an impl method against
+ /// the containing trait.
+ CompareImplItemObligation { span: Span, impl_item_def_id: LocalDefId, trait_item_def_id: DefId },
+
+ /// Checking that the bounds of a trait's associated type hold for a given impl
+ CheckAssociatedTypeBounds {
+ parent: Box<SubregionOrigin<'tcx>>,
+ impl_item_def_id: LocalDefId,
+ trait_item_def_id: DefId,
+ },
+}
+
+// `SubregionOrigin` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(SubregionOrigin<'_>, 32);
+
+/// Times when we replace late-bound regions with variables:
+#[derive(Clone, Copy, Debug)]
+pub enum LateBoundRegionConversionTime {
+ /// when a fn is called
+ FnCall,
+
+ /// when two higher-ranked types are compared
+ HigherRankedType,
+
+ /// when projecting an associated type
+ AssocTypeProjection(DefId),
+}
+
+/// Reasons to create a region inference variable
+///
+/// See `error_reporting` module for more details
+#[derive(Copy, Clone, Debug)]
+pub enum RegionVariableOrigin {
+ /// Region variables created for ill-categorized reasons,
+    /// mostly indicating places in need of refactoring
+ MiscVariable(Span),
+
+ /// Regions created by a `&P` or `[...]` pattern
+ PatternRegion(Span),
+
+ /// Regions created by `&` operator
+ AddrOfRegion(Span),
+
+ /// Regions created as part of an autoref of a method receiver
+ Autoref(Span),
+
+ /// Regions created as part of an automatic coercion
+ Coercion(Span),
+
+ /// Region variables created as the values for early-bound regions
+ EarlyBoundRegion(Span, Symbol),
+
+ /// Region variables created for bound regions
+ /// in a function or method that is called
+ LateBoundRegion(Span, ty::BoundRegionKind, LateBoundRegionConversionTime),
+
+ UpvarRegion(ty::UpvarId, Span),
+
+ /// This origin is used for the inference variables that we create
+ /// during NLL region processing.
+ Nll(NllRegionVariableOrigin),
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum NllRegionVariableOrigin {
+ /// During NLL region processing, we create variables for free
+ /// regions that we encounter in the function signature and
+    /// elsewhere. This origin indicates we've got one of those.
+ FreeRegion,
+
+ /// "Universal" instantiation of a higher-ranked region (e.g.,
+ /// from a `for<'a> T` binder). Meant to represent "any region".
+ Placeholder(ty::PlaceholderRegion),
+
+ Existential {
+ /// If this is true, then this variable was created to represent a lifetime
+ /// bound in a `for` binder. For example, it might have been created to
+ /// represent the lifetime `'a` in a type like `for<'a> fn(&'a u32)`.
+ /// Such variables are created when we are trying to figure out if there
+ /// is any valid instantiation of `'a` that could fit into some scenario.
+ ///
+ /// This is used to inform error reporting: in the case that we are trying to
+ /// determine whether there is any valid instantiation of a `'a` variable that meets
+ /// some constraint C, we want to blame the "source" of that `for` type,
+ /// rather than blaming the source of the constraint C.
+ from_forall: bool,
+ },
+}
+
+// FIXME(eddyb) investigate overlap between this and `TyOrConstInferVar`.
+#[derive(Copy, Clone, Debug)]
+pub enum FixupError<'tcx> {
+ UnresolvedIntTy(IntVid),
+ UnresolvedFloatTy(FloatVid),
+ UnresolvedTy(TyVid),
+ UnresolvedConst(ConstVid<'tcx>),
+}
+
+/// See the `region_obligations` field for more information.
+#[derive(Clone)]
+pub struct RegionObligation<'tcx> {
+ pub sub_region: ty::Region<'tcx>,
+ pub sup_type: Ty<'tcx>,
+ pub origin: SubregionOrigin<'tcx>,
+}
+
+impl<'tcx> fmt::Display for FixupError<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use self::FixupError::*;
+
+ match *self {
+ UnresolvedIntTy(_) => write!(
+ f,
+ "cannot determine the type of this integer; \
+ add a suffix to specify the type explicitly"
+ ),
+ UnresolvedFloatTy(_) => write!(
+ f,
+ "cannot determine the type of this number; \
+ add a suffix to specify the type explicitly"
+ ),
+ UnresolvedTy(_) => write!(f, "unconstrained type"),
+ UnresolvedConst(_) => write!(f, "unconstrained const value"),
+ }
+ }
+}
+
+/// A temporary returned by `tcx.infer_ctxt()`. This is necessary
+/// for multiple `InferCtxt` to share the same `in_progress_typeck_results`
+/// without using `Rc` or something similar.
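+///
+/// A typical construction, sketched (illustrative only):
+///
+/// ```ignore (illustrative)
+/// tcx.infer_ctxt().ignoring_regions().enter(|infcx| {
+///     // `infcx` lives only for the duration of this closure; all of its
+///     // inference variables are discarded when it returns.
+/// });
+/// ```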
+pub struct InferCtxtBuilder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ defining_use_anchor: DefiningAnchor,
+ considering_regions: bool,
+ fresh_typeck_results: Option<RefCell<ty::TypeckResults<'tcx>>>,
+}
+
+pub trait TyCtxtInferExt<'tcx> {
+ fn infer_ctxt(self) -> InferCtxtBuilder<'tcx>;
+}
+
+impl<'tcx> TyCtxtInferExt<'tcx> for TyCtxt<'tcx> {
+ fn infer_ctxt(self) -> InferCtxtBuilder<'tcx> {
+ InferCtxtBuilder {
+ tcx: self,
+ defining_use_anchor: DefiningAnchor::Error,
+ considering_regions: true,
+ fresh_typeck_results: None,
+ }
+ }
+}
+
+impl<'tcx> InferCtxtBuilder<'tcx> {
+ /// Used only by `rustc_typeck` during body type-checking/inference,
+ /// will initialize `in_progress_typeck_results` with fresh `TypeckResults`.
+ /// Will also change the scope for opaque type defining use checks to the given owner.
+ pub fn with_fresh_in_progress_typeck_results(mut self, table_owner: LocalDefId) -> Self {
+ self.fresh_typeck_results = Some(RefCell::new(ty::TypeckResults::new(table_owner)));
+ self.with_opaque_type_inference(DefiningAnchor::Bind(table_owner))
+ }
+
+ /// Whenever the `InferCtxt` should be able to handle defining uses of opaque types,
+ /// you need to call this function. Otherwise the opaque type will be treated opaquely.
+ ///
+ /// It is only meant to be called in two places, for typeck
+ /// (via `with_fresh_in_progress_typeck_results`) and for the inference context used
+ /// in mir borrowck.
+ pub fn with_opaque_type_inference(mut self, defining_use_anchor: DefiningAnchor) -> Self {
+ self.defining_use_anchor = defining_use_anchor;
+ self
+ }
+
+ pub fn ignoring_regions(mut self) -> Self {
+ self.considering_regions = false;
+ self
+ }
+
+ /// Given a canonical value `C` as a starting point, create an
+ /// inference context that contains each of the bound values
+ /// within instantiated as a fresh variable. The `f` closure is
+ /// invoked with the new infcx, along with the instantiated value
+ /// `V` and a substitution `S`. This substitution `S` maps from
+ /// the bound values in `C` to their instantiated values in `V`
+ /// (in other words, `S(C) = V`).
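+    ///
+    /// A minimal usage sketch (illustrative; `canonical` and the closure body
+    /// are placeholders):
+    ///
+    /// ```ignore (illustrative)
+    /// tcx.infer_ctxt().enter_with_canonical(span, &canonical, |infcx, value, subst| {
+    ///     // `value` is `canonical` with every bound value replaced by a fresh
+    ///     // inference variable; `subst` records that mapping (`S(C) = V`).
+    /// });
+    /// ```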
+ pub fn enter_with_canonical<T, R>(
+ &mut self,
+ span: Span,
+ canonical: &Canonical<'tcx, T>,
+ f: impl for<'a> FnOnce(InferCtxt<'a, 'tcx>, T, CanonicalVarValues<'tcx>) -> R,
+ ) -> R
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.enter(|infcx| {
+ let (value, subst) =
+ infcx.instantiate_canonical_with_fresh_inference_vars(span, canonical);
+ f(infcx, value, subst)
+ })
+ }
+
+ pub fn enter<R>(&mut self, f: impl for<'a> FnOnce(InferCtxt<'a, 'tcx>) -> R) -> R {
+ let InferCtxtBuilder {
+ tcx,
+ defining_use_anchor,
+ considering_regions,
+ ref fresh_typeck_results,
+ } = *self;
+ let in_progress_typeck_results = fresh_typeck_results.as_ref();
+ f(InferCtxt {
+ tcx,
+ defining_use_anchor,
+ considering_regions,
+ in_progress_typeck_results,
+ inner: RefCell::new(InferCtxtInner::new()),
+ lexical_region_resolutions: RefCell::new(None),
+ selection_cache: Default::default(),
+ evaluation_cache: Default::default(),
+ reported_trait_errors: Default::default(),
+ reported_closure_mismatch: Default::default(),
+ tainted_by_errors_flag: Cell::new(false),
+ err_count_on_creation: tcx.sess.err_count(),
+ in_snapshot: Cell::new(false),
+ skip_leak_check: Cell::new(false),
+ universe: Cell::new(ty::UniverseIndex::ROOT),
+ })
+ }
+}
+
+impl<'tcx, T> InferOk<'tcx, T> {
+ pub fn unit(self) -> InferOk<'tcx, ()> {
+ InferOk { value: (), obligations: self.obligations }
+ }
+
+ /// Extracts `value`, registering any obligations into `fulfill_cx`.
+ pub fn into_value_registering_obligations(
+ self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ fulfill_cx: &mut dyn TraitEngine<'tcx>,
+ ) -> T {
+ let InferOk { value, obligations } = self;
+ fulfill_cx.register_predicate_obligations(infcx, obligations);
+ value
+ }
+}
+
+impl<'tcx> InferOk<'tcx, ()> {
+ pub fn into_obligations(self) -> PredicateObligations<'tcx> {
+ self.obligations
+ }
+}
+
+#[must_use = "once you start a snapshot, you should always consume it"]
+pub struct CombinedSnapshot<'a, 'tcx> {
+ undo_snapshot: Snapshot<'tcx>,
+ region_constraints_snapshot: RegionSnapshot,
+ universe: ty::UniverseIndex,
+ was_in_snapshot: bool,
+ _in_progress_typeck_results: Option<Ref<'a, ty::TypeckResults<'tcx>>>,
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+    /// Calls `tcx.try_unify_abstract_consts` after
+ /// canonicalizing the consts.
+ #[instrument(skip(self), level = "debug")]
+ pub fn try_unify_abstract_consts(
+ &self,
+ a: ty::Unevaluated<'tcx, ()>,
+ b: ty::Unevaluated<'tcx, ()>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> bool {
+ // Reject any attempt to unify two unevaluated constants that contain inference
+ // variables, since inference variables in queries lead to ICEs.
+ if a.substs.has_infer_types_or_consts()
+ || b.substs.has_infer_types_or_consts()
+ || param_env.has_infer_types_or_consts()
+ {
+ debug!("a or b or param_env contain infer vars in its substs -> cannot unify");
+ return false;
+ }
+
+ let param_env_and = param_env.and((a, b));
+ let erased = self.tcx.erase_regions(param_env_and);
+ debug!("after erase_regions: {:?}", erased);
+
+ self.tcx.try_unify_abstract_consts(erased)
+ }
+
+ pub fn is_in_snapshot(&self) -> bool {
+ self.in_snapshot.get()
+ }
+
+ pub fn freshen<T: TypeFoldable<'tcx>>(&self, t: T) -> T {
+ t.fold_with(&mut self.freshener())
+ }
+
+ /// Returns the origin of the type variable identified by `vid`, or `None`
+ /// if this is not a type variable.
+ ///
+ /// No attempt is made to resolve `ty`.
+ pub fn type_var_origin(&'a self, ty: Ty<'tcx>) -> Option<TypeVariableOrigin> {
+ match *ty.kind() {
+ ty::Infer(ty::TyVar(vid)) => {
+ Some(*self.inner.borrow_mut().type_variables().var_origin(vid))
+ }
+ _ => None,
+ }
+ }
+
+ pub fn freshener<'b>(&'b self) -> TypeFreshener<'b, 'tcx> {
+ freshen::TypeFreshener::new(self, false)
+ }
+
+ /// Like `freshener`, but does not replace `'static` regions.
+ pub fn freshener_keep_static<'b>(&'b self) -> TypeFreshener<'b, 'tcx> {
+ freshen::TypeFreshener::new(self, true)
+ }
+
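+    /// Returns a `Ty` for every type, integral, and float inference variable
+    /// that has not yet been instantiated with a concrete type.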
+ pub fn unsolved_variables(&self) -> Vec<Ty<'tcx>> {
+ let mut inner = self.inner.borrow_mut();
+ let mut vars: Vec<Ty<'_>> = inner
+ .type_variables()
+ .unsolved_variables()
+ .into_iter()
+ .map(|t| self.tcx.mk_ty_var(t))
+ .collect();
+ vars.extend(
+ (0..inner.int_unification_table().len())
+ .map(|i| ty::IntVid { index: i as u32 })
+ .filter(|&vid| inner.int_unification_table().probe_value(vid).is_none())
+ .map(|v| self.tcx.mk_int_var(v)),
+ );
+ vars.extend(
+ (0..inner.float_unification_table().len())
+ .map(|i| ty::FloatVid { index: i as u32 })
+ .filter(|&vid| inner.float_unification_table().probe_value(vid).is_none())
+ .map(|v| self.tcx.mk_float_var(v)),
+ );
+ vars
+ }
+
+ fn combine_fields(
+ &'a self,
+ trace: TypeTrace<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ define_opaque_types: bool,
+ ) -> CombineFields<'a, 'tcx> {
+ CombineFields {
+ infcx: self,
+ trace,
+ cause: None,
+ param_env,
+ obligations: PredicateObligations::new(),
+ define_opaque_types,
+ }
+ }
+
+ /// Clear the "currently in a snapshot" flag, invoke the closure,
+ /// then restore the flag to its original value. This flag is a
+ /// debugging measure designed to detect cases where we start a
+ /// snapshot, create type variables, and register obligations
+ /// which may involve those type variables in the fulfillment cx,
+ /// potentially leaving "dangling type variables" behind.
+ /// In such cases, an assertion will fail when attempting to
+ /// register obligations, within a snapshot. Very useful, much
+ /// better than grovelling through megabytes of `RUSTC_LOG` output.
+ ///
+ /// HOWEVER, in some cases the flag is unhelpful. In particular, we
+    /// sometimes create a "mini-fulfillment-cx" in which we enroll
+ /// obligations. As long as this fulfillment cx is fully drained
+ /// before we return, this is not a problem, as there won't be any
+ /// escaping obligations in the main cx. In those cases, you can
+ /// use this function.
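+    ///
+    /// A sketch of such a use (illustrative; `drain_fulfillment_cx` is a
+    /// placeholder):
+    ///
+    /// ```ignore (illustrative)
+    /// let r = infcx.save_and_restore_in_snapshot_flag(|this| {
+    ///     // the in-snapshot flag is cleared here, so the fully-drained
+    ///     // mini-fulfillment context may register obligations freely
+    ///     drain_fulfillment_cx(this)
+    /// });
+    /// ```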
+ pub fn save_and_restore_in_snapshot_flag<F, R>(&self, func: F) -> R
+ where
+ F: FnOnce(&Self) -> R,
+ {
+ let flag = self.in_snapshot.replace(false);
+ let result = func(self);
+ self.in_snapshot.set(flag);
+ result
+ }
+
+ fn start_snapshot(&self) -> CombinedSnapshot<'a, 'tcx> {
+ debug!("start_snapshot()");
+
+ let in_snapshot = self.in_snapshot.replace(true);
+
+ let mut inner = self.inner.borrow_mut();
+
+ CombinedSnapshot {
+ undo_snapshot: inner.undo_log.start_snapshot(),
+ region_constraints_snapshot: inner.unwrap_region_constraints().start_snapshot(),
+ universe: self.universe(),
+ was_in_snapshot: in_snapshot,
+ // Borrow typeck results "in progress" (i.e., during typeck)
+ // to ban writes from within a snapshot to them.
+ _in_progress_typeck_results: self
+ .in_progress_typeck_results
+ .map(|typeck_results| typeck_results.borrow()),
+ }
+ }
+
+ #[instrument(skip(self, snapshot), level = "debug")]
+ fn rollback_to(&self, cause: &str, snapshot: CombinedSnapshot<'a, 'tcx>) {
+ let CombinedSnapshot {
+ undo_snapshot,
+ region_constraints_snapshot,
+ universe,
+ was_in_snapshot,
+ _in_progress_typeck_results,
+ } = snapshot;
+
+ self.in_snapshot.set(was_in_snapshot);
+ self.universe.set(universe);
+
+ let mut inner = self.inner.borrow_mut();
+ inner.rollback_to(undo_snapshot);
+ inner.unwrap_region_constraints().rollback_to(region_constraints_snapshot);
+ }
+
+ #[instrument(skip(self, snapshot), level = "debug")]
+ fn commit_from(&self, snapshot: CombinedSnapshot<'a, 'tcx>) {
+ let CombinedSnapshot {
+ undo_snapshot,
+ region_constraints_snapshot: _,
+ universe: _,
+ was_in_snapshot,
+ _in_progress_typeck_results,
+ } = snapshot;
+
+ self.in_snapshot.set(was_in_snapshot);
+
+ self.inner.borrow_mut().commit(undo_snapshot);
+ }
+
+ /// Execute `f` and commit the bindings if closure `f` returns `Ok(_)`.
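+    ///
+    /// A sketch of typical use (illustrative; `cause`, `param_env`, `a`, and
+    /// `b` are placeholders):
+    ///
+    /// ```ignore (illustrative)
+    /// let res = infcx.commit_if_ok(|_snapshot| {
+    ///     // on `Err(_)`, every binding made inside is rolled back
+    ///     infcx.at(&cause, param_env).eq(a, b)
+    /// });
+    /// ```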
+ #[instrument(skip(self, f), level = "debug")]
+ pub fn commit_if_ok<T, E, F>(&self, f: F) -> Result<T, E>
+ where
+ F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> Result<T, E>,
+ {
+ let snapshot = self.start_snapshot();
+ let r = f(&snapshot);
+ debug!("commit_if_ok() -- r.is_ok() = {}", r.is_ok());
+ match r {
+ Ok(_) => {
+ self.commit_from(snapshot);
+ }
+ Err(_) => {
+ self.rollback_to("commit_if_ok -- error", snapshot);
+ }
+ }
+ r
+ }
+
+ /// Execute `f` then unroll any bindings it creates.
+ #[instrument(skip(self, f), level = "debug")]
+ pub fn probe<R, F>(&self, f: F) -> R
+ where
+ F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> R,
+ {
+ let snapshot = self.start_snapshot();
+ let r = f(&snapshot);
+ self.rollback_to("probe", snapshot);
+ r
+ }
+
+ /// If `should_skip` is true, then execute `f` then unroll any bindings it creates.
+ #[instrument(skip(self, f), level = "debug")]
+ pub fn probe_maybe_skip_leak_check<R, F>(&self, should_skip: bool, f: F) -> R
+ where
+ F: FnOnce(&CombinedSnapshot<'a, 'tcx>) -> R,
+ {
+ let snapshot = self.start_snapshot();
+ let was_skip_leak_check = self.skip_leak_check.get();
+ if should_skip {
+ self.skip_leak_check.set(true);
+ }
+ let r = f(&snapshot);
+ self.rollback_to("probe", snapshot);
+ self.skip_leak_check.set(was_skip_leak_check);
+ r
+ }
+
+    /// Scans the constraints produced since `snapshot` began and returns:
+ ///
+ /// - `None` -- if none of them involve "region outlives" constraints
+ /// - `Some(true)` -- if there are `'a: 'b` constraints where `'a` or `'b` is a placeholder
+ /// - `Some(false)` -- if there are `'a: 'b` constraints but none involve placeholders
+ pub fn region_constraints_added_in_snapshot(
+ &self,
+ snapshot: &CombinedSnapshot<'a, 'tcx>,
+ ) -> Option<bool> {
+ self.inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .region_constraints_added_in_snapshot(&snapshot.undo_snapshot)
+ }
+
+ pub fn opaque_types_added_in_snapshot(&self, snapshot: &CombinedSnapshot<'a, 'tcx>) -> bool {
+ self.inner.borrow().undo_log.opaque_types_in_snapshot(&snapshot.undo_snapshot)
+ }
+
+ pub fn add_given(&self, sub: ty::Region<'tcx>, sup: ty::RegionVid) {
+ self.inner.borrow_mut().unwrap_region_constraints().add_given(sub, sup);
+ }
+
+ pub fn can_sub<T>(&self, param_env: ty::ParamEnv<'tcx>, a: T, b: T) -> UnitResult<'tcx>
+ where
+ T: at::ToTrace<'tcx>,
+ {
+ let origin = &ObligationCause::dummy();
+ self.probe(|_| {
+ self.at(origin, param_env).sub(a, b).map(|InferOk { obligations: _, .. }| {
+ // Ignore obligations, since we are unrolling
+ // everything anyway.
+ })
+ })
+ }
+
+ pub fn can_eq<T>(&self, param_env: ty::ParamEnv<'tcx>, a: T, b: T) -> UnitResult<'tcx>
+ where
+ T: at::ToTrace<'tcx>,
+ {
+ let origin = &ObligationCause::dummy();
+ self.probe(|_| {
+ self.at(origin, param_env).eq(a, b).map(|InferOk { obligations: _, .. }| {
+ // Ignore obligations, since we are unrolling
+ // everything anyway.
+ })
+ })
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn sub_regions(
+ &self,
+ origin: SubregionOrigin<'tcx>,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) {
+ self.inner.borrow_mut().unwrap_region_constraints().make_subregion(origin, a, b);
+ }
+
+ /// Require that the region `r` be equal to one of the regions in
+ /// the set `regions`.
+ #[instrument(skip(self), level = "debug")]
+ pub fn member_constraint(
+ &self,
+ key: ty::OpaqueTypeKey<'tcx>,
+ definition_span: Span,
+ hidden_ty: Ty<'tcx>,
+ region: ty::Region<'tcx>,
+ in_regions: &Lrc<Vec<ty::Region<'tcx>>>,
+ ) {
+ self.inner.borrow_mut().unwrap_region_constraints().member_constraint(
+ key,
+ definition_span,
+ hidden_ty,
+ region,
+ in_regions,
+ );
+ }
+
+ /// Processes a `Coerce` predicate from the fulfillment context.
+ /// This is NOT the preferred way to handle coercion, which is to
+ /// invoke `FnCtxt::coerce` or a similar method (see `coercion.rs`).
+ ///
+ /// This method here is actually a fallback that winds up being
+ /// invoked when `FnCtxt::coerce` encounters unresolved type variables
+ /// and records a coercion predicate. Presently, this method is equivalent
+ /// to `subtype_predicate` -- that is, "coercing" `a` to `b` winds up
+ /// actually requiring `a <: b`. This is of course a valid coercion,
+ /// but it's not as flexible as `FnCtxt::coerce` would be.
+ ///
+ /// (We may refactor this in the future, but there are a number of
+ /// practical obstacles. Among other things, `FnCtxt::coerce` presently
+ /// records adjustments that are required on the HIR in order to perform
+ /// the coercion, and we don't currently have a way to manage that.)
+ pub fn coerce_predicate(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ predicate: ty::PolyCoercePredicate<'tcx>,
+ ) -> Option<InferResult<'tcx, ()>> {
+ let subtype_predicate = predicate.map_bound(|p| ty::SubtypePredicate {
+ a_is_expected: false, // when coercing from `a` to `b`, `b` is expected
+ a: p.a,
+ b: p.b,
+ });
+ self.subtype_predicate(cause, param_env, subtype_predicate)
+ }
+
+ pub fn subtype_predicate(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ predicate: ty::PolySubtypePredicate<'tcx>,
+ ) -> Option<InferResult<'tcx, ()>> {
+ // Check for two unresolved inference variables, in which case we can
+ // make no progress. This is partly a micro-optimization, but it's
+ // also an opportunity to "sub-unify" the variables. This isn't
+ // *necessary* to prevent cycles, because they would eventually be sub-unified
+ // anyhow during generalization, but it helps with diagnostics (we can detect
+ // earlier that they are sub-unified).
+ //
+ // Note that we can just skip the binders here because
+ // type variables can't (at present, at
+ // least) capture any of the things bound by this binder.
+ //
+ // Note that this sub here is not just for diagnostics - it has semantic
+ // effects as well.
+ let r_a = self.shallow_resolve(predicate.skip_binder().a);
+ let r_b = self.shallow_resolve(predicate.skip_binder().b);
+ match (r_a.kind(), r_b.kind()) {
+ (&ty::Infer(ty::TyVar(a_vid)), &ty::Infer(ty::TyVar(b_vid))) => {
+ self.inner.borrow_mut().type_variables().sub(a_vid, b_vid);
+ return None;
+ }
+ _ => {}
+ }
+
+ Some(self.commit_if_ok(|_snapshot| {
+ let ty::SubtypePredicate { a_is_expected, a, b } =
+ self.replace_bound_vars_with_placeholders(predicate);
+
+ let ok = self.at(cause, param_env).sub_exp(a_is_expected, a, b)?;
+
+ Ok(ok.unit())
+ }))
+ }
+
+ pub fn region_outlives_predicate(
+ &self,
+ cause: &traits::ObligationCause<'tcx>,
+ predicate: ty::PolyRegionOutlivesPredicate<'tcx>,
+ ) {
+ let ty::OutlivesPredicate(r_a, r_b) = self.replace_bound_vars_with_placeholders(predicate);
+ let origin =
+ SubregionOrigin::from_obligation_cause(cause, || RelateRegionParamBound(cause.span));
+ self.sub_regions(origin, r_b, r_a); // `b : a` ==> `a <= b`
+ }
+
+ /// Number of type variables created so far.
+ pub fn num_ty_vars(&self) -> usize {
+ self.inner.borrow_mut().type_variables().num_vars()
+ }
+
+ pub fn next_ty_var_id(&self, origin: TypeVariableOrigin) -> TyVid {
+ self.inner.borrow_mut().type_variables().new_var(self.universe(), origin)
+ }
+
+ pub fn next_ty_var(&self, origin: TypeVariableOrigin) -> Ty<'tcx> {
+ self.tcx.mk_ty_var(self.next_ty_var_id(origin))
+ }
+
+ pub fn next_ty_var_id_in_universe(
+ &self,
+ origin: TypeVariableOrigin,
+ universe: ty::UniverseIndex,
+ ) -> TyVid {
+ self.inner.borrow_mut().type_variables().new_var(universe, origin)
+ }
+
+ pub fn next_ty_var_in_universe(
+ &self,
+ origin: TypeVariableOrigin,
+ universe: ty::UniverseIndex,
+ ) -> Ty<'tcx> {
+ let vid = self.next_ty_var_id_in_universe(origin, universe);
+ self.tcx.mk_ty_var(vid)
+ }
+
+ pub fn next_const_var(&self, ty: Ty<'tcx>, origin: ConstVariableOrigin) -> ty::Const<'tcx> {
+ self.tcx.mk_const_var(self.next_const_var_id(origin), ty)
+ }
+
+ pub fn next_const_var_in_universe(
+ &self,
+ ty: Ty<'tcx>,
+ origin: ConstVariableOrigin,
+ universe: ty::UniverseIndex,
+ ) -> ty::Const<'tcx> {
+ let vid = self
+ .inner
+ .borrow_mut()
+ .const_unification_table()
+ .new_key(ConstVarValue { origin, val: ConstVariableValue::Unknown { universe } });
+ self.tcx.mk_const_var(vid, ty)
+ }
+
+ pub fn next_const_var_id(&self, origin: ConstVariableOrigin) -> ConstVid<'tcx> {
+ self.inner.borrow_mut().const_unification_table().new_key(ConstVarValue {
+ origin,
+ val: ConstVariableValue::Unknown { universe: self.universe() },
+ })
+ }
+
+ fn next_int_var_id(&self) -> IntVid {
+ self.inner.borrow_mut().int_unification_table().new_key(None)
+ }
+
+ pub fn next_int_var(&self) -> Ty<'tcx> {
+ self.tcx.mk_int_var(self.next_int_var_id())
+ }
+
+ fn next_float_var_id(&self) -> FloatVid {
+ self.inner.borrow_mut().float_unification_table().new_key(None)
+ }
+
+ pub fn next_float_var(&self) -> Ty<'tcx> {
+ self.tcx.mk_float_var(self.next_float_var_id())
+ }
+
+ /// Creates a fresh region variable with the next available index.
+ /// The variable will be created in the maximum universe created
+ /// thus far, allowing it to name any region created thus far.
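+ ///
+ /// Illustrative sketch (not compiled here; assumes an in-scope `infcx`
+ /// and some `origin: RegionVariableOrigin`):
+ ///
+ /// ```ignore (illustrative)
+ /// let r = infcx.next_region_var(origin);
+ /// // `r` lives in `infcx.universe()`, the maximal universe so far,
+ /// // so it can name any region created up to this point.
+ /// ```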
+ pub fn next_region_var(&self, origin: RegionVariableOrigin) -> ty::Region<'tcx> {
+ self.next_region_var_in_universe(origin, self.universe())
+ }
+
+ /// Creates a fresh region variable with the next available index
+ /// in the given universe; typically, you can use
+ /// `next_region_var` and just use the maximal universe.
+ pub fn next_region_var_in_universe(
+ &self,
+ origin: RegionVariableOrigin,
+ universe: ty::UniverseIndex,
+ ) -> ty::Region<'tcx> {
+ let region_var =
+ self.inner.borrow_mut().unwrap_region_constraints().new_region_var(universe, origin);
+ self.tcx.mk_region(ty::ReVar(region_var))
+ }
+
+ /// Return the universe that the region `r` was created in. For
+ /// most regions (e.g., `'static`, named regions from the user,
+ /// etc) this is the root universe U0. For inference variables or
+ /// placeholders, however, it will return the universe with which
+ /// they are associated.
+ pub fn universe_of_region(&self, r: ty::Region<'tcx>) -> ty::UniverseIndex {
+ self.inner.borrow_mut().unwrap_region_constraints().universe(r)
+ }
+
+ /// Number of region variables created so far.
+ pub fn num_region_vars(&self) -> usize {
+ self.inner.borrow_mut().unwrap_region_constraints().num_region_vars()
+ }
+
+ /// A convenience wrapper around `next_region_var` for use during NLL.
+ pub fn next_nll_region_var(&self, origin: NllRegionVariableOrigin) -> ty::Region<'tcx> {
+ self.next_region_var(RegionVariableOrigin::Nll(origin))
+ }
+
+ /// A convenience wrapper around `next_region_var` for use during NLL.
+ pub fn next_nll_region_var_in_universe(
+ &self,
+ origin: NllRegionVariableOrigin,
+ universe: ty::UniverseIndex,
+ ) -> ty::Region<'tcx> {
+ self.next_region_var_in_universe(RegionVariableOrigin::Nll(origin), universe)
+ }
+
+ pub fn var_for_def(&self, span: Span, param: &ty::GenericParamDef) -> GenericArg<'tcx> {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+ // Create a region inference variable for the given
+ // region parameter definition.
+ self.next_region_var(EarlyBoundRegion(span, param.name)).into()
+ }
+ GenericParamDefKind::Type { .. } => {
+ // Create a type inference variable for the given
+ // type parameter definition. The substitutions are
+ // for actual parameters that may be referred to by
+ // the default of this type parameter, if it exists.
+ // e.g., `struct Foo<A, B, C = (A, B)>(...);` when
+ // used in a path such as `Foo::<T, U>::new()` will
+ // use an inference variable for `C` with `[T, U]`
+ // as the substitutions for the default, `(T, U)`.
+ let ty_var_id = self.inner.borrow_mut().type_variables().new_var(
+ self.universe(),
+ TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeParameterDefinition(
+ param.name,
+ Some(param.def_id),
+ ),
+ span,
+ },
+ );
+
+ self.tcx.mk_ty_var(ty_var_id).into()
+ }
+ GenericParamDefKind::Const { .. } => {
+ let origin = ConstVariableOrigin {
+ kind: ConstVariableOriginKind::ConstParameterDefinition(
+ param.name,
+ param.def_id,
+ ),
+ span,
+ };
+ let const_var_id =
+ self.inner.borrow_mut().const_unification_table().new_key(ConstVarValue {
+ origin,
+ val: ConstVariableValue::Unknown { universe: self.universe() },
+ });
+ self.tcx.mk_const_var(const_var_id, self.tcx.type_of(param.def_id)).into()
+ }
+ }
+ }
+
+ /// Given a set of generics defined on a type or impl, returns a substitution mapping each
+ /// type/region parameter to a fresh inference variable.
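+ ///
+ /// Illustrative sketch (not compiled here; `foo_def_id` is a hypothetical
+ /// `DefId` for a `struct Foo<'a, T>`):
+ ///
+ /// ```ignore (illustrative)
+ /// let substs = infcx.fresh_substs_for_item(span, foo_def_id);
+ /// // `substs` is `['?r0, ?t0]`: a fresh region variable standing in
+ /// // for `'a` and a fresh type variable standing in for `T`.
+ /// ```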
+ pub fn fresh_substs_for_item(&self, span: Span, def_id: DefId) -> SubstsRef<'tcx> {
+ InternalSubsts::for_item(self.tcx, def_id, |param, _| self.var_for_def(span, param))
+ }
+
+ /// Returns `true` if errors have been reported since this infcx was
+ /// created. This is sometimes used as a heuristic to skip
+ /// reporting errors that often occur as a result of earlier
+ /// errors, but where it's hard to be 100% sure (e.g., unresolved
+ /// inference variables, regionck errors).
+ pub fn is_tainted_by_errors(&self) -> bool {
+ debug!(
+ "is_tainted_by_errors(err_count={}, err_count_on_creation={}, \
+ tainted_by_errors_flag={})",
+ self.tcx.sess.err_count(),
+ self.err_count_on_creation,
+ self.tainted_by_errors_flag.get()
+ );
+
+ if self.tcx.sess.err_count() > self.err_count_on_creation {
+ return true; // errors reported since this infcx was made
+ }
+ self.tainted_by_errors_flag.get()
+ }
+
+ /// Set the "tainted by errors" flag to true. We call this when we
+ /// observe an error from a prior pass.
+ pub fn set_tainted_by_errors(&self) {
+ debug!("set_tainted_by_errors()");
+ self.tainted_by_errors_flag.set(true)
+ }
+
+ pub fn skip_region_resolution(&self) {
+ let (var_infos, _) = {
+ let mut inner = self.inner.borrow_mut();
+ let inner = &mut *inner;
+ // Note: `inner.region_obligations` may not be empty, because we
+ // didn't necessarily call `process_registered_region_obligations`.
+ // This is okay, because that doesn't introduce new vars.
+ inner
+ .region_constraint_storage
+ .take()
+ .expect("regions already resolved")
+ .with_log(&mut inner.undo_log)
+ .into_infos_and_data()
+ };
+
+ let lexical_region_resolutions = LexicalRegionResolutions {
+ values: rustc_index::vec::IndexVec::from_elem_n(
+ crate::infer::lexical_region_resolve::VarValue::Value(self.tcx.lifetimes.re_erased),
+ var_infos.len(),
+ ),
+ };
+
+ let old_value = self.lexical_region_resolutions.replace(Some(lexical_region_resolutions));
+ assert!(old_value.is_none());
+ }
+
+ /// Process the region constraints and return any errors that
+ /// result. After this, no more unification operations should be
+ /// done -- or the compiler will panic -- but it is legal to use
+ /// `resolve_vars_if_possible` as well as `fully_resolve`.
+ pub fn resolve_regions(
+ &self,
+ outlives_env: &OutlivesEnvironment<'tcx>,
+ ) -> Vec<RegionResolutionError<'tcx>> {
+ let (var_infos, data) = {
+ let mut inner = self.inner.borrow_mut();
+ let inner = &mut *inner;
+ assert!(
+ self.is_tainted_by_errors() || inner.region_obligations.is_empty(),
+ "region_obligations not empty: {:#?}",
+ inner.region_obligations
+ );
+ inner
+ .region_constraint_storage
+ .take()
+ .expect("regions already resolved")
+ .with_log(&mut inner.undo_log)
+ .into_infos_and_data()
+ };
+
+ let region_rels = &RegionRelations::new(self.tcx, outlives_env.free_region_map());
+
+ let (lexical_region_resolutions, errors) =
+ lexical_region_resolve::resolve(outlives_env.param_env, region_rels, var_infos, data);
+
+ let old_value = self.lexical_region_resolutions.replace(Some(lexical_region_resolutions));
+ assert!(old_value.is_none());
+
+ errors
+ }
+
+ /// Process the region constraints and report any errors that
+ /// result. After this, no more unification operations should be
+ /// done -- or the compiler will panic -- but it is legal to use
+ /// `resolve_vars_if_possible` as well as `fully_resolve`.
+ ///
+ /// Make sure to call [`InferCtxt::process_registered_region_obligations`]
+ /// first, or preferably use [`InferCtxt::check_region_obligations_and_report_errors`]
+ /// to do both of these operations together.
+ pub fn resolve_regions_and_report_errors(
+ &self,
+ generic_param_scope: LocalDefId,
+ outlives_env: &OutlivesEnvironment<'tcx>,
+ ) {
+ let errors = self.resolve_regions(outlives_env);
+
+ if !self.is_tainted_by_errors() {
+ // As a heuristic, just skip reporting region errors
+ // altogether if other errors have been reported while
+ // this infcx was in use. This is totally hokey but
+ // otherwise we have a hard time separating legit region
+ // errors from silly ones.
+ self.report_region_errors(generic_param_scope, &errors);
+ }
+ }
+
+ /// Obtains (and clears) the current set of region
+ /// constraints. The inference context is still usable: further
+ /// unifications will simply add new constraints.
+ ///
+ /// This method is not meant to be used with normal lexical region
+ /// resolution. Rather, it is used in the NLL mode as a kind of
+ /// interim hack: basically we run normal type-check and generate
+ /// region constraints as normal, but then we take them and
+ /// translate them into the form that the NLL solver
+ /// understands. See the NLL module for more details.
+ pub fn take_and_reset_region_constraints(&self) -> RegionConstraintData<'tcx> {
+ assert!(
+ self.inner.borrow().region_obligations.is_empty(),
+ "region_obligations not empty: {:#?}",
+ self.inner.borrow().region_obligations
+ );
+
+ self.inner.borrow_mut().unwrap_region_constraints().take_and_reset_data()
+ }
+
+ /// Gives temporary access to the region constraint data.
+ pub fn with_region_constraints<R>(
+ &self,
+ op: impl FnOnce(&RegionConstraintData<'tcx>) -> R,
+ ) -> R {
+ let mut inner = self.inner.borrow_mut();
+ op(inner.unwrap_region_constraints().data())
+ }
+
+ pub fn region_var_origin(&self, vid: ty::RegionVid) -> RegionVariableOrigin {
+ let mut inner = self.inner.borrow_mut();
+ let inner = &mut *inner;
+ inner
+ .region_constraint_storage
+ .as_mut()
+ .expect("regions already resolved")
+ .with_log(&mut inner.undo_log)
+ .var_origin(vid)
+ }
+
+ /// Takes ownership of the list of region variable origins. This implies
+ /// that all the region constraints have already been taken, and
+ /// hence that `resolve_regions_and_report_errors` can never be
+ /// called. This is used only during NLL processing to "hand off" ownership
+ /// of the set of region variables into the NLL region context.
+ pub fn take_region_var_origins(&self) -> VarInfos {
+ let mut inner = self.inner.borrow_mut();
+ let (var_infos, data) = inner
+ .region_constraint_storage
+ .take()
+ .expect("regions already resolved")
+ .with_log(&mut inner.undo_log)
+ .into_infos_and_data();
+ assert!(data.is_empty());
+ var_infos
+ }
+
+ pub fn ty_to_string(&self, t: Ty<'tcx>) -> String {
+ self.resolve_vars_if_possible(t).to_string()
+ }
+
+ /// If `TyVar(vid)` resolves to a type, return that type. Else, return the
+ /// universe index of `TyVar(vid)`.
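+ ///
+ /// Illustrative sketch of the two outcomes (not compiled here; assumes
+ /// some `vid: TyVid`):
+ ///
+ /// ```ignore (illustrative)
+ /// match infcx.probe_ty_var(vid) {
+ ///     Ok(ty) => { /* `vid` was instantiated with `ty` */ }
+ ///     Err(universe) => { /* still unknown; created in `universe` */ }
+ /// }
+ /// ```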
+ pub fn probe_ty_var(&self, vid: TyVid) -> Result<Ty<'tcx>, ty::UniverseIndex> {
+ use self::type_variable::TypeVariableValue;
+
+ match self.inner.borrow_mut().type_variables().probe(vid) {
+ TypeVariableValue::Known { value } => Ok(value),
+ TypeVariableValue::Unknown { universe } => Err(universe),
+ }
+ }
+
+ /// Resolve any type variables found in `value` -- but only one
+ /// level. So, if the variable `?X` is bound to some type
+ /// `Foo<?Y>`, then this would return `Foo<?Y>` (but `?Y` may
+ /// itself be bound to a type).
+ ///
+ /// Useful when you only need to inspect the outermost level of
+ /// the type and don't care about nested types (or perhaps you
+ /// will be resolving them as well, e.g. in a loop).
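+ ///
+ /// Illustrative sketch (not compiled here; assumes `x_var` is the `Ty`
+ /// for a variable `?X` that has been unified with `Foo<?Y>`):
+ ///
+ /// ```ignore (illustrative)
+ /// let t = infcx.shallow_resolve(x_var);
+ /// // `t` is `Foo<?Y>`: only the outermost level is resolved and `?Y`
+ /// // is left as-is.
+ /// ```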
+ pub fn shallow_resolve<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ value.fold_with(&mut ShallowResolver { infcx: self })
+ }
+
+ pub fn root_var(&self, var: ty::TyVid) -> ty::TyVid {
+ self.inner.borrow_mut().type_variables().root_var(var)
+ }
+
+ /// Where possible, replaces type/const variables in
+ /// `value` with their final value. Note that region variables
+ /// are unaffected. If a type/const variable has not been unified, it
+ /// is left as is. This is an idempotent operation that does
+ /// not affect inference state in any way and so you can do it
+ /// at will.
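+ ///
+ /// Illustrative sketch (not compiled here): with `?X := Foo<?Y>` and
+ /// `?Y := u32`,
+ ///
+ /// ```ignore (illustrative)
+ /// let t = infcx.resolve_vars_if_possible(x_var);
+ /// // `t` is `Foo<u32>`; had `?Y` been unbound, `t` would be `Foo<?Y>`.
+ /// ```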
+ pub fn resolve_vars_if_possible<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ if !value.needs_infer() {
+ return value; // Avoid duplicated subst-folding.
+ }
+ let mut r = resolve::OpportunisticVarResolver::new(self);
+ value.fold_with(&mut r)
+ }
+
+ pub fn resolve_numeric_literals_with_default<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ if !value.needs_infer() {
+ return value; // Avoid duplicated subst-folding.
+ }
+ let mut r = InferenceLiteralEraser { tcx: self.tcx };
+ value.fold_with(&mut r)
+ }
+
+ /// Returns the first unresolved variable contained in `T`. In the
+ /// process of visiting `T`, this will resolve (where possible)
+ /// type variables in `T`, but it never constructs the final,
+ /// resolved type, so it's more efficient than
+ /// `resolve_vars_if_possible()`.
+ pub fn unresolved_type_vars<T>(&self, value: &T) -> Option<(Ty<'tcx>, Option<Span>)>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ value.visit_with(&mut resolve::UnresolvedTypeFinder::new(self)).break_value()
+ }
+
+ pub fn probe_const_var(
+ &self,
+ vid: ty::ConstVid<'tcx>,
+ ) -> Result<ty::Const<'tcx>, ty::UniverseIndex> {
+ match self.inner.borrow_mut().const_unification_table().probe_value(vid).val {
+ ConstVariableValue::Known { value } => Ok(value),
+ ConstVariableValue::Unknown { universe } => Err(universe),
+ }
+ }
+
+ pub fn fully_resolve<T: TypeFoldable<'tcx>>(&self, value: T) -> FixupResult<'tcx, T> {
+ /*!
+ * Attempts to resolve all type/region/const variables in
+ * `value`. Region inference must have been run already (e.g.,
+ * by calling `resolve_regions_and_report_errors`). If some
+ * variable was never unified, an `Err` results.
+ *
+ * This method is idempotent, but it is not typically invoked
+ * except during the writeback phase.
+ */
+
+ resolve::fully_resolve(self, value)
+ }
+
+ // [Note-Type-error-reporting]
+ // An invariant is that anytime the expected or actual type is Error (the special
+ // error type, meaning that an error occurred when typechecking this expression),
+ // this is a derived error. The error cascaded from another error (that was already
+ // reported), so it's not useful to display it to the user.
+ // The following methods implement this logic.
+ // They check if either the actual or expected type is Error, and don't print the error
+ // in this case. The typechecker should only ever report type errors involving mismatched
+ // types using one of these methods, and should not call span_err directly for such
+ // errors.
+
+ pub fn type_error_struct_with_diag<M>(
+ &self,
+ sp: Span,
+ mk_diag: M,
+ actual_ty: Ty<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>
+ where
+ M: FnOnce(String) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ {
+ let actual_ty = self.resolve_vars_if_possible(actual_ty);
+ debug!("type_error_struct_with_diag({:?}, {:?})", sp, actual_ty);
+
+ let mut err = mk_diag(self.ty_to_string(actual_ty));
+
+ // Don't report an error if actual type is `Error`.
+ if actual_ty.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
+
+ err
+ }
+
+ pub fn report_mismatched_types(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ err: TypeError<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let trace = TypeTrace::types(cause, true, expected, actual);
+ self.report_and_explain_type_error(trace, &err)
+ }
+
+ pub fn report_mismatched_consts(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: ty::Const<'tcx>,
+ actual: ty::Const<'tcx>,
+ err: TypeError<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let trace = TypeTrace::consts(cause, true, expected, actual);
+ self.report_and_explain_type_error(trace, &err)
+ }
+
+ pub fn replace_bound_vars_with_fresh_vars<T>(
+ &self,
+ span: Span,
+ lbrct: LateBoundRegionConversionTime,
+ value: ty::Binder<'tcx, T>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ if let Some(inner) = value.no_bound_vars() {
+ return inner;
+ }
+
+ struct ToFreshVars<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ span: Span,
+ lbrct: LateBoundRegionConversionTime,
+ map: FxHashMap<ty::BoundVar, ty::GenericArg<'tcx>>,
+ }
+
+ impl<'tcx> BoundVarReplacerDelegate<'tcx> for ToFreshVars<'_, 'tcx> {
+ fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx> {
+ self.map
+ .entry(br.var)
+ .or_insert_with(|| {
+ self.infcx
+ .next_region_var(LateBoundRegion(self.span, br.kind, self.lbrct))
+ .into()
+ })
+ .expect_region()
+ }
+ fn replace_ty(&mut self, bt: ty::BoundTy) -> Ty<'tcx> {
+ self.map
+ .entry(bt.var)
+ .or_insert_with(|| {
+ self.infcx
+ .next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: self.span,
+ })
+ .into()
+ })
+ .expect_ty()
+ }
+ fn replace_const(&mut self, bv: ty::BoundVar, ty: Ty<'tcx>) -> ty::Const<'tcx> {
+ self.map
+ .entry(bv)
+ .or_insert_with(|| {
+ self.infcx
+ .next_const_var(
+ ty,
+ ConstVariableOrigin {
+ kind: ConstVariableOriginKind::MiscVariable,
+ span: self.span,
+ },
+ )
+ .into()
+ })
+ .expect_const()
+ }
+ }
+ let delegate = ToFreshVars { infcx: self, span, lbrct, map: Default::default() };
+ self.tcx.replace_bound_vars_uncached(value, delegate)
+ }
+
+ /// See the [`region_constraints::RegionConstraintCollector::verify_generic_bound`] method.
+ pub fn verify_generic_bound(
+ &self,
+ origin: SubregionOrigin<'tcx>,
+ kind: GenericKind<'tcx>,
+ a: ty::Region<'tcx>,
+ bound: VerifyBound<'tcx>,
+ ) {
+ debug!("verify_generic_bound({:?}, {:?} <: {:?})", kind, a, bound);
+
+ self.inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .verify_generic_bound(origin, kind, a, bound);
+ }
+
+ /// Obtains the latest type of the given closure; this may be a
+ /// closure in the current function, in which case its
+ /// `ClosureKind` may not yet be known.
+ pub fn closure_kind(&self, closure_substs: SubstsRef<'tcx>) -> Option<ty::ClosureKind> {
+ let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ let closure_kind_ty = self.shallow_resolve(closure_kind_ty);
+ closure_kind_ty.to_opt_closure_kind()
+ }
+
+ /// Clears the selection, evaluation, and projection caches. This is useful when
+ /// repeatedly attempting to select an `Obligation` while changing only
+ /// its `ParamEnv`, since `FulfillmentContext` doesn't use probing.
+ pub fn clear_caches(&self) {
+ self.selection_cache.clear();
+ self.evaluation_cache.clear();
+ self.inner.borrow_mut().projection_cache().clear();
+ }
+
+ pub fn universe(&self) -> ty::UniverseIndex {
+ self.universe.get()
+ }
+
+ /// Creates and returns a fresh universe that extends all previous
+ /// universes. Updates `self.universe` to that new universe.
+ pub fn create_next_universe(&self) -> ty::UniverseIndex {
+ let u = self.universe.get().next_universe();
+ self.universe.set(u);
+ u
+ }
+
+ pub fn try_const_eval_resolve(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ unevaluated: ty::Unevaluated<'tcx>,
+ ty: Ty<'tcx>,
+ span: Option<Span>,
+ ) -> Result<ty::Const<'tcx>, ErrorHandled> {
+ match self.const_eval_resolve(param_env, unevaluated, span) {
+ Ok(Some(val)) => Ok(ty::Const::from_value(self.tcx, val, ty)),
+ Ok(None) => {
+ let tcx = self.tcx;
+ let def_id = unevaluated.def.did;
+ span_bug!(
+ tcx.def_span(def_id),
+ "unable to construct a constant value for the unevaluated constant {:?}",
+ unevaluated
+ );
+ }
+ Err(err) => Err(err),
+ }
+ }
+
+ /// Resolves and evaluates a constant.
+ ///
+ /// The constant can be located on a trait like `<A as B>::C`, in which case the given
+ /// substitutions and environment are used to resolve the constant. Alternatively if the
+ /// constant has generic parameters in scope the substitutions are used to evaluate the value of
+ /// the constant. For example in `fn foo<T>() { let _ = [0; bar::<T>()]; }` the repeat count
+ /// constant `bar::<T>()` requires a substitution for `T`, if the substitution for `T` is still
+ /// too generic for the constant to be evaluated then `Err(ErrorHandled::TooGeneric)` is
+ /// returned.
+ ///
+ /// This handles inference variables within both `param_env` and `substs` by
+ /// performing the operation on their respective canonical forms.
+ #[instrument(skip(self), level = "debug")]
+ pub fn const_eval_resolve(
+ &self,
+ mut param_env: ty::ParamEnv<'tcx>,
+ unevaluated: ty::Unevaluated<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToValTreeResult<'tcx> {
+ let mut substs = self.resolve_vars_if_possible(unevaluated.substs);
+ debug!(?substs);
+
+ // Postpone the evaluation of constants whose substs depend on inference
+ // variables
+ if substs.has_infer_types_or_consts() {
+ let ac = AbstractConst::new(self.tcx, unevaluated.shrink());
+ match ac {
+ Ok(None) => {
+ substs = InternalSubsts::identity_for_item(self.tcx, unevaluated.def.did);
+ param_env = self.tcx.param_env(unevaluated.def.did);
+ }
+ Ok(Some(ct)) => {
+ if ct.unify_failure_kind(self.tcx) == FailureKind::Concrete {
+ substs = replace_param_and_infer_substs_with_placeholder(self.tcx, substs);
+ } else {
+ return Err(ErrorHandled::TooGeneric);
+ }
+ }
+ Err(guar) => return Err(ErrorHandled::Reported(guar)),
+ }
+ }
+
+ let param_env_erased = self.tcx.erase_regions(param_env);
+ let substs_erased = self.tcx.erase_regions(substs);
+ debug!(?param_env_erased);
+ debug!(?substs_erased);
+
+ let unevaluated = ty::Unevaluated {
+ def: unevaluated.def,
+ substs: substs_erased,
+ promoted: unevaluated.promoted,
+ };
+
+ // The return value is the evaluated value which doesn't contain any reference to inference
+ // variables, thus we don't need to substitute back the original values.
+ self.tcx.const_eval_resolve_for_typeck(param_env_erased, unevaluated, span)
+ }
+
+ /// `ty_or_const_infer_var_changed` is equivalent to one of these two:
+ /// * `shallow_resolve(ty) != ty` (where `ty.kind = ty::Infer(_)`)
+ /// * `shallow_resolve(ct) != ct` (where `ct.kind = ty::ConstKind::Infer(_)`)
+ ///
+ /// However, `ty_or_const_infer_var_changed` is more efficient. It's always
+ /// inlined, despite being large, because it has only two call sites that
+ /// are extremely hot (both in `traits::fulfill`'s checking of `stalled_on`
+ /// inference variables), and it handles both `Ty` and `ty::Const` without
+ /// having to resort to storing full `GenericArg`s in `stalled_on`.
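+ ///
+ /// Illustrative sketch of the intended use (not compiled here; assumes a
+ /// `stalled_on: Vec<TyOrConstInferVar<'_>>` as kept by fulfillment):
+ ///
+ /// ```ignore (illustrative)
+ /// // Re-poll an obligation only if one of its stalled-on variables
+ /// // has since been instantiated.
+ /// let changed = stalled_on.iter().any(|&v| infcx.ty_or_const_infer_var_changed(v));
+ /// ```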
+ #[inline(always)]
+ pub fn ty_or_const_infer_var_changed(&self, infer_var: TyOrConstInferVar<'tcx>) -> bool {
+ match infer_var {
+ TyOrConstInferVar::Ty(v) => {
+ use self::type_variable::TypeVariableValue;
+
+ // If `inlined_probe` returns a `Known` value, it never equals
+ // `ty::Infer(ty::TyVar(v))`.
+ match self.inner.borrow_mut().type_variables().inlined_probe(v) {
+ TypeVariableValue::Unknown { .. } => false,
+ TypeVariableValue::Known { .. } => true,
+ }
+ }
+
+ TyOrConstInferVar::TyInt(v) => {
+ // If `inlined_probe_value` returns a value it's always a
+ // `ty::Int(_)` or `ty::UInt(_)`, which never matches a
+ // `ty::Infer(_)`.
+ self.inner.borrow_mut().int_unification_table().inlined_probe_value(v).is_some()
+ }
+
+ TyOrConstInferVar::TyFloat(v) => {
+ // If `probe_value` returns a value it's always a
+ // `ty::Float(_)`, which never matches a `ty::Infer(_)`.
+ //
+ // Not `inlined_probe_value(v)` because this call site is colder.
+ self.inner.borrow_mut().float_unification_table().probe_value(v).is_some()
+ }
+
+ TyOrConstInferVar::Const(v) => {
+ // If `probe_value` returns a `Known` value, it never equals
+ // `ty::ConstKind::Infer(ty::InferConst::Var(v))`.
+ //
+ // Not `inlined_probe_value(v)` because this call site is colder.
+ match self.inner.borrow_mut().const_unification_table().probe_value(v).val {
+ ConstVariableValue::Unknown { .. } => false,
+ ConstVariableValue::Known { .. } => true,
+ }
+ }
+ }
+ }
+}
+
+/// Helper for `ty_or_const_infer_var_changed` (see comment on that), currently
+/// used only for `traits::fulfill`'s list of `stalled_on` inference variables.
+#[derive(Copy, Clone, Debug)]
+pub enum TyOrConstInferVar<'tcx> {
+ /// Equivalent to `ty::Infer(ty::TyVar(_))`.
+ Ty(TyVid),
+ /// Equivalent to `ty::Infer(ty::IntVar(_))`.
+ TyInt(IntVid),
+ /// Equivalent to `ty::Infer(ty::FloatVar(_))`.
+ TyFloat(FloatVid),
+
+ /// Equivalent to `ty::ConstKind::Infer(ty::InferConst::Var(_))`.
+ Const(ConstVid<'tcx>),
+}
+
+impl<'tcx> TyOrConstInferVar<'tcx> {
+ /// Tries to extract an inference variable from a type or a constant, returns `None`
+ /// for types other than `ty::Infer(_)` (or `InferTy::Fresh*`) and
+ /// for constants other than `ty::ConstKind::Infer(_)` (or `InferConst::Fresh`).
+ pub fn maybe_from_generic_arg(arg: GenericArg<'tcx>) -> Option<Self> {
+ match arg.unpack() {
+ GenericArgKind::Type(ty) => Self::maybe_from_ty(ty),
+ GenericArgKind::Const(ct) => Self::maybe_from_const(ct),
+ GenericArgKind::Lifetime(_) => None,
+ }
+ }
+
+ /// Tries to extract an inference variable from a type, returns `None`
+ /// for types other than `ty::Infer(_)` (or `InferTy::Fresh*`).
+ pub fn maybe_from_ty(ty: Ty<'tcx>) -> Option<Self> {
+ match *ty.kind() {
+ ty::Infer(ty::TyVar(v)) => Some(TyOrConstInferVar::Ty(v)),
+ ty::Infer(ty::IntVar(v)) => Some(TyOrConstInferVar::TyInt(v)),
+ ty::Infer(ty::FloatVar(v)) => Some(TyOrConstInferVar::TyFloat(v)),
+ _ => None,
+ }
+ }
+
+ /// Tries to extract an inference variable from a constant, returns `None`
+ /// for constants other than `ty::ConstKind::Infer(_)` (or `InferConst::Fresh`).
+ pub fn maybe_from_const(ct: ty::Const<'tcx>) -> Option<Self> {
+ match ct.kind() {
+ ty::ConstKind::Infer(InferConst::Var(v)) => Some(TyOrConstInferVar::Const(v)),
+ _ => None,
+ }
+ }
+}
+
+/// Replaces `{integer}` with `i32` and `{float}` with `f64`.
+/// Used only for diagnostics.
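+///
+/// Illustrative sketch (not compiled here):
+///
+/// ```ignore (illustrative)
+/// // A diagnostic type such as `Vec<{integer}>` is rendered as `Vec<i32>`:
+/// let shown = ty.fold_with(&mut InferenceLiteralEraser { tcx });
+/// ```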
+struct InferenceLiteralEraser<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> TypeFolder<'tcx> for InferenceLiteralEraser<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match ty.kind() {
+ ty::Infer(ty::IntVar(_) | ty::FreshIntTy(_)) => self.tcx.types.i32,
+ ty::Infer(ty::FloatVar(_) | ty::FreshFloatTy(_)) => self.tcx.types.f64,
+ _ => ty.super_fold_with(self),
+ }
+ }
+}
+
+struct ShallowResolver<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for ShallowResolver<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ /// If `ty` is a type variable of some kind, resolve it one level
+ /// (but do not resolve types found in the result). If `ty` is
+ /// not a type variable, just return it unmodified.
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match *ty.kind() {
+ ty::Infer(ty::TyVar(v)) => {
+ // Not entirely obvious: if `ty` is a type variable,
+ // it can be resolved to an int/float variable, which
+ // can then be recursively resolved, hence the
+ // recursion. Note though that we prevent type
+ // variables from unifying to other type variables
+ // directly (though they may be embedded
+ // structurally), and we prevent cycles in any case,
+ // so this recursion should always be of very limited
+ // depth.
+ //
+ // Note: if these two lines are combined into one we get
+ // dynamic borrow errors on `self.inner`.
+ let known = self.infcx.inner.borrow_mut().type_variables().probe(v).known();
+ known.map_or(ty, |t| self.fold_ty(t))
+ }
+
+ ty::Infer(ty::IntVar(v)) => self
+ .infcx
+ .inner
+ .borrow_mut()
+ .int_unification_table()
+ .probe_value(v)
+ .map_or(ty, |v| v.to_type(self.infcx.tcx)),
+
+ ty::Infer(ty::FloatVar(v)) => self
+ .infcx
+ .inner
+ .borrow_mut()
+ .float_unification_table()
+ .probe_value(v)
+ .map_or(ty, |v| v.to_type(self.infcx.tcx)),
+
+ _ => ty,
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ if let ty::ConstKind::Infer(InferConst::Var(vid)) = ct.kind() {
+ self.infcx
+ .inner
+ .borrow_mut()
+ .const_unification_table()
+ .probe_value(vid)
+ .val
+ .known()
+ .unwrap_or(ct)
+ } else {
+ ct
+ }
+ }
+}
+
+impl<'tcx> TypeTrace<'tcx> {
+ pub fn span(&self) -> Span {
+ self.cause.span
+ }
+
+ pub fn types(
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: Terms(ExpectedFound::new(a_is_expected, a.into(), b.into())),
+ }
+ }
+
+ pub fn consts(
+ cause: &ObligationCause<'tcx>,
+ a_is_expected: bool,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> TypeTrace<'tcx> {
+ TypeTrace {
+ cause: cause.clone(),
+ values: Terms(ExpectedFound::new(a_is_expected, a.into(), b.into())),
+ }
+ }
+}
+
+impl<'tcx> SubregionOrigin<'tcx> {
+ pub fn span(&self) -> Span {
+ match *self {
+ Subtype(ref a) => a.span(),
+ RelateObjectBound(a) => a,
+ RelateParamBound(a, ..) => a,
+ RelateRegionParamBound(a) => a,
+ Reborrow(a) => a,
+ ReborrowUpvar(a, _) => a,
+ DataBorrowed(_, a) => a,
+ ReferenceOutlivesReferent(_, a) => a,
+ CompareImplItemObligation { span, .. } => span,
+ CheckAssociatedTypeBounds { ref parent, .. } => parent.span(),
+ }
+ }
+
+ pub fn from_obligation_cause<F>(cause: &traits::ObligationCause<'tcx>, default: F) -> Self
+ where
+ F: FnOnce() -> Self,
+ {
+ match *cause.code() {
+ traits::ObligationCauseCode::ReferenceOutlivesReferent(ref_type) => {
+ SubregionOrigin::ReferenceOutlivesReferent(ref_type, cause.span)
+ }
+
+ traits::ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id,
+ trait_item_def_id,
+ kind: _,
+ } => SubregionOrigin::CompareImplItemObligation {
+ span: cause.span,
+ impl_item_def_id,
+ trait_item_def_id,
+ },
+
+ traits::ObligationCauseCode::CheckAssociatedTypeBounds {
+ impl_item_def_id,
+ trait_item_def_id,
+ } => SubregionOrigin::CheckAssociatedTypeBounds {
+ impl_item_def_id,
+ trait_item_def_id,
+ parent: Box::new(default()),
+ },
+
+ _ => default(),
+ }
+ }
+}
+
+impl RegionVariableOrigin {
+ pub fn span(&self) -> Span {
+ match *self {
+ MiscVariable(a)
+ | PatternRegion(a)
+ | AddrOfRegion(a)
+ | Autoref(a)
+ | Coercion(a)
+ | EarlyBoundRegion(a, ..)
+ | LateBoundRegion(a, ..)
+ | UpvarRegion(_, a) => a,
+ Nll(..) => bug!("NLL variable used with `span`"),
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for RegionObligation<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "RegionObligation(sub_region={:?}, sup_type={:?})",
+ self.sub_region, self.sup_type
+ )
+ }
+}
+
+/// Replaces substs that reference param or infer variables with suitable
+/// placeholders. This function is meant to remove these param and infer
+/// substs when they're not actually needed to evaluate a constant.
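+///
+/// For example (illustratively), substs `[T, ?c]` become
+/// `[Placeholder(^0), Placeholder(^1)]`: each replaced argument gets a
+/// placeholder in the root universe whose name is its index in the list.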
+fn replace_param_and_infer_substs_with_placeholder<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+) -> SubstsRef<'tcx> {
+ tcx.mk_substs(substs.iter().enumerate().map(|(idx, arg)| {
+ match arg.unpack() {
+ GenericArgKind::Type(_)
+ if arg.has_param_types_or_consts() || arg.has_infer_types_or_consts() =>
+ {
+ tcx.mk_ty(ty::Placeholder(ty::PlaceholderType {
+ universe: ty::UniverseIndex::ROOT,
+ name: ty::BoundVar::from_usize(idx),
+ }))
+ .into()
+ }
+ GenericArgKind::Const(ct)
+ if ct.has_infer_types_or_consts() || ct.has_param_types_or_consts() =>
+ {
+ let ty = ct.ty();
+ // If the type references param or infer, replace that too...
+ if ty.has_param_types_or_consts() || ty.has_infer_types_or_consts() {
+ bug!("const `{ct}`'s type should not reference params or inference variables");
+ }
+ tcx.mk_const(ty::ConstS {
+ ty,
+ kind: ty::ConstKind::Placeholder(ty::PlaceholderConst {
+ universe: ty::UniverseIndex::ROOT,
+ name: ty::BoundVar::from_usize(idx),
+ }),
+ })
+ .into()
+ }
+ _ => arg,
+ }
+ }))
+}
diff --git a/compiler/rustc_infer/src/infer/nll_relate/mod.rs b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
new file mode 100644
index 000000000..bab4f3e9e
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/nll_relate/mod.rs
@@ -0,0 +1,1080 @@
+//! This code is kind of an alternate way of doing subtyping,
+//! supertyping, and type equating, distinct from the `combine.rs`
+//! code but very similar in its effect and design. Eventually the two
+//! ought to be merged. This code is intended for use in NLL and chalk.
+//!
+//! Here are the key differences:
+//!
+//! - This code may choose to bypass some checks (e.g., the occurs check)
+//! in the case where we know that there are no unbound type inference
+//! variables. This is the case for NLL, because at NLL time types are fully
+//! inferred up to regions.
+//! - This code uses "universes" to handle higher-ranked regions and
+//! not the leak-check. This is "more correct" than what rustc does
+//! and we are generally migrating in this direction, but NLL had to
+//! get there first.
+//!
+//! Also, this code assumes that there are no bound types at all, not even
+//! free ones. This is ok because:
+//! - we are not relating anything quantified over some type variable
+//! - we will have instantiated all the bound type vars already (the one
+//! thing we relate in chalk are basically domain goals and their
+//! constituents)
+
+use crate::infer::combine::ConstEquateRelation;
+use crate::infer::InferCtxt;
+use crate::infer::{ConstVarValue, ConstVariableValue};
+use crate::infer::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::relate::{self, Relate, RelateResult, TypeRelation};
+use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use rustc_middle::ty::{self, InferConst, Ty, TyCtxt};
+use rustc_span::Span;
+use std::fmt::Debug;
+use std::ops::ControlFlow;
+
+#[derive(PartialEq)]
+pub enum NormalizationStrategy {
+ Lazy,
+ Eager,
+}
+
+pub struct TypeRelating<'me, 'tcx, D>
+where
+ D: TypeRelatingDelegate<'tcx>,
+{
+ infcx: &'me InferCtxt<'me, 'tcx>,
+
+ /// Callback to use when we deduce an outlives relationship.
+ delegate: D,
+
+ /// How are we relating `a` and `b`?
+ ///
+ /// - Covariant means `a <: b`.
+ /// - Contravariant means `b <: a`.
+ /// - Invariant means `a == b`.
+ /// - Bivariant means that it doesn't matter.
+ ambient_variance: ty::Variance,
+
+ ambient_variance_info: ty::VarianceDiagInfo<'tcx>,
+
+ /// When we pass through a set of binders (e.g., when looking into
+ /// a `fn` type), we push a new bound region scope onto here. This
+ /// will contain the instantiated region for each region in those
+ /// binders. When we then encounter a `ReLateBound(d, br)`, we can
+ /// use the De Bruijn index `d` to find the right scope, and then
+ /// bound region name `br` to find the specific instantiation from
+ /// within that scope. See `replace_bound_region`.
+ ///
+ /// This field stores the instantiations for late-bound regions in
+ /// the `a` type.
+ a_scopes: Vec<BoundRegionScope<'tcx>>,
+
+ /// Same as `a_scopes`, but for the `b` type.
+ b_scopes: Vec<BoundRegionScope<'tcx>>,
+}
+
+pub trait TypeRelatingDelegate<'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx>;
+ fn span(&self) -> Span;
+
+ /// Push a constraint `sup: sub` -- this constraint must be
+ /// satisfied for the two types to be related. `sub` and `sup` may
+ /// be regions from the type or new variables created through the
+ /// delegate.
+ fn push_outlives(
+ &mut self,
+ sup: ty::Region<'tcx>,
+ sub: ty::Region<'tcx>,
+ info: ty::VarianceDiagInfo<'tcx>,
+ );
+
+ fn const_equate(&mut self, a: ty::Const<'tcx>, b: ty::Const<'tcx>);
+ fn register_opaque_type(
+ &mut self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ a_is_expected: bool,
+ ) -> Result<(), TypeError<'tcx>>;
+
+ /// Creates a new universe index. Used when instantiating placeholders.
+ fn create_next_universe(&mut self) -> ty::UniverseIndex;
+
+ /// Creates a new region variable representing a higher-ranked
+ /// region that is instantiated existentially. This creates an
+ /// inference variable, typically.
+ ///
+ /// So e.g., if you have `for<'a> fn(..) <: for<'b> fn(..)`, then
+ /// we will invoke this method to instantiate `'a` with an
+ /// inference variable (though `'b` would be instantiated first,
+ /// as a placeholder).
+ fn next_existential_region_var(&mut self, was_placeholder: bool) -> ty::Region<'tcx>;
+
+ /// Creates a new region variable representing a
+ /// higher-ranked region that is instantiated universally.
+ /// This creates a new region placeholder, typically.
+ ///
+ /// So e.g., if you have `for<'a> fn(..) <: for<'b> fn(..)`, then
+ /// we will invoke this method to instantiate `'b` with a
+ /// placeholder region.
+ fn next_placeholder_region(&mut self, placeholder: ty::PlaceholderRegion) -> ty::Region<'tcx>;
+
+ /// Creates a new existential region in the given universe. This
+ /// is used when handling subtyping and type variables -- if we
+ /// have that `?X <: Foo<'a>`, for example, we would instantiate
+ /// `?X` with a type like `Foo<'?0>` where `'?0` is a fresh
+ /// existential variable created by this function. We would then
+ /// relate `Foo<'?0>` with `Foo<'a>` (and probably add an outlives
+ /// relation stating that `'?0: 'a`).
+ fn generalize_existential(&mut self, universe: ty::UniverseIndex) -> ty::Region<'tcx>;
+
+ /// Define the normalization strategy to use, eager or lazy.
+ fn normalization() -> NormalizationStrategy;
+
+ /// Enables some optimizations if we do not expect inference variables
+ /// in the RHS of the relation.
+ fn forbid_inference_vars() -> bool;
+}
+
+#[derive(Clone, Debug, Default)]
+struct BoundRegionScope<'tcx> {
+ map: FxHashMap<ty::BoundRegion, ty::Region<'tcx>>,
+}
+
+#[derive(Copy, Clone)]
+struct UniversallyQuantified(bool);
+
+impl<'me, 'tcx, D> TypeRelating<'me, 'tcx, D>
+where
+ D: TypeRelatingDelegate<'tcx>,
+{
+ pub fn new(
+ infcx: &'me InferCtxt<'me, 'tcx>,
+ delegate: D,
+ ambient_variance: ty::Variance,
+ ) -> Self {
+ Self {
+ infcx,
+ delegate,
+ ambient_variance,
+ ambient_variance_info: ty::VarianceDiagInfo::default(),
+ a_scopes: vec![],
+ b_scopes: vec![],
+ }
+ }
+
+ fn ambient_covariance(&self) -> bool {
+ match self.ambient_variance {
+ ty::Variance::Covariant | ty::Variance::Invariant => true,
+ ty::Variance::Contravariant | ty::Variance::Bivariant => false,
+ }
+ }
+
+ fn ambient_contravariance(&self) -> bool {
+ match self.ambient_variance {
+ ty::Variance::Contravariant | ty::Variance::Invariant => true,
+ ty::Variance::Covariant | ty::Variance::Bivariant => false,
+ }
+ }
+
+ fn create_scope(
+ &mut self,
+ value: ty::Binder<'tcx, impl Relate<'tcx>>,
+ universally_quantified: UniversallyQuantified,
+ ) -> BoundRegionScope<'tcx> {
+ let mut scope = BoundRegionScope::default();
+
+ // Create a callback that creates (via the delegate) either an
+ // existential or placeholder region as needed.
+ let mut next_region = {
+ let delegate = &mut self.delegate;
+ let mut lazy_universe = None;
+ move |br: ty::BoundRegion| {
+ if universally_quantified.0 {
+ // The first time this closure is called, create a
+ // new universe for the placeholders we will make
+ // from here out.
+ let universe = lazy_universe.unwrap_or_else(|| {
+ let universe = delegate.create_next_universe();
+ lazy_universe = Some(universe);
+ universe
+ });
+
+ let placeholder = ty::PlaceholderRegion { universe, name: br.kind };
+ delegate.next_placeholder_region(placeholder)
+ } else {
+ delegate.next_existential_region_var(true)
+ }
+ }
+ };
+
+ value.skip_binder().visit_with(&mut ScopeInstantiator {
+ next_region: &mut next_region,
+ target_index: ty::INNERMOST,
+ bound_region_scope: &mut scope,
+ });
+
+ scope
+ }
+
+ /// When we encounter binders during the type traversal, we record
+ /// the value to substitute for each of the things contained in
+ /// that binder. (This will be either a universal placeholder or
+ /// an existential inference variable.) Given the De Bruijn index
+ /// `debruijn` (and name `br`) of some binder we have now
+ /// encountered, this routine finds the value that we instantiated
+ /// the region with; to do so, it indexes backwards into the list
+ /// of ambient scopes `scopes`.
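+ ///
+ /// For example (illustratively), with `first_free_index` = 0 and three
+ /// scopes pushed, `debruijn` = `INNERMOST` (0) selects the *last* scope,
+ /// `scopes[2]`, while `debruijn` = 2 selects `scopes[0]`.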
+ fn lookup_bound_region(
+ debruijn: ty::DebruijnIndex,
+ br: &ty::BoundRegion,
+ first_free_index: ty::DebruijnIndex,
+ scopes: &[BoundRegionScope<'tcx>],
+ ) -> ty::Region<'tcx> {
+ // The debruijn index is a "reverse index" into the
+ // scopes listing. So when we have INNERMOST (0), we
+ // want the *last* scope pushed, and so forth.
+ let debruijn_index = debruijn.index() - first_free_index.index();
+ let scope = &scopes[scopes.len() - debruijn_index - 1];
+
+ // Find this bound region in that scope to map to a
+ // particular region.
+ scope.map[br]
+ }
+
+ /// If `r` is a bound region, find the scope in which it is bound
+ /// (from `scopes`) and return the value that we instantiated it
+ /// with. Otherwise just return `r`.
+ fn replace_bound_region(
+ &self,
+ r: ty::Region<'tcx>,
+ first_free_index: ty::DebruijnIndex,
+ scopes: &[BoundRegionScope<'tcx>],
+ ) -> ty::Region<'tcx> {
+ debug!("replace_bound_regions(scopes={:?})", scopes);
+ if let ty::ReLateBound(debruijn, br) = *r {
+ Self::lookup_bound_region(debruijn, &br, first_free_index, scopes)
+ } else {
+ r
+ }
+ }
+
+ /// Push a new outlives requirement into our output set of
+ /// constraints.
+ fn push_outlives(
+ &mut self,
+ sup: ty::Region<'tcx>,
+ sub: ty::Region<'tcx>,
+ info: ty::VarianceDiagInfo<'tcx>,
+ ) {
+ debug!("push_outlives({:?}: {:?})", sup, sub);
+
+ self.delegate.push_outlives(sup, sub, info);
+ }
+
+ /// Relate a projection type and some value type lazily. This will always
+ /// succeed, but we push an additional `ProjectionEq` goal depending
+ /// on the value type:
+ /// - if the value type is any type `T` which is not a projection, we push
+ /// `ProjectionEq(projection = T)`.
+ /// - if the value type is another projection `other_projection`, we create
+ /// a new inference variable `?U` and push the two goals
+ /// `ProjectionEq(projection = ?U)`, `ProjectionEq(other_projection = ?U)`.
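+ ///
+ /// For example (illustratively), relating the projection `<A as Tr>::Out`
+ /// with another projection `<B as Tr>::Out` creates a fresh `?U` and pushes
+ /// `ProjectionEq(<A as Tr>::Out = ?U)` and `ProjectionEq(<B as Tr>::Out = ?U)`.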
+ fn relate_projection_ty(
+ &mut self,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ value_ty: Ty<'tcx>,
+ ) -> Ty<'tcx> {
+ use rustc_span::DUMMY_SP;
+
+ match *value_ty.kind() {
+ ty::Projection(other_projection_ty) => {
+ let var = self.infcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: DUMMY_SP,
+ });
+ // FIXME(lazy-normalization): This will always ICE, because the recursive
+ // call will end up in the _ arm below.
+ self.relate_projection_ty(projection_ty, var);
+ self.relate_projection_ty(other_projection_ty, var);
+ var
+ }
+
+ _ => bug!("should never be invoked with eager normalization"),
+ }
+ }
+
+ /// Relate a type inference variable with a value type. This works
+ /// by creating a "generalization" G of the value where all the
+ /// lifetimes are replaced with fresh inference values. This
+ /// generalization G becomes the value of the inference variable,
+ /// and is then related in turn to the value. So e.g. if you had
+ /// `vid = ?0` and `value = &'a u32`, we might first instantiate
+ /// `?0` to a type like `&'0 u32` where `'0` is a fresh variable,
+ /// and then relate `&'0 u32` with `&'a u32` (resulting in
+ /// relations between `'0` and `'a`).
+ ///
+ /// The variable `pair` can be either a `(vid, ty)` or `(ty, vid)`
+ /// -- in other words, it is always an (unresolved) inference
+ /// variable `vid` and a type `ty` that are being related, but the
+ /// vid may appear either as the "a" type or the "b" type,
+ /// depending on where it appears in the tuple. The trait
+ /// `VidValuePair` lets us work with the vid/type while preserving
+ /// the "sidedness" when necessary -- the sidedness is relevant in
+ /// particular for the variance and set of in-scope things.
+ fn relate_ty_var<PAIR: VidValuePair<'tcx>>(
+ &mut self,
+ pair: PAIR,
+ ) -> RelateResult<'tcx, Ty<'tcx>> {
+ debug!("relate_ty_var({:?})", pair);
+
+ let vid = pair.vid();
+ let value_ty = pair.value_ty();
+
+ // FIXME(invariance) -- this logic assumes invariance, but that is wrong.
+ // This only presently applies to chalk integration, as NLL
+ // doesn't permit type variables to appear on both sides (and
+ // doesn't use lazy norm).
+ match *value_ty.kind() {
+ ty::Infer(ty::TyVar(value_vid)) => {
+ // Two type variables: just equate them.
+ self.infcx.inner.borrow_mut().type_variables().equate(vid, value_vid);
+ return Ok(value_ty);
+ }
+
+ ty::Projection(projection_ty) if D::normalization() == NormalizationStrategy::Lazy => {
+ return Ok(self.relate_projection_ty(projection_ty, self.infcx.tcx.mk_ty_var(vid)));
+ }
+
+ _ => (),
+ }
+
+ let generalized_ty = self.generalize_value(value_ty, vid)?;
+ debug!("relate_ty_var: generalized_ty = {:?}", generalized_ty);
+
+ if D::forbid_inference_vars() {
+ // In NLL, we don't have type inference variables
+ // floating around, so we can do this rather imprecise
+ // variant of the occurs-check.
+ assert!(!generalized_ty.has_infer_types_or_consts());
+ }
+
+ self.infcx.inner.borrow_mut().type_variables().instantiate(vid, generalized_ty);
+
+ // The generalized values we extract from `canonical_var_values` have
+ // been fully instantiated and hence the set of scopes we have
+ // doesn't matter -- just to be sure, put an empty vector
+ // in there.
+ let old_a_scopes = std::mem::take(pair.vid_scopes(self));
+
+ // Relate the generalized kind to the original one.
+ let result = pair.relate_generalized_ty(self, generalized_ty);
+
+ // Restore the old scopes now.
+ *pair.vid_scopes(self) = old_a_scopes;
+
+ debug!("relate_ty_var: complete, result = {:?}", result);
+ result
+ }
+
+ fn generalize_value<T: Relate<'tcx>>(
+ &mut self,
+ value: T,
+ for_vid: ty::TyVid,
+ ) -> RelateResult<'tcx, T> {
+ let universe = self.infcx.probe_ty_var(for_vid).unwrap_err();
+
+ let mut generalizer = TypeGeneralizer {
+ infcx: self.infcx,
+ delegate: &mut self.delegate,
+ first_free_index: ty::INNERMOST,
+ ambient_variance: self.ambient_variance,
+ for_vid_sub_root: self.infcx.inner.borrow_mut().type_variables().sub_root_var(for_vid),
+ universe,
+ };
+
+ generalizer.relate(value, value)
+ }
+}
+
+/// When we instantiate an inference variable with a value in
+/// `relate_ty_var`, we always have the pair of a `TyVid` and a `Ty`,
+/// but the ordering may vary (depending on whether the inference
+/// variable was found on the `a` or `b` sides). Therefore, this trait
+/// allows us to factor out common code, while preserving the order
+/// when needed.
+trait VidValuePair<'tcx>: Debug {
+ /// Extract the inference variable (which could be either the
+ /// first or second part of the tuple).
+ fn vid(&self) -> ty::TyVid;
+
+ /// Extract the value it is being related to (which will be the
+ /// opposite part of the tuple from the vid).
+ fn value_ty(&self) -> Ty<'tcx>;
+
+ /// Extract the scopes that apply to whichever side of the tuple
+ /// the vid was found on. See the comment where this is called
+ /// for more details on why we want them.
+ fn vid_scopes<'r, D: TypeRelatingDelegate<'tcx>>(
+ &self,
+ relate: &'r mut TypeRelating<'_, 'tcx, D>,
+ ) -> &'r mut Vec<BoundRegionScope<'tcx>>;
+
+ /// Given a generalized type G that should replace the vid, relate
+ /// G to the value, putting G on whichever side the vid would have
+ /// appeared.
+ fn relate_generalized_ty<D>(
+ &self,
+ relate: &mut TypeRelating<'_, 'tcx, D>,
+ generalized_ty: Ty<'tcx>,
+ ) -> RelateResult<'tcx, Ty<'tcx>>
+ where
+ D: TypeRelatingDelegate<'tcx>;
+}
+
+impl<'tcx> VidValuePair<'tcx> for (ty::TyVid, Ty<'tcx>) {
+ fn vid(&self) -> ty::TyVid {
+ self.0
+ }
+
+ fn value_ty(&self) -> Ty<'tcx> {
+ self.1
+ }
+
+ fn vid_scopes<'r, D>(
+ &self,
+ relate: &'r mut TypeRelating<'_, 'tcx, D>,
+ ) -> &'r mut Vec<BoundRegionScope<'tcx>>
+ where
+ D: TypeRelatingDelegate<'tcx>,
+ {
+ &mut relate.a_scopes
+ }
+
+ fn relate_generalized_ty<D>(
+ &self,
+ relate: &mut TypeRelating<'_, 'tcx, D>,
+ generalized_ty: Ty<'tcx>,
+ ) -> RelateResult<'tcx, Ty<'tcx>>
+ where
+ D: TypeRelatingDelegate<'tcx>,
+ {
+ relate.relate(generalized_ty, self.value_ty())
+ }
+}
+
+// In this case, the "vid" is the "b" type.
+impl<'tcx> VidValuePair<'tcx> for (Ty<'tcx>, ty::TyVid) {
+ fn vid(&self) -> ty::TyVid {
+ self.1
+ }
+
+ fn value_ty(&self) -> Ty<'tcx> {
+ self.0
+ }
+
+ fn vid_scopes<'r, D>(
+ &self,
+ relate: &'r mut TypeRelating<'_, 'tcx, D>,
+ ) -> &'r mut Vec<BoundRegionScope<'tcx>>
+ where
+ D: TypeRelatingDelegate<'tcx>,
+ {
+ &mut relate.b_scopes
+ }
+
+ fn relate_generalized_ty<D>(
+ &self,
+ relate: &mut TypeRelating<'_, 'tcx, D>,
+ generalized_ty: Ty<'tcx>,
+ ) -> RelateResult<'tcx, Ty<'tcx>>
+ where
+ D: TypeRelatingDelegate<'tcx>,
+ {
+ relate.relate(self.value_ty(), generalized_ty)
+ }
+}
+
+impl<'tcx, D> TypeRelation<'tcx> for TypeRelating<'_, 'tcx, D>
+where
+ D: TypeRelatingDelegate<'tcx>,
+{
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.delegate.param_env()
+ }
+
+ fn tag(&self) -> &'static str {
+ "nll::subtype"
+ }
+
+ fn a_is_expected(&self) -> bool {
+ true
+ }
+
+ #[instrument(skip(self, info), level = "trace")]
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ variance: ty::Variance,
+ info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ let old_ambient_variance = self.ambient_variance;
+ self.ambient_variance = self.ambient_variance.xform(variance);
+ self.ambient_variance_info = self.ambient_variance_info.xform(info);
+
+ debug!(?self.ambient_variance);
+
+ let r = self.relate(a, b)?;
+
+ self.ambient_variance = old_ambient_variance;
+
+ debug!(?r);
+
+ Ok(r)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn tys(&mut self, a: Ty<'tcx>, mut b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ let infcx = self.infcx;
+
+ let a = self.infcx.shallow_resolve(a);
+
+ if !D::forbid_inference_vars() {
+ b = self.infcx.shallow_resolve(b);
+ }
+
+ if a == b {
+ // Subtle: if a or b has a bound variable that we are lazily
+ // substituting, then even if a == b, it could be that the values we
+ // will substitute for those bound variables are *not* the same, and
+ // hence returning `Ok(a)` is incorrect.
+ if !a.has_escaping_bound_vars() && !b.has_escaping_bound_vars() {
+ return Ok(a);
+ }
+ }
+
+ match (a.kind(), b.kind()) {
+ (_, &ty::Infer(ty::TyVar(vid))) => {
+ if D::forbid_inference_vars() {
+ // Forbid inference variables in the RHS.
+ bug!("unexpected inference var {:?}", b)
+ } else {
+ self.relate_ty_var((a, vid))
+ }
+ }
+
+ (&ty::Infer(ty::TyVar(vid)), _) => self.relate_ty_var((vid, b)),
+
+ (&ty::Opaque(a_def_id, _), &ty::Opaque(b_def_id, _)) if a_def_id == b_def_id => {
+ self.infcx.super_combine_tys(self, a, b)
+ }
+ (&ty::Opaque(did, ..), _) | (_, &ty::Opaque(did, ..)) if did.is_local() => {
+ let (a, b) = if self.a_is_expected() { (a, b) } else { (b, a) };
+ let mut generalize = |ty, ty_is_expected| {
+ let var = infcx.next_ty_var_id_in_universe(
+ TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: self.delegate.span(),
+ },
+ ty::UniverseIndex::ROOT,
+ );
+ if ty_is_expected {
+ self.relate_ty_var((ty, var))
+ } else {
+ self.relate_ty_var((var, ty))
+ }
+ };
+ let (a, b) = match (a.kind(), b.kind()) {
+ (&ty::Opaque(..), _) => (a, generalize(b, false)?),
+ (_, &ty::Opaque(..)) => (generalize(a, true)?, b),
+ _ => unreachable!(),
+ };
+ self.delegate.register_opaque_type(a, b, true)?;
+ trace!(a = ?a.kind(), b = ?b.kind(), "opaque type instantiated");
+ Ok(a)
+ }
+
+ (&ty::Projection(projection_ty), _)
+ if D::normalization() == NormalizationStrategy::Lazy =>
+ {
+ Ok(self.relate_projection_ty(projection_ty, b))
+ }
+
+ (_, &ty::Projection(projection_ty))
+ if D::normalization() == NormalizationStrategy::Lazy =>
+ {
+ Ok(self.relate_projection_ty(projection_ty, a))
+ }
+
+ _ => {
+ debug!(?a, ?b, ?self.ambient_variance);
+
+ // Will also handle unification of `IntVar` and `FloatVar`.
+ self.infcx.super_combine_tys(self, a, b)
+ }
+ }
+ }
+
+ #[instrument(skip(self), level = "trace")]
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug!(?self.ambient_variance);
+
+ let v_a = self.replace_bound_region(a, ty::INNERMOST, &self.a_scopes);
+ let v_b = self.replace_bound_region(b, ty::INNERMOST, &self.b_scopes);
+
+ debug!(?v_a);
+ debug!(?v_b);
+
+ if self.ambient_covariance() {
+ // Covariance: a <= b. Hence, `b: a`.
+ self.push_outlives(v_b, v_a, self.ambient_variance_info);
+ }
+
+ if self.ambient_contravariance() {
+ // Contravariant: b <= a. Hence, `a: b`.
+ self.push_outlives(v_a, v_b, self.ambient_variance_info);
+ }
+
+ Ok(a)
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ mut b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ let a = self.infcx.shallow_resolve(a);
+
+ if !D::forbid_inference_vars() {
+ b = self.infcx.shallow_resolve(b);
+ }
+
+ match b.kind() {
+ ty::ConstKind::Infer(InferConst::Var(_)) if D::forbid_inference_vars() => {
+ // Forbid inference variables in the RHS.
+ self.infcx.tcx.sess.delay_span_bug(
+ self.delegate.span(),
+ format!("unexpected inference var {:?}", b,),
+ );
+ Ok(a)
+ }
+ // FIXME(invariance): see the related FIXME above.
+ _ => self.infcx.super_combine_consts(self, a, b),
+ }
+ }
+
+ #[instrument(skip(self), level = "trace")]
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ // We want that
+ //
+ // ```
+ // for<'a> fn(&'a u32) -> &'a u32 <:
+ // fn(&'b u32) -> &'b u32
+ // ```
+ //
+ // but not
+ //
+ // ```
+ // fn(&'a u32) -> &'a u32 <:
+ // for<'b> fn(&'b u32) -> &'b u32
+ // ```
+ //
+ // We therefore proceed as follows:
+ //
+ // - Instantiate binders on `b` universally, yielding a universe U1.
+ // - Instantiate binders on `a` existentially in U1.
+
+ debug!(?self.ambient_variance);
+
+ if let (Some(a), Some(b)) = (a.no_bound_vars(), b.no_bound_vars()) {
+ // Fast path for the common case.
+ self.relate(a, b)?;
+ return Ok(ty::Binder::dummy(a));
+ }
+
+ if self.ambient_covariance() {
+ // Covariance, so we want `for<..> A <: for<..> B` --
+ // therefore we compare any instantiation of A (i.e., A
+ // instantiated with existentials) against every
+ // instantiation of B (i.e., B instantiated with
+ // universals).
+
+ let b_scope = self.create_scope(b, UniversallyQuantified(true));
+ let a_scope = self.create_scope(a, UniversallyQuantified(false));
+
+ debug!(?a_scope, "(existential)");
+ debug!(?b_scope, "(universal)");
+
+ self.b_scopes.push(b_scope);
+ self.a_scopes.push(a_scope);
+
+ // Reset the ambient variance to covariant. This is needed
+ // to correctly handle cases like
+ //
+ // for<'a> fn(&'a u32, &'a u32) == for<'b, 'c> fn(&'b u32, &'c u32)
+ //
+ // Somewhat surprisingly, these two types are actually
+ // **equal**, even though the one on the right looks more
+ // polymorphic. The reason is due to subtyping. To see it,
+ // consider that each function can call the other:
+ //
+ // - The left function can call the right with `'b` and
+ // `'c` both equal to `'a`
+ //
+ // - The right function can call the left with `'a` set to
+ // `{P}`, where P is the point in the CFG where the call
+ // itself occurs. Note that `'b` and `'c` must both
+ // include P. At that point, the call works because of
+ // subtyping (i.e., `&'b u32 <: &{P} u32`).
+ let variance = std::mem::replace(&mut self.ambient_variance, ty::Variance::Covariant);
+
+ self.relate(a.skip_binder(), b.skip_binder())?;
+
+ self.ambient_variance = variance;
+
+ self.b_scopes.pop().unwrap();
+ self.a_scopes.pop().unwrap();
+ }
+
+ if self.ambient_contravariance() {
+ // Contravariance, so we want `for<..> A :> for<..> B`
+ // -- therefore we compare every instantiation of A (i.e.,
+ // A instantiated with universals) against any
+ // instantiation of B (i.e., B instantiated with
+ // existentials). Opposite of above.
+
+ let a_scope = self.create_scope(a, UniversallyQuantified(true));
+ let b_scope = self.create_scope(b, UniversallyQuantified(false));
+
+ debug!(?a_scope, "(universal)");
+ debug!(?b_scope, "(existential)");
+
+ self.a_scopes.push(a_scope);
+ self.b_scopes.push(b_scope);
+
+ // Reset ambient variance to contravariance. See the
+ // covariant case above for an explanation.
+ let variance =
+ std::mem::replace(&mut self.ambient_variance, ty::Variance::Contravariant);
+
+ self.relate(a.skip_binder(), b.skip_binder())?;
+
+ self.ambient_variance = variance;
+
+ self.b_scopes.pop().unwrap();
+ self.a_scopes.pop().unwrap();
+ }
+
+ Ok(a)
+ }
+}
+
+impl<'tcx, D> ConstEquateRelation<'tcx> for TypeRelating<'_, 'tcx, D>
+where
+ D: TypeRelatingDelegate<'tcx>,
+{
+ fn const_equate_obligation(&mut self, a: ty::Const<'tcx>, b: ty::Const<'tcx>) {
+ self.delegate.const_equate(a, b);
+ }
+}
+
+/// When we encounter a binder like `for<..> fn(..)`, we actually have
+/// to walk the `fn` value to find all the values bound by the `for`
+/// (these are not explicitly present in the ty representation right
+/// now). This visitor handles that: it descends the type, tracking
+/// binder depth, and finds late-bound regions targeting the
+/// `for<..`>. For each of those, it creates an entry in
+/// `bound_region_scope`.
+struct ScopeInstantiator<'me, 'tcx> {
+ next_region: &'me mut dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ // The debruijn index of the scope we are instantiating.
+ target_index: ty::DebruijnIndex,
+ bound_region_scope: &'me mut BoundRegionScope<'tcx>,
+}
+
+impl<'me, 'tcx> TypeVisitor<'tcx> for ScopeInstantiator<'me, 'tcx> {
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &ty::Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.target_index.shift_in(1);
+ t.super_visit_with(self);
+ self.target_index.shift_out(1);
+
+ ControlFlow::CONTINUE
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let ScopeInstantiator { bound_region_scope, next_region, .. } = self;
+
+ match *r {
+ ty::ReLateBound(debruijn, br) if debruijn == self.target_index => {
+ bound_region_scope.map.entry(br).or_insert_with(|| next_region(br));
+ }
+
+ _ => {}
+ }
+
+ ControlFlow::CONTINUE
+ }
+}
+
+/// The "type generalizer" is used when handling inference variables.
+///
+/// The basic strategy for handling a constraint like `?A <: B` is to
+/// apply a "generalization strategy" to the type `B` -- this replaces
+/// all the lifetimes in the type `B` with fresh inference
+/// variables. (You can read more about the strategy in this [blog
+/// post].)
+///
+/// As an example, if we had `?A <: &'x u32`, we would generalize `&'x
+/// u32` to `&'0 u32` where `'0` is a fresh variable. This becomes the
+/// value of `A`. Finally, we relate `&'0 u32 <: &'x u32`, which
+/// establishes `'0: 'x` as a constraint.
+///
+/// As a side-effect of this generalization procedure, we also replace
+/// all the bound regions that we have traversed with concrete values,
+/// so that the resulting generalized type is independent from the
+/// scopes.
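+///
+/// A sketch of the procedure on the example above:
+///
+/// ```text
+/// ?A <: &'x u32
+/// generalize(&'x u32) = &'0 u32   // '0 fresh
+/// ?A := &'0 u32
+/// relate(&'0 u32 <: &'x u32)      // establishes '0: 'x
+/// ```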
+///
+/// [blog post]: https://is.gd/0hKvIr
+struct TypeGeneralizer<'me, 'tcx, D>
+where
+ D: TypeRelatingDelegate<'tcx>,
+{
+ infcx: &'me InferCtxt<'me, 'tcx>,
+
+ delegate: &'me mut D,
+
+ /// After we generalize this type, we are going to relate it to
+ /// some other type. What will be the variance at this point?
+ ambient_variance: ty::Variance,
+
+ first_free_index: ty::DebruijnIndex,
+
+ /// The vid of the type variable that is in the process of being
+ /// instantiated. If we find this within the value we are folding,
+ /// that means we would have created a cyclic value.
+ for_vid_sub_root: ty::TyVid,
+
+ /// The universe of the type variable that is in the process of being
+ /// instantiated. If we find anything that this universe cannot name,
+ /// we reject the relation.
+ universe: ty::UniverseIndex,
+}
+
+impl<'tcx, D> TypeRelation<'tcx> for TypeGeneralizer<'_, 'tcx, D>
+where
+ D: TypeRelatingDelegate<'tcx>,
+{
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.delegate.param_env()
+ }
+
+ fn tag(&self) -> &'static str {
+ "nll::generalizer"
+ }
+
+ fn a_is_expected(&self) -> bool {
+ true
+ }
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ variance: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ debug!(
+ "TypeGeneralizer::relate_with_variance(variance={:?}, a={:?}, b={:?})",
+ variance, a, b
+ );
+
+ let old_ambient_variance = self.ambient_variance;
+ self.ambient_variance = self.ambient_variance.xform(variance);
+
+ debug!(
+ "TypeGeneralizer::relate_with_variance: ambient_variance = {:?}",
+ self.ambient_variance
+ );
+
+ let r = self.relate(a, b)?;
+
+ self.ambient_variance = old_ambient_variance;
+
+ debug!("TypeGeneralizer::relate_with_variance: r={:?}", r);
+
+ Ok(r)
+ }
+
+ fn tys(&mut self, a: Ty<'tcx>, _: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ use crate::infer::type_variable::TypeVariableValue;
+
+ debug!("TypeGeneralizer::tys(a={:?})", a);
+
+ match *a.kind() {
+ ty::Infer(ty::TyVar(_)) | ty::Infer(ty::IntVar(_)) | ty::Infer(ty::FloatVar(_))
+ if D::forbid_inference_vars() =>
+ {
+ bug!("unexpected inference variable encountered in NLL generalization: {:?}", a);
+ }
+
+ ty::Infer(ty::TyVar(vid)) => {
+ let mut inner = self.infcx.inner.borrow_mut();
+ let variables = &mut inner.type_variables();
+ let vid = variables.root_var(vid);
+ let sub_vid = variables.sub_root_var(vid);
+ if sub_vid == self.for_vid_sub_root {
+ // If sub-roots are equal, then `for_vid` and
+ // `vid` are related via subtyping.
+ debug!("TypeGeneralizer::tys: occurs check failed");
+ Err(TypeError::Mismatch)
+ } else {
+ match variables.probe(vid) {
+ TypeVariableValue::Known { value: u } => {
+ drop(variables);
+ self.relate(u, u)
+ }
+ TypeVariableValue::Unknown { universe: _universe } => {
+ if self.ambient_variance == ty::Bivariant {
+ // FIXME: we may need a WF predicate (related to #54105).
+ }
+
+ let origin = *variables.var_origin(vid);
+
+                            // We replace with a new variable in the universe `self.universe`;
+                            // it will be unified later with the original type variable in
+                            // the universe `_universe`.
+ let new_var_id = variables.new_var(self.universe, origin);
+
+ let u = self.tcx().mk_ty_var(new_var_id);
+ debug!("generalize: replacing original vid={:?} with new={:?}", vid, u);
+ Ok(u)
+ }
+ }
+ }
+ }
+
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_)) => {
+ // No matter what mode we are in,
+ // integer/floating-point types must be equal to be
+ // relatable.
+ Ok(a)
+ }
+
+ ty::Placeholder(placeholder) => {
+ if self.universe.cannot_name(placeholder.universe) {
+ debug!(
+                    "TypeGeneralizer::tys: root universe {:?} cannot name \
+ placeholder in universe {:?}",
+ self.universe, placeholder.universe
+ );
+ Err(TypeError::Mismatch)
+ } else {
+ Ok(a)
+ }
+ }
+
+ _ => relate::super_relate_tys(self, a, a),
+ }
+ }
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ _: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug!("TypeGeneralizer::regions(a={:?})", a);
+
+ if let ty::ReLateBound(debruijn, _) = *a && debruijn < self.first_free_index {
+ return Ok(a);
+ }
+
+ // For now, we just always create a fresh region variable to
+ // replace all the regions in the source type. In the main
+ // type checker, we special case the case where the ambient
+ // variance is `Invariant` and try to avoid creating a fresh
+ // region variable, but since this comes up so much less in
+ // NLL (only when users use `_` etc) it is much less
+ // important.
+ //
+ // As an aside, since these new variables are created in
+ // `self.universe` universe, this also serves to enforce the
+ // universe scoping rules.
+ //
+ // FIXME(#54105) -- if the ambient variance is bivariant,
+ // though, we may however need to check well-formedness or
+ // risk a problem like #41677 again.
+
+ let replacement_region_vid = self.delegate.generalize_existential(self.universe);
+
+ Ok(replacement_region_vid)
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ _: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ match a.kind() {
+ ty::ConstKind::Infer(InferConst::Var(_)) if D::forbid_inference_vars() => {
+ bug!("unexpected inference variable encountered in NLL generalization: {:?}", a);
+ }
+ ty::ConstKind::Infer(InferConst::Var(vid)) => {
+ let mut inner = self.infcx.inner.borrow_mut();
+ let variable_table = &mut inner.const_unification_table();
+ let var_value = variable_table.probe_value(vid);
+ match var_value.val.known() {
+ Some(u) => self.relate(u, u),
+ None => {
+ let new_var_id = variable_table.new_key(ConstVarValue {
+ origin: var_value.origin,
+ val: ConstVariableValue::Unknown { universe: self.universe },
+ });
+ Ok(self.tcx().mk_const_var(new_var_id, a.ty()))
+ }
+ }
+ }
+ ty::ConstKind::Unevaluated(..) if self.tcx().lazy_normalization() => Ok(a),
+ _ => relate::super_relate_consts(self, a, a),
+ }
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ _: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ debug!("TypeGeneralizer::binders(a={:?})", a);
+
+ self.first_free_index.shift_in(1);
+ let result = self.relate(a.skip_binder(), a.skip_binder())?;
+ self.first_free_index.shift_out(1);
+ Ok(a.rebind(result))
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/opaque_types.rs b/compiler/rustc_infer/src/infer/opaque_types.rs
new file mode 100644
index 000000000..e579afbf3
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/opaque_types.rs
@@ -0,0 +1,649 @@
+use crate::infer::{DefiningAnchor, InferCtxt, InferOk};
+use crate::traits;
+use hir::def_id::{DefId, LocalDefId};
+use hir::{HirId, OpaqueTyOrigin};
+use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::vec_map::VecMap;
+use rustc_hir as hir;
+use rustc_middle::traits::ObligationCause;
+use rustc_middle::ty::fold::BottomUpFolder;
+use rustc_middle::ty::subst::{GenericArgKind, Subst};
+use rustc_middle::ty::{
+ self, OpaqueHiddenType, OpaqueTypeKey, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable,
+ TypeVisitable, TypeVisitor,
+};
+use rustc_span::Span;
+
+use std::ops::ControlFlow;
+
+pub type OpaqueTypeMap<'tcx> = VecMap<OpaqueTypeKey<'tcx>, OpaqueTypeDecl<'tcx>>;
+
+mod table;
+
+pub use table::{OpaqueTypeStorage, OpaqueTypeTable};
+
+use super::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use super::InferResult;
+
+/// Information about the opaque types whose values we
+/// are inferring in this function (these are the `impl Trait` that
+/// appear in the return type).
+#[derive(Clone, Debug)]
+pub struct OpaqueTypeDecl<'tcx> {
+ /// The hidden types that have been inferred for this opaque type.
+ /// There can be multiple, but they are all `lub`ed together at the end
+ /// to obtain the canonical hidden type.
+ pub hidden_type: OpaqueHiddenType<'tcx>,
+
+ /// The origin of the opaque type.
+ pub origin: hir::OpaqueTyOrigin,
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ /// This is a backwards compatibility hack to prevent breaking changes from
+ /// lazy TAIT around RPIT handling.
+ pub fn replace_opaque_types_with_inference_vars<T: TypeFoldable<'tcx>>(
+ &self,
+ value: T,
+ body_id: HirId,
+ span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> InferOk<'tcx, T> {
+ if !value.has_opaque_types() {
+ return InferOk { value, obligations: vec![] };
+ }
+ let mut obligations = vec![];
+ let replace_opaque_type = |def_id: DefId| {
+ def_id
+ .as_local()
+ .map_or(false, |def_id| self.opaque_type_origin(def_id, span).is_some())
+ };
+ let value = value.fold_with(&mut ty::fold::BottomUpFolder {
+ tcx: self.tcx,
+ lt_op: |lt| lt,
+ ct_op: |ct| ct,
+ ty_op: |ty| match *ty.kind() {
+ ty::Opaque(def_id, _substs) if replace_opaque_type(def_id) => {
+ let def_span = self.tcx.def_span(def_id);
+ let span = if span.contains(def_span) { def_span } else { span };
+ let code = traits::ObligationCauseCode::OpaqueReturnType(None);
+ let cause = ObligationCause::new(span, body_id, code);
+ // FIXME(compiler-errors): We probably should add a new TypeVariableOriginKind
+ // for opaque types, and then use that kind to fix the spans for type errors
+ // that we see later on.
+ let ty_var = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ });
+ obligations.extend(
+ self.handle_opaque_type(ty, ty_var, true, &cause, param_env)
+ .unwrap()
+ .obligations,
+ );
+ ty_var
+ }
+ _ => ty,
+ },
+ });
+ InferOk { value, obligations }
+ }
+
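+    /// Relates an opaque type with another type: whichever of `a` and `b`
+    /// is an opaque type in the current defining scope gets the other
+    /// registered as its hidden type (a summary of the body below; see
+    /// `register_hidden_type` for the registration itself).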
+ pub fn handle_opaque_type(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ a_is_expected: bool,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> InferResult<'tcx, ()> {
+ if a.references_error() || b.references_error() {
+ return Ok(InferOk { value: (), obligations: vec![] });
+ }
+ let (a, b) = if a_is_expected { (a, b) } else { (b, a) };
+ let process = |a: Ty<'tcx>, b: Ty<'tcx>| match *a.kind() {
+ ty::Opaque(def_id, substs) if def_id.is_local() => {
+ let def_id = def_id.expect_local();
+ let origin = match self.defining_use_anchor {
+ DefiningAnchor::Bind(_) => {
+                    // Check that this `impl Trait` type is
+ // declared by `parent_def_id` -- i.e., one whose
+ // value we are inferring. At present, this is
+ // always true during the first phase of
+ // type-check, but not always true later on during
+ // NLL. Once we support named opaque types more fully,
+ // this same scenario will be able to arise during all phases.
+ //
+ // Here is an example using type alias `impl Trait`
+ // that indicates the distinction we are checking for:
+ //
+ // ```rust
+ // mod a {
+ // pub type Foo = impl Iterator;
+ // pub fn make_foo() -> Foo { .. }
+ // }
+ //
+ // mod b {
+ // fn foo() -> a::Foo { a::make_foo() }
+ // }
+ // ```
+ //
+ // Here, the return type of `foo` references an
+ // `Opaque` indeed, but not one whose value is
+ // presently being inferred. You can get into a
+ // similar situation with closure return types
+ // today:
+ //
+ // ```rust
+ // fn foo() -> impl Iterator { .. }
+ // fn bar() {
+ // let x = || foo(); // returns the Opaque assoc with `foo`
+ // }
+ // ```
+ self.opaque_type_origin(def_id, cause.span)?
+ }
+ DefiningAnchor::Bubble => self.opaque_ty_origin_unchecked(def_id, cause.span),
+ DefiningAnchor::Error => return None,
+ };
+ if let ty::Opaque(did2, _) = *b.kind() {
+ // We could accept this, but there are various ways to handle this situation, and we don't
+                // want to make a decision on it right now. This case is likely so rare that
+                // no one encounters it in practice anyway.
+ // It does occur however in `fn fut() -> impl Future<Output = i32> { async { 42 } }`,
+ // where it is of no concern, so we only check for TAITs.
+ if let Some(OpaqueTyOrigin::TyAlias) =
+ did2.as_local().and_then(|did2| self.opaque_type_origin(did2, cause.span))
+ {
+ self.tcx
+ .sess
+ .struct_span_err(
+ cause.span,
+ "opaque type's hidden type cannot be another opaque type from the same scope",
+ )
+ .span_label(cause.span, "one of the two opaque types used here has to be outside its defining scope")
+ .span_note(
+ self.tcx.def_span(def_id),
+ "opaque type whose hidden type is being assigned",
+ )
+ .span_note(
+ self.tcx.def_span(did2),
+ "opaque type being used as hidden type",
+ )
+ .emit();
+ }
+ }
+ Some(self.register_hidden_type(
+ OpaqueTypeKey { def_id, substs },
+ cause.clone(),
+ param_env,
+ b,
+ origin,
+ ))
+ }
+ _ => None,
+ };
+ if let Some(res) = process(a, b) {
+ res
+ } else if let Some(res) = process(b, a) {
+ res
+ } else {
+ // Rerun equality check, but this time error out due to
+ // different types.
+ match self.at(cause, param_env).define_opaque_types(false).eq(a, b) {
+ Ok(_) => span_bug!(
+ cause.span,
+ "opaque types are never equal to anything but themselves: {:#?}",
+ (a.kind(), b.kind())
+ ),
+ Err(e) => Err(e),
+ }
+ }
+ }
+
+ /// Given the map `opaque_types` containing the opaque
+ /// `impl Trait` types whose underlying, hidden types are being
+ /// inferred, this method adds constraints to the regions
+ /// appearing in those underlying hidden types to ensure that they
+ /// at least do not refer to random scopes within the current
+ /// function. These constraints are not (quite) sufficient to
+ /// guarantee that the regions are actually legal values; that
+ /// final condition is imposed after region inference is done.
+ ///
+ /// # The Problem
+ ///
+ /// Let's work through an example to explain how it works. Assume
+ /// the current function is as follows:
+ ///
+ /// ```text
+ /// fn foo<'a, 'b>(..) -> (impl Bar<'a>, impl Bar<'b>)
+ /// ```
+ ///
+ /// Here, we have two `impl Trait` types whose values are being
+ /// inferred (the `impl Bar<'a>` and the `impl
+ /// Bar<'b>`). Conceptually, this is sugar for a setup where we
+ /// define underlying opaque types (`Foo1`, `Foo2`) and then, in
+ /// the return type of `foo`, we *reference* those definitions:
+ ///
+ /// ```text
+ /// type Foo1<'x> = impl Bar<'x>;
+ /// type Foo2<'x> = impl Bar<'x>;
+ /// fn foo<'a, 'b>(..) -> (Foo1<'a>, Foo2<'b>) { .. }
+ /// // ^^^^ ^^
+ /// // | |
+ /// // | substs
+ /// // def_id
+ /// ```
+ ///
+    /// As indicated in the comments above, each of those references
+ /// is (in the compiler) basically a substitution (`substs`)
+ /// applied to the type of a suitable `def_id` (which identifies
+ /// `Foo1` or `Foo2`).
+ ///
+ /// Now, at this point in compilation, what we have done is to
+ /// replace each of the references (`Foo1<'a>`, `Foo2<'b>`) with
+ /// fresh inference variables C1 and C2. We wish to use the values
+ /// of these variables to infer the underlying types of `Foo1` and
+ /// `Foo2`. That is, this gives rise to higher-order (pattern) unification
+ /// constraints like:
+ ///
+ /// ```text
+ /// for<'a> (Foo1<'a> = C1)
+    /// for<'b> (Foo2<'b> = C2)
+ /// ```
+ ///
+    /// For these equations to be satisfiable, the types `C1` and `C2`
+ /// can only refer to a limited set of regions. For example, `C1`
+ /// can only refer to `'static` and `'a`, and `C2` can only refer
+ /// to `'static` and `'b`. The job of this function is to impose that
+ /// constraint.
+ ///
+ /// Up to this point, C1 and C2 are basically just random type
+ /// inference variables, and hence they may contain arbitrary
+ /// regions. In fact, it is fairly likely that they do! Consider
+ /// this possible definition of `foo`:
+ ///
+ /// ```text
+ /// fn foo<'a, 'b>(x: &'a i32, y: &'b i32) -> (impl Bar<'a>, impl Bar<'b>) {
+ /// (&*x, &*y)
+ /// }
+ /// ```
+ ///
+ /// Here, the values for the concrete types of the two impl
+ /// traits will include inference variables:
+ ///
+ /// ```text
+ /// &'0 i32
+ /// &'1 i32
+ /// ```
+ ///
+ /// Ordinarily, the subtyping rules would ensure that these are
+ /// sufficiently large. But since `impl Bar<'a>` isn't a specific
+ /// type per se, we don't get such constraints by default. This
+ /// is where this function comes into play. It adds extra
+ /// constraints to ensure that all the regions which appear in the
+ /// inferred type are regions that could validly appear.
+ ///
+ /// This is actually a bit of a tricky constraint in general. We
+ /// want to say that each variable (e.g., `'0`) can only take on
+ /// values that were supplied as arguments to the opaque type
+ /// (e.g., `'a` for `Foo1<'a>`) or `'static`, which is always in
+    /// scope. We don't have a constraint of quite this kind in the current
+ /// region checker.
+ ///
+ /// # The Solution
+ ///
+ /// We generally prefer to make `<=` constraints, since they
+ /// integrate best into the region solver. To do that, we find the
+ /// "minimum" of all the arguments that appear in the substs: that
+ /// is, some region which is less than all the others. In the case
+ /// of `Foo1<'a>`, that would be `'a` (it's the only choice, after
+ /// all). Then we apply that as a least bound to the variables
+ /// (e.g., `'a <= '0`).
+ ///
+ /// In some cases, there is no minimum. Consider this example:
+ ///
+ /// ```text
+ /// fn baz<'a, 'b>() -> impl Trait<'a, 'b> { ... }
+ /// ```
+ ///
+ /// Here we would report a more complex "in constraint", like `'r
+ /// in ['a, 'b, 'static]` (where `'r` is some region appearing in
+ /// the hidden type).
+ ///
+ /// # Constrain regions, not the hidden concrete type
+ ///
+ /// Note that generating constraints on each region `Rc` is *not*
+ /// the same as generating an outlives constraint on `Tc` itself.
+ /// For example, if we had a function like this:
+ ///
+ /// ```
+ /// # #![feature(type_alias_impl_trait)]
+ /// # fn main() {}
+ /// # trait Foo<'a> {}
+ /// # impl<'a, T> Foo<'a> for (&'a u32, T) {}
+ /// fn foo<'a, T>(x: &'a u32, y: T) -> impl Foo<'a> {
+ /// (x, y)
+ /// }
+ ///
+ /// // Equivalent to:
+ /// # mod dummy { use super::*;
+ /// type FooReturn<'a, T> = impl Foo<'a>;
+ /// fn foo<'a, T>(x: &'a u32, y: T) -> FooReturn<'a, T> {
+ /// (x, y)
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// then the hidden type `Tc` would be `(&'0 u32, T)` (where `'0`
+ /// is an inference variable). If we generated a constraint that
+ /// `Tc: 'a`, then this would incorrectly require that `T: 'a` --
+ /// but this is not necessary, because the opaque type we
+ /// create will be allowed to reference `T`. So we only generate a
+ /// constraint that `'0: 'a`.
+ #[instrument(level = "debug", skip(self))]
+ pub fn register_member_constraints(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ opaque_type_key: OpaqueTypeKey<'tcx>,
+ concrete_ty: Ty<'tcx>,
+ span: Span,
+ ) {
+ let def_id = opaque_type_key.def_id;
+
+ let tcx = self.tcx;
+
+ let concrete_ty = self.resolve_vars_if_possible(concrete_ty);
+
+ debug!(?concrete_ty);
+
+ let first_own_region = match self.opaque_ty_origin_unchecked(def_id, span) {
+ hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..) => {
+ // We lower
+ //
+ // fn foo<'l0..'ln>() -> impl Trait<'l0..'lm>
+ //
+ // into
+ //
+ // type foo::<'p0..'pn>::Foo<'q0..'qm>
+                // fn foo<'l0..'ln>() -> foo::<'static..'static>::Foo<'l0..'lm>.
+                //
+                // For these types we only iterate over `'l0..'lm` below.
+ tcx.generics_of(def_id).parent_count
+ }
+            // These opaque types inherit all lifetime parameters from their
+ // parent, so we have to check them all.
+ hir::OpaqueTyOrigin::TyAlias => 0,
+ };
+
+ // For a case like `impl Foo<'a, 'b>`, we would generate a constraint
+ // `'r in ['a, 'b, 'static]` for each region `'r` that appears in the
+ // hidden type (i.e., it must be equal to `'a`, `'b`, or `'static`).
+ //
+ // `conflict1` and `conflict2` are the two region bounds that we
+ // detected which were unrelated. They are used for diagnostics.
+
+ // Create the set of choice regions: each region in the hidden
+ // type can be equal to any of the region parameters of the
+ // opaque type definition.
+ let choice_regions: Lrc<Vec<ty::Region<'tcx>>> = Lrc::new(
+ opaque_type_key.substs[first_own_region..]
+ .iter()
+ .filter_map(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(r) => Some(r),
+ GenericArgKind::Type(_) | GenericArgKind::Const(_) => None,
+ })
+ .chain(std::iter::once(self.tcx.lifetimes.re_static))
+ .collect(),
+ );
+
+ concrete_ty.visit_with(&mut ConstrainOpaqueTypeRegionVisitor {
+ op: |r| self.member_constraint(opaque_type_key, span, concrete_ty, r, &choice_regions),
+ });
+ }
+
+ #[instrument(skip(self), level = "trace")]
+ pub fn opaque_type_origin(&self, def_id: LocalDefId, span: Span) -> Option<OpaqueTyOrigin> {
+ let opaque_hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let parent_def_id = match self.defining_use_anchor {
+ DefiningAnchor::Bubble | DefiningAnchor::Error => return None,
+ DefiningAnchor::Bind(bind) => bind,
+ };
+ let item_kind = &self.tcx.hir().expect_item(def_id).kind;
+
+ let hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) = item_kind else {
+ span_bug!(
+ span,
+ "weird opaque type: {:#?}, {:#?}",
+ def_id,
+ item_kind
+ )
+ };
+ let in_definition_scope = match *origin {
+ // Async `impl Trait`
+ hir::OpaqueTyOrigin::AsyncFn(parent) => parent == parent_def_id,
+ // Anonymous `impl Trait`
+ hir::OpaqueTyOrigin::FnReturn(parent) => parent == parent_def_id,
+ // Named `type Foo = impl Bar;`
+ hir::OpaqueTyOrigin::TyAlias => {
+ may_define_opaque_type(self.tcx, parent_def_id, opaque_hir_id)
+ }
+ };
+ trace!(?origin);
+ in_definition_scope.then_some(*origin)
+ }
+
+ #[instrument(skip(self), level = "trace")]
+ fn opaque_ty_origin_unchecked(&self, def_id: LocalDefId, span: Span) -> OpaqueTyOrigin {
+ let origin = match self.tcx.hir().expect_item(def_id).kind {
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => origin,
+ ref itemkind => {
+ span_bug!(span, "weird opaque type: {:?}, {:#?}", def_id, itemkind)
+ }
+ };
+ trace!(?origin);
+ origin
+ }
+}
+
+// Visitor that requires that (almost) all regions in the type visited outlive
+// `least_region`. We cannot use `push_outlives_components` because regions in
+// closure signatures are not included in their outlives components. We need to
+// ensure all regions outlive the given bound so that we don't end up with,
+// say, `ReVar` appearing in a return type and causing ICEs when other
+// functions end up with region constraints involving regions from other
+// functions.
+//
+// We also cannot use `for_each_free_region` because for closures it includes
+// the region parameters from the enclosing item.
+//
+// We ignore any type parameters because impl trait values are assumed to
+// capture all the in-scope type parameters.
+struct ConstrainOpaqueTypeRegionVisitor<OP> {
+ op: OP,
+}
+
+impl<'tcx, OP> TypeVisitor<'tcx> for ConstrainOpaqueTypeRegionVisitor<OP>
+where
+ OP: FnMut(ty::Region<'tcx>),
+{
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &ty::Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ t.super_visit_with(self);
+ ControlFlow::CONTINUE
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ // ignore bound regions, keep visiting
+ ty::ReLateBound(_, _) => ControlFlow::CONTINUE,
+ _ => {
+ (self.op)(r);
+ ControlFlow::CONTINUE
+ }
+ }
+ }
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // We're only interested in types involving regions
+ if !ty.flags().intersects(ty::TypeFlags::HAS_FREE_REGIONS) {
+ return ControlFlow::CONTINUE;
+ }
+
+ match ty.kind() {
+ ty::Closure(_, ref substs) => {
+ // Skip lifetime parameters of the enclosing item(s)
+
+ substs.as_closure().tupled_upvars_ty().visit_with(self);
+ substs.as_closure().sig_as_fn_ptr_ty().visit_with(self);
+ }
+
+ ty::Generator(_, ref substs, _) => {
+ // Skip lifetime parameters of the enclosing item(s)
+ // Also skip the witness type, because that has no free regions.
+
+ substs.as_generator().tupled_upvars_ty().visit_with(self);
+ substs.as_generator().return_ty().visit_with(self);
+ substs.as_generator().yield_ty().visit_with(self);
+ substs.as_generator().resume_ty().visit_with(self);
+ }
+ _ => {
+ ty.super_visit_with(self);
+ }
+ }
+
+ ControlFlow::CONTINUE
+ }
+}
+
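+/// Whether a use of an opaque type is a defining use (one that may
+/// constrain the hidden type) or a purely opaque use.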
+pub enum UseKind {
+ DefiningUse,
+ OpaqueUse,
+}
+
+impl UseKind {
+ pub fn is_defining(self) -> bool {
+ match self {
+ UseKind::DefiningUse => true,
+ UseKind::OpaqueUse => false,
+ }
+ }
+}
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ #[instrument(skip(self), level = "debug")]
+ pub fn register_hidden_type(
+ &self,
+ opaque_type_key: OpaqueTypeKey<'tcx>,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ hidden_ty: Ty<'tcx>,
+ origin: hir::OpaqueTyOrigin,
+ ) -> InferResult<'tcx, ()> {
+ let tcx = self.tcx;
+ let OpaqueTypeKey { def_id, substs } = opaque_type_key;
+
+ // Ideally, we'd get the span where *this specific `ty` came
+ // from*, but right now we just use the span from the overall
+ // value being folded. In simple cases like `-> impl Foo`,
+ // these are the same span, but not in cases like `-> (impl
+ // Foo, impl Bar)`.
+ let span = cause.span;
+
+ let mut obligations = vec![];
+ let prev = self.inner.borrow_mut().opaque_types().register(
+ OpaqueTypeKey { def_id, substs },
+ OpaqueHiddenType { ty: hidden_ty, span },
+ origin,
+ );
+ if let Some(prev) = prev {
+ obligations = self.at(&cause, param_env).eq(prev, hidden_ty)?.obligations;
+ }
+
+ let item_bounds = tcx.bound_explicit_item_bounds(def_id.to_def_id());
+
+ for predicate in item_bounds.transpose_iter().map(|e| e.map_bound(|(p, _)| *p)) {
+ debug!(?predicate);
+ let predicate = predicate.subst(tcx, substs);
+
+ let predicate = predicate.fold_with(&mut BottomUpFolder {
+ tcx,
+ ty_op: |ty| match *ty.kind() {
+ // We can't normalize associated types from `rustc_infer`,
+ // but we can eagerly register inference variables for them.
+ ty::Projection(projection_ty) if !projection_ty.has_escaping_bound_vars() => {
+ self.infer_projection(
+ param_env,
+ projection_ty,
+ cause.clone(),
+ 0,
+ &mut obligations,
+ )
+ }
+ // Replace all other mentions of the same opaque type with the hidden type,
+ // as the bounds must hold on the hidden type after all.
+ ty::Opaque(def_id2, substs2)
+ if def_id.to_def_id() == def_id2 && substs == substs2 =>
+ {
+ hidden_ty
+ }
+ _ => ty,
+ },
+ lt_op: |lt| lt,
+ ct_op: |ct| ct,
+ });
+
+ if let ty::PredicateKind::Projection(projection) = predicate.kind().skip_binder() {
+ if projection.term.references_error() {
+ // No point on adding these obligations since there's a type error involved.
+ return Ok(InferOk { value: (), obligations: vec![] });
+ }
+ trace!("{:#?}", projection.term);
+ }
+ // Require that the predicate holds for the concrete type.
+ debug!(?predicate);
+ obligations.push(traits::Obligation::new(cause.clone(), param_env, predicate));
+ }
+ Ok(InferOk { value: (), obligations })
+ }
+}
+
+/// Returns `true` if `opaque_hir_id` is a sibling or a child of a sibling of `def_id`.
+///
+/// Example:
+/// ```ignore UNSOLVED (is this a bug?)
+/// # #![feature(type_alias_impl_trait)]
+/// pub mod foo {
+/// pub mod bar {
+/// pub trait Bar { /* ... */ }
+/// pub type Baz = impl Bar;
+///
+/// # impl Bar for () {}
+/// fn f1() -> Baz { /* ... */ }
+/// }
+/// fn f2() -> bar::Baz { /* ... */ }
+/// }
+/// ```
+///
+/// Here, `def_id` is the `LocalDefId` of the defining use of the opaque type (e.g., `f1` or `f2`),
+/// and `opaque_hir_id` is the `HirId` of the definition of the opaque type `Baz`.
+/// For the above example, this function returns `true` for `f1` and `false` for `f2`.
+fn may_define_opaque_type(tcx: TyCtxt<'_>, def_id: LocalDefId, opaque_hir_id: hir::HirId) -> bool {
+ let mut hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ // Named opaque types can be defined by any siblings or children of siblings.
+ let scope = tcx.hir().get_defining_scope(opaque_hir_id);
+ // We walk up the node tree until we hit the root or the scope of the opaque type.
+ while hir_id != scope && hir_id != hir::CRATE_HIR_ID {
+ hir_id = tcx.hir().local_def_id_to_hir_id(tcx.hir().get_parent_item(hir_id));
+ }
+ // Syntactically, we are allowed to define the concrete type if:
+ let res = hir_id == scope;
+ trace!(
+ "may_define_opaque_type(def={:?}, opaque_node={:?}) = {}",
+ tcx.hir().find(hir_id),
+ tcx.hir().get(opaque_hir_id),
+ res
+ );
+ res
+}
diff --git a/compiler/rustc_infer/src/infer/opaque_types/table.rs b/compiler/rustc_infer/src/infer/opaque_types/table.rs
new file mode 100644
index 000000000..fb12da0cc
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/opaque_types/table.rs
@@ -0,0 +1,80 @@
+use rustc_data_structures::undo_log::UndoLogs;
+use rustc_hir::OpaqueTyOrigin;
+use rustc_middle::ty::{self, OpaqueHiddenType, OpaqueTypeKey, Ty};
+use rustc_span::DUMMY_SP;
+
+use crate::infer::{InferCtxtUndoLogs, UndoLog};
+
+use super::{OpaqueTypeDecl, OpaqueTypeMap};
+
+#[derive(Default, Debug, Clone)]
+pub struct OpaqueTypeStorage<'tcx> {
+ // Opaque types found in explicit return types and their
+ // associated fresh inference variable. Writeback resolves these
+ // variables to get the concrete type, which can be used to
+ // 'de-opaque' OpaqueTypeDecl, after typeck is done with all functions.
+ pub opaque_types: OpaqueTypeMap<'tcx>,
+}
+
+impl<'tcx> OpaqueTypeStorage<'tcx> {
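+    /// Rolls back a `register` call: restores the previous hidden type if
+    /// there was one (`idx`), or removes the entry entirely otherwise.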
+ #[instrument(level = "debug")]
+ pub(crate) fn remove(&mut self, key: OpaqueTypeKey<'tcx>, idx: Option<OpaqueHiddenType<'tcx>>) {
+ if let Some(idx) = idx {
+ self.opaque_types.get_mut(&key).unwrap().hidden_type = idx;
+ } else {
+ match self.opaque_types.remove(&key) {
+ None => bug!("reverted opaque type inference that was never registered: {:?}", key),
+ Some(_) => {}
+ }
+ }
+ }
+
+ #[instrument(level = "debug")]
+ pub fn take_opaque_types(&mut self) -> OpaqueTypeMap<'tcx> {
+ std::mem::take(&mut self.opaque_types)
+ }
+
+ #[inline]
+ pub(crate) fn with_log<'a>(
+ &'a mut self,
+ undo_log: &'a mut InferCtxtUndoLogs<'tcx>,
+ ) -> OpaqueTypeTable<'a, 'tcx> {
+ OpaqueTypeTable { storage: self, undo_log }
+ }
+}
+
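+// The storage is expected to be emptied (e.g. via `take_opaque_types`)
+// before being dropped; leftover entries indicate a compiler bug and are
+// reported via `delay_span_bug`.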
+impl<'tcx> Drop for OpaqueTypeStorage<'tcx> {
+ fn drop(&mut self) {
+ if !self.opaque_types.is_empty() {
+ ty::tls::with(|tcx| {
+ tcx.sess.delay_span_bug(DUMMY_SP, &format!("{:?}", self.opaque_types))
+ });
+ }
+ }
+}
+
+pub struct OpaqueTypeTable<'a, 'tcx> {
+ storage: &'a mut OpaqueTypeStorage<'tcx>,
+
+ undo_log: &'a mut InferCtxtUndoLogs<'tcx>,
+}
+
+impl<'a, 'tcx> OpaqueTypeTable<'a, 'tcx> {
+ #[instrument(skip(self), level = "debug")]
+ pub(crate) fn register(
+ &mut self,
+ key: OpaqueTypeKey<'tcx>,
+ hidden_type: OpaqueHiddenType<'tcx>,
+ origin: OpaqueTyOrigin,
+ ) -> Option<Ty<'tcx>> {
+ if let Some(decl) = self.storage.opaque_types.get_mut(&key) {
+ let prev = std::mem::replace(&mut decl.hidden_type, hidden_type);
+ self.undo_log.push(UndoLog::OpaqueTypes(key, Some(prev)));
+ return Some(prev.ty);
+ }
+ let decl = OpaqueTypeDecl { hidden_type, origin };
+ self.storage.opaque_types.insert(key, decl);
+ self.undo_log.push(UndoLog::OpaqueTypes(key, None));
+ None
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/outlives/components.rs b/compiler/rustc_infer/src/infer/outlives/components.rs
new file mode 100644
index 000000000..b2d7f4a66
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/outlives/components.rs
@@ -0,0 +1,219 @@
+// The outlives relation `T: 'a` or `'a: 'b`. This code frequently
+// refers to rules defined in RFC 1214 (`OutlivesFooBar`), so see that
+// RFC for reference.
+
+use rustc_data_structures::sso::SsoHashSet;
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable};
+use smallvec::{smallvec, SmallVec};
+
+#[derive(Debug)]
+pub enum Component<'tcx> {
+ Region(ty::Region<'tcx>),
+ Param(ty::ParamTy),
+ UnresolvedInferenceVariable(ty::InferTy),
+
+ // Projections like `T::Foo` are tricky because a constraint like
+ // `T::Foo: 'a` can be satisfied in so many ways. There may be a
+ // where-clause that says `T::Foo: 'a`, or the defining trait may
+ // include a bound like `type Foo: 'static`, or -- in the most
+ // conservative way -- we can prove that `T: 'a` (more generally,
+ // that all components in the projection outlive `'a`). This code
+ // is not in a position to judge which is the best technique, so
+    // we just produce the projection as a component and leave it to
+ // the consumer to decide (but see `EscapingProjection` below).
+ Projection(ty::ProjectionTy<'tcx>),
+
+ // In the case where a projection has escaping regions -- meaning
+ // regions bound within the type itself -- we always use
+ // the most conservative rule, which requires that all components
+ // outlive the bound. So for example if we had a type like this:
+ //
+ // for<'a> Trait1< <T as Trait2<'a,'b>>::Foo >
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~
+ //
+ // then the inner projection (underlined) has an escaping region
+    // `'a`. We consider the outer type to meet a bound `'c` if `'b`
+    // outlives `'c` (i.e., `'b: 'c`), and we don't consider whether the trait
+ // declares that `Foo: 'static` etc. Therefore, we just return the
+ // free components of such a projection (in this case, `'b`).
+ //
+ // However, in the future, we may want to get smarter, and
+ // actually return a "higher-ranked projection" here. Therefore,
+ // we mark that these components are part of an escaping
+ // projection, so that implied bounds code can avoid relying on
+ // them. This gives us room to improve the regionck reasoning in
+ // the future without breaking backwards compat.
+ EscapingProjection(Vec<Component<'tcx>>),
+}
+
+/// Push onto `out` all the things that must outlive `'a` for the condition
+/// `ty0: 'a` to hold. Note that `ty0` must be a **fully resolved type**.
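+///
+/// For example (a sketch), for `ty0 = &'a Foo<T, <B as Iterator>::Item>`,
+/// where the projection has no escaping bound vars, this pushes roughly
+/// `Component::Region('a)`, `Component::Param(T)`, and
+/// `Component::Projection(<B as Iterator>::Item)` onto `out`.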
+pub fn push_outlives_components<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty0: Ty<'tcx>,
+ out: &mut SmallVec<[Component<'tcx>; 4]>,
+) {
+ let mut visited = SsoHashSet::new();
+ compute_components(tcx, ty0, out, &mut visited);
+ debug!("components({:?}) = {:?}", ty0, out);
+}
+
+fn compute_components<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ out: &mut SmallVec<[Component<'tcx>; 4]>,
+ visited: &mut SsoHashSet<GenericArg<'tcx>>,
+) {
+ // Descend through the types, looking for the various "base"
+ // components and collecting them into `out`. This is not written
+ // with `collect()` because of the need to sometimes skip subtrees
+ // in the `subtys` iterator (e.g., when encountering a
+ // projection).
+ match *ty.kind() {
+ ty::FnDef(_, substs) => {
+ // HACK(eddyb) ignore lifetimes found shallowly in `substs`.
+ // This is inconsistent with `ty::Adt` (including all substs)
+ // and with `ty::Closure` (ignoring all substs other than
+ // upvars, of which a `ty::FnDef` doesn't have any), but
+ // consistent with previous (accidental) behavior.
+ // See https://github.com/rust-lang/rust/issues/70917
+ // for further background and discussion.
+ for child in substs {
+ match child.unpack() {
+ GenericArgKind::Type(ty) => {
+ compute_components(tcx, ty, out, visited);
+ }
+ GenericArgKind::Lifetime(_) => {}
+ GenericArgKind::Const(_) => {
+ compute_components_recursive(tcx, child, out, visited);
+ }
+ }
+ }
+ }
+
+ ty::Array(element, _) => {
+ // Don't look into the len const as it doesn't affect regions
+ compute_components(tcx, element, out, visited);
+ }
+
+ ty::Closure(_, ref substs) => {
+ let tupled_ty = substs.as_closure().tupled_upvars_ty();
+ compute_components(tcx, tupled_ty, out, visited);
+ }
+
+ ty::Generator(_, ref substs, _) => {
+ // Same as the closure case
+ let tupled_ty = substs.as_generator().tupled_upvars_ty();
+ compute_components(tcx, tupled_ty, out, visited);
+
+ // We ignore regions in the generator interior as we don't
+ // want these to affect region inference
+ }
+
+ // All regions are bound inside a witness
+ ty::GeneratorWitness(..) => (),
+
+ // OutlivesTypeParameterEnv -- the actual checking that `X:'a`
+ // is implied by the environment is done in regionck.
+ ty::Param(p) => {
+ out.push(Component::Param(p));
+ }
+
+ // For projections, we prefer to generate an obligation like
+ // `<P0 as Trait<P1...Pn>>::Foo: 'a`, because this gives the
+ // regionck more ways to prove that it holds. However,
+ // regionck is not (at least currently) prepared to deal with
+ // higher-ranked regions that may appear in the
+ // trait-ref. Therefore, if we see any higher-ranked regions,
+ // we simply fallback to the most restrictive rule, which
+ // requires that `Pi: 'a` for all `i`.
+ ty::Projection(ref data) => {
+ if !data.has_escaping_bound_vars() {
+ // best case: no escaping regions, so push the
+ // projection and skip the subtree (thus generating no
+ // constraints for Pi). This defers the choice between
+ // the rules OutlivesProjectionEnv,
+ // OutlivesProjectionTraitDef, and
+ // OutlivesProjectionComponents to regionck.
+ out.push(Component::Projection(*data));
+ } else {
+ // fallback case: hard code
+ // OutlivesProjectionComponents. Continue walking
+ // through and constrain Pi.
+ let mut subcomponents = smallvec![];
+ let mut subvisited = SsoHashSet::new();
+ compute_components_recursive(tcx, ty.into(), &mut subcomponents, &mut subvisited);
+ out.push(Component::EscapingProjection(subcomponents.into_iter().collect()));
+ }
+ }
+
+ // We assume that inference variables are fully resolved.
+ // So, if we encounter an inference variable, just record
+ // the unresolved variable as a component.
+ ty::Infer(infer_ty) => {
+ out.push(Component::UnresolvedInferenceVariable(infer_ty));
+ }
+
+ // Most types do not introduce any region binders, nor
+ // involve any other subtle cases, and so the WF relation
+        // simply constrains any regions referenced directly by
+ // the type and then visits the types that are lexically
+ // contained within. (The comments refer to relevant rules
+ // from RFC1214.)
+ ty::Bool | // OutlivesScalar
+ ty::Char | // OutlivesScalar
+ ty::Int(..) | // OutlivesScalar
+ ty::Uint(..) | // OutlivesScalar
+ ty::Float(..) | // OutlivesScalar
+ ty::Never | // ...
+ ty::Adt(..) | // OutlivesNominalType
+ ty::Opaque(..) | // OutlivesNominalType (ish)
+ ty::Foreign(..) | // OutlivesNominalType
+ ty::Str | // OutlivesScalar (ish)
+ ty::Slice(..) | // ...
+ ty::RawPtr(..) | // ...
+ ty::Ref(..) | // OutlivesReference
+ ty::Tuple(..) | // ...
+ ty::FnPtr(_) | // OutlivesFunction (*)
+ ty::Dynamic(..) | // OutlivesObject, OutlivesFragment (*)
+ ty::Placeholder(..) |
+ ty::Bound(..) |
+ ty::Error(_) => {
+ // (*) Function pointers and trait objects are both binders.
+ // In the RFC, this means we would add the bound regions to
+ // the "bound regions list". In our representation, no such
+ // list is maintained explicitly, because bound regions
+ // themselves can be readily identified.
+ compute_components_recursive(tcx, ty.into(), out, visited);
+ }
+ }
+}
+
+/// Collect [Component]s for *all* the substs of `parent`.
+///
+/// This should not be used to get the components of `parent` itself.
+/// Use [push_outlives_components] instead.
+pub(super) fn compute_components_recursive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ parent: GenericArg<'tcx>,
+ out: &mut SmallVec<[Component<'tcx>; 4]>,
+ visited: &mut SsoHashSet<GenericArg<'tcx>>,
+) {
+ for child in parent.walk_shallow(visited) {
+ match child.unpack() {
+ GenericArgKind::Type(ty) => {
+ compute_components(tcx, ty, out, visited);
+ }
+ GenericArgKind::Lifetime(lt) => {
+ // Ignore late-bound regions.
+ if !lt.is_late_bound() {
+ out.push(Component::Region(lt));
+ }
+ }
+ GenericArgKind::Const(_) => {
+ compute_components_recursive(tcx, child, out, visited);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/outlives/env.rs b/compiler/rustc_infer/src/infer/outlives/env.rs
new file mode 100644
index 000000000..b2decd64f
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/outlives/env.rs
@@ -0,0 +1,131 @@
+use crate::infer::free_regions::FreeRegionMap;
+use crate::infer::{GenericKind, InferCtxt};
+use crate::traits::query::OutlivesBound;
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_middle::ty::{self, ReEarlyBound, ReFree, ReVar, Region};
+
+use super::explicit_outlives_bounds;
+
+/// The `OutlivesEnvironment` collects information about what outlives
+/// what in a given type-checking setting. For example, if we have a
+/// where-clause like `where T: 'a` in scope, then the
+/// `OutlivesEnvironment` would record that (in its
+/// `region_bound_pairs` field). Similarly, it contains methods for
+/// processing and adding implied bounds into the outlives
+/// environment.
+///
+/// Other code at present does not typically take a
+/// `&OutlivesEnvironment`, but rather takes some of its fields (e.g.,
+/// `process_registered_region_obligations` wants the
+/// region-bound-pairs). There is no mistaking it: the current setup
+/// of tracking region information is quite scattered! The
+/// `OutlivesEnvironment`, for example, needs to sometimes be combined
+/// with the `middle::RegionRelations`, to yield a full picture of how
+/// (lexical) lifetimes interact. However, I'm reluctant to do more
+/// refactoring here, since the setup with NLL is quite different.
+/// For example, NLL has no need of `RegionRelations`, and is solely
+/// interested in the `OutlivesEnvironment`. -nmatsakis
+#[derive(Clone)]
+pub struct OutlivesEnvironment<'tcx> {
+ pub param_env: ty::ParamEnv<'tcx>,
+ free_region_map: FreeRegionMap<'tcx>,
+
+ // Contains the implied region bounds in scope for our current body.
+ //
+ // Example:
+ //
+ // ```
+ // fn foo<'a, 'b, T>(x: &'a T, y: &'b ()) {
+ // bar(x, y, |y: &'b T| { .. } // body B1)
+ // } // body B0
+ // ```
+ //
+ // Here, when checking the body B0, the list would be `[T: 'a]`, because we
+ // infer that `T` must outlive `'a` from the implied bounds on the
+ // fn declaration.
+ //
+ // For the body B1 however, the list would be `[T: 'a, T: 'b]`, because we
+ // also can see that -- within the closure body! -- `T` must
+ // outlive `'b`. This is not necessarily true outside the closure
+ // body, since the closure may never be called.
+ region_bound_pairs: RegionBoundPairs<'tcx>,
+}
+
+/// "Region-bound pairs" tracks outlives relations that are known to
+/// be true, either because of explicit where-clauses like `T: 'a` or
+/// because of implied bounds.
+pub type RegionBoundPairs<'tcx> =
+ FxIndexSet<ty::OutlivesPredicate<GenericKind<'tcx>, Region<'tcx>>>;
+
+impl<'a, 'tcx> OutlivesEnvironment<'tcx> {
+ pub fn new(param_env: ty::ParamEnv<'tcx>) -> Self {
+ let mut env = OutlivesEnvironment {
+ param_env,
+ free_region_map: Default::default(),
+ region_bound_pairs: Default::default(),
+ };
+
+ env.add_outlives_bounds(None, explicit_outlives_bounds(param_env));
+
+ env
+ }
+
+ /// Borrows current value of the `free_region_map`.
+ pub fn free_region_map(&self) -> &FreeRegionMap<'tcx> {
+ &self.free_region_map
+ }
+
+ /// Borrows current `region_bound_pairs`.
+ pub fn region_bound_pairs(&self) -> &RegionBoundPairs<'tcx> {
+ &self.region_bound_pairs
+ }
+
+ /// Processes outlives bounds that are known to hold, whether from implied or other sources.
+ ///
+ /// The `infcx` parameter is optional; if the implied bounds may
+ /// contain inference variables, it must be supplied, in which
+ /// case we will register "givens" on the inference context. (See
+ /// `RegionConstraintData`.)
+ pub fn add_outlives_bounds<I>(
+ &mut self,
+ infcx: Option<&InferCtxt<'a, 'tcx>>,
+ outlives_bounds: I,
+ ) where
+ I: IntoIterator<Item = OutlivesBound<'tcx>>,
+ {
+ // Record relationships such as `T:'x` that don't go into the
+ // free-region-map but which we use here.
+ for outlives_bound in outlives_bounds {
+ debug!("add_outlives_bounds: outlives_bound={:?}", outlives_bound);
+ match outlives_bound {
+ OutlivesBound::RegionSubParam(r_a, param_b) => {
+ self.region_bound_pairs
+ .insert(ty::OutlivesPredicate(GenericKind::Param(param_b), r_a));
+ }
+ OutlivesBound::RegionSubProjection(r_a, projection_b) => {
+ self.region_bound_pairs
+ .insert(ty::OutlivesPredicate(GenericKind::Projection(projection_b), r_a));
+ }
+ OutlivesBound::RegionSubRegion(r_a, r_b) => {
+ if let (ReEarlyBound(_) | ReFree(_), ReVar(vid_b)) = (r_a.kind(), r_b.kind()) {
+ infcx
+ .expect("no infcx provided but region vars found")
+ .add_given(r_a, vid_b);
+ } else {
+ // In principle, we could record (and take
+ // advantage of) every relationship here, but
+ // we are also free not to -- it simply means
+                    // strictly fewer programs that we can successfully
+                    // type check. Right now we only look for
+ // relationships between free regions. (It may
+ // also be that we should revise our inference
+ // system to be more general and to make use
+ // of *every* relationship that arises here,
+ // but presently we do not.)
+ self.free_region_map.relate_regions(r_a, r_b);
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/outlives/mod.rs b/compiler/rustc_infer/src/infer/outlives/mod.rs
new file mode 100644
index 000000000..2a085288f
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/outlives/mod.rs
@@ -0,0 +1,37 @@
+//! Various code related to computing outlives relations.
+
+pub mod components;
+pub mod env;
+pub mod obligations;
+pub mod test_type_match;
+pub mod verify;
+
+use rustc_middle::traits::query::OutlivesBound;
+use rustc_middle::ty;
+
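+/// Extracts the explicit region-outlives bounds (`'a: 'b`) recorded in the
+/// `param_env`'s caller bounds, e.g. from `where 'a: 'b` clauses; any
+/// predicates with bound vars are skipped.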
+#[instrument(level = "debug", skip(param_env))]
+pub fn explicit_outlives_bounds<'tcx>(
+ param_env: ty::ParamEnv<'tcx>,
+) -> impl Iterator<Item = OutlivesBound<'tcx>> + 'tcx {
+ param_env
+ .caller_bounds()
+ .into_iter()
+ .map(ty::Predicate::kind)
+ .filter_map(ty::Binder::no_bound_vars)
+ .filter_map(move |kind| match kind {
+ ty::PredicateKind::Projection(..)
+ | ty::PredicateKind::Trait(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::WellFormed(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::TypeOutlives(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
+ ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(r_a, r_b)) => {
+ Some(OutlivesBound::RegionSubRegion(r_b, r_a))
+ }
+ })
+}
diff --git a/compiler/rustc_infer/src/infer/outlives/obligations.rs b/compiler/rustc_infer/src/infer/outlives/obligations.rs
new file mode 100644
index 000000000..ad052f58c
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/outlives/obligations.rs
@@ -0,0 +1,470 @@
+//! Code that handles "type-outlives" constraints like `T: 'a`. This
+//! is based on the `push_outlives_components` function defined in rustc_infer,
+//! but it adds a bit of heuristics on top, in particular to deal with
+//! associated types and projections.
+//!
+//! When we process a given `T: 'a` obligation, we may produce two
+//! kinds of constraints for the region inferencer:
+//!
+//! - Relationships between inference variables and other regions.
+//! For example, if we have `&'?0 u32: 'a`, then we would produce
+//! a constraint that `'a <= '?0`.
+//! - "Verifys" that must be checked after inferencing is done.
+//! For example, if we know that, for some type parameter `T`,
+//! `T: 'a + 'b`, and we have a requirement that `T: '?1`,
+//! then we add a "verify" that checks that `'?1 <= 'a || '?1 <= 'b`.
+//! - Note the difference with the previous case: here, the region
+//! variable must be less than something else, so this doesn't
+//! affect how inference works (it finds the smallest region that
+//! will do); it's just a post-condition that we have to check.
+//!
+//! **The key point is that once this function is done, we have
+//! reduced all of our "type-region outlives" obligations into relationships
+//! between individual regions.**
+//!
+//! One key input to this function is the set of "region-bound pairs".
+//! These are basically the relationships between type parameters and
+//! regions that are in scope at the point where the outlives
+//! obligation was incurred. **When type-checking a function,
+//! particularly in the face of closures, this is not known until
+//! regionck runs!** This is because some of those bounds come
+//! from things we have yet to infer.
+//!
+//! Consider:
+//!
+//! ```
+//! fn bar<T>(a: T, b: impl for<'a> Fn(&'a T)) {}
+//! fn foo<T>(x: T) {
+//! bar(x, |y| { /* ... */})
+//! // ^ closure arg
+//! }
+//! ```
+//!
+//! Here, the type of `y` may involve inference variables and the
+//! like, and it may also contain implied bounds that are needed to
+//! type-check the closure body (e.g., here it informs us that `T`
+//! outlives the late-bound region `'a`).
+//!
+//! Note that by delaying the gathering of implied bounds until all
+//! inference information is known, we may find relationships between
+//! bound regions and other regions in the environment. For example,
+//! when we first check a closure like the one expected as argument
+//! to `foo`:
+//!
+//! ```
+//! fn foo<U, F: for<'a> FnMut(&'a U)>(_f: F) {}
+//! ```
+//!
+//! the type of the closure's first argument would be `&'a ?U`. We
+//! might later infer `?U` to something like `&'b u32`, which would
+//! imply that `'b: 'a`.
+
+use crate::infer::outlives::components::{push_outlives_components, Component};
+use crate::infer::outlives::env::OutlivesEnvironment;
+use crate::infer::outlives::env::RegionBoundPairs;
+use crate::infer::outlives::verify::VerifyBoundCx;
+use crate::infer::{
+ self, GenericKind, InferCtxt, RegionObligation, SubregionOrigin, UndoLog, VerifyBound,
+};
+use crate::traits::{ObligationCause, ObligationCauseCode};
+use rustc_data_structures::undo_log::UndoLogs;
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::{self, Region, Ty, TyCtxt, TypeVisitable};
+use smallvec::smallvec;
+
+impl<'cx, 'tcx> InferCtxt<'cx, 'tcx> {
+ /// Registers that the given region obligation must be resolved
+ /// from within the scope of `body_id`. These regions are enqueued
+ /// and later processed by regionck, when full type information is
+ /// available (see `region_obligations` field for more
+ /// information).
+ #[instrument(level = "debug", skip(self))]
+ pub fn register_region_obligation(&self, obligation: RegionObligation<'tcx>) {
+ let mut inner = self.inner.borrow_mut();
+ inner.undo_log.push(UndoLog::PushRegionObligation);
+ inner.region_obligations.push(obligation);
+ }
+
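+    /// Convenience wrapper around `register_region_obligation` that derives
+    /// the `SubregionOrigin` from the given `ObligationCause`.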
+ pub fn register_region_obligation_with_cause(
+ &self,
+ sup_type: Ty<'tcx>,
+ sub_region: Region<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ ) {
+ let origin = SubregionOrigin::from_obligation_cause(cause, || {
+ infer::RelateParamBound(
+ cause.span,
+ sup_type,
+ match cause.code().peel_derives() {
+ ObligationCauseCode::BindingObligation(_, span) => Some(*span),
+ _ => None,
+ },
+ )
+ });
+
+ self.register_region_obligation(RegionObligation { sup_type, sub_region, origin });
+ }
+
+ /// Trait queries just want to pass back type obligations "as is"
+ pub fn take_registered_region_obligations(&self) -> Vec<RegionObligation<'tcx>> {
+ std::mem::take(&mut self.inner.borrow_mut().region_obligations)
+ }
+
+ /// NOTE: Prefer using [`InferCtxt::check_region_obligations_and_report_errors`]
+ /// instead of calling this directly.
+ ///
+ /// Process the region obligations that must be proven (during
+ /// `regionck`) for the given `body_id`, given information about
+ /// the region bounds in scope and so forth. This function must be
+ /// invoked for all relevant body-ids before region inference is
+ /// done (or else an assert will fire).
+ ///
+ /// See the `region_obligations` field of `InferCtxt` for some
+ /// comments about how this function fits into the overall expected
+ /// flow of the inferencer. The key point is that it is
+ /// invoked after all type-inference variables have been bound --
+ /// towards the end of regionck. This also ensures that the
+ /// region-bound-pairs are available (see comments above regarding
+ /// closures).
+ ///
+ /// # Parameters
+ ///
+ /// - `region_bound_pairs_map`: the set of region bounds implied by
+ /// the parameters and where-clauses. In particular, each pair
+ /// `('a, K)` in this list tells us that the bounds in scope
+ /// indicate that `K: 'a`, where `K` is either a generic
+ /// parameter like `T` or a projection like `T::Item`.
+ /// - `param_env` is the parameter environment for the enclosing function.
+ /// - `body_id` is the body-id whose region obligations are being
+ /// processed.
+ #[instrument(level = "debug", skip(self, region_bound_pairs))]
+ pub fn process_registered_region_obligations(
+ &self,
+ region_bound_pairs: &RegionBoundPairs<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) {
+ assert!(
+ !self.in_snapshot.get(),
+ "cannot process registered region obligations in a snapshot"
+ );
+
+ let my_region_obligations = self.take_registered_region_obligations();
+
+ for RegionObligation { sup_type, sub_region, origin } in my_region_obligations {
+ debug!(
+ "process_registered_region_obligations: sup_type={:?} sub_region={:?} origin={:?}",
+ sup_type, sub_region, origin
+ );
+
+ let sup_type = self.resolve_vars_if_possible(sup_type);
+
+ let outlives =
+ &mut TypeOutlives::new(self, self.tcx, &region_bound_pairs, None, param_env);
+ outlives.type_must_outlive(origin, sup_type, sub_region);
+ }
+ }
+
+    /// Processes registered region obligations and resolves regions, reporting
+ /// any errors if any were raised. Prefer using this function over manually
+ /// calling `resolve_regions_and_report_errors`.
+ pub fn check_region_obligations_and_report_errors(
+ &self,
+ generic_param_scope: LocalDefId,
+ outlives_env: &OutlivesEnvironment<'tcx>,
+ ) {
+ self.process_registered_region_obligations(
+ outlives_env.region_bound_pairs(),
+ outlives_env.param_env,
+ );
+
+ self.resolve_regions_and_report_errors(generic_param_scope, outlives_env)
+ }
+}
+
+/// The `TypeOutlives` struct has the job of "lowering" a `T: 'a`
+/// obligation into a series of `'a: 'b` constraints and "verify"s, as
+/// described on the module comment. The final constraints are emitted
+/// via a "delegate" of type `D` -- this is usually the `infcx`, which
+/// accrues them into the `region_obligations` code, but for NLL we
+/// use something else.
+pub struct TypeOutlives<'cx, 'tcx, D>
+where
+ D: TypeOutlivesDelegate<'tcx>,
+{
+ // See the comments on `process_registered_region_obligations` for the meaning
+ // of these fields.
+ delegate: D,
+ tcx: TyCtxt<'tcx>,
+ verify_bound: VerifyBoundCx<'cx, 'tcx>,
+}
+
+pub trait TypeOutlivesDelegate<'tcx> {
+ fn push_sub_region_constraint(
+ &mut self,
+ origin: SubregionOrigin<'tcx>,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ );
+
+ fn push_verify(
+ &mut self,
+ origin: SubregionOrigin<'tcx>,
+ kind: GenericKind<'tcx>,
+ a: ty::Region<'tcx>,
+ bound: VerifyBound<'tcx>,
+ );
+}
+
+impl<'cx, 'tcx, D> TypeOutlives<'cx, 'tcx, D>
+where
+ D: TypeOutlivesDelegate<'tcx>,
+{
+ pub fn new(
+ delegate: D,
+ tcx: TyCtxt<'tcx>,
+ region_bound_pairs: &'cx RegionBoundPairs<'tcx>,
+ implicit_region_bound: Option<ty::Region<'tcx>>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ Self {
+ delegate,
+ tcx,
+ verify_bound: VerifyBoundCx::new(
+ tcx,
+ region_bound_pairs,
+ implicit_region_bound,
+ param_env,
+ ),
+ }
+ }
+
+ /// Adds constraints to inference such that `T: 'a` holds (or
+ /// reports an error if it cannot).
+ ///
+ /// # Parameters
+ ///
+ /// - `origin`, the reason we need this constraint
+ /// - `ty`, the type `T`
+ /// - `region`, the region `'a`
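+ ///
+ /// Illustrative decomposition: for `T = (&'x u32, P)` with a type
+ /// parameter `P`, the outlives components are roughly:
+ ///
+ /// ```ignore (illustrative)
+ /// [Component::Region('x), Component::Param(P)]
+ /// ```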
+ pub fn type_must_outlive(
+ &mut self,
+ origin: infer::SubregionOrigin<'tcx>,
+ ty: Ty<'tcx>,
+ region: ty::Region<'tcx>,
+ ) {
+ debug!("type_must_outlive(ty={:?}, region={:?}, origin={:?})", ty, region, origin);
+
+ assert!(!ty.has_escaping_bound_vars());
+
+ let mut components = smallvec![];
+ push_outlives_components(self.tcx, ty, &mut components);
+ self.components_must_outlive(origin, &components, region);
+ }
+
+ fn components_must_outlive(
+ &mut self,
+ origin: infer::SubregionOrigin<'tcx>,
+ components: &[Component<'tcx>],
+ region: ty::Region<'tcx>,
+ ) {
+ for component in components.iter() {
+ let origin = origin.clone();
+ match component {
+ Component::Region(region1) => {
+ self.delegate.push_sub_region_constraint(origin, region, *region1);
+ }
+ Component::Param(param_ty) => {
+ self.param_ty_must_outlive(origin, region, *param_ty);
+ }
+ Component::Projection(projection_ty) => {
+ self.projection_must_outlive(origin, region, *projection_ty);
+ }
+ Component::EscapingProjection(subcomponents) => {
+ self.components_must_outlive(origin, &subcomponents, region);
+ }
+ Component::UnresolvedInferenceVariable(v) => {
+ // ignore this, we presume it will yield an error
+ // later, since if a type variable is not resolved by
+ // this point it never will be
+ self.tcx.sess.delay_span_bug(
+ origin.span(),
+ &format!("unresolved inference variable in outlives: {:?}", v),
+ );
+ }
+ }
+ }
+ }
+
+ fn param_ty_must_outlive(
+ &mut self,
+ origin: infer::SubregionOrigin<'tcx>,
+ region: ty::Region<'tcx>,
+ param_ty: ty::ParamTy,
+ ) {
+ debug!(
+ "param_ty_must_outlive(region={:?}, param_ty={:?}, origin={:?})",
+ region, param_ty, origin
+ );
+
+ let generic = GenericKind::Param(param_ty);
+ let verify_bound = self.verify_bound.generic_bound(generic);
+ self.delegate.push_verify(origin, generic, region, verify_bound);
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn projection_must_outlive(
+ &mut self,
+ origin: infer::SubregionOrigin<'tcx>,
+ region: ty::Region<'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ ) {
+ // This case is thorny for inference. The fundamental problem is
+ // that there are many cases where we have choice, and inference
+ // doesn't like choice (the current region inference in
+ // particular). :) First off, we have to choose between using the
+ // OutlivesProjectionEnv, OutlivesProjectionTraitDef, and
+ // OutlivesProjectionComponent rules, any one of which is
+ // sufficient. If there are no inference variables involved, it's
+ // not hard to pick the right rule, but if there are, we're in a
+ // bit of a catch-22: if we picked which rule we were going to
+ // use, we could add constraints to the region inference graph
+ // that make it apply, but if we don't add those constraints, the
+ // rule might not apply (but another rule might). For now, we err
+ // on the side of adding too few edges into the graph.
+
+ // Compute the bounds we can derive from the trait definition.
+ // These are guaranteed to apply, no matter the inference
+ // results.
+ let trait_bounds: Vec<_> =
+ self.verify_bound.projection_declared_bounds_from_trait(projection_ty).collect();
+
+ debug!(?trait_bounds);
+
+ // Compute the bounds we can derive from the environment. This
+ // is an "approximate" match -- in some cases, these bounds
+ // may not apply.
+ let mut approx_env_bounds =
+ self.verify_bound.projection_approx_declared_bounds_from_env(projection_ty);
+ debug!("projection_must_outlive: approx_env_bounds={:?}", approx_env_bounds);
+
+ // Remove outlives bounds that we get from the environment but
+ // which are also deducible from the trait. This arises (cc
+ // #55756) in cases where you have e.g., `<T as Foo<'a>>::Item:
+ // 'a` in the environment but `trait Foo<'b> { type Item: 'b
+ // }` in the trait definition.
+ approx_env_bounds.retain(|bound_outlives| {
+ // OK to skip binder because we only manipulate and compare against other
+ // values from the same binder; e.g., if we have `for<'a> <T as Trait<'a>>::Item: 'a`
+ // in `bound`, the `'a` will be a `^1` (bound, debruijn index == innermost) region.
+ // If the declaration is `trait Trait<'b> { type Item: 'b; }`, then `projection_declared_bounds_from_trait`
+ // will be invoked with `['b => ^1]` and so we will get `^1` returned.
+ let bound = bound_outlives.skip_binder();
+ match *bound.0.kind() {
+ ty::Projection(projection_ty) => self
+ .verify_bound
+ .projection_declared_bounds_from_trait(projection_ty)
+ .all(|r| r != bound.1),
+
+ _ => panic!("expected only projection types from env, not {:?}", bound.0),
+ }
+ });
+
+ // If the declared bounds list is empty, the only applicable rule is
+ // OutlivesProjectionComponent. If there are inference variables,
+ // then we can break down the outlives obligation into more primitive
+ // components without adding unnecessary edges.
+ //
+ // If there are *no* inference variables, however, we COULD do
+ // this, but we choose not to, because the error messages are less
+ // good. For example, a requirement like `T::Item: 'r` would be
+ // translated to a requirement that `T: 'r`; when this is reported
+ // to the user, it will thus say "T: 'r must hold so that T::Item:
+ // 'r holds". But that makes it sound like the only way to fix
+ // the problem is to add `T: 'r`, which isn't true. So, if there are no
+ // inference variables, we use a verify constraint instead of adding
+ // edges, which winds up enforcing the same condition.
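+ //
+ // Illustrative example: for `<T as Trait<'a>>::Item: 'r` with no
+ // declared bounds and unresolved inference variables, the loop below
+ // requires `'a: 'r` and, recursively, `T: 'r`.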
+ let needs_infer = projection_ty.needs_infer();
+ if approx_env_bounds.is_empty() && trait_bounds.is_empty() && needs_infer {
+ debug!("projection_must_outlive: no declared bounds");
+
+ for k in projection_ty.substs {
+ match k.unpack() {
+ GenericArgKind::Lifetime(lt) => {
+ self.delegate.push_sub_region_constraint(origin.clone(), region, lt);
+ }
+ GenericArgKind::Type(ty) => {
+ self.type_must_outlive(origin.clone(), ty, region);
+ }
+ GenericArgKind::Const(_) => {
+ // Const parameters don't impose constraints.
+ }
+ }
+ }
+
+ return;
+ }
+
+ // If we found a unique bound `'b` from the trait, and we
+ // found nothing else from the environment, then the best
+ // action is to require that `'b: 'r`, so do that.
+ //
+ // This is best no matter what rule we use:
+ //
+ // - OutlivesProjectionEnv: these would translate to the requirement that `'b:'r`
+ // - OutlivesProjectionTraitDef: these would translate to the requirement that `'b:'r`
+ // - OutlivesProjectionComponent: this would require `'b:'r`
+ // in addition to other conditions
+ if !trait_bounds.is_empty()
+ && trait_bounds[1..]
+ .iter()
+ .map(|r| Some(*r))
+ .chain(
+ // NB: The environment may contain `for<'a> T: 'a` style bounds.
+ // In that case, we don't know if they are equal to the trait bound
+ // or not (since we don't *know* whether the environment bound even applies),
+ // so just map to `None` here if there are bound vars, ensuring that
+ // the call to `all` will fail below.
+ approx_env_bounds.iter().map(|b| b.map_bound(|b| b.1).no_bound_vars()),
+ )
+ .all(|b| b == Some(trait_bounds[0]))
+ {
+ let unique_bound = trait_bounds[0];
+ debug!("projection_must_outlive: unique trait bound = {:?}", unique_bound);
+ debug!("projection_must_outlive: unique declared bound appears in trait ref");
+ self.delegate.push_sub_region_constraint(origin, region, unique_bound);
+ return;
+ }
+
+ // Fallback to verifying after the fact that there exists a
+ // declared bound, or that all the components appearing in the
+ // projection outlive; in some cases, this may add insufficient
+ // edges into the inference graph, leading to inference failures
+ // even though a satisfactory solution exists.
+ let generic = GenericKind::Projection(projection_ty);
+ let verify_bound = self.verify_bound.generic_bound(generic);
+ debug!("projection_must_outlive: pushing {:?}", verify_bound);
+ self.delegate.push_verify(origin, generic, region, verify_bound);
+ }
+}
+
+impl<'cx, 'tcx> TypeOutlivesDelegate<'tcx> for &'cx InferCtxt<'cx, 'tcx> {
+ fn push_sub_region_constraint(
+ &mut self,
+ origin: SubregionOrigin<'tcx>,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) {
+ self.sub_regions(origin, a, b)
+ }
+
+ fn push_verify(
+ &mut self,
+ origin: SubregionOrigin<'tcx>,
+ kind: GenericKind<'tcx>,
+ a: ty::Region<'tcx>,
+ bound: VerifyBound<'tcx>,
+ ) {
+ self.verify_generic_bound(origin, kind, a, bound)
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/outlives/test_type_match.rs b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
new file mode 100644
index 000000000..772e297b7
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/outlives/test_type_match.rs
@@ -0,0 +1,207 @@
+use std::collections::hash_map::Entry;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::TypeVisitable;
+use rustc_middle::ty::{
+ self,
+ error::TypeError,
+ relate::{self, Relate, RelateResult, TypeRelation},
+ Ty, TyCtxt,
+};
+
+use crate::infer::region_constraints::VerifyIfEq;
+
+/// Given a "verify-if-eq" type test like:
+///
+/// exists<'a...> {
+/// verify_if_eq(some_type, bound_region)
+/// }
+///
+/// and the type `test_ty` that the type test is being tested against,
+/// returns:
+///
+/// * `None` if `some_type` cannot be made equal to `test_ty`,
+/// no matter the values of the variables in `exists`.
+/// * `Some(r)` with a suitable bound (typically the value of `bound_region`, modulo
+/// any bound existential variables, which will be substituted) for the
+/// type under test.
+///
+/// NB: This function uses a simplistic, syntactic version of type equality.
+/// In other words, it may spuriously return `None` even if the type-under-test
+/// is in fact equal to `some_type`. In practice, though, this is used on types
+/// that are either projections like `T::Item` or type parameters like `T`,
+/// where it works fine, but it could have trouble when complex types with
+/// higher-ranked binders and the like are used. This is a particular
+/// challenge since this function is invoked very late in inference and
+/// hence cannot make use of the normal inference
+/// machinery.
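+///
+/// Illustrative example:
+///
+/// ```ignore (pseudo-rust)
+/// // test:    exists<'a> { verify_if_eq(<T as Trait<'a>>::Item, 'a) }
+/// // test_ty: <T as Trait<'x>>::Item
+/// // matching binds 'a := 'x, so `Some('x)` is returned; if 'a were
+/// // left unconstrained, `Some('static)` would be returned instead.
+/// ```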
+#[tracing::instrument(level = "debug", skip(tcx, param_env))]
+pub fn extract_verify_if_eq<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ verify_if_eq_b: &ty::Binder<'tcx, VerifyIfEq<'tcx>>,
+ test_ty: Ty<'tcx>,
+) -> Option<ty::Region<'tcx>> {
+ assert!(!verify_if_eq_b.has_escaping_bound_vars());
+ let mut m = Match::new(tcx, param_env);
+ let verify_if_eq = verify_if_eq_b.skip_binder();
+ m.relate(verify_if_eq.ty, test_ty).ok()?;
+
+ if let ty::RegionKind::ReLateBound(depth, br) = verify_if_eq.bound.kind() {
+ assert!(depth == ty::INNERMOST);
+ match m.map.get(&br) {
+ Some(&r) => Some(r),
+ None => {
+ // If there is no mapping, then this region is unconstrained.
+ // In that case, we escalate to `'static`.
+ Some(tcx.lifetimes.re_static)
+ }
+ }
+ } else {
+ // The region does not contain any bound variables, so we don't need
+ // to do any substitution.
+ //
+ // Example:
+ //
+ // for<'a> <T as Foo<'a>>::Item: 'b
+ //
+ // In this case, we've now matched and found a value for
+ // `'a`, but it doesn't affect the bound `'b`.
+ Some(verify_if_eq.bound)
+ }
+}
+
+/// True if a (potentially higher-ranked) outlives predicate could
+/// match `erased_ty`, i.e., if its type portion is syntactically equal
+/// to `erased_ty` once regions are erased.
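+///
+/// ```ignore (illustrative)
+/// // predicate: for<'a> &'a T: 'b   -- its type portion erases to `&T`
+/// // erased_ty: &T                  => true (syntactic match)
+/// ```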
+#[tracing::instrument(level = "debug", skip(tcx, param_env))]
+pub(super) fn can_match_erased_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ outlives_predicate: ty::Binder<'tcx, ty::TypeOutlivesPredicate<'tcx>>,
+ erased_ty: Ty<'tcx>,
+) -> bool {
+ assert!(!outlives_predicate.has_escaping_bound_vars());
+ let erased_outlives_predicate = tcx.erase_regions(outlives_predicate);
+ let outlives_ty = erased_outlives_predicate.skip_binder().0;
+ if outlives_ty == erased_ty {
+ // pointless micro-optimization
+ true
+ } else {
+ Match::new(tcx, param_env).relate(outlives_ty, erased_ty).is_ok()
+ }
+}
+
+struct Match<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ pattern_depth: ty::DebruijnIndex,
+ map: FxHashMap<ty::BoundRegion, ty::Region<'tcx>>,
+}
+
+impl<'tcx> Match<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Match<'tcx> {
+ Match { tcx, param_env, pattern_depth: ty::INNERMOST, map: FxHashMap::default() }
+ }
+}
+
+impl<'tcx> Match<'tcx> {
+ /// Creates the "Error" variant that signals "no match".
+ fn no_match<T>(&self) -> RelateResult<'tcx, T> {
+ Err(TypeError::Mismatch)
+ }
+
+ /// Binds the pattern variable `br` to `value`; returns an `Err` if the pattern
+ /// is already bound to a different value.
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn bind(
+ &mut self,
+ br: ty::BoundRegion,
+ value: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ match self.map.entry(br) {
+ Entry::Occupied(entry) => {
+ if *entry.get() == value {
+ Ok(value)
+ } else {
+ self.no_match()
+ }
+ }
+ Entry::Vacant(entry) => {
+ entry.insert(value);
+ Ok(value)
+ }
+ }
+ }
+}
+
+impl<'tcx> TypeRelation<'tcx> for Match<'tcx> {
+ fn tag(&self) -> &'static str {
+ "Match"
+ }
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+ fn a_is_expected(&self) -> bool {
+ true
+ } // irrelevant
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ _: ty::Variance,
+ _: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ self.relate(a, b)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn regions(
+ &mut self,
+ pattern: ty::Region<'tcx>,
+ value: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug!("self.pattern_depth = {:?}", self.pattern_depth);
+ if let ty::RegionKind::ReLateBound(depth, br) = pattern.kind() && depth == self.pattern_depth {
+ self.bind(br, value)
+ } else if pattern == value {
+ Ok(pattern)
+ } else {
+ self.no_match()
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn tys(&mut self, pattern: Ty<'tcx>, value: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ if pattern == value { Ok(pattern) } else { relate::super_relate_tys(self, pattern, value) }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn consts(
+ &mut self,
+ pattern: ty::Const<'tcx>,
+ value: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ debug!("{}.consts({:?}, {:?})", self.tag(), pattern, value);
+ if pattern == value {
+ Ok(pattern)
+ } else {
+ relate::super_relate_consts(self, pattern, value)
+ }
+ }
+
+ fn binders<T>(
+ &mut self,
+ pattern: ty::Binder<'tcx, T>,
+ value: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ self.pattern_depth.shift_in(1);
+ let result = Ok(pattern.rebind(self.relate(pattern.skip_binder(), value.skip_binder())?));
+ self.pattern_depth.shift_out(1);
+ result
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/outlives/verify.rs b/compiler/rustc_infer/src/infer/outlives/verify.rs
new file mode 100644
index 000000000..c7d7ef40d
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/outlives/verify.rs
@@ -0,0 +1,373 @@
+use crate::infer::outlives::components::{compute_components_recursive, Component};
+use crate::infer::outlives::env::RegionBoundPairs;
+use crate::infer::region_constraints::VerifyIfEq;
+use crate::infer::{GenericKind, VerifyBound};
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::sso::SsoHashSet;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::subst::{GenericArg, Subst};
+use rustc_middle::ty::{self, EarlyBinder, OutlivesPredicate, Ty, TyCtxt};
+
+use smallvec::smallvec;
+
+/// The `VerifyBoundCx` struct computes a `VerifyBound` for a generic
+/// `G` -- a test, described in the module comment, that can be
+/// evaluated once inference is complete to check that `G: 'min`
+/// holds. The bounds are derived from the where-clauses in scope
+/// and from the bounds declared in trait definitions.
+pub struct VerifyBoundCx<'cx, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ region_bound_pairs: &'cx RegionBoundPairs<'tcx>,
+ /// During borrowck, if there are no outlives bounds on a generic
+ /// parameter `T`, we assume that `T: 'in_fn_body` holds.
+ ///
+ /// Outside of borrowck the only way to prove `T: '?0` is by
+ /// setting `'?0` to `'empty`.
+ implicit_region_bound: Option<ty::Region<'tcx>>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'cx, 'tcx> VerifyBoundCx<'cx, 'tcx> {
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ region_bound_pairs: &'cx RegionBoundPairs<'tcx>,
+ implicit_region_bound: Option<ty::Region<'tcx>>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ Self { tcx, region_bound_pairs, implicit_region_bound, param_env }
+ }
+
+ /// Returns a "verify bound" that encodes what we know about
+ /// `generic` and the regions it outlives.
+ pub fn generic_bound(&self, generic: GenericKind<'tcx>) -> VerifyBound<'tcx> {
+ let mut visited = SsoHashSet::new();
+ match generic {
+ GenericKind::Param(param_ty) => self.param_bound(param_ty),
+ GenericKind::Projection(projection_ty) => {
+ self.projection_bound(projection_ty, &mut visited)
+ }
+ }
+ }
+
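+ /// Illustrative sketch of the result: for a parameter `T` with
+ /// `T: 'a` in the environment and an implicit region bound `'body`,
+ /// this returns roughly:
+ ///
+ /// ```ignore (illustrative)
+ /// VerifyBound::AnyBound(vec![OutlivedBy('a), OutlivedBy('body)])
+ /// ```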
+ fn param_bound(&self, param_ty: ty::ParamTy) -> VerifyBound<'tcx> {
+ debug!("param_bound(param_ty={:?})", param_ty);
+
+ // Start with anything like `T: 'a` we can scrape from the
+ // environment. If the environment contains something like
+ // `for<'a> T: 'a`, then we know that `T` outlives everything.
+ let declared_bounds_from_env = self.declared_generic_bounds_from_env(param_ty);
+ let mut param_bounds = vec![];
+ for declared_bound in declared_bounds_from_env {
+ let bound_region = declared_bound.map_bound(|outlives| outlives.1);
+ if let Some(region) = bound_region.no_bound_vars() {
+ // This is `T: 'a` for some free region `'a`.
+ param_bounds.push(VerifyBound::OutlivedBy(region));
+ } else {
+ // This is `for<'a> T: 'a`. This means that `T` outlives everything! All done here.
+ return VerifyBound::AllBounds(vec![]);
+ }
+ }
+
+ // Add in the default bound of fn body that applies to all in
+ // scope type parameters:
+ if let Some(r) = self.implicit_region_bound {
+ param_bounds.push(VerifyBound::OutlivedBy(r));
+ }
+
+ if param_bounds.is_empty() {
+ // We know that all types `T` outlive `'empty`, so if we
+ // can find no other bound, then check that the region
+ // being tested is `'empty`.
+ VerifyBound::IsEmpty
+ } else if param_bounds.len() == 1 {
+ // Micro-opt: no need to store the vector if it's just len 1
+ param_bounds.pop().unwrap()
+ } else {
+ // If we can find any other bound `R` such that `T: R`, then
+ // we don't need to check for `'empty`, because `R: 'empty`.
+ VerifyBound::AnyBound(param_bounds)
+ }
+ }
+
+ /// Given a projection like `T::Item`, searches the environment
+ /// for where-clauses like `T::Item: 'a`. Returns the set of
+ /// regions `'a` that it finds.
+ ///
+ /// This is an "approximate" check -- it may not find all
+ /// applicable bounds, and not all the bounds it returns can be
+ /// relied upon. In particular, this check ignores region
+ /// identity. So, for example, if we have `<T as
+ /// Trait<'0>>::Item` where `'0` is a region variable, and the
+ /// user has `<T as Trait<'a>>::Item: 'b` in the environment, then
+ /// the clause from the environment only applies if `'0 = 'a`,
+ /// which we don't know yet. But we would still include `'b` in
+ /// this list.
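+ ///
+ /// ```ignore (illustrative)
+ /// // env:   <T as Trait<'a>>::Item: 'b
+ /// // query: <T as Trait<'0>>::Item    ('0 is an inference variable)
+ /// // result includes 'b -- approximate, since it only applies if '0 == 'a
+ /// ```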
+ pub fn projection_approx_declared_bounds_from_env(
+ &self,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ ) -> Vec<ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>> {
+ let projection_ty = GenericKind::Projection(projection_ty).to_ty(self.tcx);
+ let erased_projection_ty = self.tcx.erase_regions(projection_ty);
+ self.declared_generic_bounds_from_env_for_erased_ty(erased_projection_ty)
+ }
+
+ /// Searches the where-clauses in scope for regions that
+ /// `projection_ty` is known to outlive. Currently requires an
+ /// exact match.
+ pub fn projection_declared_bounds_from_trait(
+ &self,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ ) -> impl Iterator<Item = ty::Region<'tcx>> + 'cx + Captures<'tcx> {
+ self.declared_projection_bounds_from_trait(projection_ty)
+ }
+
+ pub fn projection_bound(
+ &self,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ visited: &mut SsoHashSet<GenericArg<'tcx>>,
+ ) -> VerifyBound<'tcx> {
+ debug!("projection_bound(projection_ty={:?})", projection_ty);
+
+ let projection_ty_as_ty =
+ self.tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs);
+
+ // Search the env for where clauses like `P: 'a`.
+ let env_bounds = self
+ .projection_approx_declared_bounds_from_env(projection_ty)
+ .into_iter()
+ .map(|binder| {
+ if let Some(ty::OutlivesPredicate(ty, r)) = binder.no_bound_vars() && ty == projection_ty_as_ty {
+ // Micro-optimize if this is an exact match (this
+ // occurs often when there are no region variables
+ // involved).
+ VerifyBound::OutlivedBy(r)
+ } else {
+ let verify_if_eq_b = binder.map_bound(|ty::OutlivesPredicate(ty, bound)| VerifyIfEq { ty, bound });
+ VerifyBound::IfEq(verify_if_eq_b)
+ }
+ });
+
+ // Extend with bounds that we can find from the trait.
+ let trait_bounds = self
+ .projection_declared_bounds_from_trait(projection_ty)
+ .map(|r| VerifyBound::OutlivedBy(r));
+
+ // see the extensive comment in projection_must_outlive
+ let recursive_bound = {
+ let mut components = smallvec![];
+ let ty = self.tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs);
+ compute_components_recursive(self.tcx, ty.into(), &mut components, visited);
+ self.bound_from_components(&components, visited)
+ };
+
+ VerifyBound::AnyBound(env_bounds.chain(trait_bounds).collect()).or(recursive_bound)
+ }
+
+ fn bound_from_components(
+ &self,
+ components: &[Component<'tcx>],
+ visited: &mut SsoHashSet<GenericArg<'tcx>>,
+ ) -> VerifyBound<'tcx> {
+ let mut bounds = components
+ .iter()
+ .map(|component| self.bound_from_single_component(component, visited))
+ .filter(|bound| {
+ // Remove bounds that must hold, since they are not interesting.
+ !bound.must_hold()
+ });
+
+ match (bounds.next(), bounds.next()) {
+ (Some(first), None) => first,
+ (first, second) => {
+ VerifyBound::AllBounds(first.into_iter().chain(second).chain(bounds).collect())
+ }
+ }
+ }
+
+ fn bound_from_single_component(
+ &self,
+ component: &Component<'tcx>,
+ visited: &mut SsoHashSet<GenericArg<'tcx>>,
+ ) -> VerifyBound<'tcx> {
+ match *component {
+ Component::Region(lt) => VerifyBound::OutlivedBy(lt),
+ Component::Param(param_ty) => self.param_bound(param_ty),
+ Component::Projection(projection_ty) => self.projection_bound(projection_ty, visited),
+ Component::EscapingProjection(ref components) => {
+ self.bound_from_components(components, visited)
+ }
+ Component::UnresolvedInferenceVariable(v) => {
+ // ignore this, we presume it will yield an error
+ // later, since if a type variable is not resolved by
+ // this point it never will be
+ self.tcx.sess.delay_span_bug(
+ rustc_span::DUMMY_SP,
+ &format!("unresolved inference variable in outlives: {:?}", v),
+ );
+ // add a bound that never holds
+ VerifyBound::AnyBound(vec![])
+ }
+ }
+ }
+
+ /// Searches the environment for where-clauses like `G: 'a` where
+ /// `G` is either some type parameter `T` or a projection like
+ /// `T::Item`. Returns a vector of the `'a` bounds it can find.
+ ///
+ /// This is a conservative check -- it may not find all applicable
+ /// bounds, but all the bounds it returns can be relied upon.
+ fn declared_generic_bounds_from_env(
+ &self,
+ param_ty: ty::ParamTy,
+ ) -> Vec<ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>> {
+ let generic_ty = param_ty.to_ty(self.tcx);
+ self.declared_generic_bounds_from_env_for_erased_ty(generic_ty)
+ }
+
+ /// Searches the environment to find all bounds that apply to `erased_ty`.
+ /// Obviously these must be approximate -- they are in fact both *over*-
+ /// and *under*-approximated:
+ ///
+ /// * Over-approximated because we erase regions, so bounds that match
+ /// the erased type may mention regions that do not actually apply.
+ /// * Under-approximated because we look for syntactic equality, so for
+ /// complex types like `<T as Foo<fn(&u32, &u32)>>::Item` we may fail
+ /// to figure out all the subtleties.
+ ///
+ /// In some cases, such as when `erased_ty` represents a `ty::Param`, however,
+ /// the result is precise.
+ fn declared_generic_bounds_from_env_for_erased_ty(
+ &self,
+ erased_ty: Ty<'tcx>,
+ ) -> Vec<ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>> {
+ let tcx = self.tcx;
+
+ // To start, collect bounds from user environment. Note that
+ // parameter environments are already elaborated, so we don't
+ // have to worry about that.
+ let c_b = self.param_env.caller_bounds();
+ let param_bounds = self.collect_outlives_from_predicate_list(erased_ty, c_b.into_iter());
+
+ // Next, collect regions we scraped from the well-formedness
+ // constraints in the fn signature. To do that, we walk the list
+ // of known relations from the fn ctxt.
+ //
+ // This is crucial because otherwise code like this fails:
+ //
+ // fn foo<'a, A>(x: &'a A) { x.bar() }
+ //
+ // The problem is that the type of `x` is `&'a A`. To be
+ // well-formed, then, A must outlive `'a`, but we don't know that
+ // this holds from first principles.
+ let from_region_bound_pairs =
+ self.region_bound_pairs.iter().filter_map(|&OutlivesPredicate(p, r)| {
+ debug!(
+ "declared_generic_bounds_from_env_for_erased_ty: region_bound_pair = {:?}",
+ (r, p)
+ );
+ let p_ty = p.to_ty(tcx);
+ let erased_p_ty = self.tcx.erase_regions(p_ty);
+ (erased_p_ty == erased_ty)
+ .then_some(ty::Binder::dummy(ty::OutlivesPredicate(p.to_ty(tcx), r)))
+ });
+
+ param_bounds
+ .chain(from_region_bound_pairs)
+ .inspect(|bound| {
+ debug!(
+ "declared_generic_bounds_from_env_for_erased_ty: result predicate = {:?}",
+ bound
+ )
+ })
+ .collect()
+ }
+
+ /// Given a projection like `<T as Foo<'x>>::Bar`, returns any bounds
+ /// declared in the trait definition. For example, if the trait were
+ ///
+ /// ```rust
+ /// trait Foo<'a> {
+ /// type Bar: 'a;
+ /// }
+ /// ```
+ ///
+ /// then this function would return `'x`. This is subject to the
+ /// limitations around higher-ranked bounds described in
+ /// `region_bounds_declared_on_associated_item`.
+ fn declared_projection_bounds_from_trait(
+ &self,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ ) -> impl Iterator<Item = ty::Region<'tcx>> + 'cx + Captures<'tcx> {
+ debug!("projection_bounds(projection_ty={:?})", projection_ty);
+ let tcx = self.tcx;
+ self.region_bounds_declared_on_associated_item(projection_ty.item_def_id)
+ .map(move |r| EarlyBinder(r).subst(tcx, projection_ty.substs))
+ }
+
+ /// Given the `DefId` of an associated item, returns any region
+ /// bounds attached to that associated item from the trait definition.
+ ///
+ /// For example:
+ ///
+ /// ```rust
+ /// trait Foo<'a> {
+ /// type Bar: 'a;
+ /// }
+ /// ```
+ ///
+ /// If we were given the `DefId` of `Foo::Bar`, we would return
+ /// `'a`. You could then apply the substitutions from the
+ /// projection to convert this into your namespace. This also
+ /// works if the user writes `where <Self as Foo<'a>>::Bar: 'a` on
+ /// the trait. In fact, it works by searching for just such a
+ /// where-clause.
+ ///
+ /// It will not, however, work for higher-ranked bounds like:
+ ///
+ /// ```compile_fail,E0311
+ /// trait Foo<'a, 'b>
+ /// where for<'x> <Self as Foo<'x, 'b>>::Bar: 'x
+ /// {
+ /// type Bar;
+ /// }
+ /// ```
+ ///
+ /// This is for simplicity, and because we are not really smart
+ /// enough to cope with such bounds anywhere.
+ fn region_bounds_declared_on_associated_item(
+ &self,
+ assoc_item_def_id: DefId,
+ ) -> impl Iterator<Item = ty::Region<'tcx>> {
+ let tcx = self.tcx;
+ let bounds = tcx.item_bounds(assoc_item_def_id);
+ bounds
+ .into_iter()
+ .filter_map(|p| p.to_opt_type_outlives())
+ .filter_map(|p| p.no_bound_vars())
+ .map(|b| b.1)
+ }
+
+ /// Searches through a predicate list for a predicate `T: 'a`.
+ ///
+ /// Careful: does not elaborate predicates, and just uses `==`
+ /// when comparing `ty` for equality, so `ty` must be something
+ /// that does not involve inference variables and where you
+ /// otherwise want a precise match.
+ fn collect_outlives_from_predicate_list(
+ &self,
+ erased_ty: Ty<'tcx>,
+ predicates: impl Iterator<Item = ty::Predicate<'tcx>>,
+ ) -> impl Iterator<Item = ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>>
+ {
+ let tcx = self.tcx;
+ let param_env = self.param_env;
+ predicates.filter_map(|p| p.to_opt_type_outlives()).filter(move |outlives_predicate| {
+ super::test_type_match::can_match_erased_ty(
+ tcx,
+ param_env,
+ *outlives_predicate,
+ erased_ty,
+ )
+ })
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/projection.rs b/compiler/rustc_infer/src/infer/projection.rs
new file mode 100644
index 000000000..b45a6514d
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/projection.rs
@@ -0,0 +1,40 @@
+use rustc_middle::traits::ObligationCause;
+use rustc_middle::ty::{self, ToPredicate, Ty};
+
+use crate::traits::{Obligation, PredicateObligation};
+
+use super::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use super::InferCtxt;
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ /// Instead of normalizing an associated type projection,
+ /// this function generates an inference variable and registers
+ /// an obligation that this inference variable must be the result
+ /// of the given projection. This allows us to proceed with projections
+ /// while they cannot be resolved yet due to missing information or
+ /// simply due to the lack of access to the trait resolution machinery.
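+ ///
+ /// Illustrative sketch: asked about `<T as Iterator>::Item`, this
+ /// returns a fresh variable `?X` and registers the obligation that
+ /// the projection equals `?X`:
+ ///
+ /// ```ignore (pseudo-rust)
+ /// // obligations gains: `<T as Iterator>::Item == ?X`
+ /// let item_ty = infcx.infer_projection(param_env, projection_ty, cause, 0, &mut obligations);
+ /// ```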
+ pub fn infer_projection(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ cause: ObligationCause<'tcx>,
+ recursion_depth: usize,
+ obligations: &mut Vec<PredicateObligation<'tcx>>,
+ ) -> Ty<'tcx> {
+ let def_id = projection_ty.item_def_id;
+ let ty_var = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::NormalizeProjectionType,
+ span: self.tcx.def_span(def_id),
+ });
+ let projection =
+ ty::Binder::dummy(ty::ProjectionPredicate { projection_ty, term: ty_var.into() });
+ let obligation = Obligation::with_depth(
+ cause,
+ recursion_depth,
+ param_env,
+ projection.to_predicate(self.tcx),
+ );
+ obligations.push(obligation);
+ ty_var
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/region_constraints/README.md b/compiler/rustc_infer/src/infer/region_constraints/README.md
new file mode 100644
index 000000000..0231dd066
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/region_constraints/README.md
@@ -0,0 +1,3 @@
+For info on how the current borrowck works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
diff --git a/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs b/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs
new file mode 100644
index 000000000..397efe6ee
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/region_constraints/leak_check.rs
@@ -0,0 +1,447 @@
+use super::*;
+use crate::infer::CombinedSnapshot;
+use rustc_data_structures::{
+ graph::{scc::Sccs, vec_graph::VecGraph},
+ undo_log::UndoLogs,
+};
+use rustc_index::vec::Idx;
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::relate::RelateResult;
+
+impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
+ /// Searches new universes created during `snapshot`, looking for
+ /// placeholders that may "leak" out from the universes they are contained
+ /// in. If any leaking placeholders are found, then an `Err` is returned
+ /// (typically leading to the snapshot being reversed).
+ ///
+ /// The leak check *used* to be the only way we had to handle higher-ranked
+ /// obligations. Now that we have integrated universes into the region
+ /// solvers, this is no longer the case, but we retain the leak check for
+ /// backwards compatibility purposes. In particular, it lets us make "early"
+ /// decisions about whether a region error will be reported that are used in
+ /// coherence and elsewhere -- see #56105 and #59490 for more details. The
+ /// eventual fate of the leak checker is not yet settled.
+ ///
+ /// The leak checker works by searching for the following error patterns:
+ ///
+ /// * P1: P2, where P1 != P2
+ /// * P1: R, where R is in some universe that cannot name P1
+ ///
+ /// The idea here is that each of these patterns represents something that
+ /// the region solver would eventually report as an error, so we can detect
+ /// the error early. There is a fly in the ointment, though, in that this is
+ /// not entirely true. In particular, in the future, we may extend the
+ /// environment with implied bounds or other info about how placeholders
+ /// relate to regions in outer universes. In that case, `P1: R` for example
+ /// might become solvable.
+ ///
+ /// # Summary of the implementation
+ ///
+ /// The leak check proceeds as follows. First, we construct a graph where
+ /// `R2: R1` implies `R2 -> R1`, and we compute the SCCs.
+ ///
+ /// For each SCC S, we compute:
+ ///
+ /// * what placeholder P it must be equal to, if any
+ /// * if there are multiple placeholders that must be equal, report an error because `P1: P2`
+ /// * the minimum universe of its constituents
+ ///
+ /// Then we walk the SCCs in dependency order and compute
+ ///
+ /// * what placeholder they must outlive transitively
+ /// * if they must also be equal to a placeholder, report an error because `P1: P2`
+ /// * minimum universe U of all SCCs they must outlive
+ /// * if they must also be equal to a placeholder P, and U cannot name P, report an error, as that
+ /// indicates `P: R` and `R` is in an incompatible universe
+ ///
+ /// # Historical note
+ ///
+ /// Older variants of the leak check used to report errors for these
+ /// patterns, but we no longer do:
+ ///
+ /// * R: P1, even if R cannot name P1, because R = 'static is a valid solution
+ /// * R: P1, R: P2, as above
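+ ///
+ /// Illustrative sketch of the two error patterns in constraint form:
+ ///
+ /// ```ignore (pseudo-rust)
+ /// // P1, P2: placeholders from universes created during the snapshot
+ /// // R: a region whose universe cannot name P1
+ /// P1: P2 // error -- two distinct placeholders forced to be related
+ /// P1: R // error -- R cannot name P1
+ /// ```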
+ pub fn leak_check(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ overly_polymorphic: bool,
+ max_universe: ty::UniverseIndex,
+ snapshot: &CombinedSnapshot<'_, 'tcx>,
+ ) -> RelateResult<'tcx, ()> {
+ debug!(
+ "leak_check(max_universe={:?}, snapshot.universe={:?}, overly_polymorphic={:?})",
+ max_universe, snapshot.universe, overly_polymorphic
+ );
+
+ assert!(UndoLogs::<super::UndoLog<'_>>::in_snapshot(&self.undo_log));
+
+ let universe_at_start_of_snapshot = snapshot.universe;
+ if universe_at_start_of_snapshot == max_universe {
+ return Ok(());
+ }
+
+ let mini_graph =
+ &MiniGraph::new(tcx, self.undo_log.region_constraints(), &self.storage.data.verifys);
+
+ let mut leak_check = LeakCheck::new(
+ tcx,
+ universe_at_start_of_snapshot,
+ max_universe,
+ overly_polymorphic,
+ mini_graph,
+ self,
+ );
+ leak_check.assign_placeholder_values()?;
+ leak_check.propagate_scc_value()?;
+ Ok(())
+ }
+}
+
+struct LeakCheck<'me, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ universe_at_start_of_snapshot: ty::UniverseIndex,
+ /// Only used when reporting region errors.
+ overly_polymorphic: bool,
+ mini_graph: &'me MiniGraph<'tcx>,
+ rcc: &'me RegionConstraintCollector<'me, 'tcx>,
+
+ // Initially, for each SCC S, stores a placeholder `P` such that `S = P`
+ // must hold.
+ //
+ // Later, during the [`LeakCheck::propagate_scc_value`] function, this array
+ // is repurposed to store some placeholder `P` such that the weaker
+ // condition `S: P` must hold. (This is true if `S: S1` transitively and `S1
+ // = P`.)
+ scc_placeholders: IndexVec<LeakCheckScc, Option<ty::PlaceholderRegion>>,
+
+ // For each SCC S, track the minimum universe that flows into it. This is
+ // the minimum of the universes for every region that is a member of the
+ // SCC, but also, if you have `R1: R2`, then the universe of `R2` must be
+ // less than the universe of `R1` (i.e., `R1` flows into `R2`). To
+ // see that, imagine that you have `P1: R` -- in that case, `R` must be
+ // either the placeholder `P1` or the empty region in that same universe.
+ //
+ // To detect errors, we look for an SCC S where the placeholder recorded in
+ // `scc_placeholders[S]` (if any) cannot be named from `scc_universes[S]`.
+ scc_universes: IndexVec<LeakCheckScc, SccUniverse<'tcx>>,
+}
+
+impl<'me, 'tcx> LeakCheck<'me, 'tcx> {
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ universe_at_start_of_snapshot: ty::UniverseIndex,
+ max_universe: ty::UniverseIndex,
+ overly_polymorphic: bool,
+ mini_graph: &'me MiniGraph<'tcx>,
+ rcc: &'me RegionConstraintCollector<'me, 'tcx>,
+ ) -> Self {
+ let dummy_scc_universe = SccUniverse { universe: max_universe, region: None };
+ Self {
+ tcx,
+ universe_at_start_of_snapshot,
+ overly_polymorphic,
+ mini_graph,
+ rcc,
+ scc_placeholders: IndexVec::from_elem_n(None, mini_graph.sccs.num_sccs()),
+ scc_universes: IndexVec::from_elem_n(dummy_scc_universe, mini_graph.sccs.num_sccs()),
+ }
+ }
+
+ /// Compute what placeholders (if any) each SCC must be equal to.
+ /// Also compute the minimum universe of all the regions in each SCC.
+ fn assign_placeholder_values(&mut self) -> RelateResult<'tcx, ()> {
+ // First walk: find each placeholder that is from a newly created universe.
+ for (region, leak_check_node) in &self.mini_graph.nodes {
+ let scc = self.mini_graph.sccs.scc(*leak_check_node);
+
+ // Set the universe of each SCC to be the minimum of its constituent universes
+ let universe = self.rcc.universe(*region);
+ debug!(
+ "assign_placeholder_values: scc={:?} universe={:?} region={:?}",
+ scc, universe, region
+ );
+ self.scc_universes[scc].take_min(universe, *region);
+
+ // Detect those SCCs that directly contain a placeholder
+ if let ty::RePlaceholder(placeholder) = **region {
+ if self.universe_at_start_of_snapshot.cannot_name(placeholder.universe) {
+ self.assign_scc_value(scc, placeholder)?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ // assign_scc_value(S, P): Update `scc_placeholders` to record that `S = P`
+ // must hold. This may create an error if a different placeholder was
+ // already recorded for `S`.
+ fn assign_scc_value(
+ &mut self,
+ scc: LeakCheckScc,
+ placeholder: ty::PlaceholderRegion,
+ ) -> RelateResult<'tcx, ()> {
+ match self.scc_placeholders[scc] {
+ Some(p) => {
+ assert_ne!(p, placeholder);
+ return Err(self.placeholder_error(p, placeholder));
+ }
+ None => {
+ self.scc_placeholders[scc] = Some(placeholder);
+ }
+ };
+
+ Ok(())
+ }
+
+ /// Iterates over each SCC `S` in dependency order and, for each successor
+ /// `S1` where `S: S1`, propagates the minimum universe and any bounding
+ /// placeholder from `S1` into `S` (ensuring, in particular, that
+ /// `universe(S) <= universe(S1)`). This executes after
+ /// `assign_placeholder_values`, so `universe(S)` starts as the minimum
+ /// universe of any of `S`'s direct constituents.
+ fn propagate_scc_value(&mut self) -> RelateResult<'tcx, ()> {
+ // Loop invariants:
+ //
+ // On start of the loop iteration for `scc1`:
+ //
+ // * `scc_universes[scc1]` contains the minimum universe of the
+ // constituents of `scc1`
+ // * `scc_placeholder[scc1]` stores the placeholder that `scc1` must
+ // be equal to (if any)
+ //
+ // For each successor `scc2` where `scc1: scc2`:
+ //
+ // * `scc_placeholder[scc2]` stores some placeholder `P` where
+ // `scc2: P` (if any)
+ // * `scc_universes[scc2]` contains the minimum universe of the
+ // constituents of `scc2` and any of its successors
+ for scc1 in self.mini_graph.sccs.all_sccs() {
+ debug!(
+ "propagate_scc_value: scc={:?} with universe {:?}",
+ scc1, self.scc_universes[scc1]
+ );
+
+ // Walk over each `scc2` such that `scc1: scc2` and compute:
+ //
+ // * `scc1_universe`: the minimum universe of `scc2` and the constituents of `scc1`
+ // * `succ_bound`: placeholder `P` that the successors must outlive, if any (if there are multiple,
+ // we pick one arbitrarily)
+ let mut scc1_universe = self.scc_universes[scc1];
+ let mut succ_bound = None;
+ for &scc2 in self.mini_graph.sccs.successors(scc1) {
+ let SccUniverse { universe: scc2_universe, region: scc2_region } =
+ self.scc_universes[scc2];
+
+ scc1_universe.take_min(scc2_universe, scc2_region.unwrap());
+
+ if let Some(b) = self.scc_placeholders[scc2] {
+ succ_bound = Some(b);
+ }
+ }
+
+ // Update minimum universe of scc1.
+ self.scc_universes[scc1] = scc1_universe;
+
+ // At this point, `scc_placeholders[scc1]` stores the placeholder that
+ // `scc1` must be equal to, if any.
+ if let Some(scc1_placeholder) = self.scc_placeholders[scc1] {
+ debug!(
+ "propagate_scc_value: scc1={:?} placeholder={:?} scc1_universe={:?}",
+ scc1, scc1_placeholder, scc1_universe
+ );
+
+ // Check if `P1: R` for some `R` in a universe that cannot name
+ // P1. That's an error.
+ if scc1_universe.universe.cannot_name(scc1_placeholder.universe) {
+ return Err(self.error(scc1_placeholder, scc1_universe.region.unwrap()));
+ }
+
+ // Check if we have some placeholder where `S: P2`
+ // (transitively). In that case, since `S = P1`, that implies
+ // `P1: P2`, which is an error condition.
+ if let Some(scc2_placeholder) = succ_bound {
+ assert_ne!(scc1_placeholder, scc2_placeholder);
+ return Err(self.placeholder_error(scc1_placeholder, scc2_placeholder));
+ }
+ } else {
+ // Otherwise, we can reach a placeholder if some successor can.
+ self.scc_placeholders[scc1] = succ_bound;
+ }
+
+ // At this point, `scc_placeholder[scc1]` stores some placeholder that `scc1` must outlive (if any).
+ }
+ Ok(())
+ }
+
+ fn placeholder_error(
+ &self,
+ placeholder1: ty::PlaceholderRegion,
+ placeholder2: ty::PlaceholderRegion,
+ ) -> TypeError<'tcx> {
+ self.error(placeholder1, self.tcx.mk_region(ty::RePlaceholder(placeholder2)))
+ }
+
+ fn error(
+ &self,
+ placeholder: ty::PlaceholderRegion,
+ other_region: ty::Region<'tcx>,
+ ) -> TypeError<'tcx> {
+ debug!("error: placeholder={:?}, other_region={:?}", placeholder, other_region);
+ if self.overly_polymorphic {
+ TypeError::RegionsOverlyPolymorphic(placeholder.name, other_region)
+ } else {
+ TypeError::RegionsInsufficientlyPolymorphic(placeholder.name, other_region)
+ }
+ }
+}
+
+// States we need to distinguish:
+//
+// * must be equal to a placeholder (i.e., a placeholder is in the SCC)
+// * it could conflict with some other regions in the SCC in different universes
+// * or a different placeholder
+// * `P1: S` and `S` must be equal to a placeholder
+// * `P1: S` and `S` is in an incompatible universe
+//
+// So if we
+//
+// (a) compute which placeholder (if any) each SCC must be equal to
+// (b) compute its minimum universe
+// (c) compute *some* placeholder where `S: P1` (any one will do)
+//
+// then we get an error if:
+//
+// - it must be equal to a placeholder `P1` and minimum universe cannot name `P1`
+// - `S: P1` and minimum universe cannot name `P1`
+// - `S: P1` and we must be equal to `P2`
+//
+// So we want to track:
+//
+// * Equal placeholder (if any)
+// * Some bounding placeholder (if any)
+// * Minimum universe
+//
+// * We compute equal placeholder + minimum universe of constituents in first pass
+// * Then we walk in order and compute from our dependencies `S1` where `S: S1` (`S -> S1`)
+// * bounding placeholder (if any)
+// * minimum universe
+// * And if we must be equal to a placeholder then we check it against
+// * minimum universe
+// * no bounding placeholder
+
+/// Tracks the "minimum universe" for each SCC, along with some region that
+/// caused it to change.
+#[derive(Copy, Clone, Debug)]
+struct SccUniverse<'tcx> {
+ /// For some SCC S, the minimum universe of:
+ ///
+ /// * each region R in S
+ /// * each SCC S1 such that S: S1
+ universe: ty::UniverseIndex,
+
+ /// Some region that caused `universe` to be what it is.
+ region: Option<ty::Region<'tcx>>,
+}
+
+impl<'tcx> SccUniverse<'tcx> {
+ /// If `universe` is less than our current universe (or nothing has
+ /// been recorded yet), update `self.universe` and `self.region`.
+ fn take_min(&mut self, universe: ty::UniverseIndex, region: ty::Region<'tcx>) {
+ if universe < self.universe || self.region.is_none() {
+ self.universe = universe;
+ self.region = Some(region);
+ }
+ }
+}
+
+rustc_index::newtype_index! {
+ struct LeakCheckNode {
+ DEBUG_FORMAT = "LeakCheckNode({})"
+ }
+}
+
+rustc_index::newtype_index! {
+ struct LeakCheckScc {
+ DEBUG_FORMAT = "LeakCheckScc({})"
+ }
+}
+
+/// Represents the graph of constraints. For each `R1: R2` constraint we create
+/// an edge `R1 -> R2` in the graph.
+struct MiniGraph<'tcx> {
+ /// Map from a region to the index of the node in the graph.
+ nodes: FxHashMap<ty::Region<'tcx>, LeakCheckNode>,
+
+ /// Map from node index to SCC, and stores the successors of each SCC. All
+ /// the regions in the same SCC are equal to one another, and if `S1 -> S2`,
+ /// then `S1: S2`.
+ sccs: Sccs<LeakCheckNode, LeakCheckScc>,
+}
+
+impl<'tcx> MiniGraph<'tcx> {
+ fn new<'a>(
+ tcx: TyCtxt<'tcx>,
+ undo_log: impl Iterator<Item = &'a UndoLog<'tcx>>,
+ verifys: &[Verify<'tcx>],
+ ) -> Self
+ where
+ 'tcx: 'a,
+ {
+ let mut nodes = FxHashMap::default();
+ let mut edges = Vec::new();
+
+ // Note that if `R2: R1`, we get a callback `r1, r2`, so `target` is the first parameter.
+ Self::iterate_undo_log(tcx, undo_log, verifys, |target, source| {
+ let source_node = Self::add_node(&mut nodes, source);
+ let target_node = Self::add_node(&mut nodes, target);
+ edges.push((source_node, target_node));
+ });
+ let graph = VecGraph::new(nodes.len(), edges);
+ let sccs = Sccs::new(&graph);
+ Self { nodes, sccs }
+ }
+
+ /// Invokes `each_edge(R1, R2)` for each edge where `R2: R1`.
+ fn iterate_undo_log<'a>(
+ tcx: TyCtxt<'tcx>,
+ undo_log: impl Iterator<Item = &'a UndoLog<'tcx>>,
+ verifys: &[Verify<'tcx>],
+ mut each_edge: impl FnMut(ty::Region<'tcx>, ty::Region<'tcx>),
+ ) where
+ 'tcx: 'a,
+ {
+ for undo_entry in undo_log {
+ match undo_entry {
+ &AddConstraint(Constraint::VarSubVar(a, b)) => {
+ each_edge(tcx.mk_region(ReVar(a)), tcx.mk_region(ReVar(b)));
+ }
+ &AddConstraint(Constraint::RegSubVar(a, b)) => {
+ each_edge(a, tcx.mk_region(ReVar(b)));
+ }
+ &AddConstraint(Constraint::VarSubReg(a, b)) => {
+ each_edge(tcx.mk_region(ReVar(a)), b);
+ }
+ &AddConstraint(Constraint::RegSubReg(a, b)) => {
+ each_edge(a, b);
+ }
+ &AddGiven(a, b) => {
+ each_edge(a, tcx.mk_region(ReVar(b)));
+ }
+ &AddVerify(i) => span_bug!(
+ verifys[i].origin.span(),
+ "we never add verifications while doing higher-ranked things",
+ ),
+ &AddCombination(..) | &AddVar(..) => {}
+ }
+ }
+ }
+
+ fn add_node(
+ nodes: &mut FxHashMap<ty::Region<'tcx>, LeakCheckNode>,
+ r: ty::Region<'tcx>,
+ ) -> LeakCheckNode {
+ let l = nodes.len();
+ *nodes.entry(r).or_insert(LeakCheckNode::new(l))
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/region_constraints/mod.rs b/compiler/rustc_infer/src/infer/region_constraints/mod.rs
new file mode 100644
index 000000000..0d4472a1c
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/region_constraints/mod.rs
@@ -0,0 +1,821 @@
+//! See `README.md`.
+
+use self::CombineMapType::*;
+use self::UndoLog::*;
+
+use super::{
+ InferCtxtUndoLogs, MiscVariable, RegionVariableOrigin, Rollback, Snapshot, SubregionOrigin,
+};
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::intern::Interned;
+use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::undo_log::UndoLogs;
+use rustc_data_structures::unify as ut;
+use rustc_index::vec::IndexVec;
+use rustc_middle::infer::unify_key::{RegionVidKey, UnifiedRegion};
+use rustc_middle::ty::ReStatic;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::ty::{ReLateBound, ReVar};
+use rustc_middle::ty::{Region, RegionVid};
+use rustc_span::Span;
+
+use std::collections::BTreeMap;
+use std::ops::Range;
+use std::{cmp, fmt, mem};
+
+mod leak_check;
+
+pub use rustc_middle::infer::MemberConstraint;
+
+#[derive(Clone, Default)]
+pub struct RegionConstraintStorage<'tcx> {
+ /// For each `RegionVid`, the corresponding `RegionVariableOrigin`.
+ var_infos: IndexVec<RegionVid, RegionVariableInfo>,
+
+ data: RegionConstraintData<'tcx>,
+
+ /// For a given pair of regions (R1, R2), maps to a region R3 that
+ /// is designated as their LUB (edges R1 <= R3 and R2 <= R3
+ /// exist). This prevents us from making many such regions.
+ lubs: CombineMap<'tcx>,
+
+ /// For a given pair of regions (R1, R2), maps to a region R3 that
+ /// is designated as their GLB (edges R3 <= R1 and R3 <= R2
+ /// exist). This prevents us from making many such regions.
+ glbs: CombineMap<'tcx>,
+
+ /// When we add a R1 == R2 constraint, we currently add (a) edges
+ /// R1 <= R2 and R2 <= R1 and (b) we unify the two regions in this
+ /// table. You can then call `opportunistic_resolve_var` early
+ /// which will map R1 and R2 to some common region (i.e., either
+ /// R1 or R2). This is important when fulfillment, dropck and other such
+ /// code is iterating to a fixed point, because otherwise we sometimes
+ /// would wind up with a fresh stream of region variables that have been
+ /// equated but appear distinct.
+ pub(super) unification_table: ut::UnificationTableStorage<RegionVidKey<'tcx>>,
+
+ /// a flag set to true when we perform any unifications; this is used
+ /// to micro-optimize `take_and_reset_data`
+ any_unifications: bool,
+}
+
+pub struct RegionConstraintCollector<'a, 'tcx> {
+ storage: &'a mut RegionConstraintStorage<'tcx>,
+ undo_log: &'a mut InferCtxtUndoLogs<'tcx>,
+}
+
+impl<'tcx> std::ops::Deref for RegionConstraintCollector<'_, 'tcx> {
+ type Target = RegionConstraintStorage<'tcx>;
+ #[inline]
+ fn deref(&self) -> &RegionConstraintStorage<'tcx> {
+ self.storage
+ }
+}
+
+impl<'tcx> std::ops::DerefMut for RegionConstraintCollector<'_, 'tcx> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut RegionConstraintStorage<'tcx> {
+ self.storage
+ }
+}
+
+pub type VarInfos = IndexVec<RegionVid, RegionVariableInfo>;
+
+/// The full set of region constraints gathered up by the collector.
+/// Describes constraints between the region variables and other
+/// regions, as well as other conditions that must be verified, or
+/// assumptions that can be made.
+#[derive(Debug, Default, Clone)]
+pub struct RegionConstraintData<'tcx> {
+ /// Constraints of the form `A <= B`, where either `A` or `B` can
+ /// be a region variable (or neither, as it happens).
+ pub constraints: BTreeMap<Constraint<'tcx>, SubregionOrigin<'tcx>>,
+
+ /// Constraints of the form `R0 member of [R1, ..., Rn]`, meaning that
+ /// `R0` must be equal to one of the regions `R1..Rn`. These occur
+ /// with `impl Trait` quite frequently.
+ pub member_constraints: Vec<MemberConstraint<'tcx>>,
+
+ /// A "verify" is something that we need to verify after inference
+ /// is done, but which does not directly affect inference in any
+ /// way.
+ ///
+ /// An example is a `A <= B` where neither `A` nor `B` are
+ /// inference variables.
+ pub verifys: Vec<Verify<'tcx>>,
+
+ /// A "given" is a relationship that is known to hold. In
+ /// particular, we often know from closure fn signatures that a
+ /// particular free region must be a subregion of a region
+ /// variable:
+ ///
+ /// foo.iter().filter(<'a> |x: &'a &'b T| ...)
+ ///
+ /// In situations like this, `'b` is in fact a region variable
+ /// introduced by the call to `iter()`, and `'a` is a bound region
+ /// on the closure (as indicated by the `<'a>` prefix). If we are
+ /// naive, we wind up inferring that `'b` must be `'static`,
+ /// because we require that it be greater than `'a` and we do not
+ /// know what `'a` is precisely.
+ ///
+ /// This hashmap is used to avoid that naive scenario. Basically
+ /// we record the fact that `'a <= 'b` is implied by the fn
+ /// signature, and then ignore the constraint when solving
+ /// equations. This is a bit of a hack but seems to work.
+ pub givens: FxHashSet<(Region<'tcx>, ty::RegionVid)>,
+}
+
+/// Represents a constraint that influences the inference process.
+#[derive(Clone, Copy, PartialEq, Eq, Debug, PartialOrd, Ord)]
+pub enum Constraint<'tcx> {
+ /// A region variable is a subregion of another.
+ VarSubVar(RegionVid, RegionVid),
+
+ /// A concrete region is a subregion of region variable.
+ RegSubVar(Region<'tcx>, RegionVid),
+
+ /// A region variable is a subregion of a concrete region. This does not
+ /// directly affect inference, but instead is checked after
+ /// inference is complete.
+ VarSubReg(RegionVid, Region<'tcx>),
+
+ /// A constraint where neither side is a variable. This does not
+ /// directly affect inference, but instead is checked after
+ /// inference is complete.
+ RegSubReg(Region<'tcx>, Region<'tcx>),
+}
+
+impl Constraint<'_> {
+ pub fn involves_placeholders(&self) -> bool {
+ match self {
+ Constraint::VarSubVar(_, _) => false,
+ Constraint::VarSubReg(_, r) | Constraint::RegSubVar(r, _) => r.is_placeholder(),
+ Constraint::RegSubReg(r, s) => r.is_placeholder() || s.is_placeholder(),
+ }
+ }
+}
+
+#[derive(Debug, Clone)]
+pub struct Verify<'tcx> {
+ pub kind: GenericKind<'tcx>,
+ pub origin: SubregionOrigin<'tcx>,
+ pub region: Region<'tcx>,
+ pub bound: VerifyBound<'tcx>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)]
+pub enum GenericKind<'tcx> {
+ Param(ty::ParamTy),
+ Projection(ty::ProjectionTy<'tcx>),
+}
+
+/// Describes the things that some `GenericKind` value `G` is known to
+/// outlive. Each variant of `VerifyBound` can be thought of as a
+/// function:
+/// ```ignore (pseudo-rust)
+/// fn(min: Region) -> bool { .. }
+/// ```
+/// where `true` means that the region `min` is sufficient to establish `G: min`.
+/// (`false` means that nothing is known, not that `G: min` fails to hold.)
+///
+/// So, for example, if we have the type `T` and we have in scope that
+/// `T: 'a` and `T: 'b`, then the verify bound might be:
+/// ```ignore (pseudo-rust)
+/// fn(min: Region) -> bool {
+/// ('a: min) || ('b: min)
+/// }
+/// ```
+/// This is described with an `AnyBound(vec![OutlivedBy('a), OutlivedBy('b)])` node.
+#[derive(Debug, Clone)]
+pub enum VerifyBound<'tcx> {
+ /// See [`VerifyIfEq`] docs
+ IfEq(ty::Binder<'tcx, VerifyIfEq<'tcx>>),
+
+ /// Given a region `R`, expands to the function:
+ ///
+ /// ```ignore (pseudo-rust)
+ /// fn(min) -> bool {
+ /// R: min
+ /// }
+ /// ```
+ ///
+ /// This is used when we can establish that `G: R` -- therefore,
+ /// if `R: min`, then by transitivity `G: min`.
+ OutlivedBy(Region<'tcx>),
+
+ /// Given a region `R`, true if it is `'empty`.
+ IsEmpty,
+
+ /// Given a set of bounds `B`, expands to the function:
+ ///
+ /// ```ignore (pseudo-rust)
+ /// fn(min) -> bool {
+ /// exists (b in B) { b(min) }
+ /// }
+ /// ```
+ ///
+ /// In other words, if we meet some bound in `B`, that suffices.
+ /// This is used when all the bounds in `B` are known to apply to `G`.
+ AnyBound(Vec<VerifyBound<'tcx>>),
+
+ /// Given a set of bounds `B`, expands to the function:
+ ///
+ /// ```ignore (pseudo-rust)
+ /// fn(min) -> bool {
+ /// forall (b in B) { b(min) }
+ /// }
+ /// ```
+ ///
+ /// In other words, if we meet *all* bounds in `B`, that suffices.
+ /// This is used when *some* bound in `B` is known to suffice, but
+ /// we don't know which.
+ AllBounds(Vec<VerifyBound<'tcx>>),
+}
+
+/// This is a "conditional bound" that checks the result of inference
+/// and supplies a bound if it ended up being relevant. It's used in situations
+/// like this:
+///
+/// ```rust
+/// fn foo<'a, 'b, T: SomeTrait<'a>>
+/// where
+/// <T as SomeTrait<'a>>::Item: 'b
+/// ```
+///
+/// If we have an obligation like `<T as SomeTrait<'?x>>::Item: 'c`, then
+/// we don't know yet whether it suffices to show that `'b: 'c`. If `'?x` winds
+/// up being equal to `'a`, then the where-clause on the function applies, and
+/// in that case we can show `'b: 'c`. But if `'?x` winds up being something
+/// else, the bound isn't relevant.
+///
+/// In the [`VerifyBound`], this struct is enclosed in a `Binder` to account
+/// for cases like
+///
+/// ```rust
+/// where for<'a> <T as SomeTrait<'a>>::Item: 'a
+/// ```
+///
+/// The idea is that we have to find some instantiation of `'a` that can
+/// make `<T as SomeTrait<'a>>::Item` equal to the final value of `G`,
+/// the generic we are checking.
+///
+/// ```ignore (pseudo-rust)
+/// fn(min) -> bool {
+/// exists<'a> {
+/// if G == K {
+/// B(min)
+/// } else {
+/// false
+/// }
+/// }
+/// }
+/// ```
+#[derive(Debug, Copy, Clone, TypeFoldable, TypeVisitable)]
+pub struct VerifyIfEq<'tcx> {
+ /// Type which must match the generic `G`
+ pub ty: Ty<'tcx>,
+
+ /// Bound that applies if `ty` is equal.
+ pub bound: Region<'tcx>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub(crate) struct TwoRegions<'tcx> {
+ a: Region<'tcx>,
+ b: Region<'tcx>,
+}
+
+#[derive(Copy, Clone, PartialEq)]
+pub(crate) enum UndoLog<'tcx> {
+ /// We added the given `RegionVid`.
+ AddVar(RegionVid),
+
+ /// We added the given `constraint`.
+ AddConstraint(Constraint<'tcx>),
+
+ /// We added the given `verify`.
+ AddVerify(usize),
+
+ /// We added the given `given`.
+ AddGiven(Region<'tcx>, ty::RegionVid),
+
+ /// We added a GLB/LUB "combination variable".
+ AddCombination(CombineMapType, TwoRegions<'tcx>),
+}
+
+#[derive(Copy, Clone, PartialEq)]
+pub(crate) enum CombineMapType {
+ Lub,
+ Glb,
+}
+
+type CombineMap<'tcx> = FxHashMap<TwoRegions<'tcx>, RegionVid>;
+
+#[derive(Debug, Clone, Copy)]
+pub struct RegionVariableInfo {
+ pub origin: RegionVariableOrigin,
+ pub universe: ty::UniverseIndex,
+}
+
+pub struct RegionSnapshot {
+ any_unifications: bool,
+}
+
+impl<'tcx> RegionConstraintStorage<'tcx> {
+ pub fn new() -> Self {
+ Self::default()
+ }
+
+ #[inline]
+ pub(crate) fn with_log<'a>(
+ &'a mut self,
+ undo_log: &'a mut InferCtxtUndoLogs<'tcx>,
+ ) -> RegionConstraintCollector<'a, 'tcx> {
+ RegionConstraintCollector { storage: self, undo_log }
+ }
+
+ fn rollback_undo_entry(&mut self, undo_entry: UndoLog<'tcx>) {
+ match undo_entry {
+ AddVar(vid) => {
+ self.var_infos.pop().unwrap();
+ assert_eq!(self.var_infos.len(), vid.index() as usize);
+ }
+ AddConstraint(ref constraint) => {
+ self.data.constraints.remove(constraint);
+ }
+ AddVerify(index) => {
+ self.data.verifys.pop();
+ assert_eq!(self.data.verifys.len(), index);
+ }
+ AddGiven(sub, sup) => {
+ self.data.givens.remove(&(sub, sup));
+ }
+ AddCombination(Glb, ref regions) => {
+ self.glbs.remove(regions);
+ }
+ AddCombination(Lub, ref regions) => {
+ self.lubs.remove(regions);
+ }
+ }
+ }
+}
+
+impl<'tcx> RegionConstraintCollector<'_, 'tcx> {
+ pub fn num_region_vars(&self) -> usize {
+ self.var_infos.len()
+ }
+
+ pub fn region_constraint_data(&self) -> &RegionConstraintData<'tcx> {
+ &self.data
+ }
+
+ /// Once all the constraints have been gathered, extract out the final data.
+ ///
+ /// Not legal during a snapshot.
+ pub fn into_infos_and_data(self) -> (VarInfos, RegionConstraintData<'tcx>) {
+ assert!(!UndoLogs::<super::UndoLog<'_>>::in_snapshot(&self.undo_log));
+ (mem::take(&mut self.storage.var_infos), mem::take(&mut self.storage.data))
+ }
+
+ /// Takes (and clears) the current set of constraints. Note that
+ /// the set of variables remains intact, but all relationships
+ /// between them are reset. This is used during NLL checking to
+ /// grab the set of constraints that arose from a particular
+ /// operation.
+ ///
+ /// We don't want to leak relationships between variables across
+ /// operations: in NLL, the mere fact that (say) `r1 == r2` held at
+ /// some point P in the graph doesn't imply that it will hold at
+ /// some other point Q.
+ ///
+ /// Not legal during a snapshot.
+ pub fn take_and_reset_data(&mut self) -> RegionConstraintData<'tcx> {
+ assert!(!UndoLogs::<super::UndoLog<'_>>::in_snapshot(&self.undo_log));
+
+ // If you add a new field to `RegionConstraintCollector`, you
+ // should think carefully about whether it needs to be cleared
+ // or updated in some way.
+ let RegionConstraintStorage {
+ var_infos: _,
+ data,
+ lubs,
+ glbs,
+ unification_table: _,
+ any_unifications,
+ } = self.storage;
+
+ // Clear the tables of (lubs, glbs), so that we will create
+ // fresh regions if we do a LUB operation. As it happens,
+ // LUB/GLB are not performed by the MIR type-checker, which is
+ // the one that uses this method, but it's good to be correct.
+ lubs.clear();
+ glbs.clear();
+
+ let data = mem::take(data);
+
+ // Clear all unifications and recreate the variables in a "now
+ // un-unified" state. Note that when we unify `a` and `b`, we
+ // also insert `a <= b` and `b <= a` edges, so the
+ // `RegionConstraintData` contains the relationship here.
+ if *any_unifications {
+ *any_unifications = false;
+ self.unification_table().reset_unifications(|_| UnifiedRegion(None));
+ }
+
+ data
+ }
+
+ pub fn data(&self) -> &RegionConstraintData<'tcx> {
+ &self.data
+ }
+
+ pub fn start_snapshot(&mut self) -> RegionSnapshot {
+ debug!("RegionConstraintCollector: start_snapshot");
+ RegionSnapshot { any_unifications: self.any_unifications }
+ }
+
+ pub fn rollback_to(&mut self, snapshot: RegionSnapshot) {
+ debug!("RegionConstraintCollector: rollback_to({:?})", snapshot);
+ self.any_unifications = snapshot.any_unifications;
+ }
+
+ pub fn new_region_var(
+ &mut self,
+ universe: ty::UniverseIndex,
+ origin: RegionVariableOrigin,
+ ) -> RegionVid {
+ let vid = self.var_infos.push(RegionVariableInfo { origin, universe });
+
+ let u_vid = self.unification_table().new_key(UnifiedRegion(None));
+ assert_eq!(vid, u_vid.vid);
+ self.undo_log.push(AddVar(vid));
+ debug!("created new region variable {:?} in {:?} with origin {:?}", vid, universe, origin);
+ vid
+ }
+
+ /// Returns the universe for the given variable.
+ pub fn var_universe(&self, vid: RegionVid) -> ty::UniverseIndex {
+ self.var_infos[vid].universe
+ }
+
+ /// Returns the origin for the given variable.
+ pub fn var_origin(&self, vid: RegionVid) -> RegionVariableOrigin {
+ self.var_infos[vid].origin
+ }
+
+ fn add_constraint(&mut self, constraint: Constraint<'tcx>, origin: SubregionOrigin<'tcx>) {
+ // cannot add constraints once regions are resolved
+ debug!("RegionConstraintCollector: add_constraint({:?})", constraint);
+
+ // never overwrite an existing (constraint, origin) - only insert one if it isn't
+ // present in the map yet. This prevents origins from outside the snapshot being
+ // replaced with "less informative" origins e.g., during calls to `can_eq`
+ let undo_log = &mut self.undo_log;
+ self.storage.data.constraints.entry(constraint).or_insert_with(|| {
+ undo_log.push(AddConstraint(constraint));
+ origin
+ });
+ }
+
+ fn add_verify(&mut self, verify: Verify<'tcx>) {
+ // cannot add verifys once regions are resolved
+ debug!("RegionConstraintCollector: add_verify({:?})", verify);
+
+ // skip no-op cases known to be satisfied
+ if let VerifyBound::AllBounds(ref bs) = verify.bound && bs.is_empty() {
+ return;
+ }
+
+ let index = self.data.verifys.len();
+ self.data.verifys.push(verify);
+ self.undo_log.push(AddVerify(index));
+ }
+
+ pub fn add_given(&mut self, sub: Region<'tcx>, sup: ty::RegionVid) {
+ // cannot add givens once regions are resolved
+ if self.data.givens.insert((sub, sup)) {
+ debug!("add_given({:?} <= {:?})", sub, sup);
+
+ self.undo_log.push(AddGiven(sub, sup));
+ }
+ }
+
+ pub fn make_eqregion(
+ &mut self,
+ origin: SubregionOrigin<'tcx>,
+ sub: Region<'tcx>,
+ sup: Region<'tcx>,
+ ) {
+ if sub != sup {
+ // Eventually, it would be nice to add direct support for
+ // equating regions.
+ self.make_subregion(origin.clone(), sub, sup);
+ self.make_subregion(origin, sup, sub);
+
+ match (sub, sup) {
+ (Region(Interned(ReVar(sub), _)), Region(Interned(ReVar(sup), _))) => {
+ debug!("make_eqregion: unifying {:?} with {:?}", sub, sup);
+ self.unification_table().union(*sub, *sup);
+ self.any_unifications = true;
+ }
+ (Region(Interned(ReVar(vid), _)), value)
+ | (value, Region(Interned(ReVar(vid), _))) => {
+ debug!("make_eqregion: unifying {:?} with {:?}", vid, value);
+ self.unification_table().union_value(*vid, UnifiedRegion(Some(value)));
+ self.any_unifications = true;
+ }
+ (_, _) => {}
+ }
+ }
+ }
+
+ pub fn member_constraint(
+ &mut self,
+ key: ty::OpaqueTypeKey<'tcx>,
+ definition_span: Span,
+ hidden_ty: Ty<'tcx>,
+ member_region: ty::Region<'tcx>,
+ choice_regions: &Lrc<Vec<ty::Region<'tcx>>>,
+ ) {
+ debug!("member_constraint({:?} in {:#?})", member_region, choice_regions);
+
+ if choice_regions.iter().any(|&r| r == member_region) {
+ return;
+ }
+
+ self.data.member_constraints.push(MemberConstraint {
+ key,
+ definition_span,
+ hidden_ty,
+ member_region,
+ choice_regions: choice_regions.clone(),
+ });
+ }
+
+ #[instrument(skip(self, origin), level = "debug")]
+ pub fn make_subregion(
+ &mut self,
+ origin: SubregionOrigin<'tcx>,
+ sub: Region<'tcx>,
+ sup: Region<'tcx>,
+ ) {
+ // cannot add constraints once regions are resolved
+ debug!("origin = {:#?}", origin);
+
+ match (*sub, *sup) {
+ (ReLateBound(..), _) | (_, ReLateBound(..)) => {
+ span_bug!(origin.span(), "cannot relate bound region: {:?} <= {:?}", sub, sup);
+ }
+ (_, ReStatic) => {
+ // all regions are subregions of static, so we can ignore this
+ }
+ (ReVar(sub_id), ReVar(sup_id)) => {
+ self.add_constraint(Constraint::VarSubVar(sub_id, sup_id), origin);
+ }
+ (_, ReVar(sup_id)) => {
+ self.add_constraint(Constraint::RegSubVar(sub, sup_id), origin);
+ }
+ (ReVar(sub_id), _) => {
+ self.add_constraint(Constraint::VarSubReg(sub_id, sup), origin);
+ }
+ _ => {
+ self.add_constraint(Constraint::RegSubReg(sub, sup), origin);
+ }
+ }
+ }
+
+ pub fn verify_generic_bound(
+ &mut self,
+ origin: SubregionOrigin<'tcx>,
+ kind: GenericKind<'tcx>,
+ sub: Region<'tcx>,
+ bound: VerifyBound<'tcx>,
+ ) {
+ self.add_verify(Verify { kind, origin, region: sub, bound });
+ }
+
+ pub fn lub_regions(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ origin: SubregionOrigin<'tcx>,
+ a: Region<'tcx>,
+ b: Region<'tcx>,
+ ) -> Region<'tcx> {
+ // cannot add constraints once regions are resolved
+ debug!("RegionConstraintCollector: lub_regions({:?}, {:?})", a, b);
+ if a.is_static() || b.is_static() {
+ a // nothing lives longer than static
+ } else if a == b {
+ a // LUB(a,a) = a
+ } else {
+ self.combine_vars(tcx, Lub, a, b, origin)
+ }
+ }
+
+ pub fn glb_regions(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ origin: SubregionOrigin<'tcx>,
+ a: Region<'tcx>,
+ b: Region<'tcx>,
+ ) -> Region<'tcx> {
+ // cannot add constraints once regions are resolved
+ debug!("RegionConstraintCollector: glb_regions({:?}, {:?})", a, b);
+ if a.is_static() {
+ b // static lives longer than everything else
+ } else if b.is_static() {
+ a // static lives longer than everything else
+ } else if a == b {
+ a // GLB(a,a) = a
+ } else {
+ self.combine_vars(tcx, Glb, a, b, origin)
+ }
+ }
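+
+ // A sketch of the shortcut cases above (pseudo-rust; `'x` is any
+ // non-static region):
+ //
+ //     lub_regions(tcx, origin, 'static, 'x) == 'static
+ //     glb_regions(tcx, origin, 'static, 'x) == 'x
+ //     lub_regions(tcx, origin, 'x, 'x) == 'x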
+
+ /// Resolves the passed RegionVid to the root RegionVid in the unification table
+ pub fn opportunistic_resolve_var(&mut self, rid: ty::RegionVid) -> ty::RegionVid {
+ self.unification_table().find(rid).vid
+ }
+
+ /// If the Region is a `ReVar`, then resolves it either to the root value in
+ /// the unification table, if it exists, or to the root `ReVar` in the table.
+ /// If the Region is not a `ReVar`, just returns the Region itself.
+ pub fn opportunistic_resolve_region(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ region: ty::Region<'tcx>,
+ ) -> ty::Region<'tcx> {
+ match *region {
+ ty::ReVar(rid) => {
+ let unified_region = self.unification_table().probe_value(rid);
+ unified_region.0.unwrap_or_else(|| {
+ let root = self.unification_table().find(rid).vid;
+ tcx.reuse_or_mk_region(region, ty::ReVar(root))
+ })
+ }
+ _ => region,
+ }
+ }
+
+ fn combine_map(&mut self, t: CombineMapType) -> &mut CombineMap<'tcx> {
+ match t {
+ Glb => &mut self.glbs,
+ Lub => &mut self.lubs,
+ }
+ }
+
+ fn combine_vars(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ t: CombineMapType,
+ a: Region<'tcx>,
+ b: Region<'tcx>,
+ origin: SubregionOrigin<'tcx>,
+ ) -> Region<'tcx> {
+ let vars = TwoRegions { a, b };
+ if let Some(&c) = self.combine_map(t).get(&vars) {
+ return tcx.mk_region(ReVar(c));
+ }
+ let a_universe = self.universe(a);
+ let b_universe = self.universe(b);
+ let c_universe = cmp::max(a_universe, b_universe);
+ let c = self.new_region_var(c_universe, MiscVariable(origin.span()));
+ self.combine_map(t).insert(vars, c);
+ self.undo_log.push(AddCombination(t, vars));
+ let new_r = tcx.mk_region(ReVar(c));
+ for old_r in [a, b] {
+ match t {
+ Glb => self.make_subregion(origin.clone(), new_r, old_r),
+ Lub => self.make_subregion(origin.clone(), old_r, new_r),
+ }
+ }
+ debug!("combine_vars() c={:?}", c);
+ new_r
+ }
+
+ pub fn universe(&self, region: Region<'tcx>) -> ty::UniverseIndex {
+ match *region {
+ ty::ReStatic | ty::ReErased | ty::ReFree(..) | ty::ReEarlyBound(..) => {
+ ty::UniverseIndex::ROOT
+ }
+ ty::ReEmpty(ui) => ui,
+ ty::RePlaceholder(placeholder) => placeholder.universe,
+ ty::ReVar(vid) => self.var_universe(vid),
+ ty::ReLateBound(..) => bug!("universe(): encountered bound region {:?}", region),
+ }
+ }
+
+ pub fn vars_since_snapshot(
+ &self,
+ value_count: usize,
+ ) -> (Range<RegionVid>, Vec<RegionVariableOrigin>) {
+ let range = RegionVid::from(value_count)..RegionVid::from(self.unification_table.len());
+ (
+ range.clone(),
+ (range.start.index()..range.end.index())
+ .map(|index| self.var_infos[ty::RegionVid::from(index)].origin)
+ .collect(),
+ )
+ }
+
+ /// See `InferCtxt::region_constraints_added_in_snapshot`.
+ pub fn region_constraints_added_in_snapshot(&self, mark: &Snapshot<'tcx>) -> Option<bool> {
+ self.undo_log
+ .region_constraints_in_snapshot(mark)
+ .map(|&elt| match elt {
+ AddConstraint(constraint) => Some(constraint.involves_placeholders()),
+ _ => None,
+ })
+ .max()
+ .unwrap_or(None)
+ }
+
+ #[inline]
+ fn unification_table(&mut self) -> super::UnificationTable<'_, 'tcx, RegionVidKey<'tcx>> {
+ ut::UnificationTable::with_log(&mut self.storage.unification_table, self.undo_log)
+ }
+}
+
+impl fmt::Debug for RegionSnapshot {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "RegionSnapshot")
+ }
+}
+
+impl<'tcx> fmt::Debug for GenericKind<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ GenericKind::Param(ref p) => write!(f, "{:?}", p),
+ GenericKind::Projection(ref p) => write!(f, "{:?}", p),
+ }
+ }
+}
+
+impl<'tcx> fmt::Display for GenericKind<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ GenericKind::Param(ref p) => write!(f, "{}", p),
+ GenericKind::Projection(ref p) => write!(f, "{}", p),
+ }
+ }
+}
+
+impl<'tcx> GenericKind<'tcx> {
+ pub fn to_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ GenericKind::Param(ref p) => p.to_ty(tcx),
+ GenericKind::Projection(ref p) => tcx.mk_projection(p.item_def_id, p.substs),
+ }
+ }
+}
+
+impl<'tcx> VerifyBound<'tcx> {
+ pub fn must_hold(&self) -> bool {
+ match self {
+ VerifyBound::IfEq(..) => false,
+ VerifyBound::OutlivedBy(re) => re.is_static(),
+ VerifyBound::IsEmpty => false,
+ VerifyBound::AnyBound(bs) => bs.iter().any(|b| b.must_hold()),
+ VerifyBound::AllBounds(bs) => bs.iter().all(|b| b.must_hold()),
+ }
+ }
+
+ pub fn cannot_hold(&self) -> bool {
+ match self {
+ VerifyBound::IfEq(..) => false,
+ VerifyBound::IsEmpty => false,
+ VerifyBound::OutlivedBy(_) => false,
+ VerifyBound::AnyBound(bs) => bs.iter().all(|b| b.cannot_hold()),
+ VerifyBound::AllBounds(bs) => bs.iter().any(|b| b.cannot_hold()),
+ }
+ }
+
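+ /// Returns the disjunction of `self` and `vb`, simplifying when either
+ /// side already decides the result. A sketch (pseudo-rust; `r_static`
+ /// is assumed to be the interned `'static` region):
+ ///
+ /// ```ignore (pseudo-rust)
+ /// let a = VerifyBound::OutlivedBy(r_static); // a.must_hold()
+ /// let b = VerifyBound::IsEmpty;
+ /// // `a.or(b)` returns `a`: since `a` must hold, `b` is irrelevant.
+ /// ```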
+ pub fn or(self, vb: VerifyBound<'tcx>) -> VerifyBound<'tcx> {
+ if self.must_hold() || vb.cannot_hold() {
+ self
+ } else if self.cannot_hold() || vb.must_hold() {
+ vb
+ } else {
+ VerifyBound::AnyBound(vec![self, vb])
+ }
+ }
+}
+
+impl<'tcx> RegionConstraintData<'tcx> {
+ /// Returns `true` if this region constraint data contains no constraints, and `false`
+ /// otherwise.
+ pub fn is_empty(&self) -> bool {
+ let RegionConstraintData { constraints, member_constraints, verifys, givens } = self;
+ constraints.is_empty()
+ && member_constraints.is_empty()
+ && verifys.is_empty()
+ && givens.is_empty()
+ }
+}
+
+impl<'tcx> Rollback<UndoLog<'tcx>> for RegionConstraintStorage<'tcx> {
+ fn reverse(&mut self, undo: UndoLog<'tcx>) {
+ self.rollback_undo_entry(undo)
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/resolve.rs b/compiler/rustc_infer/src/infer/resolve.rs
new file mode 100644
index 000000000..3d99f0958
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/resolve.rs
@@ -0,0 +1,237 @@
+use super::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use super::{FixupError, FixupResult, InferCtxt, Span};
+use rustc_middle::mir;
+use rustc_middle::ty::fold::{FallibleTypeFolder, TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitor};
+use rustc_middle::ty::{self, Const, InferConst, Ty, TyCtxt, TypeFoldable, TypeVisitable};
+
+use std::ops::ControlFlow;
+
+///////////////////////////////////////////////////////////////////////////
+// OPPORTUNISTIC VAR RESOLVER
+
+/// The opportunistic resolver can be used at any time. It simply replaces
+/// type/const variables that have been unified with the things they have
+/// been unified with (similar to `shallow_resolve`, but deep). This is
+/// useful for printing messages etc., but also required at various
+/// points for correctness.
+pub struct OpportunisticVarResolver<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> OpportunisticVarResolver<'a, 'tcx> {
+ #[inline]
+ pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+ OpportunisticVarResolver { infcx }
+ }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for OpportunisticVarResolver<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ if !t.has_infer_types_or_consts() {
+ t // micro-optimize -- if there is nothing in this type that this fold affects...
+ } else {
+ let t = self.infcx.shallow_resolve(t);
+ t.super_fold_with(self)
+ }
+ }
+
+ fn fold_const(&mut self, ct: Const<'tcx>) -> Const<'tcx> {
+ if !ct.has_infer_types_or_consts() {
+ ct // micro-optimize -- if there is nothing in this const that this fold affects...
+ } else {
+ let ct = self.infcx.shallow_resolve(ct);
+ ct.super_fold_with(self)
+ }
+ }
+
+ fn fold_mir_const(&mut self, constant: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
+ constant.super_fold_with(self)
+ }
+}
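+
+// A sketch of how this folder is typically driven (`infcx` and `ty` are
+// assumed to come from the caller; not a doctest):
+//
+//     let mut resolver = OpportunisticVarResolver::new(infcx);
+//     let resolved = ty.fold_with(&mut resolver);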
+
+/// The opportunistic region resolver opportunistically resolves region
+/// variables to the variable with the least variable id. It is used when
+/// normalizing projections to avoid hitting the recursion limit by creating
+/// many versions of a predicate for types that in the end have to unify.
+///
+/// If you want to resolve type and const variables as well, call
+/// [InferCtxt::resolve_vars_if_possible] first.
+pub struct OpportunisticRegionResolver<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> OpportunisticRegionResolver<'a, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+ OpportunisticRegionResolver { infcx }
+ }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for OpportunisticRegionResolver<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ if !t.has_infer_regions() {
+ t // micro-optimize -- if there is nothing in this type that this fold affects...
+ } else {
+ t.super_fold_with(self)
+ }
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReVar(rid) => {
+ let resolved = self
+ .infcx
+ .inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .opportunistic_resolve_var(rid);
+ TypeFolder::tcx(self).reuse_or_mk_region(r, ty::ReVar(resolved))
+ }
+ _ => r,
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ if !ct.has_infer_regions() {
+ ct // micro-optimize -- if there is nothing in this const that this fold affects...
+ } else {
+ ct.super_fold_with(self)
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// UNRESOLVED TYPE FINDER
+
+/// The unresolved type **finder** walks a type searching for
+/// type variables that don't yet have a value. Visiting stops at the first
+/// unresolved type, which is returned via `ControlFlow::Break` (together
+/// with the span of its origin, if known). It does not construct the fully
+/// resolved type (which might involve some hashing and so forth).
+pub struct UnresolvedTypeFinder<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> UnresolvedTypeFinder<'a, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+ UnresolvedTypeFinder { infcx }
+ }
+}
+
+impl<'a, 'tcx> TypeVisitor<'tcx> for UnresolvedTypeFinder<'a, 'tcx> {
+ type BreakTy = (Ty<'tcx>, Option<Span>);
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let t = self.infcx.shallow_resolve(t);
+ if t.has_infer_types() {
+ if let ty::Infer(infer_ty) = *t.kind() {
+ // Since we called `shallow_resolve` above, this must
+ // be an (as yet...) unresolved inference variable.
+ let ty_var_span = if let ty::TyVar(ty_vid) = infer_ty {
+ let mut inner = self.infcx.inner.borrow_mut();
+ let ty_vars = &inner.type_variables();
+ if let TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeParameterDefinition(_, _),
+ span,
+ } = *ty_vars.var_origin(ty_vid)
+ {
+ Some(span)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+ ControlFlow::Break((t, ty_var_span))
+ } else {
+ // Otherwise, visit its contents.
+ t.super_visit_with(self)
+ }
+ } else {
+ // All type variables in this type must already be resolved;
+ // no need to visit the contents, continue visiting.
+ ControlFlow::CONTINUE
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// FULL TYPE RESOLUTION
+
+/// Full type resolution replaces all type and region variables with
+/// their concrete results. If any variable cannot be replaced (e.g., it was never unified),
+/// then an `Err` result is returned.
+pub fn fully_resolve<'a, 'tcx, T>(infcx: &InferCtxt<'a, 'tcx>, value: T) -> FixupResult<'tcx, T>
+where
+ T: TypeFoldable<'tcx>,
+{
+ value.try_fold_with(&mut FullTypeResolver { infcx })
+}
+
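+// For example (a sketch; `infcx` and `inferred_ty` are assumed from the
+// caller, and region resolution must already have run):
+//
+//     let concrete = fully_resolve(infcx, inferred_ty)?;
+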
+// N.B. This type is not public because the protocol around checking for
+// resolution errors is not enforceable otherwise.
+struct FullTypeResolver<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> FallibleTypeFolder<'tcx> for FullTypeResolver<'a, 'tcx> {
+ type Error = FixupError<'tcx>;
+
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn try_fold_ty(&mut self, t: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
+ if !t.needs_infer() {
+ Ok(t) // micro-optimize -- if there is nothing in this type that this fold affects...
+ } else {
+ let t = self.infcx.shallow_resolve(t);
+ match *t.kind() {
+ ty::Infer(ty::TyVar(vid)) => Err(FixupError::UnresolvedTy(vid)),
+ ty::Infer(ty::IntVar(vid)) => Err(FixupError::UnresolvedIntTy(vid)),
+ ty::Infer(ty::FloatVar(vid)) => Err(FixupError::UnresolvedFloatTy(vid)),
+ ty::Infer(_) => {
+ bug!("Unexpected type in full type resolver: {:?}", t);
+ }
+ _ => t.try_super_fold_with(self),
+ }
+ }
+ }
+
+ fn try_fold_region(&mut self, r: ty::Region<'tcx>) -> Result<ty::Region<'tcx>, Self::Error> {
+ match *r {
+ ty::ReVar(_) => Ok(self
+ .infcx
+ .lexical_region_resolutions
+ .borrow()
+ .as_ref()
+ .expect("region resolution not performed")
+ .resolve_region(self.infcx.tcx, r)),
+ _ => Ok(r),
+ }
+ }
+
+ fn try_fold_const(&mut self, c: ty::Const<'tcx>) -> Result<ty::Const<'tcx>, Self::Error> {
+ if !c.needs_infer() {
+ Ok(c) // micro-optimize -- if there is nothing in this const that this fold affects...
+ } else {
+ let c = self.infcx.shallow_resolve(c);
+ match c.kind() {
+ ty::ConstKind::Infer(InferConst::Var(vid)) => {
+ return Err(FixupError::UnresolvedConst(vid));
+ }
+ ty::ConstKind::Infer(InferConst::Fresh(_)) => {
+ bug!("Unexpected const in full const resolver: {:?}", c);
+ }
+ _ => {}
+ }
+ c.try_super_fold_with(self)
+ }
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/sub.rs b/compiler/rustc_infer/src/infer/sub.rs
new file mode 100644
index 000000000..b27571275
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/sub.rs
@@ -0,0 +1,210 @@
+use super::combine::{CombineFields, RelationDir};
+use super::SubregionOrigin;
+
+use crate::infer::combine::ConstEquateRelation;
+use crate::infer::{TypeVariableOrigin, TypeVariableOriginKind};
+use crate::traits::Obligation;
+use rustc_middle::ty::relate::{Cause, Relate, RelateResult, TypeRelation};
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::TyVar;
+use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt};
+use std::mem;
+
+/// Ensures `a` is made a subtype of `b`. Returns `a` on success.
+pub struct Sub<'combine, 'infcx, 'tcx> {
+ fields: &'combine mut CombineFields<'infcx, 'tcx>,
+ a_is_expected: bool,
+}
+
+impl<'combine, 'infcx, 'tcx> Sub<'combine, 'infcx, 'tcx> {
+ pub fn new(
+ f: &'combine mut CombineFields<'infcx, 'tcx>,
+ a_is_expected: bool,
+ ) -> Sub<'combine, 'infcx, 'tcx> {
+ Sub { fields: f, a_is_expected }
+ }
+
+ fn with_expected_switched<R, F: FnOnce(&mut Self) -> R>(&mut self, f: F) -> R {
+ self.a_is_expected = !self.a_is_expected;
+ let result = f(self);
+ self.a_is_expected = !self.a_is_expected;
+ result
+ }
+}
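+
+// A sketch of typical use (assumed names; not a doctest): a `Sub` relation
+// is created from `CombineFields` and driven via `TypeRelation::relate`,
+// which records `a_ty <: b_ty`:
+//
+//     let mut sub = Sub::new(&mut fields, /* a_is_expected */ true);
+//     let _ = sub.relate(a_ty, b_ty)?;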
+
+impl<'tcx> TypeRelation<'tcx> for Sub<'_, '_, 'tcx> {
+ fn tag(&self) -> &'static str {
+ "Sub"
+ }
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.fields.infcx.tcx
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.fields.param_env
+ }
+
+ fn a_is_expected(&self) -> bool {
+ self.a_is_expected
+ }
+
+ fn with_cause<F, R>(&mut self, cause: Cause, f: F) -> R
+ where
+ F: FnOnce(&mut Self) -> R,
+ {
+ debug!("sub with_cause={:?}", cause);
+ let old_cause = mem::replace(&mut self.fields.cause, Some(cause));
+ let r = f(self);
+ debug!("sub old_cause={:?}", old_cause);
+ self.fields.cause = old_cause;
+ r
+ }
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ variance: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ match variance {
+ ty::Invariant => self.fields.equate(self.a_is_expected).relate(a, b),
+ ty::Covariant => self.relate(a, b),
+ ty::Bivariant => Ok(a),
+ ty::Contravariant => self.with_expected_switched(|this| this.relate(b, a)),
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ if a == b {
+ return Ok(a);
+ }
+
+ let infcx = self.fields.infcx;
+ let a = infcx.inner.borrow_mut().type_variables().replace_if_possible(a);
+ let b = infcx.inner.borrow_mut().type_variables().replace_if_possible(b);
+
+ match (a.kind(), b.kind()) {
+ (&ty::Infer(TyVar(_)), &ty::Infer(TyVar(_))) => {
+ // Shouldn't have any late-bound regions here, so we can
+ // safely put this under a binder below without fear of
+ // accidental capture.
+ assert!(!a.has_escaping_bound_vars());
+ assert!(!b.has_escaping_bound_vars());
+
+ // Can't make progress on `A <: B` if both A and B are
+ // type variables, so record an obligation.
+ self.fields.obligations.push(Obligation::new(
+ self.fields.trace.cause.clone(),
+ self.fields.param_env,
+ ty::Binder::dummy(ty::PredicateKind::Subtype(ty::SubtypePredicate {
+ a_is_expected: self.a_is_expected,
+ a,
+ b,
+ }))
+ .to_predicate(self.tcx()),
+ ));
+
+ Ok(a)
+ }
+ (&ty::Infer(TyVar(a_id)), _) => {
+ self.fields.instantiate(b, RelationDir::SupertypeOf, a_id, !self.a_is_expected)?;
+ Ok(a)
+ }
+ (_, &ty::Infer(TyVar(b_id))) => {
+ self.fields.instantiate(a, RelationDir::SubtypeOf, b_id, self.a_is_expected)?;
+ Ok(a)
+ }
+
+ (&ty::Error(_), _) | (_, &ty::Error(_)) => {
+ infcx.set_tainted_by_errors();
+ Ok(self.tcx().ty_error())
+ }
+
+ (&ty::Opaque(a_def_id, _), &ty::Opaque(b_def_id, _)) if a_def_id == b_def_id => {
+ self.fields.infcx.super_combine_tys(self, a, b)?;
+ Ok(a)
+ }
+ (&ty::Opaque(did, ..), _) | (_, &ty::Opaque(did, ..))
+ if self.fields.define_opaque_types && did.is_local() =>
+ {
+ let mut generalize = |ty, ty_is_expected| {
+ let var = infcx.next_ty_var_id_in_universe(
+ TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: self.fields.trace.cause.span,
+ },
+ ty::UniverseIndex::ROOT,
+ );
+ self.fields.instantiate(ty, RelationDir::SubtypeOf, var, ty_is_expected)?;
+ Ok(infcx.tcx.mk_ty_var(var))
+ };
+ let (a, b) = if self.a_is_expected { (a, b) } else { (b, a) };
+ let (a, b) = match (a.kind(), b.kind()) {
+ (&ty::Opaque(..), _) => (a, generalize(b, true)?),
+ (_, &ty::Opaque(..)) => (generalize(a, false)?, b),
+ _ => unreachable!(),
+ };
+ self.fields.obligations.extend(
+ infcx
+ .handle_opaque_type(a, b, true, &self.fields.trace.cause, self.param_env())?
+ .obligations,
+ );
+ Ok(a)
+ }
+
+ _ => {
+ self.fields.infcx.super_combine_tys(self, a, b)?;
+ Ok(a)
+ }
+ }
+ }
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug!("{}.regions({:?}, {:?}) self.cause={:?}", self.tag(), a, b, self.fields.cause);
+
+ // FIXME -- we have more fine-grained information available
+ // from the "cause" field, we could perhaps give more tailored
+ // error messages.
+ let origin = SubregionOrigin::Subtype(Box::new(self.fields.trace.clone()));
+ self.fields
+ .infcx
+ .inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .make_subregion(origin, a, b);
+
+ Ok(a)
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ self.fields.infcx.super_combine_consts(self, a, b)
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ self.fields.higher_ranked_sub(a, b, self.a_is_expected)?;
+ Ok(a)
+ }
+}
+
+impl<'tcx> ConstEquateRelation<'tcx> for Sub<'_, '_, 'tcx> {
+ fn const_equate_obligation(&mut self, a: ty::Const<'tcx>, b: ty::Const<'tcx>) {
+ self.fields.add_const_equate_obligation(self.a_is_expected, a, b);
+ }
+}
diff --git a/compiler/rustc_infer/src/infer/type_variable.rs b/compiler/rustc_infer/src/infer/type_variable.rs
new file mode 100644
index 000000000..a0e2965b6
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/type_variable.rs
@@ -0,0 +1,460 @@
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::{self, Ty, TyVid};
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+
+use crate::infer::InferCtxtUndoLogs;
+
+use rustc_data_structures::snapshot_vec as sv;
+use rustc_data_structures::unify as ut;
+use std::cmp;
+use std::marker::PhantomData;
+use std::ops::Range;
+
+use rustc_data_structures::undo_log::{Rollback, UndoLogs};
+
+/// Represents a single undo-able action that affects a type inference variable.
+#[derive(Clone)]
+pub(crate) enum UndoLog<'tcx> {
+ EqRelation(sv::UndoLog<ut::Delegate<TyVidEqKey<'tcx>>>),
+ SubRelation(sv::UndoLog<ut::Delegate<ty::TyVid>>),
+ Values(sv::UndoLog<Delegate>),
+}
+
+/// Convert from a specific kind of undo to the more general UndoLog
+impl<'tcx> From<sv::UndoLog<ut::Delegate<TyVidEqKey<'tcx>>>> for UndoLog<'tcx> {
+ fn from(l: sv::UndoLog<ut::Delegate<TyVidEqKey<'tcx>>>) -> Self {
+ UndoLog::EqRelation(l)
+ }
+}
+
+/// Convert from a specific kind of undo to the more general UndoLog
+impl<'tcx> From<sv::UndoLog<ut::Delegate<ty::TyVid>>> for UndoLog<'tcx> {
+ fn from(l: sv::UndoLog<ut::Delegate<ty::TyVid>>) -> Self {
+ UndoLog::SubRelation(l)
+ }
+}
+
+/// Convert from a specific kind of undo to the more general UndoLog
+impl<'tcx> From<sv::UndoLog<Delegate>> for UndoLog<'tcx> {
+ fn from(l: sv::UndoLog<Delegate>) -> Self {
+ UndoLog::Values(l)
+ }
+}
+
+/// Convert from a specific kind of undo to the more general UndoLog
+impl<'tcx> From<Instantiate> for UndoLog<'tcx> {
+ fn from(l: Instantiate) -> Self {
+ UndoLog::Values(sv::UndoLog::Other(l))
+ }
+}
+
+impl<'tcx> Rollback<UndoLog<'tcx>> for TypeVariableStorage<'tcx> {
+ fn reverse(&mut self, undo: UndoLog<'tcx>) {
+ match undo {
+ UndoLog::EqRelation(undo) => self.eq_relations.reverse(undo),
+ UndoLog::SubRelation(undo) => self.sub_relations.reverse(undo),
+ UndoLog::Values(undo) => self.values.reverse(undo),
+ }
+ }
+}
+
+#[derive(Clone)]
+pub struct TypeVariableStorage<'tcx> {
+ values: sv::SnapshotVecStorage<Delegate>,
+
+ /// Two variables are unified in `eq_relations` when we have a
+ /// constraint `?X == ?Y`. This table also stores, for each key,
+ /// the known value.
+ eq_relations: ut::UnificationTableStorage<TyVidEqKey<'tcx>>,
+
+ /// Two variables are unified in `sub_relations` when we have a
+ /// constraint `?X <: ?Y` *or* a constraint `?Y <: ?X`. This second
+ /// table exists only to help with the occurs check. In particular,
+ /// we want to report constraints like these as an occurs check
+ /// violation:
+ /// ``` text
+ /// ?1 <: ?3
+ /// Box<?3> <: ?1
+ /// ```
+ /// Without this second table, what would happen in a case like
+ /// this is that we would instantiate `?1` with a generalized
+ /// type like `Box<?6>`. We would then relate `Box<?3> <: Box<?6>`
+ /// and infer that `?3 <: ?6`. Next, since `?1` was instantiated,
+ /// we would process `?1 <: ?3`, generalize `?1 = Box<?6>` to `Box<?9>`,
+ /// and instantiate `?3` with `Box<?9>`. Finally, we would relate
+ /// `?6 <: ?9`. But now that we instantiated `?3`, we can process
+ /// `?3 <: ?6`, which gives us `Box<?9> <: ?6`... and the cycle
+ /// continues. (This is `occurs-check-2.rs`.)
+ ///
+ /// What prevents this cycle is that when we generalize
+ /// `Box<?3>` to `Box<?6>`, we also sub-unify `?3` and `?6`
+ /// (in the generalizer). When we then process `Box<?6> <: ?3`,
+ /// the occurs check then fails because `?6` and `?3` are sub-unified,
+ /// and hence generalization fails.
+ ///
+ /// This is reasonable because, in Rust, subtypes have the same
+ /// "skeleton" and hence there is no possible type such that
+ /// (e.g.) `Box<?3> <: ?3` for any `?3`.
+ ///
+ /// In practice, we sometimes sub-unify variables in other spots, such
+ /// as when processing subtype predicates. This is not necessary but is
+ /// done to aid diagnostics, as it allows us to be more effective when
+ /// we guide the user towards where they should insert type hints.
+ sub_relations: ut::UnificationTableStorage<ty::TyVid>,
+}
+
+pub struct TypeVariableTable<'a, 'tcx> {
+ storage: &'a mut TypeVariableStorage<'tcx>,
+
+ undo_log: &'a mut InferCtxtUndoLogs<'tcx>,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct TypeVariableOrigin {
+ pub kind: TypeVariableOriginKind,
+ pub span: Span,
+}
+
+/// Reasons to create a type inference variable
+#[derive(Copy, Clone, Debug)]
+pub enum TypeVariableOriginKind {
+ MiscVariable,
+ NormalizeProjectionType,
+ TypeInference,
+ TypeParameterDefinition(Symbol, Option<DefId>),
+
+ /// One of the upvars or closure kind parameters in a `ClosureSubsts`
+ /// (before it has been determined).
+ // FIXME(eddyb) distinguish upvar inference variables from the rest.
+ ClosureSynthetic,
+ SubstitutionPlaceholder,
+ AutoDeref,
+ AdjustmentType,
+
+ /// In type check, when we are type checking a function that
+ /// returns `-> dyn Foo`, we substitute a type variable for the
+ /// return type for diagnostic purposes.
+ DynReturnFn,
+ LatticeVariable,
+}
+
+#[derive(Clone)]
+pub(crate) struct TypeVariableData {
+ origin: TypeVariableOrigin,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum TypeVariableValue<'tcx> {
+ Known { value: Ty<'tcx> },
+ Unknown { universe: ty::UniverseIndex },
+}
+
+impl<'tcx> TypeVariableValue<'tcx> {
+ /// If this value is known, returns the type it is known to be.
+ /// Otherwise, `None`.
+ pub fn known(&self) -> Option<Ty<'tcx>> {
+ match *self {
+ TypeVariableValue::Unknown { .. } => None,
+ TypeVariableValue::Known { value } => Some(value),
+ }
+ }
+
+ pub fn is_unknown(&self) -> bool {
+ match *self {
+ TypeVariableValue::Unknown { .. } => true,
+ TypeVariableValue::Known { .. } => false,
+ }
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct Instantiate;
+
+pub(crate) struct Delegate;
+
+impl<'tcx> TypeVariableStorage<'tcx> {
+ pub fn new() -> TypeVariableStorage<'tcx> {
+ TypeVariableStorage {
+ values: sv::SnapshotVecStorage::new(),
+ eq_relations: ut::UnificationTableStorage::new(),
+ sub_relations: ut::UnificationTableStorage::new(),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn with_log<'a>(
+ &'a mut self,
+ undo_log: &'a mut InferCtxtUndoLogs<'tcx>,
+ ) -> TypeVariableTable<'a, 'tcx> {
+ TypeVariableTable { storage: self, undo_log }
+ }
+}
+
+impl<'tcx> TypeVariableTable<'_, 'tcx> {
+ /// Returns the origin that was given when `vid` was created.
+ ///
+ /// Note that this function does not care whether
+ /// `vid` has been unified with something else or not.
+ pub fn var_origin(&self, vid: ty::TyVid) -> &TypeVariableOrigin {
+ &self.storage.values.get(vid.as_usize()).origin
+ }
+
+ /// Records that `a == b`.
+ ///
+ /// Precondition: neither `a` nor `b` are known.
+ pub fn equate(&mut self, a: ty::TyVid, b: ty::TyVid) {
+ debug_assert!(self.probe(a).is_unknown());
+ debug_assert!(self.probe(b).is_unknown());
+ self.eq_relations().union(a, b);
+ self.sub_relations().union(a, b);
+ }
+
+ /// Records that `a <: b`.
+ ///
+ /// Precondition: neither `a` nor `b` are known.
+ pub fn sub(&mut self, a: ty::TyVid, b: ty::TyVid) {
+ debug_assert!(self.probe(a).is_unknown());
+ debug_assert!(self.probe(b).is_unknown());
+ self.sub_relations().union(a, b);
+ }
+
+ /// Instantiates `vid` with the type `ty`.
+ ///
+ /// Precondition: `vid` must not have been previously instantiated.
+ pub fn instantiate(&mut self, vid: ty::TyVid, ty: Ty<'tcx>) {
+ let vid = self.root_var(vid);
+ debug_assert!(self.probe(vid).is_unknown());
+ debug_assert!(
+ self.eq_relations().probe_value(vid).is_unknown(),
+ "instantiating type variable `{:?}` twice: new-value = {:?}, old-value={:?}",
+ vid,
+ ty,
+ self.eq_relations().probe_value(vid)
+ );
+ self.eq_relations().union_value(vid, TypeVariableValue::Known { value: ty });
+
+ // Hack: we only need this so that `types_escaping_snapshot`
+ // can see what has been unified; see the Delegate impl for
+ // more details.
+ self.undo_log.push(Instantiate);
+ }
+
+ /// Creates a new type variable.
+ ///
+ /// - `universe`: the universe in which the new variable is created;
+ /// the variable can only be assigned values that are nameable
+ /// from this universe.
+ /// - `origin`: indicates *why* the type variable was created.
+ /// The code in this module doesn't care, but it can be useful
+ /// for improving error messages.
+ pub fn new_var(
+ &mut self,
+ universe: ty::UniverseIndex,
+ origin: TypeVariableOrigin,
+ ) -> ty::TyVid {
+ let eq_key = self.eq_relations().new_key(TypeVariableValue::Unknown { universe });
+
+ let sub_key = self.sub_relations().new_key(());
+ assert_eq!(eq_key.vid, sub_key);
+
+ let index = self.values().push(TypeVariableData { origin });
+ assert_eq!(eq_key.vid.as_u32(), index as u32);
+
+ debug!("new_var(index={:?}, universe={:?}, origin={:?})", eq_key.vid, universe, origin);
+
+ eq_key.vid
+ }
+
+ /// Returns the number of type variables created thus far.
+ pub fn num_vars(&self) -> usize {
+ self.storage.values.len()
+ }
+
+ /// Returns the "root" variable of `vid` in the `eq_relations`
+ /// equivalence table. All type variables that have been equated
+ /// will yield the same root variable (per the union-find
+ /// algorithm), so `root_var(a) == root_var(b)` implies that `a ==
+ /// b` (transitively).
+ pub fn root_var(&mut self, vid: ty::TyVid) -> ty::TyVid {
+ self.eq_relations().find(vid).vid
+ }
+
+ /// Returns the "root" variable of `vid` in the `sub_relations`
+ /// equivalence table. All type variables that are related
+ /// via equality or subtyping will yield the same root
+ /// variable (per the union-find algorithm), so `sub_root_var(a)
+ /// == sub_root_var(b)` implies that:
+ /// ```text
+ /// exists X. (a <: X || X <: a) && (b <: X || X <: b)
+ /// ```
+ pub fn sub_root_var(&mut self, vid: ty::TyVid) -> ty::TyVid {
+ self.sub_relations().find(vid)
+ }
+
+ /// Returns `true` if `a` and `b` have the same "sub-root" (i.e., there
+ /// exists some type X such that `forall i in {a, b}. (i <: X || X <: i)`).
+ pub fn sub_unified(&mut self, a: ty::TyVid, b: ty::TyVid) -> bool {
+ self.sub_root_var(a) == self.sub_root_var(b)
+ }
+
+ /// Retrieves the type to which `vid` has been instantiated, if
+ /// any.
+ pub fn probe(&mut self, vid: ty::TyVid) -> TypeVariableValue<'tcx> {
+ self.inlined_probe(vid)
+ }
+
+ /// An always-inlined variant of `probe`, for very hot call sites.
+ #[inline(always)]
+ pub fn inlined_probe(&mut self, vid: ty::TyVid) -> TypeVariableValue<'tcx> {
+ self.eq_relations().inlined_probe_value(vid)
+ }
+
+ /// If `t` is a type-inference variable, and it has been
+ /// instantiated, then return the type with which it was
+ /// instantiated. Otherwise, returns `t`.
+ pub fn replace_if_possible(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match *t.kind() {
+ ty::Infer(ty::TyVar(v)) => match self.probe(v) {
+ TypeVariableValue::Unknown { .. } => t,
+ TypeVariableValue::Known { value } => value,
+ },
+ _ => t,
+ }
+ }
+
+ #[inline]
+ fn values(
+ &mut self,
+ ) -> sv::SnapshotVec<Delegate, &mut Vec<TypeVariableData>, &mut InferCtxtUndoLogs<'tcx>> {
+ self.storage.values.with_log(self.undo_log)
+ }
+
+ #[inline]
+ fn eq_relations(&mut self) -> super::UnificationTable<'_, 'tcx, TyVidEqKey<'tcx>> {
+ self.storage.eq_relations.with_log(self.undo_log)
+ }
+
+ #[inline]
+ fn sub_relations(&mut self) -> super::UnificationTable<'_, 'tcx, ty::TyVid> {
+ self.storage.sub_relations.with_log(self.undo_log)
+ }
+
+ /// Returns a range of the type variables created during the snapshot.
+ pub fn vars_since_snapshot(
+ &mut self,
+ value_count: usize,
+ ) -> (Range<TyVid>, Vec<TypeVariableOrigin>) {
+ let range = TyVid::from_usize(value_count)..TyVid::from_usize(self.num_vars());
+ (
+ range.start..range.end,
+ (range.start.as_usize()..range.end.as_usize())
+ .map(|index| self.storage.values.get(index).origin)
+ .collect(),
+ )
+ }
+
+ /// Returns indices of all variables that are not yet
+ /// instantiated.
+ pub fn unsolved_variables(&mut self) -> Vec<ty::TyVid> {
+ (0..self.storage.values.len())
+ .filter_map(|i| {
+ let vid = ty::TyVid::from_usize(i);
+ match self.probe(vid) {
+ TypeVariableValue::Unknown { .. } => Some(vid),
+ TypeVariableValue::Known { .. } => None,
+ }
+ })
+ .collect()
+ }
+}
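+
+// A sketch of typical use (assumed names; not a doctest): equated variables
+// share a root, and instantiating one is observable through the other:
+//
+//     table.equate(a, b);
+//     assert_eq!(table.root_var(a), table.root_var(b));
+//     table.instantiate(a, some_ty);
+//     assert!(matches!(table.probe(b), TypeVariableValue::Known { .. }));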
+
+impl sv::SnapshotVecDelegate for Delegate {
+ type Value = TypeVariableData;
+ type Undo = Instantiate;
+
+ fn reverse(_values: &mut Vec<TypeVariableData>, _action: Instantiate) {
+ // We don't actually have to *do* anything to reverse an
+ // instantiation; the value for a variable is stored in the
+ // `eq_relations` and hence its rollback code will handle
+ // it. In fact, we could *almost* just remove the
+ // `SnapshotVec` entirely, except that we would have to
+ // reproduce *some* of its logic, since we want to know which
+ // type variables have been instantiated since the snapshot
+ // was started, so we can implement `types_escaping_snapshot`.
+ //
+ // (If we extended the `UnificationTable` to let us see which
+ // values have been unified and so forth, that might also
+ // suffice.)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+/// This struct (a newtyped `TyVid`) is used as the unification key
+/// for the `eq_relations`; it carries a `TypeVariableValue` along
+/// with it.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub(crate) struct TyVidEqKey<'tcx> {
+ vid: ty::TyVid,
+
+ // in the table, we map each ty-vid to one of these:
+ phantom: PhantomData<TypeVariableValue<'tcx>>,
+}
+
+impl<'tcx> From<ty::TyVid> for TyVidEqKey<'tcx> {
+ #[inline] // make this function eligible for inlining - it is quite hot.
+ fn from(vid: ty::TyVid) -> Self {
+ TyVidEqKey { vid, phantom: PhantomData }
+ }
+}
+
+impl<'tcx> ut::UnifyKey for TyVidEqKey<'tcx> {
+ type Value = TypeVariableValue<'tcx>;
+ #[inline(always)]
+ fn index(&self) -> u32 {
+ self.vid.as_u32()
+ }
+ #[inline]
+ fn from_index(i: u32) -> Self {
+ TyVidEqKey::from(ty::TyVid::from_u32(i))
+ }
+ fn tag() -> &'static str {
+ "TyVidEqKey"
+ }
+}
+
+impl<'tcx> ut::UnifyValue for TypeVariableValue<'tcx> {
+ type Error = ut::NoError;
+
+ fn unify_values(value1: &Self, value2: &Self) -> Result<Self, ut::NoError> {
+ match (value1, value2) {
+ // We never equate two type variables, both of which
+ // have known types. Instead, we recursively equate
+ // those types.
+ (&TypeVariableValue::Known { .. }, &TypeVariableValue::Known { .. }) => {
+ bug!("equating two type variables, both of which have known types")
+ }
+
+ // If one side is known, prefer that one.
+ (&TypeVariableValue::Known { .. }, &TypeVariableValue::Unknown { .. }) => Ok(*value1),
+ (&TypeVariableValue::Unknown { .. }, &TypeVariableValue::Known { .. }) => Ok(*value2),
+
+ // If both sides are *unknown*, it hardly matters, does it?
+ (
+ &TypeVariableValue::Unknown { universe: universe1 },
+ &TypeVariableValue::Unknown { universe: universe2 },
+ ) => {
+ // If we unify two unbound variables, ?T and ?U, then whatever
+ // value they wind up taking (which must be the same value) must
+ // be nameable by both universes. Therefore, the resulting
+ // universe is the minimum of the two universes, because that is
+ // the one which contains the fewest names in scope.
+ let universe = cmp::min(universe1, universe2);
+ Ok(TypeVariableValue::Unknown { universe })
+ }
+ }
+ }
+}
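+
+// For example (a sketch): unifying two unknown values keeps the smaller
+// universe, so with universes `u1 < u5`:
+//
+//     unify_values(&Unknown { universe: u1 }, &Unknown { universe: u5 })
+//         == Ok(Unknown { universe: u1 })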
diff --git a/compiler/rustc_infer/src/infer/undo_log.rs b/compiler/rustc_infer/src/infer/undo_log.rs
new file mode 100644
index 000000000..74a26ebc3
--- /dev/null
+++ b/compiler/rustc_infer/src/infer/undo_log.rs
@@ -0,0 +1,220 @@
+use std::marker::PhantomData;
+
+use rustc_data_structures::snapshot_vec as sv;
+use rustc_data_structures::undo_log::{Rollback, UndoLogs};
+use rustc_data_structures::unify as ut;
+use rustc_middle::infer::unify_key::RegionVidKey;
+use rustc_middle::ty::{self, OpaqueHiddenType, OpaqueTypeKey};
+
+use crate::{
+ infer::{region_constraints, type_variable, InferCtxtInner},
+ traits,
+};
+
+pub struct Snapshot<'tcx> {
+ pub(crate) undo_len: usize,
+ _marker: PhantomData<&'tcx ()>,
+}
+
+/// Records the "undo" data for a single operation that affects some form of inference variable.
+#[derive(Clone)]
+pub(crate) enum UndoLog<'tcx> {
+ OpaqueTypes(OpaqueTypeKey<'tcx>, Option<OpaqueHiddenType<'tcx>>),
+ TypeVariables(type_variable::UndoLog<'tcx>),
+ ConstUnificationTable(sv::UndoLog<ut::Delegate<ty::ConstVid<'tcx>>>),
+ IntUnificationTable(sv::UndoLog<ut::Delegate<ty::IntVid>>),
+ FloatUnificationTable(sv::UndoLog<ut::Delegate<ty::FloatVid>>),
+ RegionConstraintCollector(region_constraints::UndoLog<'tcx>),
+ RegionUnificationTable(sv::UndoLog<ut::Delegate<RegionVidKey<'tcx>>>),
+ ProjectionCache(traits::UndoLog<'tcx>),
+ PushRegionObligation,
+}
+
+macro_rules! impl_from {
+ ($($ctor: ident ($ty: ty),)*) => {
+ $(
+ impl<'tcx> From<$ty> for UndoLog<'tcx> {
+ fn from(x: $ty) -> Self {
+ UndoLog::$ctor(x.into())
+ }
+ }
+ )*
+ }
+}
+
+// Upcast from a single kind of "undoable action" to the general enum
+impl_from! {
+ RegionConstraintCollector(region_constraints::UndoLog<'tcx>),
+ TypeVariables(type_variable::UndoLog<'tcx>),
+
+ TypeVariables(sv::UndoLog<ut::Delegate<type_variable::TyVidEqKey<'tcx>>>),
+ TypeVariables(sv::UndoLog<ut::Delegate<ty::TyVid>>),
+ TypeVariables(sv::UndoLog<type_variable::Delegate>),
+ TypeVariables(type_variable::Instantiate),
+
+ IntUnificationTable(sv::UndoLog<ut::Delegate<ty::IntVid>>),
+
+ FloatUnificationTable(sv::UndoLog<ut::Delegate<ty::FloatVid>>),
+
+ ConstUnificationTable(sv::UndoLog<ut::Delegate<ty::ConstVid<'tcx>>>),
+
+ RegionUnificationTable(sv::UndoLog<ut::Delegate<RegionVidKey<'tcx>>>),
+ ProjectionCache(traits::UndoLog<'tcx>),
+}
+
+/// The `Rollback` trait defines how to roll back a particular action.
+impl<'tcx> Rollback<UndoLog<'tcx>> for InferCtxtInner<'tcx> {
+ fn reverse(&mut self, undo: UndoLog<'tcx>) {
+ match undo {
+ UndoLog::OpaqueTypes(key, idx) => self.opaque_type_storage.remove(key, idx),
+ UndoLog::TypeVariables(undo) => self.type_variable_storage.reverse(undo),
+ UndoLog::ConstUnificationTable(undo) => self.const_unification_storage.reverse(undo),
+ UndoLog::IntUnificationTable(undo) => self.int_unification_storage.reverse(undo),
+ UndoLog::FloatUnificationTable(undo) => self.float_unification_storage.reverse(undo),
+ UndoLog::RegionConstraintCollector(undo) => {
+ self.region_constraint_storage.as_mut().unwrap().reverse(undo)
+ }
+ UndoLog::RegionUnificationTable(undo) => {
+ self.region_constraint_storage.as_mut().unwrap().unification_table.reverse(undo)
+ }
+ UndoLog::ProjectionCache(undo) => self.projection_cache.reverse(undo),
+ UndoLog::PushRegionObligation => {
+ self.region_obligations.pop();
+ }
+ }
+ }
+}
+
+/// The combined undo log for all the various unification tables. For each change to the storage
+/// for any kind of inference variable, we record an `UndoLog` entry in the vector here.
+#[derive(Clone)]
+pub(crate) struct InferCtxtUndoLogs<'tcx> {
+ logs: Vec<UndoLog<'tcx>>,
+ num_open_snapshots: usize,
+}
+
+impl Default for InferCtxtUndoLogs<'_> {
+ fn default() -> Self {
+ Self { logs: Default::default(), num_open_snapshots: Default::default() }
+ }
+}
+
+/// The `UndoLogs` trait defines how we undo a particular kind of action (of type `T`). We can
+/// undo any action that is convertible into an `UndoLog` (per the `From` impls above).
+impl<'tcx, T> UndoLogs<T> for InferCtxtUndoLogs<'tcx>
+where
+ UndoLog<'tcx>: From<T>,
+{
+ #[inline]
+ fn num_open_snapshots(&self) -> usize {
+ self.num_open_snapshots
+ }
+
+ #[inline]
+ fn push(&mut self, undo: T) {
+ if self.in_snapshot() {
+ self.logs.push(undo.into())
+ }
+ }
+
+ fn clear(&mut self) {
+ self.logs.clear();
+ self.num_open_snapshots = 0;
+ }
+
+ fn extend<J>(&mut self, undos: J)
+ where
+ Self: Sized,
+ J: IntoIterator<Item = T>,
+ {
+ if self.in_snapshot() {
+ self.logs.extend(undos.into_iter().map(UndoLog::from))
+ }
+ }
+}
+
+impl<'tcx> InferCtxtInner<'tcx> {
+ pub fn rollback_to(&mut self, snapshot: Snapshot<'tcx>) {
+ debug!("rollback_to({})", snapshot.undo_len);
+ self.undo_log.assert_open_snapshot(&snapshot);
+
+ while self.undo_log.logs.len() > snapshot.undo_len {
+ let undo = self.undo_log.logs.pop().unwrap();
+ self.reverse(undo);
+ }
+
+ if self.undo_log.num_open_snapshots == 1 {
+ // The root snapshot. It's safe to clear the undo log because
+ // there's no snapshot further out that we might need to roll back
+ // to.
+ assert!(snapshot.undo_len == 0);
+ self.undo_log.logs.clear();
+ }
+
+ self.undo_log.num_open_snapshots -= 1;
+ }
+
+ pub fn commit(&mut self, snapshot: Snapshot<'tcx>) {
+ debug!("commit({})", snapshot.undo_len);
+
+ if self.undo_log.num_open_snapshots == 1 {
+ // The root snapshot. It's safe to clear the undo log because
+ // there's no snapshot further out that we might need to roll back
+ // to.
+ assert!(snapshot.undo_len == 0);
+ self.undo_log.logs.clear();
+ }
+
+ self.undo_log.num_open_snapshots -= 1;
+ }
+}
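+
+// A sketch of the intended snapshot discipline (assumed names; not a
+// doctest):
+//
+//     let snapshot = inner.undo_log.start_snapshot();
+//     // ... mutate inference state, pushing undo entries ...
+//     if success {
+//         inner.commit(snapshot);
+//     } else {
+//         inner.rollback_to(snapshot);
+//     }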
+
+impl<'tcx> InferCtxtUndoLogs<'tcx> {
+ pub fn start_snapshot(&mut self) -> Snapshot<'tcx> {
+ self.num_open_snapshots += 1;
+ Snapshot { undo_len: self.logs.len(), _marker: PhantomData }
+ }
+
+ pub(crate) fn region_constraints_in_snapshot(
+ &self,
+ s: &Snapshot<'tcx>,
+ ) -> impl Iterator<Item = &'_ region_constraints::UndoLog<'tcx>> + Clone {
+ self.logs[s.undo_len..].iter().filter_map(|log| match log {
+ UndoLog::RegionConstraintCollector(log) => Some(log),
+ _ => None,
+ })
+ }
+
+ pub(crate) fn opaque_types_in_snapshot(&self, s: &Snapshot<'tcx>) -> bool {
+ self.logs[s.undo_len..].iter().any(|log| matches!(log, UndoLog::OpaqueTypes(..)))
+ }
+
+ pub(crate) fn region_constraints(
+ &self,
+ ) -> impl Iterator<Item = &'_ region_constraints::UndoLog<'tcx>> + Clone {
+ self.logs.iter().filter_map(|log| match log {
+ UndoLog::RegionConstraintCollector(log) => Some(log),
+ _ => None,
+ })
+ }
+
+ fn assert_open_snapshot(&self, snapshot: &Snapshot<'tcx>) {
+ // Failures here may indicate a failure to follow a stack discipline.
+ assert!(self.logs.len() >= snapshot.undo_len);
+ assert!(self.num_open_snapshots > 0);
+ }
+}
+
+impl<'tcx> std::ops::Index<usize> for InferCtxtUndoLogs<'tcx> {
+ type Output = UndoLog<'tcx>;
+
+ fn index(&self, key: usize) -> &Self::Output {
+ &self.logs[key]
+ }
+}
+
+impl<'tcx> std::ops::IndexMut<usize> for InferCtxtUndoLogs<'tcx> {
+ fn index_mut(&mut self, key: usize) -> &mut Self::Output {
+ &mut self.logs[key]
+ }
+}
diff --git a/compiler/rustc_infer/src/lib.rs b/compiler/rustc_infer/src/lib.rs
new file mode 100644
index 000000000..7769a68ba
--- /dev/null
+++ b/compiler/rustc_infer/src/lib.rs
@@ -0,0 +1,39 @@
+//! This crate defines the type inference engine.
+//!
+//! - **Type inference.** The type inference code can be found in the `infer` module;
+//! this code handles low-level equality and subtyping operations. The
+//! type check pass in the compiler is found in the `rustc_typeck` crate.
+//!
+//! For more information about how rustc works, see the [rustc dev guide].
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![allow(rustc::potential_query_instability)]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(box_patterns)]
+#![feature(control_flow_enum)]
+#![feature(extend_one)]
+#![feature(label_break_value)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(try_blocks)]
+#![recursion_limit = "512"] // For rustdoc
+
+#[macro_use]
+extern crate rustc_macros;
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[macro_use]
+extern crate rustc_data_structures;
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+pub mod infer;
+pub mod traits;
diff --git a/compiler/rustc_infer/src/traits/engine.rs b/compiler/rustc_infer/src/traits/engine.rs
new file mode 100644
index 000000000..736278ba0
--- /dev/null
+++ b/compiler/rustc_infer/src/traits/engine.rs
@@ -0,0 +1,76 @@
+use crate::infer::InferCtxt;
+use crate::traits::Obligation;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::{self, ToPredicate, Ty};
+
+use super::FulfillmentError;
+use super::{ObligationCause, PredicateObligation};
+
+pub trait TraitEngine<'tcx>: 'tcx {
+ fn normalize_projection_type(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ cause: ObligationCause<'tcx>,
+ ) -> Ty<'tcx>;
+
+ /// Requires that `ty` must implement the trait with `def_id` in
+ /// the given environment. This trait must not have any type
+ /// parameters (except for `Self`).
+ fn register_bound(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ def_id: DefId,
+ cause: ObligationCause<'tcx>,
+ ) {
+ let trait_ref = ty::TraitRef { def_id, substs: infcx.tcx.mk_substs_trait(ty, &[]) };
+ self.register_predicate_obligation(
+ infcx,
+ Obligation {
+ cause,
+ recursion_depth: 0,
+ param_env,
+ predicate: ty::Binder::dummy(trait_ref).without_const().to_predicate(infcx.tcx),
+ },
+ );
+ }
+
+ fn register_predicate_obligation(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ obligation: PredicateObligation<'tcx>,
+ );
+
+ fn select_all_or_error(&mut self, infcx: &InferCtxt<'_, 'tcx>) -> Vec<FulfillmentError<'tcx>>;
+
+ fn select_where_possible(&mut self, infcx: &InferCtxt<'_, 'tcx>)
+ -> Vec<FulfillmentError<'tcx>>;
+
+ fn pending_obligations(&self) -> Vec<PredicateObligation<'tcx>>;
+
+ fn relationships(&mut self) -> &mut FxHashMap<ty::TyVid, ty::FoundRelationships>;
+}
+
+pub trait TraitEngineExt<'tcx> {
+ fn register_predicate_obligations(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ obligations: impl IntoIterator<Item = PredicateObligation<'tcx>>,
+ );
+}
+
+impl<'tcx, T: ?Sized + TraitEngine<'tcx>> TraitEngineExt<'tcx> for T {
+ fn register_predicate_obligations(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ obligations: impl IntoIterator<Item = PredicateObligation<'tcx>>,
+ ) {
+ for obligation in obligations {
+ self.register_predicate_obligation(infcx, obligation);
+ }
+ }
+}
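+
+// A sketch of typical use (assumed names; not a doctest): register a bound
+// and then drive selection to completion:
+//
+//     engine.register_bound(infcx, param_env, ty, some_trait_def_id, cause);
+//     let errors = engine.select_all_or_error(infcx);
+//     assert!(errors.is_empty());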
diff --git a/compiler/rustc_infer/src/traits/error_reporting/mod.rs b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
new file mode 100644
index 000000000..95b6c4ce1
--- /dev/null
+++ b/compiler/rustc_infer/src/traits/error_reporting/mod.rs
@@ -0,0 +1,108 @@
+use super::ObjectSafetyViolation;
+
+use crate::infer::InferCtxt;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{struct_span_err, DiagnosticBuilder, ErrorGuaranteed, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::Span;
+use std::fmt;
+use std::iter;
+
+impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
+ pub fn report_extra_impl_obligation(
+ &self,
+ error_span: Span,
+ impl_item_def_id: LocalDefId,
+ trait_item_def_id: DefId,
+ requirement: &dyn fmt::Display,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ error_span,
+ E0276,
+ "impl has stricter requirements than trait"
+ );
+
+ if let Some(span) = self.tcx.hir().span_if_local(trait_item_def_id) {
+ let item_name = self.tcx.item_name(impl_item_def_id.to_def_id());
+ err.span_label(span, format!("definition of `{}` from trait", item_name));
+ }
+
+ err.span_label(error_span, format!("impl has extra requirement {}", requirement));
+
+ err
+ }
+}
+
+pub fn report_object_safety_error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ trait_def_id: DefId,
+ violations: &[ObjectSafetyViolation],
+) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let trait_str = tcx.def_path_str(trait_def_id);
+ let trait_span = tcx.hir().get_if_local(trait_def_id).and_then(|node| match node {
+ hir::Node::Item(item) => Some(item.ident.span),
+ _ => None,
+ });
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0038,
+ "the trait `{}` cannot be made into an object",
+ trait_str
+ );
+ err.span_label(span, format!("`{}` cannot be made into an object", trait_str));
+
+ let mut reported_violations = FxHashSet::default();
+ let mut multi_span = vec![];
+ let mut messages = vec![];
+ for violation in violations {
+ if let ObjectSafetyViolation::SizedSelf(sp) = &violation && !sp.is_empty() {
+ // Do not report `SizedSelf` without spans pointing at `SizedSelf` obligations
+ // with a `Span`.
+ reported_violations.insert(ObjectSafetyViolation::SizedSelf(vec![].into()));
+ }
+ if reported_violations.insert(violation.clone()) {
+ let spans = violation.spans();
+ let msg = if trait_span.is_none() || spans.is_empty() {
+ format!("the trait cannot be made into an object because {}", violation.error_msg())
+ } else {
+ format!("...because {}", violation.error_msg())
+ };
+ if spans.is_empty() {
+ err.note(&msg);
+ } else {
+ for span in spans {
+ multi_span.push(span);
+ messages.push(msg.clone());
+ }
+ }
+ }
+ }
+ let has_multi_span = !multi_span.is_empty();
+ let mut note_span = MultiSpan::from_spans(multi_span.clone());
+ if let (Some(trait_span), true) = (trait_span, has_multi_span) {
+ note_span.push_span_label(trait_span, "this trait cannot be made into an object...");
+ }
+ for (span, msg) in iter::zip(multi_span, messages) {
+ note_span.push_span_label(span, msg);
+ }
+ err.span_note(
+ note_span,
+ "for a trait to be \"object safe\" it needs to allow building a vtable to allow the call \
+ to be resolvable dynamically; for more information visit \
+ <https://doc.rust-lang.org/reference/items/traits.html#object-safety>",
+ );
+ if trait_span.is_some() {
+ let mut reported_violations: Vec<_> = reported_violations.into_iter().collect();
+ reported_violations.sort();
+ for violation in reported_violations {
+ // Only provide the help if it's a local trait; otherwise it's not actionable.
+ violation.solution(&mut err);
+ }
+ }
+ err
+}
diff --git a/compiler/rustc_infer/src/traits/mod.rs b/compiler/rustc_infer/src/traits/mod.rs
new file mode 100644
index 000000000..4df4de21a
--- /dev/null
+++ b/compiler/rustc_infer/src/traits/mod.rs
@@ -0,0 +1,170 @@
+//! Trait Resolution. See the [rustc-dev-guide] for more information on how this works.
+//!
+//! [rustc-dev-guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html
+
+mod engine;
+pub mod error_reporting;
+mod project;
+mod structural_impls;
+pub mod util;
+
+use rustc_hir as hir;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::{self, Const, Ty, TyCtxt};
+use rustc_span::Span;
+
+pub use self::FulfillmentErrorCode::*;
+pub use self::ImplSource::*;
+pub use self::ObligationCauseCode::*;
+pub use self::SelectionError::*;
+
+pub use self::engine::{TraitEngine, TraitEngineExt};
+pub use self::project::MismatchedProjectionTypes;
+pub(crate) use self::project::UndoLog;
+pub use self::project::{
+ Normalized, NormalizedTy, ProjectionCache, ProjectionCacheEntry, ProjectionCacheKey,
+ ProjectionCacheStorage, Reveal,
+};
+pub use rustc_middle::traits::*;
+
+/// An `Obligation` represents some trait reference (e.g., `i32: Eq`) for
+/// which the "impl_source" must be found. The process of finding an "impl_source" is
+/// called "resolving" the `Obligation`. This process consists of
+/// either identifying an `impl` (e.g., `impl Eq for i32`) that
+/// satisfies the obligation, or else finding a bound that is in
+/// scope. The eventual result is usually a `Selection` (defined below).
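+///
+/// For example (a sketch; `cause`, `param_env`, and the `i32: Eq`
+/// predicate are assumed to be built already), the obligation that kicks
+/// off that process looks like:
+///
+/// ```ignore (illustrative)
+/// let obligation = Obligation::new(cause, param_env, i32_eq_predicate);
+/// ```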
+#[derive(Clone, PartialEq, Eq, Hash)]
+pub struct Obligation<'tcx, T> {
+ /// The reason we have to prove this thing.
+ pub cause: ObligationCause<'tcx>,
+
+ /// The environment in which we should prove this thing.
+ pub param_env: ty::ParamEnv<'tcx>,
+
+ /// The thing we are trying to prove.
+ pub predicate: T,
+
+ /// If we started proving this as a result of trying to prove
+ /// something else, track the total depth to ensure termination.
+ /// If this goes over a certain threshold, we abort compilation --
+ /// in such cases, we cannot say whether or not the predicate
+ /// holds for certain. Stupid halting problem; such a drag.
+ pub recursion_depth: usize,
+}
+
+pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>;
+pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>;
+
+impl<'tcx> PredicateObligation<'tcx> {
+ /// Flips the polarity of the inner predicate.
+ ///
+ /// Given a `T: Trait` predicate, returns `T: !Trait`; given `T: !Trait`, returns `T: Trait`.
+ pub fn flip_polarity(&self, tcx: TyCtxt<'tcx>) -> Option<PredicateObligation<'tcx>> {
+ Some(PredicateObligation {
+ cause: self.cause.clone(),
+ param_env: self.param_env,
+ predicate: self.predicate.flip_polarity(tcx)?,
+ recursion_depth: self.recursion_depth,
+ })
+ }
+}
+
+impl<'tcx> TraitObligation<'tcx> {
+ /// Returns `true` if the trait predicate is considered `const` in its ParamEnv.
+ pub fn is_const(&self) -> bool {
+ match (self.predicate.skip_binder().constness, self.param_env.constness()) {
+ (ty::BoundConstness::ConstIfConst, hir::Constness::Const) => true,
+ _ => false,
+ }
+ }
+
+ pub fn derived_cause(
+ &self,
+ variant: impl FnOnce(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
+ ) -> ObligationCause<'tcx> {
+ self.cause.clone().derived_cause(self.predicate, variant)
+ }
+}
+
+// `PredicateObligation` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(PredicateObligation<'_>, 48);
+
+pub type PredicateObligations<'tcx> = Vec<PredicateObligation<'tcx>>;
+
+pub type Selection<'tcx> = ImplSource<'tcx, PredicateObligation<'tcx>>;
+
+pub struct FulfillmentError<'tcx> {
+ pub obligation: PredicateObligation<'tcx>,
+ pub code: FulfillmentErrorCode<'tcx>,
+ /// Diagnostics only: the 'root' obligation which resulted in
+ /// the failure to process `obligation`. This is the obligation
+ /// that was initially passed to `register_predicate_obligation`.
+ pub root_obligation: PredicateObligation<'tcx>,
+}
+
+#[derive(Clone)]
+pub enum FulfillmentErrorCode<'tcx> {
+ CodeSelectionError(SelectionError<'tcx>),
+ CodeProjectionError(MismatchedProjectionTypes<'tcx>),
+ CodeSubtypeError(ExpectedFound<Ty<'tcx>>, TypeError<'tcx>), // always comes from a SubtypePredicate
+ CodeConstEquateError(ExpectedFound<Const<'tcx>>, TypeError<'tcx>),
+ CodeAmbiguity,
+}
+
+impl<'tcx, O> Obligation<'tcx, O> {
+ pub fn new(
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ predicate: O,
+ ) -> Obligation<'tcx, O> {
+ Obligation { cause, param_env, recursion_depth: 0, predicate }
+ }
+
+ pub fn with_depth(
+ cause: ObligationCause<'tcx>,
+ recursion_depth: usize,
+ param_env: ty::ParamEnv<'tcx>,
+ predicate: O,
+ ) -> Obligation<'tcx, O> {
+ Obligation { cause, param_env, recursion_depth, predicate }
+ }
+
+ pub fn misc(
+ span: Span,
+ body_id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ trait_ref: O,
+ ) -> Obligation<'tcx, O> {
+ Obligation::new(ObligationCause::misc(span, body_id), param_env, trait_ref)
+ }
+
+ pub fn with<P>(&self, value: P) -> Obligation<'tcx, P> {
+ Obligation {
+ cause: self.cause.clone(),
+ param_env: self.param_env,
+ recursion_depth: self.recursion_depth,
+ predicate: value,
+ }
+ }
+}
+
+impl<'tcx> FulfillmentError<'tcx> {
+ pub fn new(
+ obligation: PredicateObligation<'tcx>,
+ code: FulfillmentErrorCode<'tcx>,
+ root_obligation: PredicateObligation<'tcx>,
+ ) -> FulfillmentError<'tcx> {
+ FulfillmentError { obligation, code, root_obligation }
+ }
+}
+
+impl<'tcx> TraitObligation<'tcx> {
+ pub fn polarity(&self) -> ty::ImplPolarity {
+ self.predicate.skip_binder().polarity
+ }
+
+ pub fn self_ty(&self) -> ty::Binder<'tcx, Ty<'tcx>> {
+ self.predicate.map_bound(|p| p.self_ty())
+ }
+}
diff --git a/compiler/rustc_infer/src/traits/project.rs b/compiler/rustc_infer/src/traits/project.rs
new file mode 100644
index 000000000..5d22f9f97
--- /dev/null
+++ b/compiler/rustc_infer/src/traits/project.rs
@@ -0,0 +1,255 @@
+//! Code for projecting associated types out of trait references.
+
+use super::PredicateObligation;
+
+use crate::infer::InferCtxtUndoLogs;
+
+use rustc_data_structures::{
+ snapshot_map::{self, SnapshotMapRef, SnapshotMapStorage},
+ undo_log::Rollback,
+};
+use rustc_middle::ty::{self, Ty};
+
+pub use rustc_middle::traits::{EvaluationResult, Reveal};
+
+pub(crate) type UndoLog<'tcx> =
+ snapshot_map::UndoLog<ProjectionCacheKey<'tcx>, ProjectionCacheEntry<'tcx>>;
+
+#[derive(Clone)]
+pub struct MismatchedProjectionTypes<'tcx> {
+ pub err: ty::error::TypeError<'tcx>,
+}
+
+#[derive(Clone, TypeFoldable, TypeVisitable)]
+pub struct Normalized<'tcx, T> {
+ pub value: T,
+ pub obligations: Vec<PredicateObligation<'tcx>>,
+}
+
+pub type NormalizedTy<'tcx> = Normalized<'tcx, Ty<'tcx>>;
+
+impl<'tcx, T> Normalized<'tcx, T> {
+ pub fn with<U>(self, value: U) -> Normalized<'tcx, U> {
+ Normalized { value, obligations: self.obligations }
+ }
+}
+
+// # Cache
+
+/// The projection cache. Unlike the standard caches, this can include
+/// infcx-dependent type variables; therefore we have to roll the
+/// cache back each time we roll a snapshot back, to avoid assumptions
+/// on yet-unresolved inference variables. Types with placeholder
+/// regions also have to be removed when the respective snapshot ends.
+///
+/// Because of that, projection cache entries can be "stranded" and left
+/// inaccessible when type variables inside the key are resolved. We make no
+/// attempt to recover or remove "stranded" entries, but rather let them be
+/// (for the lifetime of the infcx).
+///
+/// Entries in the projection cache might contain inference variables
+/// that will be resolved by obligations on the projection cache entry (e.g.,
+/// when a type parameter in the associated type is constrained through
+/// an "RFC 447" projection on the impl).
+///
+/// When working with a fulfillment context, the derived obligations of each
+/// projection cache entry will be registered on the fulfillcx, so any users
+/// that can wait for a fulfillcx fixed point need not care about this. However,
+/// users that don't wait for a fixed point (e.g., trait evaluation) have to
+/// resolve the obligations themselves to make sure the projected result is
+/// ok and avoid issues like #43132.
+///
+/// If that is done, then after evaluating the obligations it is a good idea to
+/// call `ProjectionCache::complete` to make sure the obligations won't be
+/// re-evaluated and avoid an exponential worst-case.
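+///
+/// A sketch of the resulting protocol, as seen from a normalization
+/// routine (the method names are the ones defined below; control flow is
+/// heavily simplified):
+///
+/// ```ignore (illustrative)
+/// match cache.try_start(key) {
+///     Ok(()) => {
+///         // ... normalize, then: cache.insert_term(key, normalized)
+///         // or on failure: cache.ambiguous(key) / cache.error(key)
+///     }
+///     Err(ProjectionCacheEntry::InProgress) => cache.recur(key),
+///     Err(entry) => {
+///         // cache hit: reuse `entry` (e.g. a `NormalizedTy`)
+///     }
+/// }
+/// ```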
+//
+// FIXME: we probably also want some sort of cross-infcx cache here to
+// reduce the amount of duplication. Let's see what we get with the Chalk reforms.
+pub struct ProjectionCache<'a, 'tcx> {
+ map: &'a mut SnapshotMapStorage<ProjectionCacheKey<'tcx>, ProjectionCacheEntry<'tcx>>,
+ undo_log: &'a mut InferCtxtUndoLogs<'tcx>,
+}
+
+#[derive(Clone, Default)]
+pub struct ProjectionCacheStorage<'tcx> {
+ map: SnapshotMapStorage<ProjectionCacheKey<'tcx>, ProjectionCacheEntry<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub struct ProjectionCacheKey<'tcx> {
+ ty: ty::ProjectionTy<'tcx>,
+}
+
+impl<'tcx> ProjectionCacheKey<'tcx> {
+ pub fn new(ty: ty::ProjectionTy<'tcx>) -> Self {
+ Self { ty }
+ }
+}
+
+#[derive(Clone, Debug)]
+pub enum ProjectionCacheEntry<'tcx> {
+ InProgress,
+ Ambiguous,
+ Recur,
+ Error,
+ NormalizedTy {
+ ty: Normalized<'tcx, ty::Term<'tcx>>,
+ /// If we were able to successfully evaluate the
+ /// corresponding cache entry key during predicate
+ /// evaluation, then this field stores the final
+ /// result obtained from evaluating all of the projection
+ /// sub-obligations. During evaluation, we will skip
+ /// evaluating the cached sub-obligations in `ty`
+ /// if this field is set. Evaluation only
+ /// cares about the final result, so we don't
+ /// care about any region constraint side-effects
+ /// produced by evaluating the sub-obligations.
+ ///
+ /// Additionally, we will clear out the sub-obligations
+ /// entirely if we ever evaluate the cache entry (along
+ /// with all its sub obligations) to `EvaluatedToOk`.
+ /// This affects all users of the cache, not just evaluation.
+ /// Since a result of `EvaluatedToOk` means that there were
+ /// no region obligations that need to be tracked, it's
+ /// fine to forget about the sub-obligations - they
+ /// don't provide any additional information. However,
+ /// we do *not* discard any obligations when we see
+ /// `EvaluatedToOkModuloRegions` - we don't know
+ /// which sub-obligations may introduce region constraints,
+ /// so we keep them all to be safe.
+ ///
+ /// When we are not performing evaluation
+ /// (e.g. in `FulfillmentContext`), we ignore this field,
+ /// and always re-process the cached sub-obligations
+ /// (which may have been cleared out - see the above
+ /// paragraph).
+ /// This ensures that we do not lose any regions
+ /// constraints that arise from processing the
+ /// sub-obligations.
+ complete: Option<EvaluationResult>,
+ },
+}
+
+impl<'tcx> ProjectionCacheStorage<'tcx> {
+ #[inline]
+ pub(crate) fn with_log<'a>(
+ &'a mut self,
+ undo_log: &'a mut InferCtxtUndoLogs<'tcx>,
+ ) -> ProjectionCache<'a, 'tcx> {
+ ProjectionCache { map: &mut self.map, undo_log }
+ }
+}
+
+impl<'tcx> ProjectionCache<'_, 'tcx> {
+ #[inline]
+ fn map(
+ &mut self,
+ ) -> SnapshotMapRef<
+ '_,
+ ProjectionCacheKey<'tcx>,
+ ProjectionCacheEntry<'tcx>,
+ InferCtxtUndoLogs<'tcx>,
+ > {
+ self.map.with_log(self.undo_log)
+ }
+
+ pub fn clear(&mut self) {
+ self.map().clear();
+ }
+
+ /// Try to start normalizing `key`; returns an error if
+ /// normalization already occurred (this error corresponds to a
+ /// cache hit, so it's actually a good thing).
+ pub fn try_start(
+ &mut self,
+ key: ProjectionCacheKey<'tcx>,
+ ) -> Result<(), ProjectionCacheEntry<'tcx>> {
+ let mut map = self.map();
+ if let Some(entry) = map.get(&key) {
+ return Err(entry.clone());
+ }
+
+ map.insert(key, ProjectionCacheEntry::InProgress);
+ Ok(())
+ }
+
+ /// Indicates that `key` was normalized to `value`.
+ pub fn insert_term(
+ &mut self,
+ key: ProjectionCacheKey<'tcx>,
+ value: Normalized<'tcx, ty::Term<'tcx>>,
+ ) {
+ debug!(
+ "ProjectionCacheEntry::insert_ty: adding cache entry: key={:?}, value={:?}",
+ key, value
+ );
+ let mut map = self.map();
+ if let Some(ProjectionCacheEntry::Recur) = map.get(&key) {
+ debug!("Not overwriting Recur");
+ return;
+ }
+ let fresh_key =
+ map.insert(key, ProjectionCacheEntry::NormalizedTy { ty: value, complete: None });
+ assert!(!fresh_key, "never started projecting `{:?}`", key);
+ }
+
+ /// Mark the relevant projection cache key as having its derived obligations
+ /// complete, so they won't have to be re-computed (this is OK to do in a
+ /// snapshot - if the snapshot is rolled back, the obligations will be
+ /// marked as incomplete again).
+ pub fn complete(&mut self, key: ProjectionCacheKey<'tcx>, result: EvaluationResult) {
+ let mut map = self.map();
+ match map.get(&key) {
+ Some(&ProjectionCacheEntry::NormalizedTy { ref ty, complete: _ }) => {
+ info!("ProjectionCacheEntry::complete({:?}) - completing {:?}", key, ty);
+ let mut ty = ty.clone();
+ if result.must_apply_considering_regions() {
+ ty.obligations = vec![];
+ }
+ map.insert(key, ProjectionCacheEntry::NormalizedTy { ty, complete: Some(result) });
+ }
+ ref value => {
+ // Type inference could leave old cache entries "stranded" (see the
+ // type-level docs). Leave them alone for now.
+ info!("ProjectionCacheEntry::complete({:?}) - ignoring {:?}", key, value);
+ }
+ };
+ }
+
+ pub fn is_complete(&mut self, key: ProjectionCacheKey<'tcx>) -> Option<EvaluationResult> {
+ self.map().get(&key).and_then(|res| match res {
+ ProjectionCacheEntry::NormalizedTy { ty: _, complete } => *complete,
+ _ => None,
+ })
+ }
+
+ /// Indicates that trying to normalize `key` resulted in
+ /// ambiguity. No point in trying it again then until we gain more
+ /// type information (in which case, the "fully resolved" key will
+ /// be different).
+ pub fn ambiguous(&mut self, key: ProjectionCacheKey<'tcx>) {
+ let fresh = self.map().insert(key, ProjectionCacheEntry::Ambiguous);
+ assert!(!fresh, "never started projecting `{:?}`", key);
+ }
+
+ /// Indicates that while trying to normalize `key`, `key` was required to
+ /// be normalized again. Selection or evaluation should eventually report
+ /// an error here.
+ pub fn recur(&mut self, key: ProjectionCacheKey<'tcx>) {
+ let fresh = self.map().insert(key, ProjectionCacheEntry::Recur);
+ assert!(!fresh, "never started projecting `{:?}`", key);
+ }
+
+ /// Indicates that trying to normalize `key` resulted in
+ /// error.
+ pub fn error(&mut self, key: ProjectionCacheKey<'tcx>) {
+ let fresh = self.map().insert(key, ProjectionCacheEntry::Error);
+ assert!(!fresh, "never started projecting `{:?}`", key);
+ }
+}
+
+impl<'tcx> Rollback<UndoLog<'tcx>> for ProjectionCacheStorage<'tcx> {
+ fn reverse(&mut self, undo: UndoLog<'tcx>) {
+ self.map.reverse(undo);
+ }
+}
diff --git a/compiler/rustc_infer/src/traits/structural_impls.rs b/compiler/rustc_infer/src/traits/structural_impls.rs
new file mode 100644
index 000000000..573d2d1e3
--- /dev/null
+++ b/compiler/rustc_infer/src/traits/structural_impls.rs
@@ -0,0 +1,79 @@
+use crate::traits;
+use crate::traits::project::Normalized;
+use rustc_middle::ty;
+use rustc_middle::ty::fold::{FallibleTypeFolder, TypeFoldable};
+use rustc_middle::ty::visit::{TypeVisitable, TypeVisitor};
+
+use std::fmt;
+use std::ops::ControlFlow;
+
+// Structural impls for the structs in `traits`.
+
+impl<'tcx, T: fmt::Debug> fmt::Debug for Normalized<'tcx, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Normalized({:?}, {:?})", self.value, self.obligations)
+ }
+}
+
+impl<'tcx, O: fmt::Debug> fmt::Debug for traits::Obligation<'tcx, O> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if ty::tls::with(|tcx| tcx.sess.verbose()) {
+ write!(
+ f,
+ "Obligation(predicate={:?}, cause={:?}, param_env={:?}, depth={})",
+ self.predicate, self.cause, self.param_env, self.recursion_depth
+ )
+ } else {
+ write!(f, "Obligation(predicate={:?}, depth={})", self.predicate, self.recursion_depth)
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for traits::FulfillmentError<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "FulfillmentError({:?},{:?})", self.obligation, self.code)
+ }
+}
+
+impl<'tcx> fmt::Debug for traits::FulfillmentErrorCode<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ super::CodeSelectionError(ref e) => write!(f, "{:?}", e),
+ super::CodeProjectionError(ref e) => write!(f, "{:?}", e),
+ super::CodeSubtypeError(ref a, ref b) => {
+ write!(f, "CodeSubtypeError({:?}, {:?})", a, b)
+ }
+ super::CodeConstEquateError(ref a, ref b) => {
+ write!(f, "CodeConstEquateError({:?}, {:?})", a, b)
+ }
+ super::CodeAmbiguity => write!(f, "Ambiguity"),
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for traits::MismatchedProjectionTypes<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "MismatchedProjectionTypes({:?})", self.err)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// TypeFoldable implementations.
+
+impl<'tcx, O: TypeFoldable<'tcx>> TypeFoldable<'tcx> for traits::Obligation<'tcx, O> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(traits::Obligation {
+ cause: self.cause,
+ recursion_depth: self.recursion_depth,
+ predicate: self.predicate.try_fold_with(folder)?,
+ param_env: self.param_env.try_fold_with(folder)?,
+ })
+ }
+}
+
+impl<'tcx, O: TypeVisitable<'tcx>> TypeVisitable<'tcx> for traits::Obligation<'tcx, O> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.predicate.visit_with(visitor)?;
+ self.param_env.visit_with(visitor)
+ }
+}
diff --git a/compiler/rustc_infer/src/traits/util.rs b/compiler/rustc_infer/src/traits/util.rs
new file mode 100644
index 000000000..f5a1edf6d
--- /dev/null
+++ b/compiler/rustc_infer/src/traits/util.rs
@@ -0,0 +1,390 @@
+use smallvec::smallvec;
+
+use crate::infer::outlives::components::{push_outlives_components, Component};
+use crate::traits::{Obligation, ObligationCause, PredicateObligation};
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
+use rustc_middle::ty::{self, ToPredicate, TyCtxt};
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+pub fn anonymize_predicate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ pred: ty::Predicate<'tcx>,
+) -> ty::Predicate<'tcx> {
+ let new = tcx.anonymize_bound_vars(pred.kind());
+ tcx.reuse_or_mk_predicate(pred, new)
+}
+
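+/// A set of predicates, deduplicated modulo the renaming of late-bound
+/// regions (see `insert` below). A sketch of what that buys, with the two
+/// predicate values assumed to be in scope:
+///
+/// ```ignore (illustrative)
+/// let mut set = PredicateSet::new(tcx);
+/// assert!(set.insert(pred_for_a)); // `for<'a> Foo<&'a i32>`: fresh entry
+/// assert!(!set.insert(pred_for_b)); // `for<'b> Foo<&'b i32>`: equivalent, so rejected
+/// ```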
+pub struct PredicateSet<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ set: FxHashSet<ty::Predicate<'tcx>>,
+}
+
+impl<'tcx> PredicateSet<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>) -> Self {
+ Self { tcx, set: Default::default() }
+ }
+
+ pub fn insert(&mut self, pred: ty::Predicate<'tcx>) -> bool {
+ // We have to be careful here because we want
+ //
+ // for<'a> Foo<&'a i32>
+ //
+ // and
+ //
+ // for<'b> Foo<&'b i32>
+ //
+ // to be considered equivalent. So normalize all late-bound
+ // regions before we throw things into the underlying set.
+ self.set.insert(anonymize_predicate(self.tcx, pred))
+ }
+}
+
+impl<'tcx> Extend<ty::Predicate<'tcx>> for PredicateSet<'tcx> {
+ fn extend<I: IntoIterator<Item = ty::Predicate<'tcx>>>(&mut self, iter: I) {
+ for pred in iter {
+ self.insert(pred);
+ }
+ }
+
+ fn extend_one(&mut self, pred: ty::Predicate<'tcx>) {
+ self.insert(pred);
+ }
+
+ fn extend_reserve(&mut self, additional: usize) {
+ Extend::<ty::Predicate<'tcx>>::extend_reserve(&mut self.set, additional);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// `Elaboration` iterator
+///////////////////////////////////////////////////////////////////////////
+
+/// "Elaboration" is the process of identifying all the predicates that
+/// are implied by a source predicate. Currently, this basically means
+/// walking the "supertraits" and other similar assumptions. For example,
+/// if we know that `T: Ord`, the elaborator would deduce that `T: PartialOrd`
+/// holds as well. Similarly, if we have `trait Foo: 'static`, and we know that
+/// `T: Foo`, then we know that `T: 'static`.
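+///
+/// A sketch of driving the iterator, assuming `tcx` and a predicate for
+/// `T: Ord` are in scope:
+///
+/// ```ignore (illustrative)
+/// for obligation in elaborate_predicates(tcx, std::iter::once(t_ord_pred)) {
+///     // yields `T: Ord` first, then implied predicates such as
+///     // `T: PartialOrd`, `T: Eq`, `T: PartialEq`, ...
+/// }
+/// ```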
+pub struct Elaborator<'tcx> {
+ stack: Vec<PredicateObligation<'tcx>>,
+ visited: PredicateSet<'tcx>,
+}
+
+pub fn elaborate_trait_ref<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+) -> Elaborator<'tcx> {
+ elaborate_predicates(tcx, std::iter::once(trait_ref.without_const().to_predicate(tcx)))
+}
+
+pub fn elaborate_trait_refs<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_refs: impl Iterator<Item = ty::PolyTraitRef<'tcx>>,
+) -> Elaborator<'tcx> {
+ let predicates = trait_refs.map(|trait_ref| trait_ref.without_const().to_predicate(tcx));
+ elaborate_predicates(tcx, predicates)
+}
+
+pub fn elaborate_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicates: impl Iterator<Item = ty::Predicate<'tcx>>,
+) -> Elaborator<'tcx> {
+ let obligations = predicates
+ .map(|predicate| {
+ predicate_obligation(predicate, ty::ParamEnv::empty(), ObligationCause::dummy())
+ })
+ .collect();
+ elaborate_obligations(tcx, obligations)
+}
+
+pub fn elaborate_predicates_with_span<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicates: impl Iterator<Item = (ty::Predicate<'tcx>, Span)>,
+) -> Elaborator<'tcx> {
+ let obligations = predicates
+ .map(|(predicate, span)| {
+ predicate_obligation(
+ predicate,
+ ty::ParamEnv::empty(),
+ ObligationCause::dummy_with_span(span),
+ )
+ })
+ .collect();
+ elaborate_obligations(tcx, obligations)
+}
+
+pub fn elaborate_obligations<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mut obligations: Vec<PredicateObligation<'tcx>>,
+) -> Elaborator<'tcx> {
+ let mut visited = PredicateSet::new(tcx);
+ obligations.retain(|obligation| visited.insert(obligation.predicate));
+ Elaborator { stack: obligations, visited }
+}
+
+fn predicate_obligation<'tcx>(
+ predicate: ty::Predicate<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+) -> PredicateObligation<'tcx> {
+ Obligation { cause, param_env, recursion_depth: 0, predicate }
+}
+
+impl<'tcx> Elaborator<'tcx> {
+ pub fn filter_to_traits(self) -> FilterToTraits<Self> {
+ FilterToTraits::new(self)
+ }
+
+ fn elaborate(&mut self, obligation: &PredicateObligation<'tcx>) {
+ let tcx = self.visited.tcx;
+
+ let bound_predicate = obligation.predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(data) => {
+ // Get predicates declared on the trait.
+ let predicates = tcx.super_predicates_of(data.def_id());
+
+ let obligations = predicates.predicates.iter().map(|&(mut pred, _)| {
+ // When the parent predicate is non-const, elaborate it to non-const predicates.
+ if data.constness == ty::BoundConstness::NotConst {
+ pred = pred.without_const(tcx);
+ }
+
+ predicate_obligation(
+ pred.subst_supertrait(tcx, &bound_predicate.rebind(data.trait_ref)),
+ obligation.param_env,
+ obligation.cause.clone(),
+ )
+ });
+ debug!(?data, ?obligations, "super_predicates");
+
+ // Only keep those bounds that we haven't already seen.
+ // This is necessary to prevent infinite recursion in some
+ // cases. One common case is when people define
+ // `trait Sized: Sized { }` rather than `trait Sized { }`.
+ let visited = &mut self.visited;
+ let obligations = obligations.filter(|o| visited.insert(o.predicate));
+
+ self.stack.extend(obligations);
+ }
+ ty::PredicateKind::WellFormed(..) => {
+ // Currently, we do not elaborate WF predicates,
+ // although we easily could.
+ }
+ ty::PredicateKind::ObjectSafe(..) => {
+ // Currently, we do not elaborate object-safe
+ // predicates.
+ }
+ ty::PredicateKind::Subtype(..) => {
+ // Currently, we do not "elaborate" predicates like `X <: Y`,
+ // though conceivably we might.
+ }
+ ty::PredicateKind::Coerce(..) => {
+ // Currently, we do not "elaborate" predicates like `X -> Y`,
+ // though conceivably we might.
+ }
+ ty::PredicateKind::Projection(..) => {
+ // Nothing to elaborate in a projection predicate.
+ }
+ ty::PredicateKind::ClosureKind(..) => {
+ // Nothing to elaborate when waiting for a closure's kind to be inferred.
+ }
+ ty::PredicateKind::ConstEvaluatable(..) => {
+ // Currently, we do not elaborate const-evaluatable
+ // predicates.
+ }
+ ty::PredicateKind::ConstEquate(..) => {
+ // Currently, we do not elaborate const-equate
+ // predicates.
+ }
+ ty::PredicateKind::RegionOutlives(..) => {
+ // Nothing to elaborate from `'a: 'b`.
+ }
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_max, r_min)) => {
+ // We know that `T: 'a` for some type `T`. We can
+ // often elaborate this. For example, if we know that
+ // `[U]: 'a`, that implies that `U: 'a`. Similarly, if
+ // we know `&'a U: 'b`, then we know that `'a: 'b` and
+ // `U: 'b`.
+ //
+ // We can basically ignore bound regions here. So for
+ // example `for<'c> Foo<'a,'c>: 'b` can be elaborated to
+ // `'a: 'b`.
+
+ // Ignore `for<'a> T: 'a` -- we might in the future
+ // consider this as evidence that `T: 'static`, but
+ // I'm a bit wary of such constructions and so for now
+ // I want to be conservative. --nmatsakis
+ if r_min.is_late_bound() {
+ return;
+ }
+
+ let visited = &mut self.visited;
+ let mut components = smallvec![];
+ push_outlives_components(tcx, ty_max, &mut components);
+ self.stack.extend(
+ components
+ .into_iter()
+ .filter_map(|component| match component {
+ Component::Region(r) => {
+ if r.is_late_bound() {
+ None
+ } else {
+ Some(ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(
+ r, r_min,
+ )))
+ }
+ }
+
+ Component::Param(p) => {
+ let ty = tcx.mk_ty_param(p.index, p.name);
+ Some(ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(
+ ty, r_min,
+ )))
+ }
+
+ Component::UnresolvedInferenceVariable(_) => None,
+
+ Component::Projection(projection) => {
+ // We might end up here if we have `Foo<<Bar as Baz>::Assoc>: 'a`.
+ // With this, we can deduce that `<Bar as Baz>::Assoc: 'a`.
+ let ty =
+ tcx.mk_projection(projection.item_def_id, projection.substs);
+ Some(ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(
+ ty, r_min,
+ )))
+ }
+
+ Component::EscapingProjection(_) => {
+ // We might be able to do more here, but we don't
+ // want to deal with escaping vars right now.
+ None
+ }
+ })
+ .map(ty::Binder::dummy)
+ .map(|predicate_kind| predicate_kind.to_predicate(tcx))
+ .filter(|&predicate| visited.insert(predicate))
+ .map(|predicate| {
+ predicate_obligation(
+ predicate,
+ obligation.param_env,
+ obligation.cause.clone(),
+ )
+ }),
+ );
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(..) => {
+ // Nothing to elaborate
+ }
+ }
+ }
+}
+
+impl<'tcx> Iterator for Elaborator<'tcx> {
+ type Item = PredicateObligation<'tcx>;
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.stack.len(), None)
+ }
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // Extract next item from top-most stack frame, if any.
+ if let Some(obligation) = self.stack.pop() {
+ self.elaborate(&obligation);
+ Some(obligation)
+ } else {
+ None
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Supertrait iterator
+///////////////////////////////////////////////////////////////////////////
+
+pub type Supertraits<'tcx> = FilterToTraits<Elaborator<'tcx>>;
+
+pub fn supertraits<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+) -> Supertraits<'tcx> {
+ elaborate_trait_ref(tcx, trait_ref).filter_to_traits()
+}
+
+pub fn transitive_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ bounds: impl Iterator<Item = ty::PolyTraitRef<'tcx>>,
+) -> Supertraits<'tcx> {
+ elaborate_trait_refs(tcx, bounds).filter_to_traits()
+}
+
+/// A specialized variant of `elaborate_trait_refs` that only elaborates trait references that may
+/// define the given associated type `assoc_name`. It uses the
+/// `super_predicates_that_define_assoc_type` query to avoid enumerating super-predicates that
+/// aren't related to `assoc_item`. This is used when resolving types like `Self::Item` or
+/// `T::Item` and helps to avoid cycle errors (see e.g. #35237).
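+///
+/// A sketch of a call, assuming a `T: Stream` bound where
+/// `trait Stream: Iterator` and we are resolving `T::Item` (the trait and
+/// identifiers are illustrative):
+///
+/// ```ignore (illustrative)
+/// let traits = transitive_bounds_that_define_assoc_type(
+///     tcx,
+///     std::iter::once(stream_trait_ref),
+///     Ident::from_str("Item"),
+/// );
+/// // yields `Stream` and then `Iterator`, without enumerating
+/// // supertraits that cannot define an associated type named `Item`.
+/// ```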
+pub fn transitive_bounds_that_define_assoc_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ bounds: impl Iterator<Item = ty::PolyTraitRef<'tcx>>,
+ assoc_name: Ident,
+) -> impl Iterator<Item = ty::PolyTraitRef<'tcx>> {
+ let mut stack: Vec<_> = bounds.collect();
+ let mut visited = FxIndexSet::default();
+
+ std::iter::from_fn(move || {
+ while let Some(trait_ref) = stack.pop() {
+ let anon_trait_ref = tcx.anonymize_bound_vars(trait_ref);
+ if visited.insert(anon_trait_ref) {
+ let super_predicates = tcx.super_predicates_that_define_assoc_type((
+ trait_ref.def_id(),
+ Some(assoc_name),
+ ));
+ for (super_predicate, _) in super_predicates.predicates {
+ let subst_predicate = super_predicate.subst_supertrait(tcx, &trait_ref);
+ if let Some(binder) = subst_predicate.to_opt_poly_trait_pred() {
+ stack.push(binder.map_bound(|t| t.trait_ref));
+ }
+ }
+
+ return Some(trait_ref);
+ }
+ }
+
+ None
+ })
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Other
+///////////////////////////////////////////////////////////////////////////
+
+/// A filter around an iterator of predicate obligations that yields only
+/// the trait references.
+pub struct FilterToTraits<I> {
+ base_iterator: I,
+}
+
+impl<I> FilterToTraits<I> {
+ fn new(base: I) -> FilterToTraits<I> {
+ FilterToTraits { base_iterator: base }
+ }
+}
+
+impl<'tcx, I: Iterator<Item = PredicateObligation<'tcx>>> Iterator for FilterToTraits<I> {
+ type Item = ty::PolyTraitRef<'tcx>;
+
+ fn next(&mut self) -> Option<ty::PolyTraitRef<'tcx>> {
+ while let Some(obligation) = self.base_iterator.next() {
+ if let Some(data) = obligation.predicate.to_opt_poly_trait_pred() {
+ return Some(data.map_bound(|t| t.trait_ref));
+ }
+ }
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (_, upper) = self.base_iterator.size_hint();
+ (0, upper)
+ }
+}
diff --git a/compiler/rustc_interface/Cargo.toml b/compiler/rustc_interface/Cargo.toml
new file mode 100644
index 000000000..1ecbc876c
--- /dev/null
+++ b/compiler/rustc_interface/Cargo.toml
@@ -0,0 +1,61 @@
+[package]
+name = "rustc_interface"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+libloading = "0.7.1"
+tracing = "0.1"
+rustc-rayon-core = { version = "0.4.0", optional = true }
+rayon = { version = "0.4.0", package = "rustc-rayon", optional = true }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_ast = { path = "../rustc_ast" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_borrowck = { path = "../rustc_borrowck" }
+rustc_builtin_macros = { path = "../rustc_builtin_macros" }
+rustc_expand = { path = "../rustc_expand" }
+rustc_parse = { path = "../rustc_parse" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_ast_lowering = { path = "../rustc_ast_lowering" }
+rustc_ast_passes = { path = "../rustc_ast_passes" }
+rustc_incremental = { path = "../rustc_incremental" }
+rustc_traits = { path = "../rustc_traits" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_codegen_ssa = { path = "../rustc_codegen_ssa" }
+rustc_symbol_mangling = { path = "../rustc_symbol_mangling" }
+rustc_codegen_llvm = { path = "../rustc_codegen_llvm", optional = true }
+rustc_hir = { path = "../rustc_hir" }
+rustc_metadata = { path = "../rustc_metadata" }
+rustc_const_eval = { path = "../rustc_const_eval" }
+rustc_mir_build = { path = "../rustc_mir_build" }
+rustc_mir_transform = { path = "../rustc_mir_transform" }
+rustc_monomorphize = { path = "../rustc_monomorphize" }
+rustc_passes = { path = "../rustc_passes" }
+rustc_typeck = { path = "../rustc_typeck" }
+rustc_lint = { path = "../rustc_lint" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_plugin_impl = { path = "../rustc_plugin_impl" }
+rustc_privacy = { path = "../rustc_privacy" }
+rustc_query_impl = { path = "../rustc_query_impl" }
+rustc_resolve = { path = "../rustc_resolve" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_ty_utils = { path = "../rustc_ty_utils" }
+
+[target.'cfg(unix)'.dependencies]
+libc = "0.2"
+
+[target.'cfg(windows)'.dependencies]
+winapi = { version = "0.3", features = ["libloaderapi"] }
+
+[dev-dependencies]
+rustc_target = { path = "../rustc_target" }
+
+[features]
+llvm = ['rustc_codegen_llvm']
+rustc_use_parallel_compiler = ['rayon', 'rustc-rayon-core', 'rustc_query_impl/rustc_use_parallel_compiler']
diff --git a/compiler/rustc_interface/src/callbacks.rs b/compiler/rustc_interface/src/callbacks.rs
new file mode 100644
index 000000000..76442de69
--- /dev/null
+++ b/compiler/rustc_interface/src/callbacks.rs
@@ -0,0 +1,59 @@
+//! Throughout the compiler tree, there are several places which want to have
+//! access to state or queries while being inside crates that are dependencies
+//! of `rustc_middle`. To facilitate this, we have the
+//! `rustc_data_structures::AtomicRef` type, which allows us to set up a global
+//! static which can then be set in this file at program startup.
+//!
+//! See `SPAN_TRACK` for an example of how to set things up.
+//!
+//! The functions in this file should fall back to the default set in their
+//! origin crate when the `TyCtxt` is not present in TLS.
+
+use rustc_errors::{Diagnostic, TRACK_DIAGNOSTICS};
+use rustc_middle::ty::tls;
+use std::fmt;
+
+fn track_span_parent(def_id: rustc_span::def_id::LocalDefId) {
+ tls::with_opt(|tcx| {
+ if let Some(tcx) = tcx {
+ let _span = tcx.source_span(def_id);
+ // Sanity check: relative span's parent must be an absolute span.
+ debug_assert_eq!(_span.data_untracked().parent, None);
+ }
+ })
+}
+
+/// This is a callback from `rustc_errors` as it cannot access the implicit state
+/// in `rustc_middle` otherwise. It is used when diagnostic messages are
+/// emitted and stores them in the current query, if there is one.
+fn track_diagnostic(diagnostic: &Diagnostic) {
+ tls::with_context_opt(|icx| {
+ if let Some(icx) = icx {
+ if let Some(diagnostics) = icx.diagnostics {
+ let mut diagnostics = diagnostics.lock();
+ diagnostics.extend(Some(diagnostic.clone()));
+ }
+ }
+ })
+}
+
+/// This is a callback from `rustc_hir` as it cannot access the implicit state
+/// in `rustc_middle` otherwise.
+fn def_id_debug(def_id: rustc_hir::def_id::DefId, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "DefId({}:{}", def_id.krate, def_id.index.index())?;
+ tls::with_opt(|opt_tcx| {
+ if let Some(tcx) = opt_tcx {
+ write!(f, " ~ {}", tcx.def_path_debug_str(def_id))?;
+ }
+ Ok(())
+ })?;
+ write!(f, ")")
+}
+
+/// Sets up the callbacks in the upstream crates that want to refer back
+/// to the `TyCtxt`.
+pub fn setup_callbacks() {
+ rustc_span::SPAN_TRACK.swap(&(track_span_parent as fn(_)));
+ rustc_hir::def_id::DEF_ID_DEBUG.swap(&(def_id_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+ TRACK_DIAGNOSTICS.swap(&(track_diagnostic as fn(&_)));
+}
diff --git a/compiler/rustc_interface/src/interface.rs b/compiler/rustc_interface/src/interface.rs
new file mode 100644
index 000000000..94f81b660
--- /dev/null
+++ b/compiler/rustc_interface/src/interface.rs
@@ -0,0 +1,362 @@
+pub use crate::passes::BoxedResolver;
+use crate::util;
+
+use rustc_ast::token;
+use rustc_ast::{self as ast, LitKind, MetaItemKind};
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::OnDrop;
+use rustc_errors::registry::Registry;
+use rustc_errors::{ErrorGuaranteed, Handler};
+use rustc_lint::LintStore;
+use rustc_middle::ty;
+use rustc_parse::maybe_new_parser_from_source_str;
+use rustc_query_impl::QueryCtxt;
+use rustc_session::config::{self, CheckCfg, ErrorOutputType, Input, OutputFilenames};
+use rustc_session::early_error;
+use rustc_session::lint;
+use rustc_session::parse::{CrateConfig, ParseSess};
+use rustc_session::{DiagnosticOutput, Session};
+use rustc_span::source_map::{FileLoader, FileName};
+use rustc_span::symbol::sym;
+use std::path::PathBuf;
+use std::result;
+
+pub type Result<T> = result::Result<T, ErrorGuaranteed>;
+
+/// Represents a compiler session.
+///
+/// Can be used to run `rustc_interface` queries.
+/// Created by passing [`Config`] to [`run_compiler`].
+pub struct Compiler {
+ pub(crate) sess: Lrc<Session>,
+ codegen_backend: Lrc<Box<dyn CodegenBackend>>,
+ pub(crate) input: Input,
+ pub(crate) input_path: Option<PathBuf>,
+ pub(crate) output_dir: Option<PathBuf>,
+ pub(crate) output_file: Option<PathBuf>,
+ pub(crate) temps_dir: Option<PathBuf>,
+ pub(crate) register_lints: Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>>,
+ pub(crate) override_queries:
+ Option<fn(&Session, &mut ty::query::Providers, &mut ty::query::ExternProviders)>,
+}
+
+impl Compiler {
+ pub fn session(&self) -> &Lrc<Session> {
+ &self.sess
+ }
+ pub fn codegen_backend(&self) -> &Lrc<Box<dyn CodegenBackend>> {
+ &self.codegen_backend
+ }
+ pub fn input(&self) -> &Input {
+ &self.input
+ }
+ pub fn output_dir(&self) -> &Option<PathBuf> {
+ &self.output_dir
+ }
+ pub fn output_file(&self) -> &Option<PathBuf> {
+ &self.output_file
+ }
+ pub fn temps_dir(&self) -> &Option<PathBuf> {
+ &self.temps_dir
+ }
+ pub fn register_lints(&self) -> &Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>> {
+ &self.register_lints
+ }
+ pub fn build_output_filenames(
+ &self,
+ sess: &Session,
+ attrs: &[ast::Attribute],
+ ) -> OutputFilenames {
+ util::build_output_filenames(
+ &self.input,
+ &self.output_dir,
+ &self.output_file,
+ &self.temps_dir,
+ attrs,
+ sess,
+ )
+ }
+}
+
+/// Converts strings provided as `--cfg [cfgspec]` into a `crate_cfg`.
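+///
+/// For example (a sketch of the mapping, not an exhaustive spec):
+/// `--cfg unix --cfg feature="serde"` comes in as
+/// `["unix", "feature=\"serde\""]` and parses to
+/// `{("unix", None), ("feature", Some("serde"))}`.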
+pub fn parse_cfgspecs(cfgspecs: Vec<String>) -> FxHashSet<(String, Option<String>)> {
+ rustc_span::create_default_session_if_not_set_then(move |_| {
+ let cfg = cfgspecs
+ .into_iter()
+ .map(|s| {
+ let sess = ParseSess::with_silent_emitter(Some(format!(
+ "this error occurred on the command line: `--cfg={}`",
+ s
+ )));
+ let filename = FileName::cfg_spec_source_code(&s);
+
+ macro_rules! error {
+ ($reason: expr) => {
+ early_error(
+ ErrorOutputType::default(),
+ &format!(concat!("invalid `--cfg` argument: `{}` (", $reason, ")"), s),
+ );
+ };
+ }
+
+ match maybe_new_parser_from_source_str(&sess, filename, s.to_string()) {
+ Ok(mut parser) => match parser.parse_meta_item() {
+ Ok(meta_item) if parser.token == token::Eof => {
+ if meta_item.path.segments.len() != 1 {
+ error!("argument key must be an identifier");
+ }
+ match &meta_item.kind {
+ MetaItemKind::List(..) => {}
+ MetaItemKind::NameValue(lit) if !lit.kind.is_str() => {
+ error!("argument value must be a string");
+ }
+ MetaItemKind::NameValue(..) | MetaItemKind::Word => {
+ let ident = meta_item.ident().expect("multi-segment cfg key");
+ return (ident.name, meta_item.value_str());
+ }
+ }
+ }
+ Ok(..) => {}
+ Err(err) => err.cancel(),
+ },
+ Err(errs) => drop(errs),
+ }
+
+ // If the user tried to use a key="value" flag but omitted the quotes, provide
+ // a hint about how to resolve this.
+ if s.contains('=') && !s.contains("=\"") && !s.ends_with('"') {
+ error!(concat!(
+ r#"expected `key` or `key="value"`, ensure escaping is appropriate"#,
+ r#" for your shell, try 'key="value"' or key=\"value\""#
+ ));
+ } else {
+ error!(r#"expected `key` or `key="value"`"#);
+ }
+ })
+ .collect::<CrateConfig>();
+ cfg.into_iter().map(|(a, b)| (a.to_string(), b.map(|b| b.to_string()))).collect()
+ })
+}
+
+/// Converts strings provided as `--check-cfg [specs]` into a `CheckCfg`.
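+///
+/// For example (a sketch), `--check-cfg 'names(foo, bar)'` registers `foo`
+/// and `bar` as expected condition names, while
+/// `--check-cfg 'values(feature, "serde")'` registers `"serde"` as an
+/// expected value of the `feature` condition.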
+pub fn parse_check_cfg(specs: Vec<String>) -> CheckCfg {
+ rustc_span::create_default_session_if_not_set_then(move |_| {
+ let mut cfg = CheckCfg::default();
+
+ 'specs: for s in specs {
+ let sess = ParseSess::with_silent_emitter(Some(format!(
+ "this error occurred on the command line: `--check-cfg={}`",
+ s
+ )));
+ let filename = FileName::cfg_spec_source_code(&s);
+
+ macro_rules! error {
+ ($reason: expr) => {
+ early_error(
+ ErrorOutputType::default(),
+ &format!(
+ concat!("invalid `--check-cfg` argument: `{}` (", $reason, ")"),
+ s
+ ),
+ );
+ };
+ }
+
+ match maybe_new_parser_from_source_str(&sess, filename, s.to_string()) {
+ Ok(mut parser) => match parser.parse_meta_item() {
+ Ok(meta_item) if parser.token == token::Eof => {
+ if let Some(args) = meta_item.meta_item_list() {
+ if meta_item.has_name(sym::names) {
+ let names_valid =
+ cfg.names_valid.get_or_insert_with(|| FxHashSet::default());
+ for arg in args {
+ if arg.is_word() && arg.ident().is_some() {
+ let ident = arg.ident().expect("multi-segment cfg key");
+ names_valid.insert(ident.name.to_string());
+ } else {
+ error!("`names()` arguments must be simple identifers");
+ }
+ }
+ continue 'specs;
+ } else if meta_item.has_name(sym::values) {
+ if let Some((name, values)) = args.split_first() {
+ if name.is_word() && name.ident().is_some() {
+ let ident = name.ident().expect("multi-segment cfg key");
+ let ident_values = cfg
+ .values_valid
+ .entry(ident.name.to_string())
+ .or_insert_with(|| FxHashSet::default());
+
+ for val in values {
+ if let Some(LitKind::Str(s, _)) =
+ val.literal().map(|lit| &lit.kind)
+ {
+ ident_values.insert(s.to_string());
+ } else {
+ error!(
+ "`values()` arguments must be string literals"
+ );
+ }
+ }
+
+ continue 'specs;
+ } else {
+ error!(
+ "`values()` first argument must be a simple identifer"
+ );
+ }
+ } else if args.is_empty() {
+ cfg.well_known_values = true;
+ continue 'specs;
+ }
+ }
+ }
+ }
+ Ok(..) => {}
+ Err(err) => err.cancel(),
+ },
+ Err(errs) => drop(errs),
+ }
+
+ error!(
+ "expected `names(name1, name2, ... nameN)` or \
+ `values(name, \"value1\", \"value2\", ... \"valueN\")`"
+ );
+ }
+
+ if let Some(names_valid) = &mut cfg.names_valid {
+ names_valid.extend(cfg.values_valid.keys().cloned());
+ }
+ cfg
+ })
+}
+
+/// The compiler configuration
+pub struct Config {
+ /// Command line options
+ pub opts: config::Options,
+
+ /// cfg! configuration in addition to the default ones
+ pub crate_cfg: FxHashSet<(String, Option<String>)>,
+ pub crate_check_cfg: CheckCfg,
+
+ pub input: Input,
+ pub input_path: Option<PathBuf>,
+ pub output_dir: Option<PathBuf>,
+ pub output_file: Option<PathBuf>,
+ pub file_loader: Option<Box<dyn FileLoader + Send + Sync>>,
+ pub diagnostic_output: DiagnosticOutput,
+
+ pub lint_caps: FxHashMap<lint::LintId, lint::Level>,
+
+ /// This is a callback from the driver that is called when [`ParseSess`] is created.
+ pub parse_sess_created: Option<Box<dyn FnOnce(&mut ParseSess) + Send>>,
+
+ /// This is a callback from the driver that is called when we're registering lints;
+ /// it is called during plugin registration when we have the LintStore in a non-shared state.
+ ///
+ /// Note that if you find a Some here you probably want to call that function in the new
+ /// function being registered.
+ pub register_lints: Option<Box<dyn Fn(&Session, &mut LintStore) + Send + Sync>>,
+
+ /// This is a callback from the driver that is called just after we have populated
+ /// the list of queries.
+ ///
+ /// The second parameter is local providers and the third parameter is external providers.
+ pub override_queries:
+ Option<fn(&Session, &mut ty::query::Providers, &mut ty::query::ExternProviders)>,
+
+ /// This is a callback from the driver that is called to create a codegen backend.
+ pub make_codegen_backend:
+ Option<Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>>,
+
+ /// Registry of diagnostics codes.
+ pub registry: Registry,
+}
+
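+/// Creates a [`Compiler`] from the given [`Config`] and passes it to the
+/// closure `f`. A sketch of how a driver typically reaches this via
+/// [`run_compiler`] (building the `Config` itself is elided):
+///
+/// ```ignore (illustrative)
+/// interface::run_compiler(config, |compiler| {
+///     compiler.session().abort_if_errors();
+/// });
+/// ```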
+pub fn create_compiler_and_run<R>(config: Config, f: impl FnOnce(&Compiler) -> R) -> R {
+ crate::callbacks::setup_callbacks();
+
+ let registry = &config.registry;
+ let (mut sess, codegen_backend) = util::create_session(
+ config.opts,
+ config.crate_cfg,
+ config.crate_check_cfg,
+ config.diagnostic_output,
+ config.file_loader,
+ config.input_path.clone(),
+ config.lint_caps,
+ config.make_codegen_backend,
+ registry.clone(),
+ );
+
+ if let Some(parse_sess_created) = config.parse_sess_created {
+ parse_sess_created(
+ &mut Lrc::get_mut(&mut sess)
+ .expect("create_session() should never share the returned session")
+ .parse_sess,
+ );
+ }
+
+ let temps_dir = sess.opts.unstable_opts.temps_dir.as_ref().map(|o| PathBuf::from(&o));
+
+ let compiler = Compiler {
+ sess,
+ codegen_backend,
+ input: config.input,
+ input_path: config.input_path,
+ output_dir: config.output_dir,
+ output_file: config.output_file,
+ temps_dir,
+ register_lints: config.register_lints,
+ override_queries: config.override_queries,
+ };
+
+ rustc_span::with_source_map(compiler.sess.parse_sess.clone_source_map(), move || {
+ let r = {
+ let _sess_abort_error = OnDrop(|| {
+ compiler.sess.finish_diagnostics(registry);
+ });
+
+ f(&compiler)
+ };
+
+ let prof = compiler.sess.prof.clone();
+ prof.generic_activity("drop_compiler").run(move || drop(compiler));
+ r
+ })
+}
+
+// JUSTIFICATION: before session exists, only config
+#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+pub fn run_compiler<R: Send>(config: Config, f: impl FnOnce(&Compiler) -> R + Send) -> R {
+ tracing::trace!("run_compiler");
+ util::run_in_thread_pool_with_globals(
+ config.opts.edition,
+ config.opts.unstable_opts.threads,
+ || create_compiler_and_run(config, f),
+ )
+}
+
+pub fn try_print_query_stack(handler: &Handler, num_frames: Option<usize>) {
+ eprintln!("query stack during panic:");
+
+ // Be careful relying on global state here: this code is called from
+ // a panic hook, which means that the global `Handler` may be in a weird
+ // state if it was responsible for triggering the panic.
+ let i = ty::tls::with_context_opt(|icx| {
+ if let Some(icx) = icx {
+ QueryCtxt::from_tcx(icx.tcx).try_print_query_stack(icx.query, handler, num_frames)
+ } else {
+ 0
+ }
+ });
+
+ if num_frames.is_none() || num_frames >= Some(i) {
+ eprintln!("end of query stack");
+ } else {
+ eprintln!("we're just showing a limited slice of the query stack");
+ }
+}
diff --git a/compiler/rustc_interface/src/lib.rs b/compiler/rustc_interface/src/lib.rs
new file mode 100644
index 000000000..d443057eb
--- /dev/null
+++ b/compiler/rustc_interface/src/lib.rs
@@ -0,0 +1,22 @@
+#![feature(box_patterns)]
+#![feature(let_else)]
+#![feature(internal_output_capture)]
+#![feature(thread_spawn_unchecked)]
+#![feature(once_cell)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+mod callbacks;
+pub mod interface;
+mod passes;
+mod proc_macro_decls;
+mod queries;
+pub mod util;
+
+pub use callbacks::setup_callbacks;
+pub use interface::{run_compiler, Config};
+pub use passes::{DEFAULT_EXTERN_QUERY_PROVIDERS, DEFAULT_QUERY_PROVIDERS};
+pub use queries::Queries;
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs
new file mode 100644
index 000000000..8f0835917
--- /dev/null
+++ b/compiler/rustc_interface/src/passes.rs
@@ -0,0 +1,1046 @@
+use crate::interface::{Compiler, Result};
+use crate::proc_macro_decls;
+use crate::util;
+
+use ast::CRATE_NODE_ID;
+use rustc_ast::{self as ast, visit};
+use rustc_borrowck as mir_borrowck;
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_data_structures::parallel;
+use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal};
+use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan, PResult};
+use rustc_expand::base::{ExtCtxt, LintStoreExpand, ResolverExpand};
+use rustc_hir::def_id::StableCrateId;
+use rustc_hir::definitions::Definitions;
+use rustc_lint::{BufferedEarlyLint, EarlyCheckNode, LintStore};
+use rustc_metadata::creader::CStore;
+use rustc_middle::arena::Arena;
+use rustc_middle::dep_graph::DepGraph;
+use rustc_middle::ty::query::{ExternProviders, Providers};
+use rustc_middle::ty::{self, GlobalCtxt, RegisteredTools, TyCtxt};
+use rustc_mir_build as mir_build;
+use rustc_parse::{parse_crate_from_file, parse_crate_from_source_str, validate_attr};
+use rustc_passes::{self, hir_stats, layout_test};
+use rustc_plugin_impl as plugin;
+use rustc_query_impl::{OnDiskCache, Queries as TcxQueries};
+use rustc_resolve::{Resolver, ResolverArenas};
+use rustc_session::config::{CrateType, Input, OutputFilenames, OutputType};
+use rustc_session::cstore::{CrateStoreDyn, MetadataLoader, MetadataLoaderDyn};
+use rustc_session::output::filename_for_input;
+use rustc_session::search_paths::PathKind;
+use rustc_session::{Limit, Session};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::FileName;
+use rustc_trait_selection::traits;
+use rustc_typeck as typeck;
+use tracing::{info, warn};
+
+use std::any::Any;
+use std::cell::RefCell;
+use std::ffi::OsString;
+use std::io::{self, BufWriter, Write};
+use std::marker::PhantomPinned;
+use std::path::{Path, PathBuf};
+use std::pin::Pin;
+use std::rc::Rc;
+use std::sync::LazyLock;
+use std::{env, fs, iter};
+
+pub fn parse<'a>(sess: &'a Session, input: &Input) -> PResult<'a, ast::Crate> {
+ let krate = sess.time("parse_crate", || match input {
+ Input::File(file) => parse_crate_from_file(file, &sess.parse_sess),
+ Input::Str { input, name } => {
+ parse_crate_from_source_str(name.clone(), input.clone(), &sess.parse_sess)
+ }
+ })?;
+
+ if sess.opts.unstable_opts.input_stats {
+ eprintln!("Lines of code: {}", sess.source_map().count_lines());
+ eprintln!("Pre-expansion node count: {}", count_nodes(&krate));
+ }
+
+ if let Some(ref s) = sess.opts.unstable_opts.show_span {
+ rustc_ast_passes::show_span::run(sess.diagnostic(), s, &krate);
+ }
+
+ if sess.opts.unstable_opts.hir_stats {
+ hir_stats::print_ast_stats(&krate, "PRE EXPANSION AST STATS");
+ }
+
+ Ok(krate)
+}
+
+fn count_nodes(krate: &ast::Crate) -> usize {
+ let mut counter = rustc_ast_passes::node_count::NodeCounter::new();
+ visit::walk_crate(&mut counter, krate);
+ counter.count
+}
+
+pub use boxed_resolver::BoxedResolver;
+mod boxed_resolver {
+ use super::*;
+
+ pub struct BoxedResolver(Pin<Box<BoxedResolverInner>>);
+
+ struct BoxedResolverInner {
+ session: Lrc<Session>,
+ resolver_arenas: Option<ResolverArenas<'static>>,
+ resolver: Option<Resolver<'static>>,
+ _pin: PhantomPinned,
+ }
+
+ // Note: Drop order is important to prevent dangling references. Resolver must be dropped first,
+ // then resolver_arenas and session.
+ impl Drop for BoxedResolverInner {
+ fn drop(&mut self) {
+ self.resolver.take();
+ self.resolver_arenas.take();
+ }
+ }
+
+ impl BoxedResolver {
+ pub(super) fn new(
+ session: Lrc<Session>,
+ make_resolver: impl for<'a> FnOnce(&'a Session, &'a ResolverArenas<'a>) -> Resolver<'a>,
+ ) -> BoxedResolver {
+ let mut boxed_resolver = Box::new(BoxedResolverInner {
+ session,
+ resolver_arenas: Some(Resolver::arenas()),
+ resolver: None,
+ _pin: PhantomPinned,
+ });
+ // SAFETY: `make_resolver` takes a resolver arena with an arbitrary lifetime and
+ // returns a resolver with the same lifetime as the arena. We ensure that the arena
+ // outlives the resolver in the drop impl and elsewhere so these transmutes are sound.
+ unsafe {
+ let resolver = make_resolver(
+ std::mem::transmute::<&Session, &Session>(&boxed_resolver.session),
+ std::mem::transmute::<&ResolverArenas<'_>, &ResolverArenas<'_>>(
+ boxed_resolver.resolver_arenas.as_ref().unwrap(),
+ ),
+ );
+ boxed_resolver.resolver = Some(resolver);
+ BoxedResolver(Pin::new_unchecked(boxed_resolver))
+ }
+ }
+
+ pub fn access<F: for<'a> FnOnce(&mut Resolver<'a>) -> R, R>(&mut self, f: F) -> R {
+ // SAFETY: The resolver doesn't need to be pinned.
+ let mut resolver = unsafe {
+ self.0.as_mut().map_unchecked_mut(|boxed_resolver| &mut boxed_resolver.resolver)
+ };
+ f((&mut *resolver).as_mut().unwrap())
+ }
+
+ pub fn to_resolver_outputs(
+ resolver: Rc<RefCell<BoxedResolver>>,
+ ) -> (Definitions, Box<CrateStoreDyn>, ty::ResolverOutputs, ty::ResolverAstLowering)
+ {
+ match Rc::try_unwrap(resolver) {
+ Ok(resolver) => {
+ let mut resolver = resolver.into_inner();
+ // SAFETY: The resolver doesn't need to be pinned.
+ let mut resolver = unsafe {
+ resolver
+ .0
+ .as_mut()
+ .map_unchecked_mut(|boxed_resolver| &mut boxed_resolver.resolver)
+ };
+ resolver.take().unwrap().into_outputs()
+ }
+ Err(resolver) => resolver.borrow_mut().access(|resolver| resolver.clone_outputs()),
+ }
+ }
+ }
+}
+
+pub fn create_resolver(
+ sess: Lrc<Session>,
+ metadata_loader: Box<MetadataLoaderDyn>,
+ krate: &ast::Crate,
+ crate_name: &str,
+) -> BoxedResolver {
+ tracing::trace!("create_resolver");
+ BoxedResolver::new(sess, move |sess, resolver_arenas| {
+ Resolver::new(sess, krate, crate_name, metadata_loader, resolver_arenas)
+ })
+}
+
+pub fn register_plugins<'a>(
+ sess: &'a Session,
+ metadata_loader: &'a dyn MetadataLoader,
+ register_lints: impl Fn(&Session, &mut LintStore),
+ mut krate: ast::Crate,
+ crate_name: &str,
+) -> Result<(ast::Crate, LintStore)> {
+ krate = sess.time("attributes_injection", || {
+ rustc_builtin_macros::cmdline_attrs::inject(
+ krate,
+ &sess.parse_sess,
+ &sess.opts.unstable_opts.crate_attr,
+ )
+ });
+
+ let (krate, features) = rustc_expand::config::features(sess, krate, CRATE_NODE_ID);
+ // These need to be set "early" so that expansion sees `quote` if enabled.
+ sess.init_features(features);
+
+ let crate_types = util::collect_crate_types(sess, &krate.attrs);
+ sess.init_crate_types(crate_types);
+
+ let stable_crate_id = StableCrateId::new(
+ crate_name,
+ sess.crate_types().contains(&CrateType::Executable),
+ sess.opts.cg.metadata.clone(),
+ );
+ sess.stable_crate_id.set(stable_crate_id).expect("not yet initialized");
+ rustc_incremental::prepare_session_directory(sess, crate_name, stable_crate_id)?;
+
+ if sess.opts.incremental.is_some() {
+ sess.time("incr_comp_garbage_collect_session_directories", || {
+ if let Err(e) = rustc_incremental::garbage_collect_session_directories(sess) {
+ warn!(
+ "Error while trying to garbage collect incremental \
+ compilation cache directory: {}",
+ e
+ );
+ }
+ });
+ }
+
+ let mut lint_store = rustc_lint::new_lint_store(
+ sess.opts.unstable_opts.no_interleave_lints,
+ sess.enable_internal_lints(),
+ );
+ register_lints(sess, &mut lint_store);
+
+ let registrars =
+ sess.time("plugin_loading", || plugin::load::load_plugins(sess, metadata_loader, &krate));
+ sess.time("plugin_registration", || {
+ let mut registry = plugin::Registry { lint_store: &mut lint_store };
+ for registrar in registrars {
+ registrar(&mut registry);
+ }
+ });
+
+ Ok((krate, lint_store))
+}
+
+fn pre_expansion_lint<'a>(
+ sess: &Session,
+ lint_store: &LintStore,
+ registered_tools: &RegisteredTools,
+ check_node: impl EarlyCheckNode<'a>,
+ node_name: &str,
+) {
+ sess.prof.generic_activity_with_arg("pre_AST_expansion_lint_checks", node_name).run(|| {
+ rustc_lint::check_ast_node(
+ sess,
+ true,
+ lint_store,
+ registered_tools,
+ None,
+ rustc_lint::BuiltinCombinedPreExpansionLintPass::new(),
+ check_node,
+ );
+ });
+}
+
+// Cannot implement directly for `LintStore` due to trait coherence.
+struct LintStoreExpandImpl<'a>(&'a LintStore);
+
+impl LintStoreExpand for LintStoreExpandImpl<'_> {
+ fn pre_expansion_lint(
+ &self,
+ sess: &Session,
+ registered_tools: &RegisteredTools,
+ node_id: ast::NodeId,
+ attrs: &[ast::Attribute],
+ items: &[rustc_ast::ptr::P<ast::Item>],
+ name: &str,
+ ) {
+ pre_expansion_lint(sess, self.0, registered_tools, (node_id, attrs, items), name);
+ }
+}
+
+/// Runs the "early phases" of the compiler: initial `cfg` processing, loading compiler plugins,
+/// syntax expansion, secondary `cfg` expansion, synthesis of a test
+/// harness if one is to be provided, injection of a dependency on the
+/// standard library and prelude, and name resolution.
+pub fn configure_and_expand(
+ sess: &Session,
+ lint_store: &LintStore,
+ mut krate: ast::Crate,
+ crate_name: &str,
+ resolver: &mut Resolver<'_>,
+) -> Result<ast::Crate> {
+ tracing::trace!("configure_and_expand");
+ pre_expansion_lint(sess, lint_store, resolver.registered_tools(), &krate, crate_name);
+ rustc_builtin_macros::register_builtin_macros(resolver);
+
+ krate = sess.time("crate_injection", || {
+ rustc_builtin_macros::standard_library_imports::inject(krate, resolver, sess)
+ });
+
+ util::check_attr_crate_type(sess, &krate.attrs, &mut resolver.lint_buffer());
+
+ // Expand all macros
+ krate = sess.time("macro_expand_crate", || {
+ // Windows dlls do not have rpaths, so they don't know how to find their
+ // dependencies. It's up to us to tell the system where to find all the
+ // dependent dlls. Note that this uses cfg!(windows) as opposed to
+ // targ_cfg because syntax extensions are always loaded for the host
+ // compiler, not for the target.
+ //
+ // This is an inherently racy operation, however, as multiple threads
+ // calling this function could keep extending PATH far beyond what it
+ // should be. As a stopgap, we simply avoid adding any elements to PATH
+ // that are already present in it. This is basically a targeted fix at
+ // #17360 for rustdoc
+ // which runs rustc in parallel but has been seen (#33844) to cause
+ // problems with PATH becoming too long.
+ let mut old_path = OsString::new();
+ if cfg!(windows) {
+ old_path = env::var_os("PATH").unwrap_or(old_path);
+ let mut new_path = sess.host_filesearch(PathKind::All).search_path_dirs();
+ for path in env::split_paths(&old_path) {
+ if !new_path.contains(&path) {
+ new_path.push(path);
+ }
+ }
+ env::set_var(
+ "PATH",
+ &env::join_paths(
+ new_path.iter().filter(|p| env::join_paths(iter::once(p)).is_ok()),
+ )
+ .unwrap(),
+ );
+ }
+
+ // Create the config for macro expansion
+ let features = sess.features_untracked();
+ let recursion_limit = get_recursion_limit(&krate.attrs, sess);
+ let cfg = rustc_expand::expand::ExpansionConfig {
+ features: Some(features),
+ recursion_limit,
+ trace_mac: sess.opts.unstable_opts.trace_macros,
+ should_test: sess.opts.test,
+ span_debug: sess.opts.unstable_opts.span_debug,
+ proc_macro_backtrace: sess.opts.unstable_opts.proc_macro_backtrace,
+ ..rustc_expand::expand::ExpansionConfig::default(crate_name.to_string())
+ };
+
+ let lint_store = LintStoreExpandImpl(lint_store);
+ let mut ecx = ExtCtxt::new(sess, cfg, resolver, Some(&lint_store));
+ // Expand macros now!
+ let krate = sess.time("expand_crate", || ecx.monotonic_expander().expand_crate(krate));
+
+ // The rest is error reporting
+
+ sess.parse_sess.buffered_lints.with_lock(|buffered_lints: &mut Vec<BufferedEarlyLint>| {
+ buffered_lints.append(&mut ecx.buffered_early_lint);
+ });
+
+ sess.time("check_unused_macros", || {
+ ecx.check_unused_macros();
+ });
+
+ let recursion_limit_hit = ecx.reduced_recursion_limit.is_some();
+
+ if cfg!(windows) {
+ env::set_var("PATH", &old_path);
+ }
+
+ if recursion_limit_hit {
+ // If we hit a recursion limit, exit early to avoid later passes getting overwhelmed
+ // with a large AST
+ Err(ErrorGuaranteed::unchecked_claim_error_was_emitted())
+ } else {
+ Ok(krate)
+ }
+ })?;
+
+ sess.time("maybe_building_test_harness", || {
+ rustc_builtin_macros::test_harness::inject(sess, resolver, &mut krate)
+ });
+
+ let has_proc_macro_decls = sess.time("AST_validation", || {
+ rustc_ast_passes::ast_validation::check_crate(sess, &krate, resolver.lint_buffer())
+ });
+
+ let crate_types = sess.crate_types();
+ let is_executable_crate = crate_types.contains(&CrateType::Executable);
+ let is_proc_macro_crate = crate_types.contains(&CrateType::ProcMacro);
+
+ if crate_types.len() > 1 {
+ if is_executable_crate {
+ sess.err("cannot mix `bin` crate type with others");
+ }
+ if is_proc_macro_crate {
+ sess.err("cannot mix `proc-macro` crate type with others");
+ }
+ }
+
+ // For backwards compatibility, we don't try to run proc macro injection
+ // if rustdoc is run on a proc macro crate without '--crate-type proc-macro' being
+ // specified. This should only affect users who manually invoke 'rustdoc', as
+ // 'cargo doc' will automatically pass the proper '--crate-type' flags.
+ // However, we do emit a warning, to let such users know that they should
+ // start passing '--crate-type proc-macro'.
+ if has_proc_macro_decls && sess.opts.actually_rustdoc && !is_proc_macro_crate {
+ let mut msg = sess.diagnostic().struct_warn(
+ "Trying to document proc macro crate \
+ without passing '--crate-type proc-macro' to rustdoc",
+ );
+
+ msg.warn("The generated documentation may be incorrect");
+ msg.emit();
+ } else {
+ krate = sess.time("maybe_create_a_macro_crate", || {
+ let is_test_crate = sess.opts.test;
+ rustc_builtin_macros::proc_macro_harness::inject(
+ sess,
+ resolver,
+ krate,
+ is_proc_macro_crate,
+ has_proc_macro_decls,
+ is_test_crate,
+ sess.diagnostic(),
+ )
+ });
+ }
+
+ // Done with macro expansion!
+
+ if sess.opts.unstable_opts.input_stats {
+ eprintln!("Post-expansion node count: {}", count_nodes(&krate));
+ }
+
+ if sess.opts.unstable_opts.hir_stats {
+ hir_stats::print_ast_stats(&krate, "POST EXPANSION AST STATS");
+ }
+
+ resolver.resolve_crate(&krate);
+
+ // Needs to go *after* expansion to be able to check the results of macro expansion.
+ sess.time("complete_gated_feature_checking", || {
+ rustc_ast_passes::feature_gate::check_crate(&krate, sess);
+ });
+
+ // Add all buffered lints from the `ParseSess` to the `Session`.
+ sess.parse_sess.buffered_lints.with_lock(|buffered_lints| {
+ info!("{} parse sess buffered_lints", buffered_lints.len());
+ for early_lint in buffered_lints.drain(..) {
+ resolver.lint_buffer().add_early_lint(early_lint);
+ }
+ });
+
+ // Gate identifiers containing invalid Unicode codepoints that were recovered during lexing.
+ sess.parse_sess.bad_unicode_identifiers.with_lock(|identifiers| {
+ let mut identifiers: Vec<_> = identifiers.drain().collect();
+ identifiers.sort_by_key(|&(key, _)| key);
+ for (ident, mut spans) in identifiers.into_iter() {
+ spans.sort();
+ if ident == sym::ferris {
+ let first_span = spans[0];
+ sess.diagnostic()
+ .struct_span_err(
+ MultiSpan::from(spans),
+ "Ferris cannot be used as an identifier",
+ )
+ .span_suggestion(
+ first_span,
+ "try using their name instead",
+ "ferris",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ } else {
+ sess.diagnostic().span_err(
+ MultiSpan::from(spans),
+ &format!("identifiers cannot contain emoji: `{}`", ident),
+ );
+ }
+ }
+ });
+
+ sess.time("early_lint_checks", || {
+ let lint_buffer = Some(std::mem::take(resolver.lint_buffer()));
+ rustc_lint::check_ast_node(
+ sess,
+ false,
+ lint_store,
+ resolver.registered_tools(),
+ lint_buffer,
+ rustc_lint::BuiltinCombinedEarlyLintPass::new(),
+ &krate,
+ )
+ });
+
+ Ok(krate)
+}
+
+// Returns all the paths that correspond to generated files.
+fn generated_output_paths(
+ sess: &Session,
+ outputs: &OutputFilenames,
+ exact_name: bool,
+ crate_name: &str,
+) -> Vec<PathBuf> {
+ let mut out_filenames = Vec::new();
+ for output_type in sess.opts.output_types.keys() {
+ let file = outputs.path(*output_type);
+ match *output_type {
+ // If the filename has been overridden using `-o`, it will not be modified
+ // by appending `.rlib`, `.exe`, etc., so we can skip this transformation.
+ OutputType::Exe if !exact_name => {
+ for crate_type in sess.crate_types().iter() {
+ let p = filename_for_input(sess, *crate_type, crate_name, outputs);
+ out_filenames.push(p);
+ }
+ }
+ OutputType::DepInfo if sess.opts.unstable_opts.dep_info_omit_d_target => {
+ // Don't add the dep-info output when omitting it from dep-info targets
+ }
+ _ => {
+ out_filenames.push(file);
+ }
+ }
+ }
+ out_filenames
+}
+
+// Runs `f` on every output file path and returns the first non-`None` result, or `None` if `f`
+// returns `None` for every file path.
+fn check_output<F, T>(output_paths: &[PathBuf], f: F) -> Option<T>
+where
+ F: Fn(&PathBuf) -> Option<T>,
+{
+ for output_path in output_paths {
+ if let Some(result) = f(output_path) {
+ return Some(result);
+ }
+ }
+ None
+}
+
+fn output_contains_path(output_paths: &[PathBuf], input_path: &Path) -> bool {
+ let input_path = input_path.canonicalize().ok();
+ if input_path.is_none() {
+ return false;
+ }
+ let check = |output_path: &PathBuf| {
+ if output_path.canonicalize().ok() == input_path { Some(()) } else { None }
+ };
+ check_output(output_paths, check).is_some()
+}
+
+fn output_conflicts_with_dir(output_paths: &[PathBuf]) -> Option<PathBuf> {
+ let check = |output_path: &PathBuf| output_path.is_dir().then(|| output_path.clone());
+ check_output(output_paths, check)
+}
+
+fn escape_dep_filename(filename: &str) -> String {
+ // Apparently clang and gcc *only* escape spaces:
+ // https://llvm.org/klaus/clang/commit/9d50634cfc268ecc9a7250226dd5ca0e945240d4
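+ // e.g. `foo bar.rs` becomes `foo\ bar.rs`.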
+ filename.replace(' ', "\\ ")
+}
+
+// In Makefile comments, only newlines, carriage returns, and `\` need escaping.
+// The result can be unescaped by anything that can unescape `escape_default` and friends.
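+// For example, an env value containing a newline, such as "A\nB", is emitted on a
+// single line as `A\nB` with a literal backslash.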
+fn escape_dep_env(symbol: Symbol) -> String {
+ let s = symbol.as_str();
+ let mut escaped = String::with_capacity(s.len());
+ for c in s.chars() {
+ match c {
+ '\n' => escaped.push_str(r"\n"),
+ '\r' => escaped.push_str(r"\r"),
+ '\\' => escaped.push_str(r"\\"),
+ _ => escaped.push(c),
+ }
+ }
+ escaped
+}
+
+fn write_out_deps(
+ sess: &Session,
+ boxed_resolver: &RefCell<BoxedResolver>,
+ outputs: &OutputFilenames,
+ out_filenames: &[PathBuf],
+) {
+ // Write out dependency rules to the dep-info file if requested
+ if !sess.opts.output_types.contains_key(&OutputType::DepInfo) {
+ return;
+ }
+ let deps_filename = outputs.path(OutputType::DepInfo);
+
+ let result = (|| -> io::Result<()> {
+ // Build a list of files used to compile the output and
+ // write Makefile-compatible dependency rules
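+ // The emitted dep-info file looks roughly like this (illustrative paths):
+ //
+ //     out/foo.rlib: src/main.rs src/util.rs
+ //
+ //     src/main.rs:
+ //     src/util.rs:
+ //
+ //     # env-dep:SOME_VAR=value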
+ let mut files: Vec<String> = sess
+ .source_map()
+ .files()
+ .iter()
+ .filter(|fmap| fmap.is_real_file())
+ .filter(|fmap| !fmap.is_imported())
+ .map(|fmap| escape_dep_filename(&fmap.name.prefer_local().to_string()))
+ .collect();
+
+ // Account for explicitly marked-to-track files
+ // (e.g. accessed in proc macros).
+ let file_depinfo = sess.parse_sess.file_depinfo.borrow();
+ let extra_tracked_files = file_depinfo.iter().map(|path_sym| {
+ let path = PathBuf::from(path_sym.as_str());
+ let file = FileName::from(path);
+ escape_dep_filename(&file.prefer_local().to_string())
+ });
+ files.extend(extra_tracked_files);
+
+ if sess.binary_dep_depinfo() {
+ if let Some(ref backend) = sess.opts.unstable_opts.codegen_backend {
+ if backend.contains('.') {
+ // If the backend name contains a `.`, it is the path to an external dynamic
+ // library. If not, it is not a path.
+ files.push(backend.to_string());
+ }
+ }
+
+ boxed_resolver.borrow_mut().access(|resolver| {
+ for cnum in resolver.cstore().crates_untracked() {
+ let source = resolver.cstore().crate_source_untracked(cnum);
+ if let Some((path, _)) = &source.dylib {
+ files.push(escape_dep_filename(&path.display().to_string()));
+ }
+ if let Some((path, _)) = &source.rlib {
+ files.push(escape_dep_filename(&path.display().to_string()));
+ }
+ if let Some((path, _)) = &source.rmeta {
+ files.push(escape_dep_filename(&path.display().to_string()));
+ }
+ }
+ });
+ }
+
+ let mut file = BufWriter::new(fs::File::create(&deps_filename)?);
+ for path in out_filenames {
+ writeln!(file, "{}: {}\n", path.display(), files.join(" "))?;
+ }
+
+ // Emit a fake target for each input file to the compilation. This
+ // prevents `make` from spitting out an error if a file is later
+ // deleted. For more info see #28735
+ for path in files {
+ writeln!(file, "{}:", path)?;
+ }
+
+ // Emit special comments with information about accessed environment variables.
+ let env_depinfo = sess.parse_sess.env_depinfo.borrow();
+ if !env_depinfo.is_empty() {
+ let mut envs: Vec<_> = env_depinfo
+ .iter()
+ .map(|(k, v)| (escape_dep_env(*k), v.map(escape_dep_env)))
+ .collect();
+ envs.sort_unstable();
+ writeln!(file)?;
+ for (k, v) in envs {
+ write!(file, "# env-dep:{}", k)?;
+ if let Some(v) = v {
+ write!(file, "={}", v)?;
+ }
+ writeln!(file)?;
+ }
+ }
+
+ Ok(())
+ })();
+
+ match result {
+ Ok(_) => {
+ if sess.opts.json_artifact_notifications {
+ sess.parse_sess
+ .span_diagnostic
+ .emit_artifact_notification(&deps_filename, "dep-info");
+ }
+ }
+ Err(e) => sess.fatal(&format!(
+ "error writing dependencies to `{}`: {}",
+ deps_filename.display(),
+ e
+ )),
+ }
+}
+
+pub fn prepare_outputs(
+ sess: &Session,
+ compiler: &Compiler,
+ krate: &ast::Crate,
+ boxed_resolver: &RefCell<BoxedResolver>,
+ crate_name: &str,
+) -> Result<OutputFilenames> {
+ let _timer = sess.timer("prepare_outputs");
+
+ // FIXME: rustdoc passes &[] instead of &krate.attrs here
+ let outputs = util::build_output_filenames(
+ &compiler.input,
+ &compiler.output_dir,
+ &compiler.output_file,
+ &compiler.temps_dir,
+ &krate.attrs,
+ sess,
+ );
+
+ let output_paths =
+ generated_output_paths(sess, &outputs, compiler.output_file.is_some(), crate_name);
+
+ // Ensure the source file isn't accidentally overwritten during compilation.
+ if let Some(ref input_path) = compiler.input_path {
+ if sess.opts.will_create_output_file() {
+ if output_contains_path(&output_paths, input_path) {
+ let reported = sess.err(&format!(
+ "the input file \"{}\" would be overwritten by the generated \
+ executable",
+ input_path.display()
+ ));
+ return Err(reported);
+ }
+ if let Some(dir_path) = output_conflicts_with_dir(&output_paths) {
+ let reported = sess.err(&format!(
+ "the generated executable for the input file \"{}\" conflicts with the \
+ existing directory \"{}\"",
+ input_path.display(),
+ dir_path.display()
+ ));
+ return Err(reported);
+ }
+ }
+ }
+
+ if let Some(ref dir) = compiler.temps_dir {
+ if fs::create_dir_all(dir).is_err() {
+ let reported =
+ sess.err("failed to find or create the directory specified by `--temps-dir`");
+ return Err(reported);
+ }
+ }
+
+ write_out_deps(sess, boxed_resolver, &outputs, &output_paths);
+
+ let only_dep_info = sess.opts.output_types.contains_key(&OutputType::DepInfo)
+ && sess.opts.output_types.len() == 1;
+
+ if !only_dep_info {
+ if let Some(ref dir) = compiler.output_dir {
+ if fs::create_dir_all(dir).is_err() {
+ let reported =
+ sess.err("failed to find or create the directory specified by `--out-dir`");
+ return Err(reported);
+ }
+ }
+ }
+
+ Ok(outputs)
+}
+
+pub static DEFAULT_QUERY_PROVIDERS: LazyLock<Providers> = LazyLock::new(|| {
+ let providers = &mut Providers::default();
+ providers.analysis = analysis;
+ providers.hir_crate = rustc_ast_lowering::lower_to_hir;
+ proc_macro_decls::provide(providers);
+ rustc_const_eval::provide(providers);
+ rustc_middle::hir::provide(providers);
+ mir_borrowck::provide(providers);
+ mir_build::provide(providers);
+ rustc_mir_transform::provide(providers);
+ rustc_monomorphize::provide(providers);
+ rustc_privacy::provide(providers);
+ typeck::provide(providers);
+ ty::provide(providers);
+ traits::provide(providers);
+ rustc_passes::provide(providers);
+ rustc_resolve::provide(providers);
+ rustc_traits::provide(providers);
+ rustc_ty_utils::provide(providers);
+ rustc_metadata::provide(providers);
+ rustc_lint::provide(providers);
+ rustc_symbol_mangling::provide(providers);
+ rustc_codegen_ssa::provide(providers);
+ *providers
+});
+
+pub static DEFAULT_EXTERN_QUERY_PROVIDERS: LazyLock<ExternProviders> = LazyLock::new(|| {
+ let mut extern_providers = ExternProviders::default();
+ rustc_metadata::provide_extern(&mut extern_providers);
+ rustc_codegen_ssa::provide_extern(&mut extern_providers);
+ extern_providers
+});
+
+pub struct QueryContext<'tcx> {
+ gcx: &'tcx GlobalCtxt<'tcx>,
+}
+
+impl<'tcx> QueryContext<'tcx> {
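+    /// Enters the `TyCtxt` created for this global context. A typical call
+    /// (illustrative): `query_ctxt.enter(|tcx| tcx.analysis(()))`.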
+ pub fn enter<F, R>(&mut self, f: F) -> R
+ where
+ F: FnOnce(TyCtxt<'tcx>) -> R,
+ {
+ let icx = ty::tls::ImplicitCtxt::new(self.gcx);
+ ty::tls::enter_context(&icx, |_| f(icx.tcx))
+ }
+}
+
+pub fn create_global_ctxt<'tcx>(
+ compiler: &'tcx Compiler,
+ lint_store: Lrc<LintStore>,
+ krate: Lrc<ast::Crate>,
+ dep_graph: DepGraph,
+ resolver: Rc<RefCell<BoxedResolver>>,
+ outputs: OutputFilenames,
+ crate_name: &str,
+ queries: &'tcx OnceCell<TcxQueries<'tcx>>,
+ global_ctxt: &'tcx OnceCell<GlobalCtxt<'tcx>>,
+ arena: &'tcx WorkerLocal<Arena<'tcx>>,
+ hir_arena: &'tcx WorkerLocal<rustc_hir::Arena<'tcx>>,
+) -> QueryContext<'tcx> {
+ // We're constructing the HIR here; we don't care what we will
+ // read, since we haven't even constructed the *input* to
+ // incr. comp. yet.
+ dep_graph.assert_ignored();
+
+ let (definitions, cstore, resolver_outputs, resolver_for_lowering) =
+ BoxedResolver::to_resolver_outputs(resolver);
+
+ let sess = &compiler.session();
+ let query_result_on_disk_cache = rustc_incremental::load_query_result_cache(sess);
+
+ let codegen_backend = compiler.codegen_backend();
+ let mut local_providers = *DEFAULT_QUERY_PROVIDERS;
+ codegen_backend.provide(&mut local_providers);
+
+ let mut extern_providers = *DEFAULT_EXTERN_QUERY_PROVIDERS;
+ codegen_backend.provide_extern(&mut extern_providers);
+
+ if let Some(callback) = compiler.override_queries {
+ callback(sess, &mut local_providers, &mut extern_providers);
+ }
+
+ let queries = queries.get_or_init(|| {
+ TcxQueries::new(local_providers, extern_providers, query_result_on_disk_cache)
+ });
+
+ let gcx = sess.time("setup_global_ctxt", || {
+ global_ctxt.get_or_init(move || {
+ TyCtxt::create_global_ctxt(
+ sess,
+ lint_store,
+ arena,
+ hir_arena,
+ definitions,
+ cstore,
+ resolver_outputs,
+ resolver_for_lowering,
+ krate,
+ dep_graph,
+ queries.on_disk_cache.as_ref().map(OnDiskCache::as_dyn),
+ queries.as_dyn(),
+ rustc_query_impl::query_callbacks(arena),
+ crate_name,
+ outputs,
+ )
+ })
+ });
+
+ QueryContext { gcx }
+}
+
+/// Runs the resolution, type-checking, region checking and other
+/// miscellaneous analysis passes on the crate.
+fn analysis(tcx: TyCtxt<'_>, (): ()) -> Result<()> {
+ rustc_passes::hir_id_validator::check_crate(tcx);
+
+ let sess = tcx.sess;
+ let mut entry_point = None;
+
+ sess.time("misc_checking_1", || {
+ parallel!(
+ {
+ entry_point = sess.time("looking_for_entry_point", || tcx.entry_fn(()));
+
+ sess.time("looking_for_derive_registrar", || {
+ tcx.ensure().proc_macro_decls_static(())
+ });
+
+ CStore::from_tcx(tcx).report_unused_deps(tcx);
+ },
+ {
+ tcx.hir().par_for_each_module(|module| {
+ tcx.ensure().check_mod_loops(module);
+ tcx.ensure().check_mod_attrs(module);
+ tcx.ensure().check_mod_naked_functions(module);
+ tcx.ensure().check_mod_unstable_api_usage(module);
+ tcx.ensure().check_mod_const_bodies(module);
+ });
+ },
+ {
+ sess.time("unused_lib_feature_checking", || {
+ rustc_passes::stability::check_unused_or_stable_features(tcx)
+ });
+ },
+ {
+ // We force these queries to run,
+ // since they might not otherwise get called.
+ // This marks the corresponding crate-level attributes
+ // as used, and ensures that their values are valid.
+ tcx.ensure().limits(());
+ tcx.ensure().stability_index(());
+ }
+ );
+ });
+
+ // passes are timed inside typeck
+ typeck::check_crate(tcx)?;
+
+ sess.time("misc_checking_2", || {
+ parallel!(
+ {
+ sess.time("match_checking", || {
+ tcx.hir().par_body_owners(|def_id| tcx.ensure().check_match(def_id.to_def_id()))
+ });
+ },
+ {
+ sess.time("liveness_and_intrinsic_checking", || {
+ tcx.hir().par_for_each_module(|module| {
+ // this must run before MIR dump, because
+ // "not all control paths return a value" is reported here.
+ //
+ // maybe move the check to a MIR pass?
+ tcx.ensure().check_mod_liveness(module);
+ });
+ });
+ }
+ );
+ });
+
+ sess.time("MIR_borrow_checking", || {
+ tcx.hir().par_body_owners(|def_id| tcx.ensure().mir_borrowck(def_id));
+ });
+
+ sess.time("MIR_effect_checking", || {
+ for def_id in tcx.hir().body_owners() {
+ tcx.ensure().thir_check_unsafety(def_id);
+ if !tcx.sess.opts.unstable_opts.thir_unsafeck {
+ rustc_mir_transform::check_unsafety::check_unsafety(tcx, def_id);
+ }
+ tcx.ensure().has_ffi_unwind_calls(def_id);
+
+ if tcx.hir().body_const_context(def_id).is_some() {
+ tcx.ensure()
+ .mir_drops_elaborated_and_const_checked(ty::WithOptConstParam::unknown(def_id));
+ }
+ }
+ });
+
+ sess.time("layout_testing", || layout_test::test_layout(tcx));
+
+ // Avoid overwhelming user with errors if borrow checking failed.
+ // I'm not sure how helpful this is, to be honest, but it avoids a
+ // lot of annoying errors in the ui tests (basically,
+ // lint warnings and so on -- kindck used to do this abort, but
+ // kindck is gone now). -nmatsakis
+ if let Some(reported) = sess.has_errors() {
+ return Err(reported);
+ }
+
+ sess.time("misc_checking_3", || {
+ parallel!(
+ {
+ tcx.ensure().privacy_access_levels(());
+
+ parallel!(
+ {
+ tcx.ensure().check_private_in_public(());
+ },
+ {
+ tcx.hir()
+ .par_for_each_module(|module| tcx.ensure().check_mod_deathness(module));
+ },
+ {
+ sess.time("lint_checking", || {
+ rustc_lint::check_crate(tcx, || {
+ rustc_lint::BuiltinCombinedLateLintPass::new()
+ });
+ });
+ }
+ );
+ },
+ {
+ sess.time("privacy_checking_modules", || {
+ tcx.hir().par_for_each_module(|module| {
+ tcx.ensure().check_mod_privacy(module);
+ });
+ });
+ }
+ );
+
+ // This check has to be run after all lints are done processing. We don't
+ // define a lint filter, as all lint checks should have finished at this point.
+ sess.time("check_lint_expectations", || tcx.check_expectations(None));
+ });
+
+ Ok(())
+}
+
+/// Runs the codegen backend, after which the AST and analysis can
+/// be discarded.
+pub fn start_codegen<'tcx>(
+ codegen_backend: &dyn CodegenBackend,
+ tcx: TyCtxt<'tcx>,
+ outputs: &OutputFilenames,
+) -> Box<dyn Any> {
+ info!("Pre-codegen\n{:?}", tcx.debug_stats());
+
+ let (metadata, need_metadata_module) =
+ rustc_metadata::fs::encode_and_write_metadata(tcx, outputs);
+
+ let codegen = tcx.sess.time("codegen_crate", move || {
+ codegen_backend.codegen_crate(tcx, metadata, need_metadata_module)
+ });
+
+ // Don't run these test assertions when not doing codegen. Compiletest tries to build
+ // build-fail tests in check mode first and expects that check build to not give an error.
+ if tcx.sess.opts.output_types.should_codegen() {
+ rustc_incremental::assert_module_sources::assert_module_sources(tcx);
+ rustc_symbol_mangling::test::report_symbol_names(tcx);
+ }
+
+ info!("Post-codegen\n{:?}", tcx.debug_stats());
+
+ if tcx.sess.opts.output_types.contains_key(&OutputType::Mir) {
+ if let Err(e) = rustc_mir_transform::dump_mir::emit_mir(tcx, outputs) {
+ tcx.sess.err(&format!("could not emit MIR: {}", e));
+ tcx.sess.abort_if_errors();
+ }
+ }
+
+ codegen
+}
+
+fn get_recursion_limit(krate_attrs: &[ast::Attribute], sess: &Session) -> Limit {
+ if let Some(attr) = krate_attrs
+ .iter()
+ .find(|attr| attr.has_name(sym::recursion_limit) && attr.value_str().is_none())
+ {
+ // This is here mainly to check for using a macro, such as
+ // #![recursion_limit = foo!()]. That is not supported since the
+ // recursion limit must be known before expansion, so the attribute
+ // cannot itself be expanded. Such a malformed attribute would normally
+ // be caught by AstValidator (via `check_builtin_attribute`), but by
+ // the time that runs the macro has already been expanded, so no error
+ // is reported there.
+ validate_attr::emit_fatal_malformed_builtin_attribute(
+ &sess.parse_sess,
+ attr,
+ sym::recursion_limit,
+ );
+ }
+ rustc_middle::middle::limits::get_recursion_limit(krate_attrs, sess)
+}
diff --git a/compiler/rustc_interface/src/proc_macro_decls.rs b/compiler/rustc_interface/src/proc_macro_decls.rs
new file mode 100644
index 000000000..5371c513d
--- /dev/null
+++ b/compiler/rustc_interface/src/proc_macro_decls.rs
@@ -0,0 +1,27 @@
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
+
+fn proc_macro_decls_static(tcx: TyCtxt<'_>, (): ()) -> Option<LocalDefId> {
+ let mut finder = Finder { tcx, decls: None };
+
+ for id in tcx.hir().items() {
+ let attrs = finder.tcx.hir().attrs(id.hir_id());
+ if finder.tcx.sess.contains_name(attrs, sym::rustc_proc_macro_decls) {
+ finder.decls = Some(id.def_id);
+ }
+ }
+
+ finder.decls
+}
+
+struct Finder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ decls: Option<hir::def_id::LocalDefId>,
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { proc_macro_decls_static, ..*providers };
+}
diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs
new file mode 100644
index 000000000..73402ae08
--- /dev/null
+++ b/compiler/rustc_interface/src/queries.rs
@@ -0,0 +1,402 @@
+use crate::interface::{Compiler, Result};
+use crate::passes::{self, BoxedResolver, QueryContext};
+
+use rustc_ast as ast;
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_codegen_ssa::CodegenResults;
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::{Lrc, OnceCell, WorkerLocal};
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_incremental::DepGraphFuture;
+use rustc_lint::LintStore;
+use rustc_middle::arena::Arena;
+use rustc_middle::dep_graph::DepGraph;
+use rustc_middle::ty::{GlobalCtxt, TyCtxt};
+use rustc_query_impl::Queries as TcxQueries;
+use rustc_session::config::{self, OutputFilenames, OutputType};
+use rustc_session::{output::find_crate_name, Session};
+use rustc_span::symbol::sym;
+use std::any::Any;
+use std::cell::{Ref, RefCell, RefMut};
+use std::rc::Rc;
+
+/// Represents the result of a query.
+///
+/// This result can be stolen with the [`take`] method and generated with the [`compute`] method.
+///
+/// [`take`]: Self::take
+/// [`compute`]: Self::compute
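+///
+/// A minimal usage sketch (illustrative only; `Query` values are constructed
+/// internally by `Queries` below):
+///
+/// ```ignore (illustrative)
+/// let q: Query<u32> = Query::default();
+/// q.compute(|| Ok(41)).unwrap(); // populate the result
+/// assert_eq!(*q.peek(), 41);     // borrow it without consuming
+/// assert_eq!(q.take(), 41);      // steal it; `peek` would now panic
+/// ```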
+pub struct Query<T> {
+ result: RefCell<Option<Result<T>>>,
+}
+
+impl<T> Query<T> {
+ fn compute<F: FnOnce() -> Result<T>>(&self, f: F) -> Result<&Query<T>> {
+ let mut result = self.result.borrow_mut();
+ if result.is_none() {
+ *result = Some(f());
+ }
+ result.as_ref().unwrap().as_ref().map(|_| self).map_err(|err| *err)
+ }
+
+    /// Takes ownership of the query result. Further attempts to take or peek the query
+    /// result will panic unless it is recomputed by calling the `compute` method.
+ pub fn take(&self) -> T {
+ self.result.borrow_mut().take().expect("missing query result").unwrap()
+ }
+
+ /// Borrows the query result using the RefCell. Panics if the result is stolen.
+ pub fn peek(&self) -> Ref<'_, T> {
+ Ref::map(self.result.borrow(), |r| {
+ r.as_ref().unwrap().as_ref().expect("missing query result")
+ })
+ }
+
+ /// Mutably borrows the query result using the RefCell. Panics if the result is stolen.
+ pub fn peek_mut(&self) -> RefMut<'_, T> {
+ RefMut::map(self.result.borrow_mut(), |r| {
+ r.as_mut().unwrap().as_mut().expect("missing query result")
+ })
+ }
+}
+
+impl<T> Default for Query<T> {
+ fn default() -> Self {
+ Query { result: RefCell::new(None) }
+ }
+}
+
+pub struct Queries<'tcx> {
+ compiler: &'tcx Compiler,
+ gcx: OnceCell<GlobalCtxt<'tcx>>,
+ queries: OnceCell<TcxQueries<'tcx>>,
+
+ arena: WorkerLocal<Arena<'tcx>>,
+ hir_arena: WorkerLocal<rustc_hir::Arena<'tcx>>,
+
+ dep_graph_future: Query<Option<DepGraphFuture>>,
+ parse: Query<ast::Crate>,
+ crate_name: Query<String>,
+ register_plugins: Query<(ast::Crate, Lrc<LintStore>)>,
+ expansion: Query<(Lrc<ast::Crate>, Rc<RefCell<BoxedResolver>>, Lrc<LintStore>)>,
+ dep_graph: Query<DepGraph>,
+ prepare_outputs: Query<OutputFilenames>,
+ global_ctxt: Query<QueryContext<'tcx>>,
+ ongoing_codegen: Query<Box<dyn Any>>,
+}
+
+impl<'tcx> Queries<'tcx> {
+ pub fn new(compiler: &'tcx Compiler) -> Queries<'tcx> {
+ Queries {
+ compiler,
+ gcx: OnceCell::new(),
+ queries: OnceCell::new(),
+ arena: WorkerLocal::new(|_| Arena::default()),
+ hir_arena: WorkerLocal::new(|_| rustc_hir::Arena::default()),
+ dep_graph_future: Default::default(),
+ parse: Default::default(),
+ crate_name: Default::default(),
+ register_plugins: Default::default(),
+ expansion: Default::default(),
+ dep_graph: Default::default(),
+ prepare_outputs: Default::default(),
+ global_ctxt: Default::default(),
+ ongoing_codegen: Default::default(),
+ }
+ }
+
+ fn session(&self) -> &Lrc<Session> {
+ &self.compiler.sess
+ }
+ fn codegen_backend(&self) -> &Lrc<Box<dyn CodegenBackend>> {
+ self.compiler.codegen_backend()
+ }
+
+ fn dep_graph_future(&self) -> Result<&Query<Option<DepGraphFuture>>> {
+ self.dep_graph_future.compute(|| {
+ let sess = self.session();
+ Ok(sess.opts.build_dep_graph().then(|| rustc_incremental::load_dep_graph(sess)))
+ })
+ }
+
+ pub fn parse(&self) -> Result<&Query<ast::Crate>> {
+ self.parse.compute(|| {
+ passes::parse(self.session(), &self.compiler.input)
+ .map_err(|mut parse_error| parse_error.emit())
+ })
+ }
+
+ pub fn register_plugins(&self) -> Result<&Query<(ast::Crate, Lrc<LintStore>)>> {
+ self.register_plugins.compute(|| {
+ let crate_name = self.crate_name()?.peek().clone();
+ let krate = self.parse()?.take();
+
+ let empty: &(dyn Fn(&Session, &mut LintStore) + Sync + Send) = &|_, _| {};
+ let (krate, lint_store) = passes::register_plugins(
+ self.session(),
+ &*self.codegen_backend().metadata_loader(),
+ self.compiler.register_lints.as_deref().unwrap_or_else(|| empty),
+ krate,
+ &crate_name,
+ )?;
+
+ // Compute the dependency graph (in the background). We want to do
+ // this as early as possible, to give the DepGraph maximum time to
+ // load before dep_graph() is called, but it also can't happen
+ // until after rustc_incremental::prepare_session_directory() is
+ // called, which happens within passes::register_plugins().
+ self.dep_graph_future().ok();
+
+ Ok((krate, Lrc::new(lint_store)))
+ })
+ }
+
+ pub fn crate_name(&self) -> Result<&Query<String>> {
+ self.crate_name.compute(|| {
+ Ok({
+ let parse_result = self.parse()?;
+ let krate = parse_result.peek();
+ // parse `#[crate_name]` even if `--crate-name` was passed, to make sure it matches.
+ find_crate_name(self.session(), &krate.attrs, &self.compiler.input)
+ })
+ })
+ }
+
+ pub fn expansion(
+ &self,
+ ) -> Result<&Query<(Lrc<ast::Crate>, Rc<RefCell<BoxedResolver>>, Lrc<LintStore>)>> {
+ tracing::trace!("expansion");
+ self.expansion.compute(|| {
+ let crate_name = self.crate_name()?.peek().clone();
+ let (krate, lint_store) = self.register_plugins()?.take();
+ let _timer = self.session().timer("configure_and_expand");
+ let sess = self.session();
+ let mut resolver = passes::create_resolver(
+ sess.clone(),
+ self.codegen_backend().metadata_loader(),
+ &krate,
+ &crate_name,
+ );
+ let krate = resolver.access(|resolver| {
+ passes::configure_and_expand(sess, &lint_store, krate, &crate_name, resolver)
+ })?;
+ Ok((Lrc::new(krate), Rc::new(RefCell::new(resolver)), lint_store))
+ })
+ }
+
+ fn dep_graph(&self) -> Result<&Query<DepGraph>> {
+ self.dep_graph.compute(|| {
+ let sess = self.session();
+ let future_opt = self.dep_graph_future()?.take();
+ let dep_graph = future_opt
+ .and_then(|future| {
+ let (prev_graph, prev_work_products) =
+ sess.time("blocked_on_dep_graph_loading", || future.open().open(sess));
+
+ rustc_incremental::build_dep_graph(sess, prev_graph, prev_work_products)
+ })
+ .unwrap_or_else(DepGraph::new_disabled);
+ Ok(dep_graph)
+ })
+ }
+
+ pub fn prepare_outputs(&self) -> Result<&Query<OutputFilenames>> {
+ self.prepare_outputs.compute(|| {
+ let (krate, boxed_resolver, _) = &*self.expansion()?.peek();
+ let crate_name = self.crate_name()?.peek();
+ passes::prepare_outputs(
+ self.session(),
+ self.compiler,
+ krate,
+ &*boxed_resolver,
+ &crate_name,
+ )
+ })
+ }
+
+ pub fn global_ctxt(&'tcx self) -> Result<&Query<QueryContext<'tcx>>> {
+ self.global_ctxt.compute(|| {
+ let crate_name = self.crate_name()?.peek().clone();
+ let outputs = self.prepare_outputs()?.peek().clone();
+ let dep_graph = self.dep_graph()?.peek().clone();
+ let (krate, resolver, lint_store) = self.expansion()?.take();
+ Ok(passes::create_global_ctxt(
+ self.compiler,
+ lint_store,
+ krate,
+ dep_graph,
+ resolver,
+ outputs,
+ &crate_name,
+ &self.queries,
+ &self.gcx,
+ &self.arena,
+ &self.hir_arena,
+ ))
+ })
+ }
+
+ pub fn ongoing_codegen(&'tcx self) -> Result<&Query<Box<dyn Any>>> {
+ self.ongoing_codegen.compute(|| {
+ let outputs = self.prepare_outputs()?;
+ self.global_ctxt()?.peek_mut().enter(|tcx| {
+ tcx.analysis(()).ok();
+
+ // Don't do code generation if there were any errors
+ self.session().compile_status()?;
+
+ // Hook for UI tests.
+ Self::check_for_rustc_errors_attr(tcx);
+
+ Ok(passes::start_codegen(&***self.codegen_backend(), tcx, &*outputs.peek()))
+ })
+ })
+ }
+
+    /// Check for the `#[rustc_error]` annotation, which forces an error in codegen. This is
+    /// used in UI tests to verify that compilation succeeds without reporting an error.
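+    /// For example (illustrative), a UI test may annotate its entry point with
+    /// `#[rustc_error]` or `#[rustc_error(delay_span_bug_from_inside_query)]`.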
+ fn check_for_rustc_errors_attr(tcx: TyCtxt<'_>) {
+ let Some((def_id, _)) = tcx.entry_fn(()) else { return };
+ for attr in tcx.get_attrs(def_id, sym::rustc_error) {
+ match attr.meta_item_list() {
+ // Check if there is a `#[rustc_error(delay_span_bug_from_inside_query)]`.
+ Some(list)
+ if list.iter().any(|list_item| {
+ matches!(
+ list_item.ident().map(|i| i.name),
+ Some(sym::delay_span_bug_from_inside_query)
+ )
+ }) =>
+ {
+ tcx.ensure().trigger_delay_span_bug(def_id);
+ }
+
+ // Bare `#[rustc_error]`.
+ None => {
+ tcx.sess.span_fatal(
+ tcx.def_span(def_id),
+ "fatal error triggered by #[rustc_error]",
+ );
+ }
+
+ // Some other attribute.
+ Some(_) => {
+ tcx.sess.span_warn(
+ tcx.def_span(def_id),
+ "unexpected annotation used with `#[rustc_error(...)]!",
+ );
+ }
+ }
+ }
+ }
+
+ pub fn linker(&'tcx self) -> Result<Linker> {
+ let sess = self.session().clone();
+ let codegen_backend = self.codegen_backend().clone();
+
+ let dep_graph = self.dep_graph()?.peek().clone();
+ let prepare_outputs = self.prepare_outputs()?.take();
+ let crate_hash = self.global_ctxt()?.peek_mut().enter(|tcx| tcx.crate_hash(LOCAL_CRATE));
+ let ongoing_codegen = self.ongoing_codegen()?.take();
+
+ Ok(Linker {
+ sess,
+ codegen_backend,
+
+ dep_graph,
+ prepare_outputs,
+ crate_hash,
+ ongoing_codegen,
+ })
+ }
+}
+
+pub struct Linker {
+ // compilation inputs
+ sess: Lrc<Session>,
+ codegen_backend: Lrc<Box<dyn CodegenBackend>>,
+
+ // compilation outputs
+ dep_graph: DepGraph,
+ prepare_outputs: OutputFilenames,
+ crate_hash: Svh,
+ ongoing_codegen: Box<dyn Any>,
+}
+
+impl Linker {
+ pub fn link(self) -> Result<()> {
+ let (codegen_results, work_products) = self.codegen_backend.join_codegen(
+ self.ongoing_codegen,
+ &self.sess,
+ &self.prepare_outputs,
+ )?;
+
+ self.sess.compile_status()?;
+
+ let sess = &self.sess;
+ let dep_graph = self.dep_graph;
+ sess.time("serialize_work_products", || {
+ rustc_incremental::save_work_product_index(sess, &dep_graph, work_products)
+ });
+
+ let prof = self.sess.prof.clone();
+ prof.generic_activity("drop_dep_graph").run(move || drop(dep_graph));
+
+ // Now that we won't touch anything in the incremental compilation directory
+ // any more, we can finalize it (which involves renaming it)
+ rustc_incremental::finalize_session_directory(&self.sess, self.crate_hash);
+
+ if !self
+ .sess
+ .opts
+ .output_types
+ .keys()
+ .any(|&i| i == OutputType::Exe || i == OutputType::Metadata)
+ {
+ return Ok(());
+ }
+
+ if sess.opts.unstable_opts.no_link {
+ let encoded = CodegenResults::serialize_rlink(&codegen_results);
+ let rlink_file = self.prepare_outputs.with_extension(config::RLINK_EXT);
+ std::fs::write(&rlink_file, encoded).map_err(|err| {
+ sess.fatal(&format!("failed to write file {}: {}", rlink_file.display(), err));
+ })?;
+ return Ok(());
+ }
+
+ let _timer = sess.prof.verbose_generic_activity("link_crate");
+ self.codegen_backend.link(&self.sess, codegen_results, &self.prepare_outputs)
+ }
+}
+
+impl Compiler {
+ pub fn enter<F, T>(&self, f: F) -> T
+ where
+ F: for<'tcx> FnOnce(&'tcx Queries<'tcx>) -> T,
+ {
+ let mut _timer = None;
+ let queries = Queries::new(self);
+ let ret = f(&queries);
+
+ // NOTE: intentionally does not compute the global context if it hasn't been built yet,
+ // since that likely means there was a parse error.
+ if let Some(Ok(gcx)) = &mut *queries.global_ctxt.result.borrow_mut() {
+ // We assume that no queries are run past here. If there are new queries
+ // after this point, they'll show up as "<unknown>" in self-profiling data.
+ {
+ let _prof_timer =
+ queries.session().prof.generic_activity("self_profile_alloc_query_strings");
+ gcx.enter(rustc_query_impl::alloc_self_profile_query_strings);
+ }
+
+ self.session()
+ .time("serialize_dep_graph", || gcx.enter(rustc_incremental::save_dep_graph));
+ }
+
+ _timer = Some(self.session().timer("free_global_ctxt"));
+
+ ret
+ }
+}
diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs
new file mode 100644
index 000000000..a9fdfa241
--- /dev/null
+++ b/compiler/rustc_interface/src/tests.rs
@@ -0,0 +1,830 @@
+#![cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+use crate::interface::parse_cfgspecs;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{emitter::HumanReadableErrorType, registry, ColorConfig};
+use rustc_session::config::InstrumentCoverage;
+use rustc_session::config::Strip;
+use rustc_session::config::{build_configuration, build_session_options, to_crate_config};
+use rustc_session::config::{
+ rustc_optgroups, ErrorOutputType, ExternLocation, LocationDetail, Options, Passes,
+};
+use rustc_session::config::{
+ BranchProtection, Externs, OomStrategy, OutputType, OutputTypes, PAuthKey, PacRet,
+ ProcMacroExecutionStrategy, SymbolManglingVersion, WasiExecModel,
+};
+use rustc_session::config::{CFGuard, ExternEntry, LinkerPluginLto, LtoCli, SwitchWithOptPath};
+use rustc_session::lint::Level;
+use rustc_session::search_paths::SearchPath;
+use rustc_session::utils::{CanonicalizedPath, NativeLib, NativeLibKind};
+use rustc_session::{build_session, getopts, DiagnosticOutput, Session};
+use rustc_span::edition::{Edition, DEFAULT_EDITION};
+use rustc_span::symbol::sym;
+use rustc_span::SourceFileHashAlgorithm;
+use rustc_target::spec::{CodeModel, LinkerFlavor, MergeFunctions, PanicStrategy};
+use rustc_target::spec::{
+ RelocModel, RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TlsModel,
+};
+
+use std::collections::{BTreeMap, BTreeSet};
+use std::iter::FromIterator;
+use std::num::NonZeroUsize;
+use std::path::{Path, PathBuf};
+
+type CfgSpecs = FxHashSet<(String, Option<String>)>;
+
+fn build_session_options_and_crate_config(matches: getopts::Matches) -> (Options, CfgSpecs) {
+ let sessopts = build_session_options(&matches);
+ let cfg = parse_cfgspecs(matches.opt_strs("cfg"));
+ (sessopts, cfg)
+}
+
+fn mk_session(matches: getopts::Matches) -> (Session, CfgSpecs) {
+ let registry = registry::Registry::new(&[]);
+ let (sessopts, cfg) = build_session_options_and_crate_config(matches);
+ let sess = build_session(
+ sessopts,
+ None,
+ None,
+ registry,
+ DiagnosticOutput::Default,
+ Default::default(),
+ None,
+ None,
+ );
+ (sess, cfg)
+}
+
+fn new_public_extern_entry<S, I>(locations: I) -> ExternEntry
+where
+ S: Into<String>,
+ I: IntoIterator<Item = S>,
+{
+ let locations: BTreeSet<CanonicalizedPath> =
+ locations.into_iter().map(|s| CanonicalizedPath::new(Path::new(&s.into()))).collect();
+
+ ExternEntry {
+ location: ExternLocation::ExactPaths(locations),
+ is_private_dep: false,
+ add_prelude: true,
+ nounused_dep: false,
+ }
+}
+
+fn optgroups() -> getopts::Options {
+ let mut opts = getopts::Options::new();
+ for group in rustc_optgroups() {
+ (group.apply)(&mut opts);
+ }
+ return opts;
+}
+
+fn mk_map<K: Ord, V>(entries: Vec<(K, V)>) -> BTreeMap<K, V> {
+ BTreeMap::from_iter(entries.into_iter())
+}
+
+fn assert_same_clone(x: &Options) {
+ assert_eq!(x.dep_tracking_hash(true), x.clone().dep_tracking_hash(true));
+ assert_eq!(x.dep_tracking_hash(false), x.clone().dep_tracking_hash(false));
+}
+
+fn assert_same_hash(x: &Options, y: &Options) {
+ assert_eq!(x.dep_tracking_hash(true), y.dep_tracking_hash(true));
+ assert_eq!(x.dep_tracking_hash(false), y.dep_tracking_hash(false));
+ // Check clone
+ assert_same_clone(x);
+ assert_same_clone(y);
+}
+
+fn assert_different_hash(x: &Options, y: &Options) {
+ assert_ne!(x.dep_tracking_hash(true), y.dep_tracking_hash(true));
+ assert_ne!(x.dep_tracking_hash(false), y.dep_tracking_hash(false));
+ // Check clone
+ assert_same_clone(x);
+ assert_same_clone(y);
+}
+
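+// In the assertions here, `dep_tracking_hash(true)` computes the hash that feeds the
+// crate hash, while `dep_tracking_hash(false)` computes the hash used for incremental
+// compilation (cf. `test_top_level_options_tracked_no_crate`).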
+fn assert_non_crate_hash_different(x: &Options, y: &Options) {
+ assert_eq!(x.dep_tracking_hash(true), y.dep_tracking_hash(true));
+ assert_ne!(x.dep_tracking_hash(false), y.dep_tracking_hash(false));
+ // Check clone
+ assert_same_clone(x);
+ assert_same_clone(y);
+}
+
+// When the user supplies --test we should implicitly supply --cfg test
+#[test]
+fn test_switch_implies_cfg_test() {
+ rustc_span::create_default_session_globals_then(|| {
+ let matches = optgroups().parse(&["--test".to_string()]).unwrap();
+ let (sess, cfg) = mk_session(matches);
+ let cfg = build_configuration(&sess, to_crate_config(cfg));
+ assert!(cfg.contains(&(sym::test, None)));
+ });
+}
+
+// When the user supplies --test and --cfg test, don't implicitly add another --cfg test
+#[test]
+fn test_switch_implies_cfg_test_unless_cfg_test() {
+ rustc_span::create_default_session_globals_then(|| {
+ let matches = optgroups().parse(&["--test".to_string(), "--cfg=test".to_string()]).unwrap();
+ let (sess, cfg) = mk_session(matches);
+ let cfg = build_configuration(&sess, to_crate_config(cfg));
+ let mut test_items = cfg.iter().filter(|&&(name, _)| name == sym::test);
+ assert!(test_items.next().is_some());
+ assert!(test_items.next().is_none());
+ });
+}
+
+#[test]
+fn test_can_print_warnings() {
+ rustc_span::create_default_session_globals_then(|| {
+ let matches = optgroups().parse(&["-Awarnings".to_string()]).unwrap();
+ let (sess, _) = mk_session(matches);
+ assert!(!sess.diagnostic().can_emit_warnings());
+ });
+
+ rustc_span::create_default_session_globals_then(|| {
+ let matches =
+ optgroups().parse(&["-Awarnings".to_string(), "-Dwarnings".to_string()]).unwrap();
+ let (sess, _) = mk_session(matches);
+ assert!(sess.diagnostic().can_emit_warnings());
+ });
+
+ rustc_span::create_default_session_globals_then(|| {
+ let matches = optgroups().parse(&["-Adead_code".to_string()]).unwrap();
+ let (sess, _) = mk_session(matches);
+ assert!(sess.diagnostic().can_emit_warnings());
+ });
+}
+
+#[test]
+fn test_output_types_tracking_hash_different_paths() {
+ let mut v1 = Options::default();
+ let mut v2 = Options::default();
+ let mut v3 = Options::default();
+
+ v1.output_types = OutputTypes::new(&[(OutputType::Exe, Some(PathBuf::from("./some/thing")))]);
+ v2.output_types = OutputTypes::new(&[(OutputType::Exe, Some(PathBuf::from("/some/thing")))]);
+ v3.output_types = OutputTypes::new(&[(OutputType::Exe, None)]);
+
+ assert_non_crate_hash_different(&v1, &v2);
+ assert_non_crate_hash_different(&v1, &v3);
+ assert_non_crate_hash_different(&v2, &v3);
+}
+
+#[test]
+fn test_output_types_tracking_hash_different_construction_order() {
+ let mut v1 = Options::default();
+ let mut v2 = Options::default();
+
+ v1.output_types = OutputTypes::new(&[
+ (OutputType::Exe, Some(PathBuf::from("./some/thing"))),
+ (OutputType::Bitcode, Some(PathBuf::from("./some/thing.bc"))),
+ ]);
+
+ v2.output_types = OutputTypes::new(&[
+ (OutputType::Bitcode, Some(PathBuf::from("./some/thing.bc"))),
+ (OutputType::Exe, Some(PathBuf::from("./some/thing"))),
+ ]);
+
+ assert_same_hash(&v1, &v2);
+}
+
+#[test]
+fn test_externs_tracking_hash_different_construction_order() {
+ let mut v1 = Options::default();
+ let mut v2 = Options::default();
+ let mut v3 = Options::default();
+
+ v1.externs = Externs::new(mk_map(vec![
+ (String::from("a"), new_public_extern_entry(vec!["b", "c"])),
+ (String::from("d"), new_public_extern_entry(vec!["e", "f"])),
+ ]));
+
+ v2.externs = Externs::new(mk_map(vec![
+ (String::from("d"), new_public_extern_entry(vec!["e", "f"])),
+ (String::from("a"), new_public_extern_entry(vec!["b", "c"])),
+ ]));
+
+ v3.externs = Externs::new(mk_map(vec![
+ (String::from("a"), new_public_extern_entry(vec!["b", "c"])),
+ (String::from("d"), new_public_extern_entry(vec!["f", "e"])),
+ ]));
+
+ assert_same_hash(&v1, &v2);
+ assert_same_hash(&v1, &v3);
+ assert_same_hash(&v2, &v3);
+}
+
+#[test]
+fn test_lints_tracking_hash_different_values() {
+ let mut v1 = Options::default();
+ let mut v2 = Options::default();
+ let mut v3 = Options::default();
+
+ v1.lint_opts = vec![
+ (String::from("a"), Level::Allow),
+ (String::from("b"), Level::Warn),
+ (String::from("c"), Level::Deny),
+ (String::from("d"), Level::Forbid),
+ ];
+
+ v2.lint_opts = vec![
+ (String::from("a"), Level::Allow),
+ (String::from("b"), Level::Warn),
+ (String::from("X"), Level::Deny),
+ (String::from("d"), Level::Forbid),
+ ];
+
+ v3.lint_opts = vec![
+ (String::from("a"), Level::Allow),
+ (String::from("b"), Level::Warn),
+ (String::from("c"), Level::Forbid),
+ (String::from("d"), Level::Deny),
+ ];
+
+ assert_non_crate_hash_different(&v1, &v2);
+ assert_non_crate_hash_different(&v1, &v3);
+ assert_non_crate_hash_different(&v2, &v3);
+}
+
+#[test]
+fn test_lints_tracking_hash_different_construction_order() {
+ let mut v1 = Options::default();
+ let mut v2 = Options::default();
+
+ v1.lint_opts = vec![
+ (String::from("a"), Level::Allow),
+ (String::from("b"), Level::Warn),
+ (String::from("c"), Level::Deny),
+ (String::from("d"), Level::Forbid),
+ ];
+
+ v2.lint_opts = vec![
+ (String::from("a"), Level::Allow),
+ (String::from("c"), Level::Deny),
+ (String::from("b"), Level::Warn),
+ (String::from("d"), Level::Forbid),
+ ];
+
+ // The hash should be order-dependent
+ assert_non_crate_hash_different(&v1, &v2);
+}
+
+#[test]
+fn test_lint_cap_hash_different() {
+ let mut v1 = Options::default();
+ let mut v2 = Options::default();
+ let v3 = Options::default();
+
+ v1.lint_cap = Some(Level::Forbid);
+ v2.lint_cap = Some(Level::Allow);
+
+ assert_non_crate_hash_different(&v1, &v2);
+ assert_non_crate_hash_different(&v1, &v3);
+ assert_non_crate_hash_different(&v2, &v3);
+}
+
+#[test]
+fn test_search_paths_tracking_hash_different_order() {
+ let mut v1 = Options::default();
+ let mut v2 = Options::default();
+ let mut v3 = Options::default();
+ let mut v4 = Options::default();
+
+ const JSON: ErrorOutputType = ErrorOutputType::Json {
+ pretty: false,
+ json_rendered: HumanReadableErrorType::Default(ColorConfig::Never),
+ };
+
+ // Reference
+ v1.search_paths.push(SearchPath::from_cli_opt("native=abc", JSON));
+ v1.search_paths.push(SearchPath::from_cli_opt("crate=def", JSON));
+ v1.search_paths.push(SearchPath::from_cli_opt("dependency=ghi", JSON));
+ v1.search_paths.push(SearchPath::from_cli_opt("framework=jkl", JSON));
+ v1.search_paths.push(SearchPath::from_cli_opt("all=mno", JSON));
+
+ v2.search_paths.push(SearchPath::from_cli_opt("native=abc", JSON));
+ v2.search_paths.push(SearchPath::from_cli_opt("dependency=ghi", JSON));
+ v2.search_paths.push(SearchPath::from_cli_opt("crate=def", JSON));
+ v2.search_paths.push(SearchPath::from_cli_opt("framework=jkl", JSON));
+ v2.search_paths.push(SearchPath::from_cli_opt("all=mno", JSON));
+
+ v3.search_paths.push(SearchPath::from_cli_opt("crate=def", JSON));
+ v3.search_paths.push(SearchPath::from_cli_opt("framework=jkl", JSON));
+ v3.search_paths.push(SearchPath::from_cli_opt("native=abc", JSON));
+ v3.search_paths.push(SearchPath::from_cli_opt("dependency=ghi", JSON));
+ v3.search_paths.push(SearchPath::from_cli_opt("all=mno", JSON));
+
+ v4.search_paths.push(SearchPath::from_cli_opt("all=mno", JSON));
+ v4.search_paths.push(SearchPath::from_cli_opt("native=abc", JSON));
+ v4.search_paths.push(SearchPath::from_cli_opt("crate=def", JSON));
+ v4.search_paths.push(SearchPath::from_cli_opt("dependency=ghi", JSON));
+ v4.search_paths.push(SearchPath::from_cli_opt("framework=jkl", JSON));
+
+ assert_same_hash(&v1, &v2);
+ assert_same_hash(&v1, &v3);
+ assert_same_hash(&v1, &v4);
+}
+
+#[test]
+fn test_native_libs_tracking_hash_different_values() {
+ let mut v1 = Options::default();
+ let mut v2 = Options::default();
+ let mut v3 = Options::default();
+ let mut v4 = Options::default();
+ let mut v5 = Options::default();
+
+ // Reference
+ v1.libs = vec![
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ ];
+
+ // Change label
+ v2.libs = vec![
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("X"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ ];
+
+ // Change kind
+ v3.libs = vec![
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ ];
+
+ // Change new-name
+ v4.libs = vec![
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: Some(String::from("X")),
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ ];
+
+ // Change verbatim
+ v5.libs = vec![
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: Some(true),
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ ];
+
+ assert_different_hash(&v1, &v2);
+ assert_different_hash(&v1, &v3);
+ assert_different_hash(&v1, &v4);
+ assert_different_hash(&v1, &v5);
+}
+
+#[test]
+fn test_native_libs_tracking_hash_different_order() {
+ let mut v1 = Options::default();
+ let mut v2 = Options::default();
+ let mut v3 = Options::default();
+
+ // Reference
+ v1.libs = vec![
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ ];
+
+ v2.libs = vec![
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ ];
+
+ v3.libs = vec![
+ NativeLib {
+ name: String::from("c"),
+ new_name: None,
+ kind: NativeLibKind::Unspecified,
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("a"),
+ new_name: None,
+ kind: NativeLibKind::Static { bundle: None, whole_archive: None },
+ verbatim: None,
+ },
+ NativeLib {
+ name: String::from("b"),
+ new_name: None,
+ kind: NativeLibKind::Framework { as_needed: None },
+ verbatim: None,
+ },
+ ];
+
+ // The hash should be order-dependent
+ assert_different_hash(&v1, &v2);
+ assert_different_hash(&v1, &v3);
+ assert_different_hash(&v2, &v3);
+}
+
+#[test]
+fn test_codegen_options_tracking_hash() {
+ let reference = Options::default();
+ let mut opts = Options::default();
+
+ macro_rules! untracked {
+ ($name: ident, $non_default_value: expr) => {
+ assert_ne!(opts.cg.$name, $non_default_value);
+ opts.cg.$name = $non_default_value;
+ assert_same_hash(&reference, &opts);
+ };
+ }
+
+ // Make sure that changing an [UNTRACKED] option leaves the hash unchanged.
+ // This list is in alphabetical order.
+ untracked!(ar, String::from("abc"));
+ untracked!(codegen_units, Some(42));
+ untracked!(default_linker_libraries, true);
+ untracked!(extra_filename, String::from("extra-filename"));
+ untracked!(incremental, Some(String::from("abc")));
+ // `link_arg` is omitted because it just forwards to `link_args`.
+ untracked!(link_args, vec![String::from("abc"), String::from("def")]);
+ untracked!(link_self_contained, Some(true));
+ untracked!(linker, Some(PathBuf::from("linker")));
+ untracked!(linker_flavor, Some(LinkerFlavor::Gcc));
+ untracked!(no_stack_check, true);
+ untracked!(remark, Passes::Some(vec![String::from("pass1"), String::from("pass2")]));
+ untracked!(rpath, true);
+ untracked!(save_temps, true);
+ untracked!(strip, Strip::Debuginfo);
+
+ macro_rules! tracked {
+ ($name: ident, $non_default_value: expr) => {
+ opts = reference.clone();
+ assert_ne!(opts.cg.$name, $non_default_value);
+ opts.cg.$name = $non_default_value;
+ assert_different_hash(&reference, &opts);
+ };
+ }
+
+ // Make sure that changing a [TRACKED] option changes the hash.
+ // This list is in alphabetical order.
+ tracked!(code_model, Some(CodeModel::Large));
+ tracked!(control_flow_guard, CFGuard::Checks);
+ tracked!(debug_assertions, Some(true));
+ tracked!(debuginfo, 0xdeadbeef);
+ tracked!(embed_bitcode, false);
+ tracked!(force_frame_pointers, Some(false));
+ tracked!(force_unwind_tables, Some(true));
+ tracked!(inline_threshold, Some(0xf007ba11));
+ tracked!(instrument_coverage, Some(InstrumentCoverage::All));
+ tracked!(linker_plugin_lto, LinkerPluginLto::LinkerPluginAuto);
+ tracked!(link_dead_code, Some(true));
+ tracked!(llvm_args, vec![String::from("1"), String::from("2")]);
+ tracked!(lto, LtoCli::Fat);
+ tracked!(metadata, vec![String::from("A"), String::from("B")]);
+ tracked!(no_prepopulate_passes, true);
+ tracked!(no_redzone, Some(true));
+ tracked!(no_vectorize_loops, true);
+ tracked!(no_vectorize_slp, true);
+ tracked!(opt_level, "3".to_string());
+ tracked!(overflow_checks, Some(true));
+ tracked!(panic, Some(PanicStrategy::Abort));
+ tracked!(passes, vec![String::from("1"), String::from("2")]);
+ tracked!(prefer_dynamic, true);
+ tracked!(profile_generate, SwitchWithOptPath::Enabled(None));
+ tracked!(profile_use, Some(PathBuf::from("abc")));
+ tracked!(relocation_model, Some(RelocModel::Pic));
+ tracked!(soft_float, true);
+ tracked!(split_debuginfo, Some(SplitDebuginfo::Packed));
+ tracked!(symbol_mangling_version, Some(SymbolManglingVersion::V0));
+ tracked!(target_cpu, Some(String::from("abc")));
+ tracked!(target_feature, String::from("all the features, all of them"));
+}
+
+#[test]
+fn test_top_level_options_tracked_no_crate() {
+ let reference = Options::default();
+ let mut opts;
+
+ macro_rules! tracked {
+ ($name: ident, $non_default_value: expr) => {
+ opts = reference.clone();
+ assert_ne!(opts.$name, $non_default_value);
+ opts.$name = $non_default_value;
+ // The crate hash should be the same
+ assert_eq!(reference.dep_tracking_hash(true), opts.dep_tracking_hash(true));
+ // The incremental hash should be different
+ assert_ne!(reference.dep_tracking_hash(false), opts.dep_tracking_hash(false));
+ };
+ }
+
+ // Make sure that changing a [TRACKED_NO_CRATE_HASH] option leaves the crate hash unchanged but changes the incremental hash.
+ // This list is in alphabetical order.
+ tracked!(
+ real_rust_source_base_dir,
+ Some("/home/bors/rust/.rustup/toolchains/nightly/lib/rustlib/src/rust".into())
+ );
+ tracked!(remap_path_prefix, vec![("/home/bors/rust".into(), "src".into())]);
+}
+
+#[test]
+fn test_unstable_options_tracking_hash() {
+ let reference = Options::default();
+ let mut opts = Options::default();
+
+ macro_rules! untracked {
+ ($name: ident, $non_default_value: expr) => {
+ assert_ne!(opts.unstable_opts.$name, $non_default_value);
+ opts.unstable_opts.$name = $non_default_value;
+ assert_same_hash(&reference, &opts);
+ };
+ }
+
+ // Make sure that changing an [UNTRACKED] option leaves the hash unchanged.
+ // This list is in alphabetical order.
+ untracked!(assert_incr_state, Some(String::from("loaded")));
+ untracked!(deduplicate_diagnostics, false);
+ untracked!(dep_tasks, true);
+ untracked!(dlltool, Some(PathBuf::from("custom_dlltool.exe")));
+ untracked!(dont_buffer_diagnostics, true);
+ untracked!(dump_dep_graph, true);
+ untracked!(dump_drop_tracking_cfg, Some("cfg.dot".to_string()));
+ untracked!(dump_mir, Some(String::from("abc")));
+ untracked!(dump_mir_dataflow, true);
+ untracked!(dump_mir_dir, String::from("abc"));
+ untracked!(dump_mir_exclude_pass_number, true);
+ untracked!(dump_mir_graphviz, true);
+ untracked!(emit_stack_sizes, true);
+ untracked!(future_incompat_test, true);
+ untracked!(hir_stats, true);
+ untracked!(identify_regions, true);
+ untracked!(incremental_ignore_spans, true);
+ untracked!(incremental_info, true);
+ untracked!(incremental_verify_ich, true);
+ untracked!(input_stats, true);
+ untracked!(keep_hygiene_data, true);
+ untracked!(link_native_libraries, false);
+ untracked!(llvm_time_trace, true);
+ untracked!(ls, true);
+ untracked!(macro_backtrace, true);
+ untracked!(meta_stats, true);
+ untracked!(mir_pretty_relative_line_numbers, true);
+ untracked!(nll_facts, true);
+ untracked!(no_analysis, true);
+ untracked!(no_interleave_lints, true);
+ untracked!(no_leak_check, true);
+ untracked!(no_parallel_llvm, true);
+ untracked!(parse_only, true);
+ untracked!(perf_stats, true);
+ // `pre_link_arg` is omitted because it just forwards to `pre_link_args`.
+ untracked!(pre_link_args, vec![String::from("abc"), String::from("def")]);
+ untracked!(print_llvm_passes, true);
+ untracked!(print_mono_items, Some(String::from("abc")));
+ untracked!(print_type_sizes, true);
+ untracked!(proc_macro_backtrace, true);
+ untracked!(proc_macro_execution_strategy, ProcMacroExecutionStrategy::CrossThread);
+ untracked!(profile_closures, true);
+ untracked!(query_dep_graph, true);
+ untracked!(save_analysis, true);
+ untracked!(self_profile, SwitchWithOptPath::Enabled(None));
+ untracked!(self_profile_events, Some(vec![String::new()]));
+ untracked!(span_debug, true);
+ untracked!(span_free_formats, true);
+ untracked!(temps_dir, Some(String::from("abc")));
+ untracked!(threads, 99);
+ untracked!(time, true);
+ untracked!(time_llvm_passes, true);
+ untracked!(time_passes, true);
+ untracked!(trace_macros, true);
+ untracked!(trim_diagnostic_paths, false);
+ untracked!(ui_testing, true);
+ untracked!(unpretty, Some("expanded".to_string()));
+ untracked!(unstable_options, true);
+ untracked!(validate_mir, true);
+ untracked!(verbose, true);
+
+ macro_rules! tracked {
+ ($name: ident, $non_default_value: expr) => {
+ opts = reference.clone();
+ assert_ne!(opts.unstable_opts.$name, $non_default_value);
+ opts.unstable_opts.$name = $non_default_value;
+ assert_different_hash(&reference, &opts);
+ };
+ }
+
+ // Make sure that changing a [TRACKED] option changes the hash.
+ // This list is in alphabetical order.
+ tracked!(allow_features, Some(vec![String::from("lang_items")]));
+ tracked!(always_encode_mir, true);
+ tracked!(asm_comments, true);
+ tracked!(assume_incomplete_release, true);
+ tracked!(binary_dep_depinfo, true);
+ tracked!(box_noalias, Some(false));
+ tracked!(
+ branch_protection,
+ Some(BranchProtection {
+ bti: true,
+ pac_ret: Some(PacRet { leaf: true, key: PAuthKey::B })
+ })
+ );
+ tracked!(chalk, true);
+ tracked!(codegen_backend, Some("abc".to_string()));
+ tracked!(crate_attr, vec!["abc".to_string()]);
+ tracked!(debug_info_for_profiling, true);
+ tracked!(debug_macros, true);
+ tracked!(dep_info_omit_d_target, true);
+ tracked!(drop_tracking, true);
+ tracked!(dual_proc_macros, true);
+ tracked!(export_executable_symbols, true);
+ tracked!(dwarf_version, Some(5));
+ tracked!(emit_thin_lto, false);
+ tracked!(fewer_names, Some(true));
+ tracked!(force_unstable_if_unmarked, true);
+ tracked!(fuel, Some(("abc".to_string(), 99)));
+ tracked!(function_sections, Some(false));
+ tracked!(human_readable_cgu_names, true);
+ tracked!(inline_in_all_cgus, Some(true));
+ tracked!(inline_mir, Some(true));
+ tracked!(inline_mir_hint_threshold, Some(123));
+ tracked!(inline_mir_threshold, Some(123));
+ tracked!(instrument_coverage, Some(InstrumentCoverage::All));
+ tracked!(instrument_mcount, true);
+ tracked!(link_only, true);
+ tracked!(llvm_plugins, vec![String::from("plugin_name")]);
+ tracked!(location_detail, LocationDetail { file: true, line: false, column: false });
+ tracked!(merge_functions, Some(MergeFunctions::Disabled));
+ tracked!(mir_emit_retag, true);
+ tracked!(mir_enable_passes, vec![("DestProp".to_string(), false)]);
+ tracked!(mir_opt_level, Some(4));
+ tracked!(move_size_limit, Some(4096));
+ tracked!(mutable_noalias, Some(true));
+ tracked!(new_llvm_pass_manager, Some(true));
+ tracked!(no_generate_arange_section, true);
+ tracked!(no_link, true);
+ tracked!(no_profiler_runtime, true);
+ tracked!(no_unique_section_names, true);
+ tracked!(oom, OomStrategy::Panic);
+ tracked!(osx_rpath_install_name, true);
+ tracked!(panic_abort_tests, true);
+ tracked!(panic_in_drop, PanicStrategy::Abort);
+ tracked!(pick_stable_methods_before_any_unstable, false);
+ tracked!(plt, Some(true));
+ tracked!(polonius, true);
+ tracked!(precise_enum_drop_elaboration, false);
+ tracked!(print_fuel, Some("abc".to_string()));
+ tracked!(profile, true);
+ tracked!(profile_emit, Some(PathBuf::from("abc")));
+ tracked!(profile_sample_use, Some(PathBuf::from("abc")));
+ tracked!(profiler_runtime, "abc".to_string());
+ tracked!(relax_elf_relocations, Some(true));
+ tracked!(relro_level, Some(RelroLevel::Full));
+ tracked!(remap_cwd_prefix, Some(PathBuf::from("abc")));
+ tracked!(report_delayed_bugs, true);
+ tracked!(sanitizer, SanitizerSet::ADDRESS);
+ tracked!(sanitizer_memory_track_origins, 2);
+ tracked!(sanitizer_recover, SanitizerSet::ADDRESS);
+ tracked!(saturating_float_casts, Some(true));
+ tracked!(share_generics, Some(true));
+ tracked!(show_span, Some(String::from("abc")));
+ tracked!(simulate_remapped_rust_src_base, Some(PathBuf::from("/rustc/abc")));
+ tracked!(src_hash_algorithm, Some(SourceFileHashAlgorithm::Sha1));
+ tracked!(stack_protector, StackProtector::All);
+ tracked!(symbol_mangling_version, Some(SymbolManglingVersion::V0));
+ tracked!(teach, true);
+ tracked!(thinlto, Some(true));
+ tracked!(thir_unsafeck, true);
+ tracked!(tls_model, Some(TlsModel::GeneralDynamic));
+ tracked!(translate_remapped_path_to_local_path, false);
+ tracked!(trap_unreachable, Some(false));
+ tracked!(treat_err_as_bug, NonZeroUsize::new(1));
+ tracked!(tune_cpu, Some(String::from("abc")));
+ tracked!(uninit_const_chunk_threshold, 123);
+ tracked!(unleash_the_miri_inside_of_you, true);
+ tracked!(use_ctors_section, Some(true));
+ tracked!(verify_llvm_ir, true);
+ tracked!(virtual_function_elimination, true);
+ tracked!(wasi_exec_model, Some(WasiExecModel::Reactor));
+
+ macro_rules! tracked_no_crate_hash {
+ ($name: ident, $non_default_value: expr) => {
+ opts = reference.clone();
+ assert_ne!(opts.unstable_opts.$name, $non_default_value);
+ opts.unstable_opts.$name = $non_default_value;
+ assert_non_crate_hash_different(&reference, &opts);
+ };
+ }
+ tracked_no_crate_hash!(no_codegen, true);
+}
+
+#[test]
+fn test_edition_parsing() {
+ // test default edition
+ let options = Options::default();
+ assert!(options.edition == DEFAULT_EDITION);
+
+ let matches = optgroups().parse(&["--edition=2018".to_string()]).unwrap();
+ let (sessopts, _) = build_session_options_and_crate_config(matches);
+ assert!(sessopts.edition == Edition::Edition2018)
+}
diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs
new file mode 100644
index 000000000..5e5596f13
--- /dev/null
+++ b/compiler/rustc_interface/src/util.rs
@@ -0,0 +1,672 @@
+use libloading::Library;
+use rustc_ast as ast;
+use rustc_codegen_ssa::traits::CodegenBackend;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+#[cfg(parallel_compiler)]
+use rustc_data_structures::jobserver;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::registry::Registry;
+#[cfg(parallel_compiler)]
+use rustc_middle::ty::tls;
+use rustc_parse::validate_attr;
+#[cfg(parallel_compiler)]
+use rustc_query_impl::{QueryContext, QueryCtxt};
+use rustc_session as session;
+use rustc_session::config::CheckCfg;
+use rustc_session::config::{self, CrateType};
+use rustc_session::config::{ErrorOutputType, Input, OutputFilenames};
+use rustc_session::lint::{self, BuiltinLintDiagnostics, LintBuffer};
+use rustc_session::parse::CrateConfig;
+use rustc_session::{early_error, filesearch, output, DiagnosticOutput, Session};
+use rustc_span::edition::Edition;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::source_map::FileLoader;
+use rustc_span::symbol::{sym, Symbol};
+use std::env;
+use std::env::consts::{DLL_PREFIX, DLL_SUFFIX};
+use std::mem;
+#[cfg(not(parallel_compiler))]
+use std::panic;
+use std::path::{Path, PathBuf};
+use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::OnceLock;
+use std::thread;
+use tracing::info;
+
+/// Function pointer type that constructs a new CodegenBackend.
+pub type MakeBackendFn = fn() -> Box<dyn CodegenBackend>;
+
+/// Adds `target_feature = "..."` cfgs for a variety of platform-specific
+/// features (SSE, NEON, etc.).
+///
+/// This is performed by checking whether a set of permitted features
+/// is available on the target machine, by querying LLVM.
+pub fn add_configuration(
+ cfg: &mut CrateConfig,
+ sess: &mut Session,
+ codegen_backend: &dyn CodegenBackend,
+) {
+ let tf = sym::target_feature;
+
+ let unstable_target_features = codegen_backend.target_features(sess, true);
+ sess.unstable_target_features.extend(unstable_target_features.iter().cloned());
+
+ let target_features = codegen_backend.target_features(sess, false);
+ sess.target_features.extend(target_features.iter().cloned());
+
+ cfg.extend(target_features.into_iter().map(|feat| (tf, Some(feat))));
+
+ if sess.crt_static(None) {
+ cfg.insert((tf, Some(sym::crt_dash_static)));
+ }
+}
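+
+// Illustrative effect of `add_configuration` (feature names assumed, not
+// exhaustive): on an x86-64 target the crate config might gain entries like
+//
+// (sym::target_feature, Some(sym::sse2))
+// (sym::target_feature, Some(sym::fxsr))
+//
+// plus `(target_feature, crt-static)` when the C runtime is statically linked.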
+
+pub fn create_session(
+ sopts: config::Options,
+ cfg: FxHashSet<(String, Option<String>)>,
+ check_cfg: CheckCfg,
+ diagnostic_output: DiagnosticOutput,
+ file_loader: Option<Box<dyn FileLoader + Send + Sync + 'static>>,
+ input_path: Option<PathBuf>,
+ lint_caps: FxHashMap<lint::LintId, lint::Level>,
+ make_codegen_backend: Option<
+ Box<dyn FnOnce(&config::Options) -> Box<dyn CodegenBackend> + Send>,
+ >,
+ descriptions: Registry,
+) -> (Lrc<Session>, Lrc<Box<dyn CodegenBackend>>) {
+ let codegen_backend = if let Some(make_codegen_backend) = make_codegen_backend {
+ make_codegen_backend(&sopts)
+ } else {
+ get_codegen_backend(
+ &sopts.maybe_sysroot,
+ sopts.unstable_opts.codegen_backend.as_ref().map(|name| &name[..]),
+ )
+ };
+
+ // target_override is documented to be called before init(), so this is okay
+ let target_override = codegen_backend.target_override(&sopts);
+
+ let bundle = match rustc_errors::fluent_bundle(
+ sopts.maybe_sysroot.clone(),
+ sysroot_candidates(),
+ sopts.unstable_opts.translate_lang.clone(),
+ sopts.unstable_opts.translate_additional_ftl.as_deref(),
+ sopts.unstable_opts.translate_directionality_markers,
+ ) {
+ Ok(bundle) => bundle,
+ Err(e) => {
+ early_error(sopts.error_format, &format!("failed to load fluent bundle: {e}"));
+ }
+ };
+
+ let mut sess = session::build_session(
+ sopts,
+ input_path,
+ bundle,
+ descriptions,
+ diagnostic_output,
+ lint_caps,
+ file_loader,
+ target_override,
+ );
+
+ codegen_backend.init(&sess);
+
+ let mut cfg = config::build_configuration(&sess, config::to_crate_config(cfg));
+ add_configuration(&mut cfg, &mut sess, &*codegen_backend);
+
+ let mut check_cfg = config::to_crate_check_config(check_cfg);
+ check_cfg.fill_well_known();
+
+ sess.parse_sess.config = cfg;
+ sess.parse_sess.check_config = check_cfg;
+
+ (Lrc::new(sess), Lrc::new(codegen_backend))
+}
+
+const STACK_SIZE: usize = 8 * 1024 * 1024;
+
+fn get_stack_size() -> Option<usize> {
+ // FIXME: Hacks on hacks. If the env is trying to override the stack size
+ // then *don't* set it explicitly.
+ env::var_os("RUST_MIN_STACK").is_none().then_some(STACK_SIZE)
+}
+
+/// Like a `thread::Builder::spawn` followed by a `join()`, but avoids the need
+/// for `'static` bounds.
+#[cfg(not(parallel_compiler))]
+fn scoped_thread<F: FnOnce() -> R + Send, R: Send>(cfg: thread::Builder, f: F) -> R {
+ // SAFETY: join() is called immediately, so any closure captures are still
+ // alive.
+ match unsafe { cfg.spawn_unchecked(f) }.unwrap().join() {
+ Ok(v) => v,
+ Err(e) => panic::resume_unwind(e),
+ }
+}
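+
+// Illustrative usage sketch, not upstream code: `spawn_unchecked` plus an
+// immediate `join` lets the closure borrow locals, which `thread::spawn`
+// (with its `'static` bound) would reject:
+//
+// let data = vec![1, 2, 3];
+// let sum: i32 = scoped_thread(thread::Builder::new(), || data.iter().sum());
+// assert_eq!(sum, 6);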
+
+#[cfg(not(parallel_compiler))]
+pub fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
+ edition: Edition,
+ _threads: usize,
+ f: F,
+) -> R {
+ let mut cfg = thread::Builder::new().name("rustc".to_string());
+
+ if let Some(size) = get_stack_size() {
+ cfg = cfg.stack_size(size);
+ }
+
+ let main_handler = move || rustc_span::create_session_globals_then(edition, f);
+
+ scoped_thread(cfg, main_handler)
+}
+
+/// Creates a new thread and forwards information in thread locals to it.
+/// The new thread runs the deadlock handler.
+/// Must only be called when a deadlock is about to happen.
+#[cfg(parallel_compiler)]
+unsafe fn handle_deadlock() {
+ let registry = rustc_rayon_core::Registry::current();
+
+ let query_map = tls::with(|tcx| {
+ QueryCtxt::from_tcx(tcx)
+ .try_collect_active_jobs()
+ .expect("active jobs shouldn't be locked in deadlock handler")
+ });
+ thread::spawn(move || rustc_query_impl::deadlock(query_map, &registry));
+}
+
+#[cfg(parallel_compiler)]
+pub fn run_in_thread_pool_with_globals<F: FnOnce() -> R + Send, R: Send>(
+ edition: Edition,
+ threads: usize,
+ f: F,
+) -> R {
+ let mut config = rayon::ThreadPoolBuilder::new()
+ .thread_name(|_| "rustc".to_string())
+ .acquire_thread_handler(jobserver::acquire_thread)
+ .release_thread_handler(jobserver::release_thread)
+ .num_threads(threads)
+ .deadlock_handler(|| unsafe { handle_deadlock() });
+
+ if let Some(size) = get_stack_size() {
+ config = config.stack_size(size);
+ }
+
+ let with_pool = move |pool: &rayon::ThreadPool| pool.install(f);
+
+ rustc_span::create_session_globals_then(edition, || {
+ rustc_span::with_session_globals(|session_globals| {
+ // The main handler runs for each Rayon worker thread and sets up
+ // the thread local rustc uses. `session_globals` is captured and set
+ // on the new threads.
+ let main_handler = move |thread: rayon::ThreadBuilder| {
+ rustc_span::set_session_globals_then(session_globals, || thread.run())
+ };
+
+ config.build_scoped(main_handler, with_pool).unwrap()
+ })
+ })
+}
+
+fn load_backend_from_dylib(path: &Path) -> MakeBackendFn {
+ let lib = unsafe { Library::new(path) }.unwrap_or_else(|err| {
+ let err = format!("couldn't load codegen backend {:?}: {}", path, err);
+ early_error(ErrorOutputType::default(), &err);
+ });
+
+ let backend_sym = unsafe { lib.get::<MakeBackendFn>(b"__rustc_codegen_backend") }
+ .unwrap_or_else(|e| {
+ let err = format!("couldn't load codegen backend: {}", e);
+ early_error(ErrorOutputType::default(), &err);
+ });
+
+ // Intentionally leak the dynamic library. We can't ever unload it
+ // since the library can create values that live arbitrarily long.
+ let backend_sym = unsafe { backend_sym.into_raw() };
+ mem::forget(lib);
+
+ *backend_sym
+}
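+
+// For context (hypothetical out-of-tree crate, names assumed): a backend dylib
+// loaded by the function above exports the `__rustc_codegen_backend` symbol
+// with the `MakeBackendFn` signature, e.g.:
+//
+// #[no_mangle]
+// pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
+// Box::new(MyBackend)
+// }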
+
+/// Get the codegen backend based on the name and specified sysroot.
+///
+/// A name of `None` indicates that the default backend should be used.
+pub fn get_codegen_backend(
+ maybe_sysroot: &Option<PathBuf>,
+ backend_name: Option<&str>,
+) -> Box<dyn CodegenBackend> {
+ static LOAD: OnceLock<unsafe fn() -> Box<dyn CodegenBackend>> = OnceLock::new();
+
+ let load = LOAD.get_or_init(|| {
+ let default_codegen_backend = option_env!("CFG_DEFAULT_CODEGEN_BACKEND").unwrap_or("llvm");
+
+ match backend_name.unwrap_or(default_codegen_backend) {
+ filename if filename.contains('.') => load_backend_from_dylib(filename.as_ref()),
+ #[cfg(feature = "llvm")]
+ "llvm" => rustc_codegen_llvm::LlvmCodegenBackend::new,
+ backend_name => get_codegen_sysroot(maybe_sysroot, backend_name),
+ }
+ });
+
+ // SAFETY: In case of a builtin codegen backend this is safe. In case of an external codegen
+ // backend we hope that the backend links against the same rustc_driver version. If this is not
+ // the case, we get UB.
+ unsafe { load() }
+}
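+
+// Illustrative resolution of the dispatch above (flag values assumed):
+//
+// -Zcodegen-backend=/path/to/libfoo.so => contains '.', loaded as a dylib
+// -Zcodegen-backend=llvm => built-in LLVM backend constructor
+// -Zcodegen-backend=cranelift => searched for in the sysroot's
+// `codegen-backends` directory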
+
+// This is used for rustdoc, but it uses similar machinery to codegen backend
+// loading, so we leave the code here. It is potentially useful for other tools
+// that want to invoke the rustc binary while linking to rustc as well.
+pub fn rustc_path<'a>() -> Option<&'a Path> {
+ static RUSTC_PATH: OnceLock<Option<PathBuf>> = OnceLock::new();
+
+ const BIN_PATH: &str = env!("RUSTC_INSTALL_BINDIR");
+
+ RUSTC_PATH.get_or_init(|| get_rustc_path_inner(BIN_PATH)).as_ref().map(|v| &**v)
+}
+
+fn get_rustc_path_inner(bin_path: &str) -> Option<PathBuf> {
+ sysroot_candidates().iter().find_map(|sysroot| {
+ let candidate = sysroot.join(bin_path).join(if cfg!(target_os = "windows") {
+ "rustc.exe"
+ } else {
+ "rustc"
+ });
+ candidate.exists().then_some(candidate)
+ })
+}
+
+fn sysroot_candidates() -> Vec<PathBuf> {
+ let target = session::config::host_triple();
+ let mut sysroot_candidates = vec![filesearch::get_or_default_sysroot()];
+ let path = current_dll_path().and_then(|s| s.canonicalize().ok());
+ if let Some(dll) = path {
+ // use `parent` twice to chop off the file name and then also the
+ // directory containing the dll which should be either `lib` or `bin`.
+ if let Some(path) = dll.parent().and_then(|p| p.parent()) {
+ // The original `path` pointed at the `rustc_driver` crate's dll.
+ // Now that dll should only be in one of two locations. The first is
+ // in the compiler's libdir, for example `$sysroot/lib/*.dll`. The
+ // other is the target's libdir, for example
+ // `$sysroot/lib/rustlib/$target/lib/*.dll`.
+ //
+ // We don't know which, so let's assume that if our `path` above
+ // ends in `$target` we *could* be in the target libdir, and always
+ // assume that we may be in the main libdir.
+ sysroot_candidates.push(path.to_owned());
+
+ if path.ends_with(target) {
+ sysroot_candidates.extend(
+ path.parent() // chop off `$target`
+ .and_then(|p| p.parent()) // chop off `rustlib`
+ .and_then(|p| p.parent()) // chop off `lib`
+ .map(|s| s.to_owned()),
+ );
+ }
+ }
+ }
+
+ return sysroot_candidates;
+
+ #[cfg(unix)]
+ fn current_dll_path() -> Option<PathBuf> {
+ use std::ffi::{CStr, OsStr};
+ use std::os::unix::prelude::*;
+
+ unsafe {
+ let addr = current_dll_path as usize as *mut _;
+ let mut info = mem::zeroed();
+ if libc::dladdr(addr, &mut info) == 0 {
+ info!("dladdr failed");
+ return None;
+ }
+ if info.dli_fname.is_null() {
+ info!("dladdr returned null pointer");
+ return None;
+ }
+ let bytes = CStr::from_ptr(info.dli_fname).to_bytes();
+ let os = OsStr::from_bytes(bytes);
+ Some(PathBuf::from(os))
+ }
+ }
+
+ #[cfg(windows)]
+ fn current_dll_path() -> Option<PathBuf> {
+ use std::ffi::OsString;
+ use std::io;
+ use std::os::windows::prelude::*;
+ use std::ptr;
+
+ use winapi::um::libloaderapi::{
+ GetModuleFileNameW, GetModuleHandleExW, GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
+ };
+
+ unsafe {
+ let mut module = ptr::null_mut();
+ let r = GetModuleHandleExW(
+ GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS,
+ current_dll_path as usize as *mut _,
+ &mut module,
+ );
+ if r == 0 {
+ info!("GetModuleHandleExW failed: {}", io::Error::last_os_error());
+ return None;
+ }
+ let mut space = Vec::with_capacity(1024);
+ let r = GetModuleFileNameW(module, space.as_mut_ptr(), space.capacity() as u32);
+ if r == 0 {
+ info!("GetModuleFileNameW failed: {}", io::Error::last_os_error());
+ return None;
+ }
+ let r = r as usize;
+ if r >= space.capacity() {
+ info!("our buffer was too small? {}", io::Error::last_os_error());
+ return None;
+ }
+ space.set_len(r);
+ let os = OsString::from_wide(&space);
+ Some(PathBuf::from(os))
+ }
+ }
+}
+
+fn get_codegen_sysroot(maybe_sysroot: &Option<PathBuf>, backend_name: &str) -> MakeBackendFn {
+ // For now we only allow this function to be called once as it'll dlopen a
+ // few things, which seems to work best if we only do that once. In
+ // general this assertion never trips due to the once guard in `get_codegen_backend`,
+ // but there are a few manual calls to this function in this file we protect
+ // against.
+ static LOADED: AtomicBool = AtomicBool::new(false);
+ assert!(
+ !LOADED.fetch_or(true, Ordering::SeqCst),
+ "cannot load the default codegen backend twice"
+ );
+
+ let target = session::config::host_triple();
+ let sysroot_candidates = sysroot_candidates();
+
+ let sysroot = maybe_sysroot
+ .iter()
+ .chain(sysroot_candidates.iter())
+ .map(|sysroot| {
+ filesearch::make_target_lib_path(sysroot, target).with_file_name("codegen-backends")
+ })
+ .find(|f| {
+ info!("codegen backend candidate: {}", f.display());
+ f.exists()
+ });
+ let sysroot = sysroot.unwrap_or_else(|| {
+ let candidates = sysroot_candidates
+ .iter()
+ .map(|p| p.display().to_string())
+ .collect::<Vec<_>>()
+ .join("\n* ");
+ let err = format!(
+ "failed to find a `codegen-backends` folder \
+ in the sysroot candidates:\n* {}",
+ candidates
+ );
+ early_error(ErrorOutputType::default(), &err);
+ });
+ info!("probing {} for a codegen backend", sysroot.display());
+
+ let d = sysroot.read_dir().unwrap_or_else(|e| {
+ let err = format!(
+ "failed to load default codegen backend, couldn't \
+ read `{}`: {}",
+ sysroot.display(),
+ e
+ );
+ early_error(ErrorOutputType::default(), &err);
+ });
+
+ let mut file: Option<PathBuf> = None;
+
+ let expected_names = &[
+ format!("rustc_codegen_{}-{}", backend_name, release_str().expect("CFG_RELEASE")),
+ format!("rustc_codegen_{}", backend_name),
+ ];
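+ // Illustrative filenames the loop below accepts on Linux, assuming
+ // `backend_name` is "cranelift" and the release string is "1.64.0":
+ // librustc_codegen_cranelift-1.64.0.so
+ // librustc_codegen_cranelift.so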
+ for entry in d.filter_map(|e| e.ok()) {
+ let path = entry.path();
+ let Some(filename) = path.file_name().and_then(|s| s.to_str()) else { continue };
+ if !(filename.starts_with(DLL_PREFIX) && filename.ends_with(DLL_SUFFIX)) {
+ continue;
+ }
+ let name = &filename[DLL_PREFIX.len()..filename.len() - DLL_SUFFIX.len()];
+ if !expected_names.iter().any(|expected| expected == name) {
+ continue;
+ }
+ if let Some(ref prev) = file {
+ let err = format!(
+ "duplicate codegen backends found\n\
+ first: {}\n\
+ second: {}\n\
+ ",
+ prev.display(),
+ path.display()
+ );
+ early_error(ErrorOutputType::default(), &err);
+ }
+ file = Some(path.clone());
+ }
+
+ match file {
+ Some(ref s) => load_backend_from_dylib(s),
+ None => {
+ let err = format!("unsupported builtin codegen backend `{}`", backend_name);
+ early_error(ErrorOutputType::default(), &err);
+ }
+ }
+}
+
+pub(crate) fn check_attr_crate_type(
+ sess: &Session,
+ attrs: &[ast::Attribute],
+ lint_buffer: &mut LintBuffer,
+) {
+ // Unconditionally collect crate types from attributes to make them used
+ for a in attrs.iter() {
+ if a.has_name(sym::crate_type) {
+ if let Some(n) = a.value_str() {
+ if categorize_crate_type(n).is_some() {
+ return;
+ }
+
+ if let ast::MetaItemKind::NameValue(spanned) = a.meta_kind().unwrap() {
+ let span = spanned.span;
+ let lev_candidate = find_best_match_for_name(
+ &CRATE_TYPES.iter().map(|(k, _)| *k).collect::<Vec<_>>(),
+ n,
+ None,
+ );
+ if let Some(candidate) = lev_candidate {
+ lint_buffer.buffer_lint_with_diagnostic(
+ lint::builtin::UNKNOWN_CRATE_TYPES,
+ ast::CRATE_NODE_ID,
+ span,
+ "invalid `crate_type` value",
+ BuiltinLintDiagnostics::UnknownCrateTypes(
+ span,
+ "did you mean".to_string(),
+ format!("\"{}\"", candidate),
+ ),
+ );
+ } else {
+ lint_buffer.buffer_lint(
+ lint::builtin::UNKNOWN_CRATE_TYPES,
+ ast::CRATE_NODE_ID,
+ span,
+ "invalid `crate_type` value",
+ );
+ }
+ }
+ } else {
+ // This is here mainly to check for using a macro, such as
+ // #![crate_type = foo!()]. That is not supported since the
+ // crate type needs to be known very early in compilation long
+ // before expansion. Otherwise, validation would normally be
+ // caught in AstValidator (via `check_builtin_attribute`), but
+ // by the time that runs the macro is expanded, and it doesn't
+ // give an error.
+ validate_attr::emit_fatal_malformed_builtin_attribute(
+ &sess.parse_sess,
+ a,
+ sym::crate_type,
+ );
+ }
+ }
+ }
+}
+
+const CRATE_TYPES: &[(Symbol, CrateType)] = &[
+ (sym::rlib, CrateType::Rlib),
+ (sym::dylib, CrateType::Dylib),
+ (sym::cdylib, CrateType::Cdylib),
+ (sym::lib, config::default_lib_output()),
+ (sym::staticlib, CrateType::Staticlib),
+ (sym::proc_dash_macro, CrateType::ProcMacro),
+ (sym::bin, CrateType::Executable),
+];
+
+fn categorize_crate_type(s: Symbol) -> Option<CrateType> {
+ Some(CRATE_TYPES.iter().find(|(key, _)| *key == s)?.1)
+}
+
+pub fn collect_crate_types(session: &Session, attrs: &[ast::Attribute]) -> Vec<CrateType> {
+ // Unconditionally collect crate types from attributes to make them used
+ let attr_types: Vec<CrateType> = attrs
+ .iter()
+ .filter_map(|a| {
+ if a.has_name(sym::crate_type) {
+ match a.value_str() {
+ Some(s) => categorize_crate_type(s),
+ _ => None,
+ }
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ // If we're generating a test executable, then ignore all other output
+ // styles at all other locations
+ if session.opts.test {
+ return vec![CrateType::Executable];
+ }
+
+ // Only check command line flags if present. If no types are specified by
+ // command line, then reuse the empty `base` Vec to hold the types that
+ // will be found in crate attributes.
+ // JUSTIFICATION: before wrapper fn is available
+ #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+ let mut base = session.opts.crate_types.clone();
+ if base.is_empty() {
+ base.extend(attr_types);
+ if base.is_empty() {
+ base.push(output::default_output_for_target(session));
+ } else {
+ base.sort();
+ base.dedup();
+ }
+ }
+
+ base.retain(|crate_type| {
+ let res = !output::invalid_output_for_target(session, *crate_type);
+
+ if !res {
+ session.warn(&format!(
+ "dropping unsupported crate type `{}` for target `{}`",
+ *crate_type, session.opts.target_triple
+ ));
+ }
+
+ res
+ });
+
+ base
+}
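+
+// Illustrative precedence of the resolution above (invocations assumed):
+//
+// rustc --test foo.rs => [Executable], all other sources ignored
+// rustc --crate-type=lib,cdylib foo.rs => command line wins over attributes
+// #![crate_type = "dylib"] => used only when --crate-type is absent
+// neither present => the target's default output type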
+
+pub fn build_output_filenames(
+ input: &Input,
+ odir: &Option<PathBuf>,
+ ofile: &Option<PathBuf>,
+ temps_dir: &Option<PathBuf>,
+ attrs: &[ast::Attribute],
+ sess: &Session,
+) -> OutputFilenames {
+ match *ofile {
+ None => {
+ // "-" as input file will cause the parser to read from stdin so we
+ // have to make up a name
+ // We want to toss everything after the final '.'
+ let dirpath = (*odir).as_ref().cloned().unwrap_or_default();
+
+ // If a crate name is present, we use it as the link name
+ let stem = sess
+ .opts
+ .crate_name
+ .clone()
+ .or_else(|| rustc_attr::find_crate_name(sess, attrs).map(|n| n.to_string()))
+ .unwrap_or_else(|| input.filestem().to_owned());
+
+ OutputFilenames::new(
+ dirpath,
+ stem,
+ None,
+ temps_dir.clone(),
+ sess.opts.cg.extra_filename.clone(),
+ sess.opts.output_types.clone(),
+ )
+ }
+
+ Some(ref out_file) => {
+ let unnamed_output_types =
+ sess.opts.output_types.values().filter(|a| a.is_none()).count();
+ let ofile = if unnamed_output_types > 1 {
+ sess.warn(
+ "due to multiple output types requested, the explicitly specified \
+ output file name will be adapted for each output type",
+ );
+ None
+ } else {
+ if !sess.opts.cg.extra_filename.is_empty() {
+ sess.warn("ignoring -C extra-filename flag due to -o flag");
+ }
+ Some(out_file.clone())
+ };
+ if *odir != None {
+ sess.warn("ignoring --out-dir flag due to -o flag");
+ }
+
+ OutputFilenames::new(
+ out_file.parent().unwrap_or_else(|| Path::new("")).to_path_buf(),
+ out_file.file_stem().unwrap_or_default().to_str().unwrap().to_string(),
+ ofile,
+ temps_dir.clone(),
+ sess.opts.cg.extra_filename.clone(),
+ sess.opts.output_types.clone(),
+ )
+ }
+ }
+}
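+
+// Illustrative examples of the resolution above (invocations assumed):
+//
+// rustc foo.rs => stem "foo", outputs in the current directory
+// rustc foo.rs --out-dir build => stem "foo", outputs under build/
+// rustc foo.rs -o build/bar => explicit output file; --out-dir, if also
+// given, is ignored with a warning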
+
+/// Returns a version string such as "1.46.0 (04488afe3 2020-08-24)"
+pub fn version_str() -> Option<&'static str> {
+ option_env!("CFG_VERSION")
+}
+
+/// Returns a version string such as "0.12.0-dev".
+pub fn release_str() -> Option<&'static str> {
+ option_env!("CFG_RELEASE")
+}
+
+/// Returns the full SHA1 hash of HEAD of the Git repo from which rustc was built.
+pub fn commit_hash_str() -> Option<&'static str> {
+ option_env!("CFG_VER_HASH")
+}
+
+/// Returns the "commit date" of HEAD of the Git repo from which rustc was built as a static string.
+pub fn commit_date_str() -> Option<&'static str> {
+ option_env!("CFG_VER_DATE")
+}
diff --git a/compiler/rustc_lexer/Cargo.toml b/compiler/rustc_lexer/Cargo.toml
new file mode 100644
index 000000000..35af11053
--- /dev/null
+++ b/compiler/rustc_lexer/Cargo.toml
@@ -0,0 +1,23 @@
+[package]
+name = "rustc_lexer"
+version = "0.1.0"
+license = "MIT OR Apache-2.0"
+edition = "2021"
+
+repository = "https://github.com/rust-lang/rust/"
+description = """
+Rust lexer used by rustc. No stability guarantees are provided.
+"""
+
+# Note: do not remove this blank `[lib]` section.
+# This will be used when publishing this crate as `rustc-ap-rustc_lexer`.
+[lib]
+doctest = false
+
+# Note that this crate purposefully does not depend on other rustc crates
+[dependencies]
+unicode-xid = "0.2.0"
+unic-emoji-char = "0.9.0"
+
+[dev-dependencies]
+expect-test = "1.0"
diff --git a/compiler/rustc_lexer/src/cursor.rs b/compiler/rustc_lexer/src/cursor.rs
new file mode 100644
index 000000000..21557a9c8
--- /dev/null
+++ b/compiler/rustc_lexer/src/cursor.rs
@@ -0,0 +1,93 @@
+use std::str::Chars;
+
+/// Peekable iterator over a char sequence.
+///
+/// Next characters can be peeked via the `first` method,
+/// and the position can be shifted forward via the `bump` method.
+pub(crate) struct Cursor<'a> {
+ initial_len: usize,
+ /// Iterator over chars. Slightly faster than a &str.
+ chars: Chars<'a>,
+ #[cfg(debug_assertions)]
+ prev: char,
+}
+
+pub(crate) const EOF_CHAR: char = '\0';
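+
+// Illustrative peek/bump semantics (sketch, not upstream code):
+//
+// let mut c = Cursor::new("ab");
+// assert_eq!(c.first(), 'a'); // peek; nothing is consumed
+// assert_eq!(c.second(), 'b');
+// assert_eq!(c.bump(), Some('a')); // consume one char
+// assert_eq!(c.first(), 'b');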
+
+impl<'a> Cursor<'a> {
+ pub(crate) fn new(input: &'a str) -> Cursor<'a> {
+ Cursor {
+ initial_len: input.len(),
+ chars: input.chars(),
+ #[cfg(debug_assertions)]
+ prev: EOF_CHAR,
+ }
+ }
+
+ /// Returns the last eaten symbol (or `'\0'` in release builds).
+ /// (For debug assertions only.)
+ pub(crate) fn prev(&self) -> char {
+ #[cfg(debug_assertions)]
+ {
+ self.prev
+ }
+
+ #[cfg(not(debug_assertions))]
+ {
+ EOF_CHAR
+ }
+ }
+
+ /// Peeks the next symbol from the input stream without consuming it.
+ /// If the requested position doesn't exist, `EOF_CHAR` is returned.
+ /// However, getting `EOF_CHAR` doesn't always mean the actual end of file;
+ /// that should be checked with the `is_eof` method.
+ pub(crate) fn first(&self) -> char {
+ // `.next()` optimizes better than `.nth(0)`
+ self.chars.clone().next().unwrap_or(EOF_CHAR)
+ }
+
+ /// Peeks the second symbol from the input stream without consuming it.
+ pub(crate) fn second(&self) -> char {
+ // `.next()` optimizes better than `.nth(1)`
+ let mut iter = self.chars.clone();
+ iter.next();
+ iter.next().unwrap_or(EOF_CHAR)
+ }
+
+ /// Checks if there is nothing more to consume.
+ pub(crate) fn is_eof(&self) -> bool {
+ self.chars.as_str().is_empty()
+ }
+
+ /// Returns the number of already consumed symbols.
+ pub(crate) fn len_consumed(&self) -> u32 {
+ (self.initial_len - self.chars.as_str().len()) as u32
+ }
+
+ /// Resets the number of bytes consumed to 0.
+ pub(crate) fn reset_len_consumed(&mut self) {
+ self.initial_len = self.chars.as_str().len();
+ }
+
+ /// Moves to the next character.
+ pub(crate) fn bump(&mut self) -> Option<char> {
+ let c = self.chars.next()?;
+
+ #[cfg(debug_assertions)]
+ {
+ self.prev = c;
+ }
+
+ Some(c)
+ }
+
+ /// Eats symbols while predicate returns true or until the end of file is reached.
+ pub(crate) fn eat_while(&mut self, mut predicate: impl FnMut(char) -> bool) {
+ // We tried making an optimized version of this for e.g. line comments, but
+ // LLVM can inline all of this and compile it down to fast iteration over bytes.
+ while predicate(self.first()) && !self.is_eof() {
+ self.bump();
+ }
+ }
+}
diff --git a/compiler/rustc_lexer/src/lib.rs b/compiler/rustc_lexer/src/lib.rs
new file mode 100644
index 000000000..6d311af90
--- /dev/null
+++ b/compiler/rustc_lexer/src/lib.rs
@@ -0,0 +1,843 @@
+//! Low-level Rust lexer.
+//!
+//! The idea with `rustc_lexer` is to make a reusable library,
+//! by separating out pure lexing and rustc-specific concerns, like spans,
+//! error reporting, and interning. So, rustc_lexer operates directly on `&str`,
+//! produces simple tokens which are a pair of type-tag and a bit of original text,
+//! and does not report errors, instead storing them as flags on the token.
+//!
+//! Tokens produced by this lexer are not yet ready for parsing the Rust syntax.
+//! For that see [`rustc_parse::lexer`], which converts this basic token stream
+//! into wide tokens used by the actual parser.
+//!
+//! The purpose of this crate is to convert raw sources into a labeled sequence
+//! of well-known token types, so building an actual Rust token stream will
+//! be easier.
+//!
+//! The main entity of this crate is the [`TokenKind`] enum which represents common
+//! lexeme types.
+//!
+//! [`rustc_parse::lexer`]: ../rustc_parse/lexer/index.html
+// We want to be able to build this crate with a stable compiler, so no
+// `#![feature]` attributes should be added.
+
+mod cursor;
+pub mod unescape;
+
+#[cfg(test)]
+mod tests;
+
+use self::LiteralKind::*;
+use self::TokenKind::*;
+use crate::cursor::{Cursor, EOF_CHAR};
+use std::convert::TryFrom;
+
+/// Parsed token.
+/// It doesn't contain information about the data that has been parsed;
+/// only the type of the token and its size.
+#[derive(Debug)]
+pub struct Token {
+ pub kind: TokenKind,
+ pub len: u32,
+}
+
+impl Token {
+ fn new(kind: TokenKind, len: u32) -> Token {
+ Token { kind, len }
+ }
+}
+
+/// Enum representing common lexeme types.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum TokenKind {
+ // Multi-char tokens:
+ /// "// comment"
+ LineComment { doc_style: Option<DocStyle> },
+ /// `/* block comment */`
+ ///
+ /// Block comments can be recursive, so the sequence like `/* /* */`
+ /// will not be considered terminated and will result in a parsing error.
+ BlockComment { doc_style: Option<DocStyle>, terminated: bool },
+ /// Any whitespace character sequence.
+ Whitespace,
+ /// "ident" or "continue"
+ /// At this step keywords are also considered identifiers.
+ Ident,
+ /// Like the above, but containing invalid Unicode codepoints.
+ InvalidIdent,
+ /// "r#ident"
+ RawIdent,
+ /// An unknown prefix like `foo#`, `foo'`, `foo"`. Note that only the
+ /// prefix (`foo`) is included in the token, not the separator (which is
+ /// lexed as its own distinct token). In Rust 2021 and later, reserved
+ /// prefixes are reported as errors; in earlier editions, they result in a
+ /// (allowed by default) lint, and are treated as regular identifier
+ /// tokens.
+ UnknownPrefix,
+ /// "12_u8", "1.0e-40", "b"123"". See `LiteralKind` for more details.
+ Literal { kind: LiteralKind, suffix_start: u32 },
+ /// "'a"
+ Lifetime { starts_with_number: bool },
+
+ // One-char tokens:
+ /// ";"
+ Semi,
+ /// ","
+ Comma,
+ /// "."
+ Dot,
+ /// "("
+ OpenParen,
+ /// ")"
+ CloseParen,
+ /// "{"
+ OpenBrace,
+ /// "}"
+ CloseBrace,
+ /// "["
+ OpenBracket,
+ /// "]"
+ CloseBracket,
+ /// "@"
+ At,
+ /// "#"
+ Pound,
+ /// "~"
+ Tilde,
+ /// "?"
+ Question,
+ /// ":"
+ Colon,
+ /// "$"
+ Dollar,
+ /// "="
+ Eq,
+ /// "!"
+ Bang,
+ /// "<"
+ Lt,
+ /// ">"
+ Gt,
+ /// "-"
+ Minus,
+ /// "&"
+ And,
+ /// "|"
+ Or,
+ /// "+"
+ Plus,
+ /// "*"
+ Star,
+ /// "/"
+ Slash,
+ /// "^"
+ Caret,
+ /// "%"
+ Percent,
+
+ /// Unknown token, not expected by the lexer, e.g. "№"
+ Unknown,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum DocStyle {
+ Outer,
+ Inner,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum LiteralKind {
+ /// "12_u8", "0o100", "0b120i99"
+ Int { base: Base, empty_int: bool },
+ /// "12.34f32", "0b100.100"
+ Float { base: Base, empty_exponent: bool },
+ /// "'a'", "'\\'", "'''", "';"
+ Char { terminated: bool },
+ /// "b'a'", "b'\\'", "b'''", "b';"
+ Byte { terminated: bool },
+ /// ""abc"", ""abc"
+ Str { terminated: bool },
+ /// "b"abc"", "b"abc"
+ ByteStr { terminated: bool },
+ /// "r"abc"", "r#"abc"#", "r####"ab"###"c"####", "r#"a". `None` indicates
+ /// an invalid literal.
+ RawStr { n_hashes: Option<u8> },
+ /// "br"abc"", "br#"abc"#", "br####"ab"###"c"####", "br#"a". `None`
+ /// indicates an invalid literal.
+ RawByteStr { n_hashes: Option<u8> },
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum RawStrError {
+ /// Non `#` characters exist between `r` and `"`, e.g. `r##~"abcde"##`
+ InvalidStarter { bad_char: char },
+ /// The string was not terminated, e.g. `r###"abcde"##`.
+ /// `possible_terminator_offset` is the number of characters after `r` or
+ /// `br` where they may have intended to terminate it.
+ NoTerminator { expected: u32, found: u32, possible_terminator_offset: Option<u32> },
+ /// More than 255 `#`s exist.
+ TooManyDelimiters { found: u32 },
+}
+
+/// Base of numeric literal encoding according to its prefix.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Base {
+ /// Literal starts with "0b".
+ Binary,
+ /// Literal starts with "0o".
+ Octal,
+ /// Literal starts with "0x".
+ Hexadecimal,
+ /// Literal doesn't contain a prefix.
+ Decimal,
+}
+
+/// `rustc` allows files to have a shebang, e.g. "#!/usr/bin/rustrun",
+/// but a shebang isn't part of Rust syntax.
+pub fn strip_shebang(input: &str) -> Option<usize> {
+ // Shebang must start with `#!` literally, without any preceding whitespace.
+ // For simplicity we consider any line starting with `#!` a shebang,
+ // regardless of restrictions put on shebangs by specific platforms.
+ if let Some(input_tail) = input.strip_prefix("#!") {
+ // Ok, this is a shebang but if the next non-whitespace token is `[`,
+ // then it may be valid Rust code, so consider it Rust code.
+ let next_non_whitespace_token = tokenize(input_tail).map(|tok| tok.kind).find(|tok| {
+ !matches!(
+ tok,
+ TokenKind::Whitespace
+ | TokenKind::LineComment { doc_style: None }
+ | TokenKind::BlockComment { doc_style: None, .. }
+ )
+ });
+ if next_non_whitespace_token != Some(TokenKind::OpenBracket) {
+ // No other choice than to consider this a shebang.
+ return Some(2 + input_tail.lines().next().unwrap_or_default().len());
+ }
+ }
+ None
+}
+
+/// Parses the first token from the provided input string.
+#[inline]
+pub fn first_token(input: &str) -> Token {
+ debug_assert!(!input.is_empty());
+ Cursor::new(input).advance_token()
+}
+
+/// Validates a raw string literal. Used for getting more information about a
+/// problem with a `RawStr`/`RawByteStr` with a `None` field.
+#[inline]
+pub fn validate_raw_str(input: &str, prefix_len: u32) -> Result<(), RawStrError> {
+ debug_assert!(!input.is_empty());
+ let mut cursor = Cursor::new(input);
+ // Move past the leading `r` or `br`.
+ for _ in 0..prefix_len {
+ cursor.bump().unwrap();
+ }
+ cursor.raw_double_quoted_string(prefix_len).map(|_| ())
+}
+
+/// Creates an iterator that produces tokens from the input string.
+pub fn tokenize(input: &str) -> impl Iterator<Item = Token> + '_ {
+ let mut cursor = Cursor::new(input);
+ std::iter::from_fn(move || {
+ if cursor.is_eof() {
+ None
+ } else {
+ cursor.reset_len_consumed();
+ Some(cursor.advance_token())
+ }
+ })
+}
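+
+// Illustrative usage sketch (the kinds and lengths shown are what the lexer
+// produces for this input):
+//
+// for token in tokenize("let x = 5;") {
+// println!("{:?}", token);
+// }
+// // Ident(3) Whitespace(1) Ident(1) Whitespace(1) Eq(1) Whitespace(1)
+// // Literal { kind: Int { .. }, .. }(1) Semi(1)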
+
+/// True if `c` is considered whitespace according to the Rust language definition.
+/// See [Rust language reference](https://doc.rust-lang.org/reference/whitespace.html)
+/// for definitions of these classes.
+pub fn is_whitespace(c: char) -> bool {
+ // This is Pattern_White_Space.
+ //
+ // Note that this set is stable (ie, it doesn't change with different
+ // Unicode versions), so it's ok to just hard-code the values.
+
+ matches!(
+ c,
+ // Usual ASCII suspects
+ '\u{0009}' // \t
+ | '\u{000A}' // \n
+ | '\u{000B}' // vertical tab
+ | '\u{000C}' // form feed
+ | '\u{000D}' // \r
+ | '\u{0020}' // space
+
+ // NEXT LINE from latin1
+ | '\u{0085}'
+
+ // Bidi markers
+ | '\u{200E}' // LEFT-TO-RIGHT MARK
+ | '\u{200F}' // RIGHT-TO-LEFT MARK
+
+ // Dedicated whitespace characters from Unicode
+ | '\u{2028}' // LINE SEPARATOR
+ | '\u{2029}' // PARAGRAPH SEPARATOR
+ )
+}
+
+/// True if `c` is valid as a first character of an identifier.
+/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
+/// a formal definition of a valid identifier name.
+pub fn is_id_start(c: char) -> bool {
+ // This is XID_Start OR '_' (which formally is not a XID_Start).
+ c == '_' || unicode_xid::UnicodeXID::is_xid_start(c)
+}
+
+/// True if `c` is valid as a non-first character of an identifier.
+/// See [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html) for
+/// a formal definition of a valid identifier name.
+pub fn is_id_continue(c: char) -> bool {
+ unicode_xid::UnicodeXID::is_xid_continue(c)
+}
+
+/// True if the passed string is lexically an identifier.
+pub fn is_ident(string: &str) -> bool {
+ let mut chars = string.chars();
+ if let Some(start) = chars.next() {
+ is_id_start(start) && chars.all(is_id_continue)
+ } else {
+ false
+ }
+}
+
+impl Cursor<'_> {
+ /// Parses a token from the input string.
+ fn advance_token(&mut self) -> Token {
+ let first_char = self.bump().unwrap();
+ let token_kind = match first_char {
+ // Slash, comment or block comment.
+ '/' => match self.first() {
+ '/' => self.line_comment(),
+ '*' => self.block_comment(),
+ _ => Slash,
+ },
+
+ // Whitespace sequence.
+ c if is_whitespace(c) => self.whitespace(),
+
+ // Raw identifier, raw string literal or identifier.
+ 'r' => match (self.first(), self.second()) {
+ ('#', c1) if is_id_start(c1) => self.raw_ident(),
+ ('#', _) | ('"', _) => {
+ let res = self.raw_double_quoted_string(1);
+ let suffix_start = self.len_consumed();
+ if res.is_ok() {
+ self.eat_literal_suffix();
+ }
+ let kind = RawStr { n_hashes: res.ok() };
+ Literal { kind, suffix_start }
+ }
+ _ => self.ident_or_unknown_prefix(),
+ },
+
+ // Byte literal, byte string literal, raw byte string literal or identifier.
+ 'b' => match (self.first(), self.second()) {
+ ('\'', _) => {
+ self.bump();
+ let terminated = self.single_quoted_string();
+ let suffix_start = self.len_consumed();
+ if terminated {
+ self.eat_literal_suffix();
+ }
+ let kind = Byte { terminated };
+ Literal { kind, suffix_start }
+ }
+ ('"', _) => {
+ self.bump();
+ let terminated = self.double_quoted_string();
+ let suffix_start = self.len_consumed();
+ if terminated {
+ self.eat_literal_suffix();
+ }
+ let kind = ByteStr { terminated };
+ Literal { kind, suffix_start }
+ }
+ ('r', '"') | ('r', '#') => {
+ self.bump();
+ let res = self.raw_double_quoted_string(2);
+ let suffix_start = self.len_consumed();
+ if res.is_ok() {
+ self.eat_literal_suffix();
+ }
+ let kind = RawByteStr { n_hashes: res.ok() };
+ Literal { kind, suffix_start }
+ }
+ _ => self.ident_or_unknown_prefix(),
+ },
+
+ // Identifier (this should be checked after other variant that can
+ // start as identifier).
+ c if is_id_start(c) => self.ident_or_unknown_prefix(),
+
+ // Numeric literal.
+ c @ '0'..='9' => {
+ let literal_kind = self.number(c);
+ let suffix_start = self.len_consumed();
+ self.eat_literal_suffix();
+ TokenKind::Literal { kind: literal_kind, suffix_start }
+ }
+
+ // One-symbol tokens.
+ ';' => Semi,
+ ',' => Comma,
+ '.' => Dot,
+ '(' => OpenParen,
+ ')' => CloseParen,
+ '{' => OpenBrace,
+ '}' => CloseBrace,
+ '[' => OpenBracket,
+ ']' => CloseBracket,
+ '@' => At,
+ '#' => Pound,
+ '~' => Tilde,
+ '?' => Question,
+ ':' => Colon,
+ '$' => Dollar,
+ '=' => Eq,
+ '!' => Bang,
+ '<' => Lt,
+ '>' => Gt,
+ '-' => Minus,
+ '&' => And,
+ '|' => Or,
+ '+' => Plus,
+ '*' => Star,
+ '^' => Caret,
+ '%' => Percent,
+
+ // Lifetime or character literal.
+ '\'' => self.lifetime_or_char(),
+
+ // String literal.
+ '"' => {
+ let terminated = self.double_quoted_string();
+ let suffix_start = self.len_consumed();
+ if terminated {
+ self.eat_literal_suffix();
+ }
+ let kind = Str { terminated };
+ Literal { kind, suffix_start }
+ }
+ // Identifier starting with an emoji. Only lexed for graceful error recovery.
+ c if !c.is_ascii() && unic_emoji_char::is_emoji(c) => {
+ self.fake_ident_or_unknown_prefix()
+ }
+ _ => Unknown,
+ };
+ Token::new(token_kind, self.len_consumed())
+ }
+
+ fn line_comment(&mut self) -> TokenKind {
+ debug_assert!(self.prev() == '/' && self.first() == '/');
+ self.bump();
+
+ let doc_style = match self.first() {
+ // `//!` is an inner line doc comment.
+ '!' => Some(DocStyle::Inner),
+ // `////` (more than 3 slashes) is not considered a doc comment.
+ '/' if self.second() != '/' => Some(DocStyle::Outer),
+ _ => None,
+ };
+
+ self.eat_while(|c| c != '\n');
+ LineComment { doc_style }
+ }
+
+ fn block_comment(&mut self) -> TokenKind {
+ debug_assert!(self.prev() == '/' && self.first() == '*');
+ self.bump();
+
+ let doc_style = match self.first() {
+ // `/*!` is an inner block doc comment.
+ '!' => Some(DocStyle::Inner),
+ // `/***` (more than 2 stars) is not considered a doc comment.
+ // `/**/` is not considered a doc comment.
+ '*' if !matches!(self.second(), '*' | '/') => Some(DocStyle::Outer),
+ _ => None,
+ };
+
+ let mut depth = 1usize;
+ while let Some(c) = self.bump() {
+ match c {
+ '/' if self.first() == '*' => {
+ self.bump();
+ depth += 1;
+ }
+ '*' if self.first() == '/' => {
+ self.bump();
+ depth -= 1;
+ if depth == 0 {
+ // This block comment is closed, so for a construction like "/* */ */"
+ // there will be a successfully parsed block comment "/* */"
+ // and " */" will be processed separately.
+ break;
+ }
+ }
+ _ => (),
+ }
+ }
+
+ BlockComment { doc_style, terminated: depth == 0 }
+ }
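+
+ // Illustrative depth trace for the nesting logic above: lexing `/* /* */ */`
+ // goes 1 -> 2 at the inner `/*` -> 1 at the first `*/` -> 0 at the second,
+ // so the comment is terminated; for `/* /* */` the input ends at depth 1,
+ // i.e. `terminated: false`.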
+
+ fn whitespace(&mut self) -> TokenKind {
+ debug_assert!(is_whitespace(self.prev()));
+ self.eat_while(is_whitespace);
+ Whitespace
+ }
+
+ fn raw_ident(&mut self) -> TokenKind {
+ debug_assert!(self.prev() == 'r' && self.first() == '#' && is_id_start(self.second()));
+ // Eat "#" symbol.
+ self.bump();
+ // Eat the identifier part of RawIdent.
+ self.eat_identifier();
+ RawIdent
+ }
+
+ fn ident_or_unknown_prefix(&mut self) -> TokenKind {
+ debug_assert!(is_id_start(self.prev()));
+ // Start is already eaten, eat the rest of identifier.
+ self.eat_while(is_id_continue);
+ // Known prefixes must have been handled earlier. So if
+ // we see a prefix here, it is definitely an unknown prefix.
+ match self.first() {
+ '#' | '"' | '\'' => UnknownPrefix,
+ c if !c.is_ascii() && unic_emoji_char::is_emoji(c) => {
+ self.fake_ident_or_unknown_prefix()
+ }
+ _ => Ident,
+ }
+ }
+
+ fn fake_ident_or_unknown_prefix(&mut self) -> TokenKind {
+ // Start is already eaten, eat the rest of identifier.
+ self.eat_while(|c| {
+ unicode_xid::UnicodeXID::is_xid_continue(c)
+ || (!c.is_ascii() && unic_emoji_char::is_emoji(c))
+ || c == '\u{200d}'
+ });
+ // Known prefixes must have been handled earlier. So if
+ // we see a prefix here, it is definitely an unknown prefix.
+ match self.first() {
+ '#' | '"' | '\'' => UnknownPrefix,
+ _ => InvalidIdent,
+ }
+ }
+
+ fn number(&mut self, first_digit: char) -> LiteralKind {
+ debug_assert!('0' <= self.prev() && self.prev() <= '9');
+ let mut base = Base::Decimal;
+ if first_digit == '0' {
+ // Attempt to parse encoding base.
+ let has_digits = match self.first() {
+ 'b' => {
+ base = Base::Binary;
+ self.bump();
+ self.eat_decimal_digits()
+ }
+ 'o' => {
+ base = Base::Octal;
+ self.bump();
+ self.eat_decimal_digits()
+ }
+ 'x' => {
+ base = Base::Hexadecimal;
+ self.bump();
+ self.eat_hexadecimal_digits()
+ }
+ // Not a base prefix.
+ '0'..='9' | '_' | '.' | 'e' | 'E' => {
+ self.eat_decimal_digits();
+ true
+ }
+ // Just a 0.
+ _ => return Int { base, empty_int: false },
+ };
+ // Base prefix was provided, but there were no digits
+ // after it, e.g. "0x".
+ if !has_digits {
+ return Int { base, empty_int: true };
+ }
+ } else {
+ // No base prefix, parse number in the usual way.
+ self.eat_decimal_digits();
+ };
+
+ match self.first() {
+ // Don't be greedy if this is actually an
+ // integer literal followed by field/method access or a range pattern
+ // (`0..2` and `12.foo()`)
+ '.' if self.second() != '.' && !is_id_start(self.second()) => {
+ // might have stuff after the ., and if it does, it needs to start
+ // with a number
+ self.bump();
+ let mut empty_exponent = false;
+ if self.first().is_digit(10) {
+ self.eat_decimal_digits();
+ match self.first() {
+ 'e' | 'E' => {
+ self.bump();
+ empty_exponent = !self.eat_float_exponent();
+ }
+ _ => (),
+ }
+ }
+ Float { base, empty_exponent }
+ }
+ 'e' | 'E' => {
+ self.bump();
+ let empty_exponent = !self.eat_float_exponent();
+ Float { base, empty_exponent }
+ }
+ _ => Int { base, empty_int: false },
+ }
+ }
+
+ fn lifetime_or_char(&mut self) -> TokenKind {
+ debug_assert!(self.prev() == '\'');
+
+ let can_be_a_lifetime = if self.second() == '\'' {
+ // It's surely not a lifetime.
+ false
+ } else {
+ // If the first symbol is valid for identifier, it can be a lifetime.
+ // Also check if it's a number, for better error reporting (so '0 will
+ // be reported as an invalid lifetime and not as an unterminated char literal).
+ is_id_start(self.first()) || self.first().is_digit(10)
+ };
+
+ if !can_be_a_lifetime {
+ let terminated = self.single_quoted_string();
+ let suffix_start = self.len_consumed();
+ if terminated {
+ self.eat_literal_suffix();
+ }
+ let kind = Char { terminated };
+ return Literal { kind, suffix_start };
+ }
+
+ // Either a lifetime or a character literal with
+ // length greater than 1.
+
+ let starts_with_number = self.first().is_digit(10);
+
+ // Skip the literal contents.
+ // First symbol can be a number (which isn't a valid identifier start),
+ // so skip it without any checks.
+ self.bump();
+ self.eat_while(is_id_continue);
+
+ // Check if after skipping literal contents we've met a closing
+ // single quote (which means that user attempted to create a
+ // string with single quotes).
+ if self.first() == '\'' {
+ self.bump();
+ let kind = Char { terminated: true };
+ Literal { kind, suffix_start: self.len_consumed() }
+ } else {
+ Lifetime { starts_with_number }
+ }
+ }
+
+ fn single_quoted_string(&mut self) -> bool {
+ debug_assert!(self.prev() == '\'');
+ // Check if it's a one-symbol literal.
+ if self.second() == '\'' && self.first() != '\\' {
+ self.bump();
+ self.bump();
+ return true;
+ }
+
+ // Literal has more than one symbol.
+
+ // Parse until either quotes are terminated or error is detected.
+ loop {
+ match self.first() {
+ // Quotes are terminated, finish parsing.
+ '\'' => {
+ self.bump();
+ return true;
+ }
+ // Probably the beginning of a comment, which we don't want to
+ // include in the error report.
+ '/' => break,
+ // Newline without following '\'' means unclosed quote, stop parsing.
+ '\n' if self.second() != '\'' => break,
+ // End of file, stop parsing.
+ EOF_CHAR if self.is_eof() => break,
+ // Escaped slash is considered one character, so bump twice.
+ '\\' => {
+ self.bump();
+ self.bump();
+ }
+ // Skip the character.
+ _ => {
+ self.bump();
+ }
+ }
+ }
+ // String was not terminated.
+ false
+ }
+
+ /// Eats a double-quoted string and returns true
+ /// if the string is terminated.
+ fn double_quoted_string(&mut self) -> bool {
+ debug_assert!(self.prev() == '"');
+ while let Some(c) = self.bump() {
+ match c {
+ '"' => {
+ return true;
+ }
+ '\\' if self.first() == '\\' || self.first() == '"' => {
+ // Bump again to skip escaped character.
+ self.bump();
+ }
+ _ => (),
+ }
+ }
+ // End of file reached.
+ false
+ }
+
+ /// Eats the double-quoted string and returns `n_hashes` and an error if encountered.
+ fn raw_double_quoted_string(&mut self, prefix_len: u32) -> Result<u8, RawStrError> {
+ // Wrap the actual function to handle the error with too many hashes.
+ // This way, it eats the whole raw string.
+ let n_hashes = self.raw_string_unvalidated(prefix_len)?;
+ // Only up to 255 `#`s are allowed in raw strings
+ match u8::try_from(n_hashes) {
+ Ok(num) => Ok(num),
+ Err(_) => Err(RawStrError::TooManyDelimiters { found: n_hashes }),
+ }
+ }
+
+ fn raw_string_unvalidated(&mut self, prefix_len: u32) -> Result<u32, RawStrError> {
+ debug_assert!(self.prev() == 'r');
+ let start_pos = self.len_consumed();
+ let mut possible_terminator_offset = None;
+ let mut max_hashes = 0;
+
+ // Count opening '#' symbols.
+ let mut eaten = 0;
+ while self.first() == '#' {
+ eaten += 1;
+ self.bump();
+ }
+ let n_start_hashes = eaten;
+
+ // Check that the string has started.
+ match self.bump() {
+ Some('"') => (),
+ c => {
+ let c = c.unwrap_or(EOF_CHAR);
+ return Err(RawStrError::InvalidStarter { bad_char: c });
+ }
+ }
+
+ // Skip the string contents and on each '#' character met, check if this is
+ // a raw string termination.
+ loop {
+ self.eat_while(|c| c != '"');
+
+ if self.is_eof() {
+ return Err(RawStrError::NoTerminator {
+ expected: n_start_hashes,
+ found: max_hashes,
+ possible_terminator_offset,
+ });
+ }
+
+ // Eat closing double quote.
+ self.bump();
+
+ // Check that the number of closing '#' symbols
+ // equals the number of opening ones.
+ // Note that this will not consume extra trailing `#` characters:
+ // `r###"abcde"####` is lexed as a `RawStr { n_hashes: 3 }`
+ // followed by a `#` token.
+ let mut n_end_hashes = 0;
+ while self.first() == '#' && n_end_hashes < n_start_hashes {
+ n_end_hashes += 1;
+ self.bump();
+ }
+
+ if n_end_hashes == n_start_hashes {
+ return Ok(n_start_hashes);
+ } else if n_end_hashes > max_hashes {
+ // Keep track of possible terminators to give a hint about
+ // where there might be a missing terminator
+ possible_terminator_offset =
+ Some(self.len_consumed() - start_pos - n_end_hashes + prefix_len);
+ max_hashes = n_end_hashes;
+ }
+ }
+ }
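+
+ // Illustrative trace of the loop above for `r##"a"#"##`: two opening `#`s
+ // are counted, then `"` starts the string; at the first inner `"` only one
+ // closing `#` follows (max_hashes becomes 1), at the second `"` two `#`s
+ // follow, so n_end_hashes == n_start_hashes and the result is Ok(2).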
+
+ fn eat_decimal_digits(&mut self) -> bool {
+ let mut has_digits = false;
+ loop {
+ match self.first() {
+ '_' => {
+ self.bump();
+ }
+ '0'..='9' => {
+ has_digits = true;
+ self.bump();
+ }
+ _ => break,
+ }
+ }
+ has_digits
+ }
+
+ fn eat_hexadecimal_digits(&mut self) -> bool {
+ let mut has_digits = false;
+ loop {
+ match self.first() {
+ '_' => {
+ self.bump();
+ }
+ '0'..='9' | 'a'..='f' | 'A'..='F' => {
+ has_digits = true;
+ self.bump();
+ }
+ _ => break,
+ }
+ }
+ has_digits
+ }
+
+ /// Eats the float exponent. Returns true if at least one digit was
+ /// consumed, and false otherwise.
+ fn eat_float_exponent(&mut self) -> bool {
+ debug_assert!(self.prev() == 'e' || self.prev() == 'E');
+ if self.first() == '-' || self.first() == '+' {
+ self.bump();
+ }
+ self.eat_decimal_digits()
+ }
+
+ // Eats the suffix of the literal, e.g. "_u8".
+ fn eat_literal_suffix(&mut self) {
+ self.eat_identifier();
+ }
+
+ // Eats the identifier.
+ fn eat_identifier(&mut self) {
+ if !is_id_start(self.first()) {
+ return;
+ }
+ self.bump();
+
+ self.eat_while(is_id_continue);
+ }
+}
diff --git a/compiler/rustc_lexer/src/tests.rs b/compiler/rustc_lexer/src/tests.rs
new file mode 100644
index 000000000..e4c1787f2
--- /dev/null
+++ b/compiler/rustc_lexer/src/tests.rs
@@ -0,0 +1,298 @@
+use super::*;
+
+use expect_test::{expect, Expect};
+
+fn check_raw_str(s: &str, expected: Result<u8, RawStrError>) {
+ let s = &format!("r{}", s);
+ let mut cursor = Cursor::new(s);
+ cursor.bump();
+ let res = cursor.raw_double_quoted_string(0);
+ assert_eq!(res, expected);
+}
+
+#[test]
+fn test_naked_raw_str() {
+ check_raw_str(r#""abc""#, Ok(0));
+}
+
+#[test]
+fn test_raw_no_start() {
+ check_raw_str(r##""abc"#"##, Ok(0));
+}
+
+#[test]
+fn test_too_many_terminators() {
+ // this error is handled in the parser later
+ check_raw_str(r###"#"abc"##"###, Ok(1));
+}
+
+#[test]
+fn test_unterminated() {
+ check_raw_str(
+ r#"#"abc"#,
+ Err(RawStrError::NoTerminator { expected: 1, found: 0, possible_terminator_offset: None }),
+ );
+ check_raw_str(
+ r###"##"abc"#"###,
+ Err(RawStrError::NoTerminator {
+ expected: 2,
+ found: 1,
+ possible_terminator_offset: Some(7),
+ }),
+ );
+    // We're looking for `"#`, not just any `#`.
+ check_raw_str(
+ r###"##"abc#"###,
+ Err(RawStrError::NoTerminator { expected: 2, found: 0, possible_terminator_offset: None }),
+ )
+}
+
+#[test]
+fn test_invalid_start() {
+ check_raw_str(r##"#~"abc"#"##, Err(RawStrError::InvalidStarter { bad_char: '~' }));
+}
+
+#[test]
+fn test_unterminated_no_pound() {
+ // https://github.com/rust-lang/rust/issues/70677
+ check_raw_str(
+ r#"""#,
+ Err(RawStrError::NoTerminator { expected: 0, found: 0, possible_terminator_offset: None }),
+ );
+}
+
+#[test]
+fn test_too_many_hashes() {
+ let max_count = u8::MAX;
+ let hashes1 = "#".repeat(max_count as usize);
+ let hashes2 = "#".repeat(max_count as usize + 1);
+ let middle = "\"abc\"";
+ let s1 = [&hashes1, middle, &hashes1].join("");
+ let s2 = [&hashes2, middle, &hashes2].join("");
+
+ // Valid number of hashes (255 = 2^8 - 1 = u8::MAX).
+ check_raw_str(&s1, Ok(255));
+
+ // One more hash sign (256 = 2^8) becomes too many.
+ check_raw_str(&s2, Err(RawStrError::TooManyDelimiters { found: u32::from(max_count) + 1 }));
+}
+
+#[test]
+fn test_valid_shebang() {
+ // https://github.com/rust-lang/rust/issues/70528
+ let input = "#!/usr/bin/rustrun\nlet x = 5;";
+ assert_eq!(strip_shebang(input), Some(18));
+}
+
+#[test]
+fn test_invalid_shebang_valid_rust_syntax() {
+ // https://github.com/rust-lang/rust/issues/70528
+ let input = "#! [bad_attribute]";
+ assert_eq!(strip_shebang(input), None);
+}
+
+#[test]
+fn test_shebang_second_line() {
+ // Because shebangs are interpreted by the kernel, they must be on the first line
+ let input = "\n#!/bin/bash";
+ assert_eq!(strip_shebang(input), None);
+}
+
+#[test]
+fn test_shebang_space() {
+ let input = "#! /bin/bash";
+ assert_eq!(strip_shebang(input), Some(input.len()));
+}
+
+#[test]
+fn test_shebang_empty_shebang() {
+ let input = "#! \n[attribute(foo)]";
+ assert_eq!(strip_shebang(input), None);
+}
+
+#[test]
+fn test_invalid_shebang_comment() {
+ let input = "#!//bin/ami/a/comment\n[";
+ assert_eq!(strip_shebang(input), None)
+}
+
+#[test]
+fn test_invalid_shebang_another_comment() {
+ let input = "#!/*bin/ami/a/comment*/\n[attribute";
+ assert_eq!(strip_shebang(input), None)
+}
+
+#[test]
+fn test_shebang_valid_rust_after() {
+ let input = "#!/*bin/ami/a/comment*/\npub fn main() {}";
+ assert_eq!(strip_shebang(input), Some(23))
+}
+
+#[test]
+fn test_shebang_followed_by_attrib() {
+ let input = "#!/bin/rust-scripts\n#![allow_unused(true)]";
+ assert_eq!(strip_shebang(input), Some(19));
+}
+
+fn check_lexing(src: &str, expect: Expect) {
+ let actual: String = tokenize(src).map(|token| format!("{:?}\n", token)).collect();
+ expect.assert_eq(&actual)
+}
+
+#[test]
+fn smoke_test() {
+ check_lexing(
+ "/* my source file */ fn main() { println!(\"zebra\"); }\n",
+ expect![[r#"
+ Token { kind: BlockComment { doc_style: None, terminated: true }, len: 20 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Ident, len: 2 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Ident, len: 4 }
+ Token { kind: OpenParen, len: 1 }
+ Token { kind: CloseParen, len: 1 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: OpenBrace, len: 1 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Ident, len: 7 }
+ Token { kind: Bang, len: 1 }
+ Token { kind: OpenParen, len: 1 }
+ Token { kind: Literal { kind: Str { terminated: true }, suffix_start: 7 }, len: 7 }
+ Token { kind: CloseParen, len: 1 }
+ Token { kind: Semi, len: 1 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: CloseBrace, len: 1 }
+ Token { kind: Whitespace, len: 1 }
+ "#]],
+ )
+}
+
+#[test]
+fn comment_flavors() {
+ check_lexing(
+ r"
+// line
+//// line as well
+/// outer doc line
+//! inner doc line
+/* block */
+/**/
+/*** also block */
+/** outer doc block */
+/*! inner doc block */
+",
+ expect![[r#"
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: LineComment { doc_style: None }, len: 7 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: LineComment { doc_style: None }, len: 17 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: LineComment { doc_style: Some(Outer) }, len: 18 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: LineComment { doc_style: Some(Inner) }, len: 18 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: BlockComment { doc_style: None, terminated: true }, len: 11 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: BlockComment { doc_style: None, terminated: true }, len: 4 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: BlockComment { doc_style: None, terminated: true }, len: 18 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: BlockComment { doc_style: Some(Outer), terminated: true }, len: 22 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: BlockComment { doc_style: Some(Inner), terminated: true }, len: 22 }
+ Token { kind: Whitespace, len: 1 }
+ "#]],
+ )
+}
+
+#[test]
+fn nested_block_comments() {
+ check_lexing(
+ "/* /* */ */'a'",
+ expect![[r#"
+ Token { kind: BlockComment { doc_style: None, terminated: true }, len: 11 }
+ Token { kind: Literal { kind: Char { terminated: true }, suffix_start: 3 }, len: 3 }
+ "#]],
+ )
+}
+
+#[test]
+fn characters() {
+ check_lexing(
+ "'a' ' ' '\\n'",
+ expect![[r#"
+ Token { kind: Literal { kind: Char { terminated: true }, suffix_start: 3 }, len: 3 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Char { terminated: true }, suffix_start: 3 }, len: 3 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Char { terminated: true }, suffix_start: 4 }, len: 4 }
+ "#]],
+ );
+}
+
+#[test]
+fn lifetime() {
+ check_lexing(
+ "'abc",
+ expect![[r#"
+ Token { kind: Lifetime { starts_with_number: false }, len: 4 }
+ "#]],
+ );
+}
+
+#[test]
+fn raw_string() {
+ check_lexing(
+ "r###\"\"#a\\b\x00c\"\"###",
+ expect![[r#"
+ Token { kind: Literal { kind: RawStr { n_hashes: Some(3) }, suffix_start: 17 }, len: 17 }
+ "#]],
+ )
+}
+
+#[test]
+fn literal_suffixes() {
+ check_lexing(
+ r####"
+'a'
+b'a'
+"a"
+b"a"
+1234
+0b101
+0xABC
+1.0
+1.0e10
+2us
+r###"raw"###suffix
+br###"raw"###suffix
+"####,
+ expect![[r#"
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Char { terminated: true }, suffix_start: 3 }, len: 3 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Byte { terminated: true }, suffix_start: 4 }, len: 4 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Str { terminated: true }, suffix_start: 3 }, len: 3 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: ByteStr { terminated: true }, suffix_start: 4 }, len: 4 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Int { base: Decimal, empty_int: false }, suffix_start: 4 }, len: 4 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Int { base: Binary, empty_int: false }, suffix_start: 5 }, len: 5 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Int { base: Hexadecimal, empty_int: false }, suffix_start: 5 }, len: 5 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Float { base: Decimal, empty_exponent: false }, suffix_start: 3 }, len: 3 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Float { base: Decimal, empty_exponent: false }, suffix_start: 6 }, len: 6 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: Int { base: Decimal, empty_int: false }, suffix_start: 1 }, len: 3 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: RawStr { n_hashes: Some(3) }, suffix_start: 12 }, len: 18 }
+ Token { kind: Whitespace, len: 1 }
+ Token { kind: Literal { kind: RawByteStr { n_hashes: Some(3) }, suffix_start: 13 }, len: 19 }
+ Token { kind: Whitespace, len: 1 }
+ "#]],
+ )
+}
diff --git a/compiler/rustc_lexer/src/unescape.rs b/compiler/rustc_lexer/src/unescape.rs
new file mode 100644
index 000000000..3da6bc146
--- /dev/null
+++ b/compiler/rustc_lexer/src/unescape.rs
@@ -0,0 +1,377 @@
+//! Utilities for validating string and char literals and turning them into
+//! values they represent.
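+//!
+//! For example, `unescape_char(r"\x41")` yields `Ok('A')`, while
+//! `unescape_literal` reports each unescaped `char` (or error) together
+//! with its byte range in the source text via the provided callback.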
+
+use std::ops::Range;
+use std::str::Chars;
+
+#[cfg(test)]
+mod tests;
+
+/// Errors and warnings that can occur during string unescaping.
+#[derive(Debug, PartialEq, Eq)]
+pub enum EscapeError {
+ /// Expected 1 char, but 0 were found.
+ ZeroChars,
+ /// Expected 1 char, but more than 1 were found.
+ MoreThanOneChar,
+
+ /// Escaped '\' character without continuation.
+ LoneSlash,
+ /// Invalid escape character (e.g. '\z').
+ InvalidEscape,
+ /// Raw '\r' encountered.
+ BareCarriageReturn,
+ /// Raw '\r' encountered in raw string.
+ BareCarriageReturnInRawString,
+ /// Unescaped character that was expected to be escaped (e.g. raw '\t').
+ EscapeOnlyChar,
+
+ /// Numeric character escape is too short (e.g. '\x1').
+ TooShortHexEscape,
+ /// Invalid character in numeric escape (e.g. '\xz')
+ InvalidCharInHexEscape,
+ /// Character code in numeric escape is non-ascii (e.g. '\xFF').
+ OutOfRangeHexEscape,
+
+ /// '\u' not followed by '{'.
+ NoBraceInUnicodeEscape,
+ /// Non-hexadecimal value in '\u{..}'.
+ InvalidCharInUnicodeEscape,
+ /// '\u{}'
+ EmptyUnicodeEscape,
+ /// No closing brace in '\u{..}', e.g. '\u{12'.
+ UnclosedUnicodeEscape,
+ /// '\u{_12}'
+ LeadingUnderscoreUnicodeEscape,
+ /// More than 6 characters in '\u{..}', e.g. '\u{10FFFF_FF}'
+ OverlongUnicodeEscape,
+ /// Invalid in-bound unicode character code, e.g. '\u{DFFF}'.
+ LoneSurrogateUnicodeEscape,
+ /// Out of bounds unicode character code, e.g. '\u{FFFFFF}'.
+ OutOfRangeUnicodeEscape,
+
+ /// Unicode escape code in byte literal.
+ UnicodeEscapeInByte,
+ /// Non-ascii character in byte literal.
+ NonAsciiCharInByte,
+ /// Non-ascii character in byte string literal.
+ NonAsciiCharInByteString,
+
+ /// After a line ending with '\', the next line contains whitespace
+ /// characters that are not skipped.
+ UnskippedWhitespaceWarning,
+
+ /// After a line ending with '\', multiple lines are skipped.
+ MultipleSkippedLinesWarning,
+}
+
+impl EscapeError {
+ /// Returns true for actual errors, as opposed to warnings.
+ pub fn is_fatal(&self) -> bool {
+ !matches!(
+ self,
+ EscapeError::UnskippedWhitespaceWarning | EscapeError::MultipleSkippedLinesWarning
+ )
+ }
+}
+
+/// Takes the contents of a literal (without quotes) and produces a
+/// sequence of unescaped characters or errors.
+/// Values are returned by invoking the provided callback.
+pub fn unescape_literal<F>(literal_text: &str, mode: Mode, callback: &mut F)
+where
+ F: FnMut(Range<usize>, Result<char, EscapeError>),
+{
+ match mode {
+ Mode::Char | Mode::Byte => {
+ let mut chars = literal_text.chars();
+ let result = unescape_char_or_byte(&mut chars, mode);
+ // The Chars iterator moved forward.
+ callback(0..(literal_text.len() - chars.as_str().len()), result);
+ }
+ Mode::Str | Mode::ByteStr => unescape_str_or_byte_str(literal_text, mode, callback),
+ // NOTE: Raw strings do not perform any explicit character escaping, here we
+ // only translate CRLF to LF and produce errors on bare CR.
+ Mode::RawStr | Mode::RawByteStr => {
+ unescape_raw_str_or_byte_str(literal_text, mode, callback)
+ }
+ }
+}
+
+/// Takes the contents of a byte, byte string, or raw byte string literal
+/// (without quotes) and produces a sequence of bytes or errors.
+/// Values are returned by invoking the provided callback.
+pub fn unescape_byte_literal<F>(literal_text: &str, mode: Mode, callback: &mut F)
+where
+ F: FnMut(Range<usize>, Result<u8, EscapeError>),
+{
+ assert!(mode.is_bytes());
+ unescape_literal(literal_text, mode, &mut |range, result| {
+ callback(range, result.map(byte_from_char));
+ })
+}
+
+/// Takes the contents of a char literal (without quotes) and returns an
+/// unescaped char or an error.
+pub fn unescape_char(literal_text: &str) -> Result<char, (usize, EscapeError)> {
+ let mut chars = literal_text.chars();
+ unescape_char_or_byte(&mut chars, Mode::Char)
+ .map_err(|err| (literal_text.len() - chars.as_str().len(), err))
+}
+
+/// Takes the contents of a byte literal (without quotes) and returns an
+/// unescaped byte or an error.
+pub fn unescape_byte(literal_text: &str) -> Result<u8, (usize, EscapeError)> {
+ let mut chars = literal_text.chars();
+ unescape_char_or_byte(&mut chars, Mode::Byte)
+ .map(byte_from_char)
+ .map_err(|err| (literal_text.len() - chars.as_str().len(), err))
+}
+
+/// The kind of literal we are parsing.
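+/// (`'a'`, `"a"`, `b'a'`, `b"a"`, `r"a"`, and `br"a"`, respectively).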
+#[derive(Debug, Clone, Copy)]
+pub enum Mode {
+ Char,
+ Str,
+ Byte,
+ ByteStr,
+ RawStr,
+ RawByteStr,
+}
+
+impl Mode {
+ pub fn in_single_quotes(self) -> bool {
+ match self {
+ Mode::Char | Mode::Byte => true,
+ Mode::Str | Mode::ByteStr | Mode::RawStr | Mode::RawByteStr => false,
+ }
+ }
+
+ pub fn in_double_quotes(self) -> bool {
+ !self.in_single_quotes()
+ }
+
+ pub fn is_bytes(self) -> bool {
+ match self {
+ Mode::Byte | Mode::ByteStr | Mode::RawByteStr => true,
+ Mode::Char | Mode::Str | Mode::RawStr => false,
+ }
+ }
+}
+
+fn scan_escape(chars: &mut Chars<'_>, mode: Mode) -> Result<char, EscapeError> {
+ // Previous character was '\\', unescape what follows.
+
+ let second_char = chars.next().ok_or(EscapeError::LoneSlash)?;
+
+ let res = match second_char {
+ '"' => '"',
+ 'n' => '\n',
+ 'r' => '\r',
+ 't' => '\t',
+ '\\' => '\\',
+ '\'' => '\'',
+ '0' => '\0',
+
+ 'x' => {
+ // Parse hexadecimal character code.
+
+ let hi = chars.next().ok_or(EscapeError::TooShortHexEscape)?;
+ let hi = hi.to_digit(16).ok_or(EscapeError::InvalidCharInHexEscape)?;
+
+ let lo = chars.next().ok_or(EscapeError::TooShortHexEscape)?;
+ let lo = lo.to_digit(16).ok_or(EscapeError::InvalidCharInHexEscape)?;
+
+ let value = hi * 16 + lo;
+
+            // For a non-byte literal, verify that the value is within ASCII range.
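+            // (E.g. `\xff` is accepted in `b'...'` and `b"..."` literals,
+            // but is out of range in `'...'` and `"..."`, which only accept
+            // `\x00` through `\x7F`.)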
+ if !mode.is_bytes() && !is_ascii(value) {
+ return Err(EscapeError::OutOfRangeHexEscape);
+ }
+ let value = value as u8;
+
+ value as char
+ }
+
+ 'u' => {
+ // We've parsed '\u', now we have to parse '{..}'.
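+            // Accepts one to six hex digits, with `_` separators allowed
+            // after the first digit, e.g. `\u{1F63b}` or `\u{00_41}`.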
+
+ if chars.next() != Some('{') {
+ return Err(EscapeError::NoBraceInUnicodeEscape);
+ }
+
+ // First character must be a hexadecimal digit.
+ let mut n_digits = 1;
+ let mut value: u32 = match chars.next().ok_or(EscapeError::UnclosedUnicodeEscape)? {
+ '_' => return Err(EscapeError::LeadingUnderscoreUnicodeEscape),
+ '}' => return Err(EscapeError::EmptyUnicodeEscape),
+ c => c.to_digit(16).ok_or(EscapeError::InvalidCharInUnicodeEscape)?,
+ };
+
+ // First character is valid, now parse the rest of the number
+ // and closing brace.
+ loop {
+ match chars.next() {
+ None => return Err(EscapeError::UnclosedUnicodeEscape),
+ Some('_') => continue,
+ Some('}') => {
+ if n_digits > 6 {
+ return Err(EscapeError::OverlongUnicodeEscape);
+ }
+
+                            // Incorrect syntax takes priority in error
+                            // reporting over a disallowed literal value.
+ if mode.is_bytes() {
+ return Err(EscapeError::UnicodeEscapeInByte);
+ }
+
+ break std::char::from_u32(value).ok_or_else(|| {
+ if value > 0x10FFFF {
+ EscapeError::OutOfRangeUnicodeEscape
+ } else {
+ EscapeError::LoneSurrogateUnicodeEscape
+ }
+ })?;
+ }
+ Some(c) => {
+ let digit =
+ c.to_digit(16).ok_or(EscapeError::InvalidCharInUnicodeEscape)?;
+ n_digits += 1;
+ if n_digits > 6 {
+ // Stop updating value since we're sure that it's incorrect already.
+ continue;
+ }
+ let digit = digit as u32;
+ value = value * 16 + digit;
+ }
+ };
+ }
+ }
+ _ => return Err(EscapeError::InvalidEscape),
+ };
+ Ok(res)
+}
+
+#[inline]
+fn ascii_check(first_char: char, mode: Mode) -> Result<char, EscapeError> {
+ if mode.is_bytes() && !first_char.is_ascii() {
+ // Byte literal can't be a non-ascii character.
+ Err(EscapeError::NonAsciiCharInByte)
+ } else {
+ Ok(first_char)
+ }
+}
+
+fn unescape_char_or_byte(chars: &mut Chars<'_>, mode: Mode) -> Result<char, EscapeError> {
+ let first_char = chars.next().ok_or(EscapeError::ZeroChars)?;
+ let res = match first_char {
+ '\\' => scan_escape(chars, mode),
+ '\n' | '\t' | '\'' => Err(EscapeError::EscapeOnlyChar),
+ '\r' => Err(EscapeError::BareCarriageReturn),
+ _ => ascii_check(first_char, mode),
+ }?;
+ if chars.next().is_some() {
+ return Err(EscapeError::MoreThanOneChar);
+ }
+ Ok(res)
+}
+
+/// Takes the contents of a string literal (without quotes) and produces a
+/// sequence of unescaped characters or errors.
+fn unescape_str_or_byte_str<F>(src: &str, mode: Mode, callback: &mut F)
+where
+ F: FnMut(Range<usize>, Result<char, EscapeError>),
+{
+ assert!(mode.in_double_quotes());
+ let initial_len = src.len();
+ let mut chars = src.chars();
+ while let Some(first_char) = chars.next() {
+ let start = initial_len - chars.as_str().len() - first_char.len_utf8();
+
+ let unescaped_char = match first_char {
+ '\\' => {
+ let second_char = chars.clone().next();
+ match second_char {
+ Some('\n') => {
+                    // The Rust language specification requires us to skip
+                    // whitespace if an unescaped '\' character is followed
+                    // by '\n'. For details see the [Rust language reference]
+                    // (https://doc.rust-lang.org/reference/tokens.html#string-literals).
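+                    // E.g. "hello \
+                    //       world" unescapes to "hello world".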
+ skip_ascii_whitespace(&mut chars, start, callback);
+ continue;
+ }
+ _ => scan_escape(&mut chars, mode),
+ }
+ }
+ '\n' => Ok('\n'),
+ '\t' => Ok('\t'),
+ '"' => Err(EscapeError::EscapeOnlyChar),
+ '\r' => Err(EscapeError::BareCarriageReturn),
+ _ => ascii_check(first_char, mode),
+ };
+ let end = initial_len - chars.as_str().len();
+ callback(start..end, unescaped_char);
+ }
+
+ fn skip_ascii_whitespace<F>(chars: &mut Chars<'_>, start: usize, callback: &mut F)
+ where
+ F: FnMut(Range<usize>, Result<char, EscapeError>),
+ {
+ let tail = chars.as_str();
+ let first_non_space = tail
+ .bytes()
+ .position(|b| b != b' ' && b != b'\t' && b != b'\n' && b != b'\r')
+ .unwrap_or(tail.len());
+ if tail[1..first_non_space].contains('\n') {
+ // The +1 accounts for the escaping slash.
+ let end = start + first_non_space + 1;
+ callback(start..end, Err(EscapeError::MultipleSkippedLinesWarning));
+ }
+ let tail = &tail[first_non_space..];
+ if let Some(c) = tail.chars().nth(0) {
+ // For error reporting, we would like the span to contain the character that was not
+ // skipped. The +1 is necessary to account for the leading \ that started the escape.
+ let end = start + first_non_space + c.len_utf8() + 1;
+ if c.is_whitespace() {
+ callback(start..end, Err(EscapeError::UnskippedWhitespaceWarning));
+ }
+ }
+ *chars = tail.chars();
+ }
+}
+
+/// Takes the contents of a raw string literal (without quotes) and produces
+/// a sequence of characters or errors.
+/// NOTE: Raw strings do not perform any explicit character escaping, here we
+/// only translate CRLF to LF and produce errors on bare CR.
+fn unescape_raw_str_or_byte_str<F>(literal_text: &str, mode: Mode, callback: &mut F)
+where
+ F: FnMut(Range<usize>, Result<char, EscapeError>),
+{
+ assert!(mode.in_double_quotes());
+ let initial_len = literal_text.len();
+
+ let mut chars = literal_text.chars();
+ while let Some(curr) = chars.next() {
+ let start = initial_len - chars.as_str().len() - curr.len_utf8();
+
+ let result = match curr {
+ '\r' => Err(EscapeError::BareCarriageReturnInRawString),
+ c if mode.is_bytes() && !c.is_ascii() => Err(EscapeError::NonAsciiCharInByteString),
+ c => Ok(c),
+ };
+ let end = initial_len - chars.as_str().len();
+
+ callback(start..end, result);
+ }
+}
+
+fn byte_from_char(c: char) -> u8 {
+ let res = c as u32;
+ assert!(res <= u8::MAX as u32, "guaranteed because of Mode::ByteStr");
+ res as u8
+}
+
+fn is_ascii(x: u32) -> bool {
+ x <= 0x7F
+}
diff --git a/compiler/rustc_lexer/src/unescape/tests.rs b/compiler/rustc_lexer/src/unescape/tests.rs
new file mode 100644
index 000000000..fa61554af
--- /dev/null
+++ b/compiler/rustc_lexer/src/unescape/tests.rs
@@ -0,0 +1,297 @@
+use super::*;
+
+#[test]
+fn test_unescape_char_bad() {
+ fn check(literal_text: &str, expected_error: EscapeError) {
+ let actual_result = unescape_char(literal_text).map_err(|(_offset, err)| err);
+ assert_eq!(actual_result, Err(expected_error));
+ }
+
+ check("", EscapeError::ZeroChars);
+ check(r"\", EscapeError::LoneSlash);
+
+ check("\n", EscapeError::EscapeOnlyChar);
+ check("\t", EscapeError::EscapeOnlyChar);
+ check("'", EscapeError::EscapeOnlyChar);
+ check("\r", EscapeError::BareCarriageReturn);
+
+ check("spam", EscapeError::MoreThanOneChar);
+ check(r"\x0ff", EscapeError::MoreThanOneChar);
+ check(r#"\"a"#, EscapeError::MoreThanOneChar);
+ check(r"\na", EscapeError::MoreThanOneChar);
+ check(r"\ra", EscapeError::MoreThanOneChar);
+ check(r"\ta", EscapeError::MoreThanOneChar);
+ check(r"\\a", EscapeError::MoreThanOneChar);
+ check(r"\'a", EscapeError::MoreThanOneChar);
+ check(r"\0a", EscapeError::MoreThanOneChar);
+ check(r"\u{0}x", EscapeError::MoreThanOneChar);
+ check(r"\u{1F63b}}", EscapeError::MoreThanOneChar);
+
+ check(r"\v", EscapeError::InvalidEscape);
+ check(r"\💩", EscapeError::InvalidEscape);
+ check(r"\●", EscapeError::InvalidEscape);
+ check("\\\r", EscapeError::InvalidEscape);
+
+ check(r"\x", EscapeError::TooShortHexEscape);
+ check(r"\x0", EscapeError::TooShortHexEscape);
+ check(r"\xf", EscapeError::TooShortHexEscape);
+ check(r"\xa", EscapeError::TooShortHexEscape);
+ check(r"\xx", EscapeError::InvalidCharInHexEscape);
+ check(r"\xы", EscapeError::InvalidCharInHexEscape);
+ check(r"\x🦀", EscapeError::InvalidCharInHexEscape);
+ check(r"\xtt", EscapeError::InvalidCharInHexEscape);
+ check(r"\xff", EscapeError::OutOfRangeHexEscape);
+ check(r"\xFF", EscapeError::OutOfRangeHexEscape);
+ check(r"\x80", EscapeError::OutOfRangeHexEscape);
+
+ check(r"\u", EscapeError::NoBraceInUnicodeEscape);
+ check(r"\u[0123]", EscapeError::NoBraceInUnicodeEscape);
+ check(r"\u{0x}", EscapeError::InvalidCharInUnicodeEscape);
+ check(r"\u{", EscapeError::UnclosedUnicodeEscape);
+ check(r"\u{0000", EscapeError::UnclosedUnicodeEscape);
+ check(r"\u{}", EscapeError::EmptyUnicodeEscape);
+ check(r"\u{_0000}", EscapeError::LeadingUnderscoreUnicodeEscape);
+ check(r"\u{0000000}", EscapeError::OverlongUnicodeEscape);
+ check(r"\u{FFFFFF}", EscapeError::OutOfRangeUnicodeEscape);
+ check(r"\u{ffffff}", EscapeError::OutOfRangeUnicodeEscape);
+ check(r"\u{ffffff}", EscapeError::OutOfRangeUnicodeEscape);
+
+ check(r"\u{DC00}", EscapeError::LoneSurrogateUnicodeEscape);
+ check(r"\u{DDDD}", EscapeError::LoneSurrogateUnicodeEscape);
+ check(r"\u{DFFF}", EscapeError::LoneSurrogateUnicodeEscape);
+
+ check(r"\u{D800}", EscapeError::LoneSurrogateUnicodeEscape);
+ check(r"\u{DAAA}", EscapeError::LoneSurrogateUnicodeEscape);
+ check(r"\u{DBFF}", EscapeError::LoneSurrogateUnicodeEscape);
+}
+
+#[test]
+fn test_unescape_char_good() {
+ fn check(literal_text: &str, expected_char: char) {
+ let actual_result = unescape_char(literal_text);
+ assert_eq!(actual_result, Ok(expected_char));
+ }
+
+ check("a", 'a');
+ check("ы", 'ы');
+ check("🦀", '🦀');
+
+ check(r#"\""#, '"');
+ check(r"\n", '\n');
+ check(r"\r", '\r');
+ check(r"\t", '\t');
+ check(r"\\", '\\');
+ check(r"\'", '\'');
+ check(r"\0", '\0');
+
+ check(r"\x00", '\0');
+ check(r"\x5a", 'Z');
+ check(r"\x5A", 'Z');
+ check(r"\x7f", 127 as char);
+
+ check(r"\u{0}", '\0');
+ check(r"\u{000000}", '\0');
+ check(r"\u{41}", 'A');
+ check(r"\u{0041}", 'A');
+ check(r"\u{00_41}", 'A');
+ check(r"\u{4__1__}", 'A');
+ check(r"\u{1F63b}", '😻');
+}
+
+#[test]
+fn test_unescape_str_warn() {
+ fn check(literal: &str, expected: &[(Range<usize>, Result<char, EscapeError>)]) {
+ let mut unescaped = Vec::with_capacity(literal.len());
+ unescape_literal(literal, Mode::Str, &mut |range, res| unescaped.push((range, res)));
+ assert_eq!(unescaped, expected);
+ }
+
+ // Check we can handle escaped newlines at the end of a file.
+ check("\\\n", &[]);
+ check("\\\n ", &[]);
+
+ check(
+ "\\\n \u{a0} x",
+ &[
+ (0..5, Err(EscapeError::UnskippedWhitespaceWarning)),
+ (3..5, Ok('\u{a0}')),
+ (5..6, Ok(' ')),
+ (6..7, Ok('x')),
+ ],
+ );
+ check("\\\n \n x", &[(0..7, Err(EscapeError::MultipleSkippedLinesWarning)), (7..8, Ok('x'))]);
+}
+
+#[test]
+fn test_unescape_str_good() {
+ fn check(literal_text: &str, expected: &str) {
+ let mut buf = Ok(String::with_capacity(literal_text.len()));
+ unescape_literal(literal_text, Mode::Str, &mut |range, c| {
+ if let Ok(b) = &mut buf {
+ match c {
+ Ok(c) => b.push(c),
+ Err(e) => buf = Err((range, e)),
+ }
+ }
+ });
+ let buf = buf.as_ref().map(|it| it.as_ref());
+ assert_eq!(buf, Ok(expected))
+ }
+
+ check("foo", "foo");
+ check("", "");
+ check(" \t\n", " \t\n");
+
+ check("hello \\\n world", "hello world");
+ check("thread's", "thread's")
+}
+
+#[test]
+fn test_unescape_byte_bad() {
+ fn check(literal_text: &str, expected_error: EscapeError) {
+ let actual_result = unescape_byte(literal_text).map_err(|(_offset, err)| err);
+ assert_eq!(actual_result, Err(expected_error));
+ }
+
+ check("", EscapeError::ZeroChars);
+ check(r"\", EscapeError::LoneSlash);
+
+ check("\n", EscapeError::EscapeOnlyChar);
+ check("\t", EscapeError::EscapeOnlyChar);
+ check("'", EscapeError::EscapeOnlyChar);
+ check("\r", EscapeError::BareCarriageReturn);
+
+ check("spam", EscapeError::MoreThanOneChar);
+ check(r"\x0ff", EscapeError::MoreThanOneChar);
+ check(r#"\"a"#, EscapeError::MoreThanOneChar);
+ check(r"\na", EscapeError::MoreThanOneChar);
+ check(r"\ra", EscapeError::MoreThanOneChar);
+ check(r"\ta", EscapeError::MoreThanOneChar);
+ check(r"\\a", EscapeError::MoreThanOneChar);
+ check(r"\'a", EscapeError::MoreThanOneChar);
+ check(r"\0a", EscapeError::MoreThanOneChar);
+
+ check(r"\v", EscapeError::InvalidEscape);
+ check(r"\💩", EscapeError::InvalidEscape);
+ check(r"\●", EscapeError::InvalidEscape);
+
+ check(r"\x", EscapeError::TooShortHexEscape);
+ check(r"\x0", EscapeError::TooShortHexEscape);
+ check(r"\xa", EscapeError::TooShortHexEscape);
+ check(r"\xf", EscapeError::TooShortHexEscape);
+ check(r"\xx", EscapeError::InvalidCharInHexEscape);
+ check(r"\xы", EscapeError::InvalidCharInHexEscape);
+ check(r"\x🦀", EscapeError::InvalidCharInHexEscape);
+ check(r"\xtt", EscapeError::InvalidCharInHexEscape);
+
+ check(r"\u", EscapeError::NoBraceInUnicodeEscape);
+ check(r"\u[0123]", EscapeError::NoBraceInUnicodeEscape);
+ check(r"\u{0x}", EscapeError::InvalidCharInUnicodeEscape);
+ check(r"\u{", EscapeError::UnclosedUnicodeEscape);
+ check(r"\u{0000", EscapeError::UnclosedUnicodeEscape);
+ check(r"\u{}", EscapeError::EmptyUnicodeEscape);
+ check(r"\u{_0000}", EscapeError::LeadingUnderscoreUnicodeEscape);
+ check(r"\u{0000000}", EscapeError::OverlongUnicodeEscape);
+
+ check("ы", EscapeError::NonAsciiCharInByte);
+ check("🦀", EscapeError::NonAsciiCharInByte);
+
+ check(r"\u{0}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{000000}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{41}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{0041}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{00_41}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{4__1__}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{1F63b}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{0}x", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{1F63b}}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{FFFFFF}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{ffffff}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{ffffff}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{DC00}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{DDDD}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{DFFF}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{D800}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{DAAA}", EscapeError::UnicodeEscapeInByte);
+ check(r"\u{DBFF}", EscapeError::UnicodeEscapeInByte);
+}
+
+#[test]
+fn test_unescape_byte_good() {
+ fn check(literal_text: &str, expected_byte: u8) {
+ let actual_result = unescape_byte(literal_text);
+ assert_eq!(actual_result, Ok(expected_byte));
+ }
+
+ check("a", b'a');
+
+ check(r#"\""#, b'"');
+ check(r"\n", b'\n');
+ check(r"\r", b'\r');
+ check(r"\t", b'\t');
+ check(r"\\", b'\\');
+ check(r"\'", b'\'');
+ check(r"\0", b'\0');
+
+ check(r"\x00", b'\0');
+ check(r"\x5a", b'Z');
+ check(r"\x5A", b'Z');
+ check(r"\x7f", 127);
+ check(r"\x80", 128);
+ check(r"\xff", 255);
+ check(r"\xFF", 255);
+}
+
+#[test]
+fn test_unescape_byte_str_good() {
+ fn check(literal_text: &str, expected: &[u8]) {
+ let mut buf = Ok(Vec::with_capacity(literal_text.len()));
+ unescape_byte_literal(literal_text, Mode::ByteStr, &mut |range, c| {
+ if let Ok(b) = &mut buf {
+ match c {
+ Ok(c) => b.push(c),
+ Err(e) => buf = Err((range, e)),
+ }
+ }
+ });
+ let buf = buf.as_ref().map(|it| it.as_ref());
+ assert_eq!(buf, Ok(expected))
+ }
+
+ check("foo", b"foo");
+ check("", b"");
+ check(" \t\n", b" \t\n");
+
+ check("hello \\\n world", b"hello world");
+ check("thread's", b"thread's")
+}
+
+#[test]
+fn test_unescape_raw_str() {
+ fn check(literal: &str, expected: &[(Range<usize>, Result<char, EscapeError>)]) {
+ let mut unescaped = Vec::with_capacity(literal.len());
+ unescape_literal(literal, Mode::RawStr, &mut |range, res| unescaped.push((range, res)));
+ assert_eq!(unescaped, expected);
+ }
+
+ check("\r", &[(0..1, Err(EscapeError::BareCarriageReturnInRawString))]);
+ check("\rx", &[(0..1, Err(EscapeError::BareCarriageReturnInRawString)), (1..2, Ok('x'))]);
+}
+
+#[test]
+fn test_unescape_raw_byte_str() {
+ fn check(literal: &str, expected: &[(Range<usize>, Result<u8, EscapeError>)]) {
+ let mut unescaped = Vec::with_capacity(literal.len());
+ unescape_byte_literal(literal, Mode::RawByteStr, &mut |range, res| {
+ unescaped.push((range, res))
+ });
+ assert_eq!(unescaped, expected);
+ }
+
+ check("\r", &[(0..1, Err(EscapeError::BareCarriageReturnInRawString))]);
+ check("🦀", &[(0..4, Err(EscapeError::NonAsciiCharInByteString))]);
+ check(
+ "🦀a",
+ &[(0..4, Err(EscapeError::NonAsciiCharInByteString)), (4..5, Ok(byte_from_char('a')))],
+ );
+}
diff --git a/compiler/rustc_lint/Cargo.toml b/compiler/rustc_lint/Cargo.toml
new file mode 100644
index 000000000..7c0f2c440
--- /dev/null
+++ b/compiler/rustc_lint/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "rustc_lint"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+tracing = "0.1"
+unicode-security = "0.0.5"
+rustc_middle = { path = "../rustc_middle" }
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_target = { path = "../rustc_target" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_index = { path = "../rustc_index" }
+rustc_session = { path = "../rustc_session" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_parse_format = { path = "../rustc_parse_format" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_type_ir = { path = "../rustc_type_ir" }
+rustc_macros = { path = "../rustc_macros" }
diff --git a/compiler/rustc_lint/src/array_into_iter.rs b/compiler/rustc_lint/src/array_into_iter.rs
new file mode 100644
index 000000000..121fefdc6
--- /dev/null
+++ b/compiler/rustc_lint/src/array_into_iter.rs
@@ -0,0 +1,155 @@
+use crate::{LateContext, LateLintPass, LintContext};
+use rustc_errors::{fluent, Applicability};
+use rustc_hir as hir;
+use rustc_middle::ty;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment};
+use rustc_session::lint::FutureIncompatibilityReason;
+use rustc_span::edition::Edition;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+declare_lint! {
+ /// The `array_into_iter` lint detects calling `into_iter` on arrays.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,edition2018
+ /// # #![allow(unused)]
+ /// [1, 2, 3].into_iter().for_each(|n| { *n; });
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Since Rust 1.53, arrays implement `IntoIterator`. However, to avoid
+ /// breakage, `array.into_iter()` in Rust 2015 and 2018 code will still
+ /// behave as `(&array).into_iter()`, returning an iterator over
+ /// references, just like in Rust 1.52 and earlier.
+ /// This only applies to the method call syntax `array.into_iter()`, not to
+ /// any other syntax such as `for _ in array` or `IntoIterator::into_iter(array)`.
+ pub ARRAY_INTO_ITER,
+ Warn,
+ "detects calling `into_iter` on arrays in Rust 2015 and 2018",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/IntoIterator-for-arrays.html>",
+ reason: FutureIncompatibilityReason::EditionSemanticsChange(Edition::Edition2021),
+ };
+}
+
+#[derive(Copy, Clone, Default)]
+pub struct ArrayIntoIter {
+ for_expr_span: Span,
+}
+
+impl_lint_pass!(ArrayIntoIter => [ARRAY_INTO_ITER]);
+
+impl<'tcx> LateLintPass<'tcx> for ArrayIntoIter {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'tcx>) {
+ // Save the span of expressions in `for _ in expr` syntax,
+ // so we can give a better suggestion for those later.
+ if let hir::ExprKind::Match(arg, [_], hir::MatchSource::ForLoopDesugar) = &expr.kind {
+ if let hir::ExprKind::Call(path, [arg]) = &arg.kind {
+ if let hir::ExprKind::Path(hir::QPath::LangItem(
+ hir::LangItem::IntoIterIntoIter,
+ ..,
+ )) = &path.kind
+ {
+ self.for_expr_span = arg.span;
+ }
+ }
+ }
+
+ // We only care about method call expressions.
+ if let hir::ExprKind::MethodCall(call, args, _) = &expr.kind {
+ if call.ident.name != sym::into_iter {
+ return;
+ }
+
+ // Check if the method call actually calls the libcore
+ // `IntoIterator::into_iter`.
+ let def_id = cx.typeck_results().type_dependent_def_id(expr.hir_id).unwrap();
+ match cx.tcx.trait_of_item(def_id) {
+ Some(trait_id) if cx.tcx.is_diagnostic_item(sym::IntoIterator, trait_id) => {}
+ _ => return,
+ };
+
+ // As this is a method call expression, we have at least one argument.
+ let receiver_arg = &args[0];
+ let receiver_ty = cx.typeck_results().expr_ty(receiver_arg);
+ let adjustments = cx.typeck_results().expr_adjustments(receiver_arg);
+
+ let Some(Adjustment { kind: Adjust::Borrow(_), target }) = adjustments.last() else {
+ return
+ };
+
+ let types =
+ std::iter::once(receiver_ty).chain(adjustments.iter().map(|adj| adj.target));
+
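+            // E.g. for `[1, 2, 3].into_iter()` on edition 2018, this walks
+            // `[i32; 3]` and then the autoref target `&[i32; 3]`.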
+ let mut found_array = false;
+
+ for ty in types {
+ match ty.kind() {
+ // If we run into a &[T; N] or &[T] first, there's nothing to warn about.
+ // It'll resolve to the reference version.
+ ty::Ref(_, inner_ty, _) if inner_ty.is_array() => return,
+ ty::Ref(_, inner_ty, _) if matches!(inner_ty.kind(), ty::Slice(..)) => return,
+ // Found an actual array type without matching a &[T; N] first.
+ // This is the problematic case.
+ ty::Array(..) => {
+ found_array = true;
+ break;
+ }
+ _ => {}
+ }
+ }
+
+ if !found_array {
+ return;
+ }
+
+ // Emit lint diagnostic.
+ let target = match *target.kind() {
+ ty::Ref(_, inner_ty, _) if inner_ty.is_array() => "[T; N]",
+ ty::Ref(_, inner_ty, _) if matches!(inner_ty.kind(), ty::Slice(..)) => "[T]",
+ // We know the original first argument type is an array type,
+ // we know that the first adjustment was an autoref coercion
+ // and we know that `IntoIterator` is the trait involved. The
+ // array cannot be coerced to something other than a reference
+ // to an array or to a slice.
+ _ => bug!("array type coerced to something other than array or slice"),
+ };
+ cx.struct_span_lint(ARRAY_INTO_ITER, call.ident.span, |lint| {
+ let mut diag = lint.build(fluent::lint::array_into_iter);
+ diag.set_arg("target", target);
+ diag.span_suggestion(
+ call.ident.span,
+ fluent::lint::use_iter_suggestion,
+ "iter",
+ Applicability::MachineApplicable,
+ );
+ if self.for_expr_span == expr.span {
+ diag.span_suggestion(
+ receiver_arg.span.shrink_to_hi().to(expr.span.shrink_to_hi()),
+ fluent::lint::remove_into_iter_suggestion,
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ } else if receiver_ty.is_array() {
+ diag.multipart_suggestion(
+ fluent::lint::use_explicit_into_iter_suggestion,
+ vec![
+ (expr.span.shrink_to_lo(), "IntoIterator::into_iter(".into()),
+ (
+ receiver_arg.span.shrink_to_hi().to(expr.span.shrink_to_hi()),
+ ")".into(),
+ ),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ diag.emit();
+ })
+ }
+ }
+}
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
new file mode 100644
index 000000000..bd58021f7
--- /dev/null
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -0,0 +1,3172 @@
+//! Lints in the Rust compiler.
+//!
+//! This contains lints which can feasibly be implemented as their own
+//! AST visitor. Also see `rustc_session::lint::builtin`, which contains the
+//! definitions of lints that are emitted directly inside the main compiler.
+//!
+//! To add a new lint to rustc, declare it here using `declare_lint!()`.
+//! Then add code to emit the new lint in the appropriate circumstances.
+//! You can do that in an existing `LintPass` if it makes sense, or in a
+//! new `LintPass`, or using `Session::add_lint` elsewhere in the
+//! compiler. Only do the latter if the check can't be written cleanly as a
+//! `LintPass` (also, note that such lints will need to be defined in
+//! `rustc_session::lint::builtin`, not here).
+//!
+//! If you define a new `EarlyLintPass`, you will also need to add it to the
+//! `add_early_builtin!` or `add_early_builtin_with_new!` invocation in
+//! `lib.rs`. Use the former for unit-like structs and the latter for structs
+//! with a `pub fn new()`.
+//!
+//! If you define a new `LateLintPass`, you will also need to add it to the
+//! `late_lint_methods!` invocation in `lib.rs`.
+
+use crate::{
+ types::{transparent_newtype_field, CItemKind},
+ EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext,
+};
+use rustc_ast::attr;
+use rustc_ast::tokenstream::{TokenStream, TokenTree};
+use rustc_ast::visit::{FnCtxt, FnKind};
+use rustc_ast::{self as ast, *};
+use rustc_ast_pretty::pprust::{self, expr_to_string};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_errors::{
+ fluent, Applicability, Diagnostic, DiagnosticMessage, DiagnosticStyledString,
+ LintDiagnosticBuilder, MultiSpan,
+};
+use rustc_feature::{deprecated_attributes, AttributeGate, BuiltinAttribute, GateIssue, Stability};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalDefIdSet, CRATE_DEF_ID};
+use rustc_hir::{ForeignItemKind, GenericParamKind, HirId, PatKind, PredicateOrigin};
+use rustc_index::vec::Idx;
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::ty::layout::{LayoutError, LayoutOf};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::Instance;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_session::lint::{BuiltinLintDiagnostics, FutureIncompatibilityReason};
+use rustc_span::edition::Edition;
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{BytePos, InnerSpan, Span};
+use rustc_target::abi::VariantIdx;
+use rustc_trait_selection::traits::{self, misc::can_type_implement_copy};
+
+use crate::nonstandard_style::{method_context, MethodLateContext};
+
+use std::fmt::Write;
+use tracing::{debug, trace};
+
+// hardwired lints from librustc_middle
+pub use rustc_session::lint::builtin::*;
+
+declare_lint! {
+ /// The `while_true` lint detects `while true { }`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,no_run
+ /// while true {
+ ///
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// `while true` should be replaced with `loop`. A `loop` expression is
+ /// the preferred way to write an infinite loop because it more directly
+ /// expresses the intent of the loop.
+ WHILE_TRUE,
+ Warn,
+ "suggest using `loop { }` instead of `while true { }`"
+}
+
+declare_lint_pass!(WhileTrue => [WHILE_TRUE]);
+
+/// Traverses through any number of parentheses and returns the first non-parenthesized expression.
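+/// E.g. `(((true)))` pierces to `true`, so `while (((true)))` is still linted.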
+fn pierce_parens(mut expr: &ast::Expr) -> &ast::Expr {
+ while let ast::ExprKind::Paren(sub) = &expr.kind {
+ expr = sub;
+ }
+ expr
+}
+
+impl EarlyLintPass for WhileTrue {
+ fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
+ if let ast::ExprKind::While(cond, _, label) = &e.kind {
+ if let ast::ExprKind::Lit(ref lit) = pierce_parens(cond).kind {
+ if let ast::LitKind::Bool(true) = lit.kind {
+ if !lit.span.from_expansion() {
+ let condition_span = e.span.with_hi(cond.span.hi());
+ cx.struct_span_lint(WHILE_TRUE, condition_span, |lint| {
+ lint.build(fluent::lint::builtin_while_true)
+ .span_suggestion_short(
+ condition_span,
+ fluent::lint::suggestion,
+ format!(
+ "{}loop",
+ label.map_or_else(String::new, |label| format!(
+ "{}: ",
+ label.ident,
+ ))
+ ),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ })
+ }
+ }
+ }
+ }
+ }
+}
+
+declare_lint! {
+    /// The `box_pointers` lint detects use of the `Box` type.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(box_pointers)]
+ /// struct Foo {
+ /// x: Box<isize>,
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This lint is mostly historical, and not particularly useful. `Box<T>`
+ /// used to be built into the language, and the only way to do heap
+ /// allocation. Today's Rust can call into other allocators, etc.
+ BOX_POINTERS,
+ Allow,
+ "use of owned (Box type) heap memory"
+}
+
+declare_lint_pass!(BoxPointers => [BOX_POINTERS]);
+
+impl BoxPointers {
+ fn check_heap_type(&self, cx: &LateContext<'_>, span: Span, ty: Ty<'_>) {
+ for leaf in ty.walk() {
+ if let GenericArgKind::Type(leaf_ty) = leaf.unpack() {
+ if leaf_ty.is_box() {
+ cx.struct_span_lint(BOX_POINTERS, span, |lint| {
+ lint.build(fluent::lint::builtin_box_pointers).set_arg("ty", ty).emit();
+ });
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for BoxPointers {
+ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ match it.kind {
+ hir::ItemKind::Fn(..)
+ | hir::ItemKind::TyAlias(..)
+ | hir::ItemKind::Enum(..)
+ | hir::ItemKind::Struct(..)
+ | hir::ItemKind::Union(..) => {
+ self.check_heap_type(cx, it.span, cx.tcx.type_of(it.def_id))
+ }
+ _ => (),
+ }
+
+ // If it's a struct, we also have to check the fields' types
+ match it.kind {
+ hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
+ for struct_field in struct_def.fields() {
+ let def_id = cx.tcx.hir().local_def_id(struct_field.hir_id);
+ self.check_heap_type(cx, struct_field.span, cx.tcx.type_of(def_id));
+ }
+ }
+ _ => (),
+ }
+ }
+
+ fn check_expr(&mut self, cx: &LateContext<'_>, e: &hir::Expr<'_>) {
+ let ty = cx.typeck_results().node_type(e.hir_id);
+ self.check_heap_type(cx, e.span, ty);
+ }
+}
+
+declare_lint! {
+ /// The `non_shorthand_field_patterns` lint detects using `Struct { x: x }`
+ /// instead of `Struct { x }` in a pattern.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// struct Point {
+ /// x: i32,
+ /// y: i32,
+ /// }
+ ///
+ ///
+ /// fn main() {
+ /// let p = Point {
+ /// x: 5,
+ /// y: 5,
+ /// };
+ ///
+ /// match p {
+ /// Point { x: x, y: y } => (),
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The preferred style is to avoid the repetition of specifying both the
+ /// field name and the binding name if both identifiers are the same.
+ NON_SHORTHAND_FIELD_PATTERNS,
+ Warn,
+ "using `Struct { x: x }` instead of `Struct { x }` in a pattern"
+}
+
+declare_lint_pass!(NonShorthandFieldPatterns => [NON_SHORTHAND_FIELD_PATTERNS]);
+
+impl<'tcx> LateLintPass<'tcx> for NonShorthandFieldPatterns {
+ fn check_pat(&mut self, cx: &LateContext<'_>, pat: &hir::Pat<'_>) {
+ if let PatKind::Struct(ref qpath, field_pats, _) = pat.kind {
+ let variant = cx
+ .typeck_results()
+ .pat_ty(pat)
+ .ty_adt_def()
+ .expect("struct pattern type is not an ADT")
+ .variant_of_res(cx.qpath_res(qpath, pat.hir_id));
+ for fieldpat in field_pats {
+ if fieldpat.is_shorthand {
+ continue;
+ }
+ if fieldpat.span.from_expansion() {
+ // Don't lint if this is a macro expansion: macro authors
+ // shouldn't have to worry about this kind of style issue
+ // (Issue #49588)
+ continue;
+ }
+ if let PatKind::Binding(binding_annot, _, ident, None) = fieldpat.pat.kind {
+ if cx.tcx.find_field_index(ident, &variant)
+ == Some(cx.tcx.field_index(fieldpat.hir_id, cx.typeck_results()))
+ {
+ cx.struct_span_lint(NON_SHORTHAND_FIELD_PATTERNS, fieldpat.span, |lint| {
+ let binding = match binding_annot {
+ hir::BindingAnnotation::Unannotated => None,
+ hir::BindingAnnotation::Mutable => Some("mut"),
+ hir::BindingAnnotation::Ref => Some("ref"),
+ hir::BindingAnnotation::RefMut => Some("ref mut"),
+ };
+ let suggested_ident = if let Some(binding) = binding {
+ format!("{} {}", binding, ident)
+ } else {
+ ident.to_string()
+ };
+ lint.build(fluent::lint::builtin_non_shorthand_field_patterns)
+ .set_arg("ident", ident.clone())
+ .span_suggestion(
+ fieldpat.span,
+ fluent::lint::suggestion,
+ suggested_ident,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ }
+ }
+ }
+ }
+ }
+}
+
+declare_lint! {
+ /// The `unsafe_code` lint catches usage of `unsafe` code.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(unsafe_code)]
+ /// fn main() {
+ /// unsafe {
+ ///
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This lint is intended to restrict the usage of `unsafe`, which can be
+ /// difficult to use correctly.
+ UNSAFE_CODE,
+ Allow,
+ "usage of `unsafe` code"
+}
+
+declare_lint_pass!(UnsafeCode => [UNSAFE_CODE]);
+
+impl UnsafeCode {
+ fn report_unsafe(
+ &self,
+ cx: &EarlyContext<'_>,
+ span: Span,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ // This comes from a macro that has `#[allow_internal_unsafe]`.
+ if span.allows_unsafe() {
+ return;
+ }
+
+ cx.struct_span_lint(UNSAFE_CODE, span, decorate);
+ }
+
+ fn report_overridden_symbol_name(
+ &self,
+ cx: &EarlyContext<'_>,
+ span: Span,
+ msg: DiagnosticMessage,
+ ) {
+ self.report_unsafe(cx, span, |lint| {
+ lint.build(msg).note(fluent::lint::builtin_overridden_symbol_name).emit();
+ })
+ }
+
+ fn report_overridden_symbol_section(
+ &self,
+ cx: &EarlyContext<'_>,
+ span: Span,
+ msg: DiagnosticMessage,
+ ) {
+ self.report_unsafe(cx, span, |lint| {
+ lint.build(msg).note(fluent::lint::builtin_overridden_symbol_section).emit();
+ })
+ }
+}
+
+impl EarlyLintPass for UnsafeCode {
+ fn check_attribute(&mut self, cx: &EarlyContext<'_>, attr: &ast::Attribute) {
+ if attr.has_name(sym::allow_internal_unsafe) {
+ self.report_unsafe(cx, attr.span, |lint| {
+ lint.build(fluent::lint::builtin_allow_internal_unsafe).emit();
+ });
+ }
+ }
+
+ fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
+ if let ast::ExprKind::Block(ref blk, _) = e.kind {
+ // Don't warn about generated blocks; that'll just pollute the output.
+ if blk.rules == ast::BlockCheckMode::Unsafe(ast::UserProvided) {
+ self.report_unsafe(cx, blk.span, |lint| {
+ lint.build(fluent::lint::builtin_unsafe_block).emit();
+ });
+ }
+ }
+ }
+
+ fn check_item(&mut self, cx: &EarlyContext<'_>, it: &ast::Item) {
+ match it.kind {
+ ast::ItemKind::Trait(box ast::Trait { unsafety: ast::Unsafe::Yes(_), .. }) => self
+ .report_unsafe(cx, it.span, |lint| {
+ lint.build(fluent::lint::builtin_unsafe_trait).emit();
+ }),
+
+ ast::ItemKind::Impl(box ast::Impl { unsafety: ast::Unsafe::Yes(_), .. }) => self
+ .report_unsafe(cx, it.span, |lint| {
+ lint.build(fluent::lint::builtin_unsafe_impl).emit();
+ }),
+
+ ast::ItemKind::Fn(..) => {
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
+ self.report_overridden_symbol_name(
+ cx,
+ attr.span,
+ fluent::lint::builtin_no_mangle_fn,
+ );
+ }
+
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::export_name) {
+ self.report_overridden_symbol_name(
+ cx,
+ attr.span,
+ fluent::lint::builtin_export_name_fn,
+ );
+ }
+
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::link_section) {
+ self.report_overridden_symbol_section(
+ cx,
+ attr.span,
+ fluent::lint::builtin_link_section_fn,
+ );
+ }
+ }
+
+ ast::ItemKind::Static(..) => {
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
+ self.report_overridden_symbol_name(
+ cx,
+ attr.span,
+ fluent::lint::builtin_no_mangle_static,
+ );
+ }
+
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::export_name) {
+ self.report_overridden_symbol_name(
+ cx,
+ attr.span,
+ fluent::lint::builtin_export_name_static,
+ );
+ }
+
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::link_section) {
+ self.report_overridden_symbol_section(
+ cx,
+ attr.span,
+ fluent::lint::builtin_link_section_static,
+ );
+ }
+ }
+
+ _ => {}
+ }
+ }
+
+ fn check_impl_item(&mut self, cx: &EarlyContext<'_>, it: &ast::AssocItem) {
+ if let ast::AssocItemKind::Fn(..) = it.kind {
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::no_mangle) {
+ self.report_overridden_symbol_name(
+ cx,
+ attr.span,
+ fluent::lint::builtin_no_mangle_method,
+ );
+ }
+ if let Some(attr) = cx.sess().find_by_name(&it.attrs, sym::export_name) {
+ self.report_overridden_symbol_name(
+ cx,
+ attr.span,
+ fluent::lint::builtin_export_name_method,
+ );
+ }
+ }
+ }
+
+ fn check_fn(&mut self, cx: &EarlyContext<'_>, fk: FnKind<'_>, span: Span, _: ast::NodeId) {
+ if let FnKind::Fn(
+ ctxt,
+ _,
+ ast::FnSig { header: ast::FnHeader { unsafety: ast::Unsafe::Yes(_), .. }, .. },
+ _,
+ _,
+ body,
+ ) = fk
+ {
+ let msg = match ctxt {
+ FnCtxt::Foreign => return,
+ FnCtxt::Free => fluent::lint::builtin_decl_unsafe_fn,
+ FnCtxt::Assoc(_) if body.is_none() => fluent::lint::builtin_decl_unsafe_method,
+ FnCtxt::Assoc(_) => fluent::lint::builtin_impl_unsafe_method,
+ };
+ self.report_unsafe(cx, span, |lint| {
+ lint.build(msg).emit();
+ });
+ }
+ }
+}
+
+declare_lint! {
+ /// The `missing_docs` lint detects missing documentation for public items.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(missing_docs)]
+ /// pub fn foo() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// This lint is intended to ensure that a library is well-documented.
+    /// Without documentation, it can be difficult for users to understand
+    /// how to use items properly.
+ ///
+ /// This lint is "allow" by default because it can be noisy, and not all
+ /// projects may want to enforce everything to be documented.
+ pub MISSING_DOCS,
+ Allow,
+ "detects missing documentation for public members",
+ report_in_external_macro
+}
+
+pub struct MissingDoc {
+ /// Stack of whether `#[doc(hidden)]` is set at each level which has lint attributes.
+ doc_hidden_stack: Vec<bool>,
+}
+
+impl_lint_pass!(MissingDoc => [MISSING_DOCS]);
+
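+/// Whether `attr` counts as documentation: a doc comment, `#[doc = "..."]`,
+/// or a `#[doc(...)]` list containing `hidden`.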
+fn has_doc(attr: &ast::Attribute) -> bool {
+ if attr.is_doc_comment() {
+ return true;
+ }
+
+ if !attr.has_name(sym::doc) {
+ return false;
+ }
+
+ if attr.value_str().is_some() {
+ return true;
+ }
+
+ if let Some(list) = attr.meta_item_list() {
+ for meta in list {
+ if meta.has_name(sym::hidden) {
+ return true;
+ }
+ }
+ }
+
+ false
+}
+
+impl MissingDoc {
+ pub fn new() -> MissingDoc {
+ MissingDoc { doc_hidden_stack: vec![false] }
+ }
+
+ fn doc_hidden(&self) -> bool {
+ *self.doc_hidden_stack.last().expect("empty doc_hidden_stack")
+ }
+
+ fn check_missing_docs_attrs(
+ &self,
+ cx: &LateContext<'_>,
+ def_id: LocalDefId,
+ article: &'static str,
+ desc: &'static str,
+ ) {
+ // If we're building a test harness, then warning about
+ // documentation is probably not really relevant right now.
+ if cx.sess().opts.test {
+ return;
+ }
+
+        // `#[doc(hidden)]` disables the missing_docs check.
+ if self.doc_hidden() {
+ return;
+ }
+
+        // Only check publicly-visible items, using the result from the privacy pass.
+        // The crate root is exempt from the visibility check so that this
+        // function can also be used for it.
+ if def_id != CRATE_DEF_ID {
+ if !cx.access_levels.is_exported(def_id) {
+ return;
+ }
+ }
+
+ let attrs = cx.tcx.hir().attrs(cx.tcx.hir().local_def_id_to_hir_id(def_id));
+ let has_doc = attrs.iter().any(has_doc);
+ if !has_doc {
+ cx.struct_span_lint(MISSING_DOCS, cx.tcx.def_span(def_id), |lint| {
+ lint.build(fluent::lint::builtin_missing_doc)
+ .set_arg("article", article)
+ .set_arg("desc", desc)
+ .emit();
+ });
+ }
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for MissingDoc {
+ fn enter_lint_attrs(&mut self, _cx: &LateContext<'_>, attrs: &[ast::Attribute]) {
+ let doc_hidden = self.doc_hidden()
+ || attrs.iter().any(|attr| {
+ attr.has_name(sym::doc)
+ && match attr.meta_item_list() {
+ None => false,
+ Some(l) => attr::list_contains_name(&l, sym::hidden),
+ }
+ });
+ self.doc_hidden_stack.push(doc_hidden);
+ }
+
+ fn exit_lint_attrs(&mut self, _: &LateContext<'_>, _attrs: &[ast::Attribute]) {
+ self.doc_hidden_stack.pop().expect("empty doc_hidden_stack");
+ }
+
+ fn check_crate(&mut self, cx: &LateContext<'_>) {
+ self.check_missing_docs_attrs(cx, CRATE_DEF_ID, "the", "crate");
+ }
+
+ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ match it.kind {
+ hir::ItemKind::Trait(..) => {
+ // Issue #11592: traits are always considered exported, even when private.
+ if cx.tcx.visibility(it.def_id)
+ == ty::Visibility::Restricted(
+ cx.tcx.parent_module_from_def_id(it.def_id).to_def_id(),
+ )
+ {
+ return;
+ }
+ }
+ hir::ItemKind::TyAlias(..)
+ | hir::ItemKind::Fn(..)
+ | hir::ItemKind::Macro(..)
+ | hir::ItemKind::Mod(..)
+ | hir::ItemKind::Enum(..)
+ | hir::ItemKind::Struct(..)
+ | hir::ItemKind::Union(..)
+ | hir::ItemKind::Const(..)
+ | hir::ItemKind::Static(..) => {}
+
+ _ => return,
+ };
+
+ let (article, desc) = cx.tcx.article_and_description(it.def_id.to_def_id());
+
+ self.check_missing_docs_attrs(cx, it.def_id, article, desc);
+ }
+
+ fn check_trait_item(&mut self, cx: &LateContext<'_>, trait_item: &hir::TraitItem<'_>) {
+ let (article, desc) = cx.tcx.article_and_description(trait_item.def_id.to_def_id());
+
+ self.check_missing_docs_attrs(cx, trait_item.def_id, article, desc);
+ }
+
+ fn check_impl_item(&mut self, cx: &LateContext<'_>, impl_item: &hir::ImplItem<'_>) {
+ // If the method is an impl for a trait, don't doc.
+ if method_context(cx, impl_item.hir_id()) == MethodLateContext::TraitImpl {
+ return;
+ }
+
+        // If the method is an impl for an item marked `#[doc(hidden)]`, don't doc.
+ if method_context(cx, impl_item.hir_id()) == MethodLateContext::PlainImpl {
+ let parent = cx.tcx.hir().get_parent_item(impl_item.hir_id());
+ let impl_ty = cx.tcx.type_of(parent);
+ let outerdef = match impl_ty.kind() {
+ ty::Adt(def, _) => Some(def.did()),
+ ty::Foreign(def_id) => Some(*def_id),
+ _ => None,
+ };
+ let is_hidden = match outerdef {
+ Some(id) => cx.tcx.is_doc_hidden(id),
+ None => false,
+ };
+ if is_hidden {
+ return;
+ }
+ }
+
+ let (article, desc) = cx.tcx.article_and_description(impl_item.def_id.to_def_id());
+ self.check_missing_docs_attrs(cx, impl_item.def_id, article, desc);
+ }
+
+ fn check_foreign_item(&mut self, cx: &LateContext<'_>, foreign_item: &hir::ForeignItem<'_>) {
+ let (article, desc) = cx.tcx.article_and_description(foreign_item.def_id.to_def_id());
+ self.check_missing_docs_attrs(cx, foreign_item.def_id, article, desc);
+ }
+
+ fn check_field_def(&mut self, cx: &LateContext<'_>, sf: &hir::FieldDef<'_>) {
+ if !sf.is_positional() {
+ let def_id = cx.tcx.hir().local_def_id(sf.hir_id);
+ self.check_missing_docs_attrs(cx, def_id, "a", "struct field")
+ }
+ }
+
+ fn check_variant(&mut self, cx: &LateContext<'_>, v: &hir::Variant<'_>) {
+ self.check_missing_docs_attrs(cx, cx.tcx.hir().local_def_id(v.id), "a", "variant");
+ }
+}
+
+declare_lint! {
+ /// The `missing_copy_implementations` lint detects potentially-forgotten
+ /// implementations of [`Copy`].
+ ///
+ /// [`Copy`]: https://doc.rust-lang.org/std/marker/trait.Copy.html
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(missing_copy_implementations)]
+ /// pub struct Foo {
+ /// pub field: i32
+ /// }
+ /// # fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Historically (before 1.0), types were automatically marked as `Copy`
+ /// if possible. This was changed so that it required an explicit opt-in
+ /// by implementing the `Copy` trait. As part of this change, a lint was
+ /// added to alert if a copyable type was not marked `Copy`.
+ ///
+ /// This lint is "allow" by default because this code isn't bad; it is
+ /// common to write newtypes like this specifically so that a `Copy` type
+ /// is no longer `Copy`. `Copy` types can result in unintended copies of
+ /// large data which can impact performance.
+ pub MISSING_COPY_IMPLEMENTATIONS,
+ Allow,
+ "detects potentially-forgotten implementations of `Copy`"
+}
+
+declare_lint_pass!(MissingCopyImplementations => [MISSING_COPY_IMPLEMENTATIONS]);
+
+impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations {
+ fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
+ if !cx.access_levels.is_reachable(item.def_id) {
+ return;
+ }
+ let (def, ty) = match item.kind {
+ hir::ItemKind::Struct(_, ref ast_generics) => {
+ if !ast_generics.params.is_empty() {
+ return;
+ }
+ let def = cx.tcx.adt_def(item.def_id);
+ (def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
+ }
+ hir::ItemKind::Union(_, ref ast_generics) => {
+ if !ast_generics.params.is_empty() {
+ return;
+ }
+ let def = cx.tcx.adt_def(item.def_id);
+ (def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
+ }
+ hir::ItemKind::Enum(_, ref ast_generics) => {
+ if !ast_generics.params.is_empty() {
+ return;
+ }
+ let def = cx.tcx.adt_def(item.def_id);
+ (def, cx.tcx.mk_adt(def, cx.tcx.intern_substs(&[])))
+ }
+ _ => return,
+ };
+ if def.has_dtor(cx.tcx) {
+ return;
+ }
+ let param_env = ty::ParamEnv::empty();
+ if ty.is_copy_modulo_regions(cx.tcx.at(item.span), param_env) {
+ return;
+ }
+ if can_type_implement_copy(
+ cx.tcx,
+ param_env,
+ ty,
+ traits::ObligationCause::misc(item.span, item.hir_id()),
+ )
+ .is_ok()
+ {
+ cx.struct_span_lint(MISSING_COPY_IMPLEMENTATIONS, item.span, |lint| {
+ lint.build(fluent::lint::builtin_missing_copy_impl).emit();
+ })
+ }
+ }
+}
+
+declare_lint! {
+ /// The `missing_debug_implementations` lint detects missing
+ /// implementations of [`fmt::Debug`].
+ ///
+ /// [`fmt::Debug`]: https://doc.rust-lang.org/std/fmt/trait.Debug.html
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(missing_debug_implementations)]
+ /// pub struct Foo;
+ /// # fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Having a `Debug` implementation on all types can assist with
+ /// debugging, as it provides a convenient way to format and display a
+ /// value. Using the `#[derive(Debug)]` attribute will automatically
+ /// generate a typical implementation, or a custom implementation can be
+ /// added by manually implementing the `Debug` trait.
+ ///
+ /// This lint is "allow" by default because adding `Debug` to all types can
+ /// have a negative impact on compile time and code size. It also requires
+ /// boilerplate to be added to every type, which can be an impediment.
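+ ///
+ /// A minimal sketch of the derive-based fix for the example above:
+ ///
+ /// ```rust
+ /// #[derive(Debug)]
+ /// pub struct Foo;
+ /// # fn main() {}
+ /// ```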
+ MISSING_DEBUG_IMPLEMENTATIONS,
+ Allow,
+ "detects missing implementations of Debug"
+}
+
+#[derive(Default)]
+pub struct MissingDebugImplementations {
+ impling_types: Option<LocalDefIdSet>,
+}
+
+impl_lint_pass!(MissingDebugImplementations => [MISSING_DEBUG_IMPLEMENTATIONS]);
+
+impl<'tcx> LateLintPass<'tcx> for MissingDebugImplementations {
+ fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
+ if !cx.access_levels.is_reachable(item.def_id) {
+ return;
+ }
+
+ match item.kind {
+ hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) | hir::ItemKind::Enum(..) => {}
+ _ => return,
+ }
+
+ let Some(debug) = cx.tcx.get_diagnostic_item(sym::Debug) else {
+ return
+ };
+
+ if self.impling_types.is_none() {
+ let mut impls = LocalDefIdSet::default();
+ cx.tcx.for_each_impl(debug, |d| {
+ if let Some(ty_def) = cx.tcx.type_of(d).ty_adt_def() {
+ if let Some(def_id) = ty_def.did().as_local() {
+ impls.insert(def_id);
+ }
+ }
+ });
+
+ self.impling_types = Some(impls);
+ debug!("{:?}", self.impling_types);
+ }
+
+ if !self.impling_types.as_ref().unwrap().contains(&item.def_id) {
+ cx.struct_span_lint(MISSING_DEBUG_IMPLEMENTATIONS, item.span, |lint| {
+ lint.build(fluent::lint::builtin_missing_debug_impl)
+ .set_arg("debug", cx.tcx.def_path_str(debug))
+ .emit();
+ });
+ }
+ }
+}
+
+declare_lint! {
+ /// The `anonymous_parameters` lint detects anonymous parameters in trait
+ /// definitions.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,edition2015,compile_fail
+ /// #![deny(anonymous_parameters)]
+ /// // edition 2015
+ /// pub trait Foo {
+ /// fn foo(usize);
+ /// }
+ /// fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This syntax is mostly a historical accident, and can be worked around
+ /// quite easily by adding an `_` pattern or a descriptive identifier:
+ ///
+ /// ```rust
+ /// trait Foo {
+ /// fn foo(_: usize);
+ /// }
+ /// ```
+ ///
+ /// This syntax is now a hard error in the 2018 edition. In the 2015
+ /// edition, this lint is "warn" by default. This lint
+ /// enables the [`cargo fix`] tool with the `--edition` flag to
+ /// automatically transition old code from the 2015 edition to 2018. The
+ /// tool will run this lint and automatically apply the
+ /// suggested fix from the compiler (which is to add `_` to each
+ /// parameter). This provides a completely automated way to update old
+ /// code for a new edition. See [issue #41686] for more details.
+ ///
+ /// [issue #41686]: https://github.com/rust-lang/rust/issues/41686
+ /// [`cargo fix`]: https://doc.rust-lang.org/cargo/commands/cargo-fix.html
+ pub ANONYMOUS_PARAMETERS,
+ Warn,
+ "detects anonymous parameters",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #41686 <https://github.com/rust-lang/rust/issues/41686>",
+ reason: FutureIncompatibilityReason::EditionError(Edition::Edition2018),
+ };
+}
+
+declare_lint_pass!(
+ /// Checks for use of anonymous parameters (RFC 1685).
+ AnonymousParameters => [ANONYMOUS_PARAMETERS]
+);
+
+impl EarlyLintPass for AnonymousParameters {
+ fn check_trait_item(&mut self, cx: &EarlyContext<'_>, it: &ast::AssocItem) {
+ if cx.sess().edition() != Edition::Edition2015 {
+ // This is a hard error in future editions; avoid linting and erroring
+ return;
+ }
+ if let ast::AssocItemKind::Fn(box Fn { ref sig, .. }) = it.kind {
+ for arg in sig.decl.inputs.iter() {
+ if let ast::PatKind::Ident(_, ident, None) = arg.pat.kind {
+ if ident.name == kw::Empty {
+ cx.struct_span_lint(ANONYMOUS_PARAMETERS, arg.pat.span, |lint| {
+ let ty_snip = cx.sess().source_map().span_to_snippet(arg.ty.span);
+
+ let (ty_snip, appl) = if let Ok(ref snip) = ty_snip {
+ (snip.as_str(), Applicability::MachineApplicable)
+ } else {
+ ("<type>", Applicability::HasPlaceholders)
+ };
+
+ lint.build(fluent::lint::builtin_anonymous_params)
+ .span_suggestion(
+ arg.pat.span,
+ fluent::lint::suggestion,
+ format!("_: {}", ty_snip),
+ appl,
+ )
+ .emit();
+ })
+ }
+ }
+ }
+ }
+ }
+}
+
+/// Check for use of attributes which have been deprecated.
+#[derive(Clone)]
+pub struct DeprecatedAttr {
+ // This is not free to compute, so we want to keep it around, rather than
+ // compute it for every attribute.
+ depr_attrs: Vec<&'static BuiltinAttribute>,
+}
+
+impl_lint_pass!(DeprecatedAttr => []);
+
+impl DeprecatedAttr {
+ pub fn new() -> DeprecatedAttr {
+ DeprecatedAttr { depr_attrs: deprecated_attributes() }
+ }
+}
+
+impl EarlyLintPass for DeprecatedAttr {
+ fn check_attribute(&mut self, cx: &EarlyContext<'_>, attr: &ast::Attribute) {
+ for BuiltinAttribute { name, gate, .. } in &self.depr_attrs {
+ if attr.ident().map(|ident| ident.name) == Some(*name) {
+ if let &AttributeGate::Gated(
+ Stability::Deprecated(link, suggestion),
+ name,
+ reason,
+ _,
+ ) = gate
+ {
+ cx.struct_span_lint(DEPRECATED, attr.span, |lint| {
+ // FIXME(davidtwco) translatable deprecated attr
+ lint.build(fluent::lint::builtin_deprecated_attr_link)
+ .set_arg("name", name)
+ .set_arg("reason", reason)
+ .set_arg("link", link)
+ .span_suggestion_short(
+ attr.span,
+ suggestion.map(|s| s.into()).unwrap_or(
+ fluent::lint::builtin_deprecated_attr_default_suggestion,
+ ),
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ }
+ return;
+ }
+ }
+ if attr.has_name(sym::no_start) || attr.has_name(sym::crate_id) {
+ cx.struct_span_lint(DEPRECATED, attr.span, |lint| {
+ lint.build(fluent::lint::builtin_deprecated_attr_used)
+ .set_arg("name", pprust::path_to_string(&attr.get_normal_item().path))
+ .span_suggestion_short(
+ attr.span,
+ fluent::lint::builtin_deprecated_attr_default_suggestion,
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ }
+ }
+}
+
+fn warn_if_doc(cx: &EarlyContext<'_>, node_span: Span, node_kind: &str, attrs: &[ast::Attribute]) {
+ use rustc_ast::token::CommentKind;
+
+ let mut attrs = attrs.iter().peekable();
+
+ // Accumulate a single span for sugared doc comments.
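+ // E.g. three consecutive `///` lines are reported with one combined span
+ // rather than three separate warnings.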
+ let mut sugared_span: Option<Span> = None;
+
+ while let Some(attr) = attrs.next() {
+ let is_doc_comment = attr.is_doc_comment();
+ if is_doc_comment {
+ sugared_span =
+ Some(sugared_span.map_or(attr.span, |span| span.with_hi(attr.span.hi())));
+ }
+
+ if attrs.peek().map_or(false, |next_attr| next_attr.is_doc_comment()) {
+ continue;
+ }
+
+ let span = sugared_span.take().unwrap_or(attr.span);
+
+ if is_doc_comment || attr.has_name(sym::doc) {
+ cx.struct_span_lint(UNUSED_DOC_COMMENTS, span, |lint| {
+ let mut err = lint.build(fluent::lint::builtin_unused_doc_comment);
+ err.set_arg("kind", node_kind);
+ err.span_label(node_span, fluent::lint::label);
+ match attr.kind {
+ AttrKind::DocComment(CommentKind::Line, _) | AttrKind::Normal(..) => {
+ err.help(fluent::lint::plain_help);
+ }
+ AttrKind::DocComment(CommentKind::Block, _) => {
+ err.help(fluent::lint::block_help);
+ }
+ }
+ err.emit();
+ });
+ }
+ }
+}
+
+impl EarlyLintPass for UnusedDocComment {
+ fn check_stmt(&mut self, cx: &EarlyContext<'_>, stmt: &ast::Stmt) {
+ let kind = match stmt.kind {
+ ast::StmtKind::Local(..) => "statements",
+ // Disabled pending discussion in #78306
+ ast::StmtKind::Item(..) => return,
+ // expressions will be reported by `check_expr`.
+ ast::StmtKind::Empty
+ | ast::StmtKind::Semi(_)
+ | ast::StmtKind::Expr(_)
+ | ast::StmtKind::MacCall(_) => return,
+ };
+
+ warn_if_doc(cx, stmt.span, kind, stmt.kind.attrs());
+ }
+
+ fn check_arm(&mut self, cx: &EarlyContext<'_>, arm: &ast::Arm) {
+ let arm_span = arm.pat.span.with_hi(arm.body.span.hi());
+ warn_if_doc(cx, arm_span, "match arms", &arm.attrs);
+ }
+
+ fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &ast::Expr) {
+ warn_if_doc(cx, expr.span, "expressions", &expr.attrs);
+ }
+
+ fn check_generic_param(&mut self, cx: &EarlyContext<'_>, param: &ast::GenericParam) {
+ warn_if_doc(cx, param.ident.span, "generic parameters", &param.attrs);
+ }
+
+ fn check_block(&mut self, cx: &EarlyContext<'_>, block: &ast::Block) {
+ warn_if_doc(cx, block.span, "blocks", &block.attrs());
+ }
+
+ fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) {
+ if let ast::ItemKind::ForeignMod(_) = item.kind {
+ warn_if_doc(cx, item.span, "extern blocks", &item.attrs);
+ }
+ }
+}
+
+declare_lint! {
+ /// The `no_mangle_const_items` lint detects any `const` items with the
+ /// [`no_mangle` attribute].
+ ///
+ /// [`no_mangle` attribute]: https://doc.rust-lang.org/reference/abi.html#the-no_mangle-attribute
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #[no_mangle]
+ /// const FOO: i32 = 5;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Constants do not have their symbols exported, and therefore, this
+ /// probably means you meant to use a [`static`], not a [`const`].
+ ///
+ /// [`static`]: https://doc.rust-lang.org/reference/items/static-items.html
+ /// [`const`]: https://doc.rust-lang.org/reference/items/constant-items.html
+ NO_MANGLE_CONST_ITEMS,
+ Deny,
+ "const items will not have their symbols exported"
+}
+
+declare_lint! {
+ /// The `no_mangle_generic_items` lint detects generic items that must be
+ /// mangled.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #[no_mangle]
+ /// fn foo<T>(t: T) {
+ ///
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A function with generics must have its symbol mangled to accommodate
+ /// the generic parameter. The [`no_mangle` attribute] has no effect in
+ /// this situation, and should be removed.
+ ///
+ /// [`no_mangle` attribute]: https://doc.rust-lang.org/reference/abi.html#the-no_mangle-attribute
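+ ///
+ /// One possible fix (a sketch; `foo_i32` is a hypothetical name): remove
+ /// the attribute, or export a concrete, hand-monomorphized entry point:
+ ///
+ /// ```rust
+ /// #[no_mangle]
+ /// pub extern "C" fn foo_i32(t: i32) { let _ = t; }
+ /// ```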
+ NO_MANGLE_GENERIC_ITEMS,
+ Warn,
+ "generic items must be mangled"
+}
+
+declare_lint_pass!(InvalidNoMangleItems => [NO_MANGLE_CONST_ITEMS, NO_MANGLE_GENERIC_ITEMS]);
+
+impl<'tcx> LateLintPass<'tcx> for InvalidNoMangleItems {
+ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ let attrs = cx.tcx.hir().attrs(it.hir_id());
+ let check_no_mangle_on_generic_fn = |no_mangle_attr: &ast::Attribute,
+ impl_generics: Option<&hir::Generics<'_>>,
+ generics: &hir::Generics<'_>,
+ span| {
+ for param in
+ generics.params.iter().chain(impl_generics.map(|g| g.params).into_iter().flatten())
+ {
+ match param.kind {
+ GenericParamKind::Lifetime { .. } => {}
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
+ cx.struct_span_lint(NO_MANGLE_GENERIC_ITEMS, span, |lint| {
+ lint.build(fluent::lint::builtin_no_mangle_generic)
+ .span_suggestion_short(
+ no_mangle_attr.span,
+ fluent::lint::suggestion,
+ "",
+ // Use of `#[no_mangle]` suggests FFI intent; correct
+ // fix may be to monomorphize source by hand
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ });
+ break;
+ }
+ }
+ }
+ };
+ match it.kind {
+ hir::ItemKind::Fn(.., ref generics, _) => {
+ if let Some(no_mangle_attr) = cx.sess().find_by_name(attrs, sym::no_mangle) {
+ check_no_mangle_on_generic_fn(no_mangle_attr, None, generics, it.span);
+ }
+ }
+ hir::ItemKind::Const(..) => {
+ if cx.sess().contains_name(attrs, sym::no_mangle) {
+ // Const items do not refer to a particular location in memory, and therefore
+ // don't have anything to attach a symbol to
+ cx.struct_span_lint(NO_MANGLE_CONST_ITEMS, it.span, |lint| {
+ let mut err = lint.build(fluent::lint::builtin_const_no_mangle);
+
+ // account for "pub const" (#45562)
+ let start = cx
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(it.span)
+ .map(|snippet| snippet.find("const").unwrap_or(0))
+ .unwrap_or(0) as u32;
+ // `const` is 5 chars
+ let const_span = it.span.with_hi(BytePos(it.span.lo().0 + start + 5));
+ err.span_suggestion(
+ const_span,
+ fluent::lint::suggestion,
+ "pub static",
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ });
+ }
+ }
+ hir::ItemKind::Impl(hir::Impl { generics, items, .. }) => {
+ for it in *items {
+ if let hir::AssocItemKind::Fn { .. } = it.kind {
+ if let Some(no_mangle_attr) = cx
+ .sess()
+ .find_by_name(cx.tcx.hir().attrs(it.id.hir_id()), sym::no_mangle)
+ {
+ check_no_mangle_on_generic_fn(
+ no_mangle_attr,
+ Some(generics),
+ cx.tcx.hir().get_generics(it.id.def_id).unwrap(),
+ it.span,
+ );
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+}
+
+declare_lint! {
+ /// The `mutable_transmutes` lint catches transmuting from `&T` to `&mut
+ /// T` because it is [undefined behavior].
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// unsafe {
+ /// let y = std::mem::transmute::<&i32, &mut i32>(&5);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Certain assumptions are made about aliasing of data, and this transmute
+ /// violates those assumptions. Consider using [`UnsafeCell`] instead.
+ ///
+ /// [`UnsafeCell`]: https://doc.rust-lang.org/std/cell/struct.UnsafeCell.html
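+ ///
+ /// A sketch of the `UnsafeCell` alternative, assuming interior mutability
+ /// is what was actually intended:
+ ///
+ /// ```rust
+ /// use std::cell::UnsafeCell;
+ /// let x = UnsafeCell::new(5);
+ /// // Mutate through the cell instead of transmuting a shared reference.
+ /// unsafe { *x.get() = 6; }
+ /// assert_eq!(unsafe { *x.get() }, 6);
+ /// ```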
+ MUTABLE_TRANSMUTES,
+ Deny,
+ "transmuting &T to &mut T is undefined behavior, even if the reference is unused"
+}
+
+declare_lint_pass!(MutableTransmutes => [MUTABLE_TRANSMUTES]);
+
+impl<'tcx> LateLintPass<'tcx> for MutableTransmutes {
+ fn check_expr(&mut self, cx: &LateContext<'_>, expr: &hir::Expr<'_>) {
+ if let Some((&ty::Ref(_, _, from_mt), &ty::Ref(_, _, to_mt))) =
+ get_transmute_from_to(cx, expr).map(|(ty1, ty2)| (ty1.kind(), ty2.kind()))
+ {
+ if to_mt == hir::Mutability::Mut && from_mt == hir::Mutability::Not {
+ cx.struct_span_lint(MUTABLE_TRANSMUTES, expr.span, |lint| {
+ lint.build(fluent::lint::builtin_mutable_transmutes).emit();
+ });
+ }
+ }
+
+ fn get_transmute_from_to<'tcx>(
+ cx: &LateContext<'tcx>,
+ expr: &hir::Expr<'_>,
+ ) -> Option<(Ty<'tcx>, Ty<'tcx>)> {
+ let def = if let hir::ExprKind::Path(ref qpath) = expr.kind {
+ cx.qpath_res(qpath, expr.hir_id)
+ } else {
+ return None;
+ };
+ if let Res::Def(DefKind::Fn, did) = def {
+ if !def_id_is_transmute(cx, did) {
+ return None;
+ }
+ let sig = cx.typeck_results().node_type(expr.hir_id).fn_sig(cx.tcx);
+ let from = sig.inputs().skip_binder()[0];
+ let to = sig.output().skip_binder();
+ return Some((from, to));
+ }
+ None
+ }
+
+ fn def_id_is_transmute(cx: &LateContext<'_>, def_id: DefId) -> bool {
+ cx.tcx.is_intrinsic(def_id) && cx.tcx.item_name(def_id) == sym::transmute
+ }
+ }
+}
+
+declare_lint! {
+ /// The `unstable_features` lint is deprecated and should no longer be used.
+ UNSTABLE_FEATURES,
+ Allow,
+ "enabling unstable features (deprecated. do not use)"
+}
+
+declare_lint_pass!(
+ /// Forbids using the `#[feature(...)]` attribute
+ UnstableFeatures => [UNSTABLE_FEATURES]
+);
+
+impl<'tcx> LateLintPass<'tcx> for UnstableFeatures {
+ fn check_attribute(&mut self, cx: &LateContext<'_>, attr: &ast::Attribute) {
+ if attr.has_name(sym::feature) {
+ if let Some(items) = attr.meta_item_list() {
+ for item in items {
+ cx.struct_span_lint(UNSTABLE_FEATURES, item.span(), |lint| {
+ lint.build(fluent::lint::builtin_unstable_features).emit();
+ });
+ }
+ }
+ }
+ }
+}
+
+declare_lint! {
+ /// The `unreachable_pub` lint triggers for `pub` items not reachable from
+ /// the crate root.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(unreachable_pub)]
+ /// mod foo {
+ /// pub mod bar {
+ ///
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A bare `pub` visibility may be misleading if the item is not actually
+ /// publicly exported from the crate. The `pub(crate)` visibility is
+ /// recommended to be used instead, which more clearly expresses the intent
+ /// that the item is only visible within its own crate.
+ ///
+ /// This lint is "allow" by default because it will trigger for a large
+ /// amount existing Rust code, and has some false-positives. Eventually it
+ /// is desired for this to become warn-by-default.
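+ ///
+ /// The suggested rewrite of the example above:
+ ///
+ /// ```rust
+ /// mod foo {
+ ///     pub(crate) mod bar {}
+ /// }
+ /// ```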
+ pub UNREACHABLE_PUB,
+ Allow,
+ "`pub` items not reachable from crate root"
+}
+
+declare_lint_pass!(
+ /// Lint for items marked `pub` that aren't reachable from other crates.
+ UnreachablePub => [UNREACHABLE_PUB]
+);
+
+impl UnreachablePub {
+ fn perform_lint(
+ &self,
+ cx: &LateContext<'_>,
+ what: &str,
+ def_id: LocalDefId,
+ vis_span: Span,
+ exportable: bool,
+ ) {
+ let mut applicability = Applicability::MachineApplicable;
+ if cx.tcx.visibility(def_id).is_public() && !cx.access_levels.is_reachable(def_id) {
+ if vis_span.from_expansion() {
+ applicability = Applicability::MaybeIncorrect;
+ }
+ let def_span = cx.tcx.def_span(def_id);
+ cx.struct_span_lint(UNREACHABLE_PUB, def_span, |lint| {
+ let mut err = lint.build(fluent::lint::builtin_unreachable_pub);
+ err.set_arg("what", what);
+
+ err.span_suggestion(
+ vis_span,
+ fluent::lint::suggestion,
+ "pub(crate)",
+ applicability,
+ );
+ if exportable {
+ err.help(fluent::lint::help);
+ }
+ err.emit();
+ });
+ }
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for UnreachablePub {
+ fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
+ // Do not warn for fake `use` statements.
+ if let hir::ItemKind::Use(_, hir::UseKind::ListStem) = &item.kind {
+ return;
+ }
+ self.perform_lint(cx, "item", item.def_id, item.vis_span, true);
+ }
+
+ fn check_foreign_item(&mut self, cx: &LateContext<'_>, foreign_item: &hir::ForeignItem<'tcx>) {
+ self.perform_lint(cx, "item", foreign_item.def_id, foreign_item.vis_span, true);
+ }
+
+ fn check_field_def(&mut self, cx: &LateContext<'_>, field: &hir::FieldDef<'_>) {
+ let def_id = cx.tcx.hir().local_def_id(field.hir_id);
+ self.perform_lint(cx, "field", def_id, field.vis_span, false);
+ }
+
+ fn check_impl_item(&mut self, cx: &LateContext<'_>, impl_item: &hir::ImplItem<'_>) {
+ // Only lint inherent impl items.
+ if cx.tcx.associated_item(impl_item.def_id).trait_item_def_id.is_none() {
+ self.perform_lint(cx, "item", impl_item.def_id, impl_item.vis_span, false);
+ }
+ }
+}
+
+declare_lint! {
+ /// The `type_alias_bounds` lint detects bounds in type aliases.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// type SendVec<T: Send> = Vec<T>;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The trait bounds in a type alias are currently ignored, and should not
+ /// be included to avoid confusion. This was previously allowed
+ /// unintentionally; this may become a hard error in the future.
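+ ///
+ /// The alias from the example behaves identically without the bound:
+ ///
+ /// ```rust
+ /// type SendVec<T> = Vec<T>;
+ /// ```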
+ TYPE_ALIAS_BOUNDS,
+ Warn,
+ "bounds in type aliases are not enforced"
+}
+
+declare_lint_pass!(
+ /// Lint for trait and lifetime bounds in type aliases being mostly ignored.
+ /// They are relevant when using associated types, but otherwise neither checked
+ /// at definition site nor enforced at use site.
+ TypeAliasBounds => [TYPE_ALIAS_BOUNDS]
+);
+
+impl TypeAliasBounds {
+ fn is_type_variable_assoc(qpath: &hir::QPath<'_>) -> bool {
+ match *qpath {
+ hir::QPath::TypeRelative(ref ty, _) => {
+ // If this is a type variable, we found a `T::Assoc`.
+ match ty.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => {
+ matches!(path.res, Res::Def(DefKind::TyParam, _))
+ }
+ _ => false,
+ }
+ }
+ hir::QPath::Resolved(..) | hir::QPath::LangItem(..) => false,
+ }
+ }
+
+ fn suggest_changing_assoc_types(ty: &hir::Ty<'_>, err: &mut Diagnostic) {
+ // Access to associated types should use `<T as Bound>::Assoc`, which does not need a
+ // bound. Let's see if this type does that.
+
+ // We use a HIR visitor to walk the type.
+ use rustc_hir::intravisit::{self, Visitor};
+ struct WalkAssocTypes<'a> {
+ err: &'a mut Diagnostic,
+ }
+ impl Visitor<'_> for WalkAssocTypes<'_> {
+ fn visit_qpath(&mut self, qpath: &hir::QPath<'_>, id: hir::HirId, span: Span) {
+ if TypeAliasBounds::is_type_variable_assoc(qpath) {
+ self.err.span_help(span, fluent::lint::builtin_type_alias_bounds_help);
+ }
+ intravisit::walk_qpath(self, qpath, id, span)
+ }
+ }
+
+ // Let's go for a walk!
+ let mut visitor = WalkAssocTypes { err };
+ visitor.visit_ty(ty);
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for TypeAliasBounds {
+ fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) {
+ let hir::ItemKind::TyAlias(ty, type_alias_generics) = &item.kind else {
+ return
+ };
+ if let hir::TyKind::OpaqueDef(..) = ty.kind {
+ // Bounds are respected for `type X = impl Trait`
+ return;
+ }
+ // There must not be a where clause
+ if type_alias_generics.predicates.is_empty() {
+ return;
+ }
+
+ let mut where_spans = Vec::new();
+ let mut inline_spans = Vec::new();
+ let mut inline_sugg = Vec::new();
+ for p in type_alias_generics.predicates {
+ let span = p.span();
+ if p.in_where_clause() {
+ where_spans.push(span);
+ } else {
+ for b in p.bounds() {
+ inline_spans.push(b.span());
+ }
+ inline_sugg.push((span, String::new()));
+ }
+ }
+
+ let mut suggested_changing_assoc_types = false;
+ if !where_spans.is_empty() {
+ cx.lint(TYPE_ALIAS_BOUNDS, |lint| {
+ let mut err = lint.build(fluent::lint::builtin_type_alias_where_clause);
+ err.set_span(where_spans);
+ err.span_suggestion(
+ type_alias_generics.where_clause_span,
+ fluent::lint::suggestion,
+ "",
+ Applicability::MachineApplicable,
+ );
+ if !suggested_changing_assoc_types {
+ TypeAliasBounds::suggest_changing_assoc_types(ty, &mut err);
+ suggested_changing_assoc_types = true;
+ }
+ err.emit();
+ });
+ }
+
+ if !inline_spans.is_empty() {
+ cx.lint(TYPE_ALIAS_BOUNDS, |lint| {
+ let mut err = lint.build(fluent::lint::builtin_type_alias_generic_bounds);
+ err.set_span(inline_spans);
+ err.multipart_suggestion(
+ fluent::lint::suggestion,
+ inline_sugg,
+ Applicability::MachineApplicable,
+ );
+ if !suggested_changing_assoc_types {
+ TypeAliasBounds::suggest_changing_assoc_types(ty, &mut err);
+ }
+ err.emit();
+ });
+ }
+ }
+}
+
+declare_lint_pass!(
+ /// Lint constants that are erroneous.
+ /// Without this lint, we might not get any diagnostic if the constant is
+ /// unused within this crate, even though downstream crates can't use it
+ /// without producing an error.
+ UnusedBrokenConst => []
+);
+
+impl<'tcx> LateLintPass<'tcx> for UnusedBrokenConst {
+ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ match it.kind {
+ hir::ItemKind::Const(_, body_id) => {
+ let def_id = cx.tcx.hir().body_owner_def_id(body_id).to_def_id();
+ // trigger the query once for all constants since that will already report the errors
+ cx.tcx.ensure().const_eval_poly(def_id);
+ }
+ hir::ItemKind::Static(_, _, body_id) => {
+ let def_id = cx.tcx.hir().body_owner_def_id(body_id).to_def_id();
+ cx.tcx.ensure().eval_static_initializer(def_id);
+ }
+ _ => {}
+ }
+ }
+}
+
+declare_lint! {
+ /// The `trivial_bounds` lint detects trait bounds that don't depend on
+ /// any type parameters.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(trivial_bounds)]
+ /// pub struct A where i32: Copy;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Usually you would not write a trait bound that you know is always
+ /// true, or never true. However, when using macros, the macro may not
+ /// know whether or not the constraint would hold or not at the time when
+ /// generating the code. Currently, the compiler does not alert you if the
+ /// constraint is always true, and generates an error if it is never true.
+ /// The `trivial_bounds` feature changes this to be a warning in both
+ /// cases, giving macros more freedom and flexibility to generate code,
+ /// while still providing a signal when writing non-macro code that
+ /// something is amiss.
+ ///
+ /// See [RFC 2056] for more details. This feature is currently only
+ /// available on the nightly channel, see [tracking issue #48214].
+ ///
+ /// [RFC 2056]: https://github.com/rust-lang/rfcs/blob/master/text/2056-allow-trivial-where-clause-constraints.md
+ /// [tracking issue #48214]: https://github.com/rust-lang/rust/issues/48214
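+ ///
+ /// A sketch of the macro case: the macro cannot know whether the bound it
+ /// emits will be trivial for a given instantiation:
+ ///
+ /// ```rust
+ /// #![feature(trivial_bounds)]
+ /// macro_rules! require_copy {
+ ///     ($t:ty) => {
+ ///         fn check() where $t: Copy {}
+ ///     };
+ /// }
+ /// require_copy!(i32); // expands to a trivially-true bound
+ /// ```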
+ TRIVIAL_BOUNDS,
+ Warn,
+ "these bounds don't depend on an type parameters"
+}
+
+declare_lint_pass!(
+ /// Lint for trait and lifetime bounds that don't depend on type parameters
+ /// which either do nothing, or stop the item from being used.
+ TrivialConstraints => [TRIVIAL_BOUNDS]
+);
+
+impl<'tcx> LateLintPass<'tcx> for TrivialConstraints {
+ fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
+ use rustc_middle::ty::visit::TypeVisitable;
+ use rustc_middle::ty::PredicateKind::*;
+
+ if cx.tcx.features().trivial_bounds {
+ let predicates = cx.tcx.predicates_of(item.def_id);
+ for &(predicate, span) in predicates.predicates {
+ let predicate_kind_name = match predicate.kind().skip_binder() {
+ Trait(..) => "trait",
+ TypeOutlives(..) |
+ RegionOutlives(..) => "lifetime",
+
+ // Ignore projections, as they can only be global
+ // if the trait bound is global
+ Projection(..) |
+ // Ignore bounds that a user can't type
+ WellFormed(..) |
+ ObjectSafe(..) |
+ ClosureKind(..) |
+ Subtype(..) |
+ Coerce(..) |
+ ConstEvaluatable(..) |
+ ConstEquate(..) |
+ TypeWellFormedFromEnv(..) => continue,
+ };
+ if predicate.is_global() {
+ cx.struct_span_lint(TRIVIAL_BOUNDS, span, |lint| {
+ lint.build(fluent::lint::builtin_trivial_bounds)
+ .set_arg("predicate_kind_name", predicate_kind_name)
+ .set_arg("predicate", predicate)
+ .emit();
+ });
+ }
+ }
+ }
+ }
+}
+
+declare_lint_pass!(
+ /// Does nothing as a lint pass, but registers some `Lint`s
+ /// which are used by other parts of the compiler.
+ SoftLints => [
+ WHILE_TRUE,
+ BOX_POINTERS,
+ NON_SHORTHAND_FIELD_PATTERNS,
+ UNSAFE_CODE,
+ MISSING_DOCS,
+ MISSING_COPY_IMPLEMENTATIONS,
+ MISSING_DEBUG_IMPLEMENTATIONS,
+ ANONYMOUS_PARAMETERS,
+ UNUSED_DOC_COMMENTS,
+ NO_MANGLE_CONST_ITEMS,
+ NO_MANGLE_GENERIC_ITEMS,
+ MUTABLE_TRANSMUTES,
+ UNSTABLE_FEATURES,
+ UNREACHABLE_PUB,
+ TYPE_ALIAS_BOUNDS,
+ TRIVIAL_BOUNDS
+ ]
+);
+
+declare_lint! {
+ /// The `ellipsis_inclusive_range_patterns` lint detects the [`...` range
+ /// pattern], which is deprecated.
+ ///
+ /// [`...` range pattern]: https://doc.rust-lang.org/reference/patterns.html#range-patterns
+ ///
+ /// ### Example
+ ///
+ /// ```rust,edition2018
+ /// let x = 123;
+ /// match x {
+ /// 0...100 => {}
+ /// _ => {}
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The `...` range pattern syntax was changed to `..=` to avoid potential
+ /// confusion with the [`..` range expression]. Use the new form instead.
+ ///
+ /// [`..` range expression]: https://doc.rust-lang.org/reference/expressions/range-expr.html
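+ ///
+ /// The updated form of the example above:
+ ///
+ /// ```rust
+ /// let x = 123;
+ /// match x {
+ ///     0..=100 => {}
+ ///     _ => {}
+ /// }
+ /// ```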
+ pub ELLIPSIS_INCLUSIVE_RANGE_PATTERNS,
+ Warn,
+ "`...` range patterns are deprecated",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/warnings-promoted-to-error.html>",
+ reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ };
+}
+
+#[derive(Default)]
+pub struct EllipsisInclusiveRangePatterns {
+ /// If `Some(_)`, suppress all subsequent pattern
+ /// warnings for better diagnostics.
+ node_id: Option<ast::NodeId>,
+}
+
+impl_lint_pass!(EllipsisInclusiveRangePatterns => [ELLIPSIS_INCLUSIVE_RANGE_PATTERNS]);
+
+impl EarlyLintPass for EllipsisInclusiveRangePatterns {
+ fn check_pat(&mut self, cx: &EarlyContext<'_>, pat: &ast::Pat) {
+ if self.node_id.is_some() {
+ // Don't recursively warn about patterns inside range endpoints.
+ return;
+ }
+
+ use self::ast::{PatKind, RangeSyntax::DotDotDot};
+
+ /// If `pat` is a `...` pattern, return the start and end of the range, as well as the span
+ /// corresponding to the ellipsis.
+ fn matches_ellipsis_pat(pat: &ast::Pat) -> Option<(Option<&Expr>, &Expr, Span)> {
+ match &pat.kind {
+ PatKind::Range(
+ a,
+ Some(b),
+ Spanned { span, node: RangeEnd::Included(DotDotDot) },
+ ) => Some((a.as_deref(), b, *span)),
+ _ => None,
+ }
+ }
+
+ let (parenthesise, endpoints) = match &pat.kind {
+ PatKind::Ref(subpat, _) => (true, matches_ellipsis_pat(&subpat)),
+ _ => (false, matches_ellipsis_pat(pat)),
+ };
+
+ if let Some((start, end, join)) = endpoints {
+ let msg = fluent::lint::builtin_ellipsis_inclusive_range_patterns;
+ let suggestion = fluent::lint::suggestion;
+ if parenthesise {
+ self.node_id = Some(pat.id);
+ let end = expr_to_string(&end);
+ let replace = match start {
+ Some(start) => format!("&({}..={})", expr_to_string(&start), end),
+ None => format!("&(..={})", end),
+ };
+ if join.edition() >= Edition::Edition2021 {
+ let mut err = cx.sess().struct_span_err_with_code(
+ pat.span,
+ msg,
+ rustc_errors::error_code!(E0783),
+ );
+ err.span_suggestion(
+ pat.span,
+ suggestion,
+ replace,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ } else {
+ cx.struct_span_lint(ELLIPSIS_INCLUSIVE_RANGE_PATTERNS, pat.span, |lint| {
+ lint.build(msg)
+ .span_suggestion(
+ pat.span,
+ suggestion,
+ replace,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ }
+ } else {
+ let replace = "..=";
+ if join.edition() >= Edition::Edition2021 {
+ let mut err = cx.sess().struct_span_err_with_code(
+ pat.span,
+ msg,
+ rustc_errors::error_code!(E0783),
+ );
+ err.span_suggestion_short(
+ join,
+ suggestion,
+ replace,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ } else {
+ cx.struct_span_lint(ELLIPSIS_INCLUSIVE_RANGE_PATTERNS, join, |lint| {
+ lint.build(msg)
+ .span_suggestion_short(
+ join,
+ suggestion,
+ replace,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ }
+ };
+ }
+ }
+
+ fn check_pat_post(&mut self, _cx: &EarlyContext<'_>, pat: &ast::Pat) {
+ if let Some(node_id) = self.node_id {
+ if pat.id == node_id {
+ self.node_id = None
+ }
+ }
+ }
+}
+
+declare_lint! {
+ /// The `unnameable_test_items` lint detects [`#[test]`][test] functions
+ /// that cannot be run by the test harness because they are in a
+ /// position where they are not nameable.
+ ///
+ /// [test]: https://doc.rust-lang.org/reference/attributes/testing.html#the-test-attribute
+ ///
+ /// ### Example
+ ///
+ /// ```rust,test
+ /// fn main() {
+ /// #[test]
+ /// fn foo() {
+ /// // This test will not fail because it does not run.
+ /// assert_eq!(1, 2);
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In order for the test harness to run a test, the test function must be
+ /// located in a position where it can be accessed from the crate root.
+ /// This generally means it must be defined in a module, and not anywhere
+ /// else such as inside another function. The compiler previously allowed
+ /// this without an error, so a lint was added as an alert that a test is
+ /// not being used. Whether or not this should be allowed has not yet been
+ /// decided, see [RFC 2471] and [issue #36629].
+ ///
+ /// [RFC 2471]: https://github.com/rust-lang/rfcs/pull/2471#issuecomment-397414443
+ /// [issue #36629]: https://github.com/rust-lang/rust/issues/36629
+ UNNAMEABLE_TEST_ITEMS,
+ Warn,
+ "detects an item that cannot be named being marked as `#[test_case]`",
+ report_in_external_macro
+}
+
+pub struct UnnameableTestItems {
+ boundary: Option<LocalDefId>, // Id of the item under which things are not nameable
+ items_nameable: bool,
+}
+
+impl_lint_pass!(UnnameableTestItems => [UNNAMEABLE_TEST_ITEMS]);
+
+impl UnnameableTestItems {
+ pub fn new() -> Self {
+ Self { boundary: None, items_nameable: true }
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for UnnameableTestItems {
+ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ if self.items_nameable {
+ if let hir::ItemKind::Mod(..) = it.kind {
+ } else {
+ self.items_nameable = false;
+ self.boundary = Some(it.def_id);
+ }
+ return;
+ }
+
+ let attrs = cx.tcx.hir().attrs(it.hir_id());
+ if let Some(attr) = cx.sess().find_by_name(attrs, sym::rustc_test_marker) {
+ cx.struct_span_lint(UNNAMEABLE_TEST_ITEMS, attr.span, |lint| {
+ lint.build(fluent::lint::builtin_unnameable_test_items).emit();
+ });
+ }
+ }
+
+ fn check_item_post(&mut self, _cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ if !self.items_nameable && self.boundary == Some(it.def_id) {
+ self.items_nameable = true;
+ }
+ }
+}
+
+declare_lint! {
+ /// The `keyword_idents` lint detects edition keywords being used as an
+ /// identifier.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,edition2015,compile_fail
+ /// #![deny(keyword_idents)]
+ /// // edition 2015
+ /// fn dyn() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Rust [editions] allow the language to evolve without breaking
+ /// backwards compatibility. This lint catches code that uses new keywords
+ /// that are added to the language that are used as identifiers (such as a
+ /// variable name, function name, etc.). If you switch the compiler to a
+ /// new edition without updating the code, then it will fail to compile if
+ /// you are using a new keyword as an identifier.
+ ///
+ /// You can manually change the identifiers to a non-keyword, or use a
+ /// [raw identifier], for example `r#dyn`, to transition to a new edition.
+ ///
+ /// This lint solves the problem automatically. It is "allow" by default
+ /// because the code is perfectly valid in older editions. The [`cargo
+ /// fix`] tool with the `--edition` flag will switch this lint to "warn"
+ /// and automatically apply the suggested fix from the compiler (which is
+ /// to use a raw identifier). This provides a completely automated way to
+ /// update old code for a new edition.
+ ///
+ /// [editions]: https://doc.rust-lang.org/edition-guide/
+ /// [raw identifier]: https://doc.rust-lang.org/reference/identifiers.html
+ /// [`cargo fix`]: https://doc.rust-lang.org/cargo/commands/cargo-fix.html
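+ ///
+ /// A sketch of the raw-identifier transition for the example above:
+ ///
+ /// ```rust,edition2015
+ /// fn r#dyn() {}
+ /// fn main() { r#dyn(); }
+ /// ```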
+ pub KEYWORD_IDENTS,
+ Allow,
+ "detects edition keywords being used as an identifier",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #49716 <https://github.com/rust-lang/rust/issues/49716>",
+ reason: FutureIncompatibilityReason::EditionError(Edition::Edition2018),
+ };
+}
+
+declare_lint_pass!(
+ /// Check for uses of edition keywords used as an identifier.
+ KeywordIdents => [KEYWORD_IDENTS]
+);
+
+struct UnderMacro(bool);
+
+impl KeywordIdents {
+ fn check_tokens(&mut self, cx: &EarlyContext<'_>, tokens: TokenStream) {
+ for tt in tokens.into_trees() {
+ match tt {
+ // Only report non-raw idents.
+ TokenTree::Token(token, _) => {
+ if let Some((ident, false)) = token.ident() {
+ self.check_ident_token(cx, UnderMacro(true), ident);
+ }
+ }
+ TokenTree::Delimited(_, _, tts) => self.check_tokens(cx, tts),
+ }
+ }
+ }
+
+ fn check_ident_token(
+ &mut self,
+ cx: &EarlyContext<'_>,
+ UnderMacro(under_macro): UnderMacro,
+ ident: Ident,
+ ) {
+ let next_edition = match cx.sess().edition() {
+ Edition::Edition2015 => {
+ match ident.name {
+ kw::Async | kw::Await | kw::Try => Edition::Edition2018,
+
+ // rust-lang/rust#56327: Conservatively do not
+ // attempt to report occurrences of `dyn` within
+ // macro definitions or invocations, because `dyn`
+ // can legitimately occur as a contextual keyword
+ // in 2015 code denoting its 2018 meaning, and we
+ // do not want rustfix to inject bugs into working
+ // code by rewriting such occurrences.
+ //
+ // But if we see `dyn` outside of a macro, we know
+ // its precise role in the parsed AST and thus are
+ // assured this is truly an attempt to use it as
+ // an identifier.
+ kw::Dyn if !under_macro => Edition::Edition2018,
+
+ _ => return,
+ }
+ }
+
+ // There are no new keywords yet for the 2018 edition and beyond.
+ _ => return,
+ };
+
+ // Don't lint `r#foo`.
+ if cx.sess().parse_sess.raw_identifier_spans.borrow().contains(&ident.span) {
+ return;
+ }
+
+ cx.struct_span_lint(KEYWORD_IDENTS, ident.span, |lint| {
+ lint.build(fluent::lint::builtin_keyword_idents)
+ .set_arg("kw", ident.clone())
+ .set_arg("next", next_edition)
+ .span_suggestion(
+ ident.span,
+ fluent::lint::suggestion,
+ format!("r#{}", ident),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ }
+}
+
+impl EarlyLintPass for KeywordIdents {
+ fn check_mac_def(&mut self, cx: &EarlyContext<'_>, mac_def: &ast::MacroDef, _id: ast::NodeId) {
+ self.check_tokens(cx, mac_def.body.inner_tokens());
+ }
+ fn check_mac(&mut self, cx: &EarlyContext<'_>, mac: &ast::MacCall) {
+ self.check_tokens(cx, mac.args.inner_tokens());
+ }
+ fn check_ident(&mut self, cx: &EarlyContext<'_>, ident: Ident) {
+ self.check_ident_token(cx, UnderMacro(false), ident);
+ }
+}
+
+declare_lint_pass!(ExplicitOutlivesRequirements => [EXPLICIT_OUTLIVES_REQUIREMENTS]);
+
+impl ExplicitOutlivesRequirements {
+ fn lifetimes_outliving_lifetime<'tcx>(
+ inferred_outlives: &'tcx [(ty::Predicate<'tcx>, Span)],
+ index: u32,
+ ) -> Vec<ty::Region<'tcx>> {
+ inferred_outlives
+ .iter()
+ .filter_map(|(pred, _)| match pred.kind().skip_binder() {
+ ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(a, b)) => match *a {
+ ty::ReEarlyBound(ebr) if ebr.index == index => Some(b),
+ _ => None,
+ },
+ _ => None,
+ })
+ .collect()
+ }
+
+ fn lifetimes_outliving_type<'tcx>(
+ inferred_outlives: &'tcx [(ty::Predicate<'tcx>, Span)],
+ index: u32,
+ ) -> Vec<ty::Region<'tcx>> {
+ inferred_outlives
+ .iter()
+ .filter_map(|(pred, _)| match pred.kind().skip_binder() {
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(a, b)) => {
+ a.is_param(index).then_some(b)
+ }
+ _ => None,
+ })
+ .collect()
+ }
+
+ fn collect_outlives_bound_spans<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ bounds: &hir::GenericBounds<'_>,
+ inferred_outlives: &[ty::Region<'tcx>],
+ ) -> Vec<(usize, Span)> {
+ use rustc_middle::middle::resolve_lifetime::Region;
+
+ bounds
+ .iter()
+ .enumerate()
+ .filter_map(|(i, bound)| {
+ if let hir::GenericBound::Outlives(lifetime) = bound {
+ let is_inferred = match tcx.named_region(lifetime.hir_id) {
+ Some(Region::EarlyBound(index, ..)) => inferred_outlives.iter().any(|r| {
+ if let ty::ReEarlyBound(ebr) = **r { ebr.index == index } else { false }
+ }),
+ _ => false,
+ };
+ is_inferred.then_some((i, bound.span()))
+ } else {
+ None
+ }
+ })
+ .filter(|(_, span)| !in_external_macro(tcx.sess, *span))
+ .collect()
+ }
+
+ fn consolidate_outlives_bound_spans(
+ &self,
+ lo: Span,
+ bounds: &hir::GenericBounds<'_>,
+ bound_spans: Vec<(usize, Span)>,
+ ) -> Vec<Span> {
+ if bounds.is_empty() {
+ return Vec::new();
+ }
+ if bound_spans.len() == bounds.len() {
+ let (_, last_bound_span) = bound_spans[bound_spans.len() - 1];
+ // If all bounds are inferable, we want to delete the colon, so
+ // start from just after the parameter (span passed as argument)
+ vec![lo.to(last_bound_span)]
+ } else {
+ let mut merged = Vec::new();
+ let mut last_merged_i = None;
+
+ let mut from_start = true;
+ for (i, bound_span) in bound_spans {
+ match last_merged_i {
+ // If the first bound is inferable, our span should also eat the leading `+`.
+ None if i == 0 => {
+ merged.push(bound_span.to(bounds[1].span().shrink_to_lo()));
+ last_merged_i = Some(0);
+ }
+ // If consecutive bounds are inferable, merge their spans
+ Some(h) if i == h + 1 => {
+ if let Some(tail) = merged.last_mut() {
+ // Also eat the trailing `+` if the first
+ // more-than-one bound is inferable
+ let to_span = if from_start && i < bounds.len() {
+ bounds[i + 1].span().shrink_to_lo()
+ } else {
+ bound_span
+ };
+ *tail = tail.to(to_span);
+ last_merged_i = Some(i);
+ } else {
+ bug!("another bound-span visited earlier");
+ }
+ }
+ _ => {
+ // When we find a non-inferable bound, subsequent inferable bounds
+ // won't be consecutive from the start (and we'll eat the leading
+ // `+` rather than the trailing one)
+ from_start = false;
+ merged.push(bounds[i - 1].span().shrink_to_hi().to(bound_span));
+ last_merged_i = Some(i);
+ }
+ }
+ }
+ merged
+ }
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements {
+ fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'_>) {
+ use rustc_middle::middle::resolve_lifetime::Region;
+
+ let def_id = item.def_id;
+ if let hir::ItemKind::Struct(_, ref hir_generics)
+ | hir::ItemKind::Enum(_, ref hir_generics)
+ | hir::ItemKind::Union(_, ref hir_generics) = item.kind
+ {
+ let inferred_outlives = cx.tcx.inferred_outlives_of(def_id);
+ if inferred_outlives.is_empty() {
+ return;
+ }
+
+ let ty_generics = cx.tcx.generics_of(def_id);
+
+ let mut bound_count = 0;
+ let mut lint_spans = Vec::new();
+ let mut where_lint_spans = Vec::new();
+ let mut dropped_predicate_count = 0;
+ let num_predicates = hir_generics.predicates.len();
+ for (i, where_predicate) in hir_generics.predicates.iter().enumerate() {
+ let (relevant_lifetimes, bounds, span, in_where_clause) = match where_predicate {
+ hir::WherePredicate::RegionPredicate(predicate) => {
+ if let Some(Region::EarlyBound(index, ..)) =
+ cx.tcx.named_region(predicate.lifetime.hir_id)
+ {
+ (
+ Self::lifetimes_outliving_lifetime(inferred_outlives, index),
+ &predicate.bounds,
+ predicate.span,
+ predicate.in_where_clause,
+ )
+ } else {
+ continue;
+ }
+ }
+ hir::WherePredicate::BoundPredicate(predicate) => {
+ // FIXME we can also infer bounds on associated types,
+ // and should check for them here.
+ match predicate.bounded_ty.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => {
+ let Res::Def(DefKind::TyParam, def_id) = path.res else {
+ continue
+ };
+ let index = ty_generics.param_def_id_to_index[&def_id];
+ (
+ Self::lifetimes_outliving_type(inferred_outlives, index),
+ &predicate.bounds,
+ predicate.span,
+ predicate.origin == PredicateOrigin::WhereClause,
+ )
+ }
+ _ => {
+ continue;
+ }
+ }
+ }
+ _ => continue,
+ };
+ if relevant_lifetimes.is_empty() {
+ continue;
+ }
+
+ let bound_spans =
+ self.collect_outlives_bound_spans(cx.tcx, bounds, &relevant_lifetimes);
+ bound_count += bound_spans.len();
+
+ let drop_predicate = bound_spans.len() == bounds.len();
+ if drop_predicate {
+ dropped_predicate_count += 1;
+ }
+
+ if drop_predicate && !in_where_clause {
+ lint_spans.push(span);
+ } else if drop_predicate && i + 1 < num_predicates {
+ // If all the bounds on a predicate were inferable and there are
+ // further predicates, we want to eat the trailing comma.
+ let next_predicate_span = hir_generics.predicates[i + 1].span();
+ where_lint_spans.push(span.to(next_predicate_span.shrink_to_lo()));
+ } else {
+ where_lint_spans.extend(self.consolidate_outlives_bound_spans(
+ span.shrink_to_lo(),
+ bounds,
+ bound_spans,
+ ));
+ }
+ }
+
+ // If all predicates are inferable, drop the entire clause
+ // (including the `where`)
+ if hir_generics.has_where_clause_predicates && dropped_predicate_count == num_predicates
+ {
+ let where_span = hir_generics.where_clause_span;
+ // Extend the where clause back to the closing `>` of the
+ // generics, except for tuple struct, which have the `where`
+ // after the fields of the struct.
+ let full_where_span =
+ if let hir::ItemKind::Struct(hir::VariantData::Tuple(..), _) = item.kind {
+ where_span
+ } else {
+ hir_generics.span.shrink_to_hi().to(where_span)
+ };
+ lint_spans.push(full_where_span);
+ } else {
+ lint_spans.extend(where_lint_spans);
+ }
+
+ if !lint_spans.is_empty() {
+ cx.struct_span_lint(EXPLICIT_OUTLIVES_REQUIREMENTS, lint_spans.clone(), |lint| {
+ lint.build(fluent::lint::builtin_explicit_outlives)
+ .set_arg("count", bound_count)
+ .multipart_suggestion(
+ fluent::lint::suggestion,
+ lint_spans
+ .into_iter()
+ .map(|span| (span, String::new()))
+ .collect::<Vec<_>>(),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ }
+ }
+ }
+}
+
+declare_lint! {
+ /// The `incomplete_features` lint detects unstable features enabled with
+ /// the [`feature` attribute] that may function improperly in some or all
+ /// cases.
+ ///
+ /// [`feature` attribute]: https://doc.rust-lang.org/nightly/unstable-book/
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(generic_const_exprs)]
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Although it is encouraged for people to experiment with unstable
+ /// features, some of them are known to be incomplete or faulty. This lint
+ /// is a signal that the feature has not yet been finished, and you may
+ /// experience problems with it.
+ pub INCOMPLETE_FEATURES,
+ Warn,
+ "incomplete features that may function improperly in some or all cases"
+}
+
+declare_lint_pass!(
+ /// Check for used feature gates in `INCOMPLETE_FEATURES` in `rustc_feature/src/active.rs`.
+ IncompleteFeatures => [INCOMPLETE_FEATURES]
+);
+
+impl EarlyLintPass for IncompleteFeatures {
+ fn check_crate(&mut self, cx: &EarlyContext<'_>, _: &ast::Crate) {
+ let features = cx.sess().features_untracked();
+ features
+ .declared_lang_features
+ .iter()
+ .map(|(name, span, _)| (name, span))
+ .chain(features.declared_lib_features.iter().map(|(name, span)| (name, span)))
+ .filter(|(&name, _)| features.incomplete(name))
+ .for_each(|(&name, &span)| {
+ cx.struct_span_lint(INCOMPLETE_FEATURES, span, |lint| {
+ let mut builder = lint.build(fluent::lint::builtin_incomplete_features);
+ builder.set_arg("name", name);
+ if let Some(n) = rustc_feature::find_feature_issue(name, GateIssue::Language) {
+ builder.set_arg("n", n);
+ builder.note(fluent::lint::note);
+ }
+ if HAS_MIN_FEATURES.contains(&name) {
+ builder.help(fluent::lint::help);
+ }
+ builder.emit();
+ })
+ });
+ }
+}
+
+const HAS_MIN_FEATURES: &[Symbol] = &[sym::specialization];
+
+declare_lint! {
+ /// The `invalid_value` lint detects creating a value that is not valid,
+ /// such as a null reference.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,no_run
+ /// # #![allow(unused)]
+ /// unsafe {
+ /// let x: &'static i32 = std::mem::zeroed();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In some situations the compiler can detect that the code is creating
+ /// an invalid value, which should be avoided.
+ ///
+ /// In particular, this lint will check for improper use of
+ /// [`mem::zeroed`], [`mem::uninitialized`], [`mem::transmute`], and
+ /// [`MaybeUninit::assume_init`] that can cause [undefined behavior]. The
+ /// lint should provide extra information to indicate what the problem is
+ /// and a possible solution.
+ ///
+ /// [`mem::zeroed`]: https://doc.rust-lang.org/std/mem/fn.zeroed.html
+ /// [`mem::uninitialized`]: https://doc.rust-lang.org/std/mem/fn.uninitialized.html
+ /// [`mem::transmute`]: https://doc.rust-lang.org/std/mem/fn.transmute.html
+ /// [`MaybeUninit::assume_init`]: https://doc.rust-lang.org/std/mem/union.MaybeUninit.html#method.assume_init
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
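+ ///
+ /// A sketch of the `MaybeUninit` pattern that the lint's help text points
+ /// at (shown for an integer rather than a reference):
+ ///
+ /// ```rust
+ /// use std::mem::MaybeUninit;
+ /// let mut x = MaybeUninit::<i32>::uninit();
+ /// x.write(0); // initialize before use
+ /// let x = unsafe { x.assume_init() };
+ /// assert_eq!(x, 0);
+ /// ```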
+ pub INVALID_VALUE,
+ Warn,
+ "an invalid value is being created (such as a null reference)"
+}
+
+declare_lint_pass!(InvalidValue => [INVALID_VALUE]);
+
+impl<'tcx> LateLintPass<'tcx> for InvalidValue {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &hir::Expr<'_>) {
+ #[derive(Debug, Copy, Clone, PartialEq)]
+ enum InitKind {
+ Zeroed,
+ Uninit,
+ }
+
+ /// Information about why a type cannot be initialized this way.
+ /// Contains an error message and optionally a span to point at.
+ type InitError = (String, Option<Span>);
+
+ /// Test if this constant is all-0.
+ fn is_zero(expr: &hir::Expr<'_>) -> bool {
+ use hir::ExprKind::*;
+ use rustc_ast::LitKind::*;
+ match &expr.kind {
+ Lit(lit) => {
+ if let Int(i, _) = lit.node {
+ i == 0
+ } else {
+ false
+ }
+ }
+ Tup(tup) => tup.iter().all(is_zero),
+ _ => false,
+ }
+ }
+
+ /// Determine if this expression is a "dangerous initialization".
+ fn is_dangerous_init(cx: &LateContext<'_>, expr: &hir::Expr<'_>) -> Option<InitKind> {
+ if let hir::ExprKind::Call(ref path_expr, ref args) = expr.kind {
+ // Find calls to `mem::{uninitialized,zeroed}` methods.
+ if let hir::ExprKind::Path(ref qpath) = path_expr.kind {
+ let def_id = cx.qpath_res(qpath, path_expr.hir_id).opt_def_id()?;
+ match cx.tcx.get_diagnostic_name(def_id) {
+ Some(sym::mem_zeroed) => return Some(InitKind::Zeroed),
+ Some(sym::mem_uninitialized) => return Some(InitKind::Uninit),
+ Some(sym::transmute) if is_zero(&args[0]) => return Some(InitKind::Zeroed),
+ _ => {}
+ }
+ }
+ } else if let hir::ExprKind::MethodCall(_, ref args, _) = expr.kind {
+ // Find problematic calls to `MaybeUninit::assume_init`.
+ let def_id = cx.typeck_results().type_dependent_def_id(expr.hir_id)?;
+ if cx.tcx.is_diagnostic_item(sym::assume_init, def_id) {
+ // This is a call to *some* method named `assume_init`.
+ // See if the `self` parameter is one of the dangerous constructors.
+ if let hir::ExprKind::Call(ref path_expr, _) = args[0].kind {
+ if let hir::ExprKind::Path(ref qpath) = path_expr.kind {
+ let def_id = cx.qpath_res(qpath, path_expr.hir_id).opt_def_id()?;
+ match cx.tcx.get_diagnostic_name(def_id) {
+ Some(sym::maybe_uninit_zeroed) => return Some(InitKind::Zeroed),
+ Some(sym::maybe_uninit_uninit) => return Some(InitKind::Uninit),
+ _ => {}
+ }
+ }
+ }
+ }
+ }
+
+ None
+ }
+
+ /// Test if this enum has several actually "existing" variants.
+ /// Zero-sized uninhabited variants do not always have a tag assigned and thus do not "exist".
+ fn is_multi_variant<'tcx>(adt: ty::AdtDef<'tcx>) -> bool {
+ // As an approximation, we only count dataless variants. Those are definitely inhabited.
+ let existing_variants = adt.variants().iter().filter(|v| v.fields.is_empty()).count();
+ existing_variants > 1
+ }
+
+ /// Return `Some` only if we are sure this type does *not*
+ /// allow zero initialization.
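+ /// E.g. `&T`, `Box<T>`, `fn()` and `!` never permit an all-zero (or
+ /// uninitialized) bit pattern and so report an error; plain integers
+ /// permit any initialized bit pattern and fall through to `None`.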
+ fn ty_find_init_error<'tcx>(
+ cx: &LateContext<'tcx>,
+ ty: Ty<'tcx>,
+ init: InitKind,
+ ) -> Option<InitError> {
+ use rustc_type_ir::sty::TyKind::*;
+ match ty.kind() {
+ // Primitive types that don't like 0 as a value.
+ Ref(..) => Some(("references must be non-null".to_string(), None)),
+ Adt(..) if ty.is_box() => Some(("`Box` must be non-null".to_string(), None)),
+ FnPtr(..) => Some(("function pointers must be non-null".to_string(), None)),
+ Never => Some(("the `!` type has no valid value".to_string(), None)),
+ RawPtr(tm) if matches!(tm.ty.kind(), Dynamic(..)) =>
+ // raw ptr to dyn Trait
+ {
+ Some(("the vtable of a wide raw pointer must be non-null".to_string(), None))
+ }
+ // Primitive types with other constraints.
+ Bool if init == InitKind::Uninit => {
+ Some(("booleans must be either `true` or `false`".to_string(), None))
+ }
+ Char if init == InitKind::Uninit => {
+ Some(("characters must be a valid Unicode codepoint".to_string(), None))
+ }
+ // Recurse and checks for some compound types.
+ Adt(adt_def, substs) if !adt_def.is_union() => {
+ // First check if this ADT has a layout attribute (like `NonNull` and friends).
+ use std::ops::Bound;
+ match cx.tcx.layout_scalar_valid_range(adt_def.did()) {
+ // We exploit here that `layout_scalar_valid_range` will never
+ // return `Bound::Excluded`. (And we have tests checking that we
+ // handle the attribute correctly.)
+ (Bound::Included(lo), _) if lo > 0 => {
+ return Some((format!("`{}` must be non-null", ty), None));
+ }
+ (Bound::Included(_), _) | (_, Bound::Included(_))
+ if init == InitKind::Uninit =>
+ {
+ return Some((
+ format!(
+ "`{}` must be initialized inside its custom valid range",
+ ty,
+ ),
+ None,
+ ));
+ }
+ _ => {}
+ }
+ // Now, recurse.
+ match adt_def.variants().len() {
+ 0 => Some(("enums with no variants have no valid value".to_string(), None)),
+ 1 => {
+ // Struct, or enum with exactly one variant.
+ // Proceed recursively, check all fields.
+ let variant = &adt_def.variant(VariantIdx::from_u32(0));
+ variant.fields.iter().find_map(|field| {
+ ty_find_init_error(cx, field.ty(cx.tcx, substs), init).map(
+ |(mut msg, span)| {
+ if span.is_none() {
+ // Point to this field, should be helpful for figuring
+ // out where the source of the error is.
+ let span = cx.tcx.def_span(field.did);
+ write!(
+ &mut msg,
+ " (in this {} field)",
+ adt_def.descr()
+ )
+ .unwrap();
+ (msg, Some(span))
+ } else {
+ // Just forward.
+ (msg, span)
+ }
+ },
+ )
+ })
+ }
+ // Multi-variant enum.
+ _ => {
+ if init == InitKind::Uninit && is_multi_variant(*adt_def) {
+ let span = cx.tcx.def_span(adt_def.did());
+ Some((
+ "enums have to be initialized to a variant".to_string(),
+ Some(span),
+ ))
+ } else {
+ // In principle, for zero-initialization we could figure out which variant corresponds
+ // to tag 0, and check that... but for now we just accept all zero-initializations.
+ None
+ }
+ }
+ }
+ }
+ Tuple(..) => {
+ // Proceed recursively, check all fields.
+ ty.tuple_fields().iter().find_map(|field| ty_find_init_error(cx, field, init))
+ }
+ Array(ty, len) => {
+ if matches!(len.try_eval_usize(cx.tcx, cx.param_env), Some(v) if v > 0) {
+ // Array length known and array is non-empty -- recurse.
+ ty_find_init_error(cx, *ty, init)
+ } else {
+ // Empty array or size unknown.
+ None
+ }
+ }
+ // Conservative fallback.
+ _ => None,
+ }
+ }
+
+ if let Some(init) = is_dangerous_init(cx, expr) {
+ // This conjures an instance of a type out of nothing,
+ // using zeroed or uninitialized memory.
+ // We are extremely conservative with what we warn about.
+ let conjured_ty = cx.typeck_results().expr_ty(expr);
+ if let Some((msg, span)) =
+ with_no_trimmed_paths!(ty_find_init_error(cx, conjured_ty, init))
+ {
+ // FIXME(davidtwco): make translatable
+ cx.struct_span_lint(INVALID_VALUE, expr.span, |lint| {
+ let mut err = lint.build(&format!(
+ "the type `{}` does not permit {}",
+ conjured_ty,
+ match init {
+ InitKind::Zeroed => "zero-initialization",
+ InitKind::Uninit => "being left uninitialized",
+ },
+ ));
+ err.span_label(expr.span, "this code causes undefined behavior when executed");
+ err.span_label(
+ expr.span,
+ "help: use `MaybeUninit<T>` instead, \
+ and only call `assume_init` after initialization is done",
+ );
+ if let Some(span) = span {
+ err.span_note(span, &msg);
+ } else {
+ err.note(&msg);
+ }
+ err.emit();
+ });
+ }
+ }
+ }
+}
+
+declare_lint! {
+ /// The `clashing_extern_declarations` lint detects when an `extern fn`
+ /// has been declared with the same name but different types.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// mod m {
+ /// extern "C" {
+ /// fn foo();
+ /// }
+ /// }
+ ///
+ /// extern "C" {
+ /// fn foo(_: u32);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Because two symbols of the same name cannot be resolved to two
+ /// different functions at link time, and one function cannot possibly
+ /// have two types, a clashing extern declaration is almost certainly a
+ /// mistake. Check to make sure that the `extern` definitions are correct
+ /// and equivalent, and possibly consider unifying them in one location.
+ ///
+ /// This lint does not run between crates because a project may have
+ /// dependencies which both rely on the same extern function, but declare
+ /// it in a different (but valid) way. For example, they may both declare
+    /// an opaque type for one or more of the arguments (which would end up as
+ /// distinct types), or use types that are valid conversions in the
+ /// language the `extern fn` is defined in. In these cases, the compiler
+ /// can't say that the clashing declaration is incorrect.
+ pub CLASHING_EXTERN_DECLARATIONS,
+ Warn,
+ "detects when an extern fn has been declared with the same name but different types"
+}
+
+pub struct ClashingExternDeclarations {
+    /// Map of function symbol name to the first-seen hir id for that symbol name. If seen_decls
+    /// contains an entry for key K, a symbol with name K has already been seen by this lint, and
+    /// a later clashing declaration of that symbol should be reported against the first one.
+ // FIXME: Technically, we could just store a &'tcx str here without issue; however, the
+ // `impl_lint_pass` macro doesn't currently support lints parametric over a lifetime.
+ seen_decls: FxHashMap<Symbol, HirId>,
+}
+
+/// Differentiate between whether the name for an extern decl came from the link_name attribute or
+/// just from the declaration itself. This is important because we don't want to report clashes on
+/// symbol name if they don't actually clash because one or the other links against a symbol with a
+/// different name.
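+///
+/// For example (illustrative): `extern "C" { #[link_name = "actual_sym"] fn foo(); }` links
+/// against `actual_sym` rather than `foo`, so it cannot clash with a plain `fn foo();`
+/// declared elsewhere.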
+enum SymbolName {
+ /// The name of the symbol + the span of the annotation which introduced the link name.
+ Link(Symbol, Span),
+ /// No link name, so just the name of the symbol.
+ Normal(Symbol),
+}
+
+impl SymbolName {
+ fn get_name(&self) -> Symbol {
+ match self {
+ SymbolName::Link(s, _) | SymbolName::Normal(s) => *s,
+ }
+ }
+}
+
+impl ClashingExternDeclarations {
+ pub(crate) fn new() -> Self {
+ ClashingExternDeclarations { seen_decls: FxHashMap::default() }
+ }
+ /// Insert a new foreign item into the seen set. If a symbol with the same name already exists
+ /// for the item, return its HirId without updating the set.
+ fn insert(&mut self, tcx: TyCtxt<'_>, fi: &hir::ForeignItem<'_>) -> Option<HirId> {
+ let did = fi.def_id.to_def_id();
+ let instance = Instance::new(did, ty::List::identity_for_item(tcx, did));
+ let name = Symbol::intern(tcx.symbol_name(instance).name);
+ if let Some(&hir_id) = self.seen_decls.get(&name) {
+ // Avoid updating the map with the new entry when we do find a collision. We want to
+ // make sure we're always pointing to the first definition as the previous declaration.
+ // This lets us avoid emitting "knock-on" diagnostics.
+ Some(hir_id)
+ } else {
+ self.seen_decls.insert(name, fi.hir_id())
+ }
+ }
+
+ /// Get the name of the symbol that's linked against for a given extern declaration. That is,
+    /// the name specified in a `#[link_name = ...]` attribute if one was specified; otherwise, just the
+ /// symbol's name.
+ fn name_of_extern_decl(tcx: TyCtxt<'_>, fi: &hir::ForeignItem<'_>) -> SymbolName {
+ if let Some((overridden_link_name, overridden_link_name_span)) =
+ tcx.codegen_fn_attrs(fi.def_id).link_name.map(|overridden_link_name| {
+ // FIXME: Instead of searching through the attributes again to get span
+ // information, we could have codegen_fn_attrs also give span information back for
+ // where the attribute was defined. However, until this is found to be a
+ // bottleneck, this does just fine.
+ (
+ overridden_link_name,
+ tcx.get_attr(fi.def_id.to_def_id(), sym::link_name).unwrap().span,
+ )
+ })
+ {
+ SymbolName::Link(overridden_link_name, overridden_link_name_span)
+ } else {
+ SymbolName::Normal(fi.ident.name)
+ }
+ }
+
+    /// Checks whether two types are structurally similar enough that the declarations shouldn't
+    /// clash. We need this so we don't emit a lint when two modules both declare an extern struct
+    /// with the same members.
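+    ///
+    /// For example (illustrative), two modules may each declare
+    /// `#[repr(C)] struct Point { x: i32, y: i32 }` and an `extern fn` over it: the two
+    /// `Point`s are nominally distinct types but structurally identical, so the
+    /// declarations are not reported.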
+ fn structurally_same_type<'tcx>(
+ cx: &LateContext<'tcx>,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ ckind: CItemKind,
+ ) -> bool {
+ fn structurally_same_type_impl<'tcx>(
+ seen_types: &mut FxHashSet<(Ty<'tcx>, Ty<'tcx>)>,
+ cx: &LateContext<'tcx>,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ ckind: CItemKind,
+ ) -> bool {
+ debug!("structurally_same_type_impl(cx, a = {:?}, b = {:?})", a, b);
+ let tcx = cx.tcx;
+
+ // Given a transparent newtype, reach through and grab the inner
+ // type unless the newtype makes the type non-null.
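+            // E.g. a hypothetical `#[repr(transparent)] struct Wrapper(u32)` peels to `u32`,
+            // while `core::num::NonZeroU32` (transparent, but with a guaranteed non-null
+            // niche) is deliberately left intact.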
+ let non_transparent_ty = |ty: Ty<'tcx>| -> Ty<'tcx> {
+ let mut ty = ty;
+ loop {
+ if let ty::Adt(def, substs) = *ty.kind() {
+ let is_transparent = def.repr().transparent();
+ let is_non_null = crate::types::nonnull_optimization_guaranteed(tcx, def);
+ debug!(
+ "non_transparent_ty({:?}) -- type is transparent? {}, type is non-null? {}",
+ ty, is_transparent, is_non_null
+ );
+ if is_transparent && !is_non_null {
+ debug_assert!(def.variants().len() == 1);
+ let v = &def.variant(VariantIdx::new(0));
+ ty = transparent_newtype_field(tcx, v)
+ .expect(
+ "single-variant transparent structure with zero-sized field",
+ )
+ .ty(tcx, substs);
+ continue;
+ }
+ }
+ debug!("non_transparent_ty -> {:?}", ty);
+ return ty;
+ }
+ };
+
+ let a = non_transparent_ty(a);
+ let b = non_transparent_ty(b);
+
+ if !seen_types.insert((a, b)) {
+ // We've encountered a cycle. There's no point going any further -- the types are
+ // structurally the same.
+ return true;
+ }
+ let tcx = cx.tcx;
+ if a == b {
+ // All nominally-same types are structurally same, too.
+ true
+ } else {
+ // Do a full, depth-first comparison between the two.
+ use rustc_type_ir::sty::TyKind::*;
+ let a_kind = a.kind();
+ let b_kind = b.kind();
+
+ let compare_layouts = |a, b| -> Result<bool, LayoutError<'tcx>> {
+ debug!("compare_layouts({:?}, {:?})", a, b);
+ let a_layout = &cx.layout_of(a)?.layout.abi();
+ let b_layout = &cx.layout_of(b)?.layout.abi();
+ debug!(
+ "comparing layouts: {:?} == {:?} = {}",
+ a_layout,
+ b_layout,
+ a_layout == b_layout
+ );
+ Ok(a_layout == b_layout)
+ };
+
+ #[allow(rustc::usage_of_ty_tykind)]
+ let is_primitive_or_pointer = |kind: &ty::TyKind<'_>| {
+ kind.is_primitive() || matches!(kind, RawPtr(..) | Ref(..))
+ };
+
+ ensure_sufficient_stack(|| {
+ match (a_kind, b_kind) {
+ (Adt(a_def, _), Adt(b_def, _)) => {
+ // We can immediately rule out these types as structurally same if
+ // their layouts differ.
+ match compare_layouts(a, b) {
+ Ok(false) => return false,
+                            _ => (), // otherwise, fall through to the full field-by-field comparison
+ }
+
+ // Grab a flattened representation of all fields.
+ let a_fields = a_def.variants().iter().flat_map(|v| v.fields.iter());
+ let b_fields = b_def.variants().iter().flat_map(|v| v.fields.iter());
+
+ // Perform a structural comparison for each field.
+ a_fields.eq_by(
+ b_fields,
+ |&ty::FieldDef { did: a_did, .. },
+ &ty::FieldDef { did: b_did, .. }| {
+ structurally_same_type_impl(
+ seen_types,
+ cx,
+ tcx.type_of(a_did),
+ tcx.type_of(b_did),
+ ckind,
+ )
+ },
+ )
+ }
+ (Array(a_ty, a_const), Array(b_ty, b_const)) => {
+                        // For arrays, we also check that the length constants are equal.
+ a_const.kind() == b_const.kind()
+ && structurally_same_type_impl(seen_types, cx, *a_ty, *b_ty, ckind)
+ }
+ (Slice(a_ty), Slice(b_ty)) => {
+ structurally_same_type_impl(seen_types, cx, *a_ty, *b_ty, ckind)
+ }
+ (RawPtr(a_tymut), RawPtr(b_tymut)) => {
+ a_tymut.mutbl == b_tymut.mutbl
+ && structurally_same_type_impl(
+ seen_types, cx, a_tymut.ty, b_tymut.ty, ckind,
+ )
+ }
+ (Ref(_a_region, a_ty, a_mut), Ref(_b_region, b_ty, b_mut)) => {
+ // For structural sameness, we don't need the region to be same.
+ a_mut == b_mut
+ && structurally_same_type_impl(seen_types, cx, *a_ty, *b_ty, ckind)
+ }
+ (FnDef(..), FnDef(..)) => {
+ let a_poly_sig = a.fn_sig(tcx);
+ let b_poly_sig = b.fn_sig(tcx);
+
+                        // We don't compare regions, and leaving bound regions in place
+                        // causes ICEs, so we erase them.
+ let a_sig = tcx.erase_late_bound_regions(a_poly_sig);
+ let b_sig = tcx.erase_late_bound_regions(b_poly_sig);
+
+ (a_sig.abi, a_sig.unsafety, a_sig.c_variadic)
+ == (b_sig.abi, b_sig.unsafety, b_sig.c_variadic)
+ && a_sig.inputs().iter().eq_by(b_sig.inputs().iter(), |a, b| {
+ structurally_same_type_impl(seen_types, cx, *a, *b, ckind)
+ })
+ && structurally_same_type_impl(
+ seen_types,
+ cx,
+ a_sig.output(),
+ b_sig.output(),
+ ckind,
+ )
+ }
+ (Tuple(a_substs), Tuple(b_substs)) => {
+ a_substs.iter().eq_by(b_substs.iter(), |a_ty, b_ty| {
+ structurally_same_type_impl(seen_types, cx, a_ty, b_ty, ckind)
+ })
+ }
+                    // For these, structural sameness is not so easy to define.
+ // For the purposes of this lint, take the conservative approach and mark them as
+ // not structurally same.
+ (Dynamic(..), Dynamic(..))
+ | (Error(..), Error(..))
+ | (Closure(..), Closure(..))
+ | (Generator(..), Generator(..))
+ | (GeneratorWitness(..), GeneratorWitness(..))
+ | (Projection(..), Projection(..))
+ | (Opaque(..), Opaque(..)) => false,
+
+ // These definitely should have been caught above.
+ (Bool, Bool) | (Char, Char) | (Never, Never) | (Str, Str) => unreachable!(),
+
+                    // An Adt and a primitive or pointer type. This can be FFI-safe if the
+                    // non-null enum layout optimisation is being applied.
+ (Adt(..), other_kind) | (other_kind, Adt(..))
+ if is_primitive_or_pointer(other_kind) =>
+ {
+ let (primitive, adt) =
+ if is_primitive_or_pointer(a.kind()) { (a, b) } else { (b, a) };
+ if let Some(ty) = crate::types::repr_nullable_ptr(cx, adt, ckind) {
+ ty == primitive
+ } else {
+ compare_layouts(a, b).unwrap_or(false)
+ }
+ }
+ // Otherwise, just compare the layouts. This may fail to lint for some
+ // incompatible types, but at the very least, will stop reads into
+ // uninitialised memory.
+ _ => compare_layouts(a, b).unwrap_or(false),
+ }
+ })
+ }
+ }
+ let mut seen_types = FxHashSet::default();
+ structurally_same_type_impl(&mut seen_types, cx, a, b, ckind)
+ }
+}
+
+impl_lint_pass!(ClashingExternDeclarations => [CLASHING_EXTERN_DECLARATIONS]);
+
+impl<'tcx> LateLintPass<'tcx> for ClashingExternDeclarations {
+ fn check_foreign_item(&mut self, cx: &LateContext<'tcx>, this_fi: &hir::ForeignItem<'_>) {
+ trace!("ClashingExternDeclarations: check_foreign_item: {:?}", this_fi);
+ if let ForeignItemKind::Fn(..) = this_fi.kind {
+ let tcx = cx.tcx;
+ if let Some(existing_hid) = self.insert(tcx, this_fi) {
+ let existing_decl_ty = tcx.type_of(tcx.hir().local_def_id(existing_hid));
+ let this_decl_ty = tcx.type_of(this_fi.def_id);
+ debug!(
+ "ClashingExternDeclarations: Comparing existing {:?}: {:?} to this {:?}: {:?}",
+ existing_hid, existing_decl_ty, this_fi.def_id, this_decl_ty
+ );
+ // Check that the declarations match.
+ if !Self::structurally_same_type(
+ cx,
+ existing_decl_ty,
+ this_decl_ty,
+ CItemKind::Declaration,
+ ) {
+ let orig_fi = tcx.hir().expect_foreign_item(existing_hid.expect_owner());
+ let orig = Self::name_of_extern_decl(tcx, orig_fi);
+
+ // We want to ensure that we use spans for both decls that include where the
+ // name was defined, whether that was from the link_name attribute or not.
+ let get_relevant_span =
+ |fi: &hir::ForeignItem<'_>| match Self::name_of_extern_decl(tcx, fi) {
+ SymbolName::Normal(_) => fi.span,
+ SymbolName::Link(_, annot_span) => fi.span.to(annot_span),
+ };
+ // Finally, emit the diagnostic.
+ tcx.struct_span_lint_hir(
+ CLASHING_EXTERN_DECLARATIONS,
+ this_fi.hir_id(),
+ get_relevant_span(this_fi),
+ |lint| {
+ let mut expected_str = DiagnosticStyledString::new();
+ expected_str.push(existing_decl_ty.fn_sig(tcx).to_string(), false);
+ let mut found_str = DiagnosticStyledString::new();
+ found_str.push(this_decl_ty.fn_sig(tcx).to_string(), true);
+
+ lint.build(if orig.get_name() == this_fi.ident.name {
+ fluent::lint::builtin_clashing_extern_same_name
+ } else {
+ fluent::lint::builtin_clashing_extern_diff_name
+ })
+ .set_arg("this_fi", this_fi.ident.name)
+ .set_arg("orig", orig.get_name())
+ .span_label(
+ get_relevant_span(orig_fi),
+ fluent::lint::previous_decl_label,
+ )
+ .span_label(get_relevant_span(this_fi), fluent::lint::mismatch_label)
+ // FIXME(davidtwco): translatable expected/found
+ .note_expected_found(&"", expected_str, &"", found_str)
+ .emit();
+ },
+ );
+ }
+ }
+ }
+ }
+}
+
+declare_lint! {
+    /// The `deref_nullptr` lint detects when a null pointer is dereferenced,
+ /// which causes [undefined behavior].
+ ///
+ /// ### Example
+ ///
+ /// ```rust,no_run
+ /// # #![allow(unused)]
+ /// use std::ptr;
+ /// unsafe {
+ /// let x = &*ptr::null::<i32>();
+ /// let x = ptr::addr_of!(*ptr::null::<i32>());
+ /// let x = *(0 as *const i32);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Dereferencing a null pointer causes [undefined behavior] even as a place expression,
+ /// like `&*(0 as *const i32)` or `addr_of!(*(0 as *const i32))`.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ pub DEREF_NULLPTR,
+ Warn,
+ "detects when an null pointer is dereferenced"
+}
+
+declare_lint_pass!(DerefNullPtr => [DEREF_NULLPTR]);
+
+impl<'tcx> LateLintPass<'tcx> for DerefNullPtr {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &hir::Expr<'_>) {
+        /// Test if the expression is a null pointer
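+        /// (e.g. `ptr::null::<i32>()`, `0 as *const i32`, or a chain of casts ending in one)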
+ fn is_null_ptr(cx: &LateContext<'_>, expr: &hir::Expr<'_>) -> bool {
+ match &expr.kind {
+ rustc_hir::ExprKind::Cast(ref expr, ref ty) => {
+ if let rustc_hir::TyKind::Ptr(_) = ty.kind {
+ return is_zero(expr) || is_null_ptr(cx, expr);
+ }
+ }
+ // check for call to `core::ptr::null` or `core::ptr::null_mut`
+ rustc_hir::ExprKind::Call(ref path, _) => {
+ if let rustc_hir::ExprKind::Path(ref qpath) = path.kind {
+ if let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id() {
+ return matches!(
+ cx.tcx.get_diagnostic_name(def_id),
+ Some(sym::ptr_null | sym::ptr_null_mut)
+ );
+ }
+ }
+ }
+ _ => {}
+ }
+ false
+ }
+
+        /// Test if the expression is the literal `0`
+ fn is_zero(expr: &hir::Expr<'_>) -> bool {
+ match &expr.kind {
+ rustc_hir::ExprKind::Lit(ref lit) => {
+ if let LitKind::Int(a, _) = lit.node {
+ return a == 0;
+ }
+ }
+ _ => {}
+ }
+ false
+ }
+
+ if let rustc_hir::ExprKind::Unary(rustc_hir::UnOp::Deref, expr_deref) = expr.kind {
+ if is_null_ptr(cx, expr_deref) {
+ cx.struct_span_lint(DEREF_NULLPTR, expr.span, |lint| {
+ let mut err = lint.build(fluent::lint::builtin_deref_nullptr);
+ err.span_label(expr.span, fluent::lint::label);
+ err.emit();
+ });
+ }
+ }
+ }
+}
+
+declare_lint! {
+ /// The `named_asm_labels` lint detects the use of named labels in the
+ /// inline `asm!` macro.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// use std::arch::asm;
+ ///
+ /// fn main() {
+ /// unsafe {
+ /// asm!("foo: bar");
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// LLVM is allowed to duplicate inline assembly blocks for any
+ /// reason, for example when it is in a function that gets inlined. Because
+ /// of this, GNU assembler [local labels] *must* be used instead of labels
+ /// with a name. Using named labels might cause assembler or linker errors.
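+    ///
+    /// For example, a numeric local label such as `2:` (referenced as `2b`/`2f`) can safely
+    /// be duplicated, so `asm!("2:", "nop", "jmp 2b")` (illustrative x86 assembly) is not
+    /// linted, while a named label such as `foo:` is.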
+ ///
+ /// See the explanation in [Rust By Example] for more details.
+ ///
+ /// [local labels]: https://sourceware.org/binutils/docs/as/Symbol-Names.html#Local-Labels
+ /// [Rust By Example]: https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html#labels
+ pub NAMED_ASM_LABELS,
+ Deny,
+ "named labels in inline assembly",
+}
+
+declare_lint_pass!(NamedAsmLabels => [NAMED_ASM_LABELS]);
+
+impl<'tcx> LateLintPass<'tcx> for NamedAsmLabels {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'tcx>) {
+ if let hir::Expr {
+ kind: hir::ExprKind::InlineAsm(hir::InlineAsm { template_strs, .. }),
+ ..
+ } = expr
+ {
+ for (template_sym, template_snippet, template_span) in template_strs.iter() {
+ let template_str = template_sym.as_str();
+ let find_label_span = |needle: &str| -> Option<Span> {
+ if let Some(template_snippet) = template_snippet {
+ let snippet = template_snippet.as_str();
+ if let Some(pos) = snippet.find(needle) {
+ let end = pos
+ + snippet[pos..]
+ .find(|c| c == ':')
+ .unwrap_or(snippet[pos..].len() - 1);
+ let inner = InnerSpan::new(pos, end);
+ return Some(template_span.from_inner(inner));
+ }
+ }
+
+ None
+ };
+
+ let mut found_labels = Vec::new();
+
+                // A semicolon might not actually be a valid statement separator on all targets, but LLVM seems to always accept it
+ let statements = template_str.split(|c| matches!(c, '\n' | ';'));
+ for statement in statements {
+ // If there's a comment, trim it from the statement
+ let statement = statement.find("//").map_or(statement, |idx| &statement[..idx]);
+ let mut start_idx = 0;
+ for (idx, _) in statement.match_indices(':') {
+ let possible_label = statement[start_idx..idx].trim();
+ let mut chars = possible_label.chars();
+ let Some(c) = chars.next() else {
+ // Empty string means a leading ':' in this section, which is not a label
+ break
+ };
+ // A label starts with an alphabetic character or . or _ and continues with alphanumeric characters, _, or $
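+                    // E.g. (illustrative) in `"entry: mov eax, eax"` the text before the ':'
+                    // is `entry`, which matches this grammar and is recorded; a numeric local
+                    // label such as `2:` fails the check below and ends the scan.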
+ if (c.is_alphabetic() || matches!(c, '.' | '_'))
+ && chars.all(|c| c.is_alphanumeric() || matches!(c, '_' | '$'))
+ {
+ found_labels.push(possible_label);
+ } else {
+ // If we encounter a non-label, there cannot be any further labels, so stop checking
+ break;
+ }
+
+ start_idx = idx + 1;
+ }
+ }
+
+ debug!("NamedAsmLabels::check_expr(): found_labels: {:#?}", &found_labels);
+
+            if !found_labels.is_empty() {
+ let spans = found_labels
+ .into_iter()
+ .filter_map(|label| find_label_span(label))
+ .collect::<Vec<Span>>();
+ // If there were labels but we couldn't find a span, combine the warnings and use the template span
+ let target_spans: MultiSpan =
+                    if !spans.is_empty() { spans.into() } else { (*template_span).into() };
+
+ cx.lookup_with_diagnostics(
+ NAMED_ASM_LABELS,
+ Some(target_spans),
+ |diag| {
+ diag.build(fluent::lint::builtin_asm_labels).emit();
+ },
+ BuiltinLintDiagnostics::NamedAsmLabel(
+ "only local labels of the form `<number>:` should be used in inline asm"
+ .to_string(),
+ ),
+ );
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs
new file mode 100644
index 000000000..b95fc341d
--- /dev/null
+++ b/compiler/rustc_lint/src/context.rs
@@ -0,0 +1,1259 @@
+//! Implementation of lint checking.
+//!
+//! The lint checking is mostly consolidated into one pass which runs
+//! after all other analyses. Throughout compilation, lint warnings
+//! can be added via the `add_lint` method on the Session structure. This
+//! requires a span and an ID of the node that the lint is being added to. The
+//! lint isn't actually emitted at that time because it is unknown what the
+//! actual lint level at that location is.
+//!
+//! To actually emit lint warnings/errors, a separate pass is used.
+//! A context keeps track of the current state of all lint levels.
+//! Upon entering a node of the ast which can modify the lint settings, the
+//! previous lint state is pushed onto a stack and the ast is then recursed
+//! upon. As the ast is traversed, this keeps track of the current lint level
+//! for all lint attributes.
+
+use self::TargetLint::*;
+
+use crate::levels::LintLevelsBuilder;
+use crate::passes::{EarlyLintPassObject, LateLintPassObject};
+use rustc_ast::util::unicode::TEXT_FLOW_CONTROL_CHARS;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync;
+use rustc_errors::{add_elided_lifetime_in_path_suggestion, struct_span_err};
+use rustc_errors::{
+ Applicability, DecorateLint, LintDiagnosticBuilder, MultiSpan, SuggestionStyle,
+};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
+use rustc_middle::middle::privacy::AccessLevels;
+use rustc_middle::middle::stability;
+use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, print::Printer, subst::GenericArg, RegisteredTools, Ty, TyCtxt};
+use rustc_session::lint::{BuiltinLintDiagnostics, LintExpectationId};
+use rustc_session::lint::{FutureIncompatibleInfo, Level, Lint, LintBuffer, LintId};
+use rustc_session::Session;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::{BytePos, Span, DUMMY_SP};
+use rustc_target::abi;
+use tracing::debug;
+
+use std::cell::Cell;
+use std::iter;
+use std::slice;
+
+/// Information about the registered lints.
+///
+/// This is basically the subset of `Context` that we can
+/// build early in the compile pipeline.
+pub struct LintStore {
+ /// Registered lints.
+ lints: Vec<&'static Lint>,
+
+ /// Constructor functions for each variety of lint pass.
+ ///
+ /// These should only be called once, but since we want to avoid locks or
+ /// interior mutability, we don't enforce this (and lints should, in theory,
+ /// be compatible with being constructed more than once, though not
+    /// necessarily in a sane manner; this is safe regardless).
+ pub pre_expansion_passes: Vec<Box<dyn Fn() -> EarlyLintPassObject + sync::Send + sync::Sync>>,
+ pub early_passes: Vec<Box<dyn Fn() -> EarlyLintPassObject + sync::Send + sync::Sync>>,
+ pub late_passes: Vec<Box<dyn Fn() -> LateLintPassObject + sync::Send + sync::Sync>>,
+    /// These are unique in that they are constructed per-module rather than once.
+ pub late_module_passes: Vec<Box<dyn Fn() -> LateLintPassObject + sync::Send + sync::Sync>>,
+
+ /// Lints indexed by name.
+ by_name: FxHashMap<String, TargetLint>,
+
+ /// Map of registered lint groups to what lints they expand to.
+ lint_groups: FxHashMap<&'static str, LintGroup>,
+}
+
+/// The target of the `by_name` map, which accounts for renaming/deprecation.
+#[derive(Debug)]
+enum TargetLint {
+ /// A direct lint target
+ Id(LintId),
+
+ /// Temporary renaming, used for easing migration pain; see #16545
+ Renamed(String, LintId),
+
+ /// Lint with this name existed previously, but has been removed/deprecated.
+ /// The string argument is the reason for removal.
+ Removed(String),
+
+ /// A lint name that should give no warnings and have no effect.
+ ///
+ /// This is used by rustc to avoid warning about old rustdoc lints before rustdoc registers them as tool lints.
+ Ignored,
+}
+
+pub enum FindLintError {
+ NotFound,
+ Removed,
+}
+
+struct LintAlias {
+ name: &'static str,
+ /// Whether deprecation warnings should be suppressed for this alias.
+ silent: bool,
+}
+
+struct LintGroup {
+ lint_ids: Vec<LintId>,
+ from_plugin: bool,
+ depr: Option<LintAlias>,
+}
+
+#[derive(Debug)]
+pub enum CheckLintNameResult<'a> {
+ Ok(&'a [LintId]),
+ /// Lint doesn't exist. Potentially contains a suggestion for a correct lint name.
+ NoLint(Option<Symbol>),
+ /// The lint refers to a tool that has not been registered.
+ NoTool,
+ /// The lint is either renamed or removed. This is the warning
+ /// message, and an optional new name (`None` if removed).
+ Warning(String, Option<String>),
+ /// The lint is from a tool. If the Option is None, then either
+ /// the lint does not exist in the tool or the code was not
+ /// compiled with the tool and therefore the lint was never
+ /// added to the `LintStore`. Otherwise the `LintId` will be
+ /// returned as if it where a rustc lint.
+ Tool(Result<&'a [LintId], (Option<&'a [LintId]>, String)>),
+}
+
+impl LintStore {
+ pub fn new() -> LintStore {
+ LintStore {
+ lints: vec![],
+ pre_expansion_passes: vec![],
+ early_passes: vec![],
+ late_passes: vec![],
+ late_module_passes: vec![],
+ by_name: Default::default(),
+ lint_groups: Default::default(),
+ }
+ }
+
+ pub fn get_lints<'t>(&'t self) -> &'t [&'static Lint] {
+ &self.lints
+ }
+
+ pub fn get_lint_groups<'t>(
+ &'t self,
+ ) -> impl Iterator<Item = (&'static str, Vec<LintId>, bool)> + 't {
+ // This function is not used in a way which observes the order of lints.
+ #[allow(rustc::potential_query_instability)]
+ self.lint_groups
+ .iter()
+ .filter(|(_, LintGroup { depr, .. })| {
+ // Don't display deprecated lint groups.
+ depr.is_none()
+ })
+ .map(|(k, LintGroup { lint_ids, from_plugin, .. })| {
+ (*k, lint_ids.clone(), *from_plugin)
+ })
+ }
+
+ pub fn register_early_pass(
+ &mut self,
+ pass: impl Fn() -> EarlyLintPassObject + 'static + sync::Send + sync::Sync,
+ ) {
+ self.early_passes.push(Box::new(pass));
+ }
+
+ /// This lint pass is softly deprecated. It misses expanded code and has caused a few
+ /// errors in the past. Currently, it is only used in Clippy. New implementations
+ /// should avoid using this interface, as it might be removed in the future.
+ ///
+ /// * See [rust#69838](https://github.com/rust-lang/rust/pull/69838)
+ /// * See [rust-clippy#5518](https://github.com/rust-lang/rust-clippy/pull/5518)
+ pub fn register_pre_expansion_pass(
+ &mut self,
+ pass: impl Fn() -> EarlyLintPassObject + 'static + sync::Send + sync::Sync,
+ ) {
+ self.pre_expansion_passes.push(Box::new(pass));
+ }
+
+ pub fn register_late_pass(
+ &mut self,
+ pass: impl Fn() -> LateLintPassObject + 'static + sync::Send + sync::Sync,
+ ) {
+ self.late_passes.push(Box::new(pass));
+ }
+
+ pub fn register_late_mod_pass(
+ &mut self,
+ pass: impl Fn() -> LateLintPassObject + 'static + sync::Send + sync::Sync,
+ ) {
+ self.late_module_passes.push(Box::new(pass));
+ }
+
+ // Helper method for register_early/late_pass
+ pub fn register_lints(&mut self, lints: &[&'static Lint]) {
+ for lint in lints {
+ self.lints.push(lint);
+
+ let id = LintId::of(lint);
+ if self.by_name.insert(lint.name_lower(), Id(id)).is_some() {
+ bug!("duplicate specification of lint {}", lint.name_lower())
+ }
+
+ if let Some(FutureIncompatibleInfo { reason, .. }) = lint.future_incompatible {
+ if let Some(edition) = reason.edition() {
+ self.lint_groups
+ .entry(edition.lint_name())
+ .or_insert(LintGroup {
+ lint_ids: vec![],
+ from_plugin: lint.is_plugin,
+ depr: None,
+ })
+ .lint_ids
+ .push(id);
+ } else {
+ // Lints belonging to the `future_incompatible` lint group are lints where a
+ // future version of rustc will cause existing code to stop compiling.
+ // Lints tied to an edition don't count because they are opt-in.
+ self.lint_groups
+ .entry("future_incompatible")
+ .or_insert(LintGroup {
+ lint_ids: vec![],
+ from_plugin: lint.is_plugin,
+ depr: None,
+ })
+ .lint_ids
+ .push(id);
+ }
+ }
+ }
+ }
+
+ pub fn register_group_alias(&mut self, lint_name: &'static str, alias: &'static str) {
+ self.lint_groups.insert(
+ alias,
+ LintGroup {
+ lint_ids: vec![],
+ from_plugin: false,
+ depr: Some(LintAlias { name: lint_name, silent: true }),
+ },
+ );
+ }
+
+ pub fn register_group(
+ &mut self,
+ from_plugin: bool,
+ name: &'static str,
+ deprecated_name: Option<&'static str>,
+ to: Vec<LintId>,
+ ) {
+ let new = self
+ .lint_groups
+ .insert(name, LintGroup { lint_ids: to, from_plugin, depr: None })
+ .is_none();
+ if let Some(deprecated) = deprecated_name {
+ self.lint_groups.insert(
+ deprecated,
+ LintGroup {
+ lint_ids: vec![],
+ from_plugin,
+ depr: Some(LintAlias { name, silent: false }),
+ },
+ );
+ }
+
+ if !new {
+ bug!("duplicate specification of lint group {}", name);
+ }
+ }
+
+ /// This lint should give no warning and have no effect.
+ ///
+ /// This is used by rustc to avoid warning about old rustdoc lints before rustdoc registers them as tool lints.
+ #[track_caller]
+ pub fn register_ignored(&mut self, name: &str) {
+ if self.by_name.insert(name.to_string(), Ignored).is_some() {
+ bug!("duplicate specification of lint {}", name);
+ }
+ }
+
+    /// This lint has been renamed; warn about using the old name, suggest the new name, and apply the lint.
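+    /// (E.g. a hypothetical `register_renamed("old_lint", "new_lint")` makes uses of
+    /// `old_lint` warn and then behave as `new_lint`.)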
+ #[track_caller]
+ pub fn register_renamed(&mut self, old_name: &str, new_name: &str) {
+ let Some(&Id(target)) = self.by_name.get(new_name) else {
+ bug!("invalid lint renaming of {} to {}", old_name, new_name);
+ };
+ self.by_name.insert(old_name.to_string(), Renamed(new_name.to_string(), target));
+ }
+
+ pub fn register_removed(&mut self, name: &str, reason: &str) {
+ self.by_name.insert(name.into(), Removed(reason.into()));
+ }
+
+ pub fn find_lints(&self, mut lint_name: &str) -> Result<Vec<LintId>, FindLintError> {
+ match self.by_name.get(lint_name) {
+ Some(&Id(lint_id)) => Ok(vec![lint_id]),
+ Some(&Renamed(_, lint_id)) => Ok(vec![lint_id]),
+ Some(&Removed(_)) => Err(FindLintError::Removed),
+ Some(&Ignored) => Ok(vec![]),
+ None => loop {
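+                // The name is not a lint, so try the lint groups; a deprecated group
+                // alias redirects to the real group name, which we follow before
+                // returning that group's lints.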
+ return match self.lint_groups.get(lint_name) {
+ Some(LintGroup { lint_ids, depr, .. }) => {
+ if let Some(LintAlias { name, .. }) = depr {
+ lint_name = name;
+ continue;
+ }
+ Ok(lint_ids.clone())
+ }
+ None => Err(FindLintError::Removed),
+ };
+ },
+ }
+ }
+
+ /// Checks the validity of lint names derived from the command line.
+ pub fn check_lint_name_cmdline(
+ &self,
+ sess: &Session,
+ lint_name: &str,
+ level: Level,
+ registered_tools: &RegisteredTools,
+ ) {
+ let (tool_name, lint_name_only) = parse_lint_and_tool_name(lint_name);
+ if lint_name_only == crate::WARNINGS.name_lower() && matches!(level, Level::ForceWarn(_)) {
+ struct_span_err!(
+ sess,
+ DUMMY_SP,
+ E0602,
+                "`{}` lint group is not supported with `--force-warn`",
+ crate::WARNINGS.name_lower()
+ )
+ .emit();
+ return;
+ }
+ let db = match self.check_lint_name(lint_name_only, tool_name, registered_tools) {
+ CheckLintNameResult::Ok(_) => None,
+ CheckLintNameResult::Warning(ref msg, _) => Some(sess.struct_warn(msg)),
+ CheckLintNameResult::NoLint(suggestion) => {
+ let mut err =
+ struct_span_err!(sess, DUMMY_SP, E0602, "unknown lint: `{}`", lint_name);
+
+ if let Some(suggestion) = suggestion {
+ err.help(&format!("did you mean: `{}`", suggestion));
+ }
+
+ Some(err.forget_guarantee())
+ }
+ CheckLintNameResult::Tool(result) => match result {
+ Err((Some(_), new_name)) => Some(sess.struct_warn(&format!(
+ "lint name `{}` is deprecated \
+ and does not have an effect anymore. \
+ Use: {}",
+ lint_name, new_name
+ ))),
+ _ => None,
+ },
+ CheckLintNameResult::NoTool => Some(
+ struct_span_err!(
+ sess,
+ DUMMY_SP,
+ E0602,
+ "unknown lint tool: `{}`",
+ tool_name.unwrap()
+ )
+ .forget_guarantee(),
+ ),
+ };
+
+ if let Some(mut db) = db {
+ let msg = format!(
+ "requested on the command line with `{} {}`",
+ match level {
+ Level::Allow => "-A",
+ Level::Warn => "-W",
+ Level::ForceWarn(_) => "--force-warn",
+ Level::Deny => "-D",
+ Level::Forbid => "-F",
+ Level::Expect(_) => {
+ unreachable!("lints with the level of `expect` should not run this code");
+ }
+ },
+ lint_name
+ );
+ db.note(&msg);
+ db.emit();
+ }
+ }
+
+ /// True if this symbol represents a lint group name.
+ pub fn is_lint_group(&self, lint_name: Symbol) -> bool {
+ debug!(
+ "is_lint_group(lint_name={:?}, lint_groups={:?})",
+ lint_name,
+ self.lint_groups.keys().collect::<Vec<_>>()
+ );
+ let lint_name_str = lint_name.as_str();
+ self.lint_groups.contains_key(lint_name_str) || {
+ let warnings_name_str = crate::WARNINGS.name_lower();
+ lint_name_str == warnings_name_str
+ }
+ }
+
+ /// Checks the name of a lint for its existence, and whether it was
+ /// renamed or removed. Generates a DiagnosticBuilder containing a
+    /// warning for renamed and removed lints. This is used for both lint
+    /// names from attributes and those passed on the command line. Since
+ /// it emits non-fatal warnings and there are *two* lint passes that
+ /// inspect attributes, this is only run from the late pass to avoid
+ /// printing duplicate warnings.
+ pub fn check_lint_name(
+ &self,
+ lint_name: &str,
+ tool_name: Option<Symbol>,
+ registered_tools: &RegisteredTools,
+ ) -> CheckLintNameResult<'_> {
+ if let Some(tool_name) = tool_name {
+ // FIXME: rustc and rustdoc are considered tools for lints, but not for attributes.
+ if tool_name != sym::rustc
+ && tool_name != sym::rustdoc
+ && !registered_tools.contains(&Ident::with_dummy_span(tool_name))
+ {
+ return CheckLintNameResult::NoTool;
+ }
+ }
+
+ let complete_name = if let Some(tool_name) = tool_name {
+ format!("{}::{}", tool_name, lint_name)
+ } else {
+ lint_name.to_string()
+ };
+        // If the lint was scoped with `tool::`, check if the tool lint exists
+ if let Some(tool_name) = tool_name {
+ match self.by_name.get(&complete_name) {
+ None => match self.lint_groups.get(&*complete_name) {
+ // If the lint isn't registered, there are two possibilities:
+ None => {
+ // 1. The tool is currently running, so this lint really doesn't exist.
+ // FIXME: should this handle tools that never register a lint, like rustfmt?
+ tracing::debug!("lints={:?}", self.by_name.keys().collect::<Vec<_>>());
+ let tool_prefix = format!("{}::", tool_name);
+ return if self.by_name.keys().any(|lint| lint.starts_with(&tool_prefix)) {
+ self.no_lint_suggestion(&complete_name)
+ } else {
+ // 2. The tool isn't currently running, so no lints will be registered.
+ // To avoid giving a false positive, ignore all unknown lints.
+ CheckLintNameResult::Tool(Err((None, String::new())))
+ };
+ }
+ Some(LintGroup { lint_ids, .. }) => {
+ return CheckLintNameResult::Tool(Ok(&lint_ids));
+ }
+ },
+ Some(&Id(ref id)) => return CheckLintNameResult::Tool(Ok(slice::from_ref(id))),
+ // If the lint was registered as removed or renamed by the lint tool, we don't need
+                // to treat tool lints and rustc lints differently and can use the code below.
+ _ => {}
+ }
+ }
+ match self.by_name.get(&complete_name) {
+ Some(&Renamed(ref new_name, _)) => CheckLintNameResult::Warning(
+ format!("lint `{}` has been renamed to `{}`", complete_name, new_name),
+ Some(new_name.to_owned()),
+ ),
+ Some(&Removed(ref reason)) => CheckLintNameResult::Warning(
+ format!("lint `{}` has been removed: {}", complete_name, reason),
+ None,
+ ),
+ None => match self.lint_groups.get(&*complete_name) {
+                // If neither the lint nor the lint group exists, check if there is a `clippy::`
+ // variant of this lint
+ None => self.check_tool_name_for_backwards_compat(&complete_name, "clippy"),
+ Some(LintGroup { lint_ids, depr, .. }) => {
+ // Check if the lint group name is deprecated
+ if let Some(LintAlias { name, silent }) = depr {
+ let LintGroup { lint_ids, .. } = self.lint_groups.get(name).unwrap();
+ return if *silent {
+ CheckLintNameResult::Ok(&lint_ids)
+ } else {
+ CheckLintNameResult::Tool(Err((Some(&lint_ids), (*name).to_string())))
+ };
+ }
+ CheckLintNameResult::Ok(&lint_ids)
+ }
+ },
+ Some(&Id(ref id)) => CheckLintNameResult::Ok(slice::from_ref(id)),
+ Some(&Ignored) => CheckLintNameResult::Ok(&[]),
+ }
+ }
+
+ fn no_lint_suggestion(&self, lint_name: &str) -> CheckLintNameResult<'_> {
+ let name_lower = lint_name.to_lowercase();
+
+ if lint_name.chars().any(char::is_uppercase) && self.find_lints(&name_lower).is_ok() {
+ // First check if the lint name is (partly) in upper case instead of lower case...
+ return CheckLintNameResult::NoLint(Some(Symbol::intern(&name_lower)));
+ }
+ // ...if not, search for lints with a similar name
+ let groups = self.lint_groups.keys().copied().map(Symbol::intern);
+ let lints = self.lints.iter().map(|l| Symbol::intern(&l.name_lower()));
+ let names: Vec<Symbol> = groups.chain(lints).collect();
+ let suggestion = find_best_match_for_name(&names, Symbol::intern(&name_lower), None);
+ CheckLintNameResult::NoLint(suggestion)
+ }
+
+ fn check_tool_name_for_backwards_compat(
+ &self,
+ lint_name: &str,
+ tool_name: &str,
+ ) -> CheckLintNameResult<'_> {
+ let complete_name = format!("{}::{}", tool_name, lint_name);
+ match self.by_name.get(&complete_name) {
+ None => match self.lint_groups.get(&*complete_name) {
+                // Now we are sure that this lint exists nowhere
+ None => self.no_lint_suggestion(lint_name),
+ Some(LintGroup { lint_ids, depr, .. }) => {
+ // Reaching this would be weird, but let's cover this case anyway
+ if let Some(LintAlias { name, silent }) = depr {
+ let LintGroup { lint_ids, .. } = self.lint_groups.get(name).unwrap();
+ return if *silent {
+ CheckLintNameResult::Tool(Err((Some(&lint_ids), complete_name)))
+ } else {
+ CheckLintNameResult::Tool(Err((Some(&lint_ids), (*name).to_string())))
+ };
+ }
+ CheckLintNameResult::Tool(Err((Some(&lint_ids), complete_name)))
+ }
+ },
+ Some(&Id(ref id)) => {
+ CheckLintNameResult::Tool(Err((Some(slice::from_ref(id)), complete_name)))
+ }
+ Some(other) => {
+ tracing::debug!("got renamed lint {:?}", other);
+ CheckLintNameResult::NoLint(None)
+ }
+ }
+ }
+}
+
+/// Context for lint checking outside of type inference.
+pub struct LateContext<'tcx> {
+ /// Type context we're checking in.
+ pub tcx: TyCtxt<'tcx>,
+
+ /// Current body, or `None` if outside a body.
+ pub enclosing_body: Option<hir::BodyId>,
+
+ /// Type-checking results for the current body. Access using the `typeck_results`
+ /// and `maybe_typeck_results` methods, which handle querying the typeck results on demand.
+ // FIXME(eddyb) move all the code accessing internal fields like this,
+ // to this module, to avoid exposing it to lint logic.
+ pub(super) cached_typeck_results: Cell<Option<&'tcx ty::TypeckResults<'tcx>>>,
+
+ /// Parameter environment for the item we are in.
+ pub param_env: ty::ParamEnv<'tcx>,
+
+ /// Items accessible from the crate being checked.
+ pub access_levels: &'tcx AccessLevels,
+
+ /// The store of registered lints and the lint levels.
+ pub lint_store: &'tcx LintStore,
+
+ pub last_node_with_lint_attrs: hir::HirId,
+
+ /// Generic type parameters in scope for the item we are in.
+ pub generics: Option<&'tcx hir::Generics<'tcx>>,
+
+    /// Whether we are only looking at one module.
+ pub only_module: bool,
+}
+
+/// Context for lint checking of the AST, after expansion, before lowering to HIR.
+pub struct EarlyContext<'a> {
+ pub builder: LintLevelsBuilder<'a>,
+ pub buffered: LintBuffer,
+}
+
+pub trait LintPassObject: Sized {}
+
+impl LintPassObject for EarlyLintPassObject {}
+
+impl LintPassObject for LateLintPassObject {}
+
+pub trait LintContext: Sized {
+ type PassObject: LintPassObject;
+
+ fn sess(&self) -> &Session;
+ fn lints(&self) -> &LintStore;
+
+ fn lookup_with_diagnostics(
+ &self,
+ lint: &'static Lint,
+ span: Option<impl Into<MultiSpan>>,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ diagnostic: BuiltinLintDiagnostics,
+ ) {
+ self.lookup(lint, span, |lint| {
+ // We first generate a blank diagnostic.
+ let mut db = lint.build("");
+
+ // Now, set up surrounding context.
+ let sess = self.sess();
+ match diagnostic {
+ BuiltinLintDiagnostics::UnicodeTextFlow(span, content) => {
+ let spans: Vec<_> = content
+ .char_indices()
+ .filter_map(|(i, c)| {
+ TEXT_FLOW_CONTROL_CHARS.contains(&c).then(|| {
+ let lo = span.lo() + BytePos(2 + i as u32);
+ (c, span.with_lo(lo).with_hi(lo + BytePos(c.len_utf8() as u32)))
+ })
+ })
+ .collect();
+ let (an, s) = match spans.len() {
+ 1 => ("an ", ""),
+ _ => ("", "s"),
+ };
+ db.span_label(span, &format!(
+ "this comment contains {}invisible unicode text flow control codepoint{}",
+ an,
+ s,
+ ));
+ for (c, span) in &spans {
+ db.span_label(*span, format!("{:?}", c));
+ }
+ db.note(
+                        "these kinds of unicode codepoints change the way text flows on \
+ applications that support them, but can cause confusion because they \
+ change the order of characters on the screen",
+ );
+ if !spans.is_empty() {
+ db.multipart_suggestion_with_style(
+ "if their presence wasn't intentional, you can remove them",
+ spans.into_iter().map(|(_, span)| (span, "".to_string())).collect(),
+ Applicability::MachineApplicable,
+ SuggestionStyle::HideCodeAlways,
+ );
+ }
+ },
+ BuiltinLintDiagnostics::Normal => (),
+ BuiltinLintDiagnostics::AbsPathWithModule(span) => {
+ let (sugg, app) = match sess.source_map().span_to_snippet(span) {
+ Ok(ref s) => {
+ // FIXME(Manishearth) ideally the emitting code
+ // can tell us whether or not this is global
+ let opt_colon =
+ if s.trim_start().starts_with("::") { "" } else { "::" };
+
+ (format!("crate{}{}", opt_colon, s), Applicability::MachineApplicable)
+ }
+ Err(_) => ("crate::<path>".to_string(), Applicability::HasPlaceholders),
+ };
+ db.span_suggestion(span, "use `crate`", sugg, app);
+ }
+ BuiltinLintDiagnostics::ProcMacroDeriveResolutionFallback(span) => {
+ db.span_label(
+ span,
+ "names from parent modules are not accessible without an explicit import",
+ );
+ }
+ BuiltinLintDiagnostics::MacroExpandedMacroExportsAccessedByAbsolutePaths(
+ span_def,
+ ) => {
+ db.span_note(span_def, "the macro is defined here");
+ }
+ BuiltinLintDiagnostics::ElidedLifetimesInPaths(
+ n,
+ path_span,
+ incl_angl_brckt,
+ insertion_span,
+ ) => {
+ add_elided_lifetime_in_path_suggestion(
+ sess.source_map(),
+ &mut db,
+ n,
+ path_span,
+ incl_angl_brckt,
+ insertion_span,
+ );
+ }
+ BuiltinLintDiagnostics::UnknownCrateTypes(span, note, sugg) => {
+ db.span_suggestion(span, &note, sugg, Applicability::MaybeIncorrect);
+ }
+ BuiltinLintDiagnostics::UnusedImports(message, replaces, in_test_module) => {
+ if !replaces.is_empty() {
+ db.tool_only_multipart_suggestion(
+ &message,
+ replaces,
+ Applicability::MachineApplicable,
+ );
+ }
+
+ if let Some(span) = in_test_module {
+ db.span_help(
+ self.sess().source_map().guess_head_span(span),
+ "consider adding a `#[cfg(test)]` to the containing module",
+ );
+ }
+ }
+ BuiltinLintDiagnostics::RedundantImport(spans, ident) => {
+ for (span, is_imported) in spans {
+ let introduced = if is_imported { "imported" } else { "defined" };
+ db.span_label(
+ span,
+ format!("the item `{}` is already {} here", ident, introduced),
+ );
+ }
+ }
+ BuiltinLintDiagnostics::DeprecatedMacro(suggestion, span) => {
+ stability::deprecation_suggestion(&mut db, "macro", suggestion, span)
+ }
+ BuiltinLintDiagnostics::UnusedDocComment(span) => {
+ db.span_label(span, "rustdoc does not generate documentation for macro invocations");
+ db.help("to document an item produced by a macro, \
+ the macro must produce the documentation as part of its expansion");
+ }
+ BuiltinLintDiagnostics::PatternsInFnsWithoutBody(span, ident) => {
+ db.span_suggestion(span, "remove `mut` from the parameter", ident, Applicability::MachineApplicable);
+ }
+ BuiltinLintDiagnostics::MissingAbi(span, default_abi) => {
+ db.span_label(span, "ABI should be specified here");
+ db.help(&format!("the default ABI is {}", default_abi.name()));
+ }
+ BuiltinLintDiagnostics::LegacyDeriveHelpers(span) => {
+ db.span_label(span, "the attribute is introduced here");
+ }
+ BuiltinLintDiagnostics::ProcMacroBackCompat(note) => {
+ db.note(&note);
+ }
+ BuiltinLintDiagnostics::OrPatternsBackCompat(span,suggestion) => {
+ db.span_suggestion(span, "use pat_param to preserve semantics", suggestion, Applicability::MachineApplicable);
+ }
+ BuiltinLintDiagnostics::ReservedPrefix(span) => {
+ db.span_label(span, "unknown prefix");
+ db.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ "insert whitespace here to avoid this being parsed as a prefix in Rust 2021",
+ " ",
+ Applicability::MachineApplicable,
+ );
+ }
+ BuiltinLintDiagnostics::UnusedBuiltinAttribute {
+ attr_name,
+ macro_name,
+ invoc_span
+ } => {
+ db.span_note(
+ invoc_span,
+ &format!("the built-in attribute `{attr_name}` will be ignored, since it's applied to the macro invocation `{macro_name}`")
+ );
+ }
+ BuiltinLintDiagnostics::TrailingMacro(is_trailing, name) => {
+ if is_trailing {
+ db.note("macro invocations at the end of a block are treated as expressions");
+ db.note(&format!("to ignore the value produced by the macro, add a semicolon after the invocation of `{name}`"));
+ }
+ }
+ BuiltinLintDiagnostics::BreakWithLabelAndLoop(span) => {
+ db.multipart_suggestion(
+ "wrap this expression in parentheses",
+ vec![(span.shrink_to_lo(), "(".to_string()),
+ (span.shrink_to_hi(), ")".to_string())],
+ Applicability::MachineApplicable
+ );
+ }
+ BuiltinLintDiagnostics::NamedAsmLabel(help) => {
+ db.help(&help);
+ db.note("see the asm section of Rust By Example <https://doc.rust-lang.org/nightly/rust-by-example/unsafe/asm.html#labels> for more information");
+ },
+ BuiltinLintDiagnostics::UnexpectedCfg((name, name_span), None) => {
+ let Some(names_valid) = &sess.parse_sess.check_config.names_valid else {
+ bug!("it shouldn't be possible to have a diagnostic on a name if name checking is not enabled");
+ };
+                    let possibilities: Vec<Symbol> = names_valid.iter().copied().collect();
+
+ // Suggest the most probable if we found one
+ if let Some(best_match) = find_best_match_for_name(&possibilities, name, None) {
+ db.span_suggestion(name_span, "did you mean", best_match, Applicability::MaybeIncorrect);
+ }
+ },
+ BuiltinLintDiagnostics::UnexpectedCfg((name, name_span), Some((value, value_span))) => {
+ let Some(values) = &sess.parse_sess.check_config.values_valid.get(&name) else {
+ bug!("it shouldn't be possible to have a diagnostic on a value whose name is not in values");
+ };
+                    let possibilities: Vec<Symbol> = values.iter().copied().collect();
+
+                    // Show the full list of all possible values for a given name, but don't do it
+                    // for names, as the possibilities could be very long
+ if !possibilities.is_empty() {
+ {
+ let mut possibilities = possibilities.iter().map(Symbol::as_str).collect::<Vec<_>>();
+ possibilities.sort();
+
+ let possibilities = possibilities.join(", ");
+ db.note(&format!("expected values for `{name}` are: {possibilities}"));
+ }
+
+ // Suggest the most probable if we found one
+ if let Some(best_match) = find_best_match_for_name(&possibilities, value, None) {
+ db.span_suggestion(value_span, "did you mean", format!("\"{best_match}\""), Applicability::MaybeIncorrect);
+ }
+ } else {
+ db.note(&format!("no expected value for `{name}`"));
+ if name != sym::feature {
+ db.span_suggestion(name_span.shrink_to_hi().to(value_span), "remove the value", "", Applicability::MaybeIncorrect);
+ }
+ }
+ },
+ BuiltinLintDiagnostics::DeprecatedWhereclauseLocation(new_span, suggestion) => {
+ db.multipart_suggestion(
+ "move it to the end of the type declaration",
+ vec![(db.span.primary_span().unwrap(), "".to_string()), (new_span, suggestion)],
+ Applicability::MachineApplicable,
+ );
+ db.note(
+ "see issue #89122 <https://github.com/rust-lang/rust/issues/89122> for more information",
+ );
+ },
+ BuiltinLintDiagnostics::SingleUseLifetime {
+ param_span,
+ use_span: Some((use_span, elide)),
+ deletion_span,
+ } => {
+ debug!(?param_span, ?use_span, ?deletion_span);
+ db.span_label(param_span, "this lifetime...");
+ db.span_label(use_span, "...is used only here");
+ let msg = "elide the single-use lifetime";
+ let (use_span, replace_lt) = if elide {
+ let use_span = sess.source_map().span_extend_while(
+ use_span,
+ char::is_whitespace,
+ ).unwrap_or(use_span);
+ (use_span, String::new())
+ } else {
+ (use_span, "'_".to_owned())
+ };
+ db.multipart_suggestion(
+ msg,
+ vec![(deletion_span, String::new()), (use_span, replace_lt)],
+ Applicability::MachineApplicable,
+ );
+ },
+ BuiltinLintDiagnostics::SingleUseLifetime {
+ param_span: _,
+ use_span: None,
+ deletion_span,
+ } => {
+ debug!(?deletion_span);
+ db.span_suggestion(
+ deletion_span,
+ "elide the unused lifetime",
+ "",
+ Applicability::MachineApplicable,
+ );
+ },
+ BuiltinLintDiagnostics::NamedArgumentUsedPositionally{ position_sp_to_replace, position_sp_for_msg, named_arg_sp, named_arg_name, is_formatting_arg} => {
+ db.span_label(named_arg_sp, "this named argument is referred to by position in formatting string");
+ if let Some(positional_arg_for_msg) = position_sp_for_msg {
+ let msg = format!("this formatting argument uses named argument `{}` by position", named_arg_name);
+ db.span_label(positional_arg_for_msg, msg);
+ }
+
+ if let Some(positional_arg_to_replace) = position_sp_to_replace {
+ let name = if is_formatting_arg { named_arg_name + "$" } else { named_arg_name };
+
+ db.span_suggestion_verbose(
+ positional_arg_to_replace,
+ "use the named argument by name to avoid ambiguity",
+ name,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ // Rewrap `db`, and pass control to the user.
+ decorate(LintDiagnosticBuilder::new(db));
+ });
+ }
+
+ // FIXME: These methods should not take an Into<MultiSpan> -- instead, callers should need to
+ // set the span in their `decorate` function (preferably using set_span).
+ fn lookup<S: Into<MultiSpan>>(
+ &self,
+ lint: &'static Lint,
+ span: Option<S>,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ );
+
+ /// Emit a lint at `span` from a lint struct (some type that implements `DecorateLint`,
+ /// typically generated by `#[derive(LintDiagnostic)]`).
+ fn emit_spanned_lint<S: Into<MultiSpan>>(
+ &self,
+ lint: &'static Lint,
+ span: S,
+ decorator: impl for<'a> DecorateLint<'a, ()>,
+ ) {
+ self.lookup(lint, Some(span), |diag| decorator.decorate_lint(diag));
+ }
+
+ fn struct_span_lint<S: Into<MultiSpan>>(
+ &self,
+ lint: &'static Lint,
+ span: S,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ self.lookup(lint, Some(span), decorate);
+ }
+
+ /// Emit a lint from a lint struct (some type that implements `DecorateLint`, typically
+ /// generated by `#[derive(LintDiagnostic)]`).
+ fn emit_lint(&self, lint: &'static Lint, decorator: impl for<'a> DecorateLint<'a, ()>) {
+ self.lookup(lint, None as Option<Span>, |diag| decorator.decorate_lint(diag));
+ }
+
+ /// Emit a lint at the appropriate level, with no associated span.
+ fn lint(
+ &self,
+ lint: &'static Lint,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ self.lookup(lint, None as Option<Span>, decorate);
+ }
+
+ /// This returns the lint level for the given lint at the current location.
+ fn get_lint_level(&self, lint: &'static Lint) -> Level;
+
+ /// This function can be used to manually fulfill an expectation. This can
+ /// be used for lints which contain several spans, and should be suppressed,
+ /// if either location was marked with an expectation.
+ ///
+ /// Note that this function should only be called for [`LintExpectationId`]s
+ /// retrieved from the current lint pass. Buffered or manually created ids can
+ /// cause ICEs.
+ fn fulfill_expectation(&self, expectation: LintExpectationId) {
+        // We need to make sure that submitted expectation ids are correctly fulfilled, suppressed,
+        // and stored between compilation sessions. To avoid doing these steps manually, we simply
+        // create a dummy diagnostic and emit it as usual; it will be suppressed and stored like a
+        // normal expected lint diagnostic.
+ self.sess()
+ .struct_expect(
+ "this is a dummy diagnostic, to submit and store an expectation",
+ expectation,
+ )
+ .emit();
+ }
+}
+
+impl<'a> EarlyContext<'a> {
+ pub(crate) fn new(
+ sess: &'a Session,
+ warn_about_weird_lints: bool,
+ lint_store: &'a LintStore,
+ registered_tools: &'a RegisteredTools,
+ buffered: LintBuffer,
+ ) -> EarlyContext<'a> {
+ EarlyContext {
+ builder: LintLevelsBuilder::new(
+ sess,
+ warn_about_weird_lints,
+ lint_store,
+ registered_tools,
+ ),
+ buffered,
+ }
+ }
+}
+
+impl LintContext for LateContext<'_> {
+ type PassObject = LateLintPassObject;
+
+ /// Gets the overall compiler `Session` object.
+ fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+
+ fn lints(&self) -> &LintStore {
+ &*self.lint_store
+ }
+
+ fn lookup<S: Into<MultiSpan>>(
+ &self,
+ lint: &'static Lint,
+ span: Option<S>,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ let hir_id = self.last_node_with_lint_attrs;
+
+ match span {
+ Some(s) => self.tcx.struct_span_lint_hir(lint, hir_id, s, decorate),
+ None => self.tcx.struct_lint_node(lint, hir_id, decorate),
+ }
+ }
+
+ fn get_lint_level(&self, lint: &'static Lint) -> Level {
+ self.tcx.lint_level_at_node(lint, self.last_node_with_lint_attrs).0
+ }
+}
+
+impl LintContext for EarlyContext<'_> {
+ type PassObject = EarlyLintPassObject;
+
+ /// Gets the overall compiler `Session` object.
+ fn sess(&self) -> &Session {
+ &self.builder.sess()
+ }
+
+ fn lints(&self) -> &LintStore {
+ self.builder.lint_store()
+ }
+
+ fn lookup<S: Into<MultiSpan>>(
+ &self,
+ lint: &'static Lint,
+ span: Option<S>,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ self.builder.struct_lint(lint, span.map(|s| s.into()), decorate)
+ }
+
+ fn get_lint_level(&self, lint: &'static Lint) -> Level {
+ self.builder.lint_level(lint).0
+ }
+}
+
+impl<'tcx> LateContext<'tcx> {
+ /// Gets the type-checking results for the current body,
+ /// or `None` if outside a body.
+ pub fn maybe_typeck_results(&self) -> Option<&'tcx ty::TypeckResults<'tcx>> {
+ self.cached_typeck_results.get().or_else(|| {
+ self.enclosing_body.map(|body| {
+ let typeck_results = self.tcx.typeck_body(body);
+ self.cached_typeck_results.set(Some(typeck_results));
+ typeck_results
+ })
+ })
+ }
+
+ /// Gets the type-checking results for the current body.
+ /// As this will ICE if called outside bodies, only call when working with
+ /// `Expr` or `Pat` nodes (they are guaranteed to be found only in bodies).
+ #[track_caller]
+ pub fn typeck_results(&self) -> &'tcx ty::TypeckResults<'tcx> {
+ self.maybe_typeck_results().expect("`LateContext::typeck_results` called outside of body")
+ }
+
+ /// Returns the final resolution of a `QPath`, or `Res::Err` if unavailable.
+ /// Unlike `.typeck_results().qpath_res(qpath, id)`, this can be used even outside
+ /// bodies (e.g. for paths in `hir::Ty`), without any risk of ICE-ing.
+ pub fn qpath_res(&self, qpath: &hir::QPath<'_>, id: hir::HirId) -> Res {
+ match *qpath {
+ hir::QPath::Resolved(_, ref path) => path.res,
+ hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => self
+ .maybe_typeck_results()
+ .filter(|typeck_results| typeck_results.hir_owner == id.owner)
+ .or_else(|| {
+ if self.tcx.has_typeck_results(id.owner.to_def_id()) {
+ Some(self.tcx.typeck(id.owner))
+ } else {
+ None
+ }
+ })
+ .and_then(|typeck_results| typeck_results.type_dependent_def(id))
+ .map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)),
+ }
+ }
+
+ /// Check if a `DefId`'s path matches the given absolute type path usage.
+ ///
+ /// Anonymous scopes such as `extern` imports are matched with `kw::Empty`;
+ /// inherent `impl` blocks are matched with the name of the type.
+ ///
+    /// Instead of using this method, it is often preferable to use
+    /// `rustc_diagnostic_item` or a `lang_item`. Those are less prone to errors,
+    /// as paths get invalidated if the target definition moves.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,ignore (no context or def id available)
+ /// if cx.match_def_path(def_id, &[sym::core, sym::option, sym::Option]) {
+ /// // The given `def_id` is that of an `Option` type
+ /// }
+ /// ```
+ ///
+ /// Used by clippy, but should be replaced by diagnostic items eventually.
+ pub fn match_def_path(&self, def_id: DefId, path: &[Symbol]) -> bool {
+ let names = self.get_def_path(def_id);
+
+ names.len() == path.len() && iter::zip(names, path).all(|(a, &b)| a == b)
+ }
+
+ /// Gets the absolute path of `def_id` as a vector of `Symbol`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust,ignore (no context or def id available)
+ /// let def_path = cx.get_def_path(def_id);
+ /// if let &[sym::core, sym::option, sym::Option] = &def_path[..] {
+ /// // The given `def_id` is that of an `Option` type
+ /// }
+ /// ```
+ pub fn get_def_path(&self, def_id: DefId) -> Vec<Symbol> {
+ pub struct AbsolutePathPrinter<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ }
+
+ impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
+ type Error = !;
+
+ type Path = Vec<Symbol>;
+ type Region = ();
+ type Type = ();
+ type DynExistential = ();
+ type Const = ();
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
+ Ok(())
+ }
+
+ fn print_type(self, _ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ Ok(())
+ }
+
+ fn print_dyn_existential(
+ self,
+ _predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ Ok(())
+ }
+
+ fn print_const(self, _ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+ Ok(())
+ }
+
+ fn path_crate(self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+ Ok(vec![self.tcx.crate_name(cnum)])
+ }
+
+ fn path_qualified(
+ self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ if trait_ref.is_none() {
+ if let ty::Adt(def, substs) = self_ty.kind() {
+ return self.print_def_path(def.did(), substs);
+ }
+ }
+
+ // This shouldn't ever be needed, but just in case:
+ with_no_trimmed_paths!({
+ Ok(vec![match trait_ref {
+ Some(trait_ref) => Symbol::intern(&format!("{:?}", trait_ref)),
+ None => Symbol::intern(&format!("<{}>", self_ty)),
+ }])
+ })
+ }
+
+ fn path_append_impl(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _disambiguated_data: &DisambiguatedDefPathData,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ let mut path = print_prefix(self)?;
+
+ // This shouldn't ever be needed, but just in case:
+ path.push(match trait_ref {
+ Some(trait_ref) => {
+ with_no_trimmed_paths!(Symbol::intern(&format!(
+ "<impl {} for {}>",
+ trait_ref.print_only_trait_path(),
+ self_ty
+ )))
+ }
+ None => {
+ with_no_trimmed_paths!(Symbol::intern(&format!("<impl {}>", self_ty)))
+ }
+ });
+
+ Ok(path)
+ }
+
+ fn path_append(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error> {
+ let mut path = print_prefix(self)?;
+
+ // Skip `::{{extern}}` blocks and `::{{constructor}}` on tuple/unit structs.
+ if let DefPathData::ForeignMod | DefPathData::Ctor = disambiguated_data.data {
+ return Ok(path);
+ }
+
+ path.push(Symbol::intern(&disambiguated_data.data.to_string()));
+ Ok(path)
+ }
+
+ fn path_generic_args(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ print_prefix(self)
+ }
+ }
+
+ AbsolutePathPrinter { tcx: self.tcx }.print_def_path(def_id, &[]).unwrap()
+ }
+}
+
+impl<'tcx> abi::HasDataLayout for LateContext<'tcx> {
+ #[inline]
+ fn data_layout(&self) -> &abi::TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for LateContext<'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx> ty::layout::HasParamEnv<'tcx> for LateContext<'tcx> {
+ #[inline]
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for LateContext<'tcx> {
+ type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
+ err
+ }
+}
+
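+/// Splits a fully qualified lint name like `tool::lint` into its tool prefix
+/// and lint-name parts. A brief sketch of the expected behavior (the lint
+/// names below are illustrative, not taken from this crate):
+///
+/// ```rust,ignore (illustrative only)
+/// assert_eq!(parse_lint_and_tool_name("dead_code"), (None, "dead_code"));
+/// // "clippy::needless_return" yields `(Some(clippy_sym), "needless_return")`,
+/// // where `clippy_sym` is the interned `Symbol` for "clippy".
+/// ```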
+pub fn parse_lint_and_tool_name(lint_name: &str) -> (Option<Symbol>, &str) {
+ match lint_name.split_once("::") {
+ Some((tool_name, lint_name)) => {
+ let tool_name = Symbol::intern(tool_name);
+
+ (Some(tool_name), lint_name)
+ }
+ None => (None, lint_name),
+ }
+}
diff --git a/compiler/rustc_lint/src/early.rs b/compiler/rustc_lint/src/early.rs
new file mode 100644
index 000000000..d13711c3a
--- /dev/null
+++ b/compiler/rustc_lint/src/early.rs
@@ -0,0 +1,456 @@
+//! Implementation of lint checking.
+//!
+//! The lint checking is mostly consolidated into one pass which runs
+//! after all other analyses. Throughout compilation, lint warnings
+//! can be added via the `add_lint` method on the Session structure. This
+//! requires a span and an ID of the node that the lint is being added to. The
+//! lint isn't actually emitted at that time because it is unknown what the
+//! actual lint level at that location is.
+//!
+//! To actually emit lint warnings/errors, a separate pass is used.
+//! A context keeps track of the current state of all lint levels.
+//! Upon entering a node of the AST that can modify the lint settings, the
+//! previous lint state is pushed onto a stack and the AST is then recursed
+//! into. As the AST is traversed, this keeps track of the current lint level
+//! for all lint attributes.
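+//!
+//! As an illustrative sketch (not code from this crate), the stack-based
+//! scoping means lint attributes nest lexically:
+//!
+//! ```rust,ignore (illustrative only)
+//! #[allow(unused_variables)] // state pushed on entering the module
+//! mod m {
+//!     #[deny(unused_variables)] // overrides the `allow` inside `f` only
+//!     fn f() { let x = 1; }     // `unused_variables` is denied here
+//!     fn g() { let y = 1; }     // back to `allow` once `f`'s state is popped
+//! }
+//! ```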
+
+use crate::context::{EarlyContext, LintContext, LintStore};
+use crate::passes::{EarlyLintPass, EarlyLintPassObject};
+use rustc_ast::ptr::P;
+use rustc_ast::visit::{self as ast_visit, Visitor};
+use rustc_ast::{self as ast, walk_list, HasAttrs};
+use rustc_middle::ty::RegisteredTools;
+use rustc_session::lint::{BufferedEarlyLint, LintBuffer, LintPass};
+use rustc_session::Session;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+use std::slice;
+use tracing::debug;
+
+macro_rules! run_early_pass { ($cx:expr, $f:ident, $($args:expr),*) => ({
+ $cx.pass.$f(&$cx.context, $($args),*);
+}) }
+
+pub struct EarlyContextAndPass<'a, T: EarlyLintPass> {
+ context: EarlyContext<'a>,
+ pass: T,
+}
+
+impl<'a, T: EarlyLintPass> EarlyContextAndPass<'a, T> {
+ fn check_id(&mut self, id: ast::NodeId) {
+ for early_lint in self.context.buffered.take(id) {
+ let BufferedEarlyLint { span, msg, node_id: _, lint_id, diagnostic } = early_lint;
+ self.context.lookup_with_diagnostics(
+ lint_id.lint,
+ Some(span),
+ |lint| {
+ lint.build(&msg).emit();
+ },
+ diagnostic,
+ );
+ }
+ }
+
+ /// Merge the lints specified by any lint attributes into the
+ /// current lint context, call the provided function, then reset the
+ /// lints in effect to their previous state.
+ fn with_lint_attrs<F>(&mut self, id: ast::NodeId, attrs: &'a [ast::Attribute], f: F)
+ where
+ F: FnOnce(&mut Self),
+ {
+ let is_crate_node = id == ast::CRATE_NODE_ID;
+ let push = self.context.builder.push(attrs, is_crate_node, None);
+
+ self.check_id(id);
+ debug!("early context: enter_attrs({:?})", attrs);
+ run_early_pass!(self, enter_lint_attrs, attrs);
+ f(self);
+ debug!("early context: exit_attrs({:?})", attrs);
+ run_early_pass!(self, exit_lint_attrs, attrs);
+ self.context.builder.pop(push);
+ }
+}
+
+impl<'a, T: EarlyLintPass> ast_visit::Visitor<'a> for EarlyContextAndPass<'a, T> {
+ fn visit_param(&mut self, param: &'a ast::Param) {
+ self.with_lint_attrs(param.id, &param.attrs, |cx| {
+ run_early_pass!(cx, check_param, param);
+ ast_visit::walk_param(cx, param);
+ });
+ }
+
+ fn visit_item(&mut self, it: &'a ast::Item) {
+ self.with_lint_attrs(it.id, &it.attrs, |cx| {
+ run_early_pass!(cx, check_item, it);
+ ast_visit::walk_item(cx, it);
+ run_early_pass!(cx, check_item_post, it);
+ })
+ }
+
+ fn visit_foreign_item(&mut self, it: &'a ast::ForeignItem) {
+ self.with_lint_attrs(it.id, &it.attrs, |cx| {
+ ast_visit::walk_foreign_item(cx, it);
+ })
+ }
+
+ fn visit_pat(&mut self, p: &'a ast::Pat) {
+ run_early_pass!(self, check_pat, p);
+ self.check_id(p.id);
+ ast_visit::walk_pat(self, p);
+ run_early_pass!(self, check_pat_post, p);
+ }
+
+ fn visit_anon_const(&mut self, c: &'a ast::AnonConst) {
+ self.check_id(c.id);
+ ast_visit::walk_anon_const(self, c);
+ }
+
+ fn visit_expr(&mut self, e: &'a ast::Expr) {
+ self.with_lint_attrs(e.id, &e.attrs, |cx| {
+ run_early_pass!(cx, check_expr, e);
+ ast_visit::walk_expr(cx, e);
+ })
+ }
+
+ fn visit_expr_field(&mut self, f: &'a ast::ExprField) {
+ self.with_lint_attrs(f.id, &f.attrs, |cx| {
+ ast_visit::walk_expr_field(cx, f);
+ })
+ }
+
+ fn visit_stmt(&mut self, s: &'a ast::Stmt) {
+ // Add the statement's lint attributes to our
+ // current state when checking the statement itself.
+ // This allows us to handle attributes like
+ // `#[allow(unused_doc_comments)]`, which apply to
+ // sibling attributes on the same target
+ //
+ // Note that statements get their attributes from
+ // the AST struct that they wrap (e.g. an item)
+ self.with_lint_attrs(s.id, s.attrs(), |cx| {
+ run_early_pass!(cx, check_stmt, s);
+ cx.check_id(s.id);
+ });
+ // The visitor for the AST struct wrapped
+ // by the statement (e.g. `Item`) will call
+ // `with_lint_attrs`, so do this walk
+ // outside of the above `with_lint_attrs` call
+ ast_visit::walk_stmt(self, s);
+ }
+
+ fn visit_fn(&mut self, fk: ast_visit::FnKind<'a>, span: Span, id: ast::NodeId) {
+ run_early_pass!(self, check_fn, fk, span, id);
+ self.check_id(id);
+ ast_visit::walk_fn(self, fk, span);
+
+ // Explicitly check for lints associated with 'closure_id', since
+ // it does not have a corresponding AST node
+ if let ast_visit::FnKind::Fn(_, _, sig, _, _, _) = fk {
+ if let ast::Async::Yes { closure_id, .. } = sig.header.asyncness {
+ self.check_id(closure_id);
+ }
+ }
+ }
+
+ fn visit_variant_data(&mut self, s: &'a ast::VariantData) {
+ if let Some(ctor_hir_id) = s.ctor_id() {
+ self.check_id(ctor_hir_id);
+ }
+ ast_visit::walk_struct_def(self, s);
+ }
+
+ fn visit_field_def(&mut self, s: &'a ast::FieldDef) {
+ self.with_lint_attrs(s.id, &s.attrs, |cx| {
+ ast_visit::walk_field_def(cx, s);
+ })
+ }
+
+ fn visit_variant(&mut self, v: &'a ast::Variant) {
+ self.with_lint_attrs(v.id, &v.attrs, |cx| {
+ run_early_pass!(cx, check_variant, v);
+ ast_visit::walk_variant(cx, v);
+ })
+ }
+
+ fn visit_ty(&mut self, t: &'a ast::Ty) {
+ run_early_pass!(self, check_ty, t);
+ self.check_id(t.id);
+ ast_visit::walk_ty(self, t);
+ }
+
+ fn visit_ident(&mut self, ident: Ident) {
+ run_early_pass!(self, check_ident, ident);
+ }
+
+ fn visit_local(&mut self, l: &'a ast::Local) {
+ self.with_lint_attrs(l.id, &l.attrs, |cx| {
+ run_early_pass!(cx, check_local, l);
+ ast_visit::walk_local(cx, l);
+ })
+ }
+
+ fn visit_block(&mut self, b: &'a ast::Block) {
+ run_early_pass!(self, check_block, b);
+ self.check_id(b.id);
+ ast_visit::walk_block(self, b);
+ }
+
+ fn visit_arm(&mut self, a: &'a ast::Arm) {
+ self.with_lint_attrs(a.id, &a.attrs, |cx| {
+ run_early_pass!(cx, check_arm, a);
+ ast_visit::walk_arm(cx, a);
+ })
+ }
+
+ fn visit_expr_post(&mut self, e: &'a ast::Expr) {
+ // Explicitly check for lints associated with 'closure_id', since
+ // it does not have a corresponding AST node
+ match e.kind {
+ ast::ExprKind::Closure(_, _, ast::Async::Yes { closure_id, .. }, ..)
+ | ast::ExprKind::Async(_, closure_id, ..) => self.check_id(closure_id),
+ _ => {}
+ }
+ }
+
+ fn visit_generic_arg(&mut self, arg: &'a ast::GenericArg) {
+ run_early_pass!(self, check_generic_arg, arg);
+ ast_visit::walk_generic_arg(self, arg);
+ }
+
+ fn visit_generic_param(&mut self, param: &'a ast::GenericParam) {
+ run_early_pass!(self, check_generic_param, param);
+ self.check_id(param.id);
+ ast_visit::walk_generic_param(self, param);
+ }
+
+ fn visit_generics(&mut self, g: &'a ast::Generics) {
+ run_early_pass!(self, check_generics, g);
+ ast_visit::walk_generics(self, g);
+ }
+
+ fn visit_where_predicate(&mut self, p: &'a ast::WherePredicate) {
+ ast_visit::walk_where_predicate(self, p);
+ }
+
+ fn visit_poly_trait_ref(&mut self, t: &'a ast::PolyTraitRef, m: &'a ast::TraitBoundModifier) {
+ run_early_pass!(self, check_poly_trait_ref, t, m);
+ ast_visit::walk_poly_trait_ref(self, t, m);
+ }
+
+ fn visit_assoc_item(&mut self, item: &'a ast::AssocItem, ctxt: ast_visit::AssocCtxt) {
+ self.with_lint_attrs(item.id, &item.attrs, |cx| match ctxt {
+ ast_visit::AssocCtxt::Trait => {
+ run_early_pass!(cx, check_trait_item, item);
+ ast_visit::walk_assoc_item(cx, item, ctxt);
+ }
+ ast_visit::AssocCtxt::Impl => {
+ run_early_pass!(cx, check_impl_item, item);
+ ast_visit::walk_assoc_item(cx, item, ctxt);
+ }
+ });
+ }
+
+ fn visit_lifetime(&mut self, lt: &'a ast::Lifetime, _: ast_visit::LifetimeCtxt) {
+ self.check_id(lt.id);
+ }
+
+ fn visit_path(&mut self, p: &'a ast::Path, id: ast::NodeId) {
+ self.check_id(id);
+ ast_visit::walk_path(self, p);
+ }
+
+ fn visit_path_segment(&mut self, path_span: Span, s: &'a ast::PathSegment) {
+ self.check_id(s.id);
+ ast_visit::walk_path_segment(self, path_span, s);
+ }
+
+ fn visit_attribute(&mut self, attr: &'a ast::Attribute) {
+ run_early_pass!(self, check_attribute, attr);
+ }
+
+ fn visit_mac_def(&mut self, mac: &'a ast::MacroDef, id: ast::NodeId) {
+ run_early_pass!(self, check_mac_def, mac, id);
+ self.check_id(id);
+ }
+
+ fn visit_mac_call(&mut self, mac: &'a ast::MacCall) {
+ run_early_pass!(self, check_mac, mac);
+ ast_visit::walk_mac(self, mac);
+ }
+}
+
+struct EarlyLintPassObjects<'a> {
+ lints: &'a mut [EarlyLintPassObject],
+}
+
+#[allow(rustc::lint_pass_impl_without_macro)]
+impl LintPass for EarlyLintPassObjects<'_> {
+ fn name(&self) -> &'static str {
+ panic!()
+ }
+}
+
+macro_rules! expand_early_lint_pass_impl_methods {
+ ([$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => (
+ $(fn $name(&mut self, context: &EarlyContext<'_>, $($param: $arg),*) {
+ for obj in self.lints.iter_mut() {
+ obj.$name(context, $($param),*);
+ }
+ })*
+ )
+}
+
+macro_rules! early_lint_pass_impl {
+ ([], [$($methods:tt)*]) => (
+ impl EarlyLintPass for EarlyLintPassObjects<'_> {
+ expand_early_lint_pass_impl_methods!([$($methods)*]);
+ }
+ )
+}
+
+crate::early_lint_methods!(early_lint_pass_impl, []);
+
+/// Early lints work on different nodes: either on the crate root or on freshly loaded modules.
+/// This trait generalizes over those nodes.
+pub trait EarlyCheckNode<'a>: Copy {
+ fn id(self) -> ast::NodeId;
+ fn attrs<'b>(self) -> &'b [ast::Attribute]
+ where
+ 'a: 'b;
+ fn check<'b>(self, cx: &mut EarlyContextAndPass<'b, impl EarlyLintPass>)
+ where
+ 'a: 'b;
+}
+
+impl<'a> EarlyCheckNode<'a> for &'a ast::Crate {
+ fn id(self) -> ast::NodeId {
+ ast::CRATE_NODE_ID
+ }
+ fn attrs<'b>(self) -> &'b [ast::Attribute]
+ where
+ 'a: 'b,
+ {
+ &self.attrs
+ }
+ fn check<'b>(self, cx: &mut EarlyContextAndPass<'b, impl EarlyLintPass>)
+ where
+ 'a: 'b,
+ {
+ run_early_pass!(cx, check_crate, self);
+ ast_visit::walk_crate(cx, self);
+ run_early_pass!(cx, check_crate_post, self);
+ }
+}
+
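+// The "freshly loaded module" case: the module's node id, its attributes, and
+// its items (cf. the `&ast::Crate` impl above).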
+impl<'a> EarlyCheckNode<'a> for (ast::NodeId, &'a [ast::Attribute], &'a [P<ast::Item>]) {
+ fn id(self) -> ast::NodeId {
+ self.0
+ }
+ fn attrs<'b>(self) -> &'b [ast::Attribute]
+ where
+ 'a: 'b,
+ {
+ self.1
+ }
+ fn check<'b>(self, cx: &mut EarlyContextAndPass<'b, impl EarlyLintPass>)
+ where
+ 'a: 'b,
+ {
+ walk_list!(cx, visit_attribute, self.1);
+ walk_list!(cx, visit_item, self.2);
+ }
+}
+
+fn early_lint_node<'a>(
+ sess: &Session,
+ warn_about_weird_lints: bool,
+ lint_store: &LintStore,
+ registered_tools: &RegisteredTools,
+ buffered: LintBuffer,
+ pass: impl EarlyLintPass,
+ check_node: impl EarlyCheckNode<'a>,
+) -> LintBuffer {
+ let mut cx = EarlyContextAndPass {
+ context: EarlyContext::new(
+ sess,
+ warn_about_weird_lints,
+ lint_store,
+ registered_tools,
+ buffered,
+ ),
+ pass,
+ };
+
+ cx.with_lint_attrs(check_node.id(), check_node.attrs(), |cx| check_node.check(cx));
+ cx.context.buffered
+}
+
+pub fn check_ast_node<'a>(
+ sess: &Session,
+ pre_expansion: bool,
+ lint_store: &LintStore,
+ registered_tools: &RegisteredTools,
+ lint_buffer: Option<LintBuffer>,
+ builtin_lints: impl EarlyLintPass,
+ check_node: impl EarlyCheckNode<'a>,
+) {
+ let passes =
+ if pre_expansion { &lint_store.pre_expansion_passes } else { &lint_store.early_passes };
+ let mut passes: Vec<_> = passes.iter().map(|p| (p)()).collect();
+ let mut buffered = lint_buffer.unwrap_or_default();
+
+ if sess.opts.unstable_opts.no_interleave_lints {
+ for (i, pass) in passes.iter_mut().enumerate() {
+ buffered =
+ sess.prof.extra_verbose_generic_activity("run_lint", pass.name()).run(|| {
+ early_lint_node(
+ sess,
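+                        // `warn_about_weird_lints`: only the first post-expansion
+                        // pass warns, so unknown-lint warnings aren't repeated
+                        // once per pass.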
+ !pre_expansion && i == 0,
+ lint_store,
+ registered_tools,
+ buffered,
+ EarlyLintPassObjects { lints: slice::from_mut(pass) },
+ check_node,
+ )
+ });
+ }
+ } else {
+ buffered = early_lint_node(
+ sess,
+ !pre_expansion,
+ lint_store,
+ registered_tools,
+ buffered,
+ builtin_lints,
+ check_node,
+ );
+
+ if !passes.is_empty() {
+ buffered = early_lint_node(
+ sess,
+ false,
+ lint_store,
+ registered_tools,
+ buffered,
+ EarlyLintPassObjects { lints: &mut passes[..] },
+ check_node,
+ );
+ }
+ }
+
+ // All of the buffered lints should have been emitted at this point.
+ // If not, that means that we somehow buffered a lint for a node id
+ // that was not lint-checked (perhaps it doesn't exist?). This is a bug.
+ for (id, lints) in buffered.map {
+ for early_lint in lints {
+ sess.delay_span_bug(
+ early_lint.span,
+ &format!(
+ "failed to process buffered lint here (dummy = {})",
+ id == ast::DUMMY_NODE_ID
+ ),
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs b/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs
new file mode 100644
index 000000000..f41ee6404
--- /dev/null
+++ b/compiler/rustc_lint/src/enum_intrinsics_non_enums.rs
@@ -0,0 +1,88 @@
+use crate::{context::LintContext, LateContext, LateLintPass};
+use rustc_errors::fluent;
+use rustc_hir as hir;
+use rustc_middle::ty::{visit::TypeVisitable, Ty};
+use rustc_span::{symbol::sym, Span};
+
+declare_lint! {
+ /// The `enum_intrinsics_non_enums` lint detects calls to
+ /// intrinsic functions that require an enum ([`core::mem::discriminant`],
+ /// [`core::mem::variant_count`]), but are called with a non-enum type.
+ ///
+ /// [`core::mem::discriminant`]: https://doc.rust-lang.org/core/mem/fn.discriminant.html
+ /// [`core::mem::variant_count`]: https://doc.rust-lang.org/core/mem/fn.variant_count.html
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(enum_intrinsics_non_enums)]
+ /// core::mem::discriminant::<i32>(&123);
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In order to accept any enum, the `mem::discriminant` and
+ /// `mem::variant_count` functions are generic over a type `T`.
+ /// This makes it technically possible for `T` to be a non-enum,
+ /// in which case the return value is unspecified.
+ ///
+ /// This lint prevents such incorrect usage of these functions.
+ ENUM_INTRINSICS_NON_ENUMS,
+ Deny,
+ "detects calls to `core::mem::discriminant` and `core::mem::variant_count` with non-enum types"
+}
+
+declare_lint_pass!(EnumIntrinsicsNonEnums => [ENUM_INTRINSICS_NON_ENUMS]);
+
+/// Returns `true` if we know for sure that the given type is not an enum. Note that for cases
+/// where the type is generic, we can't be certain whether it will be an enum, so we have to
+/// assume that it is.
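+/// For instance, `mem::discriminant::<i32>` is known to be non-enum, while a
+/// generic `mem::discriminant::<T>` might still be instantiated with an enum,
+/// so it is not linted.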
+fn is_non_enum(t: Ty<'_>) -> bool {
+ !t.is_enum() && !t.needs_subst()
+}
+
+fn enforce_mem_discriminant(
+ cx: &LateContext<'_>,
+ func_expr: &hir::Expr<'_>,
+ expr_span: Span,
+ args_span: Span,
+) {
+ let ty_param = cx.typeck_results().node_substs(func_expr.hir_id).type_at(0);
+ if is_non_enum(ty_param) {
+ cx.struct_span_lint(ENUM_INTRINSICS_NON_ENUMS, expr_span, |builder| {
+ builder
+ .build(fluent::lint::enum_intrinsics_mem_discriminant)
+ .set_arg("ty_param", ty_param)
+ .span_note(args_span, fluent::lint::note)
+ .emit();
+ });
+ }
+}
+
+fn enforce_mem_variant_count(cx: &LateContext<'_>, func_expr: &hir::Expr<'_>, span: Span) {
+ let ty_param = cx.typeck_results().node_substs(func_expr.hir_id).type_at(0);
+ if is_non_enum(ty_param) {
+ cx.struct_span_lint(ENUM_INTRINSICS_NON_ENUMS, span, |builder| {
+ builder
+ .build(fluent::lint::enum_intrinsics_mem_variant)
+ .set_arg("ty_param", ty_param)
+ .note(fluent::lint::note)
+ .emit();
+ });
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for EnumIntrinsicsNonEnums {
+ fn check_expr(&mut self, cx: &LateContext<'_>, expr: &hir::Expr<'_>) {
+ let hir::ExprKind::Call(func, args) = &expr.kind else { return };
+ let hir::ExprKind::Path(qpath) = &func.kind else { return };
+ let Some(def_id) = cx.qpath_res(qpath, func.hir_id).opt_def_id() else { return };
+ let Some(name) = cx.tcx.get_diagnostic_name(def_id) else { return };
+ match name {
+ sym::mem_discriminant => enforce_mem_discriminant(cx, func, expr.span, args[0].span),
+ sym::mem_variant_count => enforce_mem_variant_count(cx, func, expr.span),
+ _ => {}
+ }
+ }
+}
diff --git a/compiler/rustc_lint/src/expect.rs b/compiler/rustc_lint/src/expect.rs
new file mode 100644
index 000000000..699e81543
--- /dev/null
+++ b/compiler/rustc_lint/src/expect.rs
@@ -0,0 +1,59 @@
+use crate::builtin;
+use rustc_errors::fluent;
+use rustc_hir::HirId;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::{lint::LintExpectation, ty::TyCtxt};
+use rustc_session::lint::LintExpectationId;
+use rustc_span::symbol::sym;
+use rustc_span::Symbol;
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { check_expectations, ..*providers };
+}
+
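+/// Checks that every lint expectation registered with `#[expect(..)]` was
+/// fulfilled. An illustrative sketch of the user-facing behavior this backs
+/// (not a test from this crate):
+///
+/// ```rust,ignore (illustrative only)
+/// #![feature(lint_reasons)]
+///
+/// #[expect(unused_variables)] // fulfilled: the lint fires and is suppressed
+/// fn f() { let unused = 0; }
+///
+/// #[expect(unused_variables)] // unfulfilled: reported by this check
+/// fn g() {}
+/// ```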
+fn check_expectations(tcx: TyCtxt<'_>, tool_filter: Option<Symbol>) {
+ if !tcx.sess.features_untracked().enabled(sym::lint_reasons) {
+ return;
+ }
+
+ let fulfilled_expectations = tcx.sess.diagnostic().steal_fulfilled_expectation_ids();
+ let lint_expectations = &tcx.lint_levels(()).lint_expectations;
+
+ for (id, expectation) in lint_expectations {
+ // This check will always be true, since `lint_expectations` only
+ // holds stable ids
+ if let LintExpectationId::Stable { hir_id, .. } = id {
+ if !fulfilled_expectations.contains(&id)
+ && tool_filter.map_or(true, |filter| expectation.lint_tool == Some(filter))
+ {
+ emit_unfulfilled_expectation_lint(tcx, *hir_id, expectation);
+ }
+ } else {
+ unreachable!("at this stage all `LintExpectationId`s are stable");
+ }
+ }
+}
+
+fn emit_unfulfilled_expectation_lint(
+ tcx: TyCtxt<'_>,
+ hir_id: HirId,
+ expectation: &LintExpectation,
+) {
+ tcx.struct_span_lint_hir(
+ builtin::UNFULFILLED_LINT_EXPECTATIONS,
+ hir_id,
+ expectation.emission_span,
+ |diag| {
+ let mut diag = diag.build(fluent::lint::expectation);
+ if let Some(rationale) = expectation.reason {
+ diag.note(rationale.as_str());
+ }
+
+ if expectation.is_unfulfilled_lint_expectations {
+ diag.note(fluent::lint::note);
+ }
+
+ diag.emit();
+ },
+ );
+}
diff --git a/compiler/rustc_lint/src/hidden_unicode_codepoints.rs b/compiler/rustc_lint/src/hidden_unicode_codepoints.rs
new file mode 100644
index 000000000..fe2712525
--- /dev/null
+++ b/compiler/rustc_lint/src/hidden_unicode_codepoints.rs
@@ -0,0 +1,141 @@
+use crate::{EarlyContext, EarlyLintPass, LintContext};
+use ast::util::unicode::{contains_text_flow_control_chars, TEXT_FLOW_CONTROL_CHARS};
+use rustc_ast as ast;
+use rustc_errors::{fluent, Applicability, SuggestionStyle};
+use rustc_span::{BytePos, Span, Symbol};
+
+declare_lint! {
+    /// The `text_direction_codepoint_in_literal` lint detects Unicode codepoints that change the
+    /// visual representation of text on screen in a way that does not correspond to their
+    /// in-memory representation.
+ ///
+ /// ### Explanation
+ ///
+    /// The Unicode characters `\u{202A}`, `\u{202B}`, `\u{202D}`, `\u{202E}`, `\u{2066}`,
+    /// `\u{2067}`, `\u{2068}`, `\u{202C}` and `\u{2069}` make the flow of text on screen change
+    /// direction in software that supports these codepoints. This makes the text "abc" display
+    /// as "cba" on screen. By leveraging software that supports these, people can write specially
+ /// crafted literals that make the surrounding code seem like it's performing one action, when
+ /// in reality it is performing another. Because of this, we proactively lint against their
+ /// presence to avoid surprises.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(text_direction_codepoint_in_literal)]
+ /// fn main() {
+ /// println!("{:?}", '‮');
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ pub TEXT_DIRECTION_CODEPOINT_IN_LITERAL,
+ Deny,
+    "detects special Unicode codepoints that affect the visual representation of text on screen, \
+    changing the direction in which text flows",
+}
+
+declare_lint_pass!(HiddenUnicodeCodepoints => [TEXT_DIRECTION_CODEPOINT_IN_LITERAL]);
+
+impl HiddenUnicodeCodepoints {
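+    /// `padding` is the number of bytes between the start of `span` and the
+    /// start of the literal's text: `0` for doc comments, `1` for the opening
+    /// `"` or `'` of cooked strings and chars (see the callers below).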
+ fn lint_text_direction_codepoint(
+ &self,
+ cx: &EarlyContext<'_>,
+ text: Symbol,
+ span: Span,
+ padding: u32,
+ point_at_inner_spans: bool,
+ label: &str,
+ ) {
+ // Obtain the `Span`s for each of the forbidden chars.
+ let spans: Vec<_> = text
+ .as_str()
+ .char_indices()
+ .filter_map(|(i, c)| {
+ TEXT_FLOW_CONTROL_CHARS.contains(&c).then(|| {
+ let lo = span.lo() + BytePos(i as u32 + padding);
+ (c, span.with_lo(lo).with_hi(lo + BytePos(c.len_utf8() as u32)))
+ })
+ })
+ .collect();
+
+ cx.struct_span_lint(TEXT_DIRECTION_CODEPOINT_IN_LITERAL, span, |lint| {
+ let mut err = lint.build(fluent::lint::hidden_unicode_codepoints);
+ err.set_arg("label", label);
+ err.set_arg("count", spans.len());
+ err.span_label(span, fluent::lint::label);
+ err.note(fluent::lint::note);
+ if point_at_inner_spans {
+ for (c, span) in &spans {
+ err.span_label(*span, format!("{:?}", c));
+ }
+ }
+ if point_at_inner_spans && !spans.is_empty() {
+ err.multipart_suggestion_with_style(
+ fluent::lint::suggestion_remove,
+ spans.iter().map(|(_, span)| (*span, "".to_string())).collect(),
+ Applicability::MachineApplicable,
+ SuggestionStyle::HideCodeAlways,
+ );
+ err.multipart_suggestion(
+ fluent::lint::suggestion_escape,
+ spans
+ .into_iter()
+ .map(|(c, span)| {
+ let c = format!("{:?}", c);
+ (span, c[1..c.len() - 1].to_string())
+ })
+ .collect(),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ // FIXME: in other suggestions we've reversed the inner spans of doc comments. We
+ // should do the same here to provide the same good suggestions as we do for
+ // literals above.
+ err.set_arg(
+ "escaped",
+ spans
+ .into_iter()
+ .map(|(c, _)| format!("{:?}", c))
+ .collect::<Vec<String>>()
+ .join(", "),
+ );
+ err.note(fluent::lint::suggestion_remove);
+ err.note(fluent::lint::no_suggestion_note_escape);
+ }
+ err.emit();
+ });
+ }
+}
+impl EarlyLintPass for HiddenUnicodeCodepoints {
+ fn check_attribute(&mut self, cx: &EarlyContext<'_>, attr: &ast::Attribute) {
+ if let ast::AttrKind::DocComment(_, comment) = attr.kind {
+ if contains_text_flow_control_chars(comment.as_str()) {
+ self.lint_text_direction_codepoint(cx, comment, attr.span, 0, false, "doc comment");
+ }
+ }
+ }
+
+ fn check_expr(&mut self, cx: &EarlyContext<'_>, expr: &ast::Expr) {
+ // byte strings are already handled well enough by `EscapeError::NonAsciiCharInByteString`
+ let (text, span, padding) = match &expr.kind {
+ ast::ExprKind::Lit(ast::Lit { token, kind, span }) => {
+ let text = token.symbol;
+ if !contains_text_flow_control_chars(text.as_str()) {
+ return;
+ }
+ let padding = match kind {
+ // account for `"` or `'`
+ ast::LitKind::Str(_, ast::StrStyle::Cooked) | ast::LitKind::Char(_) => 1,
+ // account for `r###"`
+ ast::LitKind::Str(_, ast::StrStyle::Raw(val)) => *val as u32 + 2,
+ _ => return,
+ };
+ (text, span, padding)
+ }
+ _ => return,
+ };
+ self.lint_text_direction_codepoint(cx, text, *span, padding, true, "literal");
+ }
+}
diff --git a/compiler/rustc_lint/src/internal.rs b/compiler/rustc_lint/src/internal.rs
new file mode 100644
index 000000000..c26d78247
--- /dev/null
+++ b/compiler/rustc_lint/src/internal.rs
@@ -0,0 +1,469 @@
+//! Some lints that are only useful in the compiler or crates that use compiler internals, such as
+//! Clippy.
+
+use crate::{EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext};
+use rustc_ast as ast;
+use rustc_errors::{fluent, Applicability};
+use rustc_hir::def::Res;
+use rustc_hir::{def_id::DefId, Expr, ExprKind, GenericArg, PatKind, Path, PathSegment, QPath};
+use rustc_hir::{HirId, Impl, Item, ItemKind, Node, Pat, Ty, TyKind};
+use rustc_middle::ty;
+use rustc_session::{declare_lint_pass, declare_tool_lint};
+use rustc_span::hygiene::{ExpnKind, MacroKind};
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::Span;
+use tracing::debug;
+
+declare_tool_lint! {
+ pub rustc::DEFAULT_HASH_TYPES,
+ Allow,
+ "forbid HashMap and HashSet and suggest the FxHash* variants",
+ report_in_external_macro: true
+}
+
+declare_lint_pass!(DefaultHashTypes => [DEFAULT_HASH_TYPES]);
+
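+// An illustrative sketch of what this pass flags: a use site such as
+// `fn f(m: HashMap<u32, u32>)` is linted with a suggestion to use
+// `FxHashMap<u32, u32>`, while `use std::collections::HashMap;` itself is
+// deliberately skipped (imports are not usages).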
+impl LateLintPass<'_> for DefaultHashTypes {
+ fn check_path(&mut self, cx: &LateContext<'_>, path: &Path<'_>, hir_id: HirId) {
+ let Res::Def(rustc_hir::def::DefKind::Struct, def_id) = path.res else { return };
+ if matches!(cx.tcx.hir().get(hir_id), Node::Item(Item { kind: ItemKind::Use(..), .. })) {
+ // don't lint imports, only actual usages
+ return;
+ }
+ let replace = match cx.tcx.get_diagnostic_name(def_id) {
+ Some(sym::HashMap) => "FxHashMap",
+ Some(sym::HashSet) => "FxHashSet",
+ _ => return,
+ };
+ cx.struct_span_lint(DEFAULT_HASH_TYPES, path.span, |lint| {
+ lint.build(fluent::lint::default_hash_types)
+ .set_arg("preferred", replace)
+ .set_arg("used", cx.tcx.item_name(def_id))
+ .note(fluent::lint::note)
+ .emit();
+ });
+ }
+}
+
+/// Helper function for lints that check for expressions with calls and use typeck results to
+/// get the `DefId` and `SubstsRef` of the function.
+fn typeck_results_of_method_fn<'tcx>(
+ cx: &LateContext<'tcx>,
+ expr: &Expr<'_>,
+) -> Option<(Span, DefId, ty::subst::SubstsRef<'tcx>)> {
+ match expr.kind {
+ ExprKind::MethodCall(segment, _, _)
+ if let Some(def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id) =>
+ {
+ Some((segment.ident.span, def_id, cx.typeck_results().node_substs(expr.hir_id)))
+ },
+ _ => {
+ match cx.typeck_results().node_type(expr.hir_id).kind() {
+ &ty::FnDef(def_id, substs) => Some((expr.span, def_id, substs)),
+ _ => None,
+ }
+ }
+ }
+}
+
+declare_tool_lint! {
+ pub rustc::POTENTIAL_QUERY_INSTABILITY,
+ Allow,
+ "require explicit opt-in when using potentially unstable methods or functions",
+ report_in_external_macro: true
+}
+
+declare_lint_pass!(QueryStability => [POTENTIAL_QUERY_INSTABILITY]);
+
+impl LateLintPass<'_> for QueryStability {
+ fn check_expr(&mut self, cx: &LateContext<'_>, expr: &Expr<'_>) {
+ let Some((span, def_id, substs)) = typeck_results_of_method_fn(cx, expr) else { return };
+ if let Ok(Some(instance)) = ty::Instance::resolve(cx.tcx, cx.param_env, def_id, substs) {
+ let def_id = instance.def_id();
+ if cx.tcx.has_attr(def_id, sym::rustc_lint_query_instability) {
+ cx.struct_span_lint(POTENTIAL_QUERY_INSTABILITY, span, |lint| {
+ lint.build(fluent::lint::query_instability)
+ .set_arg("query", cx.tcx.item_name(def_id))
+ .note(fluent::lint::note)
+ .emit();
+ })
+ }
+ }
+ }
+}
+
+declare_tool_lint! {
+ pub rustc::USAGE_OF_TY_TYKIND,
+ Allow,
+ "usage of `ty::TyKind` outside of the `ty::sty` module",
+ report_in_external_macro: true
+}
+
+declare_tool_lint! {
+ pub rustc::USAGE_OF_QUALIFIED_TY,
+ Allow,
+ "using `ty::{Ty,TyCtxt}` instead of importing it",
+ report_in_external_macro: true
+}
+
+declare_lint_pass!(TyTyKind => [
+ USAGE_OF_TY_TYKIND,
+ USAGE_OF_QUALIFIED_TY,
+]);
+
+impl<'tcx> LateLintPass<'tcx> for TyTyKind {
+ fn check_path(
+ &mut self,
+ cx: &LateContext<'tcx>,
+ path: &'tcx rustc_hir::Path<'tcx>,
+ _: rustc_hir::HirId,
+ ) {
+ if let Some(segment) = path.segments.iter().nth_back(1)
+ && let Some(res) = &segment.res
+ && lint_ty_kind_usage(cx, res)
+ {
+ let span = path.span.with_hi(
+ segment.args.map_or(segment.ident.span, |a| a.span_ext).hi()
+ );
+ cx.struct_span_lint(USAGE_OF_TY_TYKIND, path.span, |lint| {
+ lint.build(fluent::lint::tykind_kind)
+ .span_suggestion(
+ span,
+ fluent::lint::suggestion,
+ "ty",
+ Applicability::MaybeIncorrect, // ty maybe needs an import
+ )
+ .emit();
+ });
+ }
+ }
+
+ fn check_ty(&mut self, cx: &LateContext<'_>, ty: &'tcx Ty<'tcx>) {
+ match &ty.kind {
+ TyKind::Path(QPath::Resolved(_, path)) => {
+ if lint_ty_kind_usage(cx, &path.res) {
+ cx.struct_span_lint(USAGE_OF_TY_TYKIND, path.span, |lint| {
+ let hir = cx.tcx.hir();
+ match hir.find(hir.get_parent_node(ty.hir_id)) {
+ Some(Node::Pat(Pat {
+ kind:
+ PatKind::Path(qpath)
+ | PatKind::TupleStruct(qpath, ..)
+ | PatKind::Struct(qpath, ..),
+ ..
+ })) => {
+ if let QPath::TypeRelative(qpath_ty, ..) = qpath
+ && qpath_ty.hir_id == ty.hir_id
+ {
+ lint.build(fluent::lint::tykind_kind)
+ .span_suggestion(
+ path.span,
+ fluent::lint::suggestion,
+ "ty",
+ Applicability::MaybeIncorrect, // ty maybe needs an import
+ )
+ .emit();
+ return;
+ }
+ }
+ Some(Node::Expr(Expr {
+ kind: ExprKind::Path(qpath),
+ ..
+ })) => {
+ if let QPath::TypeRelative(qpath_ty, ..) = qpath
+ && qpath_ty.hir_id == ty.hir_id
+ {
+ lint.build(fluent::lint::tykind_kind)
+ .span_suggestion(
+ path.span,
+ fluent::lint::suggestion,
+ "ty",
+ Applicability::MaybeIncorrect, // ty maybe needs an import
+ )
+ .emit();
+ return;
+ }
+ }
+ // Can't unify these two branches because qpath below is `&&` and above is `&`
+ // and `A | B` paths don't play well together with adjustments, apparently.
+ Some(Node::Expr(Expr {
+ kind: ExprKind::Struct(qpath, ..),
+ ..
+ })) => {
+ if let QPath::TypeRelative(qpath_ty, ..) = qpath
+ && qpath_ty.hir_id == ty.hir_id
+ {
+ lint.build(fluent::lint::tykind_kind)
+ .span_suggestion(
+ path.span,
+ fluent::lint::suggestion,
+ "ty",
+ Applicability::MaybeIncorrect, // ty maybe needs an import
+ )
+ .emit();
+ return;
+ }
+ }
+ _ => {}
+ }
+ lint.build(fluent::lint::tykind).help(fluent::lint::help).emit();
+ })
+ } else if !ty.span.from_expansion() && let Some(t) = is_ty_or_ty_ctxt(cx, &path) {
+ if path.segments.len() > 1 {
+ cx.struct_span_lint(USAGE_OF_QUALIFIED_TY, path.span, |lint| {
+ lint.build(fluent::lint::ty_qualified)
+ .set_arg("ty", t.clone())
+ .span_suggestion(
+ path.span,
+ fluent::lint::suggestion,
+ t,
+ // The import probably needs to be changed
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ })
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+}
+
+fn lint_ty_kind_usage(cx: &LateContext<'_>, res: &Res) -> bool {
+ if let Some(did) = res.opt_def_id() {
+ cx.tcx.is_diagnostic_item(sym::TyKind, did) || cx.tcx.is_diagnostic_item(sym::IrTyKind, did)
+ } else {
+ false
+ }
+}
+
+fn is_ty_or_ty_ctxt(cx: &LateContext<'_>, path: &Path<'_>) -> Option<String> {
+ match &path.res {
+ Res::Def(_, def_id) => {
+ if let Some(name @ (sym::Ty | sym::TyCtxt)) = cx.tcx.get_diagnostic_name(*def_id) {
+ return Some(format!("{}{}", name, gen_args(path.segments.last().unwrap())));
+ }
+ }
+ // Only lint on `&Ty` and `&TyCtxt` if it is used outside of a trait.
+ Res::SelfTy { trait_: None, alias_to: Some((did, _)) } => {
+ if let ty::Adt(adt, substs) = cx.tcx.type_of(did).kind() {
+ if let Some(name @ (sym::Ty | sym::TyCtxt)) = cx.tcx.get_diagnostic_name(adt.did())
+ {
+ // NOTE: This path is currently unreachable as `Ty<'tcx>` is
+ // defined as a type alias meaning that `impl<'tcx> Ty<'tcx>`
+ // is not actually allowed.
+ //
+                    // I (@lcnr) still kept this branch in so we don't miss this
+ // if we ever change it in the future.
+ return Some(format!("{}<{}>", name, substs[0]));
+ }
+ }
+ }
+ _ => (),
+ }
+
+ None
+}
+
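+/// Renders just the lifetime arguments of a path segment, e.g. `"<'tcx>"` for
+/// a segment like `TyCtxt<'tcx>` and `""` when there are none; type and const
+/// arguments are ignored by this helper.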
+fn gen_args(segment: &PathSegment<'_>) -> String {
+ if let Some(args) = &segment.args {
+ let lifetimes = args
+ .args
+ .iter()
+ .filter_map(|arg| {
+ if let GenericArg::Lifetime(lt) = arg {
+ Some(lt.name.ident().to_string())
+ } else {
+ None
+ }
+ })
+ .collect::<Vec<_>>();
+
+ if !lifetimes.is_empty() {
+ return format!("<{}>", lifetimes.join(", "));
+ }
+ }
+
+ String::new()
+}
+
+declare_tool_lint! {
+ pub rustc::LINT_PASS_IMPL_WITHOUT_MACRO,
+ Allow,
+ "`impl LintPass` without the `declare_lint_pass!` or `impl_lint_pass!` macros"
+}
+
+declare_lint_pass!(LintPassImpl => [LINT_PASS_IMPL_WITHOUT_MACRO]);
+
+impl EarlyLintPass for LintPassImpl {
+ fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) {
+ if let ast::ItemKind::Impl(box ast::Impl { of_trait: Some(lint_pass), .. }) = &item.kind {
+ if let Some(last) = lint_pass.path.segments.last() {
+ if last.ident.name == sym::LintPass {
+ let expn_data = lint_pass.path.span.ctxt().outer_expn_data();
+ let call_site = expn_data.call_site;
+ if expn_data.kind != ExpnKind::Macro(MacroKind::Bang, sym::impl_lint_pass)
+ && call_site.ctxt().outer_expn_data().kind
+ != ExpnKind::Macro(MacroKind::Bang, sym::declare_lint_pass)
+ {
+ cx.struct_span_lint(
+ LINT_PASS_IMPL_WITHOUT_MACRO,
+ lint_pass.path.span,
+ |lint| {
+ lint.build(fluent::lint::lintpass_by_hand)
+ .help(fluent::lint::help)
+ .emit();
+ },
+ )
+ }
+ }
+ }
+ }
+ }
+}
+
+declare_tool_lint! {
+ pub rustc::EXISTING_DOC_KEYWORD,
+ Allow,
+ "Check that documented keywords in std and core actually exist",
+ report_in_external_macro: true
+}
+
+declare_lint_pass!(ExistingDocKeyword => [EXISTING_DOC_KEYWORD]);
+
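+// This relies on keyword `Symbol`s being pre-interned as a contiguous block at
+// the start of the symbol table, so comparing against the last keyword in that
+// block suffices as a keyword test.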
+fn is_doc_keyword(s: Symbol) -> bool {
+ s <= kw::Union
+}
+
+impl<'tcx> LateLintPass<'tcx> for ExistingDocKeyword {
+ fn check_item(&mut self, cx: &LateContext<'_>, item: &rustc_hir::Item<'_>) {
+ for attr in cx.tcx.hir().attrs(item.hir_id()) {
+ if !attr.has_name(sym::doc) {
+ continue;
+ }
+ if let Some(list) = attr.meta_item_list() {
+ for nested in list {
+ if nested.has_name(sym::keyword) {
+ let v = nested
+ .value_str()
+ .expect("#[doc(keyword = \"...\")] expected a value!");
+ if is_doc_keyword(v) {
+ return;
+ }
+ cx.struct_span_lint(EXISTING_DOC_KEYWORD, attr.span, |lint| {
+ lint.build(fluent::lint::non_existant_doc_keyword)
+ .set_arg("keyword", v)
+ .help(fluent::lint::help)
+ .emit();
+ });
+ }
+ }
+ }
+ }
+ }
+}
+
+declare_tool_lint! {
+ pub rustc::UNTRANSLATABLE_DIAGNOSTIC,
+ Allow,
+ "prevent creation of diagnostics which cannot be translated",
+ report_in_external_macro: true
+}
+
+declare_tool_lint! {
+ pub rustc::DIAGNOSTIC_OUTSIDE_OF_IMPL,
+ Allow,
+ "prevent creation of diagnostics outside of `SessionDiagnostic`/`AddSubdiagnostic` impls",
+ report_in_external_macro: true
+}
+
+declare_lint_pass!(Diagnostics => [ UNTRANSLATABLE_DIAGNOSTIC, DIAGNOSTIC_OUTSIDE_OF_IMPL ]);
+
+impl LateLintPass<'_> for Diagnostics {
+ fn check_expr(&mut self, cx: &LateContext<'_>, expr: &Expr<'_>) {
+ let Some((span, def_id, substs)) = typeck_results_of_method_fn(cx, expr) else { return };
+ debug!(?span, ?def_id, ?substs);
+ let has_attr = ty::Instance::resolve(cx.tcx, cx.param_env, def_id, substs)
+ .ok()
+ .and_then(|inst| inst)
+ .map(|inst| cx.tcx.has_attr(inst.def_id(), sym::rustc_lint_diagnostics))
+ .unwrap_or(false);
+ if !has_attr {
+ return;
+ }
+
+ let mut found_impl = false;
+ for (_, parent) in cx.tcx.hir().parent_iter(expr.hir_id) {
+ debug!(?parent);
+ if let Node::Item(Item { kind: ItemKind::Impl(impl_), .. }) = parent &&
+ let Impl { of_trait: Some(of_trait), .. } = impl_ &&
+ let Some(def_id) = of_trait.trait_def_id() &&
+ let Some(name) = cx.tcx.get_diagnostic_name(def_id) &&
+ matches!(name, sym::SessionDiagnostic | sym::AddSubdiagnostic | sym::DecorateLint)
+ {
+ found_impl = true;
+ break;
+ }
+ }
+ debug!(?found_impl);
+ if !found_impl {
+ cx.struct_span_lint(DIAGNOSTIC_OUTSIDE_OF_IMPL, span, |lint| {
+ lint.build(fluent::lint::diag_out_of_impl).emit();
+ })
+ }
+
+ let mut found_diagnostic_message = false;
+ for ty in substs.types() {
+ debug!(?ty);
+ if let Some(adt_def) = ty.ty_adt_def() &&
+ let Some(name) = cx.tcx.get_diagnostic_name(adt_def.did()) &&
+ matches!(name, sym::DiagnosticMessage | sym::SubdiagnosticMessage)
+ {
+ found_diagnostic_message = true;
+ break;
+ }
+ }
+ debug!(?found_diagnostic_message);
+ if !found_diagnostic_message {
+ cx.struct_span_lint(UNTRANSLATABLE_DIAGNOSTIC, span, |lint| {
+ lint.build(fluent::lint::untranslatable_diag).emit();
+ })
+ }
+ }
+}
+
+declare_tool_lint! {
+ pub rustc::BAD_OPT_ACCESS,
+ Deny,
+ "prevent using options by field access when there is a wrapper function",
+ report_in_external_macro: true
+}
+
+declare_lint_pass!(BadOptAccess => [ BAD_OPT_ACCESS ]);
+
+impl LateLintPass<'_> for BadOptAccess {
+ fn check_expr(&mut self, cx: &LateContext<'_>, expr: &Expr<'_>) {
+ let ExprKind::Field(base, target) = expr.kind else { return };
+ let Some(adt_def) = cx.typeck_results().expr_ty(base).ty_adt_def() else { return };
+        // Skip types without `#[rustc_lint_opt_ty]`; this is purely a fast path
+        // so the rest of the lint can be skipped for unrelated types.
+ if !cx.tcx.has_attr(adt_def.did(), sym::rustc_lint_opt_ty) {
+ return;
+ }
+
+ for field in adt_def.all_fields() {
+ if field.name == target.name &&
+ let Some(attr) = cx.tcx.get_attr(field.did, sym::rustc_lint_opt_deny_field_access) &&
+ let Some(items) = attr.meta_item_list() &&
+ let Some(item) = items.first() &&
+ let Some(literal) = item.literal() &&
+ let ast::LitKind::Str(val, _) = literal.kind
+ {
+                cx.struct_span_lint(BAD_OPT_ACCESS, expr.span, |lint| {
+                    lint.build(val.as_str()).emit();
+                });
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_lint/src/late.rs b/compiler/rustc_lint/src/late.rs
new file mode 100644
index 000000000..a329b3751
--- /dev/null
+++ b/compiler/rustc_lint/src/late.rs
@@ -0,0 +1,482 @@
+//! Implementation of lint checking.
+//!
+//! The lint checking is mostly consolidated into one pass which runs
+//! after all other analyses. Throughout compilation, lint warnings
+//! can be added via the `add_lint` method on the Session structure. This
+//! requires a span and an ID of the node that the lint is being added to. The
+//! lint isn't actually emitted at that time because it is unknown what the
+//! actual lint level at that location is.
+//!
+//! To actually emit lint warnings/errors, a separate pass is used.
+//! A context keeps track of the current state of all lint levels.
+//! Upon entering a node of the AST that can modify the lint settings, the
+//! previous lint state is pushed onto a stack and the AST is then recursed
+//! into. As the AST is traversed, this keeps track of the current lint level
+//! for all lint attributes.
+
+use crate::{passes::LateLintPassObject, LateContext, LateLintPass, LintStore};
+use rustc_ast as ast;
+use rustc_data_structures::sync::join;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::intravisit as hir_visit;
+use rustc_hir::intravisit::Visitor;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::lint::LintPass;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+
+use std::any::Any;
+use std::cell::Cell;
+use std::slice;
+use tracing::debug;
+
+/// Extract the `LintStore` from the query context.
+/// This function exists because we've erased `LintStore` as `dyn Any` in the context.
+pub fn unerased_lint_store(tcx: TyCtxt<'_>) -> &LintStore {
+ let store: &dyn Any = &*tcx.lint_store;
+ store.downcast_ref().unwrap()
+}
+
+macro_rules! lint_callback { ($cx:expr, $f:ident, $($args:expr),*) => ({
+ $cx.pass.$f(&$cx.context, $($args),*);
+}) }
+
+struct LateContextAndPass<'tcx, T: LateLintPass<'tcx>> {
+ context: LateContext<'tcx>,
+ pass: T,
+}
+
+impl<'tcx, T: LateLintPass<'tcx>> LateContextAndPass<'tcx, T> {
+ /// Merge the lints specified by any lint attributes into the
+ /// current lint context, call the provided function, then reset the
+ /// lints in effect to their previous state.
+ fn with_lint_attrs<F>(&mut self, id: hir::HirId, f: F)
+ where
+ F: FnOnce(&mut Self),
+ {
+ let attrs = self.context.tcx.hir().attrs(id);
+ let prev = self.context.last_node_with_lint_attrs;
+ self.context.last_node_with_lint_attrs = id;
+ debug!("late context: enter_attrs({:?})", attrs);
+ lint_callback!(self, enter_lint_attrs, attrs);
+ f(self);
+ debug!("late context: exit_attrs({:?})", attrs);
+ lint_callback!(self, exit_lint_attrs, attrs);
+ self.context.last_node_with_lint_attrs = prev;
+ }
+
+ fn with_param_env<F>(&mut self, id: hir::HirId, f: F)
+ where
+ F: FnOnce(&mut Self),
+ {
+ let old_param_env = self.context.param_env;
+ self.context.param_env =
+ self.context.tcx.param_env(self.context.tcx.hir().local_def_id(id));
+ f(self);
+ self.context.param_env = old_param_env;
+ }
+
+ fn process_mod(&mut self, m: &'tcx hir::Mod<'tcx>, s: Span, n: hir::HirId) {
+ lint_callback!(self, check_mod, m, s, n);
+ hir_visit::walk_mod(self, m, n);
+ }
+}
+
+impl<'tcx, T: LateLintPass<'tcx>> hir_visit::Visitor<'tcx> for LateContextAndPass<'tcx, T> {
+ type NestedFilter = nested_filter::All;
+
+ /// Because lints are scoped lexically, we want to walk nested
+ /// items in the context of the outer item, so enable
+ /// deep-walking.
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.context.tcx.hir()
+ }
+
+ fn visit_nested_body(&mut self, body_id: hir::BodyId) {
+ let old_enclosing_body = self.context.enclosing_body.replace(body_id);
+ let old_cached_typeck_results = self.context.cached_typeck_results.get();
+
+ // HACK(eddyb) avoid trashing `cached_typeck_results` when we're
+ // nested in `visit_fn`, which may have already resulted in them
+ // being queried.
+ if old_enclosing_body != Some(body_id) {
+ self.context.cached_typeck_results.set(None);
+ }
+
+ let body = self.context.tcx.hir().body(body_id);
+ self.visit_body(body);
+ self.context.enclosing_body = old_enclosing_body;
+
+ // See HACK comment above.
+ if old_enclosing_body != Some(body_id) {
+ self.context.cached_typeck_results.set(old_cached_typeck_results);
+ }
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ self.with_lint_attrs(param.hir_id, |cx| {
+ hir_visit::walk_param(cx, param);
+ });
+ }
+
+ fn visit_body(&mut self, body: &'tcx hir::Body<'tcx>) {
+ lint_callback!(self, check_body, body);
+ hir_visit::walk_body(self, body);
+ lint_callback!(self, check_body_post, body);
+ }
+
+ fn visit_item(&mut self, it: &'tcx hir::Item<'tcx>) {
+ let generics = self.context.generics.take();
+ self.context.generics = it.kind.generics();
+ let old_cached_typeck_results = self.context.cached_typeck_results.take();
+ let old_enclosing_body = self.context.enclosing_body.take();
+ self.with_lint_attrs(it.hir_id(), |cx| {
+ cx.with_param_env(it.hir_id(), |cx| {
+ lint_callback!(cx, check_item, it);
+ hir_visit::walk_item(cx, it);
+ lint_callback!(cx, check_item_post, it);
+ });
+ });
+ self.context.enclosing_body = old_enclosing_body;
+ self.context.cached_typeck_results.set(old_cached_typeck_results);
+ self.context.generics = generics;
+ }
+
+ fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem<'tcx>) {
+ self.with_lint_attrs(it.hir_id(), |cx| {
+ cx.with_param_env(it.hir_id(), |cx| {
+ lint_callback!(cx, check_foreign_item, it);
+ hir_visit::walk_foreign_item(cx, it);
+ });
+ })
+ }
+
+ fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) {
+ lint_callback!(self, check_pat, p);
+ hir_visit::walk_pat(self, p);
+ }
+
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ self.with_lint_attrs(e.hir_id, |cx| {
+ lint_callback!(cx, check_expr, e);
+ hir_visit::walk_expr(cx, e);
+ lint_callback!(cx, check_expr_post, e);
+ })
+ }
+
+ fn visit_stmt(&mut self, s: &'tcx hir::Stmt<'tcx>) {
+ // See `EarlyContextAndPass::visit_stmt` for an explanation
+ // of why we call `walk_stmt` outside of `with_lint_attrs`
+ self.with_lint_attrs(s.hir_id, |cx| {
+ lint_callback!(cx, check_stmt, s);
+ });
+ hir_visit::walk_stmt(self, s);
+ }
+
+ fn visit_fn(
+ &mut self,
+ fk: hir_visit::FnKind<'tcx>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ body_id: hir::BodyId,
+ span: Span,
+ id: hir::HirId,
+ ) {
+ // Wrap in typeck results here, not just in visit_nested_body,
+ // in order for `check_fn` to be able to use them.
+ let old_enclosing_body = self.context.enclosing_body.replace(body_id);
+ let old_cached_typeck_results = self.context.cached_typeck_results.take();
+ let body = self.context.tcx.hir().body(body_id);
+ lint_callback!(self, check_fn, fk, decl, body, span, id);
+ hir_visit::walk_fn(self, fk, decl, body_id, span, id);
+ self.context.enclosing_body = old_enclosing_body;
+ self.context.cached_typeck_results.set(old_cached_typeck_results);
+ }
+
+ fn visit_variant_data(
+ &mut self,
+ s: &'tcx hir::VariantData<'tcx>,
+ _: Symbol,
+ _: &'tcx hir::Generics<'tcx>,
+ _: hir::HirId,
+ _: Span,
+ ) {
+ lint_callback!(self, check_struct_def, s);
+ hir_visit::walk_struct_def(self, s);
+ }
+
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
+ self.with_lint_attrs(s.hir_id, |cx| {
+ lint_callback!(cx, check_field_def, s);
+ hir_visit::walk_field_def(cx, s);
+ })
+ }
+
+ fn visit_variant(
+ &mut self,
+ v: &'tcx hir::Variant<'tcx>,
+ g: &'tcx hir::Generics<'tcx>,
+ item_id: hir::HirId,
+ ) {
+ self.with_lint_attrs(v.id, |cx| {
+ lint_callback!(cx, check_variant, v);
+ hir_visit::walk_variant(cx, v, g, item_id);
+ })
+ }
+
+ fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx>) {
+ lint_callback!(self, check_ty, t);
+ hir_visit::walk_ty(self, t);
+ }
+
+ fn visit_infer(&mut self, inf: &'tcx hir::InferArg) {
+ hir_visit::walk_inf(self, inf);
+ }
+
+ fn visit_mod(&mut self, m: &'tcx hir::Mod<'tcx>, s: Span, n: hir::HirId) {
+ if !self.context.only_module {
+ self.process_mod(m, s, n);
+ }
+ }
+
+ fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
+ self.with_lint_attrs(l.hir_id, |cx| {
+ lint_callback!(cx, check_local, l);
+ hir_visit::walk_local(cx, l);
+ })
+ }
+
+ fn visit_block(&mut self, b: &'tcx hir::Block<'tcx>) {
+ lint_callback!(self, check_block, b);
+ hir_visit::walk_block(self, b);
+ lint_callback!(self, check_block_post, b);
+ }
+
+ fn visit_arm(&mut self, a: &'tcx hir::Arm<'tcx>) {
+ lint_callback!(self, check_arm, a);
+ hir_visit::walk_arm(self, a);
+ }
+
+ fn visit_generic_param(&mut self, p: &'tcx hir::GenericParam<'tcx>) {
+ lint_callback!(self, check_generic_param, p);
+ hir_visit::walk_generic_param(self, p);
+ }
+
+ fn visit_generics(&mut self, g: &'tcx hir::Generics<'tcx>) {
+ lint_callback!(self, check_generics, g);
+ hir_visit::walk_generics(self, g);
+ }
+
+ fn visit_where_predicate(&mut self, p: &'tcx hir::WherePredicate<'tcx>) {
+ hir_visit::walk_where_predicate(self, p);
+ }
+
+ fn visit_poly_trait_ref(
+ &mut self,
+ t: &'tcx hir::PolyTraitRef<'tcx>,
+ m: hir::TraitBoundModifier,
+ ) {
+ lint_callback!(self, check_poly_trait_ref, t, m);
+ hir_visit::walk_poly_trait_ref(self, t, m);
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
+ let generics = self.context.generics.take();
+ self.context.generics = Some(&trait_item.generics);
+ self.with_lint_attrs(trait_item.hir_id(), |cx| {
+ cx.with_param_env(trait_item.hir_id(), |cx| {
+ lint_callback!(cx, check_trait_item, trait_item);
+ hir_visit::walk_trait_item(cx, trait_item);
+ });
+ });
+ self.context.generics = generics;
+ }
+
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ let generics = self.context.generics.take();
+ self.context.generics = Some(&impl_item.generics);
+ self.with_lint_attrs(impl_item.hir_id(), |cx| {
+ cx.with_param_env(impl_item.hir_id(), |cx| {
+ lint_callback!(cx, check_impl_item, impl_item);
+ hir_visit::walk_impl_item(cx, impl_item);
+ lint_callback!(cx, check_impl_item_post, impl_item);
+ });
+ });
+ self.context.generics = generics;
+ }
+
+ fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) {
+ hir_visit::walk_lifetime(self, lt);
+ }
+
+ fn visit_path(&mut self, p: &'tcx hir::Path<'tcx>, id: hir::HirId) {
+ lint_callback!(self, check_path, p, id);
+ hir_visit::walk_path(self, p);
+ }
+
+ fn visit_attribute(&mut self, attr: &'tcx ast::Attribute) {
+ lint_callback!(self, check_attribute, attr);
+ }
+}
+
+struct LateLintPassObjects<'a> {
+ lints: &'a mut [LateLintPassObject],
+}
+
+#[allow(rustc::lint_pass_impl_without_macro)]
+impl LintPass for LateLintPassObjects<'_> {
+ fn name(&self) -> &'static str {
+ panic!()
+ }
+}
+
+macro_rules! expand_late_lint_pass_impl_methods {
+ ([$hir:tt], [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => (
+ $(fn $name(&mut self, context: &LateContext<$hir>, $($param: $arg),*) {
+ for obj in self.lints.iter_mut() {
+ obj.$name(context, $($param),*);
+ }
+ })*
+ )
+}
+
+macro_rules! late_lint_pass_impl {
+ ([], [$hir:tt], $methods:tt) => {
+ impl<$hir> LateLintPass<$hir> for LateLintPassObjects<'_> {
+ expand_late_lint_pass_impl_methods!([$hir], $methods);
+ }
+ };
+}
+
+crate::late_lint_methods!(late_lint_pass_impl, [], ['tcx]);
+
+fn late_lint_mod_pass<'tcx, T: LateLintPass<'tcx>>(
+ tcx: TyCtxt<'tcx>,
+ module_def_id: LocalDefId,
+ pass: T,
+) {
+ let access_levels = &tcx.privacy_access_levels(());
+
+ let context = LateContext {
+ tcx,
+ enclosing_body: None,
+ cached_typeck_results: Cell::new(None),
+ param_env: ty::ParamEnv::empty(),
+ access_levels,
+ lint_store: unerased_lint_store(tcx),
+ last_node_with_lint_attrs: tcx.hir().local_def_id_to_hir_id(module_def_id),
+ generics: None,
+ only_module: true,
+ };
+
+ let mut cx = LateContextAndPass { context, pass };
+
+ let (module, span, hir_id) = tcx.hir().get_module(module_def_id);
+ cx.process_mod(module, span, hir_id);
+
+ // Visit the crate attributes
+ if hir_id == hir::CRATE_HIR_ID {
+ for attr in tcx.hir().attrs(hir::CRATE_HIR_ID).iter() {
+ cx.visit_attribute(attr)
+ }
+ }
+}
+
+pub fn late_lint_mod<'tcx, T: LateLintPass<'tcx>>(
+ tcx: TyCtxt<'tcx>,
+ module_def_id: LocalDefId,
+ builtin_lints: T,
+) {
+ if tcx.sess.opts.unstable_opts.no_interleave_lints {
+        // These passes run in late_lint_crate with -Z no_interleave_lints
+ return;
+ }
+
+ late_lint_mod_pass(tcx, module_def_id, builtin_lints);
+
+ let mut passes: Vec<_> =
+ unerased_lint_store(tcx).late_module_passes.iter().map(|pass| (pass)()).collect();
+
+ if !passes.is_empty() {
+ late_lint_mod_pass(tcx, module_def_id, LateLintPassObjects { lints: &mut passes[..] });
+ }
+}
+
+fn late_lint_pass_crate<'tcx, T: LateLintPass<'tcx>>(tcx: TyCtxt<'tcx>, pass: T) {
+ let access_levels = &tcx.privacy_access_levels(());
+
+ let context = LateContext {
+ tcx,
+ enclosing_body: None,
+ cached_typeck_results: Cell::new(None),
+ param_env: ty::ParamEnv::empty(),
+ access_levels,
+ lint_store: unerased_lint_store(tcx),
+ last_node_with_lint_attrs: hir::CRATE_HIR_ID,
+ generics: None,
+ only_module: false,
+ };
+
+ let mut cx = LateContextAndPass { context, pass };
+
+ // Visit the whole crate.
+ cx.with_lint_attrs(hir::CRATE_HIR_ID, |cx| {
+        // Since the root module isn't visited as an item (because it isn't
+        // one), check it here.
+ lint_callback!(cx, check_crate,);
+ tcx.hir().walk_toplevel_module(cx);
+ tcx.hir().walk_attributes(cx);
+ lint_callback!(cx, check_crate_post,);
+ })
+}
+
+fn late_lint_crate<'tcx, T: LateLintPass<'tcx>>(tcx: TyCtxt<'tcx>, builtin_lints: T) {
+ let mut passes = unerased_lint_store(tcx).late_passes.iter().map(|p| (p)()).collect::<Vec<_>>();
+
+ if !tcx.sess.opts.unstable_opts.no_interleave_lints {
+ if !passes.is_empty() {
+ late_lint_pass_crate(tcx, LateLintPassObjects { lints: &mut passes[..] });
+ }
+
+ late_lint_pass_crate(tcx, builtin_lints);
+ } else {
+ for pass in &mut passes {
+ tcx.sess.prof.extra_verbose_generic_activity("run_late_lint", pass.name()).run(|| {
+ late_lint_pass_crate(tcx, LateLintPassObjects { lints: slice::from_mut(pass) });
+ });
+ }
+
+ let mut passes: Vec<_> =
+ unerased_lint_store(tcx).late_module_passes.iter().map(|pass| (pass)()).collect();
+
+ for pass in &mut passes {
+ tcx.sess.prof.extra_verbose_generic_activity("run_late_module_lint", pass.name()).run(
+ || {
+ late_lint_pass_crate(tcx, LateLintPassObjects { lints: slice::from_mut(pass) });
+ },
+ );
+ }
+ }
+}
+
+/// Performs lint checking on a crate.
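+///
+/// Whole-crate lints and per-module lints are independent, so they run on the
+/// two sides of a `join` and may execute in parallel in a parallel-compiler
+/// build (see `rustc_data_structures::sync::join`).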
+pub fn check_crate<'tcx, T: LateLintPass<'tcx>>(
+ tcx: TyCtxt<'tcx>,
+ builtin_lints: impl FnOnce() -> T + Send,
+) {
+ join(
+ || {
+ tcx.sess.time("crate_lints", || {
+ // Run whole crate non-incremental lints
+ late_lint_crate(tcx, builtin_lints());
+ });
+ },
+ || {
+ tcx.sess.time("module_lints", || {
+ // Run per-module lints
+ tcx.hir().par_for_each_module(|module| tcx.ensure().lint_mod(module));
+ });
+ },
+ );
+}
diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs
new file mode 100644
index 000000000..00e96f20d
--- /dev/null
+++ b/compiler/rustc_lint/src/levels.rs
@@ -0,0 +1,813 @@
+use crate::context::{CheckLintNameResult, LintStore};
+use crate::late::unerased_lint_store;
+use rustc_ast as ast;
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{struct_span_err, Applicability, Diagnostic, LintDiagnosticBuilder, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::{intravisit, HirId};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::lint::{
+ struct_lint_level, LevelAndSource, LintExpectation, LintLevelMap, LintLevelSets,
+ LintLevelSource, LintSet, LintStackIndex, COMMAND_LINE,
+};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{RegisteredTools, TyCtxt};
+use rustc_session::lint::{
+ builtin::{self, FORBIDDEN_LINT_GROUPS, SINGLE_USE_LIFETIMES, UNFULFILLED_LINT_EXPECTATIONS},
+ Level, Lint, LintExpectationId, LintId,
+};
+use rustc_session::parse::{add_feature_diagnostics, feature_err};
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use tracing::debug;
+
+fn lint_levels(tcx: TyCtxt<'_>, (): ()) -> LintLevelMap {
+ let store = unerased_lint_store(tcx);
+ let levels =
+ LintLevelsBuilder::new(tcx.sess, false, &store, &tcx.resolutions(()).registered_tools);
+ let mut builder = LintLevelMapBuilder { levels, tcx };
+ let krate = tcx.hir().krate();
+
+ builder.levels.id_to_set.reserve(krate.owners.len() + 1);
+
+ let push =
+ builder.levels.push(tcx.hir().attrs(hir::CRATE_HIR_ID), true, Some(hir::CRATE_HIR_ID));
+
+ builder.levels.register_id(hir::CRATE_HIR_ID);
+ tcx.hir().walk_toplevel_module(&mut builder);
+ builder.levels.pop(push);
+
+ builder.levels.update_unstable_expectation_ids();
+ builder.levels.build_map()
+}
+
+pub struct LintLevelsBuilder<'s> {
+ sess: &'s Session,
+ lint_expectations: Vec<(LintExpectationId, LintExpectation)>,
+ /// Each expectation has a stable and an unstable identifier. This map
+ /// is used to map from unstable to stable [`LintExpectationId`]s.
+ expectation_id_map: FxHashMap<LintExpectationId, LintExpectationId>,
+ sets: LintLevelSets,
+ id_to_set: FxHashMap<HirId, LintStackIndex>,
+ cur: LintStackIndex,
+ warn_about_weird_lints: bool,
+ store: &'s LintStore,
+ registered_tools: &'s RegisteredTools,
+}
+
+pub struct BuilderPush {
+ prev: LintStackIndex,
+ pub changed: bool,
+}
+
+impl<'s> LintLevelsBuilder<'s> {
+ pub fn new(
+ sess: &'s Session,
+ warn_about_weird_lints: bool,
+ store: &'s LintStore,
+ registered_tools: &'s RegisteredTools,
+ ) -> Self {
+ let mut builder = LintLevelsBuilder {
+ sess,
+ lint_expectations: Default::default(),
+ expectation_id_map: Default::default(),
+ sets: LintLevelSets::new(),
+ cur: COMMAND_LINE,
+ id_to_set: Default::default(),
+ warn_about_weird_lints,
+ store,
+ registered_tools,
+ };
+ builder.process_command_line(sess, store);
+ assert_eq!(builder.sets.list.len(), 1);
+ builder
+ }
+
+ pub(crate) fn sess(&self) -> &Session {
+ self.sess
+ }
+
+ pub(crate) fn lint_store(&self) -> &LintStore {
+ self.store
+ }
+
+ fn current_specs(&self) -> &FxHashMap<LintId, LevelAndSource> {
+ &self.sets.list[self.cur].specs
+ }
+
+ fn current_specs_mut(&mut self) -> &mut FxHashMap<LintId, LevelAndSource> {
+ &mut self.sets.list[self.cur].specs
+ }
+
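+    /// Seeds the root lint set from the command line: `--cap-lints` becomes the
+    /// cap, and each `-A`/`-W`/`-D`/`-F`/`--force-warn` flag becomes an entry in
+    /// the command-line `LintSet` (with `ForceWarn` and `Forbid` entries shielded
+    /// from later overrides).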
+ fn process_command_line(&mut self, sess: &Session, store: &LintStore) {
+ self.sets.lint_cap = sess.opts.lint_cap.unwrap_or(Level::Forbid);
+
+ self.cur =
+ self.sets.list.push(LintSet { specs: FxHashMap::default(), parent: COMMAND_LINE });
+ for &(ref lint_name, level) in &sess.opts.lint_opts {
+ store.check_lint_name_cmdline(sess, &lint_name, level, self.registered_tools);
+ let orig_level = level;
+ let lint_flag_val = Symbol::intern(lint_name);
+
+ let Ok(ids) = store.find_lints(&lint_name) else {
+ // errors handled in check_lint_name_cmdline above
+ continue
+ };
+ for id in ids {
+ // ForceWarn and Forbid cannot be overridden
+ if let Some((Level::ForceWarn(_) | Level::Forbid, _)) =
+ self.current_specs().get(&id)
+ {
+ continue;
+ }
+
+ if self.check_gated_lint(id, DUMMY_SP) {
+ let src = LintLevelSource::CommandLine(lint_flag_val, orig_level);
+ self.current_specs_mut().insert(id, (level, src));
+ }
+ }
+ }
+ }
+
+    /// Attempts to insert an entry mapping `id` to `level_src`. If unsuccessful
+    /// (e.g. because a `forbid` level was already set in the same scope), emits a
+    /// diagnostic and leaves `specs` unchanged.
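+    ///
+    /// For example, lowering a crate-level `forbid` trips this check:
+    ///
+    /// ```rust,compile_fail
+    /// #![forbid(unsafe_code)]
+    ///
+    /// #[allow(unsafe_code)] // error[E0453]: allow(unsafe_code) incompatible with previous forbid
+    /// fn main() {}
+    /// ```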
+ fn insert_spec(&mut self, id: LintId, (level, src): LevelAndSource) {
+ let (old_level, old_src) =
+ self.sets.get_lint_level(id.lint, self.cur, Some(self.current_specs()), &self.sess);
+ // Setting to a non-forbid level is an error if the lint previously had
+ // a forbid level. Note that this is not necessarily true even with a
+ // `#[forbid(..)]` attribute present, as that is overridden by `--cap-lints`.
+ //
+ // This means that this only errors if we're truly lowering the lint
+ // level from forbid.
+ if level != Level::Forbid {
+ if let Level::Forbid = old_level {
+ // Backwards compatibility check:
+ //
+ // We used to not consider `forbid(lint_group)`
+ // as preventing `allow(lint)` for some lint `lint` in
+ // `lint_group`. For now, issue a future-compatibility
+ // warning for this case.
+ let id_name = id.lint.name_lower();
+ let fcw_warning = match old_src {
+ LintLevelSource::Default => false,
+ LintLevelSource::Node(symbol, _, _) => self.store.is_lint_group(symbol),
+ LintLevelSource::CommandLine(symbol, _) => self.store.is_lint_group(symbol),
+ };
+ debug!(
+ "fcw_warning={:?}, specs.get(&id) = {:?}, old_src={:?}, id_name={:?}",
+ fcw_warning,
+ self.current_specs(),
+ old_src,
+ id_name
+ );
+
+ let decorate_diag = |diag: &mut Diagnostic| {
+ diag.span_label(src.span(), "overruled by previous forbid");
+ match old_src {
+ LintLevelSource::Default => {
+ diag.note(&format!(
+ "`forbid` lint level is the default for {}",
+ id.to_string()
+ ));
+ }
+ LintLevelSource::Node(_, forbid_source_span, reason) => {
+ diag.span_label(forbid_source_span, "`forbid` level set here");
+ if let Some(rationale) = reason {
+ diag.note(rationale.as_str());
+ }
+ }
+ LintLevelSource::CommandLine(_, _) => {
+ diag.note("`forbid` lint level was set on command line");
+ }
+ }
+ };
+ if !fcw_warning {
+ let mut diag_builder = struct_span_err!(
+ self.sess,
+ src.span(),
+ E0453,
+ "{}({}) incompatible with previous forbid",
+ level.as_str(),
+ src.name(),
+ );
+ decorate_diag(&mut diag_builder);
+ diag_builder.emit();
+ } else {
+ self.struct_lint(
+ FORBIDDEN_LINT_GROUPS,
+ Some(src.span().into()),
+ |diag_builder| {
+ let mut diag_builder = diag_builder.build(&format!(
+ "{}({}) incompatible with previous forbid",
+ level.as_str(),
+ src.name(),
+ ));
+ decorate_diag(&mut diag_builder);
+ diag_builder.emit();
+ },
+ );
+ }
+
+ // Retain the forbid lint level, unless we are
+ // issuing a FCW. In the FCW case, we want to
+ // respect the new setting.
+ if !fcw_warning {
+ return;
+ }
+ }
+ }
+
+ // The lint `unfulfilled_lint_expectations` can't be expected, as it would suppress itself.
+ // Handling expectations of this lint would add additional complexity with little to no
+ // benefit. The expect level for this lint will therefore be ignored.
+ if let Level::Expect(_) = level && id == LintId::of(UNFULFILLED_LINT_EXPECTATIONS) {
+ return;
+ }
+
+ match (old_level, level) {
+ // If the new level is an expectation store it in `ForceWarn`
+ (Level::ForceWarn(_), Level::Expect(expectation_id)) => self
+ .current_specs_mut()
+ .insert(id, (Level::ForceWarn(Some(expectation_id)), old_src)),
+ // Keep `ForceWarn` level but drop the expectation
+ (Level::ForceWarn(_), _) => {
+ self.current_specs_mut().insert(id, (Level::ForceWarn(None), old_src))
+ }
+ // Set the lint level as normal
+ _ => self.current_specs_mut().insert(id, (level, src)),
+ };
+ }
+
+ /// Pushes a list of AST lint attributes onto this context.
+ ///
+    /// This function will return a `BuilderPush` object which should be passed
+    /// to `pop` when the scope introduced by these attributes is exited.
+ ///
+ /// This function will perform a number of tasks:
+ ///
+ /// * It'll validate all lint-related attributes in `attrs`
+ /// * It'll mark all lint-related attributes as used
+ /// * Lint levels will be updated based on the attributes provided
+ /// * Lint attributes are validated, e.g., a `#[forbid]` can't be switched to
+ /// `#[allow]`
+ ///
+ /// Don't forget to call `pop`!
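+    ///
+    /// A typical caller pairs the two around a scope, as
+    /// `LintLevelMapBuilder::with_lint_attrs` does below:
+    ///
+    /// ```ignore (illustrative)
+    /// let push = self.levels.push(attrs, is_crate_hir, Some(id));
+    /// // ... visit the nested nodes with the new levels in scope ...
+    /// self.levels.pop(push);
+    /// ```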
+ pub(crate) fn push(
+ &mut self,
+ attrs: &[ast::Attribute],
+ is_crate_node: bool,
+ source_hir_id: Option<HirId>,
+ ) -> BuilderPush {
+ let prev = self.cur;
+ self.cur = self.sets.list.push(LintSet { specs: FxHashMap::default(), parent: prev });
+
+ let sess = self.sess;
+ let bad_attr = |span| struct_span_err!(sess, span, E0452, "malformed lint attribute input");
+ for (attr_index, attr) in attrs.iter().enumerate() {
+ if attr.has_name(sym::automatically_derived) {
+ self.current_specs_mut().insert(
+ LintId::of(SINGLE_USE_LIFETIMES),
+ (Level::Allow, LintLevelSource::Default),
+ );
+ continue;
+ }
+
+ let level = match Level::from_attr(attr) {
+ None => continue,
+ // This is the only lint level with a `LintExpectationId` that can be created from an attribute
+ Some(Level::Expect(unstable_id)) if let Some(hir_id) = source_hir_id => {
+ let stable_id = self.create_stable_id(unstable_id, hir_id, attr_index);
+
+ Level::Expect(stable_id)
+ }
+ Some(lvl) => lvl,
+ };
+
+ let Some(mut metas) = attr.meta_item_list() else {
+ continue
+ };
+
+ if metas.is_empty() {
+ // This emits the unused_attributes lint for `#[level()]`
+ continue;
+ }
+
+ // Before processing the lint names, look for a reason (RFC 2383)
+ // at the end.
+ let mut reason = None;
+ let tail_li = &metas[metas.len() - 1];
+ if let Some(item) = tail_li.meta_item() {
+ match item.kind {
+ ast::MetaItemKind::Word => {} // actual lint names handled later
+ ast::MetaItemKind::NameValue(ref name_value) => {
+ if item.path == sym::reason {
+ if let ast::LitKind::Str(rationale, _) = name_value.kind {
+ if !self.sess.features_untracked().lint_reasons {
+ feature_err(
+ &self.sess.parse_sess,
+ sym::lint_reasons,
+ item.span,
+ "lint reasons are experimental",
+ )
+ .emit();
+ }
+ reason = Some(rationale);
+ } else {
+ bad_attr(name_value.span)
+ .span_label(name_value.span, "reason must be a string literal")
+ .emit();
+ }
+                            // Found the reason; pop it off the meta list so only lint names remain.
+ metas.pop().unwrap();
+ } else {
+ bad_attr(item.span)
+ .span_label(item.span, "bad attribute argument")
+ .emit();
+ }
+ }
+ ast::MetaItemKind::List(_) => {
+ bad_attr(item.span).span_label(item.span, "bad attribute argument").emit();
+ }
+ }
+ }
+
+ for (lint_index, li) in metas.iter_mut().enumerate() {
+ let level = match level {
+ Level::Expect(mut id) => {
+ id.set_lint_index(Some(lint_index as u16));
+ Level::Expect(id)
+ }
+ level => level,
+ };
+
+ let sp = li.span();
+ let meta_item = match li {
+ ast::NestedMetaItem::MetaItem(meta_item) if meta_item.is_word() => meta_item,
+ _ => {
+ let mut err = bad_attr(sp);
+ let mut add_label = true;
+ if let Some(item) = li.meta_item() {
+ if let ast::MetaItemKind::NameValue(_) = item.kind {
+ if item.path == sym::reason {
+ err.span_label(sp, "reason in lint attribute must come last");
+ add_label = false;
+ }
+ }
+ }
+ if add_label {
+ err.span_label(sp, "bad attribute argument");
+ }
+ err.emit();
+ continue;
+ }
+ };
+ let tool_ident = if meta_item.path.segments.len() > 1 {
+ Some(meta_item.path.segments.remove(0).ident)
+ } else {
+ None
+ };
+ let tool_name = tool_ident.map(|ident| ident.name);
+ let name = pprust::path_to_string(&meta_item.path);
+ let lint_result =
+ self.store.check_lint_name(&name, tool_name, self.registered_tools);
+ match &lint_result {
+ CheckLintNameResult::Ok(ids) => {
+                        // This checks for instances where the user writes
+                        // `#[expect(unfulfilled_lint_expectations)]`. In that case we want to
+                        // avoid overriding the lint level and instead add an expectation that
+                        // can't be fulfilled. The lint message will include an explanation that
+                        // the `unfulfilled_lint_expectations` lint can't be expected.
+ if let Level::Expect(expect_id) = level {
+                            // The `unfulfilled_lint_expectations` lint is not part of any lint
+                            // group. Therefore, we only need to check the slice if it contains a
+                            // single lint.
+ let is_unfulfilled_lint_expectations = match ids {
+ [lint] => *lint == LintId::of(UNFULFILLED_LINT_EXPECTATIONS),
+ _ => false,
+ };
+ self.lint_expectations.push((
+ expect_id,
+ LintExpectation::new(
+ reason,
+ sp,
+ is_unfulfilled_lint_expectations,
+ tool_name,
+ ),
+ ));
+ }
+ let src = LintLevelSource::Node(
+ meta_item.path.segments.last().expect("empty lint name").ident.name,
+ sp,
+ reason,
+ );
+ for &id in *ids {
+ if self.check_gated_lint(id, attr.span) {
+ self.insert_spec(id, (level, src));
+ }
+ }
+ }
+
+ CheckLintNameResult::Tool(result) => {
+ match *result {
+ Ok(ids) => {
+ let complete_name =
+ &format!("{}::{}", tool_ident.unwrap().name, name);
+ let src = LintLevelSource::Node(
+ Symbol::intern(complete_name),
+ sp,
+ reason,
+ );
+ for id in ids {
+ self.insert_spec(*id, (level, src));
+ }
+ if let Level::Expect(expect_id) = level {
+ self.lint_expectations.push((
+ expect_id,
+ LintExpectation::new(reason, sp, false, tool_name),
+ ));
+ }
+ }
+ Err((Some(ids), ref new_lint_name)) => {
+ let lint = builtin::RENAMED_AND_REMOVED_LINTS;
+ let (lvl, src) = self.sets.get_lint_level(
+ lint,
+ self.cur,
+ Some(self.current_specs()),
+ &sess,
+ );
+ struct_lint_level(
+ self.sess,
+ lint,
+ lvl,
+ src,
+ Some(sp.into()),
+ |lint| {
+ let msg = format!(
+ "lint name `{}` is deprecated \
+ and may not have an effect in the future.",
+ name
+ );
+ lint.build(&msg)
+ .span_suggestion(
+ sp,
+ "change it to",
+ new_lint_name,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ },
+ );
+
+ let src = LintLevelSource::Node(
+ Symbol::intern(&new_lint_name),
+ sp,
+ reason,
+ );
+ for id in ids {
+ self.insert_spec(*id, (level, src));
+ }
+ if let Level::Expect(expect_id) = level {
+ self.lint_expectations.push((
+ expect_id,
+ LintExpectation::new(reason, sp, false, tool_name),
+ ));
+ }
+ }
+ Err((None, _)) => {
+                                // If `Tool(Err(None, _))` is returned, then either the lint does not
+                                // exist in the tool or the code was not compiled with the tool and
+                                // therefore the lint was never added to the `LintStore`. Detecting
+                                // this is the responsibility of the lint tool.
+ }
+ }
+ }
+
+ &CheckLintNameResult::NoTool => {
+ let mut err = struct_span_err!(
+ sess,
+ tool_ident.map_or(DUMMY_SP, |ident| ident.span),
+ E0710,
+ "unknown tool name `{}` found in scoped lint: `{}::{}`",
+ tool_name.unwrap(),
+ tool_name.unwrap(),
+ pprust::path_to_string(&meta_item.path),
+ );
+ if sess.is_nightly_build() {
+ err.help(&format!(
+ "add `#![register_tool({})]` to the crate root",
+ tool_name.unwrap()
+ ));
+ }
+ err.emit();
+ continue;
+ }
+
+ _ if !self.warn_about_weird_lints => {}
+
+ CheckLintNameResult::Warning(msg, renamed) => {
+ let lint = builtin::RENAMED_AND_REMOVED_LINTS;
+ let (renamed_lint_level, src) = self.sets.get_lint_level(
+ lint,
+ self.cur,
+ Some(self.current_specs()),
+ &sess,
+ );
+ struct_lint_level(
+ self.sess,
+ lint,
+ renamed_lint_level,
+ src,
+ Some(sp.into()),
+ |lint| {
+ let mut err = lint.build(msg);
+ if let Some(new_name) = &renamed {
+ err.span_suggestion(
+ sp,
+ "use the new name",
+ new_name,
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ },
+ );
+ }
+ CheckLintNameResult::NoLint(suggestion) => {
+ let lint = builtin::UNKNOWN_LINTS;
+ let (level, src) = self.sets.get_lint_level(
+ lint,
+ self.cur,
+ Some(self.current_specs()),
+ self.sess,
+ );
+ struct_lint_level(self.sess, lint, level, src, Some(sp.into()), |lint| {
+ let name = if let Some(tool_ident) = tool_ident {
+ format!("{}::{}", tool_ident.name, name)
+ } else {
+ name.to_string()
+ };
+ let mut db = lint.build(format!("unknown lint: `{}`", name));
+ if let Some(suggestion) = suggestion {
+ db.span_suggestion(
+ sp,
+ "did you mean",
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ }
+ db.emit();
+ });
+ }
+ }
+ // If this lint was renamed, apply the new lint instead of ignoring the attribute.
+ // This happens outside of the match because the new lint should be applied even if
+ // we don't warn about the name change.
+ if let CheckLintNameResult::Warning(_, Some(new_name)) = lint_result {
+ // Ignore any errors or warnings that happen because the new name is inaccurate
+ // NOTE: `new_name` already includes the tool name, so we don't have to add it again.
+ if let CheckLintNameResult::Ok(ids) =
+ self.store.check_lint_name(&new_name, None, self.registered_tools)
+ {
+ let src = LintLevelSource::Node(Symbol::intern(&new_name), sp, reason);
+ for &id in ids {
+ if self.check_gated_lint(id, attr.span) {
+ self.insert_spec(id, (level, src));
+ }
+ }
+ if let Level::Expect(expect_id) = level {
+ self.lint_expectations.push((
+ expect_id,
+ LintExpectation::new(reason, sp, false, tool_name),
+ ));
+ }
+ } else {
+ panic!("renamed lint does not exist: {}", new_name);
+ }
+ }
+ }
+ }
+
+ if !is_crate_node {
+ for (id, &(level, ref src)) in self.current_specs().iter() {
+ if !id.lint.crate_level_only {
+ continue;
+ }
+
+ let LintLevelSource::Node(lint_attr_name, lint_attr_span, _) = *src else {
+ continue
+ };
+
+ let lint = builtin::UNUSED_ATTRIBUTES;
+ let (lint_level, lint_src) =
+ self.sets.get_lint_level(lint, self.cur, Some(self.current_specs()), self.sess);
+ struct_lint_level(
+ self.sess,
+ lint,
+ lint_level,
+ lint_src,
+ Some(lint_attr_span.into()),
+ |lint| {
+ let mut db = lint.build(&format!(
+ "{}({}) is ignored unless specified at crate level",
+ level.as_str(),
+ lint_attr_name
+ ));
+ db.emit();
+ },
+ );
+ // don't set a separate error for every lint in the group
+ break;
+ }
+ }
+
+ if self.current_specs().is_empty() {
+ self.sets.list.pop();
+ self.cur = prev;
+ }
+
+ BuilderPush { prev, changed: prev != self.cur }
+ }
+
+ fn create_stable_id(
+ &mut self,
+ unstable_id: LintExpectationId,
+ hir_id: HirId,
+ attr_index: usize,
+ ) -> LintExpectationId {
+ let stable_id =
+ LintExpectationId::Stable { hir_id, attr_index: attr_index as u16, lint_index: None };
+
+ self.expectation_id_map.insert(unstable_id, stable_id);
+
+ stable_id
+ }
+
+ /// Checks if the lint is gated on a feature that is not enabled.
+ ///
+    /// Returns `true` if the lint is not feature-gated, or if its gating feature is enabled.
+ fn check_gated_lint(&self, lint_id: LintId, span: Span) -> bool {
+ if let Some(feature) = lint_id.lint.feature_gate {
+ if !self.sess.features_untracked().enabled(feature) {
+ let lint = builtin::UNKNOWN_LINTS;
+ let (level, src) = self.lint_level(builtin::UNKNOWN_LINTS);
+ struct_lint_level(self.sess, lint, level, src, Some(span.into()), |lint_db| {
+ let mut db =
+ lint_db.build(&format!("unknown lint: `{}`", lint_id.lint.name_lower()));
+ db.note(&format!("the `{}` lint is unstable", lint_id.lint.name_lower(),));
+ add_feature_diagnostics(&mut db, &self.sess.parse_sess, feature);
+ db.emit();
+ });
+ return false;
+ }
+ }
+ true
+ }
+
+    /// Called after `push` when the scope of a set of attributes is exited.
+ pub fn pop(&mut self, push: BuilderPush) {
+ self.cur = push.prev;
+ }
+
+ /// Find the lint level for a lint.
+ pub fn lint_level(&self, lint: &'static Lint) -> (Level, LintLevelSource) {
+ self.sets.get_lint_level(lint, self.cur, None, self.sess)
+ }
+
+ /// Used to emit a lint-related diagnostic based on the current state of
+ /// this lint context.
+ pub fn struct_lint(
+ &self,
+ lint: &'static Lint,
+ span: Option<MultiSpan>,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ let (level, src) = self.lint_level(lint);
+ struct_lint_level(self.sess, lint, level, src, span, decorate)
+ }
+
+ /// Registers the ID provided with the current set of lints stored in
+ /// this context.
+ pub fn register_id(&mut self, id: HirId) {
+ self.id_to_set.insert(id, self.cur);
+ }
+
+ fn update_unstable_expectation_ids(&self) {
+ self.sess.diagnostic().update_unstable_expectation_id(&self.expectation_id_map);
+ }
+
+ pub fn build_map(self) -> LintLevelMap {
+ LintLevelMap {
+ sets: self.sets,
+ id_to_set: self.id_to_set,
+ lint_expectations: self.lint_expectations,
+ }
+ }
+}
+
+struct LintLevelMapBuilder<'tcx> {
+ levels: LintLevelsBuilder<'tcx>,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl LintLevelMapBuilder<'_> {
+ fn with_lint_attrs<F>(&mut self, id: hir::HirId, f: F)
+ where
+ F: FnOnce(&mut Self),
+ {
+ let is_crate_hir = id == hir::CRATE_HIR_ID;
+ let attrs = self.tcx.hir().attrs(id);
+ let push = self.levels.push(attrs, is_crate_hir, Some(id));
+
+ if push.changed {
+ self.levels.register_id(id);
+ }
+ f(self);
+ self.levels.pop(push);
+ }
+}
+
+impl<'tcx> intravisit::Visitor<'tcx> for LintLevelMapBuilder<'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ self.with_lint_attrs(param.hir_id, |builder| {
+ intravisit::walk_param(builder, param);
+ });
+ }
+
+ fn visit_item(&mut self, it: &'tcx hir::Item<'tcx>) {
+ self.with_lint_attrs(it.hir_id(), |builder| {
+ intravisit::walk_item(builder, it);
+ });
+ }
+
+ fn visit_foreign_item(&mut self, it: &'tcx hir::ForeignItem<'tcx>) {
+ self.with_lint_attrs(it.hir_id(), |builder| {
+ intravisit::walk_foreign_item(builder, it);
+ })
+ }
+
+ fn visit_stmt(&mut self, e: &'tcx hir::Stmt<'tcx>) {
+ // We will call `with_lint_attrs` when we walk
+ // the `StmtKind`. The outer statement itself doesn't
+ // define the lint levels.
+ intravisit::walk_stmt(self, e);
+ }
+
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ self.with_lint_attrs(e.hir_id, |builder| {
+ intravisit::walk_expr(builder, e);
+ })
+ }
+
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
+ self.with_lint_attrs(s.hir_id, |builder| {
+ intravisit::walk_field_def(builder, s);
+ })
+ }
+
+ fn visit_variant(
+ &mut self,
+ v: &'tcx hir::Variant<'tcx>,
+ g: &'tcx hir::Generics<'tcx>,
+ item_id: hir::HirId,
+ ) {
+ self.with_lint_attrs(v.id, |builder| {
+ intravisit::walk_variant(builder, v, g, item_id);
+ })
+ }
+
+ fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
+ self.with_lint_attrs(l.hir_id, |builder| {
+ intravisit::walk_local(builder, l);
+ })
+ }
+
+ fn visit_arm(&mut self, a: &'tcx hir::Arm<'tcx>) {
+ self.with_lint_attrs(a.hir_id, |builder| {
+ intravisit::walk_arm(builder, a);
+ })
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
+ self.with_lint_attrs(trait_item.hir_id(), |builder| {
+ intravisit::walk_trait_item(builder, trait_item);
+ });
+ }
+
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ self.with_lint_attrs(impl_item.hir_id(), |builder| {
+ intravisit::walk_impl_item(builder, impl_item);
+ });
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.lint_levels = lint_levels;
+}
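Taken together, `lint_levels` and the builder above implement ordinary attribute
scoping. A minimal sketch of the behavior being computed, in user code (the lint
names are chosen only for illustration):

```rust
#![warn(dead_code)] // crate level: pushed first, alongside command-line flags

#[allow(dead_code)] // pushed for this module, popped when it ends
mod quiet {
    fn unused() {} // no warning: the module's `allow` is in scope
}

fn also_unused() {} // warns: only the crate-level `warn` applies here
```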
diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs
new file mode 100644
index 000000000..389a0b5d1
--- /dev/null
+++ b/compiler/rustc_lint/src/lib.rs
@@ -0,0 +1,538 @@
+//! Lints, aka compiler warnings.
+//!
+//! A 'lint' check is a kind of miscellaneous constraint that a user _might_
+//! want to enforce, but might reasonably want to permit as well, on a
+//! module-by-module basis. They contrast with static constraints enforced by
+//! other phases of the compiler, which are generally required to hold in order
+//! to compile the program at all.
+//!
+//! Most lints can be written as [LintPass] instances. These run after
+//! all other analyses. The `LintPass`es built into rustc are defined
+//! within [rustc_session::lint::builtin],
+//! which has further comments on how to add such a lint.
+//! rustc can also load user-defined lint plugins via the plugin mechanism.
+//!
+//! Some of rustc's lints are defined elsewhere in the compiler and work by
+//! calling `add_lint()` on the overall `Session` object. This works when
+//! it happens before the main lint pass, which emits the lints stored by
+//! `add_lint()`. Emitting lints after the main lint pass (from codegen, for
+//! example) requires more effort. See `emit_lint` and `GatherNodeLevels`
+//! in `context.rs`.
+//!
+//! Some code also exists in [rustc_session::lint], [rustc_middle::lint].
+//!
+//! ## Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![allow(rustc::potential_query_instability)]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(array_windows)]
+#![feature(box_patterns)]
+#![feature(control_flow_enum)]
+#![feature(if_let_guard)]
+#![feature(iter_intersperse)]
+#![feature(iter_order_by)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(never_type)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate rustc_middle;
+#[macro_use]
+extern crate rustc_session;
+
+mod array_into_iter;
+pub mod builtin;
+mod context;
+mod early;
+mod enum_intrinsics_non_enums;
+mod expect;
+pub mod hidden_unicode_codepoints;
+mod internal;
+mod late;
+mod levels;
+mod methods;
+mod non_ascii_idents;
+mod non_fmt_panic;
+mod nonstandard_style;
+mod noop_method_call;
+mod pass_by_value;
+mod passes;
+mod redundant_semicolon;
+mod traits;
+mod types;
+mod unused;
+
+pub use array_into_iter::ARRAY_INTO_ITER;
+
+use rustc_ast as ast;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::lint::builtin::{
+ BARE_TRAIT_OBJECTS, ELIDED_LIFETIMES_IN_PATHS, EXPLICIT_OUTLIVES_REQUIREMENTS,
+};
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+use array_into_iter::ArrayIntoIter;
+use builtin::*;
+use enum_intrinsics_non_enums::EnumIntrinsicsNonEnums;
+use hidden_unicode_codepoints::*;
+use internal::*;
+use methods::*;
+use non_ascii_idents::*;
+use non_fmt_panic::NonPanicFmt;
+use nonstandard_style::*;
+use noop_method_call::*;
+use pass_by_value::*;
+use redundant_semicolon::*;
+use traits::*;
+use types::*;
+use unused::*;
+
+/// Useful for other parts of the compiler / Clippy.
+pub use builtin::SoftLints;
+pub use context::{CheckLintNameResult, FindLintError, LintStore};
+pub use context::{EarlyContext, LateContext, LintContext};
+pub use early::{check_ast_node, EarlyCheckNode};
+pub use late::{check_crate, unerased_lint_store};
+pub use passes::{EarlyLintPass, LateLintPass};
+pub use rustc_session::lint::Level::{self, *};
+pub use rustc_session::lint::{BufferedEarlyLint, FutureIncompatibleInfo, Lint, LintId};
+pub use rustc_session::lint::{LintArray, LintPass};
+
+pub fn provide(providers: &mut Providers) {
+ levels::provide(providers);
+ expect::provide(providers);
+ *providers = Providers { lint_mod, ..*providers };
+}
+
+fn lint_mod(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ late::late_lint_mod(tcx, module_def_id, BuiltinCombinedModuleLateLintPass::new());
+}
+
+macro_rules! pre_expansion_lint_passes {
+ ($macro:path, $args:tt) => {
+ $macro!($args, [KeywordIdents: KeywordIdents,]);
+ };
+}
+
+macro_rules! early_lint_passes {
+ ($macro:path, $args:tt) => {
+ $macro!(
+ $args,
+ [
+ UnusedParens: UnusedParens,
+ UnusedBraces: UnusedBraces,
+ UnusedImportBraces: UnusedImportBraces,
+ UnsafeCode: UnsafeCode,
+ AnonymousParameters: AnonymousParameters,
+ EllipsisInclusiveRangePatterns: EllipsisInclusiveRangePatterns::default(),
+ NonCamelCaseTypes: NonCamelCaseTypes,
+ DeprecatedAttr: DeprecatedAttr::new(),
+ WhileTrue: WhileTrue,
+ NonAsciiIdents: NonAsciiIdents,
+ HiddenUnicodeCodepoints: HiddenUnicodeCodepoints,
+ IncompleteFeatures: IncompleteFeatures,
+ RedundantSemicolons: RedundantSemicolons,
+ UnusedDocComment: UnusedDocComment,
+ ]
+ );
+ };
+}
+
+macro_rules! declare_combined_early_pass {
+ ([$name:ident], $passes:tt) => (
+ early_lint_methods!(declare_combined_early_lint_pass, [pub $name, $passes]);
+ )
+}
+
+pre_expansion_lint_passes!(declare_combined_early_pass, [BuiltinCombinedPreExpansionLintPass]);
+early_lint_passes!(declare_combined_early_pass, [BuiltinCombinedEarlyLintPass]);
+
+macro_rules! late_lint_passes {
+ ($macro:path, $args:tt) => {
+ $macro!(
+ $args,
+ [
+ // Tracks state across modules
+ UnnameableTestItems: UnnameableTestItems::new(),
+ // Tracks attributes of parents
+ MissingDoc: MissingDoc::new(),
+ // Builds a global list of all impls of `Debug`.
+ // FIXME: Turn the computation of types which implement Debug into a query
+ // and change this to a module lint pass
+ MissingDebugImplementations: MissingDebugImplementations::default(),
+ // Keeps a global list of foreign declarations.
+ ClashingExternDeclarations: ClashingExternDeclarations::new(),
+ ]
+ );
+ };
+}
+
+macro_rules! late_lint_mod_passes {
+ ($macro:path, $args:tt) => {
+ $macro!(
+ $args,
+ [
+ HardwiredLints: HardwiredLints,
+ ImproperCTypesDeclarations: ImproperCTypesDeclarations,
+ ImproperCTypesDefinitions: ImproperCTypesDefinitions,
+ VariantSizeDifferences: VariantSizeDifferences,
+ BoxPointers: BoxPointers,
+ PathStatements: PathStatements,
+ // Depends on referenced function signatures in expressions
+ UnusedResults: UnusedResults,
+ NonUpperCaseGlobals: NonUpperCaseGlobals,
+ NonShorthandFieldPatterns: NonShorthandFieldPatterns,
+ UnusedAllocation: UnusedAllocation,
+ // Depends on types used in type definitions
+ MissingCopyImplementations: MissingCopyImplementations,
+ // Depends on referenced function signatures in expressions
+ MutableTransmutes: MutableTransmutes,
+ TypeAliasBounds: TypeAliasBounds,
+ TrivialConstraints: TrivialConstraints,
+ TypeLimits: TypeLimits::new(),
+ NonSnakeCase: NonSnakeCase,
+ InvalidNoMangleItems: InvalidNoMangleItems,
+ // Depends on access levels
+ UnreachablePub: UnreachablePub,
+ ExplicitOutlivesRequirements: ExplicitOutlivesRequirements,
+ InvalidValue: InvalidValue,
+ DerefNullPtr: DerefNullPtr,
+            // May depend on constants elsewhere
+ UnusedBrokenConst: UnusedBrokenConst,
+ UnstableFeatures: UnstableFeatures,
+ ArrayIntoIter: ArrayIntoIter::default(),
+ DropTraitConstraints: DropTraitConstraints,
+ TemporaryCStringAsPtr: TemporaryCStringAsPtr,
+ NonPanicFmt: NonPanicFmt,
+ NoopMethodCall: NoopMethodCall,
+ EnumIntrinsicsNonEnums: EnumIntrinsicsNonEnums,
+ InvalidAtomicOrdering: InvalidAtomicOrdering,
+ NamedAsmLabels: NamedAsmLabels,
+ ]
+ );
+ };
+}
+
+macro_rules! declare_combined_late_pass {
+ ([$v:vis $name:ident], $passes:tt) => (
+ late_lint_methods!(declare_combined_late_lint_pass, [$v $name, $passes], ['tcx]);
+ )
+}
+
+// FIXME: Make a separate lint type which does not require typeck tables
+late_lint_passes!(declare_combined_late_pass, [pub BuiltinCombinedLateLintPass]);
+
+late_lint_mod_passes!(declare_combined_late_pass, [BuiltinCombinedModuleLateLintPass]);
+
+pub fn new_lint_store(no_interleave_lints: bool, internal_lints: bool) -> LintStore {
+ let mut lint_store = LintStore::new();
+
+ register_builtins(&mut lint_store, no_interleave_lints);
+ if internal_lints {
+ register_internals(&mut lint_store);
+ }
+
+ lint_store
+}
+
+/// Tell the `LintStore` about all the built-in lints (the ones
+/// defined in this crate and the ones defined in
+/// `rustc_session::lint::builtin`).
+fn register_builtins(store: &mut LintStore, no_interleave_lints: bool) {
+ macro_rules! add_lint_group {
+ ($name:expr, $($lint:ident),*) => (
+ store.register_group(false, $name, None, vec![$(LintId::of($lint)),*]);
+ )
+ }
+
+ macro_rules! register_pass {
+ ($method:ident, $ty:ident, $constructor:expr) => {
+ store.register_lints(&$ty::get_lints());
+ store.$method(|| Box::new($constructor));
+ };
+ }
+
+ macro_rules! register_passes {
+ ($method:ident, [$($passes:ident: $constructor:expr,)*]) => (
+ $(
+ register_pass!($method, $passes, $constructor);
+ )*
+ )
+ }
+
+ if no_interleave_lints {
+ pre_expansion_lint_passes!(register_passes, register_pre_expansion_pass);
+ early_lint_passes!(register_passes, register_early_pass);
+ late_lint_passes!(register_passes, register_late_pass);
+ late_lint_mod_passes!(register_passes, register_late_mod_pass);
+ } else {
+ store.register_lints(&BuiltinCombinedPreExpansionLintPass::get_lints());
+ store.register_lints(&BuiltinCombinedEarlyLintPass::get_lints());
+ store.register_lints(&BuiltinCombinedModuleLateLintPass::get_lints());
+ store.register_lints(&BuiltinCombinedLateLintPass::get_lints());
+ }
+
+ add_lint_group!(
+ "nonstandard_style",
+ NON_CAMEL_CASE_TYPES,
+ NON_SNAKE_CASE,
+ NON_UPPER_CASE_GLOBALS
+ );
+
+ add_lint_group!(
+ "unused",
+ UNUSED_IMPORTS,
+ UNUSED_VARIABLES,
+ UNUSED_ASSIGNMENTS,
+ DEAD_CODE,
+ UNUSED_MUT,
+ UNREACHABLE_CODE,
+ UNREACHABLE_PATTERNS,
+ UNUSED_MUST_USE,
+ UNUSED_UNSAFE,
+ PATH_STATEMENTS,
+ UNUSED_ATTRIBUTES,
+ UNUSED_MACROS,
+ UNUSED_MACRO_RULES,
+ UNUSED_ALLOCATION,
+ UNUSED_DOC_COMMENTS,
+ UNUSED_EXTERN_CRATES,
+ UNUSED_FEATURES,
+ UNUSED_LABELS,
+ UNUSED_PARENS,
+ UNUSED_BRACES,
+ REDUNDANT_SEMICOLONS
+ );
+
+ add_lint_group!(
+ "rust_2018_idioms",
+ BARE_TRAIT_OBJECTS,
+ UNUSED_EXTERN_CRATES,
+ ELLIPSIS_INCLUSIVE_RANGE_PATTERNS,
+ ELIDED_LIFETIMES_IN_PATHS,
+ EXPLICIT_OUTLIVES_REQUIREMENTS // FIXME(#52665, #47816) not always applicable and not all
+ // macros are ready for this yet.
+ // UNREACHABLE_PUB,
+
+ // FIXME macro crates are not up for this yet, too much
+ // breakage is seen if we try to encourage this lint.
+ // MACRO_USE_EXTERN_CRATE
+ );
+
+ // Register renamed and removed lints.
+ store.register_renamed("single_use_lifetime", "single_use_lifetimes");
+ store.register_renamed("elided_lifetime_in_path", "elided_lifetimes_in_paths");
+ store.register_renamed("bare_trait_object", "bare_trait_objects");
+ store.register_renamed("unstable_name_collision", "unstable_name_collisions");
+ store.register_renamed("unused_doc_comment", "unused_doc_comments");
+ store.register_renamed("async_idents", "keyword_idents");
+ store.register_renamed("exceeding_bitshifts", "arithmetic_overflow");
+ store.register_renamed("redundant_semicolon", "redundant_semicolons");
+ store.register_renamed("overlapping_patterns", "overlapping_range_endpoints");
+ store.register_renamed("safe_packed_borrows", "unaligned_references");
+ store.register_renamed("disjoint_capture_migration", "rust_2021_incompatible_closure_captures");
+ store.register_renamed("or_patterns_back_compat", "rust_2021_incompatible_or_patterns");
+ store.register_renamed("non_fmt_panic", "non_fmt_panics");
+
+    // These were moved to tool lints, but rustc still sees them when compiling normally, before
+    // tool lints are registered, so `check_tool_name_for_backwards_compat` doesn't work. Register
+    // them as ignored (or removed) here explicitly instead.
+ const RUSTDOC_LINTS: &[&str] = &[
+ "broken_intra_doc_links",
+ "private_intra_doc_links",
+ "missing_crate_level_docs",
+ "missing_doc_code_examples",
+ "private_doc_tests",
+ "invalid_codeblock_attributes",
+ "invalid_html_tags",
+ "non_autolinks",
+ ];
+ for rustdoc_lint in RUSTDOC_LINTS {
+ store.register_ignored(rustdoc_lint);
+ }
+ store.register_removed(
+ "intra_doc_link_resolution_failure",
+ "use `rustdoc::broken_intra_doc_links` instead",
+ );
+ store.register_removed("rustdoc", "use `rustdoc::all` instead");
+
+ store.register_removed("unknown_features", "replaced by an error");
+ store.register_removed("unsigned_negation", "replaced by negate_unsigned feature gate");
+ store.register_removed("negate_unsigned", "cast a signed value instead");
+ store.register_removed("raw_pointer_derive", "using derive with raw pointers is ok");
+ // Register lint group aliases.
+ store.register_group_alias("nonstandard_style", "bad_style");
+ // This was renamed to `raw_pointer_derive`, which was then removed,
+ // so it is also considered removed.
+ store.register_removed("raw_pointer_deriving", "using derive with raw pointers is ok");
+ store.register_removed("drop_with_repr_extern", "drop flags have been removed");
+ store.register_removed("fat_ptr_transmutes", "was accidentally removed back in 2014");
+ store.register_removed("deprecated_attr", "use `deprecated` instead");
+ store.register_removed(
+ "transmute_from_fn_item_types",
+ "always cast functions before transmuting them",
+ );
+ store.register_removed(
+ "hr_lifetime_in_assoc_type",
+ "converted into hard error, see issue #33685 \
+ <https://github.com/rust-lang/rust/issues/33685> for more information",
+ );
+ store.register_removed(
+ "inaccessible_extern_crate",
+ "converted into hard error, see issue #36886 \
+ <https://github.com/rust-lang/rust/issues/36886> for more information",
+ );
+ store.register_removed(
+ "super_or_self_in_global_path",
+ "converted into hard error, see issue #36888 \
+ <https://github.com/rust-lang/rust/issues/36888> for more information",
+ );
+ store.register_removed(
+ "overlapping_inherent_impls",
+ "converted into hard error, see issue #36889 \
+ <https://github.com/rust-lang/rust/issues/36889> for more information",
+ );
+ store.register_removed(
+ "illegal_floating_point_constant_pattern",
+ "converted into hard error, see issue #36890 \
+ <https://github.com/rust-lang/rust/issues/36890> for more information",
+ );
+ store.register_removed(
+ "illegal_struct_or_enum_constant_pattern",
+ "converted into hard error, see issue #36891 \
+ <https://github.com/rust-lang/rust/issues/36891> for more information",
+ );
+ store.register_removed(
+ "lifetime_underscore",
+ "converted into hard error, see issue #36892 \
+ <https://github.com/rust-lang/rust/issues/36892> for more information",
+ );
+ store.register_removed(
+ "extra_requirement_in_impl",
+ "converted into hard error, see issue #37166 \
+ <https://github.com/rust-lang/rust/issues/37166> for more information",
+ );
+ store.register_removed(
+ "legacy_imports",
+ "converted into hard error, see issue #38260 \
+ <https://github.com/rust-lang/rust/issues/38260> for more information",
+ );
+ store.register_removed(
+ "coerce_never",
+ "converted into hard error, see issue #48950 \
+ <https://github.com/rust-lang/rust/issues/48950> for more information",
+ );
+ store.register_removed(
+ "resolve_trait_on_defaulted_unit",
+ "converted into hard error, see issue #48950 \
+ <https://github.com/rust-lang/rust/issues/48950> for more information",
+ );
+ store.register_removed(
+ "private_no_mangle_fns",
+ "no longer a warning, `#[no_mangle]` functions always exported",
+ );
+ store.register_removed(
+ "private_no_mangle_statics",
+ "no longer a warning, `#[no_mangle]` statics always exported",
+ );
+ store.register_removed("bad_repr", "replaced with a generic attribute input check");
+ store.register_removed(
+ "duplicate_matcher_binding_name",
+ "converted into hard error, see issue #57742 \
+ <https://github.com/rust-lang/rust/issues/57742> for more information",
+ );
+ store.register_removed(
+ "incoherent_fundamental_impls",
+ "converted into hard error, see issue #46205 \
+ <https://github.com/rust-lang/rust/issues/46205> for more information",
+ );
+ store.register_removed(
+ "legacy_constructor_visibility",
+ "converted into hard error, see issue #39207 \
+ <https://github.com/rust-lang/rust/issues/39207> for more information",
+ );
+ store.register_removed(
+ "legacy_directory_ownership",
+ "converted into hard error, see issue #37872 \
+ <https://github.com/rust-lang/rust/issues/37872> for more information",
+ );
+ store.register_removed(
+ "safe_extern_statics",
+ "converted into hard error, see issue #36247 \
+ <https://github.com/rust-lang/rust/issues/36247> for more information",
+ );
+ store.register_removed(
+ "parenthesized_params_in_types_and_modules",
+ "converted into hard error, see issue #42238 \
+ <https://github.com/rust-lang/rust/issues/42238> for more information",
+ );
+ store.register_removed(
+ "duplicate_macro_exports",
+ "converted into hard error, see issue #35896 \
+ <https://github.com/rust-lang/rust/issues/35896> for more information",
+ );
+ store.register_removed(
+ "nested_impl_trait",
+ "converted into hard error, see issue #59014 \
+ <https://github.com/rust-lang/rust/issues/59014> for more information",
+ );
+ store.register_removed("plugin_as_library", "plugins have been deprecated and retired");
+ store.register_removed(
+ "unsupported_naked_functions",
+ "converted into hard error, see RFC 2972 \
+ <https://github.com/rust-lang/rfcs/blob/master/text/2972-constrained-naked.md> for more information",
+ );
+ store.register_removed(
+ "mutable_borrow_reservation_conflict",
+ "now allowed, see issue #59159 \
+ <https://github.com/rust-lang/rust/issues/59159> for more information",
+ );
+}
+
+fn register_internals(store: &mut LintStore) {
+ store.register_lints(&LintPassImpl::get_lints());
+ store.register_early_pass(|| Box::new(LintPassImpl));
+ store.register_lints(&DefaultHashTypes::get_lints());
+ store.register_late_pass(|| Box::new(DefaultHashTypes));
+ store.register_lints(&QueryStability::get_lints());
+ store.register_late_pass(|| Box::new(QueryStability));
+ store.register_lints(&ExistingDocKeyword::get_lints());
+ store.register_late_pass(|| Box::new(ExistingDocKeyword));
+ store.register_lints(&TyTyKind::get_lints());
+ store.register_late_pass(|| Box::new(TyTyKind));
+ store.register_lints(&Diagnostics::get_lints());
+ store.register_late_pass(|| Box::new(Diagnostics));
+ store.register_lints(&BadOptAccess::get_lints());
+ store.register_late_pass(|| Box::new(BadOptAccess));
+ store.register_lints(&PassByValue::get_lints());
+ store.register_late_pass(|| Box::new(PassByValue));
+ // FIXME(davidtwco): deliberately do not include `UNTRANSLATABLE_DIAGNOSTIC` and
+ // `DIAGNOSTIC_OUTSIDE_OF_IMPL` here because `-Wrustc::internal` is provided to every crate and
+ // these lints will trigger all of the time - change this once migration to diagnostic structs
+ // and translation is completed
+ store.register_group(
+ false,
+ "rustc::internal",
+ None,
+ vec![
+ LintId::of(DEFAULT_HASH_TYPES),
+ LintId::of(POTENTIAL_QUERY_INSTABILITY),
+ LintId::of(USAGE_OF_TY_TYKIND),
+ LintId::of(PASS_BY_VALUE),
+ LintId::of(LINT_PASS_IMPL_WITHOUT_MACRO),
+ LintId::of(USAGE_OF_QUALIFIED_TY),
+ LintId::of(EXISTING_DOC_KEYWORD),
+ LintId::of(BAD_OPT_ACCESS),
+ ],
+ );
+}
+
+#[cfg(test)]
+mod tests;
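For orientation, the registration pattern used by `register_internals` above
applies to any pass. A minimal sketch, where `EXAMPLE_LINT` and `ExampleLint`
are hypothetical names invented for the example:

```rust
declare_lint! {
    /// Hypothetical lint, present only to illustrate registration.
    pub EXAMPLE_LINT,
    Warn,
    "an illustrative lint"
}

declare_lint_pass!(ExampleLint => [EXAMPLE_LINT]);

// All `LateLintPass` methods have defaults, so an empty impl suffices here.
impl<'tcx> LateLintPass<'tcx> for ExampleLint {}

fn register_example(store: &mut LintStore) {
    // Same two-step pattern as `register_internals`: declare the lints,
    // then provide a constructor for the pass.
    store.register_lints(&ExampleLint::get_lints());
    store.register_late_pass(|| Box::new(ExampleLint));
}
```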
diff --git a/compiler/rustc_lint/src/methods.rs b/compiler/rustc_lint/src/methods.rs
new file mode 100644
index 000000000..ff5a01749
--- /dev/null
+++ b/compiler/rustc_lint/src/methods.rs
@@ -0,0 +1,103 @@
+use crate::LateContext;
+use crate::LateLintPass;
+use crate::LintContext;
+use rustc_errors::fluent;
+use rustc_hir::{Expr, ExprKind, PathSegment};
+use rustc_middle::ty;
+use rustc_span::{symbol::sym, ExpnKind, Span};
+
+declare_lint! {
+ /// The `temporary_cstring_as_ptr` lint detects getting the inner pointer of
+ /// a temporary `CString`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// # #![allow(unused)]
+ /// # use std::ffi::CString;
+ /// let c_str = CString::new("foo").unwrap().as_ptr();
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// The inner pointer of a `CString` lives only as long as the `CString` it
+    /// points into. Getting the inner pointer of a *temporary* `CString` allows
+    /// the `CString` to be dropped at the end of the statement, as it is no longer
+    /// referenced as far as the type system is concerned. Outside of the statement,
+    /// the pointer will therefore point to freed memory, which causes undefined
+    /// behavior if it is later dereferenced.
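+    ///
+    /// To avoid this, bind the `CString` to a variable so that it outlives the pointer:
+    ///
+    /// ```rust
+    /// # #![allow(unused)]
+    /// # use std::ffi::CString;
+    /// let c_string = CString::new("foo").unwrap();
+    /// let c_str = c_string.as_ptr(); // valid for as long as `c_string` lives
+    /// ```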
+ pub TEMPORARY_CSTRING_AS_PTR,
+ Warn,
+ "detects getting the inner pointer of a temporary `CString`"
+}
+
+declare_lint_pass!(TemporaryCStringAsPtr => [TEMPORARY_CSTRING_AS_PTR]);
+
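+/// Returns `true` if `span` comes from a macro expansion other than a
+/// desugaring; such spans are skipped by this lint.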
+fn in_macro(span: Span) -> bool {
+ if span.from_expansion() {
+ !matches!(span.ctxt().outer_expn_data().kind, ExpnKind::Desugaring(..))
+ } else {
+ false
+ }
+}
+
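+/// Returns the method path and arguments if `expr` is a method call none of
+/// whose arguments (the receiver included) come from a macro expansion.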
+fn first_method_call<'tcx>(
+ expr: &'tcx Expr<'tcx>,
+) -> Option<(&'tcx PathSegment<'tcx>, &'tcx [Expr<'tcx>])> {
+ if let ExprKind::MethodCall(path, args, _) = &expr.kind {
+ if args.iter().any(|e| e.span.from_expansion()) { None } else { Some((path, *args)) }
+ } else {
+ None
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for TemporaryCStringAsPtr {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
+ if in_macro(expr.span) {
+ return;
+ }
+
+ match first_method_call(expr) {
+ Some((path, args)) if path.ident.name == sym::as_ptr => {
+ let unwrap_arg = &args[0];
+ let as_ptr_span = path.ident.span;
+ match first_method_call(unwrap_arg) {
+ Some((path, args))
+ if path.ident.name == sym::unwrap || path.ident.name == sym::expect =>
+ {
+ let source_arg = &args[0];
+ lint_cstring_as_ptr(cx, as_ptr_span, source_arg, unwrap_arg);
+ }
+ _ => return,
+ }
+ }
+ _ => return,
+ }
+ }
+}
+
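+/// Emits `TEMPORARY_CSTRING_AS_PTR` once the receiver of `.as_ptr()` is known
+/// to be a `Result<CString, _>` unwrapped within the same expression.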
+fn lint_cstring_as_ptr(
+ cx: &LateContext<'_>,
+ as_ptr_span: Span,
+ source: &rustc_hir::Expr<'_>,
+ unwrap: &rustc_hir::Expr<'_>,
+) {
+ let source_type = cx.typeck_results().expr_ty(source);
+ if let ty::Adt(def, substs) = source_type.kind() {
+ if cx.tcx.is_diagnostic_item(sym::Result, def.did()) {
+ if let ty::Adt(adt, _) = substs.type_at(0).kind() {
+ if cx.tcx.is_diagnostic_item(sym::cstring_type, adt.did()) {
+ cx.struct_span_lint(TEMPORARY_CSTRING_AS_PTR, as_ptr_span, |diag| {
+ diag.build(fluent::lint::cstring_ptr)
+ .span_label(as_ptr_span, fluent::lint::as_ptr_label)
+ .span_label(unwrap.span, fluent::lint::unwrap_label)
+ .note(fluent::lint::note)
+ .help(fluent::lint::help)
+ .emit();
+ });
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_lint/src/non_ascii_idents.rs b/compiler/rustc_lint/src/non_ascii_idents.rs
new file mode 100644
index 000000000..764003e61
--- /dev/null
+++ b/compiler/rustc_lint/src/non_ascii_idents.rs
@@ -0,0 +1,345 @@
+use crate::{EarlyContext, EarlyLintPass, LintContext};
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::fluent;
+use rustc_span::symbol::Symbol;
+
+declare_lint! {
+ /// The `non_ascii_idents` lint detects non-ASCII identifiers.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// # #![allow(unused)]
+ /// #![deny(non_ascii_idents)]
+ /// fn main() {
+ /// let föö = 1;
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This lint allows projects that wish to retain the limit of only using
+ /// ASCII characters to switch this lint to "forbid" (for example to ease
+ /// collaboration or for security reasons).
+ /// See [RFC 2457] for more details.
+ ///
+ /// [RFC 2457]: https://github.com/rust-lang/rfcs/blob/master/text/2457-non-ascii-idents.md
+ pub NON_ASCII_IDENTS,
+ Allow,
+ "detects non-ASCII identifiers",
+ crate_level_only
+}
+
+declare_lint! {
+ /// The `uncommon_codepoints` lint detects uncommon Unicode codepoints in
+ /// identifiers.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// # #![allow(unused)]
+ /// const µ: f64 = 0.000001;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// This lint warns about using characters which are not commonly used and
+    /// which may cause visual confusion.
+ ///
+ /// This lint is triggered by identifiers that contain a codepoint that is
+ /// not part of the set of "Allowed" codepoints as described by [Unicode®
+ /// Technical Standard #39 Unicode Security Mechanisms Section 3.1 General
+ /// Security Profile for Identifiers][TR39Allowed].
+ ///
+ /// Note that the set of uncommon codepoints may change over time. Beware
+ /// that if you "forbid" this lint that existing code may fail in the
+ /// future.
+ ///
+ /// [TR39Allowed]: https://www.unicode.org/reports/tr39/#General_Security_Profile
+ pub UNCOMMON_CODEPOINTS,
+ Warn,
+ "detects uncommon Unicode codepoints in identifiers",
+ crate_level_only
+}
+
+declare_lint! {
+ /// The `confusable_idents` lint detects visually confusable pairs between
+ /// identifiers.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// // Latin Capital Letter E With Caron
+ /// pub const Ě: i32 = 1;
+ /// // Latin Capital Letter E With Breve
+ /// pub const Ĕ: i32 = 2;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This lint warns when different identifiers may appear visually similar,
+ /// which can cause confusion.
+ ///
+ /// The confusable detection algorithm is based on [Unicode® Technical
+ /// Standard #39 Unicode Security Mechanisms Section 4 Confusable
+    /// Detection][TR39Confusable]. For every distinct identifier X, the
+    /// function `skeleton(X)` is executed. If two distinct identifiers X and Y
+    /// in the same crate satisfy `skeleton(X) = skeleton(Y)`, they are reported.
+ /// The compiler uses the same mechanism to check if an identifier is too
+ /// similar to a keyword.
+ ///
+ /// Note that the set of confusable characters may change over time.
+ /// Beware that if you "forbid" this lint that existing code may fail in
+ /// the future.
+ ///
+ /// [TR39Confusable]: https://www.unicode.org/reports/tr39/#Confusable_Detection
+ pub CONFUSABLE_IDENTS,
+ Warn,
+ "detects visually confusable pairs between identifiers",
+ crate_level_only
+}
+
+declare_lint! {
+ /// The `mixed_script_confusables` lint detects visually confusable
+ /// characters in identifiers between different [scripts].
+ ///
+ /// [scripts]: https://en.wikipedia.org/wiki/Script_(Unicode)
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// // The Japanese katakana character エ can be confused with the Han character 工.
+ /// const エ: &'static str = "アイウ";
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This lint warns when characters between different scripts may appear
+ /// visually similar, which can cause confusion.
+ ///
+ /// If the crate contains other identifiers in the same script that have
+ /// non-confusable characters, then this lint will *not* be issued. For
+ /// example, if the example given above has another identifier with
+ /// katakana characters (such as `let カタカナ = 123;`), then this indicates
+ /// that you are intentionally using katakana, and it will not warn about
+ /// it.
+ ///
+ /// Note that the set of confusable characters may change over time.
+ /// Beware that if you "forbid" this lint that existing code may fail in
+ /// the future.
+ pub MIXED_SCRIPT_CONFUSABLES,
+ Warn,
+ "detects Unicode scripts whose mixed script confusables codepoints are solely used",
+ crate_level_only
+}
+
+declare_lint_pass!(NonAsciiIdents => [NON_ASCII_IDENTS, UNCOMMON_CODEPOINTS, CONFUSABLE_IDENTS, MIXED_SCRIPT_CONFUSABLES]);
+
+impl EarlyLintPass for NonAsciiIdents {
+ fn check_crate(&mut self, cx: &EarlyContext<'_>, _: &ast::Crate) {
+ use rustc_session::lint::Level;
+ use rustc_span::Span;
+ use std::collections::BTreeMap;
+ use unicode_security::GeneralSecurityProfile;
+
+ let check_non_ascii_idents = cx.builder.lint_level(NON_ASCII_IDENTS).0 != Level::Allow;
+ let check_uncommon_codepoints =
+ cx.builder.lint_level(UNCOMMON_CODEPOINTS).0 != Level::Allow;
+ let check_confusable_idents = cx.builder.lint_level(CONFUSABLE_IDENTS).0 != Level::Allow;
+ let check_mixed_script_confusables =
+ cx.builder.lint_level(MIXED_SCRIPT_CONFUSABLES).0 != Level::Allow;
+
+ if !check_non_ascii_idents
+ && !check_uncommon_codepoints
+ && !check_confusable_idents
+ && !check_mixed_script_confusables
+ {
+ return;
+ }
+
+ let mut has_non_ascii_idents = false;
+ let symbols = cx.sess().parse_sess.symbol_gallery.symbols.lock();
+
+ // Sort by `Span` so that error messages make sense with respect to the
+ // order of identifier locations in the code.
+ let mut symbols: Vec<_> = symbols.iter().collect();
+ symbols.sort_by_key(|k| k.1);
+
+ for (symbol, &sp) in symbols.iter() {
+ let symbol_str = symbol.as_str();
+ if symbol_str.is_ascii() {
+ continue;
+ }
+ has_non_ascii_idents = true;
+ cx.struct_span_lint(NON_ASCII_IDENTS, sp, |lint| {
+ lint.build(fluent::lint::identifier_non_ascii_char).emit();
+ });
+ if check_uncommon_codepoints
+ && !symbol_str.chars().all(GeneralSecurityProfile::identifier_allowed)
+ {
+ cx.struct_span_lint(UNCOMMON_CODEPOINTS, sp, |lint| {
+ lint.build(fluent::lint::identifier_uncommon_codepoints).emit();
+ })
+ }
+ }
+
+ if has_non_ascii_idents && check_confusable_idents {
+ let mut skeleton_map: FxHashMap<Symbol, (Symbol, Span, bool)> =
+ FxHashMap::with_capacity_and_hasher(symbols.len(), Default::default());
+ let mut skeleton_buf = String::new();
+
+ for (&symbol, &sp) in symbols.iter() {
+ use unicode_security::confusable_detection::skeleton;
+
+ let symbol_str = symbol.as_str();
+ let is_ascii = symbol_str.is_ascii();
+
+ // Get the skeleton as a `Symbol`.
+ skeleton_buf.clear();
+ skeleton_buf.extend(skeleton(&symbol_str));
+ let skeleton_sym = if *symbol_str == *skeleton_buf {
+ symbol
+ } else {
+ Symbol::intern(&skeleton_buf)
+ };
+
+ skeleton_map
+ .entry(skeleton_sym)
+ .and_modify(|(existing_symbol, existing_span, existing_is_ascii)| {
+ if !*existing_is_ascii || !is_ascii {
+ cx.struct_span_lint(CONFUSABLE_IDENTS, sp, |lint| {
+ lint.build(fluent::lint::confusable_identifier_pair)
+ .set_arg("existing_sym", *existing_symbol)
+ .set_arg("sym", symbol)
+ .span_label(*existing_span, fluent::lint::label)
+ .emit();
+ });
+ }
+ if *existing_is_ascii && !is_ascii {
+ *existing_symbol = symbol;
+ *existing_span = sp;
+ *existing_is_ascii = is_ascii;
+ }
+ })
+ .or_insert((symbol, sp, is_ascii));
+ }
+ }
+
+ if has_non_ascii_idents && check_mixed_script_confusables {
+ use unicode_security::is_potential_mixed_script_confusable_char;
+ use unicode_security::mixed_script::AugmentedScriptSet;
+
+ #[derive(Clone)]
+ enum ScriptSetUsage {
+ Suspicious(Vec<char>, Span),
+ Verified,
+ }
+
+ let mut script_states: FxHashMap<AugmentedScriptSet, ScriptSetUsage> =
+ FxHashMap::default();
+ let latin_augmented_script_set = AugmentedScriptSet::for_char('A');
+ script_states.insert(latin_augmented_script_set, ScriptSetUsage::Verified);
+
+            let mut has_suspicious = false;
+ for (symbol, &sp) in symbols.iter() {
+ let symbol_str = symbol.as_str();
+ for ch in symbol_str.chars() {
+ if ch.is_ascii() {
+                    // All ASCII characters are covered by the exception.
+ continue;
+ }
+ if !GeneralSecurityProfile::identifier_allowed(ch) {
+                    // This character is covered by the `uncommon_codepoints` lint.
+ continue;
+ }
+ let augmented_script_set = AugmentedScriptSet::for_char(ch);
+ script_states
+ .entry(augmented_script_set)
+ .and_modify(|existing_state| {
+ if let ScriptSetUsage::Suspicious(ch_list, _) = existing_state {
+ if is_potential_mixed_script_confusable_char(ch) {
+ ch_list.push(ch);
+ } else {
+ *existing_state = ScriptSetUsage::Verified;
+ }
+ }
+ })
+ .or_insert_with(|| {
+ if !is_potential_mixed_script_confusable_char(ch) {
+ ScriptSetUsage::Verified
+ } else {
+                            has_suspicious = true;
+ ScriptSetUsage::Suspicious(vec![ch], sp)
+ }
+ });
+ }
+ }
+
+            if has_suspicious {
+ let verified_augmented_script_sets = script_states
+ .iter()
+ .flat_map(|(k, v)| match v {
+ ScriptSetUsage::Verified => Some(*k),
+ _ => None,
+ })
+ .collect::<Vec<_>>();
+
+ // we're sorting the output here.
+ let mut lint_reports: BTreeMap<(Span, Vec<char>), AugmentedScriptSet> =
+ BTreeMap::new();
+
+ 'outerloop: for (augment_script_set, usage) in script_states {
+ let ScriptSetUsage::Suspicious(mut ch_list, sp) = usage else { continue };
+
+ if augment_script_set.is_all() {
+ continue;
+ }
+
+ for existing in verified_augmented_script_sets.iter() {
+ if existing.is_all() {
+ continue;
+ }
+ let mut intersect = *existing;
+ intersect.intersect_with(augment_script_set);
+ if !intersect.is_empty() && !intersect.is_all() {
+ continue 'outerloop;
+ }
+ }
+
+ // We sort primitive chars here and can use unstable sort
+ ch_list.sort_unstable();
+ ch_list.dedup();
+ lint_reports.insert((sp, ch_list), augment_script_set);
+ }
+
+ for ((sp, ch_list), script_set) in lint_reports {
+ cx.struct_span_lint(MIXED_SCRIPT_CONFUSABLES, sp, |lint| {
+ let mut includes = String::new();
+ for (idx, ch) in ch_list.into_iter().enumerate() {
+ if idx != 0 {
+ includes += ", ";
+ }
+ let char_info = format!("'{}' (U+{:04X})", ch, ch as u32);
+ includes += &char_info;
+ }
+ lint.build(fluent::lint::mixed_script_confusables)
+ .set_arg("set", script_set.to_string())
+ .set_arg("includes", includes)
+ .note(fluent::lint::includes_note)
+ .note(fluent::lint::note)
+ .emit();
+ });
+ }
+ }
+ }
+ }
+}
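The confusable check above reduces to comparing TR39 skeletons. A standalone
sketch against the same `unicode_security` API (the particular confusable pair
assumes the current Unicode confusables data):

```rust
use unicode_security::confusable_detection::skeleton;

/// Two identifiers are confusable when their TR39 skeletons match.
fn confusable(a: &str, b: &str) -> bool {
    skeleton(a).eq(skeleton(b))
}

fn main() {
    // U+0415 CYRILLIC CAPITAL LETTER IE vs. ASCII 'E'.
    assert!(confusable("\u{415}", "E"));
}
```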
diff --git a/compiler/rustc_lint/src/non_fmt_panic.rs b/compiler/rustc_lint/src/non_fmt_panic.rs
new file mode 100644
index 000000000..cdad2d2e8
--- /dev/null
+++ b/compiler/rustc_lint/src/non_fmt_panic.rs
@@ -0,0 +1,357 @@
+use crate::{LateContext, LateLintPass, LintContext};
+use rustc_ast as ast;
+use rustc_errors::{fluent, Applicability};
+use rustc_hir as hir;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::ty;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_parse_format::{ParseMode, Parser, Piece};
+use rustc_session::lint::FutureIncompatibilityReason;
+use rustc_span::edition::Edition;
+use rustc_span::{hygiene, sym, symbol::kw, InnerSpan, Span, Symbol};
+use rustc_trait_selection::infer::InferCtxtExt;
+
+declare_lint! {
+ /// The `non_fmt_panics` lint detects `panic!(..)` invocations where the first
+ /// argument is not a formatting string.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,no_run,edition2018
+ /// panic!("{}");
+ /// panic!(123);
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In Rust 2018 and earlier, `panic!(x)` directly uses `x` as the message.
+ /// That means that `panic!("{}")` panics with the message `"{}"` instead
+ /// of using it as a formatting string, and `panic!(123)` will panic with
+ /// an `i32` as message.
+ ///
+    /// Rust 2021 always interprets the first argument as a format string.
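+    ///
+    /// To keep the old behavior, pass the value through a format string or use
+    /// `std::panic::panic_any`:
+    ///
+    /// ```rust,no_run
+    /// panic!("{}", 123);          // formats the value into the message
+    /// std::panic::panic_any(123); // panics with the `i32` itself as the payload
+    /// ```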
+ NON_FMT_PANICS,
+ Warn,
+ "detect single-argument panic!() invocations in which the argument is not a format string",
+ @future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::EditionSemanticsChange(Edition::Edition2021),
+ explain_reason: false,
+ };
+ report_in_external_macro
+}
+
+declare_lint_pass!(NonPanicFmt => [NON_FMT_PANICS]);
+
+impl<'tcx> LateLintPass<'tcx> for NonPanicFmt {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx hir::Expr<'tcx>) {
+ if let hir::ExprKind::Call(f, [arg]) = &expr.kind {
+ if let &ty::FnDef(def_id, _) = cx.typeck_results().expr_ty(f).kind() {
+ let f_diagnostic_name = cx.tcx.get_diagnostic_name(def_id);
+
+ if Some(def_id) == cx.tcx.lang_items().begin_panic_fn()
+ || Some(def_id) == cx.tcx.lang_items().panic_fn()
+ || f_diagnostic_name == Some(sym::panic_str)
+ {
+ if let Some(id) = f.span.ctxt().outer_expn_data().macro_def_id {
+ if matches!(
+ cx.tcx.get_diagnostic_name(id),
+ Some(sym::core_panic_2015_macro | sym::std_panic_2015_macro)
+ ) {
+ check_panic(cx, f, arg);
+ }
+ }
+ } else if f_diagnostic_name == Some(sym::unreachable_display) {
+ if let Some(id) = f.span.ctxt().outer_expn_data().macro_def_id {
+ if cx.tcx.is_diagnostic_item(sym::unreachable_2015_macro, id) {
+ check_panic(
+ cx,
+ f,
+ // This is safe because we checked above that the callee is indeed
+ // unreachable_display
+ match &arg.kind {
+ // Get the borrowed arg not the borrow
+ hir::ExprKind::AddrOf(ast::BorrowKind::Ref, _, arg) => arg,
+ _ => bug!("call to unreachable_display without borrow"),
+ },
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
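+/// Checks the argument of a `panic!`/`unreachable!` call: string literals are
+/// forwarded to `check_panic_str`, anything else gets the `non_fmt_panics`
+/// lint together with format-string or `panic_any` suggestions where they apply.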
+fn check_panic<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>, arg: &'tcx hir::Expr<'tcx>) {
+ if let hir::ExprKind::Lit(lit) = &arg.kind {
+ if let ast::LitKind::Str(sym, _) = lit.node {
+ // The argument is a string literal.
+ check_panic_str(cx, f, arg, sym.as_str());
+ return;
+ }
+ }
+
+ // The argument is *not* a string literal.
+
+ let (span, panic, symbol) = panic_call(cx, f);
+
+ if in_external_macro(cx.sess(), span) {
+ // Nothing that can be done about it in the current crate.
+ return;
+ }
+
+ // Find the span of the argument to `panic!()` or `unreachable!`, before expansion in the
+ // case of `panic!(some_macro!())` or `unreachable!(some_macro!())`.
+ // We don't use source_callsite(), because this `panic!(..)` might itself
+ // be expanded from another macro, in which case we want to stop at that
+ // expansion.
+ let mut arg_span = arg.span;
+ let mut arg_macro = None;
+ while !span.contains(arg_span) {
+ let expn = arg_span.ctxt().outer_expn_data();
+ if expn.is_root() {
+ break;
+ }
+ arg_macro = expn.macro_def_id;
+ arg_span = expn.call_site;
+ }
+
+ cx.struct_span_lint(NON_FMT_PANICS, arg_span, |lint| {
+ let mut l = lint.build(fluent::lint::non_fmt_panic);
+ l.set_arg("name", symbol);
+ l.note(fluent::lint::note);
+ l.note(fluent::lint::more_info_note);
+ if !is_arg_inside_call(arg_span, span) {
+ // No clue where this argument is coming from.
+ l.emit();
+ return;
+ }
+ if arg_macro.map_or(false, |id| cx.tcx.is_diagnostic_item(sym::format_macro, id)) {
+ // A case of `panic!(format!(..))`.
+ l.note(fluent::lint::supports_fmt_note);
+ if let Some((open, close, _)) = find_delimiters(cx, arg_span) {
+ l.multipart_suggestion(
+ fluent::lint::supports_fmt_suggestion,
+ vec![
+ (arg_span.until(open.shrink_to_hi()), "".into()),
+ (close.until(arg_span.shrink_to_hi()), "".into()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ } else {
+ let ty = cx.typeck_results().expr_ty(arg);
+ // If this is a &str or String, we can confidently give the `"{}", ` suggestion.
+ let is_str = matches!(
+ ty.kind(),
+ ty::Ref(_, r, _) if *r.kind() == ty::Str,
+ ) || matches!(
+ ty.ty_adt_def(),
+ Some(ty_def) if cx.tcx.is_diagnostic_item(sym::String, ty_def.did()),
+ );
+
+ let (suggest_display, suggest_debug) = cx.tcx.infer_ctxt().enter(|infcx| {
+ let display = is_str
+ || cx.tcx.get_diagnostic_item(sym::Display).map(|t| {
+ infcx
+ .type_implements_trait(t, ty, InternalSubsts::empty(), cx.param_env)
+ .may_apply()
+ }) == Some(true);
+ let debug = !display
+ && cx.tcx.get_diagnostic_item(sym::Debug).map(|t| {
+ infcx
+ .type_implements_trait(t, ty, InternalSubsts::empty(), cx.param_env)
+ .may_apply()
+ }) == Some(true);
+ (display, debug)
+ });
+
+ let suggest_panic_any = !is_str && panic == sym::std_panic_macro;
+
+ let fmt_applicability = if suggest_panic_any {
+ // If we can use panic_any, use that as the MachineApplicable suggestion.
+ Applicability::MaybeIncorrect
+ } else {
+ // If we don't suggest panic_any, using a format string is our best bet.
+ Applicability::MachineApplicable
+ };
+
+ if suggest_display {
+ l.span_suggestion_verbose(
+ arg_span.shrink_to_lo(),
+ fluent::lint::display_suggestion,
+ "\"{}\", ",
+ fmt_applicability,
+ );
+ } else if suggest_debug {
+ l.set_arg("ty", ty);
+ l.span_suggestion_verbose(
+ arg_span.shrink_to_lo(),
+ fluent::lint::debug_suggestion,
+ "\"{:?}\", ",
+ fmt_applicability,
+ );
+ }
+
+ if suggest_panic_any {
+ if let Some((open, close, del)) = find_delimiters(cx, span) {
+ l.set_arg("already_suggested", suggest_display || suggest_debug);
+ l.multipart_suggestion(
+ fluent::lint::panic_suggestion,
+ if del == '(' {
+ vec![(span.until(open), "std::panic::panic_any".into())]
+ } else {
+ vec![
+ (span.until(open.shrink_to_hi()), "std::panic::panic_any(".into()),
+ (close, ")".into()),
+ ]
+ },
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ l.emit();
+ });
+}
+
+fn check_panic_str<'tcx>(
+ cx: &LateContext<'tcx>,
+ f: &'tcx hir::Expr<'tcx>,
+ arg: &'tcx hir::Expr<'tcx>,
+ fmt: &str,
+) {
+ if !fmt.contains(&['{', '}']) {
+ // No brace, no problem.
+ return;
+ }
+
+ let (span, _, _) = panic_call(cx, f);
+
+ if in_external_macro(cx.sess(), span) && in_external_macro(cx.sess(), arg.span) {
+ // Nothing that can be done about it in the current crate.
+ return;
+ }
+
+ let fmt_span = arg.span.source_callsite();
+
+ let (snippet, style) = match cx.sess().parse_sess.source_map().span_to_snippet(fmt_span) {
+ Ok(snippet) => {
+ // Count the number of `#`s between the `r` and `"`.
+ let style = snippet.strip_prefix('r').and_then(|s| s.find('"'));
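+ // e.g. `r#"..."#` yields `Some(1)`, while a plain `"..."` literal
+ // (no `r` prefix to strip) yields `None`.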
+ (Some(snippet), style)
+ }
+ Err(_) => (None, None),
+ };
+
+ let mut fmt_parser = Parser::new(fmt, style, snippet.clone(), false, ParseMode::Format);
+ let n_arguments = (&mut fmt_parser).filter(|a| matches!(a, Piece::NextArgument(_))).count();
+
+ if n_arguments > 0 && fmt_parser.errors.is_empty() {
+ let arg_spans: Vec<_> = match &fmt_parser.arg_places[..] {
+ [] => vec![fmt_span],
+ v => v
+ .iter()
+ .map(|span| fmt_span.from_inner(InnerSpan::new(span.start, span.end)))
+ .collect(),
+ };
+ cx.struct_span_lint(NON_FMT_PANICS, arg_spans, |lint| {
+ let mut l = lint.build(fluent::lint::non_fmt_panic_unused);
+ l.set_arg("count", n_arguments);
+ l.note(fluent::lint::note);
+ if is_arg_inside_call(arg.span, span) {
+ l.span_suggestion(
+ arg.span.shrink_to_hi(),
+ fluent::lint::add_args_suggestion,
+ ", ...",
+ Applicability::HasPlaceholders,
+ );
+ l.span_suggestion(
+ arg.span.shrink_to_lo(),
+ fluent::lint::add_fmt_suggestion,
+ "\"{}\", ",
+ Applicability::MachineApplicable,
+ );
+ }
+ l.emit();
+ });
+ } else {
+ let brace_spans: Option<Vec<_>> =
+ snippet.filter(|s| s.starts_with('"') || s.starts_with("r#")).map(|s| {
+ s.char_indices()
+ .filter(|&(_, c)| c == '{' || c == '}')
+ .map(|(i, _)| fmt_span.from_inner(InnerSpan { start: i, end: i + 1 }))
+ .collect()
+ });
+ let count = brace_spans.as_ref().map(|v| v.len()).unwrap_or(/* any number >1 */ 2);
+ cx.struct_span_lint(NON_FMT_PANICS, brace_spans.unwrap_or_else(|| vec![span]), |lint| {
+ let mut l = lint.build(fluent::lint::non_fmt_panic_braces);
+ l.set_arg("count", count);
+ l.note(fluent::lint::note);
+ if is_arg_inside_call(arg.span, span) {
+ l.span_suggestion(
+ arg.span.shrink_to_lo(),
+ fluent::lint::suggestion,
+ "\"{}\", ",
+ Applicability::MachineApplicable,
+ );
+ }
+ l.emit();
+ });
+ }
+}
+
+/// Given the span of `some_macro!(args);`, returns the spans of the opening and
+/// closing delimiters and the opening delimiter character.
+fn find_delimiters<'tcx>(cx: &LateContext<'tcx>, span: Span) -> Option<(Span, Span, char)> {
+ let snippet = cx.sess().parse_sess.source_map().span_to_snippet(span).ok()?;
+ let (open, open_ch) = snippet.char_indices().find(|&(_, c)| "([{".contains(c))?;
+ let close = snippet.rfind(|c| ")]}".contains(c))?;
+ Some((
+ span.from_inner(InnerSpan { start: open, end: open + 1 }),
+ span.from_inner(InnerSpan { start: close, end: close + 1 }),
+ open_ch,
+ ))
+}
+
+fn panic_call<'tcx>(cx: &LateContext<'tcx>, f: &'tcx hir::Expr<'tcx>) -> (Span, Symbol, Symbol) {
+ let mut expn = f.span.ctxt().outer_expn_data();
+
+ let mut panic_macro = kw::Empty;
+
+ // Unwrap more levels of macro expansion, as panic_2015!()
+ // was likely expanded from panic!() and possibly from
+ // [debug_]assert!().
+ loop {
+ let parent = expn.call_site.ctxt().outer_expn_data();
+ let Some(id) = parent.macro_def_id else { break };
+ let Some(name) = cx.tcx.get_diagnostic_name(id) else { break };
+ if !matches!(
+ name,
+ sym::core_panic_macro
+ | sym::std_panic_macro
+ | sym::assert_macro
+ | sym::debug_assert_macro
+ | sym::unreachable_macro
+ ) {
+ break;
+ }
+ expn = parent;
+ panic_macro = name;
+ }
+
+ let macro_symbol =
+ if let hygiene::ExpnKind::Macro(_, symbol) = expn.kind { symbol } else { sym::panic };
+ (expn.call_site, panic_macro, macro_symbol)
+}
+
+fn is_arg_inside_call(arg: Span, call: Span) -> bool {
+ // We only add suggestions if the argument we're looking at appears inside the
+ // panic call in the source file, to avoid invalid suggestions when macros are involved.
+ // We specifically check for the spans to not be identical, as that happens sometimes when
+ // proc_macros lie about spans and apply the same span to all the tokens they produce.
+ call.contains(arg) && !call.source_equal(arg)
+}
diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs
new file mode 100644
index 000000000..8d04d68bf
--- /dev/null
+++ b/compiler/rustc_lint/src/nonstandard_style.rs
@@ -0,0 +1,565 @@
+use crate::{EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext};
+use rustc_ast as ast;
+use rustc_attr as attr;
+use rustc_errors::{fluent, Applicability};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::intravisit::FnKind;
+use rustc_hir::{GenericParamKind, PatKind};
+use rustc_middle::ty;
+use rustc_span::symbol::sym;
+use rustc_span::{symbol::Ident, BytePos, Span};
+use rustc_target::spec::abi::Abi;
+
+#[derive(PartialEq)]
+pub enum MethodLateContext {
+ TraitAutoImpl,
+ TraitImpl,
+ PlainImpl,
+}
+
+pub fn method_context(cx: &LateContext<'_>, id: hir::HirId) -> MethodLateContext {
+ let def_id = cx.tcx.hir().local_def_id(id);
+ let item = cx.tcx.associated_item(def_id);
+ match item.container {
+ ty::TraitContainer => MethodLateContext::TraitAutoImpl,
+ ty::ImplContainer => match cx.tcx.impl_trait_ref(item.container_id(cx.tcx)) {
+ Some(_) => MethodLateContext::TraitImpl,
+ None => MethodLateContext::PlainImpl,
+ },
+ }
+}
+
+declare_lint! {
+ /// The `non_camel_case_types` lint detects types, variants, traits and
+ /// type parameters that don't have camel case names.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// struct my_struct;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The preferred style for these identifiers is to use "camel case", such
+ /// as `MyStruct`, where the first letter should not be lowercase, and
+ /// should not use underscores between letters. Underscores are allowed at
+ /// the beginning and end of the identifier, as well as between
+ /// non-letters (such as `X86_64`).
+ pub NON_CAMEL_CASE_TYPES,
+ Warn,
+ "types, variants, traits and type parameters should have camel case names"
+}
+
+declare_lint_pass!(NonCamelCaseTypes => [NON_CAMEL_CASE_TYPES]);
+
+/// Some Unicode characters *have* case (they are classified as upper case or lower
+/// case) but *can't* be upper cased or lower cased. For the purposes of the lint
+/// suggestion, we care about being able to change the char's case.
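+/// For example, `char_has_case('a')` is `true`, while `char_has_case('1')` is
+/// `false`, since a digit has no distinct uppercase or lowercase form.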
+fn char_has_case(c: char) -> bool {
+ let mut l = c.to_lowercase();
+ let mut u = c.to_uppercase();
+ while let Some(l) = l.next() {
+ match u.next() {
+ Some(u) if l != u => return true,
+ _ => {}
+ }
+ }
+ u.next().is_some()
+}
+
+fn is_camel_case(name: &str) -> bool {
+ let name = name.trim_matches('_');
+ if name.is_empty() {
+ return true;
+ }
+
+ // require the first character to be non-lowercase rather than requiring
+ // it to be uppercase (some scripts don't have a concept of upper/lowercase)
+ !name.chars().next().unwrap().is_lowercase()
+ && !name.contains("__")
+ && !name.chars().collect::<Vec<_>>().array_windows().any(|&[fst, snd]| {
+ // contains a capitalisable character followed by, or preceded by, an underscore
+ char_has_case(fst) && snd == '_' || char_has_case(snd) && fst == '_'
+ })
+}
+
+fn to_camel_case(s: &str) -> String {
+ s.trim_matches('_')
+ .split('_')
+ .filter(|component| !component.is_empty())
+ .map(|component| {
+ let mut camel_cased_component = String::new();
+
+ let mut new_word = true;
+ let mut prev_is_lower_case = true;
+
+ for c in component.chars() {
+ // Preserve the case if an uppercase letter follows a lowercase letter, so that
+ // `camelCase` is converted to `CamelCase`.
+ if prev_is_lower_case && c.is_uppercase() {
+ new_word = true;
+ }
+
+ if new_word {
+ camel_cased_component.extend(c.to_uppercase());
+ } else {
+ camel_cased_component.extend(c.to_lowercase());
+ }
+
+ prev_is_lower_case = c.is_lowercase();
+ new_word = false;
+ }
+
+ camel_cased_component
+ })
+ .fold((String::new(), None), |(acc, prev): (String, Option<String>), next| {
+ // separate two components with an underscore if their boundary cannot
+ // be distinguished using an uppercase/lowercase case distinction
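+ // e.g. for `X86__64` the components are `X86` and `64`; the boundary chars
+ // `6` and `6` have no case, so they are joined as `X86_64` rather than `X8664`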
+ let join = if let Some(prev) = prev {
+ let l = prev.chars().last().unwrap();
+ let f = next.chars().next().unwrap();
+ !char_has_case(l) && !char_has_case(f)
+ } else {
+ false
+ };
+ (acc + if join { "_" } else { "" } + &next, Some(next))
+ })
+ .0
+}
+
+impl NonCamelCaseTypes {
+ fn check_case(&self, cx: &EarlyContext<'_>, sort: &str, ident: &Ident) {
+ let name = ident.name.as_str();
+
+ if !is_camel_case(name) {
+ cx.struct_span_lint(NON_CAMEL_CASE_TYPES, ident.span, |lint| {
+ let mut err = lint.build(fluent::lint::non_camel_case_type);
+ let cc = to_camel_case(name);
+ // We cannot provide meaningful suggestions
+ // if the characters are in the category of "Lowercase Letter".
+ if *name != cc {
+ err.span_suggestion(
+ ident.span,
+ fluent::lint::suggestion,
+ cc,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_label(ident.span, fluent::lint::label);
+ }
+
+ err.set_arg("sort", sort);
+ err.set_arg("name", name);
+ err.emit();
+ })
+ }
+ }
+}
+
+impl EarlyLintPass for NonCamelCaseTypes {
+ fn check_item(&mut self, cx: &EarlyContext<'_>, it: &ast::Item) {
+ let has_repr_c = it
+ .attrs
+ .iter()
+ .any(|attr| attr::find_repr_attrs(cx.sess(), attr).contains(&attr::ReprC));
+
+ if has_repr_c {
+ return;
+ }
+
+ match it.kind {
+ ast::ItemKind::TyAlias(..)
+ | ast::ItemKind::Enum(..)
+ | ast::ItemKind::Struct(..)
+ | ast::ItemKind::Union(..) => self.check_case(cx, "type", &it.ident),
+ ast::ItemKind::Trait(..) => self.check_case(cx, "trait", &it.ident),
+ ast::ItemKind::TraitAlias(..) => self.check_case(cx, "trait alias", &it.ident),
+ _ => (),
+ }
+ }
+
+ fn check_trait_item(&mut self, cx: &EarlyContext<'_>, it: &ast::AssocItem) {
+ if let ast::AssocItemKind::TyAlias(..) = it.kind {
+ self.check_case(cx, "associated type", &it.ident);
+ }
+ }
+
+ fn check_variant(&mut self, cx: &EarlyContext<'_>, v: &ast::Variant) {
+ self.check_case(cx, "variant", &v.ident);
+ }
+
+ fn check_generic_param(&mut self, cx: &EarlyContext<'_>, param: &ast::GenericParam) {
+ if let ast::GenericParamKind::Type { .. } = param.kind {
+ self.check_case(cx, "type parameter", &param.ident);
+ }
+ }
+}
+
+declare_lint! {
+ /// The `non_snake_case` lint detects variables, methods, functions,
+ /// lifetime parameters and modules that don't have snake case names.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let MY_VALUE = 5;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The preferred style for these identifiers is to use "snake case",
+ /// where all the characters are in lowercase, with words separated with a
+ /// single underscore, such as `my_value`.
+ pub NON_SNAKE_CASE,
+ Warn,
+ "variables, methods, functions, lifetime parameters and modules should have snake case names"
+}
+
+declare_lint_pass!(NonSnakeCase => [NON_SNAKE_CASE]);
+
+impl NonSnakeCase {
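+ /// Converts an identifier to snake case, e.g. `"CamelCase"` becomes
+ /// `"camel_case"`; leading underscores are preserved, so `"__Foo"` becomes
+ /// `"__foo"`.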
+ fn to_snake_case(mut str: &str) -> String {
+ let mut words = vec![];
+ // Preserve leading underscores
+ str = str.trim_start_matches(|c: char| {
+ if c == '_' {
+ words.push(String::new());
+ true
+ } else {
+ false
+ }
+ });
+ for s in str.split('_') {
+ let mut last_upper = false;
+ let mut buf = String::new();
+ if s.is_empty() {
+ continue;
+ }
+ for ch in s.chars() {
+ if !buf.is_empty() && buf != "'" && ch.is_uppercase() && !last_upper {
+ words.push(buf);
+ buf = String::new();
+ }
+ last_upper = ch.is_uppercase();
+ buf.extend(ch.to_lowercase());
+ }
+ words.push(buf);
+ }
+ words.join("_")
+ }
+
+ /// Checks if a given identifier is snake case, and reports a diagnostic if not.
+ fn check_snake_case(&self, cx: &LateContext<'_>, sort: &str, ident: &Ident) {
+ fn is_snake_case(ident: &str) -> bool {
+ if ident.is_empty() {
+ return true;
+ }
+ let ident = ident.trim_start_matches('\'');
+ let ident = ident.trim_matches('_');
+
+ let mut allow_underscore = true;
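+ // e.g. `my_value` is accepted, while `my__value` (consecutive underscores)
+ // and `MY_VALUE` (uppercase letters) are rejected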
+ ident.chars().all(|c| {
+ allow_underscore = match c {
+ '_' if !allow_underscore => return false,
+ '_' => false,
+ // It would be more obvious to use `c.is_lowercase()`,
+ // but some characters do not have a lowercase form
+ c if !c.is_uppercase() => true,
+ _ => return false,
+ };
+ true
+ })
+ }
+
+ let name = ident.name.as_str();
+
+ if !is_snake_case(name) {
+ cx.struct_span_lint(NON_SNAKE_CASE, ident.span, |lint| {
+ let sc = NonSnakeCase::to_snake_case(name);
+ let mut err = lint.build(fluent::lint::non_snake_case);
+ // We cannot provide meaningful suggestions
+ // if the characters are in the category of "Uppercase Letter".
+ if name != sc {
+ // We have a valid span in almost all cases, but we don't have one when linting a crate
+ // name provided via the command line.
+ if !ident.span.is_dummy() {
+ let sc_ident = Ident::from_str_and_span(&sc, ident.span);
+ let (message, suggestion) = if sc_ident.is_reserved() {
+ // We shouldn't suggest a reserved identifier to fix non-snake-case identifiers.
+ // Instead, recommend renaming the identifier entirely or, if permitted,
+ // escaping it to create a raw identifier.
+ if sc_ident.name.can_be_raw() {
+ (fluent::lint::rename_or_convert_suggestion, sc_ident.to_string())
+ } else {
+ err.note(fluent::lint::cannot_convert_note);
+ (fluent::lint::rename_suggestion, String::new())
+ }
+ } else {
+ (fluent::lint::convert_suggestion, sc.clone())
+ };
+
+ err.span_suggestion(
+ ident.span,
+ message,
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.help(fluent::lint::help);
+ }
+ } else {
+ err.span_label(ident.span, fluent::lint::label);
+ }
+
+ err.set_arg("sort", sort);
+ err.set_arg("name", name);
+ err.set_arg("sc", sc);
+ err.emit();
+ });
+ }
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for NonSnakeCase {
+ fn check_mod(
+ &mut self,
+ cx: &LateContext<'_>,
+ _: &'tcx hir::Mod<'tcx>,
+ _: Span,
+ id: hir::HirId,
+ ) {
+ if id != hir::CRATE_HIR_ID {
+ return;
+ }
+
+ let crate_ident = if let Some(name) = &cx.tcx.sess.opts.crate_name {
+ Some(Ident::from_str(name))
+ } else {
+ cx.sess()
+ .find_by_name(&cx.tcx.hir().attrs(hir::CRATE_HIR_ID), sym::crate_name)
+ .and_then(|attr| attr.meta())
+ .and_then(|meta| {
+ meta.name_value_literal().and_then(|lit| {
+ if let ast::LitKind::Str(name, ..) = lit.kind {
+ // Discard the double quotes surrounding the literal.
+ let sp = cx
+ .sess()
+ .source_map()
+ .span_to_snippet(lit.span)
+ .ok()
+ .and_then(|snippet| {
+ let left = snippet.find('"')?;
+ let right =
+ snippet.rfind('"').map(|pos| snippet.len() - pos)?;
+
+ Some(
+ lit.span
+ .with_lo(lit.span.lo() + BytePos(left as u32 + 1))
+ .with_hi(lit.span.hi() - BytePos(right as u32)),
+ )
+ })
+ .unwrap_or(lit.span);
+
+ Some(Ident::new(name, sp))
+ } else {
+ None
+ }
+ })
+ })
+ };
+
+ if let Some(ident) = &crate_ident {
+ self.check_snake_case(cx, "crate", ident);
+ }
+ }
+
+ fn check_generic_param(&mut self, cx: &LateContext<'_>, param: &hir::GenericParam<'_>) {
+ if let GenericParamKind::Lifetime { .. } = param.kind {
+ self.check_snake_case(cx, "lifetime", &param.name.ident());
+ }
+ }
+
+ fn check_fn(
+ &mut self,
+ cx: &LateContext<'_>,
+ fk: FnKind<'_>,
+ _: &hir::FnDecl<'_>,
+ _: &hir::Body<'_>,
+ _: Span,
+ id: hir::HirId,
+ ) {
+ let attrs = cx.tcx.hir().attrs(id);
+ match &fk {
+ FnKind::Method(ident, sig, ..) => match method_context(cx, id) {
+ MethodLateContext::PlainImpl => {
+ if sig.header.abi != Abi::Rust && cx.sess().contains_name(attrs, sym::no_mangle)
+ {
+ return;
+ }
+ self.check_snake_case(cx, "method", ident);
+ }
+ MethodLateContext::TraitAutoImpl => {
+ self.check_snake_case(cx, "trait method", ident);
+ }
+ _ => (),
+ },
+ FnKind::ItemFn(ident, _, header) => {
+ // Skip foreign-ABI #[no_mangle] functions (Issue #31924)
+ if header.abi != Abi::Rust && cx.sess().contains_name(attrs, sym::no_mangle) {
+ return;
+ }
+ self.check_snake_case(cx, "function", ident);
+ }
+ FnKind::Closure => (),
+ }
+ }
+
+ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ if let hir::ItemKind::Mod(_) = it.kind {
+ self.check_snake_case(cx, "module", &it.ident);
+ }
+ }
+
+ fn check_trait_item(&mut self, cx: &LateContext<'_>, item: &hir::TraitItem<'_>) {
+ if let hir::TraitItemKind::Fn(_, hir::TraitFn::Required(pnames)) = item.kind {
+ self.check_snake_case(cx, "trait method", &item.ident);
+ for param_name in pnames {
+ self.check_snake_case(cx, "variable", param_name);
+ }
+ }
+ }
+
+ fn check_pat(&mut self, cx: &LateContext<'_>, p: &hir::Pat<'_>) {
+ if let PatKind::Binding(_, hid, ident, _) = p.kind {
+ if let hir::Node::Pat(parent_pat) = cx.tcx.hir().get(cx.tcx.hir().get_parent_node(hid))
+ {
+ if let PatKind::Struct(_, field_pats, _) = &parent_pat.kind {
+ if field_pats
+ .iter()
+ .any(|field| !field.is_shorthand && field.pat.hir_id == p.hir_id)
+ {
+ // Only check if a new name has been introduced, to avoid warning
+ // on both the struct definition and this pattern.
+ self.check_snake_case(cx, "variable", &ident);
+ }
+ return;
+ }
+ }
+ self.check_snake_case(cx, "variable", &ident);
+ }
+ }
+
+ fn check_struct_def(&mut self, cx: &LateContext<'_>, s: &hir::VariantData<'_>) {
+ for sf in s.fields() {
+ self.check_snake_case(cx, "structure field", &sf.ident);
+ }
+ }
+}
+
+declare_lint! {
+ /// The `non_upper_case_globals` lint detects static items that don't have
+ /// uppercase identifiers.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// static max_points: i32 = 5;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The preferred style is for static item names to use all uppercase
+ /// letters such as `MAX_POINTS`.
+ pub NON_UPPER_CASE_GLOBALS,
+ Warn,
+ "static constants should have uppercase identifiers"
+}
+
+declare_lint_pass!(NonUpperCaseGlobals => [NON_UPPER_CASE_GLOBALS]);
+
+impl NonUpperCaseGlobals {
+ fn check_upper_case(cx: &LateContext<'_>, sort: &str, ident: &Ident) {
+ let name = ident.name.as_str();
+ if name.chars().any(|c| c.is_lowercase()) {
+ cx.struct_span_lint(NON_UPPER_CASE_GLOBALS, ident.span, |lint| {
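+ // e.g. `maxPoints` is snake-cased to `max_points` and then uppercased
+ // to the suggestion `MAX_POINTS`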
+ let uc = NonSnakeCase::to_snake_case(&name).to_uppercase();
+ let mut err = lint.build(fluent::lint::non_upper_case_global);
+ // We cannot provide meaningful suggestions
+ // if the characters are in the category of "Lowercase Letter".
+ if *name != uc {
+ err.span_suggestion(
+ ident.span,
+ fluent::lint::suggestion,
+ uc,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_label(ident.span, fluent::lint::label);
+ }
+
+ err.set_arg("sort", sort);
+ err.set_arg("name", name);
+ err.emit();
+ })
+ }
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for NonUpperCaseGlobals {
+ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ let attrs = cx.tcx.hir().attrs(it.hir_id());
+ match it.kind {
+ hir::ItemKind::Static(..) if !cx.sess().contains_name(attrs, sym::no_mangle) => {
+ NonUpperCaseGlobals::check_upper_case(cx, "static variable", &it.ident);
+ }
+ hir::ItemKind::Const(..) => {
+ NonUpperCaseGlobals::check_upper_case(cx, "constant", &it.ident);
+ }
+ _ => {}
+ }
+ }
+
+ fn check_trait_item(&mut self, cx: &LateContext<'_>, ti: &hir::TraitItem<'_>) {
+ if let hir::TraitItemKind::Const(..) = ti.kind {
+ NonUpperCaseGlobals::check_upper_case(cx, "associated constant", &ti.ident);
+ }
+ }
+
+ fn check_impl_item(&mut self, cx: &LateContext<'_>, ii: &hir::ImplItem<'_>) {
+ if let hir::ImplItemKind::Const(..) = ii.kind {
+ NonUpperCaseGlobals::check_upper_case(cx, "associated constant", &ii.ident);
+ }
+ }
+
+ fn check_pat(&mut self, cx: &LateContext<'_>, p: &hir::Pat<'_>) {
+ // Lint for constants that look like binding identifiers (#7526)
+ if let PatKind::Path(hir::QPath::Resolved(None, ref path)) = p.kind {
+ if let Res::Def(DefKind::Const, _) = path.res {
+ if path.segments.len() == 1 {
+ NonUpperCaseGlobals::check_upper_case(
+ cx,
+ "constant in pattern",
+ &path.segments[0].ident,
+ );
+ }
+ }
+ }
+ }
+
+ fn check_generic_param(&mut self, cx: &LateContext<'_>, param: &hir::GenericParam<'_>) {
+ if let GenericParamKind::Const { .. } = param.kind {
+ NonUpperCaseGlobals::check_upper_case(cx, "const parameter", &param.name.ident());
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_lint/src/nonstandard_style/tests.rs b/compiler/rustc_lint/src/nonstandard_style/tests.rs
new file mode 100644
index 000000000..39c525b86
--- /dev/null
+++ b/compiler/rustc_lint/src/nonstandard_style/tests.rs
@@ -0,0 +1,21 @@
+use super::{is_camel_case, to_camel_case};
+
+#[test]
+fn camel_case() {
+ assert!(!is_camel_case("userData"));
+ assert_eq!(to_camel_case("userData"), "UserData");
+
+ assert!(is_camel_case("X86_64"));
+
+ assert!(!is_camel_case("X86__64"));
+ assert_eq!(to_camel_case("X86__64"), "X86_64");
+
+ assert!(!is_camel_case("Abc_123"));
+ assert_eq!(to_camel_case("Abc_123"), "Abc123");
+
+ assert!(!is_camel_case("A1_b2_c3"));
+ assert_eq!(to_camel_case("A1_b2_c3"), "A1B2C3");
+
+ assert!(!is_camel_case("ONE_TWO_THREE"));
+ assert_eq!(to_camel_case("ONE_TWO_THREE"), "OneTwoThree");
+}
diff --git a/compiler/rustc_lint/src/noop_method_call.rs b/compiler/rustc_lint/src/noop_method_call.rs
new file mode 100644
index 000000000..11a752ff0
--- /dev/null
+++ b/compiler/rustc_lint/src/noop_method_call.rs
@@ -0,0 +1,103 @@
+use crate::context::LintContext;
+use crate::rustc_middle::ty::TypeVisitable;
+use crate::LateContext;
+use crate::LateLintPass;
+use rustc_errors::fluent;
+use rustc_hir::def::DefKind;
+use rustc_hir::{Expr, ExprKind};
+use rustc_middle::ty;
+use rustc_span::symbol::sym;
+
+declare_lint! {
+ /// The `noop_method_call` lint detects specific calls to noop methods
+ /// such as calling `<&T as Clone>::clone` where `T: !Clone`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// # #![allow(unused)]
+ /// #![warn(noop_method_call)]
+ /// struct Foo;
+ /// let foo = &Foo;
+ /// let clone: &Foo = foo.clone();
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Some method calls are noops, meaning that they do nothing. Usually such methods
+ /// are the result of blanket implementations that happen to create some method invocations
+ /// that end up not doing anything. For instance, `Clone` is implemented on all `&T`, but
+ /// calling `clone` on a `&T` where `T` does not implement `Clone` actually doesn't do
+ /// anything, as references are `Copy`. This lint detects these calls and warns the user about them.
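+ ///
+ /// For reference, the blanket implementation in `core` is essentially this
+ /// (sketch, not the exact source):
+ ///
+ /// ```rust,ignore (illustrative)
+ /// impl<T: ?Sized> Clone for &T {
+ ///     fn clone(&self) -> Self {
+ ///         *self // just copies the reference
+ ///     }
+ /// }
+ /// ```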
+ pub NOOP_METHOD_CALL,
+ Allow,
+ "detects the use of well-known noop methods"
+}
+
+declare_lint_pass!(NoopMethodCall => [NOOP_METHOD_CALL]);
+
+impl<'tcx> LateLintPass<'tcx> for NoopMethodCall {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
+ // We only care about method calls.
+ let ExprKind::MethodCall(call, elements, _) = &expr.kind else {
+ return
+ };
+ // We only care about method calls corresponding to the `Clone`, `Deref` and `Borrow`
+ // traits and ignore any other method call.
+ let (trait_id, did) = match cx.typeck_results().type_dependent_def(expr.hir_id) {
+ // Verify we are dealing with a method/associated function.
+ Some((DefKind::AssocFn, did)) => match cx.tcx.trait_of_item(did) {
+ // Check that we're dealing with a trait method for one of the traits we care about.
+ Some(trait_id)
+ if matches!(
+ cx.tcx.get_diagnostic_name(trait_id),
+ Some(sym::Borrow | sym::Clone | sym::Deref)
+ ) =>
+ {
+ (trait_id, did)
+ }
+ _ => return,
+ },
+ _ => return,
+ };
+ let substs = cx.typeck_results().node_substs(expr.hir_id);
+ if substs.needs_subst() {
+ // We can't resolve on types that require monomorphization, so we bail
+ // out if the substs still need substitution.
+ return;
+ }
+ let param_env = cx.tcx.param_env(trait_id);
+ // Resolve the trait method instance.
+ let Ok(Some(i)) = ty::Instance::resolve(cx.tcx, param_env, did, substs) else {
+ return
+ };
+ // (Re)check that the resolved instance is one of the known noop diagnostic items.
+ let Some(name) = cx.tcx.get_diagnostic_name(i.def_id()) else { return };
+ if !matches!(
+ name,
+ sym::noop_method_borrow | sym::noop_method_clone | sym::noop_method_deref
+ ) {
+ return;
+ }
+ let receiver = &elements[0];
+ let receiver_ty = cx.typeck_results().expr_ty(receiver);
+ let expr_ty = cx.typeck_results().expr_ty_adjusted(expr);
+ if receiver_ty != expr_ty {
+ // This lint will only trigger if the receiver type and resulting expression
+ // type are the same, implying that the method call is unnecessary.
+ return;
+ }
+ let expr_span = expr.span;
+ let span = expr_span.with_lo(receiver.span.hi());
+ cx.struct_span_lint(NOOP_METHOD_CALL, span, |lint| {
+ lint.build(fluent::lint::noop_method_call)
+ .set_arg("method", call.ident.name)
+ .set_arg("receiver_ty", receiver_ty)
+ .span_label(span, fluent::lint::label)
+ .note(fluent::lint::note)
+ .emit();
+ });
+ }
+}
diff --git a/compiler/rustc_lint/src/pass_by_value.rs b/compiler/rustc_lint/src/pass_by_value.rs
new file mode 100644
index 000000000..af5e5faf1
--- /dev/null
+++ b/compiler/rustc_lint/src/pass_by_value.rs
@@ -0,0 +1,96 @@
+use crate::{LateContext, LateLintPass, LintContext};
+use rustc_errors::{fluent, Applicability};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::{GenericArg, PathSegment, QPath, TyKind};
+use rustc_middle::ty;
+use rustc_span::symbol::sym;
+
+declare_tool_lint! {
+ /// The `rustc_pass_by_value` lint requires types marked with `#[rustc_pass_by_value]` to
+ /// always be passed by value. This is usually used for types that are thin wrappers around
+ /// references, so there is no benefit to an extra layer of indirection. (Example: `Ty` which
+ /// is a reference to an `Interned<TyS>`)
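+ ///
+ /// A hypothetical sketch of what gets linted (`Interner` is a made-up type):
+ ///
+ /// ```rust,ignore (internal attribute)
+ /// #[rustc_pass_by_value]
+ /// struct Interner;
+ ///
+ /// fn lookup(i: &Interner) {} // linted: suggests taking `Interner` by value
+ /// ```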
+ pub rustc::PASS_BY_VALUE,
+ Warn,
+ "pass by reference of a type flagged as `#[rustc_pass_by_value]`",
+ report_in_external_macro: true
+}
+
+declare_lint_pass!(PassByValue => [PASS_BY_VALUE]);
+
+impl<'tcx> LateLintPass<'tcx> for PassByValue {
+ fn check_ty(&mut self, cx: &LateContext<'_>, ty: &'tcx hir::Ty<'tcx>) {
+ match &ty.kind {
+ TyKind::Rptr(_, hir::MutTy { ty: inner_ty, mutbl: hir::Mutability::Not }) => {
+ if let Some(impl_did) = cx.tcx.impl_of_method(ty.hir_id.owner.to_def_id()) {
+ if cx.tcx.impl_trait_ref(impl_did).is_some() {
+ return;
+ }
+ }
+ if let Some(t) = path_for_pass_by_value(cx, &inner_ty) {
+ cx.struct_span_lint(PASS_BY_VALUE, ty.span, |lint| {
+ lint.build(fluent::lint::pass_by_value)
+ .set_arg("ty", t.clone())
+ .span_suggestion(
+ ty.span,
+ fluent::lint::suggestion,
+ t,
+ // Changing type of function argument
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ })
+ }
+ }
+ _ => {}
+ }
+ }
+}
+
+fn path_for_pass_by_value(cx: &LateContext<'_>, ty: &hir::Ty<'_>) -> Option<String> {
+ if let TyKind::Path(QPath::Resolved(_, path)) = &ty.kind {
+ match path.res {
+ Res::Def(_, def_id) if cx.tcx.has_attr(def_id, sym::rustc_pass_by_value) => {
+ let name = cx.tcx.item_name(def_id).to_ident_string();
+ let path_segment = path.segments.last().unwrap();
+ return Some(format!("{}{}", name, gen_args(cx, path_segment)));
+ }
+ Res::SelfTy { trait_: None, alias_to: Some((did, _)) } => {
+ if let ty::Adt(adt, substs) = cx.tcx.type_of(did).kind() {
+ if cx.tcx.has_attr(adt.did(), sym::rustc_pass_by_value) {
+ return Some(cx.tcx.def_path_str_with_substs(adt.did(), substs));
+ }
+ }
+ }
+ _ => (),
+ }
+ }
+
+ None
+}
+
+fn gen_args(cx: &LateContext<'_>, segment: &PathSegment<'_>) -> String {
+ if let Some(args) = &segment.args {
+ let params = args
+ .args
+ .iter()
+ .map(|arg| match arg {
+ GenericArg::Lifetime(lt) => lt.name.ident().to_string(),
+ GenericArg::Type(ty) => {
+ cx.tcx.sess.source_map().span_to_snippet(ty.span).unwrap_or_else(|_| "_".into())
+ }
+ GenericArg::Const(c) => {
+ cx.tcx.sess.source_map().span_to_snippet(c.span).unwrap_or_else(|_| "_".into())
+ }
+ GenericArg::Infer(_) => String::from("_"),
+ })
+ .collect::<Vec<_>>();
+
+ if !params.is_empty() {
+ return format!("<{}>", params.join(", "));
+ }
+ }
+
+ String::new()
+}
diff --git a/compiler/rustc_lint/src/passes.rs b/compiler/rustc_lint/src/passes.rs
new file mode 100644
index 000000000..cb7bd407e
--- /dev/null
+++ b/compiler/rustc_lint/src/passes.rs
@@ -0,0 +1,249 @@
+use crate::context::{EarlyContext, LateContext};
+
+use rustc_ast as ast;
+use rustc_data_structures::sync;
+use rustc_hir as hir;
+use rustc_session::lint::builtin::HardwiredLints;
+use rustc_session::lint::LintPass;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+#[macro_export]
+macro_rules! late_lint_methods {
+ ($macro:path, $args:tt, [$hir:tt]) => (
+ $macro!($args, [$hir], [
+ fn check_body(a: &$hir hir::Body<$hir>);
+ fn check_body_post(a: &$hir hir::Body<$hir>);
+ fn check_crate();
+ fn check_crate_post();
+ fn check_mod(a: &$hir hir::Mod<$hir>, b: Span, c: hir::HirId);
+ fn check_foreign_item(a: &$hir hir::ForeignItem<$hir>);
+ fn check_item(a: &$hir hir::Item<$hir>);
+ fn check_item_post(a: &$hir hir::Item<$hir>);
+ fn check_local(a: &$hir hir::Local<$hir>);
+ fn check_block(a: &$hir hir::Block<$hir>);
+ fn check_block_post(a: &$hir hir::Block<$hir>);
+ fn check_stmt(a: &$hir hir::Stmt<$hir>);
+ fn check_arm(a: &$hir hir::Arm<$hir>);
+ fn check_pat(a: &$hir hir::Pat<$hir>);
+ fn check_expr(a: &$hir hir::Expr<$hir>);
+ fn check_expr_post(a: &$hir hir::Expr<$hir>);
+ fn check_ty(a: &$hir hir::Ty<$hir>);
+ fn check_generic_param(a: &$hir hir::GenericParam<$hir>);
+ fn check_generics(a: &$hir hir::Generics<$hir>);
+ fn check_poly_trait_ref(a: &$hir hir::PolyTraitRef<$hir>, b: hir::TraitBoundModifier);
+ fn check_fn(
+ a: rustc_hir::intravisit::FnKind<$hir>,
+ b: &$hir hir::FnDecl<$hir>,
+ c: &$hir hir::Body<$hir>,
+ d: Span,
+ e: hir::HirId);
+ fn check_trait_item(a: &$hir hir::TraitItem<$hir>);
+ fn check_impl_item(a: &$hir hir::ImplItem<$hir>);
+ fn check_impl_item_post(a: &$hir hir::ImplItem<$hir>);
+ fn check_struct_def(a: &$hir hir::VariantData<$hir>);
+ fn check_field_def(a: &$hir hir::FieldDef<$hir>);
+ fn check_variant(a: &$hir hir::Variant<$hir>);
+ fn check_path(a: &$hir hir::Path<$hir>, b: hir::HirId);
+ fn check_attribute(a: &$hir ast::Attribute);
+
+ /// Called when entering a syntax node that can have lint attributes such
+ /// as `#[allow(...)]`. Called with *all* the attributes of that node.
+ fn enter_lint_attrs(a: &$hir [ast::Attribute]);
+
+ /// Counterpart to `enter_lint_attrs`.
+ fn exit_lint_attrs(a: &$hir [ast::Attribute]);
+ ]);
+ )
+}
+
+/// Trait for types providing lint checks.
+///
+/// Each `check` method checks a single syntax node, and should not
+/// invoke methods recursively (unlike `Visitor`). By default they
+/// do nothing.
+//
+// FIXME: eliminate the duplication with `Visitor`. But this also
+// contains a few lint-specific methods with no equivalent in `Visitor`.
+
+macro_rules! expand_lint_pass_methods {
+ ($context:ty, [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => (
+ $(#[inline(always)] fn $name(&mut self, _: $context, $(_: $arg),*) {})*
+ )
+}
+
+macro_rules! declare_late_lint_pass {
+ ([], [$hir:tt], [$($methods:tt)*]) => (
+ pub trait LateLintPass<$hir>: LintPass {
+ expand_lint_pass_methods!(&LateContext<$hir>, [$($methods)*]);
+ }
+ )
+}
+
+late_lint_methods!(declare_late_lint_pass, [], ['tcx]);
+
+impl LateLintPass<'_> for HardwiredLints {}
+
+#[macro_export]
+macro_rules! expand_combined_late_lint_pass_method {
+ ([$($passes:ident),*], $self: ident, $name: ident, $params:tt) => ({
+ $($self.$passes.$name $params;)*
+ })
+}
+
+#[macro_export]
+macro_rules! expand_combined_late_lint_pass_methods {
+ ($passes:tt, [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => (
+ $(fn $name(&mut self, context: &LateContext<'tcx>, $($param: $arg),*) {
+ expand_combined_late_lint_pass_method!($passes, self, $name, (context, $($param),*));
+ })*
+ )
+}
+
+#[macro_export]
+macro_rules! declare_combined_late_lint_pass {
+ ([$v:vis $name:ident, [$($passes:ident: $constructor:expr,)*]], [$hir:tt], $methods:tt) => (
+ #[allow(non_snake_case)]
+ $v struct $name {
+ $($passes: $passes,)*
+ }
+
+ impl $name {
+ $v fn new() -> Self {
+ Self {
+ $($passes: $constructor,)*
+ }
+ }
+
+ $v fn get_lints() -> LintArray {
+ let mut lints = Vec::new();
+ $(lints.extend_from_slice(&$passes::get_lints());)*
+ lints
+ }
+ }
+
+ impl<'tcx> LateLintPass<'tcx> for $name {
+ expand_combined_late_lint_pass_methods!([$($passes),*], $methods);
+ }
+
+ #[allow(rustc::lint_pass_impl_without_macro)]
+ impl LintPass for $name {
+ fn name(&self) -> &'static str {
+ panic!()
+ }
+ }
+ )
+}
+
+#[macro_export]
+macro_rules! early_lint_methods {
+ ($macro:path, $args:tt) => (
+ $macro!($args, [
+ fn check_param(a: &ast::Param);
+ fn check_ident(a: Ident);
+ fn check_crate(a: &ast::Crate);
+ fn check_crate_post(a: &ast::Crate);
+ fn check_item(a: &ast::Item);
+ fn check_item_post(a: &ast::Item);
+ fn check_local(a: &ast::Local);
+ fn check_block(a: &ast::Block);
+ fn check_stmt(a: &ast::Stmt);
+ fn check_arm(a: &ast::Arm);
+ fn check_pat(a: &ast::Pat);
+ fn check_pat_post(a: &ast::Pat);
+ fn check_expr(a: &ast::Expr);
+ fn check_ty(a: &ast::Ty);
+ fn check_generic_arg(a: &ast::GenericArg);
+ fn check_generic_param(a: &ast::GenericParam);
+ fn check_generics(a: &ast::Generics);
+ fn check_poly_trait_ref(a: &ast::PolyTraitRef,
+ b: &ast::TraitBoundModifier);
+ fn check_fn(a: rustc_ast::visit::FnKind<'_>, c: Span, d_: ast::NodeId);
+ fn check_trait_item(a: &ast::AssocItem);
+ fn check_impl_item(a: &ast::AssocItem);
+ fn check_variant(a: &ast::Variant);
+ fn check_attribute(a: &ast::Attribute);
+ fn check_mac_def(a: &ast::MacroDef, b: ast::NodeId);
+ fn check_mac(a: &ast::MacCall);
+
+ /// Called when entering a syntax node that can have lint attributes such
+ /// as `#[allow(...)]`. Called with *all* the attributes of that node.
+ fn enter_lint_attrs(a: &[ast::Attribute]);
+
+ /// Counterpart to `enter_lint_attrs`.
+ fn exit_lint_attrs(a: &[ast::Attribute]);
+ ]);
+ )
+}
+
+macro_rules! expand_early_lint_pass_methods {
+ ($context:ty, [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => (
+ $(#[inline(always)] fn $name(&mut self, _: $context, $(_: $arg),*) {})*
+ )
+}
+
+macro_rules! declare_early_lint_pass {
+ ([], [$($methods:tt)*]) => (
+ pub trait EarlyLintPass: LintPass {
+ expand_early_lint_pass_methods!(&EarlyContext<'_>, [$($methods)*]);
+ }
+ )
+}
+
+early_lint_methods!(declare_early_lint_pass, []);
+
+#[macro_export]
+macro_rules! expand_combined_early_lint_pass_method {
+ ([$($passes:ident),*], $self: ident, $name: ident, $params:tt) => ({
+ $($self.$passes.$name $params;)*
+ })
+}
+
+#[macro_export]
+macro_rules! expand_combined_early_lint_pass_methods {
+ ($passes:tt, [$($(#[$attr:meta])* fn $name:ident($($param:ident: $arg:ty),*);)*]) => (
+ $(fn $name(&mut self, context: &EarlyContext<'_>, $($param: $arg),*) {
+ expand_combined_early_lint_pass_method!($passes, self, $name, (context, $($param),*));
+ })*
+ )
+}
+
+#[macro_export]
+macro_rules! declare_combined_early_lint_pass {
+ ([$v:vis $name:ident, [$($passes:ident: $constructor:expr,)*]], $methods:tt) => (
+ #[allow(non_snake_case)]
+ $v struct $name {
+ $($passes: $passes,)*
+ }
+
+ impl $name {
+ $v fn new() -> Self {
+ Self {
+ $($passes: $constructor,)*
+ }
+ }
+
+ $v fn get_lints() -> LintArray {
+ let mut lints = Vec::new();
+ $(lints.extend_from_slice(&$passes::get_lints());)*
+ lints
+ }
+ }
+
+ impl EarlyLintPass for $name {
+ expand_combined_early_lint_pass_methods!([$($passes),*], $methods);
+ }
+
+ #[allow(rustc::lint_pass_impl_without_macro)]
+ impl LintPass for $name {
+ fn name(&self) -> &'static str {
+ panic!()
+ }
+ }
+ )
+}
+
+/// A lint pass boxed up as a trait object.
+pub type EarlyLintPassObject = Box<dyn EarlyLintPass + sync::Send + sync::Sync + 'static>;
+pub type LateLintPassObject =
+ Box<dyn for<'tcx> LateLintPass<'tcx> + sync::Send + sync::Sync + 'static>;
diff --git a/compiler/rustc_lint/src/redundant_semicolon.rs b/compiler/rustc_lint/src/redundant_semicolon.rs
new file mode 100644
index 000000000..26f413453
--- /dev/null
+++ b/compiler/rustc_lint/src/redundant_semicolon.rs
@@ -0,0 +1,58 @@
+use crate::{EarlyContext, EarlyLintPass, LintContext};
+use rustc_ast::{Block, StmtKind};
+use rustc_errors::{fluent, Applicability};
+use rustc_span::Span;
+
+declare_lint! {
+ /// The `redundant_semicolons` lint detects unnecessary trailing
+ /// semicolons.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let _ = 123;;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Extra semicolons are not needed, and may be removed to avoid confusion
+ /// and visual clutter.
+ pub REDUNDANT_SEMICOLONS,
+ Warn,
+ "detects unnecessary trailing semicolons"
+}
+
+declare_lint_pass!(RedundantSemicolons => [REDUNDANT_SEMICOLONS]);
+
+impl EarlyLintPass for RedundantSemicolons {
+ fn check_block(&mut self, cx: &EarlyContext<'_>, block: &Block) {
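+ // `seq` tracks the current run of empty statements as (span of the run so
+ // far, whether it contains more than one `;`); e.g. in `let _ = 1;;;` the
+ // trailing `;;` form a single run that is reported once.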
+ let mut seq = None;
+ for stmt in block.stmts.iter() {
+ match (&stmt.kind, &mut seq) {
+ (StmtKind::Empty, None) => seq = Some((stmt.span, false)),
+ (StmtKind::Empty, Some(seq)) => *seq = (seq.0.to(stmt.span), true),
+ (_, seq) => maybe_lint_redundant_semis(cx, seq),
+ }
+ }
+ maybe_lint_redundant_semis(cx, &mut seq);
+ }
+}
+
+fn maybe_lint_redundant_semis(cx: &EarlyContext<'_>, seq: &mut Option<(Span, bool)>) {
+ if let Some((span, multiple)) = seq.take() {
+ // FIXME: Find a better way of ignoring the trailing
+ // semicolon from macro expansion
+ if span == rustc_span::DUMMY_SP {
+ return;
+ }
+
+ cx.struct_span_lint(REDUNDANT_SEMICOLONS, span, |lint| {
+ lint.build(fluent::lint::redundant_semicolons)
+ .set_arg("multiple", multiple)
+ .span_suggestion(span, fluent::lint::suggestion, "", Applicability::MaybeIncorrect)
+ .emit();
+ });
+ }
+}
diff --git a/compiler/rustc_lint/src/tests.rs b/compiler/rustc_lint/src/tests.rs
new file mode 100644
index 000000000..fc9d6f636
--- /dev/null
+++ b/compiler/rustc_lint/src/tests.rs
@@ -0,0 +1,26 @@
+use crate::context::parse_lint_and_tool_name;
+use rustc_span::{create_default_session_globals_then, Symbol};
+
+#[test]
+fn parse_lint_no_tool() {
+ create_default_session_globals_then(|| {
+ assert_eq!(parse_lint_and_tool_name("foo"), (None, "foo"))
+ });
+}
+
+#[test]
+fn parse_lint_with_tool() {
+ create_default_session_globals_then(|| {
+ assert_eq!(parse_lint_and_tool_name("clippy::foo"), (Some(Symbol::intern("clippy")), "foo"))
+ });
+}
+
+#[test]
+fn parse_lint_multiple_path() {
+ create_default_session_globals_then(|| {
+ assert_eq!(
+ parse_lint_and_tool_name("clippy::foo::bar"),
+ (Some(Symbol::intern("clippy")), "foo::bar")
+ )
+ });
+}
diff --git a/compiler/rustc_lint/src/traits.rs b/compiler/rustc_lint/src/traits.rs
new file mode 100644
index 000000000..df1587c59
--- /dev/null
+++ b/compiler/rustc_lint/src/traits.rs
@@ -0,0 +1,134 @@
+use crate::LateContext;
+use crate::LateLintPass;
+use crate::LintContext;
+use rustc_errors::fluent;
+use rustc_hir as hir;
+use rustc_span::symbol::sym;
+
+declare_lint! {
+ /// The `drop_bounds` lint checks for generics with `std::ops::Drop` as
+ /// bounds.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// fn foo<T: Drop>() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A generic trait bound of the form `T: Drop` is most likely misleading
+ /// and not what the programmer intended (they probably should have used
+ /// `std::mem::needs_drop` instead).
+ ///
+ /// `Drop` bounds do not actually indicate whether a type can be trivially
+ /// dropped or not, because a composite type containing `Drop` types does
+ /// not necessarily implement `Drop` itself. Naïvely, one might be tempted
+ /// to write an implementation that assumes that a type can be trivially
+ /// dropped while also supplying a specialization for `T: Drop` that
+ /// actually calls the destructor. However, this breaks down e.g. when `T`
+ /// is `String`, which does not implement `Drop` itself but contains a
+ /// `Vec`, which does implement `Drop`, so assuming `T` can be trivially
+ /// dropped would lead to a memory leak here.
+ ///
+ /// Furthermore, the `Drop` trait only contains one method, `Drop::drop`,
+ /// which may not be called explicitly in user code (`E0040`), so there is
+ /// really no use case for using `Drop` in trait bounds, save perhaps for
+ /// some obscure corner cases, which can use `#[allow(drop_bounds)]`.
+ pub DROP_BOUNDS,
+ Warn,
+ "bounds of the form `T: Drop` are most likely incorrect"
+}
+
+declare_lint! {
+ /// The `dyn_drop` lint checks for trait objects with `std::ops::Drop`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// fn foo(_x: Box<dyn Drop>) {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A trait object bound of the form `dyn Drop` is most likely misleading
+ /// and not what the programmer intended.
+ ///
+ /// `Drop` bounds do not actually indicate whether a type can be trivially
+ /// dropped or not, because a composite type containing `Drop` types does
+ /// not necessarily implement `Drop` itself. Naïvely, one might be tempted
+ /// to write a deferred drop system, to pull cleaning up memory out of a
+ /// latency-sensitive code path, using `dyn Drop` trait objects. However,
+ /// this breaks down e.g. when `T` is `String`, which does not implement
+ /// `Drop`, but should probably be accepted.
+ ///
+ /// To write a trait object bound that accepts anything, use a placeholder
+ /// trait with a blanket implementation.
+ ///
+ /// ```rust
+ /// trait Placeholder {}
+ /// impl<T> Placeholder for T {}
+ /// fn foo(_x: Box<dyn Placeholder>) {}
+ /// ```
+ pub DYN_DROP,
+ Warn,
+ "trait objects of the form `dyn Drop` are useless"
+}
+
+declare_lint_pass!(
+ /// Lint for bounds of the form `T: Drop`, which usually
+ /// indicate an attempt to emulate `std::mem::needs_drop`.
+ DropTraitConstraints => [DROP_BOUNDS, DYN_DROP]
+);
+
+impl<'tcx> LateLintPass<'tcx> for DropTraitConstraints {
+ fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) {
+ use rustc_middle::ty::PredicateKind::*;
+
+ let predicates = cx.tcx.explicit_predicates_of(item.def_id);
+ for &(predicate, span) in predicates.predicates {
+ let Trait(trait_predicate) = predicate.kind().skip_binder() else {
+ continue
+ };
+ let def_id = trait_predicate.trait_ref.def_id;
+ if cx.tcx.lang_items().drop_trait() == Some(def_id) {
+ // Explicitly allow `impl Drop`, a drop-guards-as-Voldemort-type pattern.
+ if trait_predicate.trait_ref.self_ty().is_impl_trait() {
+ continue;
+ }
+ cx.struct_span_lint(DROP_BOUNDS, span, |lint| {
+ let Some(needs_drop) = cx.tcx.get_diagnostic_item(sym::needs_drop) else {
+ return
+ };
+ lint.build(fluent::lint::drop_trait_constraints)
+ .set_arg("predicate", predicate)
+ .set_arg("needs_drop", cx.tcx.def_path_str(needs_drop))
+ .emit();
+ });
+ }
+ }
+ }
+
+ fn check_ty(&mut self, cx: &LateContext<'_>, ty: &'tcx hir::Ty<'tcx>) {
+ let hir::TyKind::TraitObject(bounds, _lifetime, _syntax) = &ty.kind else {
+ return
+ };
+ for bound in &bounds[..] {
+ let def_id = bound.trait_ref.trait_def_id();
+ if cx.tcx.lang_items().drop_trait() == def_id {
+ cx.struct_span_lint(DYN_DROP, bound.span, |lint| {
+ let Some(needs_drop) = cx.tcx.get_diagnostic_item(sym::needs_drop) else {
+ return
+ };
+ lint.build(fluent::lint::drop_glue)
+ .set_arg("needs_drop", cx.tcx.def_path_str(needs_drop))
+ .emit();
+ });
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
new file mode 100644
index 000000000..5c07afeb7
--- /dev/null
+++ b/compiler/rustc_lint/src/types.rs
@@ -0,0 +1,1576 @@
+use crate::{LateContext, LateLintPass, LintContext};
+use rustc_ast as ast;
+use rustc_attr as attr;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{fluent, Applicability, DiagnosticMessage};
+use rustc_hir as hir;
+use rustc_hir::{is_range_literal, Expr, ExprKind, Node};
+use rustc_macros::LintDiagnostic;
+use rustc_middle::ty::layout::{IntegerExt, LayoutOf, SizeSkeleton};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable};
+use rustc_span::source_map;
+use rustc_span::symbol::sym;
+use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_target::abi::{Abi, WrappingRange};
+use rustc_target::abi::{Integer, TagEncoding, Variants};
+use rustc_target::spec::abi::Abi as SpecAbi;
+
+use std::cmp;
+use std::iter;
+use std::ops::ControlFlow;
+use tracing::debug;
+
+declare_lint! {
+ /// The `unused_comparisons` lint detects comparisons made useless by
+ /// limits of the types involved.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// fn foo(x: u8) {
+ /// x >= 0;
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A useless comparison may indicate a mistake, and should be fixed or
+ /// removed.
+ UNUSED_COMPARISONS,
+ Warn,
+ "comparisons made useless by limits of the types involved"
+}
+
+declare_lint! {
+ /// The `overflowing_literals` lint detects literals that are out of range
+ /// for their type.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// let x: u8 = 1000;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It is usually a mistake to use a literal that overflows the type where
+ /// it is used. Either use a literal that is within range, or change the
+ /// type to be within the range of the literal.
+ OVERFLOWING_LITERALS,
+ Deny,
+ "literal out of range for its type"
+}
+
+declare_lint! {
+ /// The `variant_size_differences` lint detects enums with widely varying
+ /// variant sizes.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(variant_size_differences)]
+ /// enum En {
+ /// V0(u8),
+ /// VBig([u8; 1024]),
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It can be a mistake to add a variant to an enum that is much larger
+ /// than the other variants, bloating the overall size required for all
+ /// variants. This can impact performance and memory usage. This is
+ /// triggered if one variant is more than 3 times larger than the
+ /// second-largest variant.
+ ///
+ /// Consider placing the large variant's contents on the heap (for example
+ /// via [`Box`]) to keep the overall size of the enum itself down.
+ ///
+ /// This lint is "allow" by default because it can be noisy, and may not be
+ /// an actual problem. Decisions about this should be guided by
+ /// profiling and benchmarking.
+ ///
+ /// [`Box`]: https://doc.rust-lang.org/std/boxed/index.html
+ VARIANT_SIZE_DIFFERENCES,
+ Allow,
+ "detects enums with widely varying variant sizes"
+}
+
+#[derive(Copy, Clone)]
+pub struct TypeLimits {
+ /// Id of the last visited negated expression, so that a literal like
+ /// `-128i8` is checked as a single negative value rather than as the
+ /// negation of an out-of-range `128i8`
+ negated_expr_id: Option<hir::HirId>,
+}
+
+impl_lint_pass!(TypeLimits => [UNUSED_COMPARISONS, OVERFLOWING_LITERALS]);
+
+impl TypeLimits {
+ pub fn new() -> TypeLimits {
+ TypeLimits { negated_expr_id: None }
+ }
+}
+
+/// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint.
+/// Returns `true` iff the lint was overridden.
+fn lint_overflowing_range_endpoint<'tcx>(
+ cx: &LateContext<'tcx>,
+ lit: &hir::Lit,
+ lit_val: u128,
+ max: u128,
+ expr: &'tcx hir::Expr<'tcx>,
+ parent_expr: &'tcx hir::Expr<'tcx>,
+ ty: &str,
+) -> bool {
+ // We only want to handle exclusive (`..`) ranges,
+ // which are represented as `ExprKind::Struct`.
+ let mut overwritten = false;
+ if let ExprKind::Struct(_, eps, _) = &parent_expr.kind {
+ if eps.len() != 2 {
+ return false;
+ }
+ // We can suggest using an inclusive range
+ // (`..=`) instead only if it is the `end` that is
+ // overflowing and only by 1.
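+ // e.g. `0..256u8` gets the suggestion `0..=255u8`.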
+ if eps[1].expr.hir_id == expr.hir_id && lit_val - 1 == max {
+ cx.struct_span_lint(OVERFLOWING_LITERALS, parent_expr.span, |lint| {
+ let mut err = lint.build(fluent::lint::range_endpoint_out_of_range);
+ err.set_arg("ty", ty);
+ if let Ok(start) = cx.sess().source_map().span_to_snippet(eps[0].span) {
+ use ast::{LitIntType, LitKind};
+ // We need to preserve the literal's suffix,
+ // as it may determine typing information.
+ let suffix = match lit.node {
+ LitKind::Int(_, LitIntType::Signed(s)) => s.name_str(),
+ LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str(),
+ LitKind::Int(_, LitIntType::Unsuffixed) => "",
+ _ => bug!(),
+ };
+ let suggestion = format!("{}..={}{}", start, lit_val - 1, suffix);
+ err.span_suggestion(
+ parent_expr.span,
+ fluent::lint::suggestion,
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ overwritten = true;
+ }
+ });
+ }
+ }
+ overwritten
+}
+
+// For `isize` & `usize`, be conservative with the warnings, so that the
+// warnings are consistent between 32- and 64-bit platforms.
+fn int_ty_range(int_ty: ty::IntTy) -> (i128, i128) {
+ match int_ty {
+ ty::IntTy::Isize => (i64::MIN.into(), i64::MAX.into()),
+ ty::IntTy::I8 => (i8::MIN.into(), i8::MAX.into()),
+ ty::IntTy::I16 => (i16::MIN.into(), i16::MAX.into()),
+ ty::IntTy::I32 => (i32::MIN.into(), i32::MAX.into()),
+ ty::IntTy::I64 => (i64::MIN.into(), i64::MAX.into()),
+ ty::IntTy::I128 => (i128::MIN, i128::MAX),
+ }
+}
+
+fn uint_ty_range(uint_ty: ty::UintTy) -> (u128, u128) {
+ let max = match uint_ty {
+ ty::UintTy::Usize => u64::MAX.into(),
+ ty::UintTy::U8 => u8::MAX.into(),
+ ty::UintTy::U16 => u16::MAX.into(),
+ ty::UintTy::U32 => u32::MAX.into(),
+ ty::UintTy::U64 => u64::MAX.into(),
+ ty::UintTy::U128 => u128::MAX,
+ };
+ (0, max)
+}
+
+fn get_bin_hex_repr(cx: &LateContext<'_>, lit: &hir::Lit) -> Option<String> {
+ let src = cx.sess().source_map().span_to_snippet(lit.span).ok()?;
+ let firstch = src.chars().next()?;
+
+ if firstch == '0' {
+ match src.chars().nth(1) {
+ Some('x' | 'b') => return Some(src),
+ _ => return None,
+ }
+ }
+
+ None
+}
+
+fn report_bin_hex_error(
+ cx: &LateContext<'_>,
+ expr: &hir::Expr<'_>,
+ ty: attr::IntType,
+ repr_str: String,
+ val: u128,
+ negative: bool,
+) {
+ let size = Integer::from_attr(&cx.tcx, ty).size();
+ cx.struct_span_lint(OVERFLOWING_LITERALS, expr.span, |lint| {
+ let (t, actually) = match ty {
+ attr::IntType::SignedInt(t) => {
+ let actually = if negative {
+ -(size.sign_extend(val) as i128)
+ } else {
+ size.sign_extend(val) as i128
+ };
+ (t.name_str(), actually.to_string())
+ }
+ attr::IntType::UnsignedInt(t) => {
+ let actually = size.truncate(val);
+ (t.name_str(), actually.to_string())
+ }
+ };
+ let mut err = lint.build(fluent::lint::overflowing_bin_hex);
+ if negative {
+ // If the value is negative, emit a note about the value itself,
+ // separate from the literal.
+ err.note(fluent::lint::negative_note);
+ err.note(fluent::lint::negative_becomes_note);
+ } else {
+ err.note(fluent::lint::positive_note);
+ }
+ if let Some(sugg_ty) =
+ get_type_suggestion(cx.typeck_results().node_type(expr.hir_id), val, negative)
+ {
+ err.set_arg("suggestion_ty", sugg_ty);
+ if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') {
+ let (sans_suffix, _) = repr_str.split_at(pos);
+ err.span_suggestion(
+ expr.span,
+ fluent::lint::suggestion,
+ format!("{}{}", sans_suffix, sugg_ty),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.help(fluent::lint::help);
+ }
+ }
+ err.set_arg("ty", t);
+ err.set_arg("lit", repr_str);
+ err.set_arg("dec", val);
+ err.set_arg("actually", actually);
+ err.emit();
+ });
+}
+
+// This function finds the next fitting type and generates a suggestion string.
+// It searches for fitting types in the following way (`X < Y`):
+// - `iX`: if literal fits in `uX` => `uX`, else => `iY`
+// - `-iX` => `iY`
+// - `uX` => `uY`
+//
+// No suggestion for: `isize`, `usize`.
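+//
+// For example, a literal `300` in `i8` position suggests `i16`, while
+// `300` in `u8` position suggests `u16`.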
+fn get_type_suggestion(t: Ty<'_>, val: u128, negative: bool) -> Option<&'static str> {
+ use ty::IntTy::*;
+ use ty::UintTy::*;
+ macro_rules! find_fit {
+ ($ty:expr, $val:expr, $negative:expr,
+ $($type:ident => [$($utypes:expr),*] => [$($itypes:expr),*]),+) => {
+ {
+ let _neg = if negative { 1 } else { 0 };
+ match $ty {
+ $($type => {
+ $(if !negative && val <= uint_ty_range($utypes).1 {
+ return Some($utypes.name_str())
+ })*
+ $(if val <= int_ty_range($itypes).1 as u128 + _neg {
+ return Some($itypes.name_str())
+ })*
+ None
+ },)+
+ _ => None
+ }
+ }
+ }
+ }
+ match t.kind() {
+ ty::Int(i) => find_fit!(i, val, negative,
+ I8 => [U8] => [I16, I32, I64, I128],
+ I16 => [U16] => [I32, I64, I128],
+ I32 => [U32] => [I64, I128],
+ I64 => [U64] => [I128],
+ I128 => [U128] => []),
+ ty::Uint(u) => find_fit!(u, val, negative,
+ U8 => [U8, U16, U32, U64, U128] => [],
+ U16 => [U16, U32, U64, U128] => [],
+ U32 => [U32, U64, U128] => [],
+ U64 => [U64, U128] => [],
+ U128 => [U128] => []),
+ _ => None,
+ }
+}
+
+fn lint_int_literal<'tcx>(
+ cx: &LateContext<'tcx>,
+ type_limits: &TypeLimits,
+ e: &'tcx hir::Expr<'tcx>,
+ lit: &hir::Lit,
+ t: ty::IntTy,
+ v: u128,
+) {
+ let int_type = t.normalize(cx.sess().target.pointer_width);
+ let (min, max) = int_ty_range(int_type);
+ let max = max as u128;
+ let negative = type_limits.negated_expr_id == Some(e.hir_id);
+
+ // Detect a literal value outside the inclusive range `[min, max]`,
+ // avoiding the use of `-min` to prevent overflow/panic.
+ if (negative && v > max + 1) || (!negative && v > max) {
+ if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
+ report_bin_hex_error(
+ cx,
+ e,
+ attr::IntType::SignedInt(ty::ast_int_ty(t)),
+ repr_str,
+ v,
+ negative,
+ );
+ return;
+ }
+
+ let par_id = cx.tcx.hir().get_parent_node(e.hir_id);
+ if let Node::Expr(par_e) = cx.tcx.hir().get(par_id) {
+ if let hir::ExprKind::Struct(..) = par_e.kind {
+ if is_range_literal(par_e)
+ && lint_overflowing_range_endpoint(cx, lit, v, max, e, par_e, t.name_str())
+ {
+ // The overflowing literal lint was overridden.
+ return;
+ }
+ }
+ }
+
+ cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
+ let mut err = lint.build(fluent::lint::overflowing_int);
+ err.set_arg("ty", t.name_str());
+ err.set_arg(
+ "lit",
+ cx.sess()
+ .source_map()
+ .span_to_snippet(lit.span)
+ .expect("must get snippet from literal"),
+ );
+ err.set_arg("min", min);
+ err.set_arg("max", max);
+ err.note(fluent::lint::note);
+ if let Some(sugg_ty) =
+ get_type_suggestion(cx.typeck_results().node_type(e.hir_id), v, negative)
+ {
+ err.set_arg("suggestion_ty", sugg_ty);
+ err.help(fluent::lint::help);
+ }
+ err.emit();
+ });
+ }
+}
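+
+// For example (illustrative): `let x: i8 = 128;` is out of range (`max` is 127)
+// and is reported with a `u8` suggestion, while `let x: i8 = -128;` is in range
+// because the literal was recorded as negated via `negated_expr_id`.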
+
+fn lint_uint_literal<'tcx>(
+ cx: &LateContext<'tcx>,
+ e: &'tcx hir::Expr<'tcx>,
+ lit: &hir::Lit,
+ t: ty::UintTy,
+) {
+ let uint_type = t.normalize(cx.sess().target.pointer_width);
+ let (min, max) = uint_ty_range(uint_type);
+ let lit_val: u128 = match lit.node {
+ // _v is u8, within range by definition
+ ast::LitKind::Byte(_v) => return,
+ ast::LitKind::Int(v, _) => v,
+ _ => bug!(),
+ };
+ if lit_val < min || lit_val > max {
+ let parent_id = cx.tcx.hir().get_parent_node(e.hir_id);
+ if let Node::Expr(par_e) = cx.tcx.hir().get(parent_id) {
+ match par_e.kind {
+ hir::ExprKind::Cast(..) => {
+ if let ty::Char = cx.typeck_results().expr_ty(par_e).kind() {
+ cx.struct_span_lint(OVERFLOWING_LITERALS, par_e.span, |lint| {
+ lint.build(fluent::lint::only_cast_u8_to_char)
+ .span_suggestion(
+ par_e.span,
+ fluent::lint::suggestion,
+ format!("'\\u{{{:X}}}'", lit_val),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ return;
+ }
+ }
+ hir::ExprKind::Struct(..) if is_range_literal(par_e) => {
+ let t = t.name_str();
+ if lint_overflowing_range_endpoint(cx, lit, lit_val, max, e, par_e, t) {
+ // The overflowing literal lint was overridden.
+ return;
+ }
+ }
+ _ => {}
+ }
+ }
+ if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
+ report_bin_hex_error(
+ cx,
+ e,
+ attr::IntType::UnsignedInt(ty::ast_uint_ty(t)),
+ repr_str,
+ lit_val,
+ false,
+ );
+ return;
+ }
+ cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
+ lint.build(fluent::lint::overflowing_uint)
+ .set_arg("ty", t.name_str())
+ .set_arg(
+ "lit",
+ cx.sess()
+ .source_map()
+ .span_to_snippet(lit.span)
+ .expect("must get snippet from literal"),
+ )
+ .set_arg("min", min)
+ .set_arg("max", max)
+ .note(fluent::lint::note)
+ .emit();
+ });
+ }
+}
+
+fn lint_literal<'tcx>(
+ cx: &LateContext<'tcx>,
+ type_limits: &TypeLimits,
+ e: &'tcx hir::Expr<'tcx>,
+ lit: &hir::Lit,
+) {
+ match *cx.typeck_results().node_type(e.hir_id).kind() {
+ ty::Int(t) => {
+ match lit.node {
+ ast::LitKind::Int(v, ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed) => {
+ lint_int_literal(cx, type_limits, e, lit, t, v)
+ }
+ _ => bug!(),
+ };
+ }
+ ty::Uint(t) => lint_uint_literal(cx, e, lit, t),
+ ty::Float(t) => {
+ let is_infinite = match lit.node {
+ ast::LitKind::Float(v, _) => match t {
+ ty::FloatTy::F32 => v.as_str().parse().map(f32::is_infinite),
+ ty::FloatTy::F64 => v.as_str().parse().map(f64::is_infinite),
+ },
+ _ => bug!(),
+ };
+ if is_infinite == Ok(true) {
+ cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
+ lint.build(fluent::lint::overflowing_literal)
+ .set_arg("ty", t.name_str())
+ .set_arg(
+ "lit",
+ cx.sess()
+ .source_map()
+ .span_to_snippet(lit.span)
+ .expect("must get snippet from literal"),
+ )
+ .note(fluent::lint::note)
+ .emit();
+ });
+ }
+ }
+ _ => {}
+ }
+}
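+
+// For example (illustrative): `let x: f32 = 3.5e38;` parses to an infinite
+// `f32` (the type's maximum is roughly 3.4e38), so the float arm above
+// reports it as an overflowing literal.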
+
+impl<'tcx> LateLintPass<'tcx> for TypeLimits {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, e: &'tcx hir::Expr<'tcx>) {
+ match e.kind {
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => {
+                // Propagate the negation, but only if the negation itself isn't negated.
+ if self.negated_expr_id != Some(e.hir_id) {
+ self.negated_expr_id = Some(expr.hir_id);
+ }
+ }
+ hir::ExprKind::Binary(binop, ref l, ref r) => {
+ if is_comparison(binop) && !check_limits(cx, binop, &l, &r) {
+ cx.struct_span_lint(UNUSED_COMPARISONS, e.span, |lint| {
+ lint.build(fluent::lint::unused_comparisons).emit();
+ });
+ }
+ }
+ hir::ExprKind::Lit(ref lit) => lint_literal(cx, self, e, lit),
+ _ => {}
+ };
+
+ fn is_valid<T: cmp::PartialOrd>(binop: hir::BinOp, v: T, min: T, max: T) -> bool {
+ match binop.node {
+ hir::BinOpKind::Lt => v > min && v <= max,
+ hir::BinOpKind::Le => v >= min && v < max,
+ hir::BinOpKind::Gt => v >= min && v < max,
+ hir::BinOpKind::Ge => v > min && v <= max,
+ hir::BinOpKind::Eq | hir::BinOpKind::Ne => v >= min && v <= max,
+ _ => bug!(),
+ }
+ }
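+
+        // For example (illustrative): with `x: u8`, the comparison `x < 256`
+        // reaches `is_valid` with `v = 256`, `min = 0`, `max = 255`; since
+        // `v <= max` fails, the comparison is reported by UNUSED_COMPARISONS.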
+
+ fn rev_binop(binop: hir::BinOp) -> hir::BinOp {
+ source_map::respan(
+ binop.span,
+ match binop.node {
+ hir::BinOpKind::Lt => hir::BinOpKind::Gt,
+ hir::BinOpKind::Le => hir::BinOpKind::Ge,
+ hir::BinOpKind::Gt => hir::BinOpKind::Lt,
+ hir::BinOpKind::Ge => hir::BinOpKind::Le,
+ _ => return binop,
+ },
+ )
+ }
+
+ fn check_limits(
+ cx: &LateContext<'_>,
+ binop: hir::BinOp,
+ l: &hir::Expr<'_>,
+ r: &hir::Expr<'_>,
+ ) -> bool {
+ let (lit, expr, swap) = match (&l.kind, &r.kind) {
+ (&hir::ExprKind::Lit(_), _) => (l, r, true),
+ (_, &hir::ExprKind::Lit(_)) => (r, l, false),
+ _ => return true,
+ };
+ // Normalize the binop so that the literal is always on the RHS in
+ // the comparison
+ let norm_binop = if swap { rev_binop(binop) } else { binop };
+ match *cx.typeck_results().node_type(expr.hir_id).kind() {
+ ty::Int(int_ty) => {
+ let (min, max) = int_ty_range(int_ty);
+ let lit_val: i128 = match lit.kind {
+ hir::ExprKind::Lit(ref li) => match li.node {
+ ast::LitKind::Int(
+ v,
+ ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed,
+ ) => v as i128,
+ _ => return true,
+ },
+ _ => bug!(),
+ };
+ is_valid(norm_binop, lit_val, min, max)
+ }
+ ty::Uint(uint_ty) => {
+ let (min, max): (u128, u128) = uint_ty_range(uint_ty);
+ let lit_val: u128 = match lit.kind {
+ hir::ExprKind::Lit(ref li) => match li.node {
+ ast::LitKind::Int(v, _) => v,
+ _ => return true,
+ },
+ _ => bug!(),
+ };
+ is_valid(norm_binop, lit_val, min, max)
+ }
+ _ => true,
+ }
+ }
+
+ fn is_comparison(binop: hir::BinOp) -> bool {
+ matches!(
+ binop.node,
+ hir::BinOpKind::Eq
+ | hir::BinOpKind::Lt
+ | hir::BinOpKind::Le
+ | hir::BinOpKind::Ne
+ | hir::BinOpKind::Ge
+ | hir::BinOpKind::Gt
+ )
+ }
+ }
+}
+
+declare_lint! {
+ /// The `improper_ctypes` lint detects incorrect use of types in foreign
+ /// modules.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// extern "C" {
+ /// static STATIC: String;
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The compiler has several checks to verify that types used in `extern`
+ /// blocks are safe and follow certain rules to ensure proper
+ /// compatibility with the foreign interfaces. This lint is issued when it
+ /// detects a probable mistake in a definition. The lint usually should
+ /// provide a description of the issue, along with possibly a hint on how
+ /// to resolve it.
+ IMPROPER_CTYPES,
+ Warn,
+ "proper use of libc types in foreign modules"
+}
+
+declare_lint_pass!(ImproperCTypesDeclarations => [IMPROPER_CTYPES]);
+
+declare_lint! {
+ /// The `improper_ctypes_definitions` lint detects incorrect use of
+ /// [`extern` function] definitions.
+ ///
+ /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// # #![allow(unused)]
+ /// pub extern "C" fn str_type(p: &str) { }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// There are many parameter and return types that may be specified in an
+ /// `extern` function that are not compatible with the given ABI. This
+ /// lint is an alert that these types should not be used. The lint usually
+ /// should provide a description of the issue, along with possibly a hint
+ /// on how to resolve it.
+ IMPROPER_CTYPES_DEFINITIONS,
+ Warn,
+ "proper use of libc types in foreign item definitions"
+}
+
+declare_lint_pass!(ImproperCTypesDefinitions => [IMPROPER_CTYPES_DEFINITIONS]);
+
+#[derive(Clone, Copy)]
+pub(crate) enum CItemKind {
+ Declaration,
+ Definition,
+}
+
+struct ImproperCTypesVisitor<'a, 'tcx> {
+ cx: &'a LateContext<'tcx>,
+ mode: CItemKind,
+}
+
+enum FfiResult<'tcx> {
+ FfiSafe,
+ FfiPhantom(Ty<'tcx>),
+ FfiUnsafe { ty: Ty<'tcx>, reason: DiagnosticMessage, help: Option<DiagnosticMessage> },
+}
+
+pub(crate) fn nonnull_optimization_guaranteed<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::AdtDef<'tcx>,
+) -> bool {
+ tcx.has_attr(def.did(), sym::rustc_nonnull_optimization_guaranteed)
+}
+
+/// `repr(transparent)` structs can have a single non-ZST field; this function
+/// returns that field.
+pub fn transparent_newtype_field<'a, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ variant: &'a ty::VariantDef,
+) -> Option<&'a ty::FieldDef> {
+ let param_env = tcx.param_env(variant.def_id);
+ variant.fields.iter().find(|field| {
+ let field_ty = tcx.type_of(field.did);
+ let is_zst = tcx.layout_of(param_env.and(field_ty)).map_or(false, |layout| layout.is_zst());
+ !is_zst
+ })
+}
+
+/// Is type known to be non-null?
+fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
+ let tcx = cx.tcx;
+ match ty.kind() {
+ ty::FnPtr(_) => true,
+ ty::Ref(..) => true,
+ ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true,
+ ty::Adt(def, substs) if def.repr().transparent() && !def.is_union() => {
+ let marked_non_null = nonnull_optimization_guaranteed(tcx, *def);
+
+ if marked_non_null {
+ return true;
+ }
+
+ // `UnsafeCell` has its niche hidden.
+ if def.is_unsafe_cell() {
+ return false;
+ }
+
+ def.variants()
+ .iter()
+ .filter_map(|variant| transparent_newtype_field(cx.tcx, variant))
+ .any(|field| ty_is_known_nonnull(cx, field.ty(tcx, substs), mode))
+ }
+ _ => false,
+ }
+}
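+
+// For example (illustrative): `&T`, `fn()`, and `#[repr(transparent)]` wrappers
+// marked `rustc_nonnull_optimization_guaranteed`, such as `core::ptr::NonNull<T>`
+// and the `core::num::NonZero*` integer types, all count as known non-null here.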
+
+/// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type.
+/// If the type passed in was not scalar, returns None.
+fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
+ let tcx = cx.tcx;
+ Some(match *ty.kind() {
+ ty::Adt(field_def, field_substs) => {
+ let inner_field_ty = {
+ let first_non_zst_ty = field_def
+ .variants()
+ .iter()
+ .filter_map(|v| transparent_newtype_field(cx.tcx, v));
+ debug_assert_eq!(
+ first_non_zst_ty.clone().count(),
+ 1,
+ "Wrong number of fields for transparent type"
+ );
+ first_non_zst_ty
+ .last()
+ .expect("No non-zst fields in transparent type.")
+ .ty(tcx, field_substs)
+ };
+ return get_nullable_type(cx, inner_field_ty);
+ }
+ ty::Int(ty) => tcx.mk_mach_int(ty),
+ ty::Uint(ty) => tcx.mk_mach_uint(ty),
+ ty::RawPtr(ty_mut) => tcx.mk_ptr(ty_mut),
+        // As these types are always non-null, the nullable equivalents of
+        // `Option<T>` for these types are their raw pointer counterparts.
+ ty::Ref(_region, ty, mutbl) => tcx.mk_ptr(ty::TypeAndMut { ty, mutbl }),
+ ty::FnPtr(..) => {
+ // There is no nullable equivalent for Rust's function pointers -- you
+ // must use an Option<fn(..) -> _> to represent it.
+ ty
+ }
+
+ // We should only ever reach this case if ty_is_known_nonnull is extended
+ // to other types.
+ ref unhandled => {
+ debug!(
+ "get_nullable_type: Unhandled scalar kind: {:?} while checking {:?}",
+ unhandled, ty
+ );
+ return None;
+ }
+ })
+}
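+
+// Illustrative mapping for the cases above: `&u8` becomes `*const u8`,
+// `&mut u8` becomes `*mut u8`, and `NonZeroUsize` recurses through its
+// `#[repr(transparent)]` field down to `usize`.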
+
+/// Check if this enum can be safely exported based on the "nullable pointer optimization". If it
+/// can, return the type that `ty` can be safely converted to, otherwise return `None`.
+/// Currently restricted to function pointers, boxes, references, `core::num::NonZero*`,
+/// `core::ptr::NonNull`, and `#[repr(transparent)]` newtypes.
+/// FIXME: This duplicates code in codegen.
+pub(crate) fn repr_nullable_ptr<'tcx>(
+ cx: &LateContext<'tcx>,
+ ty: Ty<'tcx>,
+ ckind: CItemKind,
+) -> Option<Ty<'tcx>> {
+ debug!("is_repr_nullable_ptr(cx, ty = {:?})", ty);
+ if let ty::Adt(ty_def, substs) = ty.kind() {
+ let field_ty = match &ty_def.variants().raw[..] {
+ [var_one, var_two] => match (&var_one.fields[..], &var_two.fields[..]) {
+ ([], [field]) | ([field], []) => field.ty(cx.tcx, substs),
+ _ => return None,
+ },
+ _ => return None,
+ };
+
+ if !ty_is_known_nonnull(cx, field_ty, ckind) {
+ return None;
+ }
+
+        // At this point, the field's type is known to be nonnull and the parent enum is Option-like.
+        // If the computed sizes of the field and the enum differ, the nonnull optimization isn't
+        // being applied (and we've got a problem somewhere).
+ let compute_size_skeleton = |t| SizeSkeleton::compute(t, cx.tcx, cx.param_env).unwrap();
+ if !compute_size_skeleton(ty).same_size(compute_size_skeleton(field_ty)) {
+ bug!("improper_ctypes: Option nonnull optimization not applied?");
+ }
+
+ // Return the nullable type this Option-like enum can be safely represented with.
+ let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi;
+ if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
+ match field_ty_scalar.valid_range(cx) {
+ WrappingRange { start: 0, end }
+ if end == field_ty_scalar.size(&cx.tcx).unsigned_int_max() - 1 =>
+ {
+ return Some(get_nullable_type(cx, field_ty).unwrap());
+ }
+ WrappingRange { start: 1, .. } => {
+ return Some(get_nullable_type(cx, field_ty).unwrap());
+ }
+ WrappingRange { start, end } => {
+ unreachable!("Unhandled start and end range: ({}, {})", start, end)
+ }
+ };
+ }
+ }
+ None
+}
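+
+// For example (illustrative): `Option<&u8>` passes this check and can be
+// represented across FFI as `*const u8`, while `Option<u8>` returns `None`
+// here because `u8` is not known to be non-null.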
+
+impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
+ /// Check if the type is array and emit an unsafe type lint.
+ fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
+ if let ty::Array(..) = ty.kind() {
+ self.emit_ffi_unsafe_type_lint(
+ ty,
+ sp,
+ fluent::lint::improper_ctypes_array_reason,
+ Some(fluent::lint::improper_ctypes_array_help),
+ );
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Checks if the given field's type is "ffi-safe".
+ fn check_field_type_for_ffi(
+ &self,
+ cache: &mut FxHashSet<Ty<'tcx>>,
+ field: &ty::FieldDef,
+ substs: SubstsRef<'tcx>,
+ ) -> FfiResult<'tcx> {
+ let field_ty = field.ty(self.cx.tcx, substs);
+ if field_ty.has_opaque_types() {
+ self.check_type_for_ffi(cache, field_ty)
+ } else {
+ let field_ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, field_ty);
+ self.check_type_for_ffi(cache, field_ty)
+ }
+ }
+
+ /// Checks if the given `VariantDef`'s field types are "ffi-safe".
+ fn check_variant_for_ffi(
+ &self,
+ cache: &mut FxHashSet<Ty<'tcx>>,
+ ty: Ty<'tcx>,
+ def: ty::AdtDef<'tcx>,
+ variant: &ty::VariantDef,
+ substs: SubstsRef<'tcx>,
+ ) -> FfiResult<'tcx> {
+ use FfiResult::*;
+
+ if def.repr().transparent() {
+ // Can assume that at most one field is not a ZST, so only check
+ // that field's type for FFI-safety.
+ if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
+ self.check_field_type_for_ffi(cache, field, substs)
+ } else {
+ // All fields are ZSTs; this means that the type should behave
+ // like (), which is FFI-unsafe
+ FfiUnsafe { ty, reason: fluent::lint::improper_ctypes_struct_zst, help: None }
+ }
+ } else {
+ // We can't completely trust repr(C) markings; make sure the fields are
+ // actually safe.
+ let mut all_phantom = !variant.fields.is_empty();
+ for field in &variant.fields {
+ match self.check_field_type_for_ffi(cache, &field, substs) {
+ FfiSafe => {
+ all_phantom = false;
+ }
+ FfiPhantom(..) if def.is_enum() => {
+ return FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_enum_phantomdata,
+ help: None,
+ };
+ }
+ FfiPhantom(..) => {}
+ r => return r,
+ }
+ }
+
+ if all_phantom { FfiPhantom(ty) } else { FfiSafe }
+ }
+ }
+
+ /// Checks if the given type is "ffi-safe" (has a stable, well-defined
+ /// representation which can be exported to C code).
+ fn check_type_for_ffi(&self, cache: &mut FxHashSet<Ty<'tcx>>, ty: Ty<'tcx>) -> FfiResult<'tcx> {
+ use FfiResult::*;
+
+ let tcx = self.cx.tcx;
+
+ // Protect against infinite recursion, for example
+ // `struct S(*mut S);`.
+ // FIXME: A recursion limit is necessary as well, for irregular
+ // recursive types.
+ if !cache.insert(ty) {
+ return FfiSafe;
+ }
+
+ match *ty.kind() {
+ ty::Adt(def, substs) => {
+ if def.is_box() && matches!(self.mode, CItemKind::Definition) {
+ if ty.boxed_ty().is_sized(tcx.at(DUMMY_SP), self.cx.param_env) {
+ return FfiSafe;
+ } else {
+ return FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_box,
+ help: None,
+ };
+ }
+ }
+ if def.is_phantom_data() {
+ return FfiPhantom(ty);
+ }
+ match def.adt_kind() {
+ AdtKind::Struct | AdtKind::Union => {
+ if !def.repr().c() && !def.repr().transparent() {
+ return FfiUnsafe {
+ ty,
+ reason: if def.is_struct() {
+ fluent::lint::improper_ctypes_struct_layout_reason
+ } else {
+ fluent::lint::improper_ctypes_union_layout_reason
+ },
+ help: if def.is_struct() {
+ Some(fluent::lint::improper_ctypes_struct_layout_help)
+ } else {
+ Some(fluent::lint::improper_ctypes_union_layout_help)
+ },
+ };
+ }
+
+ let is_non_exhaustive =
+ def.non_enum_variant().is_field_list_non_exhaustive();
+ if is_non_exhaustive && !def.did().is_local() {
+ return FfiUnsafe {
+ ty,
+ reason: if def.is_struct() {
+ fluent::lint::improper_ctypes_struct_non_exhaustive
+ } else {
+ fluent::lint::improper_ctypes_union_non_exhaustive
+ },
+ help: None,
+ };
+ }
+
+ if def.non_enum_variant().fields.is_empty() {
+ return FfiUnsafe {
+ ty,
+ reason: if def.is_struct() {
+ fluent::lint::improper_ctypes_struct_fieldless_reason
+ } else {
+ fluent::lint::improper_ctypes_union_fieldless_reason
+ },
+ help: if def.is_struct() {
+ Some(fluent::lint::improper_ctypes_struct_fieldless_help)
+ } else {
+ Some(fluent::lint::improper_ctypes_union_fieldless_help)
+ },
+ };
+ }
+
+ self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), substs)
+ }
+ AdtKind::Enum => {
+ if def.variants().is_empty() {
+ // Empty enums are okay... although sort of useless.
+ return FfiSafe;
+ }
+
+ // Check for a repr() attribute to specify the size of the
+ // discriminant.
+ if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none()
+ {
+ // Special-case types like `Option<extern fn()>`.
+ if repr_nullable_ptr(self.cx, ty, self.mode).is_none() {
+ return FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_enum_repr_reason,
+ help: Some(fluent::lint::improper_ctypes_enum_repr_help),
+ };
+ }
+ }
+
+ if def.is_variant_list_non_exhaustive() && !def.did().is_local() {
+ return FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_non_exhaustive,
+ help: None,
+ };
+ }
+
+ // Check the contained variants.
+ for variant in def.variants() {
+ let is_non_exhaustive = variant.is_field_list_non_exhaustive();
+ if is_non_exhaustive && !variant.def_id.is_local() {
+ return FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_non_exhaustive_variant,
+ help: None,
+ };
+ }
+
+ match self.check_variant_for_ffi(cache, ty, def, variant, substs) {
+ FfiSafe => (),
+ r => return r,
+ }
+ }
+
+ FfiSafe
+ }
+ }
+ }
+
+ ty::Char => FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_char_reason,
+ help: Some(fluent::lint::improper_ctypes_char_help),
+ },
+
+ ty::Int(ty::IntTy::I128) | ty::Uint(ty::UintTy::U128) => {
+ FfiUnsafe { ty, reason: fluent::lint::improper_ctypes_128bit, help: None }
+ }
+
+ // Primitive types with a stable representation.
+ ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe,
+
+ ty::Slice(_) => FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_slice_reason,
+ help: Some(fluent::lint::improper_ctypes_slice_help),
+ },
+
+ ty::Dynamic(..) => {
+ FfiUnsafe { ty, reason: fluent::lint::improper_ctypes_dyn, help: None }
+ }
+
+ ty::Str => FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_str_reason,
+ help: Some(fluent::lint::improper_ctypes_str_help),
+ },
+
+ ty::Tuple(..) => FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_tuple_reason,
+ help: Some(fluent::lint::improper_ctypes_tuple_help),
+ },
+
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _)
+ if {
+ matches!(self.mode, CItemKind::Definition)
+ && ty.is_sized(self.cx.tcx.at(DUMMY_SP), self.cx.param_env)
+ } =>
+ {
+ FfiSafe
+ }
+
+ ty::RawPtr(ty::TypeAndMut { ty, .. })
+ if match ty.kind() {
+ ty::Tuple(tuple) => tuple.is_empty(),
+ _ => false,
+ } =>
+ {
+ FfiSafe
+ }
+
+ ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => {
+ self.check_type_for_ffi(cache, ty)
+ }
+
+ ty::Array(inner_ty, _) => self.check_type_for_ffi(cache, inner_ty),
+
+ ty::FnPtr(sig) => {
+ if self.is_internal_abi(sig.abi()) {
+ return FfiUnsafe {
+ ty,
+ reason: fluent::lint::improper_ctypes_fnptr_reason,
+ help: Some(fluent::lint::improper_ctypes_fnptr_help),
+ };
+ }
+
+ let sig = tcx.erase_late_bound_regions(sig);
+ if !sig.output().is_unit() {
+ let r = self.check_type_for_ffi(cache, sig.output());
+ match r {
+ FfiSafe => {}
+ _ => {
+ return r;
+ }
+ }
+ }
+ for arg in sig.inputs() {
+ let r = self.check_type_for_ffi(cache, *arg);
+ match r {
+ FfiSafe => {}
+ _ => {
+ return r;
+ }
+ }
+ }
+ FfiSafe
+ }
+
+ ty::Foreign(..) => FfiSafe,
+
+ // While opaque types are checked for earlier, if a projection in a struct field
+ // normalizes to an opaque type, then it will reach this branch.
+ ty::Opaque(..) => {
+ FfiUnsafe { ty, reason: fluent::lint::improper_ctypes_opaque, help: None }
+ }
+
+ // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe,
+ // so they are currently ignored for the purposes of this lint.
+ ty::Param(..) | ty::Projection(..) if matches!(self.mode, CItemKind::Definition) => {
+ FfiSafe
+ }
+
+ ty::Param(..)
+ | ty::Projection(..)
+ | ty::Infer(..)
+ | ty::Bound(..)
+ | ty::Error(_)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Placeholder(..)
+ | ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty),
+ }
+ }
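+
+    // Illustrative outcomes of the check above: `*const u8` and a
+    // `#[repr(C)]` struct of scalar fields are `FfiSafe`, while `String`,
+    // `&str`, and bare tuples are `FfiUnsafe`.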
+
+ fn emit_ffi_unsafe_type_lint(
+ &mut self,
+ ty: Ty<'tcx>,
+ sp: Span,
+ note: DiagnosticMessage,
+ help: Option<DiagnosticMessage>,
+ ) {
+ let lint = match self.mode {
+ CItemKind::Declaration => IMPROPER_CTYPES,
+ CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS,
+ };
+
+ self.cx.struct_span_lint(lint, sp, |lint| {
+ let item_description = match self.mode {
+ CItemKind::Declaration => "block",
+ CItemKind::Definition => "fn",
+ };
+ let mut diag = lint.build(fluent::lint::improper_ctypes);
+ diag.set_arg("ty", ty);
+ diag.set_arg("desc", item_description);
+ diag.span_label(sp, fluent::lint::label);
+ if let Some(help) = help {
+ diag.help(help);
+ }
+ diag.note(note);
+ if let ty::Adt(def, _) = ty.kind() {
+ if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did()) {
+ diag.span_note(sp, fluent::lint::note);
+ }
+ }
+ diag.emit();
+ });
+ }
+
+ fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
+ struct ProhibitOpaqueTypes<'a, 'tcx> {
+ cx: &'a LateContext<'tcx>,
+ }
+
+ impl<'a, 'tcx> ty::visit::TypeVisitor<'tcx> for ProhibitOpaqueTypes<'a, 'tcx> {
+ type BreakTy = Ty<'tcx>;
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match ty.kind() {
+ ty::Opaque(..) => ControlFlow::Break(ty),
+ // Consider opaque types within projections FFI-safe if they do not normalize
+ // to more opaque types.
+ ty::Projection(..) => {
+ let ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, ty);
+
+ // If `ty` is an opaque type directly then `super_visit_with` won't invoke
+ // this function again.
+ if ty.has_opaque_types() {
+ self.visit_ty(ty)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+ _ => ty.super_visit_with(self),
+ }
+ }
+ }
+
+ if let Some(ty) = ty.visit_with(&mut ProhibitOpaqueTypes { cx: self.cx }).break_value() {
+ self.emit_ffi_unsafe_type_lint(ty, sp, fluent::lint::improper_ctypes_opaque, None);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn check_type_for_ffi_and_report_errors(
+ &mut self,
+ sp: Span,
+ ty: Ty<'tcx>,
+ is_static: bool,
+ is_return_type: bool,
+ ) {
+ // We have to check for opaque types before `normalize_erasing_regions`,
+ // which will replace opaque types with their underlying concrete type.
+ if self.check_for_opaque_ty(sp, ty) {
+ // We've already emitted an error due to an opaque type.
+ return;
+ }
+
+        // It is only OK to use this function because extern fns cannot have
+        // any generic types right now.
+ let ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, ty);
+
+ // C doesn't really support passing arrays by value - the only way to pass an array by value
+ // is through a struct. So, first test that the top level isn't an array, and then
+ // recursively check the types inside.
+ if !is_static && self.check_for_array_ty(sp, ty) {
+ return;
+ }
+
+ // Don't report FFI errors for unit return types. This check exists here, and not in
+ // `check_foreign_fn` (where it would make more sense) so that normalization has definitely
+ // happened.
+ if is_return_type && ty.is_unit() {
+ return;
+ }
+
+ match self.check_type_for_ffi(&mut FxHashSet::default(), ty) {
+ FfiResult::FfiSafe => {}
+ FfiResult::FfiPhantom(ty) => {
+ self.emit_ffi_unsafe_type_lint(
+ ty,
+ sp,
+ fluent::lint::improper_ctypes_only_phantomdata,
+ None,
+ );
+ }
+            // If `ty` is a `repr(transparent)` newtype, and the non-zero-sized type is a generic
+            // argument which, after substitution, is `()`, then this branch can be hit.
+ FfiResult::FfiUnsafe { ty, .. } if is_return_type && ty.is_unit() => {}
+ FfiResult::FfiUnsafe { ty, reason, help } => {
+ self.emit_ffi_unsafe_type_lint(ty, sp, reason, help);
+ }
+ }
+ }
+
+ fn check_foreign_fn(&mut self, id: hir::HirId, decl: &hir::FnDecl<'_>) {
+ let def_id = self.cx.tcx.hir().local_def_id(id);
+ let sig = self.cx.tcx.fn_sig(def_id);
+ let sig = self.cx.tcx.erase_late_bound_regions(sig);
+
+ for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) {
+ self.check_type_for_ffi_and_report_errors(input_hir.span, *input_ty, false, false);
+ }
+
+ if let hir::FnRetTy::Return(ref ret_hir) = decl.output {
+ let ret_ty = sig.output();
+ self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty, false, true);
+ }
+ }
+
+ fn check_foreign_static(&mut self, id: hir::HirId, span: Span) {
+ let def_id = self.cx.tcx.hir().local_def_id(id);
+ let ty = self.cx.tcx.type_of(def_id);
+ self.check_type_for_ffi_and_report_errors(span, ty, true, false);
+ }
+
+ fn is_internal_abi(&self, abi: SpecAbi) -> bool {
+ matches!(
+ abi,
+ SpecAbi::Rust | SpecAbi::RustCall | SpecAbi::RustIntrinsic | SpecAbi::PlatformIntrinsic
+ )
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations {
+ fn check_foreign_item(&mut self, cx: &LateContext<'_>, it: &hir::ForeignItem<'_>) {
+ let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration };
+ let abi = cx.tcx.hir().get_foreign_abi(it.hir_id());
+
+ if !vis.is_internal_abi(abi) {
+ match it.kind {
+ hir::ForeignItemKind::Fn(ref decl, _, _) => {
+ vis.check_foreign_fn(it.hir_id(), decl);
+ }
+ hir::ForeignItemKind::Static(ref ty, _) => {
+ vis.check_foreign_static(it.hir_id(), ty.span);
+ }
+ hir::ForeignItemKind::Type => (),
+ }
+ }
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions {
+ fn check_fn(
+ &mut self,
+ cx: &LateContext<'tcx>,
+ kind: hir::intravisit::FnKind<'tcx>,
+ decl: &'tcx hir::FnDecl<'_>,
+ _: &'tcx hir::Body<'_>,
+ _: Span,
+ hir_id: hir::HirId,
+ ) {
+ use hir::intravisit::FnKind;
+
+ let abi = match kind {
+ FnKind::ItemFn(_, _, header, ..) => header.abi,
+ FnKind::Method(_, sig, ..) => sig.header.abi,
+ _ => return,
+ };
+
+ let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition };
+ if !vis.is_internal_abi(abi) {
+ vis.check_foreign_fn(hir_id, decl);
+ }
+ }
+}
+
+declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
+
+impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
+ fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
+ if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
+ let t = cx.tcx.type_of(it.def_id);
+ let ty = cx.tcx.erase_regions(t);
+ let Ok(layout) = cx.layout_of(ty) else { return };
+ let Variants::Multiple {
+ tag_encoding: TagEncoding::Direct, tag, ref variants, ..
+ } = &layout.variants else {
+ return
+ };
+
+ let tag_size = tag.size(&cx.tcx).bytes();
+
+ debug!(
+ "enum `{}` is {} bytes large with layout:\n{:#?}",
+ t,
+ layout.size.bytes(),
+ layout
+ );
+
+ let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
+ .map(|(variant, variant_layout)| {
+ // Subtract the size of the enum tag.
+ let bytes = variant_layout.size().bytes().saturating_sub(tag_size);
+
+ debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
+ bytes
+ })
+ .enumerate()
+ .fold((0, 0, 0), |(l, s, li), (idx, size)| {
+ if size > l {
+ (size, l, idx)
+ } else if size > s {
+ (l, size, li)
+ } else {
+ (l, s, li)
+ }
+ });
+
+ // We only warn if the largest variant is at least thrice as large as
+ // the second-largest.
+ if largest > slargest * 3 && slargest > 0 {
+ cx.struct_span_lint(
+ VARIANT_SIZE_DIFFERENCES,
+ enum_definition.variants[largest_index].span,
+ |lint| {
+ lint.build(fluent::lint::variant_size_differences)
+ .set_arg("largest", largest)
+ .emit();
+ },
+ );
+ }
+ }
+ }
+}
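+
+// For example (illustrative): an enum whose largest variant payload is 100
+// bytes and whose second-largest is 8 bytes trips the `largest > slargest * 3`
+// check above, while payloads of 16 and 8 bytes do not.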
+
+declare_lint! {
+ /// The `invalid_atomic_ordering` lint detects passing an `Ordering`
+ /// to an atomic operation that does not support that ordering.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// # use core::sync::atomic::{AtomicU8, Ordering};
+ /// let atom = AtomicU8::new(0);
+ /// let value = atom.load(Ordering::Release);
+ /// # let _ = value;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Some atomic operations are only supported for a subset of the
+ /// `atomic::Ordering` variants. Passing an unsupported variant will cause
+ /// an unconditional panic at runtime, which is detected by this lint.
+ ///
+    /// This lint will trigger in the following cases (where `AtomicType` is an
+    /// atomic type from `core::sync::atomic`, such as `AtomicBool`,
+    /// `AtomicPtr`, `AtomicUsize`, or any of the other integer atomics):
+ ///
+ /// - Passing `Ordering::Acquire` or `Ordering::AcqRel` to
+ /// `AtomicType::store`.
+ ///
+ /// - Passing `Ordering::Release` or `Ordering::AcqRel` to
+ /// `AtomicType::load`.
+ ///
+ /// - Passing `Ordering::Relaxed` to `core::sync::atomic::fence` or
+ /// `core::sync::atomic::compiler_fence`.
+ ///
+ /// - Passing `Ordering::Release` or `Ordering::AcqRel` as the failure
+ /// ordering for any of `AtomicType::compare_exchange`,
+ /// `AtomicType::compare_exchange_weak`, or `AtomicType::fetch_update`.
+ INVALID_ATOMIC_ORDERING,
+ Deny,
+ "usage of invalid atomic ordering in atomic operations and memory fences"
+}
+
+declare_lint_pass!(InvalidAtomicOrdering => [INVALID_ATOMIC_ORDERING]);
+
+impl InvalidAtomicOrdering {
+ fn inherent_atomic_method_call<'hir>(
+ cx: &LateContext<'_>,
+ expr: &Expr<'hir>,
+ recognized_names: &[Symbol], // used for fast path calculation
+ ) -> Option<(Symbol, &'hir [Expr<'hir>])> {
+ const ATOMIC_TYPES: &[Symbol] = &[
+ sym::AtomicBool,
+ sym::AtomicPtr,
+ sym::AtomicUsize,
+ sym::AtomicU8,
+ sym::AtomicU16,
+ sym::AtomicU32,
+ sym::AtomicU64,
+ sym::AtomicU128,
+ sym::AtomicIsize,
+ sym::AtomicI8,
+ sym::AtomicI16,
+ sym::AtomicI32,
+ sym::AtomicI64,
+ sym::AtomicI128,
+ ];
+ if let ExprKind::MethodCall(ref method_path, args, _) = &expr.kind
+ && recognized_names.contains(&method_path.ident.name)
+ && let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
+ && let Some(impl_did) = cx.tcx.impl_of_method(m_def_id)
+ && let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def()
+ // skip extension traits, only lint functions from the standard library
+ && cx.tcx.trait_id_of_impl(impl_did).is_none()
+ && let parent = cx.tcx.parent(adt.did())
+ && cx.tcx.is_diagnostic_item(sym::atomic_mod, parent)
+ && ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did()))
+ {
+ return Some((method_path.ident.name, args));
+ }
+ None
+ }
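+
+    // For example (illustrative): `atom.load(Ordering::SeqCst)` on an
+    // `AtomicU8` yields `Some((sym::load, args))` above, while a method named
+    // `load` on a user-defined type yields `None`.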
+
+ fn match_ordering(cx: &LateContext<'_>, ord_arg: &Expr<'_>) -> Option<Symbol> {
+ let ExprKind::Path(ref ord_qpath) = ord_arg.kind else { return None };
+ let did = cx.qpath_res(ord_qpath, ord_arg.hir_id).opt_def_id()?;
+ let tcx = cx.tcx;
+ let atomic_ordering = tcx.get_diagnostic_item(sym::Ordering);
+ let name = tcx.item_name(did);
+ let parent = tcx.parent(did);
+ [sym::Relaxed, sym::Release, sym::Acquire, sym::AcqRel, sym::SeqCst].into_iter().find(
+ |&ordering| {
+ name == ordering
+ && (Some(parent) == atomic_ordering
+ // needed in case this is a ctor, not a variant
+ || tcx.opt_parent(parent) == atomic_ordering)
+ },
+ )
+ }
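+
+    // For example (illustrative): `Ordering::Release` resolves to
+    // `Some(sym::Release)` above, while a variant of a user-defined `Ordering`
+    // enum yields `None`, since its parent is not `core::sync::atomic::Ordering`.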
+
+ fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
+ if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store])
+ && let Some((ordering_arg, invalid_ordering)) = match method {
+ sym::load => Some((&args[1], sym::Release)),
+ sym::store => Some((&args[2], sym::Acquire)),
+ _ => None,
+ }
+ && let Some(ordering) = Self::match_ordering(cx, ordering_arg)
+ && (ordering == invalid_ordering || ordering == sym::AcqRel)
+ {
+ cx.struct_span_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, |diag| {
+ if method == sym::load {
+ diag.build(fluent::lint::atomic_ordering_load)
+ .help(fluent::lint::help)
+ .emit()
+ } else {
+ debug_assert_eq!(method, sym::store);
+ diag.build(fluent::lint::atomic_ordering_store)
+ .help(fluent::lint::help)
+ .emit();
+ }
+ });
+ }
+ }
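+
+    // For example (illustrative): `atom.load(Ordering::Release)` and
+    // `atom.store(0, Ordering::Acquire)` are both rejected above, as is
+    // `Ordering::AcqRel` for either operation.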
+
+ fn check_memory_fence(cx: &LateContext<'_>, expr: &Expr<'_>) {
+ if let ExprKind::Call(ref func, ref args) = expr.kind
+ && let ExprKind::Path(ref func_qpath) = func.kind
+ && let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id()
+ && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence))
+ && Self::match_ordering(cx, &args[0]) == Some(sym::Relaxed)
+ {
+ cx.struct_span_lint(INVALID_ATOMIC_ORDERING, args[0].span, |diag| {
+ diag.build(fluent::lint::atomic_ordering_fence)
+ .help(fluent::lint::help)
+ .emit();
+ });
+ }
+ }
+
+ fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
+        let Some((method, args)) = Self::inherent_atomic_method_call(
+            cx,
+            expr,
+            &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak],
+        ) else {
+            return;
+        };
+
+ let fail_order_arg = match method {
+ sym::fetch_update => &args[2],
+ sym::compare_exchange | sym::compare_exchange_weak => &args[4],
+ _ => return,
+ };
+
+ let Some(fail_ordering) = Self::match_ordering(cx, fail_order_arg) else { return };
+
+ if matches!(fail_ordering, sym::Release | sym::AcqRel) {
+ #[derive(LintDiagnostic)]
+ #[lint(lint::atomic_ordering_invalid)]
+ #[help]
+ struct InvalidAtomicOrderingDiag {
+ method: Symbol,
+ #[label]
+ fail_order_arg_span: Span,
+ }
+
+ cx.emit_spanned_lint(
+ INVALID_ATOMIC_ORDERING,
+ fail_order_arg.span,
+ InvalidAtomicOrderingDiag { method, fail_order_arg_span: fail_order_arg.span },
+ );
+ }
+ }
+}
+
+impl<'tcx> LateLintPass<'tcx> for InvalidAtomicOrdering {
+ fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
+ Self::check_atomic_load_store(cx, expr);
+ Self::check_memory_fence(cx, expr);
+ Self::check_atomic_compare_exchange(cx, expr);
+ }
+}
diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs
new file mode 100644
index 000000000..b6cf18291
--- /dev/null
+++ b/compiler/rustc_lint/src/unused.rs
@@ -0,0 +1,1197 @@
+use crate::Lint;
+use crate::{EarlyContext, EarlyLintPass, LateContext, LateLintPass, LintContext};
+use rustc_ast as ast;
+use rustc_ast::util::{classify, parser};
+use rustc_ast::{ExprKind, StmtKind};
+use rustc_errors::{fluent, pluralize, Applicability, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::adjustment;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::symbol::Symbol;
+use rustc_span::symbol::{kw, sym};
+use rustc_span::{BytePos, Span, DUMMY_SP};
+
+declare_lint! {
+ /// The `unused_must_use` lint detects unused result of a type flagged as
+ /// `#[must_use]`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// fn returns_result() -> Result<(), ()> {
+ /// Ok(())
+ /// }
+ ///
+ /// fn main() {
+ /// returns_result();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The `#[must_use]` attribute is an indicator that it is a mistake to
+ /// ignore the value. See [the reference] for more details.
+ ///
+ /// [the reference]: https://doc.rust-lang.org/reference/attributes/diagnostics.html#the-must_use-attribute
+ pub UNUSED_MUST_USE,
+ Warn,
+ "unused result of a type flagged as `#[must_use]`",
+ report_in_external_macro
+}
+
+declare_lint! {
+ /// The `unused_results` lint checks for the unused result of an
+ /// expression in a statement.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(unused_results)]
+ /// fn foo<T>() -> T { panic!() }
+ ///
+ /// fn main() {
+ /// foo::<usize>();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Ignoring the return value of a function may indicate a mistake. In
+    /// cases where it is almost certain that the result should be used, it is
+ /// recommended to annotate the function with the [`must_use` attribute].
+ /// Failure to use such a return value will trigger the [`unused_must_use`
+    /// lint], which is warn-by-default. The `unused_results` lint is
+ /// essentially the same, but triggers for *all* return values.
+ ///
+ /// This lint is "allow" by default because it can be noisy, and may not be
+ /// an actual problem. For example, calling the `remove` method of a `Vec`
+ /// or `HashMap` returns the previous value, which you may not care about.
+ /// Using this lint would require explicitly ignoring or discarding such
+ /// values.
+ ///
+ /// [`must_use` attribute]: https://doc.rust-lang.org/reference/attributes/diagnostics.html#the-must_use-attribute
+ /// [`unused_must_use` lint]: warn-by-default.html#unused-must-use
+ pub UNUSED_RESULTS,
+ Allow,
+ "unused result of an expression in a statement"
+}
+
+declare_lint_pass!(UnusedResults => [UNUSED_MUST_USE, UNUSED_RESULTS]);
+
+impl<'tcx> LateLintPass<'tcx> for UnusedResults {
+ fn check_stmt(&mut self, cx: &LateContext<'_>, s: &hir::Stmt<'_>) {
+ let expr = match s.kind {
+ hir::StmtKind::Semi(ref expr) => &**expr,
+ _ => return,
+ };
+
+ if let hir::ExprKind::Ret(..) = expr.kind {
+ return;
+ }
+
+ let ty = cx.typeck_results().expr_ty(&expr);
+ let type_permits_lack_of_use = check_must_use_ty(cx, ty, &expr, s.span, "", "", 1);
+
+ let mut fn_warned = false;
+ let mut op_warned = false;
+ let maybe_def_id = match expr.kind {
+ hir::ExprKind::Call(ref callee, _) => {
+ match callee.kind {
+ hir::ExprKind::Path(ref qpath) => {
+ match cx.qpath_res(qpath, callee.hir_id) {
+ Res::Def(DefKind::Fn | DefKind::AssocFn, def_id) => Some(def_id),
+ // `Res::Local` if it was a closure, for which we
+ // do not currently support must-use linting
+ _ => None,
+ }
+ }
+ _ => None,
+ }
+ }
+ hir::ExprKind::MethodCall(..) => cx.typeck_results().type_dependent_def_id(expr.hir_id),
+ _ => None,
+ };
+ if let Some(def_id) = maybe_def_id {
+ fn_warned = check_must_use_def(cx, def_id, s.span, "return value of ", "");
+ } else if type_permits_lack_of_use {
+ // We don't warn about unused unit or uninhabited types.
+ // (See https://github.com/rust-lang/rust/issues/43806 for details.)
+ return;
+ }
+
+ let must_use_op = match expr.kind {
+ // Hardcoding operators here seemed more expedient than the
+ // refactoring that would be needed to look up the `#[must_use]`
+ // attribute which does exist on the comparison trait methods
+ hir::ExprKind::Binary(bin_op, ..) => match bin_op.node {
+ hir::BinOpKind::Eq
+ | hir::BinOpKind::Lt
+ | hir::BinOpKind::Le
+ | hir::BinOpKind::Ne
+ | hir::BinOpKind::Ge
+ | hir::BinOpKind::Gt => Some("comparison"),
+ hir::BinOpKind::Add
+ | hir::BinOpKind::Sub
+ | hir::BinOpKind::Div
+ | hir::BinOpKind::Mul
+ | hir::BinOpKind::Rem => Some("arithmetic operation"),
+ hir::BinOpKind::And | hir::BinOpKind::Or => Some("logical operation"),
+ hir::BinOpKind::BitXor
+ | hir::BinOpKind::BitAnd
+ | hir::BinOpKind::BitOr
+ | hir::BinOpKind::Shl
+ | hir::BinOpKind::Shr => Some("bitwise operation"),
+ },
+ hir::ExprKind::AddrOf(..) => Some("borrow"),
+ hir::ExprKind::Unary(..) => Some("unary operation"),
+ _ => None,
+ };
+
+ if let Some(must_use_op) = must_use_op {
+ cx.struct_span_lint(UNUSED_MUST_USE, expr.span, |lint| {
+ lint.build(fluent::lint::unused_op)
+ .set_arg("op", must_use_op)
+ .span_label(expr.span, fluent::lint::label)
+ .span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ fluent::lint::suggestion,
+ "let _ = ",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ op_warned = true;
+ }
+
+ if !(type_permits_lack_of_use || fn_warned || op_warned) {
+ cx.struct_span_lint(UNUSED_RESULTS, s.span, |lint| {
+ lint.build(fluent::lint::unused_result).set_arg("ty", ty).emit();
+ });
+ }
+
+ // Returns whether an error has been emitted (and thus another does not need to be later).
+ fn check_must_use_ty<'tcx>(
+ cx: &LateContext<'tcx>,
+ ty: Ty<'tcx>,
+ expr: &hir::Expr<'_>,
+ span: Span,
+ descr_pre: &str,
+ descr_post: &str,
+ plural_len: usize,
+ ) -> bool {
+ if ty.is_unit()
+ || cx.tcx.is_ty_uninhabited_from(
+ cx.tcx.parent_module(expr.hir_id).to_def_id(),
+ ty,
+ cx.param_env,
+ )
+ {
+ return true;
+ }
+
+ let plural_suffix = pluralize!(plural_len);
+
+ match *ty.kind() {
+ ty::Adt(..) if ty.is_box() => {
+ let boxed_ty = ty.boxed_ty();
+ let descr_pre = &format!("{}boxed ", descr_pre);
+ check_must_use_ty(cx, boxed_ty, expr, span, descr_pre, descr_post, plural_len)
+ }
+ ty::Adt(def, _) => check_must_use_def(cx, def.did(), span, descr_pre, descr_post),
+ ty::Opaque(def, _) => {
+ let mut has_emitted = false;
+ for &(predicate, _) in cx.tcx.explicit_item_bounds(def) {
+ // We only look at the `DefId`, so it is safe to skip the binder here.
+ if let ty::PredicateKind::Trait(ref poly_trait_predicate) =
+ predicate.kind().skip_binder()
+ {
+ let def_id = poly_trait_predicate.trait_ref.def_id;
+ let descr_pre =
+ &format!("{}implementer{} of ", descr_pre, plural_suffix,);
+ if check_must_use_def(cx, def_id, span, descr_pre, descr_post) {
+ has_emitted = true;
+ break;
+ }
+ }
+ }
+ has_emitted
+ }
+ ty::Dynamic(binder, _) => {
+ let mut has_emitted = false;
+ for predicate in binder.iter() {
+ if let ty::ExistentialPredicate::Trait(ref trait_ref) =
+ predicate.skip_binder()
+ {
+ let def_id = trait_ref.def_id;
+ let descr_post =
+ &format!(" trait object{}{}", plural_suffix, descr_post,);
+ if check_must_use_def(cx, def_id, span, descr_pre, descr_post) {
+ has_emitted = true;
+ break;
+ }
+ }
+ }
+ has_emitted
+ }
+ ty::Tuple(ref tys) => {
+ let mut has_emitted = false;
+ let comps = if let hir::ExprKind::Tup(comps) = expr.kind {
+ debug_assert_eq!(comps.len(), tys.len());
+ comps
+ } else {
+ &[]
+ };
+ for (i, ty) in tys.iter().enumerate() {
+ let descr_post = &format!(" in tuple element {}", i);
+ let e = comps.get(i).unwrap_or(expr);
+ let span = e.span;
+ if check_must_use_ty(cx, ty, e, span, descr_pre, descr_post, plural_len) {
+ has_emitted = true;
+ }
+ }
+ has_emitted
+ }
+ ty::Array(ty, len) => match len.try_eval_usize(cx.tcx, cx.param_env) {
+ // If the array is empty we don't lint, to avoid false positives
+ Some(0) | None => false,
+ // If the array is definitely non-empty, we can do `#[must_use]` checking.
+ Some(n) => {
+ let descr_pre = &format!("{}array{} of ", descr_pre, plural_suffix,);
+ check_must_use_ty(cx, ty, expr, span, descr_pre, descr_post, n as usize + 1)
+ }
+ },
+ ty::Closure(..) => {
+ cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
+                    // FIXME(davidtwco): this isn't properly translatable because of the
+                    // pre/post strings
+ lint.build(fluent::lint::unused_closure)
+ .set_arg("count", plural_len)
+ .set_arg("pre", descr_pre)
+ .set_arg("post", descr_post)
+ .note(fluent::lint::note)
+ .emit();
+ });
+ true
+ }
+ ty::Generator(..) => {
+ cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
+                    // FIXME(davidtwco): this isn't properly translatable because of the
+                    // pre/post strings
+ lint.build(fluent::lint::unused_generator)
+ .set_arg("count", plural_len)
+ .set_arg("pre", descr_pre)
+ .set_arg("post", descr_post)
+ .note(fluent::lint::note)
+ .emit();
+ });
+ true
+ }
+ _ => false,
+ }
+ }
+
+ // Returns whether an error has been emitted (and thus another does not need to be later).
+ // FIXME: Args desc_{pre,post}_path could be made lazy by taking Fn() -> &str, but this
+        // would make calling it a bit awkward. Could also take String (so args are moved), but
+ // this would still require a copy into the format string, which would only be executed
+ // when needed.
+ fn check_must_use_def(
+ cx: &LateContext<'_>,
+ def_id: DefId,
+ span: Span,
+ descr_pre_path: &str,
+ descr_post_path: &str,
+ ) -> bool {
+ if let Some(attr) = cx.tcx.get_attr(def_id, sym::must_use) {
+ cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
+                // FIXME(davidtwco): this isn't properly translatable because of the pre/post
+                // strings
+ let mut err = lint.build(fluent::lint::unused_def);
+ err.set_arg("pre", descr_pre_path);
+ err.set_arg("post", descr_post_path);
+ err.set_arg("def", cx.tcx.def_path_str(def_id));
+ // check for #[must_use = "..."]
+ if let Some(note) = attr.value_str() {
+ err.note(note.as_str());
+ }
+ err.emit();
+ });
+ true
+ } else {
+ false
+ }
+ }
+ }
+}
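+
+// For example (illustrative): a bare `returns_result();` statement is reported
+// via `check_must_use_def` (since `Result` is `#[must_use]`), and `1 + 2;` is
+// reported via the hardcoded operator table as an "arithmetic operation".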
+
+declare_lint! {
+ /// The `path_statements` lint detects path statements with no effect.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let x = 42;
+ ///
+ /// x;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It is usually a mistake to have a statement that has no effect.
+ pub PATH_STATEMENTS,
+ Warn,
+ "path statements with no effect"
+}
+
+declare_lint_pass!(PathStatements => [PATH_STATEMENTS]);
+
+impl<'tcx> LateLintPass<'tcx> for PathStatements {
+ fn check_stmt(&mut self, cx: &LateContext<'_>, s: &hir::Stmt<'_>) {
+ if let hir::StmtKind::Semi(expr) = s.kind {
+ if let hir::ExprKind::Path(_) = expr.kind {
+ cx.struct_span_lint(PATH_STATEMENTS, s.span, |lint| {
+ let ty = cx.typeck_results().expr_ty(expr);
+ if ty.needs_drop(cx.tcx, cx.param_env) {
+ let mut lint = lint.build(fluent::lint::path_statement_drop);
+ if let Ok(snippet) = cx.sess().source_map().span_to_snippet(expr.span) {
+ lint.span_suggestion(
+ s.span,
+ fluent::lint::suggestion,
+ format!("drop({});", snippet),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ lint.span_help(s.span, fluent::lint::suggestion);
+ }
+ lint.emit();
+ } else {
+ lint.build(fluent::lint::path_statement_no_effect).emit();
+ }
+ });
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum UnusedDelimsCtx {
+ FunctionArg,
+ MethodArg,
+ AssignedValue,
+ AssignedValueLetElse,
+ IfCond,
+ WhileCond,
+ ForIterExpr,
+ MatchScrutineeExpr,
+ ReturnValue,
+ BlockRetValue,
+ LetScrutineeExpr,
+ ArrayLenExpr,
+ AnonConst,
+ MatchArmExpr,
+}
+
+impl From<UnusedDelimsCtx> for &'static str {
+ fn from(ctx: UnusedDelimsCtx) -> &'static str {
+ match ctx {
+ UnusedDelimsCtx::FunctionArg => "function argument",
+ UnusedDelimsCtx::MethodArg => "method argument",
+ UnusedDelimsCtx::AssignedValue | UnusedDelimsCtx::AssignedValueLetElse => {
+ "assigned value"
+ }
+ UnusedDelimsCtx::IfCond => "`if` condition",
+ UnusedDelimsCtx::WhileCond => "`while` condition",
+ UnusedDelimsCtx::ForIterExpr => "`for` iterator expression",
+ UnusedDelimsCtx::MatchScrutineeExpr => "`match` scrutinee expression",
+ UnusedDelimsCtx::ReturnValue => "`return` value",
+ UnusedDelimsCtx::BlockRetValue => "block return value",
+ UnusedDelimsCtx::LetScrutineeExpr => "`let` scrutinee expression",
+ UnusedDelimsCtx::ArrayLenExpr | UnusedDelimsCtx::AnonConst => "const expression",
+ UnusedDelimsCtx::MatchArmExpr => "match arm expression",
+ }
+ }
+}
+
+/// Used by both `UnusedParens` and `UnusedBraces` to prevent code duplication.
+trait UnusedDelimLint {
+ const DELIM_STR: &'static str;
+
+ /// Due to `ref` pattern, there can be a difference between using
+ /// `{ expr }` and `expr` in pattern-matching contexts. This means
+ /// that we should only lint `unused_parens` and not `unused_braces`
+ /// in this case.
+ ///
+ /// ```rust
+ /// let mut a = 7;
+ /// let ref b = { a }; // We actually borrow a copy of `a` here.
+ /// a += 1; // By mutating `a` we invalidate any borrows of `a`.
+ /// assert_eq!(b + 1, a); // `b` does not borrow `a`, so we can still use it here.
+ /// ```
+ const LINT_EXPR_IN_PATTERN_MATCHING_CTX: bool;
+
+    // this cannot be a constant as it refers to a static.
+ fn lint(&self) -> &'static Lint;
+
+ fn check_unused_delims_expr(
+ &self,
+ cx: &EarlyContext<'_>,
+ value: &ast::Expr,
+ ctx: UnusedDelimsCtx,
+ followed_by_block: bool,
+ left_pos: Option<BytePos>,
+ right_pos: Option<BytePos>,
+ );
+
+ fn is_expr_delims_necessary(
+ inner: &ast::Expr,
+ followed_by_block: bool,
+ followed_by_else: bool,
+ ) -> bool {
+ if followed_by_else {
+ match inner.kind {
+ ast::ExprKind::Binary(op, ..) if op.node.lazy() => return true,
+ _ if classify::expr_trailing_brace(inner).is_some() => return true,
+ _ => {}
+ }
+ }
+
+ // Prevent false-positives in cases like `fn x() -> u8 { ({ 0 } + 1) }`
+ let lhs_needs_parens = {
+ let mut innermost = inner;
+ loop {
+ innermost = match &innermost.kind {
+ ExprKind::Binary(_, lhs, _rhs) => lhs,
+ ExprKind::Call(fn_, _params) => fn_,
+ ExprKind::Cast(expr, _ty) => expr,
+ ExprKind::Type(expr, _ty) => expr,
+ ExprKind::Index(base, _subscript) => base,
+ _ => break false,
+ };
+ if !classify::expr_requires_semi_to_be_stmt(innermost) {
+ break true;
+ }
+ }
+ };
+
+ lhs_needs_parens
+ || (followed_by_block
+ && match &inner.kind {
+ ExprKind::Ret(_) | ExprKind::Break(..) | ExprKind::Yield(..) => true,
+ ExprKind::Range(_lhs, Some(rhs), _limits) => {
+ matches!(rhs.kind, ExprKind::Block(..))
+ }
+ _ => parser::contains_exterior_struct_lit(&inner),
+ })
+ }
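+
+    // Illustrative cases for the helper above: in `if (S { x: 1 } == s) {}` the
+    // parentheses are necessary because of the exterior struct literal, while
+    // in `if (x == y) {}` they are unused and get linted.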
+
+ fn emit_unused_delims_expr(
+ &self,
+ cx: &EarlyContext<'_>,
+ value: &ast::Expr,
+ ctx: UnusedDelimsCtx,
+ left_pos: Option<BytePos>,
+ right_pos: Option<BytePos>,
+ ) {
+ let spans = match value.kind {
+ ast::ExprKind::Block(ref block, None) if block.stmts.len() > 0 => {
+ let start = block.stmts[0].span;
+ let end = block.stmts[block.stmts.len() - 1].span;
+ if value.span.from_expansion() || start.from_expansion() || end.from_expansion() {
+ (
+ value.span.with_hi(value.span.lo() + BytePos(1)),
+ value.span.with_lo(value.span.hi() - BytePos(1)),
+ )
+ } else {
+ (value.span.with_hi(start.lo()), value.span.with_lo(end.hi()))
+ }
+ }
+ ast::ExprKind::Paren(ref expr) => {
+ if value.span.from_expansion() || expr.span.from_expansion() {
+ (
+ value.span.with_hi(value.span.lo() + BytePos(1)),
+ value.span.with_lo(value.span.hi() - BytePos(1)),
+ )
+ } else {
+ (value.span.with_hi(expr.span.lo()), value.span.with_lo(expr.span.hi()))
+ }
+ }
+ _ => return,
+ };
+ let keep_space = (
+ left_pos.map_or(false, |s| s >= value.span.lo()),
+ right_pos.map_or(false, |s| s <= value.span.hi()),
+ );
+ self.emit_unused_delims(cx, spans, ctx.into(), keep_space);
+ }
+
+ fn emit_unused_delims(
+ &self,
+ cx: &EarlyContext<'_>,
+ spans: (Span, Span),
+ msg: &str,
+ keep_space: (bool, bool),
+ ) {
+ // FIXME(flip1995): Quick and dirty fix for #70814. This should be fixed in rustdoc
+ // properly.
+ if spans.0 == DUMMY_SP || spans.1 == DUMMY_SP {
+ return;
+ }
+
+ cx.struct_span_lint(self.lint(), MultiSpan::from(vec![spans.0, spans.1]), |lint| {
+ let replacement = vec![
+ (spans.0, if keep_space.0 { " ".into() } else { "".into() }),
+ (spans.1, if keep_space.1 { " ".into() } else { "".into() }),
+ ];
+ lint.build(fluent::lint::unused_delim)
+ .set_arg("delim", Self::DELIM_STR)
+ .set_arg("item", msg)
+ .multipart_suggestion(
+ fluent::lint::suggestion,
+ replacement,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ }
+
+ fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
+ use rustc_ast::ExprKind::*;
+ let (value, ctx, followed_by_block, left_pos, right_pos) = match e.kind {
+ // Do not lint `unused_braces` in `if let` expressions.
+ If(ref cond, ref block, _)
+ if !matches!(cond.kind, Let(_, _, _))
+ || Self::LINT_EXPR_IN_PATTERN_MATCHING_CTX =>
+ {
+ let left = e.span.lo() + rustc_span::BytePos(2);
+ let right = block.span.lo();
+ (cond, UnusedDelimsCtx::IfCond, true, Some(left), Some(right))
+ }
+
+ // Do not lint `unused_braces` in `while let` expressions.
+ While(ref cond, ref block, ..)
+ if !matches!(cond.kind, Let(_, _, _))
+ || Self::LINT_EXPR_IN_PATTERN_MATCHING_CTX =>
+ {
+ let left = e.span.lo() + rustc_span::BytePos(5);
+ let right = block.span.lo();
+ (cond, UnusedDelimsCtx::WhileCond, true, Some(left), Some(right))
+ }
+
+ ForLoop(_, ref cond, ref block, ..) => {
+ (cond, UnusedDelimsCtx::ForIterExpr, true, None, Some(block.span.lo()))
+ }
+
+ Match(ref head, _) if Self::LINT_EXPR_IN_PATTERN_MATCHING_CTX => {
+ let left = e.span.lo() + rustc_span::BytePos(5);
+ (head, UnusedDelimsCtx::MatchScrutineeExpr, true, Some(left), None)
+ }
+
+ Ret(Some(ref value)) => {
+ let left = e.span.lo() + rustc_span::BytePos(3);
+ (value, UnusedDelimsCtx::ReturnValue, false, Some(left), None)
+ }
+
+ Assign(_, ref value, _) | AssignOp(.., ref value) => {
+ (value, UnusedDelimsCtx::AssignedValue, false, None, None)
+ }
+ // either function/method call, or something this lint doesn't care about
+ ref call_or_other => {
+ let (args_to_check, ctx) = match *call_or_other {
+ Call(_, ref args) => (&args[..], UnusedDelimsCtx::FunctionArg),
+ // first "argument" is self (which sometimes needs delims)
+ MethodCall(_, ref args, _) => (&args[1..], UnusedDelimsCtx::MethodArg),
+ // actual catch-all arm
+ _ => {
+ return;
+ }
+ };
+ // Don't lint if this is a nested macro expansion: otherwise, the lint could
+ // trigger in situations that macro authors shouldn't have to care about, e.g.,
+ // when a parenthesized token tree matched in one macro expansion is matched as
+ // an expression in another and used as a fn/method argument (Issue #47775)
+ if e.span.ctxt().outer_expn_data().call_site.from_expansion() {
+ return;
+ }
+ for arg in args_to_check {
+ self.check_unused_delims_expr(cx, arg, ctx, false, None, None);
+ }
+ return;
+ }
+ };
+ self.check_unused_delims_expr(cx, &value, ctx, followed_by_block, left_pos, right_pos);
+ }
+
+ fn check_stmt(&mut self, cx: &EarlyContext<'_>, s: &ast::Stmt) {
+ match s.kind {
+ StmtKind::Local(ref local) if Self::LINT_EXPR_IN_PATTERN_MATCHING_CTX => {
+ if let Some((init, els)) = local.kind.init_else_opt() {
+ let ctx = match els {
+ None => UnusedDelimsCtx::AssignedValue,
+ Some(_) => UnusedDelimsCtx::AssignedValueLetElse,
+ };
+ self.check_unused_delims_expr(cx, init, ctx, false, None, None);
+ }
+ }
+ StmtKind::Expr(ref expr) => {
+ self.check_unused_delims_expr(
+ cx,
+ &expr,
+ UnusedDelimsCtx::BlockRetValue,
+ false,
+ None,
+ None,
+ );
+ }
+ _ => {}
+ }
+ }
+
+ fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) {
+ use ast::ItemKind::*;
+
+ if let Const(.., Some(expr)) | Static(.., Some(expr)) = &item.kind {
+ self.check_unused_delims_expr(
+ cx,
+ expr,
+ UnusedDelimsCtx::AssignedValue,
+ false,
+ None,
+ None,
+ );
+ }
+ }
+}
+
+declare_lint! {
+ /// The `unused_parens` lint detects `if`, `match`, `while` and `return`
+ /// with parentheses; they do not need them.
+ ///
+ /// ### Examples
+ ///
+ /// ```rust
+ /// if(true) {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The parentheses are not needed, and should be removed. This is the
+ /// preferred style for writing these expressions.
+ pub(super) UNUSED_PARENS,
+ Warn,
+ "`if`, `match`, `while` and `return` do not need parentheses"
+}
+
+declare_lint_pass!(UnusedParens => [UNUSED_PARENS]);
+
+impl UnusedDelimLint for UnusedParens {
+ const DELIM_STR: &'static str = "parentheses";
+
+ const LINT_EXPR_IN_PATTERN_MATCHING_CTX: bool = true;
+
+ fn lint(&self) -> &'static Lint {
+ UNUSED_PARENS
+ }
+
+ fn check_unused_delims_expr(
+ &self,
+ cx: &EarlyContext<'_>,
+ value: &ast::Expr,
+ ctx: UnusedDelimsCtx,
+ followed_by_block: bool,
+ left_pos: Option<BytePos>,
+ right_pos: Option<BytePos>,
+ ) {
+ match value.kind {
+ ast::ExprKind::Paren(ref inner) => {
+ let followed_by_else = ctx == UnusedDelimsCtx::AssignedValueLetElse;
+ if !Self::is_expr_delims_necessary(inner, followed_by_block, followed_by_else)
+ && value.attrs.is_empty()
+ && !value.span.from_expansion()
+ && (ctx != UnusedDelimsCtx::LetScrutineeExpr
+ || !matches!(inner.kind, ast::ExprKind::Binary(
+ rustc_span::source_map::Spanned { node, .. },
+ _,
+ _,
+ ) if node.lazy()))
+ {
+ self.emit_unused_delims_expr(cx, value, ctx, left_pos, right_pos)
+ }
+ }
+ ast::ExprKind::Let(_, ref expr, _) => {
+ self.check_unused_delims_expr(
+ cx,
+ expr,
+ UnusedDelimsCtx::LetScrutineeExpr,
+ followed_by_block,
+ None,
+ None,
+ );
+ }
+ _ => {}
+ }
+ }
+}
+
+impl UnusedParens {
+ fn check_unused_parens_pat(
+ &self,
+ cx: &EarlyContext<'_>,
+ value: &ast::Pat,
+ avoid_or: bool,
+ avoid_mut: bool,
+ ) {
+ use ast::{BindingMode, Mutability, PatKind};
+
+ if let PatKind::Paren(inner) = &value.kind {
+ match inner.kind {
+ // The lint visitor will visit each subpattern of `p`. We do not want to lint
+ // any range pattern no matter where it occurs in the pattern. For something like
+ // `&(a..=b)`, there is a recursive `check_pat` on `a` and `b`, but we will assume
+                // that if there are unnecessary parens they are there for readability.
+ PatKind::Range(..) => return,
+ // Avoid `p0 | .. | pn` if we should.
+ PatKind::Or(..) if avoid_or => return,
+ // Avoid `mut x` and `mut x @ p` if we should:
+ PatKind::Ident(BindingMode::ByValue(Mutability::Mut), ..) if avoid_mut => return,
+ // Otherwise proceed with linting.
+ _ => {}
+ }
+ let spans = if value.span.from_expansion() || inner.span.from_expansion() {
+ (
+ value.span.with_hi(value.span.lo() + BytePos(1)),
+ value.span.with_lo(value.span.hi() - BytePos(1)),
+ )
+ } else {
+ (value.span.with_hi(inner.span.lo()), value.span.with_lo(inner.span.hi()))
+ };
+ self.emit_unused_delims(cx, spans, "pattern", (false, false));
+ }
+ }
+}
+
+impl EarlyLintPass for UnusedParens {
+ fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
+ match e.kind {
+ ExprKind::Let(ref pat, _, _) | ExprKind::ForLoop(ref pat, ..) => {
+ self.check_unused_parens_pat(cx, pat, false, false);
+ }
+ // We ignore parens in cases like `if (((let Some(0) = Some(1))))` because we already
+ // handle a hard error for them during AST lowering in `lower_expr_mut`, but we still
+ // want to complain about things like `if let 42 = (42)`.
+ ExprKind::If(ref cond, ref block, ref else_)
+ if matches!(cond.peel_parens().kind, ExprKind::Let(..)) =>
+ {
+ self.check_unused_delims_expr(
+ cx,
+ cond.peel_parens(),
+ UnusedDelimsCtx::LetScrutineeExpr,
+ true,
+ None,
+ None,
+ );
+ for stmt in &block.stmts {
+ <Self as UnusedDelimLint>::check_stmt(self, cx, stmt);
+ }
+ if let Some(e) = else_ {
+ <Self as UnusedDelimLint>::check_expr(self, cx, e);
+ }
+ return;
+ }
+ ExprKind::Match(ref _expr, ref arm) => {
+ for a in arm {
+ self.check_unused_delims_expr(
+ cx,
+ &a.body,
+ UnusedDelimsCtx::MatchArmExpr,
+ false,
+ None,
+ None,
+ );
+ }
+ }
+ _ => {}
+ }
+
+ <Self as UnusedDelimLint>::check_expr(self, cx, e)
+ }
+
+ fn check_pat(&mut self, cx: &EarlyContext<'_>, p: &ast::Pat) {
+ use ast::{Mutability, PatKind::*};
+ match &p.kind {
+ // Do not lint on `(..)` as that will result in the other arms being useless.
+ Paren(_)
+ // The other cases do not contain sub-patterns.
+ | Wild | Rest | Lit(..) | MacCall(..) | Range(..) | Ident(.., None) | Path(..) => {},
+ // These are list-like patterns; parens can always be removed.
+ TupleStruct(_, _, ps) | Tuple(ps) | Slice(ps) | Or(ps) => for p in ps {
+ self.check_unused_parens_pat(cx, p, false, false);
+ },
+ Struct(_, _, fps, _) => for f in fps {
+ self.check_unused_parens_pat(cx, &f.pat, false, false);
+ },
+ // Avoid linting on `i @ (p0 | .. | pn)` and `box (p0 | .. | pn)`, #64106.
+ Ident(.., Some(p)) | Box(p) => self.check_unused_parens_pat(cx, p, true, false),
+ // Avoid linting on `&(mut x)` as `&mut x` has a different meaning, #55342.
+ // Also avoid linting on `& mut? (p0 | .. | pn)`, #64106.
+ Ref(p, m) => self.check_unused_parens_pat(cx, p, true, *m == Mutability::Not),
+ }
+ }
+
+ fn check_stmt(&mut self, cx: &EarlyContext<'_>, s: &ast::Stmt) {
+ if let StmtKind::Local(ref local) = s.kind {
+ self.check_unused_parens_pat(cx, &local.pat, true, false);
+ }
+
+ <Self as UnusedDelimLint>::check_stmt(self, cx, s)
+ }
+
+ fn check_param(&mut self, cx: &EarlyContext<'_>, param: &ast::Param) {
+ self.check_unused_parens_pat(cx, &param.pat, true, false);
+ }
+
+ fn check_arm(&mut self, cx: &EarlyContext<'_>, arm: &ast::Arm) {
+ self.check_unused_parens_pat(cx, &arm.pat, false, false);
+ }
+
+ fn check_ty(&mut self, cx: &EarlyContext<'_>, ty: &ast::Ty) {
+ if let ast::TyKind::Paren(r) = &ty.kind {
+ match &r.kind {
+ ast::TyKind::TraitObject(..) => {}
+ ast::TyKind::ImplTrait(_, bounds) if bounds.len() > 1 => {}
+ ast::TyKind::Array(_, len) => {
+ self.check_unused_delims_expr(
+ cx,
+ &len.value,
+ UnusedDelimsCtx::ArrayLenExpr,
+ false,
+ None,
+ None,
+ );
+ }
+ _ => {
+ let spans = if ty.span.from_expansion() || r.span.from_expansion() {
+ (
+ ty.span.with_hi(ty.span.lo() + BytePos(1)),
+ ty.span.with_lo(ty.span.hi() - BytePos(1)),
+ )
+ } else {
+ (ty.span.with_hi(r.span.lo()), ty.span.with_lo(r.span.hi()))
+ };
+ self.emit_unused_delims(cx, spans, "type", (false, false));
+ }
+ }
+ }
+ }
+
+ fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) {
+ <Self as UnusedDelimLint>::check_item(self, cx, item)
+ }
+}
+
+declare_lint! {
+ /// The `unused_braces` lint detects unnecessary braces around an
+ /// expression.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// if { true } {
+ /// // ...
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The braces are not needed, and should be removed. This is the
+ /// preferred style for writing these expressions.
+ pub(super) UNUSED_BRACES,
+ Warn,
+ "unnecessary braces around an expression"
+}
+
+declare_lint_pass!(UnusedBraces => [UNUSED_BRACES]);
+
+impl UnusedDelimLint for UnusedBraces {
+ const DELIM_STR: &'static str = "braces";
+
+ const LINT_EXPR_IN_PATTERN_MATCHING_CTX: bool = false;
+
+ fn lint(&self) -> &'static Lint {
+ UNUSED_BRACES
+ }
+
+ fn check_unused_delims_expr(
+ &self,
+ cx: &EarlyContext<'_>,
+ value: &ast::Expr,
+ ctx: UnusedDelimsCtx,
+ followed_by_block: bool,
+ left_pos: Option<BytePos>,
+ right_pos: Option<BytePos>,
+ ) {
+ match value.kind {
+ ast::ExprKind::Block(ref inner, None)
+ if inner.rules == ast::BlockCheckMode::Default =>
+ {
+ // emit a warning under the following conditions:
+ //
+ // - the block does not have a label
+ // - the block is not `unsafe`
+ // - the block contains exactly one expression (do not lint `{ expr; }`)
+ // - `followed_by_block` is true and the internal expr may contain a `{`
+ // - the block is not multiline (do not lint multiline match arms)
+ // ```
+ // match expr {
+ // Pattern => {
+ // somewhat_long_expression
+ // }
+ // // ...
+ // }
+ // ```
+ // - the block has no attribute and was not created inside a macro
+ // - if the block is an `anon_const`, the inner expr must be a literal
+ // (do not lint `struct A<const N: usize>; let _: A<{ 2 + 3 }>;`)
+ //
+ // FIXME(const_generics): handle paths when #67075 is fixed.
+ if let [stmt] = inner.stmts.as_slice() {
+ if let ast::StmtKind::Expr(ref expr) = stmt.kind {
+ if !Self::is_expr_delims_necessary(expr, followed_by_block, false)
+ && (ctx != UnusedDelimsCtx::AnonConst
+ || matches!(expr.kind, ast::ExprKind::Lit(_)))
+ && !cx.sess().source_map().is_multiline(value.span)
+ && value.attrs.is_empty()
+ && !value.span.from_expansion()
+ {
+ self.emit_unused_delims_expr(cx, value, ctx, left_pos, right_pos)
+ }
+ }
+ }
+ }
+ ast::ExprKind::Let(_, ref expr, _) => {
+ self.check_unused_delims_expr(
+ cx,
+ expr,
+ UnusedDelimsCtx::LetScrutineeExpr,
+ followed_by_block,
+ None,
+ None,
+ );
+ }
+ _ => {}
+ }
+ }
+}
+
+impl EarlyLintPass for UnusedBraces {
+ fn check_stmt(&mut self, cx: &EarlyContext<'_>, s: &ast::Stmt) {
+ <Self as UnusedDelimLint>::check_stmt(self, cx, s)
+ }
+
+ fn check_expr(&mut self, cx: &EarlyContext<'_>, e: &ast::Expr) {
+ <Self as UnusedDelimLint>::check_expr(self, cx, e);
+
+ if let ExprKind::Repeat(_, ref anon_const) = e.kind {
+ self.check_unused_delims_expr(
+ cx,
+ &anon_const.value,
+ UnusedDelimsCtx::AnonConst,
+ false,
+ None,
+ None,
+ );
+ }
+ }
+
+ fn check_generic_arg(&mut self, cx: &EarlyContext<'_>, arg: &ast::GenericArg) {
+ if let ast::GenericArg::Const(ct) = arg {
+ self.check_unused_delims_expr(
+ cx,
+ &ct.value,
+ UnusedDelimsCtx::AnonConst,
+ false,
+ None,
+ None,
+ );
+ }
+ }
+
+ fn check_variant(&mut self, cx: &EarlyContext<'_>, v: &ast::Variant) {
+ if let Some(anon_const) = &v.disr_expr {
+ self.check_unused_delims_expr(
+ cx,
+ &anon_const.value,
+ UnusedDelimsCtx::AnonConst,
+ false,
+ None,
+ None,
+ );
+ }
+ }
+
+ fn check_ty(&mut self, cx: &EarlyContext<'_>, ty: &ast::Ty) {
+ match ty.kind {
+ ast::TyKind::Array(_, ref len) => {
+ self.check_unused_delims_expr(
+ cx,
+ &len.value,
+ UnusedDelimsCtx::ArrayLenExpr,
+ false,
+ None,
+ None,
+ );
+ }
+
+ ast::TyKind::Typeof(ref anon_const) => {
+ self.check_unused_delims_expr(
+ cx,
+ &anon_const.value,
+ UnusedDelimsCtx::AnonConst,
+ false,
+ None,
+ None,
+ );
+ }
+
+ _ => {}
+ }
+ }
+
+ fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) {
+ <Self as UnusedDelimLint>::check_item(self, cx, item)
+ }
+}
+
+declare_lint! {
+ /// The `unused_import_braces` lint catches unnecessary braces around an
+ /// imported item.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(unused_import_braces)]
+ /// use test::{A};
+ ///
+ /// pub mod test {
+ /// pub struct A;
+ /// }
+ /// # fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// If there is only a single item, then remove the braces (`use test::A;`
+ /// for example).
+ ///
+ /// This lint is "allow" by default because it is only enforcing a
+ /// stylistic choice.
+ UNUSED_IMPORT_BRACES,
+ Allow,
+ "unnecessary braces around an imported item"
+}
+
+declare_lint_pass!(UnusedImportBraces => [UNUSED_IMPORT_BRACES]);
+
+impl UnusedImportBraces {
+ fn check_use_tree(&self, cx: &EarlyContext<'_>, use_tree: &ast::UseTree, item: &ast::Item) {
+ if let ast::UseTreeKind::Nested(ref items) = use_tree.kind {
+ // Recursively check nested UseTrees
+ for &(ref tree, _) in items {
+ self.check_use_tree(cx, tree, item);
+ }
+
+ // Trigger the lint only if there is one nested item
+ if items.len() != 1 {
+ return;
+ }
+
+ // Trigger the lint if the nested item is a non-self single item
+ let node_name = match items[0].0.kind {
+ ast::UseTreeKind::Simple(rename, ..) => {
+ let orig_ident = items[0].0.prefix.segments.last().unwrap().ident;
+ if orig_ident.name == kw::SelfLower {
+ return;
+ }
+ rename.unwrap_or(orig_ident).name
+ }
+ ast::UseTreeKind::Glob => Symbol::intern("*"),
+ ast::UseTreeKind::Nested(_) => return,
+ };
+
+ cx.struct_span_lint(UNUSED_IMPORT_BRACES, item.span, |lint| {
+ lint.build(fluent::lint::unused_import_braces).set_arg("node", node_name).emit();
+ });
+ }
+ }
+}
+
+impl EarlyLintPass for UnusedImportBraces {
+ fn check_item(&mut self, cx: &EarlyContext<'_>, item: &ast::Item) {
+ if let ast::ItemKind::Use(ref use_tree) = item.kind {
+ self.check_use_tree(cx, use_tree, item);
+ }
+ }
+}
+
+declare_lint! {
+ /// The `unused_allocation` lint detects unnecessary allocations that can
+ /// be eliminated.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(box_syntax)]
+ /// fn main() {
+ /// let a = (box [1, 2, 3]).len();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// When a `box` expression is immediately coerced to a reference, then
+ /// the allocation is unnecessary, and a reference (using `&` or `&mut`)
+ /// should be used instead to avoid the allocation.
+ pub(super) UNUSED_ALLOCATION,
+ Warn,
+ "detects unnecessary allocations that can be eliminated"
+}
+
+declare_lint_pass!(UnusedAllocation => [UNUSED_ALLOCATION]);
+
+impl<'tcx> LateLintPass<'tcx> for UnusedAllocation {
+ fn check_expr(&mut self, cx: &LateContext<'_>, e: &hir::Expr<'_>) {
+ match e.kind {
+ hir::ExprKind::Box(_) => {}
+ _ => return,
+ }
+
+ for adj in cx.typeck_results().expr_adjustments(e) {
+ if let adjustment::Adjust::Borrow(adjustment::AutoBorrow::Ref(_, m)) = adj.kind {
+ cx.struct_span_lint(UNUSED_ALLOCATION, e.span, |lint| {
+ lint.build(match m {
+ adjustment::AutoBorrowMutability::Not => fluent::lint::unused_allocation,
+ adjustment::AutoBorrowMutability::Mut { .. } => {
+ fluent::lint::unused_allocation_mut
+ }
+ })
+ .emit();
+ });
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_lint_defs/Cargo.toml b/compiler/rustc_lint_defs/Cargo.toml
new file mode 100644
index 000000000..2bf34d82f
--- /dev/null
+++ b/compiler/rustc_lint_defs/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "rustc_lint_defs"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+serde = { version = "1.0.125", features = ["derive"] }
+rustc_ast = { path = "../rustc_ast" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_error_messages = { path = "../rustc_error_messages" }
+rustc_span = { path = "../rustc_span" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_target = { path = "../rustc_target" }
+rustc_hir = { path = "../rustc_hir" }
diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs
new file mode 100644
index 000000000..f00165cd3
--- /dev/null
+++ b/compiler/rustc_lint_defs/src/builtin.rs
@@ -0,0 +1,4056 @@
+//! Some lints that are built in to the compiler.
+//!
+//! These are the built-in lints that are emitted directly in the main
+//! compiler code, rather than using their own custom pass. Those
+//! lints are all available in `rustc_lint::builtin`.
+
+use crate::{declare_lint, declare_lint_pass, FutureIncompatibilityReason};
+use rustc_span::edition::Edition;
+use rustc_span::symbol::sym;
+
+declare_lint! {
+ /// The `forbidden_lint_groups` lint detects violations of
+ /// `forbid` applied to a lint group. Due to a bug in the compiler,
+ /// these used to be overlooked entirely. They now generate a warning.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![forbid(warnings)]
+ /// #![deny(bad_style)]
+ ///
+ /// fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Recommended fix
+ ///
+ /// If your crate is using `#![forbid(warnings)]`,
+ /// we recommend that you change to `#![deny(warnings)]`.
+ ///
+ /// ### Explanation
+ ///
+ /// Due to a compiler bug, applying `forbid` to lint groups
+ /// previously had no effect. The bug is now fixed but instead of
+ /// enforcing `forbid` we issue this future-compatibility warning
+ /// to avoid breaking existing crates.
+ pub FORBIDDEN_LINT_GROUPS,
+ Warn,
+ "applying forbid to lint-groups",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #81670 <https://github.com/rust-lang/rust/issues/81670>",
+ };
+}
+
+declare_lint! {
+ /// The `ill_formed_attribute_input` lint detects ill-formed attribute
+ /// inputs that were previously accepted and used in practice.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #[inline = "this is not valid"]
+ /// fn foo() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Previously, inputs for many built-in attributes weren't validated and
+ /// nonsensical attribute inputs were accepted. After validation was
+ /// added, it was determined that some existing projects made use of these
+ /// invalid forms. This is a [future-incompatible] lint to transition this
+ /// to a hard error in the future. See [issue #57571] for more details.
+ ///
+ /// Check the [attribute reference] for details on the valid inputs for
+ /// attributes.
+ ///
+ /// [issue #57571]: https://github.com/rust-lang/rust/issues/57571
+ /// [attribute reference]: https://doc.rust-lang.org/nightly/reference/attributes.html
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub ILL_FORMED_ATTRIBUTE_INPUT,
+ Deny,
+ "ill-formed attribute inputs that were previously accepted and used in practice",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #57571 <https://github.com/rust-lang/rust/issues/57571>",
+ };
+ crate_level_only
+}
+
+declare_lint! {
+ /// The `conflicting_repr_hints` lint detects [`repr` attributes] with
+ /// conflicting hints.
+ ///
+ /// [`repr` attributes]: https://doc.rust-lang.org/reference/type-layout.html#representations
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #[repr(u32, u64)]
+ /// enum Foo {
+ /// Variant1,
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The compiler incorrectly accepted these conflicting representations in
+ /// the past. This is a [future-incompatible] lint to transition this to a
+ /// hard error in the future. See [issue #68585] for more details.
+ ///
+ /// To correct the issue, remove one of the conflicting hints.
+ ///
+ /// [issue #68585]: https://github.com/rust-lang/rust/issues/68585
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub CONFLICTING_REPR_HINTS,
+ Deny,
+ "conflicts between `#[repr(..)]` hints that were previously accepted and used in practice",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #68585 <https://github.com/rust-lang/rust/issues/68585>",
+ };
+}
+
+declare_lint! {
+ /// The `meta_variable_misuse` lint detects possible meta-variable misuse
+ /// in macro definitions.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(meta_variable_misuse)]
+ ///
+ /// macro_rules! foo {
+ /// () => {};
+ /// ($( $i:ident = $($j:ident),+ );*) => { $( $( $i = $k; )+ )* };
+ /// }
+ ///
+ /// fn main() {
+ /// foo!();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// There are quite a few different ways a [`macro_rules`] macro can be
+ /// improperly defined. Many of these errors were previously only detected
+ /// when the macro was expanded or not at all. This lint is an attempt to
+ /// catch some of these problems when the macro is *defined*.
+ ///
+ /// This lint is "allow" by default because it may have false positives
+ /// and other issues. See [issue #61053] for more details.
+ ///
+ /// [`macro_rules`]: https://doc.rust-lang.org/reference/macros-by-example.html
+ /// [issue #61053]: https://github.com/rust-lang/rust/issues/61053
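+    ///
+    /// In the example above, the transcriber uses `$k`, which is never bound
+    /// by the matcher; presumably `$j` was intended. A sketch of that fix:
+    ///
+    /// ```rust
+    /// macro_rules! foo {
+    ///     () => {};
+    ///     ($( $i:ident = $($j:ident),+ );*) => { $( $( $i = $j; )+ )* };
+    /// }
+    ///
+    /// fn main() {
+    ///     foo!();
+    /// }
+    /// ```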
+ pub META_VARIABLE_MISUSE,
+ Allow,
+ "possible meta-variable misuse at macro definition"
+}
+
+declare_lint! {
+ /// The `incomplete_include` lint detects the use of the [`include!`]
+ /// macro with a file that contains more than one expression.
+ ///
+ /// [`include!`]: https://doc.rust-lang.org/std/macro.include.html
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs separate file)
+ /// fn main() {
+ /// include!("foo.txt");
+ /// }
+ /// ```
+ ///
+ /// where the file `foo.txt` contains:
+ ///
+ /// ```text
+    /// println!("1");
+ /// ```
+ ///
+ /// produces:
+ ///
+ /// ```text
+ /// error: include macro expected single expression in source
+ /// --> foo.txt:1:14
+ /// |
+ /// 1 | println!("1");
+ /// | ^
+ /// |
+ /// = note: `#[deny(incomplete_include)]` on by default
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// The [`include!`] macro is currently only intended to be used to
+ /// include a single [expression] or multiple [items]. Historically it
+ /// would ignore any contents after the first expression, but that can be
+ /// confusing. In the example above, the `println!` expression ends just
+ /// before the semicolon, making the semicolon "extra" information that is
+ /// ignored. Perhaps even more surprising, if the included file had
+ /// multiple print statements, the subsequent ones would be ignored!
+ ///
+ /// One workaround is to place the contents in braces to create a [block
+ /// expression]. Also consider alternatives, like using functions to
+ /// encapsulate the expressions, or use [proc-macros].
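+    ///
+    /// For example, with the brace workaround, `foo.txt` would instead
+    /// contain:
+    ///
+    /// ```text
+    /// { println!("1"); }
+    /// ```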
+ ///
+ /// This is a lint instead of a hard error because existing projects were
+ /// found to hit this error. To be cautious, it is a lint for now. The
+    /// future semantics of the `include!` macro are also uncertain; see
+ /// [issue #35560].
+ ///
+ /// [items]: https://doc.rust-lang.org/reference/items.html
+ /// [expression]: https://doc.rust-lang.org/reference/expressions.html
+ /// [block expression]: https://doc.rust-lang.org/reference/expressions/block-expr.html
+ /// [proc-macros]: https://doc.rust-lang.org/reference/procedural-macros.html
+ /// [issue #35560]: https://github.com/rust-lang/rust/issues/35560
+ pub INCOMPLETE_INCLUDE,
+ Deny,
+ "trailing content in included file"
+}
+
+declare_lint! {
+ /// The `arithmetic_overflow` lint detects that an arithmetic operation
+ /// will [overflow].
+ ///
+ /// [overflow]: https://doc.rust-lang.org/reference/expressions/operator-expr.html#overflow
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// 1_i32 << 32;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It is very likely a mistake to perform an arithmetic operation that
+ /// overflows its value. If the compiler is able to detect these kinds of
+ /// overflows at compile-time, it will trigger this lint. Consider
+ /// adjusting the expression to avoid overflow, or use a data type that
+ /// will not overflow.
+ pub ARITHMETIC_OVERFLOW,
+ Deny,
+ "arithmetic operation overflows"
+}
+
+declare_lint! {
+ /// The `unconditional_panic` lint detects an operation that will cause a
+ /// panic at runtime.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// # #![allow(unused)]
+ /// let x = 1 / 0;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This lint detects code that is very likely incorrect because it will
+ /// always panic, such as division by zero and out-of-bounds array
+ /// accesses. Consider adjusting your code if this is a bug, or using the
+ /// `panic!` or `unreachable!` macro instead in case the panic is intended.
+ pub UNCONDITIONAL_PANIC,
+ Deny,
+ "operation will cause a panic at runtime"
+}
+
+declare_lint! {
+ /// The `const_err` lint detects an erroneous expression while doing
+ /// constant evaluation.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![allow(unconditional_panic)]
+ /// const C: i32 = 1/0;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This lint detects constants that fail to evaluate. Allowing the lint will accept the
+ /// constant declaration, but any use of this constant will still lead to a hard error. This is
+ /// a future incompatibility lint; the plan is to eventually entirely forbid even declaring
+ /// constants that cannot be evaluated. See [issue #71800] for more details.
+ ///
+ /// [issue #71800]: https://github.com/rust-lang/rust/issues/71800
+ pub CONST_ERR,
+ Deny,
+ "constant evaluation encountered erroneous expression",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #71800 <https://github.com/rust-lang/rust/issues/71800>",
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
+ };
+ report_in_external_macro
+}
+
+declare_lint! {
+ /// The `unused_imports` lint detects imports that are never used.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// use std::collections::HashMap;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// Unused imports may signal a mistake or unfinished code; they clutter
+    /// the code and should be removed. If you intended to re-export the item
+ /// to make it available outside of the module, add a visibility modifier
+ /// like `pub`.
+ pub UNUSED_IMPORTS,
+ Warn,
+ "imports that are never used"
+}
+
+declare_lint! {
+ /// The `must_not_suspend` lint guards against values that shouldn't be held across suspend points
+    /// (`.await`).
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(must_not_suspend)]
+ /// #![warn(must_not_suspend)]
+ ///
+ /// #[must_not_suspend]
+ /// struct SyncThing {}
+ ///
+ /// async fn yield_now() {}
+ ///
+ /// pub async fn uhoh() {
+ /// let guard = SyncThing {};
+ /// yield_now().await;
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The `must_not_suspend` lint detects values that are marked with the `#[must_not_suspend]`
+ /// attribute being held across suspend points. A "suspend" point is usually a `.await` in an async
+ /// function.
+ ///
+ /// This attribute can be used to mark values that are semantically incorrect across suspends
+ /// (like certain types of timers), values that have async alternatives, and values that
+ /// regularly cause problems with the `Send`-ness of async fn's returned futures (like
+    /// `MutexGuard`s).
+ ///
+ pub MUST_NOT_SUSPEND,
+ Allow,
+ "use of a `#[must_not_suspend]` value across a yield point",
+ @feature_gate = rustc_span::symbol::sym::must_not_suspend;
+}
+
+declare_lint! {
+ /// The `unused_extern_crates` lint guards against `extern crate` items
+ /// that are never used.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(unused_extern_crates)]
+ /// extern crate proc_macro;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// `extern crate` items that are unused have no effect and should be
+ /// removed. Note that there are some cases where specifying an `extern
+ /// crate` is desired for the side effect of ensuring the given crate is
+ /// linked, even though it is not otherwise directly referenced. The lint
+ /// can be silenced by aliasing the crate to an underscore, such as
+ /// `extern crate foo as _`. Also note that it is no longer idiomatic to
+ /// use `extern crate` in the [2018 edition], as extern crates are now
+ /// automatically added in scope.
+ ///
+ /// This lint is "allow" by default because it can be noisy, and produce
+ /// false-positives. If a dependency is being removed from a project, it
+ /// is recommended to remove it from the build configuration (such as
+ /// `Cargo.toml`) to ensure stale build entries aren't left behind.
+ ///
+ /// [2018 edition]: https://doc.rust-lang.org/edition-guide/rust-2018/module-system/path-clarity.html#no-more-extern-crate
+ pub UNUSED_EXTERN_CRATES,
+ Allow,
+ "extern crates that are never used"
+}
+
+declare_lint! {
+ /// The `unused_crate_dependencies` lint detects crate dependencies that
+ /// are never used.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs extern crate)
+ /// #![deny(unused_crate_dependencies)]
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// error: external crate `regex` unused in `lint_example`: remove the dependency or add `use regex as _;`
+ /// |
+ /// note: the lint level is defined here
+ /// --> src/lib.rs:1:9
+ /// |
+ /// 1 | #![deny(unused_crate_dependencies)]
+ /// | ^^^^^^^^^^^^^^^^^^^^^^^^^
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// After removing the code that uses a dependency, this usually also
+ /// requires removing the dependency from the build configuration.
+ /// However, sometimes that step can be missed, which leads to time wasted
+ /// building dependencies that are no longer used. This lint can be
+ /// enabled to detect dependencies that are never used (more specifically,
+ /// any dependency passed with the `--extern` command-line flag that is
+ /// never referenced via [`use`], [`extern crate`], or in any [path]).
+ ///
+ /// This lint is "allow" by default because it can provide false positives
+ /// depending on how the build system is configured. For example, when
+ /// using Cargo, a "package" consists of multiple crates (such as a
+ /// library and a binary), but the dependencies are defined for the
+ /// package as a whole. If there is a dependency that is only used in the
+ /// binary, but not the library, then the lint will be incorrectly issued
+ /// in the library.
+ ///
+ /// [path]: https://doc.rust-lang.org/reference/paths.html
+ /// [`use`]: https://doc.rust-lang.org/reference/items/use-declarations.html
+ /// [`extern crate`]: https://doc.rust-lang.org/reference/items/extern-crates.html
+ pub UNUSED_CRATE_DEPENDENCIES,
+ Allow,
+ "crate dependencies that are never used",
+ crate_level_only
+}
+
+declare_lint! {
+ /// The `unused_qualifications` lint detects unnecessarily qualified
+ /// names.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(unused_qualifications)]
+ /// mod foo {
+ /// pub fn bar() {}
+ /// }
+ ///
+ /// fn main() {
+ /// use foo::bar;
+ /// foo::bar();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// If an item from another module is already brought into scope, then
+ /// there is no need to qualify it in this case. You can call `bar()`
+ /// directly, without the `foo::`.
+ ///
+ /// This lint is "allow" by default because it is somewhat pedantic, and
+ /// doesn't indicate an actual problem, but rather a stylistic choice, and
+ /// can be noisy when refactoring or moving around code.
+ pub UNUSED_QUALIFICATIONS,
+ Allow,
+ "detects unnecessarily qualified names"
+}
+
+declare_lint! {
+ /// The `unknown_lints` lint detects unrecognized lint attributes.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![allow(not_a_real_lint)]
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It is usually a mistake to specify a lint that does not exist. Check
+ /// the spelling, and check the lint listing for the correct name. Also
+ /// consider if you are using an old version of the compiler, and the lint
+ /// is only available in a newer version.
+ pub UNKNOWN_LINTS,
+ Warn,
+ "unrecognized lint attribute"
+}
+
+declare_lint! {
+ /// The `unfulfilled_lint_expectations` lint detects lint trigger expectations
+ /// that have not been fulfilled.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(lint_reasons)]
+ ///
+ /// #[expect(unused_variables)]
+ /// let x = 10;
+ /// println!("{}", x);
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It was expected that the marked code would emit a lint. This expectation
+ /// has not been fulfilled.
+ ///
+    /// The `expect` attribute can be removed if this is the intended behavior;
+    /// otherwise, it should be investigated why the expected lint is no longer issued.
+ ///
+ /// In rare cases, the expectation might be emitted at a different location than
+    /// shown in the code snippet. In most cases, the `#[expect]` attribute
+ /// works when added to the outer scope. A few lints can only be expected
+ /// on a crate level.
+ ///
+    /// Part of RFC 2383. The progress is being tracked in [#54503].
+ ///
+ /// [#54503]: https://github.com/rust-lang/rust/issues/54503
+ pub UNFULFILLED_LINT_EXPECTATIONS,
+ Warn,
+ "unfulfilled lint expectation",
+ @feature_gate = rustc_span::sym::lint_reasons;
+}
+
+declare_lint! {
+ /// The `unused_variables` lint detects variables which are not used in
+ /// any way.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let x = 5;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Unused variables may signal a mistake or unfinished code. To silence
+ /// the warning for the individual variable, prefix it with an underscore
+ /// such as `_x`.
+ pub UNUSED_VARIABLES,
+ Warn,
+ "detect variables which are not used in any way"
+}
+
+declare_lint! {
+ /// The `unused_assignments` lint detects assignments that will never be read.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let mut x = 5;
+ /// x = 6;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Unused assignments may signal a mistake or unfinished code. If the
+ /// variable is never used after being assigned, then the assignment can
+ /// be removed. Variables with an underscore prefix such as `_x` will not
+ /// trigger this lint.
+ pub UNUSED_ASSIGNMENTS,
+ Warn,
+ "detect assignments that will never be read"
+}
+
+declare_lint! {
+ /// The `dead_code` lint detects unused, unexported items.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// fn foo() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Dead code may signal a mistake or unfinished code. To silence the
+ /// warning for individual items, prefix the name with an underscore such
+ /// as `_foo`. If it was intended to expose the item outside of the crate,
+ /// consider adding a visibility modifier like `pub`. Otherwise consider
+ /// removing the unused code.
+ pub DEAD_CODE,
+ Warn,
+ "detect unused, unexported items"
+}
+
+declare_lint! {
+ /// The `unused_attributes` lint detects attributes that were not used by
+ /// the compiler.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![ignore]
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Unused [attributes] may indicate the attribute is placed in the wrong
+ /// position. Consider removing it, or placing it in the correct position.
+ /// Also consider if you intended to use an _inner attribute_ (with a `!`
+ /// such as `#![allow(unused)]`) which applies to the item the attribute
+ /// is within, or an _outer attribute_ (without a `!` such as
+ /// `#[allow(unused)]`) which applies to the item *following* the
+ /// attribute.
+ ///
+ /// [attributes]: https://doc.rust-lang.org/reference/attributes.html
+ pub UNUSED_ATTRIBUTES,
+ Warn,
+ "detects attributes that were not used by the compiler"
+}
+
+declare_lint! {
+ /// The `unused_tuple_struct_fields` lint detects fields of tuple structs
+ /// that are never read.
+ ///
+ /// ### Example
+ ///
+ /// ```
+ /// #[warn(unused_tuple_struct_fields)]
+ /// struct S(i32, i32, i32);
+ /// let s = S(1, 2, 3);
+ /// let _ = (s.0, s.2);
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Tuple struct fields that are never read anywhere may indicate a
+ /// mistake or unfinished code. To silence this warning, consider
+ /// removing the unused field(s) or, to preserve the numbering of the
+ /// remaining fields, change the unused field(s) to have unit type.
+ pub UNUSED_TUPLE_STRUCT_FIELDS,
+ Allow,
+ "detects tuple struct fields that are never read"
+}
+
+declare_lint! {
+ /// The `unreachable_code` lint detects unreachable code paths.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,no_run
+ /// panic!("we never go past here!");
+ ///
+ /// let x = 5;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Unreachable code may signal a mistake or unfinished code. If the code
+ /// is no longer in use, consider removing it.
+ pub UNREACHABLE_CODE,
+ Warn,
+ "detects unreachable code paths",
+ report_in_external_macro
+}
+
+declare_lint! {
+ /// The `unreachable_patterns` lint detects unreachable patterns.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let x = 5;
+ /// match x {
+ /// y => (),
+ /// 5 => (),
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This usually indicates a mistake in how the patterns are specified or
+ /// ordered. In this example, the `y` pattern will always match, so the
+    /// `5` arm is impossible to reach. Remember, match arms match in order; you
+    /// probably wanted to put the `5` case above the `y` case.
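+    ///
+    /// For example, reordering the arms above so the more specific pattern
+    /// comes first (a sketch of the fix):
+    ///
+    /// ```rust
+    /// let x = 5;
+    /// match x {
+    ///     5 => (),
+    ///     y => (), // now matches every value other than `5`
+    /// }
+    /// ```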
+ pub UNREACHABLE_PATTERNS,
+ Warn,
+ "detects unreachable patterns"
+}
+
+declare_lint! {
+ /// The `overlapping_range_endpoints` lint detects `match` arms that have [range patterns] that
+ /// overlap on their endpoints.
+ ///
+ /// [range patterns]: https://doc.rust-lang.org/nightly/reference/patterns.html#range-patterns
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let x = 123u8;
+ /// match x {
+ /// 0..=100 => { println!("small"); }
+ /// 100..=255 => { println!("large"); }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It is likely a mistake to have range patterns in a match expression that overlap in this
+ /// way. Check that the beginning and end values are what you expect, and keep in mind that
+ /// with `..=` the left and right bounds are inclusive.
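+    ///
+    /// For example, one way to remove the overlap above is to start the
+    /// second range just past the end of the first (a sketch):
+    ///
+    /// ```rust
+    /// let x = 123u8;
+    /// match x {
+    ///     0..=100 => { println!("small"); }
+    ///     101..=255 => { println!("large"); }
+    /// }
+    /// ```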
+ pub OVERLAPPING_RANGE_ENDPOINTS,
+ Warn,
+ "detects range patterns with overlapping endpoints"
+}
+
+declare_lint! {
+ /// The `bindings_with_variant_name` lint detects pattern bindings with
+ /// the same name as one of the matched variants.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// pub enum Enum {
+ /// Foo,
+ /// Bar,
+ /// }
+ ///
+ /// pub fn foo(x: Enum) {
+ /// match x {
+ /// Foo => {}
+ /// Bar => {}
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It is usually a mistake to specify an enum variant name as an
+ /// [identifier pattern]. In the example above, the `match` arms are
+ /// specifying a variable name to bind the value of `x` to. The second arm
+ /// is ignored because the first one matches *all* values. The likely
+ /// intent is that the arm was intended to match on the enum variant.
+ ///
+ /// Two possible solutions are:
+ ///
+ /// * Specify the enum variant using a [path pattern], such as
+    ///   `Enum::Foo` (a sketch of this fix is shown below).
+ /// * Bring the enum variants into local scope, such as adding `use
+ /// Enum::*;` to the beginning of the `foo` function in the example
+ /// above.
+ ///
+ /// [identifier pattern]: https://doc.rust-lang.org/reference/patterns.html#identifier-patterns
+ /// [path pattern]: https://doc.rust-lang.org/reference/patterns.html#path-patterns
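+    ///
+    /// A sketch of the first solution applied to the example above:
+    ///
+    /// ```rust
+    /// pub enum Enum {
+    ///     Foo,
+    ///     Bar,
+    /// }
+    ///
+    /// pub fn foo(x: Enum) {
+    ///     match x {
+    ///         Enum::Foo => {}
+    ///         Enum::Bar => {}
+    ///     }
+    /// }
+    /// ```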
+ pub BINDINGS_WITH_VARIANT_NAME,
+ Warn,
+ "detects pattern bindings with the same name as one of the matched variants"
+}
+
+declare_lint! {
+ /// The `unused_macros` lint detects macros that were not used.
+ ///
+ /// Note that this lint is distinct from the `unused_macro_rules` lint,
+    /// which checks for individual rules of an otherwise used macro that
+    /// never match, and thus never expand.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// macro_rules! unused {
+ /// () => {};
+ /// }
+ ///
+ /// fn main() {
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Unused macros may signal a mistake or unfinished code. To silence the
+ /// warning for the individual macro, prefix the name with an underscore
+ /// such as `_my_macro`. If you intended to export the macro to make it
+ /// available outside of the crate, use the [`macro_export` attribute].
+ ///
+ /// [`macro_export` attribute]: https://doc.rust-lang.org/reference/macros-by-example.html#path-based-scope
+ pub UNUSED_MACROS,
+ Warn,
+ "detects macros that were not used"
+}
+
+declare_lint! {
+ /// The `unused_macro_rules` lint detects macro rules that were not used.
+ ///
+ /// Note that the lint is distinct from the `unused_macros` lint, which
+ /// fires if the entire macro is never called, while this lint fires for
+ /// single unused rules of the macro that is otherwise used.
+ /// `unused_macro_rules` fires only if `unused_macros` wouldn't fire.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #[warn(unused_macro_rules)]
+ /// macro_rules! unused_empty {
+ /// (hello) => { println!("Hello, world!") }; // This rule is unused
+ /// () => { println!("empty") }; // This rule is used
+ /// }
+ ///
+ /// fn main() {
+ /// unused_empty!(hello);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Unused macro rules may signal a mistake or unfinished code. Furthermore,
+ /// they slow down compilation. Right now, silencing the warning is not
+ /// supported on a single rule level, so you have to add an allow to the
+ /// entire macro definition.
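+    ///
+    /// For example, silencing the warning for the macro above (a sketch):
+    ///
+    /// ```rust
+    /// #[allow(unused_macro_rules)]
+    /// macro_rules! unused_empty {
+    ///     (hello) => { println!("Hello, world!") };
+    ///     () => { println!("empty") };
+    /// }
+    /// # fn main() { unused_empty!(hello); }
+    /// ```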
+ ///
+ /// If you intended to export the macro to make it
+ /// available outside of the crate, use the [`macro_export` attribute].
+ ///
+ /// [`macro_export` attribute]: https://doc.rust-lang.org/reference/macros-by-example.html#path-based-scope
+ pub UNUSED_MACRO_RULES,
+ Allow,
+ "detects macro rules that were not used"
+}
+
+declare_lint! {
+ /// The `warnings` lint allows you to change the level of other
+ /// lints which produce warnings.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![deny(warnings)]
+ /// fn foo() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The `warnings` lint is a bit special; by changing its level, you
+    /// change the level of every other lint that would produce a warning to
+    /// whatever value you'd like. As such, you won't ever trigger this lint in your
+ /// code directly.
+ pub WARNINGS,
+ Warn,
+ "mass-change the level for lints which produce warnings"
+}
+
+declare_lint! {
+ /// The `unused_features` lint detects unused or unknown features found in
+ /// crate-level [`feature` attributes].
+ ///
+ /// [`feature` attributes]: https://doc.rust-lang.org/nightly/unstable-book/
+ ///
+    /// Note: This lint is currently not functional; see [issue #44232] for
+ /// more details.
+ ///
+ /// [issue #44232]: https://github.com/rust-lang/rust/issues/44232
+ pub UNUSED_FEATURES,
+ Warn,
+ "unused features found in crate-level `#[feature]` directives"
+}
+
+declare_lint! {
+ /// The `stable_features` lint detects a [`feature` attribute] that
+ /// has since been made stable.
+ ///
+ /// [`feature` attribute]: https://doc.rust-lang.org/nightly/unstable-book/
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(test_accepted_feature)]
+ /// fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// When a feature is stabilized, it is no longer necessary to include a
+ /// `#![feature]` attribute for it. To fix, simply remove the
+ /// `#![feature]` attribute.
+ pub STABLE_FEATURES,
+ Warn,
+ "stable features found in `#[feature]` directive"
+}
+
+declare_lint! {
+ /// The `unknown_crate_types` lint detects an unknown crate type found in
+ /// a [`crate_type` attribute].
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![crate_type="lol"]
+ /// fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// An unknown value given to the `crate_type` attribute is almost
+ /// certainly a mistake.
+ ///
+ /// [`crate_type` attribute]: https://doc.rust-lang.org/reference/linkage.html
+ pub UNKNOWN_CRATE_TYPES,
+ Deny,
+ "unknown crate type found in `#[crate_type]` directive",
+ crate_level_only
+}
+
+declare_lint! {
+ /// The `trivial_casts` lint detects trivial casts which could be replaced
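+    ///
+    /// For example, the code above can borrow the array instead of boxing it
+    /// (a sketch of the fix):
+    ///
+    /// ```rust
+    /// fn main() {
+    ///     let a = (&[1, 2, 3]).len();
+    /// }
+    /// ```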
+ /// with coercion, which may require [type ascription] or a temporary
+ /// variable.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(trivial_casts)]
+ /// let x: &u32 = &42;
+ /// let y = x as *const u32;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A trivial cast is a cast `e as T` where `e` has type `U` and `U` is a
+    /// subtype of `T`. This type of cast is usually unnecessary, as it can
+    /// usually be inferred.
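+    ///
+    /// For example, the cast in the example above can be replaced with a
+    /// plain coercion (a sketch of the fix):
+    ///
+    /// ```rust
+    /// let x: &u32 = &42;
+    /// let y: *const u32 = x; // coercion instead of `as` cast
+    /// ```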
+ ///
+ /// This lint is "allow" by default because there are situations, such as
+ /// with FFI interfaces or complex type aliases, where it triggers
+ /// incorrectly, or in situations where it will be more difficult to
+ /// clearly express the intent. It may be possible that this will become a
+ /// warning in the future, possibly with [type ascription] providing a
+ /// convenient way to work around the current issues. See [RFC 401] for
+ /// historical context.
+ ///
+ /// [type ascription]: https://github.com/rust-lang/rust/issues/23416
+ /// [RFC 401]: https://github.com/rust-lang/rfcs/blob/master/text/0401-coercions.md
+ pub TRIVIAL_CASTS,
+ Allow,
+ "detects trivial casts which could be removed"
+}
+
+declare_lint! {
+ /// The `trivial_numeric_casts` lint detects trivial numeric casts of types
+ /// which could be removed.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(trivial_numeric_casts)]
+ /// let x = 42_i32 as i32;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A trivial numeric cast is a cast of a numeric type to the same numeric
+ /// type. This type of cast is usually unnecessary.
+ ///
+ /// This lint is "allow" by default because there are situations, such as
+ /// with FFI interfaces or complex type aliases, where it triggers
+ /// incorrectly, or in situations where it will be more difficult to
+ /// clearly express the intent. It may be possible that this will become a
+ /// warning in the future, possibly with [type ascription] providing a
+ /// convenient way to work around the current issues. See [RFC 401] for
+ /// historical context.
+ ///
+ /// [type ascription]: https://github.com/rust-lang/rust/issues/23416
+ /// [RFC 401]: https://github.com/rust-lang/rfcs/blob/master/text/0401-coercions.md
+ pub TRIVIAL_NUMERIC_CASTS,
+ Allow,
+ "detects trivial casts of numeric types which could be removed"
+}
+
+declare_lint! {
+ /// The `private_in_public` lint detects private items in public
+ /// interfaces not caught by the old implementation.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// # #![allow(unused)]
+ /// struct SemiPriv;
+ ///
+ /// mod m1 {
+ /// struct Priv;
+ /// impl super::SemiPriv {
+ /// pub fn f(_: Priv) {}
+ /// }
+ /// }
+ /// # fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The visibility rules are intended to prevent exposing private items in
+ /// public interfaces. This is a [future-incompatible] lint to transition
+ /// this to a hard error in the future. See [issue #34537] for more
+ /// details.
+ ///
+ /// [issue #34537]: https://github.com/rust-lang/rust/issues/34537
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub PRIVATE_IN_PUBLIC,
+ Warn,
+ "detect private items in public interfaces not caught by the old implementation",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #34537 <https://github.com/rust-lang/rust/issues/34537>",
+ };
+}
+
+declare_lint! {
+ /// The `exported_private_dependencies` lint detects private dependencies
+ /// that are exposed in a public interface.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs-dependency)
+ /// pub fn foo() -> Option<some_private_dependency::Thing> {
+ /// None
+ /// }
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: type `bar::Thing` from private dependency 'bar' in public interface
+ /// --> src/lib.rs:3:1
+ /// |
+ /// 3 | pub fn foo() -> Option<bar::Thing> {
+ /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ /// |
+ /// = note: `#[warn(exported_private_dependencies)]` on by default
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// Dependencies can be marked as "private" to indicate that they are not
+ /// exposed in the public interface of a crate. This can be used by Cargo
+ /// to independently resolve those dependencies because it can assume it
+ /// does not need to unify them with other packages using that same
+ /// dependency. This lint is an indication of a violation of that
+ /// contract.
+ ///
+ /// To fix this, avoid exposing the dependency in your public interface.
+ /// Or, switch the dependency to a public dependency.
+ ///
+ /// Note that support for this is only available on the nightly channel.
+ /// See [RFC 1977] for more details, as well as the [Cargo documentation].
+ ///
+ /// [RFC 1977]: https://github.com/rust-lang/rfcs/blob/master/text/1977-public-private-dependencies.md
+ /// [Cargo documentation]: https://doc.rust-lang.org/nightly/cargo/reference/unstable.html#public-dependency
+ pub EXPORTED_PRIVATE_DEPENDENCIES,
+ Warn,
+ "public interface leaks type from a private dependency"
+}
+
+declare_lint! {
+ /// The `pub_use_of_private_extern_crate` lint detects a specific
+ /// situation of re-exporting a private `extern crate`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// extern crate core;
+ /// pub use core as reexported_core;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A public `use` declaration should not be used to publicly re-export a
+ /// private `extern crate`. `pub extern crate` should be used instead.
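+    ///
+    /// A sketch of the fix using `pub extern crate`:
+    ///
+    /// ```rust
+    /// pub extern crate core as reexported_core;
+    /// # fn main() {}
+    /// ```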
+ ///
+ /// This was historically allowed, but is not the intended behavior
+ /// according to the visibility rules. This is a [future-incompatible]
+ /// lint to transition this to a hard error in the future. See [issue
+ /// #34537] for more details.
+ ///
+ /// [issue #34537]: https://github.com/rust-lang/rust/issues/34537
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub PUB_USE_OF_PRIVATE_EXTERN_CRATE,
+ Deny,
+ "detect public re-exports of private extern crates",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #34537 <https://github.com/rust-lang/rust/issues/34537>",
+ };
+}
+
+declare_lint! {
+ /// The `invalid_type_param_default` lint detects type parameter defaults
+ /// erroneously allowed in an invalid location.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// fn foo<T=i32>(t: T) {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Default type parameters were only intended to be allowed in certain
+ /// situations, but historically the compiler allowed them everywhere.
+ /// This is a [future-incompatible] lint to transition this to a hard
+ /// error in the future. See [issue #36887] for more details.
+ ///
+ /// [issue #36887]: https://github.com/rust-lang/rust/issues/36887
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub INVALID_TYPE_PARAM_DEFAULT,
+ Deny,
+ "type parameter default erroneously allowed in invalid location",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #36887 <https://github.com/rust-lang/rust/issues/36887>",
+ };
+}
+
+declare_lint! {
+ /// The `renamed_and_removed_lints` lint detects lints that have been
+ /// renamed or removed.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![deny(raw_pointer_derive)]
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// To fix this, either remove the lint or use the new name. This can help
+ /// avoid confusion about lints that are no longer valid, and help
+ /// maintain consistency for renamed lints.
+ pub RENAMED_AND_REMOVED_LINTS,
+ Warn,
+ "lints that have been renamed or removed"
+}
+
+declare_lint! {
+ /// The `unaligned_references` lint detects unaligned references to fields
+ /// of [packed] structs.
+ ///
+ /// [packed]: https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers
+ ///
+ /// ### Example
+ ///
+ /// ```compile_fail
+ /// #[repr(packed)]
+ /// pub struct Foo {
+ /// field1: u64,
+ /// field2: u8,
+ /// }
+ ///
+ /// fn main() {
+ /// unsafe {
+ /// let foo = Foo { field1: 0, field2: 0 };
+ /// let _ = &foo.field1;
+ /// println!("{}", foo.field1); // An implicit `&` is added here, triggering the lint.
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Creating a reference to an insufficiently aligned packed field is [undefined behavior] and
+ /// should be disallowed. Using an `unsafe` block does not change anything about this. Instead,
+ /// the code should do a copy of the data in the packed field or use raw pointers and unaligned
+ /// accesses. See [issue #82523] for more information.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ /// [issue #82523]: https://github.com/rust-lang/rust/issues/82523
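+    ///
+    /// A sketch of the copy-based fix for the example above:
+    ///
+    /// ```rust
+    /// #[repr(packed)]
+    /// pub struct Foo {
+    ///     field1: u64,
+    ///     field2: u8,
+    /// }
+    ///
+    /// fn main() {
+    ///     let foo = Foo { field1: 0, field2: 0 };
+    ///     let field1 = foo.field1; // copy the value out instead of referencing it
+    ///     println!("{}", field1);
+    /// }
+    /// ```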
+ pub UNALIGNED_REFERENCES,
+ Deny,
+ "detects unaligned references to fields of packed structs",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #82523 <https://github.com/rust-lang/rust/issues/82523>",
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
+ };
+ report_in_external_macro
+}
+
+declare_lint! {
+ /// The `const_item_mutation` lint detects attempts to mutate a `const`
+ /// item.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// const FOO: [i32; 1] = [0];
+ ///
+ /// fn main() {
+ /// FOO[0] = 1;
+ /// // This will print "[0]".
+ /// println!("{:?}", FOO);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Trying to directly mutate a `const` item is almost always a mistake.
+ /// What is happening in the example above is that a temporary copy of the
+ /// `const` is mutated, but the original `const` is not. Each time you
+ /// refer to the `const` by name (such as `FOO` in the example above), a
+ /// separate copy of the value is inlined at that location.
+ ///
+ /// This lint checks for writing directly to a field (`FOO.field =
+ /// some_value`) or array entry (`FOO[0] = val`), or taking a mutable
+ /// reference to the const item (`&mut FOO`), including through an
+ /// autoderef (`FOO.some_mut_self_method()`).
+ ///
+ /// There are various alternatives depending on what you are trying to
+ /// accomplish:
+ ///
+ /// * First, always reconsider using mutable globals, as they can be
+ /// difficult to use correctly, and can make the code more difficult to
+ /// use or understand.
+ /// * If you are trying to perform a one-time initialization of a global:
+ /// * If the value can be computed at compile-time, consider using
+ /// const-compatible values (see [Constant Evaluation]).
+ /// * For more complex single-initialization cases, consider using a
+ /// third-party crate, such as [`lazy_static`] or [`once_cell`].
+ /// * If you are using the [nightly channel], consider the new
+ /// [`lazy`] module in the standard library.
+ /// * If you truly need a mutable global, consider using a [`static`],
+ /// which has a variety of options:
+ /// * Simple data types can be directly defined and mutated with an
+    ///     [`atomic`] type (a sketch of this option is shown below).
+ /// * More complex types can be placed in a synchronization primitive
+ /// like a [`Mutex`], which can be initialized with one of the options
+ /// listed above.
+ /// * A [mutable `static`] is a low-level primitive, requiring unsafe.
+    ///     Typically this should be avoided in favor of something
+    ///     higher-level like one of the above.
+ ///
+ /// [Constant Evaluation]: https://doc.rust-lang.org/reference/const_eval.html
+ /// [`static`]: https://doc.rust-lang.org/reference/items/static-items.html
+ /// [mutable `static`]: https://doc.rust-lang.org/reference/items/static-items.html#mutable-statics
+ /// [`lazy`]: https://doc.rust-lang.org/nightly/std/lazy/index.html
+ /// [`lazy_static`]: https://crates.io/crates/lazy_static
+ /// [`once_cell`]: https://crates.io/crates/once_cell
+ /// [`atomic`]: https://doc.rust-lang.org/std/sync/atomic/index.html
+ /// [`Mutex`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html
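+    ///
+    /// For example, a sketch of the `static` with an [`atomic`] type option
+    /// from the list above:
+    ///
+    /// ```rust
+    /// use std::sync::atomic::{AtomicI32, Ordering};
+    ///
+    /// static FOO: AtomicI32 = AtomicI32::new(0);
+    ///
+    /// fn main() {
+    ///     FOO.store(1, Ordering::Relaxed);
+    ///     println!("{}", FOO.load(Ordering::Relaxed));
+    /// }
+    /// ```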
+ pub CONST_ITEM_MUTATION,
+ Warn,
+ "detects attempts to mutate a `const` item",
+}
+
+declare_lint! {
+ /// The `patterns_in_fns_without_body` lint detects `mut` identifier
+ /// patterns as a parameter in functions without a body.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// trait Trait {
+ /// fn foo(mut arg: u8);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// To fix this, remove `mut` from the parameter in the trait definition;
+ /// it can be used in the implementation. That is, the following is OK:
+ ///
+ /// ```rust
+ /// trait Trait {
+ /// fn foo(arg: u8); // Removed `mut` here
+ /// }
+ ///
+ /// impl Trait for i32 {
+ /// fn foo(mut arg: u8) { // `mut` here is OK
+ ///
+ /// }
+ /// }
+ /// ```
+ ///
+ /// Trait definitions can define functions without a body to specify a
+ /// function that implementors must define. The parameter names in the
+ /// body-less functions are only allowed to be `_` or an [identifier] for
+ /// documentation purposes (only the type is relevant). Previous versions
+ /// of the compiler erroneously allowed [identifier patterns] with the
+ /// `mut` keyword, but this was not intended to be allowed. This is a
+ /// [future-incompatible] lint to transition this to a hard error in the
+ /// future. See [issue #35203] for more details.
+ ///
+ /// [identifier]: https://doc.rust-lang.org/reference/identifiers.html
+ /// [identifier patterns]: https://doc.rust-lang.org/reference/patterns.html#identifier-patterns
+ /// [issue #35203]: https://github.com/rust-lang/rust/issues/35203
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub PATTERNS_IN_FNS_WITHOUT_BODY,
+ Deny,
+ "patterns in functions without body were erroneously allowed",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #35203 <https://github.com/rust-lang/rust/issues/35203>",
+ };
+}
+
+declare_lint! {
+ /// The `missing_fragment_specifier` lint is issued when an unused pattern in a
+ /// `macro_rules!` macro definition has a meta-variable (e.g. `$e`) that is not
+ /// followed by a fragment specifier (e.g. `:expr`).
+ ///
+ /// This warning can always be fixed by removing the unused pattern in the
+ /// `macro_rules!` macro definition.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// macro_rules! foo {
+ /// () => {};
+ /// ($name) => { };
+ /// }
+ ///
+ /// fn main() {
+ /// foo!();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// To fix this, remove the unused pattern from the `macro_rules!` macro definition:
+ ///
+ /// ```rust
+ /// macro_rules! foo {
+ /// () => {};
+ /// }
+ /// fn main() {
+ /// foo!();
+ /// }
+ /// ```
+ pub MISSING_FRAGMENT_SPECIFIER,
+ Deny,
+ "detects missing fragment specifiers in unused `macro_rules!` patterns",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #40107 <https://github.com/rust-lang/rust/issues/40107>",
+ };
+}
+
+declare_lint! {
+ /// The `late_bound_lifetime_arguments` lint detects generic lifetime
+ /// arguments in path segments with late bound lifetime parameters.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// struct S;
+ ///
+ /// impl S {
+ /// fn late<'a, 'b>(self, _: &'a u8, _: &'b u8) {}
+ /// }
+ ///
+ /// fn main() {
+ /// S.late::<'static>(&0, &0);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It is not clear how to provide arguments for early-bound lifetime
+ /// parameters if they are intermixed with late-bound parameters in the
+ /// same list. For now, providing any explicit arguments will trigger this
+ /// lint if late-bound parameters are present, so in the future a solution
+ /// can be adopted without hitting backward compatibility issues. This is
+ /// a [future-incompatible] lint to transition this to a hard error in the
+ /// future. See [issue #42868] for more details, along with a description
+ /// of the difference between early and late-bound parameters.
+ ///
+ /// [issue #42868]: https://github.com/rust-lang/rust/issues/42868
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub LATE_BOUND_LIFETIME_ARGUMENTS,
+ Warn,
+ "detects generic lifetime arguments in path segments with late bound lifetime parameters",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #42868 <https://github.com/rust-lang/rust/issues/42868>",
+ };
+}
+
+declare_lint! {
+ /// The `order_dependent_trait_objects` lint detects a trait coherence
+ /// violation that would allow creating two trait impls for the same
+ /// dynamic trait object involving marker traits.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// pub trait Trait {}
+ ///
+ /// impl Trait for dyn Send + Sync { }
+ /// impl Trait for dyn Sync + Send { }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A previous bug caused the compiler to interpret traits with different
+ /// orders (such as `Send + Sync` and `Sync + Send`) as distinct types
+ /// when they were intended to be treated the same. This allowed code to
+ /// define separate trait implementations when there should be a coherence
+ /// error. This is a [future-incompatible] lint to transition this to a
+ /// hard error in the future. See [issue #56484] for more details.
+ ///
+ /// [issue #56484]: https://github.com/rust-lang/rust/issues/56484
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub ORDER_DEPENDENT_TRAIT_OBJECTS,
+ Deny,
+ "trait-object types were treated as different depending on marker-trait order",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #56484 <https://github.com/rust-lang/rust/issues/56484>",
+ };
+}
+
+declare_lint! {
+ /// The `coherence_leak_check` lint detects conflicting implementations of
+ /// a trait that are only distinguished by the old leak-check code.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// trait SomeTrait { }
+ /// impl SomeTrait for for<'a> fn(&'a u8) { }
+ /// impl<'a> SomeTrait for fn(&'a u8) { }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In the past, the compiler would accept trait implementations for
+ /// identical functions that differed only in where the lifetime binder
+ /// appeared. Due to a change in the borrow checker implementation to fix
+ /// several bugs, this is no longer allowed. However, since this affects
+ /// existing code, this is a [future-incompatible] lint to transition this
+ /// to a hard error in the future.
+ ///
+ /// Code relying on this pattern should introduce "[newtypes]",
+ /// like `struct Foo(for<'a> fn(&'a u8))`.
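+ ///
+ /// For example, a minimal sketch of that workaround (the type names
+ /// here are illustrative):
+ ///
+ /// ```rust
+ /// trait SomeTrait { }
+ ///
+ /// // Each newtype is a distinct type, so the impls no longer overlap:
+ /// struct HigherRanked(for<'a> fn(&'a u8));
+ /// struct SingleLifetime<'a>(fn(&'a u8));
+ ///
+ /// impl SomeTrait for HigherRanked { }
+ /// impl<'a> SomeTrait for SingleLifetime<'a> { }
+ /// ```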
+ ///
+ /// See [issue #56105] for more details.
+ ///
+ /// [issue #56105]: https://github.com/rust-lang/rust/issues/56105
+ /// [newtypes]: https://doc.rust-lang.org/book/ch19-04-advanced-types.html#using-the-newtype-pattern-for-type-safety-and-abstraction
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub COHERENCE_LEAK_CHECK,
+ Warn,
+ "distinct impls distinguished only by the leak-check code",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #56105 <https://github.com/rust-lang/rust/issues/56105>",
+ };
+}
+
+declare_lint! {
+ /// The `deprecated` lint detects use of deprecated items.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #[deprecated]
+ /// fn foo() {}
+ ///
+ /// fn bar() {
+ /// foo();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Items may be marked "deprecated" with the [`deprecated` attribute] to
+ /// indicate that they should no longer be used. The attribute usually
+ /// includes a note on what to use instead; otherwise, check the item's
+ /// documentation for a replacement.
+ ///
+ /// [`deprecated` attribute]: https://doc.rust-lang.org/reference/attributes/diagnostics.html#the-deprecated-attribute
+ pub DEPRECATED,
+ Warn,
+ "detects use of deprecated items",
+ report_in_external_macro
+}
+
+declare_lint! {
+ /// The `unused_unsafe` lint detects unnecessary use of an `unsafe` block.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// unsafe {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// If nothing within the block requires `unsafe`, then remove the
+ /// `unsafe` marker because it is not required and may cause confusion.
+ pub UNUSED_UNSAFE,
+ Warn,
+ "unnecessary use of an `unsafe` block"
+}
+
+declare_lint! {
+ /// The `unused_mut` lint detects `mut` variables which don't need to be
+ /// mutable.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let mut x = 5;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The preferred style is to only mark variables as `mut` if it is
+ /// required.
+ pub UNUSED_MUT,
+ Warn,
+ "detect mut variables which don't need to be mutable"
+}
+
+declare_lint! {
+ /// The `unconditional_recursion` lint detects functions that cannot
+ /// return without calling themselves.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// fn foo() {
+ /// foo();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// It is usually a mistake to have a recursive call that does not have
+ /// some condition to cause it to terminate. If you really intend to have
+ /// an infinite loop, using a `loop` expression is recommended.
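+ ///
+ /// For example, if an infinite loop really is intended:
+ ///
+ /// ```rust
+ /// fn foo() {
+ /// // An explicit `loop` states the intent without recursing:
+ /// loop {}
+ /// }
+ /// ```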
+ pub UNCONDITIONAL_RECURSION,
+ Warn,
+ "functions that cannot return without calling themselves"
+}
+
+declare_lint! {
+ /// The `single_use_lifetimes` lint detects lifetimes that are only used
+ /// once.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(single_use_lifetimes)]
+ ///
+ /// fn foo<'a>(x: &'a u32) {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Specifying an explicit lifetime like `'a` in a function or `impl`
+ /// should only be done to link together two things. Otherwise, you should
+ /// just use `'_` to indicate that the lifetime is not linked to anything,
+ /// or elide the lifetime altogether if possible.
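+ ///
+ /// For example, both of these avoid naming a single-use lifetime (the
+ /// function names are illustrative):
+ ///
+ /// ```rust
+ /// // Elide the lifetime entirely:
+ /// fn elided(x: &u32) {}
+ ///
+ /// // Or use the anonymous lifetime `'_` where one must be written:
+ /// fn anonymous(x: &'_ u32) {}
+ /// ```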
+ ///
+ /// This lint is "allow" by default because it was introduced at a time
+ /// when `'_` and elided lifetimes were first being introduced, and this
+ /// lint would be too noisy. Also, there are some known false positives
+ /// that it produces. See [RFC 2115] for historical context, and [issue
+ /// #44752] for more details.
+ ///
+ /// [RFC 2115]: https://github.com/rust-lang/rfcs/blob/master/text/2115-argument-lifetimes.md
+ /// [issue #44752]: https://github.com/rust-lang/rust/issues/44752
+ pub SINGLE_USE_LIFETIMES,
+ Allow,
+ "detects lifetime parameters that are only used once"
+}
+
+declare_lint! {
+ /// The `unused_lifetimes` lint detects lifetime parameters that are never
+ /// used.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #[deny(unused_lifetimes)]
+ ///
+ /// pub fn foo<'a>() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Unused lifetime parameters may signal a mistake or unfinished code.
+ /// Consider removing the parameter.
+ pub UNUSED_LIFETIMES,
+ Allow,
+ "detects lifetime parameters that are never used"
+}
+
+declare_lint! {
+ /// The `tyvar_behind_raw_pointer` lint detects a raw pointer to an
+ /// inference variable.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,edition2015
+ /// // edition 2015
+ /// let data = std::ptr::null();
+ /// let _ = &data as *const *const ();
+ ///
+ /// if data.is_null() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This kind of inference was previously allowed, but with the future
+ /// arrival of [arbitrary self types], this can introduce ambiguity. To
+ /// resolve this, use an explicit type instead of relying on type
+ /// inference.
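+ ///
+ /// For example, a sketch of the fix for the example above:
+ ///
+ /// ```rust
+ /// // Annotate the type instead of leaving it to inference:
+ /// let data: *const () = std::ptr::null();
+ ///
+ /// if data.is_null() {}
+ /// ```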
+ ///
+ /// This is a [future-incompatible] lint to transition this to a hard
+ /// error. See [issue #46906] for more details. It is already a hard
+ /// error in the 2018 edition, and is "warn" by default in the 2015
+ /// edition.
+ ///
+ /// [arbitrary self types]: https://github.com/rust-lang/rust/issues/44874
+ /// [issue #46906]: https://github.com/rust-lang/rust/issues/46906
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub TYVAR_BEHIND_RAW_POINTER,
+ Warn,
+ "raw pointer to an inference variable",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #46906 <https://github.com/rust-lang/rust/issues/46906>",
+ reason: FutureIncompatibilityReason::EditionError(Edition::Edition2018),
+ };
+}
+
+declare_lint! {
+ /// The `elided_lifetimes_in_paths` lint detects the use of hidden
+ /// lifetime parameters.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(elided_lifetimes_in_paths)]
+ /// struct Foo<'a> {
+ /// x: &'a u32
+ /// }
+ ///
+ /// fn foo(x: &Foo) {
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Elided lifetime parameters can make it difficult to see at a glance
+ /// that borrowing is occurring. This lint ensures that lifetime
+ /// parameters are always explicitly stated, even if it is the `'_`
+ /// [placeholder lifetime].
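+ ///
+ /// For example, the explicit form of the example above:
+ ///
+ /// ```rust
+ /// struct Foo<'a> {
+ /// x: &'a u32
+ /// }
+ ///
+ /// // The `'_` placeholder makes the borrow visible in the path:
+ /// fn foo(x: &Foo<'_>) {
+ /// }
+ /// ```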
+ ///
+ /// This lint is "allow" by default because it has some known issues, and
+ /// may require a significant transition for old code.
+ ///
+ /// [placeholder lifetime]: https://doc.rust-lang.org/reference/lifetime-elision.html#lifetime-elision-in-functions
+ pub ELIDED_LIFETIMES_IN_PATHS,
+ Allow,
+ "hidden lifetime parameters in types are deprecated",
+ crate_level_only
+}
+
+declare_lint! {
+ /// The `bare_trait_objects` lint suggests using `dyn Trait` for trait
+ /// objects.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,edition2018
+ /// trait Trait { }
+ ///
+ /// fn takes_trait_object(_: Box<Trait>) {
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Without the `dyn` indicator, it can be ambiguous or confusing when
+ /// reading code as to whether or not you are looking at a trait object.
+ /// The `dyn` keyword makes it explicit, and adds a symmetry to contrast
+ /// with [`impl Trait`].
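+ ///
+ /// For example, the fix for the example above:
+ ///
+ /// ```rust,edition2018
+ /// trait Trait { }
+ ///
+ /// // `dyn` makes the trait object explicit:
+ /// fn takes_trait_object(_: Box<dyn Trait>) {
+ /// }
+ /// ```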
+ ///
+ /// [`impl Trait`]: https://doc.rust-lang.org/book/ch10-02-traits.html#traits-as-parameters
+ pub BARE_TRAIT_OBJECTS,
+ Warn,
+ "suggest using `dyn Trait` for trait objects",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/warnings-promoted-to-error.html>",
+ reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ };
+}
+
+declare_lint! {
+ /// The `absolute_paths_not_starting_with_crate` lint detects fully
+ /// qualified paths that start with a module name instead of `crate`,
+ /// `self`, or an extern crate name.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,edition2015,compile_fail
+ /// #![deny(absolute_paths_not_starting_with_crate)]
+ ///
+ /// mod foo {
+ /// pub fn bar() {}
+ /// }
+ ///
+ /// fn main() {
+ /// ::foo::bar();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Rust [editions] allow the language to evolve without breaking
+ /// backwards compatibility. This lint catches code that uses absolute
+ /// paths in the style of the 2015 edition. In the 2015 edition, absolute
+ /// paths (those starting with `::`) refer to either the crate root or an
+ /// external crate. In the 2018 edition it was changed so that they only
+ /// refer to external crates. The path prefix `crate::` should be used
+ /// instead to reference items from the crate root.
+ ///
+ /// If you switch the compiler from the 2015 to 2018 edition without
+ /// updating the code, then it will fail to compile if the old style paths
+ /// are used. You can manually change the paths to use the `crate::`
+ /// prefix to transition to the 2018 edition.
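+ ///
+ /// For example, the 2018-edition spelling of the example above:
+ ///
+ /// ```rust,edition2018
+ /// mod foo {
+ /// pub fn bar() {}
+ /// }
+ ///
+ /// fn main() {
+ /// // `crate::` names the crate root explicitly:
+ /// crate::foo::bar();
+ /// }
+ /// ```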
+ ///
+ /// This lint helps solve the problem automatically. It is "allow" by default
+ /// because the code is perfectly valid in the 2015 edition. The [`cargo
+ /// fix`] tool with the `--edition` flag will switch this lint to "warn"
+ /// and automatically apply the suggested fix from the compiler. This
+ /// provides a completely automated way to update old code to the 2018
+ /// edition.
+ ///
+ /// [editions]: https://doc.rust-lang.org/edition-guide/
+ /// [`cargo fix`]: https://doc.rust-lang.org/cargo/commands/cargo-fix.html
+ pub ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE,
+ Allow,
+ "fully qualified paths that start with a module name \
+ instead of `crate`, `self`, or an extern crate name",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #53130 <https://github.com/rust-lang/rust/issues/53130>",
+ reason: FutureIncompatibilityReason::EditionError(Edition::Edition2018),
+ };
+}
+
+declare_lint! {
+ /// The `illegal_floating_point_literal_pattern` lint detects
+ /// floating-point literals used in patterns.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// let x = 42.0;
+ ///
+ /// match x {
+ /// 5.0 => {}
+ /// _ => {}
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Previous versions of the compiler accepted floating-point literals in
+ /// patterns, but it was later determined this was a mistake. The
+ /// semantics of comparing floating-point values may not be clear in a
+ /// pattern when contrasted with "structural equality". Typically you can
+ /// work around this by using a [match guard], such as:
+ ///
+ /// ```rust
+ /// # let x = 42.0;
+ ///
+ /// match x {
+ /// y if y == 5.0 => {}
+ /// _ => {}
+ /// }
+ /// ```
+ ///
+ /// This is a [future-incompatible] lint to transition this to a hard
+ /// error in the future. See [issue #41620] for more details.
+ ///
+ /// [issue #41620]: https://github.com/rust-lang/rust/issues/41620
+ /// [match guard]: https://doc.rust-lang.org/reference/expressions/match-expr.html#match-guards
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub ILLEGAL_FLOATING_POINT_LITERAL_PATTERN,
+ Warn,
+ "floating-point literals cannot be used in patterns",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #41620 <https://github.com/rust-lang/rust/issues/41620>",
+ };
+}
+
+declare_lint! {
+ /// The `unstable_name_collisions` lint detects that you have used a name
+ /// that the standard library plans to add in the future.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// trait MyIterator : Iterator {
+ /// // is_sorted is an unstable method that already exists on the Iterator trait
+ /// fn is_sorted(self) -> bool where Self: Sized {true}
+ /// }
+ ///
+ /// impl<T: ?Sized> MyIterator for T where T: Iterator { }
+ ///
+ /// let x = vec![1, 2, 3];
+ /// let _ = x.iter().is_sorted();
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// When new methods are added to traits in the standard library, they are
+ /// usually added in an "unstable" form which is only available on the
+ /// [nightly channel] with a [`feature` attribute]. If there is any
+ /// pre-existing code which extends a trait to have a method with the same
+ /// name, then the names will collide. In the future, when the method is
+ /// stabilized, this will cause an error due to the ambiguity. This lint
+ /// is an early-warning to let you know that there may be a collision in
+ /// the future. This can be avoided by adding type annotations to
+ /// disambiguate which trait method you intend to call, such as
+ /// `MyIterator::is_sorted(my_iter)`, or by renaming or removing the method.
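+ ///
+ /// For example, a sketch of that disambiguation for the example above:
+ ///
+ /// ```rust
+ /// trait MyIterator : Iterator {
+ /// fn is_sorted(self) -> bool where Self: Sized {true}
+ /// }
+ ///
+ /// impl<T: ?Sized> MyIterator for T where T: Iterator { }
+ ///
+ /// let x = vec![1, 2, 3];
+ /// // Fully-qualified syntax names the intended trait, so a future
+ /// // `Iterator::is_sorted` cannot change which method is called:
+ /// let _ = MyIterator::is_sorted(x.iter());
+ /// ```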
+ ///
+ /// [nightly channel]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+ /// [`feature` attribute]: https://doc.rust-lang.org/nightly/unstable-book/
+ pub UNSTABLE_NAME_COLLISIONS,
+ Warn,
+ "detects name collision with an existing but unstable method",
+ @future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::Custom(
+ "once this associated item is added to the standard library, \
+ the ambiguity may cause an error or change in behavior!"
+ ),
+ reference: "issue #48919 <https://github.com/rust-lang/rust/issues/48919>",
+ // Note: this item represents future incompatibility of all unstable functions in the
+ // standard library, and thus should never be removed or changed to an error.
+ };
+}
+
+declare_lint! {
+ /// The `irrefutable_let_patterns` lint detects [irrefutable patterns]
+ /// in [`if let`]s, [`while let`]s, and `if let` guards.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// if let _ = 123 {
+ /// println!("always runs!");
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// There usually isn't a reason to have an irrefutable pattern in an
+ /// `if let` or `while let` statement, because the pattern will always match
+ /// successfully. A [`let`] or [`loop`] statement will suffice. However,
+ /// when generating code with a macro, forbidding irrefutable patterns
+ /// would require awkward workarounds in situations where the macro
+ /// doesn't know if the pattern is refutable or not. This lint allows
+ /// macros to accept this form, while alerting for a possibly incorrect
+ /// use in normal code.
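+ ///
+ /// For example, the example above can simply use `let`:
+ ///
+ /// ```rust
+ /// // An irrefutable pattern belongs in a plain `let` binding:
+ /// let _ = 123;
+ /// println!("always runs!");
+ /// ```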
+ ///
+ /// See [RFC 2086] for more details.
+ ///
+ /// [irrefutable patterns]: https://doc.rust-lang.org/reference/patterns.html#refutability
+ /// [`if let`]: https://doc.rust-lang.org/reference/expressions/if-expr.html#if-let-expressions
+ /// [`while let`]: https://doc.rust-lang.org/reference/expressions/loop-expr.html#predicate-pattern-loops
+ /// [`let`]: https://doc.rust-lang.org/reference/statements.html#let-statements
+ /// [`loop`]: https://doc.rust-lang.org/reference/expressions/loop-expr.html#infinite-loops
+ /// [RFC 2086]: https://github.com/rust-lang/rfcs/blob/master/text/2086-allow-if-let-irrefutables.md
+ pub IRREFUTABLE_LET_PATTERNS,
+ Warn,
+ "detects irrefutable patterns in `if let` and `while let` statements"
+}
+
+declare_lint! {
+ /// The `unused_labels` lint detects [labels] that are never used.
+ ///
+ /// [labels]: https://doc.rust-lang.org/reference/expressions/loop-expr.html#loop-labels
+ ///
+ /// ### Example
+ ///
+ /// ```rust,no_run
+ /// 'unused_label: loop {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Unused labels may signal a mistake or unfinished code. To silence the
+ /// warning for the individual label, prefix it with an underscore such as
+ /// `'_my_label:`.
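+ ///
+ /// For example:
+ ///
+ /// ```rust,no_run
+ /// // The leading underscore silences the warning:
+ /// '_unused_label: loop {}
+ /// ```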
+ pub UNUSED_LABELS,
+ Warn,
+ "detects labels that are never used"
+}
+
+declare_lint! {
+ /// The `where_clauses_object_safety` lint detects violations of [object safety]
+ /// in [where clauses].
+ ///
+ /// [object safety]: https://doc.rust-lang.org/reference/items/traits.html#object-safety
+ /// [where clauses]: https://doc.rust-lang.org/reference/items/generics.html#where-clauses
+ ///
+ /// ### Example
+ ///
+ /// ```rust,no_run
+ /// trait Trait {}
+ ///
+ /// trait X { fn foo(&self) where Self: Trait; }
+ ///
+ /// impl X for () { fn foo(&self) {} }
+ ///
+ /// impl Trait for dyn X {}
+ ///
+ /// // Segfault at opt-level 0, SIGILL otherwise.
+ /// pub fn main() { <dyn X as X>::foo(&()); }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The compiler previously allowed these object-unsafe bounds, which was
+ /// incorrect. This is a [future-incompatible] lint to transition this to
+ /// a hard error in the future. See [issue #51443] for more details.
+ ///
+ /// [issue #51443]: https://github.com/rust-lang/rust/issues/51443
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub WHERE_CLAUSES_OBJECT_SAFETY,
+ Warn,
+ "checks the object safety of where clauses",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #51443 <https://github.com/rust-lang/rust/issues/51443>",
+ };
+}
+
+declare_lint! {
+ /// The `proc_macro_derive_resolution_fallback` lint detects proc macro
+ /// derives using inaccessible names from parent modules.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (proc-macro)
+ /// // foo.rs
+ /// #![crate_type = "proc-macro"]
+ ///
+ /// extern crate proc_macro;
+ ///
+ /// use proc_macro::*;
+ ///
+ /// #[proc_macro_derive(Foo)]
+ /// pub fn foo1(a: TokenStream) -> TokenStream {
+ /// drop(a);
+ /// "mod __bar { static mut BAR: Option<Something> = None; }".parse().unwrap()
+ /// }
+ /// ```
+ ///
+ /// ```rust,ignore (needs-dependency)
+ /// // bar.rs
+ /// #[macro_use]
+ /// extern crate foo;
+ ///
+ /// struct Something;
+ ///
+ /// #[derive(Foo)]
+ /// struct Another;
+ ///
+ /// fn main() {}
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: cannot find type `Something` in this scope
+ /// --> src/main.rs:8:10
+ /// |
+ /// 8 | #[derive(Foo)]
+ /// | ^^^ names from parent modules are not accessible without an explicit import
+ /// |
+ /// = note: `#[warn(proc_macro_derive_resolution_fallback)]` on by default
+ /// = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ /// = note: for more information, see issue #50504 <https://github.com/rust-lang/rust/issues/50504>
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// If a proc-macro generates a module, the compiler unintentionally
+ /// allowed items in that module to refer to items in the crate root
+ /// without importing them. This is a [future-incompatible] lint to
+ /// transition this to a hard error in the future. See [issue #50504] for
+ /// more details.
+ ///
+ /// [issue #50504]: https://github.com/rust-lang/rust/issues/50504
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub PROC_MACRO_DERIVE_RESOLUTION_FALLBACK,
+ Deny,
+ "detects proc macro derives using inaccessible names from parent modules",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #83583 <https://github.com/rust-lang/rust/issues/83583>",
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
+ };
+}
+
+declare_lint! {
+ /// The `macro_use_extern_crate` lint detects the use of the
+ /// [`macro_use` attribute].
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs extern crate)
+ /// #![deny(macro_use_extern_crate)]
+ ///
+ /// #[macro_use]
+ /// extern crate serde_json;
+ ///
+ /// fn main() {
+ /// let _ = json!{{}};
+ /// }
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// error: deprecated `#[macro_use]` attribute used to import macros should be replaced at use sites with a `use` item to import the macro instead
+ /// --> src/main.rs:3:1
+ /// |
+ /// 3 | #[macro_use]
+ /// | ^^^^^^^^^^^^
+ /// |
+ /// note: the lint level is defined here
+ /// --> src/main.rs:1:9
+ /// |
+ /// 1 | #![deny(macro_use_extern_crate)]
+ /// | ^^^^^^^^^^^^^^^^^^^^^^
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// The [`macro_use` attribute] on an [`extern crate`] item causes
+ /// macros in that external crate to be brought into the prelude of the
+ /// crate, making the macros in scope everywhere. As part of the efforts
+ /// to simplify handling of dependencies in the [2018 edition], the use of
+ /// `extern crate` is being phased out. To bring macros from extern crates
+ /// into scope, it is recommended to use a [`use` import].
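+ ///
+ /// For example, a sketch of that fix (assuming `serde_json` is a
+ /// dependency, as in the example above):
+ ///
+ /// ```rust,ignore (needs extern crate)
+ /// use serde_json::json;
+ ///
+ /// fn main() {
+ /// let _ = json!({});
+ /// }
+ /// ```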
+ ///
+ /// This lint is "allow" by default because this is a stylistic choice
+ /// that has not been settled, see [issue #52043] for more information.
+ ///
+ /// [`macro_use` attribute]: https://doc.rust-lang.org/reference/macros-by-example.html#the-macro_use-attribute
+ /// [`use` import]: https://doc.rust-lang.org/reference/items/use-declarations.html
+ /// [issue #52043]: https://github.com/rust-lang/rust/issues/52043
+ pub MACRO_USE_EXTERN_CRATE,
+ Allow,
+ "the `#[macro_use]` attribute is now deprecated in favor of using macros \
+ via the module system"
+}
+
+declare_lint! {
+ /// The `macro_expanded_macro_exports_accessed_by_absolute_paths` lint
+ /// detects macro-expanded [`macro_export`] macros from the current crate
+ /// that cannot be referred to by absolute paths.
+ ///
+ /// [`macro_export`]: https://doc.rust-lang.org/reference/macros-by-example.html#path-based-scope
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// macro_rules! define_exported {
+ /// () => {
+ /// #[macro_export]
+ /// macro_rules! exported {
+ /// () => {};
+ /// }
+ /// };
+ /// }
+ ///
+ /// define_exported!();
+ ///
+ /// fn main() {
+ /// crate::exported!();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The intent is that all macros marked with the `#[macro_export]`
+ /// attribute are made available in the root of the crate. However, when a
+ /// `macro_rules!` definition is generated by another macro, the macro
+ /// expansion is unable to uphold this rule. This is a
+ /// [future-incompatible] lint to transition this to a hard error in the
+ /// future. See [issue #53495] for more details.
+ ///
+ /// [issue #53495]: https://github.com/rust-lang/rust/issues/53495
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub MACRO_EXPANDED_MACRO_EXPORTS_ACCESSED_BY_ABSOLUTE_PATHS,
+ Deny,
+ "macro-expanded `macro_export` macros from the current crate \
+ cannot be referred to by absolute paths",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #52234 <https://github.com/rust-lang/rust/issues/52234>",
+ };
+ crate_level_only
+}
+
+declare_lint! {
+ /// The `explicit_outlives_requirements` lint detects unnecessary
+ /// lifetime bounds that can be inferred.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// # #![allow(unused)]
+ /// #![deny(explicit_outlives_requirements)]
+ ///
+ /// struct SharedRef<'a, T>
+ /// where
+ /// T: 'a,
+ /// {
+ /// data: &'a T,
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// If a `struct` contains a reference, such as `&'a T`, the compiler
+ /// requires that `T` outlives the lifetime `'a`. This historically
+ /// required writing an explicit lifetime bound to indicate this
+ /// requirement. However, this can be overly explicit, causing clutter and
+ /// unnecessary complexity. The language was changed to automatically
+ /// infer the bound if it is not specified. Specifically, if the struct
+ /// contains a reference, directly or indirectly, to `T` with lifetime
+ /// `'x`, then it will infer that `T: 'x` is a requirement.
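+ ///
+ /// For example, the bound in the example above can simply be dropped:
+ ///
+ /// ```rust
+ /// // `T: 'a` is inferred from the `&'a T` field:
+ /// struct SharedRef<'a, T> {
+ /// data: &'a T,
+ /// }
+ /// ```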
+ ///
+ /// This lint is "allow" by default because it can be noisy for existing
+ /// code that already had these requirements. This is a stylistic choice,
+ /// as it is still valid to explicitly state the bound. It also has some
+ /// false positives that can cause confusion.
+ ///
+ /// See [RFC 2093] for more details.
+ ///
+ /// [RFC 2093]: https://github.com/rust-lang/rfcs/blob/master/text/2093-infer-outlives.md
+ pub EXPLICIT_OUTLIVES_REQUIREMENTS,
+ Allow,
+ "outlives requirements can be inferred"
+}
+
+declare_lint! {
+ /// The `indirect_structural_match` lint detects a `const` in a pattern
+ /// that manually implements [`PartialEq`] and [`Eq`].
+ ///
+ /// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html
+ /// [`Eq`]: https://doc.rust-lang.org/std/cmp/trait.Eq.html
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(indirect_structural_match)]
+ ///
+ /// struct NoDerive(i32);
+ /// impl PartialEq for NoDerive { fn eq(&self, _: &Self) -> bool { false } }
+ /// impl Eq for NoDerive { }
+ /// #[derive(PartialEq, Eq)]
+ /// struct WrapParam<T>(T);
+ /// const WRAP_INDIRECT_PARAM: & &WrapParam<NoDerive> = & &WrapParam(NoDerive(0));
+ /// fn main() {
+ /// match WRAP_INDIRECT_PARAM {
+ /// WRAP_INDIRECT_PARAM => { }
+ /// _ => { }
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The compiler unintentionally accepted this form in the past. This is a
+ /// [future-incompatible] lint to transition this to a hard error in the
+ /// future. See [issue #62411] for a complete description of the problem,
+ /// and some possible solutions.
+ ///
+ /// [issue #62411]: https://github.com/rust-lang/rust/issues/62411
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub INDIRECT_STRUCTURAL_MATCH,
+ Warn,
+ "constant used in pattern contains value of non-structural-match type in a field or a variant",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #62411 <https://github.com/rust-lang/rust/issues/62411>",
+ };
+}
+
+declare_lint! {
+ /// The `deprecated_in_future` lint is internal to rustc and should not be
+ /// used by user code.
+ ///
+ /// This lint is only enabled in the standard library. It works with the
+ /// use of `#[deprecated]` with a `since` field of a version in the future.
+ /// This allows something to be marked as deprecated in a future version,
+ /// and then this lint will ensure that the item is no longer used in the
+ /// standard library. See the [stability documentation] for more details.
+ ///
+ /// [stability documentation]: https://rustc-dev-guide.rust-lang.org/stability.html#deprecated
+ pub DEPRECATED_IN_FUTURE,
+ Allow,
+ "detects use of items that will be deprecated in a future version",
+ report_in_external_macro
+}
+
+declare_lint! {
+ /// The `pointer_structural_match` lint detects pointers used in patterns whose behaviour
+ /// cannot be relied upon across compiler versions and optimization levels.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(pointer_structural_match)]
+ /// fn foo(a: usize, b: usize) -> usize { a + b }
+ /// const FOO: fn(usize, usize) -> usize = foo;
+ /// fn main() {
+ /// match FOO {
+ /// FOO => {},
+ /// _ => {},
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Previous versions of Rust allowed function pointers and wide raw pointers in patterns.
+ /// While these work in many cases as expected by users, it is possible that due to
+ /// optimizations pointers are "not equal to themselves" or pointers to different functions
+ /// compare as equal during runtime. This is because LLVM optimizations can deduplicate
+ /// functions if their bodies are the same, thus also making pointers to these functions point
+ /// to the same location. Additionally, functions may get duplicated if they are instantiated
+ /// in different crates and not deduplicated again via LTO.
+ pub POINTER_STRUCTURAL_MATCH,
+ Allow,
+ "pointers are not structural-match",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #62411 <https://github.com/rust-lang/rust/issues/70861>",
+ };
+}
+
+declare_lint! {
+ /// The `nontrivial_structural_match` lint detects constants that are used in patterns,
+ /// whose type is not structural-match and whose initializer body actually uses values
+ /// that are not structural-match. So `Option<NotStructuralMatch>` is ok if the constant
+ /// is just `None`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(nontrivial_structural_match)]
+ ///
+ /// #[derive(Copy, Clone, Debug)]
+ /// struct NoDerive(u32);
+ /// impl PartialEq for NoDerive { fn eq(&self, _: &Self) -> bool { false } }
+ /// impl Eq for NoDerive { }
+ /// fn main() {
+ /// const INDEX: Option<NoDerive> = [None, Some(NoDerive(10))][0];
+ /// match None { Some(_) => panic!("whoops"), INDEX => dbg!(INDEX), };
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Previous versions of Rust accepted constants in patterns, even if those constants' types
+ /// did not have `PartialEq` derived. Thus the compiler falls back to runtime execution of
+ /// `PartialEq`, which can report that two constants are not equal even if they are
+ /// bit-equivalent.
+ pub NONTRIVIAL_STRUCTURAL_MATCH,
+ Warn,
+ "constant used in pattern of non-structural-match type and the constant's initializer \
+ expression contains values of non-structural-match types",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #73448 <https://github.com/rust-lang/rust/issues/73448>",
+ };
+}
+
+declare_lint! {
+ /// The `ambiguous_associated_items` lint detects ambiguity between
+ /// [associated items] and [enum variants].
+ ///
+ /// [associated items]: https://doc.rust-lang.org/reference/items/associated-items.html
+ /// [enum variants]: https://doc.rust-lang.org/reference/items/enumerations.html
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// enum E {
+ /// V
+ /// }
+ ///
+ /// trait Tr {
+ /// type V;
+ /// fn foo() -> Self::V;
+ /// }
+ ///
+ /// impl Tr for E {
+ /// type V = u8;
+ /// // `Self::V` is ambiguous because it may refer to the associated type or
+ /// // the enum variant.
+ /// fn foo() -> Self::V { 0 }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Previous versions of Rust did not allow accessing enum variants
+ /// through [type aliases]. When this ability was added (see [RFC 2338]), this
+ /// introduced some situations where it can be ambiguous what a type
+ /// was referring to.
+ ///
+ /// To fix this ambiguity, you should use a [qualified path] to explicitly
+ /// state which type to use. For example, in the above example the
+ /// function can be written as `fn foo() -> <Self as Tr>::V { 0 }` to
+ /// specifically refer to the associated type.
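+ ///
+ /// For example, the fixed impl from the example above:
+ ///
+ /// ```rust
+ /// enum E {
+ /// V
+ /// }
+ ///
+ /// trait Tr {
+ /// type V;
+ /// fn foo() -> Self::V;
+ /// }
+ ///
+ /// impl Tr for E {
+ /// type V = u8;
+ /// // The qualified path unambiguously names the associated type:
+ /// fn foo() -> <Self as Tr>::V { 0 }
+ /// }
+ /// ```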
+ ///
+ /// This is a [future-incompatible] lint to transition this to a hard
+ /// error in the future. See [issue #57644] for more details.
+ ///
+ /// [issue #57644]: https://github.com/rust-lang/rust/issues/57644
+ /// [type aliases]: https://doc.rust-lang.org/reference/items/type-aliases.html#type-aliases
+ /// [RFC 2338]: https://github.com/rust-lang/rfcs/blob/master/text/2338-type-alias-enum-variants.md
+ /// [qualified path]: https://doc.rust-lang.org/reference/paths.html#qualified-paths
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub AMBIGUOUS_ASSOCIATED_ITEMS,
+ Deny,
+ "ambiguous associated items",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #57644 <https://github.com/rust-lang/rust/issues/57644>",
+ };
+}
+
+declare_lint! {
+ /// The `soft_unstable` lint detects unstable features that were
+ /// unintentionally allowed on stable.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #[cfg(test)]
+ /// extern crate test;
+ ///
+ /// #[bench]
+ /// fn name(b: &mut test::Bencher) {
+ /// b.iter(|| 123)
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The [`bench` attribute] was accidentally allowed to be specified on
+ /// the [stable release channel]. Turning this to a hard error would have
+ /// broken some projects. This lint allows those projects to continue to
+ /// build correctly when [`--cap-lints`] is used, but otherwise signals an
+ /// error that `#[bench]` should not be used on the stable channel. This
+ /// is a [future-incompatible] lint to transition this to a hard error in
+ /// the future. See [issue #64266] for more details.
+ ///
+ /// [issue #64266]: https://github.com/rust-lang/rust/issues/64266
+ /// [`bench` attribute]: https://doc.rust-lang.org/nightly/unstable-book/library-features/test.html
+ /// [stable release channel]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html
+ /// [`--cap-lints`]: https://doc.rust-lang.org/rustc/lints/levels.html#capping-lints
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub SOFT_UNSTABLE,
+ Deny,
+ "a feature gate that doesn't break dependent crates",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #64266 <https://github.com/rust-lang/rust/issues/64266>",
+ };
+}
+
+declare_lint! {
+ /// The `inline_no_sanitize` lint detects incompatible use of
+ /// [`#[inline(always)]`][inline] and [`#[no_sanitize(...)]`][no_sanitize].
+ ///
+ /// [inline]: https://doc.rust-lang.org/reference/attributes/codegen.html#the-inline-attribute
+ /// [no_sanitize]: https://doc.rust-lang.org/nightly/unstable-book/language-features/no-sanitize.html
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(no_sanitize)]
+ ///
+ /// #[inline(always)]
+ /// #[no_sanitize(address)]
+ /// fn x() {}
+ ///
+ /// fn main() {
+ /// x()
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The use of the [`#[inline(always)]`][inline] attribute prevents
+ /// the [`#[no_sanitize(...)]`][no_sanitize] attribute from working.
+ /// Consider temporarily removing the `inline` attribute.
+ pub INLINE_NO_SANITIZE,
+ Warn,
+ "detects incompatible use of `#[inline(always)]` and `#[no_sanitize(...)]`",
+}
+
+declare_lint! {
+ /// The `asm_sub_register` lint detects using only a subset of a register
+ /// for inline asm inputs.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (fails on non-x86_64)
+ /// #[cfg(target_arch="x86_64")]
+ /// use std::arch::asm;
+ ///
+ /// fn main() {
+ /// #[cfg(target_arch="x86_64")]
+ /// unsafe {
+ /// asm!("mov {0}, {0}", in(reg) 0i16);
+ /// }
+ /// }
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: formatting may not be suitable for sub-register argument
+ /// --> src/main.rs:7:19
+ /// |
+ /// 7 | asm!("mov {0}, {0}", in(reg) 0i16);
+ /// | ^^^ ^^^ ---- for this argument
+ /// |
+ /// = note: `#[warn(asm_sub_register)]` on by default
+ /// = help: use the `x` modifier to have the register formatted as `ax`
+ /// = help: or use the `r` modifier to keep the default formatting of `rax`
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// Registers on some architectures can use different names to refer to a
+ /// subset of the register. By default, the compiler will use the name for
+ /// the full register size. To explicitly use a subset of the register,
+ /// you can override the default by using a modifier on the template
+ /// string operand to specify which subregister to use. This lint is issued
+ /// if you pass in a value with a smaller data type than the default
+ /// register size, to alert you of possibly using the incorrect width. To
+ /// fix this, add the suggested modifier to the template, or cast the
+ /// value to the correct size.
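+ ///
+ /// For example, a sketch of the suggested fix for the example above:
+ ///
+ /// ```rust,ignore (fails on non-x86_64)
+ /// #[cfg(target_arch="x86_64")]
+ /// use std::arch::asm;
+ ///
+ /// fn main() {
+ /// #[cfg(target_arch="x86_64")]
+ /// unsafe {
+ /// // The `x` modifier formats the register at 16-bit width
+ /// // (`ax`), matching the `i16` operand:
+ /// asm!("mov {0:x}, {0:x}", in(reg) 0i16);
+ /// }
+ /// }
+ /// ```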
+ ///
+ /// See [register template modifiers] in the reference for more details.
+ ///
+ /// [register template modifiers]: https://doc.rust-lang.org/nightly/reference/inline-assembly.html#template-modifiers
+ pub ASM_SUB_REGISTER,
+ Warn,
+ "using only a subset of a register for inline asm inputs",
+}
+
+declare_lint! {
+ /// The `bad_asm_style` lint detects the use of the `.intel_syntax` and
+ /// `.att_syntax` directives.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (fails on non-x86_64)
+ /// #[cfg(target_arch="x86_64")]
+ /// use std::arch::asm;
+ ///
+ /// fn main() {
+ /// #[cfg(target_arch="x86_64")]
+ /// unsafe {
+ /// asm!(
+ /// ".att_syntax",
+ /// "movq %{0}, %{0}", in(reg) 0usize
+ /// );
+ /// }
+ /// }
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: avoid using `.att_syntax`, prefer using `options(att_syntax)` instead
+ /// --> src/main.rs:8:14
+ /// |
+ /// 8 | ".att_syntax",
+ /// | ^^^^^^^^^^^
+ /// |
+ /// = note: `#[warn(bad_asm_style)]` on by default
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// On x86, `asm!` uses the Intel assembly syntax by default. While this
+ /// can be switched using assembler directives like `.att_syntax`, using the
+ /// `att_syntax` option is recommended instead because it will also properly
+ /// prefix register placeholders with `%` as required by AT&T syntax.
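+ ///
+ /// For example, a sketch of the suggested fix for the example above:
+ ///
+ /// ```rust,ignore (fails on non-x86_64)
+ /// #[cfg(target_arch="x86_64")]
+ /// use std::arch::asm;
+ ///
+ /// fn main() {
+ /// #[cfg(target_arch="x86_64")]
+ /// unsafe {
+ /// // `options(att_syntax)` switches the syntax and prefixes
+ /// // substituted registers with `%` automatically:
+ /// asm!("movq {0}, {0}", in(reg) 0usize, options(att_syntax));
+ /// }
+ /// }
+ /// ```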
+ pub BAD_ASM_STYLE,
+ Warn,
+ "incorrect use of inline assembly",
+}
+
+declare_lint! {
+ /// The `unsafe_op_in_unsafe_fn` lint detects unsafe operations in unsafe
+ /// functions without an explicit unsafe block.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(unsafe_op_in_unsafe_fn)]
+ ///
+ /// unsafe fn foo() {}
+ ///
+ /// unsafe fn bar() {
+ /// foo();
+ /// }
+ ///
+ /// fn main() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Currently, an [`unsafe fn`] allows any [unsafe] operation within its
+ /// body. However, this can increase the surface area of code that needs
+ /// to be scrutinized for proper behavior. The [`unsafe` block] provides a
+ /// convenient way to make it clear exactly which parts of the code are
+ /// performing unsafe operations. In the future, it is desired to change
+ /// it so that unsafe operations cannot be performed in an `unsafe fn`
+ /// without an `unsafe` block.
+ ///
+ /// The fix to this is to wrap the unsafe code in an `unsafe` block.
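+ ///
+ /// For example, the fix for the example above:
+ ///
+ /// ```rust
+ /// #![deny(unsafe_op_in_unsafe_fn)]
+ ///
+ /// unsafe fn foo() {}
+ ///
+ /// unsafe fn bar() {
+ /// // The explicit block marks exactly which operation is unsafe:
+ /// unsafe { foo() }
+ /// }
+ ///
+ /// fn main() {}
+ /// ```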
+ ///
+ /// This lint is "allow" by default since this will affect a large amount
+ /// of existing code, and the exact plan for increasing the severity is
+ /// still being considered. See [RFC #2585] and [issue #71668] for more
+ /// details.
+ ///
+ /// [`unsafe fn`]: https://doc.rust-lang.org/reference/unsafe-functions.html
+ /// [`unsafe` block]: https://doc.rust-lang.org/reference/expressions/block-expr.html#unsafe-blocks
+ /// [unsafe]: https://doc.rust-lang.org/reference/unsafety.html
+ /// [RFC #2585]: https://github.com/rust-lang/rfcs/blob/master/text/2585-unsafe-block-in-unsafe-fn.md
+ /// [issue #71668]: https://github.com/rust-lang/rust/issues/71668
+ pub UNSAFE_OP_IN_UNSAFE_FN,
+ Allow,
+ "unsafe operations in unsafe functions without an explicit unsafe block are deprecated",
+}
+
+declare_lint! {
+ /// The `cenum_impl_drop_cast` lint detects an `as` cast of a field-less
+ /// `enum` that implements [`Drop`].
+ ///
+ /// [`Drop`]: https://doc.rust-lang.org/std/ops/trait.Drop.html
+ ///
+ /// ### Example
+ ///
+ /// ```compile_fail
+ /// # #![allow(unused)]
+ /// enum E {
+ /// A,
+ /// }
+ ///
+ /// impl Drop for E {
+ /// fn drop(&mut self) {
+ /// println!("Drop");
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// let e = E::A;
+ /// let i = e as u32;
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Casting a field-less `enum` that does not implement [`Copy`] to an
+ /// integer moves the value without calling `drop`. This can result in
+ /// surprising behavior if it was expected that `drop` should be called.
+ /// Calling `drop` automatically would be inconsistent with other move
+ /// operations. Since neither behavior is clear or consistent, it was
+ /// decided that a cast of this nature will no longer be allowed.
+ ///
+ /// This is a [future-incompatible] lint to transition this to a hard error
+ /// in the future. See [issue #73333] for more details.
+ ///
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ /// [issue #73333]: https://github.com/rust-lang/rust/issues/73333
+ /// [`Copy`]: https://doc.rust-lang.org/std/marker/trait.Copy.html
+ pub CENUM_IMPL_DROP_CAST,
+ Deny,
+ "a C-like enum implementing Drop is cast",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #73333 <https://github.com/rust-lang/rust/issues/73333>",
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
+ };
+}
+
+declare_lint! {
+ /// The `fuzzy_provenance_casts` lint detects an `as` cast between an integer
+ /// and a pointer.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(strict_provenance)]
+ /// #![warn(fuzzy_provenance_casts)]
+ ///
+ /// fn main() {
+ /// let _dangling = 16_usize as *const u8;
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This lint is part of the strict provenance effort, see [issue #95228].
+ /// Casting an integer to a pointer is considered bad style, as a pointer
+ /// contains, besides the *address*, also a *provenance*, indicating what
+ /// memory the pointer is allowed to read/write. Casting an integer, which
+ /// doesn't have provenance, to a pointer requires the compiler to assign
+ /// (guess) provenance. The compiler assigns "all exposed valid" (see the
+ /// docs of [`ptr::from_exposed_addr`] for more information about this
+ /// "exposing"). This penalizes the optimiser and is not well suited for
+ /// dynamic analysis/dynamic program verification (e.g. Miri or CHERI
+ /// platforms).
+ ///
+ /// It is much better to use [`ptr::with_addr`] instead to specify the
+ /// provenance you want. If using this function is not possible because the
+ /// code relies on exposed provenance then there is as an escape hatch
+ /// [`ptr::from_exposed_addr`].
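+ ///
+ /// For example, a sketch of deriving a new pointer from an existing one
+ /// via the unstable strict provenance methods, rather than casting a
+ /// bare integer:
+ ///
+ /// ```rust
+ /// #![feature(strict_provenance)]
+ ///
+ /// fn main() {
+ /// let x = 0u8;
+ /// let base: *const u8 = &x;
+ /// // `with_addr` changes the address but keeps the provenance of `base`:
+ /// let _p = base.with_addr(base.addr() + 1);
+ /// }
+ /// ```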
+ ///
+ /// [issue #95228]: https://github.com/rust-lang/rust/issues/95228
+ /// [`ptr::with_addr`]: https://doc.rust-lang.org/core/ptr/fn.with_addr
+ /// [`ptr::from_exposed_addr`]: https://doc.rust-lang.org/core/ptr/fn.from_exposed_addr
+ pub FUZZY_PROVENANCE_CASTS,
+ Allow,
+ "a fuzzy integer to pointer cast is used",
+ @feature_gate = sym::strict_provenance;
+}
+
+declare_lint! {
+ /// The `lossy_provenance_casts` lint detects an `as` cast between a pointer
+ /// and an integer.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(strict_provenance)]
+ /// #![warn(lossy_provenance_casts)]
+ ///
+ /// fn main() {
+ /// let x: u8 = 37;
+ /// let _addr: usize = &x as *const u8 as usize;
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// This lint is part of the strict provenance effort, see [issue #95228].
+ /// Casting a pointer to an integer is a lossy operation, because beyond
+ /// just an *address* a pointer may be associated with a particular
+ /// *provenance*. This information is used by the optimiser and for dynamic
+ /// analysis/dynamic program verification (e.g. Miri or CHERI platforms).
+ ///
+ /// Since this cast is lossy, it is considered good style to use the
+ /// [`ptr::addr`] method instead, which has a similar effect, but doesn't
+ /// "expose" the pointer provenance. This improves optimisation potential.
+ /// See the docs of [`ptr::addr`] and [`ptr::expose_addr`] for more information
+ /// about exposing pointer provenance.
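+ ///
+ /// For example, a sketch of the suggested fix for the example above:
+ ///
+ /// ```rust
+ /// #![feature(strict_provenance)]
+ ///
+ /// fn main() {
+ /// let x: u8 = 37;
+ /// let p: *const u8 = &x;
+ /// // `addr` extracts the address without exposing the provenance:
+ /// let _addr: usize = p.addr();
+ /// }
+ /// ```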
+ ///
+ /// If your code can't comply with strict provenance and needs to expose
+ /// the provenance, then there is [`ptr::expose_addr`] as an escape hatch,
+ /// which preserves the behaviour of `as usize` casts while being explicit
+ /// about the semantics.
+ ///
+ /// [issue #95228]: https://github.com/rust-lang/rust/issues/95228
+ /// [`ptr::addr`]: https://doc.rust-lang.org/core/ptr/fn.addr
+ /// [`ptr::expose_addr`]: https://doc.rust-lang.org/core/ptr/fn.expose_addr
+ pub LOSSY_PROVENANCE_CASTS,
+ Allow,
+ "a lossy pointer to integer cast is used",
+ @feature_gate = sym::strict_provenance;
+}
+
+declare_lint! {
+ /// The `const_evaluatable_unchecked` lint detects a generic constant used
+ /// in a type.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// const fn foo<T>() -> usize {
+ /// if std::mem::size_of::<*mut T>() < 8 { // size of *mut T does not depend on T
+ /// 4
+ /// } else {
+ /// 8
+ /// }
+ /// }
+ ///
+ /// fn test<T>() {
+ /// let _ = [0; foo::<T>()];
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In the 1.43 release, some uses of generic parameters in array repeat
+ /// expressions were accidentally allowed. This is a [future-incompatible]
+ /// lint to transition this to a hard error in the future. See [issue
+ /// #76200] for a more detailed description and possible fixes.
+ ///
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ /// [issue #76200]: https://github.com/rust-lang/rust/issues/76200
+ pub CONST_EVALUATABLE_UNCHECKED,
+ Warn,
+ "detects a generic constant is used in a type without a emitting a warning",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #76200 <https://github.com/rust-lang/rust/issues/76200>",
+ };
+}
+
+declare_lint! {
+ /// The `function_item_references` lint detects function references that are
+ /// formatted with [`fmt::Pointer`] or transmuted.
+ ///
+ /// [`fmt::Pointer`]: https://doc.rust-lang.org/std/fmt/trait.Pointer.html
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// fn foo() { }
+ ///
+ /// fn main() {
+ /// println!("{:p}", &foo);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Taking a reference to a function may be mistaken as a way to obtain a
+ /// pointer to that function. This can give unexpected results when
+ /// formatting the reference as a pointer or transmuting it. This lint is
+ /// issued when function references are formatted as pointers, passed as
+ /// arguments bound by [`fmt::Pointer`] or transmuted.
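+ ///
+ /// For example, the suggested cast for the example above:
+ ///
+ /// ```rust
+ /// fn foo() { }
+ ///
+ /// fn main() {
+ /// // Casting to a function pointer prints the function's address:
+ /// println!("{:p}", foo as fn());
+ /// }
+ /// ```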
+ pub FUNCTION_ITEM_REFERENCES,
+ Warn,
+ "suggest casting to a function pointer when attempting to take references to function items",
+}
+
+declare_lint! {
+ /// The `uninhabited_static` lint detects uninhabited statics.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// enum Void {}
+ /// extern {
+ /// static EXTERN: Void;
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Statics with an uninhabited type can never be initialized, so they are impossible to define.
+ /// However, this can be side-stepped with an `extern static`, leading to problems later in the
+ /// compiler, which assumes that there are no initialized uninhabited places (such as locals or
+ /// statics). This was accidentally allowed, but is being phased out.
+ pub UNINHABITED_STATIC,
+ Warn,
+ "uninhabited static",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #74840 <https://github.com/rust-lang/rust/issues/74840>",
+ };
+}
+
+declare_lint! {
+ /// The `useless_deprecated` lint detects deprecation attributes with no effect.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// struct X;
+ ///
+ /// #[deprecated = "message"]
+ /// impl Default for X {
+ /// fn default() -> Self {
+ /// X
+ /// }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Deprecation attributes have no effect on trait implementations.
+ pub USELESS_DEPRECATED,
+ Deny,
+ "detects deprecation attributes with no effect",
+}
+
+declare_lint! {
+ /// The `undefined_naked_function_abi` lint detects naked function definitions that
+ /// either do not specify an ABI or specify the Rust ABI.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(naked_functions)]
+ ///
+ /// use std::arch::asm;
+ ///
+ /// #[naked]
+ /// pub fn default_abi() -> u32 {
+ /// unsafe { asm!("", options(noreturn)); }
+ /// }
+ ///
+ /// #[naked]
+ /// pub extern "Rust" fn rust_abi() -> u32 {
+ /// unsafe { asm!("", options(noreturn)); }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The Rust ABI is currently undefined. Therefore, naked functions should
+ /// specify a non-Rust ABI.
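+ ///
+ /// For example, a sketch using a defined ABI (the function name is
+ /// illustrative):
+ ///
+ /// ```rust
+ /// #![feature(naked_functions)]
+ ///
+ /// use std::arch::asm;
+ ///
+ /// #[naked]
+ /// // `extern "C"` is a defined, non-Rust ABI:
+ /// pub extern "C" fn c_abi() -> u32 {
+ /// unsafe { asm!("", options(noreturn)); }
+ /// }
+ /// ```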
+ pub UNDEFINED_NAKED_FUNCTION_ABI,
+ Warn,
+ "undefined naked function ABI"
+}
+
+declare_lint! {
+ /// The `ineffective_unstable_trait_impl` lint detects `#[unstable]` attributes which have no effect.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![feature(staged_api)]
+ ///
+ /// #[derive(Clone)]
+ /// #[stable(feature = "x", since = "1")]
+ /// struct S {}
+ ///
+ /// #[unstable(feature = "y", issue = "none")]
+ /// impl Copy for S {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// `staged_api` does not currently support using a stability attribute on `impl` blocks.
+ /// `impl`s are always stable if both the type and trait are stable, and always unstable otherwise.
+ pub INEFFECTIVE_UNSTABLE_TRAIT_IMPL,
+ Deny,
+ "detects `#[unstable]` on stable trait implementations for stable types"
+}
+
+declare_lint! {
+ /// The `semicolon_in_expressions_from_macros` lint detects trailing semicolons
+ /// in macro bodies when the macro is invoked in expression position.
+ /// This was previously accepted, but is being phased out.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(semicolon_in_expressions_from_macros)]
+ /// macro_rules! foo {
+ /// () => { true; }
+ /// }
+ ///
+ /// fn main() {
+ /// let val = match true {
+ /// true => false,
+ /// _ => foo!()
+ /// };
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Previously, Rust ignored the trailing semicolon in a macro
+ /// body when the macro was invoked in expression position.
+ /// However, this makes the treatment of semicolons in the language
+ /// inconsistent, and could lead to unexpected runtime behavior
+ /// in some circumstances (e.g. if the macro author expects
+ /// a value to be dropped).
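+ ///
+ /// For example, removing the trailing semicolon fixes the example above:
+ ///
+ /// ```rust
+ /// macro_rules! foo {
+ /// // Without the semicolon, the macro expands to a value:
+ /// () => { true }
+ /// }
+ ///
+ /// fn main() {
+ /// let _val = match true {
+ /// true => false,
+ /// _ => foo!()
+ /// };
+ /// }
+ /// ```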
+ ///
+ /// This is a [future-incompatible] lint to transition this
+ /// to a hard error in the future. See [issue #79813] for more details.
+ ///
+ /// [issue #79813]: https://github.com/rust-lang/rust/issues/79813
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
+ Warn,
+ "trailing semicolon in macro body used as expression",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #79813 <https://github.com/rust-lang/rust/issues/79813>",
+ };
+}
+
+declare_lint! {
+ /// The `legacy_derive_helpers` lint detects derive helper attributes
+ /// that are used before they are introduced.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs extern crate)
+ /// #[serde(rename_all = "camelCase")]
+ /// #[derive(Deserialize)]
+ /// struct S { /* fields */ }
+ /// ```
+ ///
+ /// produces:
+ ///
+ /// ```text
+ /// warning: derive helper attribute is used before it is introduced
+ /// --> $DIR/legacy-derive-helpers.rs:1:3
+ /// |
+ /// 1 | #[serde(rename_all = "camelCase")]
+ /// | ^^^^^
+ /// ...
+ /// 2 | #[derive(Deserialize)]
+ /// | ----------- the attribute is introduced here
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// Attributes like this work for historical reasons, but attribute expansion works in
+ /// left-to-right order in general, so, to resolve `#[serde]`, the compiler has to try to "look
+ /// into the future" at a not-yet-expanded part of the item, but such attempts are not always
+ /// reliable.
+ ///
+ /// To fix the warning, place the helper attribute after its corresponding derive.
+ /// ```rust,ignore (needs extern crate)
+ /// #[derive(Deserialize)]
+ /// #[serde(rename_all = "camelCase")]
+ /// struct S { /* fields */ }
+ /// ```
+ pub LEGACY_DERIVE_HELPERS,
+ Warn,
+ "detects derive helper attributes that are used before they are introduced",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #79202 <https://github.com/rust-lang/rust/issues/79202>",
+ };
+}
+
+declare_lint! {
+ /// The `large_assignments` lint detects when objects of large
+ /// types are being moved around.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (can crash on some platforms)
+ /// let x = [0; 50000];
+ /// let y = x;
+ /// ```
+ ///
+ /// produces:
+ ///
+ /// ```text
+ /// warning: moving a large value
+ /// --> $DIR/move-large.rs:1:3
+ /// let y = x;
+ /// - Copied large value here
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// When using a large type in a plain assignment or in a function
+ /// argument, idiomatic code can be inefficient.
+ /// Ideally appropriate optimizations would resolve this, but such
+ /// optimizations are only done in a best-effort manner.
+ /// This lint will trigger on all sites of large moves and thus allow the
+ /// user to resolve them in code.
+ pub LARGE_ASSIGNMENTS,
+ Warn,
+ "detects large moves or copies",
+}
+
+declare_lint! {
+ /// The `deprecated_cfg_attr_crate_type_name` lint detects uses of the
+ /// `#![cfg_attr(..., crate_type = "...")]` and
+ /// `#![cfg_attr(..., crate_name = "...")]` attributes to conditionally
+ /// specify the crate type and name in the source code.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![cfg_attr(debug_assertions, crate_type = "lib")]
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The `#![crate_type]` and `#![crate_name]` attributes require a hack in
+ /// the compiler to be able to change the used crate type and crate name
+ /// after macros have been expanded. Neither attribute works in combination
+ /// with Cargo as it explicitly passes `--crate-type` and `--crate-name` on
+ /// the commandline. These values must match the value used in the source
+ /// code to prevent an error.
+ ///
+ /// To fix the warning use `--crate-type` on the commandline when running
+ /// rustc instead of `#![cfg_attr(..., crate_type = "...")]` and
+ /// `--crate-name` instead of `#![cfg_attr(..., crate_name = "...")]`.
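+ ///
+ /// For example (a sketch of the command line, assuming a library crate
+ /// named `foo`):
+ ///
+ /// ```text
+ /// rustc --crate-type lib --crate-name foo lib.rs
+ /// ```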
+ pub DEPRECATED_CFG_ATTR_CRATE_TYPE_NAME,
+ Warn,
+ "detects usage of `#![cfg_attr(..., crate_type/crate_name = \"...\")]`",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #91632 <https://github.com/rust-lang/rust/issues/91632>",
+ };
+}
+
+declare_lint! {
+ /// The `unexpected_cfgs` lint detects unexpected conditional compilation conditions.
+ ///
+ /// ### Example
+ ///
+ /// ```text
+ /// rustc --check-cfg 'names()'
+ /// ```
+ ///
+ /// ```rust,ignore (needs command line option)
+ /// #[cfg(widnows)]
+ /// fn foo() {}
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: unknown condition name used
+ /// --> lint_example.rs:1:7
+ /// |
+ /// 1 | #[cfg(widnows)]
+ /// | ^^^^^^^
+ /// |
+ /// = note: `#[warn(unexpected_cfgs)]` on by default
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// This lint is only active when a `--check-cfg='names(...)'` option has been passed
+ /// to the compiler and triggers whenever an unknown condition name or value is used.
+    /// The known conditions include names or values passed in `--check-cfg`, `--cfg`, and some
+    /// well-known names and values built into the compiler.
+ pub UNEXPECTED_CFGS,
+ Warn,
+ "detects unexpected names and values in `#[cfg]` conditions",
+}
+
+declare_lint! {
+ /// The `repr_transparent_external_private_fields` lint
+ /// detects types marked `#[repr(transparent)]` that (transitively)
+    /// contain an external ZST type that is marked `#[non_exhaustive]` or
+    /// contains private fields.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs external crate)
+ /// #![deny(repr_transparent_external_private_fields)]
+ /// use foo::NonExhaustiveZst;
+ ///
+ /// #[repr(transparent)]
+ /// struct Bar(u32, ([u32; 0], NonExhaustiveZst));
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// error: zero-sized fields in repr(transparent) cannot contain external non-exhaustive types
+ /// --> src/main.rs:5:28
+ /// |
+ /// 5 | struct Bar(u32, ([u32; 0], NonExhaustiveZst));
+ /// | ^^^^^^^^^^^^^^^^
+ /// |
+ /// note: the lint level is defined here
+ /// --> src/main.rs:1:9
+ /// |
+ /// 1 | #![deny(repr_transparent_external_private_fields)]
+ /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ /// = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ /// = note: for more information, see issue #78586 <https://github.com/rust-lang/rust/issues/78586>
+ /// = note: this struct contains `NonExhaustiveZst`, which is marked with `#[non_exhaustive]`, and makes it not a breaking change to become non-zero-sized in the future.
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+    /// Previously, Rust accepted fields that contain external private zero-sized types,
+ /// even though it should not be a breaking change to add a non-zero-sized field to
+ /// that private type.
+ ///
+ /// This is a [future-incompatible] lint to transition this
+ /// to a hard error in the future. See [issue #78586] for more details.
+ ///
+ /// [issue #78586]: https://github.com/rust-lang/rust/issues/78586
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
+ Warn,
+    "transparent type contains an external ZST that is marked #[non_exhaustive] or contains private fields",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #78586 <https://github.com/rust-lang/rust/issues/78586>",
+ };
+}
+
+declare_lint_pass! {
+ /// Does nothing as a lint pass, but registers some `Lint`s
+ /// that are used by other parts of the compiler.
+ HardwiredLints => [
+ FORBIDDEN_LINT_GROUPS,
+ ILLEGAL_FLOATING_POINT_LITERAL_PATTERN,
+ ARITHMETIC_OVERFLOW,
+ UNCONDITIONAL_PANIC,
+ UNUSED_IMPORTS,
+ UNUSED_EXTERN_CRATES,
+ UNUSED_CRATE_DEPENDENCIES,
+ UNUSED_QUALIFICATIONS,
+ UNKNOWN_LINTS,
+ UNFULFILLED_LINT_EXPECTATIONS,
+ UNUSED_VARIABLES,
+ UNUSED_ASSIGNMENTS,
+ DEAD_CODE,
+ UNREACHABLE_CODE,
+ UNREACHABLE_PATTERNS,
+ OVERLAPPING_RANGE_ENDPOINTS,
+ BINDINGS_WITH_VARIANT_NAME,
+ UNUSED_MACROS,
+ UNUSED_MACRO_RULES,
+ WARNINGS,
+ UNUSED_FEATURES,
+ STABLE_FEATURES,
+ UNKNOWN_CRATE_TYPES,
+ TRIVIAL_CASTS,
+ TRIVIAL_NUMERIC_CASTS,
+ PRIVATE_IN_PUBLIC,
+ EXPORTED_PRIVATE_DEPENDENCIES,
+ PUB_USE_OF_PRIVATE_EXTERN_CRATE,
+ INVALID_TYPE_PARAM_DEFAULT,
+ CONST_ERR,
+ RENAMED_AND_REMOVED_LINTS,
+ UNALIGNED_REFERENCES,
+ CONST_ITEM_MUTATION,
+ PATTERNS_IN_FNS_WITHOUT_BODY,
+ MISSING_FRAGMENT_SPECIFIER,
+ LATE_BOUND_LIFETIME_ARGUMENTS,
+ ORDER_DEPENDENT_TRAIT_OBJECTS,
+ COHERENCE_LEAK_CHECK,
+ DEPRECATED,
+ UNUSED_UNSAFE,
+ UNUSED_MUT,
+ UNCONDITIONAL_RECURSION,
+ SINGLE_USE_LIFETIMES,
+ UNUSED_LIFETIMES,
+ UNUSED_LABELS,
+ TYVAR_BEHIND_RAW_POINTER,
+ ELIDED_LIFETIMES_IN_PATHS,
+ BARE_TRAIT_OBJECTS,
+ ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE,
+ UNSTABLE_NAME_COLLISIONS,
+ IRREFUTABLE_LET_PATTERNS,
+ WHERE_CLAUSES_OBJECT_SAFETY,
+ PROC_MACRO_DERIVE_RESOLUTION_FALLBACK,
+ MACRO_USE_EXTERN_CRATE,
+ MACRO_EXPANDED_MACRO_EXPORTS_ACCESSED_BY_ABSOLUTE_PATHS,
+ ILL_FORMED_ATTRIBUTE_INPUT,
+ CONFLICTING_REPR_HINTS,
+ META_VARIABLE_MISUSE,
+ DEPRECATED_IN_FUTURE,
+ AMBIGUOUS_ASSOCIATED_ITEMS,
+ INDIRECT_STRUCTURAL_MATCH,
+ POINTER_STRUCTURAL_MATCH,
+ NONTRIVIAL_STRUCTURAL_MATCH,
+ SOFT_UNSTABLE,
+ INLINE_NO_SANITIZE,
+ BAD_ASM_STYLE,
+ ASM_SUB_REGISTER,
+ UNSAFE_OP_IN_UNSAFE_FN,
+ INCOMPLETE_INCLUDE,
+ CENUM_IMPL_DROP_CAST,
+ FUZZY_PROVENANCE_CASTS,
+ LOSSY_PROVENANCE_CASTS,
+ CONST_EVALUATABLE_UNCHECKED,
+ INEFFECTIVE_UNSTABLE_TRAIT_IMPL,
+ MUST_NOT_SUSPEND,
+ UNINHABITED_STATIC,
+ FUNCTION_ITEM_REFERENCES,
+ USELESS_DEPRECATED,
+ MISSING_ABI,
+ INVALID_DOC_ATTRIBUTES,
+ SEMICOLON_IN_EXPRESSIONS_FROM_MACROS,
+ RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES,
+ LEGACY_DERIVE_HELPERS,
+ PROC_MACRO_BACK_COMPAT,
+ RUST_2021_INCOMPATIBLE_OR_PATTERNS,
+ LARGE_ASSIGNMENTS,
+ RUST_2021_PRELUDE_COLLISIONS,
+ RUST_2021_PREFIXES_INCOMPATIBLE_SYNTAX,
+ UNSUPPORTED_CALLING_CONVENTIONS,
+ BREAK_WITH_LABEL_AND_LOOP,
+ UNUSED_ATTRIBUTES,
+ UNUSED_TUPLE_STRUCT_FIELDS,
+ NON_EXHAUSTIVE_OMITTED_PATTERNS,
+ TEXT_DIRECTION_CODEPOINT_IN_COMMENT,
+ DEREF_INTO_DYN_SUPERTRAIT,
+ DEPRECATED_CFG_ATTR_CRATE_TYPE_NAME,
+ DUPLICATE_MACRO_ATTRIBUTES,
+ SUSPICIOUS_AUTO_TRAIT_IMPLS,
+ UNEXPECTED_CFGS,
+ DEPRECATED_WHERE_CLAUSE_LOCATION,
+ TEST_UNSTABLE_LINT,
+ FFI_UNWIND_CALLS,
+ REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
+ NAMED_ARGUMENTS_USED_POSITIONALLY,
+ ]
+}
+
+declare_lint! {
+ /// The `unused_doc_comments` lint detects doc comments that aren't used
+ /// by `rustdoc`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// /// docs for x
+ /// let x = 12;
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// `rustdoc` does not use doc comments in all positions, and so the doc
+ /// comment will be ignored. Try changing it to a normal comment with `//`
+ /// to avoid the warning.
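+    ///
+    /// For the example above, that fix looks like:
+    ///
+    /// ```rust
+    /// // docs for x
+    /// let x = 12;
+    /// ```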
+ pub UNUSED_DOC_COMMENTS,
+ Warn,
+ "detects doc comments that aren't used by rustdoc"
+}
+
+declare_lint! {
+ /// The `rust_2021_incompatible_closure_captures` lint detects variables that aren't completely
+ /// captured in Rust 2021, such that the `Drop` order of their fields may differ between
+ /// Rust 2018 and 2021.
+ ///
+ /// It can also detect when a variable implements a trait like `Send`, but one of its fields does not,
+ /// and the field is captured by a closure and used with the assumption that said field implements
+ /// the same trait as the root variable.
+ ///
+ /// ### Example of drop reorder
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(rust_2021_incompatible_closure_captures)]
+ /// # #![allow(unused)]
+ ///
+ /// struct FancyInteger(i32);
+ ///
+ /// impl Drop for FancyInteger {
+ /// fn drop(&mut self) {
+ /// println!("Just dropped {}", self.0);
+ /// }
+ /// }
+ ///
+ /// struct Point { x: FancyInteger, y: FancyInteger }
+ ///
+ /// fn main() {
+ /// let p = Point { x: FancyInteger(10), y: FancyInteger(20) };
+ ///
+ /// let c = || {
+ /// let x = p.x;
+ /// };
+ ///
+ /// c();
+ ///
+ /// // ... More code ...
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// In the above example, `p.y` will be dropped at the end of `main` instead of
+    /// with `c` in Rust 2021.
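+    ///
+    /// A common migration, sketched here (`cargo fix --edition` inserts something
+    /// similar), is to force the whole variable to be captured:
+    ///
+    /// ```rust,ignore (illustrative)
+    /// let c = || {
+    ///     let _ = &p; // capture all of `p`, preserving the Rust 2018 drop order
+    ///     let x = p.x;
+    /// };
+    /// ```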
+ ///
+ /// ### Example of auto-trait
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(rust_2021_incompatible_closure_captures)]
+ /// use std::thread;
+ ///
+ /// struct Pointer(*mut i32);
+ /// unsafe impl Send for Pointer {}
+ ///
+ /// fn main() {
+ /// let mut f = 10;
+ /// let fptr = Pointer(&mut f as *mut i32);
+ /// thread::spawn(move || unsafe {
+ /// *fptr.0 = 20;
+ /// });
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In the above example, only `fptr.0` is captured in Rust 2021.
+ /// The field is of type `*mut i32`, which doesn't implement `Send`,
+ /// making the code invalid as the field cannot be sent between threads safely.
+ pub RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES,
+ Allow,
+ "detects closures affected by Rust 2021 changes",
+ @future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::EditionSemanticsChange(Edition::Edition2021),
+ explain_reason: false,
+ };
+}
+
+declare_lint_pass!(UnusedDocComment => [UNUSED_DOC_COMMENTS]);
+
+declare_lint! {
+ /// The `missing_abi` lint detects cases where the ABI is omitted from
+ /// extern declarations.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(missing_abi)]
+ ///
+ /// extern fn foo() {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Historically, Rust implicitly selected C as the ABI for extern
+    /// declarations. We expect to add new ABIs, like `C-unwind`, in the future,
+    /// though this has not yet happened; once it does, seeing the ABI written out
+    /// explicitly will make code review easier.
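+    ///
+    /// Writing the ABI explicitly silences the lint:
+    ///
+    /// ```rust
+    /// extern "C" fn foo() {}
+    /// ```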
+ pub MISSING_ABI,
+ Allow,
+ "No declared ABI for extern declaration"
+}
+
+declare_lint! {
+ /// The `invalid_doc_attributes` lint detects when the `#[doc(...)]` is
+ /// misused.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(warnings)]
+ ///
+ /// pub mod submodule {
+ /// #![doc(test(no_crate_inject))]
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// Previously, very few checks were performed on `#[doc(..)]` attributes,
+    /// unlike the other attributes. The compiler will now catch all the issues
+    /// that it previously ignored silently.
+ pub INVALID_DOC_ATTRIBUTES,
+ Warn,
+ "detects invalid `#[doc(...)]` attributes",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #82730 <https://github.com/rust-lang/rust/issues/82730>",
+ };
+}
+
+declare_lint! {
+ /// The `proc_macro_back_compat` lint detects uses of old versions of certain
+ /// proc-macro crates, which have hardcoded workarounds in the compiler.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs-dependency)
+ ///
+ /// use time_macros_impl::impl_macros;
+ /// struct Foo;
+ /// impl_macros!(Foo);
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: using an old version of `time-macros-impl`
+ /// ::: $DIR/group-compat-hack.rs:27:5
+ /// |
+ /// LL | impl_macros!(Foo);
+ /// | ------------------ in this macro invocation
+ /// |
+ /// = note: `#[warn(proc_macro_back_compat)]` on by default
+ /// = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release!
+ /// = note: for more information, see issue #83125 <https://github.com/rust-lang/rust/issues/83125>
+    /// = note: the `time-macros-impl` crate will stop compiling in future versions of Rust. Please update to the latest version of the `time` crate to avoid breakage
+ /// = note: this warning originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info)
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// Eventually, the backwards-compatibility hacks present in the compiler will be removed,
+ /// causing older versions of certain crates to stop compiling.
+ /// This is a [future-incompatible] lint to ease the transition to an error.
+ /// See [issue #83125] for more details.
+ ///
+ /// [issue #83125]: https://github.com/rust-lang/rust/issues/83125
+ /// [future-incompatible]: ../index.md#future-incompatible-lints
+ pub PROC_MACRO_BACK_COMPAT,
+ Deny,
+ "detects usage of old versions of certain proc-macro crates",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #83125 <https://github.com/rust-lang/rust/issues/83125>",
+ reason: FutureIncompatibilityReason::FutureReleaseErrorReportNow,
+ };
+}
+
+declare_lint! {
+ /// The `rust_2021_incompatible_or_patterns` lint detects usage of old versions of or-patterns.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(rust_2021_incompatible_or_patterns)]
+ ///
+ /// macro_rules! match_any {
+ /// ( $expr:expr , $( $( $pat:pat )|+ => $expr_arm:expr ),+ ) => {
+ /// match $expr {
+ /// $(
+ /// $( $pat => $expr_arm, )+
+ /// )+
+ /// }
+ /// };
+ /// }
+ ///
+ /// fn main() {
+ /// let result: Result<i64, i32> = Err(42);
+ /// let int: i64 = match_any!(result, Ok(i) | Err(i) => i.into());
+ /// assert_eq!(int, 42);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In Rust 2021, the `pat` matcher will match additional patterns, which include the `|` character.
+ pub RUST_2021_INCOMPATIBLE_OR_PATTERNS,
+ Allow,
+ "detects usage of old versions of or-patterns",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/or-patterns-macro-rules.html>",
+ reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ };
+}
+
+declare_lint! {
+ /// The `rust_2021_prelude_collisions` lint detects the usage of trait methods which are ambiguous
+ /// with traits added to the prelude in future editions.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(rust_2021_prelude_collisions)]
+ ///
+ /// trait Foo {
+ /// fn try_into(self) -> Result<String, !>;
+ /// }
+ ///
+ /// impl Foo for &str {
+ /// fn try_into(self) -> Result<String, !> {
+ /// Ok(String::from(self))
+ /// }
+ /// }
+ ///
+ /// fn main() {
+ /// let x: String = "3".try_into().unwrap();
+ /// // ^^^^^^^^
+    ///     // This call to try_into matches both Foo::try_into and TryInto::try_into as
+ /// // `TryInto` has been added to the Rust prelude in 2021 edition.
+ /// println!("{x}");
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// In Rust 2021, one of the important introductions is the [prelude changes], which add
+    /// `TryFrom`, `TryInto`, and `FromIterator` into the standard library's prelude. This
+    /// results in an ambiguity as to which method/function to call when an existing `try_into`
+    /// method is called via dot-call syntax or a `try_from`/`from_iter` associated function
+    /// is called directly on a type.
+ ///
+ /// [prelude changes]: https://blog.rust-lang.org/inside-rust/2021/03/04/planning-rust-2021.html#prelude-changes
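+    ///
+    /// The usual fix, sketched here for the example above, is to disambiguate with
+    /// fully qualified syntax:
+    ///
+    /// ```rust,ignore (illustrative)
+    /// let x: String = Foo::try_into("3").unwrap();
+    /// ```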
+ pub RUST_2021_PRELUDE_COLLISIONS,
+ Allow,
+ "detects the usage of trait methods which are ambiguous with traits added to the \
+ prelude in future editions",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/prelude.html>",
+ reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ };
+}
+
+declare_lint! {
+    /// The `rust_2021_prefixes_incompatible_syntax` lint detects identifiers that will be
+    /// parsed as the prefix of a literal in Rust 2021.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,edition2018,compile_fail
+ /// #![deny(rust_2021_prefixes_incompatible_syntax)]
+ ///
+ /// macro_rules! m {
+ /// (z $x:expr) => ();
+ /// }
+ ///
+ /// m!(z"hey");
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In Rust 2015 and 2018, `z"hey"` is two tokens: the identifier `z`
+ /// followed by the string literal `"hey"`. In Rust 2021, the `z` is
+ /// considered a prefix for `"hey"`.
+ ///
+    /// This lint suggests adding whitespace between the `z` and `"hey"` tokens
+    /// to keep them separated in Rust 2021.
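+    ///
+    /// For the example above, that fix looks like:
+    ///
+    /// ```rust
+    /// macro_rules! m {
+    ///     (z $x:expr) => ();
+    /// }
+    ///
+    /// m!(z "hey");
+    /// ```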
+ // Allow this lint -- rustdoc doesn't yet support threading edition into this lint's parser.
+ #[allow(rustdoc::invalid_rust_codeblocks)]
+ pub RUST_2021_PREFIXES_INCOMPATIBLE_SYNTAX,
+ Allow,
+ "identifiers that will be parsed as a prefix in Rust 2021",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "<https://doc.rust-lang.org/nightly/edition-guide/rust-2021/reserving-syntax.html>",
+ reason: FutureIncompatibilityReason::EditionError(Edition::Edition2021),
+ };
+ crate_level_only
+}
+
+declare_lint! {
+    /// The `unsupported_calling_conventions` lint is output whenever there is a use of the
+    /// `stdcall`, `fastcall`, `thiscall`, or `vectorcall` calling conventions (or their unwind
+    /// variants) on targets where they cannot meaningfully be supported.
+    ///
+    /// For example, `stdcall` does not make much sense for x86_64 code or, even more
+    /// obviously, for powerpc code, because this calling convention was never specified for
+    /// those targets.
+    ///
+    /// Historically MSVC toolchains have fallen back to the regular C calling convention for
+    /// targets other than x86, but Rust doesn't see a need to introduce a similar hack
+    /// across many more targets.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs specific targets)
+ /// extern "stdcall" fn stdcall() {}
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: use of calling convention not supported on this target
+ /// --> $DIR/unsupported.rs:39:1
+ /// |
+ /// LL | extern "stdcall" fn stdcall() {}
+ /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ /// |
+ /// = note: `#[warn(unsupported_calling_conventions)]` on by default
+ /// = warning: this was previously accepted by the compiler but is being phased out;
+ /// it will become a hard error in a future release!
+ /// = note: for more information, see issue ...
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+    /// On most targets the behaviour of `stdcall` and similar calling conventions is not
+    /// defined at all, but such uses were previously accepted due to a bug in the
+    /// implementation of the compiler.
+ pub UNSUPPORTED_CALLING_CONVENTIONS,
+ Warn,
+ "use of unsupported calling convention",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #87678 <https://github.com/rust-lang/rust/issues/87678>",
+ };
+}
+
+declare_lint! {
+ /// The `break_with_label_and_loop` lint detects labeled `break` expressions with
+ /// an unlabeled loop as their value expression.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// 'label: loop {
+ /// break 'label loop { break 42; };
+ /// };
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In Rust, loops can have a label, and `break` expressions can refer to that label to
+ /// break out of specific loops (and not necessarily the innermost one). `break` expressions
+ /// can also carry a value expression, which can be another loop. A labeled `break` with an
+ /// unlabeled loop as its value expression is easy to confuse with an unlabeled break with
+ /// a labeled loop and is thus discouraged (but allowed for compatibility); use parentheses
+ /// around the loop expression to silence this warning. Unlabeled `break` expressions with
+ /// labeled loops yield a hard error, which can also be silenced by wrapping the expression
+ /// in parentheses.
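+    ///
+    /// For the example above, the parenthesized form looks like:
+    ///
+    /// ```rust
+    /// 'label: loop {
+    ///     break 'label (loop { break 42; });
+    /// };
+    /// ```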
+ pub BREAK_WITH_LABEL_AND_LOOP,
+ Warn,
+ "`break` expression with label and unlabeled loop as value expression"
+}
+
+declare_lint! {
+ /// The `non_exhaustive_omitted_patterns` lint detects when a wildcard (`_` or `..`) in a
+ /// pattern for a `#[non_exhaustive]` struct or enum is reachable.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs separate crate)
+ /// // crate A
+ /// #[non_exhaustive]
+ /// pub enum Bar {
+ /// A,
+ /// B, // added variant in non breaking change
+ /// }
+ ///
+ /// // in crate B
+ /// #![feature(non_exhaustive_omitted_patterns_lint)]
+ ///
+ /// match Bar::A {
+ /// Bar::A => {},
+ /// #[warn(non_exhaustive_omitted_patterns)]
+ /// _ => {},
+ /// }
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: reachable patterns not covered of non exhaustive enum
+ /// --> $DIR/reachable-patterns.rs:70:9
+ /// |
+ /// LL | _ => {}
+ /// | ^ pattern `B` not covered
+ /// |
+ /// note: the lint level is defined here
+ /// --> $DIR/reachable-patterns.rs:69:16
+ /// |
+ /// LL | #[warn(non_exhaustive_omitted_patterns)]
+ /// | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ /// = help: ensure that all possible cases are being handled by adding the suggested match arms
+ /// = note: the matched value is of type `Bar` and the `non_exhaustive_omitted_patterns` attribute was found
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// Structs and enums tagged with `#[non_exhaustive]` force the user to add a
+ /// (potentially redundant) wildcard when pattern-matching, to allow for future
+ /// addition of fields or variants. The `non_exhaustive_omitted_patterns` lint
+ /// detects when such a wildcard happens to actually catch some fields/variants.
+ /// In other words, when the match without the wildcard would not be exhaustive.
+ /// This lets the user be informed if new fields/variants were added.
+ pub NON_EXHAUSTIVE_OMITTED_PATTERNS,
+ Allow,
+ "detect when patterns of types marked `non_exhaustive` are missed",
+ @feature_gate = sym::non_exhaustive_omitted_patterns_lint;
+}
+
+declare_lint! {
+ /// The `text_direction_codepoint_in_comment` lint detects Unicode codepoints in comments that
+ /// change the visual representation of text on screen in a way that does not correspond to
+    /// their in-memory representation.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(text_direction_codepoint_in_comment)]
+ /// fn main() {
+ /// println!("{:?}"); // '‮');
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// Unicode allows changing the visual flow of text on screen in order to support scripts that
+ /// are written right-to-left, but a specially crafted comment can make code that will be
+ /// compiled appear to be part of a comment, depending on the software used to read the code.
+    /// To avoid potential problems or confusion, such as the attack described in
+    /// CVE-2021-42574, their use is denied by default.
+ pub TEXT_DIRECTION_CODEPOINT_IN_COMMENT,
+ Deny,
+ "invisible directionality-changing codepoints in comment"
+}
+
+declare_lint! {
+    /// The `deref_into_dyn_supertrait` lint is output whenever there is a use of a
+    /// `Deref` implementation with a `dyn SuperTrait` type as `Target`.
+    ///
+    /// These implementations will become shadowed when the `trait_upcasting` feature is stabilized.
+    /// The `deref` functions will no longer be called implicitly, which may change behavior.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(deref_into_dyn_supertrait)]
+ /// #![allow(dead_code)]
+ ///
+ /// use core::ops::Deref;
+ ///
+ /// trait A {}
+ /// trait B: A {}
+ /// impl<'a> Deref for dyn 'a + B {
+ /// type Target = dyn A;
+ /// fn deref(&self) -> &Self::Target {
+ /// todo!()
+ /// }
+ /// }
+ ///
+ /// fn take_a(_: &dyn A) { }
+ ///
+ /// fn take_b(b: &dyn B) {
+ /// take_a(b);
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The dyn upcasting coercion feature adds new coercion rules, taking priority
+ /// over certain other coercion rules, which will cause some behavior change.
+ pub DEREF_INTO_DYN_SUPERTRAIT,
+ Warn,
+ "`Deref` implementation usage with a supertrait trait object for output might be shadowed in the future",
+ @future_incompatible = FutureIncompatibleInfo {
+ reference: "issue #89460 <https://github.com/rust-lang/rust/issues/89460>",
+ };
+}
+
+declare_lint! {
+ /// The `duplicate_macro_attributes` lint detects when a `#[test]`-like built-in macro
+ /// attribute is duplicated on an item. This lint may trigger on `bench`, `cfg_eval`, `test`
+ /// and `test_case`.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (needs --test)
+ /// #[test]
+ /// #[test]
+ /// fn foo() {}
+ /// ```
+ ///
+ /// This will produce:
+ ///
+ /// ```text
+ /// warning: duplicated attribute
+ /// --> src/lib.rs:2:1
+ /// |
+ /// 2 | #[test]
+ /// | ^^^^^^^
+ /// |
+ /// = note: `#[warn(duplicate_macro_attributes)]` on by default
+ /// ```
+ ///
+ /// ### Explanation
+ ///
+ /// A duplicated attribute may erroneously originate from a copy-paste and the effect of it
+ /// being duplicated may not be obvious or desirable.
+ ///
+ /// For instance, doubling the `#[test]` attributes registers the test to be run twice with no
+ /// change to its environment.
+ pub DUPLICATE_MACRO_ATTRIBUTES,
+ Warn,
+ "duplicated attribute"
+}
+
+declare_lint! {
+ /// The `suspicious_auto_trait_impls` lint checks for potentially incorrect
+ /// implementations of auto traits.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// struct Foo<T>(T);
+ ///
+ /// unsafe impl<T> Send for Foo<*const T> {}
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// A type can implement auto traits, e.g. `Send`, `Sync` and `Unpin`,
+    /// in two different ways: either by writing an explicit impl, or
+    /// automatically when all fields of the type implement that auto trait.
+ ///
+ /// The compiler disables the automatic implementation if an explicit one
+    /// exists for a given type constructor. The exact rules governing this
+    /// are currently unsound and quite subtle and will be modified in the future.
+ /// This change will cause the automatic implementation to be disabled in more
+ /// cases, potentially breaking some code.
+ pub SUSPICIOUS_AUTO_TRAIT_IMPLS,
+ Warn,
+ "the rules governing auto traits will change in the future",
+ @future_incompatible = FutureIncompatibleInfo {
+ reason: FutureIncompatibilityReason::FutureReleaseSemanticsChange,
+ reference: "issue #93367 <https://github.com/rust-lang/rust/issues/93367>",
+ };
+}
+
+declare_lint! {
+    /// The `deprecated_where_clause_location` lint detects when a where clause is placed
+    /// before the equals sign in an associated type.
+ ///
+ /// ### Example
+ ///
+ /// ```rust
+ /// #![feature(generic_associated_types)]
+ ///
+ /// trait Trait {
+ /// type Assoc<'a> where Self: 'a;
+ /// }
+ ///
+ /// impl Trait for () {
+ /// type Assoc<'a> where Self: 'a = ();
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// The preferred location for where clauses on associated types in impls
+ /// is after the type. However, for most of generic associated types development,
+ /// it was only accepted before the equals. To provide a transition period and
+ /// further evaluate this change, both are currently accepted. At some point in
+ /// the future, this may be disallowed at an edition boundary; but, that is
+ /// undecided currently.
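+    ///
+    /// For the example above, the preferred form looks like:
+    ///
+    /// ```rust,ignore (illustrative)
+    /// impl Trait for () {
+    ///     type Assoc<'a> = () where Self: 'a;
+    /// }
+    /// ```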
+ pub DEPRECATED_WHERE_CLAUSE_LOCATION,
+ Warn,
+ "deprecated where clause location"
+}
+
+declare_lint! {
+ /// The `test_unstable_lint` lint tests unstable lints and is perma-unstable.
+ ///
+ /// ### Example
+ ///
+ /// ```
+ /// #![allow(test_unstable_lint)]
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+ /// In order to test the behavior of unstable lints, a permanently-unstable
+ /// lint is required. This lint can be used to trigger warnings and errors
+ /// from the compiler related to unstable lints.
+ pub TEST_UNSTABLE_LINT,
+ Deny,
+ "this unstable lint is only for testing",
+ @feature_gate = sym::test_unstable_lint;
+}
+
+declare_lint! {
+ /// The `ffi_unwind_calls` lint detects calls to foreign functions or function pointers with
+ /// `C-unwind` or other FFI-unwind ABIs.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,ignore (need FFI)
+ /// #![feature(ffi_unwind_calls)]
+ /// #![feature(c_unwind)]
+ ///
+    /// # mod ffi {
+    /// #     #[no_mangle]
+    /// #     pub extern "C-unwind" fn foo() {}
+    /// # }
+ ///
+ /// extern "C-unwind" {
+ /// fn foo();
+ /// }
+ ///
+ /// fn bar() {
+ /// unsafe { foo(); }
+ /// let ptr: unsafe extern "C-unwind" fn() = foo;
+ /// unsafe { ptr(); }
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// If a crate containing such calls is compiled with `-C panic=unwind`, the produced
+    /// library cannot be linked with crates compiled with `-C panic=abort`. Crates that
+    /// want to remain linkable with both must therefore avoid such calls.
+ pub FFI_UNWIND_CALLS,
+ Allow,
+ "call to foreign functions or function pointers with FFI-unwind ABI",
+ @feature_gate = sym::c_unwind;
+}
+
+declare_lint! {
+ /// The `named_arguments_used_positionally` lint detects cases where named arguments are only
+ /// used positionally in format strings. This usage is valid but potentially very confusing.
+ ///
+ /// ### Example
+ ///
+ /// ```rust,compile_fail
+ /// #![deny(named_arguments_used_positionally)]
+ /// fn main() {
+ /// let _x = 5;
+ /// println!("{}", _x = 1); // Prints 1, will trigger lint
+ ///
+ /// println!("{}", _x); // Prints 5, no lint emitted
+ /// println!("{_x}", _x = _x); // Prints 5, no lint emitted
+ /// }
+ /// ```
+ ///
+ /// {{produces}}
+ ///
+ /// ### Explanation
+ ///
+    /// Rust format strings can refer to named arguments by their position, but this usage is
+ /// potentially confusing. In particular, readers can incorrectly assume that the declaration
+ /// of named arguments is an assignment (which would produce the unit type).
+ /// For backwards compatibility, this is not a hard error.
+ pub NAMED_ARGUMENTS_USED_POSITIONALLY,
+ Warn,
+ "named arguments in format used positionally"
+}
diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs
new file mode 100644
index 000000000..6acbe97a7
--- /dev/null
+++ b/compiler/rustc_lint_defs/src/lib.rs
@@ -0,0 +1,722 @@
+#![feature(min_specialization)]
+
+#[macro_use]
+extern crate rustc_macros;
+
+pub use self::Level::*;
+use rustc_ast::node_id::{NodeId, NodeMap};
+use rustc_ast::{AttrId, Attribute};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+use rustc_error_messages::MultiSpan;
+use rustc_hir::HashStableContext;
+use rustc_hir::HirId;
+use rustc_span::edition::Edition;
+use rustc_span::{sym, symbol::Ident, Span, Symbol};
+use rustc_target::spec::abi::Abi;
+
+use serde::{Deserialize, Serialize};
+
+pub mod builtin;
+
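+/// Selects the right plural suffix or verb form for a count.
+///
+/// A usage sketch:
+///
+/// ```rust,ignore (illustrative)
+/// let n = 2;
+/// let msg = format!("{} warning{} emitted", n, pluralize!(n)); // "2 warnings emitted"
+/// ```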
+#[macro_export]
+macro_rules! pluralize {
+ ($x:expr) => {
+ if $x != 1 { "s" } else { "" }
+ };
+ ("is", $x:expr) => {
+ if $x == 1 { "is" } else { "are" }
+ };
+ ("was", $x:expr) => {
+ if $x == 1 { "was" } else { "were" }
+ };
+ ("this", $x:expr) => {
+ if $x == 1 { "this" } else { "these" }
+ };
+}
+
+/// Indicates the confidence in the correctness of a suggestion.
+///
+/// All suggestions are marked with an `Applicability`. Tools use the applicability of a suggestion
+/// to determine whether it should be automatically applied or if the user should be consulted
+/// before applying the suggestion.
+#[derive(Copy, Clone, Debug, PartialEq, Hash, Encodable, Decodable, Serialize, Deserialize)]
+pub enum Applicability {
+ /// The suggestion is definitely what the user intended, or maintains the exact meaning of the code.
+ /// This suggestion should be automatically applied.
+ ///
+ /// In case of multiple `MachineApplicable` suggestions (whether as part of
+ /// the same `multipart_suggestion` or not), all of them should be
+ /// automatically applied.
+ MachineApplicable,
+
+ /// The suggestion may be what the user intended, but it is uncertain. The suggestion should
+ /// result in valid Rust code if it is applied.
+ MaybeIncorrect,
+
+ /// The suggestion contains placeholders like `(...)` or `{ /* fields */ }`. The suggestion
+ /// cannot be applied automatically because it will not result in valid Rust code. The user
+ /// will need to fill in the placeholders.
+ HasPlaceholders,
+
+ /// The applicability of the suggestion is unknown.
+ Unspecified,
+}
+
+/// Each lint expectation has a `LintExpectationId` assigned by the `LintLevelsBuilder`.
+/// Expected `Diagnostic`s get the lint level `Expect` which stores the `LintExpectationId`
+/// to match it with the actual expectation later on.
+///
+/// The `LintExpectationId` has to be stable between compilations, as diagnostic
+/// instances might be loaded from cache. Lint messages can be emitted during an
+/// `EarlyLintPass` operating on the AST and during a `LateLintPass` traversing the
+/// HIR tree. The AST doesn't have enough information to create a stable id. The
+/// `LintExpectationId` will instead store the [`AttrId`] defining the expectation.
+/// These `LintExpectationId` will be updated to use the stable [`HirId`] once the
+/// AST has been lowered. The transformation is done by the `LintLevelsBuilder`.
+///
+/// Each lint inside the `expect` attribute is tracked individually; the `lint_index`
+/// identifies the lint inside the attribute and ensures that the IDs are unique.
+///
+/// The index values have a type of `u16` to reduce the size of the `LintExpectationId`.
+/// It's reasonable to assume that no user will define 2^16 attributes on one node or
+/// have that amount of lints listed. `u16` values should therefore suffice.
+#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash, Encodable, Decodable)]
+pub enum LintExpectationId {
+ /// Used for lints emitted during the `EarlyLintPass`. This id is not
+ /// hash stable and should not be cached.
+ Unstable { attr_id: AttrId, lint_index: Option<u16> },
+ /// The [`HirId`] that the lint expectation is attached to. This id is
+ /// stable and can be cached. The additional index ensures that nodes with
+ /// several expectations can correctly match diagnostics to the individual
+ /// expectation.
+ Stable { hir_id: HirId, attr_index: u16, lint_index: Option<u16> },
+}
+
+impl LintExpectationId {
+ pub fn is_stable(&self) -> bool {
+ match self {
+ LintExpectationId::Unstable { .. } => false,
+ LintExpectationId::Stable { .. } => true,
+ }
+ }
+
+ pub fn get_lint_index(&self) -> Option<u16> {
+ let (LintExpectationId::Unstable { lint_index, .. }
+ | LintExpectationId::Stable { lint_index, .. }) = self;
+
+ *lint_index
+ }
+
+ pub fn set_lint_index(&mut self, new_lint_index: Option<u16>) {
+ let (LintExpectationId::Unstable { ref mut lint_index, .. }
+ | LintExpectationId::Stable { ref mut lint_index, .. }) = self;
+
+ *lint_index = new_lint_index
+ }
+}
+
+impl<HCX: rustc_hir::HashStableContext> HashStable<HCX> for LintExpectationId {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ match self {
+ LintExpectationId::Stable { hir_id, attr_index, lint_index: Some(lint_index) } => {
+ hir_id.hash_stable(hcx, hasher);
+ attr_index.hash_stable(hcx, hasher);
+ lint_index.hash_stable(hcx, hasher);
+ }
+ _ => {
+ unreachable!(
+ "HashStable should only be called for filled and stable `LintExpectationId`"
+ )
+ }
+ }
+ }
+}
+
+impl<HCX: rustc_hir::HashStableContext> ToStableHashKey<HCX> for LintExpectationId {
+ type KeyType = (HirId, u16, u16);
+
+ #[inline]
+ fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType {
+ match self {
+ LintExpectationId::Stable { hir_id, attr_index, lint_index: Some(lint_index) } => {
+ (*hir_id, *attr_index, *lint_index)
+ }
+ _ => {
+ unreachable!("HashStable should only be called for a filled `LintExpectationId`")
+ }
+ }
+ }
+}
+
+/// Setting for how to handle a lint.
+///
+/// See: <https://doc.rust-lang.org/rustc/lints/levels.html>
+#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord, Debug, Hash, HashStable_Generic)]
+pub enum Level {
+ /// The `allow` level will not issue any message.
+ Allow,
+ /// The `expect` level will suppress the lint message but in turn produce a message
+ /// if the lint wasn't issued in the expected scope. `Expect` should not be used as
+ /// an initial level for a lint.
+ ///
+    /// Note that this still means that the lint is enabled in this position and should
+    /// be emitted; this will in turn fulfill the expectation and suppress the lint.
+ ///
+ /// See RFC 2383.
+ ///
+ /// The [`LintExpectationId`] is used to later link a lint emission to the actual
+ /// expectation. It can be ignored in most cases.
+ Expect(LintExpectationId),
+ /// The `warn` level will produce a warning if the lint was violated, however the
+ /// compiler will continue with its execution.
+ Warn,
+    /// This lint level is a special case of [`Warn`] that can't be overridden. This is used
+ /// to ensure that a lint can't be suppressed. This lint level can currently only be set
+ /// via the console and is therefore session specific.
+ ///
+ /// The [`LintExpectationId`] is intended to fulfill expectations marked via the
+ /// `#[expect]` attribute, that will still be suppressed due to the level.
+ ForceWarn(Option<LintExpectationId>),
+ /// The `deny` level will produce an error and stop further execution after the lint
+ /// pass is complete.
+ Deny,
+ /// `Forbid` is equivalent to the `deny` level but can't be overwritten like the previous
+ /// levels.
+ Forbid,
+}
+
+impl Level {
+ /// Converts a level to a lower-case string.
+ pub fn as_str(self) -> &'static str {
+ match self {
+ Level::Allow => "allow",
+ Level::Expect(_) => "expect",
+ Level::Warn => "warn",
+ Level::ForceWarn(_) => "force-warn",
+ Level::Deny => "deny",
+ Level::Forbid => "forbid",
+ }
+ }
+
+ /// Converts a lower-case string to a level. This will never construct the expect
+    /// level, as that would require a [`LintExpectationId`].
+ pub fn from_str(x: &str) -> Option<Level> {
+ match x {
+ "allow" => Some(Level::Allow),
+ "warn" => Some(Level::Warn),
+ "deny" => Some(Level::Deny),
+ "forbid" => Some(Level::Forbid),
+ "expect" | _ => None,
+ }
+ }
+
+ /// Converts a symbol to a level.
+ pub fn from_attr(attr: &Attribute) -> Option<Level> {
+ match attr.name_or_empty() {
+ sym::allow => Some(Level::Allow),
+ sym::expect => Some(Level::Expect(LintExpectationId::Unstable {
+ attr_id: attr.id,
+ lint_index: None,
+ })),
+ sym::warn => Some(Level::Warn),
+ sym::deny => Some(Level::Deny),
+ sym::forbid => Some(Level::Forbid),
+ _ => None,
+ }
+ }
+
+ pub fn is_error(self) -> bool {
+ match self {
+ Level::Allow | Level::Expect(_) | Level::Warn | Level::ForceWarn(_) => false,
+ Level::Deny | Level::Forbid => true,
+ }
+ }
+
+ pub fn get_expectation_id(&self) -> Option<LintExpectationId> {
+ match self {
+ Level::Expect(id) | Level::ForceWarn(Some(id)) => Some(*id),
+ _ => None,
+ }
+ }
+}
+
+/// Specification of a single lint.
+#[derive(Copy, Clone, Debug)]
+pub struct Lint {
+ /// A string identifier for the lint.
+ ///
+ /// This identifies the lint in attributes and in command-line arguments.
+ /// In those contexts it is always lowercase, but this field is compared
+ /// in a way which is case-insensitive for ASCII characters. This allows
+ /// `declare_lint!()` invocations to follow the convention of upper-case
+ /// statics without repeating the name.
+ ///
+ /// The name is written with underscores, e.g., "unused_imports".
+ /// On the command line, underscores become dashes.
+ ///
+ /// See <https://rustc-dev-guide.rust-lang.org/diagnostics.html#lint-naming>
+ /// for naming guidelines.
+ pub name: &'static str,
+
+ /// Default level for the lint.
+ ///
+ /// See <https://rustc-dev-guide.rust-lang.org/diagnostics.html#diagnostic-levels>
+ /// for guidelines on choosing a default level.
+ pub default_level: Level,
+
+ /// Description of the lint or the issue it detects.
+ ///
+ /// e.g., "imports that are never used"
+ pub desc: &'static str,
+
+ /// Starting at the given edition, default to the given lint level. If this is `None`, then use
+ /// `default_level`.
+ pub edition_lint_opts: Option<(Edition, Level)>,
+
+ /// `true` if this lint is reported even inside expansions of external macros.
+ pub report_in_external_macro: bool,
+
+ pub future_incompatible: Option<FutureIncompatibleInfo>,
+
+ pub is_plugin: bool,
+
+ /// `Some` if this lint is feature gated, otherwise `None`.
+ pub feature_gate: Option<Symbol>,
+
+ pub crate_level_only: bool,
+}
+
+/// Extra information for a future incompatibility lint.
+#[derive(Copy, Clone, Debug)]
+pub struct FutureIncompatibleInfo {
+ /// e.g., a URL for an issue/PR/RFC or error code
+ pub reference: &'static str,
+    /// The reason for the lint, used by diagnostics to provide
+    /// the right help message.
+ pub reason: FutureIncompatibilityReason,
+ /// Whether to explain the reason to the user.
+ ///
+ /// Set to false for lints that already include a more detailed
+ /// explanation.
+ pub explain_reason: bool,
+}
+
+/// The reason for future incompatibility
+#[derive(Copy, Clone, Debug)]
+pub enum FutureIncompatibilityReason {
+ /// This will be an error in a future release
+ /// for all editions
+ FutureReleaseError,
+ /// This will be an error in a future release, and
+ /// Cargo should create a report even for dependencies
+ FutureReleaseErrorReportNow,
+ /// Code that changes meaning in some way in a
+ /// future release.
+ FutureReleaseSemanticsChange,
+ /// Previously accepted code that will become an
+ /// error in the provided edition
+ EditionError(Edition),
+ /// Code that changes meaning in some way in
+ /// the provided edition
+ EditionSemanticsChange(Edition),
+ /// A custom reason.
+ Custom(&'static str),
+}
+
+impl FutureIncompatibilityReason {
+ pub fn edition(self) -> Option<Edition> {
+ match self {
+ Self::EditionError(e) => Some(e),
+ Self::EditionSemanticsChange(e) => Some(e),
+ _ => None,
+ }
+ }
+}
+
+impl FutureIncompatibleInfo {
+ pub const fn default_fields_for_macro() -> Self {
+ FutureIncompatibleInfo {
+ reference: "",
+ reason: FutureIncompatibilityReason::FutureReleaseError,
+ explain_reason: true,
+ }
+ }
+}
+
+impl Lint {
+ pub const fn default_fields_for_macro() -> Self {
+ Lint {
+ name: "",
+ default_level: Level::Forbid,
+ desc: "",
+ edition_lint_opts: None,
+ is_plugin: false,
+ report_in_external_macro: false,
+ future_incompatible: None,
+ feature_gate: None,
+ crate_level_only: false,
+ }
+ }
+
+ /// Gets the lint's name, with ASCII letters converted to lowercase.
+ pub fn name_lower(&self) -> String {
+ self.name.to_ascii_lowercase()
+ }
+
+ pub fn default_level(&self, edition: Edition) -> Level {
+ self.edition_lint_opts
+ .filter(|(e, _)| *e <= edition)
+ .map(|(_, l)| l)
+ .unwrap_or(self.default_level)
+ }
+}
+
+/// Identifies a lint known to the compiler.
+#[derive(Clone, Copy, Debug)]
+pub struct LintId {
+ // Identity is based on pointer equality of this field.
+ pub lint: &'static Lint,
+}
+
+impl PartialEq for LintId {
+ fn eq(&self, other: &LintId) -> bool {
+ std::ptr::eq(self.lint, other.lint)
+ }
+}
+
+impl Eq for LintId {}
+
+impl std::hash::Hash for LintId {
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+ let ptr = self.lint as *const Lint;
+ ptr.hash(state);
+ }
+}
+
+impl LintId {
+ /// Gets the `LintId` for a `Lint`.
+ pub fn of(lint: &'static Lint) -> LintId {
+ LintId { lint }
+ }
+
+ pub fn lint_name_raw(&self) -> &'static str {
+ self.lint.name
+ }
+
+ /// Gets the name of the lint.
+ pub fn to_string(&self) -> String {
+ self.lint.name_lower()
+ }
+}
+
+impl<HCX> HashStable<HCX> for LintId {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ self.lint_name_raw().hash_stable(hcx, hasher);
+ }
+}
+
+impl<HCX> ToStableHashKey<HCX> for LintId {
+ type KeyType = &'static str;
+
+ #[inline]
+ fn to_stable_hash_key(&self, _: &HCX) -> &'static str {
+ self.lint_name_raw()
+ }
+}
+
+// This could be a closure, but then implementing derive traits
+// becomes hacky (and it gets allocated).
+#[derive(Debug)]
+pub enum BuiltinLintDiagnostics {
+ Normal,
+ AbsPathWithModule(Span),
+ ProcMacroDeriveResolutionFallback(Span),
+ MacroExpandedMacroExportsAccessedByAbsolutePaths(Span),
+ ElidedLifetimesInPaths(usize, Span, bool, Span),
+ UnknownCrateTypes(Span, String, String),
+ UnusedImports(String, Vec<(Span, String)>, Option<Span>),
+ RedundantImport(Vec<(Span, bool)>, Ident),
+ DeprecatedMacro(Option<Symbol>, Span),
+ MissingAbi(Span, Abi),
+ UnusedDocComment(Span),
+ UnusedBuiltinAttribute {
+ attr_name: Symbol,
+ macro_name: String,
+ invoc_span: Span,
+ },
+ PatternsInFnsWithoutBody(Span, Ident),
+ LegacyDeriveHelpers(Span),
+ ProcMacroBackCompat(String),
+ OrPatternsBackCompat(Span, String),
+ ReservedPrefix(Span),
+ TrailingMacro(bool, Ident),
+ BreakWithLabelAndLoop(Span),
+ NamedAsmLabel(String),
+ UnicodeTextFlow(Span, String),
+ UnexpectedCfg((Symbol, Span), Option<(Symbol, Span)>),
+ DeprecatedWhereclauseLocation(Span, String),
+ SingleUseLifetime {
+ /// Span of the parameter which declares this lifetime.
+ param_span: Span,
+ /// Span of the code that should be removed when eliding this lifetime.
+        /// This span should include any leading or trailing comma.
+ deletion_span: Span,
+        /// Span of the single use, or `None` if the lifetime is never used.
+        /// If the `bool` is true, the lifetime will be fully elided.
+ use_span: Option<(Span, bool)>,
+ },
+ NamedArgumentUsedPositionally {
+ /// Span where the named argument is used by position and will be replaced with the named
+ /// argument name
+ position_sp_to_replace: Option<Span>,
+ /// Span where the named argument is used by position and is used for lint messages
+ position_sp_for_msg: Option<Span>,
+ /// Span where the named argument's name is (so we know where to put the warning message)
+ named_arg_sp: Span,
+        /// String containing the named argument's name
+ named_arg_name: String,
+ /// Indicates if the named argument is used as a width/precision for formatting
+ is_formatting_arg: bool,
+ },
+}
+
+/// Lints that are buffered up early on in the `Session` before the
+/// `LintLevels` is calculated.
+pub struct BufferedEarlyLint {
+ /// The span of code that we are linting on.
+ pub span: MultiSpan,
+
+ /// The lint message.
+ pub msg: String,
+
+ /// The `NodeId` of the AST node that generated the lint.
+ pub node_id: NodeId,
+
+ /// A lint Id that can be passed to
+ /// `rustc_lint::early::EarlyContextAndPass::check_id`.
+ pub lint_id: LintId,
+
+ /// Customization of the `DiagnosticBuilder<'_>` for the lint.
+ pub diagnostic: BuiltinLintDiagnostics,
+}
+
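+/// Maps `NodeId`s to the early lints buffered for them.
+///
+/// A usage sketch (identifiers are illustrative):
+///
+/// ```rust,ignore (illustrative)
+/// let mut buffer = LintBuffer::default();
+/// buffer.buffer_lint(some_lint, node_id, span, "message");
+/// let pending = buffer.take(node_id); // drained once lint levels are known
+/// ```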
+#[derive(Default)]
+pub struct LintBuffer {
+ pub map: NodeMap<Vec<BufferedEarlyLint>>,
+}
+
+impl LintBuffer {
+ pub fn add_early_lint(&mut self, early_lint: BufferedEarlyLint) {
+ let arr = self.map.entry(early_lint.node_id).or_default();
+ arr.push(early_lint);
+ }
+
+ pub fn add_lint(
+ &mut self,
+ lint: &'static Lint,
+ node_id: NodeId,
+ span: MultiSpan,
+ msg: &str,
+ diagnostic: BuiltinLintDiagnostics,
+ ) {
+ let lint_id = LintId::of(lint);
+ let msg = msg.to_string();
+ self.add_early_lint(BufferedEarlyLint { lint_id, node_id, span, msg, diagnostic });
+ }
+
+ pub fn take(&mut self, id: NodeId) -> Vec<BufferedEarlyLint> {
+ self.map.remove(&id).unwrap_or_default()
+ }
+
+ pub fn buffer_lint(
+ &mut self,
+ lint: &'static Lint,
+ id: NodeId,
+ sp: impl Into<MultiSpan>,
+ msg: &str,
+ ) {
+ self.add_lint(lint, id, sp.into(), msg, BuiltinLintDiagnostics::Normal)
+ }
+
+ pub fn buffer_lint_with_diagnostic(
+ &mut self,
+ lint: &'static Lint,
+ id: NodeId,
+ sp: impl Into<MultiSpan>,
+ msg: &str,
+ diagnostic: BuiltinLintDiagnostics,
+ ) {
+ self.add_lint(lint, id, sp.into(), msg, diagnostic)
+ }
+}
+
+/// Declares a static item of type `&'static Lint`.
+///
+/// See <https://rustc-dev-guide.rust-lang.org/diagnostics.html> for
+/// documentation and guidelines on writing lints.
+///
+/// The macro call should start with a doc comment explaining the lint
+/// which will be embedded in the rustc user documentation book. It should
+/// be written in markdown and have a format that looks like this:
+///
+/// ```rust,ignore (doc-example)
+/// /// The `my_lint_name` lint detects [short explanation here].
+/// ///
+/// /// ### Example
+/// ///
+/// /// ```rust
+/// /// [insert a concise example that triggers the lint]
+/// /// ```
+/// ///
+/// /// {{produces}}
+/// ///
+/// /// ### Explanation
+/// ///
+/// /// This should be a detailed explanation of *why* the lint exists,
+/// /// and also include suggestions on how the user should fix the problem.
+/// /// Try to keep the text simple enough that a beginner can understand,
+/// /// and include links to other documentation for terminology that a
+/// /// beginner may not be familiar with. If this is "allow" by default,
+/// /// it should explain why (are there false positives or other issues?). If
+/// /// this is a future-incompatible lint, it should say so, with text that
+/// /// looks roughly like this:
+/// ///
+/// /// This is a [future-incompatible] lint to transition this to a hard
+/// /// error in the future. See [issue #xxxxx] for more details.
+/// ///
+/// /// [issue #xxxxx]: https://github.com/rust-lang/rust/issues/xxxxx
+/// ```
+///
+/// The `{{produces}}` tag will be automatically replaced with the output from
+/// the example by the build system. If the lint example is too complex to run
+/// as a simple example (for example, it needs an extern crate), mark the code
+/// block with `ignore` and manually replace the `{{produces}}` line with the
+/// expected output in a `text` code block.
+///
+/// If this is a rustdoc-only lint, then only include a brief introduction
+/// with a link with the text `[rustdoc book]` so that the validator knows
+/// that this is for rustdoc only (see BROKEN_INTRA_DOC_LINKS as an example).
+///
+/// Commands to view and test the documentation:
+///
+/// * `./x.py doc --stage=1 src/doc/rustc --open`: Builds the rustc book and opens it.
+/// * `./x.py test src/tools/lint-docs`: Validates that the lint docs have the
+/// correct style, and that the code example actually emits the expected
+/// lint.
+///
+/// If you have already built the compiler, and you want to make changes to
+/// just the doc comments, then use the `--keep-stage=0` flag with the above
+/// commands to avoid rebuilding the compiler.
+#[macro_export]
+macro_rules! declare_lint {
+ ($(#[$attr:meta])* $vis: vis $NAME: ident, $Level: ident, $desc: expr) => (
+ $crate::declare_lint!(
+ $(#[$attr])* $vis $NAME, $Level, $desc,
+ );
+ );
+ ($(#[$attr:meta])* $vis: vis $NAME: ident, $Level: ident, $desc: expr,
+ $(@feature_gate = $gate:expr;)?
+ $(@future_incompatible = FutureIncompatibleInfo { $($field:ident : $val:expr),* $(,)* }; )?
+ $($v:ident),*) => (
+ $(#[$attr])*
+ $vis static $NAME: &$crate::Lint = &$crate::Lint {
+ name: stringify!($NAME),
+ default_level: $crate::$Level,
+ desc: $desc,
+ edition_lint_opts: None,
+ is_plugin: false,
+ $($v: true,)*
+ $(feature_gate: Some($gate),)*
+ $(future_incompatible: Some($crate::FutureIncompatibleInfo {
+ $($field: $val,)*
+ ..$crate::FutureIncompatibleInfo::default_fields_for_macro()
+ }),)*
+ ..$crate::Lint::default_fields_for_macro()
+ };
+ );
+ ($(#[$attr:meta])* $vis: vis $NAME: ident, $Level: ident, $desc: expr,
+ $lint_edition: expr => $edition_level: ident
+ ) => (
+ $(#[$attr])*
+ $vis static $NAME: &$crate::Lint = &$crate::Lint {
+ name: stringify!($NAME),
+ default_level: $crate::$Level,
+ desc: $desc,
+ edition_lint_opts: Some(($lint_edition, $crate::Level::$edition_level)),
+ report_in_external_macro: false,
+ is_plugin: false,
+ };
+ );
+}
+
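+/// Declares a static item of type `&'static Lint` for an external tool lint
+/// (e.g. a `clippy::` or `rustdoc::` lint).
+///
+/// A usage sketch (hypothetical lint; illustrative only):
+///
+/// ```rust,ignore (illustrative)
+/// declare_tool_lint! {
+///     /// The `clippy::example_lint` lint detects [...].
+///     pub clippy::EXAMPLE_LINT, Warn, "short description here"
+/// }
+/// ```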
+#[macro_export]
+macro_rules! declare_tool_lint {
+ (
+ $(#[$attr:meta])* $vis:vis $tool:ident ::$NAME:ident, $Level: ident, $desc: expr
+ ) => (
+ $crate::declare_tool_lint!{$(#[$attr])* $vis $tool::$NAME, $Level, $desc, false}
+ );
+ (
+ $(#[$attr:meta])* $vis:vis $tool:ident ::$NAME:ident, $Level:ident, $desc:expr,
+ report_in_external_macro: $rep:expr
+ ) => (
+ $crate::declare_tool_lint!{$(#[$attr])* $vis $tool::$NAME, $Level, $desc, $rep}
+ );
+ (
+ $(#[$attr:meta])* $vis:vis $tool:ident ::$NAME:ident, $Level:ident, $desc:expr,
+ $external:expr
+ ) => (
+ $(#[$attr])*
+ $vis static $NAME: &$crate::Lint = &$crate::Lint {
+ name: &concat!(stringify!($tool), "::", stringify!($NAME)),
+ default_level: $crate::$Level,
+ desc: $desc,
+ edition_lint_opts: None,
+ report_in_external_macro: $external,
+ future_incompatible: None,
+ is_plugin: true,
+ feature_gate: None,
+ crate_level_only: false,
+ };
+ );
+}
+
+/// Creates a `LintArray` and returns it as an expression.
+#[macro_export]
+macro_rules! lint_array {
+ ($( $lint:expr ),* ,) => { lint_array!( $($lint),* ) };
+ ($( $lint:expr ),*) => {{
+ vec![$($lint),*]
+ }}
+}
+
+pub type LintArray = Vec<&'static Lint>;
+
+pub trait LintPass {
+ fn name(&self) -> &'static str;
+}
+
+/// Implements `LintPass for $ty` with the given list of `Lint` statics.
+#[macro_export]
+macro_rules! impl_lint_pass {
+ ($ty:ty => [$($lint:expr),* $(,)?]) => {
+ impl $crate::LintPass for $ty {
+ fn name(&self) -> &'static str { stringify!($ty) }
+ }
+ impl $ty {
+ pub fn get_lints() -> $crate::LintArray { $crate::lint_array!($($lint),*) }
+ }
+ };
+}
+
+/// Declares a type named `$name` which implements `LintPass`.
+/// To the right of `=>` a comma separated list of `Lint` statics is given.
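+/// A usage sketch (hypothetical pass name; illustrative only):
+///
+/// ```rust,ignore (illustrative)
+/// declare_lint_pass!(ExampleLints => [UNUSED_DOC_COMMENTS, MISSING_ABI]);
+/// ```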
+#[macro_export]
+macro_rules! declare_lint_pass {
+ ($(#[$m:meta])* $name:ident => [$($lint:expr),* $(,)?]) => {
+ $(#[$m])* #[derive(Copy, Clone)] pub struct $name;
+ $crate::impl_lint_pass!($name => [$($lint),*]);
+ };
+}
diff --git a/compiler/rustc_llvm/Cargo.toml b/compiler/rustc_llvm/Cargo.toml
new file mode 100644
index 000000000..34556df3c
--- /dev/null
+++ b/compiler/rustc_llvm/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "rustc_llvm"
+version = "0.0.0"
+edition = "2021"
+
+[features]
+static-libstdcpp = []
+emscripten = []
+
+[dependencies]
+libc = "0.2.73"
+
+[build-dependencies]
+cc = "1.0.69"
diff --git a/compiler/rustc_llvm/build.rs b/compiler/rustc_llvm/build.rs
new file mode 100644
index 000000000..62ef5804d
--- /dev/null
+++ b/compiler/rustc_llvm/build.rs
@@ -0,0 +1,368 @@
+use std::env;
+use std::ffi::{OsStr, OsString};
+use std::fmt::Display;
+use std::path::{Path, PathBuf};
+use std::process::{Command, Stdio};
+
+const OPTIONAL_COMPONENTS: &[&str] = &[
+ "x86",
+ "arm",
+ "aarch64",
+ "amdgpu",
+ "avr",
+ "m68k",
+ "mips",
+ "powerpc",
+ "systemz",
+ "jsbackend",
+ "webassembly",
+ "msp430",
+ "sparc",
+ "nvptx",
+ "hexagon",
+ "riscv",
+ "bpf",
+];
+
+const REQUIRED_COMPONENTS: &[&str] =
+ &["ipo", "bitreader", "bitwriter", "linker", "asmparser", "lto", "coverage", "instrumentation"];
+
+fn detect_llvm_link() -> (&'static str, &'static str) {
+ // Force the link mode we want, preferring static by default, but
+ // possibly overridden by `configure --enable-llvm-link-shared`.
+ if tracked_env_var_os("LLVM_LINK_SHARED").is_some() {
+ ("dylib", "--link-shared")
+ } else {
+ ("static", "--link-static")
+ }
+}
+
+// Because Cargo adds the compiler's dylib path to our library search path, llvm-config may
+// break: the dylib path for the compiler, as of this writing, contains a copy of the LLVM
+// shared library, which means that when our freshly built llvm-config goes to load its
+// associated LLVM, it actually loads the compiler's LLVM. In particular when building the first
+// compiler (i.e., in stage 0) that's a problem, as the compiler's LLVM is likely different from
+// the one we want to use. As such, we restore the environment to what bootstrap saw. This isn't
+// perfect -- we might actually want to see something from Cargo's added library paths -- but
+// for now it works.
+fn restore_library_path() {
+ let key = tracked_env_var_os("REAL_LIBRARY_PATH_VAR").expect("REAL_LIBRARY_PATH_VAR");
+ if let Some(env) = tracked_env_var_os("REAL_LIBRARY_PATH") {
+ env::set_var(&key, &env);
+ } else {
+ env::remove_var(&key);
+ }
+}
+
+/// Reads an environment variable and adds it to dependencies.
+/// Supposed to be used for all variables except those set for build scripts by cargo
+/// <https://doc.rust-lang.org/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-build-scripts>
+fn tracked_env_var_os<K: AsRef<OsStr> + Display>(key: K) -> Option<OsString> {
+ println!("cargo:rerun-if-env-changed={}", key);
+ env::var_os(key)
+}
+
+fn rerun_if_changed_anything_in_dir(dir: &Path) {
+ let mut stack = dir
+ .read_dir()
+ .unwrap()
+ .map(|e| e.unwrap())
+ .filter(|e| &*e.file_name() != ".git")
+ .collect::<Vec<_>>();
+ while let Some(entry) = stack.pop() {
+ let path = entry.path();
+ if entry.file_type().unwrap().is_dir() {
+ stack.extend(path.read_dir().unwrap().map(|e| e.unwrap()));
+ } else {
+ println!("cargo:rerun-if-changed={}", path.display());
+ }
+ }
+}
+
+#[track_caller]
+fn output(cmd: &mut Command) -> String {
+ let output = match cmd.stderr(Stdio::inherit()).output() {
+ Ok(status) => status,
+ Err(e) => {
+ println!("\n\nfailed to execute command: {:?}\nerror: {}\n\n", cmd, e);
+ std::process::exit(1);
+ }
+ };
+ if !output.status.success() {
+ panic!(
+ "command did not execute successfully: {:?}\n\
+ expected success, got: {}",
+ cmd, output.status
+ );
+ }
+ String::from_utf8(output.stdout).unwrap()
+}
+
+fn main() {
+ for component in REQUIRED_COMPONENTS.iter().chain(OPTIONAL_COMPONENTS.iter()) {
+ println!("cargo:rustc-check-cfg=values(llvm_component,\"{}\")", component);
+ }
+
+ if tracked_env_var_os("RUST_CHECK").is_some() {
+ // If we're just running `check`, there's no need for LLVM to be built.
+ return;
+ }
+
+ restore_library_path();
+
+ let target = env::var("TARGET").expect("TARGET was not set");
+ let llvm_config =
+ tracked_env_var_os("LLVM_CONFIG").map(|x| Some(PathBuf::from(x))).unwrap_or_else(|| {
+ if let Some(dir) = tracked_env_var_os("CARGO_TARGET_DIR").map(PathBuf::from) {
+ let to_test = dir
+ .parent()
+ .unwrap()
+ .parent()
+ .unwrap()
+ .join(&target)
+ .join("llvm/bin/llvm-config");
+ if Command::new(&to_test).output().is_ok() {
+ return Some(to_test);
+ }
+ }
+ None
+ });
+
+ if let Some(llvm_config) = &llvm_config {
+ println!("cargo:rerun-if-changed={}", llvm_config.display());
+ }
+ let llvm_config = llvm_config.unwrap_or_else(|| PathBuf::from("llvm-config"));
+
+ // Test whether we're cross-compiling LLVM. This is a pretty rare case
+ // currently where we're producing an LLVM for a different platform than
+ // what this build script is currently running on.
+ //
+ // In that case, there's no guarantee that we can actually run the target,
+ // so the build system works around this by giving us the LLVM_CONFIG for
+ // the host platform. This only really works if the host LLVM and target
+ // LLVM are compiled the same way, but for us that's typically the case.
+ //
+ // We *want* to detect this cross-compiling situation by asking llvm-config
+ // what its host-target is. If that's not the TARGET, then we're cross
+ // compiling. Unfortunately `llvm-config` seems to either be buggy, or we're
+ // misconfiguring it, because the `i686-pc-windows-gnu` build of LLVM will
+ // report itself with a `--host-target` of `x86_64-pc-windows-gnu`. This
+ // tricks us into thinking we're doing a cross build when we aren't, so
+ // havoc ensues.
+ //
+ // In any case, if we're cross compiling, this generally just means that we
+ // can't trust all the output of llvm-config because it might be targeted
+ // for the host rather than the target. As a result a bunch of blocks below
+ // are gated on `if !is_crossed`
+ let target = env::var("TARGET").expect("TARGET was not set");
+ let host = env::var("HOST").expect("HOST was not set");
+ let is_crossed = target != host;
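+ // For example, HOST=x86_64-unknown-linux-gnu with
+ // TARGET=aarch64-unknown-linux-gnu makes this a cross build, in which case
+ // much of the llvm-config output below can't be trusted.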
+
+ let components = output(Command::new(&llvm_config).arg("--components"));
+ let mut components = components.split_whitespace().collect::<Vec<_>>();
+ components.retain(|c| OPTIONAL_COMPONENTS.contains(c) || REQUIRED_COMPONENTS.contains(c));
+
+ for component in REQUIRED_COMPONENTS {
+ if !components.contains(component) {
+ panic!("require llvm component {} but wasn't found", component);
+ }
+ }
+
+ for component in components.iter() {
+ println!("cargo:rustc-cfg=llvm_component=\"{}\"", component);
+ }
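+ // Compiler crates can then gate code on these, e.g. with
+ // `#[cfg(llvm_component = "x86")]`.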
+
+ // Link in our own LLVM shims, compiled with the same flags as LLVM
+ let mut cmd = Command::new(&llvm_config);
+ cmd.arg("--cxxflags");
+ let cxxflags = output(&mut cmd);
+ let mut cfg = cc::Build::new();
+ cfg.warnings(false);
+ for flag in cxxflags.split_whitespace() {
+ // Ignore flags like `-m64` when we're doing a cross build
+ if is_crossed && flag.starts_with("-m") {
+ continue;
+ }
+
+ if flag.starts_with("-flto") {
+ continue;
+ }
+
+ // -Wdate-time is not supported by the netbsd cross compiler
+ if is_crossed && target.contains("netbsd") && flag.contains("date-time") {
+ continue;
+ }
+
+ // Include path contains host directory, replace it with target
+ if is_crossed && flag.starts_with("-I") {
+ cfg.flag(&flag.replace(&host, &target));
+ continue;
+ }
+
+ cfg.flag(flag);
+ }
+
+ for component in &components {
+ let mut flag = String::from("LLVM_COMPONENT_");
+ flag.push_str(&component.to_uppercase());
+ cfg.define(&flag, None);
+ }
+
+ if tracked_env_var_os("LLVM_RUSTLLVM").is_some() {
+ cfg.define("LLVM_RUSTLLVM", None);
+ }
+
+ if tracked_env_var_os("LLVM_NDEBUG").is_some() {
+ cfg.define("NDEBUG", None);
+ cfg.debug(false);
+ }
+
+ rerun_if_changed_anything_in_dir(Path::new("llvm-wrapper"));
+ cfg.file("llvm-wrapper/PassWrapper.cpp")
+ .file("llvm-wrapper/RustWrapper.cpp")
+ .file("llvm-wrapper/ArchiveWrapper.cpp")
+ .file("llvm-wrapper/CoverageMappingWrapper.cpp")
+ .file("llvm-wrapper/Linker.cpp")
+ .cpp(true)
+ .cpp_link_stdlib(None) // we handle this below
+ .compile("llvm-wrapper");
+
+ let (llvm_kind, llvm_link_arg) = detect_llvm_link();
+
+ // Link in all LLVM libraries. If we're using the "wrong" llvm-config then
+ // we won't pick up system libs, because unfortunately they're for the host
+ // of llvm-config, not the target that we're attempting to link.
+ let mut cmd = Command::new(&llvm_config);
+ cmd.arg(llvm_link_arg).arg("--libs");
+
+ if !is_crossed {
+ cmd.arg("--system-libs");
+ } else if target.contains("windows-gnu") {
+ println!("cargo:rustc-link-lib=shell32");
+ println!("cargo:rustc-link-lib=uuid");
+ } else if target.contains("netbsd") || target.contains("haiku") || target.contains("darwin") {
+ println!("cargo:rustc-link-lib=z");
+ }
+ cmd.args(&components);
+
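+ // Illustrative (not verbatim) llvm-config output parsed by this loop:
+ //   -lLLVMX86CodeGen -lLLVMSupport -lz          (Unix-style flags)
+ //   C:\LLVM\lib\LLVMSupport.lib                 (MSVC full paths)
+ // Each entry is reduced to a bare library name below.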
+ for lib in output(&mut cmd).split_whitespace() {
+ let name = if let Some(stripped) = lib.strip_prefix("-l") {
+ stripped
+ } else if let Some(stripped) = lib.strip_prefix('-') {
+ stripped
+ } else if Path::new(lib).exists() {
+ // On MSVC llvm-config will print the full name to libraries, but
+ // we're only interested in the name part
+ let name = Path::new(lib).file_name().unwrap().to_str().unwrap();
+ name.trim_end_matches(".lib")
+ } else if lib.ends_with(".lib") {
+ // Some MSVC libraries just come up with `.lib` tacked on, so chop
+ // that off
+ lib.trim_end_matches(".lib")
+ } else {
+ continue;
+ };
+
+ // We neither need nor want this library, but LLVM's CMake build system
+ // doesn't provide a way to disable it, so filter it out here even though
+ // we may or may not have built it. We don't reference anything from it,
+ // and it otherwise may just pull in an extra dependency on libedit,
+ // which we don't want.
+ if name == "LLVMLineEditor" {
+ continue;
+ }
+
+ let kind = if name.starts_with("LLVM") { llvm_kind } else { "dylib" };
+ println!("cargo:rustc-link-lib={}={}", kind, name);
+ }
+
+ // LLVM ldflags
+ //
+ // If we're a cross-compile of LLVM then unfortunately we can't trust these
+ // ldflags (largely where all the LLVM libs are located). We currently just
+ // hack around this by replacing the host triple with the target and praying
+ // that those -L directories are the same!
+ let mut cmd = Command::new(&llvm_config);
+ cmd.arg(llvm_link_arg).arg("--ldflags");
+ for lib in output(&mut cmd).split_whitespace() {
+ if is_crossed {
+ if let Some(stripped) = lib.strip_prefix("-LIBPATH:") {
+ println!("cargo:rustc-link-search=native={}", stripped.replace(&host, &target));
+ } else if let Some(stripped) = lib.strip_prefix("-L") {
+ println!("cargo:rustc-link-search=native={}", stripped.replace(&host, &target));
+ }
+ } else if let Some(stripped) = lib.strip_prefix("-LIBPATH:") {
+ println!("cargo:rustc-link-search=native={}", stripped);
+ } else if let Some(stripped) = lib.strip_prefix("-l") {
+ println!("cargo:rustc-link-lib={}", stripped);
+ } else if let Some(stripped) = lib.strip_prefix("-L") {
+ println!("cargo:rustc-link-search=native={}", stripped);
+ }
+ }
+
+ // Some LLVM linker flags (-L and -l) may be needed even when linking
+ // rustc_llvm itself. For example, when using a static libc++ we may need to
+ // manually specify the library search path and -ldl -lpthread as link
+ // dependencies.
+ let llvm_linker_flags = tracked_env_var_os("LLVM_LINKER_FLAGS");
+ if let Some(s) = llvm_linker_flags {
+ for lib in s.into_string().unwrap().split_whitespace() {
+ if let Some(stripped) = lib.strip_prefix("-l") {
+ println!("cargo:rustc-link-lib={}", stripped);
+ } else if let Some(stripped) = lib.strip_prefix("-L") {
+ println!("cargo:rustc-link-search=native={}", stripped);
+ }
+ }
+ }
+
+ let llvm_static_stdcpp = tracked_env_var_os("LLVM_STATIC_STDCPP");
+ let llvm_use_libcxx = tracked_env_var_os("LLVM_USE_LIBCXX");
+
+ let stdcppname = if target.contains("openbsd") {
+ if target.contains("sparc64") { "estdc++" } else { "c++" }
+ } else if target.contains("darwin")
+ || target.contains("freebsd")
+ || target.contains("windows-gnullvm")
+ {
+ "c++"
+ } else if target.contains("netbsd") && llvm_static_stdcpp.is_some() {
+ // NetBSD uses a separate library when relocation is required
+ "stdc++_pic"
+ } else if llvm_use_libcxx.is_some() {
+ "c++"
+ } else {
+ "stdc++"
+ };
+
+ // RISC-V GCC erroneously requires libatomic for sub-word
+ // atomic operations. FreeBSD uses Clang as its system
+ // compiler and provides no libatomic in its base system,
+ // so it does not want this.
+ if !target.contains("freebsd") && target.starts_with("riscv") {
+ println!("cargo:rustc-link-lib=atomic");
+ }
+
+ // C++ runtime library
+ if !target.contains("msvc") {
+ if let Some(s) = llvm_static_stdcpp {
+ assert!(!cxxflags.contains("stdlib=libc++"));
+ let path = PathBuf::from(s);
+ println!("cargo:rustc-link-search=native={}", path.parent().unwrap().display());
+ if target.contains("windows") {
+ println!("cargo:rustc-link-lib=static:-bundle={}", stdcppname);
+ } else {
+ println!("cargo:rustc-link-lib=static={}", stdcppname);
+ }
+ } else if cxxflags.contains("stdlib=libc++") {
+ println!("cargo:rustc-link-lib=c++");
+ } else {
+ println!("cargo:rustc-link-lib={}", stdcppname);
+ }
+ }
+
+ // Libstdc++ depends on pthread which Rust doesn't link on MinGW
+ // since nothing else requires it.
+ if target.ends_with("windows-gnu") {
+ println!("cargo:rustc-link-lib=static:-bundle=pthread");
+ }
+}
diff --git a/compiler/rustc_llvm/llvm-wrapper/.editorconfig b/compiler/rustc_llvm/llvm-wrapper/.editorconfig
new file mode 100644
index 000000000..865cd45f7
--- /dev/null
+++ b/compiler/rustc_llvm/llvm-wrapper/.editorconfig
@@ -0,0 +1,6 @@
+[*.{h,cpp}]
+end_of_line = lf
+insert_final_newline = true
+charset = utf-8
+indent_style = space
+indent_size = 2
diff --git a/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp
new file mode 100644
index 000000000..97541e615
--- /dev/null
+++ b/compiler/rustc_llvm/llvm-wrapper/ArchiveWrapper.cpp
@@ -0,0 +1,222 @@
+#include "LLVMWrapper.h"
+
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ArchiveWriter.h"
+#include "llvm/Support/Path.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+struct RustArchiveMember {
+ const char *Filename;
+ const char *Name;
+ Archive::Child Child;
+
+ RustArchiveMember()
+ : Filename(nullptr), Name(nullptr),
+ Child(nullptr, nullptr, nullptr)
+ {
+ }
+ ~RustArchiveMember() {}
+};
+
+struct RustArchiveIterator {
+ bool First;
+ Archive::child_iterator Cur;
+ Archive::child_iterator End;
+ std::unique_ptr<Error> Err;
+
+ RustArchiveIterator(Archive::child_iterator Cur, Archive::child_iterator End,
+ std::unique_ptr<Error> Err)
+ : First(true),
+ Cur(Cur),
+ End(End),
+ Err(std::move(Err)) {}
+};
+
+enum class LLVMRustArchiveKind {
+ GNU,
+ BSD,
+ DARWIN,
+ COFF,
+};
+
+static Archive::Kind fromRust(LLVMRustArchiveKind Kind) {
+ switch (Kind) {
+ case LLVMRustArchiveKind::GNU:
+ return Archive::K_GNU;
+ case LLVMRustArchiveKind::BSD:
+ return Archive::K_BSD;
+ case LLVMRustArchiveKind::DARWIN:
+ return Archive::K_DARWIN;
+ case LLVMRustArchiveKind::COFF:
+ return Archive::K_COFF;
+ default:
+ report_fatal_error("Bad ArchiveKind.");
+ }
+}
+
+typedef OwningBinary<Archive> *LLVMRustArchiveRef;
+typedef RustArchiveMember *LLVMRustArchiveMemberRef;
+typedef Archive::Child *LLVMRustArchiveChildRef;
+typedef Archive::Child const *LLVMRustArchiveChildConstRef;
+typedef RustArchiveIterator *LLVMRustArchiveIteratorRef;
+
+extern "C" LLVMRustArchiveRef LLVMRustOpenArchive(char *Path) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufOr =
+ MemoryBuffer::getFile(Path, -1, false);
+ if (!BufOr) {
+ LLVMRustSetLastError(BufOr.getError().message().c_str());
+ return nullptr;
+ }
+
+ Expected<std::unique_ptr<Archive>> ArchiveOr =
+ Archive::create(BufOr.get()->getMemBufferRef());
+
+ if (!ArchiveOr) {
+ LLVMRustSetLastError(toString(ArchiveOr.takeError()).c_str());
+ return nullptr;
+ }
+
+ OwningBinary<Archive> *Ret = new OwningBinary<Archive>(
+ std::move(ArchiveOr.get()), std::move(BufOr.get()));
+
+ return Ret;
+}
+
+extern "C" void LLVMRustDestroyArchive(LLVMRustArchiveRef RustArchive) {
+ delete RustArchive;
+}
+
+extern "C" LLVMRustArchiveIteratorRef
+LLVMRustArchiveIteratorNew(LLVMRustArchiveRef RustArchive) {
+ Archive *Archive = RustArchive->getBinary();
+ std::unique_ptr<Error> Err = std::make_unique<Error>(Error::success());
+ auto Cur = Archive->child_begin(*Err);
+ if (*Err) {
+ LLVMRustSetLastError(toString(std::move(*Err)).c_str());
+ return nullptr;
+ }
+ auto End = Archive->child_end();
+ return new RustArchiveIterator(Cur, End, std::move(Err));
+}
+
+extern "C" LLVMRustArchiveChildConstRef
+LLVMRustArchiveIteratorNext(LLVMRustArchiveIteratorRef RAI) {
+ if (RAI->Cur == RAI->End)
+ return nullptr;
+
+ // Advancing the iterator validates the next child, and this can
+ // uncover an error. LLVM requires that we check all Errors,
+ // so we only advance the iterator if we actually need to fetch
+ // the next child.
+ // This means we must not advance the iterator in the *first* call,
+ // but instead advance it *before* fetching the child in all later calls.
+ if (!RAI->First) {
+ ++RAI->Cur;
+ if (*RAI->Err) {
+ LLVMRustSetLastError(toString(std::move(*RAI->Err)).c_str());
+ return nullptr;
+ }
+ } else {
+ RAI->First = false;
+ }
+
+ if (RAI->Cur == RAI->End)
+ return nullptr;
+
+ const Archive::Child &Child = *RAI->Cur.operator->();
+ Archive::Child *Ret = new Archive::Child(Child);
+
+ return Ret;
+}
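+// The Archive::Child returned above is heap-allocated; the Rust caller is
+// expected to release it via LLVMRustArchiveChildFree below.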
+
+extern "C" void LLVMRustArchiveChildFree(LLVMRustArchiveChildRef Child) {
+ delete Child;
+}
+
+extern "C" void LLVMRustArchiveIteratorFree(LLVMRustArchiveIteratorRef RAI) {
+ delete RAI;
+}
+
+extern "C" const char *
+LLVMRustArchiveChildName(LLVMRustArchiveChildConstRef Child, size_t *Size) {
+ Expected<StringRef> NameOrErr = Child->getName();
+ if (!NameOrErr) {
+ // rustc_codegen_llvm currently doesn't use this error string, but it might be
+ // useful in the future, and in the meantime this tells LLVM that the
+ // error was not ignored and that it shouldn't abort the process.
+ LLVMRustSetLastError(toString(NameOrErr.takeError()).c_str());
+ return nullptr;
+ }
+ StringRef Name = NameOrErr.get();
+ *Size = Name.size();
+ return Name.data();
+}
+
+extern "C" const char *LLVMRustArchiveChildData(LLVMRustArchiveChildRef Child,
+ size_t *Size) {
+ StringRef Buf;
+ Expected<StringRef> BufOrErr = Child->getBuffer();
+ if (!BufOrErr) {
+ LLVMRustSetLastError(toString(BufOrErr.takeError()).c_str());
+ return nullptr;
+ }
+ Buf = BufOrErr.get();
+ *Size = Buf.size();
+ return Buf.data();
+}
+
+extern "C" LLVMRustArchiveMemberRef
+LLVMRustArchiveMemberNew(char *Filename, char *Name,
+ LLVMRustArchiveChildRef Child) {
+ RustArchiveMember *Member = new RustArchiveMember;
+ Member->Filename = Filename;
+ Member->Name = Name;
+ if (Child)
+ Member->Child = *Child;
+ return Member;
+}
+
+extern "C" void LLVMRustArchiveMemberFree(LLVMRustArchiveMemberRef Member) {
+ delete Member;
+}
+
+extern "C" LLVMRustResult
+LLVMRustWriteArchive(char *Dst, size_t NumMembers,
+ const LLVMRustArchiveMemberRef *NewMembers,
+ bool WriteSymbtab, LLVMRustArchiveKind RustKind) {
+
+ std::vector<NewArchiveMember> Members;
+ auto Kind = fromRust(RustKind);
+
+ for (size_t I = 0; I < NumMembers; I++) {
+ auto Member = NewMembers[I];
+ assert(Member->Name);
+ if (Member->Filename) {
+ Expected<NewArchiveMember> MOrErr =
+ NewArchiveMember::getFile(Member->Filename, true);
+ if (!MOrErr) {
+ LLVMRustSetLastError(toString(MOrErr.takeError()).c_str());
+ return LLVMRustResult::Failure;
+ }
+ MOrErr->MemberName = sys::path::filename(MOrErr->MemberName);
+ Members.push_back(std::move(*MOrErr));
+ } else {
+ Expected<NewArchiveMember> MOrErr =
+ NewArchiveMember::getOldMember(Member->Child, true);
+ if (!MOrErr) {
+ LLVMRustSetLastError(toString(MOrErr.takeError()).c_str());
+ return LLVMRustResult::Failure;
+ }
+ Members.push_back(std::move(*MOrErr));
+ }
+ }
+
+ auto Result = writeArchive(Dst, Members, WriteSymbtab, Kind, true, false);
+ if (!Result)
+ return LLVMRustResult::Success;
+ LLVMRustSetLastError(toString(std::move(Result)).c_str());
+
+ return LLVMRustResult::Failure;
+}
diff --git a/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
new file mode 100644
index 000000000..154f554d6
--- /dev/null
+++ b/compiler/rustc_llvm/llvm-wrapper/CoverageMappingWrapper.cpp
@@ -0,0 +1,117 @@
+#include "LLVMWrapper.h"
+#include "llvm/ProfileData/Coverage/CoverageMapping.h"
+#include "llvm/ProfileData/Coverage/CoverageMappingWriter.h"
+#include "llvm/ProfileData/InstrProf.h"
+#include "llvm/ADT/ArrayRef.h"
+
+#include <iostream>
+
+using namespace llvm;
+
+struct LLVMRustCounterMappingRegion {
+ coverage::Counter Count;
+ coverage::Counter FalseCount;
+ uint32_t FileID;
+ uint32_t ExpandedFileID;
+ uint32_t LineStart;
+ uint32_t ColumnStart;
+ uint32_t LineEnd;
+ uint32_t ColumnEnd;
+ coverage::CounterMappingRegion::RegionKind Kind;
+};
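+// NOTE: this layout is shared with the Rust side of the coverage FFI (a
+// #[repr(C)] struct in rustc_codegen_llvm); the two definitions must be kept
+// in sync field for field.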
+
+extern "C" void LLVMRustCoverageWriteFilenamesSectionToBuffer(
+ const char* const Filenames[],
+ size_t FilenamesLen,
+ RustStringRef BufferOut) {
+#if LLVM_VERSION_GE(13,0)
+ SmallVector<std::string,32> FilenameRefs;
+ for (size_t i = 0; i < FilenamesLen; i++) {
+ FilenameRefs.push_back(std::string(Filenames[i]));
+ }
+#else
+ SmallVector<StringRef,32> FilenameRefs;
+ for (size_t i = 0; i < FilenamesLen; i++) {
+ FilenameRefs.push_back(StringRef(Filenames[i]));
+ }
+#endif
+ auto FilenamesWriter = coverage::CoverageFilenamesSectionWriter(
+ makeArrayRef(FilenameRefs));
+ RawRustStringOstream OS(BufferOut);
+ FilenamesWriter.write(OS);
+}
+
+extern "C" void LLVMRustCoverageWriteMappingToBuffer(
+ const unsigned *VirtualFileMappingIDs,
+ unsigned NumVirtualFileMappingIDs,
+ const coverage::CounterExpression *Expressions,
+ unsigned NumExpressions,
+ LLVMRustCounterMappingRegion *RustMappingRegions,
+ unsigned NumMappingRegions,
+ RustStringRef BufferOut) {
+ // Convert from FFI representation to LLVM representation.
+ SmallVector<coverage::CounterMappingRegion, 0> MappingRegions;
+ MappingRegions.reserve(NumMappingRegions);
+ for (const auto &Region : makeArrayRef(RustMappingRegions, NumMappingRegions)) {
+ MappingRegions.emplace_back(
+ Region.Count, Region.FalseCount, Region.FileID, Region.ExpandedFileID,
+ Region.LineStart, Region.ColumnStart, Region.LineEnd, Region.ColumnEnd,
+ Region.Kind);
+ }
+ auto CoverageMappingWriter = coverage::CoverageMappingWriter(
+ makeArrayRef(VirtualFileMappingIDs, NumVirtualFileMappingIDs),
+ makeArrayRef(Expressions, NumExpressions),
+ MappingRegions);
+ RawRustStringOstream OS(BufferOut);
+ CoverageMappingWriter.write(OS);
+}
+
+extern "C" LLVMValueRef LLVMRustCoverageCreatePGOFuncNameVar(LLVMValueRef F, const char *FuncName) {
+ StringRef FuncNameRef(FuncName);
+ return wrap(createPGOFuncNameVar(*cast<Function>(unwrap(F)), FuncNameRef));
+}
+
+extern "C" uint64_t LLVMRustCoverageHashCString(const char *StrVal) {
+ StringRef StrRef(StrVal);
+ return IndexedInstrProf::ComputeHash(StrRef);
+}
+
+extern "C" uint64_t LLVMRustCoverageHashByteArray(
+ const char *Bytes,
+ unsigned NumBytes) {
+ StringRef StrRef(Bytes, NumBytes);
+ return IndexedInstrProf::ComputeHash(StrRef);
+}
+
+static void WriteSectionNameToString(LLVMModuleRef M,
+ InstrProfSectKind SK,
+ RustStringRef Str) {
+ Triple TargetTriple(unwrap(M)->getTargetTriple());
+ auto name = getInstrProfSectionName(SK, TargetTriple.getObjectFormat());
+ RawRustStringOstream OS(Str);
+ OS << name;
+}
+
+extern "C" void LLVMRustCoverageWriteMapSectionNameToString(LLVMModuleRef M,
+ RustStringRef Str) {
+ WriteSectionNameToString(M, IPSK_covmap, Str);
+}
+
+extern "C" void LLVMRustCoverageWriteFuncSectionNameToString(LLVMModuleRef M,
+ RustStringRef Str) {
+ WriteSectionNameToString(M, IPSK_covfun, Str);
+}
+
+extern "C" void LLVMRustCoverageWriteMappingVarNameToString(RustStringRef Str) {
+ auto name = getCoverageMappingVarName();
+ RawRustStringOstream OS(Str);
+ OS << name;
+}
+
+extern "C" uint32_t LLVMRustCoverageMappingVersion() {
+#if LLVM_VERSION_GE(13, 0)
+ return coverage::CovMapVersion::Version6;
+#else
+ return coverage::CovMapVersion::Version5;
+#endif
+}
diff --git a/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h b/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h
new file mode 100644
index 000000000..015c1c52b
--- /dev/null
+++ b/compiler/rustc_llvm/llvm-wrapper/LLVMWrapper.h
@@ -0,0 +1,121 @@
+#include "llvm-c/BitReader.h"
+#include "llvm-c/Core.h"
+#include "llvm-c/Object.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Analysis/Lint.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Vectorize.h"
+
+#define LLVM_VERSION_GE(major, minor) \
+ (LLVM_VERSION_MAJOR > (major) || \
+ (LLVM_VERSION_MAJOR == (major) && LLVM_VERSION_MINOR >= (minor)))
+
+#define LLVM_VERSION_LT(major, minor) (!LLVM_VERSION_GE((major), (minor)))
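+// For example, when building against LLVM 14.0, LLVM_VERSION_GE(13, 0) and
+// LLVM_VERSION_LT(15, 0) both evaluate to true, so code gated on either is
+// compiled in.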
+
+#include "llvm/IR/LegacyPassManager.h"
+
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/Linker/Linker.h"
+
+extern "C" void LLVMRustSetLastError(const char *);
+
+enum class LLVMRustResult { Success, Failure };
+
+enum LLVMRustAttribute {
+ AlwaysInline = 0,
+ ByVal = 1,
+ Cold = 2,
+ InlineHint = 3,
+ MinSize = 4,
+ Naked = 5,
+ NoAlias = 6,
+ NoCapture = 7,
+ NoInline = 8,
+ NonNull = 9,
+ NoRedZone = 10,
+ NoReturn = 11,
+ NoUnwind = 12,
+ OptimizeForSize = 13,
+ ReadOnly = 14,
+ SExt = 15,
+ StructRet = 16,
+ UWTable = 17,
+ ZExt = 18,
+ InReg = 19,
+ SanitizeThread = 20,
+ SanitizeAddress = 21,
+ SanitizeMemory = 22,
+ NonLazyBind = 23,
+ OptimizeNone = 24,
+ ReturnsTwice = 25,
+ ReadNone = 26,
+ InaccessibleMemOnly = 27,
+ SanitizeHWAddress = 28,
+ WillReturn = 29,
+ StackProtectReq = 30,
+ StackProtectStrong = 31,
+ StackProtect = 32,
+ NoUndef = 33,
+ SanitizeMemTag = 34,
+ NoCfCheck = 35,
+ ShadowCallStack = 36,
+ AllocSize = 37,
+#if LLVM_VERSION_GE(15, 0)
+ AllocatedPointer = 38,
+ AllocAlign = 39,
+#endif
+};
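+// NOTE: these discriminants are part of the FFI contract with the matching
+// attribute enum on the Rust side (in rustc_codegen_llvm's LLVM bindings), so
+// they must not be reordered or renumbered.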
+
+typedef struct OpaqueRustString *RustStringRef;
+typedef struct LLVMOpaqueTwine *LLVMTwineRef;
+typedef struct LLVMOpaqueSMDiagnostic *LLVMSMDiagnosticRef;
+
+extern "C" void LLVMRustStringWriteImpl(RustStringRef Str, const char *Ptr,
+ size_t Size);
+
+class RawRustStringOstream : public llvm::raw_ostream {
+ RustStringRef Str;
+ uint64_t Pos;
+
+ void write_impl(const char *Ptr, size_t Size) override {
+ LLVMRustStringWriteImpl(Str, Ptr, Size);
+ Pos += Size;
+ }
+
+ uint64_t current_pos() const override { return Pos; }
+
+public:
+ explicit RawRustStringOstream(RustStringRef Str) : Str(Str), Pos(0) {}
+
+ ~RawRustStringOstream() {
+ // LLVM requires this.
+ flush();
+ }
+};
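+// Typical use (see e.g. CoverageMappingWrapper.cpp): wrap a RustStringRef in a
+// RawRustStringOstream, stream LLVM output into it, and let the destructor's
+// flush() deliver the bytes back to Rust via LLVMRustStringWriteImpl.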
diff --git a/compiler/rustc_llvm/llvm-wrapper/Linker.cpp b/compiler/rustc_llvm/llvm-wrapper/Linker.cpp
new file mode 100644
index 000000000..8766e96f0
--- /dev/null
+++ b/compiler/rustc_llvm/llvm-wrapper/Linker.cpp
@@ -0,0 +1,48 @@
+#include "llvm/Linker/Linker.h"
+
+#include "LLVMWrapper.h"
+
+using namespace llvm;
+
+struct RustLinker {
+ Linker L;
+ LLVMContext &Ctx;
+
+ RustLinker(Module &M) :
+ L(M),
+ Ctx(M.getContext())
+ {}
+};
+
+extern "C" RustLinker*
+LLVMRustLinkerNew(LLVMModuleRef DstRef) {
+ Module *Dst = unwrap(DstRef);
+
+ return new RustLinker(*Dst);
+}
+
+extern "C" void
+LLVMRustLinkerFree(RustLinker *L) {
+ delete L;
+}
+
+extern "C" bool
+LLVMRustLinkerAdd(RustLinker *L, char *BC, size_t Len) {
+ std::unique_ptr<MemoryBuffer> Buf =
+ MemoryBuffer::getMemBufferCopy(StringRef(BC, Len));
+
+ Expected<std::unique_ptr<Module>> SrcOrError =
+ llvm::getLazyBitcodeModule(Buf->getMemBufferRef(), L->Ctx);
+ if (!SrcOrError) {
+ LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str());
+ return false;
+ }
+
+ auto Src = std::move(*SrcOrError);
+
+ if (L->L.linkInModule(std::move(Src))) {
+ LLVMRustSetLastError("");
+ return false;
+ }
+ return true;
+}
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
new file mode 100644
index 000000000..0a6bd4999
--- /dev/null
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -0,0 +1,1763 @@
+#include <stdio.h>
+
+#include <vector>
+#include <set>
+
+#include "LLVMWrapper.h"
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/IR/AssemblyAnnotationWriter.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Passes/PassPlugin.h"
+#include "llvm/Passes/StandardInstrumentations.h"
+#include "llvm/Support/CBindingWrapping.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#if LLVM_VERSION_LT(14, 0)
+#include "llvm/Support/TargetRegistry.h"
+#else
+#include "llvm/MC/TargetRegistry.h"
+#endif
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/IPO/AlwaysInliner.h"
+#include "llvm/Transforms/IPO/FunctionImport.h"
+#include "llvm/Transforms/Utils/AddDiscriminators.h"
+#include "llvm/Transforms/Utils/FunctionImportUtils.h"
+#include "llvm/LTO/LTO.h"
+#include "llvm/Bitcode/BitcodeWriterPass.h"
+#include "llvm-c/Transforms/PassManagerBuilder.h"
+
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
+#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
+#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
+#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
+#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
+#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
+#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
+#include "llvm/Transforms/Utils/NameAnonGlobals.h"
+#include "llvm/Transforms/Utils.h"
+
+using namespace llvm;
+
+typedef struct LLVMOpaquePass *LLVMPassRef;
+typedef struct LLVMOpaqueTargetMachine *LLVMTargetMachineRef;
+
+DEFINE_STDCXX_CONVERSION_FUNCTIONS(Pass, LLVMPassRef)
+DEFINE_STDCXX_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
+
+extern "C" void LLVMInitializePasses() {
+ PassRegistry &Registry = *PassRegistry::getPassRegistry();
+ initializeCore(Registry);
+ initializeCodeGen(Registry);
+ initializeScalarOpts(Registry);
+ initializeVectorization(Registry);
+ initializeIPO(Registry);
+ initializeAnalysis(Registry);
+ initializeTransformUtils(Registry);
+ initializeInstCombine(Registry);
+ initializeInstrumentation(Registry);
+ initializeTarget(Registry);
+}
+
+extern "C" void LLVMTimeTraceProfilerInitialize() {
+ timeTraceProfilerInitialize(
+ /* TimeTraceGranularity */ 0,
+ /* ProcName */ "rustc");
+}
+
+extern "C" void LLVMTimeTraceProfilerFinishThread() {
+ timeTraceProfilerFinishThread();
+}
+
+extern "C" void LLVMTimeTraceProfilerFinish(const char* FileName) {
+ StringRef FN(FileName);
+ std::error_code EC;
+ raw_fd_ostream OS(FN, EC, sys::fs::CD_CreateAlways);
+
+ timeTraceProfilerWrite(OS);
+ timeTraceProfilerCleanup();
+}
+
+extern "C" LLVMPassRef LLVMRustFindAndCreatePass(const char *PassName) {
+#if LLVM_VERSION_LT(15, 0)
+ StringRef SR(PassName);
+ PassRegistry *PR = PassRegistry::getPassRegistry();
+
+ const PassInfo *PI = PR->getPassInfo(SR);
+ if (PI) {
+ return wrap(PI->createPass());
+ }
+ return nullptr;
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" LLVMPassRef LLVMRustCreateAddressSanitizerFunctionPass(bool Recover) {
+#if LLVM_VERSION_LT(15, 0)
+ const bool CompileKernel = false;
+ const bool UseAfterScope = true;
+
+ return wrap(createAddressSanitizerFunctionPass(CompileKernel, Recover, UseAfterScope));
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" LLVMPassRef LLVMRustCreateModuleAddressSanitizerPass(bool Recover) {
+#if LLVM_VERSION_LT(15, 0)
+ const bool CompileKernel = false;
+
+ return wrap(createModuleAddressSanitizerLegacyPassPass(CompileKernel, Recover));
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" LLVMPassRef LLVMRustCreateMemorySanitizerPass(int TrackOrigins, bool Recover) {
+#if LLVM_VERSION_LT(15, 0)
+ const bool CompileKernel = false;
+
+ return wrap(createMemorySanitizerLegacyPassPass(
+ MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel}));
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" LLVMPassRef LLVMRustCreateThreadSanitizerPass() {
+#if LLVM_VERSION_LT(15, 0)
+ return wrap(createThreadSanitizerLegacyPassPass());
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" LLVMPassRef LLVMRustCreateHWAddressSanitizerPass(bool Recover) {
+#if LLVM_VERSION_LT(15, 0)
+ const bool CompileKernel = false;
+
+ return wrap(createHWAddressSanitizerLegacyPassPass(CompileKernel, Recover));
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" void LLVMRustAddPass(LLVMPassManagerRef PMR, LLVMPassRef RustPass) {
+#if LLVM_VERSION_LT(15, 0)
+ assert(RustPass);
+ Pass *Pass = unwrap(RustPass);
+ PassManagerBase *PMB = unwrap(PMR);
+ PMB->add(Pass);
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" LLVMPassManagerBuilderRef LLVMRustPassManagerBuilderCreate() {
+#if LLVM_VERSION_LT(15, 0)
+ return LLVMPassManagerBuilderCreate();
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" void LLVMRustPassManagerBuilderDispose(LLVMPassManagerBuilderRef PMB) {
+#if LLVM_VERSION_LT(15, 0)
+ LLVMPassManagerBuilderDispose(PMB);
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" void LLVMRustPassManagerBuilderPopulateFunctionPassManager(
+ LLVMPassManagerBuilderRef PMB, LLVMPassManagerRef PM) {
+#if LLVM_VERSION_LT(15, 0)
+ LLVMPassManagerBuilderPopulateFunctionPassManager(PMB, PM);
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" void LLVMRustPassManagerBuilderPopulateModulePassManager(
+ LLVMPassManagerBuilderRef PMB, LLVMPassManagerRef PM) {
+#if LLVM_VERSION_LT(15, 0)
+ LLVMPassManagerBuilderPopulateModulePassManager(PMB, PM);
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" void LLVMRustPassManagerBuilderPopulateLTOPassManager(
+ LLVMPassManagerBuilderRef PMB, LLVMPassManagerRef PM, bool Internalize, bool RunInliner) {
+#if LLVM_VERSION_LT(15, 0)
+ LLVMPassManagerBuilderPopulateLTOPassManager(PMB, PM, Internalize, RunInliner);
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C"
+void LLVMRustPassManagerBuilderPopulateThinLTOPassManager(
+ LLVMPassManagerBuilderRef PMBR,
+ LLVMPassManagerRef PMR
+) {
+#if LLVM_VERSION_LT(15, 0)
+ unwrap(PMBR)->populateThinLTOPassManager(*unwrap(PMR));
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C" void LLVMRustPassManagerBuilderUseInlinerWithThreshold(
+ LLVMPassManagerBuilderRef PMB, unsigned Threshold) {
+#if LLVM_VERSION_LT(15, 0)
+ LLVMPassManagerBuilderUseInlinerWithThreshold(PMB, Threshold);
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+extern "C"
+void LLVMRustAddLastExtensionPasses(
+ LLVMPassManagerBuilderRef PMBR, LLVMPassRef *Passes, size_t NumPasses) {
+#if LLVM_VERSION_LT(15, 0)
+ auto AddExtensionPasses = [Passes, NumPasses](
+ const PassManagerBuilder &Builder, PassManagerBase &PM) {
+ for (size_t I = 0; I < NumPasses; I++) {
+ PM.add(unwrap(Passes[I]));
+ }
+ };
+ // Add the passes to both of the pre-finalization extension points,
+ // so they are run for optimized and non-optimized builds.
+ unwrap(PMBR)->addExtension(PassManagerBuilder::EP_OptimizerLast,
+ AddExtensionPasses);
+ unwrap(PMBR)->addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ AddExtensionPasses);
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+#ifdef LLVM_COMPONENT_X86
+#define SUBTARGET_X86 SUBTARGET(X86)
+#else
+#define SUBTARGET_X86
+#endif
+
+#ifdef LLVM_COMPONENT_ARM
+#define SUBTARGET_ARM SUBTARGET(ARM)
+#else
+#define SUBTARGET_ARM
+#endif
+
+#ifdef LLVM_COMPONENT_AARCH64
+#define SUBTARGET_AARCH64 SUBTARGET(AArch64)
+#else
+#define SUBTARGET_AARCH64
+#endif
+
+#ifdef LLVM_COMPONENT_AVR
+#define SUBTARGET_AVR SUBTARGET(AVR)
+#else
+#define SUBTARGET_AVR
+#endif
+
+#ifdef LLVM_COMPONENT_M68k
+#define SUBTARGET_M68K SUBTARGET(M68k)
+#else
+#define SUBTARGET_M68K
+#endif
+
+#ifdef LLVM_COMPONENT_MIPS
+#define SUBTARGET_MIPS SUBTARGET(Mips)
+#else
+#define SUBTARGET_MIPS
+#endif
+
+#ifdef LLVM_COMPONENT_POWERPC
+#define SUBTARGET_PPC SUBTARGET(PPC)
+#else
+#define SUBTARGET_PPC
+#endif
+
+#ifdef LLVM_COMPONENT_SYSTEMZ
+#define SUBTARGET_SYSTEMZ SUBTARGET(SystemZ)
+#else
+#define SUBTARGET_SYSTEMZ
+#endif
+
+#ifdef LLVM_COMPONENT_MSP430
+#define SUBTARGET_MSP430 SUBTARGET(MSP430)
+#else
+#define SUBTARGET_MSP430
+#endif
+
+#ifdef LLVM_COMPONENT_RISCV
+#define SUBTARGET_RISCV SUBTARGET(RISCV)
+#else
+#define SUBTARGET_RISCV
+#endif
+
+#ifdef LLVM_COMPONENT_SPARC
+#define SUBTARGET_SPARC SUBTARGET(Sparc)
+#else
+#define SUBTARGET_SPARC
+#endif
+
+#ifdef LLVM_COMPONENT_HEXAGON
+#define SUBTARGET_HEXAGON SUBTARGET(Hexagon)
+#else
+#define SUBTARGET_HEXAGON
+#endif
+
+#define GEN_SUBTARGETS \
+ SUBTARGET_X86 \
+ SUBTARGET_ARM \
+ SUBTARGET_AARCH64 \
+ SUBTARGET_AVR \
+ SUBTARGET_M68K \
+ SUBTARGET_MIPS \
+ SUBTARGET_PPC \
+ SUBTARGET_SYSTEMZ \
+ SUBTARGET_MSP430 \
+ SUBTARGET_SPARC \
+ SUBTARGET_HEXAGON \
+ SUBTARGET_RISCV \
+
+#define SUBTARGET(x) \
+ namespace llvm { \
+ extern const SubtargetFeatureKV x##FeatureKV[]; \
+ extern const SubtargetFeatureKV x##SubTypeKV[]; \
+ }
+
+GEN_SUBTARGETS
+#undef SUBTARGET
+
+extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM,
+ const char *Feature) {
+ TargetMachine *Target = unwrap(TM);
+ const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
+ return MCInfo->checkFeatures(std::string("+") + Feature);
+}
+
+enum class LLVMRustCodeModel {
+ Tiny,
+ Small,
+ Kernel,
+ Medium,
+ Large,
+ None,
+};
+
+static Optional<CodeModel::Model> fromRust(LLVMRustCodeModel Model) {
+ switch (Model) {
+ case LLVMRustCodeModel::Tiny:
+ return CodeModel::Tiny;
+ case LLVMRustCodeModel::Small:
+ return CodeModel::Small;
+ case LLVMRustCodeModel::Kernel:
+ return CodeModel::Kernel;
+ case LLVMRustCodeModel::Medium:
+ return CodeModel::Medium;
+ case LLVMRustCodeModel::Large:
+ return CodeModel::Large;
+ case LLVMRustCodeModel::None:
+ return None;
+ default:
+ report_fatal_error("Bad CodeModel.");
+ }
+}
+
+enum class LLVMRustCodeGenOptLevel {
+ None,
+ Less,
+ Default,
+ Aggressive,
+};
+
+static CodeGenOpt::Level fromRust(LLVMRustCodeGenOptLevel Level) {
+ switch (Level) {
+ case LLVMRustCodeGenOptLevel::None:
+ return CodeGenOpt::None;
+ case LLVMRustCodeGenOptLevel::Less:
+ return CodeGenOpt::Less;
+ case LLVMRustCodeGenOptLevel::Default:
+ return CodeGenOpt::Default;
+ case LLVMRustCodeGenOptLevel::Aggressive:
+ return CodeGenOpt::Aggressive;
+ default:
+ report_fatal_error("Bad CodeGenOptLevel.");
+ }
+}
+
+enum class LLVMRustPassBuilderOptLevel {
+ O0,
+ O1,
+ O2,
+ O3,
+ Os,
+ Oz,
+};
+
+#if LLVM_VERSION_LT(14,0)
+using OptimizationLevel = PassBuilder::OptimizationLevel;
+#endif
+
+static OptimizationLevel fromRust(LLVMRustPassBuilderOptLevel Level) {
+ switch (Level) {
+ case LLVMRustPassBuilderOptLevel::O0:
+ return OptimizationLevel::O0;
+ case LLVMRustPassBuilderOptLevel::O1:
+ return OptimizationLevel::O1;
+ case LLVMRustPassBuilderOptLevel::O2:
+ return OptimizationLevel::O2;
+ case LLVMRustPassBuilderOptLevel::O3:
+ return OptimizationLevel::O3;
+ case LLVMRustPassBuilderOptLevel::Os:
+ return OptimizationLevel::Os;
+ case LLVMRustPassBuilderOptLevel::Oz:
+ return OptimizationLevel::Oz;
+ default:
+ report_fatal_error("Bad PassBuilderOptLevel.");
+ }
+}
+
+enum class LLVMRustRelocModel {
+ Static,
+ PIC,
+ DynamicNoPic,
+ ROPI,
+ RWPI,
+ ROPIRWPI,
+};
+
+static Reloc::Model fromRust(LLVMRustRelocModel RustReloc) {
+ switch (RustReloc) {
+ case LLVMRustRelocModel::Static:
+ return Reloc::Static;
+ case LLVMRustRelocModel::PIC:
+ return Reloc::PIC_;
+ case LLVMRustRelocModel::DynamicNoPic:
+ return Reloc::DynamicNoPIC;
+ case LLVMRustRelocModel::ROPI:
+ return Reloc::ROPI;
+ case LLVMRustRelocModel::RWPI:
+ return Reloc::RWPI;
+ case LLVMRustRelocModel::ROPIRWPI:
+ return Reloc::ROPI_RWPI;
+ }
+ report_fatal_error("Bad RelocModel.");
+}
+
+#ifdef LLVM_RUSTLLVM
+/// getLongestEntryLength - Return the length of the longest entry in the table.
+template<typename KV>
+static size_t getLongestEntryLength(ArrayRef<KV> Table) {
+ size_t MaxLen = 0;
+ for (auto &I : Table)
+ MaxLen = std::max(MaxLen, std::strlen(I.Key));
+ return MaxLen;
+}
+
+extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef TM) {
+ const TargetMachine *Target = unwrap(TM);
+ const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
+ const Triple::ArchType HostArch = Triple(sys::getProcessTriple()).getArch();
+ const Triple::ArchType TargetArch = Target->getTargetTriple().getArch();
+ const ArrayRef<SubtargetSubTypeKV> CPUTable = MCInfo->getCPUTable();
+ unsigned MaxCPULen = getLongestEntryLength(CPUTable);
+
+ printf("Available CPUs for this target:\n");
+ if (HostArch == TargetArch) {
+ const StringRef HostCPU = sys::getHostCPUName();
+ printf(" %-*s - Select the CPU of the current host (currently %.*s).\n",
+ MaxCPULen, "native", (int)HostCPU.size(), HostCPU.data());
+ }
+ for (auto &CPU : CPUTable)
+ printf(" %-*s\n", MaxCPULen, CPU.Key);
+ printf("\n");
+}
+
+extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef TM) {
+ const TargetMachine *Target = unwrap(TM);
+ const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
+ const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
+ return FeatTable.size();
+}
+
+extern "C" void LLVMRustGetTargetFeature(LLVMTargetMachineRef TM, size_t Index,
+ const char** Feature, const char** Desc) {
+ const TargetMachine *Target = unwrap(TM);
+ const MCSubtargetInfo *MCInfo = Target->getMCSubtargetInfo();
+ const ArrayRef<SubtargetFeatureKV> FeatTable = MCInfo->getFeatureTable();
+ const SubtargetFeatureKV Feat = FeatTable[Index];
+ *Feature = Feat.Key;
+ *Desc = Feat.Desc;
+}
+
+#else
+
+extern "C" void LLVMRustPrintTargetCPUs(LLVMTargetMachineRef) {
+ printf("Target CPU help is not supported by this LLVM version.\n\n");
+}
+
+extern "C" size_t LLVMRustGetTargetFeaturesCount(LLVMTargetMachineRef) {
+ return 0;
+}
+
+extern "C" void LLVMRustGetTargetFeature(LLVMTargetMachineRef, const char**, const char**) {}
+#endif
+
+extern "C" const char* LLVMRustGetHostCPUName(size_t *len) {
+ StringRef Name = sys::getHostCPUName();
+ *len = Name.size();
+ return Name.data();
+}
+
+extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine(
+ const char *TripleStr, const char *CPU, const char *Feature,
+ const char *ABIStr, LLVMRustCodeModel RustCM, LLVMRustRelocModel RustReloc,
+ LLVMRustCodeGenOptLevel RustOptLevel, bool UseSoftFloat,
+ bool FunctionSections,
+ bool DataSections,
+ bool UniqueSectionNames,
+ bool TrapUnreachable,
+ bool Singlethread,
+ bool AsmComments,
+ bool EmitStackSizeSection,
+ bool RelaxELFRelocations,
+ bool UseInitArray,
+ const char *SplitDwarfFile) {
+
+ auto OptLevel = fromRust(RustOptLevel);
+ auto RM = fromRust(RustReloc);
+ auto CM = fromRust(RustCM);
+
+ std::string Error;
+ Triple Trip(Triple::normalize(TripleStr));
+ const llvm::Target *TheTarget =
+ TargetRegistry::lookupTarget(Trip.getTriple(), Error);
+ if (TheTarget == nullptr) {
+ LLVMRustSetLastError(Error.c_str());
+ return nullptr;
+ }
+
+ TargetOptions Options;
+
+ Options.FloatABIType = FloatABI::Default;
+ if (UseSoftFloat) {
+ Options.FloatABIType = FloatABI::Soft;
+ }
+ Options.DataSections = DataSections;
+ Options.FunctionSections = FunctionSections;
+ Options.UniqueSectionNames = UniqueSectionNames;
+ Options.MCOptions.AsmVerbose = AsmComments;
+ Options.MCOptions.PreserveAsmComments = AsmComments;
+ Options.MCOptions.ABIName = ABIStr;
+ if (SplitDwarfFile) {
+ Options.MCOptions.SplitDwarfFile = SplitDwarfFile;
+ }
+ Options.RelaxELFRelocations = RelaxELFRelocations;
+ Options.UseInitArray = UseInitArray;
+
+ if (TrapUnreachable) {
+ // Tell LLVM to codegen `unreachable` into an explicit trap instruction.
+ // This limits the extent of possible undefined behavior in some cases, as
+ // it prevents control flow from "falling through" into whatever code
+ // happens to be laid out next in memory.
+ Options.TrapUnreachable = true;
+ }
+
+ if (Singlethread) {
+ Options.ThreadModel = ThreadModel::Single;
+ }
+
+ Options.EmitStackSizeSection = EmitStackSizeSection;
+
+ TargetMachine *TM = TheTarget->createTargetMachine(
+ Trip.getTriple(), CPU, Feature, Options, RM, CM, OptLevel);
+ return wrap(TM);
+}
+
+extern "C" void LLVMRustDisposeTargetMachine(LLVMTargetMachineRef TM) {
+ delete unwrap(TM);
+}
+
+extern "C" void LLVMRustConfigurePassManagerBuilder(
+ LLVMPassManagerBuilderRef PMBR, LLVMRustCodeGenOptLevel OptLevel,
+ bool MergeFunctions, bool SLPVectorize, bool LoopVectorize, bool PrepareForThinLTO,
+ const char* PGOGenPath, const char* PGOUsePath, const char* PGOSampleUsePath,
+ int SizeLevel) {
+#if LLVM_VERSION_LT(15, 0)
+ unwrap(PMBR)->MergeFunctions = MergeFunctions;
+ unwrap(PMBR)->SLPVectorize = SLPVectorize;
+ unwrap(PMBR)->OptLevel = fromRust(OptLevel);
+ unwrap(PMBR)->LoopVectorize = LoopVectorize;
+ unwrap(PMBR)->PrepareForThinLTO = PrepareForThinLTO;
+ unwrap(PMBR)->SizeLevel = SizeLevel;
+ unwrap(PMBR)->DisableUnrollLoops = SizeLevel != 0;
+
+ if (PGOGenPath) {
+ assert(!PGOUsePath && !PGOSampleUsePath);
+ unwrap(PMBR)->EnablePGOInstrGen = true;
+ unwrap(PMBR)->PGOInstrGen = PGOGenPath;
+ } else if (PGOUsePath) {
+ assert(!PGOSampleUsePath);
+ unwrap(PMBR)->PGOInstrUse = PGOUsePath;
+ } else if (PGOSampleUsePath) {
+ unwrap(PMBR)->PGOSampleUse = PGOSampleUsePath;
+ }
+#else
+ report_fatal_error("Legacy PM not supported with LLVM 15");
+#endif
+}
+
+// Unfortunately, the LLVM C API doesn't provide a way to set the `LibraryInfo`
+// field of a PassManagerBuilder, so we expose our own method of doing so.
+extern "C" void LLVMRustAddBuilderLibraryInfo(LLVMPassManagerBuilderRef PMBR,
+ LLVMModuleRef M,
+ bool DisableSimplifyLibCalls) {
+ Triple TargetTriple(unwrap(M)->getTargetTriple());
+ TargetLibraryInfoImpl *TLI = new TargetLibraryInfoImpl(TargetTriple);
+ if (DisableSimplifyLibCalls)
+ TLI->disableAllFunctions();
+ unwrap(PMBR)->LibraryInfo = TLI;
+}
+
+// Unfortunately, the LLVM C API doesn't provide a way to create the
+// TargetLibraryInfo pass, so we use this method to do so.
+extern "C" void LLVMRustAddLibraryInfo(LLVMPassManagerRef PMR, LLVMModuleRef M,
+ bool DisableSimplifyLibCalls) {
+ Triple TargetTriple(unwrap(M)->getTargetTriple());
+ TargetLibraryInfoImpl TLII(TargetTriple);
+ if (DisableSimplifyLibCalls)
+ TLII.disableAllFunctions();
+ unwrap(PMR)->add(new TargetLibraryInfoWrapperPass(TLII));
+}
+
+// Unfortunately, the LLVM C API doesn't provide an easy way of iterating over
+// all the functions in a module, so we do that manually here. You'll find
+// similar code in clang's BackendUtil.cpp file.
+extern "C" void LLVMRustRunFunctionPassManager(LLVMPassManagerRef PMR,
+ LLVMModuleRef M) {
+ llvm::legacy::FunctionPassManager *P =
+ unwrap<llvm::legacy::FunctionPassManager>(PMR);
+ P->doInitialization();
+
+ // Upgrade all calls to old intrinsics first.
+ for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;)
+ UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we remove
+
+ for (Module::iterator I = unwrap(M)->begin(), E = unwrap(M)->end(); I != E;
+ ++I)
+ if (!I->isDeclaration())
+ P->run(*I);
+
+ P->doFinalization();
+}
+
+extern "C" void LLVMRustSetLLVMOptions(int Argc, char **Argv) {
+ // Initializing the command-line options more than once is not allowed. So,
+ // check if they've already been initialized. (This could happen if we're
+ // being called from rustpkg, for example). If the arguments change, then
+ // that's just kinda unfortunate.
+ static bool Initialized = false;
+ if (Initialized)
+ return;
+ Initialized = true;
+ cl::ParseCommandLineOptions(Argc, Argv);
+}
+
+enum class LLVMRustFileType {
+ AssemblyFile,
+ ObjectFile,
+};
+
+static CodeGenFileType fromRust(LLVMRustFileType Type) {
+ switch (Type) {
+ case LLVMRustFileType::AssemblyFile:
+ return CGFT_AssemblyFile;
+ case LLVMRustFileType::ObjectFile:
+ return CGFT_ObjectFile;
+ default:
+ report_fatal_error("Bad FileType.");
+ }
+}
+
+extern "C" LLVMRustResult
+LLVMRustWriteOutputFile(LLVMTargetMachineRef Target, LLVMPassManagerRef PMR,
+ LLVMModuleRef M, const char *Path, const char *DwoPath,
+ LLVMRustFileType RustFileType) {
+ llvm::legacy::PassManager *PM = unwrap<llvm::legacy::PassManager>(PMR);
+ auto FileType = fromRust(RustFileType);
+
+ std::string ErrorInfo;
+ std::error_code EC;
+ raw_fd_ostream OS(Path, EC, sys::fs::OF_None);
+ if (EC)
+ ErrorInfo = EC.message();
+ if (ErrorInfo != "") {
+ LLVMRustSetLastError(ErrorInfo.c_str());
+ return LLVMRustResult::Failure;
+ }
+
+ buffer_ostream BOS(OS);
+ if (DwoPath) {
+ raw_fd_ostream DOS(DwoPath, EC, sys::fs::OF_None);
+ // Check for an open failure before clearing the error code; clearing it
+ // first would silently discard the failure.
+ if (EC)
+ ErrorInfo = EC.message();
+ EC.clear();
+ if (ErrorInfo != "") {
+ LLVMRustSetLastError(ErrorInfo.c_str());
+ return LLVMRustResult::Failure;
+ }
+ buffer_ostream DBOS(DOS);
+ unwrap(Target)->addPassesToEmitFile(*PM, BOS, &DBOS, FileType, false);
+ PM->run(*unwrap(M));
+ } else {
+ unwrap(Target)->addPassesToEmitFile(*PM, BOS, nullptr, FileType, false);
+ PM->run(*unwrap(M));
+ }
+
+ // Apparently `addPassesToEmitFile` adds a pointer to our on-the-stack output
+ // stream (OS), so the only real safe place to delete this is here? Don't we
+ // wish this was written in Rust?
+ LLVMDisposePassManager(PMR);
+ return LLVMRustResult::Success;
+}
+
+extern "C" typedef void (*LLVMRustSelfProfileBeforePassCallback)(void*, // LlvmSelfProfiler
+ const char*, // pass name
+ const char*); // IR name
+extern "C" typedef void (*LLVMRustSelfProfileAfterPassCallback)(void*); // LlvmSelfProfiler
+
+std::string LLVMRustwrappedIrGetName(const llvm::Any &WrappedIr) {
+ if (any_isa<const Module *>(WrappedIr))
+ return any_cast<const Module *>(WrappedIr)->getName().str();
+ if (any_isa<const Function *>(WrappedIr))
+ return any_cast<const Function *>(WrappedIr)->getName().str();
+ if (any_isa<const Loop *>(WrappedIr))
+ return any_cast<const Loop *>(WrappedIr)->getName().str();
+ if (any_isa<const LazyCallGraph::SCC *>(WrappedIr))
+ return any_cast<const LazyCallGraph::SCC *>(WrappedIr)->getName();
+ return "<UNKNOWN>";
+}
+
+
+void LLVMSelfProfileInitializeCallbacks(
+ PassInstrumentationCallbacks& PIC, void* LlvmSelfProfiler,
+ LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
+ LLVMRustSelfProfileAfterPassCallback AfterPassCallback) {
+ PIC.registerBeforeNonSkippedPassCallback([LlvmSelfProfiler, BeforePassCallback](
+ StringRef Pass, llvm::Any Ir) {
+ std::string PassName = Pass.str();
+ std::string IrName = LLVMRustwrappedIrGetName(Ir);
+ BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
+ });
+
+ PIC.registerAfterPassCallback(
+ [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any IR,
+ const PreservedAnalyses &Preserved) {
+ AfterPassCallback(LlvmSelfProfiler);
+ });
+
+ PIC.registerAfterPassInvalidatedCallback(
+ [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, const PreservedAnalyses &Preserved) {
+ AfterPassCallback(LlvmSelfProfiler);
+ });
+
+ PIC.registerBeforeAnalysisCallback([LlvmSelfProfiler, BeforePassCallback](
+ StringRef Pass, llvm::Any Ir) {
+ std::string PassName = Pass.str();
+ std::string IrName = LLVMRustwrappedIrGetName(Ir);
+ BeforePassCallback(LlvmSelfProfiler, PassName.c_str(), IrName.c_str());
+ });
+
+ PIC.registerAfterAnalysisCallback(
+ [LlvmSelfProfiler, AfterPassCallback](StringRef Pass, llvm::Any Ir) {
+ AfterPassCallback(LlvmSelfProfiler);
+ });
+}
+
+enum class LLVMRustOptStage {
+ PreLinkNoLTO,
+ PreLinkThinLTO,
+ PreLinkFatLTO,
+ ThinLTO,
+ FatLTO,
+};
+
+struct LLVMRustSanitizerOptions {
+ bool SanitizeAddress;
+ bool SanitizeAddressRecover;
+ bool SanitizeMemory;
+ bool SanitizeMemoryRecover;
+ int SanitizeMemoryTrackOrigins;
+ bool SanitizeThread;
+ bool SanitizeHWAddress;
+ bool SanitizeHWAddressRecover;
+};
+
+extern "C" LLVMRustResult
+LLVMRustOptimizeWithNewPassManager(
+ LLVMModuleRef ModuleRef,
+ LLVMTargetMachineRef TMRef,
+ LLVMRustPassBuilderOptLevel OptLevelRust,
+ LLVMRustOptStage OptStage,
+ bool NoPrepopulatePasses, bool VerifyIR, bool UseThinLTOBuffers,
+ bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize,
+ bool DisableSimplifyLibCalls, bool EmitLifetimeMarkers,
+ LLVMRustSanitizerOptions *SanitizerOptions,
+ const char *PGOGenPath, const char *PGOUsePath,
+ bool InstrumentCoverage, bool InstrumentGCOV,
+ const char *PGOSampleUsePath, bool DebugInfoForProfiling,
+ void* LlvmSelfProfiler,
+ LLVMRustSelfProfileBeforePassCallback BeforePassCallback,
+ LLVMRustSelfProfileAfterPassCallback AfterPassCallback,
+ const char *ExtraPasses, size_t ExtraPassesLen,
+ const char *LLVMPlugins, size_t LLVMPluginsLen) {
+ Module *TheModule = unwrap(ModuleRef);
+ TargetMachine *TM = unwrap(TMRef);
+ OptimizationLevel OptLevel = fromRust(OptLevelRust);
+
+
+ PipelineTuningOptions PTO;
+ PTO.LoopUnrolling = UnrollLoops;
+ PTO.LoopInterleaving = UnrollLoops;
+ PTO.LoopVectorization = LoopVectorize;
+ PTO.SLPVectorization = SLPVectorize;
+ PTO.MergeFunctions = MergeFunctions;
+
+ // FIXME: We may want to expose this as an option.
+ bool DebugPassManager = false;
+
+ PassInstrumentationCallbacks PIC;
+ StandardInstrumentations SI(DebugPassManager);
+ SI.registerCallbacks(PIC);
+
+ if (LlvmSelfProfiler) {
+ LLVMSelfProfileInitializeCallbacks(PIC, LlvmSelfProfiler, BeforePassCallback, AfterPassCallback);
+ }
+
+ Optional<PGOOptions> PGOOpt;
+ if (PGOGenPath) {
+ assert(!PGOUsePath && !PGOSampleUsePath);
+ PGOOpt = PGOOptions(PGOGenPath, "", "", PGOOptions::IRInstr,
+ PGOOptions::NoCSAction, DebugInfoForProfiling);
+ } else if (PGOUsePath) {
+ assert(!PGOSampleUsePath);
+ PGOOpt = PGOOptions(PGOUsePath, "", "", PGOOptions::IRUse,
+ PGOOptions::NoCSAction, DebugInfoForProfiling);
+ } else if (PGOSampleUsePath) {
+ PGOOpt = PGOOptions(PGOSampleUsePath, "", "", PGOOptions::SampleUse,
+ PGOOptions::NoCSAction, DebugInfoForProfiling);
+ } else if (DebugInfoForProfiling) {
+ PGOOpt = PGOOptions("", "", "", PGOOptions::NoAction,
+ PGOOptions::NoCSAction, DebugInfoForProfiling);
+ }
+
+#if LLVM_VERSION_GE(13, 0)
+ PassBuilder PB(TM, PTO, PGOOpt, &PIC);
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ CGSCCAnalysisManager CGAM;
+ ModuleAnalysisManager MAM;
+#else
+ PassBuilder PB(DebugPassManager, TM, PTO, PGOOpt, &PIC);
+ LoopAnalysisManager LAM(DebugPassManager);
+ FunctionAnalysisManager FAM(DebugPassManager);
+ CGSCCAnalysisManager CGAM(DebugPassManager);
+ ModuleAnalysisManager MAM(DebugPassManager);
+#endif
+
+ FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
+
+ Triple TargetTriple(TheModule->getTargetTriple());
+ std::unique_ptr<TargetLibraryInfoImpl> TLII(new TargetLibraryInfoImpl(TargetTriple));
+ if (DisableSimplifyLibCalls)
+ TLII->disableAllFunctions();
+ FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
+
+ PB.registerModuleAnalyses(MAM);
+ PB.registerCGSCCAnalyses(CGAM);
+ PB.registerFunctionAnalyses(FAM);
+ PB.registerLoopAnalyses(LAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+
+ // We manually collect pipeline callbacks so we can apply them at O0, where the
+ // PassBuilder does not create a pipeline.
+ std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
+ PipelineStartEPCallbacks;
+ std::vector<std::function<void(ModulePassManager &, OptimizationLevel)>>
+ OptimizerLastEPCallbacks;
+
+ if (VerifyIR) {
+ PipelineStartEPCallbacks.push_back(
+ [VerifyIR](ModulePassManager &MPM, OptimizationLevel Level) {
+ MPM.addPass(VerifierPass());
+ }
+ );
+ }
+
+ if (InstrumentGCOV) {
+ PipelineStartEPCallbacks.push_back(
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+ MPM.addPass(GCOVProfilerPass(GCOVOptions::getDefault()));
+ }
+ );
+ }
+
+ if (InstrumentCoverage) {
+ PipelineStartEPCallbacks.push_back(
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+ InstrProfOptions Options;
+ MPM.addPass(InstrProfiling(Options, false));
+ }
+ );
+ }
+
+ if (SanitizerOptions) {
+ if (SanitizerOptions->SanitizeMemory) {
+ MemorySanitizerOptions Options(
+ SanitizerOptions->SanitizeMemoryTrackOrigins,
+ SanitizerOptions->SanitizeMemoryRecover,
+ /*CompileKernel=*/false);
+ OptimizerLastEPCallbacks.push_back(
+ [Options](ModulePassManager &MPM, OptimizationLevel Level) {
+#if LLVM_VERSION_GE(14, 0)
+ MPM.addPass(ModuleMemorySanitizerPass(Options));
+#else
+ MPM.addPass(MemorySanitizerPass(Options));
+#endif
+ MPM.addPass(createModuleToFunctionPassAdaptor(MemorySanitizerPass(Options)));
+ }
+ );
+ }
+
+ if (SanitizerOptions->SanitizeThread) {
+ OptimizerLastEPCallbacks.push_back(
+ [](ModulePassManager &MPM, OptimizationLevel Level) {
+#if LLVM_VERSION_GE(14, 0)
+ MPM.addPass(ModuleThreadSanitizerPass());
+#else
+ MPM.addPass(ThreadSanitizerPass());
+#endif
+ MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
+ }
+ );
+ }
+
+ if (SanitizerOptions->SanitizeAddress) {
+ OptimizerLastEPCallbacks.push_back(
+ [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
+#if LLVM_VERSION_LT(15, 0)
+ MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
+#endif
+#if LLVM_VERSION_GE(14, 0)
+ AddressSanitizerOptions opts = AddressSanitizerOptions{
+ /*CompileKernel=*/false,
+ SanitizerOptions->SanitizeAddressRecover,
+ /*UseAfterScope=*/true,
+ AsanDetectStackUseAfterReturnMode::Runtime,
+ };
+ MPM.addPass(ModuleAddressSanitizerPass(opts));
+#else
+ MPM.addPass(ModuleAddressSanitizerPass(
+ /*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover));
+ MPM.addPass(createModuleToFunctionPassAdaptor(AddressSanitizerPass(
+ /*CompileKernel=*/false, SanitizerOptions->SanitizeAddressRecover,
+ /*UseAfterScope=*/true)));
+#endif
+ }
+ );
+ }
+ if (SanitizerOptions->SanitizeHWAddress) {
+ OptimizerLastEPCallbacks.push_back(
+ [SanitizerOptions](ModulePassManager &MPM, OptimizationLevel Level) {
+#if LLVM_VERSION_GE(14, 0)
+ HWAddressSanitizerOptions opts(
+ /*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover,
+ /*DisableOptimization=*/false);
+ MPM.addPass(HWAddressSanitizerPass(opts));
+#else
+ MPM.addPass(HWAddressSanitizerPass(
+ /*CompileKernel=*/false, SanitizerOptions->SanitizeHWAddressRecover));
+#endif
+ }
+ );
+ }
+ }
+
+ if (LLVMPluginsLen) {
+ auto PluginsStr = StringRef(LLVMPlugins, LLVMPluginsLen);
+ SmallVector<StringRef> Plugins;
+ PluginsStr.split(Plugins, ',', -1, false);
+ for (auto PluginPath: Plugins) {
+ auto Plugin = PassPlugin::Load(PluginPath.str());
+ if (!Plugin) {
+ LLVMRustSetLastError(("Failed to load pass plugin" + PluginPath.str()).c_str());
+ continue;
+ }
+ Plugin->registerPassBuilderCallbacks(PB);
+ }
+ }
+
+#if LLVM_VERSION_GE(13, 0)
+ ModulePassManager MPM;
+#else
+ ModulePassManager MPM(DebugPassManager);
+#endif
+ bool NeedThinLTOBufferPasses = UseThinLTOBuffers;
+ if (!NoPrepopulatePasses) {
+ // The pre-link pipelines don't support O0 and require using buildO0DefaultPipeline() instead.
+ // At the same time, the LTO pipelines do support O0 and using them is required.
+ bool IsLTO = OptStage == LLVMRustOptStage::ThinLTO || OptStage == LLVMRustOptStage::FatLTO;
+ if (OptLevel == OptimizationLevel::O0 && !IsLTO) {
+ for (const auto &C : PipelineStartEPCallbacks)
+ PB.registerPipelineStartEPCallback(C);
+ for (const auto &C : OptimizerLastEPCallbacks)
+ PB.registerOptimizerLastEPCallback(C);
+
+ // Pass false as we manually schedule ThinLTOBufferPasses below.
+ MPM = PB.buildO0DefaultPipeline(OptLevel, /* PreLinkLTO */ false);
+ } else {
+ for (const auto &C : PipelineStartEPCallbacks)
+ PB.registerPipelineStartEPCallback(C);
+ if (OptStage != LLVMRustOptStage::PreLinkThinLTO) {
+ for (const auto &C : OptimizerLastEPCallbacks)
+ PB.registerOptimizerLastEPCallback(C);
+ }
+
+ switch (OptStage) {
+ case LLVMRustOptStage::PreLinkNoLTO:
+ MPM = PB.buildPerModuleDefaultPipeline(OptLevel, DebugPassManager);
+ break;
+ case LLVMRustOptStage::PreLinkThinLTO:
+ MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel);
+ // The ThinLTOPreLink pipeline already includes ThinLTOBuffer passes. However, callback
+ // passes may still run afterwards. This means we need to run the buffer passes again.
+ // FIXME: In LLVM 13, the ThinLTOPreLink pipeline also runs OptimizerLastEPCallbacks
+ // before the RequiredLTOPreLinkPasses, in which case we can remove these hacks.
+ if (OptimizerLastEPCallbacks.empty())
+ NeedThinLTOBufferPasses = false;
+ for (const auto &C : OptimizerLastEPCallbacks)
+ C(MPM, OptLevel);
+ break;
+ case LLVMRustOptStage::PreLinkFatLTO:
+ MPM = PB.buildLTOPreLinkDefaultPipeline(OptLevel);
+ NeedThinLTOBufferPasses = false;
+ break;
+ case LLVMRustOptStage::ThinLTO:
+ // FIXME: Does it make sense to pass the ModuleSummaryIndex?
+ // It only seems to be needed for C++ specific optimizations.
+ MPM = PB.buildThinLTODefaultPipeline(OptLevel, nullptr);
+ break;
+ case LLVMRustOptStage::FatLTO:
+ MPM = PB.buildLTODefaultPipeline(OptLevel, nullptr);
+ break;
+ }
+ }
+ } else {
+ // We're not building any of the default pipelines but we still want to
+    // add the verifier, instrumentation, etc. passes if they were requested
+ for (const auto &C : PipelineStartEPCallbacks)
+ C(MPM, OptLevel);
+ for (const auto &C : OptimizerLastEPCallbacks)
+ C(MPM, OptLevel);
+ }
+
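+  // `ExtraPasses` is a textual pipeline in the new pass manager's syntax,
+  // e.g. (illustratively) "function(instcombine),globaldce".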
+ if (ExtraPassesLen) {
+ if (auto Err = PB.parsePassPipeline(MPM, StringRef(ExtraPasses, ExtraPassesLen))) {
+ std::string ErrMsg = toString(std::move(Err));
+ LLVMRustSetLastError(ErrMsg.c_str());
+ return LLVMRustResult::Failure;
+ }
+ }
+
+ if (NeedThinLTOBufferPasses) {
+ MPM.addPass(CanonicalizeAliasesPass());
+ MPM.addPass(NameAnonGlobalPass());
+ }
+
+ // Upgrade all calls to old intrinsics first.
+ for (Module::iterator I = TheModule->begin(), E = TheModule->end(); I != E;)
+    UpgradeCallsToIntrinsic(&*I++); // must be post-increment, as we may
+                                    // remove the function being visited
+
+ MPM.run(*TheModule, MAM);
+ return LLVMRustResult::Success;
+}
+
+// Callback used to demangle a function name.
+// Parameters:
+// * name to be demangled
+// * length of the name
+// * output buffer
+// * length of the output buffer
+// Returns the length of the demangled string, or 0 if demangling failed.
+typedef size_t (*DemangleFn)(const char*, size_t, char*, size_t);
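+
+// A minimal sketch (illustrative only, compiled out) of a callback satisfying
+// this contract; the real callback lives on the Rust side. It demonstrates the
+// buffer protocol: write into `Out` and return the demangled length, or return
+// 0 on failure (here, an undersized buffer is treated as failure).
+#if 0
+static size_t ExampleDemangle(const char *Name, size_t NameLen,
+                              char *Out, size_t OutLen) {
+  if (NameLen == 0 || OutLen < NameLen)
+    return 0;
+  memcpy(Out, Name, NameLen); // An identity "demangle", for illustration.
+  return NameLen;
+}
+#endif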
+
+
+namespace {
+
+class RustAssemblyAnnotationWriter : public AssemblyAnnotationWriter {
+ DemangleFn Demangle;
+ std::vector<char> Buf;
+
+public:
+ RustAssemblyAnnotationWriter(DemangleFn Demangle) : Demangle(Demangle) {}
+
+  // Returns an empty string if demangling failed
+  // or if the name does not need to be demangled
+ StringRef CallDemangle(StringRef name) {
+ if (!Demangle) {
+ return StringRef();
+ }
+
+ if (Buf.size() < name.size() * 2) {
+      // Demangled names are usually shorter than mangled ones,
+ // but allocate twice as much memory just in case
+ Buf.resize(name.size() * 2);
+ }
+
+ auto R = Demangle(name.data(), name.size(), Buf.data(), Buf.size());
+ if (!R) {
+ // Demangle failed.
+ return StringRef();
+ }
+
+ auto Demangled = StringRef(Buf.data(), R);
+ if (Demangled == name) {
+ // Do not print anything if demangled name is equal to mangled.
+ return StringRef();
+ }
+
+ return Demangled;
+ }
+
+ void emitFunctionAnnot(const Function *F,
+ formatted_raw_ostream &OS) override {
+ StringRef Demangled = CallDemangle(F->getName());
+ if (Demangled.empty()) {
+ return;
+ }
+
+ OS << "; " << Demangled << "\n";
+ }
+
+ void emitInstructionAnnot(const Instruction *I,
+ formatted_raw_ostream &OS) override {
+ const char *Name;
+ const Value *Value;
+ if (const CallInst *CI = dyn_cast<CallInst>(I)) {
+ Name = "call";
+ Value = CI->getCalledOperand();
+ } else if (const InvokeInst* II = dyn_cast<InvokeInst>(I)) {
+ Name = "invoke";
+ Value = II->getCalledOperand();
+ } else {
+      // Could demangle more operations, e.g.
+ // `store %place, @function`.
+ return;
+ }
+
+ if (!Value->hasName()) {
+ return;
+ }
+
+ StringRef Demangled = CallDemangle(Value->getName());
+ if (Demangled.empty()) {
+ return;
+ }
+
+ OS << "; " << Name << " " << Demangled << "\n";
+ }
+};
+
+} // namespace
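+
+// With a demangle callback installed, the printed IR gains annotations such
+// as the following (illustrative output only; exact names depend on the
+// input module):
+//
+//   ; core::ptr::drop_in_place
+//   define void @_ZN4core3ptr13drop_in_place17h0123456789abcdefE() { ... }
+//     ; call alloc::alloc::box_free
+//     call void @_ZN5alloc5alloc8box_free17hfedcba9876543210E()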
+
+extern "C" LLVMRustResult
+LLVMRustPrintModule(LLVMModuleRef M, const char *Path, DemangleFn Demangle) {
+ std::string ErrorInfo;
+ std::error_code EC;
+ raw_fd_ostream OS(Path, EC, sys::fs::OF_None);
+ if (EC)
+ ErrorInfo = EC.message();
+ if (ErrorInfo != "") {
+ LLVMRustSetLastError(ErrorInfo.c_str());
+ return LLVMRustResult::Failure;
+ }
+
+ RustAssemblyAnnotationWriter AAW(Demangle);
+ formatted_raw_ostream FOS(OS);
+ unwrap(M)->print(FOS, &AAW);
+
+ return LLVMRustResult::Success;
+}
+
+extern "C" void LLVMRustPrintPasses() {
+ LLVMInitializePasses();
+ struct MyListener : PassRegistrationListener {
+ void passEnumerate(const PassInfo *Info) {
+ StringRef PassArg = Info->getPassArgument();
+ StringRef PassName = Info->getPassName();
+ if (!PassArg.empty()) {
+      // These unsigned->signed casts could theoretically overflow, but
+      // realistically never will (and even if they did, the result is
+      // implementation-defined rather than plain UB).
+ printf("%15.*s - %.*s\n", (int)PassArg.size(), PassArg.data(),
+ (int)PassName.size(), PassName.data());
+ }
+ }
+ } Listener;
+
+ PassRegistry *PR = PassRegistry::getPassRegistry();
+ PR->enumerateWith(&Listener);
+}
+
+extern "C" void LLVMRustAddAlwaysInlinePass(LLVMPassManagerBuilderRef PMBR,
+ bool AddLifetimes) {
+ unwrap(PMBR)->Inliner = llvm::createAlwaysInlinerLegacyPass(AddLifetimes);
+}
+
+extern "C" void LLVMRustRunRestrictionPass(LLVMModuleRef M, char **Symbols,
+ size_t Len) {
+ llvm::legacy::PassManager passes;
+
+ auto PreserveFunctions = [=](const GlobalValue &GV) {
+ for (size_t I = 0; I < Len; I++) {
+ if (GV.getName() == Symbols[I]) {
+ return true;
+ }
+ }
+ return false;
+ };
+
+ passes.add(llvm::createInternalizePass(PreserveFunctions));
+
+ passes.run(*unwrap(M));
+}
+
+extern "C" void
+LLVMRustSetDataLayoutFromTargetMachine(LLVMModuleRef Module,
+ LLVMTargetMachineRef TMR) {
+ TargetMachine *Target = unwrap(TMR);
+ unwrap(Module)->setDataLayout(Target->createDataLayout());
+}
+
+extern "C" void LLVMRustSetModulePICLevel(LLVMModuleRef M) {
+ unwrap(M)->setPICLevel(PICLevel::Level::BigPIC);
+}
+
+extern "C" void LLVMRustSetModulePIELevel(LLVMModuleRef M) {
+ unwrap(M)->setPIELevel(PIELevel::Level::Large);
+}
+
+extern "C" void LLVMRustSetModuleCodeModel(LLVMModuleRef M,
+ LLVMRustCodeModel Model) {
+ auto CM = fromRust(Model);
+ if (!CM.hasValue())
+ return;
+ unwrap(M)->setCodeModel(*CM);
+}
+
+// Here you'll find an implementation of ThinLTO as used by the Rust compiler
+// right now. This ThinLTO support is only enabled on "recent-ish" versions of
+// LLVM, and otherwise it's just blanket rejected from other compilers.
+//
+// Most of this implementation is straight copied from LLVM. At the time of
+// this writing it wasn't *quite* suitable to reuse more code from upstream
+// for our purposes, but we should strive to upstream this support once it's
+// ready to go! I figure we may want a bit of testing locally first before
+// sending this upstream to LLVM. I hear though they're quite eager to receive
+// feedback like this!
+//
+// If you're reading this code and wondering "what in the world" or you're
+// working "good lord my LLVM upgrade is *still* failing due to these bindings"
+// then fear not! (ok maybe fear a little). All code here is mostly based
+// on `lib/LTO/ThinLTOCodeGenerator.cpp` in LLVM.
+//
+// You'll find that the general layout here roughly corresponds to the `run`
+// method in that file as well as `ProcessThinLTOModule`. Functions are
+// specifically commented below as well, but if you're updating this code
+// or otherwise trying to understand it, the LLVM source will be useful in
+// interpreting the mysteries within.
+//
+// Otherwise I'll apologize in advance, it probably requires a relatively
+// significant investment on your part to "truly understand" what's going on
+// here. Not saying I do myself, but it took me a while staring at LLVM's source
+// and various online resources about ThinLTO to make heads or tails of all
+// this.
+
+// This is a shared data structure which *must* be threadsafe to share
+// read-only amongst threads. This also corresponds basically to the arguments
+// of the `ProcessThinLTOModule` function in the LLVM source.
+struct LLVMRustThinLTOData {
+ // The combined index that is the global analysis over all modules we're
+ // performing ThinLTO for. This is mostly managed by LLVM.
+ ModuleSummaryIndex Index;
+
+ // All modules we may look at, stored as in-memory serialized versions. This
+ // is later used when inlining to ensure we can extract any module to inline
+ // from.
+ StringMap<MemoryBufferRef> ModuleMap;
+
+ // A set that we manage of everything we *don't* want internalized. Note that
+ // this includes all transitive references right now as well, but it may not
+ // always!
+ DenseSet<GlobalValue::GUID> GUIDPreservedSymbols;
+
+  // The per-module import/export lists and related bookkeeping computed
+  // below; they determine what's internalized and what's inlined across
+  // modules.
+ StringMap<FunctionImporter::ImportMapTy> ImportLists;
+ StringMap<FunctionImporter::ExportSetTy> ExportLists;
+ StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
+ StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
+
+ LLVMRustThinLTOData() : Index(/* HaveGVs = */ false) {}
+};
+
+// Just an argument to the `LLVMRustCreateThinLTOData` function below.
+struct LLVMRustThinLTOModule {
+ const char *identifier;
+ const char *data;
+ size_t len;
+};
+
+// This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`: it returns the first
+// strong definition in the summary list (neither available_externally nor
+// weak-for-linker), falling back to the first definition that is not
+// available_externally, if any.
+static const GlobalValueSummary *
+getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) {
+ auto StrongDefForLinker = llvm::find_if(
+ GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
+ auto Linkage = Summary->linkage();
+ return !GlobalValue::isAvailableExternallyLinkage(Linkage) &&
+ !GlobalValue::isWeakForLinker(Linkage);
+ });
+ if (StrongDefForLinker != GVSummaryList.end())
+ return StrongDefForLinker->get();
+
+ auto FirstDefForLinker = llvm::find_if(
+ GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
+ auto Linkage = Summary->linkage();
+ return !GlobalValue::isAvailableExternallyLinkage(Linkage);
+ });
+ if (FirstDefForLinker == GVSummaryList.end())
+ return nullptr;
+ return FirstDefForLinker->get();
+}
+
+// The main entry point for creating the global ThinLTO analysis. The structure
+// here is basically the same as before threads are spawned in the `run`
+// function of `lib/LTO/ThinLTOCodeGenerator.cpp`.
+extern "C" LLVMRustThinLTOData*
+LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules,
+ int num_modules,
+ const char **preserved_symbols,
+ int num_symbols) {
+ auto Ret = std::make_unique<LLVMRustThinLTOData>();
+
+ // Load each module's summary and merge it into one combined index
+ for (int i = 0; i < num_modules; i++) {
+ auto module = &modules[i];
+ StringRef buffer(module->data, module->len);
+ MemoryBufferRef mem_buffer(buffer, module->identifier);
+
+ Ret->ModuleMap[module->identifier] = mem_buffer;
+
+ if (Error Err = readModuleSummaryIndex(mem_buffer, Ret->Index, i)) {
+ LLVMRustSetLastError(toString(std::move(Err)).c_str());
+ return nullptr;
+ }
+ }
+
+  // Collect for each module the list of functions it defines (GUID -> Summary)
+ Ret->Index.collectDefinedGVSummariesPerModule(Ret->ModuleToDefinedGVSummaries);
+
+ // Convert the preserved symbols set from string to GUID, this is then needed
+ // for internalization.
+ for (int i = 0; i < num_symbols; i++) {
+ auto GUID = GlobalValue::getGUID(preserved_symbols[i]);
+ Ret->GUIDPreservedSymbols.insert(GUID);
+ }
+
+ // Collect the import/export lists for all modules from the call-graph in the
+ // combined index
+ //
+ // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp`
+ auto deadIsPrevailing = [&](GlobalValue::GUID G) {
+ return PrevailingType::Unknown;
+ };
+ // We don't have a complete picture in our use of ThinLTO, just our immediate
+ // crate, so we need `ImportEnabled = false` to limit internalization.
+ // Otherwise, we sometimes lose `static` values -- see #60184.
+ computeDeadSymbolsWithConstProp(Ret->Index, Ret->GUIDPreservedSymbols,
+ deadIsPrevailing, /* ImportEnabled = */ false);
+ ComputeCrossModuleImport(
+ Ret->Index,
+ Ret->ModuleToDefinedGVSummaries,
+ Ret->ImportLists,
+ Ret->ExportLists
+ );
+
+  // Resolve LinkOnce/Weak symbols; this has to be computed early because it
+ // impacts the caching.
+ //
+ // This is copied from `lib/LTO/ThinLTOCodeGenerator.cpp` with some of this
+ // being lifted from `lib/LTO/LTO.cpp` as well
+ DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
+ for (auto &I : Ret->Index) {
+ if (I.second.SummaryList.size() > 1)
+ PrevailingCopy[I.first] = getFirstDefinitionForLinker(I.second.SummaryList);
+ }
+ auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
+ const auto &Prevailing = PrevailingCopy.find(GUID);
+ if (Prevailing == PrevailingCopy.end())
+ return true;
+ return Prevailing->second == S;
+ };
+ auto recordNewLinkage = [&](StringRef ModuleIdentifier,
+ GlobalValue::GUID GUID,
+ GlobalValue::LinkageTypes NewLinkage) {
+ Ret->ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
+ };
+
+#if LLVM_VERSION_GE(13,0)
+  // Uses the FromPrevailing visibility scheme, which works for many binary
+  // formats. We probably could and should use the ELF visibility scheme for
+  // many of our targets, however.
+ lto::Config conf;
+ thinLTOResolvePrevailingInIndex(conf, Ret->Index, isPrevailing, recordNewLinkage,
+ Ret->GUIDPreservedSymbols);
+#else
+ thinLTOResolvePrevailingInIndex(Ret->Index, isPrevailing, recordNewLinkage,
+ Ret->GUIDPreservedSymbols);
+#endif
+ // Here we calculate an `ExportedGUIDs` set for use in the `isExported`
+ // callback below. This callback below will dictate the linkage for all
+  // summaries in the index, and we basically just want to ensure that dead
+ // symbols are internalized. Otherwise everything that's already external
+ // linkage will stay as external, and internal will stay as internal.
+ std::set<GlobalValue::GUID> ExportedGUIDs;
+ for (auto &List : Ret->Index) {
+ for (auto &GVS: List.second.SummaryList) {
+ if (GlobalValue::isLocalLinkage(GVS->linkage()))
+ continue;
+ auto GUID = GVS->getOriginalName();
+ if (GVS->flags().Live)
+ ExportedGUIDs.insert(GUID);
+ }
+ }
+ auto isExported = [&](StringRef ModuleIdentifier, ValueInfo VI) {
+ const auto &ExportList = Ret->ExportLists.find(ModuleIdentifier);
+ return (ExportList != Ret->ExportLists.end() &&
+ ExportList->second.count(VI)) ||
+ ExportedGUIDs.count(VI.getGUID());
+ };
+ thinLTOInternalizeAndPromoteInIndex(Ret->Index, isExported, isPrevailing);
+
+ return Ret.release();
+}
+
+extern "C" void
+LLVMRustFreeThinLTOData(LLVMRustThinLTOData *Data) {
+ delete Data;
+}
+
+// Below are the various passes that happen *per module* when doing ThinLTO.
+//
+// In other words, these are the functions that are all run concurrently
+// with one another, one per module. The passes here correspond to the analysis
+// passes in `lib/LTO/ThinLTOCodeGenerator.cpp`, currently found in the
+// `ProcessThinLTOModule` function. Here they're split up into separate steps
+// so rustc can save off the intermediate bytecode between each step.
+
+static bool
+clearDSOLocalOnDeclarations(Module &Mod, TargetMachine &TM) {
+ // When linking an ELF shared object, dso_local should be dropped. We
+ // conservatively do this for -fpic.
+ bool ClearDSOLocalOnDeclarations =
+ TM.getTargetTriple().isOSBinFormatELF() &&
+ TM.getRelocationModel() != Reloc::Static &&
+ Mod.getPIELevel() == PIELevel::Default;
+ return ClearDSOLocalOnDeclarations;
+}
+
+extern "C" bool
+LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
+ LLVMTargetMachineRef TM) {
+ Module &Mod = *unwrap(M);
+ TargetMachine &Target = *unwrap(TM);
+
+ bool ClearDSOLocal = clearDSOLocalOnDeclarations(Mod, Target);
+ bool error = renameModuleForThinLTO(Mod, Data->Index, ClearDSOLocal);
+
+ if (error) {
+ LLVMRustSetLastError("renameModuleForThinLTO failed");
+ return false;
+ }
+ return true;
+}
+
+extern "C" bool
+LLVMRustPrepareThinLTOResolveWeak(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
+ Module &Mod = *unwrap(M);
+ const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
+#if LLVM_VERSION_GE(14, 0)
+ thinLTOFinalizeInModule(Mod, DefinedGlobals, /*PropagateAttrs=*/true);
+#else
+ thinLTOResolvePrevailingInModule(Mod, DefinedGlobals);
+#endif
+ return true;
+}
+
+extern "C" bool
+LLVMRustPrepareThinLTOInternalize(const LLVMRustThinLTOData *Data, LLVMModuleRef M) {
+ Module &Mod = *unwrap(M);
+ const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(Mod.getModuleIdentifier());
+ thinLTOInternalizeModule(Mod, DefinedGlobals);
+ return true;
+}
+
+extern "C" bool
+LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
+ LLVMTargetMachineRef TM) {
+ Module &Mod = *unwrap(M);
+ TargetMachine &Target = *unwrap(TM);
+
+ const auto &ImportList = Data->ImportLists.lookup(Mod.getModuleIdentifier());
+ auto Loader = [&](StringRef Identifier) {
+ const auto &Memory = Data->ModuleMap.lookup(Identifier);
+ auto &Context = Mod.getContext();
+ auto MOrErr = getLazyBitcodeModule(Memory, Context, true, true);
+
+ if (!MOrErr)
+ return MOrErr;
+
+ // The rest of this closure is a workaround for
+ // https://bugs.llvm.org/show_bug.cgi?id=38184 where during ThinLTO imports
+ // we accidentally import wasm custom sections into different modules,
+    // duplicating them in the final output artifact.
+ //
+ // The issue is worked around here by manually removing the
+    // `wasm.custom_sections` named metadata node from any imported module. We
+    // know this isn't used by any optimization pass, so there's no need for it
+    // to be imported.
+ //
+ // Note that the metadata is currently lazily loaded, so we materialize it
+ // here before looking up if there's metadata inside. The `FunctionImporter`
+ // will immediately materialize metadata anyway after an import, so this
+ // shouldn't be a perf hit.
+ if (Error Err = (*MOrErr)->materializeMetadata()) {
+ Expected<std::unique_ptr<Module>> Ret(std::move(Err));
+ return Ret;
+ }
+
+ auto *WasmCustomSections = (*MOrErr)->getNamedMetadata("wasm.custom_sections");
+ if (WasmCustomSections)
+ WasmCustomSections->eraseFromParent();
+
+ return MOrErr;
+ };
+ bool ClearDSOLocal = clearDSOLocalOnDeclarations(Mod, Target);
+ FunctionImporter Importer(Data->Index, Loader, ClearDSOLocal);
+ Expected<bool> Result = Importer.importFunctions(Mod, ImportList);
+ if (!Result) {
+ LLVMRustSetLastError(toString(Result.takeError()).c_str());
+ return false;
+ }
+ return true;
+}
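+
+// Taken together, the per-module sequence rustc drives looks roughly like the
+// sketch below (illustrative only; error handling and the bytecode snapshots
+// taken between steps are elided, and the real driver lives in back/lto.rs):
+#if 0
+static bool OptimizeOneModule(const LLVMRustThinLTOData *Data, LLVMModuleRef M,
+                              LLVMTargetMachineRef TM) {
+  return LLVMRustPrepareThinLTORename(Data, M, TM) &&
+         LLVMRustPrepareThinLTOResolveWeak(Data, M) &&
+         LLVMRustPrepareThinLTOInternalize(Data, M) &&
+         LLVMRustPrepareThinLTOImport(Data, M, TM);
+}
+#endif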
+
+// This struct and various functions are sort of a hack right now, but the
+// problem is that we've got in-memory LLVM modules after we generate and
+// optimize all codegen-units for one compilation in rustc. To be compatible
+// with the LTO support above we need to serialize the modules plus their
+// ThinLTO summary into memory.
+//
+// This structure is basically an owned version of a serialized module, with
+// a ThinLTO summary attached.
+struct LLVMRustThinLTOBuffer {
+ std::string data;
+};
+
+extern "C" LLVMRustThinLTOBuffer*
+LLVMRustThinLTOBufferCreate(LLVMModuleRef M, bool is_thin) {
+ auto Ret = std::make_unique<LLVMRustThinLTOBuffer>();
+ {
+ raw_string_ostream OS(Ret->data);
+ {
+ legacy::PassManager PM;
+ if (is_thin) {
+ PM.add(createWriteThinLTOBitcodePass(OS));
+ } else {
+ PM.add(createBitcodeWriterPass(OS));
+ }
+ PM.run(*unwrap(M));
+ }
+ }
+ return Ret.release();
+}
+
+extern "C" void
+LLVMRustThinLTOBufferFree(LLVMRustThinLTOBuffer *Buffer) {
+ delete Buffer;
+}
+
+extern "C" const void*
+LLVMRustThinLTOBufferPtr(const LLVMRustThinLTOBuffer *Buffer) {
+ return Buffer->data.data();
+}
+
+extern "C" size_t
+LLVMRustThinLTOBufferLen(const LLVMRustThinLTOBuffer *Buffer) {
+ return Buffer->data.length();
+}
+
+// This is what we use to parse upstream bitcode for actual ThinLTO
+// processing. We'll call this once per module optimized through ThinLTO, and
+// it'll be called concurrently on many threads.
+extern "C" LLVMModuleRef
+LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
+ const char *data,
+ size_t len,
+ const char *identifier) {
+ StringRef Data(data, len);
+ MemoryBufferRef Buffer(Data, identifier);
+ unwrap(Context)->enableDebugTypeODRUniquing();
+ Expected<std::unique_ptr<Module>> SrcOrError =
+ parseBitcodeFile(Buffer, *unwrap(Context));
+ if (!SrcOrError) {
+ LLVMRustSetLastError(toString(SrcOrError.takeError()).c_str());
+ return nullptr;
+ }
+ return wrap(std::move(*SrcOrError).release());
+}
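+
+// Illustrative round trip through the buffer API above (a sketch, not real
+// rustc code; rustc drives the same flow over FFI from back/lto.rs):
+#if 0
+static void RoundTripExample(LLVMModuleRef M, LLVMContextRef Ctx) {
+  LLVMRustThinLTOBuffer *Buf = LLVMRustThinLTOBufferCreate(M, /*is_thin=*/true);
+  const char *Data = static_cast<const char *>(LLVMRustThinLTOBufferPtr(Buf));
+  size_t Len = LLVMRustThinLTOBufferLen(Buf);
+  // The serialized bytes can later be re-parsed, typically on a worker thread:
+  LLVMModuleRef Parsed = LLVMRustParseBitcodeForLTO(Ctx, Data, Len, "example");
+  (void)Parsed;
+  LLVMRustThinLTOBufferFree(Buf);
+}
+#endif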
+
+// Find the bitcode section in the object file data and return it as a slice.
+// Fail if the bitcode section is present but empty.
+//
+// On success, the return value is the pointer to the start of the slice and
+// `out_len` is filled with the (non-zero) length. On failure, the return value
+// is `nullptr` and `out_len` is set to zero.
+extern "C" const char*
+LLVMRustGetBitcodeSliceFromObjectData(const char *data,
+ size_t len,
+ size_t *out_len) {
+ *out_len = 0;
+
+ StringRef Data(data, len);
+ MemoryBufferRef Buffer(Data, ""); // The id is unused.
+
+ Expected<MemoryBufferRef> BitcodeOrError =
+ object::IRObjectFile::findBitcodeInMemBuffer(Buffer);
+ if (!BitcodeOrError) {
+ LLVMRustSetLastError(toString(BitcodeOrError.takeError()).c_str());
+ return nullptr;
+ }
+
+ *out_len = BitcodeOrError->getBufferSize();
+ return BitcodeOrError->getBufferStart();
+}
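+
+// A sketch of the expected checking pattern on the caller's side
+// (illustrative only): a null return means the error was already recorded
+// via LLVMRustSetLastError.
+#if 0
+static bool HasBitcode(const char *Obj, size_t ObjLen) {
+  size_t SliceLen = 0;
+  const char *Slice = LLVMRustGetBitcodeSliceFromObjectData(Obj, ObjLen, &SliceLen);
+  return Slice != nullptr && SliceLen != 0;
+}
+#endif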
+
+// Fetch (up to) the first two `DICompileUnit`s found in the module via `A`
+// and `B`. See the comment in `back/lto.rs` for why this exists.
+extern "C" void
+LLVMRustThinLTOGetDICompileUnit(LLVMModuleRef Mod,
+ DICompileUnit **A,
+ DICompileUnit **B) {
+ Module *M = unwrap(Mod);
+ DICompileUnit **Cur = A;
+ DICompileUnit **Next = B;
+ for (DICompileUnit *CU : M->debug_compile_units()) {
+ *Cur = CU;
+ Cur = Next;
+ Next = nullptr;
+ if (Cur == nullptr)
+ break;
+ }
+}
+
+// Rewrite all `DICompileUnit` pointers to the `DICompileUnit` specified. See
+// the comment in `back/lto.rs` for why this exists.
+extern "C" void
+LLVMRustThinLTOPatchDICompileUnit(LLVMModuleRef Mod, DICompileUnit *Unit) {
+ Module *M = unwrap(Mod);
+
+ // If the original source module didn't have a `DICompileUnit` then try to
+ // merge all the existing compile units. If there aren't actually any though
+ // then there's not much for us to do so return.
+ if (Unit == nullptr) {
+ for (DICompileUnit *CU : M->debug_compile_units()) {
+ Unit = CU;
+ break;
+ }
+ if (Unit == nullptr)
+ return;
+ }
+
+ // Use LLVM's built-in `DebugInfoFinder` to find a bunch of debuginfo and
+ // process it recursively. Note that we used to specifically iterate over
+ // instructions to ensure we feed everything into it, but `processModule`
+ // started doing this the same way in LLVM 7 (commit d769eb36ab2b8).
+ DebugInfoFinder Finder;
+ Finder.processModule(*M);
+
+ // After we've found all our debuginfo, rewrite all subprograms to point to
+ // the same `DICompileUnit`.
+ for (auto &F : Finder.subprograms()) {
+ F->replaceUnit(Unit);
+ }
+
+ // Erase any other references to other `DICompileUnit` instances, the verifier
+ // will later ensure that we don't actually have any other stale references to
+ // worry about.
+ auto *MD = M->getNamedMetadata("llvm.dbg.cu");
+ MD->clearOperands();
+ MD->addOperand(Unit);
+}
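+
+// One plausible pairing of the two entry points above (a sketch; the actual
+// sequencing, including what happens when `B` comes back non-null, is decided
+// by the driver in back/lto.rs):
+#if 0
+static void DedupCompileUnits(LLVMModuleRef M) {
+  DICompileUnit *A = nullptr, *B = nullptr;
+  LLVMRustThinLTOGetDICompileUnit(M, &A, &B);
+  // Rewrite all subprograms to reference the first unit.
+  LLVMRustThinLTOPatchDICompileUnit(M, A);
+}
+#endif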
+
+// Computes the LTO cache key for the provided 'ModId' in the given 'Data',
+// storing the result in 'KeyOut'.
+// Currently, this cache key is a SHA-1 hash of anything that could affect
+// the result of optimizing this module (e.g. module imports, exports, liveness
+// of accessed globals, etc).
+// The precise details are determined by LLVM in `computeLTOCacheKey`, which is
+// used during the normal linker-plugin incremental thin-LTO process.
+extern "C" void
+LLVMRustComputeLTOCacheKey(RustStringRef KeyOut, const char *ModId, LLVMRustThinLTOData *Data) {
+ SmallString<40> Key;
+ llvm::lto::Config conf;
+ const auto &ImportList = Data->ImportLists.lookup(ModId);
+ const auto &ExportList = Data->ExportLists.lookup(ModId);
+ const auto &ResolvedODR = Data->ResolvedODR.lookup(ModId);
+ const auto &DefinedGlobals = Data->ModuleToDefinedGVSummaries.lookup(ModId);
+ std::set<GlobalValue::GUID> CfiFunctionDefs;
+ std::set<GlobalValue::GUID> CfiFunctionDecls;
+
+ // Based on the 'InProcessThinBackend' constructor in LLVM
+ for (auto &Name : Data->Index.cfiFunctionDefs())
+ CfiFunctionDefs.insert(
+ GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
+ for (auto &Name : Data->Index.cfiFunctionDecls())
+ CfiFunctionDecls.insert(
+ GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
+
+ llvm::computeLTOCacheKey(Key, conf, Data->Index, ModId,
+ ImportList, ExportList, ResolvedODR, DefinedGlobals, CfiFunctionDefs, CfiFunctionDecls
+ );
+
+ LLVMRustStringWriteImpl(KeyOut, Key.c_str(), Key.size());
+}
diff --git a/compiler/rustc_llvm/llvm-wrapper/README b/compiler/rustc_llvm/llvm-wrapper/README
new file mode 100644
index 000000000..e1c6dd07d
--- /dev/null
+++ b/compiler/rustc_llvm/llvm-wrapper/README
@@ -0,0 +1,16 @@
+This directory currently contains some LLVM support code. This will generally
+be sent upstream to LLVM in time; for now it lives here.
+
+NOTE: the LLVM C++ ABI is subject to between-version breakage and must *never*
+be exposed to Rust. To allow for easy auditing of that, all Rust-exposed types
+must be typedef-ed as "LLVMXyz", or "LLVMRustXyz" if they were defined here.
+
+Functions that return a failure status and leave the error in
+the LLVM last error should return an LLVMRustResult rather than an
+int or similar, to avoid confusion.
+
+When translating enums, add a single `Other` variant as the first
+one to allow for new variants to be added. It should abort when used
+as an input.
+
+All other types must not be typedef-ed as such.
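+
+For example (illustrative only), a new binding following these rules might
+look like:
+
+  extern "C" LLVMRustResult LLVMRustExample(LLVMModuleRef M) {
+    if (!M) {
+      LLVMRustSetLastError("module was null");
+      return LLVMRustResult::Failure;
+    }
+    return LLVMRustResult::Success;
+  }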
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
new file mode 100644
index 000000000..5f5b5de79
--- /dev/null
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -0,0 +1,1972 @@
+#include "LLVMWrapper.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DiagnosticHandler.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsARM.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/COFFImportFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Pass.h"
+#include "llvm/Bitcode/BitcodeWriterPass.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/ADT/Optional.h"
+
+#include <iostream>
+
+//===----------------------------------------------------------------------===
+//
+// This file defines alternate interfaces to core functions that are more
+// readily callable by Rust's FFI.
+//
+//===----------------------------------------------------------------------===
+
+using namespace llvm;
+using namespace llvm::sys;
+using namespace llvm::object;
+
+// LLVMAtomicOrdering is already an enum - don't create another
+// one.
+static AtomicOrdering fromRust(LLVMAtomicOrdering Ordering) {
+ switch (Ordering) {
+ case LLVMAtomicOrderingNotAtomic:
+ return AtomicOrdering::NotAtomic;
+ case LLVMAtomicOrderingUnordered:
+ return AtomicOrdering::Unordered;
+ case LLVMAtomicOrderingMonotonic:
+ return AtomicOrdering::Monotonic;
+ case LLVMAtomicOrderingAcquire:
+ return AtomicOrdering::Acquire;
+ case LLVMAtomicOrderingRelease:
+ return AtomicOrdering::Release;
+ case LLVMAtomicOrderingAcquireRelease:
+ return AtomicOrdering::AcquireRelease;
+ case LLVMAtomicOrderingSequentiallyConsistent:
+ return AtomicOrdering::SequentiallyConsistent;
+ }
+
+ report_fatal_error("Invalid LLVMAtomicOrdering value!");
+}
+
+static LLVM_THREAD_LOCAL char *LastError;
+
+// Custom error handler for fatal LLVM errors.
+//
+// Notably it exits the process with code 101, unlike LLVM's default of 1.
+static void FatalErrorHandler(void *UserData,
+#if LLVM_VERSION_LT(14, 0)
+ const std::string& Reason,
+#else
+ const char* Reason,
+#endif
+ bool GenCrashDiag) {
+ // Do the same thing that the default error handler does.
+ std::cerr << "LLVM ERROR: " << Reason << std::endl;
+
+ // Since this error handler exits the process, we have to run any cleanup that
+ // LLVM would run after handling the error. This might change with an LLVM
+ // upgrade.
+ sys::RunInterruptHandlers();
+
+ exit(101);
+}
+
+extern "C" void LLVMRustInstallFatalErrorHandler() {
+ install_fatal_error_handler(FatalErrorHandler);
+}
+
+extern "C" void LLVMRustDisableSystemDialogsOnCrash() {
+ sys::DisableSystemDialogsOnCrash();
+}
+
+extern "C" char *LLVMRustGetLastError(void) {
+ char *Ret = LastError;
+ LastError = nullptr;
+ return Ret;
+}
+
+extern "C" void LLVMRustSetLastError(const char *Err) {
+ free((void *)LastError);
+ LastError = strdup(Err);
+}
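+
+// The resulting protocol (a note, not new behavior): when a binding returns
+// LLVMRustResult::Failure, the caller fetches the message with
+// LLVMRustGetLastError and is responsible for free()-ing the returned string,
+// since it was allocated above with strdup.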
+
+extern "C" LLVMContextRef LLVMRustContextCreate(bool shouldDiscardNames) {
+ auto ctx = new LLVMContext();
+ ctx->setDiscardValueNames(shouldDiscardNames);
+ return wrap(ctx);
+}
+
+extern "C" void LLVMRustSetNormalizedTarget(LLVMModuleRef M,
+ const char *Triple) {
+ unwrap(M)->setTargetTriple(Triple::normalize(Triple));
+}
+
+extern "C" void LLVMRustPrintPassTimings() {
+ raw_fd_ostream OS(2, false); // stderr.
+ TimerGroup::printAll(OS);
+}
+
+extern "C" LLVMValueRef LLVMRustGetNamedValue(LLVMModuleRef M, const char *Name,
+ size_t NameLen) {
+ return wrap(unwrap(M)->getNamedValue(StringRef(Name, NameLen)));
+}
+
+extern "C" LLVMValueRef LLVMRustGetOrInsertFunction(LLVMModuleRef M,
+ const char *Name,
+ size_t NameLen,
+ LLVMTypeRef FunctionTy) {
+ return wrap(unwrap(M)
+ ->getOrInsertFunction(StringRef(Name, NameLen),
+ unwrap<FunctionType>(FunctionTy))
+ .getCallee()
+ );
+}
+
+extern "C" LLVMValueRef
+LLVMRustGetOrInsertGlobal(LLVMModuleRef M, const char *Name, size_t NameLen, LLVMTypeRef Ty) {
+ Module *Mod = unwrap(M);
+ StringRef NameRef(Name, NameLen);
+
+ // We don't use Module::getOrInsertGlobal because that returns a Constant*,
+ // which may either be the real GlobalVariable*, or a constant bitcast of it
+ // if our type doesn't match the original declaration. We always want the
+ // GlobalVariable* so we can access linkage, visibility, etc.
+ GlobalVariable *GV = Mod->getGlobalVariable(NameRef, true);
+ if (!GV)
+ GV = new GlobalVariable(*Mod, unwrap(Ty), false,
+ GlobalValue::ExternalLinkage, nullptr, NameRef);
+ return wrap(GV);
+}
+
+extern "C" LLVMValueRef
+LLVMRustInsertPrivateGlobal(LLVMModuleRef M, LLVMTypeRef Ty) {
+ return wrap(new GlobalVariable(*unwrap(M),
+ unwrap(Ty),
+ false,
+ GlobalValue::PrivateLinkage,
+ nullptr));
+}
+
+extern "C" LLVMTypeRef LLVMRustMetadataTypeInContext(LLVMContextRef C) {
+ return wrap(Type::getMetadataTy(*unwrap(C)));
+}
+
+static Attribute::AttrKind fromRust(LLVMRustAttribute Kind) {
+ switch (Kind) {
+ case AlwaysInline:
+ return Attribute::AlwaysInline;
+ case ByVal:
+ return Attribute::ByVal;
+ case Cold:
+ return Attribute::Cold;
+ case InlineHint:
+ return Attribute::InlineHint;
+ case MinSize:
+ return Attribute::MinSize;
+ case Naked:
+ return Attribute::Naked;
+ case NoAlias:
+ return Attribute::NoAlias;
+ case NoCapture:
+ return Attribute::NoCapture;
+ case NoCfCheck:
+ return Attribute::NoCfCheck;
+ case NoInline:
+ return Attribute::NoInline;
+ case NonNull:
+ return Attribute::NonNull;
+ case NoRedZone:
+ return Attribute::NoRedZone;
+ case NoReturn:
+ return Attribute::NoReturn;
+ case NoUnwind:
+ return Attribute::NoUnwind;
+ case OptimizeForSize:
+ return Attribute::OptimizeForSize;
+ case ReadOnly:
+ return Attribute::ReadOnly;
+ case SExt:
+ return Attribute::SExt;
+ case StructRet:
+ return Attribute::StructRet;
+ case UWTable:
+ return Attribute::UWTable;
+ case ZExt:
+ return Attribute::ZExt;
+ case InReg:
+ return Attribute::InReg;
+ case SanitizeThread:
+ return Attribute::SanitizeThread;
+ case SanitizeAddress:
+ return Attribute::SanitizeAddress;
+ case SanitizeMemory:
+ return Attribute::SanitizeMemory;
+ case NonLazyBind:
+ return Attribute::NonLazyBind;
+ case OptimizeNone:
+ return Attribute::OptimizeNone;
+ case ReturnsTwice:
+ return Attribute::ReturnsTwice;
+ case ReadNone:
+ return Attribute::ReadNone;
+ case InaccessibleMemOnly:
+ return Attribute::InaccessibleMemOnly;
+ case SanitizeHWAddress:
+ return Attribute::SanitizeHWAddress;
+ case WillReturn:
+ return Attribute::WillReturn;
+ case StackProtectReq:
+ return Attribute::StackProtectReq;
+ case StackProtectStrong:
+ return Attribute::StackProtectStrong;
+ case StackProtect:
+ return Attribute::StackProtect;
+ case NoUndef:
+ return Attribute::NoUndef;
+ case SanitizeMemTag:
+ return Attribute::SanitizeMemTag;
+ case ShadowCallStack:
+ return Attribute::ShadowCallStack;
+ case AllocSize:
+ return Attribute::AllocSize;
+#if LLVM_VERSION_GE(15, 0)
+ case AllocatedPointer:
+ return Attribute::AllocatedPointer;
+ case AllocAlign:
+ return Attribute::AllocAlign;
+#endif
+ }
+ report_fatal_error("bad AttributeKind");
+}
+
+template<typename T> static inline void AddAttributes(T *t, unsigned Index,
+ LLVMAttributeRef *Attrs, size_t AttrsLen) {
+ AttributeList PAL = t->getAttributes();
+ AttributeList PALNew;
+#if LLVM_VERSION_LT(14, 0)
+ AttrBuilder B;
+ for (LLVMAttributeRef Attr : makeArrayRef(Attrs, AttrsLen))
+ B.addAttribute(unwrap(Attr));
+ PALNew = PAL.addAttributes(t->getContext(), Index, B);
+#else
+ AttrBuilder B(t->getContext());
+ for (LLVMAttributeRef Attr : makeArrayRef(Attrs, AttrsLen))
+ B.addAttribute(unwrap(Attr));
+ PALNew = PAL.addAttributesAtIndex(t->getContext(), Index, B);
+#endif
+ t->setAttributes(PALNew);
+}
+
+extern "C" void LLVMRustAddFunctionAttributes(LLVMValueRef Fn, unsigned Index,
+ LLVMAttributeRef *Attrs, size_t AttrsLen) {
+ Function *F = unwrap<Function>(Fn);
+ AddAttributes(F, Index, Attrs, AttrsLen);
+}
+
+extern "C" void LLVMRustAddCallSiteAttributes(LLVMValueRef Instr, unsigned Index,
+ LLVMAttributeRef *Attrs, size_t AttrsLen) {
+ CallBase *Call = unwrap<CallBase>(Instr);
+ AddAttributes(Call, Index, Attrs, AttrsLen);
+}
+
+extern "C" LLVMAttributeRef LLVMRustCreateAttrNoValue(LLVMContextRef C,
+ LLVMRustAttribute RustAttr) {
+ return wrap(Attribute::get(*unwrap(C), fromRust(RustAttr)));
+}
+
+extern "C" LLVMAttributeRef LLVMRustCreateAlignmentAttr(LLVMContextRef C,
+ uint64_t Bytes) {
+ return wrap(Attribute::getWithAlignment(*unwrap(C), llvm::Align(Bytes)));
+}
+
+extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableAttr(LLVMContextRef C,
+ uint64_t Bytes) {
+ return wrap(Attribute::getWithDereferenceableBytes(*unwrap(C), Bytes));
+}
+
+extern "C" LLVMAttributeRef LLVMRustCreateDereferenceableOrNullAttr(LLVMContextRef C,
+ uint64_t Bytes) {
+ return wrap(Attribute::getWithDereferenceableOrNullBytes(*unwrap(C), Bytes));
+}
+
+extern "C" LLVMAttributeRef LLVMRustCreateByValAttr(LLVMContextRef C, LLVMTypeRef Ty) {
+ return wrap(Attribute::getWithByValType(*unwrap(C), unwrap(Ty)));
+}
+
+extern "C" LLVMAttributeRef LLVMRustCreateStructRetAttr(LLVMContextRef C, LLVMTypeRef Ty) {
+ return wrap(Attribute::getWithStructRetType(*unwrap(C), unwrap(Ty)));
+}
+
+extern "C" LLVMAttributeRef LLVMRustCreateElementTypeAttr(LLVMContextRef C, LLVMTypeRef Ty) {
+#if LLVM_VERSION_GE(15, 0)
+ return wrap(Attribute::get(*unwrap(C), Attribute::ElementType, unwrap(Ty)));
+#else
+ report_fatal_error("Should not be needed on LLVM < 15");
+#endif
+}
+
+extern "C" LLVMAttributeRef LLVMRustCreateUWTableAttr(LLVMContextRef C, bool Async) {
+#if LLVM_VERSION_LT(15, 0)
+ return wrap(Attribute::get(*unwrap(C), Attribute::UWTable));
+#else
+ return wrap(Attribute::getWithUWTableKind(
+ *unwrap(C), Async ? UWTableKind::Async : UWTableKind::Sync));
+#endif
+}
+
+extern "C" LLVMAttributeRef LLVMRustCreateAllocSizeAttr(LLVMContextRef C, uint32_t ElementSizeArg) {
+ return wrap(Attribute::getWithAllocSizeArgs(*unwrap(C), ElementSizeArg, None));
+}
+
+#if LLVM_VERSION_GE(15, 0)
+
+// These values **must** match ffi::AllocKindFlags.
+// It _happens_ to match the LLVM values of llvm::AllocFnKind,
+// but that's happenstance and we do explicit conversions before
+// passing them to LLVM.
+enum class LLVMRustAllocKindFlags : uint64_t {
+ Unknown = 0,
+ Alloc = 1,
+ Realloc = 1 << 1,
+ Free = 1 << 2,
+ Uninitialized = 1 << 3,
+ Zeroed = 1 << 4,
+ Aligned = 1 << 5,
+};
+
+static LLVMRustAllocKindFlags operator&(LLVMRustAllocKindFlags A, LLVMRustAllocKindFlags B) {
+ return static_cast<LLVMRustAllocKindFlags>(static_cast<uint64_t>(A) &
+ static_cast<uint64_t>(B));
+}
+
+static bool isSet(LLVMRustAllocKindFlags F) { return F != LLVMRustAllocKindFlags::Unknown; }
+
+static llvm::AllocFnKind allocKindFromRust(LLVMRustAllocKindFlags F) {
+ llvm::AllocFnKind AFK = llvm::AllocFnKind::Unknown;
+ if (isSet(F & LLVMRustAllocKindFlags::Alloc)) {
+ AFK |= llvm::AllocFnKind::Alloc;
+ }
+ if (isSet(F & LLVMRustAllocKindFlags::Realloc)) {
+ AFK |= llvm::AllocFnKind::Realloc;
+ }
+ if (isSet(F & LLVMRustAllocKindFlags::Free)) {
+ AFK |= llvm::AllocFnKind::Free;
+ }
+ if (isSet(F & LLVMRustAllocKindFlags::Uninitialized)) {
+ AFK |= llvm::AllocFnKind::Uninitialized;
+ }
+ if (isSet(F & LLVMRustAllocKindFlags::Zeroed)) {
+ AFK |= llvm::AllocFnKind::Zeroed;
+ }
+ if (isSet(F & LLVMRustAllocKindFlags::Aligned)) {
+ AFK |= llvm::AllocFnKind::Aligned;
+ }
+ return AFK;
+}
+#endif
+
+extern "C" LLVMAttributeRef LLVMRustCreateAllocKindAttr(LLVMContextRef C, uint64_t AllocKindArg) {
+#if LLVM_VERSION_GE(15, 0)
+ return wrap(Attribute::get(*unwrap(C), Attribute::AllocKind,
+ static_cast<uint64_t>(allocKindFromRust(static_cast<LLVMRustAllocKindFlags>(AllocKindArg)))));
+#else
+ report_fatal_error(
+ "allockind attributes are new in LLVM 15 and should not be used on older LLVMs");
+#endif
+}
+
+// Enable a fast-math flag
+//
+// https://llvm.org/docs/LangRef.html#fast-math-flags
+extern "C" void LLVMRustSetFastMath(LLVMValueRef V) {
+ if (auto I = dyn_cast<Instruction>(unwrap<Value>(V))) {
+ I->setFast(true);
+ }
+}
+
+extern "C" LLVMValueRef
+LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Source,
+ const char *Name, LLVMAtomicOrdering Order) {
+ Value *Ptr = unwrap(Source);
+ LoadInst *LI = unwrap(B)->CreateLoad(unwrap(Ty), Ptr, Name);
+ LI->setAtomic(fromRust(Order));
+ return wrap(LI);
+}
+
+extern "C" LLVMValueRef LLVMRustBuildAtomicStore(LLVMBuilderRef B,
+ LLVMValueRef V,
+ LLVMValueRef Target,
+ LLVMAtomicOrdering Order) {
+ StoreInst *SI = unwrap(B)->CreateStore(unwrap(V), unwrap(Target));
+ SI->setAtomic(fromRust(Order));
+ return wrap(SI);
+}
+
+// FIXME: Use the C-API LLVMBuildAtomicCmpXchg and LLVMSetWeak
+// once we raise our minimum support to LLVM 10.
+extern "C" LLVMValueRef
+LLVMRustBuildAtomicCmpXchg(LLVMBuilderRef B, LLVMValueRef Target,
+ LLVMValueRef Old, LLVMValueRef Source,
+ LLVMAtomicOrdering Order,
+ LLVMAtomicOrdering FailureOrder, LLVMBool Weak) {
+#if LLVM_VERSION_GE(13,0)
+ // Rust probably knows the alignment of the target value and should be able to
+ // specify something more precise than MaybeAlign here. See also
+ // https://reviews.llvm.org/D97224 which may be a useful reference.
+ AtomicCmpXchgInst *ACXI = unwrap(B)->CreateAtomicCmpXchg(
+ unwrap(Target), unwrap(Old), unwrap(Source), llvm::MaybeAlign(), fromRust(Order),
+ fromRust(FailureOrder));
+#else
+ AtomicCmpXchgInst *ACXI = unwrap(B)->CreateAtomicCmpXchg(
+ unwrap(Target), unwrap(Old), unwrap(Source), fromRust(Order),
+ fromRust(FailureOrder));
+#endif
+ ACXI->setWeak(Weak);
+ return wrap(ACXI);
+}
+
+enum class LLVMRustSynchronizationScope {
+ SingleThread,
+ CrossThread,
+};
+
+static SyncScope::ID fromRust(LLVMRustSynchronizationScope Scope) {
+ switch (Scope) {
+ case LLVMRustSynchronizationScope::SingleThread:
+ return SyncScope::SingleThread;
+ case LLVMRustSynchronizationScope::CrossThread:
+ return SyncScope::System;
+ default:
+ report_fatal_error("bad SynchronizationScope.");
+ }
+}
+
+extern "C" LLVMValueRef
+LLVMRustBuildAtomicFence(LLVMBuilderRef B, LLVMAtomicOrdering Order,
+ LLVMRustSynchronizationScope Scope) {
+ return wrap(unwrap(B)->CreateFence(fromRust(Order), fromRust(Scope)));
+}
+
+enum class LLVMRustAsmDialect {
+ Att,
+ Intel,
+};
+
+static InlineAsm::AsmDialect fromRust(LLVMRustAsmDialect Dialect) {
+ switch (Dialect) {
+ case LLVMRustAsmDialect::Att:
+ return InlineAsm::AD_ATT;
+ case LLVMRustAsmDialect::Intel:
+ return InlineAsm::AD_Intel;
+ default:
+ report_fatal_error("bad AsmDialect.");
+ }
+}
+
+extern "C" LLVMValueRef
+LLVMRustInlineAsm(LLVMTypeRef Ty, char *AsmString, size_t AsmStringLen,
+ char *Constraints, size_t ConstraintsLen,
+ LLVMBool HasSideEffects, LLVMBool IsAlignStack,
+ LLVMRustAsmDialect Dialect, LLVMBool CanThrow) {
+#if LLVM_VERSION_GE(13, 0)
+ return wrap(InlineAsm::get(unwrap<FunctionType>(Ty),
+ StringRef(AsmString, AsmStringLen),
+ StringRef(Constraints, ConstraintsLen),
+ HasSideEffects, IsAlignStack,
+ fromRust(Dialect), CanThrow));
+#else
+ return wrap(InlineAsm::get(unwrap<FunctionType>(Ty),
+ StringRef(AsmString, AsmStringLen),
+ StringRef(Constraints, ConstraintsLen),
+ HasSideEffects, IsAlignStack,
+ fromRust(Dialect)));
+#endif
+}
+
+extern "C" bool LLVMRustInlineAsmVerify(LLVMTypeRef Ty, char *Constraints,
+ size_t ConstraintsLen) {
+#if LLVM_VERSION_LT(15, 0)
+ return InlineAsm::Verify(unwrap<FunctionType>(Ty),
+ StringRef(Constraints, ConstraintsLen));
+#else
+ // llvm::Error converts to true if it is an error.
+ return !llvm::errorToBool(InlineAsm::verify(
+ unwrap<FunctionType>(Ty), StringRef(Constraints, ConstraintsLen)));
+#endif
+}
+
+extern "C" void LLVMRustAppendModuleInlineAsm(LLVMModuleRef M, const char *Asm,
+ size_t AsmLen) {
+ unwrap(M)->appendModuleInlineAsm(StringRef(Asm, AsmLen));
+}
+
+typedef DIBuilder *LLVMRustDIBuilderRef;
+
+template <typename DIT> DIT *unwrapDIPtr(LLVMMetadataRef Ref) {
+ return (DIT *)(Ref ? unwrap<MDNode>(Ref) : nullptr);
+}
+
+#define DIDescriptor DIScope
+#define DIArray DINodeArray
+#define unwrapDI unwrapDIPtr
+
+// These values **must** match debuginfo::DIFlags! They also *happen*
+// to match LLVM, but that isn't required as we do giant sets of
+// matching below. The value shouldn't be directly passed to LLVM.
+enum class LLVMRustDIFlags : uint32_t {
+ FlagZero = 0,
+ FlagPrivate = 1,
+ FlagProtected = 2,
+ FlagPublic = 3,
+ FlagFwdDecl = (1 << 2),
+ FlagAppleBlock = (1 << 3),
+ FlagBlockByrefStruct = (1 << 4),
+ FlagVirtual = (1 << 5),
+ FlagArtificial = (1 << 6),
+ FlagExplicit = (1 << 7),
+ FlagPrototyped = (1 << 8),
+ FlagObjcClassComplete = (1 << 9),
+ FlagObjectPointer = (1 << 10),
+ FlagVector = (1 << 11),
+ FlagStaticMember = (1 << 12),
+ FlagLValueReference = (1 << 13),
+ FlagRValueReference = (1 << 14),
+ FlagExternalTypeRef = (1 << 15),
+ FlagIntroducedVirtual = (1 << 18),
+ FlagBitField = (1 << 19),
+ FlagNoReturn = (1 << 20),
+ // Do not add values that are not supported by the minimum LLVM
+ // version we support! see llvm/include/llvm/IR/DebugInfoFlags.def
+};
+
+inline LLVMRustDIFlags operator&(LLVMRustDIFlags A, LLVMRustDIFlags B) {
+ return static_cast<LLVMRustDIFlags>(static_cast<uint32_t>(A) &
+ static_cast<uint32_t>(B));
+}
+
+inline LLVMRustDIFlags operator|(LLVMRustDIFlags A, LLVMRustDIFlags B) {
+ return static_cast<LLVMRustDIFlags>(static_cast<uint32_t>(A) |
+ static_cast<uint32_t>(B));
+}
+
+inline LLVMRustDIFlags &operator|=(LLVMRustDIFlags &A, LLVMRustDIFlags B) {
+ return A = A | B;
+}
+
+inline bool isSet(LLVMRustDIFlags F) { return F != LLVMRustDIFlags::FlagZero; }
+
+inline LLVMRustDIFlags visibility(LLVMRustDIFlags F) {
+ return static_cast<LLVMRustDIFlags>(static_cast<uint32_t>(F) & 0x3);
+}
+
+static DINode::DIFlags fromRust(LLVMRustDIFlags Flags) {
+ DINode::DIFlags Result = DINode::DIFlags::FlagZero;
+
+ switch (visibility(Flags)) {
+ case LLVMRustDIFlags::FlagPrivate:
+ Result |= DINode::DIFlags::FlagPrivate;
+ break;
+ case LLVMRustDIFlags::FlagProtected:
+ Result |= DINode::DIFlags::FlagProtected;
+ break;
+ case LLVMRustDIFlags::FlagPublic:
+ Result |= DINode::DIFlags::FlagPublic;
+ break;
+ default:
+ // The rest are handled below
+ break;
+ }
+
+ if (isSet(Flags & LLVMRustDIFlags::FlagFwdDecl)) {
+ Result |= DINode::DIFlags::FlagFwdDecl;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagAppleBlock)) {
+ Result |= DINode::DIFlags::FlagAppleBlock;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagVirtual)) {
+ Result |= DINode::DIFlags::FlagVirtual;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagArtificial)) {
+ Result |= DINode::DIFlags::FlagArtificial;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagExplicit)) {
+ Result |= DINode::DIFlags::FlagExplicit;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagPrototyped)) {
+ Result |= DINode::DIFlags::FlagPrototyped;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagObjcClassComplete)) {
+ Result |= DINode::DIFlags::FlagObjcClassComplete;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagObjectPointer)) {
+ Result |= DINode::DIFlags::FlagObjectPointer;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagVector)) {
+ Result |= DINode::DIFlags::FlagVector;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagStaticMember)) {
+ Result |= DINode::DIFlags::FlagStaticMember;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagLValueReference)) {
+ Result |= DINode::DIFlags::FlagLValueReference;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagRValueReference)) {
+ Result |= DINode::DIFlags::FlagRValueReference;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagIntroducedVirtual)) {
+ Result |= DINode::DIFlags::FlagIntroducedVirtual;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagBitField)) {
+ Result |= DINode::DIFlags::FlagBitField;
+ }
+ if (isSet(Flags & LLVMRustDIFlags::FlagNoReturn)) {
+ Result |= DINode::DIFlags::FlagNoReturn;
+ }
+
+ return Result;
+}
+
+// These values **must** match debuginfo::DISPFlags! They also *happen*
+// to match LLVM, but that isn't required as we do giant sets of
+// matching below. The value shouldn't be directly passed to LLVM.
+enum class LLVMRustDISPFlags : uint32_t {
+ SPFlagZero = 0,
+ SPFlagVirtual = 1,
+ SPFlagPureVirtual = 2,
+ SPFlagLocalToUnit = (1 << 2),
+ SPFlagDefinition = (1 << 3),
+ SPFlagOptimized = (1 << 4),
+ SPFlagMainSubprogram = (1 << 5),
+ // Do not add values that are not supported by the minimum LLVM
+ // version we support! see llvm/include/llvm/IR/DebugInfoFlags.def
+ // (In LLVM < 8, createFunction supported these as separate bool arguments.)
+};
+
+inline LLVMRustDISPFlags operator&(LLVMRustDISPFlags A, LLVMRustDISPFlags B) {
+ return static_cast<LLVMRustDISPFlags>(static_cast<uint32_t>(A) &
+ static_cast<uint32_t>(B));
+}
+
+inline LLVMRustDISPFlags operator|(LLVMRustDISPFlags A, LLVMRustDISPFlags B) {
+ return static_cast<LLVMRustDISPFlags>(static_cast<uint32_t>(A) |
+ static_cast<uint32_t>(B));
+}
+
+inline LLVMRustDISPFlags &operator|=(LLVMRustDISPFlags &A, LLVMRustDISPFlags B) {
+ return A = A | B;
+}
+
+inline bool isSet(LLVMRustDISPFlags F) { return F != LLVMRustDISPFlags::SPFlagZero; }
+
+inline LLVMRustDISPFlags virtuality(LLVMRustDISPFlags F) {
+ return static_cast<LLVMRustDISPFlags>(static_cast<uint32_t>(F) & 0x3);
+}
+
+static DISubprogram::DISPFlags fromRust(LLVMRustDISPFlags SPFlags) {
+ DISubprogram::DISPFlags Result = DISubprogram::DISPFlags::SPFlagZero;
+
+ switch (virtuality(SPFlags)) {
+ case LLVMRustDISPFlags::SPFlagVirtual:
+ Result |= DISubprogram::DISPFlags::SPFlagVirtual;
+ break;
+ case LLVMRustDISPFlags::SPFlagPureVirtual:
+ Result |= DISubprogram::DISPFlags::SPFlagPureVirtual;
+ break;
+ default:
+ // The rest are handled below
+ break;
+ }
+
+ if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagLocalToUnit)) {
+ Result |= DISubprogram::DISPFlags::SPFlagLocalToUnit;
+ }
+ if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagDefinition)) {
+ Result |= DISubprogram::DISPFlags::SPFlagDefinition;
+ }
+ if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagOptimized)) {
+ Result |= DISubprogram::DISPFlags::SPFlagOptimized;
+ }
+ if (isSet(SPFlags & LLVMRustDISPFlags::SPFlagMainSubprogram)) {
+ Result |= DISubprogram::DISPFlags::SPFlagMainSubprogram;
+ }
+
+ return Result;
+}
+
+enum class LLVMRustDebugEmissionKind {
+ NoDebug,
+ FullDebug,
+ LineTablesOnly,
+};
+
+static DICompileUnit::DebugEmissionKind fromRust(LLVMRustDebugEmissionKind Kind) {
+ switch (Kind) {
+ case LLVMRustDebugEmissionKind::NoDebug:
+ return DICompileUnit::DebugEmissionKind::NoDebug;
+ case LLVMRustDebugEmissionKind::FullDebug:
+ return DICompileUnit::DebugEmissionKind::FullDebug;
+ case LLVMRustDebugEmissionKind::LineTablesOnly:
+ return DICompileUnit::DebugEmissionKind::LineTablesOnly;
+ default:
+ report_fatal_error("bad DebugEmissionKind.");
+ }
+}
+
+enum class LLVMRustChecksumKind {
+ None,
+ MD5,
+ SHA1,
+ SHA256,
+};
+
+static Optional<DIFile::ChecksumKind> fromRust(LLVMRustChecksumKind Kind) {
+ switch (Kind) {
+ case LLVMRustChecksumKind::None:
+ return None;
+ case LLVMRustChecksumKind::MD5:
+ return DIFile::ChecksumKind::CSK_MD5;
+ case LLVMRustChecksumKind::SHA1:
+ return DIFile::ChecksumKind::CSK_SHA1;
+ case LLVMRustChecksumKind::SHA256:
+ return DIFile::ChecksumKind::CSK_SHA256;
+ default:
+ report_fatal_error("bad ChecksumKind.");
+ }
+}
+
+extern "C" uint32_t LLVMRustDebugMetadataVersion() {
+ return DEBUG_METADATA_VERSION;
+}
+
+extern "C" uint32_t LLVMRustVersionPatch() { return LLVM_VERSION_PATCH; }
+
+extern "C" uint32_t LLVMRustVersionMinor() { return LLVM_VERSION_MINOR; }
+
+extern "C" uint32_t LLVMRustVersionMajor() { return LLVM_VERSION_MAJOR; }
+
+extern "C" void LLVMRustAddModuleFlag(
+ LLVMModuleRef M,
+ Module::ModFlagBehavior MergeBehavior,
+ const char *Name,
+ uint32_t Value) {
+ unwrap(M)->addModuleFlag(MergeBehavior, Name, Value);
+}
+
+extern "C" bool LLVMRustHasModuleFlag(LLVMModuleRef M, const char *Name,
+ size_t Len) {
+ return unwrap(M)->getModuleFlag(StringRef(Name, Len)) != nullptr;
+}
+
+extern "C" LLVMValueRef LLVMRustMetadataAsValue(LLVMContextRef C, LLVMMetadataRef MD) {
+ return wrap(MetadataAsValue::get(*unwrap(C), unwrap(MD)));
+}
+
+extern "C" void LLVMRustGlobalAddMetadata(
+ LLVMValueRef Global, unsigned Kind, LLVMMetadataRef MD) {
+ unwrap<GlobalObject>(Global)->addMetadata(Kind, *unwrap<MDNode>(MD));
+}
+
+extern "C" LLVMRustDIBuilderRef LLVMRustDIBuilderCreate(LLVMModuleRef M) {
+ return new DIBuilder(*unwrap(M));
+}
+
+extern "C" void LLVMRustDIBuilderDispose(LLVMRustDIBuilderRef Builder) {
+ delete Builder;
+}
+
+extern "C" void LLVMRustDIBuilderFinalize(LLVMRustDIBuilderRef Builder) {
+ Builder->finalize();
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateCompileUnit(
+ LLVMRustDIBuilderRef Builder, unsigned Lang, LLVMMetadataRef FileRef,
+ const char *Producer, size_t ProducerLen, bool isOptimized,
+ const char *Flags, unsigned RuntimeVer,
+ const char *SplitName, size_t SplitNameLen,
+ LLVMRustDebugEmissionKind Kind,
+ uint64_t DWOId, bool SplitDebugInlining) {
+ auto *File = unwrapDI<DIFile>(FileRef);
+
+ return wrap(Builder->createCompileUnit(Lang, File, StringRef(Producer, ProducerLen),
+ isOptimized, Flags, RuntimeVer,
+ StringRef(SplitName, SplitNameLen),
+ fromRust(Kind), DWOId, SplitDebugInlining));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateFile(
+ LLVMRustDIBuilderRef Builder,
+ const char *Filename, size_t FilenameLen,
+ const char *Directory, size_t DirectoryLen, LLVMRustChecksumKind CSKind,
+ const char *Checksum, size_t ChecksumLen) {
+ Optional<DIFile::ChecksumKind> llvmCSKind = fromRust(CSKind);
+ Optional<DIFile::ChecksumInfo<StringRef>> CSInfo{};
+ if (llvmCSKind)
+ CSInfo.emplace(*llvmCSKind, StringRef{Checksum, ChecksumLen});
+ return wrap(Builder->createFile(StringRef(Filename, FilenameLen),
+ StringRef(Directory, DirectoryLen),
+ CSInfo));
+}
+
+extern "C" LLVMMetadataRef
+LLVMRustDIBuilderCreateSubroutineType(LLVMRustDIBuilderRef Builder,
+ LLVMMetadataRef ParameterTypes) {
+ return wrap(Builder->createSubroutineType(
+ DITypeRefArray(unwrap<MDTuple>(ParameterTypes))));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateFunction(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen,
+ const char *LinkageName, size_t LinkageNameLen,
+ LLVMMetadataRef File, unsigned LineNo,
+ LLVMMetadataRef Ty, unsigned ScopeLine, LLVMRustDIFlags Flags,
+ LLVMRustDISPFlags SPFlags, LLVMValueRef MaybeFn, LLVMMetadataRef TParam,
+ LLVMMetadataRef Decl) {
+ DITemplateParameterArray TParams =
+ DITemplateParameterArray(unwrap<MDTuple>(TParam));
+ DISubprogram::DISPFlags llvmSPFlags = fromRust(SPFlags);
+ DINode::DIFlags llvmFlags = fromRust(Flags);
+ DISubprogram *Sub = Builder->createFunction(
+ unwrapDI<DIScope>(Scope),
+ StringRef(Name, NameLen),
+ StringRef(LinkageName, LinkageNameLen),
+ unwrapDI<DIFile>(File), LineNo,
+ unwrapDI<DISubroutineType>(Ty), ScopeLine, llvmFlags,
+ llvmSPFlags, TParams, unwrapDIPtr<DISubprogram>(Decl));
+ if (MaybeFn)
+ unwrap<Function>(MaybeFn)->setSubprogram(Sub);
+ return wrap(Sub);
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateBasicType(
+ LLVMRustDIBuilderRef Builder, const char *Name, size_t NameLen,
+ uint64_t SizeInBits, unsigned Encoding) {
+ return wrap(Builder->createBasicType(StringRef(Name, NameLen), SizeInBits, Encoding));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateTypedef(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Type, const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Scope) {
+ return wrap(Builder->createTypedef(
+ unwrap<DIType>(Type), StringRef(Name, NameLen), unwrap<DIFile>(File),
+ LineNo, unwrapDIPtr<DIScope>(Scope)));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreatePointerType(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef PointeeTy,
+ uint64_t SizeInBits, uint32_t AlignInBits, unsigned AddressSpace,
+ const char *Name, size_t NameLen) {
+ return wrap(Builder->createPointerType(unwrapDI<DIType>(PointeeTy),
+ SizeInBits, AlignInBits,
+ AddressSpace,
+ StringRef(Name, NameLen)));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStructType(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
+ uint32_t AlignInBits, LLVMRustDIFlags Flags,
+ LLVMMetadataRef DerivedFrom, LLVMMetadataRef Elements,
+ unsigned RunTimeLang, LLVMMetadataRef VTableHolder,
+ const char *UniqueId, size_t UniqueIdLen) {
+ return wrap(Builder->createStructType(
+ unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
+ unwrapDI<DIFile>(File), LineNumber,
+ SizeInBits, AlignInBits, fromRust(Flags), unwrapDI<DIType>(DerivedFrom),
+ DINodeArray(unwrapDI<MDTuple>(Elements)), RunTimeLang,
+ unwrapDI<DIType>(VTableHolder), StringRef(UniqueId, UniqueIdLen)));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariantPart(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
+ uint32_t AlignInBits, LLVMRustDIFlags Flags, LLVMMetadataRef Discriminator,
+ LLVMMetadataRef Elements, const char *UniqueId, size_t UniqueIdLen) {
+ return wrap(Builder->createVariantPart(
+ unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
+ unwrapDI<DIFile>(File), LineNumber,
+ SizeInBits, AlignInBits, fromRust(Flags), unwrapDI<DIDerivedType>(Discriminator),
+ DINodeArray(unwrapDI<MDTuple>(Elements)), StringRef(UniqueId, UniqueIdLen)));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateMemberType(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNo, uint64_t SizeInBits,
+ uint32_t AlignInBits, uint64_t OffsetInBits, LLVMRustDIFlags Flags,
+ LLVMMetadataRef Ty) {
+ return wrap(Builder->createMemberType(unwrapDI<DIDescriptor>(Scope),
+ StringRef(Name, NameLen),
+ unwrapDI<DIFile>(File), LineNo,
+ SizeInBits, AlignInBits, OffsetInBits,
+ fromRust(Flags), unwrapDI<DIType>(Ty)));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariantMemberType(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen, LLVMMetadataRef File, unsigned LineNo,
+ uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits, LLVMValueRef Discriminant,
+ LLVMRustDIFlags Flags, LLVMMetadataRef Ty) {
+ llvm::ConstantInt* D = nullptr;
+ if (Discriminant) {
+ D = unwrap<llvm::ConstantInt>(Discriminant);
+ }
+ return wrap(Builder->createVariantMemberType(unwrapDI<DIDescriptor>(Scope),
+ StringRef(Name, NameLen),
+ unwrapDI<DIFile>(File), LineNo,
+ SizeInBits, AlignInBits, OffsetInBits, D,
+ fromRust(Flags), unwrapDI<DIType>(Ty)));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateLexicalBlock(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ LLVMMetadataRef File, unsigned Line, unsigned Col) {
+ return wrap(Builder->createLexicalBlock(unwrapDI<DIDescriptor>(Scope),
+ unwrapDI<DIFile>(File), Line, Col));
+}
+
+extern "C" LLVMMetadataRef
+LLVMRustDIBuilderCreateLexicalBlockFile(LLVMRustDIBuilderRef Builder,
+ LLVMMetadataRef Scope,
+ LLVMMetadataRef File) {
+ return wrap(Builder->createLexicalBlockFile(unwrapDI<DIDescriptor>(Scope),
+ unwrapDI<DIFile>(File)));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateStaticVariable(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Context,
+ const char *Name, size_t NameLen,
+ const char *LinkageName, size_t LinkageNameLen,
+ LLVMMetadataRef File, unsigned LineNo,
+ LLVMMetadataRef Ty, bool IsLocalToUnit, LLVMValueRef V,
+ LLVMMetadataRef Decl = nullptr, uint32_t AlignInBits = 0) {
+ llvm::GlobalVariable *InitVal = cast<llvm::GlobalVariable>(unwrap(V));
+
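+  // If the global is initialized with a plain integer or floating-point
+  // constant, record that value in the debug info as a constant-value
+  // expression.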
+ llvm::DIExpression *InitExpr = nullptr;
+ if (llvm::ConstantInt *IntVal = llvm::dyn_cast<llvm::ConstantInt>(InitVal)) {
+ InitExpr = Builder->createConstantValueExpression(
+ IntVal->getValue().getSExtValue());
+ } else if (llvm::ConstantFP *FPVal =
+ llvm::dyn_cast<llvm::ConstantFP>(InitVal)) {
+ InitExpr = Builder->createConstantValueExpression(
+ FPVal->getValueAPF().bitcastToAPInt().getZExtValue());
+ }
+
+ llvm::DIGlobalVariableExpression *VarExpr = Builder->createGlobalVariableExpression(
+ unwrapDI<DIDescriptor>(Context), StringRef(Name, NameLen),
+ StringRef(LinkageName, LinkageNameLen),
+ unwrapDI<DIFile>(File), LineNo, unwrapDI<DIType>(Ty), IsLocalToUnit,
+ /* isDefined */ true,
+ InitExpr, unwrapDIPtr<MDNode>(Decl),
+ /* templateParams */ nullptr,
+ AlignInBits);
+
+ InitVal->setMetadata("dbg", VarExpr);
+
+ return wrap(VarExpr);
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateVariable(
+ LLVMRustDIBuilderRef Builder, unsigned Tag, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNo,
+ LLVMMetadataRef Ty, bool AlwaysPreserve, LLVMRustDIFlags Flags,
+ unsigned ArgNo, uint32_t AlignInBits) {
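+  // Tag 0x100 (DW_TAG_auto_variable) selects a local variable; any other tag
+  // is treated as a function parameter.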
+ if (Tag == 0x100) { // DW_TAG_auto_variable
+ return wrap(Builder->createAutoVariable(
+ unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
+ unwrapDI<DIFile>(File), LineNo,
+ unwrapDI<DIType>(Ty), AlwaysPreserve, fromRust(Flags), AlignInBits));
+ } else {
+ return wrap(Builder->createParameterVariable(
+ unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), ArgNo,
+ unwrapDI<DIFile>(File), LineNo,
+ unwrapDI<DIType>(Ty), AlwaysPreserve, fromRust(Flags)));
+ }
+}
+
+extern "C" LLVMMetadataRef
+LLVMRustDIBuilderCreateArrayType(LLVMRustDIBuilderRef Builder, uint64_t Size,
+ uint32_t AlignInBits, LLVMMetadataRef Ty,
+ LLVMMetadataRef Subscripts) {
+ return wrap(
+ Builder->createArrayType(Size, AlignInBits, unwrapDI<DIType>(Ty),
+ DINodeArray(unwrapDI<MDTuple>(Subscripts))));
+}
+
+extern "C" LLVMMetadataRef
+LLVMRustDIBuilderGetOrCreateSubrange(LLVMRustDIBuilderRef Builder, int64_t Lo,
+ int64_t Count) {
+ return wrap(Builder->getOrCreateSubrange(Lo, Count));
+}
+
+extern "C" LLVMMetadataRef
+LLVMRustDIBuilderGetOrCreateArray(LLVMRustDIBuilderRef Builder,
+ LLVMMetadataRef *Ptr, unsigned Count) {
+ Metadata **DataValue = unwrap(Ptr);
+ return wrap(
+ Builder->getOrCreateArray(ArrayRef<Metadata *>(DataValue, Count)).get());
+}
+
+extern "C" LLVMValueRef LLVMRustDIBuilderInsertDeclareAtEnd(
+ LLVMRustDIBuilderRef Builder, LLVMValueRef V, LLVMMetadataRef VarInfo,
+ uint64_t *AddrOps, unsigned AddrOpsCount, LLVMMetadataRef DL,
+ LLVMBasicBlockRef InsertAtEnd) {
+ return wrap(Builder->insertDeclare(
+ unwrap(V), unwrap<DILocalVariable>(VarInfo),
+ Builder->createExpression(llvm::ArrayRef<uint64_t>(AddrOps, AddrOpsCount)),
+ DebugLoc(cast<MDNode>(unwrap(DL))),
+ unwrap(InsertAtEnd)));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateEnumerator(
+ LLVMRustDIBuilderRef Builder, const char *Name, size_t NameLen,
+ int64_t Value, bool IsUnsigned) {
+ return wrap(Builder->createEnumerator(StringRef(Name, NameLen), Value, IsUnsigned));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateEnumerationType(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
+ uint32_t AlignInBits, LLVMMetadataRef Elements,
+ LLVMMetadataRef ClassTy, bool IsScoped) {
+ return wrap(Builder->createEnumerationType(
+ unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen),
+ unwrapDI<DIFile>(File), LineNumber,
+ SizeInBits, AlignInBits, DINodeArray(unwrapDI<MDTuple>(Elements)),
+ unwrapDI<DIType>(ClassTy), "", IsScoped));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateUnionType(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen,
+ LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
+ uint32_t AlignInBits, LLVMRustDIFlags Flags, LLVMMetadataRef Elements,
+ unsigned RunTimeLang, const char *UniqueId, size_t UniqueIdLen) {
+ return wrap(Builder->createUnionType(
+ unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), unwrapDI<DIFile>(File),
+ LineNumber, SizeInBits, AlignInBits, fromRust(Flags),
+ DINodeArray(unwrapDI<MDTuple>(Elements)), RunTimeLang,
+ StringRef(UniqueId, UniqueIdLen)));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateTemplateTypeParameter(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen, LLVMMetadataRef Ty) {
+ bool IsDefault = false; // FIXME: should we ever set this true?
+ return wrap(Builder->createTemplateTypeParameter(
+ unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), unwrapDI<DIType>(Ty), IsDefault));
+}
+
+extern "C" LLVMMetadataRef LLVMRustDIBuilderCreateNameSpace(
+ LLVMRustDIBuilderRef Builder, LLVMMetadataRef Scope,
+ const char *Name, size_t NameLen, bool ExportSymbols) {
+ return wrap(Builder->createNameSpace(
+ unwrapDI<DIDescriptor>(Scope), StringRef(Name, NameLen), ExportSymbols
+ ));
+}
+
+extern "C" void
+LLVMRustDICompositeTypeReplaceArrays(LLVMRustDIBuilderRef Builder,
+ LLVMMetadataRef CompositeTy,
+ LLVMMetadataRef Elements,
+ LLVMMetadataRef Params) {
+ DICompositeType *Tmp = unwrapDI<DICompositeType>(CompositeTy);
+ Builder->replaceArrays(Tmp, DINodeArray(unwrap<MDTuple>(Elements)),
+ DINodeArray(unwrap<MDTuple>(Params)));
+}
+
+extern "C" LLVMMetadataRef
+LLVMRustDIBuilderCreateDebugLocation(unsigned Line, unsigned Column,
+ LLVMMetadataRef ScopeRef,
+ LLVMMetadataRef InlinedAt) {
+ MDNode *Scope = unwrapDIPtr<MDNode>(ScopeRef);
+ DILocation *Loc = DILocation::get(
+ Scope->getContext(), Line, Column, Scope,
+ unwrapDIPtr<MDNode>(InlinedAt));
+ return wrap(Loc);
+}
+
+extern "C" uint64_t LLVMRustDIBuilderCreateOpDeref() {
+ return dwarf::DW_OP_deref;
+}
+
+extern "C" uint64_t LLVMRustDIBuilderCreateOpPlusUconst() {
+ return dwarf::DW_OP_plus_uconst;
+}
+
+extern "C" void LLVMRustWriteTypeToString(LLVMTypeRef Ty, RustStringRef Str) {
+ RawRustStringOstream OS(Str);
+ unwrap<llvm::Type>(Ty)->print(OS);
+}
+
+extern "C" void LLVMRustWriteValueToString(LLVMValueRef V,
+ RustStringRef Str) {
+ RawRustStringOstream OS(Str);
+ if (!V) {
+ OS << "(null)";
+ } else {
+ OS << "(";
+ unwrap<llvm::Value>(V)->getType()->print(OS);
+ OS << ":";
+ unwrap<llvm::Value>(V)->print(OS);
+ OS << ")";
+ }
+}
+
+// The LLVMArrayType function in LLVM's C API does not support a 64-bit
+// ElementCount, hence this binding.
+extern "C" LLVMTypeRef LLVMRustArrayType(LLVMTypeRef ElementTy,
+ uint64_t ElementCount) {
+ return wrap(ArrayType::get(unwrap(ElementTy), ElementCount));
+}
+
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Twine, LLVMTwineRef)
+
+extern "C" void LLVMRustWriteTwineToString(LLVMTwineRef T, RustStringRef Str) {
+ RawRustStringOstream OS(Str);
+ unwrap(T)->print(OS);
+}
+
+extern "C" void LLVMRustUnpackOptimizationDiagnostic(
+ LLVMDiagnosticInfoRef DI, RustStringRef PassNameOut,
+ LLVMValueRef *FunctionOut, unsigned* Line, unsigned* Column,
+ RustStringRef FilenameOut, RustStringRef MessageOut) {
+  // It is undefined behavior to call this on anything other than an
+  // optimization diagnostic!
+ llvm::DiagnosticInfoOptimizationBase *Opt =
+ static_cast<llvm::DiagnosticInfoOptimizationBase *>(unwrap(DI));
+
+ RawRustStringOstream PassNameOS(PassNameOut);
+ PassNameOS << Opt->getPassName();
+ *FunctionOut = wrap(&Opt->getFunction());
+
+ RawRustStringOstream FilenameOS(FilenameOut);
+ DiagnosticLocation loc = Opt->getLocation();
+ if (loc.isValid()) {
+ *Line = loc.getLine();
+ *Column = loc.getColumn();
+ FilenameOS << loc.getAbsolutePath();
+ }
+
+ RawRustStringOstream MessageOS(MessageOut);
+ MessageOS << Opt->getMsg();
+}
+
+enum class LLVMRustDiagnosticLevel {
+ Error,
+ Warning,
+ Note,
+ Remark,
+};
+
+extern "C" void
+LLVMRustUnpackInlineAsmDiagnostic(LLVMDiagnosticInfoRef DI,
+ LLVMRustDiagnosticLevel *LevelOut,
+ unsigned *CookieOut,
+ LLVMTwineRef *MessageOut) {
+  // It is undefined behavior to call this on anything other than an inline
+  // assembly diagnostic!
+ llvm::DiagnosticInfoInlineAsm *IA =
+ static_cast<llvm::DiagnosticInfoInlineAsm *>(unwrap(DI));
+
+ *CookieOut = IA->getLocCookie();
+ *MessageOut = wrap(&IA->getMsgStr());
+
+ switch (IA->getSeverity()) {
+ case DS_Error:
+ *LevelOut = LLVMRustDiagnosticLevel::Error;
+ break;
+ case DS_Warning:
+ *LevelOut = LLVMRustDiagnosticLevel::Warning;
+ break;
+ case DS_Note:
+ *LevelOut = LLVMRustDiagnosticLevel::Note;
+ break;
+ case DS_Remark:
+ *LevelOut = LLVMRustDiagnosticLevel::Remark;
+ break;
+ default:
+ report_fatal_error("Invalid LLVMRustDiagnosticLevel value!");
+ }
+}
+
+extern "C" void LLVMRustWriteDiagnosticInfoToString(LLVMDiagnosticInfoRef DI,
+ RustStringRef Str) {
+ RawRustStringOstream OS(Str);
+ DiagnosticPrinterRawOStream DP(OS);
+ unwrap(DI)->print(DP);
+}
+
+enum class LLVMRustDiagnosticKind {
+ Other,
+ InlineAsm,
+ StackSize,
+ DebugMetadataVersion,
+ SampleProfile,
+ OptimizationRemark,
+ OptimizationRemarkMissed,
+ OptimizationRemarkAnalysis,
+ OptimizationRemarkAnalysisFPCommute,
+ OptimizationRemarkAnalysisAliasing,
+ OptimizationRemarkOther,
+ OptimizationFailure,
+ PGOProfile,
+ Linker,
+ Unsupported,
+ SrcMgr,
+};
+
+static LLVMRustDiagnosticKind toRust(DiagnosticKind Kind) {
+ switch (Kind) {
+ case DK_InlineAsm:
+ return LLVMRustDiagnosticKind::InlineAsm;
+ case DK_StackSize:
+ return LLVMRustDiagnosticKind::StackSize;
+ case DK_DebugMetadataVersion:
+ return LLVMRustDiagnosticKind::DebugMetadataVersion;
+ case DK_SampleProfile:
+ return LLVMRustDiagnosticKind::SampleProfile;
+ case DK_OptimizationRemark:
+ case DK_MachineOptimizationRemark:
+ return LLVMRustDiagnosticKind::OptimizationRemark;
+ case DK_OptimizationRemarkMissed:
+ case DK_MachineOptimizationRemarkMissed:
+ return LLVMRustDiagnosticKind::OptimizationRemarkMissed;
+ case DK_OptimizationRemarkAnalysis:
+ case DK_MachineOptimizationRemarkAnalysis:
+ return LLVMRustDiagnosticKind::OptimizationRemarkAnalysis;
+ case DK_OptimizationRemarkAnalysisFPCommute:
+ return LLVMRustDiagnosticKind::OptimizationRemarkAnalysisFPCommute;
+ case DK_OptimizationRemarkAnalysisAliasing:
+ return LLVMRustDiagnosticKind::OptimizationRemarkAnalysisAliasing;
+ case DK_PGOProfile:
+ return LLVMRustDiagnosticKind::PGOProfile;
+ case DK_Linker:
+ return LLVMRustDiagnosticKind::Linker;
+ case DK_Unsupported:
+ return LLVMRustDiagnosticKind::Unsupported;
+#if LLVM_VERSION_GE(13, 0)
+ case DK_SrcMgr:
+ return LLVMRustDiagnosticKind::SrcMgr;
+#endif
+ default:
+ return (Kind >= DK_FirstRemark && Kind <= DK_LastRemark)
+ ? LLVMRustDiagnosticKind::OptimizationRemarkOther
+ : LLVMRustDiagnosticKind::Other;
+ }
+}
+
+extern "C" LLVMRustDiagnosticKind
+LLVMRustGetDiagInfoKind(LLVMDiagnosticInfoRef DI) {
+ return toRust((DiagnosticKind)unwrap(DI)->getKind());
+}
+
+// This is kept distinct from LLVMGetTypeKind, because when
+// a new type kind is added, the Rust-side enum must be
+// updated or UB will result.
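+// (The Rust-side counterpart is the `TypeKind` FFI enum in rustc_codegen_llvm;
+// the two must cover exactly the same set of kinds.)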
+extern "C" LLVMTypeKind LLVMRustGetTypeKind(LLVMTypeRef Ty) {
+ switch (unwrap(Ty)->getTypeID()) {
+ case Type::VoidTyID:
+ return LLVMVoidTypeKind;
+ case Type::HalfTyID:
+ return LLVMHalfTypeKind;
+ case Type::FloatTyID:
+ return LLVMFloatTypeKind;
+ case Type::DoubleTyID:
+ return LLVMDoubleTypeKind;
+ case Type::X86_FP80TyID:
+ return LLVMX86_FP80TypeKind;
+ case Type::FP128TyID:
+ return LLVMFP128TypeKind;
+ case Type::PPC_FP128TyID:
+ return LLVMPPC_FP128TypeKind;
+ case Type::LabelTyID:
+ return LLVMLabelTypeKind;
+ case Type::MetadataTyID:
+ return LLVMMetadataTypeKind;
+ case Type::IntegerTyID:
+ return LLVMIntegerTypeKind;
+ case Type::FunctionTyID:
+ return LLVMFunctionTypeKind;
+ case Type::StructTyID:
+ return LLVMStructTypeKind;
+ case Type::ArrayTyID:
+ return LLVMArrayTypeKind;
+ case Type::PointerTyID:
+ return LLVMPointerTypeKind;
+ case Type::FixedVectorTyID:
+ return LLVMVectorTypeKind;
+ case Type::X86_MMXTyID:
+ return LLVMX86_MMXTypeKind;
+ case Type::TokenTyID:
+ return LLVMTokenTypeKind;
+ case Type::ScalableVectorTyID:
+ return LLVMScalableVectorTypeKind;
+ case Type::BFloatTyID:
+ return LLVMBFloatTypeKind;
+ case Type::X86_AMXTyID:
+ return LLVMX86_AMXTypeKind;
+#if LLVM_VERSION_GE(15, 0) && LLVM_VERSION_LT(16, 0)
+ case Type::DXILPointerTyID:
+ report_fatal_error("Rust does not support DirectX typed pointers.");
+ break;
+#endif
+#if LLVM_VERSION_GE(16, 0)
+ case Type::TypedPointerTyID:
+ report_fatal_error("Rust does not support typed pointers.");
+ break;
+#endif
+ }
+ report_fatal_error("Unhandled TypeID.");
+}
+
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(SMDiagnostic, LLVMSMDiagnosticRef)
+
+#if LLVM_VERSION_LT(13, 0)
+using LLVMInlineAsmDiagHandlerTy = LLVMContext::InlineAsmDiagHandlerTy;
+#else
+using LLVMInlineAsmDiagHandlerTy = void*;
+#endif
+
+extern "C" void LLVMRustSetInlineAsmDiagnosticHandler(
+ LLVMContextRef C, LLVMInlineAsmDiagHandlerTy H, void *CX) {
+ // Diagnostic handlers were unified in LLVM change 5de2d189e6ad, so starting
+ // with LLVM 13 this function is gone.
+#if LLVM_VERSION_LT(13, 0)
+ unwrap(C)->setInlineAsmDiagnosticHandler(H, CX);
+#endif
+}
+
+extern "C" LLVMSMDiagnosticRef LLVMRustGetSMDiagnostic(
+ LLVMDiagnosticInfoRef DI, unsigned *Cookie) {
+#if LLVM_VERSION_GE(13, 0)
+ llvm::DiagnosticInfoSrcMgr *SM = static_cast<llvm::DiagnosticInfoSrcMgr *>(unwrap(DI));
+ *Cookie = SM->getLocCookie();
+ return wrap(&SM->getSMDiag());
+#else
+ report_fatal_error("Shouldn't get called on older versions");
+#endif
+}
+
+extern "C" bool LLVMRustUnpackSMDiagnostic(LLVMSMDiagnosticRef DRef,
+ RustStringRef MessageOut,
+ RustStringRef BufferOut,
+ LLVMRustDiagnosticLevel* LevelOut,
+ unsigned* LocOut,
+ unsigned* RangesOut,
+ size_t* NumRanges) {
+ SMDiagnostic& D = *unwrap(DRef);
+ RawRustStringOstream MessageOS(MessageOut);
+ MessageOS << D.getMessage();
+
+ switch (D.getKind()) {
+ case SourceMgr::DK_Error:
+ *LevelOut = LLVMRustDiagnosticLevel::Error;
+ break;
+ case SourceMgr::DK_Warning:
+ *LevelOut = LLVMRustDiagnosticLevel::Warning;
+ break;
+ case SourceMgr::DK_Note:
+ *LevelOut = LLVMRustDiagnosticLevel::Note;
+ break;
+ case SourceMgr::DK_Remark:
+ *LevelOut = LLVMRustDiagnosticLevel::Remark;
+ break;
+ default:
+ report_fatal_error("Invalid LLVMRustDiagnosticLevel value!");
+ }
+
+ if (D.getLoc() == SMLoc())
+ return false;
+
+ const SourceMgr &LSM = *D.getSourceMgr();
+ const MemoryBuffer *LBuf = LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
+ LLVMRustStringWriteImpl(BufferOut, LBuf->getBufferStart(), LBuf->getBufferSize());
+
+ *LocOut = D.getLoc().getPointer() - LBuf->getBufferStart();
+
+ *NumRanges = std::min(*NumRanges, D.getRanges().size());
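+  // Ranges are column offsets within the diagnostic's line; convert them to
+  // buffer offsets by adding the offset of the start of that line.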
+ size_t LineStart = *LocOut - (size_t)D.getColumnNo();
+ for (size_t i = 0; i < *NumRanges; i++) {
+ RangesOut[i * 2] = LineStart + D.getRanges()[i].first;
+ RangesOut[i * 2 + 1] = LineStart + D.getRanges()[i].second;
+ }
+
+ return true;
+}
+
+extern "C" LLVMValueRef LLVMRustBuildCleanupPad(LLVMBuilderRef B,
+ LLVMValueRef ParentPad,
+ unsigned ArgCount,
+ LLVMValueRef *LLArgs,
+ const char *Name) {
+ Value **Args = unwrap(LLArgs);
+ if (ParentPad == nullptr) {
+ Type *Ty = Type::getTokenTy(unwrap(B)->getContext());
+ ParentPad = wrap(Constant::getNullValue(Ty));
+ }
+ return wrap(unwrap(B)->CreateCleanupPad(
+ unwrap(ParentPad), ArrayRef<Value *>(Args, ArgCount), Name));
+}
+
+extern "C" LLVMValueRef LLVMRustBuildCleanupRet(LLVMBuilderRef B,
+ LLVMValueRef CleanupPad,
+ LLVMBasicBlockRef UnwindBB) {
+ CleanupPadInst *Inst = cast<CleanupPadInst>(unwrap(CleanupPad));
+ return wrap(unwrap(B)->CreateCleanupRet(Inst, unwrap(UnwindBB)));
+}
+
+extern "C" LLVMValueRef
+LLVMRustBuildCatchPad(LLVMBuilderRef B, LLVMValueRef ParentPad,
+ unsigned ArgCount, LLVMValueRef *LLArgs, const char *Name) {
+ Value **Args = unwrap(LLArgs);
+ return wrap(unwrap(B)->CreateCatchPad(
+ unwrap(ParentPad), ArrayRef<Value *>(Args, ArgCount), Name));
+}
+
+extern "C" LLVMValueRef LLVMRustBuildCatchRet(LLVMBuilderRef B,
+ LLVMValueRef Pad,
+ LLVMBasicBlockRef BB) {
+ return wrap(unwrap(B)->CreateCatchRet(cast<CatchPadInst>(unwrap(Pad)),
+ unwrap(BB)));
+}
+
+extern "C" LLVMValueRef LLVMRustBuildCatchSwitch(LLVMBuilderRef B,
+ LLVMValueRef ParentPad,
+ LLVMBasicBlockRef BB,
+ unsigned NumHandlers,
+ const char *Name) {
+ if (ParentPad == nullptr) {
+ Type *Ty = Type::getTokenTy(unwrap(B)->getContext());
+ ParentPad = wrap(Constant::getNullValue(Ty));
+ }
+ return wrap(unwrap(B)->CreateCatchSwitch(unwrap(ParentPad), unwrap(BB),
+ NumHandlers, Name));
+}
+
+extern "C" void LLVMRustAddHandler(LLVMValueRef CatchSwitchRef,
+ LLVMBasicBlockRef Handler) {
+ Value *CatchSwitch = unwrap(CatchSwitchRef);
+ cast<CatchSwitchInst>(CatchSwitch)->addHandler(unwrap(Handler));
+}
+
+extern "C" OperandBundleDef *LLVMRustBuildOperandBundleDef(const char *Name,
+ LLVMValueRef *Inputs,
+ unsigned NumInputs) {
+ return new OperandBundleDef(Name, makeArrayRef(unwrap(Inputs), NumInputs));
+}
+
+extern "C" void LLVMRustFreeOperandBundleDef(OperandBundleDef *Bundle) {
+ delete Bundle;
+}
+
+extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ OperandBundleDef *Bundle) {
+ Value *Callee = unwrap(Fn);
+ FunctionType *FTy = unwrap<FunctionType>(Ty);
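+  // `Bundle` is an optional single operand bundle: null means no bundles.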
+ unsigned Len = Bundle ? 1 : 0;
+ ArrayRef<OperandBundleDef> Bundles = makeArrayRef(Bundle, Len);
+ return wrap(unwrap(B)->CreateCall(
+ FTy, Callee, makeArrayRef(unwrap(Args), NumArgs), Bundles));
+}
+
+extern "C" LLVMValueRef LLVMRustGetInstrProfIncrementIntrinsic(LLVMModuleRef M) {
+ return wrap(llvm::Intrinsic::getDeclaration(unwrap(M),
+ (llvm::Intrinsic::ID)llvm::Intrinsic::instrprof_increment));
+}
+
+extern "C" LLVMValueRef LLVMRustBuildMemCpy(LLVMBuilderRef B,
+ LLVMValueRef Dst, unsigned DstAlign,
+ LLVMValueRef Src, unsigned SrcAlign,
+ LLVMValueRef Size, bool IsVolatile) {
+ return wrap(unwrap(B)->CreateMemCpy(
+ unwrap(Dst), MaybeAlign(DstAlign),
+ unwrap(Src), MaybeAlign(SrcAlign),
+ unwrap(Size), IsVolatile));
+}
+
+extern "C" LLVMValueRef LLVMRustBuildMemMove(LLVMBuilderRef B,
+ LLVMValueRef Dst, unsigned DstAlign,
+ LLVMValueRef Src, unsigned SrcAlign,
+ LLVMValueRef Size, bool IsVolatile) {
+ return wrap(unwrap(B)->CreateMemMove(
+ unwrap(Dst), MaybeAlign(DstAlign),
+ unwrap(Src), MaybeAlign(SrcAlign),
+ unwrap(Size), IsVolatile));
+}
+
+extern "C" LLVMValueRef LLVMRustBuildMemSet(LLVMBuilderRef B,
+ LLVMValueRef Dst, unsigned DstAlign,
+ LLVMValueRef Val,
+ LLVMValueRef Size, bool IsVolatile) {
+ return wrap(unwrap(B)->CreateMemSet(
+ unwrap(Dst), unwrap(Val), unwrap(Size), MaybeAlign(DstAlign), IsVolatile));
+}
+
+extern "C" LLVMValueRef
+LLVMRustBuildInvoke(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+ LLVMValueRef *Args, unsigned NumArgs,
+ LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
+ OperandBundleDef *Bundle, const char *Name) {
+ Value *Callee = unwrap(Fn);
+ FunctionType *FTy = unwrap<FunctionType>(Ty);
+ unsigned Len = Bundle ? 1 : 0;
+ ArrayRef<OperandBundleDef> Bundles = makeArrayRef(Bundle, Len);
+ return wrap(unwrap(B)->CreateInvoke(FTy, Callee, unwrap(Then), unwrap(Catch),
+ makeArrayRef(unwrap(Args), NumArgs),
+ Bundles, Name));
+}
+
+extern "C" void LLVMRustPositionBuilderAtStart(LLVMBuilderRef B,
+ LLVMBasicBlockRef BB) {
+ auto Point = unwrap(BB)->getFirstInsertionPt();
+ unwrap(B)->SetInsertPoint(unwrap(BB), Point);
+}
+
+extern "C" void LLVMRustSetComdat(LLVMModuleRef M, LLVMValueRef V,
+ const char *Name, size_t NameLen) {
+ Triple TargetTriple(unwrap(M)->getTargetTriple());
+ GlobalObject *GV = unwrap<GlobalObject>(V);
+ if (TargetTriple.supportsCOMDAT()) {
+ StringRef NameRef(Name, NameLen);
+ GV->setComdat(unwrap(M)->getOrInsertComdat(NameRef));
+ }
+}
+
+enum class LLVMRustLinkage {
+ ExternalLinkage = 0,
+ AvailableExternallyLinkage = 1,
+ LinkOnceAnyLinkage = 2,
+ LinkOnceODRLinkage = 3,
+ WeakAnyLinkage = 4,
+ WeakODRLinkage = 5,
+ AppendingLinkage = 6,
+ InternalLinkage = 7,
+ PrivateLinkage = 8,
+ ExternalWeakLinkage = 9,
+ CommonLinkage = 10,
+};
+
+static LLVMRustLinkage toRust(LLVMLinkage Linkage) {
+ switch (Linkage) {
+ case LLVMExternalLinkage:
+ return LLVMRustLinkage::ExternalLinkage;
+ case LLVMAvailableExternallyLinkage:
+ return LLVMRustLinkage::AvailableExternallyLinkage;
+ case LLVMLinkOnceAnyLinkage:
+ return LLVMRustLinkage::LinkOnceAnyLinkage;
+ case LLVMLinkOnceODRLinkage:
+ return LLVMRustLinkage::LinkOnceODRLinkage;
+ case LLVMWeakAnyLinkage:
+ return LLVMRustLinkage::WeakAnyLinkage;
+ case LLVMWeakODRLinkage:
+ return LLVMRustLinkage::WeakODRLinkage;
+ case LLVMAppendingLinkage:
+ return LLVMRustLinkage::AppendingLinkage;
+ case LLVMInternalLinkage:
+ return LLVMRustLinkage::InternalLinkage;
+ case LLVMPrivateLinkage:
+ return LLVMRustLinkage::PrivateLinkage;
+ case LLVMExternalWeakLinkage:
+ return LLVMRustLinkage::ExternalWeakLinkage;
+ case LLVMCommonLinkage:
+ return LLVMRustLinkage::CommonLinkage;
+ default:
+ report_fatal_error("Invalid LLVMRustLinkage value!");
+ }
+}
+
+static LLVMLinkage fromRust(LLVMRustLinkage Linkage) {
+ switch (Linkage) {
+ case LLVMRustLinkage::ExternalLinkage:
+ return LLVMExternalLinkage;
+ case LLVMRustLinkage::AvailableExternallyLinkage:
+ return LLVMAvailableExternallyLinkage;
+ case LLVMRustLinkage::LinkOnceAnyLinkage:
+ return LLVMLinkOnceAnyLinkage;
+ case LLVMRustLinkage::LinkOnceODRLinkage:
+ return LLVMLinkOnceODRLinkage;
+ case LLVMRustLinkage::WeakAnyLinkage:
+ return LLVMWeakAnyLinkage;
+ case LLVMRustLinkage::WeakODRLinkage:
+ return LLVMWeakODRLinkage;
+ case LLVMRustLinkage::AppendingLinkage:
+ return LLVMAppendingLinkage;
+ case LLVMRustLinkage::InternalLinkage:
+ return LLVMInternalLinkage;
+ case LLVMRustLinkage::PrivateLinkage:
+ return LLVMPrivateLinkage;
+ case LLVMRustLinkage::ExternalWeakLinkage:
+ return LLVMExternalWeakLinkage;
+ case LLVMRustLinkage::CommonLinkage:
+ return LLVMCommonLinkage;
+ }
+ report_fatal_error("Invalid LLVMRustLinkage value!");
+}
+
+extern "C" LLVMRustLinkage LLVMRustGetLinkage(LLVMValueRef V) {
+ return toRust(LLVMGetLinkage(V));
+}
+
+extern "C" void LLVMRustSetLinkage(LLVMValueRef V,
+ LLVMRustLinkage RustLinkage) {
+ LLVMSetLinkage(V, fromRust(RustLinkage));
+}
+
+extern "C" LLVMValueRef LLVMRustConstInBoundsGEP2(LLVMTypeRef Ty,
+ LLVMValueRef ConstantVal,
+ LLVMValueRef *ConstantIndices,
+ unsigned NumIndices) {
+ ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+ NumIndices);
+ Constant *Val = unwrap<Constant>(ConstantVal);
+ return wrap(ConstantExpr::getInBoundsGetElementPtr(unwrap(Ty), Val, IdxList));
+}
+
+// Returns true if both high and low were successfully set. Fails if the
+// constant is wider than 128 bits.
+extern "C" bool LLVMRustConstInt128Get(LLVMValueRef CV, bool sext, uint64_t *high, uint64_t *low)
+{
+ auto C = unwrap<llvm::ConstantInt>(CV);
+ if (C->getBitWidth() > 128) { return false; }
+ APInt AP;
+#if LLVM_VERSION_GE(15, 0)
+ if (sext) {
+ AP = C->getValue().sext(128);
+ } else {
+ AP = C->getValue().zext(128);
+ }
+#else
+ if (sext) {
+ AP = C->getValue().sextOrSelf(128);
+ } else {
+ AP = C->getValue().zextOrSelf(128);
+ }
+#endif
+ *low = AP.getLoBits(64).getZExtValue();
+ *high = AP.getHiBits(64).getZExtValue();
+ return true;
+}
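+// (On the Rust side, the two halves can be recombined as
+// `((high as u128) << 64) | low as u128`; that sketch is illustrative and not
+// part of this wrapper.)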
+
+enum class LLVMRustVisibility {
+ Default = 0,
+ Hidden = 1,
+ Protected = 2,
+};
+
+static LLVMRustVisibility toRust(LLVMVisibility Vis) {
+ switch (Vis) {
+ case LLVMDefaultVisibility:
+ return LLVMRustVisibility::Default;
+ case LLVMHiddenVisibility:
+ return LLVMRustVisibility::Hidden;
+ case LLVMProtectedVisibility:
+ return LLVMRustVisibility::Protected;
+ }
+ report_fatal_error("Invalid LLVMRustVisibility value!");
+}
+
+static LLVMVisibility fromRust(LLVMRustVisibility Vis) {
+ switch (Vis) {
+ case LLVMRustVisibility::Default:
+ return LLVMDefaultVisibility;
+ case LLVMRustVisibility::Hidden:
+ return LLVMHiddenVisibility;
+ case LLVMRustVisibility::Protected:
+ return LLVMProtectedVisibility;
+ }
+ report_fatal_error("Invalid LLVMRustVisibility value!");
+}
+
+extern "C" LLVMRustVisibility LLVMRustGetVisibility(LLVMValueRef V) {
+ return toRust(LLVMGetVisibility(V));
+}
+
+// Unlike LLVM's own integer-cast bindings, this one takes the signedness of
+// the cast explicitly.
+extern "C" LLVMValueRef LLVMRustBuildIntCast(LLVMBuilderRef B, LLVMValueRef Val,
+ LLVMTypeRef DestTy, bool isSigned) {
+ return wrap(unwrap(B)->CreateIntCast(unwrap(Val), unwrap(DestTy), isSigned, ""));
+}
+
+extern "C" void LLVMRustSetVisibility(LLVMValueRef V,
+ LLVMRustVisibility RustVisibility) {
+ LLVMSetVisibility(V, fromRust(RustVisibility));
+}
+
+extern "C" void LLVMRustSetDSOLocal(LLVMValueRef Global, bool is_dso_local) {
+ unwrap<GlobalValue>(Global)->setDSOLocal(is_dso_local);
+}
+
+struct LLVMRustModuleBuffer {
+ std::string data;
+};
+
+extern "C" LLVMRustModuleBuffer*
+LLVMRustModuleBufferCreate(LLVMModuleRef M) {
+ auto Ret = std::make_unique<LLVMRustModuleBuffer>();
+ {
+ raw_string_ostream OS(Ret->data);
+ {
+ legacy::PassManager PM;
+ PM.add(createBitcodeWriterPass(OS));
+ PM.run(*unwrap(M));
+ }
+ }
+ return Ret.release();
+}
+
+extern "C" void
+LLVMRustModuleBufferFree(LLVMRustModuleBuffer *Buffer) {
+ delete Buffer;
+}
+
+extern "C" const void*
+LLVMRustModuleBufferPtr(const LLVMRustModuleBuffer *Buffer) {
+ return Buffer->data.data();
+}
+
+extern "C" size_t
+LLVMRustModuleBufferLen(const LLVMRustModuleBuffer *Buffer) {
+ return Buffer->data.length();
+}
+
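+// A rough size metric for a module: the number of functions it contains.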
+extern "C" uint64_t
+LLVMRustModuleCost(LLVMModuleRef M) {
+ auto f = unwrap(M)->functions();
+ return std::distance(std::begin(f), std::end(f));
+}
+
+// Vector reductions:
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceFAdd(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Src) {
+  return wrap(unwrap(B)->CreateFAddReduce(unwrap(Acc), unwrap(Src)));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceFMul(LLVMBuilderRef B, LLVMValueRef Acc, LLVMValueRef Src) {
+  return wrap(unwrap(B)->CreateFMulReduce(unwrap(Acc), unwrap(Src)));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceAdd(LLVMBuilderRef B, LLVMValueRef Src) {
+ return wrap(unwrap(B)->CreateAddReduce(unwrap(Src)));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceMul(LLVMBuilderRef B, LLVMValueRef Src) {
+ return wrap(unwrap(B)->CreateMulReduce(unwrap(Src)));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceAnd(LLVMBuilderRef B, LLVMValueRef Src) {
+ return wrap(unwrap(B)->CreateAndReduce(unwrap(Src)));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceOr(LLVMBuilderRef B, LLVMValueRef Src) {
+ return wrap(unwrap(B)->CreateOrReduce(unwrap(Src)));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceXor(LLVMBuilderRef B, LLVMValueRef Src) {
+ return wrap(unwrap(B)->CreateXorReduce(unwrap(Src)));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceMin(LLVMBuilderRef B, LLVMValueRef Src, bool IsSigned) {
+ return wrap(unwrap(B)->CreateIntMinReduce(unwrap(Src), IsSigned));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceMax(LLVMBuilderRef B, LLVMValueRef Src, bool IsSigned) {
+ return wrap(unwrap(B)->CreateIntMaxReduce(unwrap(Src), IsSigned));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceFMin(LLVMBuilderRef B, LLVMValueRef Src, bool NoNaN) {
+ Instruction *I = unwrap(B)->CreateFPMinReduce(unwrap(Src));
+ I->setHasNoNaNs(NoNaN);
+ return wrap(I);
+}
+extern "C" LLVMValueRef
+LLVMRustBuildVectorReduceFMax(LLVMBuilderRef B, LLVMValueRef Src, bool NoNaN) {
+ Instruction *I = unwrap(B)->CreateFPMaxReduce(unwrap(Src));
+ I->setHasNoNaNs(NoNaN);
+ return wrap(I);
+}
+
+extern "C" LLVMValueRef
+LLVMRustBuildMinNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS) {
+  return wrap(unwrap(B)->CreateMinNum(unwrap(LHS), unwrap(RHS)));
+}
+extern "C" LLVMValueRef
+LLVMRustBuildMaxNum(LLVMBuilderRef B, LLVMValueRef LHS, LLVMValueRef RHS) {
+  return wrap(unwrap(B)->CreateMaxNum(unwrap(LHS), unwrap(RHS)));
+}
+
+// This struct contains all necessary info about a symbol exported from a DLL.
+struct LLVMRustCOFFShortExport {
+ const char* name;
+ bool ordinal_present;
+ // The value of `ordinal` is only meaningful if `ordinal_present` is true.
+ uint16_t ordinal;
+};
+
+// Machine must be a COFF machine type, as defined in PE specs.
+extern "C" LLVMRustResult LLVMRustWriteImportLibrary(
+ const char* ImportName,
+ const char* Path,
+ const LLVMRustCOFFShortExport* Exports,
+ size_t NumExports,
+ uint16_t Machine,
+ bool MinGW)
+{
+ std::vector<llvm::object::COFFShortExport> ConvertedExports;
+ ConvertedExports.reserve(NumExports);
+
+ for (size_t i = 0; i < NumExports; ++i) {
+ bool ordinal_present = Exports[i].ordinal_present;
+ uint16_t ordinal = ordinal_present ? Exports[i].ordinal : 0;
+ ConvertedExports.push_back(llvm::object::COFFShortExport{
+ Exports[i].name, // Name
+ std::string{}, // ExtName
+ std::string{}, // SymbolName
+ std::string{}, // AliasTarget
+ ordinal, // Ordinal
+ ordinal_present, // Noname
+ false, // Data
+ false, // Private
+ false // Constant
+ });
+ }
+
+ auto Error = llvm::object::writeImportLibrary(
+ ImportName,
+ Path,
+ ConvertedExports,
+ static_cast<llvm::COFF::MachineTypes>(Machine),
+ MinGW);
+ if (Error) {
+ std::string errorString;
+ llvm::raw_string_ostream stream(errorString);
+ stream << Error;
+ stream.flush();
+ LLVMRustSetLastError(errorString.c_str());
+ return LLVMRustResult::Failure;
+ } else {
+ return LLVMRustResult::Success;
+ }
+}
+
+// Transfers ownership of DiagnosticHandler unique_ptr to the caller.
+extern "C" DiagnosticHandler *
+LLVMRustContextGetDiagnosticHandler(LLVMContextRef C) {
+ std::unique_ptr<DiagnosticHandler> DH = unwrap(C)->getDiagnosticHandler();
+ return DH.release();
+}
+
+// Moves ownership of the given DiagnosticHandler into the LLVMContext to
+// provide custom diagnostic handling.
+extern "C" void LLVMRustContextSetDiagnosticHandler(LLVMContextRef C,
+ DiagnosticHandler *DH) {
+ unwrap(C)->setDiagnosticHandler(std::unique_ptr<DiagnosticHandler>(DH));
+}
+
+using LLVMDiagnosticHandlerTy = DiagnosticHandler::DiagnosticHandlerTy;
+
+// Configures a diagnostic handler that invokes the provided callback when a
+// backend needs to emit a diagnostic.
+//
+// When RemarkAllPasses is true, remarks are enabled for all passes. Otherwise
+// the RemarkPasses array specifies individual passes for which remarks will be
+// enabled.
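+//
+// A null callback leaves diagnostics unhandled, so LLVM falls back to its
+// default behavior.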
+extern "C" void LLVMRustContextConfigureDiagnosticHandler(
+ LLVMContextRef C, LLVMDiagnosticHandlerTy DiagnosticHandlerCallback,
+ void *DiagnosticHandlerContext, bool RemarkAllPasses,
+ const char * const * RemarkPasses, size_t RemarkPassesLen) {
+
+ class RustDiagnosticHandler final : public DiagnosticHandler {
+ public:
+ RustDiagnosticHandler(LLVMDiagnosticHandlerTy DiagnosticHandlerCallback,
+ void *DiagnosticHandlerContext,
+ bool RemarkAllPasses,
+ std::vector<std::string> RemarkPasses)
+ : DiagnosticHandlerCallback(DiagnosticHandlerCallback),
+ DiagnosticHandlerContext(DiagnosticHandlerContext),
+ RemarkAllPasses(RemarkAllPasses),
+ RemarkPasses(RemarkPasses) {}
+
+ virtual bool handleDiagnostics(const DiagnosticInfo &DI) override {
+ if (DiagnosticHandlerCallback) {
+ DiagnosticHandlerCallback(DI, DiagnosticHandlerContext);
+ return true;
+ }
+ return false;
+ }
+
+ bool isAnalysisRemarkEnabled(StringRef PassName) const override {
+ return isRemarkEnabled(PassName);
+ }
+
+ bool isMissedOptRemarkEnabled(StringRef PassName) const override {
+ return isRemarkEnabled(PassName);
+ }
+
+ bool isPassedOptRemarkEnabled(StringRef PassName) const override {
+ return isRemarkEnabled(PassName);
+ }
+
+ bool isAnyRemarkEnabled() const override {
+ return RemarkAllPasses || !RemarkPasses.empty();
+ }
+
+ private:
+ bool isRemarkEnabled(StringRef PassName) const {
+ if (RemarkAllPasses)
+ return true;
+
+ for (auto &Pass : RemarkPasses)
+ if (Pass == PassName)
+ return true;
+
+ return false;
+ }
+
+ LLVMDiagnosticHandlerTy DiagnosticHandlerCallback = nullptr;
+ void *DiagnosticHandlerContext = nullptr;
+
+ bool RemarkAllPasses = false;
+ std::vector<std::string> RemarkPasses;
+ };
+
+ std::vector<std::string> Passes;
+ for (size_t I = 0; I != RemarkPassesLen; ++I)
+ Passes.push_back(RemarkPasses[I]);
+
+ unwrap(C)->setDiagnosticHandler(std::make_unique<RustDiagnosticHandler>(
+ DiagnosticHandlerCallback, DiagnosticHandlerContext, RemarkAllPasses, Passes));
+}
+
+extern "C" void LLVMRustGetMangledName(LLVMValueRef V, RustStringRef Str) {
+ RawRustStringOstream OS(Str);
+ GlobalValue *GV = unwrap<GlobalValue>(V);
+ Mangler().getNameWithPrefix(OS, GV, true);
+}
+
+// LLVMGetAggregateElement was added in LLVM 15. For earlier LLVM versions,
+// provide an equivalent implementation here.
+#if LLVM_VERSION_LT(15, 0)
+extern "C" LLVMValueRef LLVMGetAggregateElement(LLVMValueRef C, unsigned Idx) {
+ return wrap(unwrap<Constant>(C)->getAggregateElement(Idx));
+}
+#endif
+
+extern "C" int32_t LLVMRustGetElementTypeArgIndex(LLVMValueRef CallSite) {
+#if LLVM_VERSION_GE(15, 0)
+ auto *CB = unwrap<CallBase>(CallSite);
+ switch (CB->getIntrinsicID()) {
+ case Intrinsic::arm_ldrex:
+ return 0;
+ case Intrinsic::arm_strex:
+ return 1;
+ }
+#endif
+ return -1;
+}
diff --git a/compiler/rustc_llvm/src/lib.rs b/compiler/rustc_llvm/src/lib.rs
new file mode 100644
index 000000000..8eade02a4
--- /dev/null
+++ b/compiler/rustc_llvm/src/lib.rs
@@ -0,0 +1,188 @@
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+
+// NOTE: This crate only exists to allow linking on mingw targets.
+
+use libc::{c_char, size_t};
+use std::cell::RefCell;
+use std::slice;
+
+#[repr(C)]
+pub struct RustString {
+ pub bytes: RefCell<Vec<u8>>,
+}
+
+impl RustString {
+ pub fn len(&self) -> usize {
+ self.bytes.borrow().len()
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.bytes.borrow().is_empty()
+ }
+}
+
+/// Appends to a Rust string -- used by RawRustStringOstream.
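+///
+/// # Safety
+///
+/// `ptr` must point to at least `size` bytes that remain valid for the
+/// duration of the call.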
+#[no_mangle]
+pub unsafe extern "C" fn LLVMRustStringWriteImpl(
+ sr: &RustString,
+ ptr: *const c_char,
+ size: size_t,
+) {
+ let slice = slice::from_raw_parts(ptr as *const u8, size as usize);
+
+ sr.bytes.borrow_mut().extend_from_slice(slice);
+}
+
+/// Initialize targets enabled by the build script via `cfg(llvm_component = "...")`.
+/// N.B., this function can't be moved to `rustc_codegen_llvm` because of the `cfg`s.
+pub fn initialize_available_targets() {
+ macro_rules! init_target(
+ ($cfg:meta, $($method:ident),*) => { {
+ #[cfg($cfg)]
+ fn init() {
+ extern "C" {
+ $(fn $method();)*
+ }
+ unsafe {
+ $($method();)*
+ }
+ }
+ #[cfg(not($cfg))]
+ fn init() { }
+ init();
+ } }
+ );
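+    // For example, `init_target!(llvm_component = "x86", LLVMInitializeX86Target)`
+    // expands to an `init` function that declares and calls the listed
+    // `extern "C"` initializers when the build enabled
+    // `cfg(llvm_component = "x86")`, and to a no-op otherwise.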
+ init_target!(
+ llvm_component = "x86",
+ LLVMInitializeX86TargetInfo,
+ LLVMInitializeX86Target,
+ LLVMInitializeX86TargetMC,
+ LLVMInitializeX86AsmPrinter,
+ LLVMInitializeX86AsmParser
+ );
+ init_target!(
+ llvm_component = "arm",
+ LLVMInitializeARMTargetInfo,
+ LLVMInitializeARMTarget,
+ LLVMInitializeARMTargetMC,
+ LLVMInitializeARMAsmPrinter,
+ LLVMInitializeARMAsmParser
+ );
+ init_target!(
+ llvm_component = "aarch64",
+ LLVMInitializeAArch64TargetInfo,
+ LLVMInitializeAArch64Target,
+ LLVMInitializeAArch64TargetMC,
+ LLVMInitializeAArch64AsmPrinter,
+ LLVMInitializeAArch64AsmParser
+ );
+ init_target!(
+ llvm_component = "amdgpu",
+ LLVMInitializeAMDGPUTargetInfo,
+ LLVMInitializeAMDGPUTarget,
+ LLVMInitializeAMDGPUTargetMC,
+ LLVMInitializeAMDGPUAsmPrinter,
+ LLVMInitializeAMDGPUAsmParser
+ );
+ init_target!(
+ llvm_component = "avr",
+ LLVMInitializeAVRTargetInfo,
+ LLVMInitializeAVRTarget,
+ LLVMInitializeAVRTargetMC,
+ LLVMInitializeAVRAsmPrinter,
+ LLVMInitializeAVRAsmParser
+ );
+ init_target!(
+ llvm_component = "m68k",
+ LLVMInitializeM68kTargetInfo,
+ LLVMInitializeM68kTarget,
+ LLVMInitializeM68kTargetMC,
+ LLVMInitializeM68kAsmPrinter,
+ LLVMInitializeM68kAsmParser
+ );
+ init_target!(
+ llvm_component = "mips",
+ LLVMInitializeMipsTargetInfo,
+ LLVMInitializeMipsTarget,
+ LLVMInitializeMipsTargetMC,
+ LLVMInitializeMipsAsmPrinter,
+ LLVMInitializeMipsAsmParser
+ );
+ init_target!(
+ llvm_component = "powerpc",
+ LLVMInitializePowerPCTargetInfo,
+ LLVMInitializePowerPCTarget,
+ LLVMInitializePowerPCTargetMC,
+ LLVMInitializePowerPCAsmPrinter,
+ LLVMInitializePowerPCAsmParser
+ );
+ init_target!(
+ llvm_component = "systemz",
+ LLVMInitializeSystemZTargetInfo,
+ LLVMInitializeSystemZTarget,
+ LLVMInitializeSystemZTargetMC,
+ LLVMInitializeSystemZAsmPrinter,
+ LLVMInitializeSystemZAsmParser
+ );
+ init_target!(
+ llvm_component = "jsbackend",
+ LLVMInitializeJSBackendTargetInfo,
+ LLVMInitializeJSBackendTarget,
+ LLVMInitializeJSBackendTargetMC
+ );
+ init_target!(
+ llvm_component = "msp430",
+ LLVMInitializeMSP430TargetInfo,
+ LLVMInitializeMSP430Target,
+ LLVMInitializeMSP430TargetMC,
+ LLVMInitializeMSP430AsmPrinter,
+ LLVMInitializeMSP430AsmParser
+ );
+ init_target!(
+ llvm_component = "riscv",
+ LLVMInitializeRISCVTargetInfo,
+ LLVMInitializeRISCVTarget,
+ LLVMInitializeRISCVTargetMC,
+ LLVMInitializeRISCVAsmPrinter,
+ LLVMInitializeRISCVAsmParser
+ );
+ init_target!(
+ llvm_component = "sparc",
+ LLVMInitializeSparcTargetInfo,
+ LLVMInitializeSparcTarget,
+ LLVMInitializeSparcTargetMC,
+ LLVMInitializeSparcAsmPrinter,
+ LLVMInitializeSparcAsmParser
+ );
+ init_target!(
+ llvm_component = "nvptx",
+ LLVMInitializeNVPTXTargetInfo,
+ LLVMInitializeNVPTXTarget,
+ LLVMInitializeNVPTXTargetMC,
+ LLVMInitializeNVPTXAsmPrinter
+ );
+ init_target!(
+ llvm_component = "hexagon",
+ LLVMInitializeHexagonTargetInfo,
+ LLVMInitializeHexagonTarget,
+ LLVMInitializeHexagonTargetMC,
+ LLVMInitializeHexagonAsmPrinter,
+ LLVMInitializeHexagonAsmParser
+ );
+ init_target!(
+ llvm_component = "webassembly",
+ LLVMInitializeWebAssemblyTargetInfo,
+ LLVMInitializeWebAssemblyTarget,
+ LLVMInitializeWebAssemblyTargetMC,
+ LLVMInitializeWebAssemblyAsmPrinter,
+ LLVMInitializeWebAssemblyAsmParser
+ );
+ init_target!(
+ llvm_component = "bpf",
+ LLVMInitializeBPFTargetInfo,
+ LLVMInitializeBPFTarget,
+ LLVMInitializeBPFTargetMC,
+ LLVMInitializeBPFAsmPrinter,
+ LLVMInitializeBPFAsmParser
+ );
+}
diff --git a/compiler/rustc_log/Cargo.toml b/compiler/rustc_log/Cargo.toml
new file mode 100644
index 000000000..1b2cde605
--- /dev/null
+++ b/compiler/rustc_log/Cargo.toml
@@ -0,0 +1,16 @@
+[package]
+name = "rustc_log"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+atty = "0.2"
+tracing = "0.1.28"
+tracing-subscriber = { version = "0.3.3", default-features = false, features = ["fmt", "env-filter", "smallvec", "parking_lot", "ansi"] }
+tracing-tree = "0.2.0"
+
+[dev-dependencies]
+rustc_span = { path = "../rustc_span" }
+
+[features]
+max_level_info = ['tracing/max_level_info']
diff --git a/compiler/rustc_log/src/lib.rs b/compiler/rustc_log/src/lib.rs
new file mode 100644
index 000000000..f2ec80b0c
--- /dev/null
+++ b/compiler/rustc_log/src/lib.rs
@@ -0,0 +1,122 @@
+//! This crate allows tools to enable rustc's logging without having to
+//! magically match rustc's tracing crate version.
+//!
+//! For example if someone is working on rustc_ast and wants to write some
+//! minimal code against it to run in a debugger, with access to the `debug!`
+//! logs emitted by rustc_ast, that can be done by writing:
+//!
+//! ```toml
+//! [dependencies]
+//! rustc_ast = { path = "../rust/compiler/rustc_ast" }
+//! rustc_log = { path = "../rust/compiler/rustc_log" }
+//! rustc_span = { path = "../rust/compiler/rustc_span" }
+//! ```
+//!
+//! ```
+//! fn main() {
+//! rustc_log::init_rustc_env_logger().unwrap();
+//!
+//! let edition = rustc_span::edition::Edition::Edition2021;
+//! rustc_span::create_session_globals_then(edition, || {
+//! /* ... */
+//! });
+//! }
+//! ```
+//!
+//! Now `RUSTC_LOG=debug cargo run` will run your minimal main.rs and show
+//! rustc's debug logging. In a workflow like this, one might also add
+//! `std::env::set_var("RUSTC_LOG", "debug")` to the top of main so that `cargo
+//! run` by itself is sufficient to get logs.
+//!
+//! The reason rustc_log is a tiny separate crate, as opposed to exposing the
+//! same things in rustc_driver only, is to enable the above workflow. If you
+//! had to depend on rustc_driver in order to turn on rustc's debug logs, that's
+//! an enormously bigger dependency tree; every change you make to rustc_ast (or
+//! whichever piece of the compiler you are interested in) would involve
+//! rebuilding all the rest of rustc up to rustc_driver in order to run your
+//! main.rs. Whereas by depending only on rustc_log and the few crates you are
+//! debugging, you can make changes inside those crates and quickly run main.rs
+//! to read the debug logs.
+
+use std::env::{self, VarError};
+use std::fmt::{self, Display};
+use std::io;
+use tracing_subscriber::filter::{Directive, EnvFilter, LevelFilter};
+use tracing_subscriber::layer::SubscriberExt;
+
+pub fn init_rustc_env_logger() -> Result<(), Error> {
+ init_env_logger("RUSTC_LOG")
+}
+
+/// In contrast to `init_rustc_env_logger` this allows you to choose an env var
+/// other than `RUSTC_LOG`.
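+///
+/// For example, `rustc_log::init_env_logger("MY_TOOL_LOG")` would read
+/// `MY_TOOL_LOG`, `MY_TOOL_LOG_COLOR`, and `MY_TOOL_LOG_ENTRY_EXIT` (the env
+/// var name here is illustrative).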
+pub fn init_env_logger(env: &str) -> Result<(), Error> {
+ let filter = match env::var(env) {
+ Ok(env) => EnvFilter::new(env),
+ _ => EnvFilter::default().add_directive(Directive::from(LevelFilter::WARN)),
+ };
+
+ let color_logs = match env::var(String::from(env) + "_COLOR") {
+ Ok(value) => match value.as_ref() {
+ "always" => true,
+ "never" => false,
+ "auto" => stderr_isatty(),
+ _ => return Err(Error::InvalidColorValue(value)),
+ },
+ Err(VarError::NotPresent) => stderr_isatty(),
+ Err(VarError::NotUnicode(_value)) => return Err(Error::NonUnicodeColorValue),
+ };
+
+ let verbose_entry_exit = match env::var_os(String::from(env) + "_ENTRY_EXIT") {
+ None => false,
+ Some(v) => &v != "0",
+ };
+
+ let layer = tracing_tree::HierarchicalLayer::default()
+ .with_writer(io::stderr)
+ .with_indent_lines(true)
+ .with_ansi(color_logs)
+ .with_targets(true)
+ .with_verbose_exit(verbose_entry_exit)
+ .with_verbose_entry(verbose_entry_exit)
+ .with_indent_amount(2);
+ #[cfg(parallel_compiler)]
+ let layer = layer.with_thread_ids(true).with_thread_names(true);
+
+ let subscriber = tracing_subscriber::Registry::default().with(filter).with(layer);
+ tracing::subscriber::set_global_default(subscriber).unwrap();
+
+ Ok(())
+}
+
+pub fn stdout_isatty() -> bool {
+ atty::is(atty::Stream::Stdout)
+}
+
+pub fn stderr_isatty() -> bool {
+ atty::is(atty::Stream::Stderr)
+}
+
+#[derive(Debug)]
+pub enum Error {
+ InvalidColorValue(String),
+ NonUnicodeColorValue,
+}
+
+impl std::error::Error for Error {}
+
+impl Display for Error {
+ fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Error::InvalidColorValue(value) => write!(
+ formatter,
+ "invalid log color value '{}': expected one of always, never, or auto",
+ value,
+ ),
+ Error::NonUnicodeColorValue => write!(
+ formatter,
+ "non-Unicode log color value: expected one of always, never, or auto",
+ ),
+ }
+ }
+}
diff --git a/compiler/rustc_macros/Cargo.toml b/compiler/rustc_macros/Cargo.toml
new file mode 100644
index 000000000..25b3aadc1
--- /dev/null
+++ b/compiler/rustc_macros/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "rustc_macros"
+version = "0.1.0"
+edition = "2021"
+
+[lib]
+proc-macro = true
+
+[dependencies]
+annotate-snippets = "0.8.0"
+fluent-bundle = "0.15.2"
+fluent-syntax = "0.11"
+synstructure = "0.12.1"
+syn = { version = "1", features = ["full"] }
+proc-macro2 = "1"
+quote = "1"
+unic-langid = { version = "0.9.0", features = ["macros"] }
diff --git a/compiler/rustc_macros/src/diagnostics/diagnostic.rs b/compiler/rustc_macros/src/diagnostics/diagnostic.rs
new file mode 100644
index 000000000..6b5b8b593
--- /dev/null
+++ b/compiler/rustc_macros/src/diagnostics/diagnostic.rs
@@ -0,0 +1,225 @@
+#![deny(unused_must_use)]
+
+use crate::diagnostics::diagnostic_builder::{DiagnosticDeriveBuilder, DiagnosticDeriveKind};
+use crate::diagnostics::error::{span_err, DiagnosticDeriveError};
+use crate::diagnostics::utils::{build_field_mapping, SetOnce};
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::spanned::Spanned;
+use synstructure::Structure;
+
+/// The central struct for constructing the `into_diagnostic` method from an annotated struct.
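+///
+/// A hypothetical annotated struct, to illustrate the attribute shape this
+/// derive expects (the slug, error code, and field names are placeholders,
+/// not part of this patch):
+///
+/// ```ignore (illustrative)
+/// #[derive(SessionDiagnostic)]
+/// #[error(typeck::example_error, code = "E0123")]
+/// pub struct ExampleError {
+///     #[primary_span]
+///     pub span: Span,
+/// }
+/// ```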
+pub(crate) struct SessionDiagnosticDerive<'a> {
+ structure: Structure<'a>,
+ sess: syn::Ident,
+ builder: DiagnosticDeriveBuilder,
+}
+
+impl<'a> SessionDiagnosticDerive<'a> {
+ pub(crate) fn new(diag: syn::Ident, sess: syn::Ident, structure: Structure<'a>) -> Self {
+ Self {
+ builder: DiagnosticDeriveBuilder {
+ diag,
+ fields: build_field_mapping(&structure),
+ kind: None,
+ code: None,
+ slug: None,
+ },
+ sess,
+ structure,
+ }
+ }
+
+ pub(crate) fn into_tokens(self) -> TokenStream {
+ let SessionDiagnosticDerive { mut structure, sess, mut builder } = self;
+
+ let ast = structure.ast();
+ let (implementation, param_ty) = {
+ if let syn::Data::Struct(..) = ast.data {
+ let preamble = builder.preamble(&structure);
+ let (attrs, args) = builder.body(&mut structure);
+
+ let span = ast.span().unwrap();
+ let diag = &builder.diag;
+ let init = match (builder.kind.value(), builder.slug.value()) {
+ (None, _) => {
+ span_err(span, "diagnostic kind not specified")
+ .help("use the `#[error(...)]` attribute to create an error")
+ .emit();
+ return DiagnosticDeriveError::ErrorHandled.to_compile_error();
+ }
+ (Some(kind), None) => {
+ span_err(span, "diagnostic slug not specified")
+ .help(&format!(
+ "specify the slug as the first argument to the attribute, such as \
+ `#[{}(typeck::example_error)]`",
+ kind.descr()
+ ))
+ .emit();
+ return DiagnosticDeriveError::ErrorHandled.to_compile_error();
+ }
+ (Some(DiagnosticDeriveKind::Lint), _) => {
+ span_err(span, "only `#[error(..)]` and `#[warning(..)]` are supported")
+                            .help("use the `#[error(...)]` attribute to create an error")
+ .emit();
+ return DiagnosticDeriveError::ErrorHandled.to_compile_error();
+ }
+ (Some(DiagnosticDeriveKind::Error), Some(slug)) => {
+ quote! {
+ let mut #diag = #sess.struct_err(rustc_errors::fluent::#slug);
+ }
+ }
+ (Some(DiagnosticDeriveKind::Warn), Some(slug)) => {
+ quote! {
+ let mut #diag = #sess.struct_warn(rustc_errors::fluent::#slug);
+ }
+ }
+ };
+
+ let implementation = quote! {
+ #init
+ #preamble
+ match self {
+ #attrs
+ }
+ match self {
+ #args
+ }
+ #diag
+ };
+ let param_ty = match builder.kind {
+ Some((DiagnosticDeriveKind::Error, _)) => {
+ quote! { rustc_errors::ErrorGuaranteed }
+ }
+ Some((DiagnosticDeriveKind::Lint | DiagnosticDeriveKind::Warn, _)) => {
+ quote! { () }
+ }
+ _ => unreachable!(),
+ };
+
+ (implementation, param_ty)
+ } else {
+ span_err(
+ ast.span().unwrap(),
+ "`#[derive(SessionDiagnostic)]` can only be used on structs",
+ )
+ .emit();
+
+ let implementation = DiagnosticDeriveError::ErrorHandled.to_compile_error();
+ let param_ty = quote! { rustc_errors::ErrorGuaranteed };
+ (implementation, param_ty)
+ }
+ };
+
+ structure.gen_impl(quote! {
+ gen impl<'__session_diagnostic_sess> rustc_session::SessionDiagnostic<'__session_diagnostic_sess, #param_ty>
+ for @Self
+ {
+ fn into_diagnostic(
+ self,
+ #sess: &'__session_diagnostic_sess rustc_session::parse::ParseSess
+ ) -> rustc_errors::DiagnosticBuilder<'__session_diagnostic_sess, #param_ty> {
+ use rustc_errors::IntoDiagnosticArg;
+ #implementation
+ }
+ }
+ })
+ }
+}
+
+/// The central struct for constructing the `decorate_lint` method from an annotated struct.
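+///
+/// A hypothetical annotated struct (slug and field names are placeholders):
+///
+/// ```ignore (illustrative)
+/// #[derive(LintDiagnostic)]
+/// #[lint(typeck::example_lint)]
+/// pub struct ExampleLint {
+///     #[label]
+///     pub span: Span,
+/// }
+/// ```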
+pub(crate) struct LintDiagnosticDerive<'a> {
+ structure: Structure<'a>,
+ builder: DiagnosticDeriveBuilder,
+}
+
+impl<'a> LintDiagnosticDerive<'a> {
+ pub(crate) fn new(diag: syn::Ident, structure: Structure<'a>) -> Self {
+ Self {
+ builder: DiagnosticDeriveBuilder {
+ diag,
+ fields: build_field_mapping(&structure),
+ kind: None,
+ code: None,
+ slug: None,
+ },
+ structure,
+ }
+ }
+
+ pub(crate) fn into_tokens(self) -> TokenStream {
+ let LintDiagnosticDerive { mut structure, mut builder } = self;
+
+ let ast = structure.ast();
+ let implementation = {
+ if let syn::Data::Struct(..) = ast.data {
+ let preamble = builder.preamble(&structure);
+ let (attrs, args) = builder.body(&mut structure);
+
+ let diag = &builder.diag;
+ let span = ast.span().unwrap();
+ let init = match (builder.kind.value(), builder.slug.value()) {
+ (None, _) => {
+ span_err(span, "diagnostic kind not specified")
+ .help("use the `#[error(...)]` attribute to create an error")
+ .emit();
+ return DiagnosticDeriveError::ErrorHandled.to_compile_error();
+ }
+ (Some(kind), None) => {
+ span_err(span, "diagnostic slug not specified")
+ .help(&format!(
+ "specify the slug as the first argument to the attribute, such as \
+ `#[{}(typeck::example_error)]`",
+ kind.descr()
+ ))
+ .emit();
+ return DiagnosticDeriveError::ErrorHandled.to_compile_error();
+ }
+ (Some(DiagnosticDeriveKind::Error | DiagnosticDeriveKind::Warn), _) => {
+ span_err(span, "only `#[lint(..)]` is supported")
+ .help("use the `#[lint(...)]` attribute to create a lint")
+ .emit();
+ return DiagnosticDeriveError::ErrorHandled.to_compile_error();
+ }
+ (Some(DiagnosticDeriveKind::Lint), Some(slug)) => {
+ quote! {
+ let mut #diag = #diag.build(rustc_errors::fluent::#slug);
+ }
+ }
+ };
+
+ let implementation = quote! {
+ #init
+ #preamble
+ match self {
+ #attrs
+ }
+ match self {
+ #args
+ }
+ #diag.emit();
+ };
+
+ implementation
+ } else {
+ span_err(
+ ast.span().unwrap(),
+ "`#[derive(LintDiagnostic)]` can only be used on structs",
+ )
+ .emit();
+
+ DiagnosticDeriveError::ErrorHandled.to_compile_error()
+ }
+ };
+
+ let diag = &builder.diag;
+ structure.gen_impl(quote! {
+ gen impl<'__a> rustc_errors::DecorateLint<'__a, ()> for @Self {
+ fn decorate_lint(self, #diag: rustc_errors::LintDiagnosticBuilder<'__a, ()>) {
+ use rustc_errors::IntoDiagnosticArg;
+ #implementation
+ }
+ }
+ })
+ }
+}
diff --git a/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs b/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
new file mode 100644
index 000000000..6c9561925
--- /dev/null
+++ b/compiler/rustc_macros/src/diagnostics/diagnostic_builder.rs
@@ -0,0 +1,630 @@
+#![deny(unused_must_use)]
+
+use crate::diagnostics::error::{
+ invalid_nested_attr, span_err, throw_invalid_attr, throw_invalid_nested_attr, throw_span_err,
+ DiagnosticDeriveError,
+};
+use crate::diagnostics::utils::{
+ report_error_if_not_applied_to_span, report_type_error, type_is_unit, type_matches_path,
+ Applicability, FieldInfo, FieldInnerTy, HasFieldMap, SetOnce,
+};
+use proc_macro2::{Ident, Span, TokenStream};
+use quote::{format_ident, quote};
+use std::collections::HashMap;
+use std::str::FromStr;
+use syn::{
+ parse_quote, spanned::Spanned, Attribute, Field, Meta, MetaList, MetaNameValue, NestedMeta,
+ Path, Type,
+};
+use synstructure::{BindingInfo, Structure};
+
+/// What kind of diagnostic is being derived - an error, a warning or a lint?
+#[derive(Copy, Clone)]
+pub(crate) enum DiagnosticDeriveKind {
+ /// `#[error(..)]`
+ Error,
+ /// `#[warn(..)]`
+ Warn,
+ /// `#[lint(..)]`
+ Lint,
+}
+
+impl DiagnosticDeriveKind {
+    /// Returns a human-readable string corresponding to the kind.
+ pub fn descr(&self) -> &'static str {
+ match self {
+ DiagnosticDeriveKind::Error => "error",
+ DiagnosticDeriveKind::Warn => "warning",
+ DiagnosticDeriveKind::Lint => "lint",
+ }
+ }
+}
+
+/// Tracks persistent information required for building up individual calls to diagnostic methods
+/// for generated diagnostic derives - both `SessionDiagnostic` for errors/warnings and
+/// `LintDiagnostic` for lints.
+pub(crate) struct DiagnosticDeriveBuilder {
+ /// The identifier to use for the generated `DiagnosticBuilder` instance.
+ pub diag: syn::Ident,
+
+ /// Store a map of field name to its corresponding field. This is built on construction of the
+ /// derive builder.
+ pub fields: HashMap<String, TokenStream>,
+
+ /// Kind of diagnostic requested via the struct attribute.
+ pub kind: Option<(DiagnosticDeriveKind, proc_macro::Span)>,
+    /// Slug is a mandatory part of the struct attribute and corresponds to the Fluent message
+    /// that has the actual diagnostic message.
+ pub slug: Option<(Path, proc_macro::Span)>,
+    /// Error codes are an optional part of the struct attribute - this is only set to detect
+    /// multiple specifications.
+ pub code: Option<(String, proc_macro::Span)>,
+}
+
+impl HasFieldMap for DiagnosticDeriveBuilder {
+ fn get_field_binding(&self, field: &String) -> Option<&TokenStream> {
+ self.fields.get(field)
+ }
+}
+
+impl DiagnosticDeriveBuilder {
+ pub fn preamble<'s>(&mut self, structure: &Structure<'s>) -> TokenStream {
+ let ast = structure.ast();
+ let attrs = &ast.attrs;
+ let preamble = attrs.iter().map(|attr| {
+ self.generate_structure_code_for_attr(attr).unwrap_or_else(|v| v.to_compile_error())
+ });
+
+ quote! {
+ #(#preamble)*;
+ }
+ }
+
+ pub fn body<'s>(&mut self, structure: &mut Structure<'s>) -> (TokenStream, TokenStream) {
+ // Keep track of which fields need to be handled with a by-move binding.
+ let mut needs_moved = std::collections::HashSet::new();
+
+ // Generates calls to `span_label` and similar functions based on the attributes
+ // on fields. Code for suggestions uses formatting machinery and the value of
+ // other fields - because any given field can be referenced multiple times, it
+ // should be accessed through a borrow. When passing fields to `add_subdiagnostic`
+ // or `set_arg` (which happens below) for Fluent, we want to move the data, so that
+ // has to happen in a separate pass over the fields.
+ let attrs = structure
+ .clone()
+ .filter(|field_binding| {
+ let ast = &field_binding.ast();
+ !self.needs_move(ast) || {
+ needs_moved.insert(field_binding.binding.clone());
+ false
+ }
+ })
+ .each(|field_binding| self.generate_field_attrs_code(field_binding));
+
+ structure.bind_with(|_| synstructure::BindStyle::Move);
+ // When a field has attributes like `#[label]` or `#[note]` then it doesn't
+ // need to be passed as an argument to the diagnostic. But when a field has no
+ // attributes or a `#[subdiagnostic]` attribute then it must be passed as an
+ // argument to the diagnostic so that it can be referred to by Fluent messages.
+ let args = structure
+ .filter(|field_binding| needs_moved.contains(&field_binding.binding))
+ .each(|field_binding| self.generate_field_attrs_code(field_binding));
+
+ (attrs, args)
+ }
+
+ /// Returns `true` if `field` should generate a `set_arg` call rather than any other diagnostic
+ /// call (like `span_label`).
+ fn should_generate_set_arg(&self, field: &Field) -> bool {
+ field.attrs.is_empty()
+ }
+
+ /// Returns `true` if `field` needs to have code generated in the by-move branch of the
+ /// generated derive rather than the by-ref branch.
+ fn needs_move(&self, field: &Field) -> bool {
+ let generates_set_arg = self.should_generate_set_arg(field);
+ let is_multispan = type_matches_path(&field.ty, &["rustc_errors", "MultiSpan"]);
+ // FIXME(davidtwco): better support for one field needing to be in the by-move and
+ // by-ref branches.
+ let is_subdiagnostic = field
+ .attrs
+ .iter()
+ .map(|attr| attr.path.segments.last().unwrap().ident.to_string())
+ .any(|attr| attr == "subdiagnostic");
+
+ // `set_arg` calls take their argument by-move..
+ generates_set_arg
+ // If this is a `MultiSpan` field then it needs to be moved to be used by any
+ // attribute..
+ || is_multispan
+ // If this a `#[subdiagnostic]` then it needs to be moved as the other diagnostic is
+ // unlikely to be `Copy`..
+ || is_subdiagnostic
+ }
+
+ /// Establishes state in the `DiagnosticDeriveBuilder` resulting from the struct
+    /// attributes like `#[error(..)]`, such as the diagnostic kind and slug. Generates
+ /// diagnostic builder calls for setting error code and creating note/help messages.
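+    ///
+    /// As an illustrative sketch (`typeck::slug` is a hypothetical slug), an attribute of
+    /// `#[error(typeck::slug, code = "E0505")]` records the kind and slug and generates
+    /// roughly:
+    ///
+    /// ```ignore (illustrative)
+    /// diag.code(rustc_errors::DiagnosticId::Error("E0505".to_string()));
+    /// ```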
+ fn generate_structure_code_for_attr(
+ &mut self,
+ attr: &Attribute,
+ ) -> Result<TokenStream, DiagnosticDeriveError> {
+ let diag = &self.diag;
+ let span = attr.span().unwrap();
+
+ let name = attr.path.segments.last().unwrap().ident.to_string();
+ let name = name.as_str();
+ let meta = attr.parse_meta()?;
+
+ let is_help_note_or_warn = matches!(name, "help" | "note" | "warn_");
+
+ let nested = match meta {
+            // Most attributes are lists, like `#[error(..)]`/`#[warning(..)]`, or
+            // `#[help(..)]`/`#[note(..)]` when the user specifies an alternative slug.
+ Meta::List(MetaList { ref nested, .. }) => nested,
+ // Subdiagnostics without spans can be applied to the type too, and these are just
+ // paths: `#[help]` and `#[note]`
+ Meta::Path(_) if is_help_note_or_warn => {
+ let fn_name = if name == "warn_" {
+ Ident::new("warn", attr.span())
+ } else {
+ Ident::new(name, attr.span())
+ };
+ return Ok(quote! { #diag.#fn_name(rustc_errors::fluent::_subdiag::#fn_name); });
+ }
+ _ => throw_invalid_attr!(attr, &meta),
+ };
+
+ // Check the kind before doing any further processing so that there aren't misleading
+ // "no kind specified" errors if there are failures later.
+ match name {
+ "error" => self.kind.set_once((DiagnosticDeriveKind::Error, span)),
+ "warning" => self.kind.set_once((DiagnosticDeriveKind::Warn, span)),
+ "lint" => self.kind.set_once((DiagnosticDeriveKind::Lint, span)),
+ "help" | "note" | "warn_" => (),
+ _ => throw_invalid_attr!(attr, &meta, |diag| {
+                diag.help(
+                    "only `error`, `warning`, `lint`, `help`, `note` and `warn_` are valid \
+                     attributes",
+                )
+ }),
+ }
+
+ // First nested element should always be the path, e.g. `#[error(typeck::invalid)]` or
+ // `#[help(typeck::another_help)]`.
+ let mut nested_iter = nested.into_iter();
+ if let Some(nested_attr) = nested_iter.next() {
+ // Report an error if there are any other list items after the path.
+ if is_help_note_or_warn && nested_iter.next().is_some() {
+ throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+ diag.help(
+ "`help`, `note` and `warn_` struct attributes can only have one argument",
+ )
+ });
+ }
+
+ match nested_attr {
+ NestedMeta::Meta(Meta::Path(path)) if is_help_note_or_warn => {
+ let fn_name = proc_macro2::Ident::new(name, attr.span());
+ return Ok(quote! { #diag.#fn_name(rustc_errors::fluent::#path); });
+ }
+ NestedMeta::Meta(Meta::Path(path)) => {
+ self.slug.set_once((path.clone(), span));
+ }
+ NestedMeta::Meta(meta @ Meta::NameValue(_))
+ if !is_help_note_or_warn
+ && meta.path().segments.last().unwrap().ident == "code" =>
+ {
+ // don't error for valid follow-up attributes
+ }
+ nested_attr => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+ diag.help("first argument of the attribute should be the diagnostic slug")
+ }),
+ };
+ }
+
+ // Remaining attributes are optional, only `code = ".."` at the moment.
+ let mut tokens = Vec::new();
+ for nested_attr in nested_iter {
+ let meta = match nested_attr {
+ syn::NestedMeta::Meta(meta) => meta,
+ _ => throw_invalid_nested_attr!(attr, &nested_attr),
+ };
+
+ let path = meta.path();
+ let nested_name = path.segments.last().unwrap().ident.to_string();
+ // Struct attributes are only allowed to be applied once, and the diagnostic
+ // changes will be set in the initialisation code.
+ if let Meta::NameValue(MetaNameValue { lit: syn::Lit::Str(s), .. }) = &meta {
+ let span = s.span().unwrap();
+ match nested_name.as_str() {
+ "code" => {
+ self.code.set_once((s.value(), span));
+ let code = &self.code.as_ref().map(|(v, _)| v);
+ tokens.push(quote! {
+ #diag.code(rustc_errors::DiagnosticId::Error(#code.to_string()));
+ });
+ }
+ _ => invalid_nested_attr(attr, &nested_attr)
+ .help("only `code` is a valid nested attributes following the slug")
+ .emit(),
+ }
+ } else {
+ invalid_nested_attr(attr, &nested_attr).emit()
+ }
+ }
+
+ Ok(tokens.drain(..).collect())
+ }
+
+ fn generate_field_attrs_code(&mut self, binding_info: &BindingInfo<'_>) -> TokenStream {
+ let field = binding_info.ast();
+ let field_binding = &binding_info.binding;
+
+ if self.should_generate_set_arg(&field) {
+ let diag = &self.diag;
+ let ident = field.ident.as_ref().unwrap();
+ return quote! {
+ #diag.set_arg(
+ stringify!(#ident),
+ #field_binding
+ );
+ };
+ }
+
+ let needs_move = self.needs_move(&field);
+ let inner_ty = FieldInnerTy::from_type(&field.ty);
+
+ field
+ .attrs
+ .iter()
+ .map(move |attr| {
+ let name = attr.path.segments.last().unwrap().ident.to_string();
+ let needs_clone =
+ name == "primary_span" && matches!(inner_ty, FieldInnerTy::Vec(_));
+ let (binding, needs_destructure) = if needs_clone {
+ // `primary_span` can accept a `Vec<Span>` so don't destructure that.
+ (quote! { #field_binding.clone() }, false)
+ } else if needs_move {
+ (quote! { #field_binding }, true)
+ } else {
+ (quote! { *#field_binding }, true)
+ };
+
+ let generated_code = self
+ .generate_inner_field_code(
+ attr,
+ FieldInfo {
+ binding: binding_info,
+ ty: inner_ty.inner_type().unwrap_or(&field.ty),
+ span: &field.span(),
+ },
+ binding,
+ )
+ .unwrap_or_else(|v| v.to_compile_error());
+
+ if needs_destructure {
+ inner_ty.with(field_binding, generated_code)
+ } else {
+ generated_code
+ }
+ })
+ .collect()
+ }
+
+ fn generate_inner_field_code(
+ &mut self,
+ attr: &Attribute,
+ info: FieldInfo<'_>,
+ binding: TokenStream,
+ ) -> Result<TokenStream, DiagnosticDeriveError> {
+ let meta = attr.parse_meta()?;
+ match meta {
+ Meta::Path(_) => self.generate_inner_field_code_path(attr, info, binding),
+ Meta::List(MetaList { .. }) => self.generate_inner_field_code_list(attr, info, binding),
+ _ => throw_invalid_attr!(attr, &meta),
+ }
+ }
+
+ fn generate_inner_field_code_path(
+ &mut self,
+ attr: &Attribute,
+ info: FieldInfo<'_>,
+ binding: TokenStream,
+ ) -> Result<TokenStream, DiagnosticDeriveError> {
+ assert!(matches!(attr.parse_meta()?, Meta::Path(_)));
+ let diag = &self.diag;
+
+ let meta = attr.parse_meta()?;
+
+ let ident = &attr.path.segments.last().unwrap().ident;
+ let name = ident.to_string();
+ let name = name.as_str();
+ match name {
+ "skip_arg" => {
+ // Don't need to do anything - by virtue of the attribute existing, the
+ // `set_arg` call will not be generated.
+ Ok(quote! {})
+ }
+ "primary_span" => {
+ report_error_if_not_applied_to_span(attr, &info)?;
+ Ok(quote! {
+ #diag.set_span(#binding);
+ })
+ }
+ "label" => {
+ report_error_if_not_applied_to_span(attr, &info)?;
+ Ok(self.add_spanned_subdiagnostic(binding, ident, parse_quote! { _subdiag::label }))
+ }
+ "note" | "help" | "warn_" => {
+ let warn_ident = Ident::new("warn", Span::call_site());
+ let (ident, path) = match name {
+ "note" => (ident, parse_quote! { _subdiag::note }),
+ "help" => (ident, parse_quote! { _subdiag::help }),
+ "warn_" => (&warn_ident, parse_quote! { _subdiag::warn }),
+ _ => unreachable!(),
+ };
+ if type_matches_path(&info.ty, &["rustc_span", "Span"]) {
+ Ok(self.add_spanned_subdiagnostic(binding, ident, path))
+ } else if type_is_unit(&info.ty) {
+ Ok(self.add_subdiagnostic(ident, path))
+ } else {
+ report_type_error(attr, "`Span` or `()`")?
+ }
+ }
+ "subdiagnostic" => Ok(quote! { #diag.subdiagnostic(#binding); }),
+ _ => throw_invalid_attr!(attr, &meta, |diag| {
+                diag.help(
+                    "only `skip_arg`, `primary_span`, `label`, `note`, `help`, `warn_` and \
+                     `subdiagnostic` are valid field attributes",
+                )
+ }),
+ }
+ }
+
+ fn generate_inner_field_code_list(
+ &mut self,
+ attr: &Attribute,
+ info: FieldInfo<'_>,
+ binding: TokenStream,
+ ) -> Result<TokenStream, DiagnosticDeriveError> {
+ let meta = attr.parse_meta()?;
+ let Meta::List(MetaList { ref path, ref nested, .. }) = meta else { unreachable!() };
+
+ let ident = &attr.path.segments.last().unwrap().ident;
+ let name = path.segments.last().unwrap().ident.to_string();
+ let name = name.as_ref();
+ match name {
+ "suggestion" | "suggestion_short" | "suggestion_hidden" | "suggestion_verbose" => {
+ return self.generate_inner_field_code_suggestion(attr, info);
+ }
+ "label" | "help" | "note" | "warn_" => (),
+ _ => throw_invalid_attr!(attr, &meta, |diag| {
+ diag.help(
+ "only `label`, `help`, `note`, `warn` or `suggestion{,_short,_hidden,_verbose}` are \
+ valid field attributes",
+ )
+ }),
+ }
+
+ // For `#[label(..)]`, `#[note(..)]` and `#[help(..)]`, the first nested element must be a
+ // path, e.g. `#[label(typeck::label)]`.
+ let mut nested_iter = nested.into_iter();
+ let msg = match nested_iter.next() {
+ Some(NestedMeta::Meta(Meta::Path(path))) => path.clone(),
+ Some(nested_attr) => throw_invalid_nested_attr!(attr, &nested_attr),
+ None => throw_invalid_attr!(attr, &meta),
+ };
+
+ // None of these attributes should have anything following the slug.
+ if nested_iter.next().is_some() {
+ throw_invalid_attr!(attr, &meta);
+ }
+
+ match name {
+ "label" => {
+ report_error_if_not_applied_to_span(attr, &info)?;
+ Ok(self.add_spanned_subdiagnostic(binding, ident, msg))
+ }
+ "note" | "help" if type_matches_path(&info.ty, &["rustc_span", "Span"]) => {
+ Ok(self.add_spanned_subdiagnostic(binding, ident, msg))
+ }
+ "note" | "help" if type_is_unit(&info.ty) => Ok(self.add_subdiagnostic(ident, msg)),
+            // `warn_` must be special-cased because `warn` is already a built-in attribute and
+            // so can't be used, despite the diagnostic API method being named `warn`.
+ "warn_" if type_matches_path(&info.ty, &["rustc_span", "Span"]) => Ok(self
+ .add_spanned_subdiagnostic(binding, &Ident::new("warn", Span::call_site()), msg)),
+ "warn_" if type_is_unit(&info.ty) => {
+ Ok(self.add_subdiagnostic(&Ident::new("warn", Span::call_site()), msg))
+ }
+ "note" | "help" | "warn_" => report_type_error(attr, "`Span` or `()`")?,
+ _ => unreachable!(),
+ }
+ }
+
+ fn generate_inner_field_code_suggestion(
+ &mut self,
+ attr: &Attribute,
+ info: FieldInfo<'_>,
+ ) -> Result<TokenStream, DiagnosticDeriveError> {
+ let diag = &self.diag;
+
+ let mut meta = attr.parse_meta()?;
+ let Meta::List(MetaList { ref path, ref mut nested, .. }) = meta else { unreachable!() };
+
+ let (span_field, mut applicability) = self.span_and_applicability_of_ty(info)?;
+
+ let mut msg = None;
+ let mut code = None;
+
+ let mut nested_iter = nested.into_iter().peekable();
+ if let Some(nested_attr) = nested_iter.peek() {
+ if let NestedMeta::Meta(Meta::Path(path)) = nested_attr {
+ msg = Some(path.clone());
+ }
+ };
+        // Advance the iterator if a path was found (and leave it in place otherwise, so that
+        // `code`/`applicability` can still be parsed or an error emitted).
+ if msg.is_some() {
+ let _ = nested_iter.next();
+ }
+
+ for nested_attr in nested_iter {
+ let meta = match nested_attr {
+ syn::NestedMeta::Meta(ref meta) => meta,
+ syn::NestedMeta::Lit(_) => throw_invalid_nested_attr!(attr, &nested_attr),
+ };
+
+ let nested_name = meta.path().segments.last().unwrap().ident.to_string();
+ let nested_name = nested_name.as_str();
+ match meta {
+ Meta::NameValue(MetaNameValue { lit: syn::Lit::Str(s), .. }) => {
+ let span = meta.span().unwrap();
+ match nested_name {
+ "code" => {
+ let formatted_str = self.build_format(&s.value(), s.span());
+ code = Some(formatted_str);
+ }
+ "applicability" => {
+ applicability = match applicability {
+ Some(v) => {
+ span_err(
+ span,
+ "applicability cannot be set in both the field and \
+ attribute",
+ )
+ .emit();
+ Some(v)
+ }
+ None => match Applicability::from_str(&s.value()) {
+ Ok(v) => Some(quote! { #v }),
+ Err(()) => {
+ span_err(span, "invalid applicability").emit();
+ None
+ }
+ },
+ }
+ }
+ _ => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+ diag.help(
+ "only `message`, `code` and `applicability` are valid field \
+ attributes",
+ )
+ }),
+ }
+ }
+ _ => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+ if matches!(meta, Meta::Path(_)) {
+ diag.help("a diagnostic slug must be the first argument to the attribute")
+ } else {
+ diag
+ }
+ }),
+ }
+ }
+
+ let applicability =
+ applicability.unwrap_or_else(|| quote!(rustc_errors::Applicability::Unspecified));
+
+ let name = path.segments.last().unwrap().ident.to_string();
+ let method = format_ident!("span_{}", name);
+
+ let msg = msg.unwrap_or_else(|| parse_quote! { _subdiag::suggestion });
+ let msg = quote! { rustc_errors::fluent::#msg };
+ let code = code.unwrap_or_else(|| quote! { String::new() });
+
+ Ok(quote! { #diag.#method(#span_field, #msg, #code, #applicability); })
+ }
+
+ /// Adds a spanned subdiagnostic by generating a `diag.span_$kind` call with the current slug
+ /// and `fluent_attr_identifier`.
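+    ///
+    /// For example (a sketch of the generated tokens), a `kind` of `label` with a
+    /// `fluent_attr_identifier` of `_subdiag::label` expands to roughly:
+    ///
+    /// ```ignore (illustrative)
+    /// diag.span_label(binding, rustc_errors::fluent::_subdiag::label);
+    /// ```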
+ fn add_spanned_subdiagnostic(
+ &self,
+ field_binding: TokenStream,
+ kind: &Ident,
+ fluent_attr_identifier: Path,
+ ) -> TokenStream {
+ let diag = &self.diag;
+ let fn_name = format_ident!("span_{}", kind);
+ quote! {
+ #diag.#fn_name(
+ #field_binding,
+ rustc_errors::fluent::#fluent_attr_identifier
+ );
+ }
+ }
+
+    /// Adds a subdiagnostic by generating a `diag.$kind` call with the current slug
+ /// and `fluent_attr_identifier`.
+ fn add_subdiagnostic(&self, kind: &Ident, fluent_attr_identifier: Path) -> TokenStream {
+ let diag = &self.diag;
+ quote! {
+ #diag.#kind(rustc_errors::fluent::#fluent_attr_identifier);
+ }
+ }
+
+ fn span_and_applicability_of_ty(
+ &self,
+ info: FieldInfo<'_>,
+ ) -> Result<(TokenStream, Option<TokenStream>), DiagnosticDeriveError> {
+ match &info.ty {
+            // If `ty` is `Span` without an applicability, then use `Applicability::Unspecified`.
+ ty @ Type::Path(..) if type_matches_path(ty, &["rustc_span", "Span"]) => {
+ let binding = &info.binding.binding;
+ Ok((quote!(*#binding), None))
+ }
+ // If `ty` is `(Span, Applicability)` then return tokens accessing those.
+ Type::Tuple(tup) => {
+ let mut span_idx = None;
+ let mut applicability_idx = None;
+
+ for (idx, elem) in tup.elems.iter().enumerate() {
+ if type_matches_path(elem, &["rustc_span", "Span"]) {
+ if span_idx.is_none() {
+ span_idx = Some(syn::Index::from(idx));
+ } else {
+ throw_span_err!(
+ info.span.unwrap(),
+ "type of field annotated with `#[suggestion(...)]` contains more \
+ than one `Span`"
+ );
+ }
+ } else if type_matches_path(elem, &["rustc_errors", "Applicability"]) {
+ if applicability_idx.is_none() {
+ applicability_idx = Some(syn::Index::from(idx));
+ } else {
+ throw_span_err!(
+ info.span.unwrap(),
+ "type of field annotated with `#[suggestion(...)]` contains more \
+ than one Applicability"
+ );
+ }
+ }
+ }
+
+ if let Some(span_idx) = span_idx {
+ let binding = &info.binding.binding;
+ let span = quote!(#binding.#span_idx);
+ let applicability = applicability_idx
+ .map(|applicability_idx| quote!(#binding.#applicability_idx))
+ .unwrap_or_else(|| quote!(rustc_errors::Applicability::Unspecified));
+
+ return Ok((span, Some(applicability)));
+ }
+
+ throw_span_err!(info.span.unwrap(), "wrong types for suggestion", |diag| {
+ diag.help(
+ "`#[suggestion(...)]` on a tuple field must be applied to fields of type \
+ `(Span, Applicability)`",
+ )
+ });
+ }
+ // If `ty` isn't a `Span` or `(Span, Applicability)` then emit an error.
+ _ => throw_span_err!(info.span.unwrap(), "wrong field type for suggestion", |diag| {
+ diag.help(
+ "`#[suggestion(...)]` should be applied to fields of type `Span` or \
+ `(Span, Applicability)`",
+ )
+ }),
+ }
+ }
+}
diff --git a/compiler/rustc_macros/src/diagnostics/error.rs b/compiler/rustc_macros/src/diagnostics/error.rs
new file mode 100644
index 000000000..0b1ededa7
--- /dev/null
+++ b/compiler/rustc_macros/src/diagnostics/error.rs
@@ -0,0 +1,141 @@
+use proc_macro::{Diagnostic, Level, MultiSpan};
+use proc_macro2::TokenStream;
+use quote::quote;
+use syn::{spanned::Spanned, Attribute, Error as SynError, Meta, NestedMeta};
+
+#[derive(Debug)]
+pub(crate) enum DiagnosticDeriveError {
+ SynError(SynError),
+ ErrorHandled,
+}
+
+impl DiagnosticDeriveError {
+ pub(crate) fn to_compile_error(self) -> TokenStream {
+ match self {
+ DiagnosticDeriveError::SynError(e) => e.to_compile_error(),
+ DiagnosticDeriveError::ErrorHandled => {
+                // Return `unreachable!()` (which has type `!`) to avoid having to create a
+                // blank `DiagnosticBuilder` to return when an error has already been emitted
+                // to the compiler.
+ quote! {
+ { unreachable!(); }
+ }
+ }
+ }
+ }
+}
+
+impl From<SynError> for DiagnosticDeriveError {
+ fn from(e: SynError) -> Self {
+ DiagnosticDeriveError::SynError(e)
+ }
+}
+
+/// Helper function for use with `throw_*` macros - constrains `$f` to an `impl FnOnce`.
+pub(crate) fn _throw_err(
+ diag: Diagnostic,
+ f: impl FnOnce(Diagnostic) -> Diagnostic,
+) -> DiagnosticDeriveError {
+ f(diag).emit();
+ DiagnosticDeriveError::ErrorHandled
+}
+
+/// Helper function for printing `syn::Path` - doesn't handle arguments in paths, which are
+/// unlikely to come up much in uses of this macro.
+fn path_to_string(path: &syn::Path) -> String {
+ let mut out = String::new();
+ for (i, segment) in path.segments.iter().enumerate() {
+ if i > 0 || path.leading_colon.is_some() {
+ out.push_str("::");
+ }
+ out.push_str(&segment.ident.to_string());
+ }
+ out
+}
+
+/// Returns an error diagnostic on span `span` with msg `msg`.
+pub(crate) fn span_err(span: impl MultiSpan, msg: &str) -> Diagnostic {
+ Diagnostic::spanned(span, Level::Error, msg)
+}
+
+/// Emit a diagnostic on span `$span` with msg `$msg` (optionally performing additional decoration
+/// using the `FnOnce` passed in `diag`) and return `Err(ErrorHandled)`.
+///
+/// For use in functions that return a `Result<_, DiagnosticDeriveError>`.
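+///
+/// A typical use (illustrative; the message is hypothetical):
+///
+/// ```ignore (illustrative)
+/// throw_span_err!(attr.span().unwrap(), "diagnostic slug must be the first argument");
+/// ```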
+macro_rules! throw_span_err {
+ ($span:expr, $msg:expr) => {{ throw_span_err!($span, $msg, |diag| diag) }};
+ ($span:expr, $msg:expr, $f:expr) => {{
+ let diag = span_err($span, $msg);
+ return Err(crate::diagnostics::error::_throw_err(diag, $f));
+ }};
+}
+
+pub(crate) use throw_span_err;
+
+/// Returns an error diagnostic for an invalid attribute.
+pub(crate) fn invalid_attr(attr: &Attribute, meta: &Meta) -> Diagnostic {
+ let span = attr.span().unwrap();
+ let path = path_to_string(&attr.path);
+ match meta {
+ Meta::Path(_) => span_err(span, &format!("`#[{}]` is not a valid attribute", path)),
+ Meta::NameValue(_) => {
+ span_err(span, &format!("`#[{} = ...]` is not a valid attribute", path))
+ }
+ Meta::List(_) => span_err(span, &format!("`#[{}(...)]` is not a valid attribute", path)),
+ }
+}
+
+/// Emit an error diagnostic for an invalid attribute (optionally performing additional decoration
+/// using the `FnOnce` passed in `diag`) and return `Err(ErrorHandled)`.
+///
+/// For use in functions that return a `Result<_, DiagnosticDeriveError>`.
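+///
+/// A typical use (illustrative), decorating the error with additional help:
+///
+/// ```ignore (illustrative)
+/// throw_invalid_attr!(attr, &meta, |diag| {
+///     diag.help("first argument of the attribute should be the diagnostic slug")
+/// });
+/// ```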
+macro_rules! throw_invalid_attr {
+ ($attr:expr, $meta:expr) => {{ throw_invalid_attr!($attr, $meta, |diag| diag) }};
+ ($attr:expr, $meta:expr, $f:expr) => {{
+ let diag = crate::diagnostics::error::invalid_attr($attr, $meta);
+ return Err(crate::diagnostics::error::_throw_err(diag, $f));
+ }};
+}
+
+pub(crate) use throw_invalid_attr;
+
+/// Returns an error diagnostic for an invalid nested attribute.
+pub(crate) fn invalid_nested_attr(attr: &Attribute, nested: &NestedMeta) -> Diagnostic {
+ let name = attr.path.segments.last().unwrap().ident.to_string();
+ let name = name.as_str();
+
+ let span = nested.span().unwrap();
+ let meta = match nested {
+ syn::NestedMeta::Meta(meta) => meta,
+ syn::NestedMeta::Lit(_) => {
+ return span_err(span, &format!("`#[{}(\"...\")]` is not a valid attribute", name));
+ }
+ };
+
+ let span = meta.span().unwrap();
+ let path = path_to_string(meta.path());
+ match meta {
+ Meta::NameValue(..) => {
+ span_err(span, &format!("`#[{}({} = ...)]` is not a valid attribute", name, path))
+ }
+ Meta::Path(..) => {
+ span_err(span, &format!("`#[{}({})]` is not a valid attribute", name, path))
+ }
+ Meta::List(..) => {
+ span_err(span, &format!("`#[{}({}(...))]` is not a valid attribute", name, path))
+ }
+ }
+}
+
+/// Emit an error diagnostic for an invalid nested attribute (optionally performing additional
+/// decoration using the `FnOnce` passed in `diag`) and return `Err(ErrorHandled)`.
+///
+/// For use in functions that return a `Result<_, DiagnosticDeriveError>`.
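+///
+/// A typical use (illustrative):
+///
+/// ```ignore (illustrative)
+/// throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+///     diag.help("only `code` and `applicability` are valid nested attributes")
+/// });
+/// ```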
+macro_rules! throw_invalid_nested_attr {
+ ($attr:expr, $nested_attr:expr) => {{ throw_invalid_nested_attr!($attr, $nested_attr, |diag| diag) }};
+ ($attr:expr, $nested_attr:expr, $f:expr) => {{
+ let diag = crate::diagnostics::error::invalid_nested_attr($attr, $nested_attr);
+ return Err(crate::diagnostics::error::_throw_err(diag, $f));
+ }};
+}
+
+pub(crate) use throw_invalid_nested_attr;
diff --git a/compiler/rustc_macros/src/diagnostics/fluent.rs b/compiler/rustc_macros/src/diagnostics/fluent.rs
new file mode 100644
index 000000000..562d5e9f4
--- /dev/null
+++ b/compiler/rustc_macros/src/diagnostics/fluent.rs
@@ -0,0 +1,277 @@
+use annotate_snippets::{
+ display_list::DisplayList,
+ snippet::{Annotation, AnnotationType, Slice, Snippet, SourceAnnotation},
+};
+use fluent_bundle::{FluentBundle, FluentError, FluentResource};
+use fluent_syntax::{
+ ast::{Attribute, Entry, Identifier, Message},
+ parser::ParserError,
+};
+use proc_macro::{Diagnostic, Level, Span};
+use proc_macro2::TokenStream;
+use quote::quote;
+use std::{
+ collections::{HashMap, HashSet},
+ fs::File,
+ io::Read,
+ path::{Path, PathBuf},
+};
+use syn::{
+ parse::{Parse, ParseStream},
+ parse_macro_input,
+ punctuated::Punctuated,
+ token, Ident, LitStr, Result,
+};
+use unic_langid::langid;
+
+struct Resource {
+ ident: Ident,
+ #[allow(dead_code)]
+ fat_arrow_token: token::FatArrow,
+ resource: LitStr,
+}
+
+impl Parse for Resource {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ Ok(Resource {
+ ident: input.parse()?,
+ fat_arrow_token: input.parse()?,
+ resource: input.parse()?,
+ })
+ }
+}
+
+struct Resources(Punctuated<Resource, token::Comma>);
+
+impl Parse for Resources {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ let mut resources = Punctuated::new();
+ loop {
+ if input.is_empty() || input.peek(token::Brace) {
+ break;
+ }
+ let value = input.parse()?;
+ resources.push_value(value);
+ if !input.peek(token::Comma) {
+ break;
+ }
+ let punct = input.parse()?;
+ resources.push_punct(punct);
+ }
+ Ok(Resources(resources))
+ }
+}
+
+/// Helper function for returning an absolute path for macro-invocation relative file paths.
+///
+/// If the input is already absolute, then the input is returned. If the input is not absolute,
+/// then it is appended to the directory containing the source file with this macro invocation.
+fn invocation_relative_path_to_absolute(span: Span, path: &str) -> PathBuf {
+ let path = Path::new(path);
+ if path.is_absolute() {
+ path.to_path_buf()
+ } else {
+ // `/a/b/c/foo/bar.rs` contains the current macro invocation
+ let mut source_file_path = span.source_file().path();
+ // `/a/b/c/foo/`
+ source_file_path.pop();
+ // `/a/b/c/foo/../locales/en-US/example.ftl`
+ source_file_path.push(path);
+ source_file_path
+ }
+}
+
+/// See [rustc_macros::fluent_messages].
+pub(crate) fn fluent_messages(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+ let resources = parse_macro_input!(input as Resources);
+
+    // A `FluentBundle` doesn't support iterating over its individual messages, so do that
+    // using the `FluentResource` instead. The bundle is still constructed so that conflicting
+    // messages across the resources are detected.
+ let mut bundle = FluentBundle::new(vec![langid!("en-US")]);
+
+ // Map of Fluent identifiers to the `Span` of the resource that defined them, used for better
+ // diagnostics.
+ let mut previous_defns = HashMap::new();
+
+ let mut includes = TokenStream::new();
+ let mut generated = TokenStream::new();
+ for res in resources.0 {
+ let ident_span = res.ident.span().unwrap();
+ let path_span = res.resource.span().unwrap();
+
+        // Set of Fluent attribute names already output, to avoid generating duplicate
+        // constants - any constant created for a given attribute name is identical.
+ let mut previous_attrs = HashSet::new();
+
+ let relative_ftl_path = res.resource.value();
+ let absolute_ftl_path =
+ invocation_relative_path_to_absolute(ident_span, &relative_ftl_path);
+ // As this macro also outputs an `include_str!` for this file, the macro will always be
+ // re-executed when the file changes.
+ let mut resource_file = match File::open(absolute_ftl_path) {
+ Ok(resource_file) => resource_file,
+ Err(e) => {
+ Diagnostic::spanned(path_span, Level::Error, "could not open Fluent resource")
+ .note(e.to_string())
+ .emit();
+ continue;
+ }
+ };
+ let mut resource_contents = String::new();
+ if let Err(e) = resource_file.read_to_string(&mut resource_contents) {
+ Diagnostic::spanned(path_span, Level::Error, "could not read Fluent resource")
+ .note(e.to_string())
+ .emit();
+ continue;
+ }
+ let resource = match FluentResource::try_new(resource_contents) {
+ Ok(resource) => resource,
+ Err((this, errs)) => {
+ Diagnostic::spanned(path_span, Level::Error, "could not parse Fluent resource")
+ .help("see additional errors emitted")
+ .emit();
+ for ParserError { pos, slice: _, kind } in errs {
+ let mut err = kind.to_string();
+                    // Entirely unnecessary string modification so that the error message
+                    // starts with a lowercase letter, as rustc errors do.
+ err.replace_range(
+ 0..1,
+ &err.chars().next().unwrap().to_lowercase().to_string(),
+ );
+
+ let line_starts: Vec<usize> = std::iter::once(0)
+ .chain(
+ this.source()
+ .char_indices()
+ .filter_map(|(i, c)| Some(i + 1).filter(|_| c == '\n')),
+ )
+ .collect();
+ let line_start = line_starts
+ .iter()
+ .enumerate()
+ .map(|(line, idx)| (line + 1, idx))
+ .filter(|(_, idx)| **idx <= pos.start)
+ .last()
+ .unwrap()
+ .0;
+
+ let snippet = Snippet {
+ title: Some(Annotation {
+ label: Some(&err),
+ id: None,
+ annotation_type: AnnotationType::Error,
+ }),
+ footer: vec![],
+ slices: vec![Slice {
+ source: this.source(),
+ line_start,
+ origin: Some(&relative_ftl_path),
+ fold: true,
+ annotations: vec![SourceAnnotation {
+ label: "",
+ annotation_type: AnnotationType::Error,
+ range: (pos.start, pos.end - 1),
+ }],
+ }],
+ opt: Default::default(),
+ };
+ let dl = DisplayList::from(snippet);
+ eprintln!("{}\n", dl);
+ }
+ continue;
+ }
+ };
+
+ let mut constants = TokenStream::new();
+ for entry in resource.entries() {
+ let span = res.ident.span();
+ if let Entry::Message(Message { id: Identifier { name }, attributes, .. }) = entry {
+ let _ = previous_defns.entry(name.to_string()).or_insert(ident_span);
+
+ // `typeck-foo-bar` => `foo_bar` (in `typeck.ftl`)
+ // `const-eval-baz` => `baz` (in `const_eval.ftl`)
+ let snake_name = Ident::new(
+ // FIXME: should probably trim prefix, not replace all occurrences
+ &name
+ .replace(&format!("{}-", res.ident).replace('_', "-"), "")
+ .replace('-', "_"),
+ span,
+ );
+ constants.extend(quote! {
+ pub const #snake_name: crate::DiagnosticMessage =
+ crate::DiagnosticMessage::FluentIdentifier(
+ std::borrow::Cow::Borrowed(#name),
+ None
+ );
+ });
+
+ for Attribute { id: Identifier { name: attr_name }, .. } in attributes {
+ let snake_name = Ident::new(&attr_name.replace('-', "_"), span);
+ if !previous_attrs.insert(snake_name.clone()) {
+ continue;
+ }
+
+ constants.extend(quote! {
+ pub const #snake_name: crate::SubdiagnosticMessage =
+ crate::SubdiagnosticMessage::FluentAttr(
+ std::borrow::Cow::Borrowed(#attr_name)
+ );
+ });
+ }
+ }
+ }
+
+ if let Err(errs) = bundle.add_resource(resource) {
+ for e in errs {
+ match e {
+ FluentError::Overriding { kind, id } => {
+ Diagnostic::spanned(
+ ident_span,
+ Level::Error,
+ format!("overrides existing {}: `{}`", kind, id),
+ )
+ .span_help(previous_defns[&id], "previously defined in this resource")
+ .emit();
+ }
+ FluentError::ResolverError(_) | FluentError::ParserError(_) => unreachable!(),
+ }
+ }
+ }
+
+ includes.extend(quote! { include_str!(#relative_ftl_path), });
+
+ let ident = res.ident;
+ generated.extend(quote! {
+ pub mod #ident {
+ #constants
+ }
+ });
+ }
+
+ quote! {
+ #[allow(non_upper_case_globals)]
+ #[doc(hidden)]
+ pub mod fluent_generated {
+ pub static DEFAULT_LOCALE_RESOURCES: &'static [&'static str] = &[
+ #includes
+ ];
+
+ #generated
+
+ pub mod _subdiag {
+ pub const help: crate::SubdiagnosticMessage =
+ crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("help"));
+ pub const note: crate::SubdiagnosticMessage =
+ crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("note"));
+ pub const warn: crate::SubdiagnosticMessage =
+ crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("warn"));
+ pub const label: crate::SubdiagnosticMessage =
+ crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("label"));
+ pub const suggestion: crate::SubdiagnosticMessage =
+ crate::SubdiagnosticMessage::FluentAttr(std::borrow::Cow::Borrowed("suggestion"));
+ }
+ }
+ }
+ .into()
+}
diff --git a/compiler/rustc_macros/src/diagnostics/mod.rs b/compiler/rustc_macros/src/diagnostics/mod.rs
new file mode 100644
index 000000000..399790026
--- /dev/null
+++ b/compiler/rustc_macros/src/diagnostics/mod.rs
@@ -0,0 +1,159 @@
+mod diagnostic;
+mod diagnostic_builder;
+mod error;
+mod fluent;
+mod subdiagnostic;
+mod utils;
+
+use diagnostic::{LintDiagnosticDerive, SessionDiagnosticDerive};
+pub(crate) use fluent::fluent_messages;
+use proc_macro2::TokenStream;
+use quote::format_ident;
+use subdiagnostic::SessionSubdiagnosticDerive;
+use synstructure::Structure;
+
+/// Implements `#[derive(SessionDiagnostic)]`, which allows for errors to be specified as a
+/// struct, independently of the actual diagnostic-emitting code.
+///
+/// ```ignore (rust)
+/// # extern crate rustc_errors;
+/// # use rustc_errors::Applicability;
+/// # extern crate rustc_span;
+/// # use rustc_span::{symbol::Ident, Span};
+/// # extern crate rustc_middle;
+/// # use rustc_middle::ty::Ty;
+/// #[derive(SessionDiagnostic)]
+/// #[error(borrowck::move_out_of_borrow, code = "E0505")]
+/// pub struct MoveOutOfBorrowError<'tcx> {
+/// pub name: Ident,
+/// pub ty: Ty<'tcx>,
+/// #[primary_span]
+/// #[label]
+/// pub span: Span,
+/// #[label(borrowck::first_borrow_label)]
+/// pub first_borrow_span: Span,
+/// #[suggestion(code = "{name}.clone()")]
+/// pub clone_sugg: Option<(Span, Applicability)>
+/// }
+/// ```
+///
+/// ```fluent
+/// move-out-of-borrow = cannot move out of {$name} because it is borrowed
+/// .label = cannot move out of borrow
+/// .first-borrow-label = `{$ty}` first borrowed here
+/// .suggestion = consider cloning here
+/// ```
+///
+/// Then, later, to emit the error:
+///
+/// ```ignore (rust)
+/// sess.emit_err(MoveOutOfBorrowError {
+///     name,
+///     ty,
+///     span,
+///     first_borrow_span,
+///     clone_sugg: Some((suggestion, Applicability::MachineApplicable)),
+/// });
+/// ```
+///
+/// See rustc dev guide for more examples of using `#[derive(SessionDiagnostic)]`:
+/// <https://rustc-dev-guide.rust-lang.org/diagnostics/diagnostic-structs.html>
+pub fn session_diagnostic_derive(s: Structure<'_>) -> TokenStream {
+ SessionDiagnosticDerive::new(format_ident!("diag"), format_ident!("sess"), s).into_tokens()
+}
+
+/// Implements `#[derive(LintDiagnostic)]`, which allows for lints to be specified as a
+/// struct, independently of the actual lint-emitting code.
+///
+/// ```ignore (rust)
+/// #[derive(LintDiagnostic)]
+/// #[lint(lint::atomic_ordering_invalid_fail_success)]
+/// pub struct AtomicOrderingInvalidLint {
+/// method: Symbol,
+/// success_ordering: Symbol,
+/// fail_ordering: Symbol,
+/// #[label(lint::fail_label)]
+/// fail_order_arg_span: Span,
+/// #[label(lint::success_label)]
+/// #[suggestion(
+/// code = "std::sync::atomic::Ordering::{success_suggestion}",
+/// applicability = "maybe-incorrect"
+/// )]
+/// success_order_arg_span: Span,
+/// }
+/// ```
+///
+/// ```fluent
+/// lint-atomic-ordering-invalid-fail-success = `{$method}`'s success ordering must be at least as strong as its failure ordering
+/// .fail-label = `{$fail_ordering}` failure ordering
+/// .success-label = `{$success_ordering}` success ordering
+/// .suggestion = consider using `{$success_suggestion}` success ordering instead
+/// ```
+///
+/// Then, later, to emit the error:
+///
+/// ```ignore (rust)
+/// cx.struct_span_lint(INVALID_ATOMIC_ORDERING, fail_order_arg_span, AtomicOrderingInvalidLint {
+/// method,
+/// success_ordering,
+/// fail_ordering,
+/// fail_order_arg_span,
+/// success_order_arg_span,
+/// });
+/// ```
+///
+/// See rustc dev guide for more examples of using `#[derive(LintDiagnostic)]`:
+/// <https://rustc-dev-guide.rust-lang.org/diagnostics/sessiondiagnostic.html>
+pub fn lint_diagnostic_derive(s: Structure<'_>) -> TokenStream {
+ LintDiagnosticDerive::new(format_ident!("diag"), s).into_tokens()
+}
+
+/// Implements `#[derive(SessionSubdiagnostic)]`, which allows for labels, notes, helps and
+/// suggestions to be specified as structs or enums, independently of the actual
+/// diagnostic-emitting code or diagnostic derives.
+///
+/// ```ignore (rust)
+/// #[derive(SessionSubdiagnostic)]
+/// pub enum ExpectedIdentifierLabel<'tcx> {
+/// #[label(parser::expected_identifier)]
+/// WithoutFound {
+/// #[primary_span]
+/// span: Span,
+///     },
+/// #[label(parser::expected_identifier_found)]
+/// WithFound {
+/// #[primary_span]
+/// span: Span,
+/// found: String,
+/// }
+/// }
+///
+/// #[derive(SessionSubdiagnostic)]
+/// #[suggestion_verbose(parser::raw_identifier)]
+/// pub struct RawIdentifierSuggestion<'tcx> {
+/// #[primary_span]
+/// span: Span,
+/// #[applicability]
+/// applicability: Applicability,
+/// ident: Ident,
+/// }
+/// ```
+///
+/// ```fluent
+/// parser-expected-identifier = expected identifier
+///
+/// parser-expected-identifier-found = expected identifier, found {$found}
+///
+/// parser-raw-identifier = escape `{$ident}` to use it as an identifier
+/// ```
+///
+/// Then, later, to add the subdiagnostic:
+///
+/// ```ignore (rust)
+/// diag.subdiagnostic(ExpectedIdentifierLabel::WithoutFound { span });
+///
+/// diag.subdiagnostic(RawIdentifierSuggestion { span, applicability, ident });
+/// ```
+pub fn session_subdiagnostic_derive(s: Structure<'_>) -> TokenStream {
+ SessionSubdiagnosticDerive::new(s).into_tokens()
+}
diff --git a/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs b/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
new file mode 100644
index 000000000..edf4dbed9
--- /dev/null
+++ b/compiler/rustc_macros/src/diagnostics/subdiagnostic.rs
@@ -0,0 +1,491 @@
+#![deny(unused_must_use)]
+
+use crate::diagnostics::error::{
+ span_err, throw_invalid_attr, throw_invalid_nested_attr, throw_span_err, DiagnosticDeriveError,
+};
+use crate::diagnostics::utils::{
+ report_error_if_not_applied_to_applicability, report_error_if_not_applied_to_span,
+ Applicability, FieldInfo, FieldInnerTy, HasFieldMap, SetOnce,
+};
+use proc_macro2::TokenStream;
+use quote::{format_ident, quote};
+use std::collections::HashMap;
+use std::fmt;
+use std::str::FromStr;
+use syn::{parse_quote, spanned::Spanned, Meta, MetaList, MetaNameValue, NestedMeta, Path};
+use synstructure::{BindingInfo, Structure, VariantInfo};
+
+/// Which kind of suggestion is being created?
+#[derive(Clone, Copy)]
+enum SubdiagnosticSuggestionKind {
+ /// `#[suggestion]`
+ Normal,
+ /// `#[suggestion_short]`
+ Short,
+ /// `#[suggestion_hidden]`
+ Hidden,
+ /// `#[suggestion_verbose]`
+ Verbose,
+}
+
+/// Which kind of subdiagnostic is being created from a variant?
+#[derive(Clone, Copy)]
+enum SubdiagnosticKind {
+ /// `#[label(...)]`
+ Label,
+ /// `#[note(...)]`
+ Note,
+ /// `#[help(...)]`
+ Help,
+ /// `#[warn_(...)]`
+ Warn,
+ /// `#[suggestion{,_short,_hidden,_verbose}]`
+ Suggestion(SubdiagnosticSuggestionKind),
+}
+
+impl FromStr for SubdiagnosticKind {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "label" => Ok(SubdiagnosticKind::Label),
+ "note" => Ok(SubdiagnosticKind::Note),
+ "help" => Ok(SubdiagnosticKind::Help),
+ "warn_" => Ok(SubdiagnosticKind::Warn),
+ "suggestion" => Ok(SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Normal)),
+ "suggestion_short" => {
+ Ok(SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Short))
+ }
+ "suggestion_hidden" => {
+ Ok(SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Hidden))
+ }
+ "suggestion_verbose" => {
+ Ok(SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Verbose))
+ }
+ _ => Err(()),
+ }
+ }
+}
+
+impl quote::IdentFragment for SubdiagnosticKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ SubdiagnosticKind::Label => write!(f, "label"),
+ SubdiagnosticKind::Note => write!(f, "note"),
+ SubdiagnosticKind::Help => write!(f, "help"),
+ SubdiagnosticKind::Warn => write!(f, "warn"),
+ SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Normal) => {
+ write!(f, "suggestion")
+ }
+ SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Short) => {
+ write!(f, "suggestion_short")
+ }
+ SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Hidden) => {
+ write!(f, "suggestion_hidden")
+ }
+ SubdiagnosticKind::Suggestion(SubdiagnosticSuggestionKind::Verbose) => {
+ write!(f, "suggestion_verbose")
+ }
+ }
+ }
+
+ fn span(&self) -> Option<proc_macro2::Span> {
+ None
+ }
+}
+
+/// The central struct for constructing the `add_to_diagnostic` method from an annotated struct.
+pub(crate) struct SessionSubdiagnosticDerive<'a> {
+ structure: Structure<'a>,
+ diag: syn::Ident,
+}
+
+impl<'a> SessionSubdiagnosticDerive<'a> {
+ pub(crate) fn new(structure: Structure<'a>) -> Self {
+ let diag = format_ident!("diag");
+ Self { structure, diag }
+ }
+
+ pub(crate) fn into_tokens(self) -> TokenStream {
+ let SessionSubdiagnosticDerive { mut structure, diag } = self;
+ let implementation = {
+ let ast = structure.ast();
+ let span = ast.span().unwrap();
+ match ast.data {
+ syn::Data::Struct(..) | syn::Data::Enum(..) => (),
+ syn::Data::Union(..) => {
+                    span_err(
+                        span,
+                        "`#[derive(SessionSubdiagnostic)]` can only be used on structs and enums",
+                    )
+                    .emit();
+ }
+
+ if matches!(ast.data, syn::Data::Enum(..)) {
+ for attr in &ast.attrs {
+ span_err(
+ attr.span().unwrap(),
+ "unsupported type attribute for subdiagnostic enum",
+ )
+ .emit();
+ }
+ }
+
+ structure.bind_with(|_| synstructure::BindStyle::Move);
+ let variants_ = structure.each_variant(|variant| {
+ // Build the mapping of field names to fields. This allows attributes to peek
+ // values from other fields.
+ let mut fields_map = HashMap::new();
+ for binding in variant.bindings() {
+ let field = binding.ast();
+ if let Some(ident) = &field.ident {
+ fields_map.insert(ident.to_string(), quote! { #binding });
+ }
+ }
+
+ let mut builder = SessionSubdiagnosticDeriveBuilder {
+ diag: &diag,
+ variant,
+ span,
+ fields: fields_map,
+ kind: None,
+ slug: None,
+ code: None,
+ span_field: None,
+ applicability: None,
+ };
+ builder.into_tokens().unwrap_or_else(|v| v.to_compile_error())
+ });
+
+ quote! {
+ match self {
+ #variants_
+ }
+ }
+ };
+
+ let ret = structure.gen_impl(quote! {
+ gen impl rustc_errors::AddSubdiagnostic for @Self {
+ fn add_to_diagnostic(self, #diag: &mut rustc_errors::Diagnostic) {
+ use rustc_errors::{Applicability, IntoDiagnosticArg};
+ #implementation
+ }
+ }
+ });
+ ret
+ }
+}
+
+/// Tracks persistent information required for building up the call to add to the diagnostic
+/// for the final generated method. This is a struct separate from `SessionSubdiagnosticDerive`
+/// only so that `self.builder` and `self.structure` can be destructured and split up,
+/// avoiding a double mut borrow later on.
+struct SessionSubdiagnosticDeriveBuilder<'a> {
+ /// The identifier to use for the generated `DiagnosticBuilder` instance.
+ diag: &'a syn::Ident,
+
+ /// Info for the current variant (or the type if not an enum).
+ variant: &'a VariantInfo<'a>,
+ /// Span for the entire type.
+ span: proc_macro::Span,
+
+ /// Store a map of field name to its corresponding field. This is built on construction of the
+ /// derive builder.
+ fields: HashMap<String, TokenStream>,
+
+ /// Subdiagnostic kind of the type/variant.
+ kind: Option<(SubdiagnosticKind, proc_macro::Span)>,
+
+ /// Slug of the subdiagnostic - corresponds to the Fluent identifier for the message - from the
+ /// `#[kind(slug)]` attribute on the type or variant.
+ slug: Option<(Path, proc_macro::Span)>,
+ /// If a suggestion, the code to suggest as a replacement - from the `#[kind(code = "...")]`
+ /// attribute on the type or variant.
+ code: Option<(TokenStream, proc_macro::Span)>,
+
+ /// Identifier for the binding to the `#[primary_span]` field.
+ span_field: Option<(proc_macro2::Ident, proc_macro::Span)>,
+ /// If a suggestion, the identifier for the binding to the `#[applicability]` field or a
+ /// `rustc_errors::Applicability::*` variant directly.
+ applicability: Option<(TokenStream, proc_macro::Span)>,
+}
+
+impl<'a> HasFieldMap for SessionSubdiagnosticDeriveBuilder<'a> {
+ fn get_field_binding(&self, field: &String) -> Option<&TokenStream> {
+ self.fields.get(field)
+ }
+}
+
+impl<'a> SessionSubdiagnosticDeriveBuilder<'a> {
+ fn identify_kind(&mut self) -> Result<(), DiagnosticDeriveError> {
+ for attr in self.variant.ast().attrs {
+ let span = attr.span().unwrap();
+
+ let name = attr.path.segments.last().unwrap().ident.to_string();
+ let name = name.as_str();
+
+ let meta = attr.parse_meta()?;
+ let kind = match meta {
+ Meta::List(MetaList { ref nested, .. }) => {
+ let mut nested_iter = nested.into_iter();
+ if let Some(nested_attr) = nested_iter.next() {
+ match nested_attr {
+ NestedMeta::Meta(Meta::Path(path)) => {
+ self.slug.set_once((path.clone(), span));
+ }
+ NestedMeta::Meta(meta @ Meta::NameValue(_))
+ if matches!(
+ meta.path().segments.last().unwrap().ident.to_string().as_str(),
+ "code" | "applicability"
+ ) =>
+ {
+ // don't error for valid follow-up attributes
+ }
+ nested_attr => {
+ throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+ diag.help(
+ "first argument of the attribute should be the diagnostic \
+ slug",
+ )
+ })
+ }
+ };
+ }
+
+ for nested_attr in nested_iter {
+ let meta = match nested_attr {
+ NestedMeta::Meta(ref meta) => meta,
+ _ => throw_invalid_nested_attr!(attr, &nested_attr),
+ };
+
+ let span = meta.span().unwrap();
+ let nested_name = meta.path().segments.last().unwrap().ident.to_string();
+ let nested_name = nested_name.as_str();
+
+ match meta {
+ Meta::NameValue(MetaNameValue { lit: syn::Lit::Str(s), .. }) => {
+ match nested_name {
+ "code" => {
+ let formatted_str = self.build_format(&s.value(), s.span());
+ self.code.set_once((formatted_str, span));
+ }
+ "applicability" => {
+ let value = match Applicability::from_str(&s.value()) {
+ Ok(v) => v,
+ Err(()) => {
+ span_err(span, "invalid applicability").emit();
+ Applicability::Unspecified
+ }
+ };
+ self.applicability.set_once((quote! { #value }, span));
+ }
+ _ => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+ diag.help(
+ "only `code` and `applicability` are valid nested \
+ attributes",
+ )
+ }),
+ }
+ }
+ _ => throw_invalid_nested_attr!(attr, &nested_attr, |diag| {
+ if matches!(meta, Meta::Path(_)) {
+ diag.help(
+ "a diagnostic slug must be the first argument to the \
+ attribute",
+ )
+ } else {
+ diag
+ }
+ }),
+ }
+ }
+
+ let Ok(kind) = SubdiagnosticKind::from_str(name) else {
+ throw_invalid_attr!(attr, &meta)
+ };
+
+ kind
+ }
+ _ => throw_invalid_attr!(attr, &meta),
+ };
+
+ if matches!(
+ kind,
+ SubdiagnosticKind::Label | SubdiagnosticKind::Help | SubdiagnosticKind::Note
+ ) && self.code.is_some()
+ {
+ throw_span_err!(
+ span,
+ &format!("`code` is not a valid nested attribute of a `{}` attribute", name)
+ );
+ }
+
+ if matches!(
+ kind,
+ SubdiagnosticKind::Label | SubdiagnosticKind::Help | SubdiagnosticKind::Note
+ ) && self.applicability.is_some()
+ {
+ throw_span_err!(
+ span,
+ &format!(
+ "`applicability` is not a valid nested attribute of a `{}` attribute",
+ name
+ )
+ );
+ }
+
+ if self.slug.is_none() {
+ throw_span_err!(
+ span,
+ &format!(
+ "diagnostic slug must be first argument of a `#[{}(...)]` attribute",
+ name
+ )
+ );
+ }
+
+ self.kind.set_once((kind, span));
+ }
+
+ Ok(())
+ }
+
+ fn generate_field_code(
+ &mut self,
+ binding: &BindingInfo<'_>,
+ is_suggestion: bool,
+ ) -> Result<TokenStream, DiagnosticDeriveError> {
+ let ast = binding.ast();
+
+ let inner_ty = FieldInnerTy::from_type(&ast.ty);
+ let info = FieldInfo {
+ binding: binding,
+ ty: inner_ty.inner_type().unwrap_or(&ast.ty),
+ span: &ast.span(),
+ };
+
+ for attr in &ast.attrs {
+ let name = attr.path.segments.last().unwrap().ident.to_string();
+ let name = name.as_str();
+ let span = attr.span().unwrap();
+
+ let meta = attr.parse_meta()?;
+ match meta {
+ Meta::Path(_) => match name {
+ "primary_span" => {
+ report_error_if_not_applied_to_span(attr, &info)?;
+ self.span_field.set_once((binding.binding.clone(), span));
+ return Ok(quote! {});
+ }
+ "applicability" if is_suggestion => {
+ report_error_if_not_applied_to_applicability(attr, &info)?;
+ let binding = binding.binding.clone();
+ self.applicability.set_once((quote! { #binding }, span));
+ return Ok(quote! {});
+ }
+ "applicability" => {
+ span_err(span, "`#[applicability]` is only valid on suggestions").emit();
+ return Ok(quote! {});
+ }
+ "skip_arg" => {
+ return Ok(quote! {});
+ }
+ _ => throw_invalid_attr!(attr, &meta, |diag| {
+ diag.help(
+ "only `primary_span`, `applicability` and `skip_arg` are valid field \
+ attributes",
+ )
+ }),
+ },
+ _ => throw_invalid_attr!(attr, &meta),
+ }
+ }
+
+ let ident = ast.ident.as_ref().unwrap();
+
+ let diag = &self.diag;
+ let generated = quote! {
+ #diag.set_arg(
+ stringify!(#ident),
+ #binding
+ );
+ };
+
+ Ok(inner_ty.with(binding, generated))
+ }
+
+ fn into_tokens(&mut self) -> Result<TokenStream, DiagnosticDeriveError> {
+ self.identify_kind()?;
+ let Some(kind) = self.kind.map(|(kind, _)| kind) else {
+ throw_span_err!(
+ self.variant.ast().ident.span().unwrap(),
+ "subdiagnostic kind not specified"
+ );
+ };
+
+ let is_suggestion = matches!(kind, SubdiagnosticKind::Suggestion(_));
+
+ let mut args = TokenStream::new();
+ for binding in self.variant.bindings() {
+ let arg = self
+ .generate_field_code(binding, is_suggestion)
+ .unwrap_or_else(|v| v.to_compile_error());
+ args.extend(arg);
+ }
+
+ // Missing slug errors will already have been reported.
+ let slug = self
+ .slug
+ .as_ref()
+ .map(|(slug, _)| slug.clone())
+ .unwrap_or_else(|| parse_quote! { you::need::to::specify::a::slug });
+ let code = match self.code.as_ref() {
+ Some((code, _)) => Some(quote! { #code }),
+ None if is_suggestion => {
+ span_err(self.span, "suggestion without `code = \"...\"`").emit();
+ Some(quote! { /* macro error */ "..." })
+ }
+ None => None,
+ };
+
+ let span_field = self.span_field.as_ref().map(|(span, _)| span);
+ let applicability = match self.applicability.clone() {
+ Some((applicability, _)) => Some(applicability),
+ None if is_suggestion => {
+ span_err(self.span, "suggestion without `applicability`").emit();
+ Some(quote! { rustc_errors::Applicability::Unspecified })
+ }
+ None => None,
+ };
+
+ let diag = &self.diag;
+ let name = format_ident!("{}{}", if span_field.is_some() { "span_" } else { "" }, kind);
+ let message = quote! { rustc_errors::fluent::#slug };
+ let call = if matches!(kind, SubdiagnosticKind::Suggestion(..)) {
+ if let Some(span) = span_field {
+ quote! { #diag.#name(#span, #message, #code, #applicability); }
+ } else {
+ span_err(self.span, "suggestion without `#[primary_span]` field").emit();
+ quote! { unreachable!(); }
+ }
+ } else if matches!(kind, SubdiagnosticKind::Label) {
+ if let Some(span) = span_field {
+ quote! { #diag.#name(#span, #message); }
+ } else {
+ span_err(self.span, "label without `#[primary_span]` field").emit();
+ quote! { unreachable!(); }
+ }
+ } else {
+ if let Some(span) = span_field {
+ quote! { #diag.#name(#span, #message); }
+ } else {
+ quote! { #diag.#name(#message); }
+ }
+ };
+
+ Ok(quote! {
+ #call
+ #args
+ })
+ }
+}
diff --git a/compiler/rustc_macros/src/diagnostics/utils.rs b/compiler/rustc_macros/src/diagnostics/utils.rs
new file mode 100644
index 000000000..002abb152
--- /dev/null
+++ b/compiler/rustc_macros/src/diagnostics/utils.rs
@@ -0,0 +1,356 @@
+use crate::diagnostics::error::{span_err, throw_span_err, DiagnosticDeriveError};
+use proc_macro::Span;
+use proc_macro2::TokenStream;
+use quote::{format_ident, quote, ToTokens};
+use std::collections::{BTreeSet, HashMap};
+use std::str::FromStr;
+use syn::{spanned::Spanned, Attribute, Meta, Type, TypeTuple};
+use synstructure::{BindingInfo, Structure};
+
+/// Checks whether the type name of `ty` matches `name`.
+///
+/// Given some struct at `a::b::c::Foo`, this will return true for `c::Foo`, `b::c::Foo`, or
+/// `a::b::c::Foo`. This reasonably allows qualified names to be used in the macro.
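+///
+/// For example (illustrative):
+///
+/// ```ignore (illustrative)
+/// // `true` if `field.ty` is `rustc_span::Span`, however qualified it was written
+/// let is_span = type_matches_path(&field.ty, &["rustc_span", "Span"]);
+/// ```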
+pub(crate) fn type_matches_path(ty: &Type, name: &[&str]) -> bool {
+ if let Type::Path(ty) = ty {
+ ty.path
+ .segments
+ .iter()
+ .map(|s| s.ident.to_string())
+ .rev()
+ .zip(name.iter().rev())
+ .all(|(x, y)| &x.as_str() == y)
+ } else {
+ false
+ }
+}
+
+/// Checks whether the type `ty` is `()`.
+pub(crate) fn type_is_unit(ty: &Type) -> bool {
+ if let Type::Tuple(TypeTuple { elems, .. }) = ty { elems.is_empty() } else { false }
+}
+
+/// Reports a type error for a field with attribute `attr`.
+pub(crate) fn report_type_error(
+ attr: &Attribute,
+ ty_name: &str,
+) -> Result<!, DiagnosticDeriveError> {
+ let name = attr.path.segments.last().unwrap().ident.to_string();
+ let meta = attr.parse_meta()?;
+
+ throw_span_err!(
+ attr.span().unwrap(),
+ &format!(
+ "the `#[{}{}]` attribute can only be applied to fields of type {}",
+ name,
+ match meta {
+ Meta::Path(_) => "",
+ Meta::NameValue(_) => " = ...",
+ Meta::List(_) => "(...)",
+ },
+ ty_name
+ )
+ );
+}
+
+/// Reports an error if the field's type does not match `path`.
+fn report_error_if_not_applied_to_ty(
+ attr: &Attribute,
+ info: &FieldInfo<'_>,
+ path: &[&str],
+ ty_name: &str,
+) -> Result<(), DiagnosticDeriveError> {
+ if !type_matches_path(&info.ty, path) {
+ report_type_error(attr, ty_name)?;
+ }
+
+ Ok(())
+}
+
+/// Reports an error if the field's type is not `Applicability`.
+pub(crate) fn report_error_if_not_applied_to_applicability(
+ attr: &Attribute,
+ info: &FieldInfo<'_>,
+) -> Result<(), DiagnosticDeriveError> {
+ report_error_if_not_applied_to_ty(
+ attr,
+ info,
+ &["rustc_errors", "Applicability"],
+ "`Applicability`",
+ )
+}
+
+/// Reports an error if the field's type is not `Span`.
+pub(crate) fn report_error_if_not_applied_to_span(
+ attr: &Attribute,
+ info: &FieldInfo<'_>,
+) -> Result<(), DiagnosticDeriveError> {
+ if !type_matches_path(&info.ty, &["rustc_span", "Span"])
+ && !type_matches_path(&info.ty, &["rustc_errors", "MultiSpan"])
+ {
+ report_type_error(attr, "`Span` or `MultiSpan`")?;
+ }
+
+ Ok(())
+}
+
+/// Inner type of a field and type of wrapper.
+pub(crate) enum FieldInnerTy<'ty> {
+    /// Field is wrapped in an `Option<$inner>`.
+ Option(&'ty Type),
+ /// Field is wrapped in a `Vec<$inner>`.
+ Vec(&'ty Type),
+ /// Field isn't wrapped in an outer type.
+ None,
+}
+
+impl<'ty> FieldInnerTy<'ty> {
+ /// Returns inner type for a field, if there is one.
+ ///
+    /// - If `ty` is an `Option`, returns `FieldInnerTy::Option(inner type)`.
+    /// - If `ty` is a `Vec`, returns `FieldInnerTy::Vec(inner type)`.
+    /// - Otherwise returns `FieldInnerTy::None`.
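+    ///
+    /// For example (illustrative): `Option<Span>` yields `FieldInnerTy::Option(Span)` and
+    /// `Vec<Span>` yields `FieldInnerTy::Vec(Span)`.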
+ pub(crate) fn from_type(ty: &'ty Type) -> Self {
+ let variant: &dyn Fn(&'ty Type) -> FieldInnerTy<'ty> =
+ if type_matches_path(ty, &["std", "option", "Option"]) {
+ &FieldInnerTy::Option
+ } else if type_matches_path(ty, &["std", "vec", "Vec"]) {
+ &FieldInnerTy::Vec
+ } else {
+ return FieldInnerTy::None;
+ };
+
+ if let Type::Path(ty_path) = ty {
+ let path = &ty_path.path;
+ let ty = path.segments.iter().last().unwrap();
+ if let syn::PathArguments::AngleBracketed(bracketed) = &ty.arguments {
+ if bracketed.args.len() == 1 {
+ if let syn::GenericArgument::Type(ty) = &bracketed.args[0] {
+ return variant(ty);
+ }
+ }
+ }
+ }
+
+ unreachable!();
+ }
+
+ /// Returns `Option` containing inner type if there is one.
+ pub(crate) fn inner_type(&self) -> Option<&'ty Type> {
+ match self {
+ FieldInnerTy::Option(inner) | FieldInnerTy::Vec(inner) => Some(inner),
+ FieldInnerTy::None => None,
+ }
+ }
+
+ /// Surrounds `inner` with destructured wrapper type, exposing inner type as `binding`.
+ pub(crate) fn with(&self, binding: impl ToTokens, inner: impl ToTokens) -> TokenStream {
+ match self {
+ FieldInnerTy::Option(..) => quote! {
+ if let Some(#binding) = #binding {
+ #inner
+ }
+ },
+ FieldInnerTy::Vec(..) => quote! {
+ for #binding in #binding {
+ #inner
+ }
+ },
+ FieldInnerTy::None => quote! { #inner },
+ }
+ }
+}
+
+/// Field information passed to the builder. Deliberately omits attrs to discourage the
+/// `generate_*` methods from walking the attributes themselves.
+pub(crate) struct FieldInfo<'a> {
+ pub(crate) binding: &'a BindingInfo<'a>,
+ pub(crate) ty: &'a Type,
+ pub(crate) span: &'a proc_macro2::Span,
+}
+
+/// Small helper trait for abstracting over `Option` fields that contain a value and a `Span`
+/// for error reporting if they are set more than once.
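+///
+/// For example (an illustrative sketch; `span` is assumed to be in scope), the second call
+/// reports "specified multiple times":
+///
+/// ```ignore (illustrative)
+/// let mut code: Option<(String, proc_macro::Span)> = None;
+/// code.set_once(("E0505".to_string(), span));
+/// code.set_once(("E0506".to_string(), span)); // error: specified multiple times
+/// ```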
+pub(crate) trait SetOnce<T> {
+ fn set_once(&mut self, _: (T, Span));
+
+ fn value(self) -> Option<T>;
+}
+
+impl<T> SetOnce<T> for Option<(T, Span)> {
+ fn set_once(&mut self, (value, span): (T, Span)) {
+ match self {
+ None => {
+ *self = Some((value, span));
+ }
+ Some((_, prev_span)) => {
+ span_err(span, "specified multiple times")
+ .span_note(*prev_span, "previously specified here")
+ .emit();
+ }
+ }
+ }
+
+ fn value(self) -> Option<T> {
+ self.map(|(v, _)| v)
+ }
+}
+
+pub(crate) trait HasFieldMap {
+ /// Returns the binding for the field with the given name, if it exists on the type.
+ fn get_field_binding(&self, field: &String) -> Option<&TokenStream>;
+
+    /// In the strings supplied in attributes to this macro, we want callers to be able to
+    /// reference fields of the annotated type in the format string. For example:
+ ///
+ /// ```ignore (not-usage-example)
+ /// /// Suggest `==` when users wrote `===`.
+ /// #[suggestion(slug = "parser-not-javascript-eq", code = "{lhs} == {rhs}")]
+ /// struct NotJavaScriptEq {
+ /// #[primary_span]
+ /// span: Span,
+ /// lhs: Ident,
+ /// rhs: Ident,
+ /// }
+ /// ```
+ ///
+    /// We want to automatically pick up that `{lhs}` refers to `self.lhs` and `{rhs}` refers to
+ /// `self.rhs`, then generate this call to `format!`:
+ ///
+ /// ```ignore (not-usage-example)
+ /// format!("{lhs} == {rhs}", lhs = self.lhs, rhs = self.rhs)
+ /// ```
+ ///
+ /// This function builds the entire call to `format!`.
+ fn build_format(&self, input: &str, span: proc_macro2::Span) -> TokenStream {
+ // This set is used later to generate the final format string. To keep builds reproducible,
+ // the iteration order needs to be deterministic, hence why we use a `BTreeSet` here
+ // instead of a `HashSet`.
+ let mut referenced_fields: BTreeSet<String> = BTreeSet::new();
+
+ // At this point, we can start parsing the format string.
+ let mut it = input.chars().peekable();
+
+ // Once the start of a format string has been found, process the format string and spit out
+ // the referenced fields. Leaves `it` sitting on the closing brace of the format string, so
+ // the next call to `it.next()` retrieves the next character.
+ while let Some(c) = it.next() {
+ if c == '{' && *it.peek().unwrap_or(&'\0') != '{' {
+ let mut eat_argument = || -> Option<String> {
+ let mut result = String::new();
+ // Format specifiers look like:
+ //
+ // format := '{' [ argument ] [ ':' format_spec ] '}' .
+ //
+ // Therefore, we only need to eat until ':' or '}' to find the argument.
+ while let Some(c) = it.next() {
+ result.push(c);
+ let next = *it.peek().unwrap_or(&'\0');
+ if next == '}' {
+ break;
+ } else if next == ':' {
+ // Eat the ':' character.
+ assert_eq!(it.next().unwrap(), ':');
+ break;
+ }
+ }
+ // Eat until (and including) the matching '}'
+ while it.next()? != '}' {
+ continue;
+ }
+ Some(result)
+ };
+
+ if let Some(referenced_field) = eat_argument() {
+ referenced_fields.insert(referenced_field);
+ }
+ }
+ }
+
+ // At this point, `referenced_fields` contains a set of the unique fields that were
+ // referenced in the format string. Generate the corresponding "x = self.x" format
+ // string parameters:
+ let args = referenced_fields.into_iter().map(|field: String| {
+ let field_ident = format_ident!("{}", field);
+ let value = match self.get_field_binding(&field) {
+ Some(value) => value.clone(),
+ // This field doesn't exist. Emit a diagnostic.
+ None => {
+ span_err(
+ span.unwrap(),
+ &format!("`{}` doesn't refer to a field on this type", field),
+ )
+ .emit();
+ quote! {
+ "{#field}"
+ }
+ }
+ };
+ quote! {
+ #field_ident = #value
+ }
+ });
+ quote! {
+ format!(#input #(,#args)*)
+ }
+ }
+}
+
+/// The `Applicability` of a suggestion. Mirrors `rustc_errors::Applicability` and is used to
+/// represent the user's selection of applicability if specified in an attribute.
+pub(crate) enum Applicability {
+ MachineApplicable,
+ MaybeIncorrect,
+ HasPlaceholders,
+ Unspecified,
+}
+
+impl FromStr for Applicability {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "machine-applicable" => Ok(Applicability::MachineApplicable),
+ "maybe-incorrect" => Ok(Applicability::MaybeIncorrect),
+ "has-placeholders" => Ok(Applicability::HasPlaceholders),
+ "unspecified" => Ok(Applicability::Unspecified),
+ _ => Err(()),
+ }
+ }
+}
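+
+// Illustrative usage of the `FromStr` impl above (a sketch, not from the original
+// source): `"machine-applicable".parse::<Applicability>()` yields
+// `Ok(Applicability::MachineApplicable)`, and any unrecognized string yields `Err(())`.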
+
+impl quote::ToTokens for Applicability {
+ fn to_tokens(&self, tokens: &mut TokenStream) {
+ tokens.extend(match self {
+ Applicability::MachineApplicable => {
+ quote! { rustc_errors::Applicability::MachineApplicable }
+ }
+ Applicability::MaybeIncorrect => {
+ quote! { rustc_errors::Applicability::MaybeIncorrect }
+ }
+ Applicability::HasPlaceholders => {
+ quote! { rustc_errors::Applicability::HasPlaceholders }
+ }
+ Applicability::Unspecified => {
+ quote! { rustc_errors::Applicability::Unspecified }
+ }
+ });
+ }
+}
+
+/// Build the mapping of field names to fields. This allows attributes to peek values from
+/// other fields.
+pub(crate) fn build_field_mapping<'a>(structure: &Structure<'a>) -> HashMap<String, TokenStream> {
+ let mut fields_map = HashMap::new();
+
+ let ast = structure.ast();
+ if let syn::Data::Struct(syn::DataStruct { fields, .. }) = &ast.data {
+ for field in fields.iter() {
+ if let Some(ident) = &field.ident {
+ fields_map.insert(ident.to_string(), quote! { &self.#ident });
+ }
+ }
+ }
+
+ fields_map
+}
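+
+// For example (an illustrative sketch): given `struct Foo { span: Span }`, the map built
+// above contains the entry `"span" => quote! { &self.span }`, which is what
+// `build_format` substitutes for `{span}` in a format string.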
diff --git a/compiler/rustc_macros/src/hash_stable.rs b/compiler/rustc_macros/src/hash_stable.rs
new file mode 100644
index 000000000..63bdcea87
--- /dev/null
+++ b/compiler/rustc_macros/src/hash_stable.rs
@@ -0,0 +1,131 @@
+use proc_macro2::{self, Ident};
+use quote::quote;
+use syn::{self, parse_quote, Meta, NestedMeta};
+
+struct Attributes {
+ ignore: bool,
+ project: Option<Ident>,
+}
+
+fn parse_attributes(field: &syn::Field) -> Attributes {
+ let mut attrs = Attributes { ignore: false, project: None };
+ for attr in &field.attrs {
+ if let Ok(meta) = attr.parse_meta() {
+ if !meta.path().is_ident("stable_hasher") {
+ continue;
+ }
+ let mut any_attr = false;
+ if let Meta::List(list) = meta {
+ for nested in list.nested.iter() {
+ if let NestedMeta::Meta(meta) = nested {
+ if meta.path().is_ident("ignore") {
+ attrs.ignore = true;
+ any_attr = true;
+ }
+ if meta.path().is_ident("project") {
+ if let Meta::List(list) = meta {
+ if let Some(NestedMeta::Meta(meta)) = list.nested.iter().next() {
+ attrs.project = meta.path().get_ident().cloned();
+ any_attr = true;
+ }
+ }
+ }
+ }
+ }
+ }
+ if !any_attr {
+ panic!("error parsing stable_hasher");
+ }
+ }
+ }
+ attrs
+}
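+
+// Illustrative inputs for `parse_attributes` (a sketch, not from the original source):
+//
+//   #[stable_hasher(ignore)]          -> Attributes { ignore: true, project: None }
+//   #[stable_hasher(project(name))]   -> Attributes { ignore: false, project: Some(`name`) }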
+
+pub fn hash_stable_generic_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ let generic: syn::GenericParam = parse_quote!(__CTX);
+ s.add_bounds(synstructure::AddBounds::Generics);
+ s.add_impl_generic(generic);
+ s.add_where_predicate(parse_quote! { __CTX: crate::HashStableContext });
+ let body = s.each(|bi| {
+ let attrs = parse_attributes(bi.ast());
+ if attrs.ignore {
+ quote! {}
+ } else if let Some(project) = attrs.project {
+ quote! {
+ (&#bi.#project).hash_stable(__hcx, __hasher);
+ }
+ } else {
+ quote! {
+ #bi.hash_stable(__hcx, __hasher);
+ }
+ }
+ });
+
+ let discriminant = match s.ast().data {
+ syn::Data::Enum(_) => quote! {
+ ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
+ },
+ syn::Data::Struct(_) => quote! {},
+ syn::Data::Union(_) => panic!("cannot derive on union"),
+ };
+
+ s.bound_impl(
+ quote!(::rustc_data_structures::stable_hasher::HashStable<__CTX>),
+ quote! {
+ #[inline]
+ fn hash_stable(
+ &self,
+ __hcx: &mut __CTX,
+ __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) {
+ #discriminant
+ match *self { #body }
+ }
+ },
+ )
+}
+
+pub fn hash_stable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ let generic: syn::GenericParam = parse_quote!('__ctx);
+ s.add_bounds(synstructure::AddBounds::Generics);
+ s.add_impl_generic(generic);
+ let body = s.each(|bi| {
+ let attrs = parse_attributes(bi.ast());
+ if attrs.ignore {
+ quote! {}
+ } else if let Some(project) = attrs.project {
+ quote! {
+ (&#bi.#project).hash_stable(__hcx, __hasher);
+ }
+ } else {
+ quote! {
+ #bi.hash_stable(__hcx, __hasher);
+ }
+ }
+ });
+
+ let discriminant = match s.ast().data {
+ syn::Data::Enum(_) => quote! {
+ ::std::mem::discriminant(self).hash_stable(__hcx, __hasher);
+ },
+ syn::Data::Struct(_) => quote! {},
+ syn::Data::Union(_) => panic!("cannot derive on union"),
+ };
+
+ s.bound_impl(
+ quote!(
+ ::rustc_data_structures::stable_hasher::HashStable<
+ ::rustc_query_system::ich::StableHashingContext<'__ctx>,
+ >
+ ),
+ quote! {
+ #[inline]
+ fn hash_stable(
+ &self,
+ __hcx: &mut ::rustc_query_system::ich::StableHashingContext<'__ctx>,
+ __hasher: &mut ::rustc_data_structures::stable_hasher::StableHasher) {
+ #discriminant
+ match *self { #body }
+ }
+ },
+ )
+}
diff --git a/compiler/rustc_macros/src/lib.rs b/compiler/rustc_macros/src/lib.rs
new file mode 100644
index 000000000..ab509b26f
--- /dev/null
+++ b/compiler/rustc_macros/src/lib.rs
@@ -0,0 +1,180 @@
+#![feature(allow_internal_unstable)]
+#![feature(let_else)]
+#![feature(never_type)]
+#![feature(proc_macro_diagnostic)]
+#![feature(proc_macro_span)]
+#![allow(rustc::default_hash_types)]
+#![recursion_limit = "128"]
+
+use synstructure::decl_derive;
+
+use proc_macro::TokenStream;
+
+mod diagnostics;
+mod hash_stable;
+mod lift;
+mod newtype;
+mod query;
+mod serialize;
+mod symbols;
+mod type_foldable;
+mod type_visitable;
+
+#[proc_macro]
+pub fn rustc_queries(input: TokenStream) -> TokenStream {
+ query::rustc_queries(input)
+}
+
+#[proc_macro]
+pub fn symbols(input: TokenStream) -> TokenStream {
+ symbols::symbols(input.into()).into()
+}
+
+/// Creates a struct type `S` that can be used as an index with
+/// `IndexVec` and so on.
+///
+/// There are two ways of interacting with these indices:
+///
+/// - The `From` impls are the preferred way. So you can do
+/// `S::from(v)` with a `usize` or `u32`. And you can convert back
+/// to an integer with `u32::from(s)`.
+///
+/// - Alternatively, you can use the methods `S::new(v)` and `s.index()`
+/// to create/return a value.
+///
+/// Internally, the index uses a u32, so the index must not exceed
+/// `u32::MAX`. You can also customize things like the `Debug` impl,
+/// what traits are derived, and so forth via the macro.
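+///
+/// A minimal illustrative invocation (the name `MyIdx` is hypothetical):
+///
+/// ```ignore (rust)
+/// newtype_index! {
+///     pub struct MyIdx {
+///         DEBUG_FORMAT = "MyIdx({})",
+///     }
+/// }
+///
+/// let i = MyIdx::from_u32(7);
+/// assert_eq!(u32::from(i), 7);
+/// assert_eq!(i.index(), 7);
+/// ```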
+#[proc_macro]
+#[allow_internal_unstable(step_trait, rustc_attrs, trusted_step)]
+pub fn newtype_index(input: TokenStream) -> TokenStream {
+ newtype::newtype(input)
+}
+
+/// Implements the `fluent_messages` macro, which performs compile-time validation of the
+/// compiler's Fluent resources (i.e. that the resources parse and don't multiply define the same
+/// messages) and generates constants that make using those messages in diagnostics more ergonomic.
+///
+/// For example, given the following invocation of the macro...
+///
+/// ```ignore (rust)
+/// fluent_messages! {
+/// typeck => "./typeck.ftl",
+/// }
+/// ```
+/// ...where `typeck.ftl` has the following contents...
+///
+/// ```fluent
+/// typeck-field-multiply-specified-in-initializer =
+/// field `{$ident}` specified more than once
+/// .label = used more than once
+/// .label-previous-use = first use of `{$ident}`
+/// ```
+/// ...then the macro will parse the Fluent resource, emitting a diagnostic if it fails to do so, and
+/// will generate the following code:
+///
+/// ```ignore (rust)
+/// pub static DEFAULT_LOCALE_RESOURCES: &'static [&'static str] = &[
+/// include_str!("./typeck.ftl"),
+/// ];
+///
+/// mod fluent_generated {
+/// mod typeck {
+/// pub const field_multiply_specified_in_initializer: DiagnosticMessage =
+/// DiagnosticMessage::fluent("typeck-field-multiply-specified-in-initializer");
+/// pub const field_multiply_specified_in_initializer_label_previous_use: DiagnosticMessage =
+/// DiagnosticMessage::fluent_attr(
+/// "typeck-field-multiply-specified-in-initializer",
+/// "previous-use-label"
+/// );
+/// }
+/// }
+/// ```
+/// When emitting a diagnostic, the generated constants can be used as follows:
+///
+/// ```ignore (rust)
+/// let mut err = sess.struct_span_err(
+/// span,
+/// fluent::typeck::field_multiply_specified_in_initializer
+/// );
+/// err.span_default_label(span);
+/// err.span_label(
+/// previous_use_span,
+/// fluent::typeck::field_multiply_specified_in_initializer_label_previous_use
+/// );
+/// err.emit();
+/// ```
+#[proc_macro]
+pub fn fluent_messages(input: TokenStream) -> TokenStream {
+ diagnostics::fluent_messages(input)
+}
+
+decl_derive!([HashStable, attributes(stable_hasher)] => hash_stable::hash_stable_derive);
+decl_derive!(
+ [HashStable_Generic, attributes(stable_hasher)] =>
+ hash_stable::hash_stable_generic_derive
+);
+
+decl_derive!([Decodable] => serialize::decodable_derive);
+decl_derive!([Encodable] => serialize::encodable_derive);
+decl_derive!([TyDecodable] => serialize::type_decodable_derive);
+decl_derive!([TyEncodable] => serialize::type_encodable_derive);
+decl_derive!([MetadataDecodable] => serialize::meta_decodable_derive);
+decl_derive!([MetadataEncodable] => serialize::meta_encodable_derive);
+decl_derive!([TypeFoldable, attributes(type_foldable)] => type_foldable::type_foldable_derive);
+decl_derive!([TypeVisitable, attributes(type_visitable)] => type_visitable::type_visitable_derive);
+decl_derive!([Lift, attributes(lift)] => lift::lift_derive);
+decl_derive!(
+ [SessionDiagnostic, attributes(
+ // struct attributes
+ warning,
+ error,
+ lint,
+ help,
+ note,
+ warn_,
+ // field attributes
+ skip_arg,
+ primary_span,
+ label,
+ subdiagnostic,
+ suggestion,
+ suggestion_short,
+ suggestion_hidden,
+ suggestion_verbose)] => diagnostics::session_diagnostic_derive
+);
+decl_derive!(
+ [LintDiagnostic, attributes(
+ // struct attributes
+ warning,
+ error,
+ lint,
+ help,
+ note,
+ warn_,
+ // field attributes
+ skip_arg,
+ primary_span,
+ label,
+ subdiagnostic,
+ suggestion,
+ suggestion_short,
+ suggestion_hidden,
+ suggestion_verbose)] => diagnostics::lint_diagnostic_derive
+);
+decl_derive!(
+ [SessionSubdiagnostic, attributes(
+ // struct/variant attributes
+ label,
+ help,
+ note,
+ warn_,
+ suggestion,
+ suggestion_short,
+ suggestion_hidden,
+ suggestion_verbose,
+ // field attributes
+ skip_arg,
+ primary_span,
+ applicability)] => diagnostics::session_subdiagnostic_derive
+);
diff --git a/compiler/rustc_macros/src/lift.rs b/compiler/rustc_macros/src/lift.rs
new file mode 100644
index 000000000..ad7ac7404
--- /dev/null
+++ b/compiler/rustc_macros/src/lift.rs
@@ -0,0 +1,52 @@
+use quote::quote;
+use syn::{self, parse_quote};
+
+pub fn lift_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ s.add_bounds(synstructure::AddBounds::Generics);
+ s.bind_with(|_| synstructure::BindStyle::Move);
+
+ let tcx: syn::Lifetime = parse_quote!('tcx);
+ let newtcx: syn::GenericParam = parse_quote!('__lifted);
+
+ let lifted = {
+ let ast = s.ast();
+ let ident = &ast.ident;
+
+ // Replace `'tcx` lifetime by the `'__lifted` lifetime
+ let (_, generics, _) = ast.generics.split_for_impl();
+ let mut generics: syn::AngleBracketedGenericArguments = syn::parse_quote! { #generics };
+ for arg in generics.args.iter_mut() {
+ match arg {
+ syn::GenericArgument::Lifetime(l) if *l == tcx => {
+ *arg = parse_quote!('__lifted);
+ }
+ syn::GenericArgument::Type(t) => {
+ *arg = syn::parse_quote! { #t::Lifted };
+ }
+ _ => {}
+ }
+ }
+
+ quote! { #ident #generics }
+ };
+
+ let body = s.each_variant(|vi| {
+ let bindings = &vi.bindings();
+ vi.construct(|_, index| {
+ let bi = &bindings[index];
+ quote! { __tcx.lift(#bi)? }
+ })
+ });
+
+ s.add_impl_generic(newtcx);
+ s.bound_impl(
+ quote!(::rustc_middle::ty::Lift<'__lifted>),
+ quote! {
+ type Lifted = #lifted;
+
+ fn lift_to_tcx(self, __tcx: ::rustc_middle::ty::TyCtxt<'__lifted>) -> Option<#lifted> {
+ Some(match self { #body })
+ }
+ },
+ )
+}
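+
+// Illustrative sketch of the expansion: for a hypothetical `struct Pair<'tcx>(Ty<'tcx>, Ty<'tcx>)`,
+// the derive generates `type Lifted = Pair<'__lifted>;` and a `lift_to_tcx` that returns
+// `Some(Pair(__tcx.lift(__binding_0)?, __tcx.lift(__binding_1)?))`.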
diff --git a/compiler/rustc_macros/src/newtype.rs b/compiler/rustc_macros/src/newtype.rs
new file mode 100644
index 000000000..0a77b734c
--- /dev/null
+++ b/compiler/rustc_macros/src/newtype.rs
@@ -0,0 +1,333 @@
+use proc_macro2::{Span, TokenStream};
+use quote::quote;
+use syn::parse::*;
+use syn::punctuated::Punctuated;
+use syn::*;
+
+mod kw {
+ syn::custom_keyword!(derive);
+ syn::custom_keyword!(DEBUG_FORMAT);
+ syn::custom_keyword!(MAX);
+ syn::custom_keyword!(ENCODABLE);
+ syn::custom_keyword!(custom);
+ syn::custom_keyword!(ORD_IMPL);
+}
+
+#[derive(Debug)]
+enum DebugFormat {
+ // The user will provide a custom `Debug` impl, so we shouldn't generate
+ // one
+ Custom,
+ // Use the specified format string in the generated `Debug` impl
+ // By default, this is "{}"
+ Format(String),
+}
+
+// We parse the input and emit the output in a single step.
+// This field stores the final macro output.
+struct Newtype(TokenStream);
+
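+// The input this parser accepts looks like the following (an illustrative,
+// hypothetical invocation; see `newtype_index` in lib.rs for the full docs):
+//
+//     pub struct ItemIdx {
+//         derive [HashStable],
+//         DEBUG_FORMAT = "item{}",
+//         MAX = 0xFFFF_FF00,
+//         const FIRST_ITEM = 0,
+//     }
+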
+impl Parse for Newtype {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ let attrs = input.call(Attribute::parse_outer)?;
+ let vis: Visibility = input.parse()?;
+ input.parse::<Token![struct]>()?;
+ let name: Ident = input.parse()?;
+
+ let body;
+ braced!(body in input);
+
+ // Any additional `#[derive]` macro paths to apply
+ let mut derive_paths: Vec<Path> = Vec::new();
+ let mut debug_format: Option<DebugFormat> = None;
+ let mut max = None;
+ let mut consts = Vec::new();
+ let mut encodable = true;
+ let mut ord = true;
+
+ // Parse an optional trailing comma
+ let try_comma = || -> Result<()> {
+ if body.lookahead1().peek(Token![,]) {
+ body.parse::<Token![,]>()?;
+ }
+ Ok(())
+ };
+
+ if body.lookahead1().peek(Token![..]) {
+ body.parse::<Token![..]>()?;
+ } else {
+ loop {
+ if body.lookahead1().peek(kw::derive) {
+ body.parse::<kw::derive>()?;
+ let derives;
+ bracketed!(derives in body);
+ let derives: Punctuated<Path, Token![,]> =
+ derives.parse_terminated(Path::parse)?;
+ try_comma()?;
+ derive_paths.extend(derives);
+ continue;
+ }
+ if body.lookahead1().peek(kw::DEBUG_FORMAT) {
+ body.parse::<kw::DEBUG_FORMAT>()?;
+ body.parse::<Token![=]>()?;
+ let new_debug_format = if body.lookahead1().peek(kw::custom) {
+ body.parse::<kw::custom>()?;
+ DebugFormat::Custom
+ } else {
+ let format_str: LitStr = body.parse()?;
+ DebugFormat::Format(format_str.value())
+ };
+ try_comma()?;
+ if let Some(old) = debug_format.replace(new_debug_format) {
+ panic!("Specified multiple debug format options: {:?}", old);
+ }
+ continue;
+ }
+ if body.lookahead1().peek(kw::MAX) {
+ body.parse::<kw::MAX>()?;
+ body.parse::<Token![=]>()?;
+ let val: Lit = body.parse()?;
+ try_comma()?;
+ if let Some(old) = max.replace(val) {
+ panic!("Specified multiple MAX: {:?}", old);
+ }
+ continue;
+ }
+ if body.lookahead1().peek(kw::ENCODABLE) {
+ body.parse::<kw::ENCODABLE>()?;
+ body.parse::<Token![=]>()?;
+ body.parse::<kw::custom>()?;
+ try_comma()?;
+ encodable = false;
+ continue;
+ }
+ if body.lookahead1().peek(kw::ORD_IMPL) {
+ body.parse::<kw::ORD_IMPL>()?;
+ body.parse::<Token![=]>()?;
+ body.parse::<kw::custom>()?;
+ ord = false;
+ continue;
+ }
+
+ // We've parsed everything that the user provided, so we're done
+ if body.is_empty() {
+ break;
+ }
+
+ // Otherwise, we are parsing a user-defined constant
+ let const_attrs = body.call(Attribute::parse_outer)?;
+ body.parse::<Token![const]>()?;
+ let const_name: Ident = body.parse()?;
+ body.parse::<Token![=]>()?;
+ let const_val: Expr = body.parse()?;
+ try_comma()?;
+ consts.push(quote! { #(#const_attrs)* #vis const #const_name: #name = #name::from_u32(#const_val); });
+ }
+ }
+
+ let debug_format = debug_format.unwrap_or(DebugFormat::Format("{}".to_string()));
+ // shave off 256 indices at the end to allow space for packing these indices into enums
+ let max = max.unwrap_or_else(|| Lit::Int(LitInt::new("0xFFFF_FF00", Span::call_site())));
+
+ let encodable_impls = if encodable {
+ quote! {
+ impl<D: ::rustc_serialize::Decoder> ::rustc_serialize::Decodable<D> for #name {
+ fn decode(d: &mut D) -> Self {
+ Self::from_u32(d.read_u32())
+ }
+ }
+ impl<E: ::rustc_serialize::Encoder> ::rustc_serialize::Encodable<E> for #name {
+ fn encode(&self, e: &mut E) {
+ e.emit_u32(self.private);
+ }
+ }
+ }
+ } else {
+ quote! {}
+ };
+
+ if ord {
+ derive_paths.push(parse_quote!(Ord));
+ derive_paths.push(parse_quote!(PartialOrd));
+ }
+
+ let step = if ord {
+ quote! {
+ impl ::std::iter::Step for #name {
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ <usize as ::std::iter::Step>::steps_between(
+ &Self::index(*start),
+ &Self::index(*end),
+ )
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, u: usize) -> Option<Self> {
+ Self::index(start).checked_add(u).map(Self::from_usize)
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, u: usize) -> Option<Self> {
+ Self::index(start).checked_sub(u).map(Self::from_usize)
+ }
+ }
+
+ // Safety: The implementation of `Step` upholds all invariants.
+ unsafe impl ::std::iter::TrustedStep for #name {}
+ }
+ } else {
+ quote! {}
+ };
+
+ let debug_impl = match debug_format {
+ DebugFormat::Custom => quote! {},
+ DebugFormat::Format(format) => {
+ quote! {
+ impl ::std::fmt::Debug for #name {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ write!(fmt, #format, self.as_u32())
+ }
+ }
+ }
+ }
+ };
+
+ Ok(Self(quote! {
+ #(#attrs)*
+ #[derive(Clone, Copy, PartialEq, Eq, Hash, #(#derive_paths),*)]
+ #[rustc_layout_scalar_valid_range_end(#max)]
+ #[rustc_pass_by_value]
+ #vis struct #name {
+ private: u32,
+ }
+
+ #(#consts)*
+
+ impl #name {
+ /// Maximum value the index can take, as a `u32`.
+ #vis const MAX_AS_U32: u32 = #max;
+
+ /// Maximum value the index can take.
+ #vis const MAX: Self = Self::from_u32(#max);
+
+ /// Creates a new index from a given `usize`.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `value` exceeds `MAX`.
+ #[inline]
+ #vis const fn from_usize(value: usize) -> Self {
+ assert!(value <= (#max as usize));
+ // SAFETY: We just checked that `value <= max`.
+ unsafe {
+ Self::from_u32_unchecked(value as u32)
+ }
+ }
+
+ /// Creates a new index from a given `u32`.
+ ///
+ /// # Panics
+ ///
+ /// Will panic if `value` exceeds `MAX`.
+ #[inline]
+ #vis const fn from_u32(value: u32) -> Self {
+ assert!(value <= #max);
+ // SAFETY: We just checked that `value <= max`.
+ unsafe {
+ Self::from_u32_unchecked(value)
+ }
+ }
+
+ /// Creates a new index from a given `u32`.
+ ///
+ /// # Safety
+ ///
+ /// The provided value must be less than or equal to the maximum value for the newtype.
+            /// Providing a value outside this range results in undefined behavior due to layout restrictions.
+ ///
+ /// Prefer using `from_u32`.
+ #[inline]
+ #vis const unsafe fn from_u32_unchecked(value: u32) -> Self {
+ Self { private: value }
+ }
+
+ /// Extracts the value of this index as a `usize`.
+ #[inline]
+ #vis const fn index(self) -> usize {
+ self.as_usize()
+ }
+
+ /// Extracts the value of this index as a `u32`.
+ #[inline]
+ #vis const fn as_u32(self) -> u32 {
+ self.private
+ }
+
+ /// Extracts the value of this index as a `usize`.
+ #[inline]
+ #vis const fn as_usize(self) -> usize {
+ self.as_u32() as usize
+ }
+ }
+
+ impl std::ops::Add<usize> for #name {
+ type Output = Self;
+
+ fn add(self, other: usize) -> Self {
+ Self::from_usize(self.index() + other)
+ }
+ }
+
+ impl rustc_index::vec::Idx for #name {
+ #[inline]
+ fn new(value: usize) -> Self {
+ Self::from_usize(value)
+ }
+
+ #[inline]
+ fn index(self) -> usize {
+ self.as_usize()
+ }
+ }
+
+ #step
+
+ impl From<#name> for u32 {
+ #[inline]
+ fn from(v: #name) -> u32 {
+ v.as_u32()
+ }
+ }
+
+ impl From<#name> for usize {
+ #[inline]
+ fn from(v: #name) -> usize {
+ v.as_usize()
+ }
+ }
+
+ impl From<usize> for #name {
+ #[inline]
+ fn from(value: usize) -> Self {
+ Self::from_usize(value)
+ }
+ }
+
+ impl From<u32> for #name {
+ #[inline]
+ fn from(value: u32) -> Self {
+ Self::from_u32(value)
+ }
+ }
+
+ #encodable_impls
+ #debug_impl
+ }))
+ }
+}
+
+pub fn newtype(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+ let input = parse_macro_input!(input as Newtype);
+ input.0.into()
+}
diff --git a/compiler/rustc_macros/src/query.rs b/compiler/rustc_macros/src/query.rs
new file mode 100644
index 000000000..a69126533
--- /dev/null
+++ b/compiler/rustc_macros/src/query.rs
@@ -0,0 +1,566 @@
+use proc_macro::TokenStream;
+use proc_macro2::{Delimiter, TokenTree};
+use quote::{quote, quote_spanned};
+use syn::parse::{Parse, ParseStream, Result};
+use syn::punctuated::Punctuated;
+use syn::spanned::Spanned;
+use syn::{
+ braced, parenthesized, parse_macro_input, parse_quote, AttrStyle, Attribute, Block, Error,
+ Expr, Ident, ReturnType, Token, Type,
+};
+
+mod kw {
+ syn::custom_keyword!(query);
+}
+
+/// Ident or a wildcard `_`.
+struct IdentOrWild(Ident);
+
+impl Parse for IdentOrWild {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ Ok(if input.peek(Token![_]) {
+ let underscore = input.parse::<Token![_]>()?;
+ IdentOrWild(Ident::new("_", underscore.span()))
+ } else {
+ IdentOrWild(input.parse()?)
+ })
+ }
+}
+
+/// A modifier for a query
+enum QueryModifier {
+ /// The description of the query.
+ Desc(Option<Ident>, Punctuated<Expr, Token![,]>),
+
+ /// Use this type for the in-memory cache.
+ Storage(Type),
+
+    /// Cache the query to disk if the `Block` returns true.
+ Cache(Option<IdentOrWild>, Block),
+
+ /// Custom code to load the query from disk.
+ LoadCached(Ident, Ident, Block),
+
+    /// A cycle error for this query aborts the compilation with a fatal error.
+    FatalCycle(Ident),
+
+    /// A cycle error results in a `delay_bug` call.
+    CycleDelayBug(Ident),
+
+    /// Don't hash the result; instead, just mark the query red if it runs.
+ NoHash(Ident),
+
+ /// Generate a dep node based on the dependencies of the query
+ Anon(Ident),
+
+ /// Always evaluate the query, ignoring its dependencies
+ EvalAlways(Ident),
+
+ /// Use a separate query provider for local and extern crates
+ SeparateProvideExtern(Ident),
+
+ /// Always remap the ParamEnv's constness before hashing and passing to the query provider
+ RemapEnvConstness(Ident),
+}
+
+impl Parse for QueryModifier {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ let modifier: Ident = input.parse()?;
+ if modifier == "desc" {
+ // Parse a description modifier like:
+ // `desc { |tcx| "foo {}", tcx.item_path(key) }`
+ let attr_content;
+ braced!(attr_content in input);
+ let tcx = if attr_content.peek(Token![|]) {
+ attr_content.parse::<Token![|]>()?;
+ let tcx = attr_content.parse()?;
+ attr_content.parse::<Token![|]>()?;
+ Some(tcx)
+ } else {
+ None
+ };
+ let desc = attr_content.parse_terminated(Expr::parse)?;
+ Ok(QueryModifier::Desc(tcx, desc))
+ } else if modifier == "cache_on_disk_if" {
+ // Parse a cache modifier like:
+            // `cache_on_disk_if(tcx) { key.is_local() }`
+ let has_args = if let TokenTree::Group(group) = input.fork().parse()? {
+ group.delimiter() == Delimiter::Parenthesis
+ } else {
+ false
+ };
+ let args = if has_args {
+ let args;
+ parenthesized!(args in input);
+ let tcx = args.parse()?;
+ Some(tcx)
+ } else {
+ None
+ };
+ let block = input.parse()?;
+ Ok(QueryModifier::Cache(args, block))
+ } else if modifier == "load_cached" {
+ // Parse a load_cached modifier like:
+ // `load_cached(tcx, id) { tcx.on_disk_cache.try_load_query_result(tcx, id) }`
+ let args;
+ parenthesized!(args in input);
+ let tcx = args.parse()?;
+ args.parse::<Token![,]>()?;
+ let id = args.parse()?;
+ let block = input.parse()?;
+ Ok(QueryModifier::LoadCached(tcx, id, block))
+ } else if modifier == "storage" {
+ let args;
+ parenthesized!(args in input);
+ let ty = args.parse()?;
+ Ok(QueryModifier::Storage(ty))
+ } else if modifier == "fatal_cycle" {
+ Ok(QueryModifier::FatalCycle(modifier))
+ } else if modifier == "cycle_delay_bug" {
+ Ok(QueryModifier::CycleDelayBug(modifier))
+ } else if modifier == "no_hash" {
+ Ok(QueryModifier::NoHash(modifier))
+ } else if modifier == "anon" {
+ Ok(QueryModifier::Anon(modifier))
+ } else if modifier == "eval_always" {
+ Ok(QueryModifier::EvalAlways(modifier))
+ } else if modifier == "separate_provide_extern" {
+ Ok(QueryModifier::SeparateProvideExtern(modifier))
+ } else if modifier == "remap_env_constness" {
+ Ok(QueryModifier::RemapEnvConstness(modifier))
+ } else {
+ Err(Error::new(modifier.span(), "unknown query modifier"))
+ }
+ }
+}
+
+/// Ensures only doc comment attributes are used
+fn check_attributes(attrs: Vec<Attribute>) -> Result<Vec<Attribute>> {
+ let inner = |attr: Attribute| {
+ if !attr.path.is_ident("doc") {
+ Err(Error::new(attr.span(), "attributes not supported on queries"))
+ } else if attr.style != AttrStyle::Outer {
+ Err(Error::new(
+ attr.span(),
+ "attributes must be outer attributes (`///`), not inner attributes",
+ ))
+ } else {
+ Ok(attr)
+ }
+ };
+ attrs.into_iter().map(inner).collect()
+}
+
+/// A compiler query. `query ... { ... }`
+struct Query {
+ doc_comments: Vec<Attribute>,
+ modifiers: List<QueryModifier>,
+ name: Ident,
+ key: IdentOrWild,
+ arg: Type,
+ result: ReturnType,
+}
+
+impl Parse for Query {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ let doc_comments = check_attributes(input.call(Attribute::parse_outer)?)?;
+
+ // Parse the query declaration. Like `query type_of(key: DefId) -> Ty<'tcx>`
+ input.parse::<kw::query>()?;
+ let name: Ident = input.parse()?;
+ let arg_content;
+ parenthesized!(arg_content in input);
+ let key = arg_content.parse()?;
+ arg_content.parse::<Token![:]>()?;
+ let arg = arg_content.parse()?;
+ let result = input.parse()?;
+
+ // Parse the query modifiers
+ let content;
+ braced!(content in input);
+ let modifiers = content.parse()?;
+
+ Ok(Query { doc_comments, modifiers, name, key, arg, result })
+ }
+}
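+
+// An illustrative (hypothetical) query as this parser accepts it:
+//
+//     /// Doc comments are collected and attached to the generated query.
+//     query type_of(key: DefId) -> Ty<'tcx> {
+//         desc { |tcx| "computing type of `{}`", tcx.def_path_str(key) }
+//         cache_on_disk_if { key.is_local() }
+//     }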
+
+/// A type used to greedily parse another type until the input is empty.
+struct List<T>(Vec<T>);
+
+impl<T: Parse> Parse for List<T> {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ let mut list = Vec::new();
+ while !input.is_empty() {
+ list.push(input.parse()?);
+ }
+ Ok(List(list))
+ }
+}
+
+struct QueryModifiers {
+ /// The description of the query.
+ desc: (Option<Ident>, Punctuated<Expr, Token![,]>),
+
+ /// Use this type for the in-memory cache.
+ storage: Option<Type>,
+
+ /// Cache the query to disk if the `Block` returns true.
+ cache: Option<(Option<IdentOrWild>, Block)>,
+
+ /// Custom code to load the query from disk.
+ load_cached: Option<(Ident, Ident, Block)>,
+
+    /// A cycle error for this query aborts the compilation with a fatal error.
+    fatal_cycle: Option<Ident>,
+
+    /// A cycle error results in a `delay_bug` call.
+    cycle_delay_bug: Option<Ident>,
+
+    /// Don't hash the result; instead, just mark the query red if it runs.
+ no_hash: Option<Ident>,
+
+ /// Generate a dep node based on the dependencies of the query
+ anon: Option<Ident>,
+
+    /// Always evaluate the query, ignoring its dependencies
+ eval_always: Option<Ident>,
+
+ /// Use a separate query provider for local and extern crates
+ separate_provide_extern: Option<Ident>,
+
+ /// Always remap the ParamEnv's constness before hashing.
+ remap_env_constness: Option<Ident>,
+}
+
+/// Process query modifiers into a struct, erroring on duplicates
+fn process_modifiers(query: &mut Query) -> QueryModifiers {
+ let mut load_cached = None;
+ let mut storage = None;
+ let mut cache = None;
+ let mut desc = None;
+ let mut fatal_cycle = None;
+ let mut cycle_delay_bug = None;
+ let mut no_hash = None;
+ let mut anon = None;
+ let mut eval_always = None;
+ let mut separate_provide_extern = None;
+ let mut remap_env_constness = None;
+ for modifier in query.modifiers.0.drain(..) {
+ match modifier {
+ QueryModifier::LoadCached(tcx, id, block) => {
+ if load_cached.is_some() {
+ panic!("duplicate modifier `load_cached` for query `{}`", query.name);
+ }
+ load_cached = Some((tcx, id, block));
+ }
+ QueryModifier::Storage(ty) => {
+ if storage.is_some() {
+ panic!("duplicate modifier `storage` for query `{}`", query.name);
+ }
+ storage = Some(ty);
+ }
+ QueryModifier::Cache(args, expr) => {
+ if cache.is_some() {
+ panic!("duplicate modifier `cache` for query `{}`", query.name);
+ }
+ cache = Some((args, expr));
+ }
+ QueryModifier::Desc(tcx, list) => {
+ if desc.is_some() {
+ panic!("duplicate modifier `desc` for query `{}`", query.name);
+ }
+ // If there are no doc-comments, give at least some idea of what
+ // it does by showing the query description.
+ if query.doc_comments.is_empty() {
+ use ::syn::*;
+ let mut list = list.iter();
+ let format_str: String = match list.next() {
+ Some(&Expr::Lit(ExprLit { lit: Lit::Str(ref lit_str), .. })) => {
+                        lit_str.value().replace("`{}`", "{}") // We add them back later anyway, for consistency
+ }
+ _ => panic!("Expected a string literal"),
+ };
+ let mut fmt_fragments = format_str.split("{}");
+ let mut doc_string = fmt_fragments.next().unwrap().to_string();
+ list.map(::quote::ToTokens::to_token_stream).zip(fmt_fragments).for_each(
+ |(tts, next_fmt_fragment)| {
+ use ::core::fmt::Write;
+ write!(
+ &mut doc_string,
+ " `{}` {}",
+ tts.to_string().replace(" . ", "."),
+ next_fmt_fragment,
+ )
+ .unwrap();
+ },
+ );
+ let doc_string = format!(
+ "[query description - consider adding a doc-comment!] {}",
+ doc_string
+ );
+ let comment = parse_quote! {
+ #[doc = #doc_string]
+ };
+ query.doc_comments.push(comment);
+ }
+ desc = Some((tcx, list));
+ }
+ QueryModifier::FatalCycle(ident) => {
+ if fatal_cycle.is_some() {
+ panic!("duplicate modifier `fatal_cycle` for query `{}`", query.name);
+ }
+ fatal_cycle = Some(ident);
+ }
+ QueryModifier::CycleDelayBug(ident) => {
+ if cycle_delay_bug.is_some() {
+ panic!("duplicate modifier `cycle_delay_bug` for query `{}`", query.name);
+ }
+ cycle_delay_bug = Some(ident);
+ }
+ QueryModifier::NoHash(ident) => {
+ if no_hash.is_some() {
+ panic!("duplicate modifier `no_hash` for query `{}`", query.name);
+ }
+ no_hash = Some(ident);
+ }
+ QueryModifier::Anon(ident) => {
+ if anon.is_some() {
+ panic!("duplicate modifier `anon` for query `{}`", query.name);
+ }
+ anon = Some(ident);
+ }
+ QueryModifier::EvalAlways(ident) => {
+ if eval_always.is_some() {
+ panic!("duplicate modifier `eval_always` for query `{}`", query.name);
+ }
+ eval_always = Some(ident);
+ }
+ QueryModifier::SeparateProvideExtern(ident) => {
+ if separate_provide_extern.is_some() {
+ panic!(
+ "duplicate modifier `separate_provide_extern` for query `{}`",
+ query.name
+ );
+ }
+ separate_provide_extern = Some(ident);
+ }
+ QueryModifier::RemapEnvConstness(ident) => {
+ if remap_env_constness.is_some() {
+ panic!("duplicate modifier `remap_env_constness` for query `{}`", query.name);
+ }
+ remap_env_constness = Some(ident)
+ }
+ }
+ }
+ let desc = desc.unwrap_or_else(|| {
+ panic!("no description provided for query `{}`", query.name);
+ });
+ QueryModifiers {
+ load_cached,
+ storage,
+ cache,
+ desc,
+ fatal_cycle,
+ cycle_delay_bug,
+ no_hash,
+ anon,
+ eval_always,
+ separate_provide_extern,
+ remap_env_constness,
+ }
+}
+
+/// Add the impl of `QueryDescription` for the query to `impls` if one is requested
+fn add_query_description_impl(
+ query: &Query,
+ modifiers: QueryModifiers,
+ impls: &mut proc_macro2::TokenStream,
+) {
+ let name = &query.name;
+ let key = &query.key.0;
+
+ // Find out if we should cache the query on disk
+ let cache = if let Some((args, expr)) = modifiers.cache.as_ref() {
+ let try_load_from_disk = if let Some((tcx, id, block)) = modifiers.load_cached.as_ref() {
+ // Use custom code to load the query from disk
+ quote! {
+ const TRY_LOAD_FROM_DISK: Option<fn(QueryCtxt<$tcx>, SerializedDepNodeIndex) -> Option<Self::Value>>
+ = Some(|#tcx, #id| { #block });
+ }
+ } else {
+ // Use the default code to load the query from disk
+ quote! {
+ const TRY_LOAD_FROM_DISK: Option<fn(QueryCtxt<$tcx>, SerializedDepNodeIndex) -> Option<Self::Value>>
+ = Some(|tcx, id| tcx.on_disk_cache().as_ref()?.try_load_query_result(*tcx, id));
+ }
+ };
+
+ let tcx = args
+ .as_ref()
+ .map(|t| {
+ let t = &t.0;
+ quote! { #t }
+ })
+ .unwrap_or_else(|| quote! { _ });
+ // expr is a `Block`, meaning that `{ #expr }` gets expanded
+ // to `{ { stmts... } }`, which triggers the `unused_braces` lint.
+ quote! {
+ #[allow(unused_variables, unused_braces)]
+ #[inline]
+ fn cache_on_disk(#tcx: TyCtxt<'tcx>, #key: &Self::Key) -> bool {
+ #expr
+ }
+
+ #try_load_from_disk
+ }
+ } else {
+ if modifiers.load_cached.is_some() {
+ panic!("load_cached modifier on query `{}` without a cache modifier", name);
+ }
+ quote! {
+ #[inline]
+ fn cache_on_disk(_: TyCtxt<'tcx>, _: &Self::Key) -> bool {
+ false
+ }
+
+ const TRY_LOAD_FROM_DISK: Option<fn(QueryCtxt<$tcx>, SerializedDepNodeIndex) -> Option<Self::Value>> = None;
+ }
+ };
+
+ let (tcx, desc) = modifiers.desc;
+ let tcx = tcx.as_ref().map_or_else(|| quote! { _ }, |t| quote! { #t });
+
+ let desc = quote! {
+ #[allow(unused_variables)]
+ fn describe(tcx: QueryCtxt<$tcx>, key: Self::Key) -> String {
+ let (#tcx, #key) = (*tcx, key);
+ ::rustc_middle::ty::print::with_no_trimmed_paths!(
+ format!(#desc)
+ )
+ }
+ };
+
+ impls.extend(quote! {
+ (#name<$tcx:tt>) => {
+ #desc
+ #cache
+ };
+ });
+}
+
+pub fn rustc_queries(input: TokenStream) -> TokenStream {
+ let queries = parse_macro_input!(input as List<Query>);
+
+ let mut query_stream = quote! {};
+ let mut query_description_stream = quote! {};
+ let mut dep_node_def_stream = quote! {};
+ let mut cached_queries = quote! {};
+
+ for mut query in queries.0 {
+ let modifiers = process_modifiers(&mut query);
+ let name = &query.name;
+ let arg = &query.arg;
+ let result_full = &query.result;
+ let result = match query.result {
+ ReturnType::Default => quote! { -> () },
+ _ => quote! { #result_full },
+ };
+
+ if modifiers.cache.is_some() {
+ cached_queries.extend(quote! {
+ #name,
+ });
+ }
+
+ let mut attributes = Vec::new();
+
+ // Pass on the fatal_cycle modifier
+ if let Some(fatal_cycle) = &modifiers.fatal_cycle {
+ attributes.push(quote! { (#fatal_cycle) });
+ };
+ // Pass on the storage modifier
+ if let Some(ref ty) = modifiers.storage {
+ let span = ty.span();
+ attributes.push(quote_spanned! {span=> (storage #ty) });
+ };
+ // Pass on the cycle_delay_bug modifier
+ if let Some(cycle_delay_bug) = &modifiers.cycle_delay_bug {
+ attributes.push(quote! { (#cycle_delay_bug) });
+ };
+ // Pass on the no_hash modifier
+ if let Some(no_hash) = &modifiers.no_hash {
+ attributes.push(quote! { (#no_hash) });
+ };
+ // Pass on the anon modifier
+ if let Some(anon) = &modifiers.anon {
+ attributes.push(quote! { (#anon) });
+ };
+ // Pass on the eval_always modifier
+ if let Some(eval_always) = &modifiers.eval_always {
+ attributes.push(quote! { (#eval_always) });
+ };
+ // Pass on the separate_provide_extern modifier
+ if let Some(separate_provide_extern) = &modifiers.separate_provide_extern {
+ attributes.push(quote! { (#separate_provide_extern) });
+ }
+ // Pass on the remap_env_constness modifier
+ if let Some(remap_env_constness) = &modifiers.remap_env_constness {
+ attributes.push(quote! { (#remap_env_constness) });
+ }
+
+ // This uses the span of the query definition for the commas,
+ // which can be important if we later encounter any ambiguity
+ // errors with any of the numerous macro_rules! macros that
+ // we use. Using the call-site span would result in a span pointing
+ // at the entire `rustc_queries!` invocation, which wouldn't
+ // be very useful.
+ let span = name.span();
+ let attribute_stream = quote_spanned! {span=> #(#attributes),*};
+ let doc_comments = query.doc_comments.iter();
+ // Add the query to the group
+ query_stream.extend(quote! {
+ #(#doc_comments)*
+ [#attribute_stream] fn #name(#arg) #result,
+ });
+
+ // Create a dep node for the query
+ dep_node_def_stream.extend(quote! {
+ [#attribute_stream] #name(#arg),
+ });
+
+ add_query_description_impl(&query, modifiers, &mut query_description_stream);
+ }
+
+ TokenStream::from(quote! {
+ #[macro_export]
+ macro_rules! rustc_query_append {
+ ([$($macro:tt)*][$($other:tt)*]) => {
+ $($macro)* {
+ $($other)*
+
+ #query_stream
+
+ }
+ }
+ }
+ macro_rules! rustc_dep_node_append {
+ ([$($macro:tt)*][$($other:tt)*]) => {
+ $($macro)*(
+ $($other)*
+
+ #dep_node_def_stream
+ );
+ }
+ }
+ #[macro_export]
+ macro_rules! rustc_cached_queries {
+ ($($macro:tt)*) => {
+ $($macro)*(#cached_queries);
+ }
+ }
+ #[macro_export]
+ macro_rules! rustc_query_description {
+ #query_description_stream
+ }
+ })
+}
diff --git a/compiler/rustc_macros/src/serialize.rs b/compiler/rustc_macros/src/serialize.rs
new file mode 100644
index 000000000..82e6972d0
--- /dev/null
+++ b/compiler/rustc_macros/src/serialize.rs
@@ -0,0 +1,224 @@
+use proc_macro2::TokenStream;
+use quote::{quote, quote_spanned};
+use syn::parse_quote;
+use syn::spanned::Spanned;
+
+pub fn type_decodable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ let decoder_ty = quote! { __D };
+ if !s.ast().generics.lifetimes().any(|lt| lt.lifetime.ident == "tcx") {
+ s.add_impl_generic(parse_quote! { 'tcx });
+ }
+ s.add_impl_generic(parse_quote! {#decoder_ty: ::rustc_type_ir::codec::TyDecoder<I = ::rustc_middle::ty::TyCtxt<'tcx>>});
+ s.add_bounds(synstructure::AddBounds::Generics);
+
+ decodable_body(s, decoder_ty)
+}
+
+pub fn meta_decodable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ if !s.ast().generics.lifetimes().any(|lt| lt.lifetime.ident == "tcx") {
+ s.add_impl_generic(parse_quote! { 'tcx });
+ }
+ s.add_impl_generic(parse_quote! { '__a });
+ let decoder_ty = quote! { DecodeContext<'__a, 'tcx> };
+ s.add_bounds(synstructure::AddBounds::Generics);
+
+ decodable_body(s, decoder_ty)
+}
+
+pub fn decodable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ let decoder_ty = quote! { __D };
+ s.add_impl_generic(parse_quote! {#decoder_ty: ::rustc_serialize::Decoder});
+ s.add_bounds(synstructure::AddBounds::Generics);
+
+ decodable_body(s, decoder_ty)
+}
+
+fn decodable_body(
+ s: synstructure::Structure<'_>,
+ decoder_ty: TokenStream,
+) -> proc_macro2::TokenStream {
+ if let syn::Data::Union(_) = s.ast().data {
+ panic!("cannot derive on union")
+ }
+ let ty_name = s.ast().ident.to_string();
+ let decode_body = match s.variants() {
+ [vi] => vi.construct(|field, _index| decode_field(field)),
+ variants => {
+ let match_inner: TokenStream = variants
+ .iter()
+ .enumerate()
+ .map(|(idx, vi)| {
+ let construct = vi.construct(|field, _index| decode_field(field));
+ quote! { #idx => { #construct } }
+ })
+ .collect();
+ let message = format!(
+ "invalid enum variant tag while decoding `{}`, expected 0..{}",
+ ty_name,
+ variants.len()
+ );
+ quote! {
+ match ::rustc_serialize::Decoder::read_usize(__decoder) {
+ #match_inner
+ _ => panic!(#message),
+ }
+ }
+ }
+ };
+
+ s.bound_impl(
+ quote!(::rustc_serialize::Decodable<#decoder_ty>),
+ quote! {
+ fn decode(__decoder: &mut #decoder_ty) -> Self {
+ #decode_body
+ }
+ },
+ )
+}
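+
+// Illustrative sketch of what `decodable_body` produces for a hypothetical two-variant
+// enum `E { A, B(u32) }`: `decode` reads a `usize` tag and matches
+// `0 => E::A`, `1 => E::B(::rustc_serialize::Decodable::decode(__decoder))`, panicking
+// on any other tag with "invalid enum variant tag while decoding `E`, expected 0..2".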
+
+fn decode_field(field: &syn::Field) -> proc_macro2::TokenStream {
+ let field_span = field.ident.as_ref().map_or(field.ty.span(), |ident| ident.span());
+
+ let decode_inner_method = if let syn::Type::Reference(_) = field.ty {
+ quote! { ::rustc_middle::ty::codec::RefDecodable::decode }
+ } else {
+ quote! { ::rustc_serialize::Decodable::decode }
+ };
+ let __decoder = quote! { __decoder };
+ // Use the span of the field for the method call, so
+ // that backtraces will point to the field.
+ quote_spanned! {field_span=> #decode_inner_method(#__decoder) }
+}
+
+pub fn type_encodable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ if !s.ast().generics.lifetimes().any(|lt| lt.lifetime.ident == "tcx") {
+ s.add_impl_generic(parse_quote! {'tcx});
+ }
+ let encoder_ty = quote! { __E };
+ s.add_impl_generic(parse_quote! {#encoder_ty: ::rustc_type_ir::codec::TyEncoder<I = ::rustc_middle::ty::TyCtxt<'tcx>>});
+ s.add_bounds(synstructure::AddBounds::Generics);
+
+ encodable_body(s, encoder_ty, false)
+}
+
+pub fn meta_encodable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ if !s.ast().generics.lifetimes().any(|lt| lt.lifetime.ident == "tcx") {
+ s.add_impl_generic(parse_quote! {'tcx});
+ }
+ s.add_impl_generic(parse_quote! { '__a });
+ let encoder_ty = quote! { EncodeContext<'__a, 'tcx> };
+ s.add_bounds(synstructure::AddBounds::Generics);
+
+ encodable_body(s, encoder_ty, true)
+}
+
+pub fn encodable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ let encoder_ty = quote! { __E };
+ s.add_impl_generic(parse_quote! { #encoder_ty: ::rustc_serialize::Encoder});
+ s.add_bounds(synstructure::AddBounds::Generics);
+
+ encodable_body(s, encoder_ty, false)
+}
+
+fn encodable_body(
+ mut s: synstructure::Structure<'_>,
+ encoder_ty: TokenStream,
+ allow_unreachable_code: bool,
+) -> proc_macro2::TokenStream {
+ if let syn::Data::Union(_) = s.ast().data {
+ panic!("cannot derive on union")
+ }
+
+ s.bind_with(|binding| {
+ // Handle the lack of a blanket reference impl.
+ if let syn::Type::Reference(_) = binding.ast().ty {
+ synstructure::BindStyle::Move
+ } else {
+ synstructure::BindStyle::Ref
+ }
+ });
+
+ let encode_body = match s.variants() {
+ [_] => {
+ let encode_inner = s.each_variant(|vi| {
+ vi.bindings()
+ .iter()
+ .map(|binding| {
+ let bind_ident = &binding.binding;
+ let result = quote! {
+ ::rustc_serialize::Encodable::<#encoder_ty>::encode(
+ #bind_ident,
+ __encoder,
+ );
+ };
+ result
+ })
+ .collect::<TokenStream>()
+ });
+ quote! {
+ match *self { #encode_inner }
+ }
+ }
+ _ => {
+ let mut variant_idx = 0usize;
+ let encode_inner = s.each_variant(|vi| {
+ let encode_fields: TokenStream = vi
+ .bindings()
+ .iter()
+ .map(|binding| {
+ let bind_ident = &binding.binding;
+ let result = quote! {
+ ::rustc_serialize::Encodable::<#encoder_ty>::encode(
+ #bind_ident,
+ __encoder,
+ );
+ };
+ result
+ })
+ .collect();
+
+ let result = if !vi.bindings().is_empty() {
+ quote! {
+ ::rustc_serialize::Encoder::emit_enum_variant(
+ __encoder,
+ #variant_idx,
+ |__encoder| { #encode_fields }
+ )
+ }
+ } else {
+ quote! {
+ ::rustc_serialize::Encoder::emit_fieldless_enum_variant::<#variant_idx>(
+ __encoder,
+ )
+ }
+ };
+ variant_idx += 1;
+ result
+ });
+ quote! {
+ match *self {
+ #encode_inner
+ }
+ }
+ }
+ };
+
+ let lints = if allow_unreachable_code {
+ quote! { #![allow(unreachable_code)] }
+ } else {
+ quote! {}
+ };
+
+ s.bound_impl(
+ quote!(::rustc_serialize::Encodable<#encoder_ty>),
+ quote! {
+ fn encode(
+ &self,
+ __encoder: &mut #encoder_ty,
+ ) {
+ #lints
+ #encode_body
+ }
+ },
+ )
+}
diff --git a/compiler/rustc_macros/src/symbols.rs b/compiler/rustc_macros/src/symbols.rs
new file mode 100644
index 000000000..1b245f2a7
--- /dev/null
+++ b/compiler/rustc_macros/src/symbols.rs
@@ -0,0 +1,236 @@
+//! Proc macro which builds the Symbol table
+//!
+//! # Debugging
+//!
+//! Since this proc-macro does some non-trivial work, debugging it is important.
+//! This proc-macro can be invoked as an ordinary unit test, like so:
+//!
+//! ```bash
+//! cd compiler/rustc_macros
+//! cargo test symbols::test_symbols -- --nocapture
+//! ```
+//!
+//! This unit test finds the `symbols!` invocation in `compiler/rustc_span/src/symbol.rs`
+//! and runs it. It verifies that the output token stream can be parsed as valid module
+//! items and that no errors were produced.
+//!
+//! You can also view the generated code by using `cargo expand`:
+//!
+//! ```bash
+//! cargo install cargo-expand # this is necessary only once
+//! cd compiler/rustc_span
+//! cargo expand > /tmp/rustc_span.rs # it's a big file
+//! ```
+
+use proc_macro2::{Span, TokenStream};
+use quote::quote;
+use std::collections::HashMap;
+use syn::parse::{Parse, ParseStream, Result};
+use syn::{braced, punctuated::Punctuated, Ident, LitStr, Token};
+
+#[cfg(test)]
+mod tests;
+
+mod kw {
+ syn::custom_keyword!(Keywords);
+ syn::custom_keyword!(Symbols);
+}
+
+struct Keyword {
+ name: Ident,
+ value: LitStr,
+}
+
+impl Parse for Keyword {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ let name = input.parse()?;
+ input.parse::<Token![:]>()?;
+ let value = input.parse()?;
+
+ Ok(Keyword { name, value })
+ }
+}
+
+struct Symbol {
+ name: Ident,
+ value: Option<LitStr>,
+}
+
+impl Parse for Symbol {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ let name = input.parse()?;
+ let value = match input.parse::<Token![:]>() {
+ Ok(_) => Some(input.parse()?),
+ Err(_) => None,
+ };
+
+ Ok(Symbol { name, value })
+ }
+}
+
+struct Input {
+ keywords: Punctuated<Keyword, Token![,]>,
+ symbols: Punctuated<Symbol, Token![,]>,
+}
+
+impl Parse for Input {
+ fn parse(input: ParseStream<'_>) -> Result<Self> {
+ input.parse::<kw::Keywords>()?;
+ let content;
+ braced!(content in input);
+ let keywords = Punctuated::parse_terminated(&content)?;
+
+ input.parse::<kw::Symbols>()?;
+ let content;
+ braced!(content in input);
+ let symbols = Punctuated::parse_terminated(&content)?;
+
+ Ok(Input { keywords, symbols })
+ }
+}
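+
+// The macro input this parser accepts looks like the following (an illustrative
+// fragment; see the tests module for more examples):
+//
+//     Keywords {
+//         Crate: "crate",
+//     }
+//     Symbols {
+//         aardvark,
+//         splat: "splat!",
+//     }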
+
+#[derive(Default)]
+struct Errors {
+ list: Vec<syn::Error>,
+}
+
+impl Errors {
+ fn error(&mut self, span: Span, message: String) {
+ self.list.push(syn::Error::new(span, message));
+ }
+}
+
+pub fn symbols(input: TokenStream) -> TokenStream {
+ let (mut output, errors) = symbols_with_errors(input);
+
+    // If we generated any errors, then report them as compile_error!() macro calls.
+ // This lets the errors point back to the most relevant span. It also allows us
+ // to report as many errors as we can during a single run.
+ output.extend(errors.into_iter().map(|e| e.to_compile_error()));
+
+ output
+}
+
+fn symbols_with_errors(input: TokenStream) -> (TokenStream, Vec<syn::Error>) {
+ let mut errors = Errors::default();
+
+ let input: Input = match syn::parse2(input) {
+ Ok(input) => input,
+ Err(e) => {
+ // This allows us to display errors at the proper span, while minimizing
+ // unrelated errors caused by bailing out (and not generating code).
+ errors.list.push(e);
+ Input { keywords: Default::default(), symbols: Default::default() }
+ }
+ };
+
+ let mut keyword_stream = quote! {};
+ let mut symbols_stream = quote! {};
+ let mut prefill_stream = quote! {};
+ let mut counter = 0u32;
+ let mut keys =
+ HashMap::<String, Span>::with_capacity(input.keywords.len() + input.symbols.len() + 10);
+ let mut prev_key: Option<(Span, String)> = None;
+
+ let mut check_dup = |span: Span, str: &str, errors: &mut Errors| {
+ if let Some(prev_span) = keys.get(str) {
+ errors.error(span, format!("Symbol `{}` is duplicated", str));
+ errors.error(*prev_span, "location of previous definition".to_string());
+ } else {
+ keys.insert(str.to_string(), span);
+ }
+ };
+
+ let mut check_order = |span: Span, str: &str, errors: &mut Errors| {
+ if let Some((prev_span, ref prev_str)) = prev_key {
+ if str < prev_str {
+ errors.error(span, format!("Symbol `{}` must precede `{}`", str, prev_str));
+ errors.error(prev_span, format!("location of previous symbol `{}`", prev_str));
+ }
+ }
+ prev_key = Some((span, str.to_string()));
+ };
+
+ // Generate the listed keywords.
+ for keyword in input.keywords.iter() {
+ let name = &keyword.name;
+ let value = &keyword.value;
+ let value_string = value.value();
+ check_dup(keyword.name.span(), &value_string, &mut errors);
+ prefill_stream.extend(quote! {
+ #value,
+ });
+ keyword_stream.extend(quote! {
+ pub const #name: Symbol = Symbol::new(#counter);
+ });
+ counter += 1;
+ }
+
+ // Generate the listed symbols.
+ for symbol in input.symbols.iter() {
+ let name = &symbol.name;
+ let value = match &symbol.value {
+ Some(value) => value.value(),
+ None => name.to_string(),
+ };
+ check_dup(symbol.name.span(), &value, &mut errors);
+ check_order(symbol.name.span(), &name.to_string(), &mut errors);
+
+ prefill_stream.extend(quote! {
+ #value,
+ });
+ symbols_stream.extend(quote! {
+ pub const #name: Symbol = Symbol::new(#counter);
+ });
+ counter += 1;
+ }
+
+ // Generate symbols for the strings "0", "1", ..., "9".
+ let digits_base = counter;
+ counter += 10;
+ for n in 0..10 {
+ let n = n.to_string();
+ check_dup(Span::call_site(), &n, &mut errors);
+ prefill_stream.extend(quote! {
+ #n,
+ });
+ }
+ let _ = counter; // for future use
+
+ let output = quote! {
+ const SYMBOL_DIGITS_BASE: u32 = #digits_base;
+
+ #[doc(hidden)]
+ #[allow(non_upper_case_globals)]
+ mod kw_generated {
+ use super::Symbol;
+ #keyword_stream
+ }
+
+ #[allow(non_upper_case_globals)]
+ #[doc(hidden)]
+ pub mod sym_generated {
+ use super::Symbol;
+ #symbols_stream
+ }
+
+ impl Interner {
+ pub(crate) fn fresh() -> Self {
+ Interner::prefill(&[
+ #prefill_stream
+ ])
+ }
+ }
+ };
+
+ (output, errors.list)
+
+ // To see the generated code, use the "cargo expand" command.
+ // Do this once to install:
+ // cargo install cargo-expand
+ //
+ // Then, cd to rustc_span and run:
+ // cargo expand > /tmp/rustc_span_expanded.rs
+ //
+ // and read that file.
+}
diff --git a/compiler/rustc_macros/src/symbols/tests.rs b/compiler/rustc_macros/src/symbols/tests.rs
new file mode 100644
index 000000000..842d2a977
--- /dev/null
+++ b/compiler/rustc_macros/src/symbols/tests.rs
@@ -0,0 +1,102 @@
+use super::*;
+
+// This test is mainly here for interactive development. Use this test while
+// you're working on the proc-macro defined in this file.
+#[test]
+fn test_symbols() {
+ // We textually include the symbol.rs file, which contains the list of all
+ // symbols, keywords, and common words. Then we search for the
+ // `symbols! { ... }` call.
+
+ static SYMBOL_RS_FILE: &str = include_str!("../../../rustc_span/src/symbol.rs");
+
+ let file = syn::parse_file(SYMBOL_RS_FILE).unwrap();
+ let symbols_path: syn::Path = syn::parse_quote!(symbols);
+
+ let m: &syn::ItemMacro = file
+ .items
+ .iter()
+ .filter_map(|i| {
+ if let syn::Item::Macro(m) = i {
+ if m.mac.path == symbols_path { Some(m) } else { None }
+ } else {
+ None
+ }
+ })
+ .next()
+ .expect("did not find `symbols!` macro invocation.");
+
+ let body_tokens = m.mac.tokens.clone();
+
+ test_symbols_macro(body_tokens, &[]);
+}
+
+fn test_symbols_macro(input: TokenStream, expected_errors: &[&str]) {
+ let (output, found_errors) = symbols_with_errors(input);
+
+ // It should always parse.
+ let _parsed_file = syn::parse2::<syn::File>(output).unwrap();
+
+ assert_eq!(
+ found_errors.len(),
+ expected_errors.len(),
+ "Macro generated a different number of errors than expected"
+ );
+
+ for (found_error, &expected_error) in found_errors.iter().zip(expected_errors) {
+ let found_error_str = format!("{}", found_error);
+ assert_eq!(found_error_str, expected_error);
+ }
+}
+
+#[test]
+fn check_dup_keywords() {
+ let input = quote! {
+ Keywords {
+ Crate: "crate",
+ Crate: "crate",
+ }
+ Symbols {}
+ };
+ test_symbols_macro(input, &["Symbol `crate` is duplicated", "location of previous definition"]);
+}
+
+#[test]
+fn check_dup_symbol() {
+ let input = quote! {
+ Keywords {}
+ Symbols {
+ splat,
+ splat,
+ }
+ };
+ test_symbols_macro(input, &["Symbol `splat` is duplicated", "location of previous definition"]);
+}
+
+#[test]
+fn check_dup_symbol_and_keyword() {
+ let input = quote! {
+ Keywords {
+ Splat: "splat",
+ }
+ Symbols {
+ splat,
+ }
+ };
+ test_symbols_macro(input, &["Symbol `splat` is duplicated", "location of previous definition"]);
+}
+
+#[test]
+fn check_symbol_order() {
+ let input = quote! {
+ Keywords {}
+ Symbols {
+ zebra,
+ aardvark,
+ }
+ };
+ test_symbols_macro(
+ input,
+ &["Symbol `aardvark` must precede `zebra`", "location of previous symbol `zebra`"],
+ );
+}
diff --git a/compiler/rustc_macros/src/type_foldable.rs b/compiler/rustc_macros/src/type_foldable.rs
new file mode 100644
index 000000000..23e619221
--- /dev/null
+++ b/compiler/rustc_macros/src/type_foldable.rs
@@ -0,0 +1,36 @@
+use quote::quote;
+use syn::parse_quote;
+
+pub fn type_foldable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ if let syn::Data::Union(_) = s.ast().data {
+ panic!("cannot derive on union")
+ }
+
+ if !s.ast().generics.lifetimes().any(|lt| lt.lifetime.ident == "tcx") {
+ s.add_impl_generic(parse_quote! { 'tcx });
+ }
+
+ s.add_bounds(synstructure::AddBounds::Generics);
+ s.bind_with(|_| synstructure::BindStyle::Move);
+ let body_fold = s.each_variant(|vi| {
+ let bindings = vi.bindings();
+ vi.construct(|_, index| {
+ let bind = &bindings[index];
+ quote! {
+ ::rustc_middle::ty::fold::TypeFoldable::try_fold_with(#bind, __folder)?
+ }
+ })
+ });
+
+ s.bound_impl(
+ quote!(::rustc_middle::ty::fold::TypeFoldable<'tcx>),
+ quote! {
+ fn try_fold_with<__F: ::rustc_middle::ty::fold::FallibleTypeFolder<'tcx>>(
+ self,
+ __folder: &mut __F
+ ) -> Result<Self, __F::Error> {
+ Ok(match self { #body_fold })
+ }
+ },
+ )
+}
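+
+// Illustrative sketch of the expansion: for a hypothetical `struct Two<'tcx>(Ty<'tcx>, Ty<'tcx>)`,
+// the generated `try_fold_with` rebuilds the value as
+// `Two(TypeFoldable::try_fold_with(__binding_0, __folder)?, ...)` and wraps it in `Ok`.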
diff --git a/compiler/rustc_macros/src/type_visitable.rs b/compiler/rustc_macros/src/type_visitable.rs
new file mode 100644
index 000000000..14e6aa6e0
--- /dev/null
+++ b/compiler/rustc_macros/src/type_visitable.rs
@@ -0,0 +1,33 @@
+use quote::quote;
+use syn::parse_quote;
+
+pub fn type_visitable_derive(mut s: synstructure::Structure<'_>) -> proc_macro2::TokenStream {
+ if let syn::Data::Union(_) = s.ast().data {
+ panic!("cannot derive on union")
+ }
+
+ if !s.ast().generics.lifetimes().any(|lt| lt.lifetime.ident == "tcx") {
+ s.add_impl_generic(parse_quote! { 'tcx });
+ }
+
+ s.add_bounds(synstructure::AddBounds::Generics);
+ let body_visit = s.each(|bind| {
+ quote! {
+ ::rustc_middle::ty::visit::TypeVisitable::visit_with(#bind, __visitor)?;
+ }
+ });
+ s.bind_with(|_| synstructure::BindStyle::Move);
+
+ s.bound_impl(
+ quote!(::rustc_middle::ty::visit::TypeVisitable<'tcx>),
+ quote! {
+ fn visit_with<__V: ::rustc_middle::ty::visit::TypeVisitor<'tcx>>(
+ &self,
+ __visitor: &mut __V
+ ) -> ::std::ops::ControlFlow<__V::BreakTy> {
+ match *self { #body_visit }
+ ::std::ops::ControlFlow::CONTINUE
+ }
+ },
+ )
+}
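+
+// Illustrative sketch of the expansion: for the same hypothetical
+// `struct Two<'tcx>(Ty<'tcx>, Ty<'tcx>)`, the generated `visit_with` runs
+// `TypeVisitable::visit_with(__binding_0, __visitor)?;` for each field and then
+// returns `::std::ops::ControlFlow::CONTINUE`.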
diff --git a/compiler/rustc_metadata/Cargo.toml b/compiler/rustc_metadata/Cargo.toml
new file mode 100644
index 000000000..2c5db9d8b
--- /dev/null
+++ b/compiler/rustc_metadata/Cargo.toml
@@ -0,0 +1,31 @@
+[package]
+name = "rustc_metadata"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+libloading = "0.7.1"
+odht = { version = "0.3.1", features = ["nightly"] }
+snap = "1"
+tracing = "0.1"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+tempfile = "3.2"
+rustc_middle = { path = "../rustc_middle" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_hir_pretty = { path = "../rustc_hir_pretty" }
+rustc_target = { path = "../rustc_target" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_expand = { path = "../rustc_expand" }
+rustc_span = { path = "../rustc_span" }
+rustc_session = { path = "../rustc_session" }
+rustc_type_ir = { path = "../rustc_type_ir" }
diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs
new file mode 100644
index 000000000..708d0b1fd
--- /dev/null
+++ b/compiler/rustc_metadata/src/creader.rs
@@ -0,0 +1,1041 @@
+//! Validates all used crates and extern libraries and loads their metadata
+
+use crate::locator::{CrateError, CrateLocator, CratePaths};
+use crate::rmeta::{CrateDep, CrateMetadata, CrateNumMap, CrateRoot, MetadataBlob};
+
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_ast::{self as ast, *};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::Lrc;
+use rustc_expand::base::SyntaxExtension;
+use rustc_hir::def_id::{CrateNum, LocalDefId, StableCrateId, LOCAL_CRATE};
+use rustc_hir::definitions::Definitions;
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{self, CrateType, ExternLocation};
+use rustc_session::cstore::{CrateDepKind, CrateSource, ExternCrate};
+use rustc_session::cstore::{ExternCrateSource, MetadataLoaderDyn};
+use rustc_session::lint;
+use rustc_session::output::validate_crate_name;
+use rustc_session::search_paths::PathKind;
+use rustc_session::Session;
+use rustc_span::edition::Edition;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::spec::{PanicStrategy, TargetTriple};
+
+use proc_macro::bridge::client::ProcMacro;
+use std::ops::Fn;
+use std::path::Path;
+use std::{cmp, env};
+use tracing::{debug, info};
+
+#[derive(Clone)]
+pub struct CStore {
+ metas: IndexVec<CrateNum, Option<Lrc<CrateMetadata>>>,
+ injected_panic_runtime: Option<CrateNum>,
+ /// This crate needs an allocator and either provides it itself, or finds it in a dependency.
+ /// If the above is true, then this field denotes the kind of the found allocator.
+ allocator_kind: Option<AllocatorKind>,
+ /// This crate has a `#[global_allocator]` item.
+ has_global_allocator: bool,
+
+ /// This map is used to verify we get no hash conflicts between
+ /// `StableCrateId` values.
+ pub(crate) stable_crate_ids: FxHashMap<StableCrateId, CrateNum>,
+
+ /// Unused externs of the crate
+ unused_externs: Vec<Symbol>,
+}
+
+impl std::fmt::Debug for CStore {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("CStore").finish_non_exhaustive()
+ }
+}
+
+pub struct CrateLoader<'a> {
+ // Immutable configuration.
+ sess: &'a Session,
+ metadata_loader: Box<MetadataLoaderDyn>,
+ local_crate_name: Symbol,
+ // Mutable output.
+ cstore: CStore,
+ used_extern_options: FxHashSet<Symbol>,
+}
+
+pub enum LoadedMacro {
+ MacroDef(ast::Item, Edition),
+ ProcMacro(SyntaxExtension),
+}
+
+pub(crate) struct Library {
+ pub source: CrateSource,
+ pub metadata: MetadataBlob,
+}
+
+enum LoadResult {
+ Previous(CrateNum),
+ Loaded(Library),
+}
+
+/// A reference to `CrateMetadata` that can also give access to the whole crate store when necessary.
+#[derive(Clone, Copy)]
+pub(crate) struct CrateMetadataRef<'a> {
+ pub cdata: &'a CrateMetadata,
+ pub cstore: &'a CStore,
+}
+
+impl std::ops::Deref for CrateMetadataRef<'_> {
+ type Target = CrateMetadata;
+
+ fn deref(&self) -> &Self::Target {
+ self.cdata
+ }
+}
+
+struct CrateDump<'a>(&'a CStore);
+
+impl<'a> std::fmt::Debug for CrateDump<'a> {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ writeln!(fmt, "resolved crates:")?;
+ for (cnum, data) in self.0.iter_crate_data() {
+ writeln!(fmt, " name: {}", data.name())?;
+ writeln!(fmt, " cnum: {}", cnum)?;
+ writeln!(fmt, " hash: {}", data.hash())?;
+ writeln!(fmt, " reqd: {:?}", data.dep_kind())?;
+ let CrateSource { dylib, rlib, rmeta } = data.source();
+ if let Some(dylib) = dylib {
+ writeln!(fmt, " dylib: {}", dylib.0.display())?;
+ }
+ if let Some(rlib) = rlib {
+ writeln!(fmt, " rlib: {}", rlib.0.display())?;
+ }
+ if let Some(rmeta) = rmeta {
+ writeln!(fmt, " rmeta: {}", rmeta.0.display())?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl CStore {
+ pub fn from_tcx(tcx: TyCtxt<'_>) -> &CStore {
+ tcx.cstore_untracked()
+ .as_any()
+ .downcast_ref::<CStore>()
+ .expect("`tcx.cstore` is not a `CStore`")
+ }
+
+ fn alloc_new_crate_num(&mut self) -> CrateNum {
+ self.metas.push(None);
+ CrateNum::new(self.metas.len() - 1)
+ }
+
+ pub fn has_crate_data(&self, cnum: CrateNum) -> bool {
+ self.metas[cnum].is_some()
+ }
+
+ pub(crate) fn get_crate_data(&self, cnum: CrateNum) -> CrateMetadataRef<'_> {
+ let cdata = self.metas[cnum]
+ .as_ref()
+ .unwrap_or_else(|| panic!("Failed to get crate data for {:?}", cnum));
+ CrateMetadataRef { cdata, cstore: self }
+ }
+
+ fn set_crate_data(&mut self, cnum: CrateNum, data: CrateMetadata) {
+ assert!(self.metas[cnum].is_none(), "Overwriting crate metadata entry");
+ self.metas[cnum] = Some(Lrc::new(data));
+ }
+
+ pub(crate) fn iter_crate_data(&self) -> impl Iterator<Item = (CrateNum, &CrateMetadata)> {
+ self.metas
+ .iter_enumerated()
+ .filter_map(|(cnum, data)| data.as_ref().map(|data| (cnum, &**data)))
+ }
+
+ fn push_dependencies_in_postorder(&self, deps: &mut Vec<CrateNum>, cnum: CrateNum) {
+ if !deps.contains(&cnum) {
+ let data = self.get_crate_data(cnum);
+ for &dep in data.dependencies().iter() {
+ if dep != cnum {
+ self.push_dependencies_in_postorder(deps, dep);
+ }
+ }
+
+ deps.push(cnum);
+ }
+ }
+
+ pub(crate) fn crate_dependencies_in_postorder(&self, cnum: CrateNum) -> Vec<CrateNum> {
+ let mut deps = Vec::new();
+ if cnum == LOCAL_CRATE {
+ for (cnum, _) in self.iter_crate_data() {
+ self.push_dependencies_in_postorder(&mut deps, cnum);
+ }
+ } else {
+ self.push_dependencies_in_postorder(&mut deps, cnum);
+ }
+ deps
+ }
+
+ fn crate_dependencies_in_reverse_postorder(&self, cnum: CrateNum) -> Vec<CrateNum> {
+ let mut deps = self.crate_dependencies_in_postorder(cnum);
+ deps.reverse();
+ deps
+ }
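+
+    // Illustrative (hypothetical graph): if crate `a` depends on `b` and `b`
+    // depends on `c`, `crate_dependencies_in_postorder(a)` yields `[c, b, a]`
+    // (dependencies before dependents), and the reverse-postorder variant
+    // yields `[a, b, c]`.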
+
+ pub(crate) fn injected_panic_runtime(&self) -> Option<CrateNum> {
+ self.injected_panic_runtime
+ }
+
+ pub(crate) fn allocator_kind(&self) -> Option<AllocatorKind> {
+ self.allocator_kind
+ }
+
+ pub(crate) fn has_global_allocator(&self) -> bool {
+ self.has_global_allocator
+ }
+
+ pub fn report_unused_deps(&self, tcx: TyCtxt<'_>) {
+ let json_unused_externs = tcx.sess.opts.json_unused_externs;
+
+        // We check the option before the `lint_level_at_node` call because
+        // that call mutates internal state, and performing it when the option
+        // is disabled makes some UI tests fail.
+ if !json_unused_externs.is_enabled() {
+ return;
+ }
+ let level = tcx
+ .lint_level_at_node(lint::builtin::UNUSED_CRATE_DEPENDENCIES, rustc_hir::CRATE_HIR_ID)
+ .0;
+ if level != lint::Level::Allow {
+ let unused_externs =
+ self.unused_externs.iter().map(|ident| ident.to_ident_string()).collect::<Vec<_>>();
+ let unused_externs = unused_externs.iter().map(String::as_str).collect::<Vec<&str>>();
+ tcx.sess.parse_sess.span_diagnostic.emit_unused_externs(
+ level,
+ json_unused_externs.is_loud(),
+ &unused_externs,
+ );
+ }
+ }
+}
+
+impl<'a> CrateLoader<'a> {
+ pub fn new(
+ sess: &'a Session,
+ metadata_loader: Box<MetadataLoaderDyn>,
+ local_crate_name: &str,
+ ) -> Self {
+ let mut stable_crate_ids = FxHashMap::default();
+ stable_crate_ids.insert(sess.local_stable_crate_id(), LOCAL_CRATE);
+
+ CrateLoader {
+ sess,
+ metadata_loader,
+ local_crate_name: Symbol::intern(local_crate_name),
+ cstore: CStore {
+ // We add an empty entry for LOCAL_CRATE (which maps to zero) in
+ // order to make array indices in `metas` match with the
+ // corresponding `CrateNum`. This first entry will always remain
+ // `None`.
+ metas: IndexVec::from_elem_n(None, 1),
+ injected_panic_runtime: None,
+ allocator_kind: None,
+ has_global_allocator: false,
+ stable_crate_ids,
+ unused_externs: Vec::new(),
+ },
+ used_extern_options: Default::default(),
+ }
+ }
+
+ pub fn cstore(&self) -> &CStore {
+ &self.cstore
+ }
+
+ pub fn into_cstore(self) -> CStore {
+ self.cstore
+ }
+
+ fn existing_match(&self, name: Symbol, hash: Option<Svh>, kind: PathKind) -> Option<CrateNum> {
+ for (cnum, data) in self.cstore.iter_crate_data() {
+ if data.name() != name {
+ tracing::trace!("{} did not match {}", data.name(), name);
+ continue;
+ }
+
+ match hash {
+ Some(hash) if hash == data.hash() => return Some(cnum),
+ Some(hash) => {
+ debug!("actual hash {} did not match expected {}", hash, data.hash());
+ continue;
+ }
+ None => {}
+ }
+
+ // When the hash is None we're dealing with a top-level dependency
+ // in which case we may have a specification on the command line for
+ // this library. Even though an upstream library may have loaded
+ // something of the same name, we have to make sure it was loaded
+ // from the exact same location as well.
+ //
+            // We're also sure to compare *paths*, not actual byte slices. The
+            // `source` stores paths that have been normalized, and these may
+            // differ from the strings given on the command line.
+ let source = self.cstore.get_crate_data(cnum).cdata.source();
+ if let Some(entry) = self.sess.opts.externs.get(name.as_str()) {
+ // Only use `--extern crate_name=path` here, not `--extern crate_name`.
+ if let Some(mut files) = entry.files() {
+ if files.any(|l| {
+ let l = l.canonicalized();
+ source.dylib.as_ref().map(|(p, _)| p) == Some(l)
+ || source.rlib.as_ref().map(|(p, _)| p) == Some(l)
+ || source.rmeta.as_ref().map(|(p, _)| p) == Some(l)
+ }) {
+ return Some(cnum);
+ }
+ }
+ continue;
+ }
+
+            // Alright, so we've gotten this far which means that `data` has the
+            // right name, we don't have a hash, and we don't have a `--extern`
+            // pointing at ourselves. We're still not quite done because we
+ // have to make sure that this crate was found in the crate lookup
+ // path (this is a top-level dependency) as we don't want to
+ // implicitly load anything inside the dependency lookup path.
+ let prev_kind = source
+ .dylib
+ .as_ref()
+ .or(source.rlib.as_ref())
+ .or(source.rmeta.as_ref())
+ .expect("No sources for crate")
+ .1;
+ if kind.matches(prev_kind) {
+ return Some(cnum);
+ } else {
+ debug!(
+ "failed to load existing crate {}; kind {:?} did not match prev_kind {:?}",
+ name, kind, prev_kind
+ );
+ }
+ }
+
+ None
+ }
+
+ fn verify_no_symbol_conflicts(&self, root: &CrateRoot) -> Result<(), CrateError> {
+ // Check for (potential) conflicts with the local crate
+ if self.sess.local_stable_crate_id() == root.stable_crate_id() {
+ return Err(CrateError::SymbolConflictsCurrent(root.name()));
+ }
+
+ // Check for conflicts with any crate loaded so far
+ for (_, other) in self.cstore.iter_crate_data() {
+ // Same stable crate id but different SVH
+ if other.stable_crate_id() == root.stable_crate_id() && other.hash() != root.hash() {
+ return Err(CrateError::SymbolConflictsOthers(root.name()));
+ }
+ }
+
+ Ok(())
+ }
+
+ fn verify_no_stable_crate_id_hash_conflicts(
+ &mut self,
+ root: &CrateRoot,
+ cnum: CrateNum,
+ ) -> Result<(), CrateError> {
+ if let Some(existing) = self.cstore.stable_crate_ids.insert(root.stable_crate_id(), cnum) {
+ let crate_name0 = root.name();
+ let crate_name1 = self.cstore.get_crate_data(existing).name();
+ return Err(CrateError::StableCrateIdCollision(crate_name0, crate_name1));
+ }
+
+ Ok(())
+ }
+
+ fn register_crate(
+ &mut self,
+ host_lib: Option<Library>,
+ root: Option<&CratePaths>,
+ lib: Library,
+ dep_kind: CrateDepKind,
+ name: Symbol,
+ ) -> Result<CrateNum, CrateError> {
+ let _prof_timer = self.sess.prof.generic_activity("metadata_register_crate");
+
+ let Library { source, metadata } = lib;
+ let crate_root = metadata.get_root();
+ let host_hash = host_lib.as_ref().map(|lib| lib.metadata.get_root().hash());
+
+ let private_dep =
+ self.sess.opts.externs.get(name.as_str()).map_or(false, |e| e.is_private_dep);
+
+ // Claim this crate number and cache it
+ let cnum = self.cstore.alloc_new_crate_num();
+
+ info!(
+ "register crate `{}` (cnum = {}. private_dep = {})",
+ crate_root.name(),
+ cnum,
+ private_dep
+ );
+
+ // Maintain a reference to the top most crate.
+ // Stash paths for top-most crate locally if necessary.
+ let crate_paths;
+ let root = if let Some(root) = root {
+ root
+ } else {
+ crate_paths = CratePaths::new(crate_root.name(), source.clone());
+ &crate_paths
+ };
+
+ let cnum_map = self.resolve_crate_deps(root, &crate_root, &metadata, cnum, dep_kind)?;
+
+ let raw_proc_macros = if crate_root.is_proc_macro_crate() {
+ let temp_root;
+ let (dlsym_source, dlsym_root) = match &host_lib {
+ Some(host_lib) => (&host_lib.source, {
+ temp_root = host_lib.metadata.get_root();
+ &temp_root
+ }),
+ None => (&source, &crate_root),
+ };
+ let dlsym_dylib = dlsym_source.dylib.as_ref().expect("no dylib for a proc-macro crate");
+ Some(self.dlsym_proc_macros(&dlsym_dylib.0, dlsym_root.stable_crate_id())?)
+ } else {
+ None
+ };
+
+ // Perform some verification *after* resolve_crate_deps() above is
+ // known to have been successful. It seems that - in error cases - the
+ // cstore can be in a temporarily invalid state between cnum allocation
+ // and dependency resolution and the verification code would produce
+ // ICEs in that case (see #83045).
+ self.verify_no_symbol_conflicts(&crate_root)?;
+ self.verify_no_stable_crate_id_hash_conflicts(&crate_root, cnum)?;
+
+ let crate_metadata = CrateMetadata::new(
+ self.sess,
+ &self.cstore,
+ metadata,
+ crate_root,
+ raw_proc_macros,
+ cnum,
+ cnum_map,
+ dep_kind,
+ source,
+ private_dep,
+ host_hash,
+ );
+
+ self.cstore.set_crate_data(cnum, crate_metadata);
+
+ Ok(cnum)
+ }
+
+ fn load_proc_macro<'b>(
+ &self,
+ locator: &mut CrateLocator<'b>,
+ path_kind: PathKind,
+ host_hash: Option<Svh>,
+ ) -> Result<Option<(LoadResult, Option<Library>)>, CrateError>
+ where
+ 'a: 'b,
+ {
+ // Use a new crate locator so trying to load a proc macro doesn't affect the error
+ // message we emit
+ let mut proc_macro_locator = locator.clone();
+
+ // Try to load a proc macro
+ proc_macro_locator.is_proc_macro = true;
+
+ // Load the proc macro crate for the target
+ let (locator, target_result) = if self.sess.opts.unstable_opts.dual_proc_macros {
+ proc_macro_locator.reset();
+ let result = match self.load(&mut proc_macro_locator)? {
+ Some(LoadResult::Previous(cnum)) => {
+ return Ok(Some((LoadResult::Previous(cnum), None)));
+ }
+ Some(LoadResult::Loaded(library)) => Some(LoadResult::Loaded(library)),
+ None => return Ok(None),
+ };
+ locator.hash = host_hash;
+            // Use the original locator when looking for the host proc macro
+            // crate: that lookup is the required one, so we want it to affect
+            // the error message.
+ (locator, result)
+ } else {
+ (&mut proc_macro_locator, None)
+ };
+
+ // Load the proc macro crate for the host
+
+ locator.reset();
+ locator.is_proc_macro = true;
+ locator.target = &self.sess.host;
+ locator.triple = TargetTriple::from_triple(config::host_triple());
+ locator.filesearch = self.sess.host_filesearch(path_kind);
+
+ let Some(host_result) = self.load(locator)? else {
+ return Ok(None);
+ };
+
+ Ok(Some(if self.sess.opts.unstable_opts.dual_proc_macros {
+ let host_result = match host_result {
+ LoadResult::Previous(..) => {
+ panic!("host and target proc macros must be loaded in lock-step")
+ }
+ LoadResult::Loaded(library) => library,
+ };
+ (target_result.unwrap(), Some(host_result))
+ } else {
+ (host_result, None)
+ }))
+ }
+
+ fn resolve_crate<'b>(
+ &'b mut self,
+ name: Symbol,
+ span: Span,
+ dep_kind: CrateDepKind,
+ ) -> Option<CrateNum> {
+ self.used_extern_options.insert(name);
+ match self.maybe_resolve_crate(name, dep_kind, None) {
+ Ok(cnum) => Some(cnum),
+ Err(err) => {
+ let missing_core =
+ self.maybe_resolve_crate(sym::core, CrateDepKind::Explicit, None).is_err();
+ err.report(&self.sess, span, missing_core);
+ None
+ }
+ }
+ }
+
+ fn maybe_resolve_crate<'b>(
+ &'b mut self,
+ name: Symbol,
+ mut dep_kind: CrateDepKind,
+ dep: Option<(&'b CratePaths, &'b CrateDep)>,
+ ) -> Result<CrateNum, CrateError> {
+ info!("resolving crate `{}`", name);
+ if !name.as_str().is_ascii() {
+ return Err(CrateError::NonAsciiName(name));
+ }
+ let (root, hash, host_hash, extra_filename, path_kind) = match dep {
+ Some((root, dep)) => (
+ Some(root),
+ Some(dep.hash),
+ dep.host_hash,
+ Some(&dep.extra_filename[..]),
+ PathKind::Dependency,
+ ),
+ None => (None, None, None, None, PathKind::Crate),
+ };
+ let result = if let Some(cnum) = self.existing_match(name, hash, path_kind) {
+ (LoadResult::Previous(cnum), None)
+ } else {
+ info!("falling back to a load");
+ let mut locator = CrateLocator::new(
+ self.sess,
+ &*self.metadata_loader,
+ name,
+ hash,
+ extra_filename,
+ false, // is_host
+ path_kind,
+ );
+
+ match self.load(&mut locator)? {
+ Some(res) => (res, None),
+ None => {
+ dep_kind = CrateDepKind::MacrosOnly;
+ match self.load_proc_macro(&mut locator, path_kind, host_hash)? {
+ Some(res) => res,
+ None => return Err(locator.into_error(root.cloned())),
+ }
+ }
+ }
+ };
+
+ match result {
+ (LoadResult::Previous(cnum), None) => {
+ let data = self.cstore.get_crate_data(cnum);
+ if data.is_proc_macro_crate() {
+ dep_kind = CrateDepKind::MacrosOnly;
+ }
+ data.update_dep_kind(|data_dep_kind| cmp::max(data_dep_kind, dep_kind));
+ Ok(cnum)
+ }
+ (LoadResult::Loaded(library), host_library) => {
+ self.register_crate(host_library, root, library, dep_kind, name)
+ }
+ _ => panic!(),
+ }
+ }
+
+ fn load(&self, locator: &mut CrateLocator<'_>) -> Result<Option<LoadResult>, CrateError> {
+ let Some(library) = locator.maybe_load_library_crate()? else {
+ return Ok(None);
+ };
+
+ // In the case that we're loading a crate, but not matching
+ // against a hash, we could load a crate which has the same hash
+        // as an already loaded crate. If this is the case, prevent
+        // duplicates by just using the first crate.
+ //
+ // Note that we only do this for target triple crates, though, as we
+ // don't want to match a host crate against an equivalent target one
+ // already loaded.
+ let root = library.metadata.get_root();
+        // FIXME: why is this condition necessary? It was added in #33625 but I
+        // don't know why and the original author doesn't remember ...
+ let can_reuse_cratenum =
+ locator.triple == self.sess.opts.target_triple || locator.is_proc_macro;
+ Ok(Some(if can_reuse_cratenum {
+ let mut result = LoadResult::Loaded(library);
+ for (cnum, data) in self.cstore.iter_crate_data() {
+ if data.name() == root.name() && root.hash() == data.hash() {
+ assert!(locator.hash.is_none());
+ info!("load success, going to previous cnum: {}", cnum);
+ result = LoadResult::Previous(cnum);
+ break;
+ }
+ }
+ result
+ } else {
+ LoadResult::Loaded(library)
+ }))
+ }
+
+ fn update_extern_crate(&self, cnum: CrateNum, extern_crate: ExternCrate) {
+ let cmeta = self.cstore.get_crate_data(cnum);
+ if cmeta.update_extern_crate(extern_crate) {
+ // Propagate the extern crate info to dependencies if it was updated.
+ let extern_crate = ExternCrate { dependency_of: cnum, ..extern_crate };
+ for &dep_cnum in cmeta.dependencies().iter() {
+ self.update_extern_crate(dep_cnum, extern_crate);
+ }
+ }
+ }
+
+ // Go through the crate metadata and load any crates that it references
+ fn resolve_crate_deps(
+ &mut self,
+ root: &CratePaths,
+ crate_root: &CrateRoot,
+ metadata: &MetadataBlob,
+ krate: CrateNum,
+ dep_kind: CrateDepKind,
+ ) -> Result<CrateNumMap, CrateError> {
+ debug!("resolving deps of external crate");
+ if crate_root.is_proc_macro_crate() {
+ return Ok(CrateNumMap::new());
+ }
+
+ // The map from crate numbers in the crate we're resolving to local crate numbers.
+ // We map 0 and all other holes in the map to our parent crate. The "additional"
+ // self-dependencies should be harmless.
+ let deps = crate_root.decode_crate_deps(metadata);
+ let mut crate_num_map = CrateNumMap::with_capacity(1 + deps.len());
+ crate_num_map.push(krate);
+ for dep in deps {
+ info!(
+ "resolving dep crate {} hash: `{}` extra filename: `{}`",
+ dep.name, dep.hash, dep.extra_filename
+ );
+ let dep_kind = match dep_kind {
+ CrateDepKind::MacrosOnly => CrateDepKind::MacrosOnly,
+ _ => dep.kind,
+ };
+ let cnum = self.maybe_resolve_crate(dep.name, dep_kind, Some((root, &dep)))?;
+ crate_num_map.push(cnum);
+ }
+
+ debug!("resolve_crate_deps: cnum_map for {:?} is {:?}", krate, crate_num_map);
+ Ok(crate_num_map)
+ }
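+
+    // Illustrative (hypothetical cnums): if the decoded crate lists deps
+    // `[c, d]` that resolve locally to cnums 7 and 2, the resulting map is
+    // `[krate, 7, 2]`: entry 0 maps the foreign cnum 0 back to `krate`, and
+    // entry N maps the decoded crate's Nth dependency to its local cnum.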
+
+ fn dlsym_proc_macros(
+ &self,
+ path: &Path,
+ stable_crate_id: StableCrateId,
+ ) -> Result<&'static [ProcMacro], CrateError> {
+ // Make sure the path contains a / or the linker will search for it.
+ let path = env::current_dir().unwrap().join(path);
+ let lib = unsafe { libloading::Library::new(path) }
+ .map_err(|err| CrateError::DlOpen(err.to_string()))?;
+
+ let sym_name = self.sess.generate_proc_macro_decls_symbol(stable_crate_id);
+ let sym = unsafe { lib.get::<*const &[ProcMacro]>(sym_name.as_bytes()) }
+ .map_err(|err| CrateError::DlSym(err.to_string()))?;
+
+ // Intentionally leak the dynamic library. We can't ever unload it
+ // since the library can make things that will live arbitrarily long.
+ let sym = unsafe { sym.into_raw() };
+ std::mem::forget(lib);
+
+ Ok(unsafe { **sym })
+ }
+
+ fn inject_panic_runtime(&mut self, krate: &ast::Crate) {
+ // If we're only compiling an rlib, then there's no need to select a
+ // panic runtime, so we just skip this section entirely.
+ let any_non_rlib = self.sess.crate_types().iter().any(|ct| *ct != CrateType::Rlib);
+ if !any_non_rlib {
+ info!("panic runtime injection skipped, only generating rlib");
+ return;
+ }
+
+ // If we need a panic runtime, we try to find an existing one here. At
+ // the same time we perform some general validation of the DAG we've got
+ // going such as ensuring everything has a compatible panic strategy.
+ //
+ // The logic for finding the panic runtime here is pretty much the same
+ // as the allocator case with the only addition that the panic strategy
+ // compilation mode also comes into play.
+ let desired_strategy = self.sess.panic_strategy();
+ let mut runtime_found = false;
+ let mut needs_panic_runtime =
+ self.sess.contains_name(&krate.attrs, sym::needs_panic_runtime);
+
+ for (cnum, data) in self.cstore.iter_crate_data() {
+ needs_panic_runtime = needs_panic_runtime || data.needs_panic_runtime();
+ if data.is_panic_runtime() {
+ // Inject a dependency from all #![needs_panic_runtime] to this
+ // #![panic_runtime] crate.
+ self.inject_dependency_if(cnum, "a panic runtime", &|data| {
+ data.needs_panic_runtime()
+ });
+ runtime_found = runtime_found || data.dep_kind() == CrateDepKind::Explicit;
+ }
+ }
+
+ // If an explicitly linked and matching panic runtime was found, or if
+ // we just don't need one at all, then we're done here and there's
+ // nothing else to do.
+ if !needs_panic_runtime || runtime_found {
+ return;
+ }
+
+ // By this point we know that we (a) need a panic runtime and (b) no
+ // panic runtime was explicitly linked. Here we just load an appropriate
+ // default runtime for our panic strategy and then inject the
+ // dependencies.
+ //
+ // We may resolve to an already loaded crate (as the crate may not have
+ // been explicitly linked prior to this) and we may re-inject
+ // dependencies again, but both of those situations are fine.
+ //
+ // Also note that we have yet to perform validation of the crate graph
+ // in terms of everyone has a compatible panic runtime format, that's
+ // performed later as part of the `dependency_format` module.
+ let name = match desired_strategy {
+ PanicStrategy::Unwind => sym::panic_unwind,
+ PanicStrategy::Abort => sym::panic_abort,
+ };
+ info!("panic runtime not found -- loading {}", name);
+
+ let Some(cnum) = self.resolve_crate(name, DUMMY_SP, CrateDepKind::Implicit) else { return; };
+ let data = self.cstore.get_crate_data(cnum);
+
+ // Sanity check the loaded crate to ensure it is indeed a panic runtime
+ // and the panic strategy is indeed what we thought it was.
+ if !data.is_panic_runtime() {
+ self.sess.err(&format!("the crate `{}` is not a panic runtime", name));
+ }
+ if data.required_panic_strategy() != Some(desired_strategy) {
+ self.sess.err(&format!(
+ "the crate `{}` does not have the panic \
+ strategy `{}`",
+ name,
+ desired_strategy.desc()
+ ));
+ }
+
+ self.cstore.injected_panic_runtime = Some(cnum);
+ self.inject_dependency_if(cnum, "a panic runtime", &|data| data.needs_panic_runtime());
+ }
+
+ fn inject_profiler_runtime(&mut self, krate: &ast::Crate) {
+ if self.sess.opts.unstable_opts.no_profiler_runtime
+ || !(self.sess.instrument_coverage()
+ || self.sess.opts.unstable_opts.profile
+ || self.sess.opts.cg.profile_generate.enabled())
+ {
+ return;
+ }
+
+ info!("loading profiler");
+
+ let name = Symbol::intern(&self.sess.opts.unstable_opts.profiler_runtime);
+ if name == sym::profiler_builtins && self.sess.contains_name(&krate.attrs, sym::no_core) {
+ self.sess.err(
+ "`profiler_builtins` crate (required by compiler options) \
+ is not compatible with crate attribute `#![no_core]`",
+ );
+ }
+
+ let Some(cnum) = self.resolve_crate(name, DUMMY_SP, CrateDepKind::Implicit) else { return; };
+ let data = self.cstore.get_crate_data(cnum);
+
+ // Sanity check the loaded crate to ensure it is indeed a profiler runtime
+ if !data.is_profiler_runtime() {
+ self.sess.err(&format!("the crate `{}` is not a profiler runtime", name));
+ }
+ }
+
+ fn inject_allocator_crate(&mut self, krate: &ast::Crate) {
+ self.cstore.has_global_allocator = match &*global_allocator_spans(&self.sess, krate) {
+ [span1, span2, ..] => {
+ self.sess
+ .struct_span_err(*span2, "cannot define multiple global allocators")
+ .span_label(*span2, "cannot define a new global allocator")
+ .span_label(*span1, "previous global allocator defined here")
+ .emit();
+ true
+ }
+ spans => !spans.is_empty(),
+ };
+
+ // Check to see if we actually need an allocator. This desire comes
+ // about through the `#![needs_allocator]` attribute and is typically
+ // written down in liballoc.
+ if !self.sess.contains_name(&krate.attrs, sym::needs_allocator)
+ && !self.cstore.iter_crate_data().any(|(_, data)| data.needs_allocator())
+ {
+ return;
+ }
+
+ // At this point we've determined that we need an allocator. Let's see
+ // if our compilation session actually needs an allocator based on what
+ // we're emitting.
+ let all_rlib = self.sess.crate_types().iter().all(|ct| matches!(*ct, CrateType::Rlib));
+ if all_rlib {
+ return;
+ }
+
+ // Ok, we need an allocator. Not only that but we're actually going to
+ // create an artifact that needs one linked in. Let's go find the one
+ // that we're going to link in.
+ //
+ // First up we check for global allocators. Look at the crate graph here
+ // and see what's a global allocator, including if we ourselves are a
+ // global allocator.
+ let mut global_allocator =
+ self.cstore.has_global_allocator.then(|| Symbol::intern("this crate"));
+ for (_, data) in self.cstore.iter_crate_data() {
+ if data.has_global_allocator() {
+ match global_allocator {
+ Some(other_crate) => {
+ self.sess.err(&format!(
+ "the `#[global_allocator]` in {} conflicts with global allocator in: {}",
+ other_crate,
+ data.name()
+ ));
+ }
+ None => global_allocator = Some(data.name()),
+ }
+ }
+ }
+
+ if global_allocator.is_some() {
+ self.cstore.allocator_kind = Some(AllocatorKind::Global);
+ return;
+ }
+
+ // Ok we haven't found a global allocator but we still need an
+ // allocator. At this point our allocator request is typically fulfilled
+ // by the standard library, denoted by the `#![default_lib_allocator]`
+ // attribute.
+ if !self.sess.contains_name(&krate.attrs, sym::default_lib_allocator)
+ && !self.cstore.iter_crate_data().any(|(_, data)| data.has_default_lib_allocator())
+ {
+ self.sess.err(
+ "no global memory allocator found but one is required; link to std or add \
+ `#[global_allocator]` to a static item that implements the GlobalAlloc trait",
+ );
+ }
+ self.cstore.allocator_kind = Some(AllocatorKind::Default);
+ }
+
+ fn inject_dependency_if(
+ &self,
+ krate: CrateNum,
+ what: &str,
+ needs_dep: &dyn Fn(&CrateMetadata) -> bool,
+ ) {
+ // don't perform this validation if the session has errors, as one of
+ // those errors may indicate a circular dependency which could cause
+ // this to stack overflow.
+ if self.sess.has_errors().is_some() {
+ return;
+ }
+
+ // Before we inject any dependencies, make sure we don't inject a
+ // circular dependency by validating that this crate doesn't
+ // transitively depend on any crates satisfying `needs_dep`.
+ for dep in self.cstore.crate_dependencies_in_reverse_postorder(krate) {
+ let data = self.cstore.get_crate_data(dep);
+ if needs_dep(&data) {
+ self.sess.err(&format!(
+ "the crate `{}` cannot depend \
+ on a crate that needs {}, but \
+ it depends on `{}`",
+ self.cstore.get_crate_data(krate).name(),
+ what,
+ data.name()
+ ));
+ }
+ }
+
+ // All crates satisfying `needs_dep` do not explicitly depend on the
+ // crate provided for this compile, but in order for this compilation to
+ // be successfully linked we need to inject a dependency (to order the
+ // crates on the command line correctly).
+ for (cnum, data) in self.cstore.iter_crate_data() {
+ if needs_dep(data) {
+ info!("injecting a dep from {} to {}", cnum, krate);
+ data.add_dependency(krate);
+ }
+ }
+ }
+
+ fn report_unused_deps(&mut self, krate: &ast::Crate) {
+ // Make a point span rather than covering the whole file
+ let span = krate.spans.inner_span.shrink_to_lo();
+ // Complain about anything left over
+ for (name, entry) in self.sess.opts.externs.iter() {
+ if let ExternLocation::FoundInLibrarySearchDirectories = entry.location {
+ // Don't worry about pathless `--extern foo` sysroot references
+ continue;
+ }
+ if entry.nounused_dep {
+ // We're not worried about this one
+ continue;
+ }
+ let name_interned = Symbol::intern(name);
+ if self.used_extern_options.contains(&name_interned) {
+ continue;
+ }
+
+ // Got a real unused --extern
+ if self.sess.opts.json_unused_externs.is_enabled() {
+ self.cstore.unused_externs.push(name_interned);
+ continue;
+ }
+
+ self.sess.parse_sess.buffer_lint(
+ lint::builtin::UNUSED_CRATE_DEPENDENCIES,
+ span,
+ ast::CRATE_NODE_ID,
+ &format!(
+ "external crate `{}` unused in `{}`: remove the dependency or add `use {} as _;`",
+ name,
+ self.local_crate_name,
+ name),
+ );
+ }
+ }
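+
+    // Illustrative: given `--extern bar=libbar.rlib` on a crate named `foo`
+    // that never resolves `bar`, this buffers (or records for JSON output) a
+    // diagnostic reading roughly: external crate `bar` unused in `foo`:
+    // remove the dependency or add `use bar as _;`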
+
+ pub fn postprocess(&mut self, krate: &ast::Crate) {
+ self.inject_profiler_runtime(krate);
+ self.inject_allocator_crate(krate);
+ self.inject_panic_runtime(krate);
+
+ self.report_unused_deps(krate);
+
+ info!("{:?}", CrateDump(&self.cstore));
+ }
+
+ pub fn process_extern_crate(
+ &mut self,
+ item: &ast::Item,
+ definitions: &Definitions,
+ def_id: LocalDefId,
+ ) -> Option<CrateNum> {
+ match item.kind {
+ ast::ItemKind::ExternCrate(orig_name) => {
+ debug!(
+ "resolving extern crate stmt. ident: {} orig_name: {:?}",
+ item.ident, orig_name
+ );
+ let name = match orig_name {
+ Some(orig_name) => {
+ validate_crate_name(self.sess, orig_name.as_str(), Some(item.span));
+ orig_name
+ }
+ None => item.ident.name,
+ };
+ let dep_kind = if self.sess.contains_name(&item.attrs, sym::no_link) {
+ CrateDepKind::MacrosOnly
+ } else {
+ CrateDepKind::Explicit
+ };
+
+ let cnum = self.resolve_crate(name, item.span, dep_kind)?;
+
+ let path_len = definitions.def_path(def_id).data.len();
+ self.update_extern_crate(
+ cnum,
+ ExternCrate {
+ src: ExternCrateSource::Extern(def_id.to_def_id()),
+ span: item.span,
+ path_len,
+ dependency_of: LOCAL_CRATE,
+ },
+ );
+ Some(cnum)
+ }
+ _ => bug!(),
+ }
+ }
+
+ pub fn process_path_extern(&mut self, name: Symbol, span: Span) -> Option<CrateNum> {
+ let cnum = self.resolve_crate(name, span, CrateDepKind::Explicit)?;
+
+ self.update_extern_crate(
+ cnum,
+ ExternCrate {
+ src: ExternCrateSource::Path,
+ span,
+ // to have the least priority in `update_extern_crate`
+ path_len: usize::MAX,
+ dependency_of: LOCAL_CRATE,
+ },
+ );
+
+ Some(cnum)
+ }
+
+ pub fn maybe_process_path_extern(&mut self, name: Symbol) -> Option<CrateNum> {
+ self.maybe_resolve_crate(name, CrateDepKind::Explicit, None).ok()
+ }
+}
+
+fn global_allocator_spans(sess: &Session, krate: &ast::Crate) -> Vec<Span> {
+ struct Finder<'a> {
+ sess: &'a Session,
+ name: Symbol,
+ spans: Vec<Span>,
+ }
+ impl<'ast, 'a> visit::Visitor<'ast> for Finder<'a> {
+ fn visit_item(&mut self, item: &'ast ast::Item) {
+ if item.ident.name == self.name
+ && self.sess.contains_name(&item.attrs, sym::rustc_std_internal_symbol)
+ {
+ self.spans.push(item.span);
+ }
+ visit::walk_item(self, item)
+ }
+ }
+
+ let name = Symbol::intern(&AllocatorKind::Global.fn_name(sym::alloc));
+ let mut f = Finder { sess, name, spans: Vec::new() };
+ visit::walk_crate(&mut f, krate);
+ f.spans
+}
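+
+// Illustrative note: `AllocatorKind::Global.fn_name(sym::alloc)` yields the
+// name of the global allocator shim for `alloc` (`__rust_alloc`, assuming the
+// usual shim naming), so the finder above records the span of every
+// `#[rustc_std_internal_symbol]` item that defines that shim.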
diff --git a/compiler/rustc_metadata/src/dependency_format.rs b/compiler/rustc_metadata/src/dependency_format.rs
new file mode 100644
index 000000000..b765c34f8
--- /dev/null
+++ b/compiler/rustc_metadata/src/dependency_format.rs
@@ -0,0 +1,435 @@
+//! Resolution of mixing rlibs and dylibs
+//!
+//! When producing a final artifact, such as a dynamic library, the compiler has
+//! a choice between linking an rlib or linking a dylib of all upstream
+//! dependencies. The linking phase must guarantee, however, that a library
+//! shows up at most once in the object file. For example, it is illegal for
+//! library A to be statically linked into two separate dylibs B and C, and to
+//! then link B and C into a crate D (because library A would appear twice).
+//!
+//! The job of this module is to calculate, for each output type requested in
+//! this session, the format in which each upstream crate should be linked.
+//! This generally follows this set of rules:
+//!
+//! 1. Each library must appear exactly once in the output.
+//! 2. Each rlib contains only one library (it's just an object file)
+//! 3. Each dylib can contain more than one library (due to static linking),
+//! and can also bring in many dynamic dependencies.
+//!
+//! With these constraints in mind, it's generally a very difficult problem to
+//! find a solution that's not "all rlibs" or "all dylibs". I have suspicions
+//! that NP-ness may come into the picture here...
+//!
+//! The current selection algorithm below looks mostly similar to:
+//!
+//! 1. If static linking is required, then require all upstream dependencies
+//! to be available as rlibs. If not, generate an error.
+//! 2. If static linking is requested (generating an executable), then
+//! attempt to use all upstream dependencies as rlibs. If any are not
+//! found, bail out and continue to step 3.
+//! 3. Static linking has failed, at least one library must be dynamically
+//! linked. Apply a heuristic by greedily maximizing the number of
+//! dynamically linked libraries.
+//! 4. Each upstream dependency available as a dynamic library is
+//! registered. The dependencies all propagate, adding to a map. It is
+//! possible for a dylib to add a static library as a dependency, but it
+//! is illegal for two dylibs to add the same static library as a
+//! dependency. The same dylib can be added twice. Additionally, it is
+//! illegal to add a static dependency when it was previously found as a
+//! dylib (and vice versa)
+//! 5. After all dynamic dependencies have been traversed, re-traverse the
+//! remaining dependencies and add them statically (if they haven't been
+//! added already).
+//!
+//! While not perfect, this algorithm should help support use-cases such as leaf
+//! dependencies being static while the larger tree of inner dependencies are
+//! all dynamic. This isn't currently very well battle tested, so it will likely
+//! fall short in some use cases.
+//!
+//! Currently, there is no way to specify the preference of linkage with a
+//! particular library (other than a global dynamic/static switch).
+//! Additionally, the algorithm is geared towards finding *any* solution rather
+//! than finding a number of solutions (there are normally quite a few).
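+//!
+//! As a hypothetical example: if an executable depends on crate `a` (rlib and
+//! dylib available) and `a` depends on `b` (rlib only), attempting static
+//! linkage first succeeds with both linked as rlibs. If instead `b` had *no*
+//! rlib, static linkage fails and the dylib sweep links `a` dynamically; any
+//! library already contained in `a`'s dylib is then marked as
+//! `IncludedFromDylib` rather than linked a second time.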
+
+use crate::creader::CStore;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::CrateNum;
+use rustc_middle::middle::dependency_format::{Dependencies, DependencyList, Linkage};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::CrateType;
+use rustc_session::cstore::CrateDepKind;
+use rustc_session::cstore::LinkagePreference::{self, RequireDynamic, RequireStatic};
+
+pub(crate) fn calculate(tcx: TyCtxt<'_>) -> Dependencies {
+ tcx.sess
+ .crate_types()
+ .iter()
+ .map(|&ty| {
+ let linkage = calculate_type(tcx, ty);
+ verify_ok(tcx, &linkage);
+ (ty, linkage)
+ })
+ .collect::<Vec<_>>()
+}
+
+fn calculate_type(tcx: TyCtxt<'_>, ty: CrateType) -> DependencyList {
+ let sess = &tcx.sess;
+
+ if !sess.opts.output_types.should_codegen() {
+ return Vec::new();
+ }
+
+ let preferred_linkage = match ty {
+ // Generating a dylib without `-C prefer-dynamic` means that we're going
+ // to try to eagerly statically link all dependencies. This is normally
+ // done for end-product dylibs, not intermediate products.
+ //
+ // Treat cdylibs similarly. If `-C prefer-dynamic` is set, the caller may
+ // be code-size conscious, but without it, it makes sense to statically
+ // link a cdylib.
+ CrateType::Dylib | CrateType::Cdylib if !sess.opts.cg.prefer_dynamic => Linkage::Static,
+ CrateType::Dylib | CrateType::Cdylib => Linkage::Dynamic,
+
+ // If the global prefer_dynamic switch is turned off, or the final
+ // executable will be statically linked, prefer static crate linkage.
+ CrateType::Executable if !sess.opts.cg.prefer_dynamic || sess.crt_static(Some(ty)) => {
+ Linkage::Static
+ }
+ CrateType::Executable => Linkage::Dynamic,
+
+ // proc-macro crates are mostly cdylibs, but we also need metadata.
+ CrateType::ProcMacro => Linkage::Static,
+
+ // No linkage happens with rlibs, we just needed the metadata (which we
+ // got long ago), so don't bother with anything.
+ CrateType::Rlib => Linkage::NotLinked,
+
+ // staticlibs must have all static dependencies.
+ CrateType::Staticlib => Linkage::Static,
+ };
+
+ if preferred_linkage == Linkage::NotLinked {
+ // If the crate is not linked, there are no link-time dependencies.
+ return Vec::new();
+ }
+
+ if preferred_linkage == Linkage::Static {
+ // Attempt static linkage first. For dylibs and executables, we may be
+ // able to retry below with dynamic linkage.
+ if let Some(v) = attempt_static(tcx) {
+ return v;
+ }
+
+ // Staticlibs and static executables must have all static dependencies.
+ // If any are not found, generate some nice pretty errors.
+ if ty == CrateType::Staticlib
+ || (ty == CrateType::Executable
+ && sess.crt_static(Some(ty))
+ && !sess.target.crt_static_allows_dylibs)
+ {
+ for &cnum in tcx.crates(()).iter() {
+ if tcx.dep_kind(cnum).macros_only() {
+ continue;
+ }
+ let src = tcx.used_crate_source(cnum);
+ if src.rlib.is_some() {
+ continue;
+ }
+ sess.err(&format!(
+ "crate `{}` required to be available in rlib format, \
+ but was not found in this form",
+ tcx.crate_name(cnum)
+ ));
+ }
+ return Vec::new();
+ }
+ }
+
+ let mut formats = FxHashMap::default();
+
+ // Sweep all crates for found dylibs. Add all dylibs, as well as their
+ // dependencies, ensuring there are no conflicts. The only valid case for a
+ // dependency to be relied upon twice is for both cases to rely on a dylib.
+ for &cnum in tcx.crates(()).iter() {
+ if tcx.dep_kind(cnum).macros_only() {
+ continue;
+ }
+ let name = tcx.crate_name(cnum);
+ let src = tcx.used_crate_source(cnum);
+ if src.dylib.is_some() {
+ tracing::info!("adding dylib: {}", name);
+ add_library(tcx, cnum, RequireDynamic, &mut formats);
+ let deps = tcx.dylib_dependency_formats(cnum);
+ for &(depnum, style) in deps.iter() {
+ tracing::info!("adding {:?}: {}", style, tcx.crate_name(depnum));
+ add_library(tcx, depnum, style, &mut formats);
+ }
+ }
+ }
+
+ // Collect what we've got so far in the return vector.
+ let last_crate = tcx.crates(()).len();
+ let mut ret = (1..last_crate + 1)
+ .map(|cnum| match formats.get(&CrateNum::new(cnum)) {
+ Some(&RequireDynamic) => Linkage::Dynamic,
+ Some(&RequireStatic) => Linkage::IncludedFromDylib,
+ None => Linkage::NotLinked,
+ })
+ .collect::<Vec<_>>();
+
+ // Run through the dependency list again, and add any missing libraries as
+ // static libraries.
+ //
+ // If the crate hasn't been included yet and it's not actually required
+ // (e.g., it's an allocator) then we skip it here as well.
+ for &cnum in tcx.crates(()).iter() {
+ let src = tcx.used_crate_source(cnum);
+ if src.dylib.is_none()
+ && !formats.contains_key(&cnum)
+ && tcx.dep_kind(cnum) == CrateDepKind::Explicit
+ {
+ assert!(src.rlib.is_some() || src.rmeta.is_some());
+ tracing::info!("adding staticlib: {}", tcx.crate_name(cnum));
+ add_library(tcx, cnum, RequireStatic, &mut formats);
+ ret[cnum.as_usize() - 1] = Linkage::Static;
+ }
+ }
+
+ // We've gotten this far because we're emitting some form of a final
+ // artifact which means that we may need to inject dependencies of some
+ // form.
+ //
+ // Things like allocators and panic runtimes may not have been activated
+ // quite yet, so do so here.
+ activate_injected_dep(CStore::from_tcx(tcx).injected_panic_runtime(), &mut ret, &|cnum| {
+ tcx.is_panic_runtime(cnum)
+ });
+
+ // When dylib B links to dylib A, then when using B we must also link to A.
+ // It could be the case, however, that the rlib for A is present (hence we
+ // found metadata), but the dylib for A has since been removed.
+ //
+ // For situations like this, we perform one last pass over the dependencies,
+ // making sure that everything is available in the requested format.
+ for (cnum, kind) in ret.iter().enumerate() {
+ let cnum = CrateNum::new(cnum + 1);
+ let src = tcx.used_crate_source(cnum);
+ match *kind {
+ Linkage::NotLinked | Linkage::IncludedFromDylib => {}
+ Linkage::Static if src.rlib.is_some() => continue,
+ Linkage::Dynamic if src.dylib.is_some() => continue,
+ kind => {
+ let kind = match kind {
+ Linkage::Static => "rlib",
+ _ => "dylib",
+ };
+ sess.err(&format!(
+ "crate `{}` required to be available in {} format, \
+ but was not found in this form",
+ tcx.crate_name(cnum),
+ kind
+ ));
+ }
+ }
+ }
+
+ ret
+}
+
+fn add_library(
+ tcx: TyCtxt<'_>,
+ cnum: CrateNum,
+ link: LinkagePreference,
+ m: &mut FxHashMap<CrateNum, LinkagePreference>,
+) {
+ match m.get(&cnum) {
+ Some(&link2) => {
+ // If the linkages differ, then we'd have two copies of the library
+ // if we continued linking. If the linkages are both static, then we
+ // would also have two copies of the library (static from two
+ // different locations).
+ //
+ // This error is probably a little obscure, but I imagine that it
+ // can be refined over time.
+ if link2 != link || link == RequireStatic {
+ tcx.sess
+ .struct_err(&format!(
+ "cannot satisfy dependencies so `{}` only \
+ shows up once",
+ tcx.crate_name(cnum)
+ ))
+ .help(
+ "having upstream crates all available in one format \
+ will likely make this go away",
+ )
+ .emit();
+ }
+ }
+ None => {
+ m.insert(cnum, link);
+ }
+ }
+}
+
+fn attempt_static(tcx: TyCtxt<'_>) -> Option<DependencyList> {
+ let all_crates_available_as_rlib = tcx
+ .crates(())
+ .iter()
+ .copied()
+ .filter_map(|cnum| {
+ if tcx.dep_kind(cnum).macros_only() {
+ return None;
+ }
+ Some(tcx.used_crate_source(cnum).rlib.is_some())
+ })
+ .all(|is_rlib| is_rlib);
+ if !all_crates_available_as_rlib {
+ return None;
+ }
+
+ // All crates are available in an rlib format, so we're just going to link
+ // everything in explicitly so long as it's actually required.
+ let mut ret = tcx
+ .crates(())
+ .iter()
+ .map(|&cnum| {
+ if tcx.dep_kind(cnum) == CrateDepKind::Explicit {
+ Linkage::Static
+ } else {
+ Linkage::NotLinked
+ }
+ })
+ .collect::<Vec<_>>();
+
+ // Our allocator/panic runtime may not have been linked above if it wasn't
+ // explicitly linked, which is the case for any injected dependency. Handle
+ // that here and activate them.
+ activate_injected_dep(CStore::from_tcx(tcx).injected_panic_runtime(), &mut ret, &|cnum| {
+ tcx.is_panic_runtime(cnum)
+ });
+
+ Some(ret)
+}
+
+// Given a list of how to link upstream dependencies so far, ensure that an
+// injected dependency is activated. This will not do anything if one was
+// transitively included already (e.g., via a dylib or explicitly so).
+//
+// If an injected dependency is not found in the list, then we're guaranteed
+// the metadata::creader module has injected that dependency (not listed as
+// a required dependency) in one of the session's fields. If that field is not
+// set, then this compilation doesn't actually need the dependency and we can
+// also skip this step entirely.
+fn activate_injected_dep(
+ injected: Option<CrateNum>,
+ list: &mut DependencyList,
+ replaces_injected: &dyn Fn(CrateNum) -> bool,
+) {
+ for (i, slot) in list.iter().enumerate() {
+ let cnum = CrateNum::new(i + 1);
+ if !replaces_injected(cnum) {
+ continue;
+ }
+ if *slot != Linkage::NotLinked {
+ return;
+ }
+ }
+ if let Some(injected) = injected {
+ let idx = injected.as_usize() - 1;
+ assert_eq!(list[idx], Linkage::NotLinked);
+ list[idx] = Linkage::Static;
+ }
+}
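+
+// Illustrative (hypothetical cnums): if `panic_unwind` was injected as cnum 3
+// and no crate already in the linkage list fills the panic-runtime role, then
+// slot 2 (cnum 3 maps to index 3 - 1) flips from `NotLinked` to `Static`.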
+
+// After the linkage for a crate has been determined we need to verify that
+// there's only going to be one allocator in the output.
+fn verify_ok(tcx: TyCtxt<'_>, list: &[Linkage]) {
+ let sess = &tcx.sess;
+ if list.is_empty() {
+ return;
+ }
+ let mut panic_runtime = None;
+ for (i, linkage) in list.iter().enumerate() {
+ if let Linkage::NotLinked = *linkage {
+ continue;
+ }
+ let cnum = CrateNum::new(i + 1);
+
+ if tcx.is_panic_runtime(cnum) {
+ if let Some((prev, _)) = panic_runtime {
+ let prev_name = tcx.crate_name(prev);
+ let cur_name = tcx.crate_name(cnum);
+ sess.err(&format!(
+ "cannot link together two \
+ panic runtimes: {} and {}",
+ prev_name, cur_name
+ ));
+ }
+ panic_runtime = Some((
+ cnum,
+ tcx.required_panic_strategy(cnum).unwrap_or_else(|| {
+ bug!("cannot determine panic strategy of a panic runtime");
+ }),
+ ));
+ }
+ }
+
+ // If we found a panic runtime, then we know by this point that it's the
+ // only one, but we perform validation here that all the panic strategy
+ // compilation modes for the whole DAG are valid.
+ if let Some((runtime_cnum, found_strategy)) = panic_runtime {
+ let desired_strategy = sess.panic_strategy();
+
+ // First up, validate that our selected panic runtime is indeed exactly
+ // our same strategy.
+ if found_strategy != desired_strategy {
+ sess.err(&format!(
+ "the linked panic runtime `{}` is \
+ not compiled with this crate's \
+ panic strategy `{}`",
+ tcx.crate_name(runtime_cnum),
+ desired_strategy.desc()
+ ));
+ }
+
+ // Next up, verify that all other crates are compatible with this panic
+ // strategy. If the dep isn't linked, we ignore it, and if our strategy
+ // is abort then it's compatible with everything. Otherwise all crates'
+ // panic strategy must match our own.
+ for (i, linkage) in list.iter().enumerate() {
+ if let Linkage::NotLinked = *linkage {
+ continue;
+ }
+ let cnum = CrateNum::new(i + 1);
+ if cnum == runtime_cnum || tcx.is_compiler_builtins(cnum) {
+ continue;
+ }
+
+ if let Some(found_strategy) = tcx.required_panic_strategy(cnum) && desired_strategy != found_strategy {
+ sess.err(&format!(
+ "the crate `{}` requires \
+ panic strategy `{}` which is \
+ incompatible with this crate's \
+ strategy of `{}`",
+ tcx.crate_name(cnum),
+ found_strategy.desc(),
+ desired_strategy.desc()
+ ));
+ }
+
+ let found_drop_strategy = tcx.panic_in_drop_strategy(cnum);
+ if tcx.sess.opts.unstable_opts.panic_in_drop != found_drop_strategy {
+ sess.err(&format!(
+ "the crate `{}` is compiled with the \
+ panic-in-drop strategy `{}` which is \
+ incompatible with this crate's \
+ strategy of `{}`",
+ tcx.crate_name(cnum),
+ found_drop_strategy.desc(),
+ tcx.sess.opts.unstable_opts.panic_in_drop.desc()
+ ));
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_metadata/src/foreign_modules.rs b/compiler/rustc_metadata/src/foreign_modules.rs
new file mode 100644
index 000000000..2ca4cd17f
--- /dev/null
+++ b/compiler/rustc_metadata/src/foreign_modules.rs
@@ -0,0 +1,19 @@
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::cstore::ForeignModule;
+
+pub(crate) fn collect(tcx: TyCtxt<'_>) -> Vec<ForeignModule> {
+ let mut modules = Vec::new();
+ for id in tcx.hir().items() {
+ if !matches!(tcx.def_kind(id.def_id), DefKind::ForeignMod) {
+ continue;
+ }
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::ForeignMod { items, .. } = item.kind {
+ let foreign_items = items.iter().map(|it| it.id.def_id.to_def_id()).collect();
+ modules.push(ForeignModule { foreign_items, def_id: id.def_id.to_def_id() });
+ }
+ }
+ modules
+}
diff --git a/compiler/rustc_metadata/src/fs.rs b/compiler/rustc_metadata/src/fs.rs
new file mode 100644
index 000000000..e6072901a
--- /dev/null
+++ b/compiler/rustc_metadata/src/fs.rs
@@ -0,0 +1,137 @@
+use crate::{encode_metadata, EncodedMetadata};
+
+use rustc_data_structures::temp_dir::MaybeTempDir;
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{CrateType, OutputFilenames, OutputType};
+use rustc_session::output::filename_for_metadata;
+use rustc_session::Session;
+use tempfile::Builder as TempFileBuilder;
+
+use std::fs;
+use std::path::{Path, PathBuf};
+
+// FIXME(eddyb) maybe include the crate name in this?
+pub const METADATA_FILENAME: &str = "lib.rmeta";
+
+/// We use a temp directory here to avoid races between concurrent rustc processes,
+/// such as builds in the same directory using the same filename for metadata while
+/// building an `.rlib` (stomping over one another), or writing an `.rmeta` into a
+/// directory being searched for `extern crate` (observing an incomplete file).
+/// The returned path is the temporary file containing the complete metadata.
+pub fn emit_metadata(sess: &Session, metadata: &[u8], tmpdir: &MaybeTempDir) -> PathBuf {
+ let out_filename = tmpdir.as_ref().join(METADATA_FILENAME);
+ let result = fs::write(&out_filename, metadata);
+
+ if let Err(e) = result {
+ sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
+ }
+
+ out_filename
+}
+
+pub fn encode_and_write_metadata(
+ tcx: TyCtxt<'_>,
+ outputs: &OutputFilenames,
+) -> (EncodedMetadata, bool) {
+ #[derive(PartialEq, Eq, PartialOrd, Ord)]
+ enum MetadataKind {
+ None,
+ Uncompressed,
+ Compressed,
+ }
+
+ let metadata_kind = tcx
+ .sess
+ .crate_types()
+ .iter()
+ .map(|ty| match *ty {
+ CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => MetadataKind::None,
+
+ CrateType::Rlib => MetadataKind::Uncompressed,
+
+ CrateType::Dylib | CrateType::ProcMacro => MetadataKind::Compressed,
+ })
+ .max()
+ .unwrap_or(MetadataKind::None);
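+
+    // Illustrative: the derived `Ord` above makes `max()` pick the strongest
+    // requirement across all requested crate types, e.g. `--crate-type
+    // rlib,dylib` yields `max(Uncompressed, Compressed) == Compressed`.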
+
+ let crate_name = tcx.crate_name(LOCAL_CRATE);
+ let out_filename = filename_for_metadata(tcx.sess, crate_name.as_str(), outputs);
+ // To avoid races with another rustc process scanning the output directory,
+ // we need to write the file somewhere else and atomically move it to its
+ // final destination, with an `fs::rename` call. In order for the rename to
+ // always succeed, the temporary file needs to be on the same filesystem,
+ // which is why we create it inside the output directory specifically.
+ let metadata_tmpdir = TempFileBuilder::new()
+ .prefix("rmeta")
+ .tempdir_in(out_filename.parent().unwrap_or_else(|| Path::new("")))
+ .unwrap_or_else(|err| tcx.sess.fatal(&format!("couldn't create a temp dir: {}", err)));
+ let metadata_tmpdir = MaybeTempDir::new(metadata_tmpdir, tcx.sess.opts.cg.save_temps);
+ let metadata_filename = metadata_tmpdir.as_ref().join(METADATA_FILENAME);
+
+ // Always create a file at `metadata_filename`, even if we have nothing to write to it.
+ // This simplifies the creation of the output `out_filename` when requested.
+ match metadata_kind {
+ MetadataKind::None => {
+ std::fs::File::create(&metadata_filename).unwrap_or_else(|e| {
+ tcx.sess.fatal(&format!(
+ "failed to create the file {}: {}",
+ metadata_filename.display(),
+ e
+ ))
+ });
+ }
+ MetadataKind::Uncompressed | MetadataKind::Compressed => {
+ encode_metadata(tcx, &metadata_filename);
+ }
+ };
+
+ let _prof_timer = tcx.sess.prof.generic_activity("write_crate_metadata");
+
+ // If the user requests metadata as output, rename `metadata_filename`
+ // to the expected output `out_filename`. The match above should ensure
+ // this file always exists.
+ let need_metadata_file = tcx.sess.opts.output_types.contains_key(&OutputType::Metadata);
+ let (metadata_filename, metadata_tmpdir) = if need_metadata_file {
+ if let Err(e) = non_durable_rename(&metadata_filename, &out_filename) {
+ tcx.sess.fatal(&format!("failed to write {}: {}", out_filename.display(), e));
+ }
+ if tcx.sess.opts.json_artifact_notifications {
+ tcx.sess
+ .parse_sess
+ .span_diagnostic
+ .emit_artifact_notification(&out_filename, "metadata");
+ }
+ (out_filename, None)
+ } else {
+ (metadata_filename, Some(metadata_tmpdir))
+ };
+
+ // Load metadata back to memory: codegen may need to include it in object files.
+ let metadata =
+ EncodedMetadata::from_path(metadata_filename, metadata_tmpdir).unwrap_or_else(|e| {
+ tcx.sess.fatal(&format!("failed to create encoded metadata from file: {}", e))
+ });
+
+ let need_metadata_module = metadata_kind == MetadataKind::Compressed;
+
+ (metadata, need_metadata_module)
+}
+
+#[cfg(not(target_os = "linux"))]
+pub fn non_durable_rename(src: &Path, dst: &Path) -> std::io::Result<()> {
+ std::fs::rename(src, dst)
+}
+
+/// This function attempts to bypass the auto_da_alloc heuristic implemented by some filesystems
+/// such as btrfs and ext4. When renaming over a file that already exists, they will "helpfully"
+/// write back the source file before committing the rename in case a developer forgot some of
+/// the fsyncs in the open/write/fsync(file)/rename/fsync(dir) dance for atomic file updates.
+///
+/// To avoid triggering this heuristic we delete the destination first, if it exists.
+/// The cost of an extra syscall is much lower than getting descheduled for the sync IO.
+#[cfg(target_os = "linux")]
+pub fn non_durable_rename(src: &Path, dst: &Path) -> std::io::Result<()> {
+ let _ = std::fs::remove_file(dst);
+ std::fs::rename(src, dst)
+}
diff --git a/compiler/rustc_metadata/src/lib.rs b/compiler/rustc_metadata/src/lib.rs
new file mode 100644
index 000000000..6440f3e39
--- /dev/null
+++ b/compiler/rustc_metadata/src/lib.rs
@@ -0,0 +1,41 @@
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(decl_macro)]
+#![feature(drain_filter)]
+#![feature(generators)]
+#![feature(generic_associated_types)]
+#![feature(iter_from_generator)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(once_cell)]
+#![feature(proc_macro_internals)]
+#![feature(macro_metavar_expr)]
+#![feature(min_specialization)]
+#![feature(slice_as_chunks)]
+#![feature(trusted_len)]
+#![feature(try_blocks)]
+#![feature(never_type)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+extern crate proc_macro;
+
+#[macro_use]
+extern crate rustc_macros;
+#[macro_use]
+extern crate rustc_middle;
+#[macro_use]
+extern crate rustc_data_structures;
+
+pub use rmeta::{provide, provide_extern};
+
+mod dependency_format;
+mod foreign_modules;
+mod native_libs;
+mod rmeta;
+
+pub mod creader;
+pub mod fs;
+pub mod locator;
+
+pub use fs::{emit_metadata, METADATA_FILENAME};
+pub use rmeta::{encode_metadata, EncodedMetadata, METADATA_HEADER};
diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs
new file mode 100644
index 000000000..2c1c84b0b
--- /dev/null
+++ b/compiler/rustc_metadata/src/locator.rs
@@ -0,0 +1,1222 @@
+//! Finds crate binaries and loads their metadata
+//!
+//! Might I be the first to welcome you to a world of platform differences,
+//! version requirements, dependency graphs, conflicting desires, and fun! This
+//! is the major guts (along with metadata::creader) of the compiler for loading
+//! crates and resolving dependencies. Let's take a tour!
+//!
+//! # The problem
+//!
+//! Each invocation of the compiler is immediately concerned with one primary
+//! problem: connecting a set of crates to resolved crates on the filesystem.
+//! Concretely speaking, the compiler follows roughly these steps to get here:
+//!
+//! 1. Discover a set of `extern crate` statements.
+//! 2. Transform these directives into crate names. If the directive does not
+//! have an explicit name, then the identifier is the name.
+//! 3. For each of these crate names, find a corresponding crate on the
+//! filesystem.
+//!
+//! Sounds easy, right? Let's walk into some of the nuances.
+//!
+//! ## Transitive Dependencies
+//!
+//! Let's say we've got three crates: A, B, and C. A depends on B, and B depends
+//! on C. When we're compiling A, we primarily need to find and locate B, but we
+//! also end up needing to find and locate C as well.
+//!
+//! The reason for this is that any of B's types could be composed of C's types,
+//! any function in B could return a type from C, etc. To be able to guarantee
+//! that we can always type-check/translate any function, we have to have
+//! complete knowledge of the whole ecosystem, not just our immediate
+//! dependencies.
+//!
+//! So now as part of the "find a corresponding crate on the filesystem" step
+//! above, this involves also finding all crates for *all upstream
+//! dependencies*. This includes all dependencies transitively.
+//!
+//! ## Rlibs and Dylibs
+//!
+//! The compiler has two forms of intermediate dependencies. These are dubbed
+//! rlibs and dylibs for the static and dynamic variants, respectively. An rlib
+//! is a rustc-defined file format (currently just an ar archive) while a dylib
+//! is a platform-defined dynamic library. Each library has metadata somewhere
+//! inside of it.
+//!
+//! A third kind of dependency is an rmeta file. These are metadata files and do
+//! not contain any code, etc. To a first approximation, these are treated in the
+//! same way as rlibs. Where there is both an rlib and an rmeta file, the rlib
+//! gets priority (even if the rmeta file is newer). An rmeta file is only
+//! useful for checking a downstream crate; attempting to link one will cause
+//! an error.
+//!
+//! When translating a crate name to a crate on the filesystem, we all of a
+//! sudden need to take into account both rlibs and dylibs! Linkage later on may
+//! use either one of these files, as each has their pros/cons. The job of crate
+//! loading is to discover what's possible by finding all candidates.
+//!
+//! Most parts of this loading system keep the dylib and rlib candidates as
+//! just separate variables.
+//!
+//! ## Where to look?
+//!
+//! We can't exactly scan your whole hard drive when looking for dependencies,
+//! so we need a known set of places to look. Currently the compiler will
+//! implicitly add the target lib search path ($prefix/lib/rustlib/$target/lib)
+//! to any compilation, and otherwise all -L flags are added to the search
+//! paths.
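+//!
+//! For example, an invocation like the following (path is illustrative) adds
+//! one extra directory to the search:
+//!
+//! ```text
+//! rustc foo.rs -L target/debug/deps
+//! ```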
+//!
+//! ## What criterion to select on?
+//!
+//! This is a pretty tricky area of loading crates. Given a file, how do we know
+//! whether it's the right crate? Currently, the rules look along these lines:
+//!
+//! 1. Does the filename match an rlib/dylib pattern? That is to say, does the
+//! filename have the right prefix/suffix?
+//! 2. Does the filename have the right prefix for the crate name being queried?
+//! This is filtering for files like `libfoo*.rlib` and such. If the crate
+//! we're looking for was originally compiled with -C extra-filename, the
+//! extra filename will be included in this prefix to reduce reading
+//! metadata from crates that would otherwise share our prefix.
+//! 3. Is the file an actual rust library? This is done by loading the metadata
+//! from the library and making sure it's actually there.
+//! 4. Does the name in the metadata agree with the name of the library?
+//! 5. Does the target in the metadata agree with the current target?
+//! 6. Does the SVH match? (more on this later)
+//!
+//! If the file answers `yes` to all these questions, then the file is
+//! considered a *candidate* for being accepted. It is an error to have more
+//! than one candidate of a given flavor, as the compiler has no method by
+//! which to resolve that conflict; rlib and dylib candidates are considered
+//! separately.
+//!
+//! After all this has happened, we have one or two files as candidates. These
+//! represent the rlib/dylib file found for a library, and they're returned as
+//! being found.
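+//!
+//! As a concrete illustration, when searching for crate `foo` on a Linux host
+//! (file names here are invented), the filename checks alone would give:
+//!
+//! ```text
+//! libfoo-1a2b3c4d.rlib    candidate (rlib prefix/suffix match)
+//! libfoo.so               candidate (dylib naming on this platform)
+//! libbar-9f8e7d6c.rlib    skipped   (wrong crate name prefix)
+//! foo.o                   skipped   (not an rlib/dylib pattern)
+//! ```
+//!
+//! and the remaining questions are answered by reading each candidate's
+//! embedded metadata.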
+//!
+//! ### What about versions?
+//!
+//! A lot of effort has been put forth to remove versioning from the compiler.
+//! There have been forays in the past to have versioning baked in, but they
+//! were always deemed insufficient, to the point where it was recognized that
+//! versioning is probably something the compiler shouldn't do anyway, given
+//! its complicated nature and the state of the half-baked solutions.
+//!
+//! With a departure from versioning, the primary criterion for loading crates
+//! is just the name of a crate. If we stopped here, it would imply that you
+//! could never link two crates of the same name from different sources
+//! together, which is clearly a bad state to be in.
+//!
+//! To resolve this problem, we come to the next section!
+//!
+//! # Expert Mode
+//!
+//! A number of flags have been added to the compiler to solve the "version
+//! problem" in the previous section, as well as generally enabling more
+//! powerful usage of the crate loading system of the compiler. The goal of
+//! these flags and options is to enable third-party tools to drive the
+//! compiler with prior knowledge about how the world should look.
+//!
+//! ## The `--extern` flag
+//!
+//! The compiler accepts a flag of this form a number of times:
+//!
+//! ```text
+//! --extern crate-name=path/to/the/crate.rlib
+//! ```
+//!
+//! This flag is basically the following letter to the compiler:
+//!
+//! > Dear rustc,
+//! >
+//! > When you are attempting to load the immediate dependency `crate-name`, I
+//! > would like you to assume that the library is located at
+//! > `path/to/the/crate.rlib`, and look nowhere else. Also, please do not
+//! > assume that the path I specified has the name `crate-name`.
+//!
+//! This flag basically overrides most matching logic except for validating that
+//! the file is indeed a rust library. The same `crate-name` can be specified
+//! twice to specify the rlib/dylib pair.
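+//!
+//! For example (paths are illustrative):
+//!
+//! ```text
+//! rustc a.rs --extern b=path/to/libb.rlib --extern b=path/to/libb.so
+//! ```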
+//!
+//! ## Enabling "multiple versions"
+//!
+//! This basically boils down to the ability to specify arbitrary packages to
+//! the compiler. For example, if crate A wanted to use Bv1 and Bv2, then it
+//! would look something like:
+//!
+//! ```compile_fail,E0463
+//! extern crate b1;
+//! extern crate b2;
+//!
+//! fn main() {}
+//! ```
+//!
+//! and the compiler would be invoked as:
+//!
+//! ```text
+//! rustc a.rs --extern b1=path/to/libb1.rlib --extern b2=path/to/libb2.rlib
+//! ```
+//!
+//! In this scenario there are two crates named `b` and the compiler must be
+//! manually driven to be informed where each crate is.
+//!
+//! ## Frobbing symbols
+//!
+//! One of the immediate problems with linking the same library into a program
+//! twice is dealing with duplicate symbols. The primary way to deal with this
+//! in rustc is to add hashes to the end of each symbol.
+//!
+//! In order to force hashes to change between versions of a library, if
+//! desired, the compiler exposes an option `-C metadata=foo`, which is used to
+//! initially seed each symbol hash. The string `foo` is prepended to each
+//! string-to-hash to ensure that symbols change over time.
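+//!
+//! For example, compiling the same source twice with different seeds (the
+//! file name and flag values below are illustrative of the pattern, not
+//! prescriptive) yields two libraries whose symbols do not collide:
+//!
+//! ```text
+//! rustc b.rs --crate-type=rlib -C metadata=v1 -C extra-filename=-v1
+//! rustc b.rs --crate-type=rlib -C metadata=v2 -C extra-filename=-v2
+//! ```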
+//!
+//! ## Loading transitive dependencies
+//!
+//! Dealing with same-named-but-distinct crates is not just a local problem, but
+//! one that also needs to be dealt with for transitive dependencies. Note that
+//! in the letter above `--extern` flags only apply to the *local* set of
+//! dependencies, not the upstream transitive dependencies. Consider this
+//! dependency graph:
+//!
+//! ```text
+//! A.1 A.2
+//! | |
+//! | |
+//! B C
+//! \ /
+//! \ /
+//! D
+//! ```
+//!
+//! In this scenario, when we compile `D`, we need to be able to distinctly
+//! resolve `A.1` and `A.2`, but an `--extern` flag cannot apply to these
+//! transitive dependencies.
+//!
+//! Note that the key idea here is that `B` and `C` are both *already compiled*.
+//! That is, they have already resolved their dependencies. Due to unrelated
+//! technical reasons, when a library is compiled, it is only compatible with
+//! the *exact same* version of the upstream libraries it was compiled against.
+//! We use the "Strict Version Hash" to identify the exact copy of an upstream
+//! library.
+//!
+//! With this knowledge, we know that `B` and `C` will depend on `A` with
+//! different SVH values, so we crawl the normal `-L` paths looking for
+//! `liba*.rlib` and filter based on the contained SVH.
+//!
+//! The end result is that `--extern` is never needed to specify upstream
+//! transitive dependencies.
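+//!
+//! Sketching the scenario above (hashes are invented for the example),
+//! compiling `D` may find both copies of `A` on the search path and tell
+//! them apart purely by SVH:
+//!
+//! ```text
+//! liba-1111.rlib   SVH matches what `B` was compiled against -> used for B
+//! liba-2222.rlib   SVH matches what `C` was compiled against -> used for C
+//! ```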
+//!
+//! # Wrapping up
+//!
+//! That's the general overview of loading crates in the compiler, but it's by
+//! no means all of the necessary details. Take a look at the rest of
+//! metadata::locator or metadata::creader for all the juicy details!
+
+use crate::creader::Library;
+use crate::rmeta::{rustc_version, MetadataBlob, METADATA_HEADER};
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::owning_ref::OwningRef;
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::MetadataRef;
+use rustc_errors::{struct_span_err, FatalError};
+use rustc_session::config::{self, CrateType};
+use rustc_session::cstore::{CrateSource, MetadataLoader};
+use rustc_session::filesearch::FileSearch;
+use rustc_session::search_paths::PathKind;
+use rustc_session::utils::CanonicalizedPath;
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+use rustc_target::spec::{Target, TargetTriple};
+
+use snap::read::FrameDecoder;
+use std::fmt::Write as _;
+use std::io::{Read, Result as IoResult, Write};
+use std::path::{Path, PathBuf};
+use std::{cmp, fmt, fs};
+use tracing::{debug, info};
+
+#[derive(Clone)]
+pub(crate) struct CrateLocator<'a> {
+ // Immutable per-session configuration.
+ only_needs_metadata: bool,
+ sysroot: &'a Path,
+ metadata_loader: &'a dyn MetadataLoader,
+
+ // Immutable per-search configuration.
+ crate_name: Symbol,
+ exact_paths: Vec<CanonicalizedPath>,
+ pub hash: Option<Svh>,
+ extra_filename: Option<&'a str>,
+ pub target: &'a Target,
+ pub triple: TargetTriple,
+ pub filesearch: FileSearch<'a>,
+ pub is_proc_macro: bool,
+
+ // Mutable in-progress state or output.
+ crate_rejections: CrateRejections,
+}
+
+#[derive(Clone)]
+pub(crate) struct CratePaths {
+ name: Symbol,
+ source: CrateSource,
+}
+
+impl CratePaths {
+ pub(crate) fn new(name: Symbol, source: CrateSource) -> CratePaths {
+ CratePaths { name, source }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq)]
+pub(crate) enum CrateFlavor {
+ Rlib,
+ Rmeta,
+ Dylib,
+}
+
+impl fmt::Display for CrateFlavor {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match *self {
+ CrateFlavor::Rlib => "rlib",
+ CrateFlavor::Rmeta => "rmeta",
+ CrateFlavor::Dylib => "dylib",
+ })
+ }
+}
+
+impl<'a> CrateLocator<'a> {
+ pub(crate) fn new(
+ sess: &'a Session,
+ metadata_loader: &'a dyn MetadataLoader,
+ crate_name: Symbol,
+ hash: Option<Svh>,
+ extra_filename: Option<&'a str>,
+ is_host: bool,
+ path_kind: PathKind,
+ ) -> CrateLocator<'a> {
+        // The `all()` is used because `--crate-type` can be passed multiple
+        // times (even `--crate-type=rlib --crate-type=rlib` is legal), so we
+        // check that every requested crate type is an rlib.
+ let is_rlib = sess.crate_types().iter().all(|c| *c == CrateType::Rlib);
+ let needs_object_code = sess.opts.output_types.should_codegen();
+ // If we're producing an rlib, then we don't need object code.
+ // Or, if we're not producing object code, then we don't need it either
+ // (e.g., if we're a cdylib but emitting just metadata).
+ let only_needs_metadata = is_rlib || !needs_object_code;
+
+ CrateLocator {
+ only_needs_metadata,
+ sysroot: &sess.sysroot,
+ metadata_loader,
+ crate_name,
+ exact_paths: if hash.is_none() {
+ sess.opts
+ .externs
+ .get(crate_name.as_str())
+ .into_iter()
+ .filter_map(|entry| entry.files())
+ .flatten()
+ .cloned()
+ .collect()
+ } else {
+ // SVH being specified means this is a transitive dependency,
+ // so `--extern` options do not apply.
+ Vec::new()
+ },
+ hash,
+ extra_filename,
+ target: if is_host { &sess.host } else { &sess.target },
+ triple: if is_host {
+ TargetTriple::from_triple(config::host_triple())
+ } else {
+ sess.opts.target_triple.clone()
+ },
+ filesearch: if is_host {
+ sess.host_filesearch(path_kind)
+ } else {
+ sess.target_filesearch(path_kind)
+ },
+ is_proc_macro: false,
+ crate_rejections: CrateRejections::default(),
+ }
+ }
+
+ pub(crate) fn reset(&mut self) {
+ self.crate_rejections.via_hash.clear();
+ self.crate_rejections.via_triple.clear();
+ self.crate_rejections.via_kind.clear();
+ self.crate_rejections.via_version.clear();
+ self.crate_rejections.via_filename.clear();
+ self.crate_rejections.via_invalid.clear();
+ }
+
+ pub(crate) fn maybe_load_library_crate(&mut self) -> Result<Option<Library>, CrateError> {
+ if !self.exact_paths.is_empty() {
+ return self.find_commandline_library();
+ }
+ let mut seen_paths = FxHashSet::default();
+ if let Some(extra_filename) = self.extra_filename {
+ if let library @ Some(_) = self.find_library_crate(extra_filename, &mut seen_paths)? {
+ return Ok(library);
+ }
+ }
+ self.find_library_crate("", &mut seen_paths)
+ }
+
+ fn find_library_crate(
+ &mut self,
+ extra_prefix: &str,
+ seen_paths: &mut FxHashSet<PathBuf>,
+ ) -> Result<Option<Library>, CrateError> {
+ let rmeta_prefix = &format!("lib{}{}", self.crate_name, extra_prefix);
+ let rlib_prefix = rmeta_prefix;
+ let dylib_prefix =
+ &format!("{}{}{}", self.target.dll_prefix, self.crate_name, extra_prefix);
+ let staticlib_prefix =
+ &format!("{}{}{}", self.target.staticlib_prefix, self.crate_name, extra_prefix);
+
+ let rmeta_suffix = ".rmeta";
+ let rlib_suffix = ".rlib";
+ let dylib_suffix = &self.target.dll_suffix;
+ let staticlib_suffix = &self.target.staticlib_suffix;
+
+ let mut candidates: FxHashMap<_, (FxHashMap<_, _>, FxHashMap<_, _>, FxHashMap<_, _>)> =
+ Default::default();
+
+ // First, find all possible candidate rlibs and dylibs purely based on
+        // the name of the files themselves. We're trying to match against an
+        // exact crate name and possibly an exact hash.
+ //
+ // During this step, we can filter all found libraries based on the
+ // name and id found in the crate id (we ignore the path portion for
+ // filename matching), as well as the exact hash (if specified). If we
+ // end up having many candidates, we must look at the metadata to
+ // perform exact matches against hashes/crate ids. Note that opening up
+ // the metadata is where we do an exact match against the full contents
+ // of the crate id (path/name/id).
+ //
+ // The goal of this step is to look at as little metadata as possible.
+ // Unfortunately, the prefix-based matching sometimes is over-eager.
+        // E.g. if `rlib_prefix` is `libstd` it'll match the file
+        // `libstd_detect-8d6701fb958915ad.rlib` (incorrect) as well as
+ // `libstd-f3ab5b1dea981f17.rlib` (correct). But this is hard to avoid
+ // given that `extra_filename` comes from the `-C extra-filename`
+ // option and thus can be anything, and the incorrect match will be
+ // handled safely in `extract_one`.
+ for search_path in self.filesearch.search_paths() {
+ debug!("searching {}", search_path.dir.display());
+ for spf in search_path.files.iter() {
+ debug!("testing {}", spf.path.display());
+
+ let f = &spf.file_name_str;
+ let (hash, kind) = if f.starts_with(rlib_prefix) && f.ends_with(rlib_suffix) {
+ (&f[rlib_prefix.len()..(f.len() - rlib_suffix.len())], CrateFlavor::Rlib)
+ } else if f.starts_with(rmeta_prefix) && f.ends_with(rmeta_suffix) {
+ (&f[rmeta_prefix.len()..(f.len() - rmeta_suffix.len())], CrateFlavor::Rmeta)
+ } else if f.starts_with(dylib_prefix) && f.ends_with(dylib_suffix.as_ref()) {
+ (&f[dylib_prefix.len()..(f.len() - dylib_suffix.len())], CrateFlavor::Dylib)
+ } else {
+ if f.starts_with(staticlib_prefix) && f.ends_with(staticlib_suffix.as_ref()) {
+ self.crate_rejections.via_kind.push(CrateMismatch {
+ path: spf.path.clone(),
+ got: "static".to_string(),
+ });
+ }
+ continue;
+ };
+
+ info!("lib candidate: {}", spf.path.display());
+
+ let (rlibs, rmetas, dylibs) = candidates.entry(hash.to_string()).or_default();
+ let path = fs::canonicalize(&spf.path).unwrap_or_else(|_| spf.path.clone());
+ if seen_paths.contains(&path) {
+ continue;
+ };
+ seen_paths.insert(path.clone());
+ match kind {
+ CrateFlavor::Rlib => rlibs.insert(path, search_path.kind),
+ CrateFlavor::Rmeta => rmetas.insert(path, search_path.kind),
+ CrateFlavor::Dylib => dylibs.insert(path, search_path.kind),
+ };
+ }
+ }
+
+        // We have now collected all known libraries into a set of candidates
+        // keyed off the hash in the filename. For each hash, we also have a
+        // list of rlibs/rmetas/dylibs that apply. Here, we map each of these
+        // lists (per hash) to a Library candidate for returning.
+ //
+ // A Library candidate is created if the metadata for the set of
+ // libraries corresponds to the crate id and hash criteria that this
+ // search is being performed for.
+ let mut libraries = FxHashMap::default();
+ for (_hash, (rlibs, rmetas, dylibs)) in candidates {
+ if let Some((svh, lib)) = self.extract_lib(rlibs, rmetas, dylibs)? {
+ libraries.insert(svh, lib);
+ }
+ }
+
+ // Having now translated all relevant found hashes into libraries, see
+ // what we've got and figure out if we found multiple candidates for
+ // libraries or not.
+ match libraries.len() {
+ 0 => Ok(None),
+ 1 => Ok(Some(libraries.into_iter().next().unwrap().1)),
+ _ => Err(CrateError::MultipleMatchingCrates(self.crate_name, libraries)),
+ }
+ }
+
+ fn extract_lib(
+ &mut self,
+ rlibs: FxHashMap<PathBuf, PathKind>,
+ rmetas: FxHashMap<PathBuf, PathKind>,
+ dylibs: FxHashMap<PathBuf, PathKind>,
+ ) -> Result<Option<(Svh, Library)>, CrateError> {
+ let mut slot = None;
+        // Order here matters: rmeta should come first. See comment in
+        // `extract_one` below.
+ let source = CrateSource {
+ rmeta: self.extract_one(rmetas, CrateFlavor::Rmeta, &mut slot)?,
+ rlib: self.extract_one(rlibs, CrateFlavor::Rlib, &mut slot)?,
+ dylib: self.extract_one(dylibs, CrateFlavor::Dylib, &mut slot)?,
+ };
+ Ok(slot.map(|(svh, metadata)| (svh, Library { source, metadata })))
+ }
+
+ fn needs_crate_flavor(&self, flavor: CrateFlavor) -> bool {
+ if flavor == CrateFlavor::Dylib && self.is_proc_macro {
+ return true;
+ }
+
+ if self.only_needs_metadata {
+ flavor == CrateFlavor::Rmeta
+ } else {
+ // we need all flavors (perhaps not true, but what we do for now)
+ true
+ }
+ }
+
+    // Attempts to extract *one* library from the set `m`. If the set has no
+    // elements, `None` is returned. If the set has more than one element, then
+    // errors and notes are emitted describing the set of libraries.
+ //
+ // With only one library in the set, this function will extract it, and then
+ // read the metadata from it if `*slot` is `None`. If the metadata couldn't
+ // be read, it is assumed that the file isn't a valid rust library (no
+ // errors are emitted).
+ fn extract_one(
+ &mut self,
+ m: FxHashMap<PathBuf, PathKind>,
+ flavor: CrateFlavor,
+ slot: &mut Option<(Svh, MetadataBlob)>,
+ ) -> Result<Option<(PathBuf, PathKind)>, CrateError> {
+ // If we are producing an rlib, and we've already loaded metadata, then
+ // we should not attempt to discover further crate sources (unless we're
+ // locating a proc macro; exact logic is in needs_crate_flavor). This means
+ // that under -Zbinary-dep-depinfo we will not emit a dependency edge on
+ // the *unused* rlib, and by returning `None` here immediately we
+ // guarantee that we do indeed not use it.
+ //
+ // See also #68149 which provides more detail on why emitting the
+ // dependency on the rlib is a bad thing.
+ //
+ // We currently do not verify that these other sources are even in sync,
+ // and this is arguably a bug (see #10786), but because reading metadata
+ // is quite slow (especially from dylibs) we currently do not read it
+ // from the other crate sources.
+ if slot.is_some() {
+ if m.is_empty() || !self.needs_crate_flavor(flavor) {
+ return Ok(None);
+ } else if m.len() == 1 {
+ return Ok(Some(m.into_iter().next().unwrap()));
+ }
+ }
+
+ let mut ret: Option<(PathBuf, PathKind)> = None;
+ let mut err_data: Option<Vec<PathBuf>> = None;
+ for (lib, kind) in m {
+ info!("{} reading metadata from: {}", flavor, lib.display());
+ if flavor == CrateFlavor::Rmeta && lib.metadata().map_or(false, |m| m.len() == 0) {
+                // Empty files will cause get_metadata_section to fail. Rmeta
+                // files can be empty, for example with binaries (which can
+                // often appear with `cargo check` when checking a library as
+                // a unit test). We don't want to emit a user-visible warning
+                // in this case as it is not a real problem.
+ debug!("skipping empty file");
+ continue;
+ }
+ let (hash, metadata) =
+ match get_metadata_section(self.target, flavor, &lib, self.metadata_loader) {
+ Ok(blob) => {
+ if let Some(h) = self.crate_matches(&blob, &lib) {
+ (h, blob)
+ } else {
+ info!("metadata mismatch");
+ continue;
+ }
+ }
+ Err(MetadataError::LoadFailure(err)) => {
+ info!("no metadata found: {}", err);
+ // The file was present and created by the same compiler version, but we
+ // couldn't load it for some reason. Give a hard error instead of silently
+ // ignoring it, but only if we would have given an error anyway.
+ self.crate_rejections
+ .via_invalid
+ .push(CrateMismatch { path: lib, got: err });
+ continue;
+ }
+ Err(err @ MetadataError::NotPresent(_)) => {
+ info!("no metadata found: {}", err);
+ continue;
+ }
+ };
+ // If we see multiple hashes, emit an error about duplicate candidates.
+ if slot.as_ref().map_or(false, |s| s.0 != hash) {
+ if let Some(candidates) = err_data {
+ return Err(CrateError::MultipleCandidates(
+ self.crate_name,
+ flavor,
+ candidates,
+ ));
+ }
+ err_data = Some(vec![ret.as_ref().unwrap().0.clone()]);
+ *slot = None;
+ }
+ if let Some(candidates) = &mut err_data {
+ candidates.push(lib);
+ continue;
+ }
+
+            // Ok so at this point we've determined that `(lib, kind)` above is
+            // a candidate crate to load, and that `slot` is either `None` (this
+            // is the first crate of its kind) or, if `Some`, the previous path
+            // has the exact same hash (e.g., it's the exact same crate).
+            //
+            // In principle these two candidate crates are exactly the same so
+            // we can choose either of them to link. As a stupidly gross hack,
+            // however, we favor the crate in the sysroot.
+ //
+            // You can find more info in rust-lang/rust#39518 and various linked
+            // issues, but the general gist is that when testing libstd the
+            // compiler has two candidates to choose from: one in the sysroot
+            // and one in the deps folder. These two crates are the exact same
+            // crate but if the compiler chooses the one in the deps folder
+            // it'll cause spurious errors on Windows.
+ //
+ // As a result, we favor the sysroot crate here. Note that the
+ // candidates are all canonicalized, so we canonicalize the sysroot
+ // as well.
+ if let Some((prev, _)) = &ret {
+ let sysroot = self.sysroot;
+ let sysroot = sysroot.canonicalize().unwrap_or_else(|_| sysroot.to_path_buf());
+ if prev.starts_with(&sysroot) {
+ continue;
+ }
+ }
+ *slot = Some((hash, metadata));
+ ret = Some((lib, kind));
+ }
+
+ if let Some(candidates) = err_data {
+ Err(CrateError::MultipleCandidates(self.crate_name, flavor, candidates))
+ } else {
+ Ok(ret)
+ }
+ }
+
+ fn crate_matches(&mut self, metadata: &MetadataBlob, libpath: &Path) -> Option<Svh> {
+ let rustc_version = rustc_version();
+ let found_version = metadata.get_rustc_version();
+ if found_version != rustc_version {
+ info!("Rejecting via version: expected {} got {}", rustc_version, found_version);
+ self.crate_rejections
+ .via_version
+ .push(CrateMismatch { path: libpath.to_path_buf(), got: found_version });
+ return None;
+ }
+
+ let root = metadata.get_root();
+ if root.is_proc_macro_crate() != self.is_proc_macro {
+ info!(
+ "Rejecting via proc macro: expected {} got {}",
+ self.is_proc_macro,
+ root.is_proc_macro_crate(),
+ );
+ return None;
+ }
+
+ if self.exact_paths.is_empty() && self.crate_name != root.name() {
+ info!("Rejecting via crate name");
+ return None;
+ }
+
+ if root.triple() != &self.triple {
+ info!("Rejecting via crate triple: expected {} got {}", self.triple, root.triple());
+ self.crate_rejections.via_triple.push(CrateMismatch {
+ path: libpath.to_path_buf(),
+ got: root.triple().to_string(),
+ });
+ return None;
+ }
+
+ let hash = root.hash();
+ if let Some(expected_hash) = self.hash {
+ if hash != expected_hash {
+ info!("Rejecting via hash: expected {} got {}", expected_hash, hash);
+ self.crate_rejections
+ .via_hash
+ .push(CrateMismatch { path: libpath.to_path_buf(), got: hash.to_string() });
+ return None;
+ }
+ }
+
+ Some(hash)
+ }
+
+ fn find_commandline_library(&mut self) -> Result<Option<Library>, CrateError> {
+ // First, filter out all libraries that look suspicious. We only accept
+ // files which actually exist that have the correct naming scheme for
+ // rlibs/dylibs.
+ let mut rlibs = FxHashMap::default();
+ let mut rmetas = FxHashMap::default();
+ let mut dylibs = FxHashMap::default();
+ for loc in &self.exact_paths {
+ if !loc.canonicalized().exists() {
+ return Err(CrateError::ExternLocationNotExist(
+ self.crate_name,
+ loc.original().clone(),
+ ));
+ }
+ let Some(file) = loc.original().file_name().and_then(|s| s.to_str()) else {
+ return Err(CrateError::ExternLocationNotFile(
+ self.crate_name,
+ loc.original().clone(),
+ ));
+ };
+
+ if file.starts_with("lib") && (file.ends_with(".rlib") || file.ends_with(".rmeta"))
+ || file.starts_with(self.target.dll_prefix.as_ref())
+ && file.ends_with(self.target.dll_suffix.as_ref())
+ {
+ // Make sure there's at most one rlib and at most one dylib.
+ // Note to take care and match against the non-canonicalized name:
+ // some systems save build artifacts into content-addressed stores
+ // that do not preserve extensions, and then link to them using
+ // e.g. symbolic links. If we canonicalize too early, we resolve
+ // the symlink, the file type is lost and we might treat rlibs and
+ // rmetas as dylibs.
+ let loc_canon = loc.canonicalized().clone();
+ let loc = loc.original();
+ if loc.file_name().unwrap().to_str().unwrap().ends_with(".rlib") {
+ rlibs.insert(loc_canon, PathKind::ExternFlag);
+ } else if loc.file_name().unwrap().to_str().unwrap().ends_with(".rmeta") {
+ rmetas.insert(loc_canon, PathKind::ExternFlag);
+ } else {
+ dylibs.insert(loc_canon, PathKind::ExternFlag);
+ }
+ } else {
+ self.crate_rejections
+ .via_filename
+ .push(CrateMismatch { path: loc.original().clone(), got: String::new() });
+ }
+ }
+
+ // Extract the dylib/rlib/rmeta triple.
+ Ok(self.extract_lib(rlibs, rmetas, dylibs)?.map(|(_, lib)| lib))
+ }
+
+ pub(crate) fn into_error(self, root: Option<CratePaths>) -> CrateError {
+ CrateError::LocatorCombined(CombinedLocatorError {
+ crate_name: self.crate_name,
+ root,
+ triple: self.triple,
+ dll_prefix: self.target.dll_prefix.to_string(),
+ dll_suffix: self.target.dll_suffix.to_string(),
+ crate_rejections: self.crate_rejections,
+ })
+ }
+}
+
+fn get_metadata_section<'p>(
+ target: &Target,
+ flavor: CrateFlavor,
+ filename: &'p Path,
+ loader: &dyn MetadataLoader,
+) -> Result<MetadataBlob, MetadataError<'p>> {
+ if !filename.exists() {
+ return Err(MetadataError::NotPresent(filename));
+ }
+ let raw_bytes: MetadataRef = match flavor {
+ CrateFlavor::Rlib => {
+ loader.get_rlib_metadata(target, filename).map_err(MetadataError::LoadFailure)?
+ }
+ CrateFlavor::Dylib => {
+ let buf =
+ loader.get_dylib_metadata(target, filename).map_err(MetadataError::LoadFailure)?;
+ // The header is uncompressed
+ let header_len = METADATA_HEADER.len();
+ debug!("checking {} bytes of metadata-version stamp", header_len);
+ let header = &buf[..cmp::min(header_len, buf.len())];
+ if header != METADATA_HEADER {
+ return Err(MetadataError::LoadFailure(format!(
+ "invalid metadata version found: {}",
+ filename.display()
+ )));
+ }
+
+ // Header is okay -> inflate the actual metadata
+ let compressed_bytes = &buf[header_len..];
+ debug!("inflating {} bytes of compressed metadata", compressed_bytes.len());
+ // Assume the decompressed data will be at least the size of the compressed data, so we
+ // don't have to grow the buffer as much.
+ let mut inflated = Vec::with_capacity(compressed_bytes.len());
+ match FrameDecoder::new(compressed_bytes).read_to_end(&mut inflated) {
+ Ok(_) => rustc_erase_owner!(OwningRef::new(inflated).map_owner_box()),
+ Err(_) => {
+ return Err(MetadataError::LoadFailure(format!(
+ "failed to decompress metadata: {}",
+ filename.display()
+ )));
+ }
+ }
+ }
+ CrateFlavor::Rmeta => {
+ // mmap the file, because only a small fraction of it is read.
+ let file = std::fs::File::open(filename).map_err(|_| {
+ MetadataError::LoadFailure(format!(
+ "failed to open rmeta metadata: '{}'",
+ filename.display()
+ ))
+ })?;
+ let mmap = unsafe { Mmap::map(file) };
+ let mmap = mmap.map_err(|_| {
+ MetadataError::LoadFailure(format!(
+ "failed to mmap rmeta metadata: '{}'",
+ filename.display()
+ ))
+ })?;
+
+ rustc_erase_owner!(OwningRef::new(mmap).map_owner_box())
+ }
+ };
+ let blob = MetadataBlob::new(raw_bytes);
+ if blob.is_compatible() {
+ Ok(blob)
+ } else {
+ Err(MetadataError::LoadFailure(format!(
+ "invalid metadata version found: {}",
+ filename.display()
+ )))
+ }
+}
+
+/// Look for a plugin registrar. Returns the path to its dylib.
+pub fn find_plugin_registrar(
+ sess: &Session,
+ metadata_loader: &dyn MetadataLoader,
+ span: Span,
+ name: Symbol,
+) -> PathBuf {
+ find_plugin_registrar_impl(sess, metadata_loader, name).unwrap_or_else(|err| {
+ // `core` is always available if we got as far as loading plugins.
+ err.report(sess, span, false);
+ FatalError.raise()
+ })
+}
+
+fn find_plugin_registrar_impl<'a>(
+ sess: &'a Session,
+ metadata_loader: &dyn MetadataLoader,
+ name: Symbol,
+) -> Result<PathBuf, CrateError> {
+ info!("find plugin registrar `{}`", name);
+ let mut locator = CrateLocator::new(
+ sess,
+ metadata_loader,
+ name,
+ None, // hash
+ None, // extra_filename
+ true, // is_host
+ PathKind::Crate,
+ );
+
+ match locator.maybe_load_library_crate()? {
+ Some(library) => match library.source.dylib {
+ Some(dylib) => Ok(dylib.0),
+ None => Err(CrateError::NonDylibPlugin(name)),
+ },
+ None => Err(locator.into_error(None)),
+ }
+}
+
+/// A diagnostic function for dumping crate metadata to an output stream.
+pub fn list_file_metadata(
+ target: &Target,
+ path: &Path,
+ metadata_loader: &dyn MetadataLoader,
+ out: &mut dyn Write,
+) -> IoResult<()> {
+ let filename = path.file_name().unwrap().to_str().unwrap();
+ let flavor = if filename.ends_with(".rlib") {
+ CrateFlavor::Rlib
+ } else if filename.ends_with(".rmeta") {
+ CrateFlavor::Rmeta
+ } else {
+ CrateFlavor::Dylib
+ };
+ match get_metadata_section(target, flavor, path, metadata_loader) {
+ Ok(metadata) => metadata.list_crate_metadata(out),
+ Err(msg) => write!(out, "{}\n", msg),
+ }
+}
+
+// ------------------------------------------ Error reporting -------------------------------------
+
+#[derive(Clone)]
+struct CrateMismatch {
+ path: PathBuf,
+ got: String,
+}
+
+#[derive(Clone, Default)]
+struct CrateRejections {
+ via_hash: Vec<CrateMismatch>,
+ via_triple: Vec<CrateMismatch>,
+ via_kind: Vec<CrateMismatch>,
+ via_version: Vec<CrateMismatch>,
+ via_filename: Vec<CrateMismatch>,
+ via_invalid: Vec<CrateMismatch>,
+}
+
+/// Candidate rejection reasons collected during crate search.
+/// If no candidate is accepted, then these reasons are presented to the user,
+/// otherwise they are ignored.
+pub(crate) struct CombinedLocatorError {
+ crate_name: Symbol,
+ root: Option<CratePaths>,
+ triple: TargetTriple,
+ dll_prefix: String,
+ dll_suffix: String,
+ crate_rejections: CrateRejections,
+}
+
+pub(crate) enum CrateError {
+ NonAsciiName(Symbol),
+ ExternLocationNotExist(Symbol, PathBuf),
+ ExternLocationNotFile(Symbol, PathBuf),
+ MultipleCandidates(Symbol, CrateFlavor, Vec<PathBuf>),
+ MultipleMatchingCrates(Symbol, FxHashMap<Svh, Library>),
+ SymbolConflictsCurrent(Symbol),
+ SymbolConflictsOthers(Symbol),
+ StableCrateIdCollision(Symbol, Symbol),
+ DlOpen(String),
+ DlSym(String),
+ LocatorCombined(CombinedLocatorError),
+ NonDylibPlugin(Symbol),
+}
+
+enum MetadataError<'a> {
+ /// The file was missing.
+ NotPresent(&'a Path),
+ /// The file was present and invalid.
+ LoadFailure(String),
+}
+
+impl fmt::Display for MetadataError<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ MetadataError::NotPresent(filename) => {
+ f.write_str(&format!("no such file: '{}'", filename.display()))
+ }
+ MetadataError::LoadFailure(msg) => f.write_str(msg),
+ }
+ }
+}
+
+impl CrateError {
+ pub(crate) fn report(self, sess: &Session, span: Span, missing_core: bool) {
+ let mut diag = match self {
+ CrateError::NonAsciiName(crate_name) => sess.struct_span_err(
+ span,
+ &format!("cannot load a crate with a non-ascii name `{}`", crate_name),
+ ),
+ CrateError::ExternLocationNotExist(crate_name, loc) => sess.struct_span_err(
+ span,
+ &format!("extern location for {} does not exist: {}", crate_name, loc.display()),
+ ),
+ CrateError::ExternLocationNotFile(crate_name, loc) => sess.struct_span_err(
+ span,
+ &format!("extern location for {} is not a file: {}", crate_name, loc.display()),
+ ),
+ CrateError::MultipleCandidates(crate_name, flavor, candidates) => {
+ let mut err = struct_span_err!(
+ sess,
+ span,
+ E0465,
+ "multiple {} candidates for `{}` found",
+ flavor,
+ crate_name,
+ );
+ for (i, candidate) in candidates.iter().enumerate() {
+ err.span_note(span, &format!("candidate #{}: {}", i + 1, candidate.display()));
+ }
+ err
+ }
+ CrateError::MultipleMatchingCrates(crate_name, libraries) => {
+ let mut err = struct_span_err!(
+ sess,
+ span,
+ E0464,
+ "multiple matching crates for `{}`",
+ crate_name
+ );
+ let mut libraries: Vec<_> = libraries.into_values().collect();
+ // Make ordering of candidates deterministic.
+ // This has to `clone()` to work around lifetime restrictions with `sort_by_key()`.
+ // `sort_by()` could be used instead, but this is in the error path,
+ // so the performance shouldn't matter.
+ libraries.sort_by_cached_key(|lib| lib.source.paths().next().unwrap().clone());
+ let candidates = libraries
+ .iter()
+ .map(|lib| {
+ let crate_name = lib.metadata.get_root().name();
+ let crate_name = crate_name.as_str();
+ let mut paths = lib.source.paths();
+
+ // This `unwrap()` should be okay because there has to be at least one
+ // source file. `CrateSource`'s docs confirm that too.
+ let mut s = format!(
+ "\ncrate `{}`: {}",
+ crate_name,
+ paths.next().unwrap().display()
+ );
+ let padding = 8 + crate_name.len();
+ for path in paths {
+ write!(s, "\n{:>padding$}", path.display(), padding = padding).unwrap();
+ }
+ s
+ })
+ .collect::<String>();
+ err.note(&format!("candidates:{}", candidates));
+ err
+ }
+ CrateError::SymbolConflictsCurrent(root_name) => struct_span_err!(
+ sess,
+ span,
+ E0519,
+ "the current crate is indistinguishable from one of its dependencies: it has the \
+ same crate-name `{}` and was compiled with the same `-C metadata` arguments. \
+ This will result in symbol conflicts between the two.",
+ root_name,
+ ),
+ CrateError::SymbolConflictsOthers(root_name) => struct_span_err!(
+ sess,
+ span,
+ E0523,
+ "found two different crates with name `{}` that are not distinguished by differing \
+ `-C metadata`. This will result in symbol conflicts between the two.",
+ root_name,
+ ),
+ CrateError::StableCrateIdCollision(crate_name0, crate_name1) => {
+ let msg = format!(
+ "found crates (`{}` and `{}`) with colliding StableCrateId values.",
+ crate_name0, crate_name1
+ );
+ sess.struct_span_err(span, &msg)
+ }
+ CrateError::DlOpen(s) | CrateError::DlSym(s) => sess.struct_span_err(span, &s),
+ CrateError::LocatorCombined(locator) => {
+ let crate_name = locator.crate_name;
+ let add = match &locator.root {
+ None => String::new(),
+ Some(r) => format!(" which `{}` depends on", r.name),
+ };
+ let mut msg = "the following crate versions were found:".to_string();
+ let mut err = if !locator.crate_rejections.via_hash.is_empty() {
+ let mut err = struct_span_err!(
+ sess,
+ span,
+ E0460,
+ "found possibly newer version of crate `{}`{}",
+ crate_name,
+ add,
+ );
+ err.note("perhaps that crate needs to be recompiled?");
+ let mismatches = locator.crate_rejections.via_hash.iter();
+ for CrateMismatch { path, .. } in mismatches {
+ msg.push_str(&format!("\ncrate `{}`: {}", crate_name, path.display()));
+ }
+ if let Some(r) = locator.root {
+ for path in r.source.paths() {
+ msg.push_str(&format!("\ncrate `{}`: {}", r.name, path.display()));
+ }
+ }
+ err.note(&msg);
+ err
+ } else if !locator.crate_rejections.via_triple.is_empty() {
+ let mut err = struct_span_err!(
+ sess,
+ span,
+ E0461,
+ "couldn't find crate `{}` with expected target triple {}{}",
+ crate_name,
+ locator.triple,
+ add,
+ );
+ let mismatches = locator.crate_rejections.via_triple.iter();
+ for CrateMismatch { path, got } in mismatches {
+ msg.push_str(&format!(
+ "\ncrate `{}`, target triple {}: {}",
+ crate_name,
+ got,
+ path.display(),
+ ));
+ }
+ err.note(&msg);
+ err
+ } else if !locator.crate_rejections.via_kind.is_empty() {
+ let mut err = struct_span_err!(
+ sess,
+ span,
+ E0462,
+ "found staticlib `{}` instead of rlib or dylib{}",
+ crate_name,
+ add,
+ );
+ err.help("please recompile that crate using --crate-type lib");
+ let mismatches = locator.crate_rejections.via_kind.iter();
+ for CrateMismatch { path, .. } in mismatches {
+ msg.push_str(&format!("\ncrate `{}`: {}", crate_name, path.display()));
+ }
+ err.note(&msg);
+ err
+ } else if !locator.crate_rejections.via_version.is_empty() {
+ let mut err = struct_span_err!(
+ sess,
+ span,
+ E0514,
+ "found crate `{}` compiled by an incompatible version of rustc{}",
+ crate_name,
+ add,
+ );
+ err.help(&format!(
+ "please recompile that crate using this compiler ({}) \
+ (consider running `cargo clean` first)",
+ rustc_version(),
+ ));
+ let mismatches = locator.crate_rejections.via_version.iter();
+ for CrateMismatch { path, got } in mismatches {
+ msg.push_str(&format!(
+ "\ncrate `{}` compiled by {}: {}",
+ crate_name,
+ got,
+ path.display(),
+ ));
+ }
+ err.note(&msg);
+ err
+ } else if !locator.crate_rejections.via_invalid.is_empty() {
+ let mut err = struct_span_err!(
+ sess,
+ span,
+ E0786,
+ "found invalid metadata files for crate `{}`{}",
+ crate_name,
+ add,
+ );
+ for CrateMismatch { path: _, got } in locator.crate_rejections.via_invalid {
+ err.note(&got);
+ }
+ err
+ } else {
+ let mut err = struct_span_err!(
+ sess,
+ span,
+ E0463,
+ "can't find crate for `{}`{}",
+ crate_name,
+ add,
+ );
+
+ if (crate_name == sym::std || crate_name == sym::core)
+ && locator.triple != TargetTriple::from_triple(config::host_triple())
+ {
+ if missing_core {
+ err.note(&format!(
+ "the `{}` target may not be installed",
+ locator.triple
+ ));
+ } else {
+ err.note(&format!(
+ "the `{}` target may not support the standard library",
+ locator.triple
+ ));
+ }
+ // NOTE: this suggests using rustup, even though the user may not have it installed.
+ // That's because they could choose to install it; or this may give them a hint which
+ // target they need to install from their distro.
+ if missing_core {
+ err.help(&format!(
+ "consider downloading the target with `rustup target add {}`",
+ locator.triple
+ ));
+ }
+ // Suggest using #![no_std]. #[no_core] is unstable and not really supported anyway.
+ // NOTE: this is a dummy span if `extern crate std` was injected by the compiler.
+ // If it's not a dummy, that means someone added `extern crate std` explicitly and `#![no_std]` won't help.
+ if !missing_core && span.is_dummy() {
+ let current_crate =
+ sess.opts.crate_name.as_deref().unwrap_or("<unknown>");
+ err.note(&format!(
+ "`std` is required by `{}` because it does not declare `#![no_std]`",
+ current_crate
+ ));
+ }
+ if sess.is_nightly_build() {
+ err.help("consider building the standard library from source with `cargo build -Zbuild-std`");
+ }
+ } else if crate_name
+ == Symbol::intern(&sess.opts.unstable_opts.profiler_runtime)
+ {
+ err.note("the compiler may have been built without the profiler runtime");
+ } else if crate_name.as_str().starts_with("rustc_") {
+ err.help(
+ "maybe you need to install the missing components with: \
+ `rustup component add rust-src rustc-dev llvm-tools-preview`",
+ );
+ }
+ err.span_label(span, "can't find crate");
+ err
+ };
+
+ if !locator.crate_rejections.via_filename.is_empty() {
+ let mismatches = locator.crate_rejections.via_filename.iter();
+ for CrateMismatch { path, .. } in mismatches {
+ err.note(&format!(
+ "extern location for {} is of an unknown type: {}",
+ crate_name,
+ path.display(),
+ ))
+ .help(&format!(
+ "file name should be lib*.rlib or {}*.{}",
+ locator.dll_prefix, locator.dll_suffix
+ ));
+ }
+ }
+ err
+ }
+ CrateError::NonDylibPlugin(crate_name) => struct_span_err!(
+ sess,
+ span,
+ E0457,
+ "plugin `{}` only found in rlib format, but must be available in dylib format",
+ crate_name,
+ ),
+ };
+
+ diag.emit();
+ }
+}
diff --git a/compiler/rustc_metadata/src/native_libs.rs b/compiler/rustc_metadata/src/native_libs.rs
new file mode 100644
index 000000000..9f6079ecb
--- /dev/null
+++ b/compiler/rustc_metadata/src/native_libs.rs
@@ -0,0 +1,504 @@
+use rustc_ast::{NestedMetaItem, CRATE_NODE_ID};
+use rustc_attr as attr;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_middle::ty::{List, ParamEnv, ParamEnvAnd, Ty, TyCtxt};
+use rustc_session::cstore::{DllCallingConvention, DllImport, NativeLib};
+use rustc_session::parse::feature_err;
+use rustc_session::utils::NativeLibKind;
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_target::spec::abi::Abi;
+
+pub(crate) fn collect(tcx: TyCtxt<'_>) -> Vec<NativeLib> {
+ let mut collector = Collector { tcx, libs: Vec::new() };
+ for id in tcx.hir().items() {
+ collector.process_item(id);
+ }
+ collector.process_command_line();
+ collector.libs
+}
+
+pub(crate) fn relevant_lib(sess: &Session, lib: &NativeLib) -> bool {
+ match lib.cfg {
+ Some(ref cfg) => attr::cfg_matches(cfg, &sess.parse_sess, CRATE_NODE_ID, None),
+ None => true,
+ }
+}
+
+struct Collector<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ libs: Vec<NativeLib>,
+}
+
+impl<'tcx> Collector<'tcx> {
+ fn process_item(&mut self, id: rustc_hir::ItemId) {
+ if !matches!(self.tcx.def_kind(id.def_id), DefKind::ForeignMod) {
+ return;
+ }
+
+ let it = self.tcx.hir().item(id);
+ let hir::ItemKind::ForeignMod { abi, items: foreign_mod_items } = it.kind else {
+ return;
+ };
+
+ if abi == Abi::Rust || abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
+ return;
+ }
+
+ // Process all of the #[link(..)]-style arguments
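+        //
+        // A typical attribute handled below looks like (names illustrative):
+        //
+        //     #[link(name = "z", kind = "static", modifiers = "+whole-archive")]
+        //     extern "C" { /* ... */ }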
+ let sess = &self.tcx.sess;
+ let features = self.tcx.features();
+ for m in self.tcx.hir().attrs(it.hir_id()).iter().filter(|a| a.has_name(sym::link)) {
+ let Some(items) = m.meta_item_list() else {
+ continue;
+ };
+
+ let mut name = None;
+ let mut kind = None;
+ let mut modifiers = None;
+ let mut cfg = None;
+ let mut wasm_import_module = None;
+ for item in items.iter() {
+ match item.name_or_empty() {
+ sym::name => {
+ if name.is_some() {
+ let msg = "multiple `name` arguments in a single `#[link]` attribute";
+ sess.span_err(item.span(), msg);
+ continue;
+ }
+ let Some(link_name) = item.value_str() else {
+ let msg = "link name must be of the form `name = \"string\"`";
+ sess.span_err(item.span(), msg);
+ continue;
+ };
+ let span = item.name_value_literal_span().unwrap();
+ if link_name.is_empty() {
+ struct_span_err!(sess, span, E0454, "link name must not be empty")
+ .span_label(span, "empty link name")
+ .emit();
+ }
+ name = Some((link_name, span));
+ }
+ sym::kind => {
+ if kind.is_some() {
+ let msg = "multiple `kind` arguments in a single `#[link]` attribute";
+ sess.span_err(item.span(), msg);
+ continue;
+ }
+ let Some(link_kind) = item.value_str() else {
+ let msg = "link kind must be of the form `kind = \"string\"`";
+ sess.span_err(item.span(), msg);
+ continue;
+ };
+
+ let span = item.name_value_literal_span().unwrap();
+ let link_kind = match link_kind.as_str() {
+ "static" => NativeLibKind::Static { bundle: None, whole_archive: None },
+ "dylib" => NativeLibKind::Dylib { as_needed: None },
+ "framework" => {
+ if !sess.target.is_like_osx {
+ struct_span_err!(
+ sess,
+ span,
+ E0455,
+ "link kind `framework` is only supported on Apple targets"
+ )
+ .emit();
+ }
+ NativeLibKind::Framework { as_needed: None }
+ }
+ "raw-dylib" => {
+ if !sess.target.is_like_windows {
+ struct_span_err!(
+ sess,
+ span,
+ E0455,
+ "link kind `raw-dylib` is only supported on Windows targets"
+ )
+ .emit();
+ } else if !features.raw_dylib {
+ feature_err(
+ &sess.parse_sess,
+ sym::raw_dylib,
+ span,
+ "link kind `raw-dylib` is unstable",
+ )
+ .emit();
+ }
+ NativeLibKind::RawDylib
+ }
+ kind => {
+ let msg = format!(
+ "unknown link kind `{kind}`, expected one of: \
+ static, dylib, framework, raw-dylib"
+ );
+ struct_span_err!(sess, span, E0458, "{}", msg)
+ .span_label(span, "unknown link kind")
+ .emit();
+ continue;
+ }
+ };
+ kind = Some(link_kind);
+ }
+ sym::modifiers => {
+ if modifiers.is_some() {
+ let msg =
+ "multiple `modifiers` arguments in a single `#[link]` attribute";
+ sess.span_err(item.span(), msg);
+ continue;
+ }
+ let Some(link_modifiers) = item.value_str() else {
+ let msg = "link modifiers must be of the form `modifiers = \"string\"`";
+ sess.span_err(item.span(), msg);
+ continue;
+ };
+ modifiers = Some((link_modifiers, item.name_value_literal_span().unwrap()));
+ }
+ sym::cfg => {
+ if cfg.is_some() {
+ let msg = "multiple `cfg` arguments in a single `#[link]` attribute";
+ sess.span_err(item.span(), msg);
+ continue;
+ }
+ let Some(link_cfg) = item.meta_item_list() else {
+ let msg = "link cfg must be of the form `cfg(/* predicate */)`";
+ sess.span_err(item.span(), msg);
+ continue;
+ };
+ let [NestedMetaItem::MetaItem(link_cfg)] = link_cfg else {
+ let msg = "link cfg must have a single predicate argument";
+ sess.span_err(item.span(), msg);
+ continue;
+ };
+ if !features.link_cfg {
+ feature_err(
+ &sess.parse_sess,
+ sym::link_cfg,
+ item.span(),
+ "link cfg is unstable",
+ )
+ .emit();
+ }
+ cfg = Some(link_cfg.clone());
+ }
+ sym::wasm_import_module => {
+ if wasm_import_module.is_some() {
+ let msg = "multiple `wasm_import_module` arguments \
+ in a single `#[link]` attribute";
+ sess.span_err(item.span(), msg);
+ continue;
+ }
+ let Some(link_wasm_import_module) = item.value_str() else {
+ let msg = "wasm import module must be of the form \
+ `wasm_import_module = \"string\"`";
+ sess.span_err(item.span(), msg);
+ continue;
+ };
+ wasm_import_module = Some((link_wasm_import_module, item.span()));
+ }
+ _ => {
+ let msg = "unexpected `#[link]` argument, expected one of: \
+ name, kind, modifiers, cfg, wasm_import_module";
+ sess.span_err(item.span(), msg);
+ }
+ }
+ }
+
+ // Do this outside the above loop so we don't depend on modifiers coming after kinds
+ let mut verbatim = None;
+ if let Some((modifiers, span)) = modifiers {
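+                // The modifiers string is a comma-separated list such as
+                // "+whole-archive,-bundle" (example only); each entry must
+                // begin with '+' (enable) or '-' (disable).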
+ for modifier in modifiers.as_str().split(',') {
+ let (modifier, value) = match modifier.strip_prefix(&['+', '-']) {
+ Some(m) => (m, modifier.starts_with('+')),
+ None => {
+ sess.span_err(
+ span,
+ "invalid linking modifier syntax, expected '+' or '-' prefix \
+ before one of: bundle, verbatim, whole-archive, as-needed",
+ );
+ continue;
+ }
+ };
+
+ macro report_unstable_modifier($feature: ident) {
+ if !features.$feature {
+ feature_err(
+ &sess.parse_sess,
+ sym::$feature,
+ span,
+ &format!("linking modifier `{modifier}` is unstable"),
+ )
+ .emit();
+ }
+ }
+ let assign_modifier = |dst: &mut Option<bool>| {
+ if dst.is_some() {
+ let msg = format!(
+ "multiple `{modifier}` modifiers in a single `modifiers` argument"
+ );
+ sess.span_err(span, &msg);
+ } else {
+ *dst = Some(value);
+ }
+ };
+ match (modifier, &mut kind) {
+ ("bundle", Some(NativeLibKind::Static { bundle, .. })) => {
+ assign_modifier(bundle)
+ }
+ ("bundle", _) => {
+ sess.span_err(
+ span,
+ "linking modifier `bundle` is only compatible with \
+ `static` linking kind",
+ );
+ }
+
+ ("verbatim", _) => {
+ report_unstable_modifier!(native_link_modifiers_verbatim);
+ assign_modifier(&mut verbatim)
+ }
+
+ ("whole-archive", Some(NativeLibKind::Static { whole_archive, .. })) => {
+ assign_modifier(whole_archive)
+ }
+ ("whole-archive", _) => {
+ sess.span_err(
+ span,
+ "linking modifier `whole-archive` is only compatible with \
+ `static` linking kind",
+ );
+ }
+
+ ("as-needed", Some(NativeLibKind::Dylib { as_needed }))
+ | ("as-needed", Some(NativeLibKind::Framework { as_needed })) => {
+ report_unstable_modifier!(native_link_modifiers_as_needed);
+ assign_modifier(as_needed)
+ }
+ ("as-needed", _) => {
+ sess.span_err(
+ span,
+ "linking modifier `as-needed` is only compatible with \
+ `dylib` and `framework` linking kinds",
+ );
+ }
+
+ _ => {
+ sess.span_err(
+ span,
+ format!(
+ "unknown linking modifier `{modifier}`, expected one of: \
+ bundle, verbatim, whole-archive, as-needed"
+ ),
+ );
+ }
+ }
+ }
+ }
+
+ if let Some((_, span)) = wasm_import_module {
+ if name.is_some() || kind.is_some() || modifiers.is_some() || cfg.is_some() {
+ let msg = "`wasm_import_module` is incompatible with \
+ other arguments in `#[link]` attributes";
+ sess.span_err(span, msg);
+ }
+ } else if name.is_none() {
+ struct_span_err!(
+ sess,
+ m.span,
+ E0459,
+ "`#[link]` attribute requires a `name = \"string\"` argument"
+ )
+ .span_label(m.span, "missing `name` argument")
+ .emit();
+ }
+
+ let dll_imports = match kind {
+ Some(NativeLibKind::RawDylib) => {
+ if let Some((name, span)) = name && name.as_str().contains('\0') {
+ sess.span_err(
+ span,
+ "link name must not contain NUL characters if link kind is `raw-dylib`",
+ );
+ }
+ foreign_mod_items
+ .iter()
+ .map(|child_item| self.build_dll_import(abi, child_item))
+ .collect()
+ }
+ _ => Vec::new(),
+ };
+ self.libs.push(NativeLib {
+ name: name.map(|(name, _)| name),
+ kind: kind.unwrap_or(NativeLibKind::Unspecified),
+ cfg,
+ foreign_module: Some(it.def_id.to_def_id()),
+ wasm_import_module: wasm_import_module.map(|(name, _)| name),
+ verbatim,
+ dll_imports,
+ });
+ }
+ }
+
+ // Process libs passed on the command line
+ fn process_command_line(&mut self) {
+ // First, check for errors
+ let mut renames = FxHashSet::default();
+ for lib in &self.tcx.sess.opts.libs {
+ if let NativeLibKind::Framework { .. } = lib.kind && !self.tcx.sess.target.is_like_osx {
+ // Cannot check this when parsing options because the target is not yet available.
+ self.tcx.sess.err("library kind `framework` is only supported on Apple targets");
+ }
+ if let Some(ref new_name) = lib.new_name {
+ let any_duplicate = self
+ .libs
+ .iter()
+ .filter_map(|lib| lib.name.as_ref())
+ .any(|n| n.as_str() == lib.name);
+ if new_name.is_empty() {
+ self.tcx.sess.err(format!(
+ "an empty renaming target was specified for library `{}`",
+ lib.name
+ ));
+ } else if !any_duplicate {
+ self.tcx.sess.err(format!(
+ "renaming of the library `{}` was specified, \
+ however this crate contains no `#[link(...)]` \
+ attributes referencing this library",
+ lib.name
+ ));
+ } else if !renames.insert(&lib.name) {
+ self.tcx.sess.err(format!(
+ "multiple renamings were \
+ specified for library `{}`",
+ lib.name
+ ));
+ }
+ }
+ }
+
+ // Update kind and, optionally, the name of all native libraries
+ // (there may be more than one) with the specified name. If any
+ // library is mentioned more than once, keep the latest mention
+ // of it, so that any possible dependent libraries appear before
+ // it. (This ensures that the linker is able to see symbols from
+ // all possible dependent libraries before linking in the library
+ // in question.)
+ for passed_lib in &self.tcx.sess.opts.libs {
+ // If we've already added any native libraries with the same
+ // name, they will be pulled out into `existing`, so that we
+ // can move them to the end of the list below.
+ let mut existing = self
+ .libs
+ .drain_filter(|lib| {
+ if let Some(lib_name) = lib.name {
+ if lib_name.as_str() == passed_lib.name {
+ // FIXME: This whole logic is questionable, whether modifiers are
+ // involved or not, library reordering and kind overriding without
+ // explicit `:rename` in particular.
+ if lib.has_modifiers() || passed_lib.has_modifiers() {
+ let msg = "overriding linking modifiers from command line is not supported";
+ match lib.foreign_module {
+ Some(def_id) => self.tcx.sess.span_err(self.tcx.def_span(def_id), msg),
+ None => self.tcx.sess.err(msg),
+ };
+ }
+ if passed_lib.kind != NativeLibKind::Unspecified {
+ lib.kind = passed_lib.kind;
+ }
+ if let Some(new_name) = &passed_lib.new_name {
+ lib.name = Some(Symbol::intern(new_name));
+ }
+ lib.verbatim = passed_lib.verbatim;
+ return true;
+ }
+ }
+ false
+ })
+ .collect::<Vec<_>>();
+ if existing.is_empty() {
+ // Add if not found
+ let new_name: Option<&str> = passed_lib.new_name.as_deref();
+ self.libs.push(NativeLib {
+ name: Some(Symbol::intern(new_name.unwrap_or(&passed_lib.name))),
+ kind: passed_lib.kind,
+ cfg: None,
+ foreign_module: None,
+ wasm_import_module: None,
+ verbatim: passed_lib.verbatim,
+ dll_imports: Vec::new(),
+ });
+ } else {
+ // Move all existing libraries with the same name to the
+ // end of the command line.
+ self.libs.append(&mut existing);
+ }
+ }
+ }
+
+ fn i686_arg_list_size(&self, item: &hir::ForeignItemRef) -> usize {
+ let argument_types: &List<Ty<'_>> = self.tcx.erase_late_bound_regions(
+ self.tcx
+ .type_of(item.id.def_id)
+ .fn_sig(self.tcx)
+ .inputs()
+ .map_bound(|slice| self.tcx.mk_type_list(slice.iter())),
+ );
+
+ argument_types
+ .iter()
+ .map(|ty| {
+ let layout = self
+ .tcx
+ .layout_of(ParamEnvAnd { param_env: ParamEnv::empty(), value: ty })
+ .expect("layout")
+ .layout;
+ // In both stdcall and fastcall, we always round up the argument size to the
+ // nearest multiple of 4 bytes.
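+                // E.g. sizes 1-4 round to 4, and size 5 rounds to 8.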
+ (layout.size().bytes_usize() + 3) & !3
+ })
+ .sum()
+ }
+
+ fn build_dll_import(&self, abi: Abi, item: &hir::ForeignItemRef) -> DllImport {
+ let calling_convention = if self.tcx.sess.target.arch == "x86" {
+ match abi {
+ Abi::C { .. } | Abi::Cdecl { .. } => DllCallingConvention::C,
+ Abi::Stdcall { .. } | Abi::System { .. } => {
+ DllCallingConvention::Stdcall(self.i686_arg_list_size(item))
+ }
+ Abi::Fastcall { .. } => {
+ DllCallingConvention::Fastcall(self.i686_arg_list_size(item))
+ }
+ Abi::Vectorcall { .. } => {
+ DllCallingConvention::Vectorcall(self.i686_arg_list_size(item))
+ }
+ _ => {
+ self.tcx.sess.span_fatal(
+ item.span,
+ r#"ABI not supported by `#[link(kind = "raw-dylib")]` on i686"#,
+ );
+ }
+ }
+ } else {
+ match abi {
+ Abi::C { .. } | Abi::Win64 { .. } | Abi::System { .. } => DllCallingConvention::C,
+ _ => {
+ self.tcx.sess.span_fatal(
+ item.span,
+ r#"ABI not supported by `#[link(kind = "raw-dylib")]` on this architecture"#,
+ );
+ }
+ }
+ };
+
+ DllImport {
+ name: item.ident.name,
+ ordinal: self.tcx.codegen_fn_attrs(item.id.def_id).link_ordinal,
+ calling_convention,
+ span: item.span,
+ }
+ }
+}
diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs
new file mode 100644
index 000000000..40dc4fb05
--- /dev/null
+++ b/compiler/rustc_metadata/src/rmeta/decoder.rs
@@ -0,0 +1,1820 @@
+// Decoding metadata from a single crate's metadata
+
+use crate::creader::{CStore, CrateMetadataRef};
+use crate::rmeta::*;
+
+use rustc_ast as ast;
+use rustc_ast::ptr::P;
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::{Lock, LockGuard, Lrc, OnceCell};
+use rustc_data_structures::unhash::UnhashMap;
+use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind};
+use rustc_expand::proc_macro::{AttrProcMacro, BangProcMacro, DeriveProcMacro};
+use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res};
+use rustc_hir::def_id::{CrateNum, DefId, DefIndex, CRATE_DEF_INDEX, LOCAL_CRATE};
+use rustc_hir::definitions::{DefKey, DefPath, DefPathData, DefPathHash};
+use rustc_hir::diagnostic_items::DiagnosticItems;
+use rustc_hir::lang_items;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::metadata::ModChild;
+use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
+use rustc_middle::mir::interpret::{AllocDecodingSession, AllocDecodingState};
+use rustc_middle::ty::codec::TyDecoder;
+use rustc_middle::ty::fast_reject::SimplifiedType;
+use rustc_middle::ty::GeneratorDiagnosticData;
+use rustc_middle::ty::{self, ParameterizedOverTcx, Ty, TyCtxt, Visibility};
+use rustc_serialize::opaque::MemDecoder;
+use rustc_serialize::{Decodable, Decoder};
+use rustc_session::cstore::{
+ CrateSource, ExternCrate, ForeignModule, LinkagePreference, NativeLib,
+};
+use rustc_session::Session;
+use rustc_span::hygiene::{ExpnIndex, MacroKind};
+use rustc_span::source_map::{respan, Spanned};
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::{self, BytePos, ExpnId, Pos, Span, SyntaxContext, DUMMY_SP};
+
+use proc_macro::bridge::client::ProcMacro;
+use std::io;
+use std::iter::TrustedLen;
+use std::mem;
+use std::num::NonZeroUsize;
+use std::path::Path;
+use tracing::debug;
+
+pub(super) use cstore_impl::provide;
+pub use cstore_impl::provide_extern;
+use rustc_span::hygiene::HygieneDecodeContext;
+
+mod cstore_impl;
+
+/// A reference to the raw binary version of crate metadata.
+/// A `MetadataBlob` internally is just a reference counted pointer to
+/// the actual data, so cloning it is cheap.
+#[derive(Clone)]
+pub(crate) struct MetadataBlob(Lrc<MetadataRef>);
+
+// This is needed so we can create an OwningRef into the blob.
+// The data behind a `MetadataBlob` has a stable address because it is
+// contained within an Rc/Arc.
+unsafe impl rustc_data_structures::owning_ref::StableAddress for MetadataBlob {}
+
+// This is needed so we can create an OwningRef into the blob.
+impl std::ops::Deref for MetadataBlob {
+ type Target = [u8];
+
+ #[inline]
+ fn deref(&self) -> &[u8] {
+ &self.0[..]
+ }
+}
+
+// A map from external crate numbers (as decoded from some crate file) to
+// local crate numbers (as generated during this session). Each external
+// crate may refer to types in other external crates, and each has its
+// own crate numbers.
+pub(crate) type CrateNumMap = IndexVec<CrateNum, CrateNum>;
+
+pub(crate) struct CrateMetadata {
+ /// The primary crate data - binary metadata blob.
+ blob: MetadataBlob,
+
+ // --- Some data pre-decoded from the metadata blob, usually for performance ---
+ /// NOTE(eddyb) we pass `'static` to a `'tcx` parameter because this
+ /// lifetime is only used behind `LazyValue`, `LazyArray`, or `LazyTable`,
+ /// and therefore acts like a universal (`for<'tcx>`) lifetime that is
+ /// paired up with whichever `TyCtxt` is used to decode those values.
+ root: CrateRoot,
+ /// Trait impl data.
+ /// FIXME: Used only from queries and can use query cache,
+ /// so pre-decoding can probably be avoided.
+ trait_impls: FxHashMap<(u32, DefIndex), LazyArray<(DefIndex, Option<SimplifiedType>)>>,
+ /// Inherent impls which do not follow the normal coherence rules.
+ ///
+ /// These can be introduced using either `#![rustc_coherence_is_core]`
+ /// or `#[rustc_allow_incoherent_impl]`.
+ incoherent_impls: FxHashMap<SimplifiedType, LazyArray<DefIndex>>,
+ /// Proc macro descriptions for this crate, if it's a proc macro crate.
+ raw_proc_macros: Option<&'static [ProcMacro]>,
+ /// Source maps for code from the crate.
+ source_map_import_info: OnceCell<Vec<ImportedSourceFile>>,
+ /// For every definition in this crate, maps its `DefPathHash` to its `DefIndex`.
+ def_path_hash_map: DefPathHashMapRef<'static>,
+ /// Likewise for ExpnHash.
+ expn_hash_map: OnceCell<UnhashMap<ExpnHash, ExpnIndex>>,
+ /// Used for decoding interpret::AllocIds in a cached & thread-safe manner.
+ alloc_decoding_state: AllocDecodingState,
+ /// Caches decoded `DefKey`s.
+ def_key_cache: Lock<FxHashMap<DefIndex, DefKey>>,
+ /// Caches decoded `DefPathHash`es.
+ def_path_hash_cache: Lock<FxHashMap<DefIndex, DefPathHash>>,
+
+ // --- Other significant crate properties ---
+ /// ID of this crate, from the current compilation session's point of view.
+ cnum: CrateNum,
+ /// Maps crate IDs as they were seen from this crate's compilation session into
+ /// IDs as they are seen from the current compilation session.
+ cnum_map: CrateNumMap,
+ /// Same ID set as `cnum_map` plus maybe some injected crates like the panic runtime.
+ dependencies: Lock<Vec<CrateNum>>,
+ /// How to link (or not link) this crate to the currently compiled crate.
+ dep_kind: Lock<CrateDepKind>,
+ /// Filesystem location of this crate.
+ source: Lrc<CrateSource>,
+ /// Whether or not this crate should be considered a private dependency
+ /// for purposes of the 'exported_private_dependencies' lint.
+ private_dep: bool,
+ /// The hash for the host proc macro. Used to support `-Z dual-proc-macro`.
+ host_hash: Option<Svh>,
+
+ /// Additional data used for decoding `HygieneData` (e.g. `SyntaxContext`
+ /// and `ExpnId`).
+ /// Note that we store a `HygieneDecodeContext` for each `CrateMetadata`. This is
+ /// because `SyntaxContext` ids are not globally unique, so we need
+ /// to track which ids we've decoded on a per-crate basis.
+ hygiene_context: HygieneDecodeContext,
+
+ // --- Data used only for improving diagnostics ---
+ /// Information about the `extern crate` item or path that caused this crate to be loaded.
+ /// If this is `None`, then the crate was injected (e.g., by the allocator).
+ extern_crate: Lock<Option<ExternCrate>>,
+}
+
+/// Holds information about a rustc_span::SourceFile imported from another crate.
+/// See `imported_source_files()` for more information.
+struct ImportedSourceFile {
+ /// This SourceFile's byte-offset within the source_map of its original crate
+ original_start_pos: rustc_span::BytePos,
+ /// The end of this SourceFile within the source_map of its original crate
+ original_end_pos: rustc_span::BytePos,
+ /// The imported SourceFile's representation within the local source_map
+ translated_source_file: Lrc<rustc_span::SourceFile>,
+}
+
+pub(super) struct DecodeContext<'a, 'tcx> {
+ opaque: MemDecoder<'a>,
+ cdata: Option<CrateMetadataRef<'a>>,
+ blob: &'a MetadataBlob,
+ sess: Option<&'tcx Session>,
+ tcx: Option<TyCtxt<'tcx>>,
+
+ // Cache the last used source_file for translating spans as an optimization.
+ last_source_file_index: usize,
+
+ lazy_state: LazyState,
+
+ // Used for decoding interpret::AllocIds in a cached & thread-safe manner.
+ alloc_decoding_session: Option<AllocDecodingSession<'a>>,
+}
+
+/// Abstract over the various ways one can create metadata decoders.
+pub(super) trait Metadata<'a, 'tcx>: Copy {
+ fn blob(self) -> &'a MetadataBlob;
+
+ fn cdata(self) -> Option<CrateMetadataRef<'a>> {
+ None
+ }
+ fn sess(self) -> Option<&'tcx Session> {
+ None
+ }
+ fn tcx(self) -> Option<TyCtxt<'tcx>> {
+ None
+ }
+
+ fn decoder(self, pos: usize) -> DecodeContext<'a, 'tcx> {
+ let tcx = self.tcx();
+ DecodeContext {
+ opaque: MemDecoder::new(self.blob(), pos),
+ cdata: self.cdata(),
+ blob: self.blob(),
+ sess: self.sess().or(tcx.map(|tcx| tcx.sess)),
+ tcx,
+ last_source_file_index: 0,
+ lazy_state: LazyState::NoNode,
+ alloc_decoding_session: self
+ .cdata()
+ .map(|cdata| cdata.cdata.alloc_decoding_state.new_decoding_session()),
+ }
+ }
+}
+
+impl<'a, 'tcx> Metadata<'a, 'tcx> for &'a MetadataBlob {
+ #[inline]
+ fn blob(self) -> &'a MetadataBlob {
+ self
+ }
+}
+
+impl<'a, 'tcx> Metadata<'a, 'tcx> for (&'a MetadataBlob, &'tcx Session) {
+ #[inline]
+ fn blob(self) -> &'a MetadataBlob {
+ self.0
+ }
+
+ #[inline]
+ fn sess(self) -> Option<&'tcx Session> {
+ let (_, sess) = self;
+ Some(sess)
+ }
+}
+
+impl<'a, 'tcx> Metadata<'a, 'tcx> for CrateMetadataRef<'a> {
+ #[inline]
+ fn blob(self) -> &'a MetadataBlob {
+ &self.cdata.blob
+ }
+ #[inline]
+ fn cdata(self) -> Option<CrateMetadataRef<'a>> {
+ Some(self)
+ }
+}
+
+impl<'a, 'tcx> Metadata<'a, 'tcx> for (CrateMetadataRef<'a>, &'tcx Session) {
+ #[inline]
+ fn blob(self) -> &'a MetadataBlob {
+ &self.0.cdata.blob
+ }
+ #[inline]
+ fn cdata(self) -> Option<CrateMetadataRef<'a>> {
+ Some(self.0)
+ }
+ #[inline]
+ fn sess(self) -> Option<&'tcx Session> {
+ Some(self.1)
+ }
+}
+
+impl<'a, 'tcx> Metadata<'a, 'tcx> for (CrateMetadataRef<'a>, TyCtxt<'tcx>) {
+ #[inline]
+ fn blob(self) -> &'a MetadataBlob {
+ &self.0.cdata.blob
+ }
+ #[inline]
+ fn cdata(self) -> Option<CrateMetadataRef<'a>> {
+ Some(self.0)
+ }
+ #[inline]
+ fn tcx(self) -> Option<TyCtxt<'tcx>> {
+ Some(self.1)
+ }
+}
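+
+// Illustrative note on the impls above: callers pick how much context a
+// decoder gets. A bare `&MetadataBlob` suffices for session-independent
+// data (e.g. `MetadataBlob::get_root`), `(CrateMetadataRef, &Session)` is
+// needed once `Span`s or hygiene data may be decoded, and
+// `(CrateMetadataRef, TyCtxt)` is required for anything containing types,
+// since `TyDecoder::interner` must be able to return a `TyCtxt`.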
+
+impl<T: ParameterizedOverTcx> LazyValue<T> {
+ fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>(self, metadata: M) -> T::Value<'tcx>
+ where
+ T::Value<'tcx>: Decodable<DecodeContext<'a, 'tcx>>,
+ {
+ let mut dcx = metadata.decoder(self.position.get());
+ dcx.lazy_state = LazyState::NodeStart(self.position);
+ T::Value::decode(&mut dcx)
+ }
+}
+
+struct DecodeIterator<'a, 'tcx, T> {
+ elem_counter: std::ops::Range<usize>,
+ dcx: DecodeContext<'a, 'tcx>,
+ _phantom: PhantomData<fn() -> T>,
+}
+
+impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> Iterator for DecodeIterator<'a, 'tcx, T> {
+ type Item = T;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.elem_counter.next().map(|_| T::decode(&mut self.dcx))
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.elem_counter.size_hint()
+ }
+}
+
+impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> ExactSizeIterator
+ for DecodeIterator<'a, 'tcx, T>
+{
+ fn len(&self) -> usize {
+ self.elem_counter.len()
+ }
+}
+
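+// SAFETY (sketch of the argument): `size_hint` forwards to
+// `Range<usize>::size_hint`, which is exact, so the iterator reports its
+// precise remaining length, as `TrustedLen` requires.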
+unsafe impl<'a, 'tcx, T: Decodable<DecodeContext<'a, 'tcx>>> TrustedLen
+ for DecodeIterator<'a, 'tcx, T>
+{
+}
+
+impl<T: ParameterizedOverTcx> LazyArray<T> {
+ fn decode<'a, 'tcx, M: Metadata<'a, 'tcx>>(
+ self,
+ metadata: M,
+ ) -> DecodeIterator<'a, 'tcx, T::Value<'tcx>>
+ where
+ T::Value<'tcx>: Decodable<DecodeContext<'a, 'tcx>>,
+ {
+ let mut dcx = metadata.decoder(self.position.get());
+ dcx.lazy_state = LazyState::NodeStart(self.position);
+ DecodeIterator { elem_counter: (0..self.num_elems), dcx, _phantom: PhantomData }
+ }
+}
+
+impl<'a, 'tcx> DecodeContext<'a, 'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ debug_assert!(self.tcx.is_some(), "missing TyCtxt in DecodeContext");
+ self.tcx.unwrap()
+ }
+
+ #[inline]
+ pub fn blob(&self) -> &'a MetadataBlob {
+ self.blob
+ }
+
+ #[inline]
+ pub fn cdata(&self) -> CrateMetadataRef<'a> {
+ debug_assert!(self.cdata.is_some(), "missing CrateMetadata in DecodeContext");
+ self.cdata.unwrap()
+ }
+
+ #[inline]
+ fn map_encoded_cnum_to_current(&self, cnum: CrateNum) -> CrateNum {
+ self.cdata().map_encoded_cnum_to_current(cnum)
+ }
+
+ #[inline]
+ fn read_lazy_offset_then<T>(&mut self, f: impl Fn(NonZeroUsize) -> T) -> T {
+ let distance = self.read_usize();
+ let position = match self.lazy_state {
+ LazyState::NoNode => bug!("read_lazy_with_meta: outside of a metadata node"),
+ LazyState::NodeStart(start) => {
+ let start = start.get();
+ assert!(distance <= start);
+ start - distance
+ }
+ LazyState::Previous(last_pos) => last_pos.get() + distance,
+ };
+ let position = NonZeroUsize::new(position).unwrap();
+ self.lazy_state = LazyState::Previous(position);
+ f(position)
+ }
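+
+ // A worked example with hypothetical positions: suppose the enclosing
+ // node starts at absolute position 100 and its first lazy value lives
+ // at position 90. The encoder stored the distance 10, and
+ // `NodeStart(100)` yields `100 - 10 = 90`. If the next lazy value
+ // lives at position 130, it was stored as the distance 40 from the
+ // previous one, and `Previous(90)` yields `90 + 40 = 130`.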
+
+ fn read_lazy<T>(&mut self) -> LazyValue<T> {
+ self.read_lazy_offset_then(|pos| LazyValue::from_position(pos))
+ }
+
+ fn read_lazy_array<T>(&mut self, len: usize) -> LazyArray<T> {
+ self.read_lazy_offset_then(|pos| LazyArray::from_position_and_num_elems(pos, len))
+ }
+
+ fn read_lazy_table<I, T>(&mut self, len: usize) -> LazyTable<I, T> {
+ self.read_lazy_offset_then(|pos| LazyTable::from_position_and_encoded_size(pos, len))
+ }
+
+ #[inline]
+ pub fn read_raw_bytes(&mut self, len: usize) -> &[u8] {
+ self.opaque.read_raw_bytes(len)
+ }
+}
+
+impl<'a, 'tcx> TyDecoder for DecodeContext<'a, 'tcx> {
+ const CLEAR_CROSS_CRATE: bool = true;
+
+ type I = TyCtxt<'tcx>;
+
+ #[inline]
+ fn interner(&self) -> Self::I {
+ self.tcx()
+ }
+
+ #[inline]
+ fn peek_byte(&self) -> u8 {
+ self.opaque.data[self.opaque.position()]
+ }
+
+ #[inline]
+ fn position(&self) -> usize {
+ self.opaque.position()
+ }
+
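+ // Shorthands: a type is encoded in full once at some absolute position,
+ // and later occurrences reference that position instead of repeating
+ // the encoding. Decoding memoizes the result per (crate, position) key
+ // in `tcx.ty_rcache`, so each shorthand is only decoded once.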
+ fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx>
+ where
+ F: FnOnce(&mut Self) -> Ty<'tcx>,
+ {
+ let tcx = self.tcx();
+
+ let key = ty::CReaderCacheKey { cnum: Some(self.cdata().cnum), pos: shorthand };
+
+ if let Some(&ty) = tcx.ty_rcache.borrow().get(&key) {
+ return ty;
+ }
+
+ let ty = or_insert_with(self);
+ tcx.ty_rcache.borrow_mut().insert(key, ty);
+ ty
+ }
+
+ fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
+ where
+ F: FnOnce(&mut Self) -> R,
+ {
+ let new_opaque = MemDecoder::new(self.opaque.data, pos);
+ let old_opaque = mem::replace(&mut self.opaque, new_opaque);
+ let old_state = mem::replace(&mut self.lazy_state, LazyState::NoNode);
+ let r = f(self);
+ self.opaque = old_opaque;
+ self.lazy_state = old_state;
+ r
+ }
+
+ fn decode_alloc_id(&mut self) -> rustc_middle::mir::interpret::AllocId {
+ if let Some(alloc_decoding_session) = self.alloc_decoding_session {
+ alloc_decoding_session.decode_alloc_id(self)
+ } else {
+ bug!("Attempting to decode interpret::AllocId without CrateMetadata")
+ }
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for CrateNum {
+ fn decode(d: &mut DecodeContext<'a, 'tcx>) -> CrateNum {
+ let cnum = CrateNum::from_u32(d.read_u32());
+ d.map_encoded_cnum_to_current(cnum)
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefIndex {
+ fn decode(d: &mut DecodeContext<'a, 'tcx>) -> DefIndex {
+ DefIndex::from_u32(d.read_u32())
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnIndex {
+ fn decode(d: &mut DecodeContext<'a, 'tcx>) -> ExpnIndex {
+ ExpnIndex::from_u32(d.read_u32())
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for SyntaxContext {
+ fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> SyntaxContext {
+ let cdata = decoder.cdata();
+ let sess = decoder.sess.unwrap();
+ let cname = cdata.root.name;
+ rustc_span::hygiene::decode_syntax_context(decoder, &cdata.hygiene_context, |_, id| {
+ debug!("SpecializedDecoder<SyntaxContext>: decoding {}", id);
+ cdata
+ .root
+ .syntax_contexts
+ .get(cdata, id)
+ .unwrap_or_else(|| panic!("Missing SyntaxContext {:?} for crate {:?}", id, cname))
+ .decode((cdata, sess))
+ })
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for ExpnId {
+ fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> ExpnId {
+ let local_cdata = decoder.cdata();
+ let sess = decoder.sess.unwrap();
+
+ let cnum = CrateNum::decode(decoder);
+ let index = u32::decode(decoder);
+
+ let expn_id = rustc_span::hygiene::decode_expn_id(cnum, index, |expn_id| {
+ let ExpnId { krate: cnum, local_id: index } = expn_id;
+ // Lookup local `ExpnData`s in our own crate data. Foreign `ExpnData`s
+ // are stored in the owning crate, to avoid duplication.
+ debug_assert_ne!(cnum, LOCAL_CRATE);
+ let crate_data = if cnum == local_cdata.cnum {
+ local_cdata
+ } else {
+ local_cdata.cstore.get_crate_data(cnum)
+ };
+ let expn_data = crate_data
+ .root
+ .expn_data
+ .get(crate_data, index)
+ .unwrap()
+ .decode((crate_data, sess));
+ let expn_hash = crate_data
+ .root
+ .expn_hashes
+ .get(crate_data, index)
+ .unwrap()
+ .decode((crate_data, sess));
+ (expn_data, expn_hash)
+ });
+ expn_id
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for Span {
+ fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Span {
+ let ctxt = SyntaxContext::decode(decoder);
+ let tag = u8::decode(decoder);
+
+ if tag == TAG_PARTIAL_SPAN {
+ return DUMMY_SP.with_ctxt(ctxt);
+ }
+
+ debug_assert!(tag == TAG_VALID_SPAN_LOCAL || tag == TAG_VALID_SPAN_FOREIGN);
+
+ let lo = BytePos::decode(decoder);
+ let len = BytePos::decode(decoder);
+ let hi = lo + len;
+
+ let Some(sess) = decoder.sess else {
+ bug!("Cannot decode Span without Session.")
+ };
+
+ // There are two possibilities here:
+ // 1. This is a 'local span', which is located inside a `SourceFile`
+ // that came from this crate. In this case, we use the source map data
+ // encoded in this crate. This branch should be taken nearly all of the time.
+ // 2. This is a 'foreign span', which is located inside a `SourceFile`
+ // that came from a *different* crate (some crate upstream of the one
+ // whose metadata we're looking at). For example, consider this dependency graph:
+ //
+ // A -> B -> C
+ //
+ // Suppose that we're currently compiling crate A, and start deserializing
+ // metadata from crate B. When we deserialize a Span from crate B's metadata,
+ // there are two possibilities:
+ //
+ // 1. The span references a file from crate B. This makes it a 'local' span,
+ // which means that we can use crate B's serialized source map information.
+ // 2. The span references a file from crate C. This makes it a 'foreign' span,
+ // which means we need to use Crate *C* (not crate B) to determine the source
+ // map information. We only record source map information for a file in the
+ // crate that 'owns' it, so deserializing a Span may require us to look at
+ // a transitive dependency.
+ //
+ // When we encode a foreign span, we adjust its 'lo' and 'hi' values
+ // to be based on the *foreign* crate (e.g. crate C), not the crate
+ // we are writing metadata for (e.g. crate B). This allows us to
+ // treat the 'local' and 'foreign' cases almost identically during deserialization:
+ // we can call `imported_source_files` for the proper crate, and binary search
+ // through the returned slice using our span.
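+ //
+ // A worked example with made-up numbers: if this file originally
+ // occupied bytes 100..200 in its owning crate's source map
+ // (`original_start_pos == 100`) and its imported copy starts at byte
+ // 1000 in the local source map
+ // (`translated_source_file.start_pos == 1000`), then an encoded `lo`
+ // of 120 translates to `120 + 1000 - 100 = 1020` below.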
+ let imported_source_files = if tag == TAG_VALID_SPAN_LOCAL {
+ decoder.cdata().imported_source_files(sess)
+ } else {
+ // When we encode a proc-macro crate, all `Span`s should be encoded
+ // with `TAG_VALID_SPAN_LOCAL`
+ if decoder.cdata().root.is_proc_macro_crate() {
+ // Decode `CrateNum` as u32 - using `CrateNum::decode` will ICE
+ // since we don't have `cnum_map` populated.
+ let cnum = u32::decode(decoder);
+ panic!(
+ "Decoding of crate {:?} tried to access proc-macro dep {:?}",
+ decoder.cdata().root.name,
+ cnum
+ );
+ }
+ // tag is TAG_VALID_SPAN_FOREIGN, checked by `debug_assert` above
+ let cnum = CrateNum::decode(decoder);
+ debug!(
+ "SpecializedDecoder<Span>::specialized_decode: loading source files from cnum {:?}",
+ cnum
+ );
+
+ // Decoding 'foreign' spans should be rare enough that it's
+ // not worth it to maintain a per-CrateNum cache for `last_source_file_index`.
+ // We just set it to 0 to ensure that we don't try to access something out
+ // of bounds with our initial 'guess'.
+ decoder.last_source_file_index = 0;
+
+ let foreign_data = decoder.cdata().cstore.get_crate_data(cnum);
+ foreign_data.imported_source_files(sess)
+ };
+
+ let source_file = {
+ // Optimize for the case that most spans within a translated item
+ // originate from the same source_file.
+ let last_source_file = &imported_source_files[decoder.last_source_file_index];
+
+ if lo >= last_source_file.original_start_pos && lo <= last_source_file.original_end_pos
+ {
+ last_source_file
+ } else {
+ let index = imported_source_files
+ .binary_search_by_key(&lo, |source_file| source_file.original_start_pos)
+ .unwrap_or_else(|index| index - 1);
+
+ // Don't try to cache the index for foreign spans,
+ // as this would require a map from CrateNums to indices
+ if tag == TAG_VALID_SPAN_LOCAL {
+ decoder.last_source_file_index = index;
+ }
+ &imported_source_files[index]
+ }
+ };
+
+ // Make sure our binary search above is correct.
+ debug_assert!(
+ lo >= source_file.original_start_pos && lo <= source_file.original_end_pos,
+ "Bad binary search: lo={:?} source_file.original_start_pos={:?} source_file.original_end_pos={:?}",
+ lo,
+ source_file.original_start_pos,
+ source_file.original_end_pos
+ );
+
+ // Make sure we correctly filtered out invalid spans during encoding
+ debug_assert!(
+ hi >= source_file.original_start_pos && hi <= source_file.original_end_pos,
+ "Bad binary search: hi={:?} source_file.original_start_pos={:?} source_file.original_end_pos={:?}",
+ hi,
+ source_file.original_start_pos,
+ source_file.original_end_pos
+ );
+
+ let lo =
+ (lo + source_file.translated_source_file.start_pos) - source_file.original_start_pos;
+ let hi =
+ (hi + source_file.translated_source_file.start_pos) - source_file.original_start_pos;
+
+ // Do not try to decode parent for foreign spans.
+ Span::new(lo, hi, ctxt, None)
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for &'tcx [ty::abstract_const::Node<'tcx>] {
+ fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Self {
+ ty::codec::RefDecodable::decode(d)
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx>, Span)] {
+ fn decode(d: &mut DecodeContext<'a, 'tcx>) -> Self {
+ ty::codec::RefDecodable::decode(d)
+ }
+}
+
+impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyValue<T> {
+ fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self {
+ decoder.read_lazy()
+ }
+}
+
+impl<'a, 'tcx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyArray<T> {
+ fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self {
+ let len = decoder.read_usize();
+ if len == 0 { LazyArray::empty() } else { decoder.read_lazy_array(len) }
+ }
+}
+
+impl<'a, 'tcx, I: Idx, T> Decodable<DecodeContext<'a, 'tcx>> for LazyTable<I, T> {
+ fn decode(decoder: &mut DecodeContext<'a, 'tcx>) -> Self {
+ let len = decoder.read_usize();
+ decoder.read_lazy_table(len)
+ }
+}
+
+implement_ty_decoder!(DecodeContext<'a, 'tcx>);
+
+impl MetadataBlob {
+ pub(crate) fn new(metadata_ref: MetadataRef) -> MetadataBlob {
+ MetadataBlob(Lrc::new(metadata_ref))
+ }
+
+ pub(crate) fn is_compatible(&self) -> bool {
+ self.blob().starts_with(METADATA_HEADER)
+ }
+
+ pub(crate) fn get_rustc_version(&self) -> String {
+ LazyValue::<String>::from_position(NonZeroUsize::new(METADATA_HEADER.len() + 4).unwrap())
+ .decode(self)
+ }
+
+ pub(crate) fn get_root(&self) -> CrateRoot {
+ let slice = &self.blob()[..];
+ let offset = METADATA_HEADER.len();
+ let pos = (((slice[offset + 0] as u32) << 24)
+ | ((slice[offset + 1] as u32) << 16)
+ | ((slice[offset + 2] as u32) << 8)
+ | ((slice[offset + 3] as u32) << 0)) as usize;
+ LazyValue::<CrateRoot>::from_position(NonZeroUsize::new(pos).unwrap()).decode(self)
+ }
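+
+ // The blob layout implied by the accessors above (a sketch inferred
+ // from this code, not a format specification): the blob starts with
+ // `METADATA_HEADER`, followed by a big-endian `u32` holding the
+ // absolute position of the lazily-encoded `CrateRoot`;
+ // `get_rustc_version` reads the version string from the fixed
+ // position immediately after that offset field.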
+
+ pub(crate) fn list_crate_metadata(&self, out: &mut dyn io::Write) -> io::Result<()> {
+ let root = self.get_root();
+ writeln!(out, "Crate info:")?;
+ writeln!(out, "name {}{}", root.name, root.extra_filename)?;
+ writeln!(out, "hash {} stable_crate_id {:?}", root.hash, root.stable_crate_id)?;
+ writeln!(out, "proc_macro {:?}", root.proc_macro_data.is_some())?;
+ writeln!(out, "=External Dependencies=")?;
+ for (i, dep) in root.crate_deps.decode(self).enumerate() {
+ writeln!(
+ out,
+ "{} {}{} hash {} host_hash {:?} kind {:?}",
+ i + 1,
+ dep.name,
+ dep.extra_filename,
+ dep.hash,
+ dep.host_hash,
+ dep.kind
+ )?;
+ }
+ writeln!(out)?;
+ Ok(())
+ }
+}
+
+impl CrateRoot {
+ pub(crate) fn is_proc_macro_crate(&self) -> bool {
+ self.proc_macro_data.is_some()
+ }
+
+ pub(crate) fn name(&self) -> Symbol {
+ self.name
+ }
+
+ pub(crate) fn hash(&self) -> Svh {
+ self.hash
+ }
+
+ pub(crate) fn stable_crate_id(&self) -> StableCrateId {
+ self.stable_crate_id
+ }
+
+ pub(crate) fn triple(&self) -> &TargetTriple {
+ &self.triple
+ }
+
+ pub(crate) fn decode_crate_deps<'a>(
+ &self,
+ metadata: &'a MetadataBlob,
+ ) -> impl ExactSizeIterator<Item = CrateDep> + Captures<'a> {
+ self.crate_deps.decode(metadata)
+ }
+}
+
+impl<'a, 'tcx> CrateMetadataRef<'a> {
+ fn raw_proc_macro(self, id: DefIndex) -> &'a ProcMacro {
+ // `DefIndex`es in `root.proc_macro_data` have a one-to-one correspondence
+ // with items in `raw_proc_macros`.
+ let pos = self
+ .root
+ .proc_macro_data
+ .as_ref()
+ .unwrap()
+ .macros
+ .decode(self)
+ .position(|i| i == id)
+ .unwrap();
+ &self.raw_proc_macros.unwrap()[pos]
+ }
+
+ fn opt_item_name(self, item_index: DefIndex) -> Option<Symbol> {
+ self.def_key(item_index).disambiguated_data.data.get_opt_name()
+ }
+
+ fn item_name(self, item_index: DefIndex) -> Symbol {
+ self.opt_item_name(item_index).expect("no encoded ident for item")
+ }
+
+ fn opt_item_ident(self, item_index: DefIndex, sess: &Session) -> Option<Ident> {
+ let name = self.opt_item_name(item_index)?;
+ let span =
+ self.root.tables.def_ident_span.get(self, item_index).unwrap().decode((self, sess));
+ Some(Ident::new(name, span))
+ }
+
+ fn item_ident(self, item_index: DefIndex, sess: &Session) -> Ident {
+ self.opt_item_ident(item_index, sess).expect("no encoded ident for item")
+ }
+
+ fn maybe_kind(self, item_id: DefIndex) -> Option<EntryKind> {
+ self.root.tables.kind.get(self, item_id).map(|k| k.decode(self))
+ }
+
+ #[inline]
+ pub(super) fn map_encoded_cnum_to_current(self, cnum: CrateNum) -> CrateNum {
+ if cnum == LOCAL_CRATE { self.cnum } else { self.cnum_map[cnum] }
+ }
+
+ fn kind(self, item_id: DefIndex) -> EntryKind {
+ self.maybe_kind(item_id).unwrap_or_else(|| {
+ bug!(
+ "CrateMetadata::kind({:?}): id not found, in crate {:?} with number {}",
+ item_id,
+ self.root.name,
+ self.cnum,
+ )
+ })
+ }
+
+ fn def_kind(self, item_id: DefIndex) -> DefKind {
+ self.root.tables.opt_def_kind.get(self, item_id).unwrap_or_else(|| {
+ bug!(
+ "CrateMetadata::def_kind({:?}): id not found, in crate {:?} with number {}",
+ item_id,
+ self.root.name,
+ self.cnum,
+ )
+ })
+ }
+
+ fn get_span(self, index: DefIndex, sess: &Session) -> Span {
+ self.root
+ .tables
+ .def_span
+ .get(self, index)
+ .unwrap_or_else(|| panic!("Missing span for {:?}", index))
+ .decode((self, sess))
+ }
+
+ fn load_proc_macro(self, id: DefIndex, sess: &Session) -> SyntaxExtension {
+ let (name, kind, helper_attrs) = match *self.raw_proc_macro(id) {
+ ProcMacro::CustomDerive { trait_name, attributes, client } => {
+ let helper_attrs =
+ attributes.iter().cloned().map(Symbol::intern).collect::<Vec<_>>();
+ (
+ trait_name,
+ SyntaxExtensionKind::Derive(Box::new(DeriveProcMacro { client })),
+ helper_attrs,
+ )
+ }
+ ProcMacro::Attr { name, client } => {
+ (name, SyntaxExtensionKind::Attr(Box::new(AttrProcMacro { client })), Vec::new())
+ }
+ ProcMacro::Bang { name, client } => {
+ (name, SyntaxExtensionKind::Bang(Box::new(BangProcMacro { client })), Vec::new())
+ }
+ };
+
+ let attrs: Vec<_> = self.get_item_attrs(id, sess).collect();
+ SyntaxExtension::new(
+ sess,
+ kind,
+ self.get_span(id, sess),
+ helper_attrs,
+ self.root.edition,
+ Symbol::intern(name),
+ &attrs,
+ )
+ }
+
+ fn get_variant(self, kind: &EntryKind, index: DefIndex, parent_did: DefId) -> ty::VariantDef {
+ let data = match kind {
+ EntryKind::Variant(data) | EntryKind::Struct(data) | EntryKind::Union(data) => {
+ data.decode(self)
+ }
+ _ => bug!(),
+ };
+
+ let adt_kind = match kind {
+ EntryKind::Variant(_) => ty::AdtKind::Enum,
+ EntryKind::Struct(..) => ty::AdtKind::Struct,
+ EntryKind::Union(..) => ty::AdtKind::Union,
+ _ => bug!(),
+ };
+
+ let variant_did =
+ if adt_kind == ty::AdtKind::Enum { Some(self.local_def_id(index)) } else { None };
+ let ctor_did = data.ctor.map(|index| self.local_def_id(index));
+
+ ty::VariantDef::new(
+ self.item_name(index),
+ variant_did,
+ ctor_did,
+ data.discr,
+ self.root
+ .tables
+ .children
+ .get(self, index)
+ .unwrap_or_else(LazyArray::empty)
+ .decode(self)
+ .map(|index| ty::FieldDef {
+ did: self.local_def_id(index),
+ name: self.item_name(index),
+ vis: self.get_visibility(index),
+ })
+ .collect(),
+ data.ctor_kind,
+ adt_kind,
+ parent_did,
+ false,
+ data.is_non_exhaustive,
+ )
+ }
+
+ fn get_adt_def(self, item_id: DefIndex, tcx: TyCtxt<'tcx>) -> ty::AdtDef<'tcx> {
+ let kind = self.kind(item_id);
+ let did = self.local_def_id(item_id);
+
+ let adt_kind = match kind {
+ EntryKind::Enum => ty::AdtKind::Enum,
+ EntryKind::Struct(_) => ty::AdtKind::Struct,
+ EntryKind::Union(_) => ty::AdtKind::Union,
+ _ => bug!("get_adt_def called on a non-ADT {:?}", did),
+ };
+ let repr = self.root.tables.repr_options.get(self, item_id).unwrap().decode(self);
+
+ let variants = if let ty::AdtKind::Enum = adt_kind {
+ self.root
+ .tables
+ .children
+ .get(self, item_id)
+ .unwrap_or_else(LazyArray::empty)
+ .decode(self)
+ .map(|index| self.get_variant(&self.kind(index), index, did))
+ .collect()
+ } else {
+ std::iter::once(self.get_variant(&kind, item_id, did)).collect()
+ };
+
+ tcx.alloc_adt_def(did, adt_kind, variants, repr)
+ }
+
+ fn get_generics(self, item_id: DefIndex, sess: &Session) -> ty::Generics {
+ self.root.tables.generics_of.get(self, item_id).unwrap().decode((self, sess))
+ }
+
+ fn get_visibility(self, id: DefIndex) -> ty::Visibility {
+ self.root.tables.visibility.get(self, id).unwrap().decode(self)
+ }
+
+ fn get_trait_item_def_id(self, id: DefIndex) -> Option<DefId> {
+ self.root.tables.trait_item_def_id.get(self, id).map(|d| d.decode_from_cdata(self))
+ }
+
+ fn get_expn_that_defined(self, id: DefIndex, sess: &Session) -> ExpnId {
+ self.root.tables.expn_that_defined.get(self, id).unwrap().decode((self, sess))
+ }
+
+ fn get_debugger_visualizers(self) -> Vec<rustc_span::DebuggerVisualizerFile> {
+ self.root.debugger_visualizers.decode(self).collect::<Vec<_>>()
+ }
+
+ /// Iterates over all the stability attributes in the given crate.
+ fn get_lib_features(self, tcx: TyCtxt<'tcx>) -> &'tcx [(Symbol, Option<Symbol>)] {
+ tcx.arena.alloc_from_iter(self.root.lib_features.decode(self))
+ }
+
+ /// Iterates over the stability implications in the given crate (when a `#[unstable]` attribute
+ /// has an `implied_by` meta item, then the mapping from the implied feature to the actual
+ /// feature is a stability implication).
+ fn get_stability_implications(self, tcx: TyCtxt<'tcx>) -> &'tcx [(Symbol, Symbol)] {
+ tcx.arena.alloc_from_iter(self.root.stability_implications.decode(self))
+ }
+
+ /// Iterates over the language items in the given crate.
+ fn get_lang_items(self, tcx: TyCtxt<'tcx>) -> &'tcx [(DefId, usize)] {
+ tcx.arena.alloc_from_iter(
+ self.root
+ .lang_items
+ .decode(self)
+ .map(move |(def_index, index)| (self.local_def_id(def_index), index)),
+ )
+ }
+
+ /// Iterates over the diagnostic items in the given crate.
+ fn get_diagnostic_items(self) -> DiagnosticItems {
+ let mut id_to_name = FxHashMap::default();
+ let name_to_id = self
+ .root
+ .diagnostic_items
+ .decode(self)
+ .map(|(name, def_index)| {
+ let id = self.local_def_id(def_index);
+ id_to_name.insert(id, name);
+ (name, id)
+ })
+ .collect();
+ DiagnosticItems { id_to_name, name_to_id }
+ }
+
+ /// Iterates over all named children of the given module,
+ /// including both proper items and reexports.
+ /// Module here is understood in the name resolution sense: it can be a `mod` item,
+ /// or a crate root, or an enum, or a trait.
+ fn for_each_module_child(
+ self,
+ id: DefIndex,
+ mut callback: impl FnMut(ModChild),
+ sess: &Session,
+ ) {
+ if let Some(data) = &self.root.proc_macro_data {
+ // If we are loading as a proc macro, we want to return
+ // the view of this crate as a proc macro crate.
+ if id == CRATE_DEF_INDEX {
+ for def_index in data.macros.decode(self) {
+ let raw_macro = self.raw_proc_macro(def_index);
+ let res = Res::Def(
+ DefKind::Macro(macro_kind(raw_macro)),
+ self.local_def_id(def_index),
+ );
+ let ident = self.item_ident(def_index, sess);
+ callback(ModChild {
+ ident,
+ res,
+ vis: ty::Visibility::Public,
+ span: ident.span,
+ macro_rules: false,
+ });
+ }
+ }
+ return;
+ }
+
+ // Iterate over all children.
+ if let Some(children) = self.root.tables.children.get(self, id) {
+ for child_index in children.decode((self, sess)) {
+ let ident = self.item_ident(child_index, sess);
+ let kind = self.def_kind(child_index);
+ let def_id = self.local_def_id(child_index);
+ let res = Res::Def(kind, def_id);
+ let vis = self.get_visibility(child_index);
+ let span = self.get_span(child_index, sess);
+ let macro_rules = match kind {
+ DefKind::Macro(..) => match self.kind(child_index) {
+ EntryKind::MacroDef(_, macro_rules) => macro_rules,
+ _ => unreachable!(),
+ },
+ _ => false,
+ };
+
+ callback(ModChild { ident, res, vis, span, macro_rules });
+
+ // For structs and variants that are not re-exports, add their constructors
+ // to the children. Re-export lists automatically contain constructors when
+ // necessary.
+ match kind {
+ DefKind::Struct => {
+ if let Some((ctor_def_id, ctor_kind)) =
+ self.get_ctor_def_id_and_kind(child_index)
+ {
+ let ctor_res =
+ Res::Def(DefKind::Ctor(CtorOf::Struct, ctor_kind), ctor_def_id);
+ let vis = self.get_visibility(ctor_def_id.index);
+ callback(ModChild {
+ ident,
+ res: ctor_res,
+ vis,
+ span,
+ macro_rules: false,
+ });
+ }
+ }
+ DefKind::Variant => {
+ // Braced variants, unlike structs, generate unusable names in
+ // the value namespace; they are reserved for possible future use.
+ // It's ok to use the variant's id as a ctor id since an
+ // error will be reported on any use of such a resolution anyway.
+ let (ctor_def_id, ctor_kind) = self
+ .get_ctor_def_id_and_kind(child_index)
+ .unwrap_or((def_id, CtorKind::Fictive));
+ let ctor_res =
+ Res::Def(DefKind::Ctor(CtorOf::Variant, ctor_kind), ctor_def_id);
+ let mut vis = self.get_visibility(ctor_def_id.index);
+ if ctor_def_id == def_id && vis.is_public() {
+ // For non-exhaustive variants, lower the constructor visibility to
+ // within the crate. We only need this for fictive constructors;
+ // for other constructors, correct visibilities
+ // were already encoded in the metadata.
+ let mut attrs = self.get_item_attrs(def_id.index, sess);
+ if attrs.any(|item| item.has_name(sym::non_exhaustive)) {
+ let crate_def_id = self.local_def_id(CRATE_DEF_INDEX);
+ vis = ty::Visibility::Restricted(crate_def_id);
+ }
+ }
+ callback(ModChild { ident, res: ctor_res, vis, span, macro_rules: false });
+ }
+ _ => {}
+ }
+ }
+ }
+
+ match self.kind(id) {
+ EntryKind::Mod(exports) => {
+ for exp in exports.decode((self, sess)) {
+ callback(exp);
+ }
+ }
+ EntryKind::Enum | EntryKind::Trait => {}
+ _ => bug!("`for_each_module_child` is called on a non-module: {:?}", self.def_kind(id)),
+ }
+ }
+
+ fn is_ctfe_mir_available(self, id: DefIndex) -> bool {
+ self.root.tables.mir_for_ctfe.get(self, id).is_some()
+ }
+
+ fn is_item_mir_available(self, id: DefIndex) -> bool {
+ self.root.tables.optimized_mir.get(self, id).is_some()
+ }
+
+ fn module_expansion(self, id: DefIndex, sess: &Session) -> ExpnId {
+ match self.kind(id) {
+ EntryKind::Mod(_) | EntryKind::Enum | EntryKind::Trait => {
+ self.get_expn_that_defined(id, sess)
+ }
+ _ => panic!("Expected module, found {:?}", self.local_def_id(id)),
+ }
+ }
+
+ fn get_fn_has_self_parameter(self, id: DefIndex) -> bool {
+ match self.kind(id) {
+ EntryKind::AssocFn { has_self, .. } => has_self,
+ _ => false,
+ }
+ }
+
+ fn get_associated_item_def_ids(
+ self,
+ id: DefIndex,
+ sess: &'a Session,
+ ) -> impl Iterator<Item = DefId> + 'a {
+ self.root
+ .tables
+ .children
+ .get(self, id)
+ .unwrap_or_else(LazyArray::empty)
+ .decode((self, sess))
+ .map(move |child_index| self.local_def_id(child_index))
+ }
+
+ fn get_associated_item(self, id: DefIndex) -> ty::AssocItem {
+ let name = self.item_name(id);
+
+ let (kind, container, has_self) = match self.kind(id) {
+ EntryKind::AssocConst(container) => (ty::AssocKind::Const, container, false),
+ EntryKind::AssocFn { container, has_self } => (ty::AssocKind::Fn, container, has_self),
+ EntryKind::AssocType(container) => (ty::AssocKind::Type, container, false),
+ _ => bug!("cannot get associated-item of `{:?}`", id),
+ };
+
+ ty::AssocItem {
+ name,
+ kind,
+ def_id: self.local_def_id(id),
+ trait_item_def_id: self.get_trait_item_def_id(id),
+ container,
+ fn_has_self_parameter: has_self,
+ }
+ }
+
+ fn get_ctor_def_id_and_kind(self, node_id: DefIndex) -> Option<(DefId, CtorKind)> {
+ match self.kind(node_id) {
+ EntryKind::Struct(data) | EntryKind::Variant(data) => {
+ let vdata = data.decode(self);
+ vdata.ctor.map(|index| (self.local_def_id(index), vdata.ctor_kind))
+ }
+ _ => None,
+ }
+ }
+
+ fn get_item_attrs(
+ self,
+ id: DefIndex,
+ sess: &'a Session,
+ ) -> impl Iterator<Item = ast::Attribute> + 'a {
+ self.root
+ .tables
+ .attributes
+ .get(self, id)
+ .unwrap_or_else(|| {
+ // Structure and variant constructors don't have any attributes encoded for them,
+ // but we assume that someone passing a constructor ID actually wants to look at
+ // the attributes on the corresponding struct or variant.
+ let def_key = self.def_key(id);
+ assert_eq!(def_key.disambiguated_data.data, DefPathData::Ctor);
+ let parent_id = def_key.parent.expect("no parent for a constructor");
+ self.root
+ .tables
+ .attributes
+ .get(self, parent_id)
+ .expect("no encoded attributes for a structure or variant")
+ })
+ .decode((self, sess))
+ }
+
+ fn get_struct_field_names(
+ self,
+ id: DefIndex,
+ sess: &'a Session,
+ ) -> impl Iterator<Item = Spanned<Symbol>> + 'a {
+ self.root
+ .tables
+ .children
+ .get(self, id)
+ .unwrap_or_else(LazyArray::empty)
+ .decode(self)
+ .map(move |index| respan(self.get_span(index, sess), self.item_name(index)))
+ }
+
+ fn get_struct_field_visibilities(self, id: DefIndex) -> impl Iterator<Item = Visibility> + 'a {
+ self.root
+ .tables
+ .children
+ .get(self, id)
+ .unwrap_or_else(LazyArray::empty)
+ .decode(self)
+ .map(move |field_index| self.get_visibility(field_index))
+ }
+
+ fn get_inherent_implementations_for_type(
+ self,
+ tcx: TyCtxt<'tcx>,
+ id: DefIndex,
+ ) -> &'tcx [DefId] {
+ tcx.arena.alloc_from_iter(
+ self.root
+ .tables
+ .inherent_impls
+ .get(self, id)
+ .unwrap_or_else(LazyArray::empty)
+ .decode(self)
+ .map(|index| self.local_def_id(index)),
+ )
+ }
+
+ /// Decodes all inherent impls in the crate (for rustdoc).
+ fn get_inherent_impls(self) -> impl Iterator<Item = (DefId, DefId)> + 'a {
+ (0..self.root.tables.inherent_impls.size()).flat_map(move |i| {
+ let ty_index = DefIndex::from_usize(i);
+ let ty_def_id = self.local_def_id(ty_index);
+ self.root
+ .tables
+ .inherent_impls
+ .get(self, ty_index)
+ .unwrap_or_else(LazyArray::empty)
+ .decode(self)
+ .map(move |impl_index| (ty_def_id, self.local_def_id(impl_index)))
+ })
+ }
+
+ /// Decodes all traits in the crate (for rustdoc and rustc diagnostics).
+ fn get_traits(self) -> impl Iterator<Item = DefId> + 'a {
+ self.root.traits.decode(self).map(move |index| self.local_def_id(index))
+ }
+
+ /// Decodes all trait impls in the crate (for rustdoc).
+ fn get_trait_impls(self) -> impl Iterator<Item = (DefId, DefId, Option<SimplifiedType>)> + 'a {
+ self.cdata.trait_impls.iter().flat_map(move |(&(trait_cnum_raw, trait_index), impls)| {
+ let trait_def_id = DefId {
+ krate: self.cnum_map[CrateNum::from_u32(trait_cnum_raw)],
+ index: trait_index,
+ };
+ impls.decode(self).map(move |(impl_index, simplified_self_ty)| {
+ (trait_def_id, self.local_def_id(impl_index), simplified_self_ty)
+ })
+ })
+ }
+
+ fn get_all_incoherent_impls(self) -> impl Iterator<Item = DefId> + 'a {
+ self.cdata
+ .incoherent_impls
+ .values()
+ .flat_map(move |impls| impls.decode(self).map(move |idx| self.local_def_id(idx)))
+ }
+
+ fn get_incoherent_impls(self, tcx: TyCtxt<'tcx>, simp: SimplifiedType) -> &'tcx [DefId] {
+ if let Some(impls) = self.cdata.incoherent_impls.get(&simp) {
+ tcx.arena.alloc_from_iter(impls.decode(self).map(|idx| self.local_def_id(idx)))
+ } else {
+ &[]
+ }
+ }
+
+ fn get_implementations_of_trait(
+ self,
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ ) -> &'tcx [(DefId, Option<SimplifiedType>)] {
+ if self.trait_impls.is_empty() {
+ return &[];
+ }
+
+ // Do a reverse lookup beforehand to avoid touching the crate_num
+ // hash map in the loop below.
+ let key = match self.reverse_translate_def_id(trait_def_id) {
+ Some(def_id) => (def_id.krate.as_u32(), def_id.index),
+ None => return &[],
+ };
+
+ if let Some(impls) = self.trait_impls.get(&key) {
+ tcx.arena.alloc_from_iter(
+ impls
+ .decode(self)
+ .map(|(idx, simplified_self_ty)| (self.local_def_id(idx), simplified_self_ty)),
+ )
+ } else {
+ &[]
+ }
+ }
+
+ fn get_native_libraries(self, sess: &'a Session) -> impl Iterator<Item = NativeLib> + 'a {
+ self.root.native_libraries.decode((self, sess))
+ }
+
+ fn get_proc_macro_quoted_span(self, index: usize, sess: &Session) -> Span {
+ self.root
+ .tables
+ .proc_macro_quoted_spans
+ .get(self, index)
+ .unwrap_or_else(|| panic!("Missing proc macro quoted span: {:?}", index))
+ .decode((self, sess))
+ }
+
+ fn get_foreign_modules(self, sess: &'a Session) -> impl Iterator<Item = ForeignModule> + '_ {
+ self.root.foreign_modules.decode((self, sess))
+ }
+
+ fn get_dylib_dependency_formats(
+ self,
+ tcx: TyCtxt<'tcx>,
+ ) -> &'tcx [(CrateNum, LinkagePreference)] {
+ tcx.arena.alloc_from_iter(
+ self.root.dylib_dependency_formats.decode(self).enumerate().flat_map(|(i, link)| {
+ let cnum = CrateNum::new(i + 1);
+ link.map(|link| (self.cnum_map[cnum], link))
+ }),
+ )
+ }
+
+ fn get_missing_lang_items(self, tcx: TyCtxt<'tcx>) -> &'tcx [lang_items::LangItem] {
+ tcx.arena.alloc_from_iter(self.root.lang_items_missing.decode(self))
+ }
+
+ fn exported_symbols(
+ self,
+ tcx: TyCtxt<'tcx>,
+ ) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
+ tcx.arena.alloc_from_iter(self.root.exported_symbols.decode((self, tcx)))
+ }
+
+ fn get_macro(self, id: DefIndex, sess: &Session) -> ast::MacroDef {
+ match self.kind(id) {
+ EntryKind::MacroDef(mac_args, macro_rules) => {
+ ast::MacroDef { body: P(mac_args.decode((self, sess))), macro_rules }
+ }
+ _ => bug!(),
+ }
+ }
+
+ fn is_foreign_item(self, id: DefIndex) -> bool {
+ match self.kind(id) {
+ EntryKind::ForeignStatic | EntryKind::ForeignFn => true,
+ _ => false,
+ }
+ }
+
+ #[inline]
+ fn def_key(self, index: DefIndex) -> DefKey {
+ *self
+ .def_key_cache
+ .lock()
+ .entry(index)
+ .or_insert_with(|| self.root.tables.def_keys.get(self, index).unwrap().decode(self))
+ }
+
+ // Returns the path leading to the thing with this `id`.
+ fn def_path(self, id: DefIndex) -> DefPath {
+ debug!("def_path(cnum={:?}, id={:?})", self.cnum, id);
+ DefPath::make(self.cnum, id, |parent| self.def_key(parent))
+ }
+
+ fn def_path_hash_unlocked(
+ self,
+ index: DefIndex,
+ def_path_hashes: &mut FxHashMap<DefIndex, DefPathHash>,
+ ) -> DefPathHash {
+ *def_path_hashes
+ .entry(index)
+ .or_insert_with(|| self.root.tables.def_path_hashes.get(self, index).unwrap())
+ }
+
+ #[inline]
+ fn def_path_hash(self, index: DefIndex) -> DefPathHash {
+ let mut def_path_hashes = self.def_path_hash_cache.lock();
+ self.def_path_hash_unlocked(index, &mut def_path_hashes)
+ }
+
+ #[inline]
+ fn def_path_hash_to_def_index(self, hash: DefPathHash) -> DefIndex {
+ self.def_path_hash_map.def_path_hash_to_def_index(&hash)
+ }
+
+ fn expn_hash_to_expn_id(self, sess: &Session, index_guess: u32, hash: ExpnHash) -> ExpnId {
+ debug_assert_eq!(ExpnId::from_hash(hash), None);
+ let index_guess = ExpnIndex::from_u32(index_guess);
+ let old_hash = self.root.expn_hashes.get(self, index_guess).map(|lazy| lazy.decode(self));
+
+ let index = if old_hash == Some(hash) {
+ // Fast path: the expn and its index are unchanged from the
+ // previous compilation session. There is no need to decode anything
+ // else.
+ index_guess
+ } else {
+ // Slow path: we need to find the new `ExpnIndex` of the provided
+ // `ExpnHash`, if it still exists. This requires decoding every `ExpnHash`
+ // stored in this crate.
+ let map = self.cdata.expn_hash_map.get_or_init(|| {
+ let end_id = self.root.expn_hashes.size() as u32;
+ let mut map =
+ UnhashMap::with_capacity_and_hasher(end_id as usize, Default::default());
+ for i in 0..end_id {
+ let i = ExpnIndex::from_u32(i);
+ if let Some(hash) = self.root.expn_hashes.get(self, i) {
+ map.insert(hash.decode(self), i);
+ }
+ }
+ map
+ });
+ map[&hash]
+ };
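+
+ // Cost sketch: the fast path is O(1) and covers the common case where
+ // the index guessed from the previous session still matches. The slow
+ // path decodes every `ExpnHash` in the crate once; the resulting
+ // `UnhashMap` (keyed directly by the already-hashed value, so no
+ // re-hashing) is cached in `expn_hash_map` for later lookups.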
+
+ let data = self.root.expn_data.get(self, index).unwrap().decode((self, sess));
+ rustc_span::hygiene::register_expn_id(self.cnum, index, data, hash)
+ }
+
+ /// Imports the source_map from an external crate into the source_map of the crate
+ /// currently being compiled (the "local crate").
+ ///
+ /// The import algorithm works analogously to how AST items are inlined from an
+ /// external crate's metadata:
+ /// For every SourceFile in the external source_map an 'inline' copy is created in the
+ /// local source_map. The correspondence relation between external and local
+ /// SourceFiles is recorded in the `ImportedSourceFile` objects returned from this
+ /// function. When an item from an external crate is later inlined into this
+ /// crate, this correspondence information is used to translate the span
+ /// information of the inlined item so that it refers to the correct positions in
+ /// the local source_map (see `<decoder::DecodeContext as SpecializedDecoder<Span>>`).
+ ///
+ /// The import algorithm in the function below will reuse SourceFiles already
+ /// existing in the local source_map. For example, even if the SourceFile of some
+ /// source file of libstd gets imported many times, there will only ever be
+ /// one SourceFile object for the corresponding file in the local source_map.
+ ///
+ /// Note that imported SourceFiles do not actually contain the source code of the
+ /// file they represent, just information about length, line breaks, and
+ /// multibyte characters. This information is enough to generate valid debuginfo
+ /// for items inlined from other crates.
+ ///
+ /// Proc macro crates don't currently export spans, so this function does not have
+ /// to work for them.
+ fn imported_source_files(self, sess: &Session) -> &'a [ImportedSourceFile] {
+ fn filter<'a>(sess: &Session, path: Option<&'a Path>) -> Option<&'a Path> {
+ path.filter(|_| {
+ // Only spend time on further checks if we have something to translate *to*.
+ sess.opts.real_rust_source_base_dir.is_some()
+ // Some tests need the translation to be always skipped.
+ && sess.opts.unstable_opts.translate_remapped_path_to_local_path
+ })
+ .filter(|virtual_dir| {
+ // Don't translate away `/rustc/$hash` if we're still remapping to it,
+ // since that means we're still building `std`/`rustc` that need it,
+ // and we don't want the real path to leak into codegen/debuginfo.
+ !sess.opts.remap_path_prefix.iter().any(|(_from, to)| to == virtual_dir)
+ })
+ }
+
+ // Translate the virtual `/rustc/$hash` prefix back to a real directory
+ // that should hold actual sources, where possible.
+ //
+ // NOTE: if you update this, you might need to also update bootstrap's code for generating
+ // the `rust-src` component in `Src::run` in `src/bootstrap/dist.rs`.
+ let virtual_rust_source_base_dir = [
+ filter(sess, option_env!("CFG_VIRTUAL_RUST_SOURCE_BASE_DIR").map(Path::new)),
+ filter(sess, sess.opts.unstable_opts.simulate_remapped_rust_src_base.as_deref()),
+ ];
+
+ let try_to_translate_virtual_to_real = |name: &mut rustc_span::FileName| {
+ debug!(
+ "try_to_translate_virtual_to_real(name={:?}): \
+ virtual_rust_source_base_dir={:?}, real_rust_source_base_dir={:?}",
+ name, virtual_rust_source_base_dir, sess.opts.real_rust_source_base_dir,
+ );
+
+ for virtual_dir in virtual_rust_source_base_dir.iter().flatten() {
+ if let Some(real_dir) = &sess.opts.real_rust_source_base_dir {
+ if let rustc_span::FileName::Real(old_name) = name {
+ if let rustc_span::RealFileName::Remapped { local_path: _, virtual_name } =
+ old_name
+ {
+ if let Ok(rest) = virtual_name.strip_prefix(virtual_dir) {
+ let virtual_name = virtual_name.clone();
+
+ // The std library crates are in
+ // `$sysroot/lib/rustlib/src/rust/library`, whereas other crates
+ // may be in `$sysroot/lib/rustlib/src/rust/` directly. So we
+ // detect crates from the std libs and handle them specially.
+ const STD_LIBS: &[&str] = &[
+ "core",
+ "alloc",
+ "std",
+ "test",
+ "term",
+ "unwind",
+ "proc_macro",
+ "panic_abort",
+ "panic_unwind",
+ "profiler_builtins",
+ "rtstartup",
+ "rustc-std-workspace-core",
+ "rustc-std-workspace-alloc",
+ "rustc-std-workspace-std",
+ "backtrace",
+ ];
+ let is_std_lib = STD_LIBS.iter().any(|l| rest.starts_with(l));
+
+ let new_path = if is_std_lib {
+ real_dir.join("library").join(rest)
+ } else {
+ real_dir.join(rest)
+ };
+
+ debug!(
+ "try_to_translate_virtual_to_real: `{}` -> `{}`",
+ virtual_name.display(),
+ new_path.display(),
+ );
+ let new_name = rustc_span::RealFileName::Remapped {
+ local_path: Some(new_path),
+ virtual_name,
+ };
+ *old_name = new_name;
+ }
+ }
+ }
+ }
+ }
+ };
+
+ self.cdata.source_map_import_info.get_or_init(|| {
+ let external_source_map = self.root.source_map.decode(self);
+
+ external_source_map
+ .map(|source_file_to_import| {
+ // We can't reuse an existing SourceFile, so allocate a new one
+ // containing the information we need.
+ let rustc_span::SourceFile {
+ mut name,
+ src_hash,
+ start_pos,
+ end_pos,
+ lines,
+ multibyte_chars,
+ non_narrow_chars,
+ normalized_pos,
+ name_hash,
+ ..
+ } = source_file_to_import;
+
+ // If this file is under $sysroot/lib/rustlib/src/ but has not been remapped
+ // during rust bootstrapping by `remap-debuginfo = true`, and the user
+ // wishes to simulate that behaviour with `-Z simulate-remapped-rust-src-base`,
+ // then we change `name` to the state it would have had if rustc had been
+ // bootstrapped with `remap-debuginfo = true`.
+ // This is useful for testing so that tests about the effects of
+ // `try_to_translate_virtual_to_real` don't have to worry about how the
+ // compiler is bootstrapped.
+ if let Some(virtual_dir) =
+ &sess.opts.unstable_opts.simulate_remapped_rust_src_base
+ {
+ if let Some(real_dir) = &sess.opts.real_rust_source_base_dir {
+ if let rustc_span::FileName::Real(ref mut old_name) = name {
+ if let rustc_span::RealFileName::LocalPath(local) = old_name {
+ if let Ok(rest) = local.strip_prefix(real_dir) {
+ *old_name = rustc_span::RealFileName::Remapped {
+ local_path: None,
+ virtual_name: virtual_dir.join(rest),
+ };
+ }
+ }
+ }
+ }
+ }
+
+ // If this file's path has been remapped to `/rustc/$hash`,
+ // we might be able to reverse that (also see comments above,
+ // on `try_to_translate_virtual_to_real`).
+ try_to_translate_virtual_to_real(&mut name);
+
+ let source_length = (end_pos - start_pos).to_usize();
+
+ let local_version = sess.source_map().new_imported_source_file(
+ name,
+ src_hash,
+ name_hash,
+ source_length,
+ self.cnum,
+ lines,
+ multibyte_chars,
+ non_narrow_chars,
+ normalized_pos,
+ start_pos,
+ end_pos,
+ );
+ debug!(
+ "CrateMetaData::imported_source_files alloc \
+ source_file {:?} original (start_pos {:?} end_pos {:?}) \
+ translated (start_pos {:?} end_pos {:?})",
+ local_version.name,
+ start_pos,
+ end_pos,
+ local_version.start_pos,
+ local_version.end_pos
+ );
+
+ ImportedSourceFile {
+ original_start_pos: start_pos,
+ original_end_pos: end_pos,
+ translated_source_file: local_version,
+ }
+ })
+ .collect()
+ })
+ }
+
+ fn get_generator_diagnostic_data(
+ self,
+ tcx: TyCtxt<'tcx>,
+ id: DefIndex,
+ ) -> Option<GeneratorDiagnosticData<'tcx>> {
+ self.root
+ .tables
+ .generator_diagnostic_data
+ .get(self, id)
+ .map(|param| param.decode((self, tcx)))
+ .map(|generator_data| GeneratorDiagnosticData {
+ generator_interior_types: generator_data.generator_interior_types,
+ hir_owner: generator_data.hir_owner,
+ nodes_types: generator_data.nodes_types,
+ adjustments: generator_data.adjustments,
+ })
+ }
+
+ fn get_may_have_doc_links(self, index: DefIndex) -> bool {
+ self.root.tables.may_have_doc_links.get(self, index).is_some()
+ }
+
+ fn get_is_intrinsic(self, index: DefIndex) -> bool {
+ self.root.tables.is_intrinsic.get(self, index).is_some()
+ }
+}
+
+impl CrateMetadata {
+ pub(crate) fn new(
+ sess: &Session,
+ cstore: &CStore,
+ blob: MetadataBlob,
+ root: CrateRoot,
+ raw_proc_macros: Option<&'static [ProcMacro]>,
+ cnum: CrateNum,
+ cnum_map: CrateNumMap,
+ dep_kind: CrateDepKind,
+ source: CrateSource,
+ private_dep: bool,
+ host_hash: Option<Svh>,
+ ) -> CrateMetadata {
+ let trait_impls = root
+ .impls
+ .decode((&blob, sess))
+ .map(|trait_impls| (trait_impls.trait_id, trait_impls.impls))
+ .collect();
+ let alloc_decoding_state =
+ AllocDecodingState::new(root.interpret_alloc_index.decode(&blob).collect());
+ let dependencies = Lock::new(cnum_map.iter().cloned().collect());
+
+ // Pre-decode the DefPathHash->DefIndex table. This is a cheap operation
+ // that does not copy any data. It just does some data verification.
+ let def_path_hash_map = root.def_path_hash_map.decode(&blob);
+
+ let mut cdata = CrateMetadata {
+ blob,
+ root,
+ trait_impls,
+ incoherent_impls: Default::default(),
+ raw_proc_macros,
+ source_map_import_info: OnceCell::new(),
+ def_path_hash_map,
+ expn_hash_map: Default::default(),
+ alloc_decoding_state,
+ cnum,
+ cnum_map,
+ dependencies,
+ dep_kind: Lock::new(dep_kind),
+ source: Lrc::new(source),
+ private_dep,
+ host_hash,
+ extern_crate: Lock::new(None),
+ hygiene_context: Default::default(),
+ def_key_cache: Default::default(),
+ def_path_hash_cache: Default::default(),
+ };
+
+ // Need `CrateMetadataRef` to decode `DefId`s in simplified types.
+ cdata.incoherent_impls = cdata
+ .root
+ .incoherent_impls
+ .decode(CrateMetadataRef { cdata: &cdata, cstore })
+ .map(|incoherent_impls| (incoherent_impls.self_ty, incoherent_impls.impls))
+ .collect();
+
+ cdata
+ }
+
+ pub(crate) fn dependencies(&self) -> LockGuard<'_, Vec<CrateNum>> {
+ self.dependencies.borrow()
+ }
+
+ pub(crate) fn add_dependency(&self, cnum: CrateNum) {
+ self.dependencies.borrow_mut().push(cnum);
+ }
+
+ pub(crate) fn update_extern_crate(&self, new_extern_crate: ExternCrate) -> bool {
+ let mut extern_crate = self.extern_crate.borrow_mut();
+ let update = Some(new_extern_crate.rank()) > extern_crate.as_ref().map(ExternCrate::rank);
+ if update {
+ *extern_crate = Some(new_extern_crate);
+ }
+ update
+ }
+
+ pub(crate) fn source(&self) -> &CrateSource {
+ &*self.source
+ }
+
+ pub(crate) fn dep_kind(&self) -> CrateDepKind {
+ *self.dep_kind.lock()
+ }
+
+ pub(crate) fn update_dep_kind(&self, f: impl FnOnce(CrateDepKind) -> CrateDepKind) {
+ self.dep_kind.with_lock(|dep_kind| *dep_kind = f(*dep_kind))
+ }
+
+ pub(crate) fn required_panic_strategy(&self) -> Option<PanicStrategy> {
+ self.root.required_panic_strategy
+ }
+
+ pub(crate) fn needs_panic_runtime(&self) -> bool {
+ self.root.needs_panic_runtime
+ }
+
+ pub(crate) fn is_panic_runtime(&self) -> bool {
+ self.root.panic_runtime
+ }
+
+ pub(crate) fn is_profiler_runtime(&self) -> bool {
+ self.root.profiler_runtime
+ }
+
+ pub(crate) fn needs_allocator(&self) -> bool {
+ self.root.needs_allocator
+ }
+
+ pub(crate) fn has_global_allocator(&self) -> bool {
+ self.root.has_global_allocator
+ }
+
+ pub(crate) fn has_default_lib_allocator(&self) -> bool {
+ self.root.has_default_lib_allocator
+ }
+
+ pub(crate) fn is_proc_macro_crate(&self) -> bool {
+ self.root.is_proc_macro_crate()
+ }
+
+ pub(crate) fn name(&self) -> Symbol {
+ self.root.name
+ }
+
+ pub(crate) fn stable_crate_id(&self) -> StableCrateId {
+ self.root.stable_crate_id
+ }
+
+ pub(crate) fn hash(&self) -> Svh {
+ self.root.hash
+ }
+
+ fn num_def_ids(&self) -> usize {
+ self.root.tables.def_keys.size()
+ }
+
+ fn local_def_id(&self, index: DefIndex) -> DefId {
+ DefId { krate: self.cnum, index }
+ }
+
+ // Translate a DefId from the current compilation environment to a DefId
+ // for an external crate.
+ fn reverse_translate_def_id(&self, did: DefId) -> Option<DefId> {
+ for (local, &global) in self.cnum_map.iter_enumerated() {
+ if global == did.krate {
+ return Some(DefId { krate: local, index: did.index });
+ }
+ }
+
+ None
+ }
+}
+
+// Cannot be implemented on `ProcMacro`, as libproc_macro
+// does not depend on librustc_ast.
+fn macro_kind(raw: &ProcMacro) -> MacroKind {
+ match raw {
+ ProcMacro::CustomDerive { .. } => MacroKind::Derive,
+ ProcMacro::Attr { .. } => MacroKind::Attr,
+ ProcMacro::Bang { .. } => MacroKind::Bang,
+ }
+}
diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
new file mode 100644
index 000000000..38ce50e83
--- /dev/null
+++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs
@@ -0,0 +1,680 @@
+use crate::creader::{CStore, LoadedMacro};
+use crate::foreign_modules;
+use crate::native_libs;
+
+use rustc_ast as ast;
+use rustc_attr::Deprecation;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LOCAL_CRATE};
+use rustc_hir::definitions::{DefKey, DefPath, DefPathHash};
+use rustc_middle::arena::ArenaAllocatable;
+use rustc_middle::metadata::ModChild;
+use rustc_middle::middle::exported_symbols::ExportedSymbol;
+use rustc_middle::middle::stability::DeprecationEntry;
+use rustc_middle::ty::fast_reject::SimplifiedType;
+use rustc_middle::ty::query::{ExternProviders, Providers};
+use rustc_middle::ty::{self, TyCtxt, Visibility};
+use rustc_session::cstore::{CrateSource, CrateStore};
+use rustc_session::utils::NativeLibKind;
+use rustc_session::{Session, StableCrateId};
+use rustc_span::hygiene::{ExpnHash, ExpnId};
+use rustc_span::source_map::{Span, Spanned};
+use rustc_span::symbol::{kw, Symbol};
+
+use rustc_data_structures::sync::Lrc;
+use smallvec::SmallVec;
+use std::any::Any;
+
+use super::{Decodable, DecodeContext, DecodeIterator};
+
+trait ProcessQueryValue<'tcx, T> {
+ fn process_decoded(self, _tcx: TyCtxt<'tcx>, _err: impl Fn() -> !) -> T;
+}
+
+impl<T> ProcessQueryValue<'_, Option<T>> for Option<T> {
+ #[inline(always)]
+ fn process_decoded(self, _tcx: TyCtxt<'_>, _err: impl Fn() -> !) -> Option<T> {
+ self
+ }
+}
+
+impl<T> ProcessQueryValue<'_, T> for Option<T> {
+ #[inline(always)]
+ fn process_decoded(self, _tcx: TyCtxt<'_>, err: impl Fn() -> !) -> T {
+ if let Some(value) = self { value } else { err() }
+ }
+}
+
+impl<'tcx, T: ArenaAllocatable<'tcx>> ProcessQueryValue<'tcx, &'tcx T> for Option<T> {
+ #[inline(always)]
+ fn process_decoded(self, tcx: TyCtxt<'tcx>, err: impl Fn() -> !) -> &'tcx T {
+ if let Some(value) = self { tcx.arena.alloc(value) } else { err() }
+ }
+}
+
+impl<T, E> ProcessQueryValue<'_, Result<Option<T>, E>> for Option<T> {
+ #[inline(always)]
+ fn process_decoded(self, _tcx: TyCtxt<'_>, _err: impl Fn() -> !) -> Result<Option<T>, E> {
+ Ok(self)
+ }
+}
+
+impl<'a, 'tcx, T: Copy + Decodable<DecodeContext<'a, 'tcx>>> ProcessQueryValue<'tcx, &'tcx [T]>
+ for Option<DecodeIterator<'a, 'tcx, T>>
+{
+ #[inline(always)]
+ fn process_decoded(self, tcx: TyCtxt<'tcx>, _err: impl Fn() -> !) -> &'tcx [T] {
+ if let Some(iter) = self { tcx.arena.alloc_from_iter(iter) } else { &[] }
+ }
+}
+
+impl ProcessQueryValue<'_, Option<DeprecationEntry>> for Option<Deprecation> {
+ #[inline(always)]
+ fn process_decoded(self, _tcx: TyCtxt<'_>, _err: impl Fn() -> !) -> Option<DeprecationEntry> {
+ self.map(DeprecationEntry::external)
+ }
+}
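+
+// Taken together, these impls let the `provide!` macro below adapt a single
+// decoded `Option<T>` to whatever shape a query signature demands: returned
+// as-is, unwrapped (calling `err` when absent), arena-allocated into
+// `&'tcx T` or `&'tcx [T]`, or mapped into a `DeprecationEntry`.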
+
+macro_rules! provide_one {
+ (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $name:ident => { table }) => {
+ provide_one! {
+ <$lt> $tcx, $def_id, $other, $cdata, $name => {
+ $cdata
+ .root
+ .tables
+ .$name
+ .get($cdata, $def_id.index)
+ .map(|lazy| lazy.decode(($cdata, $tcx)))
+ .process_decoded($tcx, || panic!("{:?} does not have a {:?}", $def_id, stringify!($name)))
+ }
+ }
+ };
+ (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $name:ident => { table_direct }) => {
+ provide_one! {
+ <$lt> $tcx, $def_id, $other, $cdata, $name => {
+ // We don't decode `table_direct`, since it's not a Lazy, but an actual value
+ $cdata
+ .root
+ .tables
+ .$name
+ .get($cdata, $def_id.index)
+ .process_decoded($tcx, || panic!("{:?} does not have a {:?}", $def_id, stringify!($name)))
+ }
+ }
+ };
+ (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident, $name:ident => $compute:block) => {
+ fn $name<$lt>(
+ $tcx: TyCtxt<$lt>,
+ def_id_arg: ty::query::query_keys::$name<$lt>,
+ ) -> ty::query::query_values::$name<$lt> {
+ let _prof_timer =
+ $tcx.prof.generic_activity(concat!("metadata_decode_entry_", stringify!($name)));
+
+ #[allow(unused_variables)]
+ let ($def_id, $other) = def_id_arg.into_args();
+ assert!(!$def_id.is_local());
+
+ // External query providers call `crate_hash` in order to register a dependency
+ // on the crate metadata. The exception is `crate_hash` itself, which obviously
+ // doesn't need to do this (and can't, as it would cause a query cycle).
+ use rustc_middle::dep_graph::DepKind;
+ if DepKind::$name != DepKind::crate_hash && $tcx.dep_graph.is_fully_enabled() {
+ $tcx.ensure().crate_hash($def_id.krate);
+ }
+
+ let $cdata = CStore::from_tcx($tcx).get_crate_data($def_id.krate);
+
+ $compute
+ }
+ };
+}
+
+macro_rules! provide {
+ (<$lt:tt> $tcx:ident, $def_id:ident, $other:ident, $cdata:ident,
+ $($name:ident => { $($compute:tt)* })*) => {
+ pub fn provide_extern(providers: &mut ExternProviders) {
+ $(provide_one! {
+ <$lt> $tcx, $def_id, $other, $cdata, $name => { $($compute)* }
+ })*
+
+ *providers = ExternProviders {
+ $($name,)*
+ ..*providers
+ };
+ }
+ }
+}
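+
+// Sketch of the expansion (illustrative only): for an entry such as
+// `type_of => { table }`, `provide!` generates roughly
+//     fn type_of<'tcx>(tcx: TyCtxt<'tcx>, key: ...) -> ... {
+//         let (def_id, ()) = key.into_args();
+//         let cdata = CStore::from_tcx(tcx).get_crate_data(def_id.krate);
+//         cdata.root.tables.type_of.get(cdata, def_id.index)
+//             .map(|lazy| lazy.decode((cdata, tcx)))
+//             .process_decoded(tcx, || panic!("..."))
+//     }
+// and then installs every generated function into `ExternProviders`.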
+
+// Small trait to work around queries with different key types all being
+// defined via the macro above.
+trait IntoArgs {
+ type Other;
+ fn into_args(self) -> (DefId, Self::Other);
+}
+
+impl IntoArgs for DefId {
+ type Other = ();
+ fn into_args(self) -> (DefId, ()) {
+ (self, ())
+ }
+}
+
+impl IntoArgs for CrateNum {
+ type Other = ();
+ fn into_args(self) -> (DefId, ()) {
+ (self.as_def_id(), ())
+ }
+}
+
+impl IntoArgs for (CrateNum, DefId) {
+ type Other = DefId;
+ fn into_args(self) -> (DefId, DefId) {
+ (self.0.as_def_id(), self.1)
+ }
+}
+
+impl<'tcx> IntoArgs for ty::InstanceDef<'tcx> {
+ type Other = ();
+ fn into_args(self) -> (DefId, ()) {
+ (self.def_id(), ())
+ }
+}
+
+impl IntoArgs for (CrateNum, SimplifiedType) {
+ type Other = SimplifiedType;
+ fn into_args(self) -> (DefId, SimplifiedType) {
+ (self.0.as_def_id(), self.1)
+ }
+}
+
+provide! { <'tcx> tcx, def_id, other, cdata,
+ explicit_item_bounds => { table }
+ explicit_predicates_of => { table }
+ generics_of => { table }
+ inferred_outlives_of => { table }
+ super_predicates_of => { table }
+ type_of => { table }
+ variances_of => { table }
+ fn_sig => { table }
+ codegen_fn_attrs => { table }
+ impl_trait_ref => { table }
+ const_param_default => { table }
+ thir_abstract_const => { table }
+ optimized_mir => { table }
+ mir_for_ctfe => { table }
+ promoted_mir => { table }
+ def_span => { table }
+ def_ident_span => { table }
+ lookup_stability => { table }
+ lookup_const_stability => { table }
+ lookup_deprecation_entry => { table }
+ visibility => { table }
+ unused_generic_params => { table }
+ opt_def_kind => { table_direct }
+ impl_parent => { table }
+ impl_polarity => { table_direct }
+ impl_defaultness => { table_direct }
+ constness => { table_direct }
+ coerce_unsized_info => { table }
+ mir_const_qualif => { table }
+ rendered_const => { table }
+ asyncness => { table_direct }
+ fn_arg_names => { table }
+ generator_kind => { table }
+ trait_def => { table }
+
+ adt_def => { cdata.get_adt_def(def_id.index, tcx) }
+ adt_destructor => {
+ let _ = cdata;
+ tcx.calculate_dtor(def_id, |_,_| Ok(()))
+ }
+ associated_item_def_ids => {
+ tcx.arena.alloc_from_iter(cdata.get_associated_item_def_ids(def_id.index, tcx.sess))
+ }
+ associated_item => { cdata.get_associated_item(def_id.index) }
+ inherent_impls => { cdata.get_inherent_implementations_for_type(tcx, def_id.index) }
+ is_foreign_item => { cdata.is_foreign_item(def_id.index) }
+ item_attrs => { tcx.arena.alloc_from_iter(cdata.get_item_attrs(def_id.index, tcx.sess)) }
+ is_mir_available => { cdata.is_item_mir_available(def_id.index) }
+ is_ctfe_mir_available => { cdata.is_ctfe_mir_available(def_id.index) }
+
+ dylib_dependency_formats => { cdata.get_dylib_dependency_formats(tcx) }
+ is_private_dep => { cdata.private_dep }
+ is_panic_runtime => { cdata.root.panic_runtime }
+ is_compiler_builtins => { cdata.root.compiler_builtins }
+ has_global_allocator => { cdata.root.has_global_allocator }
+ has_panic_handler => { cdata.root.has_panic_handler }
+ is_profiler_runtime => { cdata.root.profiler_runtime }
+ required_panic_strategy => { cdata.root.required_panic_strategy }
+ panic_in_drop_strategy => { cdata.root.panic_in_drop_strategy }
+ extern_crate => {
+ let r = *cdata.extern_crate.lock();
+ r.map(|c| &*tcx.arena.alloc(c))
+ }
+ is_no_builtins => { cdata.root.no_builtins }
+ symbol_mangling_version => { cdata.root.symbol_mangling_version }
+ reachable_non_generics => {
+ let reachable_non_generics = tcx
+ .exported_symbols(cdata.cnum)
+ .iter()
+ .filter_map(|&(exported_symbol, export_info)| {
+ if let ExportedSymbol::NonGeneric(def_id) = exported_symbol {
+ Some((def_id, export_info))
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ reachable_non_generics
+ }
+ native_libraries => { cdata.get_native_libraries(tcx.sess).collect() }
+ foreign_modules => { cdata.get_foreign_modules(tcx.sess).map(|m| (m.def_id, m)).collect() }
+ crate_hash => { cdata.root.hash }
+ crate_host_hash => { cdata.host_hash }
+ crate_name => { cdata.root.name }
+
+ extra_filename => { cdata.root.extra_filename.clone() }
+
+ traits_in_crate => { tcx.arena.alloc_from_iter(cdata.get_traits()) }
+ implementations_of_trait => { cdata.get_implementations_of_trait(tcx, other) }
+ crate_incoherent_impls => { cdata.get_incoherent_impls(tcx, other) }
+
+ dep_kind => {
+ let r = *cdata.dep_kind.lock();
+ r
+ }
+ module_children => {
+ let mut result = SmallVec::<[_; 8]>::new();
+ cdata.for_each_module_child(def_id.index, |child| result.push(child), tcx.sess);
+ tcx.arena.alloc_slice(&result)
+ }
+ defined_lib_features => { cdata.get_lib_features(tcx) }
+ stability_implications => {
+ cdata.get_stability_implications(tcx).iter().copied().collect()
+ }
+ is_intrinsic => { cdata.get_is_intrinsic(def_id.index) }
+ defined_lang_items => { cdata.get_lang_items(tcx) }
+ diagnostic_items => { cdata.get_diagnostic_items() }
+ missing_lang_items => { cdata.get_missing_lang_items(tcx) }
+
+ missing_extern_crate_item => {
+ let r = matches!(*cdata.extern_crate.borrow(), Some(extern_crate) if !extern_crate.is_direct());
+ r
+ }
+
+ used_crate_source => { Lrc::clone(&cdata.source) }
+ debugger_visualizers => { cdata.get_debugger_visualizers() }
+
+ exported_symbols => {
+ let syms = cdata.exported_symbols(tcx);
+
+ // FIXME rust-lang/rust#64319, rust-lang/rust#64872: We want
+ // to block export of generics from dylibs, but we must fix
+ // rust-lang/rust#65890 before we can do that robustly.
+
+ syms
+ }
+
+ crate_extern_paths => { cdata.source().paths().cloned().collect() }
+ expn_that_defined => { cdata.get_expn_that_defined(def_id.index, tcx.sess) }
+ generator_diagnostic_data => { cdata.get_generator_diagnostic_data(tcx, def_id.index) }
+}
+
+pub(in crate::rmeta) fn provide(providers: &mut Providers) {
+    // FIXME(#44234) - almost all of these queries have no sub-queries and
+    // therefore no actual inputs; they're just reading tables calculated in
+    // resolve! Does this work? Unsure! That's what the issue is about.
+ *providers = Providers {
+ allocator_kind: |tcx, ()| CStore::from_tcx(tcx).allocator_kind(),
+ is_dllimport_foreign_item: |tcx, id| match tcx.native_library_kind(id) {
+ Some(
+ NativeLibKind::Dylib { .. } | NativeLibKind::RawDylib | NativeLibKind::Unspecified,
+ ) => true,
+ _ => false,
+ },
+ is_statically_included_foreign_item: |tcx, id| {
+ matches!(tcx.native_library_kind(id), Some(NativeLibKind::Static { .. }))
+ },
+ is_private_dep: |_tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ false
+ },
+ native_library_kind: |tcx, id| {
+ tcx.native_libraries(id.krate)
+ .iter()
+ .filter(|lib| native_libs::relevant_lib(&tcx.sess, lib))
+ .find(|lib| {
+ let Some(fm_id) = lib.foreign_module else {
+ return false;
+ };
+ let map = tcx.foreign_modules(id.krate);
+ map.get(&fm_id)
+ .expect("failed to find foreign module")
+ .foreign_items
+ .contains(&id)
+ })
+ .map(|l| l.kind)
+ },
+ native_libraries: |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ native_libs::collect(tcx)
+ },
+ foreign_modules: |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ foreign_modules::collect(tcx).into_iter().map(|m| (m.def_id, m)).collect()
+ },
+
+ // Returns a map from a sufficiently visible external item (i.e., an
+ // external item that is visible from at least one local module) to a
+ // sufficiently visible parent (considering modules that re-export the
+ // external item to be parents).
+ visible_parent_map: |tcx, ()| {
+ use std::collections::hash_map::Entry;
+ use std::collections::vec_deque::VecDeque;
+
+ let mut visible_parent_map: DefIdMap<DefId> = Default::default();
+            // This is a secondary visible_parent_map, storing the DefId of
+            // parents that re-export the child as `_`, or of module parents
+            // that are `#[doc(hidden)]`. Since we prefer paths that avoid
+            // both, this map is merged in at the end, and only for keys
+            // missing from the primary map.
+            // This is a rudimentary check that does not catch all cases,
+            // just the easiest ones.
+ let mut fallback_map: DefIdMap<DefId> = Default::default();
+
+ // Issue 46112: We want the map to prefer the shortest
+ // paths when reporting the path to an item. Therefore we
+ // build up the map via a breadth-first search (BFS),
+ // which naturally yields minimal-length paths.
+ //
+ // Note that it needs to be a BFS over the whole forest of
+ // crates, not just each individual crate; otherwise you
+ // only get paths that are locally minimal with respect to
+ // whatever crate we happened to encounter first in this
+ // traversal, but not globally minimal across all crates.
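+            //
+            // For example (illustrative): if crate `a` re-exports `b::Foo`
+            // from its root, seeding the queue with every crate root lets the
+            // BFS reach `Foo` via `a` at depth 1, before it would via `b`'s
+            // own module tree, so diagnostics suggest the shorter `a::Foo`.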
+ let bfs_queue = &mut VecDeque::new();
+
+ for &cnum in tcx.crates(()) {
+ // Ignore crates without a corresponding local `extern crate` item.
+ if tcx.missing_extern_crate_item(cnum) {
+ continue;
+ }
+
+ bfs_queue.push_back(cnum.as_def_id());
+ }
+
+ let mut add_child = |bfs_queue: &mut VecDeque<_>, child: &ModChild, parent: DefId| {
+ if !child.vis.is_public() {
+ return;
+ }
+
+ if let Some(def_id) = child.res.opt_def_id() {
+ if child.ident.name == kw::Underscore {
+ fallback_map.insert(def_id, parent);
+ return;
+ }
+
+ if ty::util::is_doc_hidden(tcx, parent) {
+ fallback_map.insert(def_id, parent);
+ return;
+ }
+
+ match visible_parent_map.entry(def_id) {
+ Entry::Occupied(mut entry) => {
+ // If `child` is defined in crate `cnum`, ensure
+ // that it is mapped to a parent in `cnum`.
+ if def_id.is_local() && entry.get().is_local() {
+ entry.insert(parent);
+ }
+ }
+ Entry::Vacant(entry) => {
+ entry.insert(parent);
+ if matches!(
+ child.res,
+ Res::Def(DefKind::Mod | DefKind::Enum | DefKind::Trait, _)
+ ) {
+ bfs_queue.push_back(def_id);
+ }
+ }
+ }
+ }
+ };
+
+ while let Some(def) = bfs_queue.pop_front() {
+ for child in tcx.module_children(def).iter() {
+ add_child(bfs_queue, child, def);
+ }
+ }
+
+ // Fill in any missing entries with the less preferable path.
+ // If this path re-exports the child as `_`, we still use this
+ // path in a diagnostic that suggests importing `::*`.
+ for (child, parent) in fallback_map {
+ visible_parent_map.entry(child).or_insert(parent);
+ }
+
+ visible_parent_map
+ },
+
+ dependency_formats: |tcx, ()| Lrc::new(crate::dependency_format::calculate(tcx)),
+ has_global_allocator: |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ CStore::from_tcx(tcx).has_global_allocator()
+ },
+ postorder_cnums: |tcx, ()| {
+ tcx.arena
+ .alloc_slice(&CStore::from_tcx(tcx).crate_dependencies_in_postorder(LOCAL_CRATE))
+ },
+ crates: |tcx, ()| tcx.arena.alloc_from_iter(CStore::from_tcx(tcx).crates_untracked()),
+ ..*providers
+ };
+}
+
+impl CStore {
+ pub fn struct_field_names_untracked<'a>(
+ &'a self,
+ def: DefId,
+ sess: &'a Session,
+ ) -> impl Iterator<Item = Spanned<Symbol>> + 'a {
+ self.get_crate_data(def.krate).get_struct_field_names(def.index, sess)
+ }
+
+ pub fn struct_field_visibilities_untracked(
+ &self,
+ def: DefId,
+ ) -> impl Iterator<Item = Visibility> + '_ {
+ self.get_crate_data(def.krate).get_struct_field_visibilities(def.index)
+ }
+
+ pub fn ctor_def_id_and_kind_untracked(&self, def: DefId) -> Option<(DefId, CtorKind)> {
+ self.get_crate_data(def.krate).get_ctor_def_id_and_kind(def.index)
+ }
+
+ pub fn visibility_untracked(&self, def: DefId) -> Visibility {
+ self.get_crate_data(def.krate).get_visibility(def.index)
+ }
+
+ pub fn module_children_untracked(&self, def_id: DefId, sess: &Session) -> Vec<ModChild> {
+ let mut result = vec![];
+ self.get_crate_data(def_id.krate).for_each_module_child(
+ def_id.index,
+ |child| result.push(child),
+ sess,
+ );
+ result
+ }
+
+ pub fn load_macro_untracked(&self, id: DefId, sess: &Session) -> LoadedMacro {
+ let _prof_timer = sess.prof.generic_activity("metadata_load_macro");
+
+ let data = self.get_crate_data(id.krate);
+ if data.root.is_proc_macro_crate() {
+ return LoadedMacro::ProcMacro(data.load_proc_macro(id.index, sess));
+ }
+
+ let span = data.get_span(id.index, sess);
+
+ LoadedMacro::MacroDef(
+ ast::Item {
+ ident: data.item_ident(id.index, sess),
+ id: ast::DUMMY_NODE_ID,
+ span,
+ attrs: data.get_item_attrs(id.index, sess).collect(),
+ kind: ast::ItemKind::MacroDef(data.get_macro(id.index, sess)),
+ vis: ast::Visibility {
+ span: span.shrink_to_lo(),
+ kind: ast::VisibilityKind::Inherited,
+ tokens: None,
+ },
+ tokens: None,
+ },
+ data.root.edition,
+ )
+ }
+
+ pub fn fn_has_self_parameter_untracked(&self, def: DefId) -> bool {
+ self.get_crate_data(def.krate).get_fn_has_self_parameter(def.index)
+ }
+
+ pub fn crate_source_untracked(&self, cnum: CrateNum) -> Lrc<CrateSource> {
+ self.get_crate_data(cnum).source.clone()
+ }
+
+ pub fn get_span_untracked(&self, def_id: DefId, sess: &Session) -> Span {
+ self.get_crate_data(def_id.krate).get_span(def_id.index, sess)
+ }
+
+ pub fn def_kind(&self, def: DefId) -> DefKind {
+ self.get_crate_data(def.krate).def_kind(def.index)
+ }
+
+ pub fn crates_untracked(&self) -> impl Iterator<Item = CrateNum> + '_ {
+ self.iter_crate_data().map(|(cnum, _)| cnum)
+ }
+
+ pub fn item_generics_num_lifetimes(&self, def_id: DefId, sess: &Session) -> usize {
+ self.get_crate_data(def_id.krate).get_generics(def_id.index, sess).own_counts().lifetimes
+ }
+
+ pub fn module_expansion_untracked(&self, def_id: DefId, sess: &Session) -> ExpnId {
+ self.get_crate_data(def_id.krate).module_expansion(def_id.index, sess)
+ }
+
+    /// The only public-facing way to traverse all the definitions in a non-local crate.
+ /// Critically useful for this third-party project: <https://github.com/hacspec/hacspec>.
+ /// See <https://github.com/rust-lang/rust/pull/85889> for context.
+ pub fn num_def_ids_untracked(&self, cnum: CrateNum) -> usize {
+ self.get_crate_data(cnum).num_def_ids()
+ }
+
+ pub fn item_attrs_untracked<'a>(
+ &'a self,
+ def_id: DefId,
+ sess: &'a Session,
+ ) -> impl Iterator<Item = ast::Attribute> + 'a {
+ self.get_crate_data(def_id.krate).get_item_attrs(def_id.index, sess)
+ }
+
+ pub fn get_proc_macro_quoted_span_untracked(
+ &self,
+ cnum: CrateNum,
+ id: usize,
+ sess: &Session,
+ ) -> Span {
+ self.get_crate_data(cnum).get_proc_macro_quoted_span(id, sess)
+ }
+
+ /// Decodes all traits in the crate (for rustdoc).
+ pub fn traits_in_crate_untracked(&self, cnum: CrateNum) -> impl Iterator<Item = DefId> + '_ {
+ self.get_crate_data(cnum).get_traits()
+ }
+
+ /// Decodes all trait impls in the crate (for rustdoc).
+ pub fn trait_impls_in_crate_untracked(
+ &self,
+ cnum: CrateNum,
+ ) -> impl Iterator<Item = (DefId, DefId, Option<SimplifiedType>)> + '_ {
+ self.get_crate_data(cnum).get_trait_impls()
+ }
+
+ /// Decodes all inherent impls in the crate (for rustdoc).
+ pub fn inherent_impls_in_crate_untracked(
+ &self,
+ cnum: CrateNum,
+ ) -> impl Iterator<Item = (DefId, DefId)> + '_ {
+ self.get_crate_data(cnum).get_inherent_impls()
+ }
+
+ /// Decodes all incoherent inherent impls in the crate (for rustdoc).
+ pub fn incoherent_impls_in_crate_untracked(
+ &self,
+ cnum: CrateNum,
+ ) -> impl Iterator<Item = DefId> + '_ {
+ self.get_crate_data(cnum).get_all_incoherent_impls()
+ }
+
+ pub fn associated_item_def_ids_untracked<'a>(
+ &'a self,
+ def_id: DefId,
+ sess: &'a Session,
+ ) -> impl Iterator<Item = DefId> + 'a {
+ self.get_crate_data(def_id.krate).get_associated_item_def_ids(def_id.index, sess)
+ }
+
+ pub fn may_have_doc_links_untracked(&self, def_id: DefId) -> bool {
+ self.get_crate_data(def_id.krate).get_may_have_doc_links(def_id.index)
+ }
+}
+
+impl CrateStore for CStore {
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+
+ fn crate_name(&self, cnum: CrateNum) -> Symbol {
+ self.get_crate_data(cnum).root.name
+ }
+
+ fn stable_crate_id(&self, cnum: CrateNum) -> StableCrateId {
+ self.get_crate_data(cnum).root.stable_crate_id
+ }
+
+ fn stable_crate_id_to_crate_num(&self, stable_crate_id: StableCrateId) -> CrateNum {
+ self.stable_crate_ids[&stable_crate_id]
+ }
+
+ /// Returns the `DefKey` for a given `DefId`. This indicates the
+ /// parent `DefId` as well as some idea of what kind of data the
+ /// `DefId` refers to.
+ fn def_key(&self, def: DefId) -> DefKey {
+ self.get_crate_data(def.krate).def_key(def.index)
+ }
+
+ fn def_path(&self, def: DefId) -> DefPath {
+ self.get_crate_data(def.krate).def_path(def.index)
+ }
+
+ fn def_path_hash(&self, def: DefId) -> DefPathHash {
+ self.get_crate_data(def.krate).def_path_hash(def.index)
+ }
+
+ fn def_path_hash_to_def_id(&self, cnum: CrateNum, hash: DefPathHash) -> DefId {
+ let def_index = self.get_crate_data(cnum).def_path_hash_to_def_index(hash);
+ DefId { krate: cnum, index: def_index }
+ }
+
+ fn expn_hash_to_expn_id(
+ &self,
+ sess: &Session,
+ cnum: CrateNum,
+ index_guess: u32,
+ hash: ExpnHash,
+ ) -> ExpnId {
+ self.get_crate_data(cnum).expn_hash_to_expn_id(sess, index_guess, hash)
+ }
+
+ fn import_source_files(&self, sess: &Session, cnum: CrateNum) {
+ self.get_crate_data(cnum).imported_source_files(sess);
+ }
+}
diff --git a/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs b/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs
new file mode 100644
index 000000000..40c94b372
--- /dev/null
+++ b/compiler/rustc_metadata/src/rmeta/def_path_hash_map.rs
@@ -0,0 +1,65 @@
+use crate::rmeta::DecodeContext;
+use crate::rmeta::EncodeContext;
+use crate::rmeta::MetadataBlob;
+use rustc_data_structures::owning_ref::OwningRef;
+use rustc_hir::def_path_hash_map::{Config as HashMapConfig, DefPathHashMap};
+use rustc_middle::parameterized_over_tcx;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_span::def_id::{DefIndex, DefPathHash};
+
+pub(crate) enum DefPathHashMapRef<'tcx> {
+ OwnedFromMetadata(odht::HashTable<HashMapConfig, OwningRef<MetadataBlob, [u8]>>),
+ BorrowedFromTcx(&'tcx DefPathHashMap),
+}
+
+parameterized_over_tcx! {
+ DefPathHashMapRef,
+}
+
+impl DefPathHashMapRef<'_> {
+ #[inline]
+ pub fn def_path_hash_to_def_index(&self, def_path_hash: &DefPathHash) -> DefIndex {
+ match *self {
+ DefPathHashMapRef::OwnedFromMetadata(ref map) => map.get(def_path_hash).unwrap(),
+ DefPathHashMapRef::BorrowedFromTcx(_) => {
+ panic!("DefPathHashMap::BorrowedFromTcx variant only exists for serialization")
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for DefPathHashMapRef<'tcx> {
+ fn encode(&self, e: &mut EncodeContext<'a, 'tcx>) {
+ match *self {
+ DefPathHashMapRef::BorrowedFromTcx(def_path_hash_map) => {
+ let bytes = def_path_hash_map.raw_bytes();
+ e.emit_usize(bytes.len());
+ e.emit_raw_bytes(bytes);
+ }
+ DefPathHashMapRef::OwnedFromMetadata(_) => {
+ panic!("DefPathHashMap::OwnedFromMetadata variant only exists for deserialization")
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx> Decodable<DecodeContext<'a, 'tcx>> for DefPathHashMapRef<'static> {
+ fn decode(d: &mut DecodeContext<'a, 'tcx>) -> DefPathHashMapRef<'static> {
+ // Import TyDecoder so we can access the DecodeContext::position() method
+ use crate::rustc_middle::ty::codec::TyDecoder;
+
+ let len = d.read_usize();
+ let pos = d.position();
+ let o = OwningRef::new(d.blob().clone()).map(|x| &x[pos..pos + len]);
+
+ // Although we already have the data we need via the OwningRef, we still need
+ // to advance the DecodeContext's position so it's in a valid state after
+ // the method. We use read_raw_bytes() for that.
+ let _ = d.read_raw_bytes(len);
+
+ let inner = odht::HashTable::from_raw_bytes(o).unwrap_or_else(|e| {
+ panic!("decode error: {}", e);
+ });
+ DefPathHashMapRef::OwnedFromMetadata(inner)
+ }
+}
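+
+// Note (illustrative): decoding is therefore cheap in the size of the table;
+// the odht hash table is used in place over the borrowed metadata bytes via
+// `OwningRef`, rather than deserializing individual entries eagerly.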
diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs
new file mode 100644
index 000000000..33278367c
--- /dev/null
+++ b/compiler/rustc_metadata/src/rmeta/encoder.rs
@@ -0,0 +1,2302 @@
+use crate::rmeta::def_path_hash_map::DefPathHashMapRef;
+use crate::rmeta::table::TableBuilder;
+use crate::rmeta::*;
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
+use rustc_data_structures::memmap::{Mmap, MmapMut};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{join, par_iter, Lrc, ParallelIterator};
+use rustc_data_structures::temp_dir::MaybeTempDir;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{
+ CrateNum, DefId, DefIndex, LocalDefId, CRATE_DEF_ID, CRATE_DEF_INDEX, LOCAL_CRATE,
+};
+use rustc_hir::definitions::DefPathData;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::lang_items;
+use rustc_hir::{AnonConst, GenericParamKind};
+use rustc_index::bit_set::GrowableBitSet;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::dependency_format::Linkage;
+use rustc_middle::middle::exported_symbols::{
+ metadata_symbol_name, ExportedSymbol, SymbolExportInfo,
+};
+use rustc_middle::mir::interpret;
+use rustc_middle::traits::specialization_graph;
+use rustc_middle::ty::codec::TyEncoder;
+use rustc_middle::ty::fast_reject::{self, SimplifiedType, TreatParams};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, SymbolName, Ty, TyCtxt};
+use rustc_serialize::{opaque, Decodable, Decoder, Encodable, Encoder};
+use rustc_session::config::CrateType;
+use rustc_session::cstore::{ForeignModule, LinkagePreference, NativeLib};
+use rustc_span::hygiene::{ExpnIndex, HygieneEncodeContext, MacroKind};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{
+ self, DebuggerVisualizerFile, ExternalSource, FileName, SourceFile, Span, SyntaxContext,
+};
+use rustc_target::abi::VariantIdx;
+use std::borrow::Borrow;
+use std::hash::Hash;
+use std::io::{Read, Seek, Write};
+use std::iter;
+use std::num::NonZeroUsize;
+use std::path::{Path, PathBuf};
+use tracing::{debug, trace};
+
+pub(super) struct EncodeContext<'a, 'tcx> {
+ opaque: opaque::FileEncoder,
+ tcx: TyCtxt<'tcx>,
+ feat: &'tcx rustc_feature::Features,
+
+ tables: TableBuilders,
+
+ lazy_state: LazyState,
+ type_shorthands: FxHashMap<Ty<'tcx>, usize>,
+ predicate_shorthands: FxHashMap<ty::PredicateKind<'tcx>, usize>,
+
+ interpret_allocs: FxIndexSet<interpret::AllocId>,
+
+ // This is used to speed up Span encoding.
+ // The `usize` is an index into the `MonotonicVec`
+ // that stores the `SourceFile`
+ source_file_cache: (Lrc<SourceFile>, usize),
+ // The indices (into the `SourceMap`'s `MonotonicVec`)
+ // of all of the `SourceFiles` that we need to serialize.
+ // When we serialize a `Span`, we insert the index of its
+ // `SourceFile` into the `GrowableBitSet`.
+ //
+ // This needs to be a `GrowableBitSet` and not a
+ // regular `BitSet` because we may actually import new `SourceFiles`
+ // during metadata encoding, due to executing a query
+ // with a result containing a foreign `Span`.
+ required_source_files: Option<GrowableBitSet<usize>>,
+ is_proc_macro: bool,
+ hygiene_ctxt: &'a HygieneEncodeContext,
+}
+
+/// If the current crate is a proc-macro, returns early with `LazyArray::empty()`.
+/// This is useful for skipping the encoding of things that aren't needed
+/// for proc-macro crates.
+macro_rules! empty_proc_macro {
+ ($self:ident) => {
+ if $self.is_proc_macro {
+ return LazyArray::empty();
+ }
+ };
+}
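+
+// Usage sketch (illustrative): encoding methods that produce a `LazyArray`
+// can start with `empty_proc_macro!(self);` so they return an empty array
+// immediately when the crate being encoded is a proc-macro.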
+
+macro_rules! encoder_methods {
+ ($($name:ident($ty:ty);)*) => {
+ $(fn $name(&mut self, value: $ty) {
+ self.opaque.$name(value)
+ })*
+ }
+}
+
+impl<'a, 'tcx> Encoder for EncodeContext<'a, 'tcx> {
+ encoder_methods! {
+ emit_usize(usize);
+ emit_u128(u128);
+ emit_u64(u64);
+ emit_u32(u32);
+ emit_u16(u16);
+ emit_u8(u8);
+
+ emit_isize(isize);
+ emit_i128(i128);
+ emit_i64(i64);
+ emit_i32(i32);
+ emit_i16(i16);
+ emit_i8(i8);
+
+ emit_bool(bool);
+ emit_f64(f64);
+ emit_f32(f32);
+ emit_char(char);
+ emit_str(&str);
+ emit_raw_bytes(&[u8]);
+ }
+}
+
+impl<'a, 'tcx, T> Encodable<EncodeContext<'a, 'tcx>> for LazyValue<T> {
+ fn encode(&self, e: &mut EncodeContext<'a, 'tcx>) {
+ e.emit_lazy_distance(self.position);
+ }
+}
+
+impl<'a, 'tcx, T> Encodable<EncodeContext<'a, 'tcx>> for LazyArray<T> {
+ fn encode(&self, e: &mut EncodeContext<'a, 'tcx>) {
+ e.emit_usize(self.num_elems);
+ if self.num_elems > 0 {
+ e.emit_lazy_distance(self.position)
+ }
+ }
+}
+
+impl<'a, 'tcx, I, T> Encodable<EncodeContext<'a, 'tcx>> for LazyTable<I, T> {
+ fn encode(&self, e: &mut EncodeContext<'a, 'tcx>) {
+ e.emit_usize(self.encoded_size);
+ e.emit_lazy_distance(self.position);
+ }
+}
+
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for CrateNum {
+ fn encode(&self, s: &mut EncodeContext<'a, 'tcx>) {
+ if *self != LOCAL_CRATE && s.is_proc_macro {
+ panic!("Attempted to encode non-local CrateNum {:?} for proc-macro crate", self);
+ }
+ s.emit_u32(self.as_u32());
+ }
+}
+
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for DefIndex {
+ fn encode(&self, s: &mut EncodeContext<'a, 'tcx>) {
+ s.emit_u32(self.as_u32());
+ }
+}
+
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for ExpnIndex {
+ fn encode(&self, s: &mut EncodeContext<'a, 'tcx>) {
+ s.emit_u32(self.as_u32());
+ }
+}
+
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SyntaxContext {
+ fn encode(&self, s: &mut EncodeContext<'a, 'tcx>) {
+ rustc_span::hygiene::raw_encode_syntax_context(*self, &s.hygiene_ctxt, s);
+ }
+}
+
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for ExpnId {
+ fn encode(&self, s: &mut EncodeContext<'a, 'tcx>) {
+ if self.krate == LOCAL_CRATE {
+ // We will only write details for local expansions. Non-local expansions will fetch
+ // data from the corresponding crate's metadata.
+ // FIXME(#43047) FIXME(#74731) We may eventually want to avoid relying on external
+ // metadata from proc-macro crates.
+ s.hygiene_ctxt.schedule_expn_data_for_encoding(*self);
+ }
+ self.krate.encode(s);
+ self.local_id.encode(s);
+ }
+}
+
+impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for Span {
+ fn encode(&self, s: &mut EncodeContext<'a, 'tcx>) {
+ let span = self.data();
+
+ // Don't serialize any `SyntaxContext`s from a proc-macro crate,
+ // since we don't load proc-macro dependencies during serialization.
+ // This means that any hygiene information from macros used *within*
+ // a proc-macro crate (e.g. invoking a macro that expands to a proc-macro
+ // definition) will be lost.
+ //
+ // This can show up in two ways:
+ //
+        // 1. Any hygiene information associated with the identifier of
+ // a proc macro (e.g. `#[proc_macro] pub fn $name`) will be lost.
+ // Since proc-macros can only be invoked from a different crate,
+ // real code should never need to care about this.
+ //
+ // 2. Using `Span::def_site` or `Span::mixed_site` will not
+ // include any hygiene information associated with the definition
+ // site. This means that a proc-macro cannot emit a `$crate`
+ // identifier which resolves to one of its dependencies,
+ // which also should never come up in practice.
+ //
+ // Additionally, this affects `Span::parent`, and any other
+ // span inspection APIs that would otherwise allow traversing
+ // the `SyntaxContexts` associated with a span.
+ //
+ // None of these user-visible effects should result in any
+ // cross-crate inconsistencies (getting one behavior in the same
+ // crate, and a different behavior in another crate) due to the
+ // limited surface that proc-macros can expose.
+ //
+ // IMPORTANT: If this is ever changed, be sure to update
+ // `rustc_span::hygiene::raw_encode_expn_id` to handle
+ // encoding `ExpnData` for proc-macro crates.
+ if s.is_proc_macro {
+ SyntaxContext::root().encode(s);
+ } else {
+ span.ctxt.encode(s);
+ }
+
+ if self.is_dummy() {
+ return TAG_PARTIAL_SPAN.encode(s);
+ }
+
+ // The Span infrastructure should make sure that this invariant holds:
+ debug_assert!(span.lo <= span.hi);
+
+ if !s.source_file_cache.0.contains(span.lo) {
+ let source_map = s.tcx.sess.source_map();
+ let source_file_index = source_map.lookup_source_file_idx(span.lo);
+ s.source_file_cache =
+ (source_map.files()[source_file_index].clone(), source_file_index);
+ }
+
+ if !s.source_file_cache.0.contains(span.hi) {
+ // Unfortunately, macro expansion still sometimes generates Spans
+            // that are malformed in this way.
+ return TAG_PARTIAL_SPAN.encode(s);
+ }
+
+ let source_files = s.required_source_files.as_mut().expect("Already encoded SourceMap!");
+ // Record the fact that we need to encode the data for this `SourceFile`
+ source_files.insert(s.source_file_cache.1);
+
+ // There are two possible cases here:
+ // 1. This span comes from a 'foreign' crate - e.g. some crate upstream of the
+ // crate we are writing metadata for. When the metadata for *this* crate gets
+ // deserialized, the deserializer will need to know which crate it originally came
+ // from. We use `TAG_VALID_SPAN_FOREIGN` to indicate that a `CrateNum` should
+ // be deserialized after the rest of the span data, which tells the deserializer
+ // which crate contains the source map information.
+ // 2. This span comes from our own crate. No special handling is needed - we just
+ // write `TAG_VALID_SPAN_LOCAL` to let the deserializer know that it should use
+ // our own source map information.
+ //
+ // If we're a proc-macro crate, we always treat this as a local `Span`.
+ // In `encode_source_map`, we serialize foreign `SourceFile`s into our metadata
+ // if we're a proc-macro crate.
+ // This allows us to avoid loading the dependencies of proc-macro crates: all of
+ // the information we need to decode `Span`s is stored in the proc-macro crate.
+ let (tag, lo, hi) = if s.source_file_cache.0.is_imported() && !s.is_proc_macro {
+ // To simplify deserialization, we 'rebase' this span onto the crate it originally came from
+            // (the crate that 'owns' the file it references). These rebased 'lo' and 'hi' values
+ // are relative to the source map information for the 'foreign' crate whose CrateNum
+ // we write into the metadata. This allows `imported_source_files` to binary
+ // search through the 'foreign' crate's source map information, using the
+ // deserialized 'lo' and 'hi' values directly.
+ //
+ // All of this logic ensures that the final result of deserialization is a 'normal'
+ // Span that can be used without any additional trouble.
+ let external_start_pos = {
+ // Introduce a new scope so that we drop the 'lock()' temporary
+ match &*s.source_file_cache.0.external_src.lock() {
+ ExternalSource::Foreign { original_start_pos, .. } => *original_start_pos,
+ src => panic!("Unexpected external source {:?}", src),
+ }
+ };
+ let lo = (span.lo - s.source_file_cache.0.start_pos) + external_start_pos;
+ let hi = (span.hi - s.source_file_cache.0.start_pos) + external_start_pos;
+
+ (TAG_VALID_SPAN_FOREIGN, lo, hi)
+ } else {
+ (TAG_VALID_SPAN_LOCAL, span.lo, span.hi)
+ };
+
+ tag.encode(s);
+ lo.encode(s);
+
+        // Encode the length, which is usually smaller than span.hi and thus
+        // profits more from the variable-length integer encoding that we use.
+ let len = hi - lo;
+ len.encode(s);
+
+ if tag == TAG_VALID_SPAN_FOREIGN {
+ // This needs to be two lines to avoid holding the `s.source_file_cache`
+ // while calling `cnum.encode(s)`
+ let cnum = s.source_file_cache.0.cnum;
+ cnum.encode(s);
+ }
+ }
+}
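+
+// Illustrative wire layout (not normative): an encoded `Span` is the `ctxt`,
+// then either `TAG_PARTIAL_SPAN`, or `TAG_VALID_SPAN_LOCAL`/`TAG_VALID_SPAN_FOREIGN`
+// followed by `lo` and `len = hi - lo` (small lengths are what make the
+// variable-length integer encoding pay off), with a trailing `CrateNum` in
+// the foreign case.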
+
+impl<'a, 'tcx> TyEncoder for EncodeContext<'a, 'tcx> {
+ const CLEAR_CROSS_CRATE: bool = true;
+
+ type I = TyCtxt<'tcx>;
+
+ fn position(&self) -> usize {
+ self.opaque.position()
+ }
+
+ fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize> {
+ &mut self.type_shorthands
+ }
+
+ fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::PredicateKind<'tcx>, usize> {
+ &mut self.predicate_shorthands
+ }
+
+ fn encode_alloc_id(&mut self, alloc_id: &rustc_middle::mir::interpret::AllocId) {
+ let (index, _) = self.interpret_allocs.insert_full(*alloc_id);
+
+ index.encode(self);
+ }
+}
+
+// Shorthand for `$self.$tables.$table.set($def_id.index, $self.lazy($value))`, which would
+// normally need extra variables to avoid errors about multiple mutable borrows.
+macro_rules! record {
+ ($self:ident.$tables:ident.$table:ident[$def_id:expr] <- $value:expr) => {{
+ {
+ let value = $value;
+ let lazy = $self.lazy(value);
+ $self.$tables.$table.set($def_id.index, lazy);
+ }
+ }};
+}
+
+// Shorthand for `$self.$tables.$table.set($def_id.index, $self.lazy_array($value))`, which would
+// normally need extra variables to avoid errors about multiple mutable borrows.
+macro_rules! record_array {
+ ($self:ident.$tables:ident.$table:ident[$def_id:expr] <- $value:expr) => {{
+ {
+ let value = $value;
+ let lazy = $self.lazy_array(value);
+ $self.$tables.$table.set($def_id.index, lazy);
+ }
+ }};
+}
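+
+// Usage sketch (illustrative; this exact form appears further below):
+//     record!(self.tables.def_span[def_id] <- tcx.def_span(def_id));
+// computes the value, wraps it with `self.lazy(..)`, and stores it under
+// `def_id.index`, all inside one statement, sidestepping a second
+// `&mut self` borrow.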
+
+impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
+ fn emit_lazy_distance(&mut self, position: NonZeroUsize) {
+ let pos = position.get();
+ let distance = match self.lazy_state {
+ LazyState::NoNode => bug!("emit_lazy_distance: outside of a metadata node"),
+ LazyState::NodeStart(start) => {
+ let start = start.get();
+ assert!(pos <= start);
+ start - pos
+ }
+ LazyState::Previous(last_pos) => {
+ assert!(
+ last_pos <= position,
+ "make sure that the calls to `lazy*` \
+ are in the same order as the metadata fields",
+ );
+ position.get() - last_pos.get()
+ }
+ };
+ self.lazy_state = LazyState::Previous(NonZeroUsize::new(pos).unwrap());
+ self.emit_usize(distance);
+ }
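+
+    // Worked example (illustrative): after a previous lazy value at position
+    // `p` (`LazyState::Previous(p)`), a new value at position `q >= p` is
+    // written as the distance `q - p`; the decoder replays the same state
+    // machine, so absolute positions never have to be stored.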
+
+ fn lazy<T: ParameterizedOverTcx, B: Borrow<T::Value<'tcx>>>(&mut self, value: B) -> LazyValue<T>
+ where
+ T::Value<'tcx>: Encodable<EncodeContext<'a, 'tcx>>,
+ {
+ let pos = NonZeroUsize::new(self.position()).unwrap();
+
+ assert_eq!(self.lazy_state, LazyState::NoNode);
+ self.lazy_state = LazyState::NodeStart(pos);
+ value.borrow().encode(self);
+ self.lazy_state = LazyState::NoNode;
+
+ assert!(pos.get() <= self.position());
+
+ LazyValue::from_position(pos)
+ }
+
+ fn lazy_array<T: ParameterizedOverTcx, I: IntoIterator<Item = B>, B: Borrow<T::Value<'tcx>>>(
+ &mut self,
+ values: I,
+ ) -> LazyArray<T>
+ where
+ T::Value<'tcx>: Encodable<EncodeContext<'a, 'tcx>>,
+ {
+ let pos = NonZeroUsize::new(self.position()).unwrap();
+
+ assert_eq!(self.lazy_state, LazyState::NoNode);
+ self.lazy_state = LazyState::NodeStart(pos);
+ let len = values.into_iter().map(|value| value.borrow().encode(self)).count();
+ self.lazy_state = LazyState::NoNode;
+
+ assert!(pos.get() <= self.position());
+
+ LazyArray::from_position_and_num_elems(pos, len)
+ }
+
+ fn encode_info_for_items(&mut self) {
+ self.encode_info_for_mod(CRATE_DEF_ID, self.tcx.hir().root_module());
+
+ // Proc-macro crates only export proc-macro items, which are looked
+ // up using `proc_macro_data`
+ if self.is_proc_macro {
+ return;
+ }
+
+ self.tcx.hir().visit_all_item_likes_in_crate(self);
+ }
+
+ fn encode_def_path_table(&mut self) {
+ let table = self.tcx.def_path_table();
+ if self.is_proc_macro {
+ for def_index in std::iter::once(CRATE_DEF_INDEX)
+ .chain(self.tcx.resolutions(()).proc_macros.iter().map(|p| p.local_def_index))
+ {
+ let def_key = self.lazy(table.def_key(def_index));
+ let def_path_hash = table.def_path_hash(def_index);
+ self.tables.def_keys.set(def_index, def_key);
+ self.tables.def_path_hashes.set(def_index, def_path_hash);
+ }
+ } else {
+ for (def_index, def_key, def_path_hash) in table.enumerated_keys_and_path_hashes() {
+ let def_key = self.lazy(def_key);
+ self.tables.def_keys.set(def_index, def_key);
+ self.tables.def_path_hashes.set(def_index, *def_path_hash);
+ }
+ }
+ }
+
+ fn encode_def_path_hash_map(&mut self) -> LazyValue<DefPathHashMapRef<'static>> {
+ self.lazy(DefPathHashMapRef::BorrowedFromTcx(self.tcx.def_path_hash_to_def_index_map()))
+ }
+
+ fn encode_source_map(&mut self) -> LazyArray<rustc_span::SourceFile> {
+ let source_map = self.tcx.sess.source_map();
+ let all_source_files = source_map.files();
+
+ // By replacing the `Option` with `None`, we ensure that we can't
+ // accidentally serialize any more `Span`s after the source map encoding
+ // is done.
+ let required_source_files = self.required_source_files.take().unwrap();
+
+ let working_directory = &self.tcx.sess.opts.working_dir;
+
+ let adapted = all_source_files
+ .iter()
+ .enumerate()
+ .filter(|(idx, source_file)| {
+ // Only serialize `SourceFile`s that were used
+ // during the encoding of a `Span`
+ required_source_files.contains(*idx) &&
+ // Don't serialize imported `SourceFile`s, unless
+ // we're in a proc-macro crate.
+ (!source_file.is_imported() || self.is_proc_macro)
+ })
+ .map(|(_, source_file)| {
+ // At export time we expand all source file paths to absolute paths because
+ // downstream compilation sessions can have a different compiler working
+ // directory, so relative paths from this or any other upstream crate
+ // won't be valid anymore.
+ //
+ // At this point we also erase the actual on-disk path and only keep
+ // the remapped version -- as is necessary for reproducible builds.
+ match source_file.name {
+ FileName::Real(ref original_file_name) => {
+ let adapted_file_name =
+ source_map.path_mapping().to_embeddable_absolute_path(
+ original_file_name.clone(),
+ working_directory,
+ );
+
+ if adapted_file_name != *original_file_name {
+ let mut adapted: SourceFile = (**source_file).clone();
+ adapted.name = FileName::Real(adapted_file_name);
+ adapted.name_hash = {
+ let mut hasher: StableHasher = StableHasher::new();
+ adapted.name.hash(&mut hasher);
+ hasher.finish::<u128>()
+ };
+ Lrc::new(adapted)
+ } else {
+ // Nothing to adapt
+ source_file.clone()
+ }
+ }
+ // expanded code, not from a file
+ _ => source_file.clone(),
+ }
+ })
+ .map(|mut source_file| {
+ // We're serializing this `SourceFile` into our crate metadata,
+ // so mark it as coming from this crate.
+ // This also ensures that we don't try to deserialize the
+ // `CrateNum` for a proc-macro dependency - since proc macro
+ // dependencies aren't loaded when we deserialize a proc-macro,
+ // trying to remap the `CrateNum` would fail.
+ if self.is_proc_macro {
+ Lrc::make_mut(&mut source_file).cnum = LOCAL_CRATE;
+ }
+ source_file
+ })
+ .collect::<Vec<_>>();
+
+ self.lazy_array(adapted.iter().map(|rc| &**rc))
+ }
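+
+    // Example (illustrative): a `SourceFile` named `src/lib.rs` relative to a
+    // working directory of `/home/user/crate` is embedded as the absolute
+    // `/home/user/crate/src/lib.rs` (after any path remapping), so a
+    // downstream session with a different working directory can still
+    // resolve it.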
+
+ fn encode_crate_root(&mut self) -> LazyValue<CrateRoot> {
+ let tcx = self.tcx;
+ let mut i = 0;
+ let preamble_bytes = self.position() - i;
+
+ // Encode the crate deps
+ i = self.position();
+ let crate_deps = self.encode_crate_deps();
+ let dylib_dependency_formats = self.encode_dylib_dependency_formats();
+ let dep_bytes = self.position() - i;
+
+ // Encode the lib features.
+ i = self.position();
+ let lib_features = self.encode_lib_features();
+ let lib_feature_bytes = self.position() - i;
+
+ // Encode the stability implications.
+ i = self.position();
+ let stability_implications = self.encode_stability_implications();
+ let stability_implications_bytes = self.position() - i;
+
+ // Encode the language items.
+ i = self.position();
+ let lang_items = self.encode_lang_items();
+ let lang_items_missing = self.encode_lang_items_missing();
+ let lang_item_bytes = self.position() - i;
+
+ // Encode the diagnostic items.
+ i = self.position();
+ let diagnostic_items = self.encode_diagnostic_items();
+ let diagnostic_item_bytes = self.position() - i;
+
+ // Encode the native libraries used
+ i = self.position();
+ let native_libraries = self.encode_native_libraries();
+ let native_lib_bytes = self.position() - i;
+
+ i = self.position();
+ let foreign_modules = self.encode_foreign_modules();
+ let foreign_modules_bytes = self.position() - i;
+
+ // Encode DefPathTable
+ i = self.position();
+ self.encode_def_path_table();
+ let def_path_table_bytes = self.position() - i;
+
+ // Encode the def IDs of traits, for rustdoc and diagnostics.
+ i = self.position();
+ let traits = self.encode_traits();
+ let traits_bytes = self.position() - i;
+
+ // Encode the def IDs of impls, for coherence checking.
+ i = self.position();
+ let impls = self.encode_impls();
+ let impls_bytes = self.position() - i;
+
+ i = self.position();
+ let incoherent_impls = self.encode_incoherent_impls();
+ let incoherent_impls_bytes = self.position() - i;
+
+ // Encode MIR.
+ i = self.position();
+ self.encode_mir();
+ let mir_bytes = self.position() - i;
+
+ // Encode the items.
+ i = self.position();
+ self.encode_def_ids();
+ self.encode_info_for_items();
+ let item_bytes = self.position() - i;
+
+ // Encode the allocation index
+ i = self.position();
+ let interpret_alloc_index = {
+ let mut interpret_alloc_index = Vec::new();
+ let mut n = 0;
+ trace!("beginning to encode alloc ids");
+ loop {
+ let new_n = self.interpret_allocs.len();
+                // If no new ids were found since the last pass, we are done;
+                // otherwise, serialize the newly found ids as well, since
+                // encoding them may in turn discover more.
+                if n == new_n {
+                    break;
+                }
+ trace!("encoding {} further alloc ids", new_n - n);
+ for idx in n..new_n {
+ let id = self.interpret_allocs[idx];
+ let pos = self.position() as u32;
+ interpret_alloc_index.push(pos);
+ interpret::specialized_encode_alloc_id(self, tcx, id);
+ }
+ n = new_n;
+ }
+ self.lazy_array(interpret_alloc_index)
+ };
+ let interpret_alloc_index_bytes = self.position() - i;
+
+ // Encode the proc macro data. This affects 'tables',
+ // so we need to do this before we encode the tables.
+ // This overwrites def_keys, so it must happen after encode_def_path_table.
+ i = self.position();
+ let proc_macro_data = self.encode_proc_macros();
+ let proc_macro_data_bytes = self.position() - i;
+
+ i = self.position();
+ let tables = self.tables.encode(&mut self.opaque);
+ let tables_bytes = self.position() - i;
+
+ i = self.position();
+ let debugger_visualizers = self.encode_debugger_visualizers();
+ let debugger_visualizers_bytes = self.position() - i;
+
+ // Encode exported symbols info. This is prefetched in `encode_metadata` so we encode
+ // this as late as possible to give the prefetching as much time as possible to complete.
+ i = self.position();
+ let exported_symbols = tcx.exported_symbols(LOCAL_CRATE);
+ let exported_symbols = self.encode_exported_symbols(&exported_symbols);
+ let exported_symbols_bytes = self.position() - i;
+
+        // Encode the hygiene data.
+ // IMPORTANT: this *must* be the last thing that we encode (other than `SourceMap`). The process
+ // of encoding other items (e.g. `optimized_mir`) may cause us to load
+ // data from the incremental cache. If this causes us to deserialize a `Span`,
+ // then we may load additional `SyntaxContext`s into the global `HygieneData`.
+ // Therefore, we need to encode the hygiene data last to ensure that we encode
+ // any `SyntaxContext`s that might be used.
+ i = self.position();
+ let (syntax_contexts, expn_data, expn_hashes) = self.encode_hygiene();
+ let hygiene_bytes = self.position() - i;
+
+ i = self.position();
+ let def_path_hash_map = self.encode_def_path_hash_map();
+ let def_path_hash_map_bytes = self.position() - i;
+
+ // Encode source_map. This needs to be done last,
+ // since encoding `Span`s tells us which `SourceFiles` we actually
+ // need to encode.
+ i = self.position();
+ let source_map = self.encode_source_map();
+ let source_map_bytes = self.position() - i;
+
+ i = self.position();
+ let attrs = tcx.hir().krate_attrs();
+ let has_default_lib_allocator = tcx.sess.contains_name(&attrs, sym::default_lib_allocator);
+ let root = self.lazy(CrateRoot {
+ name: tcx.crate_name(LOCAL_CRATE),
+ extra_filename: tcx.sess.opts.cg.extra_filename.clone(),
+ triple: tcx.sess.opts.target_triple.clone(),
+ hash: tcx.crate_hash(LOCAL_CRATE),
+ stable_crate_id: tcx.def_path_hash(LOCAL_CRATE.as_def_id()).stable_crate_id(),
+ required_panic_strategy: tcx.required_panic_strategy(LOCAL_CRATE),
+ panic_in_drop_strategy: tcx.sess.opts.unstable_opts.panic_in_drop,
+ edition: tcx.sess.edition(),
+ has_global_allocator: tcx.has_global_allocator(LOCAL_CRATE),
+ has_panic_handler: tcx.has_panic_handler(LOCAL_CRATE),
+ has_default_lib_allocator,
+ proc_macro_data,
+ debugger_visualizers,
+ compiler_builtins: tcx.sess.contains_name(&attrs, sym::compiler_builtins),
+ needs_allocator: tcx.sess.contains_name(&attrs, sym::needs_allocator),
+ needs_panic_runtime: tcx.sess.contains_name(&attrs, sym::needs_panic_runtime),
+ no_builtins: tcx.sess.contains_name(&attrs, sym::no_builtins),
+ panic_runtime: tcx.sess.contains_name(&attrs, sym::panic_runtime),
+ profiler_runtime: tcx.sess.contains_name(&attrs, sym::profiler_runtime),
+ symbol_mangling_version: tcx.sess.opts.get_symbol_mangling_version(),
+
+ crate_deps,
+ dylib_dependency_formats,
+ lib_features,
+ stability_implications,
+ lang_items,
+ diagnostic_items,
+ lang_items_missing,
+ native_libraries,
+ foreign_modules,
+ source_map,
+ traits,
+ impls,
+ incoherent_impls,
+ exported_symbols,
+ interpret_alloc_index,
+ tables,
+ syntax_contexts,
+ expn_data,
+ expn_hashes,
+ def_path_hash_map,
+ });
+ let final_bytes = self.position() - i;
+
+ let total_bytes = self.position();
+
+ let computed_total_bytes = preamble_bytes
+ + dep_bytes
+ + lib_feature_bytes
+ + stability_implications_bytes
+ + lang_item_bytes
+ + diagnostic_item_bytes
+ + native_lib_bytes
+ + foreign_modules_bytes
+ + def_path_table_bytes
+ + traits_bytes
+ + impls_bytes
+ + incoherent_impls_bytes
+ + mir_bytes
+ + item_bytes
+ + interpret_alloc_index_bytes
+ + proc_macro_data_bytes
+ + tables_bytes
+ + debugger_visualizers_bytes
+ + exported_symbols_bytes
+ + hygiene_bytes
+ + def_path_hash_map_bytes
+ + source_map_bytes
+ + final_bytes;
+ assert_eq!(total_bytes, computed_total_bytes);
+
+ if tcx.sess.meta_stats() {
+ self.opaque.flush();
+
+ // Rewind and re-read all the metadata to count the zero bytes we wrote.
+ let pos_before_rewind = self.opaque.file().stream_position().unwrap();
+ let mut zero_bytes = 0;
+ self.opaque.file().rewind().unwrap();
+ let file = std::io::BufReader::new(self.opaque.file());
+ for e in file.bytes() {
+ if e.unwrap() == 0 {
+ zero_bytes += 1;
+ }
+ }
+ assert_eq!(self.opaque.file().stream_position().unwrap(), pos_before_rewind);
+
+ let perc = |bytes| (bytes * 100) as f64 / total_bytes as f64;
+ let p = |label, bytes| {
+ eprintln!("{:>21}: {:>8} bytes ({:4.1}%)", label, bytes, perc(bytes));
+ };
+
+            eprintln!();
+ eprintln!(
+ "{} metadata bytes, of which {} bytes ({:.1}%) are zero",
+ total_bytes,
+ zero_bytes,
+ perc(zero_bytes)
+ );
+ p("preamble", preamble_bytes);
+ p("dep", dep_bytes);
+ p("lib feature", lib_feature_bytes);
+ p("stability_implications", stability_implications_bytes);
+ p("lang item", lang_item_bytes);
+ p("diagnostic item", diagnostic_item_bytes);
+ p("native lib", native_lib_bytes);
+ p("foreign modules", foreign_modules_bytes);
+ p("def-path table", def_path_table_bytes);
+ p("traits", traits_bytes);
+ p("impls", impls_bytes);
+ p("incoherent_impls", incoherent_impls_bytes);
+ p("mir", mir_bytes);
+ p("item", item_bytes);
+ p("interpret_alloc_index", interpret_alloc_index_bytes);
+ p("proc-macro-data", proc_macro_data_bytes);
+ p("tables", tables_bytes);
+ p("debugger visualizers", debugger_visualizers_bytes);
+ p("exported symbols", exported_symbols_bytes);
+ p("hygiene", hygiene_bytes);
+ p("def-path hashes", def_path_hash_map_bytes);
+ p("source_map", source_map_bytes);
+ p("final", final_bytes);
+            eprintln!();
+ }
+
+ root
+ }
+}
+
+fn should_encode_visibility(def_kind: DefKind) -> bool {
+ match def_kind {
+ DefKind::Mod
+ | DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Trait
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy
+ | DefKind::Fn
+ | DefKind::Const
+ | DefKind::Static(..)
+ | DefKind::Ctor(..)
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::Macro(..)
+ | DefKind::Use
+ | DefKind::ForeignMod
+ | DefKind::OpaqueTy
+ | DefKind::Impl
+ | DefKind::Field => true,
+ DefKind::TyParam
+ | DefKind::ConstParam
+ | DefKind::LifetimeParam
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::GlobalAsm
+ | DefKind::Closure
+ | DefKind::Generator
+ | DefKind::ExternCrate => false,
+ }
+}
+
+fn should_encode_stability(def_kind: DefKind) -> bool {
+ match def_kind {
+ DefKind::Mod
+ | DefKind::Ctor(..)
+ | DefKind::Variant
+ | DefKind::Field
+ | DefKind::Struct
+ | DefKind::AssocTy
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::TyParam
+ | DefKind::ConstParam
+ | DefKind::Static(..)
+ | DefKind::Const
+ | DefKind::Fn
+ | DefKind::ForeignMod
+ | DefKind::TyAlias
+ | DefKind::OpaqueTy
+ | DefKind::Enum
+ | DefKind::Union
+ | DefKind::Impl
+ | DefKind::Trait
+ | DefKind::TraitAlias
+ | DefKind::Macro(..)
+ | DefKind::ForeignTy => true,
+ DefKind::Use
+ | DefKind::LifetimeParam
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::GlobalAsm
+ | DefKind::Closure
+ | DefKind::Generator
+ | DefKind::ExternCrate => false,
+ }
+}
+
+/// Whether we should encode MIR.
+///
+/// Computing, optimizing and encoding the MIR is a relatively expensive operation.
+/// We want to avoid this work when not required. Therefore:
+/// - we only compute `mir_for_ctfe` on items with const-eval semantics;
+/// - we skip `optimized_mir` for check runs.
+///
+/// Returns a pair of booleans: whether to encode MIR for CTFE
+/// (`mir_for_ctfe`), and whether to encode MIR for LLVM (`optimized_mir`).
+fn should_encode_mir(tcx: TyCtxt<'_>, def_id: LocalDefId) -> (bool, bool) {
+ match tcx.def_kind(def_id) {
+ // Constructors
+ DefKind::Ctor(_, _) => {
+ let mir_opt_base = tcx.sess.opts.output_types.should_codegen()
+ || tcx.sess.opts.unstable_opts.always_encode_mir;
+ (true, mir_opt_base)
+ }
+ // Constants
+ DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::AssocConst
+ | DefKind::Static(..)
+ | DefKind::Const => (true, false),
+ // Full-fledged functions
+ DefKind::AssocFn | DefKind::Fn => {
+ let generics = tcx.generics_of(def_id);
+ let needs_inline = (generics.requires_monomorphization(tcx)
+ || tcx.codegen_fn_attrs(def_id).requests_inline())
+ && tcx.sess.opts.output_types.should_codegen();
+ // The function has a `const` modifier or is in a `#[const_trait]`.
+ let is_const_fn = tcx.is_const_fn_raw(def_id.to_def_id())
+ || tcx.is_const_default_method(def_id.to_def_id());
+ let always_encode_mir = tcx.sess.opts.unstable_opts.always_encode_mir;
+ (is_const_fn, needs_inline || always_encode_mir)
+ }
+ // Closures can't be const fn.
+ DefKind::Closure => {
+ let generics = tcx.generics_of(def_id);
+ let needs_inline = (generics.requires_monomorphization(tcx)
+ || tcx.codegen_fn_attrs(def_id).requests_inline())
+ && tcx.sess.opts.output_types.should_codegen();
+ let always_encode_mir = tcx.sess.opts.unstable_opts.always_encode_mir;
+ (false, needs_inline || always_encode_mir)
+ }
+ // Generators require optimized MIR to compute layout.
+ DefKind::Generator => (false, true),
+ // The others don't have MIR.
+ _ => (false, false),
+ }
+}
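+
+// Example (illustrative): a non-generic, non-inline `const fn` in a check
+// build yields `(true, false)`: `mir_for_ctfe` is needed for const
+// evaluation, but `optimized_mir` is skipped since nothing is codegenned.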
+
+fn should_encode_variances(def_kind: DefKind) -> bool {
+ match def_kind {
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Fn
+ | DefKind::Ctor(..)
+ | DefKind::AssocFn => true,
+ DefKind::Mod
+ | DefKind::Field
+ | DefKind::AssocTy
+ | DefKind::AssocConst
+ | DefKind::TyParam
+ | DefKind::ConstParam
+ | DefKind::Static(..)
+ | DefKind::Const
+ | DefKind::ForeignMod
+ | DefKind::TyAlias
+ | DefKind::OpaqueTy
+ | DefKind::Impl
+ | DefKind::Trait
+ | DefKind::TraitAlias
+ | DefKind::Macro(..)
+ | DefKind::ForeignTy
+ | DefKind::Use
+ | DefKind::LifetimeParam
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::GlobalAsm
+ | DefKind::Closure
+ | DefKind::Generator
+ | DefKind::ExternCrate => false,
+ }
+}
+
+fn should_encode_generics(def_kind: DefKind) -> bool {
+ match def_kind {
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Trait
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy
+ | DefKind::Fn
+ | DefKind::Const
+ | DefKind::Static(..)
+ | DefKind::Ctor(..)
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::OpaqueTy
+ | DefKind::Impl
+ | DefKind::Field
+ | DefKind::TyParam
+ | DefKind::Closure
+ | DefKind::Generator => true,
+ DefKind::Mod
+ | DefKind::ForeignMod
+ | DefKind::ConstParam
+ | DefKind::Macro(..)
+ | DefKind::Use
+ | DefKind::LifetimeParam
+ | DefKind::GlobalAsm
+ | DefKind::ExternCrate => false,
+ }
+}
+
+impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
+ fn encode_attrs(&mut self, def_id: LocalDefId) {
+ let mut attrs = self
+ .tcx
+ .hir()
+ .attrs(self.tcx.hir().local_def_id_to_hir_id(def_id))
+ .iter()
+ .filter(|attr| !rustc_feature::is_builtin_only_local(attr.name_or_empty()));
+
+ record_array!(self.tables.attributes[def_id.to_def_id()] <- attrs.clone());
+ if attrs.any(|attr| attr.may_have_doc_links()) {
+ self.tables.may_have_doc_links.set(def_id.local_def_index, ());
+ }
+ }
+
+ fn encode_def_ids(&mut self) {
+ if self.is_proc_macro {
+ return;
+ }
+ let tcx = self.tcx;
+ for local_id in tcx.iter_local_def_id() {
+ let def_id = local_id.to_def_id();
+ let def_kind = tcx.opt_def_kind(local_id);
+ let Some(def_kind) = def_kind else { continue };
+ self.tables.opt_def_kind.set(def_id.index, def_kind);
+ record!(self.tables.def_span[def_id] <- tcx.def_span(def_id));
+ self.encode_attrs(local_id);
+ record!(self.tables.expn_that_defined[def_id] <- self.tcx.expn_that_defined(def_id));
+ if let Some(ident_span) = tcx.def_ident_span(def_id) {
+ record!(self.tables.def_ident_span[def_id] <- ident_span);
+ }
+ if def_kind.has_codegen_attrs() {
+ record!(self.tables.codegen_fn_attrs[def_id] <- self.tcx.codegen_fn_attrs(def_id));
+ }
+ if should_encode_visibility(def_kind) {
+ record!(self.tables.visibility[def_id] <- self.tcx.visibility(def_id));
+ }
+ if should_encode_stability(def_kind) {
+ self.encode_stability(def_id);
+ self.encode_const_stability(def_id);
+ self.encode_deprecation(def_id);
+ }
+ if should_encode_variances(def_kind) {
+ let v = self.tcx.variances_of(def_id);
+ record_array!(self.tables.variances_of[def_id] <- v);
+ }
+ if should_encode_generics(def_kind) {
+ let g = tcx.generics_of(def_id);
+ record!(self.tables.generics_of[def_id] <- g);
+ record!(self.tables.explicit_predicates_of[def_id] <- self.tcx.explicit_predicates_of(def_id));
+ let inferred_outlives = self.tcx.inferred_outlives_of(def_id);
+ if !inferred_outlives.is_empty() {
+ record_array!(self.tables.inferred_outlives_of[def_id] <- inferred_outlives);
+ }
+ }
+ if let DefKind::Trait | DefKind::TraitAlias = def_kind {
+ record!(self.tables.super_predicates_of[def_id] <- self.tcx.super_predicates_of(def_id));
+ }
+ }
+ let inherent_impls = tcx.crate_inherent_impls(());
+ for (def_id, implementations) in inherent_impls.inherent_impls.iter() {
+ if implementations.is_empty() {
+ continue;
+ }
+ record_array!(self.tables.inherent_impls[def_id.to_def_id()] <- implementations.iter().map(|&def_id| {
+ assert!(def_id.is_local());
+ def_id.index
+ }));
+ }
+ }
+
+ fn encode_item_type(&mut self, def_id: DefId) {
+ debug!("EncodeContext::encode_item_type({:?})", def_id);
+ record!(self.tables.type_of[def_id] <- self.tcx.type_of(def_id));
+ }
+
+ fn encode_enum_variant_info(&mut self, def: ty::AdtDef<'tcx>, index: VariantIdx) {
+ let tcx = self.tcx;
+ let variant = &def.variant(index);
+ let def_id = variant.def_id;
+ debug!("EncodeContext::encode_enum_variant_info({:?})", def_id);
+
+ let data = VariantData {
+ ctor_kind: variant.ctor_kind,
+ discr: variant.discr,
+ ctor: variant.ctor_def_id.map(|did| did.index),
+ is_non_exhaustive: variant.is_field_list_non_exhaustive(),
+ };
+
+ record!(self.tables.kind[def_id] <- EntryKind::Variant(self.lazy(data)));
+ self.tables.constness.set(def_id.index, hir::Constness::Const);
+ record_array!(self.tables.children[def_id] <- variant.fields.iter().map(|f| {
+ assert!(f.did.is_local());
+ f.did.index
+ }));
+ self.encode_item_type(def_id);
+ if variant.ctor_kind == CtorKind::Fn {
+ // FIXME(eddyb) encode signature only in `encode_enum_variant_ctor`.
+ if let Some(ctor_def_id) = variant.ctor_def_id {
+ record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(ctor_def_id));
+ }
+ }
+ }
+
+ fn encode_enum_variant_ctor(&mut self, def: ty::AdtDef<'tcx>, index: VariantIdx) {
+ let tcx = self.tcx;
+ let variant = &def.variant(index);
+ let def_id = variant.ctor_def_id.unwrap();
+ debug!("EncodeContext::encode_enum_variant_ctor({:?})", def_id);
+
+ // FIXME(eddyb) encode only the `CtorKind` for constructors.
+ let data = VariantData {
+ ctor_kind: variant.ctor_kind,
+ discr: variant.discr,
+ ctor: Some(def_id.index),
+ is_non_exhaustive: variant.is_field_list_non_exhaustive(),
+ };
+
+ record!(self.tables.kind[def_id] <- EntryKind::Variant(self.lazy(data)));
+ self.tables.constness.set(def_id.index, hir::Constness::Const);
+ self.encode_item_type(def_id);
+ if variant.ctor_kind == CtorKind::Fn {
+ record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
+ }
+ }
+
+ fn encode_info_for_mod(&mut self, local_def_id: LocalDefId, md: &hir::Mod<'_>) {
+ let tcx = self.tcx;
+ let def_id = local_def_id.to_def_id();
+ debug!("EncodeContext::encode_info_for_mod({:?})", def_id);
+
+ // If we are encoding a proc-macro crate, `encode_info_for_mod` will
+ // only ever get called for the crate root. We still want to encode
+ // the crate root for consistency with other crates (some of the resolver
+ // code uses it). However, we skip encoding anything relating to child
+ // items - we encode information about proc-macros later on.
+ let reexports = if !self.is_proc_macro {
+ match tcx.module_reexports(local_def_id) {
+ Some(exports) => self.lazy_array(exports),
+ _ => LazyArray::empty(),
+ }
+ } else {
+ LazyArray::empty()
+ };
+
+ record!(self.tables.kind[def_id] <- EntryKind::Mod(reexports));
+ if self.is_proc_macro {
+ // Encode this here because we don't do it in encode_def_ids.
+ record!(self.tables.expn_that_defined[def_id] <- tcx.expn_that_defined(local_def_id));
+ } else {
+ record_array!(self.tables.children[def_id] <- iter::from_generator(|| {
+ for item_id in md.item_ids {
+ match tcx.hir().item(*item_id).kind {
+ // Foreign items are planted into their parent modules
+ // from the name resolution point of view.
+ hir::ItemKind::ForeignMod { items, .. } => {
+ for foreign_item in items {
+ yield foreign_item.id.def_id.local_def_index;
+ }
+ }
+ // Only encode named non-reexport children, reexports are encoded
+ // separately and unnamed items are not used by name resolution.
+ hir::ItemKind::ExternCrate(..) => continue,
+ _ if tcx.def_key(item_id.def_id.to_def_id()).get_opt_name().is_some() => {
+ yield item_id.def_id.local_def_index;
+ }
+ _ => continue,
+ }
+ }
+ }));
+ }
+ }
+
+ fn encode_field(
+ &mut self,
+ adt_def: ty::AdtDef<'tcx>,
+ variant_index: VariantIdx,
+ field_index: usize,
+ ) {
+ let variant = &adt_def.variant(variant_index);
+ let field = &variant.fields[field_index];
+
+ let def_id = field.did;
+ debug!("EncodeContext::encode_field({:?})", def_id);
+
+ record!(self.tables.kind[def_id] <- EntryKind::Field);
+ self.encode_item_type(def_id);
+ }
+
+ fn encode_struct_ctor(&mut self, adt_def: ty::AdtDef<'tcx>, def_id: DefId) {
+ debug!("EncodeContext::encode_struct_ctor({:?})", def_id);
+ let tcx = self.tcx;
+ let variant = adt_def.non_enum_variant();
+
+ let data = VariantData {
+ ctor_kind: variant.ctor_kind,
+ discr: variant.discr,
+ ctor: Some(def_id.index),
+ is_non_exhaustive: variant.is_field_list_non_exhaustive(),
+ };
+
+ record!(self.tables.repr_options[def_id] <- adt_def.repr());
+ self.tables.constness.set(def_id.index, hir::Constness::Const);
+ record!(self.tables.kind[def_id] <- EntryKind::Struct(self.lazy(data)));
+ self.encode_item_type(def_id);
+ if variant.ctor_kind == CtorKind::Fn {
+ record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
+ }
+ }
+
+ fn encode_explicit_item_bounds(&mut self, def_id: DefId) {
+ debug!("EncodeContext::encode_explicit_item_bounds({:?})", def_id);
+ let bounds = self.tcx.explicit_item_bounds(def_id);
+ if !bounds.is_empty() {
+ record_array!(self.tables.explicit_item_bounds[def_id] <- bounds);
+ }
+ }
+
+ fn encode_info_for_trait_item(&mut self, def_id: DefId) {
+ debug!("EncodeContext::encode_info_for_trait_item({:?})", def_id);
+ let tcx = self.tcx;
+
+ let ast_item = tcx.hir().expect_trait_item(def_id.expect_local());
+ self.tables.impl_defaultness.set(def_id.index, ast_item.defaultness);
+ let trait_item = tcx.associated_item(def_id);
+
+ match trait_item.kind {
+ ty::AssocKind::Const => {
+ let rendered = rustc_hir_pretty::to_string(
+ &(&self.tcx.hir() as &dyn intravisit::Map<'_>),
+ |s| s.print_trait_item(ast_item),
+ );
+
+ record!(self.tables.kind[def_id] <- EntryKind::AssocConst(ty::AssocItemContainer::TraitContainer));
+ record!(self.tables.mir_const_qualif[def_id] <- mir::ConstQualifs::default());
+ record!(self.tables.rendered_const[def_id] <- rendered);
+ }
+ ty::AssocKind::Fn => {
+ let hir::TraitItemKind::Fn(m_sig, m) = &ast_item.kind else { bug!() };
+ match *m {
+ hir::TraitFn::Required(ref names) => {
+ record_array!(self.tables.fn_arg_names[def_id] <- *names)
+ }
+ hir::TraitFn::Provided(body) => {
+ record_array!(self.tables.fn_arg_names[def_id] <- self.tcx.hir().body_param_names(body))
+ }
+ };
+ self.tables.asyncness.set(def_id.index, m_sig.header.asyncness);
+ self.tables.constness.set(def_id.index, hir::Constness::NotConst);
+ record!(self.tables.kind[def_id] <- EntryKind::AssocFn {
+ container: ty::AssocItemContainer::TraitContainer,
+ has_self: trait_item.fn_has_self_parameter,
+ });
+ }
+ ty::AssocKind::Type => {
+ self.encode_explicit_item_bounds(def_id);
+ record!(self.tables.kind[def_id] <- EntryKind::AssocType(ty::AssocItemContainer::TraitContainer));
+ }
+ }
+ match trait_item.kind {
+ ty::AssocKind::Const | ty::AssocKind::Fn => {
+ self.encode_item_type(def_id);
+ }
+ ty::AssocKind::Type => {
+ if ast_item.defaultness.has_value() {
+ self.encode_item_type(def_id);
+ }
+ }
+ }
+ if trait_item.kind == ty::AssocKind::Fn {
+ record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
+ }
+ }
+
+ fn encode_info_for_impl_item(&mut self, def_id: DefId) {
+ debug!("EncodeContext::encode_info_for_impl_item({:?})", def_id);
+ let tcx = self.tcx;
+
+ let ast_item = self.tcx.hir().expect_impl_item(def_id.expect_local());
+ self.tables.impl_defaultness.set(def_id.index, ast_item.defaultness);
+ let impl_item = self.tcx.associated_item(def_id);
+
+ match impl_item.kind {
+ ty::AssocKind::Const => {
+ if let hir::ImplItemKind::Const(_, body_id) = ast_item.kind {
+ let qualifs = self.tcx.at(ast_item.span).mir_const_qualif(def_id);
+ let const_data = self.encode_rendered_const_for_body(body_id);
+
+ record!(self.tables.kind[def_id] <- EntryKind::AssocConst(ty::AssocItemContainer::ImplContainer));
+ record!(self.tables.mir_const_qualif[def_id] <- qualifs);
+ record!(self.tables.rendered_const[def_id] <- const_data);
+ } else {
+ bug!()
+ }
+ }
+ ty::AssocKind::Fn => {
+ let hir::ImplItemKind::Fn(ref sig, body) = ast_item.kind else { bug!() };
+ self.tables.asyncness.set(def_id.index, sig.header.asyncness);
+ record_array!(self.tables.fn_arg_names[def_id] <- self.tcx.hir().body_param_names(body));
+ // Can be inside `impl const Trait`, so using `sig.header.constness` is not reliable.
+ let constness = if self.tcx.is_const_fn_raw(def_id) {
+ hir::Constness::Const
+ } else {
+ hir::Constness::NotConst
+ };
+ self.tables.constness.set(def_id.index, constness);
+ record!(self.tables.kind[def_id] <- EntryKind::AssocFn {
+ container: ty::AssocItemContainer::ImplContainer,
+ has_self: impl_item.fn_has_self_parameter,
+ });
+ }
+ ty::AssocKind::Type => {
+ record!(self.tables.kind[def_id] <- EntryKind::AssocType(ty::AssocItemContainer::ImplContainer));
+ }
+ }
+ self.encode_item_type(def_id);
+ if let Some(trait_item_def_id) = impl_item.trait_item_def_id {
+ self.tables.trait_item_def_id.set(def_id.index, trait_item_def_id.into());
+ }
+ if impl_item.kind == ty::AssocKind::Fn {
+ record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
+ if tcx.is_intrinsic(def_id) {
+ self.tables.is_intrinsic.set(def_id.index, ());
+ }
+ }
+ }
+
+ fn encode_mir(&mut self) {
+ if self.is_proc_macro {
+ return;
+ }
+
+ let keys_and_jobs = self
+ .tcx
+ .mir_keys(())
+ .iter()
+ .filter_map(|&def_id| {
+ let (encode_const, encode_opt) = should_encode_mir(self.tcx, def_id);
+ if encode_const || encode_opt {
+ Some((def_id, encode_const, encode_opt))
+ } else {
+ None
+ }
+ })
+ .collect::<Vec<_>>();
+ for (def_id, encode_const, encode_opt) in keys_and_jobs.into_iter() {
+ debug_assert!(encode_const || encode_opt);
+
+ debug!("EntryBuilder::encode_mir({:?})", def_id);
+ if encode_opt {
+ record!(self.tables.optimized_mir[def_id.to_def_id()] <- self.tcx.optimized_mir(def_id));
+ }
+ if encode_const {
+ record!(self.tables.mir_for_ctfe[def_id.to_def_id()] <- self.tcx.mir_for_ctfe(def_id));
+
+ // FIXME(generic_const_exprs): this feels wrong to have in `encode_mir`
+ let abstract_const = self.tcx.thir_abstract_const(def_id);
+ if let Ok(Some(abstract_const)) = abstract_const {
+ record!(self.tables.thir_abstract_const[def_id.to_def_id()] <- abstract_const);
+ }
+ }
+ record!(self.tables.promoted_mir[def_id.to_def_id()] <- self.tcx.promoted_mir(def_id));
+
+ let instance =
+ ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id.to_def_id()));
+ let unused = self.tcx.unused_generic_params(instance);
+ if !unused.is_empty() {
+ record!(self.tables.unused_generic_params[def_id.to_def_id()] <- unused);
+ }
+ }
+ }
+
+ fn encode_stability(&mut self, def_id: DefId) {
+ debug!("EncodeContext::encode_stability({:?})", def_id);
+
+ // The query lookup can take a measurable amount of time in crates with many items. Check if
+ // the stability attributes are even enabled before using their queries.
+ if self.feat.staged_api || self.tcx.sess.opts.unstable_opts.force_unstable_if_unmarked {
+ if let Some(stab) = self.tcx.lookup_stability(def_id) {
+ record!(self.tables.lookup_stability[def_id] <- stab)
+ }
+ }
+ }
+
+ fn encode_const_stability(&mut self, def_id: DefId) {
+ debug!("EncodeContext::encode_const_stability({:?})", def_id);
+
+ // The query lookup can take a measurable amount of time in crates with many items. Check if
+ // the stability attributes are even enabled before using their queries.
+ if self.feat.staged_api || self.tcx.sess.opts.unstable_opts.force_unstable_if_unmarked {
+ if let Some(stab) = self.tcx.lookup_const_stability(def_id) {
+ record!(self.tables.lookup_const_stability[def_id] <- stab)
+ }
+ }
+ }
+
+ fn encode_deprecation(&mut self, def_id: DefId) {
+ debug!("EncodeContext::encode_deprecation({:?})", def_id);
+ if let Some(depr) = self.tcx.lookup_deprecation(def_id) {
+ record!(self.tables.lookup_deprecation_entry[def_id] <- depr);
+ }
+ }
+
+ fn encode_rendered_const_for_body(&mut self, body_id: hir::BodyId) -> String {
+ let hir = self.tcx.hir();
+ let body = hir.body(body_id);
+ rustc_hir_pretty::to_string(&(&hir as &dyn intravisit::Map<'_>), |s| {
+ s.print_expr(&body.value)
+ })
+ }
+
+ fn encode_info_for_item(&mut self, def_id: DefId, item: &'tcx hir::Item<'tcx>) {
+ let tcx = self.tcx;
+
+ debug!("EncodeContext::encode_info_for_item({:?})", def_id);
+
+ let entry_kind = match item.kind {
+ hir::ItemKind::Static(..) => EntryKind::Static,
+ hir::ItemKind::Const(_, body_id) => {
+ let qualifs = self.tcx.at(item.span).mir_const_qualif(def_id);
+ let const_data = self.encode_rendered_const_for_body(body_id);
+ record!(self.tables.mir_const_qualif[def_id] <- qualifs);
+ record!(self.tables.rendered_const[def_id] <- const_data);
+ EntryKind::Const
+ }
+ hir::ItemKind::Fn(ref sig, .., body) => {
+ self.tables.asyncness.set(def_id.index, sig.header.asyncness);
+ record_array!(self.tables.fn_arg_names[def_id] <- self.tcx.hir().body_param_names(body));
+ self.tables.constness.set(def_id.index, sig.header.constness);
+ EntryKind::Fn
+ }
+ hir::ItemKind::Macro(ref macro_def, _) => {
+ EntryKind::MacroDef(self.lazy(&*macro_def.body), macro_def.macro_rules)
+ }
+ hir::ItemKind::Mod(ref m) => {
+ return self.encode_info_for_mod(item.def_id, m);
+ }
+ hir::ItemKind::ForeignMod { .. } => EntryKind::ForeignMod,
+ hir::ItemKind::GlobalAsm(..) => EntryKind::GlobalAsm,
+ hir::ItemKind::TyAlias(..) => EntryKind::Type,
+ hir::ItemKind::OpaqueTy(..) => {
+ self.encode_explicit_item_bounds(def_id);
+ EntryKind::OpaqueTy
+ }
+ hir::ItemKind::Enum(..) => {
+ let adt_def = self.tcx.adt_def(def_id);
+ record!(self.tables.repr_options[def_id] <- adt_def.repr());
+ EntryKind::Enum
+ }
+ hir::ItemKind::Struct(ref struct_def, _) => {
+ let adt_def = self.tcx.adt_def(def_id);
+ record!(self.tables.repr_options[def_id] <- adt_def.repr());
+ self.tables.constness.set(def_id.index, hir::Constness::Const);
+
+ // Encode def_ids for each field and method; for methods, write
+ // all the information that `get_trait_method` needs to know.
+ let ctor = struct_def
+ .ctor_hir_id()
+ .map(|ctor_hir_id| self.tcx.hir().local_def_id(ctor_hir_id).local_def_index);
+
+ let variant = adt_def.non_enum_variant();
+ EntryKind::Struct(self.lazy(VariantData {
+ ctor_kind: variant.ctor_kind,
+ discr: variant.discr,
+ ctor,
+ is_non_exhaustive: variant.is_field_list_non_exhaustive(),
+ }))
+ }
+ hir::ItemKind::Union(..) => {
+ let adt_def = self.tcx.adt_def(def_id);
+ record!(self.tables.repr_options[def_id] <- adt_def.repr());
+
+ let variant = adt_def.non_enum_variant();
+ EntryKind::Union(self.lazy(VariantData {
+ ctor_kind: variant.ctor_kind,
+ discr: variant.discr,
+ ctor: None,
+ is_non_exhaustive: variant.is_field_list_non_exhaustive(),
+ }))
+ }
+ hir::ItemKind::Impl(hir::Impl { defaultness, constness, .. }) => {
+ self.tables.impl_defaultness.set(def_id.index, *defaultness);
+ self.tables.constness.set(def_id.index, *constness);
+
+ let trait_ref = self.tcx.impl_trait_ref(def_id);
+ if let Some(trait_ref) = trait_ref {
+ let trait_def = self.tcx.trait_def(trait_ref.def_id);
+ if let Some(mut an) = trait_def.ancestors(self.tcx, def_id).ok() {
+ if let Some(specialization_graph::Node::Impl(parent)) = an.nth(1) {
+ self.tables.impl_parent.set(def_id.index, parent.into());
+ }
+ }
+
+ // If this is an impl of `CoerceUnsized`, record its
+ // "unsized info"; otherwise nothing is stored.
+ if Some(trait_ref.def_id) == self.tcx.lang_items().coerce_unsized_trait() {
+ let coerce_unsized_info =
+ self.tcx.at(item.span).coerce_unsized_info(def_id);
+ record!(self.tables.coerce_unsized_info[def_id] <- coerce_unsized_info);
+ }
+ }
+
+ let polarity = self.tcx.impl_polarity(def_id);
+ self.tables.impl_polarity.set(def_id.index, polarity);
+
+ EntryKind::Impl
+ }
+ hir::ItemKind::Trait(..) => {
+ let trait_def = self.tcx.trait_def(def_id);
+ record!(self.tables.trait_def[def_id] <- trait_def);
+
+ EntryKind::Trait
+ }
+ hir::ItemKind::TraitAlias(..) => {
+ let trait_def = self.tcx.trait_def(def_id);
+ record!(self.tables.trait_def[def_id] <- trait_def);
+
+ EntryKind::TraitAlias
+ }
+ hir::ItemKind::ExternCrate(_) | hir::ItemKind::Use(..) => {
+ bug!("cannot encode info for item {:?}", item)
+ }
+ };
+ record!(self.tables.kind[def_id] <- entry_kind);
+ // FIXME(eddyb) there should be a nicer way to do this.
+ match item.kind {
+ hir::ItemKind::Enum(..) => record_array!(self.tables.children[def_id] <-
+ self.tcx.adt_def(def_id).variants().iter().map(|v| {
+ assert!(v.def_id.is_local());
+ v.def_id.index
+ })
+ ),
+ hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) => {
+ record_array!(self.tables.children[def_id] <-
+ self.tcx.adt_def(def_id).non_enum_variant().fields.iter().map(|f| {
+ assert!(f.did.is_local());
+ f.did.index
+ })
+ )
+ }
+ hir::ItemKind::Impl { .. } | hir::ItemKind::Trait(..) => {
+ let associated_item_def_ids = self.tcx.associated_item_def_ids(def_id);
+ record_array!(self.tables.children[def_id] <-
+ associated_item_def_ids.iter().map(|&def_id| {
+ assert!(def_id.is_local());
+ def_id.index
+ })
+ );
+ }
+ _ => {}
+ }
+ match item.kind {
+ hir::ItemKind::Static(..)
+ | hir::ItemKind::Const(..)
+ | hir::ItemKind::Fn(..)
+ | hir::ItemKind::TyAlias(..)
+ | hir::ItemKind::OpaqueTy(..)
+ | hir::ItemKind::Enum(..)
+ | hir::ItemKind::Struct(..)
+ | hir::ItemKind::Union(..)
+ | hir::ItemKind::Impl { .. } => self.encode_item_type(def_id),
+ _ => {}
+ }
+ if let hir::ItemKind::Fn(..) = item.kind {
+ record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
+ if tcx.is_intrinsic(def_id) {
+ self.tables.is_intrinsic.set(def_id.index, ());
+ }
+ }
+ if let hir::ItemKind::Impl { .. } = item.kind {
+ if let Some(trait_ref) = self.tcx.impl_trait_ref(def_id) {
+ record!(self.tables.impl_trait_ref[def_id] <- trait_ref);
+ }
+ }
+ }
+
+ fn encode_info_for_generic_param(&mut self, def_id: DefId, kind: EntryKind, encode_type: bool) {
+ record!(self.tables.kind[def_id] <- kind);
+ if encode_type {
+ self.encode_item_type(def_id);
+ }
+ }
+
+ fn encode_info_for_closure(&mut self, hir_id: hir::HirId) {
+ let def_id = self.tcx.hir().local_def_id(hir_id);
+ debug!("EncodeContext::encode_info_for_closure({:?})", def_id);
+ // NOTE(eddyb) `tcx.type_of(def_id)` isn't used because it's fully generic,
+ // including on the signature, which is inferred in `typeck`.
+ let typeck_result: &'tcx ty::TypeckResults<'tcx> = self.tcx.typeck(def_id);
+ let ty = typeck_result.node_type(hir_id);
+ match ty.kind() {
+ ty::Generator(..) => {
+ let data = self.tcx.generator_kind(def_id).unwrap();
+ let generator_diagnostic_data = typeck_result.get_generator_diagnostic_data();
+ record!(self.tables.kind[def_id.to_def_id()] <- EntryKind::Generator);
+ record!(self.tables.generator_kind[def_id.to_def_id()] <- data);
+ record!(self.tables.generator_diagnostic_data[def_id.to_def_id()] <- generator_diagnostic_data);
+ }
+
+ ty::Closure(..) => {
+ record!(self.tables.kind[def_id.to_def_id()] <- EntryKind::Closure);
+ }
+
+ _ => bug!("closure that is neither generator nor closure"),
+ }
+ self.encode_item_type(def_id.to_def_id());
+ if let ty::Closure(def_id, substs) = *ty.kind() {
+ record!(self.tables.fn_sig[def_id] <- substs.as_closure().sig());
+ }
+ }
+
+ fn encode_info_for_anon_const(&mut self, id: hir::HirId) {
+ let def_id = self.tcx.hir().local_def_id(id);
+ debug!("EncodeContext::encode_info_for_anon_const({:?})", def_id);
+ let body_id = self.tcx.hir().body_owned_by(def_id);
+ let const_data = self.encode_rendered_const_for_body(body_id);
+ let qualifs = self.tcx.mir_const_qualif(def_id);
+
+ record!(self.tables.kind[def_id.to_def_id()] <- EntryKind::AnonConst);
+ record!(self.tables.mir_const_qualif[def_id.to_def_id()] <- qualifs);
+ record!(self.tables.rendered_const[def_id.to_def_id()] <- const_data);
+ self.encode_item_type(def_id.to_def_id());
+ }
+
+ fn encode_native_libraries(&mut self) -> LazyArray<NativeLib> {
+ empty_proc_macro!(self);
+ let used_libraries = self.tcx.native_libraries(LOCAL_CRATE);
+ self.lazy_array(used_libraries.iter())
+ }
+
+ fn encode_foreign_modules(&mut self) -> LazyArray<ForeignModule> {
+ empty_proc_macro!(self);
+ let foreign_modules = self.tcx.foreign_modules(LOCAL_CRATE);
+ self.lazy_array(foreign_modules.iter().map(|(_, m)| m).cloned())
+ }
+
+ fn encode_hygiene(&mut self) -> (SyntaxContextTable, ExpnDataTable, ExpnHashTable) {
+ let mut syntax_contexts: TableBuilder<_, _> = Default::default();
+ let mut expn_data_table: TableBuilder<_, _> = Default::default();
+ let mut expn_hash_table: TableBuilder<_, _> = Default::default();
+
+ self.hygiene_ctxt.encode(
+ &mut (&mut *self, &mut syntax_contexts, &mut expn_data_table, &mut expn_hash_table),
+ |(this, syntax_contexts, _, _), index, ctxt_data| {
+ syntax_contexts.set(index, this.lazy(ctxt_data));
+ },
+ |(this, _, expn_data_table, expn_hash_table), index, expn_data, hash| {
+ if let Some(index) = index.as_local() {
+ expn_data_table.set(index.as_raw(), this.lazy(expn_data));
+ expn_hash_table.set(index.as_raw(), this.lazy(hash));
+ }
+ },
+ );
+
+ (
+ syntax_contexts.encode(&mut self.opaque),
+ expn_data_table.encode(&mut self.opaque),
+ expn_hash_table.encode(&mut self.opaque),
+ )
+ }
+
+ fn encode_proc_macros(&mut self) -> Option<ProcMacroData> {
+ let is_proc_macro = self.tcx.sess.crate_types().contains(&CrateType::ProcMacro);
+ if is_proc_macro {
+ let tcx = self.tcx;
+ let hir = tcx.hir();
+
+ let proc_macro_decls_static = tcx.proc_macro_decls_static(()).unwrap().local_def_index;
+ let stability = tcx.lookup_stability(CRATE_DEF_ID);
+ let macros =
+ self.lazy_array(tcx.resolutions(()).proc_macros.iter().map(|p| p.local_def_index));
+ let spans = self.tcx.sess.parse_sess.proc_macro_quoted_spans();
+ for (i, span) in spans.into_iter().enumerate() {
+ let span = self.lazy(span);
+ self.tables.proc_macro_quoted_spans.set(i, span);
+ }
+
+ self.tables.opt_def_kind.set(LOCAL_CRATE.as_def_id().index, DefKind::Mod);
+ record!(self.tables.def_span[LOCAL_CRATE.as_def_id()] <- tcx.def_span(LOCAL_CRATE.as_def_id()));
+ self.encode_attrs(LOCAL_CRATE.as_def_id().expect_local());
+ record!(self.tables.visibility[LOCAL_CRATE.as_def_id()] <- tcx.visibility(LOCAL_CRATE.as_def_id()));
+ if let Some(stability) = stability {
+ record!(self.tables.lookup_stability[LOCAL_CRATE.as_def_id()] <- stability);
+ }
+ self.encode_deprecation(LOCAL_CRATE.as_def_id());
+
+ // Normally, this information is encoded when we walk the items
+ // defined in this crate. However, we skip doing that for proc-macro crates,
+ // so we manually encode just the information that we need
+ for &proc_macro in &tcx.resolutions(()).proc_macros {
+ let id = proc_macro;
+ let proc_macro = hir.local_def_id_to_hir_id(proc_macro);
+ let mut name = hir.name(proc_macro);
+ let span = hir.span(proc_macro);
+ // Proc-macros may have attributes like `#[allow_internal_unstable]`,
+ // so downstream crates need access to them.
+ let attrs = hir.attrs(proc_macro);
+ let macro_kind = if tcx.sess.contains_name(attrs, sym::proc_macro) {
+ MacroKind::Bang
+ } else if tcx.sess.contains_name(attrs, sym::proc_macro_attribute) {
+ MacroKind::Attr
+ } else if let Some(attr) = tcx.sess.find_by_name(attrs, sym::proc_macro_derive) {
+ // This unwrap chain should have been checked by the proc-macro harness.
+ name = attr.meta_item_list().unwrap()[0]
+ .meta_item()
+ .unwrap()
+ .ident()
+ .unwrap()
+ .name;
+ MacroKind::Derive
+ } else {
+ bug!("Unknown proc-macro type for item {:?}", id);
+ };
+
+ let mut def_key = self.tcx.hir().def_key(id);
+ def_key.disambiguated_data.data = DefPathData::MacroNs(name);
+
+ let def_id = id.to_def_id();
+ self.tables.opt_def_kind.set(def_id.index, DefKind::Macro(macro_kind));
+ record!(self.tables.kind[def_id] <- EntryKind::ProcMacro(macro_kind));
+ self.encode_attrs(id);
+ record!(self.tables.def_keys[def_id] <- def_key);
+ record!(self.tables.def_ident_span[def_id] <- span);
+ record!(self.tables.def_span[def_id] <- span);
+ record!(self.tables.visibility[def_id] <- ty::Visibility::Public);
+ if let Some(stability) = stability {
+ record!(self.tables.lookup_stability[def_id] <- stability);
+ }
+ }
+
+ Some(ProcMacroData { proc_macro_decls_static, stability, macros })
+ } else {
+ None
+ }
+ }
+
+ fn encode_debugger_visualizers(&mut self) -> LazyArray<DebuggerVisualizerFile> {
+ empty_proc_macro!(self);
+ self.lazy_array(self.tcx.debugger_visualizers(LOCAL_CRATE).iter())
+ }
+
+ fn encode_crate_deps(&mut self) -> LazyArray<CrateDep> {
+ empty_proc_macro!(self);
+
+ let deps = self
+ .tcx
+ .crates(())
+ .iter()
+ .map(|&cnum| {
+ let dep = CrateDep {
+ name: self.tcx.crate_name(cnum),
+ hash: self.tcx.crate_hash(cnum),
+ host_hash: self.tcx.crate_host_hash(cnum),
+ kind: self.tcx.dep_kind(cnum),
+ extra_filename: self.tcx.extra_filename(cnum).clone(),
+ };
+ (cnum, dep)
+ })
+ .collect::<Vec<_>>();
+
+ {
+ // Sanity-check the crate numbers
+ let mut expected_cnum = 1;
+ for &(n, _) in &deps {
+ assert_eq!(n, CrateNum::new(expected_cnum));
+ expected_cnum += 1;
+ }
+ }
+
+ // We're just going to write a list of crate 'name-hash-version's, with
+ // the assumption that they are numbered 1 to n.
+ // FIXME (#2166): This is not nearly enough to support correct versioning
+ // but is enough to get transitive crate dependencies working.
+ self.lazy_array(deps.iter().map(|&(_, ref dep)| dep))
+ }
+
+ fn encode_lib_features(&mut self) -> LazyArray<(Symbol, Option<Symbol>)> {
+ empty_proc_macro!(self);
+ let tcx = self.tcx;
+ let lib_features = tcx.lib_features(());
+ self.lazy_array(lib_features.to_vec())
+ }
+
+ fn encode_stability_implications(&mut self) -> LazyArray<(Symbol, Symbol)> {
+ empty_proc_macro!(self);
+ let tcx = self.tcx;
+ let implications = tcx.stability_implications(LOCAL_CRATE);
+ self.lazy_array(implications.iter().map(|(k, v)| (*k, *v)))
+ }
+
+ fn encode_diagnostic_items(&mut self) -> LazyArray<(Symbol, DefIndex)> {
+ empty_proc_macro!(self);
+ let tcx = self.tcx;
+ let diagnostic_items = &tcx.diagnostic_items(LOCAL_CRATE).name_to_id;
+ self.lazy_array(diagnostic_items.iter().map(|(&name, def_id)| (name, def_id.index)))
+ }
+
+ fn encode_lang_items(&mut self) -> LazyArray<(DefIndex, usize)> {
+ empty_proc_macro!(self);
+ let tcx = self.tcx;
+ let lang_items = tcx.lang_items();
+ let lang_items = lang_items.items().iter();
+ self.lazy_array(lang_items.enumerate().filter_map(|(i, &opt_def_id)| {
+ if let Some(def_id) = opt_def_id {
+ if def_id.is_local() {
+ return Some((def_id.index, i));
+ }
+ }
+ None
+ }))
+ }
+
+ fn encode_lang_items_missing(&mut self) -> LazyArray<lang_items::LangItem> {
+ empty_proc_macro!(self);
+ let tcx = self.tcx;
+ self.lazy_array(&tcx.lang_items().missing)
+ }
+
+ fn encode_traits(&mut self) -> LazyArray<DefIndex> {
+ empty_proc_macro!(self);
+ self.lazy_array(self.tcx.traits_in_crate(LOCAL_CRATE).iter().map(|def_id| def_id.index))
+ }
+
+ /// Encodes an index, mapping each trait to its (local) implementations.
+ fn encode_impls(&mut self) -> LazyArray<TraitImpls> {
+ debug!("EncodeContext::encode_traits_and_impls()");
+ empty_proc_macro!(self);
+ let tcx = self.tcx;
+ let mut fx_hash_map: FxHashMap<DefId, Vec<(DefIndex, Option<SimplifiedType>)>> =
+ FxHashMap::default();
+
+ for id in tcx.hir().items() {
+ if matches!(tcx.def_kind(id.def_id), DefKind::Impl) {
+ if let Some(trait_ref) = tcx.impl_trait_ref(id.def_id.to_def_id()) {
+ let simplified_self_ty = fast_reject::simplify_type(
+ self.tcx,
+ trait_ref.self_ty(),
+ TreatParams::AsInfer,
+ );
+
+ fx_hash_map
+ .entry(trait_ref.def_id)
+ .or_default()
+ .push((id.def_id.local_def_index, simplified_self_ty));
+ }
+ }
+ }
+
+ let mut all_impls: Vec<_> = fx_hash_map.into_iter().collect();
+
+ // Bring everything into deterministic order for hashing
+ all_impls.sort_by_cached_key(|&(trait_def_id, _)| tcx.def_path_hash(trait_def_id));
+
+ let all_impls: Vec<_> = all_impls
+ .into_iter()
+ .map(|(trait_def_id, mut impls)| {
+ // Bring everything into deterministic order for hashing
+ impls.sort_by_cached_key(|&(index, _)| {
+ tcx.hir().def_path_hash(LocalDefId { local_def_index: index })
+ });
+
+ TraitImpls {
+ trait_id: (trait_def_id.krate.as_u32(), trait_def_id.index),
+ impls: self.lazy_array(&impls),
+ }
+ })
+ .collect();
+
+ self.lazy_array(&all_impls)
+ }
+
+ fn encode_incoherent_impls(&mut self) -> LazyArray<IncoherentImpls> {
+ debug!("EncodeContext::encode_traits_and_impls()");
+ empty_proc_macro!(self);
+ let tcx = self.tcx;
+ let mut all_impls: Vec<_> = tcx.crate_inherent_impls(()).incoherent_impls.iter().collect();
+ tcx.with_stable_hashing_context(|mut ctx| {
+ all_impls.sort_by_cached_key(|&(&simp, _)| {
+ let mut hasher = StableHasher::new();
+ simp.hash_stable(&mut ctx, &mut hasher);
+ hasher.finish::<Fingerprint>()
+ })
+ });
+ let all_impls: Vec<_> = all_impls
+ .into_iter()
+ .map(|(&simp, impls)| {
+ let mut impls: Vec<_> =
+ impls.into_iter().map(|def_id| def_id.local_def_index).collect();
+ impls.sort_by_cached_key(|&local_def_index| {
+ tcx.hir().def_path_hash(LocalDefId { local_def_index })
+ });
+
+ IncoherentImpls { self_ty: simp, impls: self.lazy_array(impls) }
+ })
+ .collect();
+
+ self.lazy_array(&all_impls)
+ }
+
+ // Encodes all symbols exported from this crate into the metadata.
+ //
+ // This pass is seeded off the reachability list calculated in the
+ // middle::reachable module but filters out items that either don't have a
+ // symbol associated with them (they weren't translated) or are an FFI
+ // definition (as that's not defined in this crate).
+ fn encode_exported_symbols(
+ &mut self,
+ exported_symbols: &[(ExportedSymbol<'tcx>, SymbolExportInfo)],
+ ) -> LazyArray<(ExportedSymbol<'static>, SymbolExportInfo)> {
+ empty_proc_macro!(self);
+ // The metadata symbol name is special. It should not show up in
+ // downstream crates.
+ let metadata_symbol_name = SymbolName::new(self.tcx, &metadata_symbol_name(self.tcx));
+
+ self.lazy_array(
+ exported_symbols
+ .iter()
+ .filter(|&&(ref exported_symbol, _)| match *exported_symbol {
+ ExportedSymbol::NoDefId(symbol_name) => symbol_name != metadata_symbol_name,
+ _ => true,
+ })
+ .cloned(),
+ )
+ }
+
+ fn encode_dylib_dependency_formats(&mut self) -> LazyArray<Option<LinkagePreference>> {
+ empty_proc_macro!(self);
+ let formats = self.tcx.dependency_formats(());
+ for (ty, arr) in formats.iter() {
+ if *ty != CrateType::Dylib {
+ continue;
+ }
+ return self.lazy_array(arr.iter().map(|slot| match *slot {
+ Linkage::NotLinked | Linkage::IncludedFromDylib => None,
+
+ Linkage::Dynamic => Some(LinkagePreference::RequireDynamic),
+ Linkage::Static => Some(LinkagePreference::RequireStatic),
+ }));
+ }
+ LazyArray::empty()
+ }
+
+ fn encode_info_for_foreign_item(&mut self, def_id: DefId, nitem: &hir::ForeignItem<'_>) {
+ let tcx = self.tcx;
+
+ debug!("EncodeContext::encode_info_for_foreign_item({:?})", def_id);
+
+ match nitem.kind {
+ hir::ForeignItemKind::Fn(_, ref names, _) => {
+ self.tables.asyncness.set(def_id.index, hir::IsAsync::NotAsync);
+ record_array!(self.tables.fn_arg_names[def_id] <- *names);
+ let constness = if self.tcx.is_const_fn_raw(def_id) {
+ hir::Constness::Const
+ } else {
+ hir::Constness::NotConst
+ };
+ self.tables.constness.set(def_id.index, constness);
+ record!(self.tables.kind[def_id] <- EntryKind::ForeignFn);
+ }
+ hir::ForeignItemKind::Static(..) => {
+ record!(self.tables.kind[def_id] <- EntryKind::ForeignStatic);
+ }
+ hir::ForeignItemKind::Type => {
+ record!(self.tables.kind[def_id] <- EntryKind::ForeignType);
+ }
+ }
+ self.encode_item_type(def_id);
+ if let hir::ForeignItemKind::Fn(..) = nitem.kind {
+ record!(self.tables.fn_sig[def_id] <- tcx.fn_sig(def_id));
+ if tcx.is_intrinsic(def_id) {
+ self.tables.is_intrinsic.set(def_id.index, ());
+ }
+ }
+ }
+}
+
+// FIXME(eddyb) make metadata encoding walk over all definitions, instead of HIR.
+impl<'a, 'tcx> Visitor<'tcx> for EncodeContext<'a, 'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+ fn visit_expr(&mut self, ex: &'tcx hir::Expr<'tcx>) {
+ intravisit::walk_expr(self, ex);
+ self.encode_info_for_expr(ex);
+ }
+ fn visit_anon_const(&mut self, c: &'tcx AnonConst) {
+ intravisit::walk_anon_const(self, c);
+ self.encode_info_for_anon_const(c.hir_id);
+ }
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ intravisit::walk_item(self, item);
+ match item.kind {
+ hir::ItemKind::ExternCrate(_) | hir::ItemKind::Use(..) => {} // ignore these
+ _ => self.encode_info_for_item(item.def_id.to_def_id(), item),
+ }
+ self.encode_addl_info_for_item(item);
+ }
+ fn visit_foreign_item(&mut self, ni: &'tcx hir::ForeignItem<'tcx>) {
+ intravisit::walk_foreign_item(self, ni);
+ self.encode_info_for_foreign_item(ni.def_id.to_def_id(), ni);
+ }
+ fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
+ intravisit::walk_generics(self, generics);
+ self.encode_info_for_generics(generics);
+ }
+}
+
+impl<'a, 'tcx> EncodeContext<'a, 'tcx> {
+ fn encode_fields(&mut self, adt_def: ty::AdtDef<'tcx>) {
+ for (variant_index, variant) in adt_def.variants().iter_enumerated() {
+ for (field_index, _field) in variant.fields.iter().enumerate() {
+ self.encode_field(adt_def, variant_index, field_index);
+ }
+ }
+ }
+
+ fn encode_info_for_generics(&mut self, generics: &hir::Generics<'tcx>) {
+ for param in generics.params {
+ let def_id = self.tcx.hir().local_def_id(param.hir_id);
+ match param.kind {
+ GenericParamKind::Lifetime { .. } => continue,
+ GenericParamKind::Type { default, .. } => {
+ self.encode_info_for_generic_param(
+ def_id.to_def_id(),
+ EntryKind::TypeParam,
+ default.is_some(),
+ );
+ }
+ GenericParamKind::Const { ref default, .. } => {
+ let def_id = def_id.to_def_id();
+ self.encode_info_for_generic_param(def_id, EntryKind::ConstParam, true);
+ if default.is_some() {
+ record!(self.tables.const_param_default[def_id] <- self.tcx.const_param_default(def_id))
+ }
+ }
+ }
+ }
+ }
+
+ fn encode_info_for_expr(&mut self, expr: &hir::Expr<'_>) {
+ if let hir::ExprKind::Closure { .. } = expr.kind {
+ self.encode_info_for_closure(expr.hir_id);
+ }
+ }
+
+ /// In some cases, along with the item itself, we also
+ /// encode some sub-items. Usually we want some info from the item
+ /// so it's easier to do that here than to wait until we would encounter
+ /// it normally in the visitor walk.
+ fn encode_addl_info_for_item(&mut self, item: &hir::Item<'_>) {
+ match item.kind {
+ hir::ItemKind::Static(..)
+ | hir::ItemKind::Const(..)
+ | hir::ItemKind::Fn(..)
+ | hir::ItemKind::Macro(..)
+ | hir::ItemKind::Mod(..)
+ | hir::ItemKind::ForeignMod { .. }
+ | hir::ItemKind::GlobalAsm(..)
+ | hir::ItemKind::ExternCrate(..)
+ | hir::ItemKind::Use(..)
+ | hir::ItemKind::TyAlias(..)
+ | hir::ItemKind::OpaqueTy(..)
+ | hir::ItemKind::TraitAlias(..) => {
+ // no sub-item recording needed in these cases
+ }
+ hir::ItemKind::Enum(..) => {
+ let def = self.tcx.adt_def(item.def_id.to_def_id());
+ self.encode_fields(def);
+
+ for (i, variant) in def.variants().iter_enumerated() {
+ self.encode_enum_variant_info(def, i);
+
+ if let Some(_ctor_def_id) = variant.ctor_def_id {
+ self.encode_enum_variant_ctor(def, i);
+ }
+ }
+ }
+ hir::ItemKind::Struct(ref struct_def, _) => {
+ let def = self.tcx.adt_def(item.def_id.to_def_id());
+ self.encode_fields(def);
+
+ // If the struct has a constructor, encode it.
+ if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
+ let ctor_def_id = self.tcx.hir().local_def_id(ctor_hir_id);
+ self.encode_struct_ctor(def, ctor_def_id.to_def_id());
+ }
+ }
+ hir::ItemKind::Union(..) => {
+ let def = self.tcx.adt_def(item.def_id.to_def_id());
+ self.encode_fields(def);
+ }
+ hir::ItemKind::Impl { .. } => {
+ for &trait_item_def_id in
+ self.tcx.associated_item_def_ids(item.def_id.to_def_id()).iter()
+ {
+ self.encode_info_for_impl_item(trait_item_def_id);
+ }
+ }
+ hir::ItemKind::Trait(..) => {
+ for &item_def_id in self.tcx.associated_item_def_ids(item.def_id.to_def_id()).iter()
+ {
+ self.encode_info_for_trait_item(item_def_id);
+ }
+ }
+ }
+ }
+}
+
+/// Used to prefetch queries which will be needed later by metadata encoding.
+/// Only a subset of the queries are actually prefetched to keep this code smaller.
+fn prefetch_mir(tcx: TyCtxt<'_>) {
+ if !tcx.sess.opts.output_types.should_codegen() {
+ // We won't emit MIR, so don't prefetch it.
+ return;
+ }
+
+ par_iter(tcx.mir_keys(())).for_each(|&def_id| {
+ let (encode_const, encode_opt) = should_encode_mir(tcx, def_id);
+
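+ // `tcx.ensure()` runs each query purely for its side effect of
+ // populating the query cache; the results are not needed here.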
+ if encode_const {
+ tcx.ensure().mir_for_ctfe(def_id);
+ }
+ if encode_opt {
+ tcx.ensure().optimized_mir(def_id);
+ }
+ if encode_opt || encode_const {
+ tcx.ensure().promoted_mir(def_id);
+ }
+ })
+}
+
+// NOTE(eddyb) The following comment was preserved for posterity, even
+// though it's no longer relevant as EBML (which uses nested & tagged
+// "documents") was replaced with a scheme that can't go out of bounds.
+//
+// And here we run into yet another obscure archive bug: in which metadata
+// loaded from archives may have trailing garbage bytes. A while back one of
+// our tests was failing sporadically on the macOS 64-bit builders (both nopt
+// and opt) by having ebml generate an out-of-bounds panic when looking at
+// metadata.
+//
+// Upon investigation it turned out that the metadata file inside of an rlib
+// (and ar archive) was being corrupted. Some compilations would generate a
+// metadata file which would end in a few extra bytes, while other
+// compilations would not have these extra bytes appended to the end. These
+// extra bytes were interpreted by ebml as an extra tag, so they ended up
+// being interpreted, causing the out-of-bounds panic.
+//
+// The root cause of why these extra bytes were appearing was never
+// discovered, and in the meantime the solution we're employing is to insert
+// the length of the metadata at the start of the metadata. Later on this
+// will allow us to slice the metadata to the precise length that we just
+// generated regardless of trailing bytes that end up in it.
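+//
+// A sketch of that length-prefix idea (hypothetical helper, not the actual
+// loader; the real width and location of the length field may differ):
+//
+//     fn trim_metadata(raw: &[u8]) -> &[u8] {
+//         // The prefix stores the payload length; anything past it is
+//         // archive padding and gets sliced off.
+//         let len = u32::from_be_bytes(raw[..4].try_into().unwrap()) as usize;
+//         &raw[4..4 + len]
+//     }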
+
+pub struct EncodedMetadata {
+ // The declaration order matters because `mmap` should be dropped before `_temp_dir`.
+ mmap: Option<Mmap>,
+ // We need to carry MaybeTempDir to avoid deleting the temporary
+ // directory while accessing the Mmap.
+ _temp_dir: Option<MaybeTempDir>,
+}
+
+impl EncodedMetadata {
+ #[inline]
+ pub fn from_path(path: PathBuf, temp_dir: Option<MaybeTempDir>) -> std::io::Result<Self> {
+ let file = std::fs::File::open(&path)?;
+ let file_metadata = file.metadata()?;
+ if file_metadata.len() == 0 {
+ return Ok(Self { mmap: None, _temp_dir: None });
+ }
+ let mmap = unsafe { Some(Mmap::map(file)?) };
+ Ok(Self { mmap, _temp_dir: temp_dir })
+ }
+
+ #[inline]
+ pub fn raw_data(&self) -> &[u8] {
+ self.mmap.as_ref().map(|mmap| mmap.as_ref()).unwrap_or_default()
+ }
+}
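+
+// Usage sketch (hypothetical path): reading back metadata written by
+// `encode_metadata` below.
+//
+//     let m = EncodedMetadata::from_path(PathBuf::from("libfoo.rmeta"), None)?;
+//     assert!(m.raw_data().is_empty() || m.raw_data().starts_with(METADATA_HEADER));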
+
+impl<S: Encoder> Encodable<S> for EncodedMetadata {
+ fn encode(&self, s: &mut S) {
+ let slice = self.raw_data();
+ slice.encode(s)
+ }
+}
+
+impl<D: Decoder> Decodable<D> for EncodedMetadata {
+ fn decode(d: &mut D) -> Self {
+ let len = d.read_usize();
+ let mmap = if len > 0 {
+ let mut mmap = MmapMut::map_anon(len).unwrap();
+ // Fill the buffer in order. Re-creating `&mut mmap[..]` on every
+ // iteration would keep writing at offset 0, so index through instead.
+ for byte in mmap.iter_mut() {
+ *byte = d.read_u8();
+ }
+ mmap.flush().unwrap();
+ Some(mmap.make_read_only().unwrap())
+ } else {
+ None
+ };
+
+ Self { mmap, _temp_dir: None }
+ }
+}
+
+pub fn encode_metadata(tcx: TyCtxt<'_>, path: &Path) {
+ let _prof_timer = tcx.prof.verbose_generic_activity("generate_crate_metadata");
+
+ // Since encoding metadata is not in a query, and nothing is cached,
+ // there's no need to do dep-graph tracking for any of it.
+ tcx.dep_graph.assert_ignored();
+
+ join(
+ || encode_metadata_impl(tcx, path),
+ || {
+ if tcx.sess.threads() == 1 {
+ return;
+ }
+ // Prefetch some queries used by metadata encoding.
+ // This is not necessary for correctness, but is only done for performance reasons.
+ // It can be removed if it turns out to cause trouble or be detrimental to performance.
+ join(|| prefetch_mir(tcx), || tcx.exported_symbols(LOCAL_CRATE));
+ },
+ );
+}
+
+fn encode_metadata_impl(tcx: TyCtxt<'_>, path: &Path) {
+ let mut encoder = opaque::FileEncoder::new(path)
+ .unwrap_or_else(|err| tcx.sess.fatal(&format!("failed to create file encoder: {}", err)));
+ encoder.emit_raw_bytes(METADATA_HEADER);
+
+ // Will be filled with the root position after encoding everything.
+ encoder.emit_raw_bytes(&[0, 0, 0, 0]);
+
+ let source_map_files = tcx.sess.source_map().files();
+ let source_file_cache = (source_map_files[0].clone(), 0);
+ let required_source_files = Some(GrowableBitSet::with_capacity(source_map_files.len()));
+ drop(source_map_files);
+
+ let hygiene_ctxt = HygieneEncodeContext::default();
+
+ let mut ecx = EncodeContext {
+ opaque: encoder,
+ tcx,
+ feat: tcx.features(),
+ tables: Default::default(),
+ lazy_state: LazyState::NoNode,
+ type_shorthands: Default::default(),
+ predicate_shorthands: Default::default(),
+ source_file_cache,
+ interpret_allocs: Default::default(),
+ required_source_files,
+ is_proc_macro: tcx.sess.crate_types().contains(&CrateType::ProcMacro),
+ hygiene_ctxt: &hygiene_ctxt,
+ };
+
+ // Encode the rustc version string in a predictable location.
+ rustc_version().encode(&mut ecx);
+
+ // Encode all the entries and extra information in the crate,
+ // culminating in the `CrateRoot` which points to all of it.
+ let root = ecx.encode_crate_root();
+
+ ecx.opaque.flush();
+
+ let mut file = ecx.opaque.file();
+ // We will return to this position after writing the root position.
+ let pos_before_seek = file.stream_position().unwrap();
+
+ // Encode the root position.
+ let header = METADATA_HEADER.len();
+ file.seek(std::io::SeekFrom::Start(header as u64))
+ .unwrap_or_else(|err| tcx.sess.fatal(&format!("failed to seek the file: {}", err)));
+ let pos = root.position.get();
+ file.write_all(&[(pos >> 24) as u8, (pos >> 16) as u8, (pos >> 8) as u8, (pos >> 0) as u8])
+ .unwrap_or_else(|err| tcx.sess.fatal(&format!("failed to write to the file: {}", err)));
+
+ // Return to the position where we are before writing the root position.
+ file.seek(std::io::SeekFrom::Start(pos_before_seek)).unwrap();
+
+ // Record metadata size for self-profiling
+ tcx.prof.artifact_size(
+ "crate_metadata",
+ "crate_metadata",
+ file.metadata().unwrap().len() as u64,
+ );
+}
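+
+// Resulting file layout (sketch):
+//
+//     [ METADATA_HEADER | root position: u32, big-endian | entries ... | CrateRoot ]
+//
+// The four placeholder bytes emitted up front are backpatched with
+// `root.position` once everything else has been written.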
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers {
+ traits_in_crate: |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ let mut traits = Vec::new();
+ for id in tcx.hir().items() {
+ if matches!(tcx.def_kind(id.def_id), DefKind::Trait | DefKind::TraitAlias) {
+ traits.push(id.def_id.to_def_id())
+ }
+ }
+
+ // Bring everything into deterministic order.
+ traits.sort_by_cached_key(|&def_id| tcx.def_path_hash(def_id));
+ tcx.arena.alloc_slice(&traits)
+ },
+
+ ..*providers
+ }
+}
diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs
new file mode 100644
index 000000000..66bdecc30
--- /dev/null
+++ b/compiler/rustc_metadata/src/rmeta/mod.rs
@@ -0,0 +1,460 @@
+use crate::creader::CrateMetadataRef;
+use decoder::Metadata;
+use def_path_hash_map::DefPathHashMapRef;
+use table::TableBuilder;
+
+use rustc_ast as ast;
+use rustc_attr as attr;
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::MetadataRef;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind};
+use rustc_hir::def_id::{CrateNum, DefId, DefIndex, DefPathHash, StableCrateId};
+use rustc_hir::definitions::DefKey;
+use rustc_hir::lang_items;
+use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
+use rustc_middle::metadata::ModChild;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
+use rustc_middle::mir;
+use rustc_middle::ty::fast_reject::SimplifiedType;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, ReprOptions, Ty};
+use rustc_middle::ty::{GeneratorDiagnosticData, ParameterizedOverTcx, TyCtxt};
+use rustc_serialize::opaque::FileEncoder;
+use rustc_session::config::SymbolManglingVersion;
+use rustc_session::cstore::{CrateDepKind, ForeignModule, LinkagePreference, NativeLib};
+use rustc_span::edition::Edition;
+use rustc_span::hygiene::{ExpnIndex, MacroKind};
+use rustc_span::symbol::{Ident, Symbol};
+use rustc_span::{self, ExpnData, ExpnHash, ExpnId, Span};
+use rustc_target::spec::{PanicStrategy, TargetTriple};
+
+use std::marker::PhantomData;
+use std::num::NonZeroUsize;
+
+pub use decoder::provide_extern;
+use decoder::DecodeContext;
+pub(crate) use decoder::{CrateMetadata, CrateNumMap, MetadataBlob};
+use encoder::EncodeContext;
+pub use encoder::{encode_metadata, EncodedMetadata};
+use rustc_span::hygiene::SyntaxContextData;
+
+mod decoder;
+mod def_path_hash_map;
+mod encoder;
+mod table;
+
+pub(crate) fn rustc_version() -> String {
+ format!("rustc {}", option_env!("CFG_VERSION").unwrap_or("unknown version"))
+}
+
+/// Metadata encoding version.
+/// N.B., increment this if you change the format of metadata such that
+/// the rustc version can't be found to compare with `rustc_version()`.
+const METADATA_VERSION: u8 = 6;
+
+/// Metadata header which includes `METADATA_VERSION`.
+///
+/// This header is followed by the position of the `CrateRoot`,
+/// which is encoded as a 32-bit big-endian unsigned integer,
+/// and further followed by the rustc version string.
+pub const METADATA_HEADER: &[u8] = &[b'r', b'u', b's', b't', 0, 0, 0, METADATA_VERSION];
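+
+// A sketch of how a reader could locate the `CrateRoot` from a raw blob
+// (illustrative only; the real logic lives in the decoder):
+//
+//     let offset = METADATA_HEADER.len();
+//     let root_pos = u32::from_be_bytes(blob[offset..offset + 4].try_into().unwrap());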
+
+/// A value of type T referred to by its absolute position
+/// in the metadata, and which can be decoded lazily.
+///
+/// Metadata is effectively a tree, encoded in post-order,
+/// and with the root's position written next to the header.
+/// That means every single `LazyValue` points to some previous
+/// location in the metadata and is part of a larger node.
+///
+/// The first `LazyValue` in a node is encoded as the backwards
+/// distance from the position where the containing node starts
+/// to the position the `LazyValue` points to, while the rest
+/// use the forward distance from the previous `LazyValue`.
+/// Distances start at 1, as 0-byte nodes are invalid.
+/// Also invalid are nodes being referred to in a different
+/// order than they were encoded in.
+#[must_use]
+struct LazyValue<T> {
+ position: NonZeroUsize,
+ _marker: PhantomData<fn() -> T>,
+}
+
+impl<T: ParameterizedOverTcx> ParameterizedOverTcx for LazyValue<T> {
+ type Value<'tcx> = LazyValue<T::Value<'tcx>>;
+}
+
+impl<T> LazyValue<T> {
+ fn from_position(position: NonZeroUsize) -> LazyValue<T> {
+ LazyValue { position, _marker: PhantomData }
+ }
+}
+
+/// A list of lazily-decoded values.
+///
+/// Unlike `LazyValue<Vec<T>>`, the length is encoded next to the
+/// position, not at the position, which means that the length
+/// doesn't need to be known before encoding all the elements.
+///
+/// If the length is 0, no position is encoded, but otherwise,
+/// the encoding is that of `LazyValue`, with the distinction that
+/// the minimal distance is the length of the sequence, i.e.
+/// it's assumed there's no 0-byte element in the sequence.
+struct LazyArray<T> {
+ position: NonZeroUsize,
+ num_elems: usize,
+ _marker: PhantomData<fn() -> T>,
+}
+
+impl<T: ParameterizedOverTcx> ParameterizedOverTcx for LazyArray<T> {
+ type Value<'tcx> = LazyArray<T::Value<'tcx>>;
+}
+
+impl<T> LazyArray<T> {
+ fn from_position_and_num_elems(position: NonZeroUsize, num_elems: usize) -> LazyArray<T> {
+ LazyArray { position, num_elems, _marker: PhantomData }
+ }
+
+ fn empty() -> LazyArray<T> {
+ LazyArray::from_position_and_num_elems(NonZeroUsize::new(1).unwrap(), 0)
+ }
+}
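+
+// Sketch: a `LazyArray<u32>` whose payload starts at byte 100 and has four
+// elements is represented in memory purely as `{ position: 100, num_elems: 4 }`;
+// the underlying bytes are only read when the array is actually decoded.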
+
+/// A list of lazily-decoded values, with the added capability of random access.
+///
+/// Random-access table (i.e. offering constant-time `get`/`set`), similar to
+/// `LazyArray<T>`, but without requiring encoding or decoding all the values
+/// eagerly and in-order.
+struct LazyTable<I, T> {
+ position: NonZeroUsize,
+ encoded_size: usize,
+ _marker: PhantomData<fn(I) -> T>,
+}
+
+impl<I: 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for LazyTable<I, T> {
+ type Value<'tcx> = LazyTable<I, T::Value<'tcx>>;
+}
+
+impl<I, T> LazyTable<I, T> {
+ fn from_position_and_encoded_size(
+ position: NonZeroUsize,
+ encoded_size: usize,
+ ) -> LazyTable<I, T> {
+ LazyTable { position, encoded_size, _marker: PhantomData }
+ }
+}
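+
+// Sketch: entries are stored with a fixed byte width (see `FixedSizeEncoding`
+// in `table.rs`), which is what lets `get(i)` seek straight to roughly
+// `position + i * WIDTH` instead of decoding all preceding entries.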
+
+impl<T> Copy for LazyValue<T> {}
+impl<T> Clone for LazyValue<T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<T> Copy for LazyArray<T> {}
+impl<T> Clone for LazyArray<T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<I, T> Copy for LazyTable<I, T> {}
+impl<I, T> Clone for LazyTable<I, T> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
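+
+// The manual `Copy`/`Clone` impls above avoid the `T: Copy`/`T: Clone` bounds
+// that `#[derive]` would introduce; the wrappers only store positions.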
+
+/// Encoding / decoding state for `Lazy`s (`LazyValue`, `LazyArray`, and `LazyTable`).
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+enum LazyState {
+ /// Outside of a metadata node.
+ NoNode,
+
+ /// Inside a metadata node, and before any `Lazy`s.
+ /// The position is that of the node itself.
+ NodeStart(NonZeroUsize),
+
+ /// Inside a metadata node, after a previous `Lazy`.
+ /// The position is where that previous `Lazy` would start.
+ Previous(NonZeroUsize),
+}
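+
+// How a relative distance resolves to an absolute position, roughly
+// (a decoder-side sketch, not the exact code):
+//
+//     let position = match lazy_state {
+//         LazyState::NoNode => bug!("`Lazy` read outside of a metadata node"),
+//         // First `Lazy` in the node: walk backwards from the node start.
+//         LazyState::NodeStart(start) => start.get() - distance,
+//         // Subsequent `Lazy`s: walk forwards from the previous one.
+//         LazyState::Previous(last) => last.get() + distance,
+//     };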
+
+type SyntaxContextTable = LazyTable<u32, LazyValue<SyntaxContextData>>;
+type ExpnDataTable = LazyTable<ExpnIndex, LazyValue<ExpnData>>;
+type ExpnHashTable = LazyTable<ExpnIndex, LazyValue<ExpnHash>>;
+
+#[derive(MetadataEncodable, MetadataDecodable)]
+pub(crate) struct ProcMacroData {
+ proc_macro_decls_static: DefIndex,
+ stability: Option<attr::Stability>,
+ macros: LazyArray<DefIndex>,
+}
+
+/// Serialized metadata for a crate.
+/// When compiling a proc-macro crate, we encode many of
+/// the `LazyArray<T>` fields as `Lazy::empty()`. This serves two purposes:
+///
+/// 1. We avoid performing unnecessary work. Proc-macro crates can only
+/// export proc-macro functions, which are compiled into a shared library.
+/// As a result, a large amount of the information we normally store
+/// (e.g. optimized MIR) is unneeded by downstream crates.
+/// 2. We avoid serializing invalid `CrateNum`s. When we deserialize
+/// a proc-macro crate, we don't load any of its dependencies (since we
+/// just need to invoke a native function from the shared library).
+/// This means that any foreign `CrateNum`s that we serialize cannot be
+/// deserialized, since we will not know how to map them into the current
+/// compilation session. If we were to serialize a proc-macro crate like
+/// a normal crate, much of what we serialized would be unusable in addition
+/// to being unused.
+#[derive(MetadataEncodable, MetadataDecodable)]
+pub(crate) struct CrateRoot {
+ name: Symbol,
+ triple: TargetTriple,
+ extra_filename: String,
+ hash: Svh,
+ stable_crate_id: StableCrateId,
+ required_panic_strategy: Option<PanicStrategy>,
+ panic_in_drop_strategy: PanicStrategy,
+ edition: Edition,
+ has_global_allocator: bool,
+ has_panic_handler: bool,
+ has_default_lib_allocator: bool,
+
+ crate_deps: LazyArray<CrateDep>,
+ dylib_dependency_formats: LazyArray<Option<LinkagePreference>>,
+ lib_features: LazyArray<(Symbol, Option<Symbol>)>,
+ stability_implications: LazyArray<(Symbol, Symbol)>,
+ lang_items: LazyArray<(DefIndex, usize)>,
+ lang_items_missing: LazyArray<lang_items::LangItem>,
+ diagnostic_items: LazyArray<(Symbol, DefIndex)>,
+ native_libraries: LazyArray<NativeLib>,
+ foreign_modules: LazyArray<ForeignModule>,
+ traits: LazyArray<DefIndex>,
+ impls: LazyArray<TraitImpls>,
+ incoherent_impls: LazyArray<IncoherentImpls>,
+ interpret_alloc_index: LazyArray<u32>,
+ proc_macro_data: Option<ProcMacroData>,
+
+ tables: LazyTables,
+ debugger_visualizers: LazyArray<rustc_span::DebuggerVisualizerFile>,
+
+ exported_symbols: LazyArray<(ExportedSymbol<'static>, SymbolExportInfo)>,
+
+ syntax_contexts: SyntaxContextTable,
+ expn_data: ExpnDataTable,
+ expn_hashes: ExpnHashTable,
+
+ def_path_hash_map: LazyValue<DefPathHashMapRef<'static>>,
+
+ source_map: LazyArray<rustc_span::SourceFile>,
+
+ compiler_builtins: bool,
+ needs_allocator: bool,
+ needs_panic_runtime: bool,
+ no_builtins: bool,
+ panic_runtime: bool,
+ profiler_runtime: bool,
+ symbol_mangling_version: SymbolManglingVersion,
+}
+
+/// On-disk representation of `DefId`.
+/// This creates a type-safe way to enforce that we remap the CrateNum between the on-disk
+/// representation and the compilation session.
+#[derive(Copy, Clone)]
+pub(crate) struct RawDefId {
+ krate: u32,
+ index: u32,
+}
+
+impl Into<RawDefId> for DefId {
+ fn into(self) -> RawDefId {
+ RawDefId { krate: self.krate.as_u32(), index: self.index.as_u32() }
+ }
+}
+
+impl RawDefId {
+ /// This exists so that `provide_one!` is happy
+ fn decode(self, meta: (CrateMetadataRef<'_>, TyCtxt<'_>)) -> DefId {
+ self.decode_from_cdata(meta.0)
+ }
+
+ fn decode_from_cdata(self, cdata: CrateMetadataRef<'_>) -> DefId {
+ let krate = CrateNum::from_u32(self.krate);
+ let krate = cdata.map_encoded_cnum_to_current(krate);
+ DefId { krate, index: DefIndex::from_u32(self.index) }
+ }
+}
+
+#[derive(Encodable, Decodable)]
+pub(crate) struct CrateDep {
+ pub name: Symbol,
+ pub hash: Svh,
+ pub host_hash: Option<Svh>,
+ pub kind: CrateDepKind,
+ pub extra_filename: String,
+}
+
+#[derive(MetadataEncodable, MetadataDecodable)]
+pub(crate) struct TraitImpls {
+ trait_id: (u32, DefIndex),
+ impls: LazyArray<(DefIndex, Option<SimplifiedType>)>,
+}
+
+#[derive(MetadataEncodable, MetadataDecodable)]
+pub(crate) struct IncoherentImpls {
+ self_ty: SimplifiedType,
+ impls: LazyArray<DefIndex>,
+}
+
+/// Define `LazyTables` and `TableBuilders` at the same time.
+macro_rules! define_tables {
+ ($($name:ident: Table<$IDX:ty, $T:ty>),+ $(,)?) => {
+ #[derive(MetadataEncodable, MetadataDecodable)]
+ pub(crate) struct LazyTables {
+ $($name: LazyTable<$IDX, $T>),+
+ }
+
+ #[derive(Default)]
+ struct TableBuilders {
+ $($name: TableBuilder<$IDX, $T>),+
+ }
+
+ impl TableBuilders {
+ fn encode(&self, buf: &mut FileEncoder) -> LazyTables {
+ LazyTables {
+ $($name: self.$name.encode(buf)),+
+ }
+ }
+ }
+ }
+}
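+
+// For a single entry such as `kind: Table<DefIndex, LazyValue<EntryKind>>`,
+// the macro expands to roughly:
+//
+//     pub(crate) struct LazyTables {
+//         kind: LazyTable<DefIndex, LazyValue<EntryKind>>,
+//         // ...one field per declared table...
+//     }
+//
+//     #[derive(Default)]
+//     struct TableBuilders {
+//         kind: TableBuilder<DefIndex, LazyValue<EntryKind>>,
+//         // ...
+//     }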
+
+define_tables! {
+ kind: Table<DefIndex, LazyValue<EntryKind>>,
+ attributes: Table<DefIndex, LazyArray<ast::Attribute>>,
+ children: Table<DefIndex, LazyArray<DefIndex>>,
+
+ opt_def_kind: Table<DefIndex, DefKind>,
+ visibility: Table<DefIndex, LazyValue<ty::Visibility>>,
+ def_span: Table<DefIndex, LazyValue<Span>>,
+ def_ident_span: Table<DefIndex, LazyValue<Span>>,
+ lookup_stability: Table<DefIndex, LazyValue<attr::Stability>>,
+ lookup_const_stability: Table<DefIndex, LazyValue<attr::ConstStability>>,
+ lookup_deprecation_entry: Table<DefIndex, LazyValue<attr::Deprecation>>,
+ // As an optimization, a missing entry indicates an empty `&[]`.
+ explicit_item_bounds: Table<DefIndex, LazyArray<(ty::Predicate<'static>, Span)>>,
+ explicit_predicates_of: Table<DefIndex, LazyValue<ty::GenericPredicates<'static>>>,
+ generics_of: Table<DefIndex, LazyValue<ty::Generics>>,
+ // As an optimization, a missing entry indicates an empty `&[]`.
+ inferred_outlives_of: Table<DefIndex, LazyArray<(ty::Predicate<'static>, Span)>>,
+ super_predicates_of: Table<DefIndex, LazyValue<ty::GenericPredicates<'static>>>,
+ type_of: Table<DefIndex, LazyValue<Ty<'static>>>,
+ variances_of: Table<DefIndex, LazyArray<ty::Variance>>,
+ fn_sig: Table<DefIndex, LazyValue<ty::PolyFnSig<'static>>>,
+ codegen_fn_attrs: Table<DefIndex, LazyValue<CodegenFnAttrs>>,
+ impl_trait_ref: Table<DefIndex, LazyValue<ty::TraitRef<'static>>>,
+ const_param_default: Table<DefIndex, LazyValue<rustc_middle::ty::Const<'static>>>,
+ optimized_mir: Table<DefIndex, LazyValue<mir::Body<'static>>>,
+ mir_for_ctfe: Table<DefIndex, LazyValue<mir::Body<'static>>>,
+ promoted_mir: Table<DefIndex, LazyValue<IndexVec<mir::Promoted, mir::Body<'static>>>>,
+ // FIXME(compiler-errors): Why isn't this a LazyArray?
+ thir_abstract_const: Table<DefIndex, LazyValue<&'static [ty::abstract_const::Node<'static>]>>,
+ impl_parent: Table<DefIndex, RawDefId>,
+ impl_polarity: Table<DefIndex, ty::ImplPolarity>,
+ constness: Table<DefIndex, hir::Constness>,
+ is_intrinsic: Table<DefIndex, ()>,
+ impl_defaultness: Table<DefIndex, hir::Defaultness>,
+ // FIXME(eddyb) perhaps compute this on the fly if cheap enough?
+ coerce_unsized_info: Table<DefIndex, LazyValue<ty::adjustment::CoerceUnsizedInfo>>,
+ mir_const_qualif: Table<DefIndex, LazyValue<mir::ConstQualifs>>,
+ rendered_const: Table<DefIndex, LazyValue<String>>,
+ asyncness: Table<DefIndex, hir::IsAsync>,
+ fn_arg_names: Table<DefIndex, LazyArray<Ident>>,
+ generator_kind: Table<DefIndex, LazyValue<hir::GeneratorKind>>,
+ trait_def: Table<DefIndex, LazyValue<ty::TraitDef>>,
+
+ trait_item_def_id: Table<DefIndex, RawDefId>,
+ inherent_impls: Table<DefIndex, LazyArray<DefIndex>>,
+ expn_that_defined: Table<DefIndex, LazyValue<ExpnId>>,
+ unused_generic_params: Table<DefIndex, LazyValue<FiniteBitSet<u32>>>,
+ repr_options: Table<DefIndex, LazyValue<ReprOptions>>,
+ // `def_keys` and `def_path_hashes` represent a lazy version of a
+ // `DefPathTable`. This allows us to avoid deserializing an entire
+ // `DefPathTable` up front, since we may only ever use a few
+ // definitions from any given crate.
+ def_keys: Table<DefIndex, LazyValue<DefKey>>,
+ def_path_hashes: Table<DefIndex, DefPathHash>,
+ proc_macro_quoted_spans: Table<usize, LazyValue<Span>>,
+ generator_diagnostic_data: Table<DefIndex, LazyValue<GeneratorDiagnosticData<'static>>>,
+ may_have_doc_links: Table<DefIndex, ()>,
+}
+
+#[derive(Copy, Clone, MetadataEncodable, MetadataDecodable)]
+enum EntryKind {
+ AnonConst,
+ Const,
+ Static,
+ ForeignStatic,
+ ForeignMod,
+ ForeignType,
+ GlobalAsm,
+ Type,
+ TypeParam,
+ ConstParam,
+ OpaqueTy,
+ Enum,
+ Field,
+ Variant(LazyValue<VariantData>),
+ Struct(LazyValue<VariantData>),
+ Union(LazyValue<VariantData>),
+ Fn,
+ ForeignFn,
+ Mod(LazyArray<ModChild>),
+ MacroDef(LazyValue<ast::MacArgs>, /*macro_rules*/ bool),
+ ProcMacro(MacroKind),
+ Closure,
+ Generator,
+ Trait,
+ Impl,
+ AssocFn { container: ty::AssocItemContainer, has_self: bool },
+ AssocType(ty::AssocItemContainer),
+ AssocConst(ty::AssocItemContainer),
+ TraitAlias,
+}
+
+#[derive(TyEncodable, TyDecodable)]
+struct VariantData {
+ ctor_kind: CtorKind,
+ discr: ty::VariantDiscr,
+    /// If this is a unit or tuple variant/struct, this is the `DefIndex` of its constructor.
+ ctor: Option<DefIndex>,
+ is_non_exhaustive: bool,
+}
+
+#[derive(TyEncodable, TyDecodable)]
+struct GeneratorData<'tcx> {
+ layout: mir::GeneratorLayout<'tcx>,
+}
+
+// Tags used for encoding Spans:
+const TAG_VALID_SPAN_LOCAL: u8 = 0;
+const TAG_VALID_SPAN_FOREIGN: u8 = 1;
+const TAG_PARTIAL_SPAN: u8 = 2;
+
+pub fn provide(providers: &mut Providers) {
+ encoder::provide(providers);
+ decoder::provide(providers);
+}
+
+trivially_parameterized_over_tcx! {
+ VariantData,
+ EntryKind,
+ RawDefId,
+ TraitImpls,
+ IncoherentImpls,
+ CrateRoot,
+ CrateDep,
+}
diff --git a/compiler/rustc_metadata/src/rmeta/table.rs b/compiler/rustc_metadata/src/rmeta/table.rs
new file mode 100644
index 000000000..21841ae25
--- /dev/null
+++ b/compiler/rustc_metadata/src/rmeta/table.rs
@@ -0,0 +1,330 @@
+use crate::rmeta::*;
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_hir::def::{CtorKind, CtorOf};
+use rustc_index::vec::Idx;
+use rustc_middle::ty::ParameterizedOverTcx;
+use rustc_serialize::opaque::FileEncoder;
+use rustc_serialize::Encoder as _;
+use rustc_span::hygiene::MacroKind;
+use std::convert::TryInto;
+use std::marker::PhantomData;
+use std::num::NonZeroUsize;
+use tracing::debug;
+
+/// Helper trait, for encoding to, and decoding from, a fixed number of bytes.
+/// Used mainly for Lazy positions and lengths.
+/// Unchecked invariant: `Self::default()` should encode as `[0; BYTE_LEN]`,
+/// but this has no impact on safety.
+pub(super) trait FixedSizeEncoding: Default {
+    /// This should be `[u8; BYTE_LEN]`.
+ type ByteArray;
+
+ fn from_bytes(b: &Self::ByteArray) -> Self;
+ fn write_to_bytes(self, b: &mut Self::ByteArray);
+}
+
+impl FixedSizeEncoding for u32 {
+ type ByteArray = [u8; 4];
+
+ #[inline]
+ fn from_bytes(b: &[u8; 4]) -> Self {
+ Self::from_le_bytes(*b)
+ }
+
+ #[inline]
+ fn write_to_bytes(self, b: &mut [u8; 4]) {
+ *b = self.to_le_bytes();
+ }
+}
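+
+// Illustrative round-trip (a sketch, not part of the upstream source), using
+// the `u32` impl above:
+//
+//     let mut buf = [0u8; 4];
+//     0xDEAD_BEEFu32.write_to_bytes(&mut buf);
+//     assert_eq!(buf, 0xDEAD_BEEFu32.to_le_bytes());
+//     assert_eq!(u32::from_bytes(&buf), 0xDEAD_BEEF);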
+
+macro_rules! fixed_size_enum {
+ ($ty:ty { $(($($pat:tt)*))* }) => {
+ impl FixedSizeEncoding for Option<$ty> {
+ type ByteArray = [u8;1];
+
+ #[inline]
+ fn from_bytes(b: &[u8;1]) -> Self {
+ use $ty::*;
+ if b[0] == 0 {
+ return None;
+ }
+ match b[0] - 1 {
+ $(${index()} => Some($($pat)*),)*
+                    _ => panic!("Unexpected {} code: {:?}", stringify!($ty), b[0]),
+ }
+ }
+
+ #[inline]
+ fn write_to_bytes(self, b: &mut [u8;1]) {
+ use $ty::*;
+ b[0] = match self {
+ None => 0,
+ $(Some($($pat)*) => 1 + ${index()},)*
+ }
+ }
+ }
+ }
+}
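+
+// Sketch of the expansion (illustrative, not generated code): for an enum
+// listed with variants `(A) (B)`, the macro above encodes `Option<Enum>` in a
+// single byte, using `${index()}` to number the variants:
+//
+//     from_bytes:     0 => None, 1 => Some(A), 2 => Some(B), _ => panic
+//     write_to_bytes: None => 0, Some(A) => 1, Some(B) => 2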
+
+fixed_size_enum! {
+ DefKind {
+ ( Mod )
+ ( Struct )
+ ( Union )
+ ( Enum )
+ ( Variant )
+ ( Trait )
+ ( TyAlias )
+ ( ForeignTy )
+ ( TraitAlias )
+ ( AssocTy )
+ ( TyParam )
+ ( Fn )
+ ( Const )
+ ( ConstParam )
+ ( AssocFn )
+ ( AssocConst )
+ ( ExternCrate )
+ ( Use )
+ ( ForeignMod )
+ ( AnonConst )
+ ( InlineConst )
+ ( OpaqueTy )
+ ( Field )
+ ( LifetimeParam )
+ ( GlobalAsm )
+ ( Impl )
+ ( Closure )
+ ( Generator )
+ ( Static(ast::Mutability::Not) )
+ ( Static(ast::Mutability::Mut) )
+ ( Ctor(CtorOf::Struct, CtorKind::Fn) )
+ ( Ctor(CtorOf::Struct, CtorKind::Const) )
+ ( Ctor(CtorOf::Struct, CtorKind::Fictive) )
+ ( Ctor(CtorOf::Variant, CtorKind::Fn) )
+ ( Ctor(CtorOf::Variant, CtorKind::Const) )
+ ( Ctor(CtorOf::Variant, CtorKind::Fictive) )
+ ( Macro(MacroKind::Bang) )
+ ( Macro(MacroKind::Attr) )
+ ( Macro(MacroKind::Derive) )
+ }
+}
+
+fixed_size_enum! {
+ ty::ImplPolarity {
+ ( Positive )
+ ( Negative )
+ ( Reservation )
+ }
+}
+
+fixed_size_enum! {
+ hir::Constness {
+ ( NotConst )
+ ( Const )
+ }
+}
+
+fixed_size_enum! {
+ hir::Defaultness {
+ ( Final )
+ ( Default { has_value: false } )
+ ( Default { has_value: true } )
+ }
+}
+
+fixed_size_enum! {
+ hir::IsAsync {
+ ( NotAsync )
+ ( Async )
+ }
+}
+
+// We directly encode `DefPathHash` because a `LazyValue` would incur a 25% cost.
+impl FixedSizeEncoding for Option<DefPathHash> {
+ type ByteArray = [u8; 16];
+
+ #[inline]
+ fn from_bytes(b: &[u8; 16]) -> Self {
+ Some(DefPathHash(Fingerprint::from_le_bytes(*b)))
+ }
+
+ #[inline]
+ fn write_to_bytes(self, b: &mut [u8; 16]) {
+ let Some(DefPathHash(fingerprint)) = self else {
+ panic!("Trying to encode absent DefPathHash.")
+ };
+ *b = fingerprint.to_le_bytes();
+ }
+}
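+
+// Caveat on the impl above: `from_bytes` never returns `None`, so this
+// encoding is only meaningful for tables where every entry is present; an
+// absent (all-zero) entry would decode as `Some` of a zero fingerprint.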
+
+// We directly encode `RawDefId` because using a `LazyValue` would incur a 50% overhead in the worst case.
+impl FixedSizeEncoding for Option<RawDefId> {
+ type ByteArray = [u8; 8];
+
+ #[inline]
+ fn from_bytes(b: &[u8; 8]) -> Self {
+ let krate = u32::from_le_bytes(b[0..4].try_into().unwrap());
+ let index = u32::from_le_bytes(b[4..8].try_into().unwrap());
+ if krate == 0 {
+ return None;
+ }
+ Some(RawDefId { krate: krate - 1, index })
+ }
+
+ #[inline]
+ fn write_to_bytes(self, b: &mut [u8; 8]) {
+ match self {
+ None => *b = [0; 8],
+ Some(RawDefId { krate, index }) => {
+ // CrateNum is less than `CrateNum::MAX_AS_U32`.
+ debug_assert!(krate < u32::MAX);
+ b[0..4].copy_from_slice(&(1 + krate).to_le_bytes());
+ b[4..8].copy_from_slice(&index.to_le_bytes());
+ }
+ }
+ }
+}
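+
+// Illustrative round-trip (a sketch, not part of the upstream source),
+// showing the `krate + 1` offset that reserves the all-zero pattern for
+// `None`:
+//
+//     let mut buf = [0u8; 8];
+//     Some(RawDefId { krate: 0, index: 7 }).write_to_bytes(&mut buf);
+//     assert_eq!(buf[0..4], 1u32.to_le_bytes()); // krate 0 is stored as 1
+//     let back = Option::<RawDefId>::from_bytes(&buf).unwrap();
+//     assert_eq!((back.krate, back.index), (0, 7));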
+
+impl FixedSizeEncoding for Option<()> {
+ type ByteArray = [u8; 1];
+
+ #[inline]
+ fn from_bytes(b: &[u8; 1]) -> Self {
+ (b[0] != 0).then(|| ())
+ }
+
+ #[inline]
+ fn write_to_bytes(self, b: &mut [u8; 1]) {
+ b[0] = self.is_some() as u8
+ }
+}
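+
+// The `()` encoding above turns a `Table<DefIndex, ()>` into a plain presence
+// set (one byte per entry, `1` meaning "present"), which is how boolean-like
+// tables such as `is_intrinsic` and `may_have_doc_links` are stored.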
+
+// NOTE(eddyb) there could be an impl for `usize`, which would enable a more
+// generic `LazyValue<T>` impl, but in the general case we might not need / want
+// to fit every `usize` in `u32`.
+impl<T> FixedSizeEncoding for Option<LazyValue<T>> {
+ type ByteArray = [u8; 4];
+
+ #[inline]
+ fn from_bytes(b: &[u8; 4]) -> Self {
+ let position = NonZeroUsize::new(u32::from_bytes(b) as usize)?;
+ Some(LazyValue::from_position(position))
+ }
+
+ #[inline]
+ fn write_to_bytes(self, b: &mut [u8; 4]) {
+ let position = self.map_or(0, |lazy| lazy.position.get());
+ let position: u32 = position.try_into().unwrap();
+ position.write_to_bytes(b)
+ }
+}
+
+impl<T> FixedSizeEncoding for Option<LazyArray<T>> {
+ type ByteArray = [u8; 8];
+
+ #[inline]
+ fn from_bytes(b: &[u8; 8]) -> Self {
+        let ([ref position_bytes, ref meta_bytes], []) = b.as_chunks::<4>() else { panic!() };
+ let position = NonZeroUsize::new(u32::from_bytes(position_bytes) as usize)?;
+ let len = u32::from_bytes(meta_bytes) as usize;
+ Some(LazyArray::from_position_and_num_elems(position, len))
+ }
+
+ #[inline]
+ fn write_to_bytes(self, b: &mut [u8; 8]) {
+        let ([ref mut position_bytes, ref mut meta_bytes], []) = b.as_chunks_mut::<4>() else { panic!() };
+
+ let position = self.map_or(0, |lazy| lazy.position.get());
+ let position: u32 = position.try_into().unwrap();
+ position.write_to_bytes(position_bytes);
+
+ let len = self.map_or(0, |lazy| lazy.num_elems);
+ let len: u32 = len.try_into().unwrap();
+ len.write_to_bytes(meta_bytes);
+ }
+}
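+
+// Layout of the 8-byte `LazyArray` encoding above (both halves little-endian):
+//
+//     bytes 0..4: position  (u32; 0 encodes `None`)
+//     bytes 4..8: num_elems (u32)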
+
+/// Helper for constructing a table's serialization (also see `Table`).
+pub(super) struct TableBuilder<I: Idx, T>
+where
+ Option<T>: FixedSizeEncoding,
+{
+ blocks: IndexVec<I, <Option<T> as FixedSizeEncoding>::ByteArray>,
+ _marker: PhantomData<T>,
+}
+
+impl<I: Idx, T> Default for TableBuilder<I, T>
+where
+ Option<T>: FixedSizeEncoding,
+{
+ fn default() -> Self {
+ TableBuilder { blocks: Default::default(), _marker: PhantomData }
+ }
+}
+
+impl<I: Idx, T> TableBuilder<I, T>
+where
+ Option<T>: FixedSizeEncoding,
+{
+ pub(crate) fn set<const N: usize>(&mut self, i: I, value: T)
+ where
+ Option<T>: FixedSizeEncoding<ByteArray = [u8; N]>,
+ {
+ // FIXME(eddyb) investigate more compact encodings for sparse tables.
+ // On the PR @michaelwoerister mentioned:
+ // > Space requirements could perhaps be optimized by using the HAMT `popcnt`
+ // > trick (i.e. divide things into buckets of 32 or 64 items and then
+ // > store bit-masks of which item in each bucket is actually serialized).
+ self.blocks.ensure_contains_elem(i, || [0; N]);
+ Some(value).write_to_bytes(&mut self.blocks[i]);
+ }
+
+ pub(crate) fn encode<const N: usize>(&self, buf: &mut FileEncoder) -> LazyTable<I, T>
+ where
+ Option<T>: FixedSizeEncoding<ByteArray = [u8; N]>,
+ {
+ let pos = buf.position();
+ for block in &self.blocks {
+ buf.emit_raw_bytes(block);
+ }
+ let num_bytes = self.blocks.len() * N;
+ LazyTable::from_position_and_encoded_size(
+ NonZeroUsize::new(pos as usize).unwrap(),
+ num_bytes,
+ )
+ }
+}
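+
+// Illustrative usage (a sketch with hypothetical values, not from the
+// upstream source):
+//
+//     let mut builder = TableBuilder::<DefIndex, LazyValue<String>>::default();
+//     builder.set(def_index, lazy_string);           // grows `blocks` on demand
+//     let table = builder.encode(&mut file_encoder); // raw bytes + handle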
+
+impl<I: Idx, T: ParameterizedOverTcx> LazyTable<I, T>
+where
+ Option<T>: FixedSizeEncoding,
+{
+    /// Given the metadata, extracts the value at a particular index (if any).
+ #[inline(never)]
+ pub(super) fn get<'a, 'tcx, M: Metadata<'a, 'tcx>, const N: usize>(
+ &self,
+ metadata: M,
+ i: I,
+ ) -> Option<T::Value<'tcx>>
+ where
+ Option<T::Value<'tcx>>: FixedSizeEncoding<ByteArray = [u8; N]>,
+ {
+        debug!("LazyTable::get: index={:?} len={:?}", i, self.encoded_size);
+
+ let start = self.position.get();
+ let bytes = &metadata.blob()[start..start + self.encoded_size];
+ let (bytes, []) = bytes.as_chunks::<N>() else { panic!() };
+ let bytes = bytes.get(i.index())?;
+ FixedSizeEncoding::from_bytes(bytes)
+ }
+
+ /// Size of the table in entries, including possible gaps.
+ pub(super) fn size<const N: usize>(&self) -> usize
+ where
+ for<'tcx> Option<T::Value<'tcx>>: FixedSizeEncoding<ByteArray = [u8; N]>,
+ {
+ self.encoded_size / N
+ }
+}
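+
+// Decoding is random-access: entry `i` occupies the fixed-size chunk
+// `blob[position + i * N .. position + (i + 1) * N]`, so `get` never has to
+// deserialize neighbouring entries.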
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
new file mode 100644
index 000000000..008d2c709
--- /dev/null
+++ b/compiler/rustc_middle/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "rustc_middle"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_arena = { path = "../rustc_arena" }
+bitflags = "1.2.1"
+either = "1.5.0"
+gsgdt = "0.1.2"
+tracing = "0.1"
+rustc-rayon = { version = "0.4.0", optional = true }
+rustc-rayon-core = { version = "0.4.0", optional = true }
+polonius-engine = "0.13.0"
+rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_target = { path = "../rustc_target" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_index = { path = "../rustc_index" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+chalk-ir = "0.80.0"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_session = { path = "../rustc_session" }
+rustc_type_ir = { path = "../rustc_type_ir" }
+rand = "0.8.4"
+rand_xoshiro = "0.6.0"
+
+[features]
+rustc_use_parallel_compiler = ["rustc-rayon", "rustc-rayon-core"]
diff --git a/compiler/rustc_middle/README.md b/compiler/rustc_middle/README.md
new file mode 100644
index 000000000..de58f546c
--- /dev/null
+++ b/compiler/rustc_middle/README.md
@@ -0,0 +1,3 @@
+For more information about how rustc works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/
diff --git a/compiler/rustc_middle/benches/lib.rs b/compiler/rustc_middle/benches/lib.rs
new file mode 100644
index 000000000..237751bcb
--- /dev/null
+++ b/compiler/rustc_middle/benches/lib.rs
@@ -0,0 +1,54 @@
+#![feature(test)]
+
+extern crate test;
+
+use test::Bencher;
+
+// Static/dynamic method dispatch
+
+struct Struct {
+ field: isize,
+}
+
+trait Trait {
+ fn method(&self) -> isize;
+}
+
+impl Trait for Struct {
+ fn method(&self) -> isize {
+ self.field
+ }
+}
+
+#[bench]
+fn trait_vtable_method_call(b: &mut Bencher) {
+ let s = Struct { field: 10 };
+ let t = &s as &dyn Trait;
+ b.iter(|| t.method());
+}
+
+#[bench]
+fn trait_static_method_call(b: &mut Bencher) {
+ let s = Struct { field: 10 };
+ b.iter(|| s.method());
+}
+
+// Overhead of various match forms
+
+#[bench]
+fn option_some(b: &mut Bencher) {
+ let x = Some(10);
+ b.iter(|| match x {
+ Some(y) => y,
+ None => 11,
+ });
+}
+
+#[bench]
+fn vec_pattern(b: &mut Bencher) {
+ let x = [1, 2, 3, 4, 5, 6];
+ b.iter(|| match x {
+ [1, 2, 3, ..] => 10,
+ _ => 11,
+ });
+}
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
new file mode 100644
index 000000000..b94de537d
--- /dev/null
+++ b/compiler/rustc_middle/src/arena.rs
@@ -0,0 +1,108 @@
+/// This higher-order macro declares a list of types which can be allocated by `Arena`.
+///
+/// Specifying the `decode` modifier will add decode impls for `&T` and `&[T]` where `T` is the type
+/// listed. These impls will appear in the `implement_ty_decoder!` macro.
+#[macro_export]
+macro_rules! arena_types {
+ ($macro:path) => (
+ $macro!([
+ [] layout: rustc_target::abi::LayoutS<'tcx>,
+ [] fn_abi: rustc_target::abi::call::FnAbi<'tcx, rustc_middle::ty::Ty<'tcx>>,
+            // `AdtDef`s are interned and compared by address
+ [decode] adt_def: rustc_middle::ty::AdtDefData,
+ [] steal_thir: rustc_data_structures::steal::Steal<rustc_middle::thir::Thir<'tcx>>,
+ [] steal_mir: rustc_data_structures::steal::Steal<rustc_middle::mir::Body<'tcx>>,
+ [decode] mir: rustc_middle::mir::Body<'tcx>,
+ [] steal_promoted:
+ rustc_data_structures::steal::Steal<
+ rustc_index::vec::IndexVec<
+ rustc_middle::mir::Promoted,
+ rustc_middle::mir::Body<'tcx>
+ >
+ >,
+ [decode] promoted:
+ rustc_index::vec::IndexVec<
+ rustc_middle::mir::Promoted,
+ rustc_middle::mir::Body<'tcx>
+ >,
+ [decode] typeck_results: rustc_middle::ty::TypeckResults<'tcx>,
+ [decode] borrowck_result:
+ rustc_middle::mir::BorrowCheckResult<'tcx>,
+ [decode] unsafety_check_result: rustc_middle::mir::UnsafetyCheckResult,
+ [decode] code_region: rustc_middle::mir::coverage::CodeRegion,
+ [] const_allocs: rustc_middle::mir::interpret::Allocation,
+ [] region_scope_tree: rustc_middle::middle::region::ScopeTree,
+ // Required for the incremental on-disk cache
+ [] mir_keys: rustc_hir::def_id::DefIdSet,
+ [] dropck_outlives:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx,
+ rustc_middle::traits::query::DropckOutlivesResult<'tcx>
+ >
+ >,
+ [] normalize_projection_ty:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx,
+ rustc_middle::traits::query::NormalizationResult<'tcx>
+ >
+ >,
+ [] implied_outlives_bounds:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx,
+ Vec<rustc_middle::traits::query::OutlivesBound<'tcx>>
+ >
+ >,
+ [] dtorck_constraint: rustc_middle::traits::query::DropckConstraint<'tcx>,
+ [] candidate_step: rustc_middle::traits::query::CandidateStep<'tcx>,
+ [] autoderef_bad_ty: rustc_middle::traits::query::MethodAutoderefBadTy<'tcx>,
+ [] query_region_constraints: rustc_middle::infer::canonical::QueryRegionConstraints<'tcx>,
+ [] type_op_subtype:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, ()>
+ >,
+ [] type_op_normalize_poly_fn_sig:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::PolyFnSig<'tcx>>
+ >,
+ [] type_op_normalize_fn_sig:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::FnSig<'tcx>>
+ >,
+ [] type_op_normalize_predicate:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::Predicate<'tcx>>
+ >,
+ [] type_op_normalize_ty:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::Ty<'tcx>>
+ >,
+ [] all_traits: Vec<rustc_hir::def_id::DefId>,
+ [] privacy_access_levels: rustc_middle::middle::privacy::AccessLevels,
+ [] foreign_module: rustc_session::cstore::ForeignModule,
+ [] foreign_modules: Vec<rustc_session::cstore::ForeignModule>,
+ [] upvars_mentioned: rustc_data_structures::fx::FxIndexMap<rustc_hir::HirId, rustc_hir::Upvar>,
+ [] object_safety_violations: rustc_middle::traits::ObjectSafetyViolation,
+ [] codegen_unit: rustc_middle::mir::mono::CodegenUnit<'tcx>,
+ [decode] attribute: rustc_ast::Attribute,
+ [] name_set: rustc_data_structures::fx::FxHashSet<rustc_span::symbol::Symbol>,
+ [] hir_id_set: rustc_hir::HirIdSet,
+
+ // Interned types
+ [] tys: rustc_data_structures::intern::WithStableHash<rustc_middle::ty::TyS<'tcx>>,
+ [] predicates: rustc_middle::ty::PredicateS<'tcx>,
+ [] consts: rustc_middle::ty::ConstS<'tcx>,
+
+ // Note that this deliberately duplicates items in the `rustc_hir::arena`,
+ // since we need to allocate this type on both the `rustc_hir` arena
+ // (during lowering) and the `librustc_middle` arena (for decoding MIR)
+ [decode] asm_template: rustc_ast::InlineAsmTemplatePiece,
+ [decode] used_trait_imports: rustc_data_structures::fx::FxHashSet<rustc_hir::def_id::LocalDefId>,
+ [decode] is_late_bound_map: rustc_data_structures::fx::FxIndexSet<rustc_hir::def_id::LocalDefId>,
+ [decode] impl_source: rustc_middle::traits::ImplSource<'tcx, ()>,
+
+ [] dep_kind: rustc_middle::dep_graph::DepKindStruct,
+ ]);
+ )
+}
+
+arena_types!(rustc_arena::declare_arena);
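+
+// Illustrative usage (a sketch; exact method names come from the arena
+// generated by `rustc_arena::declare_arena`): the listed types are allocated
+// as e.g. `arena.alloc(value)` or `arena.alloc_from_iter(iter)`, and the
+// `decode`-marked ones additionally get `Decodable` impls for `&T`/`&[T]`.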
diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs
new file mode 100644
index 000000000..2d095438f
--- /dev/null
+++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -0,0 +1,435 @@
+//! Nodes in the dependency graph.
+//!
+//! A node in the [dependency graph] is represented by a [`DepNode`].
+//! A `DepNode` consists of a [`DepKind`] (which
+//! specifies the kind of thing it represents, like a piece of HIR, MIR, etc.)
+//! and a [`Fingerprint`], a 128-bit hash value, the exact meaning of which
+//! depends on the node's `DepKind`. Together, the kind and the fingerprint
+//! fully identify a dependency node, even across multiple compilation sessions.
+//! In other words, the value of the fingerprint does not depend on anything
+//! that is specific to a given compilation session, like an unpredictable
+//! interning key (e.g., `NodeId`, `DefId`, `Symbol`) or the numeric value of a
+//! pointer. The concept behind this could be compared to how git commit hashes
+//! uniquely identify a given commit. The fingerprinting approach has
+//! a few advantages:
+//!
+//! * A `DepNode` can simply be serialized to disk and loaded in another session
+//! without the need to do any "rebasing" (like we have to do for Spans and
+//! NodeIds) or "retracing" (like we had to do for `DefId` in earlier
+//! implementations of the dependency graph).
+//! * A `Fingerprint` is just a bunch of bits, which allows `DepNode` to
+//! implement `Copy`, `Sync`, `Send`, `Freeze`, etc.
+//! * Since we just have a bit pattern, `DepNode` can be mapped from disk into
+//! memory without any post-processing (e.g., "abomination-style" pointer
+//! reconstruction).
+//! * Because a `DepNode` is self-contained, we can instantiate `DepNodes` that
+//! refer to things that do not exist anymore. In previous implementations
+//! `DepNode` contained a `DefId`. A `DepNode` referring to something that
+//! had been removed between the previous and the current compilation session
+//! could not be instantiated because the current compilation session
+//! contained no `DefId` for the thing that had been removed.
+//!
+//! `DepNode` definition happens in the `define_dep_nodes!()` macro. This macro
+//! defines the `DepKind` enum. Each `DepKind` has its own parameters that are
+//! needed at runtime in order to construct a valid `DepNode` fingerprint.
+//! However, only `CompileCodegenUnit` and `CompileMonoItem` are constructed
+//! explicitly (with `make_compile_codegen_unit` and `make_compile_mono_item`).
+//!
+//! Because the macro sees what parameters a given `DepKind` requires, it can
+//! "infer" some properties for each kind of `DepNode`:
+//!
+//! * Whether a `DepNode` of a given kind has any parameters at all. Some
+//! `DepNode`s could represent global concepts with only one value.
+//! * Whether it is possible, in principle, to reconstruct a query key from a
+//! given `DepNode`. Many `DepKind`s only require a single `DefId` parameter,
+//! in which case it is possible to map the node's fingerprint back to the
+//! `DefId` it was computed from. In other cases, too much information gets
+//! lost during fingerprint computation.
+//!
+//! `make_compile_codegen_unit` and `make_compile_mono_item`, together with
+//! `DepNode::new()`, ensure that only valid `DepNode` instances can be
+//! constructed. For example, the API does not allow for constructing
+//! parameterless `DepNode`s with anything other than a zeroed out fingerprint.
+//! More generally speaking, it relieves the user of the `DepNode` API of
+//! having to know how to compute the expected fingerprint for a given set of
+//! node parameters.
+//!
+//! [dependency graph]: https://rustc-dev-guide.rust-lang.org/query.html
+
+use crate::mir::mono::MonoItem;
+use crate::ty::TyCtxt;
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
+use rustc_hir::definitions::DefPathHash;
+use rustc_hir::HirId;
+use rustc_query_system::dep_graph::FingerprintStyle;
+use rustc_span::symbol::Symbol;
+use std::hash::Hash;
+
+pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
+
+/// This struct stores metadata about each DepKind.
+///
+/// Information is retrieved by indexing the `DEP_KINDS` array using the integer value
+/// of the `DepKind`. Overall, this allows us to implement `DepContext` using a manual
+/// jump table instead of large matches.
+pub struct DepKindStruct {
+ /// Anonymous queries cannot be replayed from one compiler invocation to the next.
+ /// When their result is needed, it is recomputed. They are useful for fine-grained
+ /// dependency tracking, and caching within one compiler invocation.
+ pub is_anon: bool,
+
+ /// Eval-always queries do not track their dependencies, and are always recomputed, even if
+ /// their inputs have not changed since the last compiler invocation. The result is still
+ /// cached within one compiler invocation.
+ pub is_eval_always: bool,
+
+ /// Whether the query key can be recovered from the hashed fingerprint.
+ /// See [DepNodeParams] trait for the behaviour of each key type.
+ pub fingerprint_style: FingerprintStyle,
+
+ /// The red/green evaluation system will try to mark a specific DepNode in the
+ /// dependency graph as green by recursively trying to mark the dependencies of
+ /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
+ /// where we don't know if it is red or green and we therefore actually have
+ /// to recompute its value in order to find out. Since the only piece of
+ /// information that we have at that point is the `DepNode` we are trying to
+ /// re-evaluate, we need some way to re-run a query from just that. This is what
+ /// `force_from_dep_node()` implements.
+ ///
+ /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
+ /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
+ /// is usually constructed by computing a stable hash of the query-key that the
+ /// `DepNode` corresponds to. Consequently, it is not in general possible to go
+ /// back from hash to query-key (since hash functions are not reversible). For
+ /// this reason `force_from_dep_node()` is expected to fail from time to time
+ /// because we just cannot find out, from the `DepNode` alone, what the
+ /// corresponding query-key is and therefore cannot re-run the query.
+ ///
+    /// The system deals with this case by letting `try_mark_green` fail, which
+    /// forces the root query to be re-evaluated.
+ ///
+    /// Now, if `force_from_dep_node()` always failed, it would be pretty useless.
+ /// Fortunately, we can use some contextual information that will allow us to
+ /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
+ /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
+ /// valid `DefPathHash`. Since we also always build a huge table that maps every
+ /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
+ /// everything we need to re-run the query.
+ ///
+ /// Take the `mir_promoted` query as an example. Like many other queries, it
+ /// just has a single parameter: the `DefId` of the item it will compute the
+ /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
+    /// with kind `mir_promoted`, we know that the GUID/fingerprint of the `DepNode`
+ /// is actually a `DefPathHash`, and can therefore just look up the corresponding
+ /// `DefId` in `tcx.def_path_hash_to_def_id`.
+ pub force_from_dep_node: Option<fn(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool>,
+
+ /// Invoke a query to put the on-disk cached value in memory.
+ pub try_load_from_on_disk_cache: Option<fn(TyCtxt<'_>, DepNode)>,
+}
+
+impl DepKind {
+ #[inline(always)]
+ pub fn fingerprint_style(self, tcx: TyCtxt<'_>) -> FingerprintStyle {
+ // Only fetch the DepKindStruct once.
+ let data = tcx.query_kind(self);
+ if data.is_anon {
+ return FingerprintStyle::Opaque;
+ }
+ data.fingerprint_style
+ }
+}
+
+macro_rules! define_dep_nodes {
+ (<$tcx:tt>
+ $(
+ [$($attrs:tt)*]
+ $variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
+ ,)*
+ ) => (
+ #[macro_export]
+ macro_rules! make_dep_kind_array {
+ ($mod:ident) => {[ $($mod::$variant()),* ]};
+ }
+
+ /// This enum serves as an index into arrays built by `make_dep_kind_array`.
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
+ #[allow(non_camel_case_types)]
+ pub enum DepKind {
+ $($variant),*
+ }
+
+ fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> {
+ match label {
+ $(stringify!($variant) => Ok(DepKind::$variant),)*
+ _ => Err(()),
+ }
+ }
+
+ /// Contains variant => str representations for constructing
+ /// DepNode groups for tests.
+ #[allow(dead_code, non_upper_case_globals)]
+ pub mod label_strs {
+ $(
+ pub const $variant: &str = stringify!($variant);
+ )*
+ }
+ );
+}
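+
+// Sketch of what the macro generates (illustrative): with the variants passed
+// below, plus one variant per query appended by `rustc_dep_node_append!`,
+//
+//     pub enum DepKind { Null, Red, TraitSelect, CompileCodegenUnit, .. }
+//     dep_kind_from_label_string("Red") == Ok(DepKind::Red)
+//     label_strs::Red == "Red"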
+
+rustc_dep_node_append!([define_dep_nodes!][ <'tcx>
+ // We use this for most things when incr. comp. is turned off.
+ [] Null,
+
+ // We use this to create a forever-red node.
+ [] Red,
+
+ [anon] TraitSelect,
+
+ // WARNING: if `Symbol` is changed, make sure you update `make_compile_codegen_unit` below.
+ [] CompileCodegenUnit(Symbol),
+
+ // WARNING: if `MonoItem` is changed, make sure you update `make_compile_mono_item` below.
+ // Only used by rustc_codegen_cranelift
+ [] CompileMonoItem(MonoItem),
+]);
+
+// WARNING: `construct` is generic and does not know that `CompileCodegenUnit` takes `Symbol`s as keys.
+// Be very careful changing this type signature!
+pub(crate) fn make_compile_codegen_unit(tcx: TyCtxt<'_>, name: Symbol) -> DepNode {
+ DepNode::construct(tcx, DepKind::CompileCodegenUnit, &name)
+}
+
+// WARNING: `construct` is generic and does not know that `CompileMonoItem` takes `MonoItem`s as keys.
+// Be very careful changing this type signature!
+pub(crate) fn make_compile_mono_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mono_item: &MonoItem<'tcx>,
+) -> DepNode {
+ DepNode::construct(tcx, DepKind::CompileMonoItem, mono_item)
+}
+
+pub type DepNode = rustc_query_system::dep_graph::DepNode<DepKind>;
+
+// We keep a lot of `DepNode`s in memory during compilation. It's not
+// required that their size stay the same, but we don't want to change
+// it inadvertently. This assert just ensures we're aware of any change.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+static_assert_size!(DepNode, 18);
+
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+static_assert_size!(DepNode, 24);
+
+pub trait DepNodeExt: Sized {
+ /// Construct a DepNode from the given DepKind and DefPathHash. This
+ /// method will assert that the given DepKind actually requires a
+ /// single DefId/DefPathHash parameter.
+ fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> Self;
+
+ /// Extracts the DefId corresponding to this DepNode. This will work
+ /// if two conditions are met:
+ ///
+ /// 1. The Fingerprint of the DepNode actually is a DefPathHash, and
+ /// 2. the item that the DefPath refers to exists in the current tcx.
+ ///
+ /// Condition (1) is determined by the DepKind variant of the
+ /// DepNode. Condition (2) might not be fulfilled if a DepNode
+ /// refers to something from the previous compilation session that
+ /// has been removed.
+ fn extract_def_id(&self, tcx: TyCtxt<'_>) -> Option<DefId>;
+
+ /// Used in testing
+ fn from_label_string(
+ tcx: TyCtxt<'_>,
+ label: &str,
+ def_path_hash: DefPathHash,
+ ) -> Result<Self, ()>;
+
+ /// Used in testing
+ fn has_label_string(label: &str) -> bool;
+}
+
+impl DepNodeExt for DepNode {
+ /// Construct a DepNode from the given DepKind and DefPathHash. This
+ /// method will assert that the given DepKind actually requires a
+ /// single DefId/DefPathHash parameter.
+ fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
+ debug_assert!(kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash);
+ DepNode { kind, hash: def_path_hash.0.into() }
+ }
+
+ /// Extracts the DefId corresponding to this DepNode. This will work
+ /// if two conditions are met:
+ ///
+ /// 1. The Fingerprint of the DepNode actually is a DefPathHash, and
+ /// 2. the item that the DefPath refers to exists in the current tcx.
+ ///
+ /// Condition (1) is determined by the DepKind variant of the
+ /// DepNode. Condition (2) might not be fulfilled if a DepNode
+ /// refers to something from the previous compilation session that
+ /// has been removed.
+ fn extract_def_id<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
+ if self.kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash {
+ Some(tcx.def_path_hash_to_def_id(DefPathHash(self.hash.into()), &mut || {
+ panic!("Failed to extract DefId: {:?} {}", self.kind, self.hash)
+ }))
+ } else {
+ None
+ }
+ }
+
+ /// Used in testing
+ fn from_label_string(
+ tcx: TyCtxt<'_>,
+ label: &str,
+ def_path_hash: DefPathHash,
+ ) -> Result<DepNode, ()> {
+ let kind = dep_kind_from_label_string(label)?;
+
+ match kind.fingerprint_style(tcx) {
+ FingerprintStyle::Opaque => Err(()),
+ FingerprintStyle::Unit => Ok(DepNode::new_no_params(tcx, kind)),
+ FingerprintStyle::DefPathHash => {
+ Ok(DepNode::from_def_path_hash(tcx, def_path_hash, kind))
+ }
+ }
+ }
+
+ /// Used in testing
+ fn has_label_string(label: &str) -> bool {
+ dep_kind_from_label_string(label).is_ok()
+ }
+}
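+
+// Illustrative round-trip (hypothetical `tcx`, `def_path_hash`, and `kind`;
+// the kind must have `FingerprintStyle::DefPathHash`):
+//
+//     let node = DepNode::from_def_path_hash(tcx, def_path_hash, kind);
+//     // `Some` iff the hash still maps to a `DefId` in the current session:
+//     let def_id = node.extract_def_id(tcx);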
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for () {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::Unit
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, _: TyCtxt<'tcx>) -> Fingerprint {
+ Fingerprint::ZERO
+ }
+
+ #[inline(always)]
+ fn recover(_: TyCtxt<'tcx>, _: &DepNode) -> Option<Self> {
+ Some(())
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for DefId {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ tcx.def_path_hash(*self).0
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ tcx.def_path_str(*self)
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx)
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for LocalDefId {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ self.to_def_id().to_fingerprint(tcx)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ self.to_def_id().to_debug_str(tcx)
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx).map(|id| id.expect_local())
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for CrateNum {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ let def_id = self.as_def_id();
+ def_id.to_fingerprint(tcx)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ tcx.crate_name(*self).to_string()
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx).map(|id| id.krate)
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::Opaque
+ }
+
+ // We actually would not need to specialize the implementation of this
+ // method but it's faster to combine the hashes than to instantiate a full
+ // hashing context and stable-hashing state.
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ let (def_id_0, def_id_1) = *self;
+
+ let def_path_hash_0 = tcx.def_path_hash(def_id_0);
+ let def_path_hash_1 = tcx.def_path_hash(def_id_1);
+
+ def_path_hash_0.0.combine(def_path_hash_1.0)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ let (def_id_0, def_id_1) = *self;
+
+ format!("({}, {})", tcx.def_path_debug_str(def_id_0), tcx.def_path_debug_str(def_id_1))
+ }
+}
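+
+// Note on the impl above: the combined fingerprint is a one-way hash, so the
+// two `DefId`s cannot be recovered from the node. Hence the
+// `FingerprintStyle::Opaque` style and the absence of a `recover` override
+// (the trait's default returns `None`).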
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::Opaque
+ }
+
+ // We actually would not need to specialize the implementation of this
+ // method but it's faster to combine the hashes than to instantiate a full
+ // hashing context and stable-hashing state.
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ let HirId { owner, local_id } = *self;
+
+ let def_path_hash = tcx.def_path_hash(owner.to_def_id());
+ let local_id = Fingerprint::from_smaller_hash(local_id.as_u32().into());
+
+ def_path_hash.0.combine(local_id)
+ }
+}
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
new file mode 100644
index 000000000..c8b3b52b0
--- /dev/null
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -0,0 +1,140 @@
+use crate::ty::{self, TyCtxt};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_session::Session;
+
+#[macro_use]
+mod dep_node;
+
+pub use rustc_query_system::dep_graph::{
+ debug::DepNodeFilter, hash_result, DepContext, DepNodeColor, DepNodeIndex,
+ SerializedDepNodeIndex, WorkProduct, WorkProductId,
+};
+
+pub use dep_node::{label_strs, DepKind, DepKindStruct, DepNode, DepNodeExt};
+pub(crate) use dep_node::{make_compile_codegen_unit, make_compile_mono_item};
+
+pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
+pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
+pub type TaskDepsRef<'a> = rustc_query_system::dep_graph::TaskDepsRef<'a, DepKind>;
+pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
+pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
+pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;
+
+impl rustc_query_system::dep_graph::DepKind for DepKind {
+ const NULL: Self = DepKind::Null;
+ const RED: Self = DepKind::Red;
+
+ fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{:?}(", node.kind)?;
+
+ ty::tls::with_opt(|opt_tcx| {
+ if let Some(tcx) = opt_tcx {
+ if let Some(def_id) = node.extract_def_id(tcx) {
+ write!(f, "{}", tcx.def_path_debug_str(def_id))?;
+ } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*node) {
+ write!(f, "{}", s)?;
+ } else {
+ write!(f, "{}", node.hash)?;
+ }
+ } else {
+ write!(f, "{}", node.hash)?;
+ }
+ Ok(())
+ })?;
+
+ write!(f, ")")
+ }
+
+ fn with_deps<OP, R>(task_deps: TaskDepsRef<'_>, op: OP) -> R
+ where
+ OP: FnOnce() -> R,
+ {
+ ty::tls::with_context(|icx| {
+ let icx = ty::tls::ImplicitCtxt { task_deps, ..icx.clone() };
+
+ ty::tls::enter_context(&icx, |_| op())
+ })
+ }
+
+ fn read_deps<OP>(op: OP)
+ where
+ OP: for<'a> FnOnce(TaskDepsRef<'a>),
+ {
+ ty::tls::with_context_opt(|icx| {
+ let Some(icx) = icx else { return };
+ op(icx.task_deps)
+ })
+ }
+}
+
+impl<'tcx> DepContext for TyCtxt<'tcx> {
+ type DepKind = DepKind;
+
+ #[inline]
+ fn with_stable_hashing_context<R>(&self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R {
+ TyCtxt::with_stable_hashing_context(*self, f)
+ }
+
+ #[inline]
+ fn dep_graph(&self) -> &DepGraph {
+ &self.dep_graph
+ }
+
+ #[inline(always)]
+ fn profiler(&self) -> &SelfProfilerRef {
+ &self.prof
+ }
+
+ #[inline(always)]
+ fn sess(&self) -> &Session {
+ self.sess
+ }
+
+ #[inline(always)]
+ fn fingerprint_style(&self, kind: DepKind) -> rustc_query_system::dep_graph::FingerprintStyle {
+ kind.fingerprint_style(*self)
+ }
+
+ #[inline(always)]
+ fn is_eval_always(&self, kind: DepKind) -> bool {
+ self.query_kind(kind).is_eval_always
+ }
+
+ fn try_force_from_dep_node(&self, dep_node: DepNode) -> bool {
+ debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+ // We must avoid ever having to call `force_from_dep_node()` for a
+ // `DepNode::codegen_unit`:
+ // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+ // would always end up having to evaluate the first caller of the
+ // `codegen_unit` query that *is* reconstructible. This might very well be
+ // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+ // to re-trigger calling the `codegen_unit` query with the right key. At
+ // that point we would already have re-done all the work we are trying to
+ // avoid doing in the first place.
+ // The solution is simple: Just explicitly call the `codegen_unit` query for
+ // each CGU, right after partitioning. This way `try_mark_green` will always
+ // hit the cache instead of having to go through `force_from_dep_node`.
+        // This assertion makes sure we actually keep applying the solution above.
+ debug_assert!(
+ dep_node.kind != DepKind::codegen_unit,
+ "calling force_from_dep_node() on DepKind::codegen_unit"
+ );
+
+ let cb = self.query_kind(dep_node.kind);
+ if let Some(f) = cb.force_from_dep_node {
+ f(*self, dep_node);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn try_load_from_on_disk_cache(&self, dep_node: DepNode) {
+ let cb = self.query_kind(dep_node.kind);
+ if let Some(f) = cb.try_load_from_on_disk_cache {
+ f(*self, dep_node)
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs
new file mode 100644
index 000000000..47b04c33e
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/map/mod.rs
@@ -0,0 +1,1405 @@
+use crate::hir::{ModuleItems, Owner};
+use crate::ty::{DefIdTree, TyCtxt};
+use rustc_ast as ast;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::{par_for_each_in, Send, Sync};
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::definitions::{DefKey, DefPath, DefPathHash};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::*;
+use rustc_index::vec::Idx;
+use rustc_middle::hir::nested_filter;
+use rustc_span::def_id::StableCrateId;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::Span;
+use rustc_target::spec::abi::Abi;
+
+fn fn_decl<'hir>(node: Node<'hir>) -> Option<&'hir FnDecl<'hir>> {
+ match node {
+ Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. })
+ | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. }) => Some(&sig.decl),
+ Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl, .. }), .. })
+ | Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(fn_decl, ..), .. }) => {
+ Some(fn_decl)
+ }
+ _ => None,
+ }
+}
+
+pub fn fn_sig<'hir>(node: Node<'hir>) -> Option<&'hir FnSig<'hir>> {
+ match &node {
+ Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. })
+ | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. }) => Some(sig),
+ _ => None,
+ }
+}
+
+#[inline]
+pub fn associated_body<'hir>(node: Node<'hir>) -> Option<BodyId> {
+ match node {
+ Node::Item(Item {
+ kind: ItemKind::Const(_, body) | ItemKind::Static(.., body) | ItemKind::Fn(.., body),
+ ..
+ })
+ | Node::TraitItem(TraitItem {
+ kind:
+ TraitItemKind::Const(_, Some(body)) | TraitItemKind::Fn(_, TraitFn::Provided(body)),
+ ..
+ })
+ | Node::ImplItem(ImplItem {
+ kind: ImplItemKind::Const(_, body) | ImplItemKind::Fn(_, body),
+ ..
+ })
+ | Node::Expr(Expr { kind: ExprKind::Closure(Closure { body, .. }), .. }) => Some(*body),
+
+ Node::AnonConst(constant) => Some(constant.body),
+
+ _ => None,
+ }
+}
+
+fn is_body_owner<'hir>(node: Node<'hir>, hir_id: HirId) -> bool {
+ match associated_body(node) {
+ Some(b) => b.hir_id == hir_id,
+ None => false,
+ }
+}
+
+#[derive(Copy, Clone)]
+pub struct Map<'hir> {
+ pub(super) tcx: TyCtxt<'hir>,
+}
+
+/// An iterator that walks up the ancestor tree of a given `HirId`.
+/// Constructed using `tcx.hir().parent_iter(hir_id)`.
+pub struct ParentHirIterator<'hir> {
+ current_id: HirId,
+ map: Map<'hir>,
+}
+
+impl<'hir> Iterator for ParentHirIterator<'hir> {
+ type Item = (HirId, Node<'hir>);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.current_id == CRATE_HIR_ID {
+ return None;
+ }
+ loop {
+ // There are nodes that do not have entries, so we need to skip them.
+ let parent_id = self.map.get_parent_node(self.current_id);
+
+ if parent_id == self.current_id {
+ self.current_id = CRATE_HIR_ID;
+ return None;
+ }
+
+ self.current_id = parent_id;
+ if let Some(node) = self.map.find(parent_id) {
+ return Some((parent_id, node));
+ }
+ // If this `HirId` doesn't have an entry, skip it and look for its `parent_id`.
+ }
+ }
+}
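+
+// Illustrative usage (hypothetical `hir_id`): walk outwards until the first
+// enclosing item, skipping ids that have no entry:
+//
+//     for (_, node) in tcx.hir().parent_iter(hir_id) {
+//         if let Node::Item(item) = node {
+//             // `item` is the closest enclosing item
+//             break;
+//         }
+//     }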
+
+/// An iterator that walks up the ancestor tree of a given `HirId`.
+/// Constructed using `tcx.hir().parent_owner_iter(hir_id)`.
+pub struct ParentOwnerIterator<'hir> {
+ current_id: HirId,
+ map: Map<'hir>,
+}
+
+impl<'hir> Iterator for ParentOwnerIterator<'hir> {
+ type Item = (LocalDefId, OwnerNode<'hir>);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.current_id.local_id.index() != 0 {
+ self.current_id.local_id = ItemLocalId::new(0);
+ if let Some(node) = self.map.tcx.hir_owner(self.current_id.owner) {
+ return Some((self.current_id.owner, node.node));
+ }
+ }
+ if self.current_id == CRATE_HIR_ID {
+ return None;
+ }
+ loop {
+ // There are nodes that do not have entries, so we need to skip them.
+ let parent_id = self.map.def_key(self.current_id.owner).parent;
+
+ let parent_id = parent_id.map_or(CRATE_HIR_ID.owner, |local_def_index| {
+ let def_id = LocalDefId { local_def_index };
+ self.map.local_def_id_to_hir_id(def_id).owner
+ });
+ self.current_id = HirId::make_owner(parent_id);
+
+ // If this `HirId` doesn't have an entry, skip it and look for its `parent_id`.
+ if let Some(node) = self.map.tcx.hir_owner(self.current_id.owner) {
+ return Some((self.current_id.owner, node.node));
+ }
+ }
+ }
+}
+
+impl<'hir> Map<'hir> {
+ pub fn krate(self) -> &'hir Crate<'hir> {
+ self.tcx.hir_crate(())
+ }
+
+ pub fn root_module(self) -> &'hir Mod<'hir> {
+ match self.tcx.hir_owner(CRATE_DEF_ID).map(|o| o.node) {
+ Some(OwnerNode::Crate(item)) => item,
+ _ => bug!(),
+ }
+ }
+
+ pub fn items(self) -> impl Iterator<Item = ItemId> + 'hir {
+ self.tcx.hir_crate_items(()).items.iter().copied()
+ }
+
+ pub fn module_items(self, module: LocalDefId) -> impl Iterator<Item = ItemId> + 'hir {
+ self.tcx.hir_module_items(module).items()
+ }
+
+ pub fn par_for_each_item(self, f: impl Fn(ItemId) + Sync + Send) {
+ par_for_each_in(&self.tcx.hir_crate_items(()).items[..], |id| f(*id));
+ }
+
+ pub fn def_key(self, def_id: LocalDefId) -> DefKey {
+ // Accessing the DefKey is ok, since it is part of DefPathHash.
+ self.tcx.definitions_untracked().def_key(def_id)
+ }
+
+ pub fn def_path_from_hir_id(self, id: HirId) -> Option<DefPath> {
+ self.opt_local_def_id(id).map(|def_id| self.def_path(def_id))
+ }
+
+ pub fn def_path(self, def_id: LocalDefId) -> DefPath {
+ // Accessing the DefPath is ok, since it is part of DefPathHash.
+ self.tcx.definitions_untracked().def_path(def_id)
+ }
+
+ #[inline]
+ pub fn def_path_hash(self, def_id: LocalDefId) -> DefPathHash {
+ // Accessing the DefPathHash is ok, it is incr. comp. stable.
+ self.tcx.definitions_untracked().def_path_hash(def_id)
+ }
+
+ #[inline]
+ pub fn local_def_id(self, hir_id: HirId) -> LocalDefId {
+ self.opt_local_def_id(hir_id).unwrap_or_else(|| {
+ bug!(
+ "local_def_id: no entry for `{:?}`, which has a map of `{:?}`",
+ hir_id,
+ self.find(hir_id)
+ )
+ })
+ }
+
+ #[inline]
+ pub fn opt_local_def_id(self, hir_id: HirId) -> Option<LocalDefId> {
+ if hir_id.local_id == ItemLocalId::new(0) {
+ Some(hir_id.owner)
+ } else {
+ self.tcx
+ .hir_owner_nodes(hir_id.owner)
+ .as_owner()?
+ .local_id_to_def_id
+ .get(&hir_id.local_id)
+ .copied()
+ }
+ }
+
+ #[inline]
+ pub fn local_def_id_to_hir_id(self, def_id: LocalDefId) -> HirId {
+ self.tcx.local_def_id_to_hir_id(def_id)
+ }
+
+    /// Do not call this function directly; call the corresponding query instead.
+ pub(super) fn opt_def_kind(self, local_def_id: LocalDefId) -> Option<DefKind> {
+ let hir_id = self.local_def_id_to_hir_id(local_def_id);
+ let def_kind = match self.find(hir_id)? {
+ Node::Item(item) => match item.kind {
+ ItemKind::Static(_, mt, _) => DefKind::Static(mt),
+ ItemKind::Const(..) => DefKind::Const,
+ ItemKind::Fn(..) => DefKind::Fn,
+ ItemKind::Macro(_, macro_kind) => DefKind::Macro(macro_kind),
+ ItemKind::Mod(..) => DefKind::Mod,
+ ItemKind::OpaqueTy(..) => DefKind::OpaqueTy,
+ ItemKind::TyAlias(..) => DefKind::TyAlias,
+ ItemKind::Enum(..) => DefKind::Enum,
+ ItemKind::Struct(..) => DefKind::Struct,
+ ItemKind::Union(..) => DefKind::Union,
+ ItemKind::Trait(..) => DefKind::Trait,
+ ItemKind::TraitAlias(..) => DefKind::TraitAlias,
+ ItemKind::ExternCrate(_) => DefKind::ExternCrate,
+ ItemKind::Use(..) => DefKind::Use,
+ ItemKind::ForeignMod { .. } => DefKind::ForeignMod,
+ ItemKind::GlobalAsm(..) => DefKind::GlobalAsm,
+ ItemKind::Impl { .. } => DefKind::Impl,
+ },
+ Node::ForeignItem(item) => match item.kind {
+ ForeignItemKind::Fn(..) => DefKind::Fn,
+ ForeignItemKind::Static(_, mt) => DefKind::Static(mt),
+ ForeignItemKind::Type => DefKind::ForeignTy,
+ },
+ Node::TraitItem(item) => match item.kind {
+ TraitItemKind::Const(..) => DefKind::AssocConst,
+ TraitItemKind::Fn(..) => DefKind::AssocFn,
+ TraitItemKind::Type(..) => DefKind::AssocTy,
+ },
+ Node::ImplItem(item) => match item.kind {
+ ImplItemKind::Const(..) => DefKind::AssocConst,
+ ImplItemKind::Fn(..) => DefKind::AssocFn,
+ ImplItemKind::TyAlias(..) => DefKind::AssocTy,
+ },
+ Node::Variant(_) => DefKind::Variant,
+ Node::Ctor(variant_data) => {
+ // FIXME(eddyb) is this even possible, if we have a `Node::Ctor`?
+ assert_ne!(variant_data.ctor_hir_id(), None);
+
+ let ctor_of = match self.find(self.get_parent_node(hir_id)) {
+ Some(Node::Item(..)) => def::CtorOf::Struct,
+ Some(Node::Variant(..)) => def::CtorOf::Variant,
+ _ => unreachable!(),
+ };
+ DefKind::Ctor(ctor_of, def::CtorKind::from_hir(variant_data))
+ }
+ Node::AnonConst(_) => {
+ let inline = match self.find(self.get_parent_node(hir_id)) {
+ Some(Node::Expr(&Expr {
+ kind: ExprKind::ConstBlock(ref anon_const), ..
+ })) if anon_const.hir_id == hir_id => true,
+ _ => false,
+ };
+ if inline { DefKind::InlineConst } else { DefKind::AnonConst }
+ }
+ Node::Field(_) => DefKind::Field,
+ Node::Expr(expr) => match expr.kind {
+ ExprKind::Closure(Closure { movability: None, .. }) => DefKind::Closure,
+ ExprKind::Closure(Closure { movability: Some(_), .. }) => DefKind::Generator,
+ _ => bug!("def_kind: unsupported node: {}", self.node_to_string(hir_id)),
+ },
+ Node::GenericParam(param) => match param.kind {
+ GenericParamKind::Lifetime { .. } => DefKind::LifetimeParam,
+ GenericParamKind::Type { .. } => DefKind::TyParam,
+ GenericParamKind::Const { .. } => DefKind::ConstParam,
+ },
+ Node::Crate(_) => DefKind::Mod,
+ Node::Stmt(_)
+ | Node::PathSegment(_)
+ | Node::Ty(_)
+ | Node::TypeBinding(_)
+ | Node::Infer(_)
+ | Node::TraitRef(_)
+ | Node::Pat(_)
+ | Node::Local(_)
+ | Node::Param(_)
+ | Node::Arm(_)
+ | Node::Lifetime(_)
+ | Node::Block(_) => return None,
+ };
+ Some(def_kind)
+ }
+
+ pub fn find_parent_node(self, id: HirId) -> Option<HirId> {
+ if id.local_id == ItemLocalId::from_u32(0) {
+ Some(self.tcx.hir_owner_parent(id.owner))
+ } else {
+ let owner = self.tcx.hir_owner_nodes(id.owner).as_owner()?;
+ let node = owner.nodes[id.local_id].as_ref()?;
+ let hir_id = HirId { owner: id.owner, local_id: node.parent };
+ Some(hir_id)
+ }
+ }
+
+ pub fn get_parent_node(self, hir_id: HirId) -> HirId {
+ self.find_parent_node(hir_id)
+ .unwrap_or_else(|| bug!("No parent for node {:?}", self.node_to_string(hir_id)))
+ }
+
+    /// Retrieves the `Node` corresponding to `id`, returning `None` if it cannot be found.
+ pub fn find(self, id: HirId) -> Option<Node<'hir>> {
+ if id.local_id == ItemLocalId::from_u32(0) {
+ let owner = self.tcx.hir_owner(id.owner)?;
+ Some(owner.node.into())
+ } else {
+ let owner = self.tcx.hir_owner_nodes(id.owner).as_owner()?;
+ let node = owner.nodes[id.local_id].as_ref()?;
+ Some(node.node)
+ }
+ }
+
+    /// Retrieves the `Node` corresponding to `id`, returning `None` if it cannot be found.
+ #[inline]
+ pub fn find_by_def_id(self, id: LocalDefId) -> Option<Node<'hir>> {
+ self.find(self.local_def_id_to_hir_id(id))
+ }
+
+ /// Retrieves the `Node` corresponding to `id`, panicking if it cannot be found.
+ pub fn get(self, id: HirId) -> Node<'hir> {
+ self.find(id).unwrap_or_else(|| bug!("couldn't find hir id {} in the HIR map", id))
+ }
+
+ /// Retrieves the `Node` corresponding to `id`, panicking if it cannot be found.
+ #[inline]
+ pub fn get_by_def_id(self, id: LocalDefId) -> Node<'hir> {
+ self.find_by_def_id(id).unwrap_or_else(|| bug!("couldn't find {:?} in the HIR map", id))
+ }
+
+ pub fn get_if_local(self, id: DefId) -> Option<Node<'hir>> {
+ id.as_local().and_then(|id| self.find(self.local_def_id_to_hir_id(id)))
+ }
+
+ pub fn get_generics(self, id: LocalDefId) -> Option<&'hir Generics<'hir>> {
+ let node = self.tcx.hir_owner(id)?;
+ node.node.generics()
+ }
+
+ pub fn item(self, id: ItemId) -> &'hir Item<'hir> {
+ self.tcx.hir_owner(id.def_id).unwrap().node.expect_item()
+ }
+
+ pub fn trait_item(self, id: TraitItemId) -> &'hir TraitItem<'hir> {
+ self.tcx.hir_owner(id.def_id).unwrap().node.expect_trait_item()
+ }
+
+ pub fn impl_item(self, id: ImplItemId) -> &'hir ImplItem<'hir> {
+ self.tcx.hir_owner(id.def_id).unwrap().node.expect_impl_item()
+ }
+
+ pub fn foreign_item(self, id: ForeignItemId) -> &'hir ForeignItem<'hir> {
+ self.tcx.hir_owner(id.def_id).unwrap().node.expect_foreign_item()
+ }
+
+ pub fn body(self, id: BodyId) -> &'hir Body<'hir> {
+ self.tcx.hir_owner_nodes(id.hir_id.owner).unwrap().bodies[&id.hir_id.local_id]
+ }
+
+ pub fn fn_decl_by_hir_id(self, hir_id: HirId) -> Option<&'hir FnDecl<'hir>> {
+ if let Some(node) = self.find(hir_id) {
+ fn_decl(node)
+ } else {
+ bug!("no node for hir_id `{}`", hir_id)
+ }
+ }
+
+ pub fn fn_sig_by_hir_id(self, hir_id: HirId) -> Option<&'hir FnSig<'hir>> {
+ if let Some(node) = self.find(hir_id) {
+ fn_sig(node)
+ } else {
+ bug!("no node for hir_id `{}`", hir_id)
+ }
+ }
+
+ pub fn enclosing_body_owner(self, hir_id: HirId) -> LocalDefId {
+ for (parent, _) in self.parent_iter(hir_id) {
+ if let Some(body) = self.find(parent).map(associated_body).flatten() {
+ return self.body_owner_def_id(body);
+ }
+ }
+
+ bug!("no `enclosing_body_owner` for hir_id `{}`", hir_id);
+ }
+
+    /// Returns the `HirId` of the definition this body belongs to, i.e., a
+    /// `fn`, `const`, or `static` item (possibly associated), a closure, or a
+    /// `hir::AnonConst`.
+ pub fn body_owner(self, BodyId { hir_id }: BodyId) -> HirId {
+ let parent = self.get_parent_node(hir_id);
+ assert!(self.find(parent).map_or(false, |n| is_body_owner(n, hir_id)));
+ parent
+ }
+
+ pub fn body_owner_def_id(self, id: BodyId) -> LocalDefId {
+ self.local_def_id(self.body_owner(id))
+ }
+
+ /// Given a `LocalDefId`, returns the `BodyId` associated with it,
+ /// if the node is a body owner, otherwise returns `None`.
+ pub fn maybe_body_owned_by(self, id: LocalDefId) -> Option<BodyId> {
+ self.get_if_local(id.to_def_id()).map(associated_body).flatten()
+ }
+
+ /// Given a body owner's id, returns the `BodyId` associated with it.
+ pub fn body_owned_by(self, id: LocalDefId) -> BodyId {
+ self.maybe_body_owned_by(id).unwrap_or_else(|| {
+ let hir_id = self.local_def_id_to_hir_id(id);
+ span_bug!(
+ self.span(hir_id),
+ "body_owned_by: {} has no associated body",
+ self.node_to_string(hir_id)
+ );
+ })
+ }
+
+ pub fn body_param_names(self, id: BodyId) -> impl Iterator<Item = Ident> + 'hir {
+ self.body(id).params.iter().map(|arg| match arg.pat.kind {
+ PatKind::Binding(_, _, ident, _) => ident,
+ _ => Ident::empty(),
+ })
+ }
+
+ /// Returns the `BodyOwnerKind` of this `LocalDefId`.
+ ///
+ /// Panics if `LocalDefId` does not have an associated body.
+ pub fn body_owner_kind(self, def_id: LocalDefId) -> BodyOwnerKind {
+ match self.tcx.def_kind(def_id) {
+ DefKind::Const | DefKind::AssocConst | DefKind::InlineConst | DefKind::AnonConst => {
+ BodyOwnerKind::Const
+ }
+ DefKind::Ctor(..) | DefKind::Fn | DefKind::AssocFn => BodyOwnerKind::Fn,
+ DefKind::Closure | DefKind::Generator => BodyOwnerKind::Closure,
+ DefKind::Static(mt) => BodyOwnerKind::Static(mt),
+ dk => bug!("{:?} is not a body node: {:?}", def_id, dk),
+ }
+ }
+
+ /// Returns the `ConstContext` of the body associated with this `LocalDefId`.
+ ///
+ /// Panics if `LocalDefId` does not have an associated body.
+ ///
+    /// This should only be used for determining the context of a body; a return
+    /// value of `Some` does not always mean that the owner of the body is `const`,
+    /// just that it has to be checked as if it were.
+ pub fn body_const_context(self, def_id: LocalDefId) -> Option<ConstContext> {
+ let ccx = match self.body_owner_kind(def_id) {
+ BodyOwnerKind::Const => ConstContext::Const,
+ BodyOwnerKind::Static(mt) => ConstContext::Static(mt),
+
+ BodyOwnerKind::Fn if self.tcx.is_constructor(def_id.to_def_id()) => return None,
+ BodyOwnerKind::Fn if self.tcx.is_const_fn_raw(def_id.to_def_id()) => {
+ ConstContext::ConstFn
+ }
+ BodyOwnerKind::Fn if self.tcx.is_const_default_method(def_id.to_def_id()) => {
+ ConstContext::ConstFn
+ }
+ BodyOwnerKind::Fn | BodyOwnerKind::Closure => return None,
+ };
+
+ Some(ccx)
+ }
+
+    /// Returns an iterator of the `LocalDefId`s for all body-owners in this
+ /// crate. If you would prefer to iterate over the bodies
+ /// themselves, you can do `self.hir().krate().body_ids.iter()`.
+ pub fn body_owners(self) -> impl Iterator<Item = LocalDefId> + 'hir {
+ self.tcx.hir_crate_items(()).body_owners.iter().copied()
+ }
+
+ pub fn par_body_owners<F: Fn(LocalDefId) + Sync + Send>(self, f: F) {
+ par_for_each_in(&self.tcx.hir_crate_items(()).body_owners[..], |&def_id| f(def_id));
+ }
+
+ pub fn ty_param_owner(self, def_id: LocalDefId) -> LocalDefId {
+ let def_kind = self.tcx.def_kind(def_id);
+ match def_kind {
+ DefKind::Trait | DefKind::TraitAlias => def_id,
+ DefKind::TyParam | DefKind::ConstParam => self.tcx.local_parent(def_id),
+ _ => bug!("ty_param_owner: {:?} is a {:?} not a type parameter", def_id, def_kind),
+ }
+ }
+
+ pub fn ty_param_name(self, def_id: LocalDefId) -> Symbol {
+ let def_kind = self.tcx.def_kind(def_id);
+ match def_kind {
+ DefKind::Trait | DefKind::TraitAlias => kw::SelfUpper,
+ DefKind::TyParam | DefKind::ConstParam => self.tcx.item_name(def_id.to_def_id()),
+ _ => bug!("ty_param_name: {:?} is a {:?} not a type parameter", def_id, def_kind),
+ }
+ }
+
+ pub fn trait_impls(self, trait_did: DefId) -> &'hir [LocalDefId] {
+ self.tcx.all_local_trait_impls(()).get(&trait_did).map_or(&[], |xs| &xs[..])
+ }
+
+ /// Gets the attributes on the crate. This is preferable to
+ /// invoking `krate.attrs` because it registers a tighter
+ /// dep-graph access.
+ pub fn krate_attrs(self) -> &'hir [ast::Attribute] {
+ self.attrs(CRATE_HIR_ID)
+ }
+
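+ /// Returns `true` if the crate has the `#[rustc_coherence_is_core]` attribute.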
+ pub fn rustc_coherence_is_core(self) -> bool {
+ self.krate_attrs().iter().any(|attr| attr.has_name(sym::rustc_coherence_is_core))
+ }
+
+ pub fn get_module(self, module: LocalDefId) -> (&'hir Mod<'hir>, Span, HirId) {
+ let hir_id = HirId::make_owner(module);
+ match self.tcx.hir_owner(module).map(|o| o.node) {
+ Some(OwnerNode::Item(&Item { span, kind: ItemKind::Mod(ref m), .. })) => {
+ (m, span, hir_id)
+ }
+ Some(OwnerNode::Crate(item)) => (item, item.spans.inner_span, hir_id),
+ node => panic!("not a module: {:?}", node),
+ }
+ }
+
+ /// Walks the contents of the local crate. See also `visit_all_item_likes_in_crate`.
+ pub fn walk_toplevel_module(self, visitor: &mut impl Visitor<'hir>) {
+ let (top_mod, span, hir_id) = self.get_module(CRATE_DEF_ID);
+ visitor.visit_mod(top_mod, span, hir_id);
+ }
+
+ /// Walks the attributes in a crate.
+ pub fn walk_attributes(self, visitor: &mut impl Visitor<'hir>) {
+ let krate = self.krate();
+ for info in krate.owners.iter() {
+ if let MaybeOwner::Owner(info) = info {
+ for attrs in info.attrs.map.values() {
+ for a in *attrs {
+ visitor.visit_attribute(a)
+ }
+ }
+ }
+ }
+ }
+
+ /// Visits all item-likes in the crate in some deterministic (but unspecified) order. If you
+ /// need to process every item-like, and don't care about visiting nested items in a particular
+ /// order, then this method is the best choice. If you do care about that nesting, use
+ /// `tcx.hir().walk_toplevel_module` instead.
+ ///
+ /// Note that this function will access HIR for all the item-likes in the crate. If you only
+ /// need to access some of them, it is usually better to manually loop on the iterators
+ /// provided by `tcx.hir_crate_items(())`.
+ ///
+ /// Please see the notes in `intravisit.rs` for more information.
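+ ///
+ /// A hedged sketch of the usual calling pattern (`MyVisitor` is a
+ /// hypothetical visitor type, not part of this API):
+ ///
+ /// ```rust,ignore (illustrative sketch)
+ /// let mut visitor = MyVisitor { tcx };
+ /// tcx.hir().visit_all_item_likes_in_crate(&mut visitor);
+ /// ```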
+ pub fn visit_all_item_likes_in_crate<V>(self, visitor: &mut V)
+ where
+ V: Visitor<'hir>,
+ {
+ let krate = self.tcx.hir_crate_items(());
+
+ for id in krate.items() {
+ visitor.visit_item(self.item(id));
+ }
+
+ for id in krate.trait_items() {
+ visitor.visit_trait_item(self.trait_item(id));
+ }
+
+ for id in krate.impl_items() {
+ visitor.visit_impl_item(self.impl_item(id));
+ }
+
+ for id in krate.foreign_items() {
+ visitor.visit_foreign_item(self.foreign_item(id));
+ }
+ }
+
+ /// This method is the equivalent of `visit_all_item_likes_in_crate` but restricted to
+ /// item-likes in a single module.
+ pub fn visit_item_likes_in_module<V>(self, module: LocalDefId, visitor: &mut V)
+ where
+ V: Visitor<'hir>,
+ {
+ let module = self.tcx.hir_module_items(module);
+
+ for id in module.items() {
+ visitor.visit_item(self.item(id));
+ }
+
+ for id in module.trait_items() {
+ visitor.visit_trait_item(self.trait_item(id));
+ }
+
+ for id in module.impl_items() {
+ visitor.visit_impl_item(self.impl_item(id));
+ }
+
+ for id in module.foreign_items() {
+ visitor.visit_foreign_item(self.foreign_item(id));
+ }
+ }
+
+ pub fn for_each_module(self, mut f: impl FnMut(LocalDefId)) {
+ let crate_items = self.tcx.hir_crate_items(());
+ for module in crate_items.submodules.iter() {
+ f(*module)
+ }
+ }
+
+ #[cfg(not(parallel_compiler))]
+ #[inline]
+ pub fn par_for_each_module(self, f: impl Fn(LocalDefId)) {
+ self.for_each_module(f)
+ }
+
+ #[cfg(parallel_compiler)]
+ pub fn par_for_each_module(self, f: impl Fn(LocalDefId) + Sync) {
+ use rustc_data_structures::sync::{par_iter, ParallelIterator};
+ par_iter_submodules(self.tcx, CRATE_DEF_ID, &f);
+
+ fn par_iter_submodules<F>(tcx: TyCtxt<'_>, module: LocalDefId, f: &F)
+ where
+ F: Fn(LocalDefId) + Sync,
+ {
+ (*f)(module);
+ let items = tcx.hir_module_items(module);
+ par_iter(&items.submodules[..]).for_each(|&sm| par_iter_submodules(tcx, sm, f));
+ }
+ }
+
+ /// Returns an iterator for the nodes in the ancestor tree of the `current_id`
+ /// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
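+ ///
+ /// A minimal sketch (`hir` and `expr_hir_id` are assumed values, shown for
+ /// illustration only):
+ ///
+ /// ```rust,ignore (illustrative sketch)
+ /// // Walk outwards from an expression until the enclosing item is found.
+ /// for (hir_id, node) in hir.parent_iter(expr_hir_id) {
+ ///     if let Node::Item(_) = node {
+ ///         // `hir_id` identifies the enclosing item; stop walking here.
+ ///         break;
+ ///     }
+ /// }
+ /// ```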
+ pub fn parent_iter(self, current_id: HirId) -> ParentHirIterator<'hir> {
+ ParentHirIterator { current_id, map: self }
+ }
+
+ /// Returns an iterator for the HIR *owner* nodes in the ancestor tree of the `current_id`
+ /// until the crate root is reached. Unlike `parent_iter`, this yields one entry per HIR
+ /// owner. Prefer this over your own loop using `get_parent_node`.
+ pub fn parent_owner_iter(self, current_id: HirId) -> ParentOwnerIterator<'hir> {
+ ParentOwnerIterator { current_id, map: self }
+ }
+
+ /// Checks if the node is left-hand side of an assignment.
+ pub fn is_lhs(self, id: HirId) -> bool {
+ match self.find(self.get_parent_node(id)) {
+ Some(Node::Expr(expr)) => match expr.kind {
+ ExprKind::Assign(lhs, _rhs, _span) => lhs.hir_id == id,
+ _ => false,
+ },
+ _ => false,
+ }
+ }
+
+ /// Whether the expression pointed at by `hir_id` belongs to a `const` evaluation context.
+ /// Used exclusively for diagnostics, to avoid suggesting function calls.
+ pub fn is_inside_const_context(self, hir_id: HirId) -> bool {
+ self.body_const_context(self.enclosing_body_owner(hir_id)).is_some()
+ }
+
+ /// Retrieves the `HirId` for `id`'s enclosing method, unless there's a
+ /// `while` or `loop` before reaching it, as block tail returns are not
+ /// available in them.
+ ///
+ /// ```
+ /// fn foo(x: usize) -> bool {
+ /// if x == 1 {
+ /// true // If `get_return_block` gets passed the `id` corresponding
+ /// } else { // to this, it will return `foo`'s `HirId`.
+ /// false
+ /// }
+ /// }
+ /// ```
+ ///
+ /// ```compile_fail,E0308
+ /// fn foo(x: usize) -> bool {
+ /// loop {
+ /// true // If `get_return_block` gets passed the `id` corresponding
+ /// } // to this, it will return `None`.
+ /// false
+ /// }
+ /// ```
+ pub fn get_return_block(self, id: HirId) -> Option<HirId> {
+ let mut iter = self.parent_iter(id).peekable();
+ let mut ignore_tail = false;
+ if let Some(node) = self.find(id) {
+ if let Node::Expr(Expr { kind: ExprKind::Ret(_), .. }) = node {
+ // When dealing with `return` statements, we don't care about climbing only tail
+ // expressions.
+ ignore_tail = true;
+ }
+ }
+ while let Some((hir_id, node)) = iter.next() {
+ if let (Some((_, next_node)), false) = (iter.peek(), ignore_tail) {
+ match next_node {
+ Node::Block(Block { expr: None, .. }) => return None,
+ // The current node is not the tail expression of its parent.
+ Node::Block(Block { expr: Some(e), .. }) if hir_id != e.hir_id => return None,
+ _ => {}
+ }
+ }
+ match node {
+ Node::Item(_)
+ | Node::ForeignItem(_)
+ | Node::TraitItem(_)
+ | Node::Expr(Expr { kind: ExprKind::Closure { .. }, .. })
+ | Node::ImplItem(_) => return Some(hir_id),
+ // Ignore `return`s on the first iteration
+ Node::Expr(Expr { kind: ExprKind::Loop(..) | ExprKind::Ret(..), .. })
+ | Node::Local(_) => {
+ return None;
+ }
+ _ => {}
+ }
+ }
+ None
+ }
+
+ /// Retrieves the `LocalDefId` of `id`'s parent item, or `CRATE_DEF_ID` if no
+ /// parent item is in this map. The "parent item" is the closest parent node
+ /// in the HIR which is recorded by the map and is an item: an item in a
+ /// module, a trait, or an impl.
+ pub fn get_parent_item(self, hir_id: HirId) -> LocalDefId {
+ if let Some((def_id, _node)) = self.parent_owner_iter(hir_id).next() {
+ def_id
+ } else {
+ CRATE_DEF_ID
+ }
+ }
+
+ /// Returns the `LocalDefId` of `id`'s nearest module parent, or `CRATE_DEF_ID`
+ /// if no module parent is in this map.
+ pub(super) fn get_module_parent_node(self, hir_id: HirId) -> LocalDefId {
+ for (def_id, node) in self.parent_owner_iter(hir_id) {
+ if let OwnerNode::Item(&Item { kind: ItemKind::Mod(_), .. }) = node {
+ return def_id;
+ }
+ }
+ CRATE_DEF_ID
+ }
+
+ /// When called on an `if` expression, a match arm tail expression, or a match arm,
+ /// returns the enclosing `if` or `match` expression.
+ ///
+ /// Used by error reporting when there's a type error in an if or match arm caused by the
+ /// expression needing to be unit.
+ pub fn get_if_cause(self, hir_id: HirId) -> Option<&'hir Expr<'hir>> {
+ for (_, node) in self.parent_iter(hir_id) {
+ match node {
+ Node::Item(_)
+ | Node::ForeignItem(_)
+ | Node::TraitItem(_)
+ | Node::ImplItem(_)
+ | Node::Stmt(Stmt { kind: StmtKind::Local(_), .. }) => break,
+ Node::Expr(expr @ Expr { kind: ExprKind::If(..) | ExprKind::Match(..), .. }) => {
+ return Some(expr);
+ }
+ _ => {}
+ }
+ }
+ None
+ }
+
+ /// Returns the nearest enclosing scope. A scope is roughly an item or block.
+ pub fn get_enclosing_scope(self, hir_id: HirId) -> Option<HirId> {
+ for (hir_id, node) in self.parent_iter(hir_id) {
+ if let Node::Item(Item {
+ kind:
+ ItemKind::Fn(..)
+ | ItemKind::Const(..)
+ | ItemKind::Static(..)
+ | ItemKind::Mod(..)
+ | ItemKind::Enum(..)
+ | ItemKind::Struct(..)
+ | ItemKind::Union(..)
+ | ItemKind::Trait(..)
+ | ItemKind::Impl { .. },
+ ..
+ })
+ | Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(..), .. })
+ | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(..), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(..), .. })
+ | Node::Block(_) = node
+ {
+ return Some(hir_id);
+ }
+ }
+ None
+ }
+
+ /// Returns the defining scope for an opaque type definition.
+ pub fn get_defining_scope(self, id: HirId) -> HirId {
+ let mut scope = id;
+ loop {
+ scope = self.get_enclosing_scope(scope).unwrap_or(CRATE_HIR_ID);
+ if scope == CRATE_HIR_ID || !matches!(self.get(scope), Node::Block(_)) {
+ return scope;
+ }
+ }
+ }
+
+ pub fn get_foreign_abi(self, hir_id: HirId) -> Abi {
+ let parent = self.get_parent_item(hir_id);
+ if let Some(node) = self.tcx.hir_owner(parent) {
+ if let OwnerNode::Item(Item { kind: ItemKind::ForeignMod { abi, .. }, .. }) = node.node
+ {
+ return *abi;
+ }
+ }
+ bug!(
+ "expected foreign mod or inlined parent, found {}",
+ self.node_to_string(HirId::make_owner(parent))
+ )
+ }
+
+ pub fn expect_owner(self, id: LocalDefId) -> OwnerNode<'hir> {
+ self.tcx.hir_owner(id).unwrap_or_else(|| bug!("expected owner for {:?}", id)).node
+ }
+
+ pub fn expect_item(self, id: LocalDefId) -> &'hir Item<'hir> {
+ match self.tcx.hir_owner(id) {
+ Some(Owner { node: OwnerNode::Item(item), .. }) => item,
+ _ => bug!("expected item, found {}", self.node_to_string(HirId::make_owner(id))),
+ }
+ }
+
+ pub fn expect_impl_item(self, id: LocalDefId) -> &'hir ImplItem<'hir> {
+ match self.tcx.hir_owner(id) {
+ Some(Owner { node: OwnerNode::ImplItem(item), .. }) => item,
+ _ => bug!("expected impl item, found {}", self.node_to_string(HirId::make_owner(id))),
+ }
+ }
+
+ pub fn expect_trait_item(self, id: LocalDefId) -> &'hir TraitItem<'hir> {
+ match self.tcx.hir_owner(id) {
+ Some(Owner { node: OwnerNode::TraitItem(item), .. }) => item,
+ _ => bug!("expected trait item, found {}", self.node_to_string(HirId::make_owner(id))),
+ }
+ }
+
+ pub fn expect_variant(self, id: HirId) -> &'hir Variant<'hir> {
+ match self.find(id) {
+ Some(Node::Variant(variant)) => variant,
+ _ => bug!("expected variant, found {}", self.node_to_string(id)),
+ }
+ }
+
+ pub fn expect_foreign_item(self, id: LocalDefId) -> &'hir ForeignItem<'hir> {
+ match self.tcx.hir_owner(id) {
+ Some(Owner { node: OwnerNode::ForeignItem(item), .. }) => item,
+ _ => {
+ bug!("expected foreign item, found {}", self.node_to_string(HirId::make_owner(id)))
+ }
+ }
+ }
+
+ pub fn expect_expr(self, id: HirId) -> &'hir Expr<'hir> {
+ match self.find(id) {
+ Some(Node::Expr(expr)) => expr,
+ _ => bug!("expected expr, found {}", self.node_to_string(id)),
+ }
+ }
+
+ #[inline]
+ fn opt_ident(self, id: HirId) -> Option<Ident> {
+ match self.get(id) {
+ Node::Pat(&Pat { kind: PatKind::Binding(_, _, ident, _), .. }) => Some(ident),
+ // A `Ctor` doesn't have an identifier itself, but its parent
+ // struct/variant does. Compare with `hir::Map::opt_span`.
+ Node::Ctor(..) => match self.find(self.get_parent_node(id))? {
+ Node::Item(item) => Some(item.ident),
+ Node::Variant(variant) => Some(variant.ident),
+ _ => unreachable!(),
+ },
+ node => node.ident(),
+ }
+ }
+
+ #[inline]
+ pub(super) fn opt_ident_span(self, id: HirId) -> Option<Span> {
+ self.opt_ident(id).map(|ident| ident.span)
+ }
+
+ #[inline]
+ pub fn opt_name(self, id: HirId) -> Option<Symbol> {
+ self.opt_ident(id).map(|ident| ident.name)
+ }
+
+ pub fn name(self, id: HirId) -> Symbol {
+ self.opt_name(id).unwrap_or_else(|| bug!("no name for {}", self.node_to_string(id)))
+ }
+
+ /// Given a node's `HirId`, returns the list of attributes associated with the
+ /// corresponding AST node.
+ pub fn attrs(self, id: HirId) -> &'hir [ast::Attribute] {
+ self.tcx.hir_attrs(id.owner).get(id.local_id)
+ }
+
+ /// Gets the span of the definition of the specified HIR node.
+ /// This is used by `tcx.def_span`.
+ pub fn span(self, hir_id: HirId) -> Span {
+ self.opt_span(hir_id)
+ .unwrap_or_else(|| bug!("hir::map::Map::span: id not in map: {:?}", hir_id))
+ }
+
+ pub fn opt_span(self, hir_id: HirId) -> Option<Span> {
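+ // Truncates `outer` so that it ends where `end` ends, if `end` (or one of
+ // its ancestor spans) lies inside `outer`; otherwise returns `outer`
+ // unchanged.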
+ fn until_within(outer: Span, end: Span) -> Span {
+ if let Some(end) = end.find_ancestor_inside(outer) {
+ outer.with_hi(end.hi())
+ } else {
+ outer
+ }
+ }
+
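+ // Computes the span from the start of `item_span` through the end of the
+ // item's name (and its generics, when present); unnamed items keep the
+ // full `item_span`.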
+ fn named_span(item_span: Span, ident: Ident, generics: Option<&Generics<'_>>) -> Span {
+ if ident.name != kw::Empty {
+ let mut span = until_within(item_span, ident.span);
+ if let Some(g) = generics
+ && !g.span.is_dummy()
+ && let Some(g_span) = g.span.find_ancestor_inside(item_span)
+ {
+ span = span.to(g_span);
+ }
+ span
+ } else {
+ item_span
+ }
+ }
+
+ let span = match self.find(hir_id)? {
+ // Function-like.
+ Node::Item(Item { kind: ItemKind::Fn(sig, ..), .. })
+ | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, ..), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, ..), .. }) => sig.span,
+ // Constants and Statics.
+ Node::Item(Item {
+ kind:
+ ItemKind::Const(ty, ..)
+ | ItemKind::Static(ty, ..)
+ | ItemKind::Impl(Impl { self_ty: ty, .. }),
+ span: outer_span,
+ ..
+ })
+ | Node::TraitItem(TraitItem {
+ kind: TraitItemKind::Const(ty, ..),
+ span: outer_span,
+ ..
+ })
+ | Node::ImplItem(ImplItem {
+ kind: ImplItemKind::Const(ty, ..),
+ span: outer_span,
+ ..
+ })
+ | Node::ForeignItem(ForeignItem {
+ kind: ForeignItemKind::Static(ty, ..),
+ span: outer_span,
+ ..
+ }) => until_within(*outer_span, ty.span),
+ // With generics and bounds.
+ Node::Item(Item {
+ kind: ItemKind::Trait(_, _, generics, bounds, _),
+ span: outer_span,
+ ..
+ })
+ | Node::TraitItem(TraitItem {
+ kind: TraitItemKind::Type(bounds, _),
+ generics,
+ span: outer_span,
+ ..
+ }) => {
+ let end = if let Some(b) = bounds.last() { b.span() } else { generics.span };
+ until_within(*outer_span, end)
+ }
+ // Other cases.
+ Node::Item(item) => match &item.kind {
+ ItemKind::Use(path, _) => path.span,
+ _ => named_span(item.span, item.ident, item.kind.generics()),
+ },
+ Node::Variant(variant) => named_span(variant.span, variant.ident, None),
+ Node::ImplItem(item) => named_span(item.span, item.ident, Some(item.generics)),
+ Node::ForeignItem(item) => match item.kind {
+ ForeignItemKind::Fn(decl, _, _) => until_within(item.span, decl.output.span()),
+ _ => named_span(item.span, item.ident, None),
+ },
+ Node::Ctor(_) => return self.opt_span(self.get_parent_node(hir_id)),
+ Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl_span, .. }), .. }) => {
+ *fn_decl_span
+ }
+ _ => self.span_with_body(hir_id),
+ };
+ Some(span)
+ }
+
+ /// Like `hir.span()`, but includes the body of items
+ /// (instead of just the item header)
+ pub fn span_with_body(self, hir_id: HirId) -> Span {
+ match self.get(hir_id) {
+ Node::Param(param) => param.span,
+ Node::Item(item) => item.span,
+ Node::ForeignItem(foreign_item) => foreign_item.span,
+ Node::TraitItem(trait_item) => trait_item.span,
+ Node::ImplItem(impl_item) => impl_item.span,
+ Node::Variant(variant) => variant.span,
+ Node::Field(field) => field.span,
+ Node::AnonConst(constant) => self.body(constant.body).value.span,
+ Node::Expr(expr) => expr.span,
+ Node::Stmt(stmt) => stmt.span,
+ Node::PathSegment(seg) => {
+ let ident_span = seg.ident.span;
+ ident_span
+ .with_hi(seg.args.map_or_else(|| ident_span.hi(), |args| args.span_ext.hi()))
+ }
+ Node::Ty(ty) => ty.span,
+ Node::TypeBinding(tb) => tb.span,
+ Node::TraitRef(tr) => tr.path.span,
+ Node::Pat(pat) => pat.span,
+ Node::Arm(arm) => arm.span,
+ Node::Block(block) => block.span,
+ Node::Ctor(..) => self.span_with_body(self.get_parent_node(hir_id)),
+ Node::Lifetime(lifetime) => lifetime.span,
+ Node::GenericParam(param) => param.span,
+ Node::Infer(i) => i.span,
+ Node::Local(local) => local.span,
+ Node::Crate(item) => item.spans.inner_span,
+ }
+ }
+
+ pub fn span_if_local(self, id: DefId) -> Option<Span> {
+ if id.is_local() { Some(self.tcx.def_span(id)) } else { None }
+ }
+
+ pub fn res_span(self, res: Res) -> Option<Span> {
+ match res {
+ Res::Err => None,
+ Res::Local(id) => Some(self.span(id)),
+ res => self.span_if_local(res.opt_def_id()?),
+ }
+ }
+
+ /// Get a representation of this `id` for debugging purposes.
+ /// NOTE: Do NOT use this in diagnostics!
+ pub fn node_to_string(self, id: HirId) -> String {
+ hir_id_to_string(self, id)
+ }
+
+ /// Returns the `HirId` of `N` in `struct Foo<const N: usize = { ... }>` when
+ /// called with the `HirId` for the `{ ... }` anon const.
+ pub fn opt_const_param_default_param_hir_id(self, anon_const: HirId) -> Option<HirId> {
+ match self.get(self.get_parent_node(anon_const)) {
+ Node::GenericParam(GenericParam {
+ hir_id: param_id,
+ kind: GenericParamKind::Const { .. },
+ ..
+ }) => Some(*param_id),
+ _ => None,
+ }
+ }
+}
+
+impl<'hir> intravisit::Map<'hir> for Map<'hir> {
+ fn find(&self, hir_id: HirId) -> Option<Node<'hir>> {
+ (*self).find(hir_id)
+ }
+
+ fn body(&self, id: BodyId) -> &'hir Body<'hir> {
+ (*self).body(id)
+ }
+
+ fn item(&self, id: ItemId) -> &'hir Item<'hir> {
+ (*self).item(id)
+ }
+
+ fn trait_item(&self, id: TraitItemId) -> &'hir TraitItem<'hir> {
+ (*self).trait_item(id)
+ }
+
+ fn impl_item(&self, id: ImplItemId) -> &'hir ImplItem<'hir> {
+ (*self).impl_item(id)
+ }
+
+ fn foreign_item(&self, id: ForeignItemId) -> &'hir ForeignItem<'hir> {
+ (*self).foreign_item(id)
+ }
+}
+
+pub(super) fn crate_hash(tcx: TyCtxt<'_>, crate_num: CrateNum) -> Svh {
+ debug_assert_eq!(crate_num, LOCAL_CRATE);
+ let krate = tcx.hir_crate(());
+ let hir_body_hash = krate.hir_hash;
+
+ let upstream_crates = upstream_crates(tcx);
+
+ let resolutions = tcx.resolutions(());
+
+ // We hash the final, remapped names of all local source files so we
+ // don't have to include the path prefix remapping command-line args.
+ // If we included the full mapping in the SVH, we could only have
+ // reproducible builds by compiling from the same directory. So we just
+ // hash the result of the mapping instead of the mapping itself.
+ let mut source_file_names: Vec<_> = tcx
+ .sess
+ .source_map()
+ .files()
+ .iter()
+ .filter(|source_file| source_file.cnum == LOCAL_CRATE)
+ .map(|source_file| source_file.name_hash)
+ .collect();
+
+ source_file_names.sort_unstable();
+
+ let crate_hash: Fingerprint = tcx.with_stable_hashing_context(|mut hcx| {
+ let mut stable_hasher = StableHasher::new();
+ hir_body_hash.hash_stable(&mut hcx, &mut stable_hasher);
+ upstream_crates.hash_stable(&mut hcx, &mut stable_hasher);
+ source_file_names.hash_stable(&mut hcx, &mut stable_hasher);
+ if tcx.sess.opts.unstable_opts.incremental_relative_spans {
+ let definitions = tcx.definitions_untracked();
+ let mut owner_spans: Vec<_> = krate
+ .owners
+ .iter_enumerated()
+ .filter_map(|(def_id, info)| {
+ let _ = info.as_owner()?;
+ let def_path_hash = definitions.def_path_hash(def_id);
+ let span = resolutions.source_span[def_id];
+ debug_assert_eq!(span.parent(), None);
+ Some((def_path_hash, span))
+ })
+ .collect();
+ owner_spans.sort_unstable_by_key(|bn| bn.0);
+ owner_spans.hash_stable(&mut hcx, &mut stable_hasher);
+ }
+ tcx.sess.opts.dep_tracking_hash(true).hash_stable(&mut hcx, &mut stable_hasher);
+ tcx.sess.local_stable_crate_id().hash_stable(&mut hcx, &mut stable_hasher);
+ // Hash visibility information since it does not appear in HIR.
+ resolutions.visibilities.hash_stable(&mut hcx, &mut stable_hasher);
+ resolutions.has_pub_restricted.hash_stable(&mut hcx, &mut stable_hasher);
+ stable_hasher.finish()
+ });
+
+ Svh::new(crate_hash.to_smaller_hash())
+}
+
+fn upstream_crates(tcx: TyCtxt<'_>) -> Vec<(StableCrateId, Svh)> {
+ let mut upstream_crates: Vec<_> = tcx
+ .crates(())
+ .iter()
+ .map(|&cnum| {
+ let stable_crate_id = tcx.stable_crate_id(cnum);
+ let hash = tcx.crate_hash(cnum);
+ (stable_crate_id, hash)
+ })
+ .collect();
+ upstream_crates.sort_unstable_by_key(|&(stable_crate_id, _)| stable_crate_id);
+ upstream_crates
+}
+
+fn hir_id_to_string(map: Map<'_>, id: HirId) -> String {
+ let id_str = format!(" (hir_id={})", id);
+
+ let path_str = || {
+ // This functionality is used for debugging; try to use `TyCtxt` to get
+ // the user-friendly path, otherwise fall back to stringifying `DefPath`.
+ crate::ty::tls::with_opt(|tcx| {
+ if let Some(tcx) = tcx {
+ let def_id = map.local_def_id(id);
+ tcx.def_path_str(def_id.to_def_id())
+ } else if let Some(path) = map.def_path_from_hir_id(id) {
+ path.data.into_iter().map(|elem| elem.to_string()).collect::<Vec<_>>().join("::")
+ } else {
+ String::from("<missing path>")
+ }
+ })
+ };
+
+ let span_str = || map.tcx.sess.source_map().span_to_snippet(map.span(id)).unwrap_or_default();
+ let node_str = |prefix| format!("{} {}{}", prefix, span_str(), id_str);
+
+ match map.find(id) {
+ Some(Node::Item(item)) => {
+ let item_str = match item.kind {
+ ItemKind::ExternCrate(..) => "extern crate",
+ ItemKind::Use(..) => "use",
+ ItemKind::Static(..) => "static",
+ ItemKind::Const(..) => "const",
+ ItemKind::Fn(..) => "fn",
+ ItemKind::Macro(..) => "macro",
+ ItemKind::Mod(..) => "mod",
+ ItemKind::ForeignMod { .. } => "foreign mod",
+ ItemKind::GlobalAsm(..) => "global asm",
+ ItemKind::TyAlias(..) => "ty",
+ ItemKind::OpaqueTy(..) => "opaque type",
+ ItemKind::Enum(..) => "enum",
+ ItemKind::Struct(..) => "struct",
+ ItemKind::Union(..) => "union",
+ ItemKind::Trait(..) => "trait",
+ ItemKind::TraitAlias(..) => "trait alias",
+ ItemKind::Impl { .. } => "impl",
+ };
+ format!("{} {}{}", item_str, path_str(), id_str)
+ }
+ Some(Node::ForeignItem(_)) => format!("foreign item {}{}", path_str(), id_str),
+ Some(Node::ImplItem(ii)) => match ii.kind {
+ ImplItemKind::Const(..) => {
+ format!("assoc const {} in {}{}", ii.ident, path_str(), id_str)
+ }
+ ImplItemKind::Fn(..) => format!("method {} in {}{}", ii.ident, path_str(), id_str),
+ ImplItemKind::TyAlias(_) => {
+ format!("assoc type {} in {}{}", ii.ident, path_str(), id_str)
+ }
+ },
+ Some(Node::TraitItem(ti)) => {
+ let kind = match ti.kind {
+ TraitItemKind::Const(..) => "assoc constant",
+ TraitItemKind::Fn(..) => "trait method",
+ TraitItemKind::Type(..) => "assoc type",
+ };
+
+ format!("{} {} in {}{}", kind, ti.ident, path_str(), id_str)
+ }
+ Some(Node::Variant(ref variant)) => {
+ format!("variant {} in {}{}", variant.ident, path_str(), id_str)
+ }
+ Some(Node::Field(ref field)) => {
+ format!("field {} in {}{}", field.ident, path_str(), id_str)
+ }
+ Some(Node::AnonConst(_)) => node_str("const"),
+ Some(Node::Expr(_)) => node_str("expr"),
+ Some(Node::Stmt(_)) => node_str("stmt"),
+ Some(Node::PathSegment(_)) => node_str("path segment"),
+ Some(Node::Ty(_)) => node_str("type"),
+ Some(Node::TypeBinding(_)) => node_str("type binding"),
+ Some(Node::TraitRef(_)) => node_str("trait ref"),
+ Some(Node::Pat(_)) => node_str("pat"),
+ Some(Node::Param(_)) => node_str("param"),
+ Some(Node::Arm(_)) => node_str("arm"),
+ Some(Node::Block(_)) => node_str("block"),
+ Some(Node::Infer(_)) => node_str("infer"),
+ Some(Node::Local(_)) => node_str("local"),
+ Some(Node::Ctor(..)) => format!("ctor {}{}", path_str(), id_str),
+ Some(Node::Lifetime(_)) => node_str("lifetime"),
+ Some(Node::GenericParam(ref param)) => format!("generic_param {:?}{}", param, id_str),
+ Some(Node::Crate(..)) => String::from("root_crate"),
+ None => format!("unknown node{}", id_str),
+ }
+}
+
+pub(super) fn hir_module_items(tcx: TyCtxt<'_>, module_id: LocalDefId) -> ModuleItems {
+ let mut collector = ItemCollector::new(tcx, false);
+
+ let (hir_mod, span, hir_id) = tcx.hir().get_module(module_id);
+ collector.visit_mod(hir_mod, span, hir_id);
+
+ let ItemCollector {
+ submodules,
+ items,
+ trait_items,
+ impl_items,
+ foreign_items,
+ body_owners,
+ ..
+ } = collector;
+ return ModuleItems {
+ submodules: submodules.into_boxed_slice(),
+ items: items.into_boxed_slice(),
+ trait_items: trait_items.into_boxed_slice(),
+ impl_items: impl_items.into_boxed_slice(),
+ foreign_items: foreign_items.into_boxed_slice(),
+ body_owners: body_owners.into_boxed_slice(),
+ };
+}
+
+pub(crate) fn hir_crate_items(tcx: TyCtxt<'_>, _: ()) -> ModuleItems {
+ let mut collector = ItemCollector::new(tcx, true);
+
+ // A "crate collector" and "module collector" start at a
+ // module item (the former starts at the crate root) but only
+ // the former needs to collect it. ItemCollector does not do this for us.
+ collector.submodules.push(CRATE_DEF_ID);
+ tcx.hir().walk_toplevel_module(&mut collector);
+
+ let ItemCollector {
+ submodules,
+ items,
+ trait_items,
+ impl_items,
+ foreign_items,
+ body_owners,
+ ..
+ } = collector;
+
+ return ModuleItems {
+ submodules: submodules.into_boxed_slice(),
+ items: items.into_boxed_slice(),
+ trait_items: trait_items.into_boxed_slice(),
+ impl_items: impl_items.into_boxed_slice(),
+ foreign_items: foreign_items.into_boxed_slice(),
+ body_owners: body_owners.into_boxed_slice(),
+ };
+}
+
+struct ItemCollector<'tcx> {
+ // When true, it collects all items in the crate;
+ // otherwise it collects items in some module.
+ crate_collector: bool,
+ tcx: TyCtxt<'tcx>,
+ submodules: Vec<LocalDefId>,
+ items: Vec<ItemId>,
+ trait_items: Vec<TraitItemId>,
+ impl_items: Vec<ImplItemId>,
+ foreign_items: Vec<ForeignItemId>,
+ body_owners: Vec<LocalDefId>,
+}
+
+impl<'tcx> ItemCollector<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, crate_collector: bool) -> ItemCollector<'tcx> {
+ ItemCollector {
+ crate_collector,
+ tcx,
+ submodules: Vec::default(),
+ items: Vec::default(),
+ trait_items: Vec::default(),
+ impl_items: Vec::default(),
+ foreign_items: Vec::default(),
+ body_owners: Vec::default(),
+ }
+ }
+}
+
+impl<'hir> Visitor<'hir> for ItemCollector<'hir> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'hir Item<'hir>) {
+ if associated_body(Node::Item(item)).is_some() {
+ self.body_owners.push(item.def_id);
+ }
+
+ self.items.push(item.item_id());
+
+ // Items that are modules are handled here instead of in visit_mod.
+ if let ItemKind::Mod(module) = &item.kind {
+ self.submodules.push(item.def_id);
+ // A module collector does not recurse inside nested modules.
+ if self.crate_collector {
+ intravisit::walk_mod(self, module, item.hir_id());
+ }
+ } else {
+ intravisit::walk_item(self, item)
+ }
+ }
+
+ fn visit_foreign_item(&mut self, item: &'hir ForeignItem<'hir>) {
+ self.foreign_items.push(item.foreign_item_id());
+ intravisit::walk_foreign_item(self, item)
+ }
+
+ fn visit_anon_const(&mut self, c: &'hir AnonConst) {
+ self.body_owners.push(self.tcx.hir().local_def_id(c.hir_id));
+ intravisit::walk_anon_const(self, c)
+ }
+
+ fn visit_expr(&mut self, ex: &'hir Expr<'hir>) {
+ if matches!(ex.kind, ExprKind::Closure { .. }) {
+ self.body_owners.push(self.tcx.hir().local_def_id(ex.hir_id));
+ }
+ intravisit::walk_expr(self, ex)
+ }
+
+ fn visit_trait_item(&mut self, item: &'hir TraitItem<'hir>) {
+ if associated_body(Node::TraitItem(item)).is_some() {
+ self.body_owners.push(item.def_id);
+ }
+
+ self.trait_items.push(item.trait_item_id());
+ intravisit::walk_trait_item(self, item)
+ }
+
+ fn visit_impl_item(&mut self, item: &'hir ImplItem<'hir>) {
+ if associated_body(Node::ImplItem(item)).is_some() {
+ self.body_owners.push(item.def_id);
+ }
+
+ self.impl_items.push(item.impl_item_id());
+ intravisit::walk_impl_item(self, item)
+ }
+}
diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs
new file mode 100644
index 000000000..211a61471
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/mod.rs
@@ -0,0 +1,182 @@
+//! HIR datatypes. See the [rustc dev guide] for more info.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/hir.html
+
+pub mod map;
+pub mod nested_filter;
+pub mod place;
+
+use crate::ty::query::Providers;
+use crate::ty::{DefIdTree, ImplSubject, TyCtxt};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{par_for_each_in, Send, Sync};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::*;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_span::{ExpnId, DUMMY_SP};
+
+/// Top-level HIR node for the current owner. This only contains the node for which
+/// `HirId::local_id == 0`, and excludes bodies.
+///
+/// This struct exists to encapsulate all access to the hir_owner query in this module, and to
+/// implement HashStable without hashing bodies.
+#[derive(Copy, Clone, Debug)]
+pub struct Owner<'tcx> {
+ node: OwnerNode<'tcx>,
+ hash_without_bodies: Fingerprint,
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Owner<'tcx> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let Owner { node: _, hash_without_bodies } = self;
+ hash_without_bodies.hash_stable(hcx, hasher)
+ }
+}
+
+/// Gather the LocalDefId for each item-like within a module, including items contained within
+/// bodies. The Ids are in visitor order. This is used to partition a pass between modules.
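+///
+/// A hedged sketch of typical use (`module_id` is a hypothetical `LocalDefId`):
+///
+/// ```rust,ignore (illustrative sketch)
+/// let module = tcx.hir_module_items(module_id);
+/// for id in module.items() {
+///     // process each item-like declared in this module
+/// }
+/// ```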
+#[derive(Debug, HashStable, Encodable, Decodable)]
+pub struct ModuleItems {
+ submodules: Box<[LocalDefId]>,
+ items: Box<[ItemId]>,
+ trait_items: Box<[TraitItemId]>,
+ impl_items: Box<[ImplItemId]>,
+ foreign_items: Box<[ForeignItemId]>,
+ body_owners: Box<[LocalDefId]>,
+}
+
+impl ModuleItems {
+ pub fn items(&self) -> impl Iterator<Item = ItemId> + '_ {
+ self.items.iter().copied()
+ }
+
+ pub fn trait_items(&self) -> impl Iterator<Item = TraitItemId> + '_ {
+ self.trait_items.iter().copied()
+ }
+
+ pub fn impl_items(&self) -> impl Iterator<Item = ImplItemId> + '_ {
+ self.impl_items.iter().copied()
+ }
+
+ pub fn foreign_items(&self) -> impl Iterator<Item = ForeignItemId> + '_ {
+ self.foreign_items.iter().copied()
+ }
+
+ pub fn definitions(&self) -> impl Iterator<Item = LocalDefId> + '_ {
+ self.items
+ .iter()
+ .map(|id| id.def_id)
+ .chain(self.trait_items.iter().map(|id| id.def_id))
+ .chain(self.impl_items.iter().map(|id| id.def_id))
+ .chain(self.foreign_items.iter().map(|id| id.def_id))
+ }
+
+ pub fn par_items(&self, f: impl Fn(ItemId) + Send + Sync) {
+ par_for_each_in(&self.items[..], |&id| f(id))
+ }
+
+ pub fn par_trait_items(&self, f: impl Fn(TraitItemId) + Send + Sync) {
+ par_for_each_in(&self.trait_items[..], |&id| f(id))
+ }
+
+ pub fn par_impl_items(&self, f: impl Fn(ImplItemId) + Send + Sync) {
+ par_for_each_in(&self.impl_items[..], |&id| f(id))
+ }
+
+ pub fn par_foreign_items(&self, f: impl Fn(ForeignItemId) + Send + Sync) {
+ par_for_each_in(&self.foreign_items[..], |&id| f(id))
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ #[inline(always)]
+ pub fn hir(self) -> map::Map<'tcx> {
+ map::Map { tcx: self }
+ }
+
+ pub fn parent_module(self, id: HirId) -> LocalDefId {
+ self.parent_module_from_def_id(id.owner)
+ }
+
+ pub fn impl_subject(self, def_id: DefId) -> ImplSubject<'tcx> {
+ self.impl_trait_ref(def_id)
+ .map(ImplSubject::Trait)
+ .unwrap_or_else(|| ImplSubject::Inherent(self.type_of(def_id)))
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.parent_module_from_def_id = |tcx, id| {
+ let hir = tcx.hir();
+ hir.get_module_parent_node(hir.local_def_id_to_hir_id(id))
+ };
+ providers.hir_crate_items = map::hir_crate_items;
+ providers.crate_hash = map::crate_hash;
+ providers.hir_module_items = map::hir_module_items;
+ providers.hir_owner = |tcx, id| {
+ let owner = tcx.hir_crate(()).owners.get(id)?.as_owner()?;
+ let node = owner.node();
+ Some(Owner { node, hash_without_bodies: owner.nodes.hash_without_bodies })
+ };
+ providers.local_def_id_to_hir_id = |tcx, id| {
+ let owner = tcx.hir_crate(()).owners[id].map(|_| ());
+ match owner {
+ MaybeOwner::Owner(_) => HirId::make_owner(id),
+ MaybeOwner::Phantom => bug!("No HirId for {:?}", id),
+ MaybeOwner::NonOwner(hir_id) => hir_id,
+ }
+ };
+ providers.hir_owner_nodes = |tcx, id| tcx.hir_crate(()).owners[id].map(|i| &i.nodes);
+ providers.hir_owner_parent = |tcx, id| {
+ // Accessing the local_parent is ok since its value is hashed as part of `id`'s DefPathHash.
+ tcx.opt_local_parent(id).map_or(CRATE_HIR_ID, |parent| {
+ let mut parent_hir_id = tcx.hir().local_def_id_to_hir_id(parent);
+ if let Some(local_id) =
+ tcx.hir_crate(()).owners[parent_hir_id.owner].unwrap().parenting.get(&id)
+ {
+ parent_hir_id.local_id = *local_id;
+ }
+ parent_hir_id
+ })
+ };
+ providers.hir_attrs =
+ |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs);
+ providers.source_span =
+ |tcx, def_id| tcx.resolutions(()).source_span.get(def_id).copied().unwrap_or(DUMMY_SP);
+ providers.def_span = |tcx, def_id| {
+ let def_id = def_id.expect_local();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ tcx.hir().opt_span(hir_id).unwrap_or(DUMMY_SP)
+ };
+ providers.def_ident_span = |tcx, def_id| {
+ let def_id = def_id.expect_local();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ tcx.hir().opt_ident_span(hir_id)
+ };
+ providers.fn_arg_names = |tcx, id| {
+ let hir = tcx.hir();
+ let def_id = id.expect_local();
+ let hir_id = hir.local_def_id_to_hir_id(def_id);
+ if let Some(body_id) = hir.maybe_body_owned_by(def_id) {
+ tcx.arena.alloc_from_iter(hir.body_param_names(body_id))
+ } else if let Node::TraitItem(&TraitItem {
+ kind: TraitItemKind::Fn(_, TraitFn::Required(idents)),
+ ..
+ }) = hir.get(hir_id)
+ {
+ tcx.arena.alloc_slice(idents)
+ } else {
+ span_bug!(hir.span(hir_id), "fn_arg_names: unexpected item {:?}", id);
+ }
+ };
+ providers.opt_def_kind = |tcx, def_id| tcx.hir().opt_def_kind(def_id.expect_local());
+ providers.all_local_trait_impls = |tcx, ()| &tcx.resolutions(()).trait_impls;
+ providers.expn_that_defined = |tcx, id| {
+ let id = id.expect_local();
+ tcx.resolutions(()).expn_that_defined.get(&id).copied().unwrap_or(ExpnId::root())
+ };
+ providers.in_scope_traits_map =
+ |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map(|owner_info| &owner_info.trait_map);
+}
diff --git a/compiler/rustc_middle/src/hir/nested_filter.rs b/compiler/rustc_middle/src/hir/nested_filter.rs
new file mode 100644
index 000000000..6896837aa
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/nested_filter.rs
@@ -0,0 +1,31 @@
+use rustc_hir::intravisit::nested_filter::NestedFilter;
+
+/// Do not visit nested item-like things, but visit nested things
+/// that are inside of an item-like.
+///
+/// Notably, possible occurrences of bodies in non-item-like things
+/// include: closures/generators, inline `const {}` blocks, and
+/// constant arguments of types, e.g. in `let _: [(); /* HERE */];`.
+///
+/// **This is the most common choice.** A very common pattern is
+/// to use `visit_all_item_likes_in_crate()` as an outer loop,
+/// and to have the visitor that visits the contents of each item
+/// use this setting.
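+///
+/// A minimal sketch of a visitor opting into this filter (the visitor type
+/// is hypothetical):
+///
+/// ```rust,ignore (illustrative sketch)
+/// impl<'hir> Visitor<'hir> for MyVisitor<'hir> {
+///     type NestedFilter = nested_filter::OnlyBodies;
+///
+///     fn nested_visit_map(&mut self) -> Self::Map {
+///         self.tcx.hir()
+///     }
+/// }
+/// ```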
+pub struct OnlyBodies(());
+impl<'hir> NestedFilter<'hir> for OnlyBodies {
+ type Map = crate::hir::map::Map<'hir>;
+ const INTER: bool = false;
+ const INTRA: bool = true;
+}
+
+/// Visits all nested things, including item-likes.
+///
+/// **This is an unusual choice.** It is used when you want to
+/// process everything within its lexical context. Typically you
+/// kick off the visit by doing `walk_krate()`.
+pub struct All(());
+impl<'hir> NestedFilter<'hir> for All {
+ type Map = crate::hir::map::Map<'hir>;
+ const INTER: bool = true;
+ const INTRA: bool = true;
+}
diff --git a/compiler/rustc_middle/src/hir/place.rs b/compiler/rustc_middle/src/hir/place.rs
new file mode 100644
index 000000000..83d3b0100
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/place.rs
@@ -0,0 +1,117 @@
+use crate::ty;
+use crate::ty::Ty;
+
+use rustc_hir::HirId;
+use rustc_target::abi::VariantIdx;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum PlaceBase {
+ /// A temporary variable.
+ Rvalue,
+ /// A named `static` item.
+ StaticItem,
+ /// A named local variable.
+ Local(HirId),
+ /// An upvar referenced by closure env.
+ Upvar(ty::UpvarId),
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum ProjectionKind {
+ /// A dereference of a pointer, reference or `Box<T>` of the given type.
+ Deref,
+
+ /// `B.F` where `B` is the base expression and `F` is
+ /// the field. The field is identified by which variant
+ /// it appears in along with a field index. The variant
+ /// is used for enums.
+ Field(u32, VariantIdx),
+
+ /// Some index like `B[x]`, where `B` is the base
+ /// expression. We don't preserve the index `x` because
+ /// we won't need it.
+ Index,
+
+ /// A subslice covering a range of values like `B[x..y]`.
+ Subslice,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct Projection<'tcx> {
+ /// Type after the projection is applied.
+ pub ty: Ty<'tcx>,
+
+ /// Defines the kind of access made by the projection.
+ pub kind: ProjectionKind,
+}
+
+/// A `Place` represents how a value is located in memory.
+///
+/// This is an HIR version of [`rustc_middle::mir::Place`].
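+///
+/// For example (schematic): given `x: &*const u32`, the place for `**x` has
+/// `base` referring to the local `x` and two `Deref` projections.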
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct Place<'tcx> {
+ /// The type of the `PlaceBase`
+ pub base_ty: Ty<'tcx>,
+ /// The "outermost" place that holds this value.
+ pub base: PlaceBase,
+ /// How this place is derived from the base place.
+ pub projections: Vec<Projection<'tcx>>,
+}
+
+/// A `PlaceWithHirId` represents how a value is located in memory.
+///
+/// This is an HIR version of [`rustc_middle::mir::Place`].
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct PlaceWithHirId<'tcx> {
+ /// `HirId` of the expression or pattern producing this value.
+ pub hir_id: HirId,
+
+ /// Information about the `Place`.
+ pub place: Place<'tcx>,
+}
+
+impl<'tcx> PlaceWithHirId<'tcx> {
+ pub fn new(
+ hir_id: HirId,
+ base_ty: Ty<'tcx>,
+ base: PlaceBase,
+ projections: Vec<Projection<'tcx>>,
+ ) -> PlaceWithHirId<'tcx> {
+ PlaceWithHirId { hir_id, place: Place { base_ty, base, projections } }
+ }
+}
+
+impl<'tcx> Place<'tcx> {
+ /// Returns an iterator of the types that have to be dereferenced to access
+ /// the `Place`.
+ ///
+ /// The types are in the reverse order that they are applied. So if
+ /// `x: &*const u32` and the `Place` is `**x`, then the types returned are
+ /// `*const u32` then `&*const u32`.
+ pub fn deref_tys(&self) -> impl Iterator<Item = Ty<'tcx>> + '_ {
+ self.projections.iter().enumerate().rev().filter_map(move |(index, proj)| {
+ if ProjectionKind::Deref == proj.kind {
+ Some(self.ty_before_projection(index))
+ } else {
+ None
+ }
+ })
+ }
+
+ /// Returns the type of this `Place` after all projections have been applied.
+ pub fn ty(&self) -> Ty<'tcx> {
+ self.projections.last().map_or(self.base_ty, |proj| proj.ty)
+ }
+
+ /// Returns the type of this `Place` immediately before `projection_index`th projection
+ /// is applied.
+ pub fn ty_before_projection(&self, projection_index: usize) -> Ty<'tcx> {
+ assert!(projection_index < self.projections.len());
+ if projection_index == 0 { self.base_ty } else { self.projections[projection_index - 1].ty }
+ }
+}
diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs
new file mode 100644
index 000000000..200de9079
--- /dev/null
+++ b/compiler/rustc_middle/src/infer/canonical.rs
@@ -0,0 +1,363 @@
+//! **Canonicalization** is the key to constructing a query in the
+//! middle of type inference. Ordinarily, it is not possible to store
+//! types from type inference in query keys, because they contain
+//! references to inference variables whose lifetimes are too short
+//! and so forth. Canonicalizing a value T1 using `canonicalize_query`
+//! produces two things:
+//!
+//! - a value T2 where each unbound inference variable has been
+//! replaced with a **canonical variable**;
+//! - a map M (of type `CanonicalVarValues`) from those canonical
+//! variables back to the original.
+//!
+//! We can then do queries using T2. These will give back constraints
+//! on the canonical variables which can be translated, using the map
+//! M, into constraints in our source context. This process of
+//! translating the results back is done by the
+//! `instantiate_query_result` method.
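+//!
+//! As a schematic illustration (not real compiler syntax):
+//!
+//! ```text
+//! T1 = ?T: Foo<'?a>              // original value, with inference variables
+//! T2 = ^0: Foo<'^1>              // canonicalized value
+//! M  = { ^0 -> ?T, '^1 -> '?a }  // CanonicalVarValues, mapping back
+//! ```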
+//!
+//! For a more detailed look at what is happening here, check
+//! out the [chapter in the rustc dev guide][c].
+//!
+//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
+
+use crate::infer::MemberConstraint;
+use crate::ty::subst::GenericArg;
+use crate::ty::{self, BoundVar, List, Region, Ty, TyCtxt};
+use rustc_index::vec::IndexVec;
+use rustc_macros::HashStable;
+use smallvec::SmallVec;
+use std::iter;
+use std::ops::Index;
+
+/// A "canonicalized" type `V` is one where all free inference
+/// variables have been rewritten to "canonical vars". These are
+/// numbered starting from 0 in order of first appearance.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct Canonical<'tcx, V> {
+ pub max_universe: ty::UniverseIndex,
+ pub variables: CanonicalVarInfos<'tcx>,
+ pub value: V,
+}
+
+pub type CanonicalVarInfos<'tcx> = &'tcx List<CanonicalVarInfo<'tcx>>;
+
+/// A set of values corresponding to the canonical variables from some
+/// `Canonical`. You can give these values to
+/// `canonical_value.substitute` to substitute them into the canonical
+/// value at the right places.
+///
+/// When you canonicalize a value `V`, you get back one of these
+/// vectors with the original values that were replaced by canonical
+/// variables. You will need to supply it later to instantiate the
+/// canonicalized query response.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct CanonicalVarValues<'tcx> {
+ pub var_values: IndexVec<BoundVar, GenericArg<'tcx>>,
+}
+
+/// When we canonicalize a value to form a query, we wind up replacing
+/// various parts of it with canonical variables. This struct stores
+/// those replaced bits to remember for when we process the query
+/// result.
+#[derive(Clone, Debug)]
+pub struct OriginalQueryValues<'tcx> {
+ /// Map from the universes that appear in the query to the universes in the
+ /// caller context. For all queries except `evaluate_goal` (used by Chalk),
+ /// we only ever put ROOT values into the query, so this map is very
+ /// simple.
+ pub universe_map: SmallVec<[ty::UniverseIndex; 4]>,
+
+ /// This is equivalent to `CanonicalVarValues`, but using a
+ /// `SmallVec` yields a significant performance win.
+ pub var_values: SmallVec<[GenericArg<'tcx>; 8]>,
+}
+
+impl<'tcx> Default for OriginalQueryValues<'tcx> {
+ fn default() -> Self {
+ let mut universe_map = SmallVec::default();
+ universe_map.push(ty::UniverseIndex::ROOT);
+
+ Self { universe_map, var_values: SmallVec::default() }
+ }
+}
+
+/// Information about a canonical variable that is included with the
+/// canonical value. This is sufficient information for code to create
+/// a copy of the canonical value in some other inference context,
+/// with fresh inference variables replacing the canonical values.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub struct CanonicalVarInfo<'tcx> {
+ pub kind: CanonicalVarKind<'tcx>,
+}
+
+impl<'tcx> CanonicalVarInfo<'tcx> {
+ pub fn universe(&self) -> ty::UniverseIndex {
+ self.kind.universe()
+ }
+
+ pub fn is_existential(&self) -> bool {
+ match self.kind {
+ CanonicalVarKind::Ty(_) => true,
+ CanonicalVarKind::PlaceholderTy(_) => false,
+ CanonicalVarKind::Region(_) => true,
+ CanonicalVarKind::PlaceholderRegion(..) => false,
+ CanonicalVarKind::Const(..) => true,
+ CanonicalVarKind::PlaceholderConst(_, _) => false,
+ }
+ }
+}
+
+/// Describes the "kind" of the canonical variable. This is a "kind"
+/// in the type-theory sense of the term -- i.e., a "meta" type system
+/// that analyzes type-like values.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub enum CanonicalVarKind<'tcx> {
+ /// Some kind of type inference variable.
+ Ty(CanonicalTyVarKind),
+
+ /// A "placeholder" that represents "any type".
+ PlaceholderTy(ty::PlaceholderType),
+
+ /// Region variable `'?R`.
+ Region(ty::UniverseIndex),
+
+ /// A "placeholder" that represents "any region". Created when you
+ /// are solving a goal like `for<'a> T: Foo<'a>` to represent the
+ /// bound region `'a`.
+ PlaceholderRegion(ty::PlaceholderRegion),
+
+ /// Some kind of const inference variable.
+ Const(ty::UniverseIndex, Ty<'tcx>),
+
+ /// A "placeholder" that represents "any const".
+ PlaceholderConst(ty::PlaceholderConst<'tcx>, Ty<'tcx>),
+}
+
+impl<'tcx> CanonicalVarKind<'tcx> {
+ pub fn universe(self) -> ty::UniverseIndex {
+ match self {
+ CanonicalVarKind::Ty(kind) => match kind {
+ CanonicalTyVarKind::General(ui) => ui,
+ CanonicalTyVarKind::Float | CanonicalTyVarKind::Int => ty::UniverseIndex::ROOT,
+ },
+
+ CanonicalVarKind::PlaceholderTy(placeholder) => placeholder.universe,
+ CanonicalVarKind::Region(ui) => ui,
+ CanonicalVarKind::PlaceholderRegion(placeholder) => placeholder.universe,
+ CanonicalVarKind::Const(ui, _) => ui,
+ CanonicalVarKind::PlaceholderConst(placeholder, _) => placeholder.universe,
+ }
+ }
+}
+
+/// Rust actually has more than one category of type variables;
+/// notably, the type variables we create for literals (e.g., 22 or
+/// 22.) can only be instantiated with integral/float types (e.g.,
+/// usize or f32). In order to faithfully reproduce a type, we need to
+/// know what set of types a given type variable can be unified with.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub enum CanonicalTyVarKind {
+ /// General type variable `?T` that can be unified with arbitrary types.
+ General(ty::UniverseIndex),
+
+ /// Integral type variable `?I` (that can only be unified with integral types).
+ Int,
+
+ /// Floating-point type variable `?F` (that can only be unified with float types).
+ Float,
+}
+
+/// After we execute a query with a canonicalized key, we get back a
+/// `Canonical<QueryResponse<..>>`. You can use
+/// `instantiate_query_result` to access the data in this result.
+#[derive(Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct QueryResponse<'tcx, R> {
+ pub var_values: CanonicalVarValues<'tcx>,
+ pub region_constraints: QueryRegionConstraints<'tcx>,
+ pub certainty: Certainty,
+ /// List of opaque types which we tried to compare to another type.
+ /// Inside the query we don't know yet whether the opaque type actually
+ /// should get its hidden type inferred. So we bubble the opaque type
+ /// and the type it was compared against upwards and let the query caller
+ /// handle it.
+ pub opaque_types: Vec<(Ty<'tcx>, Ty<'tcx>)>,
+ pub value: R,
+}
+
+#[derive(Clone, Debug, Default, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct QueryRegionConstraints<'tcx> {
+ pub outlives: Vec<QueryOutlivesConstraint<'tcx>>,
+ pub member_constraints: Vec<MemberConstraint<'tcx>>,
+}
+
+impl QueryRegionConstraints<'_> {
+ /// Represents an empty (trivially true) set of region
+ /// constraints.
+ pub fn is_empty(&self) -> bool {
+ self.outlives.is_empty() && self.member_constraints.is_empty()
+ }
+}
+
+pub type Canonicalized<'tcx, V> = Canonical<'tcx, V>;
+
+pub type CanonicalizedQueryResponse<'tcx, T> = &'tcx Canonical<'tcx, QueryResponse<'tcx, T>>;
+
+/// Indicates whether or not we were able to prove the query to be
+/// true.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum Certainty {
+ /// The query is known to be true, presuming that you apply the
+ /// given `var_values` and the region-constraints are satisfied.
+ Proven,
+
+ /// The query is not known to be true, but also not known to be
+ /// false. The `var_values` represent *either* values that must
+ /// hold in order for the query to be true, or helpful tips that
+ /// *might* make it true. Currently rustc's trait solver cannot
+ /// distinguish the two (e.g., due to our preference for where
+ /// clauses over impls).
+ ///
+ /// After some unification and other inference steps have been done, it makes
+ /// sense to try to prove it again -- of course, at that point, the
+ /// canonical form will be different, making this a distinct
+ /// query.
+ Ambiguous,
+}
+
+impl Certainty {
+ pub fn is_proven(&self) -> bool {
+ match self {
+ Certainty::Proven => true,
+ Certainty::Ambiguous => false,
+ }
+ }
+}
+
+impl<'tcx, R> QueryResponse<'tcx, R> {
+ pub fn is_proven(&self) -> bool {
+ self.certainty.is_proven()
+ }
+}
+
+impl<'tcx, R> Canonical<'tcx, QueryResponse<'tcx, R>> {
+ pub fn is_proven(&self) -> bool {
+ self.value.is_proven()
+ }
+
+ pub fn is_ambiguous(&self) -> bool {
+ !self.is_proven()
+ }
+}
+
+impl<'tcx, R> Canonical<'tcx, ty::ParamEnvAnd<'tcx, R>> {
+ #[inline]
+ pub fn without_const(mut self) -> Self {
+ self.value = self.value.without_const();
+ self
+ }
+}
+
+impl<'tcx, V> Canonical<'tcx, V> {
+ /// Allows you to map the `value` of a canonical while keeping the
+ /// same set of bound variables.
+ ///
+ /// **WARNING:** This function is very easy to mis-use, hence the
+ /// name! In particular, the new value `W` must use all **the
+ /// same type/region variables** in **precisely the same order**
+ /// as the original! (The ordering is defined by the
+ /// `TypeFoldable` implementation of the type in question.)
+ ///
+ /// An example of a **correct** use of this:
+ ///
+ /// ```rust,ignore (not real code)
+ /// let a: Canonical<'_, T> = ...;
+ /// let b: Canonical<'_, (T,)> = a.unchecked_map(|v| (v, ));
+ /// ```
+ ///
+ /// An example of an **incorrect** use of this:
+ ///
+ /// ```rust,ignore (not real code)
+ /// let a: Canonical<'tcx, T> = ...;
+ /// let ty: Ty<'tcx> = ...;
+ /// let b: Canonical<'tcx, (T, Ty<'tcx>)> = a.unchecked_map(|v| (v, ty));
+ /// ```
+ pub fn unchecked_map<W>(self, map_op: impl FnOnce(V) -> W) -> Canonical<'tcx, W> {
+ let Canonical { max_universe, variables, value } = self;
+ Canonical { max_universe, variables, value: map_op(value) }
+ }
+}
+
+pub type QueryOutlivesConstraint<'tcx> =
+ ty::Binder<'tcx, ty::OutlivesPredicate<GenericArg<'tcx>, Region<'tcx>>>;
+
+TrivialTypeTraversalAndLiftImpls! {
+ for <'tcx> {
+ crate::infer::canonical::Certainty,
+ crate::infer::canonical::CanonicalVarInfo<'tcx>,
+ crate::infer::canonical::CanonicalVarKind<'tcx>,
+ }
+}
+
+TrivialTypeTraversalImpls! {
+ for <'tcx> {
+ crate::infer::canonical::CanonicalVarInfos<'tcx>,
+ }
+}
+
+impl<'tcx> CanonicalVarValues<'tcx> {
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.var_values.len()
+ }
+
+ /// Makes an identity substitution from this one: each bound var
+ /// is matched to the same bound var, preserving the original kinds.
+ /// For example, if we have:
+ /// `self.var_values == [Type(u32), Lifetime('a), Type(u64)]`
+ /// we'll return a substitution `subst` with:
+ /// `subst.var_values == [Type(^0), Lifetime(^1), Type(^2)]`.
+ pub fn make_identity(&self, tcx: TyCtxt<'tcx>) -> Self {
+ use crate::ty::subst::GenericArgKind;
+
+ CanonicalVarValues {
+ var_values: iter::zip(&self.var_values, 0..)
+ .map(|(kind, i)| match kind.unpack() {
+ GenericArgKind::Type(..) => {
+ tcx.mk_ty(ty::Bound(ty::INNERMOST, ty::BoundVar::from_u32(i).into())).into()
+ }
+ GenericArgKind::Lifetime(..) => {
+ let br =
+ ty::BoundRegion { var: ty::BoundVar::from_u32(i), kind: ty::BrAnon(i) };
+ tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)).into()
+ }
+ GenericArgKind::Const(ct) => tcx
+ .mk_const(ty::ConstS {
+ ty: ct.ty(),
+ kind: ty::ConstKind::Bound(ty::INNERMOST, ty::BoundVar::from_u32(i)),
+ })
+ .into(),
+ })
+ .collect(),
+ }
+ }
+}
+
+impl<'a, 'tcx> IntoIterator for &'a CanonicalVarValues<'tcx> {
+ type Item = GenericArg<'tcx>;
+ type IntoIter = ::std::iter::Cloned<::std::slice::Iter<'a, GenericArg<'tcx>>>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.var_values.iter().cloned()
+ }
+}
+
+impl<'tcx> Index<BoundVar> for CanonicalVarValues<'tcx> {
+ type Output = GenericArg<'tcx>;
+
+ fn index(&self, value: BoundVar) -> &GenericArg<'tcx> {
+ &self.var_values[value]
+ }
+}
diff --git a/compiler/rustc_middle/src/infer/mod.rs b/compiler/rustc_middle/src/infer/mod.rs
new file mode 100644
index 000000000..38868c210
--- /dev/null
+++ b/compiler/rustc_middle/src/infer/mod.rs
@@ -0,0 +1,32 @@
+pub mod canonical;
+pub mod unify_key;
+
+use crate::ty::Region;
+use crate::ty::{OpaqueTypeKey, Ty};
+use rustc_data_structures::sync::Lrc;
+use rustc_span::Span;
+
+/// Requires that `region` must be equal to one of the regions in `choice_regions`.
+/// We often denote this using the syntax:
+///
+/// ```text
+/// R0 member of [O1..On]
+/// ```
+#[derive(Debug, Clone, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct MemberConstraint<'tcx> {
+ /// The `DefId` and substs of the opaque type causing this constraint.
+ /// Used for error reporting.
+ pub key: OpaqueTypeKey<'tcx>,
+
+ /// The span where the hidden type was instantiated.
+ pub definition_span: Span,
+
+ /// The hidden type in which `member_region` appears: used for error reporting.
+ pub hidden_ty: Ty<'tcx>,
+
+ /// The region `R0`.
+ pub member_region: Region<'tcx>,
+
+ /// The options `O1..On`.
+ pub choice_regions: Lrc<Vec<Region<'tcx>>>,
+}
diff --git a/compiler/rustc_middle/src/infer/unify_key.rs b/compiler/rustc_middle/src/infer/unify_key.rs
new file mode 100644
index 000000000..f2627885d
--- /dev/null
+++ b/compiler/rustc_middle/src/infer/unify_key.rs
@@ -0,0 +1,162 @@
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_data_structures::unify::{NoError, UnifyKey, UnifyValue};
+use rustc_span::def_id::DefId;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+use std::cmp;
+use std::marker::PhantomData;
+
+pub trait ToType {
+ fn to_type<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+}
+
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct UnifiedRegion<'tcx>(pub Option<ty::Region<'tcx>>);
+
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct RegionVidKey<'tcx> {
+ pub vid: ty::RegionVid,
+ pub phantom: PhantomData<UnifiedRegion<'tcx>>,
+}
+
+impl<'tcx> From<ty::RegionVid> for RegionVidKey<'tcx> {
+ fn from(vid: ty::RegionVid) -> Self {
+ RegionVidKey { vid, phantom: PhantomData }
+ }
+}
+
+impl<'tcx> UnifyKey for RegionVidKey<'tcx> {
+ type Value = UnifiedRegion<'tcx>;
+ #[inline]
+ fn index(&self) -> u32 {
+ self.vid.as_u32()
+ }
+ #[inline]
+ fn from_index(i: u32) -> Self {
+ RegionVidKey::from(ty::RegionVid::from_u32(i))
+ }
+ fn tag() -> &'static str {
+ "RegionVidKey"
+ }
+}
+
+impl<'tcx> UnifyValue for UnifiedRegion<'tcx> {
+ type Error = NoError;
+
+ fn unify_values(value1: &Self, value2: &Self) -> Result<Self, NoError> {
+ Ok(match (value1.0, value2.0) {
+ // Here we can just pick one value, because the full constraints graph
+ // will be handled later. Ideally, we might want a `MultipleValues`
+ // variant or something. For now though, this is fine.
+ (Some(_), Some(_)) => *value1,
+
+ (Some(_), _) => *value1,
+ (_, Some(_)) => *value2,
+
+ (None, None) => *value1,
+ })
+ }
+}
+
+impl ToType for ty::IntVarValue {
+ fn to_type<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ ty::IntType(i) => tcx.mk_mach_int(i),
+ ty::UintType(i) => tcx.mk_mach_uint(i),
+ }
+ }
+}
+
+impl ToType for ty::FloatVarValue {
+ fn to_type<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ tcx.mk_mach_float(self.0)
+ }
+}
+
+// Generic consts.
+
+#[derive(Copy, Clone, Debug)]
+pub struct ConstVariableOrigin {
+ pub kind: ConstVariableOriginKind,
+ pub span: Span,
+}
+
+/// Reasons to create a const inference variable
+#[derive(Copy, Clone, Debug)]
+pub enum ConstVariableOriginKind {
+ MiscVariable,
+ ConstInference,
+ ConstParameterDefinition(Symbol, DefId),
+ SubstitutionPlaceholder,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum ConstVariableValue<'tcx> {
+ Known { value: ty::Const<'tcx> },
+ Unknown { universe: ty::UniverseIndex },
+}
+
+impl<'tcx> ConstVariableValue<'tcx> {
+ /// If this value is known, returns the const it is known to be.
+ /// Otherwise, `None`.
+ pub fn known(&self) -> Option<ty::Const<'tcx>> {
+ match *self {
+ ConstVariableValue::Unknown { .. } => None,
+ ConstVariableValue::Known { value } => Some(value),
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct ConstVarValue<'tcx> {
+ pub origin: ConstVariableOrigin,
+ pub val: ConstVariableValue<'tcx>,
+}
+
+impl<'tcx> UnifyKey for ty::ConstVid<'tcx> {
+ type Value = ConstVarValue<'tcx>;
+ #[inline]
+ fn index(&self) -> u32 {
+ self.index
+ }
+ #[inline]
+ fn from_index(i: u32) -> Self {
+ ty::ConstVid { index: i, phantom: PhantomData }
+ }
+ fn tag() -> &'static str {
+ "ConstVid"
+ }
+}
+
+impl<'tcx> UnifyValue for ConstVarValue<'tcx> {
+ type Error = (ty::Const<'tcx>, ty::Const<'tcx>);
+
+ fn unify_values(&value1: &Self, &value2: &Self) -> Result<Self, Self::Error> {
+ Ok(match (value1.val, value2.val) {
+ (ConstVariableValue::Known { .. }, ConstVariableValue::Known { .. }) => {
+ bug!("equating two const variables, both of which have known values")
+ }
+
+ // If one side is known, prefer that one.
+ (ConstVariableValue::Known { .. }, ConstVariableValue::Unknown { .. }) => value1,
+ (ConstVariableValue::Unknown { .. }, ConstVariableValue::Known { .. }) => value2,
+
+ // If both sides are *unknown*, it hardly matters, does it?
+ (
+ ConstVariableValue::Unknown { universe: universe1 },
+ ConstVariableValue::Unknown { universe: universe2 },
+ ) => {
+ // If we unify two unbound variables, ?T and ?U, then whatever
+ // value they wind up taking (which must be the same value) must
+ // be nameable by both universes. Therefore, the resulting
+ // universe is the minimum of the two universes, because that is
+ // the one which contains the fewest names in scope.
+ let universe = cmp::min(universe1, universe2);
+ ConstVarValue {
+ val: ConstVariableValue::Unknown { universe },
+ origin: value1.origin,
+ }
+ }
+ })
+ }
+}
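+
+// Illustrative sketch (hypothetical universes): unifying unknowns in U1 and
+// U3 yields an unknown in min(U1, U3) = U1, since only names visible in
+// *both* universes can become the eventual value. With the derived ordering
+// on `UniverseIndex` this is simply:
+//
+//     cmp::min(ty::UniverseIndex::from_u32(1), ty::UniverseIndex::from_u32(3))
+//     // == ty::UniverseIndex::from_u32(1)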
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
new file mode 100644
index 000000000..ef06c457b
--- /dev/null
+++ b/compiler/rustc_middle/src/lib.rs
@@ -0,0 +1,106 @@
+//! The "main crate" of the Rust compiler. This crate contains common
+//! type definitions that are used by the other crates in the rustc
+//! "family". Some prominent examples (note that each of these modules
+//! has their own README with further details).
+//!
+//! - **HIR.** The "high-level (H) intermediate representation (IR)" is
+//! defined in the `hir` module.
+//! - **MIR.** The "mid-level (M) intermediate representation (IR)" is
+//! defined in the `mir` module. This module contains only the
+//! *definition* of the MIR; the passes that transform and operate
+//! on MIR are found in the `rustc_const_eval` crate.
+//! - **Types.** The internal representation of types used in rustc is
+//! defined in the `ty` module. This includes the **type context**
+//! (or `tcx`), which is the central context during most of
+//! compilation, containing the interners and other things.
+//!
+//! For more information about how rustc works, see the [rustc dev guide].
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(allocator_api)]
+#![feature(array_windows)]
+#![feature(assert_matches)]
+#![feature(backtrace)]
+#![feature(box_patterns)]
+#![feature(core_intrinsics)]
+#![feature(discriminant_kind)]
+#![feature(exhaustive_patterns)]
+#![feature(get_mut_unchecked)]
+#![feature(generic_associated_types)]
+#![feature(if_let_guard)]
+#![feature(map_first_last)]
+#![feature(negative_impls)]
+#![feature(never_type)]
+#![feature(extern_types)]
+#![feature(new_uninit)]
+#![feature(once_cell)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(trusted_len)]
+#![feature(type_alias_impl_trait)]
+#![feature(associated_type_bounds)]
+#![feature(rustc_attrs)]
+#![feature(half_open_range_patterns)]
+#![feature(control_flow_enum)]
+#![feature(associated_type_defaults)]
+#![feature(trusted_step)]
+#![feature(try_blocks)]
+#![feature(try_reserve_kind)]
+#![feature(nonzero_ops)]
+#![feature(unwrap_infallible)]
+#![feature(decl_macro)]
+#![feature(drain_filter)]
+#![feature(intra_doc_pointers)]
+#![feature(yeet_expr)]
+#![feature(const_option)]
+#![recursion_limit = "512"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate bitflags;
+#[macro_use]
+extern crate rustc_macros;
+#[macro_use]
+extern crate rustc_data_structures;
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate smallvec;
+
+#[cfg(test)]
+mod tests;
+
+#[macro_use]
+mod macros;
+
+#[macro_use]
+pub mod query;
+
+#[macro_use]
+pub mod arena;
+#[macro_use]
+pub mod dep_graph;
+pub mod hir;
+pub mod infer;
+pub mod lint;
+pub mod metadata;
+pub mod middle;
+pub mod mir;
+pub mod thir;
+pub mod traits;
+pub mod ty;
+
+pub mod util {
+ pub mod bug;
+ pub mod common;
+}
+
+// Allows macros to refer to this crate as `::rustc_middle`
+extern crate self as rustc_middle;
diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs
new file mode 100644
index 000000000..2f45222de
--- /dev/null
+++ b/compiler/rustc_middle/src/lint.rs
@@ -0,0 +1,443 @@
+use std::cmp;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::{Diagnostic, DiagnosticId, LintDiagnosticBuilder, MultiSpan};
+use rustc_hir::HirId;
+use rustc_index::vec::IndexVec;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_session::lint::{
+ builtin::{self, FORBIDDEN_LINT_GROUPS},
+ FutureIncompatibilityReason, Level, Lint, LintExpectationId, LintId,
+};
+use rustc_session::Session;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::source_map::{DesugaringKind, ExpnKind};
+use rustc_span::{symbol, Span, Symbol, DUMMY_SP};
+
+/// How a lint level was set.
+#[derive(Clone, Copy, PartialEq, Eq, HashStable, Debug)]
+pub enum LintLevelSource {
+ /// Lint is at the default level as declared
+ /// in rustc or a plugin.
+ Default,
+
+ /// Lint level was set by an attribute.
+ Node(Symbol, Span, Option<Symbol> /* RFC 2383 reason */),
+
+ /// Lint level was set by a command-line flag.
+ /// The provided `Level` is the level specified on the command line.
+ /// (The actual level may be lower due to `--cap-lints`.)
+ CommandLine(Symbol, Level),
+}
+
+impl LintLevelSource {
+ pub fn name(&self) -> Symbol {
+ match *self {
+ LintLevelSource::Default => symbol::kw::Default,
+ LintLevelSource::Node(name, _, _) => name,
+ LintLevelSource::CommandLine(name, _) => name,
+ }
+ }
+
+ pub fn span(&self) -> Span {
+ match *self {
+ LintLevelSource::Default => DUMMY_SP,
+ LintLevelSource::Node(_, span, _) => span,
+ LintLevelSource::CommandLine(_, _) => DUMMY_SP,
+ }
+ }
+}
+
+/// A tuple of a lint level and its source.
+pub type LevelAndSource = (Level, LintLevelSource);
+
+#[derive(Debug, HashStable)]
+pub struct LintLevelSets {
+ pub list: IndexVec<LintStackIndex, LintSet>,
+ pub lint_cap: Level,
+}
+
+rustc_index::newtype_index! {
+ #[derive(HashStable)]
+ pub struct LintStackIndex {
+ const COMMAND_LINE = 0,
+ }
+}
+
+#[derive(Debug, HashStable)]
+pub struct LintSet {
+ // Lint levels set by `-A`, `-W`, `-D` flags: a `Symbol` for the flag itself
+ // and the `Level` the flag maps to.
+ pub specs: FxHashMap<LintId, LevelAndSource>,
+
+ pub parent: LintStackIndex,
+}
+
+impl LintLevelSets {
+ pub fn new() -> Self {
+ LintLevelSets { list: IndexVec::new(), lint_cap: Level::Forbid }
+ }
+
+ pub fn get_lint_level(
+ &self,
+ lint: &'static Lint,
+ idx: LintStackIndex,
+ aux: Option<&FxHashMap<LintId, LevelAndSource>>,
+ sess: &Session,
+ ) -> LevelAndSource {
+ let (level, mut src) = self.get_lint_id_level(LintId::of(lint), idx, aux);
+
+ // If `level` is `None` then we actually assume the default level for this
+ // lint.
+ let mut level = level.unwrap_or_else(|| lint.default_level(sess.edition()));
+
+ // If we're about to issue a warning, check at the last minute for any
+ // directives against the warnings "lint". If, for example, there's an
+ // `allow(warnings)` in scope then we want to respect that instead.
+ //
+ // We exempt `FORBIDDEN_LINT_GROUPS` from this because it specifically
+ // triggers in cases (like #80988) where you have `forbid(warnings)`,
+ // and so if we turned that into an error, it'd defeat the purpose of the
+ // future compatibility warning.
+ if level == Level::Warn && LintId::of(lint) != LintId::of(FORBIDDEN_LINT_GROUPS) {
+ let (warnings_level, warnings_src) =
+ self.get_lint_id_level(LintId::of(builtin::WARNINGS), idx, aux);
+ if let Some(configured_warning_level) = warnings_level {
+ if configured_warning_level != Level::Warn {
+ level = configured_warning_level;
+ src = warnings_src;
+ }
+ }
+ }
+
+ // Ensure that we never exceed the `--cap-lints` argument
+ // unless the source is a --force-warn
+ level = if let LintLevelSource::CommandLine(_, Level::ForceWarn(_)) = src {
+ level
+ } else {
+ cmp::min(level, self.lint_cap)
+ };
+
+ if let Some(driver_level) = sess.driver_lint_caps.get(&LintId::of(lint)) {
+ // Ensure that we never exceed driver level.
+ level = cmp::min(*driver_level, level);
+ }
+
+ (level, src)
+ }
+
+ pub fn get_lint_id_level(
+ &self,
+ id: LintId,
+ mut idx: LintStackIndex,
+ aux: Option<&FxHashMap<LintId, LevelAndSource>>,
+ ) -> (Option<Level>, LintLevelSource) {
+ if let Some(specs) = aux {
+ if let Some(&(level, src)) = specs.get(&id) {
+ return (Some(level), src);
+ }
+ }
+ loop {
+ let LintSet { ref specs, parent } = self.list[idx];
+ if let Some(&(level, src)) = specs.get(&id) {
+ return (Some(level), src);
+ }
+ if idx == COMMAND_LINE {
+ return (None, LintLevelSource::Default);
+ }
+ idx = parent;
+ }
+ }
+}
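+
+// Illustrative walk-through (hypothetical inputs): with `#[warn(dead_code)]`
+// in scope and `--cap-lints allow` on the command line, `get_lint_level`
+// first resolves `Warn` from the stack, then caps it:
+// `cmp::min(Level::Warn, Level::Allow) == Level::Allow`, silencing the lint.
+// A `LintLevelSource::CommandLine(_, Level::ForceWarn(_))` source skips the
+// cap entirely, per the check above.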
+
+#[derive(Debug)]
+pub struct LintLevelMap {
+ /// This is a collection of lint expectations as described in RFC 2383, that
+ /// can be fulfilled during this compilation session. This means that at least
+ /// one expected lint is currently registered in the lint store.
+ ///
+ /// The [`LintExpectationId`] is stored as a part of the [`Expect`](Level::Expect)
+ /// lint level.
+ pub lint_expectations: Vec<(LintExpectationId, LintExpectation)>,
+ pub sets: LintLevelSets,
+ pub id_to_set: FxHashMap<HirId, LintStackIndex>,
+}
+
+impl LintLevelMap {
+ /// If the `id` was previously registered with `register_id` when building
+ /// this `LintLevelMap` this returns the corresponding lint level and source
+ /// of the lint level for the lint provided.
+ ///
+ /// If the `id` was not previously registered, returns `None`. If `None` is
+ /// returned then the parent of `id` should be acquired and this function
+ /// should be called again.
+ pub fn level_and_source(
+ &self,
+ lint: &'static Lint,
+ id: HirId,
+ session: &Session,
+ ) -> Option<LevelAndSource> {
+ self.id_to_set.get(&id).map(|idx| self.sets.get_lint_level(lint, *idx, None, session))
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for LintLevelMap {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let LintLevelMap { ref sets, ref id_to_set, ref lint_expectations } = *self;
+
+ id_to_set.hash_stable(hcx, hasher);
+ lint_expectations.hash_stable(hcx, hasher);
+
+ hcx.while_hashing_spans(true, |hcx| sets.hash_stable(hcx, hasher))
+ }
+}
+
+/// This struct represents a lint expectation and holds all required information
+/// to emit the `unfulfilled_lint_expectations` lint if it is unfulfilled after
+/// the `LateLintPass` has completed.
+#[derive(Clone, Debug, HashStable)]
+pub struct LintExpectation {
+ /// The reason for this expectation that can optionally be added as part of
+ /// the attribute. It will be displayed as part of the lint message.
+ pub reason: Option<Symbol>,
+ /// The [`Span`] of the attribute that this expectation originated from.
+ pub emission_span: Span,
+ /// Lint messages for the `unfulfilled_lint_expectations` lint will be
+ /// adjusted to include an additional note. Therefore, we have to track
+ /// whether the expectation is for that lint.
+ pub is_unfulfilled_lint_expectations: bool,
+ /// This will hold the name of the tool that this lint belongs to. For
+ /// the lint `clippy::some_lint` the tool would be `clippy`, the same
+ /// goes for `rustdoc`. This will be `None` for rustc lints
+ pub lint_tool: Option<Symbol>,
+}
+
+impl LintExpectation {
+ pub fn new(
+ reason: Option<Symbol>,
+ emission_span: Span,
+ is_unfulfilled_lint_expectations: bool,
+ lint_tool: Option<Symbol>,
+ ) -> Self {
+ Self { reason, emission_span, is_unfulfilled_lint_expectations, lint_tool }
+ }
+}
+
+pub fn explain_lint_level_source(
+ lint: &'static Lint,
+ level: Level,
+ src: LintLevelSource,
+ err: &mut Diagnostic,
+) {
+ let name = lint.name_lower();
+ match src {
+ LintLevelSource::Default => {
+ err.note_once(&format!("`#[{}({})]` on by default", level.as_str(), name));
+ }
+ LintLevelSource::CommandLine(lint_flag_val, orig_level) => {
+ let flag = match orig_level {
+ Level::Warn => "-W",
+ Level::Deny => "-D",
+ Level::Forbid => "-F",
+ Level::Allow => "-A",
+ Level::ForceWarn(_) => "--force-warn",
+ Level::Expect(_) => {
+ unreachable!("the expect level does not have a commandline flag")
+ }
+ };
+ let hyphen_case_lint_name = name.replace('_', "-");
+ if lint_flag_val.as_str() == name {
+ err.note_once(&format!(
+ "requested on the command line with `{} {}`",
+ flag, hyphen_case_lint_name
+ ));
+ } else {
+ let hyphen_case_flag_val = lint_flag_val.as_str().replace('_', "-");
+ err.note_once(&format!(
+ "`{} {}` implied by `{} {}`",
+ flag, hyphen_case_lint_name, flag, hyphen_case_flag_val
+ ));
+ }
+ }
+ LintLevelSource::Node(lint_attr_name, src, reason) => {
+ if let Some(rationale) = reason {
+ err.note(rationale.as_str());
+ }
+ err.span_note_once(src, "the lint level is defined here");
+ if lint_attr_name.as_str() != name {
+ let level_str = level.as_str();
+ err.note_once(&format!(
+ "`#[{}({})]` implied by `#[{}({})]`",
+ level_str, name, level_str, lint_attr_name
+ ));
+ }
+ }
+ }
+}
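+
+// Illustrative output (hypothetical invocation): for the `bare_trait_objects`
+// lint denied via `-D warnings`, the command-line arm above emits:
+//
+//     note: `-D bare-trait-objects` implied by `-D warnings`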
+
+pub fn struct_lint_level<'s, 'd>(
+ sess: &'s Session,
+ lint: &'static Lint,
+ level: Level,
+ src: LintLevelSource,
+ span: Option<MultiSpan>,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>) + 'd,
+) {
+ // Avoid codegen bloat from monomorphization by immediately doing dyn dispatch of `decorate` to
+ // the "real" work.
+ fn struct_lint_level_impl<'s, 'd>(
+ sess: &'s Session,
+ lint: &'static Lint,
+ level: Level,
+ src: LintLevelSource,
+ span: Option<MultiSpan>,
+ decorate: Box<dyn for<'b> FnOnce(LintDiagnosticBuilder<'b, ()>) + 'd>,
+ ) {
+ // Check for future incompatibility lints and issue a stronger warning.
+ let future_incompatible = lint.future_incompatible;
+
+ let has_future_breakage = future_incompatible.map_or(
+ // Default allow lints trigger too often for testing.
+ sess.opts.unstable_opts.future_incompat_test && lint.default_level != Level::Allow,
+ |incompat| {
+ matches!(incompat.reason, FutureIncompatibilityReason::FutureReleaseErrorReportNow)
+ },
+ );
+
+ let mut err = match (level, span) {
+ (Level::Allow, span) => {
+ if has_future_breakage {
+ if let Some(span) = span {
+ sess.struct_span_allow(span, "")
+ } else {
+ sess.struct_allow("")
+ }
+ } else {
+ return;
+ }
+ }
+ (Level::Expect(expect_id), _) => {
+ // This case is special as we actually allow the lint itself in this context, but
+ // we can't return early like in the case for `Level::Allow` because we still
+ // need the lint diagnostic to be emitted to `rustc_error::HandlerInner`.
+ //
+ // We also cannot mark the lint expectation as fulfilled here right away, as it
+ // can still be cancelled in the decorate function. All of this means that we simply
+ // create a `DiagnosticBuilder` and continue as we would for warnings.
+ sess.struct_expect("", expect_id)
+ }
+ (Level::ForceWarn(Some(expect_id)), Some(span)) => {
+ sess.struct_span_warn_with_expectation(span, "", expect_id)
+ }
+ (Level::ForceWarn(Some(expect_id)), None) => {
+ sess.struct_warn_with_expectation("", expect_id)
+ }
+ (Level::Warn | Level::ForceWarn(None), Some(span)) => sess.struct_span_warn(span, ""),
+ (Level::Warn | Level::ForceWarn(None), None) => sess.struct_warn(""),
+ (Level::Deny | Level::Forbid, Some(span)) => {
+ let mut builder = sess.diagnostic().struct_err_lint("");
+ builder.set_span(span);
+ builder
+ }
+ (Level::Deny | Level::Forbid, None) => sess.diagnostic().struct_err_lint(""),
+ };
+
+ // If this code originates in a foreign macro, aka something that this crate
+ // did not itself author, then it's likely that there's nothing this crate
+ // can do about it. We probably want to skip the lint entirely.
+ if err.span.primary_spans().iter().any(|s| in_external_macro(sess, *s)) {
+ // Any suggestions made here are likely to be incorrect, so anything we
+ // emit shouldn't be automatically fixed by rustfix.
+ err.disable_suggestions();
+
+ // If this is a future-incompatible lint that is not an edition-fixing
+ // lint, it'll become a hard error, so we have to emit *something*. Also,
+ // if this lint occurs in the expansion of a macro from an external crate,
+ // allow individual lints to opt-out from being reported.
+ let not_future_incompatible =
+ future_incompatible.map(|f| f.reason.edition().is_some()).unwrap_or(true);
+ if not_future_incompatible && !lint.report_in_external_macro {
+ err.cancel();
+ // Don't continue further, since we don't want to have
+ // `diag_span_note_once` called for a diagnostic that isn't emitted.
+ return;
+ }
+ }
+
+ // Lint diagnostics that are covered by the expect level will not be emitted outside
+ // the compiler. It is therefore not necessary to add any information for the user.
+ // This will therefore directly call the decorate function which will in turn emit
+ // the `Diagnostic`.
+ if let Level::Expect(_) = level {
+ let name = lint.name_lower();
+ err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn: false });
+ decorate(LintDiagnosticBuilder::new(err));
+ return;
+ }
+
+ explain_lint_level_source(lint, level, src, &mut err);
+
+ let name = lint.name_lower();
+ let is_force_warn = matches!(level, Level::ForceWarn(_));
+ err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn });
+
+ if let Some(future_incompatible) = future_incompatible {
+ let explanation = match future_incompatible.reason {
+ FutureIncompatibilityReason::FutureReleaseError
+ | FutureIncompatibilityReason::FutureReleaseErrorReportNow => {
+ "this was previously accepted by the compiler but is being phased out; \
+ it will become a hard error in a future release!"
+ .to_owned()
+ }
+ FutureIncompatibilityReason::FutureReleaseSemanticsChange => {
+ "this will change its meaning in a future release!".to_owned()
+ }
+ FutureIncompatibilityReason::EditionError(edition) => {
+ let current_edition = sess.edition();
+ format!(
+ "this is accepted in the current edition (Rust {}) but is a hard error in Rust {}!",
+ current_edition, edition
+ )
+ }
+ FutureIncompatibilityReason::EditionSemanticsChange(edition) => {
+ format!("this changes meaning in Rust {}", edition)
+ }
+ FutureIncompatibilityReason::Custom(reason) => reason.to_owned(),
+ };
+
+ if future_incompatible.explain_reason {
+ err.warn(&explanation);
+ }
+ if !future_incompatible.reference.is_empty() {
+ let citation =
+ format!("for more information, see {}", future_incompatible.reference);
+ err.note(&citation);
+ }
+ }
+
+ // Finally, run `decorate`. This function is also responsible for emitting the diagnostic.
+ decorate(LintDiagnosticBuilder::new(err));
+ }
+ struct_lint_level_impl(sess, lint, level, src, span, Box::new(decorate))
+}
+
+/// Returns whether `span` originates in a foreign crate's external macro.
+///
+/// This is used to test whether a lint should not even begin to figure out whether it should
+/// be reported on the current node.
+pub fn in_external_macro(sess: &Session, span: Span) -> bool {
+ let expn_data = span.ctxt().outer_expn_data();
+ match expn_data.kind {
+ ExpnKind::Inlined
+ | ExpnKind::Root
+ | ExpnKind::Desugaring(DesugaringKind::ForLoop | DesugaringKind::WhileLoop) => false,
+ ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => true, // well, it's "external"
+ ExpnKind::Macro(MacroKind::Bang, _) => {
+ // Dummy span for the `def_site` means it's an external macro.
+ expn_data.def_site.is_dummy() || sess.source_map().is_imported(expn_data.def_site)
+ }
+ ExpnKind::Macro { .. } => true, // definitely a plugin
+ }
+}
diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs
new file mode 100644
index 000000000..0e85c60a3
--- /dev/null
+++ b/compiler/rustc_middle/src/macros.rs
@@ -0,0 +1,232 @@
+#[macro_export]
+macro_rules! bug {
+ () => ( $crate::bug!("impossible case reached") );
+ ($msg:expr) => ({ $crate::util::bug::bug_fmt(::std::format_args!($msg)) });
+ ($msg:expr,) => ({ $crate::bug!($msg) });
+ ($fmt:expr, $($arg:tt)+) => ({
+ $crate::util::bug::bug_fmt(::std::format_args!($fmt, $($arg)+))
+ });
+}
+
+#[macro_export]
+macro_rules! span_bug {
+ ($span:expr, $msg:expr) => ({ $crate::util::bug::span_bug_fmt($span, ::std::format_args!($msg)) });
+ ($span:expr, $msg:expr,) => ({ $crate::span_bug!($span, $msg) });
+ ($span:expr, $fmt:expr, $($arg:tt)+) => ({
+ $crate::util::bug::span_bug_fmt($span, ::std::format_args!($fmt, $($arg)+))
+ });
+}
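+
+// Usage (illustrative; `kind`, `expr`, and `ty` are hypothetical bindings):
+//
+//     bug!("unexpected variant: {:?}", kind);
+//     span_bug!(expr.span, "type {:?} should have been normalized away", ty);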
+
+///////////////////////////////////////////////////////////////////////////
+// Lift and TypeFoldable/TypeVisitable macros
+//
+// When possible, use one of these (relatively) convenient macros to write
+// the impls for you.
+
+#[macro_export]
+macro_rules! CloneLiftImpls {
+ (for <$tcx:lifetime> { $($ty:ty,)+ }) => {
+ $(
+ impl<$tcx> $crate::ty::Lift<$tcx> for $ty {
+ type Lifted = Self;
+ fn lift_to_tcx(self, _: $crate::ty::TyCtxt<$tcx>) -> Option<Self> {
+ Some(self)
+ }
+ }
+ )+
+ };
+
+ ($($ty:ty,)+) => {
+ CloneLiftImpls! {
+ for <'tcx> {
+ $($ty,)+
+ }
+ }
+ };
+}
+
+/// Used for types that are `Copy` and which **do not reference arena-allocated
+/// data** (i.e., don't need to be folded).
+#[macro_export]
+macro_rules! TrivialTypeTraversalImpls {
+ (for <$tcx:lifetime> { $($ty:ty,)+ }) => {
+ $(
+ impl<$tcx> $crate::ty::fold::TypeFoldable<$tcx> for $ty {
+ fn try_fold_with<F: $crate::ty::fold::FallibleTypeFolder<$tcx>>(
+ self,
+ _: &mut F
+ ) -> ::std::result::Result<$ty, F::Error> {
+ Ok(self)
+ }
+ }
+
+ impl<$tcx> $crate::ty::visit::TypeVisitable<$tcx> for $ty {
+ fn visit_with<F: $crate::ty::visit::TypeVisitor<$tcx>>(
+ &self,
+ _: &mut F)
+ -> ::std::ops::ControlFlow<F::BreakTy>
+ {
+ ::std::ops::ControlFlow::CONTINUE
+ }
+ }
+ )+
+ };
+
+ ($($ty:ty,)+) => {
+ TrivialTypeTraversalImpls! {
+ for <'tcx> {
+ $($ty,)+
+ }
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! TrivialTypeTraversalAndLiftImpls {
+ ($($t:tt)*) => {
+ TrivialTypeTraversalImpls! { $($t)* }
+ CloneLiftImpls! { $($t)* }
+ }
+}
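+
+// Usage (illustrative sketch; real invocations are scattered across
+// rustc_middle and its dependents):
+//
+//     TrivialTypeTraversalAndLiftImpls! {
+//         u64,
+//         ::rustc_span::Span,
+//     }
+//
+// This expands to identity `Lift` impls plus no-op `TypeFoldable` and
+// `TypeVisitable` impls for each listed type.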
+
+#[macro_export]
+macro_rules! EnumTypeTraversalImpl {
+ (impl<$($p:tt),*> TypeFoldable<$tcx:tt> for $s:path {
+ $($variants:tt)*
+ } $(where $($wc:tt)*)*) => {
+ impl<$($p),*> $crate::ty::fold::TypeFoldable<$tcx> for $s
+ $(where $($wc)*)*
+ {
+ fn try_fold_with<V: $crate::ty::fold::FallibleTypeFolder<$tcx>>(
+ self,
+ folder: &mut V,
+ ) -> ::std::result::Result<Self, V::Error> {
+ EnumTypeTraversalImpl!(@FoldVariants(self, folder) input($($variants)*) output())
+ }
+ }
+ };
+
+ (impl<$($p:tt),*> TypeVisitable<$tcx:tt> for $s:path {
+ $($variants:tt)*
+ } $(where $($wc:tt)*)*) => {
+ impl<$($p),*> $crate::ty::visit::TypeVisitable<$tcx> for $s
+ $(where $($wc)*)*
+ {
+ fn visit_with<V: $crate::ty::visit::TypeVisitor<$tcx>>(
+ &self,
+ visitor: &mut V,
+ ) -> ::std::ops::ControlFlow<V::BreakTy> {
+ EnumTypeTraversalImpl!(@VisitVariants(self, visitor) input($($variants)*) output())
+ }
+ }
+ };
+
+ (@FoldVariants($this:expr, $folder:expr) input() output($($output:tt)*)) => {
+ Ok(match $this {
+ $($output)*
+ })
+ };
+
+ (@FoldVariants($this:expr, $folder:expr)
+ input( ($variant:path) ( $($variant_arg:ident),* ) , $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @FoldVariants($this, $folder)
+ input($($input)*)
+ output(
+ $variant ( $($variant_arg),* ) => {
+ $variant (
+ $($crate::ty::fold::TypeFoldable::try_fold_with($variant_arg, $folder)?),*
+ )
+ }
+ $($output)*
+ )
+ )
+ };
+
+ (@FoldVariants($this:expr, $folder:expr)
+ input( ($variant:path) { $($variant_arg:ident),* $(,)? } , $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @FoldVariants($this, $folder)
+ input($($input)*)
+ output(
+ $variant { $($variant_arg),* } => {
+ $variant {
+ $($variant_arg: $crate::ty::fold::TypeFoldable::fold_with(
+ $variant_arg, $folder
+ )?),* }
+ }
+ $($output)*
+ )
+ )
+ };
+
+ (@FoldVariants($this:expr, $folder:expr)
+ input( ($variant:path), $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @FoldVariants($this, $folder)
+ input($($input)*)
+ output(
+ $variant => { $variant }
+ $($output)*
+ )
+ )
+ };
+
+ (@VisitVariants($this:expr, $visitor:expr) input() output($($output:tt)*)) => {
+ match $this {
+ $($output)*
+ }
+ };
+
+ (@VisitVariants($this:expr, $visitor:expr)
+ input( ($variant:path) ( $($variant_arg:ident),* ) , $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @VisitVariants($this, $visitor)
+ input($($input)*)
+ output(
+ $variant ( $($variant_arg),* ) => {
+ $($crate::ty::visit::TypeVisitable::visit_with(
+ $variant_arg, $visitor
+ )?;)*
+ ::std::ops::ControlFlow::CONTINUE
+ }
+ $($output)*
+ )
+ )
+ };
+
+ (@VisitVariants($this:expr, $visitor:expr)
+ input( ($variant:path) { $($variant_arg:ident),* $(,)? } , $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @VisitVariants($this, $visitor)
+ input($($input)*)
+ output(
+ $variant { $($variant_arg),* } => {
+ $($crate::ty::visit::TypeVisitable::visit_with(
+ $variant_arg, $visitor
+ )?;)*
+ ::std::ops::ControlFlow::CONTINUE
+ }
+ $($output)*
+ )
+ )
+ };
+
+ (@VisitVariants($this:expr, $visitor:expr)
+ input( ($variant:path), $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @VisitVariants($this, $visitor)
+ input($($input)*)
+ output(
+ $variant => { ::std::ops::ControlFlow::CONTINUE }
+ $($output)*
+ )
+ )
+ };
+}
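+
+// Usage (illustrative sketch, mirroring the invocation style in
+// `ty::structural_impls`):
+//
+//     EnumTypeTraversalImpl! {
+//         impl<'tcx, T> TypeFoldable<'tcx> for Option<T> {
+//             (Some)(a),
+//             (None),
+//         } where T: TypeFoldable<'tcx>
+//     }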
diff --git a/compiler/rustc_middle/src/metadata.rs b/compiler/rustc_middle/src/metadata.rs
new file mode 100644
index 000000000..c8e78747d
--- /dev/null
+++ b/compiler/rustc_middle/src/metadata.rs
@@ -0,0 +1,26 @@
+use crate::ty;
+
+use rustc_hir::def::Res;
+use rustc_macros::HashStable;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+/// This structure is supposed to keep enough data to re-create `NameBinding`s for other crates
+/// during name resolution. Right now the bindings are not recreated entirely precisely, so we may
+/// need to add more data in the future to correctly support macros 2.0, for example.
+/// A module child can be either a proper item or a reexport (including private imports).
+/// In the case of a reexport, all the fields describe the reexport item itself, not what it refers to.
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct ModChild {
+ /// Name of the item.
+ pub ident: Ident,
+ /// Resolution result corresponding to the item.
+ /// Local variables cannot be exported, so this `Res` doesn't need the ID parameter.
+ pub res: Res<!>,
+ /// Visibility of the item.
+ pub vis: ty::Visibility,
+ /// Span of the item.
+ pub span: Span,
+ /// A proper `macro_rules` item (not a reexport).
+ pub macro_rules: bool,
+}
diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
new file mode 100644
index 000000000..45d33a165
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
@@ -0,0 +1,146 @@
+use crate::mir::mono::Linkage;
+use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::SanitizerSet;
+
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, Debug)]
+pub struct CodegenFnAttrs {
+ pub flags: CodegenFnAttrFlags,
+ /// Parsed representation of the `#[inline]` attribute
+ pub inline: InlineAttr,
+ /// Parsed representation of the `#[optimize]` attribute
+ pub optimize: OptimizeAttr,
+ /// The `#[export_name = "..."]` attribute, indicating a custom symbol a
+ /// function should be exported under
+ pub export_name: Option<Symbol>,
+ /// The `#[link_name = "..."]` attribute, indicating a custom symbol an
+ /// imported function should be imported as. Note that `export_name`
+ /// probably isn't set when this is set, this is for foreign items while
+ /// `#[export_name]` is for Rust-defined functions.
+ pub link_name: Option<Symbol>,
+ /// The `#[link_ordinal = "..."]` attribute, indicating an ordinal an
+ /// imported function has in the dynamic library. Note that this must not
+ /// be set when `link_name` is set. This is for foreign items with the
+ /// "raw-dylib" kind.
+ pub link_ordinal: Option<u16>,
+ /// The `#[target_feature(enable = "...")]` attribute and the enabled
+ /// features (only enabled features are supported right now).
+ pub target_features: Vec<Symbol>,
+ /// The `#[linkage = "..."]` attribute and the value we found.
+ pub linkage: Option<Linkage>,
+ /// The `#[link_section = "..."]` attribute, or what executable section this
+ /// should be placed in.
+ pub link_section: Option<Symbol>,
+ /// The `#[no_sanitize(...)]` attribute. Indicates sanitizers for which
+ /// instrumentation should be disabled inside the annotated function.
+ pub no_sanitize: SanitizerSet,
+ /// The `#[instruction_set(set)]` attribute. Indicates if the generated code should
+ /// be generated against a specific instruction set. Only usable on architectures which allow
+ /// switching between multiple instruction sets.
+ pub instruction_set: Option<InstructionSetAttr>,
+ /// The `#[repr(align(...))]` attribute. Indicates the alignment to which the
+ /// function should be aligned.
+ pub alignment: Option<u32>,
+}
+
+bitflags! {
+ #[derive(TyEncodable, TyDecodable, HashStable)]
+ pub struct CodegenFnAttrFlags: u32 {
+ /// `#[cold]`: a hint to LLVM that this function, when called, is never on
+ /// the hot path.
+ const COLD = 1 << 0;
+ /// `#[rustc_allocator]`: a hint to LLVM that the pointer returned from this
+ /// function is never null and the function has no side effects other than allocating.
+ const ALLOCATOR = 1 << 1;
+ /// An indicator that the function will never unwind. Will become obsolete
+ /// once C-unwind is fully stabilized.
+ const NEVER_UNWIND = 1 << 3;
+ /// `#[naked]`: an indicator to LLVM that no function prologue/epilogue
+ /// should be generated.
+ const NAKED = 1 << 4;
+ /// `#[no_mangle]`: an indicator that the function's name should be the same
+ /// as its symbol.
+ const NO_MANGLE = 1 << 5;
+ /// `#[rustc_std_internal_symbol]`: an indicator that this symbol is a
+ /// "weird symbol" for the standard library in that it has slightly
+ /// different linkage, visibility, and reachability rules.
+ const RUSTC_STD_INTERNAL_SYMBOL = 1 << 6;
+ /// `#[thread_local]`: indicates a static is actually a thread local
+ /// piece of memory
+ const THREAD_LOCAL = 1 << 8;
+ /// `#[used]`: indicates that LLVM can't eliminate this function (but the
+ /// linker can!).
+ const USED = 1 << 9;
+ /// `#[ffi_returns_twice]`: indicates that an extern function can return
+ /// multiple times.
+ const FFI_RETURNS_TWICE = 1 << 10;
+ /// `#[track_caller]`: allow access to the caller location
+ const TRACK_CALLER = 1 << 11;
+ /// #[ffi_pure]: applies clang's `pure` attribute to a foreign function
+ /// declaration.
+ const FFI_PURE = 1 << 12;
+ /// #[ffi_const]: applies clang's `const` attribute to a foreign function
+ /// declaration.
+ const FFI_CONST = 1 << 13;
+ /// #[cmse_nonsecure_entry]: with a TrustZone-M extension, declare a
+ /// function as an entry function from Non-Secure code.
+ const CMSE_NONSECURE_ENTRY = 1 << 14;
+ /// `#[no_coverage]`: indicates that the function should be ignored by
+ /// the MIR `InstrumentCoverage` pass and not added to the coverage map
+ /// during codegen.
+ const NO_COVERAGE = 1 << 15;
+ /// `#[used(linker)]`: indicates that neither LLVM nor the linker can eliminate this function.
+ const USED_LINKER = 1 << 16;
+ /// `#[rustc_deallocator]`: a hint to LLVM that the function only deallocates memory.
+ const DEALLOCATOR = 1 << 17;
+ /// `#[rustc_reallocator]`: a hint to LLVM that the function only reallocates memory.
+ const REALLOCATOR = 1 << 18;
+ /// `#[rustc_allocator_zeroed]`: a hint to LLVM that the function only allocates zeroed memory.
+ const ALLOCATOR_ZEROED = 1 << 19;
+ }
+}
+
+impl CodegenFnAttrs {
+ pub const EMPTY: &'static Self = &Self::new();
+
+ pub const fn new() -> CodegenFnAttrs {
+ CodegenFnAttrs {
+ flags: CodegenFnAttrFlags::empty(),
+ inline: InlineAttr::None,
+ optimize: OptimizeAttr::None,
+ export_name: None,
+ link_name: None,
+ link_ordinal: None,
+ target_features: vec![],
+ linkage: None,
+ link_section: None,
+ no_sanitize: SanitizerSet::empty(),
+ instruction_set: None,
+ alignment: None,
+ }
+ }
+
+ /// Returns `true` if `#[inline]` or `#[inline(always)]` is present.
+ pub fn requests_inline(&self) -> bool {
+ match self.inline {
+ InlineAttr::Hint | InlineAttr::Always => true,
+ InlineAttr::None | InlineAttr::Never => false,
+ }
+ }
+
+ /// Returns `true` if it looks like this symbol needs to be exported, for example:
+ ///
+ /// * `#[no_mangle]` is present
+ /// * `#[export_name(...)]` is present
+ /// * `#[linkage]` is present
+ pub fn contains_extern_indicator(&self) -> bool {
+ self.flags.contains(CodegenFnAttrFlags::NO_MANGLE)
+ || self.export_name.is_some()
+ || match self.linkage {
+ // These are private, so make sure we don't try to consider
+ // them external.
+ None | Some(Linkage::Internal | Linkage::Private) => false,
+ Some(_) => true,
+ }
+ }
+}
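+
+// Usage (illustrative): flags are combined and queried through the
+// `bitflags`-generated API, e.g.
+//
+//     let mut attrs = CodegenFnAttrs::new();
+//     attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
+//     assert!(attrs.contains_extern_indicator());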
diff --git a/compiler/rustc_middle/src/middle/dependency_format.rs b/compiler/rustc_middle/src/middle/dependency_format.rs
new file mode 100644
index 000000000..e079843bf
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/dependency_format.rs
@@ -0,0 +1,28 @@
+//! Type definitions for learning about the dependency formats of all upstream
+//! crates (rlibs/dylibs/oh my).
+//!
+//! For all the gory details, see the provider of the `dependency_formats`
+//! query.
+
+use rustc_session::config::CrateType;
+
+/// A list of dependencies for a certain crate type.
+///
+/// The length of this vector is the same as the number of external crates used.
+/// The value is None if the crate does not need to be linked (it was found
+/// statically in another dylib), or Some(kind) if it needs to be linked as
+/// `kind` (either static or dynamic).
+pub type DependencyList = Vec<Linkage>;
+
+/// A mapping of all required dependencies for a particular flavor of output.
+///
+/// This is local to the tcx, and is generally relevant to one session.
+pub type Dependencies = Vec<(CrateType, DependencyList)>;
+
+#[derive(Copy, Clone, PartialEq, Debug, HashStable, Encodable, Decodable)]
+pub enum Linkage {
+ NotLinked,
+ IncludedFromDylib,
+ Static,
+ Dynamic,
+}
diff --git a/compiler/rustc_middle/src/middle/exported_symbols.rs b/compiler/rustc_middle/src/middle/exported_symbols.rs
new file mode 100644
index 000000000..631fd09ec
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/exported_symbols.rs
@@ -0,0 +1,72 @@
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_macros::HashStable;
+
+/// The `SymbolExportLevel` of a symbol specifies from which kinds of crates
+/// the symbol will be exported. `C` symbols will be exported from any
+/// kind of crate, including cdylibs which export very few things. `Rust`
+/// symbols will only be exported if the crate produced is a Rust dylib.
+#[derive(Eq, PartialEq, Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum SymbolExportLevel {
+ C,
+ Rust,
+}
+
+impl SymbolExportLevel {
+ pub fn is_below_threshold(self, threshold: SymbolExportLevel) -> bool {
+ threshold == SymbolExportLevel::Rust // export everything from Rust dylibs
+ || self == SymbolExportLevel::C
+ }
+}
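+
+// Spelled out, the check above means a threshold of `Rust` admits every
+// symbol, while a threshold of `C` admits only `C` symbols:
+//
+//     assert!(SymbolExportLevel::Rust.is_below_threshold(SymbolExportLevel::Rust));
+//     assert!(SymbolExportLevel::C.is_below_threshold(SymbolExportLevel::C));
+//     assert!(!SymbolExportLevel::Rust.is_below_threshold(SymbolExportLevel::C));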
+
+/// Kind of exported symbols.
+#[derive(Eq, PartialEq, Debug, Copy, Clone, Encodable, Decodable, HashStable)]
+pub enum SymbolExportKind {
+ Text,
+ Data,
+ Tls,
+}
+
+/// The `SymbolExportInfo` of a symbol specifies symbol-related information
+/// that is relevant to code generation and linking.
+#[derive(Eq, PartialEq, Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub struct SymbolExportInfo {
+ pub level: SymbolExportLevel,
+ pub kind: SymbolExportKind,
+ pub used: bool,
+}
+
+#[derive(Eq, PartialEq, Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum ExportedSymbol<'tcx> {
+ NonGeneric(DefId),
+ Generic(DefId, SubstsRef<'tcx>),
+ DropGlue(Ty<'tcx>),
+ NoDefId(ty::SymbolName<'tcx>),
+}
+
+impl<'tcx> ExportedSymbol<'tcx> {
+ /// This is the symbol name of an instance if it is instantiated in the
+ /// local crate.
+ pub fn symbol_name_for_local_instance(&self, tcx: TyCtxt<'tcx>) -> ty::SymbolName<'tcx> {
+ match *self {
+ ExportedSymbol::NonGeneric(def_id) => tcx.symbol_name(ty::Instance::mono(tcx, def_id)),
+ ExportedSymbol::Generic(def_id, substs) => {
+ tcx.symbol_name(ty::Instance::new(def_id, substs))
+ }
+ ExportedSymbol::DropGlue(ty) => {
+ tcx.symbol_name(ty::Instance::resolve_drop_in_place(tcx, ty))
+ }
+ ExportedSymbol::NoDefId(symbol_name) => symbol_name,
+ }
+ }
+}
+
+pub fn metadata_symbol_name(tcx: TyCtxt<'_>) -> String {
+ format!(
+ "rust_metadata_{}_{:08x}",
+ tcx.crate_name(LOCAL_CRATE),
+ tcx.sess.local_stable_crate_id().to_u64(),
+ )
+}
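+
+// Example (hypothetical crate): a crate named `foo` whose stable crate ID is
+// 0xdeadbeef would get the metadata symbol `rust_metadata_foo_deadbeef`.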
diff --git a/compiler/rustc_middle/src/middle/lang_items.rs b/compiler/rustc_middle/src/middle/lang_items.rs
new file mode 100644
index 000000000..cc9706f2d
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/lang_items.rs
@@ -0,0 +1,61 @@
+//! Detecting language items.
+//!
+//! Language items are items that represent concepts intrinsic to the language
+//! itself. Examples are:
+//!
+//! * Traits that specify "kinds"; e.g., `Sync`, `Send`.
+//! * Traits that represent operators; e.g., `Add`, `Sub`, `Index`.
+//! * Functions called by the compiler itself.
+
+use crate::ty::{self, TyCtxt};
+
+use rustc_hir::def_id::DefId;
+use rustc_hir::LangItem;
+use rustc_span::Span;
+use rustc_target::spec::PanicStrategy;
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Returns the `DefId` for a given `LangItem`.
+ /// If not found, fatally aborts compilation.
+ pub fn require_lang_item(self, lang_item: LangItem, span: Option<Span>) -> DefId {
+ self.lang_items().require(lang_item).unwrap_or_else(|msg| {
+ if let Some(span) = span {
+ self.sess.span_fatal(span, &msg)
+ } else {
+ self.sess.fatal(&msg)
+ }
+ })
+ }
+
+ pub fn fn_trait_kind_from_lang_item(self, id: DefId) -> Option<ty::ClosureKind> {
+ let items = self.lang_items();
+ match Some(id) {
+ x if x == items.fn_trait() => Some(ty::ClosureKind::Fn),
+ x if x == items.fn_mut_trait() => Some(ty::ClosureKind::FnMut),
+ x if x == items.fn_once_trait() => Some(ty::ClosureKind::FnOnce),
+ _ => None,
+ }
+ }
+
+ pub fn is_weak_lang_item(self, item_def_id: DefId) -> bool {
+ self.lang_items().is_weak_lang_item(item_def_id)
+ }
+}
+
+/// Returns `true` if the specified `lang_item` must be present for this
+/// compilation.
+///
+/// Not all lang items are always required for each compilation, particularly in
+/// the case of panic=abort. In these situations some lang items are injected by
+/// crates and don't actually need to be defined in libstd.
+pub fn required(tcx: TyCtxt<'_>, lang_item: LangItem) -> bool {
+ // If we're not compiling with unwinding, we won't actually need these
+ // symbols. Other panic runtimes ensure that the relevant symbols are
+ // available to link things together, but they're never exercised.
+ match tcx.sess.panic_strategy() {
+ PanicStrategy::Abort => {
+ lang_item != LangItem::EhPersonality && lang_item != LangItem::EhCatchTypeinfo
+ }
+ PanicStrategy::Unwind => true,
+ }
+}
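+
+// Example: under `-C panic=abort`, `LangItem::EhPersonality` and
+// `LangItem::EhCatchTypeinfo` are reported as not required, since the
+// unwinding machinery they support is never exercised.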
diff --git a/compiler/rustc_middle/src/middle/limits.rs b/compiler/rustc_middle/src/middle/limits.rs
new file mode 100644
index 000000000..acced0492
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/limits.rs
@@ -0,0 +1,85 @@
+//! Registering limits:
+//! * recursion_limit,
+//! * move_size_limit,
+//! * type_length_limit, and
+//! * const_eval_limit
+//!
+//! There are various parts of the compiler that must impose arbitrary limits
+//! on how deeply they recurse to prevent stack overflow. Users can override
+//! this via an attribute on the crate like `#![recursion_limit="22"]`. This pass
+//! just peeks and looks for that attribute.
+
+use crate::bug;
+use crate::ty;
+use rustc_ast::Attribute;
+use rustc_session::Session;
+use rustc_session::{Limit, Limits};
+use rustc_span::symbol::{sym, Symbol};
+
+use std::num::IntErrorKind;
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ providers.limits = |tcx, ()| Limits {
+ recursion_limit: get_recursion_limit(tcx.hir().krate_attrs(), tcx.sess),
+ move_size_limit: get_limit(
+ tcx.hir().krate_attrs(),
+ tcx.sess,
+ sym::move_size_limit,
+ tcx.sess.opts.unstable_opts.move_size_limit.unwrap_or(0),
+ ),
+ type_length_limit: get_limit(
+ tcx.hir().krate_attrs(),
+ tcx.sess,
+ sym::type_length_limit,
+ 1048576,
+ ),
+ const_eval_limit: get_limit(
+ tcx.hir().krate_attrs(),
+ tcx.sess,
+ sym::const_eval_limit,
+ 1_000_000,
+ ),
+ }
+}
+
+pub fn get_recursion_limit(krate_attrs: &[Attribute], sess: &Session) -> Limit {
+ get_limit(krate_attrs, sess, sym::recursion_limit, 128)
+}
+
+fn get_limit(krate_attrs: &[Attribute], sess: &Session, name: Symbol, default: usize) -> Limit {
+ for attr in krate_attrs {
+ if !attr.has_name(name) {
+ continue;
+ }
+
+ if let Some(s) = attr.value_str() {
+ match s.as_str().parse() {
+ Ok(n) => return Limit::new(n),
+ Err(e) => {
+ let mut err =
+ sess.struct_span_err(attr.span, "`limit` must be a non-negative integer");
+
+ let value_span = attr
+ .meta()
+ .and_then(|meta| meta.name_value_literal_span())
+ .unwrap_or(attr.span);
+
+ let error_str = match e.kind() {
+ IntErrorKind::PosOverflow => "`limit` is too large",
+ IntErrorKind::Empty => "`limit` must be a non-negative integer",
+ IntErrorKind::InvalidDigit => "not a valid integer",
+ IntErrorKind::NegOverflow => {
+ bug!("`limit` should never negatively overflow")
+ }
+ IntErrorKind::Zero => bug!("zero is a valid `limit`"),
+ kind => bug!("unimplemented IntErrorKind variant: {:?}", kind),
+ };
+
+ err.span_label(value_span, error_str);
+ err.emit();
+ }
+ }
+ }
+ }
+ return Limit::new(default);
+}
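+
+// Usage (illustrative): `#![recursion_limit = "256"]` on the crate root makes
+// `get_recursion_limit` return `Limit::new(256)`; with no attribute the
+// default of 128 applies, and a malformed value such as
+// `#![recursion_limit = "abc"]` is reported as "not a valid integer" at the
+// attribute's value span.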
diff --git a/compiler/rustc_middle/src/middle/mod.rs b/compiler/rustc_middle/src/middle/mod.rs
new file mode 100644
index 000000000..8dc68b1f5
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/mod.rs
@@ -0,0 +1,37 @@
+pub mod codegen_fn_attrs;
+pub mod dependency_format;
+pub mod exported_symbols;
+pub mod lang_items;
+pub mod lib_features {
+ use rustc_data_structures::fx::FxHashMap;
+ use rustc_span::{symbol::Symbol, Span};
+
+ #[derive(HashStable, Debug)]
+ pub struct LibFeatures {
+ /// A map from feature to stabilisation version.
+ pub stable: FxHashMap<Symbol, (Symbol, Span)>,
+ pub unstable: FxHashMap<Symbol, Span>,
+ }
+
+ impl LibFeatures {
+ pub fn to_vec(&self) -> Vec<(Symbol, Option<Symbol>)> {
+ let mut all_features: Vec<_> = self
+ .stable
+ .iter()
+ .map(|(f, (s, _))| (*f, Some(*s)))
+ .chain(self.unstable.iter().map(|(f, _)| (*f, None)))
+ .collect();
+ all_features.sort_unstable_by(|a, b| a.0.as_str().partial_cmp(b.0.as_str()).unwrap());
+ all_features
+ }
+ }
+}
+pub mod limits;
+pub mod privacy;
+pub mod region;
+pub mod resolve_lifetime;
+pub mod stability;
+
+pub fn provide(providers: &mut crate::ty::query::Providers) {
+ limits::provide(providers);
+}
diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs
new file mode 100644
index 000000000..751c7f464
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/privacy.rs
@@ -0,0 +1,64 @@
+//! A pass that checks to make sure private fields and methods aren't used
+//! outside their scopes. This pass will also generate a set of exported items
+//! which are available for use externally when compiled as a library.
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_macros::HashStable;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_span::def_id::LocalDefId;
+use std::hash::Hash;
+
+/// Represents the levels of accessibility an item can have.
+///
+/// The variants are sorted in ascending order of accessibility.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, HashStable)]
+pub enum AccessLevel {
+ /// Superset of `AccessLevel::Reachable` used to mark impl Trait items.
+ ReachableFromImplTrait,
+ /// Exported items + items participating in various kinds of public interfaces,
+ /// but not directly nameable. For example, if function `fn f() -> T {...}` is
+ /// public, then type `T` is reachable. Its values can be obtained by other crates
+ /// even if the type itself is not nameable.
+ Reachable,
+ /// Public items + items accessible to other crates with the help of `pub use` re-exports.
+ Exported,
+ /// Items accessible to other crates directly, without the help of re-exports.
+ Public,
+}
+
+/// Holds a map of accessibility levels for reachable HIR nodes.
+#[derive(Debug, Clone)]
+pub struct AccessLevels<Id = LocalDefId> {
+ pub map: FxHashMap<Id, AccessLevel>,
+}
+
+impl<Id: Hash + Eq> AccessLevels<Id> {
+ /// See `AccessLevel::Reachable`.
+ pub fn is_reachable(&self, id: Id) -> bool {
+ self.map.get(&id) >= Some(&AccessLevel::Reachable)
+ }
+
+ /// See `AccessLevel::Exported`.
+ pub fn is_exported(&self, id: Id) -> bool {
+ self.map.get(&id) >= Some(&AccessLevel::Exported)
+ }
+
+ /// See `AccessLevel::Public`.
+ pub fn is_public(&self, id: Id) -> bool {
+ self.map.get(&id) >= Some(&AccessLevel::Public)
+ }
+}
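+
+// These checks lean on the derived `Ord` for `AccessLevel` (variants are
+// declared in ascending order) and on `Option`'s ordering, which places
+// `None` (no recorded level) below every `Some`. Illustrative sketch with a
+// hypothetical `u32` key:
+//
+//     let mut levels = AccessLevels::<u32>::default();
+//     levels.map.insert(1, AccessLevel::Public);
+//     assert!(levels.is_reachable(1) && levels.is_exported(1) && levels.is_public(1));
+//     assert!(!levels.is_reachable(2)); // absent => lowest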
+
+impl<Id> Default for AccessLevels<Id> {
+ fn default() -> Self {
+ AccessLevels { map: Default::default() }
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for AccessLevels {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let AccessLevels { ref map } = *self;
+ map.hash_stable(hcx, hasher);
+ }
+}
diff --git a/compiler/rustc_middle/src/middle/region.rs b/compiler/rustc_middle/src/middle/region.rs
new file mode 100644
index 000000000..c886175c6
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/region.rs
@@ -0,0 +1,443 @@
+//! This file declares the `ScopeTree` type, which describes
+//! the parent links in the region hierarchy.
+//!
+//! For more information about how MIR-based region-checking works,
+//! see the [rustc dev guide].
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
+
+use crate::ty::TyCtxt;
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_macros::HashStable;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_span::{Span, DUMMY_SP};
+
+use std::fmt;
+use std::ops::Deref;
+
+/// Represents a statically-describable scope that can be used to
+/// bound the lifetime/region for values.
+///
+/// `Node(node_id)`: Any AST node that has any scope at all has the
+/// `Node(node_id)` scope. Other variants represent special cases not
+/// immediately derivable from the abstract syntax tree structure.
+///
+/// `DestructionScope(node_id)` represents the scope of destructors
+/// implicitly-attached to `node_id` that run immediately after the
+/// expression for `node_id` itself. Not every AST node carries a
+/// `DestructionScope`, but those that are `terminating_scopes` do;
+/// see discussion with `ScopeTree`.
+///
+/// `Remainder { block, statement_index }` represents
+/// the scope of user code running immediately after the initializer
+/// expression for the indexed statement, until the end of the block.
+///
+/// So: the following code can be broken down into the scopes beneath:
+///
+/// ```text
+/// let a = f().g( 'b: { let x = d(); let y = d(); x.h(y) } ) ;
+///
+/// +-+ (D12.)
+/// +-+ (D11.)
+/// +---------+ (R10.)
+/// +-+ (D9.)
+/// +----------+ (M8.)
+/// +----------------------+ (R7.)
+/// +-+ (D6.)
+/// +----------+ (M5.)
+/// +-----------------------------------+ (M4.)
+/// +--------------------------------------------------+ (M3.)
+/// +--+ (M2.)
+/// +-----------------------------------------------------------+ (M1.)
+///
+/// (M1.): Node scope of the whole `let a = ...;` statement.
+/// (M2.): Node scope of the `f()` expression.
+/// (M3.): Node scope of the `f().g(..)` expression.
+/// (M4.): Node scope of the block labeled `'b:`.
+/// (M5.): Node scope of the `let x = d();` statement
+/// (D6.): DestructionScope for temporaries created during M5.
+/// (R7.): Remainder scope for block `'b:`, stmt 0 (let x = ...).
+/// (M8.): Node scope of the `let y = d();` statement.
+/// (D9.): DestructionScope for temporaries created during M8.
+/// (R10.): Remainder scope for block `'b:`, stmt 1 (let y = ...).
+/// (D11.): DestructionScope for temporaries and bindings from block `'b:`.
+/// (D12.): DestructionScope for temporaries created during M1 (e.g., f()).
+/// ```
+///
+/// Note that while the above picture shows the destruction scopes
+/// as following their corresponding node scopes, in the internal
+/// data structures of the compiler the destruction scopes are
+/// represented as enclosing parents. This is sound because we use the
+/// enclosing parent relationship just to ensure that referenced
+/// values live long enough; phrased another way, the starting point
+/// of each range is not really the important thing in the above
+/// picture, but rather the ending point.
+//
+// FIXME(pnkfelix): this currently derives `PartialOrd` and `Ord` to
+// placate the same deriving in `ty::FreeRegion`, but we may want to
+// actually attach a more meaningful ordering to scopes than the one
+// generated via deriving here.
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Copy, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct Scope {
+ pub id: hir::ItemLocalId,
+ pub data: ScopeData,
+}
+
+impl fmt::Debug for Scope {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.data {
+ ScopeData::Node => write!(fmt, "Node({:?})", self.id),
+ ScopeData::CallSite => write!(fmt, "CallSite({:?})", self.id),
+ ScopeData::Arguments => write!(fmt, "Arguments({:?})", self.id),
+ ScopeData::Destruction => write!(fmt, "Destruction({:?})", self.id),
+ ScopeData::IfThen => write!(fmt, "IfThen({:?})", self.id),
+ ScopeData::Remainder(fsi) => write!(
+ fmt,
+ "Remainder {{ block: {:?}, first_statement_index: {}}}",
+ self.id,
+ fsi.as_u32(),
+ ),
+ }
+ }
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Debug, Copy, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum ScopeData {
+ Node,
+
+ /// Scope of the call-site for a function or closure
+ /// (outlives the arguments as well as the body).
+ CallSite,
+
+ /// Scope of arguments passed to a function or closure
+ /// (they outlive its body).
+ Arguments,
+
+ /// Scope of destructors for temporaries of node-id.
+ Destruction,
+
+ /// Scope of the condition and then block of an if expression
+ /// Used for variables introduced in an if-let expression.
+ IfThen,
+
+ /// Scope following a `let id = expr;` binding in a block.
+ Remainder(FirstStatementIndex),
+}
+
+rustc_index::newtype_index! {
+ /// Represents a subscope of `block` for a binding that is introduced
+ /// by `block.stmts[first_statement_index]`. Such subscopes represent
+ /// a suffix of the block. Note that each subscope does not include
+ /// the initializer expression, if any, for the statement indexed by
+ /// `first_statement_index`.
+ ///
+ /// For example, given `{ let (a, b) = EXPR_1; let c = EXPR_2; ... }`:
+ ///
+ /// * The subscope with `first_statement_index == 0` is scope of both
+ /// `a` and `b`; it does not include EXPR_1, but does include
+ /// everything after that first `let`. (If you want a scope that
+ /// includes EXPR_1 as well, then do not use `Scope::Remainder`,
+ /// but instead another `Scope` that encompasses the whole block,
+ /// e.g., `Scope::Node`.)
+ ///
+ /// * The subscope with `first_statement_index == 1` is scope of `c`,
+ /// and thus does not include EXPR_2, but covers the `...`.
+ pub struct FirstStatementIndex {
+ derive [HashStable]
+ }
+}
+
+// compilation error if size of `ScopeData` is not the same as a `u32`
+static_assert_size!(ScopeData, 4);
+
+impl Scope {
+ /// Returns an item-local ID associated with this scope.
+ ///
+ /// N.B., likely to be replaced as API is refined; e.g., pnkfelix
+ /// anticipates `fn entry_node_id` and `fn each_exit_node_id`.
+ pub fn item_local_id(&self) -> hir::ItemLocalId {
+ self.id
+ }
+
+ pub fn hir_id(&self, scope_tree: &ScopeTree) -> Option<hir::HirId> {
+ scope_tree
+ .root_body
+ .map(|hir_id| hir::HirId { owner: hir_id.owner, local_id: self.item_local_id() })
+ }
+
+ /// Returns the span of this `Scope`. Note that in general the
+ /// returned span may not correspond to the span of any `NodeId` in
+ /// the AST.
+ pub fn span(&self, tcx: TyCtxt<'_>, scope_tree: &ScopeTree) -> Span {
+ let Some(hir_id) = self.hir_id(scope_tree) else {
+ return DUMMY_SP;
+ };
+ let span = tcx.hir().span(hir_id);
+ if let ScopeData::Remainder(first_statement_index) = self.data {
+ if let Node::Block(ref blk) = tcx.hir().get(hir_id) {
+ // Want span for scope starting after the
+ // indexed statement and ending at end of
+ // `blk`; reuse span of `blk` and shift `lo`
+ // forward to end of indexed statement.
+ //
+ // (This is the special case alluded to in the
+ // doc-comment for this method)
+
+ let stmt_span = blk.stmts[first_statement_index.index()].span;
+
+ // To avoid issues with macro-generated spans, the span
+ // of the statement must be nested in that of the block.
+ if span.lo() <= stmt_span.lo() && stmt_span.lo() <= span.hi() {
+ return span.with_lo(stmt_span.lo());
+ }
+ }
+ }
+ span
+ }
+}
+
+pub type ScopeDepth = u32;
+
+/// The region scope tree encodes information about region relationships.
+#[derive(TyEncodable, TyDecodable, Default, Debug)]
+pub struct ScopeTree {
+ /// If not empty, this body is the root of this region hierarchy.
+ pub root_body: Option<hir::HirId>,
+
+ /// Maps from a scope ID to the enclosing scope id;
+ /// this usually corresponds to the lexical nesting, though
+ /// in the case of closures the parent scope is the innermost
+ /// conditional expression or repeating block. (Note that the
+ /// enclosing scope ID for the block associated with a closure is
+ /// the closure itself.)
+ pub parent_map: FxIndexMap<Scope, (Scope, ScopeDepth)>,
+
+ /// Maps from a variable or binding ID to the block in which that
+ /// variable is declared.
+ var_map: FxIndexMap<hir::ItemLocalId, Scope>,
+
+ /// Maps from a `NodeId` to the associated destruction scope (if any).
+ destruction_scopes: FxIndexMap<hir::ItemLocalId, Scope>,
+
+ /// Identifies expressions which, if captured into a temporary, ought to
+ /// have a temporary whose lifetime extends to the end of the enclosing *block*,
+ /// and not the enclosing *statement*. Expressions that are not present in this
+ /// table are not rvalue candidates. The set of rvalue candidates is computed
+ /// during type check based on a traversal of the AST.
+ pub rvalue_candidates: FxHashMap<hir::HirId, RvalueCandidateType>,
+
+ /// If there are any `yield` nested within a scope, this map
+ /// stores the `Span` of the last one and its index in the
+ /// postorder of the Visitor traversal on the HIR.
+ ///
+ /// HIR Visitor postorder indexes might seem like a peculiar
+ /// thing to care about, but it turns out that HIR bindings
+ /// and the temporary results of HIR expressions are never
+ /// storage-live at the end of HIR nodes with postorder indexes
+ /// lower than theirs, and therefore don't need to be suspended
+ /// at yield-points at these indexes.
+ ///
+ /// For an example, suppose we have some code such as:
+ /// ```rust,ignore (example)
+ /// foo(f(), yield y, bar(g()))
+ /// ```
+ ///
+ /// With the HIR tree (calls numbered for expository purposes)
+ ///
+ /// ```text
+ /// Call#0(foo, [Call#1(f), Yield(y), Call#2(bar, Call#3(g))])
+ /// ```
+ ///
+ /// Obviously, the result of `f()` was created before the yield
+ /// (and therefore needs to be kept valid over the yield) while
+ /// the result of `g()` occurs after the yield (and therefore
+ /// doesn't). If we want to infer that, we can look at the
+ /// postorder traversal:
+ /// ```plain,ignore
+ /// `foo` `f` Call#1 `y` Yield `bar` `g` Call#3 Call#2 Call#0
+ /// ```
+ ///
+ /// In which we can easily see that `Call#1` occurs before the yield,
+ /// and `Call#3` after it.
+ ///
+ /// To see that this method works, consider:
+ ///
+ /// Let `D` be our binding/temporary and `U` be our other HIR node, with
+ /// `HIR-postorder(U) < HIR-postorder(D)`. Suppose, as in our example,
+ /// U is the yield and D is one of the calls.
+ /// Let's show that `D` is storage-dead at `U`.
+ ///
+ /// Remember that storage-live/storage-dead refers to the state of
+ /// the *storage*, and does not consider moves/drop flags.
+ ///
+ /// Then:
+ ///
+ /// 1. From the ordering guarantee of HIR visitors (see
+ /// `rustc_hir::intravisit`), `D` does not dominate `U`.
+ ///
+ /// 2. Therefore, `D` is *potentially* storage-dead at `U` (because
+ /// we might visit `U` without ever getting to `D`).
+ ///
+ /// 3. However, we guarantee that at each HIR point, each
+ /// binding/temporary is always either always storage-live
+ /// or always storage-dead. This is guaranteed by
+ /// `terminating_scopes` including all blocks whose execution
+ /// count is not statically guaranteed.
+ ///
+ /// 4. By `2.` and `3.`, `D` is *statically* storage-dead at `U`,
+ /// QED.
+ ///
+ /// This property ought to not depend on (3) in an essential way -- it
+ /// is probably still correct even if we have "unrestricted" terminating
+ /// scopes. However, why use the complicated proof when a simple one
+ /// works?
+ ///
+ /// A subtle thing: `box` expressions, such as `box (&x, yield 2, &y)`. It
+ /// might seem that a `box` expression creates a `Box<T>` temporary
+ /// when it *starts* executing, at `HIR-preorder(BOX-EXPR)`. That might
+ /// be true in the MIR desugaring, but it is not important in the semantics.
+ ///
+ /// The reason is that semantically, until the `box` expression returns,
+ /// the values are still owned by their containing expressions. So
+ /// we'll see that `&x`, which is computed before the yield, is what
+ /// must be kept live across it, rather than the `Box` temporary itself.
+ pub yield_in_scope: FxHashMap<Scope, Vec<YieldData>>,
+
+ /// The number of visit_expr and visit_pat calls done in the body.
+ /// Used to sanity check visit_expr/visit_pat call count when
+ /// calculating generator interiors.
+ pub body_expr_count: FxHashMap<hir::BodyId, usize>,
+}
+
+/// Identifies the reason that a given expression is an rvalue candidate
+/// (see the `rvalue_candidates` field for more information on what rvalue
+/// candidates are in general). In constants, the `lifetime` field is `None`
+/// to indicate that certain expressions escape into 'static and
+/// should have no local cleanup scope.
+#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum RvalueCandidateType {
+ Borrow { target: hir::ItemLocalId, lifetime: Option<Scope> },
+ Pattern { target: hir::ItemLocalId, lifetime: Option<Scope> },
+}
+
+#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub struct YieldData {
+ /// The `Span` of the yield.
+ pub span: Span,
+ /// The number of expressions and patterns appearing before the `yield` in the body, plus one.
+ pub expr_and_pat_count: usize,
+ pub source: hir::YieldSource,
+}
+
+impl ScopeTree {
+ pub fn record_scope_parent(&mut self, child: Scope, parent: Option<(Scope, ScopeDepth)>) {
+ debug!("{:?}.parent = {:?}", child, parent);
+
+ if let Some(p) = parent {
+ let prev = self.parent_map.insert(child, p);
+ assert!(prev.is_none());
+ }
+
+ // Record the destruction scopes for later so we can query them.
+ if let ScopeData::Destruction = child.data {
+ self.destruction_scopes.insert(child.item_local_id(), child);
+ }
+ }
+
+ pub fn opt_destruction_scope(&self, n: hir::ItemLocalId) -> Option<Scope> {
+ self.destruction_scopes.get(&n).cloned()
+ }
+
+ pub fn record_var_scope(&mut self, var: hir::ItemLocalId, lifetime: Scope) {
+ debug!("record_var_scope(sub={:?}, sup={:?})", var, lifetime);
+ assert!(var != lifetime.item_local_id());
+ self.var_map.insert(var, lifetime);
+ }
+
+ pub fn record_rvalue_candidate(
+ &mut self,
+ var: hir::HirId,
+ candidate_type: RvalueCandidateType,
+ ) {
+ debug!("record_rvalue_candidate(var={var:?}, type={candidate_type:?})");
+ match &candidate_type {
+ RvalueCandidateType::Borrow { lifetime: Some(lifetime), .. }
+ | RvalueCandidateType::Pattern { lifetime: Some(lifetime), .. } => {
+ assert!(var.local_id != lifetime.item_local_id())
+ }
+ _ => {}
+ }
+ self.rvalue_candidates.insert(var, candidate_type);
+ }
+
+ /// Returns the narrowest scope that encloses `id`, if any.
+ pub fn opt_encl_scope(&self, id: Scope) -> Option<Scope> {
+ self.parent_map.get(&id).cloned().map(|(p, _)| p)
+ }
+
+ /// Returns the lifetime of the local variable `var_id`, if any.
+ pub fn var_scope(&self, var_id: hir::ItemLocalId) -> Option<Scope> {
+ self.var_map.get(&var_id).cloned()
+ }
+
+ /// Returns `true` if `subscope` is equal to or is lexically nested inside `superscope`, and
+ /// `false` otherwise.
+ ///
+ /// Used by clippy.
+ pub fn is_subscope_of(&self, subscope: Scope, superscope: Scope) -> bool {
+ let mut s = subscope;
+ debug!("is_subscope_of({:?}, {:?})", subscope, superscope);
+ while superscope != s {
+ match self.opt_encl_scope(s) {
+ None => {
+ debug!("is_subscope_of({:?}, {:?}, s={:?})=false", subscope, superscope, s);
+ return false;
+ }
+ Some(scope) => s = scope,
+ }
+ }
+
+ debug!("is_subscope_of({:?}, {:?})=true", subscope, superscope);
+
+ true
+ }
+
+ /// Checks whether the given scope contains a `yield`. If so,
+ /// returns `Some` with the recorded `YieldData` entries; otherwise, returns `None`.
+ pub fn yield_in_scope(&self, scope: Scope) -> Option<&[YieldData]> {
+ self.yield_in_scope.get(&scope).map(Deref::deref)
+ }
+
+ /// Gives the number of expressions visited in a body.
+ /// Used to sanity check visit_expr call count when
+ /// calculating generator interiors.
+ pub fn body_expr_count(&self, body_id: hir::BodyId) -> Option<usize> {
+ self.body_expr_count.get(&body_id).copied()
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ScopeTree {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let ScopeTree {
+ root_body,
+ ref body_expr_count,
+ ref parent_map,
+ ref var_map,
+ ref destruction_scopes,
+ ref rvalue_candidates,
+ ref yield_in_scope,
+ } = *self;
+
+ root_body.hash_stable(hcx, hasher);
+ body_expr_count.hash_stable(hcx, hasher);
+ parent_map.hash_stable(hcx, hasher);
+ var_map.hash_stable(hcx, hasher);
+ destruction_scopes.hash_stable(hcx, hasher);
+ rvalue_candidates.hash_stable(hcx, hasher);
+ yield_in_scope.hash_stable(hcx, hasher);
+ }
+}
diff --git a/compiler/rustc_middle/src/middle/resolve_lifetime.rs b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
new file mode 100644
index 000000000..9b2f44567
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
@@ -0,0 +1,54 @@
+//! Name resolution for lifetimes: type declarations.
+
+use crate::ty;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::ItemLocalId;
+use rustc_macros::HashStable;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, HashStable)]
+pub enum Region {
+ Static,
+ EarlyBound(/* index */ u32, /* lifetime decl */ DefId),
+ LateBound(ty::DebruijnIndex, /* late-bound index */ u32, /* lifetime decl */ DefId),
+ Free(DefId, /* lifetime decl */ DefId),
+}
+
+/// A set containing, at most, one known element.
+/// If two distinct values are inserted into a set, then it
+/// becomes `Many`, which can be used to detect ambiguities.
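+///
+/// A small illustration of the collapsing behavior:
+/// ```rust,ignore (illustrative)
+/// let mut set: Set1<u32> = Set1::Empty;
+/// set.insert(1);
+/// assert_eq!(set, Set1::One(1));
+/// // Re-inserting the same value keeps `One`.
+/// set.insert(1);
+/// assert_eq!(set, Set1::One(1));
+/// // A second distinct value collapses the set to `Many`.
+/// set.insert(2);
+/// assert_eq!(set, Set1::Many);
+/// ```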
+#[derive(Copy, Clone, PartialEq, Eq, TyEncodable, TyDecodable, Debug, HashStable)]
+pub enum Set1<T> {
+ Empty,
+ One(T),
+ Many,
+}
+
+impl<T: PartialEq> Set1<T> {
+ pub fn insert(&mut self, value: T) {
+ *self = match self {
+ Set1::Empty => Set1::One(value),
+ Set1::One(old) if *old == value => return,
+ _ => Set1::Many,
+ };
+ }
+}
+
+pub type ObjectLifetimeDefault = Set1<Region>;
+
+/// Maps the id of each lifetime reference to the lifetime decl
+/// that it corresponds to.
+#[derive(Default, HashStable, Debug)]
+pub struct ResolveLifetimes {
+ /// Maps from every use of a named (not anonymous) lifetime to a
+ /// `Region` describing how that region is bound
+ pub defs: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Region>>,
+
+ /// Set of lifetime def ids that are late-bound; a region can
+ /// be late-bound if (a) it does NOT appear in a where-clause and
+ /// (b) it DOES appear in the arguments.
+ pub late_bound: FxHashMap<LocalDefId, FxHashSet<LocalDefId>>,
+
+ pub late_bound_vars: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>>,
+}
diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs
new file mode 100644
index 000000000..414912dd0
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/stability.rs
@@ -0,0 +1,591 @@
+//! A pass that annotates every item and method with its stability level,
+//! propagating default levels lexically from parent to child AST nodes.
+
+pub use self::StabilityLevel::*;
+
+use crate::ty::{self, DefIdTree, TyCtxt};
+use rustc_ast::NodeId;
+use rustc_attr::{self as attr, ConstStability, Deprecation, Stability};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_feature::GateIssue;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::{self as hir, HirId};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_session::lint::builtin::{DEPRECATED, DEPRECATED_IN_FUTURE, SOFT_UNSTABLE};
+use rustc_session::lint::{BuiltinLintDiagnostics, Level, Lint, LintBuffer};
+use rustc_session::parse::feature_err_issue;
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+use std::num::NonZeroU32;
+
+#[derive(PartialEq, Clone, Copy, Debug)]
+pub enum StabilityLevel {
+ Unstable,
+ Stable,
+}
+
+/// An entry in the `depr_map`.
+#[derive(Copy, Clone, HashStable, Debug, Encodable, Decodable)]
+pub struct DeprecationEntry {
+ /// The metadata of the attribute associated with this entry.
+ pub attr: Deprecation,
+ /// The `DefId` where the attr was originally attached. `None` for non-local
+ /// `DefId`s.
+ origin: Option<LocalDefId>,
+}
+
+impl DeprecationEntry {
+ pub fn local(attr: Deprecation, def_id: LocalDefId) -> DeprecationEntry {
+ DeprecationEntry { attr, origin: Some(def_id) }
+ }
+
+ pub fn external(attr: Deprecation) -> DeprecationEntry {
+ DeprecationEntry { attr, origin: None }
+ }
+
+ pub fn same_origin(&self, other: &DeprecationEntry) -> bool {
+ match (self.origin, other.origin) {
+ (Some(o1), Some(o2)) => o1 == o2,
+ _ => false,
+ }
+ }
+}
+
+/// A stability index, giving the stability level for items and methods.
+#[derive(HashStable, Debug)]
+pub struct Index {
+ /// This is mostly a cache, except the stabilities of local items
+ /// are filled by the annotator.
+ pub stab_map: FxHashMap<LocalDefId, Stability>,
+ pub const_stab_map: FxHashMap<LocalDefId, ConstStability>,
+ pub depr_map: FxHashMap<LocalDefId, DeprecationEntry>,
+ /// Mapping from feature name to feature name based on the `implied_by` field of `#[unstable]`
+ /// attributes. If a `#[unstable(feature = "implier", implied_by = "impliee")]` attribute
+ /// exists, then this map will have a `impliee -> implier` entry.
+ ///
+ /// This mapping is necessary unless both the `#[stable]` and `#[unstable]` attributes
+ /// specify their implications (both `implies` and `implied_by`). If only one of the two
+ /// attributes does (as in the current implementation, `implied_by` in `#[unstable]`), then
+ /// this mapping is necessary for diagnostics. When an "unnecessary feature attribute" error
+ /// is reported, only the `#[stable]` attribute information is available, so the map is
+ /// necessary to know that the feature implies another feature. If it were reversed, and the
+ /// `#[stable]` attribute had an `implies` meta item, then a map would be necessary when
+ /// avoiding a "use of unstable feature" error for a feature that was implied.
+ pub implications: FxHashMap<Symbol, Symbol>,
+}
+
+impl Index {
+ pub fn local_stability(&self, def_id: LocalDefId) -> Option<Stability> {
+ self.stab_map.get(&def_id).copied()
+ }
+
+ pub fn local_const_stability(&self, def_id: LocalDefId) -> Option<ConstStability> {
+ self.const_stab_map.get(&def_id).copied()
+ }
+
+ pub fn local_deprecation_entry(&self, def_id: LocalDefId) -> Option<DeprecationEntry> {
+ self.depr_map.get(&def_id).cloned()
+ }
+}
+
+pub fn report_unstable(
+ sess: &Session,
+ feature: Symbol,
+ reason: Option<Symbol>,
+ issue: Option<NonZeroU32>,
+ suggestion: Option<(Span, String, String, Applicability)>,
+ is_soft: bool,
+ span: Span,
+ soft_handler: impl FnOnce(&'static Lint, Span, &str),
+) {
+ let msg = match reason {
+ Some(r) => format!("use of unstable library feature '{}': {}", feature, r),
+ None => format!("use of unstable library feature '{}'", &feature),
+ };
+
+ if is_soft {
+ soft_handler(SOFT_UNSTABLE, span, &msg)
+ } else {
+ let mut err =
+ feature_err_issue(&sess.parse_sess, feature, span, GateIssue::Library(issue), &msg);
+ if let Some((inner_types, ref msg, sugg, applicability)) = suggestion {
+ err.span_suggestion(inner_types, msg, sugg, applicability);
+ }
+ err.emit();
+ }
+}
+
+/// Checks whether an item marked with `deprecated(since="X")` is currently
+/// deprecated (i.e., whether X is not greater than the current rustc version).
+pub fn deprecation_in_effect(depr: &Deprecation) -> bool {
+ let is_since_rustc_version = depr.is_since_rustc_version;
+ let since = depr.since.as_ref().map(Symbol::as_str);
+
+ fn parse_version(ver: &str) -> Vec<u32> {
+ // We ignore non-integer components of the version (e.g., "nightly").
+ ver.split(|c| c == '.' || c == '-').flat_map(|s| s.parse()).collect()
+ }
+
+ if !is_since_rustc_version {
+ // The `since` field doesn't have semantic purpose without `#![staged_api]`.
+ return true;
+ }
+
+ if let Some(since) = since {
+ if since == "TBD" {
+ return false;
+ }
+
+ if let Some(rustc) = option_env!("CFG_RELEASE") {
+ let since: Vec<u32> = parse_version(&since);
+ let rustc: Vec<u32> = parse_version(rustc);
+ // We simply treat invalid `since` attributes as relating to a previous
+ // Rust version, thus always displaying the warning.
+ if since.len() != 3 {
+ return true;
+ }
+ return since <= rustc;
+ }
+ };
+
+ // Assume deprecation is in effect if "since" field is missing
+ // or if we can't determine the current Rust version.
+ true
+}
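+
+// A worked sketch of the comparison above (the release string "1.64.0" is an
+// assumption for illustration): `parse_version("1.50.0")` yields `[1, 50, 0]`
+// and `parse_version("1.64.0")` yields `[1, 64, 0]`; the lexicographic
+// `Vec<u32>` comparison `[1, 50, 0] <= [1, 64, 0]` holds, so the deprecation
+// is in effect, whereas `since = "1.70.0"` would compare greater and not yet be.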
+
+pub fn deprecation_suggestion(
+ diag: &mut Diagnostic,
+ kind: &str,
+ suggestion: Option<Symbol>,
+ span: Span,
+) {
+ if let Some(suggestion) = suggestion {
+ diag.span_suggestion_verbose(
+ span,
+ &format!("replace the use of the deprecated {}", kind),
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ }
+}
+
+fn deprecation_lint(is_in_effect: bool) -> &'static Lint {
+ if is_in_effect { DEPRECATED } else { DEPRECATED_IN_FUTURE }
+}
+
+fn deprecation_message(
+ is_in_effect: bool,
+ since: Option<Symbol>,
+ note: Option<Symbol>,
+ kind: &str,
+ path: &str,
+) -> String {
+ let message = if is_in_effect {
+ format!("use of deprecated {} `{}`", kind, path)
+ } else {
+ let since = since.as_ref().map(Symbol::as_str);
+
+ if since == Some("TBD") {
+ format!("use of {} `{}` that will be deprecated in a future Rust version", kind, path)
+ } else {
+ format!(
+ "use of {} `{}` that will be deprecated in future version {}",
+ kind,
+ path,
+ since.unwrap()
+ )
+ }
+ };
+
+ match note {
+ Some(reason) => format!("{}: {}", message, reason),
+ None => message,
+ }
+}
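+
+// For instance (illustrative values): with `is_in_effect = true`,
+// `kind = "function"`, `path = "foo::bar"`, and `note = Some("use baz instead")`,
+// the rendered message is:
+//
+//     use of deprecated function `foo::bar`: use baz instead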
+
+pub fn deprecation_message_and_lint(
+ depr: &Deprecation,
+ kind: &str,
+ path: &str,
+) -> (String, &'static Lint) {
+ let is_in_effect = deprecation_in_effect(depr);
+ (
+ deprecation_message(is_in_effect, depr.since, depr.note, kind, path),
+ deprecation_lint(is_in_effect),
+ )
+}
+
+pub fn early_report_deprecation<'a>(
+ lint_buffer: &'a mut LintBuffer,
+ message: &str,
+ suggestion: Option<Symbol>,
+ lint: &'static Lint,
+ span: Span,
+ node_id: NodeId,
+) {
+ if span.in_derive_expansion() {
+ return;
+ }
+
+ let diag = BuiltinLintDiagnostics::DeprecatedMacro(suggestion, span);
+ lint_buffer.buffer_lint_with_diagnostic(lint, node_id, span, message, diag);
+}
+
+fn late_report_deprecation(
+ tcx: TyCtxt<'_>,
+ message: &str,
+ suggestion: Option<Symbol>,
+ lint: &'static Lint,
+ span: Span,
+ method_span: Option<Span>,
+ hir_id: HirId,
+ def_id: DefId,
+) {
+ if span.in_derive_expansion() {
+ return;
+ }
+ let method_span = method_span.unwrap_or(span);
+ tcx.struct_span_lint_hir(lint, hir_id, method_span, |lint| {
+ let mut diag = lint.build(message);
+ if let hir::Node::Expr(_) = tcx.hir().get(hir_id) {
+ let kind = tcx.def_kind(def_id).descr(def_id);
+ deprecation_suggestion(&mut diag, kind, suggestion, method_span);
+ }
+ diag.emit();
+ });
+}
+
+/// Result of `TyCtxt::eval_stability`.
+pub enum EvalResult {
+ /// We can use the item because it is stable or we provided the
+ /// corresponding feature gate.
+ Allow,
+ /// We cannot use the item because it is unstable and we did not provide the
+ /// corresponding feature gate.
+ Deny {
+ feature: Symbol,
+ reason: Option<Symbol>,
+ issue: Option<NonZeroU32>,
+ suggestion: Option<(Span, String, String, Applicability)>,
+ is_soft: bool,
+ },
+ /// The item does not have the `#[stable]` or `#[unstable]` marker assigned.
+ Unmarked,
+}
+
+// See issue #38412.
+fn skip_stability_check_due_to_privacy(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ if tcx.def_kind(def_id) == DefKind::TyParam {
+ // Type parameters have no visibility and are considered public for the purpose of this check.
+ return false;
+ }
+ match tcx.visibility(def_id) {
+ // Must check stability for `pub` items.
+ ty::Visibility::Public => false,
+
+ // These are not visible outside crate; therefore
+ // stability markers are irrelevant, if even present.
+ ty::Visibility::Restricted(..) | ty::Visibility::Invisible => true,
+ }
+}
+
+// See issue #83250.
+fn suggestion_for_allocator_api(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ span: Span,
+ feature: Symbol,
+) -> Option<(Span, String, String, Applicability)> {
+ if feature == sym::allocator_api {
+ if let Some(trait_) = tcx.opt_parent(def_id) {
+ if tcx.is_diagnostic_item(sym::Vec, trait_) {
+ let sm = tcx.sess.parse_sess.source_map();
+ let inner_types = sm.span_extend_to_prev_char(span, '<', true);
+ if let Ok(snippet) = sm.span_to_snippet(inner_types) {
+ return Some((
+ inner_types,
+ "consider wrapping the inner types in tuple".to_string(),
+ format!("({})", snippet),
+ Applicability::MaybeIncorrect,
+ ));
+ }
+ }
+ }
+ }
+ None
+}
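+
+// A sketch of the resulting suggestion (hypothetical user code): for a use
+// of `Vec<u8, MyAlloc>` without the `allocator_api` feature, the span is
+// extended back to the `<`, and the inner snippet `u8, MyAlloc` is rewritten
+// to `(u8, MyAlloc)`, i.e. the suggestion reads `Vec<(u8, MyAlloc)>`.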
+
+/// An override option for eval_stability.
+pub enum AllowUnstable {
+ /// Don't emit an unstable error for the item
+ Yes,
+ /// Handle the item normally
+ No,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Evaluates the stability of an item.
+ ///
+ /// Returns `EvalResult::Allow` if the item is stable, or unstable but the corresponding
+ /// `#![feature]` has been provided. Otherwise, returns `EvalResult::Deny`, which describes
+ /// the offending unstable feature.
+ ///
+ /// If `id` is `Some(_)`, this function will also check if the item at `def_id` has been
+ /// deprecated. If the item is indeed deprecated, we will emit a deprecation lint attached to
+ /// `id`.
+ pub fn eval_stability(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ ) -> EvalResult {
+ self.eval_stability_allow_unstable(def_id, id, span, method_span, AllowUnstable::No)
+ }
+
+ /// Evaluates the stability of an item.
+ ///
+ /// Returns `EvalResult::Allow` if the item is stable, or unstable but the corresponding
+ /// `#![feature]` has been provided. Otherwise, returns `EvalResult::Deny`, which describes
+ /// the offending unstable feature.
+ ///
+ /// If `id` is `Some(_)`, this function will also check if the item at `def_id` has been
+ /// deprecated. If the item is indeed deprecated, we will emit a deprecation lint attached to
+ /// `id`.
+ ///
+ /// Pass `AllowUnstable::Yes` to `allow_unstable` to force an unstable item to be allowed. Deprecation warnings will be emitted normally.
+ pub fn eval_stability_allow_unstable(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ allow_unstable: AllowUnstable,
+ ) -> EvalResult {
+ // Deprecated attributes apply in-crate and cross-crate.
+ if let Some(id) = id {
+ if let Some(depr_entry) = self.lookup_deprecation_entry(def_id) {
+ let parent_def_id = self.hir().get_parent_item(id);
+ let skip = self
+ .lookup_deprecation_entry(parent_def_id.to_def_id())
+ .map_or(false, |parent_depr| parent_depr.same_origin(&depr_entry));
+
+ // #[deprecated] doesn't emit a notice if we're not on the
+ // topmost deprecation. For example, if a struct is deprecated,
+ // the use of a field won't be linted.
+ //
+ // With #![staged_api], we want to emit the lint down the
+ // whole hierarchy.
+ let depr_attr = &depr_entry.attr;
+ if !skip || depr_attr.is_since_rustc_version {
+ // Calculating the message for the lint involves calling `self.def_path_str`,
+ // which by default will invoke the expensive `visible_parent_map` query to
+ // compute the visible path. So we skip the message calculation altogether
+ // if the lint is allowed.
+ let is_in_effect = deprecation_in_effect(depr_attr);
+ let lint = deprecation_lint(is_in_effect);
+ if self.lint_level_at_node(lint, id).0 != Level::Allow {
+ let def_path = with_no_trimmed_paths!(self.def_path_str(def_id));
+ let def_kind = self.def_kind(def_id).descr(def_id);
+
+ late_report_deprecation(
+ self,
+ &deprecation_message(
+ is_in_effect,
+ depr_attr.since,
+ depr_attr.note,
+ def_kind,
+ &def_path,
+ ),
+ depr_attr.suggestion,
+ lint,
+ span,
+ method_span,
+ id,
+ def_id,
+ );
+ }
+ }
+ };
+ }
+
+ let is_staged_api = self.lookup_stability(def_id.krate.as_def_id()).is_some();
+ if !is_staged_api {
+ return EvalResult::Allow;
+ }
+
+ let stability = self.lookup_stability(def_id);
+ debug!(
+ "stability: \
+ inspecting def_id={:?} span={:?} of stability={:?}",
+ def_id, span, stability
+ );
+
+ // Only the cross-crate scenario matters when checking unstable APIs
+ let cross_crate = !def_id.is_local();
+ if !cross_crate {
+ return EvalResult::Allow;
+ }
+
+ // Issue #38412: private items lack stability markers.
+ if skip_stability_check_due_to_privacy(self, def_id) {
+ return EvalResult::Allow;
+ }
+
+ match stability {
+ Some(Stability {
+ level: attr::Unstable { reason, issue, is_soft, implied_by },
+ feature,
+ ..
+ }) => {
+ if span.allows_unstable(feature) {
+ debug!("stability: skipping span={:?} since it is internal", span);
+ return EvalResult::Allow;
+ }
+ if self.features().active(feature) {
+ return EvalResult::Allow;
+ }
+
+ // If this item was previously part of a now-stabilized feature which is still
+ // active (i.e. the user hasn't removed the attribute for the stabilized feature
+ // yet) then allow use of this item.
+ if let Some(implied_by) = implied_by && self.features().active(implied_by) {
+ return EvalResult::Allow;
+ }
+
+ // When we're compiling the compiler itself we may pull in
+ // crates from crates.io, but those crates may depend on other
+ // crates also pulled in from crates.io. We want to ideally be
+ // able to compile everything without requiring upstream
+ // modifications, so in the case that this looks like a
+ // `rustc_private` crate (e.g., a compiler crate) and we also have
+ // the `-Z force-unstable-if-unmarked` flag present (we're
+ // compiling a compiler crate), then let this missing feature
+ // annotation slide.
+ if feature == sym::rustc_private && issue == NonZeroU32::new(27812) {
+ if self.sess.opts.unstable_opts.force_unstable_if_unmarked {
+ return EvalResult::Allow;
+ }
+ }
+
+ if matches!(allow_unstable, AllowUnstable::Yes) {
+ return EvalResult::Allow;
+ }
+
+ let suggestion = suggestion_for_allocator_api(self, def_id, span, feature);
+ EvalResult::Deny {
+ feature,
+ reason: reason.to_opt_reason(),
+ issue,
+ suggestion,
+ is_soft,
+ }
+ }
+ Some(_) => {
+ // Stable APIs are always ok to call and deprecated APIs are
+ // handled by the lint emitting logic above.
+ EvalResult::Allow
+ }
+ None => EvalResult::Unmarked,
+ }
+ }
+
+ /// Checks if an item is stable, or errors out.
+ ///
+ /// If the item defined by `def_id` is unstable and the corresponding `#![feature]` does not
+ /// exist, emits an error.
+ ///
+ /// This function will also check if the item is deprecated.
+ /// If so, and `id` is not `None`, a deprecated lint attached to `id` will be emitted.
+ ///
+ /// Returns `true` if the item is allowed, i.e., stable, or unstable under an enabled feature.
+ pub fn check_stability(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ ) -> bool {
+ self.check_stability_allow_unstable(def_id, id, span, method_span, AllowUnstable::No)
+ }
+
+ /// Checks if an item is stable, or errors out.
+ ///
+ /// If the item defined by `def_id` is unstable and the corresponding `#![feature]` does not
+ /// exist, emits an error.
+ ///
+ /// This function will also check if the item is deprecated.
+ /// If so, and `id` is not `None`, a deprecated lint attached to `id` will be emitted.
+ ///
+ /// Pass `AllowUnstable::Yes` to `allow_unstable` to force an unstable item to be allowed. Deprecation warnings will be emitted normally.
+ ///
+ /// Returns `true` if the item is allowed, i.e., stable, or unstable under an enabled feature.
+ pub fn check_stability_allow_unstable(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ allow_unstable: AllowUnstable,
+ ) -> bool {
+ self.check_optional_stability(
+ def_id,
+ id,
+ span,
+ method_span,
+ allow_unstable,
+ |span, def_id| {
+ // The API could be uncallable for other reasons, for example when a private module
+ // was referenced.
+ self.sess.delay_span_bug(span, &format!("encountered unmarked API: {:?}", def_id));
+ },
+ )
+ }
+
+ /// Like `check_stability`, except that we permit items to have custom behaviour for
+ /// missing stability attributes (not necessarily just emit a `bug!`). This is necessary
+ /// for default generic parameters, which only have stability attributes if they were
+ /// added after the type on which they're defined.
+ ///
+ /// Returns `true` if the item is allowed, i.e., stable, or unstable under an enabled feature.
+ pub fn check_optional_stability(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ allow_unstable: AllowUnstable,
+ unmarked: impl FnOnce(Span, DefId),
+ ) -> bool {
+ let soft_handler = |lint, span, msg: &_| {
+ self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, |lint| {
+ lint.build(msg).emit();
+ })
+ };
+ let eval_result =
+ self.eval_stability_allow_unstable(def_id, id, span, method_span, allow_unstable);
+ let is_allowed = matches!(eval_result, EvalResult::Allow);
+ match eval_result {
+ EvalResult::Allow => {}
+ EvalResult::Deny { feature, reason, issue, suggestion, is_soft } => report_unstable(
+ self.sess,
+ feature,
+ reason,
+ issue,
+ suggestion,
+ is_soft,
+ span,
+ soft_handler,
+ ),
+ EvalResult::Unmarked => unmarked(span, def_id),
+ }
+
+ is_allowed
+ }
+
+ pub fn lookup_deprecation(self, id: DefId) -> Option<Deprecation> {
+ self.lookup_deprecation_entry(id).map(|depr| depr.attr)
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs
new file mode 100644
index 000000000..78080fcd5
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/basic_blocks.rs
@@ -0,0 +1,147 @@
+use crate::mir::graph_cyclic_cache::GraphIsCyclicCache;
+use crate::mir::predecessors::{PredecessorCache, Predecessors};
+use crate::mir::switch_sources::{SwitchSourceCache, SwitchSources};
+use crate::mir::traversal::PostorderCache;
+use crate::mir::{BasicBlock, BasicBlockData, Successors, START_BLOCK};
+
+use rustc_data_structures::graph;
+use rustc_data_structures::graph::dominators::{dominators, Dominators};
+use rustc_index::vec::IndexVec;
+
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub struct BasicBlocks<'tcx> {
+ basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ predecessor_cache: PredecessorCache,
+ switch_source_cache: SwitchSourceCache,
+ is_cyclic: GraphIsCyclicCache,
+ postorder_cache: PostorderCache,
+}
+
+impl<'tcx> BasicBlocks<'tcx> {
+ #[inline]
+ pub fn new(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
+ BasicBlocks {
+ basic_blocks,
+ predecessor_cache: PredecessorCache::new(),
+ switch_source_cache: SwitchSourceCache::new(),
+ is_cyclic: GraphIsCyclicCache::new(),
+ postorder_cache: PostorderCache::new(),
+ }
+ }
+
+ /// Returns `true` if the control-flow graph contains a cycle reachable from `START_BLOCK`.
+ #[inline]
+ pub fn is_cfg_cyclic(&self) -> bool {
+ self.is_cyclic.is_cyclic(self)
+ }
+
+ #[inline]
+ pub fn dominators(&self) -> Dominators<BasicBlock> {
+ dominators(&self)
+ }
+
+ /// Returns predecessors for each basic block.
+ #[inline]
+ pub fn predecessors(&self) -> &Predecessors {
+ self.predecessor_cache.compute(&self.basic_blocks)
+ }
+
+ /// Returns basic blocks in a postorder.
+ #[inline]
+ pub fn postorder(&self) -> &[BasicBlock] {
+ self.postorder_cache.compute(&self.basic_blocks)
+ }
+
+ /// `switch_sources()[&(target, switch)]` returns a list of switch
+ /// values that lead to a `target` block from a `switch` block.
+ #[inline]
+ pub fn switch_sources(&self) -> &SwitchSources {
+ self.switch_source_cache.compute(&self.basic_blocks)
+ }
+
+ /// Returns mutable reference to basic blocks. Invalidates CFG cache.
+ #[inline]
+ pub fn as_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ self.invalidate_cfg_cache();
+ &mut self.basic_blocks
+ }
+
+ /// Get mutable access to basic blocks without invalidating the CFG cache.
+ ///
+ /// By calling this method instead of e.g. [`BasicBlocks::as_mut`] you promise not to change
+ /// the CFG. This means that
+ ///
+ /// 1) The number of basic blocks remains unchanged
+ /// 2) The set of successors of each terminator remains unchanged.
+ /// 3) For each `TerminatorKind::SwitchInt`, the `targets` remains the same and the terminator
+ /// kind is not changed.
+ ///
+ /// If any of these conditions cannot be upheld, you should call [`BasicBlocks::invalidate_cfg_cache`].
+ #[inline]
+ pub fn as_mut_preserves_cfg(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ &mut self.basic_blocks
+ }
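+
+ // A sketch of a CFG-preserving mutation (illustrative; assumes some
+ // `blocks: &mut BasicBlocks<'tcx>` in scope): removing `Nop` statements
+ // touches no terminator, so conditions 1-3 above hold and the caches stay
+ // valid.
+ //
+ //     for data in blocks.as_mut_preserves_cfg().iter_mut() {
+ //         data.statements.retain(|s| !matches!(s.kind, StatementKind::Nop));
+ //     }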
+
+ /// Invalidates cached information about the CFG.
+ ///
+ /// You will only ever need this if you have also called [`BasicBlocks::as_mut_preserves_cfg`].
+ /// All other methods that allow you to mutate the basic blocks also call this method
+ /// themselves, thereby avoiding any risk of accidental cache invalidation.
+ pub fn invalidate_cfg_cache(&mut self) {
+ self.predecessor_cache.invalidate();
+ self.switch_source_cache.invalidate();
+ self.is_cyclic.invalidate();
+ self.postorder_cache.invalidate();
+ }
+}
+
+impl<'tcx> std::ops::Deref for BasicBlocks<'tcx> {
+ type Target = IndexVec<BasicBlock, BasicBlockData<'tcx>>;
+
+ #[inline]
+ fn deref(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ &self.basic_blocks
+ }
+}
+
+impl<'tcx> graph::DirectedGraph for BasicBlocks<'tcx> {
+ type Node = BasicBlock;
+}
+
+impl<'tcx> graph::WithNumNodes for BasicBlocks<'tcx> {
+ #[inline]
+ fn num_nodes(&self) -> usize {
+ self.basic_blocks.len()
+ }
+}
+
+impl<'tcx> graph::WithStartNode for BasicBlocks<'tcx> {
+ #[inline]
+ fn start_node(&self) -> Self::Node {
+ START_BLOCK
+ }
+}
+
+impl<'tcx> graph::WithSuccessors for BasicBlocks<'tcx> {
+ #[inline]
+ fn successors(&self, node: Self::Node) -> <Self as graph::GraphSuccessors<'_>>::Iter {
+ self.basic_blocks[node].terminator().successors()
+ }
+}
+
+impl<'a, 'b> graph::GraphSuccessors<'b> for BasicBlocks<'a> {
+ type Item = BasicBlock;
+ type Iter = Successors<'b>;
+}
+
+impl<'tcx, 'graph> graph::GraphPredecessors<'graph> for BasicBlocks<'tcx> {
+ type Item = BasicBlock;
+ type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicBlock>>;
+}
+
+impl<'tcx> graph::WithPredecessors for BasicBlocks<'tcx> {
+ #[inline]
+ fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
+ self.predecessors()[node].iter().copied()
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/coverage.rs b/compiler/rustc_middle/src/mir/coverage.rs
new file mode 100644
index 000000000..efa946452
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/coverage.rs
@@ -0,0 +1,186 @@
+//! Metadata from source code coverage analysis and instrumentation.
+
+use rustc_macros::HashStable;
+use rustc_span::Symbol;
+
+use std::cmp::Ord;
+use std::fmt::{self, Debug, Formatter};
+
+rustc_index::newtype_index! {
+ /// An `ExpressionOperandId` value is assigned directly from either a
+ /// `CounterValueReference.as_u32()` (whose values ascend from 1) or an
+ /// `ExpressionOperandId.as_u32()` (whose values *descend* from `u32::MAX`). Id value `0`
+ /// (zero) represents a virtual counter with a constant value of `0`.
+ pub struct ExpressionOperandId {
+ derive [HashStable]
+ DEBUG_FORMAT = "ExpressionOperandId({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+impl ExpressionOperandId {
+ /// An expression operand for a "zero counter", as described in the following references:
+ ///
+ /// * <https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#counter>
+ /// * <https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#tag>
+ /// * <https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#counter-expressions>
+ ///
+ /// This operand can be used to count two or more separate code regions with a single counter,
+ /// if they run sequentially with no branches, by injecting the `Counter` in a `BasicBlock` for
+ /// one of the code regions, and inserting `CounterExpression`s ("add ZERO to the counter") in
+ /// the coverage map for the other code regions.
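+ ///
+ /// For example (illustrative operand values): an expression of the form
+ /// `lhs + ZERO`, where `lhs` refers to an existing counter, evaluates to
+ /// the same count as that counter, while still giving the second code
+ /// region its own entry in the coverage map.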
+ pub const ZERO: Self = Self::from_u32(0);
+}
+
+rustc_index::newtype_index! {
+ pub struct CounterValueReference {
+ derive [HashStable]
+ DEBUG_FORMAT = "CounterValueReference({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+impl CounterValueReference {
+ /// Counters start at 1 to reserve 0 for ExpressionOperandId::ZERO.
+ pub const START: Self = Self::from_u32(1);
+
+ /// Returns the explicitly-requested zero-based version of the counter id, used
+ /// during codegen. LLVM expects zero-based indexes.
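+ ///
+ /// For example, `CounterValueReference::START.zero_based_index() == 0`.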
+ pub fn zero_based_index(self) -> u32 {
+ let one_based_index = self.as_u32();
+ debug_assert!(one_based_index > 0);
+ one_based_index - 1
+ }
+}
+
+rustc_index::newtype_index! {
+ /// InjectedExpressionId.as_u32() converts to ExpressionOperandId.as_u32()
+ ///
+ /// Values descend from u32::MAX.
+ pub struct InjectedExpressionId {
+ derive [HashStable]
+ DEBUG_FORMAT = "InjectedExpressionId({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+rustc_index::newtype_index! {
+ /// InjectedExpressionIndex.as_u32() translates to u32::MAX - ExpressionOperandId.as_u32()
+ ///
+ /// Values ascend from 0.
+ pub struct InjectedExpressionIndex {
+ derive [HashStable]
+ DEBUG_FORMAT = "InjectedExpressionIndex({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+rustc_index::newtype_index! {
+ /// MappedExpressionIndex values ascend from zero, and are recalculated indexes based on their
+ /// array position in the LLVM coverage map "Expressions" array, which is assembled during the
+ /// "mapgen" process. They cannot be computed algorithmically, from the other `newtype_index`s.
+ pub struct MappedExpressionIndex {
+ derive [HashStable]
+ DEBUG_FORMAT = "MappedExpressionIndex({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+impl From<CounterValueReference> for ExpressionOperandId {
+ #[inline]
+ fn from(v: CounterValueReference) -> ExpressionOperandId {
+ ExpressionOperandId::from(v.as_u32())
+ }
+}
+
+impl From<InjectedExpressionId> for ExpressionOperandId {
+ #[inline]
+ fn from(v: InjectedExpressionId) -> ExpressionOperandId {
+ ExpressionOperandId::from(v.as_u32())
+ }
+}
+
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)]
+pub enum CoverageKind {
+ Counter {
+ function_source_hash: u64,
+ id: CounterValueReference,
+ },
+ Expression {
+ id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ },
+ Unreachable,
+}
+
+impl CoverageKind {
+ pub fn as_operand_id(&self) -> ExpressionOperandId {
+ use CoverageKind::*;
+ match *self {
+ Counter { id, .. } => ExpressionOperandId::from(id),
+ Expression { id, .. } => ExpressionOperandId::from(id),
+ Unreachable => bug!("Unreachable coverage cannot be part of an expression"),
+ }
+ }
+
+ pub fn is_expression(&self) -> bool {
+ matches!(self, Self::Expression { .. })
+ }
+}
+
+impl Debug for CoverageKind {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use CoverageKind::*;
+ match self {
+ Counter { id, .. } => write!(fmt, "Counter({:?})", id.index()),
+ Expression { id, lhs, op, rhs } => write!(
+ fmt,
+ "Expression({:?}) = {} {} {}",
+ id.index(),
+ lhs.index(),
+ if *op == Op::Add { "+" } else { "-" },
+ rhs.index(),
+ ),
+ Unreachable => write!(fmt, "Unreachable"),
+ }
+ }
+}
+
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct CodeRegion {
+ pub file_name: Symbol,
+ pub start_line: u32,
+ pub start_col: u32,
+ pub end_line: u32,
+ pub end_col: u32,
+}
+
+impl Debug for CodeRegion {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ write!(
+ fmt,
+ "{}:{}:{} - {}:{}",
+ self.file_name, self.start_line, self.start_col, self.end_line, self.end_col
+ )
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum Op {
+ Subtract,
+ Add,
+}
+
+impl Op {
+ pub fn is_add(&self) -> bool {
+ matches!(self, Self::Add)
+ }
+
+ pub fn is_subtract(&self) -> bool {
+ matches!(self, Self::Subtract)
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/generic_graph.rs b/compiler/rustc_middle/src/mir/generic_graph.rs
new file mode 100644
index 000000000..f3621cd99
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/generic_graph.rs
@@ -0,0 +1,69 @@
+use gsgdt::{Edge, Graph, Node, NodeStyle};
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+/// Convert an MIR function into a gsgdt Graph
+pub fn mir_fn_to_generic_graph<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'_>) -> Graph {
+ let def_id = body.source.def_id();
+ let def_name = graphviz_safe_def_name(def_id);
+ let graph_name = format!("Mir_{}", def_name);
+ let dark_mode = tcx.sess.opts.unstable_opts.graphviz_dark_mode;
+
+ // Nodes
+ let nodes: Vec<Node> = body
+ .basic_blocks()
+ .iter_enumerated()
+ .map(|(block, _)| bb_to_graph_node(block, body, dark_mode))
+ .collect();
+
+ // Edges
+ let mut edges = Vec::new();
+ for (source, _) in body.basic_blocks().iter_enumerated() {
+ let def_id = body.source.def_id();
+ let terminator = body[source].terminator();
+ let labels = terminator.kind.fmt_successor_labels();
+
+ for (target, label) in terminator.successors().zip(labels) {
+ let src = node(def_id, source);
+ let trg = node(def_id, target);
+ edges.push(Edge::new(src, trg, label.to_string()));
+ }
+ }
+
+ Graph::new(graph_name, nodes, edges)
+}
+
+fn bb_to_graph_node(block: BasicBlock, body: &Body<'_>, dark_mode: bool) -> Node {
+ let def_id = body.source.def_id();
+ let data = &body[block];
+ let label = node(def_id, block);
+
+ let (title, bgcolor) = if data.is_cleanup {
+ let color = if dark_mode { "royalblue" } else { "lightblue" };
+ (format!("{} (cleanup)", block.index()), color)
+ } else {
+ let color = if dark_mode { "dimgray" } else { "gray" };
+ (format!("{}", block.index()), color)
+ };
+
+ let style = NodeStyle { title_bg: Some(bgcolor.to_owned()), ..Default::default() };
+ let mut stmts: Vec<String> = data.statements.iter().map(|x| format!("{:?}", x)).collect();
+
+ // add the terminator to the stmts, gsgdt can print it out separately
+ let mut terminator_head = String::new();
+ data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ stmts.push(terminator_head);
+
+ Node::new(stmts, label, title, style)
+}
+
+// Must match `[0-9A-Za-z_]*`. This does not appear in the rendered graph, so
+// it does not have to be user friendly.
+pub fn graphviz_safe_def_name(def_id: DefId) -> String {
+ format!("{}_{}", def_id.krate.index(), def_id.index.index(),)
+}
+
+fn node(def_id: DefId, block: BasicBlock) -> String {
+ format!("bb{}__{}", block.index(), graphviz_safe_def_name(def_id))
+}
diff --git a/compiler/rustc_middle/src/mir/generic_graphviz.rs b/compiler/rustc_middle/src/mir/generic_graphviz.rs
new file mode 100644
index 000000000..11ac45943
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/generic_graphviz.rs
@@ -0,0 +1,173 @@
+use rustc_data_structures::graph::{self, iterate};
+use rustc_graphviz as dot;
+use rustc_middle::ty::TyCtxt;
+use std::io::{self, Write};
+
+pub struct GraphvizWriter<
+ 'a,
+ G: graph::DirectedGraph + graph::WithSuccessors + graph::WithStartNode + graph::WithNumNodes,
+ NodeContentFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+ EdgeLabelsFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+> {
+ graph: &'a G,
+ is_subgraph: bool,
+ graphviz_name: String,
+ graph_label: Option<String>,
+ node_content_fn: NodeContentFn,
+ edge_labels_fn: EdgeLabelsFn,
+}
+
+impl<
+ 'a,
+ G: graph::DirectedGraph + graph::WithSuccessors + graph::WithStartNode + graph::WithNumNodes,
+ NodeContentFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+ EdgeLabelsFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+> GraphvizWriter<'a, G, NodeContentFn, EdgeLabelsFn>
+{
+ pub fn new(
+ graph: &'a G,
+ graphviz_name: &str,
+ node_content_fn: NodeContentFn,
+ edge_labels_fn: EdgeLabelsFn,
+ ) -> Self {
+ Self {
+ graph,
+ is_subgraph: false,
+ graphviz_name: graphviz_name.to_owned(),
+ graph_label: None,
+ node_content_fn,
+ edge_labels_fn,
+ }
+ }
+
+ pub fn set_graph_label(&mut self, graph_label: &str) {
+ self.graph_label = Some(graph_label.to_owned());
+ }
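+
+ // A usage sketch (illustrative; `graph`, `tcx`, and `w` are assumed to be
+ // in scope, with `graph` implementing the graph traits bounded above, its
+ // node type implementing `Debug`, and `w` being any `std::io::Write` sink):
+ //
+ //     let mut writer = GraphvizWriter::new(
+ //         graph,
+ //         "my_graph",
+ //         |node| vec![format!("{:?}", node)],
+ //         |_node| Vec::new(),
+ //     );
+ //     writer.set_graph_label("An example graph");
+ //     writer.write_graphviz(tcx, w)?;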
+
+ /// Write a graphviz DOT representation of the graph.
+ pub fn write_graphviz<'tcx, W>(&self, tcx: TyCtxt<'tcx>, w: &mut W) -> io::Result<()>
+ where
+ W: Write,
+ {
+ let kind = if self.is_subgraph { "subgraph" } else { "digraph" };
+ let cluster = if self.is_subgraph { "cluster_" } else { "" }; // Print border around graph
+ // FIXME(richkadel): If/when migrating the MIR graphviz to this generic implementation,
+ // prepend "Mir_" to the graphviz_safe_def_name(def_id)
+ writeln!(w, "{} {}{} {{", kind, cluster, self.graphviz_name)?;
+
+ // Global graph properties
+ let font = format!(r#"fontname="{}""#, tcx.sess.opts.unstable_opts.graphviz_font);
+ let mut graph_attrs = vec![&font[..]];
+ let mut content_attrs = vec![&font[..]];
+
+ let dark_mode = tcx.sess.opts.unstable_opts.graphviz_dark_mode;
+ if dark_mode {
+ graph_attrs.push(r#"bgcolor="black""#);
+ graph_attrs.push(r#"fontcolor="white""#);
+ content_attrs.push(r#"color="white""#);
+ content_attrs.push(r#"fontcolor="white""#);
+ }
+
+ writeln!(w, r#" graph [{}];"#, graph_attrs.join(" "))?;
+ let content_attrs_str = content_attrs.join(" ");
+ writeln!(w, r#" node [{}];"#, content_attrs_str)?;
+ writeln!(w, r#" edge [{}];"#, content_attrs_str)?;
+
+ // Graph label
+ if let Some(graph_label) = &self.graph_label {
+ self.write_graph_label(graph_label, w)?;
+ }
+
+ // Nodes
+ for node in iterate::post_order_from(self.graph, self.graph.start_node()) {
+ self.write_node(node, dark_mode, w)?;
+ }
+
+ // Edges
+ for source in iterate::post_order_from(self.graph, self.graph.start_node()) {
+ self.write_edges(source, w)?;
+ }
+ writeln!(w, "}}")
+ }
+
+ /// Write a graphviz DOT node for the given node.
+ pub fn write_node<W>(&self, node: G::Node, dark_mode: bool, w: &mut W) -> io::Result<()>
+ where
+ W: Write,
+ {
+ // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
+ write!(w, r#" {} [shape="none", label=<"#, self.node(node))?;
+
+ write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
+
+ // FIXME(richkadel): If/when migrating the MIR graphviz to this generic implementation,
+ // we need generic way to know if node header should have a different color. For example,
+ // for MIR:
+ //
+ // let (blk, bgcolor) = if data.is_cleanup {
+ // let color = if dark_mode { "royalblue" } else { "lightblue" };
+ // (format!("{:?} (cleanup)", node), color)
+ // } else {
+ // let color = if dark_mode { "dimgray" } else { "gray" };
+ // (format!("{:?}", node), color)
+ // };
+ let color = if dark_mode { "dimgray" } else { "gray" };
+ let (blk, bgcolor) = (format!("{:?}", node), color);
+ write!(
+ w,
+ r#"<tr><td bgcolor="{bgcolor}" {attrs} colspan="{colspan}">{blk}</td></tr>"#,
+ attrs = r#"align="center""#,
+ colspan = 1,
+ blk = blk,
+ bgcolor = bgcolor
+ )?;
+
+ for section in (self.node_content_fn)(node) {
+ write!(
+ w,
+ r#"<tr><td align="left" balign="left">{}</td></tr>"#,
+ dot::escape_html(&section).replace('\n', "<br/>")
+ )?;
+ }
+
+ // Close the table
+ write!(w, "</table>")?;
+
+ // Close the node label and the node itself.
+ writeln!(w, ">];")
+ }
+
+ /// Write graphviz DOT edges with labels between the given node and all of its successors.
+ fn write_edges<W>(&self, source: G::Node, w: &mut W) -> io::Result<()>
+ where
+ W: Write,
+ {
+ let edge_labels = (self.edge_labels_fn)(source);
+ for (index, target) in self.graph.successors(source).enumerate() {
+ let src = self.node(source);
+ let trg = self.node(target);
+ let escaped_edge_label = if let Some(edge_label) = edge_labels.get(index) {
+ dot::escape_html(edge_label).replace('\n', r#"<br align="left"/>"#)
+ } else {
+ "".to_owned()
+ };
+ writeln!(w, r#" {} -> {} [label=<{}>];"#, src, trg, escaped_edge_label)?;
+ }
+ Ok(())
+ }
+
+ /// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
+ /// will appear below the graph.
+ fn write_graph_label<W>(&self, label: &str, w: &mut W) -> io::Result<()>
+ where
+ W: Write,
+ {
+ let lines = label.split('\n').map(|s| dot::escape_html(s)).collect::<Vec<_>>();
+ let escaped_label = lines.join(r#"<br align="left"/>"#);
+ writeln!(w, r#" label=<<br/><br/>{}<br align="left"/><br/><br/><br/>>;"#, escaped_label)
+ }
+
+ fn node(&self, node: G::Node) -> String {
+ format!("{:?}__{}", node, self.graphviz_name)
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs b/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs
new file mode 100644
index 000000000..f97bf2883
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs
@@ -0,0 +1,63 @@
+use rustc_data_structures::graph::{
+ self, DirectedGraph, WithNumNodes, WithStartNode, WithSuccessors,
+};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+/// Helper type to cache the result of `graph::is_cyclic`.
+#[derive(Clone, Debug)]
+pub(super) struct GraphIsCyclicCache {
+ cache: OnceCell<bool>,
+}
+
+impl GraphIsCyclicCache {
+ #[inline]
+ pub(super) fn new() -> Self {
+ GraphIsCyclicCache { cache: OnceCell::new() }
+ }
+
+ pub(super) fn is_cyclic<G>(&self, graph: &G) -> bool
+ where
+ G: ?Sized + DirectedGraph + WithStartNode + WithSuccessors + WithNumNodes,
+ {
+ *self.cache.get_or_init(|| graph::is_cyclic(graph))
+ }
+
+ /// Invalidates the cache.
+ #[inline]
+ pub(super) fn invalidate(&mut self) {
+ // Invalidating the cache requires mutating the MIR, which in turn requires a unique
+ // reference (`&mut`) to the `mir::Body`. Because of this, we can assume that all
+ // callers of `invalidate` have a unique reference to the MIR and thus to the
+ // cache. This means we never need to do synchronization when `invalidate` is called,
+ // we can simply reinitialize the `OnceCell`.
+ self.cache = OnceCell::new();
+ }
+}
+
+impl<S: Encoder> Encodable<S> for GraphIsCyclicCache {
+ #[inline]
+ fn encode(&self, s: &mut S) {
+ Encodable::encode(&(), s);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for GraphIsCyclicCache {
+ #[inline]
+ fn decode(d: &mut D) -> Self {
+ let () = Decodable::decode(d);
+ Self::new()
+ }
+}
+
+impl<CTX> HashStable<CTX> for GraphIsCyclicCache {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ GraphIsCyclicCache,
+}
diff --git a/compiler/rustc_middle/src/mir/graphviz.rs b/compiler/rustc_middle/src/mir/graphviz.rs
new file mode 100644
index 000000000..5de56dad0
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/graphviz.rs
@@ -0,0 +1,134 @@
+use gsgdt::GraphvizSettings;
+use rustc_graphviz as dot;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use std::fmt::Debug;
+use std::io::{self, Write};
+
+use super::generic_graph::mir_fn_to_generic_graph;
+use super::pretty::dump_mir_def_ids;
+
+/// Write a graphviz DOT graph of a list of MIRs.
+pub fn write_mir_graphviz<W>(tcx: TyCtxt<'_>, single: Option<DefId>, w: &mut W) -> io::Result<()>
+where
+ W: Write,
+{
+ let def_ids = dump_mir_def_ids(tcx, single);
+
+ let mirs =
+ def_ids
+ .iter()
+ .flat_map(|def_id| {
+ if tcx.is_const_fn_raw(*def_id) {
+ vec![tcx.optimized_mir(*def_id), tcx.mir_for_ctfe(*def_id)]
+ } else {
+ vec![tcx.instance_mir(ty::InstanceDef::Item(ty::WithOptConstParam::unknown(
+ *def_id,
+ )))]
+ }
+ })
+ .collect::<Vec<_>>();
+
+ let use_subgraphs = mirs.len() > 1;
+ if use_subgraphs {
+ writeln!(w, "digraph __crate__ {{")?;
+ }
+
+ for mir in mirs {
+ write_mir_fn_graphviz(tcx, mir, use_subgraphs, w)?;
+ }
+
+ if use_subgraphs {
+ writeln!(w, "}}")?;
+ }
+
+ Ok(())
+}
+
+/// Write a graphviz DOT graph of the MIR.
+pub fn write_mir_fn_graphviz<'tcx, W>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'_>,
+ subgraph: bool,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ // Global graph properties
+ let font = format!(r#"fontname="{}""#, tcx.sess.opts.unstable_opts.graphviz_font);
+ let mut graph_attrs = vec![&font[..]];
+ let mut content_attrs = vec![&font[..]];
+
+ let dark_mode = tcx.sess.opts.unstable_opts.graphviz_dark_mode;
+ if dark_mode {
+ graph_attrs.push(r#"bgcolor="black""#);
+ graph_attrs.push(r#"fontcolor="white""#);
+ content_attrs.push(r#"color="white""#);
+ content_attrs.push(r#"fontcolor="white""#);
+ }
+
+ // Graph label
+ let mut label = String::from("");
+ // FIXME: remove this unwrap
+ write_graph_label(tcx, body, &mut label).unwrap();
+ let g = mir_fn_to_generic_graph(tcx, body);
+ let settings = GraphvizSettings {
+ graph_attrs: Some(graph_attrs.join(" ")),
+ node_attrs: Some(content_attrs.join(" ")),
+ edge_attrs: Some(content_attrs.join(" ")),
+ graph_label: Some(label),
+ };
+ g.to_dot(w, &settings, subgraph)
+}
+
+/// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
+/// will appear below the graph, showing the type of the `fn` this MIR represents and the types of
+/// all the variables and temporaries.
+fn write_graph_label<'tcx, W: std::fmt::Write>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'_>,
+ w: &mut W,
+) -> std::fmt::Result {
+ let def_id = body.source.def_id();
+
+ write!(w, "fn {}(", dot::escape_html(&tcx.def_path_str(def_id)))?;
+
+ // fn argument types.
+ for (i, arg) in body.args_iter().enumerate() {
+ if i > 0 {
+ write!(w, ", ")?;
+ }
+ write!(w, "{:?}: {}", Place::from(arg), escape(&body.local_decls[arg].ty))?;
+ }
+
+ write!(w, ") -&gt; {}", escape(&body.return_ty()))?;
+ write!(w, r#"<br align="left"/>"#)?;
+
+ for local in body.vars_and_temps_iter() {
+ let decl = &body.local_decls[local];
+
+ write!(w, "let ")?;
+ if decl.mutability == Mutability::Mut {
+ write!(w, "mut ")?;
+ }
+
+ write!(w, r#"{:?}: {};<br align="left"/>"#, Place::from(local), escape(&decl.ty))?;
+ }
+
+ for var_debug_info in &body.var_debug_info {
+ write!(
+ w,
+ r#"debug {} =&gt; {};<br align="left"/>"#,
+ var_debug_info.name,
+ escape(&var_debug_info.value),
+ )?;
+ }
+
+ Ok(())
+}
+
+fn escape<T: Debug>(t: &T) -> String {
+ dot::escape_html(&format!("{:?}", t))
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
new file mode 100644
index 000000000..db7e0fb8a
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -0,0 +1,1300 @@
+//! The virtual memory representation of the MIR interpreter.
+
+use std::borrow::Cow;
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+use std::hash;
+use std::iter;
+use std::ops::{Deref, Range};
+use std::ptr;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::intern::Interned;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::{Align, HasDataLayout, Size};
+
+use super::{
+ read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
+ ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, ScalarSizeMismatch, UndefinedBehaviorInfo,
+ UninitBytesAccess, UnsupportedOpInfo,
+};
+use crate::ty;
+
+/// This type represents an Allocation in the Miri/CTFE core engine.
+///
+/// Its public API is rather low-level, working directly with allocation offsets and a custom error
+/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
+/// module provides higher-level access.
+// Note: for performance reasons when interning, some of the `Allocation` fields can be partially
+// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct Allocation<Prov = AllocId, Extra = ()> {
+ /// The actual bytes of the allocation.
+ /// Note that the bytes of a pointer represent the offset of the pointer.
+ bytes: Box<[u8]>,
+ /// Maps from byte addresses to extra data for each pointer.
+ /// Only the first byte of a pointer is inserted into the map; i.e.,
+ /// every entry in this map applies to `pointer_size` consecutive bytes starting
+ /// at the given offset.
+ relocations: Relocations<Prov>,
+ /// Denotes which part of this allocation is initialized.
+ init_mask: InitMask,
+ /// The alignment of the allocation to detect unaligned reads.
+ /// (`Align` guarantees that this is a power of two.)
+ pub align: Align,
+ /// `true` if the allocation is mutable.
+ /// Also used by codegen to determine if a static should be put into mutable memory,
+ /// which happens for `static mut` and `static` with interior mutability.
+ pub mutability: Mutability,
+ /// Extra state for the machine.
+ pub extra: Extra,
+}
+
+/// This is the maximum size we will hash at a time, when interning an `Allocation` and its
+/// `InitMask`. Note, we hash that amount of bytes twice: at the start, and at the end of a buffer.
+/// Used when these two structures are large: we only partially hash the larger fields in that
+/// situation. See the comment at the top of their respective `Hash` impl for more details.
+const MAX_BYTES_TO_HASH: usize = 64;
+
+/// This is the maximum size (in bytes) for which a buffer will be fully hashed, when interning.
+/// Otherwise, it will be partially hashed in 2 slices, requiring at least 2 `MAX_BYTES_TO_HASH`
+/// bytes.
+const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;
+
+// Const allocations are only hashed for interning. However, they can be large, making the hashing
+// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
+// big buffers like the actual bytes of allocation. We can partially hash some fields when they're
+// large.
+impl hash::Hash for Allocation {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ // Partially hash the `bytes` buffer when it is large. To limit collisions with common
+ // prefixes and suffixes, we hash the length and some slices of the buffer.
+ let byte_count = self.bytes.len();
+ if byte_count > MAX_HASHED_BUFFER_LEN {
+ // Hash the buffer's length.
+ byte_count.hash(state);
+
+ // And its head and tail.
+ self.bytes[..MAX_BYTES_TO_HASH].hash(state);
+ self.bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
+ } else {
+ self.bytes.hash(state);
+ }
+
+ // Hash the other fields as usual.
+ self.relocations.hash(state);
+ self.init_mask.hash(state);
+ self.align.hash(state);
+ self.mutability.hash(state);
+ self.extra.hash(state);
+ }
+}
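+
+// For example (following the constants above): a 1 MiB constant allocation
+// hashes only its length plus its first and last `MAX_BYTES_TO_HASH` (64)
+// bytes, while a 100-byte allocation is no larger than
+// `MAX_HASHED_BUFFER_LEN` and is hashed in full.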
+
+/// Interned types generally have an `Outer` type and an `Inner` type, where
+/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
+/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
+/// outer type and `TyS` is its inner type.
+///
+/// Here things are different because only const allocations are interned. This
+/// means that both the inner type (`Allocation`) and the outer type
+/// (`ConstAllocation`) are used quite a bit.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
+#[rustc_pass_by_value]
+pub struct ConstAllocation<'tcx, Prov = AllocId, Extra = ()>(
+ pub Interned<'tcx, Allocation<Prov, Extra>>,
+);
+
+impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // This matches how `Allocation` is printed. We print it like this to
+ // avoid having to update expected output in a lot of tests.
+ write!(f, "{:?}", self.inner())
+ }
+}
+
+impl<'tcx, Prov, Extra> ConstAllocation<'tcx, Prov, Extra> {
+ pub fn inner(self) -> &'tcx Allocation<Prov, Extra> {
+ self.0.0
+ }
+}
+
+/// We have our own error type that does not know about the `AllocId`; that information
+/// is added when converting to `InterpError`.
+#[derive(Debug)]
+pub enum AllocError {
+ /// A scalar had the wrong size.
+ ScalarSizeMismatch(ScalarSizeMismatch),
+ /// Encountered a pointer where we needed raw bytes.
+ ReadPointerAsBytes,
+ /// Partially overwriting a pointer.
+ PartialPointerOverwrite(Size),
+ /// Using uninitialized data where it is not allowed.
+ InvalidUninitBytes(Option<UninitBytesAccess>),
+}
+pub type AllocResult<T = ()> = Result<T, AllocError>;
+
+impl From<ScalarSizeMismatch> for AllocError {
+ fn from(s: ScalarSizeMismatch) -> Self {
+ AllocError::ScalarSizeMismatch(s)
+ }
+}
+
+impl AllocError {
+ pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
+ use AllocError::*;
+ match self {
+ ScalarSizeMismatch(s) => {
+ InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
+ }
+ ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
+ PartialPointerOverwrite(offset) => InterpError::Unsupported(
+ UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
+ ),
+ InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
+ UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
+ ),
+ }
+ }
+}
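+
+// A tiny illustration of the conversion plumbing above (example sizes only).
+#[cfg(test)]
+mod alloc_error_example {
+    use super::*;
+
+    #[test]
+    fn scalar_size_mismatch_wraps_into_alloc_error() {
+        let mismatch = ScalarSizeMismatch { target_size: 8, data_size: 4 };
+        let err: AllocError = mismatch.into();
+        assert!(matches!(err, AllocError::ScalarSizeMismatch(_)));
+    }
+}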
+
+/// The information that makes up a memory access: offset and size.
+#[derive(Copy, Clone)]
+pub struct AllocRange {
+ pub start: Size,
+ pub size: Size,
+}
+
+impl fmt::Debug for AllocRange {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
+ }
+}
+
+/// Free-standing constructor for less syntactic overhead.
+#[inline(always)]
+pub fn alloc_range(start: Size, size: Size) -> AllocRange {
+ AllocRange { start, size }
+}
+
+impl AllocRange {
+ #[inline]
+ pub fn from(r: Range<Size>) -> Self {
+ alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
+ }
+
+ #[inline(always)]
+ pub fn end(self) -> Size {
+ self.start + self.size // This does overflow checking.
+ }
+
+ /// Returns the `subrange` within this range; panics if it is not a subrange.
+ #[inline]
+ pub fn subrange(self, subrange: AllocRange) -> AllocRange {
+ let sub_start = self.start + subrange.start;
+ let range = alloc_range(sub_start, subrange.size);
+ assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
+ range
+ }
+}
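+
+// An illustrative check of the range arithmetic above (the offsets are arbitrary):
+// `subrange` re-bases a relative range and asserts that it stays inside the outer one.
+#[cfg(test)]
+mod alloc_range_example {
+    use super::*;
+
+    #[test]
+    fn subrange_is_rebased_and_bounds_checked() {
+        // A 16-byte access starting at offset 8 (bytes 8..24)...
+        let outer = alloc_range(Size::from_bytes(8), Size::from_bytes(16));
+        // ...and a 4-byte access at offset 2 *within* it (bytes 10..14).
+        let inner = outer.subrange(alloc_range(Size::from_bytes(2), Size::from_bytes(4)));
+        assert_eq!(inner.start.bytes(), 10);
+        assert_eq!(inner.end().bytes(), 14);
+    }
+}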
+
+// The constructors are all without extra; the extra gets added by a machine hook later.
+impl<Prov> Allocation<Prov> {
+ /// Creates an allocation initialized by the given bytes.
+ pub fn from_bytes<'a>(
+ slice: impl Into<Cow<'a, [u8]>>,
+ align: Align,
+ mutability: Mutability,
+ ) -> Self {
+ let bytes = Box::<[u8]>::from(slice.into());
+ let size = Size::from_bytes(bytes.len());
+ Self {
+ bytes,
+ relocations: Relocations::new(),
+ init_mask: InitMask::new(size, true),
+ align,
+ mutability,
+ extra: (),
+ }
+ }
+
+ pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
+ Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
+ }
+
+ /// Tries to create an Allocation of `size` bytes, failing if there is not enough memory
+ /// available to the compiler to do so.
+ ///
+ /// If `panic_on_fail` is true, this will never return `Err`.
+ pub fn uninit<'tcx>(size: Size, align: Align, panic_on_fail: bool) -> InterpResult<'tcx, Self> {
+ let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes_usize()).map_err(|_| {
+ // This results in an error that can happen non-deterministically, since the memory
+ // available to the compiler can change between runs. Normally queries are always
+ // deterministic. However, we can be non-deterministic here because all uses of const
+ // evaluation (including ConstProp!) will make compilation fail (via hard error
+ // or ICE) upon encountering a `MemoryExhausted` error.
+ if panic_on_fail {
+ panic!("Allocation::uninit called with panic_on_fail had allocation failure")
+ }
+ ty::tls::with(|tcx| {
+ tcx.sess.delay_span_bug(DUMMY_SP, "exhausted memory during interpretation")
+ });
+ InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
+ })?;
+ // SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
+ let bytes = unsafe { bytes.assume_init() };
+ Ok(Allocation {
+ bytes,
+ relocations: Relocations::new(),
+ init_mask: InitMask::new(size, false),
+ align,
+ mutability: Mutability::Mut,
+ extra: (),
+ })
+ }
+}
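+
+// A small illustration of the two construction paths above (example sizes only).
+#[cfg(test)]
+mod construction_example {
+    use super::*;
+
+    #[test]
+    fn from_bytes_is_init_uninit_is_not() {
+        // `from_bytes` marks every byte as initialized...
+        let a: Allocation = Allocation::from_bytes_byte_aligned_immutable(&[1u8, 2, 3][..]);
+        assert_eq!(a.len(), 3);
+        assert!(a.init_mask().is_range_initialized(Size::ZERO, a.size()).is_ok());
+
+        // ...while `uninit` produces an all-uninitialized, mutable allocation.
+        let u = Allocation::<AllocId>::uninit(Size::from_bytes(4), Align::ONE, false).unwrap();
+        assert!(u.init_mask().is_range_initialized(Size::ZERO, u.size()).is_err());
+        assert_eq!(u.mutability, Mutability::Mut);
+    }
+}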
+
+impl Allocation {
+ /// Adjusts an allocation from the ones in `tcx` to one for a custom machine instance
+ /// with a different `Provenance` and `Extra` type.
+ pub fn adjust_from_tcx<Prov, Extra, Err>(
+ self,
+ cx: &impl HasDataLayout,
+ extra: Extra,
+ mut adjust_ptr: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Prov>, Err>,
+ ) -> Result<Allocation<Prov, Extra>, Err> {
+ // Compute new pointer provenance, which also adjusts the bytes.
+ let mut bytes = self.bytes;
+ let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
+ let ptr_size = cx.data_layout().pointer_size.bytes_usize();
+ let endian = cx.data_layout().endian;
+ for &(offset, alloc_id) in self.relocations.iter() {
+ let idx = offset.bytes_usize();
+ let ptr_bytes = &mut bytes[idx..idx + ptr_size];
+ let bits = read_target_uint(endian, ptr_bytes).unwrap();
+ let (ptr_prov, ptr_offset) =
+ adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
+ write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
+ new_relocations.push((offset, ptr_prov));
+ }
+ // Create allocation.
+ Ok(Allocation {
+ bytes,
+ relocations: Relocations::from_presorted(new_relocations),
+ init_mask: self.init_mask,
+ align: self.align,
+ mutability: self.mutability,
+ extra,
+ })
+ }
+}
+
+/// Raw accessors. Provide access to otherwise private bytes.
+impl<Prov, Extra> Allocation<Prov, Extra> {
+ pub fn len(&self) -> usize {
+ self.bytes.len()
+ }
+
+ pub fn size(&self) -> Size {
+ Size::from_bytes(self.len())
+ }
+
+ /// Looks at a slice which may contain uninitialized bytes or relocations. This differs
+ /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
+ /// edges) at all.
+ /// This must not be used for reads affecting the interpreter execution.
+ pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
+ &self.bytes[range]
+ }
+
+ /// Returns the mask indicating which bytes are initialized.
+ pub fn init_mask(&self) -> &InitMask {
+ &self.init_mask
+ }
+
+ /// Returns the relocation list.
+ pub fn relocations(&self) -> &Relocations<Prov> {
+ &self.relocations
+ }
+}
+
+/// Byte accessors.
+impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
+ /// This is the entirely abstraction-violating way to just grab the raw bytes without
+ /// caring about relocations. It just deduplicates some code between `read_scalar`
+ /// and `get_bytes_internal`.
+ fn get_bytes_even_more_internal(&self, range: AllocRange) -> &[u8] {
+ &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
+ }
+
+ /// The last argument controls whether we error out when there are uninitialized or pointer
+ /// bytes. However, we *always* error when there are relocations overlapping the edges of the
+ /// range.
+ ///
+ /// You should never call this directly; call `get_bytes` or `get_bytes_with_uninit_and_ptr` instead.
+ ///
+ /// This function also guarantees that the resulting pointer will remain stable
+ /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
+ /// on that.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ fn get_bytes_internal(
+ &self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ check_init_and_ptr: bool,
+ ) -> AllocResult<&[u8]> {
+ if check_init_and_ptr {
+ self.check_init(range)?;
+ self.check_relocations(cx, range)?;
+ } else {
+ // We still don't want relocations on the *edges*.
+ self.check_relocation_edges(cx, range)?;
+ }
+
+ Ok(self.get_bytes_even_more_internal(range))
+ }
+
+ /// Checks that these bytes are initialized and not pointer bytes, and then returns them
+ /// as a slice.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
+ /// on `InterpCx` instead.
+ #[inline]
+ pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
+ self.get_bytes_internal(cx, range, true)
+ }
+
+ /// It is the caller's responsibility to handle uninitialized and pointer bytes.
+ /// However, this still checks that there are no relocations on the *edges*.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ #[inline]
+ pub fn get_bytes_with_uninit_and_ptr(
+ &self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ ) -> AllocResult<&[u8]> {
+ self.get_bytes_internal(cx, range, false)
+ }
+
+ /// Just calling this already marks everything as defined and removes relocations,
+ /// so be sure to actually put data there!
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
+ /// on `InterpCx` instead.
+ pub fn get_bytes_mut(
+ &mut self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ ) -> AllocResult<&mut [u8]> {
+ self.mark_init(range, true);
+ self.clear_relocations(cx, range)?;
+
+ Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
+ }
+
+ /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
+ pub fn get_bytes_mut_ptr(
+ &mut self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ ) -> AllocResult<*mut [u8]> {
+ self.mark_init(range, true);
+ self.clear_relocations(cx, range)?;
+
+ assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
+ let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
+ let len = range.end().bytes_usize() - range.start.bytes_usize();
+ Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
+ }
+}
+
+/// Reading and writing.
+impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
+ /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
+ /// relocation. If `allow_uninit`/`allow_ptr` is `false`, also enforces that the memory in the
+ /// given range contains no uninitialized bytes/relocations.
+ pub fn check_bytes(
+ &self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ allow_uninit: bool,
+ allow_ptr: bool,
+ ) -> AllocResult {
+ // Check bounds and relocations on the edges.
+ self.get_bytes_with_uninit_and_ptr(cx, range)?;
+ // Check uninit and ptr.
+ if !allow_uninit {
+ self.check_init(range)?;
+ }
+ if !allow_ptr {
+ self.check_relocations(cx, range)?;
+ }
+ Ok(())
+ }
+
+ /// Reads a *non-ZST* scalar.
+ ///
+ /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
+ /// supports that) provenance is entirely ignored.
+ ///
+ /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
+ /// for ZSTness anyway due to integer pointers being valid for ZSTs.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
+ pub fn read_scalar(
+ &self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ read_provenance: bool,
+ ) -> AllocResult<ScalarMaybeUninit<Prov>> {
+ if read_provenance {
+ assert_eq!(range.size, cx.data_layout().pointer_size);
+ }
+
+ // First and foremost, if anything is uninit, bail.
+ if self.is_init(range).is_err() {
+ // This inflates uninitialized bytes to the entire scalar, even if only a few
+ // bytes are uninitialized.
+ return Ok(ScalarMaybeUninit::Uninit);
+ }
+
+ // If we are doing a pointer read, and there is a relocation exactly where we
+ // are reading, then we can put data and relocation back together and return that.
+ if read_provenance && let Some(&prov) = self.relocations.get(&range.start) {
+ // We already checked init and relocations, so we can use this function.
+ let bytes = self.get_bytes_even_more_internal(range);
+ let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
+ let ptr = Pointer::new(prov, Size::from_bytes(bits));
+ return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
+ }
+
+ // If we are *not* reading a pointer, and we can just ignore relocations,
+ // then do exactly that.
+ if !read_provenance && Prov::OFFSET_IS_ADDR {
+ // We just strip provenance.
+ let bytes = self.get_bytes_even_more_internal(range);
+ let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
+ return Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)));
+ }
+
+ // It's complicated. Better make sure there is no provenance anywhere.
+ // FIXME: If !OFFSET_IS_ADDR, this is the best we can do. But if OFFSET_IS_ADDR, then
+ // `read_pointer` is true and we ideally would distinguish the following two cases:
+ // - The entire `range` is covered by 2 relocations for the same provenance.
+ // Then we should return a pointer with that provenance.
+ // - The range has inhomogeneous provenance. Then we should return just the
+ // underlying bits.
+ let bytes = self.get_bytes(cx, range)?;
+ let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
+ Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
+ }
+
+ /// Writes a *non-ZST* scalar.
+ ///
+ /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
+ /// for ZSTness anyway due to integer pointers being valid for ZSTs.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
+ #[instrument(skip(self, cx), level = "debug")]
+ pub fn write_scalar(
+ &mut self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ val: ScalarMaybeUninit<Prov>,
+ ) -> AllocResult {
+ assert!(self.mutability == Mutability::Mut);
+
+ let val = match val {
+ ScalarMaybeUninit::Scalar(scalar) => scalar,
+ ScalarMaybeUninit::Uninit => {
+ return self.write_uninit(cx, range);
+ }
+ };
+
+ // `to_bits_or_ptr_internal` is the right method because we just want to store this data
+ // as-is into memory.
+ let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
+ Err(val) => {
+ let (provenance, offset) = val.into_parts();
+ (u128::from(offset.bytes()), Some(provenance))
+ }
+ Ok(data) => (data, None),
+ };
+
+ let endian = cx.data_layout().endian;
+ let dst = self.get_bytes_mut(cx, range)?;
+ write_target_uint(endian, dst, bytes).unwrap();
+
+ // See if we have to also write a relocation.
+ if let Some(provenance) = provenance {
+ self.relocations.0.insert(range.start, provenance);
+ }
+
+ Ok(())
+ }
+
+ /// Write "uninit" to the given memory range.
+ pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+ self.mark_init(range, false);
+ self.clear_relocations(cx, range)?;
+ Ok(())
+ }
+}
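+
+// A round-trip sketch for the two methods above. It assumes `TargetDataLayout::default()`
+// (which implements `HasDataLayout`) as a stand-in interpreter context, and uses
+// `ScalarMaybeUninit::check_init` and `Scalar::to_u32` from this crate to unpack the result.
+#[cfg(test)]
+mod scalar_round_trip_example {
+    use super::*;
+    use rustc_target::abi::TargetDataLayout;
+
+    #[test]
+    fn write_then_read_back_a_u32() {
+        let cx = TargetDataLayout::default();
+        let mut alloc: Allocation =
+            Allocation::from_bytes(vec![0u8; 8], Align::ONE, Mutability::Mut);
+        let range = alloc_range(Size::ZERO, Size::from_bytes(4));
+        alloc
+            .write_scalar(&cx, range, ScalarMaybeUninit::Scalar(Scalar::from_u32(42)))
+            .unwrap();
+        // No provenance was written, so a plain (non-pointer) read gets the bits back.
+        let val = alloc.read_scalar(&cx, range, /* read_provenance */ false).unwrap();
+        assert_eq!(val.check_init().unwrap().to_u32().unwrap(), 42);
+    }
+}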
+
+/// Relocations.
+impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
+ /// Returns all relocations overlapping with the given pointer-offset pair.
+ fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
+ // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
+ // the beginning of this range.
+ let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
+ self.relocations.range(Size::from_bytes(start)..range.end())
+ }
+
+ /// Returns whether this allocation has relocations overlapping with the given range.
+ ///
+ /// Note: this function exists to allow `get_relocations` to be private, in order to somewhat
+ /// limit access to relocations outside of the `Allocation` abstraction.
+ pub fn has_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
+ !self.get_relocations(cx, range).is_empty()
+ }
+
+ /// Checks that there are no relocations overlapping with the given range.
+ #[inline(always)]
+ fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+ if self.has_relocations(cx, range) { Err(AllocError::ReadPointerAsBytes) } else { Ok(()) }
+ }
+
+ /// Removes all relocations inside the given range.
+ /// If there are relocations overlapping with the edges, they
+ /// are removed as well *and* the bytes they cover are marked as
+ /// uninitialized. This is a somewhat odd "spooky action at a distance",
+ /// but it allows strictly more code to run than if we just errored
+ /// immediately in that case.
+ fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
+ where
+ Prov: Provenance,
+ {
+ // Find the start and end of the given range and its outermost relocations.
+ let (first, last) = {
+ // Find all relocations overlapping the given range.
+ let relocations = self.get_relocations(cx, range);
+ if relocations.is_empty() {
+ return Ok(());
+ }
+
+ (
+ relocations.first().unwrap().0,
+ relocations.last().unwrap().0 + cx.data_layout().pointer_size,
+ )
+ };
+ let start = range.start;
+ let end = range.end();
+
+ // We need to handle clearing the relocations from parts of a pointer.
+ // FIXME: Miri should preserve partial relocations; see
+ // https://github.com/rust-lang/miri/issues/2181.
+ if first < start {
+ if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
+ return Err(AllocError::PartialPointerOverwrite(first));
+ }
+ warn!(
+ "Partial pointer overwrite! De-initializing memory at offsets {first:?}..{start:?}."
+ );
+ self.init_mask.set_range(first, start, false);
+ }
+ if last > end {
+ if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
+ return Err(AllocError::PartialPointerOverwrite(
+ last - cx.data_layout().pointer_size,
+ ));
+ }
+ warn!(
+ "Partial pointer overwrite! De-initializing memory at offsets {end:?}..{last:?}."
+ );
+ self.init_mask.set_range(end, last, false);
+ }
+
+ // Forget all the relocations.
+ // Since relocations do not overlap, we know that removing until `last` (exclusive) is fine,
+ // i.e., this will not remove any other relocations just after the ones we care about.
+ self.relocations.0.remove_range(first..last);
+
+ Ok(())
+ }
+
+ /// Errors if there are relocations overlapping with the edges of the
+ /// given memory range.
+ #[inline]
+ fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+ self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
+ self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
+ Ok(())
+ }
+}
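+
+// A small illustration of the look-back rule in `get_relocations` above. This sketch
+// assumes `TargetDataLayout::default()` implements `HasDataLayout` with 8-byte pointers,
+// and plants a relocation directly (provenance `7` is arbitrary) for the sake of the example.
+#[cfg(test)]
+mod relocation_lookback_example {
+    use super::*;
+    use rustc_target::abi::TargetDataLayout;
+
+    #[test]
+    fn pointers_starting_earlier_still_overlap() {
+        let cx = TargetDataLayout::default();
+        let mut alloc: Allocation<u64> =
+            Allocation::from_bytes(vec![0u8; 16], Align::ONE, Mutability::Mut);
+        // Pretend a pointer with provenance `7` is stored at bytes 2..10.
+        alloc.relocations.0.insert(Size::from_bytes(2), 7u64);
+        // A 1-byte access at offset 9 still overlaps the pointer's last byte...
+        assert!(alloc.has_relocations(&cx, alloc_range(Size::from_bytes(9), Size::from_bytes(1))));
+        // ...while an access at offset 10 is already past it.
+        assert!(!alloc.has_relocations(&cx, alloc_range(Size::from_bytes(10), Size::from_bytes(1))));
+    }
+}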
+
+/// "Relocations" stores the provenance information of pointers stored in memory.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+pub struct Relocations<Prov = AllocId>(SortedMap<Size, Prov>);
+
+impl<Prov> Relocations<Prov> {
+ pub fn new() -> Self {
+ Relocations(SortedMap::new())
+ }
+
+ // The caller must guarantee that the given relocations are already sorted
+ // by address and contain no duplicates.
+ pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self {
+ Relocations(SortedMap::from_presorted_elements(r))
+ }
+}
+
+impl<Prov> Deref for Relocations<Prov> {
+ type Target = SortedMap<Size, Prov>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+/// A partial, owned list of relocations to transfer into another allocation.
+///
+/// Offsets are already adjusted to the destination allocation.
+pub struct AllocationRelocations<Prov> {
+ dest_relocations: Vec<(Size, Prov)>,
+}
+
+impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
+ pub fn prepare_relocation_copy(
+ &self,
+ cx: &impl HasDataLayout,
+ src: AllocRange,
+ dest: Size,
+ count: u64,
+ ) -> AllocationRelocations<Prov> {
+ let relocations = self.get_relocations(cx, src);
+ if relocations.is_empty() {
+ return AllocationRelocations { dest_relocations: Vec::new() };
+ }
+
+ let size = src.size;
+ let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));
+
+ // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
+ // is mostly filled with redundant information since it's just N copies of the same `Prov`s
+ // at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range`
+ // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
+ // the right sequence of relocations for all N copies.
+ for i in 0..count {
+ new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
+ // compute offset for current repetition
+ let dest_offset = dest + size * i; // `Size` operations
+ (
+ // shift offsets from source allocation to destination allocation
+ (offset + dest_offset) - src.start, // `Size` operations
+ reloc,
+ )
+ }));
+ }
+
+ AllocationRelocations { dest_relocations: new_relocations }
+ }
+
+ /// Applies a relocation copy.
+ /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
+ /// to be clear of relocations.
+ ///
+ /// This is dangerous to use as it can violate internal `Allocation` invariants!
+ /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
+ pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Prov>) {
+ self.relocations.0.insert_presorted(relocations.dest_relocations);
+ }
+}
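+
+// An illustrative sketch of the expansion performed above: N copies of the same relocation
+// at shifted destination offsets. It assumes `TargetDataLayout::default()` with 8-byte
+// pointers; the provenance value is arbitrary.
+#[cfg(test)]
+mod relocation_copy_example {
+    use super::*;
+    use rustc_target::abi::TargetDataLayout;
+
+    #[test]
+    fn offsets_are_shifted_once_per_copy() {
+        let cx = TargetDataLayout::default();
+        let mut alloc: Allocation<u64> =
+            Allocation::from_bytes(vec![0u8; 8], Align::ONE, Mutability::Mut);
+        alloc.relocations.0.insert(Size::from_bytes(2), 7u64);
+        // Copy the whole 8-byte range twice, starting at destination offset 16:
+        // the relocation at offset 2 lands at 16 + 2 and at 16 + 8 + 2.
+        let prepared = alloc.prepare_relocation_copy(
+            &cx,
+            alloc_range(Size::ZERO, Size::from_bytes(8)),
+            Size::from_bytes(16),
+            2,
+        );
+        let offsets: Vec<u64> =
+            prepared.dest_relocations.iter().map(|&(offset, _)| offset.bytes()).collect();
+        assert_eq!(offsets, vec![18, 26]);
+    }
+}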
+
+////////////////////////////////////////////////////////////////////////////////
+// Uninitialized byte tracking
+////////////////////////////////////////////////////////////////////////////////
+
+type Block = u64;
+
+/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
+/// is initialized. If it is `false` the byte is uninitialized.
+// Note: for performance reasons when interning, some of the `InitMask` fields can be partially
+// hashed (see the `Hash` impl below for more details), so the impl is not derived.
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct InitMask {
+ blocks: Vec<Block>,
+ len: Size,
+}
+
+// Const allocations are only hashed for interning. However, they can be large, making the hashing
+// expensive, especially since it uses `FxHash`, which is better suited to short keys than to
+// potentially big buffers like the allocation's init mask. We can partially hash some fields
+// when they're large.
+impl hash::Hash for InitMask {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ const MAX_BLOCKS_TO_HASH: usize = MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
+ const MAX_BLOCKS_LEN: usize = MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
+
+ // Partially hash the `blocks` buffer when it is large. To limit collisions with common
+ // prefixes and suffixes, we hash the length and some slices of the buffer.
+ let block_count = self.blocks.len();
+ if block_count > MAX_BLOCKS_LEN {
+ // Hash the buffer's length.
+ block_count.hash(state);
+
+ // And its head and tail.
+ self.blocks[..MAX_BLOCKS_TO_HASH].hash(state);
+ self.blocks[block_count - MAX_BLOCKS_TO_HASH..].hash(state);
+ } else {
+ self.blocks.hash(state);
+ }
+
+ // Hash the other fields as usual.
+ self.len.hash(state);
+ }
+}
+
+impl InitMask {
+ pub const BLOCK_SIZE: u64 = 64;
+
+ #[inline]
+ fn bit_index(bits: Size) -> (usize, usize) {
+ // BLOCK_SIZE is the number of bits that can fit in a `Block`.
+ // Each bit in a `Block` represents the initialization state of one byte of an allocation,
+ // so we use `.bytes()` here.
+ let bits = bits.bytes();
+ let a = bits / InitMask::BLOCK_SIZE;
+ let b = bits % InitMask::BLOCK_SIZE;
+ (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
+ }
+
+ #[inline]
+ fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
+ let block = block.try_into().ok().unwrap();
+ let bit = bit.try_into().ok().unwrap();
+ Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
+ }
+
+ pub fn new(size: Size, state: bool) -> Self {
+ let mut m = InitMask { blocks: vec![], len: Size::ZERO };
+ m.grow(size, state);
+ m
+ }
+
+ pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
+ let len = self.len;
+ if end > len {
+ self.grow(end - len, new_state);
+ }
+ self.set_range_inbounds(start, end, new_state);
+ }
+
+ pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
+ let (blocka, bita) = Self::bit_index(start);
+ let (blockb, bitb) = Self::bit_index(end);
+ if blocka == blockb {
+ // First set all bits except the first `bita`,
+ // then unset the last `64 - bitb` bits.
+ let range = if bitb == 0 {
+ u64::MAX << bita
+ } else {
+ (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
+ };
+ if new_state {
+ self.blocks[blocka] |= range;
+ } else {
+ self.blocks[blocka] &= !range;
+ }
+ return;
+ }
+ // across block boundaries
+ if new_state {
+ // Set `bita..64` to `1`.
+ self.blocks[blocka] |= u64::MAX << bita;
+ // Set `0..bitb` to `1`.
+ if bitb != 0 {
+ self.blocks[blockb] |= u64::MAX >> (64 - bitb);
+ }
+ // Fill in all the other blocks (much faster than one bit at a time).
+ for block in (blocka + 1)..blockb {
+ self.blocks[block] = u64::MAX;
+ }
+ } else {
+ // Set `bita..64` to `0`.
+ self.blocks[blocka] &= !(u64::MAX << bita);
+ // Set `0..bitb` to `0`.
+ if bitb != 0 {
+ self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
+ }
+ // Fill in all the other blocks (much faster than one bit at a time).
+ for block in (blocka + 1)..blockb {
+ self.blocks[block] = 0;
+ }
+ }
+ }
+
+ #[inline]
+ pub fn get(&self, i: Size) -> bool {
+ let (block, bit) = Self::bit_index(i);
+ (self.blocks[block] & (1 << bit)) != 0
+ }
+
+ #[inline]
+ pub fn set(&mut self, i: Size, new_state: bool) {
+ let (block, bit) = Self::bit_index(i);
+ self.set_bit(block, bit, new_state);
+ }
+
+ #[inline]
+ fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
+ if new_state {
+ self.blocks[block] |= 1 << bit;
+ } else {
+ self.blocks[block] &= !(1 << bit);
+ }
+ }
+
+ pub fn grow(&mut self, amount: Size, new_state: bool) {
+ if amount.bytes() == 0 {
+ return;
+ }
+ let unused_trailing_bits =
+ u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
+ if amount.bytes() > unused_trailing_bits {
+ let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
+ self.blocks.extend(
+ // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
+ iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
+ );
+ }
+ let start = self.len;
+ self.len += amount;
+ self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
+ }
+
+ /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to `is_init`.
+ fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
+ /// A fast implementation of `find_bit`,
+ /// which skips over an entire block at a time if it's all 0s (resp. 1s),
+ /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
+ ///
+ /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
+ /// and with the least significant bit (and lowest block) first:
+ /// ```text
+ /// 00000000|00000000
+ /// ^ ^ ^ ^
+ /// index: 0 7 8 15
+ /// ```
+ /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
+ fn find_bit_fast(
+ init_mask: &InitMask,
+ start: Size,
+ end: Size,
+ is_init: bool,
+ ) -> Option<Size> {
+ /// Search one block, returning the index of the first bit equal to `is_init`.
+ fn search_block(
+ bits: Block,
+ block: usize,
+ start_bit: usize,
+ is_init: bool,
+ ) -> Option<Size> {
+ // For the following examples, assume this function was called with:
+ // bits = 0b00111011
+ // start_bit = 3
+ // is_init = false
+ // Note that, for the examples in this function, the most significant bit is written first,
+ // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.
+
+ // Invert bits so we're always looking for the first set bit.
+ // ! 0b00111011
+ // bits = 0b11000100
+ let bits = if is_init { bits } else { !bits };
+ // Mask off unused start bits.
+ // 0b11000100
+ // & 0b11111000
+ // bits = 0b11000000
+ let bits = bits & (!0 << start_bit);
+ // Find set bit, if any.
+ // bit = trailing_zeros(0b11000000)
+ // bit = 6
+ if bits == 0 {
+ None
+ } else {
+ let bit = bits.trailing_zeros();
+ Some(InitMask::size_from_bit_index(block, bit))
+ }
+ }
+
+ if start >= end {
+ return None;
+ }
+
+ // Convert `start` and `end` to block indexes and bit indexes within each block.
+ // We must convert `end` to an inclusive bound to handle block boundaries correctly.
+ //
+ // For example:
+ //
+ // (a) 00000000|00000000 (b) 00000000|
+ // ^~~~~~~~~~~^ ^~~~~~~~~^
+ // start end start end
+ //
+ // In both cases, the block index of `end` is 1.
+ // But we do want to search block 1 in (a), and we don't in (b).
+ //
+ // We subtract 1 from both end positions to make them inclusive:
+ //
+ // (a) 00000000|00000000 (b) 00000000|
+ // ^~~~~~~~~~^ ^~~~~~~^
+ // start end_inclusive start end_inclusive
+ //
+ // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
+ // This provides the desired behavior of searching blocks 0 and 1 for (a),
+ // and searching only block 0 for (b).
+ // There is no concern of overflows since we checked for `start >= end` above.
+ let (start_block, start_bit) = InitMask::bit_index(start);
+ let end_inclusive = Size::from_bytes(end.bytes() - 1);
+ let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
+
+ // Handle first block: need to skip `start_bit` bits.
+ //
+ // We need to handle the first block separately,
+ // because there may be bits earlier in the block that should be ignored,
+ // such as the bit marked (1) in this example:
+ //
+ // (1)
+ // -|------
+ // (c) 01000000|00000000|00000001
+ // ^~~~~~~~~~~~~~~~~~^
+ // start end
+ if let Some(i) =
+ search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
+ {
+ // If the range is less than a block, we may find a matching bit after `end`.
+ //
+ // For example, we shouldn't successfully find bit (2), because it's after `end`:
+ //
+ // (2)
+ // -------|
+ // (d) 00000001|00000000|00000001
+ // ^~~~~^
+ // start end
+ //
+ // An alternative would be to mask off end bits in the same way as we do for start bits,
+ // but performing this check afterwards is faster and simpler to implement.
+ if i < end {
+ return Some(i);
+ } else {
+ return None;
+ }
+ }
+
+ // Handle remaining blocks.
+ //
+ // We can skip over an entire block at once if it's all 0s (resp. 1s).
+ // The block marked (3) in this example is the first block that will be handled by this loop,
+ // and it will be skipped for that reason:
+ //
+ // (3)
+ // --------
+ // (e) 01000000|00000000|00000001
+ // ^~~~~~~~~~~~~~~~~~^
+ // start end
+ if start_block < end_block_inclusive {
+ // This loop is written in a specific way for performance.
+ // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
+ // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
+ // because both alternatives result in significantly worse codegen.
+ // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
+ // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
+ for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
+ .iter()
+ .zip(start_block + 1..)
+ {
+ if let Some(i) = search_block(bits, block, 0, is_init) {
+ // If this is the last block, we may find a matching bit after `end`.
+ //
+ // For example, we shouldn't successfully find bit (4), because it's after `end`:
+ //
+ // (4)
+ // -------|
+ // (f) 00000001|00000000|00000001
+ // ^~~~~~~~~~~~~~~~~~^
+ // start end
+ //
+ // As above with example (d), we could handle the end block separately and mask off end bits,
+ // but unconditionally searching an entire block at once and performing this check afterwards
+ // is faster and much simpler to implement.
+ if i < end {
+ return Some(i);
+ } else {
+ return None;
+ }
+ }
+ }
+ }
+
+ None
+ }
+
+ #[cfg_attr(not(debug_assertions), allow(dead_code))]
+ fn find_bit_slow(
+ init_mask: &InitMask,
+ start: Size,
+ end: Size,
+ is_init: bool,
+ ) -> Option<Size> {
+ (start..end).find(|&i| init_mask.get(i) == is_init)
+ }
+
+ let result = find_bit_fast(self, start, end, is_init);
+
+ debug_assert_eq!(
+ result,
+ find_bit_slow(self, start, end, is_init),
+ "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
+ start,
+ end,
+ is_init,
+ self
+ );
+
+ result
+ }
+}
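+
+// An illustrative exercise of the mask above (example sizes only): flip a byte range and
+// observe the per-byte bits.
+#[cfg(test)]
+mod init_mask_example {
+    use super::*;
+
+    #[test]
+    fn set_range_flips_exactly_the_requested_bytes() {
+        // An 8-byte mask, initially all-uninitialized.
+        let mut mask = InitMask::new(Size::from_bytes(8), false);
+        // Mark bytes 2..5 as initialized.
+        mask.set_range(Size::from_bytes(2), Size::from_bytes(5), true);
+        assert!(!mask.get(Size::from_bytes(1)));
+        assert!(mask.get(Size::from_bytes(2)));
+        assert!(mask.get(Size::from_bytes(4)));
+        assert!(!mask.get(Size::from_bytes(5)));
+    }
+}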
+
+/// A contiguous chunk of initialized or uninitialized memory.
+pub enum InitChunk {
+ Init(Range<Size>),
+ Uninit(Range<Size>),
+}
+
+impl InitChunk {
+ #[inline]
+ pub fn is_init(&self) -> bool {
+ match self {
+ Self::Init(_) => true,
+ Self::Uninit(_) => false,
+ }
+ }
+
+ #[inline]
+ pub fn range(&self) -> Range<Size> {
+ match self {
+ Self::Init(r) => r.clone(),
+ Self::Uninit(r) => r.clone(),
+ }
+ }
+}
+
+impl InitMask {
+ /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
+ ///
+ /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
+ /// indexes for the first contiguous span of the uninitialized access.
+ #[inline]
+ pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), AllocRange> {
+ if end > self.len {
+ return Err(AllocRange::from(self.len..end));
+ }
+
+ let uninit_start = self.find_bit(start, end, false);
+
+ match uninit_start {
+ Some(uninit_start) => {
+ let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
+ Err(AllocRange::from(uninit_start..uninit_end))
+ }
+ None => Ok(()),
+ }
+ }
+
+ /// Returns an iterator, yielding a range of byte indexes for each contiguous region
+ /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
+ ///
+ /// The iterator guarantees the following:
+ /// - Chunks are nonempty.
+ /// - Chunks are adjacent (each range's start is equal to the previous range's end).
+ /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
+ /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
+ #[inline]
+ pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
+ assert!(end <= self.len);
+
+ let is_init = if start < end {
+ self.get(start)
+ } else {
+ // `start..end` is empty: there are no chunks, so use some arbitrary value
+ false
+ };
+
+ InitChunkIter { init_mask: self, is_init, start, end }
+ }
+}
+
+/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
+#[derive(Clone)]
+pub struct InitChunkIter<'a> {
+ init_mask: &'a InitMask,
+ /// Whether the next chunk we will return is initialized.
+ /// If there are no more chunks, contains some arbitrary value.
+ is_init: bool,
+ /// The current byte index into `init_mask`.
+ start: Size,
+ /// The end byte index into `init_mask`.
+ end: Size,
+}
+
+impl<'a> Iterator for InitChunkIter<'a> {
+ type Item = InitChunk;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.start >= self.end {
+ return None;
+ }
+
+ let end_of_chunk =
+ self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
+ let range = self.start..end_of_chunk;
+
+ let ret =
+ Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });
+
+ self.is_init = !self.is_init;
+ self.start = end_of_chunk;
+
+ ret
+ }
+}
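+
+// An illustrative walk over the iterator above (example values only): the chunks come out
+// adjacent, alternating, and covering the queried range exactly, as documented.
+#[cfg(test)]
+mod init_chunk_iter_example {
+    use super::*;
+
+    #[test]
+    fn chunks_alternate_and_cover_the_range() {
+        let mut mask = InitMask::new(Size::from_bytes(8), false);
+        mask.set_range(Size::from_bytes(2), Size::from_bytes(5), true);
+        let chunks: Vec<_> = mask
+            .range_as_init_chunks(Size::ZERO, Size::from_bytes(8))
+            .map(|c| (c.is_init(), c.range().start.bytes()..c.range().end.bytes()))
+            .collect();
+        assert_eq!(chunks, vec![(false, 0..2), (true, 2..5), (false, 5..8)]);
+    }
+}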
+
+/// Uninitialized bytes.
+impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
+ /// Checks whether the given range is entirely initialized.
+ ///
+ /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
+ /// indexes of the first contiguous uninitialized access.
+ fn is_init(&self, range: AllocRange) -> Result<(), AllocRange> {
+ self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
+ }
+
+ /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
+ /// error which will report the first range of bytes which is uninitialized.
+ fn check_init(&self, range: AllocRange) -> AllocResult {
+ self.is_init(range).map_err(|uninit_range| {
+ AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
+ access: range,
+ uninit: uninit_range,
+ }))
+ })
+ }
+
+ fn mark_init(&mut self, range: AllocRange, is_init: bool) {
+ if range.size.bytes() == 0 {
+ return;
+ }
+ assert!(self.mutability == Mutability::Mut);
+ self.init_mask.set_range(range.start, range.end(), is_init);
+ }
+}
+
+/// Run-length encoding of the uninit mask.
+/// Used to copy parts of a mask multiple times to another allocation.
+pub struct InitMaskCompressed {
+ /// Whether the first range is initialized.
+ initial: bool,
+ /// The lengths of ranges that are run-length encoded.
+ /// The initialization state of the ranges alternate starting with `initial`.
+ ranges: smallvec::SmallVec<[u64; 1]>,
+}
+
+impl InitMaskCompressed {
+ pub fn no_bytes_init(&self) -> bool {
+ // The `ranges` are run-length encoded and of alternating initialization state.
+ // So if `ranges.len() > 1` then the second block is an initialized range.
+ !self.initial && self.ranges.len() == 1
+ }
+}
+
+/// Transferring the initialization mask to other allocations.
+impl<Prov, Extra> Allocation<Prov, Extra> {
+ /// Creates a run-length encoding of the initialization mask; panics if range is empty.
+ ///
+ /// This is essentially a more space-efficient version of
+ /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
+ pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
+ // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
+ // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
+ // the source and write it to the destination. Even if we optimized the memory accesses,
+ // we'd be doing all of this `repeat` times.
+ // Therefore we precompute a compressed version of the initialization mask of the source value and
+ // then write it back `repeat` times without computing any more information from the source.
+
+ // A precomputed cache for ranges of initialized / uninitialized bits
+ // 0000010010001110 will become
+ // `[5, 1, 2, 1, 3, 3, 1]`,
+ // where each element toggles the state.
+
+ let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
+
+ let mut chunks = self.init_mask.range_as_init_chunks(range.start, range.end()).peekable();
+
+ let initial = chunks.peek().expect("range should be nonempty").is_init();
+
+ // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
+ for chunk in chunks {
+ let len = chunk.range().end.bytes() - chunk.range().start.bytes();
+ ranges.push(len);
+ }
+
+ InitMaskCompressed { ranges, initial }
+ }
+
+ /// Applies multiple instances of the run-length encoding to the initialization mask.
+ ///
+ /// This is dangerous to use as it can violate internal `Allocation` invariants!
+ /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
+ pub fn mark_compressed_init_range(
+ &mut self,
+ defined: &InitMaskCompressed,
+ range: AllocRange,
+ repeat: u64,
+ ) {
+ // An optimization where we can just overwrite an entire range of initialization
+ // bits if they are going to be uniformly `1` or `0`.
+ if defined.ranges.len() <= 1 {
+ self.init_mask.set_range_inbounds(
+ range.start,
+ range.start + range.size * repeat, // `Size` operations
+ defined.initial,
+ );
+ return;
+ }
+
+ for mut j in 0..repeat {
+ j *= range.size.bytes();
+ j += range.start.bytes();
+ let mut cur = defined.initial;
+ for range in &defined.ranges {
+ let old_j = j;
+ j += range;
+ self.init_mask.set_range_inbounds(
+ Size::from_bytes(old_j),
+ Size::from_bytes(j),
+ cur,
+ );
+ cur = !cur;
+ }
+ }
+ }
+}
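+
+// An illustrative round through the encoding above (example sizes only; the test pokes the
+// private init mask directly via `mark_init`, which is visible to a child module).
+#[cfg(test)]
+mod compress_example {
+    use super::*;
+
+    #[test]
+    fn alternating_runs_are_length_encoded() {
+        let mut alloc: Allocation =
+            Allocation::from_bytes(vec![0u8; 8], Align::ONE, Mutability::Mut);
+        // De-initialize bytes 3..5, leaving runs of 3 init, 2 uninit, 3 init bytes.
+        alloc.mark_init(alloc_range(Size::from_bytes(3), Size::from_bytes(2)), false);
+        let compressed =
+            alloc.compress_uninit_range(alloc_range(Size::ZERO, Size::from_bytes(8)));
+        assert!(compressed.initial); // the first run is initialized
+        assert_eq!(compressed.ranges.to_vec(), vec![3, 2, 3]);
+        assert!(!compressed.no_bytes_init());
+    }
+}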
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
new file mode 100644
index 000000000..cecb55578
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -0,0 +1,551 @@
+use super::{AllocId, AllocRange, ConstAlloc, Pointer, Scalar};
+
+use crate::mir::interpret::ConstValue;
+use crate::ty::{layout, query::TyCtxtAt, tls, Ty, ValTree};
+
+use rustc_data_structures::sync::Lock;
+use rustc_errors::{pluralize, struct_span_err, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_macros::HashStable;
+use rustc_session::CtfeBacktrace;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{call, Align, Size};
+use std::{any::Any, backtrace::Backtrace, fmt};
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
+pub enum ErrorHandled {
+ /// Already reported an error for this evaluation, and the compilation is
+ /// *guaranteed* to fail. Warnings/lints *must not* produce `Reported`.
+ Reported(ErrorGuaranteed),
+ /// Already emitted a lint for this evaluation.
+ Linted,
+ /// Don't emit an error; the evaluation failed because the MIR was generic
+ /// and the substs didn't fully monomorphize it.
+ TooGeneric,
+}
+
+impl From<ErrorGuaranteed> for ErrorHandled {
+ fn from(err: ErrorGuaranteed) -> ErrorHandled {
+ ErrorHandled::Reported(err)
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ ErrorHandled,
+}
+
+pub type EvalToAllocationRawResult<'tcx> = Result<ConstAlloc<'tcx>, ErrorHandled>;
+pub type EvalToConstValueResult<'tcx> = Result<ConstValue<'tcx>, ErrorHandled>;
+pub type EvalToValTreeResult<'tcx> = Result<Option<ValTree<'tcx>>, ErrorHandled>;
+
+pub fn struct_error<'tcx>(
+ tcx: TyCtxtAt<'tcx>,
+ msg: &str,
+) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ struct_span_err!(tcx.sess, tcx.span, E0080, "{}", msg)
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(InterpErrorInfo<'_>, 8);
+
+/// Packages the kind of error we got from the const code interpreter
+/// up with a Rust-level backtrace of where the error occurred.
+/// These should always be constructed by calling `.into()` on
+/// an `InterpError`. In `rustc_mir::interpret`, we have `throw_err_*`
+/// macros for this.
+#[derive(Debug)]
+pub struct InterpErrorInfo<'tcx>(Box<InterpErrorInfoInner<'tcx>>);
+
+#[derive(Debug)]
+struct InterpErrorInfoInner<'tcx> {
+ kind: InterpError<'tcx>,
+ backtrace: Option<Box<Backtrace>>,
+}
+
+impl fmt::Display for InterpErrorInfo<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.0.kind)
+ }
+}
+
+impl<'tcx> InterpErrorInfo<'tcx> {
+ pub fn print_backtrace(&self) {
+ if let Some(backtrace) = self.0.backtrace.as_ref() {
+ print_backtrace(backtrace);
+ }
+ }
+
+ pub fn into_kind(self) -> InterpError<'tcx> {
+ let InterpErrorInfo(box InterpErrorInfoInner { kind, .. }) = self;
+ kind
+ }
+
+ #[inline]
+ pub fn kind(&self) -> &InterpError<'tcx> {
+ &self.0.kind
+ }
+}
+
+fn print_backtrace(backtrace: &Backtrace) {
+ eprintln!("\n\nAn error occurred in miri:\n{}", backtrace);
+}
+
+impl From<ErrorHandled> for InterpErrorInfo<'_> {
+ fn from(err: ErrorHandled) -> Self {
+ match err {
+ ErrorHandled::Reported(ErrorGuaranteed { .. }) | ErrorHandled::Linted => {
+ err_inval!(ReferencedConstant)
+ }
+ ErrorHandled::TooGeneric => err_inval!(TooGeneric),
+ }
+ .into()
+ }
+}
+
+impl From<ErrorGuaranteed> for InterpErrorInfo<'_> {
+ fn from(err: ErrorGuaranteed) -> Self {
+ InterpError::InvalidProgram(InvalidProgramInfo::AlreadyReported(err)).into()
+ }
+}
+
+impl<'tcx> From<InterpError<'tcx>> for InterpErrorInfo<'tcx> {
+ fn from(kind: InterpError<'tcx>) -> Self {
+ let capture_backtrace = tls::with_opt(|tcx| {
+ if let Some(tcx) = tcx {
+ *Lock::borrow(&tcx.sess.ctfe_backtrace)
+ } else {
+ CtfeBacktrace::Disabled
+ }
+ });
+
+ let backtrace = match capture_backtrace {
+ CtfeBacktrace::Disabled => None,
+ CtfeBacktrace::Capture => Some(Box::new(Backtrace::force_capture())),
+ CtfeBacktrace::Immediate => {
+ // Print it now.
+ let backtrace = Backtrace::force_capture();
+ print_backtrace(&backtrace);
+ None
+ }
+ };
+
+ InterpErrorInfo(Box::new(InterpErrorInfoInner { kind, backtrace }))
+ }
+}
+
+/// Error information for when the program we executed turned out not to actually be a valid
+/// program. This cannot happen in stand-alone Miri, but it can happen during CTFE/ConstProp
+/// where we work on generic code or execution does not have all information available.
+pub enum InvalidProgramInfo<'tcx> {
+ /// Resolution can fail if we are in a too generic context.
+ TooGeneric,
+ /// Cannot compute this constant because it depends on another one
+ /// which already produced an error.
+ ReferencedConstant,
+ /// Abort in case errors are already reported.
+ AlreadyReported(ErrorGuaranteed),
+ /// An error occurred during layout computation.
+ Layout(layout::LayoutError<'tcx>),
+ /// An error occurred during FnAbi computation: the passed --target lacks FFI support
+ /// (which unfortunately typeck does not reject).
+ /// Not using `FnAbiError` as that contains a nested `LayoutError`.
+ FnAbiAdjustForForeignAbi(call::AdjustForForeignAbiError),
+ /// SizeOf of unsized type was requested.
+ SizeOfUnsizedType(Ty<'tcx>),
+}
+
+impl fmt::Display for InvalidProgramInfo<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use InvalidProgramInfo::*;
+ match self {
+ TooGeneric => write!(f, "encountered overly generic constant"),
+ ReferencedConstant => write!(f, "referenced constant has errors"),
+ AlreadyReported(ErrorGuaranteed { .. }) => {
+ write!(f, "encountered constants with type errors, stopping evaluation")
+ }
+ Layout(ref err) => write!(f, "{err}"),
+ FnAbiAdjustForForeignAbi(ref err) => write!(f, "{err}"),
+ SizeOfUnsizedType(ty) => write!(f, "size_of called on unsized type `{ty}`"),
+ }
+ }
+}
+
+/// Details of why a pointer had to be in-bounds.
+#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum CheckInAllocMsg {
+ /// We are dereferencing a pointer (i.e., creating a place).
+ DerefTest,
+ /// We are accessing memory.
+ MemoryAccessTest,
+ /// We are doing pointer arithmetic.
+ PointerArithmeticTest,
+ /// We are doing pointer offset_from.
+ OffsetFromTest,
+ /// None of the above -- generic/unspecific inbounds test.
+ InboundsTest,
+}
+
+impl fmt::Display for CheckInAllocMsg {
+ /// When this is printed as an error the context looks like this:
+ /// "{msg}{pointer} is a dangling pointer".
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "{}",
+ match *self {
+ CheckInAllocMsg::DerefTest => "dereferencing pointer failed: ",
+ CheckInAllocMsg::MemoryAccessTest => "memory access failed: ",
+ CheckInAllocMsg::PointerArithmeticTest => "out-of-bounds pointer arithmetic: ",
+ CheckInAllocMsg::OffsetFromTest => "out-of-bounds offset_from: ",
+ CheckInAllocMsg::InboundsTest => "out-of-bounds pointer use: ",
+ }
+ )
+ }
+}
+
+/// Details of an access to uninitialized bytes where it is not allowed.
+#[derive(Debug)]
+pub struct UninitBytesAccess {
+ /// Range of the original memory access.
+ pub access: AllocRange,
+ /// Range of the uninit memory that was encountered. (Might not be maximal.)
+ pub uninit: AllocRange,
+}
+
+/// Information about a size mismatch.
+#[derive(Debug)]
+pub struct ScalarSizeMismatch {
+ pub target_size: u64,
+ pub data_size: u64,
+}
+
+/// Error information for when the program caused Undefined Behavior.
+pub enum UndefinedBehaviorInfo {
+ /// Free-form case. Only for errors that are never caught!
+ Ub(String),
+ /// Unreachable code was executed.
+ Unreachable,
+ /// A slice/array index projection went out-of-bounds.
+ BoundsCheckFailed {
+ len: u64,
+ index: u64,
+ },
+ /// Something was divided by 0 (x / 0).
+ DivisionByZero,
+ /// Something was "remainded" by 0 (x % 0).
+ RemainderByZero,
+ /// Signed division overflowed (INT_MIN / -1).
+ DivisionOverflow,
+ /// Signed remainder overflowed (INT_MIN % -1).
+ RemainderOverflow,
+ /// Overflowing inbounds pointer arithmetic.
+ PointerArithOverflow,
+ /// Invalid metadata in a wide pointer (using `str` to avoid allocations).
+ InvalidMeta(&'static str),
+ /// Reading a C string that does not end within its allocation.
+ UnterminatedCString(Pointer),
+ /// Dereferencing a dangling pointer after it got freed.
+ PointerUseAfterFree(AllocId),
+ /// Used a pointer outside the bounds it is valid for.
+ /// (If `ptr_size > 0`, determines the size of the memory range that was expected to be in-bounds.)
+ PointerOutOfBounds {
+ alloc_id: AllocId,
+ alloc_size: Size,
+ ptr_offset: i64,
+ ptr_size: Size,
+ msg: CheckInAllocMsg,
+ },
+ /// Using an integer as a pointer in the wrong way.
+ DanglingIntPointer(u64, CheckInAllocMsg),
+ /// Used a pointer with bad alignment.
+ AlignmentCheckFailed {
+ required: Align,
+ has: Align,
+ },
+ /// Writing to read-only memory.
+ WriteToReadOnly(AllocId),
+ /// Trying to access the data behind a function pointer.
+ DerefFunctionPointer(AllocId),
+ /// Trying to access the data behind a vtable pointer.
+ DerefVTablePointer(AllocId),
+ /// The value validity check found a problem.
+ /// Should only be thrown by `validity.rs` and always point out which part of the value
+ /// is the problem.
+ ValidationFailure {
+ /// The "path" to the value in question, e.g. `.0[5].field` for a struct
+ /// field in the 6th element of an array that is the first element of a tuple.
+ path: Option<String>,
+ msg: String,
+ },
+ /// Using a non-boolean `u8` as bool.
+ InvalidBool(u8),
+ /// Using a non-character `u32` as character.
+ InvalidChar(u32),
+ /// The tag of an enum does not encode an actual discriminant.
+ InvalidTag(Scalar),
+ /// Using a pointer-not-to-a-function as function pointer.
+ InvalidFunctionPointer(Pointer),
+ /// Using a pointer-not-to-a-vtable as vtable pointer.
+ InvalidVTablePointer(Pointer),
+ /// Using a string that is not valid UTF-8.
+ InvalidStr(std::str::Utf8Error),
+ /// Using uninitialized data where it is not allowed.
+ InvalidUninitBytes(Option<(AllocId, UninitBytesAccess)>),
+ /// Working with a local that is not currently live.
+ DeadLocal,
+ /// Data size is not equal to target size.
+ ScalarSizeMismatch(ScalarSizeMismatch),
+ /// A discriminant of an uninhabited enum variant is written.
+ UninhabitedEnumVariantWritten,
+}
+
+impl fmt::Display for UndefinedBehaviorInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use UndefinedBehaviorInfo::*;
+ match self {
+ Ub(msg) => write!(f, "{msg}"),
+ Unreachable => write!(f, "entering unreachable code"),
+ BoundsCheckFailed { ref len, ref index } => {
+ write!(f, "indexing out of bounds: the len is {len} but the index is {index}")
+ }
+ DivisionByZero => write!(f, "dividing by zero"),
+ RemainderByZero => write!(f, "calculating the remainder with a divisor of zero"),
+ DivisionOverflow => write!(f, "overflow in signed division (dividing MIN by -1)"),
+ RemainderOverflow => write!(f, "overflow in signed remainder (dividing MIN by -1)"),
+ PointerArithOverflow => write!(f, "overflowing in-bounds pointer arithmetic"),
+ InvalidMeta(msg) => write!(f, "invalid metadata in wide pointer: {msg}"),
+ UnterminatedCString(p) => write!(
+ f,
+ "reading a null-terminated string starting at {p:?} with no null found before end of allocation",
+ ),
+ PointerUseAfterFree(a) => {
+ write!(f, "pointer to {a:?} was dereferenced after this allocation got freed")
+ }
+ PointerOutOfBounds { alloc_id, alloc_size, ptr_offset, ptr_size: Size::ZERO, msg } => {
+ write!(
+ f,
+ "{msg}{alloc_id:?} has size {alloc_size}, so pointer at offset {ptr_offset} is out-of-bounds",
+ alloc_size = alloc_size.bytes(),
+ )
+ }
+ PointerOutOfBounds { alloc_id, alloc_size, ptr_offset, ptr_size, msg } => write!(
+ f,
+ "{msg}{alloc_id:?} has size {alloc_size}, so pointer to {ptr_size} byte{ptr_size_p} starting at offset {ptr_offset} is out-of-bounds",
+ alloc_size = alloc_size.bytes(),
+ ptr_size = ptr_size.bytes(),
+ ptr_size_p = pluralize!(ptr_size.bytes()),
+ ),
+ DanglingIntPointer(i, msg) => {
+ write!(
+ f,
+ "{msg}{pointer} is a dangling pointer (it has no provenance)",
+ pointer = Pointer::<Option<AllocId>>::from_addr(*i),
+ )
+ }
+ AlignmentCheckFailed { required, has } => write!(
+ f,
+ "accessing memory with alignment {has}, but alignment {required} is required",
+ has = has.bytes(),
+ required = required.bytes()
+ ),
+ WriteToReadOnly(a) => write!(f, "writing to {a:?} which is read-only"),
+ DerefFunctionPointer(a) => write!(f, "accessing {a:?} which contains a function"),
+ DerefVTablePointer(a) => write!(f, "accessing {a:?} which contains a vtable"),
+ ValidationFailure { path: None, msg } => {
+ write!(f, "constructing invalid value: {msg}")
+ }
+ ValidationFailure { path: Some(path), msg } => {
+ write!(f, "constructing invalid value at {path}: {msg}")
+ }
+ InvalidBool(b) => {
+ write!(f, "interpreting an invalid 8-bit value as a bool: 0x{b:02x}")
+ }
+ InvalidChar(c) => {
+ write!(f, "interpreting an invalid 32-bit value as a char: 0x{c:08x}")
+ }
+ InvalidTag(val) => write!(f, "enum value has invalid tag: {val:x}"),
+ InvalidFunctionPointer(p) => {
+ write!(f, "using {p:?} as function pointer but it does not point to a function")
+ }
+ InvalidVTablePointer(p) => {
+ write!(f, "using {p:?} as vtable pointer but it does not point to a vtable")
+ }
+ InvalidStr(err) => write!(f, "this string is not valid UTF-8: {err}"),
+ InvalidUninitBytes(Some((alloc, info))) => write!(
+ f,
+ "reading memory at {alloc:?}{access:?}, \
+ but memory is uninitialized at {uninit:?}, \
+ and this operation requires initialized memory",
+ access = info.access,
+ uninit = info.uninit,
+ ),
+ InvalidUninitBytes(None) => write!(
+ f,
+ "using uninitialized data, but this operation requires initialized memory"
+ ),
+ DeadLocal => write!(f, "accessing a dead local variable"),
+ ScalarSizeMismatch(self::ScalarSizeMismatch { target_size, data_size }) => write!(
+ f,
+ "scalar size mismatch: expected {target_size} bytes but got {data_size} bytes instead",
+ ),
+ UninhabitedEnumVariantWritten => {
+ write!(f, "writing discriminant of an uninhabited enum")
+ }
+ }
+ }
+}
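+
+// A quick illustration that these messages render as written above (example values only).
+#[cfg(test)]
+mod ub_message_example {
+    use super::*;
+
+    #[test]
+    fn bounds_check_failed_renders_len_and_index() {
+        let err = UndefinedBehaviorInfo::BoundsCheckFailed { len: 3, index: 5 };
+        assert_eq!(err.to_string(), "indexing out of bounds: the len is 3 but the index is 5");
+    }
+}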
+
+/// Error information for when the program did something that might (or might not) be correct
+/// to do according to the Rust spec, but due to limitations in the interpreter, the
+/// operation could not be carried out. These limitations can differ between CTFE and the
+/// Miri engine, e.g., CTFE does not support dereferencing pointers at integral addresses.
+pub enum UnsupportedOpInfo {
+ /// Free-form case. Only for errors that are never caught!
+ Unsupported(String),
+ /// Encountered a pointer where we needed raw bytes.
+ ReadPointerAsBytes,
+ /// Overwriting parts of a pointer; the resulting state cannot be represented in our
+ /// `Allocation` data structure. See <https://github.com/rust-lang/miri/issues/2181>.
+ PartialPointerOverwrite(Pointer<AllocId>),
+ //
+ // The variants below are only reachable from CTFE/const prop; Miri will never emit them.
+ //
+ /// Accessing thread local statics.
+ ThreadLocalStatic(DefId),
+ /// Accessing an unsupported extern static.
+ ReadExternStatic(DefId),
+}
+
+impl fmt::Display for UnsupportedOpInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use UnsupportedOpInfo::*;
+ match self {
+ Unsupported(ref msg) => write!(f, "{msg}"),
+ ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes"),
+ PartialPointerOverwrite(ptr) => {
+ write!(f, "unable to overwrite parts of a pointer in memory at {ptr:?}")
+ }
+ ThreadLocalStatic(did) => write!(f, "cannot access thread local static ({did:?})"),
+ ReadExternStatic(did) => write!(f, "cannot read from extern static ({did:?})"),
+ }
+ }
+}
+
+/// Error information for when the program exhausted the resources granted to it
+/// by the interpreter.
+pub enum ResourceExhaustionInfo {
+ /// The stack grew too big.
+ StackFrameLimitReached,
+ /// The program ran for too long.
+ ///
+ /// The exact limit is set by the `const_eval_limit` attribute.
+ StepLimitReached,
+ /// There is not enough memory to perform an allocation.
+ MemoryExhausted,
+}
+
+impl fmt::Display for ResourceExhaustionInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use ResourceExhaustionInfo::*;
+ match self {
+ StackFrameLimitReached => {
+ write!(f, "reached the configured maximum number of stack frames")
+ }
+ StepLimitReached => {
+ write!(f, "exceeded interpreter step limit (see `#[const_eval_limit]`)")
+ }
+ MemoryExhausted => {
+ write!(f, "tried to allocate more memory than available to compiler")
+ }
+ }
+ }
+}
+
+/// A trait to work around not having trait object upcasting.
+pub trait AsAny: Any {
+ fn as_any(&self) -> &dyn Any;
+}
+impl<T: Any> AsAny for T {
+ #[inline(always)]
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+}
+
+/// A trait for machine-specific errors (or other "machine stop" conditions).
+pub trait MachineStopType: AsAny + fmt::Display + Send {
+ /// If `true`, emit a hard error instead of going through the `CONST_ERR` lint
+ fn is_hard_err(&self) -> bool {
+ false
+ }
+}
+
+impl dyn MachineStopType {
+ #[inline(always)]
+ pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
+ self.as_any().downcast_ref()
+ }
+}
+
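+// Sketch of the intended use (hypothetical, not in the original source): a
+// machine defines `struct Halt;`, gives it `Display` and `Send` impls, and
+// implements `MachineStopType` (the `AsAny` blanket impl comes for free).
+// It raises the condition with `throw_machine_stop!(Halt)` and the driver
+// later recovers it via `err.downcast_ref::<Halt>()`.
+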
+pub enum InterpError<'tcx> {
+ /// The program caused undefined behavior.
+ UndefinedBehavior(UndefinedBehaviorInfo),
+ /// The program did something the interpreter does not support (some of these *might* be UB
+ /// but the interpreter is not sure).
+ Unsupported(UnsupportedOpInfo),
+ /// The program was invalid (ill-typed, bad MIR, not sufficiently monomorphized, ...).
+ InvalidProgram(InvalidProgramInfo<'tcx>),
+ /// The program exhausted the interpreter's resources (stack/heap too big,
+ /// execution takes too long, ...).
+ ResourceExhaustion(ResourceExhaustionInfo),
+ /// Stop execution for a machine-controlled reason. This is never raised by
+ /// the core engine itself.
+ MachineStop(Box<dyn MachineStopType>),
+}
+
+pub type InterpResult<'tcx, T = ()> = Result<T, InterpErrorInfo<'tcx>>;
+
+impl fmt::Display for InterpError<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use InterpError::*;
+ match *self {
+ Unsupported(ref msg) => write!(f, "{msg}"),
+ InvalidProgram(ref msg) => write!(f, "{msg}"),
+ UndefinedBehavior(ref msg) => write!(f, "{msg}"),
+ ResourceExhaustion(ref msg) => write!(f, "{msg}"),
+ MachineStop(ref msg) => write!(f, "{msg}"),
+ }
+ }
+}
+
+// Forward `Debug` to `Display`, so it does not look awful.
+impl fmt::Debug for InterpError<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl InterpError<'_> {
+ /// Some errors do string formatting even if the error is never printed.
+ /// To avoid performance issues, there are places where we want to be sure to never raise these formatting errors,
+ /// so this method lets us detect them and `bug!` on unexpected errors.
+ pub fn formatted_string(&self) -> bool {
+ matches!(
+ self,
+ InterpError::Unsupported(UnsupportedOpInfo::Unsupported(_))
+ | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ValidationFailure { .. })
+ | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_))
+ )
+ }
+
+ /// Should this error be reported as a hard error, preventing compilation, or a soft error,
+ /// causing a deny-by-default lint?
+ pub fn is_hard_err(&self) -> bool {
+ use InterpError::*;
+ match *self {
+ MachineStop(ref err) => err.is_hard_err(),
+ UndefinedBehavior(_) => true,
+ ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted) => true,
+ _ => false,
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
new file mode 100644
index 000000000..967f8ece1
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -0,0 +1,633 @@
+//! An interpreter for MIR used in CTFE and by miri.
+
+#[macro_export]
+macro_rules! err_unsup {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::Unsupported(
+ $crate::mir::interpret::UnsupportedOpInfo::$($tt)*
+ )
+ };
+}
+
+#[macro_export]
+macro_rules! err_unsup_format {
+ ($($tt:tt)*) => { err_unsup!(Unsupported(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! err_inval {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::InvalidProgram(
+ $crate::mir::interpret::InvalidProgramInfo::$($tt)*
+ )
+ };
+}
+
+#[macro_export]
+macro_rules! err_ub {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::UndefinedBehavior(
+ $crate::mir::interpret::UndefinedBehaviorInfo::$($tt)*
+ )
+ };
+}
+
+#[macro_export]
+macro_rules! err_ub_format {
+ ($($tt:tt)*) => { err_ub!(Ub(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! err_exhaust {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::ResourceExhaustion(
+ $crate::mir::interpret::ResourceExhaustionInfo::$($tt)*
+ )
+ };
+}
+
+#[macro_export]
+macro_rules! err_machine_stop {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::MachineStop(Box::new($($tt)*))
+ };
+}
+
+// In the `throw_*` macros, avoid `return` to make them work with `try {}`.
+#[macro_export]
+macro_rules! throw_unsup {
+ ($($tt:tt)*) => { do yeet err_unsup!($($tt)*) };
+}
+
+#[macro_export]
+macro_rules! throw_unsup_format {
+ ($($tt:tt)*) => { throw_unsup!(Unsupported(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! throw_inval {
+ ($($tt:tt)*) => { do yeet err_inval!($($tt)*) };
+}
+
+#[macro_export]
+macro_rules! throw_ub {
+ ($($tt:tt)*) => { do yeet err_ub!($($tt)*) };
+}
+
+#[macro_export]
+macro_rules! throw_ub_format {
+ ($($tt:tt)*) => { throw_ub!(Ub(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! throw_exhaust {
+ ($($tt:tt)*) => { do yeet err_exhaust!($($tt)*) };
+}
+
+#[macro_export]
+macro_rules! throw_machine_stop {
+ ($($tt:tt)*) => { do yeet err_machine_stop!($($tt)*) };
+}
+
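+// A hedged sketch of how the macros compose (not part of the original
+// source): the `err_*` macros build an `InterpError` value that callers can
+// wrap or store, while the `throw_*` macros yeet it as the error branch of
+// an `InterpResult`. A bounds check inside an interpreter step might read:
+//
+//     fn check_len<'tcx>(len: u64, idx: u64) -> InterpResult<'tcx> {
+//         if idx >= len {
+//             throw_ub_format!("index {idx} out of bounds for length {len}");
+//         }
+//         Ok(())
+//     }
+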
+mod allocation;
+mod error;
+mod pointer;
+mod queries;
+mod value;
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::io::{Read, Write};
+use std::num::{NonZeroU32, NonZeroU64};
+use std::sync::atomic::{AtomicU32, Ordering};
+
+use rustc_ast::LitKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::{HashMapExt, Lock};
+use rustc_data_structures::tiny_list::TinyList;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_serialize::{Decodable, Encodable};
+use rustc_target::abi::Endian;
+
+use crate::mir;
+use crate::ty::codec::{TyDecoder, TyEncoder};
+use crate::ty::subst::GenericArgKind;
+use crate::ty::{self, Instance, Ty, TyCtxt};
+
+pub use self::error::{
+ struct_error, CheckInAllocMsg, ErrorHandled, EvalToAllocationRawResult, EvalToConstValueResult,
+ EvalToValTreeResult, InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo,
+ MachineStopType, ResourceExhaustionInfo, ScalarSizeMismatch, UndefinedBehaviorInfo,
+ UninitBytesAccess, UnsupportedOpInfo,
+};
+
+pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};
+
+pub use self::allocation::{
+ alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask,
+ Relocations,
+};
+
+pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
+
+/// Uniquely identifies one of the following:
+/// - A constant
+/// - A static
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, Lift)]
+pub struct GlobalId<'tcx> {
+ /// For a constant or static, the `Instance` of the item itself.
+ /// For a promoted global, the `Instance` of the function they belong to.
+ pub instance: ty::Instance<'tcx>,
+
+ /// The index for promoted globals within their function's `mir::Body`.
+ pub promoted: Option<mir::Promoted>,
+}
+
+impl<'tcx> GlobalId<'tcx> {
+ pub fn display(self, tcx: TyCtxt<'tcx>) -> String {
+ let instance_name = with_no_trimmed_paths!(tcx.def_path_str(self.instance.def.def_id()));
+ if let Some(promoted) = self.promoted {
+ format!("{}::{:?}", instance_name, promoted)
+ } else {
+ instance_name
+ }
+ }
+}
+
+/// Input argument for `tcx.lit_to_const`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, HashStable)]
+pub struct LitToConstInput<'tcx> {
+ /// The absolute value of the resultant constant.
+ pub lit: &'tcx LitKind,
+ /// The type of the constant.
+ pub ty: Ty<'tcx>,
+ /// If the constant is negative.
+ pub neg: bool,
+}
+
+/// Error type for `tcx.lit_to_const`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)]
+pub enum LitToConstError {
+ /// The literal's inferred type did not match the expected `ty` in the input.
+ /// This is used for graceful error handling (`delay_span_bug`) in
+ /// type checking (`Const::from_anon_const`).
+ TypeError,
+ Reported,
+}
+
+#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct AllocId(pub NonZeroU64);
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl fmt::Debug for AllocId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if f.alternate() { write!(f, "a{}", self.0) } else { write!(f, "alloc{}", self.0) }
+ }
+}
+
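+// For illustration (not in the original source): with `self.0 == 7`,
+// `format!("{:?}", id)` yields "alloc7" while `format!("{:#?}", id)` yields
+// "a7"; the alternate form keeps nested `derive(Debug)` dumps compact.
+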
+// No "Display" since AllocIds are not usually user-visible.
+
+#[derive(TyDecodable, TyEncodable)]
+enum AllocDiscriminant {
+ Alloc,
+ Fn,
+ VTable,
+ Static,
+}
+
+pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>>(
+ encoder: &mut E,
+ tcx: TyCtxt<'tcx>,
+ alloc_id: AllocId,
+) {
+ match tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ trace!("encoding {:?} with {:#?}", alloc_id, alloc);
+ AllocDiscriminant::Alloc.encode(encoder);
+ alloc.encode(encoder);
+ }
+ GlobalAlloc::Function(fn_instance) => {
+ trace!("encoding {:?} with {:#?}", alloc_id, fn_instance);
+ AllocDiscriminant::Fn.encode(encoder);
+ fn_instance.encode(encoder);
+ }
+ GlobalAlloc::VTable(ty, poly_trait_ref) => {
+ trace!("encoding {:?} with {ty:#?}, {poly_trait_ref:#?}", alloc_id);
+ AllocDiscriminant::VTable.encode(encoder);
+ ty.encode(encoder);
+ poly_trait_ref.encode(encoder);
+ }
+ GlobalAlloc::Static(did) => {
+ assert!(!tcx.is_thread_local_static(did));
+            // References to statics don't need to know about their allocations,
+            // just about the `DefId`.
+ AllocDiscriminant::Static.encode(encoder);
+ did.encode(encoder);
+ }
+ }
+}
+
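+// The encoded layout is thus a discriminant tag followed by the variant's
+// payload (sketch, not normative):
+//
+//     Alloc  -> [AllocDiscriminant::Alloc][Allocation]
+//     Fn     -> [AllocDiscriminant::Fn][Instance]
+//     VTable -> [AllocDiscriminant::VTable][Ty][Option<PolyExistentialTraitRef>]
+//     Static -> [AllocDiscriminant::Static][DefId]
+//
+// `decode_alloc_id` below reads the tag first so it can decide whether an
+// `AllocId` must be reserved before the payload is decoded.
+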
+// Used to avoid infinite recursion when decoding cyclic allocations.
+type DecodingSessionId = NonZeroU32;
+
+#[derive(Clone)]
+enum State {
+ Empty,
+ InProgressNonAlloc(TinyList<DecodingSessionId>),
+ InProgress(TinyList<DecodingSessionId>, AllocId),
+ Done(AllocId),
+}
+
+pub struct AllocDecodingState {
+ // For each `AllocId`, we keep track of which decoding state it's currently in.
+ decoding_state: Vec<Lock<State>>,
+ // The offsets of each allocation in the data stream.
+ data_offsets: Vec<u32>,
+}
+
+impl AllocDecodingState {
+ #[inline]
+ pub fn new_decoding_session(&self) -> AllocDecodingSession<'_> {
+ static DECODER_SESSION_ID: AtomicU32 = AtomicU32::new(0);
+ let counter = DECODER_SESSION_ID.fetch_add(1, Ordering::SeqCst);
+
+ // Make sure this is never zero.
+ let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF) + 1).unwrap();
+
+ AllocDecodingSession { state: self, session_id }
+ }
+
+ pub fn new(data_offsets: Vec<u32>) -> Self {
+ let decoding_state = vec![Lock::new(State::Empty); data_offsets.len()];
+
+ Self { decoding_state, data_offsets }
+ }
+}
+
+#[derive(Copy, Clone)]
+pub struct AllocDecodingSession<'s> {
+ state: &'s AllocDecodingState,
+ session_id: DecodingSessionId,
+}
+
+impl<'s> AllocDecodingSession<'s> {
+ /// Decodes an `AllocId` in a thread-safe way.
+ pub fn decode_alloc_id<'tcx, D>(&self, decoder: &mut D) -> AllocId
+ where
+ D: TyDecoder<I = TyCtxt<'tcx>>,
+ {
+ // Read the index of the allocation.
+ let idx = usize::try_from(decoder.read_u32()).unwrap();
+ let pos = usize::try_from(self.state.data_offsets[idx]).unwrap();
+
+ // Decode the `AllocDiscriminant` now so that we know if we have to reserve an
+ // `AllocId`.
+ let (alloc_kind, pos) = decoder.with_position(pos, |decoder| {
+ let alloc_kind = AllocDiscriminant::decode(decoder);
+ (alloc_kind, decoder.position())
+ });
+
+ // Check the decoding state to see if it's already decoded or if we should
+ // decode it here.
+ let alloc_id = {
+ let mut entry = self.state.decoding_state[idx].lock();
+
+ match *entry {
+ State::Done(alloc_id) => {
+ return alloc_id;
+ }
+ ref mut entry @ State::Empty => {
+ // We are allowed to decode.
+ match alloc_kind {
+ AllocDiscriminant::Alloc => {
+ // If this is an allocation, we need to reserve an
+ // `AllocId` so we can decode cyclic graphs.
+ let alloc_id = decoder.interner().reserve_alloc_id();
+ *entry =
+ State::InProgress(TinyList::new_single(self.session_id), alloc_id);
+ Some(alloc_id)
+ }
+ AllocDiscriminant::Fn
+ | AllocDiscriminant::Static
+ | AllocDiscriminant::VTable => {
+ // Fns and statics cannot be cyclic, and their `AllocId`
+ // is determined later by interning.
+ *entry =
+ State::InProgressNonAlloc(TinyList::new_single(self.session_id));
+ None
+ }
+ }
+ }
+ State::InProgressNonAlloc(ref mut sessions) => {
+ if sessions.contains(&self.session_id) {
+ bug!("this should be unreachable");
+ } else {
+ // Start decoding concurrently.
+ sessions.insert(self.session_id);
+ None
+ }
+ }
+ State::InProgress(ref mut sessions, alloc_id) => {
+ if sessions.contains(&self.session_id) {
+ // Don't recurse.
+ return alloc_id;
+ } else {
+ // Start decoding concurrently.
+ sessions.insert(self.session_id);
+ Some(alloc_id)
+ }
+ }
+ }
+ };
+
+ // Now decode the actual data.
+ let alloc_id = decoder.with_position(pos, |decoder| {
+ match alloc_kind {
+ AllocDiscriminant::Alloc => {
+ let alloc = <ConstAllocation<'tcx> as Decodable<_>>::decode(decoder);
+ // We already have a reserved `AllocId`.
+ let alloc_id = alloc_id.unwrap();
+ trace!("decoded alloc {:?}: {:#?}", alloc_id, alloc);
+ decoder.interner().set_alloc_id_same_memory(alloc_id, alloc);
+ alloc_id
+ }
+ AllocDiscriminant::Fn => {
+ assert!(alloc_id.is_none());
+ trace!("creating fn alloc ID");
+ let instance = ty::Instance::decode(decoder);
+ trace!("decoded fn alloc instance: {:?}", instance);
+ let alloc_id = decoder.interner().create_fn_alloc(instance);
+ alloc_id
+ }
+ AllocDiscriminant::VTable => {
+ assert!(alloc_id.is_none());
+ trace!("creating vtable alloc ID");
+ let ty = <Ty<'_> as Decodable<D>>::decode(decoder);
+ let poly_trait_ref =
+ <Option<ty::PolyExistentialTraitRef<'_>> as Decodable<D>>::decode(decoder);
+ trace!("decoded vtable alloc instance: {ty:?}, {poly_trait_ref:?}");
+ let alloc_id = decoder.interner().create_vtable_alloc(ty, poly_trait_ref);
+ alloc_id
+ }
+ AllocDiscriminant::Static => {
+ assert!(alloc_id.is_none());
+ trace!("creating extern static alloc ID");
+ let did = <DefId as Decodable<D>>::decode(decoder);
+ trace!("decoded static def-ID: {:?}", did);
+ let alloc_id = decoder.interner().create_static_alloc(did);
+ alloc_id
+ }
+ }
+ });
+
+ self.state.decoding_state[idx].with_lock(|entry| {
+ *entry = State::Done(alloc_id);
+ });
+
+ alloc_id
+ }
+}
+
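+// State transitions per slot during decoding (sketch of the logic above):
+//
+//     Empty -> InProgress(_, id)       // `Alloc`: id reserved up front
+//     Empty -> InProgressNonAlloc(_)   // `Fn` / `VTable` / `Static`
+//     InProgress(_, id) seen again by the same session -> return id (cycle)
+//     any   -> Done(id)                // once the payload is decoded
+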
+/// An allocation in the global (tcx-managed) memory can be either a function pointer,
+/// a static, or a "real" allocation with some data in it.
+#[derive(Debug, Clone, Eq, PartialEq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub enum GlobalAlloc<'tcx> {
+ /// The alloc ID is used as a function pointer.
+ Function(Instance<'tcx>),
+ /// This alloc ID points to a symbolic (not-reified) vtable.
+ VTable(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>),
+ /// The alloc ID points to a "lazy" static variable that did not get computed (yet).
+ /// This is also used to break the cycle in recursive statics.
+ Static(DefId),
+ /// The alloc ID points to memory.
+ Memory(ConstAllocation<'tcx>),
+}
+
+impl<'tcx> GlobalAlloc<'tcx> {
+    /// Panics if the `GlobalAlloc` does not refer to a `GlobalAlloc::Memory`
+ #[track_caller]
+ #[inline]
+ pub fn unwrap_memory(&self) -> ConstAllocation<'tcx> {
+ match *self {
+ GlobalAlloc::Memory(mem) => mem,
+ _ => bug!("expected memory, got {:?}", self),
+ }
+ }
+
+ /// Panics if the `GlobalAlloc` is not `GlobalAlloc::Function`
+ #[track_caller]
+ #[inline]
+ pub fn unwrap_fn(&self) -> Instance<'tcx> {
+ match *self {
+ GlobalAlloc::Function(instance) => instance,
+ _ => bug!("expected function, got {:?}", self),
+ }
+ }
+
+ /// Panics if the `GlobalAlloc` is not `GlobalAlloc::VTable`
+ #[track_caller]
+ #[inline]
+ pub fn unwrap_vtable(&self) -> (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>) {
+ match *self {
+ GlobalAlloc::VTable(ty, poly_trait_ref) => (ty, poly_trait_ref),
+ _ => bug!("expected vtable, got {:?}", self),
+ }
+ }
+}
+
+pub(crate) struct AllocMap<'tcx> {
+ /// Maps `AllocId`s to their corresponding allocations.
+ alloc_map: FxHashMap<AllocId, GlobalAlloc<'tcx>>,
+
+ /// Used to ensure that statics and functions only get one associated `AllocId`.
+ /// Should never contain a `GlobalAlloc::Memory`!
+ //
+ // FIXME: Should we just have two separate dedup maps for statics and functions each?
+ dedup: FxHashMap<GlobalAlloc<'tcx>, AllocId>,
+
+ /// The `AllocId` to assign to the next requested ID.
+ /// Always incremented; never gets smaller.
+ next_id: AllocId,
+}
+
+impl<'tcx> AllocMap<'tcx> {
+ pub(crate) fn new() -> Self {
+ AllocMap {
+ alloc_map: Default::default(),
+ dedup: Default::default(),
+ next_id: AllocId(NonZeroU64::new(1).unwrap()),
+ }
+ }
+ fn reserve(&mut self) -> AllocId {
+ let next = self.next_id;
+ self.next_id.0 = self.next_id.0.checked_add(1).expect(
+ "You overflowed a u64 by incrementing by 1... \
+ You've just earned yourself a free drink if we ever meet. \
+ Seriously, how did you do that?!",
+ );
+ next
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Obtains a new allocation ID that can be referenced but does not
+ /// yet have an allocation backing it.
+ ///
+ /// Make sure to call `set_alloc_id_memory` or `set_alloc_id_same_memory` before returning such
+ /// an `AllocId` from a query.
+ pub fn reserve_alloc_id(self) -> AllocId {
+ self.alloc_map.lock().reserve()
+ }
+
+ /// Reserves a new ID *if* this allocation has not been dedup-reserved before.
+    /// Should only be used for "symbolic" allocations (function pointers, vtables, statics); we
+ /// don't want to dedup IDs for "real" memory!
+ fn reserve_and_set_dedup(self, alloc: GlobalAlloc<'tcx>) -> AllocId {
+ let mut alloc_map = self.alloc_map.lock();
+ match alloc {
+ GlobalAlloc::Function(..) | GlobalAlloc::Static(..) | GlobalAlloc::VTable(..) => {}
+ GlobalAlloc::Memory(..) => bug!("Trying to dedup-reserve memory with real data!"),
+ }
+ if let Some(&alloc_id) = alloc_map.dedup.get(&alloc) {
+ return alloc_id;
+ }
+ let id = alloc_map.reserve();
+ debug!("creating alloc {alloc:?} with id {id:?}");
+ alloc_map.alloc_map.insert(id, alloc.clone());
+ alloc_map.dedup.insert(alloc, id);
+ id
+ }
+
+    /// Generates an `AllocId` for a static or returns a cached one in case this function has been
+ /// called on the same static before.
+ pub fn create_static_alloc(self, static_id: DefId) -> AllocId {
+ self.reserve_and_set_dedup(GlobalAlloc::Static(static_id))
+ }
+
+ /// Generates an `AllocId` for a function. Depending on the function type,
+ /// this might get deduplicated or assigned a new ID each time.
+ pub fn create_fn_alloc(self, instance: Instance<'tcx>) -> AllocId {
+ // Functions cannot be identified by pointers, as asm-equal functions can get deduplicated
+ // by the linker (we set the "unnamed_addr" attribute for LLVM) and functions can be
+ // duplicated across crates.
+ // We thus generate a new `AllocId` for every mention of a function. This means that
+ // `main as fn() == main as fn()` is false, while `let x = main as fn(); x == x` is true.
+ // However, formatting code relies on function identity (see #58320), so we only do
+ // this for generic functions. Lifetime parameters are ignored.
+ let is_generic = instance
+ .substs
+ .into_iter()
+ .any(|kind| !matches!(kind.unpack(), GenericArgKind::Lifetime(_)));
+ if is_generic {
+ // Get a fresh ID.
+ let mut alloc_map = self.alloc_map.lock();
+ let id = alloc_map.reserve();
+ alloc_map.alloc_map.insert(id, GlobalAlloc::Function(instance));
+ id
+ } else {
+ // Deduplicate.
+ self.reserve_and_set_dedup(GlobalAlloc::Function(instance))
+ }
+ }
+
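+    // Illustration (hypothetical, not in the original source): for a
+    // non-generic `fn main()`, repeated calls hit the dedup map and return
+    // the same `AllocId`, so `main as fn() == main as fn()` can hold; for
+    // `foo::<u32>` the type argument makes `is_generic` true, so every
+    // mention reserves a fresh id and two such fn pointers may compare
+    // unequal.
+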
+ /// Generates an `AllocId` for a (symbolic, not-reified) vtable. Will get deduplicated.
+ pub fn create_vtable_alloc(
+ self,
+ ty: Ty<'tcx>,
+ poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+ ) -> AllocId {
+ self.reserve_and_set_dedup(GlobalAlloc::VTable(ty, poly_trait_ref))
+ }
+
+    /// Interns the `Allocation` and returns a new `AllocId`, even if there's already an identical
+ /// `Allocation` with a different `AllocId`.
+ /// Statics with identical content will still point to the same `Allocation`, i.e.,
+ /// their data will be deduplicated through `Allocation` interning -- but they
+ /// are different places in memory and as such need different IDs.
+ pub fn create_memory_alloc(self, mem: ConstAllocation<'tcx>) -> AllocId {
+ let id = self.reserve_alloc_id();
+ self.set_alloc_id_memory(id, mem);
+ id
+ }
+
+ /// Returns `None` in case the `AllocId` is dangling. An `InterpretCx` can still have a
+ /// local `Allocation` for that `AllocId`, but having such an `AllocId` in a constant is
+ /// illegal and will likely ICE.
+ /// This function exists to allow const eval to detect the difference between evaluation-
+ /// local dangling pointers and allocations in constants/statics.
+ #[inline]
+ pub fn try_get_global_alloc(self, id: AllocId) -> Option<GlobalAlloc<'tcx>> {
+ self.alloc_map.lock().alloc_map.get(&id).cloned()
+ }
+
+ #[inline]
+ #[track_caller]
+ /// Panics in case the `AllocId` is dangling. Since that is impossible for `AllocId`s in
+    /// constants (as all constants must pass interning and validation that checks for dangling
+ /// ids), this function is frequently used throughout rustc, but should not be used within
+ /// the miri engine.
+ pub fn global_alloc(self, id: AllocId) -> GlobalAlloc<'tcx> {
+ match self.try_get_global_alloc(id) {
+ Some(alloc) => alloc,
+ None => bug!("could not find allocation for {id:?}"),
+ }
+ }
+
+ /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. Trying to
+    /// call this function twice, even with the same `Allocation`, will ICE the compiler.
+ pub fn set_alloc_id_memory(self, id: AllocId, mem: ConstAllocation<'tcx>) {
+ if let Some(old) = self.alloc_map.lock().alloc_map.insert(id, GlobalAlloc::Memory(mem)) {
+ bug!("tried to set allocation ID {id:?}, but it was already existing as {old:#?}");
+ }
+ }
+
+ /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. May be called
+ /// twice for the same `(AllocId, Allocation)` pair.
+ fn set_alloc_id_same_memory(self, id: AllocId, mem: ConstAllocation<'tcx>) {
+ self.alloc_map.lock().alloc_map.insert_same(id, GlobalAlloc::Memory(mem));
+ }
+}
+
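+// Typical lifecycle of a memory `AllocId` (sketch; `tcx` and `alloc` are
+// assumed to be a `TyCtxt` and a `ConstAllocation` in scope):
+//
+//     let id = tcx.reserve_alloc_id();     // dangling until backed
+//     tcx.set_alloc_id_memory(id, alloc);  // freeze it; a second call would ICE
+//     assert!(tcx.try_get_global_alloc(id).is_some());
+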
+////////////////////////////////////////////////////////////////////////////////
+// Methods to access integers in the target endianness
+////////////////////////////////////////////////////////////////////////////////
+
+#[inline]
+pub fn write_target_uint(
+ endianness: Endian,
+ mut target: &mut [u8],
+ data: u128,
+) -> Result<(), io::Error> {
+    // This u128 holds an "any-size uint" (since smaller uints can fit in it),
+    // so we do not write all bytes of the u128, just the "payload".
+ match endianness {
+ Endian::Little => target.write(&data.to_le_bytes())?,
+ Endian::Big => target.write(&data.to_be_bytes()[16 - target.len()..])?,
+ };
+ debug_assert!(target.len() == 0); // We should have filled the target buffer.
+ Ok(())
+}
+
+#[inline]
+pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> {
+    // This u128 holds an "any-size uint" (since smaller uints can fit in it)
+ let mut buf = [0u8; std::mem::size_of::<u128>()];
+ // So we do not read exactly 16 bytes into the u128, just the "payload".
+ let uint = match endianness {
+ Endian::Little => {
+ source.read(&mut buf)?;
+ Ok(u128::from_le_bytes(buf))
+ }
+ Endian::Big => {
+ source.read(&mut buf[16 - source.len()..])?;
+ Ok(u128::from_be_bytes(buf))
+ }
+ };
+ debug_assert!(source.len() == 0); // We should have consumed the source buffer.
+ uint
+}
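+
+// Round-trip sketch (illustrative only): only `target.len()` payload bytes
+// are written and read, so a 2-byte buffer round-trips a 16-bit value:
+//
+//     let mut buf = [0u8; 2];
+//     write_target_uint(Endian::Little, &mut buf, 0x1234).unwrap();
+//     assert_eq!(buf, [0x34, 0x12]);
+//     assert_eq!(read_target_uint(Endian::Little, &buf).unwrap(), 0x1234);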
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
new file mode 100644
index 000000000..384954cbb
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -0,0 +1,307 @@
+use super::{AllocId, InterpResult};
+
+use rustc_macros::HashStable;
+use rustc_target::abi::{HasDataLayout, Size};
+
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+
+////////////////////////////////////////////////////////////////////////////////
+// Pointer arithmetic
+////////////////////////////////////////////////////////////////////////////////
+
+pub trait PointerArithmetic: HasDataLayout {
+ // These are not supposed to be overridden.
+
+ #[inline(always)]
+ fn pointer_size(&self) -> Size {
+ self.data_layout().pointer_size
+ }
+
+ #[inline(always)]
+ fn max_size_of_val(&self) -> Size {
+ Size::from_bytes(self.machine_isize_max())
+ }
+
+ #[inline]
+ fn machine_usize_max(&self) -> u64 {
+ self.pointer_size().unsigned_int_max().try_into().unwrap()
+ }
+
+ #[inline]
+ fn machine_isize_min(&self) -> i64 {
+ self.pointer_size().signed_int_min().try_into().unwrap()
+ }
+
+ #[inline]
+ fn machine_isize_max(&self) -> i64 {
+ self.pointer_size().signed_int_max().try_into().unwrap()
+ }
+
+ #[inline]
+ fn machine_usize_to_isize(&self, val: u64) -> i64 {
+ let val = val as i64;
+ // Now wrap-around into the machine_isize range.
+ if val > self.machine_isize_max() {
+            // This can only happen if the ptr size is < 64, so we know max_usize_plus_1 fits into
+ // i64.
+ debug_assert!(self.pointer_size().bits() < 64);
+ let max_usize_plus_1 = 1u128 << self.pointer_size().bits();
+ val - i64::try_from(max_usize_plus_1).unwrap()
+ } else {
+ val
+ }
+ }
+
+ /// Helper function: truncate given value-"overflowed flag" pair to pointer size and
+ /// update "overflowed flag" if there was an overflow.
+ /// This should be called by all the other methods before returning!
+ #[inline]
+ fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
+ let val = u128::from(val);
+ let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
+ (u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1)
+ }
+
+ #[inline]
+ fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) {
+ // We do not need to check if i fits in a machine usize. If it doesn't,
+        // either the `overflowing_add` will wrap or res will not fit in a pointer.
+ let res = val.overflowing_add(i);
+ self.truncate_to_ptr(res)
+ }
+
+ #[inline]
+ fn overflowing_signed_offset(&self, val: u64, i: i64) -> (u64, bool) {
+ // We need to make sure that i fits in a machine isize.
+ let n = i.unsigned_abs();
+ if i >= 0 {
+ let (val, over) = self.overflowing_offset(val, n);
+ (val, over || i > self.machine_isize_max())
+ } else {
+ let res = val.overflowing_sub(n);
+ let (val, over) = self.truncate_to_ptr(res);
+ (val, over || i < self.machine_isize_min())
+ }
+ }
+
+ #[inline]
+ fn offset<'tcx>(&self, val: u64, i: u64) -> InterpResult<'tcx, u64> {
+ let (res, over) = self.overflowing_offset(val, i);
+ if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
+ }
+
+ #[inline]
+ fn signed_offset<'tcx>(&self, val: u64, i: i64) -> InterpResult<'tcx, u64> {
+ let (res, over) = self.overflowing_signed_offset(val, i);
+ if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
+ }
+}
+
+impl<T: HasDataLayout> PointerArithmetic for T {}
+
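+// Example semantics (sketch): on a target with 32-bit pointers,
+// `overflowing_offset(0xFFFF_FFFF, 1)` yields `(0, true)`: `truncate_to_ptr`
+// reduces the sum modulo 2^32 and the flag records that the mathematical
+// result did not fit.
+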
+/// This trait abstracts over the kind of provenance that is associated with a `Pointer`. It is
+/// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
+/// some global state.
+/// We don't actually care about this `Debug` bound (we use `Provenance::fmt` to format the entire
+/// pointer), but `derive` adds some unnecessary bounds.
+pub trait Provenance: Copy + fmt::Debug {
+ /// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
+    /// If `true`, ptr-to-int casts work by simply discarding the provenance.
+ /// If `false`, ptr-to-int casts are not supported. The offset *must* be relative in that case.
+ const OFFSET_IS_ADDR: bool;
+
+ /// We also use this trait to control whether to abort execution when a pointer is being partially overwritten
+ /// (this avoids a separate trait in `allocation.rs` just for this purpose).
+ const ERR_ON_PARTIAL_PTR_OVERWRITE: bool;
+
+ /// Determines how a pointer should be printed.
+ fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
+ where
+ Self: Sized;
+
+ /// If `OFFSET_IS_ADDR == false`, provenance must always be able to
+ /// identify the allocation this ptr points to (i.e., this must return `Some`).
+ /// Otherwise this function is best-effort (but must agree with `Machine::ptr_get_alloc`).
+ /// (Identifying the offset in that allocation, however, is harder -- use `Memory::ptr_get_alloc` for that.)
+ fn get_alloc_id(self) -> Option<AllocId>;
+}
+
+impl Provenance for AllocId {
+ // With the `AllocId` as provenance, the `offset` is interpreted *relative to the allocation*,
+ // so ptr-to-int casts are not possible (since we do not know the global physical offset).
+ const OFFSET_IS_ADDR: bool = false;
+
+ // For now, do not allow this, so that we keep our options open.
+ const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = true;
+
+ fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Forward `alternate` flag to `alloc_id` printing.
+ if f.alternate() {
+ write!(f, "{:#?}", ptr.provenance)?;
+ } else {
+ write!(f, "{:?}", ptr.provenance)?;
+ }
+ // Print offset only if it is non-zero.
+ if ptr.offset.bytes() > 0 {
+ write!(f, "+{:#x}", ptr.offset.bytes())?;
+ }
+ Ok(())
+ }
+
+ fn get_alloc_id(self) -> Option<AllocId> {
+ Some(self)
+ }
+}
+
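+// Rendering sketch: a `Pointer` whose `AllocId` prints as `alloc5` formats
+// as `alloc5+0x10` at offset 16, and as plain `alloc5` at offset zero.
+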
+/// Represents a pointer in the Miri engine.
+///
+/// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub struct Pointer<Prov = AllocId> {
+ pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Prov` type)
+ pub provenance: Prov,
+}
+
+static_assert_size!(Pointer, 16);
+// `Option<Prov>` pointers are also passed around quite a bit
+// (but not stored in permanent machine state).
+static_assert_size!(Pointer<Option<AllocId>>, 16);
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl<Prov: Provenance> fmt::Debug for Pointer<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Provenance::fmt(self, f)
+ }
+}
+
+impl<Prov: Provenance> fmt::Debug for Pointer<Option<Prov>> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.provenance {
+ Some(prov) => Provenance::fmt(&Pointer::new(prov, self.offset), f),
+ None => write!(f, "{:#x}[noalloc]", self.offset.bytes()),
+ }
+ }
+}
+
+impl<Prov: Provenance> fmt::Display for Pointer<Option<Prov>> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.provenance.is_none() && self.offset.bytes() == 0 {
+ write!(f, "null pointer")
+ } else {
+ fmt::Debug::fmt(self, f)
+ }
+ }
+}
+
+/// Produces a `Pointer` that points to the beginning of the `Allocation`.
+impl From<AllocId> for Pointer {
+ #[inline(always)]
+ fn from(alloc_id: AllocId) -> Self {
+ Pointer::new(alloc_id, Size::ZERO)
+ }
+}
+
+impl<Prov> From<Pointer<Prov>> for Pointer<Option<Prov>> {
+ #[inline(always)]
+ fn from(ptr: Pointer<Prov>) -> Self {
+ let (prov, offset) = ptr.into_parts();
+ Pointer::new(Some(prov), offset)
+ }
+}
+
+impl<Prov> Pointer<Option<Prov>> {
+ /// Convert this pointer that *might* have a provenance into a pointer that *definitely* has a
+ /// provenance, or an absolute address.
+ ///
+ /// This is rarely what you want; call `ptr_try_get_alloc_id` instead.
+ pub fn into_pointer_or_addr(self) -> Result<Pointer<Prov>, Size> {
+ match self.provenance {
+ Some(prov) => Ok(Pointer::new(prov, self.offset)),
+ None => Err(self.offset),
+ }
+ }
+
+ /// Returns the absolute address the pointer points to.
+    /// Only works if `Prov::OFFSET_IS_ADDR` is true!
+ pub fn addr(self) -> Size
+ where
+ Prov: Provenance,
+ {
+ assert!(Prov::OFFSET_IS_ADDR);
+ self.offset
+ }
+}
+
+impl<Prov> Pointer<Option<Prov>> {
+ #[inline(always)]
+ pub fn from_addr(addr: u64) -> Self {
+ Pointer { provenance: None, offset: Size::from_bytes(addr) }
+ }
+
+ #[inline(always)]
+ pub fn null() -> Self {
+ Pointer::from_addr(0)
+ }
+}
+
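+// Sketch: `Pointer::<Option<AllocId>>::null()` is `from_addr(0)`, which the
+// `Display` impl above renders as "null pointer"; any other provenance-free
+// address renders like `0x2a[noalloc]`.
+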
+impl<'tcx, Prov> Pointer<Prov> {
+ #[inline(always)]
+ pub fn new(provenance: Prov, offset: Size) -> Self {
+ Pointer { provenance, offset }
+ }
+
+    /// Obtain the constituents of this pointer. Note that the meaning of the offset depends on the type `Prov`!
+ /// This function must only be used in the implementation of `Machine::ptr_get_alloc`,
+ /// and when a `Pointer` is taken apart to be stored efficiently in an `Allocation`.
+ #[inline(always)]
+ pub fn into_parts(self) -> (Prov, Size) {
+ (self.provenance, self.offset)
+ }
+
+ pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
+ Pointer { provenance: f(self.provenance), ..self }
+ }
+
+ #[inline]
+ pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+ Ok(Pointer {
+ offset: Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
+ ..self
+ })
+ }
+
+ #[inline]
+ pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) {
+ let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
+ let ptr = Pointer { offset: Size::from_bytes(res), ..self };
+ (ptr, over)
+ }
+
+ #[inline(always)]
+ pub fn wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self {
+ self.overflowing_offset(i, cx).0
+ }
+
+ #[inline]
+ pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+ Ok(Pointer {
+ offset: Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
+ ..self
+ })
+ }
+
+ #[inline]
+ pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) {
+ let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
+ let ptr = Pointer { offset: Size::from_bytes(res), ..self };
+ (ptr, over)
+ }
+
+ #[inline(always)]
+ pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
+ self.overflowing_signed_offset(i, cx).0
+ }
+}
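+
+// Usage sketch (assuming `cx: &impl HasDataLayout` and a suitable `ptr`):
+// `ptr.offset(Size::from_bytes(8), cx)?` moves 8 bytes forward and raises a
+// `PointerArithOverflow` UB error if the new offset does not fit the
+// target's pointer size, while `wrapping_offset` discards the overflow flag.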
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
new file mode 100644
index 000000000..786927e2d
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -0,0 +1,217 @@
+use super::{ErrorHandled, EvalToConstValueResult, EvalToValTreeResult, GlobalId};
+
+use crate::mir;
+use crate::ty::subst::InternalSubsts;
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{self, query::TyCtxtAt, query::TyCtxtEnsure, TyCtxt};
+use rustc_hir::def_id::DefId;
+use rustc_span::{Span, DUMMY_SP};
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Evaluates a constant without providing any substitutions. This is useful to evaluate consts
+ /// that can't take any generic arguments like statics, const items or enum discriminants. If a
+    /// generic parameter is used within the constant, `ErrorHandled::TooGeneric` will be returned.
+ #[instrument(skip(self), level = "debug")]
+ pub fn const_eval_poly(self, def_id: DefId) -> EvalToConstValueResult<'tcx> {
+ // In some situations def_id will have substitutions within scope, but they aren't allowed
+ // to be used. So we can't use `Instance::mono`, instead we feed unresolved substitutions
+        // into `const_eval` which will return `ErrorHandled::TooGeneric` if any of them are
+ // encountered.
+ let substs = InternalSubsts::identity_for_item(self, def_id);
+ let instance = ty::Instance::new(def_id, substs);
+ let cid = GlobalId { instance, promoted: None };
+ let param_env = self.param_env(def_id).with_reveal_all_normalized(self);
+ self.const_eval_global_id(param_env, cid, None)
+ }
+
+    /// Resolves and evaluates a constant.
+ ///
+ /// The constant can be located on a trait like `<A as B>::C`, in which case the given
+    /// substitutions and environment are used to resolve the constant. Alternatively, if the
+    /// constant has generic parameters in scope, the substitutions are used to evaluate the value
+    /// of the constant. For example, in `fn foo<T>() { let _ = [0; bar::<T>()]; }` the repeat count
+    /// constant `bar::<T>()` requires a substitution for `T`; if the substitution for `T` is still
+    /// too generic for the constant to be evaluated, then `Err(ErrorHandled::TooGeneric)` is
+    /// returned.
+ #[instrument(level = "debug", skip(self))]
+ pub fn const_eval_resolve(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ ct: ty::Unevaluated<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToConstValueResult<'tcx> {
+ // Cannot resolve `Unevaluated` constants that contain inference
+ // variables. We reject those here since `resolve_opt_const_arg`
+ // would fail otherwise.
+ //
+ // When trying to evaluate constants containing inference variables,
+ // use `Infcx::const_eval_resolve` instead.
+ if ct.substs.has_infer_types_or_consts() {
+ bug!("did not expect inference variables here");
+ }
+
+ match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
+ Ok(Some(instance)) => {
+ let cid = GlobalId { instance, promoted: ct.promoted };
+ self.const_eval_global_id(param_env, cid, span)
+ }
+ Ok(None) => Err(ErrorHandled::TooGeneric),
+ Err(error_reported) => Err(ErrorHandled::Reported(error_reported)),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub fn const_eval_resolve_for_typeck(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ ct: ty::Unevaluated<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToValTreeResult<'tcx> {
+ // Cannot resolve `Unevaluated` constants that contain inference
+ // variables. We reject those here since `resolve_opt_const_arg`
+ // would fail otherwise.
+ //
+ // When trying to evaluate constants containing inference variables,
+ // use `Infcx::const_eval_resolve` instead.
+ if ct.substs.has_infer_types_or_consts() {
+ bug!("did not expect inference variables here");
+ }
+
+ match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
+ Ok(Some(instance)) => {
+ let cid = GlobalId { instance, promoted: ct.promoted };
+ self.const_eval_global_id_for_typeck(param_env, cid, span)
+ }
+ Ok(None) => Err(ErrorHandled::TooGeneric),
+ Err(error_reported) => Err(ErrorHandled::Reported(error_reported)),
+ }
+ }
+
+ pub fn const_eval_instance(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ instance: ty::Instance<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToConstValueResult<'tcx> {
+ self.const_eval_global_id(param_env, GlobalId { instance, promoted: None }, span)
+ }
+
+ /// Evaluate a constant to a `ConstValue`.
+ #[instrument(skip(self), level = "debug")]
+ pub fn const_eval_global_id(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ cid: GlobalId<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToConstValueResult<'tcx> {
+ let param_env = param_env.with_const();
+ // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
+ // improve caching of queries.
+ let inputs = self.erase_regions(param_env.and(cid));
+ if let Some(span) = span {
+ self.at(span).eval_to_const_value_raw(inputs)
+ } else {
+ self.eval_to_const_value_raw(inputs)
+ }
+ }
+
+ /// Evaluate a constant to a type-level constant.
+ #[instrument(skip(self), level = "debug")]
+ pub fn const_eval_global_id_for_typeck(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ cid: GlobalId<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToValTreeResult<'tcx> {
+ let param_env = param_env.with_const();
+ debug!(?param_env);
+ // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
+ // improve caching of queries.
+ let inputs = self.erase_regions(param_env.and(cid));
+ debug!(?inputs);
+ if let Some(span) = span {
+ self.at(span).eval_to_valtree(inputs)
+ } else {
+ self.eval_to_valtree(inputs)
+ }
+ }
+
+ /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
+ #[inline(always)]
+ pub fn eval_static_initializer(
+ self,
+ def_id: DefId,
+ ) -> Result<mir::ConstAllocation<'tcx>, ErrorHandled> {
+ self.at(DUMMY_SP).eval_static_initializer(def_id)
+ }
+}
+
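+// Call-graph sketch of the entry points above: `const_eval_poly` and
+// `const_eval_resolve` both funnel into `const_eval_global_id`, which erases
+// regions and dispatches to the `eval_to_const_value_raw` query (span-
+// annotated via `self.at(span)` when a `Span` is supplied); the
+// `_for_typeck` variants do the same through `eval_to_valtree`.
+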
+impl<'tcx> TyCtxtAt<'tcx> {
+ /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
+ pub fn eval_static_initializer(
+ self,
+ def_id: DefId,
+ ) -> Result<mir::ConstAllocation<'tcx>, ErrorHandled> {
+ trace!("eval_static_initializer: Need to compute {:?}", def_id);
+ assert!(self.is_static(def_id));
+ let instance = ty::Instance::mono(*self, def_id);
+ let gid = GlobalId { instance, promoted: None };
+ self.eval_to_allocation(gid, ty::ParamEnv::reveal_all())
+ }
+
+ /// Evaluate anything constant-like, returning the allocation of the final memory.
+ fn eval_to_allocation(
+ self,
+ gid: GlobalId<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Result<mir::ConstAllocation<'tcx>, ErrorHandled> {
+ let param_env = param_env.with_const();
+ trace!("eval_to_allocation: Need to compute {:?}", gid);
+ let raw_const = self.eval_to_allocation_raw(param_env.and(gid))?;
+ Ok(self.global_alloc(raw_const.alloc_id).unwrap_memory())
+ }
+}
+
+impl<'tcx> TyCtxtEnsure<'tcx> {
+ /// Evaluates a constant without providing any substitutions. This is useful to evaluate consts
+ /// that can't take any generic arguments like statics, const items or enum discriminants. If a
+    /// generic parameter is used within the constant, `ErrorHandled::TooGeneric` will be returned.
+ #[instrument(skip(self), level = "debug")]
+ pub fn const_eval_poly(self, def_id: DefId) {
+ // In some situations def_id will have substitutions within scope, but they aren't allowed
+ // to be used. So we can't use `Instance::mono`, instead we feed unresolved substitutions
+        // into `const_eval` which will return `ErrorHandled::TooGeneric` if any of them are
+ // encountered.
+ let substs = InternalSubsts::identity_for_item(self.tcx, def_id);
+ let instance = ty::Instance::new(def_id, substs);
+ let cid = GlobalId { instance, promoted: None };
+ let param_env =
+ self.tcx.param_env(def_id).with_reveal_all_normalized(self.tcx).with_const();
+ // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
+ // improve caching of queries.
+ let inputs = self.tcx.erase_regions(param_env.and(cid));
+ self.eval_to_const_value_raw(inputs)
+ }
+
+ /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
+ pub fn eval_static_initializer(self, def_id: DefId) {
+ trace!("eval_static_initializer: Need to compute {:?}", def_id);
+ assert!(self.tcx.is_static(def_id));
+ let instance = ty::Instance::mono(self.tcx, def_id);
+ let gid = GlobalId { instance, promoted: None };
+ let param_env = ty::ParamEnv::reveal_all().with_const();
+ trace!("eval_to_allocation: Need to compute {:?}", gid);
+ self.eval_to_allocation_raw(param_env.and(gid))
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Destructure a MIR constant ADT or array into its variant index and its field values.
+    /// Panics if the destructuring fails; use `try_destructure_mir_constant` for the fallible version.
+ pub fn destructure_mir_constant(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ constant: mir::ConstantKind<'tcx>,
+ ) -> mir::DestructuredMirConstant<'tcx> {
+ self.try_destructure_mir_constant(param_env.and(constant)).unwrap()
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
new file mode 100644
index 000000000..834c114ee
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -0,0 +1,651 @@
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+
+use rustc_apfloat::{
+ ieee::{Double, Single},
+ Float,
+};
+use rustc_macros::HashStable;
+use rustc_target::abi::{HasDataLayout, Size};
+
+use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt};
+
+use super::{
+ AllocId, AllocRange, ConstAllocation, InterpResult, Pointer, PointerArithmetic, Provenance,
+ ScalarSizeMismatch,
+};
+
+/// Represents the result of const evaluation via the `eval_to_allocation` query.
+#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
+pub struct ConstAlloc<'tcx> {
+    // the value lives here, at offset 0, and that allocation definitely is a `GlobalAlloc::Memory`
+    // (so you can use `GlobalAlloc::unwrap_memory`).
+ pub alloc_id: AllocId,
+ pub ty: Ty<'tcx>,
+}
+
+/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
+/// array length computations, enum discriminants and the pattern matching logic.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum ConstValue<'tcx> {
+ /// Used only for types with `layout::abi::Scalar` ABI.
+ ///
+ /// Not using the enum `Value` to encode that this must not be `Uninit`.
+ Scalar(Scalar),
+
+ /// Only used for ZSTs.
+ ZeroSized,
+
+ /// Used only for `&[u8]` and `&str`
+ Slice { data: ConstAllocation<'tcx>, start: usize, end: usize },
+
+ /// A value not represented/representable by `Scalar` or `Slice`
+ ByRef {
+ /// The backing memory of the value, may contain more memory than needed for just the value
+ /// in order to share `ConstAllocation`s between values
+ alloc: ConstAllocation<'tcx>,
+ /// Offset into `alloc`
+ offset: Size,
+ },
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ConstValue<'_>, 32);
+
+impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
+ type Lifted = ConstValue<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ConstValue<'tcx>> {
+ Some(match self {
+ ConstValue::Scalar(s) => ConstValue::Scalar(s),
+ ConstValue::ZeroSized => ConstValue::ZeroSized,
+ ConstValue::Slice { data, start, end } => {
+ ConstValue::Slice { data: tcx.lift(data)?, start, end }
+ }
+ ConstValue::ByRef { alloc, offset } => {
+ ConstValue::ByRef { alloc: tcx.lift(alloc)?, offset }
+ }
+ })
+ }
+}
+
+impl<'tcx> ConstValue<'tcx> {
+ #[inline]
+ pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
+ match *self {
+ ConstValue::ByRef { .. } | ConstValue::Slice { .. } | ConstValue::ZeroSized => None,
+ ConstValue::Scalar(val) => Some(val),
+ }
+ }
+
+ pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
+ Some(self.try_to_scalar()?.assert_int())
+ }
+
+ pub fn try_to_bits(&self, size: Size) -> Option<u128> {
+ self.try_to_scalar_int()?.to_bits(size).ok()
+ }
+
+ pub fn try_to_bool(&self) -> Option<bool> {
+ self.try_to_scalar_int()?.try_into().ok()
+ }
+
+ pub fn try_to_machine_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
+ self.try_to_scalar_int()?.try_to_machine_usize(tcx).ok()
+ }
+
+ pub fn try_to_bits_for_ty(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Option<u128> {
+ let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+ self.try_to_bits(size)
+ }
+
+ pub fn from_bool(b: bool) -> Self {
+ ConstValue::Scalar(Scalar::from_bool(b))
+ }
+
+ pub fn from_u64(i: u64) -> Self {
+ ConstValue::Scalar(Scalar::from_u64(i))
+ }
+
+ pub fn from_machine_usize(i: u64, cx: &impl HasDataLayout) -> Self {
+ ConstValue::Scalar(Scalar::from_machine_usize(i, cx))
+ }
+}
+
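+// Sketch: `ConstValue::from_bool(true).try_to_bool()` yields `Some(true)`,
+// while `try_to_scalar` on a `Slice` or `ByRef` value returns `None`, so
+// the chained `try_to_*` helpers bail out gracefully for non-scalar values.
+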
+/// A `Scalar` represents an immediate, primitive value existing outside of a
+/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 16 bytes in
+/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes
+/// of a simple value or a pointer into another `Allocation`.
+///
+/// These variants would be private if there was a convenient way to achieve that in Rust.
+/// Do *not* match on a `Scalar`! Use the various `to_*` methods instead.
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum Scalar<Prov = AllocId> {
+ /// The raw bytes of a simple value.
+ Int(ScalarInt),
+
+ /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
+ /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
+ /// relocation and its associated offset together as a `Pointer` here.
+ ///
+ /// We also store the size of the pointer, such that a `Scalar` always knows how big it is.
+ /// The size is always the pointer size of the current target, but this is not information
+ /// that we always have readily available.
+ Ptr(Pointer<Prov>, u8),
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(Scalar, 24);
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl<Prov: Provenance> fmt::Debug for Scalar<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Scalar::Ptr(ptr, _size) => write!(f, "{:?}", ptr),
+ Scalar::Int(int) => write!(f, "{:?}", int),
+ }
+ }
+}
+
+impl<Prov: Provenance> fmt::Display for Scalar<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
+ Scalar::Int(int) => write!(f, "{}", int),
+ }
+ }
+}
+
+impl<Prov: Provenance> fmt::LowerHex for Scalar<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
+ Scalar::Int(int) => write!(f, "{:#x}", int),
+ }
+ }
+}
+
+impl<Prov> From<Single> for Scalar<Prov> {
+ #[inline(always)]
+ fn from(f: Single) -> Self {
+ Scalar::from_f32(f)
+ }
+}
+
+impl<Prov> From<Double> for Scalar<Prov> {
+ #[inline(always)]
+ fn from(f: Double) -> Self {
+ Scalar::from_f64(f)
+ }
+}
+
+impl<Prov> From<ScalarInt> for Scalar<Prov> {
+ #[inline(always)]
+ fn from(ptr: ScalarInt) -> Self {
+ Scalar::Int(ptr)
+ }
+}
+
+impl<Prov> Scalar<Prov> {
+ #[inline(always)]
+ pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
+ Scalar::Ptr(ptr, u8::try_from(cx.pointer_size().bytes()).unwrap())
+ }
+
+ /// Create a Scalar from a pointer with an `Option<_>` provenance (where `None` represents a
+ /// plain integer / "invalid" pointer).
+ pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
+ match ptr.into_parts() {
+ (Some(prov), offset) => Scalar::from_pointer(Pointer::new(prov, offset), cx),
+ (None, offset) => {
+ Scalar::Int(ScalarInt::try_from_uint(offset.bytes(), cx.pointer_size()).unwrap())
+ }
+ }
+ }
+
+ #[inline]
+ pub fn null_ptr(cx: &impl HasDataLayout) -> Self {
+ Scalar::Int(ScalarInt::null(cx.pointer_size()))
+ }
+
+ #[inline]
+ pub fn from_bool(b: bool) -> Self {
+ Scalar::Int(b.into())
+ }
+
+ #[inline]
+ pub fn from_char(c: char) -> Self {
+ Scalar::Int(c.into())
+ }
+
+ #[inline]
+ pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
+ ScalarInt::try_from_uint(i, size).map(Scalar::Int)
+ }
+
+ #[inline]
+ pub fn from_uint(i: impl Into<u128>, size: Size) -> Self {
+ let i = i.into();
+ Self::try_from_uint(i, size)
+ .unwrap_or_else(|| bug!("Unsigned value {:#x} does not fit in {} bits", i, size.bits()))
+ }
+
+ #[inline]
+ pub fn from_u8(i: u8) -> Self {
+ Scalar::Int(i.into())
+ }
+
+ #[inline]
+ pub fn from_u16(i: u16) -> Self {
+ Scalar::Int(i.into())
+ }
+
+ #[inline]
+ pub fn from_u32(i: u32) -> Self {
+ Scalar::Int(i.into())
+ }
+
+ #[inline]
+ pub fn from_u64(i: u64) -> Self {
+ Scalar::Int(i.into())
+ }
+
+ #[inline]
+ pub fn from_machine_usize(i: u64, cx: &impl HasDataLayout) -> Self {
+ Self::from_uint(i, cx.data_layout().pointer_size)
+ }
+
+ #[inline]
+ pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
+ ScalarInt::try_from_int(i, size).map(Scalar::Int)
+ }
+
+ #[inline]
+ pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
+ let i = i.into();
+ Self::try_from_int(i, size)
+ .unwrap_or_else(|| bug!("Signed value {:#x} does not fit in {} bits", i, size.bits()))
+ }
+
+ #[inline]
+ pub fn from_i32(i: i32) -> Self {
+ Self::from_int(i, Size::from_bits(32))
+ }
+
+ #[inline]
+ pub fn from_i64(i: i64) -> Self {
+ Self::from_int(i, Size::from_bits(64))
+ }
+
+ #[inline]
+ pub fn from_machine_isize(i: i64, cx: &impl HasDataLayout) -> Self {
+ Self::from_int(i, cx.data_layout().pointer_size)
+ }
+
+ #[inline]
+ pub fn from_f32(f: Single) -> Self {
+ Scalar::Int(f.into())
+ }
+
+ #[inline]
+ pub fn from_f64(f: Double) -> Self {
+ Scalar::Int(f.into())
+ }
+
+ /// This is almost certainly not the method you want! You should dispatch on the type
+ /// and use `to_{u8,u16,...}`/`scalar_to_ptr` to perform ptr-to-int / int-to-ptr casts as needed.
+ ///
+ /// This method only exists for the benefit of low-level operations that truly need to treat the
+ /// scalar in whatever form it is.
+ ///
+ /// This throws UB (instead of ICEing) on a size mismatch since size mismatches can arise in
+ /// Miri when someone declares a function that we shim (such as `malloc`) with a wrong type.
+ #[inline]
+ pub fn to_bits_or_ptr_internal(
+ self,
+ target_size: Size,
+ ) -> Result<Result<u128, Pointer<Prov>>, ScalarSizeMismatch> {
+ assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+ Ok(match self {
+ Scalar::Int(int) => Ok(int.to_bits(target_size).map_err(|size| {
+ ScalarSizeMismatch { target_size: target_size.bytes(), data_size: size.bytes() }
+ })?),
+ Scalar::Ptr(ptr, sz) => {
+ if target_size.bytes() != u64::from(sz) {
+ return Err(ScalarSizeMismatch {
+ target_size: target_size.bytes(),
+ data_size: sz.into(),
+ });
+ }
+ Err(ptr)
+ }
+ })
+ }
+}
+
+impl<'tcx, Prov: Provenance> Scalar<Prov> {
+ pub fn to_pointer(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, Pointer<Option<Prov>>> {
+ match self
+ .to_bits_or_ptr_internal(cx.pointer_size())
+ .map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
+ {
+ Err(ptr) => Ok(ptr.into()),
+ Ok(bits) => {
+ let addr = u64::try_from(bits).unwrap();
+ Ok(Pointer::from_addr(addr))
+ }
+ }
+ }
+
+    /// Fundamental scalar-to-int (cast) operation. Many convenience wrappers exist below that you
+ /// likely want to use instead.
+ ///
+ /// Will perform ptr-to-int casts if needed and possible.
+ /// If that fails, we know the offset is relative, so we return an "erased" Scalar
+ /// (which is useful for error messages but not much else).
+ #[inline]
+ pub fn try_to_int(self) -> Result<ScalarInt, Scalar<AllocId>> {
+ match self {
+ Scalar::Int(int) => Ok(int),
+ Scalar::Ptr(ptr, sz) => {
+ if Prov::OFFSET_IS_ADDR {
+ Ok(ScalarInt::try_from_uint(ptr.offset.bytes(), Size::from_bytes(sz)).unwrap())
+ } else {
+ // We know `offset` is relative, since `OFFSET_IS_ADDR == false`.
+ let (prov, offset) = ptr.into_parts();
+ // Because `OFFSET_IS_ADDR == false`, this unwrap can never fail.
+ Err(Scalar::Ptr(Pointer::new(prov.get_alloc_id().unwrap(), offset), sz))
+ }
+ }
+ }
+ }
+
+ #[inline(always)]
+ pub fn assert_int(self) -> ScalarInt {
+ self.try_to_int().unwrap()
+ }
+
+ /// This throws UB (instead of ICEing) on a size mismatch since size mismatches can arise in
+ /// Miri when someone declares a function that we shim (such as `malloc`) with a wrong type.
+ #[inline]
+ pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
+ assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+ self.try_to_int().map_err(|_| err_unsup!(ReadPointerAsBytes))?.to_bits(target_size).map_err(
+ |size| {
+ err_ub!(ScalarSizeMismatch(ScalarSizeMismatch {
+ target_size: target_size.bytes(),
+ data_size: size.bytes(),
+ }))
+ .into()
+ },
+ )
+ }
+
+ #[inline(always)]
+ pub fn assert_bits(self, target_size: Size) -> u128 {
+ self.to_bits(target_size).unwrap()
+ }
+
+ pub fn to_bool(self) -> InterpResult<'tcx, bool> {
+ let val = self.to_u8()?;
+ match val {
+ 0 => Ok(false),
+ 1 => Ok(true),
+ _ => throw_ub!(InvalidBool(val)),
+ }
+ }
+
+ pub fn to_char(self) -> InterpResult<'tcx, char> {
+ let val = self.to_u32()?;
+ match std::char::from_u32(val) {
+ Some(c) => Ok(c),
+ None => throw_ub!(InvalidChar(val)),
+ }
+ }
+
+ /// Converts the scalar to produce an unsigned integer of the given size.
+ /// Fails if the scalar is a pointer.
+ #[inline]
+ pub fn to_uint(self, size: Size) -> InterpResult<'tcx, u128> {
+ self.to_bits(size)
+ }
+
+ /// Converts the scalar to produce a `u8`. Fails if the scalar is a pointer.
+ pub fn to_u8(self) -> InterpResult<'tcx, u8> {
+ self.to_uint(Size::from_bits(8)).map(|v| u8::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce a `u16`. Fails if the scalar is a pointer.
+ pub fn to_u16(self) -> InterpResult<'tcx, u16> {
+ self.to_uint(Size::from_bits(16)).map(|v| u16::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce a `u32`. Fails if the scalar is a pointer.
+ pub fn to_u32(self) -> InterpResult<'tcx, u32> {
+ self.to_uint(Size::from_bits(32)).map(|v| u32::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce a `u64`. Fails if the scalar is a pointer.
+ pub fn to_u64(self) -> InterpResult<'tcx, u64> {
+ self.to_uint(Size::from_bits(64)).map(|v| u64::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce a `u128`. Fails if the scalar is a pointer.
+ pub fn to_u128(self) -> InterpResult<'tcx, u128> {
+ self.to_uint(Size::from_bits(128))
+ }
+
+ /// Converts the scalar to produce a machine-pointer-sized unsigned integer.
+ /// Fails if the scalar is a pointer.
+ pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+ let b = self.to_uint(cx.data_layout().pointer_size)?;
+ Ok(u64::try_from(b).unwrap())
+ }
+
+ /// Converts the scalar to produce a signed integer of the given size.
+ /// Fails if the scalar is a pointer.
+ #[inline]
+ pub fn to_int(self, size: Size) -> InterpResult<'tcx, i128> {
+ let b = self.to_bits(size)?;
+ Ok(size.sign_extend(b) as i128)
+ }
+
+ /// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
+ pub fn to_i8(self) -> InterpResult<'tcx, i8> {
+ self.to_int(Size::from_bits(8)).map(|v| i8::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce an `i16`. Fails if the scalar is a pointer.
+ pub fn to_i16(self) -> InterpResult<'tcx, i16> {
+ self.to_int(Size::from_bits(16)).map(|v| i16::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce an `i32`. Fails if the scalar is a pointer.
+ pub fn to_i32(self) -> InterpResult<'tcx, i32> {
+ self.to_int(Size::from_bits(32)).map(|v| i32::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce an `i64`. Fails if the scalar is a pointer.
+ pub fn to_i64(self) -> InterpResult<'tcx, i64> {
+ self.to_int(Size::from_bits(64)).map(|v| i64::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce an `i128`. Fails if the scalar is a pointer.
+ pub fn to_i128(self) -> InterpResult<'tcx, i128> {
+ self.to_int(Size::from_bits(128))
+ }
+
+ /// Converts the scalar to produce a machine-pointer-sized signed integer.
+ /// Fails if the scalar is a pointer.
+ pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
+ let b = self.to_int(cx.data_layout().pointer_size)?;
+ Ok(i64::try_from(b).unwrap())
+ }
+
+ #[inline]
+ pub fn to_f32(self) -> InterpResult<'tcx, Single> {
+ // Going through `u32` to check size and truncation.
+ Ok(Single::from_bits(self.to_u32()?.into()))
+ }
+
+ #[inline]
+ pub fn to_f64(self) -> InterpResult<'tcx, Double> {
+ // Going through `u64` to check size and truncation.
+ Ok(Double::from_bits(self.to_u64()?.into()))
+ }
+}
+
+#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
+pub enum ScalarMaybeUninit<Prov = AllocId> {
+ Scalar(Scalar<Prov>),
+ Uninit,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ScalarMaybeUninit, 24);
+
+impl<Prov> From<Scalar<Prov>> for ScalarMaybeUninit<Prov> {
+ #[inline(always)]
+ fn from(s: Scalar<Prov>) -> Self {
+ ScalarMaybeUninit::Scalar(s)
+ }
+}
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl<Prov: Provenance> fmt::Debug for ScalarMaybeUninit<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
+ ScalarMaybeUninit::Scalar(s) => write!(f, "{:?}", s),
+ }
+ }
+}
+
+impl<Prov: Provenance> fmt::LowerHex for ScalarMaybeUninit<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
+ ScalarMaybeUninit::Scalar(s) => write!(f, "{:x}", s),
+ }
+ }
+}
+
+impl<Prov> ScalarMaybeUninit<Prov> {
+ #[inline]
+ pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
+ ScalarMaybeUninit::Scalar(Scalar::from_pointer(ptr, cx))
+ }
+
+ #[inline]
+ pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
+ ScalarMaybeUninit::Scalar(Scalar::from_maybe_pointer(ptr, cx))
+ }
+
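+ /// Returns the contained scalar, or an undefined-behavior error if the value is
+ /// uninitialized. An illustrative sketch (assuming `Scalar::from_u32`, defined
+ /// earlier in this file):
+ ///
+ /// ```ignore (illustrative)
+ /// let v: ScalarMaybeUninit = Scalar::from_u32(7).into();
+ /// let s = v.check_init()?; // would throw UB for `ScalarMaybeUninit::Uninit`
+ /// ```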
+ #[inline]
+ pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Prov>> {
+ match self {
+ ScalarMaybeUninit::Scalar(scalar) => Ok(scalar),
+ ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
+ }
+ }
+}
+
+impl<'tcx, Prov: Provenance> ScalarMaybeUninit<Prov> {
+ #[inline(always)]
+ pub fn to_pointer(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, Pointer<Option<Prov>>> {
+ self.check_init()?.to_pointer(cx)
+ }
+
+ #[inline(always)]
+ pub fn to_bool(self) -> InterpResult<'tcx, bool> {
+ self.check_init()?.to_bool()
+ }
+
+ #[inline(always)]
+ pub fn to_char(self) -> InterpResult<'tcx, char> {
+ self.check_init()?.to_char()
+ }
+
+ #[inline(always)]
+ pub fn to_f32(self) -> InterpResult<'tcx, Single> {
+ self.check_init()?.to_f32()
+ }
+
+ #[inline(always)]
+ pub fn to_f64(self) -> InterpResult<'tcx, Double> {
+ self.check_init()?.to_f64()
+ }
+
+ #[inline(always)]
+ pub fn to_u8(self) -> InterpResult<'tcx, u8> {
+ self.check_init()?.to_u8()
+ }
+
+ #[inline(always)]
+ pub fn to_u16(self) -> InterpResult<'tcx, u16> {
+ self.check_init()?.to_u16()
+ }
+
+ #[inline(always)]
+ pub fn to_u32(self) -> InterpResult<'tcx, u32> {
+ self.check_init()?.to_u32()
+ }
+
+ #[inline(always)]
+ pub fn to_u64(self) -> InterpResult<'tcx, u64> {
+ self.check_init()?.to_u64()
+ }
+
+ #[inline(always)]
+ pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+ self.check_init()?.to_machine_usize(cx)
+ }
+
+ #[inline(always)]
+ pub fn to_i8(self) -> InterpResult<'tcx, i8> {
+ self.check_init()?.to_i8()
+ }
+
+ #[inline(always)]
+ pub fn to_i16(self) -> InterpResult<'tcx, i16> {
+ self.check_init()?.to_i16()
+ }
+
+ #[inline(always)]
+ pub fn to_i32(self) -> InterpResult<'tcx, i32> {
+ self.check_init()?.to_i32()
+ }
+
+ #[inline(always)]
+ pub fn to_i64(self) -> InterpResult<'tcx, i64> {
+ self.check_init()?.to_i64()
+ }
+
+ #[inline(always)]
+ pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
+ self.check_init()?.to_machine_isize(cx)
+ }
+}
+
+/// Gets the bytes of a constant slice value.
+pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> &'tcx [u8] {
+ if let ConstValue::Slice { data, start, end } = val {
+ let len = end - start;
+ data.inner()
+ .get_bytes(
+ cx,
+ AllocRange { start: Size::from_bytes(start), size: Size::from_bytes(len) },
+ )
+ .unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err))
+ } else {
+ bug!("expected const slice, but found another const value");
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
new file mode 100644
index 000000000..7ab71f900
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -0,0 +1,2900 @@
+//! MIR datatypes and passes. See the [rustc dev guide] for more info.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html
+
+use crate::mir::interpret::{
+ AllocRange, ConstAllocation, ConstValue, GlobalAlloc, LitToConstInput, Scalar,
+};
+use crate::mir::visit::MirVisitable;
+use crate::ty::codec::{TyDecoder, TyEncoder};
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
+use crate::ty::print::{FmtPrinter, Printer};
+use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
+use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use crate::ty::{self, List, Ty, TyCtxt};
+use crate::ty::{AdtDef, InstanceDef, ScalarInt, UserTypeAnnotationIndex};
+
+use rustc_data_structures::captures::Captures;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def::{CtorKind, Namespace};
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_hir::{self, GeneratorKind};
+use rustc_hir::{self as hir, HirId};
+use rustc_session::Session;
+use rustc_target::abi::{Size, VariantIdx};
+
+use polonius_engine::Atom;
+pub use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_index::bit_set::BitMatrix;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_serialize::{Decodable, Encodable};
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+
+use either::Either;
+
+use std::borrow::Cow;
+use std::convert::TryInto;
+use std::fmt::{self, Debug, Display, Formatter, Write};
+use std::ops::{ControlFlow, Index, IndexMut};
+use std::{iter, mem};
+
+pub use self::query::*;
+pub use basic_blocks::BasicBlocks;
+
+mod basic_blocks;
+pub mod coverage;
+mod generic_graph;
+pub mod generic_graphviz;
+mod graph_cyclic_cache;
+pub mod graphviz;
+pub mod interpret;
+pub mod mono;
+pub mod patch;
+mod predecessors;
+pub mod pretty;
+mod query;
+pub mod spanview;
+mod syntax;
+pub use syntax::*;
+mod switch_sources;
+pub mod tcx;
+pub mod terminator;
+pub use terminator::*;
+
+pub mod traversal;
+mod type_foldable;
+mod type_visitable;
+pub mod visit;
+
+pub use self::generic_graph::graphviz_safe_def_name;
+pub use self::graphviz::write_mir_graphviz;
+pub use self::pretty::{
+ create_dump_file, display_allocation, dump_enabled, dump_mir, write_mir_pretty, PassWhere,
+};
+
+/// Types for locals
+pub type LocalDecls<'tcx> = IndexVec<Local, LocalDecl<'tcx>>;
+
+pub trait HasLocalDecls<'tcx> {
+ fn local_decls(&self) -> &LocalDecls<'tcx>;
+}
+
+impl<'tcx> HasLocalDecls<'tcx> for LocalDecls<'tcx> {
+ #[inline]
+ fn local_decls(&self) -> &LocalDecls<'tcx> {
+ self
+ }
+}
+
+impl<'tcx> HasLocalDecls<'tcx> for Body<'tcx> {
+ #[inline]
+ fn local_decls(&self) -> &LocalDecls<'tcx> {
+ &self.local_decls
+ }
+}
+
+/// A streamlined trait that you can implement to create a pass; the
+/// pass will be named after the type, and it will consist of a main
+/// loop that goes over each available MIR and applies `run_pass`.
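+///
+/// An illustrative sketch of a minimal pass (hypothetical; not part of this crate):
+///
+/// ```ignore (illustrative)
+/// struct RemoveNops;
+///
+/// impl<'tcx> MirPass<'tcx> for RemoveNops {
+///     fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+///         // `run_pass` may mutate the body in place.
+///         for block in body.basic_blocks_mut() {
+///             block.statements.retain(|s| !matches!(s.kind, StatementKind::Nop));
+///         }
+///     }
+/// }
+/// ```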
+pub trait MirPass<'tcx> {
+ fn name(&self) -> Cow<'_, str> {
+ let name = std::any::type_name::<Self>();
+ if let Some(tail) = name.rfind(':') {
+ Cow::from(&name[tail + 1..])
+ } else {
+ Cow::from(name)
+ }
+ }
+
+ /// Returns `true` if this pass is enabled with the current combination of compiler flags.
+ fn is_enabled(&self, _sess: &Session) -> bool {
+ true
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>);
+
+ /// If this pass causes the MIR to enter a new phase, return that phase.
+ fn phase_change(&self) -> Option<MirPhase> {
+ None
+ }
+
+ fn is_mir_dump_enabled(&self) -> bool {
+ true
+ }
+}
+
+impl MirPhase {
+ /// Gets the index of the current MirPhase within the set of all `MirPhase`s.
+ pub fn phase_index(&self) -> usize {
+ *self as usize
+ }
+}
+
+/// Where a specific `mir::Body` comes from.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[derive(HashStable, TyEncodable, TyDecodable, TypeFoldable, TypeVisitable)]
+pub struct MirSource<'tcx> {
+ pub instance: InstanceDef<'tcx>,
+
+ /// If `Some`, this is a promoted rvalue within the parent function.
+ pub promoted: Option<Promoted>,
+}
+
+impl<'tcx> MirSource<'tcx> {
+ pub fn item(def_id: DefId) -> Self {
+ MirSource {
+ instance: InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+ promoted: None,
+ }
+ }
+
+ pub fn from_instance(instance: InstanceDef<'tcx>) -> Self {
+ MirSource { instance, promoted: None }
+ }
+
+ pub fn with_opt_param(self) -> ty::WithOptConstParam<DefId> {
+ self.instance.with_opt_param()
+ }
+
+ #[inline]
+ pub fn def_id(&self) -> DefId {
+ self.instance.def_id()
+ }
+}
+
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub struct GeneratorInfo<'tcx> {
+ /// The yield type of the function, if it is a generator.
+ pub yield_ty: Option<Ty<'tcx>>,
+
+ /// Generator drop glue.
+ pub generator_drop: Option<Body<'tcx>>,
+
+ /// The layout of a generator. Produced by the state transformation.
+ pub generator_layout: Option<GeneratorLayout<'tcx>>,
+
+ /// If this is a generator then record the type of source expression that caused this generator
+ /// to be created.
+ pub generator_kind: GeneratorKind,
+}
+
+/// The lowered representation of a single function.
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub struct Body<'tcx> {
+ /// A list of basic blocks. References to basic blocks use a newtyped index type [`BasicBlock`]
+ /// that indexes into this vector.
+ pub basic_blocks: BasicBlocks<'tcx>,
+
+ /// Records how far through the "desugaring and optimization" process this particular
+ /// MIR has traversed. This is particularly useful when inlining, since in that context
+ /// we instantiate the promoted constants and add them to our promoted vector -- but those
+ /// promoted items have already been optimized, whereas ours have not. This field allows
+ /// us to see the difference and forego optimization on the inlined promoted items.
+ pub phase: MirPhase,
+
+ pub source: MirSource<'tcx>,
+
+ /// A list of source scopes; these are referenced by statements
+ /// and used for debuginfo. Indexed by a `SourceScope`.
+ pub source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
+
+ pub generator: Option<Box<GeneratorInfo<'tcx>>>,
+
+ /// Declarations of locals.
+ ///
+ /// The first local is the return value pointer, followed by `arg_count`
+ /// locals for the function arguments, followed by any user-declared
+ /// variables and temporaries.
+ pub local_decls: LocalDecls<'tcx>,
+
+ /// User type annotations.
+ pub user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
+
+ /// The number of arguments this function takes.
+ ///
+ /// Starting at local 1, `arg_count` locals will be provided by the caller
+ /// and can be assumed to be initialized.
+ ///
+ /// If this MIR was built for a constant, this will be 0.
+ pub arg_count: usize,
+
+ /// Mark an argument local (which must be a tuple) as getting passed as
+ /// its individual components at the LLVM level.
+ ///
+ /// This is used for the "rust-call" ABI.
+ pub spread_arg: Option<Local>,
+
+ /// Debug information pertaining to user variables, including captures.
+ pub var_debug_info: Vec<VarDebugInfo<'tcx>>,
+
+ /// A span representing this MIR, for error reporting.
+ pub span: Span,
+
+ /// Constants that are required to evaluate successfully for this MIR to be well-formed.
+ /// We hold in this field all the constants we are not able to evaluate yet.
+ pub required_consts: Vec<Constant<'tcx>>,
+
+ /// Does this body use generic parameters? This is used for the `ConstEvaluatable` check.
+ ///
+ /// Note that this does not actually mean that this body is not computable right now.
+ /// The repeat count in the following example is polymorphic, but can still be evaluated
+ /// without knowing anything about the type parameter `T`.
+ ///
+ /// ```rust
+ /// fn test<T>() {
+ /// let _ = [0; std::mem::size_of::<*mut T>()];
+ /// }
+ /// ```
+ ///
+ /// **WARNING**: Do not change this flag after the MIR was originally created, even if an optimization
+ /// removed the last mention of all generic params. We do not want to rely on optimizations and
+ /// potentially allow things like `[u8; std::mem::size_of::<T>() * 0]` due to this.
+ pub is_polymorphic: bool,
+
+ pub tainted_by_errors: Option<ErrorGuaranteed>,
+}
+
+impl<'tcx> Body<'tcx> {
+ pub fn new(
+ source: MirSource<'tcx>,
+ basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ local_decls: LocalDecls<'tcx>,
+ user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
+ arg_count: usize,
+ var_debug_info: Vec<VarDebugInfo<'tcx>>,
+ span: Span,
+ generator_kind: Option<GeneratorKind>,
+ tainted_by_errors: Option<ErrorGuaranteed>,
+ ) -> Self {
+ // We need `arg_count` locals, and one for the return place.
+ assert!(
+ local_decls.len() > arg_count,
+ "expected at least {} locals, got {}",
+ arg_count + 1,
+ local_decls.len()
+ );
+
+ let mut body = Body {
+ phase: MirPhase::Built,
+ source,
+ basic_blocks: BasicBlocks::new(basic_blocks),
+ source_scopes,
+ generator: generator_kind.map(|generator_kind| {
+ Box::new(GeneratorInfo {
+ yield_ty: None,
+ generator_drop: None,
+ generator_layout: None,
+ generator_kind,
+ })
+ }),
+ local_decls,
+ user_type_annotations,
+ arg_count,
+ spread_arg: None,
+ var_debug_info,
+ span,
+ required_consts: Vec::new(),
+ is_polymorphic: false,
+ tainted_by_errors,
+ };
+ body.is_polymorphic = body.has_param_types_or_consts();
+ body
+ }
+
+ /// Returns a partially initialized MIR body containing only a list of basic blocks.
+ ///
+ /// The returned MIR contains no `LocalDecl`s (even for the return place) or source scopes. It
+ /// is only useful for testing but cannot be `#[cfg(test)]` because it is used in a different
+ /// crate.
+ pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
+ let mut body = Body {
+ phase: MirPhase::Built,
+ source: MirSource::item(CRATE_DEF_ID.to_def_id()),
+ basic_blocks: BasicBlocks::new(basic_blocks),
+ source_scopes: IndexVec::new(),
+ generator: None,
+ local_decls: IndexVec::new(),
+ user_type_annotations: IndexVec::new(),
+ arg_count: 0,
+ spread_arg: None,
+ span: DUMMY_SP,
+ required_consts: Vec::new(),
+ var_debug_info: Vec::new(),
+ is_polymorphic: false,
+ tainted_by_errors: None,
+ };
+ body.is_polymorphic = body.has_param_types_or_consts();
+ body
+ }
+
+ #[inline]
+ pub fn basic_blocks(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ &self.basic_blocks
+ }
+
+ #[inline]
+ pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ self.basic_blocks.as_mut()
+ }
+
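+ /// Classifies `local` as the return place, an argument, a user variable, or a
+ /// temporary, purely from its index and declaration. Illustrative:
+ ///
+ /// ```ignore (illustrative)
+ /// assert_eq!(body.local_kind(RETURN_PLACE), LocalKind::ReturnPointer);
+ /// ```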
+ #[inline]
+ pub fn local_kind(&self, local: Local) -> LocalKind {
+ let index = local.as_usize();
+ if index == 0 {
+ debug_assert!(
+ self.local_decls[local].mutability == Mutability::Mut,
+ "return place should be mutable"
+ );
+
+ LocalKind::ReturnPointer
+ } else if index < self.arg_count + 1 {
+ LocalKind::Arg
+ } else if self.local_decls[local].is_user_variable() {
+ LocalKind::Var
+ } else {
+ LocalKind::Temp
+ }
+ }
+
+ /// Returns an iterator over all user-declared mutable locals.
+ #[inline]
+ pub fn mut_vars_iter<'a>(&'a self) -> impl Iterator<Item = Local> + Captures<'tcx> + 'a {
+ (self.arg_count + 1..self.local_decls.len()).filter_map(move |index| {
+ let local = Local::new(index);
+ let decl = &self.local_decls[local];
+ if decl.is_user_variable() && decl.mutability == Mutability::Mut {
+ Some(local)
+ } else {
+ None
+ }
+ })
+ }
+
+ /// Returns an iterator over all user-declared mutable arguments and locals.
+ #[inline]
+ pub fn mut_vars_and_args_iter<'a>(
+ &'a self,
+ ) -> impl Iterator<Item = Local> + Captures<'tcx> + 'a {
+ (1..self.local_decls.len()).filter_map(move |index| {
+ let local = Local::new(index);
+ let decl = &self.local_decls[local];
+ if (decl.is_user_variable() || index < self.arg_count + 1)
+ && decl.mutability == Mutability::Mut
+ {
+ Some(local)
+ } else {
+ None
+ }
+ })
+ }
+
+ /// Returns an iterator over all function arguments.
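+ /// For a body with `arg_count == 2`, this yields `_1` and `_2`; the return
+ /// place `_0` is not included. Illustrative:
+ ///
+ /// ```ignore (illustrative)
+ /// for arg in body.args_iter() {
+ ///     let ty = body.local_decls[arg].ty; // inspect each argument's type
+ /// }
+ /// ```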
+ #[inline]
+ pub fn args_iter(&self) -> impl Iterator<Item = Local> + ExactSizeIterator {
+ (1..self.arg_count + 1).map(Local::new)
+ }
+
+ /// Returns an iterator over all user-defined variables and compiler-generated temporaries (all
+ /// locals that are neither arguments nor the return place).
+ #[inline]
+ pub fn vars_and_temps_iter(
+ &self,
+ ) -> impl DoubleEndedIterator<Item = Local> + ExactSizeIterator {
+ (self.arg_count + 1..self.local_decls.len()).map(Local::new)
+ }
+
+ #[inline]
+ pub fn drain_vars_and_temps<'a>(&'a mut self) -> impl Iterator<Item = LocalDecl<'tcx>> + 'a {
+ self.local_decls.drain(self.arg_count + 1..)
+ }
+
+ /// Returns the source info associated with `location`.
+ pub fn source_info(&self, location: Location) -> &SourceInfo {
+ let block = &self[location.block];
+ let stmts = &block.statements;
+ let idx = location.statement_index;
+ if idx < stmts.len() {
+ &stmts[idx].source_info
+ } else {
+ assert_eq!(idx, stmts.len());
+ &block.terminator().source_info
+ }
+ }
+
+ /// Returns the return type; it always returns the type of the first element of the `local_decls` array.
+ #[inline]
+ pub fn return_ty(&self) -> Ty<'tcx> {
+ self.local_decls[RETURN_PLACE].ty
+ }
+
+ /// Returns the return type as an `EarlyBinder`; it always returns the type of the first element of the `local_decls` array.
+ #[inline]
+ pub fn bound_return_ty(&self) -> ty::EarlyBinder<Ty<'tcx>> {
+ ty::EarlyBinder(self.local_decls[RETURN_PLACE].ty)
+ }
+
+ /// Gets the location of the terminator for the given block.
+ #[inline]
+ pub fn terminator_loc(&self, bb: BasicBlock) -> Location {
+ Location { block: bb, statement_index: self[bb].statements.len() }
+ }
+
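+ /// Returns the statement at `location`, or the block's terminator if the index
+ /// points one past the last statement. Illustrative:
+ ///
+ /// ```ignore (illustrative)
+ /// match body.stmt_at(loc) {
+ ///     Either::Left(stmt) => println!("statement: {:?}", stmt),
+ ///     Either::Right(term) => println!("terminator: {:?}", term),
+ /// }
+ /// ```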
+ pub fn stmt_at(&self, location: Location) -> Either<&Statement<'tcx>, &Terminator<'tcx>> {
+ let Location { block, statement_index } = location;
+ let block_data = &self.basic_blocks[block];
+ block_data
+ .statements
+ .get(statement_index)
+ .map(Either::Left)
+ .unwrap_or_else(|| Either::Right(block_data.terminator()))
+ }
+
+ #[inline]
+ pub fn yield_ty(&self) -> Option<Ty<'tcx>> {
+ self.generator.as_ref().and_then(|generator| generator.yield_ty)
+ }
+
+ #[inline]
+ pub fn generator_layout(&self) -> Option<&GeneratorLayout<'tcx>> {
+ self.generator.as_ref().and_then(|generator| generator.generator_layout.as_ref())
+ }
+
+ #[inline]
+ pub fn generator_drop(&self) -> Option<&Body<'tcx>> {
+ self.generator.as_ref().and_then(|generator| generator.generator_drop.as_ref())
+ }
+
+ #[inline]
+ pub fn generator_kind(&self) -> Option<GeneratorKind> {
+ self.generator.as_ref().map(|generator| generator.generator_kind)
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum Safety {
+ Safe,
+ /// Unsafe because of compiler-generated unsafe code, like `await` desugaring
+ BuiltinUnsafe,
+ /// Unsafe because of an unsafe fn
+ FnUnsafe,
+ /// Unsafe because of an `unsafe` block
+ ExplicitUnsafe(hir::HirId),
+}
+
+impl<'tcx> Index<BasicBlock> for Body<'tcx> {
+ type Output = BasicBlockData<'tcx>;
+
+ #[inline]
+ fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> {
+ &self.basic_blocks()[index]
+ }
+}
+
+impl<'tcx> IndexMut<BasicBlock> for Body<'tcx> {
+ #[inline]
+ fn index_mut(&mut self, index: BasicBlock) -> &mut BasicBlockData<'tcx> {
+ &mut self.basic_blocks.as_mut()[index]
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub enum ClearCrossCrate<T> {
+ Clear,
+ Set(T),
+}
+
+impl<T> ClearCrossCrate<T> {
+ pub fn as_ref(&self) -> ClearCrossCrate<&T> {
+ match self {
+ ClearCrossCrate::Clear => ClearCrossCrate::Clear,
+ ClearCrossCrate::Set(v) => ClearCrossCrate::Set(v),
+ }
+ }
+
+ pub fn assert_crate_local(self) -> T {
+ match self {
+ ClearCrossCrate::Clear => bug!("unwrapping cross-crate data"),
+ ClearCrossCrate::Set(v) => v,
+ }
+ }
+}
+
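+// Wire format implemented by `encode`/`decode` below: a one-byte tag, followed
+// by the payload only for `Set`. Encoders/decoders with `CLEAR_CROSS_CRATE`
+// skip the field entirely and always decode `Clear`.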
+const TAG_CLEAR_CROSS_CRATE_CLEAR: u8 = 0;
+const TAG_CLEAR_CROSS_CRATE_SET: u8 = 1;
+
+impl<E: TyEncoder, T: Encodable<E>> Encodable<E> for ClearCrossCrate<T> {
+ #[inline]
+ fn encode(&self, e: &mut E) {
+ if E::CLEAR_CROSS_CRATE {
+ return;
+ }
+
+ match *self {
+ ClearCrossCrate::Clear => TAG_CLEAR_CROSS_CRATE_CLEAR.encode(e),
+ ClearCrossCrate::Set(ref val) => {
+ TAG_CLEAR_CROSS_CRATE_SET.encode(e);
+ val.encode(e);
+ }
+ }
+ }
+}
+impl<D: TyDecoder, T: Decodable<D>> Decodable<D> for ClearCrossCrate<T> {
+ #[inline]
+ fn decode(d: &mut D) -> ClearCrossCrate<T> {
+ if D::CLEAR_CROSS_CRATE {
+ return ClearCrossCrate::Clear;
+ }
+
+ let discr = u8::decode(d);
+
+ match discr {
+ TAG_CLEAR_CROSS_CRATE_CLEAR => ClearCrossCrate::Clear,
+ TAG_CLEAR_CROSS_CRATE_SET => {
+ let val = T::decode(d);
+ ClearCrossCrate::Set(val)
+ }
+ tag => panic!("Invalid tag for ClearCrossCrate: {:?}", tag),
+ }
+ }
+}
+
+/// Grouped information about the source code origin of a MIR entity.
+/// Intended to be inspected by diagnostics and debuginfo.
+/// Most passes can work with it as a whole, within a single function.
+// The unofficial Cranelift backend, at least as of #65828, needs `SourceInfo` to implement `Eq` and
+// `Hash`. Please ping @bjorn3 if removing them.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub struct SourceInfo {
+ /// The source span for the AST pertaining to this MIR entity.
+ pub span: Span,
+
+ /// The source scope, keeping track of which bindings can be
+ /// seen by debuginfo, active lint levels, `unsafe {...}`, etc.
+ pub scope: SourceScope,
+}
+
+impl SourceInfo {
+ #[inline]
+ pub fn outermost(span: Span) -> Self {
+ SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Variables and temps
+
+rustc_index::newtype_index! {
+ pub struct Local {
+ derive [HashStable]
+ DEBUG_FORMAT = "_{}",
+ const RETURN_PLACE = 0,
+ }
+}
+
+impl Atom for Local {
+ fn index(self) -> usize {
+ Idx::index(self)
+ }
+}
+
+/// Classifies locals into categories. See `Body::local_kind`.
+#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable)]
+pub enum LocalKind {
+ /// User-declared variable binding.
+ Var,
+ /// Compiler-introduced temporary.
+ Temp,
+ /// Function argument.
+ Arg,
+ /// Location of function's return value.
+ ReturnPointer,
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct VarBindingForm<'tcx> {
+ /// Is variable bound via `x`, `mut x`, `ref x`, or `ref mut x`?
+ pub binding_mode: ty::BindingMode,
+ /// If an explicit type was provided for this variable binding,
+ /// this holds the source Span of that type.
+ ///
+ /// NOTE: if you want to change this to a `HirId`, be wary that
+ /// doing so breaks incremental compilation (as of this writing),
+ /// while a `Span` does not cause our tests to fail.
+ pub opt_ty_info: Option<Span>,
+ /// Place of the RHS of the =, or the subject of the `match` where this
+ /// variable is initialized. None in the case of `let PATTERN;`.
+ /// `Some((None, ..))` in the case of a `let [mut] x = ...` because
+ /// (a) the right-hand side isn't evaluated as a place expression, and
+ /// (b) it gives a way to separate this case from the remaining cases
+ /// for diagnostics.
+ pub opt_match_place: Option<(Option<Place<'tcx>>, Span)>,
+ /// The span of the pattern in which this variable was bound.
+ pub pat_span: Span,
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable)]
+pub enum BindingForm<'tcx> {
+ /// This is a binding for a non-`self` binding, or a `self` that has an explicit type.
+ Var(VarBindingForm<'tcx>),
+ /// Binding for a `self`/`&self`/`&mut self` binding where the type is implicit.
+ ImplicitSelf(ImplicitSelfKind),
+ /// Reference used in a guard expression to ensure immutability.
+ RefForGuard,
+}
+
+/// Represents what type of implicit self a function has, if any.
+#[derive(Clone, Copy, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum ImplicitSelfKind {
+ /// Represents a `fn x(self);`.
+ Imm,
+ /// Represents a `fn x(mut self);`.
+ Mut,
+ /// Represents a `fn x(&self);`.
+ ImmRef,
+ /// Represents a `fn x(&mut self);`.
+ MutRef,
+ /// Represents when a function does not have a self argument or
+ /// when a function has a `self: X` argument.
+ None,
+}
+
+TrivialTypeTraversalAndLiftImpls! { BindingForm<'tcx>, }
+
+mod binding_form_impl {
+ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+ use rustc_query_system::ich::StableHashingContext;
+
+ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for super::BindingForm<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ use super::BindingForm::*;
+ std::mem::discriminant(self).hash_stable(hcx, hasher);
+
+ match self {
+ Var(binding) => binding.hash_stable(hcx, hasher),
+ ImplicitSelf(kind) => kind.hash_stable(hcx, hasher),
+ RefForGuard => (),
+ }
+ }
+ }
+}
+
+/// `BlockTailInfo` is attached to the `LocalDecl` for temporaries
+/// created during evaluation of expressions in a block tail
+/// expression; that is, a block like `{ STMT_1; STMT_2; EXPR }`.
+///
+/// It is used to improve diagnostics when such temporaries are
+/// involved in borrow_check errors, e.g., explanations of where the
+/// temporaries come from, when their destructors are run, and/or how
+/// one might revise the code to satisfy the borrow checker's rules.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct BlockTailInfo {
+ /// If `true`, then the value resulting from evaluating this tail
+ /// expression is ignored by the block's expression context.
+ ///
+ /// Examples include `{ ...; tail };` and `let _ = { ...; tail };`
+ /// but not e.g., `let _x = { ...; tail };`
+ pub tail_result_is_ignored: bool,
+
+ /// `Span` of the tail expression.
+ pub span: Span,
+}
+
+/// A MIR local.
+///
+/// This can be a binding declared by the user, a temporary inserted by the compiler, a function
+/// argument, or the return place.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct LocalDecl<'tcx> {
+ /// Whether this is a mutable binding (i.e., `let x` or `let mut x`).
+ ///
+ /// Temporaries and the return place are always mutable.
+ pub mutability: Mutability,
+
+ // FIXME(matthewjasper) Don't store this in `Body`
+ pub local_info: Option<Box<LocalInfo<'tcx>>>,
+
+ /// `true` if this is an internal local.
+ ///
+ /// These locals are not based on types in the source code and are only used
+ /// for a few desugarings at the moment.
+ ///
+ /// The generator transformation will sanity check the locals which are live
+ /// across a suspension point against the type components of the generator
+ /// which type checking knows are live across a suspension point. We need to
+ /// mark drop flags as internal to avoid triggering this check, as they are introduced
+ /// outside of type inference.
+ ///
+ /// This should be sound because the drop flags are fully algebraic, and
+ /// therefore don't affect the auto-trait or outlives properties of the
+ /// generator.
+ pub internal: bool,
+
+ /// If this local is a temporary and `is_block_tail` is `Some`,
+ /// then it is a temporary created for evaluation of some
+ /// subexpression of some block's tail expression (with no
+ /// intervening statement context).
+ // FIXME(matthewjasper) Don't store this in `Body`
+ pub is_block_tail: Option<BlockTailInfo>,
+
+ /// The type of this local.
+ pub ty: Ty<'tcx>,
+
+ /// If the user manually ascribed a type to this variable,
+ /// e.g., via `let x: T`, then we carry that type here. The MIR
+ /// borrow checker needs this information since it can affect
+ /// region inference.
+ // FIXME(matthewjasper) Don't store this in `Body`
+ pub user_ty: Option<Box<UserTypeProjections>>,
+
+ /// The *syntactic* (i.e., not visibility) source scope the local is defined
+ /// in. If the local was defined in a let-statement, this
+ /// is *within* the let-statement, rather than outside
+ /// of it.
+ ///
+ /// This is needed because the visibility source scope of locals within
+ /// a let-statement is weird.
+ ///
+ /// The reason is that we want the local to be *within* the let-statement
+ /// for lint purposes, but we want the local to be *after* the let-statement
+ /// for names-in-scope purposes.
+ ///
+ /// That is, if we have a let-statement like the one in this
+ /// function:
+ ///
+ /// ```
+ /// fn foo(x: &str) {
+ /// #[allow(unused_mut)]
+ /// let mut x: u32 = { // <- one unused mut
+ /// let mut y: u32 = x.parse().unwrap();
+ /// y + 2
+ /// };
+ /// drop(x);
+ /// }
+ /// ```
+ ///
+ /// Then, from a lint point of view, the declaration of `x: u32`
+ /// (and `y: u32`) are within the `#[allow(unused_mut)]` scope - the
+ /// lint scopes are the same as the AST/HIR nesting.
+ ///
+ /// However, from a name lookup point of view, the scopes look more like
+ /// as if the let-statements were `match` expressions:
+ ///
+ /// ```
+ /// fn foo(x: &str) {
+ /// match {
+ /// match x.parse::<u32>().unwrap() {
+ /// y => y + 2
+ /// }
+ /// } {
+ /// x => drop(x)
+ /// };
+ /// }
+ /// ```
+ ///
+ /// We care about the name-lookup scopes for debuginfo - if the
+ /// debuginfo instruction pointer is at the call to `x.parse()`, we
+ /// want `x` to refer to `x: &str`, but if it is at the call to
+ /// `drop(x)`, we want it to refer to `x: u32`.
+ ///
+ /// To allow both uses to work, we need to have more than a single scope
+ /// for a local. We have the `source_info.scope` represent the "syntactic"
+ /// lint scope (with a variable being under its let block) while the
+ /// `var_debug_info.source_info.scope` represents the "local variable"
+ /// scope (where the "rest" of a block is under all prior let-statements).
+ ///
+ /// The end result looks like this:
+ ///
+ /// ```text
+ /// ROOT SCOPE
+ /// │{ argument x: &str }
+ /// │
+ /// │ │{ #[allow(unused_mut)] } // This is actually split into 2 scopes
+ /// │ │ // in practice because I'm lazy.
+ /// │ │
+ /// │ │← x.source_info.scope
+ /// │ │← `x.parse().unwrap()`
+ /// │ │
+ /// │ │ │← y.source_info.scope
+ /// │ │
+ /// │ │ │{ let y: u32 }
+ /// │ │ │
+ /// │ │ │← y.var_debug_info.source_info.scope
+ /// │ │ │← `y + 2`
+ /// │
+ /// │ │{ let x: u32 }
+ /// │ │← x.var_debug_info.source_info.scope
+ /// │ │← `drop(x)` // This accesses `x: u32`.
+ /// ```
+ pub source_info: SourceInfo,
+}
+
+// `LocalDecl` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(LocalDecl<'_>, 56);
+
+/// Extra information about some locals that's used for diagnostics and for
+/// classifying variables into local variables, statics, etc, which is needed e.g.
+/// for unsafety checking.
+///
+/// Not used for non-StaticRef temporaries, the return place, or anonymous
+/// function parameters.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub enum LocalInfo<'tcx> {
+ /// A user-defined local variable or function parameter
+ ///
+ /// The `BindingForm` is solely used for local diagnostics when generating
+ /// warnings/errors when compiling the current crate, and therefore it need
+ /// not be visible across crates.
+ User(ClearCrossCrate<BindingForm<'tcx>>),
+ /// A temporary created that references the static with the given `DefId`.
+ StaticRef { def_id: DefId, is_thread_local: bool },
+ /// A temporary created that references the const with the given `DefId`
+ ConstRef { def_id: DefId },
+ /// A temporary created during the creation of an aggregate
+ /// (e.g. a temporary for `foo` in `MyStruct { my_field: foo }`)
+ AggregateTemp,
+ /// A temporary created during the `Derefer` pass to avoid its retagging
+ DerefTemp,
+}
+
+impl<'tcx> LocalDecl<'tcx> {
+ /// Returns `true` only if local is a binding that can itself be
+ /// made mutable via the addition of the `mut` keyword, namely
+ /// something like the occurrences of `x` in:
+ /// - `fn foo(x: Type) { ... }`,
+ /// - `let x = ...`,
+ /// - or `match ... { C(x) => ... }`
+ pub fn can_be_made_mutable(&self) -> bool {
+ matches!(
+ self.local_info,
+ Some(box LocalInfo::User(ClearCrossCrate::Set(
+ BindingForm::Var(VarBindingForm {
+ binding_mode: ty::BindingMode::BindByValue(_),
+ opt_ty_info: _,
+ opt_match_place: _,
+ pat_span: _,
+ }) | BindingForm::ImplicitSelf(ImplicitSelfKind::Imm),
+ )))
+ )
+ }
+
+ /// Returns `true` if local is definitely not a `ref ident` or
+ /// `ref mut ident` binding. (Such bindings cannot be made into
+ /// mutable bindings, but the inverse does not necessarily hold).
+ pub fn is_nonref_binding(&self) -> bool {
+ matches!(
+ self.local_info,
+ Some(box LocalInfo::User(ClearCrossCrate::Set(
+ BindingForm::Var(VarBindingForm {
+ binding_mode: ty::BindingMode::BindByValue(_),
+ opt_ty_info: _,
+ opt_match_place: _,
+ pat_span: _,
+ }) | BindingForm::ImplicitSelf(_),
+ )))
+ )
+ }
+
+ /// Returns `true` if this variable is a named variable or function
+ /// parameter declared by the user.
+ #[inline]
+ pub fn is_user_variable(&self) -> bool {
+ matches!(self.local_info, Some(box LocalInfo::User(_)))
+ }
+
+ /// Returns `true` if this is a reference to a variable bound in a `match`
+ /// expression that is used to access said variable for the guard of the
+ /// match arm.
+ pub fn is_ref_for_guard(&self) -> bool {
+ matches!(
+ self.local_info,
+ Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::RefForGuard)))
+ )
+ }
+
+ /// Returns `true` if this is a reference to a static item that is used to
+ /// access that static.
+ pub fn is_ref_to_static(&self) -> bool {
+ matches!(self.local_info, Some(box LocalInfo::StaticRef { .. }))
+ }
+
+ /// Returns `true` if this is a reference to a thread-local static item that is used to
+ /// access that static.
+ pub fn is_ref_to_thread_local(&self) -> bool {
+ match self.local_info {
+ Some(box LocalInfo::StaticRef { is_thread_local, .. }) => is_thread_local,
+ _ => false,
+ }
+ }
+
+ /// Returns `true` if this is a `DerefTemp`.
+ pub fn is_deref_temp(&self) -> bool {
+ matches!(self.local_info, Some(box LocalInfo::DerefTemp))
+ }
+
+ /// Returns `true` if the local is from a compiler desugaring, e.g.,
+ /// `__next` from a `for` loop.
+ #[inline]
+ pub fn from_compiler_desugaring(&self) -> bool {
+ self.source_info.span.desugaring_kind().is_some()
+ }
+
+ /// Creates a new `LocalDecl` for a temporary: mutable, non-internal.
+ #[inline]
+ pub fn new(ty: Ty<'tcx>, span: Span) -> Self {
+ Self::with_source_info(ty, SourceInfo::outermost(span))
+ }
+
+ /// Like `LocalDecl::new`, but takes a `SourceInfo` instead of a `Span`.
+ #[inline]
+ pub fn with_source_info(ty: Ty<'tcx>, source_info: SourceInfo) -> Self {
+ LocalDecl {
+ mutability: Mutability::Mut,
+ local_info: None,
+ internal: false,
+ is_block_tail: None,
+ ty,
+ user_ty: None,
+ source_info,
+ }
+ }
+
+ /// Converts `self` into the same `LocalDecl`, except tagged as internal.
+ #[inline]
+ pub fn internal(mut self) -> Self {
+ self.internal = true;
+ self
+ }
+
+ /// Converts `self` into the same `LocalDecl`, except tagged as immutable.
+ #[inline]
+ pub fn immutable(mut self) -> Self {
+ self.mutability = Mutability::Not;
+ self
+ }
+
+ /// Converts `self` into the same `LocalDecl`, except tagged as a block-tail temporary.
+ #[inline]
+ pub fn block_tail(mut self, info: BlockTailInfo) -> Self {
+ assert!(self.is_block_tail.is_none());
+ self.is_block_tail = Some(info);
+ self
+ }
+}
+
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub enum VarDebugInfoContents<'tcx> {
+ /// NOTE(eddyb) There's an unenforced invariant that this `Place` is
+ /// based on a `Local`, not a `Static`, and contains no indexing.
+ Place(Place<'tcx>),
+ Const(Constant<'tcx>),
+}
+
+impl<'tcx> Debug for VarDebugInfoContents<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ VarDebugInfoContents::Const(c) => write!(fmt, "{}", c),
+ VarDebugInfoContents::Place(p) => write!(fmt, "{:?}", p),
+ }
+ }
+}
+
+/// Debug information pertaining to a user variable.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct VarDebugInfo<'tcx> {
+ pub name: Symbol,
+
+ /// Source info of the user variable, including the scope
+ /// within which the variable is visible (to debuginfo)
+ /// (see `LocalDecl`'s `source_info` field for more details).
+ pub source_info: SourceInfo,
+
+ /// Where the data for this user variable is to be found.
+ pub value: VarDebugInfoContents<'tcx>,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// BasicBlock
+
+rustc_index::newtype_index! {
+ /// A node in the MIR [control-flow graph][CFG].
+ ///
+ /// There are no branches (e.g., `if`s, function calls, etc.) within a basic block, which makes
+ /// it easier to do [data-flow analyses] and optimizations. Instead, branches are represented
+ /// as an edge in a graph between basic blocks.
+ ///
+ /// Basic blocks consist of a series of [statements][Statement], ending with a
+ /// [terminator][Terminator]. Basic blocks can have multiple predecessors and successors,
+ /// however there is a MIR pass ([`CriticalCallEdges`]) that removes *critical edges*, which
+ /// are edges that go from a multi-successor node to a multi-predecessor node. This pass is
+ /// needed because some analyses require that there are no critical edges in the CFG.
+ ///
+ /// Note that this type is just an index into [`Body.basic_blocks`](Body::basic_blocks);
+ /// the actual data that a basic block holds is in [`BasicBlockData`].
+ ///
+ /// Read more about basic blocks in the [rustc-dev-guide][guide-mir].
+ ///
+ /// [CFG]: https://rustc-dev-guide.rust-lang.org/appendix/background.html#cfg
+ /// [data-flow analyses]:
+ /// https://rustc-dev-guide.rust-lang.org/appendix/background.html#what-is-a-dataflow-analysis
+ /// [`CriticalCallEdges`]: ../../rustc_const_eval/transform/add_call_guards/enum.AddCallGuards.html#variant.CriticalCallEdges
+ /// [guide-mir]: https://rustc-dev-guide.rust-lang.org/mir/
+ pub struct BasicBlock {
+ derive [HashStable]
+ DEBUG_FORMAT = "bb{}",
+ const START_BLOCK = 0,
+ }
+}
+
+impl BasicBlock {
+ pub fn start_location(self) -> Location {
+ Location { block: self, statement_index: 0 }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// BasicBlockData
+
+/// Data for a basic block, including a list of its statements.
+///
+/// See [`BasicBlock`] for documentation on what basic blocks are at a high level.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct BasicBlockData<'tcx> {
+ /// List of statements in this block.
+ pub statements: Vec<Statement<'tcx>>,
+
+ /// Terminator for this block.
+ ///
+ /// N.B., this should generally ONLY be `None` during construction.
+ /// Therefore, you should generally access it via the
+ /// `terminator()` or `terminator_mut()` methods. The only
+ /// exception is that certain passes, such as `simplify_cfg`, swap
+ /// out the terminator temporarily with `None` while they continue
+ /// to recurse over the set of basic blocks.
+ pub terminator: Option<Terminator<'tcx>>,
+
+ /// If true, this block lies on an unwind path. This is used
+ /// during codegen where distinct kinds of basic blocks may be
+ /// generated (particularly for MSVC cleanup). Unwind blocks must
+ /// only branch to other unwind blocks.
+ pub is_cleanup: bool,
+}
+
+impl<'tcx> BasicBlockData<'tcx> {
+ pub fn new(terminator: Option<Terminator<'tcx>>) -> BasicBlockData<'tcx> {
+ BasicBlockData { statements: vec![], terminator, is_cleanup: false }
+ }
+
+ /// Accessor for terminator.
+ ///
+ /// Terminator may not be None after construction of the basic block is complete. This accessor
+ /// provides a convenient way to reach the terminator.
+ #[inline]
+ pub fn terminator(&self) -> &Terminator<'tcx> {
+ self.terminator.as_ref().expect("invalid terminator state")
+ }
+
+ #[inline]
+ pub fn terminator_mut(&mut self) -> &mut Terminator<'tcx> {
+ self.terminator.as_mut().expect("invalid terminator state")
+ }
+
+ pub fn retain_statements<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut Statement<'_>) -> bool,
+ {
+ for s in &mut self.statements {
+ if !f(s) {
+ s.make_nop();
+ }
+ }
+ }
+
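+ /// Expands statements in place: for each statement, `f` may return a
+ /// replacement sequence (an empty iterator nops the statement out).
+ ///
+ /// An illustrative sketch (hypothetical transformation):
+ ///
+ /// ```ignore (illustrative)
+ /// // Duplicate every statement in the block:
+ /// block.expand_statements(|s| {
+ ///     let copy = s.clone();
+ ///     Some([copy.clone(), copy].into_iter())
+ /// });
+ /// ```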
+ pub fn expand_statements<F, I>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut Statement<'tcx>) -> Option<I>,
+ I: iter::TrustedLen<Item = Statement<'tcx>>,
+ {
+ // Gather all the iterators we'll need to splice in, and their positions.
+ let mut splices: Vec<(usize, I)> = vec![];
+ let mut extra_stmts = 0;
+ for (i, s) in self.statements.iter_mut().enumerate() {
+ if let Some(mut new_stmts) = f(s) {
+ if let Some(first) = new_stmts.next() {
+ // We can already store the first new statement.
+ *s = first;
+
+ // Save the other statements for optimized splicing.
+ let remaining = new_stmts.size_hint().0;
+ if remaining > 0 {
+ splices.push((i + 1 + extra_stmts, new_stmts));
+ extra_stmts += remaining;
+ }
+ } else {
+ s.make_nop();
+ }
+ }
+ }
+
+ // Splice in the new statements, from the end of the block.
+ // FIXME(eddyb) This could be more efficient with a "gap buffer"
+ // where a range of elements ("gap") is left uninitialized, with
+ // splicing adding new elements to the end of that gap and moving
+ // existing elements from before the gap to the end of the gap.
+ // For now, this is safe code, emulating a gap but initializing it.
+ let mut gap = self.statements.len()..self.statements.len() + extra_stmts;
+ self.statements.resize(
+ gap.end,
+ Statement { source_info: SourceInfo::outermost(DUMMY_SP), kind: StatementKind::Nop },
+ );
+ for (splice_start, new_stmts) in splices.into_iter().rev() {
+ let splice_end = splice_start + new_stmts.size_hint().0;
+ while gap.end > splice_end {
+ gap.start -= 1;
+ gap.end -= 1;
+ self.statements.swap(gap.start, gap.end);
+ }
+ self.statements.splice(splice_start..splice_end, new_stmts);
+ gap.end = splice_start;
+ }
+ }
+
+ pub fn visitable(&self, index: usize) -> &dyn MirVisitable<'tcx> {
+ if index < self.statements.len() { &self.statements[index] } else { &self.terminator }
+ }
+}
+
+impl<O> AssertKind<O> {
+ /// Getting a description does not require `O` to be printable, and does not
+ /// require allocation.
+ /// The caller is expected to handle `BoundsCheck` separately.
+ pub fn description(&self) -> &'static str {
+ use AssertKind::*;
+ match self {
+ Overflow(BinOp::Add, _, _) => "attempt to add with overflow",
+ Overflow(BinOp::Sub, _, _) => "attempt to subtract with overflow",
+ Overflow(BinOp::Mul, _, _) => "attempt to multiply with overflow",
+ Overflow(BinOp::Div, _, _) => "attempt to divide with overflow",
+ Overflow(BinOp::Rem, _, _) => "attempt to calculate the remainder with overflow",
+ OverflowNeg(_) => "attempt to negate with overflow",
+ Overflow(BinOp::Shr, _, _) => "attempt to shift right with overflow",
+ Overflow(BinOp::Shl, _, _) => "attempt to shift left with overflow",
+ Overflow(op, _, _) => bug!("{:?} cannot overflow", op),
+ DivisionByZero(_) => "attempt to divide by zero",
+ RemainderByZero(_) => "attempt to calculate the remainder with a divisor of zero",
+ ResumedAfterReturn(GeneratorKind::Gen) => "generator resumed after completion",
+ ResumedAfterReturn(GeneratorKind::Async(_)) => "`async fn` resumed after completion",
+ ResumedAfterPanic(GeneratorKind::Gen) => "generator resumed after panicking",
+ ResumedAfterPanic(GeneratorKind::Async(_)) => "`async fn` resumed after panicking",
+ BoundsCheck { .. } => bug!("Unexpected AssertKind"),
+ }
+ }
+
+ /// Format the message arguments for the `assert(cond, msg..)` terminator in MIR printing.
+ pub fn fmt_assert_args<W: Write>(&self, f: &mut W) -> fmt::Result
+ where
+ O: Debug,
+ {
+ use AssertKind::*;
+ match self {
+ BoundsCheck { ref len, ref index } => write!(
+ f,
+ "\"index out of bounds: the length is {{}} but the index is {{}}\", {:?}, {:?}",
+ len, index
+ ),
+
+ OverflowNeg(op) => {
+ write!(f, "\"attempt to negate `{{}}`, which would overflow\", {:?}", op)
+ }
+ DivisionByZero(op) => write!(f, "\"attempt to divide `{{}}` by zero\", {:?}", op),
+ RemainderByZero(op) => write!(
+ f,
+ "\"attempt to calculate the remainder of `{{}}` with a divisor of zero\", {:?}",
+ op
+ ),
+ Overflow(BinOp::Add, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} + {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Sub, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} - {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Mul, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} * {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Div, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} / {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Rem, l, r) => write!(
+ f,
+ "\"attempt to compute the remainder of `{{}} % {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Shr, _, r) => {
+ write!(f, "\"attempt to shift right by `{{}}`, which would overflow\", {:?}", r)
+ }
+ Overflow(BinOp::Shl, _, r) => {
+ write!(f, "\"attempt to shift left by `{{}}`, which would overflow\", {:?}", r)
+ }
+ _ => write!(f, "\"{}\"", self.description()),
+ }
+ }
+}
+
+impl<O: fmt::Debug> fmt::Debug for AssertKind<O> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use AssertKind::*;
+ match self {
+ BoundsCheck { ref len, ref index } => write!(
+ f,
+ "index out of bounds: the length is {:?} but the index is {:?}",
+ len, index
+ ),
+ OverflowNeg(op) => write!(f, "attempt to negate `{:#?}`, which would overflow", op),
+ DivisionByZero(op) => write!(f, "attempt to divide `{:#?}` by zero", op),
+ RemainderByZero(op) => write!(
+ f,
+ "attempt to calculate the remainder of `{:#?}` with a divisor of zero",
+ op
+ ),
+ Overflow(BinOp::Add, l, r) => {
+ write!(f, "attempt to compute `{:#?} + {:#?}`, which would overflow", l, r)
+ }
+ Overflow(BinOp::Sub, l, r) => {
+ write!(f, "attempt to compute `{:#?} - {:#?}`, which would overflow", l, r)
+ }
+ Overflow(BinOp::Mul, l, r) => {
+ write!(f, "attempt to compute `{:#?} * {:#?}`, which would overflow", l, r)
+ }
+ Overflow(BinOp::Div, l, r) => {
+ write!(f, "attempt to compute `{:#?} / {:#?}`, which would overflow", l, r)
+ }
+ Overflow(BinOp::Rem, l, r) => write!(
+ f,
+ "attempt to compute the remainder of `{:#?} % {:#?}`, which would overflow",
+ l, r
+ ),
+ Overflow(BinOp::Shr, _, r) => {
+ write!(f, "attempt to shift right by `{:#?}`, which would overflow", r)
+ }
+ Overflow(BinOp::Shl, _, r) => {
+ write!(f, "attempt to shift left by `{:#?}`, which would overflow", r)
+ }
+ _ => write!(f, "{}", self.description()),
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Statements
+
+/// A statement in a basic block, including information about its source code.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct Statement<'tcx> {
+ pub source_info: SourceInfo,
+ pub kind: StatementKind<'tcx>,
+}
+
+// `Statement` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(Statement<'_>, 32);
+
+impl Statement<'_> {
+ /// Changes a statement to a nop. This is both faster than deleting instructions and avoids
+ /// invalidating statement indices in `Location`s.
+ pub fn make_nop(&mut self) {
+ self.kind = StatementKind::Nop
+ }
+
+ /// Changes a statement to a nop and returns the original statement.
+ #[must_use = "If you don't need the statement, use `make_nop` instead"]
+ pub fn replace_nop(&mut self) -> Self {
+ Statement {
+ source_info: self.source_info,
+ kind: mem::replace(&mut self.kind, StatementKind::Nop),
+ }
+ }
+}
+
+impl Debug for Statement<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use self::StatementKind::*;
+ match self.kind {
+ Assign(box (ref place, ref rv)) => write!(fmt, "{:?} = {:?}", place, rv),
+ FakeRead(box (ref cause, ref place)) => {
+ write!(fmt, "FakeRead({:?}, {:?})", cause, place)
+ }
+ Retag(ref kind, ref place) => write!(
+ fmt,
+ "Retag({}{:?})",
+ match kind {
+ RetagKind::FnEntry => "[fn entry] ",
+ RetagKind::TwoPhase => "[2phase] ",
+ RetagKind::Raw => "[raw] ",
+ RetagKind::Default => "",
+ },
+ place,
+ ),
+ StorageLive(ref place) => write!(fmt, "StorageLive({:?})", place),
+ StorageDead(ref place) => write!(fmt, "StorageDead({:?})", place),
+ SetDiscriminant { ref place, variant_index } => {
+ write!(fmt, "discriminant({:?}) = {:?}", place, variant_index)
+ }
+ Deinit(ref place) => write!(fmt, "Deinit({:?})", place),
+ AscribeUserType(box (ref place, ref c_ty), ref variance) => {
+ write!(fmt, "AscribeUserType({:?}, {:?}, {:?})", place, variance, c_ty)
+ }
+ Coverage(box self::Coverage { ref kind, code_region: Some(ref rgn) }) => {
+ write!(fmt, "Coverage::{:?} for {:?}", kind, rgn)
+ }
+ Coverage(box ref coverage) => write!(fmt, "Coverage::{:?}", coverage.kind),
+ CopyNonOverlapping(box crate::mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ write!(fmt, "copy_nonoverlapping(src={:?}, dst={:?}, count={:?})", src, dst, count)
+ }
+ Nop => write!(fmt, "nop"),
+ }
+ }
+}
+
+impl<'tcx> StatementKind<'tcx> {
+ pub fn as_assign_mut(&mut self) -> Option<&mut (Place<'tcx>, Rvalue<'tcx>)> {
+ match self {
+ StatementKind::Assign(x) => Some(x),
+ _ => None,
+ }
+ }
+
+ pub fn as_assign(&self) -> Option<&(Place<'tcx>, Rvalue<'tcx>)> {
+ match self {
+ StatementKind::Assign(x) => Some(x),
+ _ => None,
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Places
+
+impl<V, T> ProjectionElem<V, T> {
+ /// Returns `true` if the target of this projection may refer to a different region of memory
+ /// than the base.
+ fn is_indirect(&self) -> bool {
+ match self {
+ Self::Deref => true,
+
+ Self::Field(_, _)
+ | Self::Index(_)
+ | Self::ConstantIndex { .. }
+ | Self::Subslice { .. }
+ | Self::Downcast(_, _) => false,
+ }
+ }
+
+ /// Returns `true` if this is a `Downcast` projection with the given `VariantIdx`.
+ pub fn is_downcast_to(&self, v: VariantIdx) -> bool {
+ matches!(*self, Self::Downcast(_, x) if x == v)
+ }
+
+ /// Returns `true` if this is a `Field` projection with the given index.
+ pub fn is_field_to(&self, f: Field) -> bool {
+ matches!(*self, Self::Field(x, _) if x == f)
+ }
+}
+
+/// Alias for projections as they appear in `UserTypeProjection`, where we
+/// need neither the `V` parameter for `Index` nor the `T` for `Field`.
+pub type ProjectionKind = ProjectionElem<(), ()>;
+
+rustc_index::newtype_index! {
+ /// A [newtype'd][wrapper] index type in the MIR [control-flow graph][CFG]
+ ///
+ /// A field (e.g., `f` in `_1.f`) is one variant of [`ProjectionElem`]. Conceptually,
+ /// rustc can identify whether a field projection refers to the same region of memory
+ /// as its base or to a different one.
+ /// Read more about projections in the [rustc-dev-guide][mir-datatypes].
+ ///
+ /// [wrapper]: https://rustc-dev-guide.rust-lang.org/appendix/glossary.html#newtype
+ /// [CFG]: https://rustc-dev-guide.rust-lang.org/appendix/background.html#cfg
+ /// [mir-datatypes]: https://rustc-dev-guide.rust-lang.org/mir/index.html#mir-data-types
+ pub struct Field {
+ derive [HashStable]
+ DEBUG_FORMAT = "field[{}]"
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct PlaceRef<'tcx> {
+ pub local: Local,
+ pub projection: &'tcx [PlaceElem<'tcx>],
+}
+
+// Once we stop implementing `Ord` for `DefId`,
+// this impl will be unnecessary. Until then, we'll
+// leave this impl in place to prevent re-adding a
+ // dependency on the `Ord` impl for `DefId`.
+impl<'tcx> !PartialOrd for PlaceRef<'tcx> {}
+
+impl<'tcx> Place<'tcx> {
+ // FIXME change this to a const fn by also making List::empty a const fn.
+ pub fn return_place() -> Place<'tcx> {
+ Place { local: RETURN_PLACE, projection: List::empty() }
+ }
+
+ /// Returns `true` if this `Place` contains a `Deref` projection.
+ ///
+ /// If `Place::is_indirect` returns false, the caller knows that the `Place` refers to the
+ /// same region of memory as its base.
+ pub fn is_indirect(&self) -> bool {
+ self.projection.iter().any(|elem| elem.is_indirect())
+ }
+
+ /// If `MirPhase >= Derefered` and the projection contains a `Deref`,
+ /// it is guaranteed to be the first element.
+ pub fn has_deref(&self) -> bool {
+ // Make sure this is not accidentally used in the wrong MIR phase.
+ debug_assert!(!self.projection[1..].contains(&PlaceElem::Deref));
+ self.projection.first() == Some(&PlaceElem::Deref)
+ }
+
+ /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
+ /// a single deref of a local.
+ #[inline(always)]
+ pub fn local_or_deref_local(&self) -> Option<Local> {
+ self.as_ref().local_or_deref_local()
+ }
+
+ /// If this place represents a local variable like `_X` with no
+ /// projections, return `Some(_X)`.
+ #[inline(always)]
+ pub fn as_local(&self) -> Option<Local> {
+ self.as_ref().as_local()
+ }
+
+ #[inline]
+ pub fn as_ref(&self) -> PlaceRef<'tcx> {
+ PlaceRef { local: self.local, projection: &self.projection }
+ }
+
+ /// Iterates over the projections in evaluation order, i.e., the first element is the base
+ /// with its projection, and then subsequently more projections are added.
+ /// As a concrete example, given the place `a.b.c`, this would yield:
+ /// - (a, .b)
+ /// - (a.b, .c)
+ ///
+ /// Given a place without projections, the iterator is empty.
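+ ///
+ /// A usage sketch (illustrative; `place` is assumed to be `a.b.c`):
+ ///
+ /// ```ignore (illustrative)
+ /// for (base, elem) in place.iter_projections() {
+ ///     // 1st iteration: base = a,   elem = .b
+ ///     // 2nd iteration: base = a.b, elem = .c
+ /// }
+ /// ```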
+ #[inline]
+ pub fn iter_projections(
+ self,
+ ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
+ self.as_ref().iter_projections()
+ }
+
+ /// Generates a new place by appending `more_projections` to the existing ones
+ /// and interning the result.
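+ ///
+ /// For example (illustrative), appending `[.g, Deref]` to `_1.f` yields the
+ /// place `*((_1.f).g)`.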
+ pub fn project_deeper(self, more_projections: &[PlaceElem<'tcx>], tcx: TyCtxt<'tcx>) -> Self {
+ if more_projections.is_empty() {
+ return self;
+ }
+
+ let mut v: Vec<PlaceElem<'tcx>>;
+
+ let new_projections = if self.projection.is_empty() {
+ more_projections
+ } else {
+ v = Vec::with_capacity(self.projection.len() + more_projections.len());
+ v.extend(self.projection);
+ v.extend(more_projections);
+ &v
+ };
+
+ Place { local: self.local, projection: tcx.intern_place_elems(new_projections) }
+ }
+}
+
+impl From<Local> for Place<'_> {
+ fn from(local: Local) -> Self {
+ Place { local, projection: List::empty() }
+ }
+}
+
+impl<'tcx> PlaceRef<'tcx> {
+ /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
+ /// a single deref of a local.
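+ ///
+ /// For example (illustrative): both `_1` and `*_1` yield `Some(_1)`, while
+ /// `_1.f` yields `None`.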
+ pub fn local_or_deref_local(&self) -> Option<Local> {
+ match *self {
+ PlaceRef { local, projection: [] }
+ | PlaceRef { local, projection: [ProjectionElem::Deref] } => Some(local),
+ _ => None,
+ }
+ }
+
+ /// If `MirPhase >= Derefered` and the projection contains a `Deref`, it is
+ /// guaranteed to be the first projection element.
+ pub fn has_deref(&self) -> bool {
+ self.projection.first() == Some(&PlaceElem::Deref)
+ }
+
+ /// If this place represents a local variable like `_X` with no
+ /// projections, return `Some(_X)`.
+ #[inline]
+ pub fn as_local(&self) -> Option<Local> {
+ match *self {
+ PlaceRef { local, projection: [] } => Some(local),
+ _ => None,
+ }
+ }
+
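+ /// Splits off the last projection element, returning the shortened base as a
+ /// `PlaceRef` together with that element, or `None` if this place is a bare
+ /// local with no projections.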
+ #[inline]
+ pub fn last_projection(&self) -> Option<(PlaceRef<'tcx>, PlaceElem<'tcx>)> {
+ if let &[ref proj_base @ .., elem] = self.projection {
+ Some((PlaceRef { local: self.local, projection: proj_base }, elem))
+ } else {
+ None
+ }
+ }
+
+ /// Iterates over the projections in evaluation order, i.e., the first element is the base
+ /// with its projection, and then subsequently more projections are added.
+ /// As a concrete example, given the place `a.b.c`, this would yield:
+ /// - (a, .b)
+ /// - (a.b, .c)
+ ///
+ /// Given a place without projections, the iterator is empty.
+ #[inline]
+ pub fn iter_projections(
+ self,
+ ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
+ self.projection.iter().enumerate().map(move |(i, proj)| {
+ let base = PlaceRef { local: self.local, projection: &self.projection[..i] };
+ (base, *proj)
+ })
+ }
+}
+
+impl Debug for Place<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ for elem in self.projection.iter().rev() {
+ match elem {
+ ProjectionElem::Downcast(_, _) | ProjectionElem::Field(_, _) => {
+ write!(fmt, "(").unwrap();
+ }
+ ProjectionElem::Deref => {
+ write!(fmt, "(*").unwrap();
+ }
+ ProjectionElem::Index(_)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => {}
+ }
+ }
+
+ write!(fmt, "{:?}", self.local)?;
+
+ for elem in self.projection.iter() {
+ match elem {
+ ProjectionElem::Downcast(Some(name), _index) => {
+ write!(fmt, " as {})", name)?;
+ }
+ ProjectionElem::Downcast(None, index) => {
+ write!(fmt, " as variant#{:?})", index)?;
+ }
+ ProjectionElem::Deref => {
+ write!(fmt, ")")?;
+ }
+ ProjectionElem::Field(field, ty) => {
+ write!(fmt, ".{:?}: {:?})", field.index(), ty)?;
+ }
+ ProjectionElem::Index(ref index) => {
+ write!(fmt, "[{:?}]", index)?;
+ }
+ ProjectionElem::ConstantIndex { offset, min_length, from_end: false } => {
+ write!(fmt, "[{:?} of {:?}]", offset, min_length)?;
+ }
+ ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => {
+ write!(fmt, "[-{:?} of {:?}]", offset, min_length)?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: true } if to == 0 => {
+ write!(fmt, "[{:?}:]", from)?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: true } if from == 0 => {
+ write!(fmt, "[:-{:?}]", to)?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: true } => {
+ write!(fmt, "[{:?}:-{:?}]", from, to)?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: false } => {
+ write!(fmt, "[{:?}..{:?}]", from, to)?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Scopes
+
+rustc_index::newtype_index! {
+ pub struct SourceScope {
+ derive [HashStable]
+ DEBUG_FORMAT = "scope[{}]",
+ const OUTERMOST_SOURCE_SCOPE = 0,
+ }
+}
+
+impl SourceScope {
+ /// Finds the original HirId this MIR item came from.
+ /// This is necessary after MIR optimizations, as otherwise we get a HirId
+ /// from the function that was inlined instead of the function call site.
+ pub fn lint_root<'tcx>(
+ self,
+ source_scopes: &IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ ) -> Option<HirId> {
+ let mut data = &source_scopes[self];
+ // FIXME(oli-obk): we should be able to just walk the `inlined_parent_scope`, but it
+ // does not work as I thought it would. Needs more investigation and documentation.
+ while data.inlined.is_some() {
+ trace!(?data);
+ data = &source_scopes[data.parent_scope.unwrap()];
+ }
+ trace!(?data);
+ match &data.local_data {
+ ClearCrossCrate::Set(data) => Some(data.lint_root),
+ ClearCrossCrate::Clear => None,
+ }
+ }
+
+ /// The instance this source scope was inlined from, if any.
+ #[inline]
+ pub fn inlined_instance<'tcx>(
+ self,
+ source_scopes: &IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ ) -> Option<ty::Instance<'tcx>> {
+ let scope_data = &source_scopes[self];
+ if let Some((inlined_instance, _)) = scope_data.inlined {
+ Some(inlined_instance)
+ } else if let Some(inlined_scope) = scope_data.inlined_parent_scope {
+ Some(source_scopes[inlined_scope].inlined.unwrap().0)
+ } else {
+ None
+ }
+ }
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct SourceScopeData<'tcx> {
+ pub span: Span,
+ pub parent_scope: Option<SourceScope>,
+
+ /// Whether this scope is the root of a scope tree of another body,
+ /// inlined into this body by the MIR inliner.
+ /// `ty::Instance` is the callee, and the `Span` is the call site.
+ pub inlined: Option<(ty::Instance<'tcx>, Span)>,
+
+ /// Nearest (transitive) parent scope (if any) which is inlined.
+ /// This is an optimization over walking up `parent_scope`
+ /// until a scope with `inlined: Some(...)` is found.
+ pub inlined_parent_scope: Option<SourceScope>,
+
+ /// Crate-local information for this source scope, that can't (and
+ /// needn't) be tracked across crates.
+ pub local_data: ClearCrossCrate<SourceScopeLocalData>,
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct SourceScopeLocalData {
+ /// An `HirId` with lint levels equivalent to this scope's lint levels.
+ pub lint_root: hir::HirId,
+ /// The unsafe block that contains this node.
+ pub safety: Safety,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Operands
+
+impl<'tcx> Debug for Operand<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use self::Operand::*;
+ match *self {
+ Constant(ref a) => write!(fmt, "{:?}", a),
+ Copy(ref place) => write!(fmt, "{:?}", place),
+ Move(ref place) => write!(fmt, "move {:?}", place),
+ }
+ }
+}
+
+impl<'tcx> Operand<'tcx> {
+ /// Convenience helper to make a constant that refers to the fn
+ /// with the given `DefId` and substs. Since this is used to synthesize
+ /// MIR, it assumes `user_ty` is `None`.
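+ ///
+ /// A usage sketch (illustrative; `def_id` and `substs` identify some function):
+ ///
+ /// ```ignore (illustrative)
+ /// let func = Operand::function_handle(tcx, def_id, substs, span);
+ /// // `func` is now suitable as the callee of a `TerminatorKind::Call`.
+ /// ```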
+ pub fn function_handle(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ span: Span,
+ ) -> Self {
+ let ty = tcx.bound_type_of(def_id).subst(tcx, substs);
+ Operand::Constant(Box::new(Constant {
+ span,
+ user_ty: None,
+ literal: ConstantKind::Val(ConstValue::ZeroSized, ty),
+ }))
+ }
+
+ pub fn is_move(&self) -> bool {
+ matches!(self, Operand::Move(..))
+ }
+
+ /// Convenience helper to make a literal-like constant from a given scalar value.
+ /// Since this is used to synthesize MIR, it assumes `user_ty` is `None`.
+ pub fn const_from_scalar(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ val: Scalar,
+ span: Span,
+ ) -> Operand<'tcx> {
+ debug_assert!({
+ let param_env_and_ty = ty::ParamEnv::empty().and(ty);
+ let type_size = tcx
+ .layout_of(param_env_and_ty)
+ .unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
+ .size;
+ let scalar_size = match val {
+ Scalar::Int(int) => int.size(),
+ _ => panic!("Invalid scalar type {:?}", val),
+ };
+ scalar_size == type_size
+ });
+ Operand::Constant(Box::new(Constant {
+ span,
+ user_ty: None,
+ literal: ConstantKind::Val(ConstValue::Scalar(val), ty),
+ }))
+ }
+
+ pub fn to_copy(&self) -> Self {
+ match *self {
+ Operand::Copy(_) | Operand::Constant(_) => self.clone(),
+ Operand::Move(place) => Operand::Copy(place),
+ }
+ }
+
+ /// Returns the `Place` that is the target of this `Operand`, or `None` if this `Operand` is a
+ /// constant.
+ pub fn place(&self) -> Option<Place<'tcx>> {
+ match self {
+ Operand::Copy(place) | Operand::Move(place) => Some(*place),
+ Operand::Constant(_) => None,
+ }
+ }
+
+ /// Returns the `Constant` that is the target of this `Operand`, or `None` if this `Operand` is a
+ /// place.
+ pub fn constant(&self) -> Option<&Constant<'tcx>> {
+ match self {
+ Operand::Constant(x) => Some(&**x),
+ Operand::Copy(_) | Operand::Move(_) => None,
+ }
+ }
+
+ /// Gets the `ty::FnDef` from an operand if it's a constant function item.
+ ///
+ /// While this is unlikely in general, it's the normal case of what you'll
+ /// find as the `func` in a [`TerminatorKind::Call`].
+ pub fn const_fn_def(&self) -> Option<(DefId, SubstsRef<'tcx>)> {
+ let const_ty = self.constant()?.literal.ty();
+ if let ty::FnDef(def_id, substs) = *const_ty.kind() { Some((def_id, substs)) } else { None }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Rvalues
+
+impl<'tcx> Rvalue<'tcx> {
+ /// Returns `true` if this rvalue can be safely removed when its result is unused.
+ #[inline]
+ pub fn is_safe_to_remove(&self) -> bool {
+ match self {
+ // Pointer-to-int casts may have side effects due to exposing the provenance.
+ // While the model is undecided, we should be conservative. See
+ // <https://www.ralfj.de/blog/2022/04/11/provenance-exposed.html>
+ Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => false,
+
+ Rvalue::Use(_)
+ | Rvalue::CopyForDeref(_)
+ | Rvalue::Repeat(_, _)
+ | Rvalue::Ref(_, _, _)
+ | Rvalue::ThreadLocalRef(_)
+ | Rvalue::AddressOf(_, _)
+ | Rvalue::Len(_)
+ | Rvalue::Cast(
+ CastKind::Misc | CastKind::Pointer(_) | CastKind::PointerFromExposedAddress,
+ _,
+ _,
+ )
+ | Rvalue::BinaryOp(_, _)
+ | Rvalue::CheckedBinaryOp(_, _)
+ | Rvalue::NullaryOp(_, _)
+ | Rvalue::UnaryOp(_, _)
+ | Rvalue::Discriminant(_)
+ | Rvalue::Aggregate(_, _)
+ | Rvalue::ShallowInitBox(_, _) => true,
+ }
+ }
+}
+
+impl BorrowKind {
+ pub fn allows_two_phase_borrow(&self) -> bool {
+ match *self {
+ BorrowKind::Shared | BorrowKind::Shallow | BorrowKind::Unique => false,
+ BorrowKind::Mut { allow_two_phase_borrow } => allow_two_phase_borrow,
+ }
+ }
+
+ pub fn describe_mutability(&self) -> &str {
+ match *self {
+ BorrowKind::Shared | BorrowKind::Shallow | BorrowKind::Unique => "immutable",
+ BorrowKind::Mut { .. } => "mutable",
+ }
+ }
+}
+
+impl BinOp {
+ pub fn is_checkable(self) -> bool {
+ use self::BinOp::*;
+ matches!(self, Add | Sub | Mul | Shl | Shr)
+ }
+}
+
+impl<'tcx> Debug for Rvalue<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use self::Rvalue::*;
+
+ match *self {
+ Use(ref place) => write!(fmt, "{:?}", place),
+ Repeat(ref a, b) => {
+ write!(fmt, "[{:?}; ", a)?;
+ pretty_print_const(b, fmt, false)?;
+ write!(fmt, "]")
+ }
+ Len(ref a) => write!(fmt, "Len({:?})", a),
+ Cast(ref kind, ref place, ref ty) => {
+ write!(fmt, "{:?} as {:?} ({:?})", place, ty, kind)
+ }
+ BinaryOp(ref op, box (ref a, ref b)) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
+ CheckedBinaryOp(ref op, box (ref a, ref b)) => {
+ write!(fmt, "Checked{:?}({:?}, {:?})", op, a, b)
+ }
+ UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a),
+ Discriminant(ref place) => write!(fmt, "discriminant({:?})", place),
+ NullaryOp(ref op, ref t) => write!(fmt, "{:?}({:?})", op, t),
+ ThreadLocalRef(did) => ty::tls::with(|tcx| {
+ let muta = tcx.static_mutability(did).unwrap().prefix_str();
+ write!(fmt, "&/*tls*/ {}{}", muta, tcx.def_path_str(did))
+ }),
+ Ref(region, borrow_kind, ref place) => {
+ let kind_str = match borrow_kind {
+ BorrowKind::Shared => "",
+ BorrowKind::Shallow => "shallow ",
+ BorrowKind::Mut { .. } | BorrowKind::Unique => "mut ",
+ };
+
+ // When printing regions, add trailing space if necessary.
+ let print_region = ty::tls::with(|tcx| {
+ tcx.sess.verbose() || tcx.sess.opts.unstable_opts.identify_regions
+ });
+ let region = if print_region {
+ let mut region = region.to_string();
+ if !region.is_empty() {
+ region.push(' ');
+ }
+ region
+ } else {
+ // Do not even print 'static
+ String::new()
+ };
+ write!(fmt, "&{}{}{:?}", region, kind_str, place)
+ }
+
+ CopyForDeref(ref place) => write!(fmt, "deref_copy {:#?}", place),
+
+ AddressOf(mutability, ref place) => {
+ let kind_str = match mutability {
+ Mutability::Mut => "mut",
+ Mutability::Not => "const",
+ };
+
+ write!(fmt, "&raw {} {:?}", kind_str, place)
+ }
+
+ Aggregate(ref kind, ref places) => {
+ let fmt_tuple = |fmt: &mut Formatter<'_>, name: &str| {
+ let mut tuple_fmt = fmt.debug_tuple(name);
+ for place in places {
+ tuple_fmt.field(place);
+ }
+ tuple_fmt.finish()
+ };
+
+ match **kind {
+ AggregateKind::Array(_) => write!(fmt, "{:?}", places),
+
+ AggregateKind::Tuple => {
+ if places.is_empty() {
+ write!(fmt, "()")
+ } else {
+ fmt_tuple(fmt, "")
+ }
+ }
+
+ AggregateKind::Adt(adt_did, variant, substs, _user_ty, _) => {
+ ty::tls::with(|tcx| {
+ let variant_def = &tcx.adt_def(adt_did).variant(variant);
+ let substs = tcx.lift(substs).expect("could not lift for printing");
+ let name = FmtPrinter::new(tcx, Namespace::ValueNS)
+ .print_def_path(variant_def.def_id, substs)?
+ .into_buffer();
+
+ match variant_def.ctor_kind {
+ CtorKind::Const => fmt.write_str(&name),
+ CtorKind::Fn => fmt_tuple(fmt, &name),
+ CtorKind::Fictive => {
+ let mut struct_fmt = fmt.debug_struct(&name);
+ for (field, place) in iter::zip(&variant_def.fields, places) {
+ struct_fmt.field(field.name.as_str(), place);
+ }
+ struct_fmt.finish()
+ }
+ }
+ })
+ }
+
+ AggregateKind::Closure(def_id, substs) => ty::tls::with(|tcx| {
+ let name = if tcx.sess.opts.unstable_opts.span_free_formats {
+ let substs = tcx.lift(substs).unwrap();
+ format!(
+ "[closure@{}]",
+ tcx.def_path_str_with_substs(def_id.to_def_id(), substs),
+ )
+ } else {
+ let span = tcx.def_span(def_id);
+ format!(
+ "[closure@{}]",
+ tcx.sess.source_map().span_to_diagnostic_string(span)
+ )
+ };
+ let mut struct_fmt = fmt.debug_struct(&name);
+
+ // FIXME(project-rfc-2229#48): This should be a list of capture names/places
+ if let Some(upvars) = tcx.upvars_mentioned(def_id) {
+ for (&var_id, place) in iter::zip(upvars.keys(), places) {
+ let var_name = tcx.hir().name(var_id);
+ struct_fmt.field(var_name.as_str(), place);
+ }
+ }
+
+ struct_fmt.finish()
+ }),
+
+ AggregateKind::Generator(def_id, _, _) => ty::tls::with(|tcx| {
+ let name = format!("[generator@{:?}]", tcx.def_span(def_id));
+ let mut struct_fmt = fmt.debug_struct(&name);
+
+ // FIXME(project-rfc-2229#48): This should be a list of capture names/places
+ if let Some(upvars) = tcx.upvars_mentioned(def_id) {
+ for (&var_id, place) in iter::zip(upvars.keys(), places) {
+ let var_name = tcx.hir().name(var_id);
+ struct_fmt.field(var_name.as_str(), place);
+ }
+ }
+
+ struct_fmt.finish()
+ }),
+ }
+ }
+
+ ShallowInitBox(ref place, ref ty) => {
+ write!(fmt, "ShallowInitBox({:?}, {:?})", place, ty)
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Constants
+//
+// Two constants are equal if they are the same constant. Note that
+// this does not necessarily mean that they are `==` in Rust. In
+// particular, one must be wary of `NaN`!
+
+#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub struct Constant<'tcx> {
+ pub span: Span,
+
+ /// Optional user-given type: for something like
+ /// `collect::<Vec<_>>`, this would be present and would
+ /// indicate that `Vec<_>` was explicitly specified.
+ ///
+ /// Needed for NLL to impose user-given type constraints.
+ pub user_ty: Option<UserTypeAnnotationIndex>,
+
+ pub literal: ConstantKind<'tcx>,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable, Debug)]
+#[derive(Lift)]
+pub enum ConstantKind<'tcx> {
+ /// This constant came from the type system
+ Ty(ty::Const<'tcx>),
+ /// This constant cannot go back into the type system, as it represents
+ /// something the type system cannot handle (e.g. pointers).
+ Val(interpret::ConstValue<'tcx>, Ty<'tcx>),
+}
+
+impl<'tcx> Constant<'tcx> {
+ pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
+ match self.literal.try_to_scalar() {
+ Some(Scalar::Ptr(ptr, _size)) => match tcx.global_alloc(ptr.provenance) {
+ GlobalAlloc::Static(def_id) => {
+ assert!(!tcx.is_thread_local_static(def_id));
+ Some(def_id)
+ }
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+ #[inline]
+ pub fn ty(&self) -> Ty<'tcx> {
+ self.literal.ty()
+ }
+}
+
+impl<'tcx> ConstantKind<'tcx> {
+ /// Returns `None` if the constant is not trivially safe for use in the type system.
+ #[inline]
+ pub fn const_for_ty(&self) -> Option<ty::Const<'tcx>> {
+ match self {
+ ConstantKind::Ty(c) => Some(*c),
+ ConstantKind::Val(..) => None,
+ }
+ }
+
+ #[inline(always)]
+ pub fn ty(&self) -> Ty<'tcx> {
+ match self {
+ ConstantKind::Ty(c) => c.ty(),
+ ConstantKind::Val(_, ty) => *ty,
+ }
+ }
+
+ #[inline]
+ pub fn try_to_value(self, tcx: TyCtxt<'tcx>) -> Option<interpret::ConstValue<'tcx>> {
+ match self {
+ ConstantKind::Ty(c) => match c.kind() {
+ ty::ConstKind::Value(valtree) => Some(tcx.valtree_to_const_val((c.ty(), valtree))),
+ _ => None,
+ },
+ ConstantKind::Val(val, _) => Some(val),
+ }
+ }
+
+ #[inline]
+ pub fn try_to_scalar(self) -> Option<Scalar> {
+ match self {
+ ConstantKind::Ty(c) => match c.kind() {
+ ty::ConstKind::Value(valtree) => match valtree {
+ ty::ValTree::Leaf(scalar_int) => Some(Scalar::Int(scalar_int)),
+ ty::ValTree::Branch(_) => None,
+ },
+ _ => None,
+ },
+ ConstantKind::Val(val, _) => val.try_to_scalar(),
+ }
+ }
+
+ #[inline]
+ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
+ Some(self.try_to_scalar()?.assert_int())
+ }
+
+ #[inline]
+ pub fn try_to_bits(self, size: Size) -> Option<u128> {
+ self.try_to_scalar_int()?.to_bits(size).ok()
+ }
+
+ #[inline]
+ pub fn try_to_bool(self) -> Option<bool> {
+ self.try_to_scalar_int()?.try_into().ok()
+ }
+
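+ /// Tries to evaluate a `Ty` constant down to a `Val`. Returns `self` unchanged
+ /// if evaluation is not (yet) possible, and an error constant if evaluation failed.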
+ #[inline]
+ pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+ match self {
+ Self::Ty(c) => {
+ if let Some(val) = c.kind().try_eval_for_mir(tcx, param_env) {
+ match val {
+ Ok(val) => Self::Val(val, c.ty()),
+ Err(_) => Self::Ty(tcx.const_error(self.ty())),
+ }
+ } else {
+ self
+ }
+ }
+ Self::Val(_, _) => self,
+ }
+ }
+
+ /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
+ #[inline]
+ pub fn eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> u128 {
+ self.try_eval_bits(tcx, param_env, ty)
+ .unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", ty, self))
+ }
+
+ #[inline]
+ pub fn try_eval_bits(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Option<u128> {
+ match self {
+ Self::Ty(ct) => ct.try_eval_bits(tcx, param_env, ty),
+ Self::Val(val, t) => {
+ assert_eq!(*t, ty);
+ let size =
+ tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+ val.try_to_bits(size)
+ }
+ }
+ }
+
+ #[inline]
+ pub fn try_eval_bool(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<bool> {
+ match self {
+ Self::Ty(ct) => ct.try_eval_bool(tcx, param_env),
+ Self::Val(val, _) => val.try_to_bool(),
+ }
+ }
+
+ #[inline]
+ pub fn try_eval_usize(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<u64> {
+ match self {
+ Self::Ty(ct) => ct.try_eval_usize(tcx, param_env),
+ Self::Val(val, _) => val.try_to_machine_usize(tcx),
+ }
+ }
+
+ #[inline]
+ pub fn from_value(val: ConstValue<'tcx>, ty: Ty<'tcx>) -> Self {
+ Self::Val(val, ty)
+ }
+
+ pub fn from_bits(
+ tcx: TyCtxt<'tcx>,
+ bits: u128,
+ param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+ ) -> Self {
+ let size = tcx
+ .layout_of(param_env_ty)
+ .unwrap_or_else(|e| {
+ bug!("could not compute layout for {:?}: {:?}", param_env_ty.value, e)
+ })
+ .size;
+ let cv = ConstValue::Scalar(Scalar::from_uint(bits, size));
+
+ Self::Val(cv, param_env_ty.value)
+ }
+
+ #[inline]
+ pub fn from_bool(tcx: TyCtxt<'tcx>, v: bool) -> Self {
+ let cv = ConstValue::from_bool(v);
+ Self::Val(cv, tcx.types.bool)
+ }
+
+ #[inline]
+ pub fn zero_sized(ty: Ty<'tcx>) -> Self {
+ let cv = ConstValue::ZeroSized;
+ Self::Val(cv, ty)
+ }
+
+ pub fn from_usize(tcx: TyCtxt<'tcx>, n: u64) -> Self {
+ let ty = tcx.types.usize;
+ Self::from_bits(tcx, n as u128, ty::ParamEnv::empty().and(ty))
+ }
+
+ #[inline]
+ pub fn from_scalar(_tcx: TyCtxt<'tcx>, s: Scalar, ty: Ty<'tcx>) -> Self {
+ let val = ConstValue::Scalar(s);
+ Self::Val(val, ty)
+ }
+
+ /// Literals are converted to `ConstantKind::Val`; const generic parameters are eagerly
+ /// converted to a constant; everything else becomes `Unevaluated`.
+ pub fn from_anon_const(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ Self::from_opt_const_arg_anon_const(tcx, ty::WithOptConstParam::unknown(def_id), param_env)
+ }
+
+ #[instrument(skip(tcx), level = "debug")]
+ pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let body_id = match tcx.hir().get(hir_id) {
+ hir::Node::AnonConst(ac) => ac.body,
+ _ => span_bug!(
+ tcx.def_span(def_id.to_def_id()),
+ "from_inline_const can only process anonymous constants"
+ ),
+ };
+ let expr = &tcx.hir().body(body_id).value;
+ let ty = tcx.typeck(def_id).node_type(hir_id);
+
+ let lit_input = match expr.kind {
+ hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }),
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => match expr.kind {
+ hir::ExprKind::Lit(ref lit) => {
+ Some(LitToConstInput { lit: &lit.node, ty, neg: true })
+ }
+ _ => None,
+ },
+ _ => None,
+ };
+ if let Some(lit_input) = lit_input {
+ // If an error occurred, ignore that it's a literal and leave reporting the error
+ // up to MIR.
+ match tcx.at(expr.span).lit_to_mir_constant(lit_input) {
+ Ok(c) => return c,
+ Err(_) => {}
+ }
+ }
+
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id());
+ let parent_substs =
+ tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
+ let substs =
+ ty::InlineConstSubsts::new(tcx, ty::InlineConstSubstsParts { parent_substs, ty })
+ .substs;
+ let uneval_const = tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: ty::WithOptConstParam::unknown(def_id).to_global(),
+ substs,
+ promoted: None,
+ }),
+ ty,
+ });
+ debug!(?uneval_const);
+ debug_assert!(!uneval_const.has_free_regions());
+
+ Self::Ty(uneval_const)
+ }
+
+ #[instrument(skip(tcx), level = "debug")]
+ fn from_opt_const_arg_anon_const(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ let body_id = match tcx.hir().get_by_def_id(def.did) {
+ hir::Node::AnonConst(ac) => ac.body,
+ _ => span_bug!(
+ tcx.def_span(def.did.to_def_id()),
+ "from_anon_const can only process anonymous constants"
+ ),
+ };
+
+ let expr = &tcx.hir().body(body_id).value;
+ debug!(?expr);
+
+ // Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
+ // currently have to be wrapped in curly brackets, so it's necessary to special-case.
+ let expr = match &expr.kind {
+ hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
+ block.expr.as_ref().unwrap()
+ }
+ _ => expr,
+ };
+ debug!("expr.kind: {:?}", expr.kind);
+
+ let ty = tcx.type_of(def.def_id_for_type_of());
+ debug!(?ty);
+
+ // FIXME(const_generics): We currently have to special-case parameters because
+ // `min_const_generics` does not provide the parent's generics to anonymous constants.
+ // However, we still allow generic const parameters by themselves, e.g. `N`. These
+ // constants would cause an ICE if we ever tried to substitute the generic parameters
+ // in their bodies.
+ //
+ // This doesn't happen today, since such constants are always used as `ty::ConstKind::Param`,
+ // but it would cause issues if we removed that special case and tried to evaluate the
+ // constant instead.
+ use hir::{def::DefKind::ConstParam, def::Res, ExprKind, Path, QPath};
+ match expr.kind {
+ ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
+ // Find the name and index of the const parameter by indexing the generics of
+ // the parent item and construct a `ParamConst`.
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let item_id = tcx.hir().get_parent_node(hir_id);
+ let item_def_id = tcx.hir().local_def_id(item_id);
+ let generics = tcx.generics_of(item_def_id.to_def_id());
+ let index = generics.param_def_id_to_index[&def_id];
+ let name = tcx.hir().name(hir_id);
+ let ty_const = tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Param(ty::ParamConst::new(index, name)),
+ ty,
+ });
+ debug!(?ty_const);
+
+ return Self::Ty(ty_const);
+ }
+ _ => {}
+ }
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
+ let parent_substs = if let Some(parent_hir_id) = tcx.hir().find_parent_node(hir_id) {
+ if let Some(parent_did) = tcx.hir().opt_local_def_id(parent_hir_id) {
+ InternalSubsts::identity_for_item(tcx, parent_did.to_def_id())
+ } else {
+ tcx.mk_substs(Vec::<GenericArg<'tcx>>::new().into_iter())
+ }
+ } else {
+ tcx.mk_substs(Vec::<GenericArg<'tcx>>::new().into_iter())
+ };
+ debug!(?parent_substs);
+
+ let did = def.did.to_def_id();
+ let child_substs = InternalSubsts::identity_for_item(tcx, did);
+ let substs = tcx.mk_substs(parent_substs.into_iter().chain(child_substs.into_iter()));
+ debug!(?substs);
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
+ let span = tcx.hir().span(hir_id);
+ let uneval = ty::Unevaluated::new(def.to_global(), substs);
+ debug!(?span, ?param_env);
+
+ match tcx.const_eval_resolve(param_env, uneval, Some(span)) {
+ Ok(val) => {
+ debug!("evaluated const value: {:?}", val);
+ Self::Val(val, ty)
+ }
+ Err(_) => {
+ debug!("error encountered during evaluation");
+ // Error was handled in `const_eval_resolve`. Here we just create a
+ // new unevaluated const and error hard later in codegen.
+ let ty_const = tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: def.to_global(),
+ substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
+ promoted: None,
+ }),
+ ty,
+ });
+ debug!(?ty_const);
+
+ Self::Ty(ty_const)
+ }
+ }
+ }
+
+ pub fn from_const(c: ty::Const<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+ match c.kind() {
+ ty::ConstKind::Value(valtree) => {
+ let const_val = tcx.valtree_to_const_val((c.ty(), valtree));
+ Self::Val(const_val, c.ty())
+ }
+ _ => Self::Ty(c),
+ }
+ }
+}
+
+/// A collection of projections into user types.
+///
+/// They are projections because a binding can occur as part of a
+/// parent pattern that has been ascribed a type.
+///
+/// It's a collection because there can be multiple type ascriptions on
+/// the path from the root of the pattern down to the binding itself.
+///
+/// An example:
+///
+/// ```ignore (illustrative)
+/// struct S<'a>((i32, &'a str), String);
+/// let S((_, w): (i32, &'static str), _): S = ...;
+/// // ------ ^^^^^^^^^^^^^^^^^^^ (1)
+/// // --------------------------------- ^ (2)
+/// ```
+///
+/// The highlights labelled `(1)` show the subpattern `(_, w)` being
+/// ascribed the type `(i32, &'static str)`.
+///
+/// The highlights labelled `(2)` show the whole pattern being
+/// ascribed the type `S`.
+///
+/// In this example, when we descend to `w`, we will have built up the
+/// following two projected types:
+///
+/// * base: `S`, projection: `(base.0).1`
+/// * base: `(i32, &'static str)`, projection: `base.1`
+///
+/// The first will lead to the constraint `w: &'1 str` (for some
+/// inferred region `'1`). The second will lead to the constraint `w:
+/// &'static str`.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct UserTypeProjections {
+ pub contents: Vec<(UserTypeProjection, Span)>,
+}
+
+impl<'tcx> UserTypeProjections {
+ pub fn none() -> Self {
+ UserTypeProjections { contents: vec![] }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.contents.is_empty()
+ }
+
+ pub fn projections_and_spans(
+ &self,
+ ) -> impl Iterator<Item = &(UserTypeProjection, Span)> + ExactSizeIterator {
+ self.contents.iter()
+ }
+
+ pub fn projections(&self) -> impl Iterator<Item = &UserTypeProjection> + ExactSizeIterator {
+ self.contents.iter().map(|&(ref user_type, _span)| user_type)
+ }
+
+ pub fn push_projection(mut self, user_ty: &UserTypeProjection, span: Span) -> Self {
+ self.contents.push((user_ty.clone(), span));
+ self
+ }
+
+ fn map_projections(
+ mut self,
+ mut f: impl FnMut(UserTypeProjection) -> UserTypeProjection,
+ ) -> Self {
+ self.contents = self.contents.into_iter().map(|(proj, span)| (f(proj), span)).collect();
+ self
+ }
+
+ pub fn index(self) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.index())
+ }
+
+ pub fn subslice(self, from: u64, to: u64) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.subslice(from, to))
+ }
+
+ pub fn deref(self) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.deref())
+ }
+
+ pub fn leaf(self, field: Field) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.leaf(field))
+ }
+
+ pub fn variant(self, adt_def: AdtDef<'tcx>, variant_index: VariantIdx, field: Field) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.variant(adt_def, variant_index, field))
+ }
+}
+
+/// Encodes the effect of a user-supplied type annotation on the
+/// subcomponents of a pattern. The effect is determined by applying the
+/// given list of projections to some underlying base type. Often,
+/// the projection element list `projs` is empty, in which case this
+/// directly encodes a type in `base`. But in the case of complex patterns with
+/// subpatterns and bindings, we want to apply only a *part* of the type to a variable,
+/// in which case the `projs` vector is used.
+///
+/// Examples:
+///
+/// * `let x: T = ...` -- here, the `projs` vector is empty.
+///
+/// * `let (x, _): T = ...` -- here, the `projs` vector would contain
+/// `field[0]` (aka `.0`), indicating that the type of `x` is
+/// determined by finding the type of the `.0` field from `T`.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+pub struct UserTypeProjection {
+ pub base: UserTypeAnnotationIndex,
+ pub projs: Vec<ProjectionKind>,
+}
+
+impl Copy for ProjectionKind {}
+
+impl UserTypeProjection {
+ pub(crate) fn index(mut self) -> Self {
+ self.projs.push(ProjectionElem::Index(()));
+ self
+ }
+
+ pub(crate) fn subslice(mut self, from: u64, to: u64) -> Self {
+ self.projs.push(ProjectionElem::Subslice { from, to, from_end: true });
+ self
+ }
+
+ pub(crate) fn deref(mut self) -> Self {
+ self.projs.push(ProjectionElem::Deref);
+ self
+ }
+
+ pub(crate) fn leaf(mut self, field: Field) -> Self {
+ self.projs.push(ProjectionElem::Field(field, ()));
+ self
+ }
+
+ pub(crate) fn variant(
+ mut self,
+ adt_def: AdtDef<'_>,
+ variant_index: VariantIdx,
+ field: Field,
+ ) -> Self {
+ self.projs.push(ProjectionElem::Downcast(
+ Some(adt_def.variant(variant_index).name),
+ variant_index,
+ ));
+ self.projs.push(ProjectionElem::Field(field, ()));
+ self
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! { ProjectionKind, }
+
+impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(UserTypeProjection {
+ base: self.base.try_fold_with(folder)?,
+ projs: self.projs.try_fold_with(folder)?,
+ })
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for UserTypeProjection {
+ fn visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> ControlFlow<Vs::BreakTy> {
+ self.base.visit_with(visitor)
+ // Note: there's nothing in `self.projs` to visit.
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct Promoted {
+ derive [HashStable]
+ DEBUG_FORMAT = "promoted[{}]"
+ }
+}
+
+impl<'tcx> Debug for Constant<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{}", self)
+ }
+}
+
+impl<'tcx> Display for Constant<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ match self.ty().kind() {
+ ty::FnDef(..) => {}
+ _ => write!(fmt, "const ")?,
+ }
+ Display::fmt(&self.literal, fmt)
+ }
+}
+
+impl<'tcx> Display for ConstantKind<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ match *self {
+ ConstantKind::Ty(c) => pretty_print_const(c, fmt, true),
+ ConstantKind::Val(val, ty) => pretty_print_const_value(val, ty, fmt, true),
+ }
+ }
+}
+
+fn pretty_print_const<'tcx>(
+ c: ty::Const<'tcx>,
+ fmt: &mut Formatter<'_>,
+ print_types: bool,
+) -> fmt::Result {
+ use crate::ty::print::PrettyPrinter;
+ ty::tls::with(|tcx| {
+ let literal = tcx.lift(c).unwrap();
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let cx = cx.pretty_print_const(literal, print_types)?;
+ fmt.write_str(&cx.into_buffer())?;
+ Ok(())
+ })
+}
+
+fn pretty_print_byte_str(fmt: &mut Formatter<'_>, byte_str: &[u8]) -> fmt::Result {
+ fmt.write_str("b\"")?;
+ for &c in byte_str {
+ for e in std::ascii::escape_default(c) {
+ fmt.write_char(e as char)?;
+ }
+ }
+ fmt.write_str("\"")?;
+
+ Ok(())
+}
+
+fn comma_sep<'tcx>(fmt: &mut Formatter<'_>, elems: Vec<ConstantKind<'tcx>>) -> fmt::Result {
+ let mut first = true;
+ for elem in elems {
+ if !first {
+ fmt.write_str(", ")?;
+ }
+ fmt.write_str(&format!("{}", elem))?;
+ first = false;
+ }
+ Ok(())
+}
+
+// FIXME: Move that into `mir/pretty.rs`.
+fn pretty_print_const_value<'tcx>(
+ ct: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ fmt: &mut Formatter<'_>,
+ print_ty: bool,
+) -> fmt::Result {
+ use crate::ty::print::PrettyPrinter;
+
+ ty::tls::with(|tcx| {
+ let ct = tcx.lift(ct).unwrap();
+ let ty = tcx.lift(ty).unwrap();
+
+ if tcx.sess.verbose() {
+ fmt.write_str(&format!("ConstValue({:?}: {})", ct, ty))?;
+ return Ok(());
+ }
+
+ let u8_type = tcx.types.u8;
+ match (ct, ty.kind()) {
+ // Byte/string slices, printed as (byte) string literals.
+ (ConstValue::Slice { data, start, end }, ty::Ref(_, inner, _)) => {
+ match inner.kind() {
+ ty::Slice(t) => {
+ if *t == u8_type {
+ // The `inspect` here is okay since we checked the bounds, and there are
+ // no relocations (we have an active slice reference here). We don't use
+ // this result to affect interpreter execution.
+ let byte_str = data
+ .inner()
+ .inspect_with_uninit_and_ptr_outside_interpreter(start..end);
+ pretty_print_byte_str(fmt, byte_str)?;
+ return Ok(());
+ }
+ }
+ ty::Str => {
+ // The `inspect` here is okay since we checked the bounds, and there are no
+ // relocations (we have an active `str` reference here). We don't use this
+ // result to affect interpreter execution.
+ let slice = data
+ .inner()
+ .inspect_with_uninit_and_ptr_outside_interpreter(start..end);
+ fmt.write_str(&format!("{:?}", String::from_utf8_lossy(slice)))?;
+ return Ok(());
+ }
+ _ => {}
+ }
+ }
+ (ConstValue::ByRef { alloc, offset }, ty::Array(t, n)) if *t == u8_type => {
+ let n = n.kind().try_to_bits(tcx.data_layout.pointer_size).unwrap();
+ // cast is ok because we already checked for pointer size (32 or 64 bit) above
+ let range = AllocRange { start: offset, size: Size::from_bytes(n) };
+ let byte_str = alloc.inner().get_bytes(&tcx, range).unwrap();
+ fmt.write_str("*")?;
+ pretty_print_byte_str(fmt, byte_str)?;
+ return Ok(());
+ }
+ // Aggregates, printed as array/tuple/struct/variant construction syntax.
+ //
+ // NB: the `has_param_types_or_consts` check ensures that we can use
+ // the `destructure_const` query with an empty `ty::ParamEnv` without
+ // introducing ICEs (e.g. via `layout_of`) from missing bounds.
+ // E.g. `transmute([0usize; 2]): (u8, *mut T)` needs to know `T: Sized`
+ // to be able to destructure the tuple into `(0u8, *mut T)`.
+ //
+ // FIXME(eddyb) for `--emit=mir`/`-Z dump-mir`, we should provide the
+ // correct `ty::ParamEnv` to allow printing *all* constant values.
+ (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_param_types_or_consts() => {
+ let ct = tcx.lift(ct).unwrap();
+ let ty = tcx.lift(ty).unwrap();
+ if let Some(contents) = tcx.try_destructure_mir_constant(
+ ty::ParamEnv::reveal_all().and(ConstantKind::Val(ct, ty)),
+ ) {
+ let fields = contents.fields.iter().copied().collect::<Vec<_>>();
+ match *ty.kind() {
+ ty::Array(..) => {
+ fmt.write_str("[")?;
+ comma_sep(fmt, fields)?;
+ fmt.write_str("]")?;
+ }
+ ty::Tuple(..) => {
+ fmt.write_str("(")?;
+ comma_sep(fmt, fields)?;
+ if contents.fields.len() == 1 {
+ fmt.write_str(",")?;
+ }
+ fmt.write_str(")")?;
+ }
+ ty::Adt(def, _) if def.variants().is_empty() => {
+ fmt.write_str(&format!("{{unreachable(): {}}}", ty))?;
+ }
+ ty::Adt(def, substs) => {
+ let variant_idx = contents
+ .variant
+ .expect("destructed mir constant of adt without variant idx");
+ let variant_def = &def.variant(variant_idx);
+ let substs = tcx.lift(substs).unwrap();
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let cx = cx.print_value_path(variant_def.def_id, substs)?;
+ fmt.write_str(&cx.into_buffer())?;
+
+ match variant_def.ctor_kind {
+ CtorKind::Const => {}
+ CtorKind::Fn => {
+ fmt.write_str("(")?;
+ comma_sep(fmt, fields)?;
+ fmt.write_str(")")?;
+ }
+ CtorKind::Fictive => {
+ fmt.write_str(" {{ ")?;
+ let mut first = true;
+ for (field_def, field) in iter::zip(&variant_def.fields, fields)
+ {
+ if !first {
+ fmt.write_str(", ")?;
+ }
+ fmt.write_str(&format!("{}: {}", field_def.name, field))?;
+ first = false;
+ }
+ fmt.write_str(" }}")?;
+ }
+ }
+ }
+ _ => unreachable!(),
+ }
+ return Ok(());
+ } else {
+ // Fall back to debug pretty printing for invalid constants.
+ fmt.write_str(&format!("{:?}", ct))?;
+ if print_ty {
+ fmt.write_str(&format!(": {}", ty))?;
+ }
+ return Ok(());
+ };
+ }
+ (ConstValue::Scalar(scalar), _) => {
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let ty = tcx.lift(ty).unwrap();
+ cx = cx.pretty_print_const_scalar(scalar, ty, print_ty)?;
+ fmt.write_str(&cx.into_buffer())?;
+ return Ok(());
+ }
+ (ConstValue::ZeroSized, ty::FnDef(d, s)) => {
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let cx = cx.print_value_path(*d, s)?;
+ fmt.write_str(&cx.into_buffer())?;
+ return Ok(());
+ }
+ // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading
+ // their fields instead of just dumping the memory.
+ _ => {}
+ }
+ // fallback
+ fmt.write_str(&format!("{:?}", ct))?;
+ if print_ty {
+ fmt.write_str(&format!(": {}", ty))?;
+ }
+ Ok(())
+ })
+}
+
+/// `Location` represents the position of the start of the statement; or, if
+/// `statement_index` equals the number of statements, then the start of the
+/// terminator.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, HashStable)]
+pub struct Location {
+ /// The block that the location is within.
+ pub block: BasicBlock,
+
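+ /// The index of the statement within `block`; if it equals the number of
+ /// statements, the location refers to the terminator.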
+ pub statement_index: usize,
+}
+
+impl fmt::Debug for Location {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{:?}[{}]", self.block, self.statement_index)
+ }
+}
+
+impl Location {
+ pub const START: Location = Location { block: START_BLOCK, statement_index: 0 };
+
+ /// Returns the location immediately after this one within the enclosing block.
+ ///
+ /// Note that if this location represents a terminator, then the
+ /// resulting location would be out of bounds and invalid.
+ pub fn successor_within_block(&self) -> Location {
+ Location { block: self.block, statement_index: self.statement_index + 1 }
+ }
+
+ /// Returns `true` if `other` is earlier in the control flow graph than `self`.
+ pub fn is_predecessor_of<'tcx>(&self, other: Location, body: &Body<'tcx>) -> bool {
+ // If we are in the same block as the other location and are an earlier statement
+ // then we are a predecessor of `other`.
+ if self.block == other.block && self.statement_index < other.statement_index {
+ return true;
+ }
+
+ let predecessors = body.basic_blocks.predecessors();
+
+ // If we're in another block, then we want to check that block is a predecessor of `other`.
+ let mut queue: Vec<BasicBlock> = predecessors[other.block].to_vec();
+ let mut visited = FxHashSet::default();
+
+ while let Some(block) = queue.pop() {
+ // If we haven't visited this block before, then make sure we visit its predecessors.
+ if visited.insert(block) {
+ queue.extend(predecessors[block].iter().cloned());
+ } else {
+ continue;
+ }
+
+ // If we found the block that `self` is in, then we are a predecessor of `other` (since
+ // we found that block by looking at the predecessors of `other`).
+ if self.block == block {
+ return true;
+ }
+ }
+
+ false
+ }
+
+ pub fn dominates(&self, other: Location, dominators: &Dominators<BasicBlock>) -> bool {
+ if self.block == other.block {
+ self.statement_index <= other.statement_index
+ } else {
+ dominators.is_dominated_by(other.block, self.block)
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
new file mode 100644
index 000000000..21ae121e1
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -0,0 +1,527 @@
+use crate::dep_graph::{DepNode, WorkProduct, WorkProductId};
+use crate::ty::{subst::InternalSubsts, Instance, InstanceDef, SymbolName, TyCtxt};
+use rustc_attr::InlineAttr;
+use rustc_data_structures::base_n;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc_hir::ItemId;
+use rustc_index::vec::Idx;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_session::config::OptLevel;
+use rustc_span::source_map::Span;
+use rustc_span::symbol::Symbol;
+use std::fmt;
+use std::hash::Hash;
+
+/// Describes how a monomorphization will be instantiated in object files.
+#[derive(PartialEq)]
+pub enum InstantiationMode {
+ /// There will be exactly one instance of the given MonoItem. It will have
+ /// external linkage so that it can be linked to from other codegen units.
+ GloballyShared {
+ /// In some compilation scenarios we may decide to take functions that
+ /// are typically `LocalCopy` and instead move them to `GloballyShared`
+ /// to avoid codegenning them a bunch of times. In this situation,
+ /// however, our local copy may conflict with other crates also
+ /// inlining the same function.
+ ///
+ /// This flag indicates that this situation is occurring, and informs
+ /// symbol name calculation that some extra mangling is needed to
+ /// avoid conflicts. Note that this may eventually go away entirely if
+ /// ThinLTO enables us to *always* have a globally shared instance of a
+ /// function within one crate's compilation.
+ may_conflict: bool,
+ },
+
+ /// Each codegen unit containing a reference to the given MonoItem will
+ /// have its own private copy of the function (with internal linkage).
+ LocalCopy,
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash, HashStable)]
+pub enum MonoItem<'tcx> {
+ Fn(Instance<'tcx>),
+ Static(DefId),
+ GlobalAsm(ItemId),
+}
+
+impl<'tcx> MonoItem<'tcx> {
+ /// Returns `true` if the mono item is user-defined (i.e. not compiler-generated, like shims).
+ pub fn is_user_defined(&self) -> bool {
+ match *self {
+ MonoItem::Fn(instance) => matches!(instance.def, InstanceDef::Item(..)),
+ MonoItem::Static(..) | MonoItem::GlobalAsm(..) => true,
+ }
+ }
+
+ pub fn size_estimate(&self, tcx: TyCtxt<'tcx>) -> usize {
+ match *self {
+ MonoItem::Fn(instance) => {
+ // Estimate the size of a function based on how many statements
+ // it contains.
+ tcx.instance_def_size_estimate(instance.def)
+ }
+ // Conservatively estimate the size of a static declaration
+ // or assembly to be 1.
+ MonoItem::Static(_) | MonoItem::GlobalAsm(_) => 1,
+ }
+ }
+
+ pub fn is_generic_fn(&self) -> bool {
+ match *self {
+ MonoItem::Fn(ref instance) => instance.substs.non_erasable_generics().next().is_some(),
+ MonoItem::Static(..) | MonoItem::GlobalAsm(..) => false,
+ }
+ }
+
+ pub fn symbol_name(&self, tcx: TyCtxt<'tcx>) -> SymbolName<'tcx> {
+ match *self {
+ MonoItem::Fn(instance) => tcx.symbol_name(instance),
+ MonoItem::Static(def_id) => tcx.symbol_name(Instance::mono(tcx, def_id)),
+ MonoItem::GlobalAsm(item_id) => {
+ SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.def_id))
+ }
+ }
+ }
+
+ pub fn instantiation_mode(&self, tcx: TyCtxt<'tcx>) -> InstantiationMode {
+ let generate_cgu_internal_copies = tcx
+ .sess
+ .opts
+ .unstable_opts
+ .inline_in_all_cgus
+ .unwrap_or_else(|| tcx.sess.opts.optimize != OptLevel::No)
+ && !tcx.sess.link_dead_code();
+
+ match *self {
+ MonoItem::Fn(ref instance) => {
+ let entry_def_id = tcx.entry_fn(()).map(|(id, _)| id);
+ // If this function isn't inlined or otherwise has an extern
+ // indicator, then we'll be creating a globally shared version.
+ if tcx.codegen_fn_attrs(instance.def_id()).contains_extern_indicator()
+ || !instance.def.generates_cgu_internal_copy(tcx)
+ || Some(instance.def_id()) == entry_def_id
+ {
+ return InstantiationMode::GloballyShared { may_conflict: false };
+ }
+
+ // At this point we don't have explicit linkage and we're an
+ // inlined function. If we're inlining into all CGUs then we'll
+ // be creating a local copy per CGU.
+ if generate_cgu_internal_copies {
+ return InstantiationMode::LocalCopy;
+ }
+
+ // Finally, if this is `#[inline(always)]` we're sure to respect
+ // that with an inline copy per CGU, but otherwise we'll be
+ // creating one copy of this `#[inline]` function which may
+ // conflict with upstream crates as it could be an exported
+ // symbol.
+ match tcx.codegen_fn_attrs(instance.def_id()).inline {
+ InlineAttr::Always => InstantiationMode::LocalCopy,
+ _ => InstantiationMode::GloballyShared { may_conflict: true },
+ }
+ }
+ MonoItem::Static(..) | MonoItem::GlobalAsm(..) => {
+ InstantiationMode::GloballyShared { may_conflict: false }
+ }
+ }
+ }
+
+ pub fn explicit_linkage(&self, tcx: TyCtxt<'tcx>) -> Option<Linkage> {
+ let def_id = match *self {
+ MonoItem::Fn(ref instance) => instance.def_id(),
+ MonoItem::Static(def_id) => def_id,
+ MonoItem::GlobalAsm(..) => return None,
+ };
+
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id);
+ codegen_fn_attrs.linkage
+ }
+
+ /// Returns `true` if this instance is instantiable, i.e. whether it has no
+ /// unsatisfied predicates.
+ ///
+ /// In order to codegen an item, all of its predicates must hold, because
+ /// otherwise the item does not make sense. Type-checking ensures that
+ /// the predicates of every item that is *used by* a valid item *do*
+ /// hold, so we can rely on that.
+ ///
+ /// However, we codegen collector roots (reachable items) and functions
+ /// in vtables when they are seen, even if they are not used, and so they
+ /// might not be instantiable. For example, a programmer can define this
+ /// public function:
+ ///
+ /// ```ignore (illustrative)
+ /// pub fn foo<'a>(s: &'a mut ()) where &'a mut (): Clone {
+ ///     <&mut () as Clone>::clone(&s);
+ /// }
+ /// ```
+ ///
+ /// That function can't be codegened, because the method `<&mut () as Clone>::clone`
+ /// does not exist. Luckily for us, that function can't ever be used,
+ /// because that would require `&'a mut (): Clone` to hold, so we
+ /// can just not emit any code, or even a linker reference for it.
+ ///
+ /// Similarly, if a vtable method has such a signature, and therefore can't
+ /// be used, we can just not emit it and have a placeholder (a null pointer,
+ /// which will never be accessed) in its place.
+ pub fn is_instantiable(&self, tcx: TyCtxt<'tcx>) -> bool {
+ debug!("is_instantiable({:?})", self);
+ let (def_id, substs) = match *self {
+ MonoItem::Fn(ref instance) => (instance.def_id(), instance.substs),
+ MonoItem::Static(def_id) => (def_id, InternalSubsts::empty()),
+ // global asm never has predicates
+ MonoItem::GlobalAsm(..) => return true,
+ };
+
+ !tcx.subst_and_check_impossible_predicates((def_id, &substs))
+ }
+
+ pub fn local_span(&self, tcx: TyCtxt<'tcx>) -> Option<Span> {
+ match *self {
+ MonoItem::Fn(Instance { def, .. }) => def.def_id().as_local(),
+ MonoItem::Static(def_id) => def_id.as_local(),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.def_id),
+ }
+ .map(|def_id| tcx.def_span(def_id))
+ }
+
+ // Only used by rustc_codegen_cranelift
+ pub fn codegen_dep_node(&self, tcx: TyCtxt<'tcx>) -> DepNode {
+ crate::dep_graph::make_compile_mono_item(tcx, self)
+ }
+
+ /// Returns the item's `CrateNum`
+ pub fn krate(&self) -> CrateNum {
+ match self {
+ MonoItem::Fn(ref instance) => instance.def_id().krate,
+ MonoItem::Static(def_id) => def_id.krate,
+ MonoItem::GlobalAsm(..) => LOCAL_CRATE,
+ }
+ }
+}
+
+impl<'tcx> fmt::Display for MonoItem<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ MonoItem::Fn(instance) => write!(f, "fn {}", instance),
+ MonoItem::Static(def_id) => {
+ write!(f, "static {}", Instance::new(def_id, InternalSubsts::empty()))
+ }
+ MonoItem::GlobalAsm(..) => write!(f, "global_asm"),
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct CodegenUnit<'tcx> {
+ /// A name for this CGU. Incremental compilation requires that this
+ /// name be unique amongst **all** crates. Therefore, it should
+ /// contain something unique to this crate (e.g., a module path)
+ /// as well as the crate name and disambiguator.
+ name: Symbol,
+ items: FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)>,
+ size_estimate: Option<usize>,
+ primary: bool,
+ /// True if this CGU is used to hold code coverage information for dead code,
+ /// false otherwise.
+ is_code_coverage_dead_code_cgu: bool,
+}
+
+/// Specifies the linkage type for a `MonoItem`.
+///
+/// See <https://llvm.org/docs/LangRef.html#linkage-types> for more details about these variants.
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum Linkage {
+ External,
+ AvailableExternally,
+ LinkOnceAny,
+ LinkOnceODR,
+ WeakAny,
+ WeakODR,
+ Appending,
+ Internal,
+ Private,
+ ExternalWeak,
+ Common,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, HashStable)]
+pub enum Visibility {
+ Default,
+ Hidden,
+ Protected,
+}
+
+impl<'tcx> CodegenUnit<'tcx> {
+ #[inline]
+ pub fn new(name: Symbol) -> CodegenUnit<'tcx> {
+ CodegenUnit {
+ name,
+ items: Default::default(),
+ size_estimate: None,
+ primary: false,
+ is_code_coverage_dead_code_cgu: false,
+ }
+ }
+
+ pub fn name(&self) -> Symbol {
+ self.name
+ }
+
+ pub fn set_name(&mut self, name: Symbol) {
+ self.name = name;
+ }
+
+ pub fn is_primary(&self) -> bool {
+ self.primary
+ }
+
+ pub fn make_primary(&mut self) {
+ self.primary = true;
+ }
+
+ pub fn items(&self) -> &FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
+ &self.items
+ }
+
+ pub fn items_mut(&mut self) -> &mut FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
+ &mut self.items
+ }
+
+ pub fn is_code_coverage_dead_code_cgu(&self) -> bool {
+ self.is_code_coverage_dead_code_cgu
+ }
+
+ /// Marks this CGU as the one used to contain code coverage information for dead code.
+ pub fn make_code_coverage_dead_code_cgu(&mut self) {
+ self.is_code_coverage_dead_code_cgu = true;
+ }
+
+ pub fn mangle_name(human_readable_name: &str) -> String {
+ // We generate an 80-bit hash from the name. This should be enough to
+ // avoid collisions and is still reasonably short for filenames.
+ let mut hasher = StableHasher::new();
+ human_readable_name.hash(&mut hasher);
+ let hash: u128 = hasher.finish();
+ let hash = hash & ((1u128 << 80) - 1);
+ base_n::encode(hash, base_n::CASE_INSENSITIVE)
+ }
+
+ pub fn estimate_size(&mut self, tcx: TyCtxt<'tcx>) {
+ // Estimate the size of a codegen unit as (approximately) the number of MIR
+ // statements it corresponds to.
+ self.size_estimate = Some(self.items.keys().map(|mi| mi.size_estimate(tcx)).sum());
+ }
+
+ #[inline]
+ pub fn size_estimate(&self) -> usize {
+ // Should only be called if `estimate_size` has previously been called.
+ self.size_estimate.expect("estimate_size must be called before getting a size_estimate")
+ }
+
+ pub fn modify_size_estimate(&mut self, delta: usize) {
+ assert!(self.size_estimate.is_some());
+ if let Some(size_estimate) = self.size_estimate {
+ self.size_estimate = Some(size_estimate + delta);
+ }
+ }
+
+ pub fn contains_item(&self, item: &MonoItem<'tcx>) -> bool {
+ self.items().contains_key(item)
+ }
+
+ pub fn work_product_id(&self) -> WorkProductId {
+ WorkProductId::from_cgu_name(self.name().as_str())
+ }
+
+ pub fn previous_work_product(&self, tcx: TyCtxt<'_>) -> WorkProduct {
+ let work_product_id = self.work_product_id();
+ tcx.dep_graph
+ .previous_work_product(&work_product_id)
+ .unwrap_or_else(|| panic!("Could not find work-product for CGU `{}`", self.name()))
+ }
+
+ pub fn items_in_deterministic_order(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ ) -> Vec<(MonoItem<'tcx>, (Linkage, Visibility))> {
+ // The codegen tests rely on items being processed in the same order as
+ // they appear in the file, so for local items, we sort by node_id first.
+ #[derive(PartialEq, Eq, PartialOrd, Ord)]
+ pub struct ItemSortKey<'tcx>(Option<usize>, SymbolName<'tcx>);
+
+ fn item_sort_key<'tcx>(tcx: TyCtxt<'tcx>, item: MonoItem<'tcx>) -> ItemSortKey<'tcx> {
+ ItemSortKey(
+ match item {
+ MonoItem::Fn(ref instance) => {
+ match instance.def {
+ // We only want to take HirIds of user-defined
+ // instances into account. The others don't matter for
+ // the codegen tests and can even make item order
+ // unstable.
+ InstanceDef::Item(def) => def.did.as_local().map(Idx::index),
+ InstanceDef::VTableShim(..)
+ | InstanceDef::ReifyShim(..)
+ | InstanceDef::Intrinsic(..)
+ | InstanceDef::FnPtrShim(..)
+ | InstanceDef::Virtual(..)
+ | InstanceDef::ClosureOnceShim { .. }
+ | InstanceDef::DropGlue(..)
+ | InstanceDef::CloneShim(..) => None,
+ }
+ }
+ MonoItem::Static(def_id) => def_id.as_local().map(Idx::index),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.def_id.index()),
+ },
+ item.symbol_name(tcx),
+ )
+ }
+
+ let mut items: Vec<_> = self.items().iter().map(|(&i, &l)| (i, l)).collect();
+ items.sort_by_cached_key(|&(i, _)| item_sort_key(tcx, i));
+ items
+ }
+
+ pub fn codegen_dep_node(&self, tcx: TyCtxt<'tcx>) -> DepNode {
+ crate::dep_graph::make_compile_codegen_unit(tcx, self.name())
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for CodegenUnit<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let CodegenUnit {
+ ref items,
+ name,
+ // The size estimate is not relevant to the hash
+ size_estimate: _,
+ primary: _,
+ is_code_coverage_dead_code_cgu,
+ } = *self;
+
+ name.hash_stable(hcx, hasher);
+ is_code_coverage_dead_code_cgu.hash_stable(hcx, hasher);
+
+ let mut items: Vec<(Fingerprint, _)> = items
+ .iter()
+ .map(|(mono_item, &attrs)| {
+ let mut hasher = StableHasher::new();
+ mono_item.hash_stable(hcx, &mut hasher);
+ let mono_item_fingerprint = hasher.finish();
+ (mono_item_fingerprint, attrs)
+ })
+ .collect();
+
+ items.sort_unstable_by_key(|i| i.0);
+ items.hash_stable(hcx, hasher);
+ }
+}
+
+pub struct CodegenUnitNameBuilder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ cache: FxHashMap<CrateNum, String>,
+}
+
+impl<'tcx> CodegenUnitNameBuilder<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>) -> Self {
+ CodegenUnitNameBuilder { tcx, cache: Default::default() }
+ }
+
+ /// CGU names should fulfill the following requirements:
+ /// - They should be able to act as a file name on any kind of file system
+ /// - They should not collide with other CGU names, even for different versions
+ /// of the same crate.
+ ///
+ /// Consequently, we don't use special characters except for '.' and '-' and we
+ /// prefix each name with the crate-name and crate-disambiguator.
+ ///
+ /// This function will build CGU names of the form:
+ ///
+ /// ```text
+ /// <crate-name>.<crate-disambiguator>[-in-<local-crate-id>](-<component>)*[.<special-suffix>]
+ /// <local-crate-id> = <local-crate-name>.<local-crate-disambiguator>
+ /// ```
+ ///
+ /// The '.' before `<special-suffix>` makes sure that names with a special
+ /// suffix can never collide with a name built out of regular Rust
+ /// identifiers (e.g., module paths).
+ pub fn build_cgu_name<I, C, S>(
+ &mut self,
+ cnum: CrateNum,
+ components: I,
+ special_suffix: Option<S>,
+ ) -> Symbol
+ where
+ I: IntoIterator<Item = C>,
+ C: fmt::Display,
+ S: fmt::Display,
+ {
+ let cgu_name = self.build_cgu_name_no_mangle(cnum, components, special_suffix);
+
+ if self.tcx.sess.opts.unstable_opts.human_readable_cgu_names {
+ cgu_name
+ } else {
+ Symbol::intern(&CodegenUnit::mangle_name(cgu_name.as_str()))
+ }
+ }
+
+ /// Same as `CodegenUnit::build_cgu_name()` but will never mangle the
+ /// resulting name.
+ pub fn build_cgu_name_no_mangle<I, C, S>(
+ &mut self,
+ cnum: CrateNum,
+ components: I,
+ special_suffix: Option<S>,
+ ) -> Symbol
+ where
+ I: IntoIterator<Item = C>,
+ C: fmt::Display,
+ S: fmt::Display,
+ {
+ use std::fmt::Write;
+
+ let mut cgu_name = String::with_capacity(64);
+
+ // Start out with the crate name and disambiguator
+ let tcx = self.tcx;
+ let crate_prefix = self.cache.entry(cnum).or_insert_with(|| {
+ // Whenever the cnum is not LOCAL_CRATE we also mix in the
+ // local crate's ID. Otherwise there can be collisions between CGUs
+ // instantiating stuff for upstream crates.
+ let local_crate_id = if cnum != LOCAL_CRATE {
+ let local_stable_crate_id = tcx.sess.local_stable_crate_id();
+ format!(
+ "-in-{}.{:08x}",
+ tcx.crate_name(LOCAL_CRATE),
+ local_stable_crate_id.to_u64() as u32,
+ )
+ } else {
+ String::new()
+ };
+
+ let stable_crate_id = tcx.sess.local_stable_crate_id();
+ format!(
+ "{}.{:08x}{}",
+ tcx.crate_name(cnum),
+ stable_crate_id.to_u64() as u32,
+ local_crate_id,
+ )
+ });
+
+ write!(cgu_name, "{}", crate_prefix).unwrap();
+
+ // Add the components
+ for component in components {
+ write!(cgu_name, "-{}", component).unwrap();
+ }
+
+ if let Some(special_suffix) = special_suffix {
+ // We add a dot in here so it cannot clash with anything in a regular
+ // Rust identifier
+ write!(cgu_name, ".{}", special_suffix).unwrap();
+ }
+
+ Symbol::intern(&cgu_name)
+ }
+}
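The naming scheme documented on `build_cgu_name` is easier to see divorced from rustc's interning and session machinery. Below is a minimal standalone sketch of the same concatenation, assuming a made-up crate name, stable-crate-id value, and components (nothing here is rustc API):

```rust
use std::fmt::Write;

// Imitates <crate-name>.<crate-disambiguator>(-<component>)*[.<special-suffix>].
fn demo_cgu_name(
    crate_name: &str,
    stable_crate_id: u64,
    components: &[&str],
    special_suffix: Option<&str>,
) -> String {
    // Crate prefix: the crate name plus the low 32 bits of its stable id, in hex.
    let mut name = format!("{}.{:08x}", crate_name, stable_crate_id as u32);
    for component in components {
        // Components are joined with '-'.
        write!(name, "-{}", component).unwrap();
    }
    if let Some(suffix) = special_suffix {
        // The '.' keeps the suffix from clashing with regular Rust identifiers.
        write!(name, ".{}", suffix).unwrap();
    }
    name
}

fn main() {
    let name = demo_cgu_name("my_crate", 0x1234_5678_9abc_def0, &["module"], Some("cgu.0"));
    assert_eq!(name, "my_crate.9abcdef0-module.cgu.0");
}
```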
diff --git a/compiler/rustc_middle/src/mir/patch.rs b/compiler/rustc_middle/src/mir/patch.rs
new file mode 100644
index 000000000..15496842d
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/patch.rs
@@ -0,0 +1,196 @@
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::*;
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+
+/// This struct represents a patch to MIR, which can add
+/// new statements and basic blocks and patch over block
+/// terminators.
+pub struct MirPatch<'tcx> {
+ patch_map: IndexVec<BasicBlock, Option<TerminatorKind<'tcx>>>,
+ new_blocks: Vec<BasicBlockData<'tcx>>,
+ new_statements: Vec<(Location, StatementKind<'tcx>)>,
+ new_locals: Vec<LocalDecl<'tcx>>,
+ resume_block: Option<BasicBlock>,
+ body_span: Span,
+ next_local: usize,
+}
+
+impl<'tcx> MirPatch<'tcx> {
+ pub fn new(body: &Body<'tcx>) -> Self {
+ let mut result = MirPatch {
+ patch_map: IndexVec::from_elem(None, body.basic_blocks()),
+ new_blocks: vec![],
+ new_statements: vec![],
+ new_locals: vec![],
+ next_local: body.local_decls.len(),
+ resume_block: None,
+ body_span: body.span,
+ };
+
+ // Check if we already have a resume block
+ for (bb, block) in body.basic_blocks().iter_enumerated() {
+ if let TerminatorKind::Resume = block.terminator().kind && block.statements.is_empty() {
+ result.resume_block = Some(bb);
+ break;
+ }
+ }
+
+ result
+ }
+
+ pub fn resume_block(&mut self) -> BasicBlock {
+ if let Some(bb) = self.resume_block {
+ return bb;
+ }
+
+ let bb = self.new_block(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator {
+ source_info: SourceInfo::outermost(self.body_span),
+ kind: TerminatorKind::Resume,
+ }),
+ is_cleanup: true,
+ });
+ self.resume_block = Some(bb);
+ bb
+ }
+
+ pub fn is_patched(&self, bb: BasicBlock) -> bool {
+ self.patch_map[bb].is_some()
+ }
+
+ pub fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
+ let offset = match bb.index().checked_sub(body.basic_blocks().len()) {
+ Some(index) => self.new_blocks[index].statements.len(),
+ None => body[bb].statements.len(),
+ };
+ Location { block: bb, statement_index: offset }
+ }
+
+ pub fn new_local_with_info(
+ &mut self,
+ ty: Ty<'tcx>,
+ span: Span,
+ local_info: Option<Box<LocalInfo<'tcx>>>,
+ ) -> Local {
+ let index = self.next_local;
+ self.next_local += 1;
+ let mut new_decl = LocalDecl::new(ty, span);
+ new_decl.local_info = local_info;
+ self.new_locals.push(new_decl);
+ Local::new(index)
+ }
+
+ pub fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
+ self.new_local_with_info(ty, span, None)
+ }
+
+ pub fn new_internal(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
+ let index = self.next_local;
+ self.next_local += 1;
+ self.new_locals.push(LocalDecl::new(ty, span).internal());
+ Local::new(index)
+ }
+
+ pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock {
+ let block = BasicBlock::new(self.patch_map.len());
+ debug!("MirPatch: new_block: {:?}: {:?}", block, data);
+ self.new_blocks.push(data);
+ self.patch_map.push(None);
+ block
+ }
+
+ pub fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) {
+ assert!(self.patch_map[block].is_none());
+ debug!("MirPatch: patch_terminator({:?}, {:?})", block, new);
+ self.patch_map[block] = Some(new);
+ }
+
+ pub fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) {
+ debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt);
+ self.new_statements.push((loc, stmt));
+ }
+
+ pub fn add_assign(&mut self, loc: Location, place: Place<'tcx>, rv: Rvalue<'tcx>) {
+ self.add_statement(loc, StatementKind::Assign(Box::new((place, rv))));
+ }
+
+ pub fn apply(self, body: &mut Body<'tcx>) {
+ debug!(
+ "MirPatch: {:?} new temps, starting from index {}: {:?}",
+ self.new_locals.len(),
+ body.local_decls.len(),
+ self.new_locals
+ );
+ debug!(
+ "MirPatch: {} new blocks, starting from index {}",
+ self.new_blocks.len(),
+ body.basic_blocks().len()
+ );
+ let bbs = if self.patch_map.is_empty() && self.new_blocks.is_empty() {
+ body.basic_blocks.as_mut_preserves_cfg()
+ } else {
+ body.basic_blocks.as_mut()
+ };
+ bbs.extend(self.new_blocks);
+ body.local_decls.extend(self.new_locals);
+ for (src, patch) in self.patch_map.into_iter_enumerated() {
+ if let Some(patch) = patch {
+ debug!("MirPatch: patching block {:?}", src);
+ bbs[src].terminator_mut().kind = patch;
+ }
+ }
+
+ let mut new_statements = self.new_statements;
+ new_statements.sort_by_key(|s| s.0);
+
+ let mut delta = 0;
+ let mut last_bb = START_BLOCK;
+ let mut stmts_and_targets: Vec<(Statement<'_>, BasicBlock)> = Vec::new();
+ for (mut loc, stmt) in new_statements {
+ if loc.block != last_bb {
+ delta = 0;
+ last_bb = loc.block;
+ }
+ debug!("MirPatch: adding statement {:?} at loc {:?}+{}", stmt, loc, delta);
+ loc.statement_index += delta;
+ let source_info = Self::source_info_for_index(&body[loc.block], loc);
+
+ // For mir-opt `Derefer` to work in all cases we need to
+ // get terminator's targets and apply the statement to all of them.
+ if loc.statement_index > body[loc.block].statements.len() {
+ let term = body[loc.block].terminator();
+ for i in term.successors() {
+ stmts_and_targets.push((Statement { source_info, kind: stmt.clone() }, i));
+ }
+ delta += 1;
+ continue;
+ }
+
+ body[loc.block]
+ .statements
+ .insert(loc.statement_index, Statement { source_info, kind: stmt });
+ delta += 1;
+ }
+
+ for (stmt, target) in stmts_and_targets.into_iter().rev() {
+ body[target].statements.insert(0, stmt);
+ }
+ }
+
+ pub fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo {
+ match data.statements.get(loc.statement_index) {
+ Some(stmt) => stmt.source_info,
+ None => data.terminator().source_info,
+ }
+ }
+
+ pub fn source_info_for_location(&self, body: &Body<'tcx>, loc: Location) -> SourceInfo {
+ let data = match loc.block.index().checked_sub(body.basic_blocks().len()) {
+ Some(new) => &self.new_blocks[new],
+ None => &body[loc.block],
+ };
+ Self::source_info_for_index(data, loc)
+ }
+}
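The subtle part of `apply` above is the index bookkeeping: pending statements are sorted by location, then inserted with a running per-block `delta`, so that each insertion accounts for the ones already applied earlier in the same block. A minimal sketch of just that trick, with a plain `Vec<&str>` standing in for a basic block (not rustc code):

```rust
// Pending insertions are sorted by target index, then applied in order
// while a running `delta` accounts for the elements already inserted
// earlier -- the same bookkeeping `MirPatch::apply` performs per block.
fn apply_insertions(block: &mut Vec<&'static str>, mut pending: Vec<(usize, &'static str)>) {
    // Sorting guarantees each insertion only shifts positions after it,
    // so one running offset is enough.
    pending.sort_by_key(|&(index, _)| index);
    let mut delta = 0;
    for (index, stmt) in pending {
        block.insert(index + delta, stmt);
        delta += 1;
    }
}

fn main() {
    let mut block = vec!["a", "b", "c"];
    // Insert before the original positions 0 and 2.
    apply_insertions(&mut block, vec![(2, "y"), (0, "x")]);
    assert_eq!(block, ["x", "a", "b", "y", "c"]);
}
```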
diff --git a/compiler/rustc_middle/src/mir/predecessors.rs b/compiler/rustc_middle/src/mir/predecessors.rs
new file mode 100644
index 000000000..5f1fadaf3
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/predecessors.rs
@@ -0,0 +1,78 @@
+//! Lazily compute the reverse control-flow graph for the MIR.
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_index::vec::IndexVec;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use smallvec::SmallVec;
+
+use crate::mir::{BasicBlock, BasicBlockData};
+
+// Typically 95%+ of basic blocks have 4 or fewer predecessors.
+pub type Predecessors = IndexVec<BasicBlock, SmallVec<[BasicBlock; 4]>>;
+
+#[derive(Clone, Debug)]
+pub(super) struct PredecessorCache {
+ cache: OnceCell<Predecessors>,
+}
+
+impl PredecessorCache {
+ #[inline]
+ pub(super) fn new() -> Self {
+ PredecessorCache { cache: OnceCell::new() }
+ }
+
+ /// Invalidates the predecessor cache.
+ #[inline]
+ pub(super) fn invalidate(&mut self) {
+ // Invalidating the predecessor cache requires mutating the MIR, which in turn requires a
+ // unique reference (`&mut`) to the `mir::Body`. Because of this, we can assume that all
+ // callers of `invalidate` have a unique reference to the MIR and thus to the predecessor
+ // cache. This means we never need to do synchronization when `invalidate` is called, we can
+ // simply reinitialize the `OnceCell`.
+ self.cache = OnceCell::new();
+ }
+
+ /// Returns the predecessor graph for this MIR.
+ #[inline]
+ pub(super) fn compute(
+ &self,
+ basic_blocks: &IndexVec<BasicBlock, BasicBlockData<'_>>,
+ ) -> &Predecessors {
+ self.cache.get_or_init(|| {
+ let mut preds = IndexVec::from_elem(SmallVec::new(), basic_blocks);
+ for (bb, data) in basic_blocks.iter_enumerated() {
+ if let Some(term) = &data.terminator {
+ for succ in term.successors() {
+ preds[succ].push(bb);
+ }
+ }
+ }
+
+ preds
+ })
+ }
+}
+
+impl<S: Encoder> Encodable<S> for PredecessorCache {
+ #[inline]
+ fn encode(&self, _s: &mut S) {}
+}
+
+impl<D: Decoder> Decodable<D> for PredecessorCache {
+ #[inline]
+ fn decode(_: &mut D) -> Self {
+ Self::new()
+ }
+}
+
+impl<CTX> HashStable<CTX> for PredecessorCache {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ PredecessorCache,
+}
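The pattern here is not MIR-specific: keep successors as the source of truth, derive predecessors lazily on first use, and reset the cache on any mutation (which already requires `&mut`). A self-contained sketch using `std::cell::OnceCell`, the single-threaded standard-library analogue of the sync-aware cell used above; the graph representation is made up:

```rust
use std::cell::OnceCell;

struct Cfg {
    successors: Vec<Vec<usize>>,             // successors[bb] = outgoing edges
    predecessors: OnceCell<Vec<Vec<usize>>>, // lazily derived reverse edges
}

impl Cfg {
    fn new(successors: Vec<Vec<usize>>) -> Self {
        Cfg { successors, predecessors: OnceCell::new() }
    }

    // Derive the reverse edges on first use and cache them.
    fn predecessors(&self) -> &Vec<Vec<usize>> {
        self.predecessors.get_or_init(|| {
            let mut preds = vec![Vec::new(); self.successors.len()];
            for (bb, succs) in self.successors.iter().enumerate() {
                for &succ in succs {
                    preds[succ].push(bb);
                }
            }
            preds
        })
    }

    // Mutating the graph requires `&mut self`, so resetting the cell here
    // is race-free -- the same reasoning as `PredecessorCache::invalidate`.
    fn add_edge(&mut self, from: usize, to: usize) {
        self.successors[from].push(to);
        self.predecessors = OnceCell::new();
    }
}

fn main() {
    let mut cfg = Cfg::new(vec![vec![1, 2], vec![2], vec![]]);
    assert_eq!(cfg.predecessors()[2], vec![0, 1]);
    cfg.add_edge(2, 0);
    assert_eq!(cfg.predecessors()[0], vec![2]);
}
```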
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
new file mode 100644
index 000000000..0ce41337b
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -0,0 +1,1067 @@
+use std::collections::BTreeSet;
+use std::fmt::Display;
+use std::fmt::Write as _;
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+
+use super::graphviz::write_mir_fn_graphviz;
+use super::spanview::write_mir_fn_spanview;
+use either::Either;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::interpret::{
+ read_target_uint, AllocId, Allocation, ConstAllocation, ConstValue, GlobalAlloc, Pointer,
+ Provenance,
+};
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::MirSource;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::Size;
+
+const INDENT: &str = " ";
+/// Alignment for lining up comments following MIR statements
+pub(crate) const ALIGN: usize = 40;
+
+/// An indication of where we are in the control flow graph. Used for printing
+/// extra information in `dump_mir`
+pub enum PassWhere {
+ /// We have not started dumping the control flow graph, but we are about to.
+ BeforeCFG,
+
+ /// We just finished dumping the control flow graph. This is right before EOF
+ AfterCFG,
+
+ /// We are about to start dumping the given basic block.
+ BeforeBlock(BasicBlock),
+
+ /// We are just about to dump the given statement or terminator.
+ BeforeLocation(Location),
+
+ /// We just dumped the given statement or terminator.
+ AfterLocation(Location),
+
+ /// We just dumped the terminator for a block but not the closing `}`.
+ AfterTerminator(BasicBlock),
+}
+
+/// If the session is properly configured, dumps a human-readable
+/// representation of the MIR into:
+///
+/// ```text
+/// rustc.node<node_id>.<pass_num>.<pass_name>.<disambiguator>
+/// ```
+///
+/// Output from this function is controlled by passing `-Z dump-mir=<filter>`,
+/// where `<filter>` takes the following forms:
+///
+/// - `all` -- dump MIR for all fns, all passes, all everything
+/// - a filter defined by a set of substrings combined with `&` and `|`
+/// (`&` has higher precedence). At least one of the `|`-separated groups
+/// must match; an `|`-separated group matches if all of its `&`-separated
+/// substrings are matched.
+///
+/// Example:
+///
+/// - `nll` == match if `nll` appears in the name
+/// - `foo & nll` == match if `foo` and `nll` both appear in the name
+/// - `foo & nll | typeck` == match if `foo` and `nll` both appear in the name
+/// or `typeck` appears in the name.
+/// - `foo & nll | bar & typeck` == match if `foo` and `nll` both appear in the name
+/// or `typeck` and `bar` both appear in the name.
+#[inline]
+pub fn dump_mir<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ pass_num: Option<&dyn Display>,
+ pass_name: &str,
+ disambiguator: &dyn Display,
+ body: &Body<'tcx>,
+ extra_data: F,
+) where
+ F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+ if !dump_enabled(tcx, pass_name, body.source.def_id()) {
+ return;
+ }
+
+ dump_matched_mir_node(tcx, pass_num, pass_name, disambiguator, body, extra_data);
+}
+
+pub fn dump_enabled<'tcx>(tcx: TyCtxt<'tcx>, pass_name: &str, def_id: DefId) -> bool {
+ let Some(ref filters) = tcx.sess.opts.unstable_opts.dump_mir else {
+ return false;
+ };
+ // see notes on #41697 below
+ let node_path = ty::print::with_forced_impl_filename_line!(tcx.def_path_str(def_id));
+ filters.split('|').any(|or_filter| {
+ or_filter.split('&').all(|and_filter| {
+ let and_filter_trimmed = and_filter.trim();
+ and_filter_trimmed == "all"
+ || pass_name.contains(and_filter_trimmed)
+ || node_path.contains(and_filter_trimmed)
+ })
+ })
+}
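The `&`/`|` grammar from the `dump_mir` docs reduces to the nested `split`/`any`/`all` in `dump_enabled`. The same matching, extracted as a standalone function with hypothetical inputs:

```rust
// `|` separates alternatives, `&` separates conjuncts, and the literal
// filter `all` matches everything.
fn filter_matches(filter: &str, pass_name: &str, node_path: &str) -> bool {
    filter.split('|').any(|or_filter| {
        or_filter.split('&').all(|and_filter| {
            let f = and_filter.trim();
            f == "all" || pass_name.contains(f) || node_path.contains(f)
        })
    })
}

fn main() {
    assert!(filter_matches("nll", "nll_pass", "crate::foo"));
    assert!(filter_matches("foo & nll | typeck", "nll_pass", "crate::foo"));
    assert!(!filter_matches("foo & typeck", "nll_pass", "crate::foo"));
    assert!(filter_matches("all", "anything", "anywhere"));
}
```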
+
+// #41697 -- we use `with_forced_impl_filename_line()` because
+// `def_path_str()` would otherwise trigger `type_of`, and this can
+// run while we are already attempting to evaluate `type_of`.
+
+fn dump_matched_mir_node<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ pass_num: Option<&dyn Display>,
+ pass_name: &str,
+ disambiguator: &dyn Display,
+ body: &Body<'tcx>,
+ mut extra_data: F,
+) where
+ F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+ let _: io::Result<()> = try {
+ let mut file =
+ create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, body.source)?;
+ // see notes on #41697 above
+ let def_path =
+ ty::print::with_forced_impl_filename_line!(tcx.def_path_str(body.source.def_id()));
+ write!(file, "// MIR for `{}", def_path)?;
+ match body.source.promoted {
+ None => write!(file, "`")?,
+ Some(promoted) => write!(file, "::{:?}`", promoted)?,
+ }
+ writeln!(file, " {} {}", disambiguator, pass_name)?;
+ if let Some(ref layout) = body.generator_layout() {
+ writeln!(file, "/* generator_layout = {:#?} */", layout)?;
+ }
+ writeln!(file)?;
+ extra_data(PassWhere::BeforeCFG, &mut file)?;
+ write_user_type_annotations(tcx, body, &mut file)?;
+ write_mir_fn(tcx, body, &mut extra_data, &mut file)?;
+ extra_data(PassWhere::AfterCFG, &mut file)?;
+ };
+
+ if tcx.sess.opts.unstable_opts.dump_mir_graphviz {
+ let _: io::Result<()> = try {
+ let mut file =
+ create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, body.source)?;
+ write_mir_fn_graphviz(tcx, body, false, &mut file)?;
+ };
+ }
+
+ if let Some(spanview) = tcx.sess.opts.unstable_opts.dump_mir_spanview {
+ let _: io::Result<()> = try {
+ let file_basename =
+ dump_file_basename(tcx, pass_num, pass_name, disambiguator, body.source);
+ let mut file = create_dump_file_with_basename(tcx, &file_basename, "html")?;
+ if body.source.def_id().is_local() {
+ write_mir_fn_spanview(tcx, body, spanview, &file_basename, &mut file)?;
+ }
+ };
+ }
+}
+
+/// Returns the file basename portion (without extension) of the filename path
+/// where we should dump MIR representation output files.
+fn dump_file_basename<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ pass_num: Option<&dyn Display>,
+ pass_name: &str,
+ disambiguator: &dyn Display,
+ source: MirSource<'tcx>,
+) -> String {
+ let promotion_id = match source.promoted {
+ Some(id) => format!("-{:?}", id),
+ None => String::new(),
+ };
+
+ let pass_num = if tcx.sess.opts.unstable_opts.dump_mir_exclude_pass_number {
+ String::new()
+ } else {
+ match pass_num {
+ None => ".-------".to_string(),
+ Some(pass_num) => format!(".{}", pass_num),
+ }
+ };
+
+ let crate_name = tcx.crate_name(source.def_id().krate);
+ let item_name = tcx.def_path(source.def_id()).to_filename_friendly_no_crate();
+ // All drop shims have the same DefId, so we have to add the type
+ // to get unique file names.
+ let shim_disambiguator = match source.instance {
+ ty::InstanceDef::DropGlue(_, Some(ty)) => {
+ // Unfortunately, pretty-printed types are not very filename-friendly,
+ // so we do some filtering.
+ let mut s = ".".to_owned();
+ s.extend(ty.to_string().chars().filter_map(|c| match c {
+ ' ' => None,
+ ':' | '<' | '>' => Some('_'),
+ c => Some(c),
+ }));
+ s
+ }
+ _ => String::new(),
+ };
+
+ format!(
+ "{}.{}{}{}{}.{}.{}",
+ crate_name, item_name, shim_disambiguator, promotion_id, pass_num, pass_name, disambiguator,
+ )
+}
+
+/// Returns the path of the file where we should dump a given MIR.
+/// Also used by other bits of code (e.g., NLL inference) that dump
+/// graphviz data or other things.
+fn dump_path(tcx: TyCtxt<'_>, basename: &str, extension: &str) -> PathBuf {
+ let mut file_path = PathBuf::new();
+ file_path.push(Path::new(&tcx.sess.opts.unstable_opts.dump_mir_dir));
+
+ let file_name = format!("{}.{}", basename, extension);
+
+ file_path.push(&file_name);
+
+ file_path
+}
+
+/// Attempts to open the MIR dump file with the given name and extension.
+fn create_dump_file_with_basename(
+ tcx: TyCtxt<'_>,
+ file_basename: &str,
+ extension: &str,
+) -> io::Result<io::BufWriter<fs::File>> {
+ let file_path = dump_path(tcx, file_basename, extension);
+ if let Some(parent) = file_path.parent() {
+ fs::create_dir_all(parent).map_err(|e| {
+ io::Error::new(
+ e.kind(),
+ format!("IO error creating MIR dump directory: {:?}; {}", parent, e),
+ )
+ })?;
+ }
+ Ok(io::BufWriter::new(fs::File::create(&file_path).map_err(|e| {
+ io::Error::new(e.kind(), format!("IO error creating MIR dump file: {:?}; {}", file_path, e))
+ })?))
+}
+
+/// Attempts to open a file where we should dump a given MIR or other
+/// bit of MIR-related data. Used by `mir-dump`, but also by other
+/// bits of code (e.g., NLL inference) that dump graphviz data or
+/// other things, and hence takes the extension as an argument.
+pub fn create_dump_file<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ extension: &str,
+ pass_num: Option<&dyn Display>,
+ pass_name: &str,
+ disambiguator: &dyn Display,
+ source: MirSource<'tcx>,
+) -> io::Result<io::BufWriter<fs::File>> {
+ create_dump_file_with_basename(
+ tcx,
+ &dump_file_basename(tcx, pass_num, pass_name, disambiguator, source),
+ extension,
+ )
+}
+
+/// Write out a human-readable textual representation for the given MIR.
+pub fn write_mir_pretty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ single: Option<DefId>,
+ w: &mut dyn Write,
+) -> io::Result<()> {
+ writeln!(w, "// WARNING: This output format is intended for human consumers only")?;
+ writeln!(w, "// and is subject to change without notice. Knock yourself out.")?;
+
+ let mut first = true;
+ for def_id in dump_mir_def_ids(tcx, single) {
+ if first {
+ first = false;
+ } else {
+ // Put empty lines between all items
+ writeln!(w)?;
+ }
+
+ let render_body = |w: &mut dyn Write, body| -> io::Result<()> {
+ write_mir_fn(tcx, body, &mut |_, _| Ok(()), w)?;
+
+ for body in tcx.promoted_mir(def_id) {
+ writeln!(w)?;
+ write_mir_fn(tcx, body, &mut |_, _| Ok(()), w)?;
+ }
+ Ok(())
+ };
+
+ // For `const fn` we want to render both the optimized MIR and the MIR for ctfe.
+ if tcx.is_const_fn_raw(def_id) {
+ render_body(w, tcx.optimized_mir(def_id))?;
+ writeln!(w)?;
+ writeln!(w, "// MIR FOR CTFE")?;
+ // Do not use `render_body`, as that would render the promoteds again, but these
+ // are shared between mir_for_ctfe and optimized_mir
+ write_mir_fn(tcx, tcx.mir_for_ctfe(def_id), &mut |_, _| Ok(()), w)?;
+ } else {
+ let instance_mir =
+ tcx.instance_mir(ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)));
+ render_body(w, instance_mir)?;
+ }
+ }
+ Ok(())
+}
+
+/// Write out a human-readable textual representation for the given function.
+pub fn write_mir_fn<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ extra_data: &mut F,
+ w: &mut dyn Write,
+) -> io::Result<()>
+where
+ F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+ write_mir_intro(tcx, body, w)?;
+ for block in body.basic_blocks().indices() {
+ extra_data(PassWhere::BeforeBlock(block), w)?;
+ write_basic_block(tcx, block, body, extra_data, w)?;
+ if block.index() + 1 != body.basic_blocks().len() {
+ writeln!(w)?;
+ }
+ }
+
+ writeln!(w, "}}")?;
+
+ write_allocations(tcx, body, w)?;
+
+ Ok(())
+}
+
+/// Write out a human-readable textual representation for the given basic block.
+pub fn write_basic_block<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ block: BasicBlock,
+ body: &Body<'tcx>,
+ extra_data: &mut F,
+ w: &mut dyn Write,
+) -> io::Result<()>
+where
+ F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+ let data = &body[block];
+
+ // Basic block label at the top.
+ let cleanup_text = if data.is_cleanup { " (cleanup)" } else { "" };
+ writeln!(w, "{}{:?}{}: {{", INDENT, block, cleanup_text)?;
+
+ // List of statements in the middle.
+ let mut current_location = Location { block, statement_index: 0 };
+ for statement in &data.statements {
+ extra_data(PassWhere::BeforeLocation(current_location), w)?;
+ let indented_body = format!("{0}{0}{1:?};", INDENT, statement);
+ writeln!(
+ w,
+ "{:A$} // {}{}",
+ indented_body,
+ if tcx.sess.verbose() { format!("{:?}: ", current_location) } else { String::new() },
+ comment(tcx, statement.source_info, body.span),
+ A = ALIGN,
+ )?;
+
+ write_extra(tcx, w, |visitor| {
+ visitor.visit_statement(statement, current_location);
+ })?;
+
+ extra_data(PassWhere::AfterLocation(current_location), w)?;
+
+ current_location.statement_index += 1;
+ }
+
+ // Terminator at the bottom.
+ extra_data(PassWhere::BeforeLocation(current_location), w)?;
+ let indented_terminator = format!("{0}{0}{1:?};", INDENT, data.terminator().kind);
+ writeln!(
+ w,
+ "{:A$} // {}{}",
+ indented_terminator,
+ if tcx.sess.verbose() { format!("{:?}: ", current_location) } else { String::new() },
+ comment(tcx, data.terminator().source_info, body.span),
+ A = ALIGN,
+ )?;
+
+ write_extra(tcx, w, |visitor| {
+ visitor.visit_terminator(data.terminator(), current_location);
+ })?;
+
+ extra_data(PassWhere::AfterLocation(current_location), w)?;
+ extra_data(PassWhere::AfterTerminator(block), w)?;
+
+ writeln!(w, "{}}}", INDENT)
+}
+
+/// After we print the main statement, we sometimes dump extra
+/// information. There are often a lot of little things "nuzzled up" in
+/// a statement.
+fn write_extra<'tcx, F>(tcx: TyCtxt<'tcx>, write: &mut dyn Write, mut visit_op: F) -> io::Result<()>
+where
+ F: FnMut(&mut ExtraComments<'tcx>),
+{
+ let mut extra_comments = ExtraComments { tcx, comments: vec![] };
+ visit_op(&mut extra_comments);
+ for comment in extra_comments.comments {
+ writeln!(write, "{:A$} // {}", "", comment, A = ALIGN)?;
+ }
+ Ok(())
+}
+
+struct ExtraComments<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ comments: Vec<String>,
+}
+
+impl<'tcx> ExtraComments<'tcx> {
+ fn push(&mut self, lines: &str) {
+ for line in lines.split('\n') {
+ self.comments.push(line.to_string());
+ }
+ }
+}
+
+fn use_verbose<'tcx>(ty: Ty<'tcx>, fn_def: bool) -> bool {
+ match *ty.kind() {
+ ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char | ty::Float(_) => false,
+ // Unit type
+ ty::Tuple(g_args) if g_args.is_empty() => false,
+ ty::Tuple(g_args) => g_args.iter().any(|g_arg| use_verbose(g_arg, fn_def)),
+ ty::Array(ty, _) => use_verbose(ty, fn_def),
+ ty::FnDef(..) => fn_def,
+ _ => true,
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
+ fn visit_constant(&mut self, constant: &Constant<'tcx>, _location: Location) {
+ let Constant { span, user_ty, literal } = constant;
+ if use_verbose(literal.ty(), true) {
+ self.push("mir::Constant");
+ self.push(&format!(
+ "+ span: {}",
+ self.tcx.sess.source_map().span_to_embeddable_string(*span)
+ ));
+ if let Some(user_ty) = user_ty {
+ self.push(&format!("+ user_ty: {:?}", user_ty));
+ }
+
+ // FIXME: this is a poor version of `pretty_print_const_value`.
+ let fmt_val = |val: &ConstValue<'tcx>| match val {
+ ConstValue::ZeroSized => format!("<ZST>"),
+ ConstValue::Scalar(s) => format!("Scalar({:?})", s),
+ ConstValue::Slice { .. } => format!("Slice(..)"),
+ ConstValue::ByRef { .. } => format!("ByRef(..)"),
+ };
+
+ let fmt_valtree = |valtree: &ty::ValTree<'tcx>| match valtree {
+ ty::ValTree::Leaf(leaf) => format!("ValTree::Leaf({:?})", leaf),
+ ty::ValTree::Branch(_) => format!("ValTree::Branch(..)"),
+ };
+
+ let val = match literal {
+ ConstantKind::Ty(ct) => match ct.kind() {
+ ty::ConstKind::Param(p) => format!("Param({})", p),
+ ty::ConstKind::Unevaluated(uv) => format!(
+ "Unevaluated({}, {:?}, {:?})",
+ self.tcx.def_path_str(uv.def.did),
+ uv.substs,
+ uv.promoted,
+ ),
+ ty::ConstKind::Value(val) => format!("Value({})", fmt_valtree(&val)),
+ ty::ConstKind::Error(_) => "Error".to_string(),
+ // These variants shouldn't exist in the MIR.
+ ty::ConstKind::Placeholder(_)
+ | ty::ConstKind::Infer(_)
+ | ty::ConstKind::Bound(..) => bug!("unexpected MIR constant: {:?}", literal),
+ },
+ // To keep the diffs small, we render this like we render `ty::Const::Value`.
+ //
+ // This will change once `ty::Const::Value` is represented using valtrees.
+ ConstantKind::Val(val, _) => format!("Value({})", fmt_val(&val)),
+ };
+
+ // This reflects what `Const` looked like before `val` was renamed
+ // as `kind`. We print it like this to avoid having to update
+ // expected output in a lot of tests.
+ self.push(&format!("+ literal: Const {{ ty: {}, val: {} }}", literal.ty(), val));
+ }
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+ if let Rvalue::Aggregate(kind, _) = rvalue {
+ match **kind {
+ AggregateKind::Closure(def_id, substs) => {
+ self.push("closure");
+ self.push(&format!("+ def_id: {:?}", def_id));
+ self.push(&format!("+ substs: {:#?}", substs));
+ }
+
+ AggregateKind::Generator(def_id, substs, movability) => {
+ self.push("generator");
+ self.push(&format!("+ def_id: {:?}", def_id));
+ self.push(&format!("+ substs: {:#?}", substs));
+ self.push(&format!("+ movability: {:?}", movability));
+ }
+
+ AggregateKind::Adt(_, _, _, Some(user_ty), _) => {
+ self.push("adt");
+ self.push(&format!("+ user_ty: {:?}", user_ty));
+ }
+
+ _ => {}
+ }
+ }
+ }
+}
+
+fn comment(tcx: TyCtxt<'_>, SourceInfo { span, scope }: SourceInfo, function_span: Span) -> String {
+ let location = if tcx.sess.opts.unstable_opts.mir_pretty_relative_line_numbers {
+ tcx.sess.source_map().span_to_relative_line_string(span, function_span)
+ } else {
+ tcx.sess.source_map().span_to_embeddable_string(span)
+ };
+
+ format!("scope {} at {}", scope.index(), location,)
+}
+
+/// Prints local variables in a scope tree.
+fn write_scope_tree(
+ tcx: TyCtxt<'_>,
+ body: &Body<'_>,
+ scope_tree: &FxHashMap<SourceScope, Vec<SourceScope>>,
+ w: &mut dyn Write,
+ parent: SourceScope,
+ depth: usize,
+) -> io::Result<()> {
+ let indent = depth * INDENT.len();
+
+ // Local variable debuginfo.
+ for var_debug_info in &body.var_debug_info {
+ if var_debug_info.source_info.scope != parent {
+ // Not declared in this scope.
+ continue;
+ }
+
+ let indented_debug_info = format!(
+ "{0:1$}debug {2} => {3:?};",
+ INDENT, indent, var_debug_info.name, var_debug_info.value,
+ );
+
+ writeln!(
+ w,
+ "{0:1$} // in {2}",
+ indented_debug_info,
+ ALIGN,
+ comment(tcx, var_debug_info.source_info, body.span),
+ )?;
+ }
+
+ // Local variable types.
+ for (local, local_decl) in body.local_decls.iter_enumerated() {
+ if (1..body.arg_count + 1).contains(&local.index()) {
+ // Skip over argument locals, they're printed in the signature.
+ continue;
+ }
+
+ if local_decl.source_info.scope != parent {
+ // Not declared in this scope.
+ continue;
+ }
+
+ let mut_str = if local_decl.mutability == Mutability::Mut { "mut " } else { "" };
+
+ let mut indented_decl =
+ format!("{0:1$}let {2}{3:?}: {4:?}", INDENT, indent, mut_str, local, local_decl.ty);
+ if let Some(user_ty) = &local_decl.user_ty {
+ for user_ty in user_ty.projections() {
+ write!(indented_decl, " as {:?}", user_ty).unwrap();
+ }
+ }
+ indented_decl.push(';');
+
+ let local_name = if local == RETURN_PLACE { " return place" } else { "" };
+
+ writeln!(
+ w,
+ "{0:1$} //{2} in {3}",
+ indented_decl,
+ ALIGN,
+ local_name,
+ comment(tcx, local_decl.source_info, body.span),
+ )?;
+ }
+
+ let Some(children) = scope_tree.get(&parent) else {
+ return Ok(());
+ };
+
+ for &child in children {
+ let child_data = &body.source_scopes[child];
+ assert_eq!(child_data.parent_scope, Some(parent));
+
+ let (special, span) = if let Some((callee, callsite_span)) = child_data.inlined {
+ (
+ format!(
+ " (inlined {}{})",
+ if callee.def.requires_caller_location(tcx) { "#[track_caller] " } else { "" },
+ callee
+ ),
+ Some(callsite_span),
+ )
+ } else {
+ (String::new(), None)
+ };
+
+ let indented_header = format!("{0:1$}scope {2}{3} {{", "", indent, child.index(), special);
+
+ if let Some(span) = span {
+ writeln!(
+ w,
+ "{0:1$} // at {2}",
+ indented_header,
+ ALIGN,
+ tcx.sess.source_map().span_to_embeddable_string(span),
+ )?;
+ } else {
+ writeln!(w, "{}", indented_header)?;
+ }
+
+ write_scope_tree(tcx, body, scope_tree, w, child, depth + 1)?;
+ writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?;
+ }
+
+ Ok(())
+}
+
+/// Write out a human-readable textual representation of the MIR's `fn` type and the types of its
+/// local variables (both user-defined bindings and compiler temporaries).
+pub fn write_mir_intro<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'_>,
+ w: &mut dyn Write,
+) -> io::Result<()> {
+ write_mir_sig(tcx, body, w)?;
+ writeln!(w, "{{")?;
+
+ // construct a scope tree and write it out
+ let mut scope_tree: FxHashMap<SourceScope, Vec<SourceScope>> = Default::default();
+ for (index, scope_data) in body.source_scopes.iter().enumerate() {
+ if let Some(parent) = scope_data.parent_scope {
+ scope_tree.entry(parent).or_default().push(SourceScope::new(index));
+ } else {
+ // Only the argument scope has no parent, because it's the root.
+ assert_eq!(index, OUTERMOST_SOURCE_SCOPE.index());
+ }
+ }
+
+ write_scope_tree(tcx, body, &scope_tree, w, OUTERMOST_SOURCE_SCOPE, 1)?;
+
+ // Add an empty line before the first block is printed.
+ writeln!(w)?;
+
+ Ok(())
+}
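Scopes store only a parent link, so the printer first inverts those links into a parent-to-children map (the loop above) and then recurses through `write_scope_tree`. A minimal sketch of that inversion and recursion, using plain `usize` scope ids instead of rustc's types:

```rust
use std::collections::HashMap;

// Each scope stores only a parent link; printing needs the opposite
// direction, so we invert the links into a parent -> children map first.
fn print_tree(children: &HashMap<usize, Vec<usize>>, node: usize, depth: usize) {
    println!("{:indent$}scope {}", "", node, indent = depth * 4);
    for &child in children.get(&node).into_iter().flatten() {
        print_tree(children, child, depth + 1);
    }
}

fn main() {
    // parents[i] mirrors SourceScopeData::parent_scope; scope 0 is the root.
    let parents: Vec<Option<usize>> = vec![None, Some(0), Some(0), Some(2)];
    let mut children: HashMap<usize, Vec<usize>> = HashMap::new();
    for (scope, parent) in parents.iter().enumerate() {
        if let Some(p) = parent {
            children.entry(*p).or_default().push(scope);
        }
    }
    print_tree(&children, 0, 1);
}
```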
+
+/// Find all `AllocId`s mentioned (recursively) in the MIR body and print their corresponding
+/// allocations.
+pub fn write_allocations<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'_>,
+ w: &mut dyn Write,
+) -> io::Result<()> {
+ fn alloc_ids_from_alloc(
+ alloc: ConstAllocation<'_>,
+ ) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
+ alloc.inner().relocations().values().copied()
+ }
+
+ fn alloc_ids_from_const_val(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
+ match val {
+ ConstValue::Scalar(interpret::Scalar::Ptr(ptr, _)) => {
+ Either::Left(Either::Left(std::iter::once(ptr.provenance)))
+ }
+ ConstValue::Scalar(interpret::Scalar::Int { .. }) => {
+ Either::Left(Either::Right(std::iter::empty()))
+ }
+ ConstValue::ZeroSized => Either::Left(Either::Right(std::iter::empty())),
+ ConstValue::ByRef { alloc, .. } | ConstValue::Slice { data: alloc, .. } => {
+ Either::Right(alloc_ids_from_alloc(alloc))
+ }
+ }
+ }
+ struct CollectAllocIds(BTreeSet<AllocId>);
+
+ impl<'tcx> Visitor<'tcx> for CollectAllocIds {
+ fn visit_constant(&mut self, c: &Constant<'tcx>, loc: Location) {
+ match c.literal {
+ ConstantKind::Ty(c) => self.visit_const(c, loc),
+ ConstantKind::Val(val, _) => {
+ self.0.extend(alloc_ids_from_const_val(val));
+ }
+ }
+ }
+ }
+
+ let mut visitor = CollectAllocIds(Default::default());
+ visitor.visit_body(body);
+
+ // `seen` contains all seen allocations, including the ones we have *not* printed yet.
+ // The protocol is to first `insert` into `seen`, and only if that returns `true`
+ // then push to `todo`.
+ let mut seen = visitor.0;
+ let mut todo: Vec<_> = seen.iter().copied().collect();
+ while let Some(id) = todo.pop() {
+ let mut write_allocation_track_relocs =
+ |w: &mut dyn Write, alloc: ConstAllocation<'tcx>| -> io::Result<()> {
+ // `.rev()` because we are popping them from the back of the `todo` vector.
+ for id in alloc_ids_from_alloc(alloc).rev() {
+ if seen.insert(id) {
+ todo.push(id);
+ }
+ }
+ write!(w, "{}", display_allocation(tcx, alloc.inner()))
+ };
+ write!(w, "\n{id:?}")?;
+ match tcx.try_get_global_alloc(id) {
+ // This can't really happen unless there are bugs, but it doesn't cost us anything to
+ // gracefully handle it and allow buggy rustc to be debugged via allocation printing.
+ None => write!(w, " (deallocated)")?,
+ Some(GlobalAlloc::Function(inst)) => write!(w, " (fn: {inst})")?,
+ Some(GlobalAlloc::VTable(ty, Some(trait_ref))) => {
+ write!(w, " (vtable: impl {trait_ref} for {ty})")?
+ }
+ Some(GlobalAlloc::VTable(ty, None)) => {
+ write!(w, " (vtable: impl <auto trait> for {ty})")?
+ }
+ Some(GlobalAlloc::Static(did)) if !tcx.is_foreign_item(did) => {
+ match tcx.eval_static_initializer(did) {
+ Ok(alloc) => {
+ write!(w, " (static: {}, ", tcx.def_path_str(did))?;
+ write_allocation_track_relocs(w, alloc)?;
+ }
+ Err(_) => write!(
+ w,
+ " (static: {}, error during initializer evaluation)",
+ tcx.def_path_str(did)
+ )?,
+ }
+ }
+ Some(GlobalAlloc::Static(did)) => {
+ write!(w, " (extern static: {})", tcx.def_path_str(did))?
+ }
+ Some(GlobalAlloc::Memory(alloc)) => {
+ write!(w, " (")?;
+ write_allocation_track_relocs(w, alloc)?
+ }
+ }
+ writeln!(w)?;
+ }
+ Ok(())
+}
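The `seen`/`todo` protocol spelled out in the comment above is a standard worklist traversal: membership in `seen` gates every push onto `todo`, so each allocation is printed once and the loop terminates even when allocations reference each other cyclically. A small sketch over a toy edge list (not rustc code):

```rust
use std::collections::BTreeSet;

// Membership in `seen` gates every push onto `todo`, so each node is
// processed exactly once even when the graph contains cycles.
fn reachable(edges: &[(u32, u32)], roots: &[u32]) -> BTreeSet<u32> {
    let mut seen: BTreeSet<u32> = roots.iter().copied().collect();
    let mut todo: Vec<u32> = seen.iter().copied().collect();
    while let Some(id) = todo.pop() {
        for &(from, to) in edges {
            if from == id && seen.insert(to) {
                todo.push(to);
            }
        }
    }
    seen
}

fn main() {
    // The 1 -> 2 -> 1 cycle terminates because `seen` gates `todo`.
    let edges = [(1, 2), (2, 1), (2, 3)];
    assert_eq!(reachable(&edges, &[1]), BTreeSet::from([1, 2, 3]));
}
```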
+
+/// Dumps the size, metadata, and contents of an allocation to the given writer.
+/// The expectation is that the caller first prints other relevant metadata, so the exact
+/// output of this function is (*without* leading or trailing newline):
+///
+/// ```text
+/// size: {}, align: {}) {
+/// <bytes>
+/// }
+/// ```
+///
+/// The byte format is similar to how hex editors print bytes. Each line starts with the address of
+/// the start of the line, followed by all bytes in hex format (space separated).
+/// If the allocation is small enough to fit into a single line, no start address is given.
+/// After the hex dump, an ASCII dump follows, replacing all unprintable characters (control
+/// characters or characters whose value is larger than 127) with a `.`.
+/// This also prints relocations adequately.
+pub fn display_allocation<'a, 'tcx, Prov, Extra>(
+ tcx: TyCtxt<'tcx>,
+ alloc: &'a Allocation<Prov, Extra>,
+) -> RenderAllocation<'a, 'tcx, Prov, Extra> {
+ RenderAllocation { tcx, alloc }
+}
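The byte format described above is easier to picture without provenance and initialization tracking. A toy hexdump sketch that keeps only the address column, the hex column, and the trailing ASCII column; the real renderer additionally draws relocations as `╾──╼` spans and uninitialized bytes as `__`/`░`:

```rust
// Toy version: 16 hex bytes per line, a line-start address on the left,
// and an ASCII column (unprintables as '.') on the right.
fn hexdump(bytes: &[u8]) -> String {
    const BYTES_PER_LINE: usize = 16;
    let mut out = String::new();
    for (line_no, chunk) in bytes.chunks(BYTES_PER_LINE).enumerate() {
        let hex: Vec<String> = chunk.iter().map(|b| format!("{:02x}", b)).collect();
        let ascii: String = chunk
            .iter()
            .map(|&b| if b.is_ascii_graphic() || b == b' ' { b as char } else { '.' })
            .collect();
        // Pad the hex column so the ASCII column lines up on a short last line.
        out.push_str(&format!(
            "0x{:04x} | {:<47} | {}\n",
            line_no * BYTES_PER_LINE,
            hex.join(" "),
            ascii
        ));
    }
    out
}

fn main() {
    print!("{}", hexdump(b"Hello, MIR!\x00\x01 and some more bytes..."));
}
```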
+
+#[doc(hidden)]
+pub struct RenderAllocation<'a, 'tcx, Prov, Extra> {
+ tcx: TyCtxt<'tcx>,
+ alloc: &'a Allocation<Prov, Extra>,
+}
+
+impl<'a, 'tcx, Prov: Provenance, Extra> std::fmt::Display
+ for RenderAllocation<'a, 'tcx, Prov, Extra>
+{
+ fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let RenderAllocation { tcx, alloc } = *self;
+ write!(w, "size: {}, align: {})", alloc.size().bytes(), alloc.align.bytes())?;
+ if alloc.size() == Size::ZERO {
+ // We are done.
+ return write!(w, " {{}}");
+ }
+ // Write allocation bytes.
+ writeln!(w, " {{")?;
+ write_allocation_bytes(tcx, alloc, w, " ")?;
+ write!(w, "}}")?;
+ Ok(())
+ }
+}
+
+fn write_allocation_endline(w: &mut dyn std::fmt::Write, ascii: &str) -> std::fmt::Result {
+ for _ in 0..(BYTES_PER_LINE - ascii.chars().count()) {
+ write!(w, " ")?;
+ }
+ writeln!(w, " │ {}", ascii)
+}
+
+/// Number of bytes to print per allocation hex dump line.
+const BYTES_PER_LINE: usize = 16;
+
+/// Prints the line start address and returns the new line start address.
+fn write_allocation_newline(
+ w: &mut dyn std::fmt::Write,
+ mut line_start: Size,
+ ascii: &str,
+ pos_width: usize,
+ prefix: &str,
+) -> Result<Size, std::fmt::Error> {
+ write_allocation_endline(w, ascii)?;
+ line_start += Size::from_bytes(BYTES_PER_LINE);
+ write!(w, "{}0x{:02$x} │ ", prefix, line_start.bytes(), pos_width)?;
+ Ok(line_start)
+}
+
+/// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
+/// is only one line). Note that your prefix should contain a trailing space as the lines are
+/// printed directly after it.
+fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
+ tcx: TyCtxt<'tcx>,
+ alloc: &Allocation<Prov, Extra>,
+ w: &mut dyn std::fmt::Write,
+ prefix: &str,
+) -> std::fmt::Result {
+ let num_lines = alloc.size().bytes_usize().saturating_sub(BYTES_PER_LINE);
+ // Number of chars needed to represent all line numbers.
+ let pos_width = hex_number_length(alloc.size().bytes());
+
+ if num_lines > 0 {
+ write!(w, "{}0x{:02$x} │ ", prefix, 0, pos_width)?;
+ } else {
+ write!(w, "{}", prefix)?;
+ }
+
+ let mut i = Size::ZERO;
+ let mut line_start = Size::ZERO;
+
+ let ptr_size = tcx.data_layout.pointer_size;
+
+ let mut ascii = String::new();
+
+ let oversized_ptr = |target: &mut String, width| {
+ if target.len() > width {
+ write!(target, " ({} ptr bytes)", ptr_size.bytes()).unwrap();
+ }
+ };
+
+ while i < alloc.size() {
+ // The line start already has a space. While we could remove that space from the line start
+ // printing and unconditionally print a space here, that would cause the single-line case
+ // to have a single space before it, which looks weird.
+ if i != line_start {
+ write!(w, " ")?;
+ }
+ if let Some(&prov) = alloc.relocations().get(&i) {
+ // Memory with a relocation must be defined
+ assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
+ let j = i.bytes_usize();
+ let offset = alloc
+ .inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
+ let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
+ let offset = Size::from_bytes(offset);
+ let relocation_width = |bytes| bytes * 3;
+ let ptr = Pointer::new(prov, offset);
+ let mut target = format!("{:?}", ptr);
+ if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
+ // This is too long, try to save some space.
+ target = format!("{:#?}", ptr);
+ }
+ if ((i - line_start) + ptr_size).bytes_usize() > BYTES_PER_LINE {
+ // This branch handles the situation where a relocation starts in the current line
+ // but ends in the next one.
+ let remainder = Size::from_bytes(BYTES_PER_LINE) - (i - line_start);
+ let overflow = ptr_size - remainder;
+ let remainder_width = relocation_width(remainder.bytes_usize()) - 2;
+ let overflow_width = relocation_width(overflow.bytes_usize() - 1) + 1;
+ ascii.push('╾');
+ for _ in 0..remainder.bytes() - 1 {
+ ascii.push('─');
+ }
+ if overflow_width > remainder_width && overflow_width >= target.len() {
+ // The case where the relocation fits into the part in the next line
+ write!(w, "╾{0:─^1$}", "", remainder_width)?;
+ line_start =
+ write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
+ ascii.clear();
+ write!(w, "{0:─^1$}╼", target, overflow_width)?;
+ } else {
+ oversized_ptr(&mut target, remainder_width);
+ write!(w, "╾{0:─^1$}", target, remainder_width)?;
+ line_start =
+ write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
+ write!(w, "{0:─^1$}╼", "", overflow_width)?;
+ ascii.clear();
+ }
+ for _ in 0..overflow.bytes() - 1 {
+ ascii.push('─');
+ }
+ ascii.push('╼');
+ i += ptr_size;
+ continue;
+ } else {
+ // This branch handles a relocation that starts and ends in the current line.
+ let relocation_width = relocation_width(ptr_size.bytes_usize() - 1);
+ oversized_ptr(&mut target, relocation_width);
+ ascii.push('╾');
+ write!(w, "╾{0:─^1$}╼", target, relocation_width)?;
+ for _ in 0..ptr_size.bytes() - 2 {
+ ascii.push('─');
+ }
+ ascii.push('╼');
+ i += ptr_size;
+ }
+ } else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() {
+ let j = i.bytes_usize();
+
+ // Checked definedness (and thus range) and relocations. This access also doesn't
+ // influence interpreter execution but is only for debugging.
+ let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
+ write!(w, "{:02x}", c)?;
+ if c.is_ascii_control() || c >= 0x80 {
+ ascii.push('.');
+ } else {
+ ascii.push(char::from(c));
+ }
+ i += Size::from_bytes(1);
+ } else {
+ write!(w, "__")?;
+ ascii.push('░');
+ i += Size::from_bytes(1);
+ }
+ // Print a new line header if the next line still has some bytes to print.
+ if i == line_start + Size::from_bytes(BYTES_PER_LINE) && i != alloc.size() {
+ line_start = write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
+ ascii.clear();
+ }
+ }
+ write_allocation_endline(w, &ascii)?;
+
+ Ok(())
+}
+
+fn write_mir_sig(tcx: TyCtxt<'_>, body: &Body<'_>, w: &mut dyn Write) -> io::Result<()> {
+ use rustc_hir::def::DefKind;
+
+ trace!("write_mir_sig: {:?}", body.source.instance);
+ let def_id = body.source.def_id();
+ let kind = tcx.def_kind(def_id);
+ let is_function = match kind {
+ DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(..) => true,
+ _ => tcx.is_closure(def_id),
+ };
+ match (kind, body.source.promoted) {
+ (_, Some(i)) => write!(w, "{:?} in ", i)?,
+ (DefKind::Const | DefKind::AssocConst, _) => write!(w, "const ")?,
+ (DefKind::Static(hir::Mutability::Not), _) => write!(w, "static ")?,
+ (DefKind::Static(hir::Mutability::Mut), _) => write!(w, "static mut ")?,
+ (_, _) if is_function => write!(w, "fn ")?,
+ (DefKind::AnonConst | DefKind::InlineConst, _) => {} // things like anon const, not an item
+ _ => bug!("Unexpected def kind {:?}", kind),
+ }
+
+ ty::print::with_forced_impl_filename_line! {
+ // see notes on #41697 elsewhere
+ write!(w, "{}", tcx.def_path_str(def_id))?
+ }
+
+ if body.source.promoted.is_none() && is_function {
+ write!(w, "(")?;
+
+ // fn argument types.
+ for (i, arg) in body.args_iter().enumerate() {
+ if i != 0 {
+ write!(w, ", ")?;
+ }
+ write!(w, "{:?}: {}", Place::from(arg), body.local_decls[arg].ty)?;
+ }
+
+ write!(w, ") -> {}", body.return_ty())?;
+ } else {
+ assert_eq!(body.arg_count, 0);
+ write!(w, ": {} =", body.return_ty())?;
+ }
+
+ if let Some(yield_ty) = body.yield_ty() {
+ writeln!(w)?;
+ writeln!(w, "yields {}", yield_ty)?;
+ }
+
+ write!(w, " ")?;
+ // Next thing that gets printed is the opening {
+
+ Ok(())
+}
+
+fn write_user_type_annotations(
+ tcx: TyCtxt<'_>,
+ body: &Body<'_>,
+ w: &mut dyn Write,
+) -> io::Result<()> {
+ if !body.user_type_annotations.is_empty() {
+ writeln!(w, "| User Type Annotations")?;
+ }
+ for (index, annotation) in body.user_type_annotations.iter_enumerated() {
+ writeln!(
+ w,
+ "| {:?}: user_ty: {:?}, span: {}, inferred_ty: {:?}",
+ index.index(),
+ annotation.user_ty,
+ tcx.sess.source_map().span_to_embeddable_string(annotation.span),
+ annotation.inferred_ty,
+ )?;
+ }
+ if !body.user_type_annotations.is_empty() {
+ writeln!(w, "|")?;
+ }
+ Ok(())
+}
+
+pub fn dump_mir_def_ids(tcx: TyCtxt<'_>, single: Option<DefId>) -> Vec<DefId> {
+ if let Some(i) = single {
+ vec![i]
+ } else {
+ tcx.mir_keys(()).iter().map(|def_id| def_id.to_def_id()).collect()
+ }
+}
+
+/// Returns the length in chars of a `u64` when converted to hex.
+///
+/// ```ignore (cannot-test-private-function)
+/// assert_eq!(1, hex_number_length(0));
+/// assert_eq!(1, hex_number_length(1));
+/// assert_eq!(2, hex_number_length(16));
+/// ```
+fn hex_number_length(x: u64) -> usize {
+ if x == 0 {
+ return 1;
+ }
+ let mut length = 0;
+ let mut x_left = x;
+ while x_left > 0 {
+ x_left /= 16;
+ length += 1;
+ }
+ length
+}
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
new file mode 100644
index 000000000..dd9f8795f
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -0,0 +1,476 @@
+//! Values computed by queries that use MIR.
+
+use crate::mir::{Body, ConstantKind, Promoted};
+use crate::ty::{self, OpaqueHiddenType, Ty, TyCtxt};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::vec_map::VecMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_index::bit_set::BitMatrix;
+use rustc_index::vec::IndexVec;
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+use smallvec::SmallVec;
+use std::cell::Cell;
+use std::fmt::{self, Debug};
+
+use super::{Field, SourceInfo};
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub enum UnsafetyViolationKind {
+ /// Unsafe operation outside `unsafe`.
+ General,
+ /// Unsafe operation in an `unsafe fn` but outside an `unsafe` block.
+ /// Has to be handled as a lint for backwards compatibility.
+ UnsafeFn,
+}
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub enum UnsafetyViolationDetails {
+ CallToUnsafeFunction,
+ UseOfInlineAssembly,
+ InitializingTypeWith,
+ CastOfPointerToInt,
+ UseOfMutableStatic,
+ UseOfExternStatic,
+ DerefOfRawPointer,
+ AccessToUnionField,
+ MutationOfLayoutConstrainedField,
+ BorrowOfLayoutConstrainedField,
+ CallToFunctionWith,
+}
+
+impl UnsafetyViolationDetails {
+ pub fn description_and_note(&self) -> (&'static str, &'static str) {
+ use UnsafetyViolationDetails::*;
+ match self {
+ CallToUnsafeFunction => (
+ "call to unsafe function",
+ "consult the function's documentation for information on how to avoid undefined \
+ behavior",
+ ),
+ UseOfInlineAssembly => (
+ "use of inline assembly",
+ "inline assembly is entirely unchecked and can cause undefined behavior",
+ ),
+ InitializingTypeWith => (
+ "initializing type with `rustc_layout_scalar_valid_range` attr",
+ "initializing a layout restricted type's field with a value outside the valid \
+ range is undefined behavior",
+ ),
+ CastOfPointerToInt => {
+ ("cast of pointer to int", "casting pointers to integers in constants")
+ }
+ UseOfMutableStatic => (
+ "use of mutable static",
+ "mutable statics can be mutated by multiple threads: aliasing violations or data \
+ races will cause undefined behavior",
+ ),
+ UseOfExternStatic => (
+ "use of extern static",
+ "extern statics are not controlled by the Rust type system: invalid data, \
+ aliasing violations or data races will cause undefined behavior",
+ ),
+ DerefOfRawPointer => (
+ "dereference of raw pointer",
+ "raw pointers may be null, dangling or unaligned; they can violate aliasing rules \
+ and cause data races: all of these are undefined behavior",
+ ),
+ AccessToUnionField => (
+ "access to union field",
+ "the field may not be properly initialized: using uninitialized data will cause \
+ undefined behavior",
+ ),
+ MutationOfLayoutConstrainedField => (
+ "mutation of layout constrained field",
+ "mutating layout constrained fields cannot statically be checked for valid values",
+ ),
+ BorrowOfLayoutConstrainedField => (
+ "borrow of layout constrained field with interior mutability",
+ "references to fields of layout constrained fields lose the constraints. Coupled \
+ with interior mutability, the field can be changed to invalid values",
+ ),
+ CallToFunctionWith => (
+ "call to function with `#[target_feature]`",
+ "can only be called if the required target features are available",
+ ),
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub struct UnsafetyViolation {
+ pub source_info: SourceInfo,
+ pub lint_root: hir::HirId,
+ pub kind: UnsafetyViolationKind,
+ pub details: UnsafetyViolationDetails,
+}
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub enum UnusedUnsafe {
+ /// `unsafe` block contains no unsafe operations
+ /// > ``unnecessary `unsafe` block``
+ Unused,
+ /// `unsafe` block nested under another (used) `unsafe` block
+ /// > ``… because it's nested under this `unsafe` block``
+ InUnsafeBlock(hir::HirId),
+ /// `unsafe` block nested under `unsafe fn`
+ /// > ``… because it's nested under this `unsafe fn` ``
+ ///
+ /// the second HirId here indicates the first usage of the `unsafe` block,
+ /// which allows retrieval of the LintLevelSource for why that operation would
+ /// have been permitted without the block
+ InUnsafeFn(hir::HirId, hir::HirId),
+}
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub enum UsedUnsafeBlockData {
+ SomeDisallowedInUnsafeFn,
+ // the HirId here indicates the first usage of the `unsafe` block
+ // (i.e. the one that's first encountered in the MIR traversal of the unsafety check)
+ AllAllowedInUnsafeFn(hir::HirId),
+}
+
+#[derive(TyEncodable, TyDecodable, HashStable, Debug)]
+pub struct UnsafetyCheckResult {
+ /// Violations that are propagated *upwards* from this function.
+ pub violations: Vec<UnsafetyViolation>,
+
+ /// Used `unsafe` blocks in this function. This is used for the "unused_unsafe" lint.
+ ///
+/// The keys are the used `unsafe` blocks; the `UsedUnsafeBlockData` indicates whether
+ /// or not any of the usages happen at a place that doesn't allow `unsafe_op_in_unsafe_fn`.
+ pub used_unsafe_blocks: FxHashMap<hir::HirId, UsedUnsafeBlockData>,
+
+ /// This is `Some` iff the item is not a closure.
+ pub unused_unsafes: Option<Vec<(hir::HirId, UnusedUnsafe)>>,
+}
+
+rustc_index::newtype_index! {
+ pub struct GeneratorSavedLocal {
+ derive [HashStable]
+ DEBUG_FORMAT = "_{}",
+ }
+}
+
+/// The layout of generator state.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct GeneratorLayout<'tcx> {
+ /// The type of every local stored inside the generator.
+ pub field_tys: IndexVec<GeneratorSavedLocal, Ty<'tcx>>,
+
+ /// Which of the above fields are in each variant. Note that one field may
+ /// be stored in multiple variants.
+ pub variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>>,
+
+ /// The source that led to each variant being created (usually, a yield or
+ /// await).
+ pub variant_source_info: IndexVec<VariantIdx, SourceInfo>,
+
+ /// Which saved locals are storage-live at the same time. Locals that do not
+ /// have conflicts with each other are allowed to overlap in the computed
+ /// layout.
+ pub storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+}
+
+impl Debug for GeneratorLayout<'_> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// Prints an iterator of (key, value) tuples as a map.
+ struct MapPrinter<'a, K, V>(Cell<Option<Box<dyn Iterator<Item = (K, V)> + 'a>>>);
+ impl<'a, K, V> MapPrinter<'a, K, V> {
+ fn new(iter: impl Iterator<Item = (K, V)> + 'a) -> Self {
+ Self(Cell::new(Some(Box::new(iter))))
+ }
+ }
+ impl<'a, K: Debug, V: Debug> Debug for MapPrinter<'a, K, V> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().entries(self.0.take().unwrap()).finish()
+ }
+ }
+
+ /// Prints the generator variant name.
+ struct GenVariantPrinter(VariantIdx);
+ impl From<VariantIdx> for GenVariantPrinter {
+ fn from(idx: VariantIdx) -> Self {
+ GenVariantPrinter(idx)
+ }
+ }
+ impl Debug for GenVariantPrinter {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let variant_name = ty::GeneratorSubsts::variant_name(self.0);
+ if fmt.alternate() {
+ write!(fmt, "{:9}({:?})", variant_name, self.0)
+ } else {
+ write!(fmt, "{}", variant_name)
+ }
+ }
+ }
+
+ /// Forces its contents to print in regular mode instead of alternate mode.
+ struct OneLinePrinter<T>(T);
+ impl<T: Debug> Debug for OneLinePrinter<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{:?}", self.0)
+ }
+ }
+
+ fmt.debug_struct("GeneratorLayout")
+ .field("field_tys", &MapPrinter::new(self.field_tys.iter_enumerated()))
+ .field(
+ "variant_fields",
+ &MapPrinter::new(
+ self.variant_fields
+ .iter_enumerated()
+ .map(|(k, v)| (GenVariantPrinter(k), OneLinePrinter(v))),
+ ),
+ )
+ .field("storage_conflicts", &self.storage_conflicts)
+ .finish()
+ }
+}
+
+#[derive(Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct BorrowCheckResult<'tcx> {
+ /// All the opaque types that are restricted to concrete types
+ /// by this function. Unlike the value in `TypeckResults`, this has
+ /// unerased regions.
+ pub concrete_opaque_types: VecMap<LocalDefId, OpaqueHiddenType<'tcx>>,
+ pub closure_requirements: Option<ClosureRegionRequirements<'tcx>>,
+ pub used_mut_upvars: SmallVec<[Field; 8]>,
+ pub tainted_by_errors: Option<ErrorGuaranteed>,
+}
+
+/// The result of the `mir_const_qualif` query.
+///
+/// Each field (except `error_occurred`) corresponds to an implementer of the `Qualif` trait in
+/// `rustc_const_eval/src/transform/check_consts/qualifs.rs`. See that file for more information on each
+/// `Qualif`.
+#[derive(Clone, Copy, Debug, Default, TyEncodable, TyDecodable, HashStable)]
+pub struct ConstQualifs {
+ pub has_mut_interior: bool,
+ pub needs_drop: bool,
+ pub needs_non_const_drop: bool,
+ pub custom_eq: bool,
+ pub tainted_by_errors: Option<ErrorGuaranteed>,
+}
+
+/// After we borrow check a closure, we are left with various
+/// requirements that we have inferred between the free regions that
+/// appear in the closure's signature or on its field types. These
+/// requirements are then verified and proved by the closure's
+/// creating function. This struct encodes those requirements.
+///
+/// The requirements are listed as being between various `RegionVid`. The 0th
+/// region refers to `'static`; subsequent region vids refer to the free
+/// regions that appear in the closure (or generator's) type, in order of
+/// appearance. (This numbering is actually defined by the `UniversalRegions`
+/// struct in the NLL region checker. See for example
+/// `UniversalRegions::closure_mapping`.) Note the free regions in the
+/// closure's signature and captures are erased.
+///
+/// Example: If type check produces a closure with the closure substs:
+///
+/// ```text
+/// ClosureSubsts = [
+/// 'a, // From the parent.
+/// 'b,
+/// i8, // the "closure kind"
+/// for<'x> fn(&'<erased> &'x u32) -> &'x u32, // the "closure signature"
+/// &'<erased> String, // some upvar
+/// ]
+/// ```
+///
+/// We would "renumber" each free region to a unique vid, as follows:
+///
+/// ```text
+/// ClosureSubsts = [
+/// '1, // From the parent.
+/// '2,
+/// i8, // the "closure kind"
+/// for<'x> fn(&'3 &'x u32) -> &'x u32, // the "closure signature"
+/// &'4 String, // some upvar
+/// ]
+/// ```
+///
+/// Now the code might impose a requirement like `'1: '2`. When an
+/// instance of the closure is created, the corresponding free regions
+/// can be extracted from its type and constrained to have the given
+/// outlives relationship.
+///
+/// In some cases, we have to record outlives requirements between types and
+/// regions as well. In that case, if those types include any regions, those
+/// regions are recorded using their external names (`ReStatic`,
+/// `ReEarlyBound`, `ReFree`). We use these because in a query response we
+/// cannot use `ReVar` (which is what we use internally within the rest of the
+/// NLL code).
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct ClosureRegionRequirements<'tcx> {
+ /// The number of external regions defined on the closure. In our
+ /// example above, it would be 3 -- one for `'static`, then `'1`
+ /// and `'2`. This is just used for a sanity check later on, to
+ /// make sure that the number of regions we see at the callsite
+ /// matches.
+ pub num_external_vids: usize,
+
+ /// Requirements between the various free regions defined in
+ /// indices.
+ pub outlives_requirements: Vec<ClosureOutlivesRequirement<'tcx>>,
+}
+
+/// Indicates an outlives-constraint between a type or between two
+/// free regions declared on the closure.
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct ClosureOutlivesRequirement<'tcx> {
+ // This region or type ...
+ pub subject: ClosureOutlivesSubject<'tcx>,
+
+ // ... must outlive this one.
+ pub outlived_free_region: ty::RegionVid,
+
+ // If not, report an error here ...
+ pub blame_span: Span,
+
+ // ... due to this reason.
+ pub category: ConstraintCategory<'tcx>,
+}
+
+// Make sure this enum doesn't unintentionally grow
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(ConstraintCategory<'_>, 16);
+
+/// Outlives-constraints can be categorized to determine whether and why they
+/// are interesting (for error reporting). Order of variants indicates sort
+/// order of the category, thereby influencing diagnostic output.
+///
+/// See also `rustc_const_eval::borrow_check::constraints`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub enum ConstraintCategory<'tcx> {
+ Return(ReturnConstraint),
+ Yield,
+ UseAsConst,
+ UseAsStatic,
+ TypeAnnotation,
+ Cast,
+
+ /// A constraint that came from checking the body of a closure.
+ ///
+ /// We try to get the category that the closure used when reporting this.
+ ClosureBounds,
+
+ /// Contains the function type if available.
+ CallArgument(Option<Ty<'tcx>>),
+ CopyBound,
+ SizedBound,
+ Assignment,
+ /// A constraint that came from a usage of a variable (e.g. in an ADT expression
+ /// like `Foo { field: my_val }`)
+ Usage,
+ OpaqueType,
+ ClosureUpvar(Field),
+
+ /// A constraint from a user-written predicate,
+ /// with the provided span.
+ Predicate(Span),
+
+ /// A "boring" constraint (caused by the given location) is one that
+ /// the user probably doesn't want to see described in diagnostics,
+ /// because it is kind of an artifact of the type system setup.
+ Boring,
+ /// Boring and applicable everywhere.
+ BoringNoLocation,
+
+ /// A constraint that doesn't correspond to anything the user sees.
+ Internal,
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub enum ReturnConstraint {
+ Normal,
+ ClosureUpvar(Field),
+}
+
+/// The subject of a `ClosureOutlivesRequirement` -- that is, the thing
+/// that must outlive some region.
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum ClosureOutlivesSubject<'tcx> {
+ /// Subject is a type, typically a type parameter, but could also
+ /// be a projection. Indicates a requirement like `T: 'a` being
+ /// passed to the caller, where the type here is `T`.
+ ///
+ /// The type here is guaranteed not to contain any free regions at
+ /// present.
+ Ty(Ty<'tcx>),
+
+ /// Subject is a free region from the closure. Indicates a requirement
+ /// like `'a: 'b` being passed to the caller; the region here is `'a`.
+ Region(ty::RegionVid),
+}
+
+/// The constituent parts of a type-level constant of kind ADT or array.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct DestructuredConst<'tcx> {
+ pub variant: Option<VariantIdx>,
+ pub fields: &'tcx [ty::Const<'tcx>],
+}
+
+/// The constituent parts of a MIR constant of kind ADT or array.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct DestructuredMirConstant<'tcx> {
+ pub variant: Option<VariantIdx>,
+ pub fields: &'tcx [ConstantKind<'tcx>],
+}
+
+/// Coverage information summarized from a MIR body, if instrumented for source code coverage (see
+/// compiler option `-Cinstrument-coverage`). This information is generated by the
+/// `InstrumentCoverage` MIR pass and can be retrieved via the `coverageinfo` query.
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct CoverageInfo {
+ /// The total number of coverage region counters added to the MIR `Body`.
+ pub num_counters: u32,
+
+ /// The total number of coverage region counter expressions added to the MIR `Body`.
+ pub num_expressions: u32,
+}
+
+/// Shims which make dealing with `WithOptConstParam` easier.
+///
+/// For more information on why this is needed, consider looking
+/// at the docs for `WithOptConstParam` itself.
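+///
+/// A hedged usage sketch (assumes a `tcx: TyCtxt<'_>` and a
+/// `def: ty::WithOptConstParam<LocalDefId>` are in scope):
+///
+/// ```ignore (illustrative)
+/// let qualifs = tcx.mir_const_qualif_opt_const_arg(def);
+/// ```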
+impl<'tcx> TyCtxt<'tcx> {
+ #[inline]
+ pub fn mir_const_qualif_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<LocalDefId>,
+ ) -> ConstQualifs {
+ if let Some(param_did) = def.const_param_did {
+ self.mir_const_qualif_const_arg((def.did, param_did))
+ } else {
+ self.mir_const_qualif(def.did)
+ }
+ }
+
+ #[inline]
+ pub fn promoted_mir_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<DefId>,
+ ) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
+ if let Some((did, param_did)) = def.as_const_arg() {
+ self.promoted_mir_of_const_arg((did, param_did))
+ } else {
+ self.promoted_mir(def.did)
+ }
+ }
+
+ #[inline]
+ pub fn mir_for_ctfe_opt_const_arg(self, def: ty::WithOptConstParam<DefId>) -> &'tcx Body<'tcx> {
+ if let Some((did, param_did)) = def.as_const_arg() {
+ self.mir_for_ctfe_of_const_arg((did, param_did))
+ } else {
+ self.mir_for_ctfe(def.did)
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/spanview.rs b/compiler/rustc_middle/src/mir/spanview.rs
new file mode 100644
index 000000000..4418b848e
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/spanview.rs
@@ -0,0 +1,691 @@
+use rustc_hir::def_id::DefId;
+use rustc_middle::hir;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::MirSpanview;
+use rustc_span::{BytePos, Pos, Span, SyntaxContext};
+
+use std::cmp;
+use std::io::{self, Write};
+
+pub const TOOLTIP_INDENT: &str = " ";
+
+const CARET: char = '\u{2038}'; // Unicode `CARET`
+const ANNOTATION_LEFT_BRACKET: char = '\u{298a}'; // Unicode `Z NOTATION RIGHT BINDING BRACKET`
+const ANNOTATION_RIGHT_BRACKET: char = '\u{2989}'; // Unicode `Z NOTATION LEFT BINDING BRACKET`
+const NEW_LINE_SPAN: &str = "</span>\n<span class=\"line\">";
+const HEADER: &str = r#"<!DOCTYPE html>
+<html>
+<head>"#;
+const START_BODY: &str = r#"</head>
+<body>"#;
+const FOOTER: &str = r#"</body>
+</html>"#;
+
+const STYLE_SECTION: &str = r#"<style>
+ .line {
+ counter-increment: line;
+ }
+ .line:before {
+ content: counter(line) ": ";
+ font-family: Menlo, Monaco, monospace;
+ font-style: italic;
+ width: 3.8em;
+ display: inline-block;
+ text-align: right;
+ filter: opacity(50%);
+ -webkit-user-select: none;
+ }
+ .code {
+ color: #dddddd;
+ background-color: #222222;
+ font-family: Menlo, Monaco, monospace;
+ line-height: 1.4em;
+ border-bottom: 2px solid #222222;
+ white-space: pre;
+ display: inline-block;
+ }
+ .odd {
+ background-color: #55bbff;
+ color: #223311;
+ }
+ .even {
+ background-color: #ee7756;
+ color: #551133;
+ }
+ .code {
+ --index: calc(var(--layer) - 1);
+ padding-top: calc(var(--index) * 0.15em);
+ filter:
+ hue-rotate(calc(var(--index) * 25deg))
+ saturate(calc(100% - (var(--index) * 2%)))
+ brightness(calc(100% - (var(--index) * 1.5%)));
+ }
+ .annotation {
+ color: #4444ff;
+ font-family: monospace;
+ font-style: italic;
+ display: none;
+ -webkit-user-select: none;
+ }
+ body:active .annotation {
+ /* requires holding mouse down anywhere on the page */
+ display: inline-block;
+ }
+ span:hover .annotation {
+ /* requires hover over a span ONLY on its first line */
+ display: inline-block;
+ }
+</style>"#;
+
+/// Metadata to highlight the span of a MIR BasicBlock, Statement, or Terminator.
+#[derive(Clone, Debug)]
+pub struct SpanViewable {
+ pub bb: BasicBlock,
+ pub span: Span,
+ pub id: String,
+ pub tooltip: String,
+}
+
+/// Write a spanview HTML+CSS file to analyze MIR element spans.
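+///
+/// A hedged usage sketch (assumes a `tcx` and `body` are in scope and the
+/// output path, which is invented for illustration, is writable):
+///
+/// ```ignore (illustrative)
+/// let mut w = std::io::BufWriter::new(std::fs::File::create("fn.html")?);
+/// write_mir_fn_spanview(tcx, body, MirSpanview::Block, "my_fn", &mut w)?;
+/// ```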
+pub fn write_mir_fn_spanview<'tcx, W>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ spanview: MirSpanview,
+ title: &str,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ let def_id = body.source.def_id();
+ let hir_body = hir_body(tcx, def_id);
+ if hir_body.is_none() {
+ return Ok(());
+ }
+ let body_span = hir_body.unwrap().value.span;
+ let mut span_viewables = Vec::new();
+ for (bb, data) in body.basic_blocks().iter_enumerated() {
+ match spanview {
+ MirSpanview::Statement => {
+ for (i, statement) in data.statements.iter().enumerate() {
+ if let Some(span_viewable) =
+ statement_span_viewable(tcx, body_span, bb, i, statement)
+ {
+ span_viewables.push(span_viewable);
+ }
+ }
+ if let Some(span_viewable) = terminator_span_viewable(tcx, body_span, bb, data) {
+ span_viewables.push(span_viewable);
+ }
+ }
+ MirSpanview::Terminator => {
+ if let Some(span_viewable) = terminator_span_viewable(tcx, body_span, bb, data) {
+ span_viewables.push(span_viewable);
+ }
+ }
+ MirSpanview::Block => {
+ if let Some(span_viewable) = block_span_viewable(tcx, body_span, bb, data) {
+ span_viewables.push(span_viewable);
+ }
+ }
+ }
+ }
+ write_document(tcx, fn_span(tcx, def_id), span_viewables, title, w)?;
+ Ok(())
+}
+
+/// Generate a spanview HTML+CSS document covering `spanview_span`, from a pre-generated
+/// list of `SpanViewable`s.
+pub fn write_document<'tcx, W>(
+ tcx: TyCtxt<'tcx>,
+ spanview_span: Span,
+ mut span_viewables: Vec<SpanViewable>,
+ title: &str,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ let mut from_pos = spanview_span.lo();
+ let end_pos = spanview_span.hi();
+ let source_map = tcx.sess.source_map();
+ let start = source_map.lookup_char_pos(from_pos);
+ let indent_to_initial_start_col = " ".repeat(start.col.to_usize());
+ debug!(
+ "spanview_span={:?}; source is:\n{}{}",
+ spanview_span,
+ indent_to_initial_start_col,
+ source_map.span_to_snippet(spanview_span).expect("function should have printable source")
+ );
+ writeln!(w, "{}", HEADER)?;
+ writeln!(w, "<title>{}</title>", title)?;
+ writeln!(w, "{}", STYLE_SECTION)?;
+ writeln!(w, "{}", START_BODY)?;
+ write!(
+ w,
+ r#"<div class="code" style="counter-reset: line {}"><span class="line">{}"#,
+ start.line - 1,
+ indent_to_initial_start_col,
+ )?;
+ span_viewables.sort_unstable_by(|a, b| {
+ let a = a.span;
+ let b = b.span;
+ if a.lo() == b.lo() {
+ // Sort hi() in reverse order so shorter spans are attempted after longer spans.
+ // This should give shorter spans a higher "layer", so they are not covered by
+ // the longer spans.
+ b.hi().partial_cmp(&a.hi())
+ } else {
+ a.lo().partial_cmp(&b.lo())
+ }
+ .unwrap()
+ });
+ let mut ordered_viewables = &span_viewables[..];
+ const LOWEST_VIEWABLE_LAYER: usize = 1;
+ let mut alt = false;
+ while ordered_viewables.len() > 0 {
+ debug!(
+ "calling write_next_viewable with from_pos={}, end_pos={}, and viewables len={}",
+ from_pos.to_usize(),
+ end_pos.to_usize(),
+ ordered_viewables.len()
+ );
+ let curr_id = &ordered_viewables[0].id;
+ let (next_from_pos, next_ordered_viewables) = write_next_viewable_with_overlaps(
+ tcx,
+ from_pos,
+ end_pos,
+ ordered_viewables,
+ alt,
+ LOWEST_VIEWABLE_LAYER,
+ w,
+ )?;
+ debug!(
+ "DONE calling write_next_viewable, with new from_pos={}, \
+ and remaining viewables len={}",
+ next_from_pos.to_usize(),
+ next_ordered_viewables.len()
+ );
+ assert!(
+ from_pos != next_from_pos || ordered_viewables.len() != next_ordered_viewables.len(),
+ "write_next_viewable_with_overlaps() must make a state change"
+ );
+ from_pos = next_from_pos;
+ if next_ordered_viewables.len() != ordered_viewables.len() {
+ ordered_viewables = next_ordered_viewables;
+ if let Some(next_ordered_viewable) = ordered_viewables.first() {
+ if &next_ordered_viewable.id != curr_id {
+ alt = !alt;
+ }
+ }
+ }
+ }
+ if from_pos < end_pos {
+ write_coverage_gap(tcx, from_pos, end_pos, w)?;
+ }
+ writeln!(w, r#"</span></div>"#)?;
+ writeln!(w, "{}", FOOTER)?;
+ Ok(())
+}
+
+/// Format a string showing the start line and column, and end line and column within a file.
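+/// (For example (illustrative): a span starting at line 3, column 5 and ending
+/// at line 3, column 15 formats as `3:5-3:15`.)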
+pub fn source_range_no_file<'tcx>(tcx: TyCtxt<'tcx>, span: Span) -> String {
+ let source_map = tcx.sess.source_map();
+ let start = source_map.lookup_char_pos(span.lo());
+ let end = source_map.lookup_char_pos(span.hi());
+ format!("{}:{}-{}:{}", start.line, start.col.to_usize() + 1, end.line, end.col.to_usize() + 1)
+}
+
+pub fn statement_kind_name(statement: &Statement<'_>) -> &'static str {
+ use StatementKind::*;
+ match statement.kind {
+ Assign(..) => "Assign",
+ FakeRead(..) => "FakeRead",
+ SetDiscriminant { .. } => "SetDiscriminant",
+ Deinit(..) => "Deinit",
+ StorageLive(..) => "StorageLive",
+ StorageDead(..) => "StorageDead",
+ Retag(..) => "Retag",
+ AscribeUserType(..) => "AscribeUserType",
+ Coverage(..) => "Coverage",
+ CopyNonOverlapping(..) => "CopyNonOverlapping",
+ Nop => "Nop",
+ }
+}
+
+pub fn terminator_kind_name(term: &Terminator<'_>) -> &'static str {
+ use TerminatorKind::*;
+ match term.kind {
+ Goto { .. } => "Goto",
+ SwitchInt { .. } => "SwitchInt",
+ Resume => "Resume",
+ Abort => "Abort",
+ Return => "Return",
+ Unreachable => "Unreachable",
+ Drop { .. } => "Drop",
+ DropAndReplace { .. } => "DropAndReplace",
+ Call { .. } => "Call",
+ Assert { .. } => "Assert",
+ Yield { .. } => "Yield",
+ GeneratorDrop => "GeneratorDrop",
+ FalseEdge { .. } => "FalseEdge",
+ FalseUnwind { .. } => "FalseUnwind",
+ InlineAsm { .. } => "InlineAsm",
+ }
+}
+
+fn statement_span_viewable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body_span: Span,
+ bb: BasicBlock,
+ i: usize,
+ statement: &Statement<'tcx>,
+) -> Option<SpanViewable> {
+ let span = statement.source_info.span;
+ if !body_span.contains(span) {
+ return None;
+ }
+ let id = format!("{}[{}]", bb.index(), i);
+ let tooltip = tooltip(tcx, &id, span, vec![statement.clone()], &None);
+ Some(SpanViewable { bb, span, id, tooltip })
+}
+
+fn terminator_span_viewable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body_span: Span,
+ bb: BasicBlock,
+ data: &BasicBlockData<'tcx>,
+) -> Option<SpanViewable> {
+ let term = data.terminator();
+ let span = term.source_info.span;
+ if !body_span.contains(span) {
+ return None;
+ }
+ let id = format!("{}:{}", bb.index(), terminator_kind_name(term));
+ let tooltip = tooltip(tcx, &id, span, vec![], &data.terminator);
+ Some(SpanViewable { bb, span, id, tooltip })
+}
+
+fn block_span_viewable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body_span: Span,
+ bb: BasicBlock,
+ data: &BasicBlockData<'tcx>,
+) -> Option<SpanViewable> {
+ let span = compute_block_span(data, body_span);
+ if !body_span.contains(span) {
+ return None;
+ }
+ let id = format!("{}", bb.index());
+ let tooltip = tooltip(tcx, &id, span, data.statements.clone(), &data.terminator);
+ Some(SpanViewable { bb, span, id, tooltip })
+}
+
+fn compute_block_span<'tcx>(data: &BasicBlockData<'tcx>, body_span: Span) -> Span {
+ let mut span = data.terminator().source_info.span;
+ for statement_span in data.statements.iter().map(|statement| statement.source_info.span) {
+ // Only combine Spans from the root context, and within the function's body_span.
+ if statement_span.ctxt() == SyntaxContext::root() && body_span.contains(statement_span) {
+ span = span.to(statement_span);
+ }
+ }
+ span
+}
+
+/// Recursively process each ordered span. Spans that overlap will have progressively varying
+/// styles, such as increased padding for each overlap. Non-overlapping adjacent spans will
+/// have alternating style choices, to help distinguish between them when they are visually
+/// adjacent. The `layer` is incremented for each overlap, and the `alt` bool alternates between
+/// true and false for each adjacent non-overlapping span. Source code between the spans (code
+/// that is not in any coverage region) has neutral styling.
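+///
+/// A hedged text illustration of the layering (not actual output):
+///
+/// ```text
+/// layer 1: [---------- outer span ----------]
+/// layer 2:        [ overlapping span ]
+/// ```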
+fn write_next_viewable_with_overlaps<'tcx, 'b, W>(
+ tcx: TyCtxt<'tcx>,
+ mut from_pos: BytePos,
+ mut to_pos: BytePos,
+ ordered_viewables: &'b [SpanViewable],
+ alt: bool,
+ layer: usize,
+ w: &mut W,
+) -> io::Result<(BytePos, &'b [SpanViewable])>
+where
+ W: Write,
+{
+ let debug_indent = " ".repeat(layer);
+ let (viewable, mut remaining_viewables) =
+ ordered_viewables.split_first().expect("ordered_viewables should have some");
+
+ if from_pos < viewable.span.lo() {
+ debug!(
+ "{}advance from_pos to next SpanViewable (from from_pos={} to viewable.span.lo()={} \
+ of {:?}), with to_pos={}",
+ debug_indent,
+ from_pos.to_usize(),
+ viewable.span.lo().to_usize(),
+ viewable.span,
+ to_pos.to_usize()
+ );
+ let hi = cmp::min(viewable.span.lo(), to_pos);
+ write_coverage_gap(tcx, from_pos, hi, w)?;
+ from_pos = hi;
+ if from_pos < viewable.span.lo() {
+ debug!(
+ "{}EARLY RETURN: stopped before getting to next SpanViewable, at {}",
+ debug_indent,
+ from_pos.to_usize()
+ );
+ return Ok((from_pos, ordered_viewables));
+ }
+ }
+
+ if from_pos < viewable.span.hi() {
+ // Set to_pos to the end of this `viewable` to ensure the recursive calls stop writing
+ // with room to print the tail.
+ to_pos = cmp::min(viewable.span.hi(), to_pos);
+ debug!(
+ "{}update to_pos (if not closer) to viewable.span.hi()={}; to_pos is now {}",
+ debug_indent,
+ viewable.span.hi().to_usize(),
+ to_pos.to_usize()
+ );
+ }
+
+ let mut subalt = false;
+ while remaining_viewables.len() > 0 && remaining_viewables[0].span.overlaps(viewable.span) {
+ let overlapping_viewable = &remaining_viewables[0];
+ debug!("{}overlapping_viewable.span={:?}", debug_indent, overlapping_viewable.span);
+
+ let span =
+ trim_span(viewable.span, from_pos, cmp::min(overlapping_viewable.span.lo(), to_pos));
+ let mut some_html_snippet = if from_pos <= viewable.span.hi() || viewable.span.is_empty() {
+ // `viewable` is not yet fully rendered, so start writing the span, up to either the
+ // `to_pos` or the next `overlapping_viewable`, whichever comes first.
+ debug!(
+ "{}make html_snippet (may not write it if early exit) for partial span {:?} \
+ of viewable.span {:?}",
+ debug_indent, span, viewable.span
+ );
+ from_pos = span.hi();
+ make_html_snippet(tcx, span, Some(&viewable))
+ } else {
+ None
+ };
+
+ // Defer writing the HTML snippet (until after early return checks) ONLY for empty spans.
+ // An empty Span with Some(html_snippet) is probably a tail marker. If there is an early
+ // exit, there should be another opportunity to write the tail marker.
+ if !span.is_empty() {
+ if let Some(ref html_snippet) = some_html_snippet {
+ debug!(
+ "{}write html_snippet for that partial span of viewable.span {:?}",
+ debug_indent, viewable.span
+ );
+ write_span(html_snippet, &viewable.tooltip, alt, layer, w)?;
+ }
+ some_html_snippet = None;
+ }
+
+ if from_pos < overlapping_viewable.span.lo() {
+ debug!(
+ "{}EARLY RETURN: from_pos={} has not yet reached the \
+ overlapping_viewable.span {:?}",
+ debug_indent,
+ from_pos.to_usize(),
+ overlapping_viewable.span
+ );
+ // must have reached `to_pos` before reaching the start of the
+ // `overlapping_viewable.span`
+ return Ok((from_pos, ordered_viewables));
+ }
+
+ if from_pos == to_pos
+ && !(from_pos == overlapping_viewable.span.lo() && overlapping_viewable.span.is_empty())
+ {
+ debug!(
+ "{}EARLY RETURN: from_pos=to_pos={} and overlapping_viewable.span {:?} is not \
+ empty, or not from_pos",
+ debug_indent,
+ to_pos.to_usize(),
+ overlapping_viewable.span
+ );
+ // `to_pos` must have occurred before the overlapping viewable. Return
+ // `ordered_viewables` so we can continue rendering the `viewable`, from after the
+ // `to_pos`.
+ return Ok((from_pos, ordered_viewables));
+ }
+
+ if let Some(ref html_snippet) = some_html_snippet {
+ debug!(
+ "{}write html_snippet for that partial span of viewable.span {:?}",
+ debug_indent, viewable.span
+ );
+ write_span(html_snippet, &viewable.tooltip, alt, layer, w)?;
+ }
+
+ debug!(
+ "{}recursively calling write_next_viewable with from_pos={}, to_pos={}, \
+ and viewables len={}",
+ debug_indent,
+ from_pos.to_usize(),
+ to_pos.to_usize(),
+ remaining_viewables.len()
+ );
+ // Write the overlaps (and the overlaps' overlaps, if any) up to `to_pos`.
+ let curr_id = &remaining_viewables[0].id;
+ let (next_from_pos, next_remaining_viewables) = write_next_viewable_with_overlaps(
+ tcx,
+ from_pos,
+ to_pos,
+ &remaining_viewables,
+ subalt,
+ layer + 1,
+ w,
+ )?;
+ debug!(
+ "{}DONE recursively calling write_next_viewable, with new from_pos={}, and remaining \
+ viewables len={}",
+ debug_indent,
+ next_from_pos.to_usize(),
+ next_remaining_viewables.len()
+ );
+ assert!(
+ from_pos != next_from_pos
+ || remaining_viewables.len() != next_remaining_viewables.len(),
+ "write_next_viewable_with_overlaps() must make a state change"
+ );
+ from_pos = next_from_pos;
+ if next_remaining_viewables.len() != remaining_viewables.len() {
+ remaining_viewables = next_remaining_viewables;
+ if let Some(next_ordered_viewable) = remaining_viewables.first() {
+ if &next_ordered_viewable.id != curr_id {
+ subalt = !subalt;
+ }
+ }
+ }
+ }
+ if from_pos <= viewable.span.hi() {
+ let span = trim_span(viewable.span, from_pos, to_pos);
+ debug!(
+ "{}After overlaps, writing (end span?) {:?} of viewable.span {:?}",
+ debug_indent, span, viewable.span
+ );
+ if let Some(ref html_snippet) = make_html_snippet(tcx, span, Some(&viewable)) {
+ from_pos = span.hi();
+ write_span(html_snippet, &viewable.tooltip, alt, layer, w)?;
+ }
+ }
+ debug!("{}RETURN: No more overlap", debug_indent);
+ Ok((
+ from_pos,
+ if from_pos < viewable.span.hi() { ordered_viewables } else { remaining_viewables },
+ ))
+}
+
+#[inline(always)]
+fn write_coverage_gap<'tcx, W>(
+ tcx: TyCtxt<'tcx>,
+ lo: BytePos,
+ hi: BytePos,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ let span = Span::with_root_ctxt(lo, hi);
+ if let Some(ref html_snippet) = make_html_snippet(tcx, span, None) {
+ write_span(html_snippet, "", false, 0, w)
+ } else {
+ Ok(())
+ }
+}
+
+fn write_span<W>(
+ html_snippet: &str,
+ tooltip: &str,
+ alt: bool,
+ layer: usize,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ let maybe_alt_class = if layer > 0 {
+ if alt { " odd" } else { " even" }
+ } else {
+ ""
+ };
+ let maybe_title_attr = if !tooltip.is_empty() {
+ format!(" title=\"{}\"", escape_attr(tooltip))
+ } else {
+ "".to_owned()
+ };
+ if layer == 1 {
+ write!(w, "<span>")?;
+ }
+ for (i, line) in html_snippet.lines().enumerate() {
+ if i > 0 {
+ write!(w, "{}", NEW_LINE_SPAN)?;
+ }
+ write!(
+ w,
+ r#"<span class="code{}" style="--layer: {}"{}>{}</span>"#,
+ maybe_alt_class, layer, maybe_title_attr, line
+ )?;
+ }
+ // Check for and translate trailing newlines, because `str::lines()` ignores them
+ if html_snippet.ends_with('\n') {
+ write!(w, "{}", NEW_LINE_SPAN)?;
+ }
+ if layer == 1 {
+ write!(w, "</span>")?;
+ }
+ Ok(())
+}
+
+fn make_html_snippet<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ some_viewable: Option<&SpanViewable>,
+) -> Option<String> {
+ let source_map = tcx.sess.source_map();
+ let snippet = source_map
+ .span_to_snippet(span)
+ .unwrap_or_else(|err| bug!("span_to_snippet error for span {:?}: {:?}", span, err));
+ let html_snippet = if let Some(viewable) = some_viewable {
+ let is_head = span.lo() == viewable.span.lo();
+ let is_tail = span.hi() == viewable.span.hi();
+ let mut labeled_snippet = if is_head {
+ format!(r#"<span class="annotation">{}{}</span>"#, viewable.id, ANNOTATION_LEFT_BRACKET)
+ } else {
+ "".to_owned()
+ };
+ if span.is_empty() {
+ if is_head && is_tail {
+ labeled_snippet.push(CARET);
+ }
+ } else {
+ labeled_snippet.push_str(&escape_html(&snippet));
+ };
+ if is_tail {
+ labeled_snippet.push_str(&format!(
+ r#"<span class="annotation">{}{}</span>"#,
+ ANNOTATION_RIGHT_BRACKET, viewable.id
+ ));
+ }
+ labeled_snippet
+ } else {
+ escape_html(&snippet)
+ };
+ if html_snippet.is_empty() { None } else { Some(html_snippet) }
+}
+
+fn tooltip<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ spanview_id: &str,
+ span: Span,
+ statements: Vec<Statement<'tcx>>,
+ terminator: &Option<Terminator<'tcx>>,
+) -> String {
+ let source_map = tcx.sess.source_map();
+ let mut text = Vec::new();
+ text.push(format!("{}: {}:", spanview_id, &source_map.span_to_embeddable_string(span)));
+ for statement in statements {
+ let source_range = source_range_no_file(tcx, statement.source_info.span);
+ text.push(format!(
+ "\n{}{}: {}: {:?}",
+ TOOLTIP_INDENT,
+ source_range,
+ statement_kind_name(&statement),
+ statement
+ ));
+ }
+ if let Some(term) = terminator {
+ let source_range = source_range_no_file(tcx, term.source_info.span);
+ text.push(format!(
+ "\n{}{}: {}: {:?}",
+ TOOLTIP_INDENT,
+ source_range,
+ terminator_kind_name(term),
+ term.kind
+ ));
+ }
+ text.join("")
+}
+
+fn trim_span(span: Span, from_pos: BytePos, to_pos: BytePos) -> Span {
+ trim_span_hi(trim_span_lo(span, from_pos), to_pos)
+}
+
+fn trim_span_lo(span: Span, from_pos: BytePos) -> Span {
+ if from_pos <= span.lo() { span } else { span.with_lo(cmp::min(span.hi(), from_pos)) }
+}
+
+fn trim_span_hi(span: Span, to_pos: BytePos) -> Span {
+ if to_pos >= span.hi() { span } else { span.with_hi(cmp::max(span.lo(), to_pos)) }
+}
+
+fn fn_span<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Span {
+ let fn_decl_span = tcx.def_span(def_id);
+ if let Some(body_span) = hir_body(tcx, def_id).map(|hir_body| hir_body.value.span) {
+ if fn_decl_span.eq_ctxt(body_span) { fn_decl_span.to(body_span) } else { body_span }
+ } else {
+ fn_decl_span
+ }
+}
+
+fn hir_body<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Option<&'tcx rustc_hir::Body<'tcx>> {
+ let hir_node = tcx.hir().get_if_local(def_id).expect("expected DefId is local");
+ hir::map::associated_body(hir_node).map(|fn_body_id| tcx.hir().body(fn_body_id))
+}
+
+fn escape_html(s: &str) -> String {
+ s.replace('&', "&amp;").replace('<', "&lt;").replace('>', "&gt;")
+}
+
+fn escape_attr(s: &str) -> String {
+ s.replace('&', "&amp;")
+ .replace('\"', "&quot;")
+ .replace('\'', "&#39;")
+ .replace('<', "&lt;")
+ .replace('>', "&gt;")
+}
diff --git a/compiler/rustc_middle/src/mir/switch_sources.rs b/compiler/rustc_middle/src/mir/switch_sources.rs
new file mode 100644
index 000000000..b91c0c257
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/switch_sources.rs
@@ -0,0 +1,78 @@
+//! Lazily compute the inverse of each `SwitchInt`'s switch targets. Modeled after
+//! `Predecessors`/`PredecessorCache`.
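+//!
+//! A hedged illustration (invented MIR, not from this crate): if `bb0` ends in
+//! a `SwitchInt` whose targets are `[(0, bb1), (7, bb1)]` with otherwise `bb2`,
+//! the computed map contains:
+//!
+//! ```text
+//! (bb1, bb0) => [Some(0), Some(7)]
+//! (bb2, bb0) => [None]
+//! ```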
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_index::vec::IndexVec;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use smallvec::SmallVec;
+
+use crate::mir::{BasicBlock, BasicBlockData, Terminator, TerminatorKind};
+
+pub type SwitchSources = FxHashMap<(BasicBlock, BasicBlock), SmallVec<[Option<u128>; 1]>>;
+
+#[derive(Clone, Debug)]
+pub(super) struct SwitchSourceCache {
+ cache: OnceCell<SwitchSources>,
+}
+
+impl SwitchSourceCache {
+ #[inline]
+ pub(super) fn new() -> Self {
+ SwitchSourceCache { cache: OnceCell::new() }
+ }
+
+ /// Invalidates the switch source cache.
+ #[inline]
+ pub(super) fn invalidate(&mut self) {
+ self.cache = OnceCell::new();
+ }
+
+ /// Returns the switch sources for this MIR.
+ #[inline]
+ pub(super) fn compute(
+ &self,
+ basic_blocks: &IndexVec<BasicBlock, BasicBlockData<'_>>,
+ ) -> &SwitchSources {
+ self.cache.get_or_init(|| {
+ let mut switch_sources: SwitchSources = FxHashMap::default();
+ for (bb, data) in basic_blocks.iter_enumerated() {
+ if let Some(Terminator {
+ kind: TerminatorKind::SwitchInt { targets, .. }, ..
+ }) = &data.terminator
+ {
+ for (value, target) in targets.iter() {
+ switch_sources.entry((target, bb)).or_default().push(Some(value));
+ }
+ switch_sources.entry((targets.otherwise(), bb)).or_default().push(None);
+ }
+ }
+
+ switch_sources
+ })
+ }
+}
+
+impl<S: Encoder> Encodable<S> for SwitchSourceCache {
+ #[inline]
+ fn encode(&self, _s: &mut S) {}
+}
+
+impl<D: Decoder> Decodable<D> for SwitchSourceCache {
+ #[inline]
+ fn decode(_: &mut D) -> Self {
+ Self::new()
+ }
+}
+
+impl<CTX> HashStable<CTX> for SwitchSourceCache {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ SwitchSourceCache,
+}
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
new file mode 100644
index 000000000..eb90169d0
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -0,0 +1,1168 @@
+//! This defines the syntax of MIR, i.e., the set of available MIR operations, and other definitions
+//! closely related to MIR semantics.
+//! This is in a dedicated file so that changes to this file can be reviewed more carefully.
+//! The intention is that this file only contains datatype declarations, no code.
+
+use super::{BasicBlock, Constant, Field, Local, SwitchTargets, UserTypeProjection};
+
+use crate::mir::coverage::{CodeRegion, CoverageKind};
+use crate::ty::adjustment::PointerCast;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, List, Ty};
+use crate::ty::{Region, UserTypeAnnotationIndex};
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::def_id::DefId;
+use rustc_hir::{self as hir};
+use rustc_hir::{self, GeneratorKind};
+use rustc_target::abi::VariantIdx;
+
+use rustc_ast::Mutability;
+use rustc_span::def_id::LocalDefId;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+use rustc_target::asm::InlineAsmRegOrRegClass;
+
+/// The various "big phases" that MIR goes through.
+///
+/// These phases all describe dialects of MIR. Since all MIR uses the same data structures, the
+/// dialects forbid certain variants or values in certain phases. The sections below summarize the
+/// changes, but do not document them thoroughly. The full documentation is found in the appropriate
+/// documentation for the thing the change is affecting.
+///
+/// Warning: ordering of variants is significant.
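+///
+/// Since the variants derive `Ord`, passes can compare phases directly; a
+/// hedged sketch (the helper name is hypothetical, not from this crate):
+///
+/// ```ignore (illustrative)
+/// fn is_after_drop_elaboration(phase: MirPhase) -> bool {
+///     phase >= MirPhase::DropsLowered
+/// }
+/// ```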
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub enum MirPhase {
+ /// The dialect of MIR used during all phases before `DropsLowered` is the same. This is also
+ /// the MIR that analyses such as borrowck use.
+ ///
+ /// One important thing to remember about the behavior of this section of MIR is that drop terminators
+ /// (including drop and replace) are *conditional*. The elaborate drops pass will then replace each
+ /// instance of a drop terminator with a nop, an unconditional drop, or a drop conditioned on a drop
+ /// flag. Of course, this means that it is important that the drop elaboration can accurately recognize
+ /// when things are initialized and when things are de-initialized. That means any code running on this
+ /// version of MIR must be sure to produce output that drop elaboration can reason about. See the
+ /// section on the drop terminators for more details.
+ Built = 0,
+ // FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query).
+ // We used to have this for pre-miri MIR based const eval.
+ Const = 1,
+ /// This phase checks the MIR for promotable elements and takes them out of the main MIR body
+ /// by creating a new MIR body per promoted element. After this phase (and thus the termination
+ /// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir`
+ /// query.
+ ConstsPromoted = 2,
+ /// After this projections may only contain deref projections as the first element.
+ Derefered = 3,
+ /// Beginning with this phase, the following variants are disallowed:
+ /// * [`TerminatorKind::DropAndReplace`]
+ /// * [`TerminatorKind::FalseUnwind`]
+ /// * [`TerminatorKind::FalseEdge`]
+ /// * [`StatementKind::FakeRead`]
+ /// * [`StatementKind::AscribeUserType`]
+ /// * [`Rvalue::Ref`] with `BorrowKind::Shallow`
+ ///
+ /// And the following variant is allowed:
+ /// * [`StatementKind::Retag`]
+ ///
+ /// Furthermore, `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop`
+ /// terminator means that the auto-generated drop glue will be invoked. Also, `Copy` operands
+ /// are allowed for non-`Copy` types.
+ DropsLowered = 4,
+ /// Beginning with this phase, the following variant is disallowed:
+ /// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
+ ///
+ /// And the following variant is allowed:
+ /// * [`StatementKind::SetDiscriminant`]
+ Deaggregated = 5,
+ /// Before this phase, generators are in the "source code" form, featuring `yield` statements
+ /// and such. With this phase change, they are transformed into a proper state machine. Running
+ /// optimizations before this change can be potentially dangerous because the source code is to
+ /// some extent a "lie." In particular, `yield` terminators effectively make the value of all
+ /// locals visible to the caller. This means that dead store elimination before them, or code
+ /// motion across them, is not correct in general. This is also exacerbated by type checking
+ /// having pre-computed a list of the types that it thinks are ok to be live across a yield
+ /// point - this is necessary to decide, e.g., whether autotraits are implemented. Introducing new
+ /// types across a yield point will lead to ICEs because of this.
+ ///
+ /// Beginning with this phase, the following variants are disallowed:
+ /// * [`TerminatorKind::Yield`]
+ /// * [`TerminatorKind::GeneratorDrop`]
+ /// * [`ProjectionElem::Deref`] of `Box`
+ GeneratorsLowered = 6,
+ Optimized = 7,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Borrow kinds
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(Hash, HashStable)]
+pub enum BorrowKind {
+ /// Data must be immutable and is aliasable.
+ Shared,
+
+ /// The immediately borrowed place must be immutable, but projections from
+ /// it don't need to be. For example, a shallow borrow of `a.b` doesn't
+ /// conflict with a mutable borrow of `a.b.c`.
+ ///
+ /// This is used when lowering matches: when matching on a place we want to
+ /// ensure that the place has the same value from the start of the match until
+ /// an arm is selected. This prevents this code from compiling:
+ /// ```compile_fail,E0510
+ /// let mut x = &Some(0);
+ /// match *x {
+ /// None => (),
+ /// Some(_) if { x = &None; false } => (),
+ /// Some(_) => (),
+ /// }
+ /// ```
+ /// This can't be a shared borrow because mutably borrowing `(*x as Some).0`
+ /// should not prevent `if let None = x { ... }`, for example, because
+ /// mutating `(*x as Some).0` can't affect the discriminant of `x`.
+ /// We can also report errors with this kind of borrow differently.
+ Shallow,
+
+ /// Data must be immutable but not aliasable. This kind of borrow
+ /// cannot currently be expressed by the user and is used only in
+ /// implicit closure bindings. It is needed when the closure is
+ /// borrowing or mutating a mutable referent, e.g.:
+ /// ```
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = || *x += 5;
+ /// ```
+ /// If we were to try to translate this closure into a more explicit
+ /// form, we'd encounter an error with the code as written:
+ /// ```compile_fail,E0594
+ /// struct Env<'a> { x: &'a &'a mut isize }
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = (&mut Env { x: &x }, fn_ptr); // Closure is pair of env and fn
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ /// This is then illegal because you cannot mutate an `&mut` found
+ /// in an aliasable location. To solve, you'd have to translate with
+ /// an `&mut` borrow:
+ /// ```compile_fail,E0596
+ /// struct Env<'a> { x: &'a mut &'a mut isize }
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = (&mut Env { x: &mut x }, fn_ptr); // changed from &x to &mut x
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ /// Now the assignment to `**env.x` is legal, but creating a
+ /// mutable pointer to `x` is not because `x` is not mutable. We
+ /// could fix this by declaring `x` as `let mut x`. This is ok in
+ /// user code, if awkward, but extra weird for closures, since the
+ /// borrow is hidden.
+ ///
+ /// So we introduce a "unique imm" borrow -- the referent is
+ /// immutable, but not aliasable. This solves the problem. For
+ /// simplicity, we don't give users the way to express this
+ /// borrow, it's just used when translating closures.
+ Unique,
+
+ /// Data is mutable and not aliasable.
+ Mut {
+ /// `true` if this borrow arose from method-call auto-ref
+ /// (i.e., `adjustment::Adjust::Borrow`).
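+ ///
+ /// The canonical illustration (hedged; this example is ours, not this
+ /// file's) is `v.push(v.len())`: the auto-ref `&mut v` is created before
+ /// `v.len()` is evaluated, and the two-phase mechanism makes that pair of
+ /// borrows well-formed.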
+ allow_two_phase_borrow: bool,
+ },
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Statements
+
+/// The various kinds of statements that can appear in MIR.
+///
+/// Not all of these are allowed at every [`MirPhase`]. Check the documentation there to see which
+/// ones you do not have to worry about. The MIR validator will generally enforce such restrictions,
+/// causing an ICE if they are violated.
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum StatementKind<'tcx> {
+ /// Assign statements roughly correspond to an assignment in Rust proper (`x = ...`) except
+ /// without the possibility of dropping the previous value (that must be done separately, if at
+ /// all). The *exact* way this works is undecided. It probably does something like evaluating
+ /// the LHS to a place and the RHS to a value, and then storing the value to the place. Various
+ /// parts of this may do type specific things that are more complicated than simply copying
+ /// bytes.
+ ///
+ /// **Needs clarification**: The implication of the above idea would be that assignment implies
+ /// that the resulting value is initialized. I believe we could commit to this separately from
+ /// committing to whatever part of the memory model we would need to decide on to make the above
+ /// paragraph precise. Do we want to?
+ ///
+ /// Assignments in which the types of the place and rvalue differ are not well-formed.
+ ///
+ /// **Needs clarification**: Do we ever want to worry about non-free (in the body) lifetimes for
+ /// the typing requirement in post drop-elaboration MIR? I think probably not - I'm not sure we
+ /// could meaningfully require this anyway. How about free lifetimes? Is ignoring this
+ /// interesting for optimizations? Do we want to allow such optimizations?
+ ///
+ /// **Needs clarification**: We currently require that the LHS place not overlap with any place
+ /// read as part of computation of the RHS for some rvalues (generally those not producing
+ /// primitives). This requirement is under discussion in [#68364]. As a part of this discussion,
+ /// it is also unclear in what order the components are evaluated.
+ ///
+ /// [#68364]: https://github.com/rust-lang/rust/issues/68364
+ ///
+ /// See [`Rvalue`] documentation for details on each of those.
+ Assign(Box<(Place<'tcx>, Rvalue<'tcx>)>),
+
+ /// This represents all the reading that a pattern match may do (e.g., inspecting constants and
+ /// discriminant values), and the kind of pattern it comes from. This is in order to adapt
+ /// potential error messages to these specific patterns.
+ ///
+ /// Note that this also is emitted for regular `let` bindings to ensure that locals that are
+ /// never accessed still get some sanity checks for, e.g., `let x: ! = ..;`
+ ///
+ /// When executed at runtime this is a nop.
+ ///
+ /// Disallowed after drop elaboration.
+ FakeRead(Box<(FakeReadCause, Place<'tcx>)>),
+
+ /// Write the discriminant for a variant to the enum Place.
+ ///
+ /// This is permitted for both generators and ADTs. This does not necessarily write to the
+ /// entire place; instead, it writes to the minimum set of bytes as required by the layout for
+ /// the type.
+ SetDiscriminant { place: Box<Place<'tcx>>, variant_index: VariantIdx },
+
+ /// Deinitializes the place.
+ ///
+ /// This writes `uninit` bytes to the entire place.
+ Deinit(Box<Place<'tcx>>),
+
+ /// `StorageLive` and `StorageDead` statements mark the live range of a local.
+ ///
+ /// At any point during the execution of a function, each local is either allocated or
+ /// unallocated. Except as noted below, all locals except function parameters are initially
+ /// unallocated. `StorageLive` statements cause memory to be allocated for the local while
+ /// `StorageDead` statements cause the memory to be freed. Using a local in any way (not only
+ /// reading/writing from it) while it is unallocated is UB.
+ ///
+ /// Some locals have no `StorageLive` or `StorageDead` statements within the entire MIR body.
+ /// These locals are implicitly allocated for the full duration of the function. There is a
+ /// convenience method at `rustc_mir_dataflow::storage::always_storage_live_locals` for
+ /// computing these locals.
+ ///
+ /// If the local is already allocated, calling `StorageLive` again is UB. However, for an
+ /// unallocated local an additional `StorageDead` call is simply a nop.
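+ ///
+ /// A hedged sketch of a live range in pretty-printed MIR (illustrative only):
+ ///
+ /// ```ignore (MIR)
+ /// StorageLive(_1); // memory for `_1` may be used from here on
+ /// _1 = const 5_i32;
+ /// StorageDead(_1); // any use of `_1` past this point is UB
+ /// ```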
+ StorageLive(Local),
+
+ /// See `StorageLive` above.
+ StorageDead(Local),
+
+ /// Retag references in the given place, ensuring they got fresh tags.
+ ///
+ /// This is part of the Stacked Borrows model. These statements are currently only interpreted
+ /// by miri and only generated when `-Z mir-emit-retag` is passed. See
+ /// <https://internals.rust-lang.org/t/stacked-borrows-an-aliasing-model-for-rust/8153/> for
+ /// more details.
+ ///
+ /// For code that is not specific to stacked borrows, you should consider retags to read
+ /// and modify the place in an opaque way.
+ Retag(RetagKind, Box<Place<'tcx>>),
+
+ /// Encodes a user's type ascription. These need to be preserved
+ /// intact so that NLL can respect them. For example:
+ /// ```ignore (illustrative)
+ /// let a: T = y;
+ /// ```
+ /// The effect of this annotation is to relate the type `T_y` of the place `y`
+ /// to the user-given type `T`. The effect depends on the specified variance:
+ ///
+ /// - `Covariant` -- requires that `T_y <: T`
+ /// - `Contravariant` -- requires that `T_y :> T`
+ /// - `Invariant` -- requires that `T_y == T`
+ /// - `Bivariant` -- no effect
+ ///
+ /// When executed at runtime this is a nop.
+ ///
+ /// Disallowed after drop elaboration.
+ AscribeUserType(Box<(Place<'tcx>, UserTypeProjection)>, ty::Variance),
+
+ /// Marks the start of a "coverage region", injected with '-Cinstrument-coverage'. A
+ /// `Coverage` statement carries metadata about the coverage region, used to inject a coverage
+ /// map into the binary. If `Coverage::kind` is a `Counter`, the statement also generates
+ /// executable code, to increment a counter variable at runtime, each time the code region is
+ /// executed.
+ Coverage(Box<Coverage>),
+
+ /// Denotes a call to the intrinsic function `copy_nonoverlapping`.
+ ///
+ /// First, all three operands are evaluated. `src` and `dest` must each be a reference, pointer,
+ /// or `Box` pointing to the same type `T`. `count` must evaluate to a `usize`. Then, `src` and
+ /// `dest` are dereferenced, and `count * size_of::<T>()` bytes beginning with the first byte of
+ /// the `src` place are copied to the contiguous range of bytes beginning with the first byte
+ /// of `dest`.
+ ///
+ /// **Needs clarification**: In what order are operands computed and dereferenced? It should
+ /// probably match the order for assignment, but that is also undecided.
+ ///
+ /// **Needs clarification**: Is this typed or not, ie is there a typed load and store involved?
+ /// I vaguely remember Ralf saying somewhere that he thought it should not be.
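+ ///
+ /// As a hedged orientation (not the compiler's implementation), the effect
+ /// matches the standard-library pointer primitive:
+ ///
+ /// ```ignore (illustrative)
+ /// unsafe { std::ptr::copy_nonoverlapping::<T>(src, dest, count) };
+ /// ```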
+ CopyNonOverlapping(Box<CopyNonOverlapping<'tcx>>),
+
+ /// No-op. Useful for deleting instructions without affecting statement indices.
+ Nop,
+}
+
+/// Describes what kind of retag is to be performed.
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, Hash, HashStable)]
+#[rustc_pass_by_value]
+pub enum RetagKind {
+ /// The initial retag when entering a function.
+ FnEntry,
+ /// Retag preparing for a two-phase borrow.
+ TwoPhase,
+ /// Retagging raw pointers.
+ Raw,
+ /// A "normal" retag.
+ Default,
+}
+
+/// The `FakeReadCause` describes the kind of pattern that caused a `FakeRead` statement to exist.
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, Hash, HashStable, PartialEq)]
+pub enum FakeReadCause {
+ /// Inject a fake read of the borrowed input at the end of each guard's
+ /// code.
+ ///
+ /// This should ensure that you cannot change the variant for an enum while
+ /// you are in the midst of matching on it.
+ ForMatchGuard,
+
+ /// `let x: !; match x {}` doesn't generate any read of x so we need to
+ /// generate a read of x to check that it is initialized and safe.
+ ///
+ /// If a closure pattern matches a Place starting with an Upvar, then we introduce a
+ /// FakeRead for that Place outside the closure, in such a case this option would be
+ /// Some(closure_def_id).
+ /// Otherwise, the value of the optional LocalDefId will be None.
+ //
+ // We can use LocalDefId here since fake read statements are removed
+ // before codegen in the `CleanupNonCodegenStatements` pass.
+ ForMatchedPlace(Option<LocalDefId>),
+
+ /// A fake read of the RefWithinGuard version of a bind-by-value variable
+ /// in a match guard to ensure that its value hasn't changed by the time
+ /// we create the OutsideGuard version.
+ ForGuardBinding,
+
+ /// Officially, the semantics of
+ ///
+ /// `let pattern = <expr>;`
+ ///
+ /// is that `<expr>` is evaluated into a temporary and then this temporary is
+ /// matched against the pattern.
+ ///
+ /// However, if we see the simple pattern `let var = <expr>`, we optimize this to
+ /// evaluate `<expr>` directly into the variable `var`. This is mostly unobservable,
+ /// but in some cases it can affect the borrow checker, as in #53695.
+ /// Therefore, we insert a "fake read" here to ensure that we get
+ /// appropriate errors.
+ ///
+ /// If a closure pattern matches a Place starting with an Upvar, then we introduce a
+ /// FakeRead for that Place outside the closure, in such a case this option would be
+ /// Some(closure_def_id).
+ /// Otherwise, the value of the optional DefId will be None.
+ ForLet(Option<LocalDefId>),
+
+ /// If we have an index expression like
+ ///
+ /// (*x)[1][{ x = y; 4}]
+ ///
+ /// then the first bounds check is invalidated when we evaluate the second
+ /// index expression. Thus we create a fake borrow of `x` across the second
+ /// indexer, which will cause a borrow check error.
+ ForIndex,
+}
+
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct Coverage {
+ pub kind: CoverageKind,
+ pub code_region: Option<CodeRegion>,
+}
+
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct CopyNonOverlapping<'tcx> {
+ pub src: Operand<'tcx>,
+ pub dst: Operand<'tcx>,
+ /// Number of elements to copy from `src` to `dst`, not bytes.
+ pub count: Operand<'tcx>,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Terminators
+
+/// The various kinds of terminators, representing ways of exiting from a basic block.
+///
+/// A note on unwinding: Panics may occur during the execution of some terminators. Depending on the
+/// `-C panic` flag, this may either cause the program to abort or the call stack to unwind. Such
+/// terminators have a `cleanup: Option<BasicBlock>` field on them. If stack unwinding occurs, then
+/// once the current function is reached, execution continues at the given basic block, if any. If
+/// `cleanup` is `None` then no cleanup is performed, and the stack continues unwinding. This is
+/// equivalent to the execution of a `Resume` terminator.
+///
+/// The basic block pointed to by a `cleanup` field must have its `cleanup` flag set. `cleanup`
+/// basic blocks have a couple restrictions:
+/// 1. All `cleanup` fields in them must be `None`.
+/// 2. `Return` terminators are not allowed in them. `Abort` and `Unwind` terminators are.
+/// 3. All other basic blocks (in the current body) that are reachable from `cleanup` basic blocks
+/// must also be `cleanup`. This is a part of the type system and checked statically, so it is
+/// still an error to have such an edge in the CFG even if it's known that it won't be taken at
+/// runtime.
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+pub enum TerminatorKind<'tcx> {
+ /// Block has one successor; we continue execution there.
+ Goto { target: BasicBlock },
+
+ /// Switches based on the computed value.
+ ///
+ /// First, evaluates the `discr` operand. The type of the operand must be a signed or unsigned
+ /// integer, char, or bool, and must match the given type. Then, if the list of switch targets
+ /// contains the computed value, continues execution at the associated basic block. Otherwise,
+ /// continues execution at the "otherwise" basic block.
+ ///
+ /// Target values may not appear more than once.
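+ ///
+ /// A hedged sketch of the pretty-printed form (illustrative; details may
+ /// differ):
+ ///
+ /// ```ignore (MIR)
+ /// switchInt(move _2) -> [0_isize: bb2, 1_isize: bb3, otherwise: bb4];
+ /// ```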
+ SwitchInt {
+ /// The discriminant value being tested.
+ discr: Operand<'tcx>,
+
+ /// The type of value being tested.
+ /// This is always the same as the type of `discr`.
+ /// FIXME: remove this redundant information. Currently, it is relied on by pretty-printing.
+ switch_ty: Ty<'tcx>,
+
+ targets: SwitchTargets,
+ },
+
+ /// Indicates that the landing pad is finished and that the process should continue unwinding.
+ ///
+ /// Like a return, this marks the end of this invocation of the function.
+ ///
+ /// Only permitted in cleanup blocks. `Resume` is not permitted with `-C unwind=abort` after
+ /// deaggregation runs.
+ Resume,
+
+ /// Indicates that the landing pad is finished and that the process should abort.
+ ///
+ /// Used to prevent unwinding for foreign items or with `-C unwind=abort`. Only permitted in
+ /// cleanup blocks.
+ Abort,
+
+ /// Returns from the function.
+ ///
+ /// Like function calls, the exact semantics of returns in Rust are unclear. Returning very
+ /// likely at least assigns the value currently in the return place (`_0`) to the place
+ /// specified in the associated `Call` terminator in the calling function, as if assigned via
+ /// `dest = move _0`. It might additionally do other things, like have side-effects in the
+ /// aliasing model.
+ ///
+ /// If the body is a generator body, this has slightly different semantics; it instead causes a
+ /// `GeneratorState::Returned(_0)` to be created (as if by an `Aggregate` rvalue) and assigned
+ /// to the return place.
+ Return,
+
+ /// Indicates a terminator that can never be reached.
+ ///
+ /// Executing this terminator is UB.
+ Unreachable,
+
+ /// The behavior of this statement differs significantly before and after drop elaboration.
+ /// After drop elaboration, `Drop` executes the drop glue for the specified place, after which
+ /// it continues execution/unwinds at the given basic blocks. It is possible that executing drop
+ /// glue is special - this would be part of Rust's memory model. (**FIXME**: do we have an
+ /// issue tracking if drop glue has any interesting semantics in addition to those of a function
+ /// call?)
+ ///
+ /// `Drop` before drop elaboration is a *conditional* execution of the drop glue. Specifically, the
+ /// `Drop` will be executed if...
+ ///
+ /// **Needs clarification**: End of that sentence. This in effect should document the exact
+ /// behavior of drop elaboration. The following sounds vaguely right, but I'm not quite sure:
+ ///
+ /// > The drop glue is executed if, among all statements executed within this `Body`, an assignment to
+ /// > the place or one of its "parents" occurred more recently than a move out of it. This does not
+ /// > consider indirect assignments.
+ Drop { place: Place<'tcx>, target: BasicBlock, unwind: Option<BasicBlock> },
+
+ /// Drops the place and assigns a new value to it.
+ ///
+ /// This first performs the exact same operation as the pre drop-elaboration `Drop` terminator;
+ /// it then additionally assigns the `value` to the `place` as if by an assignment statement.
+ /// This assignment occurs both in the unwind and the regular code paths. The semantics are best
+ /// explained by the elaboration:
+ ///
+ /// ```ignore (MIR)
+ /// BB0 {
+ /// DropAndReplace(P <- V, goto BB1, unwind BB2)
+ /// }
+ /// ```
+ ///
+ /// becomes
+ ///
+ /// ```ignore (MIR)
+ /// BB0 {
+ /// Drop(P, goto BB1, unwind BB2)
+ /// }
+ /// BB1 {
+ /// // P is now uninitialized
+ /// P <- V
+ /// }
+ /// BB2 {
+ /// // P is now uninitialized -- its dtor panicked
+ /// P <- V
+ /// }
+ /// ```
+ ///
+ /// Disallowed after drop elaboration.
+ DropAndReplace {
+ place: Place<'tcx>,
+ value: Operand<'tcx>,
+ target: BasicBlock,
+ unwind: Option<BasicBlock>,
+ },
+
+ /// Roughly speaking, evaluates the `func` operand and the arguments, and starts execution of
+ /// the referred to function. The operand types must match the argument types of the function.
+ /// The return place type must match the return type. The type of the `func` operand must be
+ /// callable, meaning either a function pointer, a function type, or a closure type.
+ ///
+ /// **Needs clarification**: The exact semantics of this. Current backends rely on `move`
+ /// operands not aliasing the return place. It is unclear how this is justified in MIR, see
+ /// [#71117].
+ ///
+ /// [#71117]: https://github.com/rust-lang/rust/issues/71117
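+ ///
+ /// For orientation, a hedged sketch of the pretty-printed form (illustrative
+ /// only):
+ ///
+ /// ```ignore (MIR)
+ /// _0 = foo(move _1) -> [return: bb2, unwind: bb3];
+ /// ```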
+ Call {
+ /// The function that’s being called.
+ func: Operand<'tcx>,
+ /// Arguments the function is called with.
+ /// These are owned by the callee, which is free to modify them.
+ /// This allows the memory occupied by "by-value" arguments to be
+ /// reused across function calls without duplicating the contents.
+ args: Vec<Operand<'tcx>>,
+ /// Where the returned value will be written
+ destination: Place<'tcx>,
+ /// Where to go after this call returns. If none, the call necessarily diverges.
+ target: Option<BasicBlock>,
+ /// Cleanups to be done if the call unwinds.
+ cleanup: Option<BasicBlock>,
+ /// `true` if this is from a call in HIR rather than from an overloaded
+ /// operator. This is `true` even for overloaded function calls.
+ from_hir_call: bool,
+ /// This `Span` is the span of the function, without the dot and receiver
+ /// (e.g. `foo(a, b)` in `x.foo(a, b)`).
+ fn_span: Span,
+ },
+
+ /// Evaluates the operand, which must have type `bool`. If it is not equal to `expected`,
+ /// initiates a panic. Initiating a panic corresponds to a `Call` terminator with some
+ /// unspecified constant as the function to call, all the operands stored in the `AssertMessage`
+ /// as parameters, and `None` for the destination. Keep in mind that the `cleanup` path is not
+ /// necessarily executed even in the case of a panic, for example in `-C panic=abort`. If the
+ /// assertion does not fail, execution continues at the specified basic block.
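+ ///
+ /// An overflow check is one hedged example of the pretty-printed form
+ /// (illustrative; details may differ):
+ ///
+ /// ```ignore (MIR)
+ /// assert(!move (_4.1: bool), "attempt to add with overflow") -> [success: bb2, unwind: bb3];
+ /// ```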
+ Assert {
+ cond: Operand<'tcx>,
+ expected: bool,
+ msg: AssertMessage<'tcx>,
+ target: BasicBlock,
+ cleanup: Option<BasicBlock>,
+ },
+
+ /// Marks a suspend point.
+ ///
+ /// Like `Return` terminators in generator bodies, this computes `value` and then a
+ /// `GeneratorState::Yielded(value)` as if by `Aggregate` rvalue. That value is then assigned to
+ /// the return place of the function calling this one, and execution continues in the calling
+ /// function. When next invoked with the same first argument, execution of this function
+ /// continues at the `resume` basic block, with the second argument written to the `resume_arg`
+ /// place. If the generator is dropped before then, the `drop` basic block is invoked.
+ ///
+ /// Not permitted in bodies that are not generator bodies, or after generator lowering.
+ ///
+ /// **Needs clarification**: What about the evaluation order of the `resume_arg` and `value`?
+ Yield {
+ /// The value to return.
+ value: Operand<'tcx>,
+ /// Where to resume to.
+ resume: BasicBlock,
+ /// The place to store the resume argument in.
+ resume_arg: Place<'tcx>,
+ /// Cleanup to be done if the generator is dropped at this suspend point.
+ drop: Option<BasicBlock>,
+ },
+
+ /// Indicates the end of dropping a generator.
+ ///
+ /// Semantically just a `return` (from the generator's drop glue). Only permitted in the same
+ /// situations as `yield`.
+ ///
+ /// **Needs clarification**: Is that even correct? The generator drop code is always confusing
+ /// to me, because it's not even really in the current body.
+ ///
+ /// **Needs clarification**: Are there type system constraints on these terminators? Should
+ /// there be a "block type" like `cleanup` blocks for them?
+ GeneratorDrop,
+
+ /// A block where control flow only ever takes one real path, but borrowck needs to be more
+ /// conservative.
+ ///
+ /// At runtime this is semantically just a goto.
+ ///
+ /// Disallowed after drop elaboration.
+ FalseEdge {
+ /// The target normal control flow will take.
+ real_target: BasicBlock,
+ /// A block control flow could conceptually jump to, but won't in
+ /// practice.
+ imaginary_target: BasicBlock,
+ },
+
+ /// A terminator for blocks that only take one path in reality, but where we reserve the right
+ /// to unwind in borrowck, even if it won't happen in practice. This can arise in infinite loops
+ /// with no function calls for example.
+ ///
+ /// At runtime this is semantically just a goto.
+ ///
+ /// Disallowed after drop elaboration.
+ FalseUnwind {
+ /// The target normal control flow will take.
+ real_target: BasicBlock,
+ /// The imaginary cleanup block link. This particular path will never be taken
+ /// in practice, but in order to avoid fragility we want to always
+ /// consider it in borrowck. We don't want to accept programs which
+ /// pass borrowck only when `panic=abort` or some assertions are disabled
+ /// due to release vs. debug mode builds. This needs to be an `Option` because
+ /// of the `remove_noop_landing_pads` and `abort_unwinding_calls` passes.
+ unwind: Option<BasicBlock>,
+ },
+
+ /// Block ends with an inline assembly block. This is a terminator since
+ /// inline assembly is allowed to diverge.
+ InlineAsm {
+ /// The template for the inline assembly, with placeholders.
+ template: &'tcx [InlineAsmTemplatePiece],
+
+ /// The operands for the inline assembly, as `Operand`s or `Place`s.
+ operands: Vec<InlineAsmOperand<'tcx>>,
+
+ /// Miscellaneous options for the inline assembly.
+ options: InlineAsmOptions,
+
+ /// Source spans for each line of the inline assembly code. These are
+ /// used to map assembler errors back to the line in the source code.
+ line_spans: &'tcx [Span],
+
+ /// Destination block after the inline assembly returns, unless it is
+ /// diverging (`InlineAsmOptions::NORETURN`).
+ destination: Option<BasicBlock>,
+
+ /// Cleanup to be done if the inline assembly unwinds. This is present
+ /// if and only if `InlineAsmOptions::MAY_UNWIND` is set.
+ cleanup: Option<BasicBlock>,
+ },
+}
+
+/// Information about an assertion failure.
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
+pub enum AssertKind<O> {
+ BoundsCheck { len: O, index: O },
+ Overflow(BinOp, O, O),
+ OverflowNeg(O),
+ DivisionByZero(O),
+ RemainderByZero(O),
+ ResumedAfterReturn(GeneratorKind),
+ ResumedAfterPanic(GeneratorKind),
+}
+
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum InlineAsmOperand<'tcx> {
+ In {
+ reg: InlineAsmRegOrRegClass,
+ value: Operand<'tcx>,
+ },
+ Out {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ place: Option<Place<'tcx>>,
+ },
+ InOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ in_value: Operand<'tcx>,
+ out_place: Option<Place<'tcx>>,
+ },
+ Const {
+ value: Box<Constant<'tcx>>,
+ },
+ SymFn {
+ value: Box<Constant<'tcx>>,
+ },
+ SymStatic {
+ def_id: DefId,
+ },
+}
+
+/// Type for MIR `Assert` terminator error messages.
+pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
+
+///////////////////////////////////////////////////////////////////////////
+// Places
+
+/// Places roughly correspond to a "location in memory." Places in MIR are the same mathematical
+/// object as places in Rust. This of course means that what exactly they are is undecided and part
+/// of the Rust memory model. However, they will likely contain at least the following pieces of
+/// information in some form:
+///
+/// 1. The address in memory that the place refers to.
+/// 2. The provenance with which the place is being accessed.
+/// 3. The type of the place and an optional variant index. See [`PlaceTy`][super::tcx::PlaceTy].
+/// 4. Optionally, some metadata. This exists if and only if the type of the place is not `Sized`.
+///
+/// We'll give a description below of how all pieces of the place except for the provenance are
+/// calculated. We cannot give a description of the provenance, because that is part of the
+/// undecided aliasing model - we only include it here at all to acknowledge its existence.
+///
+/// Each local naturally corresponds to the place `Place { local, projection: [] }`. This place has
+/// the address of the local's allocation and the type of the local.
+///
+/// **Needs clarification:** Unsized locals seem to present a bit of an issue. Their allocation
+/// can't actually be created on `StorageLive`, because it's unclear how big to make the allocation.
+/// Furthermore, MIR produces assignments to unsized locals, although that is not permitted under
+/// `#![feature(unsized_locals)]` in Rust. Besides just putting "unsized locals are special and
+/// different" in a bunch of places, I (JakobDegen) don't know how to incorporate this behavior into
+/// the current MIR semantics in a clean way - possibly this needs some design work first.
+///
+/// For places that are not locals, i.e. those with a non-empty list of projections, we define the
+/// values as a function of the parent place, that is the place with its last [`ProjectionElem`]
+/// stripped. The way this is computed of course depends on the kind of that last projection
+/// element:
+///
+/// - [`Downcast`](ProjectionElem::Downcast): This projection sets the place's variant index to the
+/// given one, and makes no other changes. A `Downcast` projection on a place with its variant
+/// index already set is not well-formed.
+/// - [`Field`](ProjectionElem::Field): `Field` projections take their parent place and create a
+/// place referring to one of the fields of the type. The resulting address is the parent
+/// address, plus the offset of the field. The type becomes the type of the field. If the parent
+/// was unsized and so had metadata associated with it, then the metadata is retained if the
+/// field is unsized and thrown out if it is sized.
+///
+/// These projections are only legal for tuples, ADTs, closures, and generators. If the ADT or
+/// generator has more than one variant, the parent place's variant index must be set, indicating
+/// which variant is being used. If it has just one variant, the variant index may or may not be
+/// included - the single possible variant is inferred if it is not included.
+/// - [`ConstantIndex`](ProjectionElem::ConstantIndex): Computes an offset in units of `T` into the
+/// place as described in the documentation for the `ProjectionElem`. The resulting address is
+/// the parent's address plus that offset, and the type is `T`. This is only legal if the parent
+/// place has type `[T; N]` or `[T]` (*not* `&[T]`). Since such a `T` is always sized, any
+/// resulting metadata is thrown out.
+/// - [`Subslice`](ProjectionElem::Subslice): This projection calculates an offset and a new
+/// address in a similar manner as `ConstantIndex`. It is also only legal on `[T; N]` and `[T]`.
+/// However, this yields a `Place` of type `[T]`, and additionally sets the metadata to be the
+/// length of the subslice.
+/// - [`Index`](ProjectionElem::Index): Like `ConstantIndex`, only legal on `[T; N]` or `[T]`.
+/// However, `Index` additionally takes a local from which the value of the index is computed at
+/// runtime. Computing the value of the index involves interpreting the `Local` as a
+/// `Place { local, projection: [] }`, and then computing its value as if done via
+/// [`Operand::Copy`]. The array/slice is then indexed with the resulting value. The local must
+/// have type `usize`.
+/// - [`Deref`](ProjectionElem::Deref): Derefs are the last type of projection, and the most
+/// complicated. They are only legal on parent places that are references, pointers, or `Box`. A
+/// `Deref` projection begins by loading a value from the parent place, as if by
+/// [`Operand::Copy`]. It then dereferences the resulting pointer, creating a place of the
+/// pointee's type. The resulting address is the address that was stored in the pointer. If the
+/// pointee type is unsized, the pointer additionally stores the value of the metadata.
+///
+/// Computing a place may cause UB. One possibility is that the pointer used for a `Deref` may not
+/// be suitably aligned. Another possibility is that the place is not in bounds, meaning it does not
+/// point to an actual allocation.
+///
+/// However, whether this is actually UB, and when exactly the UB kicks in, is undecided. This is being discussed
+/// in [UCG#319]. The options include that every place must obey those rules, that only some places
+/// must obey them, or that places impose no rules of their own.
+///
+/// [UCG#319]: https://github.com/rust-lang/unsafe-code-guidelines/issues/319
+///
+/// Rust currently requires that every place obey those two rules. This is checked by MIRI and taken
+/// advantage of by codegen (via `gep inbounds`). That is possibly subject to change.
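+///
+/// As a hedged illustration of the encoding, the Rust place `(*a.b)[i]` (with `a` as `_1`, `b` as
+/// field 0, and `i` as `_2`, all hypothetical) corresponds roughly to:
+///
+/// ```ignore (illustrative)
+/// Place { local: _1, projection: [Field(0), Deref, Index(_2)] } // printed as `(*(_1.0))[_2]`
+/// ```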
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable)]
+pub struct Place<'tcx> {
+ pub local: Local,
+
+ /// projection out of a place (access a field, deref a pointer, etc)
+ pub projection: &'tcx List<PlaceElem<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub enum ProjectionElem<V, T> {
+ Deref,
+ Field(Field, T),
+ /// Index into a slice/array.
+ ///
+ /// Note that this does not also dereference, and so it does not exactly correspond to slice
+ /// indexing in Rust. In other words, in the below Rust code:
+ ///
+ /// ```rust
+ /// let x = &[1, 2, 3, 4];
+ /// let i = 2;
+ /// x[i];
+ /// ```
+ ///
+ /// The `x[i]` is turned into a `Deref` followed by an `Index`, not just an `Index`. The same
+ /// thing is true of the `ConstantIndex` and `Subslice` projections below.
+ Index(V),
+
+ /// These indices are generated by slice patterns. Easiest to explain
+ /// by example:
+ ///
+ /// ```ignore (illustrative)
+ /// [X, _, .._, _, _] => { offset: 0, min_length: 4, from_end: false },
+ /// [_, X, .._, _, _] => { offset: 1, min_length: 4, from_end: false },
+ /// [_, _, .._, X, _] => { offset: 2, min_length: 4, from_end: true },
+ /// [_, _, .._, _, X] => { offset: 1, min_length: 4, from_end: true },
+ /// ```
+ ConstantIndex {
+ /// index or -index (in Python terms), depending on `from_end`
+ offset: u64,
+ /// The thing being indexed must be at least this long. For arrays this
+ /// is always the exact length.
+ min_length: u64,
+ /// Counting backwards from end? This is always false when indexing an
+ /// array.
+ from_end: bool,
+ },
+
+ /// These indices are generated by slice patterns.
+ ///
+ /// If `from_end` is true, `slice[from..slice.len() - to]`.
+ /// Otherwise, `array[from..to]`.
+ Subslice {
+ from: u64,
+ to: u64,
+ /// Whether `to` counts from the start or end of the array/slice.
+ /// For `PlaceElem`s this is `true` if and only if the base is a slice.
+ /// For `ProjectionKind`, this can also be `true` for arrays.
+ from_end: bool,
+ },
+
+ /// "Downcast" to a variant of an enum or a generator.
+ ///
+ /// The included Symbol is the name of the variant, used for printing MIR.
+ Downcast(Option<Symbol>, VariantIdx),
+}
+
+/// Alias for projections as they appear in places, where the base is a place
+/// and the index is a local.
+pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>;
+
+///////////////////////////////////////////////////////////////////////////
+// Operands
+
+/// An operand in MIR represents a "value" in Rust, the definition of which is undecided and part of
+/// the memory model. One proposal for a definition of values can be found [on UCG][value-def].
+///
+/// [value-def]: https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/value-domain.md
+///
+/// The most common way to create values is via loading a place. Loading a place is an operation
+/// which reads the memory of the place and converts it to a value. This is a fundamentally *typed*
+/// operation. The nature of the value produced depends on the type of the conversion. Furthermore,
+/// there may be other effects: if the type has a validity constraint loading the place might be UB
+/// if the validity constraint is not met.
+///
+/// **Needs clarification:** Ralf proposes that loading a place not have side-effects.
+/// This is what is implemented in miri today. Are these the semantics we want for MIR? Is this
+/// something we can even decide without knowing more about Rust's memory model?
+///
+/// **Needs clarification:** Is loading a place that has its variant index set well-formed? Miri
+/// currently implements it, but it seems like this may be something to check against in the
+/// validator.
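+///
+/// As a hedged illustration, in pretty-printed MIR a `Copy` operand renders as the bare place
+/// while a `Move` operand carries a `move` prefix (locals here are hypothetical):
+///
+/// ```ignore (illustrative)
+/// _3 = Add(_1, move _2) // `_1` is copied, `_2` is moved
+/// ```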
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum Operand<'tcx> {
+ /// Creates a value by loading the given place.
+ ///
+ /// Before drop elaboration, the type of the place must be `Copy`. After drop elaboration there
+ /// is no such requirement.
+ Copy(Place<'tcx>),
+
+ /// Creates a value by loading the place, just like the `Copy` operand.
+ ///
+ /// This *may* additionally overwrite the place with `uninit` bytes, depending on how we decide
+ /// in [UCG#188]. You should not emit MIR that may attempt a subsequent second load of this
+ /// place without first re-initializing it.
+ ///
+ /// [UCG#188]: https://github.com/rust-lang/unsafe-code-guidelines/issues/188
+ Move(Place<'tcx>),
+
+ /// Constants are already semantically values, and remain unchanged.
+ Constant(Box<Constant<'tcx>>),
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Rvalues
+
+/// The various kinds of rvalues that can appear in MIR.
+///
+/// Not all of these are allowed at every [`MirPhase`] - when this is the case, it's stated below.
+///
+/// Computing any rvalue begins by evaluating the places and operands in some order (**Needs
+/// clarification**: Which order?). These are then used to produce a "value" - the same kind of
+/// value that an [`Operand`] produces.
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+pub enum Rvalue<'tcx> {
+ /// Yields the operand unchanged
+ Use(Operand<'tcx>),
+
+ /// Creates an array where each element is the value of the operand.
+ ///
+ /// When the repetition count is zero, the operand's value is never dropped; this is a known
+ /// bug, see [#74836].
+ ///
+ /// Corresponds to source code like `[x; 32]`.
+ ///
+ /// [#74836]: https://github.com/rust-lang/rust/issues/74836
+ Repeat(Operand<'tcx>, ty::Const<'tcx>),
+
+ /// Creates a reference of the indicated kind to the place.
+ ///
+ /// There is not much to document here, because besides the obvious parts the semantics of this
+ /// are essentially entirely a part of the aliasing model. There are many UCG issues discussing
+ /// exactly what the behavior of this operation should be.
+ ///
+ /// `Shallow` borrows are disallowed after drop lowering.
+ Ref(Region<'tcx>, BorrowKind, Place<'tcx>),
+
+ /// Creates a pointer/reference to the given thread local.
+ ///
+ /// The yielded type is `*mut T` if the static is mutable, `*const T` if the static is
+ /// extern, and `&T` if neither of those apply.
+ ///
+ /// **Note:** This is a runtime operation that actually executes code and is in this sense more
+ /// like a function call. Also, eliminating dead stores of this rvalue causes `fn main() {}` to
+ /// SIGILL for some reason that I (JakobDegen) never got a chance to look into.
+ ///
+ /// **Needs clarification**: Are there weird additional semantics here related to the runtime
+ /// nature of this operation?
+ ThreadLocalRef(DefId),
+
+ /// Creates a pointer with the indicated mutability to the place.
+ ///
+ /// This is generated by pointer casts like `&v as *const _` or raw address of expressions like
+ /// `&raw v` or `addr_of!(v)`.
+ ///
+ /// Like with references, the semantics of this operation are heavily dependent on the aliasing
+ /// model.
+ AddressOf(Mutability, Place<'tcx>),
+
+ /// Yields the length of the place, as a `usize`.
+ ///
+ /// If the type of the place is an array, this is the array length. For slices (`[T]`, not
+ /// `&[T]`) this accesses the place's metadata to determine the length. This rvalue is
+ /// ill-formed for places of other types.
+ Len(Place<'tcx>),
+
+ /// Performs essentially all of the casts that can be performed via `as`.
+ ///
+ /// This allows for casts from/to a variety of types.
+ ///
+ /// **FIXME**: Document exactly which `CastKind`s allow which types of casts. Figure out why
+ /// `ArrayToPointer` and `MutToConstPointer` are special.
+ Cast(CastKind, Operand<'tcx>, Ty<'tcx>),
+
+ /// * `Offset` has the same semantics as [`offset`](pointer::offset), except that the second
+ /// parameter may be a `usize` as well.
+ /// * The comparison operations accept `bool`s, `char`s, signed or unsigned integers, floats,
+ /// raw pointers, or function pointers and return a `bool`. The types of the operands must be
+ /// matching, up to the usual caveat of the lifetimes in function pointers.
+ /// * Left and right shift operations accept signed or unsigned integers not necessarily of the
+ /// same type and return a value of the same type as their LHS. Like in Rust, the RHS is
+ /// truncated as needed.
+ /// * The `Bit*` operations accept signed integers, unsigned integers, or bools with matching
+ /// types and return a value of that type.
+ /// * The remaining operations accept signed integers, unsigned integers, or floats with
+ /// matching types and return a value of that type.
+ BinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),
+
+ /// Same as `BinaryOp`, but yields `(T, bool)` with a `bool` indicating an error condition.
+ ///
+ /// When overflow checking is disabled and we are generating run-time code, the error condition
+ /// is false. Otherwise, and always during CTFE, the error condition is determined as described
+ /// below.
+ ///
+ /// For addition, subtraction, and multiplication on integers the error condition is set when
+ /// the infinite precision result would be unequal to the actual result.
+ ///
+ /// For shift operations on integers the error condition is set when the value of right-hand
+ /// side is greater than or equal to the number of bits in the type of the left-hand side, or
+ /// when the value of right-hand side is negative.
+ ///
+ /// Other combinations of types and operators are unsupported.
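+ ///
+ /// As a hedged sketch (locals and types are hypothetical), `let z = x + y` with overflow
+ /// checks enabled lowers roughly to:
+ ///
+ /// ```ignore (illustrative)
+ /// _3 = CheckedAdd(_1, _2);         // `_3: (i32, bool)`
+ /// assert(!move (_3.1: bool), ...)  // see the `Assert` terminator
+ /// ```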
+ CheckedBinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),
+
+ /// Computes a value as described by the operation.
+ NullaryOp(NullOp, Ty<'tcx>),
+
+ /// Exactly like `BinaryOp`, but with fewer operands.
+ ///
+ /// Also does two's-complement arithmetic. Negation requires a signed integer or a float;
+ /// bitwise not requires a signed integer, unsigned integer, or bool. Both operation kinds
+ /// return a value with the same type as their operand.
+ UnaryOp(UnOp, Operand<'tcx>),
+
+ /// Computes the discriminant of the place, returning it as an integer of type
+ /// [`discriminant_ty`]. Returns zero for types without a discriminant.
+ ///
+ /// The validity requirements for the underlying value are undecided for this rvalue, see
+ /// [#91095]. Note too that the value of the discriminant is not the same thing as the
+ /// variant index; use [`discriminant_for_variant`] to convert.
+ ///
+ /// [`discriminant_ty`]: crate::ty::Ty::discriminant_ty
+ /// [#91095]: https://github.com/rust-lang/rust/issues/91095
+ /// [`discriminant_for_variant`]: crate::ty::Ty::discriminant_for_variant
+ Discriminant(Place<'tcx>),
+
+ /// Creates an aggregate value, like a tuple or struct.
+ ///
+ /// This is needed because dataflow analysis needs to distinguish
+ /// `dest = Foo { x: ..., y: ... }` from `dest.x = ...; dest.y = ...;` in the case that `Foo`
+ /// has a destructor.
+ ///
+ /// Disallowed after deaggregation for all aggregate kinds except `Array` and `Generator`. After
+ /// generator lowering, `Generator` aggregate kinds are disallowed too.
+ Aggregate(Box<AggregateKind<'tcx>>, Vec<Operand<'tcx>>),
+
+ /// Transmutes a `*mut u8` into a shallow-initialized `Box<T>`.
+ ///
+ /// This is different from a normal transmute because dataflow analysis will treat the box as
+ /// initialized but its content as uninitialized. Like other pointer casts, this in general
+ /// affects alias analysis.
+ ShallowInitBox(Operand<'tcx>, Ty<'tcx>),
+
+ /// A CopyForDeref is equivalent to a read from a place at the
+ /// codegen level, but is treated specially by drop elaboration. When such a read happens, it
+ /// is guaranteed (via the nature of the mir_opt `Derefer` in `rustc_mir_transform/src/deref_separator`)
+ /// that the only use of the returned value is a deref operation, immediately
+ /// followed by one or more projections. Drop elaboration treats this rvalue as if the
+ /// read never happened and just projects further. This allows simplifying various MIR
+ /// optimizations and codegen backends that previously had to handle deref operations anywhere
+ /// in a place.
+ CopyForDeref(Place<'tcx>),
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum CastKind {
+ /// An exposing pointer-to-address cast. A cast between a pointer and an integer type, or
+ /// between a function pointer and an integer type.
+ /// See the docs on `expose_addr` for more details.
+ PointerExposeAddress,
+ /// An address-to-pointer cast that picks up an exposed provenance.
+ /// See the docs on `from_exposed_addr` for more details.
+ PointerFromExposedAddress,
+ /// All sorts of pointer-to-pointer casts. Note that reference-to-raw-ptr casts are
+ /// translated into `&raw mut/const *r`, i.e., they are not actually casts.
+ Pointer(PointerCast),
+ /// Remaining unclassified casts.
+ Misc,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum AggregateKind<'tcx> {
+ /// The type is that of the element, not of the array.
+ Array(Ty<'tcx>),
+ Tuple,
+
+ /// The second field is the variant index. It's equal to 0 for struct
+ /// and union expressions. The fourth field is the
+ /// active field number and is present only for union expressions
+ /// -- e.g., for a union expression `SomeUnion { c: .. }`, the
+ /// active field index would identify the field `c`.
+ Adt(DefId, VariantIdx, SubstsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<usize>),
+
+ // Note: We can use LocalDefId since closures and generators are deaggregated
+ // before codegen.
+ Closure(LocalDefId, SubstsRef<'tcx>),
+ Generator(LocalDefId, SubstsRef<'tcx>, hir::Movability),
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum NullOp {
+ /// Returns the size of a value of that type
+ SizeOf,
+ /// Returns the minimum alignment of a type
+ AlignOf,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum UnOp {
+ /// The `!` operator for logical inversion
+ Not,
+ /// The `-` operator for negation
+ Neg,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum BinOp {
+ /// The `+` operator (addition)
+ Add,
+ /// The `-` operator (subtraction)
+ Sub,
+ /// The `*` operator (multiplication)
+ Mul,
+ /// The `/` operator (division)
+ ///
+ /// Division by zero is UB, because the compiler should have inserted checks
+ /// prior to this.
+ Div,
+ /// The `%` operator (modulus)
+ ///
+ /// Using zero as the modulus (second operand) is UB, because the compiler
+ /// should have inserted checks prior to this.
+ Rem,
+ /// The `^` operator (bitwise xor)
+ BitXor,
+ /// The `&` operator (bitwise and)
+ BitAnd,
+ /// The `|` operator (bitwise or)
+ BitOr,
+ /// The `<<` operator (shift left)
+ ///
+ /// The offset is truncated to the size of the first operand before shifting.
+ Shl,
+ /// The `>>` operator (shift right)
+ ///
+ /// The offset is truncated to the size of the first operand before shifting.
+ Shr,
+ /// The `==` operator (equality)
+ Eq,
+ /// The `<` operator (less than)
+ Lt,
+ /// The `<=` operator (less than or equal to)
+ Le,
+ /// The `!=` operator (not equal to)
+ Ne,
+ /// The `>=` operator (greater than or equal to)
+ Ge,
+ /// The `>` operator (greater than)
+ Gt,
+ /// The `ptr.offset` operator
+ Offset,
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ // These are in alphabetical order, which is easy to maintain.
+ static_assert_size!(AggregateKind<'_>, 48);
+ static_assert_size!(Operand<'_>, 24);
+ static_assert_size!(Place<'_>, 16);
+ static_assert_size!(PlaceElem<'_>, 24);
+ static_assert_size!(Rvalue<'_>, 40);
+}
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
new file mode 100644
index 000000000..405003156
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -0,0 +1,307 @@
+/*!
+ * Methods for the various MIR types. These are intended for use after
+ * building is complete.
+ */
+
+use crate::mir::*;
+use crate::ty::subst::Subst;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_target::abi::VariantIdx;
+
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct PlaceTy<'tcx> {
+ pub ty: Ty<'tcx>,
+ /// Downcast to a particular variant of an enum or a generator, if included.
+ pub variant_index: Option<VariantIdx>,
+}
+
+// At least on 64-bit systems, `PlaceTy` should not be larger than two or three pointers.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(PlaceTy<'_>, 16);
+
+impl<'tcx> PlaceTy<'tcx> {
+ #[inline]
+ pub fn from_ty(ty: Ty<'tcx>) -> PlaceTy<'tcx> {
+ PlaceTy { ty, variant_index: None }
+ }
+
+ /// `place_ty.field_ty(tcx, f)` computes the type at a given field
+ /// of a record or enum-variant. (Most clients of `PlaceTy` can
+ /// instead just extract the relevant type directly from their
+ /// `PlaceElem`, but some instances of `ProjectionElem<V, T>` do
+ /// not carry a `Ty` for `T`.)
+ ///
+ /// Note that the resulting type has not been normalized.
+ pub fn field_ty(self, tcx: TyCtxt<'tcx>, f: Field) -> Ty<'tcx> {
+ let answer = match self.ty.kind() {
+ ty::Adt(adt_def, substs) => {
+ let variant_def = match self.variant_index {
+ None => adt_def.non_enum_variant(),
+ Some(variant_index) => {
+ assert!(adt_def.is_enum());
+ &adt_def.variant(variant_index)
+ }
+ };
+ let field_def = &variant_def.fields[f.index()];
+ field_def.ty(tcx, substs)
+ }
+ ty::Tuple(tys) => tys[f.index()],
+ _ => bug!("extracting field of non-tuple non-adt: {:?}", self),
+ };
+ debug!("field_ty self: {:?} f: {:?} yields: {:?}", self, f, answer);
+ answer
+ }
+
+ /// Convenience wrapper around `projection_ty_core` for
+ /// `PlaceElem`, where we can just use the `Ty` that is already
+ /// stored inline on field projection elems.
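+ ///
+ /// A hedged usage sketch (the concrete types involved are assumptions):
+ ///
+ /// ```ignore (illustrative)
+ /// // With `place_ty.ty == &[u8]`, a `Deref` projection yields `[u8]`:
+ /// let projected = place_ty.projection_ty(tcx, PlaceElem::Deref);
+ /// assert_eq!(projected.ty, tcx.mk_slice(tcx.types.u8));
+ /// ```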
+ pub fn projection_ty(self, tcx: TyCtxt<'tcx>, elem: PlaceElem<'tcx>) -> PlaceTy<'tcx> {
+ self.projection_ty_core(tcx, ty::ParamEnv::empty(), &elem, |_, _, ty| ty)
+ }
+
+ /// `place_ty.projection_ty_core(tcx, elem, |...| { ... })`
+ /// projects `place_ty` onto `elem`, returning the appropriate
+ /// `Ty` or downcast variant corresponding to that projection.
+ /// The `handle_field` callback must map a `Field` to its `Ty`
+ /// (which should be trivial when `T` = `Ty`).
+ pub fn projection_ty_core<V, T>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ elem: &ProjectionElem<V, T>,
+ mut handle_field: impl FnMut(&Self, Field, T) -> Ty<'tcx>,
+ ) -> PlaceTy<'tcx>
+ where
+ V: ::std::fmt::Debug,
+ T: ::std::fmt::Debug + Copy,
+ {
+ if self.variant_index.is_some() && !matches!(elem, ProjectionElem::Field(..)) {
+ bug!("cannot use non field projection on downcasted place")
+ }
+ let answer = match *elem {
+ ProjectionElem::Deref => {
+ let ty = self
+ .ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| {
+ bug!("deref projection of non-dereferenceable ty {:?}", self)
+ })
+ .ty;
+ PlaceTy::from_ty(ty)
+ }
+ ProjectionElem::Index(_) | ProjectionElem::ConstantIndex { .. } => {
+ PlaceTy::from_ty(self.ty.builtin_index().unwrap())
+ }
+ ProjectionElem::Subslice { from, to, from_end } => {
+ PlaceTy::from_ty(match self.ty.kind() {
+ ty::Slice(..) => self.ty,
+ ty::Array(inner, _) if !from_end => tcx.mk_array(*inner, (to - from) as u64),
+ ty::Array(inner, size) if from_end => {
+ let size = size.eval_usize(tcx, param_env);
+ let len = size - (from as u64) - (to as u64);
+ tcx.mk_array(*inner, len)
+ }
+ _ => bug!("cannot subslice non-array type: `{:?}`", self),
+ })
+ }
+ ProjectionElem::Downcast(_name, index) => {
+ PlaceTy { ty: self.ty, variant_index: Some(index) }
+ }
+ ProjectionElem::Field(f, fty) => PlaceTy::from_ty(handle_field(&self, f, fty)),
+ };
+ debug!("projection_ty self: {:?} elem: {:?} yields: {:?}", self, elem, answer);
+ answer
+ }
+}
+
+impl<'tcx> Place<'tcx> {
+ pub fn ty_from<D>(
+ local: Local,
+ projection: &[PlaceElem<'tcx>],
+ local_decls: &D,
+ tcx: TyCtxt<'tcx>,
+ ) -> PlaceTy<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ projection
+ .iter()
+ .fold(PlaceTy::from_ty(local_decls.local_decls()[local].ty), |place_ty, &elem| {
+ place_ty.projection_ty(tcx, elem)
+ })
+ }
+
+ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ Place::ty_from(self.local, &self.projection, local_decls, tcx)
+ }
+}
+
+impl<'tcx> PlaceRef<'tcx> {
+ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ Place::ty_from(self.local, &self.projection, local_decls, tcx)
+ }
+}
+
+pub enum RvalueInitializationState {
+ Shallow,
+ Deep,
+}
+
+impl<'tcx> Rvalue<'tcx> {
+ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ match *self {
+ Rvalue::Use(ref operand) => operand.ty(local_decls, tcx),
+ Rvalue::Repeat(ref operand, count) => {
+ tcx.mk_ty(ty::Array(operand.ty(local_decls, tcx), count))
+ }
+ Rvalue::ThreadLocalRef(did) => {
+ let static_ty = tcx.type_of(did);
+ if tcx.is_mutable_static(did) {
+ tcx.mk_mut_ptr(static_ty)
+ } else if tcx.is_foreign_item(did) {
+ tcx.mk_imm_ptr(static_ty)
+ } else {
+ // FIXME: These things don't *really* have 'static lifetime.
+ tcx.mk_imm_ref(tcx.lifetimes.re_static, static_ty)
+ }
+ }
+ Rvalue::Ref(reg, bk, ref place) => {
+ let place_ty = place.ty(local_decls, tcx).ty;
+ tcx.mk_ref(reg, ty::TypeAndMut { ty: place_ty, mutbl: bk.to_mutbl_lossy() })
+ }
+ Rvalue::AddressOf(mutability, ref place) => {
+ let place_ty = place.ty(local_decls, tcx).ty;
+ tcx.mk_ptr(ty::TypeAndMut { ty: place_ty, mutbl: mutability })
+ }
+ Rvalue::Len(..) => tcx.types.usize,
+ Rvalue::Cast(.., ty) => ty,
+ Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs_ty = lhs.ty(local_decls, tcx);
+ let rhs_ty = rhs.ty(local_decls, tcx);
+ op.ty(tcx, lhs_ty, rhs_ty)
+ }
+ Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs_ty = lhs.ty(local_decls, tcx);
+ let rhs_ty = rhs.ty(local_decls, tcx);
+ let ty = op.ty(tcx, lhs_ty, rhs_ty);
+ tcx.intern_tup(&[ty, tcx.types.bool])
+ }
+ Rvalue::UnaryOp(UnOp::Not | UnOp::Neg, ref operand) => operand.ty(local_decls, tcx),
+ Rvalue::Discriminant(ref place) => place.ty(local_decls, tcx).ty.discriminant_ty(tcx),
+ Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, _) => tcx.types.usize,
+ Rvalue::Aggregate(ref ak, ref ops) => match **ak {
+ AggregateKind::Array(ty) => tcx.mk_array(ty, ops.len() as u64),
+ AggregateKind::Tuple => tcx.mk_tup(ops.iter().map(|op| op.ty(local_decls, tcx))),
+ AggregateKind::Adt(did, _, substs, _, _) => {
+ tcx.bound_type_of(did).subst(tcx, substs)
+ }
+ AggregateKind::Closure(did, substs) => tcx.mk_closure(did.to_def_id(), substs),
+ AggregateKind::Generator(did, substs, movability) => {
+ tcx.mk_generator(did.to_def_id(), substs, movability)
+ }
+ },
+ Rvalue::ShallowInitBox(_, ty) => tcx.mk_box(ty),
+ Rvalue::CopyForDeref(ref place) => place.ty(local_decls, tcx).ty,
+ }
+ }
+
+ #[inline]
+ /// Returns whether this rvalue is deeply initialized (most rvalues) or
+ /// only shallowly initialized (`Rvalue::ShallowInitBox`).
+ pub fn initialization_state(&self) -> RvalueInitializationState {
+ match *self {
+ Rvalue::ShallowInitBox(_, _) => RvalueInitializationState::Shallow,
+ _ => RvalueInitializationState::Deep,
+ }
+ }
+}
+
+impl<'tcx> Operand<'tcx> {
+ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ match self {
+ &Operand::Copy(ref l) | &Operand::Move(ref l) => l.ty(local_decls, tcx).ty,
+ &Operand::Constant(ref c) => c.literal.ty(),
+ }
+ }
+}
+
+impl<'tcx> BinOp {
+ pub fn ty(&self, tcx: TyCtxt<'tcx>, lhs_ty: Ty<'tcx>, rhs_ty: Ty<'tcx>) -> Ty<'tcx> {
+ // FIXME: handle SIMD correctly
+ match self {
+ &BinOp::Add
+ | &BinOp::Sub
+ | &BinOp::Mul
+ | &BinOp::Div
+ | &BinOp::Rem
+ | &BinOp::BitXor
+ | &BinOp::BitAnd
+ | &BinOp::BitOr => {
+ // these should be integers or floats of the same size.
+ assert_eq!(lhs_ty, rhs_ty);
+ lhs_ty
+ }
+ &BinOp::Shl | &BinOp::Shr | &BinOp::Offset => {
+ lhs_ty // lhs_ty can be != rhs_ty
+ }
+ &BinOp::Eq | &BinOp::Lt | &BinOp::Le | &BinOp::Ne | &BinOp::Ge | &BinOp::Gt => {
+ tcx.types.bool
+ }
+ }
+ }
+}
+
+impl BorrowKind {
+ pub fn to_mutbl_lossy(self) -> hir::Mutability {
+ match self {
+ BorrowKind::Mut { .. } => hir::Mutability::Mut,
+ BorrowKind::Shared => hir::Mutability::Not,
+
+ // We have no type corresponding to a unique imm borrow, so
+ // use `&mut`. It gives all the capabilities of a `&uniq`
+ // and hence is a safe "over approximation".
+ BorrowKind::Unique => hir::Mutability::Mut,
+
+ // We have no type corresponding to a shallow borrow, so use
+ // `&` as an approximation.
+ BorrowKind::Shallow => hir::Mutability::Not,
+ }
+ }
+}
+
+impl BinOp {
+ pub fn to_hir_binop(self) -> hir::BinOpKind {
+ match self {
+ BinOp::Add => hir::BinOpKind::Add,
+ BinOp::Sub => hir::BinOpKind::Sub,
+ BinOp::Mul => hir::BinOpKind::Mul,
+ BinOp::Div => hir::BinOpKind::Div,
+ BinOp::Rem => hir::BinOpKind::Rem,
+ BinOp::BitXor => hir::BinOpKind::BitXor,
+ BinOp::BitAnd => hir::BinOpKind::BitAnd,
+ BinOp::BitOr => hir::BinOpKind::BitOr,
+ BinOp::Shl => hir::BinOpKind::Shl,
+ BinOp::Shr => hir::BinOpKind::Shr,
+ BinOp::Eq => hir::BinOpKind::Eq,
+ BinOp::Ne => hir::BinOpKind::Ne,
+ BinOp::Lt => hir::BinOpKind::Lt,
+ BinOp::Gt => hir::BinOpKind::Gt,
+ BinOp::Le => hir::BinOpKind::Le,
+ BinOp::Ge => hir::BinOpKind::Ge,
+ BinOp::Offset => unreachable!(),
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs
new file mode 100644
index 000000000..9ccf5aea6
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/terminator.rs
@@ -0,0 +1,448 @@
+use crate::mir;
+use crate::mir::interpret::Scalar;
+use crate::ty::{self, Ty, TyCtxt};
+use smallvec::{smallvec, SmallVec};
+
+use super::{BasicBlock, InlineAsmOperand, Operand, SourceInfo, TerminatorKind};
+use rustc_ast::InlineAsmTemplatePiece;
+pub use rustc_ast::Mutability;
+use rustc_macros::HashStable;
+use std::borrow::Cow;
+use std::fmt::{self, Debug, Formatter, Write};
+use std::iter;
+use std::slice;
+
+pub use super::query::*;
+
+#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
+pub struct SwitchTargets {
+ /// Possible values. The locations to branch to in each case
+ /// are found in the corresponding indices from the `targets` vector.
+ values: SmallVec<[u128; 1]>,
+
+ /// Possible branch sites. The last element of this vector is used
+ /// for the otherwise branch, so `targets.len() == values.len() + 1`
+ /// should hold.
+ //
+ // This invariant is quite non-obvious and also could be improved.
+ // One way to enforce this invariant structurally would be something like this instead:
+ //
+ // branches: Vec<(ConstInt, BasicBlock)>,
+ // otherwise: Option<BasicBlock> // exhaustive if None
+ //
+ // However we’ve decided to keep this as-is until we find a case
+ // where some other approach is strictly better than this one.
+ targets: SmallVec<[BasicBlock; 2]>,
+}
+
+impl SwitchTargets {
+ /// Creates switch targets from an iterator of values and target blocks.
+ ///
+ /// The iterator may be empty, in which case the `SwitchInt` instruction is equivalent to
+ /// `goto otherwise;`.
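+ ///
+ /// A hedged usage sketch (`bb1`..`bb3` are hypothetical blocks):
+ ///
+ /// ```ignore (illustrative)
+ /// let targets = SwitchTargets::new([(0, bb1), (7, bb2)].into_iter(), bb3);
+ /// assert_eq!(targets.otherwise(), bb3);
+ /// assert_eq!(targets.target_for_value(7), bb2);
+ /// assert_eq!(targets.target_for_value(42), bb3);
+ /// ```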
+ pub fn new(targets: impl Iterator<Item = (u128, BasicBlock)>, otherwise: BasicBlock) -> Self {
+ let (values, mut targets): (SmallVec<_>, SmallVec<_>) = targets.unzip();
+ targets.push(otherwise);
+ Self { values, targets }
+ }
+
+ /// Builds a switch targets definition that jumps to `then` if the tested value equals `value`,
+ /// and to `else_` if not.
+ pub fn static_if(value: u128, then: BasicBlock, else_: BasicBlock) -> Self {
+ Self { values: smallvec![value], targets: smallvec![then, else_] }
+ }
+
+ /// Returns the fallback target that is jumped to when none of the values match the operand.
+ pub fn otherwise(&self) -> BasicBlock {
+ *self.targets.last().unwrap()
+ }
+
+ /// Returns an iterator over the switch targets.
+ ///
+ /// The iterator will yield tuples containing the value and corresponding target to jump to, not
+ /// including the `otherwise` fallback target.
+ ///
+ /// Note that this may yield 0 elements. Only the `otherwise` branch is mandatory.
+ pub fn iter(&self) -> SwitchTargetsIter<'_> {
+ SwitchTargetsIter { inner: iter::zip(&self.values, &self.targets) }
+ }
+
+ /// Returns a slice with all possible jump targets (including the fallback target).
+ pub fn all_targets(&self) -> &[BasicBlock] {
+ &self.targets
+ }
+
+ pub fn all_targets_mut(&mut self) -> &mut [BasicBlock] {
+ &mut self.targets
+ }
+
+ /// Finds the `BasicBlock` to which this `SwitchInt` will branch given the
+ /// specific value. This cannot fail, as it'll return the `otherwise`
+ /// branch if there's not a specific match for the value.
+ pub fn target_for_value(&self, value: u128) -> BasicBlock {
+ self.iter().find_map(|(v, t)| (v == value).then_some(t)).unwrap_or_else(|| self.otherwise())
+ }
+}
+
+pub struct SwitchTargetsIter<'a> {
+ inner: iter::Zip<slice::Iter<'a, u128>, slice::Iter<'a, BasicBlock>>,
+}
+
+impl<'a> Iterator for SwitchTargetsIter<'a> {
+ type Item = (u128, BasicBlock);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next().map(|(val, bb)| (*val, *bb))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<'a> ExactSizeIterator for SwitchTargetsIter<'a> {}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct Terminator<'tcx> {
+ pub source_info: SourceInfo,
+ pub kind: TerminatorKind<'tcx>,
+}
+
+pub type Successors<'a> = impl Iterator<Item = BasicBlock> + 'a;
+pub type SuccessorsMut<'a> =
+ iter::Chain<std::option::IntoIter<&'a mut BasicBlock>, slice::IterMut<'a, BasicBlock>>;
+
+impl<'tcx> Terminator<'tcx> {
+ pub fn successors(&self) -> Successors<'_> {
+ self.kind.successors()
+ }
+
+ pub fn successors_mut(&mut self) -> SuccessorsMut<'_> {
+ self.kind.successors_mut()
+ }
+
+ pub fn unwind(&self) -> Option<&Option<BasicBlock>> {
+ self.kind.unwind()
+ }
+
+ pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+ self.kind.unwind_mut()
+ }
+}
+
+impl<'tcx> TerminatorKind<'tcx> {
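+ /// Builds an `if`-like `SwitchInt`: branches to `f` when `cond` evaluates to `0` (i.e.
+ /// `false`) and to `t` otherwise. A hedged sketch of the resulting terminator:
+ ///
+ /// ```ignore (illustrative)
+ /// switchInt(cond) -> [0: f, otherwise: t]
+ /// ```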
+ pub fn if_(
+ tcx: TyCtxt<'tcx>,
+ cond: Operand<'tcx>,
+ t: BasicBlock,
+ f: BasicBlock,
+ ) -> TerminatorKind<'tcx> {
+ TerminatorKind::SwitchInt {
+ discr: cond,
+ switch_ty: tcx.types.bool,
+ targets: SwitchTargets::static_if(0, f, t),
+ }
+ }
+
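+ /// Returns an iterator over all basic blocks this terminator may transfer control to,
+ /// including any cleanup/unwind edges.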
+ pub fn successors(&self) -> Successors<'_> {
+ use self::TerminatorKind::*;
+ match *self {
+ Resume
+ | Abort
+ | GeneratorDrop
+ | Return
+ | Unreachable
+ | Call { target: None, cleanup: None, .. }
+ | InlineAsm { destination: None, cleanup: None, .. } => {
+ None.into_iter().chain((&[]).into_iter().copied())
+ }
+ Goto { target: t }
+ | Call { target: None, cleanup: Some(t), .. }
+ | Call { target: Some(t), cleanup: None, .. }
+ | Yield { resume: t, drop: None, .. }
+ | DropAndReplace { target: t, unwind: None, .. }
+ | Drop { target: t, unwind: None, .. }
+ | Assert { target: t, cleanup: None, .. }
+ | FalseUnwind { real_target: t, unwind: None }
+ | InlineAsm { destination: Some(t), cleanup: None, .. }
+ | InlineAsm { destination: None, cleanup: Some(t), .. } => {
+ Some(t).into_iter().chain((&[]).into_iter().copied())
+ }
+ Call { target: Some(t), cleanup: Some(ref u), .. }
+ | Yield { resume: t, drop: Some(ref u), .. }
+ | DropAndReplace { target: t, unwind: Some(ref u), .. }
+ | Drop { target: t, unwind: Some(ref u), .. }
+ | Assert { target: t, cleanup: Some(ref u), .. }
+ | FalseUnwind { real_target: t, unwind: Some(ref u) }
+ | InlineAsm { destination: Some(t), cleanup: Some(ref u), .. } => {
+ Some(t).into_iter().chain(slice::from_ref(u).into_iter().copied())
+ }
+ SwitchInt { ref targets, .. } => {
+ None.into_iter().chain(targets.targets.iter().copied())
+ }
+ FalseEdge { real_target, ref imaginary_target } => Some(real_target)
+ .into_iter()
+ .chain(slice::from_ref(imaginary_target).into_iter().copied()),
+ }
+ }
+
+ pub fn successors_mut(&mut self) -> SuccessorsMut<'_> {
+ use self::TerminatorKind::*;
+ match *self {
+ Resume
+ | Abort
+ | GeneratorDrop
+ | Return
+ | Unreachable
+ | Call { target: None, cleanup: None, .. }
+ | InlineAsm { destination: None, cleanup: None, .. } => None.into_iter().chain(&mut []),
+ Goto { target: ref mut t }
+ | Call { target: None, cleanup: Some(ref mut t), .. }
+ | Call { target: Some(ref mut t), cleanup: None, .. }
+ | Yield { resume: ref mut t, drop: None, .. }
+ | DropAndReplace { target: ref mut t, unwind: None, .. }
+ | Drop { target: ref mut t, unwind: None, .. }
+ | Assert { target: ref mut t, cleanup: None, .. }
+ | FalseUnwind { real_target: ref mut t, unwind: None }
+ | InlineAsm { destination: Some(ref mut t), cleanup: None, .. }
+ | InlineAsm { destination: None, cleanup: Some(ref mut t), .. } => {
+ Some(t).into_iter().chain(&mut [])
+ }
+ Call { target: Some(ref mut t), cleanup: Some(ref mut u), .. }
+ | Yield { resume: ref mut t, drop: Some(ref mut u), .. }
+ | DropAndReplace { target: ref mut t, unwind: Some(ref mut u), .. }
+ | Drop { target: ref mut t, unwind: Some(ref mut u), .. }
+ | Assert { target: ref mut t, cleanup: Some(ref mut u), .. }
+ | FalseUnwind { real_target: ref mut t, unwind: Some(ref mut u) }
+ | InlineAsm { destination: Some(ref mut t), cleanup: Some(ref mut u), .. } => {
+ Some(t).into_iter().chain(slice::from_mut(u))
+ }
+ SwitchInt { ref mut targets, .. } => None.into_iter().chain(&mut targets.targets),
+ FalseEdge { ref mut real_target, ref mut imaginary_target } => {
+ Some(real_target).into_iter().chain(slice::from_mut(imaginary_target))
+ }
+ }
+ }
+
+ pub fn unwind(&self) -> Option<&Option<BasicBlock>> {
+ match *self {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::FalseEdge { .. } => None,
+ TerminatorKind::Call { cleanup: ref unwind, .. }
+ | TerminatorKind::Assert { cleanup: ref unwind, .. }
+ | TerminatorKind::DropAndReplace { ref unwind, .. }
+ | TerminatorKind::Drop { ref unwind, .. }
+ | TerminatorKind::FalseUnwind { ref unwind, .. }
+ | TerminatorKind::InlineAsm { cleanup: ref unwind, .. } => Some(unwind),
+ }
+ }
+
+ pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+ match *self {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::FalseEdge { .. } => None,
+ TerminatorKind::Call { cleanup: ref mut unwind, .. }
+ | TerminatorKind::Assert { cleanup: ref mut unwind, .. }
+ | TerminatorKind::DropAndReplace { ref mut unwind, .. }
+ | TerminatorKind::Drop { ref mut unwind, .. }
+ | TerminatorKind::FalseUnwind { ref mut unwind, .. }
+ | TerminatorKind::InlineAsm { cleanup: ref mut unwind, .. } => Some(unwind),
+ }
+ }
+
+ pub fn as_switch(&self) -> Option<(&Operand<'tcx>, Ty<'tcx>, &SwitchTargets)> {
+ match self {
+ TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
+ Some((discr, *switch_ty, targets))
+ }
+ _ => None,
+ }
+ }
+
+ pub fn as_goto(&self) -> Option<BasicBlock> {
+ match self {
+ TerminatorKind::Goto { target } => Some(*target),
+ _ => None,
+ }
+ }
+}
+
+impl<'tcx> Debug for TerminatorKind<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ self.fmt_head(fmt)?;
+ let successor_count = self.successors().count();
+ let labels = self.fmt_successor_labels();
+ assert_eq!(successor_count, labels.len());
+
+ match successor_count {
+ 0 => Ok(()),
+
+ 1 => write!(fmt, " -> {:?}", self.successors().next().unwrap()),
+
+ _ => {
+ write!(fmt, " -> [")?;
+ for (i, target) in self.successors().enumerate() {
+ if i > 0 {
+ write!(fmt, ", ")?;
+ }
+ write!(fmt, "{}: {:?}", labels[i], target)?;
+ }
+ write!(fmt, "]")
+ }
+ }
+ }
+}
+
+impl<'tcx> TerminatorKind<'tcx> {
+ /// Writes the "head" part of the terminator; that is, its name and the data it uses to pick the
+ /// successor basic block, if any. The only information not included is the list of possible
+ /// successors, which may be rendered differently between the text and the graphviz format.
+ pub fn fmt_head<W: Write>(&self, fmt: &mut W) -> fmt::Result {
+ use self::TerminatorKind::*;
+ match self {
+ Goto { .. } => write!(fmt, "goto"),
+ SwitchInt { discr, .. } => write!(fmt, "switchInt({:?})", discr),
+ Return => write!(fmt, "return"),
+ GeneratorDrop => write!(fmt, "generator_drop"),
+ Resume => write!(fmt, "resume"),
+ Abort => write!(fmt, "abort"),
+ Yield { value, resume_arg, .. } => write!(fmt, "{:?} = yield({:?})", resume_arg, value),
+ Unreachable => write!(fmt, "unreachable"),
+ Drop { place, .. } => write!(fmt, "drop({:?})", place),
+ DropAndReplace { place, value, .. } => {
+ write!(fmt, "replace({:?} <- {:?})", place, value)
+ }
+ Call { func, args, destination, .. } => {
+ write!(fmt, "{:?} = ", destination)?;
+ write!(fmt, "{:?}(", func)?;
+ for (index, arg) in args.iter().enumerate() {
+ if index > 0 {
+ write!(fmt, ", ")?;
+ }
+ write!(fmt, "{:?}", arg)?;
+ }
+ write!(fmt, ")")
+ }
+ Assert { cond, expected, msg, .. } => {
+ write!(fmt, "assert(")?;
+ if !expected {
+ write!(fmt, "!")?;
+ }
+ write!(fmt, "{:?}, ", cond)?;
+ msg.fmt_assert_args(fmt)?;
+ write!(fmt, ")")
+ }
+ FalseEdge { .. } => write!(fmt, "falseEdge"),
+ FalseUnwind { .. } => write!(fmt, "falseUnwind"),
+ InlineAsm { template, ref operands, options, .. } => {
+ write!(fmt, "asm!(\"{}\"", InlineAsmTemplatePiece::to_string(template))?;
+ for op in operands {
+ write!(fmt, ", ")?;
+ let print_late = |&late| if late { "late" } else { "" };
+ match op {
+ InlineAsmOperand::In { reg, value } => {
+ write!(fmt, "in({}) {:?}", reg, value)?;
+ }
+ InlineAsmOperand::Out { reg, late, place: Some(place) } => {
+ write!(fmt, "{}out({}) {:?}", print_late(late), reg, place)?;
+ }
+ InlineAsmOperand::Out { reg, late, place: None } => {
+ write!(fmt, "{}out({}) _", print_late(late), reg)?;
+ }
+ InlineAsmOperand::InOut {
+ reg,
+ late,
+ in_value,
+ out_place: Some(out_place),
+ } => {
+ write!(
+ fmt,
+ "in{}out({}) {:?} => {:?}",
+ print_late(late),
+ reg,
+ in_value,
+ out_place
+ )?;
+ }
+ InlineAsmOperand::InOut { reg, late, in_value, out_place: None } => {
+ write!(fmt, "in{}out({}) {:?} => _", print_late(late), reg, in_value)?;
+ }
+ InlineAsmOperand::Const { value } => {
+ write!(fmt, "const {:?}", value)?;
+ }
+ InlineAsmOperand::SymFn { value } => {
+ write!(fmt, "sym_fn {:?}", value)?;
+ }
+ InlineAsmOperand::SymStatic { def_id } => {
+ write!(fmt, "sym_static {:?}", def_id)?;
+ }
+ }
+ }
+ write!(fmt, ", options({:?}))", options)
+ }
+ }
+ }
+
+ /// Returns the list of labels for the edges to the successor basic blocks.
+ pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
+ use self::TerminatorKind::*;
+ match *self {
+ Return | Resume | Abort | Unreachable | GeneratorDrop => vec![],
+ Goto { .. } => vec!["".into()],
+ SwitchInt { ref targets, switch_ty, .. } => ty::tls::with(|tcx| {
+ let param_env = ty::ParamEnv::empty();
+ let switch_ty = tcx.lift(switch_ty).unwrap();
+ let size = tcx.layout_of(param_env.and(switch_ty)).unwrap().size;
+ targets
+ .values
+ .iter()
+ .map(|&u| {
+ mir::ConstantKind::from_scalar(tcx, Scalar::from_uint(u, size), switch_ty)
+ .to_string()
+ .into()
+ })
+ .chain(iter::once("otherwise".into()))
+ .collect()
+ }),
+ Call { target: Some(_), cleanup: Some(_), .. } => {
+ vec!["return".into(), "unwind".into()]
+ }
+ Call { target: Some(_), cleanup: None, .. } => vec!["return".into()],
+ Call { target: None, cleanup: Some(_), .. } => vec!["unwind".into()],
+ Call { target: None, cleanup: None, .. } => vec![],
+ Yield { drop: Some(_), .. } => vec!["resume".into(), "drop".into()],
+ Yield { drop: None, .. } => vec!["resume".into()],
+ DropAndReplace { unwind: None, .. } | Drop { unwind: None, .. } => {
+ vec!["return".into()]
+ }
+ DropAndReplace { unwind: Some(_), .. } | Drop { unwind: Some(_), .. } => {
+ vec!["return".into(), "unwind".into()]
+ }
+ Assert { cleanup: None, .. } => vec!["".into()],
+ Assert { .. } => vec!["success".into(), "unwind".into()],
+ FalseEdge { .. } => vec!["real".into(), "imaginary".into()],
+ FalseUnwind { unwind: Some(_), .. } => vec!["real".into(), "cleanup".into()],
+ FalseUnwind { unwind: None, .. } => vec!["real".into()],
+ InlineAsm { destination: Some(_), cleanup: Some(_), .. } => {
+ vec!["return".into(), "unwind".into()]
+ }
+ InlineAsm { destination: Some(_), cleanup: None, .. } => vec!["return".into()],
+ InlineAsm { destination: None, cleanup: Some(_), .. } => vec!["unwind".into()],
+ InlineAsm { destination: None, cleanup: None, .. } => vec![],
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs
new file mode 100644
index 000000000..627dc32f3
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/traversal.rs
@@ -0,0 +1,388 @@
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_index::bit_set::BitSet;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+use super::*;
+
+/// Preorder traversal of a graph.
+///
+/// Preorder traversal is when each node is visited after at least one of its predecessors. If you
+/// are familiar with some basic graph theory, then this performs a depth first search and returns
+/// nodes in order of discovery time.
+///
+/// ```text
+///
+/// A
+/// / \
+/// / \
+/// B C
+/// \ /
+/// \ /
+/// D
+/// ```
+///
+/// A preorder traversal of this graph is either `A B D C` or `A C D B`
+#[derive(Clone)]
+pub struct Preorder<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ visited: BitSet<BasicBlock>,
+ worklist: Vec<BasicBlock>,
+ root_is_start_block: bool,
+}
+
+impl<'a, 'tcx> Preorder<'a, 'tcx> {
+ pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> {
+ let worklist = vec![root];
+
+ Preorder {
+ body,
+ visited: BitSet::new_empty(body.basic_blocks().len()),
+ worklist,
+ root_is_start_block: root == START_BLOCK,
+ }
+ }
+}
+
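+/// Creates a preorder traversal over all blocks reachable from `START_BLOCK`.
+///
+/// A hedged usage sketch (assumes some `body: &Body<'_>` is in scope):
+///
+/// ```ignore (illustrative)
+/// for (bb, data) in preorder(body) {
+///     // Each reachable block is yielded exactly once, in discovery order.
+///     let _ = (bb, data);
+/// }
+/// ```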
+pub fn preorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Preorder<'a, 'tcx> {
+ Preorder::new(body, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
+ type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+ fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+ while let Some(idx) = self.worklist.pop() {
+ if !self.visited.insert(idx) {
+ continue;
+ }
+
+ let data = &self.body[idx];
+
+ if let Some(ref term) = data.terminator {
+ self.worklist.extend(term.successors());
+ }
+
+ return Some((idx, data));
+ }
+
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // All the blocks, minus the number of blocks we've visited.
+ let upper = self.body.basic_blocks().len() - self.visited.count();
+
+ let lower = if self.root_is_start_block {
+ // We will visit all remaining blocks exactly once.
+ upper
+ } else {
+ self.worklist.len()
+ };
+
+ (lower, Some(upper))
+ }
+}
+
+/// Postorder traversal of a graph.
+///
+/// Postorder traversal is when each node is visited after all of its successors, except when the
+/// successor is only reachable by a back-edge. If you are familiar with some basic graph theory,
+/// then this performs a depth first search and returns nodes in order of completion time.
+///
+///
+/// ```text
+///
+/// A
+/// / \
+/// / \
+/// B C
+/// \ /
+/// \ /
+/// D
+/// ```
+///
+/// A postorder traversal of this graph is `D B C A` or `D C B A`
+pub struct Postorder<'a, 'tcx> {
+ basic_blocks: &'a IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ visited: BitSet<BasicBlock>,
+ visit_stack: Vec<(BasicBlock, Successors<'a>)>,
+ root_is_start_block: bool,
+}
+
+impl<'a, 'tcx> Postorder<'a, 'tcx> {
+ pub fn new(
+ basic_blocks: &'a IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ root: BasicBlock,
+ ) -> Postorder<'a, 'tcx> {
+ let mut po = Postorder {
+ basic_blocks,
+ visited: BitSet::new_empty(basic_blocks.len()),
+ visit_stack: Vec::new(),
+ root_is_start_block: root == START_BLOCK,
+ };
+
+ let data = &po.basic_blocks[root];
+
+ if let Some(ref term) = data.terminator {
+ po.visited.insert(root);
+ po.visit_stack.push((root, term.successors()));
+ po.traverse_successor();
+ }
+
+ po
+ }
+
+ fn traverse_successor(&mut self) {
+ // This is quite a complex loop, both because the borrow checker doesn't much like
+ // it and because what exactly is going on is not immediately obvious.
+ //
+ // It does the actual traversal of the graph, while the `next` method on the iterator
+ // just pops off of the stack. `visit_stack` is a stack containing pairs of nodes and
+ // iterators over the successors of those nodes. Each iteration attempts to get the next
+ // node from the top of the stack, then pushes that node and an iterator over the
+ // successors to the top of the stack. This loop only grows `visit_stack`, stopping when
+ // we reach a child that has no children that we haven't already visited.
+ //
+ // For a graph that looks like this:
+ //
+ // A
+ // / \
+ // / \
+ // B C
+ // | |
+ // | |
+ // D |
+ // \ /
+ // \ /
+ // E
+ //
+ // The state of the stack starts out with just the root node (`A` in this case);
+ // [(A, [B, C])]
+ //
+ // When the first call to `traverse_successor` happens, the following happens:
+ //
+ // [(B, [D]), // `B` taken from the successors of `A`, pushed to the
+ // // top of the stack along with the successors of `B`
+ // (A, [C])]
+ //
+ // [(D, [E]), // `D` taken from successors of `B`, pushed to stack
+ // (B, []),
+ // (A, [C])]
+ //
+ // [(E, []), // `E` taken from successors of `D`, pushed to stack
+ // (D, []),
+ // (B, []),
+ // (A, [C])]
+ //
+ // Now that the top of the stack has no successors we can traverse, each item will
+ // be popped off during iteration until we get back to `A`. This yields [E, D, B].
+ //
+ // When we yield `B` and call `traverse_successor`, we push `C` to the stack, but
+ // since we've already visited `E`, that child isn't added to the stack. The last
+ // two iterations yield `C` and finally `A` for a final traversal of [E, D, B, C, A]
+ loop {
+ let bb = if let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() {
+ if let Some(bb) = iter.next() {
+ bb
+ } else {
+ break;
+ }
+ } else {
+ break;
+ };
+
+ if self.visited.insert(bb) {
+ if let Some(term) = &self.basic_blocks[bb].terminator {
+ self.visit_stack.push((bb, term.successors()));
+ }
+ }
+ }
+ }
+}
+
+pub fn postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Postorder<'a, 'tcx> {
+ Postorder::new(&body.basic_blocks, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> {
+ type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+ fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+ let next = self.visit_stack.pop();
+ if next.is_some() {
+ self.traverse_successor();
+ }
+
+ next.map(|(bb, _)| (bb, &self.basic_blocks[bb]))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // All the blocks, minus the number of blocks we've visited.
+ let upper = self.basic_blocks.len() - self.visited.count();
+
+ let lower = if self.root_is_start_block {
+ // We will visit all remaining blocks exactly once.
+ upper
+ } else {
+ self.visit_stack.len()
+ };
+
+ (lower, Some(upper))
+ }
+}
+
+/// Reverse postorder traversal of a graph
+///
+/// Reverse postorder is the reverse order of a postorder traversal.
+/// This is different from a preorder traversal and represents a natural
+/// linearization of control-flow.
+///
+/// ```text
+///
+/// A
+/// / \
+/// / \
+/// B C
+/// \ /
+/// \ /
+/// D
+/// ```
+///
+/// A reverse postorder traversal of this graph is either `A B C D` or `A C B D`.
+/// Note that for a graph containing no loops (i.e., a DAG), this is equivalent to
+/// a topological sort.
+///
+/// Construction of a `ReversePostorder` traversal requires doing a full
+/// postorder traversal of the graph, therefore this traversal should be
+/// constructed as few times as possible. Use the `reset` method to be able
+/// to re-use the traversal.
+#[derive(Clone)]
+pub struct ReversePostorder<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ blocks: Vec<BasicBlock>,
+ idx: usize,
+}
+
+impl<'a, 'tcx> ReversePostorder<'a, 'tcx> {
+ pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> {
+ let blocks: Vec<_> = Postorder::new(&body.basic_blocks, root).map(|(bb, _)| bb).collect();
+ let len = blocks.len();
+ ReversePostorder { body, blocks, idx: len }
+ }
+}
+
+impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> {
+ type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+ fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+ if self.idx == 0 {
+ return None;
+ }
+ self.idx -= 1;
+
+ self.blocks.get(self.idx).map(|&bb| (bb, &self.body[bb]))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.idx, Some(self.idx))
+ }
+}
+
+impl<'a, 'tcx> ExactSizeIterator for ReversePostorder<'a, 'tcx> {}
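To see why reversing a postorder gives a topological order on a DAG, here is a self-contained sketch (hypothetical helper names, not the compiler's API): a node is emitted only after all of its successors, so reversing the emission order places every node before everything it points to.

```rust
/// Depth-first postorder, then reverse it.
fn postorder_rec(succ: &[Vec<usize>], node: usize, seen: &mut [bool], out: &mut Vec<usize>) {
    seen[node] = true;
    for &s in &succ[node] {
        if !seen[s] {
            postorder_rec(succ, s, seen, out);
        }
    }
    out.push(node); // emitted only after all successors are done
}

fn reverse_postorder(succ: &[Vec<usize>], root: usize) -> Vec<usize> {
    let mut seen = vec![false; succ.len()];
    let mut out = Vec::new();
    postorder_rec(succ, root, &mut seen, &mut out);
    out.reverse();
    out
}

fn main() {
    // The diamond DAG from the doc comment: A=0 -> {B=1, C=2} -> D=3.
    let rpo = reverse_postorder(&[vec![1, 2], vec![3], vec![3], vec![]], 0);
    // Every edge u -> v places u before v, i.e. a topological sort: `A B C D`.
    assert_eq!(rpo, vec![0, 1, 2, 3]);
}
```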
+
+/// Returns an iterator over all basic blocks reachable from the `START_BLOCK` in no particular
+/// order.
+///
+/// This is clearer than writing `preorder` in cases where the order doesn't matter.
+pub fn reachable<'a, 'tcx>(
+ body: &'a Body<'tcx>,
+) -> impl 'a + Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> {
+ preorder(body)
+}
+
+/// Returns a `BitSet` containing all basic blocks reachable from the `START_BLOCK`.
+pub fn reachable_as_bitset<'tcx>(body: &Body<'tcx>) -> BitSet<BasicBlock> {
+ let mut iter = preorder(body);
+ (&mut iter).for_each(drop);
+ iter.visited
+}
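The trick in `reachable_as_bitset` is to drive an iterator to completion purely for its side effects and then harvest its internal state instead of discarding it. A toy recording iterator shows the same pattern (a sketch using nothing beyond `std`; the names are illustrative):

```rust
/// An iterator that records everything it yields, mimicking how
/// `reachable_as_bitset` exhausts `preorder` and then keeps its `visited` set.
struct Recording<I: Iterator<Item = usize>> {
    inner: I,
    visited: Vec<usize>,
}

impl<I: Iterator<Item = usize>> Iterator for Recording<I> {
    type Item = usize;
    fn next(&mut self) -> Option<usize> {
        let next = self.inner.next();
        if let Some(n) = next {
            self.visited.push(n);
        }
        next
    }
}

fn main() {
    let mut iter = Recording { inner: 0..5, visited: Vec::new() };
    // Drive the traversal to completion purely for its side effect,
    // exactly like `(&mut iter).for_each(drop)` above...
    (&mut iter).for_each(drop);
    // ...then harvest the accumulated state instead of throwing it away.
    assert_eq!(iter.visited, vec![0, 1, 2, 3, 4]);
}
```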
+
+#[derive(Clone)]
+pub struct ReversePostorderIter<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ blocks: &'a [BasicBlock],
+ idx: usize,
+}
+
+impl<'a, 'tcx> Iterator for ReversePostorderIter<'a, 'tcx> {
+ type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+ fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+ if self.idx == 0 {
+ return None;
+ }
+ self.idx -= 1;
+
+ self.blocks.get(self.idx).map(|&bb| (bb, &self.body[bb]))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.idx, Some(self.idx))
+ }
+}
+
+impl<'a, 'tcx> ExactSizeIterator for ReversePostorderIter<'a, 'tcx> {}
+
+pub fn reverse_postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> ReversePostorderIter<'a, 'tcx> {
+ let blocks = body.basic_blocks.postorder();
+ let len = blocks.len();
+ ReversePostorderIter { body, blocks, idx: len }
+}
+
+#[derive(Clone, Debug)]
+pub(super) struct PostorderCache {
+ cache: OnceCell<Vec<BasicBlock>>,
+}
+
+impl PostorderCache {
+ #[inline]
+ pub(super) fn new() -> Self {
+ PostorderCache { cache: OnceCell::new() }
+ }
+
+ /// Invalidates the postorder cache.
+ #[inline]
+ pub(super) fn invalidate(&mut self) {
+ self.cache = OnceCell::new();
+ }
+
+ /// Returns a `&[BasicBlock]` representing the postorder traversal of this MIR's control-flow graph.
+ #[inline]
+ pub(super) fn compute(&self, body: &IndexVec<BasicBlock, BasicBlockData<'_>>) -> &[BasicBlock] {
+ self.cache.get_or_init(|| Postorder::new(body, START_BLOCK).map(|(bb, _)| bb).collect())
+ }
+}
+
+impl<S: Encoder> Encodable<S> for PostorderCache {
+ #[inline]
+ fn encode(&self, _s: &mut S) {}
+}
+
+impl<D: Decoder> Decodable<D> for PostorderCache {
+ #[inline]
+ fn decode(_: &mut D) -> Self {
+ Self::new()
+ }
+}
+
+impl<CTX> HashStable<CTX> for PostorderCache {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ PostorderCache,
+}
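The caching pattern in `PostorderCache` can be sketched with `std::cell::OnceCell` standing in for the compiler's internal synchronized cell (an assumption for illustration; the struct and method names below are hypothetical). `get_or_init` gives shared-reference memoization, and invalidation is just replacing the cell. Because the cell holds only derived data, encoding and stable hashing can skip it entirely, which is why the `Encodable` and `HashStable` impls above are no-ops.

```rust
use std::cell::OnceCell;

/// A cache that computes a derived value at most once per validity period.
struct Cache {
    cell: OnceCell<Vec<u32>>,
}

impl Cache {
    fn new() -> Self {
        Cache { cell: OnceCell::new() }
    }

    /// Drop any memoized value; the next `compute` call recomputes it.
    fn invalidate(&mut self) {
        self.cell = OnceCell::new();
    }

    /// Shared-reference access: the first caller pays for the computation,
    /// later callers get the memoized slice.
    fn compute(&self, input: &[u32]) -> &[u32] {
        self.cell.get_or_init(|| {
            let mut v = input.to_vec();
            v.sort_unstable();
            v
        })
    }
}

fn main() {
    let mut cache = Cache::new();
    assert_eq!(cache.compute(&[3, 1, 2]), &[1, 2, 3]);
    // New inputs are ignored until the cache is invalidated...
    assert_eq!(cache.compute(&[9, 9]), &[1, 2, 3]);
    cache.invalidate();
    assert_eq!(cache.compute(&[9, 8]), &[8, 9]);
}
```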
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
new file mode 100644
index 000000000..82a6b0c50
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -0,0 +1,240 @@
+//! `TypeFoldable` implementations for MIR types
+
+use super::*;
+use crate::ty;
+use rustc_data_structures::functor::IdFunctor;
+
+TrivialTypeTraversalAndLiftImpls! {
+ BlockTailInfo,
+ MirPhase,
+ SourceInfo,
+ FakeReadCause,
+ RetagKind,
+ SourceScope,
+ SourceScopeLocalData,
+ UserTypeAnnotationIndex,
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ use crate::mir::TerminatorKind::*;
+
+ let kind = match self.kind {
+ Goto { target } => Goto { target },
+ SwitchInt { discr, switch_ty, targets } => SwitchInt {
+ discr: discr.try_fold_with(folder)?,
+ switch_ty: switch_ty.try_fold_with(folder)?,
+ targets,
+ },
+ Drop { place, target, unwind } => {
+ Drop { place: place.try_fold_with(folder)?, target, unwind }
+ }
+ DropAndReplace { place, value, target, unwind } => DropAndReplace {
+ place: place.try_fold_with(folder)?,
+ value: value.try_fold_with(folder)?,
+ target,
+ unwind,
+ },
+ Yield { value, resume, resume_arg, drop } => Yield {
+ value: value.try_fold_with(folder)?,
+ resume,
+ resume_arg: resume_arg.try_fold_with(folder)?,
+ drop,
+ },
+ Call { func, args, destination, target, cleanup, from_hir_call, fn_span } => Call {
+ func: func.try_fold_with(folder)?,
+ args: args.try_fold_with(folder)?,
+ destination: destination.try_fold_with(folder)?,
+ target,
+ cleanup,
+ from_hir_call,
+ fn_span,
+ },
+ Assert { cond, expected, msg, target, cleanup } => {
+ use AssertKind::*;
+ let msg = match msg {
+ BoundsCheck { len, index } => BoundsCheck {
+ len: len.try_fold_with(folder)?,
+ index: index.try_fold_with(folder)?,
+ },
+ Overflow(op, l, r) => {
+ Overflow(op, l.try_fold_with(folder)?, r.try_fold_with(folder)?)
+ }
+ OverflowNeg(op) => OverflowNeg(op.try_fold_with(folder)?),
+ DivisionByZero(op) => DivisionByZero(op.try_fold_with(folder)?),
+ RemainderByZero(op) => RemainderByZero(op.try_fold_with(folder)?),
+ ResumedAfterReturn(_) | ResumedAfterPanic(_) => msg,
+ };
+ Assert { cond: cond.try_fold_with(folder)?, expected, msg, target, cleanup }
+ }
+ GeneratorDrop => GeneratorDrop,
+ Resume => Resume,
+ Abort => Abort,
+ Return => Return,
+ Unreachable => Unreachable,
+ FalseEdge { real_target, imaginary_target } => {
+ FalseEdge { real_target, imaginary_target }
+ }
+ FalseUnwind { real_target, unwind } => FalseUnwind { real_target, unwind },
+ InlineAsm { template, operands, options, line_spans, destination, cleanup } => {
+ InlineAsm {
+ template,
+ operands: operands.try_fold_with(folder)?,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ }
+ }
+ };
+ Ok(Terminator { source_info: self.source_info, kind })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for GeneratorKind {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Place<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(Place {
+ local: self.local.try_fold_with(folder)?,
+ projection: self.projection.try_fold_with(folder)?,
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ ty::util::fold_list(self, folder, |tcx, v| tcx.intern_place_elems(v))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ use crate::mir::Rvalue::*;
+ Ok(match self {
+ Use(op) => Use(op.try_fold_with(folder)?),
+ Repeat(op, len) => Repeat(op.try_fold_with(folder)?, len.try_fold_with(folder)?),
+ ThreadLocalRef(did) => ThreadLocalRef(did.try_fold_with(folder)?),
+ Ref(region, bk, place) => {
+ Ref(region.try_fold_with(folder)?, bk, place.try_fold_with(folder)?)
+ }
+ CopyForDeref(place) => CopyForDeref(place.try_fold_with(folder)?),
+ AddressOf(mutability, place) => AddressOf(mutability, place.try_fold_with(folder)?),
+ Len(place) => Len(place.try_fold_with(folder)?),
+ Cast(kind, op, ty) => Cast(kind, op.try_fold_with(folder)?, ty.try_fold_with(folder)?),
+ BinaryOp(op, box (lhs, rhs)) => {
+ BinaryOp(op, Box::new((lhs.try_fold_with(folder)?, rhs.try_fold_with(folder)?)))
+ }
+ CheckedBinaryOp(op, box (lhs, rhs)) => CheckedBinaryOp(
+ op,
+ Box::new((lhs.try_fold_with(folder)?, rhs.try_fold_with(folder)?)),
+ ),
+ UnaryOp(op, val) => UnaryOp(op, val.try_fold_with(folder)?),
+ Discriminant(place) => Discriminant(place.try_fold_with(folder)?),
+ NullaryOp(op, ty) => NullaryOp(op, ty.try_fold_with(folder)?),
+ Aggregate(kind, fields) => {
+ let kind = kind.try_map_id(|kind| {
+ Ok(match kind {
+ AggregateKind::Array(ty) => AggregateKind::Array(ty.try_fold_with(folder)?),
+ AggregateKind::Tuple => AggregateKind::Tuple,
+ AggregateKind::Adt(def, v, substs, user_ty, n) => AggregateKind::Adt(
+ def,
+ v,
+ substs.try_fold_with(folder)?,
+ user_ty.try_fold_with(folder)?,
+ n,
+ ),
+ AggregateKind::Closure(id, substs) => {
+ AggregateKind::Closure(id, substs.try_fold_with(folder)?)
+ }
+ AggregateKind::Generator(id, substs, movability) => {
+ AggregateKind::Generator(id, substs.try_fold_with(folder)?, movability)
+ }
+ })
+ })?;
+ Aggregate(kind, fields.try_fold_with(folder)?)
+ }
+ ShallowInitBox(op, ty) => {
+ ShallowInitBox(op.try_fold_with(folder)?, ty.try_fold_with(folder)?)
+ }
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(match self {
+ Operand::Copy(place) => Operand::Copy(place.try_fold_with(folder)?),
+ Operand::Move(place) => Operand::Move(place.try_fold_with(folder)?),
+ Operand::Constant(c) => Operand::Constant(c.try_fold_with(folder)?),
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for PlaceElem<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ use crate::mir::ProjectionElem::*;
+
+ Ok(match self {
+ Deref => Deref,
+ Field(f, ty) => Field(f, ty.try_fold_with(folder)?),
+ Index(v) => Index(v.try_fold_with(folder)?),
+ Downcast(symbol, variantidx) => Downcast(symbol, variantidx),
+ ConstantIndex { offset, min_length, from_end } => {
+ ConstantIndex { offset, min_length, from_end }
+ }
+ Subslice { from, to, from_end } => Subslice { from, to, from_end },
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Field {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for GeneratorSavedLocal {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix<R, C> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(Constant {
+ span: self.span,
+ user_ty: self.user_ty.try_fold_with(folder)?,
+ literal: self.literal.try_fold_with(folder)?,
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ConstantKind<'tcx> {
+ #[inline(always)]
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_mir_const(self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ConstantKind<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ match self {
+ ConstantKind::Ty(c) => Ok(ConstantKind::Ty(c.try_fold_with(folder)?)),
+ ConstantKind::Val(v, t) => Ok(ConstantKind::Val(v, t.try_fold_with(folder)?)),
+ }
+ }
+}
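The shape shared by all of these impls — rebuild each node from its folded children and let `?` short-circuit on the first folder error — can be shown over a toy expression tree (illustrative types, not MIR):

```rust
/// A minimal fallible-fold sketch in the style of `try_fold_with` above.
#[derive(Debug, PartialEq)]
enum Expr {
    Num(i64),
    Add(Box<Expr>, Box<Expr>),
}

trait Folder {
    type Error;
    fn fold_num(&mut self, n: i64) -> Result<i64, Self::Error>;
}

impl Expr {
    fn try_fold_with<F: Folder>(self, folder: &mut F) -> Result<Self, F::Error> {
        Ok(match self {
            Expr::Num(n) => Expr::Num(folder.fold_num(n)?),
            // Rebuild the node from folded children; `?` propagates the
            // first error outward, just like the MIR impls above.
            Expr::Add(l, r) => Expr::Add(
                Box::new(l.try_fold_with(folder)?),
                Box::new(r.try_fold_with(folder)?),
            ),
        })
    }
}

/// Doubles every literal, failing on overflow.
struct Doubler;
impl Folder for Doubler {
    type Error = &'static str;
    fn fold_num(&mut self, n: i64) -> Result<i64, &'static str> {
        n.checked_mul(2).ok_or("overflow")
    }
}

fn main() {
    let e = Expr::Add(Box::new(Expr::Num(1)), Box::new(Expr::Num(2)));
    let folded = e.try_fold_with(&mut Doubler).unwrap();
    assert_eq!(folded, Expr::Add(Box::new(Expr::Num(2)), Box::new(Expr::Num(4))));
}
```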
diff --git a/compiler/rustc_middle/src/mir/type_visitable.rs b/compiler/rustc_middle/src/mir/type_visitable.rs
new file mode 100644
index 000000000..6a0801cb0
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/type_visitable.rs
@@ -0,0 +1,190 @@
+//! `TypeVisitable` implementations for MIR types
+
+use super::*;
+use crate::ty;
+
+impl<'tcx> TypeVisitable<'tcx> for Terminator<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ use crate::mir::TerminatorKind::*;
+
+ match self.kind {
+ SwitchInt { ref discr, switch_ty, .. } => {
+ discr.visit_with(visitor)?;
+ switch_ty.visit_with(visitor)
+ }
+ Drop { ref place, .. } => place.visit_with(visitor),
+ DropAndReplace { ref place, ref value, .. } => {
+ place.visit_with(visitor)?;
+ value.visit_with(visitor)
+ }
+ Yield { ref value, .. } => value.visit_with(visitor),
+ Call { ref func, ref args, ref destination, .. } => {
+ destination.visit_with(visitor)?;
+ func.visit_with(visitor)?;
+ args.visit_with(visitor)
+ }
+ Assert { ref cond, ref msg, .. } => {
+ cond.visit_with(visitor)?;
+ use AssertKind::*;
+ match msg {
+ BoundsCheck { ref len, ref index } => {
+ len.visit_with(visitor)?;
+ index.visit_with(visitor)
+ }
+ Overflow(_, l, r) => {
+ l.visit_with(visitor)?;
+ r.visit_with(visitor)
+ }
+ OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
+ op.visit_with(visitor)
+ }
+ ResumedAfterReturn(_) | ResumedAfterPanic(_) => ControlFlow::CONTINUE,
+ }
+ }
+ InlineAsm { ref operands, .. } => operands.visit_with(visitor),
+ Goto { .. }
+ | Resume
+ | Abort
+ | Return
+ | GeneratorDrop
+ | Unreachable
+ | FalseEdge { .. }
+ | FalseUnwind { .. } => ControlFlow::CONTINUE,
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for GeneratorKind {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Place<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.local.visit_with(visitor)?;
+ self.projection.visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Rvalue<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ use crate::mir::Rvalue::*;
+ match *self {
+ Use(ref op) => op.visit_with(visitor),
+ CopyForDeref(ref place) => {
+ let op = &Operand::Copy(*place);
+ op.visit_with(visitor)
+ }
+ Repeat(ref op, _) => op.visit_with(visitor),
+ ThreadLocalRef(did) => did.visit_with(visitor),
+ Ref(region, _, ref place) => {
+ region.visit_with(visitor)?;
+ place.visit_with(visitor)
+ }
+ AddressOf(_, ref place) => place.visit_with(visitor),
+ Len(ref place) => place.visit_with(visitor),
+ Cast(_, ref op, ty) => {
+ op.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ BinaryOp(_, box (ref lhs, ref rhs)) | CheckedBinaryOp(_, box (ref lhs, ref rhs)) => {
+ lhs.visit_with(visitor)?;
+ rhs.visit_with(visitor)
+ }
+ UnaryOp(_, ref val) => val.visit_with(visitor),
+ Discriminant(ref place) => place.visit_with(visitor),
+ NullaryOp(_, ty) => ty.visit_with(visitor),
+ Aggregate(ref kind, ref fields) => {
+ match **kind {
+ AggregateKind::Array(ty) => {
+ ty.visit_with(visitor)?;
+ }
+ AggregateKind::Tuple => {}
+ AggregateKind::Adt(_, _, substs, user_ty, _) => {
+ substs.visit_with(visitor)?;
+ user_ty.visit_with(visitor)?;
+ }
+ AggregateKind::Closure(_, substs) => {
+ substs.visit_with(visitor)?;
+ }
+ AggregateKind::Generator(_, substs, _) => {
+ substs.visit_with(visitor)?;
+ }
+ }
+ fields.visit_with(visitor)
+ }
+ ShallowInitBox(ref op, ty) => {
+ op.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Operand<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match *self {
+ Operand::Copy(ref place) | Operand::Move(ref place) => place.visit_with(visitor),
+ Operand::Constant(ref c) => c.visit_with(visitor),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for PlaceElem<'tcx> {
+ fn visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> ControlFlow<Vs::BreakTy> {
+ use crate::mir::ProjectionElem::*;
+
+ match self {
+ Field(_, ty) => ty.visit_with(visitor),
+ Index(v) => v.visit_with(visitor),
+ _ => ControlFlow::CONTINUE,
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Field {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for GeneratorSavedLocal {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx, R: Idx, C: Idx> TypeVisitable<'tcx> for BitMatrix<R, C> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Constant<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.literal.visit_with(visitor)?;
+ self.user_ty.visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ConstantKind<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_mir_const(*self)
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ConstantKind<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match *self {
+ ConstantKind::Ty(c) => c.visit_with(visitor),
+ ConstantKind::Val(_, t) => t.visit_with(visitor),
+ }
+ }
+}
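The visiting counterpart of the fold sketch: `?` on `ControlFlow` propagates a `Break` out of the traversal immediately, which is what every `visit_with(...)?` call above relies on. A self-contained sketch (toy tree, not MIR):

```rust
use std::ops::ControlFlow;

enum Tree {
    Leaf(i32),
    Node(Vec<Tree>),
}

impl Tree {
    /// Walk the tree, letting the visitor short-circuit with `Break`.
    fn visit_with<B>(&self, f: &mut impl FnMut(i32) -> ControlFlow<B>) -> ControlFlow<B> {
        match self {
            Tree::Leaf(v) => f(*v),
            Tree::Node(children) => {
                for child in children {
                    // `?` on `ControlFlow` propagates a `Break` immediately,
                    // exactly like the `visit_with(...)?` calls above.
                    child.visit_with(f)?;
                }
                ControlFlow::Continue(())
            }
        }
    }
}

fn main() {
    let t = Tree::Node(vec![Tree::Leaf(1), Tree::Node(vec![Tree::Leaf(-2), Tree::Leaf(3)])]);
    // Find the first negative leaf without walking the rest of the tree.
    let first_negative = t.visit_with(&mut |v| {
        if v < 0 { ControlFlow::Break(v) } else { ControlFlow::Continue(()) }
    });
    assert_eq!(first_negative, ControlFlow::Break(-2));
}
```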
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
new file mode 100644
index 000000000..891608764
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -0,0 +1,1330 @@
+//! # The MIR Visitor
+//!
+//! ## Overview
+//!
+//! There are two visitors, one for immutable and one for mutable references,
+//! but both are generated by the following macro. The code is written according
+//! to the following conventions:
+//!
+//! - introduce a `visit_foo` and a `super_foo` method for every MIR type
+//! - `visit_foo`, by default, calls `super_foo`
+//! - `super_foo`, by default, destructures the `foo` and calls `visit_foo`
+//!
+//! This allows you, as a user, to override `visit_foo` for the types you are
+//! interested in, and to invoke (within that method) `self.super_foo` to get
+//! the default behavior. Just as in an OO language, you should never call
+//! `super` methods ordinarily except in that circumstance.
+//!
+//! For the most part, we do not destructure things external to the
+//! MIR, e.g., types, spans, etc., but simply visit them and stop. This
+//! avoids duplication with other visitors like `TypeFoldable`.
+//!
+//! ## Updating
+//!
+//! The code is written in a very deliberate style intended to minimize
+//! the chance of things being overlooked. You'll notice that we always
+//! use pattern matching to reference fields and we ensure that all
+//! matches are exhaustive.
+//!
+//! For example, the `super_basic_block_data` method begins like this:
+//!
+//! ```ignore (pseudo-rust)
+//! fn super_basic_block_data(
+//! &mut self,
+//! block: BasicBlock,
+//! data: & $($mutability)? BasicBlockData<'tcx>
+//! ) {
+//! let BasicBlockData {
+//! statements,
+//! terminator,
+//! is_cleanup: _
+//! } = *data;
+//!
+//! for statement in statements {
+//! self.visit_statement(block, statement);
+//! }
+//!
+//! ...
+//! }
+//! ```
+//!
+//! Here we used `let BasicBlockData { <fields> } = *data` deliberately,
+//! rather than writing `data.statements` in the body. This is because if one
+//! adds a new field to `BasicBlockData`, one will be forced to revise this code,
+//! and hence one will (hopefully) invoke the correct visit methods (if any).
+//!
+//! For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS.
+//! That means you never write `..` to skip over fields, nor do you write `_`
+//! to skip over variants in a `match`.
+//!
+//! The only place that `_` is acceptable is to match a field (or
+//! variant argument) that does not require visiting, as in
+//! `is_cleanup` above.
+
+use crate::mir::*;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{CanonicalUserTypeAnnotation, Ty};
+use rustc_span::Span;
+
+macro_rules! make_mir_visitor {
+ ($visitor_trait_name:ident, $($mutability:ident)?) => {
+ pub trait $visitor_trait_name<'tcx> {
+ // Override these, and call `self.super_xxx` to revert back to the
+ // default behavior.
+
+ fn visit_body(
+ &mut self,
+ body: &$($mutability)? Body<'tcx>,
+ ) {
+ self.super_body(body);
+ }
+
+ fn visit_basic_block_data(
+ &mut self,
+ block: BasicBlock,
+ data: & $($mutability)? BasicBlockData<'tcx>,
+ ) {
+ self.super_basic_block_data(block, data);
+ }
+
+ fn visit_source_scope_data(
+ &mut self,
+ scope_data: & $($mutability)? SourceScopeData<'tcx>,
+ ) {
+ self.super_source_scope_data(scope_data);
+ }
+
+ fn visit_statement(
+ &mut self,
+ statement: & $($mutability)? Statement<'tcx>,
+ location: Location,
+ ) {
+ self.super_statement(statement, location);
+ }
+
+ fn visit_assign(
+ &mut self,
+ place: & $($mutability)? Place<'tcx>,
+ rvalue: & $($mutability)? Rvalue<'tcx>,
+ location: Location,
+ ) {
+ self.super_assign(place, rvalue, location);
+ }
+
+ fn visit_terminator(
+ &mut self,
+ terminator: & $($mutability)? Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_assert_message(
+ &mut self,
+ msg: & $($mutability)? AssertMessage<'tcx>,
+ location: Location,
+ ) {
+ self.super_assert_message(msg, location);
+ }
+
+ fn visit_rvalue(
+ &mut self,
+ rvalue: & $($mutability)? Rvalue<'tcx>,
+ location: Location,
+ ) {
+ self.super_rvalue(rvalue, location);
+ }
+
+ fn visit_operand(
+ &mut self,
+ operand: & $($mutability)? Operand<'tcx>,
+ location: Location,
+ ) {
+ self.super_operand(operand, location);
+ }
+
+ fn visit_ascribe_user_ty(
+ &mut self,
+ place: & $($mutability)? Place<'tcx>,
+ variance: $(& $mutability)? ty::Variance,
+ user_ty: & $($mutability)? UserTypeProjection,
+ location: Location,
+ ) {
+ self.super_ascribe_user_ty(place, variance, user_ty, location);
+ }
+
+ fn visit_coverage(
+ &mut self,
+ coverage: & $($mutability)? Coverage,
+ location: Location,
+ ) {
+ self.super_coverage(coverage, location);
+ }
+
+ fn visit_retag(
+ &mut self,
+ kind: $(& $mutability)? RetagKind,
+ place: & $($mutability)? Place<'tcx>,
+ location: Location,
+ ) {
+ self.super_retag(kind, place, location);
+ }
+
+ fn visit_place(
+ &mut self,
+ place: & $($mutability)? Place<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ self.super_place(place, context, location);
+ }
+
+ visit_place_fns!($($mutability)?);
+
+ fn visit_constant(
+ &mut self,
+ constant: & $($mutability)? Constant<'tcx>,
+ location: Location,
+ ) {
+ self.super_constant(constant, location);
+ }
+
+ fn visit_span(
+ &mut self,
+ span: $(& $mutability)? Span,
+ ) {
+ self.super_span(span);
+ }
+
+ fn visit_source_info(
+ &mut self,
+ source_info: & $($mutability)? SourceInfo,
+ ) {
+ self.super_source_info(source_info);
+ }
+
+ fn visit_ty(
+ &mut self,
+ ty: $(& $mutability)? Ty<'tcx>,
+ _: TyContext,
+ ) {
+ self.super_ty(ty);
+ }
+
+ fn visit_user_type_projection(
+ &mut self,
+ ty: & $($mutability)? UserTypeProjection,
+ ) {
+ self.super_user_type_projection(ty);
+ }
+
+ fn visit_user_type_annotation(
+ &mut self,
+ index: UserTypeAnnotationIndex,
+ ty: & $($mutability)? CanonicalUserTypeAnnotation<'tcx>,
+ ) {
+ self.super_user_type_annotation(index, ty);
+ }
+
+ fn visit_region(
+ &mut self,
+ region: $(& $mutability)? ty::Region<'tcx>,
+ _: Location,
+ ) {
+ self.super_region(region);
+ }
+
+ fn visit_const(
+ &mut self,
+ constant: $(& $mutability)? ty::Const<'tcx>,
+ _: Location,
+ ) {
+ self.super_const(constant);
+ }
+
+ fn visit_substs(
+ &mut self,
+ substs: & $($mutability)? SubstsRef<'tcx>,
+ _: Location,
+ ) {
+ self.super_substs(substs);
+ }
+
+ fn visit_local_decl(
+ &mut self,
+ local: Local,
+ local_decl: & $($mutability)? LocalDecl<'tcx>,
+ ) {
+ self.super_local_decl(local, local_decl);
+ }
+
+ fn visit_var_debug_info(
+ &mut self,
+ var_debug_info: & $($mutability)? VarDebugInfo<'tcx>,
+ ) {
+ self.super_var_debug_info(var_debug_info);
+ }
+
+ fn visit_local(
+ &mut self,
+ _local: $(& $mutability)? Local,
+ _context: PlaceContext,
+ _location: Location,
+ ) {}
+
+ fn visit_source_scope(
+ &mut self,
+ scope: $(& $mutability)? SourceScope,
+ ) {
+ self.super_source_scope(scope);
+ }
+
+ // The `super_xxx` methods comprise the default behavior and are
+ // not meant to be overridden.
+
+ fn super_body(
+ &mut self,
+ body: &$($mutability)? Body<'tcx>,
+ ) {
+ let span = body.span;
+ if let Some(gen) = &$($mutability)? body.generator {
+ if let Some(yield_ty) = $(& $mutability)? gen.yield_ty {
+ self.visit_ty(
+ yield_ty,
+ TyContext::YieldTy(SourceInfo::outermost(span))
+ );
+ }
+ }
+
+ // for best performance, we want to use an iterator rather
+ // than a for-loop, to avoid calling `body::Body::invalidate` for
+ // each basic block.
+ #[allow(unused_macro_rules)]
+ macro_rules! basic_blocks {
+ (mut) => (body.basic_blocks_mut().iter_enumerated_mut());
+ () => (body.basic_blocks().iter_enumerated());
+ }
+ for (bb, data) in basic_blocks!($($mutability)?) {
+ self.visit_basic_block_data(bb, data);
+ }
+
+ for scope in &$($mutability)? body.source_scopes {
+ self.visit_source_scope_data(scope);
+ }
+
+ self.visit_ty(
+ $(& $mutability)? body.return_ty(),
+ TyContext::ReturnTy(SourceInfo::outermost(body.span))
+ );
+
+ for local in body.local_decls.indices() {
+ self.visit_local_decl(local, & $($mutability)? body.local_decls[local]);
+ }
+
+ #[allow(unused_macro_rules)]
+ macro_rules! type_annotations {
+ (mut) => (body.user_type_annotations.iter_enumerated_mut());
+ () => (body.user_type_annotations.iter_enumerated());
+ }
+
+ for (index, annotation) in type_annotations!($($mutability)?) {
+ self.visit_user_type_annotation(
+ index, annotation
+ );
+ }
+
+ for var_debug_info in &$($mutability)? body.var_debug_info {
+ self.visit_var_debug_info(var_debug_info);
+ }
+
+ self.visit_span($(& $mutability)? body.span);
+
+ for const_ in &$($mutability)? body.required_consts {
+ let location = START_BLOCK.start_location();
+ self.visit_constant(const_, location);
+ }
+ }
+
+ fn super_basic_block_data(&mut self,
+ block: BasicBlock,
+ data: & $($mutability)? BasicBlockData<'tcx>) {
+ let BasicBlockData {
+ statements,
+ terminator,
+ is_cleanup: _
+ } = data;
+
+ let mut index = 0;
+ for statement in statements {
+ let location = Location { block, statement_index: index };
+ self.visit_statement(statement, location);
+ index += 1;
+ }
+
+ if let Some(terminator) = terminator {
+ let location = Location { block, statement_index: index };
+ self.visit_terminator(terminator, location);
+ }
+ }
+
+ fn super_source_scope_data(
+ &mut self,
+ scope_data: & $($mutability)? SourceScopeData<'tcx>,
+ ) {
+ let SourceScopeData {
+ span,
+ parent_scope,
+ inlined,
+ inlined_parent_scope,
+ local_data: _,
+ } = scope_data;
+
+ self.visit_span($(& $mutability)? *span);
+ if let Some(parent_scope) = parent_scope {
+ self.visit_source_scope($(& $mutability)? *parent_scope);
+ }
+ if let Some((callee, callsite_span)) = inlined {
+ let location = START_BLOCK.start_location();
+
+ self.visit_span($(& $mutability)? *callsite_span);
+
+ let ty::Instance { def: callee_def, substs: callee_substs } = callee;
+ match callee_def {
+ ty::InstanceDef::Item(_def_id) => {}
+
+ ty::InstanceDef::Intrinsic(_def_id) |
+ ty::InstanceDef::VTableShim(_def_id) |
+ ty::InstanceDef::ReifyShim(_def_id) |
+ ty::InstanceDef::Virtual(_def_id, _) |
+ ty::InstanceDef::ClosureOnceShim { call_once: _def_id, track_caller: _ } |
+ ty::InstanceDef::DropGlue(_def_id, None) => {}
+
+ ty::InstanceDef::FnPtrShim(_def_id, ty) |
+ ty::InstanceDef::DropGlue(_def_id, Some(ty)) |
+ ty::InstanceDef::CloneShim(_def_id, ty) => {
+ // FIXME(eddyb) use a better `TyContext` here.
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+ }
+ self.visit_substs(callee_substs, location);
+ }
+ if let Some(inlined_parent_scope) = inlined_parent_scope {
+ self.visit_source_scope($(& $mutability)? *inlined_parent_scope);
+ }
+ }
+
+ fn super_statement(&mut self,
+ statement: & $($mutability)? Statement<'tcx>,
+ location: Location) {
+ let Statement {
+ source_info,
+ kind,
+ } = statement;
+
+ self.visit_source_info(source_info);
+ match kind {
+ StatementKind::Assign(
+ box (place, rvalue)
+ ) => {
+ self.visit_assign(place, rvalue, location);
+ }
+ StatementKind::FakeRead(box (_, place)) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+ location
+ );
+ }
+ StatementKind::SetDiscriminant { place, .. } => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::SetDiscriminant),
+ location
+ );
+ }
+ StatementKind::Deinit(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Deinit),
+ location
+ )
+ }
+ StatementKind::StorageLive(local) => {
+ self.visit_local(
+ $(& $mutability)? *local,
+ PlaceContext::NonUse(NonUseContext::StorageLive),
+ location
+ );
+ }
+ StatementKind::StorageDead(local) => {
+ self.visit_local(
+ $(& $mutability)? *local,
+ PlaceContext::NonUse(NonUseContext::StorageDead),
+ location
+ );
+ }
+ StatementKind::Retag(kind, place) => {
+ self.visit_retag($(& $mutability)? *kind, place, location);
+ }
+ StatementKind::AscribeUserType(
+ box (place, user_ty),
+ variance
+ ) => {
+ self.visit_ascribe_user_ty(place, $(& $mutability)? *variance, user_ty, location);
+ }
+ StatementKind::Coverage(coverage) => {
+ self.visit_coverage(
+ coverage,
+ location
+ )
+ }
+ StatementKind::CopyNonOverlapping(box crate::mir::CopyNonOverlapping{
+ src,
+ dst,
+ count,
+ }) => {
+ self.visit_operand(src, location);
+ self.visit_operand(dst, location);
+ self.visit_operand(count, location)
+ }
+ StatementKind::Nop => {}
+ }
+ }
+
+ fn super_assign(&mut self,
+ place: &$($mutability)? Place<'tcx>,
+ rvalue: &$($mutability)? Rvalue<'tcx>,
+ location: Location) {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Store),
+ location
+ );
+ self.visit_rvalue(rvalue, location);
+ }
+
+ fn super_terminator(&mut self,
+ terminator: &$($mutability)? Terminator<'tcx>,
+ location: Location) {
+ let Terminator { source_info, kind } = terminator;
+
+ self.visit_source_info(source_info);
+ match kind {
+ TerminatorKind::Goto { .. } |
+ TerminatorKind::Resume |
+ TerminatorKind::Abort |
+ TerminatorKind::GeneratorDrop |
+ TerminatorKind::Unreachable |
+ TerminatorKind::FalseEdge { .. } |
+ TerminatorKind::FalseUnwind { .. } => {}
+
+ TerminatorKind::Return => {
+ // `return` logically moves from the return place `_0`. Note that the place
+ // cannot be changed by any visitor, though.
+ let $($mutability)? local = RETURN_PLACE;
+ self.visit_local(
+ $(& $mutability)? local,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Move),
+ location,
+ );
+
+ assert_eq!(
+ local,
+ RETURN_PLACE,
+ "`MutVisitor` tried to mutate return place of `return` terminator"
+ );
+ }
+
+ TerminatorKind::SwitchInt {
+ discr,
+ switch_ty,
+ targets: _
+ } => {
+ self.visit_operand(discr, location);
+ self.visit_ty($(& $mutability)? *switch_ty, TyContext::Location(location));
+ }
+
+ TerminatorKind::Drop {
+ place,
+ target: _,
+ unwind: _,
+ } => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Drop),
+ location
+ );
+ }
+
+ TerminatorKind::DropAndReplace {
+ place,
+ value,
+ target: _,
+ unwind: _,
+ } => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Drop),
+ location
+ );
+ self.visit_operand(value, location);
+ }
+
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ target: _,
+ cleanup: _,
+ from_hir_call: _,
+ fn_span: _
+ } => {
+ self.visit_operand(func, location);
+ for arg in args {
+ self.visit_operand(arg, location);
+ }
+ self.visit_place(
+ destination,
+ PlaceContext::MutatingUse(MutatingUseContext::Call),
+ location
+ );
+ }
+
+ TerminatorKind::Assert {
+ cond,
+ expected: _,
+ msg,
+ target: _,
+ cleanup: _,
+ } => {
+ self.visit_operand(cond, location);
+ self.visit_assert_message(msg, location);
+ }
+
+ TerminatorKind::Yield {
+ value,
+ resume: _,
+ resume_arg,
+ drop: _,
+ } => {
+ self.visit_operand(value, location);
+ self.visit_place(
+ resume_arg,
+ PlaceContext::MutatingUse(MutatingUseContext::Yield),
+ location,
+ );
+ }
+
+ TerminatorKind::InlineAsm {
+ template: _,
+ operands,
+ options: _,
+ line_spans: _,
+ destination: _,
+ cleanup: _,
+ } => {
+ for op in operands {
+ match op {
+ InlineAsmOperand::In { value, .. } => {
+ self.visit_operand(value, location);
+ }
+ InlineAsmOperand::Out { place: Some(place), .. } => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::AsmOutput),
+ location,
+ );
+ }
+ InlineAsmOperand::InOut { in_value, out_place, .. } => {
+ self.visit_operand(in_value, location);
+ if let Some(out_place) = out_place {
+ self.visit_place(
+ out_place,
+ PlaceContext::MutatingUse(MutatingUseContext::AsmOutput),
+ location,
+ );
+ }
+ }
+ InlineAsmOperand::Const { value }
+ | InlineAsmOperand::SymFn { value } => {
+ self.visit_constant(value, location);
+ }
+ InlineAsmOperand::Out { place: None, .. }
+ | InlineAsmOperand::SymStatic { def_id: _ } => {}
+ }
+ }
+ }
+ }
+ }
+
+ fn super_assert_message(&mut self,
+ msg: & $($mutability)? AssertMessage<'tcx>,
+ location: Location) {
+ use crate::mir::AssertKind::*;
+ match msg {
+ BoundsCheck { len, index } => {
+ self.visit_operand(len, location);
+ self.visit_operand(index, location);
+ }
+ Overflow(_, l, r) => {
+ self.visit_operand(l, location);
+ self.visit_operand(r, location);
+ }
+ OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
+ self.visit_operand(op, location);
+ }
+ ResumedAfterReturn(_) | ResumedAfterPanic(_) => {
+ // Nothing to visit
+ }
+ }
+ }
+
+ fn super_rvalue(&mut self,
+ rvalue: & $($mutability)? Rvalue<'tcx>,
+ location: Location) {
+ match rvalue {
+ Rvalue::Use(operand) => {
+ self.visit_operand(operand, location);
+ }
+
+ Rvalue::Repeat(value, _) => {
+ self.visit_operand(value, location);
+ }
+
+ Rvalue::ThreadLocalRef(_) => {}
+
+ Rvalue::Ref(r, bk, path) => {
+ self.visit_region($(& $mutability)? *r, location);
+ let ctx = match bk {
+ BorrowKind::Shared => PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::SharedBorrow
+ ),
+ BorrowKind::Shallow => PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::ShallowBorrow
+ ),
+ BorrowKind::Unique => PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::UniqueBorrow
+ ),
+ BorrowKind::Mut { .. } =>
+ PlaceContext::MutatingUse(MutatingUseContext::Borrow),
+ };
+ self.visit_place(path, ctx, location);
+ }
+ Rvalue::CopyForDeref(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+ location
+ );
+ }
+
+ Rvalue::AddressOf(m, path) => {
+ let ctx = match m {
+ Mutability::Mut => PlaceContext::MutatingUse(
+ MutatingUseContext::AddressOf
+ ),
+ Mutability::Not => PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::AddressOf
+ ),
+ };
+ self.visit_place(path, ctx, location);
+ }
+
+ Rvalue::Len(path) => {
+ self.visit_place(
+ path,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+ location
+ );
+ }
+
+ Rvalue::Cast(_cast_kind, operand, ty) => {
+ self.visit_operand(operand, location);
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+
+ Rvalue::BinaryOp(_bin_op, box(lhs, rhs))
+ | Rvalue::CheckedBinaryOp(_bin_op, box(lhs, rhs)) => {
+ self.visit_operand(lhs, location);
+ self.visit_operand(rhs, location);
+ }
+
+ Rvalue::UnaryOp(_un_op, op) => {
+ self.visit_operand(op, location);
+ }
+
+ Rvalue::Discriminant(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+ location
+ );
+ }
+
+ Rvalue::NullaryOp(_op, ty) => {
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+
+ Rvalue::Aggregate(kind, operands) => {
+ let kind = &$($mutability)? **kind;
+ match kind {
+ AggregateKind::Array(ty) => {
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+ AggregateKind::Tuple => {
+ }
+ AggregateKind::Adt(
+ _adt_def,
+ _variant_index,
+ substs,
+ _user_substs,
+ _active_field_index
+ ) => {
+ self.visit_substs(substs, location);
+ }
+ AggregateKind::Closure(
+ _,
+ closure_substs
+ ) => {
+ self.visit_substs(closure_substs, location);
+ }
+ AggregateKind::Generator(
+ _,
+ generator_substs,
+ _movability,
+ ) => {
+ self.visit_substs(generator_substs, location);
+ }
+ }
+
+ for operand in operands {
+ self.visit_operand(operand, location);
+ }
+ }
+
+ Rvalue::ShallowInitBox(operand, ty) => {
+ self.visit_operand(operand, location);
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+ }
+ }
+
+ fn super_operand(&mut self,
+ operand: & $($mutability)? Operand<'tcx>,
+ location: Location) {
+ match operand {
+ Operand::Copy(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+ location
+ );
+ }
+ Operand::Move(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Move),
+ location
+ );
+ }
+ Operand::Constant(constant) => {
+ self.visit_constant(constant, location);
+ }
+ }
+ }
+
+ fn super_ascribe_user_ty(&mut self,
+ place: & $($mutability)? Place<'tcx>,
+ _variance: $(& $mutability)? ty::Variance,
+ user_ty: & $($mutability)? UserTypeProjection,
+ location: Location) {
+ self.visit_place(
+ place,
+ PlaceContext::NonUse(NonUseContext::AscribeUserTy),
+ location
+ );
+ self.visit_user_type_projection(user_ty);
+ }
+
+ fn super_coverage(&mut self,
+ _coverage: & $($mutability)? Coverage,
+ _location: Location) {
+ }
+
+ fn super_retag(&mut self,
+ _kind: $(& $mutability)? RetagKind,
+ place: & $($mutability)? Place<'tcx>,
+ location: Location) {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Retag),
+ location,
+ );
+ }
+
+ fn super_local_decl(&mut self,
+ local: Local,
+ local_decl: & $($mutability)? LocalDecl<'tcx>) {
+ let LocalDecl {
+ mutability: _,
+ ty,
+ user_ty,
+ source_info,
+ internal: _,
+ local_info: _,
+ is_block_tail: _,
+ } = local_decl;
+
+ self.visit_ty($(& $mutability)? *ty, TyContext::LocalDecl {
+ local,
+ source_info: *source_info,
+ });
+ if let Some(user_ty) = user_ty {
+ for (user_ty, _) in & $($mutability)? user_ty.contents {
+ self.visit_user_type_projection(user_ty);
+ }
+ }
+ self.visit_source_info(source_info);
+ }
+
+ fn super_var_debug_info(
+ &mut self,
+ var_debug_info: & $($mutability)? VarDebugInfo<'tcx>
+ ) {
+ let VarDebugInfo {
+ name: _,
+ source_info,
+ value,
+ } = var_debug_info;
+
+ self.visit_source_info(source_info);
+ let location = START_BLOCK.start_location();
+ match value {
+ VarDebugInfoContents::Const(c) => self.visit_constant(c, location),
+ VarDebugInfoContents::Place(place) =>
+ self.visit_place(
+ place,
+ PlaceContext::NonUse(NonUseContext::VarDebugInfo),
+ location
+ ),
+ }
+ }
+
+ fn super_source_scope(
+ &mut self,
+ _scope: $(& $mutability)? SourceScope
+ ) {}
+
+ fn super_constant(
+ &mut self,
+ constant: & $($mutability)? Constant<'tcx>,
+ location: Location
+ ) {
+ let Constant {
+ span,
+ user_ty,
+ literal,
+ } = constant;
+
+ self.visit_span($(& $mutability)? *span);
+ drop(user_ty); // no visit method for this
+ match literal {
+ ConstantKind::Ty(ct) => self.visit_const($(& $mutability)? *ct, location),
+ ConstantKind::Val(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
+ }
+ }
+
+ fn super_span(&mut self, _span: $(& $mutability)? Span) {
+ }
+
+ fn super_source_info(&mut self, source_info: & $($mutability)? SourceInfo) {
+ let SourceInfo {
+ span,
+ scope,
+ } = source_info;
+
+ self.visit_span($(& $mutability)? *span);
+ self.visit_source_scope($(& $mutability)? *scope);
+ }
+
+ fn super_user_type_projection(
+ &mut self,
+ _ty: & $($mutability)? UserTypeProjection,
+ ) {
+ }
+
+ fn super_user_type_annotation(
+ &mut self,
+ _index: UserTypeAnnotationIndex,
+ ty: & $($mutability)? CanonicalUserTypeAnnotation<'tcx>,
+ ) {
+ self.visit_span($(& $mutability)? ty.span);
+ self.visit_ty($(& $mutability)? ty.inferred_ty, TyContext::UserTy(ty.span));
+ }
+
+ fn super_ty(&mut self, _ty: $(& $mutability)? Ty<'tcx>) {
+ }
+
+ fn super_region(&mut self, _region: $(& $mutability)? ty::Region<'tcx>) {
+ }
+
+ fn super_const(&mut self, _const: $(& $mutability)? ty::Const<'tcx>) {
+ }
+
+ fn super_substs(&mut self, _substs: & $($mutability)? SubstsRef<'tcx>) {
+ }
+
+ // Convenience methods
+
+ fn visit_location(
+ &mut self,
+ body: &$($mutability)? Body<'tcx>,
+ location: Location
+ ) {
+ #[allow(unused_macro_rules)]
+ macro_rules! basic_blocks {
+ (mut) => (body.basic_blocks_mut());
+ () => (body.basic_blocks());
+ }
+ let basic_block = & $($mutability)? basic_blocks!($($mutability)?)[location.block];
+ if basic_block.statements.len() == location.statement_index {
+ if let Some(ref $($mutability)? terminator) = basic_block.terminator {
+ self.visit_terminator(terminator, location)
+ }
+ } else {
+ let statement = & $($mutability)?
+ basic_block.statements[location.statement_index];
+ self.visit_statement(statement, location)
+ }
+ }
+ }
+ }
+}
+
+macro_rules! visit_place_fns {
+ (mut) => {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn super_place(
+ &mut self,
+ place: &mut Place<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ self.visit_local(&mut place.local, context, location);
+
+ if let Some(new_projection) = self.process_projection(&place.projection, location) {
+ place.projection = self.tcx().intern_place_elems(&new_projection);
+ }
+ }
+
+ fn process_projection<'a>(
+ &mut self,
+ projection: &'a [PlaceElem<'tcx>],
+ location: Location,
+ ) -> Option<Vec<PlaceElem<'tcx>>> {
+ let mut projection = Cow::Borrowed(projection);
+
+ for i in 0..projection.len() {
+ if let Some(&elem) = projection.get(i) {
+ if let Some(elem) = self.process_projection_elem(elem, location) {
+ // This converts the borrowed projection into `Cow::Owned(_)` and returns a
+ // clone of the projection so we can mutate and reintern later.
+ let vec = projection.to_mut();
+ vec[i] = elem;
+ }
+ }
+ }
+
+ match projection {
+ Cow::Borrowed(_) => None,
+ Cow::Owned(vec) => Some(vec),
+ }
+ }
+
+ fn process_projection_elem(
+ &mut self,
+ elem: PlaceElem<'tcx>,
+ location: Location,
+ ) -> Option<PlaceElem<'tcx>> {
+ match elem {
+ PlaceElem::Index(local) => {
+ let mut new_local = local;
+ self.visit_local(
+ &mut new_local,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+ location,
+ );
+
+ if new_local == local { None } else { Some(PlaceElem::Index(new_local)) }
+ }
+ PlaceElem::Field(field, ty) => {
+ let mut new_ty = ty;
+ self.visit_ty(&mut new_ty, TyContext::Location(location));
+ if ty != new_ty { Some(PlaceElem::Field(field, new_ty)) } else { None }
+ }
+ PlaceElem::Deref
+ | PlaceElem::ConstantIndex { .. }
+ | PlaceElem::Subslice { .. }
+ | PlaceElem::Downcast(..) => None,
+ }
+ }
+ };
+
+ () => {
+ fn visit_projection(
+ &mut self,
+ place_ref: PlaceRef<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ self.super_projection(place_ref, context, location);
+ }
+
+ fn visit_projection_elem(
+ &mut self,
+ local: Local,
+ proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ self.super_projection_elem(local, proj_base, elem, context, location);
+ }
+
+ fn super_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
+ let mut context = context;
+
+ if !place.projection.is_empty() {
+ if context.is_use() {
+ // ^ Only change the context if it is a real use, not a "use" in debuginfo.
+ context = if context.is_mutating_use() {
+ PlaceContext::MutatingUse(MutatingUseContext::Projection)
+ } else {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+ };
+ }
+ }
+
+ self.visit_local(place.local, context, location);
+
+ self.visit_projection(place.as_ref(), context, location);
+ }
+
+ fn super_projection(
+ &mut self,
+ place_ref: PlaceRef<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ for (base, elem) in place_ref.iter_projections().rev() {
+ let base_proj = base.projection;
+ self.visit_projection_elem(place_ref.local, base_proj, elem, context, location);
+ }
+ }
+
+ fn super_projection_elem(
+ &mut self,
+ _local: Local,
+ _proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ _context: PlaceContext,
+ location: Location,
+ ) {
+ match elem {
+ ProjectionElem::Field(_field, ty) => {
+ self.visit_ty(ty, TyContext::Location(location));
+ }
+ ProjectionElem::Index(local) => {
+ self.visit_local(
+ local,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+ location,
+ );
+ }
+ ProjectionElem::Deref
+ | ProjectionElem::Subslice { from: _, to: _, from_end: _ }
+ | ProjectionElem::ConstantIndex { offset: _, min_length: _, from_end: _ }
+ | ProjectionElem::Downcast(_, _) => {}
+ }
+ }
+ };
+}
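The `Cow`-based copy-on-write trick in `process_projection` above — allocate a fresh `Vec` only if some element actually changes, and signal "unchanged" with `None` — works in miniature like this (hypothetical helper, plain `std`):

```rust
use std::borrow::Cow;

/// Scan a borrowed slice, cloning it into an owned `Vec` only on the first
/// change, and report `None` when nothing changed.
fn process(items: &[i32], tweak: impl Fn(i32) -> Option<i32>) -> Option<Vec<i32>> {
    let mut items = Cow::Borrowed(items);
    for i in 0..items.len() {
        if let Some(new) = tweak(items[i]) {
            // First change: `to_mut` clones the slice into `Cow::Owned`.
            items.to_mut()[i] = new;
        }
    }
    match items {
        Cow::Borrowed(_) => None,     // untouched: no allocation happened
        Cow::Owned(vec) => Some(vec), // at least one element was replaced
    }
}

fn main() {
    assert_eq!(process(&[1, 2, 3], |_| None), None);
    assert_eq!(process(&[1, 2, 3], |x| (x == 2).then(|| 20)), Some(vec![1, 20, 3]));
}
```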
+
+make_mir_visitor!(Visitor,);
+make_mir_visitor!(MutVisitor, mut);
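A minimal sketch of the `visit_foo`/`super_foo` convention the module docs describe, with a visitor that overrides a single hook and keeps the default traversal (toy types, not the generated MIR visitors):

```rust
enum Item {
    Value(u32),
    Group(Vec<Item>),
}

/// `visit_*` is the override point and delegates to `super_*`,
/// which owns the recursion.
trait ItemVisitor {
    fn visit_item(&mut self, item: &Item) {
        self.super_item(item);
    }
    fn visit_value(&mut self, value: u32) {
        self.super_value(value);
    }

    // `super_*` destructures and recurses; not meant to be overridden.
    fn super_item(&mut self, item: &Item) {
        match item {
            Item::Value(v) => self.visit_value(*v),
            Item::Group(items) => {
                for item in items {
                    self.visit_item(item);
                }
            }
        }
    }
    fn super_value(&mut self, _value: u32) {}
}

/// Overrides one hook and keeps the default traversal for everything else.
struct Sum(u32);
impl ItemVisitor for Sum {
    fn visit_value(&mut self, value: u32) {
        self.0 += value;
        self.super_value(value); // keep the default behavior, as the docs advise
    }
}

fn main() {
    let tree = Item::Group(vec![Item::Value(1), Item::Group(vec![Item::Value(2)])]);
    let mut sum = Sum(0);
    sum.visit_item(&tree);
    assert_eq!(sum.0, 3);
}
```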
+
+pub trait MirVisitable<'tcx> {
+ fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>);
+}
+
+impl<'tcx> MirVisitable<'tcx> for Statement<'tcx> {
+ fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>) {
+ visitor.visit_statement(self, location)
+ }
+}
+
+impl<'tcx> MirVisitable<'tcx> for Terminator<'tcx> {
+ fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>) {
+ visitor.visit_terminator(self, location)
+ }
+}
+
+impl<'tcx> MirVisitable<'tcx> for Option<Terminator<'tcx>> {
+ fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>) {
+ visitor.visit_terminator(self.as_ref().unwrap(), location)
+ }
+}
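The `MirVisitable` idea — erase statements and terminators behind one `apply` hook so a caller can visit whatever sits at a location through a single `&mut dyn` visitor — can be sketched with toy types (illustrative, not the real `Visitor` trait):

```rust
trait Visitor {
    fn visit_stmt(&mut self, s: &str);
    fn visit_term(&mut self, t: &str);
}

/// One entry point per visitable thing; each impl picks the right hook.
trait Visitable {
    fn apply(&self, visitor: &mut dyn Visitor);
}

struct Stmt(&'static str);
struct Term(&'static str);

impl Visitable for Stmt {
    fn apply(&self, visitor: &mut dyn Visitor) {
        visitor.visit_stmt(self.0);
    }
}
impl Visitable for Term {
    fn apply(&self, visitor: &mut dyn Visitor) {
        visitor.visit_term(self.0);
    }
}

struct Logger(Vec<String>);
impl Visitor for Logger {
    fn visit_stmt(&mut self, s: &str) {
        self.0.push(format!("stmt: {s}"));
    }
    fn visit_term(&mut self, t: &str) {
        self.0.push(format!("term: {t}"));
    }
}

fn main() {
    // A "block": some statements then a terminator, erased to one type.
    let block: Vec<Box<dyn Visitable>> = vec![Box::new(Stmt("x = 1")), Box::new(Term("return"))];
    let mut log = Logger(Vec::new());
    for piece in &block {
        piece.apply(&mut log);
    }
    assert_eq!(log.0, vec!["stmt: x = 1", "term: return"]);
}
```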
+
+/// Extra information passed to `visit_ty` and friends to give context
+/// about where the type etc appears.
+#[derive(Debug)]
+pub enum TyContext {
+ LocalDecl {
+ /// The index of the local variable we are visiting.
+ local: Local,
+
+ /// The source location where this local variable was declared.
+ source_info: SourceInfo,
+ },
+
+ /// The inferred type of a user type annotation.
+ UserTy(Span),
+
+ /// The return type of the function.
+ ReturnTy(SourceInfo),
+
+ YieldTy(SourceInfo),
+
+ /// A type found at some location.
+ Location(Location),
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum NonMutatingUseContext {
+ /// Being inspected in some way, like loading a len.
+ Inspect,
+ /// Consumed as part of an operand.
+ Copy,
+ /// Consumed as part of an operand.
+ Move,
+ /// Shared borrow.
+ SharedBorrow,
+ /// Shallow borrow.
+ ShallowBorrow,
+ /// Unique borrow.
+ UniqueBorrow,
+ /// AddressOf for *const pointer.
+ AddressOf,
+ /// Used as base for another place, e.g., `x` in `x.y`. Will not mutate the place.
+ /// For example, the projection `x.y` is not marked as a mutation in these cases:
+ /// ```ignore (illustrative)
+ /// z = x.y;
+ /// f(&x.y);
+ /// ```
+ Projection,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum MutatingUseContext {
+ /// Appears as LHS of an assignment.
+ Store,
+ /// Appears on `SetDiscriminant`
+ SetDiscriminant,
+ /// Appears on `Deinit`
+ Deinit,
+ /// Output operand of an inline assembly block.
+ AsmOutput,
+ /// Destination of a call.
+ Call,
+ /// Destination of a yield.
+ Yield,
+ /// Being dropped.
+ Drop,
+ /// Mutable borrow.
+ Borrow,
+ /// AddressOf for *mut pointer.
+ AddressOf,
+ /// Used as base for another place, e.g., `x` in `x.y`. Could potentially mutate the place.
+ /// For example, the projection `x.y` is marked as a mutation in these cases:
+ /// ```ignore (illustrative)
+ /// x.y = ...;
+ /// f(&mut x.y);
+ /// ```
+ Projection,
+ /// Retagging, a "Stacked Borrows" shadow state operation
+ Retag,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum NonUseContext {
+ /// Starting a storage live range.
+ StorageLive,
+ /// Ending a storage live range.
+ StorageDead,
+ /// User type annotation assertions for NLL.
+ AscribeUserTy,
+ /// The data of a user variable, for debug info.
+ VarDebugInfo,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum PlaceContext {
+ NonMutatingUse(NonMutatingUseContext),
+ MutatingUse(MutatingUseContext),
+ NonUse(NonUseContext),
+}
+
+impl PlaceContext {
+ /// Returns `true` if this place context represents a drop.
+ #[inline]
+ pub fn is_drop(&self) -> bool {
+ matches!(self, PlaceContext::MutatingUse(MutatingUseContext::Drop))
+ }
+
+ /// Returns `true` if this place context represents a borrow.
+ pub fn is_borrow(&self) -> bool {
+ matches!(
+ self,
+ PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::SharedBorrow
+ | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::UniqueBorrow
+ ) | PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+ )
+ }
+
+ /// Returns `true` if this place context represents a storage live or storage dead marker.
+ #[inline]
+ pub fn is_storage_marker(&self) -> bool {
+ matches!(
+ self,
+ PlaceContext::NonUse(NonUseContext::StorageLive | NonUseContext::StorageDead)
+ )
+ }
+
+ /// Returns `true` if this place context represents a use that potentially changes the value.
+ #[inline]
+ pub fn is_mutating_use(&self) -> bool {
+ matches!(self, PlaceContext::MutatingUse(..))
+ }
+
+ /// Returns `true` if this place context represents a use.
+ #[inline]
+ pub fn is_use(&self) -> bool {
+ !matches!(self, PlaceContext::NonUse(..))
+ }
+
+ /// Returns `true` if this place context represents an assignment statement.
+ pub fn is_place_assignment(&self) -> bool {
+ matches!(
+ self,
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Store
+ | MutatingUseContext::Call
+ | MutatingUseContext::AsmOutput,
+ )
+ )
+ }
+}
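A sketch of the `matches!`-based predicate style used by `PlaceContext` above, over a toy context enum (hypothetical variants, for illustration only): each query stays a single expression, and or-patterns group the variants that satisfy it.

```rust
#[derive(Copy, Clone, Debug, PartialEq)]
enum Ctx {
    Read,
    Write,
    Borrow { mutable: bool },
    NonUse,
}

impl Ctx {
    /// Anything that is not a non-use counts as a use.
    fn is_use(&self) -> bool {
        !matches!(self, Ctx::NonUse)
    }
    /// Or-patterns group all the variants that can change the value.
    fn is_mutating_use(&self) -> bool {
        matches!(self, Ctx::Write | Ctx::Borrow { mutable: true })
    }
}

fn main() {
    assert!(Ctx::Write.is_use() && Ctx::Write.is_mutating_use());
    assert!(Ctx::Borrow { mutable: false }.is_use());
    assert!(!Ctx::Borrow { mutable: false }.is_mutating_use());
    assert!(!Ctx::NonUse.is_use());
}
```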
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
new file mode 100644
index 000000000..d8483e7e4
--- /dev/null
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -0,0 +1,2060 @@
+//! Defines the various compiler queries.
+//!
+//! For more information on the query system, see
+//! ["Queries: demand-driven compilation"](https://rustc-dev-guide.rust-lang.org/query.html).
+//! This chapter includes instructions for adding new queries.
+
+// Each of these queries corresponds to a function pointer field in the
+// `Providers` struct for requesting a value of that type, and a method
+// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
+// which memoizes and does dep-graph tracking, wrapping around the actual
+// `Providers` that the driver creates (using several `rustc_*` crates).
+//
+// The result type of each query must implement `Clone`, and additionally
+// `ty::query::values::Value`, which produces an appropriate placeholder
+// (error) value if the query resulted in a query cycle.
+// Queries marked with `fatal_cycle` do not need the latter implementation,
+// as they will raise a fatal error on query cycles instead.
+rustc_queries! {
+ query trigger_delay_span_bug(key: DefId) -> () {
+ desc { "trigger a delay span bug" }
+ }
+
+ query resolutions(_: ()) -> &'tcx ty::ResolverOutputs {
+ eval_always
+ no_hash
+ desc { "get the resolver outputs" }
+ }
+
+ query resolver_for_lowering(_: ()) -> &'tcx Steal<ty::ResolverAstLowering> {
+ eval_always
+ no_hash
+ desc { "get the resolver for lowering" }
+ }
+
+ /// Return the span for a definition.
+ /// Contrary to `def_span` below, this query returns the full absolute span of the definition.
+ /// This span is meant for dep-tracking rather than diagnostics. It should not be used outside
+ /// of rustc_middle::hir::source_map.
+ query source_span(key: LocalDefId) -> Span {
+ desc { "get the source span" }
+ }
+
+ /// Represents crate as a whole (as distinct from the top-level crate module).
+ /// If you call `hir_crate` (e.g., indirectly by calling `tcx.hir().krate()`),
+ /// we will have to assume that any change means that you need to be recompiled.
+ /// This is because the `hir_crate` query gives you access to all other items.
+ /// To avoid this fate, do not call `tcx.hir().krate()`; instead,
+ /// prefer wrappers like `tcx.visit_all_items_in_krate()`.
+ query hir_crate(key: ()) -> Crate<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "get the crate HIR" }
+ }
+
+ /// All items in the crate.
+ query hir_crate_items(_: ()) -> rustc_middle::hir::ModuleItems {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "get HIR crate items" }
+ }
+
+ /// The items in a module.
+ ///
+ /// This can be conveniently accessed by `tcx.hir().visit_item_likes_in_module`.
+ /// Avoid calling this query directly.
+ query hir_module_items(key: LocalDefId) -> rustc_middle::hir::ModuleItems {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "HIR module items in `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+
+ /// Gives access to the HIR node for the HIR owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query hir_owner(key: LocalDefId) -> Option<crate::hir::Owner<'tcx>> {
+ desc { |tcx| "HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Gives access to the HIR ID for the given `LocalDefId` owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query local_def_id_to_hir_id(key: LocalDefId) -> hir::HirId {
+ desc { |tcx| "HIR ID of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Gives access to the HIR node's parent for the HIR owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query hir_owner_parent(key: LocalDefId) -> hir::HirId {
+ desc { |tcx| "HIR parent of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Gives access to the HIR nodes and bodies inside the HIR owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query hir_owner_nodes(key: LocalDefId) -> hir::MaybeOwner<&'tcx hir::OwnerNodes<'tcx>> {
+ desc { |tcx| "HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Gives access to the HIR attributes inside the HIR owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query hir_attrs(key: LocalDefId) -> &'tcx hir::AttributeMap<'tcx> {
+ desc { |tcx| "HIR owner attributes in `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Computes the `DefId` of the corresponding const parameter in case the `key` is a
+ /// const argument and returns `None` otherwise.
+ ///
+ /// ```ignore (incomplete)
+ /// let a = foo::<7>();
+ /// // ^ Calling `opt_const_param_of` for this argument,
+ ///
+ /// fn foo<const N: usize>()
+ /// // ^ returns this `DefId`.
+ ///
+ /// fn bar() {
+ /// // ^ While calling `opt_const_param_of` for other bodies returns `None`.
+ /// }
+ /// ```
+ // It looks like caching this query on disk actually slightly
+ // worsened performance in #74376.
+ //
+ // Once const generics are more prevalently used, we might want to
+ // consider only caching calls returning `Some`.
+ query opt_const_param_of(key: LocalDefId) -> Option<DefId> {
+ desc { |tcx| "computing the optional const parameter of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Given the def_id of a const-generic parameter, computes the associated default const
+ /// parameter. e.g. `fn example<const N: usize=3>` called on `N` would return `3`.
+ query const_param_default(param: DefId) -> ty::Const<'tcx> {
+ desc { |tcx| "compute const default for a given parameter `{}`", tcx.def_path_str(param) }
+ cache_on_disk_if { param.is_local() }
+ separate_provide_extern
+ }
+
+ /// Returns the [`Ty`][rustc_middle::ty::Ty] of the given [`DefId`]. If the [`DefId`] points
+ /// to an alias, it will "skip" this alias to return the aliased type.
+ ///
+ /// [`DefId`]: rustc_hir::def_id::DefId
+ query type_of(key: DefId) -> Ty<'tcx> {
+ desc { |tcx|
+ "{action} `{path}`",
+ action = {
+ use rustc_hir::def::DefKind;
+ match tcx.def_kind(key) {
+ DefKind::TyAlias => "expanding type alias",
+ DefKind::TraitAlias => "expanding trait alias",
+ _ => "computing type of",
+ }
+ },
+ path = tcx.def_path_str(key),
+ }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query analysis(key: ()) -> Result<(), ErrorGuaranteed> {
+ eval_always
+ desc { "running analysis passes on this crate" }
+ }
+
+ /// This query checks the fulfillment of collected lint expectations.
+ /// All lint emitting queries have to be done before this is executed
+ /// to ensure that all expectations can be fulfilled.
+ ///
+ /// This is an extra query to enable other drivers (like rustdoc) to
+ /// only execute a small subset of the `analysis` query, while allowing
+ /// lints to be expected. In rustc, this query will be executed as part of
+ /// the `analysis` query and doesn't have to be called a second time.
+ ///
+ /// Tools can additionally pass in a tool filter. That will restrict the
+ /// expectations to only trigger for lints starting with the listed tool
+ /// name. This is useful for cases where not all linting code from rustc
+ /// was called. With the default `None` all registered lints will also
+ /// be checked for expectation fulfillment.
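+ ///
+ /// A hypothetical driver-side sketch (illustrative only; assumes `rustc_span::sym`
+ /// is in scope):
+ ///
+ /// ```ignore (illustrative)
+ /// // Check every registered expectation:
+ /// tcx.check_expectations(None);
+ /// // Only check expectations for `clippy::*` lints:
+ /// tcx.check_expectations(Some(sym::clippy));
+ /// ```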
+ query check_expectations(key: Option<Symbol>) -> () {
+ eval_always
+ desc { "checking lint expectations (RFC 2383)" }
+ }
+
+ /// Maps from the `DefId` of an item (trait/struct/enum/fn) to its
+ /// associated generics.
+ query generics_of(key: DefId) -> ty::Generics {
+ desc { |tcx| "computing generics of `{}`", tcx.def_path_str(key) }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
+ /// predicates (where-clauses) that must be proven true in order
+ /// to reference it. This is almost always the "predicates query"
+ /// that you want.
+ ///
+ /// `predicates_of` builds on `predicates_defined_on` -- in fact,
+ /// it is almost always the same as that query, except for the
+ /// case of traits. For traits, `predicates_of` contains
+ /// an additional `Self: Trait<...>` predicate that users don't
+ /// actually write. This reflects the fact that to invoke the
+ /// trait (e.g., via `Default::default`) you must supply types
+ /// that actually implement the trait. (However, this extra
+ /// predicate gets in the way of some checks, which are intended
+ /// to operate over only the actual where-clauses written by the
+ /// user.)
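+ ///
+ /// For example (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// trait Default: Sized {
+ ///     fn default() -> Self;
+ /// }
+ /// // `predicates_of(Default)` contains the written `Self: Sized` bound
+ /// // plus the implicit `Self: Default` predicate described above.
+ /// ```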
+ query predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing predicates of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ }
+
+ /// Returns the list of bounds that can be used for
+ /// `SelectionCandidate::ProjectionCandidate(_)` and
+ /// `ProjectionTyCandidate::TraitDef`.
+ /// Specifically, these are the bounds written on the trait's associated type
+ /// definition, or those after the `impl` keyword for an opaque type.
+ ///
+ /// ```ignore (incomplete)
+ /// type X: Bound + 'lt
+ /// // ^^^^^^^^^^^
+ /// impl Debug + Display
+ /// // ^^^^^^^^^^^^^^^
+ /// ```
+ ///
+ /// `key` is the `DefId` of the associated type or opaque type.
+ ///
+ /// Bounds from the parent (e.g. with nested impl trait) are not included.
+ query explicit_item_bounds(key: DefId) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+ desc { |tcx| "finding item bounds for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Elaborated version of the predicates from `explicit_item_bounds`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// trait MyTrait {
+ /// type MyAType: Eq + ?Sized;
+ /// }
+ /// ```
+ ///
+ /// `explicit_item_bounds` returns `[<Self as MyTrait>::MyAType: Eq]`,
+ /// and `item_bounds` returns
+ /// ```text
+ /// [
+ /// <Self as MyTrait>::MyAType: Eq,
+ /// <Self as MyTrait>::MyAType: PartialEq<<Self as MyTrait>::MyAType>
+ /// ]
+ /// ```
+ ///
+ /// Bounds from the parent (e.g. with nested impl trait) are not included.
+ query item_bounds(key: DefId) -> &'tcx ty::List<ty::Predicate<'tcx>> {
+ desc { |tcx| "elaborating item bounds for `{}`", tcx.def_path_str(key) }
+ }
+
+ query native_libraries(_: CrateNum) -> Vec<NativeLib> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "looking up the native libraries of a linked crate" }
+ separate_provide_extern
+ }
+
+ query lint_levels(_: ()) -> LintLevelMap {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "computing the lint levels for items in this crate" }
+ }
+
+ query parent_module_from_def_id(key: LocalDefId) -> LocalDefId {
+ eval_always
+ desc { |tcx| "parent module of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ query expn_that_defined(key: DefId) -> rustc_span::ExpnId {
+ desc { |tcx| "expansion that defined `{}`", tcx.def_path_str(key) }
+ separate_provide_extern
+ }
+
+ query is_panic_runtime(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "checking if the crate is_panic_runtime" }
+ separate_provide_extern
+ }
+
+ /// Fetch the THIR for a given body. If typeck for that body failed, returns an empty `Thir`.
+ query thir_body(key: ty::WithOptConstParam<LocalDefId>)
+ -> Result<(&'tcx Steal<thir::Thir<'tcx>>, thir::ExprId), ErrorGuaranteed>
+ {
+ // Perf tests revealed that hashing THIR is inefficient (see #85729).
+ no_hash
+ desc { |tcx| "building THIR for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+ }
+
+ /// Create a THIR tree for debugging.
+ query thir_tree(key: ty::WithOptConstParam<LocalDefId>) -> String {
+ no_hash
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "constructing THIR tree for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+ }
+
+ /// Set of all the `DefId`s in this crate that have MIR associated with
+ /// them. This includes all the body owners, but also things like struct
+ /// constructors.
+ query mir_keys(_: ()) -> rustc_data_structures::fx::FxIndexSet<LocalDefId> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "getting a list of all mir_keys" }
+ }
+
+ /// Maps `DefId`s that have an associated `mir::Body` to the result
+ /// of the MIR const-checking pass. This is the set of qualifs in
+ /// the final value of a `const`.
+ query mir_const_qualif(key: DefId) -> mir::ConstQualifs {
+ desc { |tcx| "const checking `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query mir_const_qualif_const_arg(
+ key: (LocalDefId, DefId)
+ ) -> mir::ConstQualifs {
+ desc {
+ |tcx| "const checking the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// Fetch the MIR for a given `DefId` right after it's built - this includes
+ /// unreachable code.
+ query mir_built(key: ty::WithOptConstParam<LocalDefId>) -> &'tcx Steal<mir::Body<'tcx>> {
+ desc { |tcx| "building MIR for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+ }
+
+ /// Fetch the MIR for a given `DefId` up to the point where it is
+ /// ready for const qualification.
+ ///
+ /// See the README for the `mir` module for details.
+ query mir_const(key: ty::WithOptConstParam<LocalDefId>) -> &'tcx Steal<mir::Body<'tcx>> {
+ desc {
+ |tcx| "processing MIR for {}`{}`",
+ if key.const_param_did.is_some() { "the const argument " } else { "" },
+ tcx.def_path_str(key.did.to_def_id()),
+ }
+ no_hash
+ }
+
+ /// Try to build an abstract representation of the given constant.
+ query thir_abstract_const(
+ key: DefId
+ ) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> {
+ desc {
+ |tcx| "building an abstract representation for {}", tcx.def_path_str(key),
+ }
+ separate_provide_extern
+ }
+ /// Try to build an abstract representation of the given constant.
+ query thir_abstract_const_of_const_arg(
+ key: (LocalDefId, DefId)
+ ) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> {
+ desc {
+ |tcx|
+ "building an abstract representation for the const argument {}",
+ tcx.def_path_str(key.0.to_def_id()),
+ }
+ }
+
+ query try_unify_abstract_consts(
+ key: ty::ParamEnvAnd<'tcx, (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>)>
+ ) -> bool {
+ desc {
+ |tcx| "trying to unify the generic constants {} and {}",
+ tcx.def_path_str(key.value.0.def.did), tcx.def_path_str(key.value.1.def.did)
+ }
+ }
+
+ query mir_drops_elaborated_and_const_checked(
+ key: ty::WithOptConstParam<LocalDefId>
+ ) -> &'tcx Steal<mir::Body<'tcx>> {
+ no_hash
+ desc { |tcx| "elaborating drops for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+ }
+
+ query mir_for_ctfe(
+ key: DefId
+ ) -> &'tcx mir::Body<'tcx> {
+ desc { |tcx| "caching mir of `{}` for CTFE", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query mir_for_ctfe_of_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::Body<'tcx> {
+ desc {
+ |tcx| "MIR for CTFE of the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ query mir_promoted(key: ty::WithOptConstParam<LocalDefId>) ->
+ (
+ &'tcx Steal<mir::Body<'tcx>>,
+ &'tcx Steal<IndexVec<mir::Promoted, mir::Body<'tcx>>>
+ ) {
+ no_hash
+ desc {
+ |tcx| "processing {}`{}`",
+ if key.const_param_did.is_some() { "the const argument " } else { "" },
+ tcx.def_path_str(key.did.to_def_id()),
+ }
+ }
+
+ query symbols_for_closure_captures(
+ key: (LocalDefId, LocalDefId)
+ ) -> Vec<rustc_span::Symbol> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc {
+ |tcx| "symbols for captures of closure `{}` in `{}`",
+ tcx.def_path_str(key.1.to_def_id()),
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// MIR after our optimization passes have run. This is MIR that is ready
+ /// for codegen. This is also the only query that can fetch non-local MIR, at present.
+ query optimized_mir(key: DefId) -> &'tcx mir::Body<'tcx> {
+ desc { |tcx| "optimizing MIR for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Returns coverage summary info for a function, after executing the `InstrumentCoverage`
+ /// MIR pass (assuming the -Cinstrument-coverage option is enabled).
+ query coverageinfo(key: ty::InstanceDef<'tcx>) -> mir::CoverageInfo {
+ desc { |tcx| "retrieving coverage info from MIR for `{}`", tcx.def_path_str(key.def_id()) }
+ storage(ArenaCacheSelector<'tcx>)
+ }
+
+ /// Returns the `CodeRegion`s for a function that has instrumented coverage, in case the
+ /// function was optimized out before codegen and before being added to the coverage map.
+ query covered_code_regions(key: DefId) -> Vec<&'tcx mir::coverage::CodeRegion> {
+ desc {
+ |tcx| "retrieving the covered `CodeRegion`s, if instrumented, for `{}`",
+ tcx.def_path_str(key)
+ }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { key.is_local() }
+ }
+
+ /// The `DefId` is the `DefId` of the containing MIR body. Promoteds do not have their own
+ /// `DefId`. This function returns all promoteds in the specified body. The body references
+ /// promoteds by the `DefId` and the `mir::Promoted` index. This is necessary, because
+ /// after inlining a body may refer to promoteds from other bodies. In that case you still
+ /// need to use the `DefId` of the original body.
+ query promoted_mir(key: DefId) -> &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> {
+ desc { |tcx| "optimizing promoted MIR for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query promoted_mir_of_const_arg(
+ key: (LocalDefId, DefId)
+ ) -> &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> {
+ desc {
+ |tcx| "optimizing promoted MIR for the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id()),
+ }
+ }
+
+ /// Erases regions from `ty` to yield a new type.
+ /// Normally you would just use `tcx.erase_regions(value)`,
+ /// which uses this query as a kind of cache.
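+ ///
+ /// A minimal sketch of the wrapper (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// let erased = tcx.erase_regions(ty); // e.g. `&'a u32` becomes `&'erased u32`
+ /// ```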
+ query erase_regions_ty(ty: Ty<'tcx>) -> Ty<'tcx> {
+ // This query is not expected to have input -- as a result, it
+ // is not a good candidate for "replay" because it is essentially a
+ // pure function of its input (and hence the expectation is that
+ // no caller would be green **apart** from just these
+ // queries). Making it anonymous avoids hashing the result, which
+ // may save a bit of time.
+ anon
+ desc { "erasing regions from `{:?}`", ty }
+ }
+
+ query wasm_import_module_map(_: CrateNum) -> FxHashMap<DefId, String> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "wasm import module map" }
+ }
+
+ /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
+ /// predicates (where-clauses) directly defined on it. This is
+ /// equal to the `explicit_predicates_of` predicates plus the
+ /// `inferred_outlives_of` predicates.
+ query predicates_defined_on(key: DefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing predicates of `{}`", tcx.def_path_str(key) }
+ }
+
+ /// Returns everything that looks like a predicate written explicitly
+ /// by the user on a trait item.
+ ///
+ /// Traits are unusual, because predicates on associated types are
+ /// converted into bounds on that type for backwards compatibility:
+ ///
+ /// ```ignore (illustrative)
+ /// trait X where Self::U: Copy { type U; }
+ /// ```
+ ///
+ /// becomes
+ ///
+ /// ```ignore (illustrative)
+ /// trait X { type U: Copy; }
+ /// ```
+ ///
+ /// `explicit_predicates_of` and `explicit_item_bounds` will then take
+ /// the appropriate subsets of the predicates here.
+ query trait_explicit_predicates_and_bounds(key: LocalDefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing explicit predicates of trait `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Returns the predicates written explicitly by the user.
+ query explicit_predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing explicit predicates of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Returns the inferred outlives predicates (e.g., for `struct
+ /// Foo<'a, T> { x: &'a T }`, this would return `T: 'a`).
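+ ///
+ /// The same example as a doc sketch (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// struct Foo<'a, T> {
+ ///     x: &'a T,
+ /// }
+ /// // `inferred_outlives_of(Foo)` yields `[T: 'a]`.
+ /// ```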
+ query inferred_outlives_of(key: DefId) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+ desc { |tcx| "computing inferred outlives predicates of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Maps from the `DefId` of a trait to the list of
+ /// super-predicates. This is a subset of the full list of
+ /// predicates. We store these in a separate map because we must
+ /// evaluate them even during type conversion, often before the
+ /// full predicates are available (note that supertraits have
+ /// additional acyclicity requirements).
+ query super_predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing the super predicates of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// The `Option<Ident>` is the name of an associated type. If it is `None`, then this query
+ /// returns the full set of predicates. If `Some(Ident)`, then the query returns only the
+ /// subset of super-predicates that reference traits that define the given associated type.
+ /// This is used to avoid cycles in resolving types like `T::Item`.
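+ ///
+ /// A hypothetical sketch of why this matters (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// trait Super { type Item; }
+ /// trait Sub: Super {}
+ /// fn f<T: Sub>(x: T::Item) {}
+ /// //           ^ resolving `T::Item` only needs the super-predicates of `Sub`
+ /// //             that reference traits defining an `Item` associated type.
+ /// ```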
+ query super_predicates_that_define_assoc_type(key: (DefId, Option<rustc_span::symbol::Ident>)) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing the super traits of `{}`{}",
+ tcx.def_path_str(key.0),
+ if let Some(assoc_name) = key.1 { format!(" with associated type name `{}`", assoc_name) } else { "".to_string() },
+ }
+ }
+
+ /// To avoid cycles within the predicates of a single item we compute
+ /// per-type-parameter predicates for resolving `T::AssocTy`.
+ query type_param_predicates(key: (DefId, LocalDefId, rustc_span::symbol::Ident)) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing the bounds for type parameter `{}`", tcx.hir().ty_param_name(key.1) }
+ }
+
+ query trait_def(key: DefId) -> ty::TraitDef {
+ desc { |tcx| "computing trait definition for `{}`", tcx.def_path_str(key) }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query adt_def(key: DefId) -> ty::AdtDef<'tcx> {
+ desc { |tcx| "computing ADT definition for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query adt_destructor(key: DefId) -> Option<ty::Destructor> {
+ desc { |tcx| "computing `Drop` impl for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ // The cycle error here should be reported as an error by `check_representable`.
+ // We consider the type as Sized in the meanwhile to avoid
+ // further errors (done in impl Value for AdtSizedConstraint).
+ // Use `cycle_delay_bug` to delay the cycle error here to be emitted later
+ // in case we accidentally otherwise don't emit an error.
+ query adt_sized_constraint(
+ key: DefId
+ ) -> AdtSizedConstraint<'tcx> {
+ desc { |tcx| "computing `Sized` constraints for `{}`", tcx.def_path_str(key) }
+ cycle_delay_bug
+ }
+
+ query adt_dtorck_constraint(
+ key: DefId
+ ) -> Result<&'tcx DropckConstraint<'tcx>, NoSolution> {
+ desc { |tcx| "computing drop-check constraints for `{}`", tcx.def_path_str(key) }
+ }
+
+ /// Returns the constness of the item; use `is_const_fn` to know whether your crate
+ /// actually sees it as const fn (e.g., the const-fn-ness might be unstable and you might
+ /// not have the feature gate active).
+ ///
+ /// **Do not call this function manually.** It is only meant to cache the base data for the
+ /// `is_const_fn` function. Consider using `is_const_fn` or `is_const_fn_raw` instead.
+ query constness(key: DefId) -> hir::Constness {
+ desc { |tcx| "checking if item is const: `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query asyncness(key: DefId) -> hir::IsAsync {
+ desc { |tcx| "checking if the function is async: `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Returns `true` if calls to the function may be promoted.
+ ///
+ /// This is either because the function is e.g., a tuple-struct or tuple-variant
+ /// constructor, or because it has the `#[rustc_promotable]` attribute. The attribute should
+ /// be removed in the future in favour of some form of check which figures out whether the
+ /// function does not inspect the bits of any of its arguments (so is essentially just a
+ /// constructor function).
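+ ///
+ /// For example (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// struct Cell(u32); // the tuple-struct constructor `Cell` is promotable
+ /// let x: &'static Cell = &Cell(5); // so this borrow can be promoted
+ /// ```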
+ query is_promotable_const_fn(key: DefId) -> bool {
+ desc { |tcx| "checking if item is promotable: `{}`", tcx.def_path_str(key) }
+ }
+
+ /// Returns `true` if this is a foreign item (i.e., linked via `extern { ... }`).
+ query is_foreign_item(key: DefId) -> bool {
+ desc { |tcx| "checking if `{}` is a foreign item", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Returns `Some(generator_kind)` if the node pointed to by `def_id` is a generator.
+ query generator_kind(def_id: DefId) -> Option<hir::GeneratorKind> {
+ desc { |tcx| "looking up generator kind of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Gets a map with the variance of every item; use `variances_of` instead.
+ query crate_variances(_: ()) -> ty::CrateVariancesMap<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "computing the variances for items in this crate" }
+ }
+
+ /// Maps from the `DefId` of a type or region parameter to its (inferred) variance.
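+ ///
+ /// For example (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// struct Foo<'a, T> {
+ ///     x: &'a T,
+ /// }
+ /// // `variances_of(Foo)` is `[covariant, covariant]`:
+ /// // `Foo<'a, T>` is covariant in both `'a` and `T`.
+ /// ```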
+ query variances_of(def_id: DefId) -> &'tcx [ty::Variance] {
+ desc { |tcx| "computing the variances of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Maps from the `DefId` of a type to its (inferred) outlives predicates.
+ query inferred_outlives_crate(_: ()) -> ty::CratePredicatesMap<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "computing the inferred outlives predicates for items in this crate" }
+ }
+
+ /// Maps from an impl/trait `DefId` to a list of the `DefId`s of its items.
+ query associated_item_def_ids(key: DefId) -> &'tcx [DefId] {
+ desc { |tcx| "collecting associated items of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Maps from a trait item to the trait item "descriptor".
+ query associated_item(key: DefId) -> ty::AssocItem {
+ desc { |tcx| "computing associated item data for `{}`", tcx.def_path_str(key) }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Collects the associated items defined on a trait or impl.
+ query associated_items(key: DefId) -> ty::AssocItems<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "collecting associated items of {}", tcx.def_path_str(key) }
+ }
+
+ /// Maps from associated items on a trait to the corresponding associated
+ /// item on the impl specified by `impl_id`.
+ ///
+ /// For example, with the following code
+ ///
+ /// ```
+ /// struct Type {}
+ /// // DefId
+ /// trait Trait { // trait_id
+ /// fn f(); // trait_f
+ /// fn g() {} // trait_g
+ /// }
+ ///
+ /// impl Trait for Type { // impl_id
+ /// fn f() {} // impl_f
+ /// fn g() {} // impl_g
+ /// }
+ /// ```
+ ///
+ /// The map returned for `tcx.impl_item_implementor_ids(impl_id)` would be
+ /// `{ trait_f: impl_f, trait_g: impl_g }`.
+ query impl_item_implementor_ids(impl_id: DefId) -> FxHashMap<DefId, DefId> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "comparing impl items against trait for {}", tcx.def_path_str(impl_id) }
+ }
+
+ /// Given an `impl_id`, return the trait it implements.
+ /// Return `None` if this is an inherent impl.
+ query impl_trait_ref(impl_id: DefId) -> Option<ty::TraitRef<'tcx>> {
+ desc { |tcx| "computing trait implemented by `{}`", tcx.def_path_str(impl_id) }
+ cache_on_disk_if { impl_id.is_local() }
+ separate_provide_extern
+ }
+ query impl_polarity(impl_id: DefId) -> ty::ImplPolarity {
+ desc { |tcx| "computing implementation polarity of `{}`", tcx.def_path_str(impl_id) }
+ cache_on_disk_if { impl_id.is_local() }
+ separate_provide_extern
+ }
+
+ query issue33140_self_ty(key: DefId) -> Option<ty::Ty<'tcx>> {
+ desc { |tcx| "computing Self type wrt issue #33140 `{}`", tcx.def_path_str(key) }
+ }
+
+ /// Maps a `DefId` of a type to a list of its inherent impls.
+ /// Contains implementations of methods that are inherent to a type.
+ /// Methods in these implementations don't need to be exported.
+ query inherent_impls(key: DefId) -> &'tcx [DefId] {
+ desc { |tcx| "collecting inherent impls for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query incoherent_impls(key: SimplifiedType) -> &'tcx [DefId] {
+ desc { |tcx| "collecting all inherent impls for `{:?}`", key }
+ }
+
+ /// The result of unsafety-checking this `LocalDefId`.
+ query unsafety_check_result(key: LocalDefId) -> &'tcx mir::UnsafetyCheckResult {
+ desc { |tcx| "unsafety-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+ query unsafety_check_result_for_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::UnsafetyCheckResult {
+ desc {
+ |tcx| "unsafety-checking the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// Unsafety-check this `LocalDefId` with THIR unsafeck. This should be
+ /// used with `-Zthir-unsafeck`.
+ query thir_check_unsafety(key: LocalDefId) {
+ desc { |tcx| "unsafety-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+ query thir_check_unsafety_for_const_arg(key: (LocalDefId, DefId)) {
+ desc {
+ |tcx| "unsafety-checking the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// HACK: when evaluated, this reports an "unsafe derive on repr(packed)" error.
+ ///
+ /// Unsafety checking is executed for each method separately, but we only want
+ /// to emit this error once per derive. As there are some impls with multiple
+ /// methods, we use a query for deduplication.
+ query unsafe_derive_on_repr_packed(key: LocalDefId) -> () {
+ desc { |tcx| "processing `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Computes the signature of the function.
+ query fn_sig(key: DefId) -> ty::PolyFnSig<'tcx> {
+ desc { |tcx| "computing function signature of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Performs lint checking for the module.
+ query lint_mod(key: LocalDefId) -> () {
+ desc { |tcx| "linting {}", describe_as_module(key, tcx) }
+ }
+
+ /// Checks the attributes in the module.
+ query check_mod_attrs(key: LocalDefId) -> () {
+ desc { |tcx| "checking attributes in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Checks for uses of unstable APIs in the module.
+ query check_mod_unstable_api_usage(key: LocalDefId) -> () {
+ desc { |tcx| "checking for unstable API usage in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Checks the const bodies in the module for illegal operations (e.g. `if` or `loop`).
+ query check_mod_const_bodies(key: LocalDefId) -> () {
+ desc { |tcx| "checking consts in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Checks the loops in the module.
+ query check_mod_loops(key: LocalDefId) -> () {
+ desc { |tcx| "checking loops in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_naked_functions(key: LocalDefId) -> () {
+ desc { |tcx| "checking naked functions in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_item_types(key: LocalDefId) -> () {
+ desc { |tcx| "checking item types in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_privacy(key: LocalDefId) -> () {
+ desc { |tcx| "checking privacy in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_liveness(key: LocalDefId) -> () {
+ desc { |tcx| "checking liveness of variables in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Return the live symbols in the crate for dead code check.
+ ///
+ /// The second return value maps from ADTs to ignored derived traits (e.g. Debug and Clone) and
+ /// their respective impls (i.e., part of the derive macro).
+ query live_symbols_and_ignored_derived_traits(_: ()) -> (
+ FxHashSet<LocalDefId>,
+ FxHashMap<LocalDefId, Vec<(DefId, DefId)>>
+ ) {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "find live symbols in crate" }
+ }
+
+ query check_mod_deathness(key: LocalDefId) -> () {
+ desc { |tcx| "checking deathness of variables in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_impl_wf(key: LocalDefId) -> () {
+ desc { |tcx| "checking that impls are well-formed in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_type_wf(key: LocalDefId) -> () {
+ desc { |tcx| "checking that types are well-formed in {}", describe_as_module(key, tcx) }
+ }
+
+ query collect_mod_item_types(key: LocalDefId) -> () {
+ desc { |tcx| "collecting item types in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Caches `CoerceUnsized` kinds for impls on custom types.
+ query coerce_unsized_info(key: DefId) -> ty::adjustment::CoerceUnsizedInfo {
+ desc { |tcx| "computing CoerceUnsized info for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query typeck_item_bodies(_: ()) -> () {
+ desc { "type-checking all item bodies" }
+ }
+
+ query typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> {
+ desc { |tcx| "type-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+ query typeck_const_arg(
+ key: (LocalDefId, DefId)
+ ) -> &'tcx ty::TypeckResults<'tcx> {
+ desc {
+ |tcx| "type-checking the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id()),
+ }
+ }
+ query diagnostic_only_typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> {
+ desc { |tcx| "type-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ load_cached(tcx, id) {
+ let typeck_results: Option<ty::TypeckResults<'tcx>> = tcx
+ .on_disk_cache().as_ref()
+ .and_then(|c| c.try_load_query_result(*tcx, id));
+
+ typeck_results.map(|x| &*tcx.arena.alloc(x))
+ }
+ }
+
+ query used_trait_imports(key: LocalDefId) -> &'tcx FxHashSet<LocalDefId> {
+ desc { |tcx| "used_trait_imports `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+
+ query has_typeck_results(def_id: DefId) -> bool {
+ desc { |tcx| "checking whether `{}` has a body", tcx.def_path_str(def_id) }
+ }
+
+ query coherent_trait(def_id: DefId) -> () {
+ desc { |tcx| "coherence checking all impls of trait `{}`", tcx.def_path_str(def_id) }
+ }
+
+ /// Borrow-checks the function body. If this is a closure, returns
+ /// additional requirements that the closure's creator must verify.
+ query mir_borrowck(key: LocalDefId) -> &'tcx mir::BorrowCheckResult<'tcx> {
+ desc { |tcx| "borrow-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if(tcx) { tcx.is_typeck_child(key.to_def_id()) }
+ }
+ query mir_borrowck_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::BorrowCheckResult<'tcx> {
+ desc {
+ |tcx| "borrow-checking the const argument`{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// Gets a complete map from all types to their inherent impls.
+ /// Not meant to be used directly outside of coherence.
+ query crate_inherent_impls(k: ()) -> CrateInherentImpls {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "all inherent impls defined in crate" }
+ }
+
+ /// Checks all types in the crate for overlap in their inherent impls. Reports errors.
+ /// Not meant to be used directly outside of coherence.
+ query crate_inherent_impls_overlap_check(_: ()) -> () {
+ desc { "check for overlap between inherent impls defined in this crate" }
+ }
+
+ /// Checks whether the impl follows the orphan rules, i.e., whether this crate
+ /// is allowed to define the impl at all. Returns `Err` if the check fails.
+ query orphan_check_impl(key: LocalDefId) -> Result<(), ErrorGuaranteed> {
+ desc { |tcx|
+ "checking whether impl `{}` follows the orphan rules",
+ tcx.def_path_str(key.to_def_id()),
+ }
+ }
+
+ /// Check whether the function has any recursion that could cause the inliner to trigger
+ /// a cycle. Returns the call stack causing the cycle. The call stack does not contain the
+ /// current function, just all intermediate functions.
+ query mir_callgraph_reachable(key: (ty::Instance<'tcx>, LocalDefId)) -> bool {
+ fatal_cycle
+ desc { |tcx|
+ "computing if `{}` (transitively) calls `{}`",
+ key.0,
+ tcx.def_path_str(key.1.to_def_id()),
+ }
+ }
+
+ /// Obtain all the calls into other local functions
+ query mir_inliner_callees(key: ty::InstanceDef<'tcx>) -> &'tcx [(DefId, SubstsRef<'tcx>)] {
+ fatal_cycle
+ desc { |tcx|
+ "computing all local function calls in `{}`",
+ tcx.def_path_str(key.def_id()),
+ }
+ }
+
+ /// Evaluates a constant and returns the computed allocation.
+ ///
+ /// **Do not use this** directly, use the `tcx.eval_static_initializer` wrapper.
+ query eval_to_allocation_raw(key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>)
+ -> EvalToAllocationRawResult<'tcx> {
+ desc { |tcx|
+ "const-evaluating + checking `{}`",
+ key.value.display(tcx)
+ }
+ cache_on_disk_if { true }
+ }
+
+ /// Evaluates const items or anonymous constants
+ /// (such as enum variant explicit discriminants or array lengths)
+ /// into a representation suitable for the type system and const generics.
+ ///
+ /// **Do not use this** directly, use one of the following wrappers: `tcx.const_eval_poly`,
+ /// `tcx.const_eval_resolve`, `tcx.const_eval_instance`, or `tcx.const_eval_global_id`.
+ query eval_to_const_value_raw(key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>)
+ -> EvalToConstValueResult<'tcx> {
+ desc { |tcx|
+ "simplifying constant for the type system `{}`",
+ key.value.display(tcx)
+ }
+ cache_on_disk_if { true }
+ }
+
+ /// Evaluate a constant and convert it to a type level constant or
+ /// return `None` if that is not possible.
+ query eval_to_valtree(
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>
+ ) -> EvalToValTreeResult<'tcx> {
+ desc { "evaluating type-level constant" }
+ }
+
+ /// Converts a type level constant value into `ConstValue`
+ query valtree_to_const_val(key: (Ty<'tcx>, ty::ValTree<'tcx>)) -> ConstValue<'tcx> {
+ desc { "converting type-level constant value to mir constant value"}
+ }
+
+ /// Destructures array, ADT or tuple constants into the constants
+ /// of their fields.
+ query destructure_const(key: ty::Const<'tcx>) -> ty::DestructuredConst<'tcx> {
+ desc { "destructuring type level constant"}
+ }
+
+ /// Tries to destructure an `mir::ConstantKind` ADT or array into its variant index
+ /// and its field values.
+ query try_destructure_mir_constant(key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>) -> Option<mir::DestructuredMirConstant<'tcx>> {
+ desc { "destructuring mir constant"}
+ remap_env_constness
+ }
+
+ /// Dereference a constant reference or raw pointer and turn the result into a constant
+ /// again.
+ query deref_mir_constant(
+ key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
+ ) -> mir::ConstantKind<'tcx> {
+ desc { "dereferencing mir constant" }
+ remap_env_constness
+ }
+
+ query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> ConstValue<'tcx> {
+ desc { "get a &core::panic::Location referring to a span" }
+ }
+
+ // FIXME get rid of this with valtrees
+ query lit_to_const(
+ key: LitToConstInput<'tcx>
+ ) -> Result<ty::Const<'tcx>, LitToConstError> {
+ desc { "converting literal to const" }
+ }
+
+ query lit_to_mir_constant(key: LitToConstInput<'tcx>) -> Result<mir::ConstantKind<'tcx>, LitToConstError> {
+ desc { "converting literal to mir constant" }
+ }
+
+ query check_match(key: DefId) {
+ desc { |tcx| "match-checking `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ }
+
+ /// Performs part of the privacy check and computes "access levels".
+ query privacy_access_levels(_: ()) -> &'tcx AccessLevels {
+ eval_always
+ desc { "privacy access levels" }
+ }
+ query check_private_in_public(_: ()) -> () {
+ eval_always
+ desc { "checking for private elements in public interfaces" }
+ }
+
+ query reachable_set(_: ()) -> FxHashSet<LocalDefId> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "reachability" }
+ }
+
+ /// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body;
+ /// in the case of closures, this will be redirected to the enclosing function.
+ query region_scope_tree(def_id: DefId) -> &'tcx crate::middle::region::ScopeTree {
+ desc { |tcx| "computing drop scopes for `{}`", tcx.def_path_str(def_id) }
+ }
+
+ /// Generates a MIR body for the shim.
+ query mir_shims(key: ty::InstanceDef<'tcx>) -> mir::Body<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "generating MIR shim for `{}`", tcx.def_path_str(key.def_id()) }
+ }
+
+ /// The `symbol_name` query provides the symbol name for calling a
+ /// given instance from the local crate. In particular, it will also
+ /// look up the correct symbol name of instances from upstream crates.
+ query symbol_name(key: ty::Instance<'tcx>) -> ty::SymbolName<'tcx> {
+ desc { "computing the symbol for `{}`", key }
+ cache_on_disk_if { true }
+ }
+
+ query opt_def_kind(def_id: DefId) -> Option<DefKind> {
+ desc { |tcx| "looking up definition kind of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Gets the span for the definition.
+ query def_span(def_id: DefId) -> Span {
+ desc { |tcx| "looking up span for `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Gets the span for the identifier of the definition.
+ query def_ident_span(def_id: DefId) -> Option<Span> {
+ desc { |tcx| "looking up span for `{}`'s identifier", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query lookup_stability(def_id: DefId) -> Option<attr::Stability> {
+ desc { |tcx| "looking up stability of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query lookup_const_stability(def_id: DefId) -> Option<attr::ConstStability> {
+ desc { |tcx| "looking up const stability of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query should_inherit_track_caller(def_id: DefId) -> bool {
+ desc { |tcx| "computing should_inherit_track_caller of `{}`", tcx.def_path_str(def_id) }
+ }
+
+ query lookup_deprecation_entry(def_id: DefId) -> Option<DeprecationEntry> {
+ desc { |tcx| "checking whether `{}` is deprecated", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Determines whether an item is annotated with `doc(hidden)`.
+ query is_doc_hidden(def_id: DefId) -> bool {
+ desc { |tcx| "checking whether `{}` is `doc(hidden)`", tcx.def_path_str(def_id) }
+ }
+
+ /// Returns the attributes on the item at `def_id`.
+ ///
+ /// Do not use this directly, use `tcx.get_attrs` instead.
+ query item_attrs(def_id: DefId) -> &'tcx [ast::Attribute] {
+ desc { |tcx| "collecting attributes of `{}`", tcx.def_path_str(def_id) }
+ separate_provide_extern
+ }
+
+ query codegen_fn_attrs(def_id: DefId) -> CodegenFnAttrs {
+ desc { |tcx| "computing codegen attributes of `{}`", tcx.def_path_str(def_id) }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query asm_target_features(def_id: DefId) -> &'tcx FxHashSet<Symbol> {
+ desc { |tcx| "computing target features for inline asm of `{}`", tcx.def_path_str(def_id) }
+ }
+
+ query fn_arg_names(def_id: DefId) -> &'tcx [rustc_span::symbol::Ident] {
+ desc { |tcx| "looking up function parameter names for `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+ /// Gets the rendered value of the specified constant or associated constant.
+ /// Used by rustdoc.
+ query rendered_const(def_id: DefId) -> String {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "rendering constant intializer of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+ query impl_parent(def_id: DefId) -> Option<DefId> {
+ desc { |tcx| "computing specialization parent impl of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query is_ctfe_mir_available(key: DefId) -> bool {
+ desc { |tcx| "checking if item has ctfe mir available: `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query is_mir_available(key: DefId) -> bool {
+ desc { |tcx| "checking if item has mir available: `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query own_existential_vtable_entries(
+ key: ty::PolyExistentialTraitRef<'tcx>
+ ) -> &'tcx [DefId] {
+ desc { |tcx| "finding all existential vtable entries for trait {}", tcx.def_path_str(key.def_id()) }
+ }
+
+ query vtable_entries(key: ty::PolyTraitRef<'tcx>)
+ -> &'tcx [ty::VtblEntry<'tcx>] {
+ desc { |tcx| "finding all vtable entries for trait {}", tcx.def_path_str(key.def_id()) }
+ }
+
+ query vtable_trait_upcasting_coercion_new_vptr_slot(key: (ty::Ty<'tcx>, ty::Ty<'tcx>)) -> Option<usize> {
+ desc { |tcx| "finding the slot within vtable for trait object {} vtable ptr during trait upcasting coercion from {} vtable",
+ key.1, key.0 }
+ }
+
+ query vtable_allocation(key: (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)) -> mir::interpret::AllocId {
+ desc { |tcx| "vtable const allocation for <{} as {}>",
+ key.0,
+ key.1.map(|trait_ref| format!("{}", trait_ref)).unwrap_or("_".to_owned())
+ }
+ }
+
+ query codegen_fulfill_obligation(
+ key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)
+ ) -> Result<&'tcx ImplSource<'tcx, ()>, traits::CodegenObligationError> {
+ cache_on_disk_if { true }
+ desc { |tcx|
+ "checking if `{}` fulfills its obligations",
+ tcx.def_path_str(key.1.def_id())
+ }
+ }
+
+ /// Return all `impl` blocks in the current crate.
+ query all_local_trait_impls(_: ()) -> &'tcx rustc_data_structures::fx::FxIndexMap<DefId, Vec<LocalDefId>> {
+ desc { "local trait impls" }
+ }
+
+ /// Given a trait `trait_id`, return all known `impl` blocks.
+ query trait_impls_of(trait_id: DefId) -> ty::trait_def::TraitImpls {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "trait impls of `{}`", tcx.def_path_str(trait_id) }
+ }
+
+ query specialization_graph_of(trait_id: DefId) -> specialization_graph::Graph {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "building specialization graph of trait `{}`", tcx.def_path_str(trait_id) }
+ cache_on_disk_if { true }
+ }
+ query object_safety_violations(trait_id: DefId) -> &'tcx [traits::ObjectSafetyViolation] {
+ desc { |tcx| "determine object safety of trait `{}`", tcx.def_path_str(trait_id) }
+ }
+
+ /// Gets the ParameterEnvironment for a given item; this environment
+ /// will be in "user-facing" mode, meaning that it is suitable for
+ /// type-checking etc, and it does not normalize specializable
+ /// associated types. This is almost always what you want,
+ /// unless you are doing MIR optimizations, in which case you
+ /// might want to use the `reveal_all()` method to change modes.
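+ ///
+ /// A minimal sketch of the two modes (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// let env = tcx.param_env(def_id); // user-facing mode
+ /// let env = env.with_reveal_all_normalized(tcx); // only for MIR optimizations etc.
+ /// ```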
+ query param_env(def_id: DefId) -> ty::ParamEnv<'tcx> {
+ desc { |tcx| "computing normalized predicates of `{}`", tcx.def_path_str(def_id) }
+ }
+
+ /// Like `param_env`, but returns the `ParamEnv` in `Reveal::All` mode.
+ /// Prefer this over `tcx.param_env(def_id).with_reveal_all_normalized(tcx)`,
+ /// as this method is more efficient.
+ query param_env_reveal_all_normalized(def_id: DefId) -> ty::ParamEnv<'tcx> {
+ desc { |tcx| "computing revealed normalized predicates of `{}`", tcx.def_path_str(def_id) }
+ }
+
+ /// Trait selection queries. These are best used by invoking `ty.is_copy_modulo_regions()`,
+ /// `ty.is_copy()`, etc, since that will prune the environment where possible.
+ query is_copy_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` is `Copy`", env.value }
+ remap_env_constness
+ }
+ /// Query backing `Ty::is_sized`.
+ query is_sized_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` is `Sized`", env.value }
+ remap_env_constness
+ }
+ /// Query backing `Ty::is_freeze`.
+ query is_freeze_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` is freeze", env.value }
+ remap_env_constness
+ }
+ /// Query backing `Ty::is_unpin`.
+ query is_unpin_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` is `Unpin`", env.value }
+ remap_env_constness
+ }
+ /// Query backing `Ty::needs_drop`.
+ query needs_drop_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` needs drop", env.value }
+ remap_env_constness
+ }
+ /// Query backing `Ty::has_significant_drop_raw`.
+ query has_significant_drop_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` has a significant drop", env.value }
+ remap_env_constness
+ }
+
+ /// Query backing `Ty::is_structural_eq_shallow`.
+ ///
+ /// This is only correct for ADTs. Call `is_structural_eq_shallow` to handle all types
+ /// correctly.
+ query has_structural_eq_impls(ty: Ty<'tcx>) -> bool {
+ desc {
+ "computing whether `{:?}` implements `PartialStructuralEq` and `StructuralEq`",
+ ty
+ }
+ }
+
+ /// A list of types where the ADT requires drop if and only if any of
+ /// those types require drop. If the ADT is known to always need drop
+ /// then `Err(AlwaysRequiresDrop)` is returned.
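+ ///
+ /// For example (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// struct Foo<T> {
+ ///     a: u32, // `u32` never needs drop
+ ///     b: T,
+ /// }
+ /// // `adt_drop_tys(Foo)` is `Ok([T])`: `Foo<T>` needs drop iff `T` does.
+ /// ```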
+ query adt_drop_tys(def_id: DefId) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
+ desc { |tcx| "computing when `{}` needs drop", tcx.def_path_str(def_id) }
+ cache_on_disk_if { true }
+ }
+
+ /// A list of types where the ADT requires drop if and only if any of those types
+ /// has significant drop. A type marked with the attribute `rustc_insignificant_dtor`
+ /// is considered to not be significant. A drop is significant if it is implemented
+ /// by the user or does anything that will have any observable behavior (other than
+ /// freeing up memory). If the ADT is known to have a significant destructor then
+ /// `Err(AlwaysRequiresDrop)` is returned.
+ query adt_significant_drop_tys(def_id: DefId) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
+ desc { |tcx| "computing when `{}` has a significant destructor", tcx.def_path_str(def_id) }
+ cache_on_disk_if { false }
+ }
+
+ /// Computes the layout of a type. Note that this implicitly
+ /// executes in "reveal all" mode, and will normalize the input type.
+ query layout_of(
+ key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
+ ) -> Result<ty::layout::TyAndLayout<'tcx>, ty::layout::LayoutError<'tcx>> {
+ desc { "computing layout of `{}`", key.value }
+ remap_env_constness
+ }
+
+ /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
+ ///
+ /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
+ /// instead, where the instance is an `InstanceDef::Virtual`.
+ query fn_abi_of_fn_ptr(
+ key: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>
+ ) -> Result<&'tcx abi::call::FnAbi<'tcx, Ty<'tcx>>, ty::layout::FnAbiError<'tcx>> {
+ desc { "computing call ABI of `{}` function pointers", key.value.0 }
+ remap_env_constness
+ }
+
+ /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
+ /// direct calls to an `fn`.
+ ///
+ /// NB: that includes virtual calls, which are represented by "direct calls"
+ /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
+ query fn_abi_of_instance(
+ key: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>
+ ) -> Result<&'tcx abi::call::FnAbi<'tcx, Ty<'tcx>>, ty::layout::FnAbiError<'tcx>> {
+ desc { "computing call ABI of `{}`", key.value.0 }
+ remap_env_constness
+ }
+
+ query dylib_dependency_formats(_: CrateNum)
+ -> &'tcx [(CrateNum, LinkagePreference)] {
+ desc { "dylib dependency formats of crate" }
+ separate_provide_extern
+ }
+
+ query dependency_formats(_: ()) -> Lrc<crate::middle::dependency_format::Dependencies> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "get the linkage format of all dependencies" }
+ }
+
+ query is_compiler_builtins(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "checking if the crate is_compiler_builtins" }
+ separate_provide_extern
+ }
+ query has_global_allocator(_: CrateNum) -> bool {
+ // This query depends on untracked global state in CStore
+ eval_always
+ fatal_cycle
+ desc { "checking if the crate has_global_allocator" }
+ separate_provide_extern
+ }
+ query has_panic_handler(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "checking if the crate has_panic_handler" }
+ separate_provide_extern
+ }
+ query is_profiler_runtime(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "query a crate is `#![profiler_runtime]`" }
+ separate_provide_extern
+ }
+ query has_ffi_unwind_calls(key: LocalDefId) -> bool {
+ desc { |tcx| "check if `{}` contains FFI-unwind calls", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+ query required_panic_strategy(_: CrateNum) -> Option<PanicStrategy> {
+ fatal_cycle
+ desc { "query a crate's required panic strategy" }
+ separate_provide_extern
+ }
+ query panic_in_drop_strategy(_: CrateNum) -> PanicStrategy {
+ fatal_cycle
+ desc { "query a crate's configured panic-in-drop strategy" }
+ separate_provide_extern
+ }
+ query is_no_builtins(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "test whether a crate has `#![no_builtins]`" }
+ separate_provide_extern
+ }
+ query symbol_mangling_version(_: CrateNum) -> SymbolManglingVersion {
+ fatal_cycle
+ desc { "query a crate's symbol mangling version" }
+ separate_provide_extern
+ }
+
+ query extern_crate(def_id: DefId) -> Option<&'tcx ExternCrate> {
+ eval_always
+ desc { "getting crate's ExternCrateData" }
+ separate_provide_extern
+ }
+
+ query specializes(_: (DefId, DefId)) -> bool {
+ desc { "computing whether impls specialize one another" }
+ }
+ query in_scope_traits_map(_: LocalDefId)
+ -> Option<&'tcx FxHashMap<ItemLocalId, Box<[TraitCandidate]>>> {
+ desc { "traits in scope at a block" }
+ }
+
+ query module_reexports(def_id: LocalDefId) -> Option<&'tcx [ModChild]> {
+ desc { |tcx| "looking up reexports of module `{}`", tcx.def_path_str(def_id.to_def_id()) }
+ }
+
+ query impl_defaultness(def_id: DefId) -> hir::Defaultness {
+ desc { |tcx| "looking up whether `{}` is a default impl", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query check_well_formed(key: LocalDefId) -> () {
+ desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ // The `DefId`s of all non-generic functions and statics in the given crate
+ // that can be reached from outside the crate.
+ //
+ // We expect these items to be available to be linked against.
+ //
+ // This query can also be called for `LOCAL_CRATE`. In this case it will
+ // compute which items will be reachable to other crates, taking into account
+ // the kind of crate that is currently compiled. Crates with only a
+ // C interface have fewer reachable things.
+ //
+ // Does not include external symbols that don't have a corresponding DefId,
+ // like the compiler-generated `main` function and so on.
+ query reachable_non_generics(_: CrateNum)
+ -> DefIdMap<SymbolExportInfo> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "looking up the exported symbols of a crate" }
+ separate_provide_extern
+ }
+ query is_reachable_non_generic(def_id: DefId) -> bool {
+ desc { |tcx| "checking whether `{}` is an exported symbol", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+ query is_unreachable_local_definition(def_id: LocalDefId) -> bool {
+ desc { |tcx|
+ "checking whether `{}` is reachable from outside the crate",
+ tcx.def_path_str(def_id.to_def_id()),
+ }
+ }
+
+ /// The entire set of monomorphizations the local crate can safely link
+ /// to because they are exported from upstream crates. Do not depend on
+ /// this directly, as its value changes anytime a monomorphization gets
+ /// added or removed in any upstream crate. Instead use the narrower
+ /// `upstream_monomorphizations_for`, `upstream_drop_glue_for`, or, even
+ /// better, `Instance::upstream_monomorphization()`.
+ query upstream_monomorphizations(_: ()) -> DefIdMap<FxHashMap<SubstsRef<'tcx>, CrateNum>> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "collecting available upstream monomorphizations" }
+ }
+
+ /// Returns the set of upstream monomorphizations available for the
+ /// generic function identified by the given `def_id`. The query makes
+ /// sure to make a stable selection if the same monomorphization is
+ /// available in multiple upstream crates.
+ ///
+ /// You likely want to call `Instance::upstream_monomorphization()`
+ /// instead of invoking this query directly.
+ query upstream_monomorphizations_for(def_id: DefId)
+ -> Option<&'tcx FxHashMap<SubstsRef<'tcx>, CrateNum>>
+ {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx|
+ "collecting available upstream monomorphizations for `{}`",
+ tcx.def_path_str(def_id),
+ }
+ separate_provide_extern
+ }
+
+ /// Returns the upstream crate that exports drop-glue for the given
+ /// type (`substs` is expected to be a single-item list containing the
+ /// type one wants drop-glue for).
+ ///
+ /// This is a subset of `upstream_monomorphizations_for` in order to
+ /// increase dep-tracking granularity. Otherwise adding or removing any
+ /// type with drop-glue in any upstream crate would invalidate all
+ /// functions calling drop-glue of an upstream type.
+ ///
+ /// You likely want to call `Instance::upstream_monomorphization()`
+ /// instead of invoking this query directly.
+ ///
+ /// NOTE: This query could easily be extended to also support other
+ /// common functions that have a large set of monomorphizations
+ /// (like `Clone::clone` for example).
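+ ///
+ /// A hypothetical call sketch (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// let substs = tcx.intern_substs(&[ty.into()]); // single-item list with the type
+ /// let upstream_crate = tcx.upstream_drop_glue_for(substs);
+ /// ```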
+ query upstream_drop_glue_for(substs: SubstsRef<'tcx>) -> Option<CrateNum> {
+ desc { "available upstream drop-glue for `{:?}`", substs }
+ }
+
+ query foreign_modules(_: CrateNum) -> FxHashMap<DefId, ForeignModule> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "looking up the foreign modules of a linked crate" }
+ separate_provide_extern
+ }
+
+ /// Identifies the entry-point (e.g., the `main` function) for a given
+ /// crate, returning `None` if there is no entry point (such as for library crates).
+ query entry_fn(_: ()) -> Option<(DefId, EntryFnType)> {
+ desc { "looking up the entry function of a crate" }
+ }
+ query proc_macro_decls_static(_: ()) -> Option<LocalDefId> {
+ desc { "looking up the derive registrar for a crate" }
+ }
+ // The macro which defines `rustc_metadata::provide_extern` depends on this query's name.
+ // Changing the name should cause a compiler error, but in case that changes, be aware.
+ query crate_hash(_: CrateNum) -> Svh {
+ eval_always
+ desc { "looking up the hash a crate" }
+ separate_provide_extern
+ }
+ query crate_host_hash(_: CrateNum) -> Option<Svh> {
+ eval_always
+ desc { "looking up the hash of a host version of a crate" }
+ separate_provide_extern
+ }
+ query extra_filename(_: CrateNum) -> String {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "looking up the extra filename for a crate" }
+ separate_provide_extern
+ }
+ query crate_extern_paths(_: CrateNum) -> Vec<PathBuf> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "looking up the paths for extern crates" }
+ separate_provide_extern
+ }
+
+ /// Given a crate and a trait, look up all impls of that trait in the crate.
+ /// Return `(impl_id, self_ty)`.
+ query implementations_of_trait(_: (CrateNum, DefId)) -> &'tcx [(DefId, Option<SimplifiedType>)] {
+ desc { "looking up implementations of a trait in a crate" }
+ separate_provide_extern
+ }
+
+ /// Collects all incoherent impls for the given crate and type.
+ ///
+ /// Do not call this directly, but instead use the `incoherent_impls` query.
+ /// This query is only used to get the data necessary for that query.
+ query crate_incoherent_impls(key: (CrateNum, SimplifiedType)) -> &'tcx [DefId] {
+ desc { |tcx| "collecting all impls for a type in a crate" }
+ separate_provide_extern
+ }
+
+ query is_dllimport_foreign_item(def_id: DefId) -> bool {
+ desc { |tcx| "is_dllimport_foreign_item({})", tcx.def_path_str(def_id) }
+ }
+ query is_statically_included_foreign_item(def_id: DefId) -> bool {
+ desc { |tcx| "is_statically_included_foreign_item({})", tcx.def_path_str(def_id) }
+ }
+ query native_library_kind(def_id: DefId)
+ -> Option<NativeLibKind> {
+ desc { |tcx| "native_library_kind({})", tcx.def_path_str(def_id) }
+ }
+
+ /// Does lifetime resolution, but does not descend into trait items. This
+ /// should only be used for resolving lifetimes on trait definitions,
+ /// and is used to avoid cycles. Importantly, `resolve_lifetimes` still visits
+ /// the same lifetimes and is responsible for diagnostics.
+ /// See `rustc_resolve::late::lifetimes` for details.
+ query resolve_lifetimes_trait_definition(_: LocalDefId) -> ResolveLifetimes {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "resolving lifetimes for a trait definition" }
+ }
+ /// Does lifetime resolution on items. Importantly, we can't resolve
+ /// lifetimes directly on things like trait methods, because of trait params.
+ /// See `rustc_resolve::late::lifetimes` for details.
+ query resolve_lifetimes(_: LocalDefId) -> ResolveLifetimes {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "resolving lifetimes" }
+ }
+ query named_region_map(_: LocalDefId) ->
+ Option<&'tcx FxHashMap<ItemLocalId, Region>> {
+ desc { "looking up a named region" }
+ }
+ query is_late_bound_map(_: LocalDefId) -> Option<&'tcx FxIndexSet<LocalDefId>> {
+ desc { "testing if a region is late bound" }
+ }
+ /// For a given item (like a struct), gets the default lifetimes to be used
+ /// for each parameter if a trait object were to be passed for that parameter.
+ /// For example, for `struct Foo<'a, T, U>`, this would be `['static, 'static]`.
+ /// For `struct Foo<'a, T: 'a, U>`, this would instead be `['a, 'static]`.
+ query object_lifetime_defaults(_: LocalDefId) -> Option<&'tcx [ObjectLifetimeDefault]> {
+ desc { "looking up lifetime defaults for a region on an item" }
+ }
+ query late_bound_vars_map(_: LocalDefId)
+ -> Option<&'tcx FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>> {
+ desc { "looking up late bound vars" }
+ }
+
+ query visibility(def_id: DefId) -> ty::Visibility {
+ desc { |tcx| "computing visibility of `{}`", tcx.def_path_str(def_id) }
+ separate_provide_extern
+ }
+
+ /// Computes the set of modules from which this type is visibly uninhabited.
+ /// To check whether a type is uninhabited at all (not just from a given module), you could
+ /// check whether the forest is empty.
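+ ///
+ /// A minimal sketch (illustrative only):
+ ///
+ /// ```ignore (illustrative)
+ /// let forest = tcx.type_uninhabited_from(param_env.and(ty));
+ /// let uninhabited_somewhere = !forest.is_empty();
+ /// ```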
+ query type_uninhabited_from(
+ key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
+ ) -> ty::inhabitedness::DefIdForest<'tcx> {
+ desc { "computing the inhabitedness of `{:?}`", key }
+ remap_env_constness
+ }
+
+ query dep_kind(_: CrateNum) -> CrateDepKind {
+ eval_always
+ desc { "fetching what a dependency looks like" }
+ separate_provide_extern
+ }
+
+ /// Gets the name of the crate.
+ query crate_name(_: CrateNum) -> Symbol {
+ eval_always
+ desc { "fetching what a crate is named" }
+ separate_provide_extern
+ }
+ query module_children(def_id: DefId) -> &'tcx [ModChild] {
+ desc { |tcx| "collecting child items of module `{}`", tcx.def_path_str(def_id) }
+ separate_provide_extern
+ }
+ query extern_mod_stmt_cnum(def_id: LocalDefId) -> Option<CrateNum> {
+ desc { |tcx| "computing crate imported by `{}`", tcx.def_path_str(def_id.to_def_id()) }
+ }
+
+ query lib_features(_: ()) -> LibFeatures {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating the lib features map" }
+ }
+ query defined_lib_features(_: CrateNum) -> &'tcx [(Symbol, Option<Symbol>)] {
+ desc { "calculating the lib features defined in a crate" }
+ separate_provide_extern
+ }
+ query stability_implications(_: CrateNum) -> FxHashMap<Symbol, Symbol> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating the implications between `#[unstable]` features defined in a crate" }
+ separate_provide_extern
+ }
+ /// Whether the function is an intrinsic
+ query is_intrinsic(def_id: DefId) -> bool {
+ desc { |tcx| "is_intrinsic({})", tcx.def_path_str(def_id) }
+ separate_provide_extern
+ }
+ /// Returns the lang items defined in another crate by loading it from metadata.
+ query get_lang_items(_: ()) -> LanguageItems {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "calculating the lang items map" }
+ }
+
+ /// Returns all diagnostic items defined in all crates.
+ query all_diagnostic_items(_: ()) -> rustc_hir::diagnostic_items::DiagnosticItems {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "calculating the diagnostic items map" }
+ }
+
+ /// Returns the lang items defined in another crate by loading it from metadata.
+ query defined_lang_items(_: CrateNum) -> &'tcx [(DefId, usize)] {
+ desc { "calculating the lang items defined in a crate" }
+ separate_provide_extern
+ }
+
+ /// Returns the diagnostic items defined in a crate.
+ query diagnostic_items(_: CrateNum) -> rustc_hir::diagnostic_items::DiagnosticItems {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating the diagnostic items map in a crate" }
+ separate_provide_extern
+ }
+
+ query missing_lang_items(_: CrateNum) -> &'tcx [LangItem] {
+ desc { "calculating the missing lang items in a crate" }
+ separate_provide_extern
+ }
+ query visible_parent_map(_: ()) -> DefIdMap<DefId> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating the visible parent map" }
+ }
+ query trimmed_def_paths(_: ()) -> FxHashMap<DefId, Symbol> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating trimmed def paths" }
+ }
+ query missing_extern_crate_item(_: CrateNum) -> bool {
+ eval_always
+ desc { "seeing if we're missing an `extern crate` item for this crate" }
+ separate_provide_extern
+ }
+ query used_crate_source(_: CrateNum) -> Lrc<CrateSource> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "looking at the source for a crate" }
+ separate_provide_extern
+ }
+ /// Returns the debugger visualizers defined for this crate.
+ query debugger_visualizers(_: CrateNum) -> Vec<rustc_span::DebuggerVisualizerFile> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "looking up the debugger visualizers for this crate" }
+ separate_provide_extern
+ }
+ query postorder_cnums(_: ()) -> &'tcx [CrateNum] {
+ eval_always
+ desc { "generating a postorder list of CrateNums" }
+ }
+    /// Returns whether or not the crate with `CrateNum` `c`
+    /// is marked as a private dependency.
+ query is_private_dep(c: CrateNum) -> bool {
+ eval_always
+        desc { "checking whether crate {} is a private dependency", c }
+ separate_provide_extern
+ }
+ query allocator_kind(_: ()) -> Option<AllocatorKind> {
+ eval_always
+ desc { "allocator kind for the current crate" }
+ }
+
+ query upvars_mentioned(def_id: DefId) -> Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>> {
+ desc { |tcx| "collecting upvars mentioned in `{}`", tcx.def_path_str(def_id) }
+ }
+ query maybe_unused_trait_imports(_: ()) -> &'tcx FxIndexSet<LocalDefId> {
+ desc { "fetching potentially unused trait imports" }
+ }
+ query maybe_unused_extern_crates(_: ()) -> &'tcx [(LocalDefId, Span)] {
+ desc { "looking up all possibly unused extern crates" }
+ }
+ query names_imported_by_glob_use(def_id: LocalDefId) -> &'tcx FxHashSet<Symbol> {
+ desc { |tcx| "names_imported_by_glob_use for `{}`", tcx.def_path_str(def_id.to_def_id()) }
+ }
+
+ query stability_index(_: ()) -> stability::Index {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "calculating the stability index for the local crate" }
+ }
+ query crates(_: ()) -> &'tcx [CrateNum] {
+ eval_always
+ desc { "fetching all foreign CrateNum instances" }
+ }
+
+ /// A list of all traits in a crate, used by rustdoc and error reporting.
+ /// NOTE: Not named just `traits` due to a naming conflict.
+ query traits_in_crate(_: CrateNum) -> &'tcx [DefId] {
+ desc { "fetching all traits in a crate" }
+ separate_provide_extern
+ }
+
+ /// The list of symbols exported from the given crate.
+ ///
+ /// - All names contained in `exported_symbols(cnum)` are guaranteed to
+ /// correspond to a publicly visible symbol in `cnum` machine code.
+ /// - The `exported_symbols` sets of different crates do not intersect.
+ query exported_symbols(cnum: CrateNum) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
+ desc { "exported_symbols" }
+ cache_on_disk_if { *cnum == LOCAL_CRATE }
+ separate_provide_extern
+ }
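+    // Queries declared in this macro become methods on `TyCtxt`; for example,
+    // the query above is typically invoked as `tcx.exported_symbols(LOCAL_CRATE)`.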
+
+ query collect_and_partition_mono_items(_: ()) -> (&'tcx DefIdSet, &'tcx [CodegenUnit<'tcx>]) {
+ eval_always
+ desc { "collect_and_partition_mono_items" }
+ }
+ query is_codegened_item(def_id: DefId) -> bool {
+ desc { |tcx| "determining whether `{}` needs codegen", tcx.def_path_str(def_id) }
+ }
+
+ /// All items participating in code generation together with items inlined into them.
+ query codegened_and_inlined_items(_: ()) -> &'tcx DefIdSet {
+ eval_always
+ desc { "codegened_and_inlined_items" }
+ }
+
+ query codegen_unit(_: Symbol) -> &'tcx CodegenUnit<'tcx> {
+ desc { "codegen_unit" }
+ }
+ query unused_generic_params(key: ty::InstanceDef<'tcx>) -> FiniteBitSet<u32> {
+ cache_on_disk_if { key.def_id().is_local() }
+ desc {
+ |tcx| "determining which generic parameters are unused by `{}`",
+ tcx.def_path_str(key.def_id())
+ }
+ separate_provide_extern
+ }
+ query backend_optimization_level(_: ()) -> OptLevel {
+ desc { "optimization level used by backend" }
+ }
+
+    /// Return the filenames where output artifacts shall be stored.
+ ///
+ /// This query returns an `&Arc` because codegen backends need the value even after the `TyCtxt`
+ /// has been destroyed.
+ query output_filenames(_: ()) -> &'tcx Arc<OutputFilenames> {
+ eval_always
+ desc { "output_filenames" }
+ }
+
+ /// Do not call this query directly: invoke `normalize` instead.
+ query normalize_projection_ty(
+ goal: CanonicalProjectionGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: invoke `try_normalize_erasing_regions` instead.
+ query try_normalize_generic_arg_after_erasing_regions(
+ goal: ParamEnvAnd<'tcx, GenericArg<'tcx>>
+ ) -> Result<GenericArg<'tcx>, NoSolution> {
+ desc { "normalizing `{}`", goal.value }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: invoke `try_normalize_erasing_regions` instead.
+ query try_normalize_mir_const_after_erasing_regions(
+ goal: ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
+ ) -> Result<mir::ConstantKind<'tcx>, NoSolution> {
+ desc { "normalizing `{}`", goal.value }
+ remap_env_constness
+ }
+
+ query implied_outlives_bounds(
+ goal: CanonicalTyGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>,
+ NoSolution,
+ > {
+ desc { "computing implied outlives bounds for `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly:
+ /// invoke `DropckOutlives::new(dropped_ty)).fully_perform(typeck.infcx)` instead.
+ query dropck_outlives(
+ goal: CanonicalTyGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, DropckOutlivesResult<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "computing dropck types for `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: invoke `infcx.predicate_may_hold()` or
+ /// `infcx.predicate_must_hold()` instead.
+ query evaluate_obligation(
+ goal: CanonicalPredicateGoal<'tcx>
+ ) -> Result<traits::EvaluationResult, traits::OverflowError> {
+ desc { "evaluating trait selection obligation `{}`", goal.value.value }
+ }
+
+ query evaluate_goal(
+ goal: traits::CanonicalChalkEnvironmentAndGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution
+ > {
+ desc { "evaluating trait selection obligation `{}`", goal.value }
+ }
+
+ /// Do not call this query directly: part of the `Eq` type-op
+ query type_op_ascribe_user_type(
+ goal: CanonicalTypeOpAscribeUserTypeGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution,
+ > {
+ desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Eq` type-op
+ query type_op_eq(
+ goal: CanonicalTypeOpEqGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution,
+ > {
+ desc { "evaluating `type_op_eq` `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Subtype` type-op
+ query type_op_subtype(
+ goal: CanonicalTypeOpSubtypeGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution,
+ > {
+ desc { "evaluating `type_op_subtype` `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `ProvePredicate` type-op
+ query type_op_prove_predicate(
+ goal: CanonicalTypeOpProvePredicateGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution,
+ > {
+ desc { "evaluating `type_op_prove_predicate` `{:?}`", goal }
+ }
+
+ /// Do not call this query directly: part of the `Normalize` type-op
+ query type_op_normalize_ty(
+ goal: CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Ty<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Normalize` type-op
+ query type_op_normalize_predicate(
+ goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::Predicate<'tcx>>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::Predicate<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Normalize` type-op
+ query type_op_normalize_poly_fn_sig(
+ goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::PolyFnSig<'tcx>>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::PolyFnSig<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Normalize` type-op
+ query type_op_normalize_fn_sig(
+ goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::FnSig<'tcx>>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::FnSig<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ query subst_and_check_impossible_predicates(key: (DefId, SubstsRef<'tcx>)) -> bool {
+ desc { |tcx|
+            "impossible substituted predicates: `{}`",
+ tcx.def_path_str(key.0)
+ }
+ }
+
+ query method_autoderef_steps(
+ goal: CanonicalTyGoal<'tcx>
+ ) -> MethodAutoderefStepsResult<'tcx> {
+ desc { "computing autoderef types for `{:?}`", goal }
+ remap_env_constness
+ }
+
+ query supported_target_features(_: CrateNum) -> FxHashMap<String, Option<Symbol>> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "looking up supported target features" }
+ }
+
+    /// Get an estimate of the size of an `InstanceDef` based on its MIR for CGU partitioning.
+ query instance_def_size_estimate(def: ty::InstanceDef<'tcx>)
+ -> usize {
+ desc { |tcx| "estimating size for `{}`", tcx.def_path_str(def.def_id()) }
+ }
+
+ query features_query(_: ()) -> &'tcx rustc_feature::Features {
+ eval_always
+ desc { "looking up enabled feature gates" }
+ }
+
+ /// Attempt to resolve the given `DefId` to an `Instance`, for the
+    /// given generic args (`SubstsRef`), returning one of:
+ /// * `Ok(Some(instance))` on success
+ /// * `Ok(None)` when the `SubstsRef` are still too generic,
+ /// and therefore don't allow finding the final `Instance`
+ /// * `Err(ErrorGuaranteed)` when the `Instance` resolution process
+ /// couldn't complete due to errors elsewhere - this is distinct
+ /// from `Ok(None)` to avoid misleading diagnostics when an error
+ /// has already been/will be emitted, for the original cause
+ query resolve_instance(
+ key: ty::ParamEnvAnd<'tcx, (DefId, SubstsRef<'tcx>)>
+ ) -> Result<Option<ty::Instance<'tcx>>, ErrorGuaranteed> {
+ desc { "resolving instance `{}`", ty::Instance::new(key.value.0, key.value.1) }
+ remap_env_constness
+ }
+
+ query resolve_instance_of_const_arg(
+ key: ty::ParamEnvAnd<'tcx, (LocalDefId, DefId, SubstsRef<'tcx>)>
+ ) -> Result<Option<ty::Instance<'tcx>>, ErrorGuaranteed> {
+ desc {
+ "resolving instance of the const argument `{}`",
+ ty::Instance::new(key.value.0.to_def_id(), key.value.2),
+ }
+ remap_env_constness
+ }
+
+ query normalize_opaque_types(key: &'tcx ty::List<ty::Predicate<'tcx>>) -> &'tcx ty::List<ty::Predicate<'tcx>> {
+ desc { "normalizing opaque types in {:?}", key }
+ }
+
+ /// Checks whether a type is definitely uninhabited. This is
+ /// conservative: for some types that are uninhabited we return `false`,
+ /// but we only return `true` for types that are definitely uninhabited.
+    /// `ty.conservative_is_privately_uninhabited` implies that any value of type `ty`
+    /// will have an `Abi::Uninhabited` layout. (Note that uninhabited types may have
+    /// nonzero size, to account for partial initialization. See #49298 for details.)
+ query conservative_is_privately_uninhabited(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "conservatively checking if {:?} is privately uninhabited", key }
+ remap_env_constness
+ }
+
+ query limits(key: ()) -> Limits {
+ desc { "looking up limits" }
+ }
+
+ /// Performs an HIR-based well-formed check on the item with the given `HirId`. If
+ /// we get an `Unimplemented` error that matches the provided `Predicate`, return
+ /// the cause of the newly created obligation.
+ ///
+ /// This is only used by error-reporting code to get a better cause (in particular, a better
+ /// span) for an *existing* error. Therefore, it is best-effort, and may never handle
+ /// all of the cases that the normal `ty::Ty`-based wfcheck does. This is fine,
+ /// because the `ty::Ty`-based wfcheck is always run.
+ query diagnostic_hir_wf_check(key: (ty::Predicate<'tcx>, traits::WellFormedLoc)) -> Option<traits::ObligationCause<'tcx>> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ no_hash
+ desc { "performing HIR wf-checking for predicate {:?} at item {:?}", key.0, key.1 }
+ }
+
+ /// The list of backend features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
+ /// `--target` and similar).
+ query global_backend_features(_: ()) -> Vec<String> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "computing the backend features for CLI flags" }
+ }
+
+ query generator_diagnostic_data(key: DefId) -> Option<GeneratorDiagnosticData<'tcx>> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "looking up generator diagnostic data of `{}`", tcx.def_path_str(key) }
+ separate_provide_extern
+ }
+
+ query permits_uninit_init(key: TyAndLayout<'tcx>) -> bool {
+ desc { "checking to see if {:?} permits being left uninit", key.ty }
+ }
+
+ query permits_zero_init(key: TyAndLayout<'tcx>) -> bool {
+ desc { "checking to see if {:?} permits being left zeroed", key.ty }
+ }
+}
diff --git a/compiler/rustc_middle/src/tests.rs b/compiler/rustc_middle/src/tests.rs
new file mode 100644
index 000000000..757e0bd3b
--- /dev/null
+++ b/compiler/rustc_middle/src/tests.rs
@@ -0,0 +1,13 @@
+use super::*;
+
+// FIXME(#27438): right now the unit tests of librustc_middle don't refer to any actual
+// functions generated in librustc_data_structures (all
+// references are through generic functions), but statics are
+// referenced from time to time. Due to this bug we won't
+// actually correctly link in the statics unless we also
+// reference a function, so be sure to reference a dummy
+// function.
+#[test]
+fn noop() {
+ rustc_data_structures::__noop_fix_for_27438();
+}
diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs
new file mode 100644
index 000000000..b856af1d8
--- /dev/null
+++ b/compiler/rustc_middle/src/thir.rs
@@ -0,0 +1,821 @@
+//! THIR datatypes and definitions. See the [rustc dev guide] for more info.
+//!
+//! If you compare the THIR [`ExprKind`] to [`hir::ExprKind`], you will see it is
+//! a good bit simpler. In fact, a number of the more straightforward
+//! MIR simplifications are already done in the lowering to THIR. For
+//! example, method calls and overloaded operators are absent: they are
+//! expected to be converted into [`ExprKind::Call`] instances.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/thir.html
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir as hir;
+use rustc_hir::def::CtorKind;
+use rustc_hir::def_id::DefId;
+use rustc_hir::RangeEnd;
+use rustc_index::newtype_index;
+use rustc_index::vec::IndexVec;
+use rustc_middle::infer::canonical::Canonical;
+use rustc_middle::middle::region;
+use rustc_middle::mir::interpret::AllocId;
+use rustc_middle::mir::{self, BinOp, BorrowKind, FakeReadCause, Field, Mutability, UnOp};
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::CanonicalUserTypeAnnotation;
+use rustc_middle::ty::{self, AdtDef, Ty, UpvarSubsts, UserType};
+use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_target::abi::VariantIdx;
+use rustc_target::asm::InlineAsmRegOrRegClass;
+
+use rustc_span::def_id::LocalDefId;
+use std::fmt;
+use std::ops::Index;
+
+pub mod visit;
+
+newtype_index! {
+ /// An index to an [`Arm`] stored in [`Thir::arms`]
+ #[derive(HashStable)]
+ pub struct ArmId {
+ DEBUG_FORMAT = "a{}"
+ }
+}
+
+newtype_index! {
+ /// An index to an [`Expr`] stored in [`Thir::exprs`]
+ #[derive(HashStable)]
+ pub struct ExprId {
+ DEBUG_FORMAT = "e{}"
+ }
+}
+
+newtype_index! {
+ #[derive(HashStable)]
+ /// An index to a [`Stmt`] stored in [`Thir::stmts`]
+ pub struct StmtId {
+ DEBUG_FORMAT = "s{}"
+ }
+}
+
+macro_rules! thir_with_elements {
+ ($($name:ident: $id:ty => $value:ty,)*) => {
+ /// A container for a THIR body.
+ ///
+ /// This can be indexed directly by any THIR index (e.g. [`ExprId`]).
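+        ///
+        /// A minimal illustrative sketch (the `thir`, `expr_id`, `stmt_id`, and
+        /// `arm_id` bindings are hypothetical):
+        ///
+        /// ```ignore (illustrative)
+        /// let expr: &Expr<'_> = &thir[expr_id]; // indexes `thir.exprs`
+        /// let stmt: &Stmt<'_> = &thir[stmt_id]; // indexes `thir.stmts`
+        /// let arm: &Arm<'_> = &thir[arm_id];    // indexes `thir.arms`
+        /// ```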
+ #[derive(Debug, HashStable, Clone)]
+ pub struct Thir<'tcx> {
+ $(
+ pub $name: IndexVec<$id, $value>,
+ )*
+ }
+
+ impl<'tcx> Thir<'tcx> {
+ pub fn new() -> Thir<'tcx> {
+ Thir {
+ $(
+ $name: IndexVec::new(),
+ )*
+ }
+ }
+ }
+
+ $(
+ impl<'tcx> Index<$id> for Thir<'tcx> {
+ type Output = $value;
+ fn index(&self, index: $id) -> &Self::Output {
+ &self.$name[index]
+ }
+ }
+ )*
+ }
+}
+
+thir_with_elements! {
+ arms: ArmId => Arm<'tcx>,
+ exprs: ExprId => Expr<'tcx>,
+ stmts: StmtId => Stmt<'tcx>,
+}
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum LintLevel {
+ Inherited,
+ Explicit(hir::HirId),
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Block {
+ /// Whether the block itself has a label. Used by `label: {}`
+ /// and `try` blocks.
+ ///
+ /// This does *not* include labels on loops, e.g. `'label: loop {}`.
+ pub targeted_by_break: bool,
+ pub region_scope: region::Scope,
+ pub opt_destruction_scope: Option<region::Scope>,
+ /// The span of the block, including the opening braces,
+ /// the label, and the `unsafe` keyword, if present.
+ pub span: Span,
+    /// The statements in the block.
+ pub stmts: Box<[StmtId]>,
+ /// The trailing expression of the block, if any.
+ pub expr: Option<ExprId>,
+ pub safety_mode: BlockSafety,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Adt<'tcx> {
+ /// The ADT we're constructing.
+ pub adt_def: AdtDef<'tcx>,
+ /// The variant of the ADT.
+ pub variant_index: VariantIdx,
+ pub substs: SubstsRef<'tcx>,
+
+ /// Optional user-given substs: for something like `let x =
+ /// Bar::<T> { ... }`.
+ pub user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+
+ pub fields: Box<[FieldExpr]>,
+ /// The base, e.g. `Foo {x: 1, .. base}`.
+ pub base: Option<FruInfo<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum BlockSafety {
+ Safe,
+ /// A compiler-generated unsafe block
+ BuiltinUnsafe,
+ /// An `unsafe` block. The `HirId` is the ID of the block.
+ ExplicitUnsafe(hir::HirId),
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Stmt<'tcx> {
+ pub kind: StmtKind<'tcx>,
+ pub opt_destruction_scope: Option<region::Scope>,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub enum StmtKind<'tcx> {
+ /// An expression with a trailing semicolon.
+ Expr {
+ /// The scope for this statement; may be used as lifetime of temporaries.
+ scope: region::Scope,
+
+ /// The expression being evaluated in this statement.
+ expr: ExprId,
+ },
+
+ /// A `let` binding.
+ Let {
+ /// The scope for variables bound in this `let`; it covers this and
+ /// all the remaining statements in the block.
+ remainder_scope: region::Scope,
+
+ /// The scope for the initialization itself; might be used as
+ /// lifetime of temporaries.
+ init_scope: region::Scope,
+
+ /// `let <PAT> = ...`
+ ///
+ /// If a type annotation is included, it is added as an ascription pattern.
+ pattern: Pat<'tcx>,
+
+ /// `let pat: ty = <INIT>`
+ initializer: Option<ExprId>,
+
+        /// `let pat: ty = <INIT> else { <ELSE> }`
+ else_block: Option<Block>,
+
+ /// The lint level for this `let` statement.
+ lint_level: LintLevel,
+ },
+}
+
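+/// A newtype wrapping the `HirId` of a local variable binding; used by
+/// [`ExprKind::VarRef`] and [`PatKind::Binding`] to identify the variable.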
+#[derive(Clone, Debug, Copy, PartialEq, Eq, Hash, HashStable, TyEncodable, TyDecodable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct LocalVarId(pub hir::HirId);
+
+/// A THIR expression.
+#[derive(Clone, Debug, HashStable)]
+pub struct Expr<'tcx> {
+ /// The type of this expression
+ pub ty: Ty<'tcx>,
+
+ /// The lifetime of this expression if it should be spilled into a
+ /// temporary; should be `None` only if in a constant context
+ pub temp_lifetime: Option<region::Scope>,
+
+ /// span of the expression in the source
+ pub span: Span,
+
+ /// kind of expression
+ pub kind: ExprKind<'tcx>,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub enum ExprKind<'tcx> {
+ /// `Scope`s are used to explicitly mark destruction scopes,
+ /// and to track the `HirId` of the expressions within the scope.
+ Scope {
+ region_scope: region::Scope,
+ lint_level: LintLevel,
+ value: ExprId,
+ },
+ /// A `box <value>` expression.
+ Box {
+ value: ExprId,
+ },
+ /// An `if` expression.
+ If {
+ if_then_scope: region::Scope,
+ cond: ExprId,
+ then: ExprId,
+ else_opt: Option<ExprId>,
+ },
+ /// A function call. Method calls and overloaded operators are converted to plain function calls.
+ Call {
+ /// The type of the function. This is often a [`FnDef`] or a [`FnPtr`].
+ ///
+ /// [`FnDef`]: ty::TyKind::FnDef
+ /// [`FnPtr`]: ty::TyKind::FnPtr
+ ty: Ty<'tcx>,
+ /// The function itself.
+ fun: ExprId,
+ /// The arguments passed to the function.
+ ///
+ /// Note: in some cases (like calling a closure), the function call `f(...args)` gets
+ /// rewritten as a call to a function trait method (e.g. `FnOnce::call_once(f, (...args))`).
+ args: Box<[ExprId]>,
+        /// Whether this is from a call in HIR rather than from an overloaded
+        /// operator; `true` even for an overloaded function call (e.g. calling a closure).
+ from_hir_call: bool,
+ /// The span of the function, without the dot and receiver
+ /// (e.g. `foo(a, b)` in `x.foo(a, b)`).
+ fn_span: Span,
+ },
+ /// A *non-overloaded* dereference.
+ Deref {
+ arg: ExprId,
+ },
+ /// A *non-overloaded* binary operation.
+ Binary {
+ op: BinOp,
+ lhs: ExprId,
+ rhs: ExprId,
+ },
+    /// A logical operation. This is distinct from `Binary` because
+    /// the operands need to be lazily evaluated.
+ LogicalOp {
+ op: LogicalOp,
+ lhs: ExprId,
+ rhs: ExprId,
+ },
+ /// A *non-overloaded* unary operation. Note that here the deref (`*`)
+ /// operator is represented by `ExprKind::Deref`.
+ Unary {
+ op: UnOp,
+ arg: ExprId,
+ },
+ /// A cast: `<source> as <type>`. The type we cast to is the type of
+ /// the parent expression.
+ Cast {
+ source: ExprId,
+ },
+    /// A use of a place expression as a value: coerces the place (lexpr)
+    /// into a value (vexpr).
+    Use {
+        source: ExprId,
+    },
+ /// A coercion from `!` to any type.
+ NeverToAny {
+ source: ExprId,
+ },
+ /// A pointer cast. More information can be found in [`PointerCast`].
+ Pointer {
+ cast: PointerCast,
+ source: ExprId,
+ },
+ /// A `loop` expression.
+ Loop {
+ body: ExprId,
+ },
+ Let {
+ expr: ExprId,
+ pat: Pat<'tcx>,
+ },
+ /// A `match` expression.
+ Match {
+ scrutinee: ExprId,
+ arms: Box<[ArmId]>,
+ },
+ /// A block.
+ Block {
+ body: Block,
+ },
+ /// An assignment: `lhs = rhs`.
+ Assign {
+ lhs: ExprId,
+ rhs: ExprId,
+ },
+ /// A *non-overloaded* operation assignment, e.g. `lhs += rhs`.
+ AssignOp {
+ op: BinOp,
+ lhs: ExprId,
+ rhs: ExprId,
+ },
+    /// Access to a field of a struct, a tuple, a union, or an enum.
+ Field {
+ lhs: ExprId,
+ /// Variant containing the field.
+ variant_index: VariantIdx,
+ /// This can be a named (`.foo`) or unnamed (`.0`) field.
+ name: Field,
+ },
+ /// A *non-overloaded* indexing operation.
+ Index {
+ lhs: ExprId,
+ index: ExprId,
+ },
+ /// A local variable.
+ VarRef {
+ id: LocalVarId,
+ },
+ /// Used to represent upvars mentioned in a closure/generator
+ UpvarRef {
+ /// DefId of the closure/generator
+ closure_def_id: DefId,
+
+ /// HirId of the root variable
+ var_hir_id: LocalVarId,
+ },
+ /// A borrow, e.g. `&arg`.
+ Borrow {
+ borrow_kind: BorrowKind,
+ arg: ExprId,
+ },
+ /// A `&raw [const|mut] $place_expr` raw borrow resulting in type `*[const|mut] T`.
+ AddressOf {
+ mutability: hir::Mutability,
+ arg: ExprId,
+ },
+ /// A `break` expression.
+ Break {
+ label: region::Scope,
+ value: Option<ExprId>,
+ },
+ /// A `continue` expression.
+ Continue {
+ label: region::Scope,
+ },
+ /// A `return` expression.
+ Return {
+ value: Option<ExprId>,
+ },
+ /// An inline `const` block, e.g. `const {}`.
+ ConstBlock {
+ did: DefId,
+ substs: SubstsRef<'tcx>,
+ },
+ /// An array literal constructed from one repeated element, e.g. `[1; 5]`.
+ Repeat {
+ value: ExprId,
+ count: ty::Const<'tcx>,
+ },
+ /// An array, e.g. `[a, b, c, d]`.
+ Array {
+ fields: Box<[ExprId]>,
+ },
+ /// A tuple, e.g. `(a, b, c, d)`.
+ Tuple {
+ fields: Box<[ExprId]>,
+ },
+ /// An ADT constructor, e.g. `Foo {x: 1, y: 2}`.
+ Adt(Box<Adt<'tcx>>),
+ /// A type ascription on a place.
+ PlaceTypeAscription {
+ source: ExprId,
+ /// Type that the user gave to this expression
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ /// A type ascription on a value, e.g. `42: i32`.
+ ValueTypeAscription {
+ source: ExprId,
+ /// Type that the user gave to this expression
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ /// A closure definition.
+ Closure {
+ closure_id: LocalDefId,
+ substs: UpvarSubsts<'tcx>,
+ upvars: Box<[ExprId]>,
+ movability: Option<hir::Movability>,
+ fake_reads: Vec<(ExprId, FakeReadCause, hir::HirId)>,
+ },
+ /// A literal.
+ Literal {
+ lit: &'tcx hir::Lit,
+ neg: bool,
+ },
+ /// For literals that don't correspond to anything in the HIR
+ NonHirLiteral {
+ lit: ty::ScalarInt,
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ /// A literal of a ZST type.
+ ZstLiteral {
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ /// Associated constants and named constants
+ NamedConst {
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ ConstParam {
+ param: ty::ParamConst,
+ def_id: DefId,
+ },
+ // FIXME improve docs for `StaticRef` by distinguishing it from `NamedConst`
+ /// A literal containing the address of a `static`.
+ ///
+ /// This is only distinguished from `Literal` so that we can register some
+ /// info for diagnostics.
+ StaticRef {
+ alloc_id: AllocId,
+ ty: Ty<'tcx>,
+ def_id: DefId,
+ },
+ /// Inline assembly, i.e. `asm!()`.
+ InlineAsm {
+ template: &'tcx [InlineAsmTemplatePiece],
+ operands: Box<[InlineAsmOperand<'tcx>]>,
+ options: InlineAsmOptions,
+ line_spans: &'tcx [Span],
+ },
+ /// An expression taking a reference to a thread local.
+ ThreadLocalRef(DefId),
+ /// A `yield` expression.
+ Yield {
+ value: ExprId,
+ },
+}
+
+/// Represents the association of a field identifier and an expression.
+///
+/// This is used in struct constructors.
+#[derive(Clone, Debug, HashStable)]
+pub struct FieldExpr {
+ pub name: Field,
+ pub expr: ExprId,
+}
+
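+/// Functional Record Update (FRU) information for a struct expression with a
+/// base, e.g. `Foo { x: 1, ..base }`: the base expression together with the
+/// types of all the fields of the struct.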
+#[derive(Clone, Debug, HashStable)]
+pub struct FruInfo<'tcx> {
+ pub base: ExprId,
+ pub field_types: Box<[Ty<'tcx>]>,
+}
+
+/// A `match` arm.
+#[derive(Clone, Debug, HashStable)]
+pub struct Arm<'tcx> {
+ pub pattern: Pat<'tcx>,
+ pub guard: Option<Guard<'tcx>>,
+ pub body: ExprId,
+ pub lint_level: LintLevel,
+ pub scope: region::Scope,
+ pub span: Span,
+}
+
+/// A `match` guard.
+#[derive(Clone, Debug, HashStable)]
+pub enum Guard<'tcx> {
+ If(ExprId),
+ IfLet(Pat<'tcx>, ExprId),
+}
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum LogicalOp {
+ /// The `&&` operator.
+ And,
+ /// The `||` operator.
+ Or,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub enum InlineAsmOperand<'tcx> {
+ In {
+ reg: InlineAsmRegOrRegClass,
+ expr: ExprId,
+ },
+ Out {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ expr: Option<ExprId>,
+ },
+ InOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ expr: ExprId,
+ },
+ SplitInOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ in_expr: ExprId,
+ out_expr: Option<ExprId>,
+ },
+ Const {
+ value: mir::ConstantKind<'tcx>,
+ span: Span,
+ },
+ SymFn {
+ value: mir::ConstantKind<'tcx>,
+ span: Span,
+ },
+ SymStatic {
+ def_id: DefId,
+ },
+}
+
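+/// How a pattern binding captures the matched value: by value (`x`) or by
+/// reference (`ref x` / `ref mut x`).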
+#[derive(Copy, Clone, Debug, PartialEq, HashStable)]
+pub enum BindingMode {
+ ByValue,
+ ByRef(BorrowKind),
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct FieldPat<'tcx> {
+ pub field: Field,
+ pub pattern: Pat<'tcx>,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Pat<'tcx> {
+ pub ty: Ty<'tcx>,
+ pub span: Span,
+ pub kind: Box<PatKind<'tcx>>,
+}
+
+impl<'tcx> Pat<'tcx> {
+ pub fn wildcard_from_ty(ty: Ty<'tcx>) -> Self {
+ Pat { ty, span: DUMMY_SP, kind: Box::new(PatKind::Wild) }
+ }
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Ascription<'tcx> {
+ pub annotation: CanonicalUserTypeAnnotation<'tcx>,
+ /// Variance to use when relating the `user_ty` to the **type of the value being
+ /// matched**. Typically, this is `Variance::Covariant`, since the value being matched must
+ /// have a type that is some subtype of the ascribed type.
+ ///
+ /// Note that this variance does not apply for any bindings within subpatterns. The type
+ /// assigned to those bindings must be exactly equal to the `user_ty` given here.
+ ///
+ /// The only place where this field is not `Covariant` is when matching constants, where
+ /// we currently use `Contravariant` -- this is because the constant type just needs to
+ /// be "comparable" to the type of the input value. So, for example:
+ ///
+ /// ```text
+ /// match x { "foo" => .. }
+ /// ```
+ ///
+ /// requires that `&'static str <: T_x`, where `T_x` is the type of `x`. Really, we should
+ /// probably be checking for a `PartialEq` impl instead, but this preserves the behavior
+ /// of the old type-check for now. See #57280 for details.
+ pub variance: ty::Variance,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub enum PatKind<'tcx> {
+ /// A wildcard pattern: `_`.
+ Wild,
+
+ AscribeUserType {
+ ascription: Ascription<'tcx>,
+ subpattern: Pat<'tcx>,
+ },
+
+ /// `x`, `ref x`, `x @ P`, etc.
+ Binding {
+ mutability: Mutability,
+ name: Symbol,
+ mode: BindingMode,
+ var: LocalVarId,
+ ty: Ty<'tcx>,
+ subpattern: Option<Pat<'tcx>>,
+ /// Is this the leftmost occurrence of the binding, i.e., is `var` the
+ /// `HirId` of this pattern?
+ is_primary: bool,
+ },
+
+ /// `Foo(...)` or `Foo{...}` or `Foo`, where `Foo` is a variant name from an ADT with
+ /// multiple variants.
+ Variant {
+ adt_def: AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ variant_index: VariantIdx,
+ subpatterns: Vec<FieldPat<'tcx>>,
+ },
+
+ /// `(...)`, `Foo(...)`, `Foo{...}`, or `Foo`, where `Foo` is a variant name from an ADT with
+ /// a single variant.
+ Leaf {
+ subpatterns: Vec<FieldPat<'tcx>>,
+ },
+
+ /// `box P`, `&P`, `&mut P`, etc.
+ Deref {
+ subpattern: Pat<'tcx>,
+ },
+
+ /// One of the following:
+ /// * `&str`, which will be handled as a string pattern and thus exhaustiveness
+ /// checking will detect if you use the same string twice in different patterns.
+ /// * integer, bool, char or float, which will be handled by exhaustiveness to cover exactly
+ /// its own value, similar to `&str`, but these values are much simpler.
+    /// * Opaque constants, which must not be matched structurally, i.e. anything
+    ///   that does not derive `PartialEq` and `Eq`.
+ Constant {
+ value: mir::ConstantKind<'tcx>,
+ },
+
+ Range(PatRange<'tcx>),
+
+ /// Matches against a slice, checking the length and extracting elements.
+    /// Irrefutable when there is a slice pattern and both `prefix` and `suffix` are empty,
+    /// e.g., `&[ref xs @ ..]`.
+ Slice {
+ prefix: Vec<Pat<'tcx>>,
+ slice: Option<Pat<'tcx>>,
+ suffix: Vec<Pat<'tcx>>,
+ },
+
+ /// Fixed match against an array; irrefutable.
+ Array {
+ prefix: Vec<Pat<'tcx>>,
+ slice: Option<Pat<'tcx>>,
+ suffix: Vec<Pat<'tcx>>,
+ },
+
+ /// An or-pattern, e.g. `p | q`.
+ /// Invariant: `pats.len() >= 2`.
+ Or {
+ pats: Vec<Pat<'tcx>>,
+ },
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, HashStable)]
+pub struct PatRange<'tcx> {
+ pub lo: mir::ConstantKind<'tcx>,
+ pub hi: mir::ConstantKind<'tcx>,
+ pub end: RangeEnd,
+}
+
+impl<'tcx> fmt::Display for Pat<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Printing lists is a chore.
+ let mut first = true;
+ let mut start_or_continue = |s| {
+ if first {
+ first = false;
+ ""
+ } else {
+ s
+ }
+ };
+ let mut start_or_comma = || start_or_continue(", ");
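+        // For example, when printing `Foo { a: 1, b: 2 }`, the first field is
+        // preceded by "" and every later field by ", ".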
+
+ match *self.kind {
+ PatKind::Wild => write!(f, "_"),
+ PatKind::AscribeUserType { ref subpattern, .. } => write!(f, "{}: _", subpattern),
+ PatKind::Binding { mutability, name, mode, ref subpattern, .. } => {
+ let is_mut = match mode {
+ BindingMode::ByValue => mutability == Mutability::Mut,
+ BindingMode::ByRef(bk) => {
+ write!(f, "ref ")?;
+ matches!(bk, BorrowKind::Mut { .. })
+ }
+ };
+ if is_mut {
+ write!(f, "mut ")?;
+ }
+ write!(f, "{}", name)?;
+ if let Some(ref subpattern) = *subpattern {
+ write!(f, " @ {}", subpattern)?;
+ }
+ Ok(())
+ }
+ PatKind::Variant { ref subpatterns, .. } | PatKind::Leaf { ref subpatterns } => {
+ let variant = match *self.kind {
+ PatKind::Variant { adt_def, variant_index, .. } => {
+ Some(adt_def.variant(variant_index))
+ }
+ _ => self.ty.ty_adt_def().and_then(|adt| {
+ if !adt.is_enum() { Some(adt.non_enum_variant()) } else { None }
+ }),
+ };
+
+ if let Some(variant) = variant {
+ write!(f, "{}", variant.name)?;
+
+                    // Only for ADTs can we have the `S { ... }` form,
+                    // which we handle separately here.
+ if variant.ctor_kind == CtorKind::Fictive {
+ write!(f, " {{ ")?;
+
+ let mut printed = 0;
+ for p in subpatterns {
+ if let PatKind::Wild = *p.pattern.kind {
+ continue;
+ }
+ let name = variant.fields[p.field.index()].name;
+ write!(f, "{}{}: {}", start_or_comma(), name, p.pattern)?;
+ printed += 1;
+ }
+
+ if printed < variant.fields.len() {
+ write!(f, "{}..", start_or_comma())?;
+ }
+
+ return write!(f, " }}");
+ }
+ }
+
+ let num_fields = variant.map_or(subpatterns.len(), |v| v.fields.len());
+ if num_fields != 0 || variant.is_none() {
+ write!(f, "(")?;
+ for i in 0..num_fields {
+ write!(f, "{}", start_or_comma())?;
+
+ // Common case: the field is where we expect it.
+ if let Some(p) = subpatterns.get(i) {
+ if p.field.index() == i {
+ write!(f, "{}", p.pattern)?;
+ continue;
+ }
+ }
+
+ // Otherwise, we have to go looking for it.
+ if let Some(p) = subpatterns.iter().find(|p| p.field.index() == i) {
+ write!(f, "{}", p.pattern)?;
+ } else {
+ write!(f, "_")?;
+ }
+ }
+ write!(f, ")")?;
+ }
+
+ Ok(())
+ }
+ PatKind::Deref { ref subpattern } => {
+ match self.ty.kind() {
+ ty::Adt(def, _) if def.is_box() => write!(f, "box ")?,
+ ty::Ref(_, _, mutbl) => {
+ write!(f, "&{}", mutbl.prefix_str())?;
+ }
+ _ => bug!("{} is a bad Deref pattern type", self.ty),
+ }
+ write!(f, "{}", subpattern)
+ }
+ PatKind::Constant { value } => write!(f, "{}", value),
+ PatKind::Range(PatRange { lo, hi, end }) => {
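+                // Renders e.g. `0..5` or `0..=5`: `end` prints as `..` or `..=`.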
+ write!(f, "{}", lo)?;
+ write!(f, "{}", end)?;
+ write!(f, "{}", hi)
+ }
+ PatKind::Slice { ref prefix, ref slice, ref suffix }
+ | PatKind::Array { ref prefix, ref slice, ref suffix } => {
+ write!(f, "[")?;
+ for p in prefix {
+ write!(f, "{}{}", start_or_comma(), p)?;
+ }
+ if let Some(ref slice) = *slice {
+ write!(f, "{}", start_or_comma())?;
+ match *slice.kind {
+ PatKind::Wild => {}
+ _ => write!(f, "{}", slice)?,
+ }
+ write!(f, "..")?;
+ }
+ for p in suffix {
+ write!(f, "{}{}", start_or_comma(), p)?;
+ }
+ write!(f, "]")
+ }
+ PatKind::Or { ref pats } => {
+ for pat in pats {
+ write!(f, "{}{}", start_or_continue(" | "), pat)?;
+ }
+ Ok(())
+ }
+ }
+ }
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ // These are in alphabetical order, which is easy to maintain.
+ rustc_data_structures::static_assert_size!(Block, 56);
+ rustc_data_structures::static_assert_size!(Expr<'_>, 104);
+ rustc_data_structures::static_assert_size!(Pat<'_>, 24);
+ rustc_data_structures::static_assert_size!(Stmt<'_>, 120);
+}
diff --git a/compiler/rustc_middle/src/thir/visit.rs b/compiler/rustc_middle/src/thir/visit.rs
new file mode 100644
index 000000000..97249fdd1
--- /dev/null
+++ b/compiler/rustc_middle/src/thir/visit.rs
@@ -0,0 +1,244 @@
+use super::{
+ Arm, Block, Expr, ExprKind, Guard, InlineAsmOperand, Pat, PatKind, Stmt, StmtKind, Thir,
+};
+
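+/// A depth-first THIR visitor.
+///
+/// A minimal sketch of an implementing type (the `ExprCounter` name is
+/// hypothetical, purely for illustration): override the `visit_*` methods you
+/// care about and delegate to the corresponding `walk_*` function so the
+/// traversal continues into subnodes.
+///
+/// ```ignore (illustrative)
+/// struct ExprCounter<'a, 'tcx> {
+///     thir: &'a Thir<'tcx>,
+///     count: usize,
+/// }
+///
+/// impl<'a, 'tcx: 'a> Visitor<'a, 'tcx> for ExprCounter<'a, 'tcx> {
+///     fn thir(&self) -> &'a Thir<'tcx> {
+///         self.thir
+///     }
+///
+///     fn visit_expr(&mut self, expr: &Expr<'tcx>) {
+///         self.count += 1;
+///         // Without this call the walk would stop at the first expression.
+///         walk_expr(self, expr);
+///     }
+/// }
+/// ```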
+pub trait Visitor<'a, 'tcx: 'a>: Sized {
+ fn thir(&self) -> &'a Thir<'tcx>;
+
+ fn visit_expr(&mut self, expr: &Expr<'tcx>) {
+ walk_expr(self, expr);
+ }
+
+ fn visit_stmt(&mut self, stmt: &Stmt<'tcx>) {
+ walk_stmt(self, stmt);
+ }
+
+ fn visit_block(&mut self, block: &Block) {
+ walk_block(self, block);
+ }
+
+ fn visit_arm(&mut self, arm: &Arm<'tcx>) {
+ walk_arm(self, arm);
+ }
+
+ fn visit_pat(&mut self, pat: &Pat<'tcx>) {
+ walk_pat(self, pat);
+ }
+
+ // Note: We don't have visitors for `ty::Const` and `mir::ConstantKind`
+ // (even though these types occur in THIR) for consistency and to reduce confusion,
+    // since the lazy creation of constants during THIR construction causes most
+ // 'constants' to not be of type `ty::Const` or `mir::ConstantKind` at that
+ // stage (they are mostly still identified by `DefId` or `hir::Lit`, see
+ // the variants `Literal`, `NonHirLiteral` and `NamedConst` in `thir::ExprKind`).
+ // You have to manually visit `ty::Const` and `mir::ConstantKind` through the
+ // other `visit*` functions.
+}
+
+pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Expr<'tcx>) {
+ use ExprKind::*;
+ match expr.kind {
+ Scope { value, region_scope: _, lint_level: _ } => {
+ visitor.visit_expr(&visitor.thir()[value])
+ }
+ Box { value } => visitor.visit_expr(&visitor.thir()[value]),
+ If { cond, then, else_opt, if_then_scope: _ } => {
+ visitor.visit_expr(&visitor.thir()[cond]);
+ visitor.visit_expr(&visitor.thir()[then]);
+ if let Some(else_expr) = else_opt {
+ visitor.visit_expr(&visitor.thir()[else_expr]);
+ }
+ }
+ Call { fun, ref args, ty: _, from_hir_call: _, fn_span: _ } => {
+ visitor.visit_expr(&visitor.thir()[fun]);
+ for &arg in &**args {
+ visitor.visit_expr(&visitor.thir()[arg]);
+ }
+ }
+ Deref { arg } => visitor.visit_expr(&visitor.thir()[arg]),
+ Binary { lhs, rhs, op: _ } | LogicalOp { lhs, rhs, op: _ } => {
+ visitor.visit_expr(&visitor.thir()[lhs]);
+ visitor.visit_expr(&visitor.thir()[rhs]);
+ }
+ Unary { arg, op: _ } => visitor.visit_expr(&visitor.thir()[arg]),
+ Cast { source } => visitor.visit_expr(&visitor.thir()[source]),
+ Use { source } => visitor.visit_expr(&visitor.thir()[source]),
+ NeverToAny { source } => visitor.visit_expr(&visitor.thir()[source]),
+ Pointer { source, cast: _ } => visitor.visit_expr(&visitor.thir()[source]),
+ Let { expr, .. } => {
+ visitor.visit_expr(&visitor.thir()[expr]);
+ }
+ Loop { body } => visitor.visit_expr(&visitor.thir()[body]),
+ Match { scrutinee, ref arms } => {
+ visitor.visit_expr(&visitor.thir()[scrutinee]);
+ for &arm in &**arms {
+ visitor.visit_arm(&visitor.thir()[arm]);
+ }
+ }
+ Block { ref body } => visitor.visit_block(body),
+ Assign { lhs, rhs } | AssignOp { lhs, rhs, op: _ } => {
+ visitor.visit_expr(&visitor.thir()[lhs]);
+ visitor.visit_expr(&visitor.thir()[rhs]);
+ }
+ Field { lhs, variant_index: _, name: _ } => visitor.visit_expr(&visitor.thir()[lhs]),
+ Index { lhs, index } => {
+ visitor.visit_expr(&visitor.thir()[lhs]);
+ visitor.visit_expr(&visitor.thir()[index]);
+ }
+ VarRef { id: _ } | UpvarRef { closure_def_id: _, var_hir_id: _ } => {}
+ Borrow { arg, borrow_kind: _ } => visitor.visit_expr(&visitor.thir()[arg]),
+ AddressOf { arg, mutability: _ } => visitor.visit_expr(&visitor.thir()[arg]),
+ Break { value, label: _ } => {
+ if let Some(value) = value {
+ visitor.visit_expr(&visitor.thir()[value])
+ }
+ }
+ Continue { label: _ } => {}
+ Return { value } => {
+ if let Some(value) = value {
+ visitor.visit_expr(&visitor.thir()[value])
+ }
+ }
+ ConstBlock { did: _, substs: _ } => {}
+ Repeat { value, count: _ } => {
+ visitor.visit_expr(&visitor.thir()[value]);
+ }
+ Array { ref fields } | Tuple { ref fields } => {
+ for &field in &**fields {
+ visitor.visit_expr(&visitor.thir()[field]);
+ }
+ }
+ Adt(box crate::thir::Adt {
+ ref fields,
+ ref base,
+ adt_def: _,
+ variant_index: _,
+ substs: _,
+ user_ty: _,
+ }) => {
+ for field in &**fields {
+ visitor.visit_expr(&visitor.thir()[field.expr]);
+ }
+ if let Some(base) = base {
+ visitor.visit_expr(&visitor.thir()[base.base]);
+ }
+ }
+ PlaceTypeAscription { source, user_ty: _ } | ValueTypeAscription { source, user_ty: _ } => {
+ visitor.visit_expr(&visitor.thir()[source])
+ }
+ Closure { closure_id: _, substs: _, upvars: _, movability: _, fake_reads: _ } => {}
+ Literal { lit: _, neg: _ } => {}
+ NonHirLiteral { lit: _, user_ty: _ } => {}
+ ZstLiteral { user_ty: _ } => {}
+ NamedConst { def_id: _, substs: _, user_ty: _ } => {}
+ ConstParam { param: _, def_id: _ } => {}
+ StaticRef { alloc_id: _, ty: _, def_id: _ } => {}
+ InlineAsm { ref operands, template: _, options: _, line_spans: _ } => {
+ for op in &**operands {
+ use InlineAsmOperand::*;
+ match op {
+ In { expr, reg: _ }
+ | Out { expr: Some(expr), reg: _, late: _ }
+ | InOut { expr, reg: _, late: _ } => visitor.visit_expr(&visitor.thir()[*expr]),
+ SplitInOut { in_expr, out_expr, reg: _, late: _ } => {
+ visitor.visit_expr(&visitor.thir()[*in_expr]);
+ if let Some(out_expr) = out_expr {
+ visitor.visit_expr(&visitor.thir()[*out_expr]);
+ }
+ }
+ Out { expr: None, reg: _, late: _ }
+ | Const { value: _, span: _ }
+ | SymFn { value: _, span: _ }
+ | SymStatic { def_id: _ } => {}
+ }
+ }
+ }
+ ThreadLocalRef(_) => {}
+ Yield { value } => visitor.visit_expr(&visitor.thir()[value]),
+ }
+}
+
+pub fn walk_stmt<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, stmt: &Stmt<'tcx>) {
+ match &stmt.kind {
+ StmtKind::Expr { expr, scope: _ } => visitor.visit_expr(&visitor.thir()[*expr]),
+ StmtKind::Let {
+ initializer,
+ remainder_scope: _,
+ init_scope: _,
+ ref pattern,
+ lint_level: _,
+ else_block,
+ } => {
+ if let Some(init) = initializer {
+ visitor.visit_expr(&visitor.thir()[*init]);
+ }
+ visitor.visit_pat(pattern);
+ if let Some(block) = else_block {
+ visitor.visit_block(block)
+ }
+ }
+ }
+}
+
+pub fn walk_block<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, block: &Block) {
+ for &stmt in &*block.stmts {
+ visitor.visit_stmt(&visitor.thir()[stmt]);
+ }
+ if let Some(expr) = block.expr {
+ visitor.visit_expr(&visitor.thir()[expr]);
+ }
+}
+
+pub fn walk_arm<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, arm: &Arm<'tcx>) {
+ match arm.guard {
+ Some(Guard::If(expr)) => visitor.visit_expr(&visitor.thir()[expr]),
+ Some(Guard::IfLet(ref pat, expr)) => {
+ visitor.visit_pat(pat);
+ visitor.visit_expr(&visitor.thir()[expr]);
+ }
+ None => {}
+ }
+ visitor.visit_pat(&arm.pattern);
+ visitor.visit_expr(&visitor.thir()[arm.body]);
+}
+
+pub fn walk_pat<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, pat: &Pat<'tcx>) {
+ use PatKind::*;
+ match pat.kind.as_ref() {
+ AscribeUserType { subpattern, ascription: _ }
+ | Deref { subpattern }
+ | Binding {
+ subpattern: Some(subpattern),
+ mutability: _,
+ mode: _,
+ var: _,
+ ty: _,
+ is_primary: _,
+ name: _,
+ } => visitor.visit_pat(&subpattern),
+ Binding { .. } | Wild => {}
+ Variant { subpatterns, adt_def: _, substs: _, variant_index: _ } | Leaf { subpatterns } => {
+ for subpattern in subpatterns {
+ visitor.visit_pat(&subpattern.pattern);
+ }
+ }
+ Constant { value: _ } => {}
+ Range(_) => {}
+ Slice { prefix, slice, suffix } | Array { prefix, slice, suffix } => {
+ for subpattern in prefix {
+ visitor.visit_pat(&subpattern);
+ }
+ if let Some(pat) = slice {
+ visitor.visit_pat(pat);
+ }
+ for subpattern in suffix {
+ visitor.visit_pat(&subpattern);
+ }
+ }
+ Or { pats } => {
+ for pat in pats {
+ visitor.visit_pat(&pat);
+ }
+ }
+ };
+}
diff --git a/compiler/rustc_middle/src/traits/chalk.rs b/compiler/rustc_middle/src/traits/chalk.rs
new file mode 100644
index 000000000..6d4af8bea
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/chalk.rs
@@ -0,0 +1,403 @@
+//! Types required for Chalk-related queries
+//!
+//! The primary purpose of this file is defining an implementation for the
+//! `chalk_ir::interner::Interner` trait. The primary purpose of this trait, as
+//! its name suggests, is to provide an abstraction boundary for creating
+//! interned Chalk types.
+
+use rustc_middle::ty::{self, AdtDef, TyCtxt};
+
+use rustc_hir::def_id::DefId;
+use rustc_target::spec::abi::Abi;
+
+use std::cmp::Ordering;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+
+#[derive(Copy, Clone)]
+pub struct RustInterner<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> Hash for RustInterner<'tcx> {
+ fn hash<H: Hasher>(&self, _state: &mut H) {}
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> Ord for RustInterner<'tcx> {
+ fn cmp(&self, _other: &Self) -> Ordering {
+ Ordering::Equal
+ }
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> PartialOrd for RustInterner<'tcx> {
+ fn partial_cmp(&self, _other: &Self) -> Option<Ordering> {
+ None
+ }
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> PartialEq for RustInterner<'tcx> {
+ fn eq(&self, _other: &Self) -> bool {
+ false
+ }
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> Eq for RustInterner<'tcx> {}
+
+impl fmt::Debug for RustInterner<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "RustInterner")
+ }
+}
+
+// Right now, there is no interning at all. I was running into problems with
+// adding interning in `ty/context.rs` for Chalk types with
+// `parallel-compiler = true`. -jackh726
+impl<'tcx> chalk_ir::interner::Interner for RustInterner<'tcx> {
+ type InternedType = Box<chalk_ir::TyData<Self>>;
+ type InternedLifetime = Box<chalk_ir::LifetimeData<Self>>;
+ type InternedConst = Box<chalk_ir::ConstData<Self>>;
+ type InternedConcreteConst = ty::ValTree<'tcx>;
+ type InternedGenericArg = Box<chalk_ir::GenericArgData<Self>>;
+ type InternedGoal = Box<chalk_ir::GoalData<Self>>;
+ type InternedGoals = Vec<chalk_ir::Goal<Self>>;
+ type InternedSubstitution = Vec<chalk_ir::GenericArg<Self>>;
+ type InternedProgramClause = Box<chalk_ir::ProgramClauseData<Self>>;
+ type InternedProgramClauses = Vec<chalk_ir::ProgramClause<Self>>;
+ type InternedQuantifiedWhereClauses = Vec<chalk_ir::QuantifiedWhereClause<Self>>;
+ type InternedVariableKinds = Vec<chalk_ir::VariableKind<Self>>;
+ type InternedCanonicalVarKinds = Vec<chalk_ir::CanonicalVarKind<Self>>;
+ type InternedVariances = Vec<chalk_ir::Variance>;
+ type InternedConstraints = Vec<chalk_ir::InEnvironment<chalk_ir::Constraint<Self>>>;
+ type DefId = DefId;
+ type InternedAdtId = AdtDef<'tcx>;
+ type Identifier = ();
+ type FnAbi = Abi;
+
+ fn debug_program_clause_implication(
+ pci: &chalk_ir::ProgramClauseImplication<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
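+        // Renders e.g. `consequence :- cond1, cond2 ; constraint1`: the
+        // conditions come before the `;`, the region constraints after it.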
+ let mut write = || {
+ write!(fmt, "{:?}", pci.consequence)?;
+
+ let conditions = pci.conditions.interned();
+ let constraints = pci.constraints.interned();
+
+ let conds = conditions.len();
+ let consts = constraints.len();
+ if conds == 0 && consts == 0 {
+ return Ok(());
+ }
+
+ write!(fmt, " :- ")?;
+
+ if conds != 0 {
+ for cond in &conditions[..conds - 1] {
+ write!(fmt, "{:?}, ", cond)?;
+ }
+ write!(fmt, "{:?}", conditions[conds - 1])?;
+ }
+
+ if conds != 0 && consts != 0 {
+ write!(fmt, " ; ")?;
+ }
+
+ if consts != 0 {
+ for constraint in &constraints[..consts - 1] {
+ write!(fmt, "{:?}, ", constraint)?;
+ }
+ write!(fmt, "{:?}", constraints[consts - 1])?;
+ }
+
+ Ok(())
+ };
+ Some(write())
+ }
+
+ fn debug_substitution(
+ substitution: &chalk_ir::Substitution<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", substitution.interned()))
+ }
+
+ fn debug_separator_trait_ref(
+ separator_trait_ref: &chalk_ir::SeparatorTraitRef<'_, Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ let substitution = &separator_trait_ref.trait_ref.substitution;
+ let parameters = substitution.interned();
+ Some(write!(
+ fmt,
+ "{:?}{}{:?}{:?}",
+ parameters[0],
+ separator_trait_ref.separator,
+ separator_trait_ref.trait_ref.trait_id,
+ chalk_ir::debug::Angle(&parameters[1..])
+ ))
+ }
+
+ fn debug_quantified_where_clauses(
+ clauses: &chalk_ir::QuantifiedWhereClauses<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", clauses.interned()))
+ }
+
+ fn debug_ty(ty: &chalk_ir::Ty<Self>, fmt: &mut fmt::Formatter<'_>) -> Option<fmt::Result> {
+ match &ty.interned().kind {
+ chalk_ir::TyKind::Ref(chalk_ir::Mutability::Not, lifetime, ty) => {
+ Some(write!(fmt, "(&{:?} {:?})", lifetime, ty))
+ }
+ chalk_ir::TyKind::Ref(chalk_ir::Mutability::Mut, lifetime, ty) => {
+ Some(write!(fmt, "(&{:?} mut {:?})", lifetime, ty))
+ }
+ chalk_ir::TyKind::Array(ty, len) => Some(write!(fmt, "[{:?}; {:?}]", ty, len)),
+ chalk_ir::TyKind::Slice(ty) => Some(write!(fmt, "[{:?}]", ty)),
+ chalk_ir::TyKind::Tuple(len, substs) => Some((|| {
+ write!(fmt, "(")?;
+ for (idx, substitution) in substs.interned().iter().enumerate() {
+                    if idx == *len - 1 && *len != 1 {
+ // Don't add a trailing comma if the tuple has more than one element
+ write!(fmt, "{:?}", substitution)?;
+ } else {
+ write!(fmt, "{:?},", substitution)?;
+ }
+ }
+ write!(fmt, ")")
+ })()),
+ _ => None,
+ }
+ }
+
+ fn debug_alias(
+ alias_ty: &chalk_ir::AliasTy<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ match alias_ty {
+ chalk_ir::AliasTy::Projection(projection_ty) => {
+ Self::debug_projection_ty(projection_ty, fmt)
+ }
+ chalk_ir::AliasTy::Opaque(opaque_ty) => Self::debug_opaque_ty(opaque_ty, fmt),
+ }
+ }
+
+ fn debug_projection_ty(
+ projection_ty: &chalk_ir::ProjectionTy<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(
+ fmt,
+ "projection: {:?} {:?}",
+ projection_ty.associated_ty_id, projection_ty.substitution,
+ ))
+ }
+
+ fn debug_opaque_ty(
+ opaque_ty: &chalk_ir::OpaqueTy<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", opaque_ty.opaque_ty_id))
+ }
+
+ fn intern_ty(self, ty: chalk_ir::TyKind<Self>) -> Self::InternedType {
+ let flags = ty.compute_flags(self);
+ Box::new(chalk_ir::TyData { kind: ty, flags: flags })
+ }
+
+ fn ty_data<'a>(self, ty: &'a Self::InternedType) -> &'a chalk_ir::TyData<Self> {
+ ty
+ }
+
+ fn intern_lifetime(self, lifetime: chalk_ir::LifetimeData<Self>) -> Self::InternedLifetime {
+ Box::new(lifetime)
+ }
+
+ fn lifetime_data<'a>(
+ self,
+ lifetime: &'a Self::InternedLifetime,
+ ) -> &'a chalk_ir::LifetimeData<Self> {
+ &lifetime
+ }
+
+ fn intern_const(self, constant: chalk_ir::ConstData<Self>) -> Self::InternedConst {
+ Box::new(constant)
+ }
+
+ fn const_data<'a>(self, constant: &'a Self::InternedConst) -> &'a chalk_ir::ConstData<Self> {
+ &constant
+ }
+
+ fn const_eq(
+ self,
+ _ty: &Self::InternedType,
+ c1: &Self::InternedConcreteConst,
+ c2: &Self::InternedConcreteConst,
+ ) -> bool {
+ c1 == c2
+ }
+
+ fn intern_generic_arg(self, data: chalk_ir::GenericArgData<Self>) -> Self::InternedGenericArg {
+ Box::new(data)
+ }
+
+ fn generic_arg_data<'a>(
+ self,
+ data: &'a Self::InternedGenericArg,
+ ) -> &'a chalk_ir::GenericArgData<Self> {
+ &data
+ }
+
+ fn intern_goal(self, goal: chalk_ir::GoalData<Self>) -> Self::InternedGoal {
+ Box::new(goal)
+ }
+
+ fn goal_data<'a>(self, goal: &'a Self::InternedGoal) -> &'a chalk_ir::GoalData<Self> {
+ &goal
+ }
+
+ fn intern_goals<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::Goal<Self>, E>>,
+ ) -> Result<Self::InternedGoals, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn goals_data<'a>(self, goals: &'a Self::InternedGoals) -> &'a [chalk_ir::Goal<Self>] {
+ goals
+ }
+
+ fn intern_substitution<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::GenericArg<Self>, E>>,
+ ) -> Result<Self::InternedSubstitution, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn substitution_data<'a>(
+ self,
+ substitution: &'a Self::InternedSubstitution,
+ ) -> &'a [chalk_ir::GenericArg<Self>] {
+ substitution
+ }
+
+ fn intern_program_clause(
+ self,
+ data: chalk_ir::ProgramClauseData<Self>,
+ ) -> Self::InternedProgramClause {
+ Box::new(data)
+ }
+
+ fn program_clause_data<'a>(
+ self,
+ clause: &'a Self::InternedProgramClause,
+ ) -> &'a chalk_ir::ProgramClauseData<Self> {
+ &clause
+ }
+
+ fn intern_program_clauses<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::ProgramClause<Self>, E>>,
+ ) -> Result<Self::InternedProgramClauses, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn program_clauses_data<'a>(
+ self,
+ clauses: &'a Self::InternedProgramClauses,
+ ) -> &'a [chalk_ir::ProgramClause<Self>] {
+ clauses
+ }
+
+ fn intern_quantified_where_clauses<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::QuantifiedWhereClause<Self>, E>>,
+ ) -> Result<Self::InternedQuantifiedWhereClauses, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn quantified_where_clauses_data<'a>(
+ self,
+ clauses: &'a Self::InternedQuantifiedWhereClauses,
+ ) -> &'a [chalk_ir::QuantifiedWhereClause<Self>] {
+ clauses
+ }
+
+ fn intern_generic_arg_kinds<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::VariableKind<Self>, E>>,
+ ) -> Result<Self::InternedVariableKinds, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn variable_kinds_data<'a>(
+ self,
+ parameter_kinds: &'a Self::InternedVariableKinds,
+ ) -> &'a [chalk_ir::VariableKind<Self>] {
+ parameter_kinds
+ }
+
+ fn intern_canonical_var_kinds<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::CanonicalVarKind<Self>, E>>,
+ ) -> Result<Self::InternedCanonicalVarKinds, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn canonical_var_kinds_data<'a>(
+ self,
+ canonical_var_kinds: &'a Self::InternedCanonicalVarKinds,
+ ) -> &'a [chalk_ir::CanonicalVarKind<Self>] {
+ canonical_var_kinds
+ }
+
+ fn intern_constraints<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::InEnvironment<chalk_ir::Constraint<Self>>, E>>,
+ ) -> Result<Self::InternedConstraints, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn constraints_data<'a>(
+ self,
+ constraints: &'a Self::InternedConstraints,
+ ) -> &'a [chalk_ir::InEnvironment<chalk_ir::Constraint<Self>>] {
+ constraints
+ }
+
+ fn intern_variances<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::Variance, E>>,
+ ) -> Result<Self::InternedVariances, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn variances_data<'a>(
+ self,
+ variances: &'a Self::InternedVariances,
+ ) -> &'a [chalk_ir::Variance] {
+ variances
+ }
+}
+
+impl<'tcx> chalk_ir::interner::HasInterner for RustInterner<'tcx> {
+ type Interner = Self;
+}
+
+/// A chalk environment and goal.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub struct ChalkEnvironmentAndGoal<'tcx> {
+ pub environment: &'tcx ty::List<ty::Predicate<'tcx>>,
+ pub goal: ty::Predicate<'tcx>,
+}
+
+impl<'tcx> fmt::Display for ChalkEnvironmentAndGoal<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "environment: {:?}, goal: {}", self.environment, self.goal)
+ }
+}
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
new file mode 100644
index 000000000..72b848c3e
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -0,0 +1,1026 @@
+//! Trait Resolution. See the [rustc dev guide] for more information on how this works.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html
+
+mod chalk;
+pub mod query;
+pub mod select;
+pub mod specialization_graph;
+mod structural_impls;
+pub mod util;
+
+use crate::infer::canonical::Canonical;
+use crate::ty::abstract_const::NotConstEvaluatable;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, AdtKind, Predicate, Ty, TyCtxt};
+
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+use smallvec::SmallVec;
+
+use std::borrow::Cow;
+use std::hash::{Hash, Hasher};
+
+pub use self::select::{EvaluationCache, EvaluationResult, OverflowError, SelectionCache};
+
+pub type CanonicalChalkEnvironmentAndGoal<'tcx> = Canonical<'tcx, ChalkEnvironmentAndGoal<'tcx>>;
+
+pub use self::ObligationCauseCode::*;
+
+pub use self::chalk::{ChalkEnvironmentAndGoal, RustInterner as ChalkRustInterner};
+
+/// Depending on the stage of compilation, we want projection to be
+/// more or less conservative.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, HashStable)]
+pub enum Reveal {
+ /// At type-checking time, we refuse to project any associated
+ /// type that is marked `default`. Non-`default` ("final") types
+ /// are always projected. This is necessary in general for
+ /// soundness of specialization. However, we *could* allow
+ /// projections in fully-monomorphic cases. We choose not to,
+ /// because we prefer for `default type` to force the type
+ /// definition to be treated abstractly by any consumers of the
+ /// impl. Concretely, that means that the following example will
+ /// fail to compile:
+ ///
+ /// ```compile_fail,E0308
+ /// #![feature(specialization)]
+ /// trait Assoc {
+ /// type Output;
+ /// }
+ ///
+ /// impl<T> Assoc for T {
+ /// default type Output = bool;
+ /// }
+ ///
+ /// fn main() {
+ /// let x: <() as Assoc>::Output = true;
+ /// }
+ /// ```
+ ///
+ /// We also do not reveal the hidden type of opaque types during
+ /// type-checking.
+ UserFacing,
+
+ /// At codegen time, all monomorphic projections will succeed.
+ /// Also, `impl Trait` is normalized to the concrete type,
+ /// which has to be already collected by type-checking.
+ ///
+ /// NOTE: as `impl Trait`'s concrete type should *never*
+ /// be observable directly by the user, `Reveal::All`
+ /// should not be used by checks which may expose
+ /// type equality or type contents to the user.
+ /// There are some exceptions, e.g., around auto traits and
+ /// transmute-checking, which expose some details, but
+ /// not the whole concrete type of the `impl Trait`.
+ All,
+}
+
+/// The reason why we incurred this obligation; used for error reporting.
+///
+/// Non-misc `ObligationCauseCode`s are stored on the heap. This gives the
+/// best trade-off: the type stays small (which makes copies cheaper)
+/// without requiring too many heap allocations.
+///
+/// We do not want to intern this as there are a lot of obligation causes which
+/// only live for a short period of time.
+#[derive(Clone, Debug, PartialEq, Eq, Lift)]
+pub struct ObligationCause<'tcx> {
+ pub span: Span,
+
+ /// The ID of the fn body that triggered this obligation. This is
+ /// used for region obligations to determine the precise
+ /// environment in which the region obligation should be evaluated
+ /// (in particular, closures can add new assumptions). See the
+ /// field `region_obligations` of the `FulfillmentContext` for more
+ /// information.
+ pub body_id: hir::HirId,
+
+ code: InternedObligationCauseCode<'tcx>,
+}
+
+// This custom hash function speeds up hashing for `Obligation` deduplication
+// greatly by skipping the `code` field, which can be large and complex. That
+// shouldn't affect hash quality much since there are several other fields in
+// `Obligation` which should be unique enough, especially the predicate itself
+// which is hashed as an interned pointer. See #90996.
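+// Skipping `code` is nonetheless sound: `PartialEq` (derived above) still
+// compares it, so values that compare equal also hash equally.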
+impl Hash for ObligationCause<'_> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.body_id.hash(state);
+ self.span.hash(state);
+ }
+}
+
+impl<'tcx> ObligationCause<'tcx> {
+ #[inline]
+ pub fn new(
+ span: Span,
+ body_id: hir::HirId,
+ code: ObligationCauseCode<'tcx>,
+ ) -> ObligationCause<'tcx> {
+ ObligationCause { span, body_id, code: code.into() }
+ }
+
+ pub fn misc(span: Span, body_id: hir::HirId) -> ObligationCause<'tcx> {
+ ObligationCause::new(span, body_id, MiscObligation)
+ }
+
+ #[inline(always)]
+ pub fn dummy() -> ObligationCause<'tcx> {
+ ObligationCause::dummy_with_span(DUMMY_SP)
+ }
+
+ #[inline(always)]
+ pub fn dummy_with_span(span: Span) -> ObligationCause<'tcx> {
+ ObligationCause { span, body_id: hir::CRATE_HIR_ID, code: Default::default() }
+ }
+
+ pub fn span(&self) -> Span {
+ match *self.code() {
+ ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
+ arm_span,
+ ..
+ }) => arm_span,
+ _ => self.span,
+ }
+ }
+
+ #[inline]
+ pub fn code(&self) -> &ObligationCauseCode<'tcx> {
+ &self.code
+ }
+
+ pub fn map_code(
+ &mut self,
+ f: impl FnOnce(InternedObligationCauseCode<'tcx>) -> ObligationCauseCode<'tcx>,
+ ) {
+ self.code = f(std::mem::take(&mut self.code)).into();
+ }
+
+ pub fn derived_cause(
+ mut self,
+ parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
+ variant: impl FnOnce(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
+ ) -> ObligationCause<'tcx> {
+ /*!
+ * Creates a cause for obligations that are derived from
+ * `obligation` by a recursive search (e.g., for a builtin
+ * bound, or eventually an `auto trait Foo`). If `obligation`
+ * is itself a derived obligation, this is just a clone, but
+ * otherwise we create a "derived obligation" cause so as to
+ * keep track of the original root obligation for error
+ * reporting.
+ */
+
+ // NOTE(flaper87): As of now, it keeps track of the whole error
+ // chain. Ideally, we should have a way to configure this either
+ // by using -Z verbose or just a CLI argument.
+ self.code =
+ variant(DerivedObligationCause { parent_trait_pred, parent_code: self.code }).into();
+ self
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct UnifyReceiverContext<'tcx> {
+ pub assoc_item: ty::AssocItem,
+ pub param_env: ty::ParamEnv<'tcx>,
+ pub substs: SubstsRef<'tcx>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift, Default)]
+pub struct InternedObligationCauseCode<'tcx> {
+ /// `None` for `ObligationCauseCode::MiscObligation` (a common case, occurs ~60% of
+ /// the time). `Some` otherwise.
+ code: Option<Lrc<ObligationCauseCode<'tcx>>>,
+}
+
+impl<'tcx> ObligationCauseCode<'tcx> {
+ #[inline(always)]
+ fn into(self) -> InternedObligationCauseCode<'tcx> {
+ InternedObligationCauseCode {
+ code: if let ObligationCauseCode::MiscObligation = self {
+ None
+ } else {
+ Some(Lrc::new(self))
+ },
+ }
+ }
+}
+
+impl<'tcx> std::ops::Deref for InternedObligationCauseCode<'tcx> {
+ type Target = ObligationCauseCode<'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ self.code.as_deref().unwrap_or(&ObligationCauseCode::MiscObligation)
+ }
+}
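+
+// A minimal sketch of the round-trip this buys (illustrative, not part of
+// the original source). The inherent `into` above takes precedence over
+// `Into::into`, so:
+//
+//     let interned = ObligationCauseCode::MiscObligation.into();
+//     // `interned.code` is `None`: the common case allocates nothing.
+//     assert!(matches!(*interned, ObligationCauseCode::MiscObligation));
+//
+// Any other variant is boxed into an `Lrc` exactly once.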
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub enum ObligationCauseCode<'tcx> {
+ /// Not well classified or should be obvious from the span.
+ MiscObligation,
+
+ /// A slice or array is WF only if `T: Sized`.
+ SliceOrArrayElem,
+
+ /// A tuple is WF only if its middle elements are `Sized`.
+ TupleElem,
+
+ /// This is the trait reference from the given projection.
+ ProjectionWf(ty::ProjectionTy<'tcx>),
+
+ /// In an impl of trait `X` for type `Y`, type `Y` must
+ /// also implement all supertraits of `X`.
+ ItemObligation(DefId),
+
+ /// Like `ItemObligation`, but with extra detail on the source of the obligation.
+ BindingObligation(DefId, Span),
+
+ /// A type like `&'a T` is WF only if `T: 'a`.
+ ReferenceOutlivesReferent(Ty<'tcx>),
+
+ /// A type like `Box<Foo<'a> + 'b>` is WF only if `'b: 'a`.
+ ObjectTypeBound(Ty<'tcx>, ty::Region<'tcx>),
+
+ /// Obligation incurred due to an object cast.
+ ObjectCastObligation(/* Concrete type */ Ty<'tcx>, /* Object type */ Ty<'tcx>),
+
+ /// Obligation incurred due to a coercion.
+ Coercion {
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ },
+
+ /// Various cases where expressions must be `Sized` / `Copy` / etc.
+ /// `L = X` implies that `L` is `Sized`.
+ AssignmentLhsSized,
+ /// `(x1, .., xn)` must be `Sized`.
+ TupleInitializerSized,
+ /// `S { ... }` must be `Sized`.
+ StructInitializerSized,
+ /// Type of each variable must be `Sized`.
+ VariableType(hir::HirId),
+ /// Argument type must be `Sized`.
+ SizedArgumentType(Option<Span>),
+ /// Return type must be `Sized`.
+ SizedReturnType,
+ /// Yield type must be `Sized`.
+ SizedYieldType,
+ /// Box expression result type must be `Sized`.
+ SizedBoxType,
+ /// Inline asm operand type must be `Sized`.
+ InlineAsmSized,
+ /// `[expr; N]` requires `type_of(expr): Copy`.
+ RepeatElementCopy {
+ /// If element is a `const fn` we display a help message suggesting to move the
+ /// function call to a new `const` item while saying that `T` doesn't implement `Copy`.
+ is_const_fn: bool,
+ },
+
+ /// Types of fields (other than the last, except for packed structs) in a struct must be sized.
+ FieldSized {
+ adt_kind: AdtKind,
+ span: Span,
+ last: bool,
+ },
+
+ /// Constant expressions must be sized.
+ ConstSized,
+
+ /// `static` items must have `Sync` type.
+ SharedStatic,
+
+ BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
+
+ ImplDerivedObligation(Box<ImplDerivedObligationCause<'tcx>>),
+
+ DerivedObligation(DerivedObligationCause<'tcx>),
+
+ FunctionArgumentObligation {
+ /// The node of the relevant argument in the function call.
+ arg_hir_id: hir::HirId,
+ /// The node of the function call.
+ call_hir_id: hir::HirId,
+ /// The obligation introduced by this argument.
+ parent_code: InternedObligationCauseCode<'tcx>,
+ },
+
+ /// Error derived when matching traits/impls; see ObligationCause for more details
+ CompareImplItemObligation {
+ impl_item_def_id: LocalDefId,
+ trait_item_def_id: DefId,
+ kind: ty::AssocKind,
+ },
+
+ /// Checking that the bounds of a trait's associated type hold for a given impl
+ CheckAssociatedTypeBounds {
+ impl_item_def_id: LocalDefId,
+ trait_item_def_id: DefId,
+ },
+
+ /// Checking that this expression can be assigned to its target.
+ ExprAssignable,
+
+ /// Computing common supertype in the arms of a match expression
+ MatchExpressionArm(Box<MatchExpressionArmCause<'tcx>>),
+
+ /// Type error arising from type checking a pattern against an expected type.
+ Pattern {
+ /// The span of the scrutinee or type expression which caused the `root_ty` type.
+ span: Option<Span>,
+ /// The root expected type induced by a scrutinee or type expression.
+ root_ty: Ty<'tcx>,
+ /// Whether the `Span` came from an expression or a type expression.
+ origin_expr: bool,
+ },
+
+ /// Constants in patterns must have `Structural` type.
+ ConstPatternStructural,
+
+ /// Computing common supertype in an if expression
+ IfExpression(Box<IfExpressionCause<'tcx>>),
+
+ /// Computing common supertype of an if expression with no else counter-part
+ IfExpressionWithNoElse,
+
+ /// `main` has wrong type
+ MainFunctionType,
+
+ /// `start` has wrong type
+ StartFunctionType,
+
+ /// Intrinsic has wrong type
+ IntrinsicType,
+
+ /// A let else block does not diverge
+ LetElse,
+
+ /// Method receiver
+ MethodReceiver,
+
+ UnifyReceiver(Box<UnifyReceiverContext<'tcx>>),
+
+ /// `return` with no expression
+ ReturnNoExpression,
+
+ /// `return` with an expression
+ ReturnValue(hir::HirId),
+
+ /// Return type of this function
+ ReturnType,
+
+ /// Opaque return type of this function
+ OpaqueReturnType(Option<(Ty<'tcx>, Span)>),
+
+ /// Block implicit return
+ BlockTailExpression(hir::HirId),
+
+ /// #[feature(trivial_bounds)] is not enabled
+ TrivialBound,
+
+ /// If `X` is the concrete type of an opaque type `impl Y`, then `X` must implement `Y`
+ OpaqueType,
+
+ AwaitableExpr(Option<hir::HirId>),
+
+ ForLoopIterator,
+
+ QuestionMark,
+
+ /// Well-formed checking. If a `WellFormedLoc` is provided,
+ /// then it will be used to perform HIR-based wf checking
+ /// after an error occurs, in order to generate a more precise error span.
+ /// This is purely for diagnostic purposes - it is always
+ /// correct to use `MiscObligation` instead, or to specify
+ /// `WellFormed(None)`
+ WellFormed(Option<WellFormedLoc>),
+
+ /// From `match_impl`. The cause for us having to match an impl, and the DefId we are matching against.
+ MatchImpl(ObligationCause<'tcx>, DefId),
+
+ BinOp {
+ rhs_span: Option<Span>,
+ is_lit: bool,
+ output_pred: Option<Predicate<'tcx>>,
+ },
+}
+
+/// The 'location' at which we try to perform HIR-based wf checking.
+/// This information is used to obtain an `hir::Ty`, which
+/// we can walk in order to obtain precise spans for any
+/// 'nested' types (e.g. `Foo` in `Option<Foo>`).
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)]
+pub enum WellFormedLoc {
+ /// Use the type of the provided definition.
+ Ty(LocalDefId),
+ /// Use the type of the parameter of the provided function.
+ /// We cannot use `hir::Param`, since the function may
+ /// not have a body (e.g. a trait method definition)
+ Param {
+ /// The function to lookup the parameter in
+ function: LocalDefId,
+ /// The index of the parameter to use.
+ /// Parameters are indexed from 0, with the return type
+ /// being the last 'parameter'
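+ /// For example (illustrative): in `fn foo(a: u32, b: bool) -> String`,
+ /// `param_idx` 0 is `a`, 1 is `b`, and 2 is the return type `String`.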
+ param_idx: u16,
+ },
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct ImplDerivedObligationCause<'tcx> {
+ pub derived: DerivedObligationCause<'tcx>,
+ pub impl_def_id: DefId,
+ pub span: Span,
+}
+
+impl<'tcx> ObligationCauseCode<'tcx> {
+ /// Returns the base obligation, ignoring derived obligations.
+ pub fn peel_derives(&self) -> &Self {
+ let mut base_cause = self;
+ while let Some((parent_code, _)) = base_cause.parent() {
+ base_cause = parent_code;
+ }
+ base_cause
+ }
+
+ pub fn parent(&self) -> Option<(&Self, Option<ty::PolyTraitPredicate<'tcx>>)> {
+ match self {
+ FunctionArgumentObligation { parent_code, .. } => Some((parent_code, None)),
+ BuiltinDerivedObligation(derived)
+ | DerivedObligation(derived)
+ | ImplDerivedObligation(box ImplDerivedObligationCause { derived, .. }) => {
+ Some((&derived.parent_code, Some(derived.parent_trait_pred)))
+ }
+ _ => None,
+ }
+ }
+}
+
+// `ObligationCauseCode` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ObligationCauseCode<'_>, 48);
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum StatementAsExpression {
+ CorrectType,
+ NeedsBoxing,
+}
+
+impl<'tcx> ty::Lift<'tcx> for StatementAsExpression {
+ type Lifted = StatementAsExpression;
+ fn lift_to_tcx(self, _tcx: TyCtxt<'tcx>) -> Option<StatementAsExpression> {
+ Some(self)
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct MatchExpressionArmCause<'tcx> {
+ pub arm_block_id: Option<hir::HirId>,
+ pub arm_ty: Ty<'tcx>,
+ pub arm_span: Span,
+ pub prior_arm_block_id: Option<hir::HirId>,
+ pub prior_arm_ty: Ty<'tcx>,
+ pub prior_arm_span: Span,
+ pub scrut_span: Span,
+ pub source: hir::MatchSource,
+ pub prior_arms: Vec<Span>,
+ pub scrut_hir_id: hir::HirId,
+ pub opt_suggest_box_span: Option<Span>,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(Lift, TypeFoldable, TypeVisitable)]
+pub struct IfExpressionCause<'tcx> {
+ pub then_id: hir::HirId,
+ pub else_id: hir::HirId,
+ pub then_ty: Ty<'tcx>,
+ pub else_ty: Ty<'tcx>,
+ pub outer_span: Option<Span>,
+ pub opt_suggest_box_span: Option<Span>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct DerivedObligationCause<'tcx> {
+ /// The trait predicate of the parent obligation that led to the
+ /// current obligation. Note that only trait obligations lead to
+ /// derived obligations, so we just store the trait predicate here
+ /// directly.
+ pub parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
+
+ /// The parent trait had this cause.
+ pub parent_code: InternedObligationCauseCode<'tcx>,
+}
+
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable, Lift)]
+pub enum SelectionError<'tcx> {
+ /// The trait is not implemented.
+ Unimplemented,
+ /// After a closure impl was selected, its "outputs" were evaluated
+ /// (which for closures includes the "input" type params) and they
+ /// didn't resolve. See `confirm_poly_trait_refs` for more.
+ OutputTypeParameterMismatch(
+ ty::PolyTraitRef<'tcx>,
+ ty::PolyTraitRef<'tcx>,
+ ty::error::TypeError<'tcx>,
+ ),
+ /// The trait pointed by `DefId` is not object safe.
+ TraitNotObjectSafe(DefId),
+ /// A given constant couldn't be evaluated.
+ NotConstEvaluatable(NotConstEvaluatable),
+ /// Exceeded the recursion depth during type projection.
+ Overflow(OverflowError),
+ /// Signaling that an error has already been emitted, to avoid
+ /// multiple errors being shown.
+ ErrorReporting,
+ /// Multiple applicable `impl`s were found. The `DefId`s correspond to
+ /// all the `impl`s' Items.
+ Ambiguous(Vec<DefId>),
+}
+
+/// When performing resolution, it is typically the case that there
+/// can be one of three outcomes:
+///
+/// - `Ok(Some(r))`: success occurred with result `r`
+/// - `Ok(None)`: could not definitely determine anything, usually due
+/// to inconclusive type inference.
+/// - `Err(e)`: error `e` occurred
+pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
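+
+// A caller might branch on a `SelectionResult` like this (illustrative
+// sketch; `select` itself lives in `rustc_trait_selection`):
+//
+//     match selcx.select(&obligation) {
+//         Ok(Some(impl_source)) => { /* resolved to `impl_source` */ }
+//         Ok(None) => { /* ambiguous: retry once inference knows more */ }
+//         Err(e) => { /* report the selection error `e` */ }
+//     }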
+
+/// Given the successful resolution of an obligation, the `ImplSource`
+/// indicates where the impl comes from.
+///
+/// For example, the obligation may be satisfied by a specific impl (case A),
+/// or it may be relative to some bound that is in scope (case B).
+///
+/// ```ignore (illustrative)
+/// impl<T:Clone> Clone<T> for Option<T> { ... } // Impl_1
+/// impl<T:Clone> Clone<T> for Box<T> { ... } // Impl_2
+/// impl Clone for i32 { ... } // Impl_3
+///
+/// fn foo<T: Clone>(concrete: Option<Box<i32>>, param: T, mixed: Option<T>) {
+/// // Case A: ImplSource points at a specific impl. Only possible when
+/// // type is concretely known. If the impl itself has bounded
+/// // type parameters, ImplSource will carry resolutions for those as well:
+/// concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])])
+///
+/// // Case B: ImplSource must be provided by caller. This applies when
+/// // type is a type parameter.
+/// param.clone(); // ImplSource::Param
+///
+/// // Case C: A mix of cases A and B.
+/// mixed.clone(); // ImplSource(Impl_1, [ImplSource::Param])
+/// }
+/// ```
+///
+/// ### The type parameter `N`
+///
+/// See explanation on `ImplSourceUserDefinedData`.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum ImplSource<'tcx, N> {
+ /// ImplSource identifying a particular impl.
+ UserDefined(ImplSourceUserDefinedData<'tcx, N>),
+
+ /// ImplSource for auto trait implementations.
+ /// This carries the information and nested obligations with regards
+ /// to an auto implementation for a trait `Trait`. The nested obligations
+ /// ensure the trait implementation holds for all the constituent types.
+ AutoImpl(ImplSourceAutoImplData<N>),
+
+ /// Successful resolution to an obligation provided by the caller
+ /// for some type parameter. The `Vec<N>` represents the
+ /// obligations incurred from normalizing the where-clause (if
+ /// any).
+ Param(Vec<N>, ty::BoundConstness),
+
+ /// Virtual calls through an object.
+ Object(ImplSourceObjectData<'tcx, N>),
+
+ /// Successful resolution for a builtin trait.
+ Builtin(ImplSourceBuiltinData<N>),
+
+ /// ImplSource for trait upcasting coercion
+ TraitUpcasting(ImplSourceTraitUpcastingData<'tcx, N>),
+
+ /// ImplSource automatically generated for a closure. The `DefId` is the ID
+ /// of the closure expression. This is an `ImplSource::UserDefined` in spirit, but the
+ /// impl is generated by the compiler and does not appear in the source.
+ Closure(ImplSourceClosureData<'tcx, N>),
+
+ /// Same as above, but for a function pointer type with the given signature.
+ FnPointer(ImplSourceFnPointerData<'tcx, N>),
+
+ /// ImplSource for a builtin `DiscriminantKind` trait implementation.
+ DiscriminantKind(ImplSourceDiscriminantKindData),
+
+ /// ImplSource for a builtin `Pointee` trait implementation.
+ Pointee(ImplSourcePointeeData),
+
+ /// ImplSource automatically generated for a generator.
+ Generator(ImplSourceGeneratorData<'tcx, N>),
+
+ /// ImplSource for a trait alias.
+ TraitAlias(ImplSourceTraitAliasData<'tcx, N>),
+
+ /// ImplSource for a `const Drop` implementation.
+ ConstDestruct(ImplSourceConstDestructData<N>),
+}
+
+impl<'tcx, N> ImplSource<'tcx, N> {
+ pub fn nested_obligations(self) -> Vec<N> {
+ match self {
+ ImplSource::UserDefined(i) => i.nested,
+ ImplSource::Param(n, _) => n,
+ ImplSource::Builtin(i) => i.nested,
+ ImplSource::AutoImpl(d) => d.nested,
+ ImplSource::Closure(c) => c.nested,
+ ImplSource::Generator(c) => c.nested,
+ ImplSource::Object(d) => d.nested,
+ ImplSource::FnPointer(d) => d.nested,
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
+ | ImplSource::Pointee(ImplSourcePointeeData) => Vec::new(),
+ ImplSource::TraitAlias(d) => d.nested,
+ ImplSource::TraitUpcasting(d) => d.nested,
+ ImplSource::ConstDestruct(i) => i.nested,
+ }
+ }
+
+ pub fn borrow_nested_obligations(&self) -> &[N] {
+ match &self {
+ ImplSource::UserDefined(i) => &i.nested[..],
+ ImplSource::Param(n, _) => &n,
+ ImplSource::Builtin(i) => &i.nested,
+ ImplSource::AutoImpl(d) => &d.nested,
+ ImplSource::Closure(c) => &c.nested,
+ ImplSource::Generator(c) => &c.nested,
+ ImplSource::Object(d) => &d.nested,
+ ImplSource::FnPointer(d) => &d.nested,
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
+ | ImplSource::Pointee(ImplSourcePointeeData) => &[],
+ ImplSource::TraitAlias(d) => &d.nested,
+ ImplSource::TraitUpcasting(d) => &d.nested,
+ ImplSource::ConstDestruct(i) => &i.nested,
+ }
+ }
+
+ pub fn map<M, F>(self, f: F) -> ImplSource<'tcx, M>
+ where
+ F: FnMut(N) -> M,
+ {
+ match self {
+ ImplSource::UserDefined(i) => ImplSource::UserDefined(ImplSourceUserDefinedData {
+ impl_def_id: i.impl_def_id,
+ substs: i.substs,
+ nested: i.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::Param(n, ct) => ImplSource::Param(n.into_iter().map(f).collect(), ct),
+ ImplSource::Builtin(i) => ImplSource::Builtin(ImplSourceBuiltinData {
+ nested: i.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::Object(o) => ImplSource::Object(ImplSourceObjectData {
+ upcast_trait_ref: o.upcast_trait_ref,
+ vtable_base: o.vtable_base,
+ nested: o.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::AutoImpl(d) => ImplSource::AutoImpl(ImplSourceAutoImplData {
+ trait_def_id: d.trait_def_id,
+ nested: d.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::Closure(c) => ImplSource::Closure(ImplSourceClosureData {
+ closure_def_id: c.closure_def_id,
+ substs: c.substs,
+ nested: c.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::Generator(c) => ImplSource::Generator(ImplSourceGeneratorData {
+ generator_def_id: c.generator_def_id,
+ substs: c.substs,
+ nested: c.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::FnPointer(p) => ImplSource::FnPointer(ImplSourceFnPointerData {
+ fn_ty: p.fn_ty,
+ nested: p.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData) => {
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
+ }
+ ImplSource::Pointee(ImplSourcePointeeData) => {
+ ImplSource::Pointee(ImplSourcePointeeData)
+ }
+ ImplSource::TraitAlias(d) => ImplSource::TraitAlias(ImplSourceTraitAliasData {
+ alias_def_id: d.alias_def_id,
+ substs: d.substs,
+ nested: d.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::TraitUpcasting(d) => {
+ ImplSource::TraitUpcasting(ImplSourceTraitUpcastingData {
+ upcast_trait_ref: d.upcast_trait_ref,
+ vtable_vptr_slot: d.vtable_vptr_slot,
+ nested: d.nested.into_iter().map(f).collect(),
+ })
+ }
+ ImplSource::ConstDestruct(i) => {
+ ImplSource::ConstDestruct(ImplSourceConstDestructData {
+ nested: i.nested.into_iter().map(f).collect(),
+ })
+ }
+ }
+ }
+}
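+
+// Illustrative (not part of the original source): `map` is what lets the
+// obligation-carrying form used during type-checking be flattened for
+// codegen, which only needs a shallow resolution:
+//
+//     let shallow: ImplSource<'tcx, ()> = impl_source.map(|_obligation| ());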
+
+/// Identifies a particular impl in the source, along with a set of
+/// substitutions from the impl's type/lifetime parameters. The
+/// `nested` vector corresponds to the nested obligations attached to
+/// the impl's type parameters.
+///
+/// The type parameter `N` indicates the type used for "nested
+/// obligations" that are required by the impl. During type-check, this
+/// is `Obligation`, as one might expect. During codegen, however, this
+/// is `()`, because codegen only requires a shallow resolution of an
+/// impl, and nested obligations are satisfied later.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceUserDefinedData<'tcx, N> {
+ pub impl_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceGeneratorData<'tcx, N> {
+ pub generator_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ /// Nested obligations. This can be non-empty if the generator
+ /// signature contains associated types.
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceClosureData<'tcx, N> {
+ pub closure_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ /// Nested obligations. This can be non-empty if the closure
+ /// signature contains associated types.
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceAutoImplData<N> {
+ pub trait_def_id: DefId,
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceTraitUpcastingData<'tcx, N> {
+ /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
+ pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
+
+ /// The vtable is formed by concatenating together the method lists of
+ /// the base object trait and all supertraits; pointers to supertrait
+ /// vtables are provided when necessary. This is the position of
+ /// `upcast_trait_ref`'s vtable within that vtable.
+ pub vtable_vptr_slot: Option<usize>,
+
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceBuiltinData<N> {
+ pub nested: Vec<N>,
+}
+
+#[derive(PartialEq, Eq, Clone, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceObjectData<'tcx, N> {
+ /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
+ pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
+
+ /// The vtable is formed by concatenating together the method lists of
+ /// the base object trait and all supertraits; pointers to supertrait
+ /// vtables are provided when necessary. This is the start of
+ /// `upcast_trait_ref`'s methods in that vtable.
+ pub vtable_base: usize,
+
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceFnPointerData<'tcx, N> {
+ pub fn_ty: Ty<'tcx>,
+ pub nested: Vec<N>,
+}
+
+// FIXME(@lcnr): This should be refactored and merged with other builtin vtables.
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub struct ImplSourceDiscriminantKindData;
+
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub struct ImplSourcePointeeData;
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceConstDestructData<N> {
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceTraitAliasData<'tcx, N> {
+ pub alias_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, HashStable, PartialOrd, Ord)]
+pub enum ObjectSafetyViolation {
+ /// `Self: Sized` declared on the trait.
+ SizedSelf(SmallVec<[Span; 1]>),
+
+ /// Supertrait reference references `Self` in an illegal location
+ /// (e.g., `trait Foo : Bar<Self>`).
+ SupertraitSelf(SmallVec<[Span; 1]>),
+
+ /// Method has something illegal.
+ Method(Symbol, MethodViolationCode, Span),
+
+ /// Associated const.
+ AssocConst(Symbol, Span),
+
+ /// GAT
+ GAT(Symbol, Span),
+}
+
+impl ObjectSafetyViolation {
+ pub fn error_msg(&self) -> Cow<'static, str> {
+ match self {
+ ObjectSafetyViolation::SizedSelf(_) => "it requires `Self: Sized`".into(),
+ ObjectSafetyViolation::SupertraitSelf(ref spans) => {
+ if spans.iter().any(|sp| *sp != DUMMY_SP) {
+ "it uses `Self` as a type parameter".into()
+ } else {
+ "it cannot use `Self` as a type parameter in a supertrait or `where`-clause"
+ .into()
+ }
+ }
+ ObjectSafetyViolation::Method(name, MethodViolationCode::StaticMethod(_), _) => {
+ format!("associated function `{}` has no `self` parameter", name).into()
+ }
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::ReferencesSelfInput(_),
+ DUMMY_SP,
+ ) => format!("method `{}` references the `Self` type in its parameters", name).into(),
+ ObjectSafetyViolation::Method(name, MethodViolationCode::ReferencesSelfInput(_), _) => {
+ format!("method `{}` references the `Self` type in this parameter", name).into()
+ }
+ ObjectSafetyViolation::Method(name, MethodViolationCode::ReferencesSelfOutput, _) => {
+ format!("method `{}` references the `Self` type in its return type", name).into()
+ }
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::WhereClauseReferencesSelf,
+ _,
+ ) => {
+ format!("method `{}` references the `Self` type in its `where` clause", name).into()
+ }
+ ObjectSafetyViolation::Method(name, MethodViolationCode::Generic, _) => {
+ format!("method `{}` has generic type parameters", name).into()
+ }
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::UndispatchableReceiver(_),
+ _,
+ ) => format!("method `{}`'s `self` parameter cannot be dispatched on", name).into(),
+ ObjectSafetyViolation::AssocConst(name, DUMMY_SP) => {
+ format!("it contains associated `const` `{}`", name).into()
+ }
+ ObjectSafetyViolation::AssocConst(..) => "it contains this associated `const`".into(),
+ ObjectSafetyViolation::GAT(name, _) => {
+ format!("it contains the generic associated type `{}`", name).into()
+ }
+ }
+ }
+
+ pub fn solution(&self, err: &mut Diagnostic) {
+ match self {
+ ObjectSafetyViolation::SizedSelf(_) | ObjectSafetyViolation::SupertraitSelf(_) => {}
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::StaticMethod(Some((add_self_sugg, make_sized_sugg))),
+ _,
+ ) => {
+ err.span_suggestion(
+ add_self_sugg.1,
+ format!(
+ "consider turning `{}` into a method by giving it a `&self` argument",
+ name
+ ),
+ add_self_sugg.0.to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ err.span_suggestion(
+ make_sized_sugg.1,
+ format!(
+ "alternatively, consider constraining `{}` so it does not apply to \
+ trait objects",
+ name
+ ),
+ make_sized_sugg.0.to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::UndispatchableReceiver(Some(span)),
+ _,
+ ) => {
+ err.span_suggestion(
+ *span,
+ &format!(
+ "consider changing method `{}`'s `self` parameter to be `&self`",
+ name
+ ),
+ "&Self",
+ Applicability::MachineApplicable,
+ );
+ }
+ ObjectSafetyViolation::AssocConst(name, _)
+ | ObjectSafetyViolation::GAT(name, _)
+ | ObjectSafetyViolation::Method(name, ..) => {
+ err.help(&format!("consider moving `{}` to another trait", name));
+ }
+ }
+ }
+
+ pub fn spans(&self) -> SmallVec<[Span; 1]> {
+ // When `span` comes from a separate crate, it'll be `DUMMY_SP`. Treat it as `None` so
+ // diagnostics use a `note` instead of a `span_label`.
+ match self {
+ ObjectSafetyViolation::SupertraitSelf(spans)
+ | ObjectSafetyViolation::SizedSelf(spans) => spans.clone(),
+ ObjectSafetyViolation::AssocConst(_, span)
+ | ObjectSafetyViolation::GAT(_, span)
+ | ObjectSafetyViolation::Method(_, _, span)
+ if *span != DUMMY_SP =>
+ {
+ smallvec![*span]
+ }
+ _ => smallvec![],
+ }
+ }
+}
+
+/// Reasons a method might not be object-safe.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, HashStable, PartialOrd, Ord)]
+pub enum MethodViolationCode {
+ /// e.g., `fn foo()`
+ StaticMethod(Option<(/* add &self */ (String, Span), /* add Self: Sized */ (String, Span))>),
+
+ /// e.g., `fn foo(&self, x: Self)`
+ ReferencesSelfInput(Option<Span>),
+
+ /// e.g., `fn foo(&self) -> Self`
+ ReferencesSelfOutput,
+
+ /// e.g., `fn foo(&self) where Self: Clone`
+ WhereClauseReferencesSelf,
+
+ /// e.g., `fn foo<A>()`
+ Generic,
+
+ /// the method's receiver (`self` argument) can't be dispatched on
+ UndispatchableReceiver(Option<Span>),
+}
+
+/// These are the error cases for `codegen_fulfill_obligation`.
+#[derive(Copy, Clone, Debug, Hash, HashStable, Encodable, Decodable)]
+pub enum CodegenObligationError {
+ /// Ambiguity can happen when monomorphizing during trans
+ /// expands to some humongous type that never occurred
+ /// statically -- this humongous type can then overflow,
+ /// leading to an ambiguous result. So report this as an
+ /// overflow bug, since I believe this is the only case
+ /// where ambiguity can result.
+ Ambiguity,
+ /// This can trigger when we probe for the source of a `'static` lifetime requirement
+ /// on a trait object: `impl Foo for dyn Trait {}` has an implicit `'static` bound.
+ /// This can also trigger when we have a global bound that is not actually satisfied,
+ /// but was included during typeck due to the trivial_bounds feature.
+ Unimplemented,
+ FulfillmentError,
+}
diff --git a/compiler/rustc_middle/src/traits/query.rs b/compiler/rustc_middle/src/traits/query.rs
new file mode 100644
index 000000000..1f9b474ad
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/query.rs
@@ -0,0 +1,230 @@
+//! Experimental types for the trait query interface. The methods
+//! defined in this module are all based on **canonicalization**,
+//! which makes a canonical query by replacing unbound inference
+//! variables and regions, so that results can be reused more broadly.
+//! The providers for the queries defined here can be found in
+//! `rustc_traits`.
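+//!
+//! For example (roughly): a goal such as `Vec<?0>: Clone`, containing the
+//! inference variable `?0`, is canonicalized to something like
+//! `for<T0> { Vec<T0>: Clone }`, so one cached answer can serve every
+//! query that differs only in which inference variable appears there.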
+
+use crate::infer::canonical::{Canonical, QueryResponse};
+use crate::ty::error::TypeError;
+use crate::ty::subst::GenericArg;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_errors::struct_span_err;
+use rustc_span::source_map::Span;
+use std::iter::FromIterator;
+
+pub mod type_op {
+ use crate::ty::fold::TypeFoldable;
+ use crate::ty::subst::UserSubsts;
+ use crate::ty::{Predicate, Ty};
+ use rustc_hir::def_id::DefId;
+ use std::fmt;
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct AscribeUserType<'tcx> {
+ pub mir_ty: Ty<'tcx>,
+ pub def_id: DefId,
+ pub user_substs: UserSubsts<'tcx>,
+ }
+
+ impl<'tcx> AscribeUserType<'tcx> {
+ pub fn new(mir_ty: Ty<'tcx>, def_id: DefId, user_substs: UserSubsts<'tcx>) -> Self {
+ Self { mir_ty, def_id, user_substs }
+ }
+ }
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct Eq<'tcx> {
+ pub a: Ty<'tcx>,
+ pub b: Ty<'tcx>,
+ }
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct Subtype<'tcx> {
+ pub sub: Ty<'tcx>,
+ pub sup: Ty<'tcx>,
+ }
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct ProvePredicate<'tcx> {
+ pub predicate: Predicate<'tcx>,
+ }
+
+ impl<'tcx> ProvePredicate<'tcx> {
+ pub fn new(predicate: Predicate<'tcx>) -> Self {
+ ProvePredicate { predicate }
+ }
+ }
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct Normalize<T> {
+ pub value: T,
+ }
+
+ impl<'tcx, T> Normalize<T>
+ where
+ T: fmt::Debug + TypeFoldable<'tcx>,
+ {
+ pub fn new(value: T) -> Self {
+ Self { value }
+ }
+ }
+}
+
+pub type CanonicalProjectionGoal<'tcx> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, ty::ProjectionTy<'tcx>>>;
+
+pub type CanonicalTyGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, Ty<'tcx>>>;
+
+pub type CanonicalPredicateGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, ty::Predicate<'tcx>>>;
+
+pub type CanonicalTypeOpAscribeUserTypeGoal<'tcx> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::AscribeUserType<'tcx>>>;
+
+pub type CanonicalTypeOpEqGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Eq<'tcx>>>;
+
+pub type CanonicalTypeOpSubtypeGoal<'tcx> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Subtype<'tcx>>>;
+
+pub type CanonicalTypeOpProvePredicateGoal<'tcx> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::ProvePredicate<'tcx>>>;
+
+pub type CanonicalTypeOpNormalizeGoal<'tcx, T> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Normalize<T>>>;
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct NoSolution;
+
+pub type Fallible<T> = Result<T, NoSolution>;
+
+impl<'tcx> From<TypeError<'tcx>> for NoSolution {
+ fn from(_: TypeError<'tcx>) -> NoSolution {
+ NoSolution
+ }
+}
+
+#[derive(Clone, Debug, Default, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct DropckOutlivesResult<'tcx> {
+ pub kinds: Vec<GenericArg<'tcx>>,
+ pub overflows: Vec<Ty<'tcx>>,
+}
+
+impl<'tcx> DropckOutlivesResult<'tcx> {
+ pub fn report_overflows(&self, tcx: TyCtxt<'tcx>, span: Span, ty: Ty<'tcx>) {
+ if let Some(overflow_ty) = self.overflows.get(0) {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0320,
+ "overflow while adding drop-check rules for {}",
+ ty,
+ );
+ err.note(&format!("overflowed on {}", overflow_ty));
+ err.emit();
+ }
+ }
+
+ pub fn into_kinds_reporting_overflows(
+ self,
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ ty: Ty<'tcx>,
+ ) -> Vec<GenericArg<'tcx>> {
+ self.report_overflows(tcx, span, ty);
+ let DropckOutlivesResult { kinds, overflows: _ } = self;
+ kinds
+ }
+}
+
+/// A set of constraints that need to be satisfied in order for
+/// a type to be valid for destruction.
+#[derive(Clone, Debug, HashStable)]
+pub struct DropckConstraint<'tcx> {
+ /// Types that are required to be alive in order for this
+ /// type to be valid for destruction.
+ pub outlives: Vec<ty::subst::GenericArg<'tcx>>,
+
+ /// Types that could not be resolved: projections and params.
+ pub dtorck_types: Vec<Ty<'tcx>>,
+
+ /// If, during the computation of the dtorck constraint, we
+ /// overflow, that gets recorded here. The caller is expected to
+ /// report an error.
+ pub overflows: Vec<Ty<'tcx>>,
+}
+
+impl<'tcx> DropckConstraint<'tcx> {
+ pub fn empty() -> DropckConstraint<'tcx> {
+ DropckConstraint { outlives: vec![], dtorck_types: vec![], overflows: vec![] }
+ }
+}
+
+impl<'tcx> FromIterator<DropckConstraint<'tcx>> for DropckConstraint<'tcx> {
+ fn from_iter<I: IntoIterator<Item = DropckConstraint<'tcx>>>(iter: I) -> Self {
+ let mut result = Self::empty();
+
+ for DropckConstraint { outlives, dtorck_types, overflows } in iter {
+ result.outlives.extend(outlives);
+ result.dtorck_types.extend(dtorck_types);
+ result.overflows.extend(overflows);
+ }
+
+ result
+ }
+}
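+
+// Illustrative use of the `FromIterator` impl above (assuming a
+// hypothetical iterator `field_constraints` of `DropckConstraint`s):
+//
+//     let merged: DropckConstraint<'tcx> = field_constraints.collect();
+//
+// which concatenates the `outlives`, `dtorck_types`, and `overflows` lists.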
+
+#[derive(Debug, HashStable)]
+pub struct CandidateStep<'tcx> {
+ pub self_ty: Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
+ pub autoderefs: usize,
+ /// `true` if the type results from a dereference of a raw pointer.
+ /// When assembling candidates, we include these steps, but not when
+ /// picking methods. This is so that if we have `foo: *const Foo` and `Foo` has methods
+ /// `fn by_raw_ptr(self: *const Self)` and `fn by_ref(&self)`, then
+ /// `foo.by_raw_ptr()` will work and `foo.by_ref()` won't.
+ pub from_unsafe_deref: bool,
+ pub unsize: bool,
+}
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct MethodAutoderefStepsResult<'tcx> {
+ /// The valid autoderef steps that could be found.
+ pub steps: &'tcx [CandidateStep<'tcx>],
+ /// If `Some`, the type that autoderef reported an error on.
+ pub opt_bad_ty: Option<&'tcx MethodAutoderefBadTy<'tcx>>,
+ /// If `true`, `steps` has been truncated due to reaching the
+ /// recursion limit.
+ pub reached_recursion_limit: bool,
+}
+
+#[derive(Debug, HashStable)]
+pub struct MethodAutoderefBadTy<'tcx> {
+ pub reached_raw_pointer: bool,
+ pub ty: Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
+}
+
+/// Result from the `normalize_projection_ty` query.
+#[derive(Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct NormalizationResult<'tcx> {
+ /// Result of normalization.
+ pub normalized_ty: Ty<'tcx>,
+}
+
+/// Outlives bounds are relationships between generic parameters,
+/// whether they both be regions (`'a: 'b`) or whether types are
+/// involved (`T: 'a`). These relationships can be extracted from the
+/// full set of predicates we understand or also from types (in which
+/// case they are called implied bounds). They are fed to the
+/// `OutlivesEnv` which in turn is supplied to the region checker and
+/// other parts of the inference system.
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable, Lift, HashStable)]
+pub enum OutlivesBound<'tcx> {
+ RegionSubRegion(ty::Region<'tcx>, ty::Region<'tcx>),
+ RegionSubParam(ty::Region<'tcx>, ty::ParamTy),
+ RegionSubProjection(ty::Region<'tcx>, ty::ProjectionTy<'tcx>),
+}
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
new file mode 100644
index 000000000..e836ba47e
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -0,0 +1,312 @@
+//! Candidate selection. See the [rustc dev guide] for more information on how this works.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
+
+use self::EvaluationResult::*;
+
+use super::{SelectionError, SelectionResult};
+use rustc_errors::ErrorGuaranteed;
+
+use crate::ty;
+
+use rustc_hir::def_id::DefId;
+use rustc_query_system::cache::Cache;
+
+pub type SelectionCache<'tcx> = Cache<
+ // This cache does not use `ParamEnvAnd` in its keys because `ParamEnv::and` can replace
+ // caller bounds with an empty list if the `TraitPredicate` looks global, which may happen
+ // after erasing lifetimes from the predicate.
+ (ty::ParamEnv<'tcx>, ty::TraitPredicate<'tcx>),
+ SelectionResult<'tcx, SelectionCandidate<'tcx>>,
+>;
+
+pub type EvaluationCache<'tcx> = Cache<
+ // See above: this cache does not use `ParamEnvAnd` in its keys due to sometimes incorrectly
+ // caching with the wrong `ParamEnv`.
+ (ty::ParamEnv<'tcx>, ty::PolyTraitPredicate<'tcx>),
+ EvaluationResult,
+>;
+
+/// The selection process begins by considering all impls, where
+/// clauses, and so forth that might resolve an obligation. Sometimes
+/// we'll be able to say definitively that (e.g.) an impl does not
+/// apply to the obligation: perhaps it is defined for `usize` but the
+/// obligation is for `i32`. In that case, we drop the impl out of the
+/// list. But the other cases are considered *candidates*.
+///
+/// For selection to succeed, there must be exactly one matching
+/// candidate. If the obligation is fully known, this is guaranteed
+/// by coherence. However, if the obligation contains type parameters
+/// or variables, there may be multiple such impls.
+///
+/// It is not a real problem if multiple matching impls exist because
+/// of type variables - it just means the obligation isn't sufficiently
+/// elaborated. In that case we report an ambiguity, and the caller can
+/// try again after more type information has been gathered or report a
+/// "type annotations needed" error.
+///
+/// However, with type parameters, this can be a real problem - type
+/// parameters don't unify with regular types, but they *can* unify
+/// with variables from blanket impls, and (unless we know its bounds
+/// will always be satisfied) picking the blanket impl will be wrong
+/// for at least *some* substitutions. To make this concrete, if we have
+///
+/// ```rust, ignore
+/// trait AsDebug { type Out: fmt::Debug; fn debug(self) -> Self::Out; }
+/// impl<T: fmt::Debug> AsDebug for T {
+/// type Out = T;
+/// fn debug(self) -> Self::Out { self }
+/// }
+/// fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
+/// ```
+///
+/// we can't just use the impl to resolve the `<T as AsDebug>` obligation
+/// -- a type from another crate (that doesn't implement `fmt::Debug`) could
+/// implement `AsDebug`.
+///
+/// Because where-clauses match the type exactly, multiple clauses can
+/// only match if there are unresolved variables, and we can mostly just
+/// report this ambiguity in that case. This is still a problem - we can't
+/// *do anything* with ambiguities that involve only regions. This is issue
+/// #21974.
+///
+/// If a single where-clause matches and there are no inference
+/// variables left, then it definitely matches and we can just select
+/// it.
+///
+/// In fact, we even select the where-clause when the obligation contains
+/// inference variables. This can lead to inference making "leaps of logic",
+/// for example in this situation:
+///
+/// ```rust, ignore
+/// pub trait Foo<T> { fn foo(&self) -> T; }
+/// impl<T> Foo<()> for T { fn foo(&self) { } }
+/// impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
+///
+/// pub fn foo<T>(t: T) where T: Foo<bool> {
+/// println!("{:?}", <T as Foo<_>>::foo(&t));
+/// }
+/// fn main() { foo(false); }
+/// ```
+///
+/// Here the obligation `<T as Foo<$0>>` can be matched by both the blanket
+/// impl and the where-clause. We select the where-clause and unify `$0=bool`,
+/// so the program prints "false". However, if the where-clause is omitted,
+/// the blanket impl is selected, we unify `$0=()`, and the program prints
+/// "()".
+///
+/// Exactly the same issues apply to projection and object candidates, except
+/// that we can have both a projection candidate and a where-clause candidate
+/// for the same obligation. In that case either would do (except that
+/// different "leaps of logic" would occur if inference variables are
+/// present), and we just pick the where-clause. This is, for example,
+/// required for associated types to work in default impls, as the bounds
+/// are visible both as projection bounds and as where-clauses from the
+/// parameter environment.
+#[derive(PartialEq, Eq, Debug, Clone, TypeFoldable, TypeVisitable)]
+pub enum SelectionCandidate<'tcx> {
+ BuiltinCandidate {
+ /// `false` if there are no *further* obligations.
+ has_nested: bool,
+ },
+
+ /// Implementation of transmutability trait.
+ TransmutabilityCandidate,
+
+ ParamCandidate(ty::PolyTraitPredicate<'tcx>),
+ ImplCandidate(DefId),
+ AutoImplCandidate(DefId),
+
+ /// This is a trait matching with a projected type as `Self`, and we found
+ /// an applicable bound in the trait definition. The `usize` is an index
+ /// into the list returned by `tcx.item_bounds`.
+ ProjectionCandidate(usize),
+
+ /// Implementation of a `Fn`-family trait by one of the anonymous types
+ /// generated for an `||` expression.
+ ClosureCandidate,
+
+ /// Implementation of a `Generator` trait by one of the anonymous types
+ /// generated for a generator.
+ GeneratorCandidate,
+
+ /// Implementation of a `Fn`-family trait by one of the anonymous
+ /// types generated for a fn pointer type (e.g., `fn(i32) -> i32`)
+ FnPointerCandidate {
+ is_const: bool,
+ },
+
+ /// Builtin implementation of `DiscriminantKind`.
+ DiscriminantKindCandidate,
+
+ /// Builtin implementation of `Pointee`.
+ PointeeCandidate,
+
+ TraitAliasCandidate(DefId),
+
+ /// Matching `dyn Trait` with a supertrait of `Trait`. The index is the
+ /// position in the iterator returned by
+ /// `rustc_infer::traits::util::supertraits`.
+ ObjectCandidate(usize),
+
+ /// Perform trait upcasting coercion of `dyn Trait` to a supertrait of `Trait`.
+ /// The index is the position in the iterator returned by
+ /// `rustc_infer::traits::util::supertraits`.
+ TraitUpcastingUnsizeCandidate(usize),
+
+ BuiltinObjectCandidate,
+
+ BuiltinUnsizeCandidate,
+
+ /// Implementation of `const Destruct`, optionally from a custom `impl const Drop`.
+ ConstDestructCandidate(Option<DefId>),
+}
+
+/// The result of trait evaluation. The order is important
+/// here as the evaluation of a list is the maximum of the
+/// evaluations.
+///
+/// The evaluation results are ordered:
+/// - `EvaluatedToOk` implies `EvaluatedToOkModuloRegions`, which implies
+///   `EvaluatedToOkModuloOpaqueTypes`, which implies `EvaluatedToAmbig`,
+///   which implies `EvaluatedToUnknown`
+/// - `EvaluatedToErr` implies `EvaluatedToRecur`
+/// - the "union" of evaluation results is equal to their maximum -
+/// all the "potential success" candidates can potentially succeed,
+/// so they are noops when unioned with a definite error, and within
+/// the categories it's easy to see that the unions are correct.
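+///
+/// A sketch of what this ordering buys (illustrative):
+///
+/// ```ignore (illustrative)
+/// use std::cmp::max;
+/// // The union of a list of results is their maximum under the derived `Ord`:
+/// assert_eq!(max(EvaluatedToOk, EvaluatedToAmbig), EvaluatedToAmbig);
+/// assert_eq!(max(EvaluatedToAmbig, EvaluatedToErr), EvaluatedToErr);
+/// ```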
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, HashStable)]
+pub enum EvaluationResult {
+ /// Evaluation successful.
+ EvaluatedToOk,
+ /// Evaluation successful, but there were unevaluated region obligations.
+ EvaluatedToOkModuloRegions,
+ /// Evaluation successful, but need to rerun because opaque types got
+ /// hidden types assigned without it being known whether the opaque types
+ /// are within their defining scope.
+ EvaluatedToOkModuloOpaqueTypes,
+ /// Evaluation is known to be ambiguous -- it *might* hold for some
+ /// assignment of inference variables, but it might not.
+ ///
+ /// While this has the same meaning as `EvaluatedToUnknown` -- we can't
+ /// know whether this obligation holds or not -- it is the result we
+ /// would get with an empty stack, and therefore is cacheable.
+ EvaluatedToAmbig,
+ /// Evaluation failed because of recursion involving inference
+ /// variables. We are somewhat imprecise there, so we don't actually
+ /// know the real result.
+ ///
+ /// This can't be trivially cached for the same reason as `EvaluatedToRecur`.
+ EvaluatedToUnknown,
+ /// Evaluation failed because we encountered an obligation we are already
+ /// trying to prove on this branch.
+ ///
+ /// We know this branch can't be a part of a minimal proof-tree for
+ /// the "root" of our cycle, because then we could cut out the recursion
+ /// and maintain a valid proof tree. However, this does not mean
+ /// that all the obligations on this branch do not hold -- it's possible
+ /// that we entered this branch "speculatively", and that there
+ /// might be some other way to prove this obligation that does not
+ /// go through this cycle -- so we can't cache this as a failure.
+ ///
+ /// For example, suppose we have this:
+ ///
+ /// ```rust,ignore (pseudo-Rust)
+ /// pub trait Trait { fn xyz(); }
+ /// // This impl is "useless", but we can still have
+ /// // an `impl Trait for SomeUnsizedType` somewhere.
+ /// impl<T: Trait + Sized> Trait for T { fn xyz() {} }
+ ///
+ /// pub fn foo<T: Trait + ?Sized>() {
+ /// <T as Trait>::xyz();
+ /// }
+ /// ```
+ ///
+ /// When checking `foo`, we have to prove `T: Trait`. This basically
+ /// translates into this:
+ ///
+ /// ```plain,ignore
+ /// (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait
+ /// ```
+ ///
+ /// When we try to prove it, we first go down the first option, which
+ /// recurses. This shows us that the impl is "useless" -- it won't
+ /// tell us that `T: Trait` unless it already implemented `Trait`
+ /// by some other means. However, that does not prove that `T: Trait`
+ /// does not hold, because the bound can indeed be satisfied
+ /// by `SomeUnsizedType` from another crate.
+ //
+ // FIXME: when an `EvaluatedToRecur` goes past its parent root, we
+ // ought to convert it to an `EvaluatedToErr`, because we know
+ // there definitely isn't a proof tree for that obligation. Not
+ // doing so is still sound -- there isn't any proof tree, so the
+ // branch still can't be a part of a minimal one -- but does not re-enable caching.
+ EvaluatedToRecur,
+ /// Evaluation failed.
+ EvaluatedToErr,
+}
+
+impl EvaluationResult {
+ /// Returns `true` if this evaluation result is known to apply, even
+ /// considering outlives constraints.
+ pub fn must_apply_considering_regions(self) -> bool {
+ self == EvaluatedToOk
+ }
+
+ /// Returns `true` if this evaluation result is known to apply, ignoring
+ /// outlives constraints.
+ pub fn must_apply_modulo_regions(self) -> bool {
+ self <= EvaluatedToOkModuloRegions
+ }
+
+ pub fn may_apply(self) -> bool {
+ match self {
+ EvaluatedToOkModuloOpaqueTypes
+ | EvaluatedToOk
+ | EvaluatedToOkModuloRegions
+ | EvaluatedToAmbig
+ | EvaluatedToUnknown => true,
+
+ EvaluatedToErr | EvaluatedToRecur => false,
+ }
+ }
+
+ pub fn is_stack_dependent(self) -> bool {
+ match self {
+ EvaluatedToUnknown | EvaluatedToRecur => true,
+
+ EvaluatedToOkModuloOpaqueTypes
+ | EvaluatedToOk
+ | EvaluatedToOkModuloRegions
+ | EvaluatedToAmbig
+ | EvaluatedToErr => false,
+ }
+ }
+}
+
+/// Indicates that trait evaluation caused overflow and in which pass.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable)]
+pub enum OverflowError {
+ Error(ErrorGuaranteed),
+ Canonical,
+ ErrorReporting,
+}
+
+impl From<ErrorGuaranteed> for OverflowError {
+ fn from(e: ErrorGuaranteed) -> OverflowError {
+ OverflowError::Error(e)
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ OverflowError,
+}
+
+impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
+ fn from(overflow_error: OverflowError) -> SelectionError<'tcx> {
+ match overflow_error {
+ OverflowError::Error(e) => SelectionError::Overflow(OverflowError::Error(e)),
+ OverflowError::Canonical => SelectionError::Overflow(OverflowError::Canonical),
+ OverflowError::ErrorReporting => SelectionError::ErrorReporting,
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/traits/specialization_graph.rs b/compiler/rustc_middle/src/traits/specialization_graph.rs
new file mode 100644
index 000000000..2465f8e25
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/specialization_graph.rs
@@ -0,0 +1,261 @@
+use crate::ty::fast_reject::SimplifiedType;
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{self, TyCtxt};
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def_id::{DefId, DefIdMap};
+use rustc_span::symbol::sym;
+
+/// A per-trait graph of impls in specialization order. At the moment, this
+/// graph forms a tree rooted with the trait itself, with all other nodes
+/// representing impls, and parent-child relationships representing
+/// specializations.
+///
+/// The graph provides two key services:
+///
+/// - Construction. This implicitly checks for overlapping impls (i.e., impls
+/// that overlap but where neither specializes the other -- an artifact of the
+/// simple "chain" rule.
+///
+/// - Parent extraction. In particular, the graph can give you the *immediate*
+/// parents of a given specializing impl, which is needed for extracting
+/// default items amongst other things. In the simple "chain" rule, every impl
+/// has at most one parent.
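+///
+/// For instance (illustrative), under the full `specialization` feature the
+/// chain
+///
+/// ```ignore (illustrative)
+/// trait Greet { fn hi(&self); }
+/// impl<T> Greet for T { default fn hi(&self) {} }        // parent: the trait
+/// impl<T: Clone> Greet for T { default fn hi(&self) {} } // parent: blanket impl
+/// impl Greet for String { fn hi(&self) {} }              // parent: `T: Clone` impl
+/// ```
+///
+/// forms a tree rooted at `Greet`.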
+#[derive(TyEncodable, TyDecodable, HashStable, Debug)]
+pub struct Graph {
+ /// All impls have a parent; the "root" impls have as their parent the `def_id`
+ /// of the trait.
+ pub parent: DefIdMap<DefId>,
+
+ /// The "root" impls are found by looking up the trait's def_id.
+ pub children: DefIdMap<Children>,
+
+ /// Whether an error was emitted while constructing the graph.
+ pub has_errored: Option<ErrorGuaranteed>,
+}
+
+impl Graph {
+ pub fn new() -> Graph {
+ Graph { parent: Default::default(), children: Default::default(), has_errored: None }
+ }
+
+ /// The parent of a given impl, which is the `DefId` of the trait when the
+ /// impl is a "specialization root".
+ pub fn parent(&self, child: DefId) -> DefId {
+ *self.parent.get(&child).unwrap_or_else(|| panic!("Failed to get parent for {:?}", child))
+ }
+}
+
+/// What kind of overlap check are we doing -- this exists just for testing and feature-gating
+/// purposes.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable, Debug, TyEncodable, TyDecodable)]
+pub enum OverlapMode {
+ /// The 1.0 rules (either types fail to unify, or where clauses are not implemented for crate-local types)
+ Stable,
+ /// Feature-gated test: Stable, *or* there is an explicit negative impl that rules out one of the where-clauses.
+ WithNegative,
+ /// Just check for negative impls, not for "where clause not implemented": used for testing.
+ Strict,
+}
+
+impl OverlapMode {
+ pub fn get<'tcx>(tcx: TyCtxt<'tcx>, trait_id: DefId) -> OverlapMode {
+ let with_negative_coherence = tcx.features().with_negative_coherence;
+ let strict_coherence = tcx.has_attr(trait_id, sym::rustc_strict_coherence);
+
+ if with_negative_coherence {
+ if strict_coherence { OverlapMode::Strict } else { OverlapMode::WithNegative }
+ } else if strict_coherence {
+            bug!("to use strict_coherence you need to enable the with_negative_coherence feature flag");
+ } else {
+ OverlapMode::Stable
+ }
+ }
+
+ pub fn use_negative_impl(&self) -> bool {
+ *self == OverlapMode::Strict || *self == OverlapMode::WithNegative
+ }
+
+ pub fn use_implicit_negative(&self) -> bool {
+ *self == OverlapMode::Stable || *self == OverlapMode::WithNegative
+ }
+}
+
+/// Children of a given impl, grouped into blanket/non-blanket varieties as is
+/// done in `TraitDef`.
+#[derive(Default, TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct Children {
+ // Impls of a trait (or specializations of a given impl). To allow for
+ // quicker lookup, the impls are indexed by a simplified version of their
+ // `Self` type: impls with a simplifiable `Self` are stored in
+ // `non_blanket_impls` keyed by it, while all other impls are stored in
+ // `blanket_impls`.
+ //
+ // A similar division is used within `TraitDef`, but the lists there collect
+ // together *all* the impls for a trait, and are populated prior to building
+ // the specialization graph.
+ /// Impls of the trait.
+ pub non_blanket_impls: FxIndexMap<SimplifiedType, Vec<DefId>>,
+
+ /// Blanket impls associated with the trait.
+ pub blanket_impls: Vec<DefId>,
+}
+
+/// A node in the specialization graph is either an impl or a trait
+/// definition; either can serve as a source of item definitions.
+/// There is always exactly one trait definition node: the root.
+#[derive(Debug, Copy, Clone)]
+pub enum Node {
+ Impl(DefId),
+ Trait(DefId),
+}
+
+impl Node {
+ pub fn is_from_trait(&self) -> bool {
+ matches!(self, Node::Trait(..))
+ }
+
+    /// Tries to find the associated item that implements `trait_item_def_id`
+ /// defined in this node.
+ ///
+ /// If this returns `None`, the item can potentially still be found in
+ /// parents of this node.
+ pub fn item<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ trait_item_def_id: DefId,
+ ) -> Option<&'tcx ty::AssocItem> {
+ match *self {
+ Node::Trait(_) => Some(tcx.associated_item(trait_item_def_id)),
+ Node::Impl(impl_def_id) => {
+ let id = tcx.impl_item_implementor_ids(impl_def_id).get(&trait_item_def_id)?;
+ Some(tcx.associated_item(*id))
+ }
+ }
+ }
+
+ pub fn def_id(&self) -> DefId {
+ match *self {
+ Node::Impl(did) => did,
+ Node::Trait(did) => did,
+ }
+ }
+}
+
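+/// An iterator over the specialization ancestors of an impl: starting from a
+/// given node, it repeatedly follows `Graph::parent`, walking from most to
+/// least specialized and ending with the trait itself.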
+#[derive(Copy, Clone)]
+pub struct Ancestors<'tcx> {
+ trait_def_id: DefId,
+ specialization_graph: &'tcx Graph,
+ current_source: Option<Node>,
+}
+
+impl Iterator for Ancestors<'_> {
+ type Item = Node;
+ fn next(&mut self) -> Option<Node> {
+ let cur = self.current_source.take();
+ if let Some(Node::Impl(cur_impl)) = cur {
+ let parent = self.specialization_graph.parent(cur_impl);
+
+ self.current_source = if parent == self.trait_def_id {
+ Some(Node::Trait(parent))
+ } else {
+ Some(Node::Impl(parent))
+ };
+ }
+ cur
+ }
+}
+
+/// Information about the most specialized definition of an associated item.
+pub struct LeafDef {
+ /// The associated item described by this `LeafDef`.
+ pub item: ty::AssocItem,
+
+ /// The node in the specialization graph containing the definition of `item`.
+ pub defining_node: Node,
+
+    /// The "top-most" (i.e., least specialized) specialization graph node that finalized the
+ /// definition of `item`.
+ ///
+ /// Example:
+ ///
+ /// ```
+ /// #![feature(specialization)]
+ /// trait Tr {
+ /// fn assoc(&self);
+ /// }
+ ///
+ /// impl<T> Tr for T {
+ /// default fn assoc(&self) {}
+ /// }
+ ///
+ /// impl Tr for u8 {}
+ /// ```
+ ///
+ /// If we start the leaf definition search at `impl Tr for u8`, that impl will be the
+ /// `finalizing_node`, while `defining_node` will be the generic impl.
+ ///
+ /// If the leaf definition search is started at the generic impl, `finalizing_node` will be
+ /// `None`, since the most specialized impl we found still allows overriding the method
+ /// (doesn't finalize it).
+ pub finalizing_node: Option<Node>,
+}
+
+impl LeafDef {
+ /// Returns whether this definition is known to not be further specializable.
+ pub fn is_final(&self) -> bool {
+ self.finalizing_node.is_some()
+ }
+}
+
+impl<'tcx> Ancestors<'tcx> {
+    /// Finds the bottom-most (i.e., most specialized) definition of an associated
+ /// item.
+ pub fn leaf_def(mut self, tcx: TyCtxt<'tcx>, trait_item_def_id: DefId) -> Option<LeafDef> {
+ let mut finalizing_node = None;
+
+ self.find_map(|node| {
+ if let Some(item) = node.item(tcx, trait_item_def_id) {
+ if finalizing_node.is_none() {
+ let is_specializable = item.defaultness(tcx).is_default()
+ || tcx.impl_defaultness(node.def_id()).is_default();
+
+ if !is_specializable {
+ finalizing_node = Some(node);
+ }
+ }
+
+ Some(LeafDef { item: *item, defining_node: node, finalizing_node })
+ } else {
+ // Item not mentioned. This "finalizes" any defaulted item provided by an ancestor.
+ finalizing_node = Some(node);
+ None
+ }
+ })
+ }
+}
+
+/// Walk up the specialization ancestors of a given impl, starting with that
+/// impl itself.
+///
+/// Returns `Err` if an error was reported while building the specialization
+/// graph.
+pub fn ancestors<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ start_from_impl: DefId,
+) -> Result<Ancestors<'tcx>, ErrorGuaranteed> {
+ let specialization_graph = tcx.specialization_graph_of(trait_def_id);
+
+ if let Some(reported) = specialization_graph.has_errored {
+ Err(reported)
+ } else if let Some(reported) = tcx.type_of(start_from_impl).error_reported() {
+ Err(reported)
+ } else {
+ Ok(Ancestors {
+ trait_def_id,
+ specialization_graph,
+ current_source: Some(Node::Impl(start_from_impl)),
+ })
+ }
+}
diff --git a/compiler/rustc_middle/src/traits/structural_impls.rs b/compiler/rustc_middle/src/traits/structural_impls.rs
new file mode 100644
index 000000000..7fbd57ac7
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/structural_impls.rs
@@ -0,0 +1,135 @@
+use crate::traits;
+
+use std::fmt;
+
+// Structural impls for the structs in `traits`.
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSource<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ super::ImplSource::UserDefined(ref v) => write!(f, "{:?}", v),
+
+ super::ImplSource::AutoImpl(ref t) => write!(f, "{:?}", t),
+
+ super::ImplSource::Closure(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::Generator(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::FnPointer(ref d) => write!(f, "({:?})", d),
+
+ super::ImplSource::DiscriminantKind(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::Pointee(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::Object(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::Param(ref n, ct) => {
+ write!(f, "ImplSourceParamData({:?}, {:?})", n, ct)
+ }
+
+ super::ImplSource::Builtin(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::TraitAlias(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::TraitUpcasting(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::ConstDestruct(ref d) => write!(f, "{:?}", d),
+ }
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceUserDefinedData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceUserDefinedData(impl_def_id={:?}, substs={:?}, nested={:?})",
+ self.impl_def_id, self.substs, self.nested
+ )
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceGeneratorData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceGeneratorData(generator_def_id={:?}, substs={:?}, nested={:?})",
+ self.generator_def_id, self.substs, self.nested
+ )
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceClosureData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceClosureData(closure_def_id={:?}, substs={:?}, nested={:?})",
+ self.closure_def_id, self.substs, self.nested
+ )
+ }
+}
+
+impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceBuiltinData<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ImplSourceBuiltinData(nested={:?})", self.nested)
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceTraitUpcastingData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceTraitUpcastingData(upcast={:?}, vtable_vptr_slot={:?}, nested={:?})",
+ self.upcast_trait_ref, self.vtable_vptr_slot, self.nested
+ )
+ }
+}
+
+impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceAutoImplData<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceAutoImplData(trait_def_id={:?}, nested={:?})",
+ self.trait_def_id, self.nested
+ )
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceObjectData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceObjectData(upcast={:?}, vtable_base={}, nested={:?})",
+ self.upcast_trait_ref, self.vtable_base, self.nested
+ )
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceFnPointerData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ImplSourceFnPointerData(fn_ty={:?}, nested={:?})", self.fn_ty, self.nested)
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceTraitAliasData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceTraitAliasData(alias_def_id={:?}, substs={:?}, nested={:?})",
+ self.alias_def_id, self.substs, self.nested
+ )
+ }
+}
+
+impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceConstDestructData<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ImplSourceConstDestructData(nested={:?})", self.nested)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Lift implementations
+
+TrivialTypeTraversalAndLiftImpls! {
+ super::ImplSourceDiscriminantKindData,
+ super::ImplSourcePointeeData,
+}
diff --git a/compiler/rustc_middle/src/traits/util.rs b/compiler/rustc_middle/src/traits/util.rs
new file mode 100644
index 000000000..d54b8c599
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/util.rs
@@ -0,0 +1,49 @@
+use rustc_data_structures::fx::FxHashSet;
+
+use crate::ty::{PolyTraitRef, TyCtxt};
+
+/// Given a PolyTraitRef, get the PolyTraitRefs of the trait's (transitive) supertraits.
+///
+/// A simplified version of the same function at `rustc_infer::traits::util::supertraits`.
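+///
+/// For illustration (hypothetical traits):
+///
+/// ```ignore (illustrative)
+/// trait Super {}
+/// trait Sub: Super {}
+/// // `supertraits(tcx, sub_ref)` yields `Sub`'s own trait ref first,
+/// // then the one for `Super`.
+/// ```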
+pub fn supertraits<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: PolyTraitRef<'tcx>,
+) -> impl Iterator<Item = PolyTraitRef<'tcx>> {
+ Elaborator { tcx, visited: FxHashSet::from_iter([trait_ref]), stack: vec![trait_ref] }
+}
+
+struct Elaborator<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ visited: FxHashSet<PolyTraitRef<'tcx>>,
+ stack: Vec<PolyTraitRef<'tcx>>,
+}
+
+impl<'tcx> Elaborator<'tcx> {
+ fn elaborate(&mut self, trait_ref: PolyTraitRef<'tcx>) {
+ let supertrait_refs = self
+ .tcx
+ .super_predicates_of(trait_ref.def_id())
+ .predicates
+ .into_iter()
+ .flat_map(|(pred, _)| {
+ pred.subst_supertrait(self.tcx, &trait_ref).to_opt_poly_trait_pred()
+ })
+ .map(|t| t.map_bound(|pred| pred.trait_ref))
+ .filter(|supertrait_ref| self.visited.insert(*supertrait_ref));
+
+ self.stack.extend(supertrait_refs);
+ }
+}
+
+impl<'tcx> Iterator for Elaborator<'tcx> {
+ type Item = PolyTraitRef<'tcx>;
+
+ fn next(&mut self) -> Option<PolyTraitRef<'tcx>> {
+ if let Some(trait_ref) = self.stack.pop() {
+ self.elaborate(trait_ref);
+ Some(trait_ref)
+ } else {
+ None
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/_match.rs b/compiler/rustc_middle/src/ty/_match.rs
new file mode 100644
index 000000000..e6aab30a1
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/_match.rs
@@ -0,0 +1,124 @@
+use crate::ty::error::TypeError;
+use crate::ty::relate::{self, Relate, RelateResult, TypeRelation};
+use crate::ty::{self, InferConst, Ty, TyCtxt};
+
+/// A type "A" *matches* "B" if the fresh types in B could be
+/// substituted with values so as to make it equal to A. Matching is
+/// intended to be used only on freshened types, and it basically
+/// indicates if the non-freshened versions of A and B could have been
+/// unified.
+///
+/// It is only an approximation. If it yields false, unification would
+/// definitely fail, but a true result doesn't mean unification would
+/// succeed. This is because we don't track the "side-constraints" on
+/// type variables, nor do we track if the same freshened type appears
+/// more than once. To some extent these approximations could be
+/// fixed, given effort.
+///
+/// Like subtyping, matching is really a binary relation, so the only
+/// important thing about the result is Ok/Err. Also, matching never
+/// affects any type variables or unification state.
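+///
+/// For illustration, writing `Fresh(0)` informally for a freshened type
+/// variable:
+///
+/// ```ignore (illustrative)
+/// // `Vec<u32>` matches `Vec<Fresh(0)>`: substitute `Fresh(0) := u32`.
+/// // `Vec<Fresh(0)>` does not match `Vec<u32>`: `u32` contains no fresh
+/// // types to substitute.
+/// ```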
+pub struct Match<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> Match<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Match<'tcx> {
+ Match { tcx, param_env }
+ }
+}
+
+impl<'tcx> TypeRelation<'tcx> for Match<'tcx> {
+ fn tag(&self) -> &'static str {
+ "Match"
+ }
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+ fn a_is_expected(&self) -> bool {
+ true
+ } // irrelevant
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ _: ty::Variance,
+ _: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ self.relate(a, b)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ Ok(a)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ if a == b {
+ return Ok(a);
+ }
+
+ match (a.kind(), b.kind()) {
+ (
+ _,
+ &ty::Infer(ty::FreshTy(_))
+ | &ty::Infer(ty::FreshIntTy(_))
+ | &ty::Infer(ty::FreshFloatTy(_)),
+ ) => Ok(a),
+
+ (&ty::Infer(_), _) | (_, &ty::Infer(_)) => {
+ Err(TypeError::Sorts(relate::expected_found(self, a, b)))
+ }
+
+ (&ty::Error(_), _) | (_, &ty::Error(_)) => Ok(self.tcx().ty_error()),
+
+ _ => relate::super_relate_tys(self, a, b),
+ }
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ debug!("{}.consts({:?}, {:?})", self.tag(), a, b);
+ if a == b {
+ return Ok(a);
+ }
+
+ match (a.kind(), b.kind()) {
+ (_, ty::ConstKind::Infer(InferConst::Fresh(_))) => {
+ return Ok(a);
+ }
+
+ (ty::ConstKind::Infer(_), _) | (_, ty::ConstKind::Infer(_)) => {
+ return Err(TypeError::ConstMismatch(relate::expected_found(self, a, b)));
+ }
+
+ _ => {}
+ }
+
+ relate::super_relate_consts(self, a, b)
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ Ok(a.rebind(self.relate(a.skip_binder(), b.skip_binder())?))
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/abstract_const.rs b/compiler/rustc_middle/src/ty/abstract_const.rs
new file mode 100644
index 000000000..bed809930
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/abstract_const.rs
@@ -0,0 +1,194 @@
+//! A subset of a mir body used for const evaluatability checking.
+use crate::mir;
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{self, subst::Subst, DelaySpanBugEmitted, EarlyBinder, SubstsRef, Ty, TyCtxt};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def_id::DefId;
+use std::cmp;
+use std::ops::ControlFlow;
+
+rustc_index::newtype_index! {
+ /// An index into an `AbstractConst`.
+ pub struct NodeId {
+ derive [HashStable]
+ DEBUG_FORMAT = "n{}",
+ }
+}
+
+/// A tree representing an anonymous constant.
+///
+/// This is only able to represent a subset of `MIR`,
+/// and should not leak any information about desugarings.
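+///
+/// For illustration (a hypothetical, simplified encoding): the anonymous
+/// constant `N + 1` could be stored with every node's children preceding it
+/// and the root last, which is what `subtree` and `root` below assume:
+///
+/// ```ignore (illustrative)
+/// // inner: [Leaf(N), Leaf(1), Binop(Add, n0, n1)]
+/// //                           ^ root: the last node
+/// ```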
+#[derive(Debug, Clone, Copy)]
+pub struct AbstractConst<'tcx> {
+ // FIXME: Consider adding something like `IndexSlice`
+ // and use this here.
+ inner: &'tcx [Node<'tcx>],
+ substs: SubstsRef<'tcx>,
+}
+
+impl<'tcx> AbstractConst<'tcx> {
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ uv: ty::Unevaluated<'tcx, ()>,
+ ) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> {
+ let inner = tcx.thir_abstract_const_opt_const_arg(uv.def)?;
+ debug!("AbstractConst::new({:?}) = {:?}", uv, inner);
+ Ok(inner.map(|inner| AbstractConst { inner, substs: tcx.erase_regions(uv.substs) }))
+ }
+
+ pub fn from_const(
+ tcx: TyCtxt<'tcx>,
+ ct: ty::Const<'tcx>,
+ ) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> {
+ match ct.kind() {
+ ty::ConstKind::Unevaluated(uv) => AbstractConst::new(tcx, uv.shrink()),
+ ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => Err(reported),
+ _ => Ok(None),
+ }
+ }
+
+ #[inline]
+ pub fn subtree(self, node: NodeId) -> AbstractConst<'tcx> {
+ AbstractConst { inner: &self.inner[..=node.index()], substs: self.substs }
+ }
+
+ #[inline]
+ pub fn root(self, tcx: TyCtxt<'tcx>) -> Node<'tcx> {
+ let node = self.inner.last().copied().unwrap();
+ match node {
+ Node::Leaf(leaf) => Node::Leaf(EarlyBinder(leaf).subst(tcx, self.substs)),
+ Node::Cast(kind, operand, ty) => {
+ Node::Cast(kind, operand, EarlyBinder(ty).subst(tcx, self.substs))
+ }
+ // Don't perform substitution on the following as they can't directly contain generic params
+ Node::Binop(_, _, _) | Node::UnaryOp(_, _) | Node::FunctionCall(_, _) => node,
+ }
+ }
+
+ pub fn unify_failure_kind(self, tcx: TyCtxt<'tcx>) -> FailureKind {
+ let mut failure_kind = FailureKind::Concrete;
+ walk_abstract_const::<!, _>(tcx, self, |node| {
+ match node.root(tcx) {
+ Node::Leaf(leaf) => {
+ if leaf.has_infer_types_or_consts() {
+ failure_kind = FailureKind::MentionsInfer;
+ } else if leaf.has_param_types_or_consts() {
+ failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
+ }
+ }
+ Node::Cast(_, _, ty) => {
+ if ty.has_infer_types_or_consts() {
+ failure_kind = FailureKind::MentionsInfer;
+ } else if ty.has_param_types_or_consts() {
+ failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
+ }
+ }
+ Node::Binop(_, _, _) | Node::UnaryOp(_, _) | Node::FunctionCall(_, _) => {}
+ }
+ ControlFlow::CONTINUE
+ });
+ failure_kind
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
+pub enum CastKind {
+    /// `thir::ExprKind::As`
+    As,
+    /// `thir::ExprKind::Use`
+ Use,
+}
+
+/// A node of an `AbstractConst`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
+pub enum Node<'tcx> {
+ Leaf(ty::Const<'tcx>),
+ Binop(mir::BinOp, NodeId, NodeId),
+ UnaryOp(mir::UnOp, NodeId),
+ FunctionCall(NodeId, &'tcx [NodeId]),
+ Cast(CastKind, NodeId, Ty<'tcx>),
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
+pub enum NotConstEvaluatable {
+ Error(ErrorGuaranteed),
+ MentionsInfer,
+ MentionsParam,
+}
+
+impl From<ErrorGuaranteed> for NotConstEvaluatable {
+ fn from(e: ErrorGuaranteed) -> NotConstEvaluatable {
+ NotConstEvaluatable::Error(e)
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ NotConstEvaluatable,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ #[inline]
+ pub fn thir_abstract_const_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<DefId>,
+ ) -> Result<Option<&'tcx [Node<'tcx>]>, ErrorGuaranteed> {
+ if let Some((did, param_did)) = def.as_const_arg() {
+ self.thir_abstract_const_of_const_arg((did, param_did))
+ } else {
+ self.thir_abstract_const(def.did)
+ }
+ }
+}
+
+#[instrument(skip(tcx, f), level = "debug")]
+pub fn walk_abstract_const<'tcx, R, F>(
+ tcx: TyCtxt<'tcx>,
+ ct: AbstractConst<'tcx>,
+ mut f: F,
+) -> ControlFlow<R>
+where
+ F: FnMut(AbstractConst<'tcx>) -> ControlFlow<R>,
+{
+ #[instrument(skip(tcx, f), level = "debug")]
+ fn recurse<'tcx, R>(
+ tcx: TyCtxt<'tcx>,
+ ct: AbstractConst<'tcx>,
+ f: &mut dyn FnMut(AbstractConst<'tcx>) -> ControlFlow<R>,
+ ) -> ControlFlow<R> {
+ f(ct)?;
+ let root = ct.root(tcx);
+ debug!(?root);
+ match root {
+ Node::Leaf(_) => ControlFlow::CONTINUE,
+ Node::Binop(_, l, r) => {
+ recurse(tcx, ct.subtree(l), f)?;
+ recurse(tcx, ct.subtree(r), f)
+ }
+ Node::UnaryOp(_, v) => recurse(tcx, ct.subtree(v), f),
+ Node::FunctionCall(func, args) => {
+ recurse(tcx, ct.subtree(func), f)?;
+ args.iter().try_for_each(|&arg| recurse(tcx, ct.subtree(arg), f))
+ }
+ Node::Cast(_, operand, _) => recurse(tcx, ct.subtree(operand), f),
+ }
+ }
+
+ recurse(tcx, ct, &mut f)
+}
+
+// We were unable to unify the abstract constant with
+// a constant found in the caller bounds; there are
+// three possible cases here.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub enum FailureKind {
+ /// The abstract const still references an inference
+ /// variable, in this case we return `TooGeneric`.
+ MentionsInfer,
+ /// The abstract const references a generic parameter,
+ /// this means that we emit an error here.
+ MentionsParam,
+ /// The substs are concrete enough that we can simply
+ /// try and evaluate the given constant.
+ Concrete,
+}
diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs
new file mode 100644
index 000000000..d36cf2fe3
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/adjustment.rs
@@ -0,0 +1,198 @@
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_macros::HashStable;
+use rustc_span::Span;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum PointerCast {
+ /// Go from a fn-item type to a fn-pointer type.
+ ReifyFnPointer,
+
+ /// Go from a safe fn pointer to an unsafe fn pointer.
+ UnsafeFnPointer,
+
+ /// Go from a non-capturing closure to an fn pointer or an unsafe fn pointer.
+ /// It cannot convert a closure that requires unsafe.
+ ClosureFnPointer(hir::Unsafety),
+
+ /// Go from a mut raw pointer to a const raw pointer.
+ MutToConstPointer,
+
+ /// Go from `*const [T; N]` to `*const T`
+ ArrayToPointer,
+
+ /// Unsize a pointer/reference value, e.g., `&[T; n]` to
+ /// `&[T]`. Note that the source could be a thin or fat pointer.
+ /// This will do things like convert thin pointers to fat
+ /// pointers, or convert structs containing thin pointers to
+ /// structs containing fat pointers, or convert between fat
+ /// pointers. We don't store the details of how the transform is
+ /// done (in fact, we don't know that, because it might depend on
+ /// the precise type parameters). We just store the target
+ /// type. Codegen backends and miri figure out what has to be done
+ /// based on the precise source/target type at hand.
+ Unsize,
+}
+
+/// Represents coercing a value to a different type of value.
+///
+/// We transform values by following a number of `Adjust` steps in order.
+/// See the documentation on variants of `Adjust` for more details.
+///
+/// Here are some common scenarios:
+///
+/// 1. The simplest cases are where a pointer is not adjusted between fat and
+/// thin. Here the pointer will be dereferenced N times (where a dereference
+/// can happen to raw or borrowed pointers or any smart pointer which
+/// implements `Deref`, including `Box<_>`). The types of the dereferences
+/// are given by `autoderefs`. It can then be auto-referenced zero or one
+/// time, indicated by `autoref`, to either a raw or borrowed pointer. In
+/// these cases unsize is `false`.
+///
+/// 2. A thin-to-fat coercion involves unsizing the underlying data. We start
+/// with a thin pointer, deref a number of times, unsize the underlying data,
+/// then autoref. The 'unsize' phase may change a fixed length array to a
+/// dynamically sized one, a concrete object to a trait object, or statically
+/// dynamically sized one, a concrete object to a trait object, or a statically
+/// represented by:
+///
+/// ```ignore (illustrative)
+/// Deref(None) -> [i32; 4],
+/// Borrow(AutoBorrow::Ref) -> &[i32; 4],
+/// Unsize -> &[i32],
+/// ```
+///
+/// Note that for a struct, the 'deep' unsizing of the struct is not recorded.
+/// E.g., given `struct Foo<T> { x: T }`, we can coerce `&Foo<[i32; 4]>` to `&Foo<[i32]>`.
+/// The autoderef and -ref are the same as in the above example, but the type
+/// stored in `unsize` is `Foo<[i32]>`, we don't store any further detail about
+/// the underlying conversions from `[i32; 4]` to `[i32]`.
+///
+/// 3. Coercing a `Box<T>` to `Box<dyn Trait>` is an interesting special case. In
+/// that case, we have the pointer we need coming in, so there are no
+/// autoderefs, and no autoref. Instead we just do the `Unsize` transformation.
+/// At some point, of course, `Box` should move out of the compiler, in which
+/// case this is analogous to transforming a struct. E.g., `Box<[i32; 4]>` ->
+/// `Box<[i32]>` is an `Adjust::Unsize` with the target `Box<[i32]>`.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct Adjustment<'tcx> {
+ pub kind: Adjust<'tcx>,
+ pub target: Ty<'tcx>,
+}
+
+impl<'tcx> Adjustment<'tcx> {
+ pub fn is_region_borrow(&self) -> bool {
+ matches!(self.kind, Adjust::Borrow(AutoBorrow::Ref(..)))
+ }
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub enum Adjust<'tcx> {
+ /// Go from ! to any type.
+ NeverToAny,
+
+ /// Dereference once, producing a place.
+ Deref(Option<OverloadedDeref<'tcx>>),
+
+ /// Take the address and produce either a `&` or `*` pointer.
+ Borrow(AutoBorrow<'tcx>),
+
+ Pointer(PointerCast),
+}
+
+/// An overloaded autoderef step, representing a `Deref(Mut)::deref(_mut)`
+/// call, with the signature `&'a T -> &'a U` or `&'a mut T -> &'a mut U`.
+/// The target type is `U` in both cases, with the region and mutability
+/// being those shared by both the receiver and the returned reference.
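+///
+/// For illustration, a sketch of when such a step is recorded (assuming the
+/// usual `Rc` deref):
+///
+/// ```ignore (illustrative)
+/// let r = std::rc::Rc::new(5i32);
+/// let n: i32 = *r; // overloaded deref `Rc<i32> -> i32`, i.e. a
+///                  // `Deref::deref` call `&'a Rc<i32> -> &'a i32`
+/// ```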
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct OverloadedDeref<'tcx> {
+ pub region: ty::Region<'tcx>,
+ pub mutbl: hir::Mutability,
+ /// The `Span` associated with the field access or method call
+ /// that triggered this overloaded deref.
+ pub span: Span,
+}
+
+impl<'tcx> OverloadedDeref<'tcx> {
+ pub fn method_call(&self, tcx: TyCtxt<'tcx>, source: Ty<'tcx>) -> (DefId, SubstsRef<'tcx>) {
+ let trait_def_id = match self.mutbl {
+ hir::Mutability::Not => tcx.require_lang_item(LangItem::Deref, None),
+ hir::Mutability::Mut => tcx.require_lang_item(LangItem::DerefMut, None),
+ };
+ let method_def_id = tcx
+ .associated_items(trait_def_id)
+ .in_definition_order()
+ .find(|m| m.kind == ty::AssocKind::Fn)
+ .unwrap()
+ .def_id;
+ (method_def_id, tcx.mk_substs_trait(source, &[]))
+ }
+}
+
+/// At least for initial deployment, we want to limit two-phase borrows to
+/// only a few specific cases. Right now, those are mostly "things that desugar"
+/// into method calls:
+/// - using `x.some_method()` syntax, where some_method takes `&mut self`,
+/// - using `Foo::some_method(&mut x, ...)` syntax,
+/// - binary assignment operators (`+=`, `-=`, `*=`, etc.).
+/// Anything else should be rejected until generalized two-phase borrow support
+/// is implemented. Right now, dataflow can't handle the general case where there
+/// is more than one use of a mutable borrow, and we don't want to accept too much
+/// new code via two-phase borrows, so we try to limit where we create two-phase
+/// capable mutable borrows.
+/// See #49434 for tracking.
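+///
+/// For illustration, the classic case that two-phase borrows accept:
+///
+/// ```ignore (illustrative)
+/// let mut v = vec![0, 1];
+/// // `v.push(..)` takes `&mut v`; the two-phase borrow lets the shared
+/// // read in `v.len()` happen before the mutable borrow is activated.
+/// v.push(v.len());
+/// ```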
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum AllowTwoPhase {
+ Yes,
+ No,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum AutoBorrowMutability {
+ Mut { allow_two_phase_borrow: AllowTwoPhase },
+ Not,
+}
+
+impl From<AutoBorrowMutability> for hir::Mutability {
+ fn from(m: AutoBorrowMutability) -> Self {
+ match m {
+ AutoBorrowMutability::Mut { .. } => hir::Mutability::Mut,
+ AutoBorrowMutability::Not => hir::Mutability::Not,
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum AutoBorrow<'tcx> {
+ /// Converts from T to &T.
+ Ref(ty::Region<'tcx>, AutoBorrowMutability),
+
+ /// Converts from T to *T.
+ RawPtr(hir::Mutability),
+}
+
+/// Information for `CoerceUnsized` impls, storing information we
+/// have computed about the coercion.
+///
+/// This struct can be obtained via the `coerce_impl_info` query.
+/// Demanding this struct also has the side-effect of reporting errors
+/// for inappropriate impls.
+#[derive(Clone, Copy, TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct CoerceUnsizedInfo {
+ /// If this is a "custom coerce" impl, then what kind of custom
+ /// coercion is it? This applies to impls of `CoerceUnsized` for
+ /// structs, primarily, where we store a bit of info about which
+ /// fields need to be coerced.
+ pub custom_kind: Option<CustomCoerceUnsized>,
+}
+
+#[derive(Clone, Copy, TyEncodable, TyDecodable, Debug, HashStable)]
+pub enum CustomCoerceUnsized {
+ /// Records the index of the field being coerced.
+ Struct(usize),
+}
diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs
new file mode 100644
index 000000000..2e596b275
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/adt.rs
@@ -0,0 +1,569 @@
+use crate::mir::interpret::ErrorHandled;
+use crate::ty;
+use crate::ty::util::{Discr, IntTypeExt};
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::intern::Interned;
+use rustc_data_structures::stable_hasher::HashingControls;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_query_system::ich::StableHashingContext;
+use rustc_session::DataTypeKind;
+use rustc_span::symbol::sym;
+use rustc_target::abi::VariantIdx;
+
+use std::cell::RefCell;
+use std::cmp::Ordering;
+use std::hash::{Hash, Hasher};
+use std::ops::Range;
+use std::str;
+
+use super::{
+ Destructor, FieldDef, GenericPredicates, ReprOptions, Ty, TyCtxt, VariantDef, VariantDiscr,
+};
+
+#[derive(Copy, Clone, HashStable, Debug)]
+pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]);
+
+bitflags! {
+ #[derive(HashStable, TyEncodable, TyDecodable)]
+ pub struct AdtFlags: u32 {
+ const NO_ADT_FLAGS = 0;
+ /// Indicates whether the ADT is an enum.
+ const IS_ENUM = 1 << 0;
+ /// Indicates whether the ADT is a union.
+ const IS_UNION = 1 << 1;
+ /// Indicates whether the ADT is a struct.
+ const IS_STRUCT = 1 << 2;
+ /// Indicates whether the ADT is a struct and has a constructor.
+ const HAS_CTOR = 1 << 3;
+ /// Indicates whether the type is `PhantomData`.
+ const IS_PHANTOM_DATA = 1 << 4;
+ /// Indicates whether the type has a `#[fundamental]` attribute.
+ const IS_FUNDAMENTAL = 1 << 5;
+ /// Indicates whether the type is `Box`.
+ const IS_BOX = 1 << 6;
+ /// Indicates whether the type is `ManuallyDrop`.
+ const IS_MANUALLY_DROP = 1 << 7;
+ /// Indicates whether the variant list of this ADT is `#[non_exhaustive]`.
+ /// (i.e., this flag is never set unless this ADT is an enum).
+ const IS_VARIANT_LIST_NON_EXHAUSTIVE = 1 << 8;
+ /// Indicates whether the type is `UnsafeCell`.
+ const IS_UNSAFE_CELL = 1 << 9;
+ }
+}
+
+/// The definition of a user-defined type, e.g., a `struct`, `enum`, or `union`.
+///
+/// These are all interned (by `alloc_adt_def`) into the global arena.
+///
+/// The initialism *ADT* stands for an [*algebraic data type (ADT)*][adt].
+/// This is slightly wrong because `union`s are not ADTs.
+/// Moreover, Rust only allows recursive data types through indirection.
+///
+/// [adt]: https://en.wikipedia.org/wiki/Algebraic_data_type
+///
+/// # Recursive types
+///
+/// It may seem impossible to represent recursive types using [`Ty`],
+/// since [`TyKind::Adt`] includes [`AdtDef`], which includes its fields,
+/// creating a cycle. However, `AdtDef` does not actually include the *types*
+/// of its fields; it includes just their [`DefId`]s.
+///
+/// [`TyKind::Adt`]: ty::TyKind::Adt
+///
+/// For example, the following type:
+///
+/// ```
+/// struct S { x: Box<S> }
+/// ```
+///
+/// is essentially represented with [`Ty`] as the following pseudocode:
+///
+/// ```ignore (illustrative)
+/// struct S { x }
+/// ```
+///
+/// where `x` here represents the `DefId` of `S.x`. Then, the `DefId`
+/// can be used with [`TyCtxt::type_of()`] to get the type of the field.
+#[derive(TyEncodable, TyDecodable)]
+pub struct AdtDefData {
+ /// The `DefId` of the struct, enum or union item.
+ pub did: DefId,
+ /// Variants of the ADT. If this is a struct or union, then there will be a single variant.
+ variants: IndexVec<VariantIdx, VariantDef>,
+ /// Flags of the ADT (e.g., is this a struct? is this non-exhaustive?).
+ flags: AdtFlags,
+ /// Repr options provided by the user.
+ repr: ReprOptions,
+}
+
+impl PartialOrd for AdtDefData {
+ fn partial_cmp(&self, other: &AdtDefData) -> Option<Ordering> {
+ Some(self.cmp(&other))
+ }
+}
+
+/// There should be only one AdtDef for each `did`; therefore,
+/// it is fine to implement `Ord` only based on `did`.
+impl Ord for AdtDefData {
+ fn cmp(&self, other: &AdtDefData) -> Ordering {
+ self.did.cmp(&other.did)
+ }
+}
+
+/// There should be only one AdtDef for each `did`; therefore,
+/// it is fine to implement `PartialEq` only based on `did`.
+impl PartialEq for AdtDefData {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.did == other.did
+ }
+}
+
+impl Eq for AdtDefData {}
+
+/// There should be only one AdtDef for each `did`; therefore,
+/// it is fine to implement `Hash` only based on `did`.
+impl Hash for AdtDefData {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ self.did.hash(s)
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for AdtDefData {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ thread_local! {
+ static CACHE: RefCell<FxHashMap<(usize, HashingControls), Fingerprint>> = Default::default();
+ }
+
+ let hash: Fingerprint = CACHE.with(|cache| {
+ let addr = self as *const AdtDefData as usize;
+ let hashing_controls = hcx.hashing_controls();
+ *cache.borrow_mut().entry((addr, hashing_controls)).or_insert_with(|| {
+ let ty::AdtDefData { did, ref variants, ref flags, ref repr } = *self;
+
+ let mut hasher = StableHasher::new();
+ did.hash_stable(hcx, &mut hasher);
+ variants.hash_stable(hcx, &mut hasher);
+ flags.hash_stable(hcx, &mut hasher);
+ repr.hash_stable(hcx, &mut hasher);
+
+ hasher.finish()
+ })
+ });
+
+ hash.hash_stable(hcx, hasher);
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, HashStable)]
+#[rustc_pass_by_value]
+pub struct AdtDef<'tcx>(pub Interned<'tcx, AdtDefData>);
+
+impl<'tcx> AdtDef<'tcx> {
+ #[inline]
+ pub fn did(self) -> DefId {
+ self.0.0.did
+ }
+
+ #[inline]
+ pub fn variants(self) -> &'tcx IndexVec<VariantIdx, VariantDef> {
+ &self.0.0.variants
+ }
+
+ #[inline]
+ pub fn variant(self, idx: VariantIdx) -> &'tcx VariantDef {
+ &self.0.0.variants[idx]
+ }
+
+ #[inline]
+ pub fn flags(self) -> AdtFlags {
+ self.0.0.flags
+ }
+
+ #[inline]
+ pub fn repr(self) -> ReprOptions {
+ self.0.0.repr
+ }
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
+pub enum AdtKind {
+ Struct,
+ Union,
+ Enum,
+}
+
+impl Into<DataTypeKind> for AdtKind {
+ fn into(self) -> DataTypeKind {
+ match self {
+ AdtKind::Struct => DataTypeKind::Struct,
+ AdtKind::Union => DataTypeKind::Union,
+ AdtKind::Enum => DataTypeKind::Enum,
+ }
+ }
+}
+
+impl AdtDefData {
+ /// Creates a new `AdtDefData`.
+ pub(super) fn new(
+ tcx: TyCtxt<'_>,
+ did: DefId,
+ kind: AdtKind,
+ variants: IndexVec<VariantIdx, VariantDef>,
+ repr: ReprOptions,
+ ) -> Self {
+ debug!("AdtDef::new({:?}, {:?}, {:?}, {:?})", did, kind, variants, repr);
+ let mut flags = AdtFlags::NO_ADT_FLAGS;
+
+ if kind == AdtKind::Enum && tcx.has_attr(did, sym::non_exhaustive) {
+ debug!("found non-exhaustive variant list for {:?}", did);
+ flags = flags | AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE;
+ }
+
+ flags |= match kind {
+ AdtKind::Enum => AdtFlags::IS_ENUM,
+ AdtKind::Union => AdtFlags::IS_UNION,
+ AdtKind::Struct => AdtFlags::IS_STRUCT,
+ };
+
+ if kind == AdtKind::Struct && variants[VariantIdx::new(0)].ctor_def_id.is_some() {
+ flags |= AdtFlags::HAS_CTOR;
+ }
+
+ if tcx.has_attr(did, sym::fundamental) {
+ flags |= AdtFlags::IS_FUNDAMENTAL;
+ }
+ if Some(did) == tcx.lang_items().phantom_data() {
+ flags |= AdtFlags::IS_PHANTOM_DATA;
+ }
+ if Some(did) == tcx.lang_items().owned_box() {
+ flags |= AdtFlags::IS_BOX;
+ }
+ if Some(did) == tcx.lang_items().manually_drop() {
+ flags |= AdtFlags::IS_MANUALLY_DROP;
+ }
+ if Some(did) == tcx.lang_items().unsafe_cell_type() {
+ flags |= AdtFlags::IS_UNSAFE_CELL;
+ }
+
+ AdtDefData { did, variants, flags, repr }
+ }
+}
+
+impl<'tcx> AdtDef<'tcx> {
+ /// Returns `true` if this is a struct.
+ #[inline]
+ pub fn is_struct(self) -> bool {
+ self.flags().contains(AdtFlags::IS_STRUCT)
+ }
+
+ /// Returns `true` if this is a union.
+ #[inline]
+ pub fn is_union(self) -> bool {
+ self.flags().contains(AdtFlags::IS_UNION)
+ }
+
+ /// Returns `true` if this is an enum.
+ #[inline]
+ pub fn is_enum(self) -> bool {
+ self.flags().contains(AdtFlags::IS_ENUM)
+ }
+
+ /// Returns `true` if the variant list of this ADT is `#[non_exhaustive]`.
+ #[inline]
+ pub fn is_variant_list_non_exhaustive(self) -> bool {
+ self.flags().contains(AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE)
+ }
+
+ /// Returns the kind of the ADT.
+ #[inline]
+ pub fn adt_kind(self) -> AdtKind {
+ if self.is_enum() {
+ AdtKind::Enum
+ } else if self.is_union() {
+ AdtKind::Union
+ } else {
+ AdtKind::Struct
+ }
+ }
+
+ /// Returns a description of this abstract data type.
+ pub fn descr(self) -> &'static str {
+ match self.adt_kind() {
+ AdtKind::Struct => "struct",
+ AdtKind::Union => "union",
+ AdtKind::Enum => "enum",
+ }
+ }
+
+ /// Returns a description of a variant of this abstract data type.
+ #[inline]
+ pub fn variant_descr(self) -> &'static str {
+ match self.adt_kind() {
+ AdtKind::Struct => "struct",
+ AdtKind::Union => "union",
+ AdtKind::Enum => "variant",
+ }
+ }
+
+ /// If this function returns `true`, it implies that `is_struct` must return `true`.
+ #[inline]
+ pub fn has_ctor(self) -> bool {
+ self.flags().contains(AdtFlags::HAS_CTOR)
+ }
+
+ /// Returns `true` if this type is `#[fundamental]` for the purposes
+ /// of coherence checking.
+ #[inline]
+ pub fn is_fundamental(self) -> bool {
+ self.flags().contains(AdtFlags::IS_FUNDAMENTAL)
+ }
+
+ /// Returns `true` if this is `PhantomData<T>`.
+ #[inline]
+ pub fn is_phantom_data(self) -> bool {
+ self.flags().contains(AdtFlags::IS_PHANTOM_DATA)
+ }
+
+    /// Returns `true` if this is `Box<T>`.
+ #[inline]
+ pub fn is_box(self) -> bool {
+ self.flags().contains(AdtFlags::IS_BOX)
+ }
+
+    /// Returns `true` if this is `UnsafeCell<T>`.
+ #[inline]
+ pub fn is_unsafe_cell(self) -> bool {
+ self.flags().contains(AdtFlags::IS_UNSAFE_CELL)
+ }
+
+ /// Returns `true` if this is `ManuallyDrop<T>`.
+ #[inline]
+ pub fn is_manually_drop(self) -> bool {
+ self.flags().contains(AdtFlags::IS_MANUALLY_DROP)
+ }
+
+ /// Returns `true` if this type has a destructor.
+ pub fn has_dtor(self, tcx: TyCtxt<'tcx>) -> bool {
+ self.destructor(tcx).is_some()
+ }
+
+ pub fn has_non_const_dtor(self, tcx: TyCtxt<'tcx>) -> bool {
+ matches!(self.destructor(tcx), Some(Destructor { constness: hir::Constness::NotConst, .. }))
+ }
+
+ /// Asserts this is a struct or union and returns its unique variant.
+ pub fn non_enum_variant(self) -> &'tcx VariantDef {
+ assert!(self.is_struct() || self.is_union());
+ &self.variant(VariantIdx::new(0))
+ }
+
+ #[inline]
+ pub fn predicates(self, tcx: TyCtxt<'tcx>) -> GenericPredicates<'tcx> {
+ tcx.predicates_of(self.did())
+ }
+
+ /// Returns an iterator over all fields contained
+ /// by this ADT.
+ #[inline]
+ pub fn all_fields(self) -> impl Iterator<Item = &'tcx FieldDef> + Clone {
+ self.variants().iter().flat_map(|v| v.fields.iter())
+ }
+
+ /// Whether the ADT lacks fields. Note that this includes uninhabited enums,
+ /// e.g., `enum Void {}` is considered payload free as well.
+ pub fn is_payloadfree(self) -> bool {
+ // Treat the ADT as not payload-free if arbitrary_enum_discriminant is used (#88621).
+        // This disallows the following kind of enum from being cast into an integer.
+ // ```
+ // enum Enum {
+ // Foo() = 1,
+ // Bar{} = 2,
+ // Baz = 3,
+ // }
+ // ```
+ if self
+ .variants()
+ .iter()
+ .any(|v| matches!(v.discr, VariantDiscr::Explicit(_)) && v.ctor_kind != CtorKind::Const)
+ {
+ return false;
+ }
+ self.variants().iter().all(|v| v.fields.is_empty())
+ }
+
+ /// Return a `VariantDef` given a variant id.
+ pub fn variant_with_id(self, vid: DefId) -> &'tcx VariantDef {
+ self.variants().iter().find(|v| v.def_id == vid).expect("variant_with_id: unknown variant")
+ }
+
+ /// Return a `VariantDef` given a constructor id.
+ pub fn variant_with_ctor_id(self, cid: DefId) -> &'tcx VariantDef {
+ self.variants()
+ .iter()
+ .find(|v| v.ctor_def_id == Some(cid))
+ .expect("variant_with_ctor_id: unknown variant")
+ }
+
+ /// Return the index of `VariantDef` given a variant id.
+ pub fn variant_index_with_id(self, vid: DefId) -> VariantIdx {
+ self.variants()
+ .iter_enumerated()
+ .find(|(_, v)| v.def_id == vid)
+ .expect("variant_index_with_id: unknown variant")
+ .0
+ }
+
+ /// Return the index of `VariantDef` given a constructor id.
+ pub fn variant_index_with_ctor_id(self, cid: DefId) -> VariantIdx {
+ self.variants()
+ .iter_enumerated()
+ .find(|(_, v)| v.ctor_def_id == Some(cid))
+ .expect("variant_index_with_ctor_id: unknown variant")
+ .0
+ }
+
+ pub fn variant_of_res(self, res: Res) -> &'tcx VariantDef {
+ match res {
+ Res::Def(DefKind::Variant, vid) => self.variant_with_id(vid),
+ Res::Def(DefKind::Ctor(..), cid) => self.variant_with_ctor_id(cid),
+ Res::Def(DefKind::Struct, _)
+ | Res::Def(DefKind::Union, _)
+ | Res::Def(DefKind::TyAlias, _)
+ | Res::Def(DefKind::AssocTy, _)
+ | Res::SelfTy { .. }
+ | Res::SelfCtor(..) => self.non_enum_variant(),
+ _ => bug!("unexpected res {:?} in variant_of_res", res),
+ }
+ }
+
+ #[inline]
+ pub fn eval_explicit_discr(self, tcx: TyCtxt<'tcx>, expr_did: DefId) -> Option<Discr<'tcx>> {
+ assert!(self.is_enum());
+ let param_env = tcx.param_env(expr_did);
+ let repr_type = self.repr().discr_type();
+ match tcx.const_eval_poly(expr_did) {
+ Ok(val) => {
+ let ty = repr_type.to_ty(tcx);
+ if let Some(b) = val.try_to_bits_for_ty(tcx, param_env, ty) {
+ trace!("discriminants: {} ({:?})", b, repr_type);
+ Some(Discr { val: b, ty })
+ } else {
+ info!("invalid enum discriminant: {:#?}", val);
+ crate::mir::interpret::struct_error(
+ tcx.at(tcx.def_span(expr_did)),
+ "constant evaluation of enum discriminant resulted in non-integer",
+ )
+ .emit();
+ None
+ }
+ }
+ Err(err) => {
+ let msg = match err {
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {
+ "enum discriminant evaluation failed"
+ }
+ ErrorHandled::TooGeneric => "enum discriminant depends on generics",
+ };
+ tcx.sess.delay_span_bug(tcx.def_span(expr_did), msg);
+ None
+ }
+ }
+ }
+
+ #[inline]
+ pub fn discriminants(
+ self,
+ tcx: TyCtxt<'tcx>,
+ ) -> impl Iterator<Item = (VariantIdx, Discr<'tcx>)> + Captures<'tcx> {
+ assert!(self.is_enum());
+ let repr_type = self.repr().discr_type();
+ let initial = repr_type.initial_discriminant(tcx);
+ let mut prev_discr = None::<Discr<'tcx>>;
+ self.variants().iter_enumerated().map(move |(i, v)| {
+ let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
+ if let VariantDiscr::Explicit(expr_did) = v.discr {
+ if let Some(new_discr) = self.eval_explicit_discr(tcx, expr_did) {
+ discr = new_discr;
+ }
+ }
+ prev_discr = Some(discr);
+
+ (i, discr)
+ })
+ }
+
+ #[inline]
+ pub fn variant_range(self) -> Range<VariantIdx> {
+ VariantIdx::new(0)..VariantIdx::new(self.variants().len())
+ }
+
+ /// Computes the discriminant value used by a specific variant.
+ /// Unlike `discriminants`, this is (amortized) constant-time,
+ /// only doing at most one query for evaluating an explicit
+ /// discriminant (the last one before the requested variant),
+ /// assuming there are no constant-evaluation errors there.
+ #[inline]
+ pub fn discriminant_for_variant(
+ self,
+ tcx: TyCtxt<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Discr<'tcx> {
+ assert!(self.is_enum());
+ let (val, offset) = self.discriminant_def_for_variant(variant_index);
+ let explicit_value = val
+ .and_then(|expr_did| self.eval_explicit_discr(tcx, expr_did))
+ .unwrap_or_else(|| self.repr().discr_type().initial_discriminant(tcx));
+ explicit_value.checked_add(tcx, offset as u128).0
+ }
+
+    /// Yields a `DefId` for the discriminant and an offset to add to it.
+ /// Alternatively, if there is no explicit discriminant, returns the
+ /// inferred discriminant directly.
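+    ///
+    /// For illustration (hypothetical enum):
+    ///
+    /// ```ignore (illustrative)
+    /// enum E { A, B = 10, C } // discriminants: A = 0, B = 10, C = 11
+    /// // For `C` (`Relative(1)`), walk back to `B`'s `Explicit(did)` and
+    /// // return `(Some(did), 1)`.
+    /// ```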
+ pub fn discriminant_def_for_variant(self, variant_index: VariantIdx) -> (Option<DefId>, u32) {
+ assert!(!self.variants().is_empty());
+ let mut explicit_index = variant_index.as_u32();
+ let expr_did;
+ loop {
+ match self.variant(VariantIdx::from_u32(explicit_index)).discr {
+ ty::VariantDiscr::Relative(0) => {
+ expr_did = None;
+ break;
+ }
+ ty::VariantDiscr::Relative(distance) => {
+ explicit_index -= distance;
+ }
+ ty::VariantDiscr::Explicit(did) => {
+ expr_did = Some(did);
+ break;
+ }
+ }
+ }
+ (expr_did, variant_index.as_u32() - explicit_index)
+ }
+
+ pub fn destructor(self, tcx: TyCtxt<'tcx>) -> Option<Destructor> {
+ tcx.adt_destructor(self.did())
+ }
+
+    /// Returns a list of types such that `Self: Sized` holds if and only
+    /// if every type in that list is `Sized`, or `TyErr` if this type is
+    /// recursive.
+ ///
+ /// Oddly enough, checking that the sized-constraint is `Sized` is
+ /// actually more expressive than checking all members:
+ /// the `Sized` trait is inductive, so an associated type that references
+ /// `Self` would prevent its containing ADT from being `Sized`.
+ ///
+ /// Due to normalization being eager, this applies even if
+ /// the associated type is behind a pointer (e.g., issue #31299).
+ pub fn sized_constraint(self, tcx: TyCtxt<'tcx>) -> ty::EarlyBinder<&'tcx [Ty<'tcx>]> {
+ ty::EarlyBinder(tcx.adt_sized_constraint(self.did()).0)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs
new file mode 100644
index 000000000..c97156ac1
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/assoc.rs
@@ -0,0 +1,195 @@
+pub use self::AssocItemContainer::*;
+
+use crate::ty::{self, DefIdTree};
+use rustc_data_structures::sorted_map::SortedIndexMultiMap;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Namespace};
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::{Ident, Symbol};
+
+use super::{TyCtxt, Visibility};
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable, Hash, Encodable, Decodable)]
+pub enum AssocItemContainer {
+ TraitContainer,
+ ImplContainer,
+}
+
+/// Information about an associated item
+#[derive(Copy, Clone, Debug, PartialEq, HashStable, Eq, Hash, Encodable, Decodable)]
+pub struct AssocItem {
+ pub def_id: DefId,
+ pub name: Symbol,
+ pub kind: AssocKind,
+ pub container: AssocItemContainer,
+
+ /// If this is an item in an impl of a trait then this is the `DefId` of
+ /// the associated item on the trait that this implements.
+ pub trait_item_def_id: Option<DefId>,
+
+ /// Whether this is a method with an explicit self
+ /// as its first parameter, allowing method calls.
+ pub fn_has_self_parameter: bool,
+}
+
+impl AssocItem {
+ pub fn ident(&self, tcx: TyCtxt<'_>) -> Ident {
+ Ident::new(self.name, tcx.def_ident_span(self.def_id).unwrap())
+ }
+
+ pub fn defaultness(&self, tcx: TyCtxt<'_>) -> hir::Defaultness {
+ tcx.impl_defaultness(self.def_id)
+ }
+
+ #[inline]
+ pub fn visibility(&self, tcx: TyCtxt<'_>) -> Visibility {
+ tcx.visibility(self.def_id)
+ }
+
+ #[inline]
+ pub fn container_id(&self, tcx: TyCtxt<'_>) -> DefId {
+ tcx.parent(self.def_id)
+ }
+
+ #[inline]
+ pub fn trait_container(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
+ match self.container {
+ AssocItemContainer::ImplContainer => None,
+ AssocItemContainer::TraitContainer => Some(tcx.parent(self.def_id)),
+ }
+ }
+
+ #[inline]
+ pub fn impl_container(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
+ match self.container {
+ AssocItemContainer::ImplContainer => Some(tcx.parent(self.def_id)),
+ AssocItemContainer::TraitContainer => None,
+ }
+ }
+
+ pub fn signature(&self, tcx: TyCtxt<'_>) -> String {
+ match self.kind {
+ ty::AssocKind::Fn => {
+ // We skip the binder here because the binder would deanonymize all
+ // late-bound regions, and we don't want method signatures to show up
+                // as `for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound
+ // regions just fine, showing `fn(&MyType)`.
+ tcx.fn_sig(self.def_id).skip_binder().to_string()
+ }
+ ty::AssocKind::Type => format!("type {};", self.name),
+ ty::AssocKind::Const => {
+ format!("const {}: {:?};", self.name, tcx.type_of(self.def_id))
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, HashStable, Eq, Hash, Encodable, Decodable)]
+pub enum AssocKind {
+ Const,
+ Fn,
+ Type,
+}
+
+impl AssocKind {
+ pub fn namespace(&self) -> Namespace {
+ match *self {
+ ty::AssocKind::Type => Namespace::TypeNS,
+ ty::AssocKind::Const | ty::AssocKind::Fn => Namespace::ValueNS,
+ }
+ }
+
+ pub fn as_def_kind(&self) -> DefKind {
+ match self {
+ AssocKind::Const => DefKind::AssocConst,
+ AssocKind::Fn => DefKind::AssocFn,
+ AssocKind::Type => DefKind::AssocTy,
+ }
+ }
+}
+
+impl std::fmt::Display for AssocKind {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ AssocKind::Fn => write!(f, "method"),
+ AssocKind::Const => write!(f, "associated const"),
+ AssocKind::Type => write!(f, "associated type"),
+ }
+ }
+}
+
+/// A list of `ty::AssocItem`s in definition order that allows for efficient lookup by name.
+///
+/// When doing lookup by name, we try to postpone hygienic comparison for as long as possible since
+/// it is relatively expensive. Instead, items are indexed by `Symbol` and hygienic comparison is
+/// done only on items with the same name.
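+///
+/// For illustration, a lookup sketch (assuming an `items: &AssocItems<'_>`
+/// and a `name: Symbol` are in hand):
+///
+/// ```ignore (illustrative)
+/// // Cheap pre-filter by `Symbol`, ignoring hygiene...
+/// let candidates = items.filter_by_name_unhygienic(name);
+/// // ...so the expensive hygienic comparison runs only on those few
+/// // candidates, as in `find_by_name_and_kind` below.
+/// ```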
+#[derive(Debug, Clone, PartialEq, HashStable)]
+pub struct AssocItems<'tcx> {
+ pub(super) items: SortedIndexMultiMap<u32, Symbol, &'tcx ty::AssocItem>,
+}
+
+impl<'tcx> AssocItems<'tcx> {
+    /// Constructs an `AssocItems` map from a series of `ty::AssocItem`s in definition order.
+ pub fn new(items_in_def_order: impl IntoIterator<Item = &'tcx ty::AssocItem>) -> Self {
+ let items = items_in_def_order.into_iter().map(|item| (item.name, item)).collect();
+ AssocItems { items }
+ }
+
+ /// Returns a slice of associated items in the order they were defined.
+ ///
+ /// New code should avoid relying on definition order. If you need a particular associated item
+ /// for a known trait, make that trait a lang item instead of indexing this array.
+ pub fn in_definition_order(&self) -> impl '_ + Iterator<Item = &ty::AssocItem> {
+ self.items.iter().map(|(_, v)| *v)
+ }
+
+ pub fn len(&self) -> usize {
+ self.items.len()
+ }
+
+ /// Returns an iterator over all associated items with the given name, ignoring hygiene.
+ pub fn filter_by_name_unhygienic(
+ &self,
+ name: Symbol,
+ ) -> impl '_ + Iterator<Item = &ty::AssocItem> {
+ self.items.get_by_key(name).copied()
+ }
+
+ /// Returns the associated item with the given name and `AssocKind`, if one exists.
+ pub fn find_by_name_and_kind(
+ &self,
+ tcx: TyCtxt<'_>,
+ ident: Ident,
+ kind: AssocKind,
+ parent_def_id: DefId,
+ ) -> Option<&ty::AssocItem> {
+ self.filter_by_name_unhygienic(ident.name)
+ .filter(|item| item.kind == kind)
+ .find(|item| tcx.hygienic_eq(ident, item.ident(tcx), parent_def_id))
+ }
+
+ /// Returns the associated item with the given name and any of `AssocKind`, if one exists.
+ pub fn find_by_name_and_kinds(
+ &self,
+ tcx: TyCtxt<'_>,
+ ident: Ident,
+ // Sorted in order of what kinds to look at
+ kinds: &[AssocKind],
+ parent_def_id: DefId,
+ ) -> Option<&ty::AssocItem> {
+ kinds.iter().find_map(|kind| self.find_by_name_and_kind(tcx, ident, *kind, parent_def_id))
+ }
+
+ /// Returns the associated item with the given name in the given `Namespace`, if one exists.
+ pub fn find_by_name_and_namespace(
+ &self,
+ tcx: TyCtxt<'_>,
+ ident: Ident,
+ ns: Namespace,
+ parent_def_id: DefId,
+ ) -> Option<&ty::AssocItem> {
+ self.filter_by_name_unhygienic(ident.name)
+ .filter(|item| item.kind.namespace() == ns)
+ .find(|item| tcx.hygienic_eq(ident, item.ident(tcx), parent_def_id))
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/binding.rs b/compiler/rustc_middle/src/ty/binding.rs
new file mode 100644
index 000000000..3d65429f2
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/binding.rs
@@ -0,0 +1,22 @@
+use rustc_hir::BindingAnnotation;
+use rustc_hir::BindingAnnotation::*;
+use rustc_hir::Mutability;
+
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Debug, Copy, HashStable)]
+pub enum BindingMode {
+ BindByReference(Mutability),
+ BindByValue(Mutability),
+}
+
+TrivialTypeTraversalAndLiftImpls! { BindingMode, }
+
+impl BindingMode {
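+    /// For illustration, the mapping corresponds to pattern syntax as
+    /// follows: `x` is by-value, `mut x` is by-value (mutable), `ref x` is
+    /// by-reference, and `ref mut x` is by-mutable-reference.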
+ pub fn convert(ba: BindingAnnotation) -> BindingMode {
+ match ba {
+ Unannotated => BindingMode::BindByValue(Mutability::Not),
+ Mutable => BindingMode::BindByValue(Mutability::Mut),
+ Ref => BindingMode::BindByReference(Mutability::Not),
+ RefMut => BindingMode::BindByReference(Mutability::Mut),
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/cast.rs b/compiler/rustc_middle/src/ty/cast.rs
new file mode 100644
index 000000000..c4b743dd4
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/cast.rs
@@ -0,0 +1,73 @@
+// Helpers for handling cast expressions, used in both
+// typeck and codegen.
+
+use crate::ty::{self, Ty};
+
+use rustc_macros::HashStable;
+
+/// Types that are represented as ints.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum IntTy {
+ U(ty::UintTy),
+ I,
+ CEnum,
+ Bool,
+ Char,
+}
+
+impl IntTy {
+ pub fn is_signed(self) -> bool {
+ matches!(self, Self::I)
+ }
+}
+
+// Valid types for the result of a non-coercion cast
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CastTy<'tcx> {
+ /// Various types that are represented as ints and handled mostly
+ /// in the same way, merged for easier matching.
+ Int(IntTy),
+ /// Floating-point types.
+ Float,
+ /// Function pointers.
+ FnPtr,
+ /// Raw pointers.
+ Ptr(ty::TypeAndMut<'tcx>),
+}
+
+/// Cast Kind. See [RFC 401](https://rust-lang.github.io/rfcs/0401-coercions.html)
+/// (or librustc_typeck/check/cast.rs).
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum CastKind {
+ CoercionCast,
+ PtrPtrCast,
+ PtrAddrCast,
+ AddrPtrCast,
+ NumericCast,
+ EnumCast,
+ PrimIntCast,
+ U8CharCast,
+ ArrayPtrCast,
+ FnPtrPtrCast,
+ FnPtrAddrCast,
+}
+
+impl<'tcx> CastTy<'tcx> {
+ /// Returns `Some` for integral/pointer casts.
+ /// Casts like unsizing casts will return `None`.
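+    ///
+    /// For illustration (hypothetical inputs):
+    ///
+    /// ```ignore (illustrative)
+    /// // bool      -> Some(Int(Bool))
+    /// // *const u8 -> Some(Ptr(..))
+    /// // fn()      -> Some(FnPtr)
+    /// // [u8; 4]   -> None (arrays are not a valid cast type)
+    /// ```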
+ pub fn from_ty(t: Ty<'tcx>) -> Option<CastTy<'tcx>> {
+ match *t.kind() {
+ ty::Bool => Some(CastTy::Int(IntTy::Bool)),
+ ty::Char => Some(CastTy::Int(IntTy::Char)),
+ ty::Int(_) => Some(CastTy::Int(IntTy::I)),
+ ty::Infer(ty::InferTy::IntVar(_)) => Some(CastTy::Int(IntTy::I)),
+ ty::Infer(ty::InferTy::FloatVar(_)) => Some(CastTy::Float),
+ ty::Uint(u) => Some(CastTy::Int(IntTy::U(u))),
+ ty::Float(_) => Some(CastTy::Float),
+ ty::Adt(d, _) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)),
+ ty::RawPtr(mt) => Some(CastTy::Ptr(mt)),
+ ty::FnPtr(..) => Some(CastTy::FnPtr),
+ _ => None,
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/closure.rs b/compiler/rustc_middle/src/ty/closure.rs
new file mode 100644
index 000000000..0d6c26a58
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/closure.rs
@@ -0,0 +1,454 @@
+use crate::hir::place::{
+ Place as HirPlace, PlaceBase as HirPlaceBase, ProjectionKind as HirProjectionKind,
+};
+use crate::{mir, ty};
+
+use std::fmt::Write;
+
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_span::{Span, Symbol};
+
+use super::{Ty, TyCtxt};
+
+use self::BorrowKind::*;
+
+// Captures are represented using fields inside a structure.
+// This local represents accessing `self` in the closure structure.
+pub const CAPTURE_STRUCT_LOCAL: mir::Local = mir::Local::from_u32(1);
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct UpvarPath {
+ pub hir_id: hir::HirId,
+}
+
+/// Upvars do not get their own `NodeId`. Instead, we use the pair of
+/// the original var ID (that is, the root variable that is referenced
+/// by the upvar) and the ID of the closure expression.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct UpvarId {
+ pub var_path: UpvarPath,
+ pub closure_expr_id: LocalDefId,
+}
+
+impl UpvarId {
+ pub fn new(var_hir_id: hir::HirId, closure_def_id: LocalDefId) -> UpvarId {
+ UpvarId { var_path: UpvarPath { hir_id: var_hir_id }, closure_expr_id: closure_def_id }
+ }
+}
+
+/// Information describing the capture of an upvar. This is computed
+/// during `typeck`, specifically by `regionck`.
+#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum UpvarCapture {
+ /// Upvar is captured by value. This is always true when the
+ /// closure is labeled `move`, but can also be true in other cases
+ /// depending on inference.
+ ByValue,
+
+ /// Upvar is captured by reference.
+ ByRef(BorrowKind),
+}
+
+pub type UpvarListMap = FxHashMap<DefId, FxIndexMap<hir::HirId, UpvarId>>;
+pub type UpvarCaptureMap = FxHashMap<UpvarId, UpvarCapture>;
+
+/// Given the closure `DefId`, this map provides a map of root variables to the minimum
+/// set of `CapturedPlace`s that need to be tracked to support all captures of that closure.
+pub type MinCaptureInformationMap<'tcx> = FxHashMap<LocalDefId, RootVariableMinCaptureList<'tcx>>;
+
+/// Part of `MinCaptureInformationMap`; maps a root variable to its list of `CapturedPlace`s.
+/// Used to track the minimum set of `Place`s that need to be captured to support all
+/// Places captured by the closure starting at a given root variable.
+///
+/// This provides a convenient and quick way of checking if a variable being used within
+/// a closure is a capture of a local variable.
+pub type RootVariableMinCaptureList<'tcx> = FxIndexMap<hir::HirId, MinCaptureList<'tcx>>;
+
+/// Part of `MinCaptureInformationMap`; a list of `CapturedPlace`s.
+pub type MinCaptureList<'tcx> = Vec<CapturedPlace<'tcx>>;
+
+/// Represents the various closure traits in the language. This
+/// will determine the type of the environment (`self`, in the
+/// desugaring) argument that the closure expects.
+///
+/// You can get the environment type of a closure using
+/// `tcx.closure_env_ty()`.
+#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum ClosureKind {
+    // Warning: the ordering is significant here! It is chosen because the
+    // trait `Fn` is a subtrait of `FnMut`, which in turn is a subtrait of
+    // `FnOnce`; hence we order the variants so that Fn < FnMut < FnOnce.
+ Fn,
+ FnMut,
+ FnOnce,
+}
+
+impl<'tcx> ClosureKind {
+    /// The initial value used when doing upvar inference.
+ pub const LATTICE_BOTTOM: ClosureKind = ClosureKind::Fn;
+
+ /// Returns `true` if a type that impls this closure kind
+ /// must also implement `other`.
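+    ///
+    /// E.g., every `Fn` closure also implements `FnMut` and `FnOnce`, but not
+    /// the other way around (a sketch, not a doctest):
+    ///
+    /// ```ignore (illustrative)
+    /// assert!(ClosureKind::Fn.extends(ClosureKind::FnOnce));
+    /// assert!(!ClosureKind::FnOnce.extends(ClosureKind::Fn));
+    /// ```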
+ pub fn extends(self, other: ty::ClosureKind) -> bool {
+ matches!(
+ (self, other),
+ (ClosureKind::Fn, ClosureKind::Fn)
+ | (ClosureKind::Fn, ClosureKind::FnMut)
+ | (ClosureKind::Fn, ClosureKind::FnOnce)
+ | (ClosureKind::FnMut, ClosureKind::FnMut)
+ | (ClosureKind::FnMut, ClosureKind::FnOnce)
+ | (ClosureKind::FnOnce, ClosureKind::FnOnce)
+ )
+ }
+
+ /// Returns the representative scalar type for this closure kind.
+ /// See `Ty::to_opt_closure_kind` for more details.
+ pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match self {
+ ClosureKind::Fn => tcx.types.i8,
+ ClosureKind::FnMut => tcx.types.i16,
+ ClosureKind::FnOnce => tcx.types.i32,
+ }
+ }
+
+ pub fn from_def_id(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ClosureKind> {
+ if Some(def_id) == tcx.lang_items().fn_once_trait() {
+ Some(ClosureKind::FnOnce)
+ } else if Some(def_id) == tcx.lang_items().fn_mut_trait() {
+ Some(ClosureKind::FnMut)
+ } else if Some(def_id) == tcx.lang_items().fn_trait() {
+ Some(ClosureKind::Fn)
+ } else {
+ None
+ }
+ }
+
+    /// Returns the `DefId` of the lang-item trait corresponding to this
+    /// closure kind; the inverse of `from_def_id`.
+    pub fn to_def_id(&self, tcx: TyCtxt<'_>) -> DefId {
+        match self {
+            ClosureKind::Fn => tcx.lang_items().fn_trait().unwrap(),
+            ClosureKind::FnMut => tcx.lang_items().fn_mut_trait().unwrap(),
+            ClosureKind::FnOnce => tcx.lang_items().fn_once_trait().unwrap(),
+ }
+ }
+}
+
+/// A composite describing a `Place` that is captured by a closure.
+#[derive(PartialEq, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct CapturedPlace<'tcx> {
+ /// The `Place` that is captured.
+ pub place: HirPlace<'tcx>,
+
+    /// The `CaptureKind` and expression(s) that resulted in this capture of `place`.
+ pub info: CaptureInfo,
+
+ /// Represents if `place` can be mutated or not.
+ pub mutability: hir::Mutability,
+
+ /// Region of the resulting reference if the upvar is captured by ref.
+ pub region: Option<ty::Region<'tcx>>,
+}
+
+impl<'tcx> CapturedPlace<'tcx> {
+ pub fn to_string(&self, tcx: TyCtxt<'tcx>) -> String {
+ place_to_string_for_capture(tcx, &self.place)
+ }
+
+ /// Returns a symbol of the captured upvar, which looks like `name__field1__field2`.
+ fn to_symbol(&self, tcx: TyCtxt<'tcx>) -> Symbol {
+ let hir_id = match self.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected an upvar, found {:?}", base),
+ };
+ let mut symbol = tcx.hir().name(hir_id).as_str().to_string();
+
+ let mut ty = self.place.base_ty;
+ for proj in self.place.projections.iter() {
+ match proj.kind {
+ HirProjectionKind::Field(idx, variant) => match ty.kind() {
+ ty::Tuple(_) => write!(&mut symbol, "__{}", idx).unwrap(),
+ ty::Adt(def, ..) => {
+ write!(
+ &mut symbol,
+ "__{}",
+ def.variant(variant).fields[idx as usize].name.as_str(),
+ )
+ .unwrap();
+ }
+ ty => {
+ span_bug!(
+ self.get_capture_kind_span(tcx),
+ "Unexpected type {:?} for `Field` projection",
+ ty
+ )
+ }
+ },
+
+ // Ignore derefs for now, as they are likely caused by
+ // autoderefs that don't appear in the original code.
+ HirProjectionKind::Deref => {}
+ proj => bug!("Unexpected projection {:?} in captured place", proj),
+ }
+ ty = proj.ty;
+ }
+
+ Symbol::intern(&symbol)
+ }
+
+    /// Returns the `HirId` of the root variable for the captured place,
+    /// e.g., if `a.b.c` was captured, this returns the `HirId` for `a`.
+ pub fn get_root_variable(&self) -> hir::HirId {
+ match self.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected upvar, found={:?}", base),
+ }
+ }
+
+    /// Returns the `LocalDefId` of the closure that captured this `Place`.
+ pub fn get_closure_local_def_id(&self) -> LocalDefId {
+ match self.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.closure_expr_id,
+ base => bug!("expected upvar, found={:?}", base),
+ }
+ }
+
+    /// Returns the span pointing to the use that resulted in selecting the captured path.
+ pub fn get_path_span(&self, tcx: TyCtxt<'tcx>) -> Span {
+ if let Some(path_expr_id) = self.info.path_expr_id {
+ tcx.hir().span(path_expr_id)
+ } else if let Some(capture_kind_expr_id) = self.info.capture_kind_expr_id {
+ tcx.hir().span(capture_kind_expr_id)
+ } else {
+            // Fall back on the upvars mentioned if neither the path nor the
+            // capture-kind expr id is available.
+
+            // Safe to unwrap since we know this place is captured by the closure; therefore the closure must have upvars.
+ tcx.upvars_mentioned(self.get_closure_local_def_id()).unwrap()
+ [&self.get_root_variable()]
+ .span
+ }
+ }
+
+    /// Returns the span pointing to the use that resulted in selecting the current capture kind.
+ pub fn get_capture_kind_span(&self, tcx: TyCtxt<'tcx>) -> Span {
+ if let Some(capture_kind_expr_id) = self.info.capture_kind_expr_id {
+ tcx.hir().span(capture_kind_expr_id)
+ } else if let Some(path_expr_id) = self.info.path_expr_id {
+ tcx.hir().span(path_expr_id)
+ } else {
+            // Fall back on the upvars mentioned if neither the path nor the
+            // capture-kind expr id is available.
+
+            // Safe to unwrap since we know this place is captured by the closure; therefore the closure must have upvars.
+ tcx.upvars_mentioned(self.get_closure_local_def_id()).unwrap()
+ [&self.get_root_variable()]
+ .span
+ }
+ }
+}
+
+fn symbols_for_closure_captures<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: (LocalDefId, LocalDefId),
+) -> Vec<Symbol> {
+ let typeck_results = tcx.typeck(def_id.0);
+ let captures = typeck_results.closure_min_captures_flattened(def_id.1);
+ captures.into_iter().map(|captured_place| captured_place.to_symbol(tcx)).collect()
+}
+
+/// Returns true if `proj_possible_ancestor` represents an ancestor path of
+/// `proj_capture`, or if `proj_possible_ancestor` is the same as `proj_capture`,
+/// assuming they both start off of the same root variable.
+///
+/// **Note:** It's the caller's responsibility to ensure that both lists of projections
+/// start off of the same root variable.
+///
+/// Eg: 1. `foo.x` which is represented using `projections=[Field(x)]` is an ancestor of
+/// `foo.x.y` which is represented using `projections=[Field(x), Field(y)]`.
+/// Note both `foo.x` and `foo.x.y` start off of the same root variable `foo`.
+/// 2. Since we only look at the projections, this function will return `bar.x` as a valid
+///    ancestor of `foo.x.y`. It's the caller's responsibility to ensure that both projection
+///    lists are being applied to the same root variable.
+pub fn is_ancestor_or_same_capture(
+ proj_possible_ancestor: &[HirProjectionKind],
+ proj_capture: &[HirProjectionKind],
+) -> bool {
+ // We want to make sure `is_ancestor_or_same_capture("x.0.0", "x.0")` to return false.
+ // Therefore we can't just check if all projections are same in the zipped iterator below.
+ if proj_possible_ancestor.len() > proj_capture.len() {
+ return false;
+ }
+
+ proj_possible_ancestor.iter().zip(proj_capture).all(|(a, b)| a == b)
+}
+
+/// Part of `MinCaptureInformationMap`; describes the capture kind (&, &mut, move)
+/// for a particular capture as well as identifying the part of the source code
+/// that triggered this capture to occur.
+#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct CaptureInfo {
+    /// Expr Id pointing to the use that resulted in selecting the current capture kind.
+ ///
+ /// Eg:
+ /// ```rust,no_run
+ /// let mut t = (0,1);
+ ///
+ /// let c = || {
+ /// println!("{t:?}"); // L1
+ /// t.1 = 4; // L2
+ /// };
+ /// ```
+ /// `capture_kind_expr_id` will point to the use on L2 and `path_expr_id` will point to the
+ /// use on L1.
+ ///
+    /// If the user doesn't enable the feature `capture_disjoint_fields` (RFC 2229), it is
+    /// possible that we don't see the use of a particular place, resulting in
+    /// `capture_kind_expr_id` being `None`. In such a case we fall back on
+    /// `upvars_mentioned` for the span.
+ ///
+ /// Eg:
+ /// ```rust,no_run
+ /// let x = 5;
+ ///
+ /// let c = || {
+ /// let _ = x;
+ /// };
+ /// ```
+ ///
+ /// In this example, if `capture_disjoint_fields` is **not** set, then x will be captured,
+ /// but we won't see it being used during capture analysis, since it's essentially a discard.
+ pub capture_kind_expr_id: Option<hir::HirId>,
+    /// Expr Id pointing to the use that resulted in the corresponding place being captured.
+    ///
+    /// See `capture_kind_expr_id` for an example.
+ pub path_expr_id: Option<hir::HirId>,
+
+ /// Capture mode that was selected
+ pub capture_kind: UpvarCapture,
+}
+
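+/// Renders a captured `Place` as user-facing text. As an illustrative sketch
+/// of the output format (not a doctest): a capture rooted at `foo` with
+/// `projections = [Deref, Field(x)]` renders as `*foo.x`.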
+pub fn place_to_string_for_capture<'tcx>(tcx: TyCtxt<'tcx>, place: &HirPlace<'tcx>) -> String {
+ let mut curr_string: String = match place.base {
+ HirPlaceBase::Upvar(upvar_id) => tcx.hir().name(upvar_id.var_path.hir_id).to_string(),
+        _ => bug!("`capture_information` should only contain upvars"),
+ };
+
+ for (i, proj) in place.projections.iter().enumerate() {
+ match proj.kind {
+ HirProjectionKind::Deref => {
+ curr_string = format!("*{}", curr_string);
+ }
+ HirProjectionKind::Field(idx, variant) => match place.ty_before_projection(i).kind() {
+ ty::Adt(def, ..) => {
+ curr_string = format!(
+ "{}.{}",
+ curr_string,
+ def.variant(variant).fields[idx as usize].name.as_str()
+ );
+ }
+ ty::Tuple(_) => {
+ curr_string = format!("{}.{}", curr_string, idx);
+ }
+ _ => {
+ bug!(
+ "Field projection applied to a type other than Adt or Tuple: {:?}.",
+ place.ty_before_projection(i).kind()
+ )
+ }
+ },
+ proj => bug!("{:?} unexpected because it isn't captured", proj),
+ }
+ }
+
+ curr_string
+}
+
+#[derive(Clone, PartialEq, Debug, TyEncodable, TyDecodable, Copy, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum BorrowKind {
+ /// Data must be immutable and is aliasable.
+ ImmBorrow,
+
+ /// Data must be immutable but not aliasable. This kind of borrow
+ /// cannot currently be expressed by the user and is used only in
+ /// implicit closure bindings. It is needed when the closure
+ /// is borrowing or mutating a mutable referent, e.g.:
+ ///
+ /// ```
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = || *x += 5;
+ /// ```
+ ///
+ /// If we were to try to translate this closure into a more explicit
+ /// form, we'd encounter an error with the code as written:
+ ///
+ /// ```compile_fail,E0594
+ /// struct Env<'a> { x: &'a &'a mut isize }
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = (&mut Env { x: &x }, fn_ptr); // Closure is pair of env and fn
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ ///
+ /// This is then illegal because you cannot mutate a `&mut` found
+ /// in an aliasable location. To solve, you'd have to translate with
+ /// an `&mut` borrow:
+ ///
+ /// ```compile_fail,E0596
+ /// struct Env<'a> { x: &'a mut &'a mut isize }
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = (&mut Env { x: &mut x }, fn_ptr); // changed from &x to &mut x
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ ///
+ /// Now the assignment to `**env.x` is legal, but creating a
+ /// mutable pointer to `x` is not because `x` is not mutable. We
+ /// could fix this by declaring `x` as `let mut x`. This is ok in
+ /// user code, if awkward, but extra weird for closures, since the
+ /// borrow is hidden.
+ ///
+ /// So we introduce a "unique imm" borrow -- the referent is
+ /// immutable, but not aliasable. This solves the problem. For
+ /// simplicity, we don't give users the way to express this
+ /// borrow, it's just used when translating closures.
+ UniqueImmBorrow,
+
+ /// Data is mutable and not aliasable.
+ MutBorrow,
+}
+
+impl BorrowKind {
+ pub fn from_mutbl(m: hir::Mutability) -> BorrowKind {
+ match m {
+ hir::Mutability::Mut => MutBorrow,
+ hir::Mutability::Not => ImmBorrow,
+ }
+ }
+
+ /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
+ /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
+ /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
+ /// question.
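+    ///
+    /// Illustrative sketch (not a doctest): both `MutBorrow` and the
+    /// user-inexpressible `UniqueImmBorrow` map to `Mutability::Mut`, so the
+    /// mapping cannot be inverted exactly:
+    ///
+    /// ```ignore (illustrative)
+    /// assert_eq!(BorrowKind::MutBorrow.to_mutbl_lossy(), hir::Mutability::Mut);
+    /// assert_eq!(BorrowKind::UniqueImmBorrow.to_mutbl_lossy(), hir::Mutability::Mut);
+    /// ```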
+ pub fn to_mutbl_lossy(self) -> hir::Mutability {
+ match self {
+ MutBorrow => hir::Mutability::Mut,
+ ImmBorrow => hir::Mutability::Not,
+
+ // We have no type corresponding to a unique imm borrow, so
+ // use `&mut`. It gives all the capabilities of a `&uniq`
+ // and hence is a safe "over approximation".
+ UniqueImmBorrow => hir::Mutability::Mut,
+ }
+ }
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { symbols_for_closure_captures, ..*providers }
+}
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
new file mode 100644
index 000000000..51137c526
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -0,0 +1,527 @@
+//! This module contains some shared code for encoding and decoding various
+//! things from the `ty` module, and in particular implements support for
+//! "shorthands", which allow having pointers back into the already encoded
+//! stream instead of re-encoding the same thing twice.
+//!
+//! The functionality in here is shared between persisting to crate metadata and
+//! persisting to incr. comp. caches.
+
+use crate::arena::ArenaAllocatable;
+use crate::infer::canonical::{CanonicalVarInfo, CanonicalVarInfos};
+use crate::mir::{
+ self,
+ interpret::{AllocId, ConstAllocation},
+};
+use crate::traits;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, AdtDef, Ty};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::TyCtxt;
+use rustc_serialize::{Decodable, Encodable};
+use rustc_span::Span;
+pub use rustc_type_ir::{TyDecoder, TyEncoder};
+use std::hash::Hash;
+use std::intrinsics;
+use std::marker::DiscriminantKind;
+
+/// The shorthand encoding shares the `usize` space with an enum's variant
+/// index, and is offset by this value so that it never matches a real variant.
+/// The offset is also chosen so that the first byte is never < 0x80.
+pub const SHORTHAND_OFFSET: usize = 0x80;
+
+pub trait EncodableWithShorthand<E: TyEncoder>: Copy + Eq + Hash {
+ type Variant: Encodable<E>;
+ fn variant(&self) -> &Self::Variant;
+}
+
+#[allow(rustc::usage_of_ty_tykind)]
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> EncodableWithShorthand<E> for Ty<'tcx> {
+ type Variant = ty::TyKind<'tcx>;
+
+ #[inline]
+ fn variant(&self) -> &Self::Variant {
+ self.kind()
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> EncodableWithShorthand<E> for ty::PredicateKind<'tcx> {
+ type Variant = ty::PredicateKind<'tcx>;
+
+ #[inline]
+ fn variant(&self) -> &Self::Variant {
+ self
+ }
+}
+
+/// Trait for decoding to a reference.
+///
+/// This is a separate trait from `Decodable` so that we can implement it for
+/// upstream types, such as `FxHashSet`.
+///
+/// The `TyDecodable` derive macro will use this trait for fields that are
+/// references (and don't use a type alias to hide that).
+///
+/// `Decodable` can still be implemented in cases where `Decodable` is required
+/// by a trait bound.
+pub trait RefDecodable<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> {
+ fn decode(d: &mut D) -> &'tcx Self;
+}
+
+/// Encode the given value or a previously cached shorthand.
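+///
+/// Worked example for the size check below (illustrative numbers, not taken
+/// from a real encoding session): if a value's full encoding occupied
+/// `len = 3` bytes, then `leb128_bits = 21`, and the shorthand is only cached
+/// when its position satisfies `shorthand < 1 << 21`; a larger position would
+/// need more leb128 bytes than simply re-encoding the value.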
+pub fn encode_with_shorthand<'tcx, E, T, M>(encoder: &mut E, value: &T, cache: M)
+where
+ E: TyEncoder<I = TyCtxt<'tcx>>,
+ M: for<'b> Fn(&'b mut E) -> &'b mut FxHashMap<T, usize>,
+ T: EncodableWithShorthand<E>,
+ // The discriminant and shorthand must have the same size.
+ T::Variant: DiscriminantKind<Discriminant = isize>,
+{
+ let existing_shorthand = cache(encoder).get(value).copied();
+ if let Some(shorthand) = existing_shorthand {
+ encoder.emit_usize(shorthand);
+ return;
+ }
+
+ let variant = value.variant();
+
+ let start = encoder.position();
+ variant.encode(encoder);
+ let len = encoder.position() - start;
+
+ // The shorthand encoding uses the same usize as the
+ // discriminant, with an offset so they can't conflict.
+ let discriminant = intrinsics::discriminant_value(variant);
+ assert!(SHORTHAND_OFFSET > discriminant as usize);
+
+ let shorthand = start + SHORTHAND_OFFSET;
+
+ // Get the number of bits that leb128 could fit
+ // in the same space as the fully encoded type.
+ let leb128_bits = len * 7;
+
+    // Check that the shorthand is not longer than the
+    // full encoding itself, i.e., it's an obvious win.
+ if leb128_bits >= 64 || (shorthand as u64) < (1 << leb128_bits) {
+ cache(encoder).insert(*value, shorthand);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for Ty<'tcx> {
+ fn encode(&self, e: &mut E) {
+ encode_with_shorthand(e, self, TyEncoder::type_shorthands);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E>
+ for ty::Binder<'tcx, ty::PredicateKind<'tcx>>
+{
+ fn encode(&self, e: &mut E) {
+ self.bound_vars().encode(e);
+ encode_with_shorthand(e, &self.skip_binder(), TyEncoder::predicate_shorthands);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ty::Predicate<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.kind().encode(e);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ty::Region<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.kind().encode(e);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ty::Const<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.0.0.encode(e);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ConstAllocation<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.inner().encode(e)
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for AdtDef<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.0.0.encode(e)
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for AllocId {
+ fn encode(&self, e: &mut E) {
+ e.encode_alloc_id(self)
+ }
+}
+
+#[inline]
+fn decode_arena_allocable<
+ 'tcx,
+ D: TyDecoder<I = TyCtxt<'tcx>>,
+ T: ArenaAllocatable<'tcx> + Decodable<D>,
+>(
+ decoder: &mut D,
+) -> &'tcx T
+where
+ D: TyDecoder,
+{
+ decoder.interner().arena.alloc(Decodable::decode(decoder))
+}
+
+#[inline]
+fn decode_arena_allocable_slice<
+ 'tcx,
+ D: TyDecoder<I = TyCtxt<'tcx>>,
+ T: ArenaAllocatable<'tcx> + Decodable<D>,
+>(
+ decoder: &mut D,
+) -> &'tcx [T]
+where
+ D: TyDecoder,
+{
+ decoder.interner().arena.alloc_from_iter(<Vec<T> as Decodable<D>>::decode(decoder))
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for Ty<'tcx> {
+ #[allow(rustc::usage_of_ty_tykind)]
+ fn decode(decoder: &mut D) -> Ty<'tcx> {
+ // Handle shorthands first, if we have a usize > 0x80.
+ if decoder.positioned_at_shorthand() {
+ let pos = decoder.read_usize();
+ assert!(pos >= SHORTHAND_OFFSET);
+ let shorthand = pos - SHORTHAND_OFFSET;
+
+ decoder.cached_ty_for_shorthand(shorthand, |decoder| {
+ decoder.with_position(shorthand, Ty::decode)
+ })
+ } else {
+ let tcx = decoder.interner();
+ tcx.mk_ty(rustc_type_ir::TyKind::decode(decoder))
+ }
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D>
+ for ty::Binder<'tcx, ty::PredicateKind<'tcx>>
+{
+ fn decode(decoder: &mut D) -> ty::Binder<'tcx, ty::PredicateKind<'tcx>> {
+ let bound_vars = Decodable::decode(decoder);
+ // Handle shorthands first, if we have a usize > 0x80.
+ ty::Binder::bind_with_vars(
+ if decoder.positioned_at_shorthand() {
+ let pos = decoder.read_usize();
+ assert!(pos >= SHORTHAND_OFFSET);
+ let shorthand = pos - SHORTHAND_OFFSET;
+
+ decoder.with_position(shorthand, ty::PredicateKind::decode)
+ } else {
+ ty::PredicateKind::decode(decoder)
+ },
+ bound_vars,
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Predicate<'tcx> {
+ fn decode(decoder: &mut D) -> ty::Predicate<'tcx> {
+ let predicate_kind = Decodable::decode(decoder);
+ decoder.interner().mk_predicate(predicate_kind)
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for SubstsRef<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ let len = decoder.read_usize();
+ let tcx = decoder.interner();
+ tcx.mk_substs(
+ (0..len).map::<ty::subst::GenericArg<'tcx>, _>(|_| Decodable::decode(decoder)),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for mir::Place<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ let local: mir::Local = Decodable::decode(decoder);
+ let len = decoder.read_usize();
+ let projection = decoder.interner().mk_place_elems(
+ (0..len).map::<mir::PlaceElem<'tcx>, _>(|_| Decodable::decode(decoder)),
+ );
+ mir::Place { local, projection }
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Region<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.interner().mk_region(Decodable::decode(decoder))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for CanonicalVarInfos<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ let len = decoder.read_usize();
+ let interned: Vec<CanonicalVarInfo<'tcx>> =
+ (0..len).map(|_| Decodable::decode(decoder)).collect();
+ decoder.interner().intern_canonical_var_infos(interned.as_slice())
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for AllocId {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.decode_alloc_id()
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::SymbolName<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ ty::SymbolName::new(decoder.interner(), &decoder.read_str())
+ }
+}
+
+macro_rules! impl_decodable_via_ref {
+ ($($t:ty),+) => {
+ $(impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for $t {
+ fn decode(decoder: &mut D) -> Self {
+ RefDecodable::decode(decoder)
+ }
+ })*
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for ty::List<Ty<'tcx>> {
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ let len = decoder.read_usize();
+ decoder.interner().mk_type_list((0..len).map::<Ty<'tcx>, _>(|_| Decodable::decode(decoder)))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ let len = decoder.read_usize();
+ decoder.interner().mk_poly_existential_predicates(
+ (0..len).map::<ty::Binder<'tcx, _>, _>(|_| Decodable::decode(decoder)),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Const<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.interner().mk_const(Decodable::decode(decoder))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for [ty::ValTree<'tcx>] {
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(
+ (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ConstAllocation<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.interner().intern_const_alloc(Decodable::decode(decoder))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for AdtDef<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.interner().intern_adt_def(Decodable::decode(decoder))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for [(ty::Predicate<'tcx>, Span)]
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(
+ (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for [ty::abstract_const::Node<'tcx>]
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(
+ (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for [ty::abstract_const::NodeId]
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(
+ (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for ty::List<ty::BoundVariableKind>
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ let len = decoder.read_usize();
+ decoder.interner().mk_bound_variable_kinds(
+ (0..len).map::<ty::BoundVariableKind, _>(|_| Decodable::decode(decoder)),
+ )
+ }
+}
+
+impl_decodable_via_ref! {
+ &'tcx ty::TypeckResults<'tcx>,
+ &'tcx ty::List<Ty<'tcx>>,
+ &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ &'tcx traits::ImplSource<'tcx, ()>,
+ &'tcx mir::Body<'tcx>,
+ &'tcx mir::UnsafetyCheckResult,
+ &'tcx mir::BorrowCheckResult<'tcx>,
+ &'tcx mir::coverage::CodeRegion,
+ &'tcx ty::List<ty::BoundVariableKind>
+}
+
+#[macro_export]
+macro_rules! __impl_decoder_methods {
+ ($($name:ident -> $ty:ty;)*) => {
+ $(
+ #[inline]
+ fn $name(&mut self) -> $ty {
+ self.opaque.$name()
+ }
+ )*
+ }
+}
+
+macro_rules! impl_arena_allocatable_decoder {
+ ([]$args:tt) => {};
+ ([decode $(, $attrs:ident)*]
+ [$name:ident: $ty:ty]) => {
+ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for $ty {
+ #[inline]
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decode_arena_allocable(decoder)
+ }
+ }
+
+ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for [$ty] {
+ #[inline]
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decode_arena_allocable_slice(decoder)
+ }
+ }
+ };
+}
+
+macro_rules! impl_arena_allocatable_decoders {
+ ([$($a:tt $name:ident: $ty:ty,)*]) => {
+ $(
+ impl_arena_allocatable_decoder!($a [$name: $ty]);
+ )*
+ }
+}
+
+rustc_hir::arena_types!(impl_arena_allocatable_decoders);
+arena_types!(impl_arena_allocatable_decoders);
+
+macro_rules! impl_arena_copy_decoder {
+ (<$tcx:tt> $($ty:ty,)*) => {
+ $(impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for $ty {
+ #[inline]
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc(Decodable::decode(decoder))
+ }
+ }
+
+ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for [$ty] {
+ #[inline]
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(<Vec<_> as Decodable<D>>::decode(decoder))
+ }
+ })*
+ };
+}
+
+impl_arena_copy_decoder! {<'tcx>
+ Span,
+ rustc_span::symbol::Ident,
+ ty::Variance,
+ rustc_span::def_id::DefId,
+ rustc_span::def_id::LocalDefId,
+ (rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo),
+}
+
+#[macro_export]
+macro_rules! implement_ty_decoder {
+ ($DecoderName:ident <$($typaram:tt),*>) => {
+ mod __ty_decoder_impl {
+ use std::borrow::Cow;
+ use rustc_serialize::Decoder;
+
+ use super::$DecoderName;
+
+ impl<$($typaram ),*> Decoder for $DecoderName<$($typaram),*> {
+ $crate::__impl_decoder_methods! {
+ read_u128 -> u128;
+ read_u64 -> u64;
+ read_u32 -> u32;
+ read_u16 -> u16;
+ read_u8 -> u8;
+ read_usize -> usize;
+
+ read_i128 -> i128;
+ read_i64 -> i64;
+ read_i32 -> i32;
+ read_i16 -> i16;
+ read_i8 -> i8;
+ read_isize -> isize;
+
+ read_bool -> bool;
+ read_f64 -> f64;
+ read_f32 -> f32;
+ read_char -> char;
+ read_str -> &str;
+ }
+
+ #[inline]
+ fn read_raw_bytes(&mut self, len: usize) -> &[u8] {
+ self.opaque.read_raw_bytes(len)
+ }
+ }
+ }
+ }
+}
+
+macro_rules! impl_binder_encode_decode {
+ ($($t:ty),+ $(,)?) => {
+ $(
+ impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ty::Binder<'tcx, $t> {
+ fn encode(&self, e: &mut E) {
+ self.bound_vars().encode(e);
+ self.as_ref().skip_binder().encode(e);
+ }
+ }
+ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Binder<'tcx, $t> {
+ fn decode(decoder: &mut D) -> Self {
+ let bound_vars = Decodable::decode(decoder);
+ ty::Binder::bind_with_vars(Decodable::decode(decoder), bound_vars)
+ }
+ }
+ )*
+ }
+}
+
+impl_binder_encode_decode! {
+ &'tcx ty::List<Ty<'tcx>>,
+ ty::FnSig<'tcx>,
+ ty::ExistentialPredicate<'tcx>,
+ ty::TraitRef<'tcx>,
+ Vec<ty::GeneratorInteriorTypeCause<'tcx>>,
+ ty::ExistentialTraitRef<'tcx>,
+}
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
new file mode 100644
index 000000000..f8792edc0
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -0,0 +1,326 @@
+use crate::mir::interpret::LitToConstInput;
+use crate::mir::ConstantKind;
+use crate::ty::{
+ self, InlineConstSubsts, InlineConstSubstsParts, InternalSubsts, ParamEnv, ParamEnvAnd, Ty,
+ TyCtxt, TypeVisitable,
+};
+use rustc_data_structures::intern::Interned;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_macros::HashStable;
+use std::fmt;
+
+mod int;
+mod kind;
+mod valtree;
+
+pub use int::*;
+pub use kind::*;
+pub use valtree::*;
+
+/// Use this rather than `ConstS` whenever possible.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
+#[rustc_pass_by_value]
+pub struct Const<'tcx>(pub Interned<'tcx, ConstS<'tcx>>);
+
+impl<'tcx> fmt::Debug for Const<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // This reflects what `Const` looked like before `Interned` was
+ // introduced. We print it like this to avoid having to update expected
+ // output in a lot of tests.
+ write!(f, "Const {{ ty: {:?}, kind: {:?} }}", self.ty(), self.kind())
+ }
+}
+
+/// Typed constant value.
+#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, HashStable, TyEncodable, TyDecodable)]
+pub struct ConstS<'tcx> {
+ pub ty: Ty<'tcx>,
+ pub kind: ConstKind<'tcx>,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ConstS<'_>, 48);
+
+impl<'tcx> Const<'tcx> {
+ #[inline]
+ pub fn ty(self) -> Ty<'tcx> {
+ self.0.ty
+ }
+
+ #[inline]
+ pub fn kind(self) -> ConstKind<'tcx> {
+ self.0.kind
+ }
+
+    /// Literals and const generic parameters are eagerly converted to a constant; everything else
+    /// becomes `Unevaluated`.
+ pub fn from_anon_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
+ Self::from_opt_const_arg_anon_const(tcx, ty::WithOptConstParam::unknown(def_id))
+ }
+
+ #[instrument(skip(tcx), level = "debug")]
+ pub fn from_opt_const_arg_anon_const(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ ) -> Self {
+        debug!("Const::from_opt_const_arg_anon_const(def={:?})", def);
+
+ let body_id = match tcx.hir().get_by_def_id(def.did) {
+ hir::Node::AnonConst(ac) => ac.body,
+ _ => span_bug!(
+ tcx.def_span(def.did.to_def_id()),
+ "from_anon_const can only process anonymous constants"
+ ),
+ };
+
+ let expr = &tcx.hir().body(body_id).value;
+ debug!(?expr);
+
+ let ty = tcx.type_of(def.def_id_for_type_of());
+
+ match Self::try_eval_lit_or_param(tcx, ty, expr) {
+ Some(v) => v,
+ None => tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: def.to_global(),
+ substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
+ promoted: None,
+ }),
+ ty,
+ }),
+ }
+ }
+
+ #[instrument(skip(tcx), level = "debug")]
+ fn try_eval_lit_or_param(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Option<Self> {
+ // Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
+ // currently have to be wrapped in curly brackets, so it's necessary to special-case.
+ let expr = match &expr.kind {
+ hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
+ block.expr.as_ref().unwrap()
+ }
+ _ => expr,
+ };
+
+ let lit_input = match expr.kind {
+ hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }),
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => match expr.kind {
+ hir::ExprKind::Lit(ref lit) => {
+ Some(LitToConstInput { lit: &lit.node, ty, neg: true })
+ }
+ _ => None,
+ },
+ _ => None,
+ };
+
+ if let Some(lit_input) = lit_input {
+ // If an error occurred, ignore that it's a literal and leave reporting the error up to
+ // mir.
+ match tcx.at(expr.span).lit_to_const(lit_input) {
+ Ok(c) => return Some(c),
+ Err(e) => {
+ tcx.sess.delay_span_bug(
+ expr.span,
+ &format!("Const::from_anon_const: couldn't lit_to_const {:?}", e),
+ );
+ }
+ }
+ }
+
+ use hir::{def::DefKind::ConstParam, def::Res, ExprKind, Path, QPath};
+ match expr.kind {
+ ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
+ // Find the name and index of the const parameter by indexing the generics of
+ // the parent item and construct a `ParamConst`.
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let item_id = tcx.hir().get_parent_node(hir_id);
+ let item_def_id = tcx.hir().local_def_id(item_id);
+ let generics = tcx.generics_of(item_def_id.to_def_id());
+ let index = generics.param_def_id_to_index[&def_id];
+ let name = tcx.hir().name(hir_id);
+ Some(tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Param(ty::ParamConst::new(index, name)),
+ ty,
+ }))
+ }
+ _ => None,
+ }
+ }
+
+ pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
+ debug!("Const::from_inline_const(def_id={:?})", def_id);
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ let body_id = match tcx.hir().get(hir_id) {
+ hir::Node::AnonConst(ac) => ac.body,
+ _ => span_bug!(
+ tcx.def_span(def_id.to_def_id()),
+ "from_inline_const can only process anonymous constants"
+ ),
+ };
+
+ let expr = &tcx.hir().body(body_id).value;
+
+ let ty = tcx.typeck(def_id).node_type(hir_id);
+
+ let ret = match Self::try_eval_lit_or_param(tcx, ty, expr) {
+ Some(v) => v,
+ None => {
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id());
+ let parent_substs =
+ tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
+ let substs =
+ InlineConstSubsts::new(tcx, InlineConstSubstsParts { parent_substs, ty })
+ .substs;
+ tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: ty::WithOptConstParam::unknown(def_id).to_global(),
+ substs,
+ promoted: None,
+ }),
+ ty,
+ })
+ }
+ };
+ debug_assert!(!ret.has_free_regions());
+ ret
+ }
+
+ /// Interns the given value as a constant.
+ #[inline]
+ pub fn from_value(tcx: TyCtxt<'tcx>, val: ty::ValTree<'tcx>, ty: Ty<'tcx>) -> Self {
+ tcx.mk_const(ConstS { kind: ConstKind::Value(val), ty })
+ }
+
+    /// Panics if `self.kind()` is not `ty::ConstKind::Value`.
+ pub fn to_valtree(self) -> ty::ValTree<'tcx> {
+ match self.kind() {
+ ty::ConstKind::Value(valtree) => valtree,
+ _ => bug!("expected ConstKind::Value, got {:?}", self.kind()),
+ }
+ }
+
+ pub fn from_scalar_int(tcx: TyCtxt<'tcx>, i: ScalarInt, ty: Ty<'tcx>) -> Self {
+ let valtree = ty::ValTree::from_scalar_int(i);
+ Self::from_value(tcx, valtree, ty)
+ }
+
+ #[inline]
+ /// Creates a constant with the given integer value and interns it.
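+    ///
+    /// Illustrative sketch (not a doctest; assumes a `tcx` in scope):
+    ///
+    /// ```ignore (illustrative)
+    /// let seven = Const::from_bits(tcx, 7, ParamEnv::empty().and(tcx.types.i32));
+    /// ```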
+ pub fn from_bits(tcx: TyCtxt<'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>) -> Self {
+ let size = tcx
+ .layout_of(ty)
+ .unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
+ .size;
+ Self::from_scalar_int(tcx, ScalarInt::try_from_uint(bits, size).unwrap(), ty.value)
+ }
+
+ #[inline]
+ /// Creates an interned zst constant.
+ pub fn zero_sized(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Self {
+ let valtree = ty::ValTree::zst();
+ Self::from_value(tcx, valtree, ty)
+ }
+
+ #[inline]
+ /// Creates an interned bool constant.
+ pub fn from_bool(tcx: TyCtxt<'tcx>, v: bool) -> Self {
+ Self::from_bits(tcx, v as u128, ParamEnv::empty().and(tcx.types.bool))
+ }
+
+ #[inline]
+ /// Creates an interned usize constant.
+ pub fn from_usize(tcx: TyCtxt<'tcx>, n: u64) -> Self {
+ Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize))
+ }
+
+ #[inline]
+ /// Attempts to evaluate the given constant to bits. Can fail to evaluate in the presence of
+ /// generics (or erroneous code) or if the value can't be represented as bits (e.g. because it
+ /// contains const generic parameters or pointers).
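+    ///
+    /// Illustrative sketch (not a doctest; assumes `tcx` and `param_env` in scope):
+    ///
+    /// ```ignore (illustrative)
+    /// let c = Const::from_usize(tcx, 4);
+    /// assert_eq!(c.try_eval_bits(tcx, param_env, tcx.types.usize), Some(4));
+    /// ```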
+ pub fn try_eval_bits(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Option<u128> {
+ assert_eq!(self.ty(), ty);
+ let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+ // if `ty` does not depend on generic parameters, use an empty param_env
+ self.kind().eval(tcx, param_env).try_to_bits(size)
+ }
+
+ #[inline]
+ pub fn try_eval_bool(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> {
+ self.kind().eval(tcx, param_env).try_to_bool()
+ }
+
+ #[inline]
+ pub fn try_eval_usize(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<u64> {
+ self.kind().eval(tcx, param_env).try_to_machine_usize(tcx)
+ }
+
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, return the
+ /// unevaluated constant.
+ pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Const<'tcx> {
+ if let Some(val) = self.kind().try_eval_for_typeck(tcx, param_env) {
+ match val {
+ Ok(val) => Const::from_value(tcx, val, self.ty()),
+ Err(ErrorGuaranteed { .. }) => tcx.const_error(self.ty()),
+ }
+ } else {
+ // Either the constant isn't evaluatable or ValTree creation failed.
+ self
+ }
+ }
+
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated` and creates a ConstValue if the
+ /// evaluation succeeds. If it doesn't succeed, returns the unevaluated constant.
+ pub fn eval_for_mir(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> ConstantKind<'tcx> {
+ if let Some(val) = self.kind().try_eval_for_mir(tcx, param_env) {
+ match val {
+ Ok(const_val) => ConstantKind::from_value(const_val, self.ty()),
+ Err(ErrorGuaranteed { .. }) => ConstantKind::Ty(tcx.const_error(self.ty())),
+ }
+ } else {
+ ConstantKind::Ty(self)
+ }
+ }
+
+ #[inline]
+ /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
+ pub fn eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, ty: Ty<'tcx>) -> u128 {
+ self.try_eval_bits(tcx, param_env, ty)
+ .unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", ty, self))
+ }
+
+ #[inline]
+ /// Panics if the value cannot be evaluated or doesn't contain a valid `usize`.
+ pub fn eval_usize(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> u64 {
+ self.try_eval_usize(tcx, param_env)
+ .unwrap_or_else(|| bug!("expected usize, got {:#?}", self))
+ }
+}
+
+pub fn const_param_default<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Const<'tcx> {
+ let default_def_id = match tcx.hir().get_by_def_id(def_id.expect_local()) {
+ hir::Node::GenericParam(hir::GenericParam {
+ kind: hir::GenericParamKind::Const { ty: _, default: Some(ac) },
+ ..
+ }) => tcx.hir().local_def_id(ac.hir_id),
+ _ => span_bug!(
+ tcx.def_span(def_id),
+ "`const_param_default` expected a generic parameter with a constant"
+ ),
+ };
+ Const::from_anon_const(tcx, default_def_id)
+}
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
new file mode 100644
index 000000000..7436f0f6f
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -0,0 +1,483 @@
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::Float;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_target::abi::Size;
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+use std::num::NonZeroU8;
+
+use crate::ty::TyCtxt;
+
+#[derive(Copy, Clone)]
+/// A type for representing any integer. Only used for printing.
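+///
+/// As an illustrative sketch of the resulting `Debug` output (not a doctest):
+/// a signed one-byte value of `1` prints as `1`, or as `1_i8` with the
+/// alternate flag (`{:#?}`); extreme values print symbolically, e.g. `i8::MAX`.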
+pub struct ConstInt {
+    /// The raw, "untyped" value; the two fields below supply the type information.
+ int: ScalarInt,
+ /// Whether the value is of a signed integer type.
+ signed: bool,
+ /// Whether the value is a `usize` or `isize` type.
+ is_ptr_sized_integral: bool,
+}
+
+impl ConstInt {
+ pub fn new(int: ScalarInt, signed: bool, is_ptr_sized_integral: bool) -> Self {
+ Self { int, signed, is_ptr_sized_integral }
+ }
+}
+
+impl std::fmt::Debug for ConstInt {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let Self { int, signed, is_ptr_sized_integral } = *self;
+ let size = int.size().bytes();
+ let raw = int.data;
+ if signed {
+ let bit_size = size * 8;
+ let min = 1u128 << (bit_size - 1);
+ let max = min - 1;
+ if raw == min {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "isize::MIN"),
+ (1, _) => write!(fmt, "i8::MIN"),
+ (2, _) => write!(fmt, "i16::MIN"),
+ (4, _) => write!(fmt, "i32::MIN"),
+ (8, _) => write!(fmt, "i64::MIN"),
+ (16, _) => write!(fmt, "i128::MIN"),
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ } else if raw == max {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "isize::MAX"),
+ (1, _) => write!(fmt, "i8::MAX"),
+ (2, _) => write!(fmt, "i16::MAX"),
+ (4, _) => write!(fmt, "i32::MAX"),
+ (8, _) => write!(fmt, "i64::MAX"),
+ (16, _) => write!(fmt, "i128::MAX"),
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ } else {
+ match size {
+ 1 => write!(fmt, "{}", raw as i8)?,
+ 2 => write!(fmt, "{}", raw as i16)?,
+ 4 => write!(fmt, "{}", raw as i32)?,
+ 8 => write!(fmt, "{}", raw as i64)?,
+ 16 => write!(fmt, "{}", raw as i128)?,
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ if fmt.alternate() {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "_isize")?,
+ (1, _) => write!(fmt, "_i8")?,
+ (2, _) => write!(fmt, "_i16")?,
+ (4, _) => write!(fmt, "_i32")?,
+ (8, _) => write!(fmt, "_i64")?,
+ (16, _) => write!(fmt, "_i128")?,
+ _ => bug!(),
+ }
+ }
+ Ok(())
+ }
+ } else {
+ let max = Size::from_bytes(size).truncate(u128::MAX);
+ if raw == max {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "usize::MAX"),
+ (1, _) => write!(fmt, "u8::MAX"),
+ (2, _) => write!(fmt, "u16::MAX"),
+ (4, _) => write!(fmt, "u32::MAX"),
+ (8, _) => write!(fmt, "u64::MAX"),
+ (16, _) => write!(fmt, "u128::MAX"),
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ } else {
+ match size {
+ 1 => write!(fmt, "{}", raw as u8)?,
+ 2 => write!(fmt, "{}", raw as u16)?,
+ 4 => write!(fmt, "{}", raw as u32)?,
+ 8 => write!(fmt, "{}", raw as u64)?,
+ 16 => write!(fmt, "{}", raw as u128)?,
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ if fmt.alternate() {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "_usize")?,
+ (1, _) => write!(fmt, "_u8")?,
+ (2, _) => write!(fmt, "_u16")?,
+ (4, _) => write!(fmt, "_u32")?,
+ (8, _) => write!(fmt, "_u64")?,
+ (16, _) => write!(fmt, "_u128")?,
+ _ => bug!(),
+ }
+ }
+ Ok(())
+ }
+ }
+ }
+}
+
+/// The raw bytes of a simple value.
+///
+/// This is a packed struct in order to allow this type to be optimally embedded in enums
+/// (like Scalar).
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[repr(packed)]
+pub struct ScalarInt {
+ /// The first `size` bytes of `data` are the value.
+    /// Do not try to read fewer or more bytes than that. The remaining bytes must be 0.
+ data: u128,
+ size: NonZeroU8,
+}
+
+// Cannot derive these, as the derives take references to the fields, and we
+// can't take references to fields of packed structs.
+impl<CTX> crate::ty::HashStable<CTX> for ScalarInt {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut crate::ty::StableHasher) {
+ // Using a block `{self.data}` here to force a copy instead of using `self.data`
+ // directly, because `hash_stable` takes `&self` and would thus borrow `self.data`.
+ // Since `Self` is a packed struct, that would create a possibly unaligned reference,
+ // which is UB.
+ { self.data }.hash_stable(hcx, hasher);
+ self.size.get().hash_stable(hcx, hasher);
+ }
+}
+
+impl<S: Encoder> Encodable<S> for ScalarInt {
+ fn encode(&self, s: &mut S) {
+ s.emit_u128(self.data);
+ s.emit_u8(self.size.get());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for ScalarInt {
+ fn decode(d: &mut D) -> ScalarInt {
+ ScalarInt { data: d.read_u128(), size: NonZeroU8::new(d.read_u8()).unwrap() }
+ }
+}
+
+impl ScalarInt {
+ pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: NonZeroU8::new(1).unwrap() };
+
+ pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: NonZeroU8::new(1).unwrap() };
+
+ #[inline]
+ pub fn size(self) -> Size {
+ Size::from_bytes(self.size.get())
+ }
+
+ /// Make sure the `data` fits in `size`.
+    /// This is guaranteed by all constructors here, but this check has saved us from
+    /// bugs many times in the past, so keeping it around is definitely worth it.
+ #[inline(always)]
+ fn check_data(self) {
+ // Using a block `{self.data}` here to force a copy instead of using `self.data`
+ // directly, because `debug_assert_eq` takes references to its arguments and formatting
+ // arguments and would thus borrow `self.data`. Since `Self`
+ // is a packed struct, that would create a possibly unaligned reference, which
+ // is UB.
+ debug_assert_eq!(
+ self.size().truncate(self.data),
+ { self.data },
+ "Scalar value {:#x} exceeds size of {} bytes",
+ { self.data },
+ self.size
+ );
+ }
+
+ #[inline]
+ pub fn null(size: Size) -> Self {
+ Self { data: 0, size: NonZeroU8::new(size.bytes() as u8).unwrap() }
+ }
+
+ #[inline]
+ pub fn is_null(self) -> bool {
+ self.data == 0
+ }
+
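+    /// Constructs a `ScalarInt` only if the value already fits in `size`
+    /// bytes. A sketch (not a doctest):
+    ///
+    /// ```ignore (illustrative)
+    /// assert!(ScalarInt::try_from_uint(255u128, Size::from_bytes(1)).is_some());
+    /// assert!(ScalarInt::try_from_uint(256u128, Size::from_bytes(1)).is_none());
+    /// ```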
+ #[inline]
+ pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
+ let data = i.into();
+ if size.truncate(data) == data {
+ Some(Self { data, size: NonZeroU8::new(size.bytes() as u8).unwrap() })
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
+ let i = i.into();
+ // `into` performed sign extension, we have to truncate
+ let truncated = size.truncate(i as u128);
+ if size.sign_extend(truncated) as i128 == i {
+ Some(Self { data: truncated, size: NonZeroU8::new(size.bytes() as u8).unwrap() })
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ pub fn assert_bits(self, target_size: Size) -> u128 {
+ self.to_bits(target_size).unwrap_or_else(|size| {
+ bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes())
+ })
+ }
+
+ #[inline]
+ pub fn to_bits(self, target_size: Size) -> Result<u128, Size> {
+ assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+ if target_size.bytes() == u64::from(self.size.get()) {
+ self.check_data();
+ Ok(self.data)
+ } else {
+ Err(self.size())
+ }
+ }
+
+ #[inline]
+ pub fn try_to_machine_usize<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Result<u64, Size> {
+ Ok(self.to_bits(tcx.data_layout.pointer_size)? as u64)
+ }
+
+ /// Tries to convert the `ScalarInt` to an unsigned integer of the given size.
+ /// Fails if the size of the `ScalarInt` is unequal to `size` and returns the
+ /// `ScalarInt`s size in that case.
+ #[inline]
+ pub fn try_to_uint(self, size: Size) -> Result<u128, Size> {
+ self.to_bits(size)
+ }
+
+    /// Tries to convert the `ScalarInt` to `u8`. Fails if the size of the `ScalarInt`
+    /// is not equal to `Size { raw: 1 }` and returns the `size` value of the `ScalarInt` in
+    /// that case.
+ #[inline]
+ pub fn try_to_u8(self) -> Result<u8, Size> {
+ self.to_bits(Size::from_bits(8)).map(|v| u8::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to `u16`. Fails if the size of the `ScalarInt`
+    /// is not equal to `Size { raw: 2 }` and returns the `size` value of the `ScalarInt` in
+ /// that case.
+ #[inline]
+ pub fn try_to_u16(self) -> Result<u16, Size> {
+ self.to_bits(Size::from_bits(16)).map(|v| u16::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to `u32`. Fails if the `size` of the `ScalarInt`
+    /// is not equal to `Size { raw: 4 }` and returns the `size` value of the `ScalarInt` in
+ /// that case.
+ #[inline]
+ pub fn try_to_u32(self) -> Result<u32, Size> {
+ self.to_bits(Size::from_bits(32)).map(|v| u32::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to `u64`. Fails if the `size` of the `ScalarInt`
+    /// is not equal to `Size { raw: 8 }` and returns the `size` value of the `ScalarInt` in
+ /// that case.
+ #[inline]
+ pub fn try_to_u64(self) -> Result<u64, Size> {
+ self.to_bits(Size::from_bits(64)).map(|v| u64::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to `u128`. Fails if the `size` of the `ScalarInt`
+    /// is not equal to `Size { raw: 16 }` and returns the `size` value of the `ScalarInt` in
+ /// that case.
+ #[inline]
+ pub fn try_to_u128(self) -> Result<u128, Size> {
+ self.to_bits(Size::from_bits(128))
+ }
+
+ /// Tries to convert the `ScalarInt` to a signed integer of the given size.
+ /// Fails if the size of the `ScalarInt` is unequal to `size` and returns the
+ /// `ScalarInt`s size in that case.
+ #[inline]
+ pub fn try_to_int(self, size: Size) -> Result<i128, Size> {
+ let b = self.to_bits(size)?;
+ Ok(size.sign_extend(b) as i128)
+ }
+
+ /// Tries to convert the `ScalarInt` to i8.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 1 }`
+ /// and returns the `ScalarInt`s size in that case.
+ pub fn try_to_i8(self) -> Result<i8, Size> {
+ self.try_to_int(Size::from_bits(8)).map(|v| i8::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to i16.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 2 }`
+ /// and returns the `ScalarInt`s size in that case.
+ pub fn try_to_i16(self) -> Result<i16, Size> {
+ self.try_to_int(Size::from_bits(16)).map(|v| i16::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to i32.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 4 }`
+ /// and returns the `ScalarInt`s size in that case.
+ pub fn try_to_i32(self) -> Result<i32, Size> {
+ self.try_to_int(Size::from_bits(32)).map(|v| i32::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to i64.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 8 }`
+ /// and returns the `ScalarInt`s size in that case.
+ pub fn try_to_i64(self) -> Result<i64, Size> {
+ self.try_to_int(Size::from_bits(64)).map(|v| i64::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to i128.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 16 }`
+ /// and returns the `ScalarInt`s size in that case.
+ pub fn try_to_i128(self) -> Result<i128, Size> {
+ self.try_to_int(Size::from_bits(128)).map(|v| i128::try_from(v).unwrap())
+ }
+}
+
+macro_rules! from {
+ ($($ty:ty),*) => {
+ $(
+ impl From<$ty> for ScalarInt {
+ #[inline]
+ fn from(u: $ty) -> Self {
+ Self {
+ data: u128::from(u),
+ size: NonZeroU8::new(std::mem::size_of::<$ty>() as u8).unwrap(),
+ }
+ }
+ }
+ )*
+ }
+}
+
+macro_rules! try_from {
+ ($($ty:ty),*) => {
+ $(
+ impl TryFrom<ScalarInt> for $ty {
+ type Error = Size;
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Size> {
+ // The `unwrap` cannot fail because to_bits (if it succeeds)
+ // is guaranteed to return a value that fits into the size.
+ int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
+ .map(|u| u.try_into().unwrap())
+ }
+ }
+ )*
+ }
+}
+
+from!(u8, u16, u32, u64, u128, bool);
+try_from!(u8, u16, u32, u64, u128);
+
+impl TryFrom<ScalarInt> for bool {
+ type Error = Size;
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Size> {
+ int.to_bits(Size::from_bytes(1)).and_then(|u| match u {
+ 0 => Ok(false),
+ 1 => Ok(true),
+ _ => Err(Size::from_bytes(1)),
+ })
+ }
+}
+
+impl From<char> for ScalarInt {
+ #[inline]
+ fn from(c: char) -> Self {
+ Self { data: c as u128, size: NonZeroU8::new(std::mem::size_of::<char>() as u8).unwrap() }
+ }
+}
+
+/// Error returned when a conversion from `ScalarInt` to `char` fails.
+#[derive(Debug)]
+pub struct CharTryFromScalarInt;
+
+impl TryFrom<ScalarInt> for char {
+ type Error = CharTryFromScalarInt;
+
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Self::Error> {
+ let Ok(bits) = int.to_bits(Size::from_bytes(std::mem::size_of::<char>())) else {
+ return Err(CharTryFromScalarInt);
+ };
+ match char::from_u32(bits.try_into().unwrap()) {
+ Some(c) => Ok(c),
+ None => Err(CharTryFromScalarInt),
+ }
+ }
+}
+
+impl From<Single> for ScalarInt {
+ #[inline]
+ fn from(f: Single) -> Self {
+ // We trust apfloat to give us properly truncated data.
+ Self { data: f.to_bits(), size: NonZeroU8::new((Single::BITS / 8) as u8).unwrap() }
+ }
+}
+
+impl TryFrom<ScalarInt> for Single {
+ type Error = Size;
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Size> {
+ int.to_bits(Size::from_bytes(4)).map(Self::from_bits)
+ }
+}
+
+impl From<Double> for ScalarInt {
+ #[inline]
+ fn from(f: Double) -> Self {
+ // We trust apfloat to give us properly truncated data.
+ Self { data: f.to_bits(), size: NonZeroU8::new((Double::BITS / 8) as u8).unwrap() }
+ }
+}
+
+impl TryFrom<ScalarInt> for Double {
+ type Error = Size;
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Size> {
+ int.to_bits(Size::from_bytes(8)).map(Self::from_bits)
+ }
+}
+
+impl fmt::Debug for ScalarInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Dispatch to LowerHex below.
+ write!(f, "0x{:x}", self)
+ }
+}
+
+impl fmt::LowerHex for ScalarInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.check_data();
+ if f.alternate() {
+ // Like regular ints, alternate flag adds leading `0x`.
+ write!(f, "0x")?;
+ }
+ // Format as hex number wide enough to fit any value of the given `size`.
+ // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+ // Using a block `{self.data}` here to force a copy instead of using `self.data`
+ // directly, because `write!` takes references to its formatting arguments and
+ // would thus borrow `self.data`. Since `Self` is a packed struct, that would
+ // create a possibly unaligned reference, which is UB.
+ write!(f, "{:01$x}", { self.data }, self.size.get() as usize * 2)
+ }
+}
+
+impl fmt::UpperHex for ScalarInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.check_data();
+ // Format as hex number wide enough to fit any value of the given `size`.
+ // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+ // Using a block `{self.data}` here to force a copy instead of using `self.data`
+ // directly, because `write!` takes references to its formatting arguments and
+ // would thus borrow `self.data`. Since `Self` is a packed struct, that would
+ // create a possibly unaligned reference, which is UB.
+ write!(f, "{:01$X}", { self.data }, self.size.get() as usize * 2)
+ }
+}
+
+impl fmt::Display for ScalarInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.check_data();
+ write!(f, "{}", { self.data })
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs
new file mode 100644
index 000000000..cb0137d2e
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts/kind.rs
@@ -0,0 +1,239 @@
+use std::convert::TryInto;
+
+use crate::mir::interpret::{AllocId, ConstValue, Scalar};
+use crate::mir::Promoted;
+use crate::ty::subst::{InternalSubsts, SubstsRef};
+use crate::ty::ParamEnv;
+use crate::ty::{self, TyCtxt, TypeVisitable};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_target::abi::Size;
+
+use super::ScalarInt;
+/// An unevaluated, potentially generic, constant.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
+#[derive(Hash, HashStable)]
+pub struct Unevaluated<'tcx, P = Option<Promoted>> {
+ pub def: ty::WithOptConstParam<DefId>,
+ pub substs: SubstsRef<'tcx>,
+ pub promoted: P,
+}
+
+impl<'tcx> Unevaluated<'tcx> {
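+ /// Drops the `promoted` field, debug-asserting that it is `None`.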
+ #[inline]
+ pub fn shrink(self) -> Unevaluated<'tcx, ()> {
+ debug_assert_eq!(self.promoted, None);
+ Unevaluated { def: self.def, substs: self.substs, promoted: () }
+ }
+}
+
+impl<'tcx> Unevaluated<'tcx, ()> {
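+ /// Reinstates an explicit `promoted` field with value `None`.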
+ #[inline]
+ pub fn expand(self) -> Unevaluated<'tcx> {
+ Unevaluated { def: self.def, substs: self.substs, promoted: None }
+ }
+}
+
+impl<'tcx, P: Default> Unevaluated<'tcx, P> {
+ #[inline]
+ pub fn new(def: ty::WithOptConstParam<DefId>, substs: SubstsRef<'tcx>) -> Unevaluated<'tcx, P> {
+ Unevaluated { def, substs, promoted: Default::default() }
+ }
+}
+
+/// Represents a constant in Rust.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(Hash, HashStable)]
+pub enum ConstKind<'tcx> {
+ /// A const generic parameter.
+ Param(ty::ParamConst),
+
+ /// Infer the value of the const.
+ Infer(InferConst<'tcx>),
+
+ /// Bound const variable, used only when preparing a trait query.
+ Bound(ty::DebruijnIndex, ty::BoundVar),
+
+ /// A placeholder const - universally quantified higher-ranked const.
+ Placeholder(ty::PlaceholderConst<'tcx>),
+
+ /// A constant that has not been evaluated yet. The HIR uses `Unevaluated` everywhere and
+ /// later normalizes it to one of the other variants once the code is monomorphic enough.
+ Unevaluated(Unevaluated<'tcx>),
+
+ /// Used to hold computed value.
+ Value(ty::ValTree<'tcx>),
+
+ /// A placeholder for a const which could not be computed; this is
+ /// propagated to avoid useless error messages.
+ Error(ty::DelaySpanBugEmitted),
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ConstKind<'_>, 40);
+
+impl<'tcx> ConstKind<'tcx> {
+ #[inline]
+ pub fn try_to_value(self) -> Option<ty::ValTree<'tcx>> {
+ if let ConstKind::Value(val) = self { Some(val) } else { None }
+ }
+
+ #[inline]
+ pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> {
+ self.try_to_value()?.try_to_scalar()
+ }
+
+ #[inline]
+ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
+ self.try_to_value()?.try_to_scalar_int()
+ }
+
+ #[inline]
+ pub fn try_to_bits(self, size: Size) -> Option<u128> {
+ self.try_to_scalar_int()?.to_bits(size).ok()
+ }
+
+ #[inline]
+ pub fn try_to_bool(self) -> Option<bool> {
+ self.try_to_scalar_int()?.try_into().ok()
+ }
+
+ #[inline]
+ pub fn try_to_machine_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> {
+ self.try_to_value()?.try_to_machine_usize(tcx)
+ }
+}
+
+/// An inference variable for a const, for use in const generics.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum InferConst<'tcx> {
+ /// Infer the value of the const.
+ Var(ty::ConstVid<'tcx>),
+ /// A fresh const variable. See `infer::freshen` for more details.
+ Fresh(u32),
+}
+
+enum EvalMode {
+ Typeck,
+ Mir,
+}
+
+enum EvalResult<'tcx> {
+ ValTree(ty::ValTree<'tcx>),
+ ConstVal(ConstValue<'tcx>),
+}
+
+impl<'tcx> ConstKind<'tcx> {
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, returns
+ /// the constant unchanged.
+ pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self {
+ self.try_eval_for_typeck(tcx, param_env).and_then(Result::ok).map_or(self, ConstKind::Value)
+ }
+
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary,
+ /// returns `None`.
+ // FIXME(@lcnr): Completely rework the evaluation/normalization system for `ty::Const` once valtrees are merged.
+ pub fn try_eval_for_mir(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ) -> Option<Result<ConstValue<'tcx>, ErrorGuaranteed>> {
+ match self.try_eval_inner(tcx, param_env, EvalMode::Mir) {
+ Some(Ok(EvalResult::ValTree(_))) => unreachable!(),
+ Some(Ok(EvalResult::ConstVal(v))) => Some(Ok(v)),
+ Some(Err(e)) => Some(Err(e)),
+ None => None,
+ }
+ }
+
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary,
+ /// returns `None`.
+ // FIXME(@lcnr): Completely rework the evaluation/normalization system for `ty::Const` once valtrees are merged.
+ pub fn try_eval_for_typeck(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ) -> Option<Result<ty::ValTree<'tcx>, ErrorGuaranteed>> {
+ match self.try_eval_inner(tcx, param_env, EvalMode::Typeck) {
+ Some(Ok(EvalResult::ValTree(v))) => Some(Ok(v)),
+ Some(Ok(EvalResult::ConstVal(_))) => unreachable!(),
+ Some(Err(e)) => Some(Err(e)),
+ None => None,
+ }
+ }
+
+ #[inline]
+ fn try_eval_inner(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ eval_mode: EvalMode,
+ ) -> Option<Result<EvalResult<'tcx>, ErrorGuaranteed>> {
+ if let ConstKind::Unevaluated(unevaluated) = self {
+ use crate::mir::interpret::ErrorHandled;
+
+ // HACK(eddyb) this erases lifetimes even though `const_eval_resolve`
+ // also does later, but we want to do it before checking for
+ // inference variables.
+ // Note that we erase regions *before* calling `with_reveal_all_normalized`,
+ // so that we don't try to invoke this query with
+ // any region variables.
+ let param_env_and = tcx
+ .erase_regions(param_env)
+ .with_reveal_all_normalized(tcx)
+ .and(tcx.erase_regions(unevaluated));
+
+ // HACK(eddyb) when the query key would contain inference variables,
+ // attempt using identity substs and `ParamEnv` instead, that will succeed
+ // when the expression doesn't depend on any parameters.
+ // FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that
+ // we can call `infcx.const_eval_resolve` which handles inference variables.
+ let param_env_and = if param_env_and.needs_infer() {
+ tcx.param_env(unevaluated.def.did).and(ty::Unevaluated {
+ def: unevaluated.def,
+ substs: InternalSubsts::identity_for_item(tcx, unevaluated.def.did),
+ promoted: unevaluated.promoted,
+ })
+ } else {
+ param_env_and
+ };
+
+ // FIXME(eddyb) maybe the `const_eval_*` methods should take
+ // `ty::ParamEnvAnd` instead of having them separate.
+ let (param_env, unevaluated) = param_env_and.into_parts();
+ // try to resolve e.g. associated constants to their definition on an impl, and then
+ // evaluate the const.
+ match eval_mode {
+ EvalMode::Typeck => {
+ match tcx.const_eval_resolve_for_typeck(param_env, unevaluated, None) {
+ // NOTE(eddyb) `val` contains no lifetimes/types/consts,
+ // and we use the original type, so nothing from `substs`
+ // (which may be identity substs, see above),
+ // can leak through `val` into the const we return.
+ Ok(val) => Some(Ok(EvalResult::ValTree(val?))),
+ Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => None,
+ Err(ErrorHandled::Reported(e)) => Some(Err(e)),
+ }
+ }
+ EvalMode::Mir => {
+ match tcx.const_eval_resolve(param_env, unevaluated, None) {
+ // NOTE(eddyb) `val` contains no lifetimes/types/consts,
+ // and we use the original type, so nothing from `substs`
+ // (which may be identity substs, see above),
+ // can leak through `val` into the const we return.
+ Ok(val) => Some(Ok(EvalResult::ConstVal(val))),
+ Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => None,
+ Err(ErrorHandled::Reported(e)) => Some(Err(e)),
+ }
+ }
+ }
+ } else {
+ None
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs
new file mode 100644
index 000000000..93707bb18
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts/valtree.rs
@@ -0,0 +1,104 @@
+use super::ScalarInt;
+use crate::mir::interpret::{AllocId, Scalar};
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_macros::{HashStable, TyDecodable, TyEncodable};
+
+#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(HashStable)]
+/// This data structure is used to represent the value of constants used in the type system.
+///
+/// We explicitly choose a different data structure from the way values are processed within
+/// CTFE, because in the type system equal values (according to their `PartialEq`) must also
+/// have equal representation (`==` on the rustc data structure, e.g. `ValTree`) and vice
+/// versa. Since CTFE uses `AllocId` to represent pointers, it often happens that two
+/// different `AllocId`s point to equal values. So we may end up with different
+/// representations for two constants whose value is `&42`. Furthermore, any kind of struct
+/// that has padding will have arbitrary values within that padding, even if the values of
+/// the struct are the same.
+///
+/// `ValTree` does not have this problem with representation, as it only contains integers or
+/// lists of (nested) `ValTree`.
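+///
+/// For example (illustrative), the constant `(1_u8, false)` could be represented as
+///
+/// ```ignore (illustrative)
+/// ValTree::Branch(&[
+/// ValTree::Leaf(ScalarInt::from(1_u8)),
+/// ValTree::Leaf(ScalarInt::from(false)),
+/// ])
+/// ```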
+pub enum ValTree<'tcx> {
+ /// ZSTs, integers, `bool`, `char` are represented as scalars.
+ /// See the `ScalarInt` documentation for how `ScalarInt` guarantees that equal values
+ /// of these types have the same representation.
+ Leaf(ScalarInt),
+
+ // SliceOrStr(ValSlice<'tcx>),
+ // don't use SliceOrStr for now
+ /// The fields of any kind of aggregate. Structs, tuples and arrays are represented by
+ /// listing their fields' values in order.
+ /// Enums are represented by storing their discriminant as a field, followed by all
+ /// the fields of the variant.
+ Branch(&'tcx [ValTree<'tcx>]),
+}
+
+impl<'tcx> ValTree<'tcx> {
+ pub fn zst() -> Self {
+ Self::Branch(&[])
+ }
+
+ #[inline]
+ pub fn unwrap_leaf(self) -> ScalarInt {
+ match self {
+ Self::Leaf(s) => s,
+ _ => bug!("expected leaf, got {:?}", self),
+ }
+ }
+
+ #[inline]
+ pub fn unwrap_branch(self) -> &'tcx [Self] {
+ match self {
+ Self::Branch(branch) => branch,
+ _ => bug!("expected branch, got {:?}", self),
+ }
+ }
+
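+ /// Interns the given bytes as a `Branch` of one `u8` leaf per byte.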
+ pub fn from_raw_bytes<'a>(tcx: TyCtxt<'tcx>, bytes: &'a [u8]) -> Self {
+ let branches = bytes.iter().map(|b| Self::Leaf(ScalarInt::from(*b)));
+ let interned = tcx.arena.alloc_from_iter(branches);
+
+ Self::Branch(interned)
+ }
+
+ pub fn from_scalar_int(i: ScalarInt) -> Self {
+ Self::Leaf(i)
+ }
+
+ pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> {
+ self.try_to_scalar_int().map(Scalar::Int)
+ }
+
+ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
+ match self {
+ Self::Leaf(s) => Some(s),
+ Self::Branch(_) => None,
+ }
+ }
+
+ pub fn try_to_machine_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> {
+ self.try_to_scalar_int().and_then(|s| s.try_to_machine_usize(tcx).ok())
+ }
+
+ /// Get the values inside the `ValTree` as a slice of bytes. This only works for
+ /// constants with types `&str`, `&[u8]`, or `[u8; _]`.
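+ ///
+ /// For example (illustrative), a constant of type `[u8; 2]` with value `[1, 2]` is
+ /// stored as a `Branch` of two `u8` leaves, and this returns `Some(&[1, 2])`.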
+ pub fn try_to_raw_bytes(self, tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<&'tcx [u8]> {
+ match ty.kind() {
+ ty::Ref(_, inner_ty, _) => match inner_ty.kind() {
+ // `&str` can be interpreted as raw bytes
+ ty::Str => {}
+ // `&[u8]` can be interpreted as raw bytes
+ ty::Slice(slice_ty) if *slice_ty == tcx.types.u8 => {}
+ // other `&_` can't be interpreted as raw bytes
+ _ => return None,
+ },
+ // `[u8; N]` can be interpreted as raw bytes
+ ty::Array(array_ty, _) if *array_ty == tcx.types.u8 => {}
+ // Otherwise, type cannot be interpreted as raw bytes
+ _ => return None,
+ }
+
+ Some(tcx.arena.alloc_from_iter(
+ self.unwrap_branch().into_iter().map(|v| v.unwrap_leaf().try_to_u8().unwrap()),
+ ))
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
new file mode 100644
index 000000000..0a0f45ce1
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -0,0 +1,3018 @@
+//! Type context book-keeping.
+
+use crate::arena::Arena;
+use crate::dep_graph::{DepGraph, DepKind, DepKindStruct};
+use crate::hir::place::Place as HirPlace;
+use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos};
+use crate::lint::{struct_lint_level, LintLevelSource};
+use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
+use crate::middle::resolve_lifetime;
+use crate::middle::stability;
+use crate::mir::interpret::{self, Allocation, ConstAllocation};
+use crate::mir::{
+ Body, BorrowCheckResult, Field, Local, Place, PlaceElem, ProjectionKind, Promoted,
+};
+use crate::thir::Thir;
+use crate::traits;
+use crate::ty::query::{self, TyCtxtAt};
+use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts};
+use crate::ty::{
+ self, AdtDef, AdtDefData, AdtKind, Binder, BindingMode, BoundVar, CanonicalPolyFnSig,
+ ClosureSizeProfileData, Const, ConstS, ConstVid, DefIdTree, ExistentialPredicate, FloatTy,
+ FloatVar, FloatVid, GenericParamDefKind, InferConst, InferTy, IntTy, IntVar, IntVid, List,
+ ParamConst, ParamTy, PolyFnSig, Predicate, PredicateKind, PredicateS, ProjectionTy, Region,
+ RegionKind, ReprOptions, TraitObjectVisitor, Ty, TyKind, TyS, TyVar, TyVid, TypeAndMut, UintTy,
+};
+use rustc_ast as ast;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::intern::{Interned, WithStableHash};
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::steal::Steal;
+use rustc_data_structures::sync::{self, Lock, Lrc, ReadGuard, RwLock, WorkerLocal};
+use rustc_data_structures::vec_map::VecMap;
+use rustc_errors::{DecorateLint, ErrorGuaranteed, LintDiagnosticBuilder, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
+use rustc_hir::definitions::Definitions;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{
+ Constness, ExprKind, HirId, ImplItemKind, ItemKind, ItemLocalId, ItemLocalMap, ItemLocalSet,
+ Node, TraitCandidate, TraitItemKind,
+};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_macros::HashStable;
+use rustc_middle::mir::FakeReadCause;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
+use rustc_session::config::{CrateType, OutputFilenames};
+use rustc_session::cstore::CrateStoreDyn;
+use rustc_session::lint::{Level, Lint};
+use rustc_session::Limit;
+use rustc_session::Session;
+use rustc_span::def_id::{DefPathHash, StableCrateId};
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi::{Layout, LayoutS, TargetDataLayout, VariantIdx};
+use rustc_target::spec::abi;
+use rustc_type_ir::sty::TyKind::*;
+use rustc_type_ir::{InternAs, InternIteratorElement, Interner, TypeFlags};
+
+use std::any::Any;
+use std::borrow::Borrow;
+use std::cmp::Ordering;
+use std::collections::hash_map::{self, Entry};
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::mem;
+use std::ops::{Bound, Deref};
+use std::sync::Arc;
+
+use super::{ImplPolarity, RvalueScopes};
+
+pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync {
+ /// Creates a new `OnDiskCache` instance from the serialized data in `data`.
+ fn new(sess: &'tcx Session, data: Mmap, start_pos: usize) -> Self
+ where
+ Self: Sized;
+
+ fn new_empty(source_map: &'tcx SourceMap) -> Self
+ where
+ Self: Sized;
+
+ fn drop_serialized_data(&self, tcx: TyCtxt<'tcx>);
+
+ fn serialize(&self, tcx: TyCtxt<'tcx>, encoder: FileEncoder) -> FileEncodeResult;
+}
+
+#[allow(rustc::usage_of_ty_tykind)]
+impl<'tcx> Interner for TyCtxt<'tcx> {
+ type AdtDef = ty::AdtDef<'tcx>;
+ type SubstsRef = ty::SubstsRef<'tcx>;
+ type DefId = DefId;
+ type Ty = Ty<'tcx>;
+ type Const = ty::Const<'tcx>;
+ type Region = Region<'tcx>;
+ type TypeAndMut = TypeAndMut<'tcx>;
+ type Mutability = hir::Mutability;
+ type Movability = hir::Movability;
+ type PolyFnSig = PolyFnSig<'tcx>;
+ type ListBinderExistentialPredicate = &'tcx List<Binder<'tcx, ExistentialPredicate<'tcx>>>;
+ type BinderListTy = Binder<'tcx, &'tcx List<Ty<'tcx>>>;
+ type ListTy = &'tcx List<Ty<'tcx>>;
+ type ProjectionTy = ty::ProjectionTy<'tcx>;
+ type ParamTy = ParamTy;
+ type BoundTy = ty::BoundTy;
+ type PlaceholderType = ty::PlaceholderType;
+ type InferTy = InferTy;
+ type DelaySpanBugEmitted = DelaySpanBugEmitted;
+ type PredicateKind = ty::PredicateKind<'tcx>;
+ type AllocId = crate::mir::interpret::AllocId;
+
+ type EarlyBoundRegion = ty::EarlyBoundRegion;
+ type BoundRegion = ty::BoundRegion;
+ type FreeRegion = ty::FreeRegion;
+ type RegionVid = ty::RegionVid;
+ type PlaceholderRegion = ty::PlaceholderRegion;
+}
+
+/// A type that is not publicly constructible. This prevents people from making [`TyKind::Error`]s
+/// except through the error-reporting functions on a [`tcx`][TyCtxt].
+#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub struct DelaySpanBugEmitted {
+ pub reported: ErrorGuaranteed,
+ _priv: (),
+}
+
+type InternedSet<'tcx, T> = ShardedHashMap<InternedInSet<'tcx, T>, ()>;
+
+pub struct CtxtInterners<'tcx> {
+ /// The arena that types, regions, etc. are allocated from.
+ arena: &'tcx WorkerLocal<Arena<'tcx>>,
+
+ // Specifically use a speedy hash algorithm for these hash sets, since
+ // they're accessed quite often.
+ type_: InternedSet<'tcx, WithStableHash<TyS<'tcx>>>,
+ substs: InternedSet<'tcx, InternalSubsts<'tcx>>,
+ canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo<'tcx>>>,
+ region: InternedSet<'tcx, RegionKind<'tcx>>,
+ poly_existential_predicates:
+ InternedSet<'tcx, List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>>,
+ predicate: InternedSet<'tcx, PredicateS<'tcx>>,
+ predicates: InternedSet<'tcx, List<Predicate<'tcx>>>,
+ projs: InternedSet<'tcx, List<ProjectionKind>>,
+ place_elems: InternedSet<'tcx, List<PlaceElem<'tcx>>>,
+ const_: InternedSet<'tcx, ConstS<'tcx>>,
+ const_allocation: InternedSet<'tcx, Allocation>,
+ bound_variable_kinds: InternedSet<'tcx, List<ty::BoundVariableKind>>,
+ layout: InternedSet<'tcx, LayoutS<'tcx>>,
+ adt_def: InternedSet<'tcx, AdtDefData>,
+}
+
+impl<'tcx> CtxtInterners<'tcx> {
+ fn new(arena: &'tcx WorkerLocal<Arena<'tcx>>) -> CtxtInterners<'tcx> {
+ CtxtInterners {
+ arena,
+ type_: Default::default(),
+ substs: Default::default(),
+ region: Default::default(),
+ poly_existential_predicates: Default::default(),
+ canonical_var_infos: Default::default(),
+ predicate: Default::default(),
+ predicates: Default::default(),
+ projs: Default::default(),
+ place_elems: Default::default(),
+ const_: Default::default(),
+ const_allocation: Default::default(),
+ bound_variable_kinds: Default::default(),
+ layout: Default::default(),
+ adt_def: Default::default(),
+ }
+ }
+
+ /// Interns a type.
+ #[allow(rustc::usage_of_ty_tykind)]
+ #[inline(never)]
+ fn intern_ty(
+ &self,
+ kind: TyKind<'tcx>,
+ sess: &Session,
+ definitions: &rustc_hir::definitions::Definitions,
+ cstore: &CrateStoreDyn,
+ source_span: &IndexVec<LocalDefId, Span>,
+ ) -> Ty<'tcx> {
+ Ty(Interned::new_unchecked(
+ self.type_
+ .intern(kind, |kind| {
+ let flags = super::flags::FlagComputation::for_kind(&kind);
+
+ // It's impossible to hash inference regions (and will ICE), so we don't need to try to cache them.
+ // Without incremental, we rarely stable-hash types, so let's not do it proactively.
+ let stable_hash = if flags.flags.intersects(TypeFlags::HAS_RE_INFER)
+ || sess.opts.incremental.is_none()
+ {
+ Fingerprint::ZERO
+ } else {
+ let mut hasher = StableHasher::new();
+ let mut hcx = StableHashingContext::ignore_spans(
+ sess,
+ definitions,
+ cstore,
+ source_span,
+ );
+ kind.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish()
+ };
+
+ let ty_struct = TyS {
+ kind,
+ flags: flags.flags,
+ outer_exclusive_binder: flags.outer_exclusive_binder,
+ };
+
+ InternedInSet(
+ self.arena.alloc(WithStableHash { internee: ty_struct, stable_hash }),
+ )
+ })
+ .0,
+ ))
+ }
+
+ #[inline(never)]
+ fn intern_predicate(&self, kind: Binder<'tcx, PredicateKind<'tcx>>) -> Predicate<'tcx> {
+ Predicate(Interned::new_unchecked(
+ self.predicate
+ .intern(kind, |kind| {
+ let flags = super::flags::FlagComputation::for_predicate(kind);
+
+ let predicate_struct = PredicateS {
+ kind,
+ flags: flags.flags,
+ outer_exclusive_binder: flags.outer_exclusive_binder,
+ };
+
+ InternedInSet(self.arena.alloc(predicate_struct))
+ })
+ .0,
+ ))
+ }
+}
+
+pub struct CommonTypes<'tcx> {
+ pub unit: Ty<'tcx>,
+ pub bool: Ty<'tcx>,
+ pub char: Ty<'tcx>,
+ pub isize: Ty<'tcx>,
+ pub i8: Ty<'tcx>,
+ pub i16: Ty<'tcx>,
+ pub i32: Ty<'tcx>,
+ pub i64: Ty<'tcx>,
+ pub i128: Ty<'tcx>,
+ pub usize: Ty<'tcx>,
+ pub u8: Ty<'tcx>,
+ pub u16: Ty<'tcx>,
+ pub u32: Ty<'tcx>,
+ pub u64: Ty<'tcx>,
+ pub u128: Ty<'tcx>,
+ pub f32: Ty<'tcx>,
+ pub f64: Ty<'tcx>,
+ pub str_: Ty<'tcx>,
+ pub never: Ty<'tcx>,
+ pub self_param: Ty<'tcx>,
+
+ /// Dummy type used for the `Self` of a `TraitRef` created for converting
+ /// a trait object, and which gets removed in `ExistentialTraitRef`.
+ /// This type must not appear anywhere in other converted types.
+ pub trait_object_dummy_self: Ty<'tcx>,
+}
+
+pub struct CommonLifetimes<'tcx> {
+ /// `ReEmpty` in the root universe.
+ pub re_root_empty: Region<'tcx>,
+
+ /// `ReStatic`
+ pub re_static: Region<'tcx>,
+
+ /// Erased region, used outside of type inference.
+ pub re_erased: Region<'tcx>,
+}
+
+pub struct CommonConsts<'tcx> {
+ pub unit: Const<'tcx>,
+}
+
+pub struct LocalTableInContext<'a, V> {
+ hir_owner: LocalDefId,
+ data: &'a ItemLocalMap<V>,
+}
+
+/// Validate that the given HirId (respectively its `local_id` part) can be
+/// safely used as a key in the maps of a TypeckResults. For that to be
+/// the case, the HirId must have the same `owner` as all the other IDs in
+/// this table (signified by `hir_owner`). Otherwise the HirId
+/// would be in a different frame of reference and using its `local_id`
+/// would result in lookup errors, or worse, in silently wrong data being
+/// stored/returned.
+#[inline]
+fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+ if hir_id.owner != hir_owner {
+ invalid_hir_id_for_typeck_results(hir_owner, hir_id);
+ }
+}
+
+#[cold]
+#[inline(never)]
+fn invalid_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+ ty::tls::with(|tcx| {
+ bug!(
+ "node {} with HirId::owner {:?} cannot be placed in TypeckResults with hir_owner {:?}",
+ tcx.hir().node_to_string(hir_id),
+ hir_id.owner,
+ hir_owner
+ )
+ });
+}
+
+impl<'a, V> LocalTableInContext<'a, V> {
+ pub fn contains_key(&self, id: hir::HirId) -> bool {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.contains_key(&id.local_id)
+ }
+
+ pub fn get(&self, id: hir::HirId) -> Option<&V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.get(&id.local_id)
+ }
+
+ pub fn iter(&self) -> hash_map::Iter<'_, hir::ItemLocalId, V> {
+ self.data.iter()
+ }
+}
+
+impl<'a, V> ::std::ops::Index<hir::HirId> for LocalTableInContext<'a, V> {
+ type Output = V;
+
+ fn index(&self, key: hir::HirId) -> &V {
+ self.get(key).expect("LocalTableInContext: key not found")
+ }
+}
+
+pub struct LocalTableInContextMut<'a, V> {
+ hir_owner: LocalDefId,
+ data: &'a mut ItemLocalMap<V>,
+}
+
+impl<'a, V> LocalTableInContextMut<'a, V> {
+ pub fn get_mut(&mut self, id: hir::HirId) -> Option<&mut V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.get_mut(&id.local_id)
+ }
+
+ pub fn entry(&mut self, id: hir::HirId) -> Entry<'_, hir::ItemLocalId, V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.entry(id.local_id)
+ }
+
+ pub fn insert(&mut self, id: hir::HirId, val: V) -> Option<V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.insert(id.local_id, val)
+ }
+
+ pub fn remove(&mut self, id: hir::HirId) -> Option<V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.remove(&id.local_id)
+ }
+}
+
+/// Whenever a value may be live across a generator yield, the type of that value winds up in the
+/// `GeneratorInteriorTypeCause` struct. This struct adds additional information about such
+/// captured types that can be useful for diagnostics. In particular, it stores the span that
+/// caused a given type to be recorded, along with the scope that enclosed the value (which can
+/// be used to find the await that the value is live across).
+///
+/// For example:
+///
+/// ```ignore (pseudo-Rust)
+/// async move {
+/// let x: T = expr;
+/// foo.await
+/// ...
+/// }
+/// ```
+///
+/// Here, we would store the type `T`, the span of the value `x`, the "scope-span" for
+/// the scope that contains `x`, the expression that `T` was evaluated from, and the span
+/// of `foo.await`.
+#[derive(TyEncodable, TyDecodable, Clone, Debug, Eq, Hash, PartialEq, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct GeneratorInteriorTypeCause<'tcx> {
+ /// Type of the captured binding.
+ pub ty: Ty<'tcx>,
+ /// Span of the binding that was captured.
+ pub span: Span,
+ /// Span of the scope of the captured binding.
+ pub scope_span: Option<Span>,
+ /// Span of `.await` or `yield` expression.
+ pub yield_span: Span,
+ /// The expression that the type was evaluated from.
+ pub expr: Option<hir::HirId>,
+}
+
+// This type holds diagnostic information on generators and async functions across crate
+// boundaries, and is used to provide better error messages.
+#[derive(TyEncodable, TyDecodable, Clone, Debug, HashStable)]
+pub struct GeneratorDiagnosticData<'tcx> {
+ pub generator_interior_types: ty::Binder<'tcx, Vec<GeneratorInteriorTypeCause<'tcx>>>,
+ pub hir_owner: DefId,
+ pub nodes_types: ItemLocalMap<Ty<'tcx>>,
+ pub adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
+}
+
+#[derive(TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct TypeckResults<'tcx> {
+ /// The `HirId::owner` all `ItemLocalId`s in this table are relative to.
+ pub hir_owner: LocalDefId,
+
+ /// Resolved definitions for `<T>::X` associated paths and
+ /// method calls, including those of overloaded operators.
+ type_dependent_defs: ItemLocalMap<Result<(DefKind, DefId), ErrorGuaranteed>>,
+
+ /// Resolved field indices for field accesses in expressions (`S { field }`, `obj.field`)
+ /// or patterns (`S { field }`). The index is often useful by itself, but to learn more
+ /// about the field you also need the definition of the variant to which the field
+ /// belongs; that definition may not exist if it's a tuple field (`tuple.0`).
+ field_indices: ItemLocalMap<usize>,
+
+ /// Stores the types for various nodes in the AST. Note that this table
+ /// is not guaranteed to be populated outside inference. See
+ /// `typeck::check::fn_ctxt` for details.
+ node_types: ItemLocalMap<Ty<'tcx>>,
+
+ /// Stores the type parameters which were substituted to obtain the type
+ /// of this node. This only applies to nodes that refer to entities
+ /// parameterized by type parameters, such as generic fns, types, or
+ /// other items.
+ node_substs: ItemLocalMap<SubstsRef<'tcx>>,
+
+ /// This will either store the canonicalized types provided by the user
+ /// or the substitutions that the user explicitly gave (if any) attached
+ /// to `id`. These will not include any inferred values. The canonical form
+ /// is used to capture things like `_` or other unspecified values.
+ ///
+ /// For example, if the user wrote `foo.collect::<Vec<_>>()`, then the
+ /// canonical substitutions would include only `for<X> { Vec<X> }`.
+ ///
+ /// See also `AscribeUserType` statement in MIR.
+ user_provided_types: ItemLocalMap<CanonicalUserType<'tcx>>,
+
+ /// Stores the canonicalized types provided by the user. See also
+ /// `AscribeUserType` statement in MIR.
+ pub user_provided_sigs: DefIdMap<CanonicalPolyFnSig<'tcx>>,
+
+ adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
+
+ /// Stores the actual binding mode for all instances of `hir::BindingAnnotation`.
+ pat_binding_modes: ItemLocalMap<BindingMode>,
+
+ /// Stores the types which were implicitly dereferenced in pattern binding modes
+ /// for later usage in THIR lowering. For example,
+ ///
+ /// ```
+ /// match &&Some(5i32) {
+ /// Some(n) => {},
+ /// _ => {},
+ /// }
+ /// ```
+ /// leads to a `vec![&&Option<i32>, &Option<i32>]`. Empty vectors are not stored.
+ ///
+ /// See:
+ /// <https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions>
+ pat_adjustments: ItemLocalMap<Vec<Ty<'tcx>>>,
+
+ /// Records the reasons that we picked the kind of each closure;
+ /// not all closures are present in the map.
+ closure_kind_origins: ItemLocalMap<(Span, HirPlace<'tcx>)>,
+
+ /// For each fn, records the "liberated" types of its arguments
+ /// and return type. Liberated means that all bound regions
+ /// (including late-bound regions) are replaced with free
+ /// equivalents. This table is not used in codegen (since regions
+ /// are erased there) and hence is not serialized to metadata.
+ ///
+ /// This table also contains the "revealed" values for any `impl Trait`
+ /// that appear in the signature and whose values are being inferred
+ /// by this function.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// # use std::fmt::Debug;
+ /// fn foo(x: &u32) -> impl Debug { *x }
+ /// ```
+ ///
+ /// The function signature here would be:
+ ///
+ /// ```ignore (illustrative)
+ /// for<'a> fn(&'a u32) -> Foo
+ /// ```
+ ///
+ /// where `Foo` is an opaque type created for this function.
+ ///
+ ///
+ /// The *liberated* form of this would be
+ ///
+ /// ```ignore (illustrative)
+ /// fn(&'a u32) -> u32
+ /// ```
+ ///
+ /// Note that `'a` is not bound (it would be an `ReFree`) and
+ /// that the `Foo` opaque type is replaced by its hidden type.
+ liberated_fn_sigs: ItemLocalMap<ty::FnSig<'tcx>>,
+
+ /// For each FRU (functional record update) expression, records the normalized types
+ /// of the fields of the struct; this is needed because it is non-trivial to
+ /// normalize while preserving regions. This table is used only in
+ /// MIR construction and hence is not serialized to metadata.
+ fru_field_types: ItemLocalMap<Vec<Ty<'tcx>>>,
+
+ /// For every coercion cast we add the HIR node ID of the cast
+ /// expression to this set.
+ coercion_casts: ItemLocalSet,
+
+ /// Set of trait imports actually used in the method resolution.
+ /// This is used to warn about unused imports. During type
+ /// checking, this `Lrc` should not be cloned: it must have a ref-count
+ /// of 1 so that we can insert things into the set mutably.
+ pub used_trait_imports: Lrc<FxHashSet<LocalDefId>>,
+
+ /// If any errors occurred while type-checking this body,
+ /// this field will be set to `Some(ErrorGuaranteed)`.
+ pub tainted_by_errors: Option<ErrorGuaranteed>,
+
+ /// All the opaque types that have hidden types set
+ /// by this function. For return-position-impl-trait we also store the
+ /// type here, so that mir-borrowck can figure out hidden types,
+ /// even if they are only set in dead code (which doesn't show up in MIR).
+ /// For type-alias-impl-trait, this map is only used to prevent query cycles,
+ /// so the hidden types are all `None`.
+ pub concrete_opaque_types: VecMap<LocalDefId, Option<Ty<'tcx>>>,
+
+ /// Tracks the minimum captures required for a closure;
+ /// see `MinCaptureInformationMap` for more details.
+ pub closure_min_captures: ty::MinCaptureInformationMap<'tcx>,
+
+ /// Tracks the fake reads required for a closure and the reason for the fake read.
+ /// When performing pattern matching for closures, there are times we don't end up
+ /// reading places that are mentioned in a closure (because of _ patterns). However,
+ /// to ensure the places are initialized, we introduce fake reads.
+ /// Consider these two examples:
+ /// ```ignore (discriminant matching with only wildcard arm)
+ /// let x: u8;
+ /// let c = || match x { _ => () };
+ /// ```
+ /// In this example, we don't need to actually read/borrow `x` in `c`, and so we don't
+ /// want to capture it. However, we do still want an error here, because `x` should have
+ /// to be initialized at the point where `c` is created. Therefore, we add a "fake read"
+ /// instead.
+ /// ```ignore (destructured assignments)
+ /// let c = || {
+ /// let (t1, t2) = t;
+ /// };
+ /// ```
+ /// In the second example, we capture the disjoint fields of `t` (`t.0` & `t.1`), but
+ /// we never capture `t`. This becomes an issue when we build MIR as we require
+ /// information on `t` in order to create place `t.0` and `t.1`. We can solve this
+ /// issue by fake reading `t`.
+ pub closure_fake_reads: FxHashMap<LocalDefId, Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>>,
+
+ /// Tracks the rvalue scoping rules, which define finer scoping for rvalue expressions
+ /// by applying extended parameter rules.
+ /// Details may be found in `rustc_typeck::check::rvalue_scopes`.
+ pub rvalue_scopes: RvalueScopes,
+
+ /// Stores the type, expression, span and optional scope span of all types
+ /// that are live across the yield of this generator (if a generator).
+ pub generator_interior_types: ty::Binder<'tcx, Vec<GeneratorInteriorTypeCause<'tcx>>>,
+
+ /// We sometimes treat byte string literals (which are of type `&[u8; N]`)
+ /// as `&[u8]`, depending on the pattern in which they are used.
+ /// This hashset records all instances where we behave
+ /// like this to allow `const_to_pat` to reliably handle this situation.
+ pub treat_byte_string_as_slice: ItemLocalSet,
+
+ /// Contains the data for evaluating the effect of feature `capture_disjoint_fields`
+ /// on closure size.
+ pub closure_size_eval: FxHashMap<LocalDefId, ClosureSizeProfileData<'tcx>>,
+}
+
+impl<'tcx> TypeckResults<'tcx> {
+ pub fn new(hir_owner: LocalDefId) -> TypeckResults<'tcx> {
+ TypeckResults {
+ hir_owner,
+ type_dependent_defs: Default::default(),
+ field_indices: Default::default(),
+ user_provided_types: Default::default(),
+ user_provided_sigs: Default::default(),
+ node_types: Default::default(),
+ node_substs: Default::default(),
+ adjustments: Default::default(),
+ pat_binding_modes: Default::default(),
+ pat_adjustments: Default::default(),
+ closure_kind_origins: Default::default(),
+ liberated_fn_sigs: Default::default(),
+ fru_field_types: Default::default(),
+ coercion_casts: Default::default(),
+ used_trait_imports: Lrc::new(Default::default()),
+ tainted_by_errors: None,
+ concrete_opaque_types: Default::default(),
+ closure_min_captures: Default::default(),
+ closure_fake_reads: Default::default(),
+ rvalue_scopes: Default::default(),
+ generator_interior_types: ty::Binder::dummy(Default::default()),
+ treat_byte_string_as_slice: Default::default(),
+ closure_size_eval: Default::default(),
+ }
+ }
+
+ /// Returns the final resolution of a `QPath` in an `Expr` or `Pat` node.
+ pub fn qpath_res(&self, qpath: &hir::QPath<'_>, id: hir::HirId) -> Res {
+ match *qpath {
+ hir::QPath::Resolved(_, ref path) => path.res,
+ hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => self
+ .type_dependent_def(id)
+ .map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)),
+ }
+ }
+
+ pub fn type_dependent_defs(
+ &self,
+ ) -> LocalTableInContext<'_, Result<(DefKind, DefId), ErrorGuaranteed>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.type_dependent_defs }
+ }
+
+ pub fn type_dependent_def(&self, id: HirId) -> Option<(DefKind, DefId)> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.type_dependent_defs.get(&id.local_id).cloned().and_then(|r| r.ok())
+ }
+
+ pub fn type_dependent_def_id(&self, id: HirId) -> Option<DefId> {
+ self.type_dependent_def(id).map(|(_, def_id)| def_id)
+ }
+
+ pub fn type_dependent_defs_mut(
+ &mut self,
+ ) -> LocalTableInContextMut<'_, Result<(DefKind, DefId), ErrorGuaranteed>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.type_dependent_defs }
+ }
+
+ pub fn field_indices(&self) -> LocalTableInContext<'_, usize> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.field_indices }
+ }
+
+ pub fn field_indices_mut(&mut self) -> LocalTableInContextMut<'_, usize> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.field_indices }
+ }
+
+ pub fn user_provided_types(&self) -> LocalTableInContext<'_, CanonicalUserType<'tcx>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.user_provided_types }
+ }
+
+ pub fn user_provided_types_mut(
+ &mut self,
+ ) -> LocalTableInContextMut<'_, CanonicalUserType<'tcx>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.user_provided_types }
+ }
+
+ pub fn node_types(&self) -> LocalTableInContext<'_, Ty<'tcx>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.node_types }
+ }
+
+ pub fn node_types_mut(&mut self) -> LocalTableInContextMut<'_, Ty<'tcx>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_types }
+ }
+
+ pub fn get_generator_diagnostic_data(&self) -> GeneratorDiagnosticData<'tcx> {
+ let generator_interior_type = self.generator_interior_types.map_bound_ref(|vec| {
+ vec.iter()
+ .map(|item| {
+ GeneratorInteriorTypeCause {
+ ty: item.ty,
+ span: item.span,
+ scope_span: item.scope_span,
+ yield_span: item.yield_span,
+ expr: None, //FIXME: Passing expression over crate boundaries is impossible at the moment
+ }
+ })
+ .collect::<Vec<_>>()
+ });
+ GeneratorDiagnosticData {
+ generator_interior_types: generator_interior_type,
+ hir_owner: self.hir_owner.to_def_id(),
+ nodes_types: self.node_types.clone(),
+ adjustments: self.adjustments.clone(),
+ }
+ }
+
+ pub fn node_type(&self, id: hir::HirId) -> Ty<'tcx> {
+ self.node_type_opt(id).unwrap_or_else(|| {
+ bug!("node_type: no type for node `{}`", tls::with(|tcx| tcx.hir().node_to_string(id)))
+ })
+ }
+
+ pub fn node_type_opt(&self, id: hir::HirId) -> Option<Ty<'tcx>> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.node_types.get(&id.local_id).cloned()
+ }
+
+ pub fn node_substs_mut(&mut self) -> LocalTableInContextMut<'_, SubstsRef<'tcx>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_substs }
+ }
+
+ pub fn node_substs(&self, id: hir::HirId) -> SubstsRef<'tcx> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.node_substs.get(&id.local_id).cloned().unwrap_or_else(|| InternalSubsts::empty())
+ }
+
+ pub fn node_substs_opt(&self, id: hir::HirId) -> Option<SubstsRef<'tcx>> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.node_substs.get(&id.local_id).cloned()
+ }
+
+ // Returns the type of a pattern as a monotype. Like `expr_ty`, this function
+ // doesn't provide type parameter substitutions.
+ pub fn pat_ty(&self, pat: &hir::Pat<'_>) -> Ty<'tcx> {
+ self.node_type(pat.hir_id)
+ }
+
+ // Returns the type of an expression as a monotype.
+ //
+ // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression. That is, in
+ // some cases, we insert `Adjustment` annotations such as auto-deref or
+ // auto-ref. The type returned by this function does not consider such
+ // adjustments. See `expr_ty_adjusted()` instead.
+ //
+ // NB (2): This type doesn't provide type parameter substitutions; e.g., if you
+ // ask for the type of "id" in "id(3)", it will return "fn(&isize) -> isize"
+ // instead of "fn(ty) -> T with T = isize".
+ pub fn expr_ty(&self, expr: &hir::Expr<'_>) -> Ty<'tcx> {
+ self.node_type(expr.hir_id)
+ }
+
+ pub fn expr_ty_opt(&self, expr: &hir::Expr<'_>) -> Option<Ty<'tcx>> {
+ self.node_type_opt(expr.hir_id)
+ }
+
+ pub fn adjustments(&self) -> LocalTableInContext<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.adjustments }
+ }
+
+ pub fn adjustments_mut(
+ &mut self,
+ ) -> LocalTableInContextMut<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.adjustments }
+ }
+
+ pub fn expr_adjustments(&self, expr: &hir::Expr<'_>) -> &[ty::adjustment::Adjustment<'tcx>] {
+ validate_hir_id_for_typeck_results(self.hir_owner, expr.hir_id);
+ self.adjustments.get(&expr.hir_id.local_id).map_or(&[], |a| &a[..])
+ }
+
+ /// Returns the type of `expr`, considering any `Adjustment`
+ /// entry recorded for that expression.
+ pub fn expr_ty_adjusted(&self, expr: &hir::Expr<'_>) -> Ty<'tcx> {
+ self.expr_adjustments(expr).last().map_or_else(|| self.expr_ty(expr), |adj| adj.target)
+ }
+
+ pub fn expr_ty_adjusted_opt(&self, expr: &hir::Expr<'_>) -> Option<Ty<'tcx>> {
+ self.expr_adjustments(expr).last().map(|adj| adj.target).or_else(|| self.expr_ty_opt(expr))
+ }
+
+ pub fn is_method_call(&self, expr: &hir::Expr<'_>) -> bool {
+ // Only paths and method calls/overloaded operators have
+ // entries in type_dependent_defs, ignore the former here.
+ if let hir::ExprKind::Path(_) = expr.kind {
+ return false;
+ }
+
+ matches!(self.type_dependent_defs().get(expr.hir_id), Some(Ok((DefKind::AssocFn, _))))
+ }
+
+ pub fn extract_binding_mode(&self, s: &Session, id: HirId, sp: Span) -> Option<BindingMode> {
+ self.pat_binding_modes().get(id).copied().or_else(|| {
+ s.delay_span_bug(sp, "missing binding mode");
+ None
+ })
+ }
+
+ pub fn pat_binding_modes(&self) -> LocalTableInContext<'_, BindingMode> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_binding_modes }
+ }
+
+ pub fn pat_binding_modes_mut(&mut self) -> LocalTableInContextMut<'_, BindingMode> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_binding_modes }
+ }
+
+ pub fn pat_adjustments(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_adjustments }
+ }
+
+ pub fn pat_adjustments_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_adjustments }
+ }
+
+ /// For a given closure, returns an iterator over the `ty::CapturedPlace`s that are
+ /// captured by the closure.
+ pub fn closure_min_captures_flattened(
+ &self,
+ closure_def_id: LocalDefId,
+ ) -> impl Iterator<Item = &ty::CapturedPlace<'tcx>> {
+ self.closure_min_captures
+ .get(&closure_def_id)
+ .map(|closure_min_captures| closure_min_captures.values().flat_map(|v| v.iter()))
+ .into_iter()
+ .flatten()
+ }
+
+ pub fn closure_kind_origins(&self) -> LocalTableInContext<'_, (Span, HirPlace<'tcx>)> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.closure_kind_origins }
+ }
+
+ pub fn closure_kind_origins_mut(
+ &mut self,
+ ) -> LocalTableInContextMut<'_, (Span, HirPlace<'tcx>)> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.closure_kind_origins }
+ }
+
+ pub fn liberated_fn_sigs(&self) -> LocalTableInContext<'_, ty::FnSig<'tcx>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.liberated_fn_sigs }
+ }
+
+ pub fn liberated_fn_sigs_mut(&mut self) -> LocalTableInContextMut<'_, ty::FnSig<'tcx>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.liberated_fn_sigs }
+ }
+
+ pub fn fru_field_types(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.fru_field_types }
+ }
+
+ pub fn fru_field_types_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.fru_field_types }
+ }
+
+ pub fn is_coercion_cast(&self, hir_id: hir::HirId) -> bool {
+ validate_hir_id_for_typeck_results(self.hir_owner, hir_id);
+ self.coercion_casts.contains(&hir_id.local_id)
+ }
+
+ pub fn set_coercion_cast(&mut self, id: ItemLocalId) {
+ self.coercion_casts.insert(id);
+ }
+
+ pub fn coercion_casts(&self) -> &ItemLocalSet {
+ &self.coercion_casts
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct UserTypeAnnotationIndex {
+ derive [HashStable]
+ DEBUG_FORMAT = "UserType({})",
+ const START_INDEX = 0,
+ }
+}
+
+/// Mapping of type annotation indices to canonical user type annotations.
+pub type CanonicalUserTypeAnnotations<'tcx> =
+ IndexVec<UserTypeAnnotationIndex, CanonicalUserTypeAnnotation<'tcx>>;
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct CanonicalUserTypeAnnotation<'tcx> {
+ pub user_ty: CanonicalUserType<'tcx>,
+ pub span: Span,
+ pub inferred_ty: Ty<'tcx>,
+}
+
+/// Canonicalized user type annotation.
+pub type CanonicalUserType<'tcx> = Canonical<'tcx, UserType<'tcx>>;
+
+impl<'tcx> CanonicalUserType<'tcx> {
+ /// Returns `true` if this represents a substitution of the form `[?0, ?1, ?2]`,
+ /// i.e., each thing is mapped to a canonical variable with the same index.
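+ ///
+ /// For example (illustrative), `TypeOf(def_id, [?0, ?1])` is an identity
+ /// substitution, while `TypeOf(def_id, [?1, ?0])` or `TypeOf(def_id, [u32, ?1])`
+ /// are not.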
+ pub fn is_identity(&self) -> bool {
+ match self.value {
+ UserType::Ty(_) => false,
+ UserType::TypeOf(_, user_substs) => {
+ if user_substs.user_self_ty.is_some() {
+ return false;
+ }
+
+ iter::zip(user_substs.substs, BoundVar::new(0)..).all(|(kind, cvar)| {
+ match kind.unpack() {
+ GenericArgKind::Type(ty) => match ty.kind() {
+ ty::Bound(debruijn, b) => {
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(*debruijn, ty::INNERMOST);
+ cvar == b.var
+ }
+ _ => false,
+ },
+
+ GenericArgKind::Lifetime(r) => match *r {
+ ty::ReLateBound(debruijn, br) => {
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(debruijn, ty::INNERMOST);
+ cvar == br.var
+ }
+ _ => false,
+ },
+
+ GenericArgKind::Const(ct) => match ct.kind() {
+ ty::ConstKind::Bound(debruijn, b) => {
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(debruijn, ty::INNERMOST);
+ cvar == b
+ }
+ _ => false,
+ },
+ }
+ })
+ }
+ }
+ }
+}
+
+/// A user-given type annotation attached to a constant. These arise
+/// from constants that are named via paths, like `Foo::<A>::new` and
+/// so forth.
+#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub enum UserType<'tcx> {
+ Ty(Ty<'tcx>),
+
+ /// The canonical type is the result of `type_of(def_id)` with the
+ /// given substitutions applied.
+ TypeOf(DefId, UserSubsts<'tcx>),
+}
+
+impl<'tcx> CommonTypes<'tcx> {
+ fn new(
+ interners: &CtxtInterners<'tcx>,
+ sess: &Session,
+ definitions: &rustc_hir::definitions::Definitions,
+ cstore: &CrateStoreDyn,
+ source_span: &IndexVec<LocalDefId, Span>,
+ ) -> CommonTypes<'tcx> {
+ let mk = |ty| interners.intern_ty(ty, sess, definitions, cstore, source_span);
+
+ CommonTypes {
+ unit: mk(Tuple(List::empty())),
+ bool: mk(Bool),
+ char: mk(Char),
+ never: mk(Never),
+ isize: mk(Int(ty::IntTy::Isize)),
+ i8: mk(Int(ty::IntTy::I8)),
+ i16: mk(Int(ty::IntTy::I16)),
+ i32: mk(Int(ty::IntTy::I32)),
+ i64: mk(Int(ty::IntTy::I64)),
+ i128: mk(Int(ty::IntTy::I128)),
+ usize: mk(Uint(ty::UintTy::Usize)),
+ u8: mk(Uint(ty::UintTy::U8)),
+ u16: mk(Uint(ty::UintTy::U16)),
+ u32: mk(Uint(ty::UintTy::U32)),
+ u64: mk(Uint(ty::UintTy::U64)),
+ u128: mk(Uint(ty::UintTy::U128)),
+ f32: mk(Float(ty::FloatTy::F32)),
+ f64: mk(Float(ty::FloatTy::F64)),
+ str_: mk(Str),
+ self_param: mk(ty::Param(ty::ParamTy { index: 0, name: kw::SelfUpper })),
+
+ trait_object_dummy_self: mk(Infer(ty::FreshTy(0))),
+ }
+ }
+}
+
+impl<'tcx> CommonLifetimes<'tcx> {
+ fn new(interners: &CtxtInterners<'tcx>) -> CommonLifetimes<'tcx> {
+ let mk = |r| {
+ Region(Interned::new_unchecked(
+ interners.region.intern(r, |r| InternedInSet(interners.arena.alloc(r))).0,
+ ))
+ };
+
+ CommonLifetimes {
+ re_root_empty: mk(ty::ReEmpty(ty::UniverseIndex::ROOT)),
+ re_static: mk(ty::ReStatic),
+ re_erased: mk(ty::ReErased),
+ }
+ }
+}
+
+impl<'tcx> CommonConsts<'tcx> {
+ fn new(interners: &CtxtInterners<'tcx>, types: &CommonTypes<'tcx>) -> CommonConsts<'tcx> {
+ let mk_const = |c| {
+ Const(Interned::new_unchecked(
+ interners.const_.intern(c, |c| InternedInSet(interners.arena.alloc(c))).0,
+ ))
+ };
+
+ CommonConsts {
+ unit: mk_const(ty::ConstS {
+ kind: ty::ConstKind::Value(ty::ValTree::zst()),
+ ty: types.unit,
+ }),
+ }
+ }
+}
+
+// This struct contains information regarding the `ReFree(FreeRegion)` corresponding to a lifetime
+// conflict.
+#[derive(Debug)]
+pub struct FreeRegionInfo {
+ // `LocalDefId` corresponding to the free region.
+ pub def_id: LocalDefId,
+ // The bound region corresponding to the free region.
+ pub boundregion: ty::BoundRegionKind,
+ // Whether the bound region is in an impl item.
+ pub is_impl_item: bool,
+}
+
+/// The central data structure of the compiler. It stores references
+/// to the various **arenas** and also houses the results of the
+/// various **compiler queries** that have been performed. See the
+/// [rustc dev guide] for more details.
+///
+/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/ty.html
+#[derive(Copy, Clone)]
+#[rustc_diagnostic_item = "TyCtxt"]
+#[rustc_pass_by_value]
+pub struct TyCtxt<'tcx> {
+ gcx: &'tcx GlobalCtxt<'tcx>,
+}
+
+impl<'tcx> Deref for TyCtxt<'tcx> {
+ type Target = &'tcx GlobalCtxt<'tcx>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ &self.gcx
+ }
+}
+
+pub struct GlobalCtxt<'tcx> {
+ pub arena: &'tcx WorkerLocal<Arena<'tcx>>,
+ pub hir_arena: &'tcx WorkerLocal<hir::Arena<'tcx>>,
+
+ interners: CtxtInterners<'tcx>,
+
+ pub sess: &'tcx Session,
+
+ /// This only ever stores a `LintStore` but we don't want a dependency on that type here.
+ ///
+ /// FIXME(Centril): consider `dyn LintStoreMarker` once
+ /// we can upcast to `Any` for some additional type safety.
+ pub lint_store: Lrc<dyn Any + sync::Sync + sync::Send>,
+
+ pub dep_graph: DepGraph,
+
+ pub prof: SelfProfilerRef,
+
+ /// Common types, pre-interned for your convenience.
+ pub types: CommonTypes<'tcx>,
+
+ /// Common lifetimes, pre-interned for your convenience.
+ pub lifetimes: CommonLifetimes<'tcx>,
+
+ /// Common consts, pre-interned for your convenience.
+ pub consts: CommonConsts<'tcx>,
+
+ definitions: RwLock<Definitions>,
+ cstore: Box<CrateStoreDyn>,
+
+ /// Output of the resolver.
+ pub(crate) untracked_resolutions: ty::ResolverOutputs,
+ untracked_resolver_for_lowering: Steal<ty::ResolverAstLowering>,
+ /// The entire crate as AST. This field serves as the input for the hir_crate query,
+ /// which lowers it from AST to HIR. It must not be read or used by anything else.
+ pub untracked_crate: Steal<Lrc<ast::Crate>>,
+
+ /// This provides access to the incremental compilation on-disk cache for query results.
+ /// Do not access this directly. It is only meant to be used by
+ /// `DepGraph::try_mark_green()` and the query infrastructure.
+ /// This is `None` if we are not in incremental compilation mode.
+ pub on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
+
+ pub queries: &'tcx dyn query::QueryEngine<'tcx>,
+ pub query_caches: query::QueryCaches<'tcx>,
+ query_kinds: &'tcx [DepKindStruct],
+
+ // Internal caches for metadata decoding. No need to track deps on this.
+ pub ty_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
+ pub pred_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Predicate<'tcx>>>,
+
+ /// Caches the results of trait selection. This cache is used
+ /// for things that do not have to do with the parameters in scope.
+ pub selection_cache: traits::SelectionCache<'tcx>,
+
+ /// Caches the results of trait evaluation. This cache is used
+ /// for things that do not have to do with the parameters in scope.
+ /// Merge this with `selection_cache`?
+ pub evaluation_cache: traits::EvaluationCache<'tcx>,
+
+ /// The definite name of the current crate after taking into account
+ /// attributes, command-line parameters, etc.
+ crate_name: Symbol,
+
+ /// Data layout specification for the current target.
+ pub data_layout: TargetDataLayout,
+
+ /// Stores memory for globals (statics/consts).
+ pub(crate) alloc_map: Lock<interpret::AllocMap<'tcx>>,
+
+ output_filenames: Arc<OutputFilenames>,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Expects a body and returns its codegen attributes.
+ ///
+ /// Unlike `codegen_fn_attrs`, this returns `CodegenFnAttrs::EMPTY` for
+ /// constants.
+ pub fn body_codegen_attrs(self, def_id: DefId) -> &'tcx CodegenFnAttrs {
+ let def_kind = self.def_kind(def_id);
+ if def_kind.has_codegen_attrs() {
+ self.codegen_fn_attrs(def_id)
+ } else if matches!(
+ def_kind,
+ DefKind::AnonConst | DefKind::AssocConst | DefKind::Const | DefKind::InlineConst
+ ) {
+ CodegenFnAttrs::EMPTY
+ } else {
+ bug!(
+ "body_codegen_fn_attrs called on unexpected definition: {:?} {:?}",
+ def_id,
+ def_kind
+ )
+ }
+ }
+
+ pub fn typeck_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<LocalDefId>,
+ ) -> &'tcx TypeckResults<'tcx> {
+ if let Some(param_did) = def.const_param_did {
+ self.typeck_const_arg((def.did, param_did))
+ } else {
+ self.typeck(def.did)
+ }
+ }
+
+ pub fn mir_borrowck_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<LocalDefId>,
+ ) -> &'tcx BorrowCheckResult<'tcx> {
+ if let Some(param_did) = def.const_param_did {
+ self.mir_borrowck_const_arg((def.did, param_did))
+ } else {
+ self.mir_borrowck(def.did)
+ }
+ }
+
+ pub fn alloc_steal_thir(self, thir: Thir<'tcx>) -> &'tcx Steal<Thir<'tcx>> {
+ self.arena.alloc(Steal::new(thir))
+ }
+
+ pub fn alloc_steal_mir(self, mir: Body<'tcx>) -> &'tcx Steal<Body<'tcx>> {
+ self.arena.alloc(Steal::new(mir))
+ }
+
+ pub fn alloc_steal_promoted(
+ self,
+ promoted: IndexVec<Promoted, Body<'tcx>>,
+ ) -> &'tcx Steal<IndexVec<Promoted, Body<'tcx>>> {
+ self.arena.alloc(Steal::new(promoted))
+ }
+
+ pub fn alloc_adt_def(
+ self,
+ did: DefId,
+ kind: AdtKind,
+ variants: IndexVec<VariantIdx, ty::VariantDef>,
+ repr: ReprOptions,
+ ) -> ty::AdtDef<'tcx> {
+ self.intern_adt_def(ty::AdtDefData::new(self, did, kind, variants, repr))
+ }
+
+ /// Allocates a read-only byte or string literal for `mir::interpret`.
+ pub fn allocate_bytes(self, bytes: &[u8]) -> interpret::AllocId {
+ // Create an allocation that just contains these bytes.
+ let alloc = interpret::Allocation::from_bytes_byte_aligned_immutable(bytes);
+ let alloc = self.intern_const_alloc(alloc);
+ self.create_memory_alloc(alloc)
+ }
+
+ /// Returns a range of the start/end indices specified with the
+ /// `rustc_layout_scalar_valid_range` attribute.
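+ ///
+ /// # Example
+ ///
+ /// An illustrative sketch of how the attribute is consumed:
+ ///
+ /// ```ignore (illustrative)
+ /// #[rustc_layout_scalar_valid_range_start(1)]
+ /// struct NonZeroU8(u8);
+ /// // layout_scalar_valid_range(def_id) == (Bound::Included(1), Bound::Unbounded)
+ /// ```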
+ // FIXME(eddyb) this is an awkward spot for this method, maybe move it?
+ pub fn layout_scalar_valid_range(self, def_id: DefId) -> (Bound<u128>, Bound<u128>) {
+ let get = |name| {
+ let Some(attr) = self.get_attr(def_id, name) else {
+ return Bound::Unbounded;
+ };
+ debug!("layout_scalar_valid_range: attr={:?}", attr);
+ if let Some(
+ &[
+ ast::NestedMetaItem::Literal(ast::Lit {
+ kind: ast::LitKind::Int(a, _), ..
+ }),
+ ],
+ ) = attr.meta_item_list().as_deref()
+ {
+ Bound::Included(a)
+ } else {
+ self.sess
+ .delay_span_bug(attr.span, "invalid rustc_layout_scalar_valid_range attribute");
+ Bound::Unbounded
+ }
+ };
+ (
+ get(sym::rustc_layout_scalar_valid_range_start),
+ get(sym::rustc_layout_scalar_valid_range_end),
+ )
+ }
+
+ pub fn lift<T: Lift<'tcx>>(self, value: T) -> Option<T::Lifted> {
+ value.lift_to_tcx(self)
+ }
+
+ /// Creates the global type context. The type context and any interned
+ /// value (types, substs, etc.) can only be used while `ty::tls` has a valid
+ /// reference to the context, to allow formatting values that need it.
+ pub fn create_global_ctxt(
+ s: &'tcx Session,
+ lint_store: Lrc<dyn Any + sync::Send + sync::Sync>,
+ arena: &'tcx WorkerLocal<Arena<'tcx>>,
+ hir_arena: &'tcx WorkerLocal<hir::Arena<'tcx>>,
+ definitions: Definitions,
+ cstore: Box<CrateStoreDyn>,
+ untracked_resolutions: ty::ResolverOutputs,
+ untracked_resolver_for_lowering: ty::ResolverAstLowering,
+ krate: Lrc<ast::Crate>,
+ dep_graph: DepGraph,
+ on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
+ queries: &'tcx dyn query::QueryEngine<'tcx>,
+ query_kinds: &'tcx [DepKindStruct],
+ crate_name: &str,
+ output_filenames: OutputFilenames,
+ ) -> GlobalCtxt<'tcx> {
+ let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| {
+ s.fatal(&err);
+ });
+ let interners = CtxtInterners::new(arena);
+ let common_types = CommonTypes::new(
+ &interners,
+ s,
+ &definitions,
+ &*cstore,
+ // This is only used to create a stable hashing context.
+ &untracked_resolutions.source_span,
+ );
+ let common_lifetimes = CommonLifetimes::new(&interners);
+ let common_consts = CommonConsts::new(&interners, &common_types);
+
+ GlobalCtxt {
+ sess: s,
+ lint_store,
+ arena,
+ hir_arena,
+ interners,
+ dep_graph,
+ definitions: RwLock::new(definitions),
+ cstore,
+ prof: s.prof.clone(),
+ types: common_types,
+ lifetimes: common_lifetimes,
+ consts: common_consts,
+ untracked_resolutions,
+ untracked_resolver_for_lowering: Steal::new(untracked_resolver_for_lowering),
+ untracked_crate: Steal::new(krate),
+ on_disk_cache,
+ queries,
+ query_caches: query::QueryCaches::default(),
+ query_kinds,
+ ty_rcache: Default::default(),
+ pred_rcache: Default::default(),
+ selection_cache: Default::default(),
+ evaluation_cache: Default::default(),
+ crate_name: Symbol::intern(crate_name),
+ data_layout,
+ alloc_map: Lock::new(interpret::AllocMap::new()),
+ output_filenames: Arc::new(output_filenames),
+ }
+ }
+
+ pub(crate) fn query_kind(self, k: DepKind) -> &'tcx DepKindStruct {
+ &self.query_kinds[k as usize]
+ }
+
+ /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
+ #[track_caller]
+ pub fn ty_error(self) -> Ty<'tcx> {
+ self.ty_error_with_message(DUMMY_SP, "TyKind::Error constructed but no error reported")
+ }
+
+ /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` with the given `msg` to
+ /// ensure it gets used.
+ #[track_caller]
+ pub fn ty_error_with_message<S: Into<MultiSpan>>(self, span: S, msg: &str) -> Ty<'tcx> {
+ let reported = self.sess.delay_span_bug(span, msg);
+ self.mk_ty(Error(DelaySpanBugEmitted { reported, _priv: () }))
+ }
+
+ /// Like [TyCtxt::ty_error] but for constants.
+ #[track_caller]
+ pub fn const_error(self, ty: Ty<'tcx>) -> Const<'tcx> {
+ self.const_error_with_message(
+ ty,
+ DUMMY_SP,
+ "ty::ConstKind::Error constructed but no error reported",
+ )
+ }
+
+ /// Like [TyCtxt::ty_error_with_message] but for constants.
+ #[track_caller]
+ pub fn const_error_with_message<S: Into<MultiSpan>>(
+ self,
+ ty: Ty<'tcx>,
+ span: S,
+ msg: &str,
+ ) -> Const<'tcx> {
+ let reported = self.sess.delay_span_bug(span, msg);
+ self.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Error(DelaySpanBugEmitted { reported, _priv: () }),
+ ty,
+ })
+ }
+
+ pub fn consider_optimizing<T: Fn() -> String>(self, msg: T) -> bool {
+ let cname = self.crate_name(LOCAL_CRATE);
+ self.sess.consider_optimizing(cname.as_str(), msg)
+ }
+
+ /// Obtain all lang items of this crate and all dependencies (recursively).
+ pub fn lang_items(self) -> &'tcx rustc_hir::lang_items::LanguageItems {
+ self.get_lang_items(())
+ }
+
+ /// Obtain the given diagnostic item's `DefId`. Use `is_diagnostic_item` if you just want to
+ /// compare against another `DefId`, since `is_diagnostic_item` is cheaper.
+ pub fn get_diagnostic_item(self, name: Symbol) -> Option<DefId> {
+ self.all_diagnostic_items(()).name_to_id.get(&name).copied()
+ }
+
+ /// Obtain the diagnostic item's name.
+ pub fn get_diagnostic_name(self, id: DefId) -> Option<Symbol> {
+ self.diagnostic_items(id.krate).id_to_name.get(&id).copied()
+ }
+
+ /// Check whether the diagnostic item with the given `name` has the given `DefId`.
+ pub fn is_diagnostic_item(self, name: Symbol, did: DefId) -> bool {
+ self.diagnostic_items(did.krate).name_to_id.get(&name) == Some(&did)
+ }
+
+ pub fn stability(self) -> &'tcx stability::Index {
+ self.stability_index(())
+ }
+
+ pub fn features(self) -> &'tcx rustc_feature::Features {
+ self.features_query(())
+ }
+
+ pub fn def_key(self, id: DefId) -> rustc_hir::definitions::DefKey {
+ // Accessing the DefKey is ok, since it is part of DefPathHash.
+ if let Some(id) = id.as_local() {
+ self.definitions_untracked().def_key(id)
+ } else {
+ self.cstore.def_key(id)
+ }
+ }
+
+ /// Converts a `DefId` into its fully expanded `DefPath` (every
+ /// `DefId` is really just an interned `DefPath`).
+ ///
+ /// Note that if `id` is not local to this crate, the result will
+ /// be a non-local `DefPath`.
+ pub fn def_path(self, id: DefId) -> rustc_hir::definitions::DefPath {
+ // Accessing the DefPath is ok, since it is part of DefPathHash.
+ if let Some(id) = id.as_local() {
+ self.definitions_untracked().def_path(id)
+ } else {
+ self.cstore.def_path(id)
+ }
+ }
+
+ #[inline]
+ pub fn def_path_hash(self, def_id: DefId) -> rustc_hir::definitions::DefPathHash {
+ // Accessing the DefPathHash is ok, it is incr. comp. stable.
+ if let Some(def_id) = def_id.as_local() {
+ self.definitions_untracked().def_path_hash(def_id)
+ } else {
+ self.cstore.def_path_hash(def_id)
+ }
+ }
+
+ #[inline]
+ pub fn stable_crate_id(self, crate_num: CrateNum) -> StableCrateId {
+ if crate_num == LOCAL_CRATE {
+ self.sess.local_stable_crate_id()
+ } else {
+ self.cstore.stable_crate_id(crate_num)
+ }
+ }
+
+ /// Maps a `StableCrateId` to the corresponding `CrateNum`. This method assumes
+ /// that the crate in question has already been loaded by the `CrateStore`.
+ #[inline]
+ pub fn stable_crate_id_to_crate_num(self, stable_crate_id: StableCrateId) -> CrateNum {
+ if stable_crate_id == self.sess.local_stable_crate_id() {
+ LOCAL_CRATE
+ } else {
+ self.cstore.stable_crate_id_to_crate_num(stable_crate_id)
+ }
+ }
+
+ /// Converts a `DefPathHash` to its corresponding `DefId` in the current compilation
+ /// session, if it still exists. This is used during incremental compilation to
+ /// turn a deserialized `DefPathHash` into its current `DefId`.
+ pub fn def_path_hash_to_def_id(self, hash: DefPathHash, err: &mut dyn FnMut() -> !) -> DefId {
+ debug!("def_path_hash_to_def_id({:?})", hash);
+
+ let stable_crate_id = hash.stable_crate_id();
+
+ // If this is a DefPathHash from the local crate, we can look up the
+ // DefId in the tcx's `Definitions`.
+ if stable_crate_id == self.sess.local_stable_crate_id() {
+ self.definitions.read().local_def_path_hash_to_def_id(hash, err).to_def_id()
+ } else {
+ // If this is a DefPathHash from an upstream crate, let the CrateStore map
+ // it to a DefId.
+ let cnum = self.cstore.stable_crate_id_to_crate_num(stable_crate_id);
+ self.cstore.def_path_hash_to_def_id(cnum, hash)
+ }
+ }
+
+ pub fn def_path_debug_str(self, def_id: DefId) -> String {
+ // We are explicitly not going through queries here in order to get the
+ // crate name and stable crate id: this code is called from `debug!()`
+ // statements within the query system and we'd run into endless
+ // recursion otherwise.
+ let (crate_name, stable_crate_id) = if def_id.is_local() {
+ (self.crate_name, self.sess.local_stable_crate_id())
+ } else {
+ let cstore = &self.cstore;
+ (cstore.crate_name(def_id.krate), cstore.stable_crate_id(def_id.krate))
+ };
+
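+ // E.g. (illustrative) this renders as something like `my_crate[c2f8]::foo::bar`:
+ // the crate name, 16 bits of the stable crate id in hex, and the
+ // crate-relative def path.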
+ format!(
+ "{}[{:04x}]{}",
+ crate_name,
+ // Don't print the whole stable crate id. That's just
+ // annoying in debug output.
+ stable_crate_id.to_u64() >> (8 * 6),
+ self.def_path(def_id).to_string_no_crate_verbose()
+ )
+ }
+
+ /// Create a new definition within the incr. comp. engine.
+ pub fn create_def(self, parent: LocalDefId, data: hir::definitions::DefPathData) -> LocalDefId {
+ // This function modifies `self.definitions` using a side-effect.
+ // We need to ensure that these side effects are re-run by the incr. comp. engine.
+ // Depending on the forever-red node will tell the graph that the calling query
+ // needs to be re-evaluated.
+ use rustc_query_system::dep_graph::DepNodeIndex;
+ self.dep_graph.read_index(DepNodeIndex::FOREVER_RED_NODE);
+
+ // The following call has the side effect of modifying the tables inside `definitions`.
+ // These very tables are relied on by the incr. comp. engine to decode DepNodes and to
+ // decode the on-disk cache.
+ //
+ // Any LocalDefId which is used within queries, either as key or result, either:
+ // - has been created before the construction of the TyCtxt;
+ // - has been created by this call to `create_def`.
+ // As a consequence, this LocalDefId is always re-created before it is needed by the incr.
+ // comp. engine itself.
+ //
+ // This call also writes to the value of `source_span` and `expn_that_defined` queries.
+ // This is fine because:
+ // - those queries are `eval_always` so we won't miss their result changing;
+ // - this write will have happened before these queries are called.
+ self.definitions.write().create_def(parent, data)
+ }
+
+ pub fn iter_local_def_id(self) -> impl Iterator<Item = LocalDefId> + 'tcx {
+ // Create a dependency on the crate to be sure we re-execute this when the
+ // number of definitions changes.
+ self.ensure().hir_crate(());
+ // Leak a read lock once we start iterating over definitions, to prevent adding new ones
+ // while iterating. If some query needs to add definitions, it should be `ensure`d above.
+ let definitions = self.definitions.leak();
+ definitions.iter_local_def_id()
+ }
+
+ pub fn def_path_table(self) -> &'tcx rustc_hir::definitions::DefPathTable {
+ // Create a dependency on the crate to be sure we re-execute this when the
+ // number of definitions changes.
+ self.ensure().hir_crate(());
+ // Leak a read lock once we start iterating over definitions, to prevent adding new ones
+ // while iterating. If some query needs to add definitions, it should be `ensure`d above.
+ let definitions = self.definitions.leak();
+ definitions.def_path_table()
+ }
+
+ pub fn def_path_hash_to_def_index_map(
+ self,
+ ) -> &'tcx rustc_hir::def_path_hash_map::DefPathHashMap {
+ // Create a dependency on the crate to be sure we re-execute this when the
+ // number of definitions changes.
+ self.ensure().hir_crate(());
+ // Leak a read lock once we start iterating over definitions, to prevent adding new ones
+ // while iterating. If some query needs to add definitions, it should be `ensure`d above.
+ let definitions = self.definitions.leak();
+ definitions.def_path_hash_to_def_index_map()
+ }
+
+ /// Note that this is *untracked* and should only be used within the query
+ /// system if the result is otherwise tracked through queries.
+ pub fn cstore_untracked(self) -> &'tcx CrateStoreDyn {
+ &*self.cstore
+ }
+
+ /// Note that this is *untracked* and should only be used within the query
+ /// system if the result is otherwise tracked through queries.
+ #[inline]
+ pub fn definitions_untracked(self) -> ReadGuard<'tcx, Definitions> {
+ self.definitions.read()
+ }
+
+ /// Note that this is *untracked* and should only be used within the query
+ /// system if the result is otherwise tracked through queries.
+ #[inline]
+ pub fn source_span_untracked(self, def_id: LocalDefId) -> Span {
+ self.untracked_resolutions.source_span.get(def_id).copied().unwrap_or(DUMMY_SP)
+ }
+
+ #[inline(always)]
+ pub fn with_stable_hashing_context<R>(
+ self,
+ f: impl FnOnce(StableHashingContext<'_>) -> R,
+ ) -> R {
+ let definitions = self.definitions_untracked();
+ let hcx = StableHashingContext::new(
+ self.sess,
+ &*definitions,
+ &*self.cstore,
+ &self.untracked_resolutions.source_span,
+ );
+ f(hcx)
+ }
+
+ pub fn serialize_query_result_cache(self, encoder: FileEncoder) -> FileEncodeResult {
+ self.on_disk_cache.as_ref().map_or(Ok(0), |c| c.serialize(self, encoder))
+ }
+
+ /// If `true`, we should use lazy normalization for constants, otherwise
+ /// we still evaluate them eagerly.
+ #[inline]
+ pub fn lazy_normalization(self) -> bool {
+ let features = self.features();
+ // Note: We only use lazy normalization for generic const expressions.
+ features.generic_const_exprs
+ }
+
+ #[inline]
+ pub fn local_crate_exports_generics(self) -> bool {
+ debug_assert!(self.sess.opts.share_generics());
+
+ self.sess.crate_types().iter().any(|crate_type| {
+ match crate_type {
+ CrateType::Executable
+ | CrateType::Staticlib
+ | CrateType::ProcMacro
+ | CrateType::Cdylib => false,
+
+ // FIXME rust-lang/rust#64319, rust-lang/rust#64872:
+ // We want to block export of generics from dylibs,
+ // but we must fix rust-lang/rust#65890 before we can
+ // do that robustly.
+ CrateType::Dylib => true,
+
+ CrateType::Rlib => true,
+ }
+ })
+ }
+
+ /// Returns the `DefId` and the `BoundRegionKind` corresponding to the given region.
+ pub fn is_suitable_region(self, region: Region<'tcx>) -> Option<FreeRegionInfo> {
+ let (suitable_region_binding_scope, bound_region) = match *region {
+ ty::ReFree(ref free_region) => {
+ (free_region.scope.expect_local(), free_region.bound_region)
+ }
+ ty::ReEarlyBound(ref ebr) => (
+ self.local_parent(ebr.def_id.expect_local()),
+ ty::BoundRegionKind::BrNamed(ebr.def_id, ebr.name),
+ ),
+ _ => return None, // not a free region
+ };
+
+ let is_impl_item = match self.hir().find_by_def_id(suitable_region_binding_scope) {
+ Some(Node::Item(..) | Node::TraitItem(..)) => false,
+ Some(Node::ImplItem(..)) => {
+ self.is_bound_region_in_impl_item(suitable_region_binding_scope)
+ }
+ _ => return None,
+ };
+
+ Some(FreeRegionInfo {
+ def_id: suitable_region_binding_scope,
+ boundregion: bound_region,
+ is_impl_item,
+ })
+ }
+
+ /// Given a `DefId` for a `fn`, returns all the `dyn` and `impl` traits in its return type.
+ pub fn return_type_impl_or_dyn_traits(
+ self,
+ scope_def_id: LocalDefId,
+ ) -> Vec<&'tcx hir::Ty<'tcx>> {
+ let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
+ let Some(hir::FnDecl { output: hir::FnRetTy::Return(hir_output), .. }) = self.hir().fn_decl_by_hir_id(hir_id) else {
+ return vec![];
+ };
+
+ let mut v = TraitObjectVisitor(vec![], self.hir());
+ v.visit_ty(hir_output);
+ v.0
+ }
+
+ pub fn return_type_impl_trait(self, scope_def_id: LocalDefId) -> Option<(Ty<'tcx>, Span)> {
+ // `type_of()` will fail on these (#55796, #86483), so only allow `fn`s or closures.
+ match self.hir().get_by_def_id(scope_def_id) {
+ Node::Item(&hir::Item { kind: ItemKind::Fn(..), .. }) => {}
+ Node::TraitItem(&hir::TraitItem { kind: TraitItemKind::Fn(..), .. }) => {}
+ Node::ImplItem(&hir::ImplItem { kind: ImplItemKind::Fn(..), .. }) => {}
+ Node::Expr(&hir::Expr { kind: ExprKind::Closure { .. }, .. }) => {}
+ _ => return None,
+ }
+
+ let ret_ty = self.type_of(scope_def_id);
+ match ret_ty.kind() {
+ ty::FnDef(_, _) => {
+ let sig = ret_ty.fn_sig(self);
+ let output = self.erase_late_bound_regions(sig.output());
+ if output.is_impl_trait() {
+ let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
+ let fn_decl = self.hir().fn_decl_by_hir_id(hir_id).unwrap();
+ Some((output, fn_decl.output.span()))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ }
+ }
+
+ /// Checks whether the bound region is in an impl item.
+ pub fn is_bound_region_in_impl_item(self, suitable_region_binding_scope: LocalDefId) -> bool {
+ let container_id = self.parent(suitable_region_binding_scope.to_def_id());
+ if self.impl_trait_ref(container_id).is_some() {
+ // For now, we do not try to target impls of traits. This is
+ // because this message is going to suggest that the user
+ // change the fn signature, but they may not be free to do so,
+ // since the signature must match the trait.
+ //
+ // FIXME(#42706) -- in some cases, we could do better here.
+ return true;
+ }
+ false
+ }
+
+ /// Determines whether identifiers in the assembly have strict naming rules.
+ /// Currently, only NVPTX* targets need it.
+ pub fn has_strict_asm_symbol_naming(self) -> bool {
+ self.sess.target.arch.contains("nvptx")
+ }
+
+ /// Returns `&'static core::panic::Location<'static>`.
+ pub fn caller_location_ty(self) -> Ty<'tcx> {
+ self.mk_imm_ref(
+ self.lifetimes.re_static,
+ self.bound_type_of(self.require_lang_item(LangItem::PanicLocation, None))
+ .subst(self, self.mk_substs([self.lifetimes.re_static.into()].iter())),
+ )
+ }
+
+ /// Returns a displayable description and article for the given `def_id` (e.g. `("a", "struct")`).
+ pub fn article_and_description(self, def_id: DefId) -> (&'static str, &'static str) {
+ match self.def_kind(def_id) {
+ DefKind::Generator => match self.generator_kind(def_id).unwrap() {
+ rustc_hir::GeneratorKind::Async(..) => ("an", "async closure"),
+ rustc_hir::GeneratorKind::Gen => ("a", "generator"),
+ },
+ def_kind => (def_kind.article(), def_kind.descr(def_id)),
+ }
+ }
+
+ pub fn type_length_limit(self) -> Limit {
+ self.limits(()).type_length_limit
+ }
+
+ pub fn recursion_limit(self) -> Limit {
+ self.limits(()).recursion_limit
+ }
+
+ pub fn move_size_limit(self) -> Limit {
+ self.limits(()).move_size_limit
+ }
+
+ pub fn const_eval_limit(self) -> Limit {
+ self.limits(()).const_eval_limit
+ }
+
+ pub fn all_traits(self) -> impl Iterator<Item = DefId> + 'tcx {
+ iter::once(LOCAL_CRATE)
+ .chain(self.crates(()).iter().copied())
+ .flat_map(move |cnum| self.traits_in_crate(cnum).iter().copied())
+ }
+}
+
+/// A trait implemented for all `X<'a>` types that can be safely and
+/// efficiently converted to `X<'tcx>` as long as they are part of the
+/// provided `TyCtxt<'tcx>`.
+/// This can be done, for example, for `Ty<'tcx>` or `SubstsRef<'tcx>`
+/// by looking them up in their respective interners.
+///
+/// However, this is still not the best implementation as it does
+/// need to compare the components, even for interned values.
+/// It would be more efficient if `TypedArena` provided a way to
+/// determine whether the address is in the allocated range.
+///
+/// `None` is returned if the value or one of the components is not part
+/// of the provided context.
+/// For `Ty`, `None` can be returned if either the type interner doesn't
+/// contain the `TyKind` key or if the address of the interned
+/// pointer differs. The latter case is possible if a primitive type,
+/// e.g., `()` or `u8`, was interned in a different context.
+pub trait Lift<'tcx>: fmt::Debug {
+ type Lifted: fmt::Debug + 'tcx;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted>;
+}
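+
+// Illustrative use (sketch; assumes `tcx: TyCtxt<'tcx>` and some `value` tied
+// to a different lifetime): `tcx.lift(value)` returns `Some` only if `value`
+// and all of its components were interned in this `tcx`.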
+
+macro_rules! nop_lift {
+ ($set:ident; $ty:ty => $lifted:ty) => {
+ impl<'a, 'tcx> Lift<'tcx> for $ty {
+ type Lifted = $lifted;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ if tcx.interners.$set.contains_pointer_to(&InternedInSet(&*self.0.0)) {
+ // SAFETY: `self` is interned and therefore valid
+ // for the entire lifetime of the `TyCtxt`.
+ Some(unsafe { mem::transmute(self) })
+ } else {
+ None
+ }
+ }
+ }
+ };
+}
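+
+// E.g. the `nop_lift! {type_; Ty<'a> => Ty<'tcx>}` invocation below implements
+// `Lift` for `Ty` by checking that the pointer is contained in the `type_`
+// interner set and then transmuting the lifetime.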
+
+// Can't use the macros, as we have to reuse the `substs` interner here.
+//
+// See `intern_type_list` for more info.
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Ty<'a>> {
+ type Lifted = &'tcx List<Ty<'tcx>>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ if self.is_empty() {
+ return Some(List::empty());
+ }
+ if tcx.interners.substs.contains_pointer_to(&InternedInSet(self.as_substs())) {
+ // SAFETY: `self` is interned and therefore valid
+ // for the entire lifetime of the `TyCtxt`.
+ Some(unsafe { mem::transmute::<&'a List<Ty<'a>>, &'tcx List<Ty<'tcx>>>(self) })
+ } else {
+ None
+ }
+ }
+}
+
+macro_rules! nop_list_lift {
+ ($set:ident; $ty:ty => $lifted:ty) => {
+ impl<'a, 'tcx> Lift<'tcx> for &'a List<$ty> {
+ type Lifted = &'tcx List<$lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ if self.is_empty() {
+ return Some(List::empty());
+ }
+ if tcx.interners.$set.contains_pointer_to(&InternedInSet(self)) {
+ Some(unsafe { mem::transmute(self) })
+ } else {
+ None
+ }
+ }
+ }
+ };
+}
+
+nop_lift! {type_; Ty<'a> => Ty<'tcx>}
+nop_lift! {region; Region<'a> => Region<'tcx>}
+nop_lift! {const_; Const<'a> => Const<'tcx>}
+nop_lift! {const_allocation; ConstAllocation<'a> => ConstAllocation<'tcx>}
+nop_lift! {predicate; Predicate<'a> => Predicate<'tcx>}
+
+nop_list_lift! {poly_existential_predicates; ty::Binder<'a, ExistentialPredicate<'a>> => ty::Binder<'tcx, ExistentialPredicate<'tcx>>}
+nop_list_lift! {predicates; Predicate<'a> => Predicate<'tcx>}
+nop_list_lift! {canonical_var_infos; CanonicalVarInfo<'a> => CanonicalVarInfo<'tcx>}
+nop_list_lift! {projs; ProjectionKind => ProjectionKind}
+nop_list_lift! {bound_variable_kinds; ty::BoundVariableKind => ty::BoundVariableKind}
+
+// This is the impl for `&'a InternalSubsts<'a>`.
+nop_list_lift! {substs; GenericArg<'a> => GenericArg<'tcx>}
+
+CloneLiftImpls! { for<'tcx> { Constness, traits::WellFormedLoc, } }
+
+pub mod tls {
+ use super::{ptr_eq, GlobalCtxt, TyCtxt};
+
+ use crate::dep_graph::TaskDepsRef;
+ use crate::ty::query;
+ use rustc_data_structures::sync::{self, Lock};
+ use rustc_data_structures::thin_vec::ThinVec;
+ use rustc_errors::Diagnostic;
+ use std::mem;
+
+ #[cfg(not(parallel_compiler))]
+ use std::cell::Cell;
+
+ #[cfg(parallel_compiler)]
+ use rustc_rayon_core as rayon_core;
+
+ /// This is the implicit state of rustc. It contains the current
+ /// `TyCtxt` and query. It is updated when creating a local interner or
+ /// executing a new query. Whenever there's a `TyCtxt` value available
+ /// you should also have access to an `ImplicitCtxt` through the functions
+ /// in this module.
+ #[derive(Clone)]
+ pub struct ImplicitCtxt<'a, 'tcx> {
+ /// The current `TyCtxt`.
+ pub tcx: TyCtxt<'tcx>,
+
+ /// The current query job, if any. This is updated by `JobOwner::start` in
+ /// `ty::query::plumbing` when executing a query.
+ pub query: Option<query::QueryJobId>,
+
+ /// Where to store diagnostics for the current query job, if any.
+ /// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.
+ pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,
+
+ /// Used to prevent layout from recursing too deeply.
+ pub layout_depth: usize,
+
+ /// The current dep graph task. This is used to add dependencies to queries
+ /// when executing them.
+ pub task_deps: TaskDepsRef<'a>,
+ }
+
+ impl<'a, 'tcx> ImplicitCtxt<'a, 'tcx> {
+ pub fn new(gcx: &'tcx GlobalCtxt<'tcx>) -> Self {
+ let tcx = TyCtxt { gcx };
+ ImplicitCtxt {
+ tcx,
+ query: None,
+ diagnostics: None,
+ layout_depth: 0,
+ task_deps: TaskDepsRef::Ignore,
+ }
+ }
+ }
+
+ /// Sets Rayon's thread-local variable, which is preserved for Rayon jobs,
+ /// to `value` during the call to `f`. It is restored to its previous value afterwards.
+ /// This is used to set the pointer to the new `ImplicitCtxt`.
+ #[cfg(parallel_compiler)]
+ #[inline]
+ fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
+ rayon_core::tlv::with(value, f)
+ }
+
+ /// Gets Rayon's thread-local variable, which is preserved for Rayon jobs.
+ /// This is used to get the pointer to the current `ImplicitCtxt`.
+ #[cfg(parallel_compiler)]
+ #[inline]
+ pub fn get_tlv() -> usize {
+ rayon_core::tlv::get()
+ }
+
+ #[cfg(not(parallel_compiler))]
+ thread_local! {
+ /// A thread local variable that stores a pointer to the current `ImplicitCtxt`.
+ static TLV: Cell<usize> = const { Cell::new(0) };
+ }
+
+ /// Sets TLV to `value` during the call to `f`.
+ /// It is restored to its previous value after.
+ /// This is used to set the pointer to the new `ImplicitCtxt`.
+ #[cfg(not(parallel_compiler))]
+ #[inline]
+ fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
+ let old = get_tlv();
+ let _reset = rustc_data_structures::OnDrop(move || TLV.with(|tlv| tlv.set(old)));
+ TLV.with(|tlv| tlv.set(value));
+ f()
+ }
+
+ /// Gets the pointer to the current `ImplicitCtxt`.
+ #[cfg(not(parallel_compiler))]
+ #[inline]
+ fn get_tlv() -> usize {
+ TLV.with(|tlv| tlv.get())
+ }
+
+ /// Sets `context` as the new current `ImplicitCtxt` for the duration of the function `f`.
+ #[inline]
+ pub fn enter_context<'a, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'tcx>, f: F) -> R
+ where
+ F: FnOnce(&ImplicitCtxt<'a, 'tcx>) -> R,
+ {
+ set_tlv(context as *const _ as usize, || f(&context))
+ }
+
+ /// Allows access to the current `ImplicitCtxt` in a closure if one is available.
+ #[inline]
+ pub fn with_context_opt<F, R>(f: F) -> R
+ where
+ F: for<'a, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'tcx>>) -> R,
+ {
+ let context = get_tlv();
+ if context == 0 {
+ f(None)
+ } else {
+ // We could get an `ImplicitCtxt` pointer from another thread.
+ // Ensure that `ImplicitCtxt` is `Sync`.
+ sync::assert_sync::<ImplicitCtxt<'_, '_>>();
+
+ unsafe { f(Some(&*(context as *const ImplicitCtxt<'_, '_>))) }
+ }
+ }
+
+ /// Allows access to the current `ImplicitCtxt`.
+ /// Panics if there is no `ImplicitCtxt` available.
+ #[inline]
+ pub fn with_context<F, R>(f: F) -> R
+ where
+ F: for<'a, 'tcx> FnOnce(&ImplicitCtxt<'a, 'tcx>) -> R,
+ {
+ with_context_opt(|opt_context| f(opt_context.expect("no ImplicitCtxt stored in tls")))
+ }
+
+ /// Allows access to the current `ImplicitCtxt` whose tcx field is the same as the tcx argument
+ /// passed in. This means the closure is given an `ImplicitCtxt` with the same `'tcx` lifetime
+ /// as the `TyCtxt` passed in.
+ /// This will panic if you pass it a `TyCtxt` which is different from the current
+ /// `ImplicitCtxt`'s `tcx` field.
+ #[inline]
+ pub fn with_related_context<'tcx, F, R>(tcx: TyCtxt<'tcx>, f: F) -> R
+ where
+ F: FnOnce(&ImplicitCtxt<'_, 'tcx>) -> R,
+ {
+ with_context(|context| unsafe {
+ assert!(ptr_eq(context.tcx.gcx, tcx.gcx));
+ let context: &ImplicitCtxt<'_, '_> = mem::transmute(context);
+ f(context)
+ })
+ }
+
+ /// Allows access to the `TyCtxt` in the current `ImplicitCtxt`.
+ /// Panics if there is no `ImplicitCtxt` available.
+ #[inline]
+ pub fn with<F, R>(f: F) -> R
+ where
+ F: for<'tcx> FnOnce(TyCtxt<'tcx>) -> R,
+ {
+ with_context(|context| f(context.tcx))
+ }
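+
+ // Illustrative use (sketch; assumes a `def_id` in scope and an entered
+ // `ImplicitCtxt`): code that has no `TyCtxt` at hand, e.g. inside `debug!`
+ // formatting, can call `tls::with(|tcx| tcx.def_path_debug_str(def_id))`.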
+
+ /// Allows access to the `TyCtxt` in the current `ImplicitCtxt`.
+ /// The closure is passed None if there is no `ImplicitCtxt` available.
+ #[inline]
+ pub fn with_opt<F, R>(f: F) -> R
+ where
+ F: for<'tcx> FnOnce(Option<TyCtxt<'tcx>>) -> R,
+ {
+ with_context_opt(|opt_context| f(opt_context.map(|context| context.tcx)))
+ }
+}
+
+macro_rules! sty_debug_print {
+ ($fmt: expr, $ctxt: expr, $($variant: ident),*) => {{
+ // Curious inner module to allow variant names to be used as
+ // variable names.
+ #[allow(non_snake_case)]
+ mod inner {
+ use crate::ty::{self, TyCtxt};
+ use crate::ty::context::InternedInSet;
+
+ #[derive(Copy, Clone)]
+ struct DebugStat {
+ total: usize,
+ lt_infer: usize,
+ ty_infer: usize,
+ ct_infer: usize,
+ all_infer: usize,
+ }
+
+ pub fn go(fmt: &mut std::fmt::Formatter<'_>, tcx: TyCtxt<'_>) -> std::fmt::Result {
+ let mut total = DebugStat {
+ total: 0,
+ lt_infer: 0,
+ ty_infer: 0,
+ ct_infer: 0,
+ all_infer: 0,
+ };
+ $(let mut $variant = total;)*
+
+ let shards = tcx.interners.type_.lock_shards();
+ let types = shards.iter().flat_map(|shard| shard.keys());
+ for &InternedInSet(t) in types {
+ let variant = match t.kind {
+ ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
+ ty::Float(..) | ty::Str | ty::Never => continue,
+ ty::Error(_) => /* unimportant */ continue,
+ $(ty::$variant(..) => &mut $variant,)*
+ };
+ let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
+ let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
+ let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER);
+
+ variant.total += 1;
+ total.total += 1;
+ if lt { total.lt_infer += 1; variant.lt_infer += 1 }
+ if ty { total.ty_infer += 1; variant.ty_infer += 1 }
+ if ct { total.ct_infer += 1; variant.ct_infer += 1 }
+ if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 }
+ }
+ writeln!(fmt, "Ty interner total ty lt ct all")?;
+ $(writeln!(fmt, " {:18}: {uses:6} {usespc:4.1}%, \
+ {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%",
+ stringify!($variant),
+ uses = $variant.total,
+ usespc = $variant.total as f64 * 100.0 / total.total as f64,
+ ty = $variant.ty_infer as f64 * 100.0 / total.total as f64,
+ lt = $variant.lt_infer as f64 * 100.0 / total.total as f64,
+ ct = $variant.ct_infer as f64 * 100.0 / total.total as f64,
+ all = $variant.all_infer as f64 * 100.0 / total.total as f64)?;
+ )*
+ writeln!(fmt, " total {uses:6} \
+ {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%",
+ uses = total.total,
+ ty = total.ty_infer as f64 * 100.0 / total.total as f64,
+ lt = total.lt_infer as f64 * 100.0 / total.total as f64,
+ ct = total.ct_infer as f64 * 100.0 / total.total as f64,
+ all = total.all_infer as f64 * 100.0 / total.total as f64)
+ }
+ }
+
+ inner::go($fmt, $ctxt)
+ }}
+}
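+
+// Illustrative reading of the output (numbers made up): a row like
+// `Adt : 1000 50.0%, 0.1% 0.0% 0.0% 0.0%` means 1000 interned `Adt` types
+// (50% of all interned types), of which 0.1% contain type-inference variables,
+// followed by the lifetime-, const-, and all-inference columns.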
+
+impl<'tcx> TyCtxt<'tcx> {
+ pub fn debug_stats(self) -> impl std::fmt::Debug + 'tcx {
+ struct DebugStats<'tcx>(TyCtxt<'tcx>);
+
+ impl<'tcx> std::fmt::Debug for DebugStats<'tcx> {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ sty_debug_print!(
+ fmt,
+ self.0,
+ Adt,
+ Array,
+ Slice,
+ RawPtr,
+ Ref,
+ FnDef,
+ FnPtr,
+ Placeholder,
+ Generator,
+ GeneratorWitness,
+ Dynamic,
+ Closure,
+ Tuple,
+ Bound,
+ Param,
+ Infer,
+ Projection,
+ Opaque,
+ Foreign
+ )?;
+
+ writeln!(fmt, "InternalSubsts interner: #{}", self.0.interners.substs.len())?;
+ writeln!(fmt, "Region interner: #{}", self.0.interners.region.len())?;
+ writeln!(
+ fmt,
+ "Const Allocation interner: #{}",
+ self.0.interners.const_allocation.len()
+ )?;
+ writeln!(fmt, "Layout interner: #{}", self.0.interners.layout.len())?;
+
+ Ok(())
+ }
+ }
+
+ DebugStats(self)
+ }
+}
+
+// This type holds a `T` in the interner. The `T` is stored in the arena and
+// this type just holds a pointer to it, but it still effectively owns it. It
+// impls `Borrow` so that it can be looked up using the original
+// (non-arena-memory-owning) types.
+struct InternedInSet<'tcx, T: ?Sized>(&'tcx T);
+
+impl<'tcx, T: 'tcx + ?Sized> Clone for InternedInSet<'tcx, T> {
+ fn clone(&self) -> Self {
+ InternedInSet(self.0)
+ }
+}
+
+impl<'tcx, T: 'tcx + ?Sized> Copy for InternedInSet<'tcx, T> {}
+
+impl<'tcx, T: 'tcx + ?Sized> IntoPointer for InternedInSet<'tcx, T> {
+ fn into_pointer(&self) -> *const () {
+ self.0 as *const _ as *const ()
+ }
+}
+
+#[allow(rustc::usage_of_ty_tykind)]
+impl<'tcx> Borrow<TyKind<'tcx>> for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {
+ fn borrow<'a>(&'a self) -> &'a TyKind<'tcx> {
+ &self.0.kind
+ }
+}
+
+impl<'tcx> PartialEq for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {
+ fn eq(&self, other: &InternedInSet<'tcx, WithStableHash<TyS<'tcx>>>) -> bool {
+ // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals
+ // `x == y`.
+ self.0.kind == other.0.kind
+ }
+}
+
+impl<'tcx> Eq for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {}
+
+impl<'tcx> Hash for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`.
+ self.0.kind.hash(s)
+ }
+}
+
+impl<'tcx> Borrow<Binder<'tcx, PredicateKind<'tcx>>> for InternedInSet<'tcx, PredicateS<'tcx>> {
+ fn borrow<'a>(&'a self) -> &'a Binder<'tcx, PredicateKind<'tcx>> {
+ &self.0.kind
+ }
+}
+
+impl<'tcx> PartialEq for InternedInSet<'tcx, PredicateS<'tcx>> {
+ fn eq(&self, other: &InternedInSet<'tcx, PredicateS<'tcx>>) -> bool {
+ // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals
+ // `x == y`.
+ self.0.kind == other.0.kind
+ }
+}
+
+impl<'tcx> Eq for InternedInSet<'tcx, PredicateS<'tcx>> {}
+
+impl<'tcx> Hash for InternedInSet<'tcx, PredicateS<'tcx>> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`.
+ self.0.kind.hash(s)
+ }
+}
+
+impl<'tcx, T> Borrow<[T]> for InternedInSet<'tcx, List<T>> {
+ fn borrow<'a>(&'a self) -> &'a [T] {
+ &self.0[..]
+ }
+}
+
+impl<'tcx, T: PartialEq> PartialEq for InternedInSet<'tcx, List<T>> {
+ fn eq(&self, other: &InternedInSet<'tcx, List<T>>) -> bool {
+ // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals
+ // `x == y`.
+ self.0[..] == other.0[..]
+ }
+}
+
+impl<'tcx, T: Eq> Eq for InternedInSet<'tcx, List<T>> {}
+
+impl<'tcx, T: Hash> Hash for InternedInSet<'tcx, List<T>> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`.
+ self.0[..].hash(s)
+ }
+}
+
+macro_rules! direct_interners {
+ ($($name:ident: $method:ident($ty:ty): $ret_ctor:ident -> $ret_ty:ty,)+) => {
+ $(impl<'tcx> Borrow<$ty> for InternedInSet<'tcx, $ty> {
+ fn borrow<'a>(&'a self) -> &'a $ty {
+ &self.0
+ }
+ }
+
+ impl<'tcx> PartialEq for InternedInSet<'tcx, $ty> {
+ fn eq(&self, other: &Self) -> bool {
+ // The `Borrow` trait requires that `x.borrow() == y.borrow()`
+ // equals `x == y`.
+ self.0 == other.0
+ }
+ }
+
+ impl<'tcx> Eq for InternedInSet<'tcx, $ty> {}
+
+ impl<'tcx> Hash for InternedInSet<'tcx, $ty> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // The `Borrow` trait requires that `x.borrow().hash(s) ==
+ // x.hash(s)`.
+ self.0.hash(s)
+ }
+ }
+
+ impl<'tcx> TyCtxt<'tcx> {
+ pub fn $method(self, v: $ty) -> $ret_ty {
+ $ret_ctor(Interned::new_unchecked(self.interners.$name.intern(v, |v| {
+ InternedInSet(self.interners.arena.alloc(v))
+ }).0))
+ }
+ })+
+ }
+}
+
+direct_interners! {
+ region: mk_region(RegionKind<'tcx>): Region -> Region<'tcx>,
+ const_: mk_const(ConstS<'tcx>): Const -> Const<'tcx>,
+ const_allocation: intern_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>,
+ layout: intern_layout(LayoutS<'tcx>): Layout -> Layout<'tcx>,
+ adt_def: intern_adt_def(AdtDefData): AdtDef -> AdtDef<'tcx>,
+}
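+
+// E.g. the `region` entry above expands to `Borrow`/`PartialEq`/`Eq`/`Hash`
+// impls for `InternedInSet<'tcx, RegionKind<'tcx>>` plus an inherent
+// `TyCtxt::mk_region(RegionKind<'tcx>) -> Region<'tcx>` that interns the value
+// in the `region` set, arena-allocating it on first use.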
+
+macro_rules! slice_interners {
+ ($($field:ident: $method:ident($ty:ty)),+ $(,)?) => (
+ impl<'tcx> TyCtxt<'tcx> {
+ $(pub fn $method(self, v: &[$ty]) -> &'tcx List<$ty> {
+ self.interners.$field.intern_ref(v, || {
+ InternedInSet(List::from_arena(&*self.arena, v))
+ }).0
+ })+
+ }
+ );
+}
+
+slice_interners!(
+ substs: _intern_substs(GenericArg<'tcx>),
+ canonical_var_infos: _intern_canonical_var_infos(CanonicalVarInfo<'tcx>),
+ poly_existential_predicates:
+ _intern_poly_existential_predicates(ty::Binder<'tcx, ExistentialPredicate<'tcx>>),
+ predicates: _intern_predicates(Predicate<'tcx>),
+ projs: _intern_projs(ProjectionKind),
+ place_elems: _intern_place_elems(PlaceElem<'tcx>),
+ bound_variable_kinds: _intern_bound_variable_kinds(ty::BoundVariableKind),
+);
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Given a `fn` type, returns an equivalent `unsafe fn` type;
+ /// that is, a `fn` type that is equivalent in every way except for
+ /// being unsafe.
+ pub fn safe_to_unsafe_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> {
+ assert_eq!(sig.unsafety(), hir::Unsafety::Normal);
+ self.mk_fn_ptr(sig.map_bound(|sig| ty::FnSig { unsafety: hir::Unsafety::Unsafe, ..sig }))
+ }
+
+ /// Given the `DefId` of a trait (`trait_def_id`) and the name of an associated item
+ /// (`assoc_name`), returns `true` if the trait, or any of its supertraits, defines an
+ /// associated item with that name.
+ pub fn trait_may_define_assoc_type(self, trait_def_id: DefId, assoc_name: Ident) -> bool {
+ self.super_traits_of(trait_def_id).any(|trait_did| {
+ self.associated_items(trait_did)
+ .find_by_name_and_kind(self, assoc_name, ty::AssocKind::Type, trait_did)
+ .is_some()
+ })
+ }
+
+ /// Given a `ty`, return whether it's an `impl Future<...>`.
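+ /// (E.g. the opaque type returned by an `async fn` is such a future.)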
+ pub fn ty_is_opaque_future(self, ty: Ty<'_>) -> bool {
+ let ty::Opaque(def_id, _) = ty.kind() else { return false };
+ let future_trait = self.lang_items().future_trait().unwrap();
+
+ self.explicit_item_bounds(def_id).iter().any(|(predicate, _)| {
+ let ty::PredicateKind::Trait(trait_predicate) = predicate.kind().skip_binder() else {
+ return false;
+ };
+ trait_predicate.trait_ref.def_id == future_trait
+ && trait_predicate.polarity == ImplPolarity::Positive
+ })
+ }
+
+ /// Computes the def-ids of the transitive supertraits of `trait_def_id`. This (intentionally)
+ /// does not compute the full elaborated super-predicates but just the set of def-ids. It is used
+ /// to identify which traits may define a given associated type to help avoid cycle errors.
+ /// Returns a `DefId` iterator.
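+ ///
+ /// E.g. (illustrative): given `trait A: B {}` and `trait B: C {}`,
+ /// `super_traits_of(A)` yields the def-ids of `A`, `B`, and `C`, starting
+ /// with `A` itself.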
+ fn super_traits_of(self, trait_def_id: DefId) -> impl Iterator<Item = DefId> + 'tcx {
+ let mut set = FxHashSet::default();
+ let mut stack = vec![trait_def_id];
+
+ set.insert(trait_def_id);
+
+ iter::from_fn(move || -> Option<DefId> {
+ let trait_did = stack.pop()?;
+ let generic_predicates = self.super_predicates_of(trait_did);
+
+ for (predicate, _) in generic_predicates.predicates {
+ if let ty::PredicateKind::Trait(data) = predicate.kind().skip_binder() {
+ if set.insert(data.def_id()) {
+ stack.push(data.def_id());
+ }
+ }
+ }
+
+ Some(trait_did)
+ })
+ }
+
+ /// Given a closure signature, returns an equivalent fn signature. The tupled
+ /// arguments are flattened -- e.g., a sig with `Fn<(u32, i32)>` becomes
+ /// `fn(u32, i32)`.
+ /// `unsafety` determines the unsafety of the fn signature. If you pass
+ /// `hir::Unsafety::Unsafe` in the previous example, then you would get
+ /// an `unsafe fn (u32, i32)`.
+ /// It cannot convert a closure that requires unsafe.
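+ ///
+ /// ```ignore (illustrative sketch; assumes a `closure_sig` in scope)
+ /// // closure_sig: for<'a> Fn<(&'a u32, i32)> -> bool
+ /// let fn_sig = tcx.signature_unclosure(closure_sig, hir::Unsafety::Normal);
+ /// // fn_sig: for<'a> fn(&'a u32, i32) -> bool
+ /// ```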
+ pub fn signature_unclosure(
+ self,
+ sig: PolyFnSig<'tcx>,
+ unsafety: hir::Unsafety,
+ ) -> PolyFnSig<'tcx> {
+ sig.map_bound(|s| {
+ let params_iter = match s.inputs()[0].kind() {
+ ty::Tuple(params) => params.into_iter(),
+ _ => bug!(),
+ };
+ self.mk_fn_sig(params_iter, s.output(), s.c_variadic, unsafety, abi::Abi::Rust)
+ })
+ }
+
+ /// Same as `self.mk_region(kind)`, but avoids accessing the interners if
+ /// `*r == kind`.
+ #[inline]
+ pub fn reuse_or_mk_region(self, r: Region<'tcx>, kind: RegionKind<'tcx>) -> Region<'tcx> {
+ if *r == kind { r } else { self.mk_region(kind) }
+ }
+
+ #[allow(rustc::usage_of_ty_tykind)]
+ #[inline]
+ pub fn mk_ty(self, st: TyKind<'tcx>) -> Ty<'tcx> {
+ self.interners.intern_ty(
+ st,
+ self.sess,
+ &self.definitions.read(),
+ &*self.cstore,
+ // This is only used to create a stable hashing context.
+ &self.untracked_resolutions.source_span,
+ )
+ }
+
+ #[inline]
+ pub fn mk_predicate(self, binder: Binder<'tcx, PredicateKind<'tcx>>) -> Predicate<'tcx> {
+ self.interners.intern_predicate(binder)
+ }
+
+ #[inline]
+ pub fn reuse_or_mk_predicate(
+ self,
+ pred: Predicate<'tcx>,
+ binder: Binder<'tcx, PredicateKind<'tcx>>,
+ ) -> Predicate<'tcx> {
+ if pred.kind() != binder { self.mk_predicate(binder) } else { pred }
+ }
+
+ pub fn mk_mach_int(self, tm: IntTy) -> Ty<'tcx> {
+ match tm {
+ IntTy::Isize => self.types.isize,
+ IntTy::I8 => self.types.i8,
+ IntTy::I16 => self.types.i16,
+ IntTy::I32 => self.types.i32,
+ IntTy::I64 => self.types.i64,
+ IntTy::I128 => self.types.i128,
+ }
+ }
+
+ pub fn mk_mach_uint(self, tm: UintTy) -> Ty<'tcx> {
+ match tm {
+ UintTy::Usize => self.types.usize,
+ UintTy::U8 => self.types.u8,
+ UintTy::U16 => self.types.u16,
+ UintTy::U32 => self.types.u32,
+ UintTy::U64 => self.types.u64,
+ UintTy::U128 => self.types.u128,
+ }
+ }
+
+ pub fn mk_mach_float(self, tm: FloatTy) -> Ty<'tcx> {
+ match tm {
+ FloatTy::F32 => self.types.f32,
+ FloatTy::F64 => self.types.f64,
+ }
+ }
+
+ #[inline]
+ pub fn mk_static_str(self) -> Ty<'tcx> {
+ self.mk_imm_ref(self.lifetimes.re_static, self.types.str_)
+ }
+
+ #[inline]
+ pub fn mk_adt(self, def: AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ // Take a copy of substs so that we own the vectors inside.
+ self.mk_ty(Adt(def, substs))
+ }
+
+ #[inline]
+ pub fn mk_foreign(self, def_id: DefId) -> Ty<'tcx> {
+ self.mk_ty(Foreign(def_id))
+ }
+
+ fn mk_generic_adt(self, wrapper_def_id: DefId, ty_param: Ty<'tcx>) -> Ty<'tcx> {
+ let adt_def = self.adt_def(wrapper_def_id);
+ let substs =
+ InternalSubsts::for_item(self, wrapper_def_id, |param, substs| match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => bug!(),
+ GenericParamDefKind::Type { has_default, .. } => {
+ if param.index == 0 {
+ ty_param.into()
+ } else {
+ assert!(has_default);
+ self.bound_type_of(param.def_id).subst(self, substs).into()
+ }
+ }
+ });
+ self.mk_ty(Adt(adt_def, substs))
+ }
+
+ #[inline]
+ pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let def_id = self.require_lang_item(LangItem::OwnedBox, None);
+ self.mk_generic_adt(def_id, ty)
+ }
+
+ #[inline]
+ pub fn mk_lang_item(self, ty: Ty<'tcx>, item: LangItem) -> Option<Ty<'tcx>> {
+ let def_id = self.lang_items().require(item).ok()?;
+ Some(self.mk_generic_adt(def_id, ty))
+ }
+
+ #[inline]
+ pub fn mk_diagnostic_item(self, ty: Ty<'tcx>, name: Symbol) -> Option<Ty<'tcx>> {
+ let def_id = self.get_diagnostic_item(name)?;
+ Some(self.mk_generic_adt(def_id, ty))
+ }
+
+ #[inline]
+ pub fn mk_maybe_uninit(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let def_id = self.require_lang_item(LangItem::MaybeUninit, None);
+ self.mk_generic_adt(def_id, ty)
+ }
+
+ #[inline]
+ pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(RawPtr(tm))
+ }
+
+ #[inline]
+ pub fn mk_ref(self, r: Region<'tcx>, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Ref(r, tm.ty, tm.mutbl))
+ }
+
+ #[inline]
+ pub fn mk_mut_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ref(r, TypeAndMut { ty, mutbl: hir::Mutability::Mut })
+ }
+
+ #[inline]
+ pub fn mk_imm_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ref(r, TypeAndMut { ty, mutbl: hir::Mutability::Not })
+ }
+
+ #[inline]
+ pub fn mk_mut_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ptr(TypeAndMut { ty, mutbl: hir::Mutability::Mut })
+ }
+
+ #[inline]
+ pub fn mk_imm_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ptr(TypeAndMut { ty, mutbl: hir::Mutability::Not })
+ }
+
+ #[inline]
+ pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> {
+ self.mk_ty(Array(ty, ty::Const::from_usize(self, n)))
+ }
+
+ #[inline]
+ pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Slice(ty))
+ }
+
+ #[inline]
+ pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> {
+ self.mk_ty(Tuple(self.intern_type_list(&ts)))
+ }
+
+ pub fn mk_tup<I: InternAs<[Ty<'tcx>], Ty<'tcx>>>(self, iter: I) -> I::Output {
+ iter.intern_with(|ts| self.mk_ty(Tuple(self.intern_type_list(&ts))))
+ }
+
+ #[inline]
+ pub fn mk_unit(self) -> Ty<'tcx> {
+ self.types.unit
+ }
+
+ #[inline]
+ pub fn mk_diverging_default(self) -> Ty<'tcx> {
+ if self.features().never_type_fallback { self.types.never } else { self.types.unit }
+ }
+
+ #[inline]
+ pub fn mk_fn_def(self, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(FnDef(def_id, substs))
+ }
+
+ #[inline]
+ pub fn mk_fn_ptr(self, fty: PolyFnSig<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(FnPtr(fty))
+ }
+
+ #[inline]
+ pub fn mk_dynamic(
+ self,
+ obj: &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>,
+ reg: ty::Region<'tcx>,
+ ) -> Ty<'tcx> {
+ self.mk_ty(Dynamic(obj, reg))
+ }
+
+ #[inline]
+ pub fn mk_projection(self, item_def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Projection(ProjectionTy { item_def_id, substs }))
+ }
+
+ #[inline]
+ pub fn mk_closure(self, closure_id: DefId, closure_substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Closure(closure_id, closure_substs))
+ }
+
+ #[inline]
+ pub fn mk_generator(
+ self,
+ id: DefId,
+ generator_substs: SubstsRef<'tcx>,
+ movability: hir::Movability,
+ ) -> Ty<'tcx> {
+ self.mk_ty(Generator(id, generator_substs, movability))
+ }
+
+ #[inline]
+ pub fn mk_generator_witness(self, types: ty::Binder<'tcx, &'tcx List<Ty<'tcx>>>) -> Ty<'tcx> {
+ self.mk_ty(GeneratorWitness(types))
+ }
+
+ #[inline]
+ pub fn mk_ty_var(self, v: TyVid) -> Ty<'tcx> {
+ self.mk_ty_infer(TyVar(v))
+ }
+
+ #[inline]
+ pub fn mk_const_var(self, v: ConstVid<'tcx>, ty: Ty<'tcx>) -> Const<'tcx> {
+ self.mk_const(ty::ConstS { kind: ty::ConstKind::Infer(InferConst::Var(v)), ty })
+ }
+
+ #[inline]
+ pub fn mk_int_var(self, v: IntVid) -> Ty<'tcx> {
+ self.mk_ty_infer(IntVar(v))
+ }
+
+ #[inline]
+ pub fn mk_float_var(self, v: FloatVid) -> Ty<'tcx> {
+ self.mk_ty_infer(FloatVar(v))
+ }
+
+ #[inline]
+ pub fn mk_ty_infer(self, it: InferTy) -> Ty<'tcx> {
+ self.mk_ty(Infer(it))
+ }
+
+ #[inline]
+ pub fn mk_const_infer(self, ic: InferConst<'tcx>, ty: Ty<'tcx>) -> ty::Const<'tcx> {
+ self.mk_const(ty::ConstS { kind: ty::ConstKind::Infer(ic), ty })
+ }
+
+ #[inline]
+ pub fn mk_ty_param(self, index: u32, name: Symbol) -> Ty<'tcx> {
+ self.mk_ty(Param(ParamTy { index, name }))
+ }
+
+ #[inline]
+ pub fn mk_const_param(self, index: u32, name: Symbol, ty: Ty<'tcx>) -> Const<'tcx> {
+ self.mk_const(ty::ConstS { kind: ty::ConstKind::Param(ParamConst { index, name }), ty })
+ }
+
+ pub fn mk_param_from_def(self, param: &ty::GenericParamDef) -> GenericArg<'tcx> {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+ self.mk_region(ty::ReEarlyBound(param.to_early_bound_region_data())).into()
+ }
+ GenericParamDefKind::Type { .. } => self.mk_ty_param(param.index, param.name).into(),
+ GenericParamDefKind::Const { .. } => {
+ self.mk_const_param(param.index, param.name, self.type_of(param.def_id)).into()
+ }
+ }
+ }
+
+ #[inline]
+ pub fn mk_opaque(self, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Opaque(def_id, substs))
+ }
+
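+ /// E.g. (illustrative): `tcx.mk_place_field(tcx.mk_place_deref(place), f, ty)`
+ /// extends `place`'s projection to represent the place `(*place).f`.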
+ pub fn mk_place_field(self, place: Place<'tcx>, f: Field, ty: Ty<'tcx>) -> Place<'tcx> {
+ self.mk_place_elem(place, PlaceElem::Field(f, ty))
+ }
+
+ pub fn mk_place_deref(self, place: Place<'tcx>) -> Place<'tcx> {
+ self.mk_place_elem(place, PlaceElem::Deref)
+ }
+
+ pub fn mk_place_downcast(
+ self,
+ place: Place<'tcx>,
+ adt_def: AdtDef<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Place<'tcx> {
+ self.mk_place_elem(
+ place,
+ PlaceElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index),
+ )
+ }
+
+ pub fn mk_place_downcast_unnamed(
+ self,
+ place: Place<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Place<'tcx> {
+ self.mk_place_elem(place, PlaceElem::Downcast(None, variant_index))
+ }
+
+ pub fn mk_place_index(self, place: Place<'tcx>, index: Local) -> Place<'tcx> {
+ self.mk_place_elem(place, PlaceElem::Index(index))
+ }
+
+ /// This method copies `Place`'s projection, adds an element to it, and re-interns
+ /// the result. It should not be used to build a full `Place`; it's just a convenient
+ /// way to grab a projection and modify it in flight.
+ pub fn mk_place_elem(self, place: Place<'tcx>, elem: PlaceElem<'tcx>) -> Place<'tcx> {
+ let mut projection = place.projection.to_vec();
+ projection.push(elem);
+
+ Place { local: place.local, projection: self.intern_place_elems(&projection) }
+ }
+
+ pub fn intern_poly_existential_predicates(
+ self,
+ eps: &[ty::Binder<'tcx, ExistentialPredicate<'tcx>>],
+ ) -> &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>> {
+ assert!(!eps.is_empty());
+ assert!(
+ eps.array_windows()
+ .all(|[a, b]| a.skip_binder().stable_cmp(self, &b.skip_binder())
+ != Ordering::Greater)
+ );
+ self._intern_poly_existential_predicates(eps)
+ }
+
+ pub fn intern_predicates(self, preds: &[Predicate<'tcx>]) -> &'tcx List<Predicate<'tcx>> {
+ // FIXME consider asking the input slice to be sorted to avoid
+ // re-interning permutations, in which case that would be asserted
+ // here.
+ if preds.is_empty() {
+ // The macro-generated method below asserts we don't intern an empty slice.
+ List::empty()
+ } else {
+ self._intern_predicates(preds)
+ }
+ }
+
+ pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx List<Ty<'tcx>> {
+ if ts.is_empty() {
+ List::empty()
+ } else {
+ // Actually intern type lists as lists of `GenericArg`s.
+ //
+ // Transmuting from `Ty<'tcx>` to `GenericArg<'tcx>` is sound
+ // as explained in `ty_slice_as_generic_arg`. With this,
+ // we guarantee that even when transmuting between `List<Ty<'tcx>>`
+ // and `List<GenericArg<'tcx>>`, the uniqueness requirement for
+ // lists is upheld.
+ let substs = self._intern_substs(ty::subst::ty_slice_as_generic_args(ts));
+ substs.try_as_type_list().unwrap()
+ }
+ }
+
+ pub fn intern_substs(self, ts: &[GenericArg<'tcx>]) -> &'tcx List<GenericArg<'tcx>> {
+ if ts.is_empty() { List::empty() } else { self._intern_substs(ts) }
+ }
+
+ pub fn intern_projs(self, ps: &[ProjectionKind]) -> &'tcx List<ProjectionKind> {
+ if ps.is_empty() { List::empty() } else { self._intern_projs(ps) }
+ }
+
+ pub fn intern_place_elems(self, ts: &[PlaceElem<'tcx>]) -> &'tcx List<PlaceElem<'tcx>> {
+ if ts.is_empty() { List::empty() } else { self._intern_place_elems(ts) }
+ }
+
+ pub fn intern_canonical_var_infos(
+ self,
+ ts: &[CanonicalVarInfo<'tcx>],
+ ) -> CanonicalVarInfos<'tcx> {
+ if ts.is_empty() { List::empty() } else { self._intern_canonical_var_infos(ts) }
+ }
+
+ pub fn intern_bound_variable_kinds(
+ self,
+ ts: &[ty::BoundVariableKind],
+ ) -> &'tcx List<ty::BoundVariableKind> {
+ if ts.is_empty() { List::empty() } else { self._intern_bound_variable_kinds(ts) }
+ }
+
+ pub fn mk_fn_sig<I>(
+ self,
+ inputs: I,
+ output: I::Item,
+ c_variadic: bool,
+ unsafety: hir::Unsafety,
+ abi: abi::Abi,
+ ) -> <I::Item as InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>>::Output
+ where
+ I: Iterator<Item: InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>>,
+ {
+ inputs.chain(iter::once(output)).intern_with(|xs| ty::FnSig {
+ inputs_and_output: self.intern_type_list(xs),
+ c_variadic,
+ unsafety,
+ abi,
+ })
+ }
+
+ pub fn mk_poly_existential_predicates<
+ I: InternAs<
+ [ty::Binder<'tcx, ExistentialPredicate<'tcx>>],
+ &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>,
+ >,
+ >(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_poly_existential_predicates(xs))
+ }
+
+ pub fn mk_predicates<I: InternAs<[Predicate<'tcx>], &'tcx List<Predicate<'tcx>>>>(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_predicates(xs))
+ }
+
+ pub fn mk_type_list<I: InternAs<[Ty<'tcx>], &'tcx List<Ty<'tcx>>>>(self, iter: I) -> I::Output {
+ iter.intern_with(|xs| self.intern_type_list(xs))
+ }
+
+ pub fn mk_substs<I: InternAs<[GenericArg<'tcx>], &'tcx List<GenericArg<'tcx>>>>(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_substs(xs))
+ }
+
+ pub fn mk_place_elems<I: InternAs<[PlaceElem<'tcx>], &'tcx List<PlaceElem<'tcx>>>>(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_place_elems(xs))
+ }
+
+ pub fn mk_substs_trait(self, self_ty: Ty<'tcx>, rest: &[GenericArg<'tcx>]) -> SubstsRef<'tcx> {
+ self.mk_substs(iter::once(self_ty.into()).chain(rest.iter().cloned()))
+ }
+
+ pub fn mk_bound_variable_kinds<
+ I: InternAs<[ty::BoundVariableKind], &'tcx List<ty::BoundVariableKind>>,
+ >(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_bound_variable_kinds(xs))
+ }
+
+ /// Walks upwards from `id` to find a node which might change lint levels with attributes.
+ /// It stops at `bound` and just returns it if reached.
+ pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId {
+ let hir = self.hir();
+ loop {
+ if id == bound {
+ return bound;
+ }
+
+ if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
+ return id;
+ }
+ let next = hir.get_parent_node(id);
+ if next == id {
+ bug!("lint traversal reached the root of the crate");
+ }
+ id = next;
+ }
+ }
+
+ pub fn lint_level_at_node(
+ self,
+ lint: &'static Lint,
+ mut id: hir::HirId,
+ ) -> (Level, LintLevelSource) {
+ let sets = self.lint_levels(());
+ loop {
+ if let Some(pair) = sets.level_and_source(lint, id, self.sess) {
+ return pair;
+ }
+ let next = self.hir().get_parent_node(id);
+ if next == id {
+ bug!("lint traversal reached the root of the crate");
+ }
+ id = next;
+ }
+ }
+
+ /// Emit a lint at `span` from a lint struct (some type that implements `DecorateLint`,
+ /// typically generated by `#[derive(LintDiagnostic)]`).
+ pub fn emit_spanned_lint(
+ self,
+ lint: &'static Lint,
+ hir_id: HirId,
+ span: impl Into<MultiSpan>,
+ decorator: impl for<'a> DecorateLint<'a, ()>,
+ ) {
+ self.struct_span_lint_hir(lint, hir_id, span, |diag| decorator.decorate_lint(diag))
+ }
+
+ pub fn struct_span_lint_hir(
+ self,
+ lint: &'static Lint,
+ hir_id: HirId,
+ span: impl Into<MultiSpan>,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ let (level, src) = self.lint_level_at_node(lint, hir_id);
+ struct_lint_level(self.sess, lint, level, src, Some(span.into()), decorate);
+ }
+
+ /// Emit a lint from a lint struct (some type that implements `DecorateLint`, typically
+ /// generated by `#[derive(LintDiagnostic)]`).
+ pub fn emit_lint(
+ self,
+ lint: &'static Lint,
+ id: HirId,
+ decorator: impl for<'a> DecorateLint<'a, ()>,
+ ) {
+ self.struct_lint_node(lint, id, |diag| decorator.decorate_lint(diag))
+ }
+
+ pub fn struct_lint_node(
+ self,
+ lint: &'static Lint,
+ id: HirId,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ let (level, src) = self.lint_level_at_node(lint, id);
+ struct_lint_level(self.sess, lint, level, src, None, decorate);
+ }
+
+ pub fn in_scope_traits(self, id: HirId) -> Option<&'tcx [TraitCandidate]> {
+ let map = self.in_scope_traits_map(id.owner)?;
+ let candidates = map.get(&id.local_id)?;
+ Some(&*candidates)
+ }
+
+ pub fn named_region(self, id: HirId) -> Option<resolve_lifetime::Region> {
+ debug!(?id, "named_region");
+ self.named_region_map(id.owner).and_then(|map| map.get(&id.local_id).cloned())
+ }
+
+ pub fn is_late_bound(self, id: HirId) -> bool {
+ self.is_late_bound_map(id.owner).map_or(false, |set| {
+ let def_id = self.hir().local_def_id(id);
+ set.contains(&def_id)
+ })
+ }
+
+ pub fn late_bound_vars(self, id: HirId) -> &'tcx List<ty::BoundVariableKind> {
+ self.mk_bound_variable_kinds(
+ self.late_bound_vars_map(id.owner)
+ .and_then(|map| map.get(&id.local_id).cloned())
+ .unwrap_or_else(|| {
+ bug!("No bound vars found for {:?} ({:?})", self.hir().node_to_string(id), id)
+ })
+ .iter(),
+ )
+ }
+
+ /// Returns whether `def_id` counts as a const fn in the current crate, considering all
+ /// active feature gates.
+ pub fn is_const_fn(self, def_id: DefId) -> bool {
+ if self.is_const_fn_raw(def_id) {
+ match self.lookup_const_stability(def_id) {
+ Some(stability) if stability.is_const_unstable() => {
+ // The function has a `rustc_const_unstable` attribute; check whether the
+ // user has enabled the corresponding feature gate.
+ self.features()
+ .declared_lib_features
+ .iter()
+ .any(|&(sym, _)| sym == stability.feature)
+ }
+ // Functions without const stability are either stable, user-written const fns,
+ // or the user is using feature gates; in either case we don't need to check
+ // anything further and treat the function as const.
+ _ => true,
+ }
+ } else {
+ false
+ }
+ }
+
+ /// Whether the trait impl is marked const. This does not consider stability or feature gates.
+ pub fn is_const_trait_impl_raw(self, def_id: DefId) -> bool {
+ let Some(local_def_id) = def_id.as_local() else { return false };
+ let hir_id = self.local_def_id_to_hir_id(local_def_id);
+ let node = self.hir().get(hir_id);
+
+ matches!(
+ node,
+ hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
+ ..
+ })
+ )
+ }
+}
+
+impl<'tcx> TyCtxtAt<'tcx> {
+ /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
+ #[track_caller]
+ pub fn ty_error(self) -> Ty<'tcx> {
+ self.tcx.ty_error_with_message(self.span, "TyKind::Error constructed but no error reported")
+ }
+
+ /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` with the given `msg` to
+ /// ensure it gets used.
+ #[track_caller]
+ pub fn ty_error_with_message(self, msg: &str) -> Ty<'tcx> {
+ self.tcx.ty_error_with_message(self.span, msg)
+ }
+}
+
+// We are comparing types with different invariant lifetimes, so `ptr::eq`
+// won't work for us.
+fn ptr_eq<T, U>(t: *const T, u: *const U) -> bool {
+ t as *const () == u as *const ()
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ providers.resolutions = |tcx, ()| &tcx.untracked_resolutions;
+ providers.resolver_for_lowering = |tcx, ()| &tcx.untracked_resolver_for_lowering;
+ providers.module_reexports =
+ |tcx, id| tcx.resolutions(()).reexport_map.get(&id).map(|v| &v[..]);
+ providers.crate_name = |tcx, id| {
+ assert_eq!(id, LOCAL_CRATE);
+ tcx.crate_name
+ };
+ providers.maybe_unused_trait_imports =
+ |tcx, ()| &tcx.resolutions(()).maybe_unused_trait_imports;
+ providers.maybe_unused_extern_crates =
+ |tcx, ()| &tcx.resolutions(()).maybe_unused_extern_crates[..];
+ providers.names_imported_by_glob_use = |tcx, id| {
+ tcx.arena.alloc(tcx.resolutions(()).glob_map.get(&id).cloned().unwrap_or_default())
+ };
+
+ providers.extern_mod_stmt_cnum =
+ |tcx, id| tcx.resolutions(()).extern_crate_map.get(&id).cloned();
+ providers.output_filenames = |tcx, ()| &tcx.output_filenames;
+ providers.features_query = |tcx, ()| tcx.sess.features_untracked();
+ providers.is_panic_runtime = |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ tcx.sess.contains_name(tcx.hir().krate_attrs(), sym::panic_runtime)
+ };
+ providers.is_compiler_builtins = |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ tcx.sess.contains_name(tcx.hir().krate_attrs(), sym::compiler_builtins)
+ };
+ providers.has_panic_handler = |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ // We want to check if the panic handler was defined in this crate
+ tcx.lang_items().panic_impl().map_or(false, |did| did.is_local())
+ };
+}
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
new file mode 100644
index 000000000..dd2f43210
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -0,0 +1,501 @@
+//! Diagnostics-related methods for `Ty`.
+
+use std::ops::ControlFlow;
+
+use crate::ty::{
+ visit::TypeVisitable, Const, ConstKind, DefIdTree, ExistentialPredicate, InferConst, InferTy,
+ PolyTraitPredicate, Ty, TyCtxt, TypeSuperVisitable, TypeVisitor,
+};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{Applicability, Diagnostic, DiagnosticArgValue, IntoDiagnosticArg};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::WherePredicate;
+use rustc_span::Span;
+use rustc_type_ir::sty::TyKind::*;
+
+impl<'tcx> IntoDiagnosticArg for Ty<'tcx> {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ format!("{}", self).into_diagnostic_arg()
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ /// Similar to `Ty::is_primitive`, but also considers inferred numeric values to be primitive.
+ pub fn is_primitive_ty(self) -> bool {
+ matches!(
+ self.kind(),
+ Bool | Char
+ | Str
+ | Int(_)
+ | Uint(_)
+ | Float(_)
+ | Infer(
+ InferTy::IntVar(_)
+ | InferTy::FloatVar(_)
+ | InferTy::FreshIntTy(_)
+ | InferTy::FreshFloatTy(_)
+ )
+ )
+ }
+
+ /// Whether the type is succinctly representable as a type instead of just referred to with a
+ /// description in error messages. This is used in the main error message.
+ pub fn is_simple_ty(self) -> bool {
+ match self.kind() {
+ Bool
+ | Char
+ | Str
+ | Int(_)
+ | Uint(_)
+ | Float(_)
+ | Infer(
+ InferTy::IntVar(_)
+ | InferTy::FloatVar(_)
+ | InferTy::FreshIntTy(_)
+ | InferTy::FreshFloatTy(_),
+ ) => true,
+ Ref(_, x, _) | Array(x, _) | Slice(x) => x.peel_refs().is_simple_ty(),
+ Tuple(tys) if tys.is_empty() => true,
+ _ => false,
+ }
+ }
+
+ /// Whether the type is succinctly representable as a type instead of just referred to with a
+ /// description in error messages. This is used in the primary span label. Beyond what
+ /// `is_simple_ty` includes, it also accepts ADTs with no type arguments and references to
+ /// ADTs with no type arguments.
+ pub fn is_simple_text(self) -> bool {
+ match self.kind() {
+ Adt(_, substs) => substs.non_erasable_generics().next().is_none(),
+ Ref(_, ty, _) => ty.is_simple_text(),
+ _ => self.is_simple_ty(),
+ }
+ }
+}
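+
+// Illustrative readings of the predicates above (not tests from the source):
+// `bool`, `&str`, and `[i32]` are all "simple", while `Vec<u8>` is not;
+// `is_simple_text` additionally accepts ADTs without type arguments, so a
+// unit struct `Foo` (or `&Foo`) qualifies there.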
+
+pub trait IsSuggestable<'tcx> {
+ /// Whether this makes sense to suggest in a diagnostic.
+ ///
+ /// We filter out certain types and constants since they don't provide
+ /// meaningful rendered suggestions when pretty-printed. We leave some
+ /// nonsense, such as region vars, since those render as `'_` and are
+ /// usually okay to reinterpret as elided lifetimes.
+ ///
+ /// Only if `infer_suggestable` is true do we consider type and const
+ /// inference variables to be suggestable.
+ fn is_suggestable(self, tcx: TyCtxt<'tcx>, infer_suggestable: bool) -> bool;
+}
+
+impl<'tcx, T> IsSuggestable<'tcx> for T
+where
+ T: TypeVisitable<'tcx>,
+{
+ fn is_suggestable(self, tcx: TyCtxt<'tcx>, infer_suggestable: bool) -> bool {
+ self.visit_with(&mut IsSuggestableVisitor { tcx, infer_suggestable }).is_continue()
+ }
+}
+
+pub fn suggest_arbitrary_trait_bound<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: &hir::Generics<'_>,
+ err: &mut Diagnostic,
+ trait_pred: PolyTraitPredicate<'tcx>,
+) -> bool {
+ if !trait_pred.is_suggestable(tcx, false) {
+ return false;
+ }
+
+ let param_name = trait_pred.skip_binder().self_ty().to_string();
+ let constraint = trait_pred.print_modifiers_and_trait_path().to_string();
+ let param = generics.params.iter().find(|p| p.name.ident().as_str() == param_name);
+
+ // Skip: there is a param named `Self`.
+ if param.is_some() && param_name == "Self" {
+ return false;
+ }
+
+ // Suggest a where clause bound for a non-type parameter.
+ err.span_suggestion_verbose(
+ generics.tail_span_for_predicate_suggestion(),
+ &format!(
+ "consider {} `where` clause, but there might be an alternative better way to express \
+ this requirement",
+ if generics.where_clause_span.is_empty() { "introducing a" } else { "extending the" },
+ ),
+ format!("{} {}: {}", generics.add_where_or_trailing_comma(), param_name, constraint),
+ Applicability::MaybeIncorrect,
+ );
+ true
+}
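+
+// A sketch of the user code this targets (assumed, not taken from a test):
+// for `fn dup<T>(x: T) -> (T, T) { (x.clone(), x) }` with a `trait_pred` of
+// `T: Clone`, the suggestion appends `where T: Clone` (or `, T: Clone` when a
+// `where` clause already exists) at the end of the generics.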
+
+#[derive(Debug)]
+enum SuggestChangingConstraintsMessage<'a> {
+ RestrictBoundFurther,
+ RestrictType { ty: &'a str },
+ RestrictTypeFurther { ty: &'a str },
+ RemovingQSized,
+}
+
+fn suggest_removing_unsized_bound(
+ tcx: TyCtxt<'_>,
+ generics: &hir::Generics<'_>,
+ suggestions: &mut Vec<(Span, String, SuggestChangingConstraintsMessage<'_>)>,
+ param: &hir::GenericParam<'_>,
+ def_id: Option<DefId>,
+) {
+ // See if there's a `?Sized` bound that can be removed and, if so, suggest
+ // removing it. First look at the `where` clause because we can have
+ // `where T: ?Sized`, then look at params.
+ let param_def_id = tcx.hir().local_def_id(param.hir_id);
+ for (where_pos, predicate) in generics.predicates.iter().enumerate() {
+ let WherePredicate::BoundPredicate(predicate) = predicate else {
+ continue;
+ };
+ if !predicate.is_param_bound(param_def_id.to_def_id()) {
+ continue;
+ };
+
+ for (pos, bound) in predicate.bounds.iter().enumerate() {
+ let hir::GenericBound::Trait(poly, hir::TraitBoundModifier::Maybe) = bound else {
+ continue;
+ };
+ if poly.trait_ref.trait_def_id() != def_id {
+ continue;
+ }
+ let sp = generics.span_for_bound_removal(where_pos, pos);
+ suggestions.push((
+ sp,
+ String::new(),
+ SuggestChangingConstraintsMessage::RemovingQSized,
+ ));
+ }
+ }
+}
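+
+// Hypothetical input for the walk above: given `fn take<T: ?Sized>(x: &T)` or
+// `fn take<T>(x: &T) where T: ?Sized`, the `?Sized` bound is what gets
+// recorded for removal, with `span_for_bound_removal` expected to cover the
+// bound plus whatever separators have to disappear with it.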
+
+/// Suggest restricting a type param with a new bound.
+pub fn suggest_constraining_type_param(
+ tcx: TyCtxt<'_>,
+ generics: &hir::Generics<'_>,
+ err: &mut Diagnostic,
+ param_name: &str,
+ constraint: &str,
+ def_id: Option<DefId>,
+) -> bool {
+ suggest_constraining_type_params(
+ tcx,
+ generics,
+ err,
+ [(param_name, constraint, def_id)].into_iter(),
+ )
+}
+
+/// Suggest restricting a type param with a new bound.
+pub fn suggest_constraining_type_params<'a>(
+ tcx: TyCtxt<'_>,
+ generics: &hir::Generics<'_>,
+ err: &mut Diagnostic,
+ param_names_and_constraints: impl Iterator<Item = (&'a str, &'a str, Option<DefId>)>,
+) -> bool {
+ let mut grouped = FxHashMap::default();
+ param_names_and_constraints.for_each(|(param_name, constraint, def_id)| {
+ grouped.entry(param_name).or_insert(Vec::new()).push((constraint, def_id))
+ });
+
+ let mut applicability = Applicability::MachineApplicable;
+ let mut suggestions = Vec::new();
+
+ for (param_name, mut constraints) in grouped {
+ let param = generics.params.iter().find(|p| p.name.ident().as_str() == param_name);
+ let Some(param) = param else { return false };
+
+ {
+ let mut sized_constraints =
+ constraints.drain_filter(|(_, def_id)| *def_id == tcx.lang_items().sized_trait());
+ if let Some((constraint, def_id)) = sized_constraints.next() {
+ applicability = Applicability::MaybeIncorrect;
+
+ err.span_label(
+ param.span,
+ &format!("this type parameter needs to be `{}`", constraint),
+ );
+ suggest_removing_unsized_bound(tcx, generics, &mut suggestions, param, def_id);
+ }
+ }
+
+ if constraints.is_empty() {
+ continue;
+ }
+
+ let mut constraint = constraints.iter().map(|&(c, _)| c).collect::<Vec<_>>();
+ constraint.sort();
+ constraint.dedup();
+ let constraint = constraint.join(" + ");
+ let mut suggest_restrict = |span, bound_list_non_empty| {
+ suggestions.push((
+ span,
+ if bound_list_non_empty {
+ format!(" + {}", constraint)
+ } else {
+ format!(" {}", constraint)
+ },
+ SuggestChangingConstraintsMessage::RestrictBoundFurther,
+ ))
+ };
+
+ // When the type parameter has been provided bounds
+ //
+ // Message:
+ // fn foo<T>(t: T) where T: Foo { ... }
+ // ^^^^^^
+ // |
+ // help: consider further restricting this bound with `+ Bar`
+ //
+ // Suggestion:
+ // fn foo<T>(t: T) where T: Foo { ... }
+ // ^
+ // |
+ // replace with: ` + Bar`
+ //
+ // Or, if the user has provided some bounds, suggest restricting them:
+ //
+ // fn foo<T: Foo>(t: T) { ... }
+ // ---
+ // |
+ // help: consider further restricting this bound with `+ Bar`
+ //
+ // Suggestion for tools in this case is:
+ //
+ // fn foo<T: Foo>(t: T) { ... }
+ // --
+ // |
+ // replace with: `T: Bar +`
+ let param_def_id = tcx.hir().local_def_id(param.hir_id);
+ if let Some(span) = generics.bounds_span_for_suggestions(param_def_id) {
+ suggest_restrict(span, true);
+ continue;
+ }
+
+ if generics.has_where_clause_predicates {
+ // This part is a bit tricky, because with a `where` clause the user
+ // can provide zero, one, or many bounds for the same type parameter,
+ // so we have the following cases to consider:
+ //
+ // When the type parameter has been provided zero bounds
+ //
+ // Message:
+ // fn foo<X, Y>(x: X, y: Y) where Y: Foo { ... }
+ // - help: consider restricting this type parameter with `where X: Bar`
+ //
+ // Suggestion:
+ // fn foo<X, Y>(x: X, y: Y) where Y: Foo { ... }
+ // - insert: `, X: Bar`
+ suggestions.push((
+ generics.tail_span_for_predicate_suggestion(),
+ constraints
+ .iter()
+ .map(|&(constraint, _)| format!(", {}: {}", param_name, constraint))
+ .collect::<String>(),
+ SuggestChangingConstraintsMessage::RestrictTypeFurther { ty: param_name },
+ ));
+ continue;
+ }
+
+ // Additionally, there may be no `where` clause but the generic parameter has a default:
+ //
+ // Message:
+ // trait Foo<T=()> {... }
+ // - help: consider further restricting this type parameter with `where T: Zar`
+ //
+ // Suggestion:
+ // trait Foo<T=()> {... }
+ // - insert: `where T: Zar`
+ if matches!(param.kind, hir::GenericParamKind::Type { default: Some(_), .. }) {
+ // Suggest a bound, but there is no existing `where` clause *and* the type param has a
+ // default (`<T=Foo>`), so we suggest adding `where T: Bar`.
+ suggestions.push((
+ generics.tail_span_for_predicate_suggestion(),
+ format!(" where {}: {}", param_name, constraint),
+ SuggestChangingConstraintsMessage::RestrictTypeFurther { ty: param_name },
+ ));
+ continue;
+ }
+
+ // If the user has provided a colon, don't suggest adding another:
+ //
+ // fn foo<T:>(t: T) { ... }
+ // - insert: consider restricting this type parameter with `T: Foo`
+ if let Some(colon_span) = param.colon_span {
+ suggestions.push((
+ colon_span.shrink_to_hi(),
+ format!(" {}", constraint),
+ SuggestChangingConstraintsMessage::RestrictType { ty: param_name },
+ ));
+ continue;
+ }
+
+ // If the user hasn't provided any bounds, suggest adding a new one:
+ //
+ // fn foo<T>(t: T) { ... }
+ // - help: consider restricting this type parameter with `T: Foo`
+ suggestions.push((
+ param.span.shrink_to_hi(),
+ format!(": {}", constraint),
+ SuggestChangingConstraintsMessage::RestrictType { ty: param_name },
+ ));
+ }
+
+ if suggestions.len() == 1 {
+ let (span, suggestion, msg) = suggestions.pop().unwrap();
+
+ let s;
+ let msg = match msg {
+ SuggestChangingConstraintsMessage::RestrictBoundFurther => {
+ "consider further restricting this bound"
+ }
+ SuggestChangingConstraintsMessage::RestrictType { ty } => {
+ s = format!("consider restricting type parameter `{}`", ty);
+ &s
+ }
+ SuggestChangingConstraintsMessage::RestrictTypeFurther { ty } => {
+ s = format!("consider further restricting type parameter `{}`", ty);
+ &s
+ }
+ SuggestChangingConstraintsMessage::RemovingQSized => {
+ "consider removing the `?Sized` bound to make the type parameter `Sized`"
+ }
+ };
+
+ err.span_suggestion_verbose(span, msg, suggestion, applicability);
+ } else if suggestions.len() > 1 {
+ err.multipart_suggestion_verbose(
+ "consider restricting type parameters",
+ suggestions.into_iter().map(|(span, suggestion, _)| (span, suggestion)).collect(),
+ applicability,
+ );
+ }
+
+ true
+}
+
+/// Collect all types that have an implicit `'static` obligation that we could suggest `'_` for.
+pub struct TraitObjectVisitor<'tcx>(pub Vec<&'tcx hir::Ty<'tcx>>, pub crate::hir::map::Map<'tcx>);
+
+impl<'v> hir::intravisit::Visitor<'v> for TraitObjectVisitor<'v> {
+ fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
+ match ty.kind {
+ hir::TyKind::TraitObject(
+ _,
+ hir::Lifetime {
+ name:
+ hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Static,
+ ..
+ },
+ _,
+ ) => {
+ self.0.push(ty);
+ }
+ hir::TyKind::OpaqueDef(item_id, _) => {
+ self.0.push(ty);
+ let item = self.1.item(item_id);
+ hir::intravisit::walk_item(self, item);
+ }
+ _ => {}
+ }
+ hir::intravisit::walk_ty(self, ty);
+ }
+}
+
+/// Collect the spans of all `'static` lifetimes (including implicit object lifetime defaults) that we could suggest `'_` for.
+pub struct StaticLifetimeVisitor<'tcx>(pub Vec<Span>, pub crate::hir::map::Map<'tcx>);
+
+impl<'v> hir::intravisit::Visitor<'v> for StaticLifetimeVisitor<'v> {
+ fn visit_lifetime(&mut self, lt: &'v hir::Lifetime) {
+ if let hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Static =
+ lt.name
+ {
+ self.0.push(lt.span);
+ }
+ }
+}
+
+pub struct IsSuggestableVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ infer_suggestable: bool,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for IsSuggestableVisitor<'tcx> {
+ type BreakTy = ();
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match t.kind() {
+ Infer(InferTy::TyVar(_)) if self.infer_suggestable => {}
+
+ FnDef(..)
+ | Closure(..)
+ | Infer(..)
+ | Generator(..)
+ | GeneratorWitness(..)
+ | Bound(_, _)
+ | Placeholder(_)
+ | Error(_) => {
+ return ControlFlow::Break(());
+ }
+
+ Opaque(did, _) => {
+ let parent = self.tcx.parent(*did);
+ if let hir::def::DefKind::TyAlias | hir::def::DefKind::AssocTy = self.tcx.def_kind(parent)
+ && let Opaque(parent_did, _) = self.tcx.type_of(parent).kind()
+ && parent_did == did
+ {
+ // Okay
+ } else {
+ return ControlFlow::Break(());
+ }
+ }
+
+ Dynamic(dty, _) => {
+ for pred in *dty {
+ match pred.skip_binder() {
+ ExistentialPredicate::Trait(_) | ExistentialPredicate::Projection(_) => {
+ // Okay
+ }
+ _ => return ControlFlow::Break(()),
+ }
+ }
+ }
+
+ Param(param) => {
+ // FIXME: It would be nice to make this not use string manipulation,
+ // but it's pretty hard to do this, since `ty::ParamTy` is missing
+ // sufficient info to determine if it is synthetic, and we don't
+ // always have a convenient way of getting `ty::Generics` at the call
+ // sites we invoke `IsSuggestable::is_suggestable`.
+ if param.name.as_str().starts_with("impl ") {
+ return ControlFlow::Break(());
+ }
+ }
+
+ _ => {}
+ }
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match c.kind() {
+ ConstKind::Infer(InferConst::Var(_)) if self.infer_suggestable => {}
+
+ ConstKind::Infer(..)
+ | ConstKind::Bound(..)
+ | ConstKind::Placeholder(..)
+ | ConstKind::Error(..) => {
+ return ControlFlow::Break(());
+ }
+ _ => {}
+ }
+
+ c.super_visit_with(self)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/erase_regions.rs b/compiler/rustc_middle/src/ty/erase_regions.rs
new file mode 100644
index 000000000..3226950e7
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/erase_regions.rs
@@ -0,0 +1,74 @@
+use crate::mir;
+use crate::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{self, Ty, TyCtxt, TypeFlags};
+
+pub(super) fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { erase_regions_ty, ..*providers };
+}
+
+fn erase_regions_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+ // N.B., use `super_fold_with` here. If we used `fold_with`, it
+ // could invoke the `erase_regions_ty` query recursively.
+ ty.super_fold_with(&mut RegionEraserVisitor { tcx })
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Returns an equivalent value with all free regions removed (note
+ /// that late-bound regions remain, because they are important for
+ /// subtyping, but they are anonymized and normalized as well).
+ pub fn erase_regions<T>(self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ // If there's nothing to erase, avoid performing the query at all.
+ if !value.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND | TypeFlags::HAS_FREE_REGIONS) {
+ return value;
+ }
+ debug!("erase_regions({:?})", value);
+ let value1 = value.fold_with(&mut RegionEraserVisitor { tcx: self });
+ debug!("erase_regions = {:?}", value1);
+ value1
+ }
+}
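+
+// A sketch of the intended behavior (an illustrative example, not a doctest):
+// erasing regions in `&'a i32` yields `&'erased i32`, while in
+// `for<'b> fn(&'b i32)` the late-bound `'b` survives (anonymized), since
+// binders still matter for subtyping.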
+
+struct RegionEraserVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> TypeFolder<'tcx> for RegionEraserVisitor<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if ty.needs_infer() { ty.super_fold_with(self) } else { self.tcx.erase_regions_ty(ty) }
+ }
+
+ fn fold_binder<T>(&mut self, t: ty::Binder<'tcx, T>) -> ty::Binder<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let u = self.tcx.anonymize_bound_vars(t);
+ u.super_fold_with(self)
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ // because late-bound regions affect subtyping, we can't
+ // erase the bound/free distinction, but we can replace
+ // all free regions with 'erased.
+ //
+ // Note that we *CAN* replace early-bound regions -- the
+ // type system never "sees" those, they get substituted
+ // away. In codegen, they will always be erased to 'erased
+ // whenever a substitution occurs.
+ match *r {
+ ty::ReLateBound(..) => r,
+ _ => self.tcx.lifetimes.re_erased,
+ }
+ }
+
+ fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
+ c.super_fold_with(self)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs
new file mode 100644
index 000000000..4b0bc3c11
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/error.rs
@@ -0,0 +1,965 @@
+use crate::traits::{ObligationCause, ObligationCauseCode};
+use crate::ty::diagnostics::suggest_constraining_type_param;
+use crate::ty::print::{FmtPrinter, Printer};
+use crate::ty::{self, BoundRegionKind, Region, Ty, TyCtxt};
+use rustc_errors::Applicability::{MachineApplicable, MaybeIncorrect};
+use rustc_errors::{pluralize, Diagnostic, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{BytePos, Span};
+use rustc_target::spec::abi;
+
+use std::borrow::Cow;
+use std::fmt;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable)]
+pub struct ExpectedFound<T> {
+ pub expected: T,
+ pub found: T,
+}
+
+impl<T> ExpectedFound<T> {
+ pub fn new(a_is_expected: bool, a: T, b: T) -> Self {
+ if a_is_expected {
+ ExpectedFound { expected: a, found: b }
+ } else {
+ ExpectedFound { expected: b, found: a }
+ }
+ }
+}
+
+// Data structures used in type unification
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable)]
+pub enum TypeError<'tcx> {
+ Mismatch,
+ ConstnessMismatch(ExpectedFound<ty::BoundConstness>),
+ PolarityMismatch(ExpectedFound<ty::ImplPolarity>),
+ UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
+ AbiMismatch(ExpectedFound<abi::Abi>),
+ Mutability,
+ ArgumentMutability(usize),
+ TupleSize(ExpectedFound<usize>),
+ FixedArraySize(ExpectedFound<u64>),
+ ArgCount,
+ FieldMisMatch(Symbol, Symbol),
+
+ RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
+ RegionsInsufficientlyPolymorphic(BoundRegionKind, Region<'tcx>),
+ RegionsOverlyPolymorphic(BoundRegionKind, Region<'tcx>),
+ RegionsPlaceholderMismatch,
+
+ Sorts(ExpectedFound<Ty<'tcx>>),
+ ArgumentSorts(ExpectedFound<Ty<'tcx>>, usize),
+ IntMismatch(ExpectedFound<ty::IntVarValue>),
+ FloatMismatch(ExpectedFound<ty::FloatTy>),
+ Traits(ExpectedFound<DefId>),
+ VariadicMismatch(ExpectedFound<bool>),
+
+ /// Instantiating a type variable with the given type would have
+ /// created a cycle (because it appears somewhere within that
+ /// type).
+ CyclicTy(Ty<'tcx>),
+ CyclicConst(ty::Const<'tcx>),
+ ProjectionMismatched(ExpectedFound<DefId>),
+ ExistentialMismatch(
+ ExpectedFound<&'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>>,
+ ),
+ ObjectUnsafeCoercion(DefId),
+ ConstMismatch(ExpectedFound<ty::Const<'tcx>>),
+
+ IntrinsicCast,
+ /// Safe `#[target_feature]` functions are not assignable to safe function pointers.
+ TargetFeatureCast(DefId),
+}
+
+/// Explains the source of a type error in a short, human-readable way. This is meant to be placed
+/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
+/// afterwards to present additional details, particularly when it comes to lifetime-related
+/// errors.
+impl<'tcx> fmt::Display for TypeError<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use self::TypeError::*;
+ fn report_maybe_different(
+ f: &mut fmt::Formatter<'_>,
+ expected: &str,
+ found: &str,
+ ) -> fmt::Result {
+ // A naive approach to making sure that we're not reporting silly errors such as:
+ // (expected closure, found closure).
+ if expected == found {
+ write!(f, "expected {}, found a different {}", expected, found)
+ } else {
+ write!(f, "expected {}, found {}", expected, found)
+ }
+ }
+
+ let br_string = |br: ty::BoundRegionKind| match br {
+ ty::BrNamed(_, name) => format!(" {}", name),
+ _ => String::new(),
+ };
+
+ match *self {
+ CyclicTy(_) => write!(f, "cyclic type of infinite size"),
+ CyclicConst(_) => write!(f, "encountered a self-referencing constant"),
+ Mismatch => write!(f, "types differ"),
+ ConstnessMismatch(values) => {
+ write!(f, "expected {} bound, found {} bound", values.expected, values.found)
+ }
+ PolarityMismatch(values) => {
+ write!(f, "expected {} polarity, found {} polarity", values.expected, values.found)
+ }
+ UnsafetyMismatch(values) => {
+ write!(f, "expected {} fn, found {} fn", values.expected, values.found)
+ }
+ AbiMismatch(values) => {
+ write!(f, "expected {} fn, found {} fn", values.expected, values.found)
+ }
+ ArgumentMutability(_) | Mutability => write!(f, "types differ in mutability"),
+ TupleSize(values) => write!(
+ f,
+ "expected a tuple with {} element{}, found one with {} element{}",
+ values.expected,
+ pluralize!(values.expected),
+ values.found,
+ pluralize!(values.found)
+ ),
+ FixedArraySize(values) => write!(
+ f,
+ "expected an array with a fixed size of {} element{}, found one with {} element{}",
+ values.expected,
+ pluralize!(values.expected),
+ values.found,
+ pluralize!(values.found)
+ ),
+ ArgCount => write!(f, "incorrect number of function parameters"),
+ FieldMisMatch(adt, field) => write!(f, "field type mismatch: {}.{}", adt, field),
+ RegionsDoesNotOutlive(..) => write!(f, "lifetime mismatch"),
+ // Actually naming the region here is a bit confusing because context is lacking
+ RegionsInsufficientlyPolymorphic(..) => {
+ write!(f, "one type is more general than the other")
+ }
+ RegionsOverlyPolymorphic(br, _) => write!(
+ f,
+ "expected concrete lifetime, found bound lifetime parameter{}",
+ br_string(br)
+ ),
+ RegionsPlaceholderMismatch => write!(f, "one type is more general than the other"),
+ ArgumentSorts(values, _) | Sorts(values) => ty::tls::with(|tcx| {
+ report_maybe_different(
+ f,
+ &values.expected.sort_string(tcx),
+ &values.found.sort_string(tcx),
+ )
+ }),
+ Traits(values) => ty::tls::with(|tcx| {
+ report_maybe_different(
+ f,
+ &format!("trait `{}`", tcx.def_path_str(values.expected)),
+ &format!("trait `{}`", tcx.def_path_str(values.found)),
+ )
+ }),
+ IntMismatch(ref values) => {
+ let expected = match values.expected {
+ ty::IntVarValue::IntType(ty) => ty.name_str(),
+ ty::IntVarValue::UintType(ty) => ty.name_str(),
+ };
+ let found = match values.found {
+ ty::IntVarValue::IntType(ty) => ty.name_str(),
+ ty::IntVarValue::UintType(ty) => ty.name_str(),
+ };
+ write!(f, "expected `{}`, found `{}`", expected, found)
+ }
+ FloatMismatch(ref values) => {
+ write!(
+ f,
+ "expected `{}`, found `{}`",
+ values.expected.name_str(),
+ values.found.name_str()
+ )
+ }
+ VariadicMismatch(ref values) => write!(
+ f,
+ "expected {} fn, found {} function",
+ if values.expected { "variadic" } else { "non-variadic" },
+ if values.found { "variadic" } else { "non-variadic" }
+ ),
+ ProjectionMismatched(ref values) => ty::tls::with(|tcx| {
+ write!(
+ f,
+ "expected {}, found {}",
+ tcx.def_path_str(values.expected),
+ tcx.def_path_str(values.found)
+ )
+ }),
+ ExistentialMismatch(ref values) => report_maybe_different(
+ f,
+ &format!("trait `{}`", values.expected),
+ &format!("trait `{}`", values.found),
+ ),
+ ConstMismatch(ref values) => {
+ write!(f, "expected `{}`, found `{}`", values.expected, values.found)
+ }
+ IntrinsicCast => write!(f, "cannot coerce intrinsics to function pointers"),
+ TargetFeatureCast(_) => write!(
+ f,
+ "cannot coerce functions with `#[target_feature]` to safe function pointers"
+ ),
+ ObjectUnsafeCoercion(_) => write!(f, "coercion to object-unsafe trait object"),
+ }
+ }
+}
+
+impl<'tcx> TypeError<'tcx> {
+ pub fn must_include_note(&self) -> bool {
+ use self::TypeError::*;
+ match self {
+ CyclicTy(_) | CyclicConst(_) | UnsafetyMismatch(_) | ConstnessMismatch(_)
+ | PolarityMismatch(_) | Mismatch | AbiMismatch(_) | FixedArraySize(_)
+ | ArgumentSorts(..) | Sorts(_) | IntMismatch(_) | FloatMismatch(_)
+ | VariadicMismatch(_) | TargetFeatureCast(_) => false,
+
+ Mutability
+ | ArgumentMutability(_)
+ | TupleSize(_)
+ | ArgCount
+ | FieldMisMatch(..)
+ | RegionsDoesNotOutlive(..)
+ | RegionsInsufficientlyPolymorphic(..)
+ | RegionsOverlyPolymorphic(..)
+ | RegionsPlaceholderMismatch
+ | Traits(_)
+ | ProjectionMismatched(_)
+ | ExistentialMismatch(_)
+ | ConstMismatch(_)
+ | IntrinsicCast
+ | ObjectUnsafeCoercion(_) => true,
+ }
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ pub fn sort_string(self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
+ match *self.kind() {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => {
+ format!("`{}`", self).into()
+ }
+ ty::Tuple(ref tys) if tys.is_empty() => format!("`{}`", self).into(),
+
+ ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did())).into(),
+ ty::Foreign(def_id) => format!("extern type `{}`", tcx.def_path_str(def_id)).into(),
+ ty::Array(t, n) => {
+ if t.is_simple_ty() {
+ return format!("array `{}`", self).into();
+ }
+
+ let n = tcx.lift(n).unwrap();
+ if let ty::ConstKind::Value(v) = n.kind() {
+ if let Some(n) = v.try_to_machine_usize(tcx) {
+ return format!("array of {} element{}", n, pluralize!(n)).into();
+ }
+ }
+ "array".into()
+ }
+ ty::Slice(ty) if ty.is_simple_ty() => format!("slice `{}`", self).into(),
+ ty::Slice(_) => "slice".into(),
+ ty::RawPtr(_) => "*-ptr".into(),
+ ty::Ref(_, ty, mutbl) => {
+ let tymut = ty::TypeAndMut { ty, mutbl };
+ let tymut_string = tymut.to_string();
+ if tymut_string != "_"
+ && (ty.is_simple_text() || tymut_string.len() < "mutable reference".len())
+ {
+ format!("`&{}`", tymut_string).into()
+ } else {
+ // The type name is unknown (`_`), too long, or has type arguments.
+ match mutbl {
+ hir::Mutability::Mut => "mutable reference",
+ _ => "reference",
+ }
+ .into()
+ }
+ }
+ ty::FnDef(..) => "fn item".into(),
+ ty::FnPtr(_) => "fn pointer".into(),
+ ty::Dynamic(ref inner, ..) if let Some(principal) = inner.principal() => {
+ format!("trait object `dyn {}`", tcx.def_path_str(principal.def_id())).into()
+ }
+ ty::Dynamic(..) => "trait object".into(),
+ ty::Closure(..) => "closure".into(),
+ ty::Generator(def_id, ..) => tcx.generator_kind(def_id).unwrap().descr().into(),
+ ty::GeneratorWitness(..) => "generator witness".into(),
+ ty::Tuple(..) => "tuple".into(),
+ ty::Infer(ty::TyVar(_)) => "inferred type".into(),
+ ty::Infer(ty::IntVar(_)) => "integer".into(),
+ ty::Infer(ty::FloatVar(_)) => "floating-point number".into(),
+ ty::Placeholder(..) => "placeholder type".into(),
+ ty::Bound(..) => "bound type".into(),
+ ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
+ ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
+ ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
+ ty::Projection(_) => "associated type".into(),
+ ty::Param(p) => format!("type parameter `{}`", p).into(),
+ ty::Opaque(..) => "opaque type".into(),
+ ty::Error(_) => "type error".into(),
+ }
+ }
+
+ pub fn prefix_string(self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
+ match *self.kind() {
+ ty::Infer(_)
+ | ty::Error(_)
+ | ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Never => "type".into(),
+ ty::Tuple(ref tys) if tys.is_empty() => "unit type".into(),
+ ty::Adt(def, _) => def.descr().into(),
+ ty::Foreign(_) => "extern type".into(),
+ ty::Array(..) => "array".into(),
+ ty::Slice(_) => "slice".into(),
+ ty::RawPtr(_) => "raw pointer".into(),
+ ty::Ref(.., mutbl) => match mutbl {
+ hir::Mutability::Mut => "mutable reference",
+ _ => "reference",
+ }
+ .into(),
+ ty::FnDef(..) => "fn item".into(),
+ ty::FnPtr(_) => "fn pointer".into(),
+ ty::Dynamic(..) => "trait object".into(),
+ ty::Closure(..) => "closure".into(),
+ ty::Generator(def_id, ..) => tcx.generator_kind(def_id).unwrap().descr().into(),
+ ty::GeneratorWitness(..) => "generator witness".into(),
+ ty::Tuple(..) => "tuple".into(),
+ ty::Placeholder(..) => "higher-ranked type".into(),
+ ty::Bound(..) => "bound type variable".into(),
+ ty::Projection(_) => "associated type".into(),
+ ty::Param(_) => "type parameter".into(),
+ ty::Opaque(..) => "opaque type".into(),
+ }
+ }
+}
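+
+// Rough shapes of the strings produced above (assumed, not verified against
+// tests): `sort_string` renders `Vec<u8>` as something like "struct `Vec`",
+// while `prefix_string` yields "struct" for the same type and
+// "mutable reference" for `&mut String`.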
+
+impl<'tcx> TyCtxt<'tcx> {
+ pub fn note_and_explain_type_err(
+ self,
+ diag: &mut Diagnostic,
+ err: &TypeError<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ sp: Span,
+ body_owner_def_id: DefId,
+ ) {
+ use self::TypeError::*;
+ debug!("note_and_explain_type_err err={:?} cause={:?}", err, cause);
+ match err {
+ ArgumentSorts(values, _) | Sorts(values) => {
+ match (values.expected.kind(), values.found.kind()) {
+ (ty::Closure(..), ty::Closure(..)) => {
+ diag.note("no two closures, even if identical, have the same type");
+ diag.help("consider boxing your closure and/or using it as a trait object");
+ }
+ (ty::Opaque(..), ty::Opaque(..)) => {
+ // Issue #63167
+ diag.note("distinct uses of `impl Trait` result in different opaque types");
+ }
+ (ty::Float(_), ty::Infer(ty::IntVar(_)))
+ if let Ok(
+ // Issue #53280
+ snippet,
+ ) = self.sess.source_map().span_to_snippet(sp) =>
+ {
+ if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
+ diag.span_suggestion(
+ sp,
+ "use a float literal",
+ format!("{}.0", snippet),
+ MachineApplicable,
+ );
+ }
+ }
+ (ty::Param(expected), ty::Param(found)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let e_span = self.def_span(generics.type_param(expected, self).def_id);
+ if !sp.contains(e_span) {
+ diag.span_label(e_span, "expected type parameter");
+ }
+ let f_span = self.def_span(generics.type_param(found, self).def_id);
+ if !sp.contains(f_span) {
+ diag.span_label(f_span, "found type parameter");
+ }
+ diag.note(
+ "a type parameter was expected, but a different one was found; \
+ you might be missing a type parameter or trait bound",
+ );
+ diag.note(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch10-02-traits.html\
+ #traits-as-parameters",
+ );
+ }
+ (ty::Projection(_), ty::Projection(_)) => {
+ diag.note("an associated type was expected, but a different one was found");
+ }
+ (ty::Param(p), ty::Projection(proj)) | (ty::Projection(proj), ty::Param(p)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let p_span = self.def_span(generics.type_param(p, self).def_id);
+ if !sp.contains(p_span) {
+ diag.span_label(p_span, "this type parameter");
+ }
+ let hir = self.hir();
+ let mut note = true;
+ if let Some(generics) = generics
+ .type_param(p, self)
+ .def_id
+ .as_local()
+ .map(|id| hir.local_def_id_to_hir_id(id))
+ .and_then(|id| self.hir().find(self.hir().get_parent_node(id)))
+ .as_ref()
+ .and_then(|node| node.generics())
+ {
+ // Synthesize the associated type restriction `Add<Output = Expected>`.
+ // FIXME: extract this logic for use in other diagnostics.
+ let (trait_ref, assoc_substs) = proj.trait_ref_and_own_substs(self);
+ let path =
+ self.def_path_str_with_substs(trait_ref.def_id, trait_ref.substs);
+ let item_name = self.item_name(proj.item_def_id);
+ let item_args = self.format_generic_args(assoc_substs);
+
+ let path = if path.ends_with('>') {
+ format!(
+ "{}, {}{} = {}>",
+ &path[..path.len() - 1],
+ item_name,
+ item_args,
+ p
+ )
+ } else {
+ format!("{}<{}{} = {}>", path, item_name, item_args, p)
+ };
+ note = !suggest_constraining_type_param(
+ self,
+ generics,
+ diag,
+ &format!("{}", proj.self_ty()),
+ &path,
+ None,
+ );
+ }
+ if note {
+ diag.note("you might be missing a type parameter or trait bound");
+ }
+ }
+ (ty::Param(p), ty::Dynamic(..) | ty::Opaque(..))
+ | (ty::Dynamic(..) | ty::Opaque(..), ty::Param(p)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let p_span = self.def_span(generics.type_param(p, self).def_id);
+ if !sp.contains(p_span) {
+ diag.span_label(p_span, "this type parameter");
+ }
+ diag.help("type parameters must be constrained to match other types");
+ if self.sess.teach(&diag.get_code().unwrap()) {
+ diag.help(
+ "given a type parameter `T` and a method `foo`:
+```
+trait Trait<T> { fn foo(&self) -> T; }
+```
+the only ways to implement method `foo` are:
+- constrain `T` with an explicit type:
+```
+impl Trait<String> for X {
+ fn foo(&self) -> String { String::new() }
+}
+```
+- add a trait bound to `T` and call a method on that trait that returns `Self`:
+```
+impl<T: std::default::Default> Trait<T> for X {
+ fn foo(&self) -> T { <T as std::default::Default>::default() }
+}
+```
+- change `foo` to return an argument of type `T`:
+```
+impl<T> Trait<T> for X {
+ fn foo(&self, x: T) -> T { x }
+}
+```",
+ );
+ }
+ diag.note(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch10-02-traits.html\
+ #traits-as-parameters",
+ );
+ }
+ (ty::Param(p), ty::Closure(..) | ty::Generator(..)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let p_span = self.def_span(generics.type_param(p, self).def_id);
+ if !sp.contains(p_span) {
+ diag.span_label(p_span, "this type parameter");
+ }
+ diag.help(&format!(
+ "every closure has a distinct type and so could not always match the \
+ caller-chosen type of parameter `{}`",
+ p
+ ));
+ }
+ (ty::Param(p), _) | (_, ty::Param(p)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let p_span = self.def_span(generics.type_param(p, self).def_id);
+ if !sp.contains(p_span) {
+ diag.span_label(p_span, "this type parameter");
+ }
+ }
+ (ty::Projection(proj_ty), _) => {
+ self.expected_projection(
+ diag,
+ proj_ty,
+ values,
+ body_owner_def_id,
+ cause.code(),
+ );
+ }
+ (_, ty::Projection(proj_ty)) => {
+ let msg = format!(
+ "consider constraining the associated type `{}` to `{}`",
+ values.found, values.expected,
+ );
+ if !(self.suggest_constraining_opaque_associated_type(
+ diag,
+ &msg,
+ proj_ty,
+ values.expected,
+ ) || self.suggest_constraint(
+ diag,
+ &msg,
+ body_owner_def_id,
+ proj_ty,
+ values.expected,
+ )) {
+ diag.help(&msg);
+ diag.note(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch19-03-advanced-traits.html",
+ );
+ }
+ }
+ _ => {}
+ }
+ debug!(
+ "note_and_explain_type_err expected={:?} ({:?}) found={:?} ({:?})",
+ values.expected,
+ values.expected.kind(),
+ values.found,
+ values.found.kind(),
+ );
+ }
+ CyclicTy(ty) => {
+ // Watch out for various cases of cyclic types and try to explain.
+ if ty.is_closure() || ty.is_generator() {
+ diag.note(
+ "closures cannot capture themselves or take themselves as argument;\n\
+ this error may be the result of a recent compiler bug-fix,\n\
+ see issue #46062 <https://github.com/rust-lang/rust/issues/46062>\n\
+ for more information",
+ );
+ }
+ }
+ TargetFeatureCast(def_id) => {
+ let target_spans =
+ self.get_attrs(*def_id, sym::target_feature).map(|attr| attr.span);
+ diag.note(
+ "functions with `#[target_feature]` can only be coerced to `unsafe` function pointers"
+ );
+ diag.span_labels(target_spans, "`#[target_feature]` added here");
+ }
+ _ => {}
+ }
+ }
+
+ fn suggest_constraint(
+ self,
+ diag: &mut Diagnostic,
+ msg: &str,
+ body_owner_def_id: DefId,
+ proj_ty: &ty::ProjectionTy<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> bool {
+ let assoc = self.associated_item(proj_ty.item_def_id);
+ let (trait_ref, assoc_substs) = proj_ty.trait_ref_and_own_substs(self);
+ if let Some(item) = self.hir().get_if_local(body_owner_def_id) {
+ if let Some(hir_generics) = item.generics() {
+ // Get the `DefId` for the type parameter corresponding to `A` in `<A as T>::Foo`.
+ // This will also work for `impl Trait`.
+ let def_id = if let ty::Param(param_ty) = proj_ty.self_ty().kind() {
+ let generics = self.generics_of(body_owner_def_id);
+ generics.type_param(param_ty, self).def_id
+ } else {
+ return false;
+ };
+ let Some(def_id) = def_id.as_local() else {
+ return false;
+ };
+
+ // First look in the `where` clause, as this might be
+ // `fn foo<T>(x: T) where T: Trait`.
+ for pred in hir_generics.bounds_for_param(def_id) {
+ if self.constrain_generic_bound_associated_type_structured_suggestion(
+ diag,
+ &trait_ref,
+ pred.bounds,
+ &assoc,
+ assoc_substs,
+ ty,
+ msg,
+ false,
+ ) {
+ return true;
+ }
+ }
+ }
+ }
+ false
+ }
+
+ /// An associated type was expected and a different type was found.
+ ///
+ /// We perform a few different checks to see what we can suggest:
+ ///
+ /// - In the current item, look for associated functions that return the expected type and
+ /// suggest calling them. (Not a structured suggestion.)
+ /// - If any of the item's generic bounds can be constrained, we suggest constraining the
+ /// associated type to the found type.
+ /// - If the associated type has a default type and was expected inside of a `trait`, we
+ /// mention that this is disallowed.
+ /// - If all other things fail, and the error is not because of a mismatch between the `trait`
+ /// and the `impl`, we provide a generic `help` to constrain the assoc type or call an assoc
+ /// fn that returns the type.
+ fn expected_projection(
+ self,
+ diag: &mut Diagnostic,
+ proj_ty: &ty::ProjectionTy<'tcx>,
+ values: &ExpectedFound<Ty<'tcx>>,
+ body_owner_def_id: DefId,
+ cause_code: &ObligationCauseCode<'_>,
+ ) {
+ let msg = format!(
+ "consider constraining the associated type `{}` to `{}`",
+ values.expected, values.found
+ );
+ let body_owner = self.hir().get_if_local(body_owner_def_id);
+ let current_method_ident = body_owner.and_then(|n| n.ident()).map(|i| i.name);
+
+ // We don't want to suggest calling an assoc fn in a scope where that isn't feasible.
+ let callable_scope = matches!(
+ body_owner,
+ Some(
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(..), .. })
+ | hir::Node::TraitItem(hir::TraitItem { kind: hir::TraitItemKind::Fn(..), .. })
+ | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }),
+ )
+ );
+ let impl_comparison =
+ matches!(cause_code, ObligationCauseCode::CompareImplItemObligation { .. });
+ let assoc = self.associated_item(proj_ty.item_def_id);
+ if !callable_scope || impl_comparison {
+ // We do not want to suggest calling functions when the reason for the
+ // type error is a comparison of an `impl` with its `trait`, or when the
+ // scope is outside of a `Body`.
+ } else {
+ // If we find a suitable associated function that returns the expected type, we don't
+ // want the more general suggestion later in this method about "consider constraining
+ // the associated type or calling a method that returns the associated type".
+ let point_at_assoc_fn = self.point_at_methods_that_satisfy_associated_type(
+ diag,
+ assoc.container_id(self),
+ current_method_ident,
+ proj_ty.item_def_id,
+ values.expected,
+ );
+ // Possibly suggest constraining the associated type to conform to the
+ // found type.
+ if self.suggest_constraint(diag, &msg, body_owner_def_id, proj_ty, values.found)
+ || point_at_assoc_fn
+ {
+ return;
+ }
+ }
+
+ self.suggest_constraining_opaque_associated_type(diag, &msg, proj_ty, values.found);
+
+ if self.point_at_associated_type(diag, body_owner_def_id, values.found) {
+ return;
+ }
+
+ if !impl_comparison {
+ // Generic suggestion when we can't be more specific.
+ if callable_scope {
+ diag.help(&format!(
+ "{} or calling a method that returns `{}`",
+ msg, values.expected
+ ));
+ } else {
+ diag.help(&msg);
+ }
+ diag.note(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch19-03-advanced-traits.html",
+ );
+ }
+ if self.sess.teach(&diag.get_code().unwrap()) {
+ diag.help(
+ "given an associated type `T` and a method `foo`:
+```
+trait Trait {
+type T;
+fn foo(&self) -> Self::T;
+}
+```
+the only way of implementing method `foo` is to constrain `T` with an explicit associated type:
+```
+impl Trait for X {
+type T = String;
+fn foo(&self) -> Self::T { String::new() }
+}
+```",
+ );
+ }
+ }
+
+ /// When the expected `impl Trait` is not defined in the current item, it will come from
+ /// a return type. This can occur when dealing with `TryStream` (#71035).
+ fn suggest_constraining_opaque_associated_type(
+ self,
+ diag: &mut Diagnostic,
+ msg: &str,
+ proj_ty: &ty::ProjectionTy<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> bool {
+ let assoc = self.associated_item(proj_ty.item_def_id);
+ if let ty::Opaque(def_id, _) = *proj_ty.self_ty().kind() {
+ let opaque_local_def_id = def_id.as_local();
+ let opaque_hir_ty = if let Some(opaque_local_def_id) = opaque_local_def_id {
+ match &self.hir().expect_item(opaque_local_def_id).kind {
+ hir::ItemKind::OpaqueTy(opaque_hir_ty) => opaque_hir_ty,
+ _ => bug!("The HirId comes from a `ty::Opaque`"),
+ }
+ } else {
+ return false;
+ };
+
+ let (trait_ref, assoc_substs) = proj_ty.trait_ref_and_own_substs(self);
+
+ self.constrain_generic_bound_associated_type_structured_suggestion(
+ diag,
+ &trait_ref,
+ opaque_hir_ty.bounds,
+ assoc,
+ assoc_substs,
+ ty,
+ msg,
+ true,
+ )
+ } else {
+ false
+ }
+ }
+
+ fn point_at_methods_that_satisfy_associated_type(
+ self,
+ diag: &mut Diagnostic,
+ assoc_container_id: DefId,
+ current_method_ident: Option<Symbol>,
+ proj_ty_item_def_id: DefId,
+ expected: Ty<'tcx>,
+ ) -> bool {
+ let items = self.associated_items(assoc_container_id);
+ // Find all the methods in the trait that could be called to construct the
+ // expected associated type.
+ // FIXME: consider suggesting the use of associated `const`s.
+ let methods: Vec<(Span, String)> = items
+ .items
+ .iter()
+ .filter(|(name, item)| {
+ ty::AssocKind::Fn == item.kind && Some(**name) != current_method_ident
+ })
+ .filter_map(|(_, item)| {
+ let method = self.fn_sig(item.def_id);
+ match *method.output().skip_binder().kind() {
+ ty::Projection(ty::ProjectionTy { item_def_id, .. })
+ if item_def_id == proj_ty_item_def_id =>
+ {
+ Some((
+ self.def_span(item.def_id),
+ format!("consider calling `{}`", self.def_path_str(item.def_id)),
+ ))
+ }
+ _ => None,
+ }
+ })
+ .collect();
+ if !methods.is_empty() {
+ // Use a single `help:` to show all the methods in the trait that can
+ // be used to construct the expected associated type.
+ let mut span: MultiSpan =
+ methods.iter().map(|(sp, _)| *sp).collect::<Vec<Span>>().into();
+ let msg = format!(
+ "{some} method{s} {are} available that return{r} `{ty}`",
+ some = if methods.len() == 1 { "a" } else { "some" },
+ s = pluralize!(methods.len()),
+ are = pluralize!("is", methods.len()),
+ r = if methods.len() == 1 { "s" } else { "" },
+ ty = expected
+ );
+ for (sp, label) in methods.into_iter() {
+ span.push_span_label(sp, label);
+ }
+ diag.span_help(span, &msg);
+ return true;
+ }
+ false
+ }
+
+ fn point_at_associated_type(
+ self,
+ diag: &mut Diagnostic,
+ body_owner_def_id: DefId,
+ found: Ty<'tcx>,
+ ) -> bool {
+ let Some(hir_id) = body_owner_def_id.as_local() else {
+ return false;
+ };
+ let hir_id = self.hir().local_def_id_to_hir_id(hir_id);
+ // When `body_owner` is an `impl` or `trait` item, look in its associated types for
+ // `expected` and point at it.
+ let parent_id = self.hir().get_parent_item(hir_id);
+ let item = self.hir().find_by_def_id(parent_id);
+ debug!("expected_projection parent item {:?}", item);
+ match item {
+ Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Trait(.., items), .. })) => {
+ // FIXME: account for `#![feature(specialization)]`
+ for item in &items[..] {
+ match item.kind {
+ hir::AssocItemKind::Type => {
+ // FIXME: account for returning some type in a trait fn impl that has
+ // an assoc type as a return type (#72076).
+ if let hir::Defaultness::Default { has_value: true } =
+ self.impl_defaultness(item.id.def_id)
+ {
+ if self.type_of(item.id.def_id) == found {
+ diag.span_label(
+ item.span,
+ "associated type defaults can't be assumed inside the \
+ trait defining them",
+ );
+ return true;
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+ Some(hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { items, .. }),
+ ..
+ })) => {
+ for item in &items[..] {
+ if let hir::AssocItemKind::Type = item.kind {
+ if self.type_of(item.id.def_id) == found {
+ diag.span_label(item.span, "expected this associated type");
+ return true;
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ false
+ }
+
+ /// Given a slice of `hir::GenericBound`s, if any of them corresponds to the `trait_ref`
+ /// requirement, provide a structured suggestion to constrain it to a given type `ty`.
+ ///
+ /// `is_bound_surely_present` indicates whether we know the bound we're looking for is
+ /// inside `bounds`. If that's the case and `bounds` contains exactly one trait bound,
+ /// we can treat that bound as the one we're looking for. This helps in cases where the
+ /// associated type is defined on a supertrait of the one present in the bounds.
+ fn constrain_generic_bound_associated_type_structured_suggestion(
+ self,
+ diag: &mut Diagnostic,
+ trait_ref: &ty::TraitRef<'tcx>,
+ bounds: hir::GenericBounds<'_>,
+ assoc: &ty::AssocItem,
+ assoc_substs: &[ty::GenericArg<'tcx>],
+ ty: Ty<'tcx>,
+ msg: &str,
+ is_bound_surely_present: bool,
+ ) -> bool {
+ // FIXME: we would want to call `resolve_vars_if_possible` on `ty` before suggesting.
+
+ let trait_bounds = bounds.iter().filter_map(|bound| match bound {
+ hir::GenericBound::Trait(ptr, hir::TraitBoundModifier::None) => Some(ptr),
+ _ => None,
+ });
+
+ let matching_trait_bounds = trait_bounds
+ .clone()
+ .filter(|ptr| ptr.trait_ref.trait_def_id() == Some(trait_ref.def_id))
+ .collect::<Vec<_>>();
+
+ let span = match &matching_trait_bounds[..] {
+ &[ptr] => ptr.span,
+ &[] if is_bound_surely_present => match &trait_bounds.collect::<Vec<_>>()[..] {
+ &[ptr] => ptr.span,
+ _ => return false,
+ },
+ _ => return false,
+ };
+
+ self.constrain_associated_type_structured_suggestion(
+ diag,
+ span,
+ assoc,
+ assoc_substs,
+ ty,
+ msg,
+ )
+ }
+
+ /// Given a span corresponding to a bound, provide a structured suggestion to set an
+ /// associated type to a given type `ty`.
+ fn constrain_associated_type_structured_suggestion(
+ self,
+ diag: &mut Diagnostic,
+ span: Span,
+ assoc: &ty::AssocItem,
+ assoc_substs: &[ty::GenericArg<'tcx>],
+ ty: Ty<'tcx>,
+ msg: &str,
+ ) -> bool {
+ if let Ok(has_params) =
+ self.sess.source_map().span_to_snippet(span).map(|snippet| snippet.ends_with('>'))
+ {
+ let (span, sugg) = if has_params {
+ let pos = span.hi() - BytePos(1);
+ let span = Span::new(pos, pos, span.ctxt(), span.parent());
+ (span, format!(", {} = {}", assoc.ident(self), ty))
+ } else {
+ let item_args = self.format_generic_args(assoc_substs);
+ (span.shrink_to_hi(), format!("<{}{} = {}>", assoc.ident(self), item_args, ty))
+ };
+ diag.span_suggestion_verbose(span, msg, sugg, MaybeIncorrect);
+ return true;
+ }
+ false
+ }
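+
+ // The two suggestion shapes above, on hypothetical bounds: an existing
+ // `T: Trait<u32>` bound (its snippet ends in `>`) becomes
+ // `T: Trait<u32, Assoc = Ty>` by inserting `, Assoc = Ty` before the final
+ // `>`, while a bare `T: Trait` gains a whole `<Assoc = Ty>` suffix.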
+
+ fn format_generic_args(self, args: &[ty::GenericArg<'tcx>]) -> String {
+ FmtPrinter::new(self, hir::def::Namespace::TypeNS)
+ .path_generic_args(Ok, args)
+ .expect("could not write to `String`.")
+ .into_buffer()
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs
new file mode 100644
index 000000000..8d019a3ba
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/fast_reject.rs
@@ -0,0 +1,405 @@
+use crate::mir::Mutability;
+use crate::ty::subst::GenericArgKind;
+use crate::ty::{self, Ty, TyCtxt, TypeVisitable};
+use rustc_hir::def_id::DefId;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::iter;
+
+use self::SimplifiedTypeGen::*;
+
+pub type SimplifiedType = SimplifiedTypeGen<DefId>;
+
+/// See `simplify_type`
+///
+/// Note that we keep this type generic over the type of identifier it uses
+/// because we sometimes need to use `SimplifiedTypeGen` values as stable sorting
+/// keys (in which case we use a `DefPathHash` as the id type), but in the general
+/// case the non-stable but fast-to-construct `DefId` version is the better choice.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+pub enum SimplifiedTypeGen<D>
+where
+ D: Copy + Debug + Eq,
+{
+ BoolSimplifiedType,
+ CharSimplifiedType,
+ IntSimplifiedType(ty::IntTy),
+ UintSimplifiedType(ty::UintTy),
+ FloatSimplifiedType(ty::FloatTy),
+ AdtSimplifiedType(D),
+ ForeignSimplifiedType(D),
+ StrSimplifiedType,
+ ArraySimplifiedType,
+ SliceSimplifiedType,
+ RefSimplifiedType(Mutability),
+ PtrSimplifiedType(Mutability),
+ NeverSimplifiedType,
+ TupleSimplifiedType(usize),
+ /// A trait object, all of whose components are markers
+ /// (e.g., `dyn Send + Sync`).
+ MarkerTraitObjectSimplifiedType,
+ TraitSimplifiedType(D),
+ ClosureSimplifiedType(D),
+ GeneratorSimplifiedType(D),
+ GeneratorWitnessSimplifiedType(usize),
+ OpaqueSimplifiedType(D),
+ FunctionSimplifiedType(usize),
+ PlaceholderSimplifiedType,
+}
+
+/// Generic parameters are pretty much just bound variables, e.g.
+/// the type of `fn foo<'a, T>(x: &'a T) -> u32 { ... }` can be thought of as
+/// `for<'a, T> fn(&'a T) -> u32`.
+///
+/// Typecheck of `foo` has to succeed for all possible generic arguments, so
+/// during typeck, we have to treat its generic parameters as if they
+/// were placeholders.
+///
+/// But when calling `foo` we only have to provide a specific generic argument.
+/// In that case the generic parameters are instantiated with inference variables.
+/// As we use `simplify_type` before that instantiation happens, we just treat
+/// generic parameters as if they were inference variables in that case.
+#[derive(PartialEq, Eq, Debug, Clone, Copy)]
+pub enum TreatParams {
+ /// Treat parameters as placeholders in the given environment.
+ ///
+ /// Note that this also causes us to treat projections as if they were
+ /// placeholders. This is only correct if the given projection cannot
+ /// be normalized in the current context. Even if normalization fails,
+ /// it may still succeed later if the projection contains any inference
+ /// variables.
+ AsPlaceholder,
+ AsInfer,
+}
+
+/// Tries to simplify a type by only returning the outermost injective¹ layer, if one exists.
+///
+/// **This function should only be used if you need to store or retrieve the type from some
+/// hashmap. If you want to quickly decide whether two types may unify, use the [DeepRejectCtxt]
+/// instead.**
+///
+/// The idea is to get something simple that we can use to quickly decide if two types could
+/// unify, for example during method lookup. If this function returns `Some(x)` for a type, that
+/// type can only unify with types for which this method returns either `Some(x)` as well or `None`.
+///
+/// Special cases here are parameters and projections, which are only injective
+/// if they are treated as placeholders.
+///
+/// For example, when storing impls based on their simplified self type, we treat
+/// generic parameters as if they were inference variables. We must not simplify them here,
+/// as they can unify with any other type.
+///
+/// With projections we have to be even more careful, as treating them as placeholders
+/// is only correct if they are fully normalized.
+///
+/// ¹ meaning that if the outermost layers are different, then the whole types are also different.
+pub fn simplify_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ treat_params: TreatParams,
+) -> Option<SimplifiedType> {
+ match *ty.kind() {
+ ty::Bool => Some(BoolSimplifiedType),
+ ty::Char => Some(CharSimplifiedType),
+ ty::Int(int_type) => Some(IntSimplifiedType(int_type)),
+ ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)),
+ ty::Float(float_type) => Some(FloatSimplifiedType(float_type)),
+ ty::Adt(def, _) => Some(AdtSimplifiedType(def.did())),
+ ty::Str => Some(StrSimplifiedType),
+ ty::Array(..) => Some(ArraySimplifiedType),
+ ty::Slice(..) => Some(SliceSimplifiedType),
+ ty::RawPtr(ptr) => Some(PtrSimplifiedType(ptr.mutbl)),
+ ty::Dynamic(trait_info, ..) => match trait_info.principal_def_id() {
+ Some(principal_def_id) if !tcx.trait_is_auto(principal_def_id) => {
+ Some(TraitSimplifiedType(principal_def_id))
+ }
+ _ => Some(MarkerTraitObjectSimplifiedType),
+ },
+ ty::Ref(_, _, mutbl) => Some(RefSimplifiedType(mutbl)),
+ ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(ClosureSimplifiedType(def_id)),
+ ty::Generator(def_id, _, _) => Some(GeneratorSimplifiedType(def_id)),
+ ty::GeneratorWitness(tys) => Some(GeneratorWitnessSimplifiedType(tys.skip_binder().len())),
+ ty::Never => Some(NeverSimplifiedType),
+ ty::Tuple(tys) => Some(TupleSimplifiedType(tys.len())),
+ ty::FnPtr(f) => Some(FunctionSimplifiedType(f.skip_binder().inputs().len())),
+ ty::Placeholder(..) => Some(PlaceholderSimplifiedType),
+ ty::Param(_) => match treat_params {
+ TreatParams::AsPlaceholder => Some(PlaceholderSimplifiedType),
+ TreatParams::AsInfer => None,
+ },
+ ty::Projection(_) => match treat_params {
+ // When treating `ty::Param` as a placeholder, projections also
+ // don't unify with anything else as long as they are fully normalized.
+ //
+ // We will have to be careful with lazy normalization here.
+ TreatParams::AsPlaceholder if !ty.has_infer_types_or_consts() => {
+ debug!("treating `{}` as a placeholder", ty);
+ Some(PlaceholderSimplifiedType)
+ }
+ TreatParams::AsPlaceholder | TreatParams::AsInfer => None,
+ },
+ ty::Opaque(def_id, _) => Some(OpaqueSimplifiedType(def_id)),
+ ty::Foreign(def_id) => Some(ForeignSimplifiedType(def_id)),
+ ty::Bound(..) | ty::Infer(_) | ty::Error(_) => None,
+ }
+}
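+
+// A few concrete mappings implied by the match above (illustrative):
+// `Vec<u32>` simplifies to `AdtSimplifiedType(<Vec's DefId>)` regardless of
+// its arguments, `&mut T` to `RefSimplifiedType(Mutability::Mut)`, and a bare
+// parameter `T` to `PlaceholderSimplifiedType` under
+// `TreatParams::AsPlaceholder` but to `None` under `TreatParams::AsInfer`.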
+
+impl<D: Copy + Debug + Eq> SimplifiedTypeGen<D> {
+ pub fn def(self) -> Option<D> {
+ match self {
+ AdtSimplifiedType(d)
+ | ForeignSimplifiedType(d)
+ | TraitSimplifiedType(d)
+ | ClosureSimplifiedType(d)
+ | GeneratorSimplifiedType(d)
+ | OpaqueSimplifiedType(d) => Some(d),
+ _ => None,
+ }
+ }
+
+ pub fn map_def<U, F>(self, map: F) -> SimplifiedTypeGen<U>
+ where
+ F: Fn(D) -> U,
+ U: Copy + Debug + Eq,
+ {
+ match self {
+ BoolSimplifiedType => BoolSimplifiedType,
+ CharSimplifiedType => CharSimplifiedType,
+ IntSimplifiedType(t) => IntSimplifiedType(t),
+ UintSimplifiedType(t) => UintSimplifiedType(t),
+ FloatSimplifiedType(t) => FloatSimplifiedType(t),
+ AdtSimplifiedType(d) => AdtSimplifiedType(map(d)),
+ ForeignSimplifiedType(d) => ForeignSimplifiedType(map(d)),
+ StrSimplifiedType => StrSimplifiedType,
+ ArraySimplifiedType => ArraySimplifiedType,
+ SliceSimplifiedType => SliceSimplifiedType,
+ RefSimplifiedType(m) => RefSimplifiedType(m),
+ PtrSimplifiedType(m) => PtrSimplifiedType(m),
+ NeverSimplifiedType => NeverSimplifiedType,
+ MarkerTraitObjectSimplifiedType => MarkerTraitObjectSimplifiedType,
+ TupleSimplifiedType(n) => TupleSimplifiedType(n),
+ TraitSimplifiedType(d) => TraitSimplifiedType(map(d)),
+ ClosureSimplifiedType(d) => ClosureSimplifiedType(map(d)),
+ GeneratorSimplifiedType(d) => GeneratorSimplifiedType(map(d)),
+ GeneratorWitnessSimplifiedType(n) => GeneratorWitnessSimplifiedType(n),
+ OpaqueSimplifiedType(d) => OpaqueSimplifiedType(map(d)),
+ FunctionSimplifiedType(n) => FunctionSimplifiedType(n),
+ PlaceholderSimplifiedType => PlaceholderSimplifiedType,
+ }
+ }
+}
+
+/// Given generic arguments from an obligation and an impl,
+/// could these two be unified after replacing parameters in
+/// the impl with inference variables?
+///
+/// For obligations, parameters won't be replaced by inference
+/// variables and only unify with themselves. We treat them
+/// the same way we treat placeholders.
+///
+/// We also use this check during coherence. For coherence the
+/// impls only have to overlap for some value, so we treat parameters
+/// on both sides like inference variables. This behavior is toggled
+/// using the `treat_obligation_params` field.
+#[derive(Debug, Clone, Copy)]
+pub struct DeepRejectCtxt {
+ pub treat_obligation_params: TreatParams,
+}
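+
+// Sketching the fast-reject idea (an illustrative example): with
+// `treat_obligation_params: TreatParams::AsPlaceholder`, an obligation type
+// `Vec<u32>` may unify with an impl self type `Vec<T>`, since the impl's `T`
+// behaves like an inference variable, but it is rejected against `String`
+// immediately, because the two rigid ADTs differ.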
+
+impl DeepRejectCtxt {
+ pub fn generic_args_may_unify<'tcx>(
+ self,
+ obligation_arg: ty::GenericArg<'tcx>,
+ impl_arg: ty::GenericArg<'tcx>,
+ ) -> bool {
+ match (obligation_arg.unpack(), impl_arg.unpack()) {
+ // We don't fast reject based on regions for now.
+ (GenericArgKind::Lifetime(_), GenericArgKind::Lifetime(_)) => true,
+ (GenericArgKind::Type(obl), GenericArgKind::Type(imp)) => {
+ self.types_may_unify(obl, imp)
+ }
+ (GenericArgKind::Const(obl), GenericArgKind::Const(imp)) => {
+ self.consts_may_unify(obl, imp)
+ }
+ _ => bug!("kind mismatch: {obligation_arg} {impl_arg}"),
+ }
+ }
+
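+    /// Returns whether `obligation_ty` could possibly unify with `impl_ty`
+    /// once parameters in the impl have been replaced by inference
+    /// variables. A `false` result is a definitive "these can never unify".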
+ pub fn types_may_unify<'tcx>(self, obligation_ty: Ty<'tcx>, impl_ty: Ty<'tcx>) -> bool {
+ match impl_ty.kind() {
+ // Start by checking whether the type in the impl may unify with
+ // pretty much everything. Just return `true` in that case.
+ ty::Param(_) | ty::Projection(_) | ty::Error(_) => return true,
+ // These types only unify with inference variables or their own
+ // variant.
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(..)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::RawPtr(..)
+ | ty::Dynamic(..)
+ | ty::Ref(..)
+ | ty::Never
+ | ty::Tuple(..)
+ | ty::FnPtr(..)
+ | ty::Foreign(..)
+ | ty::Opaque(..) => {}
+ ty::FnDef(..)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Placeholder(..)
+ | ty::Bound(..)
+ | ty::Infer(_) => bug!("unexpected impl_ty: {impl_ty}"),
+ }
+
+ let k = impl_ty.kind();
+ match *obligation_ty.kind() {
+ // Purely rigid types, use structural equivalence.
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Never
+ | ty::Foreign(_) => obligation_ty == impl_ty,
+ ty::Ref(_, obl_ty, obl_mutbl) => match k {
+ &ty::Ref(_, impl_ty, impl_mutbl) => {
+ obl_mutbl == impl_mutbl && self.types_may_unify(obl_ty, impl_ty)
+ }
+ _ => false,
+ },
+ ty::Adt(obl_def, obl_substs) => match k {
+ &ty::Adt(impl_def, impl_substs) => {
+ obl_def == impl_def
+ && iter::zip(obl_substs, impl_substs)
+ .all(|(obl, imp)| self.generic_args_may_unify(obl, imp))
+ }
+ _ => false,
+ },
+ ty::Slice(obl_ty) => {
+ matches!(k, &ty::Slice(impl_ty) if self.types_may_unify(obl_ty, impl_ty))
+ }
+ ty::Array(obl_ty, obl_len) => match k {
+ &ty::Array(impl_ty, impl_len) => {
+ self.types_may_unify(obl_ty, impl_ty)
+ && self.consts_may_unify(obl_len, impl_len)
+ }
+ _ => false,
+ },
+ ty::Tuple(obl) => match k {
+ &ty::Tuple(imp) => {
+ obl.len() == imp.len()
+ && iter::zip(obl, imp).all(|(obl, imp)| self.types_may_unify(obl, imp))
+ }
+ _ => false,
+ },
+ ty::RawPtr(obl) => match k {
+ ty::RawPtr(imp) => obl.mutbl == imp.mutbl && self.types_may_unify(obl.ty, imp.ty),
+ _ => false,
+ },
+ ty::Dynamic(obl_preds, ..) => {
+ // Ideally we would walk the existential predicates here or at least
+ // compare their length. But considering that the relevant `Relate` impl
+ // actually sorts and deduplicates these, that doesn't work.
+ matches!(k, ty::Dynamic(impl_preds, ..) if
+ obl_preds.principal_def_id() == impl_preds.principal_def_id()
+ )
+ }
+ ty::FnPtr(obl_sig) => match k {
+ ty::FnPtr(impl_sig) => {
+ let ty::FnSig { inputs_and_output, c_variadic, unsafety, abi } =
+ obl_sig.skip_binder();
+ let impl_sig = impl_sig.skip_binder();
+
+ abi == impl_sig.abi
+ && c_variadic == impl_sig.c_variadic
+ && unsafety == impl_sig.unsafety
+ && inputs_and_output.len() == impl_sig.inputs_and_output.len()
+ && iter::zip(inputs_and_output, impl_sig.inputs_and_output)
+ .all(|(obl, imp)| self.types_may_unify(obl, imp))
+ }
+ _ => false,
+ },
+
+ // Opaque types in impls should be forbidden, but that doesn't
+ // stop compilation. So this match arm should never return true
+ // if compilation succeeds.
+ ty::Opaque(..) => matches!(k, ty::Opaque(..)),
+
+ // Impls cannot contain these types as these cannot be named directly.
+ ty::FnDef(..) | ty::Closure(..) | ty::Generator(..) => false,
+
+ ty::Placeholder(..) => false,
+
+ // Depending on the value of `treat_obligation_params`, we either
+ // treat generic parameters like placeholders or like inference variables.
+ ty::Param(_) => match self.treat_obligation_params {
+ TreatParams::AsPlaceholder => false,
+ TreatParams::AsInfer => true,
+ },
+
+ ty::Infer(_) => true,
+
+            // As we're walking the whole type, we may encounter projections
+            // inside of binders and the like, so we conservatively assume
+            // that projections can unify with anything.
+            //
+            // Looking forward to lazy normalization, this is the safer
+            // strategy anyway.
+ ty::Projection(_) => true,
+
+ ty::Error(_) => true,
+
+ ty::GeneratorWitness(..) | ty::Bound(..) => {
+ bug!("unexpected obligation type: {:?}", obligation_ty)
+ }
+ }
+ }
+
+ pub fn consts_may_unify(self, obligation_ct: ty::Const<'_>, impl_ct: ty::Const<'_>) -> bool {
+ match impl_ct.kind() {
+ ty::ConstKind::Param(_) | ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => {
+ return true;
+ }
+ ty::ConstKind::Value(_) => {}
+ ty::ConstKind::Infer(_) | ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => {
+ bug!("unexpected impl arg: {:?}", impl_ct)
+ }
+ }
+
+ let k = impl_ct.kind();
+ match obligation_ct.kind() {
+ ty::ConstKind::Param(_) => match self.treat_obligation_params {
+ TreatParams::AsPlaceholder => false,
+ TreatParams::AsInfer => true,
+ },
+
+ // As we don't necessarily eagerly evaluate constants,
+ // they might unify with any value.
+ ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => true,
+ ty::ConstKind::Value(obl) => match k {
+ ty::ConstKind::Value(imp) => {
+ // FIXME(valtrees): Once we have valtrees, we can just
+ // compare them directly here.
+ match (obl.try_to_scalar_int(), imp.try_to_scalar_int()) {
+ (Some(obl), Some(imp)) => obl == imp,
+ _ => true,
+ }
+ }
+ _ => true,
+ },
+
+ ty::ConstKind::Infer(_) => true,
+
+ ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => {
+ bug!("unexpected obl const: {:?}", obligation_ct)
+ }
+ }
+ }
+}
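+
+// A hedged usage sketch (the surrounding bindings are assumed, not upstream
+// API): during coherence, parameters on both sides are treated as possibly
+// overlapping for *some* instantiation.
+//
+//     let cx = DeepRejectCtxt { treat_obligation_params: TreatParams::AsInfer };
+//     if !cx.generic_args_may_unify(obligation_arg, impl_arg) {
+//         // These impls can never overlap; skip the expensive overlap check.
+//     }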
diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs
new file mode 100644
index 000000000..ea6bb8a7a
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/flags.rs
@@ -0,0 +1,342 @@
+use crate::ty::subst::{GenericArg, GenericArgKind};
+use crate::ty::{self, InferConst, Term, Ty, TypeFlags};
+use std::slice;
+
+#[derive(Debug)]
+pub struct FlagComputation {
+ pub flags: TypeFlags,
+
+ // see `Ty::outer_exclusive_binder` for details
+ pub outer_exclusive_binder: ty::DebruijnIndex,
+}
+
+impl FlagComputation {
+ fn new() -> FlagComputation {
+ FlagComputation { flags: TypeFlags::empty(), outer_exclusive_binder: ty::INNERMOST }
+ }
+
+ #[allow(rustc::usage_of_ty_tykind)]
+ pub fn for_kind(kind: &ty::TyKind<'_>) -> FlagComputation {
+ let mut result = FlagComputation::new();
+ result.add_kind(kind);
+ result
+ }
+
+ pub fn for_predicate<'tcx>(binder: ty::Binder<'tcx, ty::PredicateKind<'_>>) -> FlagComputation {
+ let mut result = FlagComputation::new();
+ result.add_predicate(binder);
+ result
+ }
+
+ pub fn for_const(c: ty::Const<'_>) -> TypeFlags {
+ let mut result = FlagComputation::new();
+ result.add_const(c);
+ result.flags
+ }
+
+ pub fn for_unevaluated_const(uv: ty::Unevaluated<'_>) -> TypeFlags {
+ let mut result = FlagComputation::new();
+ result.add_unevaluated_const(uv);
+ result.flags
+ }
+
+ fn add_flags(&mut self, flags: TypeFlags) {
+ self.flags = self.flags | flags;
+ }
+
+    /// Indicates that `self` refers to something at binding level `binder`.
+ fn add_bound_var(&mut self, binder: ty::DebruijnIndex) {
+ let exclusive_binder = binder.shifted_in(1);
+ self.add_exclusive_binder(exclusive_binder);
+ }
+
+    /// Indicates that `self` refers to something *inside* binding
+    /// level `binder` -- not bound by `binder`, but bound by the next
+    /// binder internal to it.
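+    ///
+    /// For example, while computing the flags of `for<'a> fn(&'a ())`, the
+    /// occurrence of `'a` yields an exclusive binder of 1; on leaving the
+    /// `for<'a>` binder, `bound_computation` shifts that back out by one,
+    /// leaving nothing escaping -- the binder captures its variable.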
+ fn add_exclusive_binder(&mut self, exclusive_binder: ty::DebruijnIndex) {
+ self.outer_exclusive_binder = self.outer_exclusive_binder.max(exclusive_binder);
+ }
+
+ /// Adds the flags/depth from a set of types that appear within the current type, but within a
+ /// region binder.
+ fn bound_computation<T, F>(&mut self, value: ty::Binder<'_, T>, f: F)
+ where
+ F: FnOnce(&mut Self, T),
+ {
+ let mut computation = FlagComputation::new();
+
+ if !value.bound_vars().is_empty() {
+ computation.flags = computation.flags | TypeFlags::HAS_RE_LATE_BOUND;
+ }
+
+ f(&mut computation, value.skip_binder());
+
+ self.add_flags(computation.flags);
+
+        // The types that contributed to `computation` occurred within
+        // a region binder, so subtract one from the region depth
+        // before adding it to `self`.
+ let outer_exclusive_binder = computation.outer_exclusive_binder;
+ if outer_exclusive_binder > ty::INNERMOST {
+ self.add_exclusive_binder(outer_exclusive_binder.shifted_out(1));
+ } // otherwise, this binder captures nothing
+ }
+
+ #[allow(rustc::usage_of_ty_tykind)]
+ fn add_kind(&mut self, kind: &ty::TyKind<'_>) {
+ match kind {
+ &ty::Bool
+ | &ty::Char
+ | &ty::Int(_)
+ | &ty::Float(_)
+ | &ty::Uint(_)
+ | &ty::Never
+ | &ty::Str
+ | &ty::Foreign(..) => {}
+
+ &ty::Error(_) => self.add_flags(TypeFlags::HAS_ERROR),
+
+ &ty::Param(_) => {
+ self.add_flags(TypeFlags::HAS_TY_PARAM);
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ }
+
+ &ty::Generator(_, ref substs, _) => {
+ let substs = substs.as_generator();
+ let should_remove_further_specializable =
+ !self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ self.add_substs(substs.parent_substs());
+ if should_remove_further_specializable {
+ self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE;
+ }
+
+ self.add_ty(substs.resume_ty());
+ self.add_ty(substs.return_ty());
+ self.add_ty(substs.witness());
+ self.add_ty(substs.yield_ty());
+ self.add_ty(substs.tupled_upvars_ty());
+ }
+
+ &ty::GeneratorWitness(ts) => {
+ self.bound_computation(ts, |flags, ts| flags.add_tys(ts));
+ }
+
+ &ty::Closure(_, substs) => {
+ let substs = substs.as_closure();
+ let should_remove_further_specializable =
+ !self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ self.add_substs(substs.parent_substs());
+ if should_remove_further_specializable {
+ self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE;
+ }
+
+ self.add_ty(substs.sig_as_fn_ptr_ty());
+ self.add_ty(substs.kind_ty());
+ self.add_ty(substs.tupled_upvars_ty());
+ }
+
+ &ty::Bound(debruijn, _) => {
+ self.add_bound_var(debruijn);
+ }
+
+ &ty::Placeholder(..) => {
+ self.add_flags(TypeFlags::HAS_TY_PLACEHOLDER);
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ }
+
+ &ty::Infer(infer) => {
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ match infer {
+ ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_) => {
+ self.add_flags(TypeFlags::HAS_TY_FRESH)
+ }
+
+ ty::TyVar(_) | ty::IntVar(_) | ty::FloatVar(_) => {
+ self.add_flags(TypeFlags::HAS_TY_INFER)
+ }
+ }
+ }
+
+ &ty::Adt(_, substs) => {
+ self.add_substs(substs);
+ }
+
+ &ty::Projection(data) => {
+ self.add_flags(TypeFlags::HAS_TY_PROJECTION);
+ self.add_projection_ty(data);
+ }
+
+ &ty::Opaque(_, substs) => {
+ self.add_flags(TypeFlags::HAS_TY_OPAQUE);
+ self.add_substs(substs);
+ }
+
+ &ty::Dynamic(obj, r) => {
+ for predicate in obj.iter() {
+ self.bound_computation(predicate, |computation, predicate| match predicate {
+ ty::ExistentialPredicate::Trait(tr) => computation.add_substs(tr.substs),
+ ty::ExistentialPredicate::Projection(p) => {
+ computation.add_existential_projection(&p);
+ }
+ ty::ExistentialPredicate::AutoTrait(_) => {}
+ });
+ }
+
+ self.add_region(r);
+ }
+
+ &ty::Array(tt, len) => {
+ self.add_ty(tt);
+ self.add_const(len);
+ }
+
+ &ty::Slice(tt) => self.add_ty(tt),
+
+ &ty::RawPtr(ref m) => {
+ self.add_ty(m.ty);
+ }
+
+ &ty::Ref(r, ty, _) => {
+ self.add_region(r);
+ self.add_ty(ty);
+ }
+
+ &ty::Tuple(types) => {
+ self.add_tys(types);
+ }
+
+ &ty::FnDef(_, substs) => {
+ self.add_substs(substs);
+ }
+
+ &ty::FnPtr(fn_sig) => self.bound_computation(fn_sig, |computation, fn_sig| {
+ computation.add_tys(fn_sig.inputs());
+ computation.add_ty(fn_sig.output());
+ }),
+ }
+ }
+
+ fn add_predicate(&mut self, binder: ty::Binder<'_, ty::PredicateKind<'_>>) {
+ self.bound_computation(binder, |computation, atom| computation.add_predicate_atom(atom));
+ }
+
+ fn add_predicate_atom(&mut self, atom: ty::PredicateKind<'_>) {
+ match atom {
+ ty::PredicateKind::Trait(trait_pred) => {
+ self.add_substs(trait_pred.trait_ref.substs);
+ }
+ ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(a, b)) => {
+ self.add_region(a);
+ self.add_region(b);
+ }
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty, region)) => {
+ self.add_ty(ty);
+ self.add_region(region);
+ }
+ ty::PredicateKind::Subtype(ty::SubtypePredicate { a_is_expected: _, a, b }) => {
+ self.add_ty(a);
+ self.add_ty(b);
+ }
+ ty::PredicateKind::Coerce(ty::CoercePredicate { a, b }) => {
+ self.add_ty(a);
+ self.add_ty(b);
+ }
+ ty::PredicateKind::Projection(ty::ProjectionPredicate { projection_ty, term }) => {
+ self.add_projection_ty(projection_ty);
+ match term {
+ Term::Ty(ty) => self.add_ty(ty),
+ Term::Const(c) => self.add_const(c),
+ }
+ }
+ ty::PredicateKind::WellFormed(arg) => {
+ self.add_substs(slice::from_ref(&arg));
+ }
+ ty::PredicateKind::ObjectSafe(_def_id) => {}
+ ty::PredicateKind::ClosureKind(_def_id, substs, _kind) => {
+ self.add_substs(substs);
+ }
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ self.add_unevaluated_const(uv);
+ }
+ ty::PredicateKind::ConstEquate(expected, found) => {
+ self.add_const(expected);
+ self.add_const(found);
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
+ self.add_ty(ty);
+ }
+ }
+ }
+
+ fn add_ty(&mut self, ty: Ty<'_>) {
+ self.add_flags(ty.flags());
+ self.add_exclusive_binder(ty.outer_exclusive_binder());
+ }
+
+ fn add_tys(&mut self, tys: &[Ty<'_>]) {
+ for &ty in tys {
+ self.add_ty(ty);
+ }
+ }
+
+ fn add_region(&mut self, r: ty::Region<'_>) {
+ self.add_flags(r.type_flags());
+ if let ty::ReLateBound(debruijn, _) = *r {
+ self.add_bound_var(debruijn);
+ }
+ }
+
+ fn add_const(&mut self, c: ty::Const<'_>) {
+ self.add_ty(c.ty());
+ match c.kind() {
+ ty::ConstKind::Unevaluated(unevaluated) => self.add_unevaluated_const(unevaluated),
+ ty::ConstKind::Infer(infer) => {
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ match infer {
+ InferConst::Fresh(_) => self.add_flags(TypeFlags::HAS_CT_FRESH),
+ InferConst::Var(_) => self.add_flags(TypeFlags::HAS_CT_INFER),
+ }
+ }
+ ty::ConstKind::Bound(debruijn, _) => {
+ self.add_bound_var(debruijn);
+ }
+ ty::ConstKind::Param(_) => {
+ self.add_flags(TypeFlags::HAS_CT_PARAM);
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ }
+ ty::ConstKind::Placeholder(_) => {
+ self.add_flags(TypeFlags::HAS_CT_PLACEHOLDER);
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ }
+ ty::ConstKind::Value(_) => {}
+ ty::ConstKind::Error(_) => self.add_flags(TypeFlags::HAS_ERROR),
+ }
+ }
+
+ fn add_unevaluated_const<P>(&mut self, ct: ty::Unevaluated<'_, P>) {
+ self.add_substs(ct.substs);
+ self.add_flags(TypeFlags::HAS_CT_PROJECTION);
+ }
+
+ fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection<'_>) {
+ self.add_substs(projection.substs);
+ match projection.term {
+ ty::Term::Ty(ty) => self.add_ty(ty),
+ ty::Term::Const(ct) => self.add_const(ct),
+ }
+ }
+
+ fn add_projection_ty(&mut self, projection_ty: ty::ProjectionTy<'_>) {
+ self.add_substs(projection_ty.substs);
+ }
+
+ fn add_substs(&mut self, substs: &[GenericArg<'_>]) {
+ for kind in substs {
+ match kind.unpack() {
+ GenericArgKind::Type(ty) => self.add_ty(ty),
+ GenericArgKind::Lifetime(lt) => self.add_region(lt),
+ GenericArgKind::Const(ct) => self.add_const(ct),
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs
new file mode 100644
index 000000000..5e96e278b
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/fold.rs
@@ -0,0 +1,797 @@
+//! A folding traversal mechanism for complex data structures that contain type
+//! information.
+//!
+//! This is a modifying traversal. It consumes the data structure, producing a
+//! (possibly) modified version of it. Both fallible and infallible versions are
+//! available. The name is potentially confusing, because this traversal is more
+//! like `Iterator::map` than `Iterator::fold`.
+//!
+//! This traversal has limited flexibility. Only a small number of "types of
+//! interest" within the complex data structures can receive custom
+//! modification. These are the ones containing the most important type-related
+//! information, such as `Ty`, `Predicate`, `Region`, and `Const`.
+//!
+//! There are three groups of traits involved in each traversal.
+//! - `TypeFoldable`. This is implemented once for many types, including:
+//!   - Types of interest, for which the methods delegate to the
+//! folder.
+//! - All other types, including generic containers like `Vec` and `Option`.
+//! It defines a "skeleton" of how they should be folded.
+//! - `TypeSuperFoldable`. This is implemented only for each type of interest,
+//! and defines the folding "skeleton" for these types.
+//! - `TypeFolder`/`FallibleTypeFolder`. One of these is implemented for each
+//! folder. This defines how types of interest are folded.
+//!
+//! This means each fold is a mixture of (a) generic folding operations, and (b)
+//! custom fold operations that are specific to the folder.
+//! - The `TypeFoldable` impls handle most of the traversal, and call into
+//! `TypeFolder`/`FallibleTypeFolder` when they encounter a type of interest.
+//! - A `TypeFolder`/`FallibleTypeFolder` may call into another `TypeFoldable`
+//! impl, because some of the types of interest are recursive and can contain
+//! other types of interest.
+//! - A `TypeFolder`/`FallibleTypeFolder` may also call into a `TypeSuperFoldable`
+//! impl, because each folder might provide custom handling only for some types
+//! of interest, or only for some variants of each type of interest, and then
+//! use default traversal for the remaining cases.
+//!
+//! For example, if you have `struct S(Ty, U)` where `S: TypeFoldable` and `U:
+//! TypeFoldable`, and an instance `s = S(ty, u)`, it would be folded like so:
+//! ```text
+//! s.fold_with(folder) calls
+//! - ty.fold_with(folder) calls
+//! - folder.fold_ty(ty) may call
+//! - ty.super_fold_with(folder)
+//! - u.fold_with(folder)
+//! ```
+use crate::mir;
+use crate::ty::{self, Binder, BoundTy, Ty, TyCtxt, TypeVisitable};
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_hir::def_id::DefId;
+
+use std::collections::BTreeMap;
+
+/// This trait is implemented for every type that can be folded,
+/// providing the skeleton of the traversal.
+///
+/// To implement this conveniently, use the derive macro located in
+/// `rustc_macros`.
+pub trait TypeFoldable<'tcx>: TypeVisitable<'tcx> {
+ /// The entry point for folding. To fold a value `t` with a folder `f`
+ /// call: `t.try_fold_with(f)`.
+ ///
+ /// For most types, this just traverses the value, calling `try_fold_with`
+ /// on each field/element.
+ ///
+    /// For types of interest (such as `Ty`), the implementation of this
+    /// method calls a folder method specifically for that type (such as
+ /// `F::try_fold_ty`). This is where control transfers from `TypeFoldable`
+ /// to `TypeFolder`.
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error>;
+
+ /// A convenient alternative to `try_fold_with` for use with infallible
+ /// folders. Do not override this method, to ensure coherence with
+ /// `try_fold_with`.
+ fn fold_with<F: TypeFolder<'tcx>>(self, folder: &mut F) -> Self {
+ self.try_fold_with(folder).into_ok()
+ }
+}
+
+// This trait is implemented for types of interest.
+pub trait TypeSuperFoldable<'tcx>: TypeFoldable<'tcx> {
+ /// Provides a default fold for a type of interest. This should only be
+ /// called within `TypeFolder` methods, when a non-custom traversal is
+ /// desired for the value of the type of interest passed to that method.
+ /// For example, in `MyFolder::try_fold_ty(ty)`, it is valid to call
+ /// `ty.try_super_fold_with(self)`, but any other folding should be done
+ /// with `xyz.try_fold_with(self)`.
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error>;
+
+ /// A convenient alternative to `try_super_fold_with` for use with
+ /// infallible folders. Do not override this method, to ensure coherence
+ /// with `try_super_fold_with`.
+ fn super_fold_with<F: TypeFolder<'tcx>>(self, folder: &mut F) -> Self {
+ self.try_super_fold_with(folder).into_ok()
+ }
+}
+
+/// This trait is implemented for every infallible folding traversal. There is
+/// a fold method defined for every type of interest. Each such method has a
+/// default that does an "identity" fold. Implementations of these methods
+/// often fall back to a `super_fold_with` method if the primary argument
+/// doesn't satisfy a particular condition.
+///
+/// A blanket implementation of [`FallibleTypeFolder`] will defer to
+/// the infallible methods of this trait to ensure that the two APIs
+/// are coherent.
+pub trait TypeFolder<'tcx>: FallibleTypeFolder<'tcx, Error = !> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn fold_binder<T>(&mut self, t: Binder<'tcx, T>) -> Binder<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ t.super_fold_with(self)
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ t.super_fold_with(self)
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ r.super_fold_with(self)
+ }
+
+ fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ c.super_fold_with(self)
+ }
+
+ fn fold_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ty::Unevaluated<'tcx> {
+ uv.super_fold_with(self)
+ }
+
+ fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
+ p.super_fold_with(self)
+ }
+
+ fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
+ bug!("most type folders should not be folding MIR datastructures: {:?}", c)
+ }
+}
+
+/// This trait is implemented for every folding traversal. There is a fold
+/// method defined for every type of interest. Each such method has a default
+/// that does an "identity" fold.
+///
+/// A blanket implementation of this trait (that defers to the relevant
+/// method of [`TypeFolder`]) is provided for all infallible folders in
+/// order to ensure the two APIs are coherent.
+pub trait FallibleTypeFolder<'tcx>: Sized {
+ type Error;
+
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn try_fold_binder<T>(&mut self, t: Binder<'tcx, T>) -> Result<Binder<'tcx, T>, Self::Error>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ t.try_super_fold_with(self)
+ }
+
+ fn try_fold_ty(&mut self, t: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
+ t.try_super_fold_with(self)
+ }
+
+ fn try_fold_region(&mut self, r: ty::Region<'tcx>) -> Result<ty::Region<'tcx>, Self::Error> {
+ r.try_super_fold_with(self)
+ }
+
+ fn try_fold_const(&mut self, c: ty::Const<'tcx>) -> Result<ty::Const<'tcx>, Self::Error> {
+ c.try_super_fold_with(self)
+ }
+
+ fn try_fold_unevaluated(
+ &mut self,
+ c: ty::Unevaluated<'tcx>,
+ ) -> Result<ty::Unevaluated<'tcx>, Self::Error> {
+ c.try_super_fold_with(self)
+ }
+
+ fn try_fold_predicate(
+ &mut self,
+ p: ty::Predicate<'tcx>,
+ ) -> Result<ty::Predicate<'tcx>, Self::Error> {
+ p.try_super_fold_with(self)
+ }
+
+ fn try_fold_mir_const(
+ &mut self,
+ c: mir::ConstantKind<'tcx>,
+ ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
+ bug!("most type folders should not be folding MIR datastructures: {:?}", c)
+ }
+}
+
+// This blanket implementation of the fallible trait for infallible folders
+// delegates to infallible methods to ensure coherence.
+impl<'tcx, F> FallibleTypeFolder<'tcx> for F
+where
+ F: TypeFolder<'tcx>,
+{
+ type Error = !;
+
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ TypeFolder::tcx(self)
+ }
+
+ fn try_fold_binder<T>(&mut self, t: Binder<'tcx, T>) -> Result<Binder<'tcx, T>, !>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ Ok(self.fold_binder(t))
+ }
+
+ fn try_fold_ty(&mut self, t: Ty<'tcx>) -> Result<Ty<'tcx>, !> {
+ Ok(self.fold_ty(t))
+ }
+
+ fn try_fold_region(&mut self, r: ty::Region<'tcx>) -> Result<ty::Region<'tcx>, !> {
+ Ok(self.fold_region(r))
+ }
+
+ fn try_fold_const(&mut self, c: ty::Const<'tcx>) -> Result<ty::Const<'tcx>, !> {
+ Ok(self.fold_const(c))
+ }
+
+ fn try_fold_unevaluated(
+ &mut self,
+ c: ty::Unevaluated<'tcx>,
+ ) -> Result<ty::Unevaluated<'tcx>, !> {
+ Ok(self.fold_unevaluated(c))
+ }
+
+ fn try_fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> Result<ty::Predicate<'tcx>, !> {
+ Ok(self.fold_predicate(p))
+ }
+
+ fn try_fold_mir_const(
+ &mut self,
+ c: mir::ConstantKind<'tcx>,
+ ) -> Result<mir::ConstantKind<'tcx>, !> {
+ Ok(self.fold_mir_const(c))
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Some sample folders
+
+pub struct BottomUpFolder<'tcx, F, G, H>
+where
+ F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
+ G: FnMut(ty::Region<'tcx>) -> ty::Region<'tcx>,
+ H: FnMut(ty::Const<'tcx>) -> ty::Const<'tcx>,
+{
+ pub tcx: TyCtxt<'tcx>,
+ pub ty_op: F,
+ pub lt_op: G,
+ pub ct_op: H,
+}
+
+impl<'tcx, F, G, H> TypeFolder<'tcx> for BottomUpFolder<'tcx, F, G, H>
+where
+ F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
+ G: FnMut(ty::Region<'tcx>) -> ty::Region<'tcx>,
+ H: FnMut(ty::Const<'tcx>) -> ty::Const<'tcx>,
+{
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let t = ty.super_fold_with(self);
+ (self.ty_op)(t)
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ let r = r.super_fold_with(self);
+ (self.lt_op)(r)
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ let ct = ct.super_fold_with(self);
+ (self.ct_op)(ct)
+ }
+}
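+
+// A hedged usage sketch, assuming `tcx: TyCtxt<'tcx>` and `ty: Ty<'tcx>` are
+// in scope: rewrite every `u32` inside `ty` to `u64`, bottom-up, leaving
+// regions and consts untouched.
+//
+//     let rewritten = ty.fold_with(&mut BottomUpFolder {
+//         tcx,
+//         ty_op: |t| if t == tcx.types.u32 { tcx.types.u64 } else { t },
+//         lt_op: |r| r,
+//         ct_op: |c| c,
+//     });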
+
+///////////////////////////////////////////////////////////////////////////
+// Region folder
+
+impl<'tcx> TyCtxt<'tcx> {
+    /// Folds the escaping and free regions in `value` using `f`. Regions
+    /// bound by binders inside `value` itself are skipped and left
+    /// unchanged.
+ pub fn fold_regions<T>(
+ self,
+ value: T,
+ mut f: impl FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ value.fold_with(&mut RegionFolder::new(self, &mut f))
+ }
+}
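+
+// A hedged usage sketch, assuming `tcx` and `ty` are in scope: erase every
+// free region in `ty`. Regions bound inside `ty` are skipped by
+// `RegionFolder`, so only free regions reach the closure.
+//
+//     let erased = tcx.fold_regions(ty, |_r, _depth| tcx.lifetimes.re_erased);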
+
+/// Folds over the substructure of a type, visiting its component
+/// types and all regions that occur *free* within it.
+///
+/// That is, `Ty` can contain function or method types that bind
+/// regions at the call site (`ReLateBound`), and occurrences of
+/// regions (aka "lifetimes") that are bound within a type are not
+/// visited by this folder; only regions that occur free will be
+/// visited by the `fold_region_fn` callback.
+pub struct RegionFolder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ /// Stores the index of a binder *just outside* the stuff we have
+ /// visited. So this begins as INNERMOST; when we pass through a
+ /// binder, it is incremented (via `shift_in`).
+ current_index: ty::DebruijnIndex,
+
+    /// Callback invoked for each free region. The `DebruijnIndex`
+ /// points to the binder *just outside* the ones we have passed
+ /// through.
+ fold_region_fn:
+ &'a mut (dyn FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx> + 'a),
+}
+
+impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
+ #[inline]
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ fold_region_fn: &'a mut dyn FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>,
+ ) -> RegionFolder<'a, 'tcx> {
+ RegionFolder { tcx, current_index: ty::INNERMOST, fold_region_fn }
+ }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(debruijn, _) if debruijn < self.current_index => {
+ debug!(?self.current_index, "skipped bound region");
+ r
+ }
+ _ => {
+ debug!(?self.current_index, "folding free region");
+ (self.fold_region_fn)(r, self.current_index)
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Bound vars replacer
+
+pub trait BoundVarReplacerDelegate<'tcx> {
+ fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx>;
+ fn replace_ty(&mut self, bt: ty::BoundTy) -> Ty<'tcx>;
+ fn replace_const(&mut self, bv: ty::BoundVar, ty: Ty<'tcx>) -> ty::Const<'tcx>;
+}
+
+pub struct FnMutDelegate<R, T, C> {
+ pub regions: R,
+ pub types: T,
+ pub consts: C,
+}
+impl<'tcx, R, T, C> BoundVarReplacerDelegate<'tcx> for FnMutDelegate<R, T, C>
+where
+ R: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ T: FnMut(ty::BoundTy) -> Ty<'tcx>,
+ C: FnMut(ty::BoundVar, Ty<'tcx>) -> ty::Const<'tcx>,
+{
+ fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx> {
+ (self.regions)(br)
+ }
+ fn replace_ty(&mut self, bt: ty::BoundTy) -> Ty<'tcx> {
+ (self.types)(bt)
+ }
+ fn replace_const(&mut self, bv: ty::BoundVar, ty: Ty<'tcx>) -> ty::Const<'tcx> {
+ (self.consts)(bv, ty)
+ }
+}
+
+/// Replaces the escaping bound vars (late-bound regions, bound types, or
+/// bound consts) in a type.
+struct BoundVarReplacer<'tcx, D> {
+ tcx: TyCtxt<'tcx>,
+
+ /// As with `RegionFolder`, represents the index of a binder *just outside*
+ /// the ones we have visited.
+ current_index: ty::DebruijnIndex,
+
+ delegate: D,
+}
+
+impl<'tcx, D: BoundVarReplacerDelegate<'tcx>> BoundVarReplacer<'tcx, D> {
+ fn new(tcx: TyCtxt<'tcx>, delegate: D) -> Self {
+ BoundVarReplacer { tcx, current_index: ty::INNERMOST, delegate }
+ }
+}
+
+impl<'tcx, D> TypeFolder<'tcx> for BoundVarReplacer<'tcx, D>
+where
+ D: BoundVarReplacerDelegate<'tcx>,
+{
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match *t.kind() {
+ ty::Bound(debruijn, bound_ty) if debruijn == self.current_index => {
+ let ty = self.delegate.replace_ty(bound_ty);
+ ty::fold::shift_vars(self.tcx, ty, self.current_index.as_u32())
+ }
+ _ if t.has_vars_bound_at_or_above(self.current_index) => t.super_fold_with(self),
+ _ => t,
+ }
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(debruijn, br) if debruijn == self.current_index => {
+ let region = self.delegate.replace_region(br);
+ if let ty::ReLateBound(debruijn1, br) = *region {
+ // If the callback returns a late-bound region,
+ // that region should always use the INNERMOST
+ // debruijn index. Then we adjust it to the
+ // correct depth.
+ assert_eq!(debruijn1, ty::INNERMOST);
+ self.tcx.reuse_or_mk_region(region, ty::ReLateBound(debruijn, br))
+ } else {
+ region
+ }
+ }
+ _ => r,
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ match ct.kind() {
+ ty::ConstKind::Bound(debruijn, bound_const) if debruijn == self.current_index => {
+ let ct = self.delegate.replace_const(bound_const, ct.ty());
+ ty::fold::shift_vars(self.tcx, ct, self.current_index.as_u32())
+ }
+ _ => ct.super_fold_with(self),
+ }
+ }
+
+ fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
+ if p.has_vars_bound_at_or_above(self.current_index) { p.super_fold_with(self) } else { p }
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Replaces all regions bound by the given `Binder` with the
+ /// results returned by the closure; the closure is expected to
+ /// return a free region (relative to this binder), and hence the
+ /// binder is removed in the return type. The closure is invoked
+ /// once for each unique `BoundRegionKind`; multiple references to the
+ /// same `BoundRegionKind` will reuse the previous result. A map is
+ /// returned at the end with each bound region and the free region
+ /// that replaced it.
+ ///
+ /// # Panics
+ ///
+ /// This method only replaces late bound regions. Any types or
+ /// constants bound by `value` will cause an ICE.
+ pub fn replace_late_bound_regions<T, F>(
+ self,
+ value: Binder<'tcx, T>,
+ mut fld_r: F,
+ ) -> (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)
+ where
+ F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ T: TypeFoldable<'tcx>,
+ {
+ let mut region_map = BTreeMap::new();
+ let real_fld_r = |br: ty::BoundRegion| *region_map.entry(br).or_insert_with(|| fld_r(br));
+ let value = self.replace_late_bound_regions_uncached(value, real_fld_r);
+ (value, region_map)
+ }
+
+ pub fn replace_late_bound_regions_uncached<T, F>(
+ self,
+ value: Binder<'tcx, T>,
+ replace_regions: F,
+ ) -> T
+ where
+ F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ T: TypeFoldable<'tcx>,
+ {
+ let value = value.skip_binder();
+ if !value.has_escaping_bound_vars() {
+ value
+ } else {
+ let delegate = FnMutDelegate {
+ regions: replace_regions,
+ types: |b| bug!("unexpected bound ty in binder: {b:?}"),
+ consts: |b, ty| bug!("unexpected bound ct in binder: {b:?} {ty}"),
+ };
+ let mut replacer = BoundVarReplacer::new(self, delegate);
+ value.fold_with(&mut replacer)
+ }
+ }
+
+    /// Replaces all escaping bound vars. The `delegate` decides how escaping
+    /// bound regions, types, and consts are replaced, via its
+    /// `replace_region`, `replace_ty`, and `replace_const` methods.
+ pub fn replace_escaping_bound_vars_uncached<T: TypeFoldable<'tcx>>(
+ self,
+ value: T,
+ delegate: impl BoundVarReplacerDelegate<'tcx>,
+ ) -> T {
+ if !value.has_escaping_bound_vars() {
+ value
+ } else {
+ let mut replacer = BoundVarReplacer::new(self, delegate);
+ value.fold_with(&mut replacer)
+ }
+ }
+
+    /// Replaces all types, regions, and consts bound by the given `Binder`,
+    /// using the `delegate` to produce the replacements.
+ pub fn replace_bound_vars_uncached<T: TypeFoldable<'tcx>>(
+ self,
+ value: Binder<'tcx, T>,
+ delegate: impl BoundVarReplacerDelegate<'tcx>,
+ ) -> T {
+ self.replace_escaping_bound_vars_uncached(value.skip_binder(), delegate)
+ }
+
+ /// Replaces any late-bound regions bound in `value` with
+ /// free variants attached to `all_outlive_scope`.
+ pub fn liberate_late_bound_regions<T>(
+ self,
+ all_outlive_scope: DefId,
+ value: ty::Binder<'tcx, T>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.replace_late_bound_regions_uncached(value, |br| {
+ self.mk_region(ty::ReFree(ty::FreeRegion {
+ scope: all_outlive_scope,
+ bound_region: br.kind,
+ }))
+ })
+ }
+
+ pub fn shift_bound_var_indices<T>(self, bound_vars: usize, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let shift_bv = |bv: ty::BoundVar| ty::BoundVar::from_usize(bv.as_usize() + bound_vars);
+ self.replace_escaping_bound_vars_uncached(
+ value,
+ FnMutDelegate {
+ regions: |r: ty::BoundRegion| {
+ self.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: shift_bv(r.var), kind: r.kind },
+ ))
+ },
+ types: |t: ty::BoundTy| {
+ self.mk_ty(ty::Bound(
+ ty::INNERMOST,
+ ty::BoundTy { var: shift_bv(t.var), kind: t.kind },
+ ))
+ },
+ consts: |c, ty: Ty<'tcx>| {
+ self.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Bound(ty::INNERMOST, shift_bv(c)),
+ ty,
+ })
+ },
+ },
+ )
+ }
+
+ /// Replaces any late-bound regions bound in `value` with `'erased`. Useful in codegen but also
+ /// method lookup and a few other places where precise region relationships are not required.
+ pub fn erase_late_bound_regions<T>(self, value: Binder<'tcx, T>) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.replace_late_bound_regions(value, |_| self.lifetimes.re_erased).0
+ }
+
+ /// Rewrite any late-bound regions so that they are anonymous. Region numbers are
+ /// assigned starting at 0 and increasing monotonically in the order traversed
+ /// by the fold operation.
+ ///
+ /// The chief purpose of this function is to canonicalize regions so that two
+ /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become
+ /// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and
+ /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization.
+ pub fn anonymize_late_bound_regions<T>(self, sig: Binder<'tcx, T>) -> Binder<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let mut counter = 0;
+ let inner = self
+ .replace_late_bound_regions(sig, |_| {
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_u32(counter),
+ kind: ty::BrAnon(counter),
+ };
+ let r = self.mk_region(ty::ReLateBound(ty::INNERMOST, br));
+ counter += 1;
+ r
+ })
+ .0;
+ let bound_vars = self.mk_bound_variable_kinds(
+ (0..counter).map(|i| ty::BoundVariableKind::Region(ty::BrAnon(i))),
+ );
+ Binder::bind_with_vars(inner, bound_vars)
+ }
+
+    /// Anonymize all bound variables in `value`; this is mostly used to improve caching.
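+    ///
+    /// For example, `for<'a> fn(&'a u32)` and `for<'b> fn(&'b u32)` anonymize
+    /// to the same binder, so values differing only in the names of their
+    /// bound variables can share a cache entry.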
+ pub fn anonymize_bound_vars<T>(self, value: Binder<'tcx, T>) -> Binder<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ struct Anonymize<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ map: &'a mut FxIndexMap<ty::BoundVar, ty::BoundVariableKind>,
+ }
+ impl<'tcx> BoundVarReplacerDelegate<'tcx> for Anonymize<'_, 'tcx> {
+ fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx> {
+ let entry = self.map.entry(br.var);
+ let index = entry.index();
+ let var = ty::BoundVar::from_usize(index);
+ let kind = entry
+ .or_insert_with(|| ty::BoundVariableKind::Region(ty::BrAnon(index as u32)))
+ .expect_region();
+ let br = ty::BoundRegion { var, kind };
+ self.tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br))
+ }
+ fn replace_ty(&mut self, bt: ty::BoundTy) -> Ty<'tcx> {
+ let entry = self.map.entry(bt.var);
+ let index = entry.index();
+ let var = ty::BoundVar::from_usize(index);
+ let kind = entry
+ .or_insert_with(|| ty::BoundVariableKind::Ty(ty::BoundTyKind::Anon))
+ .expect_ty();
+ self.tcx.mk_ty(ty::Bound(ty::INNERMOST, BoundTy { var, kind }))
+ }
+ fn replace_const(&mut self, bv: ty::BoundVar, ty: Ty<'tcx>) -> ty::Const<'tcx> {
+ let entry = self.map.entry(bv);
+ let index = entry.index();
+ let var = ty::BoundVar::from_usize(index);
+ let () = entry.or_insert_with(|| ty::BoundVariableKind::Const).expect_const();
+ self.tcx.mk_const(ty::ConstS { ty, kind: ty::ConstKind::Bound(ty::INNERMOST, var) })
+ }
+ }
+
+ let mut map = Default::default();
+ let delegate = Anonymize { tcx: self, map: &mut map };
+ let inner = self.replace_escaping_bound_vars_uncached(value.skip_binder(), delegate);
+ let bound_vars = self.mk_bound_variable_kinds(map.into_values());
+ Binder::bind_with_vars(inner, bound_vars)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Shifter
+//
+// Shifts the De Bruijn indices on all escaping bound vars by a
+// fixed amount. Useful in substitution or when otherwise introducing
+// a binding level that is not intended to capture the existing bound
+// vars. See comment on `shift_vars_through_binders` method in
+// `subst.rs` for more details.
+
+struct Shifter<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ current_index: ty::DebruijnIndex,
+ amount: u32,
+}
+
+impl<'tcx> Shifter<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, amount: u32) -> Self {
+ Shifter { tcx, current_index: ty::INNERMOST, amount }
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for Shifter<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(debruijn, br) => {
+ if self.amount == 0 || debruijn < self.current_index {
+ r
+ } else {
+ let debruijn = debruijn.shifted_in(self.amount);
+ let shifted = ty::ReLateBound(debruijn, br);
+ self.tcx.mk_region(shifted)
+ }
+ }
+ _ => r,
+ }
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match *ty.kind() {
+ ty::Bound(debruijn, bound_ty) => {
+ if self.amount == 0 || debruijn < self.current_index {
+ ty
+ } else {
+ let debruijn = debruijn.shifted_in(self.amount);
+ self.tcx.mk_ty(ty::Bound(debruijn, bound_ty))
+ }
+ }
+
+ _ => ty.super_fold_with(self),
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ if let ty::ConstKind::Bound(debruijn, bound_ct) = ct.kind() {
+ if self.amount == 0 || debruijn < self.current_index {
+ ct
+ } else {
+ let debruijn = debruijn.shifted_in(self.amount);
+ self.tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Bound(debruijn, bound_ct),
+ ty: ct.ty(),
+ })
+ }
+ } else {
+ ct.super_fold_with(self)
+ }
+ }
+}
+
+pub fn shift_region<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ region: ty::Region<'tcx>,
+ amount: u32,
+) -> ty::Region<'tcx> {
+ match *region {
+ ty::ReLateBound(debruijn, br) if amount > 0 => {
+ tcx.mk_region(ty::ReLateBound(debruijn.shifted_in(amount), br))
+ }
+ _ => region,
+ }
+}
+
+pub fn shift_vars<'tcx, T>(tcx: TyCtxt<'tcx>, value: T, amount: u32) -> T
+where
+ T: TypeFoldable<'tcx>,
+{
+ debug!("shift_vars(value={:?}, amount={})", value, amount);
+
+ value.fold_with(&mut Shifter::new(tcx, amount))
+}
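+
+// For intuition: with `shift_vars(tcx, value, 1)`, a bound variable `^0`
+// escaping `value` becomes `^1`, so it still refers to the same binder after
+// `value` is moved under one additional enclosing binder; variables bound
+// within `value` itself are left untouched.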
diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs
new file mode 100644
index 000000000..add2df258
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/generics.rs
@@ -0,0 +1,349 @@
+use crate::middle::resolve_lifetime::ObjectLifetimeDefault;
+use crate::ty;
+use crate::ty::subst::{Subst, SubstsRef};
+use crate::ty::EarlyBinder;
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+
+use super::{EarlyBoundRegion, InstantiatedPredicates, ParamConst, ParamTy, Predicate, TyCtxt};
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum GenericParamDefKind {
+ Lifetime,
+ Type { has_default: bool, object_lifetime_default: ObjectLifetimeDefault, synthetic: bool },
+ Const { has_default: bool },
+}
+
+impl GenericParamDefKind {
+ pub fn descr(&self) -> &'static str {
+ match self {
+ GenericParamDefKind::Lifetime => "lifetime",
+ GenericParamDefKind::Type { .. } => "type",
+ GenericParamDefKind::Const { .. } => "constant",
+ }
+ }
+ pub fn to_ord(&self) -> ast::ParamKindOrd {
+ match self {
+ GenericParamDefKind::Lifetime => ast::ParamKindOrd::Lifetime,
+ GenericParamDefKind::Type { .. } => ast::ParamKindOrd::Type,
+ GenericParamDefKind::Const { .. } => ast::ParamKindOrd::Const,
+ }
+ }
+
+ pub fn is_ty_or_const(&self) -> bool {
+ match self {
+ GenericParamDefKind::Lifetime => false,
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => true,
+ }
+ }
+
+ pub fn is_synthetic(&self) -> bool {
+ match self {
+ GenericParamDefKind::Type { synthetic, .. } => *synthetic,
+ _ => false,
+ }
+ }
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct GenericParamDef {
+ pub name: Symbol,
+ pub def_id: DefId,
+ pub index: u32,
+
+ /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute
+ /// on generic parameter `'a`/`T`, asserts data behind the parameter
+ /// `'a`/`T` won't be accessed during the parent type's `Drop` impl.
+ pub pure_wrt_drop: bool,
+
+ pub kind: GenericParamDefKind,
+}
+
+impl GenericParamDef {
+ pub fn to_early_bound_region_data(&self) -> ty::EarlyBoundRegion {
+ if let GenericParamDefKind::Lifetime = self.kind {
+ ty::EarlyBoundRegion { def_id: self.def_id, index: self.index, name: self.name }
+ } else {
+ bug!("cannot convert a non-lifetime parameter def to an early bound region")
+ }
+ }
+
+ pub fn has_default(&self) -> bool {
+ match self.kind {
+ GenericParamDefKind::Type { has_default, .. }
+ | GenericParamDefKind::Const { has_default } => has_default,
+ GenericParamDefKind::Lifetime => false,
+ }
+ }
+
+ pub fn default_value<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ ) -> Option<EarlyBinder<ty::GenericArg<'tcx>>> {
+ match self.kind {
+ GenericParamDefKind::Type { has_default, .. } if has_default => {
+ Some(tcx.bound_type_of(self.def_id).map_bound(|t| t.into()))
+ }
+ GenericParamDefKind::Const { has_default } if has_default => {
+ Some(tcx.bound_const_param_default(self.def_id).map_bound(|c| c.into()))
+ }
+ _ => None,
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct GenericParamCount {
+ pub lifetimes: usize,
+ pub types: usize,
+ pub consts: usize,
+}
+
+/// Information about the formal type/lifetime parameters associated
+/// with an item or method. Analogous to `hir::Generics`.
+///
+/// The ordering of parameters is the same as in `Subst` (excluding child generics):
+/// `Self` (optionally), `Lifetime` params..., `Type` params...
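+///
+/// For example, given `impl<'a, T> Foo<'a, T> { fn bar<U>(&self) {} }`, the
+/// `Generics` of `bar` have `parent` pointing at the impl, `parent_count == 2`
+/// (for `'a` and `T`), and a single own param for `U`.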
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct Generics {
+ pub parent: Option<DefId>,
+ pub parent_count: usize,
+ pub params: Vec<GenericParamDef>,
+
+ /// Reverse map to the `index` field of each `GenericParamDef`.
+ #[stable_hasher(ignore)]
+ pub param_def_id_to_index: FxHashMap<DefId, u32>,
+
+ pub has_self: bool,
+ pub has_late_bound_regions: Option<Span>,
+}
+
+impl<'tcx> Generics {
+ #[inline]
+ pub fn count(&self) -> usize {
+ self.parent_count + self.params.len()
+ }
+
+ pub fn own_counts(&self) -> GenericParamCount {
+ // We could cache this as a property of `GenericParamCount`, but
+ // the aim is to refactor this away entirely eventually and the
+ // presence of this method will be a constant reminder.
+ let mut own_counts = GenericParamCount::default();
+
+ for param in &self.params {
+ match param.kind {
+ GenericParamDefKind::Lifetime => own_counts.lifetimes += 1,
+ GenericParamDefKind::Type { .. } => own_counts.types += 1,
+ GenericParamDefKind::Const { .. } => own_counts.consts += 1,
+ }
+ }
+
+ own_counts
+ }
+
+ pub fn own_defaults(&self) -> GenericParamCount {
+ let mut own_defaults = GenericParamCount::default();
+
+ for param in &self.params {
+ match param.kind {
+ GenericParamDefKind::Lifetime => (),
+ GenericParamDefKind::Type { has_default, .. } => {
+ own_defaults.types += has_default as usize;
+ }
+ GenericParamDefKind::Const { has_default } => {
+ own_defaults.consts += has_default as usize;
+ }
+ }
+ }
+
+ own_defaults
+ }
+
+ pub fn requires_monomorphization(&self, tcx: TyCtxt<'tcx>) -> bool {
+ if self.own_requires_monomorphization() {
+ return true;
+ }
+
+ if let Some(parent_def_id) = self.parent {
+ let parent = tcx.generics_of(parent_def_id);
+ parent.requires_monomorphization(tcx)
+ } else {
+ false
+ }
+ }
+
+ pub fn own_requires_monomorphization(&self) -> bool {
+ for param in &self.params {
+ match param.kind {
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
+ return true;
+ }
+ GenericParamDefKind::Lifetime => {}
+ }
+ }
+ false
+ }
+
+ /// Returns the `GenericParamDef` with the given index.
+ pub fn param_at(&'tcx self, param_index: usize, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
+ if let Some(index) = param_index.checked_sub(self.parent_count) {
+ &self.params[index]
+ } else {
+ tcx.generics_of(self.parent.expect("parent_count > 0 but no parent?"))
+ .param_at(param_index, tcx)
+ }
+ }
+
+ /// Returns the `GenericParamDef` associated with this `EarlyBoundRegion`.
+ pub fn region_param(
+ &'tcx self,
+ param: &EarlyBoundRegion,
+ tcx: TyCtxt<'tcx>,
+ ) -> &'tcx GenericParamDef {
+ let param = self.param_at(param.index as usize, tcx);
+ match param.kind {
+ GenericParamDefKind::Lifetime => param,
+ _ => bug!("expected lifetime parameter, but found another generic parameter"),
+ }
+ }
+
+ /// Returns the `GenericParamDef` associated with this `ParamTy`.
+ pub fn type_param(&'tcx self, param: &ParamTy, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
+ let param = self.param_at(param.index as usize, tcx);
+ match param.kind {
+ GenericParamDefKind::Type { .. } => param,
+ _ => bug!("expected type parameter, but found another generic parameter"),
+ }
+ }
+
+ /// Returns the `GenericParamDef` associated with this `ParamConst`.
+ pub fn const_param(&'tcx self, param: &ParamConst, tcx: TyCtxt<'tcx>) -> &GenericParamDef {
+ let param = self.param_at(param.index as usize, tcx);
+ match param.kind {
+ GenericParamDefKind::Const { .. } => param,
+ _ => bug!("expected const parameter, but found another generic parameter"),
+ }
+ }
+
+ /// Returns `true` if `params` has `impl Trait`.
+ pub fn has_impl_trait(&'tcx self) -> bool {
+ self.params.iter().any(|param| {
+ matches!(param.kind, ty::GenericParamDefKind::Type { synthetic: true, .. })
+ })
+ }
+
+ /// Returns the substs corresponding to the generic parameters
+ /// of this item, excluding `Self`.
+ ///
+ /// **This should only be used for diagnostics purposes.**
+ pub fn own_substs_no_defaults(
+ &'tcx self,
+ tcx: TyCtxt<'tcx>,
+ substs: &'tcx [ty::GenericArg<'tcx>],
+ ) -> &'tcx [ty::GenericArg<'tcx>] {
+ let mut own_params = self.parent_count..self.count();
+ if self.has_self && self.parent.is_none() {
+ own_params.start = 1;
+ }
+
+ // Filter the default arguments.
+ //
+        // This currently uses structural equality instead
+        // of semantic equivalence. While not ideal, that's
+        // good enough for now as this should only be used
+        // for diagnostics anyway.
+ own_params.end -= self
+ .params
+ .iter()
+ .rev()
+ .take_while(|param| {
+ param.default_value(tcx).map_or(false, |default| {
+ default.subst(tcx, substs) == substs[param.index as usize]
+ })
+ })
+ .count();
+
+ &substs[own_params]
+ }
+
+ /// Returns the substs corresponding to the generic parameters of this item, excluding `Self`.
+ ///
+ /// **This should only be used for diagnostics purposes.**
+ pub fn own_substs(
+ &'tcx self,
+ substs: &'tcx [ty::GenericArg<'tcx>],
+ ) -> &'tcx [ty::GenericArg<'tcx>] {
+ let own = &substs[self.parent_count..][..self.params.len()];
+        if self.has_self && self.parent.is_none() { &own[1..] } else { own }
+ }
+}
+
+/// Bounds on generics.
+#[derive(Copy, Clone, Default, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct GenericPredicates<'tcx> {
+ pub parent: Option<DefId>,
+ pub predicates: &'tcx [(Predicate<'tcx>, Span)],
+}
+
+impl<'tcx> GenericPredicates<'tcx> {
+ pub fn instantiate(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> InstantiatedPredicates<'tcx> {
+ let mut instantiated = InstantiatedPredicates::empty();
+ self.instantiate_into(tcx, &mut instantiated, substs);
+ instantiated
+ }
+
+ pub fn instantiate_own(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> InstantiatedPredicates<'tcx> {
+ InstantiatedPredicates {
+ predicates: self
+ .predicates
+ .iter()
+ .map(|(p, _)| EarlyBinder(*p).subst(tcx, substs))
+ .collect(),
+ spans: self.predicates.iter().map(|(_, sp)| *sp).collect(),
+ }
+ }
+
+ fn instantiate_into(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ instantiated: &mut InstantiatedPredicates<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) {
+ if let Some(def_id) = self.parent {
+ tcx.predicates_of(def_id).instantiate_into(tcx, instantiated, substs);
+ }
+ instantiated
+ .predicates
+ .extend(self.predicates.iter().map(|(p, _)| EarlyBinder(*p).subst(tcx, substs)));
+ instantiated.spans.extend(self.predicates.iter().map(|(_, sp)| *sp));
+ }
+
+ pub fn instantiate_identity(&self, tcx: TyCtxt<'tcx>) -> InstantiatedPredicates<'tcx> {
+ let mut instantiated = InstantiatedPredicates::empty();
+ self.instantiate_identity_into(tcx, &mut instantiated);
+ instantiated
+ }
+
+ fn instantiate_identity_into(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ instantiated: &mut InstantiatedPredicates<'tcx>,
+ ) {
+ if let Some(def_id) = self.parent {
+ tcx.predicates_of(def_id).instantiate_identity_into(tcx, instantiated);
+ }
+ instantiated.predicates.extend(self.predicates.iter().map(|(p, _)| p));
+ instantiated.spans.extend(self.predicates.iter().map(|(_, s)| s));
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/impls_ty.rs b/compiler/rustc_middle/src/ty/impls_ty.rs
new file mode 100644
index 000000000..cd00b26b8
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/impls_ty.rs
@@ -0,0 +1,135 @@
+//! This module contains `HashStable` implementations for various data types
+//! from `rustc_middle::ty` in no particular order.
+
+use crate::middle::region;
+use crate::mir;
+use crate::ty;
+use crate::ty::fast_reject::SimplifiedType;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::HashingControls;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+use rustc_query_system::ich::StableHashingContext;
+use std::cell::RefCell;
+
+impl<'a, 'tcx, T> HashStable<StableHashingContext<'a>> for &'tcx ty::List<T>
+where
+ T: HashStable<StableHashingContext<'a>>,
+{
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ thread_local! {
+ static CACHE: RefCell<FxHashMap<(usize, usize, HashingControls), Fingerprint>> =
+ RefCell::new(Default::default());
+ }
+
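+        // The key identifies an interned list by address and length. Since
+        // `List`s are interned (deduplicated), the address is stable for a
+        // given list; `HashingControls` is part of the key because it
+        // changes what the stable hash observes.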
+ let hash = CACHE.with(|cache| {
+ let key = (self.as_ptr() as usize, self.len(), hcx.hashing_controls());
+ if let Some(&hash) = cache.borrow().get(&key) {
+ return hash;
+ }
+
+ let mut hasher = StableHasher::new();
+ (&self[..]).hash_stable(hcx, &mut hasher);
+
+ let hash: Fingerprint = hasher.finish();
+ cache.borrow_mut().insert(key, hash);
+ hash
+ });
+
+ hash.hash_stable(hcx, hasher);
+ }
+}
+
+impl<'a, 'tcx, T> ToStableHashKey<StableHashingContext<'a>> for &'tcx ty::List<T>
+where
+ T: HashStable<StableHashingContext<'a>>,
+{
+ type KeyType = Fingerprint;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Fingerprint {
+ let mut hasher = StableHasher::new();
+ let mut hcx: StableHashingContext<'a> = hcx.clone();
+ self.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish()
+ }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for SimplifiedType {
+ type KeyType = Fingerprint;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Fingerprint {
+ let mut hasher = StableHasher::new();
+ let mut hcx: StableHashingContext<'a> = hcx.clone();
+ self.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish()
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ty::subst::GenericArg<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ self.unpack().hash_stable(hcx, hasher);
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ty::subst::GenericArgKind<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ match self {
+ // WARNING: We dedup cache the `HashStable` results for `List`
+ // while ignoring types and freely transmute
+ // between `List<Ty<'tcx>>` and `List<GenericArg<'tcx>>`.
+ // See `fn intern_type_list` for more details.
+ //
+ // We therefore hash types without adding a hash for their discriminant.
+ //
+ // In order to make it very unlikely for the sequence of bytes being hashed for
+ // a `GenericArgKind::Type` to be the same as the sequence of bytes being
+ // hashed for one of the other variants, we hash some very high number instead
+ // of their actual discriminant since `TyKind` should never start with anything
+ // that high.
+ ty::subst::GenericArgKind::Type(ty) => ty.hash_stable(hcx, hasher),
+ ty::subst::GenericArgKind::Const(ct) => {
+ 0xF3u8.hash_stable(hcx, hasher);
+ ct.hash_stable(hcx, hasher);
+ }
+ ty::subst::GenericArgKind::Lifetime(lt) => {
+ 0xF5u8.hash_stable(hcx, hasher);
+ lt.hash_stable(hcx, hasher);
+ }
+ }
+ }
+}
+
+// AllocIds get resolved to whatever they point to (to be stable)
+impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ ty::tls::with_opt(|tcx| {
+ trace!("hashing {:?}", *self);
+ let tcx = tcx.expect("can't hash AllocIds during hir lowering");
+ tcx.try_get_global_alloc(*self).hash_stable(hcx, hasher);
+ });
+ }
+}
+
+// `Relocations` with default type parameters is a sorted map.
+impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::Relocations<Prov>
+where
+ Prov: HashStable<StableHashingContext<'a>>,
+{
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ self.len().hash_stable(hcx, hasher);
+ for reloc in self.iter() {
+ reloc.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for region::Scope {
+ type KeyType = region::Scope;
+
+ #[inline]
+ fn to_stable_hash_key(&self, _: &StableHashingContext<'a>) -> region::Scope {
+ *self
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs b/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
new file mode 100644
index 000000000..c4ad698ba
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
@@ -0,0 +1,145 @@
+use crate::ty::context::TyCtxt;
+use crate::ty::{DefId, DefIdTree};
+use rustc_span::def_id::CRATE_DEF_ID;
+use smallvec::SmallVec;
+use std::mem;
+
+use DefIdForest::*;
+
+/// Represents a forest of `DefId`s closed under the ancestor relation. That is,
+/// if a `DefId` representing a module is contained in the forest then all
+/// `DefId`s defined in that module or submodules are also implicitly contained
+/// in the forest.
+///
+/// This is used to represent a set of modules in which a type is visibly
+/// uninhabited.
+///
+/// We store the minimal set of `DefId`s required to represent the whole set. If A and B are
+/// `DefId`s in the `DefIdForest`, and A is a parent of B, then only A will be stored. When this is
+/// used with `type_uninhabited_from`, there will very rarely be more than one `DefId` stored.
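+///
+/// For example (illustrative): if both `a` and `a::b` were candidate roots,
+/// only `a` would be stored, since `a::b` is already implied by `a`.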
+#[derive(Copy, Clone, HashStable, Debug)]
+pub enum DefIdForest<'a> {
+ Empty,
+ Single(DefId),
+ /// This variant is very rare.
+ /// Invariant: >1 elements
+ Multiple(&'a [DefId]),
+}
+
+/// Tests whether a slice of roots contains a given DefId.
+#[inline]
+fn slice_contains<'tcx>(tcx: TyCtxt<'tcx>, slice: &[DefId], id: DefId) -> bool {
+ slice.iter().any(|root_id| tcx.is_descendant_of(id, *root_id))
+}
+
+impl<'tcx> DefIdForest<'tcx> {
+ /// Creates an empty forest.
+ pub fn empty() -> DefIdForest<'tcx> {
+ DefIdForest::Empty
+ }
+
+ /// Creates a forest consisting of a single tree representing the entire
+ /// crate.
+ #[inline]
+ pub fn full() -> DefIdForest<'tcx> {
+ DefIdForest::from_id(CRATE_DEF_ID.to_def_id())
+ }
+
+ /// Creates a forest containing a `DefId` and all its descendants.
+ pub fn from_id(id: DefId) -> DefIdForest<'tcx> {
+ DefIdForest::Single(id)
+ }
+
+ fn as_slice(&self) -> &[DefId] {
+ match self {
+ Empty => &[],
+ Single(id) => std::slice::from_ref(id),
+ Multiple(root_ids) => root_ids,
+ }
+ }
+
+ // Only allocates in the rare `Multiple` case.
+ fn from_vec(tcx: TyCtxt<'tcx>, root_ids: SmallVec<[DefId; 1]>) -> DefIdForest<'tcx> {
+ match &root_ids[..] {
+ [] => Empty,
+ [id] => Single(*id),
+ _ => DefIdForest::Multiple(tcx.arena.alloc_from_iter(root_ids)),
+ }
+ }
+
+ /// Tests whether the forest is empty.
+ pub fn is_empty(&self) -> bool {
+ match self {
+ Empty => true,
+ Single(..) | Multiple(..) => false,
+ }
+ }
+
+ /// Iterate over the set of roots.
+ fn iter(&self) -> impl Iterator<Item = DefId> + '_ {
+ self.as_slice().iter().copied()
+ }
+
+ /// Tests whether the forest contains a given DefId.
+ pub fn contains(&self, tcx: TyCtxt<'tcx>, id: DefId) -> bool {
+ slice_contains(tcx, self.as_slice(), id)
+ }
+
+ /// Calculate the intersection of a collection of forests.
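+ /// E.g. (illustrative): intersecting `{a}` with `{a::b, c}` yields `{a::b}`,
+ /// since `a::b` lies inside `a`, while `c` lies inside no root of the other
+ /// forest.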
+ pub fn intersection<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx>
+ where
+ I: IntoIterator<Item = DefIdForest<'tcx>>,
+ {
+ let mut iter = iter.into_iter();
+ let mut ret: SmallVec<[_; 1]> = if let Some(first) = iter.next() {
+ SmallVec::from_slice(first.as_slice())
+ } else {
+ return DefIdForest::full();
+ };
+
+ let mut next_ret: SmallVec<[_; 1]> = SmallVec::new();
+ for next_forest in iter {
+ // No need to continue if the intersection is already empty.
+ if ret.is_empty() || next_forest.is_empty() {
+ return DefIdForest::empty();
+ }
+
+ // We keep the elements in `ret` that are also in `next_forest`.
+ next_ret.extend(ret.iter().copied().filter(|&id| next_forest.contains(tcx, id)));
+ // We keep the elements in `next_forest` that are also in `ret`.
+ next_ret.extend(next_forest.iter().filter(|&id| slice_contains(tcx, &ret, id)));
+
+ mem::swap(&mut next_ret, &mut ret);
+ next_ret.clear();
+ }
+ DefIdForest::from_vec(tcx, ret)
+ }
+
+ /// Calculate the union of a collection of forests.
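+ /// E.g. (illustrative): the union of `{a::b}` and `{a}` collapses to `{a}`,
+ /// since everything under `a::b` is already under `a`.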
+ pub fn union<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx>
+ where
+ I: IntoIterator<Item = DefIdForest<'tcx>>,
+ {
+ let mut ret: SmallVec<[_; 1]> = SmallVec::new();
+ let mut next_ret: SmallVec<[_; 1]> = SmallVec::new();
+ for next_forest in iter {
+ // Union with the empty set is a no-op.
+ if next_forest.is_empty() {
+ continue;
+ }
+
+ // We add everything in `ret` that is not in `next_forest`.
+ next_ret.extend(ret.iter().copied().filter(|&id| !next_forest.contains(tcx, id)));
+ // We add everything in `next_forest` that we haven't added yet.
+ for id in next_forest.iter() {
+ if !slice_contains(tcx, &next_ret, id) {
+ next_ret.push(id);
+ }
+ }
+
+ mem::swap(&mut next_ret, &mut ret);
+ next_ret.clear();
+ }
+ DefIdForest::from_vec(tcx, ret)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
new file mode 100644
index 000000000..3d22f5a04
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
@@ -0,0 +1,234 @@
+pub use self::def_id_forest::DefIdForest;
+
+use crate::ty;
+use crate::ty::context::TyCtxt;
+use crate::ty::{AdtDef, FieldDef, Ty, VariantDef};
+use crate::ty::{AdtKind, Visibility};
+use crate::ty::{DefId, SubstsRef};
+
+use rustc_type_ir::sty::TyKind::*;
+
+mod def_id_forest;
+
+// The methods in this module calculate `DefIdForest`s of modules in which an
+// `AdtDef`/`VariantDef`/`FieldDef` is visibly uninhabited.
+//
+// # Example
+// ```rust
+// enum Void {}
+// mod a {
+// pub mod b {
+// pub struct SecretlyUninhabited {
+// _priv: !,
+// }
+// }
+// }
+//
+// mod c {
+// pub struct AlsoSecretlyUninhabited {
+// _priv: Void,
+// }
+// mod d {
+// }
+// }
+//
+// struct Foo {
+// x: a::b::SecretlyUninhabited,
+// y: c::AlsoSecretlyUninhabited,
+// }
+// ```
+// In this code, the type `Foo` will only be visibly uninhabited inside the
+// modules `b`, `c` and `d`. Calling `uninhabited_from` on `Foo` or its `AdtDef` will
+// return the forest of modules {`b`, `c`->`d`} (represented in a `DefIdForest` by the
+// set {`b`, `c`}).
+//
+// We need this information for pattern-matching on `Foo` or types that contain
+// `Foo`.
+//
+// # Example
+// ```rust
+// let foo_result: Result<T, Foo> = ... ;
+// let Ok(t) = foo_result;
+// ```
+// This code should only compile in modules where the uninhabitedness of `Foo` is
+// visible.
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Checks whether a type is visibly uninhabited from a particular module.
+ ///
+ /// # Example
+ /// ```
+ /// #![feature(never_type)]
+ /// # fn main() {}
+ /// enum Void {}
+ /// mod a {
+ /// pub mod b {
+ /// pub struct SecretlyUninhabited {
+ /// _priv: !,
+ /// }
+ /// }
+ /// }
+ ///
+ /// mod c {
+ /// use super::Void;
+ /// pub struct AlsoSecretlyUninhabited {
+ /// _priv: Void,
+ /// }
+ /// mod d {
+ /// }
+ /// }
+ ///
+ /// struct Foo {
+ /// x: a::b::SecretlyUninhabited,
+ /// y: c::AlsoSecretlyUninhabited,
+ /// }
+ /// ```
+ /// In this code, the type `Foo` will only be visibly uninhabited inside the
+ /// modules b, c and d. This affects pattern-matching on `Foo` or types that
+ /// contain `Foo`.
+ ///
+ /// # Example
+ /// ```ignore (illustrative)
+ /// let foo_result: Result<T, Foo> = ... ;
+ /// let Ok(t) = foo_result;
+ /// ```
+ /// This code should only compile in modules where the uninhabitedness of Foo is
+ /// visible.
+ pub fn is_ty_uninhabited_from(
+ self,
+ module: DefId,
+ ty: Ty<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> bool {
+ // To check whether this type is uninhabited at all (not just from the
+ // given node), you could check whether the forest is empty.
+ // ```
+ // forest.is_empty()
+ // ```
+ ty.uninhabited_from(self, param_env).contains(self, module)
+ }
+}
+
+impl<'tcx> AdtDef<'tcx> {
+ /// Calculates the forest of `DefId`s from which this ADT is visibly uninhabited.
+ fn uninhabited_from(
+ self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> DefIdForest<'tcx> {
+ // Non-exhaustive ADTs from other crates are always considered inhabited.
+ if self.is_variant_list_non_exhaustive() && !self.did().is_local() {
+ DefIdForest::empty()
+ } else {
+ DefIdForest::intersection(
+ tcx,
+ self.variants()
+ .iter()
+ .map(|v| v.uninhabited_from(tcx, substs, self.adt_kind(), param_env)),
+ )
+ }
+ }
+}
+
+impl<'tcx> VariantDef {
+ /// Calculates the forest of `DefId`s from which this variant is visibly uninhabited.
+ pub fn uninhabited_from(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ adt_kind: AdtKind,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> DefIdForest<'tcx> {
+ let is_enum = match adt_kind {
+ // For now, `union`s are never considered uninhabited.
+ // The precise semantics of inhabitedness with respect to unions is currently undecided.
+ AdtKind::Union => return DefIdForest::empty(),
+ AdtKind::Enum => true,
+ AdtKind::Struct => false,
+ };
+ // Non-exhaustive variants from other crates are always considered inhabited.
+ if self.is_field_list_non_exhaustive() && !self.def_id.is_local() {
+ DefIdForest::empty()
+ } else {
+ DefIdForest::union(
+ tcx,
+ self.fields.iter().map(|f| f.uninhabited_from(tcx, substs, is_enum, param_env)),
+ )
+ }
+ }
+}
+
+impl<'tcx> FieldDef {
+ /// Calculates the forest of `DefId`s from which this field is visibly uninhabited.
+ fn uninhabited_from(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ is_enum: bool,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> DefIdForest<'tcx> {
+ let data_uninhabitedness = move || self.ty(tcx, substs).uninhabited_from(tcx, param_env);
+ // FIXME(canndrew): Currently enum fields are (incorrectly) stored with
+ // `Visibility::Invisible` so we need to override `self.vis` if we're
+ // dealing with an enum.
+ if is_enum {
+ data_uninhabitedness()
+ } else {
+ match self.vis {
+ Visibility::Invisible => DefIdForest::empty(),
+ Visibility::Restricted(from) => {
+ let forest = DefIdForest::from_id(from);
+ let iter = Some(forest).into_iter().chain(Some(data_uninhabitedness()));
+ DefIdForest::intersection(tcx, iter)
+ }
+ Visibility::Public => data_uninhabitedness(),
+ }
+ }
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ /// Calculates the forest of `DefId`s from which this type is visibly uninhabited.
+ fn uninhabited_from(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> DefIdForest<'tcx> {
+ tcx.type_uninhabited_from(param_env.and(self))
+ }
+}
+
+// Query provider for `type_uninhabited_from`.
+pub(crate) fn type_uninhabited_from<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> DefIdForest<'tcx> {
+ let ty = key.value;
+ let param_env = key.param_env;
+ match *ty.kind() {
+ Adt(def, substs) => def.uninhabited_from(tcx, substs, param_env),
+
+ Never => DefIdForest::full(),
+
+ Tuple(ref tys) => {
+ DefIdForest::union(tcx, tys.iter().map(|ty| ty.uninhabited_from(tcx, param_env)))
+ }
+
+ Array(ty, len) => match len.try_eval_usize(tcx, param_env) {
+ Some(0) | None => DefIdForest::empty(),
+ // If the array is definitely non-empty, it's uninhabited if
+ // the type of its elements is uninhabited.
+ Some(1..) => ty.uninhabited_from(tcx, param_env),
+ },
+
+ // In unsafe code, references to uninitialised memory are valid for any
+ // type, including uninhabited types, so we treat all references as
+ // inhabited.
+ // The precise semantics of inhabitedness with respect to references is currently
+ // undecided.
+ Ref(..) => DefIdForest::empty(),
+
+ _ => DefIdForest::empty(),
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
new file mode 100644
index 000000000..53218225d
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -0,0 +1,746 @@
+use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use crate::ty::print::{FmtPrinter, Printer};
+use crate::ty::subst::{InternalSubsts, Subst};
+use crate::ty::{
+ self, EarlyBinder, SubstsRef, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVisitable,
+};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def::Namespace;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_macros::HashStable;
+use rustc_middle::ty::normalize_erasing_regions::NormalizationError;
+use rustc_span::Symbol;
+
+use std::fmt;
+
+/// A monomorphized `InstanceDef`.
+///
+/// Monomorphization happens on-the-fly and no monomorphized MIR is ever created. Instead, this type
+/// simply couples a potentially generic `InstanceDef` with some substs, and codegen and const eval
+/// will do all required substitution as they run.
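+///
+/// ```ignore (illustrative)
+/// // Hypothetical sketch: a call to a generic `fn foo<T>()` instantiated at
+/// // `u32` would be represented roughly as
+/// // Instance { def: InstanceDef::Item(foo_def_id), substs: [u32] }
+/// // where `foo_def_id` stands in for `foo`'s `DefId`.
+/// ```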
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, Lift)]
+pub struct Instance<'tcx> {
+ pub def: InstanceDef<'tcx>,
+ pub substs: SubstsRef<'tcx>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub enum InstanceDef<'tcx> {
+ /// A user-defined callable item.
+ ///
+ /// This includes:
+ /// - `fn` items
+ /// - closures
+ /// - generators
+ Item(ty::WithOptConstParam<DefId>),
+
+ /// An intrinsic `fn` item (with `"rust-intrinsic"` or `"platform-intrinsic"` ABI).
+ ///
+ /// Alongside `Virtual`, this is the only `InstanceDef` that does not have its own callable MIR.
+ /// Instead, codegen and const eval "magically" evaluate calls to intrinsics purely in the
+ /// caller.
+ Intrinsic(DefId),
+
+ /// `<T as Trait>::method` where `method` receives unsizeable `self: Self` (part of the
+ /// `unsized_locals` feature).
+ ///
+ /// The generated shim will take `Self` via `*mut Self` - conceptually this is `&owned Self` -
+ /// and dereference the argument to call the original function.
+ VTableShim(DefId),
+
+ /// `fn()` pointer where the function itself cannot be turned into a pointer.
+ ///
+ /// One example is `<dyn Trait as Trait>::fn`, where the shim contains
+ /// a virtual call, which codegen supports only via a direct call to the
+ /// `<dyn Trait as Trait>::fn` instance (an `InstanceDef::Virtual`).
+ ///
+ /// Another example is functions annotated with `#[track_caller]`, which
+ /// must have their implicit caller location argument populated for a call.
+ /// Because this is a required part of the function's ABI but can't be tracked
+ /// as a property of the function pointer, we use a single "caller location"
+ /// (the definition of the function itself).
+ ReifyShim(DefId),
+
+ /// `<fn() as FnTrait>::call_*` (generated `FnTrait` implementation for `fn()` pointers).
+ ///
+ /// `DefId` is `FnTrait::call_*`.
+ FnPtrShim(DefId, Ty<'tcx>),
+
+ /// Dynamic dispatch to `<dyn Trait as Trait>::fn`.
+ ///
+ /// This `InstanceDef` does not have callable MIR. Calls to `Virtual` instances must be
+ /// codegen'd as virtual calls through the vtable.
+ ///
+ /// If this is reified to a `fn` pointer, a `ReifyShim` is used (see `ReifyShim` above for more
+ /// details on that).
+ Virtual(DefId, usize),
+
+ /// `<[FnMut closure] as FnOnce>::call_once`.
+ ///
+ /// The `DefId` is the ID of the `call_once` method in `FnOnce`.
+ ClosureOnceShim { call_once: DefId, track_caller: bool },
+
+ /// `core::ptr::drop_in_place::<T>`.
+ ///
+ /// The `DefId` is for `core::ptr::drop_in_place`.
+ /// The `Option<Ty<'tcx>>` is either `Some(T)`, or `None` for empty drop
+ /// glue.
+ DropGlue(DefId, Option<Ty<'tcx>>),
+
+ /// Compiler-generated `<T as Clone>::clone` implementation.
+ ///
+ /// For all types that automatically implement `Copy`, a trivial `Clone` impl is provided too.
+ /// Additionally, arrays, tuples, and closures get a `Clone` shim even if they aren't `Copy`.
+ ///
+ /// The `DefId` is for `Clone::clone`, the `Ty` is the type `T` with the builtin `Clone` impl.
+ CloneShim(DefId, Ty<'tcx>),
+}
+
+impl<'tcx> Instance<'tcx> {
+ /// Returns the `Ty` corresponding to this `Instance`, with generic substitutions applied and
+ /// lifetimes erased, allowing a `ParamEnv` to be specified for use during normalization.
+ pub fn ty(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Ty<'tcx> {
+ let ty = tcx.type_of(self.def.def_id());
+ tcx.subst_and_normalize_erasing_regions(self.substs, param_env, ty)
+ }
+
+ /// Finds a crate that contains a monomorphization of this instance that
+ /// can be linked to from the local crate. A return value of `None` means
+ /// no upstream crate provides such an exported monomorphization.
+ ///
+ /// This method already takes into account the global `-Zshare-generics`
+ /// setting, always returning `None` if `share-generics` is off.
+ pub fn upstream_monomorphization(&self, tcx: TyCtxt<'tcx>) -> Option<CrateNum> {
+ // If we are not in share generics mode, we don't link to upstream
+ // monomorphizations but always instantiate our own internal versions
+ // instead.
+ if !tcx.sess.opts.share_generics() {
+ return None;
+ }
+
+ // If this is an item that is defined in the local crate, no upstream
+ // crate can know about it/provide a monomorphization.
+ if self.def_id().is_local() {
+ return None;
+ }
+
+ // If this is a non-generic instance, it cannot be a shared monomorphization.
+ self.substs.non_erasable_generics().next()?;
+
+ match self.def {
+ InstanceDef::Item(def) => tcx
+ .upstream_monomorphizations_for(def.did)
+ .and_then(|monos| monos.get(&self.substs).cloned()),
+ InstanceDef::DropGlue(_, Some(_)) => tcx.upstream_drop_glue_for(self.substs),
+ _ => None,
+ }
+ }
+}
+
+impl<'tcx> InstanceDef<'tcx> {
+ #[inline]
+ pub fn def_id(self) -> DefId {
+ match self {
+ InstanceDef::Item(def) => def.did,
+ InstanceDef::VTableShim(def_id)
+ | InstanceDef::ReifyShim(def_id)
+ | InstanceDef::FnPtrShim(def_id, _)
+ | InstanceDef::Virtual(def_id, _)
+ | InstanceDef::Intrinsic(def_id)
+ | InstanceDef::ClosureOnceShim { call_once: def_id, track_caller: _ }
+ | InstanceDef::DropGlue(def_id, _)
+ | InstanceDef::CloneShim(def_id, _) => def_id,
+ }
+ }
+
+ /// Returns the `DefId` of instances which might not require codegen locally.
+ pub fn def_id_if_not_guaranteed_local_codegen(self) -> Option<DefId> {
+ match self {
+ ty::InstanceDef::Item(def) => Some(def.did),
+ ty::InstanceDef::DropGlue(def_id, Some(_)) => Some(def_id),
+ InstanceDef::VTableShim(..)
+ | InstanceDef::ReifyShim(..)
+ | InstanceDef::FnPtrShim(..)
+ | InstanceDef::Virtual(..)
+ | InstanceDef::Intrinsic(..)
+ | InstanceDef::ClosureOnceShim { .. }
+ | InstanceDef::DropGlue(..)
+ | InstanceDef::CloneShim(..) => None,
+ }
+ }
+
+ #[inline]
+ pub fn with_opt_param(self) -> ty::WithOptConstParam<DefId> {
+ match self {
+ InstanceDef::Item(def) => def,
+ InstanceDef::VTableShim(def_id)
+ | InstanceDef::ReifyShim(def_id)
+ | InstanceDef::FnPtrShim(def_id, _)
+ | InstanceDef::Virtual(def_id, _)
+ | InstanceDef::Intrinsic(def_id)
+ | InstanceDef::ClosureOnceShim { call_once: def_id, track_caller: _ }
+ | InstanceDef::DropGlue(def_id, _)
+ | InstanceDef::CloneShim(def_id, _) => ty::WithOptConstParam::unknown(def_id),
+ }
+ }
+
+ #[inline]
+ pub fn get_attrs(&self, tcx: TyCtxt<'tcx>, attr: Symbol) -> ty::Attributes<'tcx> {
+ tcx.get_attrs(self.def_id(), attr)
+ }
+
+ /// Returns `true` if the LLVM version of this instance is unconditionally
+ /// marked with `inline`. This implies that a copy of this instance is
+ /// generated in every codegen unit.
+ /// Note that this is only a hint. See the documentation for
+ /// `generates_cgu_internal_copy` for more information.
+ pub fn requires_inline(&self, tcx: TyCtxt<'tcx>) -> bool {
+ use rustc_hir::definitions::DefPathData;
+ let def_id = match *self {
+ ty::InstanceDef::Item(def) => def.did,
+ ty::InstanceDef::DropGlue(_, Some(_)) => return false,
+ _ => return true,
+ };
+ matches!(
+ tcx.def_key(def_id).disambiguated_data.data,
+ DefPathData::Ctor | DefPathData::ClosureExpr
+ )
+ }
+
+ /// Returns `true` if the machine code for this instance is instantiated in
+ /// each codegen unit that references it.
+ /// Note that this is only a hint! The compiler can globally decide to *not*
+ /// do this in order to speed up compilation. CGU-internal copies exist
+ /// only to enable inlining. If inlining is not performed (e.g. at
+ /// `-Copt-level=0`) then the time for generating them is wasted and it's
+ /// better to create a single copy with external linkage.
+ pub fn generates_cgu_internal_copy(&self, tcx: TyCtxt<'tcx>) -> bool {
+ if self.requires_inline(tcx) {
+ return true;
+ }
+ if let ty::InstanceDef::DropGlue(.., Some(ty)) = *self {
+ // Drop glue generally wants to be instantiated at every codegen
+ // unit, but without an #[inline] hint. We should make this
+ // available to normal end-users.
+ if tcx.sess.opts.incremental.is_none() {
+ return true;
+ }
+ // When compiling with incremental, we can generate a *lot* of
+ // codegen units. Including drop glue into all of them has a
+ // considerable compile time cost.
+ //
+ // We include enums without destructors to allow, say, optimizing
+ // drops of `Option::None` before LTO. We also respect the intent of
+ // `#[inline]` on `Drop::drop` implementations.
+ return ty.ty_adt_def().map_or(true, |adt_def| {
+ adt_def.destructor(tcx).map_or_else(
+ || adt_def.is_enum(),
+ |dtor| tcx.codegen_fn_attrs(dtor.did).requests_inline(),
+ )
+ });
+ }
+ tcx.codegen_fn_attrs(self.def_id()).requests_inline()
+ }
+
+ pub fn requires_caller_location(&self, tcx: TyCtxt<'_>) -> bool {
+ match *self {
+ InstanceDef::Item(ty::WithOptConstParam { did: def_id, .. })
+ | InstanceDef::Virtual(def_id, _) => {
+ tcx.body_codegen_attrs(def_id).flags.contains(CodegenFnAttrFlags::TRACK_CALLER)
+ }
+ InstanceDef::ClosureOnceShim { call_once: _, track_caller } => track_caller,
+ _ => false,
+ }
+ }
+
+ /// Returns `true` when the MIR body associated with this instance should be monomorphized
+ /// by its users (e.g. codegen or miri) by substituting the `substs` from `Instance` (see
+ /// `Instance::substs_for_mir_body`).
+ ///
+ /// Otherwise, returns `false` only for some kinds of shims where the construction of the MIR
+ /// body should perform necessary substitutions.
+ pub fn has_polymorphic_mir_body(&self) -> bool {
+ match *self {
+ InstanceDef::CloneShim(..)
+ | InstanceDef::FnPtrShim(..)
+ | InstanceDef::DropGlue(_, Some(_)) => false,
+ InstanceDef::ClosureOnceShim { .. }
+ | InstanceDef::DropGlue(..)
+ | InstanceDef::Item(_)
+ | InstanceDef::Intrinsic(..)
+ | InstanceDef::ReifyShim(..)
+ | InstanceDef::Virtual(..)
+ | InstanceDef::VTableShim(..) => true,
+ }
+ }
+}
+
+impl<'tcx> fmt::Display for Instance<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ let substs = tcx.lift(self.substs).expect("could not lift for printing");
+ let s = FmtPrinter::new(tcx, Namespace::ValueNS)
+ .print_def_path(self.def_id(), substs)?
+ .into_buffer();
+ f.write_str(&s)
+ })?;
+
+ match self.def {
+ InstanceDef::Item(_) => Ok(()),
+ InstanceDef::VTableShim(_) => write!(f, " - shim(vtable)"),
+ InstanceDef::ReifyShim(_) => write!(f, " - shim(reify)"),
+ InstanceDef::Intrinsic(_) => write!(f, " - intrinsic"),
+ InstanceDef::Virtual(_, num) => write!(f, " - virtual#{}", num),
+ InstanceDef::FnPtrShim(_, ty) => write!(f, " - shim({})", ty),
+ InstanceDef::ClosureOnceShim { .. } => write!(f, " - shim"),
+ InstanceDef::DropGlue(_, None) => write!(f, " - shim(None)"),
+ InstanceDef::DropGlue(_, Some(ty)) => write!(f, " - shim(Some({}))", ty),
+ InstanceDef::CloneShim(_, ty) => write!(f, " - shim({})", ty),
+ }
+ }
+}
+
+impl<'tcx> Instance<'tcx> {
+ pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> Instance<'tcx> {
+ assert!(
+ !substs.has_escaping_bound_vars(),
+ "substs of instance {:?} not normalized for codegen: {:?}",
+ def_id,
+ substs
+ );
+ Instance { def: InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)), substs }
+ }
+
+ pub fn mono(tcx: TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> {
+ let substs = InternalSubsts::for_item(tcx, def_id, |param, _| match param.kind {
+ ty::GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
+ ty::GenericParamDefKind::Type { .. } => {
+ bug!("Instance::mono: {:?} has type parameters", def_id)
+ }
+ ty::GenericParamDefKind::Const { .. } => {
+ bug!("Instance::mono: {:?} has const parameters", def_id)
+ }
+ });
+
+ Instance::new(def_id, substs)
+ }
+
+ #[inline]
+ pub fn def_id(&self) -> DefId {
+ self.def.def_id()
+ }
+
+ /// Resolves a `(def_id, substs)` pair to an (optional) instance -- most commonly,
+ /// this is used to find the precise code that will run for a trait method invocation,
+ /// if known.
+ ///
+ /// Returns `Ok(None)` if we cannot resolve `Instance` to a specific instance.
+ /// For example, in a context like this,
+ ///
+ /// ```ignore (illustrative)
+ /// fn foo<T: Debug>(t: T) { ... }
+ /// ```
+ ///
+ /// trying to resolve `Debug::fmt` applied to `T` will yield `Ok(None)`, because we do not
+ /// know what code ought to run. (Note that this setting is also affected by the
+ /// `RevealMode` in the parameter environment.)
+ ///
+ /// Presuming that coherence and type-check have succeeded, if this method is invoked
+ /// in a monomorphic context (e.g., during codegen), then it is guaranteed to return
+ /// `Ok(Some(instance))`.
+ ///
+ /// Returns `Err(ErrorGuaranteed)` when the `Instance` resolution process
+ /// couldn't complete due to errors elsewhere - this is distinct
+ /// from `Ok(None)` to avoid misleading diagnostics when an error
+ /// has already been (or will be) emitted for the original cause.
+ pub fn resolve(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
+ Instance::resolve_opt_const_arg(
+ tcx,
+ param_env,
+ ty::WithOptConstParam::unknown(def_id),
+ substs,
+ )
+ }
+
+ // This should be kept up to date with `resolve`.
+ pub fn resolve_opt_const_arg(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ def: ty::WithOptConstParam<DefId>,
+ substs: SubstsRef<'tcx>,
+ ) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
+ // All regions in the result of this query are erased, so it's
+ // fine to erase all of the input regions.
+
+ // HACK(eddyb) erase regions in `substs` first, so that `param_env.and(...)`
+ // below is more likely to ignore the bounds in scope (e.g. if the only
+ // generic parameters mentioned by `substs` were lifetime ones).
+ let substs = tcx.erase_regions(substs);
+
+ // FIXME(eddyb) should this always use `param_env.with_reveal_all()`?
+ if let Some((did, param_did)) = def.as_const_arg() {
+ tcx.resolve_instance_of_const_arg(
+ tcx.erase_regions(param_env.and((did, param_did, substs))),
+ )
+ } else {
+ tcx.resolve_instance(tcx.erase_regions(param_env.and((def.did, substs))))
+ }
+ }
+
+ pub fn resolve_for_fn_ptr(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Option<Instance<'tcx>> {
+ debug!("resolve(def_id={:?}, substs={:?})", def_id, substs);
+ // Use either `resolve_closure` or `resolve_for_vtable`
+ assert!(!tcx.is_closure(def_id), "Called `resolve_for_fn_ptr` on closure: {:?}", def_id);
+ Instance::resolve(tcx, param_env, def_id, substs).ok().flatten().map(|mut resolved| {
+ match resolved.def {
+ InstanceDef::Item(def) if resolved.def.requires_caller_location(tcx) => {
+ debug!(" => fn pointer created for function with #[track_caller]");
+ resolved.def = InstanceDef::ReifyShim(def.did);
+ }
+ InstanceDef::Virtual(def_id, _) => {
+ debug!(" => fn pointer created for virtual call");
+ resolved.def = InstanceDef::ReifyShim(def_id);
+ }
+ _ => {}
+ }
+
+ resolved
+ })
+ }
+
+ pub fn resolve_for_vtable(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Option<Instance<'tcx>> {
+ debug!("resolve_for_vtable(def_id={:?}, substs={:?})", def_id, substs);
+ let fn_sig = tcx.fn_sig(def_id);
+ let is_vtable_shim = !fn_sig.inputs().skip_binder().is_empty()
+ && fn_sig.input(0).skip_binder().is_param(0)
+ && tcx.generics_of(def_id).has_self;
+ if is_vtable_shim {
+ debug!(" => associated item with unsizeable self: Self");
+ Some(Instance { def: InstanceDef::VTableShim(def_id), substs })
+ } else {
+ Instance::resolve(tcx, param_env, def_id, substs).ok().flatten().map(|mut resolved| {
+ match resolved.def {
+ InstanceDef::Item(def) => {
+ // We need to generate a shim when we cannot guarantee that
+ // the caller of a trait object method will be aware of
+ // `#[track_caller]` - this ensures that the caller
+ // and callee ABI will always match.
+ //
+ // The shim is generated when all of these conditions are met:
+ //
+ // 1) The underlying method expects a caller location parameter
+ // in the ABI
+ if resolved.def.requires_caller_location(tcx)
+ // 2) The caller location parameter comes from having `#[track_caller]`
+ // on the implementation, and *not* on the trait method.
+ && !tcx.should_inherit_track_caller(def.did)
+ // If the method implementation comes from the trait definition itself
+ // (e.g. `trait Foo { #[track_caller] my_fn() { /* impl */ } }`),
+ // then we don't need to generate a shim. This check is needed because
+ // `should_inherit_track_caller` returns `false` if our method
+ // implementation comes from the trait block, and not an impl block
+ && !matches!(
+ tcx.opt_associated_item(def.did),
+ Some(ty::AssocItem {
+ container: ty::AssocItemContainer::TraitContainer,
+ ..
+ })
+ )
+ {
+ if tcx.is_closure(def.did) {
+ debug!(" => vtable fn pointer created for closure with #[track_caller]: {:?} for method {:?} {:?}",
+ def.did, def_id, substs);
+
+ // Create a shim for the `FnOnce/FnMut/Fn` method we are calling
+ // - unlike functions, invoking a closure always goes through a
+ // trait.
+ resolved = Instance { def: InstanceDef::ReifyShim(def_id), substs };
+ } else {
+ debug!(
+ " => vtable fn pointer created for function with #[track_caller]: {:?}", def.did
+ );
+ resolved.def = InstanceDef::ReifyShim(def.did);
+ }
+ }
+ }
+ InstanceDef::Virtual(def_id, _) => {
+ debug!(" => vtable fn pointer created for virtual call");
+ resolved.def = InstanceDef::ReifyShim(def_id);
+ }
+ _ => {}
+ }
+
+ resolved
+ })
+ }
+ }
+
+ pub fn resolve_closure(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ substs: ty::SubstsRef<'tcx>,
+ requested_kind: ty::ClosureKind,
+ ) -> Option<Instance<'tcx>> {
+ let actual_kind = substs.as_closure().kind();
+
+ match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
+ Ok(true) => Instance::fn_once_adapter_instance(tcx, def_id, substs),
+ _ => Some(Instance::new(def_id, substs)),
+ }
+ }
+
+ pub fn resolve_drop_in_place(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ty::Instance<'tcx> {
+ let def_id = tcx.require_lang_item(LangItem::DropInPlace, None);
+ let substs = tcx.intern_substs(&[ty.into()]);
+ Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, substs).unwrap().unwrap()
+ }
+
+ pub fn fn_once_adapter_instance(
+ tcx: TyCtxt<'tcx>,
+ closure_did: DefId,
+ substs: ty::SubstsRef<'tcx>,
+ ) -> Option<Instance<'tcx>> {
+ debug!("fn_once_adapter_shim({:?}, {:?})", closure_did, substs);
+ let fn_once = tcx.require_lang_item(LangItem::FnOnce, None);
+ let call_once = tcx
+ .associated_items(fn_once)
+ .in_definition_order()
+ .find(|it| it.kind == ty::AssocKind::Fn)
+ .unwrap()
+ .def_id;
+ let track_caller =
+ tcx.codegen_fn_attrs(closure_did).flags.contains(CodegenFnAttrFlags::TRACK_CALLER);
+ let def = ty::InstanceDef::ClosureOnceShim { call_once, track_caller };
+
+ let self_ty = tcx.mk_closure(closure_did, substs);
+
+ let sig = substs.as_closure().sig();
+ let sig =
+ tcx.try_normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig).ok()?;
+ assert_eq!(sig.inputs().len(), 1);
+ let substs = tcx.mk_substs_trait(self_ty, &[sig.inputs()[0].into()]);
+
+ debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
+ Some(Instance { def, substs })
+ }
+
+ /// Depending on the kind of `InstanceDef`, the MIR body associated with an
+ /// instance is expressed either in terms of the generic parameters of `self.def_id()`
+ /// or in terms of the types found in the substitution array.
+ /// In the former case, we want to substitute those generic types and replace them with the
+ /// values from the substs when monomorphizing the function body. But in the latter case, we
+ /// don't want to do that substitution, since it has already been done effectively.
+ ///
+ /// This function returns `Some(substs)` in the former case and `None` otherwise -- i.e., if
+ /// this function returns `None`, then the MIR body does not require substitution during
+ /// codegen.
+ fn substs_for_mir_body(&self) -> Option<SubstsRef<'tcx>> {
+ if self.def.has_polymorphic_mir_body() { Some(self.substs) } else { None }
+ }
+
+ pub fn subst_mir<T>(&self, tcx: TyCtxt<'tcx>, v: &T) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ if let Some(substs) = self.substs_for_mir_body() {
+ EarlyBinder(*v).subst(tcx, substs)
+ } else {
+ *v
+ }
+ }
+
+ #[inline(always)]
+ pub fn subst_mir_and_normalize_erasing_regions<T>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ v: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx> + Clone,
+ {
+ if let Some(substs) = self.substs_for_mir_body() {
+ tcx.subst_and_normalize_erasing_regions(substs, param_env, v)
+ } else {
+ tcx.normalize_erasing_regions(param_env, v)
+ }
+ }
+
+ #[inline(always)]
+ pub fn try_subst_mir_and_normalize_erasing_regions<T>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ v: T,
+ ) -> Result<T, NormalizationError<'tcx>>
+ where
+ T: TypeFoldable<'tcx> + Clone,
+ {
+ if let Some(substs) = self.substs_for_mir_body() {
+ tcx.try_subst_and_normalize_erasing_regions(substs, param_env, v)
+ } else {
+ tcx.try_normalize_erasing_regions(param_env, v)
+ }
+ }
+
+ /// Returns a new `Instance` where generic parameters in `instance.substs` are replaced by
+ /// identity parameters if they are determined to be unused in `instance.def`.
+ pub fn polymorphize(self, tcx: TyCtxt<'tcx>) -> Self {
+ debug!("polymorphize: running polymorphization analysis");
+ if !tcx.sess.opts.unstable_opts.polymorphize {
+ return self;
+ }
+
+ let polymorphized_substs = polymorphize(tcx, self.def, self.substs);
+ debug!("polymorphize: self={:?} polymorphized_substs={:?}", self, polymorphized_substs);
+ Self { def: self.def, substs: polymorphized_substs }
+ }
+}
+
+fn polymorphize<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: ty::InstanceDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+) -> SubstsRef<'tcx> {
+ debug!("polymorphize({:?}, {:?})", instance, substs);
+ let unused = tcx.unused_generic_params(instance);
+ debug!("polymorphize: unused={:?}", unused);
+
+ // If this is a closure or generator then we need to handle the case where another closure
+ // from the function is captured as an upvar and hasn't been polymorphized. In this case,
+ // the unpolymorphized upvar closure would result in a polymorphized closure producing
+ // multiple mono items (and eventually symbol clashes).
+ let def_id = instance.def_id();
+ let upvars_ty = if tcx.is_closure(def_id) {
+ Some(substs.as_closure().tupled_upvars_ty())
+ } else if tcx.type_of(def_id).is_generator() {
+ Some(substs.as_generator().tupled_upvars_ty())
+ } else {
+ None
+ };
+ let has_upvars = upvars_ty.map_or(false, |ty| !ty.tuple_fields().is_empty());
+ debug!("polymorphize: upvars_ty={:?} has_upvars={:?}", upvars_ty, has_upvars);
+
+ struct PolymorphizationFolder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ }
+
+ impl<'tcx> ty::TypeFolder<'tcx> for PolymorphizationFolder<'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ debug!("fold_ty: ty={:?}", ty);
+ match *ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let polymorphized_substs = polymorphize(
+ self.tcx,
+ ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+ substs,
+ );
+ if substs == polymorphized_substs {
+ ty
+ } else {
+ self.tcx.mk_closure(def_id, polymorphized_substs)
+ }
+ }
+ ty::Generator(def_id, substs, movability) => {
+ let polymorphized_substs = polymorphize(
+ self.tcx,
+ ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+ substs,
+ );
+ if substs == polymorphized_substs {
+ ty
+ } else {
+ self.tcx.mk_generator(def_id, polymorphized_substs, movability)
+ }
+ }
+ _ => ty.super_fold_with(self),
+ }
+ }
+ }
+
+ InternalSubsts::for_item(tcx, def_id, |param, _| {
+ let is_unused = unused.contains(param.index).unwrap_or(false);
+ debug!("polymorphize: param={:?} is_unused={:?}", param, is_unused);
+ match param.kind {
+ // Upvar case: If parameter is a type parameter..
+ ty::GenericParamDefKind::Type { .. } if
+ // ..and has upvars..
+ has_upvars &&
+ // ..and this param has the same type as the tupled upvars..
+ upvars_ty == Some(substs[param.index as usize].expect_ty()) => {
+ // ..then double-check that polymorphization marked it used..
+ debug_assert!(!is_unused);
+ // ..and polymorphize any closures/generators captured as upvars.
+ let upvars_ty = upvars_ty.unwrap();
+ let polymorphized_upvars_ty = upvars_ty.fold_with(
+ &mut PolymorphizationFolder { tcx });
+ debug!("polymorphize: polymorphized_upvars_ty={:?}", polymorphized_upvars_ty);
+ ty::GenericArg::from(polymorphized_upvars_ty)
+ },
+
+ // Simple case: If parameter is a const or type parameter..
+ ty::GenericParamDefKind::Const { .. } | ty::GenericParamDefKind::Type { .. } if
+ // ..and is within range and unused..
+ unused.contains(param.index).unwrap_or(false) =>
+ // ..then use the identity for this parameter.
+ tcx.mk_param_from_def(param),
+
+ // Otherwise, use the parameter as before.
+ _ => substs[param.index as usize],
+ }
+ })
+}
+
+fn needs_fn_once_adapter_shim(
+ actual_closure_kind: ty::ClosureKind,
+ trait_closure_kind: ty::ClosureKind,
+) -> Result<bool, ()> {
+ match (actual_closure_kind, trait_closure_kind) {
+ (ty::ClosureKind::Fn, ty::ClosureKind::Fn)
+ | (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut)
+ | (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
+ // No adapter needed.
+ Ok(false)
+ }
+ (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
+ // The closure fn `llfn` is a `fn(&self, ...)`. We want a
+ // `fn(&mut self, ...)`. In fact, at codegen time, these are
+ // basically the same thing, so we can just return llfn.
+ Ok(false)
+ }
+ (ty::ClosureKind::Fn | ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
+ // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
+ // self, ...)`. We want a `fn(self, ...)`. We can produce
+ // this by doing something like:
+ //
+ // fn call_once(self, ...) { call_mut(&self, ...) }
+ // fn call_once(mut self, ...) { call_mut(&mut self, ...) }
+ //
+ // These are both the same at codegen time.
+ Ok(true)
+ }
+ (ty::ClosureKind::FnMut | ty::ClosureKind::FnOnce, _) => Err(()),
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
new file mode 100644
index 000000000..ad78d24e9
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -0,0 +1,3504 @@
+use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
+use crate::ty::normalize_erasing_regions::NormalizationError;
+use crate::ty::subst::Subst;
+use crate::ty::{self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable};
+use rustc_ast as ast;
+use rustc_attr as attr;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi::call::{
+ ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
+};
+use rustc_target::abi::*;
+use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
+
+use std::cmp;
+use std::fmt;
+use std::iter;
+use std::num::NonZeroUsize;
+use std::ops::Bound;
+
+use rand::{seq::SliceRandom, SeedableRng};
+use rand_xoshiro::Xoshiro128StarStar;
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers =
+ ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
+}
+
+pub trait IntegerExt {
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
+ fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
+ fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
+ fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
+ fn repr_discr<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ repr: &ReprOptions,
+ min: i128,
+ max: i128,
+ ) -> (Integer, bool);
+}
+
+impl IntegerExt for Integer {
+ #[inline]
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
+ match (*self, signed) {
+ (I8, false) => tcx.types.u8,
+ (I16, false) => tcx.types.u16,
+ (I32, false) => tcx.types.u32,
+ (I64, false) => tcx.types.u64,
+ (I128, false) => tcx.types.u128,
+ (I8, true) => tcx.types.i8,
+ (I16, true) => tcx.types.i16,
+ (I32, true) => tcx.types.i32,
+ (I64, true) => tcx.types.i64,
+ (I128, true) => tcx.types.i128,
+ }
+ }
+
+ /// Gets the Integer type from an attr::IntType.
+ fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
+ let dl = cx.data_layout();
+
+ match ity {
+ attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
+ attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
+ attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
+ attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
+ attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
+ attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
+ dl.ptr_sized_integer()
+ }
+ }
+ }
+
+ fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
+ match ity {
+ ty::IntTy::I8 => I8,
+ ty::IntTy::I16 => I16,
+ ty::IntTy::I32 => I32,
+ ty::IntTy::I64 => I64,
+ ty::IntTy::I128 => I128,
+ ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
+ }
+ }
+ fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
+ match ity {
+ ty::UintTy::U8 => I8,
+ ty::UintTy::U16 => I16,
+ ty::UintTy::U32 => I32,
+ ty::UintTy::U64 => I64,
+ ty::UintTy::U128 => I128,
+ ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
+ }
+ }
+
+ /// Finds the appropriate Integer type and signedness for the given
+ /// signed discriminant range and `#[repr]` attribute.
+ /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
+ /// that shouldn't affect anything, other than maybe debuginfo.
+ fn repr_discr<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ repr: &ReprOptions,
+ min: i128,
+ max: i128,
+ ) -> (Integer, bool) {
+ // Theoretically, negative values could be larger in unsigned representation
+ // than the unsigned representation of the signed minimum. However, if there
+ // are any negative values, the only valid unsigned representation is u128
+ // which can fit all i128 values, so the result remains unaffected.
+ let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
+ let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
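+ // Worked example (illustrative): for min = 0 and max = 200, unsigned_fit is
+ // I8 (200 fits in a u8) while signed_fit is I16, so the `min >= 0` branch
+ // below picks the smaller unsigned representation.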
+
+ if let Some(ity) = repr.int {
+ let discr = Integer::from_attr(&tcx, ity);
+ let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
+ if discr < fit {
+ bug!(
+ "Integer::repr_discr: `#[repr]` hint too small for \
+ discriminant range of enum `{}`",
+ ty
+ )
+ }
+ return (discr, ity.is_signed());
+ }
+
+ let at_least = if repr.c() {
+ // This is usually I32, however it can be different on some platforms,
+ // notably hexagon and arm-none/thumb-none
+ tcx.data_layout().c_enum_min_size
+ } else {
+ // repr(Rust) enums try to be as small as possible
+ I8
+ };
+
+ // If there are no negative values, we can use the unsigned fit.
+ if min >= 0 {
+ (cmp::max(unsigned_fit, at_least), false)
+ } else {
+ (cmp::max(signed_fit, at_least), true)
+ }
+ }
+}
+
+pub trait PrimitiveExt {
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+ fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+}
+
+impl PrimitiveExt for Primitive {
+ #[inline]
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ Int(i, signed) => i.to_ty(tcx, signed),
+ F32 => tcx.types.f32,
+ F64 => tcx.types.f64,
+ Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
+ }
+ }
+
+ /// Return an *integer* type matching this primitive.
+ /// Useful in particular when dealing with enum discriminants.
+ #[inline]
+ fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ Int(i, signed) => i.to_ty(tcx, signed),
+ Pointer => tcx.types.usize,
+ F32 | F64 => bug!("floats do not have an int type"),
+ }
+ }
+}
+
+/// The first half of a fat pointer.
+///
+/// - For a trait object, this is the address of the box.
+/// - For a slice, this is the base address.
+pub const FAT_PTR_ADDR: usize = 0;
+
+/// The second half of a fat pointer.
+///
+/// - For a trait object, this is the address of the vtable.
+/// - For a slice, this is the length.
+pub const FAT_PTR_EXTRA: usize = 1;
+
+/// The maximum supported number of lanes in a SIMD vector.
+///
+/// This value is selected based on backend support:
+/// * LLVM does not appear to have a vector width limit.
+/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
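+///
+/// A 4-bit base-2 log tops out at 15, so the limit is `1 << 0xF` = 32768 lanes.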
+pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
+
+#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
+pub enum LayoutError<'tcx> {
+ Unknown(Ty<'tcx>),
+ SizeOverflow(Ty<'tcx>),
+ NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
+}
+
+impl<'tcx> fmt::Display for LayoutError<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
+ LayoutError::SizeOverflow(ty) => {
+ write!(f, "values of the type `{}` are too big for the current architecture", ty)
+ }
+ LayoutError::NormalizationFailure(t, e) => write!(
+ f,
+ "unable to determine layout for `{}` because `{}` cannot be normalized",
+ t,
+ e.get_type_for_failure()
+ ),
+ }
+ }
+}
+
+/// Enforce some basic invariants on layouts.
+fn sanity_check_layout<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ layout: &TyAndLayout<'tcx>,
+) {
+ // Type-level uninhabitedness should always imply ABI uninhabitedness.
+ if tcx.conservative_is_privately_uninhabited(param_env.and(layout.ty)) {
+ assert!(layout.abi.is_uninhabited());
+ }
+
+ if layout.size.bytes() % layout.align.abi.bytes() != 0 {
+ bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
+ }
+
+ if cfg!(debug_assertions) {
+ fn check_layout_abi<'tcx>(tcx: TyCtxt<'tcx>, layout: Layout<'tcx>) {
+ match layout.abi() {
+ Abi::Scalar(scalar) => {
+ // No padding in scalars.
+ assert_eq!(
+ layout.align().abi,
+ scalar.align(&tcx).abi,
+ "alignment mismatch between ABI and layout in {layout:#?}"
+ );
+ assert_eq!(
+ layout.size(),
+ scalar.size(&tcx),
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ }
+ Abi::Vector { count, element } => {
+ // No padding in vectors. Alignment can be strengthened, though.
+ assert!(
+ layout.align().abi >= element.align(&tcx).abi,
+ "alignment mismatch between ABI and layout in {layout:#?}"
+ );
+ let size = element.size(&tcx) * count;
+ assert_eq!(
+ layout.size(),
+ size.align_to(tcx.data_layout().vector_align(size).abi),
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ }
+ Abi::ScalarPair(scalar1, scalar2) => {
+ // Sanity-check scalar pairs. These are a bit more flexible and support
+ // padding, but we can at least ensure both fields actually fit into the layout
+ // and the alignment requirement has not been weakened.
+ let align1 = scalar1.align(&tcx).abi;
+ let align2 = scalar2.align(&tcx).abi;
+ assert!(
+ layout.align().abi >= cmp::max(align1, align2),
+ "alignment mismatch between ABI and layout in {layout:#?}",
+ );
+ let field2_offset = scalar1.size(&tcx).align_to(align2);
+ assert!(
+ layout.size() >= field2_offset + scalar2.size(&tcx),
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
+ }
+ }
+
+ check_layout_abi(tcx, layout.layout);
+
+ if let Variants::Multiple { variants, .. } = &layout.variants {
+ for variant in variants {
+ check_layout_abi(tcx, *variant);
+ // No nested "multiple".
+ assert!(matches!(variant.variants(), Variants::Single { .. }));
+ // Skip empty variants.
+ if variant.size() == Size::ZERO
+ || variant.fields().count() == 0
+ || variant.abi().is_uninhabited()
+ {
+ // These are never actually accessed anyway, so we can skip them. (Note that
+ // sometimes, variants with fields have size 0, and sometimes, variants without
+ // fields have non-0 size.)
+ continue;
+ }
+ // Variants should have the same or a smaller size as the full thing.
+ if variant.size() > layout.size {
+ bug!(
+ "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
+ layout.size.bytes(),
+ variant.size().bytes(),
+ )
+ }
+ // The top-level ABI and the ABI of the variants should be coherent.
+ let abi_coherent = match (layout.abi, variant.abi()) {
+ (Abi::Scalar(..), Abi::Scalar(..)) => true,
+ (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+ (Abi::Uninhabited, _) => true,
+ (Abi::Aggregate { .. }, _) => true,
+ _ => false,
+ };
+ if !abi_coherent {
+ bug!(
+ "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
+ variant
+ );
+ }
+ }
+ }
+ }
+}
+
+#[instrument(skip(tcx, query), level = "debug")]
+fn layout_of<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
+ ty::tls::with_related_context(tcx, move |icx| {
+ let (param_env, ty) = query.into_parts();
+ debug!(?ty);
+
+ if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
+ tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
+ }
+
+ // Update the ImplicitCtxt to increase the layout_depth
+ let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
+
+ ty::tls::enter_context(&icx, |_| {
+ let param_env = param_env.with_reveal_all_normalized(tcx);
+ let unnormalized_ty = ty;
+
+ // FIXME: We might want to have two different versions of `layout_of`:
+ // One that can be called after typecheck has completed and can use
+ // `normalize_erasing_regions` here and another one that can be called
+ // before typecheck has completed and uses `try_normalize_erasing_regions`.
+ let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
+ Ok(t) => t,
+ Err(normalization_error) => {
+ return Err(LayoutError::NormalizationFailure(ty, normalization_error));
+ }
+ };
+
+ if ty != unnormalized_ty {
+ // Ensure this layout is also cached for the normalized type.
+ return tcx.layout_of(param_env.and(ty));
+ }
+
+ let cx = LayoutCx { tcx, param_env };
+
+ let layout = cx.layout_of_uncached(ty)?;
+ let layout = TyAndLayout { ty, layout };
+
+ cx.record_layout_for_printing(layout);
+
+ sanity_check_layout(tcx, param_env, &layout);
+
+ Ok(layout)
+ })
+ })
+}
+
+pub struct LayoutCx<'tcx, C> {
+ pub tcx: C,
+ pub param_env: ty::ParamEnv<'tcx>,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum StructKind {
+ /// A tuple, closure, or univariant which cannot be coerced to unsized.
+ AlwaysSized,
+ /// A univariant, the last field of which may be coerced to unsized.
+ MaybeUnsized,
+ /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
+ Prefixed(Size, Align),
+}
+
+// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
+// This is used to go between `memory_index` (source field order to memory order)
+// and `inverse_memory_index` (memory order to source field order).
+// See also `FieldsShape::Arbitrary::memory_index` for more details.
+// FIXME(eddyb) build a better abstraction for permutations, if possible.
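+// E.g. invert_mapping(&[2, 0, 1]) == vec![1, 2, 0]: map[0] == 2 implies
+// inverse[2] == 0, and so on.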
+fn invert_mapping(map: &[u32]) -> Vec<u32> {
+ let mut inverse = vec![0; map.len()];
+ for i in 0..map.len() {
+ inverse[map[i] as usize] = i as u32;
+ }
+ inverse
+}
+
+impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
+ fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
+ let dl = self.data_layout();
+ let b_align = b.align(dl);
+ let align = a.align(dl).max(b_align).max(dl.aggregate_align);
+ let b_offset = a.size(dl).align_to(b_align.abi);
+ let size = (b_offset + b.size(dl)).align_to(align.abi);
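+ // Worked example (illustrative): for `a = u8` and `b = u32` on a target
+ // where `u32` has 4-byte alignment, `b_offset` is 4 and `size` is 8.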
+
+ // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+ // returns the last maximum.
+ let largest_niche = Niche::from_scalar(dl, b_offset, b)
+ .into_iter()
+ .chain(Niche::from_scalar(dl, Size::ZERO, a))
+ .max_by_key(|niche| niche.available(dl));
+
+ LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![Size::ZERO, b_offset],
+ memory_index: vec![0, 1],
+ },
+ abi: Abi::ScalarPair(a, b),
+ largest_niche,
+ align,
+ size,
+ }
+ }
+
+ fn univariant_uninterned(
+ &self,
+ ty: Ty<'tcx>,
+ fields: &[TyAndLayout<'_>],
+ repr: &ReprOptions,
+ kind: StructKind,
+ ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
+ let dl = self.data_layout();
+ let pack = repr.pack;
+ if pack.is_some() && repr.align.is_some() {
+ self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+ let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+
+ let optimize = !repr.inhibit_struct_field_reordering_opt();
+ if optimize {
+ let end =
+ if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+ let optimizing = &mut inverse_memory_index[..end];
+ let field_align = |f: &TyAndLayout<'_>| {
+ if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
+ };
+
+ // If `-Z randomize-layout` was enabled for the type definition, we can shuffle
+ // the field ordering to try to catch code that makes assumptions about layouts
+ // we don't guarantee.
+ if repr.can_randomize_type_layout() {
+ // `ReprOptions.layout_seed` is a deterministic seed that we can use to
+ // randomize field ordering with
+ let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
+
+ // Shuffle the ordering of the fields
+ optimizing.shuffle(&mut rng);
+
+ // Otherwise we just leave things alone and actually optimize the type's fields
+ } else {
+ match kind {
+ StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+ optimizing.sort_by_key(|&x| {
+ // Place ZSTs first to avoid "interesting offsets",
+ // especially with only one or two non-ZST fields.
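+ // E.g. (illustrative): alignments [1, 8, 4] plus a ZST sort to
+ // the ZST first, then 8, 4, 1, minimizing padding.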
+ let f = &fields[x as usize];
+ (!f.is_zst(), cmp::Reverse(field_align(f)))
+ });
+ }
+
+ StructKind::Prefixed(..) => {
+ // Sort in ascending alignment so that the layout stays optimal
+ // regardless of the prefix
+ optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
+ }
+ }
+
+ // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+ // regardless of the status of `-Z randomize-layout`
+ }
+ }
+
+ // inverse_memory_index holds field indices by increasing memory offset.
+ // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+ // We now write field offsets to the corresponding offset slot;
+ // field 5 with offset 0 puts 0 in offsets[5].
+ // At the bottom of this function, we invert `inverse_memory_index` to
+ // produce `memory_index` (see `invert_mapping`).
+
+ let mut sized = true;
+ let mut offsets = vec![Size::ZERO; fields.len()];
+ let mut offset = Size::ZERO;
+ let mut largest_niche = None;
+ let mut largest_niche_available = 0;
+
+ if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+ let prefix_align =
+ if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+ align = align.max(AbiAndPrefAlign::new(prefix_align));
+ offset = prefix_size.align_to(prefix_align);
+ }
+
+ for &i in &inverse_memory_index {
+ let field = fields[i as usize];
+ if !sized {
+ self.tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ &format!(
+ "univariant: field #{} of `{}` comes after unsized field",
+ offsets.len(),
+ ty
+ ),
+ );
+ }
+
+ if field.is_unsized() {
+ sized = false;
+ }
+
+ // Invariant: offset < dl.obj_size_bound() <= 1<<61
+ let field_align = if let Some(pack) = pack {
+ field.align.min(AbiAndPrefAlign::new(pack))
+ } else {
+ field.align
+ };
+ offset = offset.align_to(field_align.abi);
+ align = align.max(field_align);
+
+ debug!("univariant offset: {:?} field: {:#?}", offset, field);
+ offsets[i as usize] = offset;
+
+ if let Some(mut niche) = field.largest_niche {
+ let available = niche.available(dl);
+ if available > largest_niche_available {
+ largest_niche_available = available;
+ niche.offset += offset;
+ largest_niche = Some(niche);
+ }
+ }
+
+ offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+ }
+
+ if let Some(repr_align) = repr.align {
+ align = align.max(AbiAndPrefAlign::new(repr_align));
+ }
+
+ debug!("univariant min_size: {:?}", offset);
+ let min_size = offset;
+
+ // As stated above, inverse_memory_index holds field indices by increasing offset.
+ // This makes it an already-sorted view of the offsets vec.
+ // To invert it, consider:
+ // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+ // Field 5 would be the first element, so memory_index is i:
+ // Note: if we didn't optimize, it's already right.
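+        // For instance, invert_mapping(&[1, 2, 0]) yields [2, 0, 1]: memory
+        // position 0 holds field 1, so memory_index[1] = 0, and so on.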
+
+ let memory_index =
+ if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+
+ let size = min_size.align_to(align.abi);
+ let mut abi = Abi::Aggregate { sized };
+
+ // Unpack newtype ABIs and find scalar pairs.
+ if sized && size.bytes() > 0 {
+ // All other fields must be ZSTs.
+ let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+ match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+ // We have exactly one non-ZST field.
+ (Some((i, field)), None, None) => {
+ // Field fills the struct and it has a scalar or scalar pair ABI.
+ if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+ {
+ match field.abi {
+ // For plain scalars, or vectors of them, we can't unpack
+ // newtypes for `#[repr(C)]`, as that affects C ABIs.
+ Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+ abi = field.abi;
+ }
+ // But scalar pairs are Rust-specific and get
+ // treated as aggregates by C ABIs anyway.
+ Abi::ScalarPair(..) => {
+ abi = field.abi;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ // Two non-ZST fields, and they're both scalars.
+ (Some((i, a)), Some((j, b)), None) => {
+ match (a.abi, b.abi) {
+ (Abi::Scalar(a), Abi::Scalar(b)) => {
+ // Order by the memory placement, not source order.
+ let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+ ((i, a), (j, b))
+ } else {
+ ((j, b), (i, a))
+ };
+ let pair = self.scalar_pair(a, b);
+ let pair_offsets = match pair.fields {
+ FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+ assert_eq!(memory_index, &[0, 1]);
+ offsets
+ }
+ _ => bug!(),
+ };
+ if offsets[i] == pair_offsets[0]
+ && offsets[j] == pair_offsets[1]
+ && align == pair.align
+ && size == pair.size
+ {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
+ }
+ }
+ _ => {}
+ }
+ }
+
+ _ => {}
+ }
+ }
+
+ if fields.iter().any(|f| f.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
+ }
+
+ Ok(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Arbitrary { offsets, memory_index },
+ abi,
+ largest_niche,
+ align,
+ size,
+ })
+ }
+
+ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
+ let tcx = self.tcx;
+ let param_env = self.param_env;
+ let dl = self.data_layout();
+ let scalar_unit = |value: Primitive| {
+ let size = value.size(dl);
+ assert!(size.bits() <= 128);
+ Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
+ };
+ let scalar =
+ |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
+
+ let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
+ Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
+ };
+ debug_assert!(!ty.has_infer_types_or_consts());
+
+ Ok(match *ty.kind() {
+ // Basic scalars.
+ ty::Bool => tcx.intern_layout(LayoutS::scalar(
+ self,
+ Scalar::Initialized {
+ value: Int(I8, false),
+ valid_range: WrappingRange { start: 0, end: 1 },
+ },
+ )),
+ ty::Char => tcx.intern_layout(LayoutS::scalar(
+ self,
+ Scalar::Initialized {
+ value: Int(I32, false),
+ valid_range: WrappingRange { start: 0, end: 0x10FFFF },
+ },
+ )),
+ ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
+ ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
+ ty::Float(fty) => scalar(match fty {
+ ty::FloatTy::F32 => F32,
+ ty::FloatTy::F64 => F64,
+ }),
+ ty::FnPtr(_) => {
+ let mut ptr = scalar_unit(Pointer);
+ ptr.valid_range_mut().start = 1;
+ tcx.intern_layout(LayoutS::scalar(self, ptr))
+ }
+
+ // The never type.
+ ty::Never => tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Primitive,
+ abi: Abi::Uninhabited,
+ largest_niche: None,
+ align: dl.i8_align,
+ size: Size::ZERO,
+ }),
+
+ // Potentially-wide pointers.
+ ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ let mut data_ptr = scalar_unit(Pointer);
+ if !ty.is_unsafe_ptr() {
+ data_ptr.valid_range_mut().start = 1;
+ }
+
+ let pointee = tcx.normalize_erasing_regions(param_env, pointee);
+ if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
+ return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
+ }
+
+ let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
+ let metadata = match unsized_part.kind() {
+ ty::Foreign(..) => {
+ return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
+ }
+ ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
+ ty::Dynamic(..) => {
+ let mut vtable = scalar_unit(Pointer);
+ vtable.valid_range_mut().start = 1;
+ vtable
+ }
+ _ => return Err(LayoutError::Unknown(unsized_part)),
+ };
+
+ // Effectively a (ptr, meta) tuple.
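+                // For example, `&[u8]` is (data pointer, usize length) and
+                // `&dyn Trait` is (data pointer, vtable pointer).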
+ tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
+ }
+
+ // Arrays and slices.
+ ty::Array(element, mut count) => {
+ if count.has_projections() {
+ count = tcx.normalize_erasing_regions(param_env, count);
+ if count.has_projections() {
+ return Err(LayoutError::Unknown(ty));
+ }
+ }
+
+ let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
+ let element = self.layout_of(element)?;
+ let size =
+ element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+
+ let abi =
+ if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
+ Abi::Uninhabited
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+
+ let largest_niche = if count != 0 { element.largest_niche } else { None };
+
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Array { stride: element.size, count },
+ abi,
+ largest_niche,
+ align: element.align,
+ size,
+ })
+ }
+ ty::Slice(element) => {
+ let element = self.layout_of(element)?;
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Array { stride: element.size, count: 0 },
+ abi: Abi::Aggregate { sized: false },
+ largest_niche: None,
+ align: element.align,
+ size: Size::ZERO,
+ })
+ }
+ ty::Str => tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
+ abi: Abi::Aggregate { sized: false },
+ largest_niche: None,
+ align: dl.i8_align,
+ size: Size::ZERO,
+ }),
+
+ // Odd unit types.
+ ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
+ ty::Dynamic(..) | ty::Foreign(..) => {
+ let mut unit = self.univariant_uninterned(
+ ty,
+ &[],
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )?;
+ match unit.abi {
+ Abi::Aggregate { ref mut sized } => *sized = false,
+ _ => bug!(),
+ }
+ tcx.intern_layout(unit)
+ }
+
+ ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
+
+ ty::Closure(_, ref substs) => {
+ let tys = substs.as_closure().upvar_tys();
+ univariant(
+ &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )?
+ }
+
+ ty::Tuple(tys) => {
+ let kind =
+ if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
+
+ univariant(
+ &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ kind,
+ )?
+ }
+
+ // SIMD vector types.
+ ty::Adt(def, substs) if def.repr().simd() => {
+ if !def.is_struct() {
+ // Should have yielded E0517 by now.
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ "#[repr(simd)] was applied to an ADT that is not a struct",
+ );
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ // Supported SIMD vectors are homogeneous ADTs with at least one field:
+ //
+ // * #[repr(simd)] struct S(T, T, T, T);
+ // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
+ // * #[repr(simd)] struct S([T; 4])
+ //
+ // where T is a primitive scalar (integer/float/pointer).
+
+ // SIMD vectors with zero fields are not supported.
+ // (should be caught by typeck)
+ if def.non_enum_variant().fields.is_empty() {
+ tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
+ }
+
+ // Type of the first ADT field:
+ let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
+
+ // Heterogeneous SIMD vectors are not supported:
+ // (should be caught by typeck)
+ for fi in &def.non_enum_variant().fields {
+ if fi.ty(tcx, substs) != f0_ty {
+ tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
+ }
+ }
+
+ // The element type and number of elements of the SIMD vector
+ // are obtained from:
+ //
+ // * the element type and length of the single array field, if
+ // the first field is of array type, or
+ //
+                //   * the homogeneous field type and the number of fields.
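+                //
+                // For example (illustrative), `#[repr(simd)] struct F32x4([f32; 4]);`
+                // yields e_ty = f32 and e_len = 4, while a four-field
+                // `#[repr(simd)] struct U64x4(u64, u64, u64, u64);` yields
+                // e_ty = u64 and e_len = 4.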
+ let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
+ // First ADT field is an array:
+
+ // SIMD vectors with multiple array fields are not supported:
+ // (should be caught by typeck)
+ if def.non_enum_variant().fields.len() != 1 {
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` with more than one array field",
+ ty
+ ));
+ }
+
+ // Extract the number of elements from the layout of the array field:
+ let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
+ return Err(LayoutError::Unknown(ty));
+ };
+
+ (*e_ty, *count, true)
+ } else {
+ // First ADT field is not an array:
+ (f0_ty, def.non_enum_variant().fields.len() as _, false)
+ };
+
+ // SIMD vectors of zero length are not supported.
+ // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
+ // support.
+ //
+ // Can't be caught in typeck if the array length is generic.
+ if e_len == 0 {
+ tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
+ } else if e_len > MAX_SIMD_LANES {
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` of length greater than {}",
+ ty, MAX_SIMD_LANES,
+ ));
+ }
+
+ // Compute the ABI of the element type:
+ let e_ly = self.layout_of(e_ty)?;
+ let Abi::Scalar(e_abi) = e_ly.abi else {
+ // This error isn't caught in typeck, e.g., if
+ // the element type of the vector is generic.
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` with a non-primitive-scalar \
+ (integer/float/pointer) element type `{}`",
+ ty, e_ty
+ ))
+ };
+
+ // Compute the size and alignment of the vector:
+ let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+ let align = dl.vector_align(size);
+ let size = size.align_to(align.abi);
+
+ // Compute the placement of the vector fields:
+ let fields = if is_array {
+ FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
+ } else {
+ FieldsShape::Array { stride: e_ly.size, count: e_len }
+ };
+
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields,
+ abi: Abi::Vector { element: e_abi, count: e_len },
+ largest_niche: e_ly.largest_niche,
+ size,
+ align,
+ })
+ }
+
+ // ADTs.
+ ty::Adt(def, substs) => {
+ // Cache the field layouts.
+ let variants = def
+ .variants()
+ .iter()
+ .map(|v| {
+ v.fields
+ .iter()
+ .map(|field| self.layout_of(field.ty(tcx, substs)))
+ .collect::<Result<Vec<_>, _>>()
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ if def.is_union() {
+ if def.repr().pack.is_some() && def.repr().align.is_some() {
+ self.tcx.sess.delay_span_bug(
+ tcx.def_span(def.did()),
+ "union cannot be packed and aligned",
+ );
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ let mut align =
+ if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+ if let Some(repr_align) = def.repr().align {
+ align = align.max(AbiAndPrefAlign::new(repr_align));
+ }
+
+ let optimize = !def.repr().inhibit_union_abi_opt();
+ let mut size = Size::ZERO;
+ let mut abi = Abi::Aggregate { sized: true };
+ let index = VariantIdx::new(0);
+ for field in &variants[index] {
+ assert!(!field.is_unsized());
+ align = align.max(field.align);
+
+ // If all non-ZST fields have the same ABI, forward this ABI
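+                    // (E.g. in `union U { a: u32, b: char }` both fields are
+                    // 4-byte scalars and `to_union` below discards `char`'s
+                    // niche, so the union keeps a scalar ABI.)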
+ if optimize && !field.is_zst() {
+ // Discard valid range information and allow undef
+ let field_abi = match field.abi {
+ Abi::Scalar(x) => Abi::Scalar(x.to_union()),
+ Abi::ScalarPair(x, y) => {
+ Abi::ScalarPair(x.to_union(), y.to_union())
+ }
+ Abi::Vector { element: x, count } => {
+ Abi::Vector { element: x.to_union(), count }
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {
+ Abi::Aggregate { sized: true }
+ }
+ };
+
+ if size == Size::ZERO {
+                            // First non-ZST field: initialize `abi`.
+ abi = field_abi;
+ } else if abi != field_abi {
+                            // Different fields have different ABIs: reset to Aggregate.
+ abi = Abi::Aggregate { sized: true };
+ }
+ }
+
+ size = cmp::max(size, field.size);
+ }
+
+ if let Some(pack) = def.repr().pack {
+ align = align.min(AbiAndPrefAlign::new(pack));
+ }
+
+ return Ok(tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index },
+ fields: FieldsShape::Union(
+ NonZeroUsize::new(variants[index].len())
+ .ok_or(LayoutError::Unknown(ty))?,
+ ),
+ abi,
+ largest_niche: None,
+ align,
+ size: size.align_to(align.abi),
+ }));
+ }
+
+ // A variant is absent if it's uninhabited and only has ZST fields.
+ // Present uninhabited variants only require space for their fields,
+ // but *not* an encoding of the discriminant (e.g., a tag value).
+ // See issue #49298 for more details on the need to leave space
+ // for non-ZST uninhabited data (mostly partial initialization).
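+                // Illustration: in `enum E { A(u32), B(!, u32) }`, variant `B` is
+                // uninhabited but has a non-ZST field, so it is still "present";
+                // a variant `C(!)` with only ZST fields would be absent.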
+ let absent = |fields: &[TyAndLayout<'_>]| {
+ let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
+ let is_zst = fields.iter().all(|f| f.is_zst());
+ uninhabited && is_zst
+ };
+ let (present_first, present_second) = {
+ let mut present_variants = variants
+ .iter_enumerated()
+ .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
+ (present_variants.next(), present_variants.next())
+ };
+ let present_first = match present_first {
+ Some(present_first) => present_first,
+ // Uninhabited because it has no variants, or only absent ones.
+ None if def.is_enum() => {
+ return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
+ }
+ // If it's a struct, still compute a layout so that we can still compute the
+ // field offsets.
+ None => VariantIdx::new(0),
+ };
+
+ let is_struct = !def.is_enum() ||
+ // Only one variant is present.
+ (present_second.is_none() &&
+ // Representation optimizations are allowed.
+ !def.repr().inhibit_enum_layout_opt());
+ if is_struct {
+ // Struct, or univariant enum equivalent to a struct.
+ // (Typechecking will reject discriminant-sizing attrs.)
+
+ let v = present_first;
+ let kind = if def.is_enum() || variants[v].is_empty() {
+ StructKind::AlwaysSized
+ } else {
+ let param_env = tcx.param_env(def.did());
+ let last_field = def.variant(v).fields.last().unwrap();
+ let always_sized =
+ tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
+ if !always_sized {
+ StructKind::MaybeUnsized
+ } else {
+ StructKind::AlwaysSized
+ }
+ };
+
+ let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
+ st.variants = Variants::Single { index: v };
+
+ if def.is_unsafe_cell() {
+ let hide_niches = |scalar: &mut _| match scalar {
+ Scalar::Initialized { value, valid_range } => {
+ *valid_range = WrappingRange::full(value.size(dl))
+ }
+ // Already doesn't have any niches
+ Scalar::Union { .. } => {}
+ };
+ match &mut st.abi {
+ Abi::Uninhabited => {}
+ Abi::Scalar(scalar) => hide_niches(scalar),
+ Abi::ScalarPair(a, b) => {
+ hide_niches(a);
+ hide_niches(b);
+ }
+ Abi::Vector { element, count: _ } => hide_niches(element),
+ Abi::Aggregate { sized: _ } => {}
+ }
+ st.largest_niche = None;
+ return Ok(tcx.intern_layout(st));
+ }
+
+ let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
+ match st.abi {
+ Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+                        // The asserts ensure that we are not using the
+                        // `#[rustc_layout_scalar_valid_range(n)]` attribute
+                        // to widen the range of anything, as that would
+                        // probably result in UB somewhere.
+ // FIXME(eddyb) the asserts are probably not needed,
+ // as larger validity ranges would result in missed
+ // optimizations, *not* wrongly assuming the inner
+ // value is valid. e.g. unions enlarge validity ranges,
+ // because the values may be uninitialized.
+ if let Bound::Included(start) = start {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.start <= start);
+ valid_range.start = start;
+ }
+ if let Bound::Included(end) = end {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.end >= end);
+ valid_range.end = end;
+ }
+
+ // Update `largest_niche` if we have introduced a larger niche.
+ let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
+ if let Some(niche) = niche {
+ match st.largest_niche {
+ Some(largest_niche) => {
+ // Replace the existing niche even if they're equal,
+ // because this one is at a lower offset.
+ if largest_niche.available(dl) <= niche.available(dl) {
+ st.largest_niche = Some(niche);
+ }
+ }
+ None => st.largest_niche = Some(niche),
+ }
+ }
+ }
+ _ => assert!(
+ start == Bound::Unbounded && end == Bound::Unbounded,
+ "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
+ def,
+ st,
+ ),
+ }
+
+ return Ok(tcx.intern_layout(st));
+ }
+
+ // At this point, we have handled all unions and
+ // structs. (We have also handled univariant enums
+ // that allow representation optimization.)
+ assert!(def.is_enum());
+
+ // The current code for niche-filling relies on variant indices
+ // instead of actual discriminants, so dataful enums with
+ // explicit discriminants (RFC #2363) would misbehave.
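+                // The classic example is `Option<&T>`: `Some` is the dataful
+                // variant and `None` is encoded in the pointer's null niche,
+                // so the whole enum stays pointer-sized.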
+ let no_explicit_discriminants = def
+ .variants()
+ .iter_enumerated()
+ .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
+
+ let mut niche_filling_layout = None;
+
+ // Niche-filling enum optimization.
+ if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
+ let mut dataful_variant = None;
+ let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
+
+ // Find one non-ZST variant.
+ 'variants: for (v, fields) in variants.iter_enumerated() {
+ if absent(fields) {
+ continue 'variants;
+ }
+ for f in fields {
+ if !f.is_zst() {
+ if dataful_variant.is_none() {
+ dataful_variant = Some(v);
+ continue 'variants;
+ } else {
+ dataful_variant = None;
+ break 'variants;
+ }
+ }
+ }
+ niche_variants = *niche_variants.start().min(&v)..=v;
+ }
+
+ if niche_variants.start() > niche_variants.end() {
+ dataful_variant = None;
+ }
+
+ if let Some(i) = dataful_variant {
+ let count = (niche_variants.end().as_u32()
+ - niche_variants.start().as_u32()
+ + 1) as u128;
+
+ // Find the field with the largest niche
+ let niche_candidate = variants[i]
+ .iter()
+ .enumerate()
+ .filter_map(|(j, field)| Some((j, field.largest_niche?)))
+ .max_by_key(|(_, niche)| niche.available(dl));
+
+ if let Some((field_index, niche, (niche_start, niche_scalar))) =
+ niche_candidate.and_then(|(field_index, niche)| {
+ Some((field_index, niche, niche.reserve(self, count)?))
+ })
+ {
+ let mut align = dl.aggregate_align;
+ let st = variants
+ .iter_enumerated()
+ .map(|(j, v)| {
+ let mut st = self.univariant_uninterned(
+ ty,
+ v,
+ &def.repr(),
+ StructKind::AlwaysSized,
+ )?;
+ st.variants = Variants::Single { index: j };
+
+ align = align.max(st.align);
+
+ Ok(tcx.intern_layout(st))
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ let offset = st[i].fields().offset(field_index) + niche.offset;
+
+ // Align the total size to the largest alignment.
+ let size = st[i].size().align_to(align.abi);
+
+ let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
+ Abi::Uninhabited
+ } else if align == st[i].align() && size == st[i].size() {
+ // When the total alignment and size match, we can use the
+ // same ABI as the scalar variant with the reserved niche.
+ match st[i].abi() {
+ Abi::Scalar(_) => Abi::Scalar(niche_scalar),
+ Abi::ScalarPair(first, second) => {
+ // Only the niche is guaranteed to be initialised,
+ // so use union layout for the other primitive.
+ if offset.bytes() == 0 {
+ Abi::ScalarPair(niche_scalar, second.to_union())
+ } else {
+ Abi::ScalarPair(first.to_union(), niche_scalar)
+ }
+ }
+ _ => Abi::Aggregate { sized: true },
+ }
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+
+ let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
+
+ niche_filling_layout = Some(LayoutS {
+ variants: Variants::Multiple {
+ tag: niche_scalar,
+ tag_encoding: TagEncoding::Niche {
+ dataful_variant: i,
+ niche_variants,
+ niche_start,
+ },
+ tag_field: 0,
+ variants: st,
+ },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![offset],
+ memory_index: vec![0],
+ },
+ abi,
+ largest_niche,
+ size,
+ align,
+ });
+ }
+ }
+ }
+
+ let (mut min, mut max) = (i128::MAX, i128::MIN);
+ let discr_type = def.repr().discr_type();
+ let bits = Integer::from_attr(self, discr_type).size().bits();
+ for (i, discr) in def.discriminants(tcx) {
+ if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
+ continue;
+ }
+ let mut x = discr.val as i128;
+ if discr_type.is_signed() {
+ // sign extend the raw representation to be an i128
+ x = (x << (128 - bits)) >> (128 - bits);
+ }
+ if x < min {
+ min = x;
+ }
+ if x > max {
+ max = x;
+ }
+ }
+ // We might have no inhabited variants, so pretend there's at least one.
+ if (min, max) == (i128::MAX, i128::MIN) {
+ min = 0;
+ max = 0;
+ }
+ assert!(min <= max, "discriminant range is {}...{}", min, max);
+ let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
+
+ let mut align = dl.aggregate_align;
+ let mut size = Size::ZERO;
+
+ // We're interested in the smallest alignment, so start large.
+ let mut start_align = Align::from_bytes(256).unwrap();
+ assert_eq!(Integer::for_align(dl, start_align), None);
+
+ // repr(C) on an enum tells us to make a (tag, union) layout,
+ // so we need to grow the prefix alignment to be at least
+ // the alignment of the union. (This value is used both for
+                // determining the alignment of the overall enum, and for
+                // determining the alignment of the payload after the tag.)
+ let mut prefix_align = min_ity.align(dl).abi;
+ if def.repr().c() {
+ for fields in &variants {
+ for field in fields {
+ prefix_align = prefix_align.max(field.align.abi);
+ }
+ }
+ }
+
+ // Create the set of structs that represent each variant.
+ let mut layout_variants = variants
+ .iter_enumerated()
+ .map(|(i, field_layouts)| {
+ let mut st = self.univariant_uninterned(
+ ty,
+ &field_layouts,
+ &def.repr(),
+ StructKind::Prefixed(min_ity.size(), prefix_align),
+ )?;
+ st.variants = Variants::Single { index: i };
+ // Find the first field we can't move later
+ // to make room for a larger discriminant.
+ for field in
+ st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
+ {
+ if !field.is_zst() || field.align.abi.bytes() != 1 {
+ start_align = start_align.min(field.align.abi);
+ break;
+ }
+ }
+ size = cmp::max(size, st.size);
+ align = align.max(st.align);
+ Ok(st)
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ // Align the maximum variant size to the largest alignment.
+ size = size.align_to(align.abi);
+
+ if size.bytes() >= dl.obj_size_bound() {
+ return Err(LayoutError::SizeOverflow(ty));
+ }
+
+ let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
+ if typeck_ity < min_ity {
+                    // It is a bug if layout decided on a greater discriminant size
+                    // than typeck did at this point (based on the values the
+                    // discriminant can take on), mostly because this discriminant
+                    // will be loaded and then stored into a variable of the type
+                    // calculated by typeck. Consider such a case (a bug): typeck
+                    // decided on a byte-sized discriminant, but layout thinks we
+                    // need 16 bits to store all discriminant values. Then, in
+                    // codegen, storing this 16-bit discriminant into an 8-bit
+                    // sized temporary would discard some of the space necessary
+                    // to represent it (or layout is wrong in thinking it needs
+                    // 16 bits).
+ bug!(
+ "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
+ min_ity,
+ typeck_ity
+ );
+                    // However, it is fine to make the discriminant type larger (as
+                    // an optimisation) after this point; we'll just truncate the
+                    // value we load in codegen.
+ }
+
+ // Check to see if we should use a different type for the
+ // discriminant. We can safely use a type with the same size
+ // as the alignment of the first field of each variant.
+ // We increase the size of the discriminant to avoid LLVM copying
+ // padding when it doesn't need to. This normally causes unaligned
+ // load/stores and excessive memcpy/memset operations. By using a
+ // bigger integer size, LLVM can be sure about its contents and
+ // won't be so conservative.
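+                // For example, a two-variant enum whose variants both start with
+                // a `u64` field could use an I8 tag, but then 7 padding bytes
+                // follow it; widening the tag to I64 (when no explicit repr
+                // forbids it) lets LLVM treat the whole prefix as initialized.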
+
+ // Use the initial field alignment
+ let mut ity = if def.repr().c() || def.repr().int.is_some() {
+ min_ity
+ } else {
+ Integer::for_align(dl, start_align).unwrap_or(min_ity)
+ };
+
+ // If the alignment is not larger than the chosen discriminant size,
+ // don't use the alignment as the final size.
+ if ity <= min_ity {
+ ity = min_ity;
+ } else {
+ // Patch up the variants' first few fields.
+ let old_ity_size = min_ity.size();
+ let new_ity_size = ity.size();
+ for variant in &mut layout_variants {
+ match variant.fields {
+ FieldsShape::Arbitrary { ref mut offsets, .. } => {
+ for i in offsets {
+ if *i <= old_ity_size {
+ assert_eq!(*i, old_ity_size);
+ *i = new_ity_size;
+ }
+ }
+ // We might be making the struct larger.
+ if variant.size <= old_ity_size {
+ variant.size = new_ity_size;
+ }
+ }
+ _ => bug!(),
+ }
+ }
+ }
+
+ let tag_mask = ity.size().unsigned_int_max();
+ let tag = Scalar::Initialized {
+ value: Int(ity, signed),
+ valid_range: WrappingRange {
+ start: (min as u128 & tag_mask),
+ end: (max as u128 & tag_mask),
+ },
+ };
+ let mut abi = Abi::Aggregate { sized: true };
+
+ if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
+ } else if tag.size(dl) == size {
+ // Make sure we only use scalar layout when the enum is entirely its
+ // own tag (i.e. it has no padding nor any non-ZST variant fields).
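+                // (E.g. a fieldless `enum E { A, B, C }` is entirely its own
+                // tag, so it gets `Abi::Scalar`.)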
+ abi = Abi::Scalar(tag);
+ } else {
+ // Try to use a ScalarPair for all tagged enums.
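+                    // For instance, `enum E { A(u32), B(u32) }` can typically
+                    // become `ScalarPair(tag, u32)`, since every variant has
+                    // exactly one non-ZST `u32` at the same offset.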
+ let mut common_prim = None;
+ let mut common_prim_initialized_in_all_variants = true;
+ for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
+ let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
+ bug!();
+ };
+ let mut fields =
+ iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
+ let (field, offset) = match (fields.next(), fields.next()) {
+ (None, None) => {
+ common_prim_initialized_in_all_variants = false;
+ continue;
+ }
+ (Some(pair), None) => pair,
+ _ => {
+ common_prim = None;
+ break;
+ }
+ };
+ let prim = match field.abi {
+ Abi::Scalar(scalar) => {
+ common_prim_initialized_in_all_variants &=
+ matches!(scalar, Scalar::Initialized { .. });
+ scalar.primitive()
+ }
+ _ => {
+ common_prim = None;
+ break;
+ }
+ };
+ if let Some(pair) = common_prim {
+ // This is pretty conservative. We could go fancier
+ // by conflating things like i32 and u32, or even
+ // realising that (u8, u8) could just cohabit with
+ // u16 or even u32.
+ if pair != (prim, offset) {
+ common_prim = None;
+ break;
+ }
+ } else {
+ common_prim = Some((prim, offset));
+ }
+ }
+ if let Some((prim, offset)) = common_prim {
+ let prim_scalar = if common_prim_initialized_in_all_variants {
+ scalar_unit(prim)
+ } else {
+ // Common prim might be uninit.
+ Scalar::Union { value: prim }
+ };
+ let pair = self.scalar_pair(tag, prim_scalar);
+ let pair_offsets = match pair.fields {
+ FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+ assert_eq!(memory_index, &[0, 1]);
+ offsets
+ }
+ _ => bug!(),
+ };
+ if pair_offsets[0] == Size::ZERO
+ && pair_offsets[1] == *offset
+ && align == pair.align
+ && size == pair.size
+ {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
+ }
+ }
+ }
+
+ // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
+ // variants to ensure they are consistent. This is because a downcast is
+ // semantically a NOP, and thus should not affect layout.
+ if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+ for variant in &mut layout_variants {
+ // We only do this for variants with fields; the others are not accessed anyway.
+ // Also do not overwrite any already existing "clever" ABIs.
+ if variant.fields.count() > 0
+ && matches!(variant.abi, Abi::Aggregate { .. })
+ {
+ variant.abi = abi;
+ // Also need to bump up the size and alignment, so that the entire value fits in here.
+ variant.size = cmp::max(variant.size, size);
+ variant.align.abi = cmp::max(variant.align.abi, align.abi);
+ }
+ }
+ }
+
+ let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
+
+ let layout_variants =
+ layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
+
+ let tagged_layout = LayoutS {
+ variants: Variants::Multiple {
+ tag,
+ tag_encoding: TagEncoding::Direct,
+ tag_field: 0,
+ variants: layout_variants,
+ },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![Size::ZERO],
+ memory_index: vec![0],
+ },
+ largest_niche,
+ abi,
+ align,
+ size,
+ };
+
+ let best_layout = match (tagged_layout, niche_filling_layout) {
+ (tagged_layout, Some(niche_filling_layout)) => {
+ // Pick the smaller layout; otherwise,
+ // pick the layout with the larger niche; otherwise,
+ // pick tagged as it has simpler codegen.
+ cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
+ let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
+ (layout.size, cmp::Reverse(niche_size))
+ })
+ }
+ (tagged_layout, None) => tagged_layout,
+ };
+
+ tcx.intern_layout(best_layout)
+ }
+
+ // Types with no meaningful known layout.
+ ty::Projection(_) | ty::Opaque(..) => {
+ // NOTE(eddyb) `layout_of` query should've normalized these away,
+ // if that was possible, so there's no reason to try again here.
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
+ bug!("Layout::compute: unexpected type `{}`", ty)
+ }
+
+ ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
+ return Err(LayoutError::Unknown(ty));
+ }
+ })
+ }
+}
+
+/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
+#[derive(Clone, Debug, PartialEq)]
+enum SavedLocalEligibility {
+ Unassigned,
+ Assigned(VariantIdx),
+ // FIXME: Use newtype_index so we aren't wasting bytes
+ Ineligible(Option<u32>),
+}
+
+// When laying out generators, we divide our saved local fields into two
+// categories: overlap-eligible and overlap-ineligible.
+//
+// Those fields which are ineligible for overlap go in a "prefix" at the
+// beginning of the layout, and always have space reserved for them.
+//
+// Overlap-eligible fields are only assigned to one variant, so we lay
+// those fields out for each variant and put them right after the
+// prefix.
+//
+// Finally, in the layout details, we point to the fields from the
+// variants they are assigned to. It is possible for some fields to be
+// included in multiple variants. No field ever "moves around" in the
+// layout; its offset is always the same.
+//
+// Also included in the layout are the upvars and the discriminant.
+// These are included as fields on the "outer" layout; they are not part
+// of any variant.
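+//
+// Rough illustration: a local live across the yields of only one variant is
+// eligible and can share its slot with other variants' eligible locals,
+// while a local live across yields of two variants becomes ineligible and
+// is promoted to the prefix.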
+impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
+ /// Compute the eligibility and assignment of each local.
+ fn generator_saved_local_eligibility(
+ &self,
+ info: &GeneratorLayout<'tcx>,
+ ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
+ use SavedLocalEligibility::*;
+
+ let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
+ IndexVec::from_elem_n(Unassigned, info.field_tys.len());
+
+ // The saved locals not eligible for overlap. These will get
+ // "promoted" to the prefix of our generator.
+ let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
+
+ // Figure out which of our saved locals are fields in only
+ // one variant. The rest are deemed ineligible for overlap.
+ for (variant_index, fields) in info.variant_fields.iter_enumerated() {
+ for local in fields {
+ match assignments[*local] {
+ Unassigned => {
+ assignments[*local] = Assigned(variant_index);
+ }
+ Assigned(idx) => {
+ // We've already seen this local at another suspension
+ // point, so it is no longer a candidate.
+ trace!(
+ "removing local {:?} in >1 variant ({:?}, {:?})",
+ local,
+ variant_index,
+ idx
+ );
+ ineligible_locals.insert(*local);
+ assignments[*local] = Ineligible(None);
+ }
+ Ineligible(_) => {}
+ }
+ }
+ }
+
+ // Next, check every pair of eligible locals to see if they
+ // conflict.
+ for local_a in info.storage_conflicts.rows() {
+ let conflicts_a = info.storage_conflicts.count(local_a);
+ if ineligible_locals.contains(local_a) {
+ continue;
+ }
+
+ for local_b in info.storage_conflicts.iter(local_a) {
+ // local_a and local_b are storage live at the same time, therefore they
+ // cannot overlap in the generator layout. The only way to guarantee
+ // this is if they are in the same variant, or one is ineligible
+ // (which means it is stored in every variant).
+ if ineligible_locals.contains(local_b)
+ || assignments[local_a] == assignments[local_b]
+ {
+ continue;
+ }
+
+ // If they conflict, we will choose one to make ineligible.
+ // This is not always optimal; it's just a greedy heuristic that
+ // seems to produce good results most of the time.
+ let conflicts_b = info.storage_conflicts.count(local_b);
+ let (remove, other) =
+ if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
+ ineligible_locals.insert(remove);
+ assignments[remove] = Ineligible(None);
+ trace!("removing local {:?} due to conflict with {:?}", remove, other);
+ }
+ }
+
+ // Count the number of variants in use. If only one of them, then it is
+ // impossible to overlap any locals in our layout. In this case it's
+ // always better to make the remaining locals ineligible, so we can
+ // lay them out with the other locals in the prefix and eliminate
+ // unnecessary padding bytes.
+ {
+ let mut used_variants = BitSet::new_empty(info.variant_fields.len());
+ for assignment in &assignments {
+ if let Assigned(idx) = assignment {
+ used_variants.insert(*idx);
+ }
+ }
+ if used_variants.count() < 2 {
+ for assignment in assignments.iter_mut() {
+ *assignment = Ineligible(None);
+ }
+ ineligible_locals.insert_all();
+ }
+ }
+
+ // Write down the order of our locals that will be promoted to the prefix.
+ {
+ for (idx, local) in ineligible_locals.iter().enumerate() {
+ assignments[local] = Ineligible(Some(idx as u32));
+ }
+ }
+ debug!("generator saved local assignments: {:?}", assignments);
+
+ (ineligible_locals, assignments)
+ }
+
+ /// Compute the full generator layout.
+ fn generator_layout(
+ &self,
+ ty: Ty<'tcx>,
+ def_id: hir::def_id::DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
+ use SavedLocalEligibility::*;
+ let tcx = self.tcx;
+ let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
+
+ let Some(info) = tcx.generator_layout(def_id) else {
+ return Err(LayoutError::Unknown(ty));
+ };
+ let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
+
+ // Build a prefix layout, including "promoting" all ineligible
+ // locals as part of the prefix. We compute the layout of all of
+ // these fields at once to get optimal packing.
+ let tag_index = substs.as_generator().prefix_tys().count();
+
+ // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
+ let max_discr = (info.variant_fields.len() - 1) as u128;
+ let discr_int = Integer::fit_unsigned(max_discr);
+ let discr_int_ty = discr_int.to_ty(tcx, false);
+ let tag = Scalar::Initialized {
+ value: Primitive::Int(discr_int, false),
+ valid_range: WrappingRange { start: 0, end: max_discr },
+ };
+ let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
+ let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
+
+ let promoted_layouts = ineligible_locals
+ .iter()
+ .map(|local| subst_field(info.field_tys[local]))
+ .map(|ty| tcx.mk_maybe_uninit(ty))
+ .map(|ty| self.layout_of(ty));
+ let prefix_layouts = substs
+ .as_generator()
+ .prefix_tys()
+ .map(|ty| self.layout_of(ty))
+ .chain(iter::once(Ok(tag_layout)))
+ .chain(promoted_layouts)
+ .collect::<Result<Vec<_>, _>>()?;
+ let prefix = self.univariant_uninterned(
+ ty,
+ &prefix_layouts,
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )?;
+
+ let (prefix_size, prefix_align) = (prefix.size, prefix.align);
+
+ // Split the prefix layout into the "outer" fields (upvars and
+ // discriminant) and the "promoted" fields. Promoted fields will
+ // get included in each variant that requested them in
+ // GeneratorLayout.
+ debug!("prefix = {:#?}", prefix);
+ let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
+ FieldsShape::Arbitrary { mut offsets, memory_index } => {
+ let mut inverse_memory_index = invert_mapping(&memory_index);
+
+ // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
+ // "outer" and "promoted" fields respectively.
+ let b_start = (tag_index + 1) as u32;
+ let offsets_b = offsets.split_off(b_start as usize);
+ let offsets_a = offsets;
+
+ // Disentangle the "a" and "b" components of `inverse_memory_index`
+ // by preserving the order but keeping only one disjoint "half" each.
+ // FIXME(eddyb) build a better abstraction for permutations, if possible.
+ let inverse_memory_index_b: Vec<_> =
+ inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
+ inverse_memory_index.retain(|&i| i < b_start);
+ let inverse_memory_index_a = inverse_memory_index;
+
+ // Since `inverse_memory_index_{a,b}` each only refer to their
+ // respective fields, they can be safely inverted
+ let memory_index_a = invert_mapping(&inverse_memory_index_a);
+ let memory_index_b = invert_mapping(&inverse_memory_index_b);
+
+ let outer_fields =
+ FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
+ (outer_fields, offsets_b, memory_index_b)
+ }
+ _ => bug!(),
+ };
+
+ let mut size = prefix.size;
+ let mut align = prefix.align;
+ let variants = info
+ .variant_fields
+ .iter_enumerated()
+ .map(|(index, variant_fields)| {
+ // Only include overlap-eligible fields when we compute our variant layout.
+ let variant_only_tys = variant_fields
+ .iter()
+ .filter(|local| match assignments[**local] {
+ Unassigned => bug!(),
+ Assigned(v) if v == index => true,
+ Assigned(_) => bug!("assignment does not match variant"),
+ Ineligible(_) => false,
+ })
+ .map(|local| subst_field(info.field_tys[*local]));
+
+ let mut variant = self.univariant_uninterned(
+ ty,
+ &variant_only_tys
+ .map(|ty| self.layout_of(ty))
+ .collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ StructKind::Prefixed(prefix_size, prefix_align.abi),
+ )?;
+ variant.variants = Variants::Single { index };
+
+ let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
+ bug!();
+ };
+
+ // Now, stitch the promoted and variant-only fields back together in
+ // the order they are mentioned by our GeneratorLayout.
+ // Because we only use some subset (that can differ between variants)
+ // of the promoted fields, we can't just pick those elements of the
+ // `promoted_memory_index` (as we'd end up with gaps).
+ // So instead, we build an "inverse memory_index", as if all of the
+ // promoted fields were being used, but leave the elements not in the
+ // subset as `INVALID_FIELD_IDX`, which we can filter out later to
+ // obtain a valid (bijective) mapping.
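+                // For example, if this variant uses only one of three promoted
+                // fields, the other two promoted memory-index slots remain
+                // INVALID_FIELD_IDX and are dropped by the retain() below.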
+ const INVALID_FIELD_IDX: u32 = !0;
+ let mut combined_inverse_memory_index =
+ vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
+ let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
+ let combined_offsets = variant_fields
+ .iter()
+ .enumerate()
+ .map(|(i, local)| {
+ let (offset, memory_index) = match assignments[*local] {
+ Unassigned => bug!(),
+ Assigned(_) => {
+ let (offset, memory_index) =
+ offsets_and_memory_index.next().unwrap();
+ (offset, promoted_memory_index.len() as u32 + memory_index)
+ }
+ Ineligible(field_idx) => {
+ let field_idx = field_idx.unwrap() as usize;
+ (promoted_offsets[field_idx], promoted_memory_index[field_idx])
+ }
+ };
+ combined_inverse_memory_index[memory_index as usize] = i as u32;
+ offset
+ })
+ .collect();
+
+ // Remove the unused slots and invert the mapping to obtain the
+ // combined `memory_index` (also see previous comment).
+ combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
+ let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
+
+ variant.fields = FieldsShape::Arbitrary {
+ offsets: combined_offsets,
+ memory_index: combined_memory_index,
+ };
+
+ size = size.max(variant.size);
+ align = align.max(variant.align);
+ Ok(tcx.intern_layout(variant))
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ size = size.align_to(align.abi);
+
+ let abi =
+ if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
+ Abi::Uninhabited
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+
+ let layout = tcx.intern_layout(LayoutS {
+ variants: Variants::Multiple {
+ tag,
+ tag_encoding: TagEncoding::Direct,
+ tag_field: tag_index,
+ variants,
+ },
+ fields: outer_fields,
+ abi,
+ largest_niche: prefix.largest_niche,
+ size,
+ align,
+ });
+ debug!("generator layout ({:?}): {:#?}", ty, layout);
+ Ok(layout)
+ }
+
+ /// This is invoked by the `layout_of` query to record the final
+ /// layout of each type.
+ #[inline(always)]
+ fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
+ // If we are running with `-Zprint-type-sizes`, maybe record layouts
+ // for dumping later.
+ if self.tcx.sess.opts.unstable_opts.print_type_sizes {
+ self.record_layout_for_printing_outlined(layout)
+ }
+ }
+
+ fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
+ // Ignore layouts that are done with non-empty environments or
+ // non-monomorphic layouts, as the user only wants to see the stuff
+ // resulting from the final codegen session.
+ if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
+ return;
+ }
+
+ // (delay format until we actually need it)
+ let record = |kind, packed, opt_discr_size, variants| {
+ let type_desc = format!("{:?}", layout.ty);
+ self.tcx.sess.code_stats.record_type_size(
+ kind,
+ type_desc,
+ layout.align.abi,
+ layout.size,
+ packed,
+ opt_discr_size,
+ variants,
+ );
+ };
+
+ let adt_def = match *layout.ty.kind() {
+ ty::Adt(ref adt_def, _) => {
+ debug!("print-type-size t: `{:?}` process adt", layout.ty);
+ adt_def
+ }
+
+ ty::Closure(..) => {
+ debug!("print-type-size t: `{:?}` record closure", layout.ty);
+ record(DataTypeKind::Closure, false, None, vec![]);
+ return;
+ }
+
+ _ => {
+ debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
+ return;
+ }
+ };
+
+ let adt_kind = adt_def.adt_kind();
+ let adt_packed = adt_def.repr().pack.is_some();
+
+ let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
+ let mut min_size = Size::ZERO;
+ let field_info: Vec<_> = flds
+ .iter()
+ .enumerate()
+ .map(|(i, &name)| {
+ let field_layout = layout.field(self, i);
+ let offset = layout.fields.offset(i);
+ let field_end = offset + field_layout.size;
+ if min_size < field_end {
+ min_size = field_end;
+ }
+ FieldInfo {
+ name,
+ offset: offset.bytes(),
+ size: field_layout.size.bytes(),
+ align: field_layout.align.abi.bytes(),
+ }
+ })
+ .collect();
+
+ VariantInfo {
+ name: n,
+ kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
+ align: layout.align.abi.bytes(),
+ size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
+ fields: field_info,
+ }
+ };
+
+ match layout.variants {
+ Variants::Single { index } => {
+ if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
+ debug!(
+ "print-type-size `{:#?}` variant {}",
+ layout,
+ adt_def.variant(index).name
+ );
+ let variant_def = &adt_def.variant(index);
+ let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
+ record(
+ adt_kind.into(),
+ adt_packed,
+ None,
+ vec![build_variant_info(Some(variant_def.name), &fields, layout)],
+ );
+ } else {
+ // (This case arises for *empty* enums; so give it
+ // zero variants.)
+ record(adt_kind.into(), adt_packed, None, vec![]);
+ }
+ }
+
+ Variants::Multiple { tag, ref tag_encoding, .. } => {
+ debug!(
+ "print-type-size `{:#?}` adt general variants def {}",
+ layout.ty,
+ adt_def.variants().len()
+ );
+ let variant_infos: Vec<_> = adt_def
+ .variants()
+ .iter_enumerated()
+ .map(|(i, variant_def)| {
+ let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
+ build_variant_info(
+ Some(variant_def.name),
+ &fields,
+ layout.for_variant(self, i),
+ )
+ })
+ .collect();
+ record(
+ adt_kind.into(),
+ adt_packed,
+ match tag_encoding {
+ TagEncoding::Direct => Some(tag.size(self)),
+ _ => None,
+ },
+ variant_infos,
+ );
+ }
+ }
+ }
+}
+
+/// Type size "skeleton", i.e., the only information determining a type's size.
+/// While this is conservative (aside from constant sizes, only pointers,
+/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
+/// enough to statically check common use cases of transmute.
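+/// For example, it can show that `&T` and `Option<&T>` always have the same
+/// size, even when `T` is a type parameter.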
+#[derive(Copy, Clone, Debug)]
+pub enum SizeSkeleton<'tcx> {
+ /// Any statically computable Layout.
+ Known(Size),
+
+ /// A potentially-fat pointer.
+ Pointer {
+ /// If true, this pointer is never null.
+ non_zero: bool,
+ /// The type which determines the unsized metadata, if any,
+        /// of this pointer. Either a type parameter, or a projection
+        /// that depends on one, with regions erased.
+ tail: Ty<'tcx>,
+ },
+}
+
+impl<'tcx> SizeSkeleton<'tcx> {
+ pub fn compute(
+ ty: Ty<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
+ debug_assert!(!ty.has_infer_types_or_consts());
+
+ // First try computing a static layout.
+ let err = match tcx.layout_of(param_env.and(ty)) {
+ Ok(layout) => {
+ return Ok(SizeSkeleton::Known(layout.size));
+ }
+ Err(err) => err,
+ };
+
+ match *ty.kind() {
+ ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ let non_zero = !ty.is_unsafe_ptr();
+ let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
+ match tail.kind() {
+ ty::Param(_) | ty::Projection(_) => {
+ debug_assert!(tail.has_param_types_or_consts());
+ Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
+ }
+ _ => bug!(
+ "SizeSkeleton::compute({}): layout errored ({}), yet \
+ tail `{}` is not a type parameter or a projection",
+ ty,
+ err,
+ tail
+ ),
+ }
+ }
+
+ ty::Adt(def, substs) => {
+ // Only newtypes and enums w/ nullable pointer optimization.
+ if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
+ return Err(err);
+ }
+
+ // Get a zero-sized variant or a pointer newtype.
+ let zero_or_ptr_variant = |i| {
+ let i = VariantIdx::new(i);
+ let fields =
+ def.variant(i).fields.iter().map(|field| {
+ SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
+ });
+ let mut ptr = None;
+ for field in fields {
+ let field = field?;
+ match field {
+ SizeSkeleton::Known(size) => {
+ if size.bytes() > 0 {
+ return Err(err);
+ }
+ }
+ SizeSkeleton::Pointer { .. } => {
+ if ptr.is_some() {
+ return Err(err);
+ }
+ ptr = Some(field);
+ }
+ }
+ }
+ Ok(ptr)
+ };
+
+ let v0 = zero_or_ptr_variant(0)?;
+ // Newtype.
+ if def.variants().len() == 1 {
+ if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
+ return Ok(SizeSkeleton::Pointer {
+ non_zero: non_zero
+ || match tcx.layout_scalar_valid_range(def.did()) {
+ (Bound::Included(start), Bound::Unbounded) => start > 0,
+ (Bound::Included(start), Bound::Included(end)) => {
+ 0 < start && start < end
+ }
+ _ => false,
+ },
+ tail,
+ });
+ } else {
+ return Err(err);
+ }
+ }
+
+ let v1 = zero_or_ptr_variant(1)?;
+ // Nullable pointer enum optimization.
+ match (v0, v1) {
+ (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
+ | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
+ Ok(SizeSkeleton::Pointer { non_zero: false, tail })
+ }
+ _ => Err(err),
+ }
+ }
+
+ ty::Projection(_) | ty::Opaque(..) => {
+ let normalized = tcx.normalize_erasing_regions(param_env, ty);
+ if ty == normalized {
+ Err(err)
+ } else {
+ SizeSkeleton::compute(normalized, tcx, param_env)
+ }
+ }
+
+ _ => Err(err),
+ }
+ }
+
+ pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
+ match (self, other) {
+ (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
+ (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
+ a == b
+ }
+ _ => false,
+ }
+ }
+}
+
+pub trait HasTyCtxt<'tcx>: HasDataLayout {
+ fn tcx(&self) -> TyCtxt<'tcx>;
+}
+
+pub trait HasParamEnv<'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx>;
+}
+
+impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.data_layout
+ }
+}
+
+impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.sess.target
+ }
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ *self
+ }
+}
+
+impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.data_layout
+ }
+}
+
+impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.sess.target
+ }
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ **self
+ }
+}
+
+impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+}
+
+impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ self.tcx.data_layout()
+ }
+}
+
+impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
+ fn target_spec(&self) -> &Target {
+ self.tcx.target_spec()
+ }
+}
+
+impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx.tcx()
+ }
+}
+
+pub trait MaybeResult<T> {
+ type Error;
+
+ fn from(x: Result<T, Self::Error>) -> Self;
+ fn to_result(self) -> Result<T, Self::Error>;
+}
+
+impl<T> MaybeResult<T> for T {
+ type Error = !;
+
+ fn from(Ok(x): Result<T, Self::Error>) -> Self {
+ x
+ }
+ fn to_result(self) -> Result<T, Self::Error> {
+ Ok(self)
+ }
+}
+
+impl<T, E> MaybeResult<T> for Result<T, E> {
+ type Error = E;
+
+ fn from(x: Result<T, Self::Error>) -> Self {
+ x
+ }
+ fn to_result(self) -> Result<T, Self::Error> {
+ self
+ }
+}
+
+pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
+
+/// Trait for contexts that want to be able to compute layouts of types.
+/// This automatically gives access to `LayoutOf`, through a blanket `impl`.
+pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
+ /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
+ /// returned from `layout_of` (see also `handle_layout_err`).
+ type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
+
+ /// `Span` to use for `tcx.at(span)`, from `layout_of`.
+ // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
+ #[inline]
+ fn layout_tcx_at_span(&self) -> Span {
+ DUMMY_SP
+ }
+
+ /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
+ /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
+ ///
+ /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
+ /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
+ /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
+ /// (and any `LayoutError`s are turned into fatal errors or ICEs).
+ fn handle_layout_err(
+ &self,
+ err: LayoutError<'tcx>,
+ span: Span,
+ ty: Ty<'tcx>,
+ ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
+}
+
+/// Blanket extension trait for contexts that can compute layouts of types.
+pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
+ /// Computes the layout of a type. Note that this implicitly
+ /// executes in "reveal all" mode, and will normalize the input type.
+ #[inline]
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
+ self.spanned_layout_of(ty, DUMMY_SP)
+ }
+
+ /// Computes the layout of a type, at `span`. Note that this implicitly
+ /// executes in "reveal all" mode, and will normalize the input type.
+ // FIXME(eddyb) avoid passing information like this, and instead add more
+ // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
+ #[inline]
+ fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
+ let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
+ let tcx = self.tcx().at(span);
+
+ MaybeResult::from(
+ tcx.layout_of(self.param_env().and(ty))
+ .map_err(|err| self.handle_layout_err(err, span, ty)),
+ )
+ }
+}
+
+impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
+ type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
+ err
+ }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
+ type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+ #[inline]
+ fn layout_tcx_at_span(&self) -> Span {
+ self.tcx.span
+ }
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
+ err
+ }
+}
+
+impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
+where
+ C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
+{
+ fn ty_and_layout_for_variant(
+ this: TyAndLayout<'tcx>,
+ cx: &C,
+ variant_index: VariantIdx,
+ ) -> TyAndLayout<'tcx> {
+ let layout = match this.variants {
+ Variants::Single { index }
+ // If all variants but one are uninhabited, the variant layout is the enum layout.
+ if index == variant_index &&
+ // Don't confuse variants of uninhabited enums with the enum itself.
+ // For more details see https://github.com/rust-lang/rust/issues/69763.
+ this.fields != FieldsShape::Primitive =>
+ {
+ this.layout
+ }
+
+ Variants::Single { index } => {
+ let tcx = cx.tcx();
+ let param_env = cx.param_env();
+
+ // Deny calling for_variant more than once for non-Single enums.
+ if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
+ assert_eq!(original_layout.variants, Variants::Single { index });
+ }
+
+ let fields = match this.ty.kind() {
+ ty::Adt(def, _) if def.variants().is_empty() =>
+ bug!("for_variant called on zero-variant enum"),
+ ty::Adt(def, _) => def.variant(variant_index).fields.len(),
+ _ => bug!(),
+ };
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: variant_index },
+ fields: match NonZeroUsize::new(fields) {
+ Some(fields) => FieldsShape::Union(fields),
+ None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
+ },
+ abi: Abi::Uninhabited,
+ largest_niche: None,
+ align: tcx.data_layout.i8_align,
+ size: Size::ZERO,
+ })
+ }
+
+ Variants::Multiple { ref variants, .. } => variants[variant_index],
+ };
+
+ assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
+
+ TyAndLayout { ty: this.ty, layout }
+ }
+
+ fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
+ enum TyMaybeWithLayout<'tcx> {
+ Ty(Ty<'tcx>),
+ TyAndLayout(TyAndLayout<'tcx>),
+ }
+
+ fn field_ty_or_layout<'tcx>(
+ this: TyAndLayout<'tcx>,
+ cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
+ i: usize,
+ ) -> TyMaybeWithLayout<'tcx> {
+ let tcx = cx.tcx();
+ let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
+ TyAndLayout {
+ layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
+ ty: tag.primitive().to_ty(tcx),
+ }
+ };
+
+ match *this.ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::FnPtr(_)
+ | ty::Never
+ | ty::FnDef(..)
+ | ty::GeneratorWitness(..)
+ | ty::Foreign(..)
+ | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
+
+ // Potentially-fat pointers.
+ ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ assert!(i < this.fields.count());
+
+ // Reuse the fat `*T` type as its own thin pointer data field.
+ // This provides information about, e.g., DST struct pointees
+ // (which may have no non-DST form), and will work as long
+ // as the `Abi` or `FieldsShape` is checked by users.
+ if i == 0 {
+ let nil = tcx.mk_unit();
+ let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
+ tcx.mk_mut_ptr(nil)
+ } else {
+ tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
+ };
+
+ // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
+ // the `Result` should always work because the type is
+ // always either `*mut ()` or `&'static mut ()`.
+ return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
+ ty: this.ty,
+ ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
+ });
+ }
+
+ match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
+ ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
+ ty::Dynamic(_, _) => {
+ TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
+ tcx.lifetimes.re_static,
+ tcx.mk_array(tcx.types.usize, 3),
+ ))
+ /* FIXME: use actual fn pointers
+ Warning: naively computing the number of entries in the
+ vtable by counting the methods on the trait + methods on
+ all parent traits does not work, because some methods can
+ be not object safe and thus excluded from the vtable.
+ Increase this counter if you tried to implement this but
+ failed to do it without duplicating a lot of code from
+ other places in the compiler: 2
+ tcx.mk_tup(&[
+ tcx.mk_array(tcx.types.usize, 3),
+ tcx.mk_array(Option<fn()>),
+ ])
+ */
+ }
+ _ => bug!("TyAndLayout::field({:?}): not applicable", this),
+ }
+ }
+
+ // Arrays and slices.
+ ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
+ ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
+
+ // Tuples, generators and closures.
+ ty::Closure(_, ref substs) => field_ty_or_layout(
+ TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
+ cx,
+ i,
+ ),
+
+ ty::Generator(def_id, ref substs, _) => match this.variants {
+ Variants::Single { index } => TyMaybeWithLayout::Ty(
+ substs
+ .as_generator()
+ .state_tys(def_id, tcx)
+ .nth(index.as_usize())
+ .unwrap()
+ .nth(i)
+ .unwrap(),
+ ),
+ Variants::Multiple { tag, tag_field, .. } => {
+ if i == tag_field {
+ return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
+ }
+ TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
+ }
+ },
+
+ ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
+
+ // ADTs.
+ ty::Adt(def, substs) => {
+ match this.variants {
+ Variants::Single { index } => {
+ TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
+ }
+
+ // Discriminant field for enums (where applicable).
+ Variants::Multiple { tag, .. } => {
+ assert_eq!(i, 0);
+ return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
+ }
+ }
+ }
+
+ ty::Projection(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Opaque(..)
+ | ty::Param(_)
+ | ty::Infer(_)
+ | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
+ }
+ }
+
+ match field_ty_or_layout(this, cx, i) {
+ TyMaybeWithLayout::Ty(field_ty) => {
+ cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
+ bug!(
+ "failed to get layout for `{}`: {},\n\
+ despite it being a field (#{}) of an existing layout: {:#?}",
+ field_ty,
+ e,
+ i,
+ this
+ )
+ })
+ }
+ TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
+ }
+ }
+
+ fn ty_and_layout_pointee_info_at(
+ this: TyAndLayout<'tcx>,
+ cx: &C,
+ offset: Size,
+ ) -> Option<PointeeInfo> {
+ let tcx = cx.tcx();
+ let param_env = cx.param_env();
+
+ let addr_space_of_ty = |ty: Ty<'tcx>| {
+ if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
+ };
+
+ let pointee_info = match *this.ty.kind() {
+ ty::RawPtr(mt) if offset.bytes() == 0 => {
+ tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
+ size: layout.size,
+ align: layout.align.abi,
+ safe: None,
+ address_space: addr_space_of_ty(mt.ty),
+ })
+ }
+ ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
+ tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
+ size: layout.size,
+ align: layout.align.abi,
+ safe: None,
+ address_space: cx.data_layout().instruction_address_space,
+ })
+ }
+ ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
+ let address_space = addr_space_of_ty(ty);
+ let kind = if tcx.sess.opts.optimize == OptLevel::No {
+ // Use conservative pointer kind if not optimizing. This saves us the
+ // Freeze/Unpin queries, and can save time in the codegen backend (noalias
+ // attributes in LLVM have compile-time cost even in unoptimized builds).
+ PointerKind::SharedMutable
+ } else {
+ match mt {
+ hir::Mutability::Not => {
+ if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
+ PointerKind::Frozen
+ } else {
+ PointerKind::SharedMutable
+ }
+ }
+ hir::Mutability::Mut => {
+ // References to self-referential structures should not be considered
+ // noalias, as another pointer to the structure can be obtained, that
+ // is not based-on the original reference. We consider all !Unpin
+ // types to be potentially self-referential here.
+ if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
+ PointerKind::UniqueBorrowed
+ } else {
+ PointerKind::UniqueBorrowedPinned
+ }
+ }
+ }
+ };
+
+ tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
+ size: layout.size,
+ align: layout.align.abi,
+ safe: Some(kind),
+ address_space,
+ })
+ }
+
+ _ => {
+ let mut data_variant = match this.variants {
+ // Within the discriminant field, only the niche itself is
+ // always initialized, so we only check for a pointer at its
+ // offset.
+ //
+ // If the niche is a pointer, it's either valid (according
+ // to its type), or null (which the niche field's scalar
+ // validity range encodes). This allows using
+ // `dereferenceable_or_null` for e.g., `Option<&T>`, and
+ // this will continue to work as long as we don't start
+ // using more niches than just null (e.g., the first page of
+ // the address space, or unaligned pointers).
+ Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+ tag_field,
+ ..
+ } if this.fields.offset(tag_field) == offset => {
+ Some(this.for_variant(cx, dataful_variant))
+ }
+ _ => Some(this),
+ };
+
+ if let Some(variant) = data_variant {
+ // We're not interested in any unions.
+ if let FieldsShape::Union(_) = variant.fields {
+ data_variant = None;
+ }
+ }
+
+ let mut result = None;
+
+ if let Some(variant) = data_variant {
+ let ptr_end = offset + Pointer.size(cx);
+ for i in 0..variant.fields.count() {
+ let field_start = variant.fields.offset(i);
+ if field_start <= offset {
+ let field = variant.field(cx, i);
+ result = field.to_result().ok().and_then(|field| {
+ if ptr_end <= field_start + field.size {
+ // We found the right field, look inside it.
+ let field_info =
+ field.pointee_info_at(cx, offset - field_start);
+ field_info
+ } else {
+ None
+ }
+ });
+ if result.is_some() {
+ break;
+ }
+ }
+ }
+ }
+
+ // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
+ if let Some(ref mut pointee) = result {
+ if let ty::Adt(def, _) = this.ty.kind() {
+ if def.is_box() && offset.bytes() == 0 {
+ pointee.safe = Some(PointerKind::UniqueOwned);
+ }
+ }
+ }
+
+ result
+ }
+ };
+
+ debug!(
+ "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
+ offset,
+ this.ty.kind(),
+ pointee_info
+ );
+
+ pointee_info
+ }
+
+ fn is_adt(this: TyAndLayout<'tcx>) -> bool {
+ matches!(this.ty.kind(), ty::Adt(..))
+ }
+
+ fn is_never(this: TyAndLayout<'tcx>) -> bool {
+ this.ty.kind() == &ty::Never
+ }
+
+ fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
+ matches!(this.ty.kind(), ty::Tuple(..))
+ }
+
+ fn is_unit(this: TyAndLayout<'tcx>) -> bool {
+ matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
+ }
+}
+
+impl<'tcx> ty::Instance<'tcx> {
+ // NOTE(eddyb) this is private to avoid using it from outside of
+ // `fn_abi_of_instance` - any other uses are either too high-level
+ // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
+ // or should go through `FnAbi` instead, to avoid losing any
+ // adjustments `fn_abi_of_instance` might be performing.
+ fn fn_sig_for_fn_abi(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> ty::PolyFnSig<'tcx> {
+ let ty = self.ty(tcx, param_env);
+ match *ty.kind() {
+ ty::FnDef(..) => {
+ // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
+ // parameters unused if they show up in the signature, but not in the `mir::Body`
+ // (i.e. due to being inside a projection that got normalized, see
+ // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
+ // track of a polymorphization `ParamEnv` to allow normalizing later.
+ let mut sig = match *ty.kind() {
+ ty::FnDef(def_id, substs) => tcx
+ .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
+ .subst(tcx, substs),
+ _ => unreachable!(),
+ };
+
+ if let ty::InstanceDef::VTableShim(..) = self.def {
+ // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
+ sig = sig.map_bound(|mut sig| {
+ let mut inputs_and_output = sig.inputs_and_output.to_vec();
+ inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
+ sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
+ sig
+ });
+ }
+ sig
+ }
+ ty::Closure(def_id, substs) => {
+ let sig = substs.as_closure().sig();
+
+ let bound_vars = tcx.mk_bound_variable_kinds(
+ sig.bound_vars()
+ .iter()
+ .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
+ );
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind: ty::BoundRegionKind::BrEnv,
+ };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
+
+ let sig = sig.skip_binder();
+ ty::Binder::bind_with_vars(
+ tcx.mk_fn_sig(
+ iter::once(env_ty).chain(sig.inputs().iter().cloned()),
+ sig.output(),
+ sig.c_variadic,
+ sig.unsafety,
+ sig.abi,
+ ),
+ bound_vars,
+ )
+ }
+ ty::Generator(_, substs, _) => {
+ let sig = substs.as_generator().poly_sig();
+
+ let bound_vars = tcx.mk_bound_variable_kinds(
+ sig.bound_vars()
+ .iter()
+ .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
+ );
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind: ty::BoundRegionKind::BrEnv,
+ };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
+
+ let pin_did = tcx.require_lang_item(LangItem::Pin, None);
+ let pin_adt_ref = tcx.adt_def(pin_did);
+ let pin_substs = tcx.intern_substs(&[env_ty.into()]);
+ let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
+
+ let sig = sig.skip_binder();
+ let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
+ let state_adt_ref = tcx.adt_def(state_did);
+ let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
+ let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
+ ty::Binder::bind_with_vars(
+ tcx.mk_fn_sig(
+ [env_ty, sig.resume_ty].iter(),
+ &ret_ty,
+ false,
+ hir::Unsafety::Normal,
+ rustc_target::spec::abi::Abi::Rust,
+ ),
+ bound_vars,
+ )
+ }
+ _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
+ }
+ }
+}
+
+/// Calculates whether a function's ABI can unwind or not.
+///
+/// This takes two primary parameters:
+///
+/// * `fn_def_id` - the `DefId` of the function, if one is available, which is
+/// used to look up the codegen attrs calculated for a defined function. For
+/// function pointers this is `None` and the set of flags is empty. This is
+/// only applicable for Rust-defined functions, and generally isn't needed
+/// except for small optimizations where we try to say that a function which
+/// otherwise might look like it could unwind doesn't actually unwind (such
+/// as for intrinsics).
+///
+/// * `abi` - this is the ABI that the function is defined with. This is the
+/// primary factor for determining whether a function can unwind or not.
+///
+/// Note that in this case unwinding is not necessarily panicking in Rust. Rust
+/// panics are implemented with unwinds on most platforms (when
+/// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
+/// Notably, unwinding is disallowed for most non-Rust ABIs unless it's
+/// specifically in the name (e.g. `"C-unwind"`). What unwinding means is
+/// defined for each ABI individually, but it always corresponds to some form
+/// of stack-based unwinding (the exact mechanism of which varies
+/// platform-by-platform).
+///
+/// Rust functions are classified as able or unable to unwind based on the
+/// active "panic strategy". In other words, Rust functions are considered to
+/// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
+/// Note that Rust supports intermingling panic=abort and panic=unwind code, but
+/// only if the final panic mode is panic=abort. In this scenario any code
+/// previously compiled assuming that a function can unwind is still correct, it
+/// just never happens to actually unwind at runtime.
+///
+/// This function's answer to whether or not a function can unwind is quite
+/// impactful throughout the compiler. This affects things like:
+///
+/// * Calling a function which can't unwind means codegen simply ignores any
+/// associated unwinding cleanup.
+/// * Calling a function which can unwind from a function which can't unwind
+/// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
+/// aborts the process.
+/// * This affects whether functions have the LLVM `nounwind` attribute, which
+/// affects various optimizations and codegen.
+///
+/// FIXME: this is actually buggy with respect to Rust functions. Rust functions
+/// compiled with `-Cpanic=unwind` and referenced from another crate compiled
+/// with `-Cpanic=abort` will look like they can't unwind when in fact they
+/// might (from a foreign exception or similar).
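+///
+/// As a hedged illustration (`tcx` and a `did: DefId` are assumed in scope):
+///
+/// ```ignore (illustrative; requires a TyCtxt)
+/// use rustc_target::spec::abi::Abi;
+///
+/// // Rust-ABI functions unwind exactly when compiling with `-Cpanic=unwind`.
+/// let rust_unwinds = fn_can_unwind(tcx, Some(did), Abi::Rust);
+///
+/// // A function pointer with the `"C-unwind"` ABI always permits unwinding.
+/// assert!(fn_can_unwind(tcx, None, Abi::C { unwind: true }));
+/// ```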
+#[inline]
+pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
+ if let Some(did) = fn_def_id {
+ // Special attribute for functions which can't unwind.
+ if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
+ return false;
+ }
+
+ // With `-C panic=abort`, all non-FFI functions are required to not unwind.
+ //
+ // Note that this is true regardless of the ABI specified on the function -- an
+ // `extern "C-unwind"` function defined in Rust is also required to abort.
+ if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
+ return false;
+ }
+
+ // With `-Z panic-in-drop=abort`, `drop_in_place` never unwinds.
+ //
+ // This is not part of `codegen_fn_attrs` as it can differ between crates
+ // and therefore cannot be computed in core.
+ if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
+ if Some(did) == tcx.lang_items().drop_in_place_fn() {
+ return false;
+ }
+ }
+ }
+
+ // Otherwise, if this isn't special, then unwinding is generally determined
+ // by the ABI of the function itself. ABIs like `C` have variants which also
+ // specifically allow unwinding (`C-unwind`), but not all platform-specific
+ // ABIs have such an option. Otherwise the only other thing here is Rust
+ // itself, and those ABIs are determined by the panic strategy configured
+ // for this compilation.
+ //
+ // Unfortunately at this time there's also another caveat. Rust [RFC
+ // 2945][rfc] has been accepted and is in the process of being implemented
+ // and stabilized. In this interim state we need to deal with historical
+ // rustc behavior as well as plan for future rustc behavior.
+ //
+ // Historically functions declared with `extern "C"` were marked at the
+ // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
+ // or not. This is UB for functions in `panic=unwind` mode that then
+ // actually panic and unwind. Note that this behavior is true for both
+ // externally declared functions as well as Rust-defined functions.
+ //
+ // To fix this UB rustc would like to change in the future to catch unwinds
+ // from function calls that may unwind within a Rust-defined `extern "C"`
+ // function and forcibly abort the process, thereby respecting the
+ // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
+ // ready to roll out, so determining whether or not the `C` family of ABIs
+ // unwinds is conditional not only on their definition but also whether the
+ // `#![feature(c_unwind)]` feature gate is active.
+ //
+ // Note that this means that, unlike historical compilers, rustc now by
+ // default unconditionally thinks that the `C` ABI may unwind. This will
+ // prevent some optimization opportunities, however, so we try to scope this
+ // change and only assume that `C` unwinds with `panic=unwind` (as opposed
+ // to `panic=abort`).
+ //
+ // Eventually the check against `c_unwind` here will ideally get removed and
+ // this'll be a little cleaner as it'll be a straightforward check of the
+ // ABI.
+ //
+ // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
+ use SpecAbi::*;
+ match abi {
+ C { unwind }
+ | System { unwind }
+ | Cdecl { unwind }
+ | Stdcall { unwind }
+ | Fastcall { unwind }
+ | Vectorcall { unwind }
+ | Thiscall { unwind }
+ | Aapcs { unwind }
+ | Win64 { unwind }
+ | SysV64 { unwind } => {
+ unwind
+ || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
+ }
+ PtxKernel
+ | Msp430Interrupt
+ | X86Interrupt
+ | AmdGpuKernel
+ | EfiApi
+ | AvrInterrupt
+ | AvrNonBlockingInterrupt
+ | CCmseNonSecureCall
+ | Wasm
+ | RustIntrinsic
+ | PlatformIntrinsic
+ | Unadjusted => false,
+ Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
+ }
+}
+
+#[inline]
+pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
+ use rustc_target::spec::abi::Abi::*;
+ match tcx.sess.target.adjust_abi(abi) {
+ RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
+ RustCold => Conv::RustCold,
+
+ // It's the ABI's job to select this, not ours.
+ System { .. } => bug!("system abi should be selected elsewhere"),
+ EfiApi => bug!("eficall abi should be selected elsewhere"),
+
+ Stdcall { .. } => Conv::X86Stdcall,
+ Fastcall { .. } => Conv::X86Fastcall,
+ Vectorcall { .. } => Conv::X86VectorCall,
+ Thiscall { .. } => Conv::X86ThisCall,
+ C { .. } => Conv::C,
+ Unadjusted => Conv::C,
+ Win64 { .. } => Conv::X86_64Win64,
+ SysV64 { .. } => Conv::X86_64SysV,
+ Aapcs { .. } => Conv::ArmAapcs,
+ CCmseNonSecureCall => Conv::CCmseNonSecureCall,
+ PtxKernel => Conv::PtxKernel,
+ Msp430Interrupt => Conv::Msp430Intr,
+ X86Interrupt => Conv::X86Intr,
+ AmdGpuKernel => Conv::AmdGpuKernel,
+ AvrInterrupt => Conv::AvrInterrupt,
+ AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
+ Wasm => Conv::C,
+
+ // These ABI constants ought to be more specific...
+ Cdecl { .. } => Conv::C,
+ }
+}
+
+/// Error produced by attempting to compute or adjust a `FnAbi`.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum FnAbiError<'tcx> {
+ /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
+ Layout(LayoutError<'tcx>),
+
+ /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
+ AdjustForForeignAbi(call::AdjustForForeignAbiError),
+}
+
+impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
+ fn from(err: LayoutError<'tcx>) -> Self {
+ Self::Layout(err)
+ }
+}
+
+impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
+ fn from(err: call::AdjustForForeignAbiError) -> Self {
+ Self::AdjustForForeignAbi(err)
+ }
+}
+
+impl<'tcx> fmt::Display for FnAbiError<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Layout(err) => err.fmt(f),
+ Self::AdjustForForeignAbi(err) => err.fmt(f),
+ }
+ }
+}
+
+// FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
+// just for error handling.
+#[derive(Debug)]
+pub enum FnAbiRequest<'tcx> {
+ OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
+ OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
+}
+
+/// Trait for contexts that want to be able to compute `FnAbi`s.
+/// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
+pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
+ /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
+ /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
+ type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
+
+ /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
+ /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
+ ///
+ /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
+ /// but this hook allows e.g. codegen to return only `&FnAbi` from its
+ /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
+ /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
+}
+
+/// Blanket extension trait for contexts that can compute `FnAbi`s.
+pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
+ /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
+ ///
+ /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
+ /// instead, where the instance is an `InstanceDef::Virtual`.
+ #[inline]
+ fn fn_abi_of_fn_ptr(
+ &self,
+ sig: ty::PolyFnSig<'tcx>,
+ extra_args: &'tcx ty::List<Ty<'tcx>>,
+ ) -> Self::FnAbiOfResult {
+ // FIXME(eddyb) get a better `span` here.
+ let span = self.layout_tcx_at_span();
+ let tcx = self.tcx().at(span);
+
+ MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
+ |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
+ ))
+ }
+
+ /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
+ /// direct calls to an `fn`.
+ ///
+ /// NB: that includes virtual calls, which are represented by "direct calls"
+ /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
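+ ///
+ /// A hedged usage sketch from codegen-like code (`cx` and `instance` are
+ /// assumed to be in scope; `ty::List::empty()` means no extra, untupled args):
+ ///
+ /// ```ignore (illustrative; requires a codegen context)
+ /// let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
+ /// // `fn_abi.args` and `fn_abi.ret` describe how each value is passed.
+ /// ```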
+ #[inline]
+ fn fn_abi_of_instance(
+ &self,
+ instance: ty::Instance<'tcx>,
+ extra_args: &'tcx ty::List<Ty<'tcx>>,
+ ) -> Self::FnAbiOfResult {
+ // FIXME(eddyb) get a better `span` here.
+ let span = self.layout_tcx_at_span();
+ let tcx = self.tcx().at(span);
+
+ MaybeResult::from(
+ tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
+ // HACK(eddyb) at least for definitions of/calls to `Instance`s,
+ // we can get some kind of span even if one wasn't provided.
+ // However, we don't do this early in order to avoid calling
+ // `def_span` unconditionally (which may have a perf penalty).
+ let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
+ self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
+ }),
+ )
+ }
+}
+
+impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
+
+fn fn_abi_of_fn_ptr<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
+) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
+ let (param_env, (sig, extra_args)) = query.into_parts();
+
+ LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
+}
+
+fn fn_abi_of_instance<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
+) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
+ let (param_env, (instance, extra_args)) = query.into_parts();
+
+ let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
+
+ let caller_location = if instance.def.requires_caller_location(tcx) {
+ Some(tcx.caller_location_ty())
+ } else {
+ None
+ };
+
+ LayoutCx { tcx, param_env }.fn_abi_new_uncached(
+ sig,
+ extra_args,
+ caller_location,
+ Some(instance.def_id()),
+ matches!(instance.def, ty::InstanceDef::Virtual(..)),
+ )
+}
+
+impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
+ // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
+ // arguments of this method into a separate `struct`.
+ fn fn_abi_new_uncached(
+ &self,
+ sig: ty::PolyFnSig<'tcx>,
+ extra_args: &[Ty<'tcx>],
+ caller_location: Option<Ty<'tcx>>,
+ fn_def_id: Option<DefId>,
+ // FIXME(eddyb) replace this with something typed, like an `enum`.
+ force_thin_self_ptr: bool,
+ ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
+ debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
+
+ let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
+
+ let conv = conv_from_spec_abi(self.tcx(), sig.abi);
+
+ let mut inputs = sig.inputs();
+ let extra_args = if sig.abi == RustCall {
+ assert!(!sig.c_variadic && extra_args.is_empty());
+
+ if let Some(input) = sig.inputs().last() {
+ if let ty::Tuple(tupled_arguments) = input.kind() {
+ inputs = &sig.inputs()[0..sig.inputs().len() - 1];
+ tupled_arguments
+ } else {
+ bug!(
+ "argument to function with \"rust-call\" ABI \
+ is not a tuple"
+ );
+ }
+ } else {
+ bug!(
+ "argument to function with \"rust-call\" ABI \
+ is not a tuple"
+ );
+ }
+ } else {
+ assert!(sig.c_variadic || extra_args.is_empty());
+ extra_args
+ };
+
+ let target = &self.tcx.sess.target;
+ let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
+ let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
+ let linux_s390x_gnu_like =
+ target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
+ let linux_sparc64_gnu_like =
+ target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
+ let linux_powerpc_gnu_like =
+ target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
+ use SpecAbi::*;
+ let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
+
+ // Handle safe Rust thin and fat pointers.
+ let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
+ scalar: Scalar,
+ layout: TyAndLayout<'tcx>,
+ offset: Size,
+ is_return: bool| {
+ // Booleans are always a noundef i1 that needs to be zero-extended.
+ if scalar.is_bool() {
+ attrs.ext(ArgExtension::Zext);
+ attrs.set(ArgAttribute::NoUndef);
+ return;
+ }
+
+ // Scalars which have invalid values cannot be undef.
+ if !scalar.is_always_valid(self) {
+ attrs.set(ArgAttribute::NoUndef);
+ }
+
+ // Only pointer types handled below.
+ let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
+
+ if !valid_range.contains(0) {
+ attrs.set(ArgAttribute::NonNull);
+ }
+
+ if let Some(pointee) = layout.pointee_info_at(self, offset) {
+ if let Some(kind) = pointee.safe {
+ attrs.pointee_align = Some(pointee.align);
+
+ // `Box` (`UniqueOwned`) pointers are not necessarily dereferenceable
+ // for the entire duration of the function as they can be deallocated
+ // at any time. Same for shared mutable references. If LLVM had a
+ // way to say "dereferenceable on entry" we could use it here.
+ attrs.pointee_size = match kind {
+ PointerKind::UniqueBorrowed
+ | PointerKind::UniqueBorrowedPinned
+ | PointerKind::Frozen => pointee.size,
+ PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
+ };
+
+ // `Box`, `&T`, and `&mut T` cannot be undef.
+ // Note that this only applies to the value of the pointer itself;
+ // this attribute doesn't make it UB for the pointed-to data to be undef.
+ attrs.set(ArgAttribute::NoUndef);
+
+ // The aliasing rules for `Box<T>` are still not decided, but currently we emit
+ // `noalias` for it. This can be turned off using an unstable flag.
+ // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
+ let noalias_for_box =
+ self.tcx().sess.opts.unstable_opts.box_noalias.unwrap_or(true);
+
+ // `&mut` pointer parameters never alias other parameters,
+ // or mutable global data
+ //
+ // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
+ // and can be marked as both `readonly` and `noalias`, as
+ // LLVM's definition of `noalias` is based solely on memory
+ // dependencies rather than pointer equality
+ //
+ // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
+ // for UniqueBorrowed arguments, so that the codegen backend can decide whether
+ // or not to actually emit the attribute. It can also be controlled with the
+ // `-Zmutable-noalias` debugging option.
+ let no_alias = match kind {
+ PointerKind::SharedMutable
+ | PointerKind::UniqueBorrowed
+ | PointerKind::UniqueBorrowedPinned => false,
+ PointerKind::UniqueOwned => noalias_for_box,
+ PointerKind::Frozen => !is_return,
+ };
+ if no_alias {
+ attrs.set(ArgAttribute::NoAlias);
+ }
+
+ if kind == PointerKind::Frozen && !is_return {
+ attrs.set(ArgAttribute::ReadOnly);
+ }
+
+ if kind == PointerKind::UniqueBorrowed && !is_return {
+ attrs.set(ArgAttribute::NoAliasMutRef);
+ }
+ }
+ }
+ };
+
+ let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
+ let is_return = arg_idx.is_none();
+
+ let layout = self.layout_of(ty)?;
+ let layout = if force_thin_self_ptr && arg_idx == Some(0) {
+ // Don't pass the vtable, it's not an argument of the virtual fn.
+ // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
+ // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
+ make_thin_self_ptr(self, layout)
+ } else {
+ layout
+ };
+
+ let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
+ let mut attrs = ArgAttributes::new();
+ adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
+ attrs
+ });
+
+ if arg.layout.is_zst() {
+ // For some forsaken reason, x86_64-pc-windows-gnu
+ // doesn't ignore zero-sized struct arguments.
+ // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
+ if is_return
+ || rust_abi
+ || (!win_x64_gnu
+ && !linux_s390x_gnu_like
+ && !linux_sparc64_gnu_like
+ && !linux_powerpc_gnu_like)
+ {
+ arg.mode = PassMode::Ignore;
+ }
+ }
+
+ Ok(arg)
+ };
+
+ let mut fn_abi = FnAbi {
+ ret: arg_of(sig.output(), None)?,
+ args: inputs
+ .iter()
+ .copied()
+ .chain(extra_args.iter().copied())
+ .chain(caller_location)
+ .enumerate()
+ .map(|(i, ty)| arg_of(ty, Some(i)))
+ .collect::<Result<_, _>>()?,
+ c_variadic: sig.c_variadic,
+ fixed_count: inputs.len(),
+ conv,
+ can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
+ };
+ self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
+ debug!("fn_abi_new_uncached = {:?}", fn_abi);
+ Ok(self.tcx.arena.alloc(fn_abi))
+ }
+
+ fn fn_abi_adjust_for_abi(
+ &self,
+ fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
+ abi: SpecAbi,
+ ) -> Result<(), FnAbiError<'tcx>> {
+ if abi == SpecAbi::Unadjusted {
+ return Ok(());
+ }
+
+ if abi == SpecAbi::Rust
+ || abi == SpecAbi::RustCall
+ || abi == SpecAbi::RustIntrinsic
+ || abi == SpecAbi::PlatformIntrinsic
+ {
+ let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
+ if arg.is_ignore() {
+ return;
+ }
+
+ match arg.layout.abi {
+ Abi::Aggregate { .. } => {}
+
+ // This is a fun case! The gist of what this is doing is
+ // that we want callers and callees to always agree on the
+ // ABI of how they pass SIMD arguments. If we were to *not*
+ // make these arguments indirect then they'd be immediates
+ // in LLVM, which means that they'd use whatever the
+ // appropriate ABI is for the callee and the caller. That
+ // means, for example, if the caller doesn't have AVX
+ // enabled but the callee does, then passing an AVX argument
+ // across this boundary would cause corrupt data to show up.
+ //
+ // This problem is fixed by unconditionally passing SIMD
+ // arguments through memory between callers and callees
+ // which should get them all to agree on ABI regardless of
+ // target feature sets. Some more information about this
+ // issue can be found in #44367.
+ //
+ // Note that the platform intrinsic ABI is exempt here as
+ // that's how we connect up to LLVM and it's unstable
+ // anyway; we control all calls to it in libstd.
+ Abi::Vector { .. }
+ if abi != SpecAbi::PlatformIntrinsic
+ && self.tcx.sess.target.simd_types_indirect =>
+ {
+ arg.make_indirect();
+ return;
+ }
+
+ _ => return,
+ }
+
+ let size = arg.layout.size;
+ if arg.layout.is_unsized() || size > Pointer.size(self) {
+ arg.make_indirect();
+ } else {
+ // We want to pass small aggregates as immediates, but using
+ // an LLVM aggregate type for this leads to bad optimizations,
+ // so we pick an appropriately sized integer type instead.
+ arg.cast_to(Reg { kind: RegKind::Integer, size });
+ }
+ };
+ fixup(&mut fn_abi.ret);
+ for arg in &mut fn_abi.args {
+ fixup(arg);
+ }
+ } else {
+ fn_abi.adjust_for_foreign_abi(self, abi)?;
+ }
+
+ Ok(())
+ }
+}
+
+fn make_thin_self_ptr<'tcx>(
+ cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
+ layout: TyAndLayout<'tcx>,
+) -> TyAndLayout<'tcx> {
+ let tcx = cx.tcx();
+ let fat_pointer_ty = if layout.is_unsized() {
+ // unsized `self` is passed as a pointer to `self`
+ // FIXME (mikeyhew) change this to use &own if it is ever added to the language
+ tcx.mk_mut_ptr(layout.ty)
+ } else {
+ match layout.abi {
+ Abi::ScalarPair(..) => (),
+ _ => bug!("receiver type has unsupported layout: {:?}", layout),
+ }
+
+ // In the case of `Rc<Self>`, we need to explicitly pass a `*mut RcBox<Self>`
+ // with a `Scalar` (not `ScalarPair`) ABI. This is a hack that is understood
+ // elsewhere in the compiler as a method on a `dyn Trait`.
+ // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until
+ // we get a built-in pointer type.
+ let mut fat_pointer_layout = layout;
+ 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
+ && !fat_pointer_layout.ty.is_region_ptr()
+ {
+ for i in 0..fat_pointer_layout.fields.count() {
+ let field_layout = fat_pointer_layout.field(cx, i);
+
+ if !field_layout.is_zst() {
+ fat_pointer_layout = field_layout;
+ continue 'descend_newtypes;
+ }
+ }
+
+ bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
+ }
+
+ fat_pointer_layout.ty
+ };
+
+ // we now have a type like `*mut RcBox<dyn Trait>`
+ // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
+ // this is understood as a special case elsewhere in the compiler
+ let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
+
+ TyAndLayout {
+ ty: fat_pointer_ty,
+
+ // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
+ // should always work because the type is always `*mut ()`.
+ ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs
new file mode 100644
index 000000000..db3b5cfd1
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/list.rs
@@ -0,0 +1,215 @@
+use crate::arena::Arena;
+use rustc_serialize::{Encodable, Encoder};
+use std::alloc::Layout;
+use std::cmp::Ordering;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::mem;
+use std::ops::Deref;
+use std::ptr;
+use std::slice;
+
+/// `List<T>` is a bit like `&[T]`, but with some critical differences.
+/// - IMPORTANT: Every `List<T>` is *required* to have unique contents. The
+/// type's correctness relies on this, *but it does not enforce it*.
+/// Therefore, any code that creates a `List<T>` must ensure uniqueness
+/// itself. In practice this is achieved by interning.
+/// - The length is stored within the `List<T>`, so `&List<Ty>` is a thin
+/// pointer.
+/// - Because of this, you cannot get a `List<T>` that is a sub-list of another
+/// `List<T>`. You can get a sub-slice `&[T]`, however.
+/// - `List<T>` can be used with `CopyTaggedPtr`, which is useful within
+/// structs whose size must be minimized.
+/// - Because of the uniqueness assumption, we can use the address of a
+/// `List<T>` for faster equality comparisons and hashing.
+/// - `T` must be `Copy`. This lets `List<T>` be stored in a dropless arena and
+/// iterators return a `T` rather than a `&T`.
+/// - `T` must not be zero-sized.
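+///
+/// A hedged usage sketch (a `tcx` is assumed to be in scope; lists are
+/// normally obtained via interning methods such as `TyCtxt::intern_type_list`):
+///
+/// ```ignore (illustrative; requires a TyCtxt)
+/// let a = tcx.intern_type_list(&[tcx.types.unit, tcx.types.u32]);
+/// let b = tcx.intern_type_list(&[tcx.types.unit, tcx.types.u32]);
+/// assert_eq!(a.len(), 2);
+/// // Interning guarantees unique contents, so equality is pointer equality.
+/// assert!(a == b && std::ptr::eq(a, b));
+/// ```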
+#[repr(C)]
+pub struct List<T> {
+ len: usize,
+
+ /// Although this claims to be a zero-length array, in practice `len`
+ /// elements are actually present.
+ data: [T; 0],
+
+ opaque: OpaqueListContents,
+}
+
+extern "C" {
+ /// A dummy type used to force `List` to be unsized while not requiring
+ /// references to it be wide pointers.
+ type OpaqueListContents;
+}
+
+impl<T> List<T> {
+ /// Returns a reference to the (unique, static) empty list.
+ #[inline(always)]
+ pub fn empty<'a>() -> &'a List<T> {
+ #[repr(align(64))]
+ struct MaxAlign;
+
+ assert!(mem::align_of::<T>() <= mem::align_of::<MaxAlign>());
+
+ #[repr(C)]
+ struct InOrder<T, U>(T, U);
+
+ // The empty slice is static and contains a single `0` usize (for the
+ // length) that is 64-byte aligned, thus featuring the necessary
+ // trailing padding for elements with up to 64-byte alignment.
+ static EMPTY_SLICE: InOrder<usize, MaxAlign> = InOrder(0, MaxAlign);
+ unsafe { &*(&EMPTY_SLICE as *const _ as *const List<T>) }
+ }
+
+ pub fn len(&self) -> usize {
+ self.len
+ }
+}
+
+impl<T: Copy> List<T> {
+ /// Allocates a list from `arena` and copies the contents of `slice` into it.
+ ///
+ /// WARNING: the contents *must be unique*, such that no list with these
+ /// contents has been previously created. If not, operations such as `eq`
+ /// and `hash` might give incorrect results.
+ ///
+ /// Panics if `T` is `Drop`, or `T` is zero-sized, or the slice is empty
+ /// (because the empty list exists statically, and is available via
+ /// `empty()`).
+ #[inline]
+ pub(super) fn from_arena<'tcx>(arena: &'tcx Arena<'tcx>, slice: &[T]) -> &'tcx List<T> {
+ assert!(!mem::needs_drop::<T>());
+ assert!(mem::size_of::<T>() != 0);
+ assert!(!slice.is_empty());
+
+ let (layout, _offset) =
+ Layout::new::<usize>().extend(Layout::for_value::<[T]>(slice)).unwrap();
+ let mem = arena.dropless.alloc_raw(layout) as *mut List<T>;
+ unsafe {
+ // Write the length
+ ptr::addr_of_mut!((*mem).len).write(slice.len());
+
+ // Write the elements
+ ptr::addr_of_mut!((*mem).data)
+ .cast::<T>()
+ .copy_from_nonoverlapping(slice.as_ptr(), slice.len());
+
+ &*mem
+ }
+ }
+
+ // If this method didn't exist, we would use `slice.iter` due to
+ // deref coercion.
+ //
+ // This would be weird, as `self.into_iter` iterates over `T` directly.
+ #[inline(always)]
+ pub fn iter(&self) -> <&'_ List<T> as IntoIterator>::IntoIter {
+ self.into_iter()
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for List<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for List<T> {
+ #[inline]
+ fn encode(&self, s: &mut S) {
+ (**self).encode(s);
+ }
+}
+
+impl<T: PartialEq> PartialEq for List<T> {
+ #[inline]
+ fn eq(&self, other: &List<T>) -> bool {
+ // Pointer equality implies list equality (due to the unique contents
+ // assumption).
+ ptr::eq(self, other)
+ }
+}
+
+impl<T: Eq> Eq for List<T> {}
+
+impl<T> Ord for List<T>
+where
+ T: Ord,
+{
+ fn cmp(&self, other: &List<T>) -> Ordering {
+ // Pointer equality implies list equality (due to the unique contents
+ // assumption), but the contents must be compared otherwise.
+ if self == other { Ordering::Equal } else { <[T] as Ord>::cmp(&**self, &**other) }
+ }
+}
+
+impl<T> PartialOrd for List<T>
+where
+ T: PartialOrd,
+{
+ fn partial_cmp(&self, other: &List<T>) -> Option<Ordering> {
+ // Pointer equality implies list equality (due to the unique contents
+ // assumption), but the contents must be compared otherwise.
+ if self == other {
+ Some(Ordering::Equal)
+ } else {
+ <[T] as PartialOrd>::partial_cmp(&**self, &**other)
+ }
+ }
+}
+
+impl<T> Hash for List<T> {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // Pointer hashing is sufficient (due to the unique contents
+ // assumption).
+ (self as *const List<T>).hash(s)
+ }
+}
+
+impl<T> Deref for List<T> {
+ type Target = [T];
+ #[inline(always)]
+ fn deref(&self) -> &[T] {
+ self.as_ref()
+ }
+}
+
+impl<T> AsRef<[T]> for List<T> {
+ #[inline(always)]
+ fn as_ref(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len) }
+ }
+}
+
+impl<'a, T: Copy> IntoIterator for &'a List<T> {
+ type Item = T;
+ type IntoIter = iter::Copied<<&'a [T] as IntoIterator>::IntoIter>;
+ #[inline(always)]
+ fn into_iter(self) -> Self::IntoIter {
+ self[..].iter().copied()
+ }
+}
+
+unsafe impl<T: Sync> Sync for List<T> {}
+
+unsafe impl<'a, T: 'a> rustc_data_structures::tagged_ptr::Pointer for &'a List<T> {
+ const BITS: usize = std::mem::align_of::<usize>().trailing_zeros() as usize;
+
+ #[inline]
+ fn into_usize(self) -> usize {
+ self as *const List<T> as usize
+ }
+
+ #[inline]
+ unsafe fn from_usize(ptr: usize) -> &'a List<T> {
+ &*(ptr as *const List<T>)
+ }
+
+ unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+ // `Self` is `&'a List<T>` which impls `Copy`, so this is fine.
+ let ptr = Self::from_usize(ptr);
+ f(&ptr)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
new file mode 100644
index 000000000..02da02568
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -0,0 +1,2518 @@
+//! Defines how the compiler represents types internally.
+//!
+//! Two important entities in this module are:
+//!
+//! - [`rustc_middle::ty::Ty`], used to represent the semantics of a type.
+//! - [`rustc_middle::ty::TyCtxt`], the central data structure in the compiler.
+//!
+//! For more information, see ["The `ty` module: representing types"] in the rustc-dev-guide.
+//!
+//! ["The `ty` module: representing types"]: https://rustc-dev-guide.rust-lang.org/ty.html
+
+pub use self::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable};
+pub use self::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+pub use self::AssocItemContainer::*;
+pub use self::BorrowKind::*;
+pub use self::IntVarValue::*;
+pub use self::Variance::*;
+use crate::metadata::ModChild;
+use crate::middle::privacy::AccessLevels;
+use crate::mir::{Body, GeneratorLayout};
+use crate::traits::{self, Reveal};
+use crate::ty;
+use crate::ty::fast_reject::SimplifiedType;
+use crate::ty::util::Discr;
+pub use adt::*;
+pub use assoc::*;
+pub use generics::*;
+use rustc_ast as ast;
+use rustc_ast::node_id::NodeMap;
+use rustc_attr as attr;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
+use rustc_data_structures::intern::{Interned, WithStableHash};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::tagged_ptr::CopyTaggedPtr;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, CtorOf, DefKind, LifetimeRes, Res};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LocalDefIdMap};
+use rustc_hir::Node;
+use rustc_index::vec::IndexVec;
+use rustc_macros::HashStable;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{ExpnId, Span};
+use rustc_target::abi::{Align, VariantIdx};
+pub use subst::*;
+pub use vtable::*;
+
+use std::fmt::Debug;
+use std::hash::{Hash, Hasher};
+use std::ops::ControlFlow;
+use std::{fmt, str};
+
+pub use crate::ty::diagnostics::*;
+pub use rustc_type_ir::InferTy::*;
+pub use rustc_type_ir::RegionKind::*;
+pub use rustc_type_ir::TyKind::*;
+pub use rustc_type_ir::*;
+
+pub use self::binding::BindingMode;
+pub use self::binding::BindingMode::*;
+pub use self::closure::{
+ is_ancestor_or_same_capture, place_to_string_for_capture, BorrowKind, CaptureInfo,
+ CapturedPlace, ClosureKind, MinCaptureInformationMap, MinCaptureList,
+ RootVariableMinCaptureList, UpvarCapture, UpvarCaptureMap, UpvarId, UpvarListMap, UpvarPath,
+ CAPTURE_STRUCT_LOCAL,
+};
+pub use self::consts::{
+ Const, ConstInt, ConstKind, ConstS, InferConst, ScalarInt, Unevaluated, ValTree,
+};
+pub use self::context::{
+ tls, CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations,
+ CtxtInterners, DelaySpanBugEmitted, FreeRegionInfo, GeneratorDiagnosticData,
+ GeneratorInteriorTypeCause, GlobalCtxt, Lift, OnDiskCache, TyCtxt, TypeckResults, UserType,
+ UserTypeAnnotationIndex,
+};
+pub use self::instance::{Instance, InstanceDef};
+pub use self::list::List;
+pub use self::parameterized::ParameterizedOverTcx;
+pub use self::rvalue_scopes::RvalueScopes;
+pub use self::sty::BoundRegionKind::*;
+pub use self::sty::{
+ Article, Binder, BoundRegion, BoundRegionKind, BoundTy, BoundTyKind, BoundVar,
+ BoundVariableKind, CanonicalPolyFnSig, ClosureSubsts, ClosureSubstsParts, ConstVid,
+ EarlyBinder, EarlyBoundRegion, ExistentialPredicate, ExistentialProjection,
+ ExistentialTraitRef, FnSig, FreeRegion, GenSig, GeneratorSubsts, GeneratorSubstsParts,
+ InlineConstSubsts, InlineConstSubstsParts, ParamConst, ParamTy, PolyExistentialProjection,
+ PolyExistentialTraitRef, PolyFnSig, PolyGenSig, PolyTraitRef, ProjectionTy, Region, RegionKind,
+ RegionVid, TraitRef, TyKind, TypeAndMut, UpvarSubsts, VarianceDiagInfo,
+};
+pub use self::trait_def::TraitDef;
+
+pub mod _match;
+pub mod abstract_const;
+pub mod adjustment;
+pub mod binding;
+pub mod cast;
+pub mod codec;
+pub mod error;
+pub mod fast_reject;
+pub mod flags;
+pub mod fold;
+pub mod inhabitedness;
+pub mod layout;
+pub mod normalize_erasing_regions;
+pub mod print;
+pub mod query;
+pub mod relate;
+pub mod subst;
+pub mod trait_def;
+pub mod util;
+pub mod visit;
+pub mod vtable;
+pub mod walk;
+
+mod adt;
+mod assoc;
+mod closure;
+mod consts;
+mod context;
+mod diagnostics;
+mod erase_regions;
+mod generics;
+mod impls_ty;
+mod instance;
+mod list;
+mod parameterized;
+mod rvalue_scopes;
+mod structural_impls;
+mod sty;
+
+// Data types
+
+pub type RegisteredTools = FxHashSet<Ident>;
+
+#[derive(Debug)]
+pub struct ResolverOutputs {
+ pub visibilities: FxHashMap<LocalDefId, Visibility>,
+ /// This field is used to decide whether we should make `PRIVATE_IN_PUBLIC` a hard error.
+ pub has_pub_restricted: bool,
+ /// Item with a given `LocalDefId` was defined during macro expansion with ID `ExpnId`.
+ pub expn_that_defined: FxHashMap<LocalDefId, ExpnId>,
+ /// Reference span for definitions.
+ pub source_span: IndexVec<LocalDefId, Span>,
+ pub access_levels: AccessLevels,
+ pub extern_crate_map: FxHashMap<LocalDefId, CrateNum>,
+ pub maybe_unused_trait_imports: FxIndexSet<LocalDefId>,
+ pub maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
+ pub reexport_map: FxHashMap<LocalDefId, Vec<ModChild>>,
+ pub glob_map: FxHashMap<LocalDefId, FxHashSet<Symbol>>,
+ /// Extern prelude entries. The value is `true` if the entry was introduced
+ /// via an `extern crate` item and not the `--extern` option or a compiler built-in.
+ pub extern_prelude: FxHashMap<Symbol, bool>,
+ pub main_def: Option<MainDefinition>,
+ pub trait_impls: FxIndexMap<DefId, Vec<LocalDefId>>,
+ /// A list of proc macro LocalDefIds, written out in the order in which
+ /// they are declared in the static array generated by proc_macro_harness.
+ pub proc_macros: Vec<LocalDefId>,
+ /// Mapping from ident span to path span for paths that don't exist as written, but that
+ /// exist under `std`. For example, the user wrote `str::from_utf8` instead of
+ /// `std::str::from_utf8`.
+ pub confused_type_with_std_module: FxHashMap<Span, Span>,
+ pub registered_tools: RegisteredTools,
+}
+
+/// Resolutions that should only be used for lowering.
+/// This struct is meant to be consumed by lowering.
+#[derive(Debug)]
+pub struct ResolverAstLowering {
+ pub legacy_const_generic_args: FxHashMap<DefId, Option<Vec<usize>>>,
+
+ /// Resolutions for nodes that have a single resolution.
+ pub partial_res_map: NodeMap<hir::def::PartialRes>,
+ /// Resolutions for import nodes, which have multiple resolutions in different namespaces.
+ pub import_res_map: NodeMap<hir::def::PerNS<Option<Res<ast::NodeId>>>>,
+ /// Resolutions for labels (node IDs of their corresponding blocks or loops).
+ pub label_res_map: NodeMap<ast::NodeId>,
+ /// Resolutions for lifetimes.
+ pub lifetimes_res_map: NodeMap<LifetimeRes>,
+ /// Mapping from generics `def_id`s to TAIT generics `def_id`s.
+ /// For each captured lifetime (e.g., `'a`), we create a new lifetime parameter that is a
+ /// generic defined on the TAIT, so we have `type Foo<'a1> = ...` and we establish a mapping
+ /// in this field from the original parameter `'a` to the new parameter `'a1`.
+ pub generics_def_id_map: Vec<FxHashMap<LocalDefId, LocalDefId>>,
+ /// Lifetime parameters that lowering will have to introduce.
+ pub extra_lifetime_params_map: NodeMap<Vec<(Ident, ast::NodeId, LifetimeRes)>>,
+
+ pub next_node_id: ast::NodeId,
+
+ pub node_id_to_def_id: FxHashMap<ast::NodeId, LocalDefId>,
+ pub def_id_to_node_id: IndexVec<LocalDefId, ast::NodeId>,
+
+ pub trait_map: NodeMap<Vec<hir::TraitCandidate>>,
+ /// A small map keeping true kinds of built-in macros that appear to be fn-like on
+ /// the surface (`macro` items in libcore), but are actually attributes or derives.
+ pub builtin_macro_kinds: FxHashMap<LocalDefId, MacroKind>,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct MainDefinition {
+ pub res: Res<ast::NodeId>,
+ pub is_import: bool,
+ pub span: Span,
+}
+
+impl MainDefinition {
+ pub fn opt_fn_def_id(self) -> Option<DefId> {
+ if let Res::Def(DefKind::Fn, def_id) = self.res { Some(def_id) } else { None }
+ }
+}
+
+/// The "header" of an impl is everything outside the body: a Self type, a trait
+/// ref (in the case of a trait impl), and a set of predicates (from the
+/// bounds / where-clauses).
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct ImplHeader<'tcx> {
+ pub impl_def_id: DefId,
+ pub self_ty: Ty<'tcx>,
+ pub trait_ref: Option<TraitRef<'tcx>>,
+ pub predicates: Vec<Predicate<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub enum ImplSubject<'tcx> {
+ Trait(TraitRef<'tcx>),
+ Inherent(Ty<'tcx>),
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable, Debug)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum ImplPolarity {
+ /// `impl Trait for Type`
+ Positive,
+ /// `impl !Trait for Type`
+ Negative,
+ /// `#[rustc_reservation_impl] impl Trait for Type`
+ ///
+ /// This is a "stability hack", not a real Rust feature.
+ /// See #64631 for details.
+ Reservation,
+}
+
+impl ImplPolarity {
+ /// Flips polarity by turning `Positive` into `Negative` and `Negative` into `Positive`.
+ pub fn flip(&self) -> Option<ImplPolarity> {
+ match self {
+ ImplPolarity::Positive => Some(ImplPolarity::Negative),
+ ImplPolarity::Negative => Some(ImplPolarity::Positive),
+ ImplPolarity::Reservation => None,
+ }
+ }
+}
+
+impl fmt::Display for ImplPolarity {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Positive => f.write_str("positive"),
+ Self::Negative => f.write_str("negative"),
+ Self::Reservation => f.write_str("reservation"),
+ }
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, Encodable, Decodable, HashStable)]
+pub enum Visibility {
+ /// Visible everywhere (including in other crates).
+ Public,
+ /// Visible only in the given crate-local module.
+ Restricted(DefId),
+ /// Not visible anywhere in the local crate. This is the visibility of private external items.
+ Invisible,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable, TyEncodable, TyDecodable)]
+pub enum BoundConstness {
+ /// `T: Trait`
+ NotConst,
+ /// `T: ~const Trait`
+ ///
+ /// Requires resolving to const only when we are in a const context.
+ ConstIfConst,
+}
+
+impl BoundConstness {
+ /// Reduce `self` and `constness` to two possible combined states instead of four.
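+ ///
+ /// A hedged illustration of the truth table (only `Const` paired with
+ /// `ConstIfConst` stays const; everything else collapses to not-const):
+ ///
+ /// ```ignore (illustrative)
+ /// let mut bc = BoundConstness::ConstIfConst;
+ /// assert_eq!(bc.and(hir::Constness::Const), hir::Constness::Const);
+ /// // A non-const context demotes the bound and `bc` itself:
+ /// assert_eq!(bc.and(hir::Constness::NotConst), hir::Constness::NotConst);
+ /// assert_eq!(bc, BoundConstness::NotConst);
+ /// ```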
+ pub fn and(&mut self, constness: hir::Constness) -> hir::Constness {
+ match (constness, self) {
+ (hir::Constness::Const, BoundConstness::ConstIfConst) => hir::Constness::Const,
+ (_, this) => {
+ *this = BoundConstness::NotConst;
+ hir::Constness::NotConst
+ }
+ }
+ }
+}
+
+impl fmt::Display for BoundConstness {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::NotConst => f.write_str("normal"),
+ Self::ConstIfConst => f.write_str("`~const`"),
+ }
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ClosureSizeProfileData<'tcx> {
+ /// Tuple containing the types of closure captures before the feature `capture_disjoint_fields`
+ pub before_feature_tys: Ty<'tcx>,
+ /// Tuple containing the types of closure captures after the feature `capture_disjoint_fields`
+ pub after_feature_tys: Ty<'tcx>,
+}
+
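+/// A view of the tree of definitions: anything that can look up the parent of
+/// a `DefId`. As a hedged sketch of how `is_descendant_of` walks that tree
+/// (the def ids below are hypothetical; `TyCtxt` implements this trait):
+///
+/// ```ignore (illustrative; requires a TyCtxt)
+/// // For a module tree `crate -> a -> b`:
+/// assert!(tcx.is_descendant_of(b_def_id, a_def_id));
+/// assert!(tcx.is_descendant_of(b_def_id, crate_root_def_id));
+/// assert!(!tcx.is_descendant_of(a_def_id, b_def_id));
+/// ```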
+pub trait DefIdTree: Copy {
+ fn opt_parent(self, id: DefId) -> Option<DefId>;
+
+ #[inline]
+ #[track_caller]
+ fn parent(self, id: DefId) -> DefId {
+ match self.opt_parent(id) {
+ Some(id) => id,
+ // not `unwrap_or_else` to avoid breaking caller tracking
+ None => bug!("{id:?} doesn't have a parent"),
+ }
+ }
+
+ #[inline]
+ #[track_caller]
+ fn opt_local_parent(self, id: LocalDefId) -> Option<LocalDefId> {
+ self.opt_parent(id.to_def_id()).map(DefId::expect_local)
+ }
+
+ #[inline]
+ #[track_caller]
+ fn local_parent(self, id: LocalDefId) -> LocalDefId {
+ self.parent(id.to_def_id()).expect_local()
+ }
+
+ fn is_descendant_of(self, mut descendant: DefId, ancestor: DefId) -> bool {
+ if descendant.krate != ancestor.krate {
+ return false;
+ }
+
+ while descendant != ancestor {
+ match self.opt_parent(descendant) {
+ Some(parent) => descendant = parent,
+ None => return false,
+ }
+ }
+ true
+ }
+}
+
+impl<'tcx> DefIdTree for TyCtxt<'tcx> {
+ #[inline]
+ fn opt_parent(self, id: DefId) -> Option<DefId> {
+ self.def_key(id).parent.map(|index| DefId { index, ..id })
+ }
+}
+
+impl Visibility {
+ /// Returns `true` if an item with this visibility is accessible from the given block.
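+ ///
+ /// A hedged sketch (the def ids are hypothetical): an item restricted to a
+ /// module `a` is accessible from a child module `a::b`, but not from a
+ /// sibling module `c`:
+ ///
+ /// ```ignore (illustrative; requires a TyCtxt)
+ /// let vis = Visibility::Restricted(a_def_id);
+ /// assert!(vis.is_accessible_from(a_b_def_id, tcx));
+ /// assert!(!vis.is_accessible_from(c_def_id, tcx));
+ /// ```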
+ pub fn is_accessible_from<T: DefIdTree>(self, module: DefId, tree: T) -> bool {
+ let restriction = match self {
+ // Public items are visible everywhere.
+ Visibility::Public => return true,
+ // Private items from other crates are visible nowhere.
+ Visibility::Invisible => return false,
+ // Restricted items are visible in an arbitrary local module.
+ Visibility::Restricted(other) if other.krate != module.krate => return false,
+ Visibility::Restricted(module) => module,
+ };
+
+ tree.is_descendant_of(module, restriction)
+ }
+
+ /// Returns `true` if this visibility is at least as accessible as the given visibility
+ pub fn is_at_least<T: DefIdTree>(self, vis: Visibility, tree: T) -> bool {
+ let vis_restriction = match vis {
+ Visibility::Public => return self == Visibility::Public,
+ Visibility::Invisible => return true,
+ Visibility::Restricted(module) => module,
+ };
+
+ self.is_accessible_from(vis_restriction, tree)
+ }
+
+ /// Returns `true` if this item is visible anywhere in the local crate.
+ pub fn is_visible_locally(self) -> bool {
+ match self {
+ Visibility::Public => true,
+ Visibility::Restricted(def_id) => def_id.is_local(),
+ Visibility::Invisible => false,
+ }
+ }
+
+ pub fn is_public(self) -> bool {
+ matches!(self, Visibility::Public)
+ }
+}
+
+/// The crate variances map is computed during typeck and contains the
+/// variance of every item in the local crate. You should not use it
+/// directly, because doing so will make your pass dependent on the
+/// HIR of every item in the local crate. Instead, use
+/// `tcx.variances_of()` to get the variance for a *particular*
+/// item.
+#[derive(HashStable, Debug)]
+pub struct CrateVariancesMap<'tcx> {
+ /// For each item with generics, maps to a vector of the variance
+ /// of its generics. If an item has no generics, it will have no
+ /// entry.
+ pub variances: FxHashMap<DefId, &'tcx [ty::Variance]>,
+}
+
+/// Contains information needed to resolve types and (in the future) look up
+/// the types of AST nodes.
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct CReaderCacheKey {
+ pub cnum: Option<CrateNum>,
+ pub pos: usize,
+}
+
+/// Represents a type.
+///
+/// IMPORTANT:
+/// - This is a very "dumb" struct (with no derives and no `impls`).
+/// - Values of this type are always interned and thus unique, and are stored
+/// as an `Interned<TyS>`.
+/// - `Ty` (which contains a reference to a `Interned<TyS>`) or `Interned<TyS>`
+/// should be used everywhere instead of `TyS`. In particular, `Ty` has most
+/// of the relevant methods.
+#[derive(PartialEq, Eq, PartialOrd, Ord)]
+#[allow(rustc::usage_of_ty_tykind)]
+pub(crate) struct TyS<'tcx> {
+ /// This field shouldn't be used directly and may be removed in the future.
+ /// Use `Ty::kind()` instead.
+ kind: TyKind<'tcx>,
+
+ /// This field provides fast access to information that is also contained
+ /// in `kind`.
+ ///
+ /// This field shouldn't be used directly and may be removed in the future.
+ /// Use `Ty::flags()` instead.
+ flags: TypeFlags,
+
+ /// This field provides fast access to information that is also contained
+ /// in `kind`.
+ ///
+ /// This is a kind of confusing thing: it stores the smallest
+ /// binder such that
+ ///
+ /// (a) the binder itself captures nothing but
+ /// (b) all the late-bound things within the type are captured
+ /// by some sub-binder.
+ ///
+ /// So, for a type without any late-bound things, like `u32`, this
+ /// will be *innermost*, because that is the innermost binder that
+ /// captures nothing. But for a type `&'D u32`, where `'D` is a
+ /// late-bound region with De Bruijn index `D`, this would be `D + 1`
+ /// -- the binder itself does not capture `D`, but `D` is captured
+ /// by an inner binder.
+ ///
+ /// We call this concept an "exclusive" binder `D` because all
+ /// De Bruijn indices within the type are contained within `0..D`
+ /// (exclusive).
+ outer_exclusive_binder: ty::DebruijnIndex,
+}
+
+// `TyS` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(TyS<'_>, 40);
+
+// We are actually storing a stable hash cache next to the type, so let's
+// also check the full size
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(WithStableHash<TyS<'_>>, 56);
+
+/// Use this rather than `TyS` whenever possible.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
+#[rustc_diagnostic_item = "Ty"]
+#[rustc_pass_by_value]
+pub struct Ty<'tcx>(Interned<'tcx, WithStableHash<TyS<'tcx>>>);
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// A "bool" type used in rustc_mir_transform unit tests when we
+ /// have not spun up a TyCtxt.
+ pub const BOOL_TY_FOR_UNIT_TESTING: Ty<'tcx> = Ty(Interned::new_unchecked(&WithStableHash {
+ internee: TyS {
+ kind: ty::Bool,
+ flags: TypeFlags::empty(),
+ outer_exclusive_binder: DebruijnIndex::from_usize(0),
+ },
+ stable_hash: Fingerprint::ZERO,
+ }));
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for TyS<'tcx> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let TyS {
+ kind,
+
+ // The other fields just provide fast access to information that is
+ // also contained in `kind`, so no need to hash them.
+ flags: _,
+
+ outer_exclusive_binder: _,
+ } = self;
+
+ kind.hash_stable(hcx, hasher)
+ }
+}
+
+impl ty::EarlyBoundRegion {
+    /// Does this early bound region have a name? Early bound regions always
+    /// have names, except when they come from anonymous lifetimes (`'_`).
+ pub fn has_name(&self) -> bool {
+ self.name != kw::UnderscoreLifetime
+ }
+}
+
+/// Represents a predicate.
+///
+/// See comments on `TyS`, which apply here too (albeit for
+/// `PredicateS`/`Predicate` rather than `TyS`/`Ty`).
+#[derive(Debug)]
+pub(crate) struct PredicateS<'tcx> {
+ kind: Binder<'tcx, PredicateKind<'tcx>>,
+ flags: TypeFlags,
+ /// See the comment for the corresponding field of [TyS].
+ outer_exclusive_binder: ty::DebruijnIndex,
+}
+
+// This type is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(PredicateS<'_>, 56);
+
+/// Use this rather than `PredicateS` whenever possible.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[rustc_pass_by_value]
+pub struct Predicate<'tcx>(Interned<'tcx, PredicateS<'tcx>>);
+
+impl<'tcx> Predicate<'tcx> {
+ /// Gets the inner `Binder<'tcx, PredicateKind<'tcx>>`.
+ #[inline]
+ pub fn kind(self) -> Binder<'tcx, PredicateKind<'tcx>> {
+ self.0.kind
+ }
+
+ #[inline(always)]
+ pub fn flags(self) -> TypeFlags {
+ self.0.flags
+ }
+
+ #[inline(always)]
+ pub fn outer_exclusive_binder(self) -> DebruijnIndex {
+ self.0.outer_exclusive_binder
+ }
+
+ /// Flips the polarity of a Predicate.
+ ///
+    /// Given a `T: Trait` predicate, it returns `T: !Trait`, and given `T: !Trait` it returns `T: Trait`.
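+    ///
+    /// Returns `None` for non-trait predicates, and for trait predicates
+    /// whose polarity cannot be flipped (reservation impls).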
+ pub fn flip_polarity(self, tcx: TyCtxt<'tcx>) -> Option<Predicate<'tcx>> {
+ let kind = self
+ .kind()
+ .map_bound(|kind| match kind {
+ PredicateKind::Trait(TraitPredicate { trait_ref, constness, polarity }) => {
+ Some(PredicateKind::Trait(TraitPredicate {
+ trait_ref,
+ constness,
+ polarity: polarity.flip()?,
+ }))
+ }
+
+ _ => None,
+ })
+ .transpose()?;
+
+ Some(tcx.mk_predicate(kind))
+ }
+
+ pub fn without_const(mut self, tcx: TyCtxt<'tcx>) -> Self {
+ if let PredicateKind::Trait(TraitPredicate { trait_ref, constness, polarity }) = self.kind().skip_binder()
+ && constness != BoundConstness::NotConst
+ {
+ self = tcx.mk_predicate(self.kind().rebind(PredicateKind::Trait(TraitPredicate {
+ trait_ref,
+ constness: BoundConstness::NotConst,
+ polarity,
+ })));
+ }
+ self
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Predicate<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let PredicateS {
+ ref kind,
+
+ // The other fields just provide fast access to information that is
+ // also contained in `kind`, so no need to hash them.
+ flags: _,
+ outer_exclusive_binder: _,
+ } = self.0.0;
+
+ kind.hash_stable(hcx, hasher);
+ }
+}
+
+impl rustc_errors::IntoDiagnosticArg for Predicate<'_> {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ rustc_errors::DiagnosticArgValue::Str(std::borrow::Cow::Owned(self.to_string()))
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub enum PredicateKind<'tcx> {
+ /// Corresponds to `where Foo: Bar<A, B, C>`. `Foo` here would be
+ /// the `Self` type of the trait reference and `A`, `B`, and `C`
+ /// would be the type parameters.
+ Trait(TraitPredicate<'tcx>),
+
+ /// `where 'a: 'b`
+ RegionOutlives(RegionOutlivesPredicate<'tcx>),
+
+ /// `where T: 'a`
+ TypeOutlives(TypeOutlivesPredicate<'tcx>),
+
+ /// `where <T as TraitRef>::Name == X`, approximately.
+ /// See the `ProjectionPredicate` struct for details.
+ Projection(ProjectionPredicate<'tcx>),
+
+ /// No syntax: `T` well-formed.
+ WellFormed(GenericArg<'tcx>),
+
+ /// Trait must be object-safe.
+ ObjectSafe(DefId),
+
+ /// No direct syntax. May be thought of as `where T: FnFoo<...>`
+ /// for some substitutions `...` and `T` being a closure type.
+ /// Satisfied (or refuted) once we know the closure's kind.
+ ClosureKind(DefId, SubstsRef<'tcx>, ClosureKind),
+
+ /// `T1 <: T2`
+ ///
+ /// This obligation is created most often when we have two
+ /// unresolved type variables and hence don't have enough
+ /// information to process the subtyping obligation yet.
+ Subtype(SubtypePredicate<'tcx>),
+
+ /// `T1` coerced to `T2`
+ ///
+ /// Like a subtyping obligation, this is created most often
+ /// when we have two unresolved type variables and hence
+ /// don't have enough information to process the coercion
+ /// obligation yet. At the moment, we actually process coercions
+ /// very much like subtyping and don't handle the full coercion
+ /// logic.
+ Coerce(CoercePredicate<'tcx>),
+
+ /// Constant initializer must evaluate successfully.
+ ConstEvaluatable(ty::Unevaluated<'tcx, ()>),
+
+ /// Constants must be equal. The first component is the const that is expected.
+ ConstEquate(Const<'tcx>, Const<'tcx>),
+
+ /// Represents a type found in the environment that we can use for implied bounds.
+ ///
+ /// Only used for Chalk.
+ TypeWellFormedFromEnv(Ty<'tcx>),
+}
+
+/// The crate outlives map is computed during typeck and contains the
+/// outlives of every item in the local crate. You should not use it
+/// directly, because to do so will make your pass dependent on the
+/// HIR of every item in the local crate. Instead, use
+/// `tcx.inferred_outlives_of()` to get the outlives for a *particular*
+/// item.
+#[derive(HashStable, Debug)]
+pub struct CratePredicatesMap<'tcx> {
+ /// For each struct with outlive bounds, maps to a vector of the
+ /// predicate of its outlive bounds. If an item has no outlives
+ /// bounds, it will have no entry.
+ pub predicates: FxHashMap<DefId, &'tcx [(Predicate<'tcx>, Span)]>,
+}
+
+impl<'tcx> Predicate<'tcx> {
+ /// Performs a substitution suitable for going from a
+ /// poly-trait-ref to supertraits that must hold if that
+ /// poly-trait-ref holds. This is slightly different from a normal
+ /// substitution in terms of what happens with bound regions. See
+ /// lengthy comment below for details.
+ pub fn subst_supertrait(
+ self,
+ tcx: TyCtxt<'tcx>,
+ trait_ref: &ty::PolyTraitRef<'tcx>,
+ ) -> Predicate<'tcx> {
+ // The interaction between HRTB and supertraits is not entirely
+ // obvious. Let me walk you (and myself) through an example.
+ //
+ // Let's start with an easy case. Consider two traits:
+ //
+ // trait Foo<'a>: Bar<'a,'a> { }
+ // trait Bar<'b,'c> { }
+ //
+ // Now, if we have a trait reference `for<'x> T: Foo<'x>`, then
+ // we can deduce that `for<'x> T: Bar<'x,'x>`. Basically, if we
+ // knew that `Foo<'x>` (for any 'x) then we also know that
+ // `Bar<'x,'x>` (for any 'x). This more-or-less falls out from
+ // normal substitution.
+ //
+ // In terms of why this is sound, the idea is that whenever there
+ // is an impl of `T:Foo<'a>`, it must show that `T:Bar<'a,'a>`
+ // holds. So if there is an impl of `T:Foo<'a>` that applies to
+ // all `'a`, then we must know that `T:Bar<'a,'a>` holds for all
+ // `'a`.
+ //
+ // Another example to be careful of is this:
+ //
+ // trait Foo1<'a>: for<'b> Bar1<'a,'b> { }
+ // trait Bar1<'b,'c> { }
+ //
+ // Here, if we have `for<'x> T: Foo1<'x>`, then what do we know?
+ // The answer is that we know `for<'x,'b> T: Bar1<'x,'b>`. The
+ // reason is similar to the previous example: any impl of
+ // `T:Foo1<'x>` must show that `for<'b> T: Bar1<'x, 'b>`. So
+ // basically we would want to collapse the bound lifetimes from
+ // the input (`trait_ref`) and the supertraits.
+ //
+ // To achieve this in practice is fairly straightforward. Let's
+ // consider the more complicated scenario:
+ //
+ // - We start out with `for<'x> T: Foo1<'x>`. In this case, `'x`
+ // has a De Bruijn index of 1. We want to produce `for<'x,'b> T: Bar1<'x,'b>`,
+ // where both `'x` and `'b` would have a DB index of 1.
+ // The substitution from the input trait-ref is therefore going to be
+ // `'a => 'x` (where `'x` has a DB index of 1).
+ // - The supertrait-ref is `for<'b> Bar1<'a,'b>`, where `'a` is an
+    //   early-bound parameter and `'b` is a late-bound parameter with a
+ // DB index of 1.
+ // - If we replace `'a` with `'x` from the input, it too will have
+ // a DB index of 1, and thus we'll have `for<'x,'b> Bar1<'x,'b>`
+ // just as we wanted.
+ //
+ // There is only one catch. If we just apply the substitution `'a
+ // => 'x` to `for<'b> Bar1<'a,'b>`, the substitution code will
+    // adjust the DB index because we are substituting into a binder (it
+ // tries to be so smart...) resulting in `for<'x> for<'b>
+ // Bar1<'x,'b>` (we have no syntax for this, so use your
+ // imagination). Basically the 'x will have DB index of 2 and 'b
+ // will have DB index of 1. Not quite what we want. So we apply
+ // the substitution to the *contents* of the trait reference,
+ // rather than the trait reference itself (put another way, the
+ // substitution code expects equal binding levels in the values
+ // from the substitution and the value being substituted into, and
+ // this trick achieves that).
+
+ // Working through the second example:
+ // trait_ref: for<'x> T: Foo1<'^0.0>; substs: [T, '^0.0]
+ // predicate: for<'b> Self: Bar1<'a, '^0.0>; substs: [Self, 'a, '^0.0]
+ // We want to end up with:
+ // for<'x, 'b> T: Bar1<'^0.0, '^0.1>
+ // To do this:
+ // 1) We must shift all bound vars in predicate by the length
+ // of trait ref's bound vars. So, we would end up with predicate like
+ // Self: Bar1<'a, '^0.1>
+ // 2) We can then apply the trait substs to this, ending up with
+ // T: Bar1<'^0.0, '^0.1>
+ // 3) Finally, to create the final bound vars, we concatenate the bound
+ // vars of the trait ref with those of the predicate:
+ // ['x, 'b]
+ let bound_pred = self.kind();
+ let pred_bound_vars = bound_pred.bound_vars();
+ let trait_bound_vars = trait_ref.bound_vars();
+ // 1) Self: Bar1<'a, '^0.0> -> Self: Bar1<'a, '^0.1>
+ let shifted_pred =
+ tcx.shift_bound_var_indices(trait_bound_vars.len(), bound_pred.skip_binder());
+ // 2) Self: Bar1<'a, '^0.1> -> T: Bar1<'^0.0, '^0.1>
+ let new = EarlyBinder(shifted_pred).subst(tcx, trait_ref.skip_binder().substs);
+ // 3) ['x] + ['b] -> ['x, 'b]
+ let bound_vars =
+ tcx.mk_bound_variable_kinds(trait_bound_vars.iter().chain(pred_bound_vars));
+ tcx.reuse_or_mk_predicate(self, ty::Binder::bind_with_vars(new, bound_vars))
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct TraitPredicate<'tcx> {
+ pub trait_ref: TraitRef<'tcx>,
+
+ pub constness: BoundConstness,
+
+ /// If polarity is Positive: we are proving that the trait is implemented.
+ ///
+ /// If polarity is Negative: we are proving that a negative impl of this trait
+ /// exists. (Note that coherence also checks whether negative impls of supertraits
+ /// exist via a series of predicates.)
+ ///
+    /// If polarity is Reservation: that's a bug.
+ pub polarity: ImplPolarity,
+}
+
+pub type PolyTraitPredicate<'tcx> = ty::Binder<'tcx, TraitPredicate<'tcx>>;
+
+impl<'tcx> TraitPredicate<'tcx> {
+ pub fn remap_constness(&mut self, param_env: &mut ParamEnv<'tcx>) {
+ *param_env = param_env.with_constness(self.constness.and(param_env.constness()))
+ }
+
+ /// Remap the constness of this predicate before emitting it for diagnostics.
+ pub fn remap_constness_diag(&mut self, param_env: ParamEnv<'tcx>) {
+        // This differs from `remap_constness` in that callees use it when printing
+        // this predicate in case of selection errors. `T: ~const Drop` bounds cannot
+        // end up here when the param_env is not const, because they are always
+        // satisfied in non-const contexts.
+ if let hir::Constness::NotConst = param_env.constness() {
+ self.constness = ty::BoundConstness::NotConst;
+ }
+ }
+
+ pub fn def_id(self) -> DefId {
+ self.trait_ref.def_id
+ }
+
+ pub fn self_ty(self) -> Ty<'tcx> {
+ self.trait_ref.self_ty()
+ }
+
+ #[inline]
+ pub fn is_const_if_const(self) -> bool {
+ self.constness == BoundConstness::ConstIfConst
+ }
+
+ pub fn is_constness_satisfied_by(self, constness: hir::Constness) -> bool {
+ match (self.constness, constness) {
+ (BoundConstness::NotConst, _)
+ | (BoundConstness::ConstIfConst, hir::Constness::Const) => true,
+ (BoundConstness::ConstIfConst, hir::Constness::NotConst) => false,
+ }
+ }
+}
+
+impl<'tcx> PolyTraitPredicate<'tcx> {
+ pub fn def_id(self) -> DefId {
+ // Ok to skip binder since trait `DefId` does not care about regions.
+ self.skip_binder().def_id()
+ }
+
+ pub fn self_ty(self) -> ty::Binder<'tcx, Ty<'tcx>> {
+ self.map_bound(|trait_ref| trait_ref.self_ty())
+ }
+
+ /// Remap the constness of this predicate before emitting it for diagnostics.
+ pub fn remap_constness_diag(&mut self, param_env: ParamEnv<'tcx>) {
+ *self = self.map_bound(|mut p| {
+ p.remap_constness_diag(param_env);
+ p
+ });
+ }
+
+ #[inline]
+ pub fn is_const_if_const(self) -> bool {
+ self.skip_binder().is_const_if_const()
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct OutlivesPredicate<A, B>(pub A, pub B); // `A: B`
+pub type RegionOutlivesPredicate<'tcx> = OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>;
+pub type TypeOutlivesPredicate<'tcx> = OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>;
+pub type PolyRegionOutlivesPredicate<'tcx> = ty::Binder<'tcx, RegionOutlivesPredicate<'tcx>>;
+pub type PolyTypeOutlivesPredicate<'tcx> = ty::Binder<'tcx, TypeOutlivesPredicate<'tcx>>;
+
+/// Encodes that `a` must be a subtype of `b`. The `a_is_expected` flag indicates
+/// whether the `a` type is the type that we should label as "expected" when
+/// presenting user diagnostics.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct SubtypePredicate<'tcx> {
+ pub a_is_expected: bool,
+ pub a: Ty<'tcx>,
+ pub b: Ty<'tcx>,
+}
+pub type PolySubtypePredicate<'tcx> = ty::Binder<'tcx, SubtypePredicate<'tcx>>;
+
+/// Encodes that we have to coerce *from* the `a` type to the `b` type.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct CoercePredicate<'tcx> {
+ pub a: Ty<'tcx>,
+ pub b: Ty<'tcx>,
+}
+pub type PolyCoercePredicate<'tcx> = ty::Binder<'tcx, CoercePredicate<'tcx>>;
+
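+/// The "term" a projection equates to: either a type, as in `Item = Ty`,
+/// or a const, for associated-const projections.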
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub enum Term<'tcx> {
+ Ty(Ty<'tcx>),
+ Const(Const<'tcx>),
+}
+
+impl<'tcx> From<Ty<'tcx>> for Term<'tcx> {
+ fn from(ty: Ty<'tcx>) -> Self {
+ Term::Ty(ty)
+ }
+}
+
+impl<'tcx> From<Const<'tcx>> for Term<'tcx> {
+ fn from(c: Const<'tcx>) -> Self {
+ Term::Const(c)
+ }
+}
+
+impl<'tcx> Term<'tcx> {
+ pub fn ty(&self) -> Option<Ty<'tcx>> {
+ if let Term::Ty(ty) = self { Some(*ty) } else { None }
+ }
+
+ pub fn ct(&self) -> Option<Const<'tcx>> {
+ if let Term::Const(c) = self { Some(*c) } else { None }
+ }
+
+ pub fn into_arg(self) -> GenericArg<'tcx> {
+ match self {
+ Term::Ty(ty) => ty.into(),
+ Term::Const(c) => c.into(),
+ }
+ }
+}
+
+/// This kind of predicate has no *direct* correspondent in the
+/// syntax, but it roughly corresponds to the syntactic forms:
+///
+/// 1. `T: TraitRef<..., Item = Type>`
+/// 2. `<T as TraitRef<...>>::Item == Type` (NYI)
+///
+/// In particular, form #1 is "desugared" to the combination of a
+/// normal trait predicate (`T: TraitRef<...>`) and one of these
+/// predicates. Form #2 is a broader form in that it also permits
+/// equality between arbitrary types. Processing an instance of
+/// Form #2 eventually yields one of these `ProjectionPredicate`
+/// instances to normalize the LHS.
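+///
+/// A rough sketch of form #1's desugaring (illustrative only):
+///
+/// ```ignore (illustrative)
+/// // `T: Iterator<Item = u32>` becomes, roughly, the pair of predicates:
+/// //   PredicateKind::Trait(T: Iterator)
+/// //   PredicateKind::Projection(<T as Iterator>::Item == u32)
+/// ```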
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct ProjectionPredicate<'tcx> {
+ pub projection_ty: ProjectionTy<'tcx>,
+ pub term: Term<'tcx>,
+}
+
+pub type PolyProjectionPredicate<'tcx> = Binder<'tcx, ProjectionPredicate<'tcx>>;
+
+impl<'tcx> PolyProjectionPredicate<'tcx> {
+ /// Returns the `DefId` of the trait of the associated item being projected.
+ #[inline]
+ pub fn trait_def_id(&self, tcx: TyCtxt<'tcx>) -> DefId {
+ self.skip_binder().projection_ty.trait_def_id(tcx)
+ }
+
+ /// Get the [PolyTraitRef] required for this projection to be well formed.
+ /// Note that for generic associated types the predicates of the associated
+ /// type also need to be checked.
+ #[inline]
+ pub fn required_poly_trait_ref(&self, tcx: TyCtxt<'tcx>) -> PolyTraitRef<'tcx> {
+ // Note: unlike with `TraitRef::to_poly_trait_ref()`,
+ // `self.0.trait_ref` is permitted to have escaping regions.
+ // This is because here `self` has a `Binder` and so does our
+ // return value, so we are preserving the number of binding
+ // levels.
+ self.map_bound(|predicate| predicate.projection_ty.trait_ref(tcx))
+ }
+
+ pub fn term(&self) -> Binder<'tcx, Term<'tcx>> {
+ self.map_bound(|predicate| predicate.term)
+ }
+
+ /// The `DefId` of the `TraitItem` for the associated type.
+ ///
+ /// Note that this is not the `DefId` of the `TraitRef` containing this
+ /// associated type, which is in `tcx.associated_item(projection_def_id()).container`.
+ pub fn projection_def_id(&self) -> DefId {
+ // Ok to skip binder since trait `DefId` does not care about regions.
+ self.skip_binder().projection_ty.item_def_id
+ }
+}
+
+pub trait ToPolyTraitRef<'tcx> {
+ fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx>;
+}
+
+impl<'tcx> ToPolyTraitRef<'tcx> for PolyTraitPredicate<'tcx> {
+ fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
+ self.map_bound_ref(|trait_pred| trait_pred.trait_ref)
+ }
+}
+
+pub trait ToPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx>;
+}
+
+impl<'tcx> ToPredicate<'tcx> for Binder<'tcx, PredicateKind<'tcx>> {
+ #[inline(always)]
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ tcx.mk_predicate(self)
+ }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyTraitPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ self.map_bound(PredicateKind::Trait).to_predicate(tcx)
+ }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ self.map_bound(PredicateKind::RegionOutlives).to_predicate(tcx)
+ }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ self.map_bound(PredicateKind::TypeOutlives).to_predicate(tcx)
+ }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ self.map_bound(PredicateKind::Projection).to_predicate(tcx)
+ }
+}
+
+impl<'tcx> Predicate<'tcx> {
+ pub fn to_opt_poly_trait_pred(self) -> Option<PolyTraitPredicate<'tcx>> {
+ let predicate = self.kind();
+ match predicate.skip_binder() {
+ PredicateKind::Trait(t) => Some(predicate.rebind(t)),
+ PredicateKind::Projection(..)
+ | PredicateKind::Subtype(..)
+ | PredicateKind::Coerce(..)
+ | PredicateKind::RegionOutlives(..)
+ | PredicateKind::WellFormed(..)
+ | PredicateKind::ObjectSafe(..)
+ | PredicateKind::ClosureKind(..)
+ | PredicateKind::TypeOutlives(..)
+ | PredicateKind::ConstEvaluatable(..)
+ | PredicateKind::ConstEquate(..)
+ | PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ }
+
+ pub fn to_opt_poly_projection_pred(self) -> Option<PolyProjectionPredicate<'tcx>> {
+ let predicate = self.kind();
+ match predicate.skip_binder() {
+ PredicateKind::Projection(t) => Some(predicate.rebind(t)),
+ PredicateKind::Trait(..)
+ | PredicateKind::Subtype(..)
+ | PredicateKind::Coerce(..)
+ | PredicateKind::RegionOutlives(..)
+ | PredicateKind::WellFormed(..)
+ | PredicateKind::ObjectSafe(..)
+ | PredicateKind::ClosureKind(..)
+ | PredicateKind::TypeOutlives(..)
+ | PredicateKind::ConstEvaluatable(..)
+ | PredicateKind::ConstEquate(..)
+ | PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ }
+
+ pub fn to_opt_type_outlives(self) -> Option<PolyTypeOutlivesPredicate<'tcx>> {
+ let predicate = self.kind();
+ match predicate.skip_binder() {
+ PredicateKind::TypeOutlives(data) => Some(predicate.rebind(data)),
+ PredicateKind::Trait(..)
+ | PredicateKind::Projection(..)
+ | PredicateKind::Subtype(..)
+ | PredicateKind::Coerce(..)
+ | PredicateKind::RegionOutlives(..)
+ | PredicateKind::WellFormed(..)
+ | PredicateKind::ObjectSafe(..)
+ | PredicateKind::ClosureKind(..)
+ | PredicateKind::ConstEvaluatable(..)
+ | PredicateKind::ConstEquate(..)
+ | PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ }
+}
+
+/// Represents the bounds declared on a particular set of type
+/// parameters. Should eventually be generalized into a flat list of
+/// where-clauses. You can obtain an `InstantiatedPredicates` list from a
+/// `GenericPredicates` by using the `instantiate` method. Note that this method
+/// reflects an important semantic invariant of `InstantiatedPredicates`: while
+/// the `GenericPredicates` are expressed in terms of the bound type
+/// parameters of the impl/trait/whatever, an `InstantiatedPredicates` instance
+/// represents a set of bounds for some particular instantiation,
+/// meaning that the generic parameters have been substituted with
+/// their values.
+///
+/// Example:
+/// ```ignore (illustrative)
+/// struct Foo<T, U: Bar<T>> { ... }
+/// ```
+/// Here, the `GenericPredicates` for `Foo` would contain a list of bounds like
+/// `[[], [U:Bar<T>]]`. Now if there were some particular reference
+/// like `Foo<isize,usize>`, then the `InstantiatedPredicates` would be `[[],
+/// [usize:Bar<isize>]]`.
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct InstantiatedPredicates<'tcx> {
+ pub predicates: Vec<Predicate<'tcx>>,
+ pub spans: Vec<Span>,
+}
+
+impl<'tcx> InstantiatedPredicates<'tcx> {
+ pub fn empty() -> InstantiatedPredicates<'tcx> {
+ InstantiatedPredicates { predicates: vec![], spans: vec![] }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.predicates.is_empty()
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, TyEncodable, TyDecodable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct OpaqueTypeKey<'tcx> {
+ pub def_id: LocalDefId,
+ pub substs: SubstsRef<'tcx>,
+}
+
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, HashStable, TyEncodable, TyDecodable)]
+pub struct OpaqueHiddenType<'tcx> {
+ /// The span of this particular definition of the opaque type. So
+ /// for example:
+ ///
+ /// ```ignore (incomplete snippet)
+ /// type Foo = impl Baz;
+ /// fn bar() -> Foo {
+ /// // ^^^ This is the span we are looking for!
+ /// }
+ /// ```
+ ///
+ /// In cases where the fn returns `(impl Trait, impl Trait)` or
+ /// other such combinations, the result is currently
+ /// over-approximated, but better than nothing.
+ pub span: Span,
+
+ /// The type variable that represents the value of the opaque type
+ /// that we require. In other words, after we compile this function,
+    /// we will have created a constraint like:
+ /// ```ignore (pseudo-rust)
+ /// Foo<'a, T> = ?C
+ /// ```
+ /// where `?C` is the value of this type variable. =) It may
+ /// naturally refer to the type and lifetime parameters in scope
+ /// in this function, though ultimately it should only reference
+ /// those that are arguments to `Foo` in the constraint above. (In
+ /// other words, `?C` should not include `'b`, even though it's a
+ /// lifetime parameter on `foo`.)
+ pub ty: Ty<'tcx>,
+}
+
+impl<'tcx> OpaqueHiddenType<'tcx> {
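+    /// Reports, via a compiler error, that `other` supplies a concrete type
+    /// for the opaque type that conflicts with the one recorded in `self`.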
+ pub fn report_mismatch(&self, other: &Self, tcx: TyCtxt<'tcx>) {
+ // Found different concrete types for the opaque type.
+ let mut err = tcx.sess.struct_span_err(
+ other.span,
+ "concrete type differs from previous defining opaque type use",
+ );
+ err.span_label(other.span, format!("expected `{}`, got `{}`", self.ty, other.ty));
+ if self.span == other.span {
+ err.span_label(
+ self.span,
+ "this expression supplies two conflicting concrete types for the same opaque type",
+ );
+ } else {
+ err.span_note(self.span, "previous use here");
+ }
+ err.emit();
+ }
+}
+
+/// The "placeholder index" fully defines a placeholder region, type, or const. Placeholders are
+/// identified by both a universe, as well as a name residing within that universe. Distinct bound
+/// regions/types/consts within the same universe simply have an unknown relationship to one
+/// another.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
+#[derive(HashStable, TyEncodable, TyDecodable)]
+pub struct Placeholder<T> {
+ pub universe: UniverseIndex,
+ pub name: T,
+}
+
+pub type PlaceholderRegion = Placeholder<BoundRegionKind>;
+
+pub type PlaceholderType = Placeholder<BoundVar>;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)]
+#[derive(TyEncodable, TyDecodable, PartialOrd, Ord)]
+pub struct BoundConst<'tcx> {
+ pub var: BoundVar,
+ pub ty: Ty<'tcx>,
+}
+
+pub type PlaceholderConst<'tcx> = Placeholder<BoundVar>;
+
+/// A `DefId` which, in case it is a const argument, is potentially bundled with
+/// the `DefId` of the generic parameter it instantiates.
+///
+/// This is used to avoid calls to `type_of` for const arguments during typeck
+/// which cause cycle errors.
+///
+/// ```rust
+/// struct A;
+/// impl A {
+/// fn foo<const N: usize>(&self) -> [u8; N] { [0; N] }
+/// // ^ const parameter
+/// }
+/// struct B;
+/// impl B {
+/// fn foo<const M: u8>(&self) -> usize { 42 }
+/// // ^ const parameter
+/// }
+///
+/// fn main() {
+/// let a = A;
+/// let _b = a.foo::<{ 3 + 7 }>();
+/// // ^^^^^^^^^ const argument
+/// }
+/// ```
+///
+/// Let's look at the call `a.foo::<{ 3 + 7 }>()` here. We do not know
+/// which `foo` is used until we know the type of `a`.
+///
+/// We only know the type of `a` once we are inside of `typeck(main)`.
+/// We also end up normalizing the type of `_b` during `typeck(main)` which
+/// requires us to evaluate the const argument.
+///
+/// To evaluate that const argument we need to know its type,
+/// which we would get using `type_of(const_arg)`. This requires us to
+/// resolve `foo` as it can be either `usize` or `u8` in this example.
+/// However, resolving `foo` once again requires `typeck(main)` to get the type of `a`,
+/// which results in a cycle.
+///
+/// In short we must not call `type_of(const_arg)` during `typeck(main)`.
+///
+/// When first creating the `ty::Const` of the const argument inside of `typeck` we have
+/// already resolved `foo` so we know which const parameter this argument instantiates.
+/// This means that we also know the expected result of `type_of(const_arg)` even if we
+/// aren't allowed to call that query: it is equal to `type_of(const_param)` which is
+/// trivial to compute.
+///
+/// If we now want to use that constant in a place which potentially needs its type
+/// we also pass the type of its `const_param`. This is the point of `WithOptConstParam`,
+/// except that instead of a `Ty` we bundle the `DefId` of the const parameter.
+/// Meaning that we need to use `type_of(const_param_did)` if `const_param_did` is `Some`
+/// to get the type of `did`.
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift, TyEncodable, TyDecodable)]
+#[derive(PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Hash, HashStable)]
+pub struct WithOptConstParam<T> {
+ pub did: T,
+ /// The `DefId` of the corresponding generic parameter in case `did` is
+ /// a const argument.
+ ///
+ /// Note that even if `did` is a const argument, this may still be `None`.
+ /// All queries taking `WithOptConstParam` start by calling `tcx.opt_const_param_of(def.did)`
+ /// to potentially update `param_did` in the case it is `None`.
+ pub const_param_did: Option<DefId>,
+}
+
+impl<T> WithOptConstParam<T> {
+ /// Creates a new `WithOptConstParam` setting `const_param_did` to `None`.
+ #[inline(always)]
+ pub fn unknown(did: T) -> WithOptConstParam<T> {
+ WithOptConstParam { did, const_param_did: None }
+ }
+}
+
+impl WithOptConstParam<LocalDefId> {
+    /// Returns `Some((did, param_did))` if `did` is a const argument,
+ /// `None` otherwise.
+ #[inline(always)]
+ pub fn try_lookup(did: LocalDefId, tcx: TyCtxt<'_>) -> Option<(LocalDefId, DefId)> {
+ tcx.opt_const_param_of(did).map(|param_did| (did, param_did))
+ }
+
+ /// In case `self` is unknown but `self.did` is a const argument, this returns
+ /// a `WithOptConstParam` with the correct `const_param_did`.
+ #[inline(always)]
+ pub fn try_upgrade(self, tcx: TyCtxt<'_>) -> Option<WithOptConstParam<LocalDefId>> {
+ if self.const_param_did.is_none() {
+ if let const_param_did @ Some(_) = tcx.opt_const_param_of(self.did) {
+ return Some(WithOptConstParam { did: self.did, const_param_did });
+ }
+ }
+
+ None
+ }
+
+ pub fn to_global(self) -> WithOptConstParam<DefId> {
+ WithOptConstParam { did: self.did.to_def_id(), const_param_did: self.const_param_did }
+ }
+
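+    /// The `DefId` whose `type_of` gives this definition's type: the const
+    /// parameter's `DefId` when this is a const argument, otherwise `did`.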
+ pub fn def_id_for_type_of(self) -> DefId {
+ if let Some(did) = self.const_param_did { did } else { self.did.to_def_id() }
+ }
+}
+
+impl WithOptConstParam<DefId> {
+ pub fn as_local(self) -> Option<WithOptConstParam<LocalDefId>> {
+ self.did
+ .as_local()
+ .map(|did| WithOptConstParam { did, const_param_did: self.const_param_did })
+ }
+
+ pub fn as_const_arg(self) -> Option<(LocalDefId, DefId)> {
+ if let Some(param_did) = self.const_param_did {
+ if let Some(did) = self.did.as_local() {
+ return Some((did, param_did));
+ }
+ }
+
+ None
+ }
+
+ pub fn is_local(self) -> bool {
+ self.did.is_local()
+ }
+
+ pub fn def_id_for_type_of(self) -> DefId {
+ self.const_param_did.unwrap_or(self.did)
+ }
+}
+
+/// When type checking, we use the `ParamEnv` to track
+/// details about the set of where-clauses that are in scope at this
+/// particular point.
+#[derive(Copy, Clone, Hash, PartialEq, Eq)]
+pub struct ParamEnv<'tcx> {
+ /// This packs both caller bounds and the reveal enum into one pointer.
+ ///
+ /// Caller bounds are `Obligation`s that the caller must satisfy. This is
+ /// basically the set of bounds on the in-scope type parameters, translated
+ /// into `Obligation`s, and elaborated and normalized.
+ ///
+    /// Use the `caller_bounds()` method to access them.
+    ///
+    /// The reveal mode is typically `Reveal::UserFacing`, but during codegen
+    /// we want `Reveal::All`.
+    ///
+    /// Note: the reveal mode is packed into the tag; use the `reveal()`
+    /// method to access it.
+ packed: CopyTaggedPtr<&'tcx List<Predicate<'tcx>>, ParamTag, true>,
+}
+
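+/// The tag packed into the low bits of `ParamEnv::packed`: two bits are
+/// enough to encode every (`Reveal`, `Constness`) combination.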
+#[derive(Copy, Clone)]
+struct ParamTag {
+ reveal: traits::Reveal,
+ constness: hir::Constness,
+}
+
+unsafe impl rustc_data_structures::tagged_ptr::Tag for ParamTag {
+ const BITS: usize = 2;
+ #[inline]
+ fn into_usize(self) -> usize {
+ match self {
+ Self { reveal: traits::Reveal::UserFacing, constness: hir::Constness::NotConst } => 0,
+ Self { reveal: traits::Reveal::All, constness: hir::Constness::NotConst } => 1,
+ Self { reveal: traits::Reveal::UserFacing, constness: hir::Constness::Const } => 2,
+ Self { reveal: traits::Reveal::All, constness: hir::Constness::Const } => 3,
+ }
+ }
+ #[inline]
+ unsafe fn from_usize(ptr: usize) -> Self {
+ match ptr {
+ 0 => Self { reveal: traits::Reveal::UserFacing, constness: hir::Constness::NotConst },
+ 1 => Self { reveal: traits::Reveal::All, constness: hir::Constness::NotConst },
+ 2 => Self { reveal: traits::Reveal::UserFacing, constness: hir::Constness::Const },
+ 3 => Self { reveal: traits::Reveal::All, constness: hir::Constness::Const },
+ _ => std::hint::unreachable_unchecked(),
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for ParamEnv<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ParamEnv")
+ .field("caller_bounds", &self.caller_bounds())
+ .field("reveal", &self.reveal())
+ .field("constness", &self.constness())
+ .finish()
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ParamEnv<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ self.caller_bounds().hash_stable(hcx, hasher);
+ self.reveal().hash_stable(hcx, hasher);
+ self.constness().hash_stable(hcx, hasher);
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ParamEnv<'tcx> {
+ fn try_fold_with<F: ty::fold::FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ Ok(ParamEnv::new(
+ self.caller_bounds().try_fold_with(folder)?,
+ self.reveal().try_fold_with(folder)?,
+ self.constness().try_fold_with(folder)?,
+ ))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ParamEnv<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.caller_bounds().visit_with(visitor)?;
+ self.reveal().visit_with(visitor)?;
+ self.constness().visit_with(visitor)
+ }
+}
+
+impl<'tcx> ParamEnv<'tcx> {
+ /// Construct a trait environment suitable for contexts where
+ /// there are no where-clauses in scope. Hidden types (like `impl
+ /// Trait`) are left hidden, so this is suitable for ordinary
+ /// type-checking.
+ #[inline]
+ pub fn empty() -> Self {
+ Self::new(List::empty(), Reveal::UserFacing, hir::Constness::NotConst)
+ }
+
+ #[inline]
+ pub fn caller_bounds(self) -> &'tcx List<Predicate<'tcx>> {
+ self.packed.pointer()
+ }
+
+ #[inline]
+ pub fn reveal(self) -> traits::Reveal {
+ self.packed.tag().reveal
+ }
+
+ #[inline]
+ pub fn constness(self) -> hir::Constness {
+ self.packed.tag().constness
+ }
+
+ #[inline]
+ pub fn is_const(self) -> bool {
+ self.packed.tag().constness == hir::Constness::Const
+ }
+
+ /// Construct a trait environment with no where-clauses in scope
+ /// where the values of all `impl Trait` and other hidden types
+ /// are revealed. This is suitable for monomorphized, post-typeck
+ /// environments like codegen or doing optimizations.
+ ///
+ /// N.B., if you want to have predicates in scope, use `ParamEnv::new`,
+    /// or invoke `param_env.with_reveal_all_normalized(tcx)`.
+ #[inline]
+ pub fn reveal_all() -> Self {
+ Self::new(List::empty(), Reveal::All, hir::Constness::NotConst)
+ }
+
+ /// Construct a trait environment with the given set of predicates.
+ #[inline]
+ pub fn new(
+ caller_bounds: &'tcx List<Predicate<'tcx>>,
+ reveal: Reveal,
+ constness: hir::Constness,
+ ) -> Self {
+ ty::ParamEnv { packed: CopyTaggedPtr::new(caller_bounds, ParamTag { reveal, constness }) }
+ }
+
+ pub fn with_user_facing(mut self) -> Self {
+ self.packed.set_tag(ParamTag { reveal: Reveal::UserFacing, ..self.packed.tag() });
+ self
+ }
+
+ #[inline]
+ pub fn with_constness(mut self, constness: hir::Constness) -> Self {
+ self.packed.set_tag(ParamTag { constness, ..self.packed.tag() });
+ self
+ }
+
+ #[inline]
+ pub fn with_const(mut self) -> Self {
+ self.packed.set_tag(ParamTag { constness: hir::Constness::Const, ..self.packed.tag() });
+ self
+ }
+
+ #[inline]
+ pub fn without_const(mut self) -> Self {
+ self.packed.set_tag(ParamTag { constness: hir::Constness::NotConst, ..self.packed.tag() });
+ self
+ }
+
+ #[inline]
+ pub fn remap_constness_with(&mut self, mut constness: ty::BoundConstness) {
+ *self = self.with_constness(constness.and(self.constness()))
+ }
+
+ /// Returns a new parameter environment with the same clauses, but
+ /// which "reveals" the true results of projections in all cases
+ /// (even for associated types that are specializable). This is
+ /// the desired behavior during codegen and certain other special
+    /// contexts; normally though we want to use `Reveal::UserFacing`,
+    /// which is the default.
+    ///
+    /// All opaque types in the `caller_bounds` of the `ParamEnv`
+    /// will be normalized to their underlying types.
+    /// See PR #65989 and issue #65918 for more details.
+ pub fn with_reveal_all_normalized(self, tcx: TyCtxt<'tcx>) -> Self {
+ if self.packed.tag().reveal == traits::Reveal::All {
+ return self;
+ }
+
+ ParamEnv::new(
+ tcx.normalize_opaque_types(self.caller_bounds()),
+ Reveal::All,
+ self.constness(),
+ )
+ }
+
+ /// Returns this same environment but with no caller bounds.
+ #[inline]
+ pub fn without_caller_bounds(self) -> Self {
+ Self::new(List::empty(), self.reveal(), self.constness())
+ }
+
+ /// Creates a suitable environment in which to perform trait
+ /// queries on the given value. When type-checking, this is simply
+ /// the pair of the environment plus value. But when reveal is set to
+ /// All, then if `value` does not reference any type parameters, we will
+ /// pair it with the empty environment. This improves caching and is generally
+ /// invisible.
+ ///
+ /// N.B., we preserve the environment when type-checking because it
+ /// is possible for the user to have wacky where-clauses like
+ /// `where Box<u32>: Copy`, which are clearly never
+ /// satisfiable. We generally want to behave as if they were true,
+ /// although the surrounding function is never reachable.
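+    ///
+    /// A sketch of the resulting pairs (illustrative only):
+    ///
+    /// ```ignore (illustrative)
+    /// // Reveal::UserFacing             -> (self, value)
+    /// // Reveal::All, `value` is global -> (self without caller bounds, value)
+    /// // Reveal::All, otherwise         -> (self, value)
+    /// ```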
+ pub fn and<T: TypeVisitable<'tcx>>(self, value: T) -> ParamEnvAnd<'tcx, T> {
+ match self.reveal() {
+ Reveal::UserFacing => ParamEnvAnd { param_env: self, value },
+
+ Reveal::All => {
+ if value.is_global() {
+ ParamEnvAnd { param_env: self.without_caller_bounds(), value }
+ } else {
+ ParamEnvAnd { param_env: self, value }
+ }
+ }
+ }
+ }
+}
+
+// FIXME(ecstaticmorse): Audit all occurrences of `without_const().to_predicate(tcx)` to ensure that
+// the constness of trait bounds is being propagated correctly.
+impl<'tcx> PolyTraitRef<'tcx> {
+ #[inline]
+ pub fn with_constness(self, constness: BoundConstness) -> PolyTraitPredicate<'tcx> {
+ self.map_bound(|trait_ref| ty::TraitPredicate {
+ trait_ref,
+ constness,
+ polarity: ty::ImplPolarity::Positive,
+ })
+ }
+
+ #[inline]
+ pub fn without_const(self) -> PolyTraitPredicate<'tcx> {
+ self.with_constness(BoundConstness::NotConst)
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)]
+#[derive(HashStable)]
+pub struct ParamEnvAnd<'tcx, T> {
+ pub param_env: ParamEnv<'tcx>,
+ pub value: T,
+}
+
+impl<'tcx, T> ParamEnvAnd<'tcx, T> {
+ pub fn into_parts(self) -> (ParamEnv<'tcx>, T) {
+ (self.param_env, self.value)
+ }
+
+ #[inline]
+ pub fn without_const(mut self) -> Self {
+ self.param_env = self.param_env.without_const();
+ self
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable, Encodable, Decodable)]
+pub struct Destructor {
+ /// The `DefId` of the destructor method
+ pub did: DefId,
+ /// The constness of the destructor method
+ pub constness: hir::Constness,
+}
+
+bitflags! {
+ #[derive(HashStable, TyEncodable, TyDecodable)]
+ pub struct VariantFlags: u32 {
+ const NO_VARIANT_FLAGS = 0;
+ /// Indicates whether the field list of this variant is `#[non_exhaustive]`.
+ const IS_FIELD_LIST_NON_EXHAUSTIVE = 1 << 0;
+ /// Indicates whether this variant was obtained as part of recovering from
+ /// a syntactic error. May be incomplete or bogus.
+ const IS_RECOVERED = 1 << 1;
+ }
+}
+
+/// Definition of a variant -- a struct's fields or an enum variant.
+#[derive(Debug, HashStable, TyEncodable, TyDecodable)]
+pub struct VariantDef {
+ /// `DefId` that identifies the variant itself.
+ /// If this variant belongs to a struct or union, then this is a copy of its `DefId`.
+ pub def_id: DefId,
+ /// `DefId` that identifies the variant's constructor.
+ /// If this variant is a struct variant, then this is `None`.
+ pub ctor_def_id: Option<DefId>,
+ /// Variant or struct name.
+ pub name: Symbol,
+ /// Discriminant of this variant.
+ pub discr: VariantDiscr,
+ /// Fields of this variant.
+ pub fields: Vec<FieldDef>,
+ /// Type of constructor of variant.
+ pub ctor_kind: CtorKind,
+    /// Flags of the variant (e.g., is the field list non-exhaustive?).
+ flags: VariantFlags,
+}
+
+impl VariantDef {
+ /// Creates a new `VariantDef`.
+ ///
+ /// `variant_did` is the `DefId` that identifies the enum variant (if this `VariantDef`
+ /// represents an enum variant).
+ ///
+ /// `ctor_did` is the `DefId` that identifies the constructor of unit or
+ /// tuple-variants/structs. If this is a `struct`-variant then this should be `None`.
+ ///
+ /// `parent_did` is the `DefId` of the `AdtDef` representing the enum or struct that
+    /// owns this variant. It is used for checking if a struct has `#[non_exhaustive]` without having
+ /// to go through the redirect of checking the ctor's attributes - but compiling a small crate
+ /// requires loading the `AdtDef`s for all the structs in the universe (e.g., coherence for any
+ /// built-in trait), and we do not want to load attributes twice.
+ ///
+ /// If someone speeds up attribute loading to not be a performance concern, they can
+ /// remove this hack and use the constructor `DefId` everywhere.
+ pub fn new(
+ name: Symbol,
+ variant_did: Option<DefId>,
+ ctor_def_id: Option<DefId>,
+ discr: VariantDiscr,
+ fields: Vec<FieldDef>,
+ ctor_kind: CtorKind,
+ adt_kind: AdtKind,
+ parent_did: DefId,
+ recovered: bool,
+ is_field_list_non_exhaustive: bool,
+ ) -> Self {
+ debug!(
+ "VariantDef::new(name = {:?}, variant_did = {:?}, ctor_def_id = {:?}, discr = {:?},
+ fields = {:?}, ctor_kind = {:?}, adt_kind = {:?}, parent_did = {:?})",
+ name, variant_did, ctor_def_id, discr, fields, ctor_kind, adt_kind, parent_did,
+ );
+
+ let mut flags = VariantFlags::NO_VARIANT_FLAGS;
+ if is_field_list_non_exhaustive {
+ flags |= VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE;
+ }
+
+ if recovered {
+ flags |= VariantFlags::IS_RECOVERED;
+ }
+
+ VariantDef {
+ def_id: variant_did.unwrap_or(parent_did),
+ ctor_def_id,
+ name,
+ discr,
+ fields,
+ ctor_kind,
+ flags,
+ }
+ }
+
+ /// Is this field list non-exhaustive?
+ #[inline]
+ pub fn is_field_list_non_exhaustive(&self) -> bool {
+ self.flags.intersects(VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE)
+ }
+
+ /// Was this variant obtained as part of recovering from a syntactic error?
+ #[inline]
+ pub fn is_recovered(&self) -> bool {
+ self.flags.intersects(VariantFlags::IS_RECOVERED)
+ }
+
+    /// Computes the `Ident` of this variant by looking up its `Span`.
+ pub fn ident(&self, tcx: TyCtxt<'_>) -> Ident {
+ Ident::new(self.name, tcx.def_ident_span(self.def_id).unwrap())
+ }
+}
+
+impl PartialEq for VariantDef {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ // There should be only one `VariantDef` for each `def_id`, therefore
+ // it is fine to implement `PartialEq` only based on `def_id`.
+ //
+ // Below, we exhaustively destructure `self` and `other` so that if the
+ // definition of `VariantDef` changes, a compile-error will be produced,
+ // reminding us to revisit this assumption.
+
+ let Self {
+ def_id: lhs_def_id,
+ ctor_def_id: _,
+ name: _,
+ discr: _,
+ fields: _,
+ ctor_kind: _,
+ flags: _,
+ } = &self;
+
+ let Self {
+ def_id: rhs_def_id,
+ ctor_def_id: _,
+ name: _,
+ discr: _,
+ fields: _,
+ ctor_kind: _,
+ flags: _,
+ } = other;
+
+ lhs_def_id == rhs_def_id
+ }
+}
+
+impl Eq for VariantDef {}
+
+impl Hash for VariantDef {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // There should be only one `VariantDef` for each `def_id`, therefore
+ // it is fine to implement `Hash` only based on `def_id`.
+ //
+ // Below, we exhaustively destructure `self` so that if the definition
+ // of `VariantDef` changes, a compile-error will be produced, reminding
+ // us to revisit this assumption.
+
+ let Self { def_id, ctor_def_id: _, name: _, discr: _, fields: _, ctor_kind: _, flags: _ } =
+ &self;
+
+ def_id.hash(s)
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub enum VariantDiscr {
+ /// Explicit value for this variant, i.e., `X = 123`.
+ /// The `DefId` corresponds to the embedded constant.
+ Explicit(DefId),
+
+ /// The previous variant's discriminant plus one.
+    /// For efficiency reasons, we store the distance from the
+    /// last `Explicit` discriminant, or `0` for the first variant
+    /// when no explicit discriminant precedes it.
+ Relative(u32),
+}
+
+#[derive(Debug, HashStable, TyEncodable, TyDecodable)]
+pub struct FieldDef {
+ pub did: DefId,
+ pub name: Symbol,
+ pub vis: Visibility,
+}
+
+impl PartialEq for FieldDef {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ // There should be only one `FieldDef` for each `did`, therefore it is
+ // fine to implement `PartialEq` only based on `did`.
+ //
+ // Below, we exhaustively destructure `self` so that if the definition
+ // of `FieldDef` changes, a compile-error will be produced, reminding
+ // us to revisit this assumption.
+
+ let Self { did: lhs_did, name: _, vis: _ } = &self;
+
+ let Self { did: rhs_did, name: _, vis: _ } = other;
+
+ lhs_did == rhs_did
+ }
+}
+
+impl Eq for FieldDef {}
+
+impl Hash for FieldDef {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // There should be only one `FieldDef` for each `did`, therefore it is
+ // fine to implement `Hash` only based on `did`.
+ //
+ // Below, we exhaustively destructure `self` so that if the definition
+ // of `FieldDef` changes, a compile-error will be produced, reminding
+ // us to revisit this assumption.
+
+ let Self { did, name: _, vis: _ } = &self;
+
+ did.hash(s)
+ }
+}
+
+bitflags! {
+ #[derive(TyEncodable, TyDecodable, Default, HashStable)]
+ pub struct ReprFlags: u8 {
+ const IS_C = 1 << 0;
+ const IS_SIMD = 1 << 1;
+ const IS_TRANSPARENT = 1 << 2;
+ // Internal only for now. If true, don't reorder fields.
+ const IS_LINEAR = 1 << 3;
+ // If true, the type's layout can be randomized using
+        // the seed stored in `ReprOptions.field_shuffle_seed`
+ const RANDOMIZE_LAYOUT = 1 << 4;
+ // Any of these flags being set prevent field reordering optimisation.
+ const IS_UNOPTIMISABLE = ReprFlags::IS_C.bits
+ | ReprFlags::IS_SIMD.bits
+ | ReprFlags::IS_LINEAR.bits;
+ }
+}
+
+/// Represents the repr options provided by the user.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Default, HashStable)]
+pub struct ReprOptions {
+ pub int: Option<attr::IntType>,
+ pub align: Option<Align>,
+ pub pack: Option<Align>,
+ pub flags: ReprFlags,
+ /// The seed to be used for randomizing a type's layout
+ ///
+ /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
+ /// be the "most accurate" hash as it'd encompass the item and crate
+ /// hash without loss, but it does pay the price of being larger.
+ /// Everything's a tradeoff, a `u64` seed should be sufficient for our
+ /// purposes (primarily `-Z randomize-layout`)
+ pub field_shuffle_seed: u64,
+}
+
+impl ReprOptions {
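+    /// Computes the `ReprOptions` for item `did` from its `#[repr(..)]`
+    /// attributes and the relevant `-Z` layout flags.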
+ pub fn new(tcx: TyCtxt<'_>, did: DefId) -> ReprOptions {
+ let mut flags = ReprFlags::empty();
+ let mut size = None;
+ let mut max_align: Option<Align> = None;
+ let mut min_pack: Option<Align> = None;
+
+ // Generate a deterministically-derived seed from the item's path hash
+ // to allow for cross-crate compilation to actually work
+ let mut field_shuffle_seed = tcx.def_path_hash(did).0.to_smaller_hash();
+
+        // If the user defined a custom seed for layout randomization, xor the item's
+        // path hash with the user-defined seed. This preserves determinism while
+        // still allowing users to further randomize layout generation, e.g. for fuzzing.
+ if let Some(user_seed) = tcx.sess.opts.unstable_opts.layout_seed {
+ field_shuffle_seed ^= user_seed;
+ }
+
+ for attr in tcx.get_attrs(did, sym::repr) {
+ for r in attr::parse_repr_attr(&tcx.sess, attr) {
+ flags.insert(match r {
+ attr::ReprC => ReprFlags::IS_C,
+ attr::ReprPacked(pack) => {
+ let pack = Align::from_bytes(pack as u64).unwrap();
+ min_pack = Some(if let Some(min_pack) = min_pack {
+ min_pack.min(pack)
+ } else {
+ pack
+ });
+ ReprFlags::empty()
+ }
+ attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
+ attr::ReprSimd => ReprFlags::IS_SIMD,
+ attr::ReprInt(i) => {
+ size = Some(i);
+ ReprFlags::empty()
+ }
+ attr::ReprAlign(align) => {
+ max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
+ ReprFlags::empty()
+ }
+ });
+ }
+ }
+
+ // If `-Z randomize-layout` was enabled for the type definition then we can
+ // consider performing layout randomization
+ if tcx.sess.opts.unstable_opts.randomize_layout {
+ flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
+ }
+
+ // This is here instead of layout because the choice must make it into metadata.
+ if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.def_path_str(did))) {
+ flags.insert(ReprFlags::IS_LINEAR);
+ }
+
+ Self { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
+ }
+
+ #[inline]
+ pub fn simd(&self) -> bool {
+ self.flags.contains(ReprFlags::IS_SIMD)
+ }
+
+ #[inline]
+ pub fn c(&self) -> bool {
+ self.flags.contains(ReprFlags::IS_C)
+ }
+
+ #[inline]
+ pub fn packed(&self) -> bool {
+ self.pack.is_some()
+ }
+
+ #[inline]
+ pub fn transparent(&self) -> bool {
+ self.flags.contains(ReprFlags::IS_TRANSPARENT)
+ }
+
+ #[inline]
+ pub fn linear(&self) -> bool {
+ self.flags.contains(ReprFlags::IS_LINEAR)
+ }
+
+ /// Returns the discriminant type, given these `repr` options.
+ /// This must only be called on enums!
+ pub fn discr_type(&self) -> attr::IntType {
+ self.int.unwrap_or(attr::SignedInt(ast::IntTy::Isize))
+ }
+
+    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
+ /// layout" optimizations, such as representing `Foo<&T>` as a
+ /// single pointer.
+ pub fn inhibit_enum_layout_opt(&self) -> bool {
+ self.c() || self.int.is_some()
+ }
+
+ /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
+ /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
+ pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
+ if let Some(pack) = self.pack {
+ if pack.bytes() == 1 {
+ return true;
+ }
+ }
+
+ self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
+ }
+
+ /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
+ /// was enabled for its declaration crate
+ pub fn can_randomize_type_layout(&self) -> bool {
+ !self.inhibit_struct_field_reordering_opt()
+ && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
+ }
+
+ /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
+ pub fn inhibit_union_abi_opt(&self) -> bool {
+ self.c()
+ }
+}
+
+impl<'tcx> FieldDef {
+ /// Returns the type of this field. The resulting type is not normalized. The `subst` is
+ /// typically obtained via the second field of [`TyKind::Adt`].
+ pub fn ty(&self, tcx: TyCtxt<'tcx>, subst: SubstsRef<'tcx>) -> Ty<'tcx> {
+ tcx.bound_type_of(self.did).subst(tcx, subst)
+ }
+
+    /// Computes the `Ident` of this field by looking up its `Span`.
+ pub fn ident(&self, tcx: TyCtxt<'_>) -> Ident {
+ Ident::new(self.name, tcx.def_ident_span(self.did).unwrap())
+ }
+}
+
+pub type Attributes<'tcx> = impl Iterator<Item = &'tcx ast::Attribute>;
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum ImplOverlapKind {
+ /// These impls are always allowed to overlap.
+ Permitted {
+ /// Whether or not the impl is permitted due to the trait being a `#[marker]` trait
+ marker: bool,
+ },
+ /// These impls are allowed to overlap, but that raises
+ /// an issue #33140 future-compatibility warning.
+ ///
+ /// Some background: in Rust 1.0, the trait-object types `Send + Sync` (today's
+ /// `dyn Send + Sync`) and `Sync + Send` (now `dyn Sync + Send`) were different.
+ ///
+ /// The widely-used version 0.1.0 of the crate `traitobject` had accidentally relied
+    /// on that difference, making code that reduces to the following set of impls:
+ ///
+ /// ```compile_fail,(E0119)
+ /// trait Trait {}
+ /// impl Trait for dyn Send + Sync {}
+ /// impl Trait for dyn Sync + Send {}
+ /// ```
+ ///
+ /// Obviously, once we made these types be identical, that code causes a coherence
+ /// error and a fairly big headache for us. However, luckily for us, the trait
+ /// `Trait` used in this case is basically a marker trait, and therefore having
+ /// overlapping impls for it is sound.
+ ///
+ /// To handle this, we basically regard the trait as a marker trait, with an additional
+ /// future-compatibility warning. To avoid accidentally "stabilizing" this feature,
+ /// it has the following restrictions:
+ ///
+    /// 1. The trait must indeed be a marker-like trait (i.e., have no items), and
+    ///    both impls must be positive.
+ /// 2. The trait-ref of both impls must be equal.
+ /// 3. The trait-ref of both impls must be a trait object type consisting only of
+ /// marker traits.
+ /// 4. Neither of the impls can have any where-clauses.
+ ///
+ /// Once `traitobject` 0.1.0 is no longer an active concern, this hack can be removed.
+ Issue33140,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ pub fn typeck_body(self, body: hir::BodyId) -> &'tcx TypeckResults<'tcx> {
+ self.typeck(self.hir().body_owner_def_id(body))
+ }
+
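+    /// Returns an iterator over the associated functions of trait `id` that
+    /// have a default (provided) body, in definition order.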
+ pub fn provided_trait_methods(self, id: DefId) -> impl 'tcx + Iterator<Item = &'tcx AssocItem> {
+ self.associated_items(id)
+ .in_definition_order()
+ .filter(move |item| item.kind == AssocKind::Fn && item.defaultness(self).has_value())
+ }
+
+ /// Look up the name of a definition across crates. This does not look at HIR.
+ pub fn opt_item_name(self, def_id: DefId) -> Option<Symbol> {
+ if let Some(cnum) = def_id.as_crate_root() {
+ Some(self.crate_name(cnum))
+ } else {
+ let def_key = self.def_key(def_id);
+ match def_key.disambiguated_data.data {
+ // The name of a constructor is that of its parent.
+ rustc_hir::definitions::DefPathData::Ctor => self
+ .opt_item_name(DefId { krate: def_id.krate, index: def_key.parent.unwrap() }),
+ // The name of opaque types only exists in HIR.
+ rustc_hir::definitions::DefPathData::ImplTrait
+ if let Some(def_id) = def_id.as_local() =>
+ self.hir().opt_name(self.hir().local_def_id_to_hir_id(def_id)),
+ _ => def_key.get_opt_name(),
+ }
+ }
+ }
+
+ /// Look up the name of a definition across crates. This does not look at HIR.
+ ///
+ /// This method will ICE if the corresponding item does not have a name. In these cases, use
+ /// [`opt_item_name`] instead.
+ ///
+ /// [`opt_item_name`]: Self::opt_item_name
+ pub fn item_name(self, id: DefId) -> Symbol {
+ self.opt_item_name(id).unwrap_or_else(|| {
+ bug!("item_name: no name for {:?}", self.def_path(id));
+ })
+ }
+
+ /// Look up the name and span of a definition.
+ ///
+ /// See [`item_name`][Self::item_name] for more information.
+ pub fn opt_item_ident(self, def_id: DefId) -> Option<Ident> {
+ let def = self.opt_item_name(def_id)?;
+ let span = def_id
+ .as_local()
+ .and_then(|id| self.def_ident_span(id))
+ .unwrap_or(rustc_span::DUMMY_SP);
+ Some(Ident::new(def, span))
+ }
+
+ pub fn opt_associated_item(self, def_id: DefId) -> Option<&'tcx AssocItem> {
+ if let DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy = self.def_kind(def_id) {
+ Some(self.associated_item(def_id))
+ } else {
+ None
+ }
+ }
+
+ pub fn field_index(self, hir_id: hir::HirId, typeck_results: &TypeckResults<'_>) -> usize {
+ typeck_results.field_indices().get(hir_id).cloned().expect("no index for a field")
+ }
+
+ pub fn find_field_index(self, ident: Ident, variant: &VariantDef) -> Option<usize> {
+ variant
+ .fields
+ .iter()
+ .position(|field| self.hygienic_eq(ident, field.ident(self), variant.def_id))
+ }
+
+ /// Returns `Some` if the impls have the same polarity and the trait either
+ /// has no items or is annotated `#[marker]` (which prevents item overrides).
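+ ///
+ /// A minimal sketch of the marker case (illustrative only; `ignore`d because it
+ /// relies on the unstable `marker_trait_attr` feature):
+ ///
+ /// ```ignore (illustrative)
+ /// #![feature(marker_trait_attr)]
+ /// #[marker]
+ /// trait Marker {}
+ /// impl<T: Clone> Marker for T {}
+ /// impl<T: Default> Marker for T {} // overlap is `Permitted { marker: true }`
+ /// ```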
+ pub fn impls_are_allowed_to_overlap(
+ self,
+ def_id1: DefId,
+ def_id2: DefId,
+ ) -> Option<ImplOverlapKind> {
+ // If either trait impl references an error, they're allowed to overlap,
+ // as one of them essentially doesn't exist.
+ if self.impl_trait_ref(def_id1).map_or(false, |tr| tr.references_error())
+ || self.impl_trait_ref(def_id2).map_or(false, |tr| tr.references_error())
+ {
+ return Some(ImplOverlapKind::Permitted { marker: false });
+ }
+
+ match (self.impl_polarity(def_id1), self.impl_polarity(def_id2)) {
+ (ImplPolarity::Reservation, _) | (_, ImplPolarity::Reservation) => {
+ // `#[rustc_reservation_impl]` impls don't overlap with anything
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) = Some(Permitted) (reservations)",
+ def_id1, def_id2
+ );
+ return Some(ImplOverlapKind::Permitted { marker: false });
+ }
+ (ImplPolarity::Positive, ImplPolarity::Negative)
+ | (ImplPolarity::Negative, ImplPolarity::Positive) => {
+ // `impl AutoTrait for Type` + `impl !AutoTrait for Type`
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) - None (differing polarities)",
+ def_id1, def_id2
+ );
+ return None;
+ }
+ (ImplPolarity::Positive, ImplPolarity::Positive)
+ | (ImplPolarity::Negative, ImplPolarity::Negative) => {}
+ };
+
+ let is_marker_overlap = {
+ let is_marker_impl = |def_id: DefId| -> bool {
+ let trait_ref = self.impl_trait_ref(def_id);
+ trait_ref.map_or(false, |tr| self.trait_def(tr.def_id).is_marker)
+ };
+ is_marker_impl(def_id1) && is_marker_impl(def_id2)
+ };
+
+ if is_marker_overlap {
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) = Some(Permitted) (marker overlap)",
+ def_id1, def_id2
+ );
+ Some(ImplOverlapKind::Permitted { marker: true })
+ } else {
+ if let Some(self_ty1) = self.issue33140_self_ty(def_id1) {
+ if let Some(self_ty2) = self.issue33140_self_ty(def_id2) {
+ if self_ty1 == self_ty2 {
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) - issue #33140 HACK",
+ def_id1, def_id2
+ );
+ return Some(ImplOverlapKind::Issue33140);
+ } else {
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) - found {:?} != {:?}",
+ def_id1, def_id2, self_ty1, self_ty2
+ );
+ }
+ }
+ }
+
+ debug!("impls_are_allowed_to_overlap({:?}, {:?}) = None", def_id1, def_id2);
+ None
+ }
+ }
+
+ /// Returns the `ty::VariantDef` if `res` refers to a struct, a variant, or
+ /// one of their constructors; panics otherwise.
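+ ///
+ /// For example (illustrative), the `Res` for the variant constructor
+ /// `Option::Some` resolves here to the `VariantDef` of `Some` taken from
+ /// `Option`'s `AdtDef`.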
+ pub fn expect_variant_res(self, res: Res) -> &'tcx VariantDef {
+ match res {
+ Res::Def(DefKind::Variant, did) => {
+ let enum_did = self.parent(did);
+ self.adt_def(enum_did).variant_with_id(did)
+ }
+ Res::Def(DefKind::Struct | DefKind::Union, did) => self.adt_def(did).non_enum_variant(),
+ Res::Def(DefKind::Ctor(CtorOf::Variant, ..), variant_ctor_did) => {
+ let variant_did = self.parent(variant_ctor_did);
+ let enum_did = self.parent(variant_did);
+ self.adt_def(enum_did).variant_with_ctor_id(variant_ctor_did)
+ }
+ Res::Def(DefKind::Ctor(CtorOf::Struct, ..), ctor_did) => {
+ let struct_did = self.parent(ctor_did);
+ self.adt_def(struct_did).non_enum_variant()
+ }
+ _ => bug!("expect_variant_res used with unexpected res {:?}", res),
+ }
+ }
+
+ /// Returns the possibly-auto-generated MIR of a `(DefId, Subst)` pair.
+ #[instrument(skip(self), level = "debug")]
+ pub fn instance_mir(self, instance: ty::InstanceDef<'tcx>) -> &'tcx Body<'tcx> {
+ match instance {
+ ty::InstanceDef::Item(def) => {
+ debug!("calling def_kind on def: {:?}", def);
+ let def_kind = self.def_kind(def.did);
+ debug!("returned from def_kind: {:?}", def_kind);
+ match def_kind {
+ DefKind::Const
+ | DefKind::Static(..)
+ | DefKind::AssocConst
+ | DefKind::Ctor(..)
+ | DefKind::AnonConst
+ | DefKind::InlineConst => self.mir_for_ctfe_opt_const_arg(def),
+ // If the caller wants `mir_for_ctfe` of a function, they should not be using
+ // `instance_mir`, so we'll assume a const fn also wants the optimized version.
+ _ => {
+ assert_eq!(def.const_param_did, None);
+ self.optimized_mir(def.did)
+ }
+ }
+ }
+ ty::InstanceDef::VTableShim(..)
+ | ty::InstanceDef::ReifyShim(..)
+ | ty::InstanceDef::Intrinsic(..)
+ | ty::InstanceDef::FnPtrShim(..)
+ | ty::InstanceDef::Virtual(..)
+ | ty::InstanceDef::ClosureOnceShim { .. }
+ | ty::InstanceDef::DropGlue(..)
+ | ty::InstanceDef::CloneShim(..) => self.mir_shims(instance),
+ }
+ }
+
+ // FIXME(@lcnr): Remove this function.
+ pub fn get_attrs_unchecked(self, did: DefId) -> &'tcx [ast::Attribute] {
+ if let Some(did) = did.as_local() {
+ self.hir().attrs(self.hir().local_def_id_to_hir_id(did))
+ } else {
+ self.item_attrs(did)
+ }
+ }
+
+ /// Gets all attributes with the given name.
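+ ///
+ /// A sketch of typical usage (assuming a `tcx: TyCtxt<'_>` and a `def_id` in
+ /// scope; `ignore`d since `rustc_middle` examples cannot run as doctests):
+ ///
+ /// ```ignore (illustrative)
+ /// let is_inline = tcx.get_attrs(def_id, sym::inline).next().is_some();
+ /// ```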
+ pub fn get_attrs(self, did: DefId, attr: Symbol) -> ty::Attributes<'tcx> {
+ let filter_fn = move |a: &&ast::Attribute| a.has_name(attr);
+ if let Some(did) = did.as_local() {
+ self.hir().attrs(self.hir().local_def_id_to_hir_id(did)).iter().filter(filter_fn)
+ } else if cfg!(debug_assertions) && rustc_feature::is_builtin_only_local(attr) {
+ bug!("tried to access the `only_local` attribute `{}` from an extern crate", attr);
+ } else {
+ self.item_attrs(did).iter().filter(filter_fn)
+ }
+ }
+
+ pub fn get_attr(self, did: DefId, attr: Symbol) -> Option<&'tcx ast::Attribute> {
+ self.get_attrs(did, attr).next()
+ }
+
+ /// Determines whether an item is annotated with an attribute.
+ pub fn has_attr(self, did: DefId, attr: Symbol) -> bool {
+ if cfg!(debug_assertions) && !did.is_local() && rustc_feature::is_builtin_only_local(attr) {
+ bug!("tried to access the `only_local` attribute `{}` from an extern crate", attr);
+ } else {
+ self.get_attrs(did, attr).next().is_some()
+ }
+ }
+
+ /// Returns `true` if this is an `auto trait`.
+ pub fn trait_is_auto(self, trait_def_id: DefId) -> bool {
+ self.trait_def(trait_def_id).has_auto_impl
+ }
+
+ /// Returns the layout of a generator. The layout might be unavailable if the
+ /// generator is tainted by errors.
+ pub fn generator_layout(self, def_id: DefId) -> Option<&'tcx GeneratorLayout<'tcx>> {
+ self.optimized_mir(def_id).generator_layout()
+ }
+
+ /// Given the `DefId` of an impl, returns the `DefId` of the trait it implements.
+ /// If it implements no trait, returns `None`.
+ pub fn trait_id_of_impl(self, def_id: DefId) -> Option<DefId> {
+ self.impl_trait_ref(def_id).map(|tr| tr.def_id)
+ }
+
+ /// If the given `DefId` describes an item belonging to a trait,
+ /// returns the `DefId` of the trait that the trait item belongs to;
+ /// otherwise, returns `None`.
+ pub fn trait_of_item(self, def_id: DefId) -> Option<DefId> {
+ if let DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy = self.def_kind(def_id) {
+ let parent = self.parent(def_id);
+ if let DefKind::Trait | DefKind::TraitAlias = self.def_kind(parent) {
+ return Some(parent);
+ }
+ }
+ None
+ }
+
+ /// If the given `DefId` describes a method belonging to an impl, returns the
+ /// `DefId` of the impl that the method belongs to; otherwise, returns `None`.
+ pub fn impl_of_method(self, def_id: DefId) -> Option<DefId> {
+ if let DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy = self.def_kind(def_id) {
+ let parent = self.parent(def_id);
+ if let DefKind::Impl = self.def_kind(parent) {
+ return Some(parent);
+ }
+ }
+ None
+ }
+
+ /// Returns `true` if the given `DefId` is an automatically derived implementation,
+ /// i.e., one annotated with `#[automatically_derived]`.
+ pub fn is_builtin_derive(self, def_id: DefId) -> bool {
+ self.has_attr(def_id, sym::automatically_derived)
+ }
+
+ /// Looks up the span of `impl_did` if the impl is local; otherwise returns `Err`
+ /// with the name of the crate containing the impl.
+ pub fn span_of_impl(self, impl_did: DefId) -> Result<Span, Symbol> {
+ if let Some(impl_did) = impl_did.as_local() {
+ Ok(self.def_span(impl_did))
+ } else {
+ Err(self.crate_name(impl_did.krate))
+ }
+ }
+
+ /// Hygienically compares a use-site name (`use_name`) for a field or an associated item with
+ /// its supposed definition name (`def_name`). The method also needs the `DefId` of the
+ /// supposed definition's parent/scope to perform the comparison.
+ pub fn hygienic_eq(self, use_name: Ident, def_name: Ident, def_parent_def_id: DefId) -> bool {
+ // We could use `Ident::eq` here, but we deliberately don't. The name
+ // comparison fails frequently, and we want to avoid the expensive
+ // `normalize_to_macros_2_0()` calls required for the span comparison whenever possible.
+ use_name.name == def_name.name
+ && use_name
+ .span
+ .ctxt()
+ .hygienic_eq(def_name.span.ctxt(), self.expn_that_defined(def_parent_def_id))
+ }
+
+ pub fn adjust_ident(self, mut ident: Ident, scope: DefId) -> Ident {
+ ident.span.normalize_to_macros_2_0_and_adjust(self.expn_that_defined(scope));
+ ident
+ }
+
+ pub fn adjust_ident_and_get_scope(
+ self,
+ mut ident: Ident,
+ scope: DefId,
+ block: hir::HirId,
+ ) -> (Ident, DefId) {
+ let scope = ident
+ .span
+ .normalize_to_macros_2_0_and_adjust(self.expn_that_defined(scope))
+ .and_then(|actual_expansion| actual_expansion.expn_data().parent_module)
+ .unwrap_or_else(|| self.parent_module(block).to_def_id());
+ (ident, scope)
+ }
+
+ pub fn is_object_safe(self, key: DefId) -> bool {
+ self.object_safety_violations(key).is_empty()
+ }
+
+ #[inline]
+ pub fn is_const_fn_raw(self, def_id: DefId) -> bool {
+ matches!(self.def_kind(def_id), DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(..))
+ && self.constness(def_id) == hir::Constness::Const
+ }
+
+ #[inline]
+ pub fn is_const_default_method(self, def_id: DefId) -> bool {
+ matches!(self.trait_of_item(def_id), Some(trait_id) if self.has_attr(trait_id, sym::const_trait))
+ }
+}
+
+/// Yields the parent function's `LocalDefId` if `def_id` is an `impl Trait` definition.
+pub fn is_impl_trait_defn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<LocalDefId> {
+ let def_id = def_id.as_local()?;
+ if let Node::Item(item) = tcx.hir().get_by_def_id(def_id) {
+ if let hir::ItemKind::OpaqueTy(ref opaque_ty) = item.kind {
+ return match opaque_ty.origin {
+ hir::OpaqueTyOrigin::FnReturn(parent) | hir::OpaqueTyOrigin::AsyncFn(parent) => {
+ Some(parent)
+ }
+ hir::OpaqueTyOrigin::TyAlias => None,
+ };
+ }
+ }
+ None
+}
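+
+// Example (illustrative): for `fn f() -> impl Sized { 0 }`, the opaque type's
+// `DefId` maps back to `f`'s `LocalDefId` here via `OpaqueTyOrigin::FnReturn`.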
+
+pub fn int_ty(ity: ast::IntTy) -> IntTy {
+ match ity {
+ ast::IntTy::Isize => IntTy::Isize,
+ ast::IntTy::I8 => IntTy::I8,
+ ast::IntTy::I16 => IntTy::I16,
+ ast::IntTy::I32 => IntTy::I32,
+ ast::IntTy::I64 => IntTy::I64,
+ ast::IntTy::I128 => IntTy::I128,
+ }
+}
+
+pub fn uint_ty(uty: ast::UintTy) -> UintTy {
+ match uty {
+ ast::UintTy::Usize => UintTy::Usize,
+ ast::UintTy::U8 => UintTy::U8,
+ ast::UintTy::U16 => UintTy::U16,
+ ast::UintTy::U32 => UintTy::U32,
+ ast::UintTy::U64 => UintTy::U64,
+ ast::UintTy::U128 => UintTy::U128,
+ }
+}
+
+pub fn float_ty(fty: ast::FloatTy) -> FloatTy {
+ match fty {
+ ast::FloatTy::F32 => FloatTy::F32,
+ ast::FloatTy::F64 => FloatTy::F64,
+ }
+}
+
+pub fn ast_int_ty(ity: IntTy) -> ast::IntTy {
+ match ity {
+ IntTy::Isize => ast::IntTy::Isize,
+ IntTy::I8 => ast::IntTy::I8,
+ IntTy::I16 => ast::IntTy::I16,
+ IntTy::I32 => ast::IntTy::I32,
+ IntTy::I64 => ast::IntTy::I64,
+ IntTy::I128 => ast::IntTy::I128,
+ }
+}
+
+pub fn ast_uint_ty(uty: UintTy) -> ast::UintTy {
+ match uty {
+ UintTy::Usize => ast::UintTy::Usize,
+ UintTy::U8 => ast::UintTy::U8,
+ UintTy::U16 => ast::UintTy::U16,
+ UintTy::U32 => ast::UintTy::U32,
+ UintTy::U64 => ast::UintTy::U64,
+ UintTy::U128 => ast::UintTy::U128,
+ }
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ closure::provide(providers);
+ context::provide(providers);
+ erase_regions::provide(providers);
+ layout::provide(providers);
+ util::provide(providers);
+ print::provide(providers);
+ super::util::bug::provide(providers);
+ super::middle::provide(providers);
+ *providers = ty::query::Providers {
+ trait_impls_of: trait_def::trait_impls_of_provider,
+ incoherent_impls: trait_def::incoherent_impls_provider,
+ type_uninhabited_from: inhabitedness::type_uninhabited_from,
+ const_param_default: consts::const_param_default,
+ vtable_allocation: vtable::vtable_allocation_provider,
+ ..*providers
+ };
+}
+
+/// A map for the local crate mapping each type to a vector of its
+/// inherent impls. This is not meant to be used outside of coherence;
+/// rather, you should request the vector for a specific type via
+/// `tcx.inherent_impls(def_id)` so as to minimize your dependencies
+/// (constructing this map requires touching the entire crate).
+#[derive(Clone, Debug, Default, HashStable)]
+pub struct CrateInherentImpls {
+ pub inherent_impls: LocalDefIdMap<Vec<DefId>>,
+ pub incoherent_impls: FxHashMap<SimplifiedType, Vec<LocalDefId>>,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, HashStable)]
+pub struct SymbolName<'tcx> {
+ /// `&str` gives a consistent ordering, which ensures reproducible builds.
+ pub name: &'tcx str,
+}
+
+impl<'tcx> SymbolName<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, name: &str) -> SymbolName<'tcx> {
+ SymbolName {
+ name: unsafe { str::from_utf8_unchecked(tcx.arena.alloc_slice(name.as_bytes())) },
+ }
+ }
+}
+
+impl<'tcx> fmt::Display for SymbolName<'tcx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.name, fmt)
+ }
+}
+
+impl<'tcx> fmt::Debug for SymbolName<'tcx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.name, fmt)
+ }
+}
+
+#[derive(Debug, Default, Copy, Clone)]
+pub struct FoundRelationships {
+ /// This is true if we identified that this Ty (`?T`) is found in a `?T: Foo`
+ /// obligation, where:
+ ///
+ /// * `Foo` is not `Sized`
+ /// * `(): Foo` may be satisfied
+ pub self_in_trait: bool,
+ /// This is true if we identified that this Ty (`?T`) is found in a
+ /// `<_ as _>::AssocType = ?T` projection predicate.
+ pub output: bool,
+}
+
+/// The constituent parts of a type-level constant of kind ADT or array.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct DestructuredConst<'tcx> {
+ pub variant: Option<VariantIdx>,
+ pub fields: &'tcx [ty::Const<'tcx>],
+}
diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
new file mode 100644
index 000000000..9d8a81165
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
@@ -0,0 +1,283 @@
+//! Methods for normalizing when you don't care about regions (and
+//! aren't doing type inference). If either of those things doesn't
+//! apply to you, use `infcx.normalize(...)`.
+//!
+//! The methods in this file use a `TypeFolder` to recursively process
+//! contents, invoking the underlying
+//! `normalize_generic_arg_after_erasing_regions` query for each type
+//! or constant found within. (This underlying query is what is cached.)
+
+use crate::mir;
+use crate::traits::query::NoSolution;
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder};
+use crate::ty::subst::{Subst, SubstsRef};
+use crate::ty::{self, EarlyBinder, Ty, TyCtxt};
+
+#[derive(Debug, Copy, Clone, HashStable, TyEncodable, TyDecodable)]
+pub enum NormalizationError<'tcx> {
+ Type(Ty<'tcx>),
+ Const(ty::Const<'tcx>),
+ ConstantKind(mir::ConstantKind<'tcx>),
+}
+
+impl<'tcx> NormalizationError<'tcx> {
+ pub fn get_type_for_failure(&self) -> String {
+ match self {
+ NormalizationError::Type(t) => format!("{}", t),
+ NormalizationError::Const(c) => format!("{}", c),
+ NormalizationError::ConstantKind(ck) => format!("{}", ck),
+ }
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Erase the regions in `value` and then fully normalize all the
+ /// types found within. The result will also have regions erased.
+ ///
+ /// This should only be used outside of type inference; in particular,
+ /// it assumes that normalization will succeed.
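+ ///
+ /// A sketch of the effect (illustrative; assumes a real `tcx` and `param_env`):
+ ///
+ /// ```ignore (illustrative)
+ /// // With `T = std::vec::IntoIter<u32>` already substituted in, a projection
+ /// // like `<T as Iterator>::Item` normalizes to `u32`, with regions erased:
+ /// let normalized_ty = tcx.normalize_erasing_regions(param_env, ty);
+ /// ```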
+ pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!(
+ "normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
+ std::any::type_name::<T>(),
+ value,
+ param_env,
+ );
+
+ // Erase first before we do the real query -- this keeps the
+ // cache from being too polluted.
+ let value = self.erase_regions(value);
+ debug!(?value);
+
+ if !value.has_projections() {
+ value
+ } else {
+ value.fold_with(&mut NormalizeAfterErasingRegionsFolder { tcx: self, param_env })
+ }
+ }
+
+ /// Tries to erase the regions in `value` and then fully normalize all the
+ /// types found within. The result will also have regions erased.
+ ///
+ /// Contrary to `normalize_erasing_regions` this function does not assume that normalization
+ /// succeeds.
+ pub fn try_normalize_erasing_regions<T>(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> Result<T, NormalizationError<'tcx>>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!(
+ "try_normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
+ std::any::type_name::<T>(),
+ value,
+ param_env,
+ );
+
+ // Erase first before we do the real query -- this keeps the
+ // cache from being too polluted.
+ let value = self.erase_regions(value);
+ debug!(?value);
+
+ if !value.has_projections() {
+ Ok(value)
+ } else {
+ let mut folder = TryNormalizeAfterErasingRegionsFolder::new(self, param_env);
+ value.try_fold_with(&mut folder)
+ }
+ }
+
+ /// If you have a `Binder<'tcx, T>`, you can do this to strip out the
+ /// late-bound regions and then normalize the result, yielding up
+ /// a `T` (with regions erased). This is appropriate when the
+ /// binder is being instantiated at the call site.
+ ///
+ /// N.B., currently, higher-ranked type bounds inhibit
+ /// normalization. Therefore, each time we erase them in
+ /// codegen, we need to normalize the contents.
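+ ///
+ /// Sketch (illustrative; assumes a `poly_sig: ty::Binder<'tcx, FnSig<'tcx>>`):
+ ///
+ /// ```ignore (illustrative)
+ /// // For a `for<'a> fn(&'a T) -> &'a T` binder, this strips `'a` and then
+ /// // normalizes the contents, yielding a region-erased `FnSig`.
+ /// let sig = tcx.normalize_erasing_late_bound_regions(param_env, poly_sig);
+ /// ```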
+ pub fn normalize_erasing_late_bound_regions<T>(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ value: ty::Binder<'tcx, T>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let value = self.erase_late_bound_regions(value);
+ self.normalize_erasing_regions(param_env, value)
+ }
+
+ /// If you have a `Binder<'tcx, T>`, you can do this to strip out the
+ /// late-bound regions and then normalize the result, yielding up
+ /// a `T` (with regions erased). This is appropriate when the
+ /// binder is being instantiated at the call site.
+ ///
+ /// N.B., currently, higher-ranked type bounds inhibit
+ /// normalization. Therefore, each time we erase them in
+ /// codegen, we need to normalize the contents.
+ pub fn try_normalize_erasing_late_bound_regions<T>(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ value: ty::Binder<'tcx, T>,
+ ) -> Result<T, NormalizationError<'tcx>>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let value = self.erase_late_bound_regions(value);
+ self.try_normalize_erasing_regions(param_env, value)
+ }
+
+ /// Monomorphizes a type from the AST by first applying the
+ /// in-scope substitutions and then normalizing any associated
+ /// types.
+ /// Panics if normalization fails. If normalization might fail,
+ /// use `try_subst_and_normalize_erasing_regions` instead.
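+ ///
+ /// Sketch (illustrative; `instance`, `param_env`, and `generic_ty` are assumed
+ /// to be in scope):
+ ///
+ /// ```ignore (illustrative)
+ /// // Substitute the instance's substs into a type taken from the generic MIR
+ /// // body, then normalize away any associated-type projections:
+ /// let ty = tcx.subst_and_normalize_erasing_regions(instance.substs, param_env, generic_ty);
+ /// ```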
+ pub fn subst_and_normalize_erasing_regions<T>(
+ self,
+ param_substs: SubstsRef<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!(
+ "subst_and_normalize_erasing_regions(\
+ param_substs={:?}, \
+ value={:?}, \
+ param_env={:?})",
+ param_substs, value, param_env,
+ );
+ let substituted = EarlyBinder(value).subst(self, param_substs);
+ self.normalize_erasing_regions(param_env, substituted)
+ }
+
+ /// Monomorphizes a type from the AST by first applying the
+ /// in-scope substitutions and then trying to normalize any associated
+ /// types. Contrary to `subst_and_normalize_erasing_regions` this does
+ /// not assume that normalization succeeds.
+ pub fn try_subst_and_normalize_erasing_regions<T>(
+ self,
+ param_substs: SubstsRef<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> Result<T, NormalizationError<'tcx>>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!(
+ "subst_and_normalize_erasing_regions(\
+ param_substs={:?}, \
+ value={:?}, \
+ param_env={:?})",
+ param_substs, value, param_env,
+ );
+ let substituted = EarlyBinder(value).subst(self, param_substs);
+ self.try_normalize_erasing_regions(param_env, substituted)
+ }
+}
+
+struct NormalizeAfterErasingRegionsFolder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> NormalizeAfterErasingRegionsFolder<'tcx> {
+ #[instrument(skip(self), level = "debug")]
+ fn normalize_generic_arg_after_erasing_regions(
+ &self,
+ arg: ty::GenericArg<'tcx>,
+ ) -> ty::GenericArg<'tcx> {
+ let arg = self.param_env.and(arg);
+ debug!(?arg);
+
+ self.tcx.try_normalize_generic_arg_after_erasing_regions(arg).unwrap_or_else(|_| bug!(
+ "Failed to normalize {:?}, maybe try to call `try_normalize_erasing_regions` instead",
+ arg.value
+ ))
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.normalize_generic_arg_after_erasing_regions(ty.into()).expect_ty()
+ }
+
+ fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ self.normalize_generic_arg_after_erasing_regions(c.into()).expect_const()
+ }
+
+ #[inline]
+ fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
+ // FIXME: This *probably* needs canonicalization too!
+ let arg = self.param_env.and(c);
+ self.tcx
+ .try_normalize_mir_const_after_erasing_regions(arg)
+ .unwrap_or_else(|_| bug!("failed to normalize {:?}", c))
+ }
+}
+
+struct TryNormalizeAfterErasingRegionsFolder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> TryNormalizeAfterErasingRegionsFolder<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+ TryNormalizeAfterErasingRegionsFolder { tcx, param_env }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn try_normalize_generic_arg_after_erasing_regions(
+ &self,
+ arg: ty::GenericArg<'tcx>,
+ ) -> Result<ty::GenericArg<'tcx>, NoSolution> {
+ let arg = self.param_env.and(arg);
+ debug!(?arg);
+
+ self.tcx.try_normalize_generic_arg_after_erasing_regions(arg)
+ }
+}
+
+impl<'tcx> FallibleTypeFolder<'tcx> for TryNormalizeAfterErasingRegionsFolder<'tcx> {
+ type Error = NormalizationError<'tcx>;
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn try_fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
+ match self.try_normalize_generic_arg_after_erasing_regions(ty.into()) {
+ Ok(t) => Ok(t.expect_ty()),
+ Err(_) => Err(NormalizationError::Type(ty)),
+ }
+ }
+
+ fn try_fold_const(&mut self, c: ty::Const<'tcx>) -> Result<ty::Const<'tcx>, Self::Error> {
+ match self.try_normalize_generic_arg_after_erasing_regions(c.into()) {
+ Ok(t) => Ok(t.expect_const()),
+ Err(_) => Err(NormalizationError::Const(c)),
+ }
+ }
+
+ fn try_fold_mir_const(
+ &mut self,
+ c: mir::ConstantKind<'tcx>,
+ ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
+ // FIXME: This *probably* needs canonicalization too!
+ let arg = self.param_env.and(c);
+ match self.tcx.try_normalize_mir_const_after_erasing_regions(arg) {
+ Ok(c) => Ok(c),
+ Err(_) => Err(NormalizationError::ConstantKind(c)),
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs
new file mode 100644
index 000000000..e189ee2fc
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/parameterized.rs
@@ -0,0 +1,119 @@
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::{Idx, IndexVec};
+
+use crate::middle::exported_symbols::ExportedSymbol;
+use crate::mir::Body;
+use crate::ty::abstract_const::Node;
+use crate::ty::{
+ self, Const, FnSig, GeneratorDiagnosticData, GenericPredicates, Predicate, TraitRef, Ty,
+};
+
+pub trait ParameterizedOverTcx: 'static {
+ #[allow(unused_lifetimes)]
+ type Value<'tcx>;
+}
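+
+// Illustrative note: an impl such as `Ty<'static>: ParameterizedOverTcx` with
+// `type Value<'tcx> = Ty<'tcx>` lets metadata decoding store values erased to
+// `'static` and re-attach a concrete `'tcx` only at the use site, i.e.
+// `<Ty<'static> as ParameterizedOverTcx>::Value<'tcx>` is `Ty<'tcx>`.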
+
+impl<T: ParameterizedOverTcx> ParameterizedOverTcx for &'static [T] {
+ type Value<'tcx> = &'tcx [T::Value<'tcx>];
+}
+
+impl<T: ParameterizedOverTcx> ParameterizedOverTcx for Option<T> {
+ type Value<'tcx> = Option<T::Value<'tcx>>;
+}
+
+impl<A: ParameterizedOverTcx, B: ParameterizedOverTcx> ParameterizedOverTcx for (A, B) {
+ type Value<'tcx> = (A::Value<'tcx>, B::Value<'tcx>);
+}
+
+impl<I: Idx + 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for IndexVec<I, T> {
+ type Value<'tcx> = IndexVec<I, T::Value<'tcx>>;
+}
+
+impl<T: ParameterizedOverTcx> ParameterizedOverTcx for ty::Binder<'static, T> {
+ type Value<'tcx> = ty::Binder<'tcx, T::Value<'tcx>>;
+}
+
+#[macro_export]
+macro_rules! trivially_parameterized_over_tcx {
+ ($($ty:ty),+ $(,)?) => {
+ $(
+ impl $crate::ty::ParameterizedOverTcx for $ty {
+ #[allow(unused_lifetimes)]
+ type Value<'tcx> = $ty;
+ }
+ )*
+ }
+}
+
+trivially_parameterized_over_tcx! {
+ usize,
+ (),
+ u32,
+ std::string::String,
+ crate::metadata::ModChild,
+ crate::middle::codegen_fn_attrs::CodegenFnAttrs,
+ crate::middle::exported_symbols::SymbolExportInfo,
+ crate::mir::ConstQualifs,
+ ty::Generics,
+ ty::ImplPolarity,
+ ty::ReprOptions,
+ ty::TraitDef,
+ ty::Visibility,
+ ty::adjustment::CoerceUnsizedInfo,
+ ty::fast_reject::SimplifiedTypeGen<DefId>,
+ rustc_ast::Attribute,
+ rustc_ast::MacArgs,
+ rustc_attr::ConstStability,
+ rustc_attr::Deprecation,
+ rustc_attr::Stability,
+ rustc_hir::Constness,
+ rustc_hir::Defaultness,
+ rustc_hir::GeneratorKind,
+ rustc_hir::IsAsync,
+ rustc_hir::LangItem,
+ rustc_hir::def::DefKind,
+ rustc_hir::def_id::DefIndex,
+ rustc_hir::definitions::DefKey,
+ rustc_index::bit_set::FiniteBitSet<u32>,
+ rustc_session::cstore::ForeignModule,
+ rustc_session::cstore::LinkagePreference,
+ rustc_session::cstore::NativeLib,
+ rustc_span::DebuggerVisualizerFile,
+ rustc_span::ExpnData,
+ rustc_span::ExpnHash,
+ rustc_span::ExpnId,
+ rustc_span::SourceFile,
+ rustc_span::Span,
+ rustc_span::Symbol,
+ rustc_span::def_id::DefPathHash,
+ rustc_span::hygiene::SyntaxContextData,
+ rustc_span::symbol::Ident,
+ rustc_type_ir::Variance,
+}
+
+// HACK(compiler-errors): This macro rule can only take an ident,
+// not a path, due to parsing ambiguity. That means we have to
+// import all of these types above.
+#[macro_export]
+macro_rules! parameterized_over_tcx {
+ ($($ident:ident),+ $(,)?) => {
+ $(
+ impl $crate::ty::ParameterizedOverTcx for $ident<'static> {
+ type Value<'tcx> = $ident<'tcx>;
+ }
+ )*
+ }
+}
+
+parameterized_over_tcx! {
+ Ty,
+ FnSig,
+ GenericPredicates,
+ TraitRef,
+ Const,
+ Predicate,
+ GeneratorDiagnosticData,
+ Body,
+ Node,
+ ExportedSymbol,
+}
diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs
new file mode 100644
index 000000000..d57cf8f01
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/print/mod.rs
@@ -0,0 +1,327 @@
+use crate::ty::subst::{GenericArg, Subst};
+use crate::ty::{self, DefIdTree, Ty, TyCtxt};
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sso::SsoHashSet;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
+
+// `pretty` is a separate module only for organization.
+mod pretty;
+pub use self::pretty::*;
+
+// FIXME(eddyb) false positive, the lifetime parameters are used with `P: Printer<...>`.
+#[allow(unused_lifetimes)]
+pub trait Print<'tcx, P> {
+ type Output;
+ type Error;
+
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error>;
+}
+
+/// Interface for outputting user-facing "type-system entities"
+/// (paths, types, lifetimes, constants, etc.) as a side-effect
+/// (e.g. formatting, like `PrettyPrinter` implementors do) or by
+/// constructing some alternative representation (e.g. an AST),
+/// which the associated types allow passing through the methods.
+///
+/// For pretty-printing/formatting in particular, see `PrettyPrinter`.
+//
+// FIXME(eddyb) find a better name; this is more general than "printing".
+pub trait Printer<'tcx>: Sized {
+ type Error;
+
+ type Path;
+ type Region;
+ type Type;
+ type DynExistential;
+ type Const;
+
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn print_def_path(
+ self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ self.default_print_def_path(def_id, substs)
+ }
+
+ fn print_impl_path(
+ self,
+ impl_def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self.default_print_impl_path(impl_def_id, substs, self_ty, trait_ref)
+ }
+
+ fn print_region(self, region: ty::Region<'tcx>) -> Result<Self::Region, Self::Error>;
+
+ fn print_type(self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error>;
+
+ fn print_dyn_existential(
+ self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error>;
+
+ fn print_const(self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error>;
+
+ fn path_crate(self, cnum: CrateNum) -> Result<Self::Path, Self::Error>;
+
+ fn path_qualified(
+ self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error>;
+
+ fn path_append_impl(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error>;
+
+ fn path_append(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error>;
+
+ fn path_generic_args(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error>;
+
+ // Defaults (should not be overridden):
+
+ #[instrument(skip(self), level = "debug")]
+ fn default_print_def_path(
+ self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ let key = self.tcx().def_key(def_id);
+ debug!(?key);
+
+ match key.disambiguated_data.data {
+ DefPathData::CrateRoot => {
+ assert!(key.parent.is_none());
+ self.path_crate(def_id.krate)
+ }
+
+ DefPathData::Impl => {
+ let generics = self.tcx().generics_of(def_id);
+ let self_ty = self.tcx().bound_type_of(def_id);
+ let impl_trait_ref = self.tcx().bound_impl_trait_ref(def_id);
+ let (self_ty, impl_trait_ref) = if substs.len() >= generics.count() {
+ (
+ self_ty.subst(self.tcx(), substs),
+ impl_trait_ref.map(|i| i.subst(self.tcx(), substs)),
+ )
+ } else {
+ (self_ty.0, impl_trait_ref.map(|i| i.0))
+ };
+ self.print_impl_path(def_id, substs, self_ty, impl_trait_ref)
+ }
+
+ _ => {
+ let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id };
+
+ let mut parent_substs = substs;
+ let mut trait_qualify_parent = false;
+ if !substs.is_empty() {
+ let generics = self.tcx().generics_of(def_id);
+ parent_substs = &substs[..generics.parent_count.min(substs.len())];
+
+ match key.disambiguated_data.data {
+ // Closures' own generics are only captures, don't print them.
+ DefPathData::ClosureExpr => {}
+ // This covers both `DefKind::AnonConst` and `DefKind::InlineConst`.
+ // Anon consts don't have their own generics, and inline consts' own
+ // generics are their inferred types, so don't print them.
+ DefPathData::AnonConst => {}
+
+ // If we have any generic arguments to print, we do that
+ // on top of the same path, but without its own generics.
+ _ => {
+ if !generics.params.is_empty() && substs.len() >= generics.count() {
+ let args = generics.own_substs_no_defaults(self.tcx(), substs);
+ return self.path_generic_args(
+ |cx| cx.print_def_path(def_id, parent_substs),
+ args,
+ );
+ }
+ }
+ }
+
+ // FIXME(eddyb) try to move this into the parent's printing
+ // logic, instead of doing it when printing the child.
+ trait_qualify_parent = generics.has_self
+ && generics.parent == Some(parent_def_id)
+ && parent_substs.len() == generics.parent_count
+ && self.tcx().generics_of(parent_def_id).parent_count == 0;
+ }
+
+ self.path_append(
+ |cx: Self| {
+ if trait_qualify_parent {
+ let trait_ref = ty::TraitRef::new(
+ parent_def_id,
+ cx.tcx().intern_substs(parent_substs),
+ );
+ cx.path_qualified(trait_ref.self_ty(), Some(trait_ref))
+ } else {
+ cx.print_def_path(parent_def_id, parent_substs)
+ }
+ },
+ &key.disambiguated_data,
+ )
+ }
+ }
+ }
+
+ fn default_print_impl_path(
+ self,
+ impl_def_id: DefId,
+ _substs: &'tcx [GenericArg<'tcx>],
+ self_ty: Ty<'tcx>,
+ impl_trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ debug!(
+ "default_print_impl_path: impl_def_id={:?}, self_ty={}, impl_trait_ref={:?}",
+ impl_def_id, self_ty, impl_trait_ref
+ );
+
+ let key = self.tcx().def_key(impl_def_id);
+ let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
+
+ // Decide whether to print the parent path for the impl.
+ // Logically, since impls are global, it's never needed, but
+ // users may find it useful. Currently, we omit the parent if
+ // the impl is either in the same module as the self-type or
+ // as the trait.
+ let in_self_mod = match characteristic_def_id_of_type(self_ty) {
+ None => false,
+ Some(ty_def_id) => self.tcx().parent(ty_def_id) == parent_def_id,
+ };
+ let in_trait_mod = match impl_trait_ref {
+ None => false,
+ Some(trait_ref) => self.tcx().parent(trait_ref.def_id) == parent_def_id,
+ };
+
+ if !in_self_mod && !in_trait_mod {
+ // If the impl is not co-located with either self-type or
+ // trait-type, then fall back to a format that identifies
+ // the module more clearly.
+ self.path_append_impl(
+ |cx| cx.print_def_path(parent_def_id, &[]),
+ &key.disambiguated_data,
+ self_ty,
+ impl_trait_ref,
+ )
+ } else {
+ // Otherwise, try to give a good form that would be valid language
+ // syntax, preferably using associated item notation.
+ self.path_qualified(self_ty, impl_trait_ref)
+ }
+ }
+}
+
+/// As a heuristic, when we see an impl whose 'self type' is a type
+/// defined in the same module as the impl, we can omit the path to
+/// the impl itself. This function tries to find a "characteristic
+/// `DefId`" for a type. It's just a heuristic, so it makes some
+/// questionable decisions and we may want to adjust it later.
+///
+/// The visited set is needed to avoid full iteration over
+/// deeply nested tuples that have no `DefId`.
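+///
+/// For example (illustrative): for `&[Vec<u8>]` the characteristic `DefId` is
+/// that of `Vec`, found by peeling off the reference and slice layers.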
+fn characteristic_def_id_of_type_cached<'a>(
+ ty: Ty<'a>,
+ visited: &mut SsoHashSet<Ty<'a>>,
+) -> Option<DefId> {
+ match *ty.kind() {
+ ty::Adt(adt_def, _) => Some(adt_def.did()),
+
+ ty::Dynamic(data, ..) => data.principal_def_id(),
+
+ ty::Array(subty, _) | ty::Slice(subty) => {
+ characteristic_def_id_of_type_cached(subty, visited)
+ }
+
+ ty::RawPtr(mt) => characteristic_def_id_of_type_cached(mt.ty, visited),
+
+ ty::Ref(_, ty, _) => characteristic_def_id_of_type_cached(ty, visited),
+
+ ty::Tuple(ref tys) => tys.iter().find_map(|ty| {
+ if visited.insert(ty) {
+ return characteristic_def_id_of_type_cached(ty, visited);
+ }
+ None
+ }),
+
+ ty::FnDef(def_id, _)
+ | ty::Closure(def_id, _)
+ | ty::Generator(def_id, _, _)
+ | ty::Foreign(def_id) => Some(def_id),
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Str
+ | ty::FnPtr(_)
+ | ty::Projection(_)
+ | ty::Placeholder(..)
+ | ty::Param(_)
+ | ty::Opaque(..)
+ | ty::Infer(_)
+ | ty::Bound(..)
+ | ty::Error(_)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ | ty::Float(_) => None,
+ }
+}
+
+pub fn characteristic_def_id_of_type(ty: Ty<'_>) -> Option<DefId> {
+ characteristic_def_id_of_type_cached(ty, &mut SsoHashSet::new())
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Region<'tcx> {
+ type Output = P::Region;
+ type Error = P::Error;
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.print_region(*self)
+ }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for Ty<'tcx> {
+ type Output = P::Type;
+ type Error = P::Error;
+
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.print_type(*self)
+ }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P>
+ for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>
+{
+ type Output = P::DynExistential;
+ type Error = P::Error;
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.print_dyn_existential(self)
+ }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Const<'tcx> {
+ type Output = P::Const;
+ type Error = P::Error;
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.print_const(*self)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
new file mode 100644
index 000000000..7f2e81a71
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -0,0 +1,2789 @@
+use crate::mir::interpret::{AllocRange, GlobalAlloc, Pointer, Provenance, Scalar};
+use crate::ty::subst::{GenericArg, GenericArgKind, Subst};
+use crate::ty::{
+ self, ConstInt, DefIdTree, ParamConst, ScalarInt, Term, Ty, TyCtxt, TypeFoldable,
+ TypeSuperFoldable, TypeSuperVisitable, TypeVisitable,
+};
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_data_structures::sso::SsoHashSet;
+use rustc_hir as hir;
+use rustc_hir::def::{self, CtorKind, DefKind, Namespace};
+use rustc_hir::def_id::{DefId, DefIdSet, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathData};
+use rustc_session::config::TrimmedDefPaths;
+use rustc_session::cstore::{ExternCrate, ExternCrateSource};
+use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_target::abi::Size;
+use rustc_target::spec::abi::Abi;
+
+use std::cell::Cell;
+use std::char;
+use std::collections::BTreeMap;
+use std::convert::TryFrom;
+use std::fmt::{self, Write as _};
+use std::iter;
+use std::ops::{ControlFlow, Deref, DerefMut};
+
+// `pretty` is a separate module only for organization.
+use super::*;
+
+macro_rules! p {
+ (@$lit:literal) => {
+ write!(scoped_cx!(), $lit)?
+ };
+ (@write($($data:expr),+)) => {
+ write!(scoped_cx!(), $($data),+)?
+ };
+ (@print($x:expr)) => {
+ scoped_cx!() = $x.print(scoped_cx!())?
+ };
+ (@$method:ident($($arg:expr),*)) => {
+ scoped_cx!() = scoped_cx!().$method($($arg),*)?
+ };
+ ($($elem:tt $(($($args:tt)*))?),+) => {{
+ $(p!(@ $elem $(($($args)*))?);)+
+ }};
+}
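+// Example usage (illustrative; `ty` and `name` are placeholders): inside a
+// pretty-printing method with `define_scoped_cx!(self)` in scope, one can write
+//
+//     p!("&", print(ty), " as ", write("{}", name));
+//
+// which threads the printer through each step and `?`-propagates `fmt::Error`.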
+macro_rules! define_scoped_cx {
+ ($cx:ident) => {
+ #[allow(unused_macros)]
+ macro_rules! scoped_cx {
+ () => {
+ $cx
+ };
+ }
+ };
+}
+
+thread_local! {
+ static FORCE_IMPL_FILENAME_LINE: Cell<bool> = const { Cell::new(false) };
+ static SHOULD_PREFIX_WITH_CRATE: Cell<bool> = const { Cell::new(false) };
+ static NO_TRIMMED_PATH: Cell<bool> = const { Cell::new(false) };
+ static NO_QUERIES: Cell<bool> = const { Cell::new(false) };
+ static NO_VISIBLE_PATH: Cell<bool> = const { Cell::new(false) };
+}
+
+macro_rules! define_helper {
+ ($($(#[$a:meta])* fn $name:ident($helper:ident, $tl:ident);)+) => {
+ $(
+ #[must_use]
+ pub struct $helper(bool);
+
+ impl $helper {
+ pub fn new() -> $helper {
+ $helper($tl.with(|c| c.replace(true)))
+ }
+ }
+
+ $(#[$a])*
+ pub macro $name($e:expr) {
+ {
+ let _guard = $helper::new();
+ $e
+ }
+ }
+
+ impl Drop for $helper {
+ fn drop(&mut self) {
+ $tl.with(|c| c.set(self.0))
+ }
+ }
+ )+
+ }
+}
+
+define_helper!(
+ /// Avoids running any queries during any prints that occur
+ /// during the closure. This may alter the appearance of some
+ /// types (e.g. forcing verbose printing for opaque types).
+ /// This method is used during some queries (e.g. `explicit_item_bounds`
+ /// for opaque types), to ensure that any debug printing that
+ /// occurs during the query computation does not end up recursively
+ /// calling the same query.
+ fn with_no_queries(NoQueriesGuard, NO_QUERIES);
+ /// Force us to name impls with just the filename/line number. We
+ /// normally try to use types. But at some points, notably while printing
+ /// cycle errors, this can result in extra or suboptimal error output,
+ /// so this variable disables that check.
+ fn with_forced_impl_filename_line(ForcedImplGuard, FORCE_IMPL_FILENAME_LINE);
+ /// Adds the `crate::` prefix to paths where appropriate.
+ fn with_crate_prefix(CratePrefixGuard, SHOULD_PREFIX_WITH_CRATE);
+ /// Prevent path trimming if it is turned on. Path trimming affects the `Display` impl
+ /// of various rustc types; for example, `std::vec::Vec` would be trimmed to `Vec`
+ /// if no other `Vec` is found.
+ fn with_no_trimmed_paths(NoTrimmedGuard, NO_TRIMMED_PATH);
+ /// Prevent selection of visible paths. The `Display` impl of `DefId` will prefer
+ /// visible (public) reexports of types as paths.
+ fn with_no_visible_paths(NoVisibleGuard, NO_VISIBLE_PATH);
+);
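+
+// Example (illustrative): each macro defined above wraps an expression in a
+// guard that flips the corresponding thread-local for its duration, e.g.
+//
+//     let s = with_no_trimmed_paths!(format!("{}", ty));
+//
+// The previous value is restored when the guard is dropped.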
+
+/// The "region highlights" are used to control region printing during
+/// specific error messages. When a "region highlight" is enabled, it
+/// gives an alternate way to print specific regions. For now, we
+/// always print those regions using a number, so something like "`'0`".
+///
+/// Regions not selected by the region highlight mode are presently
+/// unaffected.
+#[derive(Copy, Clone)]
+pub struct RegionHighlightMode<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ /// If enabled, when we see the selected region, use "`'N`"
+ /// instead of the ordinary behavior.
+ highlight_regions: [Option<(ty::Region<'tcx>, usize)>; 3],
+
+ /// If enabled, when printing a "free region" that originated from
+ /// the given `ty::BoundRegionKind`, print it as "`'1`". Free regions that would ordinarily
+ /// have names print as normal.
+ ///
+ /// This is used when you have a signature like `fn foo(x: &u32,
+ /// y: &'a u32)` and we want to give a name to the region of the
+ /// reference `x`.
+ highlight_bound_region: Option<(ty::BoundRegionKind, usize)>,
+}
+
+impl<'tcx> RegionHighlightMode<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>) -> Self {
+ Self {
+ tcx,
+ highlight_regions: Default::default(),
+ highlight_bound_region: Default::default(),
+ }
+ }
+
+ /// If `region` and `number` are both `Some`, invokes
+ /// `highlighting_region`.
+ pub fn maybe_highlighting_region(
+ &mut self,
+ region: Option<ty::Region<'tcx>>,
+ number: Option<usize>,
+ ) {
+ if let Some(k) = region {
+ if let Some(n) = number {
+ self.highlighting_region(k, n);
+ }
+ }
+ }
+
+ /// Highlights the given `region` as `'N`.
+ pub fn highlighting_region(&mut self, region: ty::Region<'tcx>, number: usize) {
+ let num_slots = self.highlight_regions.len();
+ let first_avail_slot =
+ self.highlight_regions.iter_mut().find(|s| s.is_none()).unwrap_or_else(|| {
+ bug!("can only highlight {} placeholders at a time", num_slots,)
+ });
+ *first_avail_slot = Some((region, number));
+ }
+
+ /// Convenience wrapper for `highlighting_region`.
+ pub fn highlighting_region_vid(&mut self, vid: ty::RegionVid, number: usize) {
+ self.highlighting_region(self.tcx.mk_region(ty::ReVar(vid)), number)
+ }
+
+ /// Returns `Some(n)` with the number to use for the given region, if any.
+ fn region_highlighted(&self, region: ty::Region<'tcx>) -> Option<usize> {
+ self.highlight_regions.iter().find_map(|h| match h {
+ Some((r, n)) if *r == region => Some(*n),
+ _ => None,
+ })
+ }
+
+ /// Highlight the given bound region.
+ /// We can only highlight one bound region at a time. See
+ /// the field `highlight_bound_region` for more detailed notes.
+ pub fn highlighting_bound_region(&mut self, br: ty::BoundRegionKind, number: usize) {
+ assert!(self.highlight_bound_region.is_none());
+ self.highlight_bound_region = Some((br, number));
+ }
+}
+
+/// Trait for printers that pretty-print using `fmt::Write` to the printer.
+pub trait PrettyPrinter<'tcx>:
+ Printer<
+ 'tcx,
+ Error = fmt::Error,
+ Path = Self,
+ Region = Self,
+ Type = Self,
+ DynExistential = Self,
+ Const = Self,
+ > + fmt::Write
+{
+ /// Like `print_def_path` but for value paths.
+ fn print_value_path(
+ self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ self.print_def_path(def_id, substs)
+ }
+
+ fn in_binder<T>(self, value: &ty::Binder<'tcx, T>) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+ {
+ value.as_ref().skip_binder().print(self)
+ }
+
+ fn wrap_binder<T, F: FnOnce(&T, Self) -> Result<Self, fmt::Error>>(
+ self,
+ value: &ty::Binder<'tcx, T>,
+ f: F,
+ ) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+ {
+ f(value.as_ref().skip_binder(), self)
+ }
+
+ /// Prints comma-separated elements.
+ fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
+ {
+ if let Some(first) = elems.next() {
+ self = first.print(self)?;
+ for elem in elems {
+ self.write_str(", ")?;
+ self = elem.print(self)?;
+ }
+ }
+ Ok(self)
+ }
+
+ /// Prints `{f: t}` or `{f as t}` depending on the `conversion` argument.
+ fn typed_value(
+ mut self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ t: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ conversion: &str,
+ ) -> Result<Self::Const, Self::Error> {
+ self.write_str("{")?;
+ self = f(self)?;
+ self.write_str(conversion)?;
+ self = t(self)?;
+ self.write_str("}")?;
+ Ok(self)
+ }
+
+ /// Prints `<...>` around what `f` prints.
+ fn generic_delimiters(
+ self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ ) -> Result<Self, Self::Error>;
+
+ /// Returns `true` if the region should be printed in
+ /// optional positions, e.g., `&'a T` or `dyn Tr + 'b`.
+ /// This is typically the case for all non-`'_` regions.
+ fn should_print_region(&self, region: ty::Region<'tcx>) -> bool;
+
+ // Defaults (should not be overridden):
+
+ /// If possible, this returns a global path resolving to `def_id` that is visible
+ /// from at least one local module, and returns `true`. If the crate defining `def_id` is
+ /// declared with an `extern crate`, the path is guaranteed to use the `extern crate`.
+ fn try_print_visible_def_path(self, def_id: DefId) -> Result<(Self, bool), Self::Error> {
+ if NO_VISIBLE_PATH.with(|flag| flag.get()) {
+ return Ok((self, false));
+ }
+
+ let mut callers = Vec::new();
+ self.try_print_visible_def_path_recur(def_id, &mut callers)
+ }
+
+ /// Try to see if this path can be trimmed to a unique symbol name.
+ fn try_print_trimmed_def_path(
+ mut self,
+ def_id: DefId,
+ ) -> Result<(Self::Path, bool), Self::Error> {
+ if !self.tcx().sess.opts.unstable_opts.trim_diagnostic_paths
+ || matches!(self.tcx().sess.opts.trimmed_def_paths, TrimmedDefPaths::Never)
+ || NO_TRIMMED_PATH.with(|flag| flag.get())
+ || SHOULD_PREFIX_WITH_CRATE.with(|flag| flag.get())
+ {
+ return Ok((self, false));
+ }
+
+ match self.tcx().trimmed_def_paths(()).get(&def_id) {
+ None => Ok((self, false)),
+ Some(symbol) => {
+ self.write_str(symbol.as_str())?;
+ Ok((self, true))
+ }
+ }
+ }
+
+ /// Does the work of `try_print_visible_def_path`, building the
+ /// full definition path recursively before attempting to
+ /// post-process it into the valid and visible version that
+ /// accounts for re-exports.
+ ///
+ /// This method should only be called by itself or
+ /// `try_print_visible_def_path`.
+ ///
+ /// `callers` is a chain of visible_parent's leading to `def_id`,
+ /// to support cycle detection during recursion.
+ ///
+ /// This method returns false if we can't print the visible path, so
+ /// `print_def_path` can fall back on the item's real definition path.
+ fn try_print_visible_def_path_recur(
+ mut self,
+ def_id: DefId,
+ callers: &mut Vec<DefId>,
+ ) -> Result<(Self, bool), Self::Error> {
+ define_scoped_cx!(self);
+
+ debug!("try_print_visible_def_path: def_id={:?}", def_id);
+
+ // If `def_id` is a direct or injected extern crate, return the
+ // path to the crate followed by the path to the item within the crate.
+ if let Some(cnum) = def_id.as_crate_root() {
+ if cnum == LOCAL_CRATE {
+ return Ok((self.path_crate(cnum)?, true));
+ }
+
+ // In local mode, when we encounter a crate other than
+ // LOCAL_CRATE, execution proceeds in one of two ways:
+ //
+ // 1. For a direct dependency, where the user added an
+ // `extern crate` manually, we put the `extern
+ // crate` as the parent. So you wind up with
+ // something relative to the current crate.
+ // 2. For an extern inferred from a path or an indirect crate,
+ // where there is no explicit `extern crate`, we just prepend
+ // the crate name.
+ match self.tcx().extern_crate(def_id) {
+ Some(&ExternCrate { src, dependency_of, span, .. }) => match (src, dependency_of) {
+ (ExternCrateSource::Extern(def_id), LOCAL_CRATE) => {
+ // NOTE(eddyb) the only reason `span` might be dummy,
+ // that we're aware of, is that it's the `std`/`core`
+ // `extern crate` injected by default.
+ // FIXME(eddyb) find something better to key this on,
+ // or avoid ending up with `ExternCrateSource::Extern`,
+ // for the injected `std`/`core`.
+ if span.is_dummy() {
+ return Ok((self.path_crate(cnum)?, true));
+ }
+
+ // Disable `try_print_trimmed_def_path` behavior within
+ // the `print_def_path` call, to avoid infinite recursion
+ // in cases where the `extern crate foo` has non-trivial
+ // parents, e.g. it's nested in `impl foo::Trait for Bar`
+ // (see also issues #55779 and #87932).
+ self = with_no_visible_paths!(self.print_def_path(def_id, &[])?);
+
+ return Ok((self, true));
+ }
+ (ExternCrateSource::Path, LOCAL_CRATE) => {
+ return Ok((self.path_crate(cnum)?, true));
+ }
+ _ => {}
+ },
+ None => {
+ return Ok((self.path_crate(cnum)?, true));
+ }
+ }
+ }
+
+ if def_id.is_local() {
+ return Ok((self, false));
+ }
+
+ let visible_parent_map = self.tcx().visible_parent_map(());
+
+ let mut cur_def_key = self.tcx().def_key(def_id);
+ debug!("try_print_visible_def_path: cur_def_key={:?}", cur_def_key);
+
+ // For a constructor, we want the name of its parent rather than <unnamed>.
+ if let DefPathData::Ctor = cur_def_key.disambiguated_data.data {
+ let parent = DefId {
+ krate: def_id.krate,
+ index: cur_def_key
+ .parent
+ .expect("`DefPathData::Ctor` / `VariantData` missing a parent"),
+ };
+
+ cur_def_key = self.tcx().def_key(parent);
+ }
+
+ let Some(visible_parent) = visible_parent_map.get(&def_id).cloned() else {
+ return Ok((self, false));
+ };
+
+ let actual_parent = self.tcx().opt_parent(def_id);
+ debug!(
+ "try_print_visible_def_path: visible_parent={:?} actual_parent={:?}",
+ visible_parent, actual_parent,
+ );
+
+ let mut data = cur_def_key.disambiguated_data.data;
+ debug!(
+ "try_print_visible_def_path: data={:?} visible_parent={:?} actual_parent={:?}",
+ data, visible_parent, actual_parent,
+ );
+
+ match data {
+ // In order to output a path that could actually be imported (valid and visible),
+ // we need to handle re-exports correctly.
+ //
+ // For example, take `std::os::unix::process::CommandExt`, this trait is actually
+ // defined at `std::sys::unix::ext::process::CommandExt` (at time of writing).
+ //
+ // `std::os::unix` reexports the contents of `std::sys::unix::ext`. `std::sys` is
+ // private so the "true" path to `CommandExt` isn't accessible.
+ //
+ // In this case, the `visible_parent_map` will look something like this:
+ //
+ // (child) -> (parent)
+ // `std::sys::unix::ext::process::CommandExt` -> `std::sys::unix::ext::process`
+ // `std::sys::unix::ext::process` -> `std::sys::unix::ext`
+ // `std::sys::unix::ext` -> `std::os`
+ //
+ // This is correct, as the visible parent of `std::sys::unix::ext` is in fact
+ // `std::os`.
+ //
+ // When printing the path to `CommandExt` and looking at the `cur_def_key` that
+ // corresponds to `std::sys::unix::ext`, we would normally print `ext` and then go
+ // to the parent - resulting in a mangled path like
+ // `std::os::ext::process::CommandExt`.
+ //
+ // Instead, we must detect that there was a re-export and instead print `unix`
+ // (which is the name `std::sys::unix::ext` was re-exported as in `std::os`). To
+ // do this, we compare the parent of `std::sys::unix::ext` (`std::sys::unix`) with
+ // the visible parent (`std::os`). If these do not match, then we iterate over
+ // the children of the visible parent (as was done when computing
+ // `visible_parent_map`), looking for the specific child we currently have and then
+ // have access to the re-exported name.
+ DefPathData::TypeNs(ref mut name) if Some(visible_parent) != actual_parent => {
+ // Item might be re-exported several times, but filter for the one
+ // that's public and whose identifier isn't `_`.
+ let reexport = self
+ .tcx()
+ .module_children(visible_parent)
+ .iter()
+ .filter(|child| child.res.opt_def_id() == Some(def_id))
+ .find(|child| child.vis.is_public() && child.ident.name != kw::Underscore)
+ .map(|child| child.ident.name);
+
+ if let Some(new_name) = reexport {
+ *name = new_name;
+ } else {
+ // There is no name that is public and isn't `_`, so bail.
+ return Ok((self, false));
+ }
+ }
+ // Re-exported `extern crate` (#43189).
+ DefPathData::CrateRoot => {
+ data = DefPathData::TypeNs(self.tcx().crate_name(def_id.krate));
+ }
+ _ => {}
+ }
+ debug!("try_print_visible_def_path: data={:?}", data);
+
+ if callers.contains(&visible_parent) {
+ return Ok((self, false));
+ }
+ callers.push(visible_parent);
+ // HACK(eddyb) this bypasses `path_append`'s prefix printing to avoid
+ // knowing ahead of time whether the entire path will succeed or not.
+ // To support printers that do not implement `PrettyPrinter`, a `Vec` or
+ // linked list on the stack would need to be built before any printing.
+ match self.try_print_visible_def_path_recur(visible_parent, callers)? {
+ (cx, false) => return Ok((cx, false)),
+ (cx, true) => self = cx,
+ }
+ callers.pop();
+
+ Ok((self.path_append(Ok, &DisambiguatedDefPathData { data, disambiguator: 0 })?, true))
+ }
+
+ fn pretty_path_qualified(
+ self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ if trait_ref.is_none() {
+ // Inherent impls. Try to print `Foo::bar` for an inherent
+ // impl on `Foo`, but fall back to `<Foo>::bar` if the self-type is
+ // anything other than a simple path.
+ match self_ty.kind() {
+ ty::Adt(..)
+ | ty::Foreign(_)
+ | ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_) => {
+ return self_ty.print(self);
+ }
+
+ _ => {}
+ }
+ }
+
+ self.generic_delimiters(|mut cx| {
+ define_scoped_cx!(cx);
+
+ p!(print(self_ty));
+ if let Some(trait_ref) = trait_ref {
+ p!(" as ", print(trait_ref.print_only_trait_path()));
+ }
+ Ok(cx)
+ })
+ }
+
+ fn pretty_path_append_impl(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ self.generic_delimiters(|mut cx| {
+ define_scoped_cx!(cx);
+
+ p!("impl ");
+ if let Some(trait_ref) = trait_ref {
+ p!(print(trait_ref.print_only_trait_path()), " for ");
+ }
+ p!(print(self_ty));
+
+ Ok(cx)
+ })
+ }
+
+ fn pretty_print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ define_scoped_cx!(self);
+
+ match *ty.kind() {
+ ty::Bool => p!("bool"),
+ ty::Char => p!("char"),
+ ty::Int(t) => p!(write("{}", t.name_str())),
+ ty::Uint(t) => p!(write("{}", t.name_str())),
+ ty::Float(t) => p!(write("{}", t.name_str())),
+ ty::RawPtr(ref tm) => {
+ p!(write(
+ "*{} ",
+ match tm.mutbl {
+ hir::Mutability::Mut => "mut",
+ hir::Mutability::Not => "const",
+ }
+ ));
+ p!(print(tm.ty))
+ }
+ ty::Ref(r, ty, mutbl) => {
+ p!("&");
+ if self.should_print_region(r) {
+ p!(print(r), " ");
+ }
+ p!(print(ty::TypeAndMut { ty, mutbl }))
+ }
+ ty::Never => p!("!"),
+ ty::Tuple(ref tys) => {
+ p!("(", comma_sep(tys.iter()));
+ if tys.len() == 1 {
+ p!(",");
+ }
+ p!(")")
+ }
+ ty::FnDef(def_id, substs) => {
+ let sig = self.tcx().bound_fn_sig(def_id).subst(self.tcx(), substs);
+ p!(print(sig), " {{", print_value_path(def_id, substs), "}}");
+ }
+ ty::FnPtr(ref bare_fn) => p!(print(bare_fn)),
+ ty::Infer(infer_ty) => {
+ let verbose = self.tcx().sess.verbose();
+ if let ty::TyVar(ty_vid) = infer_ty {
+ if let Some(name) = self.ty_infer_name(ty_vid) {
+ p!(write("{}", name))
+ } else {
+ if verbose {
+ p!(write("{:?}", infer_ty))
+ } else {
+ p!(write("{}", infer_ty))
+ }
+ }
+ } else {
+ if verbose { p!(write("{:?}", infer_ty)) } else { p!(write("{}", infer_ty)) }
+ }
+ }
+ ty::Error(_) => p!("[type error]"),
+ ty::Param(ref param_ty) => p!(print(param_ty)),
+ ty::Bound(debruijn, bound_ty) => match bound_ty.kind {
+ ty::BoundTyKind::Anon => self.pretty_print_bound_var(debruijn, bound_ty.var)?,
+ ty::BoundTyKind::Param(p) => p!(write("{}", p)),
+ },
+ ty::Adt(def, substs) => {
+ p!(print_def_path(def.did(), substs));
+ }
+ ty::Dynamic(data, r) => {
+ let print_r = self.should_print_region(r);
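+ // If the region is printed, wrap the whole thing in parentheses, as in
+ // `(dyn Trait + 'r)`, so the lifetime bound reads unambiguously.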
+ if print_r {
+ p!("(");
+ }
+ p!("dyn ", print(data));
+ if print_r {
+ p!(" + ", print(r), ")");
+ }
+ }
+ ty::Foreign(def_id) => {
+ p!(print_def_path(def_id, &[]));
+ }
+ ty::Projection(ref data) => p!(print(data)),
+ ty::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
+ ty::Opaque(def_id, substs) => {
+ // FIXME(eddyb) print this with `print_def_path`.
+ // We use verbose printing in 'NO_QUERIES' mode, to
+ // avoid needing to call `predicates_of`. This should
+ // only affect certain debug messages (e.g. messages printed
+ // from `rustc_middle::ty` during the computation of `tcx.predicates_of`),
+ // and should have no effect on any compiler output.
+ if self.tcx().sess.verbose() || NO_QUERIES.with(|q| q.get()) {
+ p!(write("Opaque({:?}, {:?})", def_id, substs));
+ return Ok(self);
+ }
+
+ let parent = self.tcx().parent(def_id);
+ match self.tcx().def_kind(parent) {
+ DefKind::TyAlias | DefKind::AssocTy => {
+ if let ty::Opaque(d, _) = *self.tcx().type_of(parent).kind() {
+ if d == def_id {
+ // If the type alias directly starts with the `impl` of the
+ // opaque type we're printing, then skip the `::{opaque#1}`.
+ p!(print_def_path(parent, substs));
+ return Ok(self);
+ }
+ }
+ // Complex opaque type, e.g. `type Foo = (i32, impl Debug);`
+ p!(print_def_path(def_id, substs));
+ return Ok(self);
+ }
+ _ => return self.pretty_print_opaque_impl_type(def_id, substs),
+ }
+ }
+ ty::Str => p!("str"),
+ ty::Generator(did, substs, movability) => {
+ p!(write("["));
+ match movability {
+ hir::Movability::Movable => {}
+ hir::Movability::Static => p!("static "),
+ }
+
+ if !self.tcx().sess.verbose() {
+ p!("generator");
+ // FIXME(eddyb) should use `def_span`.
+ if let Some(did) = did.as_local() {
+ let span = self.tcx().def_span(did);
+ p!(write(
+ "@{}",
+ // This may end up in stderr diagnostics but it may also be emitted
+ // into MIR. Hence we use the remapped path if available
+ self.tcx().sess.source_map().span_to_embeddable_string(span)
+ ));
+ } else {
+ p!(write("@"), print_def_path(did, substs));
+ }
+ } else {
+ p!(print_def_path(did, substs));
+ p!(" upvar_tys=(");
+ if !substs.as_generator().is_valid() {
+ p!("unavailable");
+ } else {
+ self = self.comma_sep(substs.as_generator().upvar_tys())?;
+ }
+ p!(")");
+
+ if substs.as_generator().is_valid() {
+ p!(" ", print(substs.as_generator().witness()));
+ }
+ }
+
+ p!("]")
+ }
+ ty::GeneratorWitness(types) => {
+ p!(in_binder(&types));
+ }
+ ty::Closure(did, substs) => {
+ p!(write("["));
+ if !self.tcx().sess.verbose() {
+ p!(write("closure"));
+ // FIXME(eddyb) should use `def_span`.
+ if let Some(did) = did.as_local() {
+ if self.tcx().sess.opts.unstable_opts.span_free_formats {
+ p!("@", print_def_path(did.to_def_id(), substs));
+ } else {
+ let span = self.tcx().def_span(did);
+ p!(write(
+ "@{}",
+ // This may end up in stderr diagnostics but it may also be emitted
+ // into MIR. Hence we use the remapped path if available
+ self.tcx().sess.source_map().span_to_embeddable_string(span)
+ ));
+ }
+ } else {
+ p!(write("@"), print_def_path(did, substs));
+ }
+ } else {
+ p!(print_def_path(did, substs));
+ if !substs.as_closure().is_valid() {
+ p!(" closure_substs=(unavailable)");
+ p!(write(" substs={:?}", substs));
+ } else {
+ p!(" closure_kind_ty=", print(substs.as_closure().kind_ty()));
+ p!(
+ " closure_sig_as_fn_ptr_ty=",
+ print(substs.as_closure().sig_as_fn_ptr_ty())
+ );
+ p!(" upvar_tys=(");
+ self = self.comma_sep(substs.as_closure().upvar_tys())?;
+ p!(")");
+ }
+ }
+ p!("]");
+ }
+ ty::Array(ty, sz) => {
+ p!("[", print(ty), "; ");
+ if self.tcx().sess.verbose() {
+ p!(write("{:?}", sz));
+ } else if let ty::ConstKind::Unevaluated(..) = sz.kind() {
+ // Do not try to evaluate unevaluated constants. If we are const-evaluating an
+ // array-length anon const, rustc will (with debug assertions) print the
+ // constant's path, which would end up right back here again.
+ p!("_");
+ } else if let Some(n) = sz.kind().try_to_bits(self.tcx().data_layout.pointer_size) {
+ p!(write("{}", n));
+ } else if let ty::ConstKind::Param(param) = sz.kind() {
+ p!(print(param));
+ } else {
+ p!("_");
+ }
+ p!("]")
+ }
+ ty::Slice(ty) => p!("[", print(ty), "]"),
+ }
+
+ Ok(self)
+ }
+
+ fn pretty_print_opaque_impl_type(
+ mut self,
+ def_id: DefId,
+ substs: &'tcx ty::List<ty::GenericArg<'tcx>>,
+ ) -> Result<Self::Type, Self::Error> {
+ let tcx = self.tcx();
+
+ // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`,
+ // by looking up the projections associated with the def_id.
+ let bounds = tcx.bound_explicit_item_bounds(def_id);
+
+ let mut traits = FxIndexMap::default();
+ let mut fn_traits = FxIndexMap::default();
+ let mut is_sized = false;
+
+ for predicate in bounds.transpose_iter().map(|e| e.map_bound(|(p, _)| *p)) {
+ let predicate = predicate.subst(tcx, substs);
+ let bound_predicate = predicate.kind();
+
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(pred) => {
+ let trait_ref = bound_predicate.rebind(pred.trait_ref);
+
+ // Don't print `+ Sized`; instead, print `+ ?Sized` when the `Sized` bound is absent.
+ if Some(trait_ref.def_id()) == tcx.lang_items().sized_trait() {
+ is_sized = true;
+ continue;
+ }
+
+ self.insert_trait_and_projection(trait_ref, None, &mut traits, &mut fn_traits);
+ }
+ ty::PredicateKind::Projection(pred) => {
+ let proj_ref = bound_predicate.rebind(pred);
+ let trait_ref = proj_ref.required_poly_trait_ref(tcx);
+
+ // Projection type entry -- the def-id for naming, and the ty.
+ let proj_ty = (proj_ref.projection_def_id(), proj_ref.term());
+
+ self.insert_trait_and_projection(
+ trait_ref,
+ Some(proj_ty),
+ &mut traits,
+ &mut fn_traits,
+ );
+ }
+ _ => {}
+ }
+ }
+
+ write!(self, "impl ")?;
+
+ let mut first = true;
+ // Insert parentheses around `Fn(A, B) -> C` if the opaque type has more than one other trait.
+ let paren_needed = fn_traits.len() > 1 || traits.len() > 0 || !is_sized;
+
+ for (fn_once_trait_ref, entry) in fn_traits {
+ write!(self, "{}", if first { "" } else { " + " })?;
+ write!(self, "{}", if paren_needed { "(" } else { "" })?;
+
+ self = self.wrap_binder(&fn_once_trait_ref, |trait_ref, mut cx| {
+ define_scoped_cx!(cx);
+ // Get the (single) generic ty (the args) of this FnOnce trait ref.
+ let generics = tcx.generics_of(trait_ref.def_id);
+ let args = generics.own_substs_no_defaults(tcx, trait_ref.substs);
+
+ match (entry.return_ty, args[0].expect_ty()) {
+ // We can only print `impl Fn() -> ()` if we have a tuple of args and we recorded
+ // a return type.
+ (Some(return_ty), arg_tys) if matches!(arg_tys.kind(), ty::Tuple(_)) => {
+ let name = if entry.fn_trait_ref.is_some() {
+ "Fn"
+ } else if entry.fn_mut_trait_ref.is_some() {
+ "FnMut"
+ } else {
+ "FnOnce"
+ };
+
+ p!(write("{}(", name));
+
+ for (idx, ty) in arg_tys.tuple_fields().iter().enumerate() {
+ if idx > 0 {
+ p!(", ");
+ }
+ p!(print(ty));
+ }
+
+ p!(")");
+ if let Term::Ty(ty) = return_ty.skip_binder() {
+ if !ty.is_unit() {
+ p!(" -> ", print(return_ty));
+ }
+ }
+ p!(write("{}", if paren_needed { ")" } else { "" }));
+
+ first = false;
+ }
+ // If we got here, we can't print this as an `impl Fn(A, B) -> C`. Just record the
+ // trait_refs we collected in the `OpaqueFnEntry` as normal trait refs.
+ _ => {
+ if entry.has_fn_once {
+ traits.entry(fn_once_trait_ref).or_default().extend(
+ // Group the return ty with its def id, if we had one.
+ entry
+ .return_ty
+ .map(|ty| (tcx.lang_items().fn_once_output().unwrap(), ty)),
+ );
+ }
+ if let Some(trait_ref) = entry.fn_mut_trait_ref {
+ traits.entry(trait_ref).or_default();
+ }
+ if let Some(trait_ref) = entry.fn_trait_ref {
+ traits.entry(trait_ref).or_default();
+ }
+ }
+ }
+
+ Ok(cx)
+ })?;
+ }
+
+ // Print the rest of the trait types (that aren't Fn* family of traits)
+ for (trait_ref, assoc_items) in traits {
+ write!(self, "{}", if first { "" } else { " + " })?;
+
+ self = self.wrap_binder(&trait_ref, |trait_ref, mut cx| {
+ define_scoped_cx!(cx);
+ p!(print(trait_ref.print_only_trait_name()));
+
+ let generics = tcx.generics_of(trait_ref.def_id);
+ let args = generics.own_substs_no_defaults(tcx, trait_ref.substs);
+
+ if !args.is_empty() || !assoc_items.is_empty() {
+ let mut first = true;
+
+ for ty in args {
+ if first {
+ p!("<");
+ first = false;
+ } else {
+ p!(", ");
+ }
+ p!(print(ty));
+ }
+
+ for (assoc_item_def_id, term) in assoc_items {
+ // Skip printing `<[generator@] as Generator<_>>::Return` from async blocks,
+ // unless we can find out what generator return type it comes from.
+ let term = if let Some(ty) = term.skip_binder().ty()
+ && let ty::Projection(ty::ProjectionTy { item_def_id, substs }) = ty.kind()
+ && Some(*item_def_id) == tcx.lang_items().generator_return()
+ {
+ if let ty::Generator(_, substs, _) = substs.type_at(0).kind() {
+ let return_ty = substs.as_generator().return_ty();
+ if !return_ty.is_ty_infer() {
+ return_ty.into()
+ } else {
+ continue;
+ }
+ } else {
+ continue;
+ }
+ } else {
+ term.skip_binder()
+ };
+
+ if first {
+ p!("<");
+ first = false;
+ } else {
+ p!(", ");
+ }
+
+ p!(write("{} = ", tcx.associated_item(assoc_item_def_id).name));
+
+ match term {
+ Term::Ty(ty) => {
+ p!(print(ty))
+ }
+ Term::Const(c) => {
+ p!(print(c));
+ }
+ };
+ }
+
+ if !first {
+ p!(">");
+ }
+ }
+
+ first = false;
+ Ok(cx)
+ })?;
+ }
+
+ if !is_sized {
+ write!(self, "{}?Sized", if first { "" } else { " + " })?;
+ } else if first {
+ write!(self, "Sized")?;
+ }
+
+ Ok(self)
+ }
+
+ /// Inserts the trait ref, and optionally a projection type associated with it, into either
+ /// the `traits` map or the `fn_traits` map, depending on whether the trait is in the `Fn*`
+ /// family of traits.
+ fn insert_trait_and_projection(
+ &mut self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ proj_ty: Option<(DefId, ty::Binder<'tcx, Term<'tcx>>)>,
+ traits: &mut FxIndexMap<
+ ty::PolyTraitRef<'tcx>,
+ FxIndexMap<DefId, ty::Binder<'tcx, Term<'tcx>>>,
+ >,
+ fn_traits: &mut FxIndexMap<ty::PolyTraitRef<'tcx>, OpaqueFnEntry<'tcx>>,
+ ) {
+ let trait_def_id = trait_ref.def_id();
+
+ // If our trait_ref is FnOnce or any of its children, project it onto the parent FnOnce
+ // super-trait ref and record it there.
+ if let Some(fn_once_trait) = self.tcx().lang_items().fn_once_trait() {
+ // If we have `FnOnce` itself, insert it into the `fn_traits` map directly.
+ if trait_def_id == fn_once_trait {
+ let entry = fn_traits.entry(trait_ref).or_default();
+ // Optionally insert the return_ty as well.
+ if let Some((_, ty)) = proj_ty {
+ entry.return_ty = Some(ty);
+ }
+ entry.has_fn_once = true;
+ return;
+ } else if Some(trait_def_id) == self.tcx().lang_items().fn_mut_trait() {
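+ // `FnMut` (like `Fn` below) always has `FnOnce` as a supertrait,
+ // so this lookup cannot fail.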
+ let super_trait_ref = crate::traits::util::supertraits(self.tcx(), trait_ref)
+ .find(|super_trait_ref| super_trait_ref.def_id() == fn_once_trait)
+ .unwrap();
+
+ fn_traits.entry(super_trait_ref).or_default().fn_mut_trait_ref = Some(trait_ref);
+ return;
+ } else if Some(trait_def_id) == self.tcx().lang_items().fn_trait() {
+ let super_trait_ref = crate::traits::util::supertraits(self.tcx(), trait_ref)
+ .find(|super_trait_ref| super_trait_ref.def_id() == fn_once_trait)
+ .unwrap();
+
+ fn_traits.entry(super_trait_ref).or_default().fn_trait_ref = Some(trait_ref);
+ return;
+ }
+ }
+
+ // Otherwise, just group our traits and projection types.
+ traits.entry(trait_ref).or_default().extend(proj_ty);
+ }
+
+ fn pretty_print_bound_var(
+ &mut self,
+ debruijn: ty::DebruijnIndex,
+ var: ty::BoundVar,
+ ) -> Result<(), Self::Error> {
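+ // Bound variables print as `^index` for the innermost binder and as
+ // `^depth_index` for any binder further out.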
+ if debruijn == ty::INNERMOST {
+ write!(self, "^{}", var.index())
+ } else {
+ write!(self, "^{}_{}", debruijn.index(), var.index())
+ }
+ }
+
+ fn ty_infer_name(&self, _: ty::TyVid) -> Option<Symbol> {
+ None
+ }
+
+ fn const_infer_name(&self, _: ty::ConstVid<'tcx>) -> Option<Symbol> {
+ None
+ }
+
+ fn pretty_print_dyn_existential(
+ mut self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ // Generate the main trait ref, including associated types.
+ let mut first = true;
+
+ if let Some(principal) = predicates.principal() {
+ self = self.wrap_binder(&principal, |principal, mut cx| {
+ define_scoped_cx!(cx);
+ p!(print_def_path(principal.def_id, &[]));
+
+ let mut resugared = false;
+
+ // Special-case `Fn(...) -> ...` and re-sugar it.
+ let fn_trait_kind = cx.tcx().fn_trait_kind_from_lang_item(principal.def_id);
+ if !cx.tcx().sess.verbose() && fn_trait_kind.is_some() {
+ if let ty::Tuple(tys) = principal.substs.type_at(0).kind() {
+ let mut projections = predicates.projection_bounds();
+ if let (Some(proj), None) = (projections.next(), projections.next()) {
+ p!(pretty_fn_sig(
+ tys,
+ false,
+ proj.skip_binder().term.ty().expect("Return type was a const")
+ ));
+ resugared = true;
+ }
+ }
+ }
+
+ // HACK(eddyb) this duplicates `FmtPrinter`'s `path_generic_args`,
+ // in order to place the projections inside the `<...>`.
+ if !resugared {
+ // Use a type that can't appear in defaults of type parameters.
+ let dummy_cx = cx.tcx().mk_ty_infer(ty::FreshTy(0));
+ let principal = principal.with_self_ty(cx.tcx(), dummy_cx);
+
+ let args = cx
+ .tcx()
+ .generics_of(principal.def_id)
+ .own_substs_no_defaults(cx.tcx(), principal.substs);
+
+ // Don't print `'_` if there are no unerased regions.
+ let print_regions = args.iter().any(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(r) => !r.is_erased(),
+ _ => false,
+ });
+ let mut args = args.iter().cloned().filter(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(_) => print_regions,
+ _ => true,
+ });
+ let mut projections = predicates.projection_bounds();
+
+ let arg0 = args.next();
+ let projection0 = projections.next();
+ if arg0.is_some() || projection0.is_some() {
+ let args = arg0.into_iter().chain(args);
+ let projections = projection0.into_iter().chain(projections);
+
+ p!(generic_delimiters(|mut cx| {
+ cx = cx.comma_sep(args)?;
+ if arg0.is_some() && projection0.is_some() {
+ write!(cx, ", ")?;
+ }
+ cx.comma_sep(projections)
+ }));
+ }
+ }
+ Ok(cx)
+ })?;
+
+ first = false;
+ }
+
+ define_scoped_cx!(self);
+
+ // Builtin bounds.
+ // FIXME(eddyb) avoid printing twice (needed to ensure
+ // that the auto traits are sorted *and* printed via cx).
+ let mut auto_traits: Vec<_> = predicates.auto_traits().collect();
+
+ // The auto traits come ordered by `DefPathHash`. While
+ // `DefPathHash` is *stable* in the sense that it depends on
+ // neither the host nor the phase of the moon, it depends
+ // "pseudorandomly" on the compiler version and the target.
+ //
+ // To avoid causing instabilities in compiletest
+ // output, sort the auto-traits alphabetically.
+ auto_traits.sort_by_cached_key(|did| self.tcx().def_path_str(*did));
+
+ for def_id in auto_traits {
+ if !first {
+ p!(" + ");
+ }
+ first = false;
+
+ p!(print_def_path(def_id, &[]));
+ }
+
+ Ok(self)
+ }
+
+ fn pretty_fn_sig(
+ mut self,
+ inputs: &[Ty<'tcx>],
+ c_variadic: bool,
+ output: Ty<'tcx>,
+ ) -> Result<Self, Self::Error> {
+ define_scoped_cx!(self);
+
+ p!("(", comma_sep(inputs.iter().copied()));
+ if c_variadic {
+ if !inputs.is_empty() {
+ p!(", ");
+ }
+ p!("...");
+ }
+ p!(")");
+ if !output.is_unit() {
+ p!(" -> ", print(output));
+ }
+
+ Ok(self)
+ }
+
+ fn pretty_print_const(
+ mut self,
+ ct: ty::Const<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+
+ if self.tcx().sess.verbose() {
+ p!(write("Const({:?}: {:?})", ct.kind(), ct.ty()));
+ return Ok(self);
+ }
+
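+ // Prints `_`, optionally annotated with the constant's type (as `{_: Type}`),
+ // when no better source representation is available.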
+ macro_rules! print_underscore {
+ () => {{
+ if print_ty {
+ self = self.typed_value(
+ |mut this| {
+ write!(this, "_")?;
+ Ok(this)
+ },
+ |this| this.print_type(ct.ty()),
+ ": ",
+ )?;
+ } else {
+ write!(self, "_")?;
+ }
+ }};
+ }
+
+ match ct.kind() {
+ ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def,
+ substs,
+ promoted: Some(promoted),
+ }) => {
+ p!(print_value_path(def.did, substs));
+ p!(write("::{:?}", promoted));
+ }
+ ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted: None }) => {
+ match self.tcx().def_kind(def.did) {
+ DefKind::Static(..) | DefKind::Const | DefKind::AssocConst => {
+ p!(print_value_path(def.did, substs))
+ }
+ _ => {
+ if def.is_local() {
+ let span = self.tcx().def_span(def.did);
+ if let Ok(snip) = self.tcx().sess.source_map().span_to_snippet(span) {
+ p!(write("{}", snip))
+ } else {
+ print_underscore!()
+ }
+ } else {
+ print_underscore!()
+ }
+ }
+ }
+ }
+ ty::ConstKind::Infer(infer_ct) => {
+ match infer_ct {
+ ty::InferConst::Var(ct_vid)
+ if let Some(name) = self.const_infer_name(ct_vid) =>
+ p!(write("{}", name)),
+ _ => print_underscore!(),
+ }
+ }
+ ty::ConstKind::Param(ParamConst { name, .. }) => p!(write("{}", name)),
+ ty::ConstKind::Value(value) => {
+ return self.pretty_print_const_valtree(value, ct.ty(), print_ty);
+ }
+
+ ty::ConstKind::Bound(debruijn, bound_var) => {
+ self.pretty_print_bound_var(debruijn, bound_var)?
+ }
+ ty::ConstKind::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
+ ty::ConstKind::Error(_) => p!("[const error]"),
+ };
+ Ok(self)
+ }
+
+ fn pretty_print_const_scalar(
+ self,
+ scalar: Scalar,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ match scalar {
+ Scalar::Ptr(ptr, _size) => self.pretty_print_const_scalar_ptr(ptr, ty, print_ty),
+ Scalar::Int(int) => self.pretty_print_const_scalar_int(int, ty, print_ty),
+ }
+ }
+
+ fn pretty_print_const_scalar_ptr(
+ mut self,
+ ptr: Pointer,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+
+ let (alloc_id, offset) = ptr.into_parts();
+ match ty.kind() {
+ // Byte strings (&[u8; N])
+ ty::Ref(_, inner, _) => {
+ if let ty::Array(elem, len) = inner.kind() {
+ if let ty::Uint(ty::UintTy::U8) = elem.kind() {
+ if let ty::ConstKind::Value(ty::ValTree::Leaf(int)) = len.kind() {
+ match self.tcx().try_get_global_alloc(alloc_id) {
+ Some(GlobalAlloc::Memory(alloc)) => {
+ let len = int.assert_bits(self.tcx().data_layout.pointer_size);
+ let range =
+ AllocRange { start: offset, size: Size::from_bytes(len) };
+ if let Ok(byte_str) =
+ alloc.inner().get_bytes(&self.tcx(), range)
+ {
+ p!(pretty_print_byte_str(byte_str))
+ } else {
+ p!("<too short allocation>")
+ }
+ }
+ // FIXME: for statics, vtables, and functions, we could in principle print more detail.
+ Some(GlobalAlloc::Static(def_id)) => {
+ p!(write("<static({:?})>", def_id))
+ }
+ Some(GlobalAlloc::Function(_)) => p!("<function>"),
+ Some(GlobalAlloc::VTable(..)) => p!("<vtable>"),
+ None => p!("<dangling pointer>"),
+ }
+ return Ok(self);
+ }
+ }
+ }
+ }
+ ty::FnPtr(_) => {
+ // FIXME: We should probably have a helper method to share code with the "Byte strings"
+ // printing above (which also has to handle pointers to all sorts of things).
+ if let Some(GlobalAlloc::Function(instance)) =
+ self.tcx().try_get_global_alloc(alloc_id)
+ {
+ self = self.typed_value(
+ |this| this.print_value_path(instance.def_id(), instance.substs),
+ |this| this.print_type(ty),
+ " as ",
+ )?;
+ return Ok(self);
+ }
+ }
+ _ => {}
+ }
+ // Any pointer values not covered by a branch above
+ self = self.pretty_print_const_pointer(ptr, ty, print_ty)?;
+ Ok(self)
+ }
+
+ fn pretty_print_const_scalar_int(
+ mut self,
+ int: ScalarInt,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+
+ match ty.kind() {
+ // Bool
+ ty::Bool if int == ScalarInt::FALSE => p!("false"),
+ ty::Bool if int == ScalarInt::TRUE => p!("true"),
+ // Float
+ ty::Float(ty::FloatTy::F32) => {
+ p!(write("{}f32", Single::try_from(int).unwrap()))
+ }
+ ty::Float(ty::FloatTy::F64) => {
+ p!(write("{}f64", Double::try_from(int).unwrap()))
+ }
+ // Int
+ ty::Uint(_) | ty::Int(_) => {
+ let int =
+ ConstInt::new(int, matches!(ty.kind(), ty::Int(_)), ty.is_ptr_sized_integral());
+ if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
+ }
+ // Char
+ ty::Char if char::try_from(int).is_ok() => {
+ p!(write("{:?}", char::try_from(int).unwrap()))
+ }
+ // Pointer types
+ ty::Ref(..) | ty::RawPtr(_) | ty::FnPtr(_) => {
+ let data = int.assert_bits(self.tcx().data_layout.pointer_size);
+ self = self.typed_value(
+ |mut this| {
+ write!(this, "0x{:x}", data)?;
+ Ok(this)
+ },
+ |this| this.print_type(ty),
+ " as ",
+ )?;
+ }
+ // Nontrivial types with scalar bit representation
+ _ => {
+ let print = |mut this: Self| {
+ if int.size() == Size::ZERO {
+ write!(this, "transmute(())")?;
+ } else {
+ write!(this, "transmute(0x{:x})", int)?;
+ }
+ Ok(this)
+ };
+ self = if print_ty {
+ self.typed_value(print, |this| this.print_type(ty), ": ")?
+ } else {
+ print(self)?
+ };
+ }
+ }
+ Ok(self)
+ }
+
+ /// This is overridden for MIR printing because we only want to hide alloc ids from users, not
+ /// from MIR where it is actually useful.
+ fn pretty_print_const_pointer<Prov: Provenance>(
+ mut self,
+ _: Pointer<Prov>,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ if print_ty {
+ self.typed_value(
+ |mut this| {
+ this.write_str("&_")?;
+ Ok(this)
+ },
+ |this| this.print_type(ty),
+ ": ",
+ )
+ } else {
+ self.write_str("&_")?;
+ Ok(self)
+ }
+ }
+
+ fn pretty_print_byte_str(mut self, byte_str: &'tcx [u8]) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+ p!("b\"");
+ for &c in byte_str {
+ for e in std::ascii::escape_default(c) {
+ self.write_char(e as char)?;
+ }
+ }
+ p!("\"");
+ Ok(self)
+ }
+
+ fn pretty_print_const_valtree(
+ mut self,
+ valtree: ty::ValTree<'tcx>,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+
+ if self.tcx().sess.verbose() {
+ p!(write("ValTree({:?}: ", valtree), print(ty), ")");
+ return Ok(self);
+ }
+
+ let u8_type = self.tcx().types.u8;
+ match (valtree, ty.kind()) {
+ (ty::ValTree::Branch(_), ty::Ref(_, inner_ty, _)) => match inner_ty.kind() {
+ ty::Slice(t) if *t == u8_type => {
+ let bytes = valtree.try_to_raw_bytes(self.tcx(), ty).unwrap_or_else(|| {
+ bug!(
+ "expected to convert valtree {:?} to raw bytes for type {:?}",
+ valtree,
+ t
+ )
+ });
+ return self.pretty_print_byte_str(bytes);
+ }
+ ty::Str => {
+ let bytes = valtree.try_to_raw_bytes(self.tcx(), ty).unwrap_or_else(|| {
+ bug!("expected to convert valtree to raw bytes for type {:?}", ty)
+ });
+ p!(write("{:?}", String::from_utf8_lossy(bytes)));
+ return Ok(self);
+ }
+ _ => {
+ p!("&");
+ p!(pretty_print_const_valtree(valtree, *inner_ty, print_ty));
+ return Ok(self);
+ }
+ },
+ (ty::ValTree::Branch(_), ty::Array(t, _)) if *t == u8_type => {
+ let bytes = valtree.try_to_raw_bytes(self.tcx(), ty).unwrap_or_else(|| {
+ bug!("expected to convert valtree to raw bytes for type {:?}", t)
+ });
+ p!("*");
+ p!(pretty_print_byte_str(bytes));
+ return Ok(self);
+ }
+ // Aggregates, printed as array/tuple/struct/variant construction syntax.
+ (ty::ValTree::Branch(_), ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) => {
+ let contents =
+ self.tcx().destructure_const(ty::Const::from_value(self.tcx(), valtree, ty));
+ let fields = contents.fields.iter().copied();
+ match *ty.kind() {
+ ty::Array(..) => {
+ p!("[", comma_sep(fields), "]");
+ }
+ ty::Tuple(..) => {
+ p!("(", comma_sep(fields));
+ if contents.fields.len() == 1 {
+ p!(",");
+ }
+ p!(")");
+ }
+ ty::Adt(def, _) if def.variants().is_empty() => {
+ self = self.typed_value(
+ |mut this| {
+ write!(this, "unreachable()")?;
+ Ok(this)
+ },
+ |this| this.print_type(ty),
+ ": ",
+ )?;
+ }
+ ty::Adt(def, substs) => {
+ let variant_idx =
+ contents.variant.expect("destructured const of adt without variant idx");
+ let variant_def = &def.variant(variant_idx);
+ p!(print_value_path(variant_def.def_id, substs));
+ match variant_def.ctor_kind {
+ CtorKind::Const => {}
+ CtorKind::Fn => {
+ p!("(", comma_sep(fields), ")");
+ }
+ CtorKind::Fictive => {
+ p!(" {{ ");
+ let mut first = true;
+ for (field_def, field) in iter::zip(&variant_def.fields, fields) {
+ if !first {
+ p!(", ");
+ }
+ p!(write("{}: ", field_def.name), print(field));
+ first = false;
+ }
+ p!(" }}");
+ }
+ }
+ }
+ _ => unreachable!(),
+ }
+ return Ok(self);
+ }
+ (ty::ValTree::Leaf(leaf), _) => {
+ return self.pretty_print_const_scalar_int(leaf, ty, print_ty);
+ }
+ // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading
+ // their fields instead of just dumping the memory.
+ _ => {}
+ }
+
+ // fallback
+ if valtree == ty::ValTree::zst() {
+ p!(write("<ZST>"));
+ } else {
+ p!(write("{:?}", valtree));
+ }
+ if print_ty {
+ p!(": ", print(ty));
+ }
+ Ok(self)
+ }
+}
+
+// HACK(eddyb) boxed to avoid moving around a large struct by-value.
+pub struct FmtPrinter<'a, 'tcx>(Box<FmtPrinterData<'a, 'tcx>>);
+
+pub struct FmtPrinterData<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ fmt: String,
+
+ empty_path: bool,
+ in_value: bool,
+ pub print_alloc_ids: bool,
+
+ used_region_names: FxHashSet<Symbol>,
+ region_index: usize,
+ binder_depth: usize,
+ printed_type_count: usize,
+
+ pub region_highlight_mode: RegionHighlightMode<'tcx>,
+
+ pub ty_infer_name_resolver: Option<Box<dyn Fn(ty::TyVid) -> Option<Symbol> + 'a>>,
+ pub const_infer_name_resolver: Option<Box<dyn Fn(ty::ConstVid<'tcx>) -> Option<Symbol> + 'a>>,
+}
+
+impl<'a, 'tcx> Deref for FmtPrinter<'a, 'tcx> {
+ type Target = FmtPrinterData<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for FmtPrinter<'_, '_> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl<'a, 'tcx> FmtPrinter<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, ns: Namespace) -> Self {
+ FmtPrinter(Box::new(FmtPrinterData {
+ tcx,
+ // Estimated reasonable capacity to allocate upfront based on a few
+ // benchmarks.
+ fmt: String::with_capacity(64),
+ empty_path: false,
+ in_value: ns == Namespace::ValueNS,
+ print_alloc_ids: false,
+ used_region_names: Default::default(),
+ region_index: 0,
+ binder_depth: 0,
+ printed_type_count: 0,
+ region_highlight_mode: RegionHighlightMode::new(tcx),
+ ty_infer_name_resolver: None,
+ const_infer_name_resolver: None,
+ }))
+ }
+
+ pub fn into_buffer(self) -> String {
+ self.0.fmt
+ }
+}
+
+// HACK(eddyb) get rid of `def_path_str` and/or pass `Namespace` explicitly always
+// (but also some things just print a `DefId` generally so maybe we need this?)
+fn guess_def_namespace(tcx: TyCtxt<'_>, def_id: DefId) -> Namespace {
+ match tcx.def_key(def_id).disambiguated_data.data {
+ DefPathData::TypeNs(..) | DefPathData::CrateRoot | DefPathData::ImplTrait => {
+ Namespace::TypeNS
+ }
+
+ DefPathData::ValueNs(..)
+ | DefPathData::AnonConst
+ | DefPathData::ClosureExpr
+ | DefPathData::Ctor => Namespace::ValueNS,
+
+ DefPathData::MacroNs(..) => Namespace::MacroNS,
+
+ _ => Namespace::TypeNS,
+ }
+}
+
+impl<'t> TyCtxt<'t> {
+ /// Returns a string identifying this `DefId`. This string is
+ /// suitable for user output.
+ pub fn def_path_str(self, def_id: DefId) -> String {
+ self.def_path_str_with_substs(def_id, &[])
+ }
+
+ pub fn def_path_str_with_substs(self, def_id: DefId, substs: &'t [GenericArg<'t>]) -> String {
+ let ns = guess_def_namespace(self, def_id);
+ debug!("def_path_str: def_id={:?}, ns={:?}", def_id, ns);
+ FmtPrinter::new(self, ns).print_def_path(def_id, substs).unwrap().into_buffer()
+ }
+}
+
+impl fmt::Write for FmtPrinter<'_, '_> {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ self.fmt.push_str(s);
+ Ok(())
+ }
+}
+
+impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> {
+ type Error = fmt::Error;
+
+ type Path = Self;
+ type Region = Self;
+ type Type = Self;
+ type DynExistential = Self;
+ type Const = Self;
+
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn print_def_path(
+ mut self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ define_scoped_cx!(self);
+
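+ // With no substitutions to print, first try the trimmed path, then the
+ // shortest publicly visible re-export path, before falling through to the
+ // default path printing below.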
+ if substs.is_empty() {
+ match self.try_print_trimmed_def_path(def_id)? {
+ (cx, true) => return Ok(cx),
+ (cx, false) => self = cx,
+ }
+
+ match self.try_print_visible_def_path(def_id)? {
+ (cx, true) => return Ok(cx),
+ (cx, false) => self = cx,
+ }
+ }
+
+ let key = self.tcx.def_key(def_id);
+ if let DefPathData::Impl = key.disambiguated_data.data {
+ // Always use types for non-local impls, where types are always
+ // available, and filename/line-number is mostly uninteresting.
+ let use_types = !def_id.is_local() || {
+ // Otherwise, use filename/line-number if forced.
+ let force_no_types = FORCE_IMPL_FILENAME_LINE.with(|f| f.get());
+ !force_no_types
+ };
+
+ if !use_types {
+ // If no type info is available, fall back to
+ // pretty printing some span information. This should
+ // only occur very early in the compiler pipeline.
+ let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id };
+ let span = self.tcx.def_span(def_id);
+
+ self = self.print_def_path(parent_def_id, &[])?;
+
+ // HACK(eddyb) copy of `path_append` to avoid
+ // constructing a `DisambiguatedDefPathData`.
+ if !self.empty_path {
+ write!(self, "::")?;
+ }
+ write!(
+ self,
+ "<impl at {}>",
+ // This may end up in stderr diagnostics but it may also be emitted
+ // into MIR. Hence we use the remapped path if available
+ self.tcx.sess.source_map().span_to_embeddable_string(span)
+ )?;
+ self.empty_path = false;
+
+ return Ok(self);
+ }
+ }
+
+ self.default_print_def_path(def_id, substs)
+ }
+
+ fn print_region(self, region: ty::Region<'tcx>) -> Result<Self::Region, Self::Error> {
+ self.pretty_print_region(region)
+ }
+
+ fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ let type_length_limit = self.tcx.type_length_limit();
+ if type_length_limit.value_within_limit(self.printed_type_count) {
+ self.printed_type_count += 1;
+ self.pretty_print_type(ty)
+ } else {
+ write!(self, "...")?;
+ Ok(self)
+ }
+ }
+
+ fn print_dyn_existential(
+ self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ self.pretty_print_dyn_existential(predicates)
+ }
+
+ fn print_const(self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+ self.pretty_print_const(ct, false)
+ }
+
+ fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+ self.empty_path = true;
+ if cnum == LOCAL_CRATE {
+ if self.tcx.sess.rust_2018() {
+ // On Rust 2018 we add the `crate::` prefix, but only when explicitly requested.
+ if SHOULD_PREFIX_WITH_CRATE.with(|flag| flag.get()) {
+ write!(self, "{}", kw::Crate)?;
+ self.empty_path = false;
+ }
+ }
+ } else {
+ write!(self, "{}", self.tcx.crate_name(cnum))?;
+ self.empty_path = false;
+ }
+ Ok(self)
+ }
+
+ fn path_qualified(
+ mut self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self = self.pretty_path_qualified(self_ty, trait_ref)?;
+ self.empty_path = false;
+ Ok(self)
+ }
+
+ fn path_append_impl(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _disambiguated_data: &DisambiguatedDefPathData,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self = self.pretty_path_append_impl(
+ |mut cx| {
+ cx = print_prefix(cx)?;
+ if !cx.empty_path {
+ write!(cx, "::")?;
+ }
+
+ Ok(cx)
+ },
+ self_ty,
+ trait_ref,
+ )?;
+ self.empty_path = false;
+ Ok(self)
+ }
+
+ fn path_append(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ // Skip `::{{extern}}` blocks and `::{{constructor}}` on tuple/unit structs.
+ if let DefPathData::ForeignMod | DefPathData::Ctor = disambiguated_data.data {
+ return Ok(self);
+ }
+
+ let name = disambiguated_data.data.name();
+ if !self.empty_path {
+ write!(self, "::")?;
+ }
+
+ if let DefPathDataName::Named(name) = name {
+ if Ident::with_dummy_span(name).is_raw_guess() {
+ write!(self, "r#")?;
+ }
+ }
+
+ let verbose = self.tcx.sess.verbose();
+ disambiguated_data.fmt_maybe_verbose(&mut self, verbose)?;
+
+ self.empty_path = false;
+
+ Ok(self)
+ }
+
+ fn path_generic_args(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ // Don't print `'_` if there are no unerased regions.
+ let print_regions = self.tcx.sess.verbose()
+ || args.iter().any(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(r) => !r.is_erased(),
+ _ => false,
+ });
+ let args = args.iter().cloned().filter(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(_) => print_regions,
+ _ => true,
+ });
+
+ if args.clone().next().is_some() {
+ if self.in_value {
+ write!(self, "::")?;
+ }
+ self.generic_delimiters(|cx| cx.comma_sep(args))
+ } else {
+ Ok(self)
+ }
+ }
+}
+
+impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> {
+ fn ty_infer_name(&self, id: ty::TyVid) -> Option<Symbol> {
+ self.0.ty_infer_name_resolver.as_ref().and_then(|func| func(id))
+ }
+
+ fn const_infer_name(&self, id: ty::ConstVid<'tcx>) -> Option<Symbol> {
+ self.0.const_infer_name_resolver.as_ref().and_then(|func| func(id))
+ }
+
+ fn print_value_path(
+ mut self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ let was_in_value = std::mem::replace(&mut self.in_value, true);
+ self = self.print_def_path(def_id, substs)?;
+ self.in_value = was_in_value;
+
+ Ok(self)
+ }
+
+ fn in_binder<T>(self, value: &ty::Binder<'tcx, T>) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+ {
+ self.pretty_in_binder(value)
+ }
+
+ fn wrap_binder<T, C: FnOnce(&T, Self) -> Result<Self, Self::Error>>(
+ self,
+ value: &ty::Binder<'tcx, T>,
+ f: C,
+ ) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+ {
+ self.pretty_wrap_binder(value, f)
+ }
+
+ fn typed_value(
+ mut self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ t: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ conversion: &str,
+ ) -> Result<Self::Const, Self::Error> {
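+ // Renders `{<value><conversion><type>}`, e.g. `{0x0 as *const u8}` with
+ // `" as "` or `{_: u8}` with `": "`.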
+ self.write_str("{")?;
+ self = f(self)?;
+ self.write_str(conversion)?;
+ let was_in_value = std::mem::replace(&mut self.in_value, false);
+ self = t(self)?;
+ self.in_value = was_in_value;
+ self.write_str("}")?;
+ Ok(self)
+ }
+
+ fn generic_delimiters(
+ mut self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ ) -> Result<Self, Self::Error> {
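+ // Everything inside the `<...>` is printed in type position, so temporarily
+ // leave value mode while running `f`.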
+ write!(self, "<")?;
+
+ let was_in_value = std::mem::replace(&mut self.in_value, false);
+ let mut inner = f(self)?;
+ inner.in_value = was_in_value;
+
+ write!(inner, ">")?;
+ Ok(inner)
+ }
+
+ fn should_print_region(&self, region: ty::Region<'tcx>) -> bool {
+ let highlight = self.region_highlight_mode;
+ if highlight.region_highlighted(region).is_some() {
+ return true;
+ }
+
+ if self.tcx.sess.verbose() {
+ return true;
+ }
+
+ let identify_regions = self.tcx.sess.opts.unstable_opts.identify_regions;
+
+ match *region {
+ ty::ReEarlyBound(ref data) => {
+ data.name != kw::Empty && data.name != kw::UnderscoreLifetime
+ }
+
+ ty::ReLateBound(_, ty::BoundRegion { kind: br, .. })
+ | ty::ReFree(ty::FreeRegion { bound_region: br, .. })
+ | ty::RePlaceholder(ty::Placeholder { name: br, .. }) => {
+ if let ty::BrNamed(_, name) = br {
+ if name != kw::Empty && name != kw::UnderscoreLifetime {
+ return true;
+ }
+ }
+
+ if let Some((region, _)) = highlight.highlight_bound_region {
+ if br == region {
+ return true;
+ }
+ }
+
+ false
+ }
+
+ ty::ReVar(_) if identify_regions => true,
+
+ ty::ReVar(_) | ty::ReErased => false,
+
+ ty::ReStatic | ty::ReEmpty(_) => true,
+ }
+ }
+
+ fn pretty_print_const_pointer<Prov: Provenance>(
+ self,
+ p: Pointer<Prov>,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ let print = |mut this: Self| {
+ define_scoped_cx!(this);
+ if this.print_alloc_ids {
+ p!(write("{:?}", p));
+ } else {
+ p!("&_");
+ }
+ Ok(this)
+ };
+ if print_ty {
+ self.typed_value(print, |this| this.print_type(ty), ": ")
+ } else {
+ print(self)
+ }
+ }
+}
+
+// HACK(eddyb) limited to `FmtPrinter` because of `region_highlight_mode`.
+impl<'tcx> FmtPrinter<'_, 'tcx> {
+ pub fn pretty_print_region(mut self, region: ty::Region<'tcx>) -> Result<Self, fmt::Error> {
+ define_scoped_cx!(self);
+
+ // Watch out for region highlights.
+ let highlight = self.region_highlight_mode;
+ if let Some(n) = highlight.region_highlighted(region) {
+ p!(write("'{}", n));
+ return Ok(self);
+ }
+
+ if self.tcx.sess.verbose() {
+ p!(write("{:?}", region));
+ return Ok(self);
+ }
+
+ let identify_regions = self.tcx.sess.opts.unstable_opts.identify_regions;
+
+ // These printouts are concise. They do not contain all the information
+ // the user might want to diagnose an error, but there is basically no way
+ // to fit that into a short string. Hence the recommendation to use
+ // `explain_region()` or `note_and_explain_region()`.
+ match *region {
+ ty::ReEarlyBound(ref data) => {
+ if data.name != kw::Empty {
+ p!(write("{}", data.name));
+ return Ok(self);
+ }
+ }
+ ty::ReLateBound(_, ty::BoundRegion { kind: br, .. })
+ | ty::ReFree(ty::FreeRegion { bound_region: br, .. })
+ | ty::RePlaceholder(ty::Placeholder { name: br, .. }) => {
+ if let ty::BrNamed(_, name) = br {
+ if name != kw::Empty && name != kw::UnderscoreLifetime {
+ p!(write("{}", name));
+ return Ok(self);
+ }
+ }
+
+ if let Some((region, counter)) = highlight.highlight_bound_region {
+ if br == region {
+ p!(write("'{}", counter));
+ return Ok(self);
+ }
+ }
+ }
+ ty::ReVar(region_vid) if identify_regions => {
+ p!(write("{:?}", region_vid));
+ return Ok(self);
+ }
+ ty::ReVar(_) => {}
+ ty::ReErased => {}
+ ty::ReStatic => {
+ p!("'static");
+ return Ok(self);
+ }
+ ty::ReEmpty(ty::UniverseIndex::ROOT) => {
+ p!("'<empty>");
+ return Ok(self);
+ }
+ ty::ReEmpty(ui) => {
+ p!(write("'<empty:{:?}>", ui));
+ return Ok(self);
+ }
+ }
+
+ p!("'_");
+
+ Ok(self)
+ }
+}
+
+/// Folds through bound vars and placeholders, assigning names to them.
+struct RegionFolder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ current_index: ty::DebruijnIndex,
+ region_map: BTreeMap<ty::BoundRegion, ty::Region<'tcx>>,
+ name: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a),
+}
+
+impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
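+ // Only recurse into types that can actually contain regions of interest
+ // (bound vars at or above the current binder depth, or placeholders);
+ // everything else is returned unchanged.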
+ match *t.kind() {
+ _ if t.has_vars_bound_at_or_above(self.current_index) || t.has_placeholders() => {
+ return t.super_fold_with(self);
+ }
+ _ => {}
+ }
+ t
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ let name = &mut self.name;
+ let region = match *r {
+ ty::ReLateBound(_, br) => *self.region_map.entry(br).or_insert_with(|| name(br)),
+ ty::RePlaceholder(ty::PlaceholderRegion { name: kind, .. }) => {
+ // If this is an anonymous placeholder, don't rename. Otherwise, in some
+ // async fns, we get a `for<'r> Send` bound.
+ match kind {
+ ty::BrAnon(_) | ty::BrEnv => r,
+ _ => {
+ // The index doesn't matter, since this is just for naming and these never get bound.
+ let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind };
+ *self.region_map.entry(br).or_insert_with(|| name(br))
+ }
+ }
+ }
+ _ => return r,
+ };
+ if let ty::ReLateBound(debruijn1, br) = *region {
+ assert_eq!(debruijn1, ty::INNERMOST);
+ self.tcx.mk_region(ty::ReLateBound(self.current_index, br))
+ } else {
+ region
+ }
+ }
+}
+
+// HACK(eddyb) limited to `FmtPrinter` because of `binder_depth`,
+// `region_index` and `used_region_names`.
+impl<'tcx> FmtPrinter<'_, 'tcx> {
+ pub fn name_all_regions<T>(
+ mut self,
+ value: &ty::Binder<'tcx, T>,
+ ) -> Result<(Self, T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>), fmt::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
+ {
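+ // Fresh lifetime names are drawn from the sequence `'r`, `'s`, `'t0`, `'t1`, ...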
+ fn name_by_region_index(index: usize) -> Symbol {
+ match index {
+ 0 => Symbol::intern("'r"),
+ 1 => Symbol::intern("'s"),
+ i => Symbol::intern(&format!("'t{}", i - 2)),
+ }
+ }
+
+ // Replace any anonymous late-bound regions with named
+ // variants, using new unique identifiers, so that we can
+ // clearly differentiate between named and unnamed regions in
+ // the output. We'll probably want to tweak this over time to
+ // decide just how much information to give.
+ if self.binder_depth == 0 {
+ self.prepare_late_bound_region_info(value);
+ }
+
+ let mut empty = true;
+ let mut start_or_continue = |cx: &mut Self, start: &str, cont: &str| {
+ let w = if empty {
+ empty = false;
+ start
+ } else {
+ cont
+ };
+ let _ = write!(cx, "{}", w);
+ };
+ let do_continue = |cx: &mut Self, cont: Symbol| {
+ let _ = write!(cx, "{}", cont);
+ };
+
+ define_scoped_cx!(self);
+
+ let mut region_index = self.region_index;
+ let mut next_name = |this: &Self| loop {
+ let name = name_by_region_index(region_index);
+ region_index += 1;
+ if !this.used_region_names.contains(&name) {
+ break name;
+ }
+ };
+
+ // If we want to print verbosely, then print *all* binders, even if they
+ // aren't named. Eventually, we might just want this as the default, but
+ // this is not *quite* right and changes the ordering of some output
+ // anyway.
+ let (new_value, map) = if self.tcx().sess.verbose() {
+ let regions: Vec<_> = value
+ .bound_vars()
+ .into_iter()
+ .map(|var| {
+ let ty::BoundVariableKind::Region(var) = var else {
+ // This doesn't really matter, because it never gets used;
+ // it's just a placeholder value.
+ return ty::BrAnon(0);
+ };
+ match var {
+ ty::BrAnon(_) | ty::BrEnv => {
+ start_or_continue(&mut self, "for<", ", ");
+ let name = next_name(&self);
+ do_continue(&mut self, name);
+ ty::BrNamed(CRATE_DEF_ID.to_def_id(), name)
+ }
+ ty::BrNamed(def_id, kw::UnderscoreLifetime) => {
+ start_or_continue(&mut self, "for<", ", ");
+ let name = next_name(&self);
+ do_continue(&mut self, name);
+ ty::BrNamed(def_id, name)
+ }
+ ty::BrNamed(def_id, name) => {
+ start_or_continue(&mut self, "for<", ", ");
+ do_continue(&mut self, name);
+ ty::BrNamed(def_id, name)
+ }
+ }
+ })
+ .collect();
+ start_or_continue(&mut self, "", "> ");
+
+ self.tcx.replace_late_bound_regions(value.clone(), |br| {
+ let kind = regions[br.var.as_usize()];
+ self.tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: br.var, kind },
+ ))
+ })
+ } else {
+ let tcx = self.tcx;
+ let mut name = |br: ty::BoundRegion| {
+ start_or_continue(&mut self, "for<", ", ");
+ let kind = match br.kind {
+ ty::BrAnon(_) | ty::BrEnv => {
+ let name = next_name(&self);
+ do_continue(&mut self, name);
+ ty::BrNamed(CRATE_DEF_ID.to_def_id(), name)
+ }
+ ty::BrNamed(def_id, kw::UnderscoreLifetime) => {
+ let name = next_name(&self);
+ do_continue(&mut self, name);
+ ty::BrNamed(def_id, name)
+ }
+ ty::BrNamed(_, name) => {
+ do_continue(&mut self, name);
+ br.kind
+ }
+ };
+ tcx.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BoundRegion { var: br.var, kind }))
+ };
+ let mut folder = RegionFolder {
+ tcx,
+ current_index: ty::INNERMOST,
+ name: &mut name,
+ region_map: BTreeMap::new(),
+ };
+ let new_value = value.clone().skip_binder().fold_with(&mut folder);
+ let region_map = folder.region_map;
+ start_or_continue(&mut self, "", "> ");
+ (new_value, region_map)
+ };
+
+ self.binder_depth += 1;
+ self.region_index = region_index;
+ Ok((self, new_value, map))
+ }
+
+ pub fn pretty_in_binder<T>(self, value: &ty::Binder<'tcx, T>) -> Result<Self, fmt::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
+ {
+ let old_region_index = self.region_index;
+ let (new, new_value, _) = self.name_all_regions(value)?;
+ let mut inner = new_value.print(new)?;
+ inner.region_index = old_region_index;
+ inner.binder_depth -= 1;
+ Ok(inner)
+ }
+
+ pub fn pretty_wrap_binder<T, C: FnOnce(&T, Self) -> Result<Self, fmt::Error>>(
+ self,
+ value: &ty::Binder<'tcx, T>,
+ f: C,
+ ) -> Result<Self, fmt::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
+ {
+ let old_region_index = self.region_index;
+ let (new, new_value, _) = self.name_all_regions(value)?;
+ let mut inner = f(&new_value, new)?;
+ inner.region_index = old_region_index;
+ inner.binder_depth -= 1;
+ Ok(inner)
+ }
+
+ fn prepare_late_bound_region_info<T>(&mut self, value: &ty::Binder<'tcx, T>)
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ struct LateBoundRegionNameCollector<'a, 'tcx> {
+ used_region_names: &'a mut FxHashSet<Symbol>,
+ type_collector: SsoHashSet<Ty<'tcx>>,
+ }
+
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_, 'tcx> {
+ type BreakTy = ();
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ trace!("address: {:p}", r.0.0);
+ if let ty::ReLateBound(_, ty::BoundRegion { kind: ty::BrNamed(_, name), .. }) = *r {
+ self.used_region_names.insert(name);
+ } else if let ty::RePlaceholder(ty::PlaceholderRegion {
+ name: ty::BrNamed(_, name),
+ ..
+ }) = *r
+ {
+ self.used_region_names.insert(name);
+ }
+ r.super_visit_with(self)
+ }
+
+ // We collect previously-seen types here to avoid visiting the same type
+ // twice, which for really large types can take a really long time.
+ // See issue #83150 for why this is necessary.
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let not_previously_inserted = self.type_collector.insert(ty);
+ if not_previously_inserted {
+ ty.super_visit_with(self)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+ }
+
+ self.used_region_names.clear();
+ let mut collector = LateBoundRegionNameCollector {
+ used_region_names: &mut self.used_region_names,
+ type_collector: SsoHashSet::new(),
+ };
+ value.visit_with(&mut collector);
+ self.region_index = 0;
+ }
+}
+
+impl<'tcx, T, P: PrettyPrinter<'tcx>> Print<'tcx, P> for ty::Binder<'tcx, T>
+where
+ T: Print<'tcx, P, Output = P, Error = P::Error> + TypeFoldable<'tcx>,
+{
+ type Output = P;
+ type Error = P::Error;
+
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.in_binder(self)
+ }
+}
+
+impl<'tcx, T, U, P: PrettyPrinter<'tcx>> Print<'tcx, P> for ty::OutlivesPredicate<T, U>
+where
+ T: Print<'tcx, P, Output = P, Error = P::Error>,
+ U: Print<'tcx, P, Output = P, Error = P::Error>,
+{
+ type Output = P;
+ type Error = P::Error;
+ fn print(&self, mut cx: P) -> Result<Self::Output, Self::Error> {
+ define_scoped_cx!(cx);
+ p!(print(self.0), ": ", print(self.1));
+ Ok(cx)
+ }
+}
+
+macro_rules! forward_display_to_print {
+ ($($ty:ty),+) => {
+ // Some of the $ty arguments may not actually use 'tcx
+ $(#[allow(unused_lifetimes)] impl<'tcx> fmt::Display for $ty {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ let cx = tcx.lift(*self)
+ .expect("could not lift for printing")
+ .print(FmtPrinter::new(tcx, Namespace::TypeNS))?;
+ f.write_str(&cx.into_buffer())?;
+ Ok(())
+ })
+ }
+ })+
+ };
+}
+
+macro_rules! define_print_and_forward_display {
+ (($self:ident, $cx:ident): $($ty:ty $print:block)+) => {
+ $(impl<'tcx, P: PrettyPrinter<'tcx>> Print<'tcx, P> for $ty {
+ type Output = P;
+ type Error = fmt::Error;
+ fn print(&$self, $cx: P) -> Result<Self::Output, Self::Error> {
+ #[allow(unused_mut)]
+ let mut $cx = $cx;
+ define_scoped_cx!($cx);
+ let _: () = $print;
+ #[allow(unreachable_code)]
+ Ok($cx)
+ }
+ })+
+
+ forward_display_to_print!($($ty),+);
+ };
+}
+
+/// Wrapper type for `ty::TraitRef` which opts-in to pretty printing only
+/// the trait path. That is, it will print `Trait<U>` instead of
+/// `<T as Trait<U>>`.
+#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift)]
+pub struct TraitRefPrintOnlyTraitPath<'tcx>(ty::TraitRef<'tcx>);
+
+impl<'tcx> fmt::Debug for TraitRefPrintOnlyTraitPath<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+/// Wrapper type for `ty::TraitRef` which opts-in to pretty printing only
+/// the trait name. That is, it will print `Trait` instead of
+/// `<T as Trait<U>>`.
+#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift)]
+pub struct TraitRefPrintOnlyTraitName<'tcx>(ty::TraitRef<'tcx>);
+
+impl<'tcx> fmt::Debug for TraitRefPrintOnlyTraitName<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl<'tcx> ty::TraitRef<'tcx> {
+ pub fn print_only_trait_path(self) -> TraitRefPrintOnlyTraitPath<'tcx> {
+ TraitRefPrintOnlyTraitPath(self)
+ }
+
+ pub fn print_only_trait_name(self) -> TraitRefPrintOnlyTraitName<'tcx> {
+ TraitRefPrintOnlyTraitName(self)
+ }
+}
+
+impl<'tcx> ty::Binder<'tcx, ty::TraitRef<'tcx>> {
+ pub fn print_only_trait_path(self) -> ty::Binder<'tcx, TraitRefPrintOnlyTraitPath<'tcx>> {
+ self.map_bound(|tr| tr.print_only_trait_path())
+ }
+}
+
+#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift)]
+pub struct TraitPredPrintModifiersAndPath<'tcx>(ty::TraitPredicate<'tcx>);
+
+impl<'tcx> fmt::Debug for TraitPredPrintModifiersAndPath<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl<'tcx> ty::TraitPredicate<'tcx> {
+ pub fn print_modifiers_and_trait_path(self) -> TraitPredPrintModifiersAndPath<'tcx> {
+ TraitPredPrintModifiersAndPath(self)
+ }
+}
+
+impl<'tcx> ty::PolyTraitPredicate<'tcx> {
+ pub fn print_modifiers_and_trait_path(
+ self,
+ ) -> ty::Binder<'tcx, TraitPredPrintModifiersAndPath<'tcx>> {
+ self.map_bound(TraitPredPrintModifiersAndPath)
+ }
+}
+
+forward_display_to_print! {
+ ty::Region<'tcx>,
+ Ty<'tcx>,
+ &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ty::Const<'tcx>,
+
+ // HACK(eddyb) these are exhaustive instead of generic,
+ // because `for<'tcx>` isn't possible yet.
+ ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>,
+ ty::Binder<'tcx, ty::TraitRef<'tcx>>,
+ ty::Binder<'tcx, ty::ExistentialTraitRef<'tcx>>,
+ ty::Binder<'tcx, TraitRefPrintOnlyTraitPath<'tcx>>,
+ ty::Binder<'tcx, TraitRefPrintOnlyTraitName<'tcx>>,
+ ty::Binder<'tcx, ty::FnSig<'tcx>>,
+ ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
+ ty::Binder<'tcx, TraitPredPrintModifiersAndPath<'tcx>>,
+ ty::Binder<'tcx, ty::SubtypePredicate<'tcx>>,
+ ty::Binder<'tcx, ty::ProjectionPredicate<'tcx>>,
+ ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>,
+ ty::Binder<'tcx, ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>>,
+
+ ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>,
+ ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>
+}
+
+define_print_and_forward_display! {
+ (self, cx):
+
+ &'tcx ty::List<Ty<'tcx>> {
+ p!("{{", comma_sep(self.iter()), "}}")
+ }
+
+ ty::TypeAndMut<'tcx> {
+ p!(write("{}", self.mutbl.prefix_str()), print(self.ty))
+ }
+
+ ty::ExistentialTraitRef<'tcx> {
+ // Use a type that can't appear in defaults of type parameters.
+ let dummy_self = cx.tcx().mk_ty_infer(ty::FreshTy(0));
+ let trait_ref = self.with_self_ty(cx.tcx(), dummy_self);
+ p!(print(trait_ref.print_only_trait_path()))
+ }
+
+ ty::ExistentialProjection<'tcx> {
+ let name = cx.tcx().associated_item(self.item_def_id).name;
+ p!(write("{} = ", name), print(self.term))
+ }
+
+ ty::ExistentialPredicate<'tcx> {
+ match *self {
+ ty::ExistentialPredicate::Trait(x) => p!(print(x)),
+ ty::ExistentialPredicate::Projection(x) => p!(print(x)),
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ p!(print_def_path(def_id, &[]));
+ }
+ }
+ }
+
+ ty::FnSig<'tcx> {
+ p!(write("{}", self.unsafety.prefix_str()));
+
+ if self.abi != Abi::Rust {
+ p!(write("extern {} ", self.abi));
+ }
+
+ p!("fn", pretty_fn_sig(self.inputs(), self.c_variadic, self.output()));
+ }
+
+ ty::TraitRef<'tcx> {
+ p!(write("<{} as {}>", self.self_ty(), self.print_only_trait_path()))
+ }
+
+ TraitRefPrintOnlyTraitPath<'tcx> {
+ p!(print_def_path(self.0.def_id, self.0.substs));
+ }
+
+ TraitRefPrintOnlyTraitName<'tcx> {
+ p!(print_def_path(self.0.def_id, &[]));
+ }
+
+ TraitPredPrintModifiersAndPath<'tcx> {
+ if let ty::BoundConstness::ConstIfConst = self.0.constness {
+ p!("~const ")
+ }
+
+ if let ty::ImplPolarity::Negative = self.0.polarity {
+ p!("!")
+ }
+
+ p!(print(self.0.trait_ref.print_only_trait_path()));
+ }
+
+ ty::ParamTy {
+ p!(write("{}", self.name))
+ }
+
+ ty::ParamConst {
+ p!(write("{}", self.name))
+ }
+
+ ty::SubtypePredicate<'tcx> {
+ p!(print(self.a), " <: ", print(self.b))
+ }
+
+ ty::CoercePredicate<'tcx> {
+ p!(print(self.a), " -> ", print(self.b))
+ }
+
+ ty::TraitPredicate<'tcx> {
+ p!(print(self.trait_ref.self_ty()), ": ");
+ if let ty::BoundConstness::ConstIfConst = self.constness && cx.tcx().features().const_trait_impl {
+ p!("~const ");
+ }
+ p!(print(self.trait_ref.print_only_trait_path()))
+ }
+
+ ty::ProjectionPredicate<'tcx> {
+ p!(print(self.projection_ty), " == ", print(self.term))
+ }
+
+ ty::Term<'tcx> {
+ match self {
+ ty::Term::Ty(ty) => p!(print(ty)),
+ ty::Term::Const(c) => p!(print(c)),
+ }
+ }
+
+ ty::ProjectionTy<'tcx> {
+ p!(print_def_path(self.item_def_id, self.substs));
+ }
+
+ ty::ClosureKind {
+ match *self {
+ ty::ClosureKind::Fn => p!("Fn"),
+ ty::ClosureKind::FnMut => p!("FnMut"),
+ ty::ClosureKind::FnOnce => p!("FnOnce"),
+ }
+ }
+
+ ty::Predicate<'tcx> {
+ let binder = self.kind();
+ p!(print(binder))
+ }
+
+ ty::PredicateKind<'tcx> {
+ match *self {
+ ty::PredicateKind::Trait(ref data) => {
+ p!(print(data))
+ }
+ ty::PredicateKind::Subtype(predicate) => p!(print(predicate)),
+ ty::PredicateKind::Coerce(predicate) => p!(print(predicate)),
+ ty::PredicateKind::RegionOutlives(predicate) => p!(print(predicate)),
+ ty::PredicateKind::TypeOutlives(predicate) => p!(print(predicate)),
+ ty::PredicateKind::Projection(predicate) => p!(print(predicate)),
+ ty::PredicateKind::WellFormed(arg) => p!(print(arg), " well-formed"),
+ ty::PredicateKind::ObjectSafe(trait_def_id) => {
+ p!("the trait `", print_def_path(trait_def_id, &[]), "` is object-safe")
+ }
+ ty::PredicateKind::ClosureKind(closure_def_id, _closure_substs, kind) => {
+ p!("the closure `",
+ print_value_path(closure_def_id, &[]),
+ write("` implements the trait `{}`", kind))
+ }
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ p!("the constant `", print_value_path(uv.def.did, uv.substs), "` can be evaluated")
+ }
+ ty::PredicateKind::ConstEquate(c1, c2) => {
+ p!("the constant `", print(c1), "` equals `", print(c2), "`")
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
+ p!("the type `", print(ty), "` is found in the environment")
+ }
+ }
+ }
+
+ GenericArg<'tcx> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => p!(print(lt)),
+ GenericArgKind::Type(ty) => p!(print(ty)),
+ GenericArgKind::Const(ct) => p!(print(ct)),
+ }
+ }
+}
+
+fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, Namespace, DefId)) {
+ // Iterate all local crate items no matter where they are defined.
+ let hir = tcx.hir();
+ for id in hir.items() {
+ if matches!(tcx.def_kind(id.def_id), DefKind::Use) {
+ continue;
+ }
+
+ let item = hir.item(id);
+ if item.ident.name == kw::Empty {
+ continue;
+ }
+
+ let def_id = item.def_id.to_def_id();
+ let ns = tcx.def_kind(def_id).ns().unwrap_or(Namespace::TypeNS);
+ collect_fn(&item.ident, ns, def_id);
+ }
+
+ // Now take care of extern crate items.
+ let queue = &mut Vec::new();
+ let mut seen_defs: DefIdSet = Default::default();
+
+ for &cnum in tcx.crates(()).iter() {
+ let def_id = cnum.as_def_id();
+
+ // Ignore crates that are not direct dependencies.
+ match tcx.extern_crate(def_id) {
+ None => continue,
+ Some(extern_crate) => {
+ if !extern_crate.is_direct() {
+ continue;
+ }
+ }
+ }
+
+ queue.push(def_id);
+ }
+
+ // Iterate external crate defs, but be mindful about visibility.
+ while let Some(def) = queue.pop() {
+ for child in tcx.module_children(def).iter() {
+ if !child.vis.is_public() {
+ continue;
+ }
+
+ match child.res {
+ def::Res::Def(DefKind::AssocTy, _) => {}
+ def::Res::Def(DefKind::TyAlias, _) => {}
+ def::Res::Def(defkind, def_id) => {
+ if let Some(ns) = defkind.ns() {
+ collect_fn(&child.ident, ns, def_id);
+ }
+
+ if matches!(defkind, DefKind::Mod | DefKind::Enum | DefKind::Trait)
+ && seen_defs.insert(def_id)
+ {
+ queue.push(def_id);
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+}
+
+/// The purpose of this function is to collect public symbol names that are unique across all
+/// crates in the build. Later, when printing types, we can use those names instead of the
+/// full exported path to them.
+///
+/// So essentially, if a symbol name can only be imported from one place for a type, and as
+/// long as it was not glob-imported anywhere in the current crate, we can trim its printed
+/// path and print only the name.
+///
+/// This has wide implications for error messages with types, for example, shortening
+/// `std::vec::Vec` to just `Vec`, as long as there is no other `Vec` importable anywhere.
+///
+/// The implementation uses similar import discovery logic to that of 'use' suggestions.
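+///
+/// Illustrative sketch of the poisoning step below (not normative): any name that
+/// appears in a glob import (`use foo::*;`) is pre-inserted as `None` in every
+/// namespace, so a glob-imported `Result` can never be trimmed, even if only one
+/// `Result` is otherwise importable.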
+fn trimmed_def_paths(tcx: TyCtxt<'_>, (): ()) -> FxHashMap<DefId, Symbol> {
+ let mut map: FxHashMap<DefId, Symbol> = FxHashMap::default();
+
+ if let TrimmedDefPaths::GoodPath = tcx.sess.opts.trimmed_def_paths {
+ // For good paths causing this bug, the `rustc_middle::ty::print::with_no_trimmed_paths`
+ // wrapper can be used to suppress this query, in exchange for full paths being formatted.
+ tcx.sess.delay_good_path_bug("trimmed_def_paths constructed");
+ }
+
+ let unique_symbols_rev: &mut FxHashMap<(Namespace, Symbol), Option<DefId>> =
+ &mut FxHashMap::default();
+
+ for symbol_set in tcx.resolutions(()).glob_map.values() {
+ for symbol in symbol_set {
+ unique_symbols_rev.insert((Namespace::TypeNS, *symbol), None);
+ unique_symbols_rev.insert((Namespace::ValueNS, *symbol), None);
+ unique_symbols_rev.insert((Namespace::MacroNS, *symbol), None);
+ }
+ }
+
+ for_each_def(tcx, |ident, ns, def_id| {
+ use std::collections::hash_map::Entry::{Occupied, Vacant};
+
+ match unique_symbols_rev.entry((ns, ident.name)) {
+ Occupied(mut v) => match v.get() {
+ None => {}
+ Some(existing) => {
+ if *existing != def_id {
+ v.insert(None);
+ }
+ }
+ },
+ Vacant(v) => {
+ v.insert(Some(def_id));
+ }
+ }
+ });
+
+ for ((_, symbol), opt_def_id) in unique_symbols_rev.drain() {
+ use std::collections::hash_map::Entry::{Occupied, Vacant};
+
+ if let Some(def_id) = opt_def_id {
+ match map.entry(def_id) {
+ Occupied(mut v) => {
+ // A single DefId can be known under multiple names (e.g.,
+ // with a `pub use ... as ...;`). We need to ensure that the
+ // name placed in this map is chosen deterministically, so
+ // if we find multiple names (`symbol`) resolving to the
+ // same `def_id`, we prefer the lexicographically smallest
+ // name.
+ //
+ // Any stable ordering would be fine here though.
+ if *v.get() != symbol {
+ if v.get().as_str() > symbol.as_str() {
+ v.insert(symbol);
+ }
+ }
+ }
+ Vacant(v) => {
+ v.insert(symbol);
+ }
+ }
+ }
+ }
+
+ map
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { trimmed_def_paths, ..*providers };
+}
+
+#[derive(Default)]
+pub struct OpaqueFnEntry<'tcx> {
+ // The trait ref is already stored as a key, so just track whether we have it as a real predicate.
+ has_fn_once: bool,
+ fn_mut_trait_ref: Option<ty::PolyTraitRef<'tcx>>,
+ fn_trait_ref: Option<ty::PolyTraitRef<'tcx>>,
+ return_ty: Option<ty::Binder<'tcx, Term<'tcx>>>,
+}
diff --git a/compiler/rustc_middle/src/ty/query.rs b/compiler/rustc_middle/src/ty/query.rs
new file mode 100644
index 000000000..2452bcf6a
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query.rs
@@ -0,0 +1,386 @@
+use crate::dep_graph;
+use crate::infer::canonical::{self, Canonical};
+use crate::lint::LintLevelMap;
+use crate::metadata::ModChild;
+use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
+use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
+use crate::middle::lib_features::LibFeatures;
+use crate::middle::privacy::AccessLevels;
+use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes};
+use crate::middle::stability::{self, DeprecationEntry};
+use crate::mir;
+use crate::mir::interpret::GlobalId;
+use crate::mir::interpret::{
+ ConstValue, EvalToAllocationRawResult, EvalToConstValueResult, EvalToValTreeResult,
+};
+use crate::mir::interpret::{LitToConstError, LitToConstInput};
+use crate::mir::mono::CodegenUnit;
+use crate::thir;
+use crate::traits::query::{
+ CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
+ CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
+ CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, NoSolution,
+};
+use crate::traits::query::{
+ DropckConstraint, DropckOutlivesResult, MethodAutoderefStepsResult, NormalizationResult,
+ OutlivesBound,
+};
+use crate::traits::specialization_graph;
+use crate::traits::{self, ImplSource};
+use crate::ty::fast_reject::SimplifiedType;
+use crate::ty::layout::TyAndLayout;
+use crate::ty::subst::{GenericArg, SubstsRef};
+use crate::ty::util::AlwaysRequiresDrop;
+use crate::ty::GeneratorDiagnosticData;
+use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
+use rustc_ast as ast;
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_attr as attr;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
+use rustc_data_structures::steal::Steal;
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId};
+use rustc_hir::lang_items::{LangItem, LanguageItems};
+use rustc_hir::{Crate, ItemLocalId, TraitCandidate};
+use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
+use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
+use rustc_session::cstore::{CrateDepKind, CrateSource};
+use rustc_session::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLib};
+use rustc_session::utils::NativeLibKind;
+use rustc_session::Limits;
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi;
+use rustc_target::spec::PanicStrategy;
+use std::ops::Deref;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+pub(crate) use rustc_query_system::query::QueryJobId;
+use rustc_query_system::query::*;
+
+#[derive(Copy, Clone)]
+pub struct TyCtxtAt<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub span: Span,
+}
+
+impl<'tcx> Deref for TyCtxtAt<'tcx> {
+ type Target = TyCtxt<'tcx>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ &self.tcx
+ }
+}
+
+#[derive(Copy, Clone)]
+pub struct TyCtxtEnsure<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
+ /// are executed instead of just returning their results.
+ #[inline(always)]
+ pub fn ensure(self) -> TyCtxtEnsure<'tcx> {
+ TyCtxtEnsure { tcx: self }
+ }
+
+ /// Returns a transparent wrapper for `TyCtxt` which uses
+ /// `span` as the location of queries performed through it.
+ #[inline(always)]
+ pub fn at(self, span: Span) -> TyCtxtAt<'tcx> {
+ TyCtxtAt { tcx: self, span }
+ }
+
+ pub fn try_mark_green(self, dep_node: &dep_graph::DepNode) -> bool {
+ self.queries.try_mark_green(self, dep_node)
+ }
+}
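+
+// Usage sketch (`some_query` stands in for any generated query method, not a
+// real name): `tcx.ensure().some_query(key)` forces the query to run for its
+// side effects without returning a value, while `tcx.at(span).some_query(key)`
+// attributes diagnostics and cycle errors to `span` rather than `DUMMY_SP`.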
+
+/// Helper for `TyCtxtEnsure` to avoid a closure.
+#[inline(always)]
+fn noop<T>(_: &T) {}
+
+/// Helper to ensure that queries only return `Copy` types.
+#[inline(always)]
+fn copy<T: Copy>(x: &T) -> T {
+ *x
+}
+
+macro_rules! query_helper_param_ty {
+ (DefId) => { impl IntoQueryParam<DefId> };
+ ($K:ty) => { $K };
+}
+
+macro_rules! query_storage {
+ ([][$K:ty, $V:ty]) => {
+ <DefaultCacheSelector as CacheSelector<$K, $V>>::Cache
+ };
+ ([(storage $ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
+ <$ty as CacheSelector<$K, $V>>::Cache
+ };
+ ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
+ query_storage!([$($modifiers)*][$($args)*])
+ };
+}
+
+macro_rules! separate_provide_extern_decl {
+ ([][$name:ident]) => {
+ ()
+ };
+ ([(separate_provide_extern) $($rest:tt)*][$name:ident]) => {
+ for<'tcx> fn(
+ TyCtxt<'tcx>,
+ query_keys::$name<'tcx>,
+ ) -> query_values::$name<'tcx>
+ };
+ ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
+ separate_provide_extern_decl!([$($modifiers)*][$($args)*])
+ };
+}
+
+macro_rules! separate_provide_extern_default {
+ ([][$name:ident]) => {
+ ()
+ };
+ ([(separate_provide_extern) $($rest:tt)*][$name:ident]) => {
+ |_, key| bug!(
+ "`tcx.{}({:?})` unsupported by its crate; \
+ perhaps the `{}` query was never assigned a provider function",
+ stringify!($name),
+ key,
+ stringify!($name),
+ )
+ };
+ ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
+ separate_provide_extern_default!([$($modifiers)*][$($args)*])
+ };
+}
+
+macro_rules! opt_remap_env_constness {
+ ([][$name:ident]) => {};
+ ([(remap_env_constness) $($rest:tt)*][$name:ident]) => {
+ let $name = $name.without_const();
+ };
+ ([$other:tt $($modifiers:tt)*][$name:ident]) => {
+ opt_remap_env_constness!([$($modifiers)*][$name])
+ };
+}
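+
+// For example (illustrative): on a query marked `remap_env_constness` whose key
+// is a `ty::ParamEnvAnd<'tcx, T>`, this macro expands to
+// `let key = key.without_const();`, so const and non-const param-envs share a
+// single cache entry.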
+
+macro_rules! define_callbacks {
+ (<$tcx:tt>
+ $($(#[$attr:meta])*
+ [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
+
+ // HACK(eddyb) this is like the `impl QueryConfig for queries::$name`
+ // below, but using type aliases instead of associated types, to bypass
+ // the limitations around normalizing under HRTB - for example, this:
+ // `for<'tcx> fn(...) -> <queries::$name<'tcx> as QueryConfig<TyCtxt<'tcx>>>::Value`
+ // doesn't currently normalize to `for<'tcx> fn(...) -> query_values::$name<'tcx>`.
+ // This is primarily used by the `provide!` macro in `rustc_metadata`.
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_keys {
+ use super::*;
+
+ $(pub type $name<$tcx> = $($K)*;)*
+ }
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_values {
+ use super::*;
+
+ $(pub type $name<$tcx> = $V;)*
+ }
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_storage {
+ use super::*;
+
+ $(pub type $name<$tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)*
+ }
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_stored {
+ use super::*;
+
+ $(pub type $name<$tcx> = <query_storage::$name<$tcx> as QueryStorage>::Stored;)*
+ }
+
+ #[derive(Default)]
+ pub struct QueryCaches<$tcx> {
+ $($(#[$attr])* pub $name: query_storage::$name<$tcx>,)*
+ }
+
+ impl<$tcx> TyCtxtEnsure<$tcx> {
+ $($(#[$attr])*
+ #[inline(always)]
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
+ let key = key.into_query_param();
+ opt_remap_env_constness!([$($modifiers)*][key]);
+
+ let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, noop);
+
+ match cached {
+ Ok(()) => return,
+ Err(()) => (),
+ }
+
+ self.tcx.queries.$name(self.tcx, DUMMY_SP, key, QueryMode::Ensure);
+ })*
+ }
+
+ impl<$tcx> TyCtxt<$tcx> {
+ $($(#[$attr])*
+ #[inline(always)]
+ #[must_use]
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+ {
+ self.at(DUMMY_SP).$name(key)
+ })*
+ }
+
+ impl<$tcx> TyCtxtAt<$tcx> {
+ $($(#[$attr])*
+ #[inline(always)]
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+ {
+ let key = key.into_query_param();
+ opt_remap_env_constness!([$($modifiers)*][key]);
+
+ let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, copy);
+
+ match cached {
+ Ok(value) => return value,
+ Err(()) => (),
+ }
+
+ self.tcx.queries.$name(self.tcx, self.span, key, QueryMode::Get).unwrap()
+ })*
+ }
+
+ pub struct Providers {
+ $(pub $name: for<'tcx> fn(
+ TyCtxt<'tcx>,
+ query_keys::$name<'tcx>,
+ ) -> query_values::$name<'tcx>,)*
+ }
+
+ pub struct ExternProviders {
+ $(pub $name: separate_provide_extern_decl!([$($modifiers)*][$name]),)*
+ }
+
+ impl Default for Providers {
+ fn default() -> Self {
+ Providers {
+ $($name: |_, key| bug!(
+ "`tcx.{}({:?})` unsupported by its crate; \
+ perhaps the `{}` query was never assigned a provider function",
+ stringify!($name),
+ key,
+ stringify!($name),
+ ),)*
+ }
+ }
+ }
+
+ impl Default for ExternProviders {
+ fn default() -> Self {
+ ExternProviders {
+ $($name: separate_provide_extern_default!([$($modifiers)*][$name]),)*
+ }
+ }
+ }
+
+ impl Copy for Providers {}
+ impl Clone for Providers {
+ fn clone(&self) -> Self { *self }
+ }
+
+ impl Copy for ExternProviders {}
+ impl Clone for ExternProviders {
+ fn clone(&self) -> Self { *self }
+ }
+
+ pub trait QueryEngine<'tcx>: rustc_data_structures::sync::Sync {
+ fn as_any(&'tcx self) -> &'tcx dyn std::any::Any;
+
+ fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool;
+
+ $($(#[$attr])*
+ fn $name(
+ &'tcx self,
+ tcx: TyCtxt<$tcx>,
+ span: Span,
+ key: query_keys::$name<$tcx>,
+ mode: QueryMode,
+ ) -> Option<query_stored::$name<$tcx>>;)*
+ }
+ };
+}
+
+// Each of these queries corresponds to a function pointer field in the
+// `Providers` struct for requesting a value of that type, and a method
+// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
+// which memoizes and does dep-graph tracking, wrapping around the actual
+// `Providers` that the driver creates (using several `rustc_*` crates).
+//
+// The result type of each query must implement `Clone`, and additionally
+// `ty::query::values::Value`, which produces an appropriate placeholder
+// (error) value if the query resulted in a query cycle.
+// Queries marked with `fatal_cycle` do not need the latter implementation,
+// as they will raise a fatal error on query cycles instead.
+
+rustc_query_append! { [define_callbacks!][<'tcx>] }
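+
+// Expansion sketch for a hypothetical entry `[] fn example_query(DefId) -> Ty<'tcx>`
+// (`example_query` is illustrative only): `define_callbacks!` emits the aliases
+// `query_keys::example_query<'tcx> = DefId` and
+// `query_values::example_query<'tcx> = Ty<'tcx>`, a cache field in `QueryCaches`,
+// a provider field in `Providers`, and inherent methods so that callers can write
+// `tcx.example_query(def_id)` or `tcx.at(span).example_query(def_id)`.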
+
+mod sealed {
+ use super::{DefId, LocalDefId};
+
+ /// An analogue of the `Into` trait that's intended only for query parameters.
+ ///
+ /// This exists to allow queries to accept either `DefId` or `LocalDefId` while requiring that the
+ /// user call `to_def_id` to convert between them everywhere else.
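+ ///
+ /// For example, `tcx.def_kind(local_def_id)` compiles because
+ /// `LocalDefId: IntoQueryParam<DefId>` (see the impl below), whereas
+ /// non-query code must call `local_def_id.to_def_id()` explicitly.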
+ pub trait IntoQueryParam<P> {
+ fn into_query_param(self) -> P;
+ }
+
+ impl<P> IntoQueryParam<P> for P {
+ #[inline(always)]
+ fn into_query_param(self) -> P {
+ self
+ }
+ }
+
+ impl<'a, P: Copy> IntoQueryParam<P> for &'a P {
+ #[inline(always)]
+ fn into_query_param(self) -> P {
+ *self
+ }
+ }
+
+ impl IntoQueryParam<DefId> for LocalDefId {
+ #[inline(always)]
+ fn into_query_param(self) -> DefId {
+ self.to_def_id()
+ }
+ }
+}
+
+use sealed::IntoQueryParam;
+
+impl<'tcx> TyCtxt<'tcx> {
+ pub fn def_kind(self, def_id: impl IntoQueryParam<DefId>) -> DefKind {
+ let def_id = def_id.into_query_param();
+ self.opt_def_kind(def_id)
+ .unwrap_or_else(|| bug!("def_kind: unsupported node: {:?}", def_id))
+ }
+}
+
+impl<'tcx> TyCtxtAt<'tcx> {
+ pub fn def_kind(self, def_id: impl IntoQueryParam<DefId>) -> DefKind {
+ let def_id = def_id.into_query_param();
+ self.opt_def_kind(def_id)
+ .unwrap_or_else(|| bug!("def_kind: unsupported node: {:?}", def_id))
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
new file mode 100644
index 000000000..818affa71
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -0,0 +1,841 @@
+//! Generalized type relating mechanism.
+//!
+//! A type relation `R` relates a pair of values `(A, B)`. `A` and `B` are usually
+//! types or regions but can be other things. Examples of type relations are
+//! subtyping, type equality, etc.
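+//!
+//! As a rough illustration: relating `&'a T` with `&'b U` first relates the
+//! regions `'a` and `'b` (contravariantly, as in `super_relate_tys` below) and
+//! then the referent types `T` and `U`.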
+
+use crate::ty::error::{ExpectedFound, TypeError};
+use crate::ty::subst::{GenericArg, GenericArgKind, Subst, SubstsRef};
+use crate::ty::{self, ImplSubject, Term, Ty, TyCtxt, TypeFoldable};
+use rustc_hir as ast;
+use rustc_hir::def_id::DefId;
+use rustc_span::DUMMY_SP;
+use rustc_target::spec::abi;
+use std::iter;
+
+pub type RelateResult<'tcx, T> = Result<T, TypeError<'tcx>>;
+
+#[derive(Clone, Debug)]
+pub enum Cause {
+ ExistentialRegionBound, // relating an existential region bound
+}
+
+pub trait TypeRelation<'tcx>: Sized {
+ fn tcx(&self) -> TyCtxt<'tcx>;
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx>;
+
+ /// Returns a static string we can use for printouts.
+ fn tag(&self) -> &'static str;
+
+ /// Returns `true` if the value `a` is the "expected" type in the
+ /// relation. Just affects error messages.
+ fn a_is_expected(&self) -> bool;
+
+ fn with_cause<F, R>(&mut self, _cause: Cause, f: F) -> R
+ where
+ F: FnOnce(&mut Self) -> R,
+ {
+ f(self)
+ }
+
+ /// Generic relation routine suitable for almost anything.
+ fn relate<T: Relate<'tcx>>(&mut self, a: T, b: T) -> RelateResult<'tcx, T> {
+ Relate::relate(self, a, b)
+ }
+
+ /// Relate the two substitutions for the given item. The default
+ /// is to look up the variance for the item and proceed
+ /// accordingly.
+ fn relate_item_substs(
+ &mut self,
+ item_def_id: DefId,
+ a_subst: SubstsRef<'tcx>,
+ b_subst: SubstsRef<'tcx>,
+ ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ debug!(
+ "relate_item_substs(item_def_id={:?}, a_subst={:?}, b_subst={:?})",
+ item_def_id, a_subst, b_subst
+ );
+
+ let tcx = self.tcx();
+ let opt_variances = tcx.variances_of(item_def_id);
+ relate_substs_with_variances(self, item_def_id, opt_variances, a_subst, b_subst)
+ }
+
+ /// Switch variance for the purpose of relating `a` and `b`.
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ variance: ty::Variance,
+ info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T>;
+
+ // Overridable relations. You shouldn't typically call these
+ // directly; instead, call `relate()`, which in turn calls
+ // these. This is not only more uniform but also allows us to add
+ // additional hooks for other types in the future if needed
+ // without making older code, which called `relate`, obsolete.
+
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>>;
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>>;
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>>;
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>;
+}
+
+pub trait Relate<'tcx>: TypeFoldable<'tcx> + Copy {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Self,
+ b: Self,
+ ) -> RelateResult<'tcx, Self>;
+}
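+
+// A minimal sketch of how these traits compose (illustrative, not a real
+// relation): a strict-equality relation could implement `TypeRelation::tys` as
+// `if a == b { Ok(a) } else { Err(TypeError::Sorts(expected_found(self, a, b))) }`
+// and route every other case through the blanket `relate` machinery above.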
+
+///////////////////////////////////////////////////////////////////////////
+// Relate impls
+
+pub fn relate_type_and_mut<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::TypeAndMut<'tcx>,
+ b: ty::TypeAndMut<'tcx>,
+ base_ty: Ty<'tcx>,
+) -> RelateResult<'tcx, ty::TypeAndMut<'tcx>> {
+ debug!("{}.mts({:?}, {:?})", relation.tag(), a, b);
+ if a.mutbl != b.mutbl {
+ Err(TypeError::Mutability)
+ } else {
+ let mutbl = a.mutbl;
+ let (variance, info) = match mutbl {
+ ast::Mutability::Not => (ty::Covariant, ty::VarianceDiagInfo::None),
+ ast::Mutability::Mut => {
+ (ty::Invariant, ty::VarianceDiagInfo::Invariant { ty: base_ty, param_index: 0 })
+ }
+ };
+ let ty = relation.relate_with_variance(variance, info, a.ty, b.ty)?;
+ Ok(ty::TypeAndMut { ty, mutbl })
+ }
+}
+
+#[inline]
+pub fn relate_substs<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a_subst: SubstsRef<'tcx>,
+ b_subst: SubstsRef<'tcx>,
+) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ relation.tcx().mk_substs(iter::zip(a_subst, b_subst).map(|(a, b)| {
+ relation.relate_with_variance(ty::Invariant, ty::VarianceDiagInfo::default(), a, b)
+ }))
+}
+
+pub fn relate_substs_with_variances<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ ty_def_id: DefId,
+ variances: &[ty::Variance],
+ a_subst: SubstsRef<'tcx>,
+ b_subst: SubstsRef<'tcx>,
+) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ let tcx = relation.tcx();
+
+ let mut cached_ty = None;
+ let params = iter::zip(a_subst, b_subst).enumerate().map(|(i, (a, b))| {
+ let variance = variances[i];
+ let variance_info = if variance == ty::Invariant {
+ let ty =
+ *cached_ty.get_or_insert_with(|| tcx.bound_type_of(ty_def_id).subst(tcx, a_subst));
+ ty::VarianceDiagInfo::Invariant { ty, param_index: i.try_into().unwrap() }
+ } else {
+ ty::VarianceDiagInfo::default()
+ };
+ relation.relate_with_variance(variance, variance_info, a, b)
+ });
+
+ tcx.mk_substs(params)
+}
+
+impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::FnSig<'tcx>,
+ b: ty::FnSig<'tcx>,
+ ) -> RelateResult<'tcx, ty::FnSig<'tcx>> {
+ let tcx = relation.tcx();
+
+ if a.c_variadic != b.c_variadic {
+ return Err(TypeError::VariadicMismatch(expected_found(
+ relation,
+ a.c_variadic,
+ b.c_variadic,
+ )));
+ }
+ let unsafety = relation.relate(a.unsafety, b.unsafety)?;
+ let abi = relation.relate(a.abi, b.abi)?;
+
+ if a.inputs().len() != b.inputs().len() {
+ return Err(TypeError::ArgCount);
+ }
+
+ let inputs_and_output = iter::zip(a.inputs(), b.inputs())
+ .map(|(&a, &b)| ((a, b), false))
+ .chain(iter::once(((a.output(), b.output()), true)))
+ .map(|((a, b), is_output)| {
+ if is_output {
+ relation.relate(a, b)
+ } else {
+ relation.relate_with_variance(
+ ty::Contravariant,
+ ty::VarianceDiagInfo::default(),
+ a,
+ b,
+ )
+ }
+ })
+ .enumerate()
+ .map(|(i, r)| match r {
+ Err(TypeError::Sorts(exp_found) | TypeError::ArgumentSorts(exp_found, _)) => {
+ Err(TypeError::ArgumentSorts(exp_found, i))
+ }
+ Err(TypeError::Mutability | TypeError::ArgumentMutability(_)) => {
+ Err(TypeError::ArgumentMutability(i))
+ }
+ r => r,
+ });
+ Ok(ty::FnSig {
+ inputs_and_output: tcx.mk_type_list(inputs_and_output)?,
+ c_variadic: a.c_variadic,
+ unsafety,
+ abi,
+ })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::BoundConstness {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::BoundConstness,
+ b: ty::BoundConstness,
+ ) -> RelateResult<'tcx, ty::BoundConstness> {
+ if a != b {
+ Err(TypeError::ConstnessMismatch(expected_found(relation, a, b)))
+ } else {
+ Ok(a)
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ast::Unsafety {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ast::Unsafety,
+ b: ast::Unsafety,
+ ) -> RelateResult<'tcx, ast::Unsafety> {
+ if a != b {
+ Err(TypeError::UnsafetyMismatch(expected_found(relation, a, b)))
+ } else {
+ Ok(a)
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for abi::Abi {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: abi::Abi,
+ b: abi::Abi,
+ ) -> RelateResult<'tcx, abi::Abi> {
+ if a == b { Ok(a) } else { Err(TypeError::AbiMismatch(expected_found(relation, a, b))) }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ProjectionTy<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ProjectionTy<'tcx>,
+ b: ty::ProjectionTy<'tcx>,
+ ) -> RelateResult<'tcx, ty::ProjectionTy<'tcx>> {
+ if a.item_def_id != b.item_def_id {
+ Err(TypeError::ProjectionMismatched(expected_found(
+ relation,
+ a.item_def_id,
+ b.item_def_id,
+ )))
+ } else {
+ let substs = relation.relate(a.substs, b.substs)?;
+ Ok(ty::ProjectionTy { item_def_id: a.item_def_id, substs: &substs })
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ExistentialProjection<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ExistentialProjection<'tcx>,
+ b: ty::ExistentialProjection<'tcx>,
+ ) -> RelateResult<'tcx, ty::ExistentialProjection<'tcx>> {
+ if a.item_def_id != b.item_def_id {
+ Err(TypeError::ProjectionMismatched(expected_found(
+ relation,
+ a.item_def_id,
+ b.item_def_id,
+ )))
+ } else {
+ let term = relation.relate_with_variance(
+ ty::Invariant,
+ ty::VarianceDiagInfo::default(),
+ a.term,
+ b.term,
+ )?;
+ let substs = relation.relate_with_variance(
+ ty::Invariant,
+ ty::VarianceDiagInfo::default(),
+ a.substs,
+ b.substs,
+ )?;
+ Ok(ty::ExistentialProjection { item_def_id: a.item_def_id, substs, term })
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::TraitRef<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::TraitRef<'tcx>,
+ b: ty::TraitRef<'tcx>,
+ ) -> RelateResult<'tcx, ty::TraitRef<'tcx>> {
+ // Different traits cannot be related.
+ if a.def_id != b.def_id {
+ Err(TypeError::Traits(expected_found(relation, a.def_id, b.def_id)))
+ } else {
+ let substs = relate_substs(relation, a.substs, b.substs)?;
+ Ok(ty::TraitRef { def_id: a.def_id, substs })
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ExistentialTraitRef<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ExistentialTraitRef<'tcx>,
+ b: ty::ExistentialTraitRef<'tcx>,
+ ) -> RelateResult<'tcx, ty::ExistentialTraitRef<'tcx>> {
+ // Different traits cannot be related.
+ if a.def_id != b.def_id {
+ Err(TypeError::Traits(expected_found(relation, a.def_id, b.def_id)))
+ } else {
+ let substs = relate_substs(relation, a.substs, b.substs)?;
+ Ok(ty::ExistentialTraitRef { def_id: a.def_id, substs })
+ }
+ }
+}
+
+#[derive(Copy, Debug, Clone, TypeFoldable, TypeVisitable)]
+struct GeneratorWitness<'tcx>(&'tcx ty::List<Ty<'tcx>>);
+
+impl<'tcx> Relate<'tcx> for GeneratorWitness<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: GeneratorWitness<'tcx>,
+ b: GeneratorWitness<'tcx>,
+ ) -> RelateResult<'tcx, GeneratorWitness<'tcx>> {
+ assert_eq!(a.0.len(), b.0.len());
+ let tcx = relation.tcx();
+ let types = tcx.mk_type_list(iter::zip(a.0, b.0).map(|(a, b)| relation.relate(a, b)))?;
+ Ok(GeneratorWitness(types))
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ImplSubject<'tcx> {
+ #[inline]
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ImplSubject<'tcx>,
+ b: ImplSubject<'tcx>,
+ ) -> RelateResult<'tcx, ImplSubject<'tcx>> {
+ match (a, b) {
+ (ImplSubject::Trait(trait_ref_a), ImplSubject::Trait(trait_ref_b)) => {
+ let trait_ref = ty::TraitRef::relate(relation, trait_ref_a, trait_ref_b)?;
+ Ok(ImplSubject::Trait(trait_ref))
+ }
+ (ImplSubject::Inherent(ty_a), ImplSubject::Inherent(ty_b)) => {
+ let ty = Ty::relate(relation, ty_a, ty_b)?;
+ Ok(ImplSubject::Inherent(ty))
+ }
+ (ImplSubject::Trait(_), ImplSubject::Inherent(_))
+ | (ImplSubject::Inherent(_), ImplSubject::Trait(_)) => {
+ bug!("can not relate TraitRef and Ty");
+ }
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for Ty<'tcx> {
+ #[inline]
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ ) -> RelateResult<'tcx, Ty<'tcx>> {
+ relation.tys(a, b)
+ }
+}
+
+/// The main "type relation" routine. Note that this does not handle
+/// inference artifacts, so you should filter those out before calling
+/// it.
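+///
+/// For example, relating `[T; N]` with `[U; M]` first relates the element
+/// types and then the length constants; when both lengths are known but
+/// unequal, they are eagerly evaluated so a `FixedArraySize` error can be
+/// reported.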
+pub fn super_relate_tys<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+) -> RelateResult<'tcx, Ty<'tcx>> {
+ let tcx = relation.tcx();
+ debug!("super_relate_tys: a={:?} b={:?}", a, b);
+ match (a.kind(), b.kind()) {
+ (&ty::Infer(_), _) | (_, &ty::Infer(_)) => {
+ // The caller should handle these cases!
+ bug!("var types encountered in super_relate_tys")
+ }
+
+ (ty::Bound(..), _) | (_, ty::Bound(..)) => {
+ bug!("bound types encountered in super_relate_tys")
+ }
+
+ (&ty::Error(_), _) | (_, &ty::Error(_)) => Ok(tcx.ty_error()),
+
+ (&ty::Never, _)
+ | (&ty::Char, _)
+ | (&ty::Bool, _)
+ | (&ty::Int(_), _)
+ | (&ty::Uint(_), _)
+ | (&ty::Float(_), _)
+ | (&ty::Str, _)
+ if a == b =>
+ {
+ Ok(a)
+ }
+
+ (&ty::Param(ref a_p), &ty::Param(ref b_p)) if a_p.index == b_p.index => Ok(a),
+
+ (ty::Placeholder(p1), ty::Placeholder(p2)) if p1 == p2 => Ok(a),
+
+ (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs)) if a_def == b_def => {
+ let substs = relation.relate_item_substs(a_def.did(), a_substs, b_substs)?;
+ Ok(tcx.mk_adt(a_def, substs))
+ }
+
+ (&ty::Foreign(a_id), &ty::Foreign(b_id)) if a_id == b_id => Ok(tcx.mk_foreign(a_id)),
+
+ (&ty::Dynamic(a_obj, a_region), &ty::Dynamic(b_obj, b_region)) => {
+ let region_bound = relation.with_cause(Cause::ExistentialRegionBound, |relation| {
+ relation.relate_with_variance(
+ ty::Contravariant,
+ ty::VarianceDiagInfo::default(),
+ a_region,
+ b_region,
+ )
+ })?;
+ Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound))
+ }
+
+ (&ty::Generator(a_id, a_substs, movability), &ty::Generator(b_id, b_substs, _))
+ if a_id == b_id =>
+ {
+ // All Generator types with the same id represent
+ // the (anonymous) type of the same generator expression. So
+ // all of their regions should be equated.
+ let substs = relation.relate(a_substs, b_substs)?;
+ Ok(tcx.mk_generator(a_id, substs, movability))
+ }
+
+ (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) => {
+ // Wrap our types in a temporary `GeneratorWitness` struct
+ // inside the binder so we can relate them.
+ let a_types = a_types.map_bound(GeneratorWitness);
+ let b_types = b_types.map_bound(GeneratorWitness);
+ // Then remove the `GeneratorWitness` for the result.
+ let types = relation.relate(a_types, b_types)?.map_bound(|witness| witness.0);
+ Ok(tcx.mk_generator_witness(types))
+ }
+
+ (&ty::Closure(a_id, a_substs), &ty::Closure(b_id, b_substs)) if a_id == b_id => {
+ // All Closure types with the same id represent
+ // the (anonymous) type of the same closure expression. So
+ // all of their regions should be equated.
+ let substs = relation.relate(a_substs, b_substs)?;
+ Ok(tcx.mk_closure(a_id, &substs))
+ }
+
+ (&ty::RawPtr(a_mt), &ty::RawPtr(b_mt)) => {
+ let mt = relate_type_and_mut(relation, a_mt, b_mt, a)?;
+ Ok(tcx.mk_ptr(mt))
+ }
+
+ (&ty::Ref(a_r, a_ty, a_mutbl), &ty::Ref(b_r, b_ty, b_mutbl)) => {
+ let r = relation.relate_with_variance(
+ ty::Contravariant,
+ ty::VarianceDiagInfo::default(),
+ a_r,
+ b_r,
+ )?;
+ let a_mt = ty::TypeAndMut { ty: a_ty, mutbl: a_mutbl };
+ let b_mt = ty::TypeAndMut { ty: b_ty, mutbl: b_mutbl };
+ let mt = relate_type_and_mut(relation, a_mt, b_mt, a)?;
+ Ok(tcx.mk_ref(r, mt))
+ }
+
+ (&ty::Array(a_t, sz_a), &ty::Array(b_t, sz_b)) => {
+ let t = relation.relate(a_t, b_t)?;
+ match relation.relate(sz_a, sz_b) {
+ Ok(sz) => Ok(tcx.mk_ty(ty::Array(t, sz))),
+ Err(err) => {
+ // Check whether the lengths are both concrete/known values,
+ // but are unequal, for better diagnostics.
+ //
+ // It might seem dubious to eagerly evaluate these constants here;
+ // however, we cannot end up with errors in `Relate` during both
+ // `type_of` and `predicates_of`. This means that evaluating the
+ // constants should not cause cycle errors here.
+ let sz_a = sz_a.try_eval_usize(tcx, relation.param_env());
+ let sz_b = sz_b.try_eval_usize(tcx, relation.param_env());
+ match (sz_a, sz_b) {
+ (Some(sz_a_val), Some(sz_b_val)) if sz_a_val != sz_b_val => Err(
+ TypeError::FixedArraySize(expected_found(relation, sz_a_val, sz_b_val)),
+ ),
+ _ => Err(err),
+ }
+ }
+ }
+ }
+
+ (&ty::Slice(a_t), &ty::Slice(b_t)) => {
+ let t = relation.relate(a_t, b_t)?;
+ Ok(tcx.mk_slice(t))
+ }
+
+ (&ty::Tuple(as_), &ty::Tuple(bs)) => {
+ if as_.len() == bs.len() {
+ Ok(tcx.mk_tup(iter::zip(as_, bs).map(|(a, b)| relation.relate(a, b)))?)
+ } else if !(as_.is_empty() || bs.is_empty()) {
+ Err(TypeError::TupleSize(expected_found(relation, as_.len(), bs.len())))
+ } else {
+ Err(TypeError::Sorts(expected_found(relation, a, b)))
+ }
+ }
+
+ (&ty::FnDef(a_def_id, a_substs), &ty::FnDef(b_def_id, b_substs))
+ if a_def_id == b_def_id =>
+ {
+ let substs = relation.relate_item_substs(a_def_id, a_substs, b_substs)?;
+ Ok(tcx.mk_fn_def(a_def_id, substs))
+ }
+
+ (&ty::FnPtr(a_fty), &ty::FnPtr(b_fty)) => {
+ let fty = relation.relate(a_fty, b_fty)?;
+ Ok(tcx.mk_fn_ptr(fty))
+ }
+
+ // These two are already handled downstream in the case of lazy normalization.
+ (&ty::Projection(a_data), &ty::Projection(b_data)) => {
+ let projection_ty = relation.relate(a_data, b_data)?;
+ Ok(tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs))
+ }
+
+ (&ty::Opaque(a_def_id, a_substs), &ty::Opaque(b_def_id, b_substs))
+ if a_def_id == b_def_id =>
+ {
+ let substs = relate_substs(relation, a_substs, b_substs)?;
+ Ok(tcx.mk_opaque(a_def_id, substs))
+ }
+
+ _ => Err(TypeError::Sorts(expected_found(relation, a, b))),
+ }
+}
+
+/// The main "const relation" routine. Note that this does not handle
+/// inference artifacts, so you should filter those out before calling
+/// it.
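+///
+/// Sketch of the behaviour below: both constants are eagerly evaluated first,
+/// and two `ConstKind::Value`s then unify iff they are equal, which restricts
+/// unification to primitives and structural-match values.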
+pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ debug!("{}.super_relate_consts(a = {:?}, b = {:?})", relation.tag(), a, b);
+ let tcx = relation.tcx();
+
+ let a_ty;
+ let b_ty;
+ if relation.tcx().features().adt_const_params {
+ a_ty = tcx.normalize_erasing_regions(relation.param_env(), a.ty());
+ b_ty = tcx.normalize_erasing_regions(relation.param_env(), b.ty());
+ } else {
+ a_ty = tcx.erase_regions(a.ty());
+ b_ty = tcx.erase_regions(b.ty());
+ }
+ if a_ty != b_ty {
+ relation.tcx().sess.delay_span_bug(
+ DUMMY_SP,
+ &format!("cannot relate constants of different types: {} != {}", a_ty, b_ty),
+ );
+ }
+
+ let eagerly_eval = |x: ty::Const<'tcx>| x.eval(tcx, relation.param_env());
+ let a = eagerly_eval(a);
+ let b = eagerly_eval(b);
+
+ // Currently, the values that can be unified are primitive types,
+ // and those that derive both `PartialEq` and `Eq`, corresponding
+ // to structural-match types.
+ let is_match = match (a.kind(), b.kind()) {
+ (ty::ConstKind::Infer(_), _) | (_, ty::ConstKind::Infer(_)) => {
+ // The caller should handle these cases!
+ bug!("var types encountered in super_relate_consts: {:?} {:?}", a, b)
+ }
+
+ (ty::ConstKind::Error(_), _) => return Ok(a),
+ (_, ty::ConstKind::Error(_)) => return Ok(b),
+
+ (ty::ConstKind::Param(a_p), ty::ConstKind::Param(b_p)) => a_p.index == b_p.index,
+ (ty::ConstKind::Placeholder(p1), ty::ConstKind::Placeholder(p2)) => p1 == p2,
+ (ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => a_val == b_val,
+
+ (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu))
+ if tcx.features().generic_const_exprs =>
+ {
+ tcx.try_unify_abstract_consts(relation.param_env().and((au.shrink(), bu.shrink())))
+ }
+
+ // While this is slightly incorrect, it shouldn't matter for `min_const_generics`
+ // and is the better alternative to waiting until `generic_const_exprs` can
+ // be stabilized.
+ (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu))
+ if au.def == bu.def && au.promoted == bu.promoted =>
+ {
+ let substs = relation.relate_with_variance(
+ ty::Variance::Invariant,
+ ty::VarianceDiagInfo::default(),
+ au.substs,
+ bu.substs,
+ )?;
+ return Ok(tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: au.def,
+ substs,
+ promoted: au.promoted,
+ }),
+ ty: a.ty(),
+ }));
+ }
+ _ => false,
+ };
+ if is_match { Ok(a) } else { Err(TypeError::ConstMismatch(expected_found(relation, a, b))) }
+}
+
+impl<'tcx> Relate<'tcx> for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Self,
+ b: Self,
+ ) -> RelateResult<'tcx, Self> {
+ let tcx = relation.tcx();
+
+ // FIXME: this is wasteful, but we want to do a perf run to see how slow it is.
+ // We need to perform this deduplication as we sometimes generate duplicate projections
+ // in `a`.
+ let mut a_v: Vec<_> = a.into_iter().collect();
+ let mut b_v: Vec<_> = b.into_iter().collect();
+ // `skip_binder` here is okay because `stable_cmp` doesn't look at binders
+ a_v.sort_by(|a, b| a.skip_binder().stable_cmp(tcx, &b.skip_binder()));
+ a_v.dedup();
+ b_v.sort_by(|a, b| a.skip_binder().stable_cmp(tcx, &b.skip_binder()));
+ b_v.dedup();
+ if a_v.len() != b_v.len() {
+ return Err(TypeError::ExistentialMismatch(expected_found(relation, a, b)));
+ }
+
+ let v = iter::zip(a_v, b_v).map(|(ep_a, ep_b)| {
+ use crate::ty::ExistentialPredicate::*;
+ match (ep_a.skip_binder(), ep_b.skip_binder()) {
+ (Trait(a), Trait(b)) => Ok(ep_a
+ .rebind(Trait(relation.relate(ep_a.rebind(a), ep_b.rebind(b))?.skip_binder()))),
+ (Projection(a), Projection(b)) => Ok(ep_a.rebind(Projection(
+ relation.relate(ep_a.rebind(a), ep_b.rebind(b))?.skip_binder(),
+ ))),
+ (AutoTrait(a), AutoTrait(b)) if a == b => Ok(ep_a.rebind(AutoTrait(a))),
+ _ => Err(TypeError::ExistentialMismatch(expected_found(relation, a, b))),
+ }
+ });
+ tcx.mk_poly_existential_predicates(v)
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ClosureSubsts<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ClosureSubsts<'tcx>,
+ b: ty::ClosureSubsts<'tcx>,
+ ) -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>> {
+ let substs = relate_substs(relation, a.substs, b.substs)?;
+ Ok(ty::ClosureSubsts { substs })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::GeneratorSubsts<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::GeneratorSubsts<'tcx>,
+ b: ty::GeneratorSubsts<'tcx>,
+ ) -> RelateResult<'tcx, ty::GeneratorSubsts<'tcx>> {
+ let substs = relate_substs(relation, a.substs, b.substs)?;
+ Ok(ty::GeneratorSubsts { substs })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for SubstsRef<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: SubstsRef<'tcx>,
+ b: SubstsRef<'tcx>,
+ ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ relate_substs(relation, a, b)
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::Region<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ relation.regions(a, b)
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::Const<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ relation.consts(a, b)
+ }
+}
+
+impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for ty::Binder<'tcx, T> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>> {
+ relation.binders(a, b)
+ }
+}
+
+impl<'tcx> Relate<'tcx> for GenericArg<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: GenericArg<'tcx>,
+ b: GenericArg<'tcx>,
+ ) -> RelateResult<'tcx, GenericArg<'tcx>> {
+ match (a.unpack(), b.unpack()) {
+ (GenericArgKind::Lifetime(a_lt), GenericArgKind::Lifetime(b_lt)) => {
+ Ok(relation.relate(a_lt, b_lt)?.into())
+ }
+ (GenericArgKind::Type(a_ty), GenericArgKind::Type(b_ty)) => {
+ Ok(relation.relate(a_ty, b_ty)?.into())
+ }
+ (GenericArgKind::Const(a_ct), GenericArgKind::Const(b_ct)) => {
+ Ok(relation.relate(a_ct, b_ct)?.into())
+ }
+ (GenericArgKind::Lifetime(unpacked), x) => {
+ bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x)
+ }
+ (GenericArgKind::Type(unpacked), x) => {
+ bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x)
+ }
+ (GenericArgKind::Const(unpacked), x) => {
+ bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x)
+ }
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ImplPolarity {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ImplPolarity,
+ b: ty::ImplPolarity,
+ ) -> RelateResult<'tcx, ty::ImplPolarity> {
+ if a != b {
+ Err(TypeError::PolarityMismatch(expected_found(relation, a, b)))
+ } else {
+ Ok(a)
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::TraitPredicate<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::TraitPredicate<'tcx>,
+ b: ty::TraitPredicate<'tcx>,
+ ) -> RelateResult<'tcx, ty::TraitPredicate<'tcx>> {
+ Ok(ty::TraitPredicate {
+ trait_ref: relation.relate(a.trait_ref, b.trait_ref)?,
+ constness: relation.relate(a.constness, b.constness)?,
+ polarity: relation.relate(a.polarity, b.polarity)?,
+ })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::Term<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Self,
+ b: Self,
+ ) -> RelateResult<'tcx, Self> {
+ Ok(match (a, b) {
+ (Term::Ty(a), Term::Ty(b)) => relation.relate(a, b)?.into(),
+ (Term::Const(a), Term::Const(b)) => relation.relate(a, b)?.into(),
+ _ => return Err(TypeError::Mismatch),
+ })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ProjectionPredicate<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ProjectionPredicate<'tcx>,
+ b: ty::ProjectionPredicate<'tcx>,
+ ) -> RelateResult<'tcx, ty::ProjectionPredicate<'tcx>> {
+ Ok(ty::ProjectionPredicate {
+ projection_ty: relation.relate(a.projection_ty, b.projection_ty)?,
+ term: relation.relate(a.term, b.term)?,
+ })
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Error handling
+
+pub fn expected_found<'tcx, R, T>(relation: &mut R, a: T, b: T) -> ExpectedFound<T>
+where
+ R: TypeRelation<'tcx>,
+{
+ ExpectedFound::new(relation.a_is_expected(), a, b)
+}
diff --git a/compiler/rustc_middle/src/ty/rvalue_scopes.rs b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
new file mode 100644
index 000000000..e86dafae3
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
@@ -0,0 +1,57 @@
+use crate::middle::region::{Scope, ScopeData, ScopeTree};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+
+/// `RvalueScopes` is a mapping from sub-expressions to their _extended_ lifetimes, as
+/// determined by the rules laid out in `rustc_typeck::check::rvalue_scopes`.
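+///
+/// For example (with `temp` as an arbitrary function), in `let x = &temp();`
+/// the temporary returned by `temp()` is recorded with an extended scope, so
+/// it outlives the enclosing statement and stays live as long as `x`.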
+#[derive(TyEncodable, TyDecodable, Clone, Debug, Default, Eq, PartialEq, HashStable)]
+pub struct RvalueScopes {
+ map: FxHashMap<hir::ItemLocalId, Option<Scope>>,
+}
+
+impl RvalueScopes {
+ pub fn new() -> Self {
+ Self { map: <_>::default() }
+ }
+
+ /// Returns the scope in which the temporary created by `expr_id` will be cleaned up.
+ pub fn temporary_scope(
+ &self,
+ region_scope_tree: &ScopeTree,
+ expr_id: hir::ItemLocalId,
+ ) -> Option<Scope> {
+ // Check for a designated rvalue scope.
+ if let Some(&s) = self.map.get(&expr_id) {
+ debug!("temporary_scope({expr_id:?}) = {s:?} [custom]");
+ return s;
+ }
+
+ // Otherwise, locate the innermost terminating scope
+ // if there's one. Static items, for instance, won't
+ // have an enclosing scope, hence no scope will be
+ // returned.
+ let mut id = Scope { id: expr_id, data: ScopeData::Node };
+
+ while let Some(&(p, _)) = region_scope_tree.parent_map.get(&id) {
+ match p.data {
+ ScopeData::Destruction => {
+ debug!("temporary_scope({expr_id:?}) = {id:?} [enclosing]");
+ return Some(id);
+ }
+ _ => id = p,
+ }
+ }
+
+ debug!("temporary_scope({expr_id:?}) = None");
+ None
+ }
+
+ /// Makes an association between a sub-expression and an extended lifetime.
+ pub fn record_rvalue_scope(&mut self, var: hir::ItemLocalId, lifetime: Option<Scope>) {
+ debug!("record_rvalue_scope(var={var:?}, lifetime={lifetime:?})");
+ if let Some(lifetime) = lifetime {
+ assert!(var != lifetime.item_local_id());
+ }
+ self.map.insert(var, lifetime);
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
new file mode 100644
index 000000000..7660a2f3a
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -0,0 +1,1304 @@
+//! This module contains implementations of the `Lift` and `TypeFoldable`
+//! traits for various types in the Rust compiler. Most are written by
+//! hand, though we've recently added some macros and proc-macros to help with the tedium.
+
+use crate::mir::interpret;
+use crate::mir::ProjectionKind;
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
+use crate::ty::print::{with_no_trimmed_paths, FmtPrinter, Printer};
+use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use crate::ty::{self, InferConst, Lift, Term, Ty, TyCtxt};
+use rustc_data_structures::functor::IdFunctor;
+use rustc_hir as hir;
+use rustc_hir::def::Namespace;
+use rustc_index::vec::{Idx, IndexVec};
+
+use std::fmt;
+use std::mem::ManuallyDrop;
+use std::ops::ControlFlow;
+use std::rc::Rc;
+use std::sync::Arc;
+
+impl fmt::Debug for ty::TraitDef {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ with_no_trimmed_paths!({
+ f.write_str(
+ &FmtPrinter::new(tcx, Namespace::TypeNS)
+ .print_def_path(self.def_id, &[])?
+ .into_buffer(),
+ )
+ })
+ })
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::AdtDef<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ with_no_trimmed_paths!({
+ f.write_str(
+ &FmtPrinter::new(tcx, Namespace::TypeNS)
+ .print_def_path(self.did(), &[])?
+ .into_buffer(),
+ )
+ })
+ })
+ }
+}
+
+impl fmt::Debug for ty::UpvarId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let name = ty::tls::with(|tcx| tcx.hir().name(self.var_path.hir_id));
+ write!(f, "UpvarId({:?};`{}`;{:?})", self.var_path.hir_id, name, self.closure_expr_id)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::ExistentialTraitRef<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ with_no_trimmed_paths!(fmt::Display::fmt(self, f))
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::adjustment::Adjustment<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?} -> {}", self.kind, self.target)
+ }
+}
+
+impl fmt::Debug for ty::BoundRegionKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ ty::BrAnon(n) => write!(f, "BrAnon({:?})", n),
+ ty::BrNamed(did, name) => {
+ if did.is_crate_root() {
+ write!(f, "BrNamed({})", name)
+ } else {
+ write!(f, "BrNamed({:?}, {})", did, name)
+ }
+ }
+ ty::BrEnv => write!(f, "BrEnv"),
+ }
+ }
+}
+
+impl fmt::Debug for ty::FreeRegion {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ReFree({:?}, {:?})", self.scope, self.bound_region)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::FnSig<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "({:?}; c_variadic: {})->{:?}", self.inputs(), self.c_variadic, self.output())
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::ConstVid<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "_#{}c", self.index)
+ }
+}
+
+impl fmt::Debug for ty::RegionVid {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "'_#{}r", self.index())
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::TraitRef<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ with_no_trimmed_paths!(fmt::Display::fmt(self, f))
+ }
+}
+
+impl<'tcx> fmt::Debug for Ty<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ with_no_trimmed_paths!(fmt::Display::fmt(self, f))
+ }
+}
+
+impl fmt::Debug for ty::ParamTy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}/#{}", self.name, self.index)
+ }
+}
+
+impl fmt::Debug for ty::ParamConst {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}/#{}", self.name, self.index)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::TraitPredicate<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if let ty::BoundConstness::ConstIfConst = self.constness {
+ write!(f, "~const ")?;
+ }
+ write!(f, "TraitPredicate({:?}, polarity:{:?})", self.trait_ref, self.polarity)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::ProjectionPredicate<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ProjectionPredicate({:?}, {:?})", self.projection_ty, self.term)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::Predicate<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self.kind())
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::PredicateKind<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ ty::PredicateKind::Trait(ref a) => a.fmt(f),
+ ty::PredicateKind::Subtype(ref pair) => pair.fmt(f),
+ ty::PredicateKind::Coerce(ref pair) => pair.fmt(f),
+ ty::PredicateKind::RegionOutlives(ref pair) => pair.fmt(f),
+ ty::PredicateKind::TypeOutlives(ref pair) => pair.fmt(f),
+ ty::PredicateKind::Projection(ref pair) => pair.fmt(f),
+ ty::PredicateKind::WellFormed(data) => write!(f, "WellFormed({:?})", data),
+ ty::PredicateKind::ObjectSafe(trait_def_id) => {
+ write!(f, "ObjectSafe({:?})", trait_def_id)
+ }
+ ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
+ write!(f, "ClosureKind({:?}, {:?}, {:?})", closure_def_id, closure_substs, kind)
+ }
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ write!(f, "ConstEvaluatable({:?}, {:?})", uv.def, uv.substs)
+ }
+ ty::PredicateKind::ConstEquate(c1, c2) => write!(f, "ConstEquate({:?}, {:?})", c1, c2),
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
+ write!(f, "TypeWellFormedFromEnv({:?})", ty)
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Atomic structs
+//
+// For things that don't carry any arena-allocated data (and are
+// `Copy`), just add them to this list.
+
+TrivialTypeTraversalAndLiftImpls! {
+ (),
+ bool,
+ usize,
+ ::rustc_target::abi::VariantIdx,
+ u32,
+ u64,
+ String,
+ crate::middle::region::Scope,
+ crate::ty::FloatTy,
+ ::rustc_ast::InlineAsmOptions,
+ ::rustc_ast::InlineAsmTemplatePiece,
+ ::rustc_ast::NodeId,
+ ::rustc_span::symbol::Symbol,
+ ::rustc_hir::def::Res,
+ ::rustc_hir::def_id::DefId,
+ ::rustc_hir::def_id::LocalDefId,
+ ::rustc_hir::HirId,
+ ::rustc_hir::MatchSource,
+ ::rustc_hir::Mutability,
+ ::rustc_hir::Unsafety,
+ ::rustc_target::asm::InlineAsmRegOrRegClass,
+ ::rustc_target::spec::abi::Abi,
+ crate::mir::coverage::ExpressionOperandId,
+ crate::mir::coverage::CounterValueReference,
+ crate::mir::coverage::InjectedExpressionId,
+ crate::mir::coverage::InjectedExpressionIndex,
+ crate::mir::coverage::MappedExpressionIndex,
+ crate::mir::Local,
+ crate::mir::Promoted,
+ crate::traits::Reveal,
+ crate::ty::adjustment::AutoBorrowMutability,
+ crate::ty::AdtKind,
+ crate::ty::BoundConstness,
+ // Including `BoundRegionKind` is a *bit* dubious, but direct
+ // references to bound regions appear in `ty::Error`, and aren't
+ // really meant to be folded. In general, we can only fold a fully
+ // general `Region`.
+ crate::ty::BoundRegionKind,
+ crate::ty::AssocItem,
+ crate::ty::AssocKind,
+ crate::ty::Placeholder<crate::ty::BoundRegionKind>,
+ crate::ty::ClosureKind,
+ crate::ty::FreeRegion,
+ crate::ty::InferTy,
+ crate::ty::IntVarValue,
+ crate::ty::ParamConst,
+ crate::ty::ParamTy,
+ crate::ty::adjustment::PointerCast,
+ crate::ty::RegionVid,
+ crate::ty::UniverseIndex,
+ crate::ty::Variance,
+ ::rustc_span::Span,
+ ::rustc_errors::ErrorGuaranteed,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Lift implementations
+
+// FIXME(eddyb) replace all the uses of `Option::map` with `?`.
+impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) {
+ type Lifted = (A::Lifted, B::Lifted);
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ Some((tcx.lift(self.0)?, tcx.lift(self.1)?))
+ }
+}
+
+impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>, C: Lift<'tcx>> Lift<'tcx> for (A, B, C) {
+ type Lifted = (A::Lifted, B::Lifted, C::Lifted);
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ Some((tcx.lift(self.0)?, tcx.lift(self.1)?, tcx.lift(self.2)?))
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> {
+ type Lifted = Option<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ Some(x) => tcx.lift(x).map(Some),
+ None => Some(None),
+ }
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result<T, E> {
+ type Lifted = Result<T::Lifted, E::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ Ok(x) => tcx.lift(x).map(Ok),
+ Err(e) => tcx.lift(e).map(Err),
+ }
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Box<T> {
+ type Lifted = Box<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(*self).map(Box::new)
+ }
+}
+
+impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Rc<T> {
+ type Lifted = Rc<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.as_ref().clone()).map(Rc::new)
+ }
+}
+
+impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Arc<T> {
+ type Lifted = Arc<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.as_ref().clone()).map(Arc::new)
+ }
+}
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec<T> {
+ type Lifted = Vec<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ self.into_iter().map(|v| tcx.lift(v)).collect()
+ }
+}
+
+impl<'tcx, I: Idx, T: Lift<'tcx>> Lift<'tcx> for IndexVec<I, T> {
+ type Lifted = IndexVec<I, T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ self.into_iter().map(|e| tcx.lift(e)).collect()
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> {
+ type Lifted = ty::TraitRef<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::TraitRef { def_id: self.def_id, substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialTraitRef<'a> {
+ type Lifted = ty::ExistentialTraitRef<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::ExistentialTraitRef { def_id: self.def_id, substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialPredicate<'a> {
+ type Lifted = ty::ExistentialPredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::ExistentialPredicate::Trait(x) => tcx.lift(x).map(ty::ExistentialPredicate::Trait),
+ ty::ExistentialPredicate::Projection(x) => {
+ tcx.lift(x).map(ty::ExistentialPredicate::Projection)
+ }
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ Some(ty::ExistentialPredicate::AutoTrait(def_id))
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for Term<'a> {
+ type Lifted = ty::Term<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ Some(match self {
+ Term::Ty(ty) => Term::Ty(tcx.lift(ty)?),
+ Term::Const(c) => Term::Const(tcx.lift(c)?),
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> {
+ type Lifted = ty::TraitPredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> {
+ tcx.lift(self.trait_ref).map(|trait_ref| ty::TraitPredicate {
+ trait_ref,
+ constness: self.constness,
+ polarity: self.polarity,
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::SubtypePredicate<'a> {
+ type Lifted = ty::SubtypePredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::SubtypePredicate<'tcx>> {
+ tcx.lift((self.a, self.b)).map(|(a, b)| ty::SubtypePredicate {
+ a_is_expected: self.a_is_expected,
+ a,
+ b,
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::CoercePredicate<'a> {
+ type Lifted = ty::CoercePredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::CoercePredicate<'tcx>> {
+ tcx.lift((self.a, self.b)).map(|(a, b)| ty::CoercePredicate { a, b })
+ }
+}
+
+impl<'tcx, A: Copy + Lift<'tcx>, B: Copy + Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> {
+ type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift((self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b))
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionTy<'a> {
+ type Lifted = ty::ProjectionTy<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionTy<'tcx>> {
+ tcx.lift(self.substs)
+ .map(|substs| ty::ProjectionTy { item_def_id: self.item_def_id, substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> {
+ type Lifted = ty::ProjectionPredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> {
+ tcx.lift((self.projection_ty, self.term))
+ .map(|(projection_ty, term)| ty::ProjectionPredicate { projection_ty, term })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialProjection<'a> {
+ type Lifted = ty::ExistentialProjection<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::ExistentialProjection {
+ substs,
+ term: tcx.lift(self.term).expect("type must lift when substs do"),
+ item_def_id: self.item_def_id,
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::PredicateKind<'a> {
+ type Lifted = ty::PredicateKind<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::PredicateKind::Trait(data) => tcx.lift(data).map(ty::PredicateKind::Trait),
+ ty::PredicateKind::Subtype(data) => tcx.lift(data).map(ty::PredicateKind::Subtype),
+ ty::PredicateKind::Coerce(data) => tcx.lift(data).map(ty::PredicateKind::Coerce),
+ ty::PredicateKind::RegionOutlives(data) => {
+ tcx.lift(data).map(ty::PredicateKind::RegionOutlives)
+ }
+ ty::PredicateKind::TypeOutlives(data) => {
+ tcx.lift(data).map(ty::PredicateKind::TypeOutlives)
+ }
+ ty::PredicateKind::Projection(data) => {
+ tcx.lift(data).map(ty::PredicateKind::Projection)
+ }
+ ty::PredicateKind::WellFormed(ty) => tcx.lift(ty).map(ty::PredicateKind::WellFormed),
+ ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
+ tcx.lift(closure_substs).map(|closure_substs| {
+ ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind)
+ })
+ }
+ ty::PredicateKind::ObjectSafe(trait_def_id) => {
+ Some(ty::PredicateKind::ObjectSafe(trait_def_id))
+ }
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ tcx.lift(uv).map(|uv| ty::PredicateKind::ConstEvaluatable(uv))
+ }
+ ty::PredicateKind::ConstEquate(c1, c2) => {
+ tcx.lift((c1, c2)).map(|(c1, c2)| ty::PredicateKind::ConstEquate(c1, c2))
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
+ tcx.lift(ty).map(ty::PredicateKind::TypeWellFormedFromEnv)
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<'a, T>
+where
+ <T as Lift<'tcx>>::Lifted: TypeVisitable<'tcx>,
+{
+ type Lifted = ty::Binder<'tcx, T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ let bound_vars = tcx.lift(self.bound_vars());
+ tcx.lift(self.skip_binder())
+ .zip(bound_vars)
+ .map(|(value, vars)| ty::Binder::bind_with_vars(value, vars))
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> {
+ type Lifted = ty::ParamEnv<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.caller_bounds())
+ .map(|caller_bounds| ty::ParamEnv::new(caller_bounds, self.reveal(), self.constness()))
+ }
+}
+
+impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::ParamEnvAnd<'a, T> {
+ type Lifted = ty::ParamEnvAnd<'tcx, T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.param_env).and_then(|param_env| {
+ tcx.lift(self.value).map(|value| ty::ParamEnvAnd { param_env, value })
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> {
+ type Lifted = ty::ClosureSubsts<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::ClosureSubsts { substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorSubsts<'a> {
+ type Lifted = ty::GeneratorSubsts<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::GeneratorSubsts { substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjustment<'a> {
+ type Lifted = ty::adjustment::Adjustment<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ let ty::adjustment::Adjustment { kind, target } = self;
+ tcx.lift(kind).and_then(|kind| {
+ tcx.lift(target).map(|target| ty::adjustment::Adjustment { kind, target })
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjust<'a> {
+ type Lifted = ty::adjustment::Adjust<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::adjustment::Adjust::NeverToAny => Some(ty::adjustment::Adjust::NeverToAny),
+ ty::adjustment::Adjust::Pointer(ptr) => Some(ty::adjustment::Adjust::Pointer(ptr)),
+ ty::adjustment::Adjust::Deref(overloaded) => {
+ tcx.lift(overloaded).map(ty::adjustment::Adjust::Deref)
+ }
+ ty::adjustment::Adjust::Borrow(autoref) => {
+ tcx.lift(autoref).map(ty::adjustment::Adjust::Borrow)
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::OverloadedDeref<'a> {
+ type Lifted = ty::adjustment::OverloadedDeref<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.region).map(|region| ty::adjustment::OverloadedDeref {
+ region,
+ mutbl: self.mutbl,
+ span: self.span,
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoBorrow<'a> {
+ type Lifted = ty::adjustment::AutoBorrow<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::adjustment::AutoBorrow::Ref(r, m) => {
+ tcx.lift(r).map(|r| ty::adjustment::AutoBorrow::Ref(r, m))
+ }
+ ty::adjustment::AutoBorrow::RawPtr(m) => Some(ty::adjustment::AutoBorrow::RawPtr(m)),
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::GenSig<'a> {
+ type Lifted = ty::GenSig<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift((self.resume_ty, self.yield_ty, self.return_ty))
+ .map(|(resume_ty, yield_ty, return_ty)| ty::GenSig { resume_ty, yield_ty, return_ty })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> {
+ type Lifted = ty::FnSig<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.inputs_and_output).map(|x| ty::FnSig {
+ inputs_and_output: x,
+ c_variadic: self.c_variadic,
+ unsafety: self.unsafety,
+ abi: self.abi,
+ })
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound<T> {
+ type Lifted = ty::error::ExpectedFound<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ let ty::error::ExpectedFound { expected, found } = self;
+ tcx.lift(expected).and_then(|expected| {
+ tcx.lift(found).map(|found| ty::error::ExpectedFound { expected, found })
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> {
+ type Lifted = ty::error::TypeError<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ use crate::ty::error::TypeError::*;
+
+ Some(match self {
+ Mismatch => Mismatch,
+ ConstnessMismatch(x) => ConstnessMismatch(x),
+ PolarityMismatch(x) => PolarityMismatch(x),
+ UnsafetyMismatch(x) => UnsafetyMismatch(x),
+ AbiMismatch(x) => AbiMismatch(x),
+ Mutability => Mutability,
+ ArgumentMutability(i) => ArgumentMutability(i),
+ TupleSize(x) => TupleSize(x),
+ FixedArraySize(x) => FixedArraySize(x),
+ ArgCount => ArgCount,
+ FieldMisMatch(x, y) => FieldMisMatch(x, y),
+ RegionsDoesNotOutlive(a, b) => {
+ return tcx.lift((a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b));
+ }
+ RegionsInsufficientlyPolymorphic(a, b) => {
+ return tcx.lift(b).map(|b| RegionsInsufficientlyPolymorphic(a, b));
+ }
+ RegionsOverlyPolymorphic(a, b) => {
+ return tcx.lift(b).map(|b| RegionsOverlyPolymorphic(a, b));
+ }
+ RegionsPlaceholderMismatch => RegionsPlaceholderMismatch,
+ IntMismatch(x) => IntMismatch(x),
+ FloatMismatch(x) => FloatMismatch(x),
+ Traits(x) => Traits(x),
+ VariadicMismatch(x) => VariadicMismatch(x),
+ CyclicTy(t) => return tcx.lift(t).map(|t| CyclicTy(t)),
+ CyclicConst(ct) => return tcx.lift(ct).map(|ct| CyclicConst(ct)),
+ ProjectionMismatched(x) => ProjectionMismatched(x),
+ ArgumentSorts(x, i) => return tcx.lift(x).map(|x| ArgumentSorts(x, i)),
+ Sorts(x) => return tcx.lift(x).map(Sorts),
+ ExistentialMismatch(x) => return tcx.lift(x).map(ExistentialMismatch),
+ ConstMismatch(x) => return tcx.lift(x).map(ConstMismatch),
+ IntrinsicCast => IntrinsicCast,
+ TargetFeatureCast(x) => TargetFeatureCast(x),
+ ObjectUnsafeCoercion(x) => return tcx.lift(x).map(ObjectUnsafeCoercion),
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> {
+ type Lifted = ty::InstanceDef<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::InstanceDef::Item(def_id) => Some(ty::InstanceDef::Item(def_id)),
+ ty::InstanceDef::VTableShim(def_id) => Some(ty::InstanceDef::VTableShim(def_id)),
+ ty::InstanceDef::ReifyShim(def_id) => Some(ty::InstanceDef::ReifyShim(def_id)),
+ ty::InstanceDef::Intrinsic(def_id) => Some(ty::InstanceDef::Intrinsic(def_id)),
+ ty::InstanceDef::FnPtrShim(def_id, ty) => {
+ Some(ty::InstanceDef::FnPtrShim(def_id, tcx.lift(ty)?))
+ }
+ ty::InstanceDef::Virtual(def_id, n) => Some(ty::InstanceDef::Virtual(def_id, n)),
+ ty::InstanceDef::ClosureOnceShim { call_once, track_caller } => {
+ Some(ty::InstanceDef::ClosureOnceShim { call_once, track_caller })
+ }
+ ty::InstanceDef::DropGlue(def_id, ty) => {
+ Some(ty::InstanceDef::DropGlue(def_id, tcx.lift(ty)?))
+ }
+ ty::InstanceDef::CloneShim(def_id, ty) => {
+ Some(ty::InstanceDef::CloneShim(def_id, tcx.lift(ty)?))
+ }
+ }
+ }
+}
+
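+// Editorial note: the `Lift` impls above all follow one shape -- lift each
+// interned component with `tcx.lift` (which is assumed to return `None` when
+// the value cannot be lifted into `tcx`), then rebuild the value at the
+// `'tcx` lifetime. A minimal sketch for a hypothetical two-field type,
+// assuming both fields implement `Lift<'tcx>`:
+//
+// impl<'a, 'tcx> Lift<'tcx> for Example<'a> {
+//     type Lifted = Example<'tcx>;
+//     fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+//         Some(Example { a: tcx.lift(self.a)?, b: tcx.lift(self.b)? })
+//     }
+// }
+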
+///////////////////////////////////////////////////////////////////////////
+// TypeFoldable implementations.
+
+/// AdtDefs are basically the same as a DefId.
+impl<'tcx> TypeFoldable<'tcx> for ty::AdtDef<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::AdtDef<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>, U: TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<(T, U), F::Error> {
+ Ok((self.0.try_fold_with(folder)?, self.1.try_fold_with(folder)?))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>, U: TypeVisitable<'tcx>> TypeVisitable<'tcx> for (T, U) {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.0.visit_with(visitor)?;
+ self.1.visit_with(visitor)
+ }
+}
+
+impl<'tcx, A: TypeFoldable<'tcx>, B: TypeFoldable<'tcx>, C: TypeFoldable<'tcx>> TypeFoldable<'tcx>
+ for (A, B, C)
+{
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<(A, B, C), F::Error> {
+ Ok((
+ self.0.try_fold_with(folder)?,
+ self.1.try_fold_with(folder)?,
+ self.2.try_fold_with(folder)?,
+ ))
+ }
+}
+
+impl<'tcx, A: TypeVisitable<'tcx>, B: TypeVisitable<'tcx>, C: TypeVisitable<'tcx>>
+ TypeVisitable<'tcx> for (A, B, C)
+{
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.0.visit_with(visitor)?;
+ self.1.visit_with(visitor)?;
+ self.2.visit_with(visitor)
+ }
+}
+
+EnumTypeTraversalImpl! {
+ impl<'tcx, T> TypeFoldable<'tcx> for Option<T> {
+ (Some)(a),
+ (None),
+ } where T: TypeFoldable<'tcx>
+}
+EnumTypeTraversalImpl! {
+ impl<'tcx, T> TypeVisitable<'tcx> for Option<T> {
+ (Some)(a),
+ (None),
+ } where T: TypeVisitable<'tcx>
+}
+
+EnumTypeTraversalImpl! {
+ impl<'tcx, T, E> TypeFoldable<'tcx> for Result<T, E> {
+ (Ok)(a),
+ (Err)(a),
+ } where T: TypeFoldable<'tcx>, E: TypeFoldable<'tcx>,
+}
+EnumTypeTraversalImpl! {
+ impl<'tcx, T, E> TypeVisitable<'tcx> for Result<T, E> {
+ (Ok)(a),
+ (Err)(a),
+ } where T: TypeVisitable<'tcx>, E: TypeVisitable<'tcx>,
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(
+ mut self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ // We merely want to replace the contained `T`, if at all possible,
+ // so that we don't needlessly allocate a new `Rc` or indeed clone
+ // the contained type.
+ unsafe {
+ // First step is to ensure that we have a unique reference to
+ // the contained type, which `Rc::make_mut` will accomplish (by
+ // allocating a new `Rc` and cloning the `T` only if required).
+ // This is done *before* casting to `Rc<ManuallyDrop<T>>` so that
+ // panicking during `make_mut` does not leak the `T`.
+ Rc::make_mut(&mut self);
+
+ // Casting to `Rc<ManuallyDrop<T>>` is safe because `ManuallyDrop`
+ // is `repr(transparent)`.
+ let ptr = Rc::into_raw(self).cast::<ManuallyDrop<T>>();
+ let mut unique = Rc::from_raw(ptr);
+
+ // Call to `Rc::make_mut` above guarantees that `unique` is the
+ // sole reference to the contained value, so we can avoid doing
+ // a checked `get_mut` here.
+ let slot = Rc::get_mut_unchecked(&mut unique);
+
+ // Semantically move the contained type out from `unique`, fold
+ // it, then move the folded value back into `unique`. Should
+ // folding fail, `ManuallyDrop` ensures that the "moved-out"
+ // value is not re-dropped.
+ let owned = ManuallyDrop::take(slot);
+ let folded = owned.try_fold_with(folder)?;
+ *slot = ManuallyDrop::new(folded);
+
+ // Cast back to `Rc<T>`.
+ Ok(Rc::from_raw(Rc::into_raw(unique).cast()))
+ }
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Rc<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ (**self).visit_with(visitor)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Arc<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(
+ mut self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ // We merely want to replace the contained `T`, if at all possible,
+ // so that we don't needlessly allocate a new `Arc` or indeed clone
+ // the contained type.
+ unsafe {
+ // First step is to ensure that we have a unique reference to
+ // the contained type, which `Arc::make_mut` will accomplish (by
+ // allocating a new `Arc` and cloning the `T` only if required).
+ // This is done *before* casting to `Arc<ManuallyDrop<T>>` so that
+ // panicking during `make_mut` does not leak the `T`.
+ Arc::make_mut(&mut self);
+
+ // Casting to `Arc<ManuallyDrop<T>>` is safe because `ManuallyDrop`
+ // is `repr(transparent)`.
+ let ptr = Arc::into_raw(self).cast::<ManuallyDrop<T>>();
+ let mut unique = Arc::from_raw(ptr);
+
+ // Call to `Arc::make_mut` above guarantees that `unique` is the
+ // sole reference to the contained value, so we can avoid doing
+ // a checked `get_mut` here.
+ let slot = Arc::get_mut_unchecked(&mut unique);
+
+ // Semantically move the contained type out from `unique`, fold
+ // it, then move the folded value back into `unique`. Should
+ // folding fail, `ManuallyDrop` ensures that the "moved-out"
+ // value is not re-dropped.
+ let owned = ManuallyDrop::take(slot);
+ let folded = owned.try_fold_with(folder)?;
+ *slot = ManuallyDrop::new(folded);
+
+ // Cast back to `Arc<T>`.
+ Ok(Arc::from_raw(Arc::into_raw(unique).cast()))
+ }
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Arc<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ (**self).visit_with(visitor)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_id(|value| value.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Box<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ (**self).visit_with(visitor)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_id(|t| t.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Vec<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<[T]> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_id(|t| t.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Box<[T]> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::EarlyBinder<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_bound(|ty| ty.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for ty::EarlyBinder<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.as_ref().0.visit_with(visitor)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<'tcx, T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_binder(self)
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for ty::Binder<'tcx, T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_binder(self)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeSuperFoldable<'tcx> for ty::Binder<'tcx, T> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ self.try_map_bound(|ty| ty.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeSuperVisitable<'tcx> for ty::Binder<'tcx, T> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.as_ref().skip_binder().visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ ty::util::fold_list(self, folder, |tcx, v| tcx.intern_poly_existential_predicates(v))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx>
+ for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>
+{
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|p| p.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ProjectionKind> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ ty::util::fold_list(self, folder, |tcx, v| tcx.intern_projs(v))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<ProjectionKind> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ use crate::ty::InstanceDef::*;
+ Ok(Self {
+ substs: self.substs.try_fold_with(folder)?,
+ def: match self.def {
+ Item(def) => Item(def.try_fold_with(folder)?),
+ VTableShim(did) => VTableShim(did.try_fold_with(folder)?),
+ ReifyShim(did) => ReifyShim(did.try_fold_with(folder)?),
+ Intrinsic(did) => Intrinsic(did.try_fold_with(folder)?),
+ FnPtrShim(did, ty) => {
+ FnPtrShim(did.try_fold_with(folder)?, ty.try_fold_with(folder)?)
+ }
+ Virtual(did, i) => Virtual(did.try_fold_with(folder)?, i),
+ ClosureOnceShim { call_once, track_caller } => {
+ ClosureOnceShim { call_once: call_once.try_fold_with(folder)?, track_caller }
+ }
+ DropGlue(did, ty) => {
+ DropGlue(did.try_fold_with(folder)?, ty.try_fold_with(folder)?)
+ }
+ CloneShim(did, ty) => {
+ CloneShim(did.try_fold_with(folder)?, ty.try_fold_with(folder)?)
+ }
+ },
+ })
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::instance::Instance<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ use crate::ty::InstanceDef::*;
+ self.substs.visit_with(visitor)?;
+ match self.def {
+ Item(def) => def.visit_with(visitor),
+ VTableShim(did) | ReifyShim(did) | Intrinsic(did) | Virtual(did, _) => {
+ did.visit_with(visitor)
+ }
+ FnPtrShim(did, ty) | CloneShim(did, ty) => {
+ did.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ DropGlue(did, ty) => {
+ did.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ ClosureOnceShim { call_once, track_caller: _ } => call_once.visit_with(visitor),
+ }
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for interpret::GlobalId<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(Self { instance: self.instance.try_fold_with(folder)?, promoted: self.promoted })
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for interpret::GlobalId<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.instance.visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_ty(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Ty<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_ty(*self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for Ty<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ let kind = match *self.kind() {
+ ty::RawPtr(tm) => ty::RawPtr(tm.try_fold_with(folder)?),
+ ty::Array(typ, sz) => ty::Array(typ.try_fold_with(folder)?, sz.try_fold_with(folder)?),
+ ty::Slice(typ) => ty::Slice(typ.try_fold_with(folder)?),
+ ty::Adt(tid, substs) => ty::Adt(tid, substs.try_fold_with(folder)?),
+ ty::Dynamic(trait_ty, region) => {
+ ty::Dynamic(trait_ty.try_fold_with(folder)?, region.try_fold_with(folder)?)
+ }
+ ty::Tuple(ts) => ty::Tuple(ts.try_fold_with(folder)?),
+ ty::FnDef(def_id, substs) => ty::FnDef(def_id, substs.try_fold_with(folder)?),
+ ty::FnPtr(f) => ty::FnPtr(f.try_fold_with(folder)?),
+ ty::Ref(r, ty, mutbl) => {
+ ty::Ref(r.try_fold_with(folder)?, ty.try_fold_with(folder)?, mutbl)
+ }
+ ty::Generator(did, substs, movability) => {
+ ty::Generator(did, substs.try_fold_with(folder)?, movability)
+ }
+ ty::GeneratorWitness(types) => ty::GeneratorWitness(types.try_fold_with(folder)?),
+ ty::Closure(did, substs) => ty::Closure(did, substs.try_fold_with(folder)?),
+ ty::Projection(data) => ty::Projection(data.try_fold_with(folder)?),
+ ty::Opaque(did, substs) => ty::Opaque(did, substs.try_fold_with(folder)?),
+
+ ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Error(_)
+ | ty::Infer(_)
+ | ty::Param(..)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Never
+ | ty::Foreign(..) => return Ok(self),
+ };
+
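+ // Only re-intern when folding actually changed the kind; otherwise reuse `self`.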
+ Ok(if *self.kind() == kind { self } else { folder.tcx().mk_ty(kind) })
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for Ty<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match self.kind() {
+ ty::RawPtr(ref tm) => tm.visit_with(visitor),
+ ty::Array(typ, sz) => {
+ typ.visit_with(visitor)?;
+ sz.visit_with(visitor)
+ }
+ ty::Slice(typ) => typ.visit_with(visitor),
+ ty::Adt(_, substs) => substs.visit_with(visitor),
+ ty::Dynamic(ref trait_ty, ref reg) => {
+ trait_ty.visit_with(visitor)?;
+ reg.visit_with(visitor)
+ }
+ ty::Tuple(ts) => ts.visit_with(visitor),
+ ty::FnDef(_, substs) => substs.visit_with(visitor),
+ ty::FnPtr(ref f) => f.visit_with(visitor),
+ ty::Ref(r, ty, _) => {
+ r.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ ty::Generator(_did, ref substs, _) => substs.visit_with(visitor),
+ ty::GeneratorWitness(ref types) => types.visit_with(visitor),
+ ty::Closure(_did, ref substs) => substs.visit_with(visitor),
+ ty::Projection(ref data) => data.visit_with(visitor),
+ ty::Opaque(_, ref substs) => substs.visit_with(visitor),
+
+ ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Error(_)
+ | ty::Infer(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Param(..)
+ | ty::Never
+ | ty::Foreign(..) => ControlFlow::CONTINUE,
+ }
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Region<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_region(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Region<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_region(*self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ty::Region<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ _folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::Region<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_predicate(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Predicate<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_predicate(*self)
+ }
+
+ #[inline]
+ fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool {
+ self.outer_exclusive_binder() > binder
+ }
+
+ #[inline]
+ fn has_type_flags(&self, flags: ty::TypeFlags) -> bool {
+ self.flags().intersects(flags)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ty::Predicate<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ let new = self.kind().try_fold_with(folder)?;
+ Ok(folder.tcx().reuse_or_mk_predicate(self, new))
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::Predicate<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.kind().visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ ty::util::fold_list(self, folder, |tcx, v| tcx.intern_predicates(v))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|p| p.visit_with(visitor))
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec<I, T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_id(|x| x.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>, I: Idx> TypeVisitable<'tcx> for IndexVec<I, T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Const<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_const(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Const<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_const(*self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ty::Const<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ let ty = self.ty().try_fold_with(folder)?;
+ let kind = self.kind().try_fold_with(folder)?;
+ if ty != self.ty() || kind != self.kind() {
+ Ok(folder.tcx().mk_const(ty::ConstS { ty, kind }))
+ } else {
+ Ok(self)
+ }
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::Const<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.ty().visit_with(visitor)?;
+ self.kind().visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ConstKind<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(match self {
+ ty::ConstKind::Infer(ic) => ty::ConstKind::Infer(ic.try_fold_with(folder)?),
+ ty::ConstKind::Param(p) => ty::ConstKind::Param(p.try_fold_with(folder)?),
+ ty::ConstKind::Unevaluated(uv) => ty::ConstKind::Unevaluated(uv.try_fold_with(folder)?),
+ ty::ConstKind::Value(_)
+ | ty::ConstKind::Bound(..)
+ | ty::ConstKind::Placeholder(..)
+ | ty::ConstKind::Error(_) => self,
+ })
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::ConstKind<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match *self {
+ ty::ConstKind::Infer(ic) => ic.visit_with(visitor),
+ ty::ConstKind::Param(p) => p.visit_with(visitor),
+ ty::ConstKind::Unevaluated(uv) => uv.visit_with(visitor),
+ ty::ConstKind::Value(_)
+ | ty::ConstKind::Bound(..)
+ | ty::ConstKind::Placeholder(_)
+ | ty::ConstKind::Error(_) => ControlFlow::CONTINUE,
+ }
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for InferConst<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for InferConst<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_unevaluated(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_unevaluated(*self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ty::Unevaluated<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ Ok(ty::Unevaluated {
+ def: self.def,
+ substs: self.substs.try_fold_with(folder)?,
+ promoted: self.promoted,
+ })
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::Unevaluated<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.substs.visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx, ()> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(self.expand().try_fold_with(folder)?.shrink())
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx, ()> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.expand().visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for hir::Constness {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for hir::Constness {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
new file mode 100644
index 000000000..52c3a3886
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -0,0 +1,2295 @@
+//! This module contains `TyKind` and its major components.
+
+#![allow(rustc::usage_of_ty_tykind)]
+
+use crate::infer::canonical::Canonical;
+use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
+use crate::ty::visit::ValidateBoundVars;
+use crate::ty::InferTy::*;
+use crate::ty::{
+ self, AdtDef, DefIdTree, Discr, Term, Ty, TyCtxt, TypeFlags, TypeSuperVisitable, TypeVisitable,
+ TypeVisitor,
+};
+use crate::ty::{List, ParamEnv};
+use polonius_engine::Atom;
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::intern::Interned;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::Idx;
+use rustc_macros::HashStable;
+use rustc_span::symbol::{kw, Symbol};
+use rustc_target::abi::VariantIdx;
+use rustc_target::spec::abi;
+use std::borrow::Cow;
+use std::cmp::Ordering;
+use std::fmt;
+use std::marker::PhantomData;
+use std::ops::{ControlFlow, Deref, Range};
+use ty::util::IntTypeExt;
+
+use rustc_type_ir::sty::TyKind::*;
+use rustc_type_ir::RegionKind as IrRegionKind;
+use rustc_type_ir::TyKind as IrTyKind;
+
+// Re-export the `TyKind` from `rustc_type_ir` here for convenience
+#[rustc_diagnostic_item = "TyKind"]
+pub type TyKind<'tcx> = IrTyKind<TyCtxt<'tcx>>;
+pub type RegionKind<'tcx> = IrRegionKind<TyCtxt<'tcx>>;
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct TypeAndMut<'tcx> {
+ pub ty: Ty<'tcx>,
+ pub mutbl: hir::Mutability,
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, TyEncodable, TyDecodable, Copy)]
+#[derive(HashStable)]
+/// A "free" region `fr` can be interpreted as "some region
+/// at least as big as the scope `fr.scope`".
+pub struct FreeRegion {
+ pub scope: DefId,
+ pub bound_region: BoundRegionKind,
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, TyEncodable, TyDecodable, Copy)]
+#[derive(HashStable)]
+pub enum BoundRegionKind {
+ /// An anonymous region parameter for a given fn (&T)
+ BrAnon(u32),
+
+ /// Named region parameters for functions (a in &'a T)
+ ///
+ /// The `DefId` is needed to distinguish free regions in
+ /// the event of shadowing.
+ BrNamed(DefId, Symbol),
+
+ /// Anonymous region for the implicit env pointer parameter
+ /// to a closure
+ BrEnv,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub struct BoundRegion {
+ pub var: BoundVar,
+ pub kind: BoundRegionKind,
+}
+
+impl BoundRegionKind {
+ pub fn is_named(&self) -> bool {
+ match *self {
+ BoundRegionKind::BrNamed(_, name) => name != kw::UnderscoreLifetime,
+ _ => false,
+ }
+ }
+}
+
+pub trait Article {
+ fn article(&self) -> &'static str;
+}
+
+impl<'tcx> Article for TyKind<'tcx> {
+ /// Get the article ("a" or "an") to use with this type.
+ fn article(&self) -> &'static str {
+ match self {
+ Int(_) | Float(_) | Array(_, _) => "an",
+ Adt(def, _) if def.is_enum() => "an",
+ // This should never happen, but ICEing and causing the user's code
+ // to not compile felt too harsh.
+ Error(_) => "a",
+ _ => "a",
+ }
+ }
+}
+
+// `TyKind` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(TyKind<'_>, 32);
+
+/// A closure can be modeled as a struct that looks like:
+/// ```ignore (illustrative)
+/// struct Closure<'l0...'li, T0...Tj, CK, CS, U>(...U);
+/// ```
+/// where:
+///
+/// - 'l0...'li and T0...Tj are the generic parameters
+/// in scope on the function that defined the closure,
+/// - CK represents the *closure kind* (Fn vs FnMut vs FnOnce). This
+/// is rather hackily encoded via a scalar type. See
+/// `Ty::to_opt_closure_kind` for details.
+/// - CS represents the *closure signature*, represented as a `fn()`
+/// type. For example, `fn(u32, u32) -> u32` would mean that the closure
+/// implements `CK<(u32, u32), Output = u32>`, where `CK` is the trait
+/// specified above.
+/// - U is a type parameter representing the types of its upvars, tupled up
+/// (borrowed, if appropriate; that is, if a U field represents a by-ref upvar,
+/// and the up-var has the type `Foo`, then that field of U will be `&Foo`).
+///
+/// So, for example, given this function:
+/// ```ignore (illustrative)
+/// fn foo<'a, T>(data: &'a mut T) {
+/// do(|| data.count += 1)
+/// }
+/// ```
+/// the type of the closure would be something like:
+/// ```ignore (illustrative)
+/// struct Closure<'a, T, U>(...U);
+/// ```
+/// Note that the type of the upvar is not specified in the struct.
+/// You may wonder how the impl would then be able to use the upvar,
+/// if it doesn't know its type? The answer is that the impl is
+/// (conceptually) not fully generic over Closure but rather tied to
+/// instances with the expected upvar types:
+/// ```ignore (illustrative)
+/// impl<'b, 'a, T> FnMut() for Closure<'a, T, (&'b mut &'a mut T,)> {
+/// ...
+/// }
+/// ```
+/// You can see that the *impl* fully specified the type of the upvar
+/// and thus knows full well that `data` has type `&'b mut &'a mut T`.
+/// (Here, I am assuming that `data` is mut-borrowed.)
+///
+/// Now, the last question you may ask is: Why include the upvar types
+/// in an extra type parameter? The reason for this design is that the
+/// upvar types can reference lifetimes that are internal to the
+/// creating function. In my example above, for example, the lifetime
+/// `'b` represents the scope of the closure itself; this is some
+/// subset of `foo`, probably just the scope of the call to
+/// `do()`. If we just had the lifetime/type parameters from the
+/// enclosing function, we couldn't name this lifetime `'b`. Note that
+/// there can also be lifetimes in the types of the upvars themselves,
+/// if one of them happens to be a reference to something that the
+/// creating fn owns.
+///
+/// OK, you say, so why not create a more minimal set of parameters
+/// that just includes the extra lifetime parameters? The answer is
+/// primarily that it would be hard --- we don't know at the time when
+/// we create the closure type what the full types of the upvars are,
+/// nor do we know which are borrowed and which are not. In this
+/// design, we can just supply a fresh type parameter and figure that
+/// out later.
+///
+/// All right, you say, but why include the type parameters from the
+/// original function then? The answer is that codegen may need them
+/// when monomorphizing, and they may not appear in the upvars. A
+/// closure could capture no variables but still make use of some
+/// in-scope type parameter with a bound (e.g., if our example above
+/// had an extra `U: Default`, and the closure called `U::default()`).
+///
+/// There is another reason. This design (implicitly) prohibits
+/// closures from capturing themselves (except via a trait
+/// object). This simplifies closure inference considerably, since it
+/// means that when we infer the kind of a closure or its upvars, we
+/// don't have to handle cycles where the decisions we make for
+/// closure C wind up influencing the decisions we ought to make for
+/// closure C (which would then require fixed point iteration to
+/// handle). Plus it fixes an ICE. :P
+///
+/// ## Generators
+///
+/// Generators are handled similarly in `GeneratorSubsts`. The set of
+/// type parameters is similar, but `CK` and `CS` are replaced by the
+/// following type parameters:
+///
+/// * `GS`: The generator's "resume type", which is the type of the
+/// argument passed to `resume`, and the type of `yield` expressions
+/// inside the generator.
+/// * `GY`: The "yield type", which is the type of values passed to
+/// `yield` inside the generator.
+/// * `GR`: The "return type", which is the type of value returned upon
+/// completion of the generator.
+/// * `GW`: The "generator witness".
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct ClosureSubsts<'tcx> {
+ /// Lifetime and type parameters from the enclosing function,
+ /// concatenated with a tuple containing the types of the upvars.
+ ///
+ /// These are separated out because codegen wants to pass them around
+ /// when monomorphizing.
+ pub substs: SubstsRef<'tcx>,
+}
+
+/// Struct returned by `split()`.
+pub struct ClosureSubstsParts<'tcx, T> {
+ pub parent_substs: &'tcx [GenericArg<'tcx>],
+ pub closure_kind_ty: T,
+ pub closure_sig_as_fn_ptr_ty: T,
+ pub tupled_upvars_ty: T,
+}
+
+impl<'tcx> ClosureSubsts<'tcx> {
+ /// Construct `ClosureSubsts` from `ClosureSubstsParts`, containing `Substs`
+ /// for the closure parent, alongside additional closure-specific components.
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ parts: ClosureSubstsParts<'tcx, Ty<'tcx>>,
+ ) -> ClosureSubsts<'tcx> {
+ ClosureSubsts {
+ substs: tcx.mk_substs(
+ parts.parent_substs.iter().copied().chain(
+ [parts.closure_kind_ty, parts.closure_sig_as_fn_ptr_ty, parts.tupled_upvars_ty]
+ .iter()
+ .map(|&ty| ty.into()),
+ ),
+ ),
+ }
+ }
+
+ /// Divides the closure substs into their respective components.
+ /// The ordering assumed here must match that used by `ClosureSubsts::new` above.
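+ /// Illustratively, the substs are expected to end with the three synthetic
+ /// components: `[parent_substs.., closure_kind_ty, closure_sig_as_fn_ptr_ty, tupled_upvars_ty]`.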
+ fn split(self) -> ClosureSubstsParts<'tcx, GenericArg<'tcx>> {
+ match self.substs[..] {
+ [
+ ref parent_substs @ ..,
+ closure_kind_ty,
+ closure_sig_as_fn_ptr_ty,
+ tupled_upvars_ty,
+ ] => ClosureSubstsParts {
+ parent_substs,
+ closure_kind_ty,
+ closure_sig_as_fn_ptr_ty,
+ tupled_upvars_ty,
+ },
+ _ => bug!("closure substs missing synthetics"),
+ }
+ }
+
+ /// Returns `true` only if enough of the synthetic types are known to
+ /// allow using all of the methods on `ClosureSubsts` without panicking.
+ ///
+ /// Used primarily by `ty::print::pretty` to be able to handle closure
+ /// types that haven't had their synthetic types substituted in.
+ pub fn is_valid(self) -> bool {
+ self.substs.len() >= 3
+ && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_))
+ }
+
+ /// Returns the substitutions of the closure's parent.
+ pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
+ self.split().parent_substs
+ }
+
+ /// Returns an iterator over the types of the paths captured by the closure.
+ /// If there was a type error in figuring out the types of the captured paths,
+ /// an empty iterator is returned.
+ #[inline]
+ pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ match self.tupled_upvars_ty().kind() {
+ TyKind::Error(_) => None,
+ TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+ TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
+ ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
+ }
+ .into_iter()
+ .flatten()
+ }
+
+ /// Returns the tuple type representing the upvars for this closure.
+ #[inline]
+ pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
+ self.split().tupled_upvars_ty.expect_ty()
+ }
+
+ /// Returns the closure kind for this closure; may return a type
+ /// variable during inference. To get the closure kind during
+ /// inference, use `infcx.closure_kind(substs)`.
+ pub fn kind_ty(self) -> Ty<'tcx> {
+ self.split().closure_kind_ty.expect_ty()
+ }
+
+ /// Returns the `fn` pointer type representing the closure signature for this
+ /// closure.
+ // FIXME(eddyb) this should be unnecessary, as the shallowly resolved
+ // type is known at the time of the creation of `ClosureSubsts`,
+ // see `rustc_typeck::check::closure`.
+ pub fn sig_as_fn_ptr_ty(self) -> Ty<'tcx> {
+ self.split().closure_sig_as_fn_ptr_ty.expect_ty()
+ }
+
+ /// Returns the closure kind for this closure; only usable outside
+ /// of an inference context, because in that context we know that
+ /// there are no type variables.
+ ///
+ /// If you have an inference context, use `infcx.closure_kind()`.
+ pub fn kind(self) -> ty::ClosureKind {
+ self.kind_ty().to_opt_closure_kind().unwrap()
+ }
+
+ /// Extracts the signature from the closure.
+ pub fn sig(self) -> ty::PolyFnSig<'tcx> {
+ let ty = self.sig_as_fn_ptr_ty();
+ match ty.kind() {
+ ty::FnPtr(sig) => *sig,
+ _ => bug!("closure_sig_as_fn_ptr_ty is not a fn-ptr: {:?}", ty.kind()),
+ }
+ }
+}
+
+/// Similar to `ClosureSubsts`; see the above documentation for more.
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct GeneratorSubsts<'tcx> {
+ pub substs: SubstsRef<'tcx>,
+}
+
+pub struct GeneratorSubstsParts<'tcx, T> {
+ pub parent_substs: &'tcx [GenericArg<'tcx>],
+ pub resume_ty: T,
+ pub yield_ty: T,
+ pub return_ty: T,
+ pub witness: T,
+ pub tupled_upvars_ty: T,
+}
+
+impl<'tcx> GeneratorSubsts<'tcx> {
+ /// Construct `GeneratorSubsts` from `GeneratorSubstsParts`, containing `Substs`
+ /// for the generator parent, alongside additional generator-specific components.
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ parts: GeneratorSubstsParts<'tcx, Ty<'tcx>>,
+ ) -> GeneratorSubsts<'tcx> {
+ GeneratorSubsts {
+ substs: tcx.mk_substs(
+ parts.parent_substs.iter().copied().chain(
+ [
+ parts.resume_ty,
+ parts.yield_ty,
+ parts.return_ty,
+ parts.witness,
+ parts.tupled_upvars_ty,
+ ]
+ .iter()
+ .map(|&ty| ty.into()),
+ ),
+ ),
+ }
+ }
+
+ /// Divides the generator substs into their respective components.
+ /// The ordering assumed here must match that used by `GeneratorSubsts::new` above.
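+ /// Illustratively, the substs are expected to end with the five synthetic
+ /// components: `[parent_substs.., resume_ty, yield_ty, return_ty, witness, tupled_upvars_ty]`.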
+ fn split(self) -> GeneratorSubstsParts<'tcx, GenericArg<'tcx>> {
+ match self.substs[..] {
+ [ref parent_substs @ .., resume_ty, yield_ty, return_ty, witness, tupled_upvars_ty] => {
+ GeneratorSubstsParts {
+ parent_substs,
+ resume_ty,
+ yield_ty,
+ return_ty,
+ witness,
+ tupled_upvars_ty,
+ }
+ }
+ _ => bug!("generator substs missing synthetics"),
+ }
+ }
+
+ /// Returns `true` only if enough of the synthetic types are known to
+ /// allow using all of the methods on `GeneratorSubsts` without panicking.
+ ///
+ /// Used primarily by `ty::print::pretty` to be able to handle generator
+ /// types that haven't had their synthetic types substituted in.
+ pub fn is_valid(self) -> bool {
+ self.substs.len() >= 5
+ && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_))
+ }
+
+ /// Returns the substitutions of the generator's parent.
+ pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
+ self.split().parent_substs
+ }
+
+ /// This describes the types that can be contained in a generator.
+ /// It will be a type variable initially and unified in the last stages of typeck of a body.
+ /// It contains a tuple of all the types that could end up on a generator frame.
+ /// The state transformation MIR pass may only produce layouts which mention types
+ /// in this tuple. Upvars are not counted here.
+ pub fn witness(self) -> Ty<'tcx> {
+ self.split().witness.expect_ty()
+ }
+
+ /// Returns an iterator over the types of the paths captured by the generator.
+ /// If there was a type error in figuring out the types of the captured paths,
+ /// an empty iterator is returned.
+ #[inline]
+ pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ match self.tupled_upvars_ty().kind() {
+ TyKind::Error(_) => None,
+ TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+ TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
+ ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
+ }
+ .into_iter()
+ .flatten()
+ }
+
+ /// Returns the tuple type representing the upvars for this generator.
+ #[inline]
+ pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
+ self.split().tupled_upvars_ty.expect_ty()
+ }
+
+ /// Returns the type representing the resume type of the generator.
+ pub fn resume_ty(self) -> Ty<'tcx> {
+ self.split().resume_ty.expect_ty()
+ }
+
+ /// Returns the type representing the yield type of the generator.
+ pub fn yield_ty(self) -> Ty<'tcx> {
+ self.split().yield_ty.expect_ty()
+ }
+
+ /// Returns the type representing the return type of the generator.
+ pub fn return_ty(self) -> Ty<'tcx> {
+ self.split().return_ty.expect_ty()
+ }
+
+ /// Returns the "generator signature", which consists of its resume, yield,
+ /// and return types.
+ ///
+ /// N.B., some bits of the code prefer to see this wrapped in a
+ /// binder, but it never contains bound regions. Probably this
+ /// function should be removed.
+ pub fn poly_sig(self) -> PolyGenSig<'tcx> {
+ ty::Binder::dummy(self.sig())
+ }
+
+ /// Returns the "generator signature", which consists of its resume, yield
+ /// and return types.
+ pub fn sig(self) -> GenSig<'tcx> {
+ ty::GenSig {
+ resume_ty: self.resume_ty(),
+ yield_ty: self.yield_ty(),
+ return_ty: self.return_ty(),
+ }
+ }
+}
+
+impl<'tcx> GeneratorSubsts<'tcx> {
+ /// Generator has not been resumed yet.
+ pub const UNRESUMED: usize = 0;
+ /// Generator has returned or is completed.
+ pub const RETURNED: usize = 1;
+ /// Generator has been poisoned.
+ pub const POISONED: usize = 2;
+
+ const UNRESUMED_NAME: &'static str = "Unresumed";
+ const RETURNED_NAME: &'static str = "Returned";
+ const POISONED_NAME: &'static str = "Panicked";
+
+ /// The valid variant indices of this generator.
+ #[inline]
+ pub fn variant_range(&self, def_id: DefId, tcx: TyCtxt<'tcx>) -> Range<VariantIdx> {
+ // FIXME requires optimized MIR
+ let num_variants = tcx.generator_layout(def_id).unwrap().variant_fields.len();
+ VariantIdx::new(0)..VariantIdx::new(num_variants)
+ }
+
+ /// The discriminant for the given variant. Panics if the `variant_index` is
+ /// out of range.
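+ /// Illustratively, variant `2` yields the discriminant value `2` of type
+ /// `u32`, since generator discriminants simply mirror the variant index.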
+ #[inline]
+ pub fn discriminant_for_variant(
+ &self,
+ def_id: DefId,
+ tcx: TyCtxt<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Discr<'tcx> {
+ // Generators don't support explicit discriminant values, so they are
+ // the same as the variant index.
+ assert!(self.variant_range(def_id, tcx).contains(&variant_index));
+ Discr { val: variant_index.as_usize() as u128, ty: self.discr_ty(tcx) }
+ }
+
+ /// The set of all discriminants for the generator, enumerated with their
+ /// variant indices.
+ #[inline]
+ pub fn discriminants(
+ self,
+ def_id: DefId,
+ tcx: TyCtxt<'tcx>,
+ ) -> impl Iterator<Item = (VariantIdx, Discr<'tcx>)> + Captures<'tcx> {
+ self.variant_range(def_id, tcx).map(move |index| {
+ (index, Discr { val: index.as_usize() as u128, ty: self.discr_ty(tcx) })
+ })
+ }
+
+ /// Returns the name of the enumerator for the given variant `v`.
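+ /// For example: variant 0 is `"Unresumed"`, 1 is `"Returned"`, 2 is
+ /// `"Panicked"`, and variants 3, 4, ... are `"Suspend0"`, `"Suspend1"`, ....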
+ pub fn variant_name(v: VariantIdx) -> Cow<'static, str> {
+ match v.as_usize() {
+ Self::UNRESUMED => Cow::from(Self::UNRESUMED_NAME),
+ Self::RETURNED => Cow::from(Self::RETURNED_NAME),
+ Self::POISONED => Cow::from(Self::POISONED_NAME),
+ _ => Cow::from(format!("Suspend{}", v.as_usize() - 3)),
+ }
+ }
+
+ /// The type of the state discriminant used in the generator type.
+ #[inline]
+ pub fn discr_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ tcx.types.u32
+ }
+
+ /// This returns the types of the MIR locals which had to be stored across suspension points.
+ /// It is calculated in rustc_mir_transform::generator::StateTransform.
+ /// All the types here must be in the tuple in GeneratorInterior.
+ ///
+ /// The locals are grouped by their variant number. Note that some locals may
+ /// be repeated in multiple variants.
+ #[inline]
+ pub fn state_tys(
+ self,
+ def_id: DefId,
+ tcx: TyCtxt<'tcx>,
+ ) -> impl Iterator<Item = impl Iterator<Item = Ty<'tcx>> + Captures<'tcx>> {
+ let layout = tcx.generator_layout(def_id).unwrap();
+ layout.variant_fields.iter().map(move |variant| {
+ variant
+ .iter()
+ .map(move |field| EarlyBinder(layout.field_tys[*field]).subst(tcx, self.substs))
+ })
+ }
+
+ /// These are the types of the fields of a generator that are not stored in
+ /// a variant.
+ #[inline]
+ pub fn prefix_tys(self) -> impl Iterator<Item = Ty<'tcx>> {
+ self.upvar_tys()
+ }
+}
+
+#[derive(Debug, Copy, Clone, HashStable)]
+pub enum UpvarSubsts<'tcx> {
+ Closure(SubstsRef<'tcx>),
+ Generator(SubstsRef<'tcx>),
+}
+
+impl<'tcx> UpvarSubsts<'tcx> {
+ /// Returns an iterator over the types of the paths captured by the closure/generator.
+ /// If there was a type error in figuring out the types of the captured paths,
+ /// an empty iterator is returned.
+ #[inline]
+ pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ let tupled_tys = match self {
+ UpvarSubsts::Closure(substs) => substs.as_closure().tupled_upvars_ty(),
+ UpvarSubsts::Generator(substs) => substs.as_generator().tupled_upvars_ty(),
+ };
+
+ match tupled_tys.kind() {
+ TyKind::Error(_) => None,
+ TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+ TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
+ ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
+ }
+ .into_iter()
+ .flatten()
+ }
+
+ #[inline]
+ pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
+ match self {
+ UpvarSubsts::Closure(substs) => substs.as_closure().tupled_upvars_ty(),
+ UpvarSubsts::Generator(substs) => substs.as_generator().tupled_upvars_ty(),
+ }
+ }
+}
+
+/// An inline const is modeled like
+/// ```ignore (illustrative)
+/// const InlineConst<'l0...'li, T0...Tj, R>: R;
+/// ```
+/// where:
+///
+/// - 'l0...'li and T0...Tj are the generic parameters
+/// inherited from the item that defined the inline const,
+/// - R represents the type of the constant.
+///
+/// When the inline const is instantiated, `R` is substituted as the actual inferred
+/// type of the constant. The reason that `R` is represented as an extra type parameter
+/// is the same reason that [`ClosureSubsts`] have `CS` and `U` as type parameters:
+/// inline const can reference lifetimes that are internal to the creating function.
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct InlineConstSubsts<'tcx> {
+ /// Generic parameters from the enclosing item,
+ /// concatenated with the inferred type of the constant.
+ pub substs: SubstsRef<'tcx>,
+}
+
+/// Struct returned by `split()`.
+pub struct InlineConstSubstsParts<'tcx, T> {
+ pub parent_substs: &'tcx [GenericArg<'tcx>],
+ pub ty: T,
+}
+
+impl<'tcx> InlineConstSubsts<'tcx> {
+ /// Construct `InlineConstSubsts` from `InlineConstSubstsParts`.
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ parts: InlineConstSubstsParts<'tcx, Ty<'tcx>>,
+ ) -> InlineConstSubsts<'tcx> {
+ InlineConstSubsts {
+ substs: tcx.mk_substs(
+ parts.parent_substs.iter().copied().chain(std::iter::once(parts.ty.into())),
+ ),
+ }
+ }
+
+ /// Divides the inline const substs into their respective components.
+ /// The ordering assumed here must match that used by `InlineConstSubsts::new` above.
+ fn split(self) -> InlineConstSubstsParts<'tcx, GenericArg<'tcx>> {
+ match self.substs[..] {
+ [ref parent_substs @ .., ty] => InlineConstSubstsParts { parent_substs, ty },
+ _ => bug!("inline const substs missing synthetics"),
+ }
+ }
+
+ /// Returns the substitutions of the inline const's parent.
+ pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
+ self.split().parent_substs
+ }
+
+ /// Returns the type of this inline const.
+ pub fn ty(self) -> Ty<'tcx> {
+ self.split().ty.expect_ty()
+ }
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub enum ExistentialPredicate<'tcx> {
+ /// E.g., `Iterator`.
+ Trait(ExistentialTraitRef<'tcx>),
+ /// E.g., `Iterator::Item = T`.
+ Projection(ExistentialProjection<'tcx>),
+ /// E.g., `Send`.
+ AutoTrait(DefId),
+}
+
+impl<'tcx> ExistentialPredicate<'tcx> {
+ /// Compares via an ordering that will not change if modules are reordered or other changes are
+ /// made to the tree. In particular, this ordering is preserved across incremental compilations.
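+ /// Concretely, `Trait` sorts before `Projection`, which sorts before
+ /// `AutoTrait`; ties among projections or auto traits are broken by the
+ /// stable `DefPathHash` of the item, not by crate-local `DefId` order.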
+ pub fn stable_cmp(&self, tcx: TyCtxt<'tcx>, other: &Self) -> Ordering {
+ use self::ExistentialPredicate::*;
+ match (*self, *other) {
+ (Trait(_), Trait(_)) => Ordering::Equal,
+ (Projection(ref a), Projection(ref b)) => {
+ tcx.def_path_hash(a.item_def_id).cmp(&tcx.def_path_hash(b.item_def_id))
+ }
+ (AutoTrait(ref a), AutoTrait(ref b)) => {
+ tcx.def_path_hash(*a).cmp(&tcx.def_path_hash(*b))
+ }
+ (Trait(_), _) => Ordering::Less,
+ (Projection(_), Trait(_)) => Ordering::Greater,
+ (Projection(_), _) => Ordering::Less,
+ (AutoTrait(_), _) => Ordering::Greater,
+ }
+ }
+}
+
+impl<'tcx> Binder<'tcx, ExistentialPredicate<'tcx>> {
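+ /// Editorial sketch: reattaches a concrete `self_ty` to an erased existential
+ /// predicate. E.g. the `Trait` component of `dyn Trait` plus a self type `S`
+ /// becomes the full predicate `S: Trait`.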
+ pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::Predicate<'tcx> {
+ use crate::ty::ToPredicate;
+ match self.skip_binder() {
+ ExistentialPredicate::Trait(tr) => {
+ self.rebind(tr).with_self_ty(tcx, self_ty).without_const().to_predicate(tcx)
+ }
+ ExistentialPredicate::Projection(p) => {
+ self.rebind(p.with_self_ty(tcx, self_ty)).to_predicate(tcx)
+ }
+ ExistentialPredicate::AutoTrait(did) => {
+ let trait_ref = self.rebind(ty::TraitRef {
+ def_id: did,
+ substs: tcx.mk_substs_trait(self_ty, &[]),
+ });
+ trait_ref.without_const().to_predicate(tcx)
+ }
+ }
+ }
+}
+
+impl<'tcx> List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>> {
+ /// Returns the "principal `DefId`" of this set of existential predicates.
+ ///
+ /// A Rust trait object type consists (in addition to a lifetime bound)
+ /// of a set of trait bounds, which are separated into any number
+ /// of auto-trait bounds, and at most one non-auto-trait bound. The
+ /// non-auto-trait bound is called the "principal" of the trait
+ /// object.
+ ///
+ /// Only the principal can have methods or type parameters (because
+ /// auto traits can have neither of them). This is important, because
+ /// it means the auto traits can be treated as an unordered set (methods
+ /// would force an order for the vtable, while relating traits with
+ /// type parameters without knowing the order to relate them in is
+ /// a rather non-trivial task).
+ ///
+ /// For example, in the trait object `dyn fmt::Debug + Sync`, the
+ /// principal bound is `Some(fmt::Debug)`, while the auto-trait bounds
+ /// are the set `{Sync}`.
+ ///
+ /// It is also possible to have a "trivial" trait object that
+ /// consists only of auto traits, with no principal - for example,
+ /// `dyn Send + Sync`. In that case, the set of auto-trait bounds
+ /// is `{Send, Sync}`, while there is no principal. These trait objects
+ /// have a "trivial" vtable consisting of just the size, alignment,
+ /// and destructor.
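+ ///
+ /// A sketch of the expected results (illustrative only):
+ /// ```ignore (illustrative)
+ /// // dyn fmt::Debug + Sync  =>  principal() == Some(Binder(fmt::Debug))
+ /// // dyn Send + Sync        =>  principal() == None
+ /// ```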
+ pub fn principal(&self) -> Option<ty::Binder<'tcx, ExistentialTraitRef<'tcx>>> {
+ self[0]
+ .map_bound(|this| match this {
+ ExistentialPredicate::Trait(tr) => Some(tr),
+ _ => None,
+ })
+ .transpose()
+ }
+
+ pub fn principal_def_id(&self) -> Option<DefId> {
+ self.principal().map(|trait_ref| trait_ref.skip_binder().def_id)
+ }
+
+ #[inline]
+ pub fn projection_bounds<'a>(
+ &'a self,
+ ) -> impl Iterator<Item = ty::Binder<'tcx, ExistentialProjection<'tcx>>> + 'a {
+ self.iter().filter_map(|predicate| {
+ predicate
+ .map_bound(|pred| match pred {
+ ExistentialPredicate::Projection(projection) => Some(projection),
+ _ => None,
+ })
+ .transpose()
+ })
+ }
+
+ #[inline]
+ pub fn auto_traits<'a>(&'a self) -> impl Iterator<Item = DefId> + Captures<'tcx> + 'a {
+ self.iter().filter_map(|predicate| match predicate.skip_binder() {
+ ExistentialPredicate::AutoTrait(did) => Some(did),
+ _ => None,
+ })
+ }
+}
+
+/// A complete reference to a trait. These take numerous guises in syntax,
+/// but perhaps the most recognizable form is in a where-clause:
+/// ```ignore (illustrative)
+/// T: Foo<U>
+/// ```
+/// This would be represented by a trait-reference where the `DefId` is the
+/// `DefId` for the trait `Foo` and the substs define `T` as parameter 0,
+/// and `U` as parameter 1.
+///
+/// Trait references also appear in object types like `Foo<U>`, but in
+/// that case the `Self` parameter is absent from the substitutions.
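+///
+/// A minimal sketch of building one (illustrative; `foo_def_id`, `t`, and `u`
+/// are assumed to be in scope):
+/// ```ignore (illustrative)
+/// let substs = tcx.intern_substs(&[t.into(), u.into()]); // [T, U]
+/// let trait_ref = ty::TraitRef::new(foo_def_id, substs); // T: Foo<U>
+/// assert_eq!(trait_ref.self_ty(), t);
+/// ```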
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct TraitRef<'tcx> {
+ pub def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+}
+
+impl<'tcx> TraitRef<'tcx> {
+ pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> TraitRef<'tcx> {
+ TraitRef { def_id, substs }
+ }
+
+ /// Returns a `TraitRef` of the form `P0: Foo<P1..Pn>` where `Pi`
+    /// are the parameters defined on the trait.
+ pub fn identity(tcx: TyCtxt<'tcx>, def_id: DefId) -> Binder<'tcx, TraitRef<'tcx>> {
+ ty::Binder::dummy(TraitRef {
+ def_id,
+ substs: InternalSubsts::identity_for_item(tcx, def_id),
+ })
+ }
+
+ #[inline]
+ pub fn self_ty(&self) -> Ty<'tcx> {
+ self.substs.type_at(0)
+ }
+
+ pub fn from_method(
+ tcx: TyCtxt<'tcx>,
+ trait_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> ty::TraitRef<'tcx> {
+ let defs = tcx.generics_of(trait_id);
+ ty::TraitRef { def_id: trait_id, substs: tcx.intern_substs(&substs[..defs.params.len()]) }
+ }
+}
+
+pub type PolyTraitRef<'tcx> = Binder<'tcx, TraitRef<'tcx>>;
+
+impl<'tcx> PolyTraitRef<'tcx> {
+ pub fn self_ty(&self) -> Binder<'tcx, Ty<'tcx>> {
+ self.map_bound_ref(|tr| tr.self_ty())
+ }
+
+ pub fn def_id(&self) -> DefId {
+ self.skip_binder().def_id
+ }
+
+ pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> {
+ self.map_bound(|trait_ref| ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Positive,
+ })
+ }
+
+ /// Same as [`PolyTraitRef::to_poly_trait_predicate`] but sets a negative polarity instead.
+ pub fn to_poly_trait_predicate_negative_polarity(&self) -> ty::PolyTraitPredicate<'tcx> {
+ self.map_bound(|trait_ref| ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Negative,
+ })
+ }
+}
+
+/// An existential reference to a trait, where `Self` is erased.
+/// For example, the trait object `Trait<'a, 'b, X, Y>` is:
+/// ```ignore (illustrative)
+/// exists T. T: Trait<'a, 'b, X, Y>
+/// ```
+/// The substitutions don't include the erased `Self`, only trait
+/// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above).
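+///
+/// Erasing and reinstating `Self` is a roundtrip (illustrative sketch;
+/// `trait_ref` is an assumed `ty::TraitRef<'tcx>`):
+/// ```ignore (illustrative)
+/// let ex = ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref);
+/// // `ex.substs` is `trait_ref.substs` minus its first (`Self`) entry.
+/// assert_eq!(ex.with_self_ty(tcx, trait_ref.self_ty()), trait_ref);
+/// ```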
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct ExistentialTraitRef<'tcx> {
+ pub def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+}
+
+impl<'tcx> ExistentialTraitRef<'tcx> {
+ pub fn erase_self_ty(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+ ) -> ty::ExistentialTraitRef<'tcx> {
+ // Assert there is a Self.
+ trait_ref.substs.type_at(0);
+
+ ty::ExistentialTraitRef {
+ def_id: trait_ref.def_id,
+ substs: tcx.intern_substs(&trait_ref.substs[1..]),
+ }
+ }
+
+    /// Object types don't have a self type specified. Therefore, when
+    /// we convert the principal trait-ref into a normal trait-ref,
+    /// we must supply *some* self type. A common choice is `mk_err()`
+    /// or some placeholder type.
+ pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::TraitRef<'tcx> {
+ // otherwise the escaping vars would be captured by the binder
+ // debug_assert!(!self_ty.has_escaping_bound_vars());
+
+ ty::TraitRef { def_id: self.def_id, substs: tcx.mk_substs_trait(self_ty, self.substs) }
+ }
+}
+
+pub type PolyExistentialTraitRef<'tcx> = Binder<'tcx, ExistentialTraitRef<'tcx>>;
+
+impl<'tcx> PolyExistentialTraitRef<'tcx> {
+ pub fn def_id(&self) -> DefId {
+ self.skip_binder().def_id
+ }
+
+    /// Object types don't have a self type specified. Therefore, when
+    /// we convert the principal trait-ref into a normal trait-ref,
+    /// we must supply *some* self type. A common choice is `mk_err()`
+    /// or some placeholder type.
+ pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::PolyTraitRef<'tcx> {
+ self.map_bound(|trait_ref| trait_ref.with_self_ty(tcx, self_ty))
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable, HashStable)]
+pub struct EarlyBinder<T>(pub T);
+
+impl<T> EarlyBinder<T> {
+ pub fn as_ref(&self) -> EarlyBinder<&T> {
+ EarlyBinder(&self.0)
+ }
+
+ pub fn map_bound_ref<F, U>(&self, f: F) -> EarlyBinder<U>
+ where
+ F: FnOnce(&T) -> U,
+ {
+ self.as_ref().map_bound(f)
+ }
+
+ pub fn map_bound<F, U>(self, f: F) -> EarlyBinder<U>
+ where
+ F: FnOnce(T) -> U,
+ {
+ let value = f(self.0);
+ EarlyBinder(value)
+ }
+
+ pub fn try_map_bound<F, U, E>(self, f: F) -> Result<EarlyBinder<U>, E>
+ where
+ F: FnOnce(T) -> Result<U, E>,
+ {
+ let value = f(self.0)?;
+ Ok(EarlyBinder(value))
+ }
+
+ pub fn rebind<U>(&self, value: U) -> EarlyBinder<U> {
+ EarlyBinder(value)
+ }
+}
+
+impl<T> EarlyBinder<Option<T>> {
+ pub fn transpose(self) -> Option<EarlyBinder<T>> {
+ self.0.map(|v| EarlyBinder(v))
+ }
+}
+
+impl<T, U> EarlyBinder<(T, U)> {
+ pub fn transpose_tuple2(self) -> (EarlyBinder<T>, EarlyBinder<U>) {
+ (EarlyBinder(self.0.0), EarlyBinder(self.0.1))
+ }
+}
+
+pub struct EarlyBinderIter<T> {
+ t: T,
+}
+
+impl<T: IntoIterator> EarlyBinder<T> {
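+    /// Turns an `EarlyBinder` of a collection into an iterator of
+    /// `EarlyBinder`-wrapped items. An illustrative sketch (`bound_tys` is an
+    /// assumed `EarlyBinder<Vec<Ty<'tcx>>>`):
+    /// ```ignore (illustrative)
+    /// for bound_ty in bound_tys.transpose_iter() {
+    ///     // each `bound_ty` is an `EarlyBinder<Ty<'tcx>>`
+    /// }
+    /// ```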
+ pub fn transpose_iter(self) -> EarlyBinderIter<T::IntoIter> {
+ EarlyBinderIter { t: self.0.into_iter() }
+ }
+}
+
+impl<T: Iterator> Iterator for EarlyBinderIter<T> {
+ type Item = EarlyBinder<T::Item>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.t.next().map(|i| EarlyBinder(i))
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum BoundVariableKind {
+ Ty(BoundTyKind),
+ Region(BoundRegionKind),
+ Const,
+}
+
+impl BoundVariableKind {
+ pub fn expect_region(self) -> BoundRegionKind {
+ match self {
+ BoundVariableKind::Region(lt) => lt,
+ _ => bug!("expected a region, but found another kind"),
+ }
+ }
+
+ pub fn expect_ty(self) -> BoundTyKind {
+ match self {
+ BoundVariableKind::Ty(ty) => ty,
+ _ => bug!("expected a type, but found another kind"),
+ }
+ }
+
+ pub fn expect_const(self) {
+ match self {
+ BoundVariableKind::Const => (),
+ _ => bug!("expected a const, but found another kind"),
+ }
+ }
+}
+
+/// `Binder` binds higher-ranked lifetimes or types. It is part of the
+/// compiler's representation for things like `for<'a> Fn(&'a isize)`
+/// (which would be represented by the type `PolyTraitRef ==
+/// Binder<'tcx, TraitRef>`). Note that when we instantiate,
+/// erase, or otherwise "discharge" these bound vars, we change the
+/// type from `Binder<'tcx, T>` to just `T` (see
+/// e.g., `liberate_late_bound_regions`).
+///
+/// `Decodable` and `Encodable` are implemented for `Binder<T>` using the `impl_binder_encode_decode!` macro.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(HashStable)]
+pub struct Binder<'tcx, T>(T, &'tcx List<BoundVariableKind>);
+
+impl<'tcx, T> Binder<'tcx, T>
+where
+ T: TypeVisitable<'tcx>,
+{
+ /// Wraps `value` in a binder, asserting that `value` does not
+ /// contain any bound vars that would be bound by the
+ /// binder. This is commonly used to 'inject' a value T into a
+ /// different binding level.
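+    ///
+    /// A minimal sketch (illustrative; `tcx` is assumed in scope):
+    /// ```ignore (illustrative)
+    /// let bound_ty = Binder::dummy(tcx.types.u32); // no vars to capture
+    /// assert!(bound_ty.bound_vars().is_empty());
+    /// ```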
+ pub fn dummy(value: T) -> Binder<'tcx, T> {
+ assert!(!value.has_escaping_bound_vars());
+ Binder(value, ty::List::empty())
+ }
+
+ pub fn bind_with_vars(value: T, vars: &'tcx List<BoundVariableKind>) -> Binder<'tcx, T> {
+ if cfg!(debug_assertions) {
+ let mut validator = ValidateBoundVars::new(vars);
+ value.visit_with(&mut validator);
+ }
+ Binder(value, vars)
+ }
+}
+
+impl<'tcx, T> Binder<'tcx, T> {
+ /// Skips the binder and returns the "bound" value. This is a
+ /// risky thing to do because it's easy to get confused about
+ /// De Bruijn indices and the like. It is usually better to
+ /// discharge the binder using `no_bound_vars` or
+ /// `replace_late_bound_regions` or something like
+ /// that. `skip_binder` is only valid when you are either
+ /// extracting data that has nothing to do with bound vars, you
+ /// are doing some sort of test that does not involve bound
+ /// regions, or you are being very careful about your depth
+ /// accounting.
+ ///
+ /// Some examples where `skip_binder` is reasonable:
+ ///
+ /// - extracting the `DefId` from a PolyTraitRef;
+ /// - comparing the self type of a PolyTraitRef to see if it is equal to
+ /// a type parameter `X`, since the type `X` does not reference any regions
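+    ///
+    /// An illustrative sketch of the first case (`poly_trait_ref` is an
+    /// assumed `PolyTraitRef<'tcx>`):
+    /// ```ignore (illustrative)
+    /// // The `DefId` is unaffected by bound vars, so skipping is fine here.
+    /// let def_id = poly_trait_ref.skip_binder().def_id;
+    /// ```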
+ pub fn skip_binder(self) -> T {
+ self.0
+ }
+
+ pub fn bound_vars(&self) -> &'tcx List<BoundVariableKind> {
+ self.1
+ }
+
+ pub fn as_ref(&self) -> Binder<'tcx, &T> {
+ Binder(&self.0, self.1)
+ }
+
+ pub fn as_deref(&self) -> Binder<'tcx, &T::Target>
+ where
+ T: Deref,
+ {
+ Binder(&self.0, self.1)
+ }
+
+ pub fn map_bound_ref_unchecked<F, U>(&self, f: F) -> Binder<'tcx, U>
+ where
+ F: FnOnce(&T) -> U,
+ {
+ let value = f(&self.0);
+ Binder(value, self.1)
+ }
+
+ pub fn map_bound_ref<F, U: TypeVisitable<'tcx>>(&self, f: F) -> Binder<'tcx, U>
+ where
+ F: FnOnce(&T) -> U,
+ {
+ self.as_ref().map_bound(f)
+ }
+
+ pub fn map_bound<F, U: TypeVisitable<'tcx>>(self, f: F) -> Binder<'tcx, U>
+ where
+ F: FnOnce(T) -> U,
+ {
+ let value = f(self.0);
+ if cfg!(debug_assertions) {
+ let mut validator = ValidateBoundVars::new(self.1);
+ value.visit_with(&mut validator);
+ }
+ Binder(value, self.1)
+ }
+
+ pub fn try_map_bound<F, U: TypeVisitable<'tcx>, E>(self, f: F) -> Result<Binder<'tcx, U>, E>
+ where
+ F: FnOnce(T) -> Result<U, E>,
+ {
+ let value = f(self.0)?;
+ if cfg!(debug_assertions) {
+ let mut validator = ValidateBoundVars::new(self.1);
+ value.visit_with(&mut validator);
+ }
+ Ok(Binder(value, self.1))
+ }
+
+ /// Wraps a `value` in a binder, using the same bound variables as the
+ /// current `Binder`. This should not be used if the new value *changes*
+ /// the bound variables. Note: the (old or new) value itself does not
+ /// necessarily need to *name* all the bound variables.
+ ///
+    /// This currently doesn't do anything different from `bind_with_vars`,
+    /// because we don't actually track bound vars. However, semantically, it is
+    /// different because bound vars aren't allowed to change here, whereas they
+    /// are in `bind_with_vars`. This may be (debug) asserted in the future.
+ pub fn rebind<U>(&self, value: U) -> Binder<'tcx, U>
+ where
+ U: TypeVisitable<'tcx>,
+ {
+ if cfg!(debug_assertions) {
+ let mut validator = ValidateBoundVars::new(self.bound_vars());
+ value.visit_with(&mut validator);
+ }
+ Binder(value, self.1)
+ }
+
+ /// Unwraps and returns the value within, but only if it contains
+ /// no bound vars at all. (In other words, if this binder --
+ /// and indeed any enclosing binder -- doesn't bind anything at
+ /// all.) Otherwise, returns `None`.
+ ///
+    /// (One could imagine having a method that just unwraps a single
+    /// binder, while permitting late-bound vars bound by enclosing
+    /// binders, but that would require adjusting the De Bruijn
+    /// indices, and given the shallow binding structure we often use,
+    /// would not be that useful.)
+ pub fn no_bound_vars(self) -> Option<T>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ if self.0.has_escaping_bound_vars() { None } else { Some(self.skip_binder()) }
+ }
+
+ /// Splits the contents into two things that share the same binder
+ /// level as the original, returning two distinct binders.
+ ///
+ /// `f` should consider bound regions at depth 1 to be free, and
+ /// anything it produces with bound regions at depth 1 will be
+ /// bound in the resulting return values.
+ pub fn split<U, V, F>(self, f: F) -> (Binder<'tcx, U>, Binder<'tcx, V>)
+ where
+ F: FnOnce(T) -> (U, V),
+ {
+ let (u, v) = f(self.0);
+ (Binder(u, self.1), Binder(v, self.1))
+ }
+}
+
+impl<'tcx, T> Binder<'tcx, Option<T>> {
+ pub fn transpose(self) -> Option<Binder<'tcx, T>> {
+ let bound_vars = self.1;
+ self.0.map(|v| Binder(v, bound_vars))
+ }
+}
+
+/// Represents the projection of an associated type. In explicit UFCS
+/// form this would be written `<T as Trait<..>>::N`.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct ProjectionTy<'tcx> {
+ /// The parameters of the associated item.
+ pub substs: SubstsRef<'tcx>,
+
+ /// The `DefId` of the `TraitItem` for the associated type `N`.
+ ///
+ /// Note that this is not the `DefId` of the `TraitRef` containing this
+ /// associated type, which is in `tcx.associated_item(item_def_id).container`,
+ /// aka. `tcx.parent(item_def_id).unwrap()`.
+ pub item_def_id: DefId,
+}
+
+impl<'tcx> ProjectionTy<'tcx> {
+ pub fn trait_def_id(&self, tcx: TyCtxt<'tcx>) -> DefId {
+ tcx.parent(self.item_def_id)
+ }
+
+    /// Extracts the underlying trait reference and own substs from this projection.
+    /// For example, if this is a projection of `<T as StreamingIterator>::Item<'a>`,
+    /// then this function would return a `T: StreamingIterator` trait reference and
+    /// `['a]` as the own substs.
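+    ///
+    /// A sketch of that example (illustrative; `proj` is the assumed
+    /// `ProjectionTy` above):
+    /// ```ignore (illustrative)
+    /// let (trait_ref, own_substs) = proj.trait_ref_and_own_substs(tcx);
+    /// // trait_ref represents `T: StreamingIterator`, own_substs is `['a]`.
+    /// ```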
+ pub fn trait_ref_and_own_substs(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ ) -> (ty::TraitRef<'tcx>, &'tcx [ty::GenericArg<'tcx>]) {
+ let def_id = tcx.parent(self.item_def_id);
+ let trait_generics = tcx.generics_of(def_id);
+ (
+ ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, trait_generics) },
+ &self.substs[trait_generics.count()..],
+ )
+ }
+
+ /// Extracts the underlying trait reference from this projection.
+ /// For example, if this is a projection of `<T as Iterator>::Item`,
+ /// then this function would return a `T: Iterator` trait reference.
+ ///
+    /// WARNING: This will drop the substs for generic associated types;
+    /// consider calling [Self::trait_ref_and_own_substs] to get those
+    /// as well.
+ pub fn trait_ref(&self, tcx: TyCtxt<'tcx>) -> ty::TraitRef<'tcx> {
+ let def_id = self.trait_def_id(tcx);
+ ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, tcx.generics_of(def_id)) }
+ }
+
+ pub fn self_ty(&self) -> Ty<'tcx> {
+ self.substs.type_at(0)
+ }
+}
+
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct GenSig<'tcx> {
+ pub resume_ty: Ty<'tcx>,
+ pub yield_ty: Ty<'tcx>,
+ pub return_ty: Ty<'tcx>,
+}
+
+pub type PolyGenSig<'tcx> = Binder<'tcx, GenSig<'tcx>>;
+
+/// Signature of a function type, describing its input and output types.
+///
+/// - `inputs`: the list of arguments and their modes.
+/// - `output`: the return type.
+/// - `c_variadic`: indicates whether this is a C-variadic function.
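+///
+/// An illustrative sketch: for `fn(u8, u16) -> bool`, `inputs_and_output`
+/// stores `[u8, u16, bool]` (assumed `sig: FnSig<'_>` for that type):
+/// ```ignore (illustrative)
+/// assert_eq!(sig.inputs().len(), 2);        // [u8, u16]
+/// assert_eq!(sig.output(), tcx.types.bool);
+/// ```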
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct FnSig<'tcx> {
+ pub inputs_and_output: &'tcx List<Ty<'tcx>>,
+ pub c_variadic: bool,
+ pub unsafety: hir::Unsafety,
+ pub abi: abi::Abi,
+}
+
+impl<'tcx> FnSig<'tcx> {
+ pub fn inputs(&self) -> &'tcx [Ty<'tcx>] {
+ &self.inputs_and_output[..self.inputs_and_output.len() - 1]
+ }
+
+ pub fn output(&self) -> Ty<'tcx> {
+ self.inputs_and_output[self.inputs_and_output.len() - 1]
+ }
+
+ // Creates a minimal `FnSig` to be used when encountering a `TyKind::Error` in a fallible
+ // method.
+ fn fake() -> FnSig<'tcx> {
+ FnSig {
+ inputs_and_output: List::empty(),
+ c_variadic: false,
+ unsafety: hir::Unsafety::Normal,
+ abi: abi::Abi::Rust,
+ }
+ }
+}
+
+pub type PolyFnSig<'tcx> = Binder<'tcx, FnSig<'tcx>>;
+
+impl<'tcx> PolyFnSig<'tcx> {
+ #[inline]
+ pub fn inputs(&self) -> Binder<'tcx, &'tcx [Ty<'tcx>]> {
+ self.map_bound_ref_unchecked(|fn_sig| fn_sig.inputs())
+ }
+ #[inline]
+ pub fn input(&self, index: usize) -> ty::Binder<'tcx, Ty<'tcx>> {
+ self.map_bound_ref(|fn_sig| fn_sig.inputs()[index])
+ }
+ pub fn inputs_and_output(&self) -> ty::Binder<'tcx, &'tcx List<Ty<'tcx>>> {
+ self.map_bound_ref(|fn_sig| fn_sig.inputs_and_output)
+ }
+ #[inline]
+ pub fn output(&self) -> ty::Binder<'tcx, Ty<'tcx>> {
+ self.map_bound_ref(|fn_sig| fn_sig.output())
+ }
+ pub fn c_variadic(&self) -> bool {
+ self.skip_binder().c_variadic
+ }
+ pub fn unsafety(&self) -> hir::Unsafety {
+ self.skip_binder().unsafety
+ }
+ pub fn abi(&self) -> abi::Abi {
+ self.skip_binder().abi
+ }
+}
+
+pub type CanonicalPolyFnSig<'tcx> = Canonical<'tcx, Binder<'tcx, FnSig<'tcx>>>;
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct ParamTy {
+ pub index: u32,
+ pub name: Symbol,
+}
+
+impl<'tcx> ParamTy {
+ pub fn new(index: u32, name: Symbol) -> ParamTy {
+ ParamTy { index, name }
+ }
+
+ pub fn for_def(def: &ty::GenericParamDef) -> ParamTy {
+ ParamTy::new(def.index, def.name)
+ }
+
+ #[inline]
+ pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ tcx.mk_ty_param(self.index, self.name)
+ }
+}
+
+#[derive(Copy, Clone, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(HashStable)]
+pub struct ParamConst {
+ pub index: u32,
+ pub name: Symbol,
+}
+
+impl ParamConst {
+ pub fn new(index: u32, name: Symbol) -> ParamConst {
+ ParamConst { index, name }
+ }
+
+ pub fn for_def(def: &ty::GenericParamDef) -> ParamConst {
+ ParamConst::new(def.index, def.name)
+ }
+}
+
+/// Use this rather than `RegionKind` whenever possible.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
+#[rustc_pass_by_value]
+pub struct Region<'tcx>(pub Interned<'tcx, RegionKind<'tcx>>);
+
+impl<'tcx> Deref for Region<'tcx> {
+ type Target = RegionKind<'tcx>;
+
+ #[inline]
+ fn deref(&self) -> &RegionKind<'tcx> {
+ &self.0.0
+ }
+}
+
+impl<'tcx> fmt::Debug for Region<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self.kind())
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub struct EarlyBoundRegion {
+ pub def_id: DefId,
+ pub index: u32,
+ pub name: Symbol,
+}
+
+impl fmt::Debug for EarlyBoundRegion {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}, {}", self.index, self.name)
+ }
+}
+
+/// A **`const`** **v**ariable **ID**.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(HashStable, TyEncodable, TyDecodable)]
+pub struct ConstVid<'tcx> {
+ pub index: u32,
+ pub phantom: PhantomData<&'tcx ()>,
+}
+
+rustc_index::newtype_index! {
+ /// A **region** (lifetime) **v**ariable **ID**.
+ #[derive(HashStable)]
+ pub struct RegionVid {
+ DEBUG_FORMAT = custom,
+ }
+}
+
+impl Atom for RegionVid {
+ fn index(self) -> usize {
+ Idx::index(self)
+ }
+}
+
+rustc_index::newtype_index! {
+ #[derive(HashStable)]
+ pub struct BoundVar { .. }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct BoundTy {
+ pub var: BoundVar,
+ pub kind: BoundTyKind,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum BoundTyKind {
+ Anon,
+ Param(Symbol),
+}
+
+impl From<BoundVar> for BoundTy {
+ fn from(var: BoundVar) -> Self {
+ BoundTy { var, kind: BoundTyKind::Anon }
+ }
+}
+
+/// A `ProjectionPredicate` for an `ExistentialTraitRef`.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct ExistentialProjection<'tcx> {
+ pub item_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ pub term: Term<'tcx>,
+}
+
+pub type PolyExistentialProjection<'tcx> = Binder<'tcx, ExistentialProjection<'tcx>>;
+
+impl<'tcx> ExistentialProjection<'tcx> {
+ /// Extracts the underlying existential trait reference from this projection.
+ /// For example, if this is a projection of `exists T. <T as Iterator>::Item == X`,
+ /// then this function would return an `exists T. T: Iterator` existential trait
+ /// reference.
+ pub fn trait_ref(&self, tcx: TyCtxt<'tcx>) -> ty::ExistentialTraitRef<'tcx> {
+ let def_id = tcx.parent(self.item_def_id);
+ let subst_count = tcx.generics_of(def_id).count() - 1;
+ let substs = tcx.intern_substs(&self.substs[..subst_count]);
+ ty::ExistentialTraitRef { def_id, substs }
+ }
+
+ pub fn with_self_ty(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ self_ty: Ty<'tcx>,
+ ) -> ty::ProjectionPredicate<'tcx> {
+ // otherwise the escaping regions would be captured by the binders
+ debug_assert!(!self_ty.has_escaping_bound_vars());
+
+ ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy {
+ item_def_id: self.item_def_id,
+ substs: tcx.mk_substs_trait(self_ty, self.substs),
+ },
+ term: self.term,
+ }
+ }
+
+ pub fn erase_self_ty(
+ tcx: TyCtxt<'tcx>,
+ projection_predicate: ty::ProjectionPredicate<'tcx>,
+ ) -> Self {
+ // Assert there is a Self.
+ projection_predicate.projection_ty.substs.type_at(0);
+
+ Self {
+ item_def_id: projection_predicate.projection_ty.item_def_id,
+ substs: tcx.intern_substs(&projection_predicate.projection_ty.substs[1..]),
+ term: projection_predicate.term,
+ }
+ }
+}
+
+impl<'tcx> PolyExistentialProjection<'tcx> {
+ pub fn with_self_ty(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ self_ty: Ty<'tcx>,
+ ) -> ty::PolyProjectionPredicate<'tcx> {
+ self.map_bound(|p| p.with_self_ty(tcx, self_ty))
+ }
+
+ pub fn item_def_id(&self) -> DefId {
+ self.skip_binder().item_def_id
+ }
+}
+
+/// Region utilities
+impl<'tcx> Region<'tcx> {
+ pub fn kind(self) -> RegionKind<'tcx> {
+ *self.0.0
+ }
+
+ /// Is this region named by the user?
+ pub fn has_name(self) -> bool {
+ match *self {
+ ty::ReEarlyBound(ebr) => ebr.has_name(),
+ ty::ReLateBound(_, br) => br.kind.is_named(),
+ ty::ReFree(fr) => fr.bound_region.is_named(),
+ ty::ReStatic => true,
+ ty::ReVar(..) => false,
+ ty::RePlaceholder(placeholder) => placeholder.name.is_named(),
+ ty::ReEmpty(_) => false,
+ ty::ReErased => false,
+ }
+ }
+
+ #[inline]
+ pub fn is_static(self) -> bool {
+ matches!(*self, ty::ReStatic)
+ }
+
+ #[inline]
+ pub fn is_erased(self) -> bool {
+ matches!(*self, ty::ReErased)
+ }
+
+ #[inline]
+ pub fn is_late_bound(self) -> bool {
+ matches!(*self, ty::ReLateBound(..))
+ }
+
+ #[inline]
+ pub fn is_placeholder(self) -> bool {
+ matches!(*self, ty::RePlaceholder(..))
+ }
+
+ #[inline]
+ pub fn is_empty(self) -> bool {
+ matches!(*self, ty::ReEmpty(..))
+ }
+
+ #[inline]
+ pub fn bound_at_or_above_binder(self, index: ty::DebruijnIndex) -> bool {
+ match *self {
+ ty::ReLateBound(debruijn, _) => debruijn >= index,
+ _ => false,
+ }
+ }
+
+ pub fn type_flags(self) -> TypeFlags {
+ let mut flags = TypeFlags::empty();
+
+ match *self {
+ ty::ReVar(..) => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+ flags = flags | TypeFlags::HAS_RE_INFER;
+ }
+ ty::RePlaceholder(..) => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+ flags = flags | TypeFlags::HAS_RE_PLACEHOLDER;
+ }
+ ty::ReEarlyBound(..) => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+ flags = flags | TypeFlags::HAS_RE_PARAM;
+ }
+ ty::ReFree { .. } => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+ }
+ ty::ReEmpty(_) | ty::ReStatic => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ }
+ ty::ReLateBound(..) => {
+ flags = flags | TypeFlags::HAS_RE_LATE_BOUND;
+ }
+ ty::ReErased => {
+ flags = flags | TypeFlags::HAS_RE_ERASED;
+ }
+ }
+
+ debug!("type_flags({:?}) = {:?}", self, flags);
+
+ flags
+ }
+
+ /// Given an early-bound or free region, returns the `DefId` where it was bound.
+ /// For example, consider the regions in this snippet of code:
+ ///
+ /// ```ignore (illustrative)
+ /// impl<'a> Foo {
+ /// // ^^ -- early bound, declared on an impl
+ ///
+ /// fn bar<'b, 'c>(x: &self, y: &'b u32, z: &'c u64) where 'static: 'c
+ /// // ^^ ^^ ^ anonymous, late-bound
+ /// // | early-bound, appears in where-clauses
+ /// // late-bound, appears only in fn args
+ /// {..}
+ /// }
+ /// ```
+ ///
+ /// Here, `free_region_binding_scope('a)` would return the `DefId`
+ /// of the impl, and for all the other highlighted regions, it
+ /// would return the `DefId` of the function. In other cases (not shown), this
+ /// function might return the `DefId` of a closure.
+ pub fn free_region_binding_scope(self, tcx: TyCtxt<'_>) -> DefId {
+ match *self {
+ ty::ReEarlyBound(br) => tcx.parent(br.def_id),
+ ty::ReFree(fr) => fr.scope,
+ _ => bug!("free_region_binding_scope invoked on inappropriate region: {:?}", self),
+ }
+ }
+
+ /// True for free regions other than `'static`.
+ pub fn is_free(self) -> bool {
+ matches!(*self, ty::ReEarlyBound(_) | ty::ReFree(_))
+ }
+
+ /// True if `self` is a free region or static.
+ pub fn is_free_or_static(self) -> bool {
+ match *self {
+ ty::ReStatic => true,
+ _ => self.is_free(),
+ }
+ }
+}
+
+/// Type utilities
+impl<'tcx> Ty<'tcx> {
+ #[inline(always)]
+ pub fn kind(self) -> &'tcx TyKind<'tcx> {
+ &self.0.0.kind
+ }
+
+ #[inline(always)]
+ pub fn flags(self) -> TypeFlags {
+ self.0.0.flags
+ }
+
+ #[inline]
+ pub fn is_unit(self) -> bool {
+ match self.kind() {
+ Tuple(ref tys) => tys.is_empty(),
+ _ => false,
+ }
+ }
+
+ #[inline]
+ pub fn is_never(self) -> bool {
+ matches!(self.kind(), Never)
+ }
+
+ #[inline]
+ pub fn is_primitive(self) -> bool {
+ self.kind().is_primitive()
+ }
+
+ #[inline]
+ pub fn is_adt(self) -> bool {
+ matches!(self.kind(), Adt(..))
+ }
+
+ #[inline]
+ pub fn is_ref(self) -> bool {
+ matches!(self.kind(), Ref(..))
+ }
+
+ #[inline]
+ pub fn is_ty_var(self) -> bool {
+ matches!(self.kind(), Infer(TyVar(_)))
+ }
+
+ #[inline]
+ pub fn ty_vid(self) -> Option<ty::TyVid> {
+ match self.kind() {
+ &Infer(TyVar(vid)) => Some(vid),
+ _ => None,
+ }
+ }
+
+ #[inline]
+ pub fn is_ty_infer(self) -> bool {
+ matches!(self.kind(), Infer(_))
+ }
+
+ #[inline]
+ pub fn is_phantom_data(self) -> bool {
+ if let Adt(def, _) = self.kind() { def.is_phantom_data() } else { false }
+ }
+
+ #[inline]
+ pub fn is_bool(self) -> bool {
+ *self.kind() == Bool
+ }
+
+ /// Returns `true` if this type is a `str`.
+ #[inline]
+ pub fn is_str(self) -> bool {
+ *self.kind() == Str
+ }
+
+ #[inline]
+ pub fn is_param(self, index: u32) -> bool {
+ match self.kind() {
+ ty::Param(ref data) => data.index == index,
+ _ => false,
+ }
+ }
+
+ #[inline]
+ pub fn is_slice(self) -> bool {
+ matches!(self.kind(), Slice(_))
+ }
+
+ #[inline]
+ pub fn is_array_slice(self) -> bool {
+ match self.kind() {
+ Slice(_) => true,
+ RawPtr(TypeAndMut { ty, .. }) | Ref(_, ty, _) => matches!(ty.kind(), Slice(_)),
+ _ => false,
+ }
+ }
+
+ #[inline]
+ pub fn is_array(self) -> bool {
+ matches!(self.kind(), Array(..))
+ }
+
+ #[inline]
+ pub fn is_simd(self) -> bool {
+ match self.kind() {
+ Adt(def, _) => def.repr().simd(),
+ _ => false,
+ }
+ }
+
+ pub fn sequence_element_type(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match self.kind() {
+ Array(ty, _) | Slice(ty) => *ty,
+ Str => tcx.types.u8,
+ _ => bug!("`sequence_element_type` called on non-sequence value: {}", self),
+ }
+ }
+
+ pub fn simd_size_and_type(self, tcx: TyCtxt<'tcx>) -> (u64, Ty<'tcx>) {
+ match self.kind() {
+ Adt(def, substs) => {
+ assert!(def.repr().simd(), "`simd_size_and_type` called on non-SIMD type");
+ let variant = def.non_enum_variant();
+ let f0_ty = variant.fields[0].ty(tcx, substs);
+
+ match f0_ty.kind() {
+ // If the first field is an array, we assume it is the only field and its
+ // elements are the SIMD components.
+ Array(f0_elem_ty, f0_len) => {
+ // FIXME(repr_simd): https://github.com/rust-lang/rust/pull/78863#discussion_r522784112
+ // The way we evaluate the `N` in `[T; N]` here only works since we use
+ // `simd_size_and_type` post-monomorphization. It will probably start to ICE
+ // if we use it in generic code. See the `simd-array-trait` ui test.
+ (f0_len.eval_usize(tcx, ParamEnv::empty()) as u64, *f0_elem_ty)
+ }
+ // Otherwise, the fields of this Adt are the SIMD components (and we assume they
+ // all have the same type).
+ _ => (variant.fields.len() as u64, f0_ty),
+ }
+ }
+ _ => bug!("`simd_size_and_type` called on invalid type"),
+ }
+ }
+
+ #[inline]
+ pub fn is_region_ptr(self) -> bool {
+ matches!(self.kind(), Ref(..))
+ }
+
+ #[inline]
+ pub fn is_mutable_ptr(self) -> bool {
+ matches!(
+ self.kind(),
+ RawPtr(TypeAndMut { mutbl: hir::Mutability::Mut, .. })
+ | Ref(_, _, hir::Mutability::Mut)
+ )
+ }
+
+    /// Returns the mutability of the reference, or `None` when not a reference.
+ #[inline]
+ pub fn ref_mutability(self) -> Option<hir::Mutability> {
+ match self.kind() {
+ Ref(_, _, mutability) => Some(*mutability),
+ _ => None,
+ }
+ }
+
+ #[inline]
+ pub fn is_unsafe_ptr(self) -> bool {
+ matches!(self.kind(), RawPtr(_))
+ }
+
+ /// Tests if this is any kind of primitive pointer type (reference, raw pointer, fn pointer).
+ #[inline]
+ pub fn is_any_ptr(self) -> bool {
+ self.is_region_ptr() || self.is_unsafe_ptr() || self.is_fn_ptr()
+ }
+
+ #[inline]
+ pub fn is_box(self) -> bool {
+ match self.kind() {
+ Adt(def, _) => def.is_box(),
+ _ => false,
+ }
+ }
+
+ /// Panics if called on any type other than `Box<T>`.
+ pub fn boxed_ty(self) -> Ty<'tcx> {
+ match self.kind() {
+ Adt(def, substs) if def.is_box() => substs.type_at(0),
+ _ => bug!("`boxed_ty` is called on non-box type {:?}", self),
+ }
+ }
+
+ /// A scalar type is one that denotes an atomic datum, with no sub-components.
+ /// (A RawPtr is scalar because it represents a non-managed pointer, so its
+ /// contents are abstract to rustc.)
+ #[inline]
+ pub fn is_scalar(self) -> bool {
+ matches!(
+ self.kind(),
+ Bool | Char
+ | Int(_)
+ | Float(_)
+ | Uint(_)
+ | FnDef(..)
+ | FnPtr(_)
+ | RawPtr(_)
+ | Infer(IntVar(_) | FloatVar(_))
+ )
+ }
+
+ /// Returns `true` if this type is a floating point type.
+ #[inline]
+ pub fn is_floating_point(self) -> bool {
+ matches!(self.kind(), Float(_) | Infer(FloatVar(_)))
+ }
+
+ #[inline]
+ pub fn is_trait(self) -> bool {
+ matches!(self.kind(), Dynamic(..))
+ }
+
+ #[inline]
+ pub fn is_enum(self) -> bool {
+ matches!(self.kind(), Adt(adt_def, _) if adt_def.is_enum())
+ }
+
+ #[inline]
+ pub fn is_union(self) -> bool {
+ matches!(self.kind(), Adt(adt_def, _) if adt_def.is_union())
+ }
+
+ #[inline]
+ pub fn is_closure(self) -> bool {
+ matches!(self.kind(), Closure(..))
+ }
+
+ #[inline]
+ pub fn is_generator(self) -> bool {
+ matches!(self.kind(), Generator(..))
+ }
+
+ #[inline]
+ pub fn is_integral(self) -> bool {
+ matches!(self.kind(), Infer(IntVar(_)) | Int(_) | Uint(_))
+ }
+
+ #[inline]
+ pub fn is_fresh_ty(self) -> bool {
+ matches!(self.kind(), Infer(FreshTy(_)))
+ }
+
+ #[inline]
+ pub fn is_fresh(self) -> bool {
+ matches!(self.kind(), Infer(FreshTy(_) | FreshIntTy(_) | FreshFloatTy(_)))
+ }
+
+ #[inline]
+ pub fn is_char(self) -> bool {
+ matches!(self.kind(), Char)
+ }
+
+ #[inline]
+ pub fn is_numeric(self) -> bool {
+ self.is_integral() || self.is_floating_point()
+ }
+
+ #[inline]
+ pub fn is_signed(self) -> bool {
+ matches!(self.kind(), Int(_))
+ }
+
+ #[inline]
+ pub fn is_ptr_sized_integral(self) -> bool {
+ matches!(self.kind(), Int(ty::IntTy::Isize) | Uint(ty::UintTy::Usize))
+ }
+
+ #[inline]
+ pub fn has_concrete_skeleton(self) -> bool {
+ !matches!(self.kind(), Param(_) | Infer(_) | Error(_))
+ }
+
+ /// Checks whether a type recursively contains another type
+ ///
+ /// Example: `Option<()>` contains `()`
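+    ///
+    /// As a sketch (illustrative; `opt_unit_ty` and `unit_ty` are assumed
+    /// `Ty<'tcx>` values for `Option<()>` and `()`):
+    /// ```ignore (illustrative)
+    /// assert!(opt_unit_ty.contains(unit_ty));
+    /// ```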
+ pub fn contains(self, other: Ty<'tcx>) -> bool {
+ struct ContainsTyVisitor<'tcx>(Ty<'tcx>);
+
+ impl<'tcx> TypeVisitor<'tcx> for ContainsTyVisitor<'tcx> {
+ type BreakTy = ();
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if self.0 == t { ControlFlow::BREAK } else { t.super_visit_with(self) }
+ }
+ }
+
+ let cf = self.visit_with(&mut ContainsTyVisitor(other));
+ cf.is_break()
+ }
+
+ /// Returns the type and mutability of `*ty`.
+ ///
+ /// The parameter `explicit` indicates if this is an *explicit* dereference.
+ /// Some types -- notably unsafe ptrs -- can only be dereferenced explicitly.
+ pub fn builtin_deref(self, explicit: bool) -> Option<TypeAndMut<'tcx>> {
+ match self.kind() {
+ Adt(def, _) if def.is_box() => {
+ Some(TypeAndMut { ty: self.boxed_ty(), mutbl: hir::Mutability::Not })
+ }
+ Ref(_, ty, mutbl) => Some(TypeAndMut { ty: *ty, mutbl: *mutbl }),
+ RawPtr(mt) if explicit => Some(*mt),
+ _ => None,
+ }
+ }
+
+ /// Returns the type of `ty[i]`.
+ pub fn builtin_index(self) -> Option<Ty<'tcx>> {
+ match self.kind() {
+ Array(ty, _) | Slice(ty) => Some(*ty),
+ _ => None,
+ }
+ }
+
+ pub fn fn_sig(self, tcx: TyCtxt<'tcx>) -> PolyFnSig<'tcx> {
+ match self.kind() {
+ FnDef(def_id, substs) => tcx.bound_fn_sig(*def_id).subst(tcx, substs),
+ FnPtr(f) => *f,
+ Error(_) => {
+ // ignore errors (#54954)
+ ty::Binder::dummy(FnSig::fake())
+ }
+ Closure(..) => bug!(
+ "to get the signature of a closure, use `substs.as_closure().sig()` not `fn_sig()`",
+ ),
+ _ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self),
+ }
+ }
+
+ #[inline]
+ pub fn is_fn(self) -> bool {
+ matches!(self.kind(), FnDef(..) | FnPtr(_))
+ }
+
+ #[inline]
+ pub fn is_fn_ptr(self) -> bool {
+ matches!(self.kind(), FnPtr(_))
+ }
+
+ #[inline]
+ pub fn is_impl_trait(self) -> bool {
+ matches!(self.kind(), Opaque(..))
+ }
+
+ #[inline]
+ pub fn ty_adt_def(self) -> Option<AdtDef<'tcx>> {
+ match self.kind() {
+ Adt(adt, _) => Some(*adt),
+ _ => None,
+ }
+ }
+
+ /// Iterates over tuple fields.
+ /// Panics when called on anything but a tuple.
+ #[inline]
+ pub fn tuple_fields(self) -> &'tcx List<Ty<'tcx>> {
+ match self.kind() {
+ Tuple(substs) => substs,
+ _ => bug!("tuple_fields called on non-tuple"),
+ }
+ }
+
+ /// If the type contains variants, returns the valid range of variant indices.
+ //
+ // FIXME: This requires the optimized MIR in the case of generators.
+ #[inline]
+ pub fn variant_range(self, tcx: TyCtxt<'tcx>) -> Option<Range<VariantIdx>> {
+ match self.kind() {
+ TyKind::Adt(adt, _) => Some(adt.variant_range()),
+ TyKind::Generator(def_id, substs, _) => {
+ Some(substs.as_generator().variant_range(*def_id, tcx))
+ }
+ _ => None,
+ }
+ }
+
+ /// If the type contains variants, returns the variant for `variant_index`.
+ /// Panics if `variant_index` is out of range.
+ //
+ // FIXME: This requires the optimized MIR in the case of generators.
+ #[inline]
+ pub fn discriminant_for_variant(
+ self,
+ tcx: TyCtxt<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Option<Discr<'tcx>> {
+ match self.kind() {
+ TyKind::Adt(adt, _) if adt.variants().is_empty() => {
+ // This can actually happen during CTFE, see
+ // https://github.com/rust-lang/rust/issues/89765.
+ None
+ }
+ TyKind::Adt(adt, _) if adt.is_enum() => {
+ Some(adt.discriminant_for_variant(tcx, variant_index))
+ }
+ TyKind::Generator(def_id, substs, _) => {
+ Some(substs.as_generator().discriminant_for_variant(*def_id, tcx, variant_index))
+ }
+ _ => None,
+ }
+ }
+
+ /// Returns the type of the discriminant of this type.
+ pub fn discriminant_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match self.kind() {
+ ty::Adt(adt, _) if adt.is_enum() => adt.repr().discr_type().to_ty(tcx),
+ ty::Generator(_, substs, _) => substs.as_generator().discr_ty(tcx),
+
+ ty::Param(_) | ty::Projection(_) | ty::Opaque(..) | ty::Infer(ty::TyVar(_)) => {
+ let assoc_items = tcx.associated_item_def_ids(
+ tcx.require_lang_item(hir::LangItem::DiscriminantKind, None),
+ );
+ tcx.mk_projection(assoc_items[0], tcx.intern_substs(&[self.into()]))
+ }
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(..)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(..)
+ | ty::Dynamic(..)
+ | ty::Closure(..)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ | ty::Tuple(_)
+ | ty::Error(_)
+ | ty::Infer(IntVar(_) | FloatVar(_)) => tcx.types.u8,
+
+ ty::Bound(..)
+ | ty::Placeholder(_)
+ | ty::Infer(FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`discriminant_ty` applied to unexpected type: {:?}", self)
+ }
+ }
+ }
+
+    /// Returns the type of metadata for (potentially fat) pointers to this type,
+    /// and a boolean signifying whether this is conditional on this type being `Sized`.
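+    ///
+    /// An illustrative sketch of the mapping performed below:
+    /// ```ignore (illustrative)
+    /// // u32         -> ((), false)     // thin pointee, unconditionally Sized
+    /// // [u8]        -> (usize, false)  // slice length
+    /// // dyn Debug   -> (DynMetadata<dyn Debug>, false)
+    /// // T (a param) -> ((), true)      // unit metadata only if `T: Sized`
+    /// ```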
+ pub fn ptr_metadata_ty(
+ self,
+ tcx: TyCtxt<'tcx>,
+ normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>,
+ ) -> (Ty<'tcx>, bool) {
+ let tail = tcx.struct_tail_with_normalize(self, normalize, || {});
+ match tail.kind() {
+ // Sized types
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(..)
+ | ty::Char
+ | ty::Ref(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Array(..)
+ | ty::Closure(..)
+ | ty::Never
+ | ty::Error(_)
+ // Extern types have metadata = ().
+ | ty::Foreign(..)
+            // If returned by `struct_tail_with_normalize` this is a unit struct
+            // without any fields, or not a struct, and therefore is Sized.
+ | ty::Adt(..)
+            // If returned by `struct_tail_with_normalize` this is the empty tuple,
+            // a.k.a. unit type, which is Sized.
+ | ty::Tuple(..) => (tcx.types.unit, false),
+
+ ty::Str | ty::Slice(_) => (tcx.types.usize, false),
+ ty::Dynamic(..) => {
+ let dyn_metadata = tcx.lang_items().dyn_metadata().unwrap();
+ (tcx.bound_type_of(dyn_metadata).subst(tcx, &[tail.into()]), false)
+ },
+
+ // type parameters only have unit metadata if they're sized, so return true
+ // to make sure we double check this during confirmation
+ ty::Param(_) | ty::Projection(_) | ty::Opaque(..) => (tcx.types.unit, true),
+
+ ty::Infer(ty::TyVar(_))
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`ptr_metadata_ty` applied to unexpected type: {:?} (tail = {:?})", self, tail)
+ }
+ }
+ }
+
+ /// When we create a closure, we record its kind (i.e., what trait
+ /// it implements) into its `ClosureSubsts` using a type
+ /// parameter. This is kind of a phantom type, except that the
+    /// most convenient thing for us to use are the integral types. This
+ /// function converts such a special type into the closure
+ /// kind. To go the other way, use
+ /// `tcx.closure_kind_ty(closure_kind)`.
+ ///
+ /// Note that during type checking, we use an inference variable
+ /// to represent the closure kind, because it has not yet been
+ /// inferred. Once upvar inference (in `rustc_typeck/src/check/upvar.rs`)
+ /// is complete, that type variable will be unified.
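+    ///
+    /// A sketch of the mapping implemented below:
+    /// ```ignore (illustrative)
+    /// // i8  -> Some(ClosureKind::Fn)
+    /// // i16 -> Some(ClosureKind::FnMut)
+    /// // i32 -> Some(ClosureKind::FnOnce)
+    /// // bound/infer types -> None (kind not yet known)
+    /// ```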
+ pub fn to_opt_closure_kind(self) -> Option<ty::ClosureKind> {
+ match self.kind() {
+ Int(int_ty) => match int_ty {
+ ty::IntTy::I8 => Some(ty::ClosureKind::Fn),
+ ty::IntTy::I16 => Some(ty::ClosureKind::FnMut),
+ ty::IntTy::I32 => Some(ty::ClosureKind::FnOnce),
+ _ => bug!("cannot convert type `{:?}` to a closure kind", self),
+ },
+
+ // "Bound" types appear in canonical queries when the
+ // closure type is not yet known
+ Bound(..) | Infer(_) => None,
+
+ Error(_) => Some(ty::ClosureKind::Fn),
+
+ _ => bug!("cannot convert type `{:?}` to a closure kind", self),
+ }
+ }
+
+ /// Fast path helper for testing if a type is `Sized`.
+ ///
+ /// Returning true means the type is known to be sized. Returning
+ /// `false` means nothing -- could be sized, might not be.
+ ///
+ /// Note that we could never rely on the fact that a type such as `[_]` is
+ /// trivially `!Sized` because we could be in a type environment with a
+    /// bound such as `[_]: Copy`. A function with such a bound can obviously
+    /// never be called, but that doesn't mean it shouldn't typecheck. This is why
+ /// this method doesn't return `Option<bool>`.
+ pub fn is_trivially_sized(self, tcx: TyCtxt<'tcx>) -> bool {
+ match self.kind() {
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(..)
+ | ty::Char
+ | ty::Ref(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Array(..)
+ | ty::Closure(..)
+ | ty::Never
+ | ty::Error(_) => true,
+
+ ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false,
+
+ ty::Tuple(tys) => tys.iter().all(|ty| ty.is_trivially_sized(tcx)),
+
+ ty::Adt(def, _substs) => def.sized_constraint(tcx).0.is_empty(),
+
+ ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => false,
+
+ ty::Infer(ty::TyVar(_)) => false,
+
+ ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`is_trivially_sized` applied to unexpected type: {:?}", self)
+ }
+ }
+ }
+
+ /// Fast path helper for primitives which are always `Copy` and which
+ /// have a side-effect-free `Clone` impl.
+ ///
+ /// Returning true means the type is known to be pure and `Copy+Clone`.
+ /// Returning `false` means nothing -- could be `Copy`, might not be.
+ ///
+    /// This is mostly useful for optimizations, as these are the types
+    /// on which we can replace cloning with dereferencing.
+ pub fn is_trivially_pure_clone_copy(self) -> bool {
+ match self.kind() {
+ ty::Bool | ty::Char | ty::Never => true,
+
+ // These aren't even `Clone`
+ ty::Str | ty::Slice(..) | ty::Foreign(..) | ty::Dynamic(..) => false,
+
+ ty::Int(..) | ty::Uint(..) | ty::Float(..) => true,
+
+ // The voldemort ZSTs are fine.
+ ty::FnDef(..) => true,
+
+ ty::Array(element_ty, _len) => element_ty.is_trivially_pure_clone_copy(),
+
+            // A 100-tuple isn't "trivial", so we only do this for reasonable sizes.
+ ty::Tuple(field_tys) => {
+ field_tys.len() <= 3 && field_tys.iter().all(Self::is_trivially_pure_clone_copy)
+ }
+
+ // Sometimes traits aren't implemented for every ABI or arity,
+ // because we can't be generic over everything yet.
+ ty::FnPtr(..) => false,
+
+ // Definitely absolutely not copy.
+ ty::Ref(_, _, hir::Mutability::Mut) => false,
+
+ // Thin pointers & thin shared references are pure-clone-copy, but for
+ // anything with custom metadata it might be more complicated.
+ ty::Ref(_, _, hir::Mutability::Not) | ty::RawPtr(..) => false,
+
+ ty::Generator(..) | ty::GeneratorWitness(..) => false,
+
+ // Might be, but not "trivial" so just giving the safe answer.
+ ty::Adt(..) | ty::Closure(..) | ty::Opaque(..) => false,
+
+ ty::Projection(..) | ty::Param(..) | ty::Infer(..) | ty::Error(..) => false,
+
+ ty::Bound(..) | ty::Placeholder(..) => {
+ bug!("`is_trivially_pure_clone_copy` applied to unexpected type: {:?}", self);
+ }
+ }
+ }
+}
+
+/// Extra information about why we ended up with a particular variance.
+/// This is only used to add more information to error messages, and
+/// has no effect on soundness. While choosing the 'wrong' `VarianceDiagInfo`
+/// may lead to confusing notes in error messages, it will never cause
+/// a miscompilation or unsoundness.
+///
+/// When in doubt, use `VarianceDiagInfo::default()`.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
+pub enum VarianceDiagInfo<'tcx> {
+ /// No additional information - this is the default.
+ /// We will not add any additional information to error messages.
+ #[default]
+ None,
+ /// We switched our variance because a generic argument occurs inside
+ /// the invariant generic argument of another type.
+ Invariant {
+ /// The generic type containing the generic parameter
+ /// that changes the variance (e.g. `*mut T`, `MyStruct<T>`)
+ ty: Ty<'tcx>,
+ /// The index of the generic parameter being used
+ /// (e.g. `0` for `*mut T`, `1` for `MyStruct<'CovariantParam, 'InvariantParam>`)
+ param_index: u32,
+ },
+}
+
+impl<'tcx> VarianceDiagInfo<'tcx> {
+ /// Mirrors `Variance::xform` - used to 'combine' the existing
+ /// and new `VarianceDiagInfo`s when our variance changes.
+ pub fn xform(self, other: VarianceDiagInfo<'tcx>) -> VarianceDiagInfo<'tcx> {
+ // For now, just use the first `VarianceDiagInfo::Invariant` that we see
+ match self {
+ VarianceDiagInfo::None => other,
+ VarianceDiagInfo::Invariant { .. } => self,
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs
new file mode 100644
index 000000000..6262aa180
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/subst.rs
@@ -0,0 +1,785 @@
+// Type substitutions.
+
+use crate::mir;
+use crate::ty::codec::{TyDecoder, TyEncoder};
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable};
+use crate::ty::sty::{ClosureSubsts, GeneratorSubsts, InlineConstSubsts};
+use crate::ty::visit::{TypeVisitable, TypeVisitor};
+use crate::ty::{self, Lift, List, ParamConst, Ty, TyCtxt};
+
+use rustc_data_structures::intern::{Interned, WithStableHash};
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_serialize::{self, Decodable, Encodable};
+use smallvec::SmallVec;
+
+use core::intrinsics;
+use std::cmp::Ordering;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem;
+use std::num::NonZeroUsize;
+use std::ops::ControlFlow;
+use std::slice;
+
+/// An entity in the Rust type system, which can be one of
+/// several kinds (types, lifetimes, and consts).
+/// To reduce memory usage, a `GenericArg` is an interned pointer,
+/// with the lowest 2 bits being reserved for a tag to
+/// indicate the type (`Ty`, `Region`, or `Const`) it points to.
+///
+/// Note: the `PartialEq`, `Eq` and `Hash` derives are only valid because `Ty`,
+/// `Region` and `Const` are all interned.
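+///
+/// A sketch of the tagged-pointer roundtrip (illustrative; `ty` is an
+/// assumed `Ty<'tcx>`):
+/// ```ignore (illustrative)
+/// let arg: GenericArg<'_> = ty.into(); // packs the pointer with TYPE_TAG (0b00)
+/// match arg.unpack() {                 // masks the low 2 bits back off
+///     GenericArgKind::Type(t) => assert_eq!(t, ty),
+///     _ => unreachable!(),
+/// }
+/// ```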
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct GenericArg<'tcx> {
+ ptr: NonZeroUsize,
+ marker: PhantomData<(Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>)>,
+}
+
+const TAG_MASK: usize = 0b11;
+const TYPE_TAG: usize = 0b00;
+const REGION_TAG: usize = 0b01;
+const CONST_TAG: usize = 0b10;
+
+#[derive(Debug, TyEncodable, TyDecodable, PartialEq, Eq, PartialOrd, Ord)]
+pub enum GenericArgKind<'tcx> {
+ Lifetime(ty::Region<'tcx>),
+ Type(Ty<'tcx>),
+ Const(ty::Const<'tcx>),
+}
+
+/// This function goes from `&'a [Ty<'tcx>]` to `&'a [GenericArg<'tcx>]`
+///
+/// This is sound as, for types, `GenericArg` is just
+/// `NonZeroUsize::new_unchecked(ty as *const _ as usize)` as
+/// long as we use `0` for the `TYPE_TAG`.
+pub fn ty_slice_as_generic_args<'a, 'tcx>(ts: &'a [Ty<'tcx>]) -> &'a [GenericArg<'tcx>] {
+ assert_eq!(TYPE_TAG, 0);
+    // SAFETY: the whole slice is valid and immutable.
+    // The layout relationship between `Ty` and `GenericArg` is explained above.
+ unsafe { slice::from_raw_parts(ts.as_ptr().cast(), ts.len()) }
+}
+
+impl<'tcx> List<Ty<'tcx>> {
+    /// Allows freely switching between `List<Ty<'tcx>>` and `List<GenericArg<'tcx>>`.
+    ///
+    /// As lists are interned, `List<Ty<'tcx>>` and `List<GenericArg<'tcx>>` have
+    /// to be interned together; see `intern_type_list` for more details.
+ #[inline]
+ pub fn as_substs(&'tcx self) -> SubstsRef<'tcx> {
+ assert_eq!(TYPE_TAG, 0);
+        // SAFETY: `List<T>` is `#[repr(C)]`; the layout relationship between
+        // `Ty` and `GenericArg` is explained above.
+ unsafe { &*(self as *const List<Ty<'tcx>> as *const List<GenericArg<'tcx>>) }
+ }
+}
+
+impl<'tcx> GenericArgKind<'tcx> {
+ #[inline]
+ fn pack(self) -> GenericArg<'tcx> {
+ let (tag, ptr) = match self {
+ GenericArgKind::Lifetime(lt) => {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(&*lt.0.0) & TAG_MASK, 0);
+ (REGION_TAG, lt.0.0 as *const ty::RegionKind<'tcx> as usize)
+ }
+ GenericArgKind::Type(ty) => {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
+ (TYPE_TAG, ty.0.0 as *const WithStableHash<ty::TyS<'tcx>> as usize)
+ }
+ GenericArgKind::Const(ct) => {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
+ (CONST_TAG, ct.0.0 as *const ty::ConstS<'tcx> as usize)
+ }
+ };
+
+ GenericArg { ptr: unsafe { NonZeroUsize::new_unchecked(ptr | tag) }, marker: PhantomData }
+ }
+}
+
+impl<'tcx> fmt::Debug for GenericArg<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => lt.fmt(f),
+ GenericArgKind::Type(ty) => ty.fmt(f),
+ GenericArgKind::Const(ct) => ct.fmt(f),
+ }
+ }
+}
+
+impl<'tcx> Ord for GenericArg<'tcx> {
+ fn cmp(&self, other: &GenericArg<'tcx>) -> Ordering {
+ self.unpack().cmp(&other.unpack())
+ }
+}
+
+impl<'tcx> PartialOrd for GenericArg<'tcx> {
+ fn partial_cmp(&self, other: &GenericArg<'tcx>) -> Option<Ordering> {
+ Some(self.cmp(&other))
+ }
+}
+
+impl<'tcx> From<ty::Region<'tcx>> for GenericArg<'tcx> {
+ #[inline]
+ fn from(r: ty::Region<'tcx>) -> GenericArg<'tcx> {
+ GenericArgKind::Lifetime(r).pack()
+ }
+}
+
+impl<'tcx> From<Ty<'tcx>> for GenericArg<'tcx> {
+ #[inline]
+ fn from(ty: Ty<'tcx>) -> GenericArg<'tcx> {
+ GenericArgKind::Type(ty).pack()
+ }
+}
+
+impl<'tcx> From<ty::Const<'tcx>> for GenericArg<'tcx> {
+ #[inline]
+ fn from(c: ty::Const<'tcx>) -> GenericArg<'tcx> {
+ GenericArgKind::Const(c).pack()
+ }
+}
+
+impl<'tcx> GenericArg<'tcx> {
+ #[inline]
+ pub fn unpack(self) -> GenericArgKind<'tcx> {
+ let ptr = self.ptr.get();
+ // SAFETY: use of `Interned::new_unchecked` here is ok because these
+ // pointers were originally created from `Interned` types in `pack()`,
+ // and this is just going in the other direction.
+ unsafe {
+ match ptr & TAG_MASK {
+ REGION_TAG => GenericArgKind::Lifetime(ty::Region(Interned::new_unchecked(
+ &*((ptr & !TAG_MASK) as *const ty::RegionKind<'tcx>),
+ ))),
+ TYPE_TAG => GenericArgKind::Type(Ty(Interned::new_unchecked(
+ &*((ptr & !TAG_MASK) as *const WithStableHash<ty::TyS<'tcx>>),
+ ))),
+ CONST_TAG => GenericArgKind::Const(ty::Const(Interned::new_unchecked(
+ &*((ptr & !TAG_MASK) as *const ty::ConstS<'tcx>),
+ ))),
+ _ => intrinsics::unreachable(),
+ }
+ }
+ }
+
+ /// Unpack the `GenericArg` as a region when it is known certainly to be a region.
+ pub fn expect_region(self) -> ty::Region<'tcx> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => lt,
+ _ => bug!("expected a region, but found another kind"),
+ }
+ }
+
+    /// Unpack the `GenericArg` as a type when it is known certainly to be a type.
+    /// This is useful where `Substs` appears in contexts where the kinds are known
+    /// to be limited (e.g. in tuples, where the only parameters are type parameters).
+ pub fn expect_ty(self) -> Ty<'tcx> {
+ match self.unpack() {
+ GenericArgKind::Type(ty) => ty,
+ _ => bug!("expected a type, but found another kind"),
+ }
+ }
+
+ /// Unpack the `GenericArg` as a const when it is known certainly to be a const.
+ pub fn expect_const(self) -> ty::Const<'tcx> {
+ match self.unpack() {
+ GenericArgKind::Const(c) => c,
+ _ => bug!("expected a const, but found another kind"),
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> {
+ type Lifted = GenericArg<'tcx>;
+
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => tcx.lift(lt).map(|lt| lt.into()),
+ GenericArgKind::Type(ty) => tcx.lift(ty).map(|ty| ty.into()),
+ GenericArgKind::Const(ct) => tcx.lift(ct).map(|ct| ct.into()),
+ }
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for GenericArg<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => lt.try_fold_with(folder).map(Into::into),
+ GenericArgKind::Type(ty) => ty.try_fold_with(folder).map(Into::into),
+ GenericArgKind::Const(ct) => ct.try_fold_with(folder).map(Into::into),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for GenericArg<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => lt.visit_with(visitor),
+ GenericArgKind::Type(ty) => ty.visit_with(visitor),
+ GenericArgKind::Const(ct) => ct.visit_with(visitor),
+ }
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for GenericArg<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.unpack().encode(e)
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for GenericArg<'tcx> {
+ fn decode(d: &mut D) -> GenericArg<'tcx> {
+ GenericArgKind::decode(d).pack()
+ }
+}
+
+/// A substitution mapping generic parameters to new values.
+pub type InternalSubsts<'tcx> = List<GenericArg<'tcx>>;
+
+pub type SubstsRef<'tcx> = &'tcx InternalSubsts<'tcx>;
+
+impl<'tcx> InternalSubsts<'tcx> {
+ /// Checks whether all elements of this list are types, if so, transmute.
+ pub fn try_as_type_list(&'tcx self) -> Option<&'tcx List<Ty<'tcx>>> {
+ if self.iter().all(|arg| matches!(arg.unpack(), GenericArgKind::Type(_))) {
+ assert_eq!(TYPE_TAG, 0);
+ // SAFETY: All elements are types, see `List<Ty<'tcx>>::as_substs`.
+ Some(unsafe { &*(self as *const List<GenericArg<'tcx>> as *const List<Ty<'tcx>>) })
+ } else {
+ None
+ }
+ }
+
+ /// Interpret these substitutions as the substitutions of a closure type.
+ /// Closure substitutions have a particular structure controlled by the
+ /// compiler that encodes information like the signature and closure kind;
+ /// see `ty::ClosureSubsts` struct for more comments.
+ pub fn as_closure(&'tcx self) -> ClosureSubsts<'tcx> {
+ ClosureSubsts { substs: self }
+ }
+
+ /// Interpret these substitutions as the substitutions of a generator type.
+ /// Generator substitutions have a particular structure controlled by the
+ /// compiler that encodes information like the signature and generator kind;
+ /// see `ty::GeneratorSubsts` struct for more comments.
+ pub fn as_generator(&'tcx self) -> GeneratorSubsts<'tcx> {
+ GeneratorSubsts { substs: self }
+ }
+
+ /// Interpret these substitutions as the substitutions of an inline const.
+ /// Inline const substitutions have a particular structure controlled by the
+ /// compiler that encodes information like the inferred type;
+ /// see `ty::InlineConstSubsts` struct for more comments.
+ pub fn as_inline_const(&'tcx self) -> InlineConstSubsts<'tcx> {
+ InlineConstSubsts { substs: self }
+ }
+
+ /// Creates an `InternalSubsts` that maps each generic parameter to itself.
+ pub fn identity_for_item(tcx: TyCtxt<'tcx>, def_id: DefId) -> SubstsRef<'tcx> {
+ Self::for_item(tcx, def_id, |param, _| tcx.mk_param_from_def(param))
+ }
+
+ /// Creates an `InternalSubsts` for generic parameter definitions,
+ /// by calling closures to obtain each kind.
+ /// The closures get to observe the `InternalSubsts` as they're
+ /// being built, which can be used to correctly
+ /// substitute defaults of generic parameters.
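+    ///
+    /// A minimal sketch (illustrative; `tcx` and `def_id` are assumed in
+    /// scope, and this closure reproduces `identity_for_item`):
+    /// ```ignore (illustrative)
+    /// let substs = InternalSubsts::for_item(tcx, def_id, |param, _| {
+    ///     tcx.mk_param_from_def(param)
+    /// });
+    /// ```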
+ pub fn for_item<F>(tcx: TyCtxt<'tcx>, def_id: DefId, mut mk_kind: F) -> SubstsRef<'tcx>
+ where
+ F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+ {
+ let defs = tcx.generics_of(def_id);
+ let count = defs.count();
+ let mut substs = SmallVec::with_capacity(count);
+ Self::fill_item(&mut substs, tcx, defs, &mut mk_kind);
+ tcx.intern_substs(&substs)
+ }
+
+ pub fn extend_to<F>(&self, tcx: TyCtxt<'tcx>, def_id: DefId, mut mk_kind: F) -> SubstsRef<'tcx>
+ where
+ F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+ {
+ Self::for_item(tcx, def_id, |param, substs| {
+ self.get(param.index as usize).cloned().unwrap_or_else(|| mk_kind(param, substs))
+ })
+ }
+
+ pub fn fill_item<F>(
+ substs: &mut SmallVec<[GenericArg<'tcx>; 8]>,
+ tcx: TyCtxt<'tcx>,
+ defs: &ty::Generics,
+ mk_kind: &mut F,
+ ) where
+ F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+ {
+ if let Some(def_id) = defs.parent {
+ let parent_defs = tcx.generics_of(def_id);
+ Self::fill_item(substs, tcx, parent_defs, mk_kind);
+ }
+ Self::fill_single(substs, defs, mk_kind)
+ }
+
+ pub fn fill_single<F>(
+ substs: &mut SmallVec<[GenericArg<'tcx>; 8]>,
+ defs: &ty::Generics,
+ mk_kind: &mut F,
+ ) where
+ F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+ {
+ substs.reserve(defs.params.len());
+ for param in &defs.params {
+ let kind = mk_kind(param, substs);
+ assert_eq!(param.index as usize, substs.len());
+ substs.push(kind);
+ }
+ }
+
+ #[inline]
+ pub fn types(&'tcx self) -> impl DoubleEndedIterator<Item = Ty<'tcx>> + 'tcx {
+ self.iter()
+ .filter_map(|k| if let GenericArgKind::Type(ty) = k.unpack() { Some(ty) } else { None })
+ }
+
+ #[inline]
+ pub fn regions(&'tcx self) -> impl DoubleEndedIterator<Item = ty::Region<'tcx>> + 'tcx {
+ self.iter().filter_map(|k| {
+ if let GenericArgKind::Lifetime(lt) = k.unpack() { Some(lt) } else { None }
+ })
+ }
+
+ #[inline]
+ pub fn consts(&'tcx self) -> impl DoubleEndedIterator<Item = ty::Const<'tcx>> + 'tcx {
+ self.iter().filter_map(|k| {
+ if let GenericArgKind::Const(ct) = k.unpack() { Some(ct) } else { None }
+ })
+ }
+
+ #[inline]
+ pub fn non_erasable_generics(
+ &'tcx self,
+ ) -> impl DoubleEndedIterator<Item = GenericArgKind<'tcx>> + 'tcx {
+ self.iter().filter_map(|k| match k.unpack() {
+ GenericArgKind::Lifetime(_) => None,
+ generic => Some(generic),
+ })
+ }
+
+ #[inline]
+ pub fn type_at(&self, i: usize) -> Ty<'tcx> {
+ if let GenericArgKind::Type(ty) = self[i].unpack() {
+ ty
+ } else {
+ bug!("expected type for param #{} in {:?}", i, self);
+ }
+ }
+
+ #[inline]
+ pub fn region_at(&self, i: usize) -> ty::Region<'tcx> {
+ if let GenericArgKind::Lifetime(lt) = self[i].unpack() {
+ lt
+ } else {
+ bug!("expected region for param #{} in {:?}", i, self);
+ }
+ }
+
+ #[inline]
+ pub fn const_at(&self, i: usize) -> ty::Const<'tcx> {
+ if let GenericArgKind::Const(ct) = self[i].unpack() {
+ ct
+ } else {
+ bug!("expected const for param #{} in {:?}", i, self);
+ }
+ }
+
+ #[inline]
+ pub fn type_for_def(&self, def: &ty::GenericParamDef) -> GenericArg<'tcx> {
+ self.type_at(def.index as usize).into()
+ }
+
+ /// Transform from substitutions for a child of `source_ancestor`
+ /// (e.g., a trait or impl) to substitutions for the same child
+ /// in a different item, with `target_substs` as the base for
+ /// the target impl/trait, with the source child-specific
+ /// parameters (e.g., method parameters) on top of that base.
+ ///
+ /// For example given:
+ ///
+ /// ```no_run
+ /// trait X<S> { fn f<T>(); }
+ /// impl<U> X<U> for U { fn f<V>() {} }
+ /// ```
+ ///
+ /// * If `self` is `[Self, S, T]` (the identity substs of `f` in the trait),
+ /// * `source_ancestor` is the `DefId` of the trait, and
+ /// * `target_substs` is `[U]` (the substs for the impl),
+ /// * then we will return `[U, T]`, the substs for `f` in the impl that
+ ///   are needed for it to match the trait.
+ pub fn rebase_onto(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ source_ancestor: DefId,
+ target_substs: SubstsRef<'tcx>,
+ ) -> SubstsRef<'tcx> {
+ let defs = tcx.generics_of(source_ancestor);
+ tcx.mk_substs(target_substs.iter().chain(self.iter().skip(defs.params.len())))
+ }
+
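+ /// Truncates `self` to the first `generics.count()` arguments, e.g. to
+ /// drop a method's own parameters and keep only those of the enclosing
+ /// item.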
+ pub fn truncate_to(&self, tcx: TyCtxt<'tcx>, generics: &ty::Generics) -> SubstsRef<'tcx> {
+ tcx.mk_substs(self.iter().take(generics.count()))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for SubstsRef<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ // This code is hot enough that it's worth specializing for the most
+ // common length lists, to avoid the overhead of `SmallVec` creation.
+ // The match arms are in order of frequency. The 1, 2, and 0 cases are
+ // typically hit in 90--99.99% of cases. When folding doesn't change
+ // the substs, it's faster to reuse the existing substs rather than
+ // calling `intern_substs`.
+ match self.len() {
+ 1 => {
+ let param0 = self[0].try_fold_with(folder)?;
+ if param0 == self[0] { Ok(self) } else { Ok(folder.tcx().intern_substs(&[param0])) }
+ }
+ 2 => {
+ let param0 = self[0].try_fold_with(folder)?;
+ let param1 = self[1].try_fold_with(folder)?;
+ if param0 == self[0] && param1 == self[1] {
+ Ok(self)
+ } else {
+ Ok(folder.tcx().intern_substs(&[param0, param1]))
+ }
+ }
+ 0 => Ok(self),
+ _ => ty::util::fold_list(self, folder, |tcx, v| tcx.intern_substs(v)),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for SubstsRef<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ // This code is fairly hot, though not as hot as `SubstsRef`.
+ //
+ // When compiling stage 2, I get the following results:
+ //
+ // len |   total  |   %
+ // --- | -------- | -----
+ //  2  | 15083590 |  48.1
+ //  3  |  7540067 |  24.0
+ //  1  |  5300377 |  16.9
+ //  4  |  1351897 |   4.3
+ //  0  |  1256849 |   4.0
+ //
+ // I've tried it with some private repositories and got
+ // close to the same result, with 4 and 0 swapping places
+ // sometimes.
+ match self.len() {
+ 2 => {
+ let param0 = self[0].try_fold_with(folder)?;
+ let param1 = self[1].try_fold_with(folder)?;
+ if param0 == self[0] && param1 == self[1] {
+ Ok(self)
+ } else {
+ Ok(folder.tcx().intern_type_list(&[param0, param1]))
+ }
+ }
+ _ => ty::util::fold_list(self, folder, |tcx, v| tcx.intern_type_list(v)),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+// Just call `foo.subst(tcx, substs)` to perform a substitution across `foo`;
+// note that `foo` must be a `ty::EarlyBinder` (see the illustrative sketch
+// below the trait).
+#[rustc_on_unimplemented(message = "Calling `subst` must now be done through an `EarlyBinder`")]
+pub trait Subst<'tcx>: Sized {
+ type Inner;
+
+ fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner;
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> Subst<'tcx> for ty::EarlyBinder<T> {
+ type Inner = T;
+
+ fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner {
+ let mut folder = SubstFolder { tcx, substs, binders_passed: 0 };
+ self.0.fold_with(&mut folder)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// The actual substitution engine itself is a type folder.
+
+struct SubstFolder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ substs: &'a [GenericArg<'tcx>],
+
+ /// Number of region binders we have passed through while doing the substitution.
+ binders_passed: u32,
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
+ #[inline]
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.binders_passed += 1;
+ let t = t.super_fold_with(self);
+ self.binders_passed -= 1;
+ t
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ #[cold]
+ #[inline(never)]
+ fn region_param_out_of_range(data: ty::EarlyBoundRegion) -> ! {
+ bug!(
+ "Region parameter out of range when substituting in region {} (index={})",
+ data.name,
+ data.index
+ )
+ }
+
+ // Note: This routine only handles regions that are bound on
+ // type declarations and other outer declarations, not those
+ // bound in *fn types*. Region substitution of the bound
+ // regions that appear in a function signature is done using
+ // the specialized routine `replace_late_bound_regions()`.
+ match *r {
+ ty::ReEarlyBound(data) => {
+ let rk = self.substs.get(data.index as usize).map(|k| k.unpack());
+ match rk {
+ Some(GenericArgKind::Lifetime(lt)) => self.shift_region_through_binders(lt),
+ _ => region_param_out_of_range(data),
+ }
+ }
+ _ => r,
+ }
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ if !t.needs_subst() {
+ return t;
+ }
+
+ match *t.kind() {
+ ty::Param(p) => self.ty_for_param(p, t),
+ _ => t.super_fold_with(self),
+ }
+ }
+
+ fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ if let ty::ConstKind::Param(p) = c.kind() {
+ self.const_for_param(p, c)
+ } else {
+ c.super_fold_with(self)
+ }
+ }
+
+ #[inline]
+ fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
+ c.super_fold_with(self)
+ }
+}
+
+impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
+ fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
+ // Look up the type in the substitutions. It really should be in there.
+ let opt_ty = self.substs.get(p.index as usize).map(|k| k.unpack());
+ let ty = match opt_ty {
+ Some(GenericArgKind::Type(ty)) => ty,
+ Some(kind) => self.type_param_expected(p, source_ty, kind),
+ None => self.type_param_out_of_range(p, source_ty),
+ };
+
+ self.shift_vars_through_binders(ty)
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn type_param_expected(&self, p: ty::ParamTy, ty: Ty<'tcx>, kind: GenericArgKind<'tcx>) -> ! {
+ bug!(
+ "expected type for `{:?}` ({:?}/{}) but found {:?} when substituting, substs={:?}",
+ p,
+ ty,
+ p.index,
+ kind,
+ self.substs,
+ )
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn type_param_out_of_range(&self, p: ty::ParamTy, ty: Ty<'tcx>) -> ! {
+ bug!(
+ "type parameter `{:?}` ({:?}/{}) out of range when substituting, substs={:?}",
+ p,
+ ty,
+ p.index,
+ self.substs,
+ )
+ }
+
+ fn const_for_param(&self, p: ParamConst, source_ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ // Look up the const in the substitutions. It really should be in there.
+ let opt_ct = self.substs.get(p.index as usize).map(|k| k.unpack());
+ let ct = match opt_ct {
+ Some(GenericArgKind::Const(ct)) => ct,
+ Some(kind) => self.const_param_expected(p, source_ct, kind),
+ None => self.const_param_out_of_range(p, source_ct),
+ };
+
+ self.shift_vars_through_binders(ct)
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn const_param_expected(
+ &self,
+ p: ty::ParamConst,
+ ct: ty::Const<'tcx>,
+ kind: GenericArgKind<'tcx>,
+ ) -> ! {
+ bug!(
+ "expected const for `{:?}` ({:?}/{}) but found {:?} when substituting substs={:?}",
+ p,
+ ct,
+ p.index,
+ kind,
+ self.substs,
+ )
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn const_param_out_of_range(&self, p: ty::ParamConst, ct: ty::Const<'tcx>) -> ! {
+ bug!(
+ "const parameter `{:?}` ({:?}/{}) out of range when substituting substs={:?}",
+ p,
+ ct,
+ p.index,
+ self.substs,
+ )
+ }
+
+ /// It is sometimes necessary to adjust the De Bruijn indices during substitution. This occurs
+ /// when we are substituting a type with escaping bound vars into a context where we have
+ /// passed through binders. That's quite a mouthful. Let's see an example:
+ ///
+ /// ```
+ /// type Func<A> = fn(A);
+ /// type MetaFunc = for<'a> fn(Func<&'a i32>);
+ /// ```
+ ///
+ /// The type `MetaFunc`, when fully expanded, will be
+ /// ```ignore (illustrative)
+ /// for<'a> fn(fn(&'a i32))
+ /// //      ^~ ^~ ^~~
+ /// //      |  |  |
+ /// //      |  |  DebruijnIndex of 2
+ /// //      Binders
+ /// ```
+ /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the
+ /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip
+ /// over the inner binder (remember that we count De Bruijn indices from 1). However, in the
+ /// definition of `MetaFunc`, the binder is not visible, so the type `&'a i32` will have a
+ /// De Bruijn index of 1. It's only during the substitution that we can see we must increase the
+ /// depth by 1 to account for the binder that we passed through.
+ ///
+ /// As a second example, consider this twist:
+ ///
+ /// ```
+ /// type FuncTuple<A> = (A,fn(A));
+ /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a i32>);
+ /// ```
+ ///
+ /// Here the final type will be:
+ /// ```ignore (illustrative)
+ /// for<'a> fn((&'a i32, fn(&'a i32)))
+ /// //          ^~~           ^~~
+ /// //          |             |
+ /// //          DebruijnIndex of 1 |
+ /// //                             DebruijnIndex of 2
+ /// ```
+ /// As indicated in the diagram, here the same type `&'a i32` is substituted once, but in the
+ /// first case we do not increase the De Bruijn index and in the second case we do. The reason
+ /// is that only in the second case have we passed through a fn binder.
+ fn shift_vars_through_binders<T: TypeFoldable<'tcx>>(&self, val: T) -> T {
+ debug!(
+ "shift_vars(val={:?}, binders_passed={:?}, has_escaping_bound_vars={:?})",
+ val,
+ self.binders_passed,
+ val.has_escaping_bound_vars()
+ );
+
+ if self.binders_passed == 0 || !val.has_escaping_bound_vars() {
+ return val;
+ }
+
+ let result = ty::fold::shift_vars(TypeFolder::tcx(self), val, self.binders_passed);
+ debug!("shift_vars: shifted result = {:?}", result);
+
+ result
+ }
+
+ fn shift_region_through_binders(&self, region: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ if self.binders_passed == 0 || !region.has_escaping_bound_vars() {
+ return region;
+ }
+ ty::fold::shift_region(self.tcx, region, self.binders_passed)
+ }
+}
+
+/// Stores the user-given substs to reach some fully qualified path
+/// (e.g., `<T>::Item` or `<T as Trait>::Item`).
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct UserSubsts<'tcx> {
+ /// The substitutions for the item as given by the user.
+ pub substs: SubstsRef<'tcx>,
+
+ /// The self type, in the case of a `<T>::Item` path (when applied
+ /// to an inherent impl). See `UserSelfTy` below.
+ pub user_self_ty: Option<UserSelfTy<'tcx>>,
+}
+
+/// Specifies the user-given self type. In the case of a path that
+/// refers to a member in an inherent impl, this self type is
+/// sometimes needed to constrain the type parameters on the impl. For
+/// example, in this code:
+///
+/// ```ignore (illustrative)
+/// struct Foo<T> { }
+/// impl<A> Foo<A> { fn method() { } }
+/// ```
+///
+/// when you then have a path like `<Foo<&'static u32>>::method`,
+/// this struct would carry the `DefId` of the impl along with the
+ /// self type `Foo<&'static u32>`. Then we can instantiate the parameters of
+ /// the impl (with the substs from `UserSubsts`) and apply those to
+ /// the self type, giving `Foo<?A>`. Finally, we unify that with
+ /// the self type here, which constrains `?A` to be `&'static u32`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct UserSelfTy<'tcx> {
+ pub impl_def_id: DefId,
+ pub self_ty: Ty<'tcx>,
+}
diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs
new file mode 100644
index 000000000..541dace5c
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/trait_def.rs
@@ -0,0 +1,272 @@
+use crate::traits::specialization_graph;
+use crate::ty::fast_reject::{self, SimplifiedType, TreatParams};
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{Ident, Ty, TyCtxt};
+use hir::def_id::LOCAL_CRATE;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use std::iter;
+
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_macros::HashStable;
+
+/// A trait's definition with type information.
+#[derive(HashStable, Encodable, Decodable)]
+pub struct TraitDef {
+ pub def_id: DefId,
+
+ pub unsafety: hir::Unsafety,
+
+ /// If `true`, then this trait had the `#[rustc_paren_sugar]`
+ /// attribute, indicating that it should be used with `Foo()`
+ /// sugar. This is a temporary thing -- eventually any trait will
+ /// be usable with the sugar (or without it).
+ pub paren_sugar: bool,
+
+ pub has_auto_impl: bool,
+
+ /// If `true`, then this trait has the `#[marker]` attribute, indicating
+ /// that all its associated items have defaults that cannot be overridden,
+ /// and thus `impl`s of it are allowed to overlap.
+ pub is_marker: bool,
+
+ /// If `true`, then this trait has the `#[rustc_skip_array_during_method_dispatch]`
+ /// attribute, indicating that editions before 2021 should not consider this trait
+ /// during method dispatch if the receiver is an array.
+ pub skip_array_during_method_dispatch: bool,
+
+ /// Used to determine whether the standard library is allowed to specialize
+ /// on this trait.
+ pub specialization_kind: TraitSpecializationKind,
+
+ /// List of functions from `#[rustc_must_implement_one_of]` attribute one of which
+ /// must be implemented.
+ pub must_implement_one_of: Option<Box<[Ident]>>,
+}
+
+/// Whether this trait is treated specially by the standard library
+/// specialization lint.
+#[derive(HashStable, PartialEq, Clone, Copy, Encodable, Decodable)]
+pub enum TraitSpecializationKind {
+ /// The default. Specializing on this trait is not allowed.
+ None,
+ /// Specializing on this trait is allowed because it doesn't have any
+ /// methods. For example, `Sized` or `FusedIterator`.
+ /// Applies to traits with the `rustc_unsafe_specialization_marker`
+ /// attribute.
+ Marker,
+ /// Specializing on this trait is allowed because all of the impls of this
+ /// trait are "always applicable". Always applicable means that if
+ /// `X<'x>: T<'y>` for any lifetimes, then `for<'a, 'b> X<'a>: T<'b>`.
+ /// Applies to traits with the `rustc_specialization_trait` attribute.
+ AlwaysApplicable,
+}
+
+#[derive(Default, Debug, HashStable)]
+pub struct TraitImpls {
+ blanket_impls: Vec<DefId>,
+ /// Impls indexed by their simplified self type, for fast lookup.
+ non_blanket_impls: FxIndexMap<SimplifiedType, Vec<DefId>>,
+}
+
+impl TraitImpls {
+ pub fn blanket_impls(&self) -> &[DefId] {
+ self.blanket_impls.as_slice()
+ }
+
+ pub fn non_blanket_impls(&self) -> &FxIndexMap<SimplifiedType, Vec<DefId>> {
+ &self.non_blanket_impls
+ }
+}
+
+impl<'tcx> TraitDef {
+ pub fn new(
+ def_id: DefId,
+ unsafety: hir::Unsafety,
+ paren_sugar: bool,
+ has_auto_impl: bool,
+ is_marker: bool,
+ skip_array_during_method_dispatch: bool,
+ specialization_kind: TraitSpecializationKind,
+ must_implement_one_of: Option<Box<[Ident]>>,
+ ) -> TraitDef {
+ TraitDef {
+ def_id,
+ unsafety,
+ paren_sugar,
+ has_auto_impl,
+ is_marker,
+ skip_array_during_method_dispatch,
+ specialization_kind,
+ must_implement_one_of,
+ }
+ }
+
+ pub fn ancestors(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ of_impl: DefId,
+ ) -> Result<specialization_graph::Ancestors<'tcx>, ErrorGuaranteed> {
+ specialization_graph::ancestors(tcx, self.def_id, of_impl)
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ pub fn for_each_impl<F: FnMut(DefId)>(self, def_id: DefId, mut f: F) {
+ let impls = self.trait_impls_of(def_id);
+
+ for &impl_def_id in impls.blanket_impls.iter() {
+ f(impl_def_id);
+ }
+
+ for v in impls.non_blanket_impls.values() {
+ for &impl_def_id in v {
+ f(impl_def_id);
+ }
+ }
+ }
+
+ /// Iterate over every impl that could possibly match the
+ /// self type `self_ty`.
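+ ///
+ /// An illustrative sketch of a call (`trait_def_id` and `ty` are
+ /// placeholders):
+ /// ```ignore (illustrative)
+ /// tcx.for_each_relevant_impl(trait_def_id, ty, |impl_def_id| {
+ ///     // inspect each candidate impl here
+ /// });
+ /// ```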
+ pub fn for_each_relevant_impl<F: FnMut(DefId)>(
+ self,
+ def_id: DefId,
+ self_ty: Ty<'tcx>,
+ mut f: F,
+ ) {
+ let _: Option<()> = self.find_map_relevant_impl(def_id, self_ty, |did| {
+ f(did);
+ None
+ });
+ }
+
+ pub fn non_blanket_impls_for_ty(
+ self,
+ def_id: DefId,
+ self_ty: Ty<'tcx>,
+ ) -> impl Iterator<Item = DefId> + 'tcx {
+ let impls = self.trait_impls_of(def_id);
+ if let Some(simp) = fast_reject::simplify_type(self, self_ty, TreatParams::AsInfer) {
+ if let Some(impls) = impls.non_blanket_impls.get(&simp) {
+ return impls.iter().copied();
+ }
+ }
+
+ [].iter().copied()
+ }
+
+ /// Applies the given function to every impl that could possibly match the
+ /// self type `self_ty`, and returns the first non-`None` value.
+ pub fn find_map_relevant_impl<T, F: FnMut(DefId) -> Option<T>>(
+ self,
+ def_id: DefId,
+ self_ty: Ty<'tcx>,
+ mut f: F,
+ ) -> Option<T> {
+ // FIXME: This depends on the set of all impls for the trait. That is
+ // unfortunate with respect to incremental compilation.
+ //
+ // If we want to be faster, we could have separate queries for
+ // blanket and non-blanket impls, and compare them separately.
+ let impls = self.trait_impls_of(def_id);
+
+ for &impl_def_id in impls.blanket_impls.iter() {
+ if let result @ Some(_) = f(impl_def_id) {
+ return result;
+ }
+ }
+
+ // Note that we're using `TreatParams::AsPlaceholder` to query `non_blanket_impls`,
+ // but `TreatParams::AsInfer` when actually adding them (in `trait_impls_of_provider`).
+ //
+ // This way, when searching for some impl for `T: Trait`, we do not look at any impls
+ // whose outer level is not a parameter or projection. Especially for things like
+ // `T: Clone` this is incredibly useful as we would otherwise look at all the impls
+ // of `Clone` for `Option<T>`, `Vec<T>`, `ConcreteType` and so on.
+ if let Some(simp) = fast_reject::simplify_type(self, self_ty, TreatParams::AsPlaceholder) {
+ if let Some(impls) = impls.non_blanket_impls.get(&simp) {
+ for &impl_def_id in impls {
+ if let result @ Some(_) = f(impl_def_id) {
+ return result;
+ }
+ }
+ }
+ } else {
+ for &impl_def_id in impls.non_blanket_impls.values().flatten() {
+ if let result @ Some(_) = f(impl_def_id) {
+ return result;
+ }
+ }
+ }
+
+ None
+ }
+
+ /// Returns an iterator over all impls of the trait `def_id`.
+ pub fn all_impls(self, def_id: DefId) -> impl Iterator<Item = DefId> + 'tcx {
+ let TraitImpls { blanket_impls, non_blanket_impls } = self.trait_impls_of(def_id);
+
+ blanket_impls.iter().chain(non_blanket_impls.iter().flat_map(|(_, v)| v)).cloned()
+ }
+}
+
+// Query provider for `trait_impls_of`.
+pub(super) fn trait_impls_of_provider(tcx: TyCtxt<'_>, trait_id: DefId) -> TraitImpls {
+ let mut impls = TraitImpls::default();
+
+ // Traits defined in the current crate can't have impls in upstream
+ // crates, so we don't bother querying the cstore.
+ if !trait_id.is_local() {
+ for &cnum in tcx.crates(()).iter() {
+ for &(impl_def_id, simplified_self_ty) in
+ tcx.implementations_of_trait((cnum, trait_id)).iter()
+ {
+ if let Some(simplified_self_ty) = simplified_self_ty {
+ impls
+ .non_blanket_impls
+ .entry(simplified_self_ty)
+ .or_default()
+ .push(impl_def_id);
+ } else {
+ impls.blanket_impls.push(impl_def_id);
+ }
+ }
+ }
+ }
+
+ for &impl_def_id in tcx.hir().trait_impls(trait_id) {
+ let impl_def_id = impl_def_id.to_def_id();
+
+ let impl_self_ty = tcx.type_of(impl_def_id);
+ if impl_self_ty.references_error() {
+ continue;
+ }
+
+ if let Some(simplified_self_ty) =
+ fast_reject::simplify_type(tcx, impl_self_ty, TreatParams::AsInfer)
+ {
+ impls.non_blanket_impls.entry(simplified_self_ty).or_default().push(impl_def_id);
+ } else {
+ impls.blanket_impls.push(impl_def_id);
+ }
+ }
+
+ impls
+}
+
+// Query provider for `incoherent_impls`.
+#[instrument(level = "debug", skip(tcx))]
+pub(super) fn incoherent_impls_provider(tcx: TyCtxt<'_>, simp: SimplifiedType) -> &[DefId] {
+ let mut impls = Vec::new();
+
+ for cnum in iter::once(LOCAL_CRATE).chain(tcx.crates(()).iter().copied()) {
+ for &impl_def_id in tcx.crate_incoherent_impls((cnum, simp)) {
+ impls.push(impl_def_id)
+ }
+ }
+
+ debug!(?impls);
+
+ tcx.arena.alloc_slice(&impls)
+}
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
new file mode 100644
index 000000000..591bb7831
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -0,0 +1,1294 @@
+//! Miscellaneous type-system utilities that are too small to deserve their own modules.
+
+use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use crate::ty::layout::IntegerExt;
+use crate::ty::query::TyCtxtAt;
+use crate::ty::subst::{GenericArgKind, Subst, SubstsRef};
+use crate::ty::{
+ self, DefIdTree, FallibleTypeFolder, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
+ TypeVisitable,
+};
+use rustc_apfloat::Float as _;
+use rustc_ast as ast;
+use rustc_attr::{self as attr, SignedInt, UnsignedInt};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::GrowableBitSet;
+use rustc_macros::HashStable;
+use rustc_span::{sym, DUMMY_SP};
+use rustc_target::abi::{Integer, Size, TargetDataLayout};
+use rustc_target::spec::abi::Abi;
+use smallvec::SmallVec;
+use std::{fmt, iter};
+
+#[derive(Copy, Clone, Debug)]
+pub struct Discr<'tcx> {
+ /// Bit representation of the discriminant (e.g., `-128i8` is `0xFF_u128`).
+ pub val: u128,
+ pub ty: Ty<'tcx>,
+}
+
+/// Used as an input to [`TyCtxt::uses_unique_generic_params`].
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum IgnoreRegions {
+ Yes,
+ No,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum NotUniqueParam<'tcx> {
+ DuplicateParam(ty::GenericArg<'tcx>),
+ NotParam(ty::GenericArg<'tcx>),
+}
+
+impl<'tcx> fmt::Display for Discr<'tcx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self.ty.kind() {
+ ty::Int(ity) => {
+ let size = ty::tls::with(|tcx| Integer::from_int_ty(&tcx, ity).size());
+ let x = self.val;
+ // sign extend the raw representation to be an i128
+ let x = size.sign_extend(x) as i128;
+ write!(fmt, "{}", x)
+ }
+ _ => write!(fmt, "{}", self.val),
+ }
+ }
+}
+
+fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
+ let (int, signed) = match *ty.kind() {
+ ty::Int(ity) => (Integer::from_int_ty(&tcx, ity), true),
+ ty::Uint(uty) => (Integer::from_uint_ty(&tcx, uty), false),
+ _ => bug!("non integer discriminant"),
+ };
+ (int.size(), signed)
+}
+
+impl<'tcx> Discr<'tcx> {
+ /// Adds `1` to the value and wraps around if the maximum for the type is reached.
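+ /// For example (illustrative): a `u8` discriminant at `255` wraps to `0`,
+ /// and an `i8` discriminant at `127` wraps to `-128`.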
+ pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
+ self.checked_add(tcx, 1).0
+ }
+ pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
+ let (size, signed) = int_size_and_signed(tcx, self.ty);
+ let (val, oflo) = if signed {
+ let min = size.signed_int_min();
+ let max = size.signed_int_max();
+ let val = size.sign_extend(self.val) as i128;
+ assert!(n < (i128::MAX as u128));
+ let n = n as i128;
+ let oflo = val > max - n;
+ let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
+ // zero the upper bits
+ let val = val as u128;
+ let val = size.truncate(val);
+ (val, oflo)
+ } else {
+ let max = size.unsigned_int_max();
+ let val = self.val;
+ let oflo = val > max - n;
+ let val = if oflo { n - (max - val) - 1 } else { val + n };
+ (val, oflo)
+ };
+ (Self { val, ty: self.ty }, oflo)
+ }
+}
+
+pub trait IntTypeExt {
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+ fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
+ fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
+}
+
+impl IntTypeExt for attr::IntType {
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ SignedInt(ast::IntTy::I8) => tcx.types.i8,
+ SignedInt(ast::IntTy::I16) => tcx.types.i16,
+ SignedInt(ast::IntTy::I32) => tcx.types.i32,
+ SignedInt(ast::IntTy::I64) => tcx.types.i64,
+ SignedInt(ast::IntTy::I128) => tcx.types.i128,
+ SignedInt(ast::IntTy::Isize) => tcx.types.isize,
+ UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
+ UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
+ UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
+ UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
+ UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
+ UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
+ }
+ }
+
+ fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
+ Discr { val: 0, ty: self.to_ty(tcx) }
+ }
+
+ fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
+ if let Some(val) = val {
+ assert_eq!(self.to_ty(tcx), val.ty);
+ let (new, oflo) = val.checked_add(tcx, 1);
+ if oflo { None } else { Some(new) }
+ } else {
+ Some(self.initial_discriminant(tcx))
+ }
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Creates a hash of the type `Ty` which will be the same no matter what crate
+ /// context it's calculated within. This is used by the `type_id` intrinsic.
+ pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
+ // We want the `type_id` to be independent of the type's free regions, so we
+ // erase them. The erase_regions() call will also anonymize bound
+ // regions, which is desirable too.
+ let ty = self.erase_regions(ty);
+
+ self.with_stable_hashing_context(|mut hcx| {
+ let mut hasher = StableHasher::new();
+ hcx.while_hashing_spans(false, |hcx| ty.hash_stable(hcx, &mut hasher));
+ hasher.finish()
+ })
+ }
+
+ pub fn res_generics_def_id(self, res: Res) -> Option<DefId> {
+ match res {
+ Res::Def(DefKind::Ctor(CtorOf::Variant, _), def_id) => {
+ Some(self.parent(self.parent(def_id)))
+ }
+ Res::Def(DefKind::Variant | DefKind::Ctor(CtorOf::Struct, _), def_id) => {
+ Some(self.parent(def_id))
+ }
+ // Other `DefKind`s don't have generics and would ICE when calling
+ // `generics_of`.
+ Res::Def(
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Trait
+ | DefKind::OpaqueTy
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy
+ | DefKind::Fn
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::Impl,
+ def_id,
+ ) => Some(def_id),
+ Res::Err => None,
+ _ => None,
+ }
+ }
+
+ pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
+ if let ty::Adt(def, substs) = *ty.kind() {
+ for field in def.all_fields() {
+ let field_ty = field.ty(self, substs);
+ if let ty::Error(_) = field_ty.kind() {
+ return true;
+ }
+ }
+ }
+ false
+ }
+
+ /// Attempts to return the deeply last field of nested structures, but
+ /// does not apply any normalization in its search. Returns the same type
+ /// if the input `ty` is not a structure at all.
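+ ///
+ /// For example (illustrative, reusing the example given for the lockstep
+ /// variants below): the struct tail of `Foo<Foo<T>>` is `T`.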
+ pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let tcx = self;
+ tcx.struct_tail_with_normalize(ty, |ty| ty, || {})
+ }
+
+ /// Returns the deeply last field of nested structures, or the same type if
+ /// not a structure at all. Corresponds to the only possible unsized field,
+ /// and its type can be used to determine unsizing strategy.
+ ///
+ /// Should only be called if `ty` has no inference variables and does not
+ /// need its lifetimes preserved (e.g., as part of codegen); otherwise,
+ /// a normalization attempt may cause compiler bugs.
+ pub fn struct_tail_erasing_lifetimes(
+ self,
+ ty: Ty<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self;
+ tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty), || {})
+ }
+
+ /// Returns the deeply last field of nested structures, or the same type if
+ /// not a structure at all. Corresponds to the only possible unsized field,
+ /// and its type can be used to determine unsizing strategy.
+ ///
+ /// This is parameterized over the normalization strategy (i.e. how to
+ /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
+ /// function to indicate no normalization should take place.
+ ///
+ /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
+ /// during codegen.
+ pub fn struct_tail_with_normalize(
+ self,
+ mut ty: Ty<'tcx>,
+ mut normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>,
+ // This is currently used to allow us to walk a ValTree
+ // in lockstep with the type in order to get the ValTree branch that
+ // corresponds to an unsized field.
+ mut f: impl FnMut() -> (),
+ ) -> Ty<'tcx> {
+ let recursion_limit = self.recursion_limit();
+ for iteration in 0.. {
+ if !recursion_limit.value_within_limit(iteration) {
+ return self.ty_error_with_message(
+ DUMMY_SP,
+ &format!("reached the recursion limit finding the struct tail for {}", ty),
+ );
+ }
+ match *ty.kind() {
+ ty::Adt(def, substs) => {
+ if !def.is_struct() {
+ break;
+ }
+ match def.non_enum_variant().fields.last() {
+ Some(field) => {
+ f();
+ ty = field.ty(self, substs);
+ }
+ None => break,
+ }
+ }
+
+ ty::Tuple(tys) if let Some((&last_ty, _)) = tys.split_last() => {
+ f();
+ ty = last_ty;
+ }
+
+ ty::Tuple(_) => break,
+
+ ty::Projection(_) | ty::Opaque(..) => {
+ let normalized = normalize(ty);
+ if ty == normalized {
+ return ty;
+ } else {
+ ty = normalized;
+ }
+ }
+
+ _ => {
+ break;
+ }
+ }
+ }
+ ty
+ }
+
+ /// Same as applying `struct_tail` on `source` and `target`, but only
+ /// keeps going as long as the two types are instances of the same
+ /// structure definitions.
+ /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
+ /// whereas `struct_tail` produces `T` and `Trait`, respectively.
+ ///
+ /// Should only be called if the types have no inference variables and do
+ /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
+ /// a normalization attempt may cause compiler bugs.
+ pub fn struct_lockstep_tails_erasing_lifetimes(
+ self,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> (Ty<'tcx>, Ty<'tcx>) {
+ let tcx = self;
+ tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
+ tcx.normalize_erasing_regions(param_env, ty)
+ })
+ }
+
+ /// Same as applying `struct_tail` on `source` and `target`, but only
+ /// keeps going as long as the two types are instances of the same
+ /// structure definitions.
+ /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
+ /// whereas `struct_tail` produces `T` and `Trait`, respectively.
+ ///
+ /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
+ /// during codegen.
+ pub fn struct_lockstep_tails_with_normalize(
+ self,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
+ ) -> (Ty<'tcx>, Ty<'tcx>) {
+ let (mut a, mut b) = (source, target);
+ loop {
+ match (&a.kind(), &b.kind()) {
+ (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs))
+ if a_def == b_def && a_def.is_struct() =>
+ {
+ if let Some(f) = a_def.non_enum_variant().fields.last() {
+ a = f.ty(self, a_substs);
+ b = f.ty(self, b_substs);
+ } else {
+ break;
+ }
+ }
+ (&ty::Tuple(a_tys), &ty::Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
+ if let Some(&a_last) = a_tys.last() {
+ a = a_last;
+ b = *b_tys.last().unwrap();
+ } else {
+ break;
+ }
+ }
+ (ty::Projection(_) | ty::Opaque(..), _)
+ | (_, ty::Projection(_) | ty::Opaque(..)) => {
+ // If either side is a projection, attempt to
+ // progress via normalization. (Should be safe to
+ // apply to both sides as normalization is
+ // idempotent.)
+ let a_norm = normalize(a);
+ let b_norm = normalize(b);
+ if a == a_norm && b == b_norm {
+ break;
+ } else {
+ a = a_norm;
+ b = b_norm;
+ }
+ }
+
+ _ => break,
+ }
+ }
+ (a, b)
+ }
+
+ /// Calculate the destructor of a given type.
+ pub fn calculate_dtor(
+ self,
+ adt_did: DefId,
+ validate: impl Fn(Self, DefId) -> Result<(), ErrorGuaranteed>,
+ ) -> Option<ty::Destructor> {
+ let drop_trait = self.lang_items().drop_trait()?;
+ self.ensure().coherent_trait(drop_trait);
+
+ let ty = self.type_of(adt_did);
+ let (did, constness) = self.find_map_relevant_impl(drop_trait, ty, |impl_did| {
+ if let Some(item_id) = self.associated_item_def_ids(impl_did).first() {
+ if validate(self, impl_did).is_ok() {
+ return Some((*item_id, self.constness(impl_did)));
+ }
+ }
+ None
+ })?;
+
+ Some(ty::Destructor { did, constness })
+ }
+
+ /// Returns the set of types that are required to be alive in
+ /// order to run the destructor of `def` (see RFCs 769 and
+ /// 1238).
+ ///
+ /// Note that this returns only the constraints for the
+ /// destructor of `def` itself. For the destructors of the
+ /// contents, you need `adt_dtorck_constraint`.
+ pub fn destructor_constraints(self, def: ty::AdtDef<'tcx>) -> Vec<ty::subst::GenericArg<'tcx>> {
+ let dtor = match def.destructor(self) {
+ None => {
+ debug!("destructor_constraints({:?}) - no dtor", def.did());
+ return vec![];
+ }
+ Some(dtor) => dtor.did,
+ };
+
+ let impl_def_id = self.parent(dtor);
+ let impl_generics = self.generics_of(impl_def_id);
+
+ // We have a destructor - all the parameters that are not
+ // pure_wrt_drop (i.e., don't have a `#[may_dangle]` attribute)
+ // must be live.
+
+ // We need to return the list of parameters from the ADT's
+ // generics/substs that correspond to impure parameters on the
+ // impl's generics. This is a bit ugly, but conceptually simple:
+ //
+ // Suppose our ADT looks like the following
+ //
+ // struct S<X, Y, Z>(X, Y, Z);
+ //
+ // and the impl is
+ //
+ // impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
+ //
+ // We want to return the parameters (X, Y). For that, we match
+ // up the item-substs <X, Y, Z> with the substs on the impl ADT,
+ // <P1, P2, P0>, and then look up which of the impl substs refer to
+ // parameters marked as pure.
+
+ let impl_substs = match *self.type_of(impl_def_id).kind() {
+ ty::Adt(def_, substs) if def_ == def => substs,
+ _ => bug!(),
+ };
+
+ let item_substs = match *self.type_of(def.did()).kind() {
+ ty::Adt(def_, substs) if def_ == def => substs,
+ _ => bug!(),
+ };
+
+ let result = iter::zip(item_substs, impl_substs)
+ .filter(|&(_, k)| {
+ match k.unpack() {
+ GenericArgKind::Lifetime(region) => match region.kind() {
+ ty::ReEarlyBound(ref ebr) => {
+ !impl_generics.region_param(ebr, self).pure_wrt_drop
+ }
+ // Error: not a region param
+ _ => false,
+ },
+ GenericArgKind::Type(ty) => match ty.kind() {
+ ty::Param(ref pt) => !impl_generics.type_param(pt, self).pure_wrt_drop,
+ // Error: not a type param
+ _ => false,
+ },
+ GenericArgKind::Const(ct) => match ct.kind() {
+ ty::ConstKind::Param(ref pc) => {
+ !impl_generics.const_param(pc, self).pure_wrt_drop
+ }
+ // Error: not a const param
+ _ => false,
+ },
+ }
+ })
+ .map(|(item_param, _)| item_param)
+ .collect();
+ debug!("destructor_constraint({:?}) = {:?}", def.did(), result);
+ result
+ }
+
+ /// Checks whether each generic argument is simply a unique generic parameter.
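+ ///
+ /// For example (illustrative): `[T, U]` is fine, `[T, T]` fails with
+ /// `NotUniqueParam::DuplicateParam`, and `[T, u32]` fails with
+ /// `NotUniqueParam::NotParam`.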
+ pub fn uses_unique_generic_params(
+ self,
+ substs: SubstsRef<'tcx>,
+ ignore_regions: IgnoreRegions,
+ ) -> Result<(), NotUniqueParam<'tcx>> {
+ let mut seen = GrowableBitSet::default();
+ for arg in substs {
+ match arg.unpack() {
+ GenericArgKind::Lifetime(lt) => {
+ if ignore_regions == IgnoreRegions::No {
+ let ty::ReEarlyBound(p) = lt.kind() else {
+ return Err(NotUniqueParam::NotParam(lt.into()))
+ };
+ if !seen.insert(p.index) {
+ return Err(NotUniqueParam::DuplicateParam(lt.into()));
+ }
+ }
+ }
+ GenericArgKind::Type(t) => match t.kind() {
+ ty::Param(p) => {
+ if !seen.insert(p.index) {
+ return Err(NotUniqueParam::DuplicateParam(t.into()));
+ }
+ }
+ _ => return Err(NotUniqueParam::NotParam(t.into())),
+ },
+ GenericArgKind::Const(c) => match c.kind() {
+ ty::ConstKind::Param(p) => {
+ if !seen.insert(p.index) {
+ return Err(NotUniqueParam::DuplicateParam(c.into()));
+ }
+ }
+ _ => return Err(NotUniqueParam::NotParam(c.into())),
+ },
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`) or a generator. Note
+ /// that closures have a `DefId`, but the closure *expression* also
+ /// has a `HirId` that is located within the context where the
+ /// closure appears (and, sadly, a corresponding `NodeId`, since
+ /// those are not yet phased out). The parent of the closure's
+ /// `DefId` will also be the context where it appears.
+ pub fn is_closure(self, def_id: DefId) -> bool {
+ matches!(self.def_kind(def_id), DefKind::Closure | DefKind::Generator)
+ }
+
+ /// Returns `true` if `def_id` refers to a definition that does not have its own
+ /// type-checking context, i.e., a closure, generator, or inline const.
+ pub fn is_typeck_child(self, def_id: DefId) -> bool {
+ matches!(
+ self.def_kind(def_id),
+ DefKind::Closure | DefKind::Generator | DefKind::InlineConst
+ )
+ }
+
+ /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
+ pub fn is_trait(self, def_id: DefId) -> bool {
+ self.def_kind(def_id) == DefKind::Trait
+ }
+
+ /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
+ /// and `false` otherwise.
+ pub fn is_trait_alias(self, def_id: DefId) -> bool {
+ self.def_kind(def_id) == DefKind::TraitAlias
+ }
+
+ /// Returns `true` if this `DefId` refers to the implicit constructor for
+ /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
+ pub fn is_constructor(self, def_id: DefId) -> bool {
+ matches!(self.def_kind(def_id), DefKind::Ctor(..))
+ }
+
+ /// Given the `DefId`, returns the `DefId` of the innermost item that
+ /// has its own type-checking context or "inference environment".
+ ///
+ /// For example, a closure has its own `DefId`, but it is type-checked
+ /// with the containing item. Similarly, an inline const block has its
+ /// own `DefId` but it is type-checked together with the containing item.
+ ///
+ /// Therefore, when we fetch the
+ /// `typeck` of the closure, for example, we really wind up
+ /// fetching the `typeck` of the enclosing fn item.
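+ ///
+ /// For example (illustrative): for a closure defined inside the body of
+ /// `fn foo`, this returns the `DefId` of `foo`.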
+ pub fn typeck_root_def_id(self, def_id: DefId) -> DefId {
+ let mut def_id = def_id;
+ while self.is_typeck_child(def_id) {
+ def_id = self.parent(def_id);
+ }
+ def_id
+ }
+
+ /// Given the `DefId` and substs of a closure, creates the type of the
+ /// `self` argument that the closure expects. For example, for a
+ /// `Fn` closure, this would return a reference type `&T` where
+ /// `T = closure_ty`.
+ ///
+ /// Returns `None` if this closure's kind has not yet been inferred.
+ /// This should only be possible during type checking.
+ ///
+ /// Note that in the `Fn` and `FnMut` cases, the returned type borrows the
+ /// closure with the caller-supplied `env_region`.
+ pub fn closure_env_ty(
+ self,
+ closure_def_id: DefId,
+ closure_substs: SubstsRef<'tcx>,
+ env_region: ty::RegionKind<'tcx>,
+ ) -> Option<Ty<'tcx>> {
+ let closure_ty = self.mk_closure(closure_def_id, closure_substs);
+ let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
+ let env_ty = match closure_kind {
+ ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
+ ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
+ ty::ClosureKind::FnOnce => closure_ty,
+ };
+ Some(env_ty)
+ }
+
+ /// Returns `true` if the node pointed to by `def_id` is a `static` item.
+ #[inline]
+ pub fn is_static(self, def_id: DefId) -> bool {
+ matches!(self.def_kind(def_id), DefKind::Static(_))
+ }
+
+ #[inline]
+ pub fn static_mutability(self, def_id: DefId) -> Option<hir::Mutability> {
+ if let DefKind::Static(mt) = self.def_kind(def_id) { Some(mt) } else { None }
+ }
+
+ /// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
+ pub fn is_thread_local_static(self, def_id: DefId) -> bool {
+ self.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
+ }
+
+ /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
+ #[inline]
+ pub fn is_mutable_static(self, def_id: DefId) -> bool {
+ self.static_mutability(def_id) == Some(hir::Mutability::Mut)
+ }
+
+ /// Get the type of the pointer to the static that we use in MIR.
+ pub fn static_ptr_ty(self, def_id: DefId) -> Ty<'tcx> {
+ // Make sure that any constants in the static's type are evaluated.
+ let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));
+
+ // Make sure that accesses to unsafe statics end up using raw pointers.
+ // For thread-locals, this needs to be kept in sync with `Rvalue::ty`.
+ if self.is_mutable_static(def_id) {
+ self.mk_mut_ptr(static_ty)
+ } else if self.is_foreign_item(def_id) {
+ self.mk_imm_ptr(static_ty)
+ } else {
+ self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
+ }
+ }
+
+ /// Expands the given impl trait type, stopping if the type is recursive.
+ #[instrument(skip(self), level = "debug")]
+ pub fn try_expand_impl_trait_type(
+ self,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Result<Ty<'tcx>, Ty<'tcx>> {
+ let mut visitor = OpaqueTypeExpander {
+ seen_opaque_tys: FxHashSet::default(),
+ expanded_cache: FxHashMap::default(),
+ primary_def_id: Some(def_id),
+ found_recursion: false,
+ found_any_recursion: false,
+ check_recursion: true,
+ tcx: self,
+ };
+
+ let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
+ trace!(?expanded_type);
+ if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
+ }
+
+ pub fn bound_type_of(self, def_id: DefId) -> ty::EarlyBinder<Ty<'tcx>> {
+ ty::EarlyBinder(self.type_of(def_id))
+ }
+
+ pub fn bound_fn_sig(self, def_id: DefId) -> ty::EarlyBinder<ty::PolyFnSig<'tcx>> {
+ ty::EarlyBinder(self.fn_sig(def_id))
+ }
+
+ pub fn bound_impl_trait_ref(
+ self,
+ def_id: DefId,
+ ) -> Option<ty::EarlyBinder<ty::TraitRef<'tcx>>> {
+ self.impl_trait_ref(def_id).map(|i| ty::EarlyBinder(i))
+ }
+
+ pub fn bound_explicit_item_bounds(
+ self,
+ def_id: DefId,
+ ) -> ty::EarlyBinder<&'tcx [(ty::Predicate<'tcx>, rustc_span::Span)]> {
+ ty::EarlyBinder(self.explicit_item_bounds(def_id))
+ }
+
+ pub fn bound_item_bounds(
+ self,
+ def_id: DefId,
+ ) -> ty::EarlyBinder<&'tcx ty::List<ty::Predicate<'tcx>>> {
+ ty::EarlyBinder(self.item_bounds(def_id))
+ }
+
+ pub fn bound_const_param_default(self, def_id: DefId) -> ty::EarlyBinder<ty::Const<'tcx>> {
+ ty::EarlyBinder(self.const_param_default(def_id))
+ }
+
+ pub fn bound_predicates_of(
+ self,
+ def_id: DefId,
+ ) -> ty::EarlyBinder<ty::generics::GenericPredicates<'tcx>> {
+ ty::EarlyBinder(self.predicates_of(def_id))
+ }
+
+ pub fn bound_explicit_predicates_of(
+ self,
+ def_id: DefId,
+ ) -> ty::EarlyBinder<ty::generics::GenericPredicates<'tcx>> {
+ ty::EarlyBinder(self.explicit_predicates_of(def_id))
+ }
+
+ pub fn bound_impl_subject(self, def_id: DefId) -> ty::EarlyBinder<ty::ImplSubject<'tcx>> {
+ ty::EarlyBinder(self.impl_subject(def_id))
+ }
+}
+
+struct OpaqueTypeExpander<'tcx> {
+ // Contains the DefIds of the opaque types that are currently being
+ // expanded. When we expand an opaque type we insert the DefId of
+ // that type, and when we finish expanding that type we remove its
+ // DefId.
+ seen_opaque_tys: FxHashSet<DefId>,
+ // Cache of all expansions we've seen so far. This is a critical
+ // optimization for some large types produced by async fn trees.
+ expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
+ primary_def_id: Option<DefId>,
+ found_recursion: bool,
+ found_any_recursion: bool,
+ /// Whether or not to check for recursive opaque types.
+ /// This is `true` when we're explicitly checking for opaque type
+ /// recursion, and `false` otherwise to avoid unnecessary work.
+ check_recursion: bool,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> OpaqueTypeExpander<'tcx> {
+ fn expand_opaque_ty(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
+ if self.found_any_recursion {
+ return None;
+ }
+ let substs = substs.fold_with(self);
+ if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
+ let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
+ Some(expanded_ty) => *expanded_ty,
+ None => {
+ let generic_ty = self.tcx.bound_type_of(def_id);
+ let concrete_ty = generic_ty.subst(self.tcx, substs);
+ let expanded_ty = self.fold_ty(concrete_ty);
+ self.expanded_cache.insert((def_id, substs), expanded_ty);
+ expanded_ty
+ }
+ };
+ if self.check_recursion {
+ self.seen_opaque_tys.remove(&def_id);
+ }
+ Some(expanded_ty)
+ } else {
+ // If another opaque type that we contain is recursive, then it
+ // will report the error, so we don't have to.
+ self.found_any_recursion = true;
+ self.found_recursion = def_id == *self.primary_def_id.as_ref().unwrap();
+ None
+ }
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ if let ty::Opaque(def_id, substs) = *t.kind() {
+ self.expand_opaque_ty(def_id, substs).unwrap_or(t)
+ } else if t.has_opaque_types() {
+ t.super_fold_with(self)
+ } else {
+ t
+ }
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ /// Returns the maximum value for the given numeric type (including `char`s)
+ /// or returns `None` if the type is not numeric.
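+ ///
+ /// For example (illustrative): `255` for `u8`, `char::MAX` for `char`,
+ /// and positive infinity for `f32`/`f64`.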
+ pub fn numeric_max_val(self, tcx: TyCtxt<'tcx>) -> Option<ty::Const<'tcx>> {
+ let val = match self.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let (size, signed) = int_size_and_signed(tcx, self);
+ let val =
+ if signed { size.signed_int_max() as u128 } else { size.unsigned_int_max() };
+ Some(val)
+ }
+ ty::Char => Some(std::char::MAX as u128),
+ ty::Float(fty) => Some(match fty {
+ ty::FloatTy::F32 => rustc_apfloat::ieee::Single::INFINITY.to_bits(),
+ ty::FloatTy::F64 => rustc_apfloat::ieee::Double::INFINITY.to_bits(),
+ }),
+ _ => None,
+ };
+
+ val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
+ }
+
+ /// Returns the minimum value for the given numeric type (including `char`s)
+ /// or returns `None` if the type is not numeric.
+ pub fn numeric_min_val(self, tcx: TyCtxt<'tcx>) -> Option<ty::Const<'tcx>> {
+ let val = match self.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let (size, signed) = int_size_and_signed(tcx, self);
+ let val = if signed { size.truncate(size.signed_int_min() as u128) } else { 0 };
+ Some(val)
+ }
+ ty::Char => Some(0),
+ ty::Float(fty) => Some(match fty {
+ ty::FloatTy::F32 => (-::rustc_apfloat::ieee::Single::INFINITY).to_bits(),
+ ty::FloatTy::F64 => (-::rustc_apfloat::ieee::Double::INFINITY).to_bits(),
+ }),
+ _ => None,
+ };
+
+ val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
+ }
+
+ /// Checks whether values of this type `T` are *moved* or *copied*
+ /// when referenced -- this amounts to a check for whether `T:
+ /// Copy`, but note that we **don't** consider lifetimes when
+ /// doing this check. This means that we may generate MIR which
+ /// does copies even when the type actually doesn't satisfy the
+ /// full requirements for the `Copy` trait (cc #29149) -- this
+ /// winds up being reported as an error during NLL borrow check.
+ pub fn is_copy_modulo_regions(
+ self,
+ tcx_at: TyCtxtAt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> bool {
+ self.is_trivially_pure_clone_copy() || tcx_at.is_copy_raw(param_env.and(self))
+ }
+
+ /// Checks whether values of this type `T` have a size known at
+ /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
+ /// for the purposes of this check, so it can be an
+ /// over-approximation in generic contexts, where one can have
+ /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
+ /// actually carry lifetime requirements.
+ pub fn is_sized(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self))
+ }
+
+ /// Checks whether values of this type `T` implement the `Freeze`
+ /// trait -- frozen types are those that do not contain an
+ /// `UnsafeCell` anywhere. This is a language concept used to
+ /// distinguish "true immutability", which is relevant to
+ /// optimization as well as the rules around static values. Note
+ /// that the `Freeze` trait is not exposed to end users and is
+ /// effectively an implementation detail.
+ pub fn is_freeze(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self))
+ }
+
+ /// Fast path helper for testing if a type is `Freeze`.
+ ///
+ /// Returning true means the type is known to be `Freeze`. Returning
+ /// `false` means nothing -- could be `Freeze`, might not be.
+ fn is_trivially_freeze(self) -> bool {
+ match self.kind() {
+ ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Never
+ | ty::Ref(..)
+ | ty::RawPtr(_)
+ | ty::FnDef(..)
+ | ty::Error(_)
+ | ty::FnPtr(_) => true,
+ ty::Tuple(fields) => fields.iter().all(Self::is_trivially_freeze),
+ ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_freeze(),
+ ty::Adt(..)
+ | ty::Bound(..)
+ | ty::Closure(..)
+ | ty::Dynamic(..)
+ | ty::Foreign(_)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(_)
+ | ty::Infer(_)
+ | ty::Opaque(..)
+ | ty::Param(_)
+ | ty::Placeholder(_)
+ | ty::Projection(_) => false,
+ }
+ }
+
+ /// Checks whether values of this type `T` implement the `Unpin` trait.
+ pub fn is_unpin(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_unpin() || tcx_at.is_unpin_raw(param_env.and(self))
+ }
+
+ /// Fast path helper for testing if a type is `Unpin`.
+ ///
+ /// Returning true means the type is known to be `Unpin`. Returning
+ /// `false` means nothing -- could be `Unpin`, might not be.
+ fn is_trivially_unpin(self) -> bool {
+ match self.kind() {
+ ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Never
+ | ty::Ref(..)
+ | ty::RawPtr(_)
+ | ty::FnDef(..)
+ | ty::Error(_)
+ | ty::FnPtr(_) => true,
+ ty::Tuple(fields) => fields.iter().all(Self::is_trivially_unpin),
+ ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_unpin(),
+ ty::Adt(..)
+ | ty::Bound(..)
+ | ty::Closure(..)
+ | ty::Dynamic(..)
+ | ty::Foreign(_)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(_)
+ | ty::Infer(_)
+ | ty::Opaque(..)
+ | ty::Param(_)
+ | ty::Placeholder(_)
+ | ty::Projection(_) => false,
+ }
+ }
+
+ /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
+ /// non-copy and *might* have a destructor attached; if it returns
+ /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
+ ///
+ /// (Note that this implies that if `ty` has a destructor attached,
+ /// then `needs_drop` will definitely return `true` for `ty`.)
+ ///
+ /// Note that this method is used to check eligible types in unions.
+ #[inline]
+ pub fn needs_drop(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ // Avoid querying in simple cases.
+ match needs_drop_components(self, &tcx.data_layout) {
+ Err(AlwaysRequiresDrop) => true,
+ Ok(components) => {
+ let query_ty = match *components {
+ [] => return false,
+ // If we've got a single component, call the query with that
+ // to increase the chance that we hit the query cache.
+ [component_ty] => component_ty,
+ _ => self,
+ };
+
+ // This doesn't depend on regions, so try to minimize distinct
+ // query keys used.
+ // If normalization fails, we just use `query_ty`.
+ let query_ty =
+ tcx.try_normalize_erasing_regions(param_env, query_ty).unwrap_or(query_ty);
+
+ tcx.needs_drop_raw(param_env.and(query_ty))
+ }
+ }
+ }
+
+ /// Checks if `ty` has a significant drop.
+ ///
+ /// Note that this method can return `false` even if `ty` has a destructor
+ /// attached; if that is the case, the ADT has been marked with
+ /// the attribute `rustc_insignificant_dtor`.
+ ///
+ /// Note that this method is used to check for a change in drop order for
+ /// the RFC 2229 drop-reorder migration analysis.
+ #[inline]
+ pub fn has_significant_drop(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ // Avoid querying in simple cases.
+ match needs_drop_components(self, &tcx.data_layout) {
+ Err(AlwaysRequiresDrop) => true,
+ Ok(components) => {
+ let query_ty = match *components {
+ [] => return false,
+ // If we've got a single component, call the query with that
+ // to increase the chance that we hit the query cache.
+ [component_ty] => component_ty,
+ _ => self,
+ };
+
+ // FIXME(#86868): We should be canonicalizing, or else moving this to a method of inference
+ // context, or *something* like that, but for now just avoid passing inference
+ // variables to queries that can't cope with them. Instead, conservatively
+ // return "true" (may change drop order).
+ if query_ty.needs_infer() {
+ return true;
+ }
+
+ // This doesn't depend on regions, so try to minimize distinct
+ // query keys used.
+ let erased = tcx.normalize_erasing_regions(param_env, query_ty);
+ tcx.has_significant_drop_raw(param_env.and(erased))
+ }
+ }
+ }
+
+ /// Returns `true` if equality for this type is both reflexive and structural.
+ ///
+ /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
+ ///
+ /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
+ /// types, equality for the type as a whole is structural when it is the same as equality
+ /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
+ /// equality is indicated by an implementation of `StructuralPartialEq` and `StructuralEq` for
+ /// that type.
+ ///
+ /// This function is "shallow" because it may return `true` for a composite type whose fields
+ /// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T`
+ /// because equality for arrays is determined by the equality of each array element. If you
+ /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
+ /// down, you will need to use a type visitor.
+ #[inline]
+ pub fn is_structural_eq_shallow(self, tcx: TyCtxt<'tcx>) -> bool {
+ match self.kind() {
+ // Look for an impl of both `StructuralPartialEq` and `StructuralEq`.
+ ty::Adt(..) => tcx.has_structural_eq_impls(self),
+
+ // Primitive types that satisfy `Eq`.
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Str | ty::Never => true,
+
+ // Composite types that satisfy `Eq` when all of their fields do.
+ //
+ // Because this function is "shallow", we return `true` for these composites regardless
+ // of the type(s) contained within.
+ ty::Ref(..) | ty::Array(..) | ty::Slice(_) | ty::Tuple(..) => true,
+
+ // Raw pointers use bitwise comparison.
+ ty::RawPtr(_) | ty::FnPtr(_) => true,
+
+ // Floating point numbers are not `Eq`.
+ ty::Float(_) => false,
+
+ // Conservatively return `false` for all others...
+
+ // Anonymous function types
+ ty::FnDef(..) | ty::Closure(..) | ty::Dynamic(..) | ty::Generator(..) => false,
+
+ // Generic or inferred types
+ //
+ // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
+ // called for known, fully-monomorphized types.
+ ty::Projection(_)
+ | ty::Opaque(..)
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(_)
+ | ty::Infer(_) => false,
+
+ ty::Foreign(_) | ty::GeneratorWitness(..) | ty::Error(_) => false,
+ }
+ }
+
+ /// Peel off all reference types in this type until there are none left.
+ ///
+ /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
+ ///
+ /// # Examples
+ ///
+ /// - `u8` -> `u8`
+ /// - `&'a mut u8` -> `u8`
+ /// - `&'a &'b u8` -> `u8`
+ /// - `&'a *const &'b u8` -> `*const &'b u8`
+ pub fn peel_refs(self) -> Ty<'tcx> {
+ let mut ty = self;
+ while let ty::Ref(_, inner_ty, _) = ty.kind() {
+ ty = *inner_ty;
+ }
+ ty
+ }
+
+ #[inline]
+ pub fn outer_exclusive_binder(self) -> ty::DebruijnIndex {
+ self.0.outer_exclusive_binder
+ }
+}
+
+pub enum ExplicitSelf<'tcx> {
+ ByValue,
+ ByReference(ty::Region<'tcx>, hir::Mutability),
+ ByRawPointer(hir::Mutability),
+ ByBox,
+ Other,
+}
+
+impl<'tcx> ExplicitSelf<'tcx> {
+ /// Categorizes an explicit self declaration like `self: SomeType`
+ /// into either `self`, `&self`, `&mut self`, `Box<Self>`, or
+ /// `Other`.
+ /// This is mainly used to require the `arbitrary_self_types` feature
+ /// in the case of `Other`, to improve error messages in the common cases,
+ /// and to make `Other` non-object-safe.
+ ///
+ /// Examples:
+ ///
+ /// ```ignore (illustrative)
+ /// impl<'a> Foo for &'a T {
+ /// // Legal declarations:
+ /// fn method1(self: &&'a T); // ExplicitSelf::ByReference
+ /// fn method2(self: &'a T); // ExplicitSelf::ByValue
+ /// fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
+ /// fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
+ ///
+ /// // Invalid cases will be caught by `check_method_receiver`:
+ /// fn method_err1(self: &'a mut T); // ExplicitSelf::Other
+ /// fn method_err2(self: &'static T); // ExplicitSelf::ByValue
+ /// fn method_err3(self: &&T); // ExplicitSelf::ByReference
+ /// }
+ /// ```
+ ///
+ pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
+ where
+ P: Fn(Ty<'tcx>) -> bool,
+ {
+ use self::ExplicitSelf::*;
+
+ match *self_arg_ty.kind() {
+ _ if is_self_ty(self_arg_ty) => ByValue,
+ ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
+ ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,
+ _ => Other,
+ }
+ }
+}
+
+/// Returns a list of types such that the given type needs drop if and only if
+/// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
+/// this type always needs drop.
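+///
+/// For illustration, the intended contract on some sketched inputs (the
+/// `ty_*` values below are hypothetical):
+///
+/// ```ignore (illustrative)
+/// needs_drop_components(ty_u8, layout);           // Ok([]): never needs drop
+/// needs_drop_components(ty_tuple_u8_adt, layout); // Ok([TheAdt]): only the ADT component matters
+/// needs_drop_components(ty_dyn_trait, layout);    // Err(AlwaysRequiresDrop)
+/// ```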
+pub fn needs_drop_components<'tcx>(
+ ty: Ty<'tcx>,
+ target_layout: &TargetDataLayout,
+) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
+ match ty.kind() {
+ ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Bool
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Never
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Char
+ | ty::GeneratorWitness(..)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::Str => Ok(SmallVec::new()),
+
+ // Foreign types can never have destructors.
+ ty::Foreign(..) => Ok(SmallVec::new()),
+
+ ty::Dynamic(..) | ty::Error(_) => Err(AlwaysRequiresDrop),
+
+ ty::Slice(ty) => needs_drop_components(*ty, target_layout),
+ ty::Array(elem_ty, size) => {
+ match needs_drop_components(*elem_ty, target_layout) {
+ Ok(v) if v.is_empty() => Ok(v),
+ res => match size.kind().try_to_bits(target_layout.pointer_size) {
+ // Arrays of size zero don't need drop, even if their element
+ // type does.
+ Some(0) => Ok(SmallVec::new()),
+ Some(_) => res,
+ // We don't know which of the cases above we are in, so
+ // return the whole type and let the caller decide what to
+ // do.
+ None => Ok(smallvec![ty]),
+ },
+ }
+ }
+ // If any field needs drop, then the whole tuple does.
+ ty::Tuple(fields) => fields.iter().try_fold(SmallVec::new(), move |mut acc, elem| {
+ acc.extend(needs_drop_components(elem, target_layout)?);
+ Ok(acc)
+ }),
+
+ // These require checking for `Copy` bounds or `Adt` destructors.
+ ty::Adt(..)
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Opaque(..)
+ | ty::Infer(_)
+ | ty::Closure(..)
+ | ty::Generator(..) => Ok(smallvec![ty]),
+ }
+}
+
+pub fn is_trivially_const_drop<'tcx>(ty: Ty<'tcx>) -> bool {
+ match *ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Infer(ty::IntVar(_))
+ | ty::Infer(ty::FloatVar(_))
+ | ty::Str
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Never
+ | ty::Foreign(_) => true,
+
+ ty::Opaque(..)
+ | ty::Dynamic(..)
+ | ty::Error(_)
+ | ty::Bound(..)
+ | ty::Param(_)
+ | ty::Placeholder(_)
+ | ty::Projection(_)
+ | ty::Infer(_) => false,
+
+ // Not trivial because they have components, and instead of looking inside,
+ // we'll just perform trait selection.
+ ty::Closure(..) | ty::Generator(..) | ty::GeneratorWitness(_) | ty::Adt(..) => false,
+
+ ty::Array(ty, _) | ty::Slice(ty) => is_trivially_const_drop(ty),
+
+ ty::Tuple(tys) => tys.iter().all(|ty| is_trivially_const_drop(ty)),
+ }
+}
+
+// Does the equivalent of
+// ```
+// let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
+// folder.tcx().intern_*(&v)
+// ```
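+// but avoids re-interning the list when no element changed, so unchanged
+// lists keep their pointer identity (cheap equality and better caching).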
+pub fn fold_list<'tcx, F, T>(
+ list: &'tcx ty::List<T>,
+ folder: &mut F,
+ intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List<T>,
+) -> Result<&'tcx ty::List<T>, F::Error>
+where
+ F: FallibleTypeFolder<'tcx>,
+ T: TypeFoldable<'tcx> + PartialEq + Copy,
+{
+ let mut iter = list.iter();
+ // Look for the first element that changed
+ match iter.by_ref().enumerate().find_map(|(i, t)| match t.try_fold_with(folder) {
+ Ok(new_t) if new_t == t => None,
+ new_t => Some((i, new_t)),
+ }) {
+ Some((i, Ok(new_t))) => {
+ // An element changed, prepare to intern the resulting list
+ let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len());
+ new_list.extend_from_slice(&list[..i]);
+ new_list.push(new_t);
+ for t in iter {
+ new_list.push(t.try_fold_with(folder)?)
+ }
+ Ok(intern(folder.tcx(), &new_list))
+ }
+ Some((_, Err(err))) => {
+ return Err(err);
+ }
+ None => Ok(list),
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
+pub struct AlwaysRequiresDrop;
+
+/// Normalizes all opaque types in the given value, replacing them
+/// with their underlying types.
+pub fn normalize_opaque_types<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ val: &'tcx ty::List<ty::Predicate<'tcx>>,
+) -> &'tcx ty::List<ty::Predicate<'tcx>> {
+ let mut visitor = OpaqueTypeExpander {
+ seen_opaque_tys: FxHashSet::default(),
+ expanded_cache: FxHashMap::default(),
+ primary_def_id: None,
+ found_recursion: false,
+ found_any_recursion: false,
+ check_recursion: false,
+ tcx,
+ };
+ val.fold_with(&mut visitor)
+}
+
+/// Determines whether an item is annotated with `doc(hidden)`.
+pub fn is_doc_hidden(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.get_attrs(def_id, sym::doc)
+ .filter_map(|attr| attr.meta_item_list())
+ .any(|items| items.iter().any(|item| item.has_name(sym::hidden)))
+}
+
+/// Determines whether an item is an intrinsic by its ABI.
+pub fn is_intrinsic(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ matches!(tcx.fn_sig(def_id).abi(), Abi::RustIntrinsic | Abi::PlatformIntrinsic)
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers =
+ ty::query::Providers { normalize_opaque_types, is_doc_hidden, is_intrinsic, ..*providers }
+}
diff --git a/compiler/rustc_middle/src/ty/visit.rs b/compiler/rustc_middle/src/ty/visit.rs
new file mode 100644
index 000000000..536506720
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/visit.rs
@@ -0,0 +1,745 @@
+//! A visiting traversal mechanism for complex data structures that contain type
+//! information.
+//!
+//! This is a read-only traversal of the data structure.
+//!
+//! This traversal has limited flexibility. Only a small number of "types of
+//! interest" within the complex data structures can receive custom
+//! visitation. These are the ones containing the most important type-related
+//! information, such as `Ty`, `Predicate`, `Region`, and `Const`.
+//!
+//! There are three groups of traits involved in each traversal.
+//! - `TypeVisitable`. This is implemented once for many types, including:
+//! - Types of interest, for which the methods delegate to the
+//! visitor.
+//! - All other types, including generic containers like `Vec` and `Option`.
+//! It defines a "skeleton" of how they should be visited.
+//! - `TypeSuperVisitable`. This is implemented only for each type of interest,
+//! and defines the visiting "skeleton" for these types.
+//! - `TypeVisitor`. This is implemented for each visitor. This defines how
+//! types of interest are visited.
+//!
+//! This means each visit is a mixture of (a) generic visiting operations, and (b)
+//! custom visit operations that are specific to the visitor.
+//! - The `TypeVisitable` impls handle most of the traversal, and call into
+//! `TypeVisitor` when they encounter a type of interest.
+//! - A `TypeVisitor` may call into another `TypeVisitable` impl, because some of
+//! the types of interest are recursive and can contain other types of interest.
+//! - A `TypeVisitor` may also call into a `TypeSuperVisitable` impl, because each
+//! visitor might provide custom handling only for some types of interest, or
+//! only for some variants of each type of interest, and then use default
+//! traversal for the remaining cases.
+//!
+//! For example, if you have `struct S(Ty, U)` where `S: TypeVisitable` and `U:
+//! TypeVisitable`, and an instance `s = S(ty, u)`, it would be visited like so:
+//! ```text
+//! s.visit_with(visitor) calls
+//! - ty.visit_with(visitor) calls
+//! - visitor.visit_ty(ty) may call
+//! - ty.super_visit_with(visitor)
+//! - u.visit_with(visitor)
+//! ```
+use crate::mir;
+use crate::ty::{self, flags::FlagComputation, Binder, Ty, TyCtxt, TypeFlags};
+use rustc_errors::ErrorGuaranteed;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sso::SsoHashSet;
+use std::fmt;
+use std::ops::ControlFlow;
+
+/// This trait is implemented for every type that can be visited,
+/// providing the skeleton of the traversal.
+///
+/// To implement this conveniently, use the derive macro located in
+/// `rustc_macros`.
+pub trait TypeVisitable<'tcx>: fmt::Debug + Clone {
+ /// The entry point for visiting. To visit a value `t` with a visitor `v`
+ /// call: `t.visit_with(v)`.
+ ///
+ /// For most types, this just traverses the value, calling `visit_with` on
+ /// each field/element.
+ ///
+ /// For types of interest (such as `Ty`), the implementation of this method
+ /// calls a visitor method specifically for that type (such as
+ /// `V::visit_ty`). This is where control transfers from `TypeVisitable` to
+ /// `TypeVisitor`.
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy>;
+
+ /// Returns `true` if `self` has any late-bound regions that are either
+ /// bound by `binder` or bound by some binder outside of `binder`.
+ /// If `binder` is `ty::INNERMOST`, this indicates whether
+ /// there are any late-bound regions that appear free.
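+ ///
+ /// For illustration: `for<'a> fn(&'a u32)` as a whole has no vars bound
+ /// at or above `ty::INNERMOST` (the binder for `'a` is part of the value),
+ /// but the inner `fn(&'a u32)` obtained by skipping the binder does.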
+ fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool {
+ self.visit_with(&mut HasEscapingVarsVisitor { outer_index: binder }).is_break()
+ }
+
+ /// Returns `true` if this `self` has any regions that escape `binder` (and
+ /// hence are not bound by it).
+ fn has_vars_bound_above(&self, binder: ty::DebruijnIndex) -> bool {
+ self.has_vars_bound_at_or_above(binder.shifted_in(1))
+ }
+
+ fn has_escaping_bound_vars(&self) -> bool {
+ self.has_vars_bound_at_or_above(ty::INNERMOST)
+ }
+
+ #[instrument(level = "trace")]
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.visit_with(&mut HasTypeFlagsVisitor { flags }).break_value() == Some(FoundFlags)
+ }
+ fn has_projections(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_PROJECTION)
+ }
+ fn has_opaque_types(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_OPAQUE)
+ }
+ fn references_error(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_ERROR)
+ }
+ fn error_reported(&self) -> Option<ErrorGuaranteed> {
+ if self.references_error() {
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted())
+ } else {
+ None
+ }
+ }
+ fn has_param_types_or_consts(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_CT_PARAM)
+ }
+ fn has_infer_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_RE_INFER)
+ }
+ fn has_infer_types(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_INFER)
+ }
+ fn has_infer_types_or_consts(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_CT_INFER)
+ }
+ fn needs_infer(&self) -> bool {
+ self.has_type_flags(TypeFlags::NEEDS_INFER)
+ }
+ fn has_placeholders(&self) -> bool {
+ self.has_type_flags(
+ TypeFlags::HAS_RE_PLACEHOLDER
+ | TypeFlags::HAS_TY_PLACEHOLDER
+ | TypeFlags::HAS_CT_PLACEHOLDER,
+ )
+ }
+ fn needs_subst(&self) -> bool {
+ self.has_type_flags(TypeFlags::NEEDS_SUBST)
+ }
+ /// "Free" regions in this context means that it has any region
+ /// that is not (a) erased or (b) late-bound.
+ fn has_free_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_FREE_REGIONS)
+ }
+
+ fn has_erased_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_RE_ERASED)
+ }
+
+ /// True if there are any un-erased free regions.
+ fn has_erasable_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_FREE_REGIONS)
+ }
+
+ /// Indicates whether this value references only 'global'
+ /// generic parameters that are the same regardless of what fn we are
+ /// in. This is used for caching.
+ fn is_global(&self) -> bool {
+ !self.has_type_flags(TypeFlags::HAS_FREE_LOCAL_NAMES)
+ }
+
+ /// True if there are any late-bound regions.
+ fn has_late_bound_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND)
+ }
+
+ /// Indicates whether this value still has parameters/placeholders/inference variables
+ /// which could be replaced later, in a way that would change the results of `impl`
+ /// specialization.
+ fn still_further_specializable(&self) -> bool {
+ self.has_type_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE)
+ }
+}
+
+pub trait TypeSuperVisitable<'tcx>: TypeVisitable<'tcx> {
+ /// Provides a default visit for a type of interest. This should only be
+ /// called within `TypeVisitor` methods, when a non-custom traversal is
+ /// desired for the value of the type of interest passed to that method.
+ /// For example, in `MyVisitor::visit_ty(ty)`, it is valid to call
+ /// `ty.super_visit_with(self)`, but any other visiting should be done
+ /// with `xyz.visit_with(self)`.
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy>;
+}
+
+/// This trait is implemented for every visiting traversal. There is a visit
+/// method defined for every type of interest. Each such method has a default
+/// that recurses into the type's fields in a non-custom fashion.
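+///
+/// For illustration, a minimal sketch of a visitor (hypothetical; not a
+/// visitor that exists in the compiler):
+///
+/// ```ignore (illustrative)
+/// struct ParamFinder;
+///
+/// impl<'tcx> TypeVisitor<'tcx> for ParamFinder {
+///     type BreakTy = ();
+///
+///     fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+///         if let ty::Param(_) = t.kind() {
+///             // Found a type parameter: stop the traversal early.
+///             ControlFlow::BREAK
+///         } else {
+///             // Otherwise fall back to the default structural traversal.
+///             t.super_visit_with(self)
+///         }
+///     }
+/// }
+///
+/// // `ty.visit_with(&mut ParamFinder).is_break()` then tells us whether
+/// // `ty` mentions a type parameter.
+/// ```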
+pub trait TypeVisitor<'tcx>: Sized {
+ type BreakTy = !;
+
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ t.super_visit_with(self)
+ }
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ r.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ c.super_visit_with(self)
+ }
+
+ fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
+ uv.super_visit_with(self)
+ }
+
+ fn visit_predicate(&mut self, p: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
+ p.super_visit_with(self)
+ }
+
+ fn visit_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> ControlFlow<Self::BreakTy> {
+ c.super_visit_with(self)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Region folder
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Invoke `callback` on every region appearing free in `value`.
+ pub fn for_each_free_region(
+ self,
+ value: &impl TypeVisitable<'tcx>,
+ mut callback: impl FnMut(ty::Region<'tcx>),
+ ) {
+ self.any_free_region_meets(value, |r| {
+ callback(r);
+ false
+ });
+ }
+
+ /// Returns `true` if `callback` returns true for every region appearing free in `value`.
+ pub fn all_free_regions_meet(
+ self,
+ value: &impl TypeVisitable<'tcx>,
+ mut callback: impl FnMut(ty::Region<'tcx>) -> bool,
+ ) -> bool {
+ !self.any_free_region_meets(value, |r| !callback(r))
+ }
+
+ /// Returns `true` if `callback` returns true for some region appearing free in `value`.
+ pub fn any_free_region_meets(
+ self,
+ value: &impl TypeVisitable<'tcx>,
+ callback: impl FnMut(ty::Region<'tcx>) -> bool,
+ ) -> bool {
+ struct RegionVisitor<F> {
+ /// The index of a binder *just outside* the things we have
+ /// traversed. If we encounter a bound region bound by this
+ /// binder or one outer to it, it appears free. Example:
+ ///
+ /// ```ignore (illustrative)
+ /// for<'a> fn(for<'b> fn(), T)
+ /// // ^ ^ ^ ^
+ /// // | | | | here, would be shifted in 1
+ /// // | | | here, would be shifted in 2
+ /// // | | here, would be `INNERMOST` shifted in by 1
+ /// // | here, initially, binder would be `INNERMOST`
+ /// ```
+ ///
+ /// You see that, initially, *any* bound value is free,
+ /// because we've not traversed any binders. As we pass
+ /// through a binder, we shift the `outer_index` by 1 to
+ /// account for the new binder that encloses us.
+ outer_index: ty::DebruijnIndex,
+ callback: F,
+ }
+
+ impl<'tcx, F> TypeVisitor<'tcx> for RegionVisitor<F>
+ where
+ F: FnMut(ty::Region<'tcx>) -> bool,
+ {
+ type BreakTy = ();
+
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.outer_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.outer_index.shift_out(1);
+ result
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ ty::ReLateBound(debruijn, _) if debruijn < self.outer_index => {
+ ControlFlow::CONTINUE
+ }
+ _ => {
+ if (self.callback)(r) {
+ ControlFlow::BREAK
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+ }
+ }
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // We're only interested in types involving regions
+ if ty.flags().intersects(TypeFlags::HAS_FREE_REGIONS) {
+ ty.super_visit_with(self)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+ }
+
+ value.visit_with(&mut RegionVisitor { outer_index: ty::INNERMOST, callback }).is_break()
+ }
+
+ /// Returns a set of all late-bound regions that are constrained
+ /// by `value`, meaning that if we instantiate those LBR with
+ /// variables and equate `value` with something else, those
+ /// variables will also be equated.
+ pub fn collect_constrained_late_bound_regions<T>(
+ self,
+ value: &Binder<'tcx, T>,
+ ) -> FxHashSet<ty::BoundRegionKind>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ self.collect_late_bound_regions(value, true)
+ }
+
+ /// Returns a set of all late-bound regions that appear in `value` anywhere.
+ pub fn collect_referenced_late_bound_regions<T>(
+ self,
+ value: &Binder<'tcx, T>,
+ ) -> FxHashSet<ty::BoundRegionKind>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ self.collect_late_bound_regions(value, false)
+ }
+
+ fn collect_late_bound_regions<T>(
+ self,
+ value: &Binder<'tcx, T>,
+ just_constraint: bool,
+ ) -> FxHashSet<ty::BoundRegionKind>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ let mut collector = LateBoundRegionsCollector::new(just_constraint);
+ let result = value.as_ref().skip_binder().visit_with(&mut collector);
+ assert!(result.is_continue()); // should never have stopped early
+ collector.regions
+ }
+}
+
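+/// Sanity-checks a value against its binder's list of bound variables:
+/// every bound var encountered must have an entry of the matching kind in
+/// `bound_vars`, and any mismatch triggers a `bug!`.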
+pub struct ValidateBoundVars<'tcx> {
+ bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
+ binder_index: ty::DebruijnIndex,
+ // We may encounter the same variable at different levels of binding, so
+ // this can't just be `Ty`
+ visited: SsoHashSet<(ty::DebruijnIndex, Ty<'tcx>)>,
+}
+
+impl<'tcx> ValidateBoundVars<'tcx> {
+ pub fn new(bound_vars: &'tcx ty::List<ty::BoundVariableKind>) -> Self {
+ ValidateBoundVars {
+ bound_vars,
+ binder_index: ty::INNERMOST,
+ visited: SsoHashSet::default(),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for ValidateBoundVars<'tcx> {
+ type BreakTy = ();
+
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.binder_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.binder_index.shift_out(1);
+ result
+ }
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if t.outer_exclusive_binder() < self.binder_index
+ || !self.visited.insert((self.binder_index, t))
+ {
+ return ControlFlow::BREAK;
+ }
+ match *t.kind() {
+ ty::Bound(debruijn, bound_ty) if debruijn == self.binder_index => {
+ if self.bound_vars.len() <= bound_ty.var.as_usize() {
+ bug!("Not enough bound vars: {:?} not found in {:?}", t, self.bound_vars);
+ }
+ let list_var = self.bound_vars[bound_ty.var.as_usize()];
+ match list_var {
+ ty::BoundVariableKind::Ty(kind) => {
+ if kind != bound_ty.kind {
+ bug!(
+ "Mismatched type kinds: {:?} doesn't var in list {:?}",
+ bound_ty.kind,
+ list_var
+ );
+ }
+ }
+ _ => {
+ bug!("Mismatched bound variable kinds! Expected type, found {:?}", list_var)
+ }
+ }
+ }
+
+ _ => (),
+ };
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ ty::ReLateBound(index, br) if index == self.binder_index => {
+ if self.bound_vars.len() <= br.var.as_usize() {
+ bug!("Not enough bound vars: {:?} not found in {:?}", br, self.bound_vars);
+ }
+ let list_var = self.bound_vars[br.var.as_usize()];
+ match list_var {
+ ty::BoundVariableKind::Region(kind) => {
+ if kind != br.kind {
+ bug!(
+ "Mismatched region kinds: {:?} doesn't match var ({:?}) in list ({:?})",
+ br.kind,
+ list_var,
+ self.bound_vars
+ );
+ }
+ }
+ _ => bug!(
+ "Mismatched bound variable kinds! Expected region, found {:?}",
+ list_var
+ ),
+ }
+ }
+
+ _ => (),
+ };
+
+ r.super_visit_with(self)
+ }
+}
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+struct FoundEscapingVars;
+
+/// An "escaping var" is a bound var whose binder is not part of `t`. A bound var can be a
+/// bound region or a bound type.
+///
+/// So, for example, consider a type like the following, which has two binders:
+///
+/// for<'a> fn(x: for<'b> fn(&'a isize, &'b isize))
+/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
+/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~ inner scope
+///
+/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the
+/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
+/// fn type*, that type has an escaping region: `'a`.
+///
+/// Note that what I'm calling an "escaping var" is often just called a "free var". However,
+/// we already use the term "free var". It refers to the regions or types that we use to represent
+/// bound regions or type params on a fn definition while we are type checking its body.
+///
+/// To clarify, conceptually there is no particular difference between
+/// an "escaping" var and a "free" var. However, there is a big
+/// difference in practice. Basically, when "entering" a binding
+/// level, one is generally required to do some sort of processing to
+/// a bound var, such as replacing it with a fresh/placeholder
+/// var, or making an entry in the environment to represent the
+/// scope to which it is attached, etc. An escaping var represents
+/// a bound var for which this processing has not yet been done.
+struct HasEscapingVarsVisitor {
+ /// Anything bound by `outer_index` or "above" is escaping.
+ outer_index: ty::DebruijnIndex,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for HasEscapingVarsVisitor {
+ type BreakTy = FoundEscapingVars;
+
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.outer_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.outer_index.shift_out(1);
+ result
+ }
+
+ #[inline]
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // If the outer-exclusive-binder is *strictly greater* than
+ // `outer_index`, that means that `t` contains some content
+ // bound at `outer_index` or above (because
+ // `outer_exclusive_binder` is always 1 higher than the
+ // content in `t`). Therefore, `t` has some escaping vars.
+ if t.outer_exclusive_binder() > self.outer_index {
+ ControlFlow::Break(FoundEscapingVars)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // If the region is bound by `outer_index` or anything outside
+ // of outer index, then it escapes the binders we have
+ // visited.
+ if r.bound_at_or_above_binder(self.outer_index) {
+ ControlFlow::Break(FoundEscapingVars)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ fn visit_const(&mut self, ct: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // we don't have a `visit_infer_const` callback, so we have to
+ // hook in here to catch this case (annoying...), but
+ // otherwise we do want to remember to visit the rest of the
+ // const, as it has types/regions embedded in a lot of other
+ // places.
+ match ct.kind() {
+ ty::ConstKind::Bound(debruijn, _) if debruijn >= self.outer_index => {
+ ControlFlow::Break(FoundEscapingVars)
+ }
+ _ => ct.super_visit_with(self),
+ }
+ }
+
+ #[inline]
+ fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if predicate.outer_exclusive_binder() > self.outer_index {
+ ControlFlow::Break(FoundEscapingVars)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+}
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+struct FoundFlags;
+
+// FIXME: Optimize for checking for infer flags
+struct HasTypeFlagsVisitor {
+ flags: ty::TypeFlags,
+}
+
+impl std::fmt::Debug for HasTypeFlagsVisitor {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ self.flags.fmt(fmt)
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
+ type BreakTy = FoundFlags;
+
+ #[inline]
+ #[instrument(skip(self), level = "trace")]
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let flags = t.flags();
+ trace!(t.flags=?t.flags());
+ if flags.intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ #[instrument(skip(self), level = "trace")]
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let flags = r.type_flags();
+ trace!(r.flags=?flags);
+ if flags.intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ #[instrument(level = "trace")]
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let flags = FlagComputation::for_const(c);
+ trace!(c.flags=?flags);
+ if flags.intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ #[instrument(level = "trace")]
+ fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let flags = FlagComputation::for_unevaluated_const(uv);
+ trace!(uv.flags=?flags);
+ if flags.intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ #[instrument(level = "trace")]
+ fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
+ debug!(
+ "HasTypeFlagsVisitor: predicate={:?} predicate.flags={:?} self.flags={:?}",
+ predicate,
+ predicate.flags(),
+ self.flags
+ );
+ if predicate.flags().intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+}
+
+/// Collects all the late-bound regions at the innermost binding level
+/// into a hash set.
+struct LateBoundRegionsCollector {
+ current_index: ty::DebruijnIndex,
+ regions: FxHashSet<ty::BoundRegionKind>,
+
+ /// `true` if we only want regions that are known to be
+ /// "constrained" when you equate this type with another type. In
+ /// particular, if you have e.g., `&'a u32` and `&'b u32`, equating
+ /// them constrains `'a == 'b`. But if you have `<&'a u32 as
+ /// Trait>::Foo` and `<&'b u32 as Trait>::Foo`, normalizing those
+ /// types may mean that `'a` and `'b` don't appear in the results,
+ /// so they are not considered *constrained*.
+ just_constrained: bool,
+}
+
+impl LateBoundRegionsCollector {
+ fn new(just_constrained: bool) -> Self {
+ LateBoundRegionsCollector {
+ current_index: ty::INNERMOST,
+ regions: Default::default(),
+ just_constrained,
+ }
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector {
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.current_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.current_index.shift_out(1);
+ result
+ }
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // If we are only looking for "constrained" regions, we have to
+ // ignore the inputs to a projection, as they may not appear
+ // in the normalized form.
+ if self.just_constrained {
+ if let ty::Projection(..) = t.kind() {
+ return ControlFlow::CONTINUE;
+ }
+ }
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // If we are only looking for "constrained" regions, we have to
+ // ignore the inputs of an unevaluated const, as they may not appear
+ // in the normalized form.
+ if self.just_constrained {
+ if let ty::ConstKind::Unevaluated(..) = c.kind() {
+ return ControlFlow::CONTINUE;
+ }
+ }
+
+ c.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ReLateBound(debruijn, br) = *r {
+ if debruijn == self.current_index {
+ self.regions.insert(br.kind);
+ }
+ }
+ ControlFlow::CONTINUE
+ }
+}
+
+/// Finds the maximum universe present.
+pub struct MaxUniverse {
+ max_universe: ty::UniverseIndex,
+}
+
+impl MaxUniverse {
+ pub fn new() -> Self {
+ MaxUniverse { max_universe: ty::UniverseIndex::ROOT }
+ }
+
+ pub fn max_universe(self) -> ty::UniverseIndex {
+ self.max_universe
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for MaxUniverse {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::Placeholder(placeholder) = t.kind() {
+ self.max_universe = ty::UniverseIndex::from_u32(
+ self.max_universe.as_u32().max(placeholder.universe.as_u32()),
+ );
+ }
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: ty::consts::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ConstKind::Placeholder(placeholder) = c.kind() {
+ self.max_universe = ty::UniverseIndex::from_u32(
+ self.max_universe.as_u32().max(placeholder.universe.as_u32()),
+ );
+ }
+
+ c.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::RePlaceholder(placeholder) = *r {
+ self.max_universe = ty::UniverseIndex::from_u32(
+ self.max_universe.as_u32().max(placeholder.universe.as_u32()),
+ );
+ }
+
+ ControlFlow::CONTINUE
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs
new file mode 100644
index 000000000..04a9fd1f7
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/vtable.rs
@@ -0,0 +1,117 @@
+use std::convert::TryFrom;
+use std::fmt;
+
+use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar, ScalarMaybeUninit};
+use crate::ty::{self, Instance, PolyTraitRef, Ty, TyCtxt};
+use rustc_ast::Mutability;
+
+#[derive(Clone, Copy, PartialEq, HashStable)]
+pub enum VtblEntry<'tcx> {
+ /// destructor of this type (used in vtable header)
+ MetadataDropInPlace,
+ /// layout size of this type (used in vtable header)
+ MetadataSize,
+ /// layout align of this type (used in vtable header)
+ MetadataAlign,
+ /// non-dispatchable associated function that is excluded from trait object
+ Vacant,
+ /// dispatchable associated function
+ Method(Instance<'tcx>),
+ /// pointer to a separate supertrait vtable, can be used by trait upcasting coercion
+ TraitVPtr(PolyTraitRef<'tcx>),
+}
+
+impl<'tcx> fmt::Debug for VtblEntry<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // We want to call `Display` on `Instance` and `PolyTraitRef`,
+ // so we implement this manually.
+ match self {
+ VtblEntry::MetadataDropInPlace => write!(f, "MetadataDropInPlace"),
+ VtblEntry::MetadataSize => write!(f, "MetadataSize"),
+ VtblEntry::MetadataAlign => write!(f, "MetadataAlign"),
+ VtblEntry::Vacant => write!(f, "Vacant"),
+ VtblEntry::Method(instance) => write!(f, "Method({})", instance),
+ VtblEntry::TraitVPtr(trait_ref) => write!(f, "TraitVPtr({})", trait_ref),
+ }
+ }
+}
+
+// Needs to be associated with the `'tcx` lifetime
+impl<'tcx> TyCtxt<'tcx> {
+ pub const COMMON_VTABLE_ENTRIES: &'tcx [VtblEntry<'tcx>] =
+ &[VtblEntry::MetadataDropInPlace, VtblEntry::MetadataSize, VtblEntry::MetadataAlign];
+}
+
+pub const COMMON_VTABLE_ENTRIES_DROPINPLACE: usize = 0;
+pub const COMMON_VTABLE_ENTRIES_SIZE: usize = 1;
+pub const COMMON_VTABLE_ENTRIES_ALIGN: usize = 2;
+
+/// Retrieves an allocation that represents the contents of a vtable.
+/// Since this is a query, allocations are cached and not duplicated.
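+///
+/// For illustration, a sketch of the allocation this builds (each slot is
+/// `ptr_size` wide; the entries past the common header depend on the trait):
+///
+/// ```text
+/// [0]   drop_in_place fn pointer        (MetadataDropInPlace)
+/// [1]   size of the concrete type       (MetadataSize)
+/// [2]   alignment of the concrete type  (MetadataAlign)
+/// [3..] method fn pointers, supertrait vptrs, or vacant (uninit) slots
+/// ```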
+pub(super) fn vtable_allocation_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>),
+) -> AllocId {
+ let (ty, poly_trait_ref) = key;
+
+ let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref {
+ let trait_ref = poly_trait_ref.with_self_ty(tcx, ty);
+ let trait_ref = tcx.erase_regions(trait_ref);
+
+ tcx.vtable_entries(trait_ref)
+ } else {
+ TyCtxt::COMMON_VTABLE_ENTRIES
+ };
+
+ let layout = tcx
+ .layout_of(ty::ParamEnv::reveal_all().and(ty))
+ .expect("failed to build vtable representation");
+ assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
+ let size = layout.size.bytes();
+ let align = layout.align.abi.bytes();
+
+ let ptr_size = tcx.data_layout.pointer_size;
+ let ptr_align = tcx.data_layout.pointer_align.abi;
+
+ let vtable_size = ptr_size * u64::try_from(vtable_entries.len()).unwrap();
+ let mut vtable = Allocation::uninit(vtable_size, ptr_align, /* panic_on_fail */ true).unwrap();
+
+ // No need to do any alignment checks on the memory accesses below, because we know the
+ // allocation is correctly aligned as we created it above. Also we're only offsetting by
+ // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
+
+ for (idx, entry) in vtable_entries.iter().enumerate() {
+ let idx: u64 = u64::try_from(idx).unwrap();
+ let scalar = match entry {
+ VtblEntry::MetadataDropInPlace => {
+ let instance = ty::Instance::resolve_drop_in_place(tcx, ty);
+ let fn_alloc_id = tcx.create_fn_alloc(instance);
+ let fn_ptr = Pointer::from(fn_alloc_id);
+ ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
+ }
+ VtblEntry::MetadataSize => Scalar::from_uint(size, ptr_size).into(),
+ VtblEntry::MetadataAlign => Scalar::from_uint(align, ptr_size).into(),
+ VtblEntry::Vacant => continue,
+ VtblEntry::Method(instance) => {
+ // Prepare the fn ptr we write into the vtable.
+ let instance = instance.polymorphize(tcx);
+ let fn_alloc_id = tcx.create_fn_alloc(instance);
+ let fn_ptr = Pointer::from(fn_alloc_id);
+ ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
+ }
+ VtblEntry::TraitVPtr(trait_ref) => {
+ let super_trait_ref = trait_ref
+ .map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
+ let supertrait_alloc_id = tcx.vtable_allocation((ty, Some(super_trait_ref)));
+ let vptr = Pointer::from(supertrait_alloc_id);
+ ScalarMaybeUninit::from_pointer(vptr, &tcx)
+ }
+ };
+ vtable
+ .write_scalar(&tcx, alloc_range(ptr_size * idx, ptr_size), scalar)
+ .expect("failed to build vtable representation");
+ }
+
+ vtable.mutability = Mutability::Not;
+ tcx.create_memory_alloc(tcx.intern_const_alloc(vtable))
+}
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs
new file mode 100644
index 000000000..02fe1f3a7
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/walk.rs
@@ -0,0 +1,207 @@
+//! An iterator over the type substructure.
+//! WARNING: this does not keep track of the region depth.
+
+use crate::ty::subst::{GenericArg, GenericArgKind};
+use crate::ty::{self, Ty};
+use rustc_data_structures::sso::SsoHashSet;
+use smallvec::{self, SmallVec};
+
+// The TypeWalker's stack is hot enough that it's worth going to some effort to
+// avoid heap allocations.
+type TypeWalkerStack<'tcx> = SmallVec<[GenericArg<'tcx>; 8]>;
+
+pub struct TypeWalker<'tcx> {
+ stack: TypeWalkerStack<'tcx>,
+ last_subtree: usize,
+ pub visited: SsoHashSet<GenericArg<'tcx>>,
+}
+
+/// An iterator for walking the type tree.
+///
+/// It's very easy to produce a deeply nested type tree with a lot of
+/// identical subtrees. In order to work efficiently in this situation, the
+/// walker only visits each type once. It maintains a set of visited types
+/// and skips any types that are already there.
+impl<'tcx> TypeWalker<'tcx> {
+ pub fn new(root: GenericArg<'tcx>) -> Self {
+ Self { stack: smallvec![root], last_subtree: 1, visited: SsoHashSet::new() }
+ }
+
+ /// Skips the subtree corresponding to the last type
+ /// returned by `next()`.
+ ///
+ /// Example: Imagine you are walking `Foo<Bar<i32>, usize>`.
+ ///
+ /// ```ignore (illustrative)
+ /// let mut iter: TypeWalker = ...;
+ /// iter.next(); // yields Foo
+ /// iter.next(); // yields Bar<i32>
+ /// iter.skip_current_subtree(); // skips i32
+ /// iter.next(); // yields usize
+ /// ```
+ pub fn skip_current_subtree(&mut self) {
+ self.stack.truncate(self.last_subtree);
+ }
+}
+
+impl<'tcx> Iterator for TypeWalker<'tcx> {
+ type Item = GenericArg<'tcx>;
+
+ fn next(&mut self) -> Option<GenericArg<'tcx>> {
+ debug!("next(): stack={:?}", self.stack);
+ loop {
+ let next = self.stack.pop()?;
+ self.last_subtree = self.stack.len();
+ if self.visited.insert(next) {
+ push_inner(&mut self.stack, next);
+ debug!("next: stack={:?}", self.stack);
+ return Some(next);
+ }
+ }
+ }
+}
+
+impl<'tcx> GenericArg<'tcx> {
+ /// Iterator that walks `self` and any types reachable from
+ /// `self`, in depth-first order. Note that this just walks the types
+ /// that appear in `self`; it does not descend into the fields of
+ /// structs or variants. For example:
+ ///
+ /// ```text
+ /// isize => { isize }
+ /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
+ /// [isize] => { [isize], isize }
+ /// ```
+ pub fn walk(self) -> TypeWalker<'tcx> {
+ TypeWalker::new(self)
+ }
+
+ /// Iterator that walks the immediate children of `self`. Hence
+ /// `Foo<Bar<i32>, u32>` yields the sequence `[Bar<i32>, u32]`
+ /// (but not `i32`, like `walk`).
+ ///
+ /// The iterator only walks each item once: it accepts a visited set,
+ /// updates it with all visited types, and skips any types that are
+ /// already in it.
+ pub fn walk_shallow(
+ self,
+ visited: &mut SsoHashSet<GenericArg<'tcx>>,
+ ) -> impl Iterator<Item = GenericArg<'tcx>> {
+ let mut stack = SmallVec::new();
+ push_inner(&mut stack, self);
+ stack.retain(|a| visited.insert(*a));
+ stack.into_iter()
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ /// Iterator that walks `self` and any types reachable from
+ /// `self`, in depth-first order. Note that this just walks the types
+ /// that appear in `self`; it does not descend into the fields of
+ /// structs or variants. For example:
+ ///
+ /// ```text
+ /// isize => { isize }
+ /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
+ /// [isize] => { [isize], isize }
+ /// ```
+ pub fn walk(self) -> TypeWalker<'tcx> {
+ TypeWalker::new(self.into())
+ }
+}
+
+/// We push `GenericArg`s on the stack in reverse order so as to
+/// maintain a pre-order traversal. As of the time of this
+/// writing, the fact that the traversal is pre-order is not
+/// known to be significant to any code, but it seems like the
+/// natural order one would expect (basically, the order of the
+/// types as they are written).
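+///
+/// For illustration: for `Foo<A, B>` the substs are pushed as `[B, A]`, so
+/// `A` is popped (and therefore yielded) before `B`.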
+fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>) {
+ match parent.unpack() {
+ GenericArgKind::Type(parent_ty) => match *parent_ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Infer(_)
+ | ty::Param(_)
+ | ty::Never
+ | ty::Error(_)
+ | ty::Placeholder(..)
+ | ty::Bound(..)
+ | ty::Foreign(..) => {}
+
+ ty::Array(ty, len) => {
+ stack.push(len.into());
+ stack.push(ty.into());
+ }
+ ty::Slice(ty) => {
+ stack.push(ty.into());
+ }
+ ty::RawPtr(mt) => {
+ stack.push(mt.ty.into());
+ }
+ ty::Ref(lt, ty, _) => {
+ stack.push(ty.into());
+ stack.push(lt.into());
+ }
+ ty::Projection(data) => {
+ stack.extend(data.substs.iter().rev());
+ }
+ ty::Dynamic(obj, lt) => {
+ stack.push(lt.into());
+ stack.extend(obj.iter().rev().flat_map(|predicate| {
+ let (substs, opt_ty) = match predicate.skip_binder() {
+ ty::ExistentialPredicate::Trait(tr) => (tr.substs, None),
+ ty::ExistentialPredicate::Projection(p) => (p.substs, Some(p.term)),
+ ty::ExistentialPredicate::AutoTrait(_) =>
+ // Empty iterator
+ {
+ (ty::InternalSubsts::empty(), None)
+ }
+ };
+
+ substs.iter().rev().chain(opt_ty.map(|term| match term {
+ ty::Term::Ty(ty) => ty.into(),
+ ty::Term::Const(ct) => ct.into(),
+ }))
+ }));
+ }
+ ty::Adt(_, substs)
+ | ty::Opaque(_, substs)
+ | ty::Closure(_, substs)
+ | ty::Generator(_, substs, _)
+ | ty::FnDef(_, substs) => {
+ stack.extend(substs.iter().rev());
+ }
+ ty::Tuple(ts) => stack.extend(ts.as_substs().iter().rev()),
+ ty::GeneratorWitness(ts) => {
+ stack.extend(ts.skip_binder().iter().rev().map(|ty| ty.into()));
+ }
+ ty::FnPtr(sig) => {
+ stack.push(sig.skip_binder().output().into());
+ stack.extend(sig.skip_binder().inputs().iter().copied().rev().map(|ty| ty.into()));
+ }
+ },
+ GenericArgKind::Lifetime(_) => {}
+ GenericArgKind::Const(parent_ct) => {
+ stack.push(parent_ct.ty().into());
+ match parent_ct.kind() {
+ ty::ConstKind::Infer(_)
+ | ty::ConstKind::Param(_)
+ | ty::ConstKind::Placeholder(_)
+ | ty::ConstKind::Bound(..)
+ | ty::ConstKind::Value(_)
+ | ty::ConstKind::Error(_) => {}
+
+ ty::ConstKind::Unevaluated(ct) => {
+ stack.extend(ct.substs.iter().rev());
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/util/bug.rs b/compiler/rustc_middle/src/util/bug.rs
new file mode 100644
index 000000000..fd7045d6a
--- /dev/null
+++ b/compiler/rustc_middle/src/util/bug.rs
@@ -0,0 +1,54 @@
+// These functions are used by macro expansion for bug! and span_bug!
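+//
+// For illustration, typical call sites (hypothetical messages):
+//
+//     bug!("unexpected terminator: {:?}", kind);
+//     span_bug!(span, "unexpected type: {:?}", ty);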
+
+use crate::ty::{tls, TyCtxt};
+use rustc_errors::MultiSpan;
+use rustc_span::Span;
+use std::fmt;
+use std::panic::{panic_any, Location};
+
+#[cold]
+#[inline(never)]
+#[track_caller]
+pub fn bug_fmt(args: fmt::Arguments<'_>) -> ! {
+ // This wrapper mostly exists so we don't have to write a fully
+ // qualified path to `None::<Span>` inside the `bug!()` macro definition.
+ opt_span_bug_fmt(None::<Span>, args, Location::caller());
+}
+
+#[cold]
+#[inline(never)]
+#[track_caller]
+pub fn span_bug_fmt<S: Into<MultiSpan>>(span: S, args: fmt::Arguments<'_>) -> ! {
+ opt_span_bug_fmt(Some(span), args, Location::caller());
+}
+
+#[track_caller]
+fn opt_span_bug_fmt<S: Into<MultiSpan>>(
+ span: Option<S>,
+ args: fmt::Arguments<'_>,
+ location: &Location<'_>,
+) -> ! {
+ tls::with_opt(move |tcx| {
+ let msg = format!("{}: {}", location, args);
+ match (tcx, span) {
+ (Some(tcx), Some(span)) => tcx.sess.diagnostic().span_bug(span, &msg),
+ (Some(tcx), None) => tcx.sess.diagnostic().bug(&msg),
+ (None, _) => panic_any(msg),
+ }
+ });
+ unreachable!();
+}
+
+/// A query to trigger a `delay_span_bug`. Clearly, if one has a `tcx` one can already trigger a
+/// `delay_span_bug`, so what is the point of this? It exists to help us test `delay_span_bug`'s
+/// interactions with the query system and incremental.
+pub fn trigger_delay_span_bug(tcx: TyCtxt<'_>, key: rustc_hir::def_id::DefId) {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(key),
+ "delayed span bug triggered by #[rustc_error(delay_span_bug_from_inside_query)]",
+ );
+}
+
+pub fn provide(providers: &mut crate::ty::query::Providers) {
+ *providers = crate::ty::query::Providers { trigger_delay_span_bug, ..*providers };
+}
diff --git a/compiler/rustc_middle/src/util/common.rs b/compiler/rustc_middle/src/util/common.rs
new file mode 100644
index 000000000..08977049d
--- /dev/null
+++ b/compiler/rustc_middle/src/util/common.rs
@@ -0,0 +1,67 @@
+use rustc_data_structures::sync::Lock;
+
+use std::fmt::Debug;
+use std::time::{Duration, Instant};
+
+#[cfg(test)]
+mod tests;
+
+pub fn to_readable_str(mut val: usize) -> String {
+ let mut groups = vec![];
+ loop {
+ let group = val % 1000;
+
+ val /= 1000;
+
+ if val == 0 {
+ groups.push(group.to_string());
+ break;
+ } else {
+ groups.push(format!("{:03}", group));
+ }
+ }
+
+ groups.reverse();
+
+ groups.join("_")
+}
+
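+/// Runs `f` and adds its wall-clock duration to the accumulator `accu`.
+///
+/// For illustration, a sketched (hypothetical) use:
+///
+/// ```ignore (illustrative)
+/// let result = record_time(&self.total_lowering_time, || lower(input));
+/// ```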
+pub fn record_time<T, F>(accu: &Lock<Duration>, f: F) -> T
+where
+ F: FnOnce() -> T,
+{
+ let start = Instant::now();
+ let rv = f();
+ let duration = start.elapsed();
+ let mut accu = accu.lock();
+ *accu += duration;
+ rv
+}
+
+pub fn indent<R, F>(op: F) -> R
+where
+ R: Debug,
+ F: FnOnce() -> R,
+{
+ // Use in conjunction with the log post-processor like `src/etc/indenter`
+ // to make debug output more readable.
+ debug!(">>");
+ let r = op();
+ debug!("<< (Result = {:?})", r);
+ r
+}
+
+pub struct Indenter {
+ _cannot_construct_outside_of_this_module: (),
+}
+
+impl Drop for Indenter {
+ fn drop(&mut self) {
+ debug!("<<");
+ }
+}
+
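+/// Logs `>>` now and `<<` when the returned guard is dropped.
+///
+/// For illustration, a sketched (hypothetical) use:
+///
+/// ```ignore (illustrative)
+/// let _guard = indenter(); // logs ">>"
+/// do_nested_work();        // nested debug output in between
+/// // logs "<<" when `_guard` goes out of scope
+/// ```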
+pub fn indenter() -> Indenter {
+ debug!(">>");
+ Indenter { _cannot_construct_outside_of_this_module: () }
+}
diff --git a/compiler/rustc_middle/src/util/common/tests.rs b/compiler/rustc_middle/src/util/common/tests.rs
new file mode 100644
index 000000000..9a9fb203c
--- /dev/null
+++ b/compiler/rustc_middle/src/util/common/tests.rs
@@ -0,0 +1,14 @@
+use super::*;
+
+#[test]
+fn test_to_readable_str() {
+ assert_eq!("0", to_readable_str(0));
+ assert_eq!("1", to_readable_str(1));
+ assert_eq!("99", to_readable_str(99));
+ assert_eq!("999", to_readable_str(999));
+ assert_eq!("1_000", to_readable_str(1_000));
+ assert_eq!("1_001", to_readable_str(1_001));
+ assert_eq!("999_999", to_readable_str(999_999));
+ assert_eq!("1_000_000", to_readable_str(1_000_000));
+ assert_eq!("1_234_567", to_readable_str(1_234_567));
+}
diff --git a/compiler/rustc_mir_build/Cargo.toml b/compiler/rustc_mir_build/Cargo.toml
new file mode 100644
index 000000000..30f90e383
--- /dev/null
+++ b/compiler/rustc_mir_build/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "rustc_mir_build"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_arena = { path = "../rustc_arena" }
+tracing = "0.1"
+rustc_middle = { path = "../rustc_middle" }
+rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_index = { path = "../rustc_index" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_ast = { path = "../rustc_ast" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_mir_build/src/build/block.rs b/compiler/rustc_mir_build/src/build/block.rs
new file mode 100644
index 000000000..687560012
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/block.rs
@@ -0,0 +1,240 @@
+use crate::build::matches::ArmHasGuard;
+use crate::build::ForGuard::OutsideGuard;
+use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder};
+use rustc_middle::thir::*;
+use rustc_middle::{mir::*, ty};
+use rustc_span::Span;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ pub(crate) fn ast_block(
+ &mut self,
+ destination: Place<'tcx>,
+ block: BasicBlock,
+ ast_block: &Block,
+ source_info: SourceInfo,
+ ) -> BlockAnd<()> {
+ let Block {
+ region_scope,
+ opt_destruction_scope,
+ span,
+ ref stmts,
+ expr,
+ targeted_by_break,
+ safety_mode,
+ } = *ast_block;
+ let expr = expr.map(|expr| &self.thir[expr]);
+ self.in_opt_scope(opt_destruction_scope.map(|de| (de, source_info)), move |this| {
+ this.in_scope((region_scope, source_info), LintLevel::Inherited, move |this| {
+ if targeted_by_break {
+ this.in_breakable_scope(None, destination, span, |this| {
+ Some(this.ast_block_stmts(
+ destination,
+ block,
+ span,
+ &stmts,
+ expr,
+ safety_mode,
+ ))
+ })
+ } else {
+ this.ast_block_stmts(destination, block, span, &stmts, expr, safety_mode)
+ }
+ })
+ })
+ }
+
+ fn ast_block_stmts(
+ &mut self,
+ destination: Place<'tcx>,
+ mut block: BasicBlock,
+ span: Span,
+ stmts: &[StmtId],
+ expr: Option<&Expr<'tcx>>,
+ safety_mode: BlockSafety,
+ ) -> BlockAnd<()> {
+ let this = self;
+
+ // This convoluted structure is to avoid using recursion as we walk down a list
+ // of statements. Basically, the structure we get back is something like:
+ //
+ // let x = <init> in {
+ // expr1;
+ // let y = <init> in {
+ // expr2;
+ // expr3;
+ // ...
+ // }
+ // }
+ //
+ // The let bindings are valid till the end of block so all we have to do is to pop all
+ // the let-scopes at the end.
+ //
+ // First we build all the statements in the block.
+ let mut let_scope_stack = Vec::with_capacity(8);
+ let outer_source_scope = this.source_scope;
+ let outer_in_scope_unsafe = this.in_scope_unsafe;
+ this.update_source_scope_for_safety_mode(span, safety_mode);
+
+ let source_info = this.source_info(span);
+ for stmt in stmts {
+ let Stmt { ref kind, opt_destruction_scope } = this.thir[*stmt];
+ match kind {
+ StmtKind::Expr { scope, expr } => {
+ this.block_context.push(BlockFrame::Statement { ignores_expr_result: true });
+ unpack!(
+ block = this.in_opt_scope(
+ opt_destruction_scope.map(|de| (de, source_info)),
+ |this| {
+ let si = (*scope, source_info);
+ this.in_scope(si, LintLevel::Inherited, |this| {
+ this.stmt_expr(block, &this.thir[*expr], Some(*scope))
+ })
+ }
+ )
+ );
+ }
+ StmtKind::Let {
+ remainder_scope,
+ init_scope,
+ ref pattern,
+ initializer,
+ lint_level,
+ else_block,
+ } => {
+ let ignores_expr_result = matches!(*pattern.kind, PatKind::Wild);
+ this.block_context.push(BlockFrame::Statement { ignores_expr_result });
+
+ // Enter the remainder scope, i.e., the bindings' destruction scope.
+ this.push_scope((*remainder_scope, source_info));
+ let_scope_stack.push(remainder_scope);
+
+ // Declare the bindings, which may create a source scope.
+ let remainder_span = remainder_scope.span(this.tcx, this.region_scope_tree);
+
+ let visibility_scope =
+ Some(this.new_source_scope(remainder_span, LintLevel::Inherited, None));
+
+ // Evaluate the initializer, if present.
+ if let Some(init) = initializer {
+ let init = &this.thir[*init];
+ let initializer_span = init.span;
+
+ unpack!(
+ block = this.in_opt_scope(
+ opt_destruction_scope.map(|de| (de, source_info)),
+ |this| {
+ let scope = (*init_scope, source_info);
+ this.in_scope(scope, *lint_level, |this| {
+ if let Some(else_block) = else_block {
+ this.ast_let_else(
+ block,
+ init,
+ initializer_span,
+ else_block,
+ visibility_scope,
+ *remainder_scope,
+ remainder_span,
+ pattern,
+ )
+ } else {
+ this.declare_bindings(
+ visibility_scope,
+ remainder_span,
+ pattern,
+ ArmHasGuard(false),
+ Some((None, initializer_span)),
+ );
+ this.expr_into_pattern(block, pattern.clone(), init) // irrefutable pattern
+ }
+ })
+ },
+ )
+ )
+ } else {
+ let scope = (*init_scope, source_info);
+ unpack!(this.in_scope(scope, *lint_level, |this| {
+ this.declare_bindings(
+ visibility_scope,
+ remainder_span,
+ pattern,
+ ArmHasGuard(false),
+ None,
+ );
+ block.unit()
+ }));
+
+ debug!("ast_block_stmts: pattern={:?}", pattern);
+ this.visit_primary_bindings(
+ pattern,
+ UserTypeProjections::none(),
+ &mut |this, _, _, _, node, span, _, _| {
+ this.storage_live_binding(block, node, span, OutsideGuard, true);
+ this.schedule_drop_for_binding(node, span, OutsideGuard);
+ },
+ )
+ }
+
+ // Enter the visibility scope, after evaluating the initializer.
+ if let Some(source_scope) = visibility_scope {
+ this.source_scope = source_scope;
+ }
+ }
+ }
+
+ let popped = this.block_context.pop();
+ assert!(popped.map_or(false, |bf| bf.is_statement()));
+ }
+
+ // Then, the block may have an optional trailing expression which is a “return” value
+ // of the block, which is stored into `destination`.
+ let tcx = this.tcx;
+ let destination_ty = destination.ty(&this.local_decls, tcx).ty;
+ if let Some(expr) = expr {
+ let tail_result_is_ignored =
+ destination_ty.is_unit() || this.block_context.currently_ignores_tail_results();
+ this.block_context
+ .push(BlockFrame::TailExpr { tail_result_is_ignored, span: expr.span });
+
+ unpack!(block = this.expr_into_dest(destination, block, expr));
+ let popped = this.block_context.pop();
+
+ assert!(popped.map_or(false, |bf| bf.is_tail_expr()));
+ } else {
+ // If a block has no trailing expression, then it is given an implicit return type.
+ // This return type is usually `()`, unless the block is diverging, in which case the
+ // return type is `!`. For the unit type, we need to actually return the unit, but in
+ // the case of `!`, no return value is required, as the block will never return.
+ // Opaque types of empty bodies also need this unit assignment, in order to infer that their
+ // type is actually unit. Otherwise there will be no defining use found in the MIR.
+ if destination_ty.is_unit() || matches!(destination_ty.kind(), ty::Opaque(..)) {
+ // We only want to assign an implicit `()` as the return value of the block if the
+ // block does not diverge. (Otherwise, we may try to assign a unit to a `!`-type.)
+ this.cfg.push_assign_unit(block, source_info, destination, this.tcx);
+ }
+ }
+ // Finally, we pop all the let scopes before exiting out from the scope of block
+ // itself.
+ for scope in let_scope_stack.into_iter().rev() {
+ unpack!(block = this.pop_scope((*scope, source_info), block));
+ }
+ // Restore the original source scope.
+ this.source_scope = outer_source_scope;
+ this.in_scope_unsafe = outer_in_scope_unsafe;
+ block.unit()
+ }
+
+ /// If we are entering an unsafe block, create a new source scope
+ fn update_source_scope_for_safety_mode(&mut self, span: Span, safety_mode: BlockSafety) {
+ debug!("update_source_scope_for({:?}, {:?})", span, safety_mode);
+ let new_unsafety = match safety_mode {
+ BlockSafety::Safe => return,
+ BlockSafety::BuiltinUnsafe => Safety::BuiltinUnsafe,
+ BlockSafety::ExplicitUnsafe(hir_id) => {
+ self.in_scope_unsafe = Safety::ExplicitUnsafe(hir_id);
+ Safety::ExplicitUnsafe(hir_id)
+ }
+ };
+
+ self.source_scope = self.new_source_scope(span, LintLevel::Inherited, Some(new_unsafety));
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/cfg.rs b/compiler/rustc_mir_build/src/build/cfg.rs
new file mode 100644
index 000000000..d7b4b1f73
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/cfg.rs
@@ -0,0 +1,113 @@
+//! Routines for manipulating the control-flow graph.
+
+use crate::build::CFG;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+impl<'tcx> CFG<'tcx> {
+ pub(crate) fn block_data(&self, blk: BasicBlock) -> &BasicBlockData<'tcx> {
+ &self.basic_blocks[blk]
+ }
+
+ pub(crate) fn block_data_mut(&mut self, blk: BasicBlock) -> &mut BasicBlockData<'tcx> {
+ &mut self.basic_blocks[blk]
+ }
+
+ // llvm.org/PR32488 makes this function use an excess of stack space. Mark
+ // it as #[inline(never)] to keep rustc's stack use in check.
+ #[inline(never)]
+ pub(crate) fn start_new_block(&mut self) -> BasicBlock {
+ self.basic_blocks.push(BasicBlockData::new(None))
+ }
+
+ pub(crate) fn start_new_cleanup_block(&mut self) -> BasicBlock {
+ let bb = self.start_new_block();
+ self.block_data_mut(bb).is_cleanup = true;
+ bb
+ }
+
+ pub(crate) fn push(&mut self, block: BasicBlock, statement: Statement<'tcx>) {
+ debug!("push({:?}, {:?})", block, statement);
+ self.block_data_mut(block).statements.push(statement);
+ }
+
+ pub(crate) fn push_assign(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ place: Place<'tcx>,
+ rvalue: Rvalue<'tcx>,
+ ) {
+ self.push(
+ block,
+ Statement { source_info, kind: StatementKind::Assign(Box::new((place, rvalue))) },
+ );
+ }
+
+ pub(crate) fn push_assign_constant(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ temp: Place<'tcx>,
+ constant: Constant<'tcx>,
+ ) {
+ self.push_assign(
+ block,
+ source_info,
+ temp,
+ Rvalue::Use(Operand::Constant(Box::new(constant))),
+ );
+ }
+
+ pub(crate) fn push_assign_unit(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ place: Place<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ ) {
+ self.push_assign(
+ block,
+ source_info,
+ place,
+ Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span: source_info.span,
+ user_ty: None,
+ literal: ConstantKind::zero_sized(tcx.types.unit),
+ }))),
+ );
+ }
+
+ pub(crate) fn push_fake_read(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ cause: FakeReadCause,
+ place: Place<'tcx>,
+ ) {
+ let kind = StatementKind::FakeRead(Box::new((cause, place)));
+ let stmt = Statement { source_info, kind };
+ self.push(block, stmt);
+ }
+
+ pub(crate) fn terminate(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ kind: TerminatorKind<'tcx>,
+ ) {
+ debug!("terminating block {:?} <- {:?}", block, kind);
+ debug_assert!(
+ self.block_data(block).terminator.is_none(),
+ "terminate: block {:?}={:?} already has a terminator set",
+ block,
+ self.block_data(block)
+ );
+ self.block_data_mut(block).terminator = Some(Terminator { source_info, kind });
+ }
+
+ /// In the `origin` block, push a `goto -> target` terminator.
+ pub(crate) fn goto(&mut self, origin: BasicBlock, source_info: SourceInfo, target: BasicBlock) {
+ self.terminate(origin, source_info, TerminatorKind::Goto { target })
+ }
+}
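
Two invariants in the `CFG` helpers above are worth calling out: statements are only ever appended to a block, and every block receives exactly one terminator (the `debug_assert!` in `terminate` guards against setting it twice). A self-contained toy version of the same shape, with plain strings standing in for rustc's `Statement` and `Terminator` types:

```rust
#[derive(Default)]
struct BlockData {
    statements: Vec<String>,
    terminator: Option<String>,
    is_cleanup: bool,
}

#[derive(Default)]
struct Cfg {
    blocks: Vec<BlockData>,
}

impl Cfg {
    // Like `CFG::start_new_block`: append a fresh, unterminated block.
    fn start_new_block(&mut self) -> usize {
        self.blocks.push(BlockData::default());
        self.blocks.len() - 1
    }

    // Like `CFG::start_new_cleanup_block`: same, but flagged for unwinding.
    fn start_new_cleanup_block(&mut self) -> usize {
        let bb = self.start_new_block();
        self.blocks[bb].is_cleanup = true;
        bb
    }

    // Like `CFG::push`: statements are append-only.
    fn push(&mut self, block: usize, stmt: &str) {
        self.blocks[block].statements.push(stmt.to_string());
    }

    // Like `CFG::terminate`: each block gets exactly one terminator.
    fn terminate(&mut self, block: usize, kind: String) {
        assert!(self.blocks[block].terminator.is_none(), "terminator already set");
        self.blocks[block].terminator = Some(kind);
    }

    fn goto(&mut self, origin: usize, target: usize) {
        self.terminate(origin, format!("goto -> bb{}", target));
    }
}

fn main() {
    let mut cfg = Cfg::default();
    let entry = cfg.start_new_block();
    let exit = cfg.start_new_block();
    cfg.push(entry, "_0 = const ()"); // roughly what push_assign_unit emits
    cfg.goto(entry, exit);
    cfg.terminate(exit, "return".to_string());
    let _cleanup = cfg.start_new_cleanup_block();
}
```
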
diff --git a/compiler/rustc_mir_build/src/build/expr/as_constant.rs b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
new file mode 100644
index 000000000..648d10b9e
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_constant.rs
@@ -0,0 +1,152 @@
+//! See docs in `build/expr/mod.rs`.
+
+use crate::build::{parse_float_into_constval, Builder};
+use rustc_ast as ast;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::interpret::{
+ Allocation, ConstValue, LitToConstError, LitToConstInput, Scalar,
+};
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty, TyCtxt};
+use rustc_target::abi::Size;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Compile `expr`, yielding a compile-time constant. Assumes that
+ /// `expr` is a valid compile-time constant!
+ pub(crate) fn as_constant(&mut self, expr: &Expr<'tcx>) -> Constant<'tcx> {
+ let create_uneval_from_def_id =
+ |tcx: TyCtxt<'tcx>, def_id: DefId, ty: Ty<'tcx>, substs: SubstsRef<'tcx>| {
+ let uneval = ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs);
+ tcx.mk_const(ty::ConstS { kind: ty::ConstKind::Unevaluated(uneval), ty })
+ };
+
+ let this = self;
+ let tcx = this.tcx;
+ let Expr { ty, temp_lifetime: _, span, ref kind } = *expr;
+ match *kind {
+ ExprKind::Scope { region_scope: _, lint_level: _, value } => {
+ this.as_constant(&this.thir[value])
+ }
+ ExprKind::Literal { lit, neg } => {
+ let literal =
+ match lit_to_mir_constant(tcx, LitToConstInput { lit: &lit.node, ty, neg }) {
+ Ok(c) => c,
+ Err(LitToConstError::Reported) => ConstantKind::Ty(tcx.const_error(ty)),
+ Err(LitToConstError::TypeError) => {
+ bug!("encountered type error in `lit_to_mir_constant")
+ }
+ };
+
+ Constant { span, user_ty: None, literal }
+ }
+ ExprKind::NonHirLiteral { lit, user_ty } => {
+ let user_ty = user_ty.map(|user_ty| {
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span,
+ user_ty,
+ inferred_ty: ty,
+ })
+ });
+ let literal = ConstantKind::Val(ConstValue::Scalar(Scalar::Int(lit)), ty);
+
+ Constant { span, user_ty, literal }
+ }
+ ExprKind::ZstLiteral { user_ty } => {
+ let user_ty = user_ty.map(|user_ty| {
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span,
+ user_ty,
+ inferred_ty: ty,
+ })
+ });
+ let literal = ConstantKind::Val(ConstValue::ZeroSized, ty);
+
+ Constant { span, user_ty, literal }
+ }
+ ExprKind::NamedConst { def_id, substs, user_ty } => {
+ let user_ty = user_ty.map(|user_ty| {
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span,
+ user_ty,
+ inferred_ty: ty,
+ })
+ });
+ let literal = ConstantKind::Ty(create_uneval_from_def_id(tcx, def_id, ty, substs));
+
+ Constant { user_ty, span, literal }
+ }
+ ExprKind::ConstParam { param, def_id: _ } => {
+ let const_param =
+ tcx.mk_const(ty::ConstS { kind: ty::ConstKind::Param(param), ty: expr.ty });
+ let literal = ConstantKind::Ty(const_param);
+
+ Constant { user_ty: None, span, literal }
+ }
+ ExprKind::ConstBlock { did: def_id, substs } => {
+ let literal = ConstantKind::Ty(create_uneval_from_def_id(tcx, def_id, ty, substs));
+
+ Constant { user_ty: None, span, literal }
+ }
+ ExprKind::StaticRef { alloc_id, ty, .. } => {
+ let const_val = ConstValue::Scalar(Scalar::from_pointer(alloc_id.into(), &tcx));
+ let literal = ConstantKind::Val(const_val, ty);
+
+ Constant { span, user_ty: None, literal }
+ }
+ _ => span_bug!(span, "expression is not a valid constant {:?}", kind),
+ }
+ }
+}
+
+#[instrument(skip(tcx, lit_input))]
+pub(crate) fn lit_to_mir_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ lit_input: LitToConstInput<'tcx>,
+) -> Result<ConstantKind<'tcx>, LitToConstError> {
+ let LitToConstInput { lit, ty, neg } = lit_input;
+ let trunc = |n| {
+ let param_ty = ty::ParamEnv::reveal_all().and(ty);
+ let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.size;
+ trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits());
+ let result = width.truncate(n);
+ trace!("trunc result: {}", result);
+ Ok(ConstValue::Scalar(Scalar::from_uint(result, width)))
+ };
+
+ let value = match (lit, &ty.kind()) {
+ (ast::LitKind::Str(s, _), ty::Ref(_, inner_ty, _)) if inner_ty.is_str() => {
+ let s = s.as_str();
+ let allocation = Allocation::from_bytes_byte_aligned_immutable(s.as_bytes());
+ let allocation = tcx.intern_const_alloc(allocation);
+ ConstValue::Slice { data: allocation, start: 0, end: s.len() }
+ }
+ (ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _))
+ if matches!(inner_ty.kind(), ty::Slice(_)) =>
+ {
+ let allocation = Allocation::from_bytes_byte_aligned_immutable(data as &[u8]);
+ let allocation = tcx.intern_const_alloc(allocation);
+ ConstValue::Slice { data: allocation, start: 0, end: data.len() }
+ }
+ (ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _)) if inner_ty.is_array() => {
+ let id = tcx.allocate_bytes(data);
+ ConstValue::Scalar(Scalar::from_pointer(id.into(), &tcx))
+ }
+ (ast::LitKind::Byte(n), ty::Uint(ty::UintTy::U8)) => {
+ ConstValue::Scalar(Scalar::from_uint(*n, Size::from_bytes(1)))
+ }
+ (ast::LitKind::Int(n, _), ty::Uint(_)) | (ast::LitKind::Int(n, _), ty::Int(_)) => {
+ trunc(if neg { (*n as i128).overflowing_neg().0 as u128 } else { *n })?
+ }
+ (ast::LitKind::Float(n, _), ty::Float(fty)) => {
+ parse_float_into_constval(*n, *fty, neg).ok_or(LitToConstError::Reported)?
+ }
+ (ast::LitKind::Bool(b), ty::Bool) => ConstValue::Scalar(Scalar::from_bool(*b)),
+ (ast::LitKind::Char(c), ty::Char) => ConstValue::Scalar(Scalar::from_char(*c)),
+ (ast::LitKind::Err(_), _) => return Err(LitToConstError::Reported),
+ _ => return Err(LitToConstError::TypeError),
+ };
+
+ Ok(ConstantKind::Val(value, ty))
+}
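
The `trunc` closure in `lit_to_mir_constant` masks a 128-bit literal value down to the bit width of the target type; negative literals arrive as a positive literal plus `neg: true` and are negated with wrapping semantics in the 128-bit domain first. A standalone sketch of that arithmetic (the explicit mask is an assumption that matches what `Size::truncate` does for widths under 128 bits):

```rust
// Mask `n` down to its low `bits` bits, as `width.truncate(n)` does above.
fn trunc(n: u128, bits: u32) -> u128 {
    if bits == 128 { n } else { n & ((1u128 << bits) - 1) }
}

fn main() {
    // `-1i8` reaches lit_to_mir_constant as the literal 1 with `neg: true`;
    // the negation wraps in the 128-bit domain:
    let as_u128 = (1i128).overflowing_neg().0 as u128; // 0xffff...ffff
    assert_eq!(trunc(as_u128, 8), 0xff); // the 8-bit two's-complement pattern of -1
    assert_eq!(trunc(300, 8), 44); // a value wider than the type simply wraps
}
```
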
diff --git a/compiler/rustc_mir_build/src/build/expr/as_operand.rs b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
new file mode 100644
index 000000000..e707c373f
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_operand.rs
@@ -0,0 +1,184 @@
+//! See docs in `build/expr/mod.rs`.
+
+use crate::build::expr::category::Category;
+use crate::build::{BlockAnd, BlockAndExtension, Builder, NeedsTemporary};
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Returns an operand suitable for use until the end of the current
+ /// scope expression.
+ ///
+ /// The operand returned from this function will *not be valid*
+ /// after the current enclosing `ExprKind::Scope` has ended, so
+ /// please do *not* return it from functions to avoid bad
+ /// miscompiles.
+ pub(crate) fn as_local_operand(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
+ let local_scope = self.local_scope();
+ self.as_operand(block, Some(local_scope), expr, None, NeedsTemporary::Maybe)
+ }
+
+ /// Returns an operand suitable for use until the end of the current scope expression and
+ /// suitable also to be passed as function arguments. This is almost always equivalent to
+ /// `as_operand`, except for the particular case of passing values of (potentially) unsized
+ /// types "by value" (see details below).
+ ///
+ /// The operand returned from this function will *not be valid*
+ /// after the current enclosing `ExprKind::Scope` has ended, so
+ /// please do *not* return it from functions to avoid bad
+ /// miscompiles.
+ ///
+ /// # Parameters of unsized types
+ ///
+ /// We tweak the handling of parameters of unsized type slightly to avoid the need to create a
+ /// local variable of unsized type. For example, consider this program:
+ ///
+ /// ```
+ /// #![feature(unsized_locals, unsized_fn_params)]
+ /// # use core::fmt::Debug;
+ /// fn foo(p: dyn Debug) { dbg!(p); }
+ ///
+ /// fn bar(box_p: Box<dyn Debug>) { foo(*box_p); }
+ /// ```
+ ///
+ /// Ordinarily, for sized types, we would compile the call `foo(*box_p)` like so:
+ ///
+ /// ```ignore (illustrative)
+ /// let tmp0 = *box_p; // tmp0 would be the operand returned by this function call
+ /// foo(tmp0)
+ /// ```
+ ///
+ /// But because the parameter to `foo` is of the unsized type `dyn Debug`, and because it is
+ /// being moved out of the deref of a box, we compile it slightly differently. The temporary `tmp0`
+ /// that we create *stores the entire box*, and the parameter to the call itself will be
+ /// `*tmp0`:
+ ///
+ /// ```ignore (illustrative)
+ /// let tmp0 = box_p; call foo(*tmp0)
+ /// ```
+ ///
+ /// This way, the temporary `tmp0` that we create has type `Box<dyn Debug>`, which is sized.
+ /// The value passed to the call (`*tmp0`) still has the `dyn Debug` type -- but the way that
+ /// calls are compiled means that this parameter will be passed "by reference", meaning that we
+ /// will actually provide a pointer to the interior of the box, and not move the `dyn Debug`
+ /// value to the stack.
+ ///
+ /// See #68034 for more details.
+ pub(crate) fn as_local_call_operand(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
+ let local_scope = self.local_scope();
+ self.as_call_operand(block, Some(local_scope), expr)
+ }
+
+ /// Compile `expr` into a value that can be used as an operand.
+ /// If `expr` is a place like `x`, this will introduce a
+ /// temporary `tmp = x`, so that we capture the value of `x` at
+ /// this time.
+ ///
+ /// If we end up needing to create a temporary, then we will use
+ /// `local_info` as its `LocalInfo`, unless `as_temporary`
+ /// has already assigned it a non-`None` `LocalInfo`.
+ /// Normally, you should use `None` for `local_info`.
+ ///
+ /// The operand is known to be live until the end of `scope`.
+ ///
+ /// Like `as_local_operand`, except that the argument will
+ /// not be valid once `scope` ends.
+ #[instrument(level = "debug", skip(self, scope))]
+ pub(crate) fn as_operand(
+ &mut self,
+ mut block: BasicBlock,
+ scope: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ local_info: Option<Box<LocalInfo<'tcx>>>,
+ needs_temporary: NeedsTemporary,
+ ) -> BlockAnd<Operand<'tcx>> {
+ let this = self;
+
+ if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind {
+ let source_info = this.source_info(expr.span);
+ let region_scope = (region_scope, source_info);
+ return this.in_scope(region_scope, lint_level, |this| {
+ this.as_operand(block, scope, &this.thir[value], local_info, needs_temporary)
+ });
+ }
+
+ let category = Category::of(&expr.kind).unwrap();
+ debug!(?category, ?expr.kind);
+ match category {
+ Category::Constant if let NeedsTemporary::No = needs_temporary || !expr.ty.needs_drop(this.tcx, this.param_env) => {
+ let constant = this.as_constant(expr);
+ block.and(Operand::Constant(Box::new(constant)))
+ }
+ Category::Constant | Category::Place | Category::Rvalue(..) => {
+ let operand = unpack!(block = this.as_temp(block, scope, expr, Mutability::Mut));
+ if this.local_decls[operand].local_info.is_none() {
+ this.local_decls[operand].local_info = local_info;
+ }
+ block.and(Operand::Move(Place::from(operand)))
+ }
+ }
+ }
+
+ pub(crate) fn as_call_operand(
+ &mut self,
+ mut block: BasicBlock,
+ scope: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
+ debug!("as_call_operand(block={:?}, expr={:?})", block, expr);
+ let this = self;
+
+ if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind {
+ let source_info = this.source_info(expr.span);
+ let region_scope = (region_scope, source_info);
+ return this.in_scope(region_scope, lint_level, |this| {
+ this.as_call_operand(block, scope, &this.thir[value])
+ });
+ }
+
+ let tcx = this.tcx;
+
+ if tcx.features().unsized_fn_params {
+ let ty = expr.ty;
+ let span = expr.span;
+ let param_env = this.param_env;
+
+ if !ty.is_sized(tcx.at(span), param_env) {
+ // !sized means !copy, so this is an unsized move
+ assert!(!ty.is_copy_modulo_regions(tcx.at(span), param_env));
+
+ // As described above, detect the case where we are passing a value of unsized
+ // type, and that value is coming from the deref of a box.
+ if let ExprKind::Deref { arg } = expr.kind {
+ // Generate let tmp0 = arg0
+ let operand = unpack!(
+ block = this.as_temp(block, scope, &this.thir[arg], Mutability::Mut)
+ );
+
+ // Return the operand *tmp0 to be used as the call argument
+ let place = Place {
+ local: operand,
+ projection: tcx.intern_place_elems(&[PlaceElem::Deref]),
+ };
+
+ return block.and(Operand::Move(place));
+ }
+ }
+ }
+
+ this.as_operand(block, scope, expr, None, NeedsTemporary::Maybe)
+ }
+}
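
As read here, the guard on the `Category::Constant` arm of `as_operand` lets a constant be used directly whenever the caller signalled `NeedsTemporary::No` or the type has no drop glue; every other case is spilled to a fresh local via `as_temp` and moved out of it. A simplified, self-contained restatement of that decision (the enums are stand-ins, not rustc's types):

```rust
#[derive(PartialEq)]
enum NeedsTemporary { No, Maybe }

#[allow(dead_code)]
enum Category { Constant, Place, Rvalue }

// Mirrors the match in `as_operand`: which operand strategy applies?
fn operand_strategy(cat: Category, needs_temp: NeedsTemporary, needs_drop: bool) -> &'static str {
    match cat {
        Category::Constant if needs_temp == NeedsTemporary::No || !needs_drop => {
            "Operand::Constant (use the constant in place)"
        }
        _ => "as_temp, then Operand::Move of the fresh local",
    }
}

fn main() {
    // A droppable constant still needs a temporary unless the caller opts out.
    assert_eq!(
        operand_strategy(Category::Constant, NeedsTemporary::Maybe, true),
        "as_temp, then Operand::Move of the fresh local"
    );
    assert_eq!(
        operand_strategy(Category::Constant, NeedsTemporary::Maybe, false),
        "Operand::Constant (use the constant in place)"
    );
}
```
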
diff --git a/compiler/rustc_mir_build/src/build/expr/as_place.rs b/compiler/rustc_mir_build/src/build/expr/as_place.rs
new file mode 100644
index 000000000..0c06aad4e
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_place.rs
@@ -0,0 +1,820 @@
+//! See docs in `build/expr/mod.rs`.
+
+use crate::build::expr::category::Category;
+use crate::build::ForGuard::{OutsideGuard, RefWithinGuard};
+use crate::build::{BlockAnd, BlockAndExtension, Builder};
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::hir::place::Projection as HirProjection;
+use rustc_middle::hir::place::ProjectionKind as HirProjectionKind;
+use rustc_middle::middle::region;
+use rustc_middle::mir::AssertKind::BoundsCheck;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::AdtDef;
+use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty, TyCtxt, Variance};
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+
+use rustc_index::vec::Idx;
+
+use std::iter;
+
+/// The "outermost" place that holds this value.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub(crate) enum PlaceBase {
+ /// Denotes the start of a `Place`.
+ Local(Local),
+
+ /// When building a place for an expression within a closure, the place might start off as a
+ /// captured path. When `capture_disjoint_fields` is enabled, we might not know the capture
+ /// index (within the desugared closure) of the captured path until most of the projections
+ /// are applied. We use `PlaceBase::Upvar` to keep track of the root variable off of which the
+ /// captured path starts, the closure the capture belongs to, and the trait the closure
+ /// implements.
+ ///
+ /// Once we have figured out the capture index, we can convert the place builder to start from
+ /// `PlaceBase::Local`.
+ ///
+ /// Consider the following example
+ /// ```rust
+ /// let t = (((10, 10), 10), 10);
+ ///
+ /// let c = || {
+ /// println!("{}", t.0.0.0);
+ /// };
+ /// ```
+ /// Here the THIR expression for `t.0.0.0` will be something like
+ ///
+ /// ```ignore (illustrative)
+ /// * Field(0)
+ /// * Field(0)
+ /// * Field(0)
+ /// * UpvarRef(t)
+ /// ```
+ ///
+ /// When `capture_disjoint_fields` is enabled, `t.0.0.0` is captured and we won't be able to
+ /// figure out that it is captured until all the `Field` projections are applied.
+ Upvar {
+ /// HirId of the upvar
+ var_hir_id: LocalVarId,
+ /// DefId of the closure
+ closure_def_id: LocalDefId,
+ /// The trait closure implements, `Fn`, `FnMut`, `FnOnce`
+ closure_kind: ty::ClosureKind,
+ },
+}
+
+/// `PlaceBuilder` is used to create places during MIR construction. It allows you to "build up" a
+/// place by pushing more and more projections onto the end, and then convert the final set into a
+/// place using the `into_place` method.
+///
+/// This is used internally when building a place for an expression like `a.b.c`. The fields `b`
+/// and `c` can be progressively pushed onto the place builder that is created when converting `a`.
+#[derive(Clone, Debug, PartialEq)]
+pub(crate) struct PlaceBuilder<'tcx> {
+ base: PlaceBase,
+ projection: Vec<PlaceElem<'tcx>>,
+}
+
+/// Given a list of MIR projections, convert them to a list of HIR `ProjectionKind`s.
+/// The projections are truncated to represent a path that might be captured by a
+/// closure/generator. This implies the vector returned from this function doesn't contain
+/// ProjectionElems `Downcast`, `ConstantIndex`, `Index`, or `Subslice` because those will never be
+/// part of a path that is captured by a closure. We stop applying projections once we see the first
+/// projection that isn't captured by a closure.
+fn convert_to_hir_projections_and_truncate_for_capture<'tcx>(
+ mir_projections: &[PlaceElem<'tcx>],
+) -> Vec<HirProjectionKind> {
+ let mut hir_projections = Vec::new();
+ let mut variant = None;
+
+ for mir_projection in mir_projections {
+ let hir_projection = match mir_projection {
+ ProjectionElem::Deref => HirProjectionKind::Deref,
+ ProjectionElem::Field(field, _) => {
+ let variant = variant.unwrap_or(VariantIdx::new(0));
+ HirProjectionKind::Field(field.index() as u32, variant)
+ }
+ ProjectionElem::Downcast(.., idx) => {
+ // We don't expect to see multi-variant enums here, as earlier
+ // phases will have truncated them already. However, there can
+ // still be downcasts, thanks to single-variant enums.
+ // We keep track of VariantIdx so we can use this information
+ // if the next ProjectionElem is a Field.
+ variant = Some(*idx);
+ continue;
+ }
+ ProjectionElem::Index(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => {
+ // We don't capture array-access projections.
+ // We can stop here as arrays are captured completely.
+ break;
+ }
+ };
+ variant = None;
+ hir_projections.push(hir_projection);
+ }
+
+ hir_projections
+}
+
+/// Return true if `proj_possible_ancestor` represents an ancestor path
+/// to `proj_capture`, or if `proj_possible_ancestor` is the same as `proj_capture`,
+/// assuming they both start off of the same root variable.
+///
+/// **Note:** It's the caller's responsibility to ensure that both lists of projections
+/// start off of the same root variable.
+///
+/// Eg: 1. `foo.x` which is represented using `projections=[Field(x)]` is an ancestor of
+/// `foo.x.y` which is represented using `projections=[Field(x), Field(y)]`.
+/// Note both `foo.x` and `foo.x.y` start off of the same root variable `foo`.
+/// 2. Since we only look at the projections here, this function will return `bar.x` as a valid
+/// ancestor of `foo.x.y`. It's the caller's responsibility to ensure that both projection
+/// lists are being applied to the same root variable.
+fn is_ancestor_or_same_capture(
+ proj_possible_ancestor: &[HirProjectionKind],
+ proj_capture: &[HirProjectionKind],
+) -> bool {
+ // We want to make sure `is_ancestor_or_same_capture("x.0.0", "x.0")` returns false.
+ // Therefore we can't just check if all projections are the same in the zipped iterator below.
+ if proj_possible_ancestor.len() > proj_capture.len() {
+ return false;
+ }
+
+ iter::zip(proj_possible_ancestor, proj_capture).all(|(a, b)| a == b)
+}
+
+/// Computes the index of a capture within the desugared closure struct, given the closure's
+/// `closure_min_captures` map and the capture's index within the
+/// `ty::MinCaptureList` of the root variable `var_hir_id`.
+fn compute_capture_idx<'tcx>(
+ closure_min_captures: &ty::RootVariableMinCaptureList<'tcx>,
+ var_hir_id: LocalVarId,
+ root_var_idx: usize,
+) -> usize {
+ let mut res = 0;
+ for (var_id, capture_list) in closure_min_captures {
+ if *var_id == var_hir_id.0 {
+ res += root_var_idx;
+ break;
+ } else {
+ res += capture_list.len();
+ }
+ }
+
+ res
+}
+
+/// Given a closure, returns the index of a capture within the desugared closure struct and the
+/// `ty::CapturedPlace` which is the ancestor of the Place represented using the `var_hir_id`
+/// and `projection`.
+///
+/// Note there will be at most one ancestor for any given Place.
+///
+/// Returns `None` when no matching ancestor is found.
+fn find_capture_matching_projections<'a, 'tcx>(
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ var_hir_id: LocalVarId,
+ closure_def_id: LocalDefId,
+ projections: &[PlaceElem<'tcx>],
+) -> Option<(usize, &'a ty::CapturedPlace<'tcx>)> {
+ let closure_min_captures = typeck_results.closure_min_captures.get(&closure_def_id)?;
+ let root_variable_min_captures = closure_min_captures.get(&var_hir_id.0)?;
+
+ let hir_projections = convert_to_hir_projections_and_truncate_for_capture(projections);
+
+ // If an ancestor is found, `idx` is the index within the list of captured places
+ // for root variable `var_hir_id` and `capture` is the `ty::CapturedPlace` itself.
+ let (idx, capture) = root_variable_min_captures.iter().enumerate().find(|(_, capture)| {
+ let possible_ancestor_proj_kinds: Vec<_> =
+ capture.place.projections.iter().map(|proj| proj.kind).collect();
+ is_ancestor_or_same_capture(&possible_ancestor_proj_kinds, &hir_projections)
+ })?;
+
+ // Convert index to be from the perspective of the entire closure_min_captures map
+ // instead of just the root variable capture list
+ Some((compute_capture_idx(closure_min_captures, var_hir_id, idx), capture))
+}
+
+/// Takes a PlaceBuilder and resolves the upvar (if any) within it, so that the
+/// `PlaceBuilder` now starts from `PlaceBase::Local`.
+///
+/// Returns a `Result`, with the `Err` carrying the `PlaceBuilder` (`from_builder`) whose capture was not found.
+fn to_upvars_resolved_place_builder<'a, 'tcx>(
+ from_builder: PlaceBuilder<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+) -> Result<PlaceBuilder<'tcx>, PlaceBuilder<'tcx>> {
+ match from_builder.base {
+ PlaceBase::Local(_) => Ok(from_builder),
+ PlaceBase::Upvar { var_hir_id, closure_def_id, closure_kind } => {
+ let mut upvar_resolved_place_builder = PlaceBuilder::from(ty::CAPTURE_STRUCT_LOCAL);
+ match closure_kind {
+ ty::ClosureKind::Fn | ty::ClosureKind::FnMut => {
+ upvar_resolved_place_builder = upvar_resolved_place_builder.deref();
+ }
+ ty::ClosureKind::FnOnce => {}
+ }
+
+ let Some((capture_index, capture)) =
+ find_capture_matching_projections(
+ typeck_results,
+ var_hir_id,
+ closure_def_id,
+ &from_builder.projection,
+ ) else {
+ let closure_span = tcx.def_span(closure_def_id);
+ if !enable_precise_capture(tcx, closure_span) {
+ bug!(
+ "No associated capture found for {:?}[{:#?}] even though \
+ capture_disjoint_fields isn't enabled",
+ var_hir_id,
+ from_builder.projection
+ )
+ } else {
+ debug!(
+ "No associated capture found for {:?}[{:#?}]",
+ var_hir_id, from_builder.projection,
+ );
+ }
+ return Err(from_builder);
+ };
+
+ // We won't be building MIR if the closure wasn't local
+ let closure_hir_id = tcx.hir().local_def_id_to_hir_id(closure_def_id);
+ let closure_ty = typeck_results.node_type(closure_hir_id);
+
+ let substs = match closure_ty.kind() {
+ ty::Closure(_, substs) => ty::UpvarSubsts::Closure(substs),
+ ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs),
+ _ => bug!("Lowering capture for non-closure type {:?}", closure_ty),
+ };
+
+ // Access the capture by accessing the field within the Closure struct.
+ //
+ // We must have inferred the capture types since we are building MIR, therefore
+ // it's safe to call `tuple_element_ty` and we can unwrap here because
+ // we know that the capture exists and is the `capture_index`-th capture.
+ let var_ty = substs.tupled_upvars_ty().tuple_fields()[capture_index];
+
+ upvar_resolved_place_builder =
+ upvar_resolved_place_builder.field(Field::new(capture_index), var_ty);
+
+ // If the variable is captured via ByRef(Immutable/Mutable) Borrow,
+ // we need to deref it
+ upvar_resolved_place_builder = match capture.info.capture_kind {
+ ty::UpvarCapture::ByRef(_) => upvar_resolved_place_builder.deref(),
+ ty::UpvarCapture::ByValue => upvar_resolved_place_builder,
+ };
+
+ // We used some of the projections to build the capture itself,
+ // now we apply the remaining to the upvar resolved place.
+ let remaining_projections = strip_prefix(
+ capture.place.base_ty,
+ from_builder.projection,
+ &capture.place.projections,
+ );
+ upvar_resolved_place_builder.projection.extend(remaining_projections);
+
+ Ok(upvar_resolved_place_builder)
+ }
+ }
+}
+
+/// Returns projections remaining after stripping an initial prefix of HIR
+/// projections.
+///
+/// Supports only HIR projection kinds that represent a path that might be
+/// captured by a closure or a generator; the `Index` and `Subslice`
+/// projection kinds are unsupported.
+fn strip_prefix<'tcx>(
+ mut base_ty: Ty<'tcx>,
+ projections: Vec<PlaceElem<'tcx>>,
+ prefix_projections: &[HirProjection<'tcx>],
+) -> impl Iterator<Item = PlaceElem<'tcx>> {
+ let mut iter = projections.into_iter();
+ for projection in prefix_projections {
+ match projection.kind {
+ HirProjectionKind::Deref => {
+ assert!(matches!(iter.next(), Some(ProjectionElem::Deref)));
+ }
+ HirProjectionKind::Field(..) => {
+ if base_ty.is_enum() {
+ assert!(matches!(iter.next(), Some(ProjectionElem::Downcast(..))));
+ }
+ assert!(matches!(iter.next(), Some(ProjectionElem::Field(..))));
+ }
+ HirProjectionKind::Index | HirProjectionKind::Subslice => {
+ bug!("unexpected projection kind: {:?}", projection);
+ }
+ }
+ base_ty = projection.ty;
+ }
+ iter
+}
+
+impl<'tcx> PlaceBuilder<'tcx> {
+ pub(crate) fn into_place<'a>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> Place<'tcx> {
+ if let PlaceBase::Local(local) = self.base {
+ Place { local, projection: tcx.intern_place_elems(&self.projection) }
+ } else {
+ self.expect_upvars_resolved(tcx, typeck_results).into_place(tcx, typeck_results)
+ }
+ }
+
+ fn expect_upvars_resolved<'a>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> PlaceBuilder<'tcx> {
+ to_upvars_resolved_place_builder(self, tcx, typeck_results).unwrap()
+ }
+
+ /// Attempts to resolve the `PlaceBuilder`.
+ /// On success, it will return the resolved `PlaceBuilder`.
+ /// On failure, it will return itself.
+ ///
+ /// Upvar resolution may fail for a `PlaceBuilder` when attempting to
+ /// resolve a disjoint field whose root variable is not captured
+ /// (destructured assignments) or when attempting to resolve a root
+ /// variable (discriminant matching with only wildcard arm) that is
+ /// not captured. This can happen because the final mir that will be
+ /// generated doesn't require a read for this place. Failures will only
+ /// happen inside closures.
+ pub(crate) fn try_upvars_resolved<'a>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> Result<PlaceBuilder<'tcx>, PlaceBuilder<'tcx>> {
+ to_upvars_resolved_place_builder(self, tcx, typeck_results)
+ }
+
+ pub(crate) fn base(&self) -> PlaceBase {
+ self.base
+ }
+
+ pub(crate) fn field(self, f: Field, ty: Ty<'tcx>) -> Self {
+ self.project(PlaceElem::Field(f, ty))
+ }
+
+ pub(crate) fn deref(self) -> Self {
+ self.project(PlaceElem::Deref)
+ }
+
+ pub(crate) fn downcast(self, adt_def: AdtDef<'tcx>, variant_index: VariantIdx) -> Self {
+ self.project(PlaceElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index))
+ }
+
+ fn index(self, index: Local) -> Self {
+ self.project(PlaceElem::Index(index))
+ }
+
+ pub(crate) fn project(mut self, elem: PlaceElem<'tcx>) -> Self {
+ self.projection.push(elem);
+ self
+ }
+}
+
+impl<'tcx> From<Local> for PlaceBuilder<'tcx> {
+ fn from(local: Local) -> Self {
+ Self { base: PlaceBase::Local(local), projection: Vec::new() }
+ }
+}
+
+impl<'tcx> From<PlaceBase> for PlaceBuilder<'tcx> {
+ fn from(base: PlaceBase) -> Self {
+ Self { base, projection: Vec::new() }
+ }
+}
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Compile `expr`, yielding a place that we can move from etc.
+ ///
+ /// WARNING: Any user code might:
+ /// * Invalidate any slice bounds checks performed.
+ /// * Change the address that this `Place` refers to.
+ /// * Modify the memory that this place refers to.
+ /// * Invalidate the memory that this place refers to, this will be caught
+ /// by borrow checking.
+ ///
+ /// Extra care is needed if any user code is allowed to run between calling
+ /// this method and using it, as is the case for `match` and index
+ /// expressions.
+ pub(crate) fn as_place(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Place<'tcx>> {
+ let place_builder = unpack!(block = self.as_place_builder(block, expr));
+ block.and(place_builder.into_place(self.tcx, self.typeck_results))
+ }
+
+ /// This is used when constructing a compound `Place`, so that we can avoid creating
+ /// intermediate `Place` values until we know the full set of projections.
+ pub(crate) fn as_place_builder(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ self.expr_as_place(block, expr, Mutability::Mut, None)
+ }
+
+ /// Compile `expr`, yielding a place that we can move from etc.
+ /// Mutability note: The caller of this method promises only to read from the resulting
+ /// place. The place itself may or may not be mutable:
+ /// * If this expr is a place expr like a.b, then we will return that place.
+ /// * Otherwise, a temporary is created: in that event, it will be an immutable temporary.
+ pub(crate) fn as_read_only_place(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Place<'tcx>> {
+ let place_builder = unpack!(block = self.as_read_only_place_builder(block, expr));
+ block.and(place_builder.into_place(self.tcx, self.typeck_results))
+ }
+
+ /// This is used when constructing a compound `Place`, so that we can avoid creating
+ /// intermediate `Place` values until we know the full set of projections.
+ /// Mutability note: The caller of this method promises only to read from the resulting
+ /// place. The place itself may or may not be mutable:
+ /// * If this expr is a place expr like a.b, then we will return that place.
+ /// * Otherwise, a temporary is created: in that event, it will be an immutable temporary.
+ fn as_read_only_place_builder(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ self.expr_as_place(block, expr, Mutability::Not, None)
+ }
+
+ fn expr_as_place(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ mutability: Mutability,
+ fake_borrow_temps: Option<&mut Vec<Local>>,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ debug!("expr_as_place(block={:?}, expr={:?}, mutability={:?})", block, expr, mutability);
+
+ let this = self;
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+ match expr.kind {
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ this.in_scope((region_scope, source_info), lint_level, |this| {
+ this.expr_as_place(block, &this.thir[value], mutability, fake_borrow_temps)
+ })
+ }
+ ExprKind::Field { lhs, variant_index, name } => {
+ let lhs = &this.thir[lhs];
+ let mut place_builder =
+ unpack!(block = this.expr_as_place(block, lhs, mutability, fake_borrow_temps,));
+ if let ty::Adt(adt_def, _) = lhs.ty.kind() {
+ if adt_def.is_enum() {
+ place_builder = place_builder.downcast(*adt_def, variant_index);
+ }
+ }
+ block.and(place_builder.field(name, expr.ty))
+ }
+ ExprKind::Deref { arg } => {
+ let place_builder = unpack!(
+ block =
+ this.expr_as_place(block, &this.thir[arg], mutability, fake_borrow_temps,)
+ );
+ block.and(place_builder.deref())
+ }
+ ExprKind::Index { lhs, index } => this.lower_index_expression(
+ block,
+ &this.thir[lhs],
+ &this.thir[index],
+ mutability,
+ fake_borrow_temps,
+ expr.temp_lifetime,
+ expr_span,
+ source_info,
+ ),
+ ExprKind::UpvarRef { closure_def_id, var_hir_id } => {
+ this.lower_captured_upvar(block, closure_def_id.expect_local(), var_hir_id)
+ }
+
+ ExprKind::VarRef { id } => {
+ let place_builder = if this.is_bound_var_in_guard(id) {
+ let index = this.var_local_id(id, RefWithinGuard);
+ PlaceBuilder::from(index).deref()
+ } else {
+ let index = this.var_local_id(id, OutsideGuard);
+ PlaceBuilder::from(index)
+ };
+ block.and(place_builder)
+ }
+
+ ExprKind::PlaceTypeAscription { source, user_ty } => {
+ let place_builder = unpack!(
+ block = this.expr_as_place(
+ block,
+ &this.thir[source],
+ mutability,
+ fake_borrow_temps,
+ )
+ );
+ if let Some(user_ty) = user_ty {
+ let annotation_index =
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span: source_info.span,
+ user_ty,
+ inferred_ty: expr.ty,
+ });
+
+ let place = place_builder.clone().into_place(this.tcx, this.typeck_results);
+ this.cfg.push(
+ block,
+ Statement {
+ source_info,
+ kind: StatementKind::AscribeUserType(
+ Box::new((
+ place,
+ UserTypeProjection { base: annotation_index, projs: vec![] },
+ )),
+ Variance::Invariant,
+ ),
+ },
+ );
+ }
+ block.and(place_builder)
+ }
+ ExprKind::ValueTypeAscription { source, user_ty } => {
+ let source = &this.thir[source];
+ let temp =
+ unpack!(block = this.as_temp(block, source.temp_lifetime, source, mutability));
+ if let Some(user_ty) = user_ty {
+ let annotation_index =
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span: source_info.span,
+ user_ty,
+ inferred_ty: expr.ty,
+ });
+ this.cfg.push(
+ block,
+ Statement {
+ source_info,
+ kind: StatementKind::AscribeUserType(
+ Box::new((
+ Place::from(temp),
+ UserTypeProjection { base: annotation_index, projs: vec![] },
+ )),
+ Variance::Invariant,
+ ),
+ },
+ );
+ }
+ block.and(PlaceBuilder::from(temp))
+ }
+
+ ExprKind::Array { .. }
+ | ExprKind::Tuple { .. }
+ | ExprKind::Adt { .. }
+ | ExprKind::Closure { .. }
+ | ExprKind::Unary { .. }
+ | ExprKind::Binary { .. }
+ | ExprKind::LogicalOp { .. }
+ | ExprKind::Box { .. }
+ | ExprKind::Cast { .. }
+ | ExprKind::Use { .. }
+ | ExprKind::NeverToAny { .. }
+ | ExprKind::Pointer { .. }
+ | ExprKind::Repeat { .. }
+ | ExprKind::Borrow { .. }
+ | ExprKind::AddressOf { .. }
+ | ExprKind::Match { .. }
+ | ExprKind::If { .. }
+ | ExprKind::Loop { .. }
+ | ExprKind::Block { .. }
+ | ExprKind::Let { .. }
+ | ExprKind::Assign { .. }
+ | ExprKind::AssignOp { .. }
+ | ExprKind::Break { .. }
+ | ExprKind::Continue { .. }
+ | ExprKind::Return { .. }
+ | ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ZstLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::ConstBlock { .. }
+ | ExprKind::StaticRef { .. }
+ | ExprKind::InlineAsm { .. }
+ | ExprKind::Yield { .. }
+ | ExprKind::ThreadLocalRef(_)
+ | ExprKind::Call { .. } => {
+ // these are not places, so we need to make a temporary.
+ debug_assert!(!matches!(Category::of(&expr.kind), Some(Category::Place)));
+ let temp =
+ unpack!(block = this.as_temp(block, expr.temp_lifetime, expr, mutability));
+ block.and(PlaceBuilder::from(temp))
+ }
+ }
+ }
+
+ /// Lower a captured upvar. Note we might not know the actual capture index,
+ /// so we create a place starting from `PlaceBase::Upvar`, which will be resolved
+ /// once all projections that allow us to identify a capture have been applied.
+ fn lower_captured_upvar(
+ &mut self,
+ block: BasicBlock,
+ closure_def_id: LocalDefId,
+ var_hir_id: LocalVarId,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ let closure_ty =
+ self.typeck_results.node_type(self.tcx.hir().local_def_id_to_hir_id(closure_def_id));
+
+ let closure_kind = if let ty::Closure(_, closure_substs) = closure_ty.kind() {
+ self.infcx.closure_kind(closure_substs).unwrap()
+ } else {
+ // Generators are considered FnOnce.
+ ty::ClosureKind::FnOnce
+ };
+
+ block.and(PlaceBuilder::from(PlaceBase::Upvar { var_hir_id, closure_def_id, closure_kind }))
+ }
+
+ /// Lower an index expression
+ ///
+ /// This has two complications:
+ ///
+ /// * We need to do a bounds check.
+ /// * We need to ensure that the bounds check can't be invalidated using an
+ /// expression like `x[1][{x = y; 2}]`. We use fake borrows here to ensure
+ /// that this is the case.
+ fn lower_index_expression(
+ &mut self,
+ mut block: BasicBlock,
+ base: &Expr<'tcx>,
+ index: &Expr<'tcx>,
+ mutability: Mutability,
+ fake_borrow_temps: Option<&mut Vec<Local>>,
+ temp_lifetime: Option<region::Scope>,
+ expr_span: Span,
+ source_info: SourceInfo,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ let base_fake_borrow_temps = &mut Vec::new();
+ let is_outermost_index = fake_borrow_temps.is_none();
+ let fake_borrow_temps = fake_borrow_temps.unwrap_or(base_fake_borrow_temps);
+
+ let mut base_place =
+ unpack!(block = self.expr_as_place(block, base, mutability, Some(fake_borrow_temps),));
+
+ // Making this a *fresh* temporary means we do not have to worry about
+ // the index changing later: Nothing will ever change this temporary.
+ // The "retagging" transformation (for Stacked Borrows) relies on this.
+ let idx = unpack!(block = self.as_temp(block, temp_lifetime, index, Mutability::Not,));
+
+ block = self.bounds_check(block, base_place.clone(), idx, expr_span, source_info);
+
+ if is_outermost_index {
+ self.read_fake_borrows(block, fake_borrow_temps, source_info)
+ } else {
+ base_place = base_place.expect_upvars_resolved(self.tcx, self.typeck_results);
+ self.add_fake_borrows_of_base(
+ &base_place,
+ block,
+ fake_borrow_temps,
+ expr_span,
+ source_info,
+ );
+ }
+
+ block.and(base_place.index(idx))
+ }
+
+ fn bounds_check(
+ &mut self,
+ block: BasicBlock,
+ slice: PlaceBuilder<'tcx>,
+ index: Local,
+ expr_span: Span,
+ source_info: SourceInfo,
+ ) -> BasicBlock {
+ let usize_ty = self.tcx.types.usize;
+ let bool_ty = self.tcx.types.bool;
+ // bounds check:
+ let len = self.temp(usize_ty, expr_span);
+ let lt = self.temp(bool_ty, expr_span);
+
+ // len = len(slice)
+ self.cfg.push_assign(
+ block,
+ source_info,
+ len,
+ Rvalue::Len(slice.into_place(self.tcx, self.typeck_results)),
+ );
+ // lt = idx < len
+ self.cfg.push_assign(
+ block,
+ source_info,
+ lt,
+ Rvalue::BinaryOp(
+ BinOp::Lt,
+ Box::new((Operand::Copy(Place::from(index)), Operand::Copy(len))),
+ ),
+ );
+ let msg = BoundsCheck { len: Operand::Move(len), index: Operand::Copy(Place::from(index)) };
+ // assert!(lt, "...")
+ self.assert(block, Operand::Move(lt), true, msg, expr_span)
+ }
+
+ fn add_fake_borrows_of_base(
+ &mut self,
+ base_place: &PlaceBuilder<'tcx>,
+ block: BasicBlock,
+ fake_borrow_temps: &mut Vec<Local>,
+ expr_span: Span,
+ source_info: SourceInfo,
+ ) {
+ let tcx = self.tcx;
+ let local = match base_place.base {
+ PlaceBase::Local(local) => local,
+ PlaceBase::Upvar { .. } => bug!("Expected PlaceBase::Local, found Upvar"),
+ };
+
+ let place_ty = Place::ty_from(local, &base_place.projection, &self.local_decls, tcx);
+ if let ty::Slice(_) = place_ty.ty.kind() {
+ // We need to create fake borrows to ensure that the bounds
+ // check that we just did stays valid. Since we can't assign to
+ // unsized values, we only need to ensure that none of the
+ // pointers in the base place are modified.
+ for (idx, elem) in base_place.projection.iter().enumerate().rev() {
+ match elem {
+ ProjectionElem::Deref => {
+ let fake_borrow_deref_ty = Place::ty_from(
+ local,
+ &base_place.projection[..idx],
+ &self.local_decls,
+ tcx,
+ )
+ .ty;
+ let fake_borrow_ty =
+ tcx.mk_imm_ref(tcx.lifetimes.re_erased, fake_borrow_deref_ty);
+ let fake_borrow_temp =
+ self.local_decls.push(LocalDecl::new(fake_borrow_ty, expr_span));
+ let projection = tcx.intern_place_elems(&base_place.projection[..idx]);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ fake_borrow_temp.into(),
+ Rvalue::Ref(
+ tcx.lifetimes.re_erased,
+ BorrowKind::Shallow,
+ Place { local, projection },
+ ),
+ );
+ fake_borrow_temps.push(fake_borrow_temp);
+ }
+ ProjectionElem::Index(_) => {
+ let index_ty = Place::ty_from(
+ local,
+ &base_place.projection[..idx],
+ &self.local_decls,
+ tcx,
+ );
+ match index_ty.ty.kind() {
+ // A previous index expression has already
+ // added any fake borrows needed beyond this point.
+ ty::Slice(_) => break,
+ ty::Array(..) => (),
+ _ => bug!("unexpected index base"),
+ }
+ }
+ ProjectionElem::Field(..)
+ | ProjectionElem::Downcast(..)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => (),
+ }
+ }
+ }
+ }
+
+ fn read_fake_borrows(
+ &mut self,
+ bb: BasicBlock,
+ fake_borrow_temps: &mut Vec<Local>,
+ source_info: SourceInfo,
+ ) {
+ // All indexes have been evaluated now, read all of the
+ // fake borrows so that they are live across those index
+ // expressions.
+ for temp in fake_borrow_temps {
+ self.cfg.push_fake_read(bb, source_info, FakeReadCause::ForIndex, Place::from(*temp));
+ }
+ }
+}
+
+/// Precise capture is enabled if the feature gate `capture_disjoint_fields` is enabled or if
+/// the user is using Rust Edition 2021 or higher.
+fn enable_precise_capture(tcx: TyCtxt<'_>, closure_span: Span) -> bool {
+ tcx.features().capture_disjoint_fields || closure_span.rust_2021()
+}
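
Two ideas in `as_place.rs` lend themselves to a compact illustration: `PlaceBuilder` accumulates projections in a plain `Vec` and only turns them into a final place at the end, and capture matching reduces to a non-strict prefix test over projection lists (`is_ancestor_or_same_capture`). A self-contained sketch with simplified projection elements (rustc's `PlaceElem` also carries types and interned data):

```rust
#[derive(Debug, PartialEq)]
enum Elem { Deref, Field(u32) }

struct PlaceBuilder {
    local: u32,
    projection: Vec<Elem>,
}

impl PlaceBuilder {
    fn new(local: u32) -> Self { Self { local, projection: Vec::new() } }
    // Like `PlaceBuilder::project`: push one more projection, by value.
    fn project(mut self, e: Elem) -> Self { self.projection.push(e); self }
    fn deref(self) -> Self { self.project(Elem::Deref) }
    fn field(self, f: u32) -> Self { self.project(Elem::Field(f)) }
    // Like `into_place`, minus the interning.
    fn into_place(self) -> (u32, Vec<Elem>) { (self.local, self.projection) }
}

// The non-strict prefix test behind `find_capture_matching_projections`.
fn is_ancestor_or_same(ancestor: &[Elem], path: &[Elem]) -> bool {
    ancestor.len() <= path.len() && ancestor.iter().zip(path).all(|(a, b)| a == b)
}

fn main() {
    // Build a place shaped like `(*_1.0).2`, projection by projection.
    let (local, projection) = PlaceBuilder::new(1).field(0).deref().field(2).into_place();
    assert_eq!(local, 1);
    assert_eq!(projection, vec![Elem::Field(0), Elem::Deref, Elem::Field(2)]);

    // `t.0` is an ancestor of `t.0.1`, but `t.0.0` is not an ancestor of `t.0`.
    assert!(is_ancestor_or_same(&[Elem::Field(0)], &[Elem::Field(0), Elem::Field(1)]));
    assert!(!is_ancestor_or_same(&[Elem::Field(0), Elem::Field(0)], &[Elem::Field(0)]));
}
```
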
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
new file mode 100644
index 000000000..15f2d17c4
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -0,0 +1,694 @@
+//! See docs in `build/expr/mod.rs`.
+
+use rustc_index::vec::Idx;
+use rustc_middle::ty::util::IntTypeExt;
+
+use crate::build::expr::as_place::PlaceBase;
+use crate::build::expr::category::{Category, RvalueFunc};
+use crate::build::{BlockAnd, BlockAndExtension, Builder, NeedsTemporary};
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::middle::region;
+use rustc_middle::mir::AssertKind;
+use rustc_middle::mir::Place;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::cast::CastTy;
+use rustc_middle::ty::{self, Ty, UpvarSubsts};
+use rustc_span::Span;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Returns an rvalue suitable for use until the end of the current
+ /// scope expression.
+ ///
+ /// The operand returned from this function will *not be valid* after
+ /// an ExprKind::Scope is passed, so please do *not* return it from
+ /// functions to avoid bad miscompiles.
+ pub(crate) fn as_local_rvalue(
+ &mut self,
+ block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Rvalue<'tcx>> {
+ let local_scope = self.local_scope();
+ self.as_rvalue(block, Some(local_scope), expr)
+ }
+
+ /// Compile `expr`, yielding an rvalue.
+ pub(crate) fn as_rvalue(
+ &mut self,
+ mut block: BasicBlock,
+ scope: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<Rvalue<'tcx>> {
+ debug!("expr_as_rvalue(block={:?}, scope={:?}, expr={:?})", block, scope, expr);
+
+ let this = self;
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+
+ match expr.kind {
+ ExprKind::ThreadLocalRef(did) => block.and(Rvalue::ThreadLocalRef(did)),
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ let region_scope = (region_scope, source_info);
+ this.in_scope(region_scope, lint_level, |this| {
+ this.as_rvalue(block, scope, &this.thir[value])
+ })
+ }
+ ExprKind::Repeat { value, count } => {
+ if Some(0) == count.try_eval_usize(this.tcx, this.param_env) {
+ this.build_zero_repeat(block, value, scope, source_info)
+ } else {
+ let value_operand = unpack!(
+ block = this.as_operand(
+ block,
+ scope,
+ &this.thir[value],
+ None,
+ NeedsTemporary::No
+ )
+ );
+ block.and(Rvalue::Repeat(value_operand, count))
+ }
+ }
+ ExprKind::Binary { op, lhs, rhs } => {
+ let lhs = unpack!(
+ block =
+ this.as_operand(block, scope, &this.thir[lhs], None, NeedsTemporary::Maybe)
+ );
+ let rhs = unpack!(
+ block =
+ this.as_operand(block, scope, &this.thir[rhs], None, NeedsTemporary::No)
+ );
+ this.build_binary_op(block, op, expr_span, expr.ty, lhs, rhs)
+ }
+ ExprKind::Unary { op, arg } => {
+ let arg = unpack!(
+ block =
+ this.as_operand(block, scope, &this.thir[arg], None, NeedsTemporary::No)
+ );
+ // Check for -MIN on signed integers
+ if this.check_overflow && op == UnOp::Neg && expr.ty.is_signed() {
+ let bool_ty = this.tcx.types.bool;
+
+ let minval = this.minval_literal(expr_span, expr.ty);
+ let is_min = this.temp(bool_ty, expr_span);
+
+ this.cfg.push_assign(
+ block,
+ source_info,
+ is_min,
+ Rvalue::BinaryOp(BinOp::Eq, Box::new((arg.to_copy(), minval))),
+ );
+
+ block = this.assert(
+ block,
+ Operand::Move(is_min),
+ false,
+ AssertKind::OverflowNeg(arg.to_copy()),
+ expr_span,
+ );
+ }
+ block.and(Rvalue::UnaryOp(op, arg))
+ }
+ ExprKind::Box { value } => {
+ let value = &this.thir[value];
+ let tcx = this.tcx;
+
+ // `exchange_malloc` is unsafe but box is safe, so need a new scope.
+ let synth_scope = this.new_source_scope(
+ expr_span,
+ LintLevel::Inherited,
+ Some(Safety::BuiltinUnsafe),
+ );
+ let synth_info = SourceInfo { span: expr_span, scope: synth_scope };
+
+ let size = this.temp(tcx.types.usize, expr_span);
+ this.cfg.push_assign(
+ block,
+ synth_info,
+ size,
+ Rvalue::NullaryOp(NullOp::SizeOf, value.ty),
+ );
+
+ let align = this.temp(tcx.types.usize, expr_span);
+ this.cfg.push_assign(
+ block,
+ synth_info,
+ align,
+ Rvalue::NullaryOp(NullOp::AlignOf, value.ty),
+ );
+
+ // malloc some memory of suitable size and align:
+ let exchange_malloc = Operand::function_handle(
+ tcx,
+ tcx.require_lang_item(LangItem::ExchangeMalloc, Some(expr_span)),
+ ty::List::empty(),
+ expr_span,
+ );
+ let storage = this.temp(tcx.mk_mut_ptr(tcx.types.u8), expr_span);
+ let success = this.cfg.start_new_block();
+ this.cfg.terminate(
+ block,
+ synth_info,
+ TerminatorKind::Call {
+ func: exchange_malloc,
+ args: vec![Operand::Move(size), Operand::Move(align)],
+ destination: storage,
+ target: Some(success),
+ cleanup: None,
+ from_hir_call: false,
+ fn_span: expr_span,
+ },
+ );
+ this.diverge_from(block);
+ block = success;
+
+ // The `Box<T>` temporary created here is not a part of the HIR,
+ // and therefore is not considered during generator auto-trait
+ // determination. See the comment about `box` at `yield_in_scope`.
+ let result = this.local_decls.push(LocalDecl::new(expr.ty, expr_span).internal());
+ this.cfg.push(
+ block,
+ Statement { source_info, kind: StatementKind::StorageLive(result) },
+ );
+ if let Some(scope) = scope {
+ // schedule a shallow free of that memory, lest we unwind:
+ this.schedule_drop_storage_and_value(expr_span, scope, result);
+ }
+
+ // Transmute `*mut u8` to the box (thus far, uninitialized):
+ let box_ = Rvalue::ShallowInitBox(Operand::Move(storage), value.ty);
+ this.cfg.push_assign(block, source_info, Place::from(result), box_);
+
+ // initialize the box contents:
+ unpack!(
+ block = this.expr_into_dest(
+ this.tcx.mk_place_deref(Place::from(result)),
+ block,
+ value
+ )
+ );
+ block.and(Rvalue::Use(Operand::Move(Place::from(result))))
+ }
+ ExprKind::Cast { source } => {
+ let source = &this.thir[source];
+
+ // Casting an enum to an integer is equivalent to computing the discriminant and casting the
+ // discriminant. Previously every backend had to repeat the logic for this operation. Now we
+ // create all the steps directly in MIR with operations all backends need to support anyway.
+ let (source, ty) = if let ty::Adt(adt_def, ..) = source.ty.kind() && adt_def.is_enum() {
+ let discr_ty = adt_def.repr().discr_type().to_ty(this.tcx);
+ let place = unpack!(block = this.as_place(block, source));
+ let discr = this.temp(discr_ty, source.span);
+ this.cfg.push_assign(
+ block,
+ source_info,
+ discr,
+ Rvalue::Discriminant(place),
+ );
+
+ (Operand::Move(discr), discr_ty)
+ } else {
+ let ty = source.ty;
+ let source = unpack!(
+ block = this.as_operand(block, scope, source, None, NeedsTemporary::No)
+ );
+ (source, ty)
+ };
+ let from_ty = CastTy::from_ty(ty);
+ let cast_ty = CastTy::from_ty(expr.ty);
+ let cast_kind = match (from_ty, cast_ty) {
+ (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Int(_))) => {
+ CastKind::PointerExposeAddress
+ }
+ (Some(CastTy::Int(_)), Some(CastTy::Ptr(_))) => {
+ CastKind::PointerFromExposedAddress
+ }
+ (_, _) => CastKind::Misc,
+ };
+ block.and(Rvalue::Cast(cast_kind, source, expr.ty))
+ }
+ ExprKind::Pointer { cast, source } => {
+ let source = unpack!(
+ block =
+ this.as_operand(block, scope, &this.thir[source], None, NeedsTemporary::No)
+ );
+ block.and(Rvalue::Cast(CastKind::Pointer(cast), source, expr.ty))
+ }
+ ExprKind::Array { ref fields } => {
+ // (*) We would (maybe) be closer to codegen if we
+ // handled this and other aggregate cases via
+ // `into()`, not `as_rvalue` -- in that case, instead
+ // of generating
+ //
+ // let tmp1 = ...1;
+ // let tmp2 = ...2;
+ // dest = Rvalue::Aggregate(Foo, [tmp1, tmp2])
+ //
+ // we could just generate
+ //
+ // dest.f = ...1;
+ // dest.g = ...2;
+ //
+ // The problem is that then we would need to:
+ //
+ // (a) have a more complex mechanism for handling
+ // partial cleanup;
+ // (b) distinguish the case where the type `Foo` has a
+ // destructor, in which case creating an instance
+ // as a whole "arms" the destructor, and you can't
+ // write individual fields; and,
+ // (c) handle the case where the type Foo has no
+ // fields. We don't want `let x: ();` to compile
+ // to the same MIR as `let x = ();`.
+
+ // first process the set of fields
+ let el_ty = expr.ty.sequence_element_type(this.tcx);
+ let fields: Vec<_> = fields
+ .into_iter()
+ .copied()
+ .map(|f| {
+ unpack!(
+ block = this.as_operand(
+ block,
+ scope,
+ &this.thir[f],
+ None,
+ NeedsTemporary::Maybe
+ )
+ )
+ })
+ .collect();
+
+ block.and(Rvalue::Aggregate(Box::new(AggregateKind::Array(el_ty)), fields))
+ }
+ ExprKind::Tuple { ref fields } => {
+ // see (*) above
+ // first process the set of fields
+ let fields: Vec<_> = fields
+ .into_iter()
+ .copied()
+ .map(|f| {
+ unpack!(
+ block = this.as_operand(
+ block,
+ scope,
+ &this.thir[f],
+ None,
+ NeedsTemporary::Maybe
+ )
+ )
+ })
+ .collect();
+
+ block.and(Rvalue::Aggregate(Box::new(AggregateKind::Tuple), fields))
+ }
+ ExprKind::Closure { closure_id, substs, ref upvars, movability, ref fake_reads } => {
+ // Convert the closure fake reads, if any, from `ExprRef` to mir `Place`
+ // and push the fake reads.
+ // This must come before creating the operands. This is required in case
+ // there is a fake read and a borrow of the same path, since otherwise the
+ // fake read might interfere with the borrow. Consider an example like this
+ // one:
+ // ```
+ // let mut x = 0;
+ // let c = || {
+ // &mut x; // mutable borrow of `x`
+ // match x { _ => () } // fake read of `x`
+ // };
+ // ```
+ //
+ for (thir_place, cause, hir_id) in fake_reads.into_iter() {
+ let place_builder =
+ unpack!(block = this.as_place_builder(block, &this.thir[*thir_place]));
+
+ if let Ok(place_builder_resolved) =
+ place_builder.try_upvars_resolved(this.tcx, this.typeck_results)
+ {
+ let mir_place =
+ place_builder_resolved.into_place(this.tcx, this.typeck_results);
+ this.cfg.push_fake_read(
+ block,
+ this.source_info(this.tcx.hir().span(*hir_id)),
+ *cause,
+ mir_place,
+ );
+ }
+ }
+
+ // see (*) above
+ let operands: Vec<_> = upvars
+ .into_iter()
+ .copied()
+ .map(|upvar| {
+ let upvar = &this.thir[upvar];
+ match Category::of(&upvar.kind) {
+ // Use as_place to avoid creating a temporary when
+ // moving a variable into a closure, so that
+ // borrowck knows which variables to mark as being
+ // used as mut. This is OK here because the upvar
+ // expressions have no side effects and act on
+ // disjoint places.
+ // This occurs when capturing by copy/move, while
+ // by reference captures use as_operand
+ Some(Category::Place) => {
+ let place = unpack!(block = this.as_place(block, upvar));
+ this.consume_by_copy_or_move(place)
+ }
+ _ => {
+ // Turn mutable borrow captures into unique
+ // borrow captures when capturing an immutable
+ // variable. This is sound because the mutation
+ // that caused the capture will cause an error.
+ match upvar.kind {
+ ExprKind::Borrow {
+ borrow_kind:
+ BorrowKind::Mut { allow_two_phase_borrow: false },
+ arg,
+ } => unpack!(
+ block = this.limit_capture_mutability(
+ upvar.span,
+ upvar.ty,
+ scope,
+ block,
+ &this.thir[arg],
+ )
+ ),
+ _ => {
+ unpack!(
+ block = this.as_operand(
+ block,
+ scope,
+ upvar,
+ None,
+ NeedsTemporary::Maybe
+ )
+ )
+ }
+ }
+ }
+ }
+ })
+ .collect();
+
+ let result = match substs {
+ UpvarSubsts::Generator(substs) => {
+ // We implicitly set the discriminant to 0. See
+ // librustc_mir/transform/deaggregator.rs for details.
+ let movability = movability.unwrap();
+ Box::new(AggregateKind::Generator(closure_id, substs, movability))
+ }
+ UpvarSubsts::Closure(substs) => {
+ Box::new(AggregateKind::Closure(closure_id, substs))
+ }
+ };
+ block.and(Rvalue::Aggregate(result, operands))
+ }
+ ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => {
+ block = unpack!(this.stmt_expr(block, expr, None));
+ block.and(Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: ConstantKind::zero_sized(this.tcx.types.unit),
+ }))))
+ }
+
+ ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ZstLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::ConstBlock { .. }
+ | ExprKind::StaticRef { .. } => {
+ let constant = this.as_constant(expr);
+ block.and(Rvalue::Use(Operand::Constant(Box::new(constant))))
+ }
+
+ ExprKind::Yield { .. }
+ | ExprKind::Block { .. }
+ | ExprKind::Match { .. }
+ | ExprKind::If { .. }
+ | ExprKind::NeverToAny { .. }
+ | ExprKind::Use { .. }
+ | ExprKind::Borrow { .. }
+ | ExprKind::AddressOf { .. }
+ | ExprKind::Adt { .. }
+ | ExprKind::Loop { .. }
+ | ExprKind::LogicalOp { .. }
+ | ExprKind::Call { .. }
+ | ExprKind::Field { .. }
+ | ExprKind::Let { .. }
+ | ExprKind::Deref { .. }
+ | ExprKind::Index { .. }
+ | ExprKind::VarRef { .. }
+ | ExprKind::UpvarRef { .. }
+ | ExprKind::Break { .. }
+ | ExprKind::Continue { .. }
+ | ExprKind::Return { .. }
+ | ExprKind::InlineAsm { .. }
+ | ExprKind::PlaceTypeAscription { .. }
+ | ExprKind::ValueTypeAscription { .. } => {
+ // these do not have corresponding `Rvalue` variants,
+ // so make an operand and then return that
+ debug_assert!(!matches!(
+ Category::of(&expr.kind),
+ Some(Category::Rvalue(RvalueFunc::AsRvalue) | Category::Constant)
+ ));
+ let operand =
+ unpack!(block = this.as_operand(block, scope, expr, None, NeedsTemporary::No));
+ block.and(Rvalue::Use(operand))
+ }
+ }
+ }
+
+ pub(crate) fn build_binary_op(
+ &mut self,
+ mut block: BasicBlock,
+ op: BinOp,
+ span: Span,
+ ty: Ty<'tcx>,
+ lhs: Operand<'tcx>,
+ rhs: Operand<'tcx>,
+ ) -> BlockAnd<Rvalue<'tcx>> {
+ let source_info = self.source_info(span);
+ let bool_ty = self.tcx.types.bool;
+ if self.check_overflow && op.is_checkable() && ty.is_integral() {
+ let result_tup = self.tcx.intern_tup(&[ty, bool_ty]);
+ let result_value = self.temp(result_tup, span);
+
+ self.cfg.push_assign(
+ block,
+ source_info,
+ result_value,
+ Rvalue::CheckedBinaryOp(op, Box::new((lhs.to_copy(), rhs.to_copy()))),
+ );
+ let val_fld = Field::new(0);
+ let of_fld = Field::new(1);
+
+ let tcx = self.tcx;
+ let val = tcx.mk_place_field(result_value, val_fld, ty);
+ let of = tcx.mk_place_field(result_value, of_fld, bool_ty);
+
+ let err = AssertKind::Overflow(op, lhs, rhs);
+
+ block = self.assert(block, Operand::Move(of), false, err, span);
+
+ block.and(Rvalue::Use(Operand::Move(val)))
+ } else {
+ if ty.is_integral() && (op == BinOp::Div || op == BinOp::Rem) {
+ // Checking division and remainder is more complex, since (1) we always check
+ // and (2) there are two possible failure cases: divide-by-zero and overflow.
+
+ let zero_err = if op == BinOp::Div {
+ AssertKind::DivisionByZero(lhs.to_copy())
+ } else {
+ AssertKind::RemainderByZero(lhs.to_copy())
+ };
+ let overflow_err = AssertKind::Overflow(op, lhs.to_copy(), rhs.to_copy());
+
+ // Check for / 0
+ let is_zero = self.temp(bool_ty, span);
+ let zero = self.zero_literal(span, ty);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ is_zero,
+ Rvalue::BinaryOp(BinOp::Eq, Box::new((rhs.to_copy(), zero))),
+ );
+
+ block = self.assert(block, Operand::Move(is_zero), false, zero_err, span);
+
+ // We only need to check for the overflow in one case:
+ // MIN / -1, and only for signed values.
+ if ty.is_signed() {
+ let neg_1 = self.neg_1_literal(span, ty);
+ let min = self.minval_literal(span, ty);
+
+ let is_neg_1 = self.temp(bool_ty, span);
+ let is_min = self.temp(bool_ty, span);
+ let of = self.temp(bool_ty, span);
+
+ // this does (rhs == -1) & (lhs == MIN). It could short-circuit instead
+
+ self.cfg.push_assign(
+ block,
+ source_info,
+ is_neg_1,
+ Rvalue::BinaryOp(BinOp::Eq, Box::new((rhs.to_copy(), neg_1))),
+ );
+ self.cfg.push_assign(
+ block,
+ source_info,
+ is_min,
+ Rvalue::BinaryOp(BinOp::Eq, Box::new((lhs.to_copy(), min))),
+ );
+
+ let is_neg_1 = Operand::Move(is_neg_1);
+ let is_min = Operand::Move(is_min);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ of,
+ Rvalue::BinaryOp(BinOp::BitAnd, Box::new((is_neg_1, is_min))),
+ );
+
+ block = self.assert(block, Operand::Move(of), false, overflow_err, span);
+ }
+ }
+
+ block.and(Rvalue::BinaryOp(op, Box::new((lhs, rhs))))
+ }
+ }
+
+ fn build_zero_repeat(
+ &mut self,
+ mut block: BasicBlock,
+ value: ExprId,
+ scope: Option<region::Scope>,
+ outer_source_info: SourceInfo,
+ ) -> BlockAnd<Rvalue<'tcx>> {
+ let this = self;
+ let value = &this.thir[value];
+ let elem_ty = value.ty;
+ if let Some(Category::Constant) = Category::of(&value.kind) {
+ // Repeating a const does nothing
+ } else {
+ // For a non-const, we may need to generate an appropriate `Drop`
+ let value_operand =
+ unpack!(block = this.as_operand(block, scope, value, None, NeedsTemporary::No));
+ if let Operand::Move(to_drop) = value_operand {
+ let success = this.cfg.start_new_block();
+ this.cfg.terminate(
+ block,
+ outer_source_info,
+ TerminatorKind::Drop { place: to_drop, target: success, unwind: None },
+ );
+ this.diverge_from(block);
+ block = success;
+ }
+ this.record_operands_moved(&[value_operand]);
+ }
+ block.and(Rvalue::Aggregate(Box::new(AggregateKind::Array(elem_ty)), Vec::new()))
+ }
+
+ fn limit_capture_mutability(
+ &mut self,
+ upvar_span: Span,
+ upvar_ty: Ty<'tcx>,
+ temp_lifetime: Option<region::Scope>,
+ mut block: BasicBlock,
+ arg: &Expr<'tcx>,
+ ) -> BlockAnd<Operand<'tcx>> {
+ let this = self;
+
+ let source_info = this.source_info(upvar_span);
+ let temp = this.local_decls.push(LocalDecl::new(upvar_ty, upvar_span));
+
+ this.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(temp) });
+
+ let arg_place_builder = unpack!(block = this.as_place_builder(block, arg));
+
+ let mutability = match arg_place_builder.base() {
+ // We are capturing a path that starts off a local variable in the parent.
+ // The mutability of the current capture is same as the mutability
+ // of the local declaration in the parent.
+ PlaceBase::Local(local) => this.local_decls[local].mutability,
+ // Parent is a closure and we are capturing a path that is captured
+ // by the parent itself. The mutability of the current capture
+ // is same as that of the capture in the parent closure.
+ PlaceBase::Upvar { .. } => {
+ let enclosing_upvars_resolved =
+ arg_place_builder.clone().into_place(this.tcx, this.typeck_results);
+
+ match enclosing_upvars_resolved.as_ref() {
+ PlaceRef {
+ local,
+ projection: &[ProjectionElem::Field(upvar_index, _), ..],
+ }
+ | PlaceRef {
+ local,
+ projection:
+ &[ProjectionElem::Deref, ProjectionElem::Field(upvar_index, _), ..],
+ } => {
+                        // A capture is always based on `Local(1)`, the
+                        // closure's implicit environment parameter.
+ debug_assert!(
+ local == ty::CAPTURE_STRUCT_LOCAL,
+ "Expected local to be Local(1), found {:?}",
+ local
+ );
+                        // The capture index must be in bounds for `upvar_mutbls`.
+ debug_assert!(
+ this.upvar_mutbls.len() > upvar_index.index(),
+ "Unexpected capture place, upvar_mutbls={:#?}, upvar_index={:?}",
+ this.upvar_mutbls,
+ upvar_index
+ );
+ this.upvar_mutbls[upvar_index.index()]
+ }
+ _ => bug!("Unexpected capture place"),
+ }
+ }
+ };
+
+ let borrow_kind = match mutability {
+ Mutability::Not => BorrowKind::Unique,
+ Mutability::Mut => BorrowKind::Mut { allow_two_phase_borrow: false },
+ };
+
+ let arg_place = arg_place_builder.into_place(this.tcx, this.typeck_results);
+
+ this.cfg.push_assign(
+ block,
+ source_info,
+ Place::from(temp),
+ Rvalue::Ref(this.tcx.lifetimes.re_erased, borrow_kind, arg_place),
+ );
+
+ // See the comment in `expr_as_temp` and on the `rvalue_scopes` field for why
+ // this can be `None`.
+ if let Some(temp_lifetime) = temp_lifetime {
+ this.schedule_drop_storage_and_value(upvar_span, temp_lifetime, temp);
+ }
+
+ block.and(Operand::Move(Place::from(temp)))
+ }
+
+ // Helper to get a `-1` value of the appropriate type
+ fn neg_1_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
+ let param_ty = ty::ParamEnv::empty().and(ty);
+ let size = self.tcx.layout_of(param_ty).unwrap().size;
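+        // The all-ones bit pattern (`2^bits - 1`), reinterpreted as a signed
+        // integer, is `-1`: e.g. `0xff` for `i8`.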
+ let literal = ConstantKind::from_bits(self.tcx, size.unsigned_int_max(), param_ty);
+
+ self.literal_operand(span, literal)
+ }
+
+ // Helper to get the minimum value of the appropriate type
+ fn minval_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
+ assert!(ty.is_signed());
+ let param_ty = ty::ParamEnv::empty().and(ty);
+ let bits = self.tcx.layout_of(param_ty).unwrap().size.bits();
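+        // In two's complement, the minimum value has only the sign bit set:
+        // e.g. `0x80` for `i8`.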
+ let n = 1 << (bits - 1);
+ let literal = ConstantKind::from_bits(self.tcx, n, param_ty);
+
+ self.literal_operand(span, literal)
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/expr/as_temp.rs b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
new file mode 100644
index 000000000..724b72f87
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/as_temp.rs
@@ -0,0 +1,119 @@
+//! See docs in build/expr/mod.rs
+
+use crate::build::scope::DropKind;
+use crate::build::{BlockAnd, BlockAndExtension, Builder};
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Compile `expr` into a fresh temporary. This is used when building
+ /// up rvalues so as to freeze the value that will be consumed.
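+    ///
+    /// For example, lowering `&(x + y)` evaluates `x + y` into a fresh
+    /// temporary and then borrows that temporary.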
+ pub(crate) fn as_temp(
+ &mut self,
+ block: BasicBlock,
+ temp_lifetime: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ mutability: Mutability,
+ ) -> BlockAnd<Local> {
+        // This is the only place in MIR building where we truly need to worry
+        // about infinite recursion. Everything else recurses, too, but it
+        // always gets broken up at some point by inserting an intermediate
+        // temporary.
+ ensure_sufficient_stack(|| self.as_temp_inner(block, temp_lifetime, expr, mutability))
+ }
+
+ fn as_temp_inner(
+ &mut self,
+ mut block: BasicBlock,
+ temp_lifetime: Option<region::Scope>,
+ expr: &Expr<'tcx>,
+ mutability: Mutability,
+ ) -> BlockAnd<Local> {
+ debug!(
+ "as_temp(block={:?}, temp_lifetime={:?}, expr={:?}, mutability={:?})",
+ block, temp_lifetime, expr, mutability
+ );
+ let this = self;
+
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+ if let ExprKind::Scope { region_scope, lint_level, value } = expr.kind {
+ return this.in_scope((region_scope, source_info), lint_level, |this| {
+ this.as_temp(block, temp_lifetime, &this.thir[value], mutability)
+ });
+ }
+
+ let expr_ty = expr.ty;
+ let temp = {
+ let mut local_decl = LocalDecl::new(expr_ty, expr_span);
+ if mutability == Mutability::Not {
+ local_decl = local_decl.immutable();
+ }
+
+ debug!("creating temp {:?} with block_context: {:?}", local_decl, this.block_context);
+ // Find out whether this temp is being created within the
+ // tail expression of a block whose result is ignored.
+ if let Some(tail_info) = this.block_context.currently_in_block_tail() {
+ local_decl = local_decl.block_tail(tail_info);
+ }
+ match expr.kind {
+ ExprKind::StaticRef { def_id, .. } => {
+ assert!(!this.tcx.is_thread_local_static(def_id));
+ local_decl.internal = true;
+ local_decl.local_info =
+ Some(Box::new(LocalInfo::StaticRef { def_id, is_thread_local: false }));
+ }
+ ExprKind::ThreadLocalRef(def_id) => {
+ assert!(this.tcx.is_thread_local_static(def_id));
+ local_decl.internal = true;
+ local_decl.local_info =
+ Some(Box::new(LocalInfo::StaticRef { def_id, is_thread_local: true }));
+ }
+ ExprKind::NamedConst { def_id, .. } | ExprKind::ConstParam { def_id, .. } => {
+ local_decl.local_info = Some(Box::new(LocalInfo::ConstRef { def_id }));
+ }
+ _ => {}
+ }
+ this.local_decls.push(local_decl)
+ };
+ let temp_place = Place::from(temp);
+
+ match expr.kind {
+ // Don't bother with StorageLive and Dead for these temporaries,
+ // they are never assigned.
+ ExprKind::Break { .. } | ExprKind::Continue { .. } | ExprKind::Return { .. } => (),
+ ExprKind::Block { body: Block { expr: None, targeted_by_break: false, .. } }
+ if expr_ty.is_never() => {}
+ _ => {
+ this.cfg
+ .push(block, Statement { source_info, kind: StatementKind::StorageLive(temp) });
+
+ // In constants, `temp_lifetime` is `None` for temporaries that
+ // live for the `'static` lifetime. Thus we do not drop these
+ // temporaries and simply leak them.
+ // This is equivalent to what `let x = &foo();` does in
+                // functions. The temporary is lifted to its surrounding
+                // scope. In a function that means the temporary lives until
+                // just before the function returns. In constants that means it
+                // outlives the constant's initialization value computation.
+                // Anything outliving a constant must have the `'static`
+                // lifetime and live forever.
+                // Anything with a shorter lifetime (e.g., the `&foo()` in
+                // `bar(&foo())`, or anything within a block) will keep the
+                // regular drops, just like runtime code.
+ if let Some(temp_lifetime) = temp_lifetime {
+ this.schedule_drop(expr_span, temp_lifetime, temp, DropKind::Storage);
+ }
+ }
+ }
+
+ unpack!(block = this.expr_into_dest(temp_place, block, expr));
+
+ if let Some(temp_lifetime) = temp_lifetime {
+ this.schedule_drop(expr_span, temp_lifetime, temp, DropKind::Value);
+ }
+
+ block.and(temp)
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/expr/category.rs b/compiler/rustc_mir_build/src/build/expr/category.rs
new file mode 100644
index 000000000..a4386319d
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/category.rs
@@ -0,0 +1,92 @@
+use rustc_middle::thir::*;
+
+#[derive(Debug, PartialEq)]
+pub(crate) enum Category {
+ // An assignable memory location like `x`, `x.f`, `foo()[3]`, that
+ // sort of thing. Something that could appear on the LHS of an `=`
+ // sign.
+ Place,
+
+ // A literal like `23` or `"foo"`. Does not include constant
+ // expressions like `3 + 5`.
+ Constant,
+
+ // Something that generates a new value at runtime, like `x + y`
+ // or `foo()`.
+ Rvalue(RvalueFunc),
+}
+
+// Rvalues fall into different "styles" that will determine which fn
+// is best suited to generate them.
+#[derive(Debug, PartialEq)]
+pub(crate) enum RvalueFunc {
+ // Best generated by `into`. This is generally exprs that
+ // cause branching, like `match`, but also includes calls.
+ Into,
+
+ // Best generated by `as_rvalue`. This is usually the case.
+ AsRvalue,
+}
+
+/// Determines the category for a given expression. Note that scope
+/// and paren expressions have no category.
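+///
+/// For example, `x.field` is a `Place`, `"foo"` is a `Constant`, `a + b`
+/// is an `Rvalue(AsRvalue)`, and a `match` is an `Rvalue(Into)`.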
+impl Category {
+ pub(crate) fn of(ek: &ExprKind<'_>) -> Option<Category> {
+ match *ek {
+ ExprKind::Scope { .. } => None,
+
+ ExprKind::Field { .. }
+ | ExprKind::Deref { .. }
+ | ExprKind::Index { .. }
+ | ExprKind::UpvarRef { .. }
+ | ExprKind::VarRef { .. }
+ | ExprKind::PlaceTypeAscription { .. }
+ | ExprKind::ValueTypeAscription { .. } => Some(Category::Place),
+
+ ExprKind::LogicalOp { .. }
+ | ExprKind::Match { .. }
+ | ExprKind::If { .. }
+ | ExprKind::Let { .. }
+ | ExprKind::NeverToAny { .. }
+ | ExprKind::Use { .. }
+ | ExprKind::Adt { .. }
+ | ExprKind::Borrow { .. }
+ | ExprKind::AddressOf { .. }
+ | ExprKind::Yield { .. }
+ | ExprKind::Call { .. }
+ | ExprKind::InlineAsm { .. } => Some(Category::Rvalue(RvalueFunc::Into)),
+
+ ExprKind::Array { .. }
+ | ExprKind::Tuple { .. }
+ | ExprKind::Closure { .. }
+ | ExprKind::Unary { .. }
+ | ExprKind::Binary { .. }
+ | ExprKind::Box { .. }
+ | ExprKind::Cast { .. }
+ | ExprKind::Pointer { .. }
+ | ExprKind::Repeat { .. }
+ | ExprKind::Assign { .. }
+ | ExprKind::AssignOp { .. }
+ | ExprKind::ThreadLocalRef(_) => Some(Category::Rvalue(RvalueFunc::AsRvalue)),
+
+ ExprKind::ConstBlock { .. }
+ | ExprKind::Literal { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ZstLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::StaticRef { .. }
+ | ExprKind::NamedConst { .. } => Some(Category::Constant),
+
+ ExprKind::Loop { .. }
+ | ExprKind::Block { .. }
+ | ExprKind::Break { .. }
+ | ExprKind::Continue { .. }
+ | ExprKind::Return { .. } =>
+ // FIXME(#27840) these probably want their own
+ // category, like "nonterminating"
+ {
+ Some(Category::Rvalue(RvalueFunc::Into))
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs
new file mode 100644
index 000000000..017d43d10
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/into.rs
@@ -0,0 +1,599 @@
+//! See docs in build/expr/mod.rs
+
+use crate::build::expr::category::{Category, RvalueFunc};
+use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, NeedsTemporary};
+use rustc_ast::InlineAsmOptions;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_hir as hir;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::CanonicalUserTypeAnnotation;
+use std::iter;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Compile `expr`, storing the result into `destination`, which
+ /// is assumed to be uninitialized.
+ pub(crate) fn expr_into_dest(
+ &mut self,
+ destination: Place<'tcx>,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<()> {
+ debug!("expr_into_dest(destination={:?}, block={:?}, expr={:?})", destination, block, expr);
+
+ // since we frequently have to reference `self` from within a
+ // closure, where `self` would be shadowed, it's easier to
+ // just use the name `this` uniformly
+ let this = self;
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr_span);
+
+ let expr_is_block_or_scope =
+ matches!(expr.kind, ExprKind::Block { .. } | ExprKind::Scope { .. });
+
+ if !expr_is_block_or_scope {
+ this.block_context.push(BlockFrame::SubExpr);
+ }
+
+ let block_and = match expr.kind {
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ let region_scope = (region_scope, source_info);
+ ensure_sufficient_stack(|| {
+ this.in_scope(region_scope, lint_level, |this| {
+ this.expr_into_dest(destination, block, &this.thir[value])
+ })
+ })
+ }
+ ExprKind::Block { body: ref ast_block } => {
+ this.ast_block(destination, block, ast_block, source_info)
+ }
+ ExprKind::Match { scrutinee, ref arms } => {
+ this.match_expr(destination, expr_span, block, &this.thir[scrutinee], arms)
+ }
+ ExprKind::If { cond, then, else_opt, if_then_scope } => {
+ let then_blk;
+ let then_expr = &this.thir[then];
+ let then_source_info = this.source_info(then_expr.span);
+ let condition_scope = this.local_scope();
+
+ let mut else_blk = unpack!(
+ then_blk = this.in_scope(
+ (if_then_scope, then_source_info),
+ LintLevel::Inherited,
+ |this| {
+ let source_info = if this.is_let(cond) {
+ let variable_scope = this.new_source_scope(
+ then_expr.span,
+ LintLevel::Inherited,
+ None,
+ );
+ this.source_scope = variable_scope;
+ SourceInfo { span: then_expr.span, scope: variable_scope }
+ } else {
+ this.source_info(then_expr.span)
+ };
+ let (then_block, else_block) =
+ this.in_if_then_scope(condition_scope, |this| {
+ let then_blk = unpack!(this.then_else_break(
+ block,
+ &this.thir[cond],
+ Some(condition_scope),
+ condition_scope,
+ source_info
+ ));
+
+ this.expr_into_dest(destination, then_blk, then_expr)
+ });
+ then_block.and(else_block)
+ },
+ )
+ );
+
+ else_blk = if let Some(else_opt) = else_opt {
+ unpack!(this.expr_into_dest(destination, else_blk, &this.thir[else_opt]))
+ } else {
+                    // The body of an `if` expression without an `else` clause must return
+                    // `()`; thus we implicitly generate an `else {}` if it is not specified.
+ let correct_si = this.source_info(expr_span.shrink_to_hi());
+ this.cfg.push_assign_unit(else_blk, correct_si, destination, this.tcx);
+ else_blk
+ };
+
+ let join_block = this.cfg.start_new_block();
+ this.cfg.goto(then_blk, source_info, join_block);
+ this.cfg.goto(else_blk, source_info, join_block);
+ join_block.unit()
+ }
+ ExprKind::Let { expr, ref pat } => {
+ let scope = this.local_scope();
+ let (true_block, false_block) = this.in_if_then_scope(scope, |this| {
+ this.lower_let_expr(block, &this.thir[expr], pat, scope, None, expr_span)
+ });
+
+ this.cfg.push_assign_constant(
+ true_block,
+ source_info,
+ destination,
+ Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: ConstantKind::from_bool(this.tcx, true),
+ },
+ );
+
+ this.cfg.push_assign_constant(
+ false_block,
+ source_info,
+ destination,
+ Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: ConstantKind::from_bool(this.tcx, false),
+ },
+ );
+
+ let join_block = this.cfg.start_new_block();
+ this.cfg.goto(true_block, source_info, join_block);
+ this.cfg.goto(false_block, source_info, join_block);
+ join_block.unit()
+ }
+ ExprKind::NeverToAny { source } => {
+ let source = &this.thir[source];
+ let is_call =
+ matches!(source.kind, ExprKind::Call { .. } | ExprKind::InlineAsm { .. });
+
+ // (#66975) Source could be a const of type `!`, so has to
+ // exist in the generated MIR.
+ unpack!(
+ block = this.as_temp(block, Some(this.local_scope()), source, Mutability::Mut,)
+ );
+
+ // This is an optimization. If the expression was a call then we already have an
+ // unreachable block. Don't bother to terminate it and create a new one.
+ if is_call {
+ block.unit()
+ } else {
+ this.cfg.terminate(block, source_info, TerminatorKind::Unreachable);
+ let end_block = this.cfg.start_new_block();
+ end_block.unit()
+ }
+ }
+ ExprKind::LogicalOp { op, lhs, rhs } => {
+ // And:
+ //
+ // [block: If(lhs)] -true-> [else_block: dest = (rhs)]
+ // | (false)
+                //  [shortcircuit_block: dest = false]
+ //
+ // Or:
+ //
+ // [block: If(lhs)] -false-> [else_block: dest = (rhs)]
+ // | (true)
+                //  [shortcircuit_block: dest = true]
+
+ let (shortcircuit_block, mut else_block, join_block) = (
+ this.cfg.start_new_block(),
+ this.cfg.start_new_block(),
+ this.cfg.start_new_block(),
+ );
+
+ let lhs = unpack!(block = this.as_local_operand(block, &this.thir[lhs]));
+ let blocks = match op {
+ LogicalOp::And => (else_block, shortcircuit_block),
+ LogicalOp::Or => (shortcircuit_block, else_block),
+ };
+ let term = TerminatorKind::if_(this.tcx, lhs, blocks.0, blocks.1);
+ this.cfg.terminate(block, source_info, term);
+
+ this.cfg.push_assign_constant(
+ shortcircuit_block,
+ source_info,
+ destination,
+ Constant {
+ span: expr_span,
+ user_ty: None,
+ literal: match op {
+ LogicalOp::And => ConstantKind::from_bool(this.tcx, false),
+ LogicalOp::Or => ConstantKind::from_bool(this.tcx, true),
+ },
+ },
+ );
+ this.cfg.goto(shortcircuit_block, source_info, join_block);
+
+ let rhs = unpack!(else_block = this.as_local_operand(else_block, &this.thir[rhs]));
+ this.cfg.push_assign(else_block, source_info, destination, Rvalue::Use(rhs));
+ this.cfg.goto(else_block, source_info, join_block);
+
+ join_block.unit()
+ }
+ ExprKind::Loop { body } => {
+ // [block]
+ // |
+ // [loop_block] -> [body_block] -/eval. body/-> [body_block_end]
+ // | ^ |
+ // false link | |
+ // | +-----------------------------------------+
+ // +-> [diverge_cleanup]
+                // The false link is required to make sure borrowck considers unwinds through the
+                // body, even when the exact code in the body cannot unwind.
+
+ let loop_block = this.cfg.start_new_block();
+
+ // Start the loop.
+ this.cfg.goto(block, source_info, loop_block);
+
+ this.in_breakable_scope(Some(loop_block), destination, expr_span, move |this| {
+ // conduct the test, if necessary
+ let body_block = this.cfg.start_new_block();
+ this.cfg.terminate(
+ loop_block,
+ source_info,
+ TerminatorKind::FalseUnwind { real_target: body_block, unwind: None },
+ );
+ this.diverge_from(loop_block);
+
+ // The “return” value of the loop body must always be a unit. We therefore
+ // introduce a unit temporary as the destination for the loop body.
+ let tmp = this.get_unit_temp();
+ // Execute the body, branching back to the test.
+ let body_block_end =
+ unpack!(this.expr_into_dest(tmp, body_block, &this.thir[body]));
+ this.cfg.goto(body_block_end, source_info, loop_block);
+
+ // Loops are only exited by `break` expressions.
+ None
+ })
+ }
+ ExprKind::Call { ty: _, fun, ref args, from_hir_call, fn_span } => {
+ let fun = unpack!(block = this.as_local_operand(block, &this.thir[fun]));
+ let args: Vec<_> = args
+ .into_iter()
+ .copied()
+ .map(|arg| unpack!(block = this.as_local_call_operand(block, &this.thir[arg])))
+ .collect();
+
+ let success = this.cfg.start_new_block();
+
+ this.record_operands_moved(&args);
+
+ debug!("expr_into_dest: fn_span={:?}", fn_span);
+
+ this.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Call {
+ func: fun,
+ args,
+ cleanup: None,
+ destination,
+ // The presence or absence of a return edge affects control-flow sensitive
+ // MIR checks and ultimately whether code is accepted or not. We can only
+ // omit the return edge if a return type is visibly uninhabited to a module
+ // that makes the call.
+ target: if this.tcx.is_ty_uninhabited_from(
+ this.parent_module,
+ expr.ty,
+ this.param_env,
+ ) {
+ None
+ } else {
+ Some(success)
+ },
+ from_hir_call,
+ fn_span,
+ },
+ );
+ this.diverge_from(block);
+ success.unit()
+ }
+ ExprKind::Use { source } => this.expr_into_dest(destination, block, &this.thir[source]),
+ ExprKind::Borrow { arg, borrow_kind } => {
+ let arg = &this.thir[arg];
+ // We don't do this in `as_rvalue` because we use `as_place`
+                // for borrow expressions, so we cannot create an `Rvalue` that
+ // remains valid across user code. `as_rvalue` is usually called
+ // by this method anyway, so this shouldn't cause too many
+ // unnecessary temporaries.
+ let arg_place = match borrow_kind {
+ BorrowKind::Shared => unpack!(block = this.as_read_only_place(block, arg)),
+ _ => unpack!(block = this.as_place(block, arg)),
+ };
+ let borrow = Rvalue::Ref(this.tcx.lifetimes.re_erased, borrow_kind, arg_place);
+ this.cfg.push_assign(block, source_info, destination, borrow);
+ block.unit()
+ }
+ ExprKind::AddressOf { mutability, arg } => {
+ let arg = &this.thir[arg];
+ let place = match mutability {
+ hir::Mutability::Not => this.as_read_only_place(block, arg),
+ hir::Mutability::Mut => this.as_place(block, arg),
+ };
+ let address_of = Rvalue::AddressOf(mutability, unpack!(block = place));
+ this.cfg.push_assign(block, source_info, destination, address_of);
+ block.unit()
+ }
+ ExprKind::Adt(box Adt {
+ adt_def,
+ variant_index,
+ substs,
+ user_ty,
+ ref fields,
+ ref base,
+ }) => {
+ // See the notes for `ExprKind::Array` in `as_rvalue` and for
+ // `ExprKind::Borrow` above.
+ let is_union = adt_def.is_union();
+ let active_field_index = if is_union { Some(fields[0].name.index()) } else { None };
+
+ let scope = this.local_scope();
+
+ // first process the set of fields that were provided
+ // (evaluating them in order given by user)
+ let fields_map: FxHashMap<_, _> = fields
+ .into_iter()
+ .map(|f| {
+ let local_info = Box::new(LocalInfo::AggregateTemp);
+ (
+ f.name,
+ unpack!(
+ block = this.as_operand(
+ block,
+ Some(scope),
+ &this.thir[f.expr],
+ Some(local_info),
+ NeedsTemporary::Maybe,
+ )
+ ),
+ )
+ })
+ .collect();
+
+ let field_names: Vec<_> =
+ (0..adt_def.variant(variant_index).fields.len()).map(Field::new).collect();
+
+ let fields: Vec<_> = if let Some(FruInfo { base, field_types }) = base {
+ let place_builder =
+ unpack!(block = this.as_place_builder(block, &this.thir[*base]));
+
+ // MIR does not natively support FRU, so for each
+ // base-supplied field, generate an operand that
+ // reads it from the base.
+ iter::zip(field_names, &**field_types)
+ .map(|(n, ty)| match fields_map.get(&n) {
+ Some(v) => v.clone(),
+ None => {
+ let place_builder = place_builder.clone();
+ this.consume_by_copy_or_move(
+ place_builder
+ .field(n, *ty)
+ .into_place(this.tcx, this.typeck_results),
+ )
+ }
+ })
+ .collect()
+ } else {
+ field_names.iter().filter_map(|n| fields_map.get(n).cloned()).collect()
+ };
+
+ let inferred_ty = expr.ty;
+ let user_ty = user_ty.map(|ty| {
+ this.canonical_user_type_annotations.push(CanonicalUserTypeAnnotation {
+ span: source_info.span,
+ user_ty: ty,
+ inferred_ty,
+ })
+ });
+ let adt = Box::new(AggregateKind::Adt(
+ adt_def.did(),
+ variant_index,
+ substs,
+ user_ty,
+ active_field_index,
+ ));
+ this.cfg.push_assign(
+ block,
+ source_info,
+ destination,
+ Rvalue::Aggregate(adt, fields),
+ );
+ block.unit()
+ }
+ ExprKind::InlineAsm { template, ref operands, options, line_spans } => {
+ use rustc_middle::{mir, thir};
+ let operands = operands
+ .into_iter()
+ .map(|op| match *op {
+ thir::InlineAsmOperand::In { reg, expr } => mir::InlineAsmOperand::In {
+ reg,
+ value: unpack!(block = this.as_local_operand(block, &this.thir[expr])),
+ },
+ thir::InlineAsmOperand::Out { reg, late, expr } => {
+ mir::InlineAsmOperand::Out {
+ reg,
+ late,
+ place: expr.map(|expr| {
+ unpack!(block = this.as_place(block, &this.thir[expr]))
+ }),
+ }
+ }
+ thir::InlineAsmOperand::InOut { reg, late, expr } => {
+ let place = unpack!(block = this.as_place(block, &this.thir[expr]));
+ mir::InlineAsmOperand::InOut {
+ reg,
+ late,
+ // This works because asm operands must be Copy
+ in_value: Operand::Copy(place),
+ out_place: Some(place),
+ }
+ }
+ thir::InlineAsmOperand::SplitInOut { reg, late, in_expr, out_expr } => {
+ mir::InlineAsmOperand::InOut {
+ reg,
+ late,
+ in_value: unpack!(
+ block = this.as_local_operand(block, &this.thir[in_expr])
+ ),
+ out_place: out_expr.map(|out_expr| {
+ unpack!(block = this.as_place(block, &this.thir[out_expr]))
+ }),
+ }
+ }
+ thir::InlineAsmOperand::Const { value, span } => {
+ mir::InlineAsmOperand::Const {
+ value: Box::new(Constant { span, user_ty: None, literal: value }),
+ }
+ }
+ thir::InlineAsmOperand::SymFn { value, span } => {
+ mir::InlineAsmOperand::SymFn {
+ value: Box::new(Constant { span, user_ty: None, literal: value }),
+ }
+ }
+ thir::InlineAsmOperand::SymStatic { def_id } => {
+ mir::InlineAsmOperand::SymStatic { def_id }
+ }
+ })
+ .collect();
+
+ if !options.contains(InlineAsmOptions::NORETURN) {
+ this.cfg.push_assign_unit(block, source_info, destination, this.tcx);
+ }
+
+ let destination_block = this.cfg.start_new_block();
+ this.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::InlineAsm {
+ template,
+ operands,
+ options,
+ line_spans,
+ destination: if options.contains(InlineAsmOptions::NORETURN) {
+ None
+ } else {
+ Some(destination_block)
+ },
+ cleanup: None,
+ },
+ );
+ if options.contains(InlineAsmOptions::MAY_UNWIND) {
+ this.diverge_from(block);
+ }
+ destination_block.unit()
+ }
+
+ // These cases don't actually need a destination
+ ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => {
+ unpack!(block = this.stmt_expr(block, expr, None));
+ this.cfg.push_assign_unit(block, source_info, destination, this.tcx);
+ block.unit()
+ }
+
+ ExprKind::Continue { .. } | ExprKind::Break { .. } | ExprKind::Return { .. } => {
+ unpack!(block = this.stmt_expr(block, expr, None));
+ // No assign, as these have type `!`.
+ block.unit()
+ }
+
+ // Avoid creating a temporary
+ ExprKind::VarRef { .. }
+ | ExprKind::UpvarRef { .. }
+ | ExprKind::PlaceTypeAscription { .. }
+ | ExprKind::ValueTypeAscription { .. } => {
+ debug_assert!(Category::of(&expr.kind) == Some(Category::Place));
+
+ let place = unpack!(block = this.as_place(block, expr));
+ let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place));
+ this.cfg.push_assign(block, source_info, destination, rvalue);
+ block.unit()
+ }
+ ExprKind::Index { .. } | ExprKind::Deref { .. } | ExprKind::Field { .. } => {
+ debug_assert_eq!(Category::of(&expr.kind), Some(Category::Place));
+
+ // Create a "fake" temporary variable so that we check that the
+ // value is Sized. Usually, this is caught in type checking, but
+                // in the case of a box expression there is no such check.
+ if !destination.projection.is_empty() {
+ this.local_decls.push(LocalDecl::new(expr.ty, expr.span));
+ }
+
+ let place = unpack!(block = this.as_place(block, expr));
+ let rvalue = Rvalue::Use(this.consume_by_copy_or_move(place));
+ this.cfg.push_assign(block, source_info, destination, rvalue);
+ block.unit()
+ }
+
+ ExprKind::Yield { value } => {
+ let scope = this.local_scope();
+ let value = unpack!(
+ block = this.as_operand(
+ block,
+ Some(scope),
+ &this.thir[value],
+ None,
+ NeedsTemporary::No
+ )
+ );
+ let resume = this.cfg.start_new_block();
+ this.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Yield { value, resume, resume_arg: destination, drop: None },
+ );
+ this.generator_drop_cleanup(block);
+ resume.unit()
+ }
+
+ // these are the cases that are more naturally handled by some other mode
+ ExprKind::Unary { .. }
+ | ExprKind::Binary { .. }
+ | ExprKind::Box { .. }
+ | ExprKind::Cast { .. }
+ | ExprKind::Pointer { .. }
+ | ExprKind::Repeat { .. }
+ | ExprKind::Array { .. }
+ | ExprKind::Tuple { .. }
+ | ExprKind::Closure { .. }
+ | ExprKind::ConstBlock { .. }
+ | ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ZstLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::ThreadLocalRef(_)
+ | ExprKind::StaticRef { .. } => {
+ debug_assert!(match Category::of(&expr.kind).unwrap() {
+ // should be handled above
+ Category::Rvalue(RvalueFunc::Into) => false,
+
+ // must be handled above or else we get an
+ // infinite loop in the builder; see
+ // e.g., `ExprKind::VarRef` above
+ Category::Place => false,
+
+ _ => true,
+ });
+
+ let rvalue = unpack!(block = this.as_local_rvalue(block, expr));
+ this.cfg.push_assign(block, source_info, destination, rvalue);
+ block.unit()
+ }
+ };
+
+ if !expr_is_block_or_scope {
+ let popped = this.block_context.pop();
+ assert!(popped.is_some());
+ }
+
+ block_and
+ }
+
+ fn is_let(&self, expr: ExprId) -> bool {
+ match self.thir[expr].kind {
+ ExprKind::Let { .. } => true,
+ ExprKind::Scope { value, .. } => self.is_let(value),
+ _ => false,
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/expr/mod.rs b/compiler/rustc_mir_build/src/build/expr/mod.rs
new file mode 100644
index 000000000..f5ae060d6
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/mod.rs
@@ -0,0 +1,70 @@
+//! Builds MIR from expressions. As a caller into this module, you
+//! have many options, but the first thing you have to decide is
+//! whether you are evaluating this expression for its *value*, its
+//! *location*, or as a *constant*.
+//!
+//! Typically, you want the value: e.g., if you are doing `expr_a +
+//! expr_b`, you want the values of those expressions. In that case,
+//! you want one of the following functions. Note that if the expr has
+//! a type that is not `Copy`, then using any of these functions will
+//! "move" the value out of its current home (if any).
+//!
+//! - `expr_into_dest` -- writes the value into a specific location, which
+//! should be uninitialized
+//! - `as_operand` -- evaluates the value and yields an `Operand`,
+//! suitable for use as an argument to an `Rvalue`
+//! - `as_temp` -- evaluates into a temporary; this is similar to `as_operand`
+//! except it always returns a fresh place, even for constants
+//! - `as_rvalue` -- yields an `Rvalue`, suitable for use in an assignment;
+//! as of this writing, never needed outside of the `expr` module itself
+//!
+//! Sometimes, though, you want the expression's *location*. An example
+//! would be during a match statement, or the operand of the `&`
+//! operator. In that case, you want `as_place`. This will create a
+//! temporary if necessary.
+//!
+//! Finally, if it's a constant you seek, then call
+//! `as_constant`. This creates a `Constant`, but naturally it can
+//! only be used on constant expressions and hence is needed only in
+//! very limited contexts.
+//!
+//! ### Implementation notes
+//!
+//! For any given kind of expression, there is generally one way that
+//! can be lowered most naturally. This is specified by the
+//! `Category::of` function in the `category` module. For example, a
+//! struct expression (or other expression that creates a new value)
+//! is typically easiest to write in terms of `as_rvalue` or `into`,
+//! whereas a reference to a field is easiest to write in terms of
+//! `as_place`. (The exception to this is scope and paren
+//! expressions, which have no category.)
+//!
+//! Therefore, the various functions above make use of one another in
+//! a descending fashion. For any given expression, you should pick
+//! the most suitable spot to implement it, and then just let the
+//! other fns cycle around. The handoff works like this:
+//!
+//! - `into(place)` -> fallback is to create an rvalue with `as_rvalue` and assign it to `place`
+//! - `as_rvalue` -> fallback is to create an Operand with `as_operand` and use `Rvalue::Use`
+//! - `as_operand` -> either invokes `as_constant` or `as_temp`
+//! - `as_constant` -> (no fallback)
+//! - `as_temp` -> creates a temporary and either calls `as_place` or `into`
+//! - `as_place` -> for rvalues, falls back to `as_temp` and returns that
+//!
+//! As you can see, there is a cycle where `into` can (in theory) fall back to `as_temp`,
+//! which can fall back to `into`. So if one of the `ExprKind` variants is not, in fact,
+//! implemented in the category where it is supposed to be, there will be a problem.
+//!
+//! Of those fallbacks, the most interesting one is `into`, because
+//! it discriminates based on the category of the expression. This is
+//! basically the point where the "by value" operations are bridged
+//! over to the "by reference" mode (`as_place`).
+
+pub(crate) mod as_constant;
+mod as_operand;
+pub mod as_place;
+mod as_rvalue;
+mod as_temp;
+pub mod category;
+mod into;
+mod stmt;
diff --git a/compiler/rustc_mir_build/src/build/expr/stmt.rs b/compiler/rustc_mir_build/src/build/expr/stmt.rs
new file mode 100644
index 000000000..a7e1331aa
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/expr/stmt.rs
@@ -0,0 +1,149 @@
+use crate::build::scope::BreakableTarget;
+use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder};
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Builds a block of MIR statements to evaluate the THIR `expr`.
+ /// If the original expression was an AST statement,
+ /// (e.g., `some().code(&here());`) then `opt_stmt_span` is the
+ /// span of that statement (including its semicolon, if any).
+ /// The scope is used if a statement temporary must be dropped.
+ pub(crate) fn stmt_expr(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ statement_scope: Option<region::Scope>,
+ ) -> BlockAnd<()> {
+ let this = self;
+ let expr_span = expr.span;
+ let source_info = this.source_info(expr.span);
+ // Handle a number of expressions that don't need a destination at all. This
+ // avoids needing a mountain of temporary `()` variables.
+ match expr.kind {
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ this.in_scope((region_scope, source_info), lint_level, |this| {
+ this.stmt_expr(block, &this.thir[value], statement_scope)
+ })
+ }
+ ExprKind::Assign { lhs, rhs } => {
+ let lhs = &this.thir[lhs];
+ let rhs = &this.thir[rhs];
+ let lhs_span = lhs.span;
+
+ // Note: we evaluate assignments right-to-left. This
+ // is better for borrowck interaction with overloaded
+ // operators like x[j] = x[i].
+
+ debug!("stmt_expr Assign block_context.push(SubExpr) : {:?}", expr);
+ this.block_context.push(BlockFrame::SubExpr);
+
+ // Generate better code for things that don't need to be
+ // dropped.
+ if lhs.ty.needs_drop(this.tcx, this.param_env) {
+ let rhs = unpack!(block = this.as_local_operand(block, rhs));
+ let lhs = unpack!(block = this.as_place(block, lhs));
+ unpack!(block = this.build_drop_and_replace(block, lhs_span, lhs, rhs));
+ } else {
+ let rhs = unpack!(block = this.as_local_rvalue(block, rhs));
+ let lhs = unpack!(block = this.as_place(block, lhs));
+ this.cfg.push_assign(block, source_info, lhs, rhs);
+ }
+
+ this.block_context.pop();
+ block.unit()
+ }
+ ExprKind::AssignOp { op, lhs, rhs } => {
+ // FIXME(#28160) there is an interesting semantics
+ // question raised here -- should we "freeze" the
+ // value of the lhs here? I'm inclined to think not,
+ // since it seems closer to the semantics of the
+ // overloaded version, which takes `&mut self`. This
+ // only affects weird things like `x += {x += 1; x}`
+ // -- is that equal to `x + (x + 1)` or `2*(x+1)`?
+
+ let lhs = &this.thir[lhs];
+ let rhs = &this.thir[rhs];
+ let lhs_ty = lhs.ty;
+
+ debug!("stmt_expr AssignOp block_context.push(SubExpr) : {:?}", expr);
+ this.block_context.push(BlockFrame::SubExpr);
+
+ // As above, RTL.
+ let rhs = unpack!(block = this.as_local_operand(block, rhs));
+ let lhs = unpack!(block = this.as_place(block, lhs));
+
+ // we don't have to drop prior contents or anything
+ // because AssignOp is only legal for Copy types
+ // (overloaded ops should be desugared into a call).
+ let result = unpack!(
+ block =
+ this.build_binary_op(block, op, expr_span, lhs_ty, Operand::Copy(lhs), rhs)
+ );
+ this.cfg.push_assign(block, source_info, lhs, result);
+
+ this.block_context.pop();
+ block.unit()
+ }
+ ExprKind::Continue { label } => {
+ this.break_scope(block, None, BreakableTarget::Continue(label), source_info)
+ }
+ ExprKind::Break { label, value } => this.break_scope(
+ block,
+ value.map(|value| &this.thir[value]),
+ BreakableTarget::Break(label),
+ source_info,
+ ),
+ ExprKind::Return { value } => this.break_scope(
+ block,
+ value.map(|value| &this.thir[value]),
+ BreakableTarget::Return,
+ source_info,
+ ),
+ _ => {
+ assert!(
+ statement_scope.is_some(),
+ "Should not be calling `stmt_expr` on a general expression \
+ without a statement scope",
+ );
+
+ // Issue #54382: When creating temp for the value of
+ // expression like:
+ //
+ // `{ side_effects(); { let l = stuff(); the_value } }`
+ //
+ // it is usually better to focus on `the_value` rather
+ // than the entirety of block(s) surrounding it.
+ let adjusted_span = (|| {
+ if let ExprKind::Block { body } = &expr.kind && let Some(tail_ex) = body.expr {
+ let mut expr = &this.thir[tail_ex];
+ while let ExprKind::Block {
+ body: Block { expr: Some(nested_expr), .. },
+ }
+ | ExprKind::Scope { value: nested_expr, .. } = expr.kind
+ {
+ expr = &this.thir[nested_expr];
+ }
+ this.block_context.push(BlockFrame::TailExpr {
+ tail_result_is_ignored: true,
+ span: expr.span,
+ });
+ return Some(expr.span);
+ }
+ None
+ })();
+
+ let temp =
+ unpack!(block = this.as_temp(block, statement_scope, expr, Mutability::Not));
+
+ if let Some(span) = adjusted_span {
+ this.local_decls[temp].source_info.span = span;
+ this.block_context.pop();
+ }
+
+ block.unit()
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/matches/mod.rs b/compiler/rustc_mir_build/src/build/matches/mod.rs
new file mode 100644
index 000000000..58b1564cc
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/matches/mod.rs
@@ -0,0 +1,2354 @@
+//! Code related to match expressions. These are sufficiently complex to
+//! warrant their own module and submodules. :) This main module includes the
+//! high-level algorithm, the submodules contain the details.
+//!
+//! This also includes code for pattern bindings in `let` statements and
+//! function parameters.
+
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::scope::DropKind;
+use crate::build::ForGuard::{self, OutsideGuard, RefWithinGuard};
+use crate::build::{BlockAnd, BlockAndExtension, Builder};
+use crate::build::{GuardFrame, GuardFrameLocal, LocalsForNode};
+use rustc_data_structures::{
+ fx::{FxHashSet, FxIndexMap, FxIndexSet},
+ stack::ensure_sufficient_stack,
+};
+use rustc_index::bit_set::BitSet;
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::{self, *};
+use rustc_middle::ty::{self, CanonicalUserTypeAnnotation, Ty};
+use rustc_span::symbol::Symbol;
+use rustc_span::{BytePos, Pos, Span};
+use rustc_target::abi::VariantIdx;
+use smallvec::{smallvec, SmallVec};
+
+// helper functions, broken out by category:
+mod simplify;
+mod test;
+mod util;
+
+use std::borrow::Borrow;
+use std::convert::TryFrom;
+use std::mem;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ pub(crate) fn then_else_break(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ temp_scope_override: Option<region::Scope>,
+ break_scope: region::Scope,
+ variable_source_info: SourceInfo,
+ ) -> BlockAnd<()> {
+ let this = self;
+ let expr_span = expr.span;
+
+ match expr.kind {
+ ExprKind::LogicalOp { op: LogicalOp::And, lhs, rhs } => {
+ let lhs_then_block = unpack!(this.then_else_break(
+ block,
+ &this.thir[lhs],
+ temp_scope_override,
+ break_scope,
+ variable_source_info,
+ ));
+
+ let rhs_then_block = unpack!(this.then_else_break(
+ lhs_then_block,
+ &this.thir[rhs],
+ temp_scope_override,
+ break_scope,
+ variable_source_info,
+ ));
+
+ rhs_then_block.unit()
+ }
+ ExprKind::Scope { region_scope, lint_level, value } => {
+ let region_scope = (region_scope, this.source_info(expr_span));
+ this.in_scope(region_scope, lint_level, |this| {
+ this.then_else_break(
+ block,
+ &this.thir[value],
+ temp_scope_override,
+ break_scope,
+ variable_source_info,
+ )
+ })
+ }
+ ExprKind::Let { expr, ref pat } => this.lower_let_expr(
+ block,
+ &this.thir[expr],
+ pat,
+ break_scope,
+ Some(variable_source_info.scope),
+ variable_source_info.span,
+ ),
+ _ => {
+ let temp_scope = temp_scope_override.unwrap_or_else(|| this.local_scope());
+ let mutability = Mutability::Mut;
+ let place =
+ unpack!(block = this.as_temp(block, Some(temp_scope), expr, mutability));
+ let operand = Operand::Move(Place::from(place));
+
+ let then_block = this.cfg.start_new_block();
+ let else_block = this.cfg.start_new_block();
+ let term = TerminatorKind::if_(this.tcx, operand, then_block, else_block);
+
+ let source_info = this.source_info(expr_span);
+ this.cfg.terminate(block, source_info, term);
+ this.break_for_else(else_block, break_scope, source_info);
+
+ then_block.unit()
+ }
+ }
+ }
+
+ /// Generates MIR for a `match` expression.
+ ///
+ /// The MIR that we generate for a match looks like this.
+ ///
+ /// ```text
+ /// [ 0. Pre-match ]
+ /// |
+ /// [ 1. Evaluate Scrutinee (expression being matched on) ]
+ /// [ (fake read of scrutinee) ]
+ /// |
+ /// [ 2. Decision tree -- check discriminants ] <--------+
+ /// | |
+ /// | (once a specific arm is chosen) |
+ /// | |
+ /// [pre_binding_block] [otherwise_block]
+ /// | |
+ /// [ 3. Create "guard bindings" for arm ] |
+ /// [ (create fake borrows) ] |
+ /// | |
+ /// [ 4. Execute guard code ] |
+ /// [ (read fake borrows) ] --(guard is false)-----------+
+ /// |
+ /// | (guard results in true)
+ /// |
+ /// [ 5. Create real bindings and execute arm ]
+ /// |
+ /// [ Exit match ]
+ /// ```
+ ///
+ /// All of the different arms have been stacked on top of each other to
+ /// simplify the diagram. For an arm with no guard the blocks marked 3 and
+ /// 4 and the fake borrows are omitted.
+ ///
+ /// We generate MIR in the following steps:
+ ///
+ /// 1. Evaluate the scrutinee and add the fake read of it ([Builder::lower_scrutinee]).
+ /// 2. Create the decision tree ([Builder::lower_match_tree]).
+ /// 3. Determine the fake borrows that are needed from the places that were
+ /// matched against and create the required temporaries for them
+ /// ([Builder::calculate_fake_borrows]).
+ /// 4. Create everything else: the guards and the arms ([Builder::lower_match_arms]).
+ ///
+ /// ## False edges
+ ///
+ /// We don't want to have the exact structure of the decision tree be
+ /// visible through borrow checking. False edges ensure that the CFG as
+ /// seen by borrow checking doesn't encode this. False edges are added:
+ ///
+ /// * From each pre-binding block to the next pre-binding block.
+ /// * From each otherwise block to the next pre-binding block.
+ #[tracing::instrument(level = "debug", skip(self, arms))]
+ pub(crate) fn match_expr(
+ &mut self,
+ destination: Place<'tcx>,
+ span: Span,
+ mut block: BasicBlock,
+ scrutinee: &Expr<'tcx>,
+ arms: &[ArmId],
+ ) -> BlockAnd<()> {
+ let scrutinee_span = scrutinee.span;
+ let scrutinee_place =
+            unpack!(block = self.lower_scrutinee(block, scrutinee, scrutinee_span));
+
+ let mut arm_candidates = self.create_match_candidates(scrutinee_place.clone(), &arms);
+
+ let match_has_guard = arms.iter().copied().any(|arm| self.thir[arm].guard.is_some());
+ let mut candidates =
+ arm_candidates.iter_mut().map(|(_, candidate)| candidate).collect::<Vec<_>>();
+
+ let match_start_span = span.shrink_to_lo().to(scrutinee.span);
+
+ let fake_borrow_temps = self.lower_match_tree(
+ block,
+ scrutinee_span,
+ match_start_span,
+ match_has_guard,
+ &mut candidates,
+ );
+
+ self.lower_match_arms(
+ destination,
+ scrutinee_place,
+ scrutinee_span,
+ arm_candidates,
+ self.source_info(span),
+ fake_borrow_temps,
+ )
+ }
+
+ /// Evaluate the scrutinee and add the fake read of it.
+ fn lower_scrutinee(
+ &mut self,
+ mut block: BasicBlock,
+ scrutinee: &Expr<'tcx>,
+ scrutinee_span: Span,
+ ) -> BlockAnd<PlaceBuilder<'tcx>> {
+ let scrutinee_place_builder = unpack!(block = self.as_place_builder(block, scrutinee));
+ // Matching on a `scrutinee_place` with an uninhabited type doesn't
+ // generate any memory reads by itself, and so if the place "expression"
+ // contains unsafe operations like raw pointer dereferences or union
+ // field projections, we wouldn't know to require an `unsafe` block
+ // around a `match` equivalent to `std::intrinsics::unreachable()`.
+ // See issue #47412 for this hole being discovered in the wild.
+ //
+ // HACK(eddyb) Work around the above issue by adding a dummy inspection
+ // of `scrutinee_place`, specifically by applying `ReadForMatch`.
+ //
+ // NOTE: ReadForMatch also checks that the scrutinee is initialized.
+ // This is currently needed to not allow matching on an uninitialized,
+ // uninhabited value. If we get never patterns, those will check that
+ // the place is initialized, and so this read would only be used to
+ // check safety.
+ let cause_matched_place = FakeReadCause::ForMatchedPlace(None);
+ let source_info = self.source_info(scrutinee_span);
+
+ if let Ok(scrutinee_builder) =
+ scrutinee_place_builder.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ let scrutinee_place = scrutinee_builder.into_place(self.tcx, self.typeck_results);
+ self.cfg.push_fake_read(block, source_info, cause_matched_place, scrutinee_place);
+ }
+
+ block.and(scrutinee_place_builder)
+ }
+
+ /// Create the initial `Candidate`s for a `match` expression.
+ fn create_match_candidates<'pat>(
+ &mut self,
+ scrutinee: PlaceBuilder<'tcx>,
+ arms: &'pat [ArmId],
+ ) -> Vec<(&'pat Arm<'tcx>, Candidate<'pat, 'tcx>)>
+ where
+ 'a: 'pat,
+ {
+ // Assemble a list of candidates: there is one candidate per pattern,
+ // which means there may be more than one candidate *per arm*.
+ arms.iter()
+ .copied()
+ .map(|arm| {
+ let arm = &self.thir[arm];
+ let arm_has_guard = arm.guard.is_some();
+ let arm_candidate = Candidate::new(scrutinee.clone(), &arm.pattern, arm_has_guard);
+ (arm, arm_candidate)
+ })
+ .collect()
+ }
+
+ /// Create the decision tree for the match expression, starting from `block`.
+ ///
+ /// Modifies `candidates` to store the bindings and type ascriptions for
+ /// that candidate.
+ ///
+ /// Returns the places that need fake borrows because we bind or test them.
+ fn lower_match_tree<'pat>(
+ &mut self,
+ block: BasicBlock,
+ scrutinee_span: Span,
+ match_start_span: Span,
+ match_has_guard: bool,
+ candidates: &mut [&mut Candidate<'pat, 'tcx>],
+ ) -> Vec<(Place<'tcx>, Local)> {
+ // The set of places that we are creating fake borrows of. If there are
+ // no match guards then we don't need any fake borrows, so don't track
+ // them.
+ let mut fake_borrows = match_has_guard.then(FxIndexSet::default);
+
+ let mut otherwise = None;
+
+ // This will generate code to test scrutinee_place and
+ // branch to the appropriate arm block
+ self.match_candidates(
+ match_start_span,
+ scrutinee_span,
+ block,
+ &mut otherwise,
+ candidates,
+ &mut fake_borrows,
+ );
+
+ if let Some(otherwise_block) = otherwise {
+ // See the doc comment on `match_candidates` for why we may have an
+ // otherwise block. Match checking will ensure this is actually
+ // unreachable.
+ let source_info = self.source_info(scrutinee_span);
+ self.cfg.terminate(otherwise_block, source_info, TerminatorKind::Unreachable);
+ }
+
+ // Link each leaf candidate to the `pre_binding_block` of the next one.
+ let mut previous_candidate: Option<&mut Candidate<'_, '_>> = None;
+
+ for candidate in candidates {
+ candidate.visit_leaves(|leaf_candidate| {
+ if let Some(ref mut prev) = previous_candidate {
+ prev.next_candidate_pre_binding_block = leaf_candidate.pre_binding_block;
+ }
+ previous_candidate = Some(leaf_candidate);
+ });
+ }
+
+ if let Some(ref borrows) = fake_borrows {
+ self.calculate_fake_borrows(borrows, scrutinee_span)
+ } else {
+ Vec::new()
+ }
+ }
+
+ /// Lower the bindings, guards and arm bodies of a `match` expression.
+ ///
+ /// The decision tree should have already been created
+ /// (by [Builder::lower_match_tree]).
+ ///
+ /// `outer_source_info` is the SourceInfo for the whole match.
+ fn lower_match_arms(
+ &mut self,
+ destination: Place<'tcx>,
+ scrutinee_place_builder: PlaceBuilder<'tcx>,
+ scrutinee_span: Span,
+ arm_candidates: Vec<(&'_ Arm<'tcx>, Candidate<'_, 'tcx>)>,
+ outer_source_info: SourceInfo,
+ fake_borrow_temps: Vec<(Place<'tcx>, Local)>,
+ ) -> BlockAnd<()> {
+ let arm_end_blocks: Vec<_> = arm_candidates
+ .into_iter()
+ .map(|(arm, candidate)| {
+ debug!("lowering arm {:?}\ncandidate = {:?}", arm, candidate);
+
+ let arm_source_info = self.source_info(arm.span);
+ let arm_scope = (arm.scope, arm_source_info);
+ let match_scope = self.local_scope();
+ self.in_scope(arm_scope, arm.lint_level, |this| {
+ // `try_upvars_resolved` may fail if it is unable to resolve the given
+ // `PlaceBuilder` inside a closure. In this case, we don't want to include
+ // a scrutinee place. `scrutinee_place_builder` will fail to be resolved
+ // if the only match arm is a wildcard (`_`).
+ // Example:
+ // ```
+ // let foo = (0, 1);
+ // let c = || {
+ // match foo { _ => () };
+ // };
+ // ```
+ let mut opt_scrutinee_place: Option<(Option<&Place<'tcx>>, Span)> = None;
+ let scrutinee_place: Place<'tcx>;
+ if let Ok(scrutinee_builder) = scrutinee_place_builder
+ .clone()
+ .try_upvars_resolved(this.tcx, this.typeck_results)
+ {
+ scrutinee_place =
+ scrutinee_builder.into_place(this.tcx, this.typeck_results);
+ opt_scrutinee_place = Some((Some(&scrutinee_place), scrutinee_span));
+ }
+ let scope = this.declare_bindings(
+ None,
+ arm.span,
+ &arm.pattern,
+ ArmHasGuard(arm.guard.is_some()),
+ opt_scrutinee_place,
+ );
+
+ let arm_block = this.bind_pattern(
+ outer_source_info,
+ candidate,
+ arm.guard.as_ref(),
+ &fake_borrow_temps,
+ scrutinee_span,
+ Some(arm.span),
+ Some(arm.scope),
+ Some(match_scope),
+ );
+
+ if let Some(source_scope) = scope {
+ this.source_scope = source_scope;
+ }
+
+                    this.expr_into_dest(destination, arm_block, &this.thir[arm.body])
+ })
+ })
+ .collect();
+
+ // all the arm blocks will rejoin here
+ let end_block = self.cfg.start_new_block();
+
+ let end_brace = self.source_info(
+ outer_source_info.span.with_lo(outer_source_info.span.hi() - BytePos::from_usize(1)),
+ );
+ for arm_block in arm_end_blocks {
+ let block = &self.cfg.basic_blocks[arm_block.0];
+ let last_location = block.statements.last().map(|s| s.source_info);
+
+ self.cfg.goto(unpack!(arm_block), last_location.unwrap_or(end_brace), end_block);
+ }
+
+ self.source_scope = outer_source_info.scope;
+
+ end_block.unit()
+ }
+
+ /// Binds the variables and ascribes types for a given `match` arm or
+ /// `let` binding.
+ ///
+ /// Also check if the guard matches, if it's provided.
+ /// `arm_scope` should be `Some` if and only if this is called for a
+ /// `match` arm.
+ fn bind_pattern(
+ &mut self,
+ outer_source_info: SourceInfo,
+ candidate: Candidate<'_, 'tcx>,
+ guard: Option<&Guard<'tcx>>,
+ fake_borrow_temps: &[(Place<'tcx>, Local)],
+ scrutinee_span: Span,
+ arm_span: Option<Span>,
+ arm_scope: Option<region::Scope>,
+ match_scope: Option<region::Scope>,
+ ) -> BasicBlock {
+ if candidate.subcandidates.is_empty() {
+ // Avoid generating another `BasicBlock` when we only have one
+ // candidate.
+ self.bind_and_guard_matched_candidate(
+ candidate,
+ &[],
+ guard,
+ fake_borrow_temps,
+ scrutinee_span,
+ arm_span,
+ match_scope,
+ true,
+ )
+ } else {
+ // It's helpful to avoid scheduling drops multiple times to save
+ // drop elaboration from having to clean up the extra drops.
+ //
+ // If we are in a `let` then we only schedule drops for the first
+ // candidate.
+ //
+ // If we're in a `match` arm then we could have a case like so:
+ //
+ // Ok(x) | Err(x) if return => { /* ... */ }
+ //
+ // In this case we don't want a drop of `x` scheduled when we
+            // return: it isn't bound by move until right before we enter the arm.
+            // To handle this we instead unschedule its drop after each time
+ // we lower the guard.
+ let target_block = self.cfg.start_new_block();
+ let mut schedule_drops = true;
+ // We keep a stack of all of the bindings and type ascriptions
+ // from the parent candidates that we visit, that also need to
+ // be bound for each candidate.
+ traverse_candidate(
+ candidate,
+ &mut Vec::new(),
+ &mut |leaf_candidate, parent_bindings| {
+ if let Some(arm_scope) = arm_scope {
+ self.clear_top_scope(arm_scope);
+ }
+ let binding_end = self.bind_and_guard_matched_candidate(
+ leaf_candidate,
+ parent_bindings,
+ guard,
+ &fake_borrow_temps,
+ scrutinee_span,
+ arm_span,
+ match_scope,
+ schedule_drops,
+ );
+ if arm_scope.is_none() {
+ schedule_drops = false;
+ }
+ self.cfg.goto(binding_end, outer_source_info, target_block);
+ },
+ |inner_candidate, parent_bindings| {
+ parent_bindings.push((inner_candidate.bindings, inner_candidate.ascriptions));
+ inner_candidate.subcandidates.into_iter()
+ },
+ |parent_bindings| {
+ parent_bindings.pop();
+ },
+ );
+
+ target_block
+ }
+ }
+
+ pub(super) fn expr_into_pattern(
+ &mut self,
+ mut block: BasicBlock,
+ irrefutable_pat: Pat<'tcx>,
+ initializer: &Expr<'tcx>,
+ ) -> BlockAnd<()> {
+ match *irrefutable_pat.kind {
+ // Optimize the case of `let x = ...` to write directly into `x`
+ PatKind::Binding { mode: BindingMode::ByValue, var, subpattern: None, .. } => {
+ let place =
+ self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true);
+ unpack!(block = self.expr_into_dest(place, block, initializer));
+
+ // Inject a fake read, see comments on `FakeReadCause::ForLet`.
+ let source_info = self.source_info(irrefutable_pat.span);
+ self.cfg.push_fake_read(block, source_info, FakeReadCause::ForLet(None), place);
+
+ self.schedule_drop_for_binding(var, irrefutable_pat.span, OutsideGuard);
+ block.unit()
+ }
+
+ // Optimize the case of `let x: T = ...` to write directly
+ // into `x` and then require that `T == typeof(x)`.
+ //
+ // Weirdly, this is needed to prevent the
+ // `intrinsic-move-val.rs` test case from crashing. That
+ // test works with uninitialized values in a rather
+ // dubious way, so it may be that the test is kind of
+ // broken.
+ PatKind::AscribeUserType {
+ subpattern:
+ Pat {
+ kind:
+ box PatKind::Binding {
+ mode: BindingMode::ByValue,
+ var,
+ subpattern: None,
+ ..
+ },
+ ..
+ },
+ ascription: thir::Ascription { annotation, variance: _ },
+ } => {
+ let place =
+ self.storage_live_binding(block, var, irrefutable_pat.span, OutsideGuard, true);
+ unpack!(block = self.expr_into_dest(place, block, initializer));
+
+ // Inject a fake read, see comments on `FakeReadCause::ForLet`.
+ let pattern_source_info = self.source_info(irrefutable_pat.span);
+ let cause_let = FakeReadCause::ForLet(None);
+ self.cfg.push_fake_read(block, pattern_source_info, cause_let, place);
+
+ let ty_source_info = self.source_info(annotation.span);
+
+ let base = self.canonical_user_type_annotations.push(annotation);
+ self.cfg.push(
+ block,
+ Statement {
+ source_info: ty_source_info,
+ kind: StatementKind::AscribeUserType(
+ Box::new((place, UserTypeProjection { base, projs: Vec::new() })),
+ // We always use invariant as the variance here. This is because the
+ // variance field from the ascription refers to the variance to use
+ // when applying the type to the value being matched, but this
+ // ascription applies rather to the type of the binding. e.g., in this
+ // example:
+ //
+ // ```
+ // let x: T = <expr>
+ // ```
+ //
+ // We are creating an ascription that defines the type of `x` to be
+ // exactly `T` (i.e., with invariance). The variance field, in
+ // contrast, is intended to be used to relate `T` to the type of
+ // `<expr>`.
+ ty::Variance::Invariant,
+ ),
+ },
+ );
+
+ self.schedule_drop_for_binding(var, irrefutable_pat.span, OutsideGuard);
+ block.unit()
+ }
+
+ _ => {
+ let place_builder = unpack!(block = self.as_place_builder(block, initializer));
+ self.place_into_pattern(block, irrefutable_pat, place_builder, true)
+ }
+ }
+ }
+
+ pub(crate) fn place_into_pattern(
+ &mut self,
+ block: BasicBlock,
+ irrefutable_pat: Pat<'tcx>,
+ initializer: PlaceBuilder<'tcx>,
+ set_match_place: bool,
+ ) -> BlockAnd<()> {
+ let mut candidate = Candidate::new(initializer.clone(), &irrefutable_pat, false);
+ let fake_borrow_temps = self.lower_match_tree(
+ block,
+ irrefutable_pat.span,
+ irrefutable_pat.span,
+ false,
+ &mut [&mut candidate],
+ );
+ // For matches and function arguments, the place that is being matched
+ // can be set when creating the variables. But the place for
+ // `let PATTERN = ...` might not even exist until we do the assignment,
+ // so we set it here instead.
+ if set_match_place {
+ let mut candidate_ref = &candidate;
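+ // Note: the block in this `while let` both updates `opt_match_place`
+ // for the bindings of the current candidate and evaluates to its first
+ // subcandidate, so this walks down one spine of the candidate tree.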
+ while let Some(next) = {
+ for binding in &candidate_ref.bindings {
+ let local = self.var_local_id(binding.var_id, OutsideGuard);
+
+ let Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ VarBindingForm { opt_match_place: Some((ref mut match_place, _)), .. },
+ )))) = self.local_decls[local].local_info else {
+ bug!("Let binding to non-user variable.")
+ };
+ // `try_upvars_resolved` may fail if it is unable to resolve the given
+ // `PlaceBuilder` inside a closure. In this case, we don't want to include
+ // a scrutinee place. `scrutinee_place_builder` will fail for destructured
+ // assignments. This is because a closure only captures the precise places
+ // that it will read and as a result a closure may not capture the entire
+ // tuple/struct and rather have individual places that will be read in the
+ // final MIR.
+ // Example:
+ // ```
+ // let foo = (0, 1);
+ // let c = || {
+ // let (v1, v2) = foo;
+ // };
+ // ```
+ if let Ok(match_pair_resolved) =
+ initializer.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ let place = match_pair_resolved.into_place(self.tcx, self.typeck_results);
+ *match_place = Some(place);
+ }
+ }
+ // All of the subcandidates should bind the same locals, so we
+ // only visit the first one.
+ candidate_ref.subcandidates.get(0)
+ } {
+ candidate_ref = next;
+ }
+ }
+
+ self.bind_pattern(
+ self.source_info(irrefutable_pat.span),
+ candidate,
+ None,
+ &fake_borrow_temps,
+ irrefutable_pat.span,
+ None,
+ None,
+ None,
+ )
+ .unit()
+ }
+
+ /// Declares the bindings of the given patterns and returns the visibility
+ /// scope for the bindings in these patterns, if such a scope had to be
+ /// created. NOTE: Declaring the bindings should always be done in their
+ /// drop scope.
+ pub(crate) fn declare_bindings(
+ &mut self,
+ mut visibility_scope: Option<SourceScope>,
+ scope_span: Span,
+ pattern: &Pat<'tcx>,
+ has_guard: ArmHasGuard,
+ opt_match_place: Option<(Option<&Place<'tcx>>, Span)>,
+ ) -> Option<SourceScope> {
+ debug!("declare_bindings: pattern={:?}", pattern);
+ self.visit_primary_bindings(
+ &pattern,
+ UserTypeProjections::none(),
+ &mut |this, mutability, name, mode, var, span, ty, user_ty| {
+ if visibility_scope.is_none() {
+ visibility_scope =
+ Some(this.new_source_scope(scope_span, LintLevel::Inherited, None));
+ }
+ let source_info = SourceInfo { span, scope: this.source_scope };
+ let visibility_scope = visibility_scope.unwrap();
+ this.declare_binding(
+ source_info,
+ visibility_scope,
+ mutability,
+ name,
+ mode,
+ var,
+ ty,
+ user_ty,
+ has_guard,
+ opt_match_place.map(|(x, y)| (x.cloned(), y)),
+ pattern.span,
+ );
+ },
+ );
+ visibility_scope
+ }
+
+ pub(crate) fn storage_live_binding(
+ &mut self,
+ block: BasicBlock,
+ var: LocalVarId,
+ span: Span,
+ for_guard: ForGuard,
+ schedule_drop: bool,
+ ) -> Place<'tcx> {
+ let local_id = self.var_local_id(var, for_guard);
+ let source_info = self.source_info(span);
+ self.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(local_id) });
+ // Although there is almost always a scope for a given variable, in corner
+ // cases like #92893 we might get a variable with no scope.
+ if let Some(region_scope) = self.region_scope_tree.var_scope(var.0.local_id) && schedule_drop {
+ self.schedule_drop(span, region_scope, local_id, DropKind::Storage);
+ }
+ Place::from(local_id)
+ }
+
+ pub(crate) fn schedule_drop_for_binding(
+ &mut self,
+ var: LocalVarId,
+ span: Span,
+ for_guard: ForGuard,
+ ) {
+ let local_id = self.var_local_id(var, for_guard);
+ if let Some(region_scope) = self.region_scope_tree.var_scope(var.0.local_id) {
+ self.schedule_drop(span, region_scope, local_id, DropKind::Value);
+ }
+ }
+
+ /// Visit all of the primary bindings in a pattern, that is, visit the
+ /// leftmost occurrence of each variable bound in a pattern. A variable
+ /// will occur more than once in an or-pattern.
+ pub(super) fn visit_primary_bindings(
+ &mut self,
+ pattern: &Pat<'tcx>,
+ pattern_user_ty: UserTypeProjections,
+ f: &mut impl FnMut(
+ &mut Self,
+ Mutability,
+ Symbol,
+ BindingMode,
+ LocalVarId,
+ Span,
+ Ty<'tcx>,
+ UserTypeProjections,
+ ),
+ ) {
+ debug!(
+ "visit_primary_bindings: pattern={:?} pattern_user_ty={:?}",
+ pattern, pattern_user_ty
+ );
+ match *pattern.kind {
+ PatKind::Binding {
+ mutability,
+ name,
+ mode,
+ var,
+ ty,
+ ref subpattern,
+ is_primary,
+ ..
+ } => {
+ if is_primary {
+ f(self, mutability, name, mode, var, pattern.span, ty, pattern_user_ty.clone());
+ }
+ if let Some(subpattern) = subpattern.as_ref() {
+ self.visit_primary_bindings(subpattern, pattern_user_ty, f);
+ }
+ }
+
+ PatKind::Array { ref prefix, ref slice, ref suffix }
+ | PatKind::Slice { ref prefix, ref slice, ref suffix } => {
+ let from = u64::try_from(prefix.len()).unwrap();
+ let to = u64::try_from(suffix.len()).unwrap();
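+ // `from` and `to` are the prefix and suffix lengths, so a slice
+ // subpattern covers the subslice `[from .. len - to]` of the place.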
+ for subpattern in prefix {
+ self.visit_primary_bindings(subpattern, pattern_user_ty.clone().index(), f);
+ }
+ for subpattern in slice {
+ self.visit_primary_bindings(
+ subpattern,
+ pattern_user_ty.clone().subslice(from, to),
+ f,
+ );
+ }
+ for subpattern in suffix {
+ self.visit_primary_bindings(subpattern, pattern_user_ty.clone().index(), f);
+ }
+ }
+
+ PatKind::Constant { .. } | PatKind::Range { .. } | PatKind::Wild => {}
+
+ PatKind::Deref { ref subpattern } => {
+ self.visit_primary_bindings(subpattern, pattern_user_ty.deref(), f);
+ }
+
+ PatKind::AscribeUserType {
+ ref subpattern,
+ ascription: thir::Ascription { ref annotation, variance: _ },
+ } => {
+ // This corresponds to something like
+ //
+ // ```
+ // let A::<'a>(_): A<'static> = ...;
+ // ```
+ //
+ // Note that the variance doesn't apply here, as we are tracking the effect
+ // of `user_ty` on any bindings contained within the subpattern.
+
+ let projection = UserTypeProjection {
+ base: self.canonical_user_type_annotations.push(annotation.clone()),
+ projs: Vec::new(),
+ };
+ let subpattern_user_ty =
+ pattern_user_ty.push_projection(&projection, annotation.span);
+ self.visit_primary_bindings(subpattern, subpattern_user_ty, f)
+ }
+
+ PatKind::Leaf { ref subpatterns } => {
+ for subpattern in subpatterns {
+ let subpattern_user_ty = pattern_user_ty.clone().leaf(subpattern.field);
+ debug!("visit_primary_bindings: subpattern_user_ty={:?}", subpattern_user_ty);
+ self.visit_primary_bindings(&subpattern.pattern, subpattern_user_ty, f);
+ }
+ }
+
+ PatKind::Variant { adt_def, substs: _, variant_index, ref subpatterns } => {
+ for subpattern in subpatterns {
+ let subpattern_user_ty =
+ pattern_user_ty.clone().variant(adt_def, variant_index, subpattern.field);
+ self.visit_primary_bindings(&subpattern.pattern, subpattern_user_ty, f);
+ }
+ }
+ PatKind::Or { ref pats } => {
+ // In cases where we recover from errors, the primary bindings
+ // may not all be in the leftmost subpattern. For example, in
+ // `let (x | y) = ...`, the primary binding of `y` occurs in
+ // the right subpattern.
+ for subpattern in pats {
+ self.visit_primary_bindings(subpattern, pattern_user_ty.clone(), f);
+ }
+ }
+ }
+ }
+}
+
+#[derive(Debug)]
+struct Candidate<'pat, 'tcx> {
+ /// [`Span`] of the original pattern that gave rise to this candidate.
+ span: Span,
+
+ /// Whether this `Candidate` has a guard.
+ has_guard: bool,
+
+ /// All of these must be satisfied...
+ match_pairs: SmallVec<[MatchPair<'pat, 'tcx>; 1]>,
+
+ /// ...these bindings established...
+ bindings: Vec<Binding<'tcx>>,
+
+ /// ...and these types asserted...
+ ascriptions: Vec<Ascription<'tcx>>,
+
+ /// ...and if this is non-empty, one of these subcandidates also has to match...
+ subcandidates: Vec<Candidate<'pat, 'tcx>>,
+
+ /// ...and the guard must be evaluated; if it's `false` then branch to `otherwise_block`.
+ otherwise_block: Option<BasicBlock>,
+
+ /// The block before the `bindings` have been established.
+ pre_binding_block: Option<BasicBlock>,
+ /// The pre-binding block of the next candidate.
+ next_candidate_pre_binding_block: Option<BasicBlock>,
+}
+
+impl<'tcx, 'pat> Candidate<'pat, 'tcx> {
+ fn new(place: PlaceBuilder<'tcx>, pattern: &'pat Pat<'tcx>, has_guard: bool) -> Self {
+ Candidate {
+ span: pattern.span,
+ has_guard,
+ match_pairs: smallvec![MatchPair { place, pattern }],
+ bindings: Vec::new(),
+ ascriptions: Vec::new(),
+ subcandidates: Vec::new(),
+ otherwise_block: None,
+ pre_binding_block: None,
+ next_candidate_pre_binding_block: None,
+ }
+ }
+
+ /// Visit the leaf candidates (those with no subcandidates) contained in
+ /// this candidate.
+ fn visit_leaves<'a>(&'a mut self, mut visit_leaf: impl FnMut(&'a mut Self)) {
+ traverse_candidate(
+ self,
+ &mut (),
+ &mut move |c, _| visit_leaf(c),
+ move |c, _| c.subcandidates.iter_mut(),
+ |_| {},
+ );
+ }
+}
+
+/// A depth-first traversal of the `Candidate` and all of its recursive
+/// subcandidates.
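+///
+/// As a minimal illustrative sketch (not taken from the compiler itself),
+/// counting the leaf candidates of a tree could be written as:
+///
+/// ```ignore (illustrative)
+/// let mut leaves = 0;
+/// traverse_candidate(
+///     &candidate,
+///     &mut leaves,
+///     &mut |_leaf, count| *count += 1,
+///     |c, _| c.subcandidates.iter(),
+///     |_| {},
+/// );
+/// ```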
+fn traverse_candidate<'pat, 'tcx: 'pat, C, T, I>(
+ candidate: C,
+ context: &mut T,
+ visit_leaf: &mut impl FnMut(C, &mut T),
+ get_children: impl Copy + Fn(C, &mut T) -> I,
+ complete_children: impl Copy + Fn(&mut T),
+) where
+ C: Borrow<Candidate<'pat, 'tcx>>,
+ I: Iterator<Item = C>,
+{
+ if candidate.borrow().subcandidates.is_empty() {
+ visit_leaf(candidate, context)
+ } else {
+ for child in get_children(candidate, context) {
+ traverse_candidate(child, context, visit_leaf, get_children, complete_children);
+ }
+ complete_children(context)
+ }
+}
+
+#[derive(Clone, Debug)]
+struct Binding<'tcx> {
+ span: Span,
+ source: Place<'tcx>,
+ var_id: LocalVarId,
+ binding_mode: BindingMode,
+}
+
+/// Indicates that the type of `source` must be a subtype of the
+/// user-given type `user_ty`; this is basically a no-op but can
+/// influence region inference.
+#[derive(Clone, Debug)]
+struct Ascription<'tcx> {
+ source: Place<'tcx>,
+ annotation: CanonicalUserTypeAnnotation<'tcx>,
+ variance: ty::Variance,
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct MatchPair<'pat, 'tcx> {
+ // this place...
+ place: PlaceBuilder<'tcx>,
+
+ // ... must match this pattern.
+ pattern: &'pat Pat<'tcx>,
+}
+
+/// See [`Test`] for more.
+#[derive(Clone, Debug, PartialEq)]
+enum TestKind<'tcx> {
+ /// Test what enum variant a value is.
+ Switch {
+ /// The enum type being tested.
+ adt_def: ty::AdtDef<'tcx>,
+ /// The set of variants that we should create a branch for. We also
+ /// create an additional "otherwise" case.
+ variants: BitSet<VariantIdx>,
+ },
+
+ /// Test what value an integer, `bool`, or `char` has.
+ SwitchInt {
+ /// The type of the value that we're testing.
+ switch_ty: Ty<'tcx>,
+ /// The (ordered) set of values that we test for.
+ ///
+ /// For integers and `char`s we create a branch to each of the values in
+ /// `options`, as well as an "otherwise" branch for all other values, even
+ /// in the (rare) case that `options` is exhaustive.
+ ///
+ /// For `bool` we always generate two edges, one for `true` and one for
+ /// `false`.
+ options: FxIndexMap<ConstantKind<'tcx>, u128>,
+ },
+
+ /// Test for equality with a value, possibly after an unsizing coercion to
+ /// `ty`.
+ Eq {
+ value: ConstantKind<'tcx>,
+ // Integer types are handled by `SwitchInt`, and constants with ADT
+ // types are converted back into patterns, so this can only be `&str`,
+ // `&[T]`, `f32` or `f64`.
+ ty: Ty<'tcx>,
+ },
+
+ /// Test whether the value falls within an inclusive or exclusive range.
+ Range(PatRange<'tcx>),
+
+ /// Test that the length of the slice is equal to `len`.
+ Len { len: u64, op: BinOp },
+}
+
+/// A test to perform to determine which [`Candidate`] matches a value.
+///
+/// [`Test`] is just the test to perform; it does not include the value
+/// to be tested.
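+///
+/// For example (an illustrative sketch, not drawn from a real test suite):
+/// matching `Some(0)` against an `Option<i32>` scrutinee first requires a
+/// [`TestKind::Switch`] on the enum variant, and then a
+/// [`TestKind::SwitchInt`] on the payload.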
+#[derive(Debug)]
+pub(crate) struct Test<'tcx> {
+ span: Span,
+ kind: TestKind<'tcx>,
+}
+
+/// `ArmHasGuard` is a wrapper around a boolean flag. It indicates whether
+/// a match arm has a guard expression attached to it.
+#[derive(Copy, Clone, Debug)]
+pub(crate) struct ArmHasGuard(pub(crate) bool);
+
+///////////////////////////////////////////////////////////////////////////
+// Main matching algorithm
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// The main match algorithm. It begins with a set of candidates
+ /// `candidates` and has the job of generating code to determine
+ /// which of these candidates, if any, is the correct one. The
+ /// candidates are sorted such that the first item in the list
+ /// has the highest priority. When a candidate is found to match
+ /// the value, we will set and generate a branch to the appropriate
+ /// pre-binding block.
+ ///
+ /// If we find that *NONE* of the candidates apply, we branch to the
+ /// `otherwise_block`, setting it to `Some` if required. In principle, this
+ /// means that the input list was not exhaustive, though at present we
+ /// sometimes are not smart enough to recognize all exhaustive inputs.
+ ///
+ /// It might be surprising that the input can be non-exhaustive.
+ /// Indeed, initially, it is not, because all matches are
+ /// exhaustive in Rust. But during processing we sometimes divide
+ /// up the list of candidates and recurse with a non-exhaustive
+ /// list. This is important to keep the size of the generated code
+ /// under control. See [`Builder::test_candidates`] for more details.
+ ///
+ /// If `fake_borrows` is `Some`, then places which need fake borrows
+ /// will be added to it.
+ ///
+ /// For an example of a case where we set `otherwise_block`, even for an
+ /// exhaustive match, consider:
+ ///
+ /// ```
+ /// # fn foo(x: (bool, bool)) {
+ /// match x {
+ /// (true, true) => (),
+ /// (_, false) => (),
+ /// (false, true) => (),
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// For this match, we check if `x.0` matches `true` (for the first
+ /// arm). If it doesn't match, we check `x.1`. If `x.1` is `true` we check
+ /// if `x.0` matches `false` (for the third arm). In the (impossible at
+ /// runtime) case when `x.0` is now `true`, we branch to
+ /// `otherwise_block`.
+ fn match_candidates<'pat>(
+ &mut self,
+ span: Span,
+ scrutinee_span: Span,
+ start_block: BasicBlock,
+ otherwise_block: &mut Option<BasicBlock>,
+ candidates: &mut [&mut Candidate<'pat, 'tcx>],
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ debug!(
+ "matched_candidate(span={:?}, candidates={:?}, start_block={:?}, otherwise_block={:?})",
+ span, candidates, start_block, otherwise_block,
+ );
+
+ // Start by simplifying candidates. Once this process is complete, all
+ // the match pairs which remain require some form of test, whether it
+ // be a switch or pattern comparison.
+ let mut split_or_candidate = false;
+ for candidate in &mut *candidates {
+ split_or_candidate |= self.simplify_candidate(candidate);
+ }
+
+ ensure_sufficient_stack(|| {
+ if split_or_candidate {
+ // At least one of the candidates has been split into subcandidates.
+ // We need to change the candidate list to include those.
+ let mut new_candidates = Vec::new();
+
+ for candidate in candidates {
+ candidate.visit_leaves(|leaf_candidate| new_candidates.push(leaf_candidate));
+ }
+ self.match_simplified_candidates(
+ span,
+ scrutinee_span,
+ start_block,
+ otherwise_block,
+ &mut *new_candidates,
+ fake_borrows,
+ );
+ } else {
+ self.match_simplified_candidates(
+ span,
+ scrutinee_span,
+ start_block,
+ otherwise_block,
+ candidates,
+ fake_borrows,
+ );
+ }
+ });
+ }
+
+ fn match_simplified_candidates(
+ &mut self,
+ span: Span,
+ scrutinee_span: Span,
+ start_block: BasicBlock,
+ otherwise_block: &mut Option<BasicBlock>,
+ candidates: &mut [&mut Candidate<'_, 'tcx>],
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ // The candidates are sorted by priority. Check to see whether the
+ // higher priority candidates (and hence at the front of the slice)
+ // have satisfied all their match pairs.
+ let fully_matched = candidates.iter().take_while(|c| c.match_pairs.is_empty()).count();
+ debug!("match_candidates: {:?} candidates fully matched", fully_matched);
+ let (matched_candidates, unmatched_candidates) = candidates.split_at_mut(fully_matched);
+
+ let block = if !matched_candidates.is_empty() {
+ let otherwise_block =
+ self.select_matched_candidates(matched_candidates, start_block, fake_borrows);
+
+ if let Some(last_otherwise_block) = otherwise_block {
+ last_otherwise_block
+ } else {
+ // Any remaining candidates are unreachable.
+ if unmatched_candidates.is_empty() {
+ return;
+ }
+ self.cfg.start_new_block()
+ }
+ } else {
+ start_block
+ };
+
+ // If there are no candidates that still need testing, we're
+ // done. Since all matches are exhaustive, execution should
+ // never reach this point.
+ if unmatched_candidates.is_empty() {
+ let source_info = self.source_info(span);
+ if let Some(otherwise) = *otherwise_block {
+ self.cfg.goto(block, source_info, otherwise);
+ } else {
+ *otherwise_block = Some(block);
+ }
+ return;
+ }
+
+ // Test for the remaining candidates.
+ self.test_candidates_with_or(
+ span,
+ scrutinee_span,
+ unmatched_candidates,
+ block,
+ otherwise_block,
+ fake_borrows,
+ );
+ }
+
+ /// Link up matched candidates.
+ ///
+ /// For example, if we have something like this:
+ ///
+ /// ```ignore (illustrative)
+ /// ...
+ /// Some(x) if cond1 => ...
+ /// Some(x) => ...
+ /// Some(x) if cond2 => ...
+ /// ...
+ /// ```
+ ///
+ /// We generate real edges from:
+ ///
+ /// * `start_block` to the [pre-binding block] of the first pattern,
+ /// * the [otherwise block] of the first pattern to the second pattern,
+ /// * the [otherwise block] of the third pattern to a block with an
+ /// [`Unreachable` terminator](TerminatorKind::Unreachable).
+ ///
+ /// In addition, we add fake edges from the otherwise blocks to the
+ /// pre-binding block of the next candidate in the original set of
+ /// candidates.
+ ///
+ /// [pre-binding block]: Candidate::pre_binding_block
+ /// [otherwise block]: Candidate::otherwise_block
+ fn select_matched_candidates(
+ &mut self,
+ matched_candidates: &mut [&mut Candidate<'_, 'tcx>],
+ start_block: BasicBlock,
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) -> Option<BasicBlock> {
+ debug_assert!(
+ !matched_candidates.is_empty(),
+ "select_matched_candidates called with no candidates",
+ );
+ debug_assert!(
+ matched_candidates.iter().all(|c| c.subcandidates.is_empty()),
+ "subcandidates should be empty in select_matched_candidates",
+ );
+
+ // Insert borrows of prefixes of places that are bound and are
+ // behind a dereference projection.
+ //
+ // These borrows are taken to avoid situations like the following:
+ //
+ // match x[10] {
+ // _ if { x = &[0]; false } => (),
+ // y => (), // Out of bounds array access!
+ // }
+ //
+ // match *x {
+ // // y is bound by reference in the guard and then by copy in the
+ // // arm, so y is 2 in the arm!
+ // y if { y == 1 && (x = &2) == () } => y,
+ // _ => 3,
+ // }
+ if let Some(fake_borrows) = fake_borrows {
+ for Binding { source, .. } in
+ matched_candidates.iter().flat_map(|candidate| &candidate.bindings)
+ {
+ if let Some(i) =
+ source.projection.iter().rposition(|elem| elem == ProjectionElem::Deref)
+ {
+ let proj_base = &source.projection[..i];
+
+ fake_borrows.insert(Place {
+ local: source.local,
+ projection: self.tcx.intern_place_elems(proj_base),
+ });
+ }
+ }
+ }
+
+ let fully_matched_with_guard = matched_candidates
+ .iter()
+ .position(|c| !c.has_guard)
+ .unwrap_or(matched_candidates.len() - 1);
+
+ let (reachable_candidates, unreachable_candidates) =
+ matched_candidates.split_at_mut(fully_matched_with_guard + 1);
+
+ let mut next_prebinding = start_block;
+
+ for candidate in reachable_candidates.iter_mut() {
+ assert!(candidate.otherwise_block.is_none());
+ assert!(candidate.pre_binding_block.is_none());
+ candidate.pre_binding_block = Some(next_prebinding);
+ if candidate.has_guard {
+ // Create the otherwise block for this candidate, which is the
+ // pre-binding block for the next candidate.
+ next_prebinding = self.cfg.start_new_block();
+ candidate.otherwise_block = Some(next_prebinding);
+ }
+ }
+
+ debug!(
+ "match_candidates: add pre_binding_blocks for unreachable {:?}",
+ unreachable_candidates,
+ );
+ for candidate in unreachable_candidates {
+ assert!(candidate.pre_binding_block.is_none());
+ candidate.pre_binding_block = Some(self.cfg.start_new_block());
+ }
+
+ reachable_candidates.last_mut().unwrap().otherwise_block
+ }
+
+ /// Tests a candidate where there are only or-patterns left to test, or
+ /// forwards to [Builder::test_candidates].
+ ///
+ /// Given a pattern `(P | Q, R | S)` we (in principle) generate a CFG like
+ /// so:
+ ///
+ /// ```text
+ /// [ start ]
+ /// |
+ /// [ match P, Q ]
+ /// |
+ /// +----------------------------------------+------------------------------------+
+ /// | | |
+ /// V V V
+ /// [ P matches ] [ Q matches ] [ otherwise ]
+ /// | | |
+ /// V V |
+ /// [ match R, S ] [ match R, S ] |
+ /// | | |
+ /// +--------------+------------+ +--------------+------------+ |
+ /// | | | | | | |
+ /// V V V V V V |
+ /// [ R matches ] [ S matches ] [otherwise ] [ R matches ] [ S matches ] [otherwise ] |
+ /// | | | | | | |
+ /// +--------------+------------|------------+--------------+ | |
+ /// | | | |
+ /// | +----------------------------------------+--------+
+ /// | |
+ /// V V
+ /// [ Success ] [ Failure ]
+ /// ```
+ ///
+ /// In practice there are some complications:
+ ///
+ /// * If there's a guard, then the otherwise branch of the first match on
+ /// `R | S` goes to a test for whether `Q` matches, and the control flow
+ /// doesn't merge into a single success block until after the guard is
+ /// tested.
+ /// * If neither `P` nor `Q` has any bindings or type ascriptions and there
+ /// isn't a match guard, then we create a smaller CFG like:
+ ///
+ /// ```text
+ /// ...
+ /// +---------------+------------+
+ /// | | |
+ /// [ P matches ] [ Q matches ] [ otherwise ]
+ /// | | |
+ /// +---------------+ |
+ /// | ...
+ /// [ match R, S ]
+ /// |
+ /// ...
+ /// ```
+ fn test_candidates_with_or(
+ &mut self,
+ span: Span,
+ scrutinee_span: Span,
+ candidates: &mut [&mut Candidate<'_, 'tcx>],
+ block: BasicBlock,
+ otherwise_block: &mut Option<BasicBlock>,
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ let (first_candidate, remaining_candidates) = candidates.split_first_mut().unwrap();
+
+ // All of the or-patterns have been sorted to the end, so if the first
+ // pattern is an or-pattern we only have or-patterns.
+ match *first_candidate.match_pairs[0].pattern.kind {
+ PatKind::Or { .. } => (),
+ _ => {
+ self.test_candidates(
+ span,
+ scrutinee_span,
+ candidates,
+ block,
+ otherwise_block,
+ fake_borrows,
+ );
+ return;
+ }
+ }
+
+ let match_pairs = mem::take(&mut first_candidate.match_pairs);
+ first_candidate.pre_binding_block = Some(block);
+
+ let mut otherwise = None;
+ for match_pair in match_pairs {
+ let PatKind::Or { ref pats } = &*match_pair.pattern.kind else {
+ bug!("Or-patterns should have been sorted to the end");
+ };
+ let or_span = match_pair.pattern.span;
+ let place = match_pair.place;
+
+ first_candidate.visit_leaves(|leaf_candidate| {
+ self.test_or_pattern(
+ leaf_candidate,
+ &mut otherwise,
+ pats,
+ or_span,
+ place.clone(),
+ fake_borrows,
+ );
+ });
+ }
+
+ let remainder_start = otherwise.unwrap_or_else(|| self.cfg.start_new_block());
+
+ self.match_candidates(
+ span,
+ scrutinee_span,
+ remainder_start,
+ otherwise_block,
+ remaining_candidates,
+ fake_borrows,
+ )
+ }
+
+ fn test_or_pattern<'pat>(
+ &mut self,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ otherwise: &mut Option<BasicBlock>,
+ pats: &'pat [Pat<'tcx>],
+ or_span: Span,
+ place: PlaceBuilder<'tcx>,
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ debug!("test_or_pattern:\ncandidate={:#?}\npats={:#?}", candidate, pats);
+ let mut or_candidates: Vec<_> = pats
+ .iter()
+ .map(|pat| Candidate::new(place.clone(), pat, candidate.has_guard))
+ .collect();
+ let mut or_candidate_refs: Vec<_> = or_candidates.iter_mut().collect();
+ let otherwise = if candidate.otherwise_block.is_some() {
+ &mut candidate.otherwise_block
+ } else {
+ otherwise
+ };
+ self.match_candidates(
+ or_span,
+ or_span,
+ candidate.pre_binding_block.unwrap(),
+ otherwise,
+ &mut or_candidate_refs,
+ fake_borrows,
+ );
+ candidate.subcandidates = or_candidates;
+ self.merge_trivial_subcandidates(candidate, self.source_info(or_span));
+ }
+
+ /// Try to merge all of the subcandidates of the given candidate into one.
+ /// This avoids exponentially large CFGs in cases like `(1 | 2, 3 | 4, ...)`.
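+ ///
+ /// For example (illustrative): for `1 | 2`, instead of keeping two separate
+ /// success blocks, both "matched" edges are routed into a single shared
+ /// block, so any remaining match pairs are only tested once.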
+ fn merge_trivial_subcandidates(
+ &mut self,
+ candidate: &mut Candidate<'_, 'tcx>,
+ source_info: SourceInfo,
+ ) {
+ if candidate.subcandidates.is_empty() || candidate.has_guard {
+ // FIXME(or_patterns; matthewjasper) Don't give up if we have a guard.
+ return;
+ }
+
+ let mut can_merge = true;
+
+ // Not `Iterator::all` because we don't want to short-circuit.
+ for subcandidate in &mut candidate.subcandidates {
+ self.merge_trivial_subcandidates(subcandidate, source_info);
+
+ // FIXME(or_patterns; matthewjasper) Try to be more aggressive here.
+ can_merge &= subcandidate.subcandidates.is_empty()
+ && subcandidate.bindings.is_empty()
+ && subcandidate.ascriptions.is_empty();
+ }
+
+ if can_merge {
+ let any_matches = self.cfg.start_new_block();
+ for subcandidate in mem::take(&mut candidate.subcandidates) {
+ let or_block = subcandidate.pre_binding_block.unwrap();
+ self.cfg.goto(or_block, source_info, any_matches);
+ }
+ candidate.pre_binding_block = Some(any_matches);
+ }
+ }
+
+ /// This is the most subtle part of the matching algorithm. At
+ /// this point, the input candidates have been fully simplified,
+ /// and so we know that all remaining match-pairs require some
+ /// sort of test. To decide what test to perform, we take the highest
+ /// priority candidate (the first one in the list, as of January 2021)
+ /// and extract the first match-pair from the list. From this we decide
+ /// what kind of test is needed using [`Builder::test`], defined in the
+ /// [`test` module](mod@test).
+ ///
+ /// *Note:* taking the first match pair is somewhat arbitrary, and
+ /// we might do better here by choosing more carefully what to
+ /// test.
+ ///
+ /// For example, consider the following possible match-pairs:
+ ///
+ /// 1. `x @ Some(P)` -- we will do a [`Switch`] to decide what variant `x` has
+ /// 2. `x @ 22` -- we will do a [`SwitchInt`] to decide what value `x` has
+ /// 3. `x @ 3..5` -- we will do a [`Range`] test to decide what range `x` falls in
+ /// 4. etc.
+ ///
+ /// [`Switch`]: TestKind::Switch
+ /// [`SwitchInt`]: TestKind::SwitchInt
+ /// [`Range`]: TestKind::Range
+ ///
+ /// Once we know what sort of test we are going to perform, this
+ /// test may also help us winnow down our candidates. So we walk over
+ /// the candidates (from high to low priority) and check. This
+ /// gives us, for each outcome of the test, a transformed list of
+ /// candidates. For example, if we are testing `x.0`'s variant,
+ /// and we have a candidate `(x.0 @ Some(v), x.1 @ 22)`,
+ /// then we would have a resulting candidate of `((x.0 as Some).0 @ v, x.1 @ 22)`.
+ /// Note that the first match-pair is now simpler (and, in fact, irrefutable).
+ ///
+ /// But there may also be candidates that the test just doesn't
+ /// apply to. The classical example involves wildcards:
+ ///
+ /// ```
+ /// # let (x, y, z) = (true, true, true);
+ /// match (x, y, z) {
+ /// (true , _ , true ) => true, // (0)
+ /// (_ , true , _ ) => true, // (1)
+ /// (false, false, _ ) => false, // (2)
+ /// (true , _ , false) => false, // (3)
+ /// }
+ /// # ;
+ /// ```
+ ///
+ /// In that case, after we test on `x`, there are 2 overlapping candidate
+ /// sets:
+ ///
+ /// - If the outcome is that `x` is true, candidates 0, 1, and 3
+ /// - If the outcome is that `x` is false, candidates 1 and 2
+ ///
+ /// Here, the traditional "decision tree" method would generate 2
+ /// separate code-paths for the 2 separate cases.
+ ///
+ /// In some cases, this duplication can create an exponential amount of
+ /// code. This is most easily seen by noticing that this method terminates
+ /// with precisely the reachable arms being reachable - but that problem
+ /// is trivially NP-complete:
+ ///
+ /// ```ignore (illustrative)
+ /// match (var0, var1, var2, var3, ...) {
+ /// (true , _ , _ , false, true, ...) => false,
+ /// (_ , true, true , false, _ , ...) => false,
+ /// (false, _ , false, false, _ , ...) => false,
+ /// ...
+ /// _ => true
+ /// }
+ /// ```
+ ///
+ /// Here the last arm is reachable only if there is an assignment to
+ /// the variables that does not match any of the literals. Therefore,
+ /// compilation would take an exponential amount of time in some cases.
+ ///
+ /// That kind of exponential worst-case might not occur in practice, but
+ /// our simplistic treatment of constants and guards would make it occur
+ /// in very common situations - for example [#29740]:
+ ///
+ /// ```ignore (illustrative)
+ /// match x {
+ /// "foo" if foo_guard => ...,
+ /// "bar" if bar_guard => ...,
+ /// "baz" if baz_guard => ...,
+ /// ...
+ /// }
+ /// ```
+ ///
+ /// [#29740]: https://github.com/rust-lang/rust/issues/29740
+ ///
+ /// Here we first test the match-pair `x @ "foo"`, which is an [`Eq` test].
+ ///
+ /// [`Eq` test]: TestKind::Eq
+ ///
+ /// It might seem that we would end up with 2 disjoint candidate
+ /// sets, consisting of the first candidate or the other two, but our
+ /// algorithm doesn't reason about `"foo"` being distinct from the other
+ /// constants; it considers the latter arms to potentially match after
+ /// both outcomes, which obviously leads to an exponential number
+ /// of tests.
+ ///
+ /// To avoid these kinds of problems, our algorithm tries to ensure
+ /// the amount of generated tests is linear. When we do a k-way test,
+ /// we return an additional "unmatched" set alongside the obvious `k`
+ /// sets. When we encounter a candidate that would be present in more
+ /// than one of the sets, we put it and all candidates below it into the
+ /// "unmatched" set. This ensures these `k+1` sets are disjoint.
+ ///
+ /// After we perform our test, we branch into the appropriate candidate
+ /// set and recurse with `match_candidates`. These sub-matches are
+ /// obviously non-exhaustive - as we discarded our otherwise set - so
+ /// we set their continuation to do `match_candidates` on the
+ /// "unmatched" set (which is again non-exhaustive).
+ ///
+ /// If you apply this to the above test, you basically wind up
+ /// with an if-else-if chain, testing each candidate in turn,
+ /// which is precisely what we want.
+ ///
+ /// In addition to avoiding exponential-time blowups, this algorithm
+ /// also has the nice property that each guard and arm is only generated
+ /// once.
+ fn test_candidates<'pat, 'b, 'c>(
+ &mut self,
+ span: Span,
+ scrutinee_span: Span,
+ mut candidates: &'b mut [&'c mut Candidate<'pat, 'tcx>],
+ block: BasicBlock,
+ otherwise_block: &mut Option<BasicBlock>,
+ fake_borrows: &mut Option<FxIndexSet<Place<'tcx>>>,
+ ) {
+ // extract the match-pair from the highest priority candidate
+ let match_pair = &candidates.first().unwrap().match_pairs[0];
+ let mut test = self.test(match_pair);
+ let match_place = match_pair.place.clone();
+
+ // most of the time, the test to perform is simply a function
+ // of the main candidate; but for a test like SwitchInt, we
+ // may want to add cases based on the candidates that are
+ // available
+ match test.kind {
+ TestKind::SwitchInt { switch_ty, ref mut options } => {
+ for candidate in candidates.iter() {
+ if !self.add_cases_to_switch(&match_place, candidate, switch_ty, options) {
+ break;
+ }
+ }
+ }
+ TestKind::Switch { adt_def: _, ref mut variants } => {
+ for candidate in candidates.iter() {
+ if !self.add_variants_to_switch(&match_place, candidate, variants) {
+ break;
+ }
+ }
+ }
+ _ => {}
+ }
+
+ // Insert a Shallow borrow of any place that is switched on.
+ if let Some(fb) = fake_borrows && let Ok(match_place_resolved) =
+ match_place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ let resolved_place = match_place_resolved.into_place(self.tcx, self.typeck_results);
+ fb.insert(resolved_place);
+ }
+
+ // perform the test, branching to one of N blocks. For each of
+ // those N possible outcomes, create an (initially empty)
+ // vector of candidates. Those are the candidates that still
+ // apply if the test has that particular outcome.
+ debug!("test_candidates: test={:?} match_pair={:?}", test, match_pair);
+ let mut target_candidates: Vec<Vec<&mut Candidate<'pat, 'tcx>>> = vec![];
+ target_candidates.resize_with(test.targets(), Default::default);
+
+ let total_candidate_count = candidates.len();
+
+ // Sort the candidates into the appropriate vector in
+ // `target_candidates`. Note that at some point we may
+ // encounter a candidate where the test is not relevant; at
+ // that point, we stop sorting.
+ while let Some(candidate) = candidates.first_mut() {
+ let Some(idx) = self.sort_candidate(&match_place.clone(), &test, candidate) else {
+ break;
+ };
+ let (candidate, rest) = candidates.split_first_mut().unwrap();
+ target_candidates[idx].push(candidate);
+ candidates = rest;
+ }
+ // at least the first candidate ought to be tested
+ assert!(total_candidate_count > candidates.len());
+ debug!("test_candidates: tested_candidates: {}", total_candidate_count - candidates.len());
+ debug!("test_candidates: untested_candidates: {}", candidates.len());
+
+ // HACK(matthewjasper) This is a closure so that we can let the test
+ // create its blocks before the rest of the match. This currently
+ // improves the speed of LLVM when optimizing long string literal
+ // matches.
+ let make_target_blocks = move |this: &mut Self| -> Vec<BasicBlock> {
+ // The block that we should branch to if none of the
+ // `target_candidates` match. This is either the block where we
+ // start matching the untested candidates if there are any,
+ // otherwise it's the `otherwise_block`.
+ let remainder_start = &mut None;
+ let remainder_start =
+ if candidates.is_empty() { &mut *otherwise_block } else { remainder_start };
+
+ // For each outcome of test, process the candidates that still
+ // apply. Collect a list of blocks where control flow will
+ // branch if one of the `target_candidate` sets is not
+ // exhaustive.
+ let target_blocks: Vec<_> = target_candidates
+ .into_iter()
+ .map(|mut candidates| {
+ if !candidates.is_empty() {
+ let candidate_start = this.cfg.start_new_block();
+ this.match_candidates(
+ span,
+ scrutinee_span,
+ candidate_start,
+ remainder_start,
+ &mut *candidates,
+ fake_borrows,
+ );
+ candidate_start
+ } else {
+ *remainder_start.get_or_insert_with(|| this.cfg.start_new_block())
+ }
+ })
+ .collect();
+
+ if !candidates.is_empty() {
+ let remainder_start = remainder_start.unwrap_or_else(|| this.cfg.start_new_block());
+ this.match_candidates(
+ span,
+ scrutinee_span,
+ remainder_start,
+ otherwise_block,
+ candidates,
+ fake_borrows,
+ );
+ };
+
+ target_blocks
+ };
+
+ self.perform_test(span, scrutinee_span, block, match_place, &test, make_target_blocks);
+ }
+
+ /// Determine the fake borrows that are needed from a set of places that
+ /// have to be stable across match guards.
+ ///
+ /// Returns a list of places that need a fake borrow and the temporary
+ /// that's used to store the fake borrow.
+ ///
+ /// Match exhaustiveness checking is not able to handle the case where the
+ /// place being matched on is mutated in the guards. We add "fake borrows"
+ /// to the guards that prevent any mutation of the place being matched.
+ /// There are some subtleties:
+ ///
+ /// 1. Borrowing `*x` doesn't prevent assigning to `x`. If `x` is a shared
+ /// reference, the borrow isn't even tracked. As such we have to add fake
+ /// borrows of any prefixes of a place.
+ /// 2. We don't want `match x { _ => (), }` to conflict with mutable
+ /// borrows of `x`, so we only add fake borrows for places which are
+ /// bound or tested by the match.
+ /// 3. We don't want the fake borrows to conflict with `ref mut` bindings,
+ /// so we use a special BorrowKind for them.
+ /// 4. The fake borrows may be of places in inactive variants, so it would
+ /// be UB to generate code for them. They therefore have to be removed
+ /// by a MIR pass run after borrow checking.
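+ ///
+ /// As an illustrative sketch (not drawn from a real test): if a match binds
+ /// or tests `(*x).0`, we fake-borrow both `(*x).0` itself and the prefix
+ /// `x` before the deref, so that a guard can mutate neither `(*x).0` nor
+ /// `x` itself.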
+ fn calculate_fake_borrows<'b>(
+ &mut self,
+ fake_borrows: &'b FxIndexSet<Place<'tcx>>,
+ temp_span: Span,
+ ) -> Vec<(Place<'tcx>, Local)> {
+ let tcx = self.tcx;
+
+ debug!("add_fake_borrows fake_borrows = {:?}", fake_borrows);
+
+ let mut all_fake_borrows = Vec::with_capacity(fake_borrows.len());
+
+ // Insert a Shallow borrow of the prefixes of any fake borrows.
+ for place in fake_borrows {
+ let mut cursor = place.projection.as_ref();
+ while let [proj_base @ .., elem] = cursor {
+ cursor = proj_base;
+
+ if let ProjectionElem::Deref = elem {
+ // Insert a shallow borrow after a deref. For other
+ // projections, the borrow of the prefix will
+ // conflict with any mutation of the base.
+ all_fake_borrows.push(PlaceRef { local: place.local, projection: proj_base });
+ }
+ }
+
+ all_fake_borrows.push(place.as_ref());
+ }
+
+ // Deduplicate
+ let mut dedup = FxHashSet::default();
+ all_fake_borrows.retain(|b| dedup.insert(*b));
+
+ debug!("add_fake_borrows all_fake_borrows = {:?}", all_fake_borrows);
+
+ all_fake_borrows
+ .into_iter()
+ .map(|matched_place_ref| {
+ let matched_place = Place {
+ local: matched_place_ref.local,
+ projection: tcx.intern_place_elems(matched_place_ref.projection),
+ };
+ let fake_borrow_deref_ty = matched_place.ty(&self.local_decls, tcx).ty;
+ let fake_borrow_ty = tcx.mk_imm_ref(tcx.lifetimes.re_erased, fake_borrow_deref_ty);
+ let fake_borrow_temp =
+ self.local_decls.push(LocalDecl::new(fake_borrow_ty, temp_span));
+
+ (matched_place, fake_borrow_temp)
+ })
+ .collect()
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Pat binding - used for `let` and function parameters as well.
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ pub(crate) fn lower_let_expr(
+ &mut self,
+ mut block: BasicBlock,
+ expr: &Expr<'tcx>,
+ pat: &Pat<'tcx>,
+ else_target: region::Scope,
+ source_scope: Option<SourceScope>,
+ span: Span,
+ ) -> BlockAnd<()> {
+ let expr_span = expr.span;
+ let expr_place_builder = unpack!(block = self.lower_scrutinee(block, expr, expr_span));
+ let wildcard = Pat::wildcard_from_ty(pat.ty);
+ let mut guard_candidate = Candidate::new(expr_place_builder.clone(), &pat, false);
+ let mut otherwise_candidate = Candidate::new(expr_place_builder.clone(), &wildcard, false);
+ let fake_borrow_temps = self.lower_match_tree(
+ block,
+ pat.span,
+ pat.span,
+ false,
+ &mut [&mut guard_candidate, &mut otherwise_candidate],
+ );
+ let mut opt_expr_place: Option<(Option<&Place<'tcx>>, Span)> = None;
+ let expr_place: Place<'tcx>;
+ if let Ok(expr_builder) =
+ expr_place_builder.try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ expr_place = expr_builder.into_place(self.tcx, self.typeck_results);
+ opt_expr_place = Some((Some(&expr_place), expr_span));
+ }
+ let otherwise_post_guard_block = otherwise_candidate.pre_binding_block.unwrap();
+ self.break_for_else(otherwise_post_guard_block, else_target, self.source_info(expr_span));
+
+ self.declare_bindings(
+ source_scope,
+ pat.span.to(span),
+ pat,
+ ArmHasGuard(false),
+ opt_expr_place,
+ );
+
+ let post_guard_block = self.bind_pattern(
+ self.source_info(pat.span),
+ guard_candidate,
+ None,
+ &fake_borrow_temps,
+ expr.span,
+ None,
+ None,
+ None,
+ );
+
+ post_guard_block.unit()
+ }
+
+ /// Initializes each of the bindings from the candidate by
+ /// moving/copying/ref'ing the source as appropriate. Tests the guard, if
+ /// any, and then branches to the arm. Returns the block for the case where
+ /// the guard succeeds.
+ ///
+ /// Note: we do not check earlier that if there is a guard,
+ /// there cannot be move bindings. We avoid a use-after-move by only
+ /// moving the binding once the guard has evaluated to true (see below).
+ fn bind_and_guard_matched_candidate<'pat>(
+ &mut self,
+ candidate: Candidate<'pat, 'tcx>,
+ parent_bindings: &[(Vec<Binding<'tcx>>, Vec<Ascription<'tcx>>)],
+ guard: Option<&Guard<'tcx>>,
+ fake_borrows: &[(Place<'tcx>, Local)],
+ scrutinee_span: Span,
+ arm_span: Option<Span>,
+ match_scope: Option<region::Scope>,
+ schedule_drops: bool,
+ ) -> BasicBlock {
+ debug!("bind_and_guard_matched_candidate(candidate={:?})", candidate);
+
+ debug_assert!(candidate.match_pairs.is_empty());
+
+ let candidate_source_info = self.source_info(candidate.span);
+
+ let mut block = candidate.pre_binding_block.unwrap();
+
+ if candidate.next_candidate_pre_binding_block.is_some() {
+ let fresh_block = self.cfg.start_new_block();
+ self.false_edges(
+ block,
+ fresh_block,
+ candidate.next_candidate_pre_binding_block,
+ candidate_source_info,
+ );
+ block = fresh_block;
+ }
+
+ self.ascribe_types(
+ block,
+ parent_bindings
+ .iter()
+ .flat_map(|(_, ascriptions)| ascriptions)
+ .cloned()
+ .chain(candidate.ascriptions),
+ );
+
+ // rust-lang/rust#27282: The `autoref` business deserves some
+ // explanation here.
+ //
+ // The intent of the `autoref` flag is that when it is true,
+ // then any pattern bindings of type T will map to a `&T`
+ // within the context of the guard expression, but will
+ // continue to map to a `T` in the context of the arm body. To
+ // avoid surfacing this distinction in the user source code
+ // (which would be a severe change to the language and require
+ // far more revision to the compiler), when `autoref` is true,
+ // then any occurrence of the identifier in the guard
+ // expression will automatically get a deref op applied to it.
+ //
+ // So an input like:
+ //
+ // ```
+ // let place = Foo::new();
+ // match place { foo if inspect(foo)
+ // => feed(foo), ... }
+ // ```
+ //
+ // will be treated as if it were really something like:
+ //
+ // ```
+ // let place = Foo::new();
+ // match place { Foo { .. } if { let tmp1 = &place; inspect(*tmp1) }
+ // => { let tmp2 = place; feed(tmp2) }, ... }
+ // ```
+ //
+ // And an input like:
+ //
+ // ```
+ // let place = Foo::new();
+ // match place { ref mut foo if inspect(foo)
+ // => feed(foo), ... }
+ // ```
+ //
+ // will be treated as if it were really something like:
+ //
+ // ```
+ // let place = Foo::new();
+ // match place { Foo { .. } if { let tmp1 = & &mut place; inspect(*tmp1) }
+ // => { let tmp2 = &mut place; feed(tmp2) }, ... }
+ // ```
+ //
+ // In short, any pattern binding will always look like *some*
+ // kind of `&T` within the guard at least in terms of how the
+ // MIR-borrowck views it, and this will ensure that guard
+ // expressions cannot mutate the match inputs via such
+ // bindings. (It also ensures that guard expressions can at
+ // most *copy* values from such bindings; non-Copy things
+ // cannot be moved via pattern bindings in guard expressions.)
+ //
+ // ----
+ //
+ // Implementation notes (under assumption `autoref` is true).
+ //
+ // To encode the distinction above, we must inject the
+ // temporaries `tmp1` and `tmp2`.
+ //
+ // There are two cases of interest: binding by-value, and binding by-ref.
+ //
+ // 1. Binding by-value: Things are simple.
+ //
+ // * Establishing `tmp1` creates a reference into the
+ // matched place. This code is emitted by
+ // bind_matched_candidate_for_guard.
+ //
+ // * `tmp2` is only initialized "lazily", after we have
+ // checked the guard. Thus, the code that can trigger
+ // moves out of the candidate can only fire after the
+ // guard evaluated to true. This initialization code is
+ // emitted by bind_matched_candidate_for_arm.
+ //
+ // 2. Binding by-reference: Things are tricky.
+ //
+ // * Here, the guard expression wants a `&&` or `&&mut`
+ // into the original input. This means we need to borrow
+ // the reference that we create for the arm.
+ // * So we eagerly create the reference for the arm and then take a
+ // reference to that.
+ if let Some(guard) = guard {
+ let tcx = self.tcx;
+ let bindings = parent_bindings
+ .iter()
+ .flat_map(|(bindings, _)| bindings)
+ .chain(&candidate.bindings);
+
+ self.bind_matched_candidate_for_guard(block, schedule_drops, bindings.clone());
+ let guard_frame = GuardFrame {
+ locals: bindings.map(|b| GuardFrameLocal::new(b.var_id, b.binding_mode)).collect(),
+ };
+ debug!("entering guard building context: {:?}", guard_frame);
+ self.guard_context.push(guard_frame);
+
+ let re_erased = tcx.lifetimes.re_erased;
+ let scrutinee_source_info = self.source_info(scrutinee_span);
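+ // Materialize the fake borrows: each one is a `Shallow` borrow of the
+ // matched place, assigned into its dedicated temporary, so that any
+ // mutation of that place inside the guard conflicts with the borrow.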
+ for &(place, temp) in fake_borrows {
+ let borrow = Rvalue::Ref(re_erased, BorrowKind::Shallow, place);
+ self.cfg.push_assign(block, scrutinee_source_info, Place::from(temp), borrow);
+ }
+
+ let arm_span = arm_span.unwrap();
+ let match_scope = match_scope.unwrap();
+ let mut guard_span = rustc_span::DUMMY_SP;
+
+ let (post_guard_block, otherwise_post_guard_block) =
+ self.in_if_then_scope(match_scope, |this| match *guard {
+ Guard::If(e) => {
+ let e = &this.thir[e];
+ guard_span = e.span;
+ this.then_else_break(
+ block,
+ e,
+ None,
+ match_scope,
+ this.source_info(arm_span),
+ )
+ }
+ Guard::IfLet(ref pat, scrutinee) => {
+ let s = &this.thir[scrutinee];
+ guard_span = s.span;
+ this.lower_let_expr(block, s, pat, match_scope, None, arm_span)
+ }
+ });
+
+ let source_info = self.source_info(guard_span);
+ let guard_end = self.source_info(tcx.sess.source_map().end_point(guard_span));
+ let guard_frame = self.guard_context.pop().unwrap();
+ debug!("Exiting guard building context with locals: {:?}", guard_frame);
+
+ for &(_, temp) in fake_borrows {
+ let cause = FakeReadCause::ForMatchGuard;
+ self.cfg.push_fake_read(post_guard_block, guard_end, cause, Place::from(temp));
+ }
+
+ let otherwise_block = candidate.otherwise_block.unwrap_or_else(|| {
+ let unreachable = self.cfg.start_new_block();
+ self.cfg.terminate(unreachable, source_info, TerminatorKind::Unreachable);
+ unreachable
+ });
+ self.false_edges(
+ otherwise_post_guard_block,
+ otherwise_block,
+ candidate.next_candidate_pre_binding_block,
+ source_info,
+ );
+
+ // We want to ensure that the matched candidates are bound
+ // after we have confirmed this candidate *and* any
+ // associated guard; Binding them on `block` is too soon,
+ // because that would be before we've checked the result
+ // from the guard.
+ //
+ // But binding them on the arm is *too late*, because
+ // then all of the candidates for a single arm would be
+ // bound in the same place, that would cause a case like:
+ //
+ // ```rust
+ // match (30, 2) {
+ // (mut x, 1) | (2, mut x) if { true } => { ... }
+ // ... // ^^^^^^^ (this is `arm_block`)
+ // }
+ // ```
+ //
+ // would yield an `arm_block` something like:
+ //
+ // ```
+ // StorageLive(_4); // _4 is `x`
+ // _4 = &mut (_1.0: i32); // this is handling `(mut x, 1)` case
+ // _4 = &mut (_1.1: i32); // this is handling `(2, mut x)` case
+ // ```
+ //
+ // and that is clearly not correct.
+ let by_value_bindings = parent_bindings
+ .iter()
+ .flat_map(|(bindings, _)| bindings)
+ .chain(&candidate.bindings)
+ .filter(|binding| matches!(binding.binding_mode, BindingMode::ByValue));
+ // Read each by-value binding's `RefWithinGuard` local to ensure
+ // that the place it refers to can't be modified by the guard.
+ for binding in by_value_bindings.clone() {
+ let local_id = self.var_local_id(binding.var_id, RefWithinGuard);
+ let cause = FakeReadCause::ForGuardBinding;
+ self.cfg.push_fake_read(post_guard_block, guard_end, cause, Place::from(local_id));
+ }
+ assert!(schedule_drops, "patterns with guards must schedule drops");
+ self.bind_matched_candidate_for_arm_body(post_guard_block, true, by_value_bindings);
+
+ post_guard_block
+ } else {
+ // (Here, it is not too early to bind the matched
+ // candidate on `block`, because there is no guard result
+ // that we have to inspect before we bind them.)
+ self.bind_matched_candidate_for_arm_body(
+ block,
+ schedule_drops,
+ parent_bindings
+ .iter()
+ .flat_map(|(bindings, _)| bindings)
+ .chain(&candidate.bindings),
+ );
+ block
+ }
+ }
+
+ /// Append `AscribeUserType` statements onto the end of `block`
+ /// for each ascription
+ fn ascribe_types(
+ &mut self,
+ block: BasicBlock,
+ ascriptions: impl IntoIterator<Item = Ascription<'tcx>>,
+ ) {
+ for ascription in ascriptions {
+ let source_info = self.source_info(ascription.annotation.span);
+
+ let base = self.canonical_user_type_annotations.push(ascription.annotation);
+ self.cfg.push(
+ block,
+ Statement {
+ source_info,
+ kind: StatementKind::AscribeUserType(
+ Box::new((
+ ascription.source,
+ UserTypeProjection { base, projs: Vec::new() },
+ )),
+ ascription.variance,
+ ),
+ },
+ );
+ }
+ }
+
+ fn bind_matched_candidate_for_guard<'b>(
+ &mut self,
+ block: BasicBlock,
+ schedule_drops: bool,
+ bindings: impl IntoIterator<Item = &'b Binding<'tcx>>,
+ ) where
+ 'tcx: 'b,
+ {
+ debug!("bind_matched_candidate_for_guard(block={:?})", block);
+
+ // Assign each of the bindings. Since we are binding for a
+ // guard expression, this will never trigger moves out of the
+ // candidate.
+ let re_erased = self.tcx.lifetimes.re_erased;
+ for binding in bindings {
+ debug!("bind_matched_candidate_for_guard(binding={:?})", binding);
+ let source_info = self.source_info(binding.span);
+
+ // For each pattern ident P of type T, `ref_for_guard` is
+ // a reference R: &T pointing to the location matched by
+ // the pattern, and every occurrence of P within a guard
+ // denotes *R.
+ let ref_for_guard = self.storage_live_binding(
+ block,
+ binding.var_id,
+ binding.span,
+ RefWithinGuard,
+ schedule_drops,
+ );
+ match binding.binding_mode {
+ BindingMode::ByValue => {
+ let rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, binding.source);
+ self.cfg.push_assign(block, source_info, ref_for_guard, rvalue);
+ }
+ BindingMode::ByRef(borrow_kind) => {
+ let value_for_arm = self.storage_live_binding(
+ block,
+ binding.var_id,
+ binding.span,
+ OutsideGuard,
+ schedule_drops,
+ );
+
+ let rvalue = Rvalue::Ref(re_erased, borrow_kind, binding.source);
+ self.cfg.push_assign(block, source_info, value_for_arm, rvalue);
+ let rvalue = Rvalue::Ref(re_erased, BorrowKind::Shared, value_for_arm);
+ self.cfg.push_assign(block, source_info, ref_for_guard, rvalue);
+ }
+ }
+ }
+ }
+
+ fn bind_matched_candidate_for_arm_body<'b>(
+ &mut self,
+ block: BasicBlock,
+ schedule_drops: bool,
+ bindings: impl IntoIterator<Item = &'b Binding<'tcx>>,
+ ) where
+ 'tcx: 'b,
+ {
+ debug!("bind_matched_candidate_for_arm_body(block={:?})", block);
+
+ let re_erased = self.tcx.lifetimes.re_erased;
+ // Assign each of the bindings. This may trigger moves out of the candidate.
+ for binding in bindings {
+ let source_info = self.source_info(binding.span);
+ let local = self.storage_live_binding(
+ block,
+ binding.var_id,
+ binding.span,
+ OutsideGuard,
+ schedule_drops,
+ );
+ if schedule_drops {
+ self.schedule_drop_for_binding(binding.var_id, binding.span, OutsideGuard);
+ }
+ let rvalue = match binding.binding_mode {
+ BindingMode::ByValue => Rvalue::Use(self.consume_by_copy_or_move(binding.source)),
+ BindingMode::ByRef(borrow_kind) => {
+ Rvalue::Ref(re_erased, borrow_kind, binding.source)
+ }
+ };
+ self.cfg.push_assign(block, source_info, local, rvalue);
+ }
+ }
+
+ /// Each binding (`ref mut var`/`ref var`/`mut var`/`var`, where the bound
+ /// `var` has type `T` in the arm body) in a pattern maps to 2 locals. The
+ /// first local is a binding for occurrences of `var` in the guard, which
+ /// will have type `&T`. The second local is a binding for occurrences of
+ /// `var` in the arm body, which will have type `T`.
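+ ///
+ /// As an illustrative example (not from a real test): in
+ /// `match opt { Some(x) if check(&x) => x, _ => 0 }`, occurrences of `x`
+ /// in the guard read through the `&T` local, while the `x` returned from
+ /// the arm body uses the separate `T` local.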
+ fn declare_binding(
+ &mut self,
+ source_info: SourceInfo,
+ visibility_scope: SourceScope,
+ mutability: Mutability,
+ name: Symbol,
+ mode: BindingMode,
+ var_id: LocalVarId,
+ var_ty: Ty<'tcx>,
+ user_ty: UserTypeProjections,
+ has_guard: ArmHasGuard,
+ opt_match_place: Option<(Option<Place<'tcx>>, Span)>,
+ pat_span: Span,
+ ) {
+ debug!(
+ "declare_binding(var_id={:?}, name={:?}, mode={:?}, var_ty={:?}, \
+ visibility_scope={:?}, source_info={:?})",
+ var_id, name, mode, var_ty, visibility_scope, source_info
+ );
+
+ let tcx = self.tcx;
+ let debug_source_info = SourceInfo { span: source_info.span, scope: visibility_scope };
+ let binding_mode = match mode {
+ BindingMode::ByValue => ty::BindingMode::BindByValue(mutability),
+ BindingMode::ByRef(_) => ty::BindingMode::BindByReference(mutability),
+ };
+ debug!("declare_binding: user_ty={:?}", user_ty);
+ let local = LocalDecl::<'tcx> {
+ mutability,
+ ty: var_ty,
+ user_ty: if user_ty.is_empty() { None } else { Some(Box::new(user_ty)) },
+ source_info,
+ internal: false,
+ is_block_tail: None,
+ local_info: Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ VarBindingForm {
+ binding_mode,
+ // Hypothetically, `visit_primary_bindings` could try to unzip
+ // an outermost hir::Ty as we descend, matching up
+ // idents in the pattern; but that is complex, with unclear UI payoff.
+ // Instead, just abandon providing diagnostic info.
+ opt_ty_info: None,
+ opt_match_place,
+ pat_span,
+ },
+ ))))),
+ };
+ let for_arm_body = self.local_decls.push(local);
+ self.var_debug_info.push(VarDebugInfo {
+ name,
+ source_info: debug_source_info,
+ value: VarDebugInfoContents::Place(for_arm_body.into()),
+ });
+ let locals = if has_guard.0 {
+ let ref_for_guard = self.local_decls.push(LocalDecl::<'tcx> {
+ // This variable isn't mutated but has a name, so has to be
+ // immutable to avoid the unused mut lint.
+ mutability: Mutability::Not,
+ ty: tcx.mk_imm_ref(tcx.lifetimes.re_erased, var_ty),
+ user_ty: None,
+ source_info,
+ internal: false,
+ is_block_tail: None,
+ local_info: Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(
+ BindingForm::RefForGuard,
+ )))),
+ });
+ self.var_debug_info.push(VarDebugInfo {
+ name,
+ source_info: debug_source_info,
+ value: VarDebugInfoContents::Place(ref_for_guard.into()),
+ });
+ LocalsForNode::ForGuard { ref_for_guard, for_arm_body }
+ } else {
+ LocalsForNode::One(for_arm_body)
+ };
+ debug!("declare_binding: vars={:?}", locals);
+ self.var_indices.insert(var_id, locals);
+ }
+
+ pub(crate) fn ast_let_else(
+ &mut self,
+ mut block: BasicBlock,
+ init: &Expr<'tcx>,
+ initializer_span: Span,
+ else_block: &Block,
+ visibility_scope: Option<SourceScope>,
+ remainder_scope: region::Scope,
+ remainder_span: Span,
+ pattern: &Pat<'tcx>,
+ ) -> BlockAnd<()> {
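+ // Illustrative summary: lowering `let <pat> = <init> else { ... }` builds
+ // two candidates over the same scrutinee, one for the user's pattern
+ // (the matching case) and a wildcard (the failure case); the failure
+ // block is then routed into `else_block`, which must diverge.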
+ let (matching, failure) = self.in_if_then_scope(remainder_scope, |this| {
+ let scrutinee = unpack!(block = this.lower_scrutinee(block, init, initializer_span));
+ let pat = Pat { ty: init.ty, span: else_block.span, kind: Box::new(PatKind::Wild) };
+ let mut wildcard = Candidate::new(scrutinee.clone(), &pat, false);
+ this.declare_bindings(
+ visibility_scope,
+ remainder_span,
+ pattern,
+ ArmHasGuard(false),
+ Some((None, initializer_span)),
+ );
+ let mut candidate = Candidate::new(scrutinee.clone(), pattern, false);
+ let fake_borrow_temps = this.lower_match_tree(
+ block,
+ initializer_span,
+ pattern.span,
+ false,
+ &mut [&mut candidate, &mut wildcard],
+ );
+ // This block is for the matching case
+ let matching = this.bind_pattern(
+ this.source_info(pattern.span),
+ candidate,
+ None,
+ &fake_borrow_temps,
+ initializer_span,
+ None,
+ None,
+ None,
+ );
+ // This block is for the failure case
+ let failure = this.bind_pattern(
+ this.source_info(else_block.span),
+ wildcard,
+ None,
+ &fake_borrow_temps,
+ initializer_span,
+ None,
+ None,
+ None,
+ );
+ this.break_for_else(failure, remainder_scope, this.source_info(initializer_span));
+ matching.unit()
+ });
+
+ // This place is not really used because this destination place
+ // should never be used to take values at the end of the failure
+ // block.
+ let dummy_place = Place { local: RETURN_PLACE, projection: ty::List::empty() };
+ let failure_block;
+ unpack!(
+ failure_block = self.ast_block(
+ dummy_place,
+ failure,
+ else_block,
+ self.source_info(else_block.span),
+ )
+ );
+ self.cfg.terminate(
+ failure_block,
+ self.source_info(else_block.span),
+ TerminatorKind::Unreachable,
+ );
+ matching.unit()
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/matches/simplify.rs b/compiler/rustc_mir_build/src/build/matches/simplify.rs
new file mode 100644
index 000000000..c62989041
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/matches/simplify.rs
@@ -0,0 +1,318 @@
+//! Simplifying Candidates
+//!
+//! *Simplifying* a match pair `place @ pattern` means breaking it down
+//! into bindings or other, simpler match pairs. For example:
+//!
+//! - `place @ (P1, P2)` can be simplified to `[place.0 @ P1, place.1 @ P2]`
+//! - `place @ x` can be simplified to `[]` by binding `x` to `place`
+//!
+//! The `simplify_candidate` routine just repeatedly applies these
+//! sorts of simplifications until there is nothing left to
+//! simplify. Match pairs cannot be simplified if they require some
+//! sort of test: for example, testing which variant an enum is, or
+//! testing a value against a constant.
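+//!
+//! As an illustrative sketch, matching a place of type `(T, Option<U>)`
+//! against the pattern `(a, Some(b))` starts from the single pair
+//! `place @ (a, Some(b))`, which simplifies to `place.0 @ a` (a binding, so it
+//! disappears) and `place.1 @ Some(b)` (which requires a variant test, so it
+//! remains).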
+
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::matches::{Ascription, Binding, Candidate, MatchPair};
+use crate::build::Builder;
+use rustc_hir::RangeEnd;
+use rustc_middle::thir::{self, *};
+use rustc_middle::ty;
+use rustc_middle::ty::layout::IntegerExt;
+use rustc_target::abi::{Integer, Size};
+
+use std::mem;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Simplify a candidate so that all match pairs require a test.
+ ///
+    /// This method will also split a candidate whose only
+    /// match-pair is an or-pattern into multiple candidates.
+ /// This is so that
+ ///
+ /// match x {
+ /// 0 | 1 => { ... },
+ /// 2 | 3 => { ... },
+ /// }
+ ///
+    /// only generates a single switch. If this happens, this method returns
+ /// `true`.
+ pub(super) fn simplify_candidate<'pat>(
+ &mut self,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ ) -> bool {
+        // repeatedly simplify match pairs until a fixed point is reached
+ debug!(?candidate, "simplify_candidate");
+
+        // `existing_bindings` and `new_bindings` exist to keep the bindings in the right order.
+        // Reversing the binding order for bindings after `@` would change the binding order in
+        // places where it shouldn't be changed, for example `let (Some(a), Some(b)) = (x, y)`.
+ //
+ // To avoid this, the binding occurs in the following manner:
+        // * the bindings for one iteration of the following loop occur in order (i.e. left to
+        //   right)
+        // * the bindings from the previous iteration of the loop are prepended to the bindings
+        //   from the current iteration (in the implementation this is done by mem::swap and extend)
+ // * after all iterations, these new bindings are then appended to the bindings that were
+ // preexisting (i.e. `candidate.binding` when the function was called).
+ //
+ // example:
+ // candidate.bindings = [1, 2, 3]
+ // binding in iter 1: [4, 5]
+ // binding in iter 2: [6, 7]
+ //
+ // final binding: [1, 2, 3, 6, 7, 4, 5]
+ let mut existing_bindings = mem::take(&mut candidate.bindings);
+ let mut new_bindings = Vec::new();
+ loop {
+ let match_pairs = mem::take(&mut candidate.match_pairs);
+
+ if let [MatchPair { pattern: Pat { kind: box PatKind::Or { pats }, .. }, place }] =
+ &*match_pairs
+ {
+ existing_bindings.extend_from_slice(&new_bindings);
+ mem::swap(&mut candidate.bindings, &mut existing_bindings);
+ candidate.subcandidates =
+ self.create_or_subcandidates(candidate, place.clone(), pats);
+ return true;
+ }
+
+ let mut changed = false;
+ for match_pair in match_pairs {
+ match self.simplify_match_pair(match_pair, candidate) {
+ Ok(()) => {
+ changed = true;
+ }
+ Err(match_pair) => {
+ candidate.match_pairs.push(match_pair);
+ }
+ }
+ }
+            // Avoid issue #69971: to please the borrow checker, the binding order should be
+            // right to left when there are more bindings after `@`. For example:
+ // struct NonCopyStruct {
+ // copy_field: u32,
+ // }
+ //
+ // fn foo1(x: NonCopyStruct) {
+ // let y @ NonCopyStruct { copy_field: z } = x;
+ // // the above should turn into
+ // let z = x.copy_field;
+ // let y = x;
+ // }
+ candidate.bindings.extend_from_slice(&new_bindings);
+ mem::swap(&mut candidate.bindings, &mut new_bindings);
+ candidate.bindings.clear();
+
+ if !changed {
+ existing_bindings.extend_from_slice(&new_bindings);
+ mem::swap(&mut candidate.bindings, &mut existing_bindings);
+ // Move or-patterns to the end, because they can result in us
+ // creating additional candidates, so we want to test them as
+ // late as possible.
+ candidate
+ .match_pairs
+ .sort_by_key(|pair| matches!(*pair.pattern.kind, PatKind::Or { .. }));
+ debug!(simplified = ?candidate, "simplify_candidate");
+                return false; // if we were not able to simplify any, we're done.
+ }
+ }
+ }
+
+ /// Given `candidate` that has a single or-pattern for its match-pairs,
+ /// creates a fresh candidate for each of its input subpatterns passed via
+ /// `pats`.
+ fn create_or_subcandidates<'pat>(
+ &mut self,
+ candidate: &Candidate<'pat, 'tcx>,
+ place: PlaceBuilder<'tcx>,
+ pats: &'pat [Pat<'tcx>],
+ ) -> Vec<Candidate<'pat, 'tcx>> {
+ pats.iter()
+ .map(|pat| {
+ let mut candidate = Candidate::new(place.clone(), pat, candidate.has_guard);
+ self.simplify_candidate(&mut candidate);
+ candidate
+ })
+ .collect()
+ }
+
+ /// Tries to simplify `match_pair`, returning `Ok(())` if
+ /// successful. If successful, new match pairs and bindings will
+ /// have been pushed into the candidate. If no simplification is
+ /// possible, `Err` is returned and no changes are made to
+ /// candidate.
+ fn simplify_match_pair<'pat>(
+ &mut self,
+ match_pair: MatchPair<'pat, 'tcx>,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ ) -> Result<(), MatchPair<'pat, 'tcx>> {
+ let tcx = self.tcx;
+ match *match_pair.pattern.kind {
+ PatKind::AscribeUserType {
+ ref subpattern,
+ ascription: thir::Ascription { ref annotation, variance },
+ } => {
+                // Apply the type ascription to the value at `match_pair.place`, which is the
+                // value being matched.
+ if let Ok(place_resolved) =
+ match_pair.place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ candidate.ascriptions.push(Ascription {
+ annotation: annotation.clone(),
+ source: place_resolved.into_place(self.tcx, self.typeck_results),
+ variance,
+ });
+ }
+
+ candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));
+
+ Ok(())
+ }
+
+ PatKind::Wild => {
+ // nothing left to do
+ Ok(())
+ }
+
+ PatKind::Binding {
+ name: _,
+ mutability: _,
+ mode,
+ var,
+ ty: _,
+ ref subpattern,
+ is_primary: _,
+ } => {
+ if let Ok(place_resolved) =
+ match_pair.place.clone().try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ candidate.bindings.push(Binding {
+ span: match_pair.pattern.span,
+ source: place_resolved.into_place(self.tcx, self.typeck_results),
+ var_id: var,
+ binding_mode: mode,
+ });
+ }
+
+ if let Some(subpattern) = subpattern.as_ref() {
+ // this is the `x @ P` case; have to keep matching against `P` now
+ candidate.match_pairs.push(MatchPair::new(match_pair.place, subpattern));
+ }
+
+ Ok(())
+ }
+
+ PatKind::Constant { .. } => {
+ // FIXME normalize patterns when possible
+ Err(match_pair)
+ }
+
+ PatKind::Range(PatRange { lo, hi, end }) => {
+ let (range, bias) = match *lo.ty().kind() {
+ ty::Char => {
+ (Some(('\u{0000}' as u128, '\u{10FFFF}' as u128, Size::from_bits(32))), 0)
+ }
+ ty::Int(ity) => {
+ let size = Integer::from_int_ty(&tcx, ity).size();
+ let max = size.truncate(u128::MAX);
+ let bias = 1u128 << (size.bits() - 1);
+ (Some((0, max, size)), bias)
+ }
+ ty::Uint(uty) => {
+ let size = Integer::from_uint_ty(&tcx, uty).size();
+ let max = size.truncate(u128::MAX);
+ (Some((0, max, size)), 0)
+ }
+ _ => (None, 0),
+ };
+ if let Some((min, max, sz)) = range {
+ // We want to compare ranges numerically, but the order of the bitwise
+                    // representation of signed integers does not match their numeric order. To
+                    // fix the ordering, we shift the range of signed integers so the comparison
+                    // comes out right. This is achieved by XORing with a bias (see
+ // pattern/_match.rs for another pertinent example of this pattern).
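+                    //
+                    // For example (illustrative): for `i8` the bias is `0x80`,
+                    // so -128 (bits 0x80) maps to 0x00 and 127 (bits 0x7F)
+                    // maps to 0xFF, making unsigned comparison agree with
+                    // signed ordering.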
+ //
+ // Also, for performance, it's important to only do the second `try_to_bits` if
+ // necessary.
+ let lo = lo.try_to_bits(sz).unwrap() ^ bias;
+ if lo <= min {
+ let hi = hi.try_to_bits(sz).unwrap() ^ bias;
+ if hi > max || hi == max && end == RangeEnd::Included {
+ // Irrefutable pattern match.
+ return Ok(());
+ }
+ }
+ }
+ Err(match_pair)
+ }
+
+ PatKind::Slice { ref prefix, ref slice, ref suffix } => {
+ if prefix.is_empty() && slice.is_some() && suffix.is_empty() {
+ // irrefutable
+ self.prefix_slice_suffix(
+ &mut candidate.match_pairs,
+ &match_pair.place,
+ prefix,
+ slice.as_ref(),
+ suffix,
+ );
+ Ok(())
+ } else {
+ Err(match_pair)
+ }
+ }
+
+ PatKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
+ let irrefutable = adt_def.variants().iter_enumerated().all(|(i, v)| {
+ i == variant_index || {
+ self.tcx.features().exhaustive_patterns
+ && !v
+ .uninhabited_from(
+ self.tcx,
+ substs,
+ adt_def.adt_kind(),
+ self.param_env,
+ )
+ .is_empty()
+ }
+ }) && (adt_def.did().is_local()
+ || !adt_def.is_variant_list_non_exhaustive());
+ if irrefutable {
+ let place_builder = match_pair.place.downcast(adt_def, variant_index);
+ candidate
+ .match_pairs
+ .extend(self.field_match_pairs(place_builder, subpatterns));
+ Ok(())
+ } else {
+ Err(match_pair)
+ }
+ }
+
+ PatKind::Array { ref prefix, ref slice, ref suffix } => {
+ self.prefix_slice_suffix(
+ &mut candidate.match_pairs,
+ &match_pair.place,
+ prefix,
+ slice.as_ref(),
+ suffix,
+ );
+ Ok(())
+ }
+
+ PatKind::Leaf { ref subpatterns } => {
+ // tuple struct, match subpats (if any)
+ candidate.match_pairs.extend(self.field_match_pairs(match_pair.place, subpatterns));
+ Ok(())
+ }
+
+ PatKind::Deref { ref subpattern } => {
+ let place_builder = match_pair.place.deref();
+ candidate.match_pairs.push(MatchPair::new(place_builder, subpattern));
+ Ok(())
+ }
+
+ PatKind::Or { .. } => Err(match_pair),
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/matches/test.rs b/compiler/rustc_mir_build/src/build/matches/test.rs
new file mode 100644
index 000000000..598da80c5
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/matches/test.rs
@@ -0,0 +1,837 @@
+// Testing candidates
+//
+// After candidates have been simplified, the only match pairs that
+// remain are those that require some sort of test. The functions here
+// identify what tests are needed, perform the tests, and then filter
+// the candidates based on the result.
+
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::matches::{Candidate, MatchPair, Test, TestKind};
+use crate::build::Builder;
+use crate::thir::pattern::compare_const_vals;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_hir::{LangItem, RangeEnd};
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty::subst::{GenericArg, Subst};
+use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::{self, adjustment::PointerCast, Ty, TyCtxt};
+use rustc_span::def_id::DefId;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+
+use std::cmp::Ordering;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Identifies what test is needed to decide if `match_pair` is applicable.
+ ///
+ /// It is a bug to call this with a not-fully-simplified pattern.
+ pub(super) fn test<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> Test<'tcx> {
+ match *match_pair.pattern.kind {
+ PatKind::Variant { adt_def, substs: _, variant_index: _, subpatterns: _ } => Test {
+ span: match_pair.pattern.span,
+ kind: TestKind::Switch {
+ adt_def,
+ variants: BitSet::new_empty(adt_def.variants().len()),
+ },
+ },
+
+ PatKind::Constant { .. } if is_switch_ty(match_pair.pattern.ty) => {
+ // For integers, we use a `SwitchInt` match, which allows
+ // us to handle more cases.
+ Test {
+ span: match_pair.pattern.span,
+ kind: TestKind::SwitchInt {
+ switch_ty: match_pair.pattern.ty,
+
+ // these maps are empty to start; cases are
+ // added below in add_cases_to_switch
+ options: Default::default(),
+ },
+ }
+ }
+
+ PatKind::Constant { value } => Test {
+ span: match_pair.pattern.span,
+ kind: TestKind::Eq { value, ty: match_pair.pattern.ty },
+ },
+
+ PatKind::Range(range) => {
+ assert_eq!(range.lo.ty(), match_pair.pattern.ty);
+ assert_eq!(range.hi.ty(), match_pair.pattern.ty);
+ Test { span: match_pair.pattern.span, kind: TestKind::Range(range) }
+ }
+
+ PatKind::Slice { ref prefix, ref slice, ref suffix } => {
+ let len = prefix.len() + suffix.len();
+ let op = if slice.is_some() { BinOp::Ge } else { BinOp::Eq };
+ Test { span: match_pair.pattern.span, kind: TestKind::Len { len: len as u64, op } }
+ }
+
+ PatKind::Or { .. } => bug!("or-patterns should have already been handled"),
+
+ PatKind::AscribeUserType { .. }
+ | PatKind::Array { .. }
+ | PatKind::Wild
+ | PatKind::Binding { .. }
+ | PatKind::Leaf { .. }
+ | PatKind::Deref { .. } => self.error_simplifyable(match_pair),
+ }
+ }
+
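+    /// Tries to fold `candidate`'s match pair at `test_place` into a
+    /// `SwitchInt`: constants are added to `options`, and ranges are accepted
+    /// only when they cannot contain any of the switch values. Returns `false`
+    /// when the pattern cannot participate in the switch.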
+ pub(super) fn add_cases_to_switch<'pat>(
+ &mut self,
+ test_place: &PlaceBuilder<'tcx>,
+ candidate: &Candidate<'pat, 'tcx>,
+ switch_ty: Ty<'tcx>,
+ options: &mut FxIndexMap<ConstantKind<'tcx>, u128>,
+ ) -> bool {
+ let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place) else {
+ return false;
+ };
+
+ match *match_pair.pattern.kind {
+ PatKind::Constant { value } => {
+ options
+ .entry(value)
+ .or_insert_with(|| value.eval_bits(self.tcx, self.param_env, switch_ty));
+ true
+ }
+ PatKind::Variant { .. } => {
+ panic!("you should have called add_variants_to_switch instead!");
+ }
+ PatKind::Range(range) => {
+ // Check that none of the switch values are in the range.
+ self.values_not_contained_in_range(range, options).unwrap_or(false)
+ }
+ PatKind::Slice { .. }
+ | PatKind::Array { .. }
+ | PatKind::Wild
+ | PatKind::Or { .. }
+ | PatKind::Binding { .. }
+ | PatKind::AscribeUserType { .. }
+ | PatKind::Leaf { .. }
+ | PatKind::Deref { .. } => {
+ // don't know how to add these patterns to a switch
+ false
+ }
+ }
+ }
+
+ pub(super) fn add_variants_to_switch<'pat>(
+ &mut self,
+ test_place: &PlaceBuilder<'tcx>,
+ candidate: &Candidate<'pat, 'tcx>,
+ variants: &mut BitSet<VariantIdx>,
+ ) -> bool {
+ let Some(match_pair) = candidate.match_pairs.iter().find(|mp| mp.place == *test_place) else {
+ return false;
+ };
+
+ match *match_pair.pattern.kind {
+ PatKind::Variant { adt_def: _, variant_index, .. } => {
+ // We have a pattern testing for variant `variant_index`
+ // set the corresponding index to true
+ variants.insert(variant_index);
+ true
+ }
+ _ => {
+ // don't know how to add these patterns to a switch
+ false
+ }
+ }
+ }
+
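+    /// Emits the MIR for `test` on the place built by `place_builder`:
+    /// terminates `block` with the appropriate switch, comparison, or
+    /// `PartialEq::eq` call, branching to the blocks supplied by
+    /// `make_target_blocks`.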
+ pub(super) fn perform_test(
+ &mut self,
+ match_start_span: Span,
+ scrutinee_span: Span,
+ block: BasicBlock,
+ place_builder: PlaceBuilder<'tcx>,
+ test: &Test<'tcx>,
+ make_target_blocks: impl FnOnce(&mut Self) -> Vec<BasicBlock>,
+ ) {
+ let place: Place<'tcx>;
+ if let Ok(test_place_builder) =
+ place_builder.try_upvars_resolved(self.tcx, self.typeck_results)
+ {
+ place = test_place_builder.into_place(self.tcx, self.typeck_results);
+ } else {
+ return;
+ }
+ debug!(
+ "perform_test({:?}, {:?}: {:?}, {:?})",
+ block,
+ place,
+ place.ty(&self.local_decls, self.tcx),
+ test
+ );
+
+ let source_info = self.source_info(test.span);
+ match test.kind {
+ TestKind::Switch { adt_def, ref variants } => {
+ let target_blocks = make_target_blocks(self);
+                // `variants` is a `BitSet` of indices into `adt_def.variants`.
+ let num_enum_variants = adt_def.variants().len();
+ debug_assert_eq!(target_blocks.len(), num_enum_variants + 1);
+ let otherwise_block = *target_blocks.last().unwrap();
+ let tcx = self.tcx;
+ let switch_targets = SwitchTargets::new(
+ adt_def.discriminants(tcx).filter_map(|(idx, discr)| {
+ if variants.contains(idx) {
+ debug_assert_ne!(
+ target_blocks[idx.index()],
+ otherwise_block,
+                                "no candidates for tested discriminant: {:?}",
+ discr,
+ );
+ Some((discr.val, target_blocks[idx.index()]))
+ } else {
+ debug_assert_eq!(
+ target_blocks[idx.index()],
+ otherwise_block,
+                                "found candidates for untested discriminant: {:?}",
+ discr,
+ );
+ None
+ }
+ }),
+ otherwise_block,
+ );
+ debug!("num_enum_variants: {}, variants: {:?}", num_enum_variants, variants);
+ let discr_ty = adt_def.repr().discr_type().to_ty(tcx);
+ let discr = self.temp(discr_ty, test.span);
+ self.cfg.push_assign(
+ block,
+ self.source_info(scrutinee_span),
+ discr,
+ Rvalue::Discriminant(place),
+ );
+ self.cfg.terminate(
+ block,
+ self.source_info(match_start_span),
+ TerminatorKind::SwitchInt {
+ discr: Operand::Move(discr),
+ switch_ty: discr_ty,
+ targets: switch_targets,
+ },
+ );
+ }
+
+ TestKind::SwitchInt { switch_ty, ref options } => {
+ let target_blocks = make_target_blocks(self);
+ let terminator = if *switch_ty.kind() == ty::Bool {
+ assert!(!options.is_empty() && options.len() <= 2);
+ let [first_bb, second_bb] = *target_blocks else {
+ bug!("`TestKind::SwitchInt` on `bool` should have two targets")
+ };
+ let (true_bb, false_bb) = match options[0] {
+ 1 => (first_bb, second_bb),
+ 0 => (second_bb, first_bb),
+ v => span_bug!(test.span, "expected boolean value but got {:?}", v),
+ };
+ TerminatorKind::if_(self.tcx, Operand::Copy(place), true_bb, false_bb)
+ } else {
+                    // The switch may be inexhaustive, so we have a catch-all block
+ debug_assert_eq!(options.len() + 1, target_blocks.len());
+ let otherwise_block = *target_blocks.last().unwrap();
+ let switch_targets = SwitchTargets::new(
+ options.values().copied().zip(target_blocks),
+ otherwise_block,
+ );
+ TerminatorKind::SwitchInt {
+ discr: Operand::Copy(place),
+ switch_ty,
+ targets: switch_targets,
+ }
+ };
+ self.cfg.terminate(block, self.source_info(match_start_span), terminator);
+ }
+
+ TestKind::Eq { value, ty } => {
+ if !ty.is_scalar() {
+ // Use `PartialEq::eq` instead of `BinOp::Eq`
+ // (the binop can only handle primitives)
+ self.non_scalar_compare(
+ block,
+ make_target_blocks,
+ source_info,
+ value,
+ place,
+ ty,
+ );
+ } else if let [success, fail] = *make_target_blocks(self) {
+ assert_eq!(value.ty(), ty);
+ let expect = self.literal_operand(test.span, value);
+ let val = Operand::Copy(place);
+ self.compare(block, success, fail, source_info, BinOp::Eq, expect, val);
+ } else {
+ bug!("`TestKind::Eq` should have two target blocks");
+ }
+ }
+
+ TestKind::Range(PatRange { lo, hi, ref end }) => {
+ let lower_bound_success = self.cfg.start_new_block();
+ let target_blocks = make_target_blocks(self);
+
+ // Test `val` by computing `lo <= val && val <= hi`, using primitive comparisons.
+ let lo = self.literal_operand(test.span, lo);
+ let hi = self.literal_operand(test.span, hi);
+ let val = Operand::Copy(place);
+
+ let [success, fail] = *target_blocks else {
+ bug!("`TestKind::Range` should have two target blocks");
+ };
+ self.compare(
+ block,
+ lower_bound_success,
+ fail,
+ source_info,
+ BinOp::Le,
+ lo,
+ val.clone(),
+ );
+ let op = match *end {
+ RangeEnd::Included => BinOp::Le,
+ RangeEnd::Excluded => BinOp::Lt,
+ };
+ self.compare(lower_bound_success, success, fail, source_info, op, val, hi);
+ }
+
+ TestKind::Len { len, op } => {
+ let target_blocks = make_target_blocks(self);
+
+ let usize_ty = self.tcx.types.usize;
+ let actual = self.temp(usize_ty, test.span);
+
+ // actual = len(place)
+ self.cfg.push_assign(block, source_info, actual, Rvalue::Len(place));
+
+ // expected = <N>
+ let expected = self.push_usize(block, source_info, len);
+
+ let [true_bb, false_bb] = *target_blocks else {
+ bug!("`TestKind::Len` should have two target blocks");
+ };
+                // result = actual == expected OR result = actual >= expected
+ // branch based on result
+ self.compare(
+ block,
+ true_bb,
+ false_bb,
+ source_info,
+ op,
+ Operand::Move(actual),
+ Operand::Move(expected),
+ );
+ }
+ }
+ }
+
+ /// Compare using the provided built-in comparison operator
+ fn compare(
+ &mut self,
+ block: BasicBlock,
+ success_block: BasicBlock,
+ fail_block: BasicBlock,
+ source_info: SourceInfo,
+ op: BinOp,
+ left: Operand<'tcx>,
+ right: Operand<'tcx>,
+ ) {
+ let bool_ty = self.tcx.types.bool;
+ let result = self.temp(bool_ty, source_info.span);
+
+ // result = op(left, right)
+ self.cfg.push_assign(
+ block,
+ source_info,
+ result,
+ Rvalue::BinaryOp(op, Box::new((left, right))),
+ );
+
+ // branch based on result
+ self.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::if_(self.tcx, Operand::Move(result), success_block, fail_block),
+ );
+ }
+
+    /// Compare two `&T` values using `<T as std::cmp::PartialEq>::eq`
+ fn non_scalar_compare(
+ &mut self,
+ block: BasicBlock,
+ make_target_blocks: impl FnOnce(&mut Self) -> Vec<BasicBlock>,
+ source_info: SourceInfo,
+ value: ConstantKind<'tcx>,
+ place: Place<'tcx>,
+ mut ty: Ty<'tcx>,
+ ) {
+ let mut expect = self.literal_operand(source_info.span, value);
+ let mut val = Operand::Copy(place);
+
+ // If we're using `b"..."` as a pattern, we need to insert an
+ // unsizing coercion, as the byte string has the type `&[u8; N]`.
+ //
+ // We want to do this even when the scrutinee is a reference to an
+ // array, so we can call `<[u8]>::eq` rather than having to find an
+ // `<[u8; N]>::eq`.
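+        //
+        // Illustrative example: a pattern `b"abc"` has type `&[u8; 3]`; both
+        // sides are unsized to `&[u8]` so `<[u8] as PartialEq>::eq` applies.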
+ let unsize = |ty: Ty<'tcx>| match ty.kind() {
+ ty::Ref(region, rty, _) => match rty.kind() {
+ ty::Array(inner_ty, n) => Some((region, inner_ty, n)),
+ _ => None,
+ },
+ _ => None,
+ };
+ let opt_ref_ty = unsize(ty);
+ let opt_ref_test_ty = unsize(value.ty());
+ match (opt_ref_ty, opt_ref_test_ty) {
+ // nothing to do, neither is an array
+ (None, None) => {}
+ (Some((region, elem_ty, _)), _) | (None, Some((region, elem_ty, _))) => {
+ let tcx = self.tcx;
+ // make both a slice
+ ty = tcx.mk_imm_ref(*region, tcx.mk_slice(*elem_ty));
+ if opt_ref_ty.is_some() {
+ let temp = self.temp(ty, source_info.span);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ temp,
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), val, ty),
+ );
+ val = Operand::Move(temp);
+ }
+ if opt_ref_test_ty.is_some() {
+ let slice = self.temp(ty, source_info.span);
+ self.cfg.push_assign(
+ block,
+ source_info,
+ slice,
+ Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), expect, ty),
+ );
+ expect = Operand::Move(slice);
+ }
+ }
+ }
+
+ let ty::Ref(_, deref_ty, _) = *ty.kind() else {
+ bug!("non_scalar_compare called on non-reference type: {}", ty);
+ };
+
+ let eq_def_id = self.tcx.require_lang_item(LangItem::PartialEq, None);
+ let method = trait_method(self.tcx, eq_def_id, sym::eq, deref_ty, &[deref_ty.into()]);
+
+ let bool_ty = self.tcx.types.bool;
+ let eq_result = self.temp(bool_ty, source_info.span);
+ let eq_block = self.cfg.start_new_block();
+ self.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Call {
+ func: Operand::Constant(Box::new(Constant {
+ span: source_info.span,
+
+ // FIXME(#54571): This constant comes from user input (a
+ // constant in a pattern). Are there forms where users can add
+ // type annotations here? For example, an associated constant?
+ // Need to experiment.
+ user_ty: None,
+
+ literal: method,
+ })),
+ args: vec![val, expect],
+ destination: eq_result,
+ target: Some(eq_block),
+ cleanup: None,
+ from_hir_call: false,
+ fn_span: source_info.span,
+ },
+ );
+ self.diverge_from(block);
+
+ let [success_block, fail_block] = *make_target_blocks(self) else {
+ bug!("`TestKind::Eq` should have two target blocks")
+ };
+ // check the result
+ self.cfg.terminate(
+ eq_block,
+ source_info,
+ TerminatorKind::if_(self.tcx, Operand::Move(eq_result), success_block, fail_block),
+ );
+ }
+
+ /// Given that we are performing `test` against `test_place`, this job
+ /// sorts out what the status of `candidate` will be after the test. See
+ /// `test_candidates` for the usage of this function. The returned index is
+ /// the index that this candidate should be placed in the
+ /// `target_candidates` vec. The candidate may be modified to update its
+ /// `match_pairs`.
+ ///
+ /// So, for example, if this candidate is `x @ Some(P0)` and the `Test` is
+ /// a variant test, then we would modify the candidate to be `(x as
+ /// Option).0 @ P0` and return the index corresponding to the variant
+ /// `Some`.
+ ///
+    /// However, in some cases, the test may just not be relevant to the
+    /// candidate. For example, suppose we are testing whether `foo.x == 22`,
+    /// but in one match arm we have `Foo { x: _, ... }`... in that case, the
+    /// value of `x` has no particular relevance to this candidate. In such
+    /// cases, this function just returns `None` without doing anything.
+ /// This is used by the overall `match_candidates` algorithm to structure
+ /// the match as a whole. See `match_candidates` for more details.
+ ///
+    /// FIXME(#29623). In some cases, we have some tricky choices to make. For
+    /// example, if we are testing that `x == 22`, but the candidate is
+    /// `x @ 13..55`, what should we do? In the event that the test is true, we
+    /// know that the candidate applies, but if the test is false, we don't
+    /// know that it *doesn't* apply. For now, we return `None`, indicating
+    /// that the test does not apply to this candidate, but we might get
+    /// tighter match code if we did something a bit different.
+ pub(super) fn sort_candidate<'pat>(
+ &mut self,
+ test_place: &PlaceBuilder<'tcx>,
+ test: &Test<'tcx>,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ ) -> Option<usize> {
+ // Find the match_pair for this place (if any). At present,
+ // afaik, there can be at most one. (In the future, if we
+ // adopted a more general `@` operator, there might be more
+ // than one, but it'd be very unusual to have two sides that
+ // both require tests; you'd expect one side to be simplified
+ // away.)
+ let (match_pair_index, match_pair) =
+ candidate.match_pairs.iter().enumerate().find(|&(_, mp)| mp.place == *test_place)?;
+
+ match (&test.kind, &*match_pair.pattern.kind) {
+ // If we are performing a variant switch, then this
+ // informs variant patterns, but nothing else.
+ (
+ &TestKind::Switch { adt_def: tested_adt_def, .. },
+ &PatKind::Variant { adt_def, variant_index, ref subpatterns, .. },
+ ) => {
+ assert_eq!(adt_def, tested_adt_def);
+ self.candidate_after_variant_switch(
+ match_pair_index,
+ adt_def,
+ variant_index,
+ subpatterns,
+ candidate,
+ );
+ Some(variant_index.as_usize())
+ }
+
+ (&TestKind::Switch { .. }, _) => None,
+
+ // If we are performing a switch over integers, then this informs integer
+ // equality, but nothing else.
+ //
+ // FIXME(#29623) we could use PatKind::Range to rule
+ // things out here, in some cases.
+ (
+ &TestKind::SwitchInt { switch_ty: _, ref options },
+ &PatKind::Constant { ref value },
+ ) if is_switch_ty(match_pair.pattern.ty) => {
+ let index = options.get_index_of(value).unwrap();
+ self.candidate_without_match_pair(match_pair_index, candidate);
+ Some(index)
+ }
+
+ (&TestKind::SwitchInt { switch_ty: _, ref options }, &PatKind::Range(range)) => {
+ let not_contained =
+ self.values_not_contained_in_range(range, options).unwrap_or(false);
+
+ if not_contained {
+ // No switch values are contained in the pattern range,
+ // so the pattern can be matched only if this test fails.
+ let otherwise = options.len();
+ Some(otherwise)
+ } else {
+ None
+ }
+ }
+
+ (&TestKind::SwitchInt { .. }, _) => None,
+
+ (
+ &TestKind::Len { len: test_len, op: BinOp::Eq },
+ &PatKind::Slice { ref prefix, ref slice, ref suffix },
+ ) => {
+ let pat_len = (prefix.len() + suffix.len()) as u64;
+ match (test_len.cmp(&pat_len), slice) {
+ (Ordering::Equal, &None) => {
+ // on true, min_len = len = $actual_length,
+ // on false, len != $actual_length
+ self.candidate_after_slice_test(
+ match_pair_index,
+ candidate,
+ prefix,
+ slice.as_ref(),
+ suffix,
+ );
+ Some(0)
+ }
+ (Ordering::Less, _) => {
+ // test_len < pat_len. If $actual_len = test_len,
+ // then $actual_len < pat_len and we don't have
+ // enough elements.
+ Some(1)
+ }
+ (Ordering::Equal | Ordering::Greater, &Some(_)) => {
+ // This can match both if $actual_len = test_len >= pat_len,
+ // and if $actual_len > test_len. We can't advance.
+ None
+ }
+ (Ordering::Greater, &None) => {
+ // test_len != pat_len, so if $actual_len = test_len, then
+ // $actual_len != pat_len.
+ Some(1)
+ }
+ }
+ }
+
+ (
+ &TestKind::Len { len: test_len, op: BinOp::Ge },
+ &PatKind::Slice { ref prefix, ref slice, ref suffix },
+ ) => {
+ // the test is `$actual_len >= test_len`
+ let pat_len = (prefix.len() + suffix.len()) as u64;
+ match (test_len.cmp(&pat_len), slice) {
+ (Ordering::Equal, &Some(_)) => {
+ // $actual_len >= test_len = pat_len,
+ // so we can match.
+ self.candidate_after_slice_test(
+ match_pair_index,
+ candidate,
+ prefix,
+ slice.as_ref(),
+ suffix,
+ );
+ Some(0)
+ }
+ (Ordering::Less, _) | (Ordering::Equal, &None) => {
+ // test_len <= pat_len. If $actual_len < test_len,
+ // then it is also < pat_len, so the test passing is
+ // necessary (but insufficient).
+ Some(0)
+ }
+ (Ordering::Greater, &None) => {
+ // test_len > pat_len. If $actual_len >= test_len > pat_len,
+ // then we know we won't have a match.
+ Some(1)
+ }
+ (Ordering::Greater, &Some(_)) => {
+                        // test_len > pat_len. If $actual_len >= test_len the
+                        // pattern matches; if the test fails, the pattern may
+                        // still match (pat_len <= $actual_len < test_len), so
+                        // this can still go both ways.
+ None
+ }
+ }
+ }
+
+ (&TestKind::Range(test), &PatKind::Range(pat)) => {
+ use std::cmp::Ordering::*;
+
+ if test == pat {
+ self.candidate_without_match_pair(match_pair_index, candidate);
+ return Some(0);
+ }
+
+ // For performance, it's important to only do the second
+ // `compare_const_vals` if necessary.
+ let no_overlap = if matches!(
+ (compare_const_vals(self.tcx, test.hi, pat.lo, self.param_env)?, test.end),
+ (Less, _) | (Equal, RangeEnd::Excluded) // test < pat
+ ) || matches!(
+ (compare_const_vals(self.tcx, test.lo, pat.hi, self.param_env)?, pat.end),
+ (Greater, _) | (Equal, RangeEnd::Excluded) // test > pat
+ ) {
+ Some(1)
+ } else {
+ None
+ };
+
+ // If the testing range does not overlap with pattern range,
+ // the pattern can be matched only if this test fails.
+ no_overlap
+ }
+
+ (&TestKind::Range(range), &PatKind::Constant { value }) => {
+ if let Some(false) = self.const_range_contains(range, value) {
+ // `value` is not contained in the testing range,
+ // so `value` can be matched only if this test fails.
+ Some(1)
+ } else {
+ None
+ }
+ }
+
+ (&TestKind::Range { .. }, _) => None,
+
+ (&TestKind::Eq { .. } | &TestKind::Len { .. }, _) => {
+ // The call to `self.test(&match_pair)` below is not actually used to generate any
+ // MIR. Instead, we just want to compare with `test` (the parameter of the method)
+ // to see if it is the same.
+ //
+ // However, at this point we can still encounter or-patterns that were extracted
+ // from previous calls to `sort_candidate`, so we need to manually address that
+ // case to avoid panicking in `self.test()`.
+ if let PatKind::Or { .. } = &*match_pair.pattern.kind {
+ return None;
+ }
+
+ // These are all binary tests.
+ //
+ // FIXME(#29623) we can be more clever here
+ let pattern_test = self.test(&match_pair);
+ if pattern_test.kind == test.kind {
+ self.candidate_without_match_pair(match_pair_index, candidate);
+ Some(0)
+ } else {
+ None
+ }
+ }
+ }
+ }
+
+ fn candidate_without_match_pair(
+ &mut self,
+ match_pair_index: usize,
+ candidate: &mut Candidate<'_, 'tcx>,
+ ) {
+ candidate.match_pairs.remove(match_pair_index);
+ }
+
+ fn candidate_after_slice_test<'pat>(
+ &mut self,
+ match_pair_index: usize,
+ candidate: &mut Candidate<'pat, 'tcx>,
+ prefix: &'pat [Pat<'tcx>],
+ opt_slice: Option<&'pat Pat<'tcx>>,
+ suffix: &'pat [Pat<'tcx>],
+ ) {
+ let removed_place = candidate.match_pairs.remove(match_pair_index).place;
+ self.prefix_slice_suffix(
+ &mut candidate.match_pairs,
+ &removed_place,
+ prefix,
+ opt_slice,
+ suffix,
+ );
+ }
+
+ fn candidate_after_variant_switch<'pat>(
+ &mut self,
+ match_pair_index: usize,
+ adt_def: ty::AdtDef<'tcx>,
+ variant_index: VariantIdx,
+ subpatterns: &'pat [FieldPat<'tcx>],
+ candidate: &mut Candidate<'pat, 'tcx>,
+ ) {
+ let match_pair = candidate.match_pairs.remove(match_pair_index);
+
+ // So, if we have a match-pattern like `x @ Enum::Variant(P1, P2)`,
+ // we want to create a set of derived match-patterns like
+        // `(x as Variant).0 @ P1` and `(x as Variant).1 @ P2`.
+ let elem =
+ ProjectionElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index);
+ let downcast_place = match_pair.place.project(elem); // `(x as Variant)`
+ let consequent_match_pairs = subpatterns.iter().map(|subpattern| {
+ // e.g., `(x as Variant).0`
+ let place = downcast_place.clone().field(subpattern.field, subpattern.pattern.ty);
+ // e.g., `(x as Variant).0 @ P1`
+ MatchPair::new(place, &subpattern.pattern)
+ });
+
+ candidate.match_pairs.extend(consequent_match_pairs);
+ }
+
+ fn error_simplifyable<'pat>(&mut self, match_pair: &MatchPair<'pat, 'tcx>) -> ! {
+        span_bug!(match_pair.pattern.span, "simplifiable pattern found: {:?}", match_pair.pattern)
+ }
+
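+    /// Checks whether `value` is contained in `range`, returning `None` when
+    /// the constants cannot be compared.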
+ fn const_range_contains(
+ &self,
+ range: PatRange<'tcx>,
+ value: ConstantKind<'tcx>,
+ ) -> Option<bool> {
+ use std::cmp::Ordering::*;
+
+ // For performance, it's important to only do the second
+ // `compare_const_vals` if necessary.
+ Some(
+ matches!(compare_const_vals(self.tcx, range.lo, value, self.param_env)?, Less | Equal)
+ && matches!(
+ (compare_const_vals(self.tcx, value, range.hi, self.param_env)?, range.end),
+ (Less, _) | (Equal, RangeEnd::Included)
+ ),
+ )
+ }
+
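+    /// Returns `Some(true)` when none of the switch `options` fall inside
+    /// `range`, `Some(false)` when at least one does, and `None` when
+    /// containment cannot be determined.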
+ fn values_not_contained_in_range(
+ &self,
+ range: PatRange<'tcx>,
+ options: &FxIndexMap<ConstantKind<'tcx>, u128>,
+ ) -> Option<bool> {
+ for &val in options.keys() {
+ if self.const_range_contains(range, val)? {
+ return Some(false);
+ }
+ }
+
+ Some(true)
+ }
+}
+
+impl Test<'_> {
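+    /// Returns the number of blocks this test's terminator branches to,
+    /// including the "otherwise" block where there is one.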
+ pub(super) fn targets(&self) -> usize {
+ match self.kind {
+ TestKind::Eq { .. } | TestKind::Range(_) | TestKind::Len { .. } => 2,
+ TestKind::Switch { adt_def, .. } => {
+ // While the switch that we generate doesn't test for all
+ // variants, we have a target for each variant and the
+ // otherwise case, and we make sure that all of the cases not
+ // specified have the same block.
+ adt_def.variants().len() + 1
+ }
+ TestKind::SwitchInt { switch_ty, ref options, .. } => {
+ if switch_ty.is_bool() {
+ // `bool` is special cased in `perform_test` to always
+ // branch to two blocks.
+ 2
+ } else {
+ options.len() + 1
+ }
+ }
+ }
+ }
+}
+
+fn is_switch_ty(ty: Ty<'_>) -> bool {
+ ty.is_integral() || ty.is_char() || ty.is_bool()
+}
+
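+/// Resolves the trait method `method_name` of `trait_def_id` for `self_ty` and
+/// `params`, returning a zero-sized constant usable as a call operand.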
+fn trait_method<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ method_name: Symbol,
+ self_ty: Ty<'tcx>,
+ params: &[GenericArg<'tcx>],
+) -> ConstantKind<'tcx> {
+ let substs = tcx.mk_substs_trait(self_ty, params);
+
+ // The unhygienic comparison here is acceptable because this is only
+ // used on known traits.
+ let item = tcx
+ .associated_items(trait_def_id)
+ .filter_by_name_unhygienic(method_name)
+ .find(|item| item.kind == ty::AssocKind::Fn)
+ .expect("trait method not found");
+
+ let method_ty = tcx.bound_type_of(item.def_id);
+ let method_ty = method_ty.subst(tcx, substs);
+
+ ConstantKind::zero_sized(method_ty)
+}
diff --git a/compiler/rustc_mir_build/src/build/matches/util.rs b/compiler/rustc_mir_build/src/build/matches/util.rs
new file mode 100644
index 000000000..9a1e98d3b
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/matches/util.rs
@@ -0,0 +1,109 @@
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::matches::MatchPair;
+use crate::build::Builder;
+use rustc_middle::mir::*;
+use rustc_middle::thir::*;
+use rustc_middle::ty;
+use smallvec::SmallVec;
+use std::convert::TryInto;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ pub(crate) fn field_match_pairs<'pat>(
+ &mut self,
+ place: PlaceBuilder<'tcx>,
+ subpatterns: &'pat [FieldPat<'tcx>],
+ ) -> Vec<MatchPair<'pat, 'tcx>> {
+ subpatterns
+ .iter()
+ .map(|fieldpat| {
+ let place = place.clone().field(fieldpat.field, fieldpat.pattern.ty);
+ MatchPair::new(place, &fieldpat.pattern)
+ })
+ .collect()
+ }
+
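+    /// Pushes match pairs for the elements of a slice or array pattern:
+    /// `prefix` elements are indexed from the front, `suffix` elements from
+    /// the back, and `opt_slice`, if present, covers the middle subslice.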
+ pub(crate) fn prefix_slice_suffix<'pat>(
+ &mut self,
+ match_pairs: &mut SmallVec<[MatchPair<'pat, 'tcx>; 1]>,
+ place: &PlaceBuilder<'tcx>,
+ prefix: &'pat [Pat<'tcx>],
+ opt_slice: Option<&'pat Pat<'tcx>>,
+ suffix: &'pat [Pat<'tcx>],
+ ) {
+ let tcx = self.tcx;
+ let (min_length, exact_size) = if let Ok(place_resolved) =
+ place.clone().try_upvars_resolved(tcx, self.typeck_results)
+ {
+ match place_resolved
+ .into_place(tcx, self.typeck_results)
+ .ty(&self.local_decls, tcx)
+ .ty
+ .kind()
+ {
+ ty::Array(_, length) => (length.eval_usize(tcx, self.param_env), true),
+ _ => ((prefix.len() + suffix.len()).try_into().unwrap(), false),
+ }
+ } else {
+ ((prefix.len() + suffix.len()).try_into().unwrap(), false)
+ };
+
+ match_pairs.extend(prefix.iter().enumerate().map(|(idx, subpattern)| {
+ let elem =
+ ProjectionElem::ConstantIndex { offset: idx as u64, min_length, from_end: false };
+ let place = place.clone().project(elem);
+ MatchPair::new(place, subpattern)
+ }));
+
+ if let Some(subslice_pat) = opt_slice {
+ let suffix_len = suffix.len() as u64;
+ let subslice = place.clone().project(ProjectionElem::Subslice {
+ from: prefix.len() as u64,
+ to: if exact_size { min_length - suffix_len } else { suffix_len },
+ from_end: !exact_size,
+ });
+ match_pairs.push(MatchPair::new(subslice, subslice_pat));
+ }
+
+ match_pairs.extend(suffix.iter().rev().enumerate().map(|(idx, subpattern)| {
+ let end_offset = (idx + 1) as u64;
+ let elem = ProjectionElem::ConstantIndex {
+ offset: if exact_size { min_length - end_offset } else { end_offset },
+ min_length,
+ from_end: !exact_size,
+ };
+ let place = place.clone().project(elem);
+ MatchPair::new(place, subpattern)
+ }));
+ }
+
+ /// Creates a false edge to `imaginary_target` and a real edge to
+    /// `real_target`. If `imaginary_target` is `None`, or is the same as the
+    /// real target, a `Goto` is generated instead to simplify the generated MIR.
+ pub(crate) fn false_edges(
+ &mut self,
+ from_block: BasicBlock,
+ real_target: BasicBlock,
+ imaginary_target: Option<BasicBlock>,
+ source_info: SourceInfo,
+ ) {
+ match imaginary_target {
+ Some(target) if target != real_target => {
+ self.cfg.terminate(
+ from_block,
+ source_info,
+ TerminatorKind::FalseEdge { real_target, imaginary_target: target },
+ );
+ }
+ _ => self.cfg.goto(from_block, source_info, real_target),
+ }
+ }
+}
+
+impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
+ pub(crate) fn new(
+ place: PlaceBuilder<'tcx>,
+ pattern: &'pat Pat<'tcx>,
+ ) -> MatchPair<'pat, 'tcx> {
+ MatchPair { place, pattern }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/misc.rs b/compiler/rustc_mir_build/src/build/misc.rs
new file mode 100644
index 000000000..86f466ff7
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/misc.rs
@@ -0,0 +1,75 @@
+//! Miscellaneous builder routines that are not specific to building any particular
+//! kind of thing.
+
+use crate::build::Builder;
+
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_trait_selection::infer::InferCtxtExt;
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ /// Adds a new temporary value of type `ty` storing the result of
+ /// evaluating `expr`.
+ ///
+ /// N.B., **No cleanup is scheduled for this temporary.** You should
+ /// call `schedule_drop` once the temporary is initialized.
+ pub(crate) fn temp(&mut self, ty: Ty<'tcx>, span: Span) -> Place<'tcx> {
+ // Mark this local as internal to avoid temporaries with types not present in the
+ // user's code resulting in ICEs from the generator transform.
+ let temp = self.local_decls.push(LocalDecl::new(ty, span).internal());
+ let place = Place::from(temp);
+ debug!("temp: created temp {:?} with type {:?}", place, self.local_decls[temp].ty);
+ place
+ }
+
+ /// Convenience function for creating a literal operand, one
+ /// without any user type annotation.
+ pub(crate) fn literal_operand(
+ &mut self,
+ span: Span,
+ literal: ConstantKind<'tcx>,
+ ) -> Operand<'tcx> {
+ let constant = Box::new(Constant { span, user_ty: None, literal });
+ Operand::Constant(constant)
+ }
+
+    /// Returns a zero literal operand for the appropriate type; this works for
+    /// `bool`, `char` and integers.
+ pub(crate) fn zero_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
+ let literal = ConstantKind::from_bits(self.tcx, 0, ty::ParamEnv::empty().and(ty));
+
+ self.literal_operand(span, literal)
+ }
+
+ pub(crate) fn push_usize(
+ &mut self,
+ block: BasicBlock,
+ source_info: SourceInfo,
+ value: u64,
+ ) -> Place<'tcx> {
+ let usize_ty = self.tcx.types.usize;
+ let temp = self.temp(usize_ty, source_info.span);
+ self.cfg.push_assign_constant(
+ block,
+ source_info,
+ temp,
+ Constant {
+ span: source_info.span,
+ user_ty: None,
+ literal: ConstantKind::from_usize(self.tcx, value),
+ },
+ );
+ temp
+ }
+
+ pub(crate) fn consume_by_copy_or_move(&self, place: Place<'tcx>) -> Operand<'tcx> {
+ let tcx = self.tcx;
+ let ty = place.ty(&self.local_decls, tcx).ty;
+ if !self.infcx.type_is_copy_modulo_regions(self.param_env, ty, DUMMY_SP) {
+ Operand::Move(place)
+ } else {
+ Operand::Copy(place)
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/build/mod.rs b/compiler/rustc_mir_build/src/build/mod.rs
new file mode 100644
index 000000000..12b8ceede
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/mod.rs
@@ -0,0 +1,1171 @@
+use crate::build;
+pub(crate) use crate::build::expr::as_constant::lit_to_mir_constant;
+use crate::build::expr::as_place::PlaceBuilder;
+use crate::build::scope::DropKind;
+use crate::thir::pattern::pat_from_hir;
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::Float;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{GeneratorKind, Node};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
+use rustc_middle::middle::region;
+use rustc_middle::mir::interpret::ConstValue;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::*;
+use rustc_middle::thir::{BindingMode, Expr, ExprId, LintLevel, LocalVarId, PatKind, Thir};
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable, TypeckResults};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_span::Symbol;
+use rustc_target::spec::abi::Abi;
+
+use super::lints;
+
+pub(crate) fn mir_built<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx rustc_data_structures::steal::Steal<Body<'tcx>> {
+ if let Some(def) = def.try_upgrade(tcx) {
+ return tcx.mir_built(def);
+ }
+
+ let mut body = mir_build(tcx, def);
+ if def.const_param_did.is_some() {
+ assert!(matches!(body.source.instance, ty::InstanceDef::Item(_)));
+ body.source = MirSource::from_instance(ty::InstanceDef::Item(def.to_global()));
+ }
+
+ tcx.alloc_steal_mir(body)
+}
+
+/// Construct the MIR for a given `DefId`.
+fn mir_build(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> Body<'_> {
+ let id = tcx.hir().local_def_id_to_hir_id(def.did);
+ let body_owner_kind = tcx.hir().body_owner_kind(def.did);
+ let typeck_results = tcx.typeck_opt_const_arg(def);
+
+    // Ensure unsafeck and abstract const building are run before we steal the THIR.
+ // We can't use `ensure()` for `thir_abstract_const` as it doesn't compute the query
+ // if inputs are green. This can cause ICEs when calling `thir_abstract_const` after
+ // THIR has been stolen if we haven't computed this query yet.
+ match def {
+ ty::WithOptConstParam { did, const_param_did: Some(const_param_did) } => {
+ tcx.ensure().thir_check_unsafety_for_const_arg((did, const_param_did));
+ drop(tcx.thir_abstract_const_of_const_arg((did, const_param_did)));
+ }
+ ty::WithOptConstParam { did, const_param_did: None } => {
+ tcx.ensure().thir_check_unsafety(did);
+ drop(tcx.thir_abstract_const(did));
+ }
+ }
+
+ // Figure out what primary body this item has.
+ let (body_id, return_ty_span, span_with_body) = match tcx.hir().get(id) {
+ Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(hir::Closure { fn_decl, body, .. }),
+ ..
+ }) => (*body, fn_decl.output.span(), None),
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Fn(hir::FnSig { decl, .. }, _, body_id),
+ span,
+ ..
+ })
+ | Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(hir::FnSig { decl, .. }, body_id),
+ span,
+ ..
+ })
+ | Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(hir::FnSig { decl, .. }, hir::TraitFn::Provided(body_id)),
+ span,
+ ..
+ }) => {
+ // Use the `Span` of the `Item/ImplItem/TraitItem` as the body span,
+ // since the def span of a function does not include the body
+ (*body_id, decl.output.span(), Some(*span))
+ }
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Static(ty, _, body_id) | hir::ItemKind::Const(ty, body_id),
+ ..
+ })
+ | Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(ty, body_id), .. })
+ | Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Const(ty, Some(body_id)),
+ ..
+ }) => (*body_id, ty.span, None),
+ Node::AnonConst(hir::AnonConst { body, hir_id, .. }) => {
+ (*body, tcx.hir().span(*hir_id), None)
+ }
+
+ _ => span_bug!(tcx.hir().span(id), "can't build MIR for {:?}", def.did),
+ };
+
+ // If we don't have a specialized span for the body, just use the
+ // normal def span.
+ let span_with_body = span_with_body.unwrap_or_else(|| tcx.hir().span(id));
+
+ tcx.infer_ctxt().enter(|infcx| {
+ let body = if let Some(error_reported) = typeck_results.tainted_by_errors {
+ build::construct_error(&infcx, def, id, body_id, body_owner_kind, error_reported)
+ } else if body_owner_kind.is_fn_or_closure() {
+ // fetch the fully liberated fn signature (that is, all bound
+ // types/lifetimes replaced)
+ let fn_sig = typeck_results.liberated_fn_sigs()[id];
+ let fn_def_id = tcx.hir().local_def_id(id);
+
+ let safety = match fn_sig.unsafety {
+ hir::Unsafety::Normal => Safety::Safe,
+ hir::Unsafety::Unsafe => Safety::FnUnsafe,
+ };
+
+ let body = tcx.hir().body(body_id);
+ let (thir, expr) = tcx
+ .thir_body(def)
+ .unwrap_or_else(|_| (tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0)));
+ // We ran all queries that depended on THIR at the beginning
+ // of `mir_build`, so now we can steal it
+ let thir = thir.steal();
+ let ty = tcx.type_of(fn_def_id);
+ let mut abi = fn_sig.abi;
+ let implicit_argument = match ty.kind() {
+ ty::Closure(..) => {
+ // HACK(eddyb) Avoid having RustCall on closures,
+ // as it adds unnecessary (and wrong) auto-tupling.
+ abi = Abi::Rust;
+ vec![ArgInfo(liberated_closure_env_ty(tcx, id, body_id), None, None, None)]
+ }
+ ty::Generator(..) => {
+ let gen_ty = tcx.typeck_body(body_id).node_type(id);
+
+ // The resume argument may be missing, in that case we need to provide it here.
+ // It will always be `()` in this case.
+ if body.params.is_empty() {
+ vec![
+ ArgInfo(gen_ty, None, None, None),
+ ArgInfo(tcx.mk_unit(), None, None, None),
+ ]
+ } else {
+ vec![ArgInfo(gen_ty, None, None, None)]
+ }
+ }
+ _ => vec![],
+ };
+
+ let explicit_arguments = body.params.iter().enumerate().map(|(index, arg)| {
+ let owner_id = tcx.hir().body_owner(body_id);
+ let opt_ty_info;
+ let self_arg;
+ if let Some(ref fn_decl) = tcx.hir().fn_decl_by_hir_id(owner_id) {
+ opt_ty_info = fn_decl
+ .inputs
+ .get(index)
+ // Make sure that inferred closure args have no type span
+ .and_then(|ty| if arg.pat.span != ty.span { Some(ty.span) } else { None });
+ self_arg = if index == 0 && fn_decl.implicit_self.has_implicit_self() {
+ match fn_decl.implicit_self {
+ hir::ImplicitSelfKind::Imm => Some(ImplicitSelfKind::Imm),
+ hir::ImplicitSelfKind::Mut => Some(ImplicitSelfKind::Mut),
+ hir::ImplicitSelfKind::ImmRef => Some(ImplicitSelfKind::ImmRef),
+ hir::ImplicitSelfKind::MutRef => Some(ImplicitSelfKind::MutRef),
+ _ => None,
+ }
+ } else {
+ None
+ };
+ } else {
+ opt_ty_info = None;
+ self_arg = None;
+ }
+
+ // C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
+ // (as it's created inside the body itself, not passed in from outside).
+ let ty = if fn_sig.c_variadic && index == fn_sig.inputs().len() {
+ let va_list_did = tcx.require_lang_item(LangItem::VaList, Some(arg.span));
+
+ tcx.bound_type_of(va_list_did).subst(tcx, &[tcx.lifetimes.re_erased.into()])
+ } else {
+ fn_sig.inputs()[index]
+ };
+
+ ArgInfo(ty, opt_ty_info, Some(&arg), self_arg)
+ });
+
+ let arguments = implicit_argument.into_iter().chain(explicit_arguments);
+
+ let (yield_ty, return_ty) = if body.generator_kind.is_some() {
+ let gen_ty = tcx.typeck_body(body_id).node_type(id);
+ let gen_sig = match gen_ty.kind() {
+ ty::Generator(_, gen_substs, ..) => gen_substs.as_generator().sig(),
+ _ => span_bug!(tcx.hir().span(id), "generator w/o generator type: {:?}", ty),
+ };
+ (Some(gen_sig.yield_ty), gen_sig.return_ty)
+ } else {
+ (None, fn_sig.output())
+ };
+
+ let mut mir = build::construct_fn(
+ &thir,
+ &infcx,
+ def,
+ id,
+ arguments,
+ safety,
+ abi,
+ return_ty,
+ return_ty_span,
+ body,
+ expr,
+ span_with_body,
+ );
+ if yield_ty.is_some() {
+ mir.generator.as_mut().unwrap().yield_ty = yield_ty;
+ }
+ mir
+ } else {
+ // Get the revealed type of this const. This is *not* the adjusted
+ // type of its body, which may be a subtype of this type. For
+ // example:
+ //
+ // fn foo(_: &()) {}
+ // static X: fn(&'static ()) = foo;
+ //
+ // The adjusted type of the body of X is `for<'a> fn(&'a ())` which
+ // is not the same as the type of X. We need the type of the return
+ // place to be the type of the constant because NLL typeck will
+ // equate them.
+
+ let return_ty = typeck_results.node_type(id);
+
+ let (thir, expr) = tcx
+ .thir_body(def)
+ .unwrap_or_else(|_| (tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0)));
+ // We ran all queries that depended on THIR at the beginning
+ // of `mir_build`, so now we can steal it
+ let thir = thir.steal();
+
+ build::construct_const(&thir, &infcx, expr, def, id, return_ty, return_ty_span)
+ };
+
+ lints::check(tcx, &body);
+
+ // The borrow checker will replace all the regions here with its own
+ // inference variables. There's no point having non-erased regions here.
+ // The exception is `body.user_type_annotations`, which is used unmodified
+ // by borrow checking.
+ debug_assert!(
+ !(body.local_decls.has_free_regions()
+ || body.basic_blocks().has_free_regions()
+ || body.var_debug_info.has_free_regions()
+ || body.yield_ty().has_free_regions()),
+ "Unexpected free regions in MIR: {:?}",
+ body,
+ );
+
+ body
+ })
+}
+
+///////////////////////////////////////////////////////////////////////////
+// BuildMir -- walks a crate, looking for fn items and methods to build MIR from
+
+fn liberated_closure_env_ty(
+ tcx: TyCtxt<'_>,
+ closure_expr_id: hir::HirId,
+ body_id: hir::BodyId,
+) -> Ty<'_> {
+ let closure_ty = tcx.typeck_body(body_id).node_type(closure_expr_id);
+
+ let ty::Closure(closure_def_id, closure_substs) = *closure_ty.kind() else {
+ bug!("closure expr does not have closure type: {:?}", closure_ty);
+ };
+
+ let bound_vars =
+ tcx.mk_bound_variable_kinds(std::iter::once(ty::BoundVariableKind::Region(ty::BrEnv)));
+ let br =
+ ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind: ty::BrEnv };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let closure_env_ty = tcx.closure_env_ty(closure_def_id, closure_substs, env_region).unwrap();
+ tcx.erase_late_bound_regions(ty::Binder::bind_with_vars(closure_env_ty, bound_vars))
+}
+
+#[derive(Debug, PartialEq, Eq)]
+enum BlockFrame {
+ /// Evaluation is currently within a statement.
+ ///
+ /// Examples include:
+ /// 1. `EXPR;`
+ /// 2. `let _ = EXPR;`
+ /// 3. `let x = EXPR;`
+ Statement {
+ /// If true, then statement discards result from evaluating
+ /// the expression (such as examples 1 and 2 above).
+ ignores_expr_result: bool,
+ },
+
+ /// Evaluation is currently within the tail expression of a block.
+ ///
+ /// Example: `{ STMT_1; STMT_2; EXPR }`
+ TailExpr {
+ /// If true, then the surrounding context of the block ignores
+ /// the result of evaluating the block's tail expression.
+ ///
+ /// Example: `let _ = { STMT_1; EXPR };`
+ tail_result_is_ignored: bool,
+
+ /// `Span` of the tail expression.
+ span: Span,
+ },
+
+ /// Generic mark meaning that the block occurred as a subexpression
+ /// where the result might be used.
+ ///
+ /// Examples: `foo(EXPR)`, `match EXPR { ... }`
+ SubExpr,
+}
+
+impl BlockFrame {
+ fn is_tail_expr(&self) -> bool {
+ match *self {
+ BlockFrame::TailExpr { .. } => true,
+
+ BlockFrame::Statement { .. } | BlockFrame::SubExpr => false,
+ }
+ }
+ fn is_statement(&self) -> bool {
+ match *self {
+ BlockFrame::Statement { .. } => true,
+
+ BlockFrame::TailExpr { .. } | BlockFrame::SubExpr => false,
+ }
+ }
+}
+
+#[derive(Debug)]
+struct BlockContext(Vec<BlockFrame>);
+
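+/// The main MIR-building state: the CFG under construction together with the
+/// per-body context (type information, scopes, variable bindings, guard and
+/// debuginfo bookkeeping) used while lowering THIR to MIR.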
+struct Builder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ typeck_results: &'tcx TypeckResults<'tcx>,
+ region_scope_tree: &'tcx region::ScopeTree,
+ param_env: ty::ParamEnv<'tcx>,
+
+ thir: &'a Thir<'tcx>,
+ cfg: CFG<'tcx>,
+
+ def_id: DefId,
+ hir_id: hir::HirId,
+ parent_module: DefId,
+ check_overflow: bool,
+ fn_span: Span,
+ arg_count: usize,
+ generator_kind: Option<GeneratorKind>,
+
+ /// The current set of scopes, updated as we traverse;
+ /// see the `scope` module for more details.
+ scopes: scope::Scopes<'tcx>,
+
+    /// The block-context: each time we build the code within a `thir::Block`,
+ /// we push a frame here tracking whether we are building a statement or
+ /// if we are pushing the tail expression of the block. This is used to
+ /// embed information in generated temps about whether they were created
+ /// for a block tail expression or not.
+ ///
+ /// It would be great if we could fold this into `self.scopes`
+ /// somehow, but right now I think that is very tightly tied to
+ /// the code generation in ways that we cannot (or should not)
+ /// start just throwing new entries onto that vector in order to
+ /// distinguish the context of EXPR1 from the context of EXPR2 in
+ /// `{ STMTS; EXPR1 } + EXPR2`.
+ block_context: BlockContext,
+
+ /// The current unsafe block in scope
+ in_scope_unsafe: Safety,
+
+ /// The vector of all scopes that we have created thus far;
+ /// we track this for debuginfo later.
+ source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ source_scope: SourceScope,
+
+ /// The guard-context: each time we build the guard expression for
+ /// a match arm, we push onto this stack, and then pop when we
+ /// finish building it.
+ guard_context: Vec<GuardFrame>,
+
+ /// Maps `HirId`s of variable bindings to the `Local`s created for them.
+ /// (A match binding can have two locals; the 2nd is for the arm's guard.)
+ var_indices: FxHashMap<LocalVarId, LocalsForNode>,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+ canonical_user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
+ upvar_mutbls: Vec<Mutability>,
+ unit_temp: Option<Place<'tcx>>,
+
+ var_debug_info: Vec<VarDebugInfo<'tcx>>,
+}
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ fn is_bound_var_in_guard(&self, id: LocalVarId) -> bool {
+ self.guard_context.iter().any(|frame| frame.locals.iter().any(|local| local.id == id))
+ }
+
+ fn var_local_id(&self, id: LocalVarId, for_guard: ForGuard) -> Local {
+ self.var_indices[&id].local_id(for_guard)
+ }
+}
+
+impl BlockContext {
+ fn new() -> Self {
+ BlockContext(vec![])
+ }
+ fn push(&mut self, bf: BlockFrame) {
+ self.0.push(bf);
+ }
+ fn pop(&mut self) -> Option<BlockFrame> {
+ self.0.pop()
+ }
+
+    /// Traverses the frames on the `BlockContext`, searching for the first
+    /// block-tail expression frame with no intervening statement frame.
+ ///
+ /// Notably, this skips over `SubExpr` frames; this method is
+ /// meant to be used in the context of understanding the
+ /// relationship of a temp (created within some complicated
+ /// expression) with its containing expression, and whether the
+ /// value of that *containing expression* (not the temp!) is
+ /// ignored.
+ fn currently_in_block_tail(&self) -> Option<BlockTailInfo> {
+ for bf in self.0.iter().rev() {
+ match bf {
+ BlockFrame::SubExpr => continue,
+ BlockFrame::Statement { .. } => break,
+ &BlockFrame::TailExpr { tail_result_is_ignored, span } => {
+ return Some(BlockTailInfo { tail_result_is_ignored, span });
+ }
+ }
+ }
+
+ None
+ }
+
+ /// Looks at the topmost frame on the BlockContext and reports
+    /// whether it's one that would discard a block tail result.
+ ///
+    /// Unlike `currently_in_block_tail`, this does
+ /// *not* skip over `SubExpr` frames: here, we want to know
+ /// whether the block result itself is discarded.
+ fn currently_ignores_tail_results(&self) -> bool {
+ match self.0.last() {
+ // no context: conservatively assume result is read
+ None => false,
+
+ // sub-expression: block result feeds into some computation
+ Some(BlockFrame::SubExpr) => false,
+
+ // otherwise: use accumulated is_ignored state.
+ Some(
+ BlockFrame::TailExpr { tail_result_is_ignored: ignored, .. }
+ | BlockFrame::Statement { ignores_expr_result: ignored },
+ ) => *ignored,
+ }
+ }
+}
+
+#[derive(Debug)]
+enum LocalsForNode {
+ /// In the usual case, a `HirId` for an identifier maps to at most
+ /// one `Local` declaration.
+ One(Local),
+
+ /// The exceptional case is identifiers in a match arm's pattern
+ /// that are referenced in a guard of that match arm. For these,
+    /// we have two `Local`s.
+ ///
+ /// * `for_arm_body` is the Local used in the arm body (which is
+ /// just like the `One` case above),
+ ///
+ /// * `ref_for_guard` is the Local used in the arm's guard (which
+ /// is a reference to a temp that is an alias of
+ /// `for_arm_body`).
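+    ///
+    /// For example (a sketch, not exhaustive):
+    ///
+    /// ```ignore (illustrative)
+    /// match opt {
+    ///     Some(w) if w > 0 => w,
+    ///     _ => 0,
+    /// }
+    /// ```
+    ///
+    /// Here `w` gets a `for_arm_body` local used by the arm body, plus
+    /// a `ref_for_guard` local holding the reference that the guard
+    /// `w > 0` actually reads.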
+ ForGuard { ref_for_guard: Local, for_arm_body: Local },
+}
+
+#[derive(Debug)]
+struct GuardFrameLocal {
+ id: LocalVarId,
+}
+
+impl GuardFrameLocal {
+ fn new(id: LocalVarId, _binding_mode: BindingMode) -> Self {
+ GuardFrameLocal { id }
+ }
+}
+
+#[derive(Debug)]
+struct GuardFrame {
+    /// These are the ids of names that are bound by patterns of the
+    /// arm of *this* guard.
+    ///
+    /// (Frames higher up the stack will have the ids bound in arms
+    /// further out, such as in a case like:
+    ///
+    /// match E1 {
+    ///     P1(id1) if (... (match E2 { P2(id2) if ... => B2 })) => B1,
+    /// }
+    ///
+    /// here, when building the guard of the inner arm (the one that
+    /// produces B2), the topmost frame binds id2 while a frame lower
+    /// on the stack binds id1.)
+ locals: Vec<GuardFrameLocal>,
+}
+
+/// `ForGuard` indicates whether we are talking about:
+/// 1. The variable for use outside of guard expressions, or
+/// 2. The temp that holds a reference to (1.), which is actually what the
+/// guard expressions see.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum ForGuard {
+ RefWithinGuard,
+ OutsideGuard,
+}
+
+impl LocalsForNode {
+ fn local_id(&self, for_guard: ForGuard) -> Local {
+ match (self, for_guard) {
+ (&LocalsForNode::One(local_id), ForGuard::OutsideGuard)
+ | (
+ &LocalsForNode::ForGuard { ref_for_guard: local_id, .. },
+ ForGuard::RefWithinGuard,
+ )
+ | (&LocalsForNode::ForGuard { for_arm_body: local_id, .. }, ForGuard::OutsideGuard) => {
+ local_id
+ }
+
+ (&LocalsForNode::One(_), ForGuard::RefWithinGuard) => {
+ bug!("anything with one local should never be within a guard.")
+ }
+ }
+ }
+}
+
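+/// The CFG under construction: the basic blocks built so far, indexed
+/// by `BasicBlock`.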
+struct CFG<'tcx> {
+ basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+}
+
+rustc_index::newtype_index! {
+ struct ScopeId { .. }
+}
+
+#[derive(Debug)]
+enum NeedsTemporary {
+ /// Use this variant when whatever you are converting with `as_operand`
+ /// is the last thing you are converting. This means that if we introduced
+ /// an intermediate temporary, we'd only read it immediately after, so we can
+ /// also avoid it.
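+    ///
+    /// As an illustrative sketch (the exact call sites vary), lowering
+    /// `a + b` converts the operands left to right, so `a` is requested
+    /// with `Maybe` (its temp must stay live while `b` is evaluated)
+    /// while `b`, converted last, can use `No`:
+    ///
+    /// ```ignore (illustrative)
+    /// let lhs = unpack!(block = this.as_operand(block, scope, lhs, None, NeedsTemporary::Maybe));
+    /// let rhs = unpack!(block = this.as_operand(block, scope, rhs, None, NeedsTemporary::No));
+    /// ```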
+ No,
+    /// For all cases where you aren't sure, or where the answer is too
+    /// expensive to compute for now. It is always safe to fall back to this.
+ Maybe,
+}
+
+///////////////////////////////////////////////////////////////////////////
+/// The `BlockAnd` "monad" packages up the new basic block along with a
+/// produced value (sometimes just unit, of course). The `unpack!`
+/// macro (and methods below) makes working with `BlockAnd` much more
+/// convenient.
+
+#[must_use = "if you don't use one of these results, you're leaving a dangling edge"]
+struct BlockAnd<T>(BasicBlock, T);
+
+trait BlockAndExtension {
+ fn and<T>(self, v: T) -> BlockAnd<T>;
+ fn unit(self) -> BlockAnd<()>;
+}
+
+impl BlockAndExtension for BasicBlock {
+ fn and<T>(self, v: T) -> BlockAnd<T> {
+ BlockAnd(self, v)
+ }
+
+ fn unit(self) -> BlockAnd<()> {
+ BlockAnd(self, ())
+ }
+}
+
+/// Update a block pointer and return the value.
+/// Use it like `let x = unpack!(block = self.foo(block, foo))`.
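+///
+/// A rough sketch of the two forms (the method names here are just
+/// examples of callers):
+///
+/// ```ignore (illustrative)
+/// // Value-producing form: updates `block` and yields the value.
+/// let operand = unpack!(block = this.as_operand(block, ..));
+/// // Unit form: just yields the new basic block.
+/// block = unpack!(this.stmt_expr(block, expr, ..));
+/// ```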
+macro_rules! unpack {
+ ($x:ident = $c:expr) => {{
+ let BlockAnd(b, v) = $c;
+ $x = b;
+ v
+ }};
+
+ ($c:expr) => {{
+ let BlockAnd(b, ()) = $c;
+ b
+ }};
+}
+
+///////////////////////////////////////////////////////////////////////////
+/// The main entry point for building MIR for a function.
+
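+/// Per-argument data passed to `construct_fn`: the argument's type, the
+/// span of its type annotation (if any), the HIR parameter (if any),
+/// and the implicit `self` kind (if this argument is `self`).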
+struct ArgInfo<'tcx>(
+ Ty<'tcx>,
+ Option<Span>,
+ Option<&'tcx hir::Param<'tcx>>,
+ Option<ImplicitSelfKind>,
+);
+
+fn construct_fn<'tcx, A>(
+ thir: &Thir<'tcx>,
+ infcx: &InferCtxt<'_, 'tcx>,
+ fn_def: ty::WithOptConstParam<LocalDefId>,
+ fn_id: hir::HirId,
+ arguments: A,
+ safety: Safety,
+ abi: Abi,
+ return_ty: Ty<'tcx>,
+ return_ty_span: Span,
+ body: &'tcx hir::Body<'tcx>,
+ expr: ExprId,
+ span_with_body: Span,
+) -> Body<'tcx>
+where
+ A: Iterator<Item = ArgInfo<'tcx>>,
+{
+ let arguments: Vec<_> = arguments.collect();
+
+ let tcx = infcx.tcx;
+ let span = tcx.hir().span(fn_id);
+
+ let mut builder = Builder::new(
+ thir,
+ infcx,
+ fn_def,
+ fn_id,
+ span_with_body,
+ arguments.len(),
+ safety,
+ return_ty,
+ return_ty_span,
+ body.generator_kind,
+ );
+
+ let call_site_scope =
+ region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::CallSite };
+ let arg_scope =
+ region::Scope { id: body.value.hir_id.local_id, data: region::ScopeData::Arguments };
+ let source_info = builder.source_info(span);
+ let call_site_s = (call_site_scope, source_info);
+ unpack!(builder.in_scope(call_site_s, LintLevel::Inherited, |builder| {
+ let arg_scope_s = (arg_scope, source_info);
+ // Attribute epilogue to function's closing brace
+ let fn_end = span_with_body.shrink_to_hi();
+ let return_block =
+ unpack!(builder.in_breakable_scope(None, Place::return_place(), fn_end, |builder| {
+ Some(builder.in_scope(arg_scope_s, LintLevel::Inherited, |builder| {
+ builder.args_and_body(
+ START_BLOCK,
+ fn_def.did,
+ &arguments,
+ arg_scope,
+ &thir[expr],
+ )
+ }))
+ }));
+ let source_info = builder.source_info(fn_end);
+ builder.cfg.terminate(return_block, source_info, TerminatorKind::Return);
+ builder.build_drop_trees();
+ return_block.unit()
+ }));
+
+ let spread_arg = if abi == Abi::RustCall {
+ // RustCall pseudo-ABI untuples the last argument.
+ Some(Local::new(arguments.len()))
+ } else {
+ None
+ };
+
+ let mut body = builder.finish();
+ body.spread_arg = spread_arg;
+ body
+}
+
+fn construct_const<'a, 'tcx>(
+ thir: &'a Thir<'tcx>,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ expr: ExprId,
+ def: ty::WithOptConstParam<LocalDefId>,
+ hir_id: hir::HirId,
+ const_ty: Ty<'tcx>,
+ const_ty_span: Span,
+) -> Body<'tcx> {
+ let tcx = infcx.tcx;
+ let span = tcx.hir().span(hir_id);
+ let mut builder = Builder::new(
+ thir,
+ infcx,
+ def,
+ hir_id,
+ span,
+ 0,
+ Safety::Safe,
+ const_ty,
+ const_ty_span,
+ None,
+ );
+
+ let mut block = START_BLOCK;
+ unpack!(block = builder.expr_into_dest(Place::return_place(), block, &thir[expr]));
+
+ let source_info = builder.source_info(span);
+ builder.cfg.terminate(block, source_info, TerminatorKind::Return);
+
+ builder.build_drop_trees();
+
+ builder.finish()
+}
+
+/// Construct MIR for an item that has had errors in type checking.
+///
+/// This is required because we may still want to run MIR passes on an item
+/// with type errors, but normal MIR construction can't handle that in general.
+fn construct_error<'a, 'tcx>(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ hir_id: hir::HirId,
+ body_id: hir::BodyId,
+ body_owner_kind: hir::BodyOwnerKind,
+ err: ErrorGuaranteed,
+) -> Body<'tcx> {
+ let tcx = infcx.tcx;
+ let span = tcx.hir().span(hir_id);
+ let ty = tcx.ty_error();
+ let generator_kind = tcx.hir().body(body_id).generator_kind;
+ let num_params = match body_owner_kind {
+ hir::BodyOwnerKind::Fn => tcx.hir().fn_decl_by_hir_id(hir_id).unwrap().inputs.len(),
+ hir::BodyOwnerKind::Closure => {
+ if generator_kind.is_some() {
+ // Generators have an implicit `self` parameter *and* a possibly
+ // implicit resume parameter.
+ 2
+ } else {
+ // The implicit self parameter adds another local in MIR.
+ 1 + tcx.hir().fn_decl_by_hir_id(hir_id).unwrap().inputs.len()
+ }
+ }
+ hir::BodyOwnerKind::Const => 0,
+ hir::BodyOwnerKind::Static(_) => 0,
+ };
+ let mut cfg = CFG { basic_blocks: IndexVec::new() };
+ let mut source_scopes = IndexVec::new();
+ let mut local_decls = IndexVec::from_elem_n(LocalDecl::new(ty, span), 1);
+
+ cfg.start_new_block();
+ source_scopes.push(SourceScopeData {
+ span,
+ parent_scope: None,
+ inlined: None,
+ inlined_parent_scope: None,
+ local_data: ClearCrossCrate::Set(SourceScopeLocalData {
+ lint_root: hir_id,
+ safety: Safety::Safe,
+ }),
+ });
+ let source_info = SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE };
+
+ // Some MIR passes will expect the number of parameters to match the
+ // function declaration.
+ for _ in 0..num_params {
+ local_decls.push(LocalDecl::with_source_info(ty, source_info));
+ }
+ cfg.terminate(START_BLOCK, source_info, TerminatorKind::Unreachable);
+
+ let mut body = Body::new(
+ MirSource::item(def.did.to_def_id()),
+ cfg.basic_blocks,
+ source_scopes,
+ local_decls,
+ IndexVec::new(),
+ num_params,
+ vec![],
+ span,
+ generator_kind,
+ Some(err),
+ );
+ body.generator.as_mut().map(|gen| gen.yield_ty = Some(ty));
+ body
+}
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ fn new(
+ thir: &'a Thir<'tcx>,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ hir_id: hir::HirId,
+ span: Span,
+ arg_count: usize,
+ safety: Safety,
+ return_ty: Ty<'tcx>,
+ return_span: Span,
+ generator_kind: Option<GeneratorKind>,
+ ) -> Builder<'a, 'tcx> {
+ let tcx = infcx.tcx;
+ let attrs = tcx.hir().attrs(hir_id);
+        // Some functions always have overflow checks enabled;
+ // however, they may not get codegen'd, depending on
+ // the settings for the crate they are codegened in.
+ let mut check_overflow = tcx.sess.contains_name(attrs, sym::rustc_inherit_overflow_checks);
+ // Respect -C overflow-checks.
+ check_overflow |= tcx.sess.overflow_checks();
+ // Constants always need overflow checks.
+ check_overflow |= matches!(
+ tcx.hir().body_owner_kind(def.did),
+ hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_)
+ );
+
+ let lint_level = LintLevel::Explicit(hir_id);
+ let param_env = tcx.param_env(def.did);
+ let mut builder = Builder {
+ thir,
+ tcx,
+ infcx,
+ typeck_results: tcx.typeck_opt_const_arg(def),
+ region_scope_tree: tcx.region_scope_tree(def.did),
+ param_env,
+ def_id: def.did.to_def_id(),
+ hir_id,
+ parent_module: tcx.parent_module(hir_id).to_def_id(),
+ check_overflow,
+ cfg: CFG { basic_blocks: IndexVec::new() },
+ fn_span: span,
+ arg_count,
+ generator_kind,
+ scopes: scope::Scopes::new(),
+ block_context: BlockContext::new(),
+ source_scopes: IndexVec::new(),
+ source_scope: OUTERMOST_SOURCE_SCOPE,
+ guard_context: vec![],
+ in_scope_unsafe: safety,
+ local_decls: IndexVec::from_elem_n(LocalDecl::new(return_ty, return_span), 1),
+ canonical_user_type_annotations: IndexVec::new(),
+ upvar_mutbls: vec![],
+ var_indices: Default::default(),
+ unit_temp: None,
+ var_debug_info: vec![],
+ };
+
+ assert_eq!(builder.cfg.start_new_block(), START_BLOCK);
+ assert_eq!(
+ builder.new_source_scope(span, lint_level, Some(safety)),
+ OUTERMOST_SOURCE_SCOPE
+ );
+ builder.source_scopes[OUTERMOST_SOURCE_SCOPE].parent_scope = None;
+
+ builder
+ }
+
+ fn finish(self) -> Body<'tcx> {
+ for (index, block) in self.cfg.basic_blocks.iter().enumerate() {
+ if block.terminator.is_none() {
+ span_bug!(self.fn_span, "no terminator on block {:?}", index);
+ }
+ }
+
+ Body::new(
+ MirSource::item(self.def_id),
+ self.cfg.basic_blocks,
+ self.source_scopes,
+ self.local_decls,
+ self.canonical_user_type_annotations,
+ self.arg_count,
+ self.var_debug_info,
+ self.fn_span,
+ self.generator_kind,
+ self.typeck_results.tainted_by_errors,
+ )
+ }
+
+ fn args_and_body(
+ &mut self,
+ mut block: BasicBlock,
+ fn_def_id: LocalDefId,
+ arguments: &[ArgInfo<'tcx>],
+ argument_scope: region::Scope,
+ expr: &Expr<'tcx>,
+ ) -> BlockAnd<()> {
+ // Allocate locals for the function arguments
+ for &ArgInfo(ty, _, arg_opt, _) in arguments.iter() {
+ let source_info =
+ SourceInfo::outermost(arg_opt.map_or(self.fn_span, |arg| arg.pat.span));
+ let arg_local = self.local_decls.push(LocalDecl::with_source_info(ty, source_info));
+
+ // If this is a simple binding pattern, give debuginfo a nice name.
+ if let Some(arg) = arg_opt && let Some(ident) = arg.pat.simple_ident() {
+ self.var_debug_info.push(VarDebugInfo {
+ name: ident.name,
+ source_info,
+ value: VarDebugInfoContents::Place(arg_local.into()),
+ });
+ }
+ }
+
+ let tcx = self.tcx;
+ let tcx_hir = tcx.hir();
+ let hir_typeck_results = self.typeck_results;
+
+        // In analyze_closure() in upvar.rs we gathered a list of upvars used by a
+        // closure and stored it in a map called closure_min_captures in TypeckResults,
+        // keyed by the closure's DefId. Here, we run through that vec of UpvarIds for
+ // the given closure and use the necessary information to create upvar
+ // debuginfo and to fill `self.upvar_mutbls`.
+ if hir_typeck_results.closure_min_captures.get(&fn_def_id).is_some() {
+ let mut closure_env_projs = vec![];
+ let mut closure_ty = self.local_decls[ty::CAPTURE_STRUCT_LOCAL].ty;
+ if let ty::Ref(_, ty, _) = closure_ty.kind() {
+ closure_env_projs.push(ProjectionElem::Deref);
+ closure_ty = *ty;
+ }
+ let upvar_substs = match closure_ty.kind() {
+ ty::Closure(_, substs) => ty::UpvarSubsts::Closure(substs),
+ ty::Generator(_, substs, _) => ty::UpvarSubsts::Generator(substs),
+ _ => span_bug!(self.fn_span, "upvars with non-closure env ty {:?}", closure_ty),
+ };
+ let def_id = self.def_id.as_local().unwrap();
+ let capture_syms = tcx.symbols_for_closure_captures((def_id, fn_def_id));
+ let capture_tys = upvar_substs.upvar_tys();
+ let captures_with_tys = hir_typeck_results
+ .closure_min_captures_flattened(fn_def_id)
+ .zip(capture_tys.zip(capture_syms));
+
+ self.upvar_mutbls = captures_with_tys
+ .enumerate()
+ .map(|(i, (captured_place, (ty, sym)))| {
+ let capture = captured_place.info.capture_kind;
+ let var_id = match captured_place.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ _ => bug!("Expected an upvar"),
+ };
+
+ let mutability = captured_place.mutability;
+
+ let mut projs = closure_env_projs.clone();
+ projs.push(ProjectionElem::Field(Field::new(i), ty));
+ match capture {
+ ty::UpvarCapture::ByValue => {}
+ ty::UpvarCapture::ByRef(..) => {
+ projs.push(ProjectionElem::Deref);
+ }
+ };
+
+ self.var_debug_info.push(VarDebugInfo {
+ name: *sym,
+ source_info: SourceInfo::outermost(tcx_hir.span(var_id)),
+ value: VarDebugInfoContents::Place(Place {
+ local: ty::CAPTURE_STRUCT_LOCAL,
+ projection: tcx.intern_place_elems(&projs),
+ }),
+ });
+
+ mutability
+ })
+ .collect();
+ }
+
+ let mut scope = None;
+ // Bind the argument patterns
+ for (index, arg_info) in arguments.iter().enumerate() {
+ // Function arguments always get the first Local indices after the return place
+ let local = Local::new(index + 1);
+ let place = Place::from(local);
+ let &ArgInfo(_, opt_ty_info, arg_opt, ref self_binding) = arg_info;
+
+ // Make sure we drop (parts of) the argument even when not matched on.
+ self.schedule_drop(
+ arg_opt.as_ref().map_or(expr.span, |arg| arg.pat.span),
+ argument_scope,
+ local,
+ DropKind::Value,
+ );
+
+ let Some(arg) = arg_opt else {
+ continue;
+ };
+ let pat = match tcx.hir().get(arg.pat.hir_id) {
+ Node::Pat(pat) => pat,
+ node => bug!("pattern became {:?}", node),
+ };
+ let pattern = pat_from_hir(tcx, self.param_env, self.typeck_results, pat);
+ let original_source_scope = self.source_scope;
+ let span = pattern.span;
+ self.set_correct_source_scope_for_arg(arg.hir_id, original_source_scope, span);
+ match *pattern.kind {
+ // Don't introduce extra copies for simple bindings
+ PatKind::Binding {
+ mutability,
+ var,
+ mode: BindingMode::ByValue,
+ subpattern: None,
+ ..
+ } => {
+ self.local_decls[local].mutability = mutability;
+ self.local_decls[local].source_info.scope = self.source_scope;
+ self.local_decls[local].local_info = if let Some(kind) = self_binding {
+ Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(
+ BindingForm::ImplicitSelf(*kind),
+ ))))
+ } else {
+ let binding_mode = ty::BindingMode::BindByValue(mutability);
+ Some(Box::new(LocalInfo::User(ClearCrossCrate::Set(BindingForm::Var(
+ VarBindingForm {
+ binding_mode,
+ opt_ty_info,
+ opt_match_place: Some((Some(place), span)),
+ pat_span: span,
+ },
+ )))))
+ };
+ self.var_indices.insert(var, LocalsForNode::One(local));
+ }
+ _ => {
+ scope = self.declare_bindings(
+ scope,
+ expr.span,
+ &pattern,
+ matches::ArmHasGuard(false),
+ Some((Some(&place), span)),
+ );
+ let place_builder = PlaceBuilder::from(local);
+ unpack!(block = self.place_into_pattern(block, pattern, place_builder, false));
+ }
+ }
+ self.source_scope = original_source_scope;
+ }
+
+ // Enter the argument pattern bindings source scope, if it exists.
+ if let Some(source_scope) = scope {
+ self.source_scope = source_scope;
+ }
+
+ self.expr_into_dest(Place::return_place(), block, &expr)
+ }
+
+ fn set_correct_source_scope_for_arg(
+ &mut self,
+ arg_hir_id: hir::HirId,
+ original_source_scope: SourceScope,
+ pattern_span: Span,
+ ) {
+ let tcx = self.tcx;
+ let current_root = tcx.maybe_lint_level_root_bounded(arg_hir_id, self.hir_id);
+ let parent_root = tcx.maybe_lint_level_root_bounded(
+ self.source_scopes[original_source_scope]
+ .local_data
+ .as_ref()
+ .assert_crate_local()
+ .lint_root,
+ self.hir_id,
+ );
+ if current_root != parent_root {
+ self.source_scope =
+ self.new_source_scope(pattern_span, LintLevel::Explicit(current_root), None);
+ }
+ }
+
+ fn get_unit_temp(&mut self) -> Place<'tcx> {
+ match self.unit_temp {
+ Some(tmp) => tmp,
+ None => {
+ let ty = self.tcx.mk_unit();
+ let fn_span = self.fn_span;
+ let tmp = self.temp(ty, fn_span);
+ self.unit_temp = Some(tmp);
+ tmp
+ }
+ }
+ }
+}
+
+fn parse_float_into_constval<'tcx>(
+ num: Symbol,
+ float_ty: ty::FloatTy,
+ neg: bool,
+) -> Option<ConstValue<'tcx>> {
+ parse_float_into_scalar(num, float_ty, neg).map(ConstValue::Scalar)
+}
+
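+/// Parses `num` with both Rust's built-in float parsing and the
+/// `rustc_apfloat` soft-float implementation, asserts that the two agree
+/// bit-for-bit, and returns the (possibly negated) result as a `Scalar`.
+///
+/// As a worked example: parsing `"1.5"` as `F64` yields the bits
+/// `0x3FF8_0000_0000_0000`, and `neg = true` flips the sign bit,
+/// giving `-1.5` (`0xBFF8_0000_0000_0000`).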
+pub(crate) fn parse_float_into_scalar(
+ num: Symbol,
+ float_ty: ty::FloatTy,
+ neg: bool,
+) -> Option<Scalar> {
+ let num = num.as_str();
+ match float_ty {
+ ty::FloatTy::F32 => {
+ let Ok(rust_f) = num.parse::<f32>() else { return None };
+ let mut f = num.parse::<Single>().unwrap_or_else(|e| {
+ panic!("apfloat::ieee::Single failed to parse `{}`: {:?}", num, e)
+ });
+
+ assert!(
+ u128::from(rust_f.to_bits()) == f.to_bits(),
+ "apfloat::ieee::Single gave different result for `{}`: \
+ {}({:#x}) vs Rust's {}({:#x})",
+ rust_f,
+ f,
+ f.to_bits(),
+ Single::from_bits(rust_f.to_bits().into()),
+ rust_f.to_bits()
+ );
+
+ if neg {
+ f = -f;
+ }
+
+ Some(Scalar::from_f32(f))
+ }
+ ty::FloatTy::F64 => {
+ let Ok(rust_f) = num.parse::<f64>() else { return None };
+ let mut f = num.parse::<Double>().unwrap_or_else(|e| {
+ panic!("apfloat::ieee::Double failed to parse `{}`: {:?}", num, e)
+ });
+
+ assert!(
+ u128::from(rust_f.to_bits()) == f.to_bits(),
+ "apfloat::ieee::Double gave different result for `{}`: \
+ {}({:#x}) vs Rust's {}({:#x})",
+ rust_f,
+ f,
+ f.to_bits(),
+ Double::from_bits(rust_f.to_bits().into()),
+ rust_f.to_bits()
+ );
+
+ if neg {
+ f = -f;
+ }
+
+ Some(Scalar::from_f64(f))
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Builder methods are broken up into modules, depending on what kind
+// of thing is being lowered. Note that they use the `unpack!` macro
+// above extensively.
+
+mod block;
+mod cfg;
+mod expr;
+mod matches;
+mod misc;
+mod scope;
+
+pub(crate) use expr::category::Category as ExprCategory;
diff --git a/compiler/rustc_mir_build/src/build/scope.rs b/compiler/rustc_mir_build/src/build/scope.rs
new file mode 100644
index 000000000..b2fd9f25b
--- /dev/null
+++ b/compiler/rustc_mir_build/src/build/scope.rs
@@ -0,0 +1,1395 @@
+/*!
+Managing the scope stack. The scopes are tied to lexical scopes, so as
+we descend the THIR, we push a scope on the stack, build its
+contents, and then pop it off. Every scope is named by a
+`region::Scope`.
+
+### SEME Regions
+
+When pushing a new [Scope], we record the current point in the graph (a
+basic block); this marks the entry to the scope. We then generate more
+stuff in the control-flow graph. Whenever the scope is exited, either
+via a `break` or `return` or just by fallthrough, that marks an exit
+from the scope. Each lexical scope thus corresponds to a single-entry,
+multiple-exit (SEME) region in the control-flow graph.
+
+For now, we record the `region::Scope` for each SEME region for later reference
+(see caveat in next paragraph). This is because destruction scopes are tied to
+them. This may change in the future so that MIR lowering determines its own
+destruction scopes.
+
+### Not so SEME Regions
+
+In the course of building matches, it sometimes happens that certain code
+(namely guards) gets executed multiple times. This means that a single lexical
+scope may in fact correspond to multiple, disjoint SEME regions. So in fact our
+mapping is from one scope to a vector of SEME regions. Since the SEME regions
+are disjoint, the mapping is still one-to-one for the set of SEME regions that
+we're currently in.
+
+Also in matches, the scopes assigned to arms are not always even SEME regions!
+Each arm has a single region with one entry for each pattern. We manually
+manipulate the scheduled drops in this scope to avoid dropping things multiple
+times.
+
+### Drops
+
+The primary purpose for scopes is to insert drops: while building
+the contents, we also accumulate places that need to be dropped upon
+exit from each scope. This is done by calling `schedule_drop`. Once a
+drop is scheduled, whenever we branch out we will insert drops of all
+those places onto the outgoing edge. Note that we don't know the full
+set of scheduled drops up front, and so whenever we exit from the
+scope we only drop the values scheduled thus far. For example, consider
+the scope S corresponding to this loop:
+
+```
+# let cond = true;
+loop {
+ let x = ..;
+ if cond { break; }
+ let y = ..;
+}
+```
+
+When processing the `let x`, we will add one drop to the scope for
+`x`. The break will then insert a drop for `x`. When we process `let
+y`, we will add another drop (in fact, to a subscope, but let's ignore
+that for now); any later drops would also drop `y`.
+
+### Early exit
+
+There are numerous "normal" ways to early exit a scope: `break`,
+`continue`, `return` (panics are handled separately). Whenever an
+early exit occurs, the method `break_scope` is called. It is given the
+current point in execution where the early exit occurs, as well as the
+scope you want to branch to (note that all early exits from a scope are to some
+other enclosing scope). `break_scope` will record the set of drops currently
+scheduled in a [DropTree]. Later, before `in_breakable_scope` exits, the drops
+will be added to the CFG.
+
+Panics are handled in a similar fashion, except that the drops are added to the
+MIR once the rest of the function has finished being lowered. If a terminator
+can panic, call `diverge_from(block)` with the block containing the terminator
+`block`.
+
+### Breakable scopes
+
+In addition to the normal scope stack, we track a loop scope stack
+that contains only loops and breakable blocks. It tracks where a `break`,
+`continue` or `return` should go to.
+
+*/
+
+use std::mem;
+
+use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::IndexVec;
+use rustc_middle::middle::region;
+use rustc_middle::mir::*;
+use rustc_middle::thir::{Expr, LintLevel};
+
+use rustc_span::{Span, DUMMY_SP};
+
+#[derive(Debug)]
+pub struct Scopes<'tcx> {
+ scopes: Vec<Scope>,
+
+ /// The current set of breakable scopes. See module comment for more details.
+ breakable_scopes: Vec<BreakableScope<'tcx>>,
+
+ /// The scope of the innermost if-then currently being lowered.
+ if_then_scope: Option<IfThenScope>,
+
+ /// Drops that need to be done on unwind paths. See the comment on
+ /// [DropTree] for more details.
+ unwind_drops: DropTree,
+
+ /// Drops that need to be done on paths to the `GeneratorDrop` terminator.
+ generator_drops: DropTree,
+}
+
+#[derive(Debug)]
+struct Scope {
+ /// The source scope this scope was created in.
+ source_scope: SourceScope,
+
+    /// The `region::Scope` that this scope corresponds to in the source code.
+ region_scope: region::Scope,
+
+    /// Set of places to drop when exiting this scope. This starts
+ /// out empty but grows as variables are declared during the
+ /// building process. This is a stack, so we always drop from the
+ /// end of the vector (top of the stack) first.
+ drops: Vec<DropData>,
+
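+    /// Locals that have been moved out of by the time this scope is
+    /// exited, and therefore don't need a drop on the normal exit
+    /// paths; see `record_operands_moved`.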
+ moved_locals: Vec<Local>,
+
+ /// The drop index that will drop everything in and below this scope on an
+ /// unwind path.
+ cached_unwind_block: Option<DropIdx>,
+
+ /// The drop index that will drop everything in and below this scope on a
+ /// generator drop path.
+ cached_generator_drop_block: Option<DropIdx>,
+}
+
+#[derive(Clone, Copy, Debug)]
+struct DropData {
+    /// The `Span` where the drop obligation was incurred (typically where the
+    /// place was declared).
+ source_info: SourceInfo,
+
+    /// The local to drop.
+ local: Local,
+
+ /// Whether this is a value Drop or a StorageDead.
+ kind: DropKind,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub(crate) enum DropKind {
+ Value,
+ Storage,
+}
+
+#[derive(Debug)]
+struct BreakableScope<'tcx> {
+ /// Region scope of the loop
+ region_scope: region::Scope,
+ /// The destination of the loop/block expression itself (i.e., where to put
+ /// the result of a `break` or `return` expression)
+ break_destination: Place<'tcx>,
+ /// Drops that happen on the `break`/`return` path.
+ break_drops: DropTree,
+ /// Drops that happen on the `continue` path.
+ continue_drops: Option<DropTree>,
+}
+
+#[derive(Debug)]
+struct IfThenScope {
+ /// The if-then scope or arm scope
+ region_scope: region::Scope,
+ /// Drops that happen on the `else` path.
+ else_drops: DropTree,
+}
+
+/// The target of an expression that breaks out of a scope
+#[derive(Clone, Copy, Debug)]
+pub(crate) enum BreakableTarget {
+ Continue(region::Scope),
+ Break(region::Scope),
+ Return,
+}
+
+rustc_index::newtype_index! {
+ struct DropIdx { .. }
+}
+
+const ROOT_NODE: DropIdx = DropIdx::from_u32(0);
+
+/// A tree of drops that we have deferred lowering. It's used for:
+///
+/// * Drops on unwind paths
+/// * Drops on generator drop paths (when a suspended generator is dropped)
+/// * Drops on return and loop exit paths
+/// * Drops on the else path in an `if let` chain
+///
+/// Once no more nodes can be added to the tree, we lower it to MIR in one go
+/// in `build_mir`.
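+///
+/// As a rough sketch, two `break`s that exit the same pair of scopes
+/// share their common drops, so for locals `_1` (outer scope) and `_2`
+/// (inner scope) the tree looks like
+///
+/// ```ignore (illustrative)
+/// ROOT_NODE <- drop(_1) <- drop(_2)
+/// ```
+///
+/// A break with both locals live enters at `drop(_2)`; one with only
+/// `_1` live enters at `drop(_1)`; both paths then run toward
+/// `ROOT_NODE`.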
+#[derive(Debug)]
+struct DropTree {
+ /// Drops in the tree.
+ drops: IndexVec<DropIdx, (DropData, DropIdx)>,
+ /// Map for finding the inverse of the `next_drop` relation:
+ ///
+ /// `previous_drops[(drops[i].1, drops[i].0.local, drops[i].0.kind)] == i`
+ previous_drops: FxHashMap<(DropIdx, Local, DropKind), DropIdx>,
+ /// Edges into the `DropTree` that need to be added once it's lowered.
+ entry_points: Vec<(DropIdx, BasicBlock)>,
+}
+
+impl Scope {
+ /// Whether there's anything to do for the cleanup path, that is,
+ /// when unwinding through this scope. This includes destructors,
+ /// but not StorageDead statements, which don't get emitted at all
+ /// for unwinding, for several reasons:
+ /// * clang doesn't emit llvm.lifetime.end for C++ unwinding
+ /// * LLVM's memory dependency analysis can't handle it atm
+ /// * polluting the cleanup MIR with StorageDead creates
+    ///   landing pads even though there are no actual destructors
+    /// * freeing up stack space has no effect during unwinding
+    ///
+    /// Note that for generators we do emit StorageDeads, so that the
+    /// MIR generator transform can make use of them for optimization.
+ fn needs_cleanup(&self) -> bool {
+ self.drops.iter().any(|drop| match drop.kind {
+ DropKind::Value => true,
+ DropKind::Storage => false,
+ })
+ }
+
+ fn invalidate_cache(&mut self) {
+ self.cached_unwind_block = None;
+ self.cached_generator_drop_block = None;
+ }
+}
+
+/// A trait that determines how [DropTree] creates its blocks and
+/// links to any entry nodes.
+trait DropTreeBuilder<'tcx> {
+ /// Create a new block for the tree. This should call either
+ /// `cfg.start_new_block()` or `cfg.start_new_cleanup_block()`.
+ fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock;
+
+ /// Links a block outside the drop tree, `from`, to the block `to` inside
+ /// the drop tree.
+ fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock);
+}
+
+impl DropTree {
+ fn new() -> Self {
+ // The root node of the tree doesn't represent a drop, but instead
+ // represents the block in the tree that should be jumped to once all
+ // of the required drops have been performed.
+ let fake_source_info = SourceInfo::outermost(DUMMY_SP);
+ let fake_data =
+ DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage };
+ let drop_idx = DropIdx::MAX;
+ let drops = IndexVec::from_elem_n((fake_data, drop_idx), 1);
+ Self { drops, entry_points: Vec::new(), previous_drops: FxHashMap::default() }
+ }
+
+ fn add_drop(&mut self, drop: DropData, next: DropIdx) -> DropIdx {
+ let drops = &mut self.drops;
+ *self
+ .previous_drops
+ .entry((next, drop.local, drop.kind))
+ .or_insert_with(|| drops.push((drop, next)))
+ }
+
+ fn add_entry(&mut self, from: BasicBlock, to: DropIdx) {
+ debug_assert!(to < self.drops.next_index());
+ self.entry_points.push((to, from));
+ }
+
+ /// Builds the MIR for a given drop tree.
+ ///
+ /// `blocks` should have the same length as `self.drops`, and may have its
+ /// first value set to some already existing block.
+ fn build_mir<'tcx, T: DropTreeBuilder<'tcx>>(
+ &mut self,
+ cfg: &mut CFG<'tcx>,
+ blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
+ ) {
+ debug!("DropTree::build_mir(drops = {:#?})", self);
+ assert_eq!(blocks.len(), self.drops.len());
+
+ self.assign_blocks::<T>(cfg, blocks);
+ self.link_blocks(cfg, blocks)
+ }
+
+ /// Assign blocks for all of the drops in the drop tree that need them.
+ fn assign_blocks<'tcx, T: DropTreeBuilder<'tcx>>(
+ &mut self,
+ cfg: &mut CFG<'tcx>,
+ blocks: &mut IndexVec<DropIdx, Option<BasicBlock>>,
+ ) {
+ // StorageDead statements can share blocks with each other and also with
+ // a Drop terminator. We iterate through the drops to find which drops
+ // need their own block.
+ #[derive(Clone, Copy)]
+ enum Block {
+ // This drop is unreachable
+ None,
+ // This drop is only reachable through the `StorageDead` with the
+ // specified index.
+ Shares(DropIdx),
+ // This drop has more than one way of being reached, or it is
+ // branched to from outside the tree, or its predecessor is a
+ // `Value` drop.
+ Own,
+ }
+
+ let mut needs_block = IndexVec::from_elem(Block::None, &self.drops);
+ if blocks[ROOT_NODE].is_some() {
+ // In some cases (such as drops for `continue`) the root node
+ // already has a block. In this case, make sure that we don't
+ // override it.
+ needs_block[ROOT_NODE] = Block::Own;
+ }
+
+ // Sort so that we only need to check the last value.
+ let entry_points = &mut self.entry_points;
+ entry_points.sort();
+
+ for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
+ if entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
+ let block = *blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
+ needs_block[drop_idx] = Block::Own;
+ while entry_points.last().map_or(false, |entry_point| entry_point.0 == drop_idx) {
+ let entry_block = entry_points.pop().unwrap().1;
+ T::add_entry(cfg, entry_block, block);
+ }
+ }
+ match needs_block[drop_idx] {
+ Block::None => continue,
+ Block::Own => {
+ blocks[drop_idx].get_or_insert_with(|| T::make_block(cfg));
+ }
+ Block::Shares(pred) => {
+ blocks[drop_idx] = blocks[pred];
+ }
+ }
+ if let DropKind::Value = drop_data.0.kind {
+ needs_block[drop_data.1] = Block::Own;
+ } else if drop_idx != ROOT_NODE {
+ match &mut needs_block[drop_data.1] {
+ pred @ Block::None => *pred = Block::Shares(drop_idx),
+ pred @ Block::Shares(_) => *pred = Block::Own,
+ Block::Own => (),
+ }
+ }
+ }
+
+ debug!("assign_blocks: blocks = {:#?}", blocks);
+ assert!(entry_points.is_empty());
+ }
+
+ fn link_blocks<'tcx>(
+ &self,
+ cfg: &mut CFG<'tcx>,
+ blocks: &IndexVec<DropIdx, Option<BasicBlock>>,
+ ) {
+ for (drop_idx, drop_data) in self.drops.iter_enumerated().rev() {
+ let Some(block) = blocks[drop_idx] else { continue };
+ match drop_data.0.kind {
+ DropKind::Value => {
+ let terminator = TerminatorKind::Drop {
+ target: blocks[drop_data.1].unwrap(),
+ // The caller will handle this if needed.
+ unwind: None,
+ place: drop_data.0.local.into(),
+ };
+ cfg.terminate(block, drop_data.0.source_info, terminator);
+ }
+ // Root nodes don't correspond to a drop.
+ DropKind::Storage if drop_idx == ROOT_NODE => {}
+ DropKind::Storage => {
+ let stmt = Statement {
+ source_info: drop_data.0.source_info,
+ kind: StatementKind::StorageDead(drop_data.0.local),
+ };
+ cfg.push(block, stmt);
+ let target = blocks[drop_data.1].unwrap();
+ if target != block {
+ // Diagnostics don't use this `Span` but debuginfo
+ // might. Since we don't want breakpoints to be placed
+ // here, especially when this is on an unwind path, we
+ // use `DUMMY_SP`.
+ let source_info = SourceInfo { span: DUMMY_SP, ..drop_data.0.source_info };
+ let terminator = TerminatorKind::Goto { target };
+ cfg.terminate(block, source_info, terminator);
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> Scopes<'tcx> {
+ pub(crate) fn new() -> Self {
+ Self {
+ scopes: Vec::new(),
+ breakable_scopes: Vec::new(),
+ if_then_scope: None,
+ unwind_drops: DropTree::new(),
+ generator_drops: DropTree::new(),
+ }
+ }
+
+ fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) {
+ debug!("push_scope({:?})", region_scope);
+ self.scopes.push(Scope {
+ source_scope: vis_scope,
+ region_scope: region_scope.0,
+ drops: vec![],
+ moved_locals: vec![],
+ cached_unwind_block: None,
+ cached_generator_drop_block: None,
+ });
+ }
+
+ fn pop_scope(&mut self, region_scope: (region::Scope, SourceInfo)) -> Scope {
+ let scope = self.scopes.pop().unwrap();
+ assert_eq!(scope.region_scope, region_scope.0);
+ scope
+ }
+
+ fn scope_index(&self, region_scope: region::Scope, span: Span) -> usize {
+ self.scopes
+ .iter()
+ .rposition(|scope| scope.region_scope == region_scope)
+ .unwrap_or_else(|| span_bug!(span, "region_scope {:?} does not enclose", region_scope))
+ }
+
+ /// Returns the topmost active scope, which is known to be alive until
+ /// the next scope expression.
+ fn topmost(&self) -> region::Scope {
+ self.scopes.last().expect("topmost_scope: no scopes present").region_scope
+ }
+}
+
+impl<'a, 'tcx> Builder<'a, 'tcx> {
+ // Adding and removing scopes
+ // ==========================
+ // Start a breakable scope, which tracks where `continue`, `break` and
+ // `return` should branch to.
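+    //
+    // As a rough sketch of how this is used (details vary by caller),
+    // lowering a `loop` wraps the body like:
+    //
+    //     this.in_breakable_scope(Some(loop_block), destination, span, |this| {
+    //         // ... build the loop body; `break`/`continue` reach it via
+    //         // `break_scope`, which records drops in this scope's drop
+    //         // trees ...
+    //         None
+    //     });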
+ pub(crate) fn in_breakable_scope<F>(
+ &mut self,
+ loop_block: Option<BasicBlock>,
+ break_destination: Place<'tcx>,
+ span: Span,
+ f: F,
+ ) -> BlockAnd<()>
+ where
+ F: FnOnce(&mut Builder<'a, 'tcx>) -> Option<BlockAnd<()>>,
+ {
+ let region_scope = self.scopes.topmost();
+ let scope = BreakableScope {
+ region_scope,
+ break_destination,
+ break_drops: DropTree::new(),
+ continue_drops: loop_block.map(|_| DropTree::new()),
+ };
+ self.scopes.breakable_scopes.push(scope);
+ let normal_exit_block = f(self);
+ let breakable_scope = self.scopes.breakable_scopes.pop().unwrap();
+ assert!(breakable_scope.region_scope == region_scope);
+ let break_block = self.build_exit_tree(breakable_scope.break_drops, None);
+ if let Some(drops) = breakable_scope.continue_drops {
+ self.build_exit_tree(drops, loop_block);
+ }
+ match (normal_exit_block, break_block) {
+ (Some(block), None) | (None, Some(block)) => block,
+ (None, None) => self.cfg.start_new_block().unit(),
+ (Some(normal_block), Some(exit_block)) => {
+ let target = self.cfg.start_new_block();
+ let source_info = self.source_info(span);
+ self.cfg.terminate(
+ unpack!(normal_block),
+ source_info,
+ TerminatorKind::Goto { target },
+ );
+ self.cfg.terminate(
+ unpack!(exit_block),
+ source_info,
+ TerminatorKind::Goto { target },
+ );
+ target.unit()
+ }
+ }
+ }
+
+ /// Start an if-then scope which tracks drop for `if` expressions and `if`
+ /// guards.
+ ///
+ /// For an if-let chain:
+ ///
+ /// if let Some(x) = a && let Some(y) = b && let Some(z) = c { ... }
+ ///
+ /// There are three possible ways the condition can be false and we may have
+ /// to drop `x`, `x` and `y`, or neither depending on which binding fails.
+ /// To handle this correctly we use a `DropTree` in a similar way to a
+ /// `loop` expression and 'break' out on all of the 'else' paths.
+ ///
+ /// Notes:
+ /// - We don't need to keep a stack of scopes in the `Builder` because the
+ /// 'else' paths will only leave the innermost scope.
+ /// - This is also used for match guards.
+ pub(crate) fn in_if_then_scope<F>(
+ &mut self,
+ region_scope: region::Scope,
+ f: F,
+ ) -> (BasicBlock, BasicBlock)
+ where
+ F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<()>,
+ {
+ let scope = IfThenScope { region_scope, else_drops: DropTree::new() };
+ let previous_scope = mem::replace(&mut self.scopes.if_then_scope, Some(scope));
+
+ let then_block = unpack!(f(self));
+
+ let if_then_scope = mem::replace(&mut self.scopes.if_then_scope, previous_scope).unwrap();
+ assert!(if_then_scope.region_scope == region_scope);
+
+ let else_block = self
+ .build_exit_tree(if_then_scope.else_drops, None)
+ .map_or_else(|| self.cfg.start_new_block(), |else_block_and| unpack!(else_block_and));
+
+ (then_block, else_block)
+ }
+
+ pub(crate) fn in_opt_scope<F, R>(
+ &mut self,
+ opt_scope: Option<(region::Scope, SourceInfo)>,
+ f: F,
+ ) -> BlockAnd<R>
+ where
+ F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
+ {
+ debug!("in_opt_scope(opt_scope={:?})", opt_scope);
+ if let Some(region_scope) = opt_scope {
+ self.push_scope(region_scope);
+ }
+ let mut block;
+ let rv = unpack!(block = f(self));
+ if let Some(region_scope) = opt_scope {
+ unpack!(block = self.pop_scope(region_scope, block));
+ }
+ debug!("in_scope: exiting opt_scope={:?} block={:?}", opt_scope, block);
+ block.and(rv)
+ }
+
+ /// Convenience wrapper that pushes a scope and then executes `f`
+ /// to build its contents, popping the scope afterwards.
+ pub(crate) fn in_scope<F, R>(
+ &mut self,
+ region_scope: (region::Scope, SourceInfo),
+ lint_level: LintLevel,
+ f: F,
+ ) -> BlockAnd<R>
+ where
+ F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>,
+ {
+ debug!("in_scope(region_scope={:?})", region_scope);
+ let source_scope = self.source_scope;
+ let tcx = self.tcx;
+ if let LintLevel::Explicit(current_hir_id) = lint_level {
+ // Use `maybe_lint_level_root_bounded` with `root_lint_level` as a bound
+ // to avoid adding Hir dependencies on our parents.
+ // We estimate the true lint roots here to avoid creating a lot of source scopes.
+
+ let parent_root = tcx.maybe_lint_level_root_bounded(
+ self.source_scopes[source_scope].local_data.as_ref().assert_crate_local().lint_root,
+ self.hir_id,
+ );
+ let current_root = tcx.maybe_lint_level_root_bounded(current_hir_id, self.hir_id);
+
+ if parent_root != current_root {
+ self.source_scope = self.new_source_scope(
+ region_scope.1.span,
+ LintLevel::Explicit(current_root),
+ None,
+ );
+ }
+ }
+ self.push_scope(region_scope);
+ let mut block;
+ let rv = unpack!(block = f(self));
+ unpack!(block = self.pop_scope(region_scope, block));
+ self.source_scope = source_scope;
+ debug!("in_scope: exiting region_scope={:?} block={:?}", region_scope, block);
+ block.and(rv)
+ }
+
+ /// Push a scope onto the stack. You can then build code in this
+ /// scope and call `pop_scope` afterwards. Note that these two
+ /// calls must be paired; using `in_scope` as a convenience
+    /// wrapper may be preferable.
+ pub(crate) fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo)) {
+ self.scopes.push_scope(region_scope, self.source_scope);
+ }
+
+ /// Pops a scope, which should have region scope `region_scope`,
+ /// adding any drops onto the end of `block` that are needed.
+ /// This must match 1-to-1 with `push_scope`.
+ pub(crate) fn pop_scope(
+ &mut self,
+ region_scope: (region::Scope, SourceInfo),
+ mut block: BasicBlock,
+ ) -> BlockAnd<()> {
+ debug!("pop_scope({:?}, {:?})", region_scope, block);
+
+ block = self.leave_top_scope(block);
+
+ self.scopes.pop_scope(region_scope);
+
+ block.unit()
+ }
+
+ /// Sets up the drops for breaking from `block` to `target`.
+ pub(crate) fn break_scope(
+ &mut self,
+ mut block: BasicBlock,
+ value: Option<&Expr<'tcx>>,
+ target: BreakableTarget,
+ source_info: SourceInfo,
+ ) -> BlockAnd<()> {
+ let span = source_info.span;
+
+ let get_scope_index = |scope: region::Scope| {
+ // find the loop-scope by its `region::Scope`.
+ self.scopes
+ .breakable_scopes
+ .iter()
+ .rposition(|breakable_scope| breakable_scope.region_scope == scope)
+ .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
+ };
+ let (break_index, destination) = match target {
+ BreakableTarget::Return => {
+ let scope = &self.scopes.breakable_scopes[0];
+ if scope.break_destination != Place::return_place() {
+ span_bug!(span, "`return` in item with no return scope");
+ }
+ (0, Some(scope.break_destination))
+ }
+ BreakableTarget::Break(scope) => {
+ let break_index = get_scope_index(scope);
+ let scope = &self.scopes.breakable_scopes[break_index];
+ (break_index, Some(scope.break_destination))
+ }
+ BreakableTarget::Continue(scope) => {
+ let break_index = get_scope_index(scope);
+ (break_index, None)
+ }
+ };
+
+ if let Some(destination) = destination {
+ if let Some(value) = value {
+ debug!("stmt_expr Break val block_context.push(SubExpr)");
+ self.block_context.push(BlockFrame::SubExpr);
+ unpack!(block = self.expr_into_dest(destination, block, value));
+ self.block_context.pop();
+ } else {
+ self.cfg.push_assign_unit(block, source_info, destination, self.tcx)
+ }
+ } else {
+ assert!(value.is_none(), "`return` and `break` should have a destination");
+ if self.tcx.sess.instrument_coverage() {
+ // Unlike `break` and `return`, which push an `Assign` statement to MIR, from which
+ // a Coverage code region can be generated, `continue` needs no `Assign`; but
+ // without one, the `InstrumentCoverage` MIR pass cannot generate a code region for
+ // `continue`. Coverage will be missing unless we add a dummy `Assign` to MIR.
+ self.add_dummy_assignment(span, block, source_info);
+ }
+ }
+
+ let region_scope = self.scopes.breakable_scopes[break_index].region_scope;
+ let scope_index = self.scopes.scope_index(region_scope, span);
+ let drops = if destination.is_some() {
+ &mut self.scopes.breakable_scopes[break_index].break_drops
+ } else {
+ self.scopes.breakable_scopes[break_index].continue_drops.as_mut().unwrap()
+ };
+ let mut drop_idx = ROOT_NODE;
+ for scope in &self.scopes.scopes[scope_index + 1..] {
+ for drop in &scope.drops {
+ drop_idx = drops.add_drop(*drop, drop_idx);
+ }
+ }
+ drops.add_entry(block, drop_idx);
+
+ // `build_drop_trees` doesn't have access to our source_info, so we
+ // create a dummy terminator now. `TerminatorKind::Resume` is used
+ // because MIR type checking will panic if it hasn't been overwritten.
+ self.cfg.terminate(block, source_info, TerminatorKind::Resume);
+
+ self.cfg.start_new_block().unit()
+ }
+
+ pub(crate) fn break_for_else(
+ &mut self,
+ block: BasicBlock,
+ target: region::Scope,
+ source_info: SourceInfo,
+ ) {
+ let scope_index = self.scopes.scope_index(target, source_info.span);
+ let if_then_scope = self
+ .scopes
+ .if_then_scope
+ .as_mut()
+ .unwrap_or_else(|| span_bug!(source_info.span, "no if-then scope found"));
+
+ assert_eq!(if_then_scope.region_scope, target, "breaking to incorrect scope");
+
+ let mut drop_idx = ROOT_NODE;
+ let drops = &mut if_then_scope.else_drops;
+ for scope in &self.scopes.scopes[scope_index + 1..] {
+ for drop in &scope.drops {
+ drop_idx = drops.add_drop(*drop, drop_idx);
+ }
+ }
+ drops.add_entry(block, drop_idx);
+
+ // `build_drop_trees` doesn't have access to our source_info, so we
+ // create a dummy terminator now. `TerminatorKind::Resume` is used
+ // because MIR type checking will panic if it hasn't been overwritten.
+ self.cfg.terminate(block, source_info, TerminatorKind::Resume);
+ }
+
+ // Add a dummy `Assign` statement to the CFG, with the span for the source code's `continue`
+ // statement.
+ fn add_dummy_assignment(&mut self, span: Span, block: BasicBlock, source_info: SourceInfo) {
+ let local_decl = LocalDecl::new(self.tcx.mk_unit(), span).internal();
+ let temp_place = Place::from(self.local_decls.push(local_decl));
+ self.cfg.push_assign_unit(block, source_info, temp_place, self.tcx);
+ }
+
+ fn leave_top_scope(&mut self, block: BasicBlock) -> BasicBlock {
+ // If we are emitting a `drop` statement, we need to have the cached
+ // diverge cleanup pads ready in case that drop panics.
+ let needs_cleanup = self.scopes.scopes.last().map_or(false, |scope| scope.needs_cleanup());
+ let is_generator = self.generator_kind.is_some();
+ let unwind_to = if needs_cleanup { self.diverge_cleanup() } else { DropIdx::MAX };
+
+ let scope = self.scopes.scopes.last().expect("leave_top_scope called with no scopes");
+ unpack!(build_scope_drops(
+ &mut self.cfg,
+ &mut self.scopes.unwind_drops,
+ scope,
+ block,
+ unwind_to,
+ is_generator && needs_cleanup,
+ self.arg_count,
+ ))
+ }
+
+ /// Creates a new source scope, nested in the current one.
+ pub(crate) fn new_source_scope(
+ &mut self,
+ span: Span,
+ lint_level: LintLevel,
+ safety: Option<Safety>,
+ ) -> SourceScope {
+ let parent = self.source_scope;
+ debug!(
+ "new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
+ span,
+ lint_level,
+ safety,
+ parent,
+ self.source_scopes.get(parent)
+ );
+ let scope_local_data = SourceScopeLocalData {
+ lint_root: if let LintLevel::Explicit(lint_root) = lint_level {
+ lint_root
+ } else {
+ self.source_scopes[parent].local_data.as_ref().assert_crate_local().lint_root
+ },
+ safety: safety.unwrap_or_else(|| {
+ self.source_scopes[parent].local_data.as_ref().assert_crate_local().safety
+ }),
+ };
+ self.source_scopes.push(SourceScopeData {
+ span,
+ parent_scope: Some(parent),
+ inlined: None,
+ inlined_parent_scope: None,
+ local_data: ClearCrossCrate::Set(scope_local_data),
+ })
+ }
+
+ /// Given a span and the current source scope, make a SourceInfo.
+ pub(crate) fn source_info(&self, span: Span) -> SourceInfo {
+ SourceInfo { span, scope: self.source_scope }
+ }
+
+ // Finding scopes
+ // ==============
+ /// Returns the scope that we should use as the lifetime of an
+ /// operand. Basically, an operand must live until it is consumed.
+ /// This is similar to, but not quite the same as, the temporary
+ /// scope (which can be larger or smaller).
+ ///
+ /// Consider:
+ /// ```ignore (illustrative)
+ /// let x = foo(bar(X, Y));
+ /// ```
+ /// We wish to pop the storage for X and Y after `bar()` is
+ /// called, not after the whole `let` is completed.
+ ///
+ /// As another example, if the second argument diverges:
+ /// ```ignore (illustrative)
+ /// foo(Box::new(2), panic!())
+ /// ```
+ /// We would allocate the box but then free it on the unwinding
+ /// path; we would also emit a free on the 'success' path from
+ /// panic, but that will turn out to be removed as dead-code.
+ pub(crate) fn local_scope(&self) -> region::Scope {
+ self.scopes.topmost()
+ }
+
+ // Scheduling drops
+ // ================
+ pub(crate) fn schedule_drop_storage_and_value(
+ &mut self,
+ span: Span,
+ region_scope: region::Scope,
+ local: Local,
+ ) {
+ self.schedule_drop(span, region_scope, local, DropKind::Storage);
+ self.schedule_drop(span, region_scope, local, DropKind::Value);
+ }
+
+ /// Indicates that `place` should be dropped on exit from `region_scope`.
+ ///
+ /// When called with `DropKind::Storage`, `place` shouldn't be the return
+ /// place, or a function parameter.
+ pub(crate) fn schedule_drop(
+ &mut self,
+ span: Span,
+ region_scope: region::Scope,
+ local: Local,
+ drop_kind: DropKind,
+ ) {
+ let needs_drop = match drop_kind {
+ DropKind::Value => {
+ if !self.local_decls[local].ty.needs_drop(self.tcx, self.param_env) {
+ return;
+ }
+ true
+ }
+ DropKind::Storage => {
+ if local.index() <= self.arg_count {
+ span_bug!(
+ span,
+ "`schedule_drop` called with local {:?} and arg_count {}",
+ local,
+ self.arg_count,
+ )
+ }
+ false
+ }
+ };
+
+ // When building drops, we try to cache chains of drops to reduce the
+ // number of `DropTree::add_drop` calls. This, however, means that
+ // whenever we add a drop into a scope which already had some entries
+ // in the drop tree built (and thus, cached) for it, we must invalidate
+ // all caches which might branch into the scope which had a drop just
+ // added to it. This is necessary, because otherwise some other code
+        // might use the cache to branch into an already built chain of drops,
+ // essentially ignoring the newly added drop.
+ //
+        // For example, consider two scopes, each with a drop. These have been
+        // built and thus the caches are filled:
+ //
+ // +--------------------------------------------------------+
+ // | +---------------------------------+ |
+ // | | +--------+ +-------------+ | +---------------+ |
+ // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | |
+ // | | +--------+ +-------------+ | +---------------+ |
+ // | +------------|outer_scope cache|--+ |
+ // +------------------------------|middle_scope cache|------+
+ //
+ // Now, a new, inner-most scope is added along with a new drop into
+ // both inner-most and outer-most scopes:
+ //
+ // +------------------------------------------------------------+
+ // | +----------------------------------+ |
+ // | | +--------+ +-------------+ | +---------------+ | +-------------+
+ // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) |
+ // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+
+ // | | +-+ +-------------+ | |
+ // | +---|invalid outer_scope cache|----+ |
+        // +---------------------|invalid middle_scope cache|-----------+
+ //
+ // If, when adding `drop(new)` we do not invalidate the cached blocks for both
+ // outer_scope and middle_scope, then, when building drops for the inner (right-most)
+ // scope, the old, cached blocks, without `drop(new)` will get used, producing the
+ // wrong results.
+ //
+ // Note that this code iterates scopes from the inner-most to the outer-most,
+        // invalidating the caches of each scope visited. This way the bare minimum
+        // of caches gets invalidated: if a new drop is added into the middle scope,
+        // the cache of the outer scope stays intact.
+ //
+ // Since we only cache drops for the unwind path and the generator drop
+ // path, we only need to invalidate the cache for drops that happen on
+ // the unwind or generator drop paths. This means that for
+ // non-generators we don't need to invalidate caches for `DropKind::Storage`.
+ let invalidate_caches = needs_drop || self.generator_kind.is_some();
+ for scope in self.scopes.scopes.iter_mut().rev() {
+ if invalidate_caches {
+ scope.invalidate_cache();
+ }
+
+ if scope.region_scope == region_scope {
+ let region_scope_span = region_scope.span(self.tcx, &self.region_scope_tree);
+ // Attribute scope exit drops to scope's closing brace.
+ let scope_end = self.tcx.sess.source_map().end_point(region_scope_span);
+
+ scope.drops.push(DropData {
+ source_info: SourceInfo { span: scope_end, scope: scope.source_scope },
+ local,
+ kind: drop_kind,
+ });
+
+ return;
+ }
+ }
+
+ span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local);
+ }
+
+ /// Indicates that the "local operand" stored in `local` is
+ /// *moved* at some point during execution (see `local_scope` for
+ /// more information about what a "local operand" is -- in short,
+ /// it's an intermediate operand created as part of preparing some
+ /// MIR instruction). We use this information to suppress
+ /// redundant drops on the non-unwind paths. This results in less
+ /// MIR, but also avoids spurious borrow check errors
+ /// (c.f. #64391).
+ ///
+ /// Example: when compiling the call to `foo` here:
+ ///
+ /// ```ignore (illustrative)
+ /// foo(bar(), ...)
+ /// ```
+ ///
+ /// we would evaluate `bar()` to an operand `_X`. We would also
+ /// schedule `_X` to be dropped when the expression scope for
+ /// `foo(bar())` is exited. This is relevant, for example, if the
+ /// later arguments should unwind (it would ensure that `_X` gets
+ /// dropped). However, if no unwind occurs, then `_X` will be
+ /// unconditionally consumed by the `call`:
+ ///
+ /// ```ignore (illustrative)
+ /// bb {
+ /// ...
+ /// _R = CALL(foo, _X, ...)
+ /// }
+ /// ```
+ ///
+ /// However, `_X` is still registered to be dropped, and so if we
+ /// do nothing else, we would generate a `DROP(_X)` that occurs
+ /// after the call. This will later be optimized out by the
+ /// drop-elaboration code, but in the meantime it can lead to
+ /// spurious borrow-check errors -- the problem, ironically, is
+ /// not the `DROP(_X)` itself, but the (spurious) unwind pathways
+ /// that it creates. See #64391 for an example.
+ pub(crate) fn record_operands_moved(&mut self, operands: &[Operand<'tcx>]) {
+ let local_scope = self.local_scope();
+ let scope = self.scopes.scopes.last_mut().unwrap();
+
+ assert_eq!(scope.region_scope, local_scope, "local scope is not the topmost scope!",);
+
+ // look for moves of a local variable, like `MOVE(_X)`
+ let locals_moved = operands.iter().flat_map(|operand| match operand {
+ Operand::Copy(_) | Operand::Constant(_) => None,
+ Operand::Move(place) => place.as_local(),
+ });
+
+ for local in locals_moved {
+ // check if we have a Drop for this operand and -- if so
+ // -- add it to the list of moved operands. Note that this
+ // local might not have been an operand created for this
+ // call, it could come from other places too.
+ if scope.drops.iter().any(|drop| drop.local == local && drop.kind == DropKind::Value) {
+ scope.moved_locals.push(local);
+ }
+ }
+ }
+
+ // Other
+ // =====
+ /// Returns the [DropIdx] for the innermost drop if the function unwound at
+ /// this point. The `DropIdx` will be created if it doesn't already exist.
+ fn diverge_cleanup(&mut self) -> DropIdx {
+ let is_generator = self.generator_kind.is_some();
+ let (uncached_scope, mut cached_drop) = self
+ .scopes
+ .scopes
+ .iter()
+ .enumerate()
+ .rev()
+ .find_map(|(scope_idx, scope)| {
+ scope.cached_unwind_block.map(|cached_block| (scope_idx + 1, cached_block))
+ })
+ .unwrap_or((0, ROOT_NODE));
+
+ for scope in &mut self.scopes.scopes[uncached_scope..] {
+ for drop in &scope.drops {
+ if is_generator || drop.kind == DropKind::Value {
+ cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop);
+ }
+ }
+ scope.cached_unwind_block = Some(cached_drop);
+ }
+
+ cached_drop
+ }
+
+ /// Prepares to create a path that performs all required cleanup for a
+ /// terminator that can unwind at the given basic block.
+ ///
+ /// This path terminates in Resume. The path isn't created until after all
+ /// of the non-unwind paths in this item have been lowered.
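+    ///
+    /// Typical usage is a sketch like:
+    ///
+    /// ```ignore (illustrative)
+    /// self.cfg.terminate(block, source_info, TerminatorKind::Call { /* ... */ });
+    /// self.diverge_from(block); // register the unwind entry point
+    /// ```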
+ pub(crate) fn diverge_from(&mut self, start: BasicBlock) {
+ debug_assert!(
+ matches!(
+ self.cfg.block_data(start).terminator().kind,
+ TerminatorKind::Assert { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::InlineAsm { .. }
+ ),
+ "diverge_from called on block with terminator that cannot unwind."
+ );
+
+ let next_drop = self.diverge_cleanup();
+ self.scopes.unwind_drops.add_entry(start, next_drop);
+ }
+
+ /// Sets up a path that performs all required cleanup for dropping a
+ /// generator, starting from the given block that ends in
+ /// [TerminatorKind::Yield].
+ ///
+ /// This path terminates in GeneratorDrop.
+ pub(crate) fn generator_drop_cleanup(&mut self, yield_block: BasicBlock) {
+ debug_assert!(
+ matches!(
+ self.cfg.block_data(yield_block).terminator().kind,
+ TerminatorKind::Yield { .. }
+ ),
+ "generator_drop_cleanup called on block with non-yield terminator."
+ );
+ let (uncached_scope, mut cached_drop) = self
+ .scopes
+ .scopes
+ .iter()
+ .enumerate()
+ .rev()
+ .find_map(|(scope_idx, scope)| {
+ scope.cached_generator_drop_block.map(|cached_block| (scope_idx + 1, cached_block))
+ })
+ .unwrap_or((0, ROOT_NODE));
+
+ for scope in &mut self.scopes.scopes[uncached_scope..] {
+ for drop in &scope.drops {
+ cached_drop = self.scopes.generator_drops.add_drop(*drop, cached_drop);
+ }
+ scope.cached_generator_drop_block = Some(cached_drop);
+ }
+
+ self.scopes.generator_drops.add_entry(yield_block, cached_drop);
+ }
+
+ /// Utility function for *non*-scope code to build its own drops.
+ pub(crate) fn build_drop_and_replace(
+ &mut self,
+ block: BasicBlock,
+ span: Span,
+ place: Place<'tcx>,
+ value: Operand<'tcx>,
+ ) -> BlockAnd<()> {
+ let source_info = self.source_info(span);
+ let next_target = self.cfg.start_new_block();
+
+ self.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::DropAndReplace { place, value, target: next_target, unwind: None },
+ );
+ self.diverge_from(block);
+
+ next_target.unit()
+ }
+
+ /// Creates an `Assert` terminator and returns the success block.
+ /// If the boolean condition operand does not have the expected value,
+ /// a runtime panic occurs with the given message.
+ pub(crate) fn assert(
+ &mut self,
+ block: BasicBlock,
+ cond: Operand<'tcx>,
+ expected: bool,
+ msg: AssertMessage<'tcx>,
+ span: Span,
+ ) -> BasicBlock {
+ let source_info = self.source_info(span);
+ let success_block = self.cfg.start_new_block();
+
+ self.cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Assert { cond, expected, msg, target: success_block, cleanup: None },
+ );
+ self.diverge_from(block);
+
+ success_block
+ }
+
+ /// Unschedules any drops in the top scope.
+ ///
+ /// This is only needed for `match` arm scopes, because they have one
+ /// entrance per pattern, but only one exit.
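+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// match x {
+ ///     Ok(_) | Err(_) => body(),
+ /// }
+ /// ```
+ ///
+ /// The arm scope is entered once per pattern but left only once,
+ /// after `body()` runs.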
+ pub(crate) fn clear_top_scope(&mut self, region_scope: region::Scope) {
+ let top_scope = self.scopes.scopes.last_mut().unwrap();
+
+ assert_eq!(top_scope.region_scope, region_scope);
+
+ top_scope.drops.clear();
+ top_scope.invalidate_cache();
+ }
+}
+
+/// Builds drops for `pop_scope` and `leave_top_scope`.
+fn build_scope_drops<'tcx>(
+ cfg: &mut CFG<'tcx>,
+ unwind_drops: &mut DropTree,
+ scope: &Scope,
+ mut block: BasicBlock,
+ mut unwind_to: DropIdx,
+ storage_dead_on_unwind: bool,
+ arg_count: usize,
+) -> BlockAnd<()> {
+ debug!("build_scope_drops({:?} -> {:?})", block, scope);
+
+ // Build up the drops in evaluation order. The end result will
+ // look like:
+ //
+ // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
+ // | | |
+ // : | |
+ // V V
+ // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
+ //
+ // The horizontal arrows represent the execution path when the drops return
+ // successfully. The downwards arrows represent the execution path when the
+ // drops panic (panicking while unwinding will abort, so there's no need for
+ // another set of arrows).
+ //
+ // For generators, we unwind from a drop on a local to its StorageDead
+ // statement. For other functions we don't worry about StorageDead. The
+ // drops for the unwind path should have already been generated by
+ // `diverge_cleanup`.
+
+ for drop_data in scope.drops.iter().rev() {
+ let source_info = drop_data.source_info;
+ let local = drop_data.local;
+
+ match drop_data.kind {
+ DropKind::Value => {
+ // `unwind_to` should drop the value that we're about to
+ // schedule. If dropping this value panics, then we continue
+ // with the *next* value on the unwind path.
+ debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
+ debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
+ unwind_to = unwind_drops.drops[unwind_to].1;
+
+ // If the operand has been moved, and we are not on an unwind
+ // path, then don't generate the drop. (We only take this into
+ // account for non-unwind paths so as not to disturb the
+ // caching mechanism.)
+ if scope.moved_locals.iter().any(|&o| o == local) {
+ continue;
+ }
+
+ unwind_drops.add_entry(block, unwind_to);
+
+ let next = cfg.start_new_block();
+ cfg.terminate(
+ block,
+ source_info,
+ TerminatorKind::Drop { place: local.into(), target: next, unwind: None },
+ );
+ block = next;
+ }
+ DropKind::Storage => {
+ if storage_dead_on_unwind {
+ debug_assert_eq!(unwind_drops.drops[unwind_to].0.local, drop_data.local);
+ debug_assert_eq!(unwind_drops.drops[unwind_to].0.kind, drop_data.kind);
+ unwind_to = unwind_drops.drops[unwind_to].1;
+ }
+ // Only temps and vars need `StorageDead` statements.
+ assert!(local.index() > arg_count);
+ cfg.push(block, Statement { source_info, kind: StatementKind::StorageDead(local) });
+ }
+ }
+ }
+ block.unit()
+}
+
+impl<'a, 'tcx: 'a> Builder<'a, 'tcx> {
+ /// Build a drop tree for a breakable scope.
+ ///
+ /// If `continue_block` is `Some`, then the tree is for `continue` inside a
+ /// loop. Otherwise this is for `break` or `return`.
+ fn build_exit_tree(
+ &mut self,
+ mut drops: DropTree,
+ continue_block: Option<BasicBlock>,
+ ) -> Option<BlockAnd<()>> {
+ let mut blocks = IndexVec::from_elem(None, &drops.drops);
+ blocks[ROOT_NODE] = continue_block;
+
+ drops.build_mir::<ExitScopes>(&mut self.cfg, &mut blocks);
+
+ // Link the exit drop tree to the unwind drop tree.
+ if drops.drops.iter().any(|(drop, _)| drop.kind == DropKind::Value) {
+ let unwind_target = self.diverge_cleanup();
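+ // `unwind_indices` maps each node of the exit drop tree to its
+ // counterpart in the unwind drop tree; it starts with just the
+ // root, mapped to `unwind_target`.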
+ let mut unwind_indices = IndexVec::from_elem_n(unwind_target, 1);
+ for (drop_idx, drop_data) in drops.drops.iter_enumerated().skip(1) {
+ match drop_data.0.kind {
+ DropKind::Storage => {
+ if self.generator_kind.is_some() {
+ let unwind_drop = self
+ .scopes
+ .unwind_drops
+ .add_drop(drop_data.0, unwind_indices[drop_data.1]);
+ unwind_indices.push(unwind_drop);
+ } else {
+ unwind_indices.push(unwind_indices[drop_data.1]);
+ }
+ }
+ DropKind::Value => {
+ let unwind_drop = self
+ .scopes
+ .unwind_drops
+ .add_drop(drop_data.0, unwind_indices[drop_data.1]);
+ self.scopes
+ .unwind_drops
+ .add_entry(blocks[drop_idx].unwrap(), unwind_indices[drop_data.1]);
+ unwind_indices.push(unwind_drop);
+ }
+ }
+ }
+ }
+ blocks[ROOT_NODE].map(BasicBlock::unit)
+ }
+
+ /// Build the unwind and generator drop trees.
+ pub(crate) fn build_drop_trees(&mut self) {
+ if self.generator_kind.is_some() {
+ self.build_generator_drop_trees();
+ } else {
+ Self::build_unwind_tree(
+ &mut self.cfg,
+ &mut self.scopes.unwind_drops,
+ self.fn_span,
+ &mut None,
+ );
+ }
+ }
+
+ fn build_generator_drop_trees(&mut self) {
+ // Build the drop tree for dropping the generator while it's suspended.
+ let drops = &mut self.scopes.generator_drops;
+ let cfg = &mut self.cfg;
+ let fn_span = self.fn_span;
+ let mut blocks = IndexVec::from_elem(None, &drops.drops);
+ drops.build_mir::<GeneratorDrop>(cfg, &mut blocks);
+ if let Some(root_block) = blocks[ROOT_NODE] {
+ cfg.terminate(
+ root_block,
+ SourceInfo::outermost(fn_span),
+ TerminatorKind::GeneratorDrop,
+ );
+ }
+
+ // Build the drop tree for unwinding in the normal control flow paths.
+ let resume_block = &mut None;
+ let unwind_drops = &mut self.scopes.unwind_drops;
+ Self::build_unwind_tree(cfg, unwind_drops, fn_span, resume_block);
+
+ // Build the drop tree for unwinding when dropping a suspended
+ // generator.
+ //
+ // This is a tree separate from the standard unwind paths, to
+ // prevent drop elaboration from creating drop flags that would have
+ // to be captured by the generator. I'm not sure how important this
+ // optimization is, but it is here.
+ for (drop_idx, drop_data) in drops.drops.iter_enumerated() {
+ if let DropKind::Value = drop_data.0.kind {
+ debug_assert!(drop_data.1 < drops.drops.next_index());
+ drops.entry_points.push((drop_data.1, blocks[drop_idx].unwrap()));
+ }
+ }
+ Self::build_unwind_tree(cfg, drops, fn_span, resume_block);
+ }
+
+ fn build_unwind_tree(
+ cfg: &mut CFG<'tcx>,
+ drops: &mut DropTree,
+ fn_span: Span,
+ resume_block: &mut Option<BasicBlock>,
+ ) {
+ let mut blocks = IndexVec::from_elem(None, &drops.drops);
+ blocks[ROOT_NODE] = *resume_block;
+ drops.build_mir::<Unwind>(cfg, &mut blocks);
+ if let (None, Some(resume)) = (*resume_block, blocks[ROOT_NODE]) {
+ cfg.terminate(resume, SourceInfo::outermost(fn_span), TerminatorKind::Resume);
+
+ *resume_block = blocks[ROOT_NODE];
+ }
+ }
+}
+
+// DropTreeBuilder implementations.
+
+struct ExitScopes;
+
+impl<'tcx> DropTreeBuilder<'tcx> for ExitScopes {
+ fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+ cfg.start_new_block()
+ }
+ fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+ cfg.block_data_mut(from).terminator_mut().kind = TerminatorKind::Goto { target: to };
+ }
+}
+
+struct GeneratorDrop;
+
+impl<'tcx> DropTreeBuilder<'tcx> for GeneratorDrop {
+ fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+ cfg.start_new_block()
+ }
+ fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+ let term = cfg.block_data_mut(from).terminator_mut();
+ if let TerminatorKind::Yield { ref mut drop, .. } = term.kind {
+ *drop = Some(to);
+ } else {
+ span_bug!(
+ term.source_info.span,
+ "cannot enter generator drop tree from {:?}",
+ term.kind
+ )
+ }
+ }
+}
+
+struct Unwind;
+
+impl<'tcx> DropTreeBuilder<'tcx> for Unwind {
+ fn make_block(cfg: &mut CFG<'tcx>) -> BasicBlock {
+ cfg.start_new_cleanup_block()
+ }
+ fn add_entry(cfg: &mut CFG<'tcx>, from: BasicBlock, to: BasicBlock) {
+ let term = &mut cfg.block_data_mut(from).terminator_mut();
+ match &mut term.kind {
+ TerminatorKind::Drop { unwind, .. }
+ | TerminatorKind::DropAndReplace { unwind, .. }
+ | TerminatorKind::FalseUnwind { unwind, .. }
+ | TerminatorKind::Call { cleanup: unwind, .. }
+ | TerminatorKind::Assert { cleanup: unwind, .. }
+ | TerminatorKind::InlineAsm { cleanup: unwind, .. } => {
+ *unwind = Some(to);
+ }
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. } => {
+ span_bug!(term.source_info.span, "cannot unwind from {:?}", term.kind)
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/check_unsafety.rs b/compiler/rustc_mir_build/src/check_unsafety.rs
new file mode 100644
index 000000000..864caf0ba
--- /dev/null
+++ b/compiler/rustc_mir_build/src/check_unsafety.rs
@@ -0,0 +1,680 @@
+use crate::build::ExprCategory;
+use rustc_middle::thir::visit::{self, Visitor};
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_middle::mir::BorrowKind;
+use rustc_middle::thir::*;
+use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt};
+use rustc_session::lint::builtin::{UNSAFE_OP_IN_UNSAFE_FN, UNUSED_UNSAFE};
+use rustc_session::lint::Level;
+use rustc_span::def_id::{DefId, LocalDefId};
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+
+use std::borrow::Cow;
+use std::ops::Bound;
+
+struct UnsafetyVisitor<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ thir: &'a Thir<'tcx>,
+ /// The `HirId` of the current scope, which would be the `HirId`
+ /// of the current HIR node, modulo adjustments. Used for lint levels.
+ hir_context: hir::HirId,
+ /// The current "safety context". This notably tracks whether we are in an
+ /// `unsafe` block, and whether it has been used.
+ safety_context: SafetyContext,
+ body_unsafety: BodyUnsafety,
+ /// The `#[target_feature]` attributes of the body. Used for checking
+ /// calls to functions with `#[target_feature]` (RFC 2396).
+ body_target_features: &'tcx [Symbol],
+ /// When inside the LHS of an assignment to a field, this is the type
+ /// of the LHS and the span of the assignment expression.
+ assignment_info: Option<(Ty<'tcx>, Span)>,
+ in_union_destructure: bool,
+ param_env: ParamEnv<'tcx>,
+ inside_adt: bool,
+}
+
+impl<'tcx> UnsafetyVisitor<'_, 'tcx> {
+ fn in_safety_context(&mut self, safety_context: SafetyContext, f: impl FnOnce(&mut Self)) {
+ if let (
+ SafetyContext::UnsafeBlock { span: enclosing_span, .. },
+ SafetyContext::UnsafeBlock { span: block_span, hir_id, .. },
+ ) = (self.safety_context, safety_context)
+ {
+ self.warn_unused_unsafe(
+ hir_id,
+ block_span,
+ Some((self.tcx.sess.source_map().guess_head_span(enclosing_span), "block")),
+ );
+ f(self);
+ } else {
+ let prev_context = self.safety_context;
+ self.safety_context = safety_context;
+
+ f(self);
+
+ if let SafetyContext::UnsafeBlock { used: false, span, hir_id } = self.safety_context {
+ self.warn_unused_unsafe(
+ hir_id,
+ span,
+ if self.unsafe_op_in_unsafe_fn_allowed() {
+ self.body_unsafety.unsafe_fn_sig_span().map(|span| (span, "fn"))
+ } else {
+ None
+ },
+ );
+ }
+ self.safety_context = prev_context;
+ }
+ }
+
+ fn requires_unsafe(&mut self, span: Span, kind: UnsafeOpKind) {
+ let unsafe_op_in_unsafe_fn_allowed = self.unsafe_op_in_unsafe_fn_allowed();
+ match self.safety_context {
+ SafetyContext::BuiltinUnsafeBlock => {}
+ SafetyContext::UnsafeBlock { ref mut used, .. } => {
+ if !self.body_unsafety.is_unsafe() || !unsafe_op_in_unsafe_fn_allowed {
+ // Mark this block as useful
+ *used = true;
+ }
+ }
+ SafetyContext::UnsafeFn if unsafe_op_in_unsafe_fn_allowed => {}
+ SafetyContext::UnsafeFn => {
+ let (description, note) = kind.description_and_note(self.tcx);
+ // unsafe_op_in_unsafe_fn is disallowed
+ self.tcx.struct_span_lint_hir(
+ UNSAFE_OP_IN_UNSAFE_FN,
+ self.hir_context,
+ span,
+ |lint| {
+ lint.build(&format!(
+ "{} is unsafe and requires unsafe block (error E0133)",
+ description,
+ ))
+ .span_label(span, kind.simple_description())
+ .note(note)
+ .emit();
+ },
+ )
+ }
+ SafetyContext::Safe => {
+ let (description, note) = kind.description_and_note(self.tcx);
+ let fn_sugg = if unsafe_op_in_unsafe_fn_allowed { " function or" } else { "" };
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0133,
+ "{} is unsafe and requires unsafe{} block",
+ description,
+ fn_sugg,
+ )
+ .span_label(span, kind.simple_description())
+ .note(note)
+ .emit();
+ }
+ }
+ }
+
+ fn warn_unused_unsafe(
+ &self,
+ hir_id: hir::HirId,
+ block_span: Span,
+ enclosing_unsafe: Option<(Span, &'static str)>,
+ ) {
+ let block_span = self.tcx.sess.source_map().guess_head_span(block_span);
+ self.tcx.struct_span_lint_hir(UNUSED_UNSAFE, hir_id, block_span, |lint| {
+ let msg = "unnecessary `unsafe` block";
+ let mut db = lint.build(msg);
+ db.span_label(block_span, msg);
+ if let Some((span, kind)) = enclosing_unsafe {
+ db.span_label(span, format!("because it's nested under this `unsafe` {}", kind));
+ }
+ db.emit();
+ });
+ }
+
+ /// Whether the `unsafe_op_in_unsafe_fn` lint is `allow`ed at the current HIR node.
+ fn unsafe_op_in_unsafe_fn_allowed(&self) -> bool {
+ self.tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, self.hir_context).0 == Level::Allow
+ }
+}
+
+// Searches for accesses to layout constrained fields.
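+//
+// For example (illustrative), given a scalar-range-restricted type such as
+//
+//     #[rustc_layout_scalar_valid_range_start(1)]
+//     struct NonZero(u32);
+//
+// a field access like `nz.0` is layout-constrained: mutating it, or taking
+// a mutable borrow of it, requires `unsafe`.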
+struct LayoutConstrainedPlaceVisitor<'a, 'tcx> {
+ found: bool,
+ thir: &'a Thir<'tcx>,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'a, 'tcx> LayoutConstrainedPlaceVisitor<'a, 'tcx> {
+ fn new(thir: &'a Thir<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+ Self { found: false, thir, tcx }
+ }
+}
+
+impl<'a, 'tcx> Visitor<'a, 'tcx> for LayoutConstrainedPlaceVisitor<'a, 'tcx> {
+ fn thir(&self) -> &'a Thir<'tcx> {
+ self.thir
+ }
+
+ fn visit_expr(&mut self, expr: &Expr<'tcx>) {
+ match expr.kind {
+ ExprKind::Field { lhs, .. } => {
+ if let ty::Adt(adt_def, _) = self.thir[lhs].ty.kind() {
+ if (Bound::Unbounded, Bound::Unbounded)
+ != self.tcx.layout_scalar_valid_range(adt_def.did())
+ {
+ self.found = true;
+ }
+ }
+ visit::walk_expr(self, expr);
+ }
+
+ // Keep walking through the expression as long as we stay in the same
+ // place, i.e. the expression is a place expression and not a dereference
+ // (since dereferencing something leads us to a different place).
+ ExprKind::Deref { .. } => {}
+ ref kind if ExprCategory::of(kind).map_or(true, |cat| cat == ExprCategory::Place) => {
+ visit::walk_expr(self, expr);
+ }
+
+ _ => {}
+ }
+ }
+}
+
+impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> {
+ fn thir(&self) -> &'a Thir<'tcx> {
+ &self.thir
+ }
+
+ fn visit_block(&mut self, block: &Block) {
+ match block.safety_mode {
+ // compiler-generated unsafe code should not count towards the usefulness of
+ // an outer unsafe block
+ BlockSafety::BuiltinUnsafe => {
+ self.in_safety_context(SafetyContext::BuiltinUnsafeBlock, |this| {
+ visit::walk_block(this, block)
+ });
+ }
+ BlockSafety::ExplicitUnsafe(hir_id) => {
+ self.in_safety_context(
+ SafetyContext::UnsafeBlock { span: block.span, hir_id, used: false },
+ |this| visit::walk_block(this, block),
+ );
+ }
+ BlockSafety::Safe => {
+ visit::walk_block(self, block);
+ }
+ }
+ }
+
+ fn visit_pat(&mut self, pat: &Pat<'tcx>) {
+ if self.in_union_destructure {
+ match *pat.kind {
+ // Binding to a variable allows getting stuff out of the variable.
+ PatKind::Binding { .. }
+ // match is conditional on having this value
+ | PatKind::Constant { .. }
+ | PatKind::Variant { .. }
+ | PatKind::Leaf { .. }
+ | PatKind::Deref { .. }
+ | PatKind::Range { .. }
+ | PatKind::Slice { .. }
+ | PatKind::Array { .. } => {
+ self.requires_unsafe(pat.span, AccessToUnionField);
+ return; // we can return here since this already requires unsafe
+ }
+ // a wildcard doesn't take anything out of the union
+ PatKind::Wild |
+ // these just wrap other patterns
+ PatKind::Or { .. } |
+ PatKind::AscribeUserType { .. } => {}
+ }
+ };
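+ // Illustrative example: in `let U { f } = u;` where `U` is a union,
+ // binding `f` reads a value out of the union, so the checks above
+ // flag the pattern as requiring `unsafe`.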
+
+ match &*pat.kind {
+ PatKind::Leaf { .. } => {
+ if let ty::Adt(adt_def, ..) = pat.ty.kind() {
+ if adt_def.is_union() {
+ let old_in_union_destructure =
+ std::mem::replace(&mut self.in_union_destructure, true);
+ visit::walk_pat(self, pat);
+ self.in_union_destructure = old_in_union_destructure;
+ } else if (Bound::Unbounded, Bound::Unbounded)
+ != self.tcx.layout_scalar_valid_range(adt_def.did())
+ {
+ let old_inside_adt = std::mem::replace(&mut self.inside_adt, true);
+ visit::walk_pat(self, pat);
+ self.inside_adt = old_inside_adt;
+ } else {
+ visit::walk_pat(self, pat);
+ }
+ } else {
+ visit::walk_pat(self, pat);
+ }
+ }
+ PatKind::Binding { mode: BindingMode::ByRef(borrow_kind), ty, .. } => {
+ if self.inside_adt {
+ let ty::Ref(_, ty, _) = ty.kind() else {
+ span_bug!(
+ pat.span,
+ "BindingMode::ByRef in pattern, but found non-reference type {}",
+ ty
+ );
+ };
+ match borrow_kind {
+ BorrowKind::Shallow | BorrowKind::Shared | BorrowKind::Unique => {
+ if !ty.is_freeze(self.tcx.at(pat.span), self.param_env) {
+ self.requires_unsafe(pat.span, BorrowOfLayoutConstrainedField);
+ }
+ }
+ BorrowKind::Mut { .. } => {
+ self.requires_unsafe(pat.span, MutationOfLayoutConstrainedField);
+ }
+ }
+ }
+ visit::walk_pat(self, pat);
+ }
+ PatKind::Deref { .. } => {
+ let old_inside_adt = std::mem::replace(&mut self.inside_adt, false);
+ visit::walk_pat(self, pat);
+ self.inside_adt = old_inside_adt;
+ }
+ _ => {
+ visit::walk_pat(self, pat);
+ }
+ }
+ }
+
+ fn visit_expr(&mut self, expr: &Expr<'tcx>) {
+ // could we be in the LHS of an assignment to a field?
+ match expr.kind {
+ ExprKind::Field { .. }
+ | ExprKind::VarRef { .. }
+ | ExprKind::UpvarRef { .. }
+ | ExprKind::Scope { .. }
+ | ExprKind::Cast { .. } => {}
+
+ ExprKind::AddressOf { .. }
+ | ExprKind::Adt { .. }
+ | ExprKind::Array { .. }
+ | ExprKind::Binary { .. }
+ | ExprKind::Block { .. }
+ | ExprKind::Borrow { .. }
+ | ExprKind::Literal { .. }
+ | ExprKind::NamedConst { .. }
+ | ExprKind::NonHirLiteral { .. }
+ | ExprKind::ZstLiteral { .. }
+ | ExprKind::ConstParam { .. }
+ | ExprKind::ConstBlock { .. }
+ | ExprKind::Deref { .. }
+ | ExprKind::Index { .. }
+ | ExprKind::NeverToAny { .. }
+ | ExprKind::PlaceTypeAscription { .. }
+ | ExprKind::ValueTypeAscription { .. }
+ | ExprKind::Pointer { .. }
+ | ExprKind::Repeat { .. }
+ | ExprKind::StaticRef { .. }
+ | ExprKind::ThreadLocalRef { .. }
+ | ExprKind::Tuple { .. }
+ | ExprKind::Unary { .. }
+ | ExprKind::Call { .. }
+ | ExprKind::Assign { .. }
+ | ExprKind::AssignOp { .. }
+ | ExprKind::Break { .. }
+ | ExprKind::Closure { .. }
+ | ExprKind::Continue { .. }
+ | ExprKind::Return { .. }
+ | ExprKind::Yield { .. }
+ | ExprKind::Loop { .. }
+ | ExprKind::Let { .. }
+ | ExprKind::Match { .. }
+ | ExprKind::Box { .. }
+ | ExprKind::If { .. }
+ | ExprKind::InlineAsm { .. }
+ | ExprKind::LogicalOp { .. }
+ | ExprKind::Use { .. } => {
+ // We don't need to save and restore the old value because
+ // none of the place expressions can have more than one
+ // child.
+ self.assignment_info = None;
+ }
+ };
+ match expr.kind {
+ ExprKind::Scope { value, lint_level: LintLevel::Explicit(hir_id), region_scope: _ } => {
+ let prev_id = self.hir_context;
+ self.hir_context = hir_id;
+ self.visit_expr(&self.thir[value]);
+ self.hir_context = prev_id;
+ return; // don't visit the whole expression
+ }
+ ExprKind::Call { fun, ty: _, args: _, from_hir_call: _, fn_span: _ } => {
+ if self.thir[fun].ty.fn_sig(self.tcx).unsafety() == hir::Unsafety::Unsafe {
+ let func_id = if let ty::FnDef(func_id, _) = self.thir[fun].ty.kind() {
+ Some(*func_id)
+ } else {
+ None
+ };
+ self.requires_unsafe(expr.span, CallToUnsafeFunction(func_id));
+ } else if let &ty::FnDef(func_did, _) = self.thir[fun].ty.kind() {
+ // If the called function has target features that the calling
+ // function lacks, the call requires `unsafe`. Don't check this
+ // on wasm targets, though. For more information on wasm see the
+ // is_like_wasm check in typeck/src/collect.rs.
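+ //
+ // Illustrative example (assuming the `target_feature_11` feature
+ // that allows safe `#[target_feature]` functions):
+ //
+ //     #[target_feature(enable = "avx2")]
+ //     fn uses_avx2() { /* ... */ }
+ //
+ // Calling `uses_avx2()` from a caller without `avx2` enabled
+ // requires an `unsafe` block.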
+ if !self.tcx.sess.target.options.is_like_wasm
+ && !self
+ .tcx
+ .codegen_fn_attrs(func_did)
+ .target_features
+ .iter()
+ .all(|feature| self.body_target_features.contains(feature))
+ {
+ self.requires_unsafe(expr.span, CallToFunctionWith(func_did));
+ }
+ }
+ }
+ ExprKind::Deref { arg } => {
+ if let ExprKind::StaticRef { def_id, .. } = self.thir[arg].kind {
+ if self.tcx.is_mutable_static(def_id) {
+ self.requires_unsafe(expr.span, UseOfMutableStatic);
+ } else if self.tcx.is_foreign_item(def_id) {
+ self.requires_unsafe(expr.span, UseOfExternStatic);
+ }
+ } else if self.thir[arg].ty.is_unsafe_ptr() {
+ self.requires_unsafe(expr.span, DerefOfRawPointer);
+ }
+ }
+ ExprKind::InlineAsm { .. } => {
+ self.requires_unsafe(expr.span, UseOfInlineAssembly);
+ }
+ ExprKind::Adt(box Adt {
+ adt_def,
+ variant_index: _,
+ substs: _,
+ user_ty: _,
+ fields: _,
+ base: _,
+ }) => match self.tcx.layout_scalar_valid_range(adt_def.did()) {
+ (Bound::Unbounded, Bound::Unbounded) => {}
+ _ => self.requires_unsafe(expr.span, InitializingTypeWith),
+ },
+ ExprKind::Closure {
+ closure_id,
+ substs: _,
+ upvars: _,
+ movability: _,
+ fake_reads: _,
+ } => {
+ let closure_def = if let Some((did, const_param_id)) =
+ ty::WithOptConstParam::try_lookup(closure_id, self.tcx)
+ {
+ ty::WithOptConstParam { did, const_param_did: Some(const_param_id) }
+ } else {
+ ty::WithOptConstParam::unknown(closure_id)
+ };
+ let (closure_thir, expr) = self.tcx.thir_body(closure_def).unwrap_or_else(|_| {
+ (self.tcx.alloc_steal_thir(Thir::new()), ExprId::from_u32(0))
+ });
+ let closure_thir = &closure_thir.borrow();
+ let hir_context = self.tcx.hir().local_def_id_to_hir_id(closure_id);
+ let mut closure_visitor =
+ UnsafetyVisitor { thir: closure_thir, hir_context, ..*self };
+ closure_visitor.visit_expr(&closure_thir[expr]);
+ // Unsafe blocks can be used in closures; make sure to take that into account.
+ self.safety_context = closure_visitor.safety_context;
+ }
+ ExprKind::Field { lhs, .. } => {
+ let lhs = &self.thir[lhs];
+ if let ty::Adt(adt_def, _) = lhs.ty.kind() && adt_def.is_union() {
+ if let Some((assigned_ty, assignment_span)) = self.assignment_info {
+ if assigned_ty.needs_drop(self.tcx, self.param_env) {
+ // This would be unsafe, but should be outright impossible since we reject such unions.
+ self.tcx.sess.delay_span_bug(assignment_span, format!("union fields that need dropping should be impossible: {assigned_ty}"));
+ }
+ } else {
+ self.requires_unsafe(expr.span, AccessToUnionField);
+ }
+ }
+ }
+ ExprKind::Assign { lhs, rhs } | ExprKind::AssignOp { lhs, rhs, .. } => {
+ let lhs = &self.thir[lhs];
+ // First, check whether we are mutating a layout constrained field
+ let mut visitor = LayoutConstrainedPlaceVisitor::new(self.thir, self.tcx);
+ visit::walk_expr(&mut visitor, lhs);
+ if visitor.found {
+ self.requires_unsafe(expr.span, MutationOfLayoutConstrainedField);
+ }
+
+ // Second, check for accesses to union fields. We don't have any
+ // special handling for `AssignOp` since it causes a read *and* a
+ // write to the LHS.
+ if matches!(expr.kind, ExprKind::Assign { .. }) {
+ self.assignment_info = Some((lhs.ty, expr.span));
+ visit::walk_expr(self, lhs);
+ self.assignment_info = None;
+ visit::walk_expr(self, &self.thir()[rhs]);
+ return; // we have already visited everything by now
+ }
+ }
+ ExprKind::Borrow { borrow_kind, arg } => {
+ let mut visitor = LayoutConstrainedPlaceVisitor::new(self.thir, self.tcx);
+ visit::walk_expr(&mut visitor, expr);
+ if visitor.found {
+ match borrow_kind {
+ BorrowKind::Shallow | BorrowKind::Shared | BorrowKind::Unique
+ if !self.thir[arg]
+ .ty
+ .is_freeze(self.tcx.at(self.thir[arg].span), self.param_env) =>
+ {
+ self.requires_unsafe(expr.span, BorrowOfLayoutConstrainedField)
+ }
+ BorrowKind::Mut { .. } => {
+ self.requires_unsafe(expr.span, MutationOfLayoutConstrainedField)
+ }
+ BorrowKind::Shallow | BorrowKind::Shared | BorrowKind::Unique => {}
+ }
+ }
+ }
+ ExprKind::Let { expr: expr_id, .. } => {
+ let let_expr = &self.thir[expr_id];
+ if let ty::Adt(adt_def, _) = let_expr.ty.kind() && adt_def.is_union() {
+ self.requires_unsafe(expr.span, AccessToUnionField);
+ }
+ }
+ _ => {}
+ }
+ visit::walk_expr(self, expr);
+ }
+}
+
+#[derive(Clone, Copy)]
+enum SafetyContext {
+ Safe,
+ BuiltinUnsafeBlock,
+ UnsafeFn,
+ UnsafeBlock { span: Span, hir_id: hir::HirId, used: bool },
+}
+
+#[derive(Clone, Copy)]
+enum BodyUnsafety {
+ /// The body is not unsafe.
+ Safe,
+ /// The body is an unsafe function. The span points to
+ /// the signature of the function.
+ Unsafe(Span),
+}
+
+impl BodyUnsafety {
+ /// Returns whether the body is unsafe.
+ fn is_unsafe(&self) -> bool {
+ matches!(self, BodyUnsafety::Unsafe(_))
+ }
+
+ /// If the body is unsafe, returns the `Span` of its signature.
+ fn unsafe_fn_sig_span(self) -> Option<Span> {
+ match self {
+ BodyUnsafety::Unsafe(span) => Some(span),
+ BodyUnsafety::Safe => None,
+ }
+ }
+}
+
+#[derive(Clone, Copy, PartialEq)]
+enum UnsafeOpKind {
+ CallToUnsafeFunction(Option<DefId>),
+ UseOfInlineAssembly,
+ InitializingTypeWith,
+ UseOfMutableStatic,
+ UseOfExternStatic,
+ DerefOfRawPointer,
+ AccessToUnionField,
+ MutationOfLayoutConstrainedField,
+ BorrowOfLayoutConstrainedField,
+ CallToFunctionWith(DefId),
+}
+
+use UnsafeOpKind::*;
+
+impl UnsafeOpKind {
+ pub fn simple_description(&self) -> &'static str {
+ match self {
+ CallToUnsafeFunction(..) => "call to unsafe function",
+ UseOfInlineAssembly => "use of inline assembly",
+ InitializingTypeWith => "initializing type with `rustc_layout_scalar_valid_range` attr",
+ UseOfMutableStatic => "use of mutable static",
+ UseOfExternStatic => "use of extern static",
+ DerefOfRawPointer => "dereference of raw pointer",
+ AccessToUnionField => "access to union field",
+ MutationOfLayoutConstrainedField => "mutation of layout constrained field",
+ BorrowOfLayoutConstrainedField => {
+ "borrow of layout constrained field with interior mutability"
+ }
+ CallToFunctionWith(..) => "call to function with `#[target_feature]`",
+ }
+ }
+
+ pub fn description_and_note(&self, tcx: TyCtxt<'_>) -> (Cow<'static, str>, &'static str) {
+ match self {
+ CallToUnsafeFunction(did) => (
+ if let Some(did) = did {
+ Cow::from(format!("call to unsafe function `{}`", tcx.def_path_str(*did)))
+ } else {
+ Cow::Borrowed(self.simple_description())
+ },
+ "consult the function's documentation for information on how to avoid undefined \
+ behavior",
+ ),
+ UseOfInlineAssembly => (
+ Cow::Borrowed(self.simple_description()),
+ "inline assembly is entirely unchecked and can cause undefined behavior",
+ ),
+ InitializingTypeWith => (
+ Cow::Borrowed(self.simple_description()),
+ "initializing a layout restricted type's field with a value outside the valid \
+ range is undefined behavior",
+ ),
+ UseOfMutableStatic => (
+ Cow::Borrowed(self.simple_description()),
+ "mutable statics can be mutated by multiple threads: aliasing violations or data \
+ races will cause undefined behavior",
+ ),
+ UseOfExternStatic => (
+ Cow::Borrowed(self.simple_description()),
+ "extern statics are not controlled by the Rust type system: invalid data, \
+ aliasing violations or data races will cause undefined behavior",
+ ),
+ DerefOfRawPointer => (
+ Cow::Borrowed(self.simple_description()),
+ "raw pointers may be null, dangling or unaligned; they can violate aliasing rules \
+ and cause data races: all of these are undefined behavior",
+ ),
+ AccessToUnionField => (
+ Cow::Borrowed(self.simple_description()),
+ "the field may not be properly initialized: using uninitialized data will cause \
+ undefined behavior",
+ ),
+ MutationOfLayoutConstrainedField => (
+ Cow::Borrowed(self.simple_description()),
+ "mutating layout constrained fields cannot statically be checked for valid values",
+ ),
+ BorrowOfLayoutConstrainedField => (
+ Cow::Borrowed(self.simple_description()),
+ "references to fields of layout constrained fields lose the constraints. Coupled \
+ with interior mutability, the field can be changed to invalid values",
+ ),
+ CallToFunctionWith(did) => (
+ Cow::from(format!(
+ "call to function `{}` with `#[target_feature]`",
+ tcx.def_path_str(*did)
+ )),
+ "can only be called if the required target features are available",
+ ),
+ }
+ }
+}
+
+pub fn check_unsafety<'tcx>(tcx: TyCtxt<'tcx>, def: ty::WithOptConstParam<LocalDefId>) {
+ // THIR unsafeck is gated under `-Z thir-unsafeck`
+ if !tcx.sess.opts.unstable_opts.thir_unsafeck {
+ return;
+ }
+
+ // Closures are handled by their owner, if it has a body
+ if tcx.is_closure(def.did.to_def_id()) {
+ let hir = tcx.hir();
+ let owner = hir.enclosing_body_owner(hir.local_def_id_to_hir_id(def.did));
+ tcx.ensure().thir_check_unsafety(owner);
+ return;
+ }
+
+ let Ok((thir, expr)) = tcx.thir_body(def) else {
+ return
+ };
+ let thir = &thir.borrow();
+ // If `thir` is empty, a type error occurred; skip this body.
+ if thir.exprs.is_empty() {
+ return;
+ }
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
+ let body_unsafety = tcx.hir().fn_sig_by_hir_id(hir_id).map_or(BodyUnsafety::Safe, |fn_sig| {
+ if fn_sig.header.unsafety == hir::Unsafety::Unsafe {
+ BodyUnsafety::Unsafe(fn_sig.span)
+ } else {
+ BodyUnsafety::Safe
+ }
+ });
+ let body_target_features = &tcx.body_codegen_attrs(def.did.to_def_id()).target_features;
+ let safety_context =
+ if body_unsafety.is_unsafe() { SafetyContext::UnsafeFn } else { SafetyContext::Safe };
+ let mut visitor = UnsafetyVisitor {
+ tcx,
+ thir,
+ safety_context,
+ hir_context: hir_id,
+ body_unsafety,
+ body_target_features,
+ assignment_info: None,
+ in_union_destructure: false,
+ param_env: tcx.param_env(def.did),
+ inside_adt: false,
+ };
+ visitor.visit_expr(&thir[expr]);
+}
+
+pub(crate) fn thir_check_unsafety<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) {
+ if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
+ tcx.thir_check_unsafety_for_const_arg(def)
+ } else {
+ check_unsafety(tcx, ty::WithOptConstParam::unknown(def_id))
+ }
+}
+
+pub(crate) fn thir_check_unsafety_for_const_arg<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (did, param_did): (LocalDefId, DefId),
+) {
+ check_unsafety(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
+}
diff --git a/compiler/rustc_mir_build/src/lib.rs b/compiler/rustc_mir_build/src/lib.rs
new file mode 100644
index 000000000..11cd2a9aa
--- /dev/null
+++ b/compiler/rustc_mir_build/src/lib.rs
@@ -0,0 +1,35 @@
+//! Construction of MIR from HIR.
+//!
+//! This crate also contains the match exhaustiveness and usefulness checking.
+#![allow(rustc::potential_query_instability)]
+#![feature(box_patterns)]
+#![feature(control_flow_enum)]
+#![feature(if_let_guard)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(once_cell)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+mod build;
+mod check_unsafety;
+mod lints;
+pub mod thir;
+
+use rustc_middle::ty::query::Providers;
+
+pub fn provide(providers: &mut Providers) {
+ providers.check_match = thir::pattern::check_match;
+ providers.lit_to_const = thir::constant::lit_to_const;
+ providers.lit_to_mir_constant = build::lit_to_mir_constant;
+ providers.mir_built = build::mir_built;
+ providers.thir_check_unsafety = check_unsafety::thir_check_unsafety;
+ providers.thir_check_unsafety_for_const_arg = check_unsafety::thir_check_unsafety_for_const_arg;
+ providers.thir_body = thir::cx::thir_body;
+ providers.thir_tree = thir::cx::thir_tree;
+}
diff --git a/compiler/rustc_mir_build/src/lints.rs b/compiler/rustc_mir_build/src/lints.rs
new file mode 100644
index 000000000..54d549fd6
--- /dev/null
+++ b/compiler/rustc_mir_build/src/lints.rs
@@ -0,0 +1,166 @@
+use rustc_data_structures::graph::iterate::{
+ NodeStatus, TriColorDepthFirstSearch, TriColorVisitor,
+};
+use rustc_hir::def::DefKind;
+use rustc_middle::mir::{BasicBlock, BasicBlocks, Body, Operand, TerminatorKind};
+use rustc_middle::ty::subst::{GenericArg, InternalSubsts};
+use rustc_middle::ty::{self, Instance, TyCtxt};
+use rustc_session::lint::builtin::UNCONDITIONAL_RECURSION;
+use rustc_span::Span;
+use std::ops::ControlFlow;
+
+pub(crate) fn check<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ let def_id = body.source.def_id().expect_local();
+
+ if let DefKind::Fn | DefKind::AssocFn = tcx.def_kind(def_id) {
+ // If this is a trait/impl method, extract the trait's substs.
+ let trait_substs = match tcx.trait_of_item(def_id.to_def_id()) {
+ Some(trait_def_id) => {
+ let trait_substs_count = tcx.generics_of(trait_def_id).count();
+ &InternalSubsts::identity_for_item(tcx, def_id.to_def_id())[..trait_substs_count]
+ }
+ _ => &[],
+ };
+
+ let mut vis = Search { tcx, body, reachable_recursive_calls: vec![], trait_substs };
+ if let Some(NonRecursive) =
+ TriColorDepthFirstSearch::new(&body.basic_blocks).run_from_start(&mut vis)
+ {
+ return;
+ }
+ if vis.reachable_recursive_calls.is_empty() {
+ return;
+ }
+
+ vis.reachable_recursive_calls.sort();
+
+ let sp = tcx.def_span(def_id);
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ tcx.struct_span_lint_hir(UNCONDITIONAL_RECURSION, hir_id, sp, |lint| {
+ let mut db = lint.build("function cannot return without recursing");
+ db.span_label(sp, "cannot return without recursing");
+ // offer some help to the programmer.
+ for call_span in vis.reachable_recursive_calls {
+ db.span_label(call_span, "recursive call site");
+ }
+ db.help("a `loop` may express intention better if this is on purpose");
+ db.emit();
+ });
+ }
+}
+
+struct NonRecursive;
+
+struct Search<'mir, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'mir Body<'tcx>,
+ trait_substs: &'tcx [GenericArg<'tcx>],
+
+ reachable_recursive_calls: Vec<Span>,
+}
+
+impl<'mir, 'tcx> Search<'mir, 'tcx> {
+ /// Returns `true` if `func` refers to the function we are searching in.
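+ ///
+ /// For example (illustrative), in
+ /// `fn fact(n: u64) -> u64 { n * fact(n - 1) }`,
+ /// the inner `fact(n - 1)` is such a call.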
+ fn is_recursive_call(&self, func: &Operand<'tcx>, args: &[Operand<'tcx>]) -> bool {
+ let Search { tcx, body, trait_substs, .. } = *self;
+ // Resolving the function type to the specific instance being called
+ // is expensive. To avoid that cost, we check the number of arguments
+ // first, which is sufficient to reject most calls as non-recursive.
+ if args.len() != body.arg_count {
+ return false;
+ }
+ let caller = body.source.def_id();
+ let param_env = tcx.param_env(caller);
+
+ let func_ty = func.ty(body, tcx);
+ if let ty::FnDef(callee, substs) = *func_ty.kind() {
+ let normalized_substs = tcx.normalize_erasing_regions(param_env, substs);
+ let (callee, call_substs) = if let Ok(Some(instance)) =
+ Instance::resolve(tcx, param_env, callee, normalized_substs)
+ {
+ (instance.def_id(), instance.substs)
+ } else {
+ (callee, normalized_substs)
+ };
+
+ // FIXME(#57965): Make this work across function boundaries
+
+ // If this is a trait fn, the substs on the trait have to match, or we might be
+ // calling into an entirely different method (for example, a call from the default
+ // method in the trait to `<A as Trait<B>>::method`, where `A` and/or `B` are
+ // specific types).
+ return callee == caller && &call_substs[..trait_substs.len()] == trait_substs;
+ }
+
+ false
+ }
+}
+
+impl<'mir, 'tcx> TriColorVisitor<BasicBlocks<'tcx>> for Search<'mir, 'tcx> {
+ type BreakVal = NonRecursive;
+
+ fn node_examined(
+ &mut self,
+ bb: BasicBlock,
+ prior_status: Option<NodeStatus>,
+ ) -> ControlFlow<Self::BreakVal> {
+ // Back-edge in the CFG (loop).
+ if let Some(NodeStatus::Visited) = prior_status {
+ return ControlFlow::Break(NonRecursive);
+ }
+
+ match self.body[bb].terminator().kind {
+ // These terminators return control flow to the caller.
+ TerminatorKind::Abort
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Resume
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Yield { .. } => ControlFlow::Break(NonRecursive),
+
+ // A diverging InlineAsm is treated as non-recursing
+ TerminatorKind::InlineAsm { destination, .. } => {
+ if destination.is_some() {
+ ControlFlow::CONTINUE
+ } else {
+ ControlFlow::Break(NonRecursive)
+ }
+ }
+
+ // These do not.
+ TerminatorKind::Assert { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. } => ControlFlow::CONTINUE,
+ }
+ }
+
+ fn node_settled(&mut self, bb: BasicBlock) -> ControlFlow<Self::BreakVal> {
+ // When we examine a node for the last time, remember it if it is a recursive call.
+ let terminator = self.body[bb].terminator();
+ if let TerminatorKind::Call { func, args, .. } = &terminator.kind {
+ if self.is_recursive_call(func, args) {
+ self.reachable_recursive_calls.push(terminator.source_info.span);
+ }
+ }
+
+ ControlFlow::CONTINUE
+ }
+
+ fn ignore_edge(&mut self, bb: BasicBlock, target: BasicBlock) -> bool {
+ let terminator = self.body[bb].terminator();
+ if terminator.unwind() == Some(&Some(target)) && terminator.successors().count() > 1 {
+ return true;
+ }
+ // Don't traverse successors of recursive calls or false CFG edges.
+ match self.body[bb].terminator().kind {
+ TerminatorKind::Call { ref func, ref args, .. } => self.is_recursive_call(func, args),
+ TerminatorKind::FalseEdge { imaginary_target, .. } => imaginary_target == target,
+ _ => false,
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs
new file mode 100644
index 000000000..a7e4403a2
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/constant.rs
@@ -0,0 +1,52 @@
+use rustc_ast as ast;
+use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput};
+use rustc_middle::ty::{self, ParamEnv, ScalarInt, TyCtxt};
+
+pub(crate) fn lit_to_const<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ lit_input: LitToConstInput<'tcx>,
+) -> Result<ty::Const<'tcx>, LitToConstError> {
+ let LitToConstInput { lit, ty, neg } = lit_input;
+
+ let trunc = |n| {
+ let param_ty = ParamEnv::reveal_all().and(ty);
+ let width = tcx.layout_of(param_ty).map_err(|_| LitToConstError::Reported)?.size;
+ trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits());
+ let result = width.truncate(n);
+ trace!("trunc result: {}", result);
+
+ Ok(ScalarInt::try_from_uint(result, width)
+ .unwrap_or_else(|| bug!("expected to create ScalarInt from uint {:?}", result)))
+ };
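+ // Illustrative example: for `ty = u8` the layout width is 8 bits,
+ // so `trunc(300)` keeps only the low 8 bits and yields 44.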
+
+ let valtree = match (lit, &ty.kind()) {
+ (ast::LitKind::Str(s, _), ty::Ref(_, inner_ty, _)) if inner_ty.is_str() => {
+ let str_bytes = s.as_str().as_bytes();
+ ty::ValTree::from_raw_bytes(tcx, str_bytes)
+ }
+ (ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _))
+ if matches!(inner_ty.kind(), ty::Slice(_)) =>
+ {
+ let bytes = data as &[u8];
+ ty::ValTree::from_raw_bytes(tcx, bytes)
+ }
+ (ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _)) if inner_ty.is_array() => {
+ let bytes = data as &[u8];
+ ty::ValTree::from_raw_bytes(tcx, bytes)
+ }
+ (ast::LitKind::Byte(n), ty::Uint(ty::UintTy::U8)) => {
+ ty::ValTree::from_scalar_int((*n).into())
+ }
+ (ast::LitKind::Int(n, _), ty::Uint(_)) | (ast::LitKind::Int(n, _), ty::Int(_)) => {
+ let scalar_int =
+ trunc(if neg { (*n as i128).overflowing_neg().0 as u128 } else { *n })?;
+ ty::ValTree::from_scalar_int(scalar_int)
+ }
+ (ast::LitKind::Bool(b), ty::Bool) => ty::ValTree::from_scalar_int((*b).into()),
+ (ast::LitKind::Char(c), ty::Char) => ty::ValTree::from_scalar_int((*c).into()),
+ (ast::LitKind::Err(_), _) => return Err(LitToConstError::Reported),
+ _ => return Err(LitToConstError::TypeError),
+ };
+
+ Ok(ty::Const::from_value(tcx, valtree, ty))
+}
diff --git a/compiler/rustc_mir_build/src/thir/cx/block.rs b/compiler/rustc_mir_build/src/thir/cx/block.rs
new file mode 100644
index 000000000..dccaa61ed
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/cx/block.rs
@@ -0,0 +1,126 @@
+use crate::thir::cx::Cx;
+
+use rustc_hir as hir;
+use rustc_middle::middle::region;
+use rustc_middle::thir::*;
+use rustc_middle::ty;
+
+use rustc_index::vec::Idx;
+use rustc_middle::ty::CanonicalUserTypeAnnotation;
+
+impl<'tcx> Cx<'tcx> {
+ pub(crate) fn mirror_block(&mut self, block: &'tcx hir::Block<'tcx>) -> Block {
+ // We have to eagerly lower the "spine" of the statements
+ // in order to get the lexical scoping right.
+ let stmts = self.mirror_stmts(block.hir_id.local_id, block.stmts);
+ let opt_destruction_scope =
+ self.region_scope_tree.opt_destruction_scope(block.hir_id.local_id);
+ Block {
+ targeted_by_break: block.targeted_by_break,
+ region_scope: region::Scope {
+ id: block.hir_id.local_id,
+ data: region::ScopeData::Node,
+ },
+ opt_destruction_scope,
+ span: block.span,
+ stmts,
+ expr: block.expr.map(|expr| self.mirror_expr(expr)),
+ safety_mode: match block.rules {
+ hir::BlockCheckMode::DefaultBlock => BlockSafety::Safe,
+ hir::BlockCheckMode::UnsafeBlock(hir::UnsafeSource::CompilerGenerated) => {
+ BlockSafety::BuiltinUnsafe
+ }
+ hir::BlockCheckMode::UnsafeBlock(hir::UnsafeSource::UserProvided) => {
+ BlockSafety::ExplicitUnsafe(block.hir_id)
+ }
+ },
+ }
+ }
+
+ fn mirror_stmts(
+ &mut self,
+ block_id: hir::ItemLocalId,
+ stmts: &'tcx [hir::Stmt<'tcx>],
+ ) -> Box<[StmtId]> {
+ stmts
+ .iter()
+ .enumerate()
+ .filter_map(|(index, stmt)| {
+ let hir_id = stmt.hir_id;
+ let opt_dxn_ext = self.region_scope_tree.opt_destruction_scope(hir_id.local_id);
+ match stmt.kind {
+ hir::StmtKind::Expr(ref expr) | hir::StmtKind::Semi(ref expr) => {
+ let stmt = Stmt {
+ kind: StmtKind::Expr {
+ scope: region::Scope {
+ id: hir_id.local_id,
+ data: region::ScopeData::Node,
+ },
+ expr: self.mirror_expr(expr),
+ },
+ opt_destruction_scope: opt_dxn_ext,
+ };
+ Some(self.thir.stmts.push(stmt))
+ }
+ hir::StmtKind::Item(..) => {
+ // ignore for purposes of the MIR
+ None
+ }
+ hir::StmtKind::Local(ref local) => {
+ let remainder_scope = region::Scope {
+ id: block_id,
+ data: region::ScopeData::Remainder(region::FirstStatementIndex::new(
+ index,
+ )),
+ };
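+ // Illustrative example: in `{ let x = f(); g(); }`, the
+ // remainder scope of `x` spans from its `let` to the end of
+ // the enclosing block, so `g();` runs while `x` is in scope.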
+
+ let else_block = local.els.map(|els| self.mirror_block(els));
+
+ let mut pattern = self.pattern_from_hir(local.pat);
+ debug!(?pattern);
+
+ if let Some(ty) = &local.ty {
+ if let Some(&user_ty) =
+ self.typeck_results.user_provided_types().get(ty.hir_id)
+ {
+ debug!("mirror_stmts: user_ty={:?}", user_ty);
+ let annotation = CanonicalUserTypeAnnotation {
+ user_ty,
+ span: ty.span,
+ inferred_ty: self.typeck_results.node_type(ty.hir_id),
+ };
+ pattern = Pat {
+ ty: pattern.ty,
+ span: pattern.span,
+ kind: Box::new(PatKind::AscribeUserType {
+ ascription: Ascription {
+ annotation,
+ variance: ty::Variance::Covariant,
+ },
+ subpattern: pattern,
+ }),
+ };
+ }
+ }
+
+ let stmt = Stmt {
+ kind: StmtKind::Let {
+ remainder_scope,
+ init_scope: region::Scope {
+ id: hir_id.local_id,
+ data: region::ScopeData::Node,
+ },
+ pattern,
+ initializer: local.init.map(|init| self.mirror_expr(init)),
+ else_block,
+ lint_level: LintLevel::Explicit(local.hir_id),
+ },
+ opt_destruction_scope: opt_dxn_ext,
+ };
+ Some(self.thir.stmts.push(stmt))
+ }
+ }
+ })
+ .collect()
+ }
+}
diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs
new file mode 100644
index 000000000..985601712
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs
@@ -0,0 +1,1117 @@
+use crate::thir::cx::region::Scope;
+use crate::thir::cx::Cx;
+use crate::thir::util::UserAnnotatedTyHelpers;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, CtorOf, DefKind, Res};
+use rustc_index::vec::Idx;
+use rustc_middle::hir::place::Place as HirPlace;
+use rustc_middle::hir::place::PlaceBase as HirPlaceBase;
+use rustc_middle::hir::place::ProjectionKind as HirProjectionKind;
+use rustc_middle::middle::region;
+use rustc_middle::mir::{self, BinOp, BorrowKind, Field, UnOp};
+use rustc_middle::thir::*;
+use rustc_middle::ty::adjustment::{
+ Adjust, Adjustment, AutoBorrow, AutoBorrowMutability, PointerCast,
+};
+use rustc_middle::ty::subst::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::{
+ self, AdtKind, InlineConstSubsts, InlineConstSubstsParts, ScalarInt, Ty, UpvarSubsts, UserType,
+};
+use rustc_span::def_id::DefId;
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+
+impl<'tcx> Cx<'tcx> {
+ pub(crate) fn mirror_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) -> ExprId {
+ // `mirror_expr` can recurse very deeply; make sure the stack doesn't overflow.
+ ensure_sufficient_stack(|| self.mirror_expr_inner(expr))
+ }
+
+ pub(crate) fn mirror_exprs(&mut self, exprs: &'tcx [hir::Expr<'tcx>]) -> Box<[ExprId]> {
+ exprs.iter().map(|expr| self.mirror_expr_inner(expr)).collect()
+ }
+
+ #[instrument(level = "trace", skip(self, hir_expr))]
+ pub(super) fn mirror_expr_inner(&mut self, hir_expr: &'tcx hir::Expr<'tcx>) -> ExprId {
+ let temp_lifetime =
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, hir_expr.hir_id.local_id);
+ let expr_scope =
+ region::Scope { id: hir_expr.hir_id.local_id, data: region::ScopeData::Node };
+
+ trace!(?hir_expr.hir_id, ?hir_expr.span);
+
+ let mut expr = self.make_mirror_unadjusted(hir_expr);
+
+ let adjustment_span = match self.adjustment_span {
+ Some((hir_id, span)) if hir_id == hir_expr.hir_id => Some(span),
+ _ => None,
+ };
+
+ // Now apply adjustments, if any.
+ for adjustment in self.typeck_results.expr_adjustments(hir_expr) {
+ trace!(?expr, ?adjustment);
+ let span = expr.span;
+ expr =
+ self.apply_adjustment(hir_expr, expr, adjustment, adjustment_span.unwrap_or(span));
+ }
+
+ // Next, wrap this up in the expr's scope.
+ expr = Expr {
+ temp_lifetime,
+ ty: expr.ty,
+ span: hir_expr.span,
+ kind: ExprKind::Scope {
+ region_scope: expr_scope,
+ value: self.thir.exprs.push(expr),
+ lint_level: LintLevel::Explicit(hir_expr.hir_id),
+ },
+ };
+
+ // Finally, create a destruction scope, if any.
+ if let Some(region_scope) =
+ self.region_scope_tree.opt_destruction_scope(hir_expr.hir_id.local_id)
+ {
+ expr = Expr {
+ temp_lifetime,
+ ty: expr.ty,
+ span: hir_expr.span,
+ kind: ExprKind::Scope {
+ region_scope,
+ value: self.thir.exprs.push(expr),
+ lint_level: LintLevel::Inherited,
+ },
+ };
+ }
+
+ // OK, all done!
+ self.thir.exprs.push(expr)
+ }
+
+ fn apply_adjustment(
+ &mut self,
+ hir_expr: &'tcx hir::Expr<'tcx>,
+ mut expr: Expr<'tcx>,
+ adjustment: &Adjustment<'tcx>,
+ mut span: Span,
+ ) -> Expr<'tcx> {
+ let Expr { temp_lifetime, .. } = expr;
+
+ // Adjust the span from the block, to the last expression of the
+ // block. This is a better span when returning a mutable reference
+ // with too short a lifetime. The error message will use the span
+ // from the assignment to the return place, which should only point
+ // at the returned value, not the entire function body.
+ //
+ // fn return_short_lived<'a>(x: &'a mut i32) -> &'static mut i32 {
+ // x
+ // // ^ error message points at this expression.
+ // }
+ let mut adjust_span = |expr: &mut Expr<'tcx>| {
+ if let ExprKind::Block { body } = &expr.kind {
+ if let Some(last_expr) = body.expr {
+ span = self.thir[last_expr].span;
+ expr.span = span;
+ }
+ }
+ };
+
+ let kind = match adjustment.kind {
+ Adjust::Pointer(PointerCast::Unsize) => {
+ adjust_span(&mut expr);
+ ExprKind::Pointer { cast: PointerCast::Unsize, source: self.thir.exprs.push(expr) }
+ }
+ Adjust::Pointer(cast) => ExprKind::Pointer { cast, source: self.thir.exprs.push(expr) },
+ Adjust::NeverToAny => ExprKind::NeverToAny { source: self.thir.exprs.push(expr) },
+ Adjust::Deref(None) => {
+ adjust_span(&mut expr);
+ ExprKind::Deref { arg: self.thir.exprs.push(expr) }
+ }
+ Adjust::Deref(Some(deref)) => {
+ // We don't need to call `adjust_span` here since
+ // deref coercions always start with a built-in deref.
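+ //
+ // Illustrative example: an overloaded deref of `x: Rc<T>` is
+ // lowered to (roughly) `*Deref::deref(&x)`, which is why the
+ // expression is wrapped in a borrow below.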
+ let call = deref.method_call(self.tcx(), expr.ty);
+
+ expr = Expr {
+ temp_lifetime,
+ ty: self
+ .tcx
+ .mk_ref(deref.region, ty::TypeAndMut { ty: expr.ty, mutbl: deref.mutbl }),
+ span,
+ kind: ExprKind::Borrow {
+ borrow_kind: deref.mutbl.to_borrow_kind(),
+ arg: self.thir.exprs.push(expr),
+ },
+ };
+
+ let expr = Box::new([self.thir.exprs.push(expr)]);
+
+ self.overloaded_place(hir_expr, adjustment.target, Some(call), expr, deref.span)
+ }
+ Adjust::Borrow(AutoBorrow::Ref(_, m)) => ExprKind::Borrow {
+ borrow_kind: m.to_borrow_kind(),
+ arg: self.thir.exprs.push(expr),
+ },
+ Adjust::Borrow(AutoBorrow::RawPtr(mutability)) => {
+ ExprKind::AddressOf { mutability, arg: self.thir.exprs.push(expr) }
+ }
+ };
+
+ Expr { temp_lifetime, ty: adjustment.target, span, kind }
+ }
+
+ /// Lowers a cast expression.
+ ///
+ /// Dealing with user type annotations is left to the caller.
+ fn mirror_expr_cast(
+ &mut self,
+ source: &'tcx hir::Expr<'tcx>,
+ temp_lifetime: Option<Scope>,
+ span: Span,
+ ) -> ExprKind<'tcx> {
+ let tcx = self.tcx;
+
+ // Check to see if this cast is a "coercion cast", where the cast is actually done
+ // using a coercion (or is a no-op).
+ if self.typeck_results().is_coercion_cast(source.hir_id) {
+ // Convert the lexpr to a vexpr.
+ ExprKind::Use { source: self.mirror_expr(source) }
+ } else if self.typeck_results().expr_ty(source).is_region_ptr() {
+ // Special cased so that we can type check that the element
+ // type of the source matches the pointed to type of the
+ // destination.
+ ExprKind::Pointer {
+ source: self.mirror_expr(source),
+ cast: PointerCast::ArrayToPointer,
+ }
+ } else {
+ // Check whether this is casting an enum variant discriminant.
+ // To prevent cycles, we refer to the discriminant initializer,
+ // which is always an integer and thus doesn't need to know the
+ // enum's layout (or its tag type) to compute it during const eval.
+ // Example:
+ // enum Foo {
+ //     A,
+ //     B = A as isize + 4,
+ // }
+ // The correct solution would be to add symbolic computations to
+ // miri, so we wouldn't have to compute and store the actual value.
+
+ let hir::ExprKind::Path(ref qpath) = source.kind else {
+ return ExprKind::Cast { source: self.mirror_expr(source)};
+ };
+
+ let res = self.typeck_results().qpath_res(qpath, source.hir_id);
+ let ty = self.typeck_results().node_type(source.hir_id);
+ let ty::Adt(adt_def, substs) = ty.kind() else {
+ return ExprKind::Cast { source: self.mirror_expr(source)};
+ };
+
+ let Res::Def(DefKind::Ctor(CtorOf::Variant, CtorKind::Const), variant_ctor_id) = res else {
+ return ExprKind::Cast { source: self.mirror_expr(source)};
+ };
+
+ let idx = adt_def.variant_index_with_ctor_id(variant_ctor_id);
+ let (discr_did, discr_offset) = adt_def.discriminant_def_for_variant(idx);
+
+ use rustc_middle::ty::util::IntTypeExt;
+ let ty = adt_def.repr().discr_type();
+ let discr_ty = ty.to_ty(tcx);
+
+ let param_env_ty = self.param_env.and(discr_ty);
+ let size = tcx
+ .layout_of(param_env_ty)
+ .unwrap_or_else(|e| {
+ panic!("could not compute layout for {:?}: {:?}", param_env_ty, e)
+ })
+ .size;
+
+ let lit = ScalarInt::try_from_uint(discr_offset as u128, size).unwrap();
+ let kind = ExprKind::NonHirLiteral { lit, user_ty: None };
+ let offset = self.thir.exprs.push(Expr { temp_lifetime, ty: discr_ty, span, kind });
+
+ let source = match discr_did {
+ // in case we are offsetting from a computed discriminant
+ // and not the beginning of discriminants (which is always `0`)
+ Some(did) => {
+ let kind = ExprKind::NamedConst { def_id: did, substs, user_ty: None };
+ let lhs =
+ self.thir.exprs.push(Expr { temp_lifetime, ty: discr_ty, span, kind });
+ let bin = ExprKind::Binary { op: BinOp::Add, lhs, rhs: offset };
+ self.thir.exprs.push(Expr {
+ temp_lifetime,
+ ty: discr_ty,
+ span,
+ kind: bin,
+ })
+ }
+ None => offset,
+ };
+
+ ExprKind::Cast { source }
+ }
+ }
+
+ fn make_mirror_unadjusted(&mut self, expr: &'tcx hir::Expr<'tcx>) -> Expr<'tcx> {
+ let tcx = self.tcx;
+ let expr_ty = self.typeck_results().expr_ty(expr);
+ let expr_span = expr.span;
+ let temp_lifetime =
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id);
+
+ let kind = match expr.kind {
+ // Here comes the interesting stuff:
+ hir::ExprKind::MethodCall(segment, ref args, fn_span) => {
+ // Rewrite a.b(c) into UFCS form like Trait::b(a, c)
+ let expr = self.method_callee(expr, segment.ident.span, None);
+ // When we apply adjustments to the receiver, use the span of
+ // the overall method call for better diagnostics. args[0]
+ // is guaranteed to exist, since a method call always has a receiver.
+ let old_adjustment_span = self.adjustment_span.replace((args[0].hir_id, expr_span));
+ tracing::info!("Using method span: {:?}", expr.span);
+ let args = self.mirror_exprs(args);
+ self.adjustment_span = old_adjustment_span;
+ ExprKind::Call {
+ ty: expr.ty,
+ fun: self.thir.exprs.push(expr),
+ args,
+ from_hir_call: true,
+ fn_span,
+ }
+ }
+
+ hir::ExprKind::Call(ref fun, ref args) => {
+ if self.typeck_results().is_method_call(expr) {
+ // The callee is something implementing Fn, FnMut, or FnOnce.
+ // Find the actual method implementation being called and
+ // build the appropriate UFCS call expression with the
+ // callee-object as expr parameter.
+
+ // rewrite f(u, v) into FnOnce::call_once(f, (u, v))
+
+ let method = self.method_callee(expr, fun.span, None);
+
+ let arg_tys = args.iter().map(|e| self.typeck_results().expr_ty_adjusted(e));
+ let tupled_args = Expr {
+ ty: tcx.mk_tup(arg_tys),
+ temp_lifetime,
+ span: expr.span,
+ kind: ExprKind::Tuple { fields: self.mirror_exprs(args) },
+ };
+ let tupled_args = self.thir.exprs.push(tupled_args);
+
+ ExprKind::Call {
+ ty: method.ty,
+ fun: self.thir.exprs.push(method),
+ args: Box::new([self.mirror_expr(fun), tupled_args]),
+ from_hir_call: true,
+ fn_span: expr.span,
+ }
+ } else {
+ let adt_data =
+ if let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = fun.kind {
+ // Tuple-like ADTs are represented as ExprKind::Call. We convert them here.
+ expr_ty.ty_adt_def().and_then(|adt_def| match path.res {
+ Res::Def(DefKind::Ctor(_, CtorKind::Fn), ctor_id) => {
+ Some((adt_def, adt_def.variant_index_with_ctor_id(ctor_id)))
+ }
+ Res::SelfCtor(..) => Some((adt_def, VariantIdx::new(0))),
+ _ => None,
+ })
+ } else {
+ None
+ };
+ if let Some((adt_def, index)) = adt_data {
+ let substs = self.typeck_results().node_substs(fun.hir_id);
+ let user_provided_types = self.typeck_results().user_provided_types();
+ let user_ty =
+ user_provided_types.get(fun.hir_id).copied().map(|mut u_ty| {
+ if let UserType::TypeOf(ref mut did, _) = &mut u_ty.value {
+ *did = adt_def.did();
+ }
+ u_ty
+ });
+ debug!("make_mirror_unadjusted: (call) user_ty={:?}", user_ty);
+
+ let field_refs = args
+ .iter()
+ .enumerate()
+ .map(|(idx, e)| FieldExpr {
+ name: Field::new(idx),
+ expr: self.mirror_expr(e),
+ })
+ .collect();
+ ExprKind::Adt(Box::new(Adt {
+ adt_def,
+ substs,
+ variant_index: index,
+ fields: field_refs,
+ user_ty,
+ base: None,
+ }))
+ } else {
+ ExprKind::Call {
+ ty: self.typeck_results().node_type(fun.hir_id),
+ fun: self.mirror_expr(fun),
+ args: self.mirror_exprs(args),
+ from_hir_call: true,
+ fn_span: expr.span,
+ }
+ }
+ }
+ }
+
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, mutbl, ref arg) => {
+ ExprKind::Borrow { borrow_kind: mutbl.to_borrow_kind(), arg: self.mirror_expr(arg) }
+ }
+
+ hir::ExprKind::AddrOf(hir::BorrowKind::Raw, mutability, ref arg) => {
+ ExprKind::AddressOf { mutability, arg: self.mirror_expr(arg) }
+ }
+
+ hir::ExprKind::Block(ref blk, _) => ExprKind::Block { body: self.mirror_block(blk) },
+
+ hir::ExprKind::Assign(ref lhs, ref rhs, _) => {
+ ExprKind::Assign { lhs: self.mirror_expr(lhs), rhs: self.mirror_expr(rhs) }
+ }
+
+ hir::ExprKind::AssignOp(op, ref lhs, ref rhs) => {
+ if self.typeck_results().is_method_call(expr) {
+ let lhs = self.mirror_expr(lhs);
+ let rhs = self.mirror_expr(rhs);
+ self.overloaded_operator(expr, Box::new([lhs, rhs]))
+ } else {
+ ExprKind::AssignOp {
+ op: bin_op(op.node),
+ lhs: self.mirror_expr(lhs),
+ rhs: self.mirror_expr(rhs),
+ }
+ }
+ }
+
+ hir::ExprKind::Lit(ref lit) => ExprKind::Literal { lit, neg: false },
+
+ hir::ExprKind::Binary(op, ref lhs, ref rhs) => {
+ if self.typeck_results().is_method_call(expr) {
+ let lhs = self.mirror_expr(lhs);
+ let rhs = self.mirror_expr(rhs);
+ self.overloaded_operator(expr, Box::new([lhs, rhs]))
+ } else {
+ // FIXME overflow
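+                    // `&&` and `||` become `LogicalOp`s rather than plain
+                    // `Binary` ops because they short-circuit: the RHS may only
+                    // be evaluated when the LHS doesn't decide the result.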
+ match op.node {
+ hir::BinOpKind::And => ExprKind::LogicalOp {
+ op: LogicalOp::And,
+ lhs: self.mirror_expr(lhs),
+ rhs: self.mirror_expr(rhs),
+ },
+ hir::BinOpKind::Or => ExprKind::LogicalOp {
+ op: LogicalOp::Or,
+ lhs: self.mirror_expr(lhs),
+ rhs: self.mirror_expr(rhs),
+ },
+ _ => {
+ let op = bin_op(op.node);
+ ExprKind::Binary {
+ op,
+ lhs: self.mirror_expr(lhs),
+ rhs: self.mirror_expr(rhs),
+ }
+ }
+ }
+ }
+ }
+
+ hir::ExprKind::Index(ref lhs, ref index) => {
+ if self.typeck_results().is_method_call(expr) {
+ let lhs = self.mirror_expr(lhs);
+ let index = self.mirror_expr(index);
+ self.overloaded_place(expr, expr_ty, None, Box::new([lhs, index]), expr.span)
+ } else {
+ ExprKind::Index { lhs: self.mirror_expr(lhs), index: self.mirror_expr(index) }
+ }
+ }
+
+ hir::ExprKind::Unary(hir::UnOp::Deref, ref arg) => {
+ if self.typeck_results().is_method_call(expr) {
+ let arg = self.mirror_expr(arg);
+ self.overloaded_place(expr, expr_ty, None, Box::new([arg]), expr.span)
+ } else {
+ ExprKind::Deref { arg: self.mirror_expr(arg) }
+ }
+ }
+
+ hir::ExprKind::Unary(hir::UnOp::Not, ref arg) => {
+ if self.typeck_results().is_method_call(expr) {
+ let arg = self.mirror_expr(arg);
+ self.overloaded_operator(expr, Box::new([arg]))
+ } else {
+ ExprKind::Unary { op: UnOp::Not, arg: self.mirror_expr(arg) }
+ }
+ }
+
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref arg) => {
+ if self.typeck_results().is_method_call(expr) {
+ let arg = self.mirror_expr(arg);
+ self.overloaded_operator(expr, Box::new([arg]))
+ } else if let hir::ExprKind::Lit(ref lit) = arg.kind {
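+                    // Folding the negation into the literal lets a value like
+                    // `-128i8` be evaluated as a single literal (`i8::MIN`)
+                    // rather than as `Neg` applied to an out-of-range `128i8`.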
+ ExprKind::Literal { lit, neg: true }
+ } else {
+ ExprKind::Unary { op: UnOp::Neg, arg: self.mirror_expr(arg) }
+ }
+ }
+
+ hir::ExprKind::Struct(ref qpath, ref fields, ref base) => match expr_ty.kind() {
+ ty::Adt(adt, substs) => match adt.adt_kind() {
+ AdtKind::Struct | AdtKind::Union => {
+ let user_provided_types = self.typeck_results().user_provided_types();
+ let user_ty = user_provided_types.get(expr.hir_id).copied();
+ debug!("make_mirror_unadjusted: (struct/union) user_ty={:?}", user_ty);
+ ExprKind::Adt(Box::new(Adt {
+ adt_def: *adt,
+ variant_index: VariantIdx::new(0),
+ substs,
+ user_ty,
+ fields: self.field_refs(fields),
+ base: base.as_ref().map(|base| FruInfo {
+ base: self.mirror_expr(base),
+ field_types: self.typeck_results().fru_field_types()[expr.hir_id]
+ .iter()
+ .copied()
+ .collect(),
+ }),
+ }))
+ }
+ AdtKind::Enum => {
+ let res = self.typeck_results().qpath_res(qpath, expr.hir_id);
+ match res {
+ Res::Def(DefKind::Variant, variant_id) => {
+ assert!(base.is_none());
+
+ let index = adt.variant_index_with_id(variant_id);
+ let user_provided_types =
+ self.typeck_results().user_provided_types();
+ let user_ty = user_provided_types.get(expr.hir_id).copied();
+ debug!("make_mirror_unadjusted: (variant) user_ty={:?}", user_ty);
+ ExprKind::Adt(Box::new(Adt {
+ adt_def: *adt,
+ variant_index: index,
+ substs,
+ user_ty,
+ fields: self.field_refs(fields),
+ base: None,
+ }))
+ }
+ _ => {
+ span_bug!(expr.span, "unexpected res: {:?}", res);
+ }
+ }
+ }
+ },
+ _ => {
+ span_bug!(expr.span, "unexpected type for struct literal: {:?}", expr_ty);
+ }
+ },
+
+ hir::ExprKind::Closure { .. } => {
+ let closure_ty = self.typeck_results().expr_ty(expr);
+ let (def_id, substs, movability) = match *closure_ty.kind() {
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs), None),
+ ty::Generator(def_id, substs, movability) => {
+ (def_id, UpvarSubsts::Generator(substs), Some(movability))
+ }
+ _ => {
+ span_bug!(expr.span, "closure expr w/o closure type: {:?}", closure_ty);
+ }
+ };
+ let def_id = def_id.expect_local();
+
+ let upvars = self
+ .typeck_results
+ .closure_min_captures_flattened(def_id)
+ .zip(substs.upvar_tys())
+ .map(|(captured_place, ty)| {
+ let upvars = self.capture_upvar(expr, captured_place, ty);
+ self.thir.exprs.push(upvars)
+ })
+ .collect();
+
+ // Convert the closure fake reads, if any, from hir `Place` to ExprRef
+ let fake_reads = match self.typeck_results.closure_fake_reads.get(&def_id) {
+ Some(fake_reads) => fake_reads
+ .iter()
+ .map(|(place, cause, hir_id)| {
+ let expr = self.convert_captured_hir_place(expr, place.clone());
+ (self.thir.exprs.push(expr), *cause, *hir_id)
+ })
+ .collect(),
+ None => Vec::new(),
+ };
+
+ ExprKind::Closure { closure_id: def_id, substs, upvars, movability, fake_reads }
+ }
+
+ hir::ExprKind::Path(ref qpath) => {
+ let res = self.typeck_results().qpath_res(qpath, expr.hir_id);
+ self.convert_path_expr(expr, res)
+ }
+
+ hir::ExprKind::InlineAsm(ref asm) => ExprKind::InlineAsm {
+ template: asm.template,
+ operands: asm
+ .operands
+ .iter()
+ .map(|(op, _op_sp)| match *op {
+ hir::InlineAsmOperand::In { reg, ref expr } => {
+ InlineAsmOperand::In { reg, expr: self.mirror_expr(expr) }
+ }
+ hir::InlineAsmOperand::Out { reg, late, ref expr } => {
+ InlineAsmOperand::Out {
+ reg,
+ late,
+ expr: expr.as_ref().map(|expr| self.mirror_expr(expr)),
+ }
+ }
+ hir::InlineAsmOperand::InOut { reg, late, ref expr } => {
+ InlineAsmOperand::InOut { reg, late, expr: self.mirror_expr(expr) }
+ }
+ hir::InlineAsmOperand::SplitInOut {
+ reg,
+ late,
+ ref in_expr,
+ ref out_expr,
+ } => InlineAsmOperand::SplitInOut {
+ reg,
+ late,
+ in_expr: self.mirror_expr(in_expr),
+ out_expr: out_expr.as_ref().map(|expr| self.mirror_expr(expr)),
+ },
+ hir::InlineAsmOperand::Const { ref anon_const } => {
+ let anon_const_def_id = tcx.hir().local_def_id(anon_const.hir_id);
+ let value = mir::ConstantKind::from_anon_const(
+ tcx,
+ anon_const_def_id,
+ self.param_env,
+ );
+ let span = tcx.hir().span(anon_const.hir_id);
+
+ InlineAsmOperand::Const { value, span }
+ }
+ hir::InlineAsmOperand::SymFn { ref anon_const } => {
+ let anon_const_def_id = tcx.hir().local_def_id(anon_const.hir_id);
+ let value = mir::ConstantKind::from_anon_const(
+ tcx,
+ anon_const_def_id,
+ self.param_env,
+ );
+ let span = tcx.hir().span(anon_const.hir_id);
+
+ InlineAsmOperand::SymFn { value, span }
+ }
+ hir::InlineAsmOperand::SymStatic { path: _, def_id } => {
+ InlineAsmOperand::SymStatic { def_id }
+ }
+ })
+ .collect(),
+ options: asm.options,
+ line_spans: asm.line_spans,
+ },
+
+ hir::ExprKind::ConstBlock(ref anon_const) => {
+ let ty = self.typeck_results().node_type(anon_const.hir_id);
+ let did = tcx.hir().local_def_id(anon_const.hir_id).to_def_id();
+ let typeck_root_def_id = tcx.typeck_root_def_id(did);
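+                // An inline `const { .. }` block may mention generics of the
+                // enclosing item, so its substs are the enclosing item's
+                // identity substs extended with the block's own type.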
+ let parent_substs =
+ tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
+ let substs =
+ InlineConstSubsts::new(tcx, InlineConstSubstsParts { parent_substs, ty })
+ .substs;
+
+ ExprKind::ConstBlock { did, substs }
+ }
+ // Now comes the rote stuff:
+ hir::ExprKind::Repeat(ref v, _) => {
+ let ty = self.typeck_results().expr_ty(expr);
+ let ty::Array(_, count) = ty.kind() else {
+ span_bug!(expr.span, "unexpected repeat expr ty: {:?}", ty);
+ };
+
+ ExprKind::Repeat { value: self.mirror_expr(v), count: *count }
+ }
+ hir::ExprKind::Ret(ref v) => {
+ ExprKind::Return { value: v.as_ref().map(|v| self.mirror_expr(v)) }
+ }
+ hir::ExprKind::Break(dest, ref value) => match dest.target_id {
+ Ok(target_id) => ExprKind::Break {
+ label: region::Scope { id: target_id.local_id, data: region::ScopeData::Node },
+ value: value.as_ref().map(|value| self.mirror_expr(value)),
+ },
+ Err(err) => bug!("invalid loop id for break: {}", err),
+ },
+ hir::ExprKind::Continue(dest) => match dest.target_id {
+ Ok(loop_id) => ExprKind::Continue {
+ label: region::Scope { id: loop_id.local_id, data: region::ScopeData::Node },
+ },
+ Err(err) => bug!("invalid loop id for continue: {}", err),
+ },
+ hir::ExprKind::Let(let_expr) => ExprKind::Let {
+ expr: self.mirror_expr(let_expr.init),
+ pat: self.pattern_from_hir(let_expr.pat),
+ },
+ hir::ExprKind::If(cond, then, else_opt) => ExprKind::If {
+ if_then_scope: region::Scope {
+ id: then.hir_id.local_id,
+ data: region::ScopeData::IfThen,
+ },
+ cond: self.mirror_expr(cond),
+ then: self.mirror_expr(then),
+ else_opt: else_opt.map(|el| self.mirror_expr(el)),
+ },
+ hir::ExprKind::Match(ref discr, ref arms, _) => ExprKind::Match {
+ scrutinee: self.mirror_expr(discr),
+ arms: arms.iter().map(|a| self.convert_arm(a)).collect(),
+ },
+ hir::ExprKind::Loop(ref body, ..) => {
+ let block_ty = self.typeck_results().node_type(body.hir_id);
+ let temp_lifetime = self
+ .rvalue_scopes
+ .temporary_scope(self.region_scope_tree, body.hir_id.local_id);
+ let block = self.mirror_block(body);
+ let body = self.thir.exprs.push(Expr {
+ ty: block_ty,
+ temp_lifetime,
+ span: block.span,
+ kind: ExprKind::Block { body: block },
+ });
+ ExprKind::Loop { body }
+ }
+ hir::ExprKind::Field(ref source, ..) => ExprKind::Field {
+ lhs: self.mirror_expr(source),
+ variant_index: VariantIdx::new(0),
+ name: Field::new(tcx.field_index(expr.hir_id, self.typeck_results)),
+ },
+ hir::ExprKind::Cast(ref source, ref cast_ty) => {
+ // Check for a user-given type annotation on this `cast`
+ let user_provided_types = self.typeck_results.user_provided_types();
+ let user_ty = user_provided_types.get(cast_ty.hir_id);
+
+ debug!(
+ "cast({:?}) has ty w/ hir_id {:?} and user provided ty {:?}",
+ expr, cast_ty.hir_id, user_ty,
+ );
+
+ let cast = self.mirror_expr_cast(*source, temp_lifetime, expr.span);
+
+ if let Some(user_ty) = user_ty {
+                    // NOTE: Creating a new Expr and wrapping a Cast inside of it may be
+                    // inefficient; revisit this when performance becomes an issue.
+ let cast_expr = self.thir.exprs.push(Expr {
+ temp_lifetime,
+ ty: expr_ty,
+ span: expr.span,
+ kind: cast,
+ });
+ debug!("make_mirror_unadjusted: (cast) user_ty={:?}", user_ty);
+
+ ExprKind::ValueTypeAscription { source: cast_expr, user_ty: Some(*user_ty) }
+ } else {
+ cast
+ }
+ }
+ hir::ExprKind::Type(ref source, ref ty) => {
+ let user_provided_types = self.typeck_results.user_provided_types();
+ let user_ty = user_provided_types.get(ty.hir_id).copied();
+ debug!("make_mirror_unadjusted: (type) user_ty={:?}", user_ty);
+ let mirrored = self.mirror_expr(source);
+ if source.is_syntactic_place_expr() {
+ ExprKind::PlaceTypeAscription { source: mirrored, user_ty }
+ } else {
+ ExprKind::ValueTypeAscription { source: mirrored, user_ty }
+ }
+ }
+ hir::ExprKind::DropTemps(ref source) => {
+ ExprKind::Use { source: self.mirror_expr(source) }
+ }
+ hir::ExprKind::Box(ref value) => ExprKind::Box { value: self.mirror_expr(value) },
+ hir::ExprKind::Array(ref fields) => {
+ ExprKind::Array { fields: self.mirror_exprs(fields) }
+ }
+ hir::ExprKind::Tup(ref fields) => ExprKind::Tuple { fields: self.mirror_exprs(fields) },
+
+ hir::ExprKind::Yield(ref v, _) => ExprKind::Yield { value: self.mirror_expr(v) },
+ hir::ExprKind::Err => unreachable!(),
+ };
+
+ Expr { temp_lifetime, ty: expr_ty, span: expr.span, kind }
+ }
+
+ fn user_substs_applied_to_res(
+ &mut self,
+ hir_id: hir::HirId,
+ res: Res,
+ ) -> Option<ty::CanonicalUserType<'tcx>> {
+ debug!("user_substs_applied_to_res: res={:?}", res);
+ let user_provided_type = match res {
+ // A reference to something callable -- e.g., a fn, method, or
+ // a tuple-struct or tuple-variant. This has the type of a
+ // `Fn` but with the user-given substitutions.
+ Res::Def(DefKind::Fn, _)
+ | Res::Def(DefKind::AssocFn, _)
+ | Res::Def(DefKind::Ctor(_, CtorKind::Fn), _)
+ | Res::Def(DefKind::Const, _)
+ | Res::Def(DefKind::AssocConst, _) => {
+ self.typeck_results().user_provided_types().get(hir_id).copied()
+ }
+
+ // A unit struct/variant which is used as a value (e.g.,
+ // `None`). This has the type of the enum/struct that defines
+ // this variant -- but with the substitutions given by the
+ // user.
+ Res::Def(DefKind::Ctor(_, CtorKind::Const), _) => {
+ self.user_substs_applied_to_ty_of_hir_id(hir_id)
+ }
+
+            // `Self` is used in an expression as a tuple-struct or unit-struct constructor.
+ Res::SelfCtor(_) => self.user_substs_applied_to_ty_of_hir_id(hir_id),
+
+ _ => bug!("user_substs_applied_to_res: unexpected res {:?} at {:?}", res, hir_id),
+ };
+ debug!("user_substs_applied_to_res: user_provided_type={:?}", user_provided_type);
+ user_provided_type
+ }
+
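+    /// Builds the (zero-sized) callee expression for a method call. For an
+    /// overloaded operator the `DefId` and substs are supplied directly;
+    /// otherwise they come from the type-dependent def recorded during typeck.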
+ fn method_callee(
+ &mut self,
+ expr: &hir::Expr<'_>,
+ span: Span,
+ overloaded_callee: Option<(DefId, SubstsRef<'tcx>)>,
+ ) -> Expr<'tcx> {
+ let temp_lifetime =
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id);
+ let (def_id, substs, user_ty) = match overloaded_callee {
+ Some((def_id, substs)) => (def_id, substs, None),
+ None => {
+ let (kind, def_id) =
+ self.typeck_results().type_dependent_def(expr.hir_id).unwrap_or_else(|| {
+ span_bug!(expr.span, "no type-dependent def for method callee")
+ });
+ let user_ty = self.user_substs_applied_to_res(expr.hir_id, Res::Def(kind, def_id));
+ debug!("method_callee: user_ty={:?}", user_ty);
+ (def_id, self.typeck_results().node_substs(expr.hir_id), user_ty)
+ }
+ };
+ let ty = self.tcx().mk_fn_def(def_id, substs);
+ Expr { temp_lifetime, ty, span, kind: ExprKind::ZstLiteral { user_ty } }
+ }
+
+ fn convert_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) -> ArmId {
+ let arm = Arm {
+ pattern: self.pattern_from_hir(&arm.pat),
+ guard: arm.guard.as_ref().map(|g| match g {
+ hir::Guard::If(ref e) => Guard::If(self.mirror_expr(e)),
+ hir::Guard::IfLet(ref l) => {
+ Guard::IfLet(self.pattern_from_hir(l.pat), self.mirror_expr(l.init))
+ }
+ }),
+ body: self.mirror_expr(arm.body),
+ lint_level: LintLevel::Explicit(arm.hir_id),
+ scope: region::Scope { id: arm.hir_id.local_id, data: region::ScopeData::Node },
+ span: arm.span,
+ };
+ self.thir.arms.push(arm)
+ }
+
+ fn convert_path_expr(&mut self, expr: &'tcx hir::Expr<'tcx>, res: Res) -> ExprKind<'tcx> {
+ let substs = self.typeck_results().node_substs(expr.hir_id);
+ match res {
+ // A regular function, constructor function or a constant.
+ Res::Def(DefKind::Fn, _)
+ | Res::Def(DefKind::AssocFn, _)
+ | Res::Def(DefKind::Ctor(_, CtorKind::Fn), _)
+ | Res::SelfCtor(_) => {
+ let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
+ ExprKind::ZstLiteral { user_ty }
+ }
+
+ Res::Def(DefKind::ConstParam, def_id) => {
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let item_id = self.tcx.hir().get_parent_node(hir_id);
+ let item_def_id = self.tcx.hir().local_def_id(item_id);
+ let generics = self.tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id];
+ let name = self.tcx.hir().name(hir_id);
+ let param = ty::ParamConst::new(index, name);
+
+ ExprKind::ConstParam { param, def_id }
+ }
+
+ Res::Def(DefKind::Const, def_id) | Res::Def(DefKind::AssocConst, def_id) => {
+ let user_ty = self.user_substs_applied_to_res(expr.hir_id, res);
+                ExprKind::NamedConst { def_id, substs, user_ty }
+ }
+
+ Res::Def(DefKind::Ctor(_, CtorKind::Const), def_id) => {
+ let user_provided_types = self.typeck_results.user_provided_types();
+ let user_provided_type = user_provided_types.get(expr.hir_id).copied();
+ debug!("convert_path_expr: user_provided_type={:?}", user_provided_type);
+ let ty = self.typeck_results().node_type(expr.hir_id);
+ match ty.kind() {
+ // A unit struct/variant which is used as a value.
+ // We return a completely different ExprKind here to account for this special case.
+ ty::Adt(adt_def, substs) => ExprKind::Adt(Box::new(Adt {
+ adt_def: *adt_def,
+ variant_index: adt_def.variant_index_with_ctor_id(def_id),
+ substs,
+ user_ty: user_provided_type,
+ fields: Box::new([]),
+ base: None,
+ })),
+ _ => bug!("unexpected ty: {:?}", ty),
+ }
+ }
+
+ // We encode uses of statics as a `*&STATIC` where the `&STATIC` part is
+ // a constant reference (or constant raw pointer for `static mut`) in MIR
+ Res::Def(DefKind::Static(_), id) => {
+ let ty = self.tcx.static_ptr_ty(id);
+ let temp_lifetime = self
+ .rvalue_scopes
+ .temporary_scope(self.region_scope_tree, expr.hir_id.local_id);
+ let kind = if self.tcx.is_thread_local_static(id) {
+ ExprKind::ThreadLocalRef(id)
+ } else {
+ let alloc_id = self.tcx.create_static_alloc(id);
+ ExprKind::StaticRef { alloc_id, ty, def_id: id }
+ };
+ ExprKind::Deref {
+ arg: self.thir.exprs.push(Expr { ty, temp_lifetime, span: expr.span, kind }),
+ }
+ }
+
+ Res::Local(var_hir_id) => self.convert_var(var_hir_id),
+
+ _ => span_bug!(expr.span, "res `{:?}` not yet implemented", res),
+ }
+ }
+
+ fn convert_var(&mut self, var_hir_id: hir::HirId) -> ExprKind<'tcx> {
+        // We want upvars here, not captures.
+ // Captures will be handled in MIR.
+ let is_upvar = self
+ .tcx
+ .upvars_mentioned(self.body_owner)
+ .map_or(false, |upvars| upvars.contains_key(&var_hir_id));
+
+ debug!(
+ "convert_var({:?}): is_upvar={}, body_owner={:?}",
+ var_hir_id, is_upvar, self.body_owner
+ );
+
+ if is_upvar {
+ ExprKind::UpvarRef {
+ closure_def_id: self.body_owner,
+ var_hir_id: LocalVarId(var_hir_id),
+ }
+ } else {
+ ExprKind::VarRef { id: LocalVarId(var_hir_id) }
+ }
+ }
+
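+    /// Lowers an overloaded operator application such as `a + b`, where `+`
+    /// resolves to a trait method, into an explicit call like `Add::add(a, b)`;
+    /// `from_hir_call: false` records that the source was operator sugar.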
+ fn overloaded_operator(
+ &mut self,
+ expr: &'tcx hir::Expr<'tcx>,
+ args: Box<[ExprId]>,
+ ) -> ExprKind<'tcx> {
+ let fun = self.method_callee(expr, expr.span, None);
+ let fun = self.thir.exprs.push(fun);
+ ExprKind::Call {
+ ty: self.thir[fun].ty,
+ fun,
+ args,
+ from_hir_call: false,
+ fn_span: expr.span,
+ }
+ }
+
+ fn overloaded_place(
+ &mut self,
+ expr: &'tcx hir::Expr<'tcx>,
+ place_ty: Ty<'tcx>,
+ overloaded_callee: Option<(DefId, SubstsRef<'tcx>)>,
+ args: Box<[ExprId]>,
+ span: Span,
+ ) -> ExprKind<'tcx> {
+ // For an overloaded *x or x[y] expression of type T, the method
+ // call returns an &T and we must add the deref so that the types
+ // line up (this is because `*x` and `x[y]` represent places):
+
+ // Reconstruct the output assuming it's a reference with the
+ // same region and mutability as the receiver. This holds for
+        // `Deref(Mut)::deref(_mut)` and `Index(Mut)::index(_mut)`.
+ let ty::Ref(region, _, mutbl) = *self.thir[args[0]].ty.kind() else {
+ span_bug!(span, "overloaded_place: receiver is not a reference");
+ };
+ let ref_ty = self.tcx.mk_ref(region, ty::TypeAndMut { ty: place_ty, mutbl });
+
+ // construct the complete expression `foo()` for the overloaded call,
+ // which will yield the &T type
+ let temp_lifetime =
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id);
+ let fun = self.method_callee(expr, span, overloaded_callee);
+ let fun = self.thir.exprs.push(fun);
+ let fun_ty = self.thir[fun].ty;
+ let ref_expr = self.thir.exprs.push(Expr {
+ temp_lifetime,
+ ty: ref_ty,
+ span,
+ kind: ExprKind::Call { ty: fun_ty, fun, args, from_hir_call: false, fn_span: span },
+ });
+
+ // construct and return a deref wrapper `*foo()`
+ ExprKind::Deref { arg: ref_expr }
+ }
+
+ fn convert_captured_hir_place(
+ &mut self,
+ closure_expr: &'tcx hir::Expr<'tcx>,
+ place: HirPlace<'tcx>,
+ ) -> Expr<'tcx> {
+ let temp_lifetime = self
+ .rvalue_scopes
+ .temporary_scope(self.region_scope_tree, closure_expr.hir_id.local_id);
+ let var_ty = place.base_ty;
+
+        // The result of capture analysis in `rustc_typeck/check/upvar.rs` represents a captured path
+ // as it's seen for use within the closure and not at the time of closure creation.
+ //
+        // That is, we expect it to start from a captured upvar and not from something that is
+        // local to the closure's parent.
+ let var_hir_id = match place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected an upvar, found {:?}", base),
+ };
+
+ let mut captured_place_expr = Expr {
+ temp_lifetime,
+ ty: var_ty,
+ span: closure_expr.span,
+ kind: self.convert_var(var_hir_id),
+ };
+
+ for proj in place.projections.iter() {
+ let kind = match proj.kind {
+ HirProjectionKind::Deref => {
+ ExprKind::Deref { arg: self.thir.exprs.push(captured_place_expr) }
+ }
+ HirProjectionKind::Field(field, variant_index) => ExprKind::Field {
+ lhs: self.thir.exprs.push(captured_place_expr),
+ variant_index,
+ name: Field::new(field as usize),
+ },
+ HirProjectionKind::Index | HirProjectionKind::Subslice => {
+ // We don't capture these projections, so we can ignore them here
+ continue;
+ }
+ };
+
+ captured_place_expr =
+ Expr { temp_lifetime, ty: proj.ty, span: closure_expr.span, kind };
+ }
+
+ captured_place_expr
+ }
+
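+    /// Builds the expression for a single captured upvar: the captured place
+    /// itself for by-value captures, or a borrow of it for by-ref captures.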
+ fn capture_upvar(
+ &mut self,
+ closure_expr: &'tcx hir::Expr<'tcx>,
+ captured_place: &'tcx ty::CapturedPlace<'tcx>,
+ upvar_ty: Ty<'tcx>,
+ ) -> Expr<'tcx> {
+ let upvar_capture = captured_place.info.capture_kind;
+ let captured_place_expr =
+ self.convert_captured_hir_place(closure_expr, captured_place.place.clone());
+ let temp_lifetime = self
+ .rvalue_scopes
+ .temporary_scope(self.region_scope_tree, closure_expr.hir_id.local_id);
+
+ match upvar_capture {
+ ty::UpvarCapture::ByValue => captured_place_expr,
+ ty::UpvarCapture::ByRef(upvar_borrow) => {
+ let borrow_kind = match upvar_borrow {
+ ty::BorrowKind::ImmBorrow => BorrowKind::Shared,
+ ty::BorrowKind::UniqueImmBorrow => BorrowKind::Unique,
+ ty::BorrowKind::MutBorrow => BorrowKind::Mut { allow_two_phase_borrow: false },
+ };
+ Expr {
+ temp_lifetime,
+ ty: upvar_ty,
+ span: closure_expr.span,
+ kind: ExprKind::Borrow {
+ borrow_kind,
+ arg: self.thir.exprs.push(captured_place_expr),
+ },
+ }
+ }
+ }
+ }
+
+ /// Converts a list of named fields (i.e., for struct-like struct/enum ADTs) into FieldExpr.
+ fn field_refs(&mut self, fields: &'tcx [hir::ExprField<'tcx>]) -> Box<[FieldExpr]> {
+ fields
+ .iter()
+ .map(|field| FieldExpr {
+ name: Field::new(self.tcx.field_index(field.hir_id, self.typeck_results)),
+ expr: self.mirror_expr(field.expr),
+ })
+ .collect()
+ }
+}
+
+trait ToBorrowKind {
+ fn to_borrow_kind(&self) -> BorrowKind;
+}
+
+impl ToBorrowKind for AutoBorrowMutability {
+ fn to_borrow_kind(&self) -> BorrowKind {
+ use rustc_middle::ty::adjustment::AllowTwoPhase;
+ match *self {
+ AutoBorrowMutability::Mut { allow_two_phase_borrow } => BorrowKind::Mut {
+ allow_two_phase_borrow: match allow_two_phase_borrow {
+ AllowTwoPhase::Yes => true,
+ AllowTwoPhase::No => false,
+ },
+ },
+ AutoBorrowMutability::Not => BorrowKind::Shared,
+ }
+ }
+}
+
+impl ToBorrowKind for hir::Mutability {
+ fn to_borrow_kind(&self) -> BorrowKind {
+ match *self {
+ hir::Mutability::Mut => BorrowKind::Mut { allow_two_phase_borrow: false },
+ hir::Mutability::Not => BorrowKind::Shared,
+ }
+ }
+}
+
+fn bin_op(op: hir::BinOpKind) -> BinOp {
+ match op {
+ hir::BinOpKind::Add => BinOp::Add,
+ hir::BinOpKind::Sub => BinOp::Sub,
+ hir::BinOpKind::Mul => BinOp::Mul,
+ hir::BinOpKind::Div => BinOp::Div,
+ hir::BinOpKind::Rem => BinOp::Rem,
+ hir::BinOpKind::BitXor => BinOp::BitXor,
+ hir::BinOpKind::BitAnd => BinOp::BitAnd,
+ hir::BinOpKind::BitOr => BinOp::BitOr,
+ hir::BinOpKind::Shl => BinOp::Shl,
+ hir::BinOpKind::Shr => BinOp::Shr,
+ hir::BinOpKind::Eq => BinOp::Eq,
+ hir::BinOpKind::Lt => BinOp::Lt,
+ hir::BinOpKind::Le => BinOp::Le,
+ hir::BinOpKind::Ne => BinOp::Ne,
+ hir::BinOpKind::Ge => BinOp::Ge,
+ hir::BinOpKind::Gt => BinOp::Gt,
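+        // `And` and `Or` never reach here: they are lowered to
+        // `ExprKind::LogicalOp` in `make_mirror_unadjusted`.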
+ _ => bug!("no equivalent for ast binop {:?}", op),
+ }
+}
diff --git a/compiler/rustc_mir_build/src/thir/cx/mod.rs b/compiler/rustc_mir_build/src/thir/cx/mod.rs
new file mode 100644
index 000000000..f7351a4ca
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/cx/mod.rs
@@ -0,0 +1,101 @@
+//! This module contains the functionality to convert from the wacky tcx data
+//! structures into the THIR. The `builder` is generally ignorant of the tcx,
+//! etc., and instead goes through the `Cx` for most of its work.
+
+use crate::thir::pattern::pat_from_hir;
+use crate::thir::util::UserAnnotatedTyHelpers;
+
+use rustc_data_structures::steal::Steal;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::HirId;
+use rustc_hir::Node;
+use rustc_middle::middle::region;
+use rustc_middle::thir::*;
+use rustc_middle::ty::{self, RvalueScopes, TyCtxt};
+use rustc_span::Span;
+
+pub(crate) fn thir_body<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ owner_def: ty::WithOptConstParam<LocalDefId>,
+) -> Result<(&'tcx Steal<Thir<'tcx>>, ExprId), ErrorGuaranteed> {
+ let hir = tcx.hir();
+ let body = hir.body(hir.body_owned_by(owner_def.did));
+ let mut cx = Cx::new(tcx, owner_def);
+ if let Some(reported) = cx.typeck_results.tainted_by_errors {
+ return Err(reported);
+ }
+ let expr = cx.mirror_expr(&body.value);
+ Ok((tcx.alloc_steal_thir(cx.thir), expr))
+}
+
+pub(crate) fn thir_tree<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ owner_def: ty::WithOptConstParam<LocalDefId>,
+) -> String {
+ match thir_body(tcx, owner_def) {
+ Ok((thir, _)) => format!("{:#?}", thir.steal()),
+ Err(_) => "error".into(),
+ }
+}
+
+struct Cx<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ thir: Thir<'tcx>,
+
+ pub(crate) param_env: ty::ParamEnv<'tcx>,
+
+ pub(crate) region_scope_tree: &'tcx region::ScopeTree,
+ pub(crate) typeck_results: &'tcx ty::TypeckResults<'tcx>,
+ pub(crate) rvalue_scopes: &'tcx RvalueScopes,
+
+ /// When applying adjustments to the expression
+ /// with the given `HirId`, use the given `Span`,
+ /// instead of the usual span. This is used to
+ /// assign the span of an overall method call
+ /// (e.g. `my_val.foo()`) to the adjustment expressions
+ /// for the receiver.
+ adjustment_span: Option<(HirId, Span)>,
+
+ /// The `DefId` of the owner of this body.
+ body_owner: DefId,
+}
+
+impl<'tcx> Cx<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, def: ty::WithOptConstParam<LocalDefId>) -> Cx<'tcx> {
+ let typeck_results = tcx.typeck_opt_const_arg(def);
+ Cx {
+ tcx,
+ thir: Thir::new(),
+ param_env: tcx.param_env(def.did),
+ region_scope_tree: tcx.region_scope_tree(def.did),
+ typeck_results,
+ rvalue_scopes: &typeck_results.rvalue_scopes,
+ body_owner: def.did.to_def_id(),
+ adjustment_span: None,
+ }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ pub(crate) fn pattern_from_hir(&mut self, p: &hir::Pat<'_>) -> Pat<'tcx> {
+ let p = match self.tcx.hir().get(p.hir_id) {
+ Node::Pat(p) => p,
+ node => bug!("pattern became {:?}", node),
+ };
+ pat_from_hir(self.tcx, self.param_env, self.typeck_results(), p)
+ }
+}
+
+impl<'tcx> UserAnnotatedTyHelpers<'tcx> for Cx<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn typeck_results(&self) -> &ty::TypeckResults<'tcx> {
+ self.typeck_results
+ }
+}
+
+mod block;
+mod expr;
diff --git a/compiler/rustc_mir_build/src/thir/mod.rs b/compiler/rustc_mir_build/src/thir/mod.rs
new file mode 100644
index 000000000..e0e6ac266
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/mod.rs
@@ -0,0 +1,13 @@
+//! The MIR is built from some typed high-level IR
+//! (THIR). This section defines the THIR along with a trait for
+//! accessing it. The intention is to allow MIR construction to be
+//! unit-tested and separated from the Rust source and compiler data
+//! structures.
+
+pub(crate) mod constant;
+
+pub(crate) mod cx;
+
+pub(crate) mod pattern;
+
+mod util;
diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
new file mode 100644
index 000000000..063c07647
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs
@@ -0,0 +1,1162 @@
+use super::deconstruct_pat::{Constructor, DeconstructedPat};
+use super::usefulness::{
+ compute_match_usefulness, MatchArm, MatchCheckCtxt, Reachability, UsefulnessReport,
+};
+use super::{PatCtxt, PatternError};
+
+use rustc_arena::TypedArena;
+use rustc_ast::Mutability;
+use rustc_errors::{
+ error_code, pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder,
+ ErrorGuaranteed, MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def::*;
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{HirId, Pat};
+use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
+use rustc_session::lint::builtin::{
+ BINDINGS_WITH_VARIANT_NAME, IRREFUTABLE_LET_PATTERNS, UNREACHABLE_PATTERNS,
+};
+use rustc_session::Session;
+use rustc_span::source_map::Spanned;
+use rustc_span::{BytePos, Span};
+
+pub(crate) fn check_match(tcx: TyCtxt<'_>, def_id: DefId) {
+ let body_id = match def_id.as_local() {
+ None => return,
+ Some(def_id) => tcx.hir().body_owned_by(def_id),
+ };
+
+ let pattern_arena = TypedArena::default();
+ let mut visitor = MatchVisitor {
+ tcx,
+ typeck_results: tcx.typeck_body(body_id),
+ param_env: tcx.param_env(def_id),
+ pattern_arena: &pattern_arena,
+ };
+ visitor.visit_body(tcx.hir().body(body_id));
+}
+
+fn create_e0004(
+ sess: &Session,
+ sp: Span,
+ error_message: String,
+) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ struct_span_err!(sess, sp, E0004, "{}", &error_message)
+}
+
+#[derive(PartialEq)]
+enum RefutableFlag {
+ Irrefutable,
+ Refutable,
+}
+use RefutableFlag::*;
+
+struct MatchVisitor<'a, 'p, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ pattern_arena: &'p TypedArena<DeconstructedPat<'p, 'tcx>>,
+}
+
+impl<'tcx> Visitor<'tcx> for MatchVisitor<'_, '_, 'tcx> {
+ fn visit_expr(&mut self, ex: &'tcx hir::Expr<'tcx>) {
+ intravisit::walk_expr(self, ex);
+ match &ex.kind {
+ hir::ExprKind::Match(scrut, arms, source) => {
+ self.check_match(scrut, arms, *source, ex.span)
+ }
+ hir::ExprKind::Let(hir::Let { pat, init, span, .. }) => {
+ self.check_let(pat, init, *span)
+ }
+ _ => {}
+ }
+ }
+
+ fn visit_local(&mut self, loc: &'tcx hir::Local<'tcx>) {
+ intravisit::walk_local(self, loc);
+ let els = loc.els;
+ if let Some(init) = loc.init && els.is_some() {
+ self.check_let(&loc.pat, init, loc.span);
+ }
+
+ let (msg, sp) = match loc.source {
+ hir::LocalSource::Normal => ("local binding", Some(loc.span)),
+ hir::LocalSource::AsyncFn => ("async fn binding", None),
+ hir::LocalSource::AwaitDesugar => ("`await` future binding", None),
+ hir::LocalSource::AssignDesugar(_) => ("destructuring assignment binding", None),
+ };
+ if els.is_none() {
+ self.check_irrefutable(&loc.pat, msg, sp);
+ }
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ intravisit::walk_param(self, param);
+ self.check_irrefutable(&param.pat, "function argument", None);
+ }
+}
+
+impl PatCtxt<'_, '_> {
+ fn report_inlining_errors(&self) {
+ for error in &self.errors {
+ match *error {
+ PatternError::StaticInPattern(span) => {
+ self.span_e0158(span, "statics cannot be referenced in patterns")
+ }
+ PatternError::AssocConstInPattern(span) => {
+ self.span_e0158(span, "associated consts cannot be referenced in patterns")
+ }
+ PatternError::ConstParamInPattern(span) => {
+ self.span_e0158(span, "const parameters cannot be referenced in patterns")
+ }
+ PatternError::NonConstPath(span) => {
+ rustc_middle::mir::interpret::struct_error(
+ self.tcx.at(span),
+ "runtime values cannot be referenced in patterns",
+ )
+ .emit();
+ }
+ }
+ }
+ }
+
+ fn span_e0158(&self, span: Span, text: &str) {
+ struct_span_err!(self.tcx.sess, span, E0158, "{}", text).emit();
+ }
+}
+
+impl<'p, 'tcx> MatchVisitor<'_, 'p, 'tcx> {
+ fn check_patterns(&self, pat: &Pat<'_>, rf: RefutableFlag) {
+ pat.walk_always(|pat| check_borrow_conflicts_in_at_patterns(self, pat));
+ check_for_bindings_named_same_as_variants(self, pat, rf);
+ }
+
+ fn lower_pattern(
+ &self,
+ cx: &mut MatchCheckCtxt<'p, 'tcx>,
+ pat: &'tcx hir::Pat<'tcx>,
+ have_errors: &mut bool,
+ ) -> &'p DeconstructedPat<'p, 'tcx> {
+ let mut patcx = PatCtxt::new(self.tcx, self.param_env, self.typeck_results);
+ patcx.include_lint_checks();
+ let pattern = patcx.lower_pattern(pat);
+ let pattern: &_ = cx.pattern_arena.alloc(DeconstructedPat::from_pat(cx, &pattern));
+ if !patcx.errors.is_empty() {
+ *have_errors = true;
+ patcx.report_inlining_errors();
+ }
+ pattern
+ }
+
+ fn new_cx(&self, hir_id: HirId) -> MatchCheckCtxt<'p, 'tcx> {
+ MatchCheckCtxt {
+ tcx: self.tcx,
+ param_env: self.param_env,
+ module: self.tcx.parent_module(hir_id).to_def_id(),
+ pattern_arena: &self.pattern_arena,
+ }
+ }
+
+ fn check_let(&mut self, pat: &'tcx hir::Pat<'tcx>, scrutinee: &hir::Expr<'_>, span: Span) {
+ self.check_patterns(pat, Refutable);
+ let mut cx = self.new_cx(scrutinee.hir_id);
+ let tpat = self.lower_pattern(&mut cx, pat, &mut false);
+ self.check_let_reachability(&mut cx, pat.hir_id, tpat, span);
+ }
+
+ fn check_match(
+ &mut self,
+ scrut: &hir::Expr<'_>,
+ hir_arms: &'tcx [hir::Arm<'tcx>],
+ source: hir::MatchSource,
+ expr_span: Span,
+ ) {
+ let mut cx = self.new_cx(scrut.hir_id);
+
+ for arm in hir_arms {
+ // Check the arm for some things unrelated to exhaustiveness.
+ self.check_patterns(&arm.pat, Refutable);
+ if let Some(hir::Guard::IfLet(ref let_expr)) = arm.guard {
+ self.check_patterns(let_expr.pat, Refutable);
+ let tpat = self.lower_pattern(&mut cx, let_expr.pat, &mut false);
+ self.check_let_reachability(&mut cx, let_expr.pat.hir_id, tpat, tpat.span());
+ }
+ }
+
+ let mut have_errors = false;
+
+ let arms: Vec<_> = hir_arms
+ .iter()
+ .map(|hir::Arm { pat, guard, .. }| MatchArm {
+ pat: self.lower_pattern(&mut cx, pat, &mut have_errors),
+ hir_id: pat.hir_id,
+ has_guard: guard.is_some(),
+ })
+ .collect();
+
+ // Bail out early if lowering failed.
+ if have_errors {
+ return;
+ }
+
+ let scrut_ty = self.typeck_results.expr_ty_adjusted(scrut);
+ let report = compute_match_usefulness(&cx, &arms, scrut.hir_id, scrut_ty);
+
+ match source {
+            // Don't report arm reachability of desugared `match $iter.into_iter() { iter => .. }`
+            // when the iterator is an uninhabited type. The `unreachable_code` lint will trigger instead.
+ hir::MatchSource::ForLoopDesugar if arms.len() == 1 => {}
+ hir::MatchSource::ForLoopDesugar | hir::MatchSource::Normal => {
+ report_arm_reachability(&cx, &report)
+ }
+            // Unreachable patterns in try and await expressions occur when one of
+            // the arms has an uninhabited type, which is OK.
+ hir::MatchSource::AwaitDesugar | hir::MatchSource::TryDesugar => {}
+ }
+
+ // Check if the match is exhaustive.
+ let witnesses = report.non_exhaustiveness_witnesses;
+ if !witnesses.is_empty() {
+ if source == hir::MatchSource::ForLoopDesugar && hir_arms.len() == 2 {
+                // The `for` loop binding pattern is refutable; report it as such.
+ let pat = hir_arms[1].pat.for_loop_some().unwrap();
+ self.check_irrefutable(pat, "`for` loop binding", None);
+ } else {
+ non_exhaustive_match(&cx, scrut_ty, scrut.span, witnesses, hir_arms, expr_span);
+ }
+ }
+ }
+
+ fn check_let_reachability(
+ &mut self,
+ cx: &mut MatchCheckCtxt<'p, 'tcx>,
+ pat_id: HirId,
+ pat: &'p DeconstructedPat<'p, 'tcx>,
+ span: Span,
+ ) {
+ if self.check_let_chain(cx, pat_id) {
+ return;
+ }
+
+ if is_let_irrefutable(cx, pat_id, pat) {
+ irrefutable_let_pattern(cx.tcx, pat_id, span);
+ }
+ }
+
+ fn check_let_chain(&mut self, cx: &mut MatchCheckCtxt<'p, 'tcx>, pat_id: HirId) -> bool {
+ let hir = self.tcx.hir();
+ let parent = hir.get_parent_node(pat_id);
+
+ // First, figure out if the given pattern is part of a let chain,
+ // and if so, obtain the top node of the chain.
+ let mut top = parent;
+ let mut part_of_chain = false;
+ loop {
+ let new_top = hir.get_parent_node(top);
+ if let hir::Node::Expr(
+ hir::Expr {
+ kind: hir::ExprKind::Binary(Spanned { node: hir::BinOpKind::And, .. }, lhs, rhs),
+ ..
+ },
+ ..,
+ ) = hir.get(new_top)
+ {
+ // If this isn't the first iteration, we need to check
+ // if there is a let expr before us in the chain, so
+ // that we avoid doubly checking the let chain.
+
+ // The way a chain of &&s is encoded is ((let ... && let ...) && let ...) && let ...
+ // as && is left-to-right associative. Thus, we need to check rhs.
+ if part_of_chain && matches!(rhs.kind, hir::ExprKind::Let(..)) {
+ return true;
+ }
+                // If there is a let on the lhs, and we are the rhs, we don't do any checking either.
+ if !part_of_chain && matches!(lhs.kind, hir::ExprKind::Let(..)) && rhs.hir_id == top
+ {
+ return true;
+ }
+ } else {
+ // We've reached the top.
+ break;
+ }
+
+            // Since this function is called within a let context, it is reasonable to assume
+            // that any parent `&&` implies a let chain.
+ part_of_chain = true;
+ top = new_top;
+ }
+ if !part_of_chain {
+ return false;
+ }
+
+ // Second, obtain the refutabilities of all exprs in the chain,
+ // and record chain members that aren't let exprs.
+ let mut chain_refutabilities = Vec::new();
+ let hir::Node::Expr(top_expr) = hir.get(top) else {
+ // We ensure right above that it's an Expr
+ unreachable!()
+ };
+ let mut cur_expr = top_expr;
+ loop {
+ let mut add = |expr: &hir::Expr<'tcx>| {
+ let refutability = match expr.kind {
+ hir::ExprKind::Let(hir::Let { pat, init, span, .. }) => {
+ let mut ncx = self.new_cx(init.hir_id);
+ let tpat = self.lower_pattern(&mut ncx, pat, &mut false);
+
+ let refutable = !is_let_irrefutable(&mut ncx, pat.hir_id, tpat);
+ Some((*span, refutable))
+ }
+ _ => None,
+ };
+ chain_refutabilities.push(refutability);
+ };
+ if let hir::Expr {
+ kind: hir::ExprKind::Binary(Spanned { node: hir::BinOpKind::And, .. }, lhs, rhs),
+ ..
+ } = cur_expr
+ {
+ add(rhs);
+ cur_expr = lhs;
+ } else {
+ add(cur_expr);
+ break;
+ }
+ }
+ chain_refutabilities.reverse();
+
+ // Third, emit the actual warnings.
+
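+        // If every member of the chain is an irrefutable `let`, the chain is
+        // linted as a whole; otherwise any irrefutable leading prefix or
+        // trailing suffix is linted separately below.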
+ if chain_refutabilities.iter().all(|r| matches!(*r, Some((_, false)))) {
+ // The entire chain is made up of irrefutable `let` statements
+ let let_source = let_source_parent(self.tcx, top, None);
+ irrefutable_let_patterns(
+ cx.tcx,
+ top,
+ let_source,
+ chain_refutabilities.len(),
+ top_expr.span,
+ );
+ return true;
+ }
+ let lint_affix = |affix: &[Option<(Span, bool)>], kind, suggestion| {
+ let span_start = affix[0].unwrap().0;
+ let span_end = affix.last().unwrap().unwrap().0;
+ let span = span_start.to(span_end);
+ let cnt = affix.len();
+ cx.tcx.struct_span_lint_hir(IRREFUTABLE_LET_PATTERNS, top, span, |lint| {
+ let s = pluralize!(cnt);
+ let mut diag = lint.build(&format!("{kind} irrefutable pattern{s} in let chain"));
+ diag.note(&format!(
+ "{these} pattern{s} will always match",
+ these = pluralize!("this", cnt),
+ ));
+ diag.help(&format!(
+ "consider moving {} {suggestion}",
+ if cnt > 1 { "them" } else { "it" }
+ ));
+ diag.emit()
+ });
+ };
+ if let Some(until) = chain_refutabilities.iter().position(|r| !matches!(*r, Some((_, false)))) && until > 0 {
+ // The chain has a non-zero prefix of irrefutable `let` statements.
+
+            // If the let source is a `while let`, there is no alternative place to put the
+            // prefix, so we shouldn't lint.
+ let let_source = let_source_parent(self.tcx, top, None);
+ if !matches!(let_source, LetSource::WhileLet) {
+ // Emit the lint
+ let prefix = &chain_refutabilities[..until];
+ lint_affix(prefix, "leading", "outside of the construct");
+ }
+ }
+ if let Some(from) = chain_refutabilities.iter().rposition(|r| !matches!(*r, Some((_, false)))) && from != (chain_refutabilities.len() - 1) {
+ // The chain has a non-empty suffix of irrefutable `let` statements
+ let suffix = &chain_refutabilities[from + 1..];
+ lint_affix(suffix, "trailing", "into the body");
+ }
+ true
+ }
+
+ fn check_irrefutable(&self, pat: &'tcx Pat<'tcx>, origin: &str, sp: Option<Span>) {
+ let mut cx = self.new_cx(pat.hir_id);
+
+ let pattern = self.lower_pattern(&mut cx, pat, &mut false);
+ let pattern_ty = pattern.ty();
+ let arms = vec![MatchArm { pat: pattern, hir_id: pat.hir_id, has_guard: false }];
+ let report = compute_match_usefulness(&cx, &arms, pat.hir_id, pattern_ty);
+
+ // Note: we ignore whether the pattern is unreachable (i.e. whether the type is empty). We
+ // only care about exhaustiveness here.
+ let witnesses = report.non_exhaustiveness_witnesses;
+ if witnesses.is_empty() {
+ // The pattern is irrefutable.
+ self.check_patterns(pat, Irrefutable);
+ return;
+ }
+
+ let joined_patterns = joined_uncovered_patterns(&cx, &witnesses);
+
+ let mut bindings = vec![];
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ pat.span,
+ E0005,
+ "refutable pattern in {}: {} not covered",
+ origin,
+ joined_patterns
+ );
+ let suggest_if_let = match &pat.kind {
+ hir::PatKind::Path(hir::QPath::Resolved(None, path))
+ if path.segments.len() == 1 && path.segments[0].args.is_none() =>
+ {
+ const_not_var(&mut err, cx.tcx, pat, path);
+ false
+ }
+ _ => {
+ pat.walk(&mut |pat: &hir::Pat<'_>| {
+ match pat.kind {
+ hir::PatKind::Binding(_, _, ident, _) => {
+ bindings.push(ident);
+ }
+ _ => {}
+ }
+ true
+ });
+
+ err.span_label(pat.span, pattern_not_covered_label(&witnesses, &joined_patterns));
+ true
+ }
+ };
+
+ if let (Some(span), true) = (sp, suggest_if_let) {
+ err.note(
+ "`let` bindings require an \"irrefutable pattern\", like a `struct` or \
+ an `enum` with only one variant",
+ );
+ if self.tcx.sess.source_map().is_span_accessible(span) {
+ let semi_span = span.shrink_to_hi().with_lo(span.hi() - BytePos(1));
+ let start_span = span.shrink_to_lo();
+ let end_span = semi_span.shrink_to_lo();
+ err.multipart_suggestion(
+ &format!(
+ "you might want to use `if let` to ignore the variant{} that {} matched",
+ pluralize!(witnesses.len()),
+ match witnesses.len() {
+ 1 => "isn't",
+ _ => "aren't",
+ },
+ ),
+ vec![
+ match &bindings[..] {
+ [] => (start_span, "if ".to_string()),
+ [binding] => (start_span, format!("let {} = if ", binding)),
+ bindings => (
+ start_span,
+ format!(
+ "let ({}) = if ",
+ bindings
+ .iter()
+ .map(|ident| ident.to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
+ ),
+ ),
+ },
+ match &bindings[..] {
+ [] => (semi_span, " { todo!() }".to_string()),
+ [binding] => {
+ (end_span, format!(" {{ {} }} else {{ todo!() }}", binding))
+ }
+ bindings => (
+ end_span,
+ format!(
+ " {{ ({}) }} else {{ todo!() }}",
+ bindings
+ .iter()
+ .map(|ident| ident.to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
+ ),
+ ),
+ },
+ ],
+ Applicability::HasPlaceholders,
+ );
+ if !bindings.is_empty() && cx.tcx.sess.is_nightly_build() {
+ err.span_suggestion_verbose(
+ semi_span.shrink_to_lo(),
+ &format!(
+ "alternatively, on nightly, you might want to use \
+ `#![feature(let_else)]` to handle the variant{} that {} matched",
+ pluralize!(witnesses.len()),
+ match witnesses.len() {
+ 1 => "isn't",
+ _ => "aren't",
+ },
+ ),
+ " else { todo!() }".to_string(),
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ err.note(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch18-02-refutability.html",
+ );
+ }
+
+ adt_defined_here(&cx, &mut err, pattern_ty, &witnesses);
+ err.note(&format!("the matched value is of type `{}`", pattern_ty));
+ err.emit();
+ }
+}
+
+/// A path pattern was interpreted as a constant, not a new variable.
+/// This caused an irrefutable match failure in e.g. `let`.
+fn const_not_var(err: &mut Diagnostic, tcx: TyCtxt<'_>, pat: &Pat<'_>, path: &hir::Path<'_>) {
+ let descr = path.res.descr();
+ err.span_label(
+ pat.span,
+ format!("interpreted as {} {} pattern, not a new variable", path.res.article(), descr,),
+ );
+
+ err.span_suggestion(
+ pat.span,
+ "introduce a variable instead",
+ format!("{}_var", path.segments[0].ident).to_lowercase(),
+        // Cannot use `MachineApplicable` as it's not really *always* correct
+        // because there may be such an identifier in scope, or the user may
+        // really have wanted to match against the constant. This is quite unlikely, however.
+ Applicability::MaybeIncorrect,
+ );
+
+ if let Some(span) = tcx.hir().res_span(path.res) {
+ err.span_label(span, format!("{} defined here", descr));
+ }
+}
+
+fn check_for_bindings_named_same_as_variants(
+ cx: &MatchVisitor<'_, '_, '_>,
+ pat: &Pat<'_>,
+ rf: RefutableFlag,
+) {
+ pat.walk_always(|p| {
+ if let hir::PatKind::Binding(_, _, ident, None) = p.kind
+ && let Some(ty::BindByValue(hir::Mutability::Not)) =
+ cx.typeck_results.extract_binding_mode(cx.tcx.sess, p.hir_id, p.span)
+ && let pat_ty = cx.typeck_results.pat_ty(p).peel_refs()
+ && let ty::Adt(edef, _) = pat_ty.kind()
+ && edef.is_enum()
+ && edef.variants().iter().any(|variant| {
+ variant.ident(cx.tcx) == ident && variant.ctor_kind == CtorKind::Const
+ })
+ {
+ let variant_count = edef.variants().len();
+ cx.tcx.struct_span_lint_hir(
+ BINDINGS_WITH_VARIANT_NAME,
+ p.hir_id,
+ p.span,
+ |lint| {
+ let ty_path = cx.tcx.def_path_str(edef.did());
+ let mut err = lint.build(&format!(
+ "pattern binding `{}` is named the same as one \
+ of the variants of the type `{}`",
+ ident, ty_path
+ ));
+ err.code(error_code!(E0170));
+ // If this is an irrefutable pattern, and there's > 1 variant,
+ // then we can't actually match on this. Applying the below
+ // suggestion would produce code that breaks on `check_irrefutable`.
+ if rf == Refutable || variant_count == 1 {
+ err.span_suggestion(
+ p.span,
+ "to match on the variant, qualify the path",
+ format!("{}::{}", ty_path, ident),
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ },
+ )
+ }
+ });
+}
+
+/// Checks for common cases of "catchall" patterns that may not be intended as such.
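+/// E.g. `_`, a plain binding, or a single-constructor pattern whose fields
+/// are all catchalls themselves, such as `(_, _)`.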
+fn pat_is_catchall(pat: &DeconstructedPat<'_, '_>) -> bool {
+ use Constructor::*;
+ match pat.ctor() {
+ Wildcard => true,
+ Single => pat.iter_fields().all(|pat| pat_is_catchall(pat)),
+ _ => false,
+ }
+}
+
+fn unreachable_pattern(tcx: TyCtxt<'_>, span: Span, id: HirId, catchall: Option<Span>) {
+ tcx.struct_span_lint_hir(UNREACHABLE_PATTERNS, id, span, |lint| {
+ let mut err = lint.build("unreachable pattern");
+ if let Some(catchall) = catchall {
+ // We had a catchall pattern, hint at that.
+ err.span_label(span, "unreachable pattern");
+ err.span_label(catchall, "matches any value");
+ }
+ err.emit();
+ });
+}
+
+fn irrefutable_let_pattern(tcx: TyCtxt<'_>, id: HirId, span: Span) {
+ let source = let_source(tcx, id);
+ irrefutable_let_patterns(tcx, id, source, 1, span);
+}
+
+fn irrefutable_let_patterns(
+ tcx: TyCtxt<'_>,
+ id: HirId,
+ source: LetSource,
+ count: usize,
+ span: Span,
+) {
+ macro_rules! emit_diag {
+ (
+ $lint:expr,
+ $source_name:expr,
+            $note_suffix:expr,
+            $help_suffix:expr
+        ) => {{
+            let s = pluralize!(count);
+            let these = pluralize!("this", count);
+            let mut diag = $lint.build(&format!("irrefutable {} pattern{s}", $source_name));
+            diag.note(&format!("{these} pattern{s} will always match, so the {}", $note_suffix));
+            diag.help(concat!("consider ", $help_suffix));
+ diag.emit()
+ }};
+ }
+
+ let span = match source {
+ LetSource::LetElse(span) => span,
+ _ => span,
+ };
+ tcx.struct_span_lint_hir(IRREFUTABLE_LET_PATTERNS, id, span, |lint| match source {
+ LetSource::GenericLet => {
+ emit_diag!(lint, "`let`", "`let` is useless", "removing `let`");
+ }
+ LetSource::IfLet => {
+ emit_diag!(
+ lint,
+ "`if let`",
+ "`if let` is useless",
+ "replacing the `if let` with a `let`"
+ );
+ }
+ LetSource::IfLetGuard => {
+ emit_diag!(
+ lint,
+ "`if let` guard",
+ "guard is useless",
+ "removing the guard and adding a `let` inside the match arm"
+ );
+ }
+ LetSource::LetElse(..) => {
+ emit_diag!(
+ lint,
+ "`let...else`",
+ "`else` clause is useless",
+ "removing the `else` clause"
+ );
+ }
+ LetSource::WhileLet => {
+ emit_diag!(
+ lint,
+ "`while let`",
+ "loop will never exit",
+ "instead using a `loop { ... }` with a `let` inside it"
+ );
+ }
+ });
+}
+
+fn is_let_irrefutable<'p, 'tcx>(
+ cx: &mut MatchCheckCtxt<'p, 'tcx>,
+ pat_id: HirId,
+ pat: &'p DeconstructedPat<'p, 'tcx>,
+) -> bool {
+ let arms = [MatchArm { pat, hir_id: pat_id, has_guard: false }];
+ let report = compute_match_usefulness(&cx, &arms, pat_id, pat.ty());
+
+ // Report if the pattern is unreachable, which can only occur when the type is uninhabited.
+ // This also reports unreachable sub-patterns though, so we can't just replace it with an
+ // `is_uninhabited` check.
+ report_arm_reachability(&cx, &report);
+
+ // If the list of witnesses is empty, the match is exhaustive,
+ // i.e. the `if let` pattern is irrefutable.
+ report.non_exhaustiveness_witnesses.is_empty()
+}
+
+/// Report unreachable arms, if any.
+fn report_arm_reachability<'p, 'tcx>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ report: &UsefulnessReport<'p, 'tcx>,
+) {
+ use Reachability::*;
+ let mut catchall = None;
+ for (arm, is_useful) in report.arm_usefulness.iter() {
+ match is_useful {
+ Unreachable => unreachable_pattern(cx.tcx, arm.pat.span(), arm.hir_id, catchall),
+ Reachable(unreachables) if unreachables.is_empty() => {}
+ // The arm is reachable, but contains unreachable subpatterns (from or-patterns).
+ Reachable(unreachables) => {
+ let mut unreachables = unreachables.clone();
+ // Emit lints in the order in which they occur in the file.
+ unreachables.sort_unstable();
+ for span in unreachables {
+ unreachable_pattern(cx.tcx, span, arm.hir_id, None);
+ }
+ }
+ }
+ if !arm.has_guard && catchall.is_none() && pat_is_catchall(arm.pat) {
+ catchall = Some(arm.pat.span());
+ }
+ }
+}
+
+/// Report that a match is not exhaustive.
+fn non_exhaustive_match<'p, 'tcx>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ scrut_ty: Ty<'tcx>,
+ sp: Span,
+ witnesses: Vec<DeconstructedPat<'p, 'tcx>>,
+ arms: &[hir::Arm<'tcx>],
+ expr_span: Span,
+) {
+ let is_empty_match = arms.is_empty();
+ let non_empty_enum = match scrut_ty.kind() {
+ ty::Adt(def, _) => def.is_enum() && !def.variants().is_empty(),
+ _ => false,
+ };
+ // In the case of an empty match, replace the '`_` not covered' diagnostic with something more
+ // informative.
+ let mut err;
+ let pattern;
+ let mut patterns_len = 0;
+ if is_empty_match && !non_empty_enum {
+ err = create_e0004(
+ cx.tcx.sess,
+ sp,
+ format!("non-exhaustive patterns: type `{}` is non-empty", scrut_ty),
+ );
+ pattern = "_".to_string();
+ } else {
+ let joined_patterns = joined_uncovered_patterns(cx, &witnesses);
+ err = create_e0004(
+ cx.tcx.sess,
+ sp,
+ format!("non-exhaustive patterns: {} not covered", joined_patterns),
+ );
+ err.span_label(sp, pattern_not_covered_label(&witnesses, &joined_patterns));
+ patterns_len = witnesses.len();
+ pattern = if witnesses.len() < 4 {
+ witnesses
+ .iter()
+ .map(|witness| witness.to_pat(cx).to_string())
+ .collect::<Vec<String>>()
+ .join(" | ")
+ } else {
+ "_".to_string()
+ };
+ };
+
+ let is_variant_list_non_exhaustive = match scrut_ty.kind() {
+ ty::Adt(def, _) if def.is_variant_list_non_exhaustive() && !def.did().is_local() => true,
+ _ => false,
+ };
+
+ adt_defined_here(cx, &mut err, scrut_ty, &witnesses);
+ err.note(&format!(
+ "the matched value is of type `{}`{}",
+ scrut_ty,
+ if is_variant_list_non_exhaustive { ", which is marked as non-exhaustive" } else { "" }
+ ));
+ if (scrut_ty == cx.tcx.types.usize || scrut_ty == cx.tcx.types.isize)
+ && !is_empty_match
+ && witnesses.len() == 1
+ && matches!(witnesses[0].ctor(), Constructor::NonExhaustive)
+ {
+ err.note(&format!(
+ "`{}` does not have a fixed maximum value, so a wildcard `_` is necessary to match \
+ exhaustively",
+ scrut_ty,
+ ));
+ if cx.tcx.sess.is_nightly_build() {
+ err.help(&format!(
+ "add `#![feature(precise_pointer_size_matching)]` to the crate attributes to \
+ enable precise `{}` matching",
+ scrut_ty,
+ ));
+ }
+ }
+ if let ty::Ref(_, sub_ty, _) = scrut_ty.kind() {
+ if cx.tcx.is_ty_uninhabited_from(cx.module, *sub_ty, cx.param_env) {
+ err.note("references are always considered inhabited");
+ }
+ }
+
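+    // Build a structured suggestion for adding a wildcard arm: inside the
+    // empty body `{}`, after the only arm, or after the last of several arms,
+    // reusing the surrounding indentation and comma style.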
+ let mut suggestion = None;
+ let sm = cx.tcx.sess.source_map();
+ match arms {
+ [] if sp.eq_ctxt(expr_span) => {
+ // Get the span for the empty match body `{}`.
+ let (indentation, more) = if let Some(snippet) = sm.indentation_before(sp) {
+ (format!("\n{}", snippet), " ")
+ } else {
+ (" ".to_string(), "")
+ };
+ suggestion = Some((
+ sp.shrink_to_hi().with_hi(expr_span.hi()),
+ format!(
+ " {{{indentation}{more}{pattern} => todo!(),{indentation}}}",
+ indentation = indentation,
+ more = more,
+ pattern = pattern,
+ ),
+ ));
+ }
+ [only] => {
+ let (pre_indentation, is_multiline) = if let Some(snippet) = sm.indentation_before(only.span)
+ && let Ok(with_trailing) = sm.span_extend_while(only.span, |c| c.is_whitespace() || c == ',')
+ && sm.is_multiline(with_trailing)
+ {
+ (format!("\n{}", snippet), true)
+ } else {
+ (" ".to_string(), false)
+ };
+ let comma = if matches!(only.body.kind, hir::ExprKind::Block(..))
+ && only.span.eq_ctxt(only.body.span)
+ && is_multiline
+ {
+ ""
+ } else {
+ ","
+ };
+ suggestion = Some((
+ only.span.shrink_to_hi(),
+ format!("{}{}{} => todo!()", comma, pre_indentation, pattern),
+ ));
+ }
+ [.., prev, last] if prev.span.eq_ctxt(last.span) => {
+ if let Ok(snippet) = sm.span_to_snippet(prev.span.between(last.span)) {
+ let comma = if matches!(last.body.kind, hir::ExprKind::Block(..))
+ && last.span.eq_ctxt(last.body.span)
+ {
+ ""
+ } else {
+ ","
+ };
+ suggestion = Some((
+ last.span.shrink_to_hi(),
+ format!(
+ "{}{}{} => todo!()",
+ comma,
+ snippet.strip_prefix(',').unwrap_or(&snippet),
+ pattern
+ ),
+ ));
+ }
+ }
+ _ => {}
+ }
+
+ let msg = format!(
+ "ensure that all possible cases are being handled by adding a match arm with a wildcard \
+ pattern{}{}",
+ if patterns_len > 1 && patterns_len < 4 && suggestion.is_some() {
+ ", a match arm with multiple or-patterns"
+ } else {
+ // we are either not suggesting anything, or suggesting `_`
+ ""
+ },
+ match patterns_len {
+ // non-exhaustive enum case
+ 0 if suggestion.is_some() => " as shown",
+ 0 => "",
+ 1 if suggestion.is_some() => " or an explicit pattern as shown",
+ 1 => " or an explicit pattern",
+ _ if suggestion.is_some() => " as shown, or multiple match arms",
+ _ => " or multiple match arms",
+ },
+ );
+ if let Some((span, sugg)) = suggestion {
+ err.span_suggestion_verbose(span, &msg, sugg, Applicability::HasPlaceholders);
+ } else {
+ err.help(&msg);
+ }
+ err.emit();
+}
+
+pub(crate) fn joined_uncovered_patterns<'p, 'tcx>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ witnesses: &[DeconstructedPat<'p, 'tcx>],
+) -> String {
+ const LIMIT: usize = 3;
+ let pat_to_str = |pat: &DeconstructedPat<'p, 'tcx>| pat.to_pat(cx).to_string();
+ match witnesses {
+ [] => bug!(),
+ [witness] => format!("`{}`", witness.to_pat(cx)),
+ [head @ .., tail] if head.len() < LIMIT => {
+ let head: Vec<_> = head.iter().map(pat_to_str).collect();
+ format!("`{}` and `{}`", head.join("`, `"), tail.to_pat(cx))
+ }
+ _ => {
+ let (head, tail) = witnesses.split_at(LIMIT);
+ let head: Vec<_> = head.iter().map(pat_to_str).collect();
+ format!("`{}` and {} more", head.join("`, `"), tail.len())
+ }
+ }
+}
+
+pub(crate) fn pattern_not_covered_label(
+ witnesses: &[DeconstructedPat<'_, '_>],
+ joined_patterns: &str,
+) -> String {
+ format!("pattern{} {} not covered", rustc_errors::pluralize!(witnesses.len()), joined_patterns)
+}
+
+/// Point at the definition of non-covered `enum` variants.
+fn adt_defined_here<'p, 'tcx>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ err: &mut Diagnostic,
+ ty: Ty<'tcx>,
+ witnesses: &[DeconstructedPat<'p, 'tcx>],
+) {
+ let ty = ty.peel_refs();
+ if let ty::Adt(def, _) = ty.kind() {
+ let mut spans = vec![];
+ if witnesses.len() < 5 {
+ for sp in maybe_point_at_variant(cx, *def, witnesses.iter()) {
+ spans.push(sp);
+ }
+ }
+ let def_span = cx
+ .tcx
+ .hir()
+ .get_if_local(def.did())
+ .and_then(|node| node.ident())
+ .map(|ident| ident.span)
+ .unwrap_or_else(|| cx.tcx.def_span(def.did()));
+ let mut span: MultiSpan =
+ if spans.is_empty() { def_span.into() } else { spans.clone().into() };
+
+ span.push_span_label(def_span, "");
+ for pat in spans {
+ span.push_span_label(pat, "not covered");
+ }
+ err.span_note(span, &format!("`{}` defined here", ty));
+ }
+}
+
+fn maybe_point_at_variant<'a, 'p: 'a, 'tcx: 'a>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ def: AdtDef<'tcx>,
+ patterns: impl Iterator<Item = &'a DeconstructedPat<'p, 'tcx>>,
+) -> Vec<Span> {
+ use Constructor::*;
+ let mut covered = vec![];
+ for pattern in patterns {
+ if let Variant(variant_index) = pattern.ctor() {
+ if let ty::Adt(this_def, _) = pattern.ty().kind() && this_def.did() != def.did() {
+ continue;
+ }
+ let sp = def.variant(*variant_index).ident(cx.tcx).span;
+ if covered.contains(&sp) {
+ // Don't point at variants that have already been covered due to other patterns to avoid
+ // visual clutter.
+ continue;
+ }
+ covered.push(sp);
+ }
+ covered.extend(maybe_point_at_variant(cx, def, pattern.iter_fields()));
+ }
+ covered
+}
+
+/// Check if a by-value binding is by-move: that is, check if the binding's type is not `Copy`.
+fn is_binding_by_move(cx: &MatchVisitor<'_, '_, '_>, hir_id: HirId, span: Span) -> bool {
+ !cx.typeck_results.node_type(hir_id).is_copy_modulo_regions(cx.tcx.at(span), cx.param_env)
+}
+
+/// Check that there are no borrow or move conflicts in `binding @ subpat` patterns.
+///
+/// For example, this would reject:
+/// - `ref x @ Some(ref mut y)`,
+/// - `ref mut x @ Some(ref y)`,
+/// - `ref mut x @ Some(ref mut y)`,
+/// - `ref mut? x @ Some(y)`, and
+/// - `x @ Some(ref mut? y)`.
+///
+/// This analysis is *not* subsumed by NLL.
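+///
+/// As an illustration (an added example, not part of the upstream comment), the first
+/// rejected form fails because the outer `ref` borrow aliases the inner `ref mut` borrow:
+///
+/// ```compile_fail
+/// let mut opt = Some(0);
+/// // `x` borrows `opt` immutably while `y` borrows its contents mutably.
+/// if let ref x @ Some(ref mut y) = opt {
+///     let _ = (x, y);
+/// }
+/// ```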
+fn check_borrow_conflicts_in_at_patterns(cx: &MatchVisitor<'_, '_, '_>, pat: &Pat<'_>) {
+ // Extract `sub` in `binding @ sub`.
+ let (name, sub) = match &pat.kind {
+ hir::PatKind::Binding(.., name, Some(sub)) => (*name, sub),
+ _ => return,
+ };
+ let binding_span = pat.span.with_hi(name.span.hi());
+
+ let typeck_results = cx.typeck_results;
+ let sess = cx.tcx.sess;
+
+ // Get the binding move, extract the mutability if by-ref.
+ let mut_outer = match typeck_results.extract_binding_mode(sess, pat.hir_id, pat.span) {
+ Some(ty::BindByValue(_)) if is_binding_by_move(cx, pat.hir_id, pat.span) => {
+ // We have `x @ pat` where `x` is by-move. Reject all borrows in `pat`.
+ let mut conflicts_ref = Vec::new();
+ sub.each_binding(|_, hir_id, span, _| {
+ match typeck_results.extract_binding_mode(sess, hir_id, span) {
+ Some(ty::BindByValue(_)) | None => {}
+ Some(ty::BindByReference(_)) => conflicts_ref.push(span),
+ }
+ });
+ if !conflicts_ref.is_empty() {
+ let occurs_because = format!(
+ "move occurs because `{}` has type `{}` which does not implement the `Copy` trait",
+ name,
+ typeck_results.node_type(pat.hir_id),
+ );
+ sess.struct_span_err(pat.span, "borrow of moved value")
+ .span_label(binding_span, format!("value moved into `{}` here", name))
+ .span_label(binding_span, occurs_because)
+ .span_labels(conflicts_ref, "value borrowed here after move")
+ .emit();
+ }
+ return;
+ }
+ Some(ty::BindByValue(_)) | None => return,
+ Some(ty::BindByReference(m)) => m,
+ };
+
+ // We now have `ref $mut_outer binding @ sub` (semantically).
+ // Recurse into each binding in `sub` and find mutability or move conflicts.
+ let mut conflicts_move = Vec::new();
+ let mut conflicts_mut_mut = Vec::new();
+ let mut conflicts_mut_ref = Vec::new();
+ sub.each_binding(|_, hir_id, span, name| {
+ match typeck_results.extract_binding_mode(sess, hir_id, span) {
+ Some(ty::BindByReference(mut_inner)) => match (mut_outer, mut_inner) {
+ (Mutability::Not, Mutability::Not) => {} // Both sides are `ref`.
+ (Mutability::Mut, Mutability::Mut) => conflicts_mut_mut.push((span, name)), // 2x `ref mut`.
+ _ => conflicts_mut_ref.push((span, name)), // `ref` + `ref mut` in either direction.
+ },
+ Some(ty::BindByValue(_)) if is_binding_by_move(cx, hir_id, span) => {
+ conflicts_move.push((span, name)) // `ref mut?` + by-move conflict.
+ }
+ Some(ty::BindByValue(_)) | None => {} // `ref mut?` + by-copy is fine.
+ }
+ });
+
+ // Report errors if any.
+ if !conflicts_mut_mut.is_empty() {
+ // Report mutability conflicts for e.g. `ref mut x @ Some(ref mut y)`.
+ let mut err = sess
+ .struct_span_err(pat.span, "cannot borrow value as mutable more than once at a time");
+ err.span_label(binding_span, format!("first mutable borrow, by `{}`, occurs here", name));
+ for (span, name) in conflicts_mut_mut {
+ err.span_label(span, format!("another mutable borrow, by `{}`, occurs here", name));
+ }
+ for (span, name) in conflicts_mut_ref {
+ err.span_label(span, format!("also borrowed as immutable, by `{}`, here", name));
+ }
+ for (span, name) in conflicts_move {
+ err.span_label(span, format!("also moved into `{}` here", name));
+ }
+ err.emit();
+ } else if !conflicts_mut_ref.is_empty() {
+ // Report mutability conflicts for e.g. `ref x @ Some(ref mut y)` or the converse.
+ let (primary, also) = match mut_outer {
+ Mutability::Mut => ("mutable", "immutable"),
+ Mutability::Not => ("immutable", "mutable"),
+ };
+ let msg =
+ format!("cannot borrow value as {} because it is also borrowed as {}", also, primary);
+ let mut err = sess.struct_span_err(pat.span, &msg);
+ err.span_label(binding_span, format!("{} borrow, by `{}`, occurs here", primary, name));
+ for (span, name) in conflicts_mut_ref {
+ err.span_label(span, format!("{} borrow, by `{}`, occurs here", also, name));
+ }
+ for (span, name) in conflicts_move {
+ err.span_label(span, format!("also moved into `{}` here", name));
+ }
+ err.emit();
+ } else if !conflicts_move.is_empty() {
+ // Report by-ref and by-move conflicts, e.g. `ref x @ y`.
+ let mut err =
+ sess.struct_span_err(pat.span, "cannot move out of value because it is borrowed");
+ err.span_label(binding_span, format!("value borrowed, by `{}`, here", name));
+ for (span, name) in conflicts_move {
+ err.span_label(span, format!("value moved into `{}` here", name));
+ }
+ err.emit();
+ }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub enum LetSource {
+ GenericLet,
+ IfLet,
+ IfLetGuard,
+ LetElse(Span),
+ WhileLet,
+}
+
+fn let_source(tcx: TyCtxt<'_>, pat_id: HirId) -> LetSource {
+ let hir = tcx.hir();
+
+ let parent = hir.get_parent_node(pat_id);
+ let_source_parent(tcx, parent, Some(pat_id))
+}
+
+fn let_source_parent(tcx: TyCtxt<'_>, parent: HirId, pat_id: Option<HirId>) -> LetSource {
+ let hir = tcx.hir();
+
+ let parent_node = hir.get(parent);
+
+ match parent_node {
+ hir::Node::Arm(hir::Arm {
+ guard: Some(hir::Guard::IfLet(&hir::Let { pat: hir::Pat { hir_id, .. }, .. })),
+ ..
+ }) if Some(*hir_id) == pat_id => {
+ return LetSource::IfLetGuard;
+ }
+ _ => {}
+ }
+
+ let parent_parent = hir.get_parent_node(parent);
+ let parent_parent_node = hir.get(parent_parent);
+ if let hir::Node::Stmt(hir::Stmt { kind: hir::StmtKind::Local(_), span, .. }) =
+ parent_parent_node
+ {
+ return LetSource::LetElse(*span);
+ }
+
+ let parent_parent_parent = hir.get_parent_node(parent_parent);
+ let parent_parent_parent_parent = hir.get_parent_node(parent_parent_parent);
+ let parent_parent_parent_parent_node = hir.get(parent_parent_parent_parent);
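+ // (Added explanatory note:) `while let` is lowered to `loop { if let .. { .. } else { break } }`,
+ // so the chain from the `let` expression up to the loop is `Let -> If -> Block -> Loop`;
+ // hence the chain of `get_parent_node` calls above.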
+
+ if let hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Loop(_, _, hir::LoopSource::While, _),
+ ..
+ }) = parent_parent_parent_parent_node
+ {
+ return LetSource::WhileLet;
+ }
+
+ if let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::If(..), .. }) = parent_parent_node {
+ return LetSource::IfLet;
+ }
+
+ LetSource::GenericLet
+}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
new file mode 100644
index 000000000..d6dd0f017
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs
@@ -0,0 +1,601 @@
+use rustc_hir as hir;
+use rustc_index::vec::Idx;
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_middle::mir::{self, Field};
+use rustc_middle::thir::{FieldPat, Pat, PatKind};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
+use rustc_session::lint;
+use rustc_span::Span;
+use rustc_trait_selection::traits::predicate_for_trait_def;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+use rustc_trait_selection::traits::{self, ObligationCause, PredicateObligation};
+
+use std::cell::Cell;
+
+use super::PatCtxt;
+
+impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
+ /// Converts an evaluated constant to a pattern (if possible).
+ /// This means aggregate values (like structs and enums) are converted
+ /// to a pattern that matches the value (as if you'd compared via structural equality).
+ #[instrument(level = "debug", skip(self))]
+ pub(super) fn const_to_pat(
+ &self,
+ cv: mir::ConstantKind<'tcx>,
+ id: hir::HirId,
+ span: Span,
+ mir_structural_match_violation: bool,
+ ) -> Pat<'tcx> {
+ let pat = self.tcx.infer_ctxt().enter(|infcx| {
+ let mut convert = ConstToPat::new(self, id, span, infcx);
+ convert.to_pat(cv, mir_structural_match_violation)
+ });
+
+ debug!(?pat);
+ pat
+ }
+}
+
+struct ConstToPat<'a, 'tcx> {
+ id: hir::HirId,
+ span: Span,
+ param_env: ty::ParamEnv<'tcx>,
+
+ // This tracks if we emitted some hard error for a given const value, so that
+ // we will not subsequently issue an irrelevant lint for the same const
+ // value.
+ saw_const_match_error: Cell<bool>,
+
+ // This tracks if we emitted some diagnostic for a given const value, so that
+ // we will not subsequently issue an irrelevant lint for the same const
+ // value.
+ saw_const_match_lint: Cell<bool>,
+
+ // For backcompat we need to keep allowing non-structurally-eq types behind references.
+ // See also all the `cant-hide-behind` tests.
+ behind_reference: Cell<bool>,
+
+ // Inference context used for checking `T: Structural` bounds.
+ infcx: InferCtxt<'a, 'tcx>,
+
+ include_lint_checks: bool,
+
+ treat_byte_string_as_slice: bool,
+}
+
+mod fallback_to_const_ref {
+ #[derive(Debug)]
+ /// This error type signals that we encountered a non-struct-eq situation behind a reference.
+ /// We bubble this up in order to get back to the reference destructuring and make that emit
+ /// a const pattern instead of a deref pattern. This allows us to simply call `PartialEq::eq`
+ /// on such patterns (since that function takes a reference) and not have to jump through any
+ /// hoops to get a reference to the value.
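+ /// Illustration (added, not from the upstream docs): for a `const C: &T = ...;` where `T`
+ /// is not structurally matchable, `match x { C => .. }` bubbles this error up to the `&T`
+ /// level and emits a constant pattern there, which is then compared via `PartialEq::eq`.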
+ pub(super) struct FallbackToConstRef(());
+
+ pub(super) fn fallback_to_const_ref<'a, 'tcx>(
+ c2p: &super::ConstToPat<'a, 'tcx>,
+ ) -> FallbackToConstRef {
+ assert!(c2p.behind_reference.get());
+ FallbackToConstRef(())
+ }
+}
+use fallback_to_const_ref::{fallback_to_const_ref, FallbackToConstRef};
+
+impl<'a, 'tcx> ConstToPat<'a, 'tcx> {
+ fn new(
+ pat_ctxt: &PatCtxt<'_, 'tcx>,
+ id: hir::HirId,
+ span: Span,
+ infcx: InferCtxt<'a, 'tcx>,
+ ) -> Self {
+ trace!(?pat_ctxt.typeck_results.hir_owner);
+ ConstToPat {
+ id,
+ span,
+ infcx,
+ param_env: pat_ctxt.param_env,
+ include_lint_checks: pat_ctxt.include_lint_checks,
+ saw_const_match_error: Cell::new(false),
+ saw_const_match_lint: Cell::new(false),
+ behind_reference: Cell::new(false),
+ treat_byte_string_as_slice: pat_ctxt
+ .typeck_results
+ .treat_byte_string_as_slice
+ .contains(&id.local_id),
+ }
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn adt_derive_msg(&self, adt_def: AdtDef<'tcx>) -> String {
+ let path = self.tcx().def_path_str(adt_def.did());
+ format!(
+ "to use a constant of type `{}` in a pattern, \
+ `{}` must be annotated with `#[derive(PartialEq, Eq)]`",
+ path, path,
+ )
+ }
+
+ fn search_for_structural_match_violation(&self, ty: Ty<'tcx>) -> Option<String> {
+ traits::search_for_structural_match_violation(self.span, self.tcx(), ty).map(|non_sm_ty| {
+ with_no_trimmed_paths!(match non_sm_ty.kind() {
+ ty::Adt(adt, _) => self.adt_derive_msg(*adt),
+ ty::Dynamic(..) => {
+ "trait objects cannot be used in patterns".to_string()
+ }
+ ty::Opaque(..) => {
+ "opaque types cannot be used in patterns".to_string()
+ }
+ ty::Closure(..) => {
+ "closures cannot be used in patterns".to_string()
+ }
+ ty::Generator(..) | ty::GeneratorWitness(..) => {
+ "generators cannot be used in patterns".to_string()
+ }
+ ty::Float(..) => {
+ "floating-point numbers cannot be used in patterns".to_string()
+ }
+ ty::FnPtr(..) => {
+ "function pointers cannot be used in patterns".to_string()
+ }
+ ty::RawPtr(..) => {
+ "raw pointers cannot be used in patterns".to_string()
+ }
+ _ => {
+ bug!("use of a value of `{non_sm_ty}` inside a pattern")
+ }
+ })
+ })
+ }
+
+ fn type_marked_structural(&self, ty: Ty<'tcx>) -> bool {
+ ty.is_structural_eq_shallow(self.infcx.tcx)
+ }
+
+ fn to_pat(
+ &mut self,
+ cv: mir::ConstantKind<'tcx>,
+ mir_structural_match_violation: bool,
+ ) -> Pat<'tcx> {
+ trace!(self.treat_byte_string_as_slice);
+ // This method is just a wrapper handling a validity check; the heavy lifting is
+ // performed by the recursive `recur` method, which is not meant to be
+ // invoked except by this method.
+ //
+ // Once `indirect_structural_match` is a full-fledged error, this
+ // level of indirection can be eliminated.
+
+ let inlined_const_as_pat = self.recur(cv, mir_structural_match_violation).unwrap();
+
+ if self.include_lint_checks && !self.saw_const_match_error.get() {
+ // If we were able to successfully convert the const to some pat,
+ // double-check that all types in the const implement `Structural`.
+
+ let structural = self.search_for_structural_match_violation(cv.ty());
+ debug!(
+ "search_for_structural_match_violation cv.ty: {:?} returned: {:?}",
+ cv.ty(),
+ structural
+ );
+
+ // This can occur because const qualification treats all associated constants as
+ // opaque, whereas `search_for_structural_match_violation` tries to monomorphize them
+ // before it runs.
+ //
+ // FIXME(#73448): Find a way to bring const qualification into parity with
+ // `search_for_structural_match_violation`.
+ if structural.is_none() && mir_structural_match_violation {
+ warn!("MIR const-checker found novel structural match violation. See #73448.");
+ return inlined_const_as_pat;
+ }
+
+ if let Some(msg) = structural {
+ if !self.type_may_have_partial_eq_impl(cv.ty()) {
+ // span_fatal avoids ICE from resolution of non-existent method (rare case).
+ self.tcx().sess.span_fatal(self.span, &msg);
+ } else if mir_structural_match_violation && !self.saw_const_match_lint.get() {
+ self.tcx().struct_span_lint_hir(
+ lint::builtin::INDIRECT_STRUCTURAL_MATCH,
+ self.id,
+ self.span,
+ |lint| {
+ lint.build(&msg).emit();
+ },
+ );
+ } else {
+ debug!(
+ "`search_for_structural_match_violation` found one, but `CustomEq` was \
+ not in the qualifs for that `const`"
+ );
+ }
+ }
+ }
+
+ inlined_const_as_pat
+ }
+
+ fn type_may_have_partial_eq_impl(&self, ty: Ty<'tcx>) -> bool {
+ // double-check there even *is* a semantic `PartialEq` to dispatch to.
+ //
+ // (If there isn't, then we can safely issue a hard
+ // error, because that has never worked, due to the compiler
+ // using `PartialEq::eq` in this scenario in the past.)
+ let partial_eq_trait_id =
+ self.tcx().require_lang_item(hir::LangItem::PartialEq, Some(self.span));
+ let obligation: PredicateObligation<'_> = predicate_for_trait_def(
+ self.tcx(),
+ self.param_env,
+ ObligationCause::misc(self.span, self.id),
+ partial_eq_trait_id,
+ 0,
+ ty,
+ &[],
+ );
+ // FIXME: should this call a `predicate_must_hold` variant instead?
+
+ let has_impl = self.infcx.predicate_may_hold(&obligation);
+
+ // Note: To fix rust-lang/rust#65466, we could just remove this type
+ // walk hack for function pointers, and unconditionally error
+ // if `PartialEq` is not implemented. However, that breaks stable
+ // code at the moment, because types like `for <'a> fn(&'a ())` do
+ // not *yet* implement `PartialEq`. So for now we leave this here.
+ has_impl
+ || ty.walk().any(|t| match t.unpack() {
+ ty::subst::GenericArgKind::Lifetime(_) => false,
+ ty::subst::GenericArgKind::Type(t) => t.is_fn_ptr(),
+ ty::subst::GenericArgKind::Const(_) => false,
+ })
+ }
+
+ fn field_pats(
+ &self,
+ vals: impl Iterator<Item = mir::ConstantKind<'tcx>>,
+ ) -> Result<Vec<FieldPat<'tcx>>, FallbackToConstRef> {
+ vals.enumerate()
+ .map(|(idx, val)| {
+ let field = Field::new(idx);
+ Ok(FieldPat { field, pattern: self.recur(val, false)? })
+ })
+ .collect()
+ }
+
+ // Recursive helper for `to_pat`; invoke that (instead of calling this directly).
+ #[instrument(skip(self), level = "debug")]
+ fn recur(
+ &self,
+ cv: mir::ConstantKind<'tcx>,
+ mir_structural_match_violation: bool,
+ ) -> Result<Pat<'tcx>, FallbackToConstRef> {
+ let id = self.id;
+ let span = self.span;
+ let tcx = self.tcx();
+ let param_env = self.param_env;
+
+ let kind = match cv.ty().kind() {
+ ty::Float(_) => {
+ if self.include_lint_checks {
+ tcx.struct_span_lint_hir(
+ lint::builtin::ILLEGAL_FLOATING_POINT_LITERAL_PATTERN,
+ id,
+ span,
+ |lint| {
+ lint.build("floating-point types cannot be used in patterns").emit();
+ },
+ );
+ }
+ PatKind::Constant { value: cv }
+ }
+ ty::Adt(adt_def, _) if adt_def.is_union() => {
+ // Matching on union fields is unsafe, we can't hide it in constants
+ self.saw_const_match_error.set(true);
+ let msg = "cannot use unions in constant patterns";
+ if self.include_lint_checks {
+ tcx.sess.span_err(span, msg);
+ } else {
+ tcx.sess.delay_span_bug(span, msg);
+ }
+ PatKind::Wild
+ }
+ ty::Adt(..)
+ if !self.type_may_have_partial_eq_impl(cv.ty())
+ // FIXME(#73448): Find a way to bring const qualification into parity with
+ // `search_for_structural_match_violation` and then remove this condition.
+ && self.search_for_structural_match_violation(cv.ty()).is_some() =>
+ {
+ // Obtain the actual type that isn't annotated. If we just looked at `cv.ty` we
+ // could get `Option<NonStructEq>`, even though `Option` is annotated with derive.
+ let msg = self.search_for_structural_match_violation(cv.ty()).unwrap();
+ self.saw_const_match_error.set(true);
+ if self.include_lint_checks {
+ tcx.sess.span_err(self.span, &msg);
+ } else {
+ tcx.sess.delay_span_bug(self.span, &msg);
+ }
+ PatKind::Wild
+ }
+ // If the type is not structurally comparable, just emit the constant directly,
+ // causing the pattern match code to treat it opaquely.
+ // FIXME: This code doesn't emit errors itself, the caller emits the errors.
+ // So instead of specific errors, you just get blanket errors about the whole
+ // const type. See
+ // https://github.com/rust-lang/rust/pull/70743#discussion_r404701963 for
+ // details.
+ // Backwards compatibility hack because we can't cause hard errors on these
+ // types, so we compare them via `PartialEq::eq` at runtime.
+ ty::Adt(..) if !self.type_marked_structural(cv.ty()) && self.behind_reference.get() => {
+ if self.include_lint_checks
+ && !self.saw_const_match_error.get()
+ && !self.saw_const_match_lint.get()
+ {
+ self.saw_const_match_lint.set(true);
+ tcx.struct_span_lint_hir(
+ lint::builtin::INDIRECT_STRUCTURAL_MATCH,
+ id,
+ span,
+ |lint| {
+ let msg = format!(
+ "to use a constant of type `{}` in a pattern, \
+ `{}` must be annotated with `#[derive(PartialEq, Eq)]`",
+ cv.ty(),
+ cv.ty(),
+ );
+ lint.build(&msg).emit();
+ },
+ );
+ }
+ // Since we are behind a reference, we can just bubble the error up so we get a
+ // constant at reference type, making it easy to let the fallback call
+ // `PartialEq::eq` on it.
+ return Err(fallback_to_const_ref(self));
+ }
+ ty::Adt(adt_def, _) if !self.type_marked_structural(cv.ty()) => {
+ debug!(
+ "adt_def {:?} has !type_marked_structural for cv.ty: {:?}",
+ adt_def,
+ cv.ty()
+ );
+ let path = tcx.def_path_str(adt_def.did());
+ let msg = format!(
+ "to use a constant of type `{}` in a pattern, \
+ `{}` must be annotated with `#[derive(PartialEq, Eq)]`",
+ path, path,
+ );
+ self.saw_const_match_error.set(true);
+ if self.include_lint_checks {
+ tcx.sess.span_err(span, &msg);
+ } else {
+ tcx.sess.delay_span_bug(span, &msg);
+ }
+ PatKind::Wild
+ }
+ ty::Adt(adt_def, substs) if adt_def.is_enum() => {
+ let destructured = tcx.destructure_mir_constant(param_env, cv);
+
+ PatKind::Variant {
+ adt_def: *adt_def,
+ substs,
+ variant_index: destructured
+ .variant
+ .expect("destructed const of adt without variant id"),
+ subpatterns: self.field_pats(destructured.fields.iter().copied())?,
+ }
+ }
+ ty::Tuple(_) | ty::Adt(_, _) => {
+ let destructured = tcx.destructure_mir_constant(param_env, cv);
+ PatKind::Leaf { subpatterns: self.field_pats(destructured.fields.iter().copied())? }
+ }
+ ty::Array(..) => PatKind::Array {
+ prefix: tcx
+ .destructure_mir_constant(param_env, cv)
+ .fields
+ .iter()
+ .map(|val| self.recur(*val, false))
+ .collect::<Result<_, _>>()?,
+ slice: None,
+ suffix: Vec::new(),
+ },
+ ty::Ref(_, pointee_ty, ..) => match *pointee_ty.kind() {
+ // These are not allowed and will error elsewhere anyway.
+ ty::Dynamic(..) => {
+ self.saw_const_match_error.set(true);
+ let msg = format!("`{}` cannot be used in patterns", cv.ty());
+ if self.include_lint_checks {
+ tcx.sess.span_err(span, &msg);
+ } else {
+ tcx.sess.delay_span_bug(span, &msg);
+ }
+ PatKind::Wild
+ }
+ // `&str` is represented as `ConstValue::Slice`, let's keep using this
+ // optimization for now.
+ ty::Str => PatKind::Constant { value: cv },
+ // `b"foo"` produces a `&[u8; 3]`, but you can't use constants of array type when
+ // matching against references, you can only use byte string literals.
+ // The typechecker has a special case for byte string literals, by treating them
+ // as slices. This means we turn `&[T; N]` constants into slice patterns, which
+ // has no negative effects on pattern matching, even if we're actually matching on
+ // arrays.
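+ // Illustration (added, hypothetical scrutinee): with `s: &[u8]`, the arm `b"foo" => ..`
+ // matches a `&[u8; 3]` constant against a slice; `treat_byte_string_as_slice` is set for
+ // it, so it skips this arm and is lowered as a slice pattern by the arm further below.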
+ ty::Array(..) if !self.treat_byte_string_as_slice => {
+ let old = self.behind_reference.replace(true);
+ let array = tcx.deref_mir_constant(self.param_env.and(cv));
+ let val = PatKind::Deref {
+ subpattern: Pat {
+ kind: Box::new(PatKind::Array {
+ prefix: tcx
+ .destructure_mir_constant(param_env, array)
+ .fields
+ .iter()
+ .map(|val| self.recur(*val, false))
+ .collect::<Result<_, _>>()?,
+ slice: None,
+ suffix: vec![],
+ }),
+ span,
+ ty: *pointee_ty,
+ },
+ };
+ self.behind_reference.set(old);
+ val
+ }
+ ty::Array(elem_ty, _) |
+ // Cannot merge this with the catch-all branch below, because the `const_deref`
+ // changes the type from slice to array; we need to keep the original type in the
+ // pattern.
+ ty::Slice(elem_ty) => {
+ let old = self.behind_reference.replace(true);
+ let array = tcx.deref_mir_constant(self.param_env.and(cv));
+ let val = PatKind::Deref {
+ subpattern: Pat {
+ kind: Box::new(PatKind::Slice {
+ prefix: tcx
+ .destructure_mir_constant(param_env, array)
+ .fields
+ .iter()
+ .map(|val| self.recur(*val, false))
+ .collect::<Result<_, _>>()?,
+ slice: None,
+ suffix: vec![],
+ }),
+ span,
+ ty: tcx.mk_slice(elem_ty),
+ },
+ };
+ self.behind_reference.set(old);
+ val
+ }
+ // Backwards compatibility hack: support references to non-structural types.
+ // We'll lower
+ // this pattern to a `PartialEq::eq` comparison and `PartialEq::eq` takes a
+ // reference. This makes the rest of the matching logic simpler as it doesn't have
+ // to figure out how to get a reference again.
+ ty::Adt(adt_def, _) if !self.type_marked_structural(*pointee_ty) => {
+ if self.behind_reference.get() {
+ if self.include_lint_checks
+ && !self.saw_const_match_error.get()
+ && !self.saw_const_match_lint.get()
+ {
+ self.saw_const_match_lint.set(true);
+ let msg = self.adt_derive_msg(adt_def);
+ self.tcx().struct_span_lint_hir(
+ lint::builtin::INDIRECT_STRUCTURAL_MATCH,
+ self.id,
+ self.span,
+ |lint| {
+ lint.build(&msg).emit();
+ },
+ );
+ }
+ PatKind::Constant { value: cv }
+ } else {
+ if !self.saw_const_match_error.get() {
+ self.saw_const_match_error.set(true);
+ let msg = self.adt_derive_msg(adt_def);
+ if self.include_lint_checks {
+ tcx.sess.span_err(span, &msg);
+ } else {
+ tcx.sess.delay_span_bug(span, &msg);
+ }
+ }
+ PatKind::Wild
+ }
+ }
+ // All other references are converted into deref patterns and then recursively
+ // convert the dereferenced constant to a pattern that is the sub-pattern of the
+ // deref pattern.
+ _ => {
+ if !pointee_ty.is_sized(tcx.at(span), param_env) {
+ // `tcx.deref_mir_constant()` below will ICE with an unsized type
+ // (except slices, which are handled in a separate arm above).
+ let msg = format!("cannot use unsized non-slice type `{}` in constant patterns", pointee_ty);
+ if self.include_lint_checks {
+ tcx.sess.span_err(span, &msg);
+ } else {
+ tcx.sess.delay_span_bug(span, &msg);
+ }
+ PatKind::Wild
+ } else {
+ let old = self.behind_reference.replace(true);
+ // In case there are structural-match violations somewhere in this subpattern,
+ // we fall back to a const pattern. If we do not do this, we may end up with
+ // a !structural-match constant that is not of reference type, which makes it
+ // very hard to invoke `PartialEq::eq` on it as a fallback.
+ let val = match self.recur(tcx.deref_mir_constant(self.param_env.and(cv)), false) {
+ Ok(subpattern) => PatKind::Deref { subpattern },
+ Err(_) => PatKind::Constant { value: cv },
+ };
+ self.behind_reference.set(old);
+ val
+ }
+ }
+ },
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::FnDef(..) => {
+ PatKind::Constant { value: cv }
+ }
+ ty::RawPtr(pointee) if pointee.ty.is_sized(tcx.at(span), param_env) => {
+ PatKind::Constant { value: cv }
+ }
+ // FIXME: these can have very surprising behaviour where optimization levels or other
+ // compilation choices change the runtime behaviour of the match.
+ // See https://github.com/rust-lang/rust/issues/70861 for examples.
+ ty::FnPtr(..) | ty::RawPtr(..) => {
+ if self.include_lint_checks
+ && !self.saw_const_match_error.get()
+ && !self.saw_const_match_lint.get()
+ {
+ self.saw_const_match_lint.set(true);
+ let msg = "function pointers and unsized pointers in patterns behave \
+ unpredictably and should not be relied upon. \
+ See https://github.com/rust-lang/rust/issues/70861 for details.";
+ tcx.struct_span_lint_hir(
+ lint::builtin::POINTER_STRUCTURAL_MATCH,
+ id,
+ span,
+ |lint| {
+ lint.build(msg).emit();
+ },
+ );
+ }
+ PatKind::Constant { value: cv }
+ }
+ _ => {
+ self.saw_const_match_error.set(true);
+ let msg = format!("`{}` cannot be used in patterns", cv.ty());
+ if self.include_lint_checks {
+ tcx.sess.span_err(span, &msg);
+ } else {
+ tcx.sess.delay_span_bug(span, &msg);
+ }
+ PatKind::Wild
+ }
+ };
+
+ if self.include_lint_checks
+ && !self.saw_const_match_error.get()
+ && !self.saw_const_match_lint.get()
+ && mir_structural_match_violation
+ // FIXME(#73448): Find a way to bring const qualification into parity with
+ // `search_for_structural_match_violation` and then remove this condition.
+ && self.search_for_structural_match_violation(cv.ty()).is_some()
+ {
+ self.saw_const_match_lint.set(true);
+ // Obtain the actual type that isn't annotated. If we just looked at `cv.ty` we
+ // could get `Option<NonStructEq>`, even though `Option` is annotated with derive.
+ let msg = self.search_for_structural_match_violation(cv.ty()).unwrap().replace(
+ "in a pattern,",
+ "in a pattern, the constant's initializer must be trivial or",
+ );
+ tcx.struct_span_lint_hir(
+ lint::builtin::NONTRIVIAL_STRUCTURAL_MATCH,
+ id,
+ span,
+ |lint| {
+ lint.build(&msg).emit();
+ },
+ );
+ }
+
+ Ok(Pat { span, ty: cv.ty(), kind: Box::new(kind) })
+ }
+}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
new file mode 100644
index 000000000..8d6f8efb6
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/pattern/deconstruct_pat.rs
@@ -0,0 +1,1711 @@
+//! [`super::usefulness`] explains most of what is happening in this file. As explained there,
+//! values and patterns are made from constructors applied to fields. This file defines a
+//! `Constructor` enum, a `Fields` struct, and various operations to manipulate them and convert
+//! them from/to patterns.
+//!
+//! There's one idea that is not detailed in [`super::usefulness`] because the details are not
+//! needed there: _constructor splitting_.
+//!
+//! # Constructor splitting
+//!
+//! The idea is as follows: given a constructor `c` and a matrix, we want to specialize in turn
+//! with all the value constructors that are covered by `c`, and compute usefulness for each.
+//! Instead of listing all those constructors (which is intractable), we group those value
+//! constructors together as much as possible. Example:
+//!
+//! ```compile_fail,E0004
+//! match (0, false) {
+//! (0 ..=100, true) => {} // `p_1`
+//! (50..=150, false) => {} // `p_2`
+//! (0 ..=200, _) => {} // `q`
+//! }
+//! ```
+//!
+//! The naive approach would try all numbers in the range `0..=200`. But we can be a lot more
+//! clever: `0` and `1` for example will match the exact same rows, and return equivalent
+//! witnesses. In fact all of `0..50` would. We can thus restrict our exploration to 4
+//! constructors: `0..50`, `50..=100`, `101..=150` and `151..=200`. That is enough and infinitely
+//! more tractable.
+//!
+//! We capture this idea in a function `split(p_1 ... p_n, c)` which returns a list of constructors
+//! `c'` covered by `c`. Given such a `c'`, we require that all value ctors `c''` covered by `c'`
+//! return an equivalent set of witnesses after specializing and computing usefulness.
+//! In the example above, witnesses for specializing by `c''` covered by `0..50` will only differ
+//! in their first element.
+//!
+//! We usually also ask that the `c'` together cover all of the original `c`. However we allow
+//! skipping some constructors as long as it doesn't change whether the resulting list of witnesses
+//! is empty or not. We use this in the wildcard `_` case.
+//!
+//! Splitting is implemented in the [`Constructor::split`] function. We don't do splitting for
+//! or-patterns; instead we just try the alternatives one-by-one. For details on splitting
+//! wildcards, see [`SplitWildcard`]; for integer ranges, see [`SplitIntRange`]; for slices, see
+//! [`SplitVarLenSlice`].
+
+use self::Constructor::*;
+use self::SliceKind::*;
+
+use super::compare_const_vals;
+use super::usefulness::{MatchCheckCtxt, PatCtxt};
+
+use rustc_data_structures::captures::Captures;
+use rustc_index::vec::Idx;
+
+use rustc_hir::{HirId, RangeEnd};
+use rustc_middle::mir::{self, Field};
+use rustc_middle::thir::{FieldPat, Pat, PatKind, PatRange};
+use rustc_middle::ty::layout::IntegerExt;
+use rustc_middle::ty::{self, Ty, TyCtxt, VariantDef};
+use rustc_middle::{middle::stability::EvalResult, mir::interpret::ConstValue};
+use rustc_session::lint;
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi::{Integer, Size, VariantIdx};
+
+use smallvec::{smallvec, SmallVec};
+use std::cell::Cell;
+use std::cmp::{self, max, min, Ordering};
+use std::fmt;
+use std::iter::{once, IntoIterator};
+use std::ops::RangeInclusive;
+
+/// Recursively expand this pattern into its subpatterns. Only useful for or-patterns.
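+/// E.g. (an illustrative case) `0 | (1 | 2)` is flattened into the list `[0, 1, 2]`.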
+fn expand_or_pat<'p, 'tcx>(pat: &'p Pat<'tcx>) -> Vec<&'p Pat<'tcx>> {
+ fn expand<'p, 'tcx>(pat: &'p Pat<'tcx>, vec: &mut Vec<&'p Pat<'tcx>>) {
+ if let PatKind::Or { pats } = pat.kind.as_ref() {
+ for pat in pats {
+ expand(pat, vec);
+ }
+ } else {
+ vec.push(pat)
+ }
+ }
+
+ let mut pats = Vec::new();
+ expand(pat, &mut pats);
+ pats
+}
+
+/// An inclusive interval, used for precise integer exhaustiveness checking.
+/// `IntRange`s always store a contiguous range. This means that values are
+/// encoded such that `0` encodes the minimum value for the integer,
+/// regardless of the signedness.
+/// For example, the pattern `-128..=127i8` is encoded as `0..=255`.
+/// This makes comparisons and arithmetic on interval endpoints much more
+/// straightforward. See `signed_bias` for details.
+///
+/// `IntRange` is never used to encode an empty range or a "range" that wraps
+/// around the (offset) space: i.e., `range.lo <= range.hi`.
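+///
+/// A worked sketch of the encoding (an added example, not part of the upstream docs): for
+/// `i8` the bias is `1 << 7`, and XOR-ing it with the two's-complement bit pattern yields
+/// an unsigned value with the same ordering:
+///
+/// ```
+/// let bias: u128 = 1 << 7;
+/// assert_eq!(((-128i8 as u8) as u128) ^ bias, 0); // the minimum encodes to 0
+/// assert_eq!(((127i8 as u8) as u128) ^ bias, 255); // the maximum encodes to 255
+/// ```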
+#[derive(Clone, PartialEq, Eq)]
+pub(super) struct IntRange {
+ range: RangeInclusive<u128>,
+ /// Keeps the bias used for encoding the range. It depends on the type of the range and
+ /// possibly the pointer size of the current architecture. The algorithm ensures we never
+ /// compare `IntRange`s with different types/architectures.
+ bias: u128,
+}
+
+impl IntRange {
+ #[inline]
+ fn is_integral(ty: Ty<'_>) -> bool {
+ matches!(ty.kind(), ty::Char | ty::Int(_) | ty::Uint(_) | ty::Bool)
+ }
+
+ fn is_singleton(&self) -> bool {
+ self.range.start() == self.range.end()
+ }
+
+ fn boundaries(&self) -> (u128, u128) {
+ (*self.range.start(), *self.range.end())
+ }
+
+ #[inline]
+ fn integral_size_and_signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> Option<(Size, u128)> {
+ match *ty.kind() {
+ ty::Bool => Some((Size::from_bytes(1), 0)),
+ ty::Char => Some((Size::from_bytes(4), 0)),
+ ty::Int(ity) => {
+ let size = Integer::from_int_ty(&tcx, ity).size();
+ Some((size, 1u128 << (size.bits() as u128 - 1)))
+ }
+ ty::Uint(uty) => Some((Integer::from_uint_ty(&tcx, uty).size(), 0)),
+ _ => None,
+ }
+ }
+
+ #[inline]
+ fn from_constant<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: mir::ConstantKind<'tcx>,
+ ) -> Option<IntRange> {
+ let ty = value.ty();
+ if let Some((target_size, bias)) = Self::integral_size_and_signed_bias(tcx, ty) {
+ let val = (|| {
+ match value {
+ mir::ConstantKind::Val(ConstValue::Scalar(scalar), _) => {
+ // For this specific pattern we can skip a lot of effort and go
+ // straight to the result, after doing a bit of checking. (We
+ // could remove this branch and just fall through, which
+ // is more general but much slower.)
+ if let Ok(Ok(bits)) = scalar.to_bits_or_ptr_internal(target_size) {
+ return Some(bits);
+ } else {
+ return None;
+ }
+ }
+ mir::ConstantKind::Ty(c) => match c.kind() {
+ ty::ConstKind::Value(_) => bug!(
+ "encountered ConstValue in mir::ConstantKind::Ty, whereas this is expected to be in ConstantKind::Val"
+ ),
+ _ => {}
+ },
+ _ => {}
+ }
+
+ // This is a more general form of the previous case.
+ value.try_eval_bits(tcx, param_env, ty)
+ })()?;
+ let val = val ^ bias;
+ Some(IntRange { range: val..=val, bias })
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ fn from_range<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ lo: u128,
+ hi: u128,
+ ty: Ty<'tcx>,
+ end: &RangeEnd,
+ ) -> Option<IntRange> {
+ if Self::is_integral(ty) {
+ // Perform a shift if the underlying types are signed,
+ // which makes the interval arithmetic simpler.
+ let bias = IntRange::signed_bias(tcx, ty);
+ let (lo, hi) = (lo ^ bias, hi ^ bias);
+ let offset = (*end == RangeEnd::Excluded) as u128;
+ if lo > hi || (lo == hi && *end == RangeEnd::Excluded) {
+ // This should have been caught earlier by E0030.
+ bug!("malformed range pattern: {}..={}", lo, (hi - offset));
+ }
+ Some(IntRange { range: lo..=(hi - offset), bias })
+ } else {
+ None
+ }
+ }
+
+ // The return value of `signed_bias` should be XORed with an endpoint to encode/decode it.
+ fn signed_bias(tcx: TyCtxt<'_>, ty: Ty<'_>) -> u128 {
+ match *ty.kind() {
+ ty::Int(ity) => {
+ let bits = Integer::from_int_ty(&tcx, ity).size().bits() as u128;
+ 1u128 << (bits - 1)
+ }
+ _ => 0,
+ }
+ }
+
+ fn is_subrange(&self, other: &Self) -> bool {
+ other.range.start() <= self.range.start() && self.range.end() <= other.range.end()
+ }
+
+ fn intersection(&self, other: &Self) -> Option<Self> {
+ let (lo, hi) = self.boundaries();
+ let (other_lo, other_hi) = other.boundaries();
+ if lo <= other_hi && other_lo <= hi {
+ Some(IntRange { range: max(lo, other_lo)..=min(hi, other_hi), bias: self.bias })
+ } else {
+ None
+ }
+ }
+
+ fn suspicious_intersection(&self, other: &Self) -> bool {
+ // `false` in the following cases:
+ // 1 ---- // 1 ---------- // 1 ---- // 1 ----
+ // 2 ---------- // 2 ---- // 2 ---- // 2 ----
+ //
+ // The following are currently `false`, but could be `true` in the future (#64007):
+ // 1 --------- // 1 ---------
+ // 2 ---------- // 2 ----------
+ //
+ // `true` in the following cases:
+ // 1 ------- // 1 -------
+ // 2 -------- // 2 -------
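+ //
+ // For example (an added illustration): `0..=5` and `5..=10` touch exactly at the
+ // endpoint `5` and neither is a singleton, so the pair counts as suspicious.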
+ let (lo, hi) = self.boundaries();
+ let (other_lo, other_hi) = other.boundaries();
+ (lo == other_hi || hi == other_lo) && !self.is_singleton() && !other.is_singleton()
+ }
+
+ /// Only used for displaying the range properly.
+ fn to_pat<'tcx>(&self, tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Pat<'tcx> {
+ let (lo, hi) = self.boundaries();
+
+ let bias = self.bias;
+ let (lo, hi) = (lo ^ bias, hi ^ bias);
+
+ let env = ty::ParamEnv::empty().and(ty);
+ let lo_const = mir::ConstantKind::from_bits(tcx, lo, env);
+ let hi_const = mir::ConstantKind::from_bits(tcx, hi, env);
+
+ let kind = if lo == hi {
+ PatKind::Constant { value: lo_const }
+ } else {
+ PatKind::Range(PatRange { lo: lo_const, hi: hi_const, end: RangeEnd::Included })
+ };
+
+ Pat { ty, span: DUMMY_SP, kind: Box::new(kind) }
+ }
+
+ /// Lint on likely incorrect range patterns (#63987)
+ pub(super) fn lint_overlapping_range_endpoints<'a, 'p: 'a, 'tcx: 'a>(
+ &self,
+ pcx: &PatCtxt<'_, 'p, 'tcx>,
+ pats: impl Iterator<Item = &'a DeconstructedPat<'p, 'tcx>>,
+ column_count: usize,
+ hir_id: HirId,
+ ) {
+ if self.is_singleton() {
+ return;
+ }
+
+ if column_count != 1 {
+ // FIXME: for now, only check for overlapping ranges on simple range
+ // patterns. Otherwise with the current logic the following is detected
+ // as overlapping:
+ // ```
+ // match (0u8, true) {
+ // (0 ..= 125, false) => {}
+ // (125 ..= 255, true) => {}
+ // _ => {}
+ // }
+ // ```
+ return;
+ }
+
+ let overlaps: Vec<_> = pats
+ .filter_map(|pat| Some((pat.ctor().as_int_range()?, pat.span())))
+ .filter(|(range, _)| self.suspicious_intersection(range))
+ .map(|(range, span)| (self.intersection(&range).unwrap(), span))
+ .collect();
+
+ if !overlaps.is_empty() {
+ pcx.cx.tcx.struct_span_lint_hir(
+ lint::builtin::OVERLAPPING_RANGE_ENDPOINTS,
+ hir_id,
+ pcx.span,
+ |lint| {
+ let mut err = lint.build("multiple patterns overlap on their endpoints");
+ for (int_range, span) in overlaps {
+ err.span_label(
+ span,
+ &format!(
+ "this range overlaps on `{}`...",
+ int_range.to_pat(pcx.cx.tcx, pcx.ty)
+ ),
+ );
+ }
+ err.span_label(pcx.span, "... with this range");
+ err.note("you likely meant to write mutually exclusive ranges");
+ err.emit();
+ },
+ );
+ }
+ }
+
+ /// See `Constructor::is_covered_by`
+ fn is_covered_by(&self, other: &Self) -> bool {
+ if self.intersection(other).is_some() {
+ // Constructor splitting should ensure that all intersections we encounter are actually
+ // inclusions.
+ assert!(self.is_subrange(other));
+ true
+ } else {
+ false
+ }
+ }
+}
+
+/// Note: this is often not what we want: e.g. `false` is converted into the range `0..=0` and
+/// would be displayed as such. To render properly, convert to a pattern first.
+impl fmt::Debug for IntRange {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let (lo, hi) = self.boundaries();
+ let bias = self.bias;
+ let (lo, hi) = (lo ^ bias, hi ^ bias);
+ write!(f, "{}", lo)?;
+ write!(f, "{}", RangeEnd::Included)?;
+ write!(f, "{}", hi)
+ }
+}
+
+/// Represents a border between 2 integers. Because the intervals spanning borders must be able to
+/// cover every integer, we need to be able to represent 2^128 + 1 such borders.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+enum IntBorder {
+ JustBefore(u128),
+ AfterMax,
+}
+
+/// A range of integers that is partitioned into disjoint subranges. This does constructor
+/// splitting for integer ranges as explained at the top of the file.
+///
+/// This is fed multiple ranges, and returns an output that covers the input, but is split so that
+/// the only intersections between an output range and a seen range are inclusions. No output range
+/// straddles the boundary of one of the inputs.
+///
+/// The following input:
+/// ```text
+/// |-------------------------| // `self`
+/// |------| |----------| |----|
+/// |-------| |-------|
+/// ```
+/// would be iterated over as follows:
+/// ```text
+/// ||---|--||-|---|---|---|--|
+/// ```
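+///
+/// A worked example (added, not part of the upstream docs): splitting `0..=10` relative to
+/// the seen ranges `2..=5` and `4..=8` collects the borders `JustBefore(2)`, `JustBefore(4)`,
+/// `JustBefore(6)` and `JustBefore(9)`; iterating then yields the disjoint subranges
+/// `0..=1`, `2..=3`, `4..=5`, `6..=8` and `9..=10`.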
+#[derive(Debug, Clone)]
+struct SplitIntRange {
+ /// The range we are splitting
+ range: IntRange,
+ /// The borders of ranges we have seen. They are all contained within `range`. This is kept
+ /// sorted.
+ borders: Vec<IntBorder>,
+}
+
+impl SplitIntRange {
+ fn new(range: IntRange) -> Self {
+ SplitIntRange { range, borders: Vec::new() }
+ }
+
+ /// Internal helper: compute the pair of borders delimiting `r`.
+ fn to_borders(r: IntRange) -> [IntBorder; 2] {
+ use IntBorder::*;
+ let (lo, hi) = r.boundaries();
+ let lo = JustBefore(lo);
+ let hi = match hi.checked_add(1) {
+ Some(m) => JustBefore(m),
+ None => AfterMax,
+ };
+ [lo, hi]
+ }
+
+ /// Add ranges relative to which we split.
+ fn split(&mut self, ranges: impl Iterator<Item = IntRange>) {
+ let this_range = &self.range;
+ let included_ranges = ranges.filter_map(|r| this_range.intersection(&r));
+ let included_borders = included_ranges.flat_map(|r| {
+ let borders = Self::to_borders(r);
+ once(borders[0]).chain(once(borders[1]))
+ });
+ self.borders.extend(included_borders);
+ self.borders.sort_unstable();
+ }
+
+ /// Iterate over the contained ranges.
+ fn iter<'a>(&'a self) -> impl Iterator<Item = IntRange> + Captures<'a> {
+ use IntBorder::*;
+
+ let self_range = Self::to_borders(self.range.clone());
+ // Start with the start of the range.
+ let mut prev_border = self_range[0];
+ self.borders
+ .iter()
+ .copied()
+ // End with the end of the range.
+ .chain(once(self_range[1]))
+ // List pairs of adjacent borders.
+ .map(move |border| {
+ let ret = (prev_border, border);
+ prev_border = border;
+ ret
+ })
+ // Skip duplicates.
+ .filter(|(prev_border, border)| prev_border != border)
+ // Finally, convert to ranges.
+ .map(move |(prev_border, border)| {
+ let range = match (prev_border, border) {
+ (JustBefore(n), JustBefore(m)) if n < m => n..=(m - 1),
+ (JustBefore(n), AfterMax) => n..=u128::MAX,
+ _ => unreachable!(), // Ruled out by the sorting and filtering we did
+ };
+ IntRange { range, bias: self.range.bias }
+ })
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum SliceKind {
+ /// Patterns of length `n` (`[x, y]`).
+ FixedLen(usize),
+ /// Patterns using the `..` notation (`[x, .., y]`).
+ /// Captures any array constructor of `length >= i + j`.
+ /// In the case where `array_len` is `Some(_)`,
+ /// this indicates that we only care about the first `i` and the last `j` values of the array,
+ /// and everything in between is a wildcard `_`.
+ VarLen(usize, usize),
+}
+
+impl SliceKind {
+ fn arity(self) -> usize {
+ match self {
+ FixedLen(length) => length,
+ VarLen(prefix, suffix) => prefix + suffix,
+ }
+ }
+
+ /// Whether this pattern includes patterns of length `other_len`.
+ fn covers_length(self, other_len: usize) -> bool {
+ match self {
+ FixedLen(len) => len == other_len,
+ VarLen(prefix, suffix) => prefix + suffix <= other_len,
+ }
+ }
+}
+
+/// A constructor for array and slice patterns.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub(super) struct Slice {
+ /// `None` if the matched value is a slice, `Some(n)` if it is an array of size `n`.
+ array_len: Option<usize>,
+ /// The kind of pattern it is: fixed-length `[x, y]` or variable length `[x, .., y]`.
+ kind: SliceKind,
+}
+
+impl Slice {
+ fn new(array_len: Option<usize>, kind: SliceKind) -> Self {
+ let kind = match (array_len, kind) {
+ // If the middle `..` is empty, we effectively have a fixed-length pattern.
+ (Some(len), VarLen(prefix, suffix)) if prefix + suffix >= len => FixedLen(len),
+ _ => kind,
+ };
+ Slice { array_len, kind }
+ }
+
+ fn arity(self) -> usize {
+ self.kind.arity()
+ }
+
+ /// See `Constructor::is_covered_by`
+ fn is_covered_by(self, other: Self) -> bool {
+ other.kind.covers_length(self.arity())
+ }
+}
+
+/// This computes constructor splitting for variable-length slices, as explained at the top of the
+/// file.
+///
+/// A slice pattern `[x, .., y]` behaves like the infinite or-pattern `[x, y] | [x, _, y] | [x, _,
+/// _, y] | ...`. The corresponding value constructors are fixed-length array constructors above a
+/// given minimum length. We obviously can't list this infinitude of constructors. Thankfully,
+/// it turns out that for each finite set of slice patterns, all sufficiently large array lengths
+/// are equivalent.
+///
+/// Let's look at an example, where we are trying to split the last pattern:
+/// ```
+/// # fn foo(x: &[bool]) {
+/// match x {
+/// [true, true, ..] => {}
+/// [.., false, false] => {}
+/// [..] => {}
+/// }
+/// # }
+/// ```
+/// Here are the results of specialization for the first few lengths:
+/// ```
+/// # fn foo(x: &[bool]) { match x {
+/// // length 0
+/// [] => {}
+/// // length 1
+/// [_] => {}
+/// // length 2
+/// [true, true] => {}
+/// [false, false] => {}
+/// [_, _] => {}
+/// // length 3
+/// [true, true, _ ] => {}
+/// [_, false, false] => {}
+/// [_, _, _ ] => {}
+/// // length 4
+/// [true, true, _, _ ] => {}
+/// [_, _, false, false] => {}
+/// [_, _, _, _ ] => {}
+/// // length 5
+/// [true, true, _, _, _ ] => {}
+/// [_, _, _, false, false] => {}
+/// [_, _, _, _, _ ] => {}
+/// # _ => {}
+/// # }}
+/// ```
+///
+/// If we went above length 5, we would simply be inserting more columns full of wildcards in the
+/// middle. This means that the set of witnesses for length `l >= 5` is equivalent to the set for
+/// any other `l' >= 5`: simply add or remove wildcards in the middle to convert between them.
+///
+/// This applies to any set of slice patterns: there will be a length `L` above which all lengths
+/// behave the same. This is exactly what we need for constructor splitting. Therefore a
+/// variable-length slice can be split into a variable-length slice of minimal length `L`, and many
+/// fixed-length slices of lengths `< L`.
+///
+/// For each variable-length pattern `p` with a prefix of length `plₚ` and suffix of length `slₚ`,
+/// only the first `plₚ` and the last `slₚ` elements are examined. Therefore, as long as `L` is
+/// positive (to avoid concerns about empty types), all elements after the maximum prefix length
+/// and before the maximum suffix length are not examined by any variable-length pattern, and
+/// therefore can be added/removed without affecting them - creating equivalent patterns from any
+/// sufficiently-large length.
+///
+/// Of course, if fixed-length patterns exist, we must be sure that our length is large enough to
+/// miss them all, so we can pick `L = max(max(FIXED_LEN)+1, max(PREFIX_LEN) + max(SUFFIX_LEN))`
+///
+/// `max_slice` below will be made to have arity `L`.
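+///
+/// A worked example (added, not part of the upstream docs): for the three patterns above,
+/// `max(PREFIX_LEN) = 2`, `max(SUFFIX_LEN) = 2` and there is no fixed-length pattern, so
+/// `max_slice` becomes `VarLen(2, 2)` with arity `L = 4`. Splitting `[..]` then yields the
+/// fixed lengths `[]`, `[_]`, `[_, _]` and `[_, _, _]`, plus the variable-length
+/// `[_, _, .., _, _]` covering every length `>= 4`.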
+#[derive(Debug)]
+struct SplitVarLenSlice {
+ /// If the type is an array, this is its size.
+ array_len: Option<usize>,
+ /// The arity of the input slice.
+ arity: usize,
+ /// The smallest slice bigger than any slice seen. `max_slice.arity()` is the length `L`
+ /// described above.
+ max_slice: SliceKind,
+}
+
+impl SplitVarLenSlice {
+ fn new(prefix: usize, suffix: usize, array_len: Option<usize>) -> Self {
+ SplitVarLenSlice { array_len, arity: prefix + suffix, max_slice: VarLen(prefix, suffix) }
+ }
+
+ /// Pass a set of slices relative to which to split this one.
+ fn split(&mut self, slices: impl Iterator<Item = SliceKind>) {
+ let VarLen(max_prefix_len, max_suffix_len) = &mut self.max_slice else {
+ // No need to split
+ return;
+ };
+ // We grow `self.max_slice` to be larger than all slices encountered, as described above.
+ // For diagnostics, we keep the prefix and suffix lengths separate, but grow them so that
+ // `L = max_prefix_len + max_suffix_len`.
+ let mut max_fixed_len = 0;
+ for slice in slices {
+ match slice {
+ FixedLen(len) => {
+ max_fixed_len = cmp::max(max_fixed_len, len);
+ }
+ VarLen(prefix, suffix) => {
+ *max_prefix_len = cmp::max(*max_prefix_len, prefix);
+ *max_suffix_len = cmp::max(*max_suffix_len, suffix);
+ }
+ }
+ }
+ // We want `L = max(L, max_fixed_len + 1)`, modulo the fact that we keep prefix and
+ // suffix separate.
+ if max_fixed_len + 1 >= *max_prefix_len + *max_suffix_len {
+ // The subtraction can't overflow thanks to the above check.
+ // The new `max_prefix_len` is larger than its previous value.
+ *max_prefix_len = max_fixed_len + 1 - *max_suffix_len;
+ }
+
+ // We cap the arity of `max_slice` at the array size.
+ match self.array_len {
+ Some(len) if self.max_slice.arity() >= len => self.max_slice = FixedLen(len),
+ _ => {}
+ }
+ }
+
+ /// Iterate over the partition of this slice.
+ fn iter<'a>(&'a self) -> impl Iterator<Item = Slice> + Captures<'a> {
+ let smaller_lengths = match self.array_len {
+ // The only admissible fixed-length slice is one of the array size. Whether `max_slice`
+ // is fixed-length or variable-length, it will be the only relevant slice to output
+ // here.
+ Some(_) => 0..0, // empty range
+ // We cover all arities in the range `(self.arity..infinity)`. We split that range into
+ // two: lengths smaller than `max_slice.arity()` are treated independently as
+ // fixed-length slices, and lengths above are captured by `max_slice`.
+ None => self.arity..self.max_slice.arity(),
+ };
+ smaller_lengths
+ .map(FixedLen)
+ .chain(once(self.max_slice))
+ .map(move |kind| Slice::new(self.array_len, kind))
+ }
+}
+
+/// A value can be decomposed into a constructor applied to some fields. This struct represents
+/// the constructor. See also `Fields`.
+///
+/// `pat_constructor` retrieves the constructor corresponding to a pattern.
+/// `specialize_constructor` returns the list of fields corresponding to a pattern, given a
+/// constructor. `Constructor::apply` reconstructs the pattern from a pair of `Constructor` and
+/// `Fields`.
+#[derive(Clone, Debug, PartialEq)]
+pub(super) enum Constructor<'tcx> {
+ /// The constructor for patterns that have a single constructor, like tuples, struct patterns
+ /// and fixed-length arrays.
+ Single,
+ /// Enum variants.
+ Variant(VariantIdx),
+ /// Ranges of integer literal values (`2`, `2..=5` or `2..5`).
+ IntRange(IntRange),
+ /// Ranges of floating-point literal values (`2.0..=5.2`).
+ FloatRange(mir::ConstantKind<'tcx>, mir::ConstantKind<'tcx>, RangeEnd),
+ /// String literals. Strings are not quite the same as `&[u8]` so we treat them separately.
+ Str(mir::ConstantKind<'tcx>),
+ /// Array and slice patterns.
+ Slice(Slice),
+ /// Constants that must not be matched structurally. They are treated as black
+ /// boxes for the purposes of exhaustiveness: we must not inspect them, and they
+ /// don't count towards making a match exhaustive.
+ Opaque,
+ /// Fake extra constructor for enums that aren't allowed to be matched exhaustively. Also used
+ /// for those types for which we cannot list constructors explicitly, like `f64` and `str`.
+ NonExhaustive,
+ /// Stands for constructors that are not seen in the matrix, as explained in the documentation
+ /// for [`SplitWildcard`]. The carried `bool` is used for the `non_exhaustive_omitted_patterns`
+ /// lint.
+ Missing { nonexhaustive_enum_missing_real_variants: bool },
+ /// Wildcard pattern.
+ Wildcard,
+ /// Or-pattern.
+ Or,
+}
+
+impl<'tcx> Constructor<'tcx> {
+ pub(super) fn is_wildcard(&self) -> bool {
+ matches!(self, Wildcard)
+ }
+
+ pub(super) fn is_non_exhaustive(&self) -> bool {
+ matches!(self, NonExhaustive)
+ }
+
+ fn as_int_range(&self) -> Option<&IntRange> {
+ match self {
+ IntRange(range) => Some(range),
+ _ => None,
+ }
+ }
+
+ fn as_slice(&self) -> Option<Slice> {
+ match self {
+ Slice(slice) => Some(*slice),
+ _ => None,
+ }
+ }
+
+ /// Checks if the `Constructor` is a variant and `TyCtxt::eval_stability` returns
+ /// `EvalResult::Deny { .. }`.
+ ///
+ /// This means that the variant has a stdlib unstable feature marking it.
+ pub(super) fn is_unstable_variant(&self, pcx: &PatCtxt<'_, '_, 'tcx>) -> bool {
+ if let Constructor::Variant(idx) = self && let ty::Adt(adt, _) = pcx.ty.kind() {
+ let variant_def_id = adt.variant(*idx).def_id;
+ // Filter variants that depend on a disabled unstable feature.
+ return matches!(
+ pcx.cx.tcx.eval_stability(variant_def_id, None, DUMMY_SP, None),
+ EvalResult::Deny { .. }
+ );
+ }
+ false
+ }
+
+ /// Checks if the `Constructor` is a `Constructor::Variant` with a `#[doc(hidden)]`
+ /// attribute from a type not local to the current crate.
+ pub(super) fn is_doc_hidden_variant(&self, pcx: &PatCtxt<'_, '_, 'tcx>) -> bool {
+ if let Constructor::Variant(idx) = self && let ty::Adt(adt, _) = pcx.ty.kind() {
+ let variant_def_id = adt.variants()[*idx].def_id;
+ return pcx.cx.tcx.is_doc_hidden(variant_def_id) && !variant_def_id.is_local();
+ }
+ false
+ }
+
+ fn variant_index_for_adt(&self, adt: ty::AdtDef<'tcx>) -> VariantIdx {
+ match *self {
+ Variant(idx) => idx,
+ Single => {
+ assert!(!adt.is_enum());
+ VariantIdx::new(0)
+ }
+ _ => bug!("bad constructor {:?} for adt {:?}", self, adt),
+ }
+ }
+
+ /// The number of fields for this constructor. This must be kept in sync with
+ /// `Fields::wildcards`.
+ pub(super) fn arity(&self, pcx: &PatCtxt<'_, '_, 'tcx>) -> usize {
+ match self {
+ Single | Variant(_) => match pcx.ty.kind() {
+ ty::Tuple(fs) => fs.len(),
+ ty::Ref(..) => 1,
+ ty::Adt(adt, ..) => {
+ if adt.is_box() {
+ // The only legal patterns of type `Box` (outside `std`) are `_` and box
+ // patterns. If we're here we can assume this is a box pattern.
+ 1
+ } else {
+ let variant = &adt.variant(self.variant_index_for_adt(*adt));
+ Fields::list_variant_nonhidden_fields(pcx.cx, pcx.ty, variant).count()
+ }
+ }
+ _ => bug!("Unexpected type for `Single` constructor: {:?}", pcx.ty),
+ },
+ Slice(slice) => slice.arity(),
+ Str(..)
+ | FloatRange(..)
+ | IntRange(..)
+ | NonExhaustive
+ | Opaque
+ | Missing { .. }
+ | Wildcard => 0,
+ Or => bug!("The `Or` constructor doesn't have a fixed arity"),
+ }
+ }
+
+ /// Some constructors (namely `Wildcard`, `IntRange` and `Slice`) stand for a whole set of
+ /// underlying constructors (like variants, integers or fixed-sized slices). When specializing for
+ /// these constructors, we want to be specializing for the underlying constructors.
+ /// Naively, we would simply return the list of constructors they correspond to. Instead, we are
+ /// more clever: if there are constructors that we know will behave the same wrt the current
+ /// matrix, we keep them grouped. For example, all slices of a sufficiently large length
+ /// will either be all useful or all non-useful with a given matrix.
+ ///
+ /// See the branches for details on how the splitting is done.
+ ///
+ /// This function may discard some irrelevant constructors if this preserves behavior and
+ /// diagnostics. E.g. for the `_` case, we ignore the constructors already present in the
+ /// matrix, unless all of them are.
+ pub(super) fn split<'a>(
+ &self,
+ pcx: &PatCtxt<'_, '_, 'tcx>,
+ ctors: impl Iterator<Item = &'a Constructor<'tcx>> + Clone,
+ ) -> SmallVec<[Self; 1]>
+ where
+ 'tcx: 'a,
+ {
+ match self {
+ Wildcard => {
+ let mut split_wildcard = SplitWildcard::new(pcx);
+ split_wildcard.split(pcx, ctors);
+ split_wildcard.into_ctors(pcx)
+ }
+ // Fast-track if the range is trivial. In particular, we don't do the overlapping
+ // ranges check.
+ IntRange(ctor_range) if !ctor_range.is_singleton() => {
+ let mut split_range = SplitIntRange::new(ctor_range.clone());
+ let int_ranges = ctors.filter_map(|ctor| ctor.as_int_range());
+ split_range.split(int_ranges.cloned());
+ split_range.iter().map(IntRange).collect()
+ }
+ &Slice(Slice { kind: VarLen(self_prefix, self_suffix), array_len }) => {
+ let mut split_self = SplitVarLenSlice::new(self_prefix, self_suffix, array_len);
+ let slices = ctors.filter_map(|c| c.as_slice()).map(|s| s.kind);
+ split_self.split(slices);
+ split_self.iter().map(Slice).collect()
+ }
+ // Any other constructor can be used unchanged.
+ _ => smallvec![self.clone()],
+ }
+ }
+
+ /// Returns whether `self` is covered by `other`, i.e. whether `self` is a subset of `other`.
+ /// For the simple cases, this is simply checking for equality. For the "grouped" constructors,
+ /// this checks for inclusion.
+ // We inline because this has a single call site in `Matrix::specialize_constructor`.
+ #[inline]
+ pub(super) fn is_covered_by<'p>(&self, pcx: &PatCtxt<'_, 'p, 'tcx>, other: &Self) -> bool {
+ // This must be kept in sync with `is_covered_by_any`.
+ match (self, other) {
+ // Wildcards cover anything
+ (_, Wildcard) => true,
+ // The missing ctors are not covered by anything in the matrix except wildcards.
+ (Missing { .. } | Wildcard, _) => false,
+
+ (Single, Single) => true,
+ (Variant(self_id), Variant(other_id)) => self_id == other_id,
+
+ (IntRange(self_range), IntRange(other_range)) => self_range.is_covered_by(other_range),
+ (
+ FloatRange(self_from, self_to, self_end),
+ FloatRange(other_from, other_to, other_end),
+ ) => {
+ match (
+ compare_const_vals(pcx.cx.tcx, *self_to, *other_to, pcx.cx.param_env),
+ compare_const_vals(pcx.cx.tcx, *self_from, *other_from, pcx.cx.param_env),
+ ) {
+ (Some(to), Some(from)) => {
+ (from == Ordering::Greater || from == Ordering::Equal)
+ && (to == Ordering::Less
+ || (other_end == self_end && to == Ordering::Equal))
+ }
+ _ => false,
+ }
+ }
+ (Str(self_val), Str(other_val)) => {
+ // FIXME Once valtrees are available we can directly use the bytes
+ // in the `Str` variant of the valtree for the comparison here.
+ self_val == other_val
+ }
+ (Slice(self_slice), Slice(other_slice)) => self_slice.is_covered_by(*other_slice),
+
+ // We are trying to inspect an opaque constant. Thus we skip the row.
+ (Opaque, _) | (_, Opaque) => false,
+ // Only a wildcard pattern can match the special extra constructor.
+ (NonExhaustive, _) => false,
+
+ _ => span_bug!(
+ pcx.span,
+ "trying to compare incompatible constructors {:?} and {:?}",
+ self,
+ other
+ ),
+ }
+ }
+
+ /// Faster version of `is_covered_by` when applied to many constructors. `used_ctors` is
+ /// assumed to be built from `matrix.head_ctors()` with wildcards filtered out, and `self` is
+ /// assumed to have been split from a wildcard.
+ fn is_covered_by_any<'p>(
+ &self,
+ pcx: &PatCtxt<'_, 'p, 'tcx>,
+ used_ctors: &[Constructor<'tcx>],
+ ) -> bool {
+ if used_ctors.is_empty() {
+ return false;
+ }
+
+ // This must be kept in sync with `is_covered_by`.
+ match self {
+ // If `self` is `Single`, `used_ctors` cannot contain anything other than `Single`s.
+ Single => !used_ctors.is_empty(),
+ Variant(vid) => used_ctors.iter().any(|c| matches!(c, Variant(i) if i == vid)),
+ IntRange(range) => used_ctors
+ .iter()
+ .filter_map(|c| c.as_int_range())
+ .any(|other| range.is_covered_by(other)),
+ Slice(slice) => used_ctors
+ .iter()
+ .filter_map(|c| c.as_slice())
+ .any(|other| slice.is_covered_by(other)),
+ // This constructor is never covered by anything else
+ NonExhaustive => false,
+ Str(..) | FloatRange(..) | Opaque | Missing { .. } | Wildcard | Or => {
+ span_bug!(pcx.span, "found unexpected ctor in all_ctors: {:?}", self)
+ }
+ }
+ }
+}
+
+/// A wildcard constructor that we split relative to the constructors in the matrix, as explained
+/// at the top of the file.
+///
+/// A constructor that is not present in the matrix rows will only be covered by the rows that have
+/// wildcards. Thus we can group all of those constructors together; we call them "missing
+/// constructors". Splitting a wildcard would therefore list all present constructors individually
+/// (or grouped if they are integers or slices), and then all missing constructors together as a
+/// group.
+///
+/// However we can go further: since any constructor will match the wildcard rows, and having more
+/// rows can only reduce the number of usefulness witnesses, we can skip the present constructors
+/// and only try the missing ones.
+/// This will not preserve the whole list of witnesses, but will preserve whether the list is empty
+/// or not. In fact this is quite natural from the point of view of diagnostics too. This is done
+/// in `to_ctors`: in some cases we only return `Missing`.
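+///
+/// As an illustrative sketch (the enum is made up, not from the original source):
+///
+/// ```compile_fail,E0004
+/// enum Direction { N, S, E, W }
+/// # let dir = Direction::N;
+/// match dir {
+///     Direction::N => {}
+/// }
+/// ```
+///
+/// Splitting the wildcard relative to the single row `N` groups the missing constructors
+/// `S`, `E` and `W` together; they are what ends up reported as witnesses.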
+#[derive(Debug)]
+pub(super) struct SplitWildcard<'tcx> {
+ /// Constructors seen in the matrix.
+ matrix_ctors: Vec<Constructor<'tcx>>,
+ /// All the constructors for this type
+ all_ctors: SmallVec<[Constructor<'tcx>; 1]>,
+}
+
+impl<'tcx> SplitWildcard<'tcx> {
+ pub(super) fn new<'p>(pcx: &PatCtxt<'_, 'p, 'tcx>) -> Self {
+ debug!("SplitWildcard::new({:?})", pcx.ty);
+ let cx = pcx.cx;
+ let make_range = |start, end| {
+ IntRange(
+ // `unwrap()` is ok because we know the type is an integer.
+ IntRange::from_range(cx.tcx, start, end, pcx.ty, &RangeEnd::Included).unwrap(),
+ )
+ };
+ // This determines the set of all possible constructors for the type `pcx.ty`. For numbers,
+ // arrays and slices we use ranges and variable-length slices when appropriate.
+ //
+ // If the `exhaustive_patterns` feature is enabled, we make sure to omit constructors that
+ // are statically impossible. E.g., for `Option<!>`, we do not include `Some(_)` in the
+ // returned list of constructors.
+ // Invariant: this is empty if and only if the type is uninhabited (as determined by
+ // `cx.is_uninhabited()`).
+ let all_ctors = match pcx.ty.kind() {
+ ty::Bool => smallvec![make_range(0, 1)],
+ ty::Array(sub_ty, len) if len.try_eval_usize(cx.tcx, cx.param_env).is_some() => {
+ let len = len.eval_usize(cx.tcx, cx.param_env) as usize;
+ if len != 0 && cx.is_uninhabited(*sub_ty) {
+ smallvec![]
+ } else {
+ smallvec![Slice(Slice::new(Some(len), VarLen(0, 0)))]
+ }
+ }
+ // Treat arrays of a constant but unknown length like slices.
+ ty::Array(sub_ty, _) | ty::Slice(sub_ty) => {
+ let kind = if cx.is_uninhabited(*sub_ty) { FixedLen(0) } else { VarLen(0, 0) };
+ smallvec![Slice(Slice::new(None, kind))]
+ }
+ ty::Adt(def, substs) if def.is_enum() => {
+ // If the enum is declared as `#[non_exhaustive]`, we treat it as if it had an
+ // additional "unknown" constructor.
+ // There is no point in listing every possible variant as a witness, because the
+ // user can't actually match against all of them themselves. So we add the
+ // fictitious constructor, ensuring that `_` is reported as the witness.
+ // E.g., in an example like:
+ //
+ // ```
+ // let err: io::ErrorKind = ...;
+ // match err {
+ // io::ErrorKind::NotFound => {},
+ // }
+ // ```
+ //
+ // we don't want to show every possible IO error, but instead have only `_` as the
+ // witness.
+ let is_declared_nonexhaustive = cx.is_foreign_non_exhaustive_enum(pcx.ty);
+
+ let is_exhaustive_pat_feature = cx.tcx.features().exhaustive_patterns;
+
+ // If `exhaustive_patterns` is disabled and our scrutinee is an empty enum, we treat it
+ // as though it had an "unknown" constructor to avoid exposing its emptiness. The
+ // exception is if the pattern is at the top level, because we want empty matches to be
+ // considered exhaustive.
+ let is_secretly_empty =
+ def.variants().is_empty() && !is_exhaustive_pat_feature && !pcx.is_top_level;
+
+ let mut ctors: SmallVec<[_; 1]> = def
+ .variants()
+ .iter_enumerated()
+ .filter(|(_, v)| {
+ // If `exhaustive_patterns` is enabled, we exclude variants known to be
+ // uninhabited.
+ let is_uninhabited = is_exhaustive_pat_feature
+ && v.uninhabited_from(cx.tcx, substs, def.adt_kind(), cx.param_env)
+ .contains(cx.tcx, cx.module);
+ !is_uninhabited
+ })
+ .map(|(idx, _)| Variant(idx))
+ .collect();
+
+ if is_secretly_empty || is_declared_nonexhaustive {
+ ctors.push(NonExhaustive);
+ }
+ ctors
+ }
+ ty::Char => {
+ smallvec![
+ // The valid Unicode Scalar Value ranges.
+ make_range('\u{0000}' as u128, '\u{D7FF}' as u128),
+ make_range('\u{E000}' as u128, '\u{10FFFF}' as u128),
+ ]
+ }
+ ty::Int(_) | ty::Uint(_)
+ if pcx.ty.is_ptr_sized_integral()
+ && !cx.tcx.features().precise_pointer_size_matching =>
+ {
+ // `usize`/`isize` are not allowed to be matched exhaustively unless the
+ // `precise_pointer_size_matching` feature is enabled. So we treat those types like
+ // `#[non_exhaustive]` enums by returning a special unmatchable constructor.
+ smallvec![NonExhaustive]
+ }
+ &ty::Int(ity) => {
+ let bits = Integer::from_int_ty(&cx.tcx, ity).size().bits() as u128;
+ let min = 1u128 << (bits - 1);
+ let max = min - 1;
+ smallvec![make_range(min, max)]
+ }
+ &ty::Uint(uty) => {
+ let size = Integer::from_uint_ty(&cx.tcx, uty).size();
+ let max = size.truncate(u128::MAX);
+ smallvec![make_range(0, max)]
+ }
+ // If `exhaustive_patterns` is disabled and our scrutinee is the never type, we cannot
+ // expose its emptiness. The exception is if the pattern is at the top level, because we
+ // want empty matches to be considered exhaustive.
+ ty::Never if !cx.tcx.features().exhaustive_patterns && !pcx.is_top_level => {
+ smallvec![NonExhaustive]
+ }
+ ty::Never => smallvec![],
+ _ if cx.is_uninhabited(pcx.ty) => smallvec![],
+ ty::Adt(..) | ty::Tuple(..) | ty::Ref(..) => smallvec![Single],
+ // This type is one for which we cannot list constructors, like `str` or `f64`.
+ _ => smallvec![NonExhaustive],
+ };
+
+ SplitWildcard { matrix_ctors: Vec::new(), all_ctors }
+ }
+
+ /// Pass a set of constructors relative to which to split this one. Don't call this twice;
+ /// it won't do what you want.
+ pub(super) fn split<'a>(
+ &mut self,
+ pcx: &PatCtxt<'_, '_, 'tcx>,
+ ctors: impl Iterator<Item = &'a Constructor<'tcx>> + Clone,
+ ) where
+ 'tcx: 'a,
+ {
+ // Since `all_ctors` never contains wildcards, this won't recurse further.
+ self.all_ctors =
+ self.all_ctors.iter().flat_map(|ctor| ctor.split(pcx, ctors.clone())).collect();
+ self.matrix_ctors = ctors.filter(|c| !c.is_wildcard()).cloned().collect();
+ }
+
+ /// Whether there are any value constructors for this type that are not present in the matrix.
+ fn any_missing(&self, pcx: &PatCtxt<'_, '_, 'tcx>) -> bool {
+ self.iter_missing(pcx).next().is_some()
+ }
+
+ /// Iterate over the constructors for this type that are not present in the matrix.
+ pub(super) fn iter_missing<'a, 'p>(
+ &'a self,
+ pcx: &'a PatCtxt<'a, 'p, 'tcx>,
+ ) -> impl Iterator<Item = &'a Constructor<'tcx>> + Captures<'p> {
+ self.all_ctors.iter().filter(move |ctor| !ctor.is_covered_by_any(pcx, &self.matrix_ctors))
+ }
+
+ /// Return the set of constructors resulting from splitting the wildcard. As explained at the
+ /// top of the file, if any constructors are missing we can ignore the present ones.
+ fn into_ctors(self, pcx: &PatCtxt<'_, '_, 'tcx>) -> SmallVec<[Constructor<'tcx>; 1]> {
+ if self.any_missing(pcx) {
+ // Some constructors are missing, thus we can specialize with the special `Missing`
+ // constructor, which stands for those constructors that are not seen in the matrix,
+ // and matches the same rows as any of them (namely the wildcard rows). See the top of
+ // the file for details.
+ // However, when all constructors are missing we can also specialize with the full
+ // `Wildcard` constructor. The difference will depend on what we want in diagnostics.
+
+ // If some constructors are missing, we typically want to report those constructors,
+ // e.g.:
+ // ```
+ // enum Direction { N, S, E, W }
+ // let Direction::N = ...;
+ // ```
+ // we can report 3 witnesses: `S`, `E`, and `W`.
+ //
+ // However, if the user didn't actually specify a constructor
+ // in this arm, e.g., in
+ // ```
+ // let x: (Direction, Direction, bool) = ...;
+ // let (_, _, false) = x;
+ // ```
+ // we don't want to show all 16 possible witnesses `(<direction-1>, <direction-2>,
+ // true)` - we are satisfied with `(_, _, true)`. So if all constructors are missing we
+ // prefer to report just a wildcard `_`.
+ //
+ // The exception is: if we are at the top-level, for example in an empty match, we
+ // sometimes prefer reporting the list of constructors instead of just `_`.
+ let report_when_all_missing = pcx.is_top_level && !IntRange::is_integral(pcx.ty);
+ let ctor = if !self.matrix_ctors.is_empty() || report_when_all_missing {
+ if pcx.is_non_exhaustive {
+ Missing {
+ nonexhaustive_enum_missing_real_variants: self
+ .iter_missing(pcx)
+ .any(|c| !(c.is_non_exhaustive() || c.is_unstable_variant(pcx))),
+ }
+ } else {
+ Missing { nonexhaustive_enum_missing_real_variants: false }
+ }
+ } else {
+ Wildcard
+ };
+ return smallvec![ctor];
+ }
+
+ // All the constructors are present in the matrix, so we just go through them all.
+ self.all_ctors
+ }
+}
+
+/// A value can be decomposed into a constructor applied to some fields. This struct represents
+/// those fields, generalized to allow patterns in each field. See also `Constructor`.
+///
+/// This is constructed for a constructor using [`Fields::wildcards()`]. The idea is that
+/// [`Fields::wildcards()`] constructs a list of fields where all entries are wildcards, and then
+/// given a pattern we fill some of the fields with its subpatterns.
+/// In the following example, `Fields::wildcards` returns `[_, _, _, _]`. Then, when the
+/// pattern below is lowered (see `DeconstructedPat::from_pat`), some of the entries are
+/// filled, and the result is `[Some(0), _, _, _]`.
+/// ```compile_fail,E0004
+/// # fn foo() -> [Option<u8>; 4] { [None; 4] }
+/// let x: [Option<u8>; 4] = foo();
+/// match x {
+/// [Some(0), ..] => {}
+/// }
+/// ```
+///
+/// Note that the number of fields of a constructor may not match the fields declared in the
+/// original struct/variant. This happens if a private or `non_exhaustive` field is uninhabited,
+/// because the code mustn't observe that it is uninhabited. In that case that field is not
+/// included in `fields`. For that reason, when you have a `mir::Field` you must use
+/// `index_with_declared_idx`.
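+///
+/// An illustrative sketch (the types are made up, not from the original source):
+///
+/// ```
+/// mod inner {
+///     pub enum Void {}
+///     pub struct Foo {
+///         pub visible: u32,
+///         hidden: Void, // private and uninhabited: omitted from `Fields`
+///     }
+/// }
+/// ```
+///
+/// A pattern of type `Foo` matched from outside `inner` gets a single field (`visible`), so
+/// its `Fields` has length 1 even though the struct declares two fields.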
+#[derive(Debug, Clone, Copy)]
+pub(super) struct Fields<'p, 'tcx> {
+ fields: &'p [DeconstructedPat<'p, 'tcx>],
+}
+
+impl<'p, 'tcx> Fields<'p, 'tcx> {
+ fn empty() -> Self {
+ Fields { fields: &[] }
+ }
+
+ fn singleton(cx: &MatchCheckCtxt<'p, 'tcx>, field: DeconstructedPat<'p, 'tcx>) -> Self {
+ let field: &_ = cx.pattern_arena.alloc(field);
+ Fields { fields: std::slice::from_ref(field) }
+ }
+
+ pub(super) fn from_iter(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ fields: impl IntoIterator<Item = DeconstructedPat<'p, 'tcx>>,
+ ) -> Self {
+ let fields: &[_] = cx.pattern_arena.alloc_from_iter(fields);
+ Fields { fields }
+ }
+
+ fn wildcards_from_tys(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ tys: impl IntoIterator<Item = Ty<'tcx>>,
+ ) -> Self {
+ Fields::from_iter(cx, tys.into_iter().map(DeconstructedPat::wildcard))
+ }
+
+ // In the cases of either a `#[non_exhaustive]` field list or a non-public field, we hide
+ // uninhabited fields in order not to reveal the uninhabitedness of the whole variant.
+ // This lists the fields we keep along with their types.
+ fn list_variant_nonhidden_fields<'a>(
+ cx: &'a MatchCheckCtxt<'p, 'tcx>,
+ ty: Ty<'tcx>,
+ variant: &'a VariantDef,
+ ) -> impl Iterator<Item = (Field, Ty<'tcx>)> + Captures<'a> + Captures<'p> {
+ let ty::Adt(adt, substs) = ty.kind() else { bug!() };
+ // Whether we must not match the fields of this variant exhaustively.
+ let is_non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did().is_local();
+
+ variant.fields.iter().enumerate().filter_map(move |(i, field)| {
+ let ty = field.ty(cx.tcx, substs);
+ // `field.ty()` doesn't normalize after substituting.
+ let ty = cx.tcx.normalize_erasing_regions(cx.param_env, ty);
+ let is_visible = adt.is_enum() || field.vis.is_accessible_from(cx.module, cx.tcx);
+ let is_uninhabited = cx.is_uninhabited(ty);
+
+ if is_uninhabited && (!is_visible || is_non_exhaustive) {
+ None
+ } else {
+ Some((Field::new(i), ty))
+ }
+ })
+ }
+
+ /// Creates a new list of wildcard fields for a given constructor. The result must have a
+ /// length of `constructor.arity()`.
+ #[instrument(level = "trace")]
+ pub(super) fn wildcards(pcx: &PatCtxt<'_, 'p, 'tcx>, constructor: &Constructor<'tcx>) -> Self {
+ let ret = match constructor {
+ Single | Variant(_) => match pcx.ty.kind() {
+ ty::Tuple(fs) => Fields::wildcards_from_tys(pcx.cx, fs.iter()),
+ ty::Ref(_, rty, _) => Fields::wildcards_from_tys(pcx.cx, once(*rty)),
+ ty::Adt(adt, substs) => {
+ if adt.is_box() {
+ // The only legal patterns of type `Box` (outside `std`) are `_` and box
+ // patterns. If we're here we can assume this is a box pattern.
+ Fields::wildcards_from_tys(pcx.cx, once(substs.type_at(0)))
+ } else {
+ let variant = &adt.variant(constructor.variant_index_for_adt(*adt));
+ let tys = Fields::list_variant_nonhidden_fields(pcx.cx, pcx.ty, variant)
+ .map(|(_, ty)| ty);
+ Fields::wildcards_from_tys(pcx.cx, tys)
+ }
+ }
+ _ => bug!("Unexpected type for `Single` constructor: {:?}", pcx),
+ },
+ Slice(slice) => match *pcx.ty.kind() {
+ ty::Slice(ty) | ty::Array(ty, _) => {
+ let arity = slice.arity();
+ Fields::wildcards_from_tys(pcx.cx, (0..arity).map(|_| ty))
+ }
+ _ => bug!("bad slice pattern {:?} {:?}", constructor, pcx),
+ },
+ Str(..)
+ | FloatRange(..)
+ | IntRange(..)
+ | NonExhaustive
+ | Opaque
+ | Missing { .. }
+ | Wildcard => Fields::empty(),
+ Or => {
+ bug!("called `Fields::wildcards` on an `Or` ctor")
+ }
+ };
+ debug!(?ret);
+ ret
+ }
+
+ /// Returns the list of patterns.
+ pub(super) fn iter_patterns<'a>(
+ &'a self,
+ ) -> impl Iterator<Item = &'p DeconstructedPat<'p, 'tcx>> + Captures<'a> {
+ self.fields.iter()
+ }
+}
+
+/// Values and patterns can be represented as a constructor applied to some fields. This represents
+/// a pattern in this form.
+/// This also keeps track of whether the pattern has been found reachable during analysis.
+/// For this reason we should be careful not to clone patterns whose reachability we care
+/// about. Use `clone_and_forget_reachability` if you're sure.
+pub(crate) struct DeconstructedPat<'p, 'tcx> {
+ ctor: Constructor<'tcx>,
+ fields: Fields<'p, 'tcx>,
+ ty: Ty<'tcx>,
+ span: Span,
+ reachable: Cell<bool>,
+}
+
+impl<'p, 'tcx> DeconstructedPat<'p, 'tcx> {
+ pub(super) fn wildcard(ty: Ty<'tcx>) -> Self {
+ Self::new(Wildcard, Fields::empty(), ty, DUMMY_SP)
+ }
+
+ pub(super) fn new(
+ ctor: Constructor<'tcx>,
+ fields: Fields<'p, 'tcx>,
+ ty: Ty<'tcx>,
+ span: Span,
+ ) -> Self {
+ DeconstructedPat { ctor, fields, ty, span, reachable: Cell::new(false) }
+ }
+
+ /// Construct a pattern that matches everything that starts with this constructor.
+ /// For example, if `ctor` is a `Constructor::Variant` for `Option::Some`, we get the pattern
+ /// `Some(_)`.
+ pub(super) fn wild_from_ctor(pcx: &PatCtxt<'_, 'p, 'tcx>, ctor: Constructor<'tcx>) -> Self {
+ let fields = Fields::wildcards(pcx, &ctor);
+ DeconstructedPat::new(ctor, fields, pcx.ty, DUMMY_SP)
+ }
+
+ /// Clone this value. This method emphasizes that cloning loses reachability information and
+ /// should be done carefully.
+ pub(super) fn clone_and_forget_reachability(&self) -> Self {
+ DeconstructedPat::new(self.ctor.clone(), self.fields, self.ty, self.span)
+ }
+
+ pub(crate) fn from_pat(cx: &MatchCheckCtxt<'p, 'tcx>, pat: &Pat<'tcx>) -> Self {
+ let mkpat = |pat| DeconstructedPat::from_pat(cx, pat);
+ let ctor;
+ let fields;
+ match pat.kind.as_ref() {
+ PatKind::AscribeUserType { subpattern, .. } => return mkpat(subpattern),
+ PatKind::Binding { subpattern: Some(subpat), .. } => return mkpat(subpat),
+ PatKind::Binding { subpattern: None, .. } | PatKind::Wild => {
+ ctor = Wildcard;
+ fields = Fields::empty();
+ }
+ PatKind::Deref { subpattern } => {
+ ctor = Single;
+ fields = Fields::singleton(cx, mkpat(subpattern));
+ }
+ PatKind::Leaf { subpatterns } | PatKind::Variant { subpatterns, .. } => {
+ match pat.ty.kind() {
+ ty::Tuple(fs) => {
+ ctor = Single;
+ let mut wilds: SmallVec<[_; 2]> =
+ fs.iter().map(DeconstructedPat::wildcard).collect();
+ for pat in subpatterns {
+ wilds[pat.field.index()] = mkpat(&pat.pattern);
+ }
+ fields = Fields::from_iter(cx, wilds);
+ }
+ ty::Adt(adt, substs) if adt.is_box() => {
+ // The only legal patterns of type `Box` (outside `std`) are `_` and box
+ // patterns. If we're here we can assume this is a box pattern.
+ // FIXME(Nadrieril): A `Box` can in theory be matched either with `Box(_,
+ // _)` or a box pattern. As a hack to avoid an ICE with the former, we
+ // ignore other fields than the first one. This will trigger an error later
+ // anyway.
+ // See https://github.com/rust-lang/rust/issues/82772 ,
+ // explanation: https://github.com/rust-lang/rust/pull/82789#issuecomment-796921977
+ // The problem is that we can't know from the type whether we'll match
+ // normally or through box-patterns. We'll have to figure out a proper
+ // solution when we introduce generalized deref patterns. Also need to
+ // prevent mixing of those two options.
+ let pat = subpatterns.into_iter().find(|pat| pat.field.index() == 0);
+ let pat = if let Some(pat) = pat {
+ mkpat(&pat.pattern)
+ } else {
+ DeconstructedPat::wildcard(substs.type_at(0))
+ };
+ ctor = Single;
+ fields = Fields::singleton(cx, pat);
+ }
+ ty::Adt(adt, _) => {
+ ctor = match pat.kind.as_ref() {
+ PatKind::Leaf { .. } => Single,
+ PatKind::Variant { variant_index, .. } => Variant(*variant_index),
+ _ => bug!(),
+ };
+ let variant = &adt.variant(ctor.variant_index_for_adt(*adt));
+ // For each field in the variant, we store the relevant index into `self.fields` if any.
+ let mut field_id_to_id: Vec<Option<usize>> =
+ (0..variant.fields.len()).map(|_| None).collect();
+ let tys = Fields::list_variant_nonhidden_fields(cx, pat.ty, variant)
+ .enumerate()
+ .map(|(i, (field, ty))| {
+ field_id_to_id[field.index()] = Some(i);
+ ty
+ });
+ let mut wilds: SmallVec<[_; 2]> =
+ tys.map(DeconstructedPat::wildcard).collect();
+ for pat in subpatterns {
+ if let Some(i) = field_id_to_id[pat.field.index()] {
+ wilds[i] = mkpat(&pat.pattern);
+ }
+ }
+ fields = Fields::from_iter(cx, wilds);
+ }
+ _ => bug!("pattern has unexpected type: pat: {:?}, ty: {:?}", pat, pat.ty),
+ }
+ }
+ PatKind::Constant { value } => {
+ if let Some(int_range) = IntRange::from_constant(cx.tcx, cx.param_env, *value) {
+ ctor = IntRange(int_range);
+ fields = Fields::empty();
+ } else {
+ match pat.ty.kind() {
+ ty::Float(_) => {
+ ctor = FloatRange(*value, *value, RangeEnd::Included);
+ fields = Fields::empty();
+ }
+ ty::Ref(_, t, _) if t.is_str() => {
+ // We want a `&str` constant to behave like a `Deref` pattern, to be compatible
+ // with other `Deref` patterns. This could have been done in `const_to_pat`,
+ // but that causes issues with the rest of the matching code.
+ // So here, the constructor for a `"foo"` pattern is `&` (represented by
+ // `Single`), and has one field. That field has constructor `Str(value)` and no
+ // fields.
+ // Note: `t` is `str`, not `&str`.
+ let subpattern =
+ DeconstructedPat::new(Str(*value), Fields::empty(), *t, pat.span);
+ ctor = Single;
+ fields = Fields::singleton(cx, subpattern)
+ }
+ // All constants that can be structurally matched have already been expanded
+ // into the corresponding `Pat`s by `const_to_pat`. Constants that remain are
+ // opaque.
+ _ => {
+ ctor = Opaque;
+ fields = Fields::empty();
+ }
+ }
+ }
+ }
+ &PatKind::Range(PatRange { lo, hi, end }) => {
+ let ty = lo.ty();
+ ctor = if let Some(int_range) = IntRange::from_range(
+ cx.tcx,
+ lo.eval_bits(cx.tcx, cx.param_env, lo.ty()),
+ hi.eval_bits(cx.tcx, cx.param_env, hi.ty()),
+ ty,
+ &end,
+ ) {
+ IntRange(int_range)
+ } else {
+ FloatRange(lo, hi, end)
+ };
+ fields = Fields::empty();
+ }
+ PatKind::Array { prefix, slice, suffix } | PatKind::Slice { prefix, slice, suffix } => {
+ let array_len = match pat.ty.kind() {
+ ty::Array(_, length) => Some(length.eval_usize(cx.tcx, cx.param_env) as usize),
+ ty::Slice(_) => None,
+ _ => span_bug!(pat.span, "bad ty {:?} for slice pattern", pat.ty),
+ };
+ let kind = if slice.is_some() {
+ VarLen(prefix.len(), suffix.len())
+ } else {
+ FixedLen(prefix.len() + suffix.len())
+ };
+ ctor = Slice(Slice::new(array_len, kind));
+ fields = Fields::from_iter(cx, prefix.iter().chain(suffix).map(mkpat));
+ }
+ PatKind::Or { .. } => {
+ ctor = Or;
+ let pats = expand_or_pat(pat);
+ fields = Fields::from_iter(cx, pats.into_iter().map(mkpat));
+ }
+ }
+ DeconstructedPat::new(ctor, fields, pat.ty, pat.span)
+ }
+
+ pub(crate) fn to_pat(&self, cx: &MatchCheckCtxt<'p, 'tcx>) -> Pat<'tcx> {
+ let is_wildcard = |pat: &Pat<'_>| {
+ matches!(*pat.kind, PatKind::Binding { subpattern: None, .. } | PatKind::Wild)
+ };
+ let mut subpatterns = self.iter_fields().map(|p| p.to_pat(cx));
+ let pat = match &self.ctor {
+ Single | Variant(_) => match self.ty.kind() {
+ ty::Tuple(..) => PatKind::Leaf {
+ subpatterns: subpatterns
+ .enumerate()
+ .map(|(i, p)| FieldPat { field: Field::new(i), pattern: p })
+ .collect(),
+ },
+ ty::Adt(adt_def, _) if adt_def.is_box() => {
+ // Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside
+ // of `std`). So this branch is only reachable when the feature is enabled and
+ // the pattern is a box pattern.
+ PatKind::Deref { subpattern: subpatterns.next().unwrap() }
+ }
+ ty::Adt(adt_def, substs) => {
+ let variant_index = self.ctor.variant_index_for_adt(*adt_def);
+ let variant = &adt_def.variant(variant_index);
+ let subpatterns = Fields::list_variant_nonhidden_fields(cx, self.ty, variant)
+ .zip(subpatterns)
+ .map(|((field, _ty), pattern)| FieldPat { field, pattern })
+ .collect();
+
+ if adt_def.is_enum() {
+ PatKind::Variant { adt_def: *adt_def, substs, variant_index, subpatterns }
+ } else {
+ PatKind::Leaf { subpatterns }
+ }
+ }
+ // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should
+ // be careful to reconstruct the correct constant pattern here. However a string
+ // literal pattern will never be reported as a non-exhaustiveness witness, so we
+ // ignore this issue.
+ ty::Ref(..) => PatKind::Deref { subpattern: subpatterns.next().unwrap() },
+ _ => bug!("unexpected ctor for type {:?} {:?}", self.ctor, self.ty),
+ },
+ Slice(slice) => {
+ match slice.kind {
+ FixedLen(_) => PatKind::Slice {
+ prefix: subpatterns.collect(),
+ slice: None,
+ suffix: vec![],
+ },
+ VarLen(prefix, _) => {
+ let mut subpatterns = subpatterns.peekable();
+ let mut prefix: Vec<_> = subpatterns.by_ref().take(prefix).collect();
+ if slice.array_len.is_some() {
+ // Improves diagnostics a bit: if the type is a known-size array, instead
+ // of reporting `[x, _, .., _, y]`, we prefer to report `[x, .., y]`.
+ // This is incorrect if the size is not known, since `[_, ..]` captures
+ // arrays of lengths `>= 1` whereas `[..]` captures any length.
+ while !prefix.is_empty() && is_wildcard(prefix.last().unwrap()) {
+ prefix.pop();
+ }
+ while subpatterns.peek().is_some()
+ && is_wildcard(subpatterns.peek().unwrap())
+ {
+ subpatterns.next();
+ }
+ }
+ let suffix: Vec<_> = subpatterns.collect();
+ let wild = Pat::wildcard_from_ty(self.ty);
+ PatKind::Slice { prefix, slice: Some(wild), suffix }
+ }
+ }
+ }
+ &Str(value) => PatKind::Constant { value },
+ &FloatRange(lo, hi, end) => PatKind::Range(PatRange { lo, hi, end }),
+ IntRange(range) => return range.to_pat(cx.tcx, self.ty),
+ Wildcard | NonExhaustive => PatKind::Wild,
+ Missing { .. } => bug!(
+ "trying to convert a `Missing` constructor into a `Pat`; this is probably a bug,
+ `Missing` should have been processed in `apply_constructors`"
+ ),
+ Opaque | Or => {
+ bug!("can't convert to pattern: {:?}", self)
+ }
+ };
+
+ Pat { ty: self.ty, span: DUMMY_SP, kind: Box::new(pat) }
+ }
+
+ pub(super) fn is_or_pat(&self) -> bool {
+ matches!(self.ctor, Or)
+ }
+
+ pub(super) fn ctor(&self) -> &Constructor<'tcx> {
+ &self.ctor
+ }
+ pub(super) fn ty(&self) -> Ty<'tcx> {
+ self.ty
+ }
+ pub(super) fn span(&self) -> Span {
+ self.span
+ }
+
+ pub(super) fn iter_fields<'a>(
+ &'a self,
+ ) -> impl Iterator<Item = &'p DeconstructedPat<'p, 'tcx>> + Captures<'a> {
+ self.fields.iter_patterns()
+ }
+
+ /// Specialize this pattern with a constructor.
+ /// `other_ctor` can be different from `self.ctor`, but must be covered by it.
+ pub(super) fn specialize<'a>(
+ &'a self,
+ pcx: &PatCtxt<'_, 'p, 'tcx>,
+ other_ctor: &Constructor<'tcx>,
+ ) -> SmallVec<[&'p DeconstructedPat<'p, 'tcx>; 2]> {
+ match (&self.ctor, other_ctor) {
+ (Wildcard, _) => {
+ // We return a wildcard for each field of `other_ctor`.
+ Fields::wildcards(pcx, other_ctor).iter_patterns().collect()
+ }
+ (Slice(self_slice), Slice(other_slice))
+ if self_slice.arity() != other_slice.arity() =>
+ {
+ // The only tricky case: two slices of different arity. Since `self_slice` covers
+ // `other_slice`, `self_slice` must be `VarLen`, i.e. of the form
+ // `[prefix, .., suffix]`. Moreover `other_slice` is guaranteed to have a larger
+ // arity. So we fill the middle part with enough wildcards to reach the length of
+ // the new, larger slice.
+ match self_slice.kind {
+ FixedLen(_) => bug!("{:?} doesn't cover {:?}", self_slice, other_slice),
+ VarLen(prefix, suffix) => {
+ let (ty::Slice(inner_ty) | ty::Array(inner_ty, _)) = *self.ty.kind() else {
+ bug!("bad slice pattern {:?} {:?}", self.ctor, self.ty);
+ };
+ let prefix = &self.fields.fields[..prefix];
+ let suffix = &self.fields.fields[self_slice.arity() - suffix..];
+ let wildcard: &_ =
+ pcx.cx.pattern_arena.alloc(DeconstructedPat::wildcard(inner_ty));
+ let extra_wildcards = other_slice.arity() - self_slice.arity();
+ let extra_wildcards = (0..extra_wildcards).map(|_| wildcard);
+ prefix.iter().chain(extra_wildcards).chain(suffix).collect()
+ }
+ }
+ }
+ _ => self.fields.iter_patterns().collect(),
+ }
+ }
+
+ /// We keep track, for each pattern, of whether it was ever reachable during the analysis.
+ /// This is used with `unreachable_spans` to report unreachable subpatterns arising from
+ /// or-patterns.
+ pub(super) fn set_reachable(&self) {
+ self.reachable.set(true)
+ }
+ pub(super) fn is_reachable(&self) -> bool {
+ self.reachable.get()
+ }
+
+ /// Report the spans of subpatterns that were not reachable, if any.
+ pub(super) fn unreachable_spans(&self) -> Vec<Span> {
+ let mut spans = Vec::new();
+ self.collect_unreachable_spans(&mut spans);
+ spans
+ }
+
+ fn collect_unreachable_spans(&self, spans: &mut Vec<Span>) {
+ // We don't look at subpatterns if we already reported the whole pattern as unreachable.
+ if !self.is_reachable() {
+ spans.push(self.span);
+ } else {
+ for p in self.iter_fields() {
+ p.collect_unreachable_spans(spans);
+ }
+ }
+ }
+}
+
+/// This is mostly copied from the `Pat` impl. It is best-effort and not good enough for a
+/// `Display` impl.
+impl<'p, 'tcx> fmt::Debug for DeconstructedPat<'p, 'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Printing lists is a chore.
+ let mut first = true;
+ let mut start_or_continue = |s| {
+ if first {
+ first = false;
+ ""
+ } else {
+ s
+ }
+ };
+ let mut start_or_comma = || start_or_continue(", ");
+
+ match &self.ctor {
+ Single | Variant(_) => match self.ty.kind() {
+ ty::Adt(def, _) if def.is_box() => {
+ // Without `box_patterns`, the only legal pattern of type `Box` is `_` (outside
+ // of `std`). So this branch is only reachable when the feature is enabled and
+ // the pattern is a box pattern.
+ let subpattern = self.iter_fields().next().unwrap();
+ write!(f, "box {:?}", subpattern)
+ }
+ ty::Adt(..) | ty::Tuple(..) => {
+ let variant = match self.ty.kind() {
+ ty::Adt(adt, _) => Some(adt.variant(self.ctor.variant_index_for_adt(*adt))),
+ ty::Tuple(_) => None,
+ _ => unreachable!(),
+ };
+
+ if let Some(variant) = variant {
+ write!(f, "{}", variant.name)?;
+ }
+
+ // Without `cx`, we can't know which field corresponds to which, so we can't
+ // get the names of the fields. Instead we just display everything as a tuple
+ // struct, which should be good enough.
+ write!(f, "(")?;
+ for p in self.iter_fields() {
+ write!(f, "{}", start_or_comma())?;
+ write!(f, "{:?}", p)?;
+ }
+ write!(f, ")")
+ }
+ // Note: given the expansion of `&str` patterns done in `expand_pattern`, we should
+ // be careful to detect strings here. However a string literal pattern will never
+ // be reported as a non-exhaustiveness witness, so we can ignore this issue.
+ ty::Ref(_, _, mutbl) => {
+ let subpattern = self.iter_fields().next().unwrap();
+ write!(f, "&{}{:?}", mutbl.prefix_str(), subpattern)
+ }
+ _ => write!(f, "_"),
+ },
+ Slice(slice) => {
+ let mut subpatterns = self.fields.iter_patterns();
+ write!(f, "[")?;
+ match slice.kind {
+ FixedLen(_) => {
+ for p in subpatterns {
+ write!(f, "{}{:?}", start_or_comma(), p)?;
+ }
+ }
+ VarLen(prefix_len, _) => {
+ for p in subpatterns.by_ref().take(prefix_len) {
+ write!(f, "{}{:?}", start_or_comma(), p)?;
+ }
+ write!(f, "{}", start_or_comma())?;
+ write!(f, "..")?;
+ for p in subpatterns {
+ write!(f, "{}{:?}", start_or_comma(), p)?;
+ }
+ }
+ }
+ write!(f, "]")
+ }
+ &FloatRange(lo, hi, end) => {
+ write!(f, "{}", lo)?;
+ write!(f, "{}", end)?;
+ write!(f, "{}", hi)
+ }
+ IntRange(range) => write!(f, "{:?}", range), // Best-effort, will render e.g. `false` as `0..=0`
+ Wildcard | Missing { .. } | NonExhaustive => write!(f, "_ : {:?}", self.ty),
+ Or => {
+ for pat in self.iter_fields() {
+ write!(f, "{}{:?}", start_or_continue(" | "), pat)?;
+ }
+ Ok(())
+ }
+ Str(value) => write!(f, "{}", value),
+ Opaque => write!(f, "<constant pattern>"),
+ }
+ }
+}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
new file mode 100644
index 000000000..a13748a2d
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs
@@ -0,0 +1,802 @@
+//! Validation of patterns/matches.
+
+mod check_match;
+mod const_to_pat;
+mod deconstruct_pat;
+mod usefulness;
+
+pub(crate) use self::check_match::check_match;
+
+use crate::thir::util::UserAnnotatedTyHelpers;
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::pat_util::EnumerateAndAdjustIterator;
+use rustc_hir::RangeEnd;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::interpret::{
+ ConstValue, ErrorHandled, LitToConstError, LitToConstInput, Scalar,
+};
+use rustc_middle::mir::{self, UserTypeProjection};
+use rustc_middle::mir::{BorrowKind, Field, Mutability};
+use rustc_middle::thir::{Ascription, BindingMode, FieldPat, LocalVarId, Pat, PatKind, PatRange};
+use rustc_middle::ty::subst::{GenericArg, SubstsRef};
+use rustc_middle::ty::CanonicalUserTypeAnnotation;
+use rustc_middle::ty::{self, AdtDef, ConstKind, DefIdTree, Region, Ty, TyCtxt, UserType};
+use rustc_span::{Span, Symbol};
+
+use std::cmp::Ordering;
+
+#[derive(Clone, Debug)]
+pub(crate) enum PatternError {
+ AssocConstInPattern(Span),
+ ConstParamInPattern(Span),
+ StaticInPattern(Span),
+ NonConstPath(Span),
+}
+
+pub(crate) struct PatCtxt<'a, 'tcx> {
+ pub(crate) tcx: TyCtxt<'tcx>,
+ pub(crate) param_env: ty::ParamEnv<'tcx>,
+ pub(crate) typeck_results: &'a ty::TypeckResults<'tcx>,
+ pub(crate) errors: Vec<PatternError>,
+ include_lint_checks: bool,
+}
+
+pub(crate) fn pat_from_hir<'a, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ pat: &'tcx hir::Pat<'tcx>,
+) -> Pat<'tcx> {
+ let mut pcx = PatCtxt::new(tcx, param_env, typeck_results);
+ let result = pcx.lower_pattern(pat);
+ if !pcx.errors.is_empty() {
+ let msg = format!("encountered errors lowering pattern: {:?}", pcx.errors);
+ tcx.sess.delay_span_bug(pat.span, &msg);
+ }
+ debug!("pat_from_hir({:?}) = {:?}", pat, result);
+ result
+}
+
+impl<'a, 'tcx> PatCtxt<'a, 'tcx> {
+ pub(crate) fn new(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> Self {
+ PatCtxt { tcx, param_env, typeck_results, errors: vec![], include_lint_checks: false }
+ }
+
+ pub(crate) fn include_lint_checks(&mut self) -> &mut Self {
+ self.include_lint_checks = true;
+ self
+ }
+
+ pub(crate) fn lower_pattern(&mut self, pat: &'tcx hir::Pat<'tcx>) -> Pat<'tcx> {
+ // When implicit dereferences have been inserted in this pattern, the unadjusted lowered
+ // pattern has the type that results *after* dereferencing. For example, in this code:
+ //
+ // ```
+ // match &&Some(0i32) {
+ // Some(n) => { ... },
+ // _ => { ... },
+ // }
+ // ```
+ //
+ // the type assigned to `Some(n)` in `unadjusted_pat` would be `Option<i32>` (this is
+ // determined in rustc_typeck::check::match). The adjustments would be
+ //
+ // `vec![&&Option<i32>, &Option<i32>]`.
+ //
+ // Applying the adjustments, we want to instead output `&&Some(n)` (as a THIR pattern). So
+ // we wrap the unadjusted pattern in `PatKind::Deref` repeatedly, consuming the
+ // adjustments in *reverse order* (last-in-first-out, so that the last `Deref` inserted
+ // gets the least-dereferenced type).
+ let unadjusted_pat = self.lower_pattern_unadjusted(pat);
+ self.typeck_results.pat_adjustments().get(pat.hir_id).unwrap_or(&vec![]).iter().rev().fold(
+ unadjusted_pat,
+ |pat, ref_ty| {
+ debug!("{:?}: wrapping pattern with type {:?}", pat, ref_ty);
+ Pat {
+ span: pat.span,
+ ty: *ref_ty,
+ kind: Box::new(PatKind::Deref { subpattern: pat }),
+ }
+ },
+ )
+ }
+
+ fn lower_range_expr(
+ &mut self,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> (PatKind<'tcx>, Option<Ascription<'tcx>>) {
+ match self.lower_lit(expr) {
+ PatKind::AscribeUserType { ascription, subpattern: Pat { kind: box kind, .. } } => {
+ (kind, Some(ascription))
+ }
+ kind => (kind, None),
+ }
+ }
+
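+ /// Lowers an explicit range pattern, rejecting empty ranges. For example (illustrative):
+ /// `0..5` and `0..=5` are accepted, while `5..5` is rejected with E0579 and `5..=4` with
+ /// E0030; both errors recover by lowering to a wildcard.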
+ fn lower_pattern_range(
+ &mut self,
+ ty: Ty<'tcx>,
+ lo: mir::ConstantKind<'tcx>,
+ hi: mir::ConstantKind<'tcx>,
+ end: RangeEnd,
+ span: Span,
+ ) -> PatKind<'tcx> {
+ assert_eq!(lo.ty(), ty);
+ assert_eq!(hi.ty(), ty);
+ let cmp = compare_const_vals(self.tcx, lo, hi, self.param_env);
+ match (end, cmp) {
+ // `x..y` where `x < y`.
+ // Non-empty because the range includes at least `x`.
+ (RangeEnd::Excluded, Some(Ordering::Less)) => PatKind::Range(PatRange { lo, hi, end }),
+ // `x..y` where `x >= y`. The range is empty => error.
+ (RangeEnd::Excluded, _) => {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0579,
+ "lower range bound must be less than upper"
+ )
+ .emit();
+ PatKind::Wild
+ }
+ // `x..=y` where `x == y`.
+ (RangeEnd::Included, Some(Ordering::Equal)) => PatKind::Constant { value: lo },
+ // `x..=y` where `x < y`.
+ (RangeEnd::Included, Some(Ordering::Less)) => PatKind::Range(PatRange { lo, hi, end }),
+ // `x..=y` where `x > y` hence the range is empty => error.
+ (RangeEnd::Included, _) => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0030,
+ "lower range bound must be less than or equal to upper"
+ );
+ err.span_label(span, "lower bound larger than upper bound");
+ if self.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "When matching against a range, the compiler \
+ verifies that the range is non-empty. Range \
+ patterns include both end-points, so this is \
+ equivalent to requiring the start of the range \
+ to be less than or equal to the end of the range.",
+ );
+ }
+ err.emit();
+ PatKind::Wild
+ }
+ }
+ }
+
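+ /// Fills in the missing endpoint of a half-open range pattern with the numeric extreme of
+ /// the type: e.g. (illustratively, for `u8`) a pattern `3..` becomes `3..=u8::MAX` and
+ /// `..=3` becomes `u8::MIN..=3`. Returns `None` if either endpoint is not a constant.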
+ fn normalize_range_pattern_ends(
+ &self,
+ ty: Ty<'tcx>,
+ lo: Option<&PatKind<'tcx>>,
+ hi: Option<&PatKind<'tcx>>,
+ ) -> Option<(mir::ConstantKind<'tcx>, mir::ConstantKind<'tcx>)> {
+ match (lo, hi) {
+ (Some(PatKind::Constant { value: lo }), Some(PatKind::Constant { value: hi })) => {
+ Some((*lo, *hi))
+ }
+ (Some(PatKind::Constant { value: lo }), None) => {
+ let hi = ty.numeric_max_val(self.tcx)?;
+ Some((*lo, mir::ConstantKind::from_const(hi, self.tcx)))
+ }
+ (None, Some(PatKind::Constant { value: hi })) => {
+ let lo = ty.numeric_min_val(self.tcx)?;
+ Some((mir::ConstantKind::from_const(lo, self.tcx), *hi))
+ }
+ _ => None,
+ }
+ }
+
+ fn lower_pattern_unadjusted(&mut self, pat: &'tcx hir::Pat<'tcx>) -> Pat<'tcx> {
+ let mut ty = self.typeck_results.node_type(pat.hir_id);
+
+ let kind = match pat.kind {
+ hir::PatKind::Wild => PatKind::Wild,
+
+ hir::PatKind::Lit(value) => self.lower_lit(value),
+
+ hir::PatKind::Range(ref lo_expr, ref hi_expr, end) => {
+ let (lo_expr, hi_expr) = (lo_expr.as_deref(), hi_expr.as_deref());
+ let lo_span = lo_expr.map_or(pat.span, |e| e.span);
+ let lo = lo_expr.map(|e| self.lower_range_expr(e));
+ let hi = hi_expr.map(|e| self.lower_range_expr(e));
+
+ let (lp, hp) = (lo.as_ref().map(|x| &x.0), hi.as_ref().map(|x| &x.0));
+ let mut kind = match self.normalize_range_pattern_ends(ty, lp, hp) {
+ Some((lc, hc)) => self.lower_pattern_range(ty, lc, hc, end, lo_span),
+ None => {
+ let msg = &format!(
+ "found bad range pattern `{:?}` outside of error recovery",
+ (&lo, &hi),
+ );
+ self.tcx.sess.delay_span_bug(pat.span, msg);
+ PatKind::Wild
+ }
+ };
+
+ // If we are handling a range with associated constants (e.g.
+ // `Foo::<'a>::A..=Foo::B`), we need to put the ascriptions for the associated
+ // constants somewhere. Have them on the range pattern.
+ for end in &[lo, hi] {
+ if let Some((_, Some(ascription))) = end {
+ let subpattern = Pat { span: pat.span, ty, kind: Box::new(kind) };
+ kind =
+ PatKind::AscribeUserType { ascription: ascription.clone(), subpattern };
+ }
+ }
+
+ kind
+ }
+
+ hir::PatKind::Path(ref qpath) => {
+ return self.lower_path(qpath, pat.hir_id, pat.span);
+ }
+
+ hir::PatKind::Ref(ref subpattern, _) | hir::PatKind::Box(ref subpattern) => {
+ PatKind::Deref { subpattern: self.lower_pattern(subpattern) }
+ }
+
+ hir::PatKind::Slice(ref prefix, ref slice, ref suffix) => {
+ self.slice_or_array_pattern(pat.span, ty, prefix, slice, suffix)
+ }
+
+ hir::PatKind::Tuple(ref pats, ddpos) => {
+ let ty::Tuple(ref tys) = ty.kind() else {
+ span_bug!(pat.span, "unexpected type for tuple pattern: {:?}", ty);
+ };
+ let subpatterns = self.lower_tuple_subpats(pats, tys.len(), ddpos);
+ PatKind::Leaf { subpatterns }
+ }
+
+ hir::PatKind::Binding(_, id, ident, ref sub) => {
+ let bm = *self
+ .typeck_results
+ .pat_binding_modes()
+ .get(pat.hir_id)
+ .expect("missing binding mode");
+ let (mutability, mode) = match bm {
+ ty::BindByValue(mutbl) => (mutbl, BindingMode::ByValue),
+ ty::BindByReference(hir::Mutability::Mut) => (
+ Mutability::Not,
+ BindingMode::ByRef(BorrowKind::Mut { allow_two_phase_borrow: false }),
+ ),
+ ty::BindByReference(hir::Mutability::Not) => {
+ (Mutability::Not, BindingMode::ByRef(BorrowKind::Shared))
+ }
+ };
+
+ // A `ref x` pattern is the same node used for `x`, and as such it has
+ // `x`'s type, which is `&T`, where we want `T` (the type being matched).
+ let var_ty = ty;
+ if let ty::BindByReference(_) = bm {
+ if let ty::Ref(_, rty, _) = ty.kind() {
+ ty = *rty;
+ } else {
+ bug!("`ref {}` has wrong type {}", ident, ty);
+ }
+ };
+
+ PatKind::Binding {
+ mutability,
+ mode,
+ name: ident.name,
+ var: LocalVarId(id),
+ ty: var_ty,
+ subpattern: self.lower_opt_pattern(sub),
+ is_primary: id == pat.hir_id,
+ }
+ }
+
+ hir::PatKind::TupleStruct(ref qpath, ref pats, ddpos) => {
+ let res = self.typeck_results.qpath_res(qpath, pat.hir_id);
+ let ty::Adt(adt_def, _) = ty.kind() else {
+ span_bug!(pat.span, "tuple struct pattern not applied to an ADT {:?}", ty);
+ };
+ let variant_def = adt_def.variant_of_res(res);
+ let subpatterns = self.lower_tuple_subpats(pats, variant_def.fields.len(), ddpos);
+ self.lower_variant_or_leaf(res, pat.hir_id, pat.span, ty, subpatterns)
+ }
+
+ hir::PatKind::Struct(ref qpath, ref fields, _) => {
+ let res = self.typeck_results.qpath_res(qpath, pat.hir_id);
+ let subpatterns = fields
+ .iter()
+ .map(|field| FieldPat {
+ field: Field::new(self.tcx.field_index(field.hir_id, self.typeck_results)),
+ pattern: self.lower_pattern(&field.pat),
+ })
+ .collect();
+
+ self.lower_variant_or_leaf(res, pat.hir_id, pat.span, ty, subpatterns)
+ }
+
+ hir::PatKind::Or(ref pats) => PatKind::Or { pats: self.lower_patterns(pats) },
+ };
+
+ Pat { span: pat.span, ty, kind: Box::new(kind) }
+ }
+
+ fn lower_tuple_subpats(
+ &mut self,
+ pats: &'tcx [hir::Pat<'tcx>],
+ expected_len: usize,
+ gap_pos: Option<usize>,
+ ) -> Vec<FieldPat<'tcx>> {
+ pats.iter()
+ .enumerate_and_adjust(expected_len, gap_pos)
+ .map(|(i, subpattern)| FieldPat {
+ field: Field::new(i),
+ pattern: self.lower_pattern(subpattern),
+ })
+ .collect()
+ }
+
+ fn lower_patterns(&mut self, pats: &'tcx [hir::Pat<'tcx>]) -> Vec<Pat<'tcx>> {
+ pats.iter().map(|p| self.lower_pattern(p)).collect()
+ }
+
+ fn lower_opt_pattern(&mut self, pat: &'tcx Option<&'tcx hir::Pat<'tcx>>) -> Option<Pat<'tcx>> {
+ pat.as_ref().map(|p| self.lower_pattern(p))
+ }
+
+ fn slice_or_array_pattern(
+ &mut self,
+ span: Span,
+ ty: Ty<'tcx>,
+ prefix: &'tcx [hir::Pat<'tcx>],
+ slice: &'tcx Option<&'tcx hir::Pat<'tcx>>,
+ suffix: &'tcx [hir::Pat<'tcx>],
+ ) -> PatKind<'tcx> {
+ let prefix = self.lower_patterns(prefix);
+ let slice = self.lower_opt_pattern(slice);
+ let suffix = self.lower_patterns(suffix);
+ match ty.kind() {
+ // Matching a slice, `[T]`.
+ ty::Slice(..) => PatKind::Slice { prefix, slice, suffix },
+ // Fixed-length array, `[T; len]`.
+ ty::Array(_, len) => {
+ let len = len.eval_usize(self.tcx, self.param_env);
+ assert!(len >= prefix.len() as u64 + suffix.len() as u64);
+ PatKind::Array { prefix, slice, suffix }
+ }
+ _ => span_bug!(span, "bad slice pattern type {:?}", ty),
+ }
+ }
+
+ fn lower_variant_or_leaf(
+ &mut self,
+ res: Res,
+ hir_id: hir::HirId,
+ span: Span,
+ ty: Ty<'tcx>,
+ subpatterns: Vec<FieldPat<'tcx>>,
+ ) -> PatKind<'tcx> {
+ let res = match res {
+ Res::Def(DefKind::Ctor(CtorOf::Variant, ..), variant_ctor_id) => {
+ let variant_id = self.tcx.parent(variant_ctor_id);
+ Res::Def(DefKind::Variant, variant_id)
+ }
+ res => res,
+ };
+
+ let mut kind = match res {
+ Res::Def(DefKind::Variant, variant_id) => {
+ let enum_id = self.tcx.parent(variant_id);
+ let adt_def = self.tcx.adt_def(enum_id);
+ if adt_def.is_enum() {
+ let substs = match ty.kind() {
+ ty::Adt(_, substs) | ty::FnDef(_, substs) => substs,
+ ty::Error(_) => {
+ // Avoid ICE (#50585)
+ return PatKind::Wild;
+ }
+ _ => bug!("inappropriate type for def: {:?}", ty),
+ };
+ PatKind::Variant {
+ adt_def,
+ substs,
+ variant_index: adt_def.variant_index_with_id(variant_id),
+ subpatterns,
+ }
+ } else {
+ PatKind::Leaf { subpatterns }
+ }
+ }
+
+ Res::Def(
+ DefKind::Struct
+ | DefKind::Ctor(CtorOf::Struct, ..)
+ | DefKind::Union
+ | DefKind::TyAlias
+ | DefKind::AssocTy,
+ _,
+ )
+ | Res::SelfTy { .. }
+ | Res::SelfCtor(..) => PatKind::Leaf { subpatterns },
+ _ => {
+ let pattern_error = match res {
+ Res::Def(DefKind::ConstParam, _) => PatternError::ConstParamInPattern(span),
+ Res::Def(DefKind::Static(_), _) => PatternError::StaticInPattern(span),
+ _ => PatternError::NonConstPath(span),
+ };
+ self.errors.push(pattern_error);
+ PatKind::Wild
+ }
+ };
+
+ if let Some(user_ty) = self.user_substs_applied_to_ty_of_hir_id(hir_id) {
+ debug!("lower_variant_or_leaf: kind={:?} user_ty={:?} span={:?}", kind, user_ty, span);
+ let annotation = CanonicalUserTypeAnnotation {
+ user_ty,
+ span,
+ inferred_ty: self.typeck_results.node_type(hir_id),
+ };
+ kind = PatKind::AscribeUserType {
+ subpattern: Pat { span, ty, kind: Box::new(kind) },
+ ascription: Ascription { annotation, variance: ty::Variance::Covariant },
+ };
+ }
+
+ kind
+ }
+
+ /// Takes a HIR Path. If the path is a constant, evaluates it and feeds
+ /// it to `const_to_pat`. Any other path (like enum variants without fields)
+ /// is converted to the corresponding pattern via `lower_variant_or_leaf`.
+ #[instrument(skip(self), level = "debug")]
+ fn lower_path(&mut self, qpath: &hir::QPath<'_>, id: hir::HirId, span: Span) -> Pat<'tcx> {
+ let ty = self.typeck_results.node_type(id);
+ let res = self.typeck_results.qpath_res(qpath, id);
+
+ let pat_from_kind = |kind| Pat { span, ty, kind: Box::new(kind) };
+
+ let (def_id, is_associated_const) = match res {
+ Res::Def(DefKind::Const, def_id) => (def_id, false),
+ Res::Def(DefKind::AssocConst, def_id) => (def_id, true),
+
+ _ => return pat_from_kind(self.lower_variant_or_leaf(res, id, span, ty, vec![])),
+ };
+
+ // Use `Reveal::All` here because patterns are always monomorphic even if their function
+ // isn't.
+ let param_env_reveal_all = self.param_env.with_reveal_all_normalized(self.tcx);
+ let substs = self.typeck_results.node_substs(id);
+ let instance = match ty::Instance::resolve(self.tcx, param_env_reveal_all, def_id, substs) {
+ Ok(Some(i)) => i,
+ Ok(None) => {
+ // If there's no error but we cannot resolve the constant, it must be an assoc const.
+ debug_assert!(is_associated_const);
+
+ self.errors.push(PatternError::AssocConstInPattern(span));
+
+ return pat_from_kind(PatKind::Wild);
+ }
+
+ Err(_) => {
+ self.tcx.sess.span_err(span, "could not evaluate constant pattern");
+ return pat_from_kind(PatKind::Wild);
+ }
+ };
+
+ // `mir_const_qualif` must be called with the `DefId` of the item where the const is
+ // defined, not where it is declared. The difference is significant for associated
+ // constants.
+ let mir_structural_match_violation = self.tcx.mir_const_qualif(instance.def_id()).custom_eq;
+ debug!("mir_structural_match_violation({:?}) -> {}", qpath, mir_structural_match_violation);
+
+ match self.tcx.const_eval_instance(param_env_reveal_all, instance, Some(span)) {
+ Ok(literal) => {
+ let const_ = mir::ConstantKind::Val(literal, ty);
+ let pattern = self.const_to_pat(const_, id, span, mir_structural_match_violation);
+
+ if !is_associated_const {
+ return pattern;
+ }
+
+ let user_provided_types = self.typeck_results().user_provided_types();
+ if let Some(&user_ty) = user_provided_types.get(id) {
+ let annotation = CanonicalUserTypeAnnotation {
+ user_ty,
+ span,
+ inferred_ty: self.typeck_results().node_type(id),
+ };
+ Pat {
+ span,
+ kind: Box::new(PatKind::AscribeUserType {
+ subpattern: pattern,
+ ascription: Ascription {
+ annotation,
+ // Note that we use `Contravariant` here. See the
+ // `variance` field documentation for details.
+ variance: ty::Variance::Contravariant,
+ },
+ }),
+ ty: const_.ty(),
+ }
+ } else {
+ pattern
+ }
+ }
+ Err(ErrorHandled::TooGeneric) => {
+ // While diagnostics will already have been emitted for the `Reported | Linted`
+ // cases, that is not true for the `TooGeneric` case, so we need to give the
+ // user more information.
+ self.tcx.sess.span_err(span, "constant pattern depends on a generic parameter");
+ pat_from_kind(PatKind::Wild)
+ }
+ Err(_) => {
+ self.tcx.sess.span_err(span, "could not evaluate constant pattern");
+ pat_from_kind(PatKind::Wild)
+ }
+ }
+ }
+
+ /// Converts inline const patterns.
+ fn lower_inline_const(
+ &mut self,
+ anon_const: &'tcx hir::AnonConst,
+ id: hir::HirId,
+ span: Span,
+ ) -> PatKind<'tcx> {
+ let anon_const_def_id = self.tcx.hir().local_def_id(anon_const.hir_id);
+ let value = mir::ConstantKind::from_inline_const(self.tcx, anon_const_def_id);
+
+ // Evaluate early like we do in `lower_path`.
+ let value = value.eval(self.tcx, self.param_env);
+
+ match value {
+ mir::ConstantKind::Ty(c) => {
+ match c.kind() {
+ ConstKind::Param(_) => {
+ self.errors.push(PatternError::ConstParamInPattern(span));
+ return PatKind::Wild;
+ }
+ ConstKind::Unevaluated(_) => {
+ // If we land here it means the const can't be evaluated because it's `TooGeneric`.
+ self.tcx
+ .sess
+ .span_err(span, "constant pattern depends on a generic parameter");
+ return PatKind::Wild;
+ }
+ _ => bug!("Expected either ConstKind::Param or ConstKind::Unevaluated"),
+ }
+ }
+ mir::ConstantKind::Val(_, _) => *self.const_to_pat(value, id, span, false).kind,
+ }
+ }
+
+ /// Converts literals, paths and negation of literals to patterns.
+ /// The special case for negation exists to allow things like `-128_i8`
+ /// which would overflow if we tried to evaluate `128_i8` and then negate
+ /// afterwards.
+ fn lower_lit(&mut self, expr: &'tcx hir::Expr<'tcx>) -> PatKind<'tcx> {
+ let (lit, neg) = match expr.kind {
+ hir::ExprKind::Path(ref qpath) => {
+ return *self.lower_path(qpath, expr.hir_id, expr.span).kind;
+ }
+ hir::ExprKind::ConstBlock(ref anon_const) => {
+ return self.lower_inline_const(anon_const, expr.hir_id, expr.span);
+ }
+ hir::ExprKind::Lit(ref lit) => (lit, false),
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => {
+ let hir::ExprKind::Lit(ref lit) = expr.kind else {
+ span_bug!(expr.span, "not a literal: {:?}", expr);
+ };
+ (lit, true)
+ }
+ _ => span_bug!(expr.span, "not a literal: {:?}", expr),
+ };
+
+ let lit_input =
+ LitToConstInput { lit: &lit.node, ty: self.typeck_results.expr_ty(expr), neg };
+ match self.tcx.at(expr.span).lit_to_mir_constant(lit_input) {
+ Ok(constant) => *self.const_to_pat(constant, expr.hir_id, lit.span, false).kind,
+ Err(LitToConstError::Reported) => PatKind::Wild,
+ Err(LitToConstError::TypeError) => bug!("lower_lit: had type error"),
+ }
+ }
+}
+
+impl<'tcx> UserAnnotatedTyHelpers<'tcx> for PatCtxt<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn typeck_results(&self) -> &ty::TypeckResults<'tcx> {
+ self.typeck_results
+ }
+}
+
+pub(crate) trait PatternFoldable<'tcx>: Sized {
+ fn fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ self.super_fold_with(folder)
+ }
+
+ fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self;
+}
+
+pub(crate) trait PatternFolder<'tcx>: Sized {
+ fn fold_pattern(&mut self, pattern: &Pat<'tcx>) -> Pat<'tcx> {
+ pattern.super_fold_with(self)
+ }
+
+ fn fold_pattern_kind(&mut self, kind: &PatKind<'tcx>) -> PatKind<'tcx> {
+ kind.super_fold_with(self)
+ }
+}
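+
+// An illustrative sketch (not part of the original source): a folder that erases
+// every span, relying on the default `super_fold_with` methods for recursion:
+//
+//     struct SpanEraser;
+//     impl<'tcx> PatternFolder<'tcx> for SpanEraser {
+//         fn fold_pattern(&mut self, pat: &Pat<'tcx>) -> Pat<'tcx> {
+//             let mut folded = pat.super_fold_with(self);
+//             folded.span = rustc_span::DUMMY_SP;
+//             folded
+//         }
+//     }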
+
+impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Box<T> {
+ fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ let content: T = (**self).fold_with(folder);
+ Box::new(content)
+ }
+}
+
+impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Vec<T> {
+ fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ self.iter().map(|t| t.fold_with(folder)).collect()
+ }
+}
+
+impl<'tcx, T: PatternFoldable<'tcx>> PatternFoldable<'tcx> for Option<T> {
+ fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ self.as_ref().map(|t| t.fold_with(folder))
+ }
+}
+
+macro_rules! ClonePatternFoldableImpls {
+ (<$lt_tcx:tt> $($ty:ty),+) => {
+ $(
+ impl<$lt_tcx> PatternFoldable<$lt_tcx> for $ty {
+ fn super_fold_with<F: PatternFolder<$lt_tcx>>(&self, _: &mut F) -> Self {
+ Clone::clone(self)
+ }
+ }
+ )+
+ }
+}
+
+ClonePatternFoldableImpls! { <'tcx>
+ Span, Field, Mutability, Symbol, LocalVarId, usize, ty::Const<'tcx>,
+ Region<'tcx>, Ty<'tcx>, BindingMode, AdtDef<'tcx>,
+ SubstsRef<'tcx>, &'tcx GenericArg<'tcx>, UserType<'tcx>,
+ UserTypeProjection, CanonicalUserTypeAnnotation<'tcx>
+}
+
+impl<'tcx> PatternFoldable<'tcx> for FieldPat<'tcx> {
+ fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ FieldPat { field: self.field.fold_with(folder), pattern: self.pattern.fold_with(folder) }
+ }
+}
+
+impl<'tcx> PatternFoldable<'tcx> for Pat<'tcx> {
+ fn fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ folder.fold_pattern(self)
+ }
+
+ fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ Pat {
+ ty: self.ty.fold_with(folder),
+ span: self.span.fold_with(folder),
+ kind: self.kind.fold_with(folder),
+ }
+ }
+}
+
+impl<'tcx> PatternFoldable<'tcx> for PatKind<'tcx> {
+ fn fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ folder.fold_pattern_kind(self)
+ }
+
+ fn super_fold_with<F: PatternFolder<'tcx>>(&self, folder: &mut F) -> Self {
+ match *self {
+ PatKind::Wild => PatKind::Wild,
+ PatKind::AscribeUserType {
+ ref subpattern,
+ ascription: Ascription { ref annotation, variance },
+ } => PatKind::AscribeUserType {
+ subpattern: subpattern.fold_with(folder),
+ ascription: Ascription { annotation: annotation.fold_with(folder), variance },
+ },
+ PatKind::Binding { mutability, name, mode, var, ty, ref subpattern, is_primary } => {
+ PatKind::Binding {
+ mutability: mutability.fold_with(folder),
+ name: name.fold_with(folder),
+ mode: mode.fold_with(folder),
+ var: var.fold_with(folder),
+ ty: ty.fold_with(folder),
+ subpattern: subpattern.fold_with(folder),
+ is_primary,
+ }
+ }
+ PatKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
+ PatKind::Variant {
+ adt_def: adt_def.fold_with(folder),
+ substs: substs.fold_with(folder),
+ variant_index,
+ subpatterns: subpatterns.fold_with(folder),
+ }
+ }
+ PatKind::Leaf { ref subpatterns } => {
+ PatKind::Leaf { subpatterns: subpatterns.fold_with(folder) }
+ }
+ PatKind::Deref { ref subpattern } => {
+ PatKind::Deref { subpattern: subpattern.fold_with(folder) }
+ }
+ PatKind::Constant { value } => PatKind::Constant { value },
+ PatKind::Range(range) => PatKind::Range(range),
+ PatKind::Slice { ref prefix, ref slice, ref suffix } => PatKind::Slice {
+ prefix: prefix.fold_with(folder),
+ slice: slice.fold_with(folder),
+ suffix: suffix.fold_with(folder),
+ },
+ PatKind::Array { ref prefix, ref slice, ref suffix } => PatKind::Array {
+ prefix: prefix.fold_with(folder),
+ slice: slice.fold_with(folder),
+ suffix: suffix.fold_with(folder),
+ },
+ PatKind::Or { ref pats } => PatKind::Or { pats: pats.fold_with(folder) },
+ }
+ }
+}
+
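+/// Compares two constants of the same type, e.g. the two endpoints of a range pattern.
+/// Returns `None` when the values are incomparable, which for the types handled here means
+/// a `NaN` float endpoint.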
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn compare_const_vals<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ a: mir::ConstantKind<'tcx>,
+ b: mir::ConstantKind<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+) -> Option<Ordering> {
+ assert_eq!(a.ty(), b.ty());
+
+ let ty = a.ty();
+
+ // This code is hot when compiling matches with many ranges. So we
+ // special-case extraction of evaluated scalars for speed, for types where
+ // raw data comparisons are appropriate. E.g. `unicode-normalization` has
+ // many ranges such as '\u{037A}'..='\u{037F}', and chars can be compared
+ // in this way.
+ match ty.kind() {
+ ty::Float(_) | ty::Int(_) => {} // require special handling, see below
+ _ => match (a, b) {
+ (
+ mir::ConstantKind::Val(ConstValue::Scalar(Scalar::Int(a)), _a_ty),
+ mir::ConstantKind::Val(ConstValue::Scalar(Scalar::Int(b)), _b_ty),
+ ) => return Some(a.cmp(&b)),
+ _ => {}
+ },
+ }
+
+ let a = a.eval_bits(tcx, param_env, ty);
+ let b = b.eval_bits(tcx, param_env, ty);
+
+ use rustc_apfloat::Float;
+ match *ty.kind() {
+ ty::Float(ty::FloatTy::F32) => {
+ let a = rustc_apfloat::ieee::Single::from_bits(a);
+ let b = rustc_apfloat::ieee::Single::from_bits(b);
+ a.partial_cmp(&b)
+ }
+ ty::Float(ty::FloatTy::F64) => {
+ let a = rustc_apfloat::ieee::Double::from_bits(a);
+ let b = rustc_apfloat::ieee::Double::from_bits(b);
+ a.partial_cmp(&b)
+ }
+ ty::Int(ity) => {
+ use rustc_middle::ty::layout::IntegerExt;
+ let size = rustc_target::abi::Integer::from_int_ty(&tcx, ity).size();
+ let a = size.sign_extend(a);
+ let b = size.sign_extend(b);
+ Some((a as i128).cmp(&(b as i128)))
+ }
+ _ => Some(a.cmp(&b)),
+ }
+}
diff --git a/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
new file mode 100644
index 000000000..0a660ef30
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/pattern/usefulness.rs
@@ -0,0 +1,978 @@
+//! Note: tests specific to this file can be found in:
+//!
+//! - `ui/pattern/usefulness`
+//! - `ui/or-patterns`
+//! - `ui/consts/const_in_pattern`
+//! - `ui/rfc-2008-non-exhaustive`
+//! - `ui/half-open-range-patterns`
+//! - probably many others
+//!
+//! I (Nadrieril) prefer to put new tests in `ui/pattern/usefulness` unless there's a specific
+//! reason not to, for example if they depend on a particular feature like `or_patterns`.
+//!
+//! -----
+//!
+//! This file includes the logic for exhaustiveness and reachability checking for pattern-matching.
+//! Specifically, given a list of patterns for a type, we can tell whether:
+//! (a) each pattern is reachable (reachability)
+//! (b) the patterns cover every possible value for the type (exhaustiveness)
+//!
+//! The algorithm implemented here is a modified version of the one described in [this
+//! paper](http://moscova.inria.fr/~maranget/papers/warn/index.html). We have however generalized
+//! it to accommodate the variety of patterns that Rust supports. We thus explain our version here,
+//! without being as rigorous.
+//!
+//!
+//! # Summary
+//!
+//! The core of the algorithm is the notion of "usefulness". A pattern `q` is said to be *useful*
+//! relative to another pattern `p` of the same type if there is a value that is matched by `q` and
+//! not matched by `p`. This generalizes to many `p`s: `q` is useful w.r.t. a list of patterns
+//! `p_1 .. p_n` if there is a value that is matched by `q` and by none of the `p_i`. We write
+//! `usefulness(p_1 .. p_n, q)` for a function that returns a list of such values. The aim of this
+//! file is to compute it efficiently.
+//!
+//! This is enough to compute reachability: a pattern in a `match` expression is reachable iff it
+//! is useful w.r.t. the patterns above it:
+//! ```rust
+//! # fn foo(x: Option<i32>) {
+//! match x {
+//! Some(_) => {},
+//! None => {}, // reachable: `None` is matched by this but not the branch above
+//! Some(0) => {}, // unreachable: all the values this matches are already matched by
+//! // `Some(_)` above
+//! }
+//! # }
+//! ```
+//!
+//! This is also enough to compute exhaustiveness: a match is exhaustive iff the wildcard `_`
+//! pattern is _not_ useful w.r.t. the patterns in the match. The values returned by `usefulness`
+//! are used to tell the user which values are missing.
+//! ```compile_fail,E0004
+//! # fn foo(x: Option<i32>) {
+//! match x {
+//! Some(0) => {},
+//! None => {},
+//! // not exhaustive: `_` is useful because it matches `Some(1)`
+//! }
+//! # }
+//! ```
+//!
+//! The entrypoint of this file is the [`compute_match_usefulness`] function, which computes
+//! reachability for each match branch and exhaustiveness for the whole match.
+//!
+//!
+//! # Constructors and fields
+//!
+//! Note: we will often abbreviate "constructor" as "ctor".
+//!
+//! The idea that powers everything that is done in this file is the following: a (matchable)
+//! value is made from a constructor applied to a number of subvalues. Examples of constructors are
+//! `Some`, `None`, `(,)` (the 2-tuple constructor), `Foo {..}` (the constructor for a struct
+//! `Foo`), and `2` (the constructor for the number `2`). This is natural when we think of
+//! pattern-matching, and this is the basis for what follows.
+//!
+//! Some of the ctors listed above might feel weird: `None` and `2` don't take any arguments.
+//! That's ok: those are ctors that take a list of 0 arguments; they are the simplest case of
+//! ctors. We treat `2` as a ctor because `u64` and other number types behave exactly like a huge
+//! `enum`, with one variant for each number. This allows us to see any matchable value as made up
+//! from a tree of ctors, each having a set number of children. For example: `Foo { bar: None,
+//! baz: Ok(0) }` is made from 4 different ctors, namely `Foo{..}`, `None`, `Ok` and `0`.
+//!
+//! This idea can be extended to patterns: they are also made from constructors applied to fields.
+//! A pattern for a given type is allowed to use all the ctors for values of that type (which we
+//! call "value constructors"), but there are also pattern-only ctors. The most important one is
+//! the wildcard (`_`), and the others are integer ranges (`0..=10`), variable-length slices (`[x,
+//! ..]`), and or-patterns (`Ok(0) | Err(_)`). Examples of valid patterns are `42`, `Some(_)`, `Foo
+//! { bar: Some(0) | None, baz: _ }`. Note that a binder in a pattern (e.g. `Some(x)`) matches the
+//! same values as a wildcard (e.g. `Some(_)`), so we treat both as wildcards.
+//!
+//! From this deconstruction we can compute whether a given value matches a given pattern; we
+//! simply look at ctors one at a time. Given a pattern `p` and a value `v`, we want to compute
+//! `matches!(v, p)`. It's mostly straightforward: we compare the head ctors and when they match
+//! we compare their fields recursively. A few representative examples:
+//!
+//! - `matches!(v, _) := true`
+//! - `matches!((v0, v1), (p0, p1)) := matches!(v0, p0) && matches!(v1, p1)`
+//! - `matches!(Foo { bar: v0, baz: v1 }, Foo { bar: p0, baz: p1 }) := matches!(v0, p0) && matches!(v1, p1)`
+//! - `matches!(Ok(v0), Ok(p0)) := matches!(v0, p0)`
+//! - `matches!(Ok(v0), Err(p0)) := false` (incompatible variants)
+//! - `matches!(v, 1..=100) := matches!(v, 1) || ... || matches!(v, 100)`
+//! - `matches!([v0], [p0, .., p1]) := false` (incompatible lengths)
+//! - `matches!([v0, v1, v2], [p0, .., p1]) := matches!(v0, p0) && matches!(v2, p1)`
+//! - `matches!(v, p0 | p1) := matches!(v, p0) || matches!(v, p1)`
+//!
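+//! To make this concrete, here is a tiny self-contained model of the ctor/fields view with a
+//! recursive `matches`. This is only an illustrative sketch: the toy `Pat` type below is invented
+//! for this explanation and is not the representation this file actually uses.
+//! ```ignore (illustrative)
+//! // A value is a ctor applied to sub-values; a pattern is a ctor applied
+//! // to sub-patterns, or a wildcard.
+//! #[derive(Clone)]
+//! enum Pat {
+//!     Wildcard,               // `_` (and binders like `x`)
+//!     Ctor(String, Vec<Pat>), // e.g. `Some(p0)`, `(p0, p1)`, `2`
+//! }
+//!
+//! // `matches!(v, p)`: compare head ctors, then recurse into the fields.
+//! fn matches(v: &Pat, p: &Pat) -> bool {
+//!     match (v, p) {
+//!         (_, Pat::Wildcard) => true,
+//!         (Pat::Ctor(cv, vs), Pat::Ctor(cp, ps)) => {
+//!             cv == cp && vs.len() == ps.len()
+//!                 && vs.iter().zip(ps).all(|(v, p)| matches(v, p))
+//!         }
+//!         // In this model a value is always a tree of ctors.
+//!         (Pat::Wildcard, _) => false,
+//!     }
+//! }
+//! ```
+//!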
+//! Constructors, fields and relevant operations are defined in the [`super::deconstruct_pat`] module.
+//!
+//! Note: this constructors/fields distinction may not straightforwardly apply to every Rust type.
+//! For example a value of type `Rc<u64>` can't be deconstructed that way, and `&str` has an
+//! infinitude of constructors. There are also subtleties with visibility of fields and
+//! uninhabitedness and various other things. The constructors idea can be extended to handle most
+//! of these subtleties though; caveats are documented where relevant throughout the code.
+//!
+//! Whether constructors cover each other is computed by [`Constructor::is_covered_by`].
+//!
+//!
+//! # Specialization
+//!
+//! Recall that we wish to compute `usefulness(p_1 .. p_n, q)`: given a list of patterns `p_1 ..
+//! p_n` and a pattern `q`, all of the same type, we want to find a list of values (called
+//! "witnesses") that are matched by `q` and by none of the `p_i`. We obviously don't just
+//! enumerate all possible values. From the discussion above we see that we can proceed
+//! ctor-by-ctor: for each value ctor of the given type, we ask "is there a value that starts with
+//! this constructor and matches `q` and none of the `p_i`?". As we saw above, there's a lot we can
+//! say from knowing only the first constructor of our candidate value.
+//!
+//! Let's take the following example:
+//! ```compile_fail,E0004
+//! # enum Enum { Variant1(()), Variant2(Option<bool>, u32)}
+//! # fn foo(x: Enum) {
+//! match x {
+//! Enum::Variant1(_) => {} // `p1`
+//! Enum::Variant2(None, 0) => {} // `p2`
+//! Enum::Variant2(Some(_), 0) => {} // `q`
+//! }
+//! # }
+//! ```
+//!
+//! We can easily see that if our candidate value `v` starts with `Variant1` it will not match `q`.
+//! If `v = Variant2(v0, v1)` however, whether or not it matches `p2` and `q` will depend on `v0`
+//! and `v1`. In fact, such a `v` will be a witness of usefulness of `q` exactly when the tuple
+//! `(v0, v1)` is a witness of usefulness of `q'` in the following reduced match:
+//!
+//! ```compile_fail,E0004
+//! # fn foo(x: (Option<bool>, u32)) {
+//! match x {
+//! (None, 0) => {} // `p2'`
+//! (Some(_), 0) => {} // `q'`
+//! }
+//! # }
+//! ```
+//!
+//! This motivates a new step in computing usefulness, that we call _specialization_.
+//! Specialization consists of filtering a list of patterns for those that match a constructor, and
+//! then looking into the constructor's fields. This enables usefulness to be computed recursively.
+//!
+//! Instead of acting on a single pattern in each row, we will consider a list of patterns for each
+//! row, and we call such a list a _pattern-stack_. The idea is that we will specialize the
+//! leftmost pattern, which amounts to popping the constructor and pushing its fields, which feels
+//! like a stack. We write a pattern-stack simply as `[p_1 ... p_n]`.
+//! Here's a sequence of specializations of a list of pattern-stacks, to illustrate what's
+//! happening:
+//! ```ignore (illustrative)
+//! [Enum::Variant1(_)]
+//! [Enum::Variant2(None, 0)]
+//! [Enum::Variant2(Some(_), 0)]
+//! //==>> specialize with `Variant2`
+//! [None, 0]
+//! [Some(_), 0]
+//! //==>> specialize with `Some`
+//! [_, 0]
+//! //==>> specialize with `true` (say the type was `bool`)
+//! [0]
+//! //==>> specialize with `0`
+//! []
+//! ```
+//!
+//! The function `specialize(c, p)` takes a value constructor `c` and a pattern `p`, and returns 0
+//! or more pattern-stacks. If `c` does not match the head constructor of `p`, it returns nothing;
+//! otherwise it returns the fields of the constructor. This only returns more than one
+//! pattern-stack if `p` has a pattern-only constructor.
+//!
+//! - Specializing for the wrong constructor returns nothing
+//!
+//! `specialize(None, Some(p0)) := []`
+//!
+//! - Specializing for the correct constructor returns a single row with the fields
+//!
+//! `specialize(Variant1, Variant1(p0, p1, p2)) := [[p0, p1, p2]]`
+//!
+//! `specialize(Foo{..}, Foo { bar: p0, baz: p1 }) := [[p0, p1]]`
+//!
+//! - For or-patterns, we specialize each branch and concatenate the results
+//!
+//! `specialize(c, p0 | p1) := specialize(c, p0) ++ specialize(c, p1)`
+//!
+//! - We treat the other pattern constructors as if they were a large or-pattern of all the
+//! possibilities:
+//!
+//! `specialize(c, _) := specialize(c, Variant1(_) | Variant2(_, _) | ...)`
+//!
+//! `specialize(c, 1..=100) := specialize(c, 1 | ... | 100)`
+//!
+//! `specialize(c, [p0, .., p1]) := specialize(c, [p0, p1] | [p0, _, p1] | [p0, _, _, p1] | ...)`
+//!
+//! - If `c` is a pattern-only constructor, `specialize` is defined on a case-by-case basis. See
+//! the discussion about constructor splitting in [`super::deconstruct_pat`].
+//!
+//!
+//! We then extend this function to work with pattern-stacks as input, by acting on the first
+//! column and keeping the other columns untouched.
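+//!
+//! Continuing the toy model from the sketch above, `specialize` looks roughly as follows. This is
+//! a sketch under simplifying assumptions: the arity of `c` is passed in explicitly, and
+//! or-patterns and the other pattern-only ctors are omitted.
+//! ```ignore (illustrative)
+//! fn specialize(c: &str, arity_of_c: usize, p: &Pat) -> Vec<Vec<Pat>> {
+//!     match p {
+//!         // A wildcard matches `c` applied to anything: expose one
+//!         // wildcard per field of `c`.
+//!         Pat::Wildcard => vec![vec![Pat::Wildcard; arity_of_c]],
+//!         // Matching head ctor: a single row containing the fields.
+//!         Pat::Ctor(name, fields) if name == c => vec![fields.clone()],
+//!         // Non-matching head ctor: no rows.
+//!         _ => vec![],
+//!     }
+//! }
+//! ```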
+//!
+//! Specialization for the whole matrix is done in [`Matrix::specialize_constructor`]. Note that
+//! or-patterns in the first column are expanded before being stored in the matrix. Specialization
+//! for a single patstack is done from a combination of [`Constructor::is_covered_by`] and
+//! [`PatStack::pop_head_constructor`]. The internals of how it's done mostly live in the
+//! [`Fields`] struct.
+//!
+//!
+//! # Computing usefulness
+//!
+//! We now have all we need to compute usefulness. The inputs to usefulness are a list of
+//! pattern-stacks `p_1 ... p_n` (one per row), and a new pattern-stack `q`. The paper and this
+//! file call the list of patstacks a _matrix_. They must all have the same number of columns and
+//! the patterns in a given column must all have the same type. `usefulness` returns a (possibly
+//! empty) list of witnesses of usefulness. These witnesses will also be pattern-stacks.
+//!
+//! - base case: `n_columns == 0`.
+//! Since a pattern-stack functions like a tuple of patterns, an empty one functions like the
+//! unit type. Thus `q` is useful iff there are no rows above it, i.e. if `n == 0`.
+//!
+//! - inductive case: `n_columns > 0`.
+//! We need a way to list the constructors we want to try. We will be more clever in the next
+//! section but for now assume we list all value constructors for the type of the first column.
+//!
+//! - for each such ctor `c`:
+//!
+//! - for each `q'` returned by `specialize(c, q)`:
+//!
+//! - we compute `usefulness(specialize(c, p_1) ... specialize(c, p_n), q')`
+//!
+//! - for each witness found, we revert specialization by pushing the constructor `c` on top.
+//!
+//! - We return the concatenation of all the witnesses found, if any.
+//!
+//! Example:
+//! ```ignore (illustrative)
+//! [Some(true)] // p_1
+//! [None] // p_2
+//! [Some(_)] // q
+//! //==>> try `None`: `specialize(None, q)` returns nothing
+//! //==>> try `Some`: `specialize(Some, q)` returns a single row
+//! [true] // p_1'
+//! [_] // q'
+//! //==>> try `true`: `specialize(true, q')` returns a single row
+//! [] // p_1''
+//! [] // q''
+//! //==>> base case; `n != 0` so `q''` is not useful.
+//! //==>> go back up a step
+//! [true] // p_1'
+//! [_] // q'
+//! //==>> try `false`: `specialize(false, q')` returns a single row
+//! [] // q''
+//! //==>> base case; `n == 0` so `q''` is useful. We return the single witness `[]`
+//! witnesses:
+//! []
+//! //==>> undo the specialization with `false`
+//! witnesses:
+//! [false]
+//! //==>> undo the specialization with `Some`
+//! witnesses:
+//! [Some(false)]
+//! //==>> we have tried all the constructors. The output is the single witness `[Some(false)]`.
+//! ```
+//!
+//! This computation is done in [`is_useful`]. In practice we don't care about the list of
+//! witnesses when computing reachability; we only need to know whether any exist. We do keep the
+//! witnesses when computing exhaustiveness to report them to the user.
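+//!
+//! In terms of the toy model above, the recursion reads roughly like this. It is a sketch only:
+//! it ignores or-patterns, guards and constructor splitting, and `constructors_to_try`,
+//! `specialize_stack`, `Witness::empty` and `Witness::apply_constructor` are stand-ins for the
+//! machinery described in this file.
+//! ```ignore (illustrative)
+//! fn usefulness(rows: &[Vec<Pat>], q: &[Pat]) -> Vec<Witness> {
+//!     if q.is_empty() {
+//!         // Base case: `q` is useful iff no row remains to cover it.
+//!         return if rows.is_empty() { vec![Witness::empty()] } else { vec![] };
+//!     }
+//!     let mut witnesses = vec![];
+//!     for ctor in constructors_to_try(&q[0]) {
+//!         // Specialize the matrix and `q`, recurse, then revert the
+//!         // specialization on every witness found.
+//!         let spec_rows: Vec<_> =
+//!             rows.iter().flat_map(|r| specialize_stack(&ctor, r)).collect();
+//!         for q2 in specialize_stack(&ctor, q) {
+//!             for w in usefulness(&spec_rows, &q2) {
+//!                 witnesses.push(w.apply_constructor(&ctor));
+//!             }
+//!         }
+//!     }
+//!     witnesses
+//! }
+//! ```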
+//!
+//!
+//! # Making usefulness tractable: constructor splitting
+//!
+//! We're missing one last detail: which constructors do we list? Naively listing all value
+//! constructors cannot work for types like `u64` or `&str`, so we need to be more clever. The
+//! first obvious insight is that we only want to list constructors that are covered by the head
+//! constructor of `q`. If it's a value constructor, we only try that one. If it's a pattern-only
+//! constructor, we use the final clever idea for this algorithm: _constructor splitting_, where we
+//! group together constructors that behave the same.
+//!
+//! The details are not necessary to understand this file, so we explain them in
+//! [`super::deconstruct_pat`]. Splitting is done by the [`Constructor::split`] function.
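+//!
+//! As a quick illustration (the exact boundaries are computed in [`super::deconstruct_pat`], not
+//! here): when checking a wildcard against rows whose heads are the `u8` ranges `0..=10` and
+//! `20..=30`, we do not try all 256 value ctors. We split `_` into a handful of ranges that each
+//! behave uniformly with respect to the rows, roughly:
+//! ```ignore (illustrative)
+//! 0..=10, 11..=19, 20..=30, 31..=u8::MAX
+//! ```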
+
+use self::ArmType::*;
+use self::Usefulness::*;
+
+use super::check_match::{joined_uncovered_patterns, pattern_not_covered_label};
+use super::deconstruct_pat::{Constructor, DeconstructedPat, Fields, SplitWildcard};
+
+use rustc_data_structures::captures::Captures;
+
+use rustc_arena::TypedArena;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_hir::def_id::DefId;
+use rustc_hir::HirId;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_session::lint::builtin::NON_EXHAUSTIVE_OMITTED_PATTERNS;
+use rustc_span::{Span, DUMMY_SP};
+
+use smallvec::{smallvec, SmallVec};
+use std::fmt;
+use std::iter::once;
+
+pub(crate) struct MatchCheckCtxt<'p, 'tcx> {
+ pub(crate) tcx: TyCtxt<'tcx>,
+ /// The module in which the match occurs. This is necessary for
+    /// checking inhabitedness of types because whether a type is (visibly)
+ /// inhabited can depend on whether it was defined in the current module or
+ /// not. E.g., `struct Foo { _private: ! }` cannot be seen to be empty
+ /// outside its module and should not be matchable with an empty match statement.
+ pub(crate) module: DefId,
+ pub(crate) param_env: ty::ParamEnv<'tcx>,
+ pub(crate) pattern_arena: &'p TypedArena<DeconstructedPat<'p, 'tcx>>,
+}
+
+impl<'a, 'tcx> MatchCheckCtxt<'a, 'tcx> {
+ pub(super) fn is_uninhabited(&self, ty: Ty<'tcx>) -> bool {
+ if self.tcx.features().exhaustive_patterns {
+ self.tcx.is_ty_uninhabited_from(self.module, ty, self.param_env)
+ } else {
+ false
+ }
+ }
+
+ /// Returns whether the given type is an enum from another crate declared `#[non_exhaustive]`.
+ pub(super) fn is_foreign_non_exhaustive_enum(&self, ty: Ty<'tcx>) -> bool {
+ match ty.kind() {
+ ty::Adt(def, ..) => {
+ def.is_enum() && def.is_variant_list_non_exhaustive() && !def.did().is_local()
+ }
+ _ => false,
+ }
+ }
+}
+
+#[derive(Copy, Clone)]
+pub(super) struct PatCtxt<'a, 'p, 'tcx> {
+ pub(super) cx: &'a MatchCheckCtxt<'p, 'tcx>,
+ /// Type of the current column under investigation.
+ pub(super) ty: Ty<'tcx>,
+ /// Span of the current pattern under investigation.
+ pub(super) span: Span,
+    /// Whether the current pattern is the whole pattern as found in a match arm, or a
+    /// subpattern.
+ pub(super) is_top_level: bool,
+ /// Whether the current pattern is from a `non_exhaustive` enum.
+ pub(super) is_non_exhaustive: bool,
+}
+
+impl<'a, 'p, 'tcx> fmt::Debug for PatCtxt<'a, 'p, 'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("PatCtxt").field("ty", &self.ty).finish()
+ }
+}
+
+/// A row of a matrix. Rows of len 1 are very common, which is why `SmallVec<[_; 2]>`
+/// works well.
+#[derive(Clone)]
+struct PatStack<'p, 'tcx> {
+ pats: SmallVec<[&'p DeconstructedPat<'p, 'tcx>; 2]>,
+}
+
+impl<'p, 'tcx> PatStack<'p, 'tcx> {
+ fn from_pattern(pat: &'p DeconstructedPat<'p, 'tcx>) -> Self {
+ Self::from_vec(smallvec![pat])
+ }
+
+ fn from_vec(vec: SmallVec<[&'p DeconstructedPat<'p, 'tcx>; 2]>) -> Self {
+ PatStack { pats: vec }
+ }
+
+ fn is_empty(&self) -> bool {
+ self.pats.is_empty()
+ }
+
+ fn len(&self) -> usize {
+ self.pats.len()
+ }
+
+ fn head(&self) -> &'p DeconstructedPat<'p, 'tcx> {
+ self.pats[0]
+ }
+
+ fn iter(&self) -> impl Iterator<Item = &DeconstructedPat<'p, 'tcx>> {
+ self.pats.iter().copied()
+ }
+
+ // Recursively expand the first pattern into its subpatterns. Only useful if the pattern is an
+ // or-pattern. Panics if `self` is empty.
+ fn expand_or_pat<'a>(&'a self) -> impl Iterator<Item = PatStack<'p, 'tcx>> + Captures<'a> {
+ self.head().iter_fields().map(move |pat| {
+ let mut new_patstack = PatStack::from_pattern(pat);
+ new_patstack.pats.extend_from_slice(&self.pats[1..]);
+ new_patstack
+ })
+ }
+
+ /// This computes `S(self.head().ctor(), self)`. See top of the file for explanations.
+ ///
+    /// Structure patterns with a partial wild pattern (`Foo { a: 42, .. }`) have their missing
+ /// fields filled with wild patterns.
+ ///
+ /// This is roughly the inverse of `Constructor::apply`.
+ fn pop_head_constructor(
+ &self,
+ pcx: &PatCtxt<'_, 'p, 'tcx>,
+ ctor: &Constructor<'tcx>,
+ ) -> PatStack<'p, 'tcx> {
+ // We pop the head pattern and push the new fields extracted from the arguments of
+ // `self.head()`.
+ let mut new_fields: SmallVec<[_; 2]> = self.head().specialize(pcx, ctor);
+ new_fields.extend_from_slice(&self.pats[1..]);
+ PatStack::from_vec(new_fields)
+ }
+}
+
+/// Pretty-printing for matrix row.
+impl<'p, 'tcx> fmt::Debug for PatStack<'p, 'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "+")?;
+ for pat in self.iter() {
+ write!(f, " {:?} +", pat)?;
+ }
+ Ok(())
+ }
+}
+
+/// A 2D matrix.
+#[derive(Clone)]
+pub(super) struct Matrix<'p, 'tcx> {
+ patterns: Vec<PatStack<'p, 'tcx>>,
+}
+
+impl<'p, 'tcx> Matrix<'p, 'tcx> {
+ fn empty() -> Self {
+ Matrix { patterns: vec![] }
+ }
+
+    /// Number of columns of this matrix. `None` if the matrix is empty.
+ pub(super) fn column_count(&self) -> Option<usize> {
+ self.patterns.get(0).map(|r| r.len())
+ }
+
+ /// Pushes a new row to the matrix. If the row starts with an or-pattern, this recursively
+ /// expands it.
+ fn push(&mut self, row: PatStack<'p, 'tcx>) {
+ if !row.is_empty() && row.head().is_or_pat() {
+ self.patterns.extend(row.expand_or_pat());
+ } else {
+ self.patterns.push(row);
+ }
+ }
+
+    /// Iterates over the first component of each row.
+ fn heads<'a>(
+ &'a self,
+ ) -> impl Iterator<Item = &'p DeconstructedPat<'p, 'tcx>> + Clone + Captures<'a> {
+ self.patterns.iter().map(|r| r.head())
+ }
+
+ /// This computes `S(constructor, self)`. See top of the file for explanations.
+ fn specialize_constructor(
+ &self,
+ pcx: &PatCtxt<'_, 'p, 'tcx>,
+ ctor: &Constructor<'tcx>,
+ ) -> Matrix<'p, 'tcx> {
+ let mut matrix = Matrix::empty();
+ for row in &self.patterns {
+ if ctor.is_covered_by(pcx, row.head().ctor()) {
+ let new_row = row.pop_head_constructor(pcx, ctor);
+ matrix.push(new_row);
+ }
+ }
+ matrix
+ }
+}
+
+/// Pretty-printer for matrices of patterns, example:
+///
+/// ```text
+/// + _ + [] +
+/// + true + [First] +
+/// + true + [Second(true)] +
+/// + false + [_] +
+/// + _ + [_, _, tail @ ..] +
+/// ```
+impl<'p, 'tcx> fmt::Debug for Matrix<'p, 'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "\n")?;
+
+ let Matrix { patterns: m, .. } = self;
+ let pretty_printed_matrix: Vec<Vec<String>> =
+ m.iter().map(|row| row.iter().map(|pat| format!("{:?}", pat)).collect()).collect();
+
+ let column_count = m.iter().map(|row| row.len()).next().unwrap_or(0);
+ assert!(m.iter().all(|row| row.len() == column_count));
+ let column_widths: Vec<usize> = (0..column_count)
+ .map(|col| pretty_printed_matrix.iter().map(|row| row[col].len()).max().unwrap_or(0))
+ .collect();
+
+ for row in pretty_printed_matrix {
+ write!(f, "+")?;
+ for (column, pat_str) in row.into_iter().enumerate() {
+ write!(f, " ")?;
+ write!(f, "{:1$}", pat_str, column_widths[column])?;
+ write!(f, " +")?;
+ }
+ write!(f, "\n")?;
+ }
+ Ok(())
+ }
+}
+
+/// This carries the results of computing usefulness, as described at the top of the file. When
+/// checking usefulness of a match branch, we use the `NoWitnesses` variant, which also keeps track
+/// of potential unreachable sub-patterns (in the presence of or-patterns). When checking
+/// exhaustiveness of a whole match, we use the `WithWitnesses` variant, which carries a list of
+/// witnesses of non-exhaustiveness when there are any.
+/// Which variant to use is dictated by `ArmType`.
+#[derive(Debug)]
+enum Usefulness<'p, 'tcx> {
+ /// If we don't care about witnesses, simply remember if the pattern was useful.
+ NoWitnesses { useful: bool },
+ /// Carries a list of witnesses of non-exhaustiveness. If empty, indicates that the whole
+ /// pattern is unreachable.
+ WithWitnesses(Vec<Witness<'p, 'tcx>>),
+}
+
+impl<'p, 'tcx> Usefulness<'p, 'tcx> {
+ fn new_useful(preference: ArmType) -> Self {
+ match preference {
+ // A single (empty) witness of reachability.
+ FakeExtraWildcard => WithWitnesses(vec![Witness(vec![])]),
+ RealArm => NoWitnesses { useful: true },
+ }
+ }
+
+ fn new_not_useful(preference: ArmType) -> Self {
+ match preference {
+ FakeExtraWildcard => WithWitnesses(vec![]),
+ RealArm => NoWitnesses { useful: false },
+ }
+ }
+
+ fn is_useful(&self) -> bool {
+ match self {
+ Usefulness::NoWitnesses { useful } => *useful,
+ Usefulness::WithWitnesses(witnesses) => !witnesses.is_empty(),
+ }
+ }
+
+ /// Combine usefulnesses from two branches. This is an associative operation.
+ fn extend(&mut self, other: Self) {
+ match (&mut *self, other) {
+ (WithWitnesses(_), WithWitnesses(o)) if o.is_empty() => {}
+ (WithWitnesses(s), WithWitnesses(o)) if s.is_empty() => *self = WithWitnesses(o),
+ (WithWitnesses(s), WithWitnesses(o)) => s.extend(o),
+ (NoWitnesses { useful: s_useful }, NoWitnesses { useful: o_useful }) => {
+ *s_useful = *s_useful || o_useful
+ }
+ _ => unreachable!(),
+ }
+ }
+
+    /// After calculating usefulness following a specialization, call this to reconstruct a usefulness
+ /// that makes sense for the matrix pre-specialization. This new usefulness can then be merged
+ /// with the results of specializing with the other constructors.
+ fn apply_constructor(
+ self,
+ pcx: &PatCtxt<'_, 'p, 'tcx>,
+ matrix: &Matrix<'p, 'tcx>, // used to compute missing ctors
+ ctor: &Constructor<'tcx>,
+ ) -> Self {
+ match self {
+ NoWitnesses { .. } => self,
+ WithWitnesses(ref witnesses) if witnesses.is_empty() => self,
+ WithWitnesses(witnesses) => {
+ let new_witnesses = if let Constructor::Missing { .. } = ctor {
+ // We got the special `Missing` constructor, so each of the missing constructors
+ // gives a new pattern that is not caught by the match. We list those patterns.
+ let new_patterns = if pcx.is_non_exhaustive {
+ // Here we don't want the user to try to list all variants, we want them to add
+ // a wildcard, so we only suggest that.
+ vec![DeconstructedPat::wildcard(pcx.ty)]
+ } else {
+ let mut split_wildcard = SplitWildcard::new(pcx);
+ split_wildcard.split(pcx, matrix.heads().map(DeconstructedPat::ctor));
+
+ // This lets us know if we skipped any variants because they are marked
+                        // `doc(hidden)` or are behind an unstable feature gate (only stdlib types).
+ let mut hide_variant_show_wild = false;
+ // Construct for each missing constructor a "wild" version of this
+ // constructor, that matches everything that can be built with
+ // it. For example, if `ctor` is a `Constructor::Variant` for
+ // `Option::Some`, we get the pattern `Some(_)`.
+ let mut new: Vec<DeconstructedPat<'_, '_>> = split_wildcard
+ .iter_missing(pcx)
+ .filter_map(|missing_ctor| {
+ // Check if this variant is marked `doc(hidden)`
+ if missing_ctor.is_doc_hidden_variant(pcx)
+ || missing_ctor.is_unstable_variant(pcx)
+ {
+ hide_variant_show_wild = true;
+ return None;
+ }
+ Some(DeconstructedPat::wild_from_ctor(pcx, missing_ctor.clone()))
+ })
+ .collect();
+
+ if hide_variant_show_wild {
+ new.push(DeconstructedPat::wildcard(pcx.ty));
+ }
+
+ new
+ };
+
+ witnesses
+ .into_iter()
+ .flat_map(|witness| {
+ new_patterns.iter().map(move |pat| {
+ Witness(
+ witness
+ .0
+ .iter()
+ .chain(once(pat))
+ .map(DeconstructedPat::clone_and_forget_reachability)
+ .collect(),
+ )
+ })
+ })
+ .collect()
+ } else {
+ witnesses
+ .into_iter()
+ .map(|witness| witness.apply_constructor(pcx, &ctor))
+ .collect()
+ };
+ WithWitnesses(new_witnesses)
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+enum ArmType {
+ FakeExtraWildcard,
+ RealArm,
+}
+
+/// A witness of non-exhaustiveness for error reporting, represented
+/// as a list of patterns (in reverse order of construction) with
+/// wildcards inside to represent elements that can take any inhabitant
+/// of the type as a value.
+///
+/// A witness against a list of patterns should have the same types
+/// and length as the pattern matched against. Because Rust `match`
+/// is always against a single pattern, at the end the witness will
+/// have length 1, but in the middle of the algorithm, it can contain
+/// multiple patterns.
+///
+/// For example, if we are constructing a witness for the match against
+///
+/// ```compile_fail,E0004
+/// # #![feature(type_ascription)]
+/// struct Pair(Option<(u32, u32)>, bool);
+/// # fn foo(p: Pair) {
+/// match (p: Pair) {
+/// Pair(None, _) => {}
+/// Pair(_, false) => {}
+/// }
+/// # }
+/// ```
+///
+/// We'll perform the following steps:
+/// 1. Start with an empty witness
+/// `Witness(vec![])`
+/// 2. Push a witness `true` against the `false`
+/// `Witness(vec![true])`
+/// 3. Push a witness `Some(_)` against the `None`
+/// `Witness(vec![true, Some(_)])`
+/// 4. Apply the `Pair` constructor to the witnesses
+/// `Witness(vec![Pair(Some(_), true)])`
+///
+/// The final `Pair(Some(_), true)` is then the resulting witness.
+#[derive(Debug)]
+pub(crate) struct Witness<'p, 'tcx>(Vec<DeconstructedPat<'p, 'tcx>>);
+
+impl<'p, 'tcx> Witness<'p, 'tcx> {
+ /// Asserts that the witness contains a single pattern, and returns it.
+ fn single_pattern(self) -> DeconstructedPat<'p, 'tcx> {
+ assert_eq!(self.0.len(), 1);
+ self.0.into_iter().next().unwrap()
+ }
+
+ /// Constructs a partial witness for a pattern given a list of
+ /// patterns expanded by the specialization step.
+ ///
+ /// When a pattern P is discovered to be useful, this function is used bottom-up
+ /// to reconstruct a complete witness, e.g., a pattern P' that covers a subset
+ /// of values, V, where each value in that set is not covered by any previously
+ /// used patterns and is covered by the pattern P'. Examples:
+ ///
+ /// left_ty: tuple of 3 elements
+ /// pats: [10, 20, _] => (10, 20, _)
+ ///
+ /// left_ty: struct X { a: (bool, &'static str), b: usize}
+ /// pats: [(false, "foo"), 42] => X { a: (false, "foo"), b: 42 }
+ fn apply_constructor(mut self, pcx: &PatCtxt<'_, 'p, 'tcx>, ctor: &Constructor<'tcx>) -> Self {
+ let pat = {
+ let len = self.0.len();
+ let arity = ctor.arity(pcx);
+ let pats = self.0.drain((len - arity)..).rev();
+ let fields = Fields::from_iter(pcx.cx, pats);
+ DeconstructedPat::new(ctor.clone(), fields, pcx.ty, DUMMY_SP)
+ };
+
+ self.0.push(pat);
+
+ self
+ }
+}
+
+/// Report that a match of a `non_exhaustive` enum marked with `non_exhaustive_omitted_patterns`
+/// is not exhaustive enough.
+///
+/// NB: The partner lint for structs lives in `compiler/rustc_typeck/src/check/pat.rs`.
+fn lint_non_exhaustive_omitted_patterns<'p, 'tcx>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ scrut_ty: Ty<'tcx>,
+ sp: Span,
+ hir_id: HirId,
+ witnesses: Vec<DeconstructedPat<'p, 'tcx>>,
+) {
+ let joined_patterns = joined_uncovered_patterns(cx, &witnesses);
+ cx.tcx.struct_span_lint_hir(NON_EXHAUSTIVE_OMITTED_PATTERNS, hir_id, sp, |build| {
+ let mut lint = build.build("some variants are not matched explicitly");
+ lint.span_label(sp, pattern_not_covered_label(&witnesses, &joined_patterns));
+ lint.help(
+ "ensure that all variants are matched explicitly by adding the suggested match arms",
+ );
+ lint.note(&format!(
+ "the matched value is of type `{}` and the `non_exhaustive_omitted_patterns` attribute was found",
+ scrut_ty,
+ ));
+ lint.emit();
+ });
+}
+
+/// Algorithm from <http://moscova.inria.fr/~maranget/papers/warn/index.html>.
+/// The algorithm from the paper has been modified to correctly handle empty
+/// types. The changes are:
+/// (0) We don't exit early if the pattern matrix has zero rows. We just
+/// continue to recurse over columns.
+/// (1) all_constructors will only return constructors that are statically
+/// possible. E.g., it will only return `Ok` for `Result<T, !>`.
+///
+/// This finds whether a (row) vector `v` of patterns is 'useful' in relation
+/// to a set of such vectors `m` - this is defined as there being a set of
+/// inputs that will match `v` but not any of the sets in `m`.
+///
+/// All the patterns at each column of the `matrix ++ v` matrix must have the same type.
+///
+/// This is used both for reachability checking (if a pattern isn't useful in
+/// relation to preceding patterns, it is not reachable) and exhaustiveness
+/// checking (if a wildcard pattern is useful in relation to a matrix, the
+/// matrix isn't exhaustive).
+///
+/// `is_under_guard` indicates whether the pattern has a guard. If it
+/// does, the pattern must not be inserted into the matrix. This shouldn't be
+/// relied on for soundness.
+#[instrument(level = "debug", skip(cx, matrix, hir_id))]
+fn is_useful<'p, 'tcx>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ matrix: &Matrix<'p, 'tcx>,
+ v: &PatStack<'p, 'tcx>,
+ witness_preference: ArmType,
+ hir_id: HirId,
+ is_under_guard: bool,
+ is_top_level: bool,
+) -> Usefulness<'p, 'tcx> {
+ debug!(?matrix, ?v);
+ let Matrix { patterns: rows, .. } = matrix;
+
+ // The base case. We are pattern-matching on () and the return value is
+ // based on whether our matrix has a row or not.
+ // NOTE: This could potentially be optimized by checking rows.is_empty()
+ // first and then, if v is non-empty, the return value is based on whether
+ // the type of the tuple we're checking is inhabited or not.
+ if v.is_empty() {
+ let ret = if rows.is_empty() {
+ Usefulness::new_useful(witness_preference)
+ } else {
+ Usefulness::new_not_useful(witness_preference)
+ };
+ debug!(?ret);
+ return ret;
+ }
+
+ debug_assert!(rows.iter().all(|r| r.len() == v.len()));
+
+ // If the first pattern is an or-pattern, expand it.
+ let mut ret = Usefulness::new_not_useful(witness_preference);
+ if v.head().is_or_pat() {
+ debug!("expanding or-pattern");
+ // We try each or-pattern branch in turn.
+ let mut matrix = matrix.clone();
+ for v in v.expand_or_pat() {
+ debug!(?v);
+ let usefulness = ensure_sufficient_stack(|| {
+ is_useful(cx, &matrix, &v, witness_preference, hir_id, is_under_guard, false)
+ });
+ debug!(?usefulness);
+ ret.extend(usefulness);
+ // If pattern has a guard don't add it to the matrix.
+ if !is_under_guard {
+ // We push the already-seen patterns into the matrix in order to detect redundant
+ // branches like `Some(_) | Some(0)`.
+ matrix.push(v);
+ }
+ }
+ } else {
+ let ty = v.head().ty();
+ let is_non_exhaustive = cx.is_foreign_non_exhaustive_enum(ty);
+ debug!("v.head: {:?}, v.span: {:?}", v.head(), v.head().span());
+ let pcx = &PatCtxt { cx, ty, span: v.head().span(), is_top_level, is_non_exhaustive };
+
+ let v_ctor = v.head().ctor();
+ debug!(?v_ctor);
+ if let Constructor::IntRange(ctor_range) = &v_ctor {
+ // Lint on likely incorrect range patterns (#63987)
+ ctor_range.lint_overlapping_range_endpoints(
+ pcx,
+ matrix.heads(),
+ matrix.column_count().unwrap_or(0),
+ hir_id,
+ )
+ }
+ // We split the head constructor of `v`.
+ let split_ctors = v_ctor.split(pcx, matrix.heads().map(DeconstructedPat::ctor));
+ let is_non_exhaustive_and_wild = is_non_exhaustive && v_ctor.is_wildcard();
+ // For each constructor, we compute whether there's a value that starts with it that would
+ // witness the usefulness of `v`.
+ let start_matrix = &matrix;
+ for ctor in split_ctors {
+ debug!("specialize({:?})", ctor);
+ // We cache the result of `Fields::wildcards` because it is used a lot.
+ let spec_matrix = start_matrix.specialize_constructor(pcx, &ctor);
+ let v = v.pop_head_constructor(pcx, &ctor);
+ let usefulness = ensure_sufficient_stack(|| {
+ is_useful(cx, &spec_matrix, &v, witness_preference, hir_id, is_under_guard, false)
+ });
+ let usefulness = usefulness.apply_constructor(pcx, start_matrix, &ctor);
+
+ // When all the conditions are met we have a match with a `non_exhaustive` enum
+ // that has the potential to trigger the `non_exhaustive_omitted_patterns` lint.
+            // To understand the workings, check out `Constructor::split` and `SplitWildcard::new/into_ctors`.
+ if is_non_exhaustive_and_wild
+ // We check that the match has a wildcard pattern and that that wildcard is useful,
+ // meaning there are variants that are covered by the wildcard. Without the check
+ // for `witness_preference` the lint would trigger on `if let NonExhaustiveEnum::A = foo {}`
+ && usefulness.is_useful() && matches!(witness_preference, RealArm)
+ && matches!(
+ &ctor,
+ Constructor::Missing { nonexhaustive_enum_missing_real_variants: true }
+ )
+ {
+ let patterns = {
+ let mut split_wildcard = SplitWildcard::new(pcx);
+ split_wildcard.split(pcx, matrix.heads().map(DeconstructedPat::ctor));
+ // Construct for each missing constructor a "wild" version of this
+ // constructor, that matches everything that can be built with
+ // it. For example, if `ctor` is a `Constructor::Variant` for
+ // `Option::Some`, we get the pattern `Some(_)`.
+ split_wildcard
+ .iter_missing(pcx)
+ // Filter out the `NonExhaustive` because we want to list only real
+ // variants. Also remove any unstable feature gated variants.
+ // Because of how we computed `nonexhaustive_enum_missing_real_variants`,
+ // this will not return an empty `Vec`.
+ .filter(|c| !(c.is_non_exhaustive() || c.is_unstable_variant(pcx)))
+ .cloned()
+ .map(|missing_ctor| DeconstructedPat::wild_from_ctor(pcx, missing_ctor))
+ .collect::<Vec<_>>()
+ };
+
+ lint_non_exhaustive_omitted_patterns(pcx.cx, pcx.ty, pcx.span, hir_id, patterns);
+ }
+
+ ret.extend(usefulness);
+ }
+ }
+
+ if ret.is_useful() {
+ v.head().set_reachable();
+ }
+
+ debug!(?ret);
+ ret
+}
+
+/// The arm of a match expression.
+#[derive(Clone, Copy, Debug)]
+pub(crate) struct MatchArm<'p, 'tcx> {
+ /// The pattern must have been lowered through `check_match::MatchVisitor::lower_pattern`.
+ pub(crate) pat: &'p DeconstructedPat<'p, 'tcx>,
+ pub(crate) hir_id: HirId,
+ pub(crate) has_guard: bool,
+}
+
+/// Indicates whether or not a given arm is reachable.
+#[derive(Clone, Debug)]
+pub(crate) enum Reachability {
+ /// The arm is reachable. This additionally carries a set of or-pattern branches that have been
+ /// found to be unreachable despite the overall arm being reachable. Used only in the presence
+ /// of or-patterns, otherwise it stays empty.
+ Reachable(Vec<Span>),
+ /// The arm is unreachable.
+ Unreachable,
+}
+
+/// The output of checking a match for exhaustiveness and arm reachability.
+pub(crate) struct UsefulnessReport<'p, 'tcx> {
+ /// For each arm of the input, whether that arm is reachable after the arms above it.
+ pub(crate) arm_usefulness: Vec<(MatchArm<'p, 'tcx>, Reachability)>,
+ /// If the match is exhaustive, this is empty. If not, this contains witnesses for the lack of
+ /// exhaustiveness.
+ pub(crate) non_exhaustiveness_witnesses: Vec<DeconstructedPat<'p, 'tcx>>,
+}
+
+/// The entrypoint for the usefulness algorithm. Computes whether a match is exhaustive and which
+/// of its arms are reachable.
+///
+/// Note: the input patterns must have been lowered through
+/// `check_match::MatchVisitor::lower_pattern`.
+#[instrument(skip(cx, arms), level = "debug")]
+pub(crate) fn compute_match_usefulness<'p, 'tcx>(
+ cx: &MatchCheckCtxt<'p, 'tcx>,
+ arms: &[MatchArm<'p, 'tcx>],
+ scrut_hir_id: HirId,
+ scrut_ty: Ty<'tcx>,
+) -> UsefulnessReport<'p, 'tcx> {
+ let mut matrix = Matrix::empty();
+ let arm_usefulness: Vec<_> = arms
+ .iter()
+ .copied()
+ .map(|arm| {
+ debug!(?arm);
+ let v = PatStack::from_pattern(arm.pat);
+ is_useful(cx, &matrix, &v, RealArm, arm.hir_id, arm.has_guard, true);
+ if !arm.has_guard {
+ matrix.push(v);
+ }
+ let reachability = if arm.pat.is_reachable() {
+ Reachability::Reachable(arm.pat.unreachable_spans())
+ } else {
+ Reachability::Unreachable
+ };
+ (arm, reachability)
+ })
+ .collect();
+
+ let wild_pattern = cx.pattern_arena.alloc(DeconstructedPat::wildcard(scrut_ty));
+ let v = PatStack::from_pattern(wild_pattern);
+ let usefulness = is_useful(cx, &matrix, &v, FakeExtraWildcard, scrut_hir_id, false, true);
+ let non_exhaustiveness_witnesses = match usefulness {
+ WithWitnesses(pats) => pats.into_iter().map(|w| w.single_pattern()).collect(),
+ NoWitnesses { .. } => bug!(),
+ };
+ UsefulnessReport { arm_usefulness, non_exhaustiveness_witnesses }
+}
diff --git a/compiler/rustc_mir_build/src/thir/util.rs b/compiler/rustc_mir_build/src/thir/util.rs
new file mode 100644
index 000000000..c58ed1ac0
--- /dev/null
+++ b/compiler/rustc_mir_build/src/thir/util.rs
@@ -0,0 +1,31 @@
+use rustc_hir as hir;
+use rustc_middle::ty::{self, CanonicalUserType, TyCtxt, UserType};
+
+pub(crate) trait UserAnnotatedTyHelpers<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx>;
+
+ fn typeck_results(&self) -> &ty::TypeckResults<'tcx>;
+
+ /// Looks up the type associated with this hir-id and applies the
+ /// user-given substitutions; the hir-id must map to a suitable
+ /// type.
+ fn user_substs_applied_to_ty_of_hir_id(
+ &self,
+ hir_id: hir::HirId,
+ ) -> Option<CanonicalUserType<'tcx>> {
+ let user_provided_types = self.typeck_results().user_provided_types();
+ let mut user_ty = *user_provided_types.get(hir_id)?;
+ debug!("user_subts_applied_to_ty_of_hir_id: user_ty={:?}", user_ty);
+ let ty = self.typeck_results().node_type(hir_id);
+ match ty.kind() {
+ ty::Adt(adt_def, ..) => {
+ if let UserType::TypeOf(ref mut did, _) = &mut user_ty.value {
+ *did = adt_def.did();
+ }
+ Some(user_ty)
+ }
+ ty::FnDef(..) => Some(user_ty),
+ _ => bug!("ty: {:?} should not have user provided type {:?} recorded ", ty, user_ty),
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/Cargo.toml b/compiler/rustc_mir_dataflow/Cargo.toml
new file mode 100644
index 000000000..baf9735fb
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "rustc_mir_dataflow"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+polonius-engine = "0.13.0"
+regex = "1"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_target = { path = "../rustc_target" }
+rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_mir_dataflow/src/drop_flag_effects.rs b/compiler/rustc_mir_dataflow/src/drop_flag_effects.rs
new file mode 100644
index 000000000..f102872cd
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/drop_flag_effects.rs
@@ -0,0 +1,268 @@
+use crate::elaborate_drops::DropFlagState;
+use rustc_middle::mir::{self, Body, Location};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use super::indexes::MovePathIndex;
+use super::move_paths::{InitKind, LookupResult, MoveData};
+use super::MoveDataParamEnv;
+
+pub fn move_path_children_matching<'tcx, F>(
+ move_data: &MoveData<'tcx>,
+ path: MovePathIndex,
+ mut cond: F,
+) -> Option<MovePathIndex>
+where
+ F: FnMut(mir::PlaceElem<'tcx>) -> bool,
+{
+ let mut next_child = move_data.move_paths[path].first_child;
+ while let Some(child_index) = next_child {
+ let move_path_children = &move_data.move_paths[child_index];
+ if let Some(&elem) = move_path_children.place.projection.last() {
+ if cond(elem) {
+ return Some(child_index);
+ }
+ }
+ next_child = move_path_children.next_sibling;
+ }
+
+ None
+}
+
+/// When enumerating the child fragments of a path, don't recurse into
+/// paths (1) past arrays, slices, and pointers, nor (2) into a type
+/// that implements `Drop`.
+///
+/// Places behind references or arrays are not tracked by elaboration
+/// and are always assumed to be initialized when accessible. As
+/// references and indexes can be reseated, trying to track them can
+/// only lead to trouble.
+///
+/// Places behind ADTs with a `Drop` impl are not tracked by
+/// elaboration since they can never have a drop-flag state that
+/// differs from that of the parent with the Drop impl.
+///
+/// In both cases, the contents can be accessed if and only if
+/// their parents are initialized. This implies for example that there
+/// is no need to maintain separate drop flags to track such state.
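+///
+/// For example, the elements of a local of type `Vec<String>` never get
+/// their own drop flags: `Vec` has a `Drop` impl, so its contents are
+/// initialized exactly when the `Vec` itself is (an illustrative example,
+/// matching the `ty::Adt` arm below).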
+//
+// FIXME: we have to do something for moving slice patterns.
+fn place_contents_drop_state_cannot_differ<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ place: mir::Place<'tcx>,
+) -> bool {
+ let ty = place.ty(body, tcx).ty;
+ match ty.kind() {
+ ty::Array(..) => {
+ debug!(
+ "place_contents_drop_state_cannot_differ place: {:?} ty: {:?} => false",
+ place, ty
+ );
+ false
+ }
+ ty::Slice(..) | ty::Ref(..) | ty::RawPtr(..) => {
+ debug!(
+ "place_contents_drop_state_cannot_differ place: {:?} ty: {:?} refd => true",
+ place, ty
+ );
+ true
+ }
+ ty::Adt(def, _) if (def.has_dtor(tcx) && !def.is_box()) || def.is_union() => {
+ debug!(
+ "place_contents_drop_state_cannot_differ place: {:?} ty: {:?} Drop => true",
+ place, ty
+ );
+ true
+ }
+ _ => false,
+ }
+}
+
+pub fn on_lookup_result_bits<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ move_data: &MoveData<'tcx>,
+ lookup_result: LookupResult,
+ each_child: F,
+) where
+ F: FnMut(MovePathIndex),
+{
+ match lookup_result {
+ LookupResult::Parent(..) => {
+ // access to untracked value - do not touch children
+ }
+ LookupResult::Exact(e) => on_all_children_bits(tcx, body, move_data, e, each_child),
+ }
+}
+
+pub fn on_all_children_bits<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ move_data: &MoveData<'tcx>,
+ move_path_index: MovePathIndex,
+ mut each_child: F,
+) where
+ F: FnMut(MovePathIndex),
+{
+ fn is_terminal_path<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ move_data: &MoveData<'tcx>,
+ path: MovePathIndex,
+ ) -> bool {
+ place_contents_drop_state_cannot_differ(tcx, body, move_data.move_paths[path].place)
+ }
+
+ fn on_all_children_bits<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ move_data: &MoveData<'tcx>,
+ move_path_index: MovePathIndex,
+ each_child: &mut F,
+ ) where
+ F: FnMut(MovePathIndex),
+ {
+ each_child(move_path_index);
+
+ if is_terminal_path(tcx, body, move_data, move_path_index) {
+ return;
+ }
+
+ let mut next_child_index = move_data.move_paths[move_path_index].first_child;
+ while let Some(child_index) = next_child_index {
+ on_all_children_bits(tcx, body, move_data, child_index, each_child);
+ next_child_index = move_data.move_paths[child_index].next_sibling;
+ }
+ }
+ on_all_children_bits(tcx, body, move_data, move_path_index, &mut each_child);
+}
+
+pub fn on_all_drop_children_bits<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ ctxt: &MoveDataParamEnv<'tcx>,
+ path: MovePathIndex,
+ mut each_child: F,
+) where
+ F: FnMut(MovePathIndex),
+{
+ on_all_children_bits(tcx, body, &ctxt.move_data, path, |child| {
+ let place = &ctxt.move_data.move_paths[path].place;
+ let ty = place.ty(body, tcx).ty;
+ debug!("on_all_drop_children_bits({:?}, {:?} : {:?})", path, place, ty);
+
+ let erased_ty = tcx.erase_regions(ty);
+ if erased_ty.needs_drop(tcx, ctxt.param_env) {
+ each_child(child);
+ } else {
+ debug!("on_all_drop_children_bits - skipping")
+ }
+ })
+}
+
+pub fn drop_flag_effects_for_function_entry<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ ctxt: &MoveDataParamEnv<'tcx>,
+ mut callback: F,
+) where
+ F: FnMut(MovePathIndex, DropFlagState),
+{
+ let move_data = &ctxt.move_data;
+ for arg in body.args_iter() {
+ let place = mir::Place::from(arg);
+ let lookup_result = move_data.rev_lookup.find(place.as_ref());
+ on_lookup_result_bits(tcx, body, move_data, lookup_result, |mpi| {
+ callback(mpi, DropFlagState::Present)
+ });
+ }
+}
+
+pub fn drop_flag_effects_for_location<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ ctxt: &MoveDataParamEnv<'tcx>,
+ loc: Location,
+ mut callback: F,
+) where
+ F: FnMut(MovePathIndex, DropFlagState),
+{
+ let move_data = &ctxt.move_data;
+ debug!("drop_flag_effects_for_location({:?})", loc);
+
+ // first, move out of the RHS
+ for mi in &move_data.loc_map[loc] {
+ let path = mi.move_path_index(move_data);
+ debug!("moving out of path {:?}", move_data.move_paths[path]);
+
+ on_all_children_bits(tcx, body, move_data, path, |mpi| callback(mpi, DropFlagState::Absent))
+ }
+
+ debug!("drop_flag_effects: assignment for location({:?})", loc);
+
+ for_location_inits(tcx, body, move_data, loc, |mpi| callback(mpi, DropFlagState::Present));
+}
+
+pub fn for_location_inits<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ move_data: &MoveData<'tcx>,
+ loc: Location,
+ mut callback: F,
+) where
+ F: FnMut(MovePathIndex),
+{
+ for ii in &move_data.init_loc_map[loc] {
+ let init = move_data.inits[*ii];
+ match init.kind {
+ InitKind::Deep => {
+ let path = init.path;
+
+ on_all_children_bits(tcx, body, move_data, path, &mut callback)
+ }
+ InitKind::Shallow => {
+ let mpi = init.path;
+ callback(mpi);
+ }
+ InitKind::NonPanicPathOnly => (),
+ }
+ }
+}
+
+/// Calls `handle_inactive_variant` for each descendant move path of `enum_place` that contains a
+/// `Downcast` to a variant besides the `active_variant`.
+///
+/// NOTE: If there are no move paths corresponding to an inactive variant,
+/// `handle_inactive_variant` will not be called for that variant.
+pub(crate) fn on_all_inactive_variants<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &mir::Body<'tcx>,
+ move_data: &MoveData<'tcx>,
+ enum_place: mir::Place<'tcx>,
+ active_variant: VariantIdx,
+ mut handle_inactive_variant: impl FnMut(MovePathIndex),
+) {
+ let LookupResult::Exact(enum_mpi) = move_data.rev_lookup.find(enum_place.as_ref()) else {
+ return;
+ };
+
+ let enum_path = &move_data.move_paths[enum_mpi];
+ for (variant_mpi, variant_path) in enum_path.children(&move_data.move_paths) {
+ // Because of the way we build the `MoveData` tree, each child should have exactly one more
+ // projection than `enum_place`. This additional projection must be a downcast since the
+ // base is an enum.
+ let (downcast, base_proj) = variant_path.place.projection.split_last().unwrap();
+ assert_eq!(enum_place.projection.len(), base_proj.len());
+
+ let mir::ProjectionElem::Downcast(_, variant_idx) = *downcast else {
+ unreachable!();
+ };
+
+ if variant_idx != active_variant {
+ on_all_children_bits(tcx, body, move_data, variant_mpi, |mpi| {
+ handle_inactive_variant(mpi)
+ });
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
new file mode 100644
index 000000000..c0b0cc3c5
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs
@@ -0,0 +1,1056 @@
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::patch::MirPatch;
+use rustc_middle::mir::*;
+use rustc_middle::traits::Reveal;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+use std::{fmt, iter};
+
+/// The value of an inserted drop flag.
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum DropFlagState {
+ /// The tracked value is initialized and needs to be dropped when leaving its scope.
+ Present,
+
+ /// The tracked value is uninitialized or was moved out of and does not need to be dropped when
+ /// leaving its scope.
+ Absent,
+}
+
+impl DropFlagState {
+ pub fn value(self) -> bool {
+ match self {
+ DropFlagState::Present => true,
+ DropFlagState::Absent => false,
+ }
+ }
+}
+
+/// Describes how/if a value should be dropped.
+#[derive(Debug)]
+pub enum DropStyle {
+    /// The value is already dead at the drop location; no drop will be executed.
+ Dead,
+
+    /// The value is known to always be initialized at the drop location; the drop will always be
+ /// executed.
+ Static,
+
+ /// Whether the value needs to be dropped depends on its drop flag.
+ Conditional,
+
+ /// An "open" drop is one where only the fields of a value are dropped.
+ ///
+ /// For example, this happens when moving out of a struct field: The rest of the struct will be
+ /// dropped in such an "open" drop. It is also used to generate drop glue for the individual
+ /// components of a value, for example for dropping array elements.
+ Open,
+}
+
+/// Which drop flags to affect/check with an operation.
+#[derive(Debug)]
+pub enum DropFlagMode {
+ /// Only affect the top-level drop flag, not that of any contained fields.
+ Shallow,
+ /// Affect all nested drop flags in addition to the top-level one.
+ Deep,
+}
+
+/// Describes if unwinding is necessary and where to unwind to if a panic occurs.
+#[derive(Copy, Clone, Debug)]
+pub enum Unwind {
+ /// Unwind to this block.
+ To(BasicBlock),
+    /// Already in an unwind path; any panic will cause an abort.
+ InCleanup,
+}
+
+impl Unwind {
+ fn is_cleanup(self) -> bool {
+ match self {
+ Unwind::To(..) => false,
+ Unwind::InCleanup => true,
+ }
+ }
+
+ fn into_option(self) -> Option<BasicBlock> {
+ match self {
+ Unwind::To(bb) => Some(bb),
+ Unwind::InCleanup => None,
+ }
+ }
+
+ fn map<F>(self, f: F) -> Self
+ where
+ F: FnOnce(BasicBlock) -> BasicBlock,
+ {
+ match self {
+ Unwind::To(bb) => Unwind::To(f(bb)),
+ Unwind::InCleanup => Unwind::InCleanup,
+ }
+ }
+}
+
+pub trait DropElaborator<'a, 'tcx>: fmt::Debug {
+ /// The type representing paths that can be moved out of.
+ ///
+ /// Users can move out of individual fields of a struct, such as `a.b.c`. This type is used to
+ /// represent such move paths. Sometimes tracking individual move paths is not necessary, in
+ /// which case this may be set to (for example) `()`.
+ type Path: Copy + fmt::Debug;
+
+ // Accessors
+
+ fn patch(&mut self) -> &mut MirPatch<'tcx>;
+ fn body(&self) -> &'a Body<'tcx>;
+ fn tcx(&self) -> TyCtxt<'tcx>;
+ fn param_env(&self) -> ty::ParamEnv<'tcx>;
+
+ // Drop logic
+
+ /// Returns how `path` should be dropped, given `mode`.
+ fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle;
+
+ /// Returns the drop flag of `path` as a MIR `Operand` (or `None` if `path` has no drop flag).
+ fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>>;
+
+ /// Modifies the MIR patch so that the drop flag of `path` (if any) is cleared at `location`.
+ ///
+ /// If `mode` is deep, drop flags of all child paths should also be cleared by inserting
+ /// additional statements.
+ fn clear_drop_flag(&mut self, location: Location, path: Self::Path, mode: DropFlagMode);
+
+ // Subpaths
+
+ /// Returns the subpath of a field of `path` (or `None` if there is no dedicated subpath).
+ ///
+ /// If this returns `None`, `field` will not get a dedicated drop flag.
+ fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path>;
+
+ /// Returns the subpath of a dereference of `path` (or `None` if there is no dedicated subpath).
+ ///
+ /// If this returns `None`, `*path` will not get a dedicated drop flag.
+ ///
+ /// This is only relevant for `Box<T>`, where the contained `T` can be moved out of the box.
+ fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path>;
+
+ /// Returns the subpath of downcasting `path` to one of its variants.
+ ///
+ /// If this returns `None`, the downcast of `path` will not get a dedicated drop flag.
+ fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path>;
+
+ /// Returns the subpath of indexing a fixed-size array `path`.
+ ///
+ /// If this returns `None`, elements of `path` will not get a dedicated drop flag.
+ ///
+ /// This is only relevant for array patterns, which can move out of individual array elements.
+ fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path>;
+}
+
+#[derive(Debug)]
+struct DropCtxt<'l, 'b, 'tcx, D>
+where
+ D: DropElaborator<'b, 'tcx>,
+{
+ elaborator: &'l mut D,
+
+ source_info: SourceInfo,
+
+ place: Place<'tcx>,
+ path: D::Path,
+ succ: BasicBlock,
+ unwind: Unwind,
+}
+
+/// "Elaborates" a drop of `place`/`path` and patches `bb`'s terminator to execute it.
+///
+/// The passed `elaborator` is used to determine what should happen at the drop terminator. It
+/// decides whether the drop can be statically determined or whether it needs a dynamic drop flag,
+/// and whether the drop is "open", ie. should be expanded to drop all subfields of the dropped
+/// value.
+///
+/// When this returns, the MIR patch in the `elaborator` contains the necessary changes.
+pub fn elaborate_drop<'b, 'tcx, D>(
+ elaborator: &mut D,
+ source_info: SourceInfo,
+ place: Place<'tcx>,
+ path: D::Path,
+ succ: BasicBlock,
+ unwind: Unwind,
+ bb: BasicBlock,
+) where
+ D: DropElaborator<'b, 'tcx>,
+ 'tcx: 'b,
+{
+ DropCtxt { elaborator, source_info, place, path, succ, unwind }.elaborate_drop(bb)
+}
+
+impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
+where
+ D: DropElaborator<'b, 'tcx>,
+ 'tcx: 'b,
+{
+ fn place_ty(&self, place: Place<'tcx>) -> Ty<'tcx> {
+ place.ty(self.elaborator.body(), self.tcx()).ty
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.elaborator.tcx()
+ }
+
+ /// This elaborates a single drop instruction, located at `bb`, and
+ /// patches over it.
+ ///
+ /// The elaborated drop checks the drop flags to only drop what
+ /// is initialized.
+ ///
+ /// In addition, the relevant drop flags also need to be cleared
+ /// to avoid double-drops. However, in the middle of a complex
+ /// drop, one must avoid clearing some of the flags before they
+ /// are read, as that would cause a memory leak.
+ ///
+ /// In particular, when dropping an ADT, multiple fields may be
+ /// joined together under the `rest` subpath. They are all controlled
+ /// by the primary drop flag, but only the last rest-field dropped
+ /// should clear it (and it must also not clear anything else).
+ //
+ // FIXME: I think we should just control the flags externally,
+ // and then we do not need this machinery.
+ pub fn elaborate_drop(&mut self, bb: BasicBlock) {
+ debug!("elaborate_drop({:?}, {:?})", bb, self);
+ let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep);
+ debug!("elaborate_drop({:?}, {:?}): live - {:?}", bb, self, style);
+ match style {
+ DropStyle::Dead => {
+ self.elaborator
+ .patch()
+ .patch_terminator(bb, TerminatorKind::Goto { target: self.succ });
+ }
+ DropStyle::Static => {
+ self.elaborator.patch().patch_terminator(
+ bb,
+ TerminatorKind::Drop {
+ place: self.place,
+ target: self.succ,
+ unwind: self.unwind.into_option(),
+ },
+ );
+ }
+ DropStyle::Conditional => {
+ let drop_bb = self.complete_drop(self.succ, self.unwind);
+ self.elaborator
+ .patch()
+ .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
+ }
+ DropStyle::Open => {
+ let drop_bb = self.open_drop();
+ self.elaborator
+ .patch()
+ .patch_terminator(bb, TerminatorKind::Goto { target: drop_bb });
+ }
+ }
+ }
+
+    /// Returns the place and move path for each field of `variant`
+    /// (the move path is `None` if the field is a rest field).
+ fn move_paths_for_fields(
+ &self,
+ base_place: Place<'tcx>,
+ variant_path: D::Path,
+ variant: &'tcx ty::VariantDef,
+ substs: SubstsRef<'tcx>,
+ ) -> Vec<(Place<'tcx>, Option<D::Path>)> {
+ variant
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let field = Field::new(i);
+ let subpath = self.elaborator.field_subpath(variant_path, field);
+ let tcx = self.tcx();
+
+ assert_eq!(self.elaborator.param_env().reveal(), Reveal::All);
+ let field_ty =
+ tcx.normalize_erasing_regions(self.elaborator.param_env(), f.ty(tcx, substs));
+ (tcx.mk_place_field(base_place, field, field_ty), subpath)
+ })
+ .collect()
+ }
+
+ fn drop_subpath(
+ &mut self,
+ place: Place<'tcx>,
+ path: Option<D::Path>,
+ succ: BasicBlock,
+ unwind: Unwind,
+ ) -> BasicBlock {
+ if let Some(path) = path {
+ debug!("drop_subpath: for std field {:?}", place);
+
+ DropCtxt {
+ elaborator: self.elaborator,
+ source_info: self.source_info,
+ path,
+ place,
+ succ,
+ unwind,
+ }
+ .elaborated_drop_block()
+ } else {
+ debug!("drop_subpath: for rest field {:?}", place);
+
+ DropCtxt {
+ elaborator: self.elaborator,
+ source_info: self.source_info,
+ place,
+ succ,
+ unwind,
+ // Using `self.path` here to condition the drop on
+ // our own drop flag.
+ path: self.path,
+ }
+ .complete_drop(succ, unwind)
+ }
+ }
+
+    /// Creates one-half of the drop ladder for a list of fields, and returns
+    /// the list of steps in it in reverse order, with the first step
+    /// dropping 0 fields and so on.
+    ///
+    /// `unwind_ladder` is such a list of steps in reverse order,
+    /// which is branched to if the matching step of the drop glue panics.
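+    ///
+    /// For example (a sketch), for fields `[f0, f1]` the returned blocks are
+    /// `[succ, drop(f1) -> succ, drop(f0) -> drop(f1)]`.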
+ fn drop_halfladder(
+ &mut self,
+ unwind_ladder: &[Unwind],
+ mut succ: BasicBlock,
+ fields: &[(Place<'tcx>, Option<D::Path>)],
+ ) -> Vec<BasicBlock> {
+ iter::once(succ)
+ .chain(fields.iter().rev().zip(unwind_ladder).map(|(&(place, path), &unwind_succ)| {
+ succ = self.drop_subpath(place, path, succ, unwind_succ);
+ succ
+ }))
+ .collect()
+ }
+
+ fn drop_ladder_bottom(&mut self) -> (BasicBlock, Unwind) {
+ // Clear the "master" drop flag at the end. This is needed
+ // because the "master" drop protects the ADT's discriminant,
+ // which is invalidated after the ADT is dropped.
+ (self.drop_flag_reset_block(DropFlagMode::Shallow, self.succ, self.unwind), self.unwind)
+ }
+
+    /// Creates a full drop ladder, consisting of two connected half-drop-ladders.
+ ///
+ /// For example, with 3 fields, the drop ladder is
+ ///
+ /// .d0:
+ /// ELAB(drop location.0 [target=.d1, unwind=.c1])
+ /// .d1:
+ /// ELAB(drop location.1 [target=.d2, unwind=.c2])
+ /// .d2:
+ /// ELAB(drop location.2 [target=`self.succ`, unwind=`self.unwind`])
+ /// .c1:
+ /// ELAB(drop location.1 [target=.c2])
+ /// .c2:
+ /// ELAB(drop location.2 [target=`self.unwind`])
+ ///
+    /// NOTE: this does not clear the master drop flag, so you need
+    /// to point succ/unwind at a `drop_ladder_bottom`.
+ fn drop_ladder(
+ &mut self,
+ fields: Vec<(Place<'tcx>, Option<D::Path>)>,
+ succ: BasicBlock,
+ unwind: Unwind,
+ ) -> (BasicBlock, Unwind) {
+ debug!("drop_ladder({:?}, {:?})", self, fields);
+
+ let mut fields = fields;
+ fields.retain(|&(place, _)| {
+ self.place_ty(place).needs_drop(self.tcx(), self.elaborator.param_env())
+ });
+
+ debug!("drop_ladder - fields needing drop: {:?}", fields);
+
+ let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
+ let unwind_ladder: Vec<_> = if let Unwind::To(target) = unwind {
+ let halfladder = self.drop_halfladder(&unwind_ladder, target, &fields);
+ halfladder.into_iter().map(Unwind::To).collect()
+ } else {
+ unwind_ladder
+ };
+
+ let normal_ladder = self.drop_halfladder(&unwind_ladder, succ, &fields);
+
+ (*normal_ladder.last().unwrap(), *unwind_ladder.last().unwrap())
+ }
+
+ fn open_drop_for_tuple(&mut self, tys: &[Ty<'tcx>]) -> BasicBlock {
+ debug!("open_drop_for_tuple({:?}, {:?})", self, tys);
+
+ let fields = tys
+ .iter()
+ .enumerate()
+ .map(|(i, &ty)| {
+ (
+ self.tcx().mk_place_field(self.place, Field::new(i), ty),
+ self.elaborator.field_subpath(self.path, Field::new(i)),
+ )
+ })
+ .collect();
+
+ let (succ, unwind) = self.drop_ladder_bottom();
+ self.drop_ladder(fields, succ, unwind).0
+ }
+
+ fn open_drop_for_box(&mut self, adt: ty::AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> BasicBlock {
+ debug!("open_drop_for_box({:?}, {:?}, {:?})", self, adt, substs);
+
+        // Drop glue for a box is sent straight to codegen,
+        // and a box cannot be directly dereferenced, so we
+        // project down to the raw pointer inside instead.
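+        //
+        // A rough sketch of the projection chain built below (the field
+        // names reflect the standard `Box` layout and are illustrative):
+        //
+        //     place: Box<T> -> .0: Unique<T> -> .0: NonNull<T> -> .0: *const T -> deref: T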
+ let unique_ty = adt.non_enum_variant().fields[0].ty(self.tcx(), substs);
+ let nonnull_ty =
+ unique_ty.ty_adt_def().unwrap().non_enum_variant().fields[0].ty(self.tcx(), substs);
+ let ptr_ty = self.tcx().mk_imm_ptr(substs[0].expect_ty());
+
+ let unique_place = self.tcx().mk_place_field(self.place, Field::new(0), unique_ty);
+ let nonnull_place = self.tcx().mk_place_field(unique_place, Field::new(0), nonnull_ty);
+ let ptr_place = self.tcx().mk_place_field(nonnull_place, Field::new(0), ptr_ty);
+ let interior = self.tcx().mk_place_deref(ptr_place);
+
+ let interior_path = self.elaborator.deref_subpath(self.path);
+
+ let succ = self.box_free_block(adt, substs, self.succ, self.unwind);
+ let unwind_succ =
+ self.unwind.map(|unwind| self.box_free_block(adt, substs, unwind, Unwind::InCleanup));
+
+ self.drop_subpath(interior, interior_path, succ, unwind_succ)
+ }
+
+ fn open_drop_for_adt(&mut self, adt: ty::AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> BasicBlock {
+ debug!("open_drop_for_adt({:?}, {:?}, {:?})", self, adt, substs);
+ if adt.variants().is_empty() {
+ return self.elaborator.patch().new_block(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator {
+ source_info: self.source_info,
+ kind: TerminatorKind::Unreachable,
+ }),
+ is_cleanup: self.unwind.is_cleanup(),
+ });
+ }
+
+ let skip_contents =
+ adt.is_union() || Some(adt.did()) == self.tcx().lang_items().manually_drop();
+ let contents_drop = if skip_contents {
+ (self.succ, self.unwind)
+ } else {
+ self.open_drop_for_adt_contents(adt, substs)
+ };
+
+ if adt.has_dtor(self.tcx()) {
+ self.destructor_call_block(contents_drop)
+ } else {
+ contents_drop.0
+ }
+ }
+
+ fn open_drop_for_adt_contents(
+ &mut self,
+ adt: ty::AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> (BasicBlock, Unwind) {
+ let (succ, unwind) = self.drop_ladder_bottom();
+ if !adt.is_enum() {
+ let fields = self.move_paths_for_fields(
+ self.place,
+ self.path,
+ &adt.variant(VariantIdx::new(0)),
+ substs,
+ );
+ self.drop_ladder(fields, succ, unwind)
+ } else {
+ self.open_drop_for_multivariant(adt, substs, succ, unwind)
+ }
+ }
+
+ fn open_drop_for_multivariant(
+ &mut self,
+ adt: ty::AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ succ: BasicBlock,
+ unwind: Unwind,
+ ) -> (BasicBlock, Unwind) {
+ let mut values = Vec::with_capacity(adt.variants().len());
+ let mut normal_blocks = Vec::with_capacity(adt.variants().len());
+ let mut unwind_blocks =
+ if unwind.is_cleanup() { None } else { Some(Vec::with_capacity(adt.variants().len())) };
+
+ let mut have_otherwise_with_drop_glue = false;
+ let mut have_otherwise = false;
+ let tcx = self.tcx();
+
+ for (variant_index, discr) in adt.discriminants(tcx) {
+ let variant = &adt.variant(variant_index);
+ let subpath = self.elaborator.downcast_subpath(self.path, variant_index);
+
+ if let Some(variant_path) = subpath {
+ let base_place = tcx.mk_place_elem(
+ self.place,
+ ProjectionElem::Downcast(Some(variant.name), variant_index),
+ );
+ let fields = self.move_paths_for_fields(base_place, variant_path, &variant, substs);
+ values.push(discr.val);
+ if let Unwind::To(unwind) = unwind {
+ // We can't use the half-ladder from the original
+ // drop ladder, because this breaks the
+ // "funclet can't have 2 successor funclets"
+ // requirement from MSVC:
+ //
+ // switch unwind-switch
+ // / \ / \
+ // v1.0 v2.0 v2.0-unwind v1.0-unwind
+ // | | / |
+ // v1.1-unwind v2.1-unwind |
+ // ^ |
+ // \-------------------------------/
+ //
+ // Create a duplicate half-ladder to avoid that. We
+ // could technically only do this on MSVC, but I
+                    // want to minimize the divergence between MSVC
+ // and non-MSVC.
+
+ let unwind_blocks = unwind_blocks.as_mut().unwrap();
+ let unwind_ladder = vec![Unwind::InCleanup; fields.len() + 1];
+ let halfladder = self.drop_halfladder(&unwind_ladder, unwind, &fields);
+ unwind_blocks.push(halfladder.last().cloned().unwrap());
+ }
+ let (normal, _) = self.drop_ladder(fields, succ, unwind);
+ normal_blocks.push(normal);
+ } else {
+ have_otherwise = true;
+
+ let param_env = self.elaborator.param_env();
+ let have_field_with_drop_glue = variant
+ .fields
+ .iter()
+ .any(|field| field.ty(tcx, substs).needs_drop(tcx, param_env));
+ if have_field_with_drop_glue {
+ have_otherwise_with_drop_glue = true;
+ }
+ }
+ }
+
+ if !have_otherwise {
+ values.pop();
+ } else if !have_otherwise_with_drop_glue {
+ normal_blocks.push(self.goto_block(succ, unwind));
+ if let Unwind::To(unwind) = unwind {
+ unwind_blocks.as_mut().unwrap().push(self.goto_block(unwind, Unwind::InCleanup));
+ }
+ } else {
+ normal_blocks.push(self.drop_block(succ, unwind));
+ if let Unwind::To(unwind) = unwind {
+ unwind_blocks.as_mut().unwrap().push(self.drop_block(unwind, Unwind::InCleanup));
+ }
+ }
+
+ (
+ self.adt_switch_block(adt, normal_blocks, &values, succ, unwind),
+ unwind.map(|unwind| {
+ self.adt_switch_block(
+ adt,
+ unwind_blocks.unwrap(),
+ &values,
+ unwind,
+ Unwind::InCleanup,
+ )
+ }),
+ )
+ }
+
+ fn adt_switch_block(
+ &mut self,
+ adt: ty::AdtDef<'tcx>,
+ blocks: Vec<BasicBlock>,
+ values: &[u128],
+ succ: BasicBlock,
+ unwind: Unwind,
+ ) -> BasicBlock {
+        // If there are multiple variants, then if something
+        // is present within the enum, the discriminant (tracked
+        // by the rest path) must be initialized.
+        //
+        // Additionally, we do not want to switch on the
+        // discriminant after it is freed, because that
+        // way lies only trouble.
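+
+        // A sketch of the block generated below (names are illustrative):
+        //
+        //     switch_block:
+        //         _discr = discriminant(place)
+        //         switchInt(move _discr) -> [values[0]: blocks[0], ..., otherwise: blocks.last()]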
+ let discr_ty = adt.repr().discr_type().to_ty(self.tcx());
+ let discr = Place::from(self.new_temp(discr_ty));
+ let discr_rv = Rvalue::Discriminant(self.place);
+ let switch_block = BasicBlockData {
+ statements: vec![self.assign(discr, discr_rv)],
+ terminator: Some(Terminator {
+ source_info: self.source_info,
+ kind: TerminatorKind::SwitchInt {
+ discr: Operand::Move(discr),
+ switch_ty: discr_ty,
+ targets: SwitchTargets::new(
+ values.iter().copied().zip(blocks.iter().copied()),
+ *blocks.last().unwrap(),
+ ),
+ },
+ }),
+ is_cleanup: unwind.is_cleanup(),
+ };
+ let switch_block = self.elaborator.patch().new_block(switch_block);
+ self.drop_flag_test_block(switch_block, succ, unwind)
+ }
+
+ fn destructor_call_block(&mut self, (succ, unwind): (BasicBlock, Unwind)) -> BasicBlock {
+ debug!("destructor_call_block({:?}, {:?})", self, succ);
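+
+        // Roughly (a sketch of the block assembled below):
+        //
+        //     _ref = &mut place;
+        //     <T as Drop>::drop(move _ref) -> [return: succ, unwind]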
+ let tcx = self.tcx();
+ let drop_trait = tcx.require_lang_item(LangItem::Drop, None);
+ let drop_fn = tcx.associated_item_def_ids(drop_trait)[0];
+ let ty = self.place_ty(self.place);
+ let substs = tcx.mk_substs_trait(ty, &[]);
+
+ let ref_ty =
+ tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty, mutbl: hir::Mutability::Mut });
+ let ref_place = self.new_temp(ref_ty);
+ let unit_temp = Place::from(self.new_temp(tcx.mk_unit()));
+
+ let result = BasicBlockData {
+ statements: vec![self.assign(
+ Place::from(ref_place),
+ Rvalue::Ref(
+ tcx.lifetimes.re_erased,
+ BorrowKind::Mut { allow_two_phase_borrow: false },
+ self.place,
+ ),
+ )],
+ terminator: Some(Terminator {
+ kind: TerminatorKind::Call {
+ func: Operand::function_handle(tcx, drop_fn, substs, self.source_info.span),
+ args: vec![Operand::Move(Place::from(ref_place))],
+ destination: unit_temp,
+ target: Some(succ),
+ cleanup: unwind.into_option(),
+ from_hir_call: true,
+ fn_span: self.source_info.span,
+ },
+ source_info: self.source_info,
+ }),
+ is_cleanup: unwind.is_cleanup(),
+ };
+ self.elaborator.patch().new_block(result)
+ }
+
+    /// Creates a loop that drops an array:
+ ///
+ /// ```text
+ /// loop-block:
+ /// can_go = cur == length_or_end
+ /// if can_go then succ else drop-block
+ /// drop-block:
+ /// if ptr_based {
+ /// ptr = cur
+ /// cur = cur.offset(1)
+ /// } else {
+ /// ptr = &raw mut P[cur]
+ /// cur = cur + 1
+ /// }
+ /// drop(ptr)
+ /// ```
+ fn drop_loop(
+ &mut self,
+ succ: BasicBlock,
+ cur: Local,
+ length_or_end: Place<'tcx>,
+ ety: Ty<'tcx>,
+ unwind: Unwind,
+ ptr_based: bool,
+ ) -> BasicBlock {
+ let copy = |place: Place<'tcx>| Operand::Copy(place);
+ let move_ = |place: Place<'tcx>| Operand::Move(place);
+ let tcx = self.tcx();
+
+ let ptr_ty = tcx.mk_ptr(ty::TypeAndMut { ty: ety, mutbl: hir::Mutability::Mut });
+ let ptr = Place::from(self.new_temp(ptr_ty));
+ let can_go = Place::from(self.new_temp(tcx.types.bool));
+
+ let one = self.constant_usize(1);
+ let (ptr_next, cur_next) = if ptr_based {
+ (
+ Rvalue::Use(copy(cur.into())),
+ Rvalue::BinaryOp(BinOp::Offset, Box::new((move_(cur.into()), one))),
+ )
+ } else {
+ (
+ Rvalue::AddressOf(Mutability::Mut, tcx.mk_place_index(self.place, cur)),
+ Rvalue::BinaryOp(BinOp::Add, Box::new((move_(cur.into()), one))),
+ )
+ };
+
+ let drop_block = BasicBlockData {
+ statements: vec![self.assign(ptr, ptr_next), self.assign(Place::from(cur), cur_next)],
+ is_cleanup: unwind.is_cleanup(),
+ terminator: Some(Terminator {
+ source_info: self.source_info,
+ // this gets overwritten by drop elaboration.
+ kind: TerminatorKind::Unreachable,
+ }),
+ };
+ let drop_block = self.elaborator.patch().new_block(drop_block);
+
+ let loop_block = BasicBlockData {
+ statements: vec![self.assign(
+ can_go,
+ Rvalue::BinaryOp(
+ BinOp::Eq,
+ Box::new((copy(Place::from(cur)), copy(length_or_end))),
+ ),
+ )],
+ is_cleanup: unwind.is_cleanup(),
+ terminator: Some(Terminator {
+ source_info: self.source_info,
+ kind: TerminatorKind::if_(tcx, move_(can_go), succ, drop_block),
+ }),
+ };
+ let loop_block = self.elaborator.patch().new_block(loop_block);
+
+ self.elaborator.patch().patch_terminator(
+ drop_block,
+ TerminatorKind::Drop {
+ place: tcx.mk_place_deref(ptr),
+ target: loop_block,
+ unwind: unwind.into_option(),
+ },
+ );
+
+ loop_block
+ }
+
+ fn open_drop_for_array(&mut self, ety: Ty<'tcx>, opt_size: Option<u64>) -> BasicBlock {
+ debug!("open_drop_for_array({:?}, {:?})", ety, opt_size);
+
+ // if size_of::<ety>() == 0 {
+ // index_based_loop
+ // } else {
+ // ptr_based_loop
+ // }
+
+ let tcx = self.tcx();
+
+ if let Some(size) = opt_size {
+ let fields: Vec<(Place<'tcx>, Option<D::Path>)> = (0..size)
+ .map(|i| {
+ (
+ tcx.mk_place_elem(
+ self.place,
+ ProjectionElem::ConstantIndex {
+ offset: i,
+ min_length: size,
+ from_end: false,
+ },
+ ),
+ self.elaborator.array_subpath(self.path, i, size),
+ )
+ })
+ .collect();
+
+ if fields.iter().any(|(_, path)| path.is_some()) {
+ let (succ, unwind) = self.drop_ladder_bottom();
+ return self.drop_ladder(fields, succ, unwind).0;
+ }
+ }
+
+ let move_ = |place: Place<'tcx>| Operand::Move(place);
+ let elem_size = Place::from(self.new_temp(tcx.types.usize));
+ let len = Place::from(self.new_temp(tcx.types.usize));
+
+ let base_block = BasicBlockData {
+ statements: vec![
+ self.assign(elem_size, Rvalue::NullaryOp(NullOp::SizeOf, ety)),
+ self.assign(len, Rvalue::Len(self.place)),
+ ],
+ is_cleanup: self.unwind.is_cleanup(),
+ terminator: Some(Terminator {
+ source_info: self.source_info,
+ kind: TerminatorKind::SwitchInt {
+ discr: move_(elem_size),
+ switch_ty: tcx.types.usize,
+ targets: SwitchTargets::static_if(
+ 0,
+ self.drop_loop_pair(ety, false, len),
+ self.drop_loop_pair(ety, true, len),
+ ),
+ },
+ }),
+ };
+ self.elaborator.patch().new_block(base_block)
+ }
+
+    /// Creates a pair of drop-loops of `place`, which drops its contents, even
+    /// in the case of a panic. If `ptr_based`, this creates a pointer loop;
+    /// otherwise, an index loop.
+ fn drop_loop_pair(
+ &mut self,
+ ety: Ty<'tcx>,
+ ptr_based: bool,
+ length: Place<'tcx>,
+ ) -> BasicBlock {
+ debug!("drop_loop_pair({:?}, {:?})", ety, ptr_based);
+ let tcx = self.tcx();
+ let iter_ty = if ptr_based { tcx.mk_mut_ptr(ety) } else { tcx.types.usize };
+
+ let cur = self.new_temp(iter_ty);
+ let length_or_end = if ptr_based { Place::from(self.new_temp(iter_ty)) } else { length };
+
+ let unwind = self.unwind.map(|unwind| {
+ self.drop_loop(unwind, cur, length_or_end, ety, Unwind::InCleanup, ptr_based)
+ });
+
+ let loop_block = self.drop_loop(self.succ, cur, length_or_end, ety, unwind, ptr_based);
+
+ let cur = Place::from(cur);
+ let drop_block_stmts = if ptr_based {
+ let tmp_ty = tcx.mk_mut_ptr(self.place_ty(self.place));
+ let tmp = Place::from(self.new_temp(tmp_ty));
+ // tmp = &raw mut P;
+ // cur = tmp as *mut T;
+ // end = Offset(cur, len);
+ vec![
+ self.assign(tmp, Rvalue::AddressOf(Mutability::Mut, self.place)),
+ self.assign(cur, Rvalue::Cast(CastKind::Misc, Operand::Move(tmp), iter_ty)),
+ self.assign(
+ length_or_end,
+ Rvalue::BinaryOp(
+ BinOp::Offset,
+ Box::new((Operand::Copy(cur), Operand::Move(length))),
+ ),
+ ),
+ ]
+ } else {
+ // cur = 0 (length already pushed)
+ let zero = self.constant_usize(0);
+ vec![self.assign(cur, Rvalue::Use(zero))]
+ };
+ let drop_block = self.elaborator.patch().new_block(BasicBlockData {
+ statements: drop_block_stmts,
+ is_cleanup: unwind.is_cleanup(),
+ terminator: Some(Terminator {
+ source_info: self.source_info,
+ kind: TerminatorKind::Goto { target: loop_block },
+ }),
+ });
+
+ // FIXME(#34708): handle partially-dropped array/slice elements.
+ let reset_block = self.drop_flag_reset_block(DropFlagMode::Deep, drop_block, unwind);
+ self.drop_flag_test_block(reset_block, self.succ, unwind)
+ }
+
+    /// The slow path: creates an "open", elaborated drop for a type
+    /// which is only partially moved out of, and patches `bb` to jump
+    /// to it. This must not be called on ADTs with a destructor,
+    /// as these can't be moved out of, except for `Box<T>`, which is
+    /// special-cased.
+    ///
+    /// This creates a "drop ladder" that drops the needed fields of the
+    /// ADT, both in the success case and if one of the destructors fails.
+ fn open_drop(&mut self) -> BasicBlock {
+ let ty = self.place_ty(self.place);
+ match ty.kind() {
+ ty::Closure(_, substs) => {
+ let tys: Vec<_> = substs.as_closure().upvar_tys().collect();
+ self.open_drop_for_tuple(&tys)
+ }
+            // Note that `elaborate_drops` only drops the upvars of a generator,
+            // and this is ok because `open_drop` here can only be reached
+            // within that generator's own resume function.
+            // This should only happen for the self argument on the resume function.
+            // It effectively only contains upvars until the generator transformation runs.
+            // See `rustc_mir_transform/src/generator.rs` for more details.
+ ty::Generator(_, substs, _) => {
+ let tys: Vec<_> = substs.as_generator().upvar_tys().collect();
+ self.open_drop_for_tuple(&tys)
+ }
+ ty::Tuple(fields) => self.open_drop_for_tuple(fields),
+ ty::Adt(def, substs) => {
+ if def.is_box() {
+ self.open_drop_for_box(*def, substs)
+ } else {
+ self.open_drop_for_adt(*def, substs)
+ }
+ }
+ ty::Dynamic(..) => self.complete_drop(self.succ, self.unwind),
+ ty::Array(ety, size) => {
+ let size = size.try_eval_usize(self.tcx(), self.elaborator.param_env());
+ self.open_drop_for_array(*ety, size)
+ }
+ ty::Slice(ety) => self.open_drop_for_array(*ety, None),
+
+ _ => bug!("open drop from non-ADT `{:?}`", ty),
+ }
+ }
+
+ fn complete_drop(&mut self, succ: BasicBlock, unwind: Unwind) -> BasicBlock {
+ debug!("complete_drop(succ={:?}, unwind={:?})", succ, unwind);
+
+ let drop_block = self.drop_block(succ, unwind);
+
+ self.drop_flag_test_block(drop_block, succ, unwind)
+ }
+
+    /// Creates a block that resets the drop flag. If `mode` is deep, all child drop flags will
+    /// also be cleared.
+ fn drop_flag_reset_block(
+ &mut self,
+ mode: DropFlagMode,
+ succ: BasicBlock,
+ unwind: Unwind,
+ ) -> BasicBlock {
+ debug!("drop_flag_reset_block({:?},{:?})", self, mode);
+
+ if unwind.is_cleanup() {
+            // The drop flag isn't read again on the unwind path, so don't
+            // bother resetting it.
+ return succ;
+ }
+ let block = self.new_block(unwind, TerminatorKind::Goto { target: succ });
+ let block_start = Location { block, statement_index: 0 };
+ self.elaborator.clear_drop_flag(block_start, self.path, mode);
+ block
+ }
+
+ fn elaborated_drop_block(&mut self) -> BasicBlock {
+ debug!("elaborated_drop_block({:?})", self);
+ let blk = self.drop_block(self.succ, self.unwind);
+ self.elaborate_drop(blk);
+ blk
+ }
+
+ /// Creates a block that frees the backing memory of a `Box` if its drop is required (either
+ /// statically or by checking its drop flag).
+ ///
+ /// The contained value will not be dropped.
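+    ///
+    /// A sketch of the result: test the drop flag for `path`; if it is set,
+    /// call the `box_free` lang item, then continue to `target`.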
+ fn box_free_block(
+ &mut self,
+ adt: ty::AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ target: BasicBlock,
+ unwind: Unwind,
+ ) -> BasicBlock {
+ let block = self.unelaborated_free_block(adt, substs, target, unwind);
+ self.drop_flag_test_block(block, target, unwind)
+ }
+
+ /// Creates a block that frees the backing memory of a `Box` (without dropping the contained
+ /// value).
+ fn unelaborated_free_block(
+ &mut self,
+ adt: ty::AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ target: BasicBlock,
+ unwind: Unwind,
+ ) -> BasicBlock {
+ let tcx = self.tcx();
+ let unit_temp = Place::from(self.new_temp(tcx.mk_unit()));
+ let free_func = tcx.require_lang_item(LangItem::BoxFree, Some(self.source_info.span));
+ let args = adt
+ .variant(VariantIdx::new(0))
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, f)| {
+ let field = Field::new(i);
+ let field_ty = f.ty(tcx, substs);
+ Operand::Move(tcx.mk_place_field(self.place, field, field_ty))
+ })
+ .collect();
+
+ let call = TerminatorKind::Call {
+ func: Operand::function_handle(tcx, free_func, substs, self.source_info.span),
+ args,
+ destination: unit_temp,
+ target: Some(target),
+ cleanup: None,
+ from_hir_call: false,
+ fn_span: self.source_info.span,
+ }; // FIXME(#43234)
+ let free_block = self.new_block(unwind, call);
+
+ let block_start = Location { block: free_block, statement_index: 0 };
+ self.elaborator.clear_drop_flag(block_start, self.path, DropFlagMode::Shallow);
+ free_block
+ }
+
+ fn drop_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
+ let block =
+ TerminatorKind::Drop { place: self.place, target, unwind: unwind.into_option() };
+ self.new_block(unwind, block)
+ }
+
+ fn goto_block(&mut self, target: BasicBlock, unwind: Unwind) -> BasicBlock {
+ let block = TerminatorKind::Goto { target };
+ self.new_block(unwind, block)
+ }
+
+ /// Returns the block to jump to in order to test the drop flag and execute the drop.
+ ///
+ /// Depending on the required `DropStyle`, this might be a generated block with an `if`
+ /// terminator (for dynamic/open drops), or it might be `on_set` or `on_unset` itself, in case
+ /// the drop can be statically determined.
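+    ///
+    /// Sketch of the mapping performed below:
+    ///
+    /// ```text
+    /// Dead               => on_unset
+    /// Static             => on_set
+    /// Conditional | Open => new block: if flag { on_set } else { on_unset }
+    /// ```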
+ fn drop_flag_test_block(
+ &mut self,
+ on_set: BasicBlock,
+ on_unset: BasicBlock,
+ unwind: Unwind,
+ ) -> BasicBlock {
+ let style = self.elaborator.drop_style(self.path, DropFlagMode::Shallow);
+ debug!(
+ "drop_flag_test_block({:?},{:?},{:?},{:?}) - {:?}",
+ self, on_set, on_unset, unwind, style
+ );
+
+ match style {
+ DropStyle::Dead => on_unset,
+ DropStyle::Static => on_set,
+ DropStyle::Conditional | DropStyle::Open => {
+ let flag = self.elaborator.get_drop_flag(self.path).unwrap();
+ let term = TerminatorKind::if_(self.tcx(), flag, on_set, on_unset);
+ self.new_block(unwind, term)
+ }
+ }
+ }
+
+ fn new_block(&mut self, unwind: Unwind, k: TerminatorKind<'tcx>) -> BasicBlock {
+ self.elaborator.patch().new_block(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator { source_info: self.source_info, kind: k }),
+ is_cleanup: unwind.is_cleanup(),
+ })
+ }
+
+ fn new_temp(&mut self, ty: Ty<'tcx>) -> Local {
+ self.elaborator.patch().new_temp(ty, self.source_info.span)
+ }
+
+ fn constant_usize(&self, val: u16) -> Operand<'tcx> {
+ Operand::Constant(Box::new(Constant {
+ span: self.source_info.span,
+ user_ty: None,
+ literal: ConstantKind::from_usize(self.tcx(), val.into()),
+ }))
+ }
+
+ fn assign(&self, lhs: Place<'tcx>, rhs: Rvalue<'tcx>) -> Statement<'tcx> {
+ Statement {
+ source_info: self.source_info,
+ kind: StatementKind::Assign(Box::new((lhs, rhs))),
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/framework/cursor.rs b/compiler/rustc_mir_dataflow/src/framework/cursor.rs
new file mode 100644
index 000000000..f3b5544aa
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/framework/cursor.rs
@@ -0,0 +1,235 @@
+//! Random access inspection of the results of a dataflow analysis.
+
+use crate::framework::BitSetExt;
+
+use std::borrow::Borrow;
+use std::cmp::Ordering;
+
+#[cfg(debug_assertions)]
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::{self, BasicBlock, Location};
+
+use super::{Analysis, Direction, Effect, EffectIndex, Results};
+
+/// A `ResultsCursor` that borrows the underlying `Results`.
+pub type ResultsRefCursor<'a, 'mir, 'tcx, A> = ResultsCursor<'mir, 'tcx, A, &'a Results<'tcx, A>>;
+
+/// Allows random access inspection of the results of a dataflow analysis.
+///
+/// This cursor only has linear performance within a basic block when its statements are visited in
+/// the same order as the `DIRECTION` of the analysis. In the worst case—when statements are
+/// visited in *reverse* order—performance will be quadratic in the number of statements in the
+/// block. The order in which basic blocks are inspected has no impact on performance.
+///
+/// A `ResultsCursor` can either own (the default) or borrow the dataflow results it inspects. The
+/// type of ownership is determined by `R` (see `ResultsRefCursor` above).
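+///
+/// A hypothetical usage sketch (`results`, `body`, and `loc` are assumed to be
+/// in scope; illustrative only, not a doctest):
+///
+/// ```rust,ignore
+/// let mut cursor = results.into_results_cursor(body);
+/// cursor.seek_before_primary_effect(loc);
+/// let state_at_loc = cursor.get();
+/// ```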
+pub struct ResultsCursor<'mir, 'tcx, A, R = Results<'tcx, A>>
+where
+ A: Analysis<'tcx>,
+{
+ body: &'mir mir::Body<'tcx>,
+ results: R,
+ state: A::Domain,
+
+ pos: CursorPosition,
+
+ /// Indicates that `state` has been modified with a custom effect.
+ ///
+ /// When this flag is set, we need to reset to an entry set before doing a seek.
+ state_needs_reset: bool,
+
+ #[cfg(debug_assertions)]
+ reachable_blocks: BitSet<BasicBlock>,
+}
+
+impl<'mir, 'tcx, A, R> ResultsCursor<'mir, 'tcx, A, R>
+where
+ A: Analysis<'tcx>,
+ R: Borrow<Results<'tcx, A>>,
+{
+ /// Returns a new cursor that can inspect `results`.
+ pub fn new(body: &'mir mir::Body<'tcx>, results: R) -> Self {
+ let bottom_value = results.borrow().analysis.bottom_value(body);
+ ResultsCursor {
+ body,
+ results,
+
+ // Initialize to the `bottom_value` and set `state_needs_reset` to tell the cursor that
+ // it needs to reset to block entry before the first seek. The cursor position is
+ // immaterial.
+ state_needs_reset: true,
+ state: bottom_value,
+ pos: CursorPosition::block_entry(mir::START_BLOCK),
+
+ #[cfg(debug_assertions)]
+ reachable_blocks: mir::traversal::reachable_as_bitset(body),
+ }
+ }
+
+ /// Allows inspection of unreachable basic blocks even with `debug_assertions` enabled.
+ #[cfg(test)]
+ pub(crate) fn allow_unreachable(&mut self) {
+ #[cfg(debug_assertions)]
+ self.reachable_blocks.insert_all()
+ }
+
+ /// Returns the underlying `Results`.
+ pub fn results(&self) -> &Results<'tcx, A> {
+ &self.results.borrow()
+ }
+
+ /// Returns the `Analysis` used to generate the underlying `Results`.
+ pub fn analysis(&self) -> &A {
+ &self.results.borrow().analysis
+ }
+
+ /// Returns the dataflow state at the current location.
+ pub fn get(&self) -> &A::Domain {
+ &self.state
+ }
+
+ /// Resets the cursor to hold the entry set for the given basic block.
+ ///
+ /// For forward dataflow analyses, this is the dataflow state prior to the first statement.
+ ///
+ /// For backward dataflow analyses, this is the dataflow state after the terminator.
+ pub(super) fn seek_to_block_entry(&mut self, block: BasicBlock) {
+ #[cfg(debug_assertions)]
+ assert!(self.reachable_blocks.contains(block));
+
+ self.state.clone_from(&self.results.borrow().entry_set_for_block(block));
+ self.pos = CursorPosition::block_entry(block);
+ self.state_needs_reset = false;
+ }
+
+ /// Resets the cursor to hold the state prior to the first statement in a basic block.
+ ///
+ /// For forward analyses, this is the entry set for the given block.
+ ///
+ /// For backward analyses, this is the state that will be propagated to its
+ /// predecessors (ignoring edge-specific effects).
+ pub fn seek_to_block_start(&mut self, block: BasicBlock) {
+ if A::Direction::IS_FORWARD {
+ self.seek_to_block_entry(block)
+ } else {
+ self.seek_after(Location { block, statement_index: 0 }, Effect::Primary)
+ }
+ }
+
+ /// Resets the cursor to hold the state after the terminator in a basic block.
+ ///
+ /// For backward analyses, this is the entry set for the given block.
+ ///
+ /// For forward analyses, this is the state that will be propagated to its
+ /// successors (ignoring edge-specific effects).
+ pub fn seek_to_block_end(&mut self, block: BasicBlock) {
+ if A::Direction::IS_BACKWARD {
+ self.seek_to_block_entry(block)
+ } else {
+ self.seek_after(self.body.terminator_loc(block), Effect::Primary)
+ }
+ }
+
+ /// Advances the cursor to hold the dataflow state at `target` before its "primary" effect is
+ /// applied.
+ ///
+ /// The "before" effect at the target location *will be* applied.
+ pub fn seek_before_primary_effect(&mut self, target: Location) {
+ self.seek_after(target, Effect::Before)
+ }
+
+ /// Advances the cursor to hold the dataflow state at `target` after its "primary" effect is
+ /// applied.
+ ///
+ /// The "before" effect at the target location will be applied as well.
+ pub fn seek_after_primary_effect(&mut self, target: Location) {
+ self.seek_after(target, Effect::Primary)
+ }
+
+ fn seek_after(&mut self, target: Location, effect: Effect) {
+ assert!(target <= self.body.terminator_loc(target.block));
+
+ // Reset to the entry of the target block if any of the following are true:
+ // - A custom effect has been applied to the cursor state.
+ // - We are in a different block than the target.
+ // - We are in the same block but have advanced past the target effect.
+ if self.state_needs_reset || self.pos.block != target.block {
+ self.seek_to_block_entry(target.block);
+ } else if let Some(curr_effect) = self.pos.curr_effect_index {
+ let mut ord = curr_effect.statement_index.cmp(&target.statement_index);
+ if A::Direction::IS_BACKWARD {
+ ord = ord.reverse()
+ }
+
+ match ord.then_with(|| curr_effect.effect.cmp(&effect)) {
+ Ordering::Equal => return,
+ Ordering::Greater => self.seek_to_block_entry(target.block),
+ Ordering::Less => {}
+ }
+ }
+
+ // At this point, the cursor is in the same block as the target location at an earlier
+ // statement.
+ debug_assert_eq!(target.block, self.pos.block);
+
+ let block_data = &self.body[target.block];
+ let next_effect = if A::Direction::IS_FORWARD {
+ #[rustfmt::skip]
+ self.pos.curr_effect_index.map_or_else(
+ || Effect::Before.at_index(0),
+ EffectIndex::next_in_forward_order,
+ )
+ } else {
+ self.pos.curr_effect_index.map_or_else(
+ || Effect::Before.at_index(block_data.statements.len()),
+ EffectIndex::next_in_backward_order,
+ )
+ };
+
+ let analysis = &self.results.borrow().analysis;
+ let target_effect_index = effect.at_index(target.statement_index);
+
+ A::Direction::apply_effects_in_range(
+ analysis,
+ &mut self.state,
+ target.block,
+ block_data,
+ next_effect..=target_effect_index,
+ );
+
+ self.pos =
+ CursorPosition { block: target.block, curr_effect_index: Some(target_effect_index) };
+ }
+
+ /// Applies `f` to the cursor's internal state.
+ ///
+ /// This can be used, e.g., to apply the call return effect directly to the cursor without
+ /// creating an extra copy of the dataflow state.
+ pub fn apply_custom_effect(&mut self, f: impl FnOnce(&A, &mut A::Domain)) {
+ f(&self.results.borrow().analysis, &mut self.state);
+ self.state_needs_reset = true;
+ }
+}
+
+impl<'mir, 'tcx, A, R> ResultsCursor<'mir, 'tcx, A, R>
+where
+ A: crate::GenKillAnalysis<'tcx>,
+ A::Domain: BitSetExt<A::Idx>,
+ R: Borrow<Results<'tcx, A>>,
+{
+ pub fn contains(&self, elem: A::Idx) -> bool {
+ self.get().contains(elem)
+ }
+}
+
+#[derive(Clone, Copy, Debug)]
+struct CursorPosition {
+ block: BasicBlock,
+ curr_effect_index: Option<EffectIndex>,
+}
+
+impl CursorPosition {
+ fn block_entry(block: BasicBlock) -> CursorPosition {
+ CursorPosition { block, curr_effect_index: None }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/framework/direction.rs b/compiler/rustc_mir_dataflow/src/framework/direction.rs
new file mode 100644
index 000000000..5c77f3ea3
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/framework/direction.rs
@@ -0,0 +1,656 @@
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::{self, BasicBlock, Location, SwitchTargets};
+use rustc_middle::ty::TyCtxt;
+use std::ops::RangeInclusive;
+
+use super::visitor::{ResultsVisitable, ResultsVisitor};
+use super::{
+ Analysis, CallReturnPlaces, Effect, EffectIndex, GenKillAnalysis, GenKillSet, SwitchIntTarget,
+};
+
+pub trait Direction {
+ const IS_FORWARD: bool;
+
+ const IS_BACKWARD: bool = !Self::IS_FORWARD;
+
+ /// Applies all effects between the given `EffectIndex`s.
+ ///
+ /// `effects.start()` must precede or equal `effects.end()` in this direction.
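+    ///
+    /// For example (a sketch), a forward analysis orders the effects at
+    /// statement `i` as `Before(i)`, then `Primary(i)`, then `Before(i + 1)`,
+    /// and so on; a backward analysis walks the same pairs from the terminator.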
+ fn apply_effects_in_range<'tcx, A>(
+ analysis: &A,
+ state: &mut A::Domain,
+ block: BasicBlock,
+ block_data: &mir::BasicBlockData<'tcx>,
+ effects: RangeInclusive<EffectIndex>,
+ ) where
+ A: Analysis<'tcx>;
+
+ fn apply_effects_in_block<'tcx, A>(
+ analysis: &A,
+ state: &mut A::Domain,
+ block: BasicBlock,
+ block_data: &mir::BasicBlockData<'tcx>,
+ ) where
+ A: Analysis<'tcx>;
+
+ fn gen_kill_effects_in_block<'tcx, A>(
+ analysis: &A,
+ trans: &mut GenKillSet<A::Idx>,
+ block: BasicBlock,
+ block_data: &mir::BasicBlockData<'tcx>,
+ ) where
+ A: GenKillAnalysis<'tcx>;
+
+ fn visit_results_in_block<'mir, 'tcx, F, R>(
+ state: &mut F,
+ block: BasicBlock,
+ block_data: &'mir mir::BasicBlockData<'tcx>,
+ results: &R,
+ vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = F>,
+ ) where
+ R: ResultsVisitable<'tcx, FlowState = F>;
+
+ fn join_state_into_successors_of<'tcx, A>(
+ analysis: &A,
+ tcx: TyCtxt<'tcx>,
+ body: &mir::Body<'tcx>,
+ dead_unwinds: Option<&BitSet<BasicBlock>>,
+ exit_state: &mut A::Domain,
+ block: (BasicBlock, &'_ mir::BasicBlockData<'tcx>),
+ propagate: impl FnMut(BasicBlock, &A::Domain),
+ ) where
+ A: Analysis<'tcx>;
+}
+
+/// Dataflow that runs from the exit of a block (the terminator) to its entry (the first statement).
+pub struct Backward;
+
+impl Direction for Backward {
+ const IS_FORWARD: bool = false;
+
+ fn apply_effects_in_block<'tcx, A>(
+ analysis: &A,
+ state: &mut A::Domain,
+ block: BasicBlock,
+ block_data: &mir::BasicBlockData<'tcx>,
+ ) where
+ A: Analysis<'tcx>,
+ {
+ let terminator = block_data.terminator();
+ let location = Location { block, statement_index: block_data.statements.len() };
+ analysis.apply_before_terminator_effect(state, terminator, location);
+ analysis.apply_terminator_effect(state, terminator, location);
+
+ for (statement_index, statement) in block_data.statements.iter().enumerate().rev() {
+ let location = Location { block, statement_index };
+ analysis.apply_before_statement_effect(state, statement, location);
+ analysis.apply_statement_effect(state, statement, location);
+ }
+ }
+
+ fn gen_kill_effects_in_block<'tcx, A>(
+ analysis: &A,
+ trans: &mut GenKillSet<A::Idx>,
+ block: BasicBlock,
+ block_data: &mir::BasicBlockData<'tcx>,
+ ) where
+ A: GenKillAnalysis<'tcx>,
+ {
+ let terminator = block_data.terminator();
+ let location = Location { block, statement_index: block_data.statements.len() };
+ analysis.before_terminator_effect(trans, terminator, location);
+ analysis.terminator_effect(trans, terminator, location);
+
+ for (statement_index, statement) in block_data.statements.iter().enumerate().rev() {
+ let location = Location { block, statement_index };
+ analysis.before_statement_effect(trans, statement, location);
+ analysis.statement_effect(trans, statement, location);
+ }
+ }
+
+ fn apply_effects_in_range<'tcx, A>(
+ analysis: &A,
+ state: &mut A::Domain,
+ block: BasicBlock,
+ block_data: &mir::BasicBlockData<'tcx>,
+ effects: RangeInclusive<EffectIndex>,
+ ) where
+ A: Analysis<'tcx>,
+ {
+ let (from, to) = (*effects.start(), *effects.end());
+ let terminator_index = block_data.statements.len();
+
+ assert!(from.statement_index <= terminator_index);
+ assert!(!to.precedes_in_backward_order(from));
+
+ // Handle the statement (or terminator) at `from`.
+
+ let next_effect = match from.effect {
+ // If we need to apply the terminator effect in all or in part, do so now.
+ _ if from.statement_index == terminator_index => {
+ let location = Location { block, statement_index: from.statement_index };
+ let terminator = block_data.terminator();
+
+ if from.effect == Effect::Before {
+ analysis.apply_before_terminator_effect(state, terminator, location);
+ if to == Effect::Before.at_index(terminator_index) {
+ return;
+ }
+ }
+
+ analysis.apply_terminator_effect(state, terminator, location);
+ if to == Effect::Primary.at_index(terminator_index) {
+ return;
+ }
+
+ // If `from.statement_index` is `0`, we will have hit one of the earlier comparisons
+ // with `to`.
+ from.statement_index - 1
+ }
+
+ Effect::Primary => {
+ let location = Location { block, statement_index: from.statement_index };
+ let statement = &block_data.statements[from.statement_index];
+
+ analysis.apply_statement_effect(state, statement, location);
+ if to == Effect::Primary.at_index(from.statement_index) {
+ return;
+ }
+
+ from.statement_index - 1
+ }
+
+ Effect::Before => from.statement_index,
+ };
+
+        // Handle all statements between `next_effect` and `to.statement_index`.
+
+ for statement_index in (to.statement_index..next_effect).rev().map(|i| i + 1) {
+ let location = Location { block, statement_index };
+ let statement = &block_data.statements[statement_index];
+ analysis.apply_before_statement_effect(state, statement, location);
+ analysis.apply_statement_effect(state, statement, location);
+ }
+
+ // Handle the statement at `to`.
+
+ let location = Location { block, statement_index: to.statement_index };
+ let statement = &block_data.statements[to.statement_index];
+ analysis.apply_before_statement_effect(state, statement, location);
+
+ if to.effect == Effect::Before {
+ return;
+ }
+
+ analysis.apply_statement_effect(state, statement, location);
+ }
+
+ fn visit_results_in_block<'mir, 'tcx, F, R>(
+ state: &mut F,
+ block: BasicBlock,
+ block_data: &'mir mir::BasicBlockData<'tcx>,
+ results: &R,
+ vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = F>,
+ ) where
+ R: ResultsVisitable<'tcx, FlowState = F>,
+ {
+ results.reset_to_block_entry(state, block);
+
+ vis.visit_block_end(&state, block_data, block);
+
+ // Terminator
+ let loc = Location { block, statement_index: block_data.statements.len() };
+ let term = block_data.terminator();
+ results.reconstruct_before_terminator_effect(state, term, loc);
+ vis.visit_terminator_before_primary_effect(state, term, loc);
+ results.reconstruct_terminator_effect(state, term, loc);
+ vis.visit_terminator_after_primary_effect(state, term, loc);
+
+ for (statement_index, stmt) in block_data.statements.iter().enumerate().rev() {
+ let loc = Location { block, statement_index };
+ results.reconstruct_before_statement_effect(state, stmt, loc);
+ vis.visit_statement_before_primary_effect(state, stmt, loc);
+ results.reconstruct_statement_effect(state, stmt, loc);
+ vis.visit_statement_after_primary_effect(state, stmt, loc);
+ }
+
+ vis.visit_block_start(state, block_data, block);
+ }
+
+ fn join_state_into_successors_of<'tcx, A>(
+ analysis: &A,
+ _tcx: TyCtxt<'tcx>,
+ body: &mir::Body<'tcx>,
+ dead_unwinds: Option<&BitSet<BasicBlock>>,
+ exit_state: &mut A::Domain,
+ (bb, _bb_data): (BasicBlock, &'_ mir::BasicBlockData<'tcx>),
+ mut propagate: impl FnMut(BasicBlock, &A::Domain),
+ ) where
+ A: Analysis<'tcx>,
+ {
+ for pred in body.basic_blocks.predecessors()[bb].iter().copied() {
+ match body[pred].terminator().kind {
+ // Apply terminator-specific edge effects.
+ //
+ // FIXME(ecstaticmorse): Avoid cloning the exit state unconditionally.
+ mir::TerminatorKind::Call { destination, target: Some(dest), .. } if dest == bb => {
+ let mut tmp = exit_state.clone();
+ analysis.apply_call_return_effect(
+ &mut tmp,
+ pred,
+ CallReturnPlaces::Call(destination),
+ );
+ propagate(pred, &tmp);
+ }
+
+ mir::TerminatorKind::InlineAsm {
+ destination: Some(dest), ref operands, ..
+ } if dest == bb => {
+ let mut tmp = exit_state.clone();
+ analysis.apply_call_return_effect(
+ &mut tmp,
+ pred,
+ CallReturnPlaces::InlineAsm(operands),
+ );
+ propagate(pred, &tmp);
+ }
+
+ mir::TerminatorKind::Yield { resume, resume_arg, .. } if resume == bb => {
+ let mut tmp = exit_state.clone();
+ analysis.apply_yield_resume_effect(&mut tmp, resume, resume_arg);
+ propagate(pred, &tmp);
+ }
+
+ mir::TerminatorKind::SwitchInt { targets: _, ref discr, switch_ty: _ } => {
+ let mut applier = BackwardSwitchIntEdgeEffectsApplier {
+ body,
+ pred,
+ exit_state,
+ bb,
+ propagate: &mut propagate,
+ effects_applied: false,
+ };
+
+ analysis.apply_switch_int_edge_effects(pred, discr, &mut applier);
+
+ if !applier.effects_applied {
+ propagate(pred, exit_state)
+ }
+ }
+
+ // Ignore dead unwinds.
+ mir::TerminatorKind::Call { cleanup: Some(unwind), .. }
+ | mir::TerminatorKind::Assert { cleanup: Some(unwind), .. }
+ | mir::TerminatorKind::Drop { unwind: Some(unwind), .. }
+ | mir::TerminatorKind::DropAndReplace { unwind: Some(unwind), .. }
+ | mir::TerminatorKind::FalseUnwind { unwind: Some(unwind), .. }
+ | mir::TerminatorKind::InlineAsm { cleanup: Some(unwind), .. }
+ if unwind == bb =>
+ {
+ if dead_unwinds.map_or(true, |dead| !dead.contains(bb)) {
+ propagate(pred, exit_state);
+ }
+ }
+
+ _ => propagate(pred, exit_state),
+ }
+ }
+ }
+}
+
+struct BackwardSwitchIntEdgeEffectsApplier<'a, 'tcx, D, F> {
+ body: &'a mir::Body<'tcx>,
+ pred: BasicBlock,
+ exit_state: &'a mut D,
+ bb: BasicBlock,
+ propagate: &'a mut F,
+
+ effects_applied: bool,
+}
+
+impl<D, F> super::SwitchIntEdgeEffects<D> for BackwardSwitchIntEdgeEffectsApplier<'_, '_, D, F>
+where
+ D: Clone,
+ F: FnMut(BasicBlock, &D),
+{
+ fn apply(&mut self, mut apply_edge_effect: impl FnMut(&mut D, SwitchIntTarget)) {
+ assert!(!self.effects_applied);
+
+ let values = &self.body.basic_blocks.switch_sources()[&(self.bb, self.pred)];
+ let targets = values.iter().map(|&value| SwitchIntTarget { value, target: self.bb });
+
+ let mut tmp = None;
+ for target in targets {
+ let tmp = opt_clone_from_or_clone(&mut tmp, self.exit_state);
+ apply_edge_effect(tmp, target);
+ (self.propagate)(self.pred, tmp);
+ }
+
+ self.effects_applied = true;
+ }
+}
+
+/// Dataflow that runs from the entry of a block (the first statement) to its exit (the terminator).
+pub struct Forward;
+
+impl Direction for Forward {
+ const IS_FORWARD: bool = true;
+
+ fn apply_effects_in_block<'tcx, A>(
+ analysis: &A,
+ state: &mut A::Domain,
+ block: BasicBlock,
+ block_data: &mir::BasicBlockData<'tcx>,
+ ) where
+ A: Analysis<'tcx>,
+ {
+ for (statement_index, statement) in block_data.statements.iter().enumerate() {
+ let location = Location { block, statement_index };
+ analysis.apply_before_statement_effect(state, statement, location);
+ analysis.apply_statement_effect(state, statement, location);
+ }
+
+ let terminator = block_data.terminator();
+ let location = Location { block, statement_index: block_data.statements.len() };
+ analysis.apply_before_terminator_effect(state, terminator, location);
+ analysis.apply_terminator_effect(state, terminator, location);
+ }
+
+ fn gen_kill_effects_in_block<'tcx, A>(
+ analysis: &A,
+ trans: &mut GenKillSet<A::Idx>,
+ block: BasicBlock,
+ block_data: &mir::BasicBlockData<'tcx>,
+ ) where
+ A: GenKillAnalysis<'tcx>,
+ {
+ for (statement_index, statement) in block_data.statements.iter().enumerate() {
+ let location = Location { block, statement_index };
+ analysis.before_statement_effect(trans, statement, location);
+ analysis.statement_effect(trans, statement, location);
+ }
+
+ let terminator = block_data.terminator();
+ let location = Location { block, statement_index: block_data.statements.len() };
+ analysis.before_terminator_effect(trans, terminator, location);
+ analysis.terminator_effect(trans, terminator, location);
+ }
+
+ fn apply_effects_in_range<'tcx, A>(
+ analysis: &A,
+ state: &mut A::Domain,
+ block: BasicBlock,
+ block_data: &mir::BasicBlockData<'tcx>,
+ effects: RangeInclusive<EffectIndex>,
+ ) where
+ A: Analysis<'tcx>,
+ {
+ let (from, to) = (*effects.start(), *effects.end());
+ let terminator_index = block_data.statements.len();
+
+ assert!(to.statement_index <= terminator_index);
+ assert!(!to.precedes_in_forward_order(from));
+
+        // If we have applied the before effect of the statement or terminator at `from` but not its
+        // after effect, do so now and start the loop below from the next statement.
+
+ let first_unapplied_index = match from.effect {
+ Effect::Before => from.statement_index,
+
+ Effect::Primary if from.statement_index == terminator_index => {
+ debug_assert_eq!(from, to);
+
+ let location = Location { block, statement_index: terminator_index };
+ let terminator = block_data.terminator();
+ analysis.apply_terminator_effect(state, terminator, location);
+ return;
+ }
+
+ Effect::Primary => {
+ let location = Location { block, statement_index: from.statement_index };
+ let statement = &block_data.statements[from.statement_index];
+ analysis.apply_statement_effect(state, statement, location);
+
+ // If we only needed to apply the after effect of the statement at `idx`, we are done.
+ if from == to {
+ return;
+ }
+
+ from.statement_index + 1
+ }
+ };
+
+ // Handle all statements between `from` and `to` whose effects must be applied in full.
+
+ for statement_index in first_unapplied_index..to.statement_index {
+ let location = Location { block, statement_index };
+ let statement = &block_data.statements[statement_index];
+ analysis.apply_before_statement_effect(state, statement, location);
+ analysis.apply_statement_effect(state, statement, location);
+ }
+
+ // Handle the statement or terminator at `to`.
+
+ let location = Location { block, statement_index: to.statement_index };
+ if to.statement_index == terminator_index {
+ let terminator = block_data.terminator();
+ analysis.apply_before_terminator_effect(state, terminator, location);
+
+ if to.effect == Effect::Primary {
+ analysis.apply_terminator_effect(state, terminator, location);
+ }
+ } else {
+ let statement = &block_data.statements[to.statement_index];
+ analysis.apply_before_statement_effect(state, statement, location);
+
+ if to.effect == Effect::Primary {
+ analysis.apply_statement_effect(state, statement, location);
+ }
+ }
+ }
+
+ fn visit_results_in_block<'mir, 'tcx, F, R>(
+ state: &mut F,
+ block: BasicBlock,
+ block_data: &'mir mir::BasicBlockData<'tcx>,
+ results: &R,
+ vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = F>,
+ ) where
+ R: ResultsVisitable<'tcx, FlowState = F>,
+ {
+ results.reset_to_block_entry(state, block);
+
+ vis.visit_block_start(state, block_data, block);
+
+ for (statement_index, stmt) in block_data.statements.iter().enumerate() {
+ let loc = Location { block, statement_index };
+ results.reconstruct_before_statement_effect(state, stmt, loc);
+ vis.visit_statement_before_primary_effect(state, stmt, loc);
+ results.reconstruct_statement_effect(state, stmt, loc);
+ vis.visit_statement_after_primary_effect(state, stmt, loc);
+ }
+
+ let loc = Location { block, statement_index: block_data.statements.len() };
+ let term = block_data.terminator();
+ results.reconstruct_before_terminator_effect(state, term, loc);
+ vis.visit_terminator_before_primary_effect(state, term, loc);
+ results.reconstruct_terminator_effect(state, term, loc);
+ vis.visit_terminator_after_primary_effect(state, term, loc);
+
+ vis.visit_block_end(state, block_data, block);
+ }
+
+ fn join_state_into_successors_of<'tcx, A>(
+ analysis: &A,
+ _tcx: TyCtxt<'tcx>,
+ _body: &mir::Body<'tcx>,
+ dead_unwinds: Option<&BitSet<BasicBlock>>,
+ exit_state: &mut A::Domain,
+ (bb, bb_data): (BasicBlock, &'_ mir::BasicBlockData<'tcx>),
+ mut propagate: impl FnMut(BasicBlock, &A::Domain),
+ ) where
+ A: Analysis<'tcx>,
+ {
+ use mir::TerminatorKind::*;
+ match bb_data.terminator().kind {
+ Return | Resume | Abort | GeneratorDrop | Unreachable => {}
+
+ Goto { target } => propagate(target, exit_state),
+
+ Assert { target, cleanup: unwind, expected: _, msg: _, cond: _ }
+ | Drop { target, unwind, place: _ }
+ | DropAndReplace { target, unwind, value: _, place: _ }
+ | FalseUnwind { real_target: target, unwind } => {
+ if let Some(unwind) = unwind {
+ if dead_unwinds.map_or(true, |dead| !dead.contains(bb)) {
+ propagate(unwind, exit_state);
+ }
+ }
+
+ propagate(target, exit_state);
+ }
+
+ FalseEdge { real_target, imaginary_target } => {
+ propagate(real_target, exit_state);
+ propagate(imaginary_target, exit_state);
+ }
+
+ Yield { resume: target, drop, resume_arg, value: _ } => {
+ if let Some(drop) = drop {
+ propagate(drop, exit_state);
+ }
+
+ analysis.apply_yield_resume_effect(exit_state, target, resume_arg);
+ propagate(target, exit_state);
+ }
+
+ Call {
+ cleanup,
+ destination,
+ target,
+ func: _,
+ args: _,
+ from_hir_call: _,
+ fn_span: _,
+ } => {
+ if let Some(unwind) = cleanup {
+ if dead_unwinds.map_or(true, |dead| !dead.contains(bb)) {
+ propagate(unwind, exit_state);
+ }
+ }
+
+ if let Some(target) = target {
+ // N.B.: This must be done *last*, otherwise the unwind path will see the call
+ // return effect.
+ analysis.apply_call_return_effect(
+ exit_state,
+ bb,
+ CallReturnPlaces::Call(destination),
+ );
+ propagate(target, exit_state);
+ }
+ }
+
+ InlineAsm {
+ template: _,
+ ref operands,
+ options: _,
+ line_spans: _,
+ destination,
+ cleanup,
+ } => {
+ if let Some(unwind) = cleanup {
+ if dead_unwinds.map_or(true, |dead| !dead.contains(bb)) {
+ propagate(unwind, exit_state);
+ }
+ }
+
+ if let Some(target) = destination {
+ // N.B.: This must be done *last*, otherwise the unwind path will see the call
+ // return effect.
+ analysis.apply_call_return_effect(
+ exit_state,
+ bb,
+ CallReturnPlaces::InlineAsm(operands),
+ );
+ propagate(target, exit_state);
+ }
+ }
+
+ SwitchInt { ref targets, ref discr, switch_ty: _ } => {
+ let mut applier = ForwardSwitchIntEdgeEffectsApplier {
+ exit_state,
+ targets,
+ propagate,
+ effects_applied: false,
+ };
+
+ analysis.apply_switch_int_edge_effects(bb, discr, &mut applier);
+
+ let ForwardSwitchIntEdgeEffectsApplier {
+ exit_state,
+ mut propagate,
+ effects_applied,
+ ..
+ } = applier;
+
+ if !effects_applied {
+ for target in targets.all_targets() {
+ propagate(*target, exit_state);
+ }
+ }
+ }
+ }
+ }
+}
+
+struct ForwardSwitchIntEdgeEffectsApplier<'a, D, F> {
+ exit_state: &'a mut D,
+ targets: &'a SwitchTargets,
+ propagate: F,
+
+ effects_applied: bool,
+}
+
+impl<D, F> super::SwitchIntEdgeEffects<D> for ForwardSwitchIntEdgeEffectsApplier<'_, D, F>
+where
+ D: Clone,
+ F: FnMut(BasicBlock, &D),
+{
+ fn apply(&mut self, mut apply_edge_effect: impl FnMut(&mut D, SwitchIntTarget)) {
+ assert!(!self.effects_applied);
+
+ let mut tmp = None;
+ for (value, target) in self.targets.iter() {
+ let tmp = opt_clone_from_or_clone(&mut tmp, self.exit_state);
+ apply_edge_effect(tmp, SwitchIntTarget { value: Some(value), target });
+ (self.propagate)(target, tmp);
+ }
+
+ // Once we get to the final, "otherwise" branch, there is no need to preserve `exit_state`,
+ // so pass it directly to `apply_edge_effect` to save a clone of the dataflow state.
+ let otherwise = self.targets.otherwise();
+ apply_edge_effect(self.exit_state, SwitchIntTarget { value: None, target: otherwise });
+ (self.propagate)(otherwise, self.exit_state);
+
+ self.effects_applied = true;
+ }
+}
+
+/// An analogue of `Option::get_or_insert_with` that stores a clone of `val` into `opt`, but uses
+/// the more efficient `clone_from` if `opt` was `Some`.
+///
+/// Returns a mutable reference to the new clone that resides in `opt`.
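+///
+/// A hypothetical usage sketch (mirroring the switch-edge loops above):
+///
+/// ```rust,ignore
+/// let mut tmp = None;
+/// for target in targets {
+///     let state = opt_clone_from_or_clone(&mut tmp, &exit_state);
+///     // mutate `state` for this edge; `exit_state` itself stays untouched
+/// }
+/// ```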
+//
+// FIXME: Figure out how to express this using `Option::clone_from`, or maybe lift it into the
+// standard library?
+fn opt_clone_from_or_clone<'a, T: Clone>(opt: &'a mut Option<T>, val: &T) -> &'a mut T {
+ if opt.is_some() {
+ let ret = opt.as_mut().unwrap();
+ ret.clone_from(val);
+ ret
+ } else {
+ *opt = Some(val.clone());
+ opt.as_mut().unwrap()
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/framework/engine.rs b/compiler/rustc_mir_dataflow/src/framework/engine.rs
new file mode 100644
index 000000000..f374658ce
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/framework/engine.rs
@@ -0,0 +1,413 @@
+//! A solver for dataflow problems.
+
+use crate::framework::BitSetExt;
+
+use std::ffi::OsString;
+use std::path::PathBuf;
+
+use rustc_ast as ast;
+use rustc_data_structures::work_queue::WorkQueue;
+use rustc_graphviz as dot;
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::{self, traversal, BasicBlock};
+use rustc_middle::mir::{create_dump_file, dump_enabled};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::{sym, Symbol};
+
+use super::fmt::DebugWithContext;
+use super::graphviz;
+use super::{
+ visit_results, Analysis, Direction, GenKill, GenKillAnalysis, GenKillSet, JoinSemiLattice,
+ ResultsCursor, ResultsVisitor,
+};
+
+/// A dataflow analysis that has converged to fixpoint.
+pub struct Results<'tcx, A>
+where
+ A: Analysis<'tcx>,
+{
+ pub analysis: A,
+ pub(super) entry_sets: IndexVec<BasicBlock, A::Domain>,
+}
+
+impl<'tcx, A> Results<'tcx, A>
+where
+ A: Analysis<'tcx>,
+{
+ /// Creates a `ResultsCursor` that can inspect these `Results`.
+ pub fn into_results_cursor<'mir>(
+ self,
+ body: &'mir mir::Body<'tcx>,
+ ) -> ResultsCursor<'mir, 'tcx, A> {
+ ResultsCursor::new(body, self)
+ }
+
+ /// Gets the dataflow state for the given block.
+ pub fn entry_set_for_block(&self, block: BasicBlock) -> &A::Domain {
+ &self.entry_sets[block]
+ }
+
+ pub fn visit_with<'mir>(
+ &self,
+ body: &'mir mir::Body<'tcx>,
+ blocks: impl IntoIterator<Item = BasicBlock>,
+ vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = A::Domain>,
+ ) {
+ visit_results(body, blocks, self, vis)
+ }
+
+ pub fn visit_reachable_with<'mir>(
+ &self,
+ body: &'mir mir::Body<'tcx>,
+ vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = A::Domain>,
+ ) {
+ let blocks = mir::traversal::reachable(body);
+ visit_results(body, blocks.map(|(bb, _)| bb), self, vis)
+ }
+}
+
+/// A solver for dataflow problems.
+pub struct Engine<'a, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+{
+ tcx: TyCtxt<'tcx>,
+ body: &'a mir::Body<'tcx>,
+ dead_unwinds: Option<&'a BitSet<BasicBlock>>,
+ entry_sets: IndexVec<BasicBlock, A::Domain>,
+ pass_name: Option<&'static str>,
+ analysis: A,
+
+ /// Cached, cumulative transfer functions for each block.
+ //
+ // FIXME(ecstaticmorse): This boxed `Fn` trait object is invoked inside a tight loop for
+ // gen/kill problems on cyclic CFGs. This is not ideal, but it doesn't seem to degrade
+ // performance in practice. I've tried a few ways to avoid this, but they have downsides. See
+ // the message for the commit that added this FIXME for more information.
+ apply_trans_for_block: Option<Box<dyn Fn(BasicBlock, &mut A::Domain)>>,
+}
+
+impl<'a, 'tcx, A, D, T> Engine<'a, 'tcx, A>
+where
+ A: GenKillAnalysis<'tcx, Idx = T, Domain = D>,
+ D: Clone + JoinSemiLattice + GenKill<T> + BitSetExt<T>,
+ T: Idx,
+{
+ /// Creates a new `Engine` to solve a gen-kill dataflow problem.
+ pub fn new_gen_kill(tcx: TyCtxt<'tcx>, body: &'a mir::Body<'tcx>, analysis: A) -> Self {
+ // If there are no back-edges in the control-flow graph, we only ever need to apply the
+ // transfer function for each block exactly once (assuming that we process blocks in RPO).
+ //
+ // In this case, there's no need to compute the block transfer functions ahead of time.
+ if !body.basic_blocks.is_cfg_cyclic() {
+ return Self::new(tcx, body, analysis, None);
+ }
+
+ // Otherwise, compute and store the cumulative transfer function for each block.
+
+ let identity = GenKillSet::identity(analysis.bottom_value(body).domain_size());
+ let mut trans_for_block = IndexVec::from_elem(identity, body.basic_blocks());
+
+ for (block, block_data) in body.basic_blocks().iter_enumerated() {
+ let trans = &mut trans_for_block[block];
+ A::Direction::gen_kill_effects_in_block(&analysis, trans, block, block_data);
+ }
+
+ let apply_trans = Box::new(move |bb: BasicBlock, state: &mut A::Domain| {
+ trans_for_block[bb].apply(state);
+ });
+
+ Self::new(tcx, body, analysis, Some(apply_trans as Box<_>))
+ }
+}
+
+impl<'a, 'tcx, A, D> Engine<'a, 'tcx, A>
+where
+ A: Analysis<'tcx, Domain = D>,
+ D: Clone + JoinSemiLattice,
+{
+ /// Creates a new `Engine` to solve a dataflow problem with an arbitrary transfer
+ /// function.
+ ///
+ /// Gen-kill problems should use `new_gen_kill`, which will coalesce transfer functions for
+ /// better performance.
+ pub fn new_generic(tcx: TyCtxt<'tcx>, body: &'a mir::Body<'tcx>, analysis: A) -> Self {
+ Self::new(tcx, body, analysis, None)
+ }
+
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ body: &'a mir::Body<'tcx>,
+ analysis: A,
+ apply_trans_for_block: Option<Box<dyn Fn(BasicBlock, &mut A::Domain)>>,
+ ) -> Self {
+ let bottom_value = analysis.bottom_value(body);
+ let mut entry_sets = IndexVec::from_elem(bottom_value.clone(), body.basic_blocks());
+ analysis.initialize_start_block(body, &mut entry_sets[mir::START_BLOCK]);
+
+ if A::Direction::IS_BACKWARD && entry_sets[mir::START_BLOCK] != bottom_value {
+ bug!("`initialize_start_block` is not yet supported for backward dataflow analyses");
+ }
+
+ Engine {
+ analysis,
+ tcx,
+ body,
+ dead_unwinds: None,
+ pass_name: None,
+ entry_sets,
+ apply_trans_for_block,
+ }
+ }
+
+ /// Signals that we do not want dataflow state to propagate across unwind edges for these
+ /// `BasicBlock`s.
+ ///
+ /// You must take care that `dead_unwinds` does not contain a `BasicBlock` that *can* actually
+ /// unwind during execution. Otherwise, your dataflow results will not be correct.
+ pub fn dead_unwinds(mut self, dead_unwinds: &'a BitSet<BasicBlock>) -> Self {
+ self.dead_unwinds = Some(dead_unwinds);
+ self
+ }
+
+ /// Adds an identifier to the graphviz output for this particular run of a dataflow analysis.
+ ///
+ /// Some analyses are run multiple times in the compilation pipeline. Give them a `pass_name`
+ /// to differentiate them. Otherwise, only the results for the latest run will be saved.
+ pub fn pass_name(mut self, name: &'static str) -> Self {
+ self.pass_name = Some(name);
+ self
+ }
+
+ /// Computes the fixpoint for this dataflow problem and returns it.
+ pub fn iterate_to_fixpoint(self) -> Results<'tcx, A>
+ where
+ A::Domain: DebugWithContext<A>,
+ {
+ let Engine {
+ analysis,
+ body,
+ dead_unwinds,
+ mut entry_sets,
+ tcx,
+ apply_trans_for_block,
+ pass_name,
+ ..
+ } = self;
+
+ let mut dirty_queue: WorkQueue<BasicBlock> =
+ WorkQueue::with_none(body.basic_blocks().len());
+
+ if A::Direction::IS_FORWARD {
+ for (bb, _) in traversal::reverse_postorder(body) {
+ dirty_queue.insert(bb);
+ }
+ } else {
+ // Reverse post-order on the reverse CFG may generate a better iteration order for
+ // backward dataflow analyses, but probably not enough to matter.
+ for (bb, _) in traversal::postorder(body) {
+ dirty_queue.insert(bb);
+ }
+ }
+
+ // `state` is not actually used between iterations;
+ // this is just an optimization to avoid reallocating
+ // every iteration.
+ let mut state = analysis.bottom_value(body);
+ while let Some(bb) = dirty_queue.pop() {
+ let bb_data = &body[bb];
+
+ // Set the state to the entry state of the block.
+ // This is equivalent to `state = entry_sets[bb].clone()`,
+ // but it saves an allocation, thus improving compile times.
+ state.clone_from(&entry_sets[bb]);
+
+ // Apply the block transfer function, using the cached one if it exists.
+ match &apply_trans_for_block {
+ Some(apply) => apply(bb, &mut state),
+ None => A::Direction::apply_effects_in_block(&analysis, &mut state, bb, bb_data),
+ }
+
+ A::Direction::join_state_into_successors_of(
+ &analysis,
+ tcx,
+ body,
+ dead_unwinds,
+ &mut state,
+ (bb, bb_data),
+ |target: BasicBlock, state: &A::Domain| {
+ let set_changed = entry_sets[target].join(state);
+ if set_changed {
+ dirty_queue.insert(target);
+ }
+ },
+ );
+ }
+
+ let results = Results { analysis, entry_sets };
+
+ let res = write_graphviz_results(tcx, &body, &results, pass_name);
+ if let Err(e) = res {
+ error!("Failed to write graphviz dataflow results: {}", e);
+ }
+
+ results
+ }
+}
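
The loop in `iterate_to_fixpoint` is the classic worklist fixpoint algorithm. For reference, here is a self-contained sketch of that algorithm over a toy CFG, assuming a forward powerset analysis whose join is set union; the names and the dedup-by-flag queue are made up for illustration and merely mimic what `WorkQueue` does.

```rust
use std::collections::{BTreeSet, VecDeque};

/// One step per dequeued block: recompute the block's exit state from its
/// entry state, then join it into each successor, re-enqueueing successors
/// whose entry state grew.
fn iterate_to_fixpoint(
    num_blocks: usize,
    successors: &[Vec<usize>],
    transfer: impl Fn(usize, &mut BTreeSet<u32>),
) -> Vec<BTreeSet<u32>> {
    let mut entry_sets = vec![BTreeSet::new(); num_blocks];
    let mut dirty: VecDeque<usize> = (0..num_blocks).collect();
    let mut queued = vec![true; num_blocks];

    while let Some(bb) = dirty.pop_front() {
        queued[bb] = false;
        let mut state = entry_sets[bb].clone();
        transfer(bb, &mut state); // apply the block transfer function

        for &succ in &successors[bb] {
            // Join is union for a powerset domain; "changed" means it grew.
            let before = entry_sets[succ].len();
            entry_sets[succ].extend(state.iter().copied());
            if entry_sets[succ].len() != before && !queued[succ] {
                queued[succ] = true;
                dirty.push_back(succ);
            }
        }
    }

    entry_sets
}

fn main() {
    // bb0 -> bb1 -> bb2, plus a back-edge bb2 -> bb1.
    let successors = vec![vec![1], vec![2], vec![1]];
    let results = iterate_to_fixpoint(3, &successors, |bb, state| {
        state.insert(bb as u32); // each block "gens" its own index
    });
    // The back-edge forces a second pass: bb1's entry state eventually
    // accumulates the contributions of bb0 *and* bb2.
    assert_eq!(results[1], BTreeSet::from([0, 1, 2]));
}
```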
+
+// Graphviz
+
+/// Writes a DOT file containing the results of a dataflow analysis if the user requested it via
+/// `rustc_mir` attributes.
+fn write_graphviz_results<'tcx, A>(
+ tcx: TyCtxt<'tcx>,
+ body: &mir::Body<'tcx>,
+ results: &Results<'tcx, A>,
+ pass_name: Option<&'static str>,
+) -> std::io::Result<()>
+where
+ A: Analysis<'tcx>,
+ A::Domain: DebugWithContext<A>,
+{
+ use std::fs;
+ use std::io::{self, Write};
+
+ let def_id = body.source.def_id();
+ let Ok(attrs) = RustcMirAttrs::parse(tcx, def_id) else {
+ // Invalid `rustc_mir` attrs are reported in `RustcMirAttrs::parse`
+ return Ok(());
+ };
+
+ let mut file = match attrs.output_path(A::NAME) {
+ Some(path) => {
+ debug!("printing dataflow results for {:?} to {}", def_id, path.display());
+ if let Some(parent) = path.parent() {
+ fs::create_dir_all(parent)?;
+ }
+ io::BufWriter::new(fs::File::create(&path)?)
+ }
+
+ None if tcx.sess.opts.unstable_opts.dump_mir_dataflow
+ && dump_enabled(tcx, A::NAME, def_id) =>
+ {
+ create_dump_file(
+ tcx,
+ ".dot",
+ None,
+ A::NAME,
+ &pass_name.unwrap_or("-----"),
+ body.source,
+ )?
+ }
+
+ _ => return Ok(()),
+ };
+
+ let style = match attrs.formatter {
+ Some(sym::two_phase) => graphviz::OutputStyle::BeforeAndAfter,
+ _ => graphviz::OutputStyle::AfterOnly,
+ };
+
+ let mut buf = Vec::new();
+
+ let graphviz = graphviz::Formatter::new(body, results, style);
+ let mut render_opts =
+ vec![dot::RenderOption::Fontname(tcx.sess.opts.unstable_opts.graphviz_font.clone())];
+ if tcx.sess.opts.unstable_opts.graphviz_dark_mode {
+ render_opts.push(dot::RenderOption::DarkTheme);
+ }
+ dot::render_opts(&graphviz, &mut buf, &render_opts)?;
+
+ file.write_all(&buf)?;
+
+ Ok(())
+}
+
+#[derive(Default)]
+struct RustcMirAttrs {
+ basename_and_suffix: Option<PathBuf>,
+ formatter: Option<Symbol>,
+}
+
+impl RustcMirAttrs {
+ fn parse(tcx: TyCtxt<'_>, def_id: DefId) -> Result<Self, ()> {
+ let mut result = Ok(());
+ let mut ret = RustcMirAttrs::default();
+
+ let rustc_mir_attrs = tcx
+ .get_attrs(def_id, sym::rustc_mir)
+ .flat_map(|attr| attr.meta_item_list().into_iter().flat_map(|v| v.into_iter()));
+
+ for attr in rustc_mir_attrs {
+ let attr_result = if attr.has_name(sym::borrowck_graphviz_postflow) {
+ Self::set_field(&mut ret.basename_and_suffix, tcx, &attr, |s| {
+ let path = PathBuf::from(s.to_string());
+ match path.file_name() {
+ Some(_) => Ok(path),
+ None => {
+ tcx.sess.span_err(attr.span(), "path must end in a filename");
+ Err(())
+ }
+ }
+ })
+ } else if attr.has_name(sym::borrowck_graphviz_format) {
+ Self::set_field(&mut ret.formatter, tcx, &attr, |s| match s {
+ sym::gen_kill | sym::two_phase => Ok(s),
+ _ => {
+ tcx.sess.span_err(attr.span(), "unknown formatter");
+ Err(())
+ }
+ })
+ } else {
+ Ok(())
+ };
+
+ result = result.and(attr_result);
+ }
+
+ result.map(|()| ret)
+ }
+
+ fn set_field<T>(
+ field: &mut Option<T>,
+ tcx: TyCtxt<'_>,
+ attr: &ast::NestedMetaItem,
+ mapper: impl FnOnce(Symbol) -> Result<T, ()>,
+ ) -> Result<(), ()> {
+ if field.is_some() {
+ tcx.sess
+ .span_err(attr.span(), &format!("duplicate values for `{}`", attr.name_or_empty()));
+
+ return Err(());
+ }
+
+ if let Some(s) = attr.value_str() {
+ *field = Some(mapper(s)?);
+ Ok(())
+ } else {
+ tcx.sess
+ .span_err(attr.span(), &format!("`{}` requires an argument", attr.name_or_empty()));
+ Err(())
+ }
+ }
+
+ /// Returns the path where dataflow results should be written, or `None`
+    /// if `borrowck_graphviz_postflow` was not specified.
+ ///
+ /// This performs the following transformation to the argument of `borrowck_graphviz_postflow`:
+ ///
+ /// "path/suffix.dot" -> "path/analysis_name_suffix.dot"
+ fn output_path(&self, analysis_name: &str) -> Option<PathBuf> {
+ let mut ret = self.basename_and_suffix.as_ref().cloned()?;
+ let suffix = ret.file_name().unwrap(); // Checked when parsing attrs
+
+ let mut file_name: OsString = analysis_name.into();
+ file_name.push("_");
+ file_name.push(suffix);
+ ret.set_file_name(file_name);
+
+ Some(ret)
+ }
+}
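
For context, these attributes are the knobs that drive `write_graphviz_results` above. A hypothetical use from inside rustc's own test suite might look as follows; the attribute is compiler-internal and gated behind `rustc_attrs`, and the function body is arbitrary.

```rust
#![feature(rustc_attrs)]

// Requests a post-dataflow Graphviz dump. Per `output_path`, an analysis
// named e.g. `maybe_init` would be written to `flow/maybe_init_suffix.dot`;
// `two_phase` selects the BEFORE/AFTER column layout.
#[rustc_mir(
    borrowck_graphviz_postflow = "flow/suffix.dot",
    borrowck_graphviz_format = "two_phase"
)]
fn example(x: bool) -> i32 {
    if x { 1 } else { 2 }
}

fn main() {}
```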
diff --git a/compiler/rustc_mir_dataflow/src/framework/fmt.rs b/compiler/rustc_mir_dataflow/src/framework/fmt.rs
new file mode 100644
index 000000000..209e6f7ac
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/framework/fmt.rs
@@ -0,0 +1,211 @@
+//! Custom formatting traits used when outputting Graphviz diagrams with the results of a dataflow
+//! analysis.
+
+use rustc_index::bit_set::{BitSet, ChunkedBitSet, HybridBitSet};
+use rustc_index::vec::Idx;
+use std::fmt;
+
+/// An extension to `fmt::Debug` for data that can be better printed with some auxiliary data `C`.
+pub trait DebugWithContext<C>: Eq + fmt::Debug {
+ fn fmt_with(&self, _ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self, f)
+ }
+
+ /// Print the difference between `self` and `old`.
+ ///
+ /// This should print nothing if `self == old`.
+ ///
+ /// `+` and `-` are typically used to indicate differences. However, these characters are
+    /// fairly common and may be needed to print a type's representation. If using them to indicate
+ /// a diff, prefix them with the "Unit Separator" control character (␟ U+001F).
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self == old {
+ return Ok(());
+ }
+
+ write!(f, "\u{001f}+")?;
+ self.fmt_with(ctxt, f)?;
+
+ if f.alternate() {
+ write!(f, "\n")?;
+ } else {
+ write!(f, "\t")?;
+ }
+
+ write!(f, "\u{001f}-")?;
+ old.fmt_with(ctxt, f)
+ }
+}
+
+/// Implements `fmt::Debug` by deferring to `<T as DebugWithContext<C>>::fmt_with`.
+pub struct DebugWithAdapter<'a, T, C> {
+ pub this: T,
+ pub ctxt: &'a C,
+}
+
+impl<T, C> fmt::Debug for DebugWithAdapter<'_, T, C>
+where
+ T: DebugWithContext<C>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.this.fmt_with(self.ctxt, f)
+ }
+}
+
+/// Implements `fmt::Debug` by deferring to `<T as DebugWithContext<C>>::fmt_diff_with`.
+pub struct DebugDiffWithAdapter<'a, T, C> {
+ pub new: T,
+ pub old: T,
+ pub ctxt: &'a C,
+}
+
+impl<T, C> fmt::Debug for DebugDiffWithAdapter<'_, T, C>
+where
+ T: DebugWithContext<C>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.new.fmt_diff_with(&self.old, self.ctxt, f)
+ }
+}
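
These adapters exist so that context-aware formatting can ride on the ordinary `fmt::Debug` machinery, for example inside `f.debug_set().entries(...)` in the impls below. Here is a standalone sketch of the same pattern with made-up types standing in for the rustc ones.

```rust
use std::fmt;

/// Analog of `DebugWithContext`: data that prints better with some
/// auxiliary context `C` (here, a table of human-readable names).
trait DebugWith<C> {
    fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result;
}

struct Names(Vec<&'static str>);

/// A bare index is meaningless on its own...
struct Local(usize);

impl DebugWith<Names> for Local {
    fn fmt_with(&self, ctxt: &Names, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", ctxt.0[self.0])
    }
}

/// ...so the adapter pairs it with its context and defers `Debug` to
/// `fmt_with`, exactly like `DebugWithAdapter` does.
struct WithCtxt<'a, T, C> {
    this: T,
    ctxt: &'a C,
}

impl<T: DebugWith<C>, C> fmt::Debug for WithCtxt<'_, T, C> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.this.fmt_with(self.ctxt, f)
    }
}

fn main() {
    let names = Names(vec!["_0", "x", "y"]);
    println!("{:?}", WithCtxt { this: Local(1), ctxt: &names }); // prints `x`
}
```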
+
+// Impls
+
+impl<T, C> DebugWithContext<C> for BitSet<T>
+where
+ T: Idx + DebugWithContext<C>,
+{
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_set().entries(self.iter().map(|i| DebugWithAdapter { this: i, ctxt })).finish()
+ }
+
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let size = self.domain_size();
+ assert_eq!(size, old.domain_size());
+
+ let mut set_in_self = HybridBitSet::new_empty(size);
+ let mut cleared_in_self = HybridBitSet::new_empty(size);
+
+ for i in (0..size).map(T::new) {
+ match (self.contains(i), old.contains(i)) {
+ (true, false) => set_in_self.insert(i),
+ (false, true) => cleared_in_self.insert(i),
+ _ => continue,
+ };
+ }
+
+ fmt_diff(&set_in_self, &cleared_in_self, ctxt, f)
+ }
+}
+
+impl<T, C> DebugWithContext<C> for ChunkedBitSet<T>
+where
+ T: Idx + DebugWithContext<C>,
+{
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_set().entries(self.iter().map(|i| DebugWithAdapter { this: i, ctxt })).finish()
+ }
+
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let size = self.domain_size();
+ assert_eq!(size, old.domain_size());
+
+ let mut set_in_self = HybridBitSet::new_empty(size);
+ let mut cleared_in_self = HybridBitSet::new_empty(size);
+
+ for i in (0..size).map(T::new) {
+ match (self.contains(i), old.contains(i)) {
+ (true, false) => set_in_self.insert(i),
+ (false, true) => cleared_in_self.insert(i),
+ _ => continue,
+ };
+ }
+
+ fmt_diff(&set_in_self, &cleared_in_self, ctxt, f)
+ }
+}
+
+fn fmt_diff<T, C>(
+ inserted: &HybridBitSet<T>,
+ removed: &HybridBitSet<T>,
+ ctxt: &C,
+ f: &mut fmt::Formatter<'_>,
+) -> fmt::Result
+where
+ T: Idx + DebugWithContext<C>,
+{
+ let mut first = true;
+ for idx in inserted.iter() {
+ let delim = if first {
+ "\u{001f}+"
+ } else if f.alternate() {
+ "\n\u{001f}+"
+ } else {
+ ", "
+ };
+
+ write!(f, "{}", delim)?;
+ idx.fmt_with(ctxt, f)?;
+ first = false;
+ }
+
+ if !f.alternate() {
+ first = true;
+ if !inserted.is_empty() && !removed.is_empty() {
+ write!(f, "\t")?;
+ }
+ }
+
+ for idx in removed.iter() {
+ let delim = if first {
+ "\u{001f}-"
+ } else if f.alternate() {
+ "\n\u{001f}-"
+ } else {
+ ", "
+ };
+
+ write!(f, "{}", delim)?;
+ idx.fmt_with(ctxt, f)?;
+ first = false;
+ }
+
+ Ok(())
+}
+
+impl<T, C> DebugWithContext<C> for &'_ T
+where
+ T: DebugWithContext<C>,
+{
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (*self).fmt_with(ctxt, f)
+ }
+
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (*self).fmt_diff_with(*old, ctxt, f)
+ }
+}
+
+impl<C> DebugWithContext<C> for rustc_middle::mir::Local {}
+impl<C> DebugWithContext<C> for crate::move_paths::InitIndex {}
+
+impl<'tcx, C> DebugWithContext<C> for crate::move_paths::MovePathIndex
+where
+ C: crate::move_paths::HasMoveData<'tcx>,
+{
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", ctxt.move_data().move_paths[*self])
+ }
+}
+
+impl<T, C> DebugWithContext<C> for crate::lattice::Dual<T>
+where
+ T: DebugWithContext<C>,
+{
+ fn fmt_with(&self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (self.0).fmt_with(ctxt, f)
+ }
+
+ fn fmt_diff_with(&self, old: &Self, ctxt: &C, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (self.0).fmt_diff_with(&old.0, ctxt, f)
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
new file mode 100644
index 000000000..c94198c56
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs
@@ -0,0 +1,667 @@
+//! A helpful diagram for debugging dataflow problems.
+
+use std::borrow::Cow;
+use std::sync::OnceLock;
+use std::{io, ops, str};
+
+use regex::Regex;
+use rustc_graphviz as dot;
+use rustc_middle::mir::graphviz_safe_def_name;
+use rustc_middle::mir::{self, BasicBlock, Body, Location};
+
+use super::fmt::{DebugDiffWithAdapter, DebugWithAdapter, DebugWithContext};
+use super::{Analysis, CallReturnPlaces, Direction, Results, ResultsRefCursor, ResultsVisitor};
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum OutputStyle {
+ AfterOnly,
+ BeforeAndAfter,
+}
+
+impl OutputStyle {
+ fn num_state_columns(&self) -> usize {
+ match self {
+ Self::AfterOnly => 1,
+ Self::BeforeAndAfter => 2,
+ }
+ }
+}
+
+pub struct Formatter<'a, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+{
+ body: &'a Body<'tcx>,
+ results: &'a Results<'tcx, A>,
+ style: OutputStyle,
+}
+
+impl<'a, 'tcx, A> Formatter<'a, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+{
+ pub fn new(body: &'a Body<'tcx>, results: &'a Results<'tcx, A>, style: OutputStyle) -> Self {
+ Formatter { body, results, style }
+ }
+}
+
+/// A pair of a basic block and an index into that basic block's `successors`.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct CfgEdge {
+ source: BasicBlock,
+ index: usize,
+}
+
+fn dataflow_successors(body: &Body<'_>, bb: BasicBlock) -> Vec<CfgEdge> {
+ body[bb]
+ .terminator()
+ .successors()
+ .enumerate()
+ .map(|(index, _)| CfgEdge { source: bb, index })
+ .collect()
+}
+
+impl<'tcx, A> dot::Labeller<'_> for Formatter<'_, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+ A::Domain: DebugWithContext<A>,
+{
+ type Node = BasicBlock;
+ type Edge = CfgEdge;
+
+ fn graph_id(&self) -> dot::Id<'_> {
+ let name = graphviz_safe_def_name(self.body.source.def_id());
+ dot::Id::new(format!("graph_for_def_id_{}", name)).unwrap()
+ }
+
+ fn node_id(&self, n: &Self::Node) -> dot::Id<'_> {
+ dot::Id::new(format!("bb_{}", n.index())).unwrap()
+ }
+
+ fn node_label(&self, block: &Self::Node) -> dot::LabelText<'_> {
+ let mut label = Vec::new();
+ let mut fmt = BlockFormatter {
+ results: ResultsRefCursor::new(self.body, self.results),
+ style: self.style,
+ bg: Background::Light,
+ };
+
+ fmt.write_node_label(&mut label, self.body, *block).unwrap();
+ dot::LabelText::html(String::from_utf8(label).unwrap())
+ }
+
+ fn node_shape(&self, _n: &Self::Node) -> Option<dot::LabelText<'_>> {
+ Some(dot::LabelText::label("none"))
+ }
+
+ fn edge_label(&self, e: &Self::Edge) -> dot::LabelText<'_> {
+ let label = &self.body[e.source].terminator().kind.fmt_successor_labels()[e.index];
+ dot::LabelText::label(label.clone())
+ }
+}
+
+impl<'a, 'tcx, A> dot::GraphWalk<'a> for Formatter<'a, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+{
+ type Node = BasicBlock;
+ type Edge = CfgEdge;
+
+ fn nodes(&self) -> dot::Nodes<'_, Self::Node> {
+ self.body.basic_blocks().indices().collect::<Vec<_>>().into()
+ }
+
+ fn edges(&self) -> dot::Edges<'_, Self::Edge> {
+ self.body
+ .basic_blocks()
+ .indices()
+ .flat_map(|bb| dataflow_successors(self.body, bb))
+ .collect::<Vec<_>>()
+ .into()
+ }
+
+ fn source(&self, edge: &Self::Edge) -> Self::Node {
+ edge.source
+ }
+
+ fn target(&self, edge: &Self::Edge) -> Self::Node {
+ self.body[edge.source].terminator().successors().nth(edge.index).unwrap()
+ }
+}
+
+struct BlockFormatter<'a, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+{
+ results: ResultsRefCursor<'a, 'a, 'tcx, A>,
+ bg: Background,
+ style: OutputStyle,
+}
+
+impl<'a, 'tcx, A> BlockFormatter<'a, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+ A::Domain: DebugWithContext<A>,
+{
+ const HEADER_COLOR: &'static str = "#a0a0a0";
+
+ fn toggle_background(&mut self) -> Background {
+ let bg = self.bg;
+ self.bg = !bg;
+ bg
+ }
+
+ fn write_node_label(
+ &mut self,
+ w: &mut impl io::Write,
+ body: &'a Body<'tcx>,
+ block: BasicBlock,
+ ) -> io::Result<()> {
+ // Sample output:
+ // +-+-----------------------------------------------+
+ // A | bb4 |
+ // +-+----------------------------------+------------+
+ // B | MIR | STATE |
+ // +-+----------------------------------+------------+
+ // C | | (on entry) | {_0,_2,_3} |
+ // +-+----------------------------------+------------+
+ // D |0| StorageLive(_7) | |
+ // +-+----------------------------------+------------+
+ // |1| StorageLive(_8) | |
+ // +-+----------------------------------+------------+
+ // |2| _8 = &mut _1 | +_8 |
+ // +-+----------------------------------+------------+
+ // E |T| _4 = const Foo::twiddle(move _2) | -_2 |
+ // +-+----------------------------------+------------+
+ // F | | (on unwind) | {_0,_3,_8} |
+ // +-+----------------------------------+------------+
+ // | | (on successful return) | +_4 |
+ // +-+----------------------------------+------------+
+
+ // N.B., Some attributes (`align`, `balign`) are repeated on parent elements and their
+ // children. This is because `xdot` seemed to have a hard time correctly propagating
+ // attributes. Make sure to test the output before trying to remove the redundancy.
+ // Notably, `align` was found to have no effect when applied only to <table>.
+
+ let table_fmt = concat!(
+ " border=\"1\"",
+ " cellborder=\"1\"",
+ " cellspacing=\"0\"",
+ " cellpadding=\"3\"",
+ " sides=\"rb\"",
+ );
+ write!(w, r#"<table{fmt}>"#, fmt = table_fmt)?;
+
+ // A + B: Block header
+ match self.style {
+ OutputStyle::AfterOnly => self.write_block_header_simple(w, block)?,
+ OutputStyle::BeforeAndAfter => {
+ self.write_block_header_with_state_columns(w, block, &["BEFORE", "AFTER"])?
+ }
+ }
+
+ // C: State at start of block
+ self.bg = Background::Light;
+ self.results.seek_to_block_start(block);
+ let block_start_state = self.results.get().clone();
+        self.write_row_with_full_state(w, "", "(on entry)")?;
+
+ // D + E: Statement and terminator transfer functions
+ self.write_statements_and_terminator(w, body, block)?;
+
+ // F: State at end of block
+
+ let terminator = body[block].terminator();
+
+ // Write the full dataflow state immediately after the terminator if it differs from the
+ // state at block entry.
+ self.results.seek_to_block_end(block);
+ if self.results.get() != &block_start_state || A::Direction::IS_BACKWARD {
+ let after_terminator_name = match terminator.kind {
+ mir::TerminatorKind::Call { target: Some(_), .. } => "(on unwind)",
+ _ => "(on end)",
+ };
+
+ self.write_row_with_full_state(w, "", after_terminator_name)?;
+ }
+
+ // Write any changes caused by terminator-specific effects.
+ //
+ // FIXME: These should really be printed as part of each outgoing edge rather than the node
+ // for the basic block itself. That way, we could display terminator-specific effects for
+ // backward dataflow analyses as well as effects for `SwitchInt` terminators.
+ match terminator.kind {
+ mir::TerminatorKind::Call { destination, .. } => {
+ self.write_row(w, "", "(on successful return)", |this, w, fmt| {
+ let state_on_unwind = this.results.get().clone();
+ this.results.apply_custom_effect(|analysis, state| {
+ analysis.apply_call_return_effect(
+ state,
+ block,
+ CallReturnPlaces::Call(destination),
+ );
+ });
+
+ write!(
+ w,
+ r#"<td balign="left" colspan="{colspan}" {fmt} align="left">{diff}</td>"#,
+ colspan = this.style.num_state_columns(),
+ fmt = fmt,
+ diff = diff_pretty(
+ this.results.get(),
+ &state_on_unwind,
+ this.results.analysis()
+ ),
+ )
+ })?;
+ }
+
+ mir::TerminatorKind::Yield { resume, resume_arg, .. } => {
+ self.write_row(w, "", "(on yield resume)", |this, w, fmt| {
+ let state_on_generator_drop = this.results.get().clone();
+ this.results.apply_custom_effect(|analysis, state| {
+ analysis.apply_yield_resume_effect(state, resume, resume_arg);
+ });
+
+ write!(
+ w,
+ r#"<td balign="left" colspan="{colspan}" {fmt} align="left">{diff}</td>"#,
+ colspan = this.style.num_state_columns(),
+ fmt = fmt,
+ diff = diff_pretty(
+ this.results.get(),
+ &state_on_generator_drop,
+ this.results.analysis()
+ ),
+ )
+ })?;
+ }
+
+ mir::TerminatorKind::InlineAsm { destination: Some(_), ref operands, .. } => {
+ self.write_row(w, "", "(on successful return)", |this, w, fmt| {
+ let state_on_unwind = this.results.get().clone();
+ this.results.apply_custom_effect(|analysis, state| {
+ analysis.apply_call_return_effect(
+ state,
+ block,
+ CallReturnPlaces::InlineAsm(operands),
+ );
+ });
+
+ write!(
+ w,
+ r#"<td balign="left" colspan="{colspan}" {fmt} align="left">{diff}</td>"#,
+ colspan = this.style.num_state_columns(),
+ fmt = fmt,
+ diff = diff_pretty(
+ this.results.get(),
+ &state_on_unwind,
+ this.results.analysis()
+ ),
+ )
+ })?;
+ }
+
+ _ => {}
+ };
+
+ write!(w, "</table>")
+ }
+
+ fn write_block_header_simple(
+ &mut self,
+ w: &mut impl io::Write,
+ block: BasicBlock,
+ ) -> io::Result<()> {
+ // +-------------------------------------------------+
+ // A | bb4 |
+ // +-----------------------------------+-------------+
+ // B | MIR | STATE |
+ // +-+---------------------------------+-------------+
+ // | | ... | |
+
+ // A
+ write!(
+ w,
+ concat!("<tr>", r#"<td colspan="3" sides="tl">bb{block_id}</td>"#, "</tr>",),
+ block_id = block.index(),
+ )?;
+
+ // B
+ write!(
+ w,
+ concat!(
+ "<tr>",
+ r#"<td colspan="2" {fmt}>MIR</td>"#,
+ r#"<td {fmt}>STATE</td>"#,
+ "</tr>",
+ ),
+ fmt = format!("bgcolor=\"{}\" sides=\"tl\"", Self::HEADER_COLOR),
+ )
+ }
+
+ fn write_block_header_with_state_columns(
+ &mut self,
+ w: &mut impl io::Write,
+ block: BasicBlock,
+ state_column_names: &[&str],
+ ) -> io::Result<()> {
+ // +------------------------------------+-------------+
+ // A | bb4 | STATE |
+ // +------------------------------------+------+------+
+ // B | MIR | GEN | KILL |
+ // +-+----------------------------------+------+------+
+ // | | ... | | |
+
+ // A
+ write!(
+ w,
+ concat!(
+ "<tr>",
+ r#"<td {fmt} colspan="2">bb{block_id}</td>"#,
+ r#"<td {fmt} colspan="{num_state_cols}">STATE</td>"#,
+ "</tr>",
+ ),
+ fmt = "sides=\"tl\"",
+ num_state_cols = state_column_names.len(),
+ block_id = block.index(),
+ )?;
+
+ // B
+ let fmt = format!("bgcolor=\"{}\" sides=\"tl\"", Self::HEADER_COLOR);
+ write!(w, concat!("<tr>", r#"<td colspan="2" {fmt}>MIR</td>"#,), fmt = fmt,)?;
+
+ for name in state_column_names {
+ write!(w, "<td {fmt}>{name}</td>", fmt = fmt, name = name)?;
+ }
+
+ write!(w, "</tr>")
+ }
+
+ fn write_statements_and_terminator(
+ &mut self,
+ w: &mut impl io::Write,
+ body: &'a Body<'tcx>,
+ block: BasicBlock,
+ ) -> io::Result<()> {
+ let diffs = StateDiffCollector::run(body, block, self.results.results(), self.style);
+
+ let mut befores = diffs.before.map(|v| v.into_iter());
+ let mut afters = diffs.after.into_iter();
+
+ let next_in_dataflow_order = |it: &mut std::vec::IntoIter<_>| {
+ if A::Direction::IS_FORWARD { it.next().unwrap() } else { it.next_back().unwrap() }
+ };
+
+ for (i, statement) in body[block].statements.iter().enumerate() {
+ let statement_str = format!("{:?}", statement);
+ let index_str = format!("{}", i);
+
+ let after = next_in_dataflow_order(&mut afters);
+ let before = befores.as_mut().map(next_in_dataflow_order);
+
+ self.write_row(w, &index_str, &statement_str, |_this, w, fmt| {
+ if let Some(before) = before {
+ write!(w, r#"<td {fmt} align="left">{diff}</td>"#, fmt = fmt, diff = before)?;
+ }
+
+ write!(w, r#"<td {fmt} align="left">{diff}</td>"#, fmt = fmt, diff = after)
+ })?;
+ }
+
+ let after = next_in_dataflow_order(&mut afters);
+ let before = befores.as_mut().map(next_in_dataflow_order);
+
+ assert!(afters.is_empty());
+ assert!(befores.as_ref().map_or(true, ExactSizeIterator::is_empty));
+
+ let terminator = body[block].terminator();
+ let mut terminator_str = String::new();
+ terminator.kind.fmt_head(&mut terminator_str).unwrap();
+
+ self.write_row(w, "T", &terminator_str, |_this, w, fmt| {
+ if let Some(before) = before {
+ write!(w, r#"<td {fmt} align="left">{diff}</td>"#, fmt = fmt, diff = before)?;
+ }
+
+ write!(w, r#"<td {fmt} align="left">{diff}</td>"#, fmt = fmt, diff = after)
+ })
+ }
+
+ /// Write a row with the given index and MIR, using the function argument to fill in the
+ /// "STATE" column(s).
+ fn write_row<W: io::Write>(
+ &mut self,
+ w: &mut W,
+ i: &str,
+ mir: &str,
+ f: impl FnOnce(&mut Self, &mut W, &str) -> io::Result<()>,
+ ) -> io::Result<()> {
+ let bg = self.toggle_background();
+ let valign = if mir.starts_with("(on ") && mir != "(on entry)" { "bottom" } else { "top" };
+
+ let fmt = format!("valign=\"{}\" sides=\"tl\" {}", valign, bg.attr());
+
+ write!(
+ w,
+ concat!(
+ "<tr>",
+ r#"<td {fmt} align="right">{i}</td>"#,
+ r#"<td {fmt} align="left">{mir}</td>"#,
+ ),
+ i = i,
+ fmt = fmt,
+ mir = dot::escape_html(mir),
+ )?;
+
+ f(self, w, &fmt)?;
+ write!(w, "</tr>")
+ }
+
+ fn write_row_with_full_state(
+ &mut self,
+ w: &mut impl io::Write,
+ i: &str,
+ mir: &str,
+ ) -> io::Result<()> {
+ self.write_row(w, i, mir, |this, w, fmt| {
+ let state = this.results.get();
+ let analysis = this.results.analysis();
+
+ // FIXME: The full state vector can be quite long. It would be nice to split on commas
+ // and use some text wrapping algorithm.
+ write!(
+ w,
+ r#"<td colspan="{colspan}" {fmt} align="left">{state}</td>"#,
+ colspan = this.style.num_state_columns(),
+ fmt = fmt,
+ state = format!("{:?}", DebugWithAdapter { this: state, ctxt: analysis }),
+ )
+ })
+ }
+}
+
+struct StateDiffCollector<'a, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+{
+ analysis: &'a A,
+ prev_state: A::Domain,
+ before: Option<Vec<String>>,
+ after: Vec<String>,
+}
+
+impl<'a, 'tcx, A> StateDiffCollector<'a, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+ A::Domain: DebugWithContext<A>,
+{
+ fn run(
+ body: &'a mir::Body<'tcx>,
+ block: BasicBlock,
+ results: &'a Results<'tcx, A>,
+ style: OutputStyle,
+ ) -> Self {
+ let mut collector = StateDiffCollector {
+ analysis: &results.analysis,
+ prev_state: results.analysis.bottom_value(body),
+ after: vec![],
+ before: (style == OutputStyle::BeforeAndAfter).then_some(vec![]),
+ };
+
+ results.visit_with(body, std::iter::once(block), &mut collector);
+ collector
+ }
+}
+
+impl<'a, 'tcx, A> ResultsVisitor<'a, 'tcx> for StateDiffCollector<'a, 'tcx, A>
+where
+ A: Analysis<'tcx>,
+ A::Domain: DebugWithContext<A>,
+{
+ type FlowState = A::Domain;
+
+ fn visit_block_start(
+ &mut self,
+ state: &Self::FlowState,
+ _block_data: &mir::BasicBlockData<'tcx>,
+ _block: BasicBlock,
+ ) {
+ if A::Direction::IS_FORWARD {
+ self.prev_state.clone_from(state);
+ }
+ }
+
+ fn visit_block_end(
+ &mut self,
+ state: &Self::FlowState,
+ _block_data: &mir::BasicBlockData<'tcx>,
+ _block: BasicBlock,
+ ) {
+ if A::Direction::IS_BACKWARD {
+ self.prev_state.clone_from(state);
+ }
+ }
+
+ fn visit_statement_before_primary_effect(
+ &mut self,
+ state: &Self::FlowState,
+ _statement: &mir::Statement<'tcx>,
+ _location: Location,
+ ) {
+ if let Some(before) = self.before.as_mut() {
+ before.push(diff_pretty(state, &self.prev_state, self.analysis));
+ self.prev_state.clone_from(state)
+ }
+ }
+
+ fn visit_statement_after_primary_effect(
+ &mut self,
+ state: &Self::FlowState,
+ _statement: &mir::Statement<'tcx>,
+ _location: Location,
+ ) {
+ self.after.push(diff_pretty(state, &self.prev_state, self.analysis));
+ self.prev_state.clone_from(state)
+ }
+
+ fn visit_terminator_before_primary_effect(
+ &mut self,
+ state: &Self::FlowState,
+ _terminator: &mir::Terminator<'tcx>,
+ _location: Location,
+ ) {
+ if let Some(before) = self.before.as_mut() {
+ before.push(diff_pretty(state, &self.prev_state, self.analysis));
+ self.prev_state.clone_from(state)
+ }
+ }
+
+ fn visit_terminator_after_primary_effect(
+ &mut self,
+ state: &Self::FlowState,
+ _terminator: &mir::Terminator<'tcx>,
+ _location: Location,
+ ) {
+ self.after.push(diff_pretty(state, &self.prev_state, self.analysis));
+ self.prev_state.clone_from(state)
+ }
+}
+
+macro_rules! regex {
+ ($re:literal $(,)?) => {{
+ static RE: OnceLock<regex::Regex> = OnceLock::new();
+ RE.get_or_init(|| Regex::new($re).unwrap())
+ }};
+}
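
The macro compiles its pattern once and caches it in the `static`, so `diff_pretty` below does not rebuild the regex on every call. A small usage sketch (the helper function is hypothetical):

```rust
// Only the first call pays for `Regex::new`; later calls reuse the
// compiled regex cached in the `OnceLock`.
fn marker_sign(s: &str) -> Option<char> {
    let re = regex!("\t?\u{001f}([+-])");
    re.captures(s)?.get(1)?.as_str().chars().next()
}

// marker_sign("\u{001f}+_4") == Some('+')
// marker_sign("plain text") == None
```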
+
+fn diff_pretty<T, C>(new: T, old: T, ctxt: &C) -> String
+where
+ T: DebugWithContext<C>,
+{
+ if new == old {
+ return String::new();
+ }
+
+ let re = regex!("\t?\u{001f}([+-])");
+
+ let raw_diff = format!("{:#?}", DebugDiffWithAdapter { new, old, ctxt });
+
+ // Replace newlines in the `Debug` output with `<br/>`
+ let raw_diff = raw_diff.replace('\n', r#"<br align="left"/>"#);
+
+ let mut inside_font_tag = false;
+ let html_diff = re.replace_all(&raw_diff, |captures: &regex::Captures<'_>| {
+ let mut ret = String::new();
+ if inside_font_tag {
+ ret.push_str(r#"</font>"#);
+ }
+
+ let tag = match &captures[1] {
+ "+" => r#"<font color="darkgreen">+"#,
+ "-" => r#"<font color="red">-"#,
+ _ => unreachable!(),
+ };
+
+ inside_font_tag = true;
+ ret.push_str(tag);
+ ret
+ });
+
+ let Cow::Owned(mut html_diff) = html_diff else {
+ return raw_diff;
+ };
+
+ if inside_font_tag {
+ html_diff.push_str("</font>");
+ }
+
+ html_diff
+}
+
+/// The background color used for zebra-striping the table.
+#[derive(Clone, Copy)]
+enum Background {
+ Light,
+ Dark,
+}
+
+impl Background {
+ fn attr(self) -> &'static str {
+ match self {
+ Self::Dark => "bgcolor=\"#f0f0f0\"",
+ Self::Light => "",
+ }
+ }
+}
+
+impl ops::Not for Background {
+ type Output = Self;
+
+ fn not(self) -> Self {
+ match self {
+ Self::Light => Self::Dark,
+ Self::Dark => Self::Light,
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/framework/lattice.rs b/compiler/rustc_mir_dataflow/src/framework/lattice.rs
new file mode 100644
index 000000000..d6b89eb82
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/framework/lattice.rs
@@ -0,0 +1,252 @@
+//! Traits used to represent [lattices] for use as the domain of a dataflow analysis.
+//!
+//! # Overview
+//!
+//! The most common lattice is a powerset of some set `S`, ordered by [set inclusion]. The [Hasse
+//! diagram] for the powerset of a set with two elements (`X` and `Y`) is shown below. Note that
+//! distinct elements at the same height in a Hasse diagram (e.g. `{X}` and `{Y}`) are
+//! *incomparable*, not equal.
+//!
+//! ```text
+//! {X, Y} <- top
+//! / \
+//! {X} {Y}
+//! \ /
+//! {} <- bottom
+//!
+//! ```
+//!
+//! The defining characteristic of a lattice—the one that differentiates it from a [partially
+//! ordered set][poset]—is the existence of a *unique* least upper and greatest lower bound for
+//! every pair of elements. The lattice join operator (`∨`) returns the least upper bound, and the
+//! lattice meet operator (`∧`) returns the greatest lower bound. Types that implement one operator
+//! but not the other are known as semilattices. Dataflow analysis only uses the join operator and
+//! will work with any join-semilattice, but both should be implemented when possible.
+//!
+//! ## `PartialOrd`
+//!
+//! Given that they represent partially ordered sets, you may be surprised that [`JoinSemiLattice`]
+//! and [`MeetSemiLattice`] do not have [`PartialOrd`][std::cmp::PartialOrd] as a supertrait. This
+//! is because most standard library types use lexicographic ordering instead of set inclusion for
+//! their `PartialOrd` impl. Since we do not actually need to compare lattice elements to run a
+//! dataflow analysis, there's no need for a newtype wrapper with a custom `PartialOrd` impl. The
+//! only benefit would be the ability to check that the least upper (or greatest lower) bound
+//! returned by the lattice join (or meet) operator was in fact greater (or lower) than the inputs.
+//!
+//! [lattices]: https://en.wikipedia.org/wiki/Lattice_(order)
+//! [set inclusion]: https://en.wikipedia.org/wiki/Subset
+//! [Hasse diagram]: https://en.wikipedia.org/wiki/Hasse_diagram
+//! [poset]: https://en.wikipedia.org/wiki/Partially_ordered_set
+
+use crate::framework::BitSetExt;
+use rustc_index::bit_set::{BitSet, ChunkedBitSet, HybridBitSet};
+use rustc_index::vec::{Idx, IndexVec};
+use std::iter;
+
+/// A [partially ordered set][poset] that has a [least upper bound][lub] for any pair of elements
+/// in the set.
+///
+/// [lub]: https://en.wikipedia.org/wiki/Infimum_and_supremum
+/// [poset]: https://en.wikipedia.org/wiki/Partially_ordered_set
+pub trait JoinSemiLattice: Eq {
+ /// Computes the least upper bound of two elements, storing the result in `self` and returning
+ /// `true` if `self` has changed.
+ ///
+ /// The lattice join operator is abbreviated as `∨`.
+ fn join(&mut self, other: &Self) -> bool;
+}
+
+/// A [partially ordered set][poset] that has a [greatest lower bound][glb] for any pair of
+/// elements in the set.
+///
+/// Dataflow analyses only require that their domains implement [`JoinSemiLattice`], not
+/// `MeetSemiLattice`. However, types that will be used as dataflow domains should implement both
+/// so that they can be used with [`Dual`].
+///
+/// [glb]: https://en.wikipedia.org/wiki/Infimum_and_supremum
+/// [poset]: https://en.wikipedia.org/wiki/Partially_ordered_set
+pub trait MeetSemiLattice: Eq {
+ /// Computes the greatest lower bound of two elements, storing the result in `self` and
+ /// returning `true` if `self` has changed.
+ ///
+ /// The lattice meet operator is abbreviated as `∧`.
+ fn meet(&mut self, other: &Self) -> bool;
+}
+
+/// A `bool` is a "two-point" lattice with `true` as the top element and `false` as the bottom:
+///
+/// ```text
+/// true
+/// |
+/// false
+/// ```
+impl JoinSemiLattice for bool {
+ fn join(&mut self, other: &Self) -> bool {
+ if let (false, true) = (*self, *other) {
+ *self = true;
+ return true;
+ }
+
+ false
+ }
+}
+
+impl MeetSemiLattice for bool {
+ fn meet(&mut self, other: &Self) -> bool {
+ if let (true, false) = (*self, *other) {
+ *self = false;
+ return true;
+ }
+
+ false
+ }
+}
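
Concretely, `join` on `bool` is a saturating "logical or" that also reports whether the state rose, and that "changed" signal is what tells the engine to re-enqueue successor blocks. A self-contained check of that behavior, with the trait copied in so the snippet runs on its own:

```rust
trait JoinSemiLattice: Eq {
    fn join(&mut self, other: &Self) -> bool;
}

/// Same logic as the impl above: the state only ever rises from
/// `false` to `true`, and `join` reports whether it did.
impl JoinSemiLattice for bool {
    fn join(&mut self, other: &Self) -> bool {
        if let (false, true) = (*self, *other) {
            *self = true;
            return true;
        }
        false
    }
}

fn main() {
    let mut reachable = false;
    assert!(reachable.join(&true)); // false ∨ true: state rises, "changed"
    assert!(!reachable.join(&true)); // true ∨ true: already at top, no change
    assert!(!reachable.join(&false)); // joining bottom never changes anything
}
```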
+
+/// A tuple (or list) of lattices is itself a lattice whose least upper bound is the concatenation
+/// of the least upper bounds of each element of the tuple (or list).
+///
+/// In other words:
+/// (A₀, A₁, ..., Aₙ) ∨ (B₀, B₁, ..., Bₙ) = (A₀∨B₀, A₁∨B₁, ..., Aₙ∨Bₙ)
+impl<I: Idx, T: JoinSemiLattice> JoinSemiLattice for IndexVec<I, T> {
+ fn join(&mut self, other: &Self) -> bool {
+ assert_eq!(self.len(), other.len());
+
+ let mut changed = false;
+ for (a, b) in iter::zip(self, other) {
+ changed |= a.join(b);
+ }
+ changed
+ }
+}
+
+impl<I: Idx, T: MeetSemiLattice> MeetSemiLattice for IndexVec<I, T> {
+ fn meet(&mut self, other: &Self) -> bool {
+ assert_eq!(self.len(), other.len());
+
+ let mut changed = false;
+ for (a, b) in iter::zip(self, other) {
+ changed |= a.meet(b);
+ }
+ changed
+ }
+}
+
+/// A `BitSet` represents the lattice formed by the powerset of all possible values of
+/// the index type `T` ordered by inclusion. Equivalently, it is a tuple of "two-point" lattices,
+/// one for each possible value of `T`.
+impl<T: Idx> JoinSemiLattice for BitSet<T> {
+ fn join(&mut self, other: &Self) -> bool {
+ self.union(other)
+ }
+}
+
+impl<T: Idx> MeetSemiLattice for BitSet<T> {
+ fn meet(&mut self, other: &Self) -> bool {
+ self.intersect(other)
+ }
+}
+
+impl<T: Idx> JoinSemiLattice for ChunkedBitSet<T> {
+ fn join(&mut self, other: &Self) -> bool {
+ self.union(other)
+ }
+}
+
+impl<T: Idx> MeetSemiLattice for ChunkedBitSet<T> {
+ fn meet(&mut self, other: &Self) -> bool {
+ self.intersect(other)
+ }
+}
+
+/// The counterpart of a given semilattice `T` using the [inverse order].
+///
+/// The dual of a join-semilattice is a meet-semilattice and vice versa. For example, the dual of a
+/// powerset has the empty set as its top element and the full set as its bottom element and uses
+/// set *intersection* as its join operator.
+///
+/// [inverse order]: https://en.wikipedia.org/wiki/Duality_(order_theory)
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct Dual<T>(pub T);
+
+impl<T: Idx> BitSetExt<T> for Dual<BitSet<T>> {
+ fn domain_size(&self) -> usize {
+ self.0.domain_size()
+ }
+
+ fn contains(&self, elem: T) -> bool {
+ self.0.contains(elem)
+ }
+
+ fn union(&mut self, other: &HybridBitSet<T>) {
+ self.0.union(other);
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) {
+ self.0.subtract(other);
+ }
+}
+
+impl<T: MeetSemiLattice> JoinSemiLattice for Dual<T> {
+ fn join(&mut self, other: &Self) -> bool {
+ self.0.meet(&other.0)
+ }
+}
+
+impl<T: JoinSemiLattice> MeetSemiLattice for Dual<T> {
+ fn meet(&mut self, other: &Self) -> bool {
+ self.0.join(&other.0)
+ }
+}
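
`Dual` is how "must" analyses are expressed in this framework: wrapping a powerset in `Dual` flips the order, so the engine's `join` becomes set intersection and the bottom value is the full set. A minimal sketch with `HashSet` standing in for `BitSet`; the types are illustrative, not the rustc ones.

```rust
use std::collections::HashSet;

trait JoinSemiLattice: Eq {
    fn join(&mut self, other: &Self) -> bool;
}

trait MeetSemiLattice: Eq {
    fn meet(&mut self, other: &Self) -> bool;
}

#[derive(PartialEq, Eq)]
struct Dual<T>(T);

/// Join on the dual is meet on the underlying lattice, as in the impl above.
impl<T: MeetSemiLattice> JoinSemiLattice for Dual<T> {
    fn join(&mut self, other: &Self) -> bool {
        self.0.meet(&other.0)
    }
}

impl MeetSemiLattice for HashSet<u32> {
    /// Meet of two sets under inclusion is intersection; report changes.
    fn meet(&mut self, other: &Self) -> bool {
        let before = self.len();
        self.retain(|x| other.contains(x));
        self.len() != before
    }
}

fn main() {
    // "Definitely initialized" flavor: a local survives a join point only
    // if *every* predecessor initializes it, hence intersection.
    let mut state = Dual(HashSet::from([1, 2, 3]));
    let pred = Dual(HashSet::from([2, 3, 4]));
    assert!(state.join(&pred));
    assert_eq!(state.0, HashSet::from([2, 3]));
}
```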
+
+/// Extends a type `T` with top and bottom elements to make it a partially ordered set in which no
+/// value of `T` is comparable with any other.
+///
+/// A flat set has the following [Hasse diagram]:
+///
+/// ```text
+/// top
+/// / ... / / \ \ ... \
+/// all possible values of `T`
+/// \ ... \ \ / / ... /
+/// bottom
+/// ```
+///
+/// [Hasse diagram]: https://en.wikipedia.org/wiki/Hasse_diagram
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum FlatSet<T> {
+ Bottom,
+ Elem(T),
+ Top,
+}
+
+impl<T: Clone + Eq> JoinSemiLattice for FlatSet<T> {
+ fn join(&mut self, other: &Self) -> bool {
+ let result = match (&*self, other) {
+ (Self::Top, _) | (_, Self::Bottom) => return false,
+ (Self::Elem(a), Self::Elem(b)) if a == b => return false,
+
+ (Self::Bottom, Self::Elem(x)) => Self::Elem(x.clone()),
+
+ _ => Self::Top,
+ };
+
+ *self = result;
+ true
+ }
+}
+
+impl<T: Clone + Eq> MeetSemiLattice for FlatSet<T> {
+ fn meet(&mut self, other: &Self) -> bool {
+ let result = match (&*self, other) {
+ (Self::Bottom, _) | (_, Self::Top) => return false,
+ (Self::Elem(ref a), Self::Elem(ref b)) if a == b => return false,
+
+ (Self::Top, Self::Elem(ref x)) => Self::Elem(x.clone()),
+
+ _ => Self::Bottom,
+ };
+
+ *self = result;
+ true
+ }
+}
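
`FlatSet` is the shape of domain used for constant-propagation-style analyses: `Elem(c)` means "always equal to `c`", and `join` stays at `Elem` only when both sides agree. A self-contained run of the `join` logic above:

```rust
trait JoinSemiLattice: Eq {
    fn join(&mut self, other: &Self) -> bool;
}

#[derive(Clone, Debug, PartialEq, Eq)]
enum FlatSet<T> {
    Bottom,
    Elem(T),
    Top,
}

/// Same `join` as the impl above, copied so the snippet compiles alone.
impl<T: Clone + Eq> JoinSemiLattice for FlatSet<T> {
    fn join(&mut self, other: &Self) -> bool {
        let result = match (&*self, other) {
            (Self::Top, _) | (_, Self::Bottom) => return false,
            (Self::Elem(a), Self::Elem(b)) if a == b => return false,
            (Self::Bottom, Self::Elem(x)) => Self::Elem(x.clone()),
            _ => Self::Top,
        };
        *self = result;
        true
    }
}

fn main() {
    // Agreeing constants stay known...
    let mut x = FlatSet::Elem(7);
    assert!(!x.join(&FlatSet::Elem(7)));
    assert_eq!(x, FlatSet::Elem(7));

    // ...but conflicting constants degrade to Top ("not a constant").
    assert!(x.join(&FlatSet::Elem(8)));
    assert_eq!(x, FlatSet::Top);

    // Bottom is the identity: joining it in changes nothing.
    assert!(!x.join(&FlatSet::Bottom));
}
```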
diff --git a/compiler/rustc_mir_dataflow/src/framework/mod.rs b/compiler/rustc_mir_dataflow/src/framework/mod.rs
new file mode 100644
index 000000000..f9fd6c9c5
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/framework/mod.rs
@@ -0,0 +1,624 @@
+//! A framework that can express both [gen-kill] and generic dataflow problems.
+//!
+//! To use this framework, implement either the [`Analysis`] or the
+//! [`GenKillAnalysis`] trait. If your transfer function can be expressed with only gen/kill
+//! operations, prefer `GenKillAnalysis` since it will run faster while iterating to fixpoint. The
+//! `impls` module contains several examples of gen/kill dataflow analyses.
+//!
+//! Create an `Engine` for your analysis using the `into_engine` method on the `Analysis` trait,
+//! then call `iterate_to_fixpoint`. From there, you can use a `ResultsCursor` to inspect the
+//! fixpoint solution to your dataflow problem, or implement the `ResultsVisitor` interface and use
+//! `visit_results`. The following example uses the `ResultsCursor` approach.
+//!
+//! ```ignore (cross-crate-imports)
+//! use rustc_mir_dataflow::Analysis; // Makes `into_engine` available.
+//!
+//! fn do_my_analysis(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
+//!     let mut cursor = MyAnalysis::new()
+//! .into_engine(tcx, body)
+//! .iterate_to_fixpoint()
+//! .into_results_cursor(body);
+//!
+//! // Print the dataflow state *after* each statement in the start block.
+//!     for (statement_index, _) in body[START_BLOCK].statements.iter().enumerate() {
+//!         cursor.seek_after_primary_effect(Location { block: START_BLOCK, statement_index });
+//! let state = cursor.get();
+//! println!("{:?}", state);
+//! }
+//! }
+//! ```
+//!
+//! [gen-kill]: https://en.wikipedia.org/wiki/Data-flow_analysis#Bit_vector_problems
+
+use std::cmp::Ordering;
+
+use rustc_index::bit_set::{BitSet, ChunkedBitSet, HybridBitSet};
+use rustc_index::vec::Idx;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty::TyCtxt;
+
+mod cursor;
+mod direction;
+mod engine;
+pub mod fmt;
+pub mod graphviz;
+pub mod lattice;
+mod visitor;
+
+pub use self::cursor::{ResultsCursor, ResultsRefCursor};
+pub use self::direction::{Backward, Direction, Forward};
+pub use self::engine::{Engine, Results};
+pub use self::lattice::{JoinSemiLattice, MeetSemiLattice};
+pub use self::visitor::{visit_results, ResultsVisitable, ResultsVisitor};
+
+/// Analysis domains are all bitsets of various kinds. This trait holds
+/// operations needed by all of them.
+pub trait BitSetExt<T> {
+ fn domain_size(&self) -> usize;
+ fn contains(&self, elem: T) -> bool;
+ fn union(&mut self, other: &HybridBitSet<T>);
+ fn subtract(&mut self, other: &HybridBitSet<T>);
+}
+
+impl<T: Idx> BitSetExt<T> for BitSet<T> {
+ fn domain_size(&self) -> usize {
+ self.domain_size()
+ }
+
+ fn contains(&self, elem: T) -> bool {
+ self.contains(elem)
+ }
+
+ fn union(&mut self, other: &HybridBitSet<T>) {
+ self.union(other);
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) {
+ self.subtract(other);
+ }
+}
+
+impl<T: Idx> BitSetExt<T> for ChunkedBitSet<T> {
+ fn domain_size(&self) -> usize {
+ self.domain_size()
+ }
+
+ fn contains(&self, elem: T) -> bool {
+ self.contains(elem)
+ }
+
+ fn union(&mut self, other: &HybridBitSet<T>) {
+ self.union(other);
+ }
+
+ fn subtract(&mut self, other: &HybridBitSet<T>) {
+ self.subtract(other);
+ }
+}
+
+/// Defines the domain of a dataflow problem.
+///
+/// This trait specifies the lattice on which this analysis operates (the domain) as well as its
+/// initial value at the entry point of each basic block.
+pub trait AnalysisDomain<'tcx> {
+ /// The type that holds the dataflow state at any given point in the program.
+ type Domain: Clone + JoinSemiLattice;
+
+ /// The direction of this analysis. Either `Forward` or `Backward`.
+ type Direction: Direction = Forward;
+
+ /// A descriptive name for this analysis. Used only for debugging.
+ ///
+ /// This name should be brief and contain no spaces, periods or other characters that are not
+ /// suitable as part of a filename.
+ const NAME: &'static str;
+
+ /// Returns the initial value of the dataflow state upon entry to each basic block.
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain;
+
+ /// Mutates the initial value of the dataflow state upon entry to the `START_BLOCK`.
+ ///
+ /// For backward analyses, initial state (besides the bottom value) is not yet supported. Trying
+ /// to mutate the initial state will result in a panic.
+ //
+ // FIXME: For backward dataflow analyses, the initial state should be applied to every basic
+ // block where control flow could exit the MIR body (e.g., those terminated with `return` or
+ // `resume`). It's not obvious how to handle `yield` points in generators, however.
+ fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut Self::Domain);
+}
+
+/// A dataflow problem with an arbitrarily complex transfer function.
+///
+/// # Convergence
+///
+/// When implementing this trait directly (not via [`GenKillAnalysis`]), it's possible to choose a
+/// transfer function such that the analysis does not reach fixpoint. To guarantee convergence,
+/// your transfer functions must maintain the following invariant:
+///
+/// > If the dataflow state **before** some point in the program changes to be greater
+/// than the prior state **before** that point, the dataflow state **after** that point must
+/// also change to be greater than the prior state **after** that point.
+///
+/// This invariant guarantees that the dataflow state at a given point in the program increases
+/// monotonically until fixpoint is reached. Note that this monotonicity requirement only applies
+/// to the same point in the program at different points in time. The dataflow state at a given
+/// point in the program may or may not be greater than the state at any preceding point.
+pub trait Analysis<'tcx>: AnalysisDomain<'tcx> {
+ /// Updates the current dataflow state with the effect of evaluating a statement.
+ fn apply_statement_effect(
+ &self,
+ state: &mut Self::Domain,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ );
+
+ /// Updates the current dataflow state with an effect that occurs immediately *before* the
+ /// given statement.
+ ///
+ /// This method is useful if the consumer of the results of this analysis only needs to observe
+ /// *part* of the effect of a statement (e.g. for two-phase borrows). As a general rule,
+ /// analyses should not implement this without also implementing `apply_statement_effect`.
+ fn apply_before_statement_effect(
+ &self,
+ _state: &mut Self::Domain,
+ _statement: &mir::Statement<'tcx>,
+ _location: Location,
+ ) {
+ }
+
+ /// Updates the current dataflow state with the effect of evaluating a terminator.
+ ///
+ /// The effect of a successful return from a `Call` terminator should **not** be accounted for
+ /// in this function. That should go in `apply_call_return_effect`. For example, in the
+ /// `InitializedPlaces` analyses, the return place for a function call is not marked as
+ /// initialized here.
+ fn apply_terminator_effect(
+ &self,
+ state: &mut Self::Domain,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ );
+
+ /// Updates the current dataflow state with an effect that occurs immediately *before* the
+ /// given terminator.
+ ///
+ /// This method is useful if the consumer of the results of this analysis needs only to observe
+ /// *part* of the effect of a terminator (e.g. for two-phase borrows). As a general rule,
+ /// analyses should not implement this without also implementing `apply_terminator_effect`.
+ fn apply_before_terminator_effect(
+ &self,
+ _state: &mut Self::Domain,
+ _terminator: &mir::Terminator<'tcx>,
+ _location: Location,
+ ) {
+ }
+
+ /* Edge-specific effects */
+
+ /// Updates the current dataflow state with the effect of a successful return from a `Call`
+ /// terminator.
+ ///
+ /// This is separate from `apply_terminator_effect` to properly track state across unwind
+ /// edges.
+ fn apply_call_return_effect(
+ &self,
+ state: &mut Self::Domain,
+ block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ );
+
+ /// Updates the current dataflow state with the effect of resuming from a `Yield` terminator.
+ ///
+ /// This is similar to `apply_call_return_effect` in that it only takes place after the
+ /// generator is resumed, not when it is dropped.
+ ///
+ /// By default, no effects happen.
+ fn apply_yield_resume_effect(
+ &self,
+ _state: &mut Self::Domain,
+ _resume_block: BasicBlock,
+ _resume_place: mir::Place<'tcx>,
+ ) {
+ }
+
+ /// Updates the current dataflow state with the effect of taking a particular branch in a
+ /// `SwitchInt` terminator.
+ ///
+ /// Unlike the other edge-specific effects, which are allowed to mutate `Self::Domain`
+ /// directly, overriders of this method must pass a callback to
+ /// `SwitchIntEdgeEffects::apply`. The callback will be run once for each outgoing edge and
+ /// will have access to the dataflow state that will be propagated along that edge.
+ ///
+ /// This interface is somewhat more complex than the other visitor-like "effect" methods.
+ /// However, it is both more ergonomic—callers don't need to recompute or cache information
+ /// about a given `SwitchInt` terminator for each one of its edges—and more efficient—the
+ /// engine doesn't need to clone the exit state for a block unless
+ /// `SwitchIntEdgeEffects::apply` is actually called.
+ fn apply_switch_int_edge_effects(
+ &self,
+ _block: BasicBlock,
+ _discr: &mir::Operand<'tcx>,
+ _apply_edge_effects: &mut impl SwitchIntEdgeEffects<Self::Domain>,
+ ) {
+ }
+
+ /* Extension methods */
+
+ /// Creates an `Engine` to find the fixpoint for this dataflow problem.
+ ///
+ /// You shouldn't need to override this outside this module, since the combination of the
+ /// default impl and the one for all `A: GenKillAnalysis` will do the right thing.
+ /// Its purpose is to enable method chaining like so:
+ ///
+ /// ```ignore (cross-crate-imports)
+ /// let results = MyAnalysis::new(tcx, body)
+ /// .into_engine(tcx, body, def_id)
+ /// .iterate_to_fixpoint()
+ /// .into_results_cursor(body);
+ /// ```
+ fn into_engine<'mir>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+ ) -> Engine<'mir, 'tcx, Self>
+ where
+ Self: Sized,
+ {
+ Engine::new_generic(tcx, body, self)
+ }
+}
+
+/// A gen/kill dataflow problem.
+///
+/// Each method in this trait has a corresponding one in `Analysis`. However, these methods only
+/// allow modification of the dataflow state via "gen" and "kill" operations. By defining transfer
+/// functions for each statement in this way, the transfer function for an entire basic block can
+/// be computed efficiently.
+///
+/// `Analysis` is automatically implemented for all implementers of `GenKillAnalysis`.
+pub trait GenKillAnalysis<'tcx>: Analysis<'tcx> {
+ type Idx: Idx;
+
+ /// See `Analysis::apply_statement_effect`.
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ );
+
+ /// See `Analysis::apply_before_statement_effect`.
+ fn before_statement_effect(
+ &self,
+ _trans: &mut impl GenKill<Self::Idx>,
+ _statement: &mir::Statement<'tcx>,
+ _location: Location,
+ ) {
+ }
+
+ /// See `Analysis::apply_terminator_effect`.
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ );
+
+ /// See `Analysis::apply_before_terminator_effect`.
+ fn before_terminator_effect(
+ &self,
+ _trans: &mut impl GenKill<Self::Idx>,
+ _terminator: &mir::Terminator<'tcx>,
+ _location: Location,
+ ) {
+ }
+
+ /* Edge-specific effects */
+
+ /// See `Analysis::apply_call_return_effect`.
+ fn call_return_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ );
+
+ /// See `Analysis::apply_yield_resume_effect`.
+ fn yield_resume_effect(
+ &self,
+ _trans: &mut impl GenKill<Self::Idx>,
+ _resume_block: BasicBlock,
+ _resume_place: mir::Place<'tcx>,
+ ) {
+ }
+
+ /// See `Analysis::apply_switch_int_edge_effects`.
+ fn switch_int_edge_effects<G: GenKill<Self::Idx>>(
+ &self,
+ _block: BasicBlock,
+ _discr: &mir::Operand<'tcx>,
+ _edge_effects: &mut impl SwitchIntEdgeEffects<G>,
+ ) {
+ }
+}
+
+impl<'tcx, A> Analysis<'tcx> for A
+where
+ A: GenKillAnalysis<'tcx>,
+ A::Domain: GenKill<A::Idx> + BitSetExt<A::Idx>,
+{
+ fn apply_statement_effect(
+ &self,
+ state: &mut A::Domain,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ self.statement_effect(state, statement, location);
+ }
+
+ fn apply_before_statement_effect(
+ &self,
+ state: &mut A::Domain,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ self.before_statement_effect(state, statement, location);
+ }
+
+ fn apply_terminator_effect(
+ &self,
+ state: &mut A::Domain,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.terminator_effect(state, terminator, location);
+ }
+
+ fn apply_before_terminator_effect(
+ &self,
+ state: &mut A::Domain,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.before_terminator_effect(state, terminator, location);
+ }
+
+ /* Edge-specific effects */
+
+ fn apply_call_return_effect(
+ &self,
+ state: &mut A::Domain,
+ block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ self.call_return_effect(state, block, return_places);
+ }
+
+ fn apply_yield_resume_effect(
+ &self,
+ state: &mut A::Domain,
+ resume_block: BasicBlock,
+ resume_place: mir::Place<'tcx>,
+ ) {
+ self.yield_resume_effect(state, resume_block, resume_place);
+ }
+
+ fn apply_switch_int_edge_effects(
+ &self,
+ block: BasicBlock,
+ discr: &mir::Operand<'tcx>,
+ edge_effects: &mut impl SwitchIntEdgeEffects<A::Domain>,
+ ) {
+ self.switch_int_edge_effects(block, discr, edge_effects);
+ }
+
+ /* Extension methods */
+
+ fn into_engine<'mir>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+ ) -> Engine<'mir, 'tcx, Self>
+ where
+ Self: Sized,
+ {
+ Engine::new_gen_kill(tcx, body, self)
+ }
+}
+
+/// The legal operations for a transfer function in a gen/kill problem.
+///
+/// This abstraction exists because there are two different contexts in which we call the methods in
+/// `GenKillAnalysis`. Sometimes we need to store a single transfer function that can be efficiently
+/// applied multiple times, such as when computing the cumulative transfer function for each block.
+/// These cases require a `GenKillSet`, which in turn requires two `BitSet`s of storage. Oftentimes,
+/// however, we only need to apply an effect once. In *these* cases, it is more efficient to pass the
+/// `BitSet` representing the state vector directly into the `*_effect` methods as opposed to
+/// building up a `GenKillSet` and then throwing it away.
+pub trait GenKill<T> {
+ /// Inserts `elem` into the state vector.
+ fn gen(&mut self, elem: T);
+
+ /// Removes `elem` from the state vector.
+ fn kill(&mut self, elem: T);
+
+ /// Calls `gen` for each element in `elems`.
+ fn gen_all(&mut self, elems: impl IntoIterator<Item = T>) {
+ for elem in elems {
+ self.gen(elem);
+ }
+ }
+
+ /// Calls `kill` for each element in `elems`.
+ fn kill_all(&mut self, elems: impl IntoIterator<Item = T>) {
+ for elem in elems {
+ self.kill(elem);
+ }
+ }
+}
+
+/// Stores a transfer function for a gen/kill problem.
+///
+/// Calling `gen`/`kill` on a `GenKillSet` will "build up" a transfer function so that it can be
+/// applied multiple times efficiently. When there are multiple calls to `gen` and/or `kill` for
+/// the same element, the most recent one takes precedence.
+#[derive(Clone)]
+pub struct GenKillSet<T> {
+ gen: HybridBitSet<T>,
+ kill: HybridBitSet<T>,
+}
+
+impl<T: Idx> GenKillSet<T> {
+ /// Creates a new transfer function that will leave the dataflow state unchanged.
+ pub fn identity(universe: usize) -> Self {
+ GenKillSet {
+ gen: HybridBitSet::new_empty(universe),
+ kill: HybridBitSet::new_empty(universe),
+ }
+ }
+
+ pub fn apply(&self, state: &mut impl BitSetExt<T>) {
+ state.union(&self.gen);
+ state.subtract(&self.kill);
+ }
+}
+
+impl<T: Idx> GenKill<T> for GenKillSet<T> {
+ fn gen(&mut self, elem: T) {
+ self.gen.insert(elem);
+ self.kill.remove(elem);
+ }
+
+ fn kill(&mut self, elem: T) {
+ self.kill.insert(elem);
+ self.gen.remove(elem);
+ }
+}
+
+impl<T: Idx> GenKill<T> for BitSet<T> {
+ fn gen(&mut self, elem: T) {
+ self.insert(elem);
+ }
+
+ fn kill(&mut self, elem: T) {
+ self.remove(elem);
+ }
+}
+
+impl<T: Idx> GenKill<T> for ChunkedBitSet<T> {
+ fn gen(&mut self, elem: T) {
+ self.insert(elem);
+ }
+
+ fn kill(&mut self, elem: T) {
+ self.remove(elem);
+ }
+}
+
+impl<T: Idx> GenKill<T> for lattice::Dual<BitSet<T>> {
+ fn gen(&mut self, elem: T) {
+ self.0.insert(elem);
+ }
+
+ fn kill(&mut self, elem: T) {
+ self.0.remove(elem);
+ }
+}
+
+// NOTE: DO NOT CHANGE VARIANT ORDER. The derived `Ord` impls rely on the current order.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Effect {
+ /// The "before" effect (e.g., `apply_before_statement_effect`) for a statement (or
+ /// terminator).
+ Before,
+
+ /// The "primary" effect (e.g., `apply_statement_effect`) for a statement (or terminator).
+ Primary,
+}
+
+impl Effect {
+ pub const fn at_index(self, statement_index: usize) -> EffectIndex {
+ EffectIndex { effect: self, statement_index }
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct EffectIndex {
+ statement_index: usize,
+ effect: Effect,
+}
+
+impl EffectIndex {
+ fn next_in_forward_order(self) -> Self {
+ match self.effect {
+ Effect::Before => Effect::Primary.at_index(self.statement_index),
+ Effect::Primary => Effect::Before.at_index(self.statement_index + 1),
+ }
+ }
+
+ fn next_in_backward_order(self) -> Self {
+ match self.effect {
+ Effect::Before => Effect::Primary.at_index(self.statement_index),
+ Effect::Primary => Effect::Before.at_index(self.statement_index - 1),
+ }
+ }
+
+ /// Returns `true` if the effect at `self` should be applied earlier than the effect at `other`
+ /// in forward order.
+ fn precedes_in_forward_order(self, other: Self) -> bool {
+ let ord = self
+ .statement_index
+ .cmp(&other.statement_index)
+ .then_with(|| self.effect.cmp(&other.effect));
+ ord == Ordering::Less
+ }
+
+ /// Returns `true` if the effect at `self` should be applied earlier than the effect at `other`
+ /// in backward order.
+ fn precedes_in_backward_order(self, other: Self) -> bool {
+ let ord = other
+ .statement_index
+ .cmp(&self.statement_index)
+ .then_with(|| self.effect.cmp(&other.effect));
+ ord == Ordering::Less
+ }
+}
+
+pub struct SwitchIntTarget {
+ pub value: Option<u128>,
+ pub target: BasicBlock,
+}
+
+/// A type that records the edge-specific effects for a `SwitchInt` terminator.
+pub trait SwitchIntEdgeEffects<D> {
+ /// Calls `apply_edge_effect` for each outgoing edge from a `SwitchInt` terminator and
+ /// records the results.
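+ ///
+ /// The closure receives the mutable dataflow state for an edge along with the
+ /// `SwitchIntTarget` describing it (its `value`, if any, and its `target` block).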
+ fn apply(&mut self, apply_edge_effect: impl FnMut(&mut D, SwitchIntTarget));
+}
+
+/// List of places that are written to after a successful (non-unwind) return
+/// from a `Call` or `InlineAsm`.
+pub enum CallReturnPlaces<'a, 'tcx> {
+ Call(mir::Place<'tcx>),
+ InlineAsm(&'a [mir::InlineAsmOperand<'tcx>]),
+}
+
+impl<'tcx> CallReturnPlaces<'_, 'tcx> {
+ pub fn for_each(&self, mut f: impl FnMut(mir::Place<'tcx>)) {
+ match *self {
+ Self::Call(place) => f(place),
+ Self::InlineAsm(operands) => {
+ for op in operands {
+ match *op {
+ mir::InlineAsmOperand::Out { place: Some(place), .. }
+ | mir::InlineAsmOperand::InOut { out_place: Some(place), .. } => f(place),
+ _ => {}
+ }
+ }
+ }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_mir_dataflow/src/framework/tests.rs b/compiler/rustc_mir_dataflow/src/framework/tests.rs
new file mode 100644
index 000000000..d9461fd3a
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/framework/tests.rs
@@ -0,0 +1,322 @@
+//! A test for the logic that updates the state in a `ResultsCursor` during seek.
+
+use std::marker::PhantomData;
+
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::{self, BasicBlock, Location};
+use rustc_middle::ty;
+use rustc_span::DUMMY_SP;
+
+use super::*;
+
+/// Creates a `mir::Body` with a few disconnected basic blocks.
+///
+/// This is the `Body` that will be used by the `MockAnalysis` below. The shape of its CFG is not
+/// important.
+fn mock_body<'tcx>() -> mir::Body<'tcx> {
+ let source_info = mir::SourceInfo::outermost(DUMMY_SP);
+
+ let mut blocks = IndexVec::new();
+ let mut block = |n, kind| {
+ let nop = mir::Statement { source_info, kind: mir::StatementKind::Nop };
+
+ blocks.push(mir::BasicBlockData {
+ statements: std::iter::repeat(&nop).cloned().take(n).collect(),
+ terminator: Some(mir::Terminator { source_info, kind }),
+ is_cleanup: false,
+ })
+ };
+
+ let dummy_place = mir::Place { local: mir::RETURN_PLACE, projection: ty::List::empty() };
+
+ block(4, mir::TerminatorKind::Return);
+ block(1, mir::TerminatorKind::Return);
+ block(
+ 2,
+ mir::TerminatorKind::Call {
+ func: mir::Operand::Copy(dummy_place.clone()),
+ args: vec![],
+ destination: dummy_place.clone(),
+ target: Some(mir::START_BLOCK),
+ cleanup: None,
+ from_hir_call: false,
+ fn_span: DUMMY_SP,
+ },
+ );
+ block(3, mir::TerminatorKind::Return);
+ block(0, mir::TerminatorKind::Return);
+ block(
+ 4,
+ mir::TerminatorKind::Call {
+ func: mir::Operand::Copy(dummy_place.clone()),
+ args: vec![],
+ destination: dummy_place.clone(),
+ target: Some(mir::START_BLOCK),
+ cleanup: None,
+ from_hir_call: false,
+ fn_span: DUMMY_SP,
+ },
+ );
+
+ mir::Body::new_cfg_only(blocks)
+}
+
+/// A dataflow analysis whose state is unique at every possible `SeekTarget`.
+///
+/// Uniqueness is achieved by having a *locally* unique effect before and after each statement and
+/// terminator (see `effect`) while ensuring that the entry set for each block is
+/// *globally* unique (see `mock_entry_set`).
+///
+/// For example, a `BasicBlock` with ID `2` and a `Call` terminator has the following state at each
+/// location ("+x" indicates that "x" is added to the state).
+///
+/// | Location          | Before            | After |
+/// |-------------------|-------------------|-------|
+/// | (on_entry)        | {102}             |       |
+/// | statement 0       | +0                | +1    |
+/// | statement 1       | +2                | +3    |
+/// | `Call` terminator | +4                | +5    |
+/// | (on unwind)       | {102,0,1,2,3,4,5} |       |
+///
+/// The `102` in the block's entry set is derived from the basic block index and ensures that the
+/// expected state is unique across all basic blocks. Remember, it is generated by
+/// `mock_entry_sets`, not by actually running `MockAnalysis` to fixpoint.
+struct MockAnalysis<'tcx, D> {
+ body: &'tcx mir::Body<'tcx>,
+ dir: PhantomData<D>,
+}
+
+impl<D: Direction> MockAnalysis<'_, D> {
+ const BASIC_BLOCK_OFFSET: usize = 100;
+
+ /// The entry set for each `BasicBlock` is the ID of that block offset by a fixed amount to
+ /// avoid colliding with the statement/terminator effects.
+ fn mock_entry_set(&self, bb: BasicBlock) -> BitSet<usize> {
+ let mut ret = self.bottom_value(self.body);
+ ret.insert(Self::BASIC_BLOCK_OFFSET + bb.index());
+ ret
+ }
+
+ fn mock_entry_sets(&self) -> IndexVec<BasicBlock, BitSet<usize>> {
+ let empty = self.bottom_value(self.body);
+ let mut ret = IndexVec::from_elem(empty, &self.body.basic_blocks());
+
+ for (bb, _) in self.body.basic_blocks().iter_enumerated() {
+ ret[bb] = self.mock_entry_set(bb);
+ }
+
+ ret
+ }
+
+ /// Returns the index that should be added to the dataflow state at the given target.
+ fn effect(&self, loc: EffectIndex) -> usize {
+ let idx = match loc.effect {
+ Effect::Before => loc.statement_index * 2,
+ Effect::Primary => loc.statement_index * 2 + 1,
+ };
+
+ assert!(idx < Self::BASIC_BLOCK_OFFSET, "Too many statements in basic block");
+ idx
+ }
+
+ /// Returns the expected state at the given `SeekTarget`.
+ ///
+ /// This is the union of the index of the target basic block, the index assigned to the
+ /// target statement or terminator, and the indices assigned to all preceding statements in
+ /// the target basic block.
+ ///
+ /// For example, the expected state when calling
+ /// `seek_before_primary_effect(Location { block: 2, statement_index: 2 })`
+ /// would be `[102, 0, 1, 2, 3, 4]`.
+ fn expected_state_at_target(&self, target: SeekTarget) -> BitSet<usize> {
+ let block = target.block();
+ let mut ret = self.bottom_value(self.body);
+ ret.insert(Self::BASIC_BLOCK_OFFSET + block.index());
+
+ let target = match target {
+ SeekTarget::BlockEntry { .. } => return ret,
+ SeekTarget::Before(loc) => Effect::Before.at_index(loc.statement_index),
+ SeekTarget::After(loc) => Effect::Primary.at_index(loc.statement_index),
+ };
+
+ let mut pos = if D::IS_FORWARD {
+ Effect::Before.at_index(0)
+ } else {
+ Effect::Before.at_index(self.body[block].statements.len())
+ };
+
+ loop {
+ ret.insert(self.effect(pos));
+
+ if pos == target {
+ return ret;
+ }
+
+ if D::IS_FORWARD {
+ pos = pos.next_in_forward_order();
+ } else {
+ pos = pos.next_in_backward_order();
+ }
+ }
+ }
+}
+
+impl<'tcx, D: Direction> AnalysisDomain<'tcx> for MockAnalysis<'tcx, D> {
+ type Domain = BitSet<usize>;
+ type Direction = D;
+
+ const NAME: &'static str = "mock";
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ BitSet::new_empty(Self::BASIC_BLOCK_OFFSET + body.basic_blocks().len())
+ }
+
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
+ unimplemented!("This is never called since `MockAnalysis` is never iterated to fixpoint");
+ }
+}
+
+impl<'tcx, D: Direction> Analysis<'tcx> for MockAnalysis<'tcx, D> {
+ fn apply_statement_effect(
+ &self,
+ state: &mut Self::Domain,
+ _statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ let idx = self.effect(Effect::Primary.at_index(location.statement_index));
+ assert!(state.insert(idx));
+ }
+
+ fn apply_before_statement_effect(
+ &self,
+ state: &mut Self::Domain,
+ _statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ let idx = self.effect(Effect::Before.at_index(location.statement_index));
+ assert!(state.insert(idx));
+ }
+
+ fn apply_terminator_effect(
+ &self,
+ state: &mut Self::Domain,
+ _terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ let idx = self.effect(Effect::Primary.at_index(location.statement_index));
+ assert!(state.insert(idx));
+ }
+
+ fn apply_before_terminator_effect(
+ &self,
+ state: &mut Self::Domain,
+ _terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ let idx = self.effect(Effect::Before.at_index(location.statement_index));
+ assert!(state.insert(idx));
+ }
+
+ fn apply_call_return_effect(
+ &self,
+ _state: &mut Self::Domain,
+ _block: BasicBlock,
+ _return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+enum SeekTarget {
+ BlockEntry(BasicBlock),
+ Before(Location),
+ After(Location),
+}
+
+impl SeekTarget {
+ fn block(&self) -> BasicBlock {
+ use SeekTarget::*;
+
+ match *self {
+ BlockEntry(block) => block,
+ Before(loc) | After(loc) => loc.block,
+ }
+ }
+
+ /// An iterator over all possible `SeekTarget`s in a given block in order, starting with
+ /// `BlockEntry`.
+ fn iter_in_block(body: &mir::Body<'_>, block: BasicBlock) -> impl Iterator<Item = Self> {
+ let statements_and_terminator = (0..=body[block].statements.len())
+ .flat_map(|i| (0..2).map(move |j| (i, j)))
+ .map(move |(i, kind)| {
+ let loc = Location { block, statement_index: i };
+ match kind {
+ 0 => SeekTarget::Before(loc),
+ 1 => SeekTarget::After(loc),
+ _ => unreachable!(),
+ }
+ });
+
+ std::iter::once(SeekTarget::BlockEntry(block)).chain(statements_and_terminator)
+ }
+}
+
+fn test_cursor<D: Direction>(analysis: MockAnalysis<'_, D>) {
+ let body = analysis.body;
+
+ let mut cursor =
+ Results { entry_sets: analysis.mock_entry_sets(), analysis }.into_results_cursor(body);
+
+ cursor.allow_unreachable();
+
+ let every_target = || {
+ body.basic_blocks()
+ .iter_enumerated()
+ .flat_map(|(bb, _)| SeekTarget::iter_in_block(body, bb))
+ };
+
+ let mut seek_to_target = |targ| {
+ use SeekTarget::*;
+
+ match targ {
+ BlockEntry(block) => cursor.seek_to_block_entry(block),
+ Before(loc) => cursor.seek_before_primary_effect(loc),
+ After(loc) => cursor.seek_after_primary_effect(loc),
+ }
+
+ assert_eq!(cursor.get(), &cursor.analysis().expected_state_at_target(targ));
+ };
+
+ // Seek *to* every possible `SeekTarget` *from* every possible `SeekTarget`.
+ //
+ // By resetting the cursor to `from` each time it changes, we end up checking some edges twice.
+ // What we really want is an Eulerian cycle for the complete digraph over all possible
+ // `SeekTarget`s, but it's not worth spending the time to compute it.
+ for from in every_target() {
+ seek_to_target(from);
+
+ for to in every_target() {
+ dbg!(from);
+ dbg!(to);
+ seek_to_target(to);
+ seek_to_target(from);
+ }
+ }
+}
+
+#[test]
+fn backward_cursor() {
+ let body = mock_body();
+ let body = &body;
+ let analysis = MockAnalysis { body, dir: PhantomData::<Backward> };
+ test_cursor(analysis)
+}
+
+#[test]
+fn forward_cursor() {
+ let body = mock_body();
+ let body = &body;
+ let analysis = MockAnalysis { body, dir: PhantomData::<Forward> };
+ test_cursor(analysis)
+}
diff --git a/compiler/rustc_mir_dataflow/src/framework/visitor.rs b/compiler/rustc_mir_dataflow/src/framework/visitor.rs
new file mode 100644
index 000000000..75b4e150a
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/framework/visitor.rs
@@ -0,0 +1,187 @@
+use rustc_middle::mir::{self, BasicBlock, Location};
+
+use super::{Analysis, Direction, Results};
+
+/// Calls the corresponding method in `ResultsVisitor` for every location in a `mir::Body` with the
+/// dataflow state at that location.
+pub fn visit_results<'mir, 'tcx, F, V>(
+ body: &'mir mir::Body<'tcx>,
+ blocks: impl IntoIterator<Item = BasicBlock>,
+ results: &V,
+ vis: &mut impl ResultsVisitor<'mir, 'tcx, FlowState = F>,
+) where
+ V: ResultsVisitable<'tcx, FlowState = F>,
+{
+ let mut state = results.new_flow_state(body);
+
+ #[cfg(debug_assertions)]
+ let reachable_blocks = mir::traversal::reachable_as_bitset(body);
+
+ for block in blocks {
+ #[cfg(debug_assertions)]
+ assert!(reachable_blocks.contains(block));
+
+ let block_data = &body[block];
+ V::Direction::visit_results_in_block(&mut state, block, block_data, results, vis);
+ }
+}
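+
+// A sketch of typical usage (`MyVisitor` is hypothetical): implement `ResultsVisitor` for a
+// type, then drive it over the reachable blocks with some computed `Results`:
+//
+//     let mut vis = MyVisitor::default();
+//     visit_results(body, mir::traversal::reachable(body).map(|(bb, _)| bb), &results, &mut vis);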
+
+pub trait ResultsVisitor<'mir, 'tcx> {
+ type FlowState;
+
+ fn visit_block_start(
+ &mut self,
+ _state: &Self::FlowState,
+ _block_data: &'mir mir::BasicBlockData<'tcx>,
+ _block: BasicBlock,
+ ) {
+ }
+
+ /// Called with the `before_statement_effect` of the given statement applied to `state` but not
+ /// its `statement_effect`.
+ fn visit_statement_before_primary_effect(
+ &mut self,
+ _state: &Self::FlowState,
+ _statement: &'mir mir::Statement<'tcx>,
+ _location: Location,
+ ) {
+ }
+
+ /// Called with both the `before_statement_effect` and the `statement_effect` of the given
+ /// statement applied to `state`.
+ fn visit_statement_after_primary_effect(
+ &mut self,
+ _state: &Self::FlowState,
+ _statement: &'mir mir::Statement<'tcx>,
+ _location: Location,
+ ) {
+ }
+
+ /// Called with the `before_terminator_effect` of the given terminator applied to `state` but not
+ /// its `terminator_effect`.
+ fn visit_terminator_before_primary_effect(
+ &mut self,
+ _state: &Self::FlowState,
+ _terminator: &'mir mir::Terminator<'tcx>,
+ _location: Location,
+ ) {
+ }
+
+ /// Called with both the `before_terminator_effect` and the `terminator_effect` of the given
+ /// terminator applied to `state`.
+ ///
+ /// The `call_return_effect` (if one exists) will *not* be applied to `state`.
+ fn visit_terminator_after_primary_effect(
+ &mut self,
+ _state: &Self::FlowState,
+ _terminator: &'mir mir::Terminator<'tcx>,
+ _location: Location,
+ ) {
+ }
+
+ fn visit_block_end(
+ &mut self,
+ _state: &Self::FlowState,
+ _block_data: &'mir mir::BasicBlockData<'tcx>,
+ _block: BasicBlock,
+ ) {
+ }
+}
+
+/// Things that can be visited by a `ResultsVisitor`.
+///
+/// This trait exists so that we can visit the results of multiple dataflow analyses simultaneously.
+/// DO NOT IMPLEMENT MANUALLY. Instead, use the `impl_visitable` macro.
+pub trait ResultsVisitable<'tcx> {
+ type Direction: Direction;
+ type FlowState;
+
+ /// Creates an empty `FlowState` to hold the transient state for these dataflow results.
+ ///
+ /// The value of the newly created `FlowState` will be overwritten by `reset_to_block_entry`
+ /// before it can be observed by a `ResultsVisitor`.
+ fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState;
+
+ fn reset_to_block_entry(&self, state: &mut Self::FlowState, block: BasicBlock);
+
+ fn reconstruct_before_statement_effect(
+ &self,
+ state: &mut Self::FlowState,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ );
+
+ fn reconstruct_statement_effect(
+ &self,
+ state: &mut Self::FlowState,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ );
+
+ fn reconstruct_before_terminator_effect(
+ &self,
+ state: &mut Self::FlowState,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ );
+
+ fn reconstruct_terminator_effect(
+ &self,
+ state: &mut Self::FlowState,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ );
+}
+
+impl<'tcx, A> ResultsVisitable<'tcx> for Results<'tcx, A>
+where
+ A: Analysis<'tcx>,
+{
+ type FlowState = A::Domain;
+
+ type Direction = A::Direction;
+
+ fn new_flow_state(&self, body: &mir::Body<'tcx>) -> Self::FlowState {
+ self.analysis.bottom_value(body)
+ }
+
+ fn reset_to_block_entry(&self, state: &mut Self::FlowState, block: BasicBlock) {
+ state.clone_from(&self.entry_set_for_block(block));
+ }
+
+ fn reconstruct_before_statement_effect(
+ &self,
+ state: &mut Self::FlowState,
+ stmt: &mir::Statement<'tcx>,
+ loc: Location,
+ ) {
+ self.analysis.apply_before_statement_effect(state, stmt, loc);
+ }
+
+ fn reconstruct_statement_effect(
+ &self,
+ state: &mut Self::FlowState,
+ stmt: &mir::Statement<'tcx>,
+ loc: Location,
+ ) {
+ self.analysis.apply_statement_effect(state, stmt, loc);
+ }
+
+ fn reconstruct_before_terminator_effect(
+ &self,
+ state: &mut Self::FlowState,
+ term: &mir::Terminator<'tcx>,
+ loc: Location,
+ ) {
+ self.analysis.apply_before_terminator_effect(state, term, loc);
+ }
+
+ fn reconstruct_terminator_effect(
+ &self,
+ state: &mut Self::FlowState,
+ term: &mir::Terminator<'tcx>,
+ loc: Location,
+ ) {
+ self.analysis.apply_terminator_effect(state, term, loc);
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
new file mode 100644
index 000000000..0f8e86d1d
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs
@@ -0,0 +1,162 @@
+use super::*;
+
+use crate::{AnalysisDomain, CallReturnPlaces, GenKill, GenKillAnalysis};
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+
+/// A dataflow analysis that tracks whether a pointer or reference could possibly exist that points
+/// to a given local.
+///
+/// At present, this is used as a very limited form of alias analysis. For example,
+/// `MaybeBorrowedLocals` is used to compute which locals are live during a yield expression for
+/// immovable generators.
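+///
+/// For example, after `_p = &_x` the local `_x` is in the set; it is removed again only when
+/// `_x` is marked `StorageDead`, at which point any pointers to it are invalid.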
+pub struct MaybeBorrowedLocals;
+
+impl MaybeBorrowedLocals {
+ fn transfer_function<'a, T>(&'a self, trans: &'a mut T) -> TransferFunction<'a, T> {
+ TransferFunction { trans }
+ }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for MaybeBorrowedLocals {
+ type Domain = BitSet<Local>;
+ const NAME: &'static str = "maybe_borrowed_locals";
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = unborrowed
+ BitSet::new_empty(body.local_decls().len())
+ }
+
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
+ // No locals are aliased on function entry
+ }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for MaybeBorrowedLocals {
+ type Idx = Local;
+
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(trans).visit_statement(statement, location);
+ }
+
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(trans).visit_terminator(terminator, location);
+ }
+
+ fn call_return_effect(
+ &self,
+ _trans: &mut impl GenKill<Self::Idx>,
+ _block: mir::BasicBlock,
+ _return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ }
+}
+
+/// A `Visitor` that defines the transfer function for `MaybeBorrowedLocals`.
+struct TransferFunction<'a, T> {
+ trans: &'a mut T,
+}
+
+impl<'tcx, T> Visitor<'tcx> for TransferFunction<'_, T>
+where
+ T: GenKill<Local>,
+{
+ fn visit_statement(&mut self, stmt: &Statement<'tcx>, location: Location) {
+ self.super_statement(stmt, location);
+
+ // When we reach a `StorageDead` statement, we can assume that any pointers to this memory
+ // are now invalid.
+ if let StatementKind::StorageDead(local) = stmt.kind {
+ self.trans.kill(local);
+ }
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+
+ match rvalue {
+ mir::Rvalue::AddressOf(_, borrowed_place) | mir::Rvalue::Ref(_, _, borrowed_place) => {
+ if !borrowed_place.is_indirect() {
+ self.trans.gen(borrowed_place.local);
+ }
+ }
+
+ mir::Rvalue::Cast(..)
+ | mir::Rvalue::ShallowInitBox(..)
+ | mir::Rvalue::Use(..)
+ | mir::Rvalue::ThreadLocalRef(..)
+ | mir::Rvalue::Repeat(..)
+ | mir::Rvalue::Len(..)
+ | mir::Rvalue::BinaryOp(..)
+ | mir::Rvalue::CheckedBinaryOp(..)
+ | mir::Rvalue::NullaryOp(..)
+ | mir::Rvalue::UnaryOp(..)
+ | mir::Rvalue::Discriminant(..)
+ | mir::Rvalue::Aggregate(..)
+ | mir::Rvalue::CopyForDeref(..) => {}
+ }
+ }
+
+ fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+ self.super_terminator(terminator, location);
+
+ match terminator.kind {
+ mir::TerminatorKind::Drop { place: dropped_place, .. }
+ | mir::TerminatorKind::DropAndReplace { place: dropped_place, .. } => {
+ // Drop terminators may call custom drop glue (`Drop::drop`), which takes `&mut
+ // self` as a parameter. In the general case, a drop impl could launder that
+ // reference into the surrounding environment through a raw pointer, thus creating
+ // a valid `*mut` pointing to the dropped local. We are not yet willing to declare
+ // this particular case UB, so we must treat all dropped locals as mutably borrowed
+ // for now. See discussion on [#61069].
+ //
+ // [#61069]: https://github.com/rust-lang/rust/pull/61069
+ self.trans.gen(dropped_place.local);
+ }
+
+ TerminatorKind::Abort
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::InlineAsm { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Return
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Yield { .. } => {}
+ }
+ }
+}
+
+/// The set of locals that are borrowed at some point in the MIR body.
+pub fn borrowed_locals(body: &Body<'_>) -> BitSet<Local> {
+ struct Borrowed(BitSet<Local>);
+
+ impl GenKill<Local> for Borrowed {
+ #[inline]
+ fn gen(&mut self, elem: Local) {
+ self.0.gen(elem)
+ }
+ #[inline]
+ fn kill(&mut self, _: Local) {
+ // Ignore borrow invalidation.
+ }
+ }
+
+ let mut borrowed = Borrowed(BitSet::new_empty(body.local_decls.len()));
+ TransferFunction { trans: &mut borrowed }.visit_body(body);
+ borrowed.0
+}
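+
+// A sketch of one intended use (illustrative): the returned set is a suitable `always_live`
+// argument for `MaybeTransitiveLiveLocals::new`, since stores to borrowed locals must be
+// treated as observable:
+//
+//     let borrowed = borrowed_locals(body);
+//     let live = MaybeTransitiveLiveLocals::new(&borrowed);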
diff --git a/compiler/rustc_mir_dataflow/src/impls/init_locals.rs b/compiler/rustc_mir_dataflow/src/impls/init_locals.rs
new file mode 100644
index 000000000..83ce4c44b
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/impls/init_locals.rs
@@ -0,0 +1,122 @@
+//! A less precise version of `MaybeInitializedPlaces` whose domain tracks entire locals
+//! rather than individual places.
+//!
+//! A local will be maybe initialized if *any* projection of that local might be initialized.
+
+use crate::{CallReturnPlaces, GenKill};
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::{self, BasicBlock, Local, Location};
+
+pub struct MaybeInitializedLocals;
+
+impl<'tcx> crate::AnalysisDomain<'tcx> for MaybeInitializedLocals {
+ type Domain = BitSet<Local>;
+
+ const NAME: &'static str = "maybe_init_locals";
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = uninit
+ BitSet::new_empty(body.local_decls.len())
+ }
+
+ fn initialize_start_block(&self, body: &mir::Body<'tcx>, entry_set: &mut Self::Domain) {
+ // Function arguments are initialized to begin with.
+ for arg in body.args_iter() {
+ entry_set.insert(arg);
+ }
+ }
+}
+
+impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeInitializedLocals {
+ type Idx = Local;
+
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ statement: &mir::Statement<'tcx>,
+ loc: Location,
+ ) {
+ TransferFunction { trans }.visit_statement(statement, loc)
+ }
+
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ terminator: &mir::Terminator<'tcx>,
+ loc: Location,
+ ) {
+ TransferFunction { trans }.visit_terminator(terminator, loc)
+ }
+
+ fn call_return_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| trans.gen(place.local));
+ }
+
+ /// See `Analysis::apply_yield_resume_effect`.
+ fn yield_resume_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _resume_block: BasicBlock,
+ resume_place: mir::Place<'tcx>,
+ ) {
+ trans.gen(resume_place.local)
+ }
+}
+
+struct TransferFunction<'a, T> {
+ trans: &'a mut T,
+}
+
+impl<T> Visitor<'_> for TransferFunction<'_, T>
+where
+ T: GenKill<Local>,
+{
+ // FIXME: Using `visit_local` here is a bug. For example, on `move _5.field` we mark `_5` as
+ // deinitialized, although clearly it is only partially deinitialized. This analysis is not
+ // actually used anywhere at the moment, so this is not critical, but this does need to be fixed
+ // before it starts being used again.
+ fn visit_local(&mut self, local: Local, context: PlaceContext, _: Location) {
+ use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, NonUseContext};
+ match context {
+ // These are handled specially in `call_return_effect` and `yield_resume_effect`.
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Call
+ | MutatingUseContext::AsmOutput
+ | MutatingUseContext::Yield,
+ ) => {}
+
+ // If it's deinitialized, it's no longer init
+ PlaceContext::MutatingUse(MutatingUseContext::Deinit) => self.trans.kill(local),
+
+ // Otherwise, when a place is mutated, we must consider it possibly initialized.
+ PlaceContext::MutatingUse(_) => self.trans.gen(local),
+
+ // If the local is moved out of, or if it gets marked `StorageDead`, consider it no
+ // longer initialized.
+ PlaceContext::NonUse(NonUseContext::StorageDead)
+ | PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) => self.trans.kill(local),
+
+ // All other uses do not affect this analysis.
+ PlaceContext::NonUse(
+ NonUseContext::StorageLive
+ | NonUseContext::AscribeUserTy
+ | NonUseContext::VarDebugInfo,
+ )
+ | PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::Inspect
+ | NonMutatingUseContext::Copy
+ | NonMutatingUseContext::SharedBorrow
+ | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::UniqueBorrow
+ | NonMutatingUseContext::AddressOf
+ | NonMutatingUseContext::Projection,
+ ) => {}
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/impls/liveness.rs b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
new file mode 100644
index 000000000..21132eb99
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/impls/liveness.rs
@@ -0,0 +1,297 @@
+use rustc_index::bit_set::{BitSet, ChunkedBitSet};
+use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{self, Local, Location, Place, StatementKind};
+
+use crate::{Analysis, AnalysisDomain, Backward, CallReturnPlaces, GenKill, GenKillAnalysis};
+
+/// A [live-variable dataflow analysis][liveness].
+///
+/// This analysis considers references as being used only at the point of the
+/// borrow. In other words, this analysis does not track uses because of references that already
+/// exist. See [this `mir-dataflow` test][flow-test] for an example. You almost never want to use
+/// this analysis without also looking at the results of [`MaybeBorrowedLocals`].
+///
+/// ## Field-(in)sensitivity
+///
+/// As the name suggests, this analysis is field insensitive. If a projection of a variable `x` is
+/// assigned to (e.g. `x.0 = 42`), it does not "define" `x` as far as liveness is concerned. In fact,
+/// such an assignment is currently marked as a "use" of `x` in an attempt to be maximally
+/// conservative.
+///
+/// [`MaybeBorrowedLocals`]: super::MaybeBorrowedLocals
+/// [flow-test]: https://github.com/rust-lang/rust/blob/a08c47310c7d49cbdc5d7afb38408ba519967ecd/src/test/ui/mir-dataflow/liveness-ptr.rs
+/// [liveness]: https://en.wikipedia.org/wiki/Live_variable_analysis
+pub struct MaybeLiveLocals;
+
+impl MaybeLiveLocals {
+ fn transfer_function<'a, T>(&self, trans: &'a mut T) -> TransferFunction<'a, T> {
+ TransferFunction(trans)
+ }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for MaybeLiveLocals {
+ type Domain = ChunkedBitSet<Local>;
+ type Direction = Backward;
+
+ const NAME: &'static str = "liveness";
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = not live
+ ChunkedBitSet::new_empty(body.local_decls.len())
+ }
+
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
+ // No variables are live until we observe a use
+ }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for MaybeLiveLocals {
+ type Idx = Local;
+
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(trans).visit_statement(statement, location);
+ }
+
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.transfer_function(trans).visit_terminator(terminator, location);
+ }
+
+ fn call_return_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _block: mir::BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+ if let Some(local) = place.as_local() {
+ trans.kill(local);
+ }
+ });
+ }
+
+ fn yield_resume_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _resume_block: mir::BasicBlock,
+ resume_place: mir::Place<'tcx>,
+ ) {
+ if let Some(local) = resume_place.as_local() {
+ trans.kill(local);
+ }
+ }
+}
+
+struct TransferFunction<'a, T>(&'a mut T);
+
+impl<'tcx, T> Visitor<'tcx> for TransferFunction<'_, T>
+where
+ T: GenKill<Local>,
+{
+ fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) {
+ let local = place.local;
+
+ // We purposefully do not call `super_place` here to avoid calling `visit_local` for this
+ // place with one of the `Projection` variants of `PlaceContext`.
+ self.visit_projection(place.as_ref(), context, location);
+
+ match DefUse::for_place(*place, context) {
+ Some(DefUse::Def) => self.0.kill(local),
+ Some(DefUse::Use) => self.0.gen(local),
+ None => {}
+ }
+ }
+
+ fn visit_local(&mut self, local: Local, context: PlaceContext, _: Location) {
+ // Because we do not call `super_place` above, `visit_local` is only called for locals that
+ // do not appear as part of a `Place` in the MIR. This handles cases like the implicit use
+ // of the return place in a `Return` terminator or the index in an `Index` projection.
+ match DefUse::for_place(local.into(), context) {
+ Some(DefUse::Def) => self.0.kill(local),
+ Some(DefUse::Use) => self.0.gen(local),
+ None => {}
+ }
+ }
+}
+
+#[derive(Eq, PartialEq, Clone)]
+enum DefUse {
+ Def,
+ Use,
+}
+
+impl DefUse {
+ fn for_place<'tcx>(place: Place<'tcx>, context: PlaceContext) -> Option<DefUse> {
+ match context {
+ PlaceContext::NonUse(_) => None,
+
+ PlaceContext::MutatingUse(MutatingUseContext::Store | MutatingUseContext::Deinit) => {
+ if place.is_indirect() {
+ // Treat derefs as a use of the base local. `*p = 4` is not a def of `p` but a
+ // use.
+ Some(DefUse::Use)
+ } else if place.projection.is_empty() {
+ Some(DefUse::Def)
+ } else {
+ None
+ }
+ }
+
+ // Setting the discriminant is not a use because it does no reading, but it is also not
+ // a def because it does not overwrite the whole place.
+ PlaceContext::MutatingUse(MutatingUseContext::SetDiscriminant) => {
+ place.is_indirect().then_some(DefUse::Use)
+ }
+
+ // For the associated terminators, this is only a `Def` when the terminator returns
+ // "successfully." As such, we handle this case separately in `call_return_effect`
+ // above. However, if the place looks like `*_5`, this is still unconditionally a use of
+ // `_5`.
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Call
+ | MutatingUseContext::Yield
+ | MutatingUseContext::AsmOutput,
+ ) => place.is_indirect().then_some(DefUse::Use),
+
+ // All other contexts are uses...
+ PlaceContext::MutatingUse(
+ MutatingUseContext::AddressOf
+ | MutatingUseContext::Borrow
+ | MutatingUseContext::Drop
+ | MutatingUseContext::Retag,
+ )
+ | PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::AddressOf
+ | NonMutatingUseContext::Copy
+ | NonMutatingUseContext::Inspect
+ | NonMutatingUseContext::Move
+ | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::SharedBorrow
+ | NonMutatingUseContext::UniqueBorrow,
+ ) => Some(DefUse::Use),
+
+ PlaceContext::MutatingUse(MutatingUseContext::Projection)
+ | PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) => {
+ unreachable!("A projection could be a def or a use and must be handled separately")
+ }
+ }
+ }
+}
+
+/// Like `MaybeLiveLocals`, but does not mark locals as live if they are used in a dead assignment.
+///
+/// This is basically written for dead store elimination and nothing else.
+///
+/// All of the caveats of `MaybeLiveLocals` apply.
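+///
+/// For example, given `_2 = _1` at a point where `_2` is not live afterwards, this analysis
+/// does *not* mark `_1` live, so an entire chain of stores that only feeds dead locals is
+/// itself considered dead.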
+pub struct MaybeTransitiveLiveLocals<'a> {
+ always_live: &'a BitSet<Local>,
+}
+
+impl<'a> MaybeTransitiveLiveLocals<'a> {
+ /// The `always_live` set is the set of locals to which all stores should unconditionally be
+ /// considered live.
+ ///
+ /// This should include at least all locals that are ever borrowed.
+ pub fn new(always_live: &'a BitSet<Local>) -> Self {
+ MaybeTransitiveLiveLocals { always_live }
+ }
+}
+
+impl<'a, 'tcx> AnalysisDomain<'tcx> for MaybeTransitiveLiveLocals<'a> {
+ type Domain = ChunkedBitSet<Local>;
+ type Direction = Backward;
+
+ const NAME: &'static str = "transitive liveness";
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = not live
+ ChunkedBitSet::new_empty(body.local_decls.len())
+ }
+
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) {
+ // No variables are live until we observe a use
+ }
+}
+
+impl<'a, 'tcx> Analysis<'tcx> for MaybeTransitiveLiveLocals<'a> {
+ fn apply_statement_effect(
+ &self,
+ trans: &mut Self::Domain,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ // Compute the place that we are storing to, if any
+ let destination = match &statement.kind {
+ StatementKind::Assign(assign) => {
+ if assign.1.is_safe_to_remove() {
+ Some(assign.0)
+ } else {
+ None
+ }
+ }
+ StatementKind::SetDiscriminant { place, .. } | StatementKind::Deinit(place) => {
+ Some(**place)
+ }
+ StatementKind::FakeRead(_)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Retag(..)
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Nop => None,
+ };
+ if let Some(destination) = destination {
+ if !destination.is_indirect()
+ && !trans.contains(destination.local)
+ && !self.always_live.contains(destination.local)
+ {
+ // This store is dead
+ return;
+ }
+ }
+ TransferFunction(trans).visit_statement(statement, location);
+ }
+
+ fn apply_terminator_effect(
+ &self,
+ trans: &mut Self::Domain,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ TransferFunction(trans).visit_terminator(terminator, location);
+ }
+
+ fn apply_call_return_effect(
+ &self,
+ trans: &mut Self::Domain,
+ _block: mir::BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+ if let Some(local) = place.as_local() {
+ trans.remove(local);
+ }
+ });
+ }
+
+ fn apply_yield_resume_effect(
+ &self,
+ trans: &mut Self::Domain,
+ _resume_block: mir::BasicBlock,
+ resume_place: mir::Place<'tcx>,
+ ) {
+ if let Some(local) = resume_place.as_local() {
+ trans.remove(local);
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/impls/mod.rs b/compiler/rustc_mir_dataflow/src/impls/mod.rs
new file mode 100644
index 000000000..fd1e49277
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/impls/mod.rs
@@ -0,0 +1,766 @@
+//! Dataflow analyses are built upon some interpretation of the
+//! bitvectors attached to each basic block; each such interpretation
+//! is represented via a zero-sized structure.
+
+use rustc_index::bit_set::{BitSet, ChunkedBitSet};
+use rustc_index::vec::Idx;
+use rustc_middle::mir::visit::{MirVisitable, Visitor};
+use rustc_middle::mir::{self, Body, Location};
+use rustc_middle::ty::{self, TyCtxt};
+
+use crate::drop_flag_effects_for_function_entry;
+use crate::drop_flag_effects_for_location;
+use crate::elaborate_drops::DropFlagState;
+use crate::framework::{CallReturnPlaces, SwitchIntEdgeEffects};
+use crate::move_paths::{HasMoveData, InitIndex, InitKind, LookupResult, MoveData, MovePathIndex};
+use crate::on_lookup_result_bits;
+use crate::MoveDataParamEnv;
+use crate::{drop_flag_effects, on_all_children_bits};
+use crate::{lattice, AnalysisDomain, GenKill, GenKillAnalysis};
+
+mod borrowed_locals;
+mod init_locals;
+mod liveness;
+mod storage_liveness;
+
+pub use self::borrowed_locals::borrowed_locals;
+pub use self::borrowed_locals::MaybeBorrowedLocals;
+pub use self::init_locals::MaybeInitializedLocals;
+pub use self::liveness::MaybeLiveLocals;
+pub use self::liveness::MaybeTransitiveLiveLocals;
+pub use self::storage_liveness::{MaybeRequiresStorage, MaybeStorageLive};
+
+/// `MaybeInitializedPlaces` tracks all places that might be
+/// initialized upon reaching a particular point in the control flow
+/// for a function.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) {                        // maybe-init:
+///                                             // {}
+///     let a = S; let mut b = S; let c; let d; // {a, b}
+///
+///     if pred {
+///         drop(a);                            // {   b}
+///         b = S;                              // {   b}
+///
+///     } else {
+///         drop(b);                            // {a}
+///         d = S;                              // {a,       d}
+///
+///     }                                       // {a, b,    d}
+///
+///     c = S;                                  // {a, b, c, d}
+/// }
+/// ```
+///
+/// To determine whether a place *must* be initialized at a
+/// particular control-flow point, one can take the set-difference
+/// between this data and the data from `MaybeUninitializedPlaces` at the
+/// corresponding control-flow point.
+///
+/// Similarly, at a given `drop` statement, the set-intersection
+/// between this data and `MaybeUninitializedPlaces` yields the set of
+/// places that would require a dynamic drop-flag at that statement.
+pub struct MaybeInitializedPlaces<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ mdpe: &'a MoveDataParamEnv<'tcx>,
+}
+
+impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+ MaybeInitializedPlaces { tcx, body, mdpe }
+ }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for MaybeInitializedPlaces<'a, 'tcx> {
+ fn move_data(&self) -> &MoveData<'tcx> {
+ &self.mdpe.move_data
+ }
+}
+
+/// `MaybeUninitializedPlaces` tracks all places that might be
+/// uninitialized upon reaching a particular point in the control flow
+/// for a function.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) {                        // maybe-uninit:
+///                                             // {a, b, c, d}
+///     let a = S; let mut b = S; let c; let d; // {      c, d}
+///
+///     if pred {
+///         drop(a);                            // {a,    c, d}
+///         b = S;                              // {a,    c, d}
+///
+///     } else {
+///         drop(b);                            // {   b, c, d}
+///         d = S;                              // {   b, c   }
+///
+///     }                                       // {a, b, c, d}
+///
+///     c = S;                                  // {a, b,    d}
+/// }
+/// ```
+///
+/// To determine whether a place *must* be uninitialized at a
+/// particular control-flow point, one can take the set-difference
+/// between this data and the data from `MaybeInitializedPlaces` at the
+/// corresponding control-flow point.
+///
+/// Similarly, at a given `drop` statement, the set-intersection
+/// between this data and `MaybeInitializedPlaces` yields the set of
+/// places that would require a dynamic drop-flag at that statement.
+pub struct MaybeUninitializedPlaces<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ mdpe: &'a MoveDataParamEnv<'tcx>,
+
+ mark_inactive_variants_as_uninit: bool,
+}
+
+impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+ MaybeUninitializedPlaces { tcx, body, mdpe, mark_inactive_variants_as_uninit: false }
+ }
+
+ /// Causes inactive enum variants to be marked as "maybe uninitialized" after a switch on an
+ /// enum discriminant.
+ ///
+ /// This is correct in a vacuum but is not the default because it causes problems in the borrow
+ /// checker, where this information gets propagated along `FalseEdge`s.
+ pub fn mark_inactive_variants_as_uninit(mut self) -> Self {
+ self.mark_inactive_variants_as_uninit = true;
+ self
+ }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for MaybeUninitializedPlaces<'a, 'tcx> {
+ fn move_data(&self) -> &MoveData<'tcx> {
+ &self.mdpe.move_data
+ }
+}
+
+/// `DefinitelyInitializedPlaces` tracks all places that are definitely
+/// initialized upon reaching a particular point in the control flow
+/// for a function.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) {                        // definite-init:
+///                                             // {          }
+///     let a = S; let mut b = S; let c; let d; // {a, b      }
+///
+///     if pred {
+///         drop(a);                            // {   b      }
+///         b = S;                              // {   b      }
+///
+///     } else {
+///         drop(b);                            // {a         }
+///         d = S;                              // {a,       d}
+///
+///     }                                       // {          }
+///
+///     c = S;                                  // {      c   }
+/// }
+/// ```
+///
+/// To determine whether a place *may* be uninitialized at a
+/// particular control-flow point, one can take the set-complement
+/// of this data.
+///
+/// Similarly, at a given `drop` statement, the set-difference between
+/// this data and `MaybeInitializedPlaces` yields the set of places
+/// that would require a dynamic drop-flag at that statement.
+pub struct DefinitelyInitializedPlaces<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ mdpe: &'a MoveDataParamEnv<'tcx>,
+}
+
+impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+ DefinitelyInitializedPlaces { tcx, body, mdpe }
+ }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> {
+ fn move_data(&self) -> &MoveData<'tcx> {
+ &self.mdpe.move_data
+ }
+}
+
+/// `EverInitializedPlaces` tracks all places that might have ever been
+/// initialized upon reaching a particular point in the control flow
+/// for a function, without an intervening `StorageDead`.
+///
+/// This dataflow is used to determine if an immutable local variable may
+/// be assigned to.
+///
+/// For example, in code like the following, we have corresponding
+/// dataflow information shown in the right-hand comments.
+///
+/// ```rust
+/// struct S;
+/// fn foo(pred: bool) {                        // ever-init:
+///                                             // {          }
+///     let a = S; let mut b = S; let c; let d; // {a, b      }
+///
+///     if pred {
+///         drop(a);                            // {a, b      }
+///         b = S;                              // {a, b      }
+///
+///     } else {
+///         drop(b);                            // {a, b      }
+///         d = S;                              // {a, b,    d}
+///
+///     }                                       // {a, b,    d}
+///
+///     c = S;                                  // {a, b, c, d}
+/// }
+/// ```
+pub struct EverInitializedPlaces<'a, 'tcx> {
+ #[allow(dead_code)]
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ mdpe: &'a MoveDataParamEnv<'tcx>,
+}
+
+impl<'a, 'tcx> EverInitializedPlaces<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, body: &'a Body<'tcx>, mdpe: &'a MoveDataParamEnv<'tcx>) -> Self {
+ EverInitializedPlaces { tcx, body, mdpe }
+ }
+}
+
+impl<'a, 'tcx> HasMoveData<'tcx> for EverInitializedPlaces<'a, 'tcx> {
+ fn move_data(&self) -> &MoveData<'tcx> {
+ &self.mdpe.move_data
+ }
+}
+
+impl<'a, 'tcx> MaybeInitializedPlaces<'a, 'tcx> {
+ fn update_bits(
+ trans: &mut impl GenKill<MovePathIndex>,
+ path: MovePathIndex,
+ state: DropFlagState,
+ ) {
+ match state {
+ DropFlagState::Absent => trans.kill(path),
+ DropFlagState::Present => trans.gen(path),
+ }
+ }
+}
+
+impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> {
+ fn update_bits(
+ trans: &mut impl GenKill<MovePathIndex>,
+ path: MovePathIndex,
+ state: DropFlagState,
+ ) {
+ match state {
+ DropFlagState::Absent => trans.gen(path),
+ DropFlagState::Present => trans.kill(path),
+ }
+ }
+}
+
+impl<'a, 'tcx> DefinitelyInitializedPlaces<'a, 'tcx> {
+ fn update_bits(
+ trans: &mut impl GenKill<MovePathIndex>,
+ path: MovePathIndex,
+ state: DropFlagState,
+ ) {
+ match state {
+ DropFlagState::Absent => trans.kill(path),
+ DropFlagState::Present => trans.gen(path),
+ }
+ }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
+ type Domain = ChunkedBitSet<MovePathIndex>;
+ const NAME: &'static str = "maybe_init";
+
+ fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = uninitialized
+ ChunkedBitSet::new_empty(self.move_data().move_paths.len())
+ }
+
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
+ assert!(s == DropFlagState::Present);
+ state.insert(path);
+ });
+ }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for MaybeInitializedPlaces<'_, 'tcx> {
+ type Idx = MovePathIndex;
+
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ });
+
+ if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
+ return;
+ }
+
+ // Mark all places as "maybe init" if they are mutably borrowed. See #90752.
+ for_each_mut_borrow(statement, location, |place| {
+ let LookupResult::Exact(mpi) = self.move_data().rev_lookup.find(place.as_ref()) else { return };
+ on_all_children_bits(self.tcx, self.body, self.move_data(), mpi, |child| {
+ trans.gen(child);
+ })
+ })
+ }
+
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ });
+
+ if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
+ return;
+ }
+
+ for_each_mut_borrow(terminator, location, |place| {
+ let LookupResult::Exact(mpi) = self.move_data().rev_lookup.find(place.as_ref()) else { return };
+ on_all_children_bits(self.tcx, self.body, self.move_data(), mpi, |child| {
+ trans.gen(child);
+ })
+ })
+ }
+
+ fn call_return_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _block: mir::BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+ // when a call returns successfully, that means we need to set
+ // the bits for that dest_place to 1 (initialized).
+ on_lookup_result_bits(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ self.move_data().rev_lookup.find(place.as_ref()),
+ |mpi| {
+ trans.gen(mpi);
+ },
+ );
+ });
+ }
+
+ fn switch_int_edge_effects<G: GenKill<Self::Idx>>(
+ &self,
+ block: mir::BasicBlock,
+ discr: &mir::Operand<'tcx>,
+ edge_effects: &mut impl SwitchIntEdgeEffects<G>,
+ ) {
+ if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
+ return;
+ }
+
+ let enum_ = discr.place().and_then(|discr| {
+ switch_on_enum_discriminant(self.tcx, &self.body, &self.body[block], discr)
+ });
+
+ let Some((enum_place, enum_def)) = enum_ else {
+ return;
+ };
+
+ let mut discriminants = enum_def.discriminants(self.tcx);
+ edge_effects.apply(|trans, edge| {
+ let Some(value) = edge.value else {
+ return;
+ };
+
+ // MIR building adds discriminants to the `values` array in the same order as they
+ // are yielded by `AdtDef::discriminants`. We rely on this to match each
+ // discriminant in `values` to its corresponding variant in linear time.
+ let (variant, _) = discriminants
+ .find(|&(_, discr)| discr.val == value)
+ .expect("Order of `AdtDef::discriminants` differed from `SwitchInt::values`");
+
+ // Kill all move paths that correspond to variants we know to be inactive along this
+ // particular outgoing edge of a `SwitchInt`.
+ drop_flag_effects::on_all_inactive_variants(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ enum_place,
+ variant,
+ |mpi| trans.kill(mpi),
+ );
+ });
+ }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
+ type Domain = ChunkedBitSet<MovePathIndex>;
+
+ const NAME: &'static str = "maybe_uninit";
+
+ fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = initialized (`initialize_start_block` counters this at the outset)
+ ChunkedBitSet::new_empty(self.move_data().move_paths.len())
+ }
+
+ // sets on_entry bits for Arg places
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ // set all bits to 1 (uninit) before gathering counter-evidence
+ state.insert_all();
+
+ drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
+ assert!(s == DropFlagState::Present);
+ state.remove(path);
+ });
+ }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for MaybeUninitializedPlaces<'_, 'tcx> {
+ type Idx = MovePathIndex;
+
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ });
+
+ // Unlike in `MaybeInitializedPlaces` above, we don't need to change the state when a
+ // mutable borrow occurs. Places cannot become uninitialized through a mutable reference.
+ }
+
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ });
+ }
+
+ fn call_return_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _block: mir::BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+ // when a call returns successfully, that means we need to set
+ // the bits for that dest_place to 0 (initialized).
+ on_lookup_result_bits(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ self.move_data().rev_lookup.find(place.as_ref()),
+ |mpi| {
+ trans.kill(mpi);
+ },
+ );
+ });
+ }
+
+ fn switch_int_edge_effects<G: GenKill<Self::Idx>>(
+ &self,
+ block: mir::BasicBlock,
+ discr: &mir::Operand<'tcx>,
+ edge_effects: &mut impl SwitchIntEdgeEffects<G>,
+ ) {
+ if !self.tcx.sess.opts.unstable_opts.precise_enum_drop_elaboration {
+ return;
+ }
+
+ if !self.mark_inactive_variants_as_uninit {
+ return;
+ }
+
+ let enum_ = discr.place().and_then(|discr| {
+ switch_on_enum_discriminant(self.tcx, &self.body, &self.body[block], discr)
+ });
+
+ let Some((enum_place, enum_def)) = enum_ else {
+ return;
+ };
+
+ let mut discriminants = enum_def.discriminants(self.tcx);
+ edge_effects.apply(|trans, edge| {
+ let Some(value) = edge.value else {
+ return;
+ };
+
+ // MIR building adds discriminants to the `values` array in the same order as they
+ // are yielded by `AdtDef::discriminants`. We rely on this to match each
+ // discriminant in `values` to its corresponding variant in linear time.
+ let (variant, _) = discriminants
+ .find(|&(_, discr)| discr.val == value)
+ .expect("Order of `AdtDef::discriminants` differed from `SwitchInt::values`");
+
+ // Mark all move paths that correspond to variants other than this one as maybe
+ // uninitialized (in reality, they are *definitely* uninitialized).
+ drop_flag_effects::on_all_inactive_variants(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ enum_place,
+ variant,
+ |mpi| trans.gen(mpi),
+ );
+ });
+ }
+}
+
+impl<'a, 'tcx> AnalysisDomain<'tcx> for DefinitelyInitializedPlaces<'a, 'tcx> {
+ /// Use set intersection as the join operator.
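+ ///
+ /// `lattice::Dual` inverts the lattice order of the underlying `BitSet`, so the join of
+ /// two states intersects them: a place is only "definitely initialized" if it is
+ /// initialized on *every* incoming edge.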
+ type Domain = lattice::Dual<BitSet<MovePathIndex>>;
+
+ const NAME: &'static str = "definite_init";
+
+ fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = initialized (`initialize_start_block` counters this at the outset)
+ lattice::Dual(BitSet::new_filled(self.move_data().move_paths.len()))
+ }
+
+ // sets on_entry bits for Arg places
+ fn initialize_start_block(&self, _: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ state.0.clear();
+
+ drop_flag_effects_for_function_entry(self.tcx, self.body, self.mdpe, |path, s| {
+ assert!(s == DropFlagState::Present);
+ state.0.insert(path);
+ });
+ }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for DefinitelyInitializedPlaces<'_, 'tcx> {
+ type Idx = MovePathIndex;
+
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _statement: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ })
+ }
+
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ drop_flag_effects_for_location(self.tcx, self.body, self.mdpe, location, |path, s| {
+ Self::update_bits(trans, path, s)
+ })
+ }
+
+ fn call_return_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _block: mir::BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| {
+ // when a call returns successfully, that means we need to set
+ // the bits for that dest_place to 1 (initialized).
+ on_lookup_result_bits(
+ self.tcx,
+ self.body,
+ self.move_data(),
+ self.move_data().rev_lookup.find(place.as_ref()),
+ |mpi| {
+ trans.gen(mpi);
+ },
+ );
+ });
+ }
+}
+
+impl<'tcx> AnalysisDomain<'tcx> for EverInitializedPlaces<'_, 'tcx> {
+ type Domain = ChunkedBitSet<InitIndex>;
+
+ const NAME: &'static str = "ever_init";
+
+ fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = no initialized variables by default
+ ChunkedBitSet::new_empty(self.move_data().inits.len())
+ }
+
+ fn initialize_start_block(&self, body: &mir::Body<'tcx>, state: &mut Self::Domain) {
+ for arg_init in 0..body.arg_count {
+ state.insert(InitIndex::new(arg_init));
+ }
+ }
+}
+
+impl<'tcx> GenKillAnalysis<'tcx> for EverInitializedPlaces<'_, 'tcx> {
+ type Idx = InitIndex;
+
+ #[instrument(skip(self, trans), level = "debug")]
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ stmt: &mir::Statement<'tcx>,
+ location: Location,
+ ) {
+ let move_data = self.move_data();
+ let init_path_map = &move_data.init_path_map;
+ let init_loc_map = &move_data.init_loc_map;
+ let rev_lookup = &move_data.rev_lookup;
+
+ debug!("initializes move_indexes {:?}", &init_loc_map[location]);
+ trans.gen_all(init_loc_map[location].iter().copied());
+
+ if let mir::StatementKind::StorageDead(local) = stmt.kind {
+ // End inits for StorageDead, so that an immutable variable can
+ // be reinitialized on the next iteration of the loop.
+ let move_path_index = rev_lookup.find_local(local);
+ debug!("clears the ever initialized status of {:?}", init_path_map[move_path_index]);
+ trans.kill_all(init_path_map[move_path_index].iter().copied());
+ }
+ }
+
+ #[instrument(skip(self, trans, _terminator), level = "debug")]
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _terminator: &mir::Terminator<'tcx>,
+ location: Location,
+ ) {
+ let (body, move_data) = (self.body, self.move_data());
+ let term = body[location.block].terminator();
+ let init_loc_map = &move_data.init_loc_map;
+ debug!(?term);
+ debug!("initializes move_indexes {:?}", init_loc_map[location]);
+ trans.gen_all(
+ init_loc_map[location]
+ .iter()
+ .filter(|init_index| {
+ move_data.inits[**init_index].kind != InitKind::NonPanicPathOnly
+ })
+ .copied(),
+ );
+ }
+
+ fn call_return_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ block: mir::BasicBlock,
+ _return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ let move_data = self.move_data();
+ let init_loc_map = &move_data.init_loc_map;
+
+ let call_loc = self.body.terminator_loc(block);
+ for init_index in &init_loc_map[call_loc] {
+ trans.gen(*init_index);
+ }
+ }
+}
+
+/// Inspect a `SwitchInt`-terminated basic block to see if the condition of that `SwitchInt` is
+/// an enum discriminant.
+///
+/// We expect such blocks to have a call to `discriminant` as their last statement like so:
+///
+/// ```text
+/// ...
+/// _42 = discriminant(_1)
+/// SwitchInt(_42, ..)
+/// ```
+///
+/// If the basic block matches this pattern, this function returns the place corresponding to the
+/// enum (`_1` in the example above) as well as the `AdtDef` of that enum.
+fn switch_on_enum_discriminant<'mir, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &'mir mir::Body<'tcx>,
+ block: &'mir mir::BasicBlockData<'tcx>,
+ switch_on: mir::Place<'tcx>,
+) -> Option<(mir::Place<'tcx>, ty::AdtDef<'tcx>)> {
+ for statement in block.statements.iter().rev() {
+ match &statement.kind {
+ mir::StatementKind::Assign(box (lhs, mir::Rvalue::Discriminant(discriminated)))
+ if *lhs == switch_on =>
+ {
+ match discriminated.ty(body, tcx).ty.kind() {
+ ty::Adt(def, _) => return Some((*discriminated, *def)),
+
+ // `Rvalue::Discriminant` is also used to get the active yield point for a
+ // generator, but we do not need edge-specific effects in that case. This may
+ // change in the future.
+ ty::Generator(..) => return None,
+
+ t => bug!("`discriminant` called on unexpected type {:?}", t),
+ }
+ }
+ mir::StatementKind::Coverage(_) => continue,
+ _ => return None,
+ }
+ }
+ None
+}
+
+struct OnMutBorrow<F>(F);
+
+impl<F> Visitor<'_> for OnMutBorrow<F>
+where
+ F: FnMut(&mir::Place<'_>),
+{
+ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'_>, location: Location) {
+ // FIXME: Does `&raw const foo` allow mutation? See #90413.
+ match rvalue {
+ mir::Rvalue::Ref(_, mir::BorrowKind::Mut { .. }, place)
+ | mir::Rvalue::AddressOf(_, place) => (self.0)(place),
+
+ _ => {}
+ }
+
+ self.super_rvalue(rvalue, location)
+ }
+}
+
+/// Calls `f` for each mutable borrow or raw reference in the given statement or terminator.
+///
+/// This DOES NOT call `f` for a shared borrow of a type with interior mutability. That's okay for
+/// initializedness, because we cannot move from an `UnsafeCell` (outside of `core::cell`), but
+/// other analyses will likely need to check for `!Freeze`.
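+///
+/// For illustration, a hedged sketch of typical MIR (locals are hypothetical):
+///
+/// ```text
+/// _2 = &mut _1;      // `f` is called with `_1`
+/// _3 = &raw mut _1;  // `f` is called with `_1`
+/// _4 = &_1;          // `f` is NOT called, even if `_1` is an `UnsafeCell`
+/// ```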
+fn for_each_mut_borrow<'tcx>(
+ mir: &impl MirVisitable<'tcx>,
+ location: Location,
+ f: impl FnMut(&mir::Place<'_>),
+) {
+ let mut vis = OnMutBorrow(f);
+
+ mir.apply(location, &mut vis);
+}
diff --git a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
new file mode 100644
index 000000000..f6b5af90a
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs
@@ -0,0 +1,300 @@
+pub use super::*;
+
+use crate::{CallReturnPlaces, GenKill, Results, ResultsRefCursor};
+use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use std::cell::RefCell;
+
+#[derive(Clone)]
+pub struct MaybeStorageLive {
+ always_live_locals: BitSet<Local>,
+}
+
+impl MaybeStorageLive {
+ pub fn new(always_live_locals: BitSet<Local>) -> Self {
+ MaybeStorageLive { always_live_locals }
+ }
+}
+
+impl<'tcx> crate::AnalysisDomain<'tcx> for MaybeStorageLive {
+ type Domain = BitSet<Local>;
+
+ const NAME: &'static str = "maybe_storage_live";
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = dead
+ BitSet::new_empty(body.local_decls.len())
+ }
+
+ fn initialize_start_block(&self, body: &mir::Body<'tcx>, on_entry: &mut Self::Domain) {
+ assert_eq!(body.local_decls.len(), self.always_live_locals.domain_size());
+ for local in self.always_live_locals.iter() {
+ on_entry.insert(local);
+ }
+
+ for arg in body.args_iter() {
+ on_entry.insert(arg);
+ }
+ }
+}
+
+impl<'tcx> crate::GenKillAnalysis<'tcx> for MaybeStorageLive {
+ type Idx = Local;
+
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ stmt: &mir::Statement<'tcx>,
+ _: Location,
+ ) {
+ match stmt.kind {
+ StatementKind::StorageLive(l) => trans.gen(l),
+ StatementKind::StorageDead(l) => trans.kill(l),
+ _ => (),
+ }
+ }
+
+ fn terminator_effect(
+ &self,
+ _trans: &mut impl GenKill<Self::Idx>,
+ _: &mir::Terminator<'tcx>,
+ _: Location,
+ ) {
+ // Terminators have no effect
+ }
+
+ fn call_return_effect(
+ &self,
+ _trans: &mut impl GenKill<Self::Idx>,
+ _block: BasicBlock,
+ _return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ // Nothing to do when a call returns successfully
+ }
+}
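+
+// A hedged usage sketch (hypothetical caller; `tcx`, `body`, and `loc` are
+// assumed to be in scope):
+//
+//     let always_live = crate::storage::always_storage_live_locals(body);
+//     let results = MaybeStorageLive::new(always_live)
+//         .into_engine(tcx, body)
+//         .iterate_to_fixpoint();
+//     let mut cursor = crate::ResultsCursor::new(body, &results);
+//     cursor.seek_before_primary_effect(loc);
+//     let live_now = cursor.get(); // &BitSet<Local>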
+
+type BorrowedLocalsResults<'a, 'tcx> = ResultsRefCursor<'a, 'a, 'tcx, MaybeBorrowedLocals>;
+
+/// Dataflow analysis that determines whether each local requires storage at a
+/// given location, i.e. whether taking its storage away at that point could
+/// be observed.
+pub struct MaybeRequiresStorage<'mir, 'tcx> {
+ body: &'mir Body<'tcx>,
+ borrowed_locals: RefCell<BorrowedLocalsResults<'mir, 'tcx>>,
+}
+
+impl<'mir, 'tcx> MaybeRequiresStorage<'mir, 'tcx> {
+ pub fn new(
+ body: &'mir Body<'tcx>,
+ borrowed_locals: &'mir Results<'tcx, MaybeBorrowedLocals>,
+ ) -> Self {
+ MaybeRequiresStorage {
+ body,
+ borrowed_locals: RefCell::new(ResultsRefCursor::new(&body, borrowed_locals)),
+ }
+ }
+}
+
+impl<'mir, 'tcx> crate::AnalysisDomain<'tcx> for MaybeRequiresStorage<'mir, 'tcx> {
+ type Domain = BitSet<Local>;
+
+ const NAME: &'static str = "requires_storage";
+
+ fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
+ // bottom = dead
+ BitSet::new_empty(body.local_decls.len())
+ }
+
+ fn initialize_start_block(&self, body: &mir::Body<'tcx>, on_entry: &mut Self::Domain) {
+ // The resume argument is live on function entry (we don't care about
+ // the `self` argument)
+ for arg in body.args_iter().skip(1) {
+ on_entry.insert(arg);
+ }
+ }
+}
+
+impl<'mir, 'tcx> crate::GenKillAnalysis<'tcx> for MaybeRequiresStorage<'mir, 'tcx> {
+ type Idx = Local;
+
+ fn before_statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ stmt: &mir::Statement<'tcx>,
+ loc: Location,
+ ) {
+ // If a place is borrowed in a statement, it needs storage for that statement.
+ self.borrowed_locals.borrow().analysis().statement_effect(trans, stmt, loc);
+
+ match &stmt.kind {
+ StatementKind::StorageDead(l) => trans.kill(*l),
+
+ // If a place is assigned to in a statement, it needs storage for that statement.
+ StatementKind::Assign(box (place, _))
+ | StatementKind::SetDiscriminant { box place, .. }
+ | StatementKind::Deinit(box place) => {
+ trans.gen(place.local);
+ }
+
+ // Nothing to do for these. Match exhaustively so this fails to compile when new
+ // variants are added.
+ StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::FakeRead(..)
+ | StatementKind::Nop
+ | StatementKind::Retag(..)
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::StorageLive(..) => {}
+ }
+ }
+
+ fn statement_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _: &mir::Statement<'tcx>,
+ loc: Location,
+ ) {
+ // If we move from a place then it only stops needing storage *after*
+ // that statement.
+ self.check_for_move(trans, loc);
+ }
+
+ fn before_terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ terminator: &mir::Terminator<'tcx>,
+ loc: Location,
+ ) {
+ // If a place is borrowed in a terminator, it needs storage for that terminator.
+ self.borrowed_locals.borrow().analysis().terminator_effect(trans, terminator, loc);
+
+ match &terminator.kind {
+ TerminatorKind::Call { destination, .. } => {
+ trans.gen(destination.local);
+ }
+
+ // Note that we do *not* gen the `resume_arg` of `Yield` terminators. The reason for
+ // that is that a `yield` will return from the function, and `resume_arg` is written
+ // only when the generator is later resumed. Unlike `Call`, this doesn't require the
+ // place to have storage *before* the yield, only after.
+ TerminatorKind::Yield { .. } => {}
+
+ TerminatorKind::InlineAsm { operands, .. } => {
+ for op in operands {
+ match op {
+ InlineAsmOperand::Out { place, .. }
+ | InlineAsmOperand::InOut { out_place: place, .. } => {
+ if let Some(place) = place {
+ trans.gen(place.local);
+ }
+ }
+ InlineAsmOperand::In { .. }
+ | InlineAsmOperand::Const { .. }
+ | InlineAsmOperand::SymFn { .. }
+ | InlineAsmOperand::SymStatic { .. } => {}
+ }
+ }
+ }
+
+ // Nothing to do for these. Match exhaustively so this fails to compile when new
+ // variants are added.
+ TerminatorKind::Abort
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Return
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Unreachable => {}
+ }
+ }
+
+ fn terminator_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ terminator: &mir::Terminator<'tcx>,
+ loc: Location,
+ ) {
+ match terminator.kind {
+ // For call terminators the destination requires storage for the call
+ // and after the call returns successfully, but not after a panic.
+ // Since `propagate_call_unwind` doesn't exist, we have to kill the
+ // destination here, and then gen it again in `call_return_effect`.
+ TerminatorKind::Call { destination, .. } => {
+ trans.kill(destination.local);
+ }
+
+ // The same applies to InlineAsm outputs.
+ TerminatorKind::InlineAsm { ref operands, .. } => {
+ CallReturnPlaces::InlineAsm(operands).for_each(|place| trans.kill(place.local));
+ }
+
+ // Nothing to do for these. Match exhaustively so this fails to compile when new
+ // variants are added.
+ TerminatorKind::Yield { .. }
+ | TerminatorKind::Abort
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Return
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Unreachable => {}
+ }
+
+ self.check_for_move(trans, loc);
+ }
+
+ fn call_return_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _block: BasicBlock,
+ return_places: CallReturnPlaces<'_, 'tcx>,
+ ) {
+ return_places.for_each(|place| trans.gen(place.local));
+ }
+
+ fn yield_resume_effect(
+ &self,
+ trans: &mut impl GenKill<Self::Idx>,
+ _resume_block: BasicBlock,
+ resume_place: mir::Place<'tcx>,
+ ) {
+ trans.gen(resume_place.local);
+ }
+}
+
+impl<'mir, 'tcx> MaybeRequiresStorage<'mir, 'tcx> {
+ /// Kill locals that are fully moved and have not been borrowed.
+ fn check_for_move(&self, trans: &mut impl GenKill<Local>, loc: Location) {
+ let mut visitor = MoveVisitor { trans, borrowed_locals: &self.borrowed_locals };
+ visitor.visit_location(&self.body, loc);
+ }
+}
+
+struct MoveVisitor<'a, 'mir, 'tcx, T> {
+ borrowed_locals: &'a RefCell<BorrowedLocalsResults<'mir, 'tcx>>,
+ trans: &'a mut T,
+}
+
+impl<'a, 'mir, 'tcx, T> Visitor<'tcx> for MoveVisitor<'a, 'mir, 'tcx, T>
+where
+ T: GenKill<Local>,
+{
+ fn visit_local(&mut self, local: Local, context: PlaceContext, loc: Location) {
+ if PlaceContext::NonMutatingUse(NonMutatingUseContext::Move) == context {
+ let mut borrowed_locals = self.borrowed_locals.borrow_mut();
+ borrowed_locals.seek_before_primary_effect(loc);
+ if !borrowed_locals.contains(local) {
+ self.trans.kill(local);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/lib.rs b/compiler/rustc_mir_dataflow/src/lib.rs
new file mode 100644
index 000000000..5793a286b
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/lib.rs
@@ -0,0 +1,63 @@
+#![feature(associated_type_defaults)]
+#![feature(box_patterns)]
+#![feature(exact_size_is_empty)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(once_cell)]
+#![feature(stmt_expr_attributes)]
+#![feature(trusted_step)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+use rustc_ast::MetaItem;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::symbol::{sym, Symbol};
+
+pub use self::drop_flag_effects::{
+ drop_flag_effects_for_function_entry, drop_flag_effects_for_location,
+ move_path_children_matching, on_all_children_bits, on_all_drop_children_bits,
+ on_lookup_result_bits,
+};
+pub use self::framework::{
+ fmt, graphviz, lattice, visit_results, Analysis, AnalysisDomain, Backward, CallReturnPlaces,
+ Direction, Engine, Forward, GenKill, GenKillAnalysis, JoinSemiLattice, Results, ResultsCursor,
+ ResultsRefCursor, ResultsVisitable, ResultsVisitor, SwitchIntEdgeEffects,
+};
+
+use self::move_paths::MoveData;
+
+pub mod drop_flag_effects;
+pub mod elaborate_drops;
+mod framework;
+pub mod impls;
+pub mod move_paths;
+pub mod rustc_peek;
+pub mod storage;
+pub mod un_derefer;
+
+pub(crate) mod indexes {
+ pub(crate) use super::move_paths::MovePathIndex;
+}
+
+pub struct MoveDataParamEnv<'tcx> {
+ pub move_data: MoveData<'tcx>,
+ pub param_env: ty::ParamEnv<'tcx>,
+}
+
+pub fn has_rustc_mir_with(tcx: TyCtxt<'_>, def_id: DefId, name: Symbol) -> Option<MetaItem> {
+ for attr in tcx.get_attrs(def_id, sym::rustc_mir) {
+ let items = attr.meta_item_list();
+ for item in items.iter().flat_map(|l| l.iter()) {
+ match item.meta_item() {
+ Some(mi) if mi.has_name(name) => return Some(mi.clone()),
+ _ => continue,
+ }
+ }
+ }
+ None
+}
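+
+// A hedged example of the attribute this helper inspects (as used by the
+// `rustc_peek` UI tests):
+//
+//     #[rustc_mir(rustc_peek_maybe_init, stop_after_dataflow)]
+//     fn foo() { /* ... */ }
+//
+// `has_rustc_mir_with(tcx, def_id, sym::rustc_peek_maybe_init)` then returns
+// the matching `MetaItem`.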
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs b/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs
new file mode 100644
index 000000000..28936274b
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/move_paths/abs_domain.rs
@@ -0,0 +1,61 @@
+//! The move-analysis portion of borrowck needs to work in an abstract
+//! domain of lifted `Place`s. Most of the `Place` variants fall into a
+//! one-to-one mapping between the concrete and abstract (e.g., a
+//! field-deref on a local variable, `x.field`, has the same meaning
+//! in both domains). Indexed projections are the exception: `a[x]`
+//! needs to be treated as mapping to the same move path as `a[y]` as
+//! well as `a[13]`, etc.
+//!
+//! (In theory, the analysis could be extended to work with sets of
+//! paths, so that `a[0]` and `a[13]` could be kept distinct, while
+//! `a[x]` would still overlap them both. But that is not what this
+//! representation does today.)
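+//!
+//! For illustration (a hedged example): `a[i]` and `a[j]` both lift to
+//! `ProjectionElem::Index(AbstractOperand)`, so they map to one move path,
+//! while `a.0` and `a.1` lift to distinct `Field` elems and stay separate.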
+
+use rustc_middle::mir::{Local, Operand, PlaceElem, ProjectionElem};
+use rustc_middle::ty::Ty;
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct AbstractOperand;
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct AbstractType;
+pub type AbstractElem = ProjectionElem<AbstractOperand, AbstractType>;
+
+pub trait Lift {
+ type Abstract;
+ fn lift(&self) -> Self::Abstract;
+}
+impl<'tcx> Lift for Operand<'tcx> {
+ type Abstract = AbstractOperand;
+ fn lift(&self) -> Self::Abstract {
+ AbstractOperand
+ }
+}
+impl Lift for Local {
+ type Abstract = AbstractOperand;
+ fn lift(&self) -> Self::Abstract {
+ AbstractOperand
+ }
+}
+impl<'tcx> Lift for Ty<'tcx> {
+ type Abstract = AbstractType;
+ fn lift(&self) -> Self::Abstract {
+ AbstractType
+ }
+}
+impl<'tcx> Lift for PlaceElem<'tcx> {
+ type Abstract = AbstractElem;
+ fn lift(&self) -> Self::Abstract {
+ match *self {
+ ProjectionElem::Deref => ProjectionElem::Deref,
+ ProjectionElem::Field(f, ty) => ProjectionElem::Field(f, ty.lift()),
+ ProjectionElem::Index(ref i) => ProjectionElem::Index(i.lift()),
+ ProjectionElem::Subslice { from, to, from_end } => {
+ ProjectionElem::Subslice { from, to, from_end }
+ }
+ ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
+ ProjectionElem::ConstantIndex { offset, min_length, from_end }
+ }
+ ProjectionElem::Downcast(a, u) => ProjectionElem::Downcast(a, u),
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
new file mode 100644
index 000000000..116e5c1f3
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs
@@ -0,0 +1,559 @@
+use crate::move_paths::FxHashMap;
+use crate::un_derefer::UnDerefer;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::tcx::RvalueInitializationState;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use smallvec::{smallvec, SmallVec};
+
+use std::mem;
+
+use super::abs_domain::Lift;
+use super::IllegalMoveOriginKind::*;
+use super::{Init, InitIndex, InitKind, InitLocation, LookupResult, MoveError};
+use super::{
+ LocationMap, MoveData, MoveOut, MoveOutIndex, MovePath, MovePathIndex, MovePathLookup,
+};
+
+struct MoveDataBuilder<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ data: MoveData<'tcx>,
+ errors: Vec<(Place<'tcx>, MoveError<'tcx>)>,
+ un_derefer: UnDerefer<'tcx>,
+}
+
+impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
+ fn new(body: &'a Body<'tcx>, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+ let mut move_paths = IndexVec::new();
+ let mut path_map = IndexVec::new();
+ let mut init_path_map = IndexVec::new();
+
+ MoveDataBuilder {
+ body,
+ tcx,
+ param_env,
+ errors: Vec::new(),
+ un_derefer: UnDerefer { tcx: tcx, derefer_sidetable: Default::default() },
+ data: MoveData {
+ moves: IndexVec::new(),
+ loc_map: LocationMap::new(body),
+ rev_lookup: MovePathLookup {
+ locals: body
+ .local_decls
+ .indices()
+ .map(|i| {
+ Self::new_move_path(
+ &mut move_paths,
+ &mut path_map,
+ &mut init_path_map,
+ None,
+ Place::from(i),
+ )
+ })
+ .collect(),
+ projections: Default::default(),
+ },
+ move_paths,
+ path_map,
+ inits: IndexVec::new(),
+ init_loc_map: LocationMap::new(body),
+ init_path_map,
+ },
+ }
+ }
+
+ fn new_move_path(
+ move_paths: &mut IndexVec<MovePathIndex, MovePath<'tcx>>,
+ path_map: &mut IndexVec<MovePathIndex, SmallVec<[MoveOutIndex; 4]>>,
+ init_path_map: &mut IndexVec<MovePathIndex, SmallVec<[InitIndex; 4]>>,
+ parent: Option<MovePathIndex>,
+ place: Place<'tcx>,
+ ) -> MovePathIndex {
+ let move_path =
+ move_paths.push(MovePath { next_sibling: None, first_child: None, parent, place });
+
+ if let Some(parent) = parent {
+ let next_sibling = mem::replace(&mut move_paths[parent].first_child, Some(move_path));
+ move_paths[move_path].next_sibling = next_sibling;
+ }
+
+ let path_map_ent = path_map.push(smallvec![]);
+ assert_eq!(path_map_ent, move_path);
+
+ let init_path_map_ent = init_path_map.push(smallvec![]);
+ assert_eq!(init_path_map_ent, move_path);
+
+ move_path
+ }
+}
+
+impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
+ /// This creates a MovePath for a given place, returning a `MoveError`
+ /// if that place can't be moved from.
+ ///
+ /// NOTE: places behind references *do not* get a move path, which is
+ /// problematic for borrowck.
+ ///
+ /// Maybe we should have separate "borrowck" and "moveck" modes.
+ fn move_path_for(&mut self, place: Place<'tcx>) -> Result<MovePathIndex, MoveError<'tcx>> {
+ if let Some(new_place) = self.builder.un_derefer.derefer(place.as_ref(), self.builder.body)
+ {
+ return self.move_path_for(new_place);
+ }
+
+ debug!("lookup({:?})", place);
+ let mut base = self.builder.data.rev_lookup.locals[place.local];
+
+ // The move path index of the first union that we find. Once this is
+ // `Some`, we stop creating child move paths, since moves from unions
+ // move the whole thing.
+ // We continue looking for other move errors though so that moving
+ // from `*(u.f: &_)` isn't allowed.
+ let mut union_path = None;
+
+ for (i, elem) in place.projection.iter().enumerate() {
+ let proj_base = &place.projection[..i];
+ let body = self.builder.body;
+ let tcx = self.builder.tcx;
+ let place_ty = Place::ty_from(place.local, proj_base, body, tcx).ty;
+ match place_ty.kind() {
+ ty::Ref(..) | ty::RawPtr(..) => {
+ let proj = &place.projection[..i + 1];
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ BorrowedContent {
+ target_place: Place {
+ local: place.local,
+ projection: tcx.intern_place_elems(proj),
+ },
+ },
+ ));
+ }
+ ty::Adt(adt, _) if adt.has_dtor(tcx) && !adt.is_box() => {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ InteriorOfTypeWithDestructor { container_ty: place_ty },
+ ));
+ }
+ ty::Adt(adt, _) if adt.is_union() => {
+ union_path.get_or_insert(base);
+ }
+ ty::Slice(_) => {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ InteriorOfSliceOrArray {
+ ty: place_ty,
+ is_index: matches!(elem, ProjectionElem::Index(..)),
+ },
+ ));
+ }
+
+ ty::Array(..) => {
+ if let ProjectionElem::Index(..) = elem {
+ return Err(MoveError::cannot_move_out_of(
+ self.loc,
+ InteriorOfSliceOrArray { ty: place_ty, is_index: true },
+ ));
+ }
+ }
+
+ _ => {}
+ };
+
+ if union_path.is_none() {
+ base = self.add_move_path(base, elem, |tcx| Place {
+ local: place.local,
+ projection: tcx.intern_place_elems(&place.projection[..i + 1]),
+ });
+ }
+ }
+
+ if let Some(base) = union_path {
+ // Move out of union - always move the entire union.
+ Err(MoveError::UnionMove { path: base })
+ } else {
+ Ok(base)
+ }
+ }
+
+ fn add_move_path(
+ &mut self,
+ base: MovePathIndex,
+ elem: PlaceElem<'tcx>,
+ mk_place: impl FnOnce(TyCtxt<'tcx>) -> Place<'tcx>,
+ ) -> MovePathIndex {
+ let MoveDataBuilder {
+ data: MoveData { rev_lookup, move_paths, path_map, init_path_map, .. },
+ tcx,
+ ..
+ } = self.builder;
+ *rev_lookup.projections.entry((base, elem.lift())).or_insert_with(move || {
+ MoveDataBuilder::new_move_path(
+ move_paths,
+ path_map,
+ init_path_map,
+ Some(base),
+ mk_place(*tcx),
+ )
+ })
+ }
+
+ fn create_move_path(&mut self, place: Place<'tcx>) {
+ // This is a non-moving access (such as an overwrite or
+ // drop), so it is OK if this is not a valid move path.
+ let _ = self.move_path_for(place);
+ }
+}
+
+pub type MoveDat<'tcx> = Result<
+ (FxHashMap<Local, Place<'tcx>>, MoveData<'tcx>),
+ (MoveData<'tcx>, Vec<(Place<'tcx>, MoveError<'tcx>)>),
+>;
+
+impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
+ fn finalize(self) -> MoveDat<'tcx> {
+ debug!("{}", {
+ debug!("moves for {:?}:", self.body.span);
+ for (j, mo) in self.data.moves.iter_enumerated() {
+ debug!(" {:?} = {:?}", j, mo);
+ }
+ debug!("move paths for {:?}:", self.body.span);
+ for (j, path) in self.data.move_paths.iter_enumerated() {
+ debug!(" {:?} = {:?}", j, path);
+ }
+ "done dumping moves"
+ });
+
+ if self.errors.is_empty() {
+ Ok((self.un_derefer.derefer_sidetable, self.data))
+ } else {
+ Err((self.data, self.errors))
+ }
+ }
+}
+
+pub(super) fn gather_moves<'tcx>(
+ body: &Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+) -> MoveDat<'tcx> {
+ let mut builder = MoveDataBuilder::new(body, tcx, param_env);
+
+ builder.gather_args();
+
+ for (bb, block) in body.basic_blocks().iter_enumerated() {
+ for (i, stmt) in block.statements.iter().enumerate() {
+ let source = Location { block: bb, statement_index: i };
+ builder.gather_statement(source, stmt);
+ }
+
+ let terminator_loc = Location { block: bb, statement_index: block.statements.len() };
+ builder.gather_terminator(terminator_loc, block.terminator());
+ }
+
+ builder.finalize()
+}
+
+impl<'a, 'tcx> MoveDataBuilder<'a, 'tcx> {
+ fn gather_args(&mut self) {
+ for arg in self.body.args_iter() {
+ let path = self.data.rev_lookup.locals[arg];
+
+ let init = self.data.inits.push(Init {
+ path,
+ kind: InitKind::Deep,
+ location: InitLocation::Argument(arg),
+ });
+
+ debug!("gather_args: adding init {:?} of {:?} for argument {:?}", init, path, arg);
+
+ self.data.init_path_map[path].push(init);
+ }
+ }
+
+ fn gather_statement(&mut self, loc: Location, stmt: &Statement<'tcx>) {
+ debug!("gather_statement({:?}, {:?})", loc, stmt);
+ (Gatherer { builder: self, loc }).gather_statement(stmt);
+ }
+
+ fn gather_terminator(&mut self, loc: Location, term: &Terminator<'tcx>) {
+ debug!("gather_terminator({:?}, {:?})", loc, term);
+ (Gatherer { builder: self, loc }).gather_terminator(term);
+ }
+}
+
+struct Gatherer<'b, 'a, 'tcx> {
+ builder: &'b mut MoveDataBuilder<'a, 'tcx>,
+ loc: Location,
+}
+
+impl<'b, 'a, 'tcx> Gatherer<'b, 'a, 'tcx> {
+ fn gather_statement(&mut self, stmt: &Statement<'tcx>) {
+ match &stmt.kind {
+ StatementKind::Assign(box (place, Rvalue::CopyForDeref(reffed))) => {
+ assert!(place.projection.is_empty());
+ if self.builder.body.local_decls[place.local].is_deref_temp() {
+ self.builder.un_derefer.derefer_sidetable.insert(place.local, *reffed);
+ }
+ }
+ StatementKind::Assign(box (place, rval)) => {
+ self.create_move_path(*place);
+ if let RvalueInitializationState::Shallow = rval.initialization_state() {
+ // A box starts out uninitialized; we need to create a separate
+ // move-path for the interior so it will be separate from
+ // the exterior.
+ self.create_move_path(self.builder.tcx.mk_place_deref(*place));
+ self.gather_init(place.as_ref(), InitKind::Shallow);
+ } else {
+ self.gather_init(place.as_ref(), InitKind::Deep);
+ }
+ self.gather_rvalue(rval);
+ }
+ StatementKind::FakeRead(box (_, place)) => {
+ self.create_move_path(*place);
+ }
+ StatementKind::StorageLive(_) => {}
+ StatementKind::StorageDead(local) => {
+ // DerefTemp locals (results of CopyForDeref) don't actually move anything.
+ if !self.builder.un_derefer.derefer_sidetable.contains_key(&local) {
+ self.gather_move(Place::from(*local));
+ }
+ }
+ StatementKind::SetDiscriminant { .. } | StatementKind::Deinit(..) => {
+ span_bug!(
+ stmt.source_info.span,
+ "SetDiscriminant/Deinit should not exist during borrowck"
+ );
+ }
+ StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Nop => {}
+ }
+ }
+
+ fn gather_rvalue(&mut self, rvalue: &Rvalue<'tcx>) {
+ match *rvalue {
+ Rvalue::ThreadLocalRef(_) => {} // not-a-move
+ Rvalue::Use(ref operand)
+ | Rvalue::Repeat(ref operand, _)
+ | Rvalue::Cast(_, ref operand, _)
+ | Rvalue::ShallowInitBox(ref operand, _)
+ | Rvalue::UnaryOp(_, ref operand) => self.gather_operand(operand),
+ Rvalue::BinaryOp(ref _binop, box (ref lhs, ref rhs))
+ | Rvalue::CheckedBinaryOp(ref _binop, box (ref lhs, ref rhs)) => {
+ self.gather_operand(lhs);
+ self.gather_operand(rhs);
+ }
+ Rvalue::Aggregate(ref _kind, ref operands) => {
+ for operand in operands {
+ self.gather_operand(operand);
+ }
+ }
+ Rvalue::CopyForDeref(..) => unreachable!(),
+ Rvalue::Ref(..)
+ | Rvalue::AddressOf(..)
+ | Rvalue::Discriminant(..)
+ | Rvalue::Len(..)
+ | Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, _) => {}
+ }
+ }
+
+ fn gather_terminator(&mut self, term: &Terminator<'tcx>) {
+ match term.kind {
+ TerminatorKind::Goto { target: _ }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ // In some sense returning moves the return place into the current
+ // call's destination; however, since there are no statements after
+ // this that could possibly access the return place, this doesn't
+ // need recording.
+ | TerminatorKind::Return
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Unreachable => {}
+
+ TerminatorKind::Assert { ref cond, .. } => {
+ self.gather_operand(cond);
+ }
+
+ TerminatorKind::SwitchInt { ref discr, .. } => {
+ self.gather_operand(discr);
+ }
+
+ TerminatorKind::Yield { ref value, resume_arg: place, .. } => {
+ self.gather_operand(value);
+ self.create_move_path(place);
+ self.gather_init(place.as_ref(), InitKind::Deep);
+ }
+
+ TerminatorKind::Drop { place, target: _, unwind: _ } => {
+ self.gather_move(place);
+ }
+ TerminatorKind::DropAndReplace { place, ref value, .. } => {
+ self.create_move_path(place);
+ self.gather_operand(value);
+ self.gather_init(place.as_ref(), InitKind::Deep);
+ }
+ TerminatorKind::Call {
+ ref func,
+ ref args,
+ destination,
+ target,
+ cleanup: _,
+ from_hir_call: _,
+ fn_span: _,
+ } => {
+ self.gather_operand(func);
+ for arg in args {
+ self.gather_operand(arg);
+ }
+ if let Some(_bb) = target {
+ self.create_move_path(destination);
+ self.gather_init(destination.as_ref(), InitKind::NonPanicPathOnly);
+ }
+ }
+ TerminatorKind::InlineAsm {
+ template: _,
+ ref operands,
+ options: _,
+ line_spans: _,
+ destination: _,
+ cleanup: _,
+ } => {
+ for op in operands {
+ match *op {
+ InlineAsmOperand::In { reg: _, ref value }
+ => {
+ self.gather_operand(value);
+ }
+ InlineAsmOperand::Out { reg: _, late: _, place, .. } => {
+ if let Some(place) = place {
+ self.create_move_path(place);
+ self.gather_init(place.as_ref(), InitKind::Deep);
+ }
+ }
+ InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
+ self.gather_operand(in_value);
+ if let Some(out_place) = out_place {
+ self.create_move_path(out_place);
+ self.gather_init(out_place.as_ref(), InitKind::Deep);
+ }
+ }
+ InlineAsmOperand::Const { value: _ }
+ | InlineAsmOperand::SymFn { value: _ }
+ | InlineAsmOperand::SymStatic { def_id: _ } => {}
+ }
+ }
+ }
+ }
+ }
+
+ fn gather_operand(&mut self, operand: &Operand<'tcx>) {
+ match *operand {
+ Operand::Constant(..) | Operand::Copy(..) => {} // not-a-move
+ Operand::Move(place) => {
+ // a move
+ self.gather_move(place);
+ }
+ }
+ }
+
+ fn gather_move(&mut self, place: Place<'tcx>) {
+ debug!("gather_move({:?}, {:?})", self.loc, place);
+ if let Some(new_place) = self.builder.un_derefer.derefer(place.as_ref(), self.builder.body)
+ {
+ self.gather_move(new_place);
+ return;
+ }
+
+ if let [ref base @ .., ProjectionElem::Subslice { from, to, from_end: false }] =
+ **place.projection
+ {
+ // Split `Subslice` patterns into the corresponding list of
+ // `ConstIndex` patterns. This is done to ensure that all move paths
+ // are disjoint, which is expected by drop elaboration.
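+ // For example (a hedged sketch): with `s: [T; 4]`, moving `s[1..3]` is
+ // recorded as moves of `s[1 of 4]` and `s[2 of 4]` rather than one
+ // `Subslice` path.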
+ let base_place =
+ Place { local: place.local, projection: self.builder.tcx.intern_place_elems(base) };
+ let base_path = match self.move_path_for(base_place) {
+ Ok(path) => path,
+ Err(MoveError::UnionMove { path }) => {
+ self.record_move(place, path);
+ return;
+ }
+ Err(error @ MoveError::IllegalMove { .. }) => {
+ self.builder.errors.push((base_place, error));
+ return;
+ }
+ };
+ let base_ty = base_place.ty(self.builder.body, self.builder.tcx).ty;
+ let len: u64 = match base_ty.kind() {
+ ty::Array(_, size) => size.eval_usize(self.builder.tcx, self.builder.param_env),
+ _ => bug!("from_end: false slice pattern of non-array type"),
+ };
+ for offset in from..to {
+ let elem =
+ ProjectionElem::ConstantIndex { offset, min_length: len, from_end: false };
+ let path =
+ self.add_move_path(base_path, elem, |tcx| tcx.mk_place_elem(base_place, elem));
+ self.record_move(place, path);
+ }
+ } else {
+ match self.move_path_for(place) {
+ Ok(path) | Err(MoveError::UnionMove { path }) => self.record_move(place, path),
+ Err(error @ MoveError::IllegalMove { .. }) => {
+ self.builder.errors.push((place, error));
+ }
+ };
+ }
+ }
+
+ fn record_move(&mut self, place: Place<'tcx>, path: MovePathIndex) {
+ let move_out = self.builder.data.moves.push(MoveOut { path, source: self.loc });
+ debug!(
+ "gather_move({:?}, {:?}): adding move {:?} of {:?}",
+ self.loc, place, move_out, path
+ );
+ self.builder.data.path_map[path].push(move_out);
+ self.builder.data.loc_map[self.loc].push(move_out);
+ }
+
+ fn gather_init(&mut self, place: PlaceRef<'tcx>, kind: InitKind) {
+ debug!("gather_init({:?}, {:?})", self.loc, place);
+
+ if let Some(new_place) = self.builder.un_derefer.derefer(place, self.builder.body) {
+ self.gather_init(new_place.as_ref(), kind);
+ return;
+ }
+
+ let mut place = place;
+
+ // Check if we are assigning into a field of a union; if so, look up the
+ // place of the union so it is marked as initialized again.
+ if let Some((place_base, ProjectionElem::Field(_, _))) = place.last_projection() {
+ if place_base.ty(self.builder.body, self.builder.tcx).ty.is_union() {
+ place = place_base;
+ }
+ }
+
+ if let LookupResult::Exact(path) = self.builder.data.rev_lookup.find(place) {
+ let init = self.builder.data.inits.push(Init {
+ location: InitLocation::Statement(self.loc),
+ path,
+ kind,
+ });
+
+ debug!(
+ "gather_init({:?}, {:?}): adding init {:?} of {:?}",
+ self.loc, place, init, path
+ );
+
+ self.builder.data.init_path_map[path].push(init);
+ self.builder.data.init_loc_map[self.loc].push(init);
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/move_paths/mod.rs b/compiler/rustc_mir_dataflow/src/move_paths/mod.rs
new file mode 100644
index 000000000..a951c5b0b
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/move_paths/mod.rs
@@ -0,0 +1,422 @@
+use crate::move_paths::builder::MoveDat;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_span::Span;
+use smallvec::SmallVec;
+
+use std::fmt;
+use std::ops::{Index, IndexMut};
+
+use self::abs_domain::{AbstractElem, Lift};
+
+mod abs_domain;
+
+rustc_index::newtype_index! {
+ pub struct MovePathIndex {
+ DEBUG_FORMAT = "mp{}"
+ }
+}
+
+impl polonius_engine::Atom for MovePathIndex {
+ fn index(self) -> usize {
+ rustc_index::vec::Idx::index(self)
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct MoveOutIndex {
+ DEBUG_FORMAT = "mo{}"
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct InitIndex {
+ DEBUG_FORMAT = "in{}"
+ }
+}
+
+impl MoveOutIndex {
+ pub fn move_path_index(self, move_data: &MoveData<'_>) -> MovePathIndex {
+ move_data.moves[self].path
+ }
+}
+
+/// `MovePath` is a canonicalized representation of a path that is
+/// moved or assigned to.
+///
+/// It follows a tree structure.
+///
+/// Given `struct X { m: M, n: N }` and `x: X`, moves like `drop x.m;`
+/// move *out* of the place `x.m`.
+///
+/// The MovePaths representing `x.m` and `x.n` are siblings (that is,
+/// one of them will link to the other via the `next_sibling` field,
+/// and the other will have no entry in its `next_sibling` field), and
+/// they both have the MovePath representing `x` as their parent.
+#[derive(Clone)]
+pub struct MovePath<'tcx> {
+ pub next_sibling: Option<MovePathIndex>,
+ pub first_child: Option<MovePathIndex>,
+ pub parent: Option<MovePathIndex>,
+ pub place: Place<'tcx>,
+}
+
+impl<'tcx> MovePath<'tcx> {
+ /// Returns an iterator over the parents of `self`.
+ pub fn parents<'a>(
+ &self,
+ move_paths: &'a IndexVec<MovePathIndex, MovePath<'tcx>>,
+ ) -> impl 'a + Iterator<Item = (MovePathIndex, &'a MovePath<'tcx>)> {
+ let first = self.parent.map(|mpi| (mpi, &move_paths[mpi]));
+ MovePathLinearIter {
+ next: first,
+ fetch_next: move |_, parent: &MovePath<'_>| {
+ parent.parent.map(|mpi| (mpi, &move_paths[mpi]))
+ },
+ }
+ }
+
+ /// Returns an iterator over the immediate children of `self`.
+ pub fn children<'a>(
+ &self,
+ move_paths: &'a IndexVec<MovePathIndex, MovePath<'tcx>>,
+ ) -> impl 'a + Iterator<Item = (MovePathIndex, &'a MovePath<'tcx>)> {
+ let first = self.first_child.map(|mpi| (mpi, &move_paths[mpi]));
+ MovePathLinearIter {
+ next: first,
+ fetch_next: move |_, child: &MovePath<'_>| {
+ child.next_sibling.map(|mpi| (mpi, &move_paths[mpi]))
+ },
+ }
+ }
+
+ /// Finds the closest descendant of `self` for which `f` returns `true` using a breadth-first
+ /// search.
+ ///
+ /// `f` will **not** be called on `self`.
+ pub fn find_descendant(
+ &self,
+ move_paths: &IndexVec<MovePathIndex, MovePath<'_>>,
+ f: impl Fn(MovePathIndex) -> bool,
+ ) -> Option<MovePathIndex> {
+ let mut todo = if let Some(child) = self.first_child {
+ vec![child]
+ } else {
+ return None;
+ };
+
+ while let Some(mpi) = todo.pop() {
+ if f(mpi) {
+ return Some(mpi);
+ }
+
+ let move_path = &move_paths[mpi];
+ if let Some(child) = move_path.first_child {
+ todo.push(child);
+ }
+
+ // After we've processed the original `mpi`, we should always
+ // traverse the siblings of any of its children.
+ if let Some(sibling) = move_path.next_sibling {
+ todo.push(sibling);
+ }
+ }
+
+ None
+ }
+}
+
+impl<'tcx> fmt::Debug for MovePath<'tcx> {
+ fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(w, "MovePath {{")?;
+ if let Some(parent) = self.parent {
+ write!(w, " parent: {:?},", parent)?;
+ }
+ if let Some(first_child) = self.first_child {
+ write!(w, " first_child: {:?},", first_child)?;
+ }
+ if let Some(next_sibling) = self.next_sibling {
+ write!(w, " next_sibling: {:?}", next_sibling)?;
+ }
+ write!(w, " place: {:?} }}", self.place)
+ }
+}
+
+impl<'tcx> fmt::Display for MovePath<'tcx> {
+ fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(w, "{:?}", self.place)
+ }
+}
+
+struct MovePathLinearIter<'a, 'tcx, F> {
+ next: Option<(MovePathIndex, &'a MovePath<'tcx>)>,
+ fetch_next: F,
+}
+
+impl<'a, 'tcx, F> Iterator for MovePathLinearIter<'a, 'tcx, F>
+where
+ F: FnMut(MovePathIndex, &'a MovePath<'tcx>) -> Option<(MovePathIndex, &'a MovePath<'tcx>)>,
+{
+ type Item = (MovePathIndex, &'a MovePath<'tcx>);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let ret = self.next.take()?;
+ self.next = (self.fetch_next)(ret.0, ret.1);
+ Some(ret)
+ }
+}
+
+#[derive(Debug)]
+pub struct MoveData<'tcx> {
+ pub move_paths: IndexVec<MovePathIndex, MovePath<'tcx>>,
+ pub moves: IndexVec<MoveOutIndex, MoveOut>,
+ /// Each Location `l` is mapped to the MoveOut's that are effects
+ /// of executing the code at `l`. (There can be multiple MoveOut's
+ /// for a given `l` because each MoveOut is associated with one
+ /// particular path being moved.)
+ pub loc_map: LocationMap<SmallVec<[MoveOutIndex; 4]>>,
+ pub path_map: IndexVec<MovePathIndex, SmallVec<[MoveOutIndex; 4]>>,
+ pub rev_lookup: MovePathLookup,
+ pub inits: IndexVec<InitIndex, Init>,
+ /// Each Location `l` is mapped to the Inits that are effects
+ /// of executing the code at `l`.
+ pub init_loc_map: LocationMap<SmallVec<[InitIndex; 4]>>,
+ pub init_path_map: IndexVec<MovePathIndex, SmallVec<[InitIndex; 4]>>,
+}
+
+pub trait HasMoveData<'tcx> {
+ fn move_data(&self) -> &MoveData<'tcx>;
+}
+
+#[derive(Debug)]
+pub struct LocationMap<T> {
+ /// Location-indexed (BasicBlock for outer index, index within BB
+ /// for inner index) map.
+ pub(crate) map: IndexVec<BasicBlock, Vec<T>>,
+}
+
+impl<T> Index<Location> for LocationMap<T> {
+ type Output = T;
+ fn index(&self, index: Location) -> &Self::Output {
+ &self.map[index.block][index.statement_index]
+ }
+}
+
+impl<T> IndexMut<Location> for LocationMap<T> {
+ fn index_mut(&mut self, index: Location) -> &mut Self::Output {
+ &mut self.map[index.block][index.statement_index]
+ }
+}
+
+impl<T> LocationMap<T>
+where
+ T: Default + Clone,
+{
+ fn new(body: &Body<'_>) -> Self {
+ LocationMap {
+ map: body
+ .basic_blocks()
+ .iter()
+ .map(|block| vec![T::default(); block.statements.len() + 1])
+ .collect(),
+ }
+ }
+}
+
+/// `MoveOut` represents a point in a program that moves out of some
+/// L-value; i.e., "creates" uninitialized memory.
+///
+/// With respect to dataflow analysis:
+/// - Generated by moves and declaration of uninitialized variables.
+/// - Killed by assignments to the memory.
+#[derive(Copy, Clone)]
+pub struct MoveOut {
+ /// path being moved
+ pub path: MovePathIndex,
+ /// location of move
+ pub source: Location,
+}
+
+impl fmt::Debug for MoveOut {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{:?}@{:?}", self.path, self.source)
+ }
+}
+
+/// `Init` represents a point in a program that initializes some L-value.
+#[derive(Copy, Clone)]
+pub struct Init {
+ /// path being initialized
+ pub path: MovePathIndex,
+ /// location of initialization
+ pub location: InitLocation,
+ /// Extra information about this initialization
+ pub kind: InitKind,
+}
+
+/// Initializations can be from an argument or from a statement. Arguments
+/// do not have locations; in those cases the `Local` is kept.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum InitLocation {
+ Argument(Local),
+ Statement(Location),
+}
+
+/// Additional information about the initialization.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum InitKind {
+ /// Deep init, even on panic
+ Deep,
+ /// Only does a shallow init
+ Shallow,
+ /// This doesn't initialize the variable on panic (and a panic is possible).
+ NonPanicPathOnly,
+}
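+
+// Hedged examples of how the builder assigns these kinds: a plain assignment
+// `x = 5;` records a `Deep` init; a `ShallowInitBox` rvalue records `Shallow`
+// (the box contents stay uninitialized); and a call's destination records
+// `NonPanicPathOnly`, since a panic in the callee leaves it uninitialized.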
+
+impl fmt::Debug for Init {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{:?}@{:?} ({:?})", self.path, self.location, self.kind)
+ }
+}
+
+impl Init {
+ pub fn span<'tcx>(&self, body: &Body<'tcx>) -> Span {
+ match self.location {
+ InitLocation::Argument(local) => body.local_decls[local].source_info.span,
+ InitLocation::Statement(location) => body.source_info(location).span,
+ }
+ }
+}
+
+/// Tables mapping from a place to its MovePathIndex.
+#[derive(Debug)]
+pub struct MovePathLookup {
+ locals: IndexVec<Local, MovePathIndex>,
+
+ /// Projections are made from a base place and a projection elem.
+ /// The base place will have a unique MovePathIndex; we use that
+ /// index, paired with the lifted projection elem, as the key for
+ /// looking up the MovePathIndex of the projected place.
+ projections: FxHashMap<(MovePathIndex, AbstractElem), MovePathIndex>,
+}
+
+mod builder;
+
+#[derive(Copy, Clone, Debug)]
+pub enum LookupResult {
+ Exact(MovePathIndex),
+ Parent(Option<MovePathIndex>),
+}
+
+impl MovePathLookup {
+ // Unlike the builder's `fn move_path_for`, this lookup
+ // alternative will *not* create a MovePath on the fly for an
+ // unknown place, but will rather return the nearest available
+ // parent.
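+ //
+ // Hedged example: if move paths exist for `_1` and `_1.0` but not for
+ // `_1.0.1`, then `find` on `_1.0.1` returns
+ // `LookupResult::Parent(Some(<mp for _1.0>))`, while `find` on `_1.0`
+ // returns `LookupResult::Exact(..)`.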
+ pub fn find(&self, place: PlaceRef<'_>) -> LookupResult {
+ let mut result = self.locals[place.local];
+
+ for elem in place.projection.iter() {
+ if let Some(&subpath) = self.projections.get(&(result, elem.lift())) {
+ result = subpath;
+ } else {
+ return LookupResult::Parent(Some(result));
+ }
+ }
+
+ LookupResult::Exact(result)
+ }
+
+ pub fn find_local(&self, local: Local) -> MovePathIndex {
+ self.locals[local]
+ }
+
+ /// An enumerated iterator of `local`s and their associated
+ /// `MovePathIndex`es.
+ pub fn iter_locals_enumerated(
+ &self,
+ ) -> impl DoubleEndedIterator<Item = (Local, MovePathIndex)> + ExactSizeIterator + '_ {
+ self.locals.iter_enumerated().map(|(l, &idx)| (l, idx))
+ }
+}
+
+#[derive(Debug)]
+pub struct IllegalMoveOrigin<'tcx> {
+ pub location: Location,
+ pub kind: IllegalMoveOriginKind<'tcx>,
+}
+
+#[derive(Debug)]
+pub enum IllegalMoveOriginKind<'tcx> {
+ /// Illegal move due to attempt to move from behind a reference.
+ BorrowedContent {
+ /// The place the reference refers to: if erroneous code was trying to
+ /// move from `(*x).f` this will be `*x`.
+ target_place: Place<'tcx>,
+ },
+
+ /// Illegal move due to attempt to move from a field of an ADT that
+ /// implements `Drop`. Rust maintains the invariant that all `Drop`
+ /// ADTs remain fully initialized so that the user-defined destructor
+ /// can safely read from all of the ADT's fields.
+ InteriorOfTypeWithDestructor { container_ty: Ty<'tcx> },
+
+ /// Illegal move due to attempt to move out of a slice or array.
+ InteriorOfSliceOrArray { ty: Ty<'tcx>, is_index: bool },
+}
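+
+// Hedged source-level examples of each origin kind:
+//
+//     let x = *r;   // r: &String          -> BorrowedContent
+//     let f = d.0;  // d: D where D: Drop  -> InteriorOfTypeWithDestructor
+//     let e = a[i]; // a: [String; 3]      -> InteriorOfSliceOrArray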
+
+#[derive(Debug)]
+pub enum MoveError<'tcx> {
+ IllegalMove { cannot_move_out_of: IllegalMoveOrigin<'tcx> },
+ UnionMove { path: MovePathIndex },
+}
+
+impl<'tcx> MoveError<'tcx> {
+ fn cannot_move_out_of(location: Location, kind: IllegalMoveOriginKind<'tcx>) -> Self {
+ let origin = IllegalMoveOrigin { location, kind };
+ MoveError::IllegalMove { cannot_move_out_of: origin }
+ }
+}
+
+impl<'tcx> MoveData<'tcx> {
+ pub fn gather_moves(
+ body: &Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ) -> MoveDat<'tcx> {
+ builder::gather_moves(body, tcx, param_env)
+ }
+
+ /// For the move path `mpi`, returns the root local variable (if any) that starts the path.
+ /// (e.g., for a path like `a.b.c` returns `Some(a)`)
+ pub fn base_local(&self, mut mpi: MovePathIndex) -> Option<Local> {
+ loop {
+ let path = &self.move_paths[mpi];
+ if let Some(l) = path.place.as_local() {
+ return Some(l);
+ }
+ if let Some(parent) = path.parent {
+ mpi = parent;
+ continue;
+ } else {
+ return None;
+ }
+ }
+ }
+
+ pub fn find_in_move_path_or_its_descendants(
+ &self,
+ root: MovePathIndex,
+ pred: impl Fn(MovePathIndex) -> bool,
+ ) -> Option<MovePathIndex> {
+ if pred(root) {
+ return Some(root);
+ }
+
+ self.move_paths[root].find_descendant(&self.move_paths, pred)
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/rustc_peek.rs b/compiler/rustc_mir_dataflow/src/rustc_peek.rs
new file mode 100644
index 000000000..f2471f37a
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/rustc_peek.rs
@@ -0,0 +1,287 @@
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+use rustc_index::bit_set::ChunkedBitSet;
+use rustc_middle::mir::MirPass;
+use rustc_middle::mir::{self, Body, Local, Location};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+use crate::framework::BitSetExt;
+use crate::impls::{
+ DefinitelyInitializedPlaces, MaybeInitializedPlaces, MaybeLiveLocals, MaybeUninitializedPlaces,
+};
+use crate::move_paths::{HasMoveData, MoveData};
+use crate::move_paths::{LookupResult, MovePathIndex};
+use crate::MoveDataParamEnv;
+use crate::{Analysis, JoinSemiLattice, Results, ResultsCursor};
+
+pub struct SanityCheck;
+
+// FIXME: This should be a `MirLint`, but it needs to be moved back to `rustc_mir_transform` first.
+impl<'tcx> MirPass<'tcx> for SanityCheck {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ use crate::has_rustc_mir_with;
+ let def_id = body.source.def_id();
+ if !tcx.has_attr(def_id, sym::rustc_mir) {
+ debug!("skipping rustc_peek::SanityCheck on {}", tcx.def_path_str(def_id));
+ return;
+ } else {
+ debug!("running rustc_peek::SanityCheck on {}", tcx.def_path_str(def_id));
+ }
+
+ let param_env = tcx.param_env(def_id);
+ let (_, move_data) = MoveData::gather_moves(body, tcx, param_env).unwrap();
+ let mdpe = MoveDataParamEnv { move_data, param_env };
+
+ if has_rustc_mir_with(tcx, def_id, sym::rustc_peek_maybe_init).is_some() {
+ let flow_inits = MaybeInitializedPlaces::new(tcx, body, &mdpe)
+ .into_engine(tcx, body)
+ .iterate_to_fixpoint();
+
+ sanity_check_via_rustc_peek(tcx, body, &flow_inits);
+ }
+
+ if has_rustc_mir_with(tcx, def_id, sym::rustc_peek_maybe_uninit).is_some() {
+ let flow_uninits = MaybeUninitializedPlaces::new(tcx, body, &mdpe)
+ .into_engine(tcx, body)
+ .iterate_to_fixpoint();
+
+ sanity_check_via_rustc_peek(tcx, body, &flow_uninits);
+ }
+
+ if has_rustc_mir_with(tcx, def_id, sym::rustc_peek_definite_init).is_some() {
+ let flow_def_inits = DefinitelyInitializedPlaces::new(tcx, body, &mdpe)
+ .into_engine(tcx, body)
+ .iterate_to_fixpoint();
+
+ sanity_check_via_rustc_peek(tcx, body, &flow_def_inits);
+ }
+
+ if has_rustc_mir_with(tcx, def_id, sym::rustc_peek_liveness).is_some() {
+ let flow_liveness = MaybeLiveLocals.into_engine(tcx, body).iterate_to_fixpoint();
+
+ sanity_check_via_rustc_peek(tcx, body, &flow_liveness);
+ }
+
+ if has_rustc_mir_with(tcx, def_id, sym::stop_after_dataflow).is_some() {
+ tcx.sess.fatal("stop_after_dataflow ended compilation");
+ }
+ }
+}
+
+/// This function scans `mir` for all calls to the intrinsic
+/// `rustc_peek` that have the expression form `rustc_peek(&expr)`.
+///
+/// For each such call, it determines what the dataflow bit-state is for
+/// the L-value corresponding to `expr`; if the bit-state is a 1, then
+/// that call to `rustc_peek` is ignored by the sanity check. If the
+/// bit-state is a 0, then this pass emits an error message saying
+/// "rustc_peek: bit not set".
+///
+/// The intention is that one can write unit tests for dataflow by
+/// putting code into a UI test and using `rustc_peek` to
+/// make observations about the results of dataflow static analyses.
+///
+/// (If there are any calls to `rustc_peek` that do not match the
+/// expression form above, then that emits an error as well, but those
+/// errors are not intended to be used for unit tests.)
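+///
+/// A hedged sketch of such a test (mirroring the real UI tests; names are
+/// illustrative):
+///
+/// ```text
+/// #[rustc_mir(rustc_peek_maybe_init, stop_after_dataflow)]
+/// fn foo() {
+///     let x = 0;
+///     unsafe { rustc_peek(&x) }; // ok: `x` is maybe-initialized here
+/// }
+/// ```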
+pub fn sanity_check_via_rustc_peek<'tcx, A>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ results: &Results<'tcx, A>,
+) where
+ A: RustcPeekAt<'tcx>,
+{
+ let def_id = body.source.def_id();
+ debug!("sanity_check_via_rustc_peek def_id: {:?}", def_id);
+
+ let mut cursor = ResultsCursor::new(body, results);
+
+ let peek_calls = body.basic_blocks().iter_enumerated().filter_map(|(bb, block_data)| {
+ PeekCall::from_terminator(tcx, block_data.terminator()).map(|call| (bb, block_data, call))
+ });
+
+ for (bb, block_data, call) in peek_calls {
+ // Look for a sequence like the following to indicate that we should be peeking at `_1`:
+ // _2 = &_1;
+ // rustc_peek(_2);
+ //
+ // /* or */
+ //
+ // _2 = _1;
+ // rustc_peek(_2);
+ let (statement_index, peek_rval) = block_data
+ .statements
+ .iter()
+ .enumerate()
+ .find_map(|(i, stmt)| value_assigned_to_local(stmt, call.arg).map(|rval| (i, rval)))
+ .expect(
+ "call to rustc_peek should be preceded by \
+ assignment to temporary holding its argument",
+ );
+
+ match (call.kind, peek_rval) {
+ (PeekCallKind::ByRef, mir::Rvalue::Ref(_, _, place))
+ | (
+ PeekCallKind::ByVal,
+ mir::Rvalue::Use(mir::Operand::Move(place) | mir::Operand::Copy(place)),
+ ) => {
+ let loc = Location { block: bb, statement_index };
+ cursor.seek_before_primary_effect(loc);
+ let state = cursor.get();
+ results.analysis.peek_at(tcx, *place, state, call);
+ }
+
+ _ => {
+ let msg = "rustc_peek: argument expression \
+ must be either `place` or `&place`";
+ tcx.sess.span_err(call.span, msg);
+ }
+ }
+ }
+}
+
+/// If `stmt` is an assignment where the LHS is the given local (with no projections), returns the
+/// RHS of the assignment.
+fn value_assigned_to_local<'a, 'tcx>(
+ stmt: &'a mir::Statement<'tcx>,
+ local: Local,
+) -> Option<&'a mir::Rvalue<'tcx>> {
+ if let mir::StatementKind::Assign(box (place, rvalue)) = &stmt.kind {
+ if let Some(l) = place.as_local() {
+ if local == l {
+ return Some(&*rvalue);
+ }
+ }
+ }
+
+ None
+}
+
+#[derive(Clone, Copy, Debug)]
+enum PeekCallKind {
+ ByVal,
+ ByRef,
+}
+
+impl PeekCallKind {
+ fn from_arg_ty(arg: Ty<'_>) -> Self {
+ match arg.kind() {
+ ty::Ref(_, _, _) => PeekCallKind::ByRef,
+ _ => PeekCallKind::ByVal,
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct PeekCall {
+ arg: Local,
+ kind: PeekCallKind,
+ span: Span,
+}
+
+impl PeekCall {
+ fn from_terminator<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ terminator: &mir::Terminator<'tcx>,
+ ) -> Option<Self> {
+ use mir::Operand;
+
+ let span = terminator.source_info.span;
+ if let mir::TerminatorKind::Call { func: Operand::Constant(func), args, .. } =
+ &terminator.kind
+ {
+ if let ty::FnDef(def_id, substs) = *func.literal.ty().kind() {
+ let name = tcx.item_name(def_id);
+ if !tcx.is_intrinsic(def_id) || name != sym::rustc_peek {
+ return None;
+ }
+
+ assert_eq!(args.len(), 1);
+ let kind = PeekCallKind::from_arg_ty(substs.type_at(0));
+ let arg = match &args[0] {
+ Operand::Copy(place) | Operand::Move(place) => {
+ if let Some(local) = place.as_local() {
+ local
+ } else {
+ tcx.sess.diagnostic().span_err(
+ span,
+ "dataflow::sanity_check cannot feed a non-temp to rustc_peek.",
+ );
+ return None;
+ }
+ }
+ _ => {
+ tcx.sess.diagnostic().span_err(
+ span,
+ "dataflow::sanity_check cannot feed a non-temp to rustc_peek.",
+ );
+ return None;
+ }
+ };
+
+ return Some(PeekCall { arg, kind, span });
+ }
+ }
+
+ None
+ }
+}
+
+pub trait RustcPeekAt<'tcx>: Analysis<'tcx> {
+ fn peek_at(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ place: mir::Place<'tcx>,
+ flow_state: &Self::Domain,
+ call: PeekCall,
+ );
+}
+
+impl<'tcx, A, D> RustcPeekAt<'tcx> for A
+where
+ A: Analysis<'tcx, Domain = D> + HasMoveData<'tcx>,
+ D: JoinSemiLattice + Clone + BitSetExt<MovePathIndex>,
+{
+ fn peek_at(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ place: mir::Place<'tcx>,
+ flow_state: &Self::Domain,
+ call: PeekCall,
+ ) {
+ match self.move_data().rev_lookup.find(place.as_ref()) {
+ LookupResult::Exact(peek_mpi) => {
+ let bit_state = flow_state.contains(peek_mpi);
+ debug!("rustc_peek({:?} = &{:?}) bit_state: {}", call.arg, place, bit_state);
+ if !bit_state {
+ tcx.sess.span_err(call.span, "rustc_peek: bit not set");
+ }
+ }
+
+ LookupResult::Parent(..) => {
+ tcx.sess.span_err(call.span, "rustc_peek: argument untracked");
+ }
+ }
+ }
+}
+
+impl<'tcx> RustcPeekAt<'tcx> for MaybeLiveLocals {
+ fn peek_at(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ place: mir::Place<'tcx>,
+ flow_state: &ChunkedBitSet<Local>,
+ call: PeekCall,
+ ) {
+ info!(?place, "peek_at");
+ let Some(local) = place.as_local() else {
+ tcx.sess.span_err(call.span, "rustc_peek: argument was not a local");
+ return;
+ };
+
+ if !flow_state.contains(local) {
+ tcx.sess.span_err(call.span, "rustc_peek: bit not set");
+ }
+ }
+}
diff --git a/compiler/rustc_mir_dataflow/src/storage.rs b/compiler/rustc_mir_dataflow/src/storage.rs
new file mode 100644
index 000000000..c909648ea
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/storage.rs
@@ -0,0 +1,20 @@
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::{self, Local};
+
+/// The set of locals in a MIR body that do not have `StorageLive`/`StorageDead` annotations.
+///
+/// These locals have fixed storage for the duration of the body.
+pub fn always_storage_live_locals(body: &mir::Body<'_>) -> BitSet<Local> {
+ let mut always_live_locals = BitSet::new_filled(body.local_decls.len());
+
+ for block in body.basic_blocks() {
+ for statement in &block.statements {
+ use mir::StatementKind::{StorageDead, StorageLive};
+ if let StorageLive(l) | StorageDead(l) = statement.kind {
+ always_live_locals.remove(l);
+ }
+ }
+ }
+
+ always_live_locals
+}
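+
+// A hedged usage sketch: this set typically seeds `MaybeStorageLive`, e.g.
+//
+//     let always_live = always_storage_live_locals(body);
+//     let analysis = crate::impls::MaybeStorageLive::new(always_live);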
diff --git a/compiler/rustc_mir_dataflow/src/un_derefer.rs b/compiler/rustc_mir_dataflow/src/un_derefer.rs
new file mode 100644
index 000000000..7e6e25cc6
--- /dev/null
+++ b/compiler/rustc_mir_dataflow/src/un_derefer.rs
@@ -0,0 +1,22 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+/// Used for reverting changes made by `DerefSeparator`.
+pub struct UnDerefer<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub derefer_sidetable: FxHashMap<Local, Place<'tcx>>,
+}
+
+impl<'tcx> UnDerefer<'tcx> {
+ #[inline]
+ pub fn derefer(&self, place: PlaceRef<'tcx>, body: &Body<'tcx>) -> Option<Place<'tcx>> {
+ let reffed = self.derefer_sidetable.get(&place.local)?;
+
+ let new_place = reffed.project_deeper(place.projection, self.tcx);
+ if body.local_decls[new_place.local].is_deref_temp() {
+ return self.derefer(new_place.as_ref(), body);
+ }
+ Some(new_place)
+ }
+}
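+
+// Hedged example: if `DerefSeparator` emitted `_t = CopyForDeref((*_1))`,
+// the sidetable maps `_t => (*_1)`, and `derefer` rewrites a place such as
+// `(_t.0)` back to `((*_1).0)`, recursing while the new base is itself a
+// deref temp.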
diff --git a/compiler/rustc_mir_transform/Cargo.toml b/compiler/rustc_mir_transform/Cargo.toml
new file mode 100644
index 000000000..85b7a4af5
--- /dev/null
+++ b/compiler/rustc_mir_transform/Cargo.toml
@@ -0,0 +1,29 @@
+[package]
+name = "rustc_mir_transform"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+itertools = "0.10.1"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_const_eval = { path = "../rustc_const_eval" }
+rustc_mir_dataflow = { path = "../rustc_mir_dataflow" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
+rustc_target = { path = "../rustc_target" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_span = { path = "../rustc_span" }
+
+[dev-dependencies]
+coverage_test_macros = { path = "src/coverage/test_macros" }
diff --git a/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
new file mode 100644
index 000000000..2502e8b60
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/abort_unwinding_calls.rs
@@ -0,0 +1,140 @@
+use crate::MirPass;
+use rustc_ast::InlineAsmOptions;
+use rustc_middle::mir::*;
+use rustc_middle::ty::layout;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::spec::abi::Abi;
+use rustc_target::spec::PanicStrategy;
+
+/// A pass that ensures the codegen guarantees about unwinding are upheld
+/// for compilations of panic=abort programs.
+///
+/// When compiling with panic=abort, codegen backends generally want to assume
+/// that all Rust-defined functions do not unwind, and it's UB if they actually
+/// do unwind. Foreign functions, however, can be declared as "may unwind" via
+/// their ABI (e.g. `extern "C-unwind"`). To uphold the guarantee that
+/// Rust-defined functions never unwind, a well-behaved Rust program needs to
+/// catch unwinding from foreign functions and force it to abort.
+///
+/// This pass walks over all function calls which may possibly unwind,
+/// and if any are found, sets their cleanup to a block that aborts the
+/// process. This forces all unwinds in foreign code to trigger a process
+/// abort when compiling in panic=abort mode.
+#[derive(PartialEq)]
+pub struct AbortUnwindingCalls;
+
+impl<'tcx> MirPass<'tcx> for AbortUnwindingCalls {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let def_id = body.source.def_id();
+ let kind = tcx.def_kind(def_id);
+
+ // We don't simplify the MIR of constants at this time because doing
+ // so results in a cyclic query when we call `tcx.type_of` below.
+ if !kind.is_fn_like() {
+ return;
+ }
+
+ // This pass only runs on functions which themselves cannot unwind,
+ // forcibly changing the body of the function to structurally provide
+ // this guarantee by aborting on an unwind. If this function can unwind,
+ // then there's nothing to do because it already should work correctly.
+ //
+ // Here we test for this function itself whether its ABI allows
+ // unwinding or not.
+ let body_ty = tcx.type_of(def_id);
+ let body_abi = match body_ty.kind() {
+ ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
+ ty::Closure(..) => Abi::RustCall,
+ ty::Generator(..) => Abi::Rust,
+ _ => span_bug!(body.span, "unexpected body ty: {:?}", body_ty),
+ };
+ let body_can_unwind = layout::fn_can_unwind(tcx, Some(def_id), body_abi);
+
+ // Look in this function body for any basic blocks which are terminated
+ // with a function call, and whose function we're calling may unwind.
+ // This will filter to functions with `extern "C-unwind"` ABIs, for
+ // example.
+ let mut calls_to_terminate = Vec::new();
+ let mut cleanups_to_remove = Vec::new();
+ for (id, block) in body.basic_blocks().iter_enumerated() {
+ if block.is_cleanup {
+ continue;
+ }
+ let Some(terminator) = &block.terminator else { continue };
+ let span = terminator.source_info.span;
+
+ let call_can_unwind = match &terminator.kind {
+ TerminatorKind::Call { func, .. } => {
+ let ty = func.ty(body, tcx);
+ let sig = ty.fn_sig(tcx);
+ let fn_def_id = match ty.kind() {
+ ty::FnPtr(_) => None,
+ &ty::FnDef(def_id, _) => Some(def_id),
+ _ => span_bug!(span, "invalid callee of type {:?}", ty),
+ };
+ layout::fn_can_unwind(tcx, fn_def_id, sig.abi())
+ }
+ TerminatorKind::Drop { .. } | TerminatorKind::DropAndReplace { .. } => {
+ tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Unwind
+ && layout::fn_can_unwind(tcx, None, Abi::Rust)
+ }
+ TerminatorKind::Assert { .. } | TerminatorKind::FalseUnwind { .. } => {
+ layout::fn_can_unwind(tcx, None, Abi::Rust)
+ }
+ TerminatorKind::InlineAsm { options, .. } => {
+ options.contains(InlineAsmOptions::MAY_UNWIND)
+ }
+ _ if terminator.unwind().is_some() => {
+ span_bug!(span, "unexpected terminator that may unwind {:?}", terminator)
+ }
+ _ => continue,
+ };
+
+ // If this function call can't unwind, then there's no need for it
+ // to have a landing pad. This means that we can remove any cleanup
+ // registered for it.
+ if !call_can_unwind {
+ cleanups_to_remove.push(id);
+ continue;
+ }
+
+ // Otherwise if this function can unwind, then if the outer function
+ // can also unwind there's nothing to do. If the outer function
+ // can't unwind, however, we need to change the landing pad for this
+ // function call to one that aborts.
+ if !body_can_unwind {
+ calls_to_terminate.push(id);
+ }
+ }
+
+ // For call instructions which need to be terminated, we insert a
+ // single basic block whose terminator simply aborts, and then point the
+ // `cleanup` edge of every call we found at that new block. Any unwinding
+ // that happens in those calls will then force an abort of the process.
+ if !calls_to_terminate.is_empty() {
+ let bb = BasicBlockData {
+ statements: Vec::new(),
+ is_cleanup: true,
+ terminator: Some(Terminator {
+ source_info: SourceInfo::outermost(body.span),
+ kind: TerminatorKind::Abort,
+ }),
+ };
+ let abort_bb = body.basic_blocks_mut().push(bb);
+
+ for bb in calls_to_terminate {
+ let cleanup = body.basic_blocks_mut()[bb].terminator_mut().unwind_mut().unwrap();
+ *cleanup = Some(abort_bb);
+ }
+ }
+
+ for id in cleanups_to_remove {
+ let cleanup = body.basic_blocks_mut()[id].terminator_mut().unwind_mut().unwrap();
+ *cleanup = None;
+ }
+
+ // We may have invalidated some `cleanup` blocks so clean those up now.
+ super::simplify::remove_dead_blocks(tcx, body);
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs
new file mode 100644
index 000000000..f12c8560c
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/add_call_guards.rs
@@ -0,0 +1,81 @@
+use crate::MirPass;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+#[derive(PartialEq)]
+pub enum AddCallGuards {
+ AllCallEdges,
+ CriticalCallEdges,
+}
+pub use self::AddCallGuards::*;
+
+/**
+ * Breaks outgoing critical edges for call terminators in the MIR.
+ *
+ * Critical edges are edges that are neither the only edge leaving a
+ * block, nor the only edge entering one.
+ *
+ * When you want something to happen "along" an edge, you can either
+ * do it at the end of the predecessor block, or at the start of the
+ * successor block. Critical edges have to be broken in order to prevent
+ * "edge actions" from affecting other edges. We need this for calls that are
+ * codegened to LLVM invoke instructions, because invoke is a block terminator
+ * in LLVM so we can't insert any code to handle the call's result into the
+ * block that performs the call.
+ *
+ * This function will break those edges by inserting new blocks along them.
+ *
+ * NOTE: Simplify CFG will happily undo most of the work this pass does.
+ *
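+ * For illustration, some hypothetical MIR (made up for this comment):
+ * breaking the critical return edge bb0 -> bb2 turns
+ *
+ *     bb0: { _1 = f() -> [return: bb2, unwind: bb3]; }
+ *     bb1: { goto -> bb2; }
+ *
+ * into
+ *
+ *     bb0: { _1 = f() -> [return: bb4, unwind: bb3]; }
+ *     bb1: { goto -> bb2; }
+ *     bb4: { goto -> bb2; } // the new guard block, with a single predecessor
+ *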
+ */
+
+impl<'tcx> MirPass<'tcx> for AddCallGuards {
+ fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ self.add_call_guards(body);
+ }
+}
+
+impl AddCallGuards {
+ pub fn add_call_guards(&self, body: &mut Body<'_>) {
+ let mut pred_count: IndexVec<_, _> =
+ body.basic_blocks.predecessors().iter().map(|ps| ps.len()).collect();
+ pred_count[START_BLOCK] += 1;
+
+ // We need a place to store the new blocks generated
+ let mut new_blocks = Vec::new();
+
+ let cur_len = body.basic_blocks().len();
+
+ for block in body.basic_blocks_mut() {
+ match block.terminator {
+ Some(Terminator {
+ kind: TerminatorKind::Call { target: Some(ref mut destination), cleanup, .. },
+ source_info,
+ }) if pred_count[*destination] > 1
+ && (cleanup.is_some() || self == &AllCallEdges) =>
+ {
+ // It's a critical edge, break it
+ let call_guard = BasicBlockData {
+ statements: vec![],
+ is_cleanup: block.is_cleanup,
+ terminator: Some(Terminator {
+ source_info,
+ kind: TerminatorKind::Goto { target: *destination },
+ }),
+ };
+
+ // Get the index it will be when inserted into the MIR
+ let idx = cur_len + new_blocks.len();
+ new_blocks.push(call_guard);
+ *destination = BasicBlock::new(idx);
+ }
+ _ => {}
+ }
+ }
+
+ debug!("broke {} call edges", new_blocks.len());
+
+ body.basic_blocks_mut().extend(new_blocks);
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
new file mode 100644
index 000000000..8de0aad04
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/add_moves_for_packed_drops.rs
@@ -0,0 +1,107 @@
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+use crate::util;
+use crate::MirPass;
+use rustc_middle::mir::patch::MirPatch;
+
+// This pass moves values being dropped that are within a packed
+// struct to a separate local before dropping them, to ensure that
+// they are dropped from an aligned address.
+//
+// For example, if we have something like
+// ```Rust
+// #[repr(packed)]
+// struct Foo {
+// dealign: u8,
+// data: Vec<u8>
+// }
+//
+// let foo = ...;
+// ```
+//
+// We want to call `drop_in_place::<Vec<u8>>` on `data` from an aligned
+// address. This means we can't simply drop `foo.data` directly, because
+// its address is not aligned.
+//
+// Instead, we move `foo.data` to a local and drop that:
+// ```
+// storage.live(drop_temp)
+// drop_temp = foo.data;
+// drop(drop_temp) -> next
+// next:
+// storage.dead(drop_temp)
+// ```
+//
+// The storage instructions are required to avoid stack space
+// blowup.
+
+pub struct AddMovesForPackedDrops;
+
+impl<'tcx> MirPass<'tcx> for AddMovesForPackedDrops {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ debug!("add_moves_for_packed_drops({:?} @ {:?})", body.source, body.span);
+ add_moves_for_packed_drops(tcx, body);
+ }
+}
+
+pub fn add_moves_for_packed_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let patch = add_moves_for_packed_drops_patch(tcx, body);
+ patch.apply(body);
+}
+
+fn add_moves_for_packed_drops_patch<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> MirPatch<'tcx> {
+ let def_id = body.source.def_id();
+ let mut patch = MirPatch::new(body);
+ let param_env = tcx.param_env(def_id);
+
+ for (bb, data) in body.basic_blocks().iter_enumerated() {
+ let loc = Location { block: bb, statement_index: data.statements.len() };
+ let terminator = data.terminator();
+
+ match terminator.kind {
+ TerminatorKind::Drop { place, .. }
+ if util::is_disaligned(tcx, body, param_env, place) =>
+ {
+ add_move_for_packed_drop(tcx, body, &mut patch, terminator, loc, data.is_cleanup);
+ }
+ TerminatorKind::DropAndReplace { .. } => {
+ span_bug!(terminator.source_info.span, "replace in AddMovesForPackedDrops");
+ }
+ _ => {}
+ }
+ }
+
+ patch
+}
+
+fn add_move_for_packed_drop<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ patch: &mut MirPatch<'tcx>,
+ terminator: &Terminator<'tcx>,
+ loc: Location,
+ is_cleanup: bool,
+) {
+ debug!("add_move_for_packed_drop({:?} @ {:?})", terminator, loc);
+ let TerminatorKind::Drop { ref place, target, unwind } = terminator.kind else {
+ unreachable!();
+ };
+
+ let source_info = terminator.source_info;
+ let ty = place.ty(body, tcx).ty;
+ let temp = patch.new_temp(ty, terminator.source_info.span);
+
+ let storage_dead_block = patch.new_block(BasicBlockData {
+ statements: vec![Statement { source_info, kind: StatementKind::StorageDead(temp) }],
+ terminator: Some(Terminator { source_info, kind: TerminatorKind::Goto { target } }),
+ is_cleanup,
+ });
+
+ patch.add_statement(loc, StatementKind::StorageLive(temp));
+ patch.add_assign(loc, Place::from(temp), Rvalue::Use(Operand::Move(*place)));
+ patch.patch_terminator(
+ loc.block,
+ TerminatorKind::Drop { place: Place::from(temp), target: storage_dead_block, unwind },
+ );
+}
diff --git a/compiler/rustc_mir_transform/src/add_retag.rs b/compiler/rustc_mir_transform/src/add_retag.rs
new file mode 100644
index 000000000..9c5896c4e
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/add_retag.rs
@@ -0,0 +1,186 @@
+//! This pass adds validation calls (AcquireValid, ReleaseValid) where appropriate.
+//! It has to be run really early, before transformations like inlining, because
+//! introducing these calls *adds* UB -- so, conceptually, this pass is actually part
+//! of MIR building, and only after this pass do we think of the program as having the
+//! normal MIR semantics.
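+//!
+//! For illustration, roughly what this pass emits (hypothetical MIR, made up
+//! for this comment): an assignment that creates a reference is followed by a
+//! retag of the written-to place:
+//!
+//! ```rust,ignore (illustrative)
+//! _2 = &mut _1;
+//! Retag(_2);
+//! ```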
+
+use crate::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+pub struct AddRetag;
+
+/// Determines whether this place is "stable": whether, if we evaluate it again
+/// after the assignment, we can be sure to obtain the same place value.
+/// (Concurrent accesses by other threads are no problem as these are anyway non-atomic
+/// copies. Data races are UB.)
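+///
+/// For illustration (hypothetical places): `_1.field` is stable, while
+/// `(*_2).field` is not, since a write to `_2` changes which place the
+/// projection denotes.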
+fn is_stable(place: PlaceRef<'_>) -> bool {
+ // Which place this evaluates to can change with any memory write,
+ // Which place this evaluates to can change with any memory write,
+ // so we cannot assume a deref to be stable.
+}
+
+/// Determine whether this type may contain a reference (or box), and thus needs retagging.
+/// We will only recurse `depth` times into Tuples/ADTs to bound the cost of this.
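+///
+/// For illustration (hypothetical types): `&mut u8` and `(u8, Box<u8>)` may
+/// contain references, while `*const u8` and `[u8; 16]` cannot.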
+fn may_contain_reference<'tcx>(ty: Ty<'tcx>, depth: u32, tcx: TyCtxt<'tcx>) -> bool {
+ match ty.kind() {
+ // Primitive types that are not references
+ ty::Bool
+ | ty::Char
+ | ty::Float(_)
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::RawPtr(..)
+ | ty::FnPtr(..)
+ | ty::Str
+ | ty::FnDef(..)
+ | ty::Never => false,
+ // References
+ ty::Ref(..) => true,
+ ty::Adt(..) if ty.is_box() => true,
+ // Compound types: recurse
+ ty::Array(ty, _) | ty::Slice(ty) => {
+ // This does not branch so we keep the depth the same.
+ may_contain_reference(*ty, depth, tcx)
+ }
+ ty::Tuple(tys) => {
+ depth == 0 || tys.iter().any(|ty| may_contain_reference(ty, depth - 1, tcx))
+ }
+ ty::Adt(adt, subst) => {
+ depth == 0
+ || adt.variants().iter().any(|v| {
+ v.fields.iter().any(|f| may_contain_reference(f.ty(tcx, subst), depth - 1, tcx))
+ })
+ }
+ // Conservative fallback
+ _ => true,
+ }
+}
+
+impl<'tcx> MirPass<'tcx> for AddRetag {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.opts.unstable_opts.mir_emit_retag
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // We need an `AllCallEdges` pass before we can do any work.
+ super::add_call_guards::AllCallEdges.run_pass(tcx, body);
+
+ let (span, arg_count) = (body.span, body.arg_count);
+ let basic_blocks = body.basic_blocks.as_mut();
+ let local_decls = &body.local_decls;
+ let needs_retag = |place: &Place<'tcx>| {
+ // FIXME: Instead of giving up for unstable places, we should introduce
+ // a temporary and retag on that.
+ is_stable(place.as_ref())
+ && may_contain_reference(place.ty(&*local_decls, tcx).ty, /*depth*/ 3, tcx)
+ && !local_decls[place.local].is_deref_temp()
+ };
+ let place_base_raw = |place: &Place<'tcx>| {
+ // If this is a `Deref`, get the type of what we are deref'ing.
+ if place.has_deref() {
+ let ty = &local_decls[place.local].ty;
+ ty.is_unsafe_ptr()
+ } else {
+ // Not a deref, and thus not raw.
+ false
+ }
+ };
+
+ // PART 1
+ // Retag arguments at the beginning of the start block.
+ {
+ // FIXME: Consider using just the span covering the function
+ // argument declaration.
+ let source_info = SourceInfo::outermost(span);
+ // Gather all arguments, skip return value.
+ let places = local_decls
+ .iter_enumerated()
+ .skip(1)
+ .take(arg_count)
+ .map(|(local, _)| Place::from(local))
+ .filter(needs_retag);
+ // Emit their retags.
+ basic_blocks[START_BLOCK].statements.splice(
+ 0..0,
+ places.map(|place| Statement {
+ source_info,
+ kind: StatementKind::Retag(RetagKind::FnEntry, Box::new(place)),
+ }),
+ );
+ }
+
+ // PART 2
+ // Retag return values of functions.
+ // We collect the return destinations because we cannot mutate while iterating.
+ let returns = basic_blocks
+ .iter_mut()
+ .filter_map(|block_data| {
+ match block_data.terminator().kind {
+ TerminatorKind::Call { target: Some(target), destination, .. }
+ if needs_retag(&destination) =>
+ {
+ // Remember the return destination for later
+ Some((block_data.terminator().source_info, destination, target))
+ }
+
+ // `Drop` is also a call, but it doesn't return anything so we are good.
+ TerminatorKind::Drop { .. } | TerminatorKind::DropAndReplace { .. } => None,
+ // Not a block ending in a Call -> ignore.
+ _ => None,
+ }
+ })
+ .collect::<Vec<_>>();
+ // Now we go over the returns we collected to retag the return values.
+ for (source_info, dest_place, dest_block) in returns {
+ basic_blocks[dest_block].statements.insert(
+ 0,
+ Statement {
+ source_info,
+ kind: StatementKind::Retag(RetagKind::Default, Box::new(dest_place)),
+ },
+ );
+ }
+
+ // PART 3
+ // Add retag after assignment.
+ for block_data in basic_blocks {
+ // We want to insert statements as we iterate. To this end, we
+ // iterate backwards using indices.
+ for i in (0..block_data.statements.len()).rev() {
+ let (retag_kind, place) = match block_data.statements[i].kind {
+ // Retag-as-raw after escaping to a raw pointer, if the referent
+ // is not already a raw pointer.
+ StatementKind::Assign(box (lplace, Rvalue::AddressOf(_, ref rplace)))
+ if !place_base_raw(rplace) =>
+ {
+ (RetagKind::Raw, lplace)
+ }
+ // Retag after assignments of reference type.
+ StatementKind::Assign(box (ref place, ref rvalue)) if needs_retag(place) => {
+ let kind = match rvalue {
+ Rvalue::Ref(_, borrow_kind, _)
+ if borrow_kind.allows_two_phase_borrow() =>
+ {
+ RetagKind::TwoPhase
+ }
+ _ => RetagKind::Default,
+ };
+ (kind, *place)
+ }
+ // Do nothing for the rest
+ _ => continue,
+ };
+ // Insert a retag after the statement.
+ let source_info = block_data.statements[i].source_info;
+ block_data.statements.insert(
+ i + 1,
+ Statement {
+ source_info,
+ kind: StatementKind::Retag(retag_kind, Box::new(place)),
+ },
+ );
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/check_const_item_mutation.rs b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
new file mode 100644
index 000000000..8838b14c5
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/check_const_item_mutation.rs
@@ -0,0 +1,156 @@
+use rustc_errors::{DiagnosticBuilder, LintDiagnosticBuilder};
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::lint::builtin::CONST_ITEM_MUTATION;
+use rustc_span::def_id::DefId;
+
+use crate::MirLint;
+
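+/// Lint for mutation through a `const` item. For illustration, a hypothetical
+/// example this lint fires on:
+///
+/// ```rust,ignore (illustrative)
+/// struct Counter { n: i32 }
+/// const COUNTER: Counter = Counter { n: 0 };
+///
+/// fn main() {
+///     COUNTER.n = 1; // each use of `COUNTER` creates a new temporary,
+///                    // so the original `const` item is never modified
+/// }
+/// ```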
+pub struct CheckConstItemMutation;
+
+impl<'tcx> MirLint<'tcx> for CheckConstItemMutation {
+ fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ let mut checker = ConstMutationChecker { body, tcx, target_local: None };
+ checker.visit_body(&body);
+ }
+}
+
+struct ConstMutationChecker<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ target_local: Option<Local>,
+}
+
+impl<'tcx> ConstMutationChecker<'_, 'tcx> {
+ fn is_const_item(&self, local: Local) -> Option<DefId> {
+ if let Some(box LocalInfo::ConstRef { def_id }) = self.body.local_decls[local].local_info {
+ Some(def_id)
+ } else {
+ None
+ }
+ }
+
+ fn is_const_item_without_destructor(&self, local: Local) -> Option<DefId> {
+ let def_id = self.is_const_item(local)?;
+
+ // We avoid linting mutation of a const item if the const's type has a
+ // Drop impl. The Drop logic observes the mutation which was performed.
+ //
+ // pub struct Log { msg: &'static str }
+ // pub const LOG: Log = Log { msg: "" };
+ // impl Drop for Log {
+ // fn drop(&mut self) { println!("{}", self.msg); }
+ // }
+ //
+ // LOG.msg = "wow"; // prints "wow"
+ //
+ // FIXME(https://github.com/rust-lang/rust/issues/77425):
+ // Drop this exception once there is a stable attribute to suppress the
+ // const item mutation lint for a single specific const only. Something
+ // equivalent to:
+ //
+ // #[const_mutation_allowed]
+ // pub const LOG: Log = Log { msg: "" };
+ match self.tcx.calculate_dtor(def_id, |_, _| Ok(())) {
+ Some(_) => None,
+ None => Some(def_id),
+ }
+ }
+
+ fn lint_const_item_usage(
+ &self,
+ place: &Place<'tcx>,
+ const_item: DefId,
+ location: Location,
+ decorate: impl for<'b> FnOnce(LintDiagnosticBuilder<'b, ()>) -> DiagnosticBuilder<'b, ()>,
+ ) {
+ // Don't lint on borrowing/assigning when a dereference is involved.
+ // If we 'leave' the temporary via a dereference, we must
+ // be modifying something else
+ //
+ // `unsafe { *FOO = 0; *BAR.field = 1; }`
+ // `unsafe { &mut *FOO }`
+ // `unsafe { (*ARRAY)[0] = val; }`
+ if !place.projection.iter().any(|p| matches!(p, PlaceElem::Deref)) {
+ let source_info = self.body.source_info(location);
+ let lint_root = self.body.source_scopes[source_info.scope]
+ .local_data
+ .as_ref()
+ .assert_crate_local()
+ .lint_root;
+
+ self.tcx.struct_span_lint_hir(
+ CONST_ITEM_MUTATION,
+ lint_root,
+ source_info.span,
+ |lint| {
+ decorate(lint)
+ .span_note(self.tcx.def_span(const_item), "`const` item defined here")
+ .emit();
+ },
+ );
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for ConstMutationChecker<'_, 'tcx> {
+ fn visit_statement(&mut self, stmt: &Statement<'tcx>, loc: Location) {
+ if let StatementKind::Assign(box (lhs, _)) = &stmt.kind {
+ // Check for assignment to fields of a constant
+ // Assigning directly to a constant (e.g. `FOO = true;`) is a hard error,
+ // so emitting a lint would be redundant.
+ if !lhs.projection.is_empty() {
+ if let Some(def_id) = self.is_const_item_without_destructor(lhs.local) {
+ self.lint_const_item_usage(&lhs, def_id, loc, |lint| {
+ let mut lint = lint.build("attempting to modify a `const` item");
+ lint.note("each usage of a `const` item creates a new temporary; the original `const` item will not be modified");
+ lint
+ })
+ }
+ }
+ // We are looking for MIR of the form:
+ //
+ // ```
+ // _1 = const FOO;
+ // _2 = &mut _1;
+ // method_call(_2, ..)
+ // ```
+ //
+ // Record our current LHS, so that we can detect this
+ // pattern in `visit_rvalue`
+ self.target_local = lhs.as_local();
+ }
+ self.super_statement(stmt, loc);
+ self.target_local = None;
+ }
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, loc: Location) {
+ if let Rvalue::Ref(_, BorrowKind::Mut { .. }, place) = rvalue {
+ let local = place.local;
+ if let Some(def_id) = self.is_const_item(local) {
+ // If this Rvalue is being used as the right-hand side of a
+ // `StatementKind::Assign`, see if it ends up getting used as
+ // the `self` parameter of a method call (as the terminator of our current
+ // BasicBlock). If so, we emit a more specific lint.
+ let method_did = self.target_local.and_then(|target_local| {
+ crate::util::find_self_call(self.tcx, &self.body, target_local, loc.block)
+ });
+ let lint_loc =
+ if method_did.is_some() { self.body.terminator_loc(loc.block) } else { loc };
+ self.lint_const_item_usage(place, def_id, lint_loc, |lint| {
+ let mut lint = lint.build("taking a mutable reference to a `const` item");
+ lint
+ .note("each usage of a `const` item creates a new temporary")
+ .note("the mutable reference will refer to this temporary, not the original `const` item");
+
+ if let Some((method_did, _substs)) = method_did {
+ lint.span_note(self.tcx.def_span(method_did), "mutable reference created due to call to this method");
+ }
+
+ lint
+ });
+ }
+ }
+ self.super_rvalue(rvalue, loc);
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/check_packed_ref.rs b/compiler/rustc_mir_transform/src/check_packed_ref.rs
new file mode 100644
index 000000000..3b7ba3f9a
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/check_packed_ref.rs
@@ -0,0 +1,108 @@
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::mir::visit::{PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::lint::builtin::UNALIGNED_REFERENCES;
+
+use crate::util;
+use crate::MirLint;
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { unsafe_derive_on_repr_packed, ..*providers };
+}
+
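+/// Lint for taking a reference to a field of a `#[repr(packed)]` struct. For
+/// illustration, a hypothetical example this lint fires on:
+///
+/// ```rust,ignore (illustrative)
+/// #[repr(packed)]
+/// struct Packed { a: u8, b: u32 }
+///
+/// fn main() {
+///     let p = Packed { a: 0, b: 0 };
+///     let r = &p.b; // reference to packed field is unaligned
+/// }
+/// ```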
+pub struct CheckPackedRef;
+
+impl<'tcx> MirLint<'tcx> for CheckPackedRef {
+ fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ let param_env = tcx.param_env(body.source.def_id());
+ let source_info = SourceInfo::outermost(body.span);
+ let mut checker = PackedRefChecker { body, tcx, param_env, source_info };
+ checker.visit_body(&body);
+ }
+}
+
+struct PackedRefChecker<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ source_info: SourceInfo,
+}
+
+fn unsafe_derive_on_repr_packed(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ let lint_hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ tcx.struct_span_lint_hir(UNALIGNED_REFERENCES, lint_hir_id, tcx.def_span(def_id), |lint| {
+ // FIXME: when we make this a hard error, this should have its
+ // own error code.
+ let extra = if tcx.generics_of(def_id).own_requires_monomorphization() {
+ "with type or const parameters"
+ } else {
+ "that does not derive `Copy`"
+ };
+ let message = format!(
+ "`{}` can't be derived on this `#[repr(packed)]` struct {}",
+ tcx.item_name(tcx.trait_id_of_impl(def_id.to_def_id()).expect("derived trait name")),
+ extra
+ );
+ lint.build(message).emit();
+ });
+}
+
+impl<'tcx> Visitor<'tcx> for PackedRefChecker<'_, 'tcx> {
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ // Make sure we know where in the MIR we are.
+ self.source_info = terminator.source_info;
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ // Make sure we know where in the MIR we are.
+ self.source_info = statement.source_info;
+ self.super_statement(statement, location);
+ }
+
+ fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location: Location) {
+ if context.is_borrow() {
+ if util::is_disaligned(self.tcx, self.body, self.param_env, *place) {
+ let def_id = self.body.source.instance.def_id();
+ if let Some(impl_def_id) = self
+ .tcx
+ .impl_of_method(def_id)
+ .filter(|&def_id| self.tcx.is_builtin_derive(def_id))
+ {
+ // If a method is defined in the local crate,
+ // the impl containing that method should also be.
+ self.tcx.ensure().unsafe_derive_on_repr_packed(impl_def_id.expect_local());
+ } else {
+ let source_info = self.source_info;
+ let lint_root = self.body.source_scopes[source_info.scope]
+ .local_data
+ .as_ref()
+ .assert_crate_local()
+ .lint_root;
+ self.tcx.struct_span_lint_hir(
+ UNALIGNED_REFERENCES,
+ lint_root,
+ source_info.span,
+ |lint| {
+ lint.build("reference to packed field is unaligned")
+ .note(
+ "fields of packed structs are not properly aligned, and creating \
+ a misaligned reference is undefined behavior (even if that \
+ reference is never dereferenced)",
+ )
+ .help(
+ "copy the field contents to a local variable, or replace the \
+ reference with a raw pointer and use `read_unaligned`/`write_unaligned` \
+ (loads and stores via `*p` must be properly aligned even when using raw pointers)"
+ )
+ .emit();
+ },
+ );
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/check_unsafety.rs b/compiler/rustc_mir_transform/src/check_unsafety.rs
new file mode 100644
index 000000000..d564f4801
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/check_unsafety.rs
@@ -0,0 +1,619 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::hir_id::HirId;
+use rustc_hir::intravisit;
+use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_middle::{lint, mir::*};
+use rustc_session::lint::builtin::{UNSAFE_OP_IN_UNSAFE_FN, UNUSED_UNSAFE};
+use rustc_session::lint::Level;
+
+use std::collections::hash_map;
+use std::ops::Bound;
+
+pub struct UnsafetyChecker<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ body_did: LocalDefId,
+ violations: Vec<UnsafetyViolation>,
+ source_info: SourceInfo,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+
+ /// Used `unsafe` blocks in this function. This is used for the "unused_unsafe" lint.
+ ///
+ /// The keys are the used `unsafe` blocks, and the `UsedUnsafeBlockData` value
+ /// indicates whether or not any of the usages happen at a place that doesn't
+ /// allow `unsafe_op_in_unsafe_fn`.
+ used_unsafe_blocks: FxHashMap<HirId, UsedUnsafeBlockData>,
+}
+
+impl<'a, 'tcx> UnsafetyChecker<'a, 'tcx> {
+ fn new(
+ body: &'a Body<'tcx>,
+ body_did: LocalDefId,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ Self {
+ body,
+ body_did,
+ violations: vec![],
+ source_info: SourceInfo::outermost(body.span),
+ tcx,
+ param_env,
+ used_unsafe_blocks: Default::default(),
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for UnsafetyChecker<'_, 'tcx> {
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ self.source_info = terminator.source_info;
+ match terminator.kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => {
+ // safe (at least as emitted during MIR construction)
+ }
+
+ TerminatorKind::Call { ref func, .. } => {
+ let func_ty = func.ty(self.body, self.tcx);
+ let func_id =
+ if let ty::FnDef(func_id, _) = func_ty.kind() { Some(func_id) } else { None };
+ let sig = func_ty.fn_sig(self.tcx);
+ if let hir::Unsafety::Unsafe = sig.unsafety() {
+ self.require_unsafe(
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::CallToUnsafeFunction,
+ )
+ }
+
+ if let Some(func_id) = func_id {
+ self.check_target_features(*func_id);
+ }
+ }
+
+ TerminatorKind::InlineAsm { .. } => self.require_unsafe(
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::UseOfInlineAssembly,
+ ),
+ }
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ self.source_info = statement.source_info;
+ match statement.kind {
+ StatementKind::Assign(..)
+ | StatementKind::FakeRead(..)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::StorageLive(..)
+ | StatementKind::StorageDead(..)
+ | StatementKind::Retag { .. }
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::Nop => {
+ // safe (at least as emitted during MIR construction)
+ }
+
+ StatementKind::CopyNonOverlapping(..) => unreachable!(),
+ }
+ self.super_statement(statement, location);
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ match rvalue {
+ Rvalue::Aggregate(box ref aggregate, _) => match aggregate {
+ &AggregateKind::Array(..) | &AggregateKind::Tuple => {}
+ &AggregateKind::Adt(adt_did, ..) => {
+ match self.tcx.layout_scalar_valid_range(adt_did) {
+ (Bound::Unbounded, Bound::Unbounded) => {}
+ _ => self.require_unsafe(
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::InitializingTypeWith,
+ ),
+ }
+ }
+ &AggregateKind::Closure(def_id, _) | &AggregateKind::Generator(def_id, _, _) => {
+ let UnsafetyCheckResult { violations, used_unsafe_blocks, .. } =
+ self.tcx.unsafety_check_result(def_id);
+ self.register_violations(
+ violations,
+ used_unsafe_blocks.iter().map(|(&h, &d)| (h, d)),
+ );
+ }
+ },
+ _ => {}
+ }
+ self.super_rvalue(rvalue, location);
+ }
+
+ fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, _location: Location) {
+ // On types with `scalar_valid_range`, prevent
+ // * `&mut x.field`
+ // * `x.field = y;`
+ // * `&x.field` if `field`'s type has interior mutability
+ // because any of these would allow modifying the layout constrained field and
+ // inserting values that violate the layout constraints.
+ if context.is_mutating_use() || context.is_borrow() {
+ self.check_mut_borrowing_layout_constrained_field(*place, context.is_mutating_use());
+ }
+
+ // Some checks below need the extra meta info of the local declaration.
+ let decl = &self.body.local_decls[place.local];
+
+ // Check the base local: it might be an unsafe-to-access static. We only check derefs of the
+ // temporary holding the static pointer to avoid duplicate errors
+ // <https://github.com/rust-lang/rust/pull/78068#issuecomment-731753506>.
+ if decl.internal && place.projection.first() == Some(&ProjectionElem::Deref) {
+ // If the projection root is an artificial local that we introduced when
+ // desugaring `static`, give a more specific error message
+ // (avoid the general "raw pointer" clause below, that would only be confusing).
+ if let Some(box LocalInfo::StaticRef { def_id, .. }) = decl.local_info {
+ if self.tcx.is_mutable_static(def_id) {
+ self.require_unsafe(
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::UseOfMutableStatic,
+ );
+ return;
+ } else if self.tcx.is_foreign_item(def_id) {
+ self.require_unsafe(
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::UseOfExternStatic,
+ );
+ return;
+ }
+ }
+ }
+
+ // Check for raw pointer `Deref`.
+ for (base, proj) in place.iter_projections() {
+ if proj == ProjectionElem::Deref {
+ let base_ty = base.ty(self.body, self.tcx).ty;
+ if base_ty.is_unsafe_ptr() {
+ self.require_unsafe(
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::DerefOfRawPointer,
+ )
+ }
+ }
+ }
+
+ // Check for union fields. For this we traverse right-to-left, as the last `Deref` changes
+ // whether we *read* the union field or potentially *write* to it (if this place is being assigned to).
+ let mut saw_deref = false;
+ for (base, proj) in place.iter_projections().rev() {
+ if proj == ProjectionElem::Deref {
+ saw_deref = true;
+ continue;
+ }
+
+ let base_ty = base.ty(self.body, self.tcx).ty;
+ if base_ty.is_union() {
+ // If we did not hit a `Deref` yet and the overall place use is an assignment, the
+ // rules are different.
+ let assign_to_field = !saw_deref
+ && matches!(
+ context,
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Store
+ | MutatingUseContext::Drop
+ | MutatingUseContext::AsmOutput
+ )
+ );
+ // If this is just an assignment, determine if the assigned type needs dropping.
+ if assign_to_field {
+ // We have to check the actual type of the assignment, as that determines if the
+ // old value is being dropped.
+ let assigned_ty = place.ty(&self.body.local_decls, self.tcx).ty;
+ if assigned_ty.needs_drop(self.tcx, self.param_env) {
+ // This would be unsafe, but should be outright impossible since we reject such unions.
+ self.tcx.sess.delay_span_bug(
+ self.source_info.span,
+ format!("union fields that need dropping should be impossible: {assigned_ty}")
+ );
+ }
+ } else {
+ self.require_unsafe(
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::AccessToUnionField,
+ )
+ }
+ }
+ }
+ }
+}
+
+impl<'tcx> UnsafetyChecker<'_, 'tcx> {
+ fn require_unsafe(&mut self, kind: UnsafetyViolationKind, details: UnsafetyViolationDetails) {
+ // Violations can turn out to be `UnsafeFn` during analysis, but they should not start out as such.
+ assert_ne!(kind, UnsafetyViolationKind::UnsafeFn);
+
+ let source_info = self.source_info;
+ let lint_root = self.body.source_scopes[self.source_info.scope]
+ .local_data
+ .as_ref()
+ .assert_crate_local()
+ .lint_root;
+ self.register_violations(
+ [&UnsafetyViolation { source_info, lint_root, kind, details }],
+ [],
+ );
+ }
+
+ fn register_violations<'a>(
+ &mut self,
+ violations: impl IntoIterator<Item = &'a UnsafetyViolation>,
+ new_used_unsafe_blocks: impl IntoIterator<Item = (HirId, UsedUnsafeBlockData)>,
+ ) {
+ use UsedUnsafeBlockData::{AllAllowedInUnsafeFn, SomeDisallowedInUnsafeFn};
+
+ let update_entry = |this: &mut Self, hir_id, new_usage| {
+ match this.used_unsafe_blocks.entry(hir_id) {
+ hash_map::Entry::Occupied(mut entry) => {
+ if new_usage == SomeDisallowedInUnsafeFn {
+ *entry.get_mut() = SomeDisallowedInUnsafeFn;
+ }
+ }
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(new_usage);
+ }
+ };
+ };
+ let safety = self.body.source_scopes[self.source_info.scope]
+ .local_data
+ .as_ref()
+ .assert_crate_local()
+ .safety;
+ match safety {
+ // `unsafe` blocks are required in safe code
+ Safety::Safe => violations.into_iter().for_each(|&violation| {
+ match violation.kind {
+ UnsafetyViolationKind::General => {}
+ UnsafetyViolationKind::UnsafeFn => {
+ bug!("`UnsafetyViolationKind::UnsafeFn` in a `Safe` context")
+ }
+ }
+ if !self.violations.contains(&violation) {
+ self.violations.push(violation)
+ }
+ }),
+ // With RFC 2585, `unsafe` operations are no longer implicitly allowed in `unsafe fn`s
+ Safety::FnUnsafe => violations.into_iter().for_each(|&(mut violation)| {
+ violation.kind = UnsafetyViolationKind::UnsafeFn;
+ if !self.violations.contains(&violation) {
+ self.violations.push(violation)
+ }
+ }),
+ Safety::BuiltinUnsafe => {}
+ Safety::ExplicitUnsafe(hir_id) => violations.into_iter().for_each(|violation| {
+ update_entry(
+ self,
+ hir_id,
+ match self.tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, violation.lint_root).0
+ {
+ Level::Allow => AllAllowedInUnsafeFn(violation.lint_root),
+ _ => SomeDisallowedInUnsafeFn,
+ },
+ )
+ }),
+ };
+
+ new_used_unsafe_blocks
+ .into_iter()
+ .for_each(|(hir_id, usage_data)| update_entry(self, hir_id, usage_data));
+ }
+ fn check_mut_borrowing_layout_constrained_field(
+ &mut self,
+ place: Place<'tcx>,
+ is_mut_use: bool,
+ ) {
+ for (place_base, elem) in place.iter_projections().rev() {
+ match elem {
+ // Modifications behind a dereference don't affect the value of
+ // the pointer.
+ ProjectionElem::Deref => return,
+ ProjectionElem::Field(..) => {
+ let ty = place_base.ty(&self.body.local_decls, self.tcx).ty;
+ if let ty::Adt(def, _) = ty.kind() {
+ if self.tcx.layout_scalar_valid_range(def.did())
+ != (Bound::Unbounded, Bound::Unbounded)
+ {
+ let details = if is_mut_use {
+ UnsafetyViolationDetails::MutationOfLayoutConstrainedField
+
+ // Check `is_freeze` as late as possible to avoid cycle errors
+ // with opaque types.
+ } else if !place
+ .ty(self.body, self.tcx)
+ .ty
+ .is_freeze(self.tcx.at(self.source_info.span), self.param_env)
+ {
+ UnsafetyViolationDetails::BorrowOfLayoutConstrainedField
+ } else {
+ continue;
+ };
+ self.require_unsafe(UnsafetyViolationKind::General, details);
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+
+ /// Checks whether calling `func_did` needs an `unsafe` context or not, i.e. whether
+ /// the called function has target features the calling function hasn't.
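+ ///
+ /// For illustration, a hypothetical example (the names are made up):
+ ///
+ /// ```rust,ignore (illustrative)
+ /// #[target_feature(enable = "avx2")]
+ /// unsafe fn dot() {}
+ ///
+ /// fn caller() {
+ ///     // `caller` lacks `avx2`, so this call requires an `unsafe` block.
+ ///     unsafe { dot() }
+ /// }
+ /// ```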
+ fn check_target_features(&mut self, func_did: DefId) {
+ // Unsafety isn't required on wasm targets. For more information see
+ // the corresponding check in typeck/src/collect.rs
+ if self.tcx.sess.target.options.is_like_wasm {
+ return;
+ }
+
+ let callee_features = &self.tcx.codegen_fn_attrs(func_did).target_features;
+ // The body might be a constant, so it doesn't have codegen attributes.
+ let self_features = &self.tcx.body_codegen_attrs(self.body_did.to_def_id()).target_features;
+
+ // Is `callee_features` a subset of `self_features`?
+ if !callee_features.iter().all(|feature| self_features.contains(feature)) {
+ self.require_unsafe(
+ UnsafetyViolationKind::General,
+ UnsafetyViolationDetails::CallToFunctionWith,
+ )
+ }
+ }
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers {
+ unsafety_check_result: |tcx, def_id| {
+ if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
+ tcx.unsafety_check_result_for_const_arg(def)
+ } else {
+ unsafety_check_result(tcx, ty::WithOptConstParam::unknown(def_id))
+ }
+ },
+ unsafety_check_result_for_const_arg: |tcx, (did, param_did)| {
+ unsafety_check_result(
+ tcx,
+ ty::WithOptConstParam { did, const_param_did: Some(param_did) },
+ )
+ },
+ ..*providers
+ };
+}
+
+/// Context information for [`UnusedUnsafeVisitor`] traversal,
+/// recording the innermost relevant context.
+#[derive(Copy, Clone, Debug)]
+enum Context {
+ Safe,
+ /// in an `unsafe fn`
+ UnsafeFn(HirId),
+ /// in a *used* `unsafe` block
+ /// (i.e. a block without unused-unsafe warning)
+ UnsafeBlock(HirId),
+}
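+
+// For illustration, a hypothetical example: with `unsafe_op_in_unsafe_fn`
+// allowed (the default), the inner block below is reported as unused because
+// the surrounding `unsafe fn` already permits the raw-pointer dereference:
+//
+//     unsafe fn read(p: *const u8) -> u8 {
+//         unsafe { *p } // unnecessary `unsafe` block
+//     }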
+
+struct UnusedUnsafeVisitor<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ used_unsafe_blocks: &'a FxHashMap<HirId, UsedUnsafeBlockData>,
+ context: Context,
+ unused_unsafes: &'a mut Vec<(HirId, UnusedUnsafe)>,
+}
+
+impl<'tcx> intravisit::Visitor<'tcx> for UnusedUnsafeVisitor<'_, 'tcx> {
+ fn visit_block(&mut self, block: &'tcx hir::Block<'tcx>) {
+ use UsedUnsafeBlockData::{AllAllowedInUnsafeFn, SomeDisallowedInUnsafeFn};
+
+ if let hir::BlockCheckMode::UnsafeBlock(hir::UnsafeSource::UserProvided) = block.rules {
+ let used = match self.tcx.lint_level_at_node(UNUSED_UNSAFE, block.hir_id) {
+ (Level::Allow, _) => Some(SomeDisallowedInUnsafeFn),
+ _ => self.used_unsafe_blocks.get(&block.hir_id).copied(),
+ };
+ let unused_unsafe = match (self.context, used) {
+ (_, None) => UnusedUnsafe::Unused,
+ (Context::Safe, Some(_))
+ | (Context::UnsafeFn(_), Some(SomeDisallowedInUnsafeFn)) => {
+ let previous_context = self.context;
+ self.context = Context::UnsafeBlock(block.hir_id);
+ intravisit::walk_block(self, block);
+ self.context = previous_context;
+ return;
+ }
+ (Context::UnsafeFn(hir_id), Some(AllAllowedInUnsafeFn(lint_root))) => {
+ UnusedUnsafe::InUnsafeFn(hir_id, lint_root)
+ }
+ (Context::UnsafeBlock(hir_id), Some(_)) => UnusedUnsafe::InUnsafeBlock(hir_id),
+ };
+ self.unused_unsafes.push((block.hir_id, unused_unsafe));
+ }
+ intravisit::walk_block(self, block);
+ }
+
+ fn visit_fn(
+ &mut self,
+ fk: intravisit::FnKind<'tcx>,
+ _fd: &'tcx hir::FnDecl<'tcx>,
+ b: hir::BodyId,
+ _s: rustc_span::Span,
+ _id: HirId,
+ ) {
+ if matches!(fk, intravisit::FnKind::Closure) {
+ self.visit_body(self.tcx.hir().body(b))
+ }
+ }
+}
+
+fn check_unused_unsafe(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+ used_unsafe_blocks: &FxHashMap<HirId, UsedUnsafeBlockData>,
+) -> Vec<(HirId, UnusedUnsafe)> {
+ let body_id = tcx.hir().maybe_body_owned_by(def_id);
+
+ let Some(body_id) = body_id else {
+ debug!("check_unused_unsafe({:?}) - no body found", def_id);
+ return vec![];
+ };
+
+ let body = tcx.hir().body(body_id);
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let context = match tcx.hir().fn_sig_by_hir_id(hir_id) {
+ Some(sig) if sig.header.unsafety == hir::Unsafety::Unsafe => Context::UnsafeFn(hir_id),
+ _ => Context::Safe,
+ };
+
+ debug!(
+ "check_unused_unsafe({:?}, context={:?}, body={:?}, used_unsafe_blocks={:?})",
+ def_id, body, context, used_unsafe_blocks
+ );
+
+ let mut unused_unsafes = vec![];
+
+ let mut visitor = UnusedUnsafeVisitor {
+ tcx,
+ used_unsafe_blocks,
+ context,
+ unused_unsafes: &mut unused_unsafes,
+ };
+ intravisit::Visitor::visit_body(&mut visitor, body);
+
+ unused_unsafes
+}
+
+fn unsafety_check_result<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx UnsafetyCheckResult {
+ debug!("unsafety_check_result({:?})", def);
+
+ // N.B., this borrow is valid because all the consumers of
+ // `mir_built` force this.
+ let body = &tcx.mir_built(def).borrow();
+
+ let param_env = tcx.param_env(def.did);
+
+ let mut checker = UnsafetyChecker::new(body, def.did, tcx, param_env);
+ checker.visit_body(&body);
+
+ let unused_unsafes = (!tcx.is_closure(def.did.to_def_id()))
+ .then(|| check_unused_unsafe(tcx, def.did, &checker.used_unsafe_blocks));
+
+ tcx.arena.alloc(UnsafetyCheckResult {
+ violations: checker.violations,
+ used_unsafe_blocks: checker.used_unsafe_blocks,
+ unused_unsafes,
+ })
+}
+
+fn report_unused_unsafe(tcx: TyCtxt<'_>, kind: UnusedUnsafe, id: HirId) {
+ let span = tcx.sess.source_map().guess_head_span(tcx.hir().span(id));
+ tcx.struct_span_lint_hir(UNUSED_UNSAFE, id, span, |lint| {
+ let msg = "unnecessary `unsafe` block";
+ let mut db = lint.build(msg);
+ db.span_label(span, msg);
+ match kind {
+ UnusedUnsafe::Unused => {}
+ UnusedUnsafe::InUnsafeBlock(id) => {
+ db.span_label(
+ tcx.sess.source_map().guess_head_span(tcx.hir().span(id)),
+ "because it's nested under this `unsafe` block",
+ );
+ }
+ UnusedUnsafe::InUnsafeFn(id, usage_lint_root) => {
+ db.span_label(
+ tcx.sess.source_map().guess_head_span(tcx.hir().span(id)),
+ "because it's nested under this `unsafe` fn",
+ )
+ .note(
+ "this `unsafe` block does contain unsafe operations, \
+ but those are already allowed in an `unsafe fn`",
+ );
+ let (level, source) =
+ tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, usage_lint_root);
+ assert_eq!(level, Level::Allow);
+ lint::explain_lint_level_source(
+ UNSAFE_OP_IN_UNSAFE_FN,
+ Level::Allow,
+ source,
+ &mut db,
+ );
+ }
+ }
+
+ db.emit();
+ });
+}
+
+pub fn check_unsafety(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ debug!("check_unsafety({:?})", def_id);
+
+ // closures are handled by their parent fn.
+ if tcx.is_closure(def_id.to_def_id()) {
+ return;
+ }
+
+ let UnsafetyCheckResult { violations, unused_unsafes, .. } = tcx.unsafety_check_result(def_id);
+
+ for &UnsafetyViolation { source_info, lint_root, kind, details } in violations.iter() {
+ let (description, note) = details.description_and_note();
+
+ // Report an error.
+ let unsafe_fn_msg =
+ if unsafe_op_in_unsafe_fn_allowed(tcx, lint_root) { " function or" } else { "" };
+
+ match kind {
+ UnsafetyViolationKind::General => {
+ struct_span_err!(
+ tcx.sess,
+ source_info.span,
+ E0133,
+ "{} is unsafe and requires unsafe{} block",
+ description,
+ unsafe_fn_msg,
+ )
+ .span_label(source_info.span, description)
+ .note(note)
+ .emit();
+ }
+ UnsafetyViolationKind::UnsafeFn => tcx.struct_span_lint_hir(
+ UNSAFE_OP_IN_UNSAFE_FN,
+ lint_root,
+ source_info.span,
+ |lint| {
+ lint.build(&format!(
+ "{} is unsafe and requires unsafe block (error E0133)",
+ description,
+ ))
+ .span_label(source_info.span, description)
+ .note(note)
+ .emit();
+ },
+ ),
+ }
+ }
+
+ for &(block_id, kind) in unused_unsafes.as_ref().unwrap() {
+ report_unused_unsafe(tcx, kind, block_id);
+ }
+}
+
+fn unsafe_op_in_unsafe_fn_allowed(tcx: TyCtxt<'_>, id: HirId) -> bool {
+ tcx.lint_level_at_node(UNSAFE_OP_IN_UNSAFE_FN, id).0 == Level::Allow
+}
diff --git a/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
new file mode 100644
index 000000000..611d29a4e
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/cleanup_post_borrowck.rs
@@ -0,0 +1,59 @@
+//! This module provides a pass that replaces the following statements with
+//! [`Nop`]s
+//!
+//! - [`AscribeUserType`]
+//! - [`FakeRead`]
+//! - [`Assign`] statements with a [`Shallow`] borrow
+//!
+//! The `CleanupNonCodegenStatements` pass is implemented as a single
+//! `MutVisitor` traversal of the input MIR, which replaces the statements
+//! listed above (including [`ForMatchGuard`] fake reads) with [`Nop`]s and
+//! then clears all user type annotations from the body.
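+//!
+//! For illustration (hypothetical MIR, made up for this comment): after this
+//! pass, a statement such as `FakeRead(ForMatchGuard, _3)` is replaced by a
+//! `Nop`, so it never reaches codegen.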
+//!
+//! [`AscribeUserType`]: rustc_middle::mir::StatementKind::AscribeUserType
+//! [`Shallow`]: rustc_middle::mir::BorrowKind::Shallow
+//! [`FakeRead`]: rustc_middle::mir::StatementKind::FakeRead
+//! [`Assign`]: rustc_middle::mir::StatementKind::Assign
+//! [`ForMatchGuard`]: rustc_middle::mir::FakeReadCause::ForMatchGuard
+//! [`Nop`]: rustc_middle::mir::StatementKind::Nop
+
+use crate::MirPass;
+use rustc_middle::mir::visit::MutVisitor;
+use rustc_middle::mir::{Body, BorrowKind, Location, Rvalue};
+use rustc_middle::mir::{Statement, StatementKind};
+use rustc_middle::ty::TyCtxt;
+
+pub struct CleanupNonCodegenStatements;
+
+pub struct DeleteNonCodegenStatements<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MirPass<'tcx> for CleanupNonCodegenStatements {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let mut delete = DeleteNonCodegenStatements { tcx };
+ delete.visit_body(body);
+ body.user_type_annotations.raw.clear();
+
+ for decl in &mut body.local_decls {
+ decl.user_ty = None;
+ }
+ }
+}
+
+impl<'tcx> MutVisitor<'tcx> for DeleteNonCodegenStatements<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
+ match statement.kind {
+ StatementKind::AscribeUserType(..)
+ | StatementKind::Assign(box (_, Rvalue::Ref(_, BorrowKind::Shallow, _)))
+ | StatementKind::FakeRead(..) => statement.make_nop(),
+ _ => (),
+ }
+ self.super_statement(statement, location);
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/const_debuginfo.rs b/compiler/rustc_mir_transform/src/const_debuginfo.rs
new file mode 100644
index 000000000..6f0ae4f07
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/const_debuginfo.rs
@@ -0,0 +1,100 @@
+//! Finds locals which are assigned once to a const and otherwise unused except
+//! for debuginfo, and converts their debuginfo to use the const directly,
+//! allowing the local to be removed.
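+//!
+//! For illustration, a hypothetical before/after (made up for this comment):
+//!
+//! ```rust,ignore (illustrative)
+//! // before:              after:
+//! // _1 = const 42_u8;    _1 = const 42_u8;  // now unused, removable later
+//! // debug x => _1;       debug x => const 42_u8;
+//! ```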
+
+use rustc_middle::{
+ mir::{
+ visit::{PlaceContext, Visitor},
+ Body, Constant, Local, Location, Operand, Rvalue, StatementKind, VarDebugInfoContents,
+ },
+ ty::TyCtxt,
+};
+
+use crate::MirPass;
+use rustc_index::{bit_set::BitSet, vec::IndexVec};
+
+pub struct ConstDebugInfo;
+
+impl<'tcx> MirPass<'tcx> for ConstDebugInfo {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.opts.unstable_opts.unsound_mir_opts && sess.mir_opt_level() > 0
+ }
+
+ fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ trace!("running ConstDebugInfo on {:?}", body.source);
+
+ for (local, constant) in find_optimization_opportunities(body) {
+ for debuginfo in &mut body.var_debug_info {
+ if let VarDebugInfoContents::Place(p) = debuginfo.value {
+ if p.local == local && p.projection.is_empty() {
+ trace!(
+ "changing debug info for {:?} from place {:?} to constant {:?}",
+ debuginfo.name,
+ p,
+ constant
+ );
+ debuginfo.value = VarDebugInfoContents::Const(constant);
+ }
+ }
+ }
+ }
+ }
+}
+
+struct LocalUseVisitor {
+ local_mutating_uses: IndexVec<Local, u8>,
+ local_assignment_locations: IndexVec<Local, Option<Location>>,
+}
+
+fn find_optimization_opportunities<'tcx>(body: &Body<'tcx>) -> Vec<(Local, Constant<'tcx>)> {
+ let mut visitor = LocalUseVisitor {
+ local_mutating_uses: IndexVec::from_elem(0, &body.local_decls),
+ local_assignment_locations: IndexVec::from_elem(None, &body.local_decls),
+ };
+
+ visitor.visit_body(body);
+
+ let mut locals_to_debuginfo = BitSet::new_empty(body.local_decls.len());
+ for debuginfo in &body.var_debug_info {
+ if let VarDebugInfoContents::Place(p) = debuginfo.value && let Some(l) = p.as_local() {
+ locals_to_debuginfo.insert(l);
+ }
+ }
+
+ let mut eligible_locals = Vec::new();
+ for (local, mutating_uses) in visitor.local_mutating_uses.drain_enumerated(..) {
+ if mutating_uses != 1 || !locals_to_debuginfo.contains(local) {
+ continue;
+ }
+
+ if let Some(location) = visitor.local_assignment_locations[local] {
+ let bb = &body[location.block];
+
+ // The value is assigned as the result of a call, not a constant
+ if bb.statements.len() == location.statement_index {
+ continue;
+ }
+
+ if let StatementKind::Assign(box (p, Rvalue::Use(Operand::Constant(box c)))) =
+ &bb.statements[location.statement_index].kind
+ {
+ if let Some(local) = p.as_local() {
+ eligible_locals.push((local, *c));
+ }
+ }
+ }
+ }
+
+ eligible_locals
+}
+
+impl Visitor<'_> for LocalUseVisitor {
+ fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
+ if context.is_mutating_use() {
+ self.local_mutating_uses[local] = self.local_mutating_uses[local].saturating_add(1);
+
+ if context.is_place_assignment() {
+ self.local_assignment_locations[local] = Some(location);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/const_goto.rs b/compiler/rustc_mir_transform/src/const_goto.rs
new file mode 100644
index 000000000..5acf939f0
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/const_goto.rs
@@ -0,0 +1,117 @@
+//! This pass optimizes the following sequence
+//! ```rust,ignore (example)
+//! bb2: {
+//! _2 = const true;
+//! goto -> bb3;
+//! }
+//!
+//! bb3: {
+//! switchInt(_2) -> [false: bb4, otherwise: bb5];
+//! }
+//! ```
+//! into
+//! ```rust,ignore (example)
+//! bb2: {
+//! _2 = const true;
+//! goto -> bb5;
+//! }
+//! ```
+
+use crate::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::{mir::visit::Visitor, ty::ParamEnv};
+
+use super::simplify::{simplify_cfg, simplify_locals};
+
+pub struct ConstGoto;
+
+impl<'tcx> MirPass<'tcx> for ConstGoto {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 4
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ trace!("Running ConstGoto on {:?}", body.source);
+ let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
+ let mut opt_finder =
+ ConstGotoOptimizationFinder { tcx, body, optimizations: vec![], param_env };
+ opt_finder.visit_body(body);
+ let should_simplify = !opt_finder.optimizations.is_empty();
+ for opt in opt_finder.optimizations {
+ let block = &mut body.basic_blocks_mut()[opt.bb_with_goto];
+ block.statements.extend(opt.stmts_move_up);
+ let terminator = block.terminator_mut();
+ let new_goto = TerminatorKind::Goto { target: opt.target_to_use_in_goto };
+ debug!("SUCCESS: replacing `{:?}` with `{:?}`", terminator.kind, new_goto);
+ terminator.kind = new_goto;
+ }
+
+ // if we applied optimizations, we potentially have some cfg to cleanup to
+ // make it easier for further passes
+ if should_simplify {
+ simplify_cfg(tcx, body);
+ simplify_locals(body, tcx);
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for ConstGotoOptimizationFinder<'_, 'tcx> {
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ let _: Option<_> = try {
+ let target = terminator.kind.as_goto()?;
+ // We only apply this optimization if the last statement is a const assignment
+ let last_statement = self.body.basic_blocks()[location.block].statements.last()?;
+
+ if let (place, Rvalue::Use(Operand::Constant(_const))) =
+ last_statement.kind.as_assign()?
+ {
+ // We found a constant being assigned to `place`.
+ // Now check that the target of this Goto switches on this place.
+ let target_bb = &self.body.basic_blocks()[target];
+
+ // `StorageDead(..)` statements do not affect the functionality of the MIR,
+ // so we can move them up to the predecessor block.
+ let mut stmts_move_up = Vec::new();
+ for stmt in &target_bb.statements {
+ if let StatementKind::StorageDead(..) = stmt.kind {
+ stmts_move_up.push(stmt.clone())
+ } else {
+ None?;
+ }
+ }
+
+ let target_bb_terminator = target_bb.terminator();
+ let (discr, switch_ty, targets) = target_bb_terminator.kind.as_switch()?;
+ if discr.place() == Some(*place) {
+ // We now know that the Switch matches on the const place, and it is statementless
+ // Now find which value in the Switch matches the const value.
+ let const_value =
+ _const.literal.try_eval_bits(self.tcx, self.param_env, switch_ty)?;
+ let target_to_use_in_goto = targets.target_for_value(const_value);
+ self.optimizations.push(OptimizationToApply {
+ bb_with_goto: location.block,
+ target_to_use_in_goto,
+ stmts_move_up,
+ });
+ }
+ }
+ Some(())
+ };
+
+ self.super_terminator(terminator, location);
+ }
+}
+
+struct OptimizationToApply<'tcx> {
+ bb_with_goto: BasicBlock,
+ target_to_use_in_goto: BasicBlock,
+ stmts_move_up: Vec<Statement<'tcx>>,
+}
+
+pub struct ConstGotoOptimizationFinder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ optimizations: Vec<OptimizationToApply<'tcx>>,
+}
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
new file mode 100644
index 000000000..fbc0a767f
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -0,0 +1,1142 @@
+//! Propagates constants for early reporting of statically known
+//! assertion failures
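+//!
+//! For illustration, a hypothetical example: both operands below are known at
+//! compile time, so the out-of-bounds index can be reported without running
+//! the program:
+//!
+//! ```rust,ignore (illustrative)
+//! let idx = 2 + 3;
+//! let _ = [0_u8; 4][idx]; // index known to be 5: reported at compile time
+//! ```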
+
+use std::cell::Cell;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def::DefKind;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::visit::{
+ MutVisitor, MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor,
+};
+use rustc_middle::mir::{
+ BasicBlock, BinOp, Body, Constant, ConstantKind, Local, LocalDecl, LocalKind, Location,
+ Operand, Place, Rvalue, SourceInfo, Statement, StatementKind, Terminator, TerminatorKind, UnOp,
+ RETURN_PLACE,
+};
+use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
+use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::{self, ConstKind, Instance, ParamEnv, Ty, TyCtxt, TypeVisitable};
+use rustc_span::{def_id::DefId, Span};
+use rustc_target::abi::{self, HasDataLayout, Size, TargetDataLayout};
+use rustc_target::spec::abi::Abi as CallAbi;
+use rustc_trait_selection::traits;
+
+use crate::MirPass;
+use rustc_const_eval::interpret::{
+ self, compile_time_machine, AllocId, ConstAllocation, ConstValue, CtfeValidationMode, Frame,
+ ImmTy, Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, PlaceTy,
+ Pointer, Scalar, ScalarMaybeUninit, StackPopCleanup, StackPopUnwind,
+};
+
+/// The maximum number of bytes that we'll allocate for a local or the return
+/// value.
+/// Needed for #66397, because otherwise we eval into large places and that can
+/// cause OOM or just severely regress performance.
+const MAX_ALLOC_LIMIT: u64 = 1024;
+
+/// Macro for machine-specific `InterpError` without allocation.
+/// (These will never be shown to the user, but they help diagnose ICEs.)
+macro_rules! throw_machine_stop_str {
+ ($($tt:tt)*) => {{
+ // We make a new local type for it. The type itself does not carry any information,
+ // but its vtable (for the `MachineStopType` trait) does.
+ struct Zst;
+ // Printing this type shows the desired string.
+ impl std::fmt::Display for Zst {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, $($tt)*)
+ }
+ }
+ impl rustc_middle::mir::interpret::MachineStopType for Zst {}
+ throw_machine_stop!(Zst)
+ }};
+}
+
+pub struct ConstProp;
+
+impl<'tcx> MirPass<'tcx> for ConstProp {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 1
+ }
+
+ #[instrument(skip(self, tcx), level = "debug")]
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // will be evaluated by miri and produce its errors there
+ if body.source.promoted.is_some() {
+ return;
+ }
+
+ let def_id = body.source.def_id().expect_local();
+ let def_kind = tcx.def_kind(def_id);
+ let is_fn_like = def_kind.is_fn_like();
+ let is_assoc_const = def_kind == DefKind::AssocConst;
+
+ // Only run const prop on functions, methods, closures and associated constants
+ if !is_fn_like && !is_assoc_const {
+ // skip anon_const/statics/consts because they'll be evaluated by miri anyway
+ trace!("ConstProp skipped for {:?}", def_id);
+ return;
+ }
+
+ let is_generator = tcx.type_of(def_id.to_def_id()).is_generator();
+ // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
+ // computing their layout.
+ if is_generator {
+ trace!("ConstProp skipped for generator {:?}", def_id);
+ return;
+ }
+
+ // Check if it's even possible to satisfy the 'where' clauses
+ // for this item.
+ // This branch will never be taken for any normal function.
+ // However, it's possible to use `#![feature(trivial_bounds)]` to write
+ // a function with impossible-to-satisfy clauses, e.g.:
+ // `fn foo() where String: Copy {}`
+ //
+ // We don't usually need to worry about this kind of case,
+ // since we would get a compilation error if the user tried
+ // to call it. However, since we can do const propagation
+ // even without any calls to the function, we need to make
+ // sure that it even makes sense to try to evaluate the body.
+ // If there are unsatisfiable where clauses, then all bets are
+ // off, and we just give up.
+ //
+ // We manually filter the predicates, skipping anything that's not
+ // "global". We are in a potentially generic context
+ // (e.g. we are evaluating a function without substituting generic
+ // parameters, so this filtering serves two purposes:
+ //
+ // 1. We skip evaluating any predicates that we would
+        // never be able to prove are unsatisfiable (e.g. `<T as Foo>`).
+ // 2. We avoid trying to normalize predicates involving generic
+ // parameters (e.g. `<T as Foo>::MyItem`). This can confuse
+ // the normalization code (leading to cycle errors), since
+ // it's usually never invoked in this way.
+ let predicates = tcx
+ .predicates_of(def_id.to_def_id())
+ .predicates
+ .iter()
+ .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None });
+ if traits::impossible_predicates(
+ tcx,
+ traits::elaborate_predicates(tcx, predicates).map(|o| o.predicate).collect(),
+ ) {
+ trace!("ConstProp skipped for {:?}: found unsatisfiable predicates", def_id);
+ return;
+ }
+
+ trace!("ConstProp starting for {:?}", def_id);
+
+ let dummy_body = &Body::new(
+ body.source,
+ body.basic_blocks().clone(),
+ body.source_scopes.clone(),
+ body.local_decls.clone(),
+ Default::default(),
+ body.arg_count,
+ Default::default(),
+ body.span,
+ body.generator_kind(),
+ body.tainted_by_errors,
+ );
+
+ // FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold
+ // constants, instead of just checking for const-folding succeeding.
+ // That would require a uniform one-def no-mutation analysis
+ // and RPO (or recursing when needing the value of a local).
+ let mut optimization_finder = ConstPropagator::new(body, dummy_body, tcx);
+ optimization_finder.visit_body(body);
+
+ trace!("ConstProp done for {:?}", def_id);
+ }
+}
+
+pub struct ConstPropMachine<'mir, 'tcx> {
+ /// The virtual call stack.
+ stack: Vec<Frame<'mir, 'tcx>>,
+ /// `OnlyInsideOwnBlock` locals that were written in the current block get erased at the end.
+ pub written_only_inside_own_block_locals: FxHashSet<Local>,
+ /// Locals that need to be cleared after every block terminates.
+ pub only_propagate_inside_block_locals: BitSet<Local>,
+ pub can_const_prop: IndexVec<Local, ConstPropMode>,
+}
+
+impl ConstPropMachine<'_, '_> {
+ pub fn new(
+ only_propagate_inside_block_locals: BitSet<Local>,
+ can_const_prop: IndexVec<Local, ConstPropMode>,
+ ) -> Self {
+ Self {
+ stack: Vec::new(),
+ written_only_inside_own_block_locals: Default::default(),
+ only_propagate_inside_block_locals,
+ can_const_prop,
+ }
+ }
+}
+
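+// The interpreter `Machine` below deliberately rejects anything whose result
+// could differ at runtime: loading MIR for calls, intrinsics, pointer
+// arithmetic and comparisons, and writes to (or mutable reads of) globals all
+// bail out via `throw_machine_stop_str!`, and unknown functions yield `Ok(None)`.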
+impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx> {
+ compile_time_machine!(<'mir, 'tcx>);
+ const PANIC_ON_ALLOC_FAIL: bool = true; // all allocations are small (see `MAX_ALLOC_LIMIT`)
+
+ type MemoryKind = !;
+
+ fn load_mir(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _instance: ty::InstanceDef<'tcx>,
+ ) -> InterpResult<'tcx, &'tcx Body<'tcx>> {
+ throw_machine_stop_str!("calling functions isn't supported in ConstProp")
+ }
+
+ fn find_mir_or_eval_fn(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _instance: ty::Instance<'tcx>,
+ _abi: CallAbi,
+ _args: &[OpTy<'tcx>],
+ _destination: &PlaceTy<'tcx>,
+ _target: Option<BasicBlock>,
+ _unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx, Option<(&'mir Body<'tcx>, ty::Instance<'tcx>)>> {
+ Ok(None)
+ }
+
+ fn call_intrinsic(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _instance: ty::Instance<'tcx>,
+ _args: &[OpTy<'tcx>],
+ _destination: &PlaceTy<'tcx>,
+ _target: Option<BasicBlock>,
+ _unwind: StackPopUnwind,
+ ) -> InterpResult<'tcx> {
+ throw_machine_stop_str!("calling intrinsics isn't supported in ConstProp")
+ }
+
+ fn assert_panic(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _msg: &rustc_middle::mir::AssertMessage<'tcx>,
+ _unwind: Option<rustc_middle::mir::BasicBlock>,
+ ) -> InterpResult<'tcx> {
+ bug!("panics terminators are not evaluated in ConstProp")
+ }
+
+ fn binary_ptr_op(
+ _ecx: &InterpCx<'mir, 'tcx, Self>,
+ _bin_op: BinOp,
+ _left: &ImmTy<'tcx>,
+ _right: &ImmTy<'tcx>,
+ ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
+ // We can't do this because aliasing of memory can differ between const eval and llvm
+ throw_machine_stop_str!("pointer arithmetic or comparisons aren't supported in ConstProp")
+ }
+
+ fn access_local<'a>(
+ frame: &'a Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>,
+ local: Local,
+ ) -> InterpResult<'tcx, &'a interpret::Operand<Self::Provenance>> {
+ let l = &frame.locals[local];
+
+ if matches!(
+ l.value,
+ LocalValue::Live(interpret::Operand::Immediate(interpret::Immediate::Uninit))
+ ) {
+            // For us "uninit" means "we don't know its value, might be initialized or not".
+            // So stop here.
+            throw_machine_stop_str!("tried to access a local with unknown value")
+ }
+
+ l.access()
+ }
+
+ fn access_local_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ frame: usize,
+ local: Local,
+ ) -> InterpResult<'tcx, &'a mut interpret::Operand<Self::Provenance>> {
+ if ecx.machine.can_const_prop[local] == ConstPropMode::NoPropagation {
+ throw_machine_stop_str!("tried to write to a local that is marked as not propagatable")
+ }
+ if frame == 0 && ecx.machine.only_propagate_inside_block_locals.contains(local) {
+ trace!(
+ "mutating local {:?} which is restricted to its block. \
+ Will remove it from const-prop after block is finished.",
+ local
+ );
+ ecx.machine.written_only_inside_own_block_locals.insert(local);
+ }
+ ecx.machine.stack[frame].locals[local].access_mut()
+ }
+
+ fn before_access_global(
+ _tcx: TyCtxt<'tcx>,
+ _machine: &Self,
+ _alloc_id: AllocId,
+ alloc: ConstAllocation<'tcx, Self::Provenance, Self::AllocExtra>,
+ _static_def_id: Option<DefId>,
+ is_write: bool,
+ ) -> InterpResult<'tcx> {
+ if is_write {
+ throw_machine_stop_str!("can't write to global");
+ }
+ // If the static allocation is mutable, then we can't const prop it as its content
+ // might be different at runtime.
+ if alloc.inner().mutability == Mutability::Mut {
+ throw_machine_stop_str!("can't access mutable globals in ConstProp");
+ }
+
+ Ok(())
+ }
+
+ #[inline(always)]
+ fn expose_ptr(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ _ptr: Pointer<AllocId>,
+ ) -> InterpResult<'tcx> {
+ throw_machine_stop_str!("exposing pointers isn't supported in ConstProp")
+ }
+
+ #[inline(always)]
+ fn init_frame_extra(
+ _ecx: &mut InterpCx<'mir, 'tcx, Self>,
+ frame: Frame<'mir, 'tcx>,
+ ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
+ Ok(frame)
+ }
+
+ #[inline(always)]
+ fn stack<'a>(
+ ecx: &'a InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
+ &ecx.machine.stack
+ }
+
+ #[inline(always)]
+ fn stack_mut<'a>(
+ ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
+ ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
+ &mut ecx.machine.stack
+ }
+}
+
+/// Finds optimization opportunities on the MIR.
+struct ConstPropagator<'mir, 'tcx> {
+ ecx: InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ local_decls: &'mir IndexVec<Local, LocalDecl<'tcx>>,
+ // Because we have `MutVisitor` we can't obtain the `SourceInfo` from a `Location`. So we store
+ // the last known `SourceInfo` here and just keep revisiting it.
+ source_info: Option<SourceInfo>,
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for ConstPropagator<'_, 'tcx> {
+ type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
+ err
+ }
+}
+
+impl HasDataLayout for ConstPropagator<'_, '_> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for ConstPropagator<'_, 'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx> ty::layout::HasParamEnv<'tcx> for ConstPropagator<'_, 'tcx> {
+ #[inline]
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+}
+
+impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
+ fn new(
+ body: &Body<'tcx>,
+ dummy_body: &'mir Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ ) -> ConstPropagator<'mir, 'tcx> {
+ let def_id = body.source.def_id();
+ let substs = &InternalSubsts::identity_for_item(tcx, def_id);
+ let param_env = tcx.param_env_reveal_all_normalized(def_id);
+
+ let can_const_prop = CanConstProp::check(tcx, param_env, body);
+ let mut only_propagate_inside_block_locals = BitSet::new_empty(can_const_prop.len());
+ for (l, mode) in can_const_prop.iter_enumerated() {
+ if *mode == ConstPropMode::OnlyInsideOwnBlock {
+ only_propagate_inside_block_locals.insert(l);
+ }
+ }
+ let mut ecx = InterpCx::new(
+ tcx,
+ tcx.def_span(def_id),
+ param_env,
+ ConstPropMachine::new(only_propagate_inside_block_locals, can_const_prop),
+ );
+
+ let ret_layout = ecx
+ .layout_of(body.bound_return_ty().subst(tcx, substs))
+ .ok()
+ // Don't bother allocating memory for large values.
+ // I don't know how return types can seem to be unsized but this happens in the
+ // `type/type-unsatisfiable.rs` test.
+ .filter(|ret_layout| {
+ !ret_layout.is_unsized() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
+ })
+ .unwrap_or_else(|| ecx.layout_of(tcx.types.unit).unwrap());
+
+ let ret = ecx
+ .allocate(ret_layout, MemoryKind::Stack)
+ .expect("couldn't perform small allocation")
+ .into();
+
+ ecx.push_stack_frame(
+ Instance::new(def_id, substs),
+ dummy_body,
+ &ret,
+ StackPopCleanup::Root { cleanup: false },
+ )
+ .expect("failed to push initial stack frame");
+
+ ConstPropagator {
+ ecx,
+ tcx,
+ param_env,
+ local_decls: &dummy_body.local_decls,
+ source_info: None,
+ }
+ }
+
+ fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
+ let op = match self.ecx.eval_place_to_op(place, None) {
+ Ok(op) => op,
+ Err(e) => {
+ trace!("get_const failed: {}", e);
+ return None;
+ }
+ };
+
+ // Try to read the local as an immediate so that if it is representable as a scalar, we can
+ // handle it as such, but otherwise, just return the value as is.
+ Some(match self.ecx.read_immediate_raw(&op, /*force*/ false) {
+ Ok(Ok(imm)) => imm.into(),
+ _ => op,
+ })
+ }
+
+    /// Remove `local` from the pool of `Locals`. Allows writing to it,
+    /// but not reading from it anymore.
+ fn remove_const(ecx: &mut InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, local: Local) {
+ ecx.frame_mut().locals[local] = LocalState {
+ value: LocalValue::Live(interpret::Operand::Immediate(interpret::Immediate::Uninit)),
+ layout: Cell::new(None),
+ };
+ }
+
+ fn use_ecx<F, T>(&mut self, f: F) -> Option<T>
+ where
+ F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
+ {
+ match f(self) {
+ Ok(val) => Some(val),
+ Err(error) => {
+ trace!("InterpCx operation failed: {:?}", error);
+ // Some errors shouldn't come up because creating them causes
+ // an allocation, which we should avoid. When that happens,
+ // dedicated error variants should be introduced instead.
+ assert!(
+ !error.kind().formatted_string(),
+ "const-prop encountered formatting error: {}",
+ error
+ );
+ None
+ }
+ }
+ }
+
+ /// Returns the value, if any, of evaluating `c`.
+ fn eval_constant(&mut self, c: &Constant<'tcx>) -> Option<OpTy<'tcx>> {
+ // FIXME we need to revisit this for #67176
+ if c.needs_subst() {
+ return None;
+ }
+
+ self.ecx.mir_const_to_op(&c.literal, None).ok()
+ }
+
+ /// Returns the value, if any, of evaluating `place`.
+ fn eval_place(&mut self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
+ trace!("eval_place(place={:?})", place);
+ self.use_ecx(|this| this.ecx.eval_place_to_op(place, None))
+ }
+
+ /// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant`
+ /// or `eval_place`, depending on the variant of `Operand` used.
+ fn eval_operand(&mut self, op: &Operand<'tcx>) -> Option<OpTy<'tcx>> {
+ match *op {
+ Operand::Constant(ref c) => self.eval_constant(c),
+ Operand::Move(place) | Operand::Copy(place) => self.eval_place(place),
+ }
+ }
+
+ fn check_unary_op(&mut self, op: UnOp, arg: &Operand<'tcx>) -> Option<()> {
+ if self.use_ecx(|this| {
+ let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
+ let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?;
+ Ok(overflow)
+ })? {
+ // `AssertKind` only has an `OverflowNeg` variant, so make sure that is
+ // appropriate to use.
+ assert_eq!(op, UnOp::Neg, "Neg is the only UnOp that can overflow");
+ return None;
+ }
+
+ Some(())
+ }
+
+ fn check_binary_op(
+ &mut self,
+ op: BinOp,
+ left: &Operand<'tcx>,
+ right: &Operand<'tcx>,
+ ) -> Option<()> {
+ let r = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(right, None)?));
+ let l = self.use_ecx(|this| this.ecx.read_immediate(&this.ecx.eval_operand(left, None)?));
+ // Check for exceeding shifts *even if* we cannot evaluate the LHS.
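+        // (For example, with a `u64` left-hand side, `x << 64` overflows for
+        // every `x`, so the right-hand side alone is enough to decide.)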
+ if op == BinOp::Shr || op == BinOp::Shl {
+ let r = r.clone()?;
+ // We need the type of the LHS. We cannot use `place_layout` as that is the type
+ // of the result, which for checked binops is not the same!
+ let left_ty = left.ty(self.local_decls, self.tcx);
+ let left_size = self.ecx.layout_of(left_ty).ok()?.size;
+ let right_size = r.layout.size;
+ let r_bits = r.to_scalar().ok();
+ let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
+ if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
+ return None;
+ }
+ }
+
+ if let (Some(l), Some(r)) = (&l, &r) {
+ // The remaining operators are handled through `overflowing_binary_op`.
+ if self.use_ecx(|this| {
+ let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, l, r)?;
+ Ok(overflow)
+ })? {
+ return None;
+ }
+ }
+ Some(())
+ }
+
+ fn propagate_operand(&mut self, operand: &mut Operand<'tcx>) {
+ match *operand {
+ Operand::Copy(l) | Operand::Move(l) => {
+ if let Some(value) = self.get_const(l) && self.should_const_prop(&value) {
+ // FIXME(felix91gr): this code only handles `Scalar` cases.
+ // For now, we're not handling `ScalarPair` cases because
+ // doing so here would require a lot of code duplication.
+ // We should hopefully generalize `Operand` handling into a fn,
+ // and use it to do const-prop here and everywhere else
+ // where it makes sense.
+ if let interpret::Operand::Immediate(interpret::Immediate::Scalar(
+ ScalarMaybeUninit::Scalar(scalar),
+ )) = *value
+ {
+ *operand = self.operand_from_scalar(
+ scalar,
+ value.layout.ty,
+ self.source_info.unwrap().span,
+ );
+ }
+ }
+ }
+ Operand::Constant(_) => (),
+ }
+ }
+
+ fn const_prop(&mut self, rvalue: &Rvalue<'tcx>, place: Place<'tcx>) -> Option<()> {
+ // Perform any special handling for specific Rvalue types.
+ // Generally, checks here fall into one of two categories:
+ // 1. Additional checking to provide useful lints to the user
+ // - In this case, we will do some validation and then fall through to the
+ // end of the function which evals the assignment.
+ // 2. Working around bugs in other parts of the compiler
+ // - In this case, we'll return `None` from this function to stop evaluation.
+ match rvalue {
+ // Additional checking: give lints to the user if an overflow would occur.
+ // We do this here and not in the `Assert` terminator as that terminator is
+ // only sometimes emitted (overflow checks can be disabled), but we want to always
+ // lint.
+ Rvalue::UnaryOp(op, arg) => {
+ trace!("checking UnaryOp(op = {:?}, arg = {:?})", op, arg);
+ self.check_unary_op(*op, arg)?;
+ }
+ Rvalue::BinaryOp(op, box (left, right)) => {
+ trace!("checking BinaryOp(op = {:?}, left = {:?}, right = {:?})", op, left, right);
+ self.check_binary_op(*op, left, right)?;
+ }
+ Rvalue::CheckedBinaryOp(op, box (left, right)) => {
+ trace!(
+ "checking CheckedBinaryOp(op = {:?}, left = {:?}, right = {:?})",
+ op,
+ left,
+ right
+ );
+ self.check_binary_op(*op, left, right)?;
+ }
+
+ // Do not try creating references (#67862)
+ Rvalue::AddressOf(_, place) | Rvalue::Ref(_, _, place) => {
+ trace!("skipping AddressOf | Ref for {:?}", place);
+
+ // This may be creating mutable references or immutable references to cells.
+ // If that happens, the pointed to value could be mutated via that reference.
+ // Since we aren't tracking references, the const propagator loses track of what
+ // value the local has right now.
+ // Thus, all locals that have their reference taken
+ // must not take part in propagation.
+ Self::remove_const(&mut self.ecx, place.local);
+
+ return None;
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ trace!("skipping ThreadLocalRef({:?})", def_id);
+
+ return None;
+ }
+
+ // There's no other checking to do at this time.
+ Rvalue::Aggregate(..)
+ | Rvalue::Use(..)
+ | Rvalue::CopyForDeref(..)
+ | Rvalue::Repeat(..)
+ | Rvalue::Len(..)
+ | Rvalue::Cast(..)
+ | Rvalue::ShallowInitBox(..)
+ | Rvalue::Discriminant(..)
+ | Rvalue::NullaryOp(..) => {}
+ }
+
+ // FIXME we need to revisit this for #67176
+ if rvalue.needs_subst() {
+ return None;
+ }
+
+ if self.tcx.sess.mir_opt_level() >= 4 {
+ self.eval_rvalue_with_identities(rvalue, place)
+ } else {
+ self.use_ecx(|this| this.ecx.eval_rvalue_into_place(rvalue, place))
+ }
+ }
+
+ // Attempt to use algebraic identities to eliminate constant expressions
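+    //
+    // Even when only one operand is known, some results are fully determined:
+    // `x & 0` is `0`, `x | !0` is `!0` (`x | true` for bools), and `x * 0` is
+    // `0` for integers. These are exactly the identities matched below.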
+ fn eval_rvalue_with_identities(
+ &mut self,
+ rvalue: &Rvalue<'tcx>,
+ place: Place<'tcx>,
+ ) -> Option<()> {
+ self.use_ecx(|this| match rvalue {
+ Rvalue::BinaryOp(op, box (left, right))
+ | Rvalue::CheckedBinaryOp(op, box (left, right)) => {
+ let l = this.ecx.eval_operand(left, None);
+ let r = this.ecx.eval_operand(right, None);
+
+ let const_arg = match (l, r) {
+ (Ok(ref x), Err(_)) | (Err(_), Ok(ref x)) => this.ecx.read_immediate(x)?,
+ (Err(e), Err(_)) => return Err(e),
+ (Ok(_), Ok(_)) => return this.ecx.eval_rvalue_into_place(rvalue, place),
+ };
+
+ if !matches!(const_arg.layout.abi, abi::Abi::Scalar(..)) {
+ // We cannot handle Scalar Pair stuff.
+ return this.ecx.eval_rvalue_into_place(rvalue, place);
+ }
+
+ let arg_value = const_arg.to_scalar()?.to_bits(const_arg.layout.size)?;
+ let dest = this.ecx.eval_place(place)?;
+
+ match op {
+ BinOp::BitAnd if arg_value == 0 => this.ecx.write_immediate(*const_arg, &dest),
+ BinOp::BitOr
+ if arg_value == const_arg.layout.size.truncate(u128::MAX)
+ || (const_arg.layout.ty.is_bool() && arg_value == 1) =>
+ {
+ this.ecx.write_immediate(*const_arg, &dest)
+ }
+ BinOp::Mul if const_arg.layout.ty.is_integral() && arg_value == 0 => {
+ if let Rvalue::CheckedBinaryOp(_, _) = rvalue {
+ let val = Immediate::ScalarPair(
+ const_arg.to_scalar()?.into(),
+ Scalar::from_bool(false).into(),
+ );
+ this.ecx.write_immediate(val, &dest)
+ } else {
+ this.ecx.write_immediate(*const_arg, &dest)
+ }
+ }
+ _ => this.ecx.eval_rvalue_into_place(rvalue, place),
+ }
+ }
+ _ => this.ecx.eval_rvalue_into_place(rvalue, place),
+ })
+ }
+
+ /// Creates a new `Operand::Constant` from a `Scalar` value
+ fn operand_from_scalar(&self, scalar: Scalar, ty: Ty<'tcx>, span: Span) -> Operand<'tcx> {
+ Operand::Constant(Box::new(Constant {
+ span,
+ user_ty: None,
+ literal: ConstantKind::from_scalar(self.tcx, scalar, ty),
+ }))
+ }
+
+ fn replace_with_const(
+ &mut self,
+ rval: &mut Rvalue<'tcx>,
+ value: &OpTy<'tcx>,
+ source_info: SourceInfo,
+ ) {
+ if let Rvalue::Use(Operand::Constant(c)) = rval {
+ match c.literal {
+ ConstantKind::Ty(c) if matches!(c.kind(), ConstKind::Unevaluated(..)) => {}
+ _ => {
+ trace!("skipping replace of Rvalue::Use({:?} because it is already a const", c);
+ return;
+ }
+ }
+ }
+
+ trace!("attempting to replace {:?} with {:?}", rval, value);
+ if let Err(e) = self.ecx.const_validate_operand(
+ value,
+ vec![],
+ // FIXME: is ref tracking too expensive?
+ // FIXME: what is the point of ref tracking if we do not even check the tracked refs?
+ &mut interpret::RefTracking::empty(),
+ CtfeValidationMode::Regular,
+ ) {
+ trace!("validation error, attempt failed: {:?}", e);
+ return;
+ }
+
+        // FIXME: figure out what to do when read_immediate_raw fails
+ let imm = self.use_ecx(|this| this.ecx.read_immediate_raw(value, /*force*/ false));
+
+ if let Some(Ok(imm)) = imm {
+ match *imm {
+ interpret::Immediate::Scalar(ScalarMaybeUninit::Scalar(scalar)) => {
+ *rval = Rvalue::Use(self.operand_from_scalar(
+ scalar,
+ value.layout.ty,
+ source_info.span,
+ ));
+ }
+ Immediate::ScalarPair(
+ ScalarMaybeUninit::Scalar(_),
+ ScalarMaybeUninit::Scalar(_),
+ ) => {
+ // Found a value represented as a pair. For now only do const-prop if the type
+ // of `rvalue` is also a tuple with two scalars.
+ // FIXME: enable the general case stated above ^.
+ let ty = value.layout.ty;
+ // Only do it for tuples
+ if let ty::Tuple(types) = ty.kind() {
+ // Only do it if tuple is also a pair with two scalars
+ if let [ty1, ty2] = types[..] {
+ let alloc = self.use_ecx(|this| {
+ let ty_is_scalar = |ty| {
+ this.ecx.layout_of(ty).ok().map(|layout| layout.abi.is_scalar())
+ == Some(true)
+ };
+ if ty_is_scalar(ty1) && ty_is_scalar(ty2) {
+ let alloc = this
+ .ecx
+ .intern_with_temp_alloc(value.layout, |ecx, dest| {
+ ecx.write_immediate(*imm, dest)
+ })
+ .unwrap();
+ Ok(Some(alloc))
+ } else {
+ Ok(None)
+ }
+ });
+
+ if let Some(Some(alloc)) = alloc {
+ // Assign entire constant in a single statement.
+ // We can't use aggregates, as we run after the aggregate-lowering `MirPhase`.
+ let const_val = ConstValue::ByRef { alloc, offset: Size::ZERO };
+ let literal = ConstantKind::Val(const_val, ty);
+ *rval = Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span: source_info.span,
+ user_ty: None,
+ literal,
+ })));
+ }
+ }
+ }
+ }
+ // Scalars or scalar pairs that contain undef values are assumed to not have
+ // successfully evaluated and are thus not propagated.
+ _ => {}
+ }
+ }
+ }
+
+ /// Returns `true` if and only if this `op` should be const-propagated into.
+ fn should_const_prop(&mut self, op: &OpTy<'tcx>) -> bool {
+ if !self.tcx.consider_optimizing(|| format!("ConstantPropagation - OpTy: {:?}", op)) {
+ return false;
+ }
+
+ match **op {
+ interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
+ s.try_to_int().is_ok()
+ }
+ interpret::Operand::Immediate(Immediate::ScalarPair(
+ ScalarMaybeUninit::Scalar(l),
+ ScalarMaybeUninit::Scalar(r),
+ )) => l.try_to_int().is_ok() && r.try_to_int().is_ok(),
+ _ => false,
+ }
+ }
+}
+
+/// The mode that `ConstProp` is allowed to run in for a given `Local`.
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum ConstPropMode {
+ /// The `Local` can be propagated into and reads of this `Local` can also be propagated.
+ FullConstProp,
+ /// The `Local` can only be propagated into and from its own block.
+ OnlyInsideOwnBlock,
+ /// The `Local` can be propagated into but reads cannot be propagated.
+ OnlyPropagateInto,
+ /// The `Local` cannot be part of propagation at all. Any statement
+ /// referencing it either for reading or writing will not get propagated.
+ NoPropagation,
+}
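+
+// How these modes are assigned (see `CanConstProp::check` below): function
+// arguments get `OnlyPropagateInto`, user variables get `OnlyInsideOwnBlock`,
+// and other locals start at `FullConstProp`; locals that are borrowed, too
+// large to track, or assigned more than once are demoted to `NoPropagation`
+// or `OnlyInsideOwnBlock`.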
+
+pub struct CanConstProp {
+ can_const_prop: IndexVec<Local, ConstPropMode>,
+ // False at the beginning. Once set, no more assignments are allowed to that local.
+ found_assignment: BitSet<Local>,
+ // Cache of locals' information
+ local_kinds: IndexVec<Local, LocalKind>,
+}
+
+impl CanConstProp {
+    /// Computes the propagation mode (`ConstPropMode`) of each `Local` in `body`.
+ pub fn check<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ body: &Body<'tcx>,
+ ) -> IndexVec<Local, ConstPropMode> {
+ let mut cpv = CanConstProp {
+ can_const_prop: IndexVec::from_elem(ConstPropMode::FullConstProp, &body.local_decls),
+ found_assignment: BitSet::new_empty(body.local_decls.len()),
+ local_kinds: IndexVec::from_fn_n(
+ |local| body.local_kind(local),
+ body.local_decls.len(),
+ ),
+ };
+ for (local, val) in cpv.can_const_prop.iter_enumerated_mut() {
+ let ty = body.local_decls[local].ty;
+ match tcx.layout_of(param_env.and(ty)) {
+ Ok(layout) if layout.size < Size::from_bytes(MAX_ALLOC_LIMIT) => {}
+                // Either the layout fails to compute, in which case we can't use this
+                // local anyway, or the local is too large, in which case we don't want to.
+ _ => {
+ *val = ConstPropMode::NoPropagation;
+ continue;
+ }
+ }
+ // Cannot use args at all
+            // Cannot use locals because `if x < y { y - x } else { x - y }` would
+            // lint for `x != y`.
+ // FIXME(oli-obk): lint variables until they are used in a condition
+ // FIXME(oli-obk): lint if return value is constant
+ if cpv.local_kinds[local] == LocalKind::Arg {
+ *val = ConstPropMode::OnlyPropagateInto;
+ trace!(
+ "local {:?} can't be const propagated because it's a function argument",
+ local
+ );
+ } else if cpv.local_kinds[local] == LocalKind::Var {
+ *val = ConstPropMode::OnlyInsideOwnBlock;
+ trace!(
+ "local {:?} will only be propagated inside its block, because it's a user variable",
+ local
+ );
+ }
+ }
+ cpv.visit_body(&body);
+ cpv.can_const_prop
+ }
+}
+
+impl Visitor<'_> for CanConstProp {
+ fn visit_local(&mut self, local: Local, context: PlaceContext, _: Location) {
+ use rustc_middle::mir::visit::PlaceContext::*;
+ match context {
+ // Projections are fine, because `&mut foo.x` will be caught by
+ // `MutatingUseContext::Borrow` elsewhere.
+ MutatingUse(MutatingUseContext::Projection)
+ // These are just stores, where the storing is not propagatable, but there may be later
+ // mutations of the same local via `Store`
+ | MutatingUse(MutatingUseContext::Call)
+ | MutatingUse(MutatingUseContext::AsmOutput)
+ | MutatingUse(MutatingUseContext::Deinit)
+ // Actual store that can possibly even propagate a value
+ | MutatingUse(MutatingUseContext::Store)
+ | MutatingUse(MutatingUseContext::SetDiscriminant) => {
+ if !self.found_assignment.insert(local) {
+ match &mut self.can_const_prop[local] {
+ // If the local can only get propagated in its own block, then we don't have
+ // to worry about multiple assignments, as we'll nuke the const state at the
+ // end of the block anyway, and inside the block we overwrite previous
+ // states as applicable.
+ ConstPropMode::OnlyInsideOwnBlock => {}
+ ConstPropMode::NoPropagation => {}
+ ConstPropMode::OnlyPropagateInto => {}
+ other @ ConstPropMode::FullConstProp => {
+ trace!(
+ "local {:?} can't be propagated because of multiple assignments. Previous state: {:?}",
+ local, other,
+ );
+ *other = ConstPropMode::OnlyInsideOwnBlock;
+ }
+ }
+ }
+ }
+ // Reading constants is allowed an arbitrary number of times
+ NonMutatingUse(NonMutatingUseContext::Copy)
+ | NonMutatingUse(NonMutatingUseContext::Move)
+ | NonMutatingUse(NonMutatingUseContext::Inspect)
+ | NonMutatingUse(NonMutatingUseContext::Projection)
+ | NonUse(_) => {}
+
+ // These could be propagated with a smarter analysis or just some careful thinking about
+ // whether they'd be fine right now.
+ MutatingUse(MutatingUseContext::Yield)
+ | MutatingUse(MutatingUseContext::Drop)
+ | MutatingUse(MutatingUseContext::Retag)
+ // These can't ever be propagated under any scheme, as we can't reason about indirect
+ // mutation.
+ | NonMutatingUse(NonMutatingUseContext::SharedBorrow)
+ | NonMutatingUse(NonMutatingUseContext::ShallowBorrow)
+ | NonMutatingUse(NonMutatingUseContext::UniqueBorrow)
+ | NonMutatingUse(NonMutatingUseContext::AddressOf)
+ | MutatingUse(MutatingUseContext::Borrow)
+ | MutatingUse(MutatingUseContext::AddressOf) => {
+ trace!("local {:?} can't be propagaged because it's used: {:?}", local, context);
+ self.can_const_prop[local] = ConstPropMode::NoPropagation;
+ }
+ }
+ }
+}
+
+impl<'tcx> MutVisitor<'tcx> for ConstPropagator<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_body(&mut self, body: &mut Body<'tcx>) {
+ for (bb, data) in body.basic_blocks_mut().iter_enumerated_mut() {
+ self.visit_basic_block_data(bb, data);
+ }
+ }
+
+ fn visit_operand(&mut self, operand: &mut Operand<'tcx>, location: Location) {
+ self.super_operand(operand, location);
+
+ // Only const prop copies and moves on `mir_opt_level=3` as doing so
+ // currently slightly increases compile time in some cases.
+ if self.tcx.sess.mir_opt_level() >= 3 {
+ self.propagate_operand(operand)
+ }
+ }
+
+ fn visit_constant(&mut self, constant: &mut Constant<'tcx>, location: Location) {
+ trace!("visit_constant: {:?}", constant);
+ self.super_constant(constant, location);
+ self.eval_constant(constant);
+ }
+
+ fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
+ trace!("visit_statement: {:?}", statement);
+ let source_info = statement.source_info;
+ self.source_info = Some(source_info);
+ if let StatementKind::Assign(box (place, ref mut rval)) = statement.kind {
+ let can_const_prop = self.ecx.machine.can_const_prop[place.local];
+ if let Some(()) = self.const_prop(rval, place) {
+ // This will return None if the above `const_prop` invocation only "wrote" a
+ // type whose creation requires no write. E.g. a generator whose initial state
+ // consists solely of uninitialized memory (so it doesn't capture any locals).
+ if let Some(ref value) = self.get_const(place) && self.should_const_prop(value) {
+ trace!("replacing {:?} with {:?}", rval, value);
+ self.replace_with_const(rval, value, source_info);
+ if can_const_prop == ConstPropMode::FullConstProp
+ || can_const_prop == ConstPropMode::OnlyInsideOwnBlock
+ {
+ trace!("propagated into {:?}", place);
+ }
+ }
+ match can_const_prop {
+ ConstPropMode::OnlyInsideOwnBlock => {
+ trace!(
+ "found local restricted to its block. \
+ Will remove it from const-prop after block is finished. Local: {:?}",
+ place.local
+ );
+ }
+ ConstPropMode::OnlyPropagateInto | ConstPropMode::NoPropagation => {
+ trace!("can't propagate into {:?}", place);
+ if place.local != RETURN_PLACE {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ }
+ ConstPropMode::FullConstProp => {}
+ }
+ } else {
+ // Const prop failed, so erase the destination, ensuring that whatever happens
+ // from here on, does not know about the previous value.
+ // This is important in case we have
+ // ```rust
+ // let mut x = 42;
+ // x = SOME_MUTABLE_STATIC;
+ // // x must now be uninit
+ // ```
+ // FIXME: we overzealously erase the entire local, because that's easier to
+ // implement.
+ trace!(
+ "propagation into {:?} failed.
+ Nuking the entire site from orbit, it's the only way to be sure",
+ place,
+ );
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ } else {
+ match statement.kind {
+ StatementKind::SetDiscriminant { ref place, .. } => {
+ match self.ecx.machine.can_const_prop[place.local] {
+ ConstPropMode::FullConstProp | ConstPropMode::OnlyInsideOwnBlock => {
+ if self.use_ecx(|this| this.ecx.statement(statement)).is_some() {
+ trace!("propped discriminant into {:?}", place);
+ } else {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ }
+ ConstPropMode::OnlyPropagateInto | ConstPropMode::NoPropagation => {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ }
+ }
+ StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+ let frame = self.ecx.frame_mut();
+ frame.locals[local].value =
+ if let StatementKind::StorageLive(_) = statement.kind {
+ LocalValue::Live(interpret::Operand::Immediate(
+ interpret::Immediate::Uninit,
+ ))
+ } else {
+ LocalValue::Dead
+ };
+ }
+ _ => {}
+ }
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
+ let source_info = terminator.source_info;
+ self.source_info = Some(source_info);
+ self.super_terminator(terminator, location);
+ match &mut terminator.kind {
+ TerminatorKind::Assert { expected, ref mut cond, .. } => {
+ if let Some(ref value) = self.eval_operand(&cond) {
+ trace!("assertion on {:?} should be {:?}", value, expected);
+ let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
+ let value_const = self.ecx.read_scalar(&value).unwrap();
+ if expected != value_const {
+ // Poison all places this operand references so that further code
+ // doesn't use the invalid value
+ match cond {
+ Operand::Move(ref place) | Operand::Copy(ref place) => {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ Operand::Constant(_) => {}
+ }
+ } else {
+ if self.should_const_prop(value) {
+ if let ScalarMaybeUninit::Scalar(scalar) = value_const {
+ *cond = self.operand_from_scalar(
+ scalar,
+ self.tcx.types.bool,
+ source_info.span,
+ );
+ }
+ }
+ }
+ }
+ }
+ TerminatorKind::SwitchInt { ref mut discr, .. } => {
+ // FIXME: This is currently redundant with `visit_operand`, but sadly
+ // always visiting operands currently causes a perf regression in LLVM codegen, so
+                // `visit_operand` currently only propagates places at `mir_opt_level=4`.
+ self.propagate_operand(discr)
+ }
+ // None of these have Operands to const-propagate.
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::InlineAsm { .. } => {}
+            // Every argument in our function calls has already been propagated in `visit_operand`.
+ //
+            // NOTE: LLVM codegen gives slight performance regressions with this, so it is
+            // gated on `mir_opt_level=3`.
+ TerminatorKind::Call { .. } => {}
+ }
+
+ // We remove all Locals which are restricted in propagation to their containing blocks and
+ // which were modified in the current block.
+ // Take it out of the ecx so we can get a mutable reference to the ecx for `remove_const`.
+ let mut locals = std::mem::take(&mut self.ecx.machine.written_only_inside_own_block_locals);
+ for &local in locals.iter() {
+ Self::remove_const(&mut self.ecx, local);
+ }
+ locals.clear();
+ // Put it back so we reuse the heap of the storage
+ self.ecx.machine.written_only_inside_own_block_locals = locals;
+ if cfg!(debug_assertions) {
+ // Ensure we are correctly erasing locals with the non-debug-assert logic.
+ for local in self.ecx.machine.only_propagate_inside_block_locals.iter() {
+ assert!(
+ self.get_const(local.into()).is_none()
+ || self
+ .layout_of(self.local_decls[local].ty)
+ .map_or(true, |layout| layout.is_zst())
+ )
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/const_prop_lint.rs b/compiler/rustc_mir_transform/src/const_prop_lint.rs
new file mode 100644
index 000000000..c2ea55af4
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/const_prop_lint.rs
@@ -0,0 +1,734 @@
+//! Propagates constants for early reporting of statically known
+//! assertion failures
+
+use crate::const_prop::CanConstProp;
+use crate::const_prop::ConstPropMachine;
+use crate::const_prop::ConstPropMode;
+use crate::MirLint;
+use rustc_const_eval::const_eval::ConstEvalErr;
+use rustc_const_eval::interpret::{
+ self, InterpCx, InterpResult, LocalState, LocalValue, MemoryKind, OpTy, Scalar,
+ ScalarMaybeUninit, StackPopCleanup,
+};
+use rustc_hir::def::DefKind;
+use rustc_hir::HirId;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{
+ AssertKind, BinOp, Body, Constant, ConstantKind, Local, LocalDecl, Location, Operand, Place,
+ Rvalue, SourceInfo, SourceScope, SourceScopeData, Statement, StatementKind, Terminator,
+ TerminatorKind, UnOp, RETURN_PLACE,
+};
+use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
+use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::{
+ self, ConstInt, ConstKind, Instance, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitable,
+};
+use rustc_session::lint;
+use rustc_span::Span;
+use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
+use rustc_trait_selection::traits;
+use std::cell::Cell;
+
+/// The maximum number of bytes that we'll allocate space for a local or the return value.
+/// Needed for #66397, because otherwise we eval into large places and that can cause OOM or just
+/// severely regress performance.
+const MAX_ALLOC_LIMIT: u64 = 1024;
+
+pub struct ConstProp;
+
+impl<'tcx> MirLint<'tcx> for ConstProp {
+ fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ // will be evaluated by miri and produce its errors there
+ if body.source.promoted.is_some() {
+ return;
+ }
+
+ let def_id = body.source.def_id().expect_local();
+ let is_fn_like = tcx.def_kind(def_id).is_fn_like();
+ let is_assoc_const = tcx.def_kind(def_id) == DefKind::AssocConst;
+
+ // Only run const prop on functions, methods, closures and associated constants
+ if !is_fn_like && !is_assoc_const {
+ // skip anon_const/statics/consts because they'll be evaluated by miri anyway
+ trace!("ConstProp skipped for {:?}", def_id);
+ return;
+ }
+
+ let is_generator = tcx.type_of(def_id.to_def_id()).is_generator();
+ // FIXME(welseywiser) const prop doesn't work on generators because of query cycles
+ // computing their layout.
+ if is_generator {
+ trace!("ConstProp skipped for generator {:?}", def_id);
+ return;
+ }
+
+ // Check if it's even possible to satisfy the 'where' clauses
+ // for this item.
+ // This branch will never be taken for any normal function.
+        // However, it's possible to use `#![feature(trivial_bounds)]` to write
+ // a function with impossible to satisfy clauses, e.g.:
+ // `fn foo() where String: Copy {}`
+ //
+ // We don't usually need to worry about this kind of case,
+ // since we would get a compilation error if the user tried
+ // to call it. However, since we can do const propagation
+ // even without any calls to the function, we need to make
+ // sure that it even makes sense to try to evaluate the body.
+ // If there are unsatisfiable where clauses, then all bets are
+ // off, and we just give up.
+ //
+ // We manually filter the predicates, skipping anything that's not
+ // "global". We are in a potentially generic context
+ // (e.g. we are evaluating a function without substituting generic
+        // parameters), so this filtering serves two purposes:
+ //
+ // 1. We skip evaluating any predicates that we would
+        // never be able to prove are unsatisfiable (e.g. `<T as Foo>`).
+ // 2. We avoid trying to normalize predicates involving generic
+ // parameters (e.g. `<T as Foo>::MyItem`). This can confuse
+ // the normalization code (leading to cycle errors), since
+ // it's usually never invoked in this way.
+ let predicates = tcx
+ .predicates_of(def_id.to_def_id())
+ .predicates
+ .iter()
+ .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None });
+ if traits::impossible_predicates(
+ tcx,
+ traits::elaborate_predicates(tcx, predicates).map(|o| o.predicate).collect(),
+ ) {
+ trace!("ConstProp skipped for {:?}: found unsatisfiable predicates", def_id);
+ return;
+ }
+
+ trace!("ConstProp starting for {:?}", def_id);
+
+ let dummy_body = &Body::new(
+ body.source,
+ body.basic_blocks().clone(),
+ body.source_scopes.clone(),
+ body.local_decls.clone(),
+ Default::default(),
+ body.arg_count,
+ Default::default(),
+ body.span,
+ body.generator_kind(),
+ body.tainted_by_errors,
+ );
+
+ // FIXME(oli-obk, eddyb) Optimize locals (or even local paths) to hold
+ // constants, instead of just checking for const-folding succeeding.
+ // That would require a uniform one-def no-mutation analysis
+ // and RPO (or recursing when needing the value of a local).
+ let mut optimization_finder = ConstPropagator::new(body, dummy_body, tcx);
+ optimization_finder.visit_body(body);
+
+ trace!("ConstProp done for {:?}", def_id);
+ }
+}
+
+/// Finds optimization opportunities on the MIR.
+struct ConstPropagator<'mir, 'tcx> {
+ ecx: InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ source_scopes: &'mir IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ local_decls: &'mir IndexVec<Local, LocalDecl<'tcx>>,
+ // Because we have `MutVisitor` we can't obtain the `SourceInfo` from a `Location`. So we store
+ // the last known `SourceInfo` here and just keep revisiting it.
+ source_info: Option<SourceInfo>,
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for ConstPropagator<'_, 'tcx> {
+ type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
+ err
+ }
+}
+
+impl HasDataLayout for ConstPropagator<'_, '_> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.tcx.data_layout
+ }
+}
+
+impl<'tcx> ty::layout::HasTyCtxt<'tcx> for ConstPropagator<'_, 'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx> ty::layout::HasParamEnv<'tcx> for ConstPropagator<'_, 'tcx> {
+ #[inline]
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+}
+
+impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
+ fn new(
+ body: &Body<'tcx>,
+ dummy_body: &'mir Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ ) -> ConstPropagator<'mir, 'tcx> {
+ let def_id = body.source.def_id();
+ let substs = &InternalSubsts::identity_for_item(tcx, def_id);
+ let param_env = tcx.param_env_reveal_all_normalized(def_id);
+
+ let can_const_prop = CanConstProp::check(tcx, param_env, body);
+ let mut only_propagate_inside_block_locals = BitSet::new_empty(can_const_prop.len());
+ for (l, mode) in can_const_prop.iter_enumerated() {
+ if *mode == ConstPropMode::OnlyInsideOwnBlock {
+ only_propagate_inside_block_locals.insert(l);
+ }
+ }
+ let mut ecx = InterpCx::new(
+ tcx,
+ tcx.def_span(def_id),
+ param_env,
+ ConstPropMachine::new(only_propagate_inside_block_locals, can_const_prop),
+ );
+
+ let ret_layout = ecx
+ .layout_of(body.bound_return_ty().subst(tcx, substs))
+ .ok()
+ // Don't bother allocating memory for large values.
+ // I don't know how return types can seem to be unsized but this happens in the
+ // `type/type-unsatisfiable.rs` test.
+ .filter(|ret_layout| {
+ !ret_layout.is_unsized() && ret_layout.size < Size::from_bytes(MAX_ALLOC_LIMIT)
+ })
+ .unwrap_or_else(|| ecx.layout_of(tcx.types.unit).unwrap());
+
+ let ret = ecx
+ .allocate(ret_layout, MemoryKind::Stack)
+ .expect("couldn't perform small allocation")
+ .into();
+
+ ecx.push_stack_frame(
+ Instance::new(def_id, substs),
+ dummy_body,
+ &ret,
+ StackPopCleanup::Root { cleanup: false },
+ )
+ .expect("failed to push initial stack frame");
+
+ ConstPropagator {
+ ecx,
+ tcx,
+ param_env,
+ source_scopes: &dummy_body.source_scopes,
+ local_decls: &dummy_body.local_decls,
+ source_info: None,
+ }
+ }
+
+ fn get_const(&self, place: Place<'tcx>) -> Option<OpTy<'tcx>> {
+ let op = match self.ecx.eval_place_to_op(place, None) {
+ Ok(op) => op,
+ Err(e) => {
+ trace!("get_const failed: {}", e);
+ return None;
+ }
+ };
+
+ // Try to read the local as an immediate so that if it is representable as a scalar, we can
+ // handle it as such, but otherwise, just return the value as is.
+ Some(match self.ecx.read_immediate_raw(&op, /*force*/ false) {
+ Ok(Ok(imm)) => imm.into(),
+ _ => op,
+ })
+ }
+
+    /// Remove `local` from the pool of `Locals`. Allows writing to it,
+    /// but not reading from it anymore.
+ fn remove_const(ecx: &mut InterpCx<'mir, 'tcx, ConstPropMachine<'mir, 'tcx>>, local: Local) {
+ ecx.frame_mut().locals[local] = LocalState {
+ value: LocalValue::Live(interpret::Operand::Immediate(interpret::Immediate::Uninit)),
+ layout: Cell::new(None),
+ };
+ }
+
+ fn lint_root(&self, source_info: SourceInfo) -> Option<HirId> {
+ source_info.scope.lint_root(self.source_scopes)
+ }
+
+ fn use_ecx<F, T>(&mut self, source_info: SourceInfo, f: F) -> Option<T>
+ where
+ F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
+ {
+ // Overwrite the PC -- whatever the interpreter does to it does not make any sense anyway.
+ self.ecx.frame_mut().loc = Err(source_info.span);
+ match f(self) {
+ Ok(val) => Some(val),
+ Err(error) => {
+ trace!("InterpCx operation failed: {:?}", error);
+ // Some errors shouldn't come up because creating them causes
+ // an allocation, which we should avoid. When that happens,
+ // dedicated error variants should be introduced instead.
+ assert!(
+ !error.kind().formatted_string(),
+ "const-prop encountered formatting error: {}",
+ error
+ );
+ None
+ }
+ }
+ }
+
+ /// Returns the value, if any, of evaluating `c`.
+ fn eval_constant(&mut self, c: &Constant<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+ // FIXME we need to revisit this for #67176
+ if c.needs_subst() {
+ return None;
+ }
+
+ match self.ecx.mir_const_to_op(&c.literal, None) {
+ Ok(op) => Some(op),
+ Err(error) => {
+ let tcx = self.ecx.tcx.at(c.span);
+ let err = ConstEvalErr::new(&self.ecx, error, Some(c.span));
+ if let Some(lint_root) = self.lint_root(source_info) {
+ let lint_only = match c.literal {
+ ConstantKind::Ty(ct) => match ct.kind() {
+ // Promoteds must lint and not error as the user didn't ask for them
+ ConstKind::Unevaluated(ty::Unevaluated {
+ def: _,
+ substs: _,
+ promoted: Some(_),
+ }) => true,
+                            // For backwards compatibility we cannot report hard errors in unused
+ // generic functions using associated constants of the generic parameters.
+ _ => c.literal.needs_subst(),
+ },
+ ConstantKind::Val(_, ty) => ty.needs_subst(),
+ };
+ if lint_only {
+                        // For backwards compatibility we cannot report hard errors in unused
+ // generic functions using associated constants of the generic parameters.
+ err.report_as_lint(tcx, "erroneous constant used", lint_root, Some(c.span));
+ } else {
+ err.report_as_error(tcx, "erroneous constant used");
+ }
+ } else {
+ err.report_as_error(tcx, "erroneous constant used");
+ }
+ None
+ }
+ }
+ }
+
+ /// Returns the value, if any, of evaluating `place`.
+ fn eval_place(&mut self, place: Place<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+ trace!("eval_place(place={:?})", place);
+ self.use_ecx(source_info, |this| this.ecx.eval_place_to_op(place, None))
+ }
+
+ /// Returns the value, if any, of evaluating `op`. Calls upon `eval_constant`
+ /// or `eval_place`, depending on the variant of `Operand` used.
+ fn eval_operand(&mut self, op: &Operand<'tcx>, source_info: SourceInfo) -> Option<OpTy<'tcx>> {
+ match *op {
+ Operand::Constant(ref c) => self.eval_constant(c, source_info),
+ Operand::Move(place) | Operand::Copy(place) => self.eval_place(place, source_info),
+ }
+ }
+
+ fn report_assert_as_lint(
+ &self,
+ lint: &'static lint::Lint,
+ source_info: SourceInfo,
+ message: &'static str,
+ panic: AssertKind<impl std::fmt::Debug>,
+ ) {
+ if let Some(lint_root) = self.lint_root(source_info) {
+ self.tcx.struct_span_lint_hir(lint, lint_root, source_info.span, |lint| {
+ let mut err = lint.build(message);
+ err.span_label(source_info.span, format!("{:?}", panic));
+ err.emit();
+ });
+ }
+ }
+
+ fn check_unary_op(
+ &mut self,
+ op: UnOp,
+ arg: &Operand<'tcx>,
+ source_info: SourceInfo,
+ ) -> Option<()> {
+ if let (val, true) = self.use_ecx(source_info, |this| {
+ let val = this.ecx.read_immediate(&this.ecx.eval_operand(arg, None)?)?;
+ let (_res, overflow, _ty) = this.ecx.overflowing_unary_op(op, &val)?;
+ Ok((val, overflow))
+ })? {
+ // `AssertKind` only has an `OverflowNeg` variant, so make sure that is
+ // appropriate to use.
+ assert_eq!(op, UnOp::Neg, "Neg is the only UnOp that can overflow");
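+            // For example, negating `i32::MIN` overflows, so an assignment like
+            // `let y = -x;` where `x` is known to be `i32::MIN` reaches the lint below.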
+ self.report_assert_as_lint(
+ lint::builtin::ARITHMETIC_OVERFLOW,
+ source_info,
+ "this arithmetic operation will overflow",
+ AssertKind::OverflowNeg(val.to_const_int()),
+ );
+ return None;
+ }
+
+ Some(())
+ }
+
+ fn check_binary_op(
+ &mut self,
+ op: BinOp,
+ left: &Operand<'tcx>,
+ right: &Operand<'tcx>,
+ source_info: SourceInfo,
+ ) -> Option<()> {
+ let r = self.use_ecx(source_info, |this| {
+ this.ecx.read_immediate(&this.ecx.eval_operand(right, None)?)
+ });
+ let l = self.use_ecx(source_info, |this| {
+ this.ecx.read_immediate(&this.ecx.eval_operand(left, None)?)
+ });
+ // Check for exceeding shifts *even if* we cannot evaluate the LHS.
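+        // (For example, `x << 64` on a `u64` overflows for every `x`, so the
+        // right-hand side alone is enough to report the lint.)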
+ if op == BinOp::Shr || op == BinOp::Shl {
+ let r = r.clone()?;
+ // We need the type of the LHS. We cannot use `place_layout` as that is the type
+ // of the result, which for checked binops is not the same!
+ let left_ty = left.ty(self.local_decls, self.tcx);
+ let left_size = self.ecx.layout_of(left_ty).ok()?.size;
+ let right_size = r.layout.size;
+ let r_bits = r.to_scalar().ok();
+ let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
+ if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
+ debug!("check_binary_op: reporting assert for {:?}", source_info);
+ self.report_assert_as_lint(
+ lint::builtin::ARITHMETIC_OVERFLOW,
+ source_info,
+ "this arithmetic operation will overflow",
+ AssertKind::Overflow(
+ op,
+ match l {
+ Some(l) => l.to_const_int(),
+ // Invent a dummy value, the diagnostic ignores it anyway
+ None => ConstInt::new(
+ ScalarInt::try_from_uint(1_u8, left_size).unwrap(),
+ left_ty.is_signed(),
+ left_ty.is_ptr_sized_integral(),
+ ),
+ },
+ r.to_const_int(),
+ ),
+ );
+ return None;
+ }
+ }
+
+ if let (Some(l), Some(r)) = (l, r) {
+ // The remaining operators are handled through `overflowing_binary_op`.
+ if self.use_ecx(source_info, |this| {
+ let (_res, overflow, _ty) = this.ecx.overflowing_binary_op(op, &l, &r)?;
+ Ok(overflow)
+ })? {
+ self.report_assert_as_lint(
+ lint::builtin::ARITHMETIC_OVERFLOW,
+ source_info,
+ "this arithmetic operation will overflow",
+ AssertKind::Overflow(op, l.to_const_int(), r.to_const_int()),
+ );
+ return None;
+ }
+ }
+ Some(())
+ }
+
+ fn const_prop(
+ &mut self,
+ rvalue: &Rvalue<'tcx>,
+ source_info: SourceInfo,
+ place: Place<'tcx>,
+ ) -> Option<()> {
+ // Perform any special handling for specific Rvalue types.
+ // Generally, checks here fall into one of two categories:
+ // 1. Additional checking to provide useful lints to the user
+ // - In this case, we will do some validation and then fall through to the
+ // end of the function which evals the assignment.
+ // 2. Working around bugs in other parts of the compiler
+ // - In this case, we'll return `None` from this function to stop evaluation.
+ match rvalue {
+ // Additional checking: give lints to the user if an overflow would occur.
+ // We do this here and not in the `Assert` terminator as that terminator is
+ // only sometimes emitted (overflow checks can be disabled), but we want to always
+ // lint.
+ Rvalue::UnaryOp(op, arg) => {
+ trace!("checking UnaryOp(op = {:?}, arg = {:?})", op, arg);
+ self.check_unary_op(*op, arg, source_info)?;
+ }
+ Rvalue::BinaryOp(op, box (left, right)) => {
+ trace!("checking BinaryOp(op = {:?}, left = {:?}, right = {:?})", op, left, right);
+ self.check_binary_op(*op, left, right, source_info)?;
+ }
+ Rvalue::CheckedBinaryOp(op, box (left, right)) => {
+ trace!(
+ "checking CheckedBinaryOp(op = {:?}, left = {:?}, right = {:?})",
+ op,
+ left,
+ right
+ );
+ self.check_binary_op(*op, left, right, source_info)?;
+ }
+
+ // Do not try creating references (#67862)
+ Rvalue::AddressOf(_, place) | Rvalue::Ref(_, _, place) => {
+ trace!("skipping AddressOf | Ref for {:?}", place);
+
+ // This may be creating mutable references or immutable references to cells.
+ // If that happens, the pointed to value could be mutated via that reference.
+ // Since we aren't tracking references, the const propagator loses track of what
+ // value the local has right now.
+ // Thus, all locals that have their reference taken
+ // must not take part in propagation.
+ Self::remove_const(&mut self.ecx, place.local);
+
+ return None;
+ }
+ Rvalue::ThreadLocalRef(def_id) => {
+ trace!("skipping ThreadLocalRef({:?})", def_id);
+
+ return None;
+ }
+
+ // There's no other checking to do at this time.
+ Rvalue::Aggregate(..)
+ | Rvalue::Use(..)
+ | Rvalue::CopyForDeref(..)
+ | Rvalue::Repeat(..)
+ | Rvalue::Len(..)
+ | Rvalue::Cast(..)
+ | Rvalue::ShallowInitBox(..)
+ | Rvalue::Discriminant(..)
+ | Rvalue::NullaryOp(..) => {}
+ }
+
+ // FIXME we need to revisit this for #67176
+ if rvalue.needs_subst() {
+ return None;
+ }
+
+ self.use_ecx(source_info, |this| this.ecx.eval_rvalue_into_place(rvalue, place))
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
+ fn visit_body(&mut self, body: &Body<'tcx>) {
+ for (bb, data) in body.basic_blocks().iter_enumerated() {
+ self.visit_basic_block_data(bb, data);
+ }
+ }
+
+ fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
+ self.super_operand(operand, location);
+ }
+
+ fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+ trace!("visit_constant: {:?}", constant);
+ self.super_constant(constant, location);
+ self.eval_constant(constant, self.source_info.unwrap());
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ trace!("visit_statement: {:?}", statement);
+ let source_info = statement.source_info;
+ self.source_info = Some(source_info);
+ if let StatementKind::Assign(box (place, ref rval)) = statement.kind {
+ let can_const_prop = self.ecx.machine.can_const_prop[place.local];
+ if let Some(()) = self.const_prop(rval, source_info, place) {
+ match can_const_prop {
+ ConstPropMode::OnlyInsideOwnBlock => {
+ trace!(
+ "found local restricted to its block. \
+ Will remove it from const-prop after block is finished. Local: {:?}",
+ place.local
+ );
+ }
+ ConstPropMode::OnlyPropagateInto | ConstPropMode::NoPropagation => {
+ trace!("can't propagate into {:?}", place);
+ if place.local != RETURN_PLACE {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ }
+ ConstPropMode::FullConstProp => {}
+ }
+ } else {
+ // Const prop failed, so erase the destination, ensuring that whatever happens
+ // from here on, does not know about the previous value.
+ // This is important in case we have
+ // ```rust
+ // let mut x = 42;
+ // x = SOME_MUTABLE_STATIC;
+ // // x must now be uninit
+ // ```
+ // FIXME: we overzealously erase the entire local, because that's easier to
+ // implement.
+ trace!(
+ "propagation into {:?} failed.
+ Nuking the entire site from orbit, it's the only way to be sure",
+ place,
+ );
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ } else {
+ match statement.kind {
+ StatementKind::SetDiscriminant { ref place, .. } => {
+ match self.ecx.machine.can_const_prop[place.local] {
+ ConstPropMode::FullConstProp | ConstPropMode::OnlyInsideOwnBlock => {
+ if self
+ .use_ecx(source_info, |this| this.ecx.statement(statement))
+ .is_some()
+ {
+ trace!("propped discriminant into {:?}", place);
+ } else {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ }
+ ConstPropMode::OnlyPropagateInto | ConstPropMode::NoPropagation => {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ }
+ }
+ StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+ let frame = self.ecx.frame_mut();
+ frame.locals[local].value =
+ if let StatementKind::StorageLive(_) = statement.kind {
+ LocalValue::Live(interpret::Operand::Immediate(
+ interpret::Immediate::Uninit,
+ ))
+ } else {
+ LocalValue::Dead
+ };
+ }
+ _ => {}
+ }
+ }
+
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ let source_info = terminator.source_info;
+ self.source_info = Some(source_info);
+ self.super_terminator(terminator, location);
+ match &terminator.kind {
+ TerminatorKind::Assert { expected, ref msg, ref cond, .. } => {
+ if let Some(ref value) = self.eval_operand(&cond, source_info) {
+ trace!("assertion on {:?} should be {:?}", value, expected);
+ let expected = ScalarMaybeUninit::from(Scalar::from_bool(*expected));
+ let value_const = self.ecx.read_scalar(&value).unwrap();
+ if expected != value_const {
+ enum DbgVal<T> {
+ Val(T),
+ Underscore,
+ }
+ impl<T: std::fmt::Debug> std::fmt::Debug for DbgVal<T> {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::Val(val) => val.fmt(fmt),
+ Self::Underscore => fmt.write_str("_"),
+ }
+ }
+ }
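+                            // `DbgVal` renders operands we could not evaluate
+                            // as `_`, so e.g. a bounds check whose length is
+                            // known but whose index is not still produces a
+                            // readable diagnostic.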
+ let mut eval_to_int = |op| {
+ // This can be `None` if the lhs wasn't const propagated and we just
+ // triggered the assert on the value of the rhs.
+ self.eval_operand(op, source_info).map_or(DbgVal::Underscore, |op| {
+ DbgVal::Val(self.ecx.read_immediate(&op).unwrap().to_const_int())
+ })
+ };
+ let msg = match msg {
+ AssertKind::DivisionByZero(op) => {
+ Some(AssertKind::DivisionByZero(eval_to_int(op)))
+ }
+ AssertKind::RemainderByZero(op) => {
+ Some(AssertKind::RemainderByZero(eval_to_int(op)))
+ }
+ AssertKind::Overflow(bin_op @ (BinOp::Div | BinOp::Rem), op1, op2) => {
+                                // Division overflow is *UB* in the MIR, and different from the
+                                // other overflow checks.
+ Some(AssertKind::Overflow(
+ *bin_op,
+ eval_to_int(op1),
+ eval_to_int(op2),
+ ))
+ }
+ AssertKind::BoundsCheck { ref len, ref index } => {
+ let len = eval_to_int(len);
+ let index = eval_to_int(index);
+ Some(AssertKind::BoundsCheck { len, index })
+ }
+ // Remaining overflow errors are already covered by checks on the binary operators.
+ AssertKind::Overflow(..) | AssertKind::OverflowNeg(_) => None,
+ // Need proper const propagator for these.
+ _ => None,
+ };
+ // Poison all places this operand references so that further code
+ // doesn't use the invalid value
+ match cond {
+ Operand::Move(ref place) | Operand::Copy(ref place) => {
+ Self::remove_const(&mut self.ecx, place.local);
+ }
+ Operand::Constant(_) => {}
+ }
+ if let Some(msg) = msg {
+ self.report_assert_as_lint(
+ lint::builtin::UNCONDITIONAL_PANIC,
+ source_info,
+ "this operation will panic at runtime",
+ msg,
+ );
+ }
+ }
+ }
+ }
+ // None of these have Operands to const-propagate.
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::InlineAsm { .. } => {}
+ }
+
+ // We remove all Locals which are restricted in propagation to their containing blocks and
+ // which were modified in the current block.
+ // Take it out of the ecx so we can get a mutable reference to the ecx for `remove_const`.
+ let mut locals = std::mem::take(&mut self.ecx.machine.written_only_inside_own_block_locals);
+ for &local in locals.iter() {
+ Self::remove_const(&mut self.ecx, local);
+ }
+ locals.clear();
+ // Put it back so we reuse the heap of the storage
+ self.ecx.machine.written_only_inside_own_block_locals = locals;
+ if cfg!(debug_assertions) {
+ // Ensure we are correctly erasing locals with the non-debug-assert logic.
+ for local in self.ecx.machine.only_propagate_inside_block_locals.iter() {
+ assert!(
+ self.get_const(local.into()).is_none()
+ || self
+ .layout_of(self.local_decls[local].ty)
+ .map_or(true, |layout| layout.is_zst())
+ )
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs
new file mode 100644
index 000000000..45de0c280
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/counters.rs
@@ -0,0 +1,614 @@
+use super::Error;
+
+use super::debug;
+use super::graph;
+use super::spans;
+
+use debug::{DebugCounters, NESTED_INDENT};
+use graph::{BasicCoverageBlock, BcbBranch, CoverageGraph, TraverseCoverageGraphWithLoops};
+use spans::CoverageSpan;
+
+use rustc_data_structures::graph::WithNumNodes;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::coverage::*;
+
+/// Manages the counter and expression indexes/IDs to generate `CoverageKind` components for MIR
+/// `Coverage` statements.
+pub(super) struct CoverageCounters {
+ function_source_hash: u64,
+ next_counter_id: u32,
+ num_expressions: u32,
+ pub debug_counters: DebugCounters,
+}
+
+impl CoverageCounters {
+ pub fn new(function_source_hash: u64) -> Self {
+ Self {
+ function_source_hash,
+ next_counter_id: CounterValueReference::START.as_u32(),
+ num_expressions: 0,
+ debug_counters: DebugCounters::new(),
+ }
+ }
+
+ /// Activate the `DebugCounters` data structures, to provide additional debug formatting
+ /// features when formatting `CoverageKind` (counter) values.
+ pub fn enable_debug(&mut self) {
+ self.debug_counters.enable();
+ }
+
+ /// Makes `CoverageKind` `Counter`s and `Expressions` for the `BasicCoverageBlock`s directly or
+ /// indirectly associated with `CoverageSpans`, and returns additional `Expression`s
+ /// representing intermediate values.
+ pub fn make_bcb_counters(
+ &mut self,
+ basic_coverage_blocks: &mut CoverageGraph,
+ coverage_spans: &[CoverageSpan],
+ ) -> Result<Vec<CoverageKind>, Error> {
+ let mut bcb_counters = BcbCounters::new(self, basic_coverage_blocks);
+ bcb_counters.make_bcb_counters(coverage_spans)
+ }
+
+ fn make_counter<F>(&mut self, debug_block_label_fn: F) -> CoverageKind
+ where
+ F: Fn() -> Option<String>,
+ {
+ let counter = CoverageKind::Counter {
+ function_source_hash: self.function_source_hash,
+ id: self.next_counter(),
+ };
+ if self.debug_counters.is_enabled() {
+ self.debug_counters.add_counter(&counter, (debug_block_label_fn)());
+ }
+ counter
+ }
+
+ fn make_expression<F>(
+ &mut self,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ debug_block_label_fn: F,
+ ) -> CoverageKind
+ where
+ F: Fn() -> Option<String>,
+ {
+ let id = self.next_expression();
+ let expression = CoverageKind::Expression { id, lhs, op, rhs };
+ if self.debug_counters.is_enabled() {
+ self.debug_counters.add_counter(&expression, (debug_block_label_fn)());
+ }
+ expression
+ }
+
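+ /// Makes an `Expression` that adds `ZERO` to the given counter/expression operand,
+ /// yielding a new expression ID that evaluates to the same count.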
+ pub fn make_identity_counter(&mut self, counter_operand: ExpressionOperandId) -> CoverageKind {
+ let some_debug_block_label = if self.debug_counters.is_enabled() {
+ self.debug_counters.some_block_label(counter_operand).cloned()
+ } else {
+ None
+ };
+ self.make_expression(counter_operand, Op::Add, ExpressionOperandId::ZERO, || {
+ some_debug_block_label.clone()
+ })
+ }
+
+ /// Counter IDs start from one and go up.
+ fn next_counter(&mut self) -> CounterValueReference {
+ assert!(self.next_counter_id < u32::MAX - self.num_expressions);
+ let next = self.next_counter_id;
+ self.next_counter_id += 1;
+ CounterValueReference::from(next)
+ }
+
+ /// Expression IDs start from u32::MAX and go down because an Expression can reference
+ /// (add or subtract the counts of) both Counter regions and Expression regions. The counter
+ /// expression operand IDs must be unique across both types.
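+ /// For example, a function that needs three counters and two expressions would use
+ /// counter IDs `1, 2, 3` and expression IDs `u32::MAX, u32::MAX - 1`, so the two
+ /// ID ranges cannot collide.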
+ fn next_expression(&mut self) -> InjectedExpressionId {
+ assert!(self.next_counter_id < u32::MAX - self.num_expressions);
+ let next = u32::MAX - self.num_expressions;
+ self.num_expressions += 1;
+ InjectedExpressionId::from(next)
+ }
+}
+
+/// Traverse the `CoverageGraph` and add either a `Counter` or `Expression` to every BCB, to be
+/// injected with `CoverageSpan`s. `Expressions` have no runtime overhead, so if a viable expression
+/// (adding or subtracting two other counters or expressions) can compute the same result as an
+/// embedded counter, an `Expression` should be used.
+struct BcbCounters<'a> {
+ coverage_counters: &'a mut CoverageCounters,
+ basic_coverage_blocks: &'a mut CoverageGraph,
+}
+
+impl<'a> BcbCounters<'a> {
+ fn new(
+ coverage_counters: &'a mut CoverageCounters,
+ basic_coverage_blocks: &'a mut CoverageGraph,
+ ) -> Self {
+ Self { coverage_counters, basic_coverage_blocks }
+ }
+
+ /// If two `BasicCoverageBlock`s branch from another `BasicCoverageBlock`, one of the branches
+ /// can be counted by `Expression` by subtracting the other branch from the branching
+ /// block. Otherwise, the `BasicCoverageBlock` that is executed the least should have the `Counter`.
+ /// One way to predict which branch executes the least is by considering loops. A loop is exited
+ /// at a branch, so the branch that jumps to a `BasicCoverageBlock` outside the loop is almost
+ /// always executed less than the branch that does not exit the loop.
+ ///
+ /// Returns any non-code-span expressions created to represent intermediate values (such as to
+ /// add two counters so the result can be subtracted from another counter), or an Error with
+ /// message for subsequent debugging.
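+ ///
+ /// For example, if a branching BCB is counted by `c0` and one of its two branches is
+ /// counted by `c1`, the other branch can be assigned the `Expression` `c0 - c1`
+ /// instead of a second physical counter.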
+ fn make_bcb_counters(
+ &mut self,
+ coverage_spans: &[CoverageSpan],
+ ) -> Result<Vec<CoverageKind>, Error> {
+ debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock");
+ let num_bcbs = self.basic_coverage_blocks.num_nodes();
+ let mut collect_intermediate_expressions = Vec::with_capacity(num_bcbs);
+
+ let mut bcbs_with_coverage = BitSet::new_empty(num_bcbs);
+ for covspan in coverage_spans {
+ bcbs_with_coverage.insert(covspan.bcb);
+ }
+
+ // Walk the `CoverageGraph`. For each `BasicCoverageBlock` node with an associated
+ // `CoverageSpan`, add a counter. If the `BasicCoverageBlock` branches, add a counter or
+ // expression to each branch `BasicCoverageBlock` (if the branch BCB has only one incoming
+ // edge) or edge from the branching BCB to the branch BCB (if the branch BCB has multiple
+ // incoming edges).
+ //
+ // The `TraverseCoverageGraphWithLoops` traversal ensures that, when a loop is encountered,
+ // all `BasicCoverageBlock` nodes in the loop are visited before visiting any node outside
+ // the loop. The `traversal` state includes a `context_stack`, providing a way to know if
+ // the current BCB is in one or more nested loops or not.
+ let mut traversal = TraverseCoverageGraphWithLoops::new(&self.basic_coverage_blocks);
+ while let Some(bcb) = traversal.next(self.basic_coverage_blocks) {
+ if bcbs_with_coverage.contains(bcb) {
+ debug!("{:?} has at least one `CoverageSpan`. Get or make its counter", bcb);
+ let branching_counter_operand =
+ self.get_or_make_counter_operand(bcb, &mut collect_intermediate_expressions)?;
+
+ if self.bcb_needs_branch_counters(bcb) {
+ self.make_branch_counters(
+ &mut traversal,
+ bcb,
+ branching_counter_operand,
+ &mut collect_intermediate_expressions,
+ )?;
+ }
+ } else {
+ debug!(
+ "{:?} does not have any `CoverageSpan`s. A counter will only be added if \
+ and when a covered BCB has an expression dependency.",
+ bcb,
+ );
+ }
+ }
+
+ if traversal.is_complete() {
+ Ok(collect_intermediate_expressions)
+ } else {
+ Error::from_string(format!(
+ "`TraverseCoverageGraphWithLoops` missed some `BasicCoverageBlock`s: {:?}",
+ traversal.unvisited(),
+ ))
+ }
+ }
+
+ fn make_branch_counters(
+ &mut self,
+ traversal: &mut TraverseCoverageGraphWithLoops,
+ branching_bcb: BasicCoverageBlock,
+ branching_counter_operand: ExpressionOperandId,
+ collect_intermediate_expressions: &mut Vec<CoverageKind>,
+ ) -> Result<(), Error> {
+ let branches = self.bcb_branches(branching_bcb);
+ debug!(
+ "{:?} has some branch(es) without counters:\n {}",
+ branching_bcb,
+ branches
+ .iter()
+ .map(|branch| {
+ format!("{:?}: {:?}", branch, branch.counter(&self.basic_coverage_blocks))
+ })
+ .collect::<Vec<_>>()
+ .join("\n "),
+ );
+
+ // Use the `traversal` state to decide if a subset of the branches exit a loop, making it
+ // likely that branch is executed less than branches that do not exit the same loop. In this
+ // case, any branch that does not exit the loop (and has not already been assigned a
+ // counter) should be counted by expression, if possible. (If a preferred expression branch
+ // is not selected based on the loop context, select any branch without an existing
+ // counter.)
+ let expression_branch = self.choose_preferred_expression_branch(traversal, &branches);
+
+ // Assign a Counter or Expression to each branch, plus additional `Expression`s, as needed,
+ // to sum up intermediate results.
+ let mut some_sumup_counter_operand = None;
+ for branch in branches {
+ // Skip the selected `expression_branch`, if any. Its expression will be assigned after
+ // all others.
+ if branch != expression_branch {
+ let branch_counter_operand = if branch.is_only_path_to_target() {
+ debug!(
+ " {:?} has only one incoming edge (from {:?}), so adding a \
+ counter",
+ branch, branching_bcb
+ );
+ self.get_or_make_counter_operand(
+ branch.target_bcb,
+ collect_intermediate_expressions,
+ )?
+ } else {
+ debug!(" {:?} has multiple incoming edges, so adding an edge counter", branch);
+ self.get_or_make_edge_counter_operand(
+ branching_bcb,
+ branch.target_bcb,
+ collect_intermediate_expressions,
+ )?
+ };
+ if let Some(sumup_counter_operand) =
+ some_sumup_counter_operand.replace(branch_counter_operand)
+ {
+ let intermediate_expression = self.coverage_counters.make_expression(
+ branch_counter_operand,
+ Op::Add,
+ sumup_counter_operand,
+ || None,
+ );
+ debug!(
+ " [new intermediate expression: {}]",
+ self.format_counter(&intermediate_expression)
+ );
+ let intermediate_expression_operand = intermediate_expression.as_operand_id();
+ collect_intermediate_expressions.push(intermediate_expression);
+ some_sumup_counter_operand.replace(intermediate_expression_operand);
+ }
+ }
+ }
+
+ // Assign the final expression to the `expression_branch` by subtracting the total of all
+ // other branches from the counter of the branching BCB.
+ let sumup_counter_operand =
+ some_sumup_counter_operand.expect("sumup_counter_operand should have a value");
+ debug!(
+ "Making an expression for the selected expression_branch: {:?} \
+ (expression_branch predecessors: {:?})",
+ expression_branch,
+ self.bcb_predecessors(expression_branch.target_bcb),
+ );
+ let expression = self.coverage_counters.make_expression(
+ branching_counter_operand,
+ Op::Subtract,
+ sumup_counter_operand,
+ || Some(format!("{:?}", expression_branch)),
+ );
+ debug!("{:?} gets an expression: {}", expression_branch, self.format_counter(&expression));
+ let bcb = expression_branch.target_bcb;
+ if expression_branch.is_only_path_to_target() {
+ self.basic_coverage_blocks[bcb].set_counter(expression)?;
+ } else {
+ self.basic_coverage_blocks[bcb].set_edge_counter_from(branching_bcb, expression)?;
+ }
+ Ok(())
+ }
+
+ fn get_or_make_counter_operand(
+ &mut self,
+ bcb: BasicCoverageBlock,
+ collect_intermediate_expressions: &mut Vec<CoverageKind>,
+ ) -> Result<ExpressionOperandId, Error> {
+ self.recursive_get_or_make_counter_operand(bcb, collect_intermediate_expressions, 1)
+ }
+
+ fn recursive_get_or_make_counter_operand(
+ &mut self,
+ bcb: BasicCoverageBlock,
+ collect_intermediate_expressions: &mut Vec<CoverageKind>,
+ debug_indent_level: usize,
+ ) -> Result<ExpressionOperandId, Error> {
+ // If the BCB already has a counter, return it.
+ if let Some(counter_kind) = self.basic_coverage_blocks[bcb].counter() {
+ debug!(
+ "{}{:?} already has a counter: {}",
+ NESTED_INDENT.repeat(debug_indent_level),
+ bcb,
+ self.format_counter(counter_kind),
+ );
+ return Ok(counter_kind.as_operand_id());
+ }
+
+ // A BCB with only one incoming edge gets a simple `Counter` (via `make_counter()`).
+ // Also, a BCB that loops back to itself gets a simple `Counter`. This may indicate the
+ // program results in a tight infinite loop, but it should still compile.
+ let one_path_to_target = self.bcb_has_one_path_to_target(bcb);
+ if one_path_to_target || self.bcb_predecessors(bcb).contains(&bcb) {
+ let counter_kind = self.coverage_counters.make_counter(|| Some(format!("{:?}", bcb)));
+ if one_path_to_target {
+ debug!(
+ "{}{:?} gets a new counter: {}",
+ NESTED_INDENT.repeat(debug_indent_level),
+ bcb,
+ self.format_counter(&counter_kind),
+ );
+ } else {
+ debug!(
+ "{}{:?} has itself as its own predecessor. It can't be part of its own \
+ Expression sum, so it will get its own new counter: {}. (Note, the compiled \
+ code will generate an infinite loop.)",
+ NESTED_INDENT.repeat(debug_indent_level),
+ bcb,
+ self.format_counter(&counter_kind),
+ );
+ }
+ return self.basic_coverage_blocks[bcb].set_counter(counter_kind);
+ }
+
+ // A BCB with multiple incoming edges can compute its count by `Expression`, summing up the
+ // counters and/or expressions of its incoming edges. This will recursively get or create
+ // counters for those incoming edges first, then call `make_expression()` to sum them up,
+ // with additional intermediate expressions as needed.
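+ // For example, incoming edges counted by `e0`, `e1`, and `e2` produce one
+ // intermediate expression `i0 = e1 + e2` and a final expression `e0 + i0`.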
+ let mut predecessors = self.bcb_predecessors(bcb).to_owned().into_iter();
+ debug!(
+ "{}{:?} has multiple incoming edges and will get an expression that sums them up...",
+ NESTED_INDENT.repeat(debug_indent_level),
+ bcb,
+ );
+ let first_edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
+ predecessors.next().unwrap(),
+ bcb,
+ collect_intermediate_expressions,
+ debug_indent_level + 1,
+ )?;
+ let mut some_sumup_edge_counter_operand = None;
+ for predecessor in predecessors {
+ let edge_counter_operand = self.recursive_get_or_make_edge_counter_operand(
+ predecessor,
+ bcb,
+ collect_intermediate_expressions,
+ debug_indent_level + 1,
+ )?;
+ if let Some(sumup_edge_counter_operand) =
+ some_sumup_edge_counter_operand.replace(edge_counter_operand)
+ {
+ let intermediate_expression = self.coverage_counters.make_expression(
+ sumup_edge_counter_operand,
+ Op::Add,
+ edge_counter_operand,
+ || None,
+ );
+ debug!(
+ "{}new intermediate expression: {}",
+ NESTED_INDENT.repeat(debug_indent_level),
+ self.format_counter(&intermediate_expression)
+ );
+ let intermediate_expression_operand = intermediate_expression.as_operand_id();
+ collect_intermediate_expressions.push(intermediate_expression);
+ some_sumup_edge_counter_operand.replace(intermediate_expression_operand);
+ }
+ }
+ let counter_kind = self.coverage_counters.make_expression(
+ first_edge_counter_operand,
+ Op::Add,
+ some_sumup_edge_counter_operand.unwrap(),
+ || Some(format!("{:?}", bcb)),
+ );
+ debug!(
+ "{}{:?} gets a new counter (sum of predecessor counters): {}",
+ NESTED_INDENT.repeat(debug_indent_level),
+ bcb,
+ self.format_counter(&counter_kind)
+ );
+ self.basic_coverage_blocks[bcb].set_counter(counter_kind)
+ }
+
+ fn get_or_make_edge_counter_operand(
+ &mut self,
+ from_bcb: BasicCoverageBlock,
+ to_bcb: BasicCoverageBlock,
+ collect_intermediate_expressions: &mut Vec<CoverageKind>,
+ ) -> Result<ExpressionOperandId, Error> {
+ self.recursive_get_or_make_edge_counter_operand(
+ from_bcb,
+ to_bcb,
+ collect_intermediate_expressions,
+ 1,
+ )
+ }
+
+ fn recursive_get_or_make_edge_counter_operand(
+ &mut self,
+ from_bcb: BasicCoverageBlock,
+ to_bcb: BasicCoverageBlock,
+ collect_intermediate_expressions: &mut Vec<CoverageKind>,
+ debug_indent_level: usize,
+ ) -> Result<ExpressionOperandId, Error> {
+ // If the source BCB has only one successor (assumed to be the given target), an edge
+ // counter is unnecessary. Just get or make a counter for the source BCB.
+ let successors = self.bcb_successors(from_bcb).iter();
+ if successors.len() == 1 {
+ return self.recursive_get_or_make_counter_operand(
+ from_bcb,
+ collect_intermediate_expressions,
+ debug_indent_level + 1,
+ );
+ }
+
+ // If the edge already has a counter, return it.
+ if let Some(counter_kind) = self.basic_coverage_blocks[to_bcb].edge_counter_from(from_bcb) {
+ debug!(
+ "{}Edge {:?}->{:?} already has a counter: {}",
+ NESTED_INDENT.repeat(debug_indent_level),
+ from_bcb,
+ to_bcb,
+ self.format_counter(counter_kind)
+ );
+ return Ok(counter_kind.as_operand_id());
+ }
+
+ // Make a new counter to count this edge.
+ let counter_kind =
+ self.coverage_counters.make_counter(|| Some(format!("{:?}->{:?}", from_bcb, to_bcb)));
+ debug!(
+ "{}Edge {:?}->{:?} gets a new counter: {}",
+ NESTED_INDENT.repeat(debug_indent_level),
+ from_bcb,
+ to_bcb,
+ self.format_counter(&counter_kind)
+ );
+ self.basic_coverage_blocks[to_bcb].set_edge_counter_from(from_bcb, counter_kind)
+ }
+
+ /// Select a branch for the expression: either the recommended `reloop_branch` or, if none was
+ /// found, any branch.
+ fn choose_preferred_expression_branch(
+ &self,
+ traversal: &TraverseCoverageGraphWithLoops,
+ branches: &[BcbBranch],
+ ) -> BcbBranch {
+ let branch_needs_a_counter =
+ |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+
+ let some_reloop_branch = self.find_some_reloop_branch(traversal, &branches);
+ if let Some(reloop_branch_without_counter) =
+ some_reloop_branch.filter(branch_needs_a_counter)
+ {
+ debug!(
+ "Selecting reloop_branch={:?} that still needs a counter, to get the \
+ `Expression`",
+ reloop_branch_without_counter
+ );
+ reloop_branch_without_counter
+ } else {
+ let &branch_without_counter = branches
+ .iter()
+ .find(|&&branch| branch.counter(&self.basic_coverage_blocks).is_none())
+ .expect(
+ "needs_branch_counters was `true` so there should be at least one \
+ branch",
+ );
+ debug!(
+ "Selecting any branch={:?} that still needs a counter, to get the \
+ `Expression` because there was no `reloop_branch`, or it already had a \
+ counter",
+ branch_without_counter
+ );
+ branch_without_counter
+ }
+ }
+
+ /// At most one of the branches (or its edge, from the branching_bcb, if the branch has
+ /// multiple incoming edges) can have a counter computed by expression.
+ ///
+ /// If at least one of the branches leads outside of a loop (`found_loop_exit` is
+ /// true), and at least one other branch does not exit the loop (the first of which
+ /// is captured in `some_reloop_branch`), it's likely any reloop branch will be
+ /// executed far more often than the loop exit branch, making the reloop branch a better
+ /// candidate for an expression.
+ fn find_some_reloop_branch(
+ &self,
+ traversal: &TraverseCoverageGraphWithLoops,
+ branches: &[BcbBranch],
+ ) -> Option<BcbBranch> {
+ let branch_needs_a_counter =
+ |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+
+ let mut some_reloop_branch: Option<BcbBranch> = None;
+ for context in traversal.context_stack.iter().rev() {
+ if let Some((backedge_from_bcbs, _)) = &context.loop_backedges {
+ let mut found_loop_exit = false;
+ for &branch in branches.iter() {
+ if backedge_from_bcbs.iter().any(|&backedge_from_bcb| {
+ self.bcb_is_dominated_by(backedge_from_bcb, branch.target_bcb)
+ }) {
+ if let Some(reloop_branch) = some_reloop_branch {
+ if reloop_branch.counter(&self.basic_coverage_blocks).is_none() {
+ // we already found a candidate reloop_branch that still
+ // needs a counter
+ continue;
+ }
+ }
+ // The path from branch leads back to the top of the loop. Set this
+ // branch as the `reloop_branch`. If this branch already has a
+ // counter, and we find another reloop branch that doesn't have a
+ // counter yet, that branch will be selected as the `reloop_branch`
+ // instead.
+ some_reloop_branch = Some(branch);
+ } else {
+ // The path from branch leads outside this loop
+ found_loop_exit = true;
+ }
+ if found_loop_exit
+ && some_reloop_branch.filter(branch_needs_a_counter).is_some()
+ {
+ // Found both a branch that exits the loop and a branch that returns
+ // to the top of the loop (`reloop_branch`), and the `reloop_branch`
+ // doesn't already have a counter.
+ break;
+ }
+ }
+ if !found_loop_exit {
+ debug!(
+ "No branches exit the loop, so any branch without an existing \
+ counter can have the `Expression`."
+ );
+ break;
+ }
+ if some_reloop_branch.is_some() {
+ debug!(
+ "Found a branch that exits the loop and a branch the loops back to \
+ the top of the loop (`reloop_branch`). The `reloop_branch` will \
+ get the `Expression`, as long as it still needs a counter."
+ );
+ break;
+ }
+ // else all branches exited this loop context, so run the same checks with
+ // the outer loop(s)
+ }
+ }
+ some_reloop_branch
+ }
+
+ #[inline]
+ fn bcb_predecessors(&self, bcb: BasicCoverageBlock) -> &[BasicCoverageBlock] {
+ &self.basic_coverage_blocks.predecessors[bcb]
+ }
+
+ #[inline]
+ fn bcb_successors(&self, bcb: BasicCoverageBlock) -> &[BasicCoverageBlock] {
+ &self.basic_coverage_blocks.successors[bcb]
+ }
+
+ #[inline]
+ fn bcb_branches(&self, from_bcb: BasicCoverageBlock) -> Vec<BcbBranch> {
+ self.bcb_successors(from_bcb)
+ .iter()
+ .map(|&to_bcb| BcbBranch::from_to(from_bcb, to_bcb, &self.basic_coverage_blocks))
+ .collect::<Vec<_>>()
+ }
+
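+ /// Returns true if `bcb` has more than one outgoing branch and at least one of those
+ /// branches does not yet have a counter.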
+ fn bcb_needs_branch_counters(&self, bcb: BasicCoverageBlock) -> bool {
+ let branch_needs_a_counter =
+ |branch: &BcbBranch| branch.counter(&self.basic_coverage_blocks).is_none();
+ let branches = self.bcb_branches(bcb);
+ branches.len() > 1 && branches.iter().any(branch_needs_a_counter)
+ }
+
+ /// Returns true if the BasicCoverageBlock has zero or one incoming edge. (If zero, it should be
+ /// the entry point for the function.)
+ #[inline]
+ fn bcb_has_one_path_to_target(&self, bcb: BasicCoverageBlock) -> bool {
+ self.bcb_predecessors(bcb).len() <= 1
+ }
+
+ #[inline]
+ fn bcb_is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
+ self.basic_coverage_blocks.is_dominated_by(node, dom)
+ }
+
+ #[inline]
+ fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+ self.coverage_counters.debug_counters.format_counter(counter_kind)
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/debug.rs b/compiler/rustc_mir_transform/src/coverage/debug.rs
new file mode 100644
index 000000000..0f8679b0b
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/debug.rs
@@ -0,0 +1,831 @@
+//! The `InstrumentCoverage` MIR pass implementation includes debugging tools and options
+//! to help developers understand and/or improve the analysis and instrumentation of a MIR.
+//!
+//! To enable coverage, include the rustc command line option:
+//!
+//! * `-C instrument-coverage`
+//!
+//! MIR Dump Files, with additional `CoverageGraph` graphviz and `CoverageSpan` spanview
+//! ------------------------------------------------------------------------------------
+//!
+//! Additional debugging options include:
+//!
+//! * `-Z dump-mir=InstrumentCoverage` - Generate `.mir` files showing the state of the MIR,
+//! before and after the `InstrumentCoverage` pass, for each compiled function.
+//!
+//! * `-Z dump-mir-graphviz` - If `-Z dump-mir` is also enabled for the current MIR node path,
+//! each MIR dump is accompanied by a before-and-after graphical view of the MIR, in Graphviz
+//! `.dot` file format (which can be visually rendered as a graph using any of a number of free
+//! Graphviz viewers and IDE extensions).
+//!
+//! For the `InstrumentCoverage` pass, this option also enables generation of an additional
+//! Graphviz `.dot` file for each function, rendering the `CoverageGraph`: the control flow
+//! graph (CFG) of `BasicCoverageBlocks` (BCBs), as nodes, internally labeled to show the
+//! `CoverageSpan`-based MIR elements each BCB represents (`BasicBlock`s, `Statement`s and
+//! `Terminator`s), assigned coverage counters and/or expressions, and edge counters, as needed.
+//!
+//! (Note: the additional option `-Z graphviz-dark-mode` can be added to change the rendered
+//! output from its default black-on-white background to a dark color theme, if desired.)
+//!
+//! * `-Z dump-mir-spanview` - If `-Z dump-mir` is also enabled for the current MIR node path,
+//! each MIR dump is accompanied by a before-and-after `.html` document showing the function's
+//! original source code, highlighted by its MIR spans, at the `statement`-level (by default),
+//! `terminator` only, or encompassing span for the `Terminator` plus all `Statement`s, in each
+//! `block` (`BasicBlock`).
+//!
+//! For the `InstrumentCoverage` pass, this option also enables generation of an additional
+//! spanview `.html` file for each function, showing the aggregated `CoverageSpan`s that will
+//! require counters (or counter expressions) for accurate coverage analysis.
+//!
+//! Debug Logging
+//! -------------
+//!
+//! The `InstrumentCoverage` pass includes debug logging messages at various phases and decision
+//! points, which can be enabled via environment variable:
+//!
+//! ```shell
+//! RUSTC_LOG=rustc_mir_transform::transform::coverage=debug
+//! ```
+//!
+//! Other module paths with coverage-related debug logs may also be of interest, particularly for
+//! debugging the coverage map data, injected as global variables in the LLVM IR (during rustc's
+//! code generation pass). For example:
+//!
+//! ```shell
+//! RUSTC_LOG=rustc_mir_transform::transform::coverage,rustc_codegen_ssa::coverageinfo,rustc_codegen_llvm::coverageinfo=debug
+//! ```
+//!
+//! Coverage Debug Options
+//! ----------------------
+//!
+//! Additional debugging options can be enabled using the environment variable:
+//!
+//! ```shell
+//! RUSTC_COVERAGE_DEBUG_OPTIONS=<options>
+//! ```
+//!
+//! These options are comma-separated, and specified in the format `option-name=value`. For example:
+//!
+//! ```shell
+//! $ RUSTC_COVERAGE_DEBUG_OPTIONS=counter-format=id+operation,allow-unused-expressions=yes cargo build
+//! ```
+//!
+//! Coverage debug options include:
+//!
+//! * `allow-unused-expressions=yes` or `no` (default: `no`)
+//!
+//! The `InstrumentCoverage` algorithms _should_ only create and assign expressions to a
+//! `BasicCoverageBlock`, or an incoming edge, if that expression is either (a) required to
+//! count a `CoverageSpan`, or (b) a dependency of some other required counter expression.
+//!
+//! If an expression is generated that does not map to a `CoverageSpan` or dependency, this
+//! probably indicates there was a bug in the algorithm that creates and assigns counters
+//! and expressions.
+//!
+//! When this kind of bug is encountered, the rustc compiler will panic by default. Setting
+//! `allow-unused-expressions=yes` will log a warning message instead of panicking (effectively
+//! ignoring the unused expressions), which may be helpful when debugging the root cause of
+//! the problem.
+//!
+//! * `counter-format=<choices>`, where `<choices>` can be any plus-separated combination of `id`,
+//! `block`, and/or `operation` (default: `block+operation`)
+//!
+//! This option affects both the `CoverageGraph` (graphviz `.dot` files) and debug logging, when
+//! generating labels for counters and expressions.
+//!
+//! Depending on the values and combinations, counters can be labeled by:
+//!
+//! * `id` - counter or expression ID (ascending counter IDs, starting at 1, or descending
+//! expression IDs, starting at `u32::MAX`)
+//! * `block` - the `BasicCoverageBlock` label (for example, `bcb0`) or edge label (for
+//! example `bcb0->bcb1`), for counters or expressions assigned to count a
+//! `BasicCoverageBlock` or edge. Intermediate expressions (not directly associated with
+//! a BCB or edge) will be labeled by their expression ID, unless `operation` is also
+//! specified.
+//! * `operation` - applied to expressions only, labels include the left-hand-side counter
+//! or expression label (lhs operand), the operator (`+` or `-`), and the right-hand-side
+//! counter or expression (rhs operand). Expression operand labels are generated
+//! recursively, generating labels with nested operations, enclosed in parentheses
+//! (for example: `bcb2 + (bcb0 - bcb1)`).
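+//!
+//! For example, to label counters and expressions by their numeric IDs only:
+//!
+//! ```shell
+//! RUSTC_COVERAGE_DEBUG_OPTIONS=counter-format=id
+//! ```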
+
+use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
+use super::spans::CoverageSpan;
+
+use itertools::Itertools;
+use rustc_middle::mir::create_dump_file;
+use rustc_middle::mir::generic_graphviz::GraphvizWriter;
+use rustc_middle::mir::spanview::{self, SpanViewable};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::{self, BasicBlock, TerminatorKind};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::Span;
+
+use std::iter;
+use std::ops::Deref;
+use std::sync::OnceLock;
+
+pub const NESTED_INDENT: &str = " ";
+
+const RUSTC_COVERAGE_DEBUG_OPTIONS: &str = "RUSTC_COVERAGE_DEBUG_OPTIONS";
+
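+/// Returns the process-wide `DebugOptions`, parsed from the environment on first use
+/// and cached thereafter.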
+pub(super) fn debug_options<'a>() -> &'a DebugOptions {
+ static DEBUG_OPTIONS: OnceLock<DebugOptions> = OnceLock::new();
+
+ &DEBUG_OPTIONS.get_or_init(DebugOptions::from_env)
+}
+
+/// Parses and maintains coverage-specific debug options captured from the environment variable
+/// "RUSTC_COVERAGE_DEBUG_OPTIONS", if set.
+#[derive(Debug, Clone)]
+pub(super) struct DebugOptions {
+ pub allow_unused_expressions: bool,
+ counter_format: ExpressionFormat,
+}
+
+impl DebugOptions {
+ fn from_env() -> Self {
+ let mut allow_unused_expressions = true;
+ let mut counter_format = ExpressionFormat::default();
+
+ if let Ok(env_debug_options) = std::env::var(RUSTC_COVERAGE_DEBUG_OPTIONS) {
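+ // Normalize the option string: drop spaces and map `-` to `_`, so both
+ // `counter-format` and `counter_format` spellings are accepted.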
+ for setting_str in env_debug_options.replace(' ', "").replace('-', "_").split(',') {
+ let (option, value) = match setting_str.split_once('=') {
+ None => (setting_str, None),
+ Some((k, v)) => (k, Some(v)),
+ };
+ match option {
+ "allow_unused_expressions" => {
+ allow_unused_expressions = bool_option_val(option, value);
+ debug!(
+ "{} env option `allow_unused_expressions` is set to {}",
+ RUSTC_COVERAGE_DEBUG_OPTIONS, allow_unused_expressions
+ );
+ }
+ "counter_format" => {
+ match value {
+ None => {
+ bug!(
+ "`{}` option in environment variable {} requires one or more \
+ plus-separated choices (a non-empty subset of \
+ `id+block+operation`)",
+ option,
+ RUSTC_COVERAGE_DEBUG_OPTIONS
+ );
+ }
+ Some(val) => {
+ counter_format = counter_format_option_val(val);
+ debug!(
+ "{} env option `counter_format` is set to {:?}",
+ RUSTC_COVERAGE_DEBUG_OPTIONS, counter_format
+ );
+ }
+ };
+ }
+ _ => bug!(
+ "Unsupported setting `{}` in environment variable {}",
+ option,
+ RUSTC_COVERAGE_DEBUG_OPTIONS
+ ),
+ };
+ }
+ }
+
+ Self { allow_unused_expressions, counter_format }
+ }
+}
+
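+/// Interprets an optional boolean option value; a bare option name with no `=value`
+/// is treated as `true`.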
+fn bool_option_val(option: &str, some_strval: Option<&str>) -> bool {
+ if let Some(val) = some_strval {
+ if vec!["yes", "y", "on", "true"].contains(&val) {
+ true
+ } else if vec!["no", "n", "off", "false"].contains(&val) {
+ false
+ } else {
+ bug!(
+ "Unsupported value `{}` for option `{}` in environment variable {}",
+ val,
+ option,
+ RUSTC_COVERAGE_DEBUG_OPTIONS
+ )
+ }
+ } else {
+ true
+ }
+}
+
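+/// Parses a plus-separated list of `id`, `block`, and/or `operation` choices into an
+/// `ExpressionFormat`, rejecting anything else.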
+fn counter_format_option_val(strval: &str) -> ExpressionFormat {
+ let mut counter_format = ExpressionFormat { id: false, block: false, operation: false };
+ let components = strval.splitn(3, '+');
+ for component in components {
+ match component {
+ "id" => counter_format.id = true,
+ "block" => counter_format.block = true,
+ "operation" => counter_format.operation = true,
+ _ => bug!(
+ "Unsupported counter_format choice `{}` in environment variable {}",
+ component,
+ RUSTC_COVERAGE_DEBUG_OPTIONS
+ ),
+ }
+ }
+ counter_format
+}
+
+#[derive(Debug, Clone)]
+struct ExpressionFormat {
+ id: bool,
+ block: bool,
+ operation: bool,
+}
+
+impl Default for ExpressionFormat {
+ fn default() -> Self {
+ Self { id: false, block: true, operation: true }
+ }
+}
+
+/// If enabled, this struct maintains a map from `CoverageKind` IDs (as `ExpressionOperandId`) to
+/// the `CoverageKind` data and optional label (normally, the counter's associated
+/// `BasicCoverageBlock` format string, if any).
+///
+/// Use `format_counter` to convert one of these `CoverageKind` counters to a debug output string,
+/// as directed by the `DebugOptions`. This allows the format of counter labels in logs and dump
+/// files (including the `CoverageGraph` graphviz file) to be changed at runtime, via environment
+/// variable.
+///
+/// `DebugCounters` supports a recursive rendering of `Expression` counters, so they can be
+/// presented as nested expressions such as `(bcb3 - (bcb0 + bcb1))`.
+pub(super) struct DebugCounters {
+ some_counters: Option<FxHashMap<ExpressionOperandId, DebugCounter>>,
+}
+
+impl DebugCounters {
+ pub fn new() -> Self {
+ Self { some_counters: None }
+ }
+
+ pub fn enable(&mut self) {
+ debug_assert!(!self.is_enabled());
+ self.some_counters.replace(FxHashMap::default());
+ }
+
+ pub fn is_enabled(&self) -> bool {
+ self.some_counters.is_some()
+ }
+
+ pub fn add_counter(&mut self, counter_kind: &CoverageKind, some_block_label: Option<String>) {
+ if let Some(counters) = &mut self.some_counters {
+ let id: ExpressionOperandId = match *counter_kind {
+ CoverageKind::Counter { id, .. } => id.into(),
+ CoverageKind::Expression { id, .. } => id.into(),
+ _ => bug!(
+ "the given `CoverageKind` is not an counter or expression: {:?}",
+ counter_kind
+ ),
+ };
+ counters
+ .try_insert(id, DebugCounter::new(counter_kind.clone(), some_block_label))
+ .expect("attempt to add the same counter_kind to DebugCounters more than once");
+ }
+ }
+
+ pub fn some_block_label(&self, operand: ExpressionOperandId) -> Option<&String> {
+ self.some_counters.as_ref().map_or(None, |counters| {
+ counters
+ .get(&operand)
+ .map_or(None, |debug_counter| debug_counter.some_block_label.as_ref())
+ })
+ }
+
+ pub fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+ match *counter_kind {
+ CoverageKind::Counter { .. } => {
+ format!("Counter({})", self.format_counter_kind(counter_kind))
+ }
+ CoverageKind::Expression { .. } => {
+ format!("Expression({})", self.format_counter_kind(counter_kind))
+ }
+ CoverageKind::Unreachable { .. } => "Unreachable".to_owned(),
+ }
+ }
+
+ fn format_counter_kind(&self, counter_kind: &CoverageKind) -> String {
+ let counter_format = &debug_options().counter_format;
+ if let CoverageKind::Expression { id, lhs, op, rhs } = *counter_kind {
+ if counter_format.operation {
+ return format!(
+ "{}{} {} {}",
+ if counter_format.id || self.some_counters.is_none() {
+ format!("#{} = ", id.index())
+ } else {
+ String::new()
+ },
+ self.format_operand(lhs),
+ if op == Op::Add { "+" } else { "-" },
+ self.format_operand(rhs),
+ );
+ }
+ }
+
+ let id: ExpressionOperandId = match *counter_kind {
+ CoverageKind::Counter { id, .. } => id.into(),
+ CoverageKind::Expression { id, .. } => id.into(),
+ _ => {
+ bug!("the given `CoverageKind` is not an counter or expression: {:?}", counter_kind)
+ }
+ };
+ if self.some_counters.is_some() && (counter_format.block || !counter_format.id) {
+ let counters = self.some_counters.as_ref().unwrap();
+ if let Some(DebugCounter { some_block_label: Some(block_label), .. }) =
+ counters.get(&id)
+ {
+ return if counter_format.id {
+ format!("{}#{}", block_label, id.index())
+ } else {
+ block_label.to_string()
+ };
+ }
+ }
+ format!("#{}", id.index())
+ }
+
+ fn format_operand(&self, operand: ExpressionOperandId) -> String {
+ if operand.index() == 0 {
+ return String::from("0");
+ }
+ if let Some(counters) = &self.some_counters {
+ if let Some(DebugCounter { counter_kind, some_block_label }) = counters.get(&operand) {
+ if let CoverageKind::Expression { .. } = counter_kind {
+ if let Some(label) = some_block_label && debug_options().counter_format.block {
+ return format!(
+ "{}:({})",
+ label,
+ self.format_counter_kind(counter_kind)
+ );
+ }
+ return format!("({})", self.format_counter_kind(counter_kind));
+ }
+ return self.format_counter_kind(counter_kind);
+ }
+ }
+ format!("#{}", operand.index())
+ }
+}
+
+/// A non-public support struct for `DebugCounters`.
+#[derive(Debug)]
+struct DebugCounter {
+ counter_kind: CoverageKind,
+ some_block_label: Option<String>,
+}
+
+impl DebugCounter {
+ fn new(counter_kind: CoverageKind, some_block_label: Option<String>) -> Self {
+ Self { counter_kind, some_block_label }
+ }
+}
+
+/// If enabled, this data structure captures additional debugging information used when generating
+/// a Graphviz (.dot file) representation of the `CoverageGraph`, for debugging purposes.
+pub(super) struct GraphvizData {
+ some_bcb_to_coverage_spans_with_counters:
+ Option<FxHashMap<BasicCoverageBlock, Vec<(CoverageSpan, CoverageKind)>>>,
+ some_bcb_to_dependency_counters: Option<FxHashMap<BasicCoverageBlock, Vec<CoverageKind>>>,
+ some_edge_to_counter: Option<FxHashMap<(BasicCoverageBlock, BasicBlock), CoverageKind>>,
+}
+
+impl GraphvizData {
+ pub fn new() -> Self {
+ Self {
+ some_bcb_to_coverage_spans_with_counters: None,
+ some_bcb_to_dependency_counters: None,
+ some_edge_to_counter: None,
+ }
+ }
+
+ pub fn enable(&mut self) {
+ debug_assert!(!self.is_enabled());
+ self.some_bcb_to_coverage_spans_with_counters = Some(FxHashMap::default());
+ self.some_bcb_to_dependency_counters = Some(FxHashMap::default());
+ self.some_edge_to_counter = Some(FxHashMap::default());
+ }
+
+ pub fn is_enabled(&self) -> bool {
+ self.some_bcb_to_coverage_spans_with_counters.is_some()
+ }
+
+ pub fn add_bcb_coverage_span_with_counter(
+ &mut self,
+ bcb: BasicCoverageBlock,
+ coverage_span: &CoverageSpan,
+ counter_kind: &CoverageKind,
+ ) {
+ if let Some(bcb_to_coverage_spans_with_counters) =
+ self.some_bcb_to_coverage_spans_with_counters.as_mut()
+ {
+ bcb_to_coverage_spans_with_counters
+ .entry(bcb)
+ .or_insert_with(Vec::new)
+ .push((coverage_span.clone(), counter_kind.clone()));
+ }
+ }
+
+ pub fn get_bcb_coverage_spans_with_counters(
+ &self,
+ bcb: BasicCoverageBlock,
+ ) -> Option<&[(CoverageSpan, CoverageKind)]> {
+ if let Some(bcb_to_coverage_spans_with_counters) =
+ self.some_bcb_to_coverage_spans_with_counters.as_ref()
+ {
+ bcb_to_coverage_spans_with_counters.get(&bcb).map(Deref::deref)
+ } else {
+ None
+ }
+ }
+
+ pub fn add_bcb_dependency_counter(
+ &mut self,
+ bcb: BasicCoverageBlock,
+ counter_kind: &CoverageKind,
+ ) {
+ if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_mut() {
+ bcb_to_dependency_counters
+ .entry(bcb)
+ .or_insert_with(Vec::new)
+ .push(counter_kind.clone());
+ }
+ }
+
+ pub fn get_bcb_dependency_counters(&self, bcb: BasicCoverageBlock) -> Option<&[CoverageKind]> {
+ if let Some(bcb_to_dependency_counters) = self.some_bcb_to_dependency_counters.as_ref() {
+ bcb_to_dependency_counters.get(&bcb).map(Deref::deref)
+ } else {
+ None
+ }
+ }
+
+ pub fn set_edge_counter(
+ &mut self,
+ from_bcb: BasicCoverageBlock,
+ to_bb: BasicBlock,
+ counter_kind: &CoverageKind,
+ ) {
+ if let Some(edge_to_counter) = self.some_edge_to_counter.as_mut() {
+ edge_to_counter
+ .try_insert((from_bcb, to_bb), counter_kind.clone())
+ .expect("invalid attempt to insert more than one edge counter for the same edge");
+ }
+ }
+
+ pub fn get_edge_counter(
+ &self,
+ from_bcb: BasicCoverageBlock,
+ to_bb: BasicBlock,
+ ) -> Option<&CoverageKind> {
+ if let Some(edge_to_counter) = self.some_edge_to_counter.as_ref() {
+ edge_to_counter.get(&(from_bcb, to_bb))
+ } else {
+ None
+ }
+ }
+}
+
+/// If enabled, this struct captures additional data used to track whether expressions were used,
+/// directly or indirectly, to compute the coverage counts for all `CoverageSpan`s, and any that are
+/// _not_ used are retained in the `unused_expressions` Vec, to be included in debug output (logs
+/// and/or a `CoverageGraph` graphviz output).
+pub(super) struct UsedExpressions {
+ some_used_expression_operands:
+ Option<FxHashMap<ExpressionOperandId, Vec<InjectedExpressionId>>>,
+ some_unused_expressions:
+ Option<Vec<(CoverageKind, Option<BasicCoverageBlock>, BasicCoverageBlock)>>,
+}
+
+impl UsedExpressions {
+ pub fn new() -> Self {
+ Self { some_used_expression_operands: None, some_unused_expressions: None }
+ }
+
+ pub fn enable(&mut self) {
+ debug_assert!(!self.is_enabled());
+ self.some_used_expression_operands = Some(FxHashMap::default());
+ self.some_unused_expressions = Some(Vec::new());
+ }
+
+ pub fn is_enabled(&self) -> bool {
+ self.some_used_expression_operands.is_some()
+ }
+
+ pub fn add_expression_operands(&mut self, expression: &CoverageKind) {
+ if let Some(used_expression_operands) = self.some_used_expression_operands.as_mut() {
+ if let CoverageKind::Expression { id, lhs, rhs, .. } = *expression {
+ used_expression_operands.entry(lhs).or_insert_with(Vec::new).push(id);
+ used_expression_operands.entry(rhs).or_insert_with(Vec::new).push(id);
+ }
+ }
+ }
+
+ pub fn expression_is_used(&self, expression: &CoverageKind) -> bool {
+ if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
+ used_expression_operands.contains_key(&expression.as_operand_id())
+ } else {
+ false
+ }
+ }
+
+ pub fn add_unused_expression_if_not_found(
+ &mut self,
+ expression: &CoverageKind,
+ edge_from_bcb: Option<BasicCoverageBlock>,
+ target_bcb: BasicCoverageBlock,
+ ) {
+ if let Some(used_expression_operands) = self.some_used_expression_operands.as_ref() {
+ if !used_expression_operands.contains_key(&expression.as_operand_id()) {
+ self.some_unused_expressions.as_mut().unwrap().push((
+ expression.clone(),
+ edge_from_bcb,
+ target_bcb,
+ ));
+ }
+ }
+ }
+
+ /// Return the list of unused counters (if any) as a tuple with the counter (`CoverageKind`),
+ /// optional `from_bcb` (if it was an edge counter), and `target_bcb`.
+ pub fn get_unused_expressions(
+ &self,
+ ) -> Vec<(CoverageKind, Option<BasicCoverageBlock>, BasicCoverageBlock)> {
+ if let Some(unused_expressions) = self.some_unused_expressions.as_ref() {
+ unused_expressions.clone()
+ } else {
+ Vec::new()
+ }
+ }
+
+ /// If enabled, validate that every BCB or edge counter not directly associated with a coverage
+ /// span is at least indirectly associated (it is a dependency of a BCB counter that _is_
+ /// associated with a coverage span).
+ pub fn validate(
+ &mut self,
+ bcb_counters_without_direct_coverage_spans: &[(
+ Option<BasicCoverageBlock>,
+ BasicCoverageBlock,
+ CoverageKind,
+ )],
+ ) {
+ if self.is_enabled() {
+ let mut not_validated = bcb_counters_without_direct_coverage_spans
+ .iter()
+ .map(|(_, _, counter_kind)| counter_kind)
+ .collect::<Vec<_>>();
+ let mut validating_count = 0;
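+ // Iterate to a fixed point: each pass keeps only the counters whose expressions
+ // are still unused, and stops when a full pass validates nothing new.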
+ while not_validated.len() != validating_count {
+ let to_validate = not_validated.split_off(0);
+ validating_count = to_validate.len();
+ for counter_kind in to_validate {
+ if self.expression_is_used(counter_kind) {
+ self.add_expression_operands(counter_kind);
+ } else {
+ not_validated.push(counter_kind);
+ }
+ }
+ }
+ }
+ }
+
+ pub fn alert_on_unused_expressions(&self, debug_counters: &DebugCounters) {
+ if let Some(unused_expressions) = self.some_unused_expressions.as_ref() {
+ for (counter_kind, edge_from_bcb, target_bcb) in unused_expressions {
+ let unused_counter_message = if let Some(from_bcb) = edge_from_bcb.as_ref() {
+ format!(
+ "non-coverage edge counter found without a dependent expression, in \
+ {:?}->{:?}; counter={}",
+ from_bcb,
+ target_bcb,
+ debug_counters.format_counter(&counter_kind),
+ )
+ } else {
+ format!(
+ "non-coverage counter found without a dependent expression, in {:?}; \
+ counter={}",
+ target_bcb,
+ debug_counters.format_counter(&counter_kind),
+ )
+ };
+
+ if debug_options().allow_unused_expressions {
+ debug!("WARNING: {}", unused_counter_message);
+ } else {
+ bug!("{}", unused_counter_message);
+ }
+ }
+ }
+ }
+}
+
+/// Generates the MIR pass `CoverageSpan`-specific spanview dump file.
+pub(super) fn dump_coverage_spanview<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mir_body: &mir::Body<'tcx>,
+ basic_coverage_blocks: &CoverageGraph,
+ pass_name: &str,
+ body_span: Span,
+ coverage_spans: &[CoverageSpan],
+) {
+ let mir_source = mir_body.source;
+ let def_id = mir_source.def_id();
+
+ let span_viewables = span_viewables(tcx, mir_body, basic_coverage_blocks, &coverage_spans);
+ let mut file = create_dump_file(tcx, "html", None, pass_name, &0, mir_source)
+ .expect("Unexpected error creating MIR spanview HTML file");
+ let crate_name = tcx.crate_name(def_id.krate);
+ let item_name = tcx.def_path(def_id).to_filename_friendly_no_crate();
+ let title = format!("{}.{} - Coverage Spans", crate_name, item_name);
+ spanview::write_document(tcx, body_span, span_viewables, &title, &mut file)
+ .expect("Unexpected IO error dumping coverage spans as HTML");
+}
+
+/// Converts the computed `BasicCoverageBlockData`s into `SpanViewable`s.
+fn span_viewables<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mir_body: &mir::Body<'tcx>,
+ basic_coverage_blocks: &CoverageGraph,
+ coverage_spans: &[CoverageSpan],
+) -> Vec<SpanViewable> {
+ let mut span_viewables = Vec::new();
+ for coverage_span in coverage_spans {
+ let tooltip = coverage_span.format_coverage_statements(tcx, mir_body);
+ let CoverageSpan { span, bcb, .. } = coverage_span;
+ let bcb_data = &basic_coverage_blocks[*bcb];
+ let id = bcb_data.id();
+ let leader_bb = bcb_data.leader_bb();
+ span_viewables.push(SpanViewable { bb: leader_bb, span: *span, id, tooltip });
+ }
+ span_viewables
+}
+
+/// Generates the MIR pass coverage-specific graphviz dump file.
+pub(super) fn dump_coverage_graphviz<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mir_body: &mir::Body<'tcx>,
+ pass_name: &str,
+ basic_coverage_blocks: &CoverageGraph,
+ debug_counters: &DebugCounters,
+ graphviz_data: &GraphvizData,
+ intermediate_expressions: &[CoverageKind],
+ debug_used_expressions: &UsedExpressions,
+) {
+ let mir_source = mir_body.source;
+ let def_id = mir_source.def_id();
+ let node_content = |bcb| {
+ bcb_to_string_sections(
+ tcx,
+ mir_body,
+ debug_counters,
+ &basic_coverage_blocks[bcb],
+ graphviz_data.get_bcb_coverage_spans_with_counters(bcb),
+ graphviz_data.get_bcb_dependency_counters(bcb),
+ // intermediate_expressions are injected into the mir::START_BLOCK, so
+ // include them in the first BCB.
+ if bcb.index() == 0 { Some(&intermediate_expressions) } else { None },
+ )
+ };
+ let edge_labels = |from_bcb| {
+ let from_bcb_data = &basic_coverage_blocks[from_bcb];
+ let from_terminator = from_bcb_data.terminator(mir_body);
+ let mut edge_labels = from_terminator.kind.fmt_successor_labels();
+ edge_labels.retain(|label| label != "unreachable");
+ let edge_counters = from_terminator
+ .successors()
+ .map(|successor_bb| graphviz_data.get_edge_counter(from_bcb, successor_bb));
+ iter::zip(&edge_labels, edge_counters)
+ .map(|(label, some_counter)| {
+ if let Some(counter) = some_counter {
+ format!("{}\n{}", label, debug_counters.format_counter(counter))
+ } else {
+ label.to_string()
+ }
+ })
+ .collect::<Vec<_>>()
+ };
+ let graphviz_name = format!("Cov_{}_{}", def_id.krate.index(), def_id.index.index());
+ let mut graphviz_writer =
+ GraphvizWriter::new(basic_coverage_blocks, &graphviz_name, node_content, edge_labels);
+ let unused_expressions = debug_used_expressions.get_unused_expressions();
+ if !unused_expressions.is_empty() {
+ graphviz_writer.set_graph_label(&format!(
+ "Unused expressions:\n {}",
+ unused_expressions
+ .as_slice()
+ .iter()
+ .map(|(counter_kind, edge_from_bcb, target_bcb)| {
+ if let Some(from_bcb) = edge_from_bcb.as_ref() {
+ format!(
+ "{:?}->{:?}: {}",
+ from_bcb,
+ target_bcb,
+ debug_counters.format_counter(&counter_kind),
+ )
+ } else {
+ format!(
+ "{:?}: {}",
+ target_bcb,
+ debug_counters.format_counter(&counter_kind),
+ )
+ }
+ })
+ .join("\n ")
+ ));
+ }
+ let mut file = create_dump_file(tcx, "dot", None, pass_name, &0, mir_source)
+ .expect("Unexpected error creating BasicCoverageBlock graphviz DOT file");
+ graphviz_writer
+ .write_graphviz(tcx, &mut file)
+ .expect("Unexpected error writing BasicCoverageBlock graphviz DOT file");
+}
+
+fn bcb_to_string_sections<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mir_body: &mir::Body<'tcx>,
+ debug_counters: &DebugCounters,
+ bcb_data: &BasicCoverageBlockData,
+ some_coverage_spans_with_counters: Option<&[(CoverageSpan, CoverageKind)]>,
+ some_dependency_counters: Option<&[CoverageKind]>,
+ some_intermediate_expressions: Option<&[CoverageKind]>,
+) -> Vec<String> {
+ let len = bcb_data.basic_blocks.len();
+ let mut sections = Vec::new();
+ if let Some(collect_intermediate_expressions) = some_intermediate_expressions {
+ sections.push(
+ collect_intermediate_expressions
+ .iter()
+ .map(|expression| {
+ format!("Intermediate {}", debug_counters.format_counter(expression))
+ })
+ .join("\n"),
+ );
+ }
+ if let Some(coverage_spans_with_counters) = some_coverage_spans_with_counters {
+ sections.push(
+ coverage_spans_with_counters
+ .iter()
+ .map(|(covspan, counter)| {
+ format!(
+ "{} at {}",
+ debug_counters.format_counter(counter),
+ covspan.format(tcx, mir_body)
+ )
+ })
+ .join("\n"),
+ );
+ }
+ if let Some(dependency_counters) = some_dependency_counters {
+ sections.push(format!(
+ "Non-coverage counters:\n {}",
+ dependency_counters
+ .iter()
+ .map(|counter| debug_counters.format_counter(counter))
+ .join(" \n"),
+ ));
+ }
+ if let Some(counter_kind) = &bcb_data.counter_kind {
+ sections.push(format!("{:?}", counter_kind));
+ }
+ let non_term_blocks = bcb_data.basic_blocks[0..len - 1]
+ .iter()
+ .map(|&bb| format!("{:?}: {}", bb, term_type(&mir_body[bb].terminator().kind)))
+ .collect::<Vec<_>>();
+ if !non_term_blocks.is_empty() {
+ sections.push(non_term_blocks.join("\n"));
+ }
+ sections.push(format!(
+ "{:?}: {}",
+ bcb_data.basic_blocks.last().unwrap(),
+ term_type(&bcb_data.terminator(mir_body).kind)
+ ));
+ sections
+}
+
+/// Returns a simple string representation of a `TerminatorKind` variant, independent of any
+/// values it might hold.
+pub(super) fn term_type(kind: &TerminatorKind<'_>) -> &'static str {
+ match kind {
+ TerminatorKind::Goto { .. } => "Goto",
+ TerminatorKind::SwitchInt { .. } => "SwitchInt",
+ TerminatorKind::Resume => "Resume",
+ TerminatorKind::Abort => "Abort",
+ TerminatorKind::Return => "Return",
+ TerminatorKind::Unreachable => "Unreachable",
+ TerminatorKind::Drop { .. } => "Drop",
+ TerminatorKind::DropAndReplace { .. } => "DropAndReplace",
+ TerminatorKind::Call { .. } => "Call",
+ TerminatorKind::Assert { .. } => "Assert",
+ TerminatorKind::Yield { .. } => "Yield",
+ TerminatorKind::GeneratorDrop => "GeneratorDrop",
+ TerminatorKind::FalseEdge { .. } => "FalseEdge",
+ TerminatorKind::FalseUnwind { .. } => "FalseUnwind",
+ TerminatorKind::InlineAsm { .. } => "InlineAsm",
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs
new file mode 100644
index 000000000..759ea7cd3
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/graph.rs
@@ -0,0 +1,753 @@
+use super::Error;
+
+use itertools::Itertools;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::graph::dominators::{self, Dominators};
+use rustc_data_structures::graph::{self, GraphSuccessors, WithNumNodes, WithStartNode};
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::{self, BasicBlock, BasicBlockData, Terminator, TerminatorKind};
+
+use std::ops::{Index, IndexMut};
+
+const ID_SEPARATOR: &str = ",";
+
+/// A coverage-specific simplification of the MIR control flow graph (CFG). The `CoverageGraph`s
+/// nodes are `BasicCoverageBlock`s, which encompass one or more MIR `BasicBlock`s, plus a
+/// `CoverageKind` counter (to be added by `CoverageCounters::make_bcb_counters`), and an optional
+/// set of additional counters--if needed--to count incoming edges, if there is more than one.
+/// (These "edge counters" are eventually converted into new MIR `BasicBlock`s.)
+#[derive(Debug)]
+pub(super) struct CoverageGraph {
+ bcbs: IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
+ bb_to_bcb: IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
+ pub successors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+ pub predecessors: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+ dominators: Option<Dominators<BasicCoverageBlock>>,
+}
+
+impl CoverageGraph {
+ pub fn from_mir(mir_body: &mir::Body<'_>) -> Self {
+ let (bcbs, bb_to_bcb) = Self::compute_basic_coverage_blocks(mir_body);
+
+ // Pre-transform MIR `BasicBlock` successors and predecessors into the BasicCoverageBlock
+ // equivalents. Note that since the BasicCoverageBlock graph has been fully simplified,
+ // each predecessor of a BCB leader_bb should be in a unique BCB. It is possible for a
+ // `SwitchInt` to have multiple targets to the same destination `BasicBlock`, so
+ // de-duplication is required. This is done without reordering the successors.
+
+ let bcbs_len = bcbs.len();
+ let mut seen = IndexVec::from_elem_n(false, bcbs_len);
+ let successors = IndexVec::from_fn_n(
+ |bcb| {
+ for b in seen.iter_mut() {
+ *b = false;
+ }
+ let bcb_data = &bcbs[bcb];
+ let mut bcb_successors = Vec::new();
+ for successor in
+ bcb_filtered_successors(&mir_body, &bcb_data.terminator(mir_body).kind)
+ .filter_map(|successor_bb| bb_to_bcb[successor_bb])
+ {
+ if !seen[successor] {
+ seen[successor] = true;
+ bcb_successors.push(successor);
+ }
+ }
+ bcb_successors
+ },
+ bcbs.len(),
+ );
+
+ let mut predecessors = IndexVec::from_elem_n(Vec::new(), bcbs.len());
+ for (bcb, bcb_successors) in successors.iter_enumerated() {
+ for &successor in bcb_successors {
+ predecessors[successor].push(bcb);
+ }
+ }
+
+ let mut basic_coverage_blocks =
+ Self { bcbs, bb_to_bcb, successors, predecessors, dominators: None };
+ let dominators = dominators::dominators(&basic_coverage_blocks);
+ basic_coverage_blocks.dominators = Some(dominators);
+ basic_coverage_blocks
+ }
+
+ fn compute_basic_coverage_blocks(
+ mir_body: &mir::Body<'_>,
+ ) -> (
+ IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
+ IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
+ ) {
+ let num_basic_blocks = mir_body.basic_blocks.len();
+ let mut bcbs = IndexVec::with_capacity(num_basic_blocks);
+ let mut bb_to_bcb = IndexVec::from_elem_n(None, num_basic_blocks);
+
+ // Walk the MIR CFG using a Preorder traversal, which starts from `START_BLOCK` and follows
+ // each block terminator's `successors()`. Coverage spans must map to actual source code,
+ // so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
+ // intentionally omits unwind paths.
+ // FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
+ // `catch_unwind()` handlers.
+ let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors);
+
+ let mut basic_blocks = Vec::new();
+ for (bb, data) in mir_cfg_without_unwind {
+ if let Some(last) = basic_blocks.last() {
+ let predecessors = &mir_body.basic_blocks.predecessors()[bb];
+ if predecessors.len() > 1 || !predecessors.contains(last) {
+ // The `bb` has more than one _incoming_ edge, and should start its own
+ // `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet
+ // include `bb`; it contains a sequence of one or more sequential basic_blocks
+ // with no intermediate branches in or out. Save these as a new
+ // `BasicCoverageBlockData` before starting the new one.)
+ Self::add_basic_coverage_block(
+ &mut bcbs,
+ &mut bb_to_bcb,
+ basic_blocks.split_off(0),
+ );
+ debug!(
+ " because {}",
+ if predecessors.len() > 1 {
+ "predecessors.len() > 1".to_owned()
+ } else {
+ format!("bb {} is not in precessors: {:?}", bb.index(), predecessors)
+ }
+ );
+ }
+ }
+ basic_blocks.push(bb);
+
+ let term = data.terminator();
+
+ match term.kind {
+ TerminatorKind::Return { .. }
+ | TerminatorKind::Abort
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::SwitchInt { .. } => {
+ // The `bb` has more than one _outgoing_ edge, or exits the function. Save the
+ // current sequence of `basic_blocks` gathered to this point, as a new
+ // `BasicCoverageBlockData`.
+ Self::add_basic_coverage_block(
+ &mut bcbs,
+ &mut bb_to_bcb,
+ basic_blocks.split_off(0),
+ );
+ debug!(" because term.kind = {:?}", term.kind);
+ // Note that this condition is based on `TerminatorKind`, even though it
+ // theoretically boils down to `successors().len() != 1`; that is, either zero
+ // (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but
+ // since the BCB CFG ignores things like unwind branches (which exist in the
+ // `Terminator`'s `successors()` list), checking the number of successors won't
+ // work.
+ }
+
+ // The following `TerminatorKind`s are either not expected outside an unwind branch,
+ // or they should not (under normal circumstances) branch. Coverage graphs are
+ // simplified by assuring coverage results are accurate for program executions that
+ // don't panic.
+ //
+ // Programs that panic and unwind may record slightly inaccurate coverage results
+ // for a coverage region containing the `Terminator` that began the panic. This
+ // is as intended. (See Issue #78544 for a possible future option to support
+ // coverage in test programs that panic.)
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::InlineAsm { .. } => {}
+ }
+ }
+
+ if !basic_blocks.is_empty() {
+ // process any remaining basic_blocks into a final `BasicCoverageBlockData`
+ Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0));
+ debug!(" because the end of the MIR CFG was reached while traversing");
+ }
+
+ (bcbs, bb_to_bcb)
+ }
+
+ fn add_basic_coverage_block(
+ bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
+ bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
+ basic_blocks: Vec<BasicBlock>,
+ ) {
+ let bcb = BasicCoverageBlock::from_usize(bcbs.len());
+ for &bb in basic_blocks.iter() {
+ bb_to_bcb[bb] = Some(bcb);
+ }
+ let bcb_data = BasicCoverageBlockData::from(basic_blocks);
+ debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
+ bcbs.push(bcb_data);
+ }
+
+ #[inline(always)]
+ pub fn iter_enumerated(
+ &self,
+ ) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
+ self.bcbs.iter_enumerated()
+ }
+
+ #[inline(always)]
+ pub fn iter_enumerated_mut(
+ &mut self,
+ ) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
+ self.bcbs.iter_enumerated_mut()
+ }
+
+ #[inline(always)]
+ pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
+ if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
+ }
+
+ #[inline(always)]
+ pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
+ self.dominators.as_ref().unwrap().is_dominated_by(node, dom)
+ }
+
+ #[inline(always)]
+ pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> {
+ self.dominators.as_ref().unwrap()
+ }
+}
+
+impl Index<BasicCoverageBlock> for CoverageGraph {
+ type Output = BasicCoverageBlockData;
+
+ #[inline]
+ fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
+ &self.bcbs[index]
+ }
+}
+
+impl IndexMut<BasicCoverageBlock> for CoverageGraph {
+ #[inline]
+ fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
+ &mut self.bcbs[index]
+ }
+}
+
+impl graph::DirectedGraph for CoverageGraph {
+ type Node = BasicCoverageBlock;
+}
+
+impl graph::WithNumNodes for CoverageGraph {
+ #[inline]
+ fn num_nodes(&self) -> usize {
+ self.bcbs.len()
+ }
+}
+
+impl graph::WithStartNode for CoverageGraph {
+ #[inline]
+ fn start_node(&self) -> Self::Node {
+ self.bcb_from_bb(mir::START_BLOCK)
+ .expect("mir::START_BLOCK should be in a BasicCoverageBlock")
+ }
+}
+
+type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;
+
+impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
+ type Item = BasicCoverageBlock;
+ type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
+}
+
+impl graph::WithSuccessors for CoverageGraph {
+ #[inline]
+ fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
+ self.successors[node].iter().cloned()
+ }
+}
+
+impl<'graph> graph::GraphPredecessors<'graph> for CoverageGraph {
+ type Item = BasicCoverageBlock;
+ type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
+}
+
+impl graph::WithPredecessors for CoverageGraph {
+ #[inline]
+ fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
+ self.predecessors[node].iter().copied()
+ }
+}
+
+rustc_index::newtype_index! {
+ /// A node in the control-flow graph of CoverageGraph.
+ pub(super) struct BasicCoverageBlock {
+ DEBUG_FORMAT = "bcb{}",
+ const START_BCB = 0,
+ }
+}
+
+/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
+///
+/// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
+/// conditional branches; collectively, the BCBs form a new, simplified, coverage-specific Control Flow Graph, without
+/// altering the original MIR CFG.
+///
+/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
+/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
+///
+/// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code
+/// that is injected by the Rust compiler but has no physical source code to count. This also
+/// means a BasicBlock with a `Call` terminator can be merged into its primary successor target
+/// block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
+/// of `#[should_panic]` tests and `catch_unwind()` handlers")
+/// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
+/// not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
+/// a `Goto`, and merged with its successor into the same BCB.
+///
+/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
+/// In some cases, a BCB's execution count can be computed by `Expression`. Additional
+/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
+/// to the BCB's primary counter or expression).
+///
+/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
+/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
+/// significance.
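+///
+/// For intuition, a hypothetical MIR fragment and its BCB counterpart (block numbers are
+/// illustrative only, not taken from real compiler output):
+///
+/// ```text
+/// MIR:  bb0: Goto -> bb1
+///       bb1: Call -> bb2 (unwind: bb9)
+///       bb2: SwitchInt -> [bb3, bb4]
+///
+/// BCB:  bcb0 = { bb0, bb1, bb2 }  // Goto and Call chained; unwind edge to bb9 ignored
+///       bcb1 = { bb3 }
+///       bcb2 = { bb4 }
+/// ```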
+#[derive(Debug, Clone)]
+pub(super) struct BasicCoverageBlockData {
+ pub basic_blocks: Vec<BasicBlock>,
+ pub counter_kind: Option<CoverageKind>,
+ edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
+}
+
+impl BasicCoverageBlockData {
+ pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
+ assert!(!basic_blocks.is_empty());
+ Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
+ }
+
+ #[inline(always)]
+ pub fn leader_bb(&self) -> BasicBlock {
+ self.basic_blocks[0]
+ }
+
+ #[inline(always)]
+ pub fn last_bb(&self) -> BasicBlock {
+ *self.basic_blocks.last().unwrap()
+ }
+
+ #[inline(always)]
+ pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
+ &mir_body[self.last_bb()].terminator()
+ }
+
+ pub fn set_counter(
+ &mut self,
+ counter_kind: CoverageKind,
+ ) -> Result<ExpressionOperandId, Error> {
+ debug_assert!(
+ // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
+ // have an expression (to be injected into an existing `BasicBlock` represented by this
+ // `BasicCoverageBlock`).
+ self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
+ "attempt to add a `Counter` to a BCB target with existing incoming edge counters"
+ );
+ let operand = counter_kind.as_operand_id();
+ if let Some(replaced) = self.counter_kind.replace(counter_kind) {
+ Error::from_string(format!(
+ "attempt to set a BasicCoverageBlock coverage counter more than once; \
+ {:?} already had counter {:?}",
+ self, replaced,
+ ))
+ } else {
+ Ok(operand)
+ }
+ }
+
+ #[inline(always)]
+ pub fn counter(&self) -> Option<&CoverageKind> {
+ self.counter_kind.as_ref()
+ }
+
+ #[inline(always)]
+ pub fn take_counter(&mut self) -> Option<CoverageKind> {
+ self.counter_kind.take()
+ }
+
+ pub fn set_edge_counter_from(
+ &mut self,
+ from_bcb: BasicCoverageBlock,
+ counter_kind: CoverageKind,
+ ) -> Result<ExpressionOperandId, Error> {
+ if level_enabled!(tracing::Level::DEBUG) {
+ // If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
+ // have an expression (to be injected into an existing `BasicBlock` represented by this
+ // `BasicCoverageBlock`).
+ if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) {
+ return Error::from_string(format!(
+ "attempt to add an incoming edge counter from {:?} when the target BCB already \
+ has a `Counter`",
+ from_bcb
+ ));
+ }
+ }
+ let operand = counter_kind.as_operand_id();
+ if let Some(replaced) =
+ self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind)
+ {
+ Error::from_string(format!(
+ "attempt to set an edge counter more than once; from_bcb: \
+ {:?} already had counter {:?}",
+ from_bcb, replaced,
+ ))
+ } else {
+ Ok(operand)
+ }
+ }
+
+ #[inline]
+ pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> {
+ if let Some(edge_from_bcbs) = &self.edge_from_bcbs {
+ edge_from_bcbs.get(&from_bcb)
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ pub fn take_edge_counters(
+ &mut self,
+ ) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
+ self.edge_from_bcbs.take().map(|m| m.into_iter())
+ }
+
+ pub fn id(&self) -> String {
+ format!("@{}", self.basic_blocks.iter().map(|bb| bb.index().to_string()).join(ID_SEPARATOR))
+ }
+}
+
+/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
+/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
+/// the specific branching BCB, representing the edge between the two. The latter case
+/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
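+///
+/// For example (hypothetical numbering): if `bcb0` ends in a `SwitchInt` targeting `bcb1`
+/// (which has no other incoming edges) and `bcb2` (which has several), the two branches are
+/// rendered by the `Debug` impl below as `bcb1` and `bcb0->bcb2`, respectively.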
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub(super) struct BcbBranch {
+ pub edge_from_bcb: Option<BasicCoverageBlock>,
+ pub target_bcb: BasicCoverageBlock,
+}
+
+impl BcbBranch {
+ pub fn from_to(
+ from_bcb: BasicCoverageBlock,
+ to_bcb: BasicCoverageBlock,
+ basic_coverage_blocks: &CoverageGraph,
+ ) -> Self {
+ let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 {
+ Some(from_bcb)
+ } else {
+ None
+ };
+ Self { edge_from_bcb, target_bcb: to_bcb }
+ }
+
+ pub fn counter<'a>(
+ &self,
+ basic_coverage_blocks: &'a CoverageGraph,
+ ) -> Option<&'a CoverageKind> {
+ if let Some(from_bcb) = self.edge_from_bcb {
+ basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb)
+ } else {
+ basic_coverage_blocks[self.target_bcb].counter()
+ }
+ }
+
+ pub fn is_only_path_to_target(&self) -> bool {
+ self.edge_from_bcb.is_none()
+ }
+}
+
+impl std::fmt::Debug for BcbBranch {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ if let Some(from_bcb) = self.edge_from_bcb {
+ write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
+ } else {
+ write!(fmt, "{:?}", self.target_bcb)
+ }
+ }
+}
+
+// Returns the `Terminator`'s non-unwind successors.
+// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
+// `catch_unwind()` handlers.
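+//
+// For example (hypothetical terminator): for a `Call` with a normal-return target `bb5` and an
+// unwind `cleanup` block `bb9`, only `bb5` is yielded; for a `SwitchInt`, every target is
+// yielded.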
+fn bcb_filtered_successors<'a, 'tcx>(
+ body: &'a mir::Body<'tcx>,
+ term_kind: &'a TerminatorKind<'tcx>,
+) -> Box<dyn Iterator<Item = BasicBlock> + 'a> {
+ Box::new(
+ match &term_kind {
+ // SwitchInt successors are never unwind, and all of them should be traversed.
+ TerminatorKind::SwitchInt { ref targets, .. } => {
+ None.into_iter().chain(targets.all_targets().into_iter().copied())
+ }
+ // For all other kinds, return only the first successor, if any, and ignore unwinds.
+ // NOTE: `chain(&[])` is required to coerce the `option::iter` (from
+ // `next().into_iter()`) into the `mir::Successors` aliased type.
+ _ => term_kind.successors().next().into_iter().chain((&[]).into_iter().copied()),
+ }
+ .filter(move |&successor| body[successor].terminator().kind != TerminatorKind::Unreachable),
+ )
+}
+
+/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
+/// BCBs outside all loops. This supports traversing the BCB CFG in a way that
+/// ensures a loop is completely traversed before processing blocks after the end of the loop.
+#[derive(Debug)]
+pub(super) struct TraversalContext {
+ /// From one or more backedges returning to a loop header.
+ pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,
+
+ /// A worklist of BCBs to be traversed, for the loop with the given loop backedges,
+ /// such that the loop is the innermost loop containing these BCBs.
+ pub worklist: Vec<BasicCoverageBlock>,
+}
+
+pub(super) struct TraverseCoverageGraphWithLoops {
+ pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
+ pub context_stack: Vec<TraversalContext>,
+ visited: BitSet<BasicCoverageBlock>,
+}
+
+impl TraverseCoverageGraphWithLoops {
+ pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
+ let start_bcb = basic_coverage_blocks.start_node();
+ let backedges = find_loop_backedges(basic_coverage_blocks);
+ let context_stack =
+ vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }];
+ // `context_stack` starts with a `TraversalContext` for the main function context (beginning
+ // with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
+ // of the stack as loops are entered, and popped off of the stack when a loop's worklist is
+ // exhausted.
+ let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
+ Self { backedges, context_stack, visited }
+ }
+
+ pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
+ debug!(
+ "TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
+ self.context_stack.iter().rev().collect::<Vec<_>>()
+ );
+ while let Some(next_bcb) = {
+ // Strip contexts with empty worklists from the top of the stack
+ while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) {
+ self.context_stack.pop();
+ }
+ // Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
+ self.context_stack.last_mut().map_or(None, |context| context.worklist.pop())
+ } {
+ if !self.visited.insert(next_bcb) {
+ debug!("Already visited: {:?}", next_bcb);
+ continue;
+ }
+ debug!("Visiting {:?}", next_bcb);
+ if !self.backedges[next_bcb].is_empty() {
+ debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
+ self.context_stack.push(TraversalContext {
+ loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
+ worklist: Vec::new(),
+ });
+ }
+ self.extend_worklist(basic_coverage_blocks, next_bcb);
+ return Some(next_bcb);
+ }
+ None
+ }
+
+ pub fn extend_worklist(
+ &mut self,
+ basic_coverage_blocks: &CoverageGraph,
+ bcb: BasicCoverageBlock,
+ ) {
+ let successors = &basic_coverage_blocks.successors[bcb];
+ debug!("{:?} has {} successors:", bcb, successors.len());
+ for &successor in successors {
+ if successor == bcb {
+ debug!(
+ "{:?} has itself as its own successor. (Note, the compiled code will \
+ generate an infinite loop.)",
+ bcb
+ );
+ // Don't re-add this successor to the worklist. We are already processing it.
+ break;
+ }
+ for context in self.context_stack.iter_mut().rev() {
+ // Add successors of the current BCB to the appropriate context. Successors that
+ // stay within a loop are added to that loop's context worklist. Successors that
+ // exit the loop (they are not dominated by the loop header) must be reachable
+ // from other BCBs outside the loop, and they will be added to a different
+ // worklist.
+ //
+ // Branching blocks (with more than one successor) must be processed before
+ // blocks with only one successor, to prevent unnecessarily complicating
+ // `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
+ // branching block would have given an `Expression` (or vice versa).
+ let (some_successor_to_add, some_loop_header) =
+ if let Some((_, loop_header)) = context.loop_backedges {
+ if basic_coverage_blocks.is_dominated_by(successor, loop_header) {
+ (Some(successor), Some(loop_header))
+ } else {
+ (None, None)
+ }
+ } else {
+ (Some(successor), None)
+ };
+ if let Some(successor_to_add) = some_successor_to_add {
+ if basic_coverage_blocks.successors[successor_to_add].len() > 1 {
+ debug!(
+ "{:?} successor is branching. Prioritize it at the beginning of \
+ the {}",
+ successor_to_add,
+ if let Some(loop_header) = some_loop_header {
+ format!("worklist for the loop headed by {:?}", loop_header)
+ } else {
+ String::from("non-loop worklist")
+ },
+ );
+ context.worklist.insert(0, successor_to_add);
+ } else {
+ debug!(
+ "{:?} successor is non-branching. Defer it to the end of the {}",
+ successor_to_add,
+ if let Some(loop_header) = some_loop_header {
+ format!("worklist for the loop headed by {:?}", loop_header)
+ } else {
+ String::from("non-loop worklist")
+ },
+ );
+ context.worklist.push(successor_to_add);
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ pub fn is_complete(&self) -> bool {
+ self.visited.count() == self.visited.domain_size()
+ }
+
+ pub fn unvisited(&self) -> Vec<BasicCoverageBlock> {
+ let mut unvisited_set: BitSet<BasicCoverageBlock> =
+ BitSet::new_filled(self.visited.domain_size());
+ unvisited_set.subtract(&self.visited);
+ unvisited_set.iter().collect::<Vec<_>>()
+ }
+}
+
+pub(super) fn find_loop_backedges(
+ basic_coverage_blocks: &CoverageGraph,
+) -> IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>> {
+ let num_bcbs = basic_coverage_blocks.num_nodes();
+ let mut backedges = IndexVec::from_elem_n(Vec::<BasicCoverageBlock>::new(), num_bcbs);
+
+ // Identify loops by their backedges.
+ //
+ // The computational complexity is bounded by `n * s * d`, where `n` is the number of
+ // `BasicCoverageBlock` nodes (the simplified/reduced representation of the CFG derived from the
+ // MIR); `s` is the average number of successors per node (which is most likely less than 2, and
+ // independent of the size of the function, so it can be treated as a constant);
+ // and `d` is the average number of dominators per node.
+ //
+ // The average number of dominators depends on the size and complexity of the function, and
+ // nodes near the start of the function's control flow graph typically have fewer dominators
+ // than nodes near the end of the CFG. Without doing a detailed mathematical analysis, I
+ // think the resulting complexity has the characteristics of O(n log n).
+ //
+ // The overall complexity appears to be comparable to many other MIR transform algorithms, and I
+ // don't expect that this function is creating a performance hot spot, but if this becomes an
+ // issue, there may be ways to optimize the `is_dominated_by` algorithm (as indicated by an
+ // existing `FIXME` comment in that code), or possibly ways to optimize its usage here, perhaps
+ // by keeping track of results for visited `BasicCoverageBlock`s if they can be used to short
+ // circuit downstream `is_dominated_by` checks.
+ //
+ // For now, that kind of optimization seems unnecessarily complicated.
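+ //
+ // For example (hypothetical numbering): given BCB edges `bcb1 -> bcb2 -> bcb1`, where
+ // `bcb1` dominates `bcb2`, the loop below sees the successor edge `bcb2 -> bcb1`, finds
+ // that `bcb2` is dominated by `bcb1`, and records `bcb2` in `backedges[bcb1]`.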
+ for (bcb, _) in basic_coverage_blocks.iter_enumerated() {
+ for &successor in &basic_coverage_blocks.successors[bcb] {
+ if basic_coverage_blocks.is_dominated_by(bcb, successor) {
+ let loop_header = successor;
+ let backedge_from_bcb = bcb;
+ debug!(
+ "Found BCB backedge: {:?} -> loop_header: {:?}",
+ backedge_from_bcb, loop_header
+ );
+ backedges[loop_header].push(backedge_from_bcb);
+ }
+ }
+ }
+ backedges
+}
+
+pub struct ShortCircuitPreorder<
+ 'a,
+ 'tcx,
+ F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
+> {
+ body: &'a mir::Body<'tcx>,
+ visited: BitSet<BasicBlock>,
+ worklist: Vec<BasicBlock>,
+ filtered_successors: F,
+}
+
+impl<
+ 'a,
+ 'tcx,
+ F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
+> ShortCircuitPreorder<'a, 'tcx, F>
+{
+ pub fn new(
+ body: &'a mir::Body<'tcx>,
+ filtered_successors: F,
+ ) -> ShortCircuitPreorder<'a, 'tcx, F> {
+ let worklist = vec![mir::START_BLOCK];
+
+ ShortCircuitPreorder {
+ body,
+ visited: BitSet::new_empty(body.basic_blocks().len()),
+ worklist,
+ filtered_successors,
+ }
+ }
+}
+
+impl<
+ 'a,
+ 'tcx,
+ F: Fn(&'a mir::Body<'tcx>, &'a TerminatorKind<'tcx>) -> Box<dyn Iterator<Item = BasicBlock> + 'a>,
+> Iterator for ShortCircuitPreorder<'a, 'tcx, F>
+{
+ type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+ fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+ while let Some(idx) = self.worklist.pop() {
+ if !self.visited.insert(idx) {
+ continue;
+ }
+
+ let data = &self.body[idx];
+
+ if let Some(ref term) = data.terminator {
+ self.worklist.extend((self.filtered_successors)(&self.body, &term.kind));
+ }
+
+ return Some((idx, data));
+ }
+
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let size = self.body.basic_blocks().len() - self.visited.count();
+ (size, Some(size))
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs
new file mode 100644
index 000000000..2619626a5
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/mod.rs
@@ -0,0 +1,580 @@
+pub mod query;
+
+mod counters;
+mod debug;
+mod graph;
+mod spans;
+
+#[cfg(test)]
+mod tests;
+
+use counters::CoverageCounters;
+use graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph};
+use spans::{CoverageSpan, CoverageSpans};
+
+use crate::MirPass;
+
+use rustc_data_structures::graph::WithNumNodes;
+use rustc_data_structures::sync::Lrc;
+use rustc_index::vec::IndexVec;
+use rustc_middle::hir;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::dump_enabled;
+use rustc_middle::mir::{
+ self, BasicBlock, BasicBlockData, Coverage, SourceInfo, Statement, StatementKind, Terminator,
+ TerminatorKind,
+};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::def_id::DefId;
+use rustc_span::source_map::SourceMap;
+use rustc_span::{CharPos, ExpnKind, Pos, SourceFile, Span, Symbol};
+
+/// A simple error message wrapper for `coverage::Error`s.
+#[derive(Debug)]
+struct Error {
+ message: String,
+}
+
+impl Error {
+ pub fn from_string<T>(message: String) -> Result<T, Error> {
+ Err(Self { message })
+ }
+}
+
+/// Inserts `StatementKind::Coverage` statements that instrument the binary with injected
+/// counters (via the intrinsic `llvm.instrprof.increment`) and/or inject metadata used during
+/// codegen to construct the coverage map.
+pub struct InstrumentCoverage;
+
+impl<'tcx> MirPass<'tcx> for InstrumentCoverage {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.instrument_coverage()
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, mir_body: &mut mir::Body<'tcx>) {
+ let mir_source = mir_body.source;
+
+ // If the InstrumentCoverage pass is called on promoted MIRs, skip them.
+ // See: https://github.com/rust-lang/rust/pull/73011#discussion_r438317601
+ if mir_source.promoted.is_some() {
+ trace!(
+ "InstrumentCoverage skipped for {:?} (already promoted for Miri evaluation)",
+ mir_source.def_id()
+ );
+ return;
+ }
+
+ let is_fn_like =
+ tcx.hir().get_by_def_id(mir_source.def_id().expect_local()).fn_kind().is_some();
+
+ // Only instrument functions, methods, and closures (not constants since they are evaluated
+ // at compile time by Miri).
+ // FIXME(#73156): Handle source code coverage in const eval, but note, if and when const
+ // expressions get coverage spans, we will probably have to "carve out" space for const
+ // expressions from coverage spans in enclosing MIR's, like we do for closures. (That might
+ // be tricky if const expressions have no corresponding statements in the enclosing MIR.
+ // Closures are carved out by their initial `Assign` statement.)
+ if !is_fn_like {
+ trace!("InstrumentCoverage skipped for {:?} (not an fn-like)", mir_source.def_id());
+ return;
+ }
+
+ match mir_body.basic_blocks()[mir::START_BLOCK].terminator().kind {
+ TerminatorKind::Unreachable => {
+ trace!("InstrumentCoverage skipped for unreachable `START_BLOCK`");
+ return;
+ }
+ _ => {}
+ }
+
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(mir_source.def_id());
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NO_COVERAGE) {
+ return;
+ }
+
+ trace!("InstrumentCoverage starting for {:?}", mir_source.def_id());
+ Instrumentor::new(&self.name(), tcx, mir_body).inject_counters();
+ trace!("InstrumentCoverage done for {:?}", mir_source.def_id());
+ }
+}
+
+struct Instrumentor<'a, 'tcx> {
+ pass_name: &'a str,
+ tcx: TyCtxt<'tcx>,
+ mir_body: &'a mut mir::Body<'tcx>,
+ source_file: Lrc<SourceFile>,
+ fn_sig_span: Span,
+ body_span: Span,
+ basic_coverage_blocks: CoverageGraph,
+ coverage_counters: CoverageCounters,
+}
+
+impl<'a, 'tcx> Instrumentor<'a, 'tcx> {
+ fn new(pass_name: &'a str, tcx: TyCtxt<'tcx>, mir_body: &'a mut mir::Body<'tcx>) -> Self {
+ let source_map = tcx.sess.source_map();
+ let def_id = mir_body.source.def_id();
+ let (some_fn_sig, hir_body) = fn_sig_and_body(tcx, def_id);
+
+ let body_span = get_body_span(tcx, hir_body, mir_body);
+
+ let source_file = source_map.lookup_source_file(body_span.lo());
+ let fn_sig_span = match some_fn_sig.filter(|fn_sig| {
+ fn_sig.span.eq_ctxt(body_span)
+ && Lrc::ptr_eq(&source_file, &source_map.lookup_source_file(fn_sig.span.lo()))
+ }) {
+ Some(fn_sig) => fn_sig.span.with_hi(body_span.lo()),
+ None => body_span.shrink_to_lo(),
+ };
+
+ debug!(
+ "instrumenting {}: {:?}, fn sig span: {:?}, body span: {:?}",
+ if tcx.is_closure(def_id) { "closure" } else { "function" },
+ def_id,
+ fn_sig_span,
+ body_span
+ );
+
+ let function_source_hash = hash_mir_source(tcx, hir_body);
+ let basic_coverage_blocks = CoverageGraph::from_mir(mir_body);
+ Self {
+ pass_name,
+ tcx,
+ mir_body,
+ source_file,
+ fn_sig_span,
+ body_span,
+ basic_coverage_blocks,
+ coverage_counters: CoverageCounters::new(function_source_hash),
+ }
+ }
+
+ fn inject_counters(&'a mut self) {
+ let tcx = self.tcx;
+ let mir_source = self.mir_body.source;
+ let def_id = mir_source.def_id();
+ let fn_sig_span = self.fn_sig_span;
+ let body_span = self.body_span;
+
+ let mut graphviz_data = debug::GraphvizData::new();
+ let mut debug_used_expressions = debug::UsedExpressions::new();
+
+ let dump_mir = dump_enabled(tcx, self.pass_name, def_id);
+ let dump_graphviz = dump_mir && tcx.sess.opts.unstable_opts.dump_mir_graphviz;
+ let dump_spanview = dump_mir && tcx.sess.opts.unstable_opts.dump_mir_spanview.is_some();
+
+ if dump_graphviz {
+ graphviz_data.enable();
+ self.coverage_counters.enable_debug();
+ }
+
+ if dump_graphviz || level_enabled!(tracing::Level::DEBUG) {
+ debug_used_expressions.enable();
+ }
+
+ ////////////////////////////////////////////////////
+ // Compute `CoverageSpan`s from the `CoverageGraph`.
+ let coverage_spans = CoverageSpans::generate_coverage_spans(
+ &self.mir_body,
+ fn_sig_span,
+ body_span,
+ &self.basic_coverage_blocks,
+ );
+
+ if dump_spanview {
+ debug::dump_coverage_spanview(
+ tcx,
+ self.mir_body,
+ &self.basic_coverage_blocks,
+ self.pass_name,
+ body_span,
+ &coverage_spans,
+ );
+ }
+
+ ////////////////////////////////////////////////////
+ // Create an optimized mix of `Counter`s and `Expression`s for the `CoverageGraph`. Ensure
+ // every `CoverageSpan` has a `Counter` or `Expression` assigned to its `BasicCoverageBlock`
+ // and all `Expression` dependencies (operands) are also generated, for any other
+ // `BasicCoverageBlock`s not already associated with a `CoverageSpan`.
+ //
+ // Intermediate expressions (used to compute other `Expression` values), which have no
+ // direct association with any `BasicCoverageBlock`, are returned in the method `Result`.
+ let intermediate_expressions_or_error = self
+ .coverage_counters
+ .make_bcb_counters(&mut self.basic_coverage_blocks, &coverage_spans);
+
+ let (result, intermediate_expressions) = match intermediate_expressions_or_error {
+ Ok(intermediate_expressions) => {
+ // If debugging, add any intermediate expressions (which are not associated with any
+ // BCB) to the `debug_used_expressions` map.
+ if debug_used_expressions.is_enabled() {
+ for intermediate_expression in &intermediate_expressions {
+ debug_used_expressions.add_expression_operands(intermediate_expression);
+ }
+ }
+
+ ////////////////////////////////////////////////////
+ // Remove the counter or edge counter from each `CoverageSpan`'s associated
+ // `BasicCoverageBlock`, and inject a `Coverage` statement into the MIR.
+ //
+ // `Coverage` statements injected from `CoverageSpan`s will include the code regions
+ // (source code start and end positions) to be counted by the associated counter.
+ //
+ // These `CoverageSpan`-associated counters are removed from their associated
+ // `BasicCoverageBlock`s so that the only remaining counters in the `CoverageGraph`
+ // are indirect counters (to be injected next, without associated code regions).
+ self.inject_coverage_span_counters(
+ coverage_spans,
+ &mut graphviz_data,
+ &mut debug_used_expressions,
+ );
+
+ ////////////////////////////////////////////////////
+ // For any remaining `BasicCoverageBlock` counters (that were not associated with
+ // any `CoverageSpan`), inject `Coverage` statements (_without_ code region `Span`s)
+ // to ensure `BasicCoverageBlock` counters that other `Expression`s may depend on
+ // are in fact counted, even though they don't directly contribute to counting
+ // their own independent code region's coverage.
+ self.inject_indirect_counters(&mut graphviz_data, &mut debug_used_expressions);
+
+ // Intermediate expressions will be injected as the final step, after generating
+ // debug output, if any.
+ ////////////////////////////////////////////////////
+
+ (Ok(()), intermediate_expressions)
+ }
+ Err(e) => (Err(e), Vec::new()),
+ };
+
+ if graphviz_data.is_enabled() {
+ // Even if there was an error, a partial CoverageGraph can still generate a useful
+ // graphviz output.
+ debug::dump_coverage_graphviz(
+ tcx,
+ self.mir_body,
+ self.pass_name,
+ &self.basic_coverage_blocks,
+ &self.coverage_counters.debug_counters,
+ &graphviz_data,
+ &intermediate_expressions,
+ &debug_used_expressions,
+ );
+ }
+
+ if let Err(e) = result {
+ bug!("Error processing: {:?}: {:?}", self.mir_body.source.def_id(), e.message)
+ };
+
+ // Depending on current `debug_options()`, `alert_on_unused_expressions()` could panic, so
+ // this check is performed as late as possible, to allow other debug output (logs and dump
+ // files), which might be helpful in analyzing unused expressions, to still be generated.
+ debug_used_expressions.alert_on_unused_expressions(&self.coverage_counters.debug_counters);
+
+ ////////////////////////////////////////////////////
+ // Finally, inject the intermediate expressions collected along the way.
+ for intermediate_expression in intermediate_expressions {
+ inject_intermediate_expression(self.mir_body, intermediate_expression);
+ }
+ }
+
+ /// Inject a counter for each `CoverageSpan`. There can be multiple `CoverageSpan`s for a given
+ /// BCB, but only one actual counter needs to be incremented per BCB. `bcb_counters` maps each
+ /// `bcb` to its `Counter`, when injected. Subsequent `CoverageSpan`s for a BCB that already has
+ /// a `Counter` will inject an `Expression` instead, and compute its value by adding `ZERO` to
+ /// the BCB `Counter` value.
+ ///
+ /// If debugging, add every BCB `Expression` associated with a `CoverageSpan` to the
+ /// `used_expression_operands` map.
+ fn inject_coverage_span_counters(
+ &mut self,
+ coverage_spans: Vec<CoverageSpan>,
+ graphviz_data: &mut debug::GraphvizData,
+ debug_used_expressions: &mut debug::UsedExpressions,
+ ) {
+ let tcx = self.tcx;
+ let source_map = tcx.sess.source_map();
+ let body_span = self.body_span;
+ let file_name = Symbol::intern(&self.source_file.name.prefer_remapped().to_string_lossy());
+
+ let mut bcb_counters = IndexVec::from_elem_n(None, self.basic_coverage_blocks.num_nodes());
+ for covspan in coverage_spans {
+ let bcb = covspan.bcb;
+ let span = covspan.span;
+ let counter_kind = if let Some(&counter_operand) = bcb_counters[bcb].as_ref() {
+ self.coverage_counters.make_identity_counter(counter_operand)
+ } else if let Some(counter_kind) = self.bcb_data_mut(bcb).take_counter() {
+ bcb_counters[bcb] = Some(counter_kind.as_operand_id());
+ debug_used_expressions.add_expression_operands(&counter_kind);
+ counter_kind
+ } else {
+ bug!("Every BasicCoverageBlock should have a Counter or Expression");
+ };
+ graphviz_data.add_bcb_coverage_span_with_counter(bcb, &covspan, &counter_kind);
+
+ debug!(
+ "Calling make_code_region(file_name={}, source_file={:?}, span={}, body_span={})",
+ file_name,
+ self.source_file,
+ source_map.span_to_diagnostic_string(span),
+ source_map.span_to_diagnostic_string(body_span)
+ );
+
+ inject_statement(
+ self.mir_body,
+ counter_kind,
+ self.bcb_leader_bb(bcb),
+ Some(make_code_region(source_map, file_name, &self.source_file, span, body_span)),
+ );
+ }
+ }
+
+ /// `inject_coverage_span_counters()` looped through the `CoverageSpan`s and injected the
+ /// counter from the `CoverageSpan`'s `BasicCoverageBlock`, removing it from the BCB in the
+ /// process (via `take_counter()`).
+ ///
+ /// Any other counter associated with a `BasicCoverageBlock`, or its incoming edge, but not
+ /// associated with a `CoverageSpan`, should only exist if the counter is an `Expression`
+ /// dependency (one of the expression operands). Collect them, and inject the additional
+ /// counters into the MIR, without a reportable coverage span.
+ fn inject_indirect_counters(
+ &mut self,
+ graphviz_data: &mut debug::GraphvizData,
+ debug_used_expressions: &mut debug::UsedExpressions,
+ ) {
+ let mut bcb_counters_without_direct_coverage_spans = Vec::new();
+ for (target_bcb, target_bcb_data) in self.basic_coverage_blocks.iter_enumerated_mut() {
+ if let Some(counter_kind) = target_bcb_data.take_counter() {
+ bcb_counters_without_direct_coverage_spans.push((None, target_bcb, counter_kind));
+ }
+ if let Some(edge_counters) = target_bcb_data.take_edge_counters() {
+ for (from_bcb, counter_kind) in edge_counters {
+ bcb_counters_without_direct_coverage_spans.push((
+ Some(from_bcb),
+ target_bcb,
+ counter_kind,
+ ));
+ }
+ }
+ }
+
+ // If debug is enabled, validate that every BCB or edge counter not directly associated
+ // with a coverage span is at least indirectly associated (it is a dependency of a BCB
+ // counter that _is_ associated with a coverage span).
+ debug_used_expressions.validate(&bcb_counters_without_direct_coverage_spans);
+
+ for (edge_from_bcb, target_bcb, counter_kind) in bcb_counters_without_direct_coverage_spans
+ {
+ debug_used_expressions.add_unused_expression_if_not_found(
+ &counter_kind,
+ edge_from_bcb,
+ target_bcb,
+ );
+
+ match counter_kind {
+ CoverageKind::Counter { .. } => {
+ let inject_to_bb = if let Some(from_bcb) = edge_from_bcb {
+ // The MIR edge starts at `from_bb` (the outgoing / last BasicBlock in
+ // `from_bcb`) and ends at `to_bb` (the incoming / first BasicBlock in the
+ // `target_bcb`; also called the `leader_bb`).
+ let from_bb = self.bcb_last_bb(from_bcb);
+ let to_bb = self.bcb_leader_bb(target_bcb);
+
+ let new_bb = inject_edge_counter_basic_block(self.mir_body, from_bb, to_bb);
+ graphviz_data.set_edge_counter(from_bcb, new_bb, &counter_kind);
+ debug!(
+ "Edge {:?} (last {:?}) -> {:?} (leader {:?}) requires a new MIR \
+ BasicBlock {:?}, for unclaimed edge counter {}",
+ edge_from_bcb,
+ from_bb,
+ target_bcb,
+ to_bb,
+ new_bb,
+ self.format_counter(&counter_kind),
+ );
+ new_bb
+ } else {
+ let target_bb = self.bcb_last_bb(target_bcb);
+ graphviz_data.add_bcb_dependency_counter(target_bcb, &counter_kind);
+ debug!(
+ "{:?} ({:?}) gets a new Coverage statement for unclaimed counter {}",
+ target_bcb,
+ target_bb,
+ self.format_counter(&counter_kind),
+ );
+ target_bb
+ };
+
+ inject_statement(self.mir_body, counter_kind, inject_to_bb, None);
+ }
+ CoverageKind::Expression { .. } => {
+ inject_intermediate_expression(self.mir_body, counter_kind)
+ }
+ _ => bug!("CoverageKind should be a counter"),
+ }
+ }
+ }
+
+ #[inline]
+ fn bcb_leader_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
+ self.bcb_data(bcb).leader_bb()
+ }
+
+ #[inline]
+ fn bcb_last_bb(&self, bcb: BasicCoverageBlock) -> BasicBlock {
+ self.bcb_data(bcb).last_bb()
+ }
+
+ #[inline]
+ fn bcb_data(&self, bcb: BasicCoverageBlock) -> &BasicCoverageBlockData {
+ &self.basic_coverage_blocks[bcb]
+ }
+
+ #[inline]
+ fn bcb_data_mut(&mut self, bcb: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
+ &mut self.basic_coverage_blocks[bcb]
+ }
+
+ #[inline]
+ fn format_counter(&self, counter_kind: &CoverageKind) -> String {
+ self.coverage_counters.debug_counters.format_counter(counter_kind)
+ }
+}
+
+fn inject_edge_counter_basic_block(
+ mir_body: &mut mir::Body<'_>,
+ from_bb: BasicBlock,
+ to_bb: BasicBlock,
+) -> BasicBlock {
+ let span = mir_body[from_bb].terminator().source_info.span.shrink_to_hi();
+ let new_bb = mir_body.basic_blocks_mut().push(BasicBlockData {
+ statements: vec![], // counter will be injected here
+ terminator: Some(Terminator {
+ source_info: SourceInfo::outermost(span),
+ kind: TerminatorKind::Goto { target: to_bb },
+ }),
+ is_cleanup: false,
+ });
+ let edge_ref = mir_body[from_bb]
+ .terminator_mut()
+ .successors_mut()
+ .find(|successor| **successor == to_bb)
+ .expect("from_bb should have a successor for to_bb");
+ *edge_ref = new_bb;
+ new_bb
+}
+
+fn inject_statement(
+ mir_body: &mut mir::Body<'_>,
+ counter_kind: CoverageKind,
+ bb: BasicBlock,
+ some_code_region: Option<CodeRegion>,
+) {
+ debug!(
+ " injecting statement {:?} for {:?} at code region: {:?}",
+ counter_kind, bb, some_code_region
+ );
+ let data = &mut mir_body[bb];
+ let source_info = data.terminator().source_info;
+ let statement = Statement {
+ source_info,
+ kind: StatementKind::Coverage(Box::new(Coverage {
+ kind: counter_kind,
+ code_region: some_code_region,
+ })),
+ };
+ data.statements.insert(0, statement);
+}
+
+// Non-code expressions are injected into the coverage map, without generating executable code.
+fn inject_intermediate_expression(mir_body: &mut mir::Body<'_>, expression: CoverageKind) {
+ debug_assert!(matches!(expression, CoverageKind::Expression { .. }));
+ debug!(" injecting non-code expression {:?}", expression);
+ let inject_in_bb = mir::START_BLOCK;
+ let data = &mut mir_body[inject_in_bb];
+ let source_info = data.terminator().source_info;
+ let statement = Statement {
+ source_info,
+ kind: StatementKind::Coverage(Box::new(Coverage { kind: expression, code_region: None })),
+ };
+ data.statements.push(statement);
+}
+
+/// Convert the Span into its file name, start line and column, and end line and column
+fn make_code_region(
+ source_map: &SourceMap,
+ file_name: Symbol,
+ source_file: &Lrc<SourceFile>,
+ span: Span,
+ body_span: Span,
+) -> CodeRegion {
+ let (start_line, mut start_col) = source_file.lookup_file_pos(span.lo());
+ let (end_line, end_col) = if span.hi() == span.lo() {
+ let (end_line, mut end_col) = (start_line, start_col);
+ // Extend an empty span by one character so the region will be counted.
+ let CharPos(char_pos) = start_col;
+ if span.hi() == body_span.hi() {
+ start_col = CharPos(char_pos - 1);
+ } else {
+ end_col = CharPos(char_pos + 1);
+ }
+ (end_line, end_col)
+ } else {
+ source_file.lookup_file_pos(span.hi())
+ };
+ let start_line = source_map.doctest_offset_line(&source_file.name, start_line);
+ let end_line = source_map.doctest_offset_line(&source_file.name, end_line);
+ CodeRegion {
+ file_name,
+ start_line: start_line as u32,
+ start_col: start_col.to_u32() + 1,
+ end_line: end_line as u32,
+ end_col: end_col.to_u32() + 1,
+ }
+}
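+
+// A worked example of the column arithmetic in `make_code_region` (hypothetical positions,
+// assuming `lookup_file_pos` returns a 1-based line and a 0-based `CharPos` column): a
+// non-empty span from (line 3, CharPos(4)) to (line 3, CharPos(9)) yields `start_line: 3,
+// start_col: 5, end_line: 3, end_col: 10`, shifting columns to the 1-based convention used
+// by the coverage map.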
+
+fn fn_sig_and_body<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> (Option<&'tcx rustc_hir::FnSig<'tcx>>, &'tcx rustc_hir::Body<'tcx>) {
+ // FIXME(#79625): Consider improving MIR to provide the information needed, to avoid going back
+ // to HIR for it.
+ let hir_node = tcx.hir().get_if_local(def_id).expect("expected DefId is local");
+ let fn_body_id = hir::map::associated_body(hir_node).expect("HIR node is a function with body");
+ (hir::map::fn_sig(hir_node), tcx.hir().body(fn_body_id))
+}
+
+fn get_body_span<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ hir_body: &rustc_hir::Body<'tcx>,
+ mir_body: &mut mir::Body<'tcx>,
+) -> Span {
+ let mut body_span = hir_body.value.span;
+ let def_id = mir_body.source.def_id();
+
+ if tcx.is_closure(def_id) {
+ // If the MIR function is a closure, and if the closure body span
+ // starts from a macro, but its content is not in that macro, try
+ // to find a non-macro callsite, and instrument the spans there
+ // instead.
+ loop {
+ let expn_data = body_span.ctxt().outer_expn_data();
+ if expn_data.is_root() {
+ break;
+ }
+ if let ExpnKind::Macro { .. } = expn_data.kind {
+ body_span = expn_data.call_site;
+ } else {
+ break;
+ }
+ }
+ }
+
+ body_span
+}
+
+fn hash_mir_source<'tcx>(tcx: TyCtxt<'tcx>, hir_body: &'tcx rustc_hir::Body<'tcx>) -> u64 {
+ // FIXME(cjgillot) Stop hashing HIR manually here.
+ let owner = hir_body.id().hir_id.owner;
+ tcx.hir_owner_nodes(owner).unwrap().hash_including_bodies.to_smaller_hash()
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs
new file mode 100644
index 000000000..9d02f58ae
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/query.rs
@@ -0,0 +1,170 @@
+use super::*;
+
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::{self, Body, Coverage, CoverageInfo};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::def_id::DefId;
+
+/// A `query` provider for retrieving coverage information injected into MIR.
+pub(crate) fn provide(providers: &mut Providers) {
+ providers.coverageinfo = |tcx, def_id| coverageinfo(tcx, def_id);
+ providers.covered_code_regions = |tcx, def_id| covered_code_regions(tcx, def_id);
+}
+
+/// The `num_counters` argument to `llvm.instrprof.increment` is the max counter_id + 1, or in
+/// other words, the number of counter value references injected into the MIR (plus 1 for the
+/// reserved `ZERO` counter, which uses counter ID `0` when included in an expression). Injected
+/// counters have counter IDs in the range `1..num_counters`.
+///
+/// `num_expressions` is the number of counter expressions added to the MIR body.
+///
+/// Both `num_counters` and `num_expressions` are used to initialize new vectors, during backend
+/// code generation, to look up counters and expressions by simple u32 indexes.
+///
+/// MIR optimization may split and duplicate some BasicBlock sequences, or optimize out some code
+/// including injected counters. (It is OK if some counters are optimized out, but those counters
+/// are still included in the total `num_counters` or `num_expressions`.) Simply counting the
+/// calls may not work; but computing the number of counters or expressions by adding `1` to the
+/// highest ID (for a given instrumented function) is valid.
+///
+/// This visitor runs twice, first with `add_missing_operands` set to `false`, to find the maximum
+/// counter ID and maximum expression ID based on their enum variant `id` fields; then, as a
+/// safeguard, with `add_missing_operands` set to `true`, to find any other counter or expression
+/// IDs referenced by expression operands, if not already seen.
+///
+/// Ideally, each operand ID in a MIR `CoverageKind::Expression` will have a separate MIR `Coverage`
+/// statement for the `Counter` or `Expression` with the referenced ID, but since current or future
+/// MIR optimizations can theoretically optimize out segments of a MIR, it may not be possible to
+/// guarantee this, so the second pass ensures the `CoverageInfo` counts include all referenced IDs.
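+///
+/// For example (hypothetical IDs): if the MIR contains counters with IDs `{1, 3}` and one
+/// expression with ID `u32::MAX - 1`, the first pass yields `num_counters = 4` (the maximum
+/// counter ID plus 1) and `num_expressions = 2` (expression IDs count down from `u32::MAX`,
+/// so `u32::MAX - 1` maps to expression index 1, and the count is that index plus 1).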
+struct CoverageVisitor {
+ info: CoverageInfo,
+ add_missing_operands: bool,
+}
+
+impl CoverageVisitor {
+ /// Updates `num_counters` to the maximum encountered zero-based counter_id plus 1. Note the
+ /// final computed number of counters should be the number of all `CoverageKind::Counter`
+ /// statements in the MIR *plus one* for the implicit `ZERO` counter.
+ #[inline(always)]
+ fn update_num_counters(&mut self, counter_id: u32) {
+ self.info.num_counters = std::cmp::max(self.info.num_counters, counter_id + 1);
+ }
+
+ /// Computes an expression index for each expression ID, and updates `num_expressions` to the
+ /// maximum encountered index plus 1.
+ #[inline(always)]
+ fn update_num_expressions(&mut self, expression_id: u32) {
+ let expression_index = u32::MAX - expression_id;
+ self.info.num_expressions = std::cmp::max(self.info.num_expressions, expression_index + 1);
+ }
+
+ fn update_from_expression_operand(&mut self, operand_id: u32) {
+ if operand_id >= self.info.num_counters {
+ let operand_as_expression_index = u32::MAX - operand_id;
+ if operand_as_expression_index >= self.info.num_expressions {
+ // The operand ID is outside the known range of counter IDs and also outside the
+ // known range of expression IDs. In either case, the result of a missing operand
+ // (if and when used in an expression) will be zero, so from a computation
+ // perspective, it doesn't matter whether it is interpreted as a counter or an
+ // expression.
+ //
+ // However, the `num_counters` and `num_expressions` query results are used to
+ // allocate arrays when generating the coverage map (during codegen), so choose
+ // the type that grows either `num_counters` or `num_expressions` the least.
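+ //
+ // For example (hypothetical values): with `num_counters = 4` and `num_expressions = 2`,
+ // an unseen operand ID of `10` is only 6 above the counter range, while `u32::MAX - 10`
+ // is far above the expression range, so it is counted as a counter below.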
+ if operand_id - self.info.num_counters
+ < operand_as_expression_index - self.info.num_expressions
+ {
+ self.update_num_counters(operand_id)
+ } else {
+ self.update_num_expressions(operand_id)
+ }
+ }
+ }
+ }
+
+ fn visit_body(&mut self, body: &Body<'_>) {
+ for bb_data in body.basic_blocks().iter() {
+ for statement in bb_data.statements.iter() {
+ if let StatementKind::Coverage(box ref coverage) = statement.kind {
+ if is_inlined(body, statement) {
+ continue;
+ }
+ self.visit_coverage(coverage);
+ }
+ }
+ }
+ }
+
+ fn visit_coverage(&mut self, coverage: &Coverage) {
+ if self.add_missing_operands {
+ match coverage.kind {
+ CoverageKind::Expression { lhs, rhs, .. } => {
+ self.update_from_expression_operand(u32::from(lhs));
+ self.update_from_expression_operand(u32::from(rhs));
+ }
+ _ => {}
+ }
+ } else {
+ match coverage.kind {
+ CoverageKind::Counter { id, .. } => {
+ self.update_num_counters(u32::from(id));
+ }
+ CoverageKind::Expression { id, .. } => {
+ self.update_num_expressions(u32::from(id));
+ }
+ _ => {}
+ }
+ }
+ }
+}
+
+fn coverageinfo<'tcx>(tcx: TyCtxt<'tcx>, instance_def: ty::InstanceDef<'tcx>) -> CoverageInfo {
+ let mir_body = tcx.instance_mir(instance_def);
+
+ let mut coverage_visitor = CoverageVisitor {
+ // num_counters always has at least the `ZERO` counter.
+ info: CoverageInfo { num_counters: 1, num_expressions: 0 },
+ add_missing_operands: false,
+ };
+
+ coverage_visitor.visit_body(mir_body);
+
+ coverage_visitor.add_missing_operands = true;
+ coverage_visitor.visit_body(mir_body);
+
+ coverage_visitor.info
+}
+
+fn covered_code_regions<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Vec<&'tcx CodeRegion> {
+ let body = mir_body(tcx, def_id);
+ body.basic_blocks()
+ .iter()
+ .flat_map(|data| {
+ data.statements.iter().filter_map(|statement| match statement.kind {
+ StatementKind::Coverage(box ref coverage) => {
+ if is_inlined(body, statement) {
+ None
+ } else {
+ coverage.code_region.as_ref() // may be None
+ }
+ }
+ _ => None,
+ })
+ })
+ .collect()
+}
+
+fn is_inlined(body: &Body<'_>, statement: &Statement<'_>) -> bool {
+ let scope_data = &body.source_scopes[statement.source_info.scope];
+ scope_data.inlined.is_some() || scope_data.inlined_parent_scope.is_some()
+}
+
+/// This function ensures we obtain the correct MIR for the given item irrespective of
+/// whether that means const mir or runtime mir. For `const fn` this opts for runtime
+/// mir.
+fn mir_body<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx mir::Body<'tcx> {
+ let id = ty::WithOptConstParam::unknown(def_id);
+ let def = ty::InstanceDef::Item(id);
+ tcx.instance_mir(def)
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs
new file mode 100644
index 000000000..423e78317
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/spans.rs
@@ -0,0 +1,892 @@
+use super::debug::term_type;
+use super::graph::{BasicCoverageBlock, BasicCoverageBlockData, CoverageGraph, START_BCB};
+
+use itertools::Itertools;
+use rustc_data_structures::graph::WithNumNodes;
+use rustc_middle::mir::spanview::source_range_no_file;
+use rustc_middle::mir::{
+ self, AggregateKind, BasicBlock, FakeReadCause, Rvalue, Statement, StatementKind, Terminator,
+ TerminatorKind,
+};
+use rustc_middle::ty::TyCtxt;
+use rustc_span::source_map::original_sp;
+use rustc_span::{BytePos, ExpnKind, MacroKind, Span, Symbol};
+
+use std::cell::RefCell;
+use std::cmp::Ordering;
+
+#[derive(Debug, Copy, Clone)]
+pub(super) enum CoverageStatement {
+ Statement(BasicBlock, Span, usize),
+ Terminator(BasicBlock, Span),
+}
+
+impl CoverageStatement {
+ pub fn format<'tcx>(&self, tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>) -> String {
+ match *self {
+ Self::Statement(bb, span, stmt_index) => {
+ let stmt = &mir_body[bb].statements[stmt_index];
+ format!(
+ "{}: @{}[{}]: {:?}",
+ source_range_no_file(tcx, span),
+ bb.index(),
+ stmt_index,
+ stmt
+ )
+ }
+ Self::Terminator(bb, span) => {
+ let term = mir_body[bb].terminator();
+ format!(
+ "{}: @{}.{}: {:?}",
+ source_range_no_file(tcx, span),
+ bb.index(),
+ term_type(&term.kind),
+ term.kind
+ )
+ }
+ }
+ }
+
+ pub fn span(&self) -> Span {
+ match self {
+ Self::Statement(_, span, _) | Self::Terminator(_, span) => *span,
+ }
+ }
+}
+
+/// A BCB is deconstructed into one or more `Span`s. Each `Span` maps to a `CoverageSpan` that
+/// references the originating BCB and one or more MIR `Statement`s and/or `Terminator`s.
+/// Initially, the `Span`s come from the `Statement`s and `Terminator`s, but subsequent
+/// transforms can combine adjacent `Span`s and `CoverageSpan`s from the same BCB, merging the
+/// `CoverageStatement` vectors and extending the `Span`s to cover the extent of the combined
+/// `Span`s.
+///
+/// Note: A `CoverageStatement` merged into another CoverageSpan may come from a `BasicBlock` that
+/// is not part of the `CoverageSpan`'s bcb, if the statement was included because its `Span`
+/// matches or is subsumed by the `Span` associated with this `CoverageSpan`, and its `BasicBlock`
+/// `is_dominated_by()` the `BasicBlock`s in this `CoverageSpan`.
+#[derive(Debug, Clone)]
+pub(super) struct CoverageSpan {
+ pub span: Span,
+ pub expn_span: Span,
+ pub current_macro_or_none: RefCell<Option<Option<Symbol>>>,
+ pub bcb: BasicCoverageBlock,
+ pub coverage_statements: Vec<CoverageStatement>,
+ pub is_closure: bool,
+}
+
+impl CoverageSpan {
+ pub fn for_fn_sig(fn_sig_span: Span) -> Self {
+ Self {
+ span: fn_sig_span,
+ expn_span: fn_sig_span,
+ current_macro_or_none: Default::default(),
+ bcb: START_BCB,
+ coverage_statements: vec![],
+ is_closure: false,
+ }
+ }
+
+ pub fn for_statement(
+ statement: &Statement<'_>,
+ span: Span,
+ expn_span: Span,
+ bcb: BasicCoverageBlock,
+ bb: BasicBlock,
+ stmt_index: usize,
+ ) -> Self {
+ let is_closure = match statement.kind {
+ StatementKind::Assign(box (_, Rvalue::Aggregate(box ref kind, _))) => {
+ matches!(kind, AggregateKind::Closure(_, _) | AggregateKind::Generator(_, _, _))
+ }
+ _ => false,
+ };
+
+ Self {
+ span,
+ expn_span,
+ current_macro_or_none: Default::default(),
+ bcb,
+ coverage_statements: vec![CoverageStatement::Statement(bb, span, stmt_index)],
+ is_closure,
+ }
+ }
+
+ pub fn for_terminator(
+ span: Span,
+ expn_span: Span,
+ bcb: BasicCoverageBlock,
+ bb: BasicBlock,
+ ) -> Self {
+ Self {
+ span,
+ expn_span,
+ current_macro_or_none: Default::default(),
+ bcb,
+ coverage_statements: vec![CoverageStatement::Terminator(bb, span)],
+ is_closure: false,
+ }
+ }
+
+ pub fn merge_from(&mut self, mut other: CoverageSpan) {
+ debug_assert!(self.is_mergeable(&other));
+ self.span = self.span.to(other.span);
+ self.coverage_statements.append(&mut other.coverage_statements);
+ }
+
+ pub fn cutoff_statements_at(&mut self, cutoff_pos: BytePos) {
+ self.coverage_statements.retain(|covstmt| covstmt.span().hi() <= cutoff_pos);
+ if let Some(highest_covstmt) =
+ self.coverage_statements.iter().max_by_key(|covstmt| covstmt.span().hi())
+ {
+ self.span = self.span.with_hi(highest_covstmt.span().hi());
+ }
+ }
+
+ #[inline]
+ pub fn is_mergeable(&self, other: &Self) -> bool {
+ self.is_in_same_bcb(other) && !(self.is_closure || other.is_closure)
+ }
+
+ #[inline]
+ pub fn is_in_same_bcb(&self, other: &Self) -> bool {
+ self.bcb == other.bcb
+ }
+
+ pub fn format<'tcx>(&self, tcx: TyCtxt<'tcx>, mir_body: &mir::Body<'tcx>) -> String {
+ format!(
+ "{}\n {}",
+ source_range_no_file(tcx, self.span),
+ self.format_coverage_statements(tcx, mir_body).replace('\n', "\n "),
+ )
+ }
+
+ pub fn format_coverage_statements<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ mir_body: &mir::Body<'tcx>,
+ ) -> String {
+ let mut sorted_coverage_statements = self.coverage_statements.clone();
+ sorted_coverage_statements.sort_unstable_by_key(|covstmt| match *covstmt {
+ CoverageStatement::Statement(bb, _, index) => (bb, index),
+ CoverageStatement::Terminator(bb, _) => (bb, usize::MAX),
+ });
+ sorted_coverage_statements.iter().map(|covstmt| covstmt.format(tcx, mir_body)).join("\n")
+ }
+
+ /// If the span is part of a macro, returns the macro name symbol.
+ pub fn current_macro(&self) -> Option<Symbol> {
+ self.current_macro_or_none
+ .borrow_mut()
+ .get_or_insert_with(|| {
+ if let ExpnKind::Macro(MacroKind::Bang, current_macro) =
+ self.expn_span.ctxt().outer_expn_data().kind
+ {
+ return Some(current_macro);
+ }
+ None
+ })
+ .map(|symbol| symbol)
+ }
+
+ /// If the span is part of a macro, and the macro is visible (expands directly to the given
+ /// body_span), returns the macro name symbol.
+ pub fn visible_macro(&self, body_span: Span) -> Option<Symbol> {
+ if let Some(current_macro) = self.current_macro() && self
+ .expn_span
+ .parent_callsite()
+ .unwrap_or_else(|| bug!("macro must have a parent"))
+ .eq_ctxt(body_span)
+ {
+ return Some(current_macro);
+ }
+ None
+ }
+
+ pub fn is_macro_expansion(&self) -> bool {
+ self.current_macro().is_some()
+ }
+}
+
+/// Converts the initial set of `CoverageSpan`s (one per MIR `Statement` or `Terminator`) into a
+/// minimal set of `CoverageSpan`s, using the BCB CFG to determine where it is safe and useful to:
+///
+/// * Remove duplicate source code coverage regions
+/// * Merge spans that represent continuous (both in source code and control flow), non-branching
+/// execution
+/// * Carve out (leave uncovered) any span that will be counted by another MIR (notably, closures)
+pub struct CoverageSpans<'a, 'tcx> {
+ /// The MIR, used to look up `BasicBlockData`.
+ mir_body: &'a mir::Body<'tcx>,
+
+ /// A `Span` covering the signature of the function for the MIR.
+ fn_sig_span: Span,
+
+ /// A `Span` covering the function body of the MIR (typically from left curly brace to right
+ /// curly brace).
+ body_span: Span,
+
+ /// The BasicCoverageBlock Control Flow Graph (BCB CFG).
+ basic_coverage_blocks: &'a CoverageGraph,
+
+ /// The initial set of `CoverageSpan`s, sorted by `Span` (`lo` and `hi`) and by relative
+ /// dominance between the `BasicCoverageBlock`s of equal `Span`s.
+ sorted_spans_iter: Option<std::vec::IntoIter<CoverageSpan>>,
+
+ /// The current `CoverageSpan` to compare to its `prev`, to possibly merge, discard, force the
+ /// discard of the `prev` (and/or `pending_dups`), or keep both (with `prev` moved to
+ /// `pending_dups`). If `curr` is not discarded or merged, it becomes `prev` for the next
+ /// iteration.
+ some_curr: Option<CoverageSpan>,
+
+ /// The original `span` for `curr`, in case `curr.span()` is modified. The `curr_original_span`
+ /// **must not be mutated** (except when advancing to the next `curr`), even if `curr.span()`
+ /// is mutated.
+ curr_original_span: Span,
+
+ /// The `CoverageSpan` from a prior iteration; typically assigned from that iteration's `curr`.
+ /// If that `curr` was discarded, `prev` retains its value from the previous iteration.
+ some_prev: Option<CoverageSpan>,
+
+ /// Assigned from `curr_original_span` from the previous iteration. The `prev_original_span`
+ /// **must not be mutated** (except when advancing to the next `prev`), even if `prev.span()`
+ /// is mutated.
+ prev_original_span: Span,
+
+ /// A copy of the expn_span from the prior iteration.
+ prev_expn_span: Option<Span>,
+
+ /// One or more `CoverageSpan`s with the same `Span` but different `BasicCoverageBlock`s, and
+ /// no `BasicCoverageBlock` in this list dominates another `BasicCoverageBlock` in the list.
+ /// If a new `curr` span also meets these criteria (compared to an existing list of
+ /// `pending_dups`), that `curr` `CoverageSpan` moves to `prev` before possibly being added to
+ /// the `pending_dups` list, on the next iteration. As a result, if `prev` and `pending_dups`
+ /// have the same `Span`, the criteria for `pending_dups` holds for `prev` as well: a `prev`
+ /// with a matching `Span` does not dominate any `pending_dup`, and no `pending_dup` dominates a
+ /// `prev` with a matching `Span`.
+ pending_dups: Vec<CoverageSpan>,
+
+ /// The final `CoverageSpan`s to add to the coverage map. A `Counter` or `Expression`
+ /// will also be injected into the MIR for each `CoverageSpan`.
+ refined_spans: Vec<CoverageSpan>,
+}
+
+impl<'a, 'tcx> CoverageSpans<'a, 'tcx> {
+ /// Generate a minimal set of `CoverageSpan`s, each representing a contiguous code region to be
+ /// counted.
+ ///
+ /// The basic steps are:
+ ///
+ /// 1. Extract an initial set of spans from the `Statement`s and `Terminator`s of each
+ /// `BasicCoverageBlockData`.
+ /// 2. Sort the spans by span.lo() (starting position). Spans that start at the same position
+ /// are sorted with longer spans before shorter spans; and equal spans are sorted
+ /// (deterministically) based on "dominator" relationship (if any).
+ /// 3. Traverse the spans in sorted order to identify spans that can be dropped (for instance,
+ /// if another span or spans are already counting the same code region), or should be merged
+ /// into a broader combined span (because it represents a contiguous, non-branching, and
+ /// uninterrupted region of source code).
+ ///
+ /// Closures are exposed in their enclosing functions as `Assign` `Rvalue`s, and since
+ /// closures have their own MIR, their `Span` in their enclosing function should be left
+ /// "uncovered".
+ ///
+ /// Note the resulting vector of `CoverageSpan`s may not be fully sorted (and does not need
+ /// to be).
+ pub(super) fn generate_coverage_spans(
+ mir_body: &'a mir::Body<'tcx>,
+ fn_sig_span: Span, // Ensured to be same SourceFile and SyntaxContext as `body_span`
+ body_span: Span,
+ basic_coverage_blocks: &'a CoverageGraph,
+ ) -> Vec<CoverageSpan> {
+ let mut coverage_spans = CoverageSpans {
+ mir_body,
+ fn_sig_span,
+ body_span,
+ basic_coverage_blocks,
+ sorted_spans_iter: None,
+ refined_spans: Vec::with_capacity(basic_coverage_blocks.num_nodes() * 2),
+ some_curr: None,
+ curr_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
+ some_prev: None,
+ prev_original_span: Span::with_root_ctxt(BytePos(0), BytePos(0)),
+ prev_expn_span: None,
+ pending_dups: Vec::new(),
+ };
+
+ let sorted_spans = coverage_spans.mir_to_initial_sorted_coverage_spans();
+
+ coverage_spans.sorted_spans_iter = Some(sorted_spans.into_iter());
+
+ coverage_spans.to_refined_spans()
+ }
+
+ fn mir_to_initial_sorted_coverage_spans(&self) -> Vec<CoverageSpan> {
+ let mut initial_spans =
+ Vec::<CoverageSpan>::with_capacity(self.mir_body.basic_blocks.len() * 2);
+ for (bcb, bcb_data) in self.basic_coverage_blocks.iter_enumerated() {
+ initial_spans.extend(self.bcb_to_initial_coverage_spans(bcb, bcb_data));
+ }
+
+ if initial_spans.is_empty() {
+ // This can happen if, for example, the function is unreachable (contains only
+ // `BasicBlock`s with `Unreachable` terminators).
+ return initial_spans;
+ }
+
+ initial_spans.push(CoverageSpan::for_fn_sig(self.fn_sig_span));
+
+ initial_spans.sort_unstable_by(|a, b| {
+ if a.span.lo() == b.span.lo() {
+ if a.span.hi() == b.span.hi() {
+ if a.is_in_same_bcb(b) {
+ Some(Ordering::Equal)
+ } else {
+ // Sort equal spans by dominator relationship, so that dominators always
+ // come before the equal spans they dominate. When later comparing two
+ // spans in order, the first will either dominate the second, or they
+ // will have no dominator relationship.
+ self.basic_coverage_blocks.dominators().rank_partial_cmp(b.bcb, a.bcb)
+ }
+ } else {
+ // Sort hi() in reverse order so shorter spans are attempted after longer spans.
+ // This guarantees that, if a `prev` span overlaps, and is not equal to, a
+ // `curr` span, the prev span either extends further left of the curr span, or
+ // they start at the same position and the prev span extends further right of
+ // the end of the curr span.
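+ // For example, with equal `lo`, a span covering bytes 4..10 sorts before
+ // one covering bytes 4..7.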
+ b.span.hi().partial_cmp(&a.span.hi())
+ }
+ } else {
+ a.span.lo().partial_cmp(&b.span.lo())
+ }
+ .unwrap()
+ });
+
+ initial_spans
+ }
+
+ /// Iterate through the sorted `CoverageSpan`s, and return the refined list of merged and
+ /// de-duplicated `CoverageSpan`s.
+ fn to_refined_spans(mut self) -> Vec<CoverageSpan> {
+ while self.next_coverage_span() {
+ if self.some_prev.is_none() {
+ debug!(" initial span");
+ self.check_invoked_macro_name_span();
+ } else if self.curr().is_mergeable(self.prev()) {
+ debug!(" same bcb (and neither is a closure), merge with prev={:?}", self.prev());
+ let prev = self.take_prev();
+ self.curr_mut().merge_from(prev);
+ self.check_invoked_macro_name_span();
+ // Note that curr.span may now differ from curr_original_span
+ } else if self.prev_ends_before_curr() {
+ debug!(
+ " different bcbs and disjoint spans, so keep curr for next iter, and add \
+ prev={:?}",
+ self.prev()
+ );
+ let prev = self.take_prev();
+ self.push_refined_span(prev);
+ self.check_invoked_macro_name_span();
+ } else if self.prev().is_closure {
+ // drop any equal or overlapping span (`curr`) and keep `prev` to test again in the
+ // next iter
+ debug!(
+ " curr overlaps a closure (prev). Drop curr and keep prev for next iter. \
+ prev={:?}",
+ self.prev()
+ );
+ self.take_curr();
+ } else if self.curr().is_closure {
+ self.carve_out_span_for_closure();
+ } else if self.prev_original_span == self.curr().span {
+ // Note that this compares the new (`curr`) span to `prev_original_span`.
+ // In this branch, the actual span byte range of `prev_original_span` is not
+ // important. What is important is knowing whether the new `curr` span was
+ // **originally** the same as the original span of `prev()`. The original spans
+ // reflect their original sort order, and for equal spans, conveys a partial
+ // ordering based on CFG dominator priority.
+ if self.prev().is_macro_expansion() && self.curr().is_macro_expansion() {
+ // Macros that expand to include branching (such as
+ // `assert_eq!()`, `assert_ne!()`, `info!()`, `debug!()`, or
+ // `trace!()`) typically generate callee spans with identical
+ // ranges (typically the full span of the macro) for all
+ // `BasicBlocks`. This makes it impossible to distinguish
+ // the condition (`if val1 != val2`) from the optional
+ // branched statements (such as the call to `panic!()` on
+ // assert failure). In this case it is better (or less
+ // worse) to drop the optional branch bcbs and keep the
+ // non-conditional statements, to count when reached.
+ debug!(
+ " curr and prev are part of a macro expansion, and curr has the same span \
+ as prev, but is in a different bcb. Drop curr and keep prev for next iter. \
+ prev={:?}",
+ self.prev()
+ );
+ self.take_curr();
+ } else {
+ self.hold_pending_dups_unless_dominated();
+ }
+ } else {
+ self.cutoff_prev_at_overlapping_curr();
+ self.check_invoked_macro_name_span();
+ }
+ }
+
+ debug!(" AT END, adding last prev={:?}", self.prev());
+ let prev = self.take_prev();
+ let pending_dups = self.pending_dups.split_off(0);
+ for dup in pending_dups {
+ debug!(" ...adding at least one pending dup={:?}", dup);
+ self.push_refined_span(dup);
+ }
+
+ // Async functions wrap a closure that implements the body to be executed. The enclosing
+ // function is called and returns an `impl Future` without initially executing any of the
+ // body. To avoid showing the return from the enclosing function as a "covered" return from
+ // the closure, the enclosing function's `TerminatorKind::Return`'s `CoverageSpan` is
+ // excluded. The closure's `Return` is the only one that will be counted. This provides
+ // adequate coverage, and more intuitive counts. (Avoids double-counting the closing brace
+ // of the function body.)
+ let body_ends_with_closure = if let Some(last_covspan) = self.refined_spans.last() {
+ last_covspan.is_closure && last_covspan.span.hi() == self.body_span.hi()
+ } else {
+ false
+ };
+
+ if !body_ends_with_closure {
+ self.push_refined_span(prev);
+ }
+
+ // Remove `CoverageSpan`s derived from closures, originally added to ensure the coverage
+ // regions for the current function leave room for the closure's own coverage regions
+ // (injected separately, from the closure's own MIR).
+ self.refined_spans.retain(|covspan| !covspan.is_closure);
+ self.refined_spans
+ }
+
+ fn push_refined_span(&mut self, covspan: CoverageSpan) {
+ let len = self.refined_spans.len();
+ if len > 0 {
+ let last = &mut self.refined_spans[len - 1];
+ if last.is_mergeable(&covspan) {
+ debug!(
+ "merging new refined span with last refined span, last={:?}, covspan={:?}",
+ last, covspan
+ );
+ last.merge_from(covspan);
+ return;
+ }
+ }
+ self.refined_spans.push(covspan)
+ }
+
+ fn check_invoked_macro_name_span(&mut self) {
+ if let Some(visible_macro) = self.curr().visible_macro(self.body_span) {
+ if self.prev_expn_span.map_or(true, |prev_expn_span| {
+ self.curr().expn_span.ctxt() != prev_expn_span.ctxt()
+ }) {
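+ // For example (hypothetical spans): if `curr` covers `assert_eq!(a, b)` and
+ // nothing was merged into it, `merged_prefix_len` is 0 and `after_macro_bang`
+ // is `"assert_eq".len() + 1`, splitting the span just after the `!`.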
+ let merged_prefix_len = self.curr_original_span.lo() - self.curr().span.lo();
+ let after_macro_bang =
+ merged_prefix_len + BytePos(visible_macro.as_str().len() as u32 + 1);
+ let mut macro_name_cov = self.curr().clone();
+ self.curr_mut().span =
+ self.curr().span.with_lo(self.curr().span.lo() + after_macro_bang);
+ macro_name_cov.span =
+ macro_name_cov.span.with_hi(macro_name_cov.span.lo() + after_macro_bang);
+ debug!(
+ " and curr starts a new macro expansion, so add a new span just for \
+ the macro `{}!`, new span={:?}",
+ visible_macro, macro_name_cov
+ );
+ self.push_refined_span(macro_name_cov);
+ }
+ }
+ }
+
+ // Generate a set of `CoverageSpan`s from the filtered set of `Statement`s and `Terminator`s of
+ // the `BasicBlock`(s) in the given `BasicCoverageBlockData`. One `CoverageSpan` is generated
+ // for each `Statement` and `Terminator`. (Note that subsequent stages of coverage analysis will
+ // merge some `CoverageSpan`s, at which point a `CoverageSpan` may represent multiple
+ // `Statement`s and/or `Terminator`s.)
+ fn bcb_to_initial_coverage_spans(
+ &self,
+ bcb: BasicCoverageBlock,
+ bcb_data: &'a BasicCoverageBlockData,
+ ) -> Vec<CoverageSpan> {
+ bcb_data
+ .basic_blocks
+ .iter()
+ .flat_map(|&bb| {
+ let data = &self.mir_body[bb];
+ data.statements
+ .iter()
+ .enumerate()
+ .filter_map(move |(index, statement)| {
+ filtered_statement_span(statement).map(|span| {
+ CoverageSpan::for_statement(
+ statement,
+ function_source_span(span, self.body_span),
+ span,
+ bcb,
+ bb,
+ index,
+ )
+ })
+ })
+ .chain(filtered_terminator_span(data.terminator()).map(|span| {
+ CoverageSpan::for_terminator(
+ function_source_span(span, self.body_span),
+ span,
+ bcb,
+ bb,
+ )
+ }))
+ })
+ .collect()
+ }
+
+ fn curr(&self) -> &CoverageSpan {
+ self.some_curr
+ .as_ref()
+ .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+ }
+
+ fn curr_mut(&mut self) -> &mut CoverageSpan {
+ self.some_curr
+ .as_mut()
+ .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+ }
+
+ fn prev(&self) -> &CoverageSpan {
+ self.some_prev
+ .as_ref()
+ .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+ }
+
+ fn prev_mut(&mut self) -> &mut CoverageSpan {
+ self.some_prev
+ .as_mut()
+ .unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+ }
+
+ fn take_prev(&mut self) -> CoverageSpan {
+ self.some_prev.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_prev"))
+ }
+
+ /// If there are `pending_dups` but `prev` is not a matching dup (`prev.span` doesn't match the
+ /// `pending_dups` spans), then one of the following two things happened during the previous
+ /// iteration:
+ /// * the previous `curr` span (which is now `prev`) was not a duplicate of the pending_dups
+ /// (in which case there should be at least two spans in `pending_dups`); or
+ /// * the `span` of `prev` was modified by `curr_mut().merge_from(prev)` (in which case
+ /// `pending_dups` could have as few as one span)
+ /// In either case, no more spans will match the span of `pending_dups`, so
+ /// add the `pending_dups` if they don't overlap `curr`, and clear the list.
+ fn check_pending_dups(&mut self) {
+ if let Some(dup) = self.pending_dups.last() && dup.span != self.prev().span {
+ debug!(
+ " SAME spans, but pending_dups are NOT THE SAME, so BCBs matched on \
+ previous iteration, or prev started a new disjoint span"
+ );
+ if dup.span.hi() <= self.curr().span.lo() {
+ let pending_dups = self.pending_dups.split_off(0);
+ for dup in pending_dups.into_iter() {
+ debug!(" ...adding at least one pending={:?}", dup);
+ self.push_refined_span(dup);
+ }
+ } else {
+ self.pending_dups.clear();
+ }
+ }
+ }
+
+ /// Advance `prev` to `curr` (if any), and `curr` to the next `CoverageSpan` in sorted order.
+ fn next_coverage_span(&mut self) -> bool {
+ if let Some(curr) = self.some_curr.take() {
+ self.prev_expn_span = Some(curr.expn_span);
+ self.some_prev = Some(curr);
+ self.prev_original_span = self.curr_original_span;
+ }
+ while let Some(curr) = self.sorted_spans_iter.as_mut().unwrap().next() {
+ debug!("FOR curr={:?}", curr);
+ if self.some_prev.is_some() && self.prev_starts_after_next(&curr) {
+ debug!(
+ " prev.span starts after curr.span, so curr will be dropped (skipping past \
+ closure?); prev={:?}",
+ self.prev()
+ );
+ } else {
+ // Save a copy of the original span for `curr` in case the `CoverageSpan` is changed
+ // by `self.curr_mut().merge_from(prev)`.
+ self.curr_original_span = curr.span;
+ self.some_curr.replace(curr);
+ self.check_pending_dups();
+ return true;
+ }
+ }
+ false
+ }
+
+ /// If called, then the next call to `next_coverage_span()` will *not* update `prev` with the
+ /// `curr` coverage span.
+ fn take_curr(&mut self) -> CoverageSpan {
+ self.some_curr.take().unwrap_or_else(|| bug!("invalid attempt to unwrap a None some_curr"))
+ }
+
+ /// Returns true if the curr span should be skipped because prev has already advanced beyond the
+ /// end of curr. This can only happen if a prior iteration updated `prev` to skip past a region
+ /// of code, such as skipping past a closure.
+ fn prev_starts_after_next(&self, next_curr: &CoverageSpan) -> bool {
+ self.prev().span.lo() > next_curr.span.lo()
+ }
+
+ /// Returns true if the curr span starts past the end of the prev span, which means they don't
+ /// overlap, so we now know the prev can be added to the refined coverage spans.
+ fn prev_ends_before_curr(&self) -> bool {
+ self.prev().span.hi() <= self.curr().span.lo()
+ }
+
+ /// If `prev`'s span extends left of the closure (`curr`), carve out the closure's span from
+ /// `prev`'s span. (The closure's coverage counters will be injected when processing the
+ /// closure's own MIR.) Add the portion of the span to the left of the closure; and if the span
+ /// extends to the right of the closure, update `prev` to that portion of the span. For any
+ /// `pending_dups`, repeat the same process.
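+ ///
+ /// A sketch (hypothetical source): given
+ ///
+ /// ```ignore (illustrative)
+ /// let sum = init() + values.iter().map(|v| v * 2).sum::<u32>();
+ /// ```
+ ///
+ /// if `prev` covers the whole statement and `curr` is the closure `|v| v * 2`, the
+ /// portion left of the closure is pushed as its own refined span, and `prev` is
+ /// narrowed to start after the closure.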
+ fn carve_out_span_for_closure(&mut self) {
+ let curr_span = self.curr().span;
+ let left_cutoff = curr_span.lo();
+ let right_cutoff = curr_span.hi();
+ let has_pre_closure_span = self.prev().span.lo() < right_cutoff;
+ let has_post_closure_span = self.prev().span.hi() > right_cutoff;
+ let mut pending_dups = self.pending_dups.split_off(0);
+ if has_pre_closure_span {
+ let mut pre_closure = self.prev().clone();
+ pre_closure.span = pre_closure.span.with_hi(left_cutoff);
+ debug!(" prev overlaps a closure. Adding span for pre_closure={:?}", pre_closure);
+ if !pending_dups.is_empty() {
+ for mut dup in pending_dups.iter().cloned() {
+ dup.span = dup.span.with_hi(left_cutoff);
+ debug!(" ...and at least one pre_closure dup={:?}", dup);
+ self.push_refined_span(dup);
+ }
+ }
+ self.push_refined_span(pre_closure);
+ }
+ if has_post_closure_span {
+ // Mutate `prev.span()` to start after the closure (and discard curr).
+ // (**NEVER** update `prev_original_span` because it affects the assumptions
+ // about how the `CoverageSpan`s are ordered.)
+ self.prev_mut().span = self.prev().span.with_lo(right_cutoff);
+ debug!(" Mutated prev.span to start after the closure. prev={:?}", self.prev());
+ for dup in pending_dups.iter_mut() {
+ debug!(" ...and at least one overlapping dup={:?}", dup);
+ dup.span = dup.span.with_lo(right_cutoff);
+ }
+ self.pending_dups.append(&mut pending_dups);
+ let closure_covspan = self.take_curr();
+ self.push_refined_span(closure_covspan); // since self.prev() was already updated
+ } else {
+ pending_dups.clear();
+ }
+ }
+
+ /// Called if `curr.span` equals `prev_original_span` (and potentially equal to all
+ /// `pending_dups` spans, if any). Keep in mind, `prev.span()` may have been changed.
+ /// If `prev.span()` was merged into other spans (with matching BCB, for instance),
+ /// `prev.span.hi()` will be greater than (further right of) `prev_original_span.hi()`.
+ /// If `prev.span()` was split off to the right of a closure, `prev.span().lo()` will be
+ /// greater than `prev_original_span.lo()`. The actual span of `prev_original_span` is
+ /// not as important as knowing that `prev()` **used to have the same span** as `curr()`,
+ /// which means their sort order is still meaningful for determining the dominator
+ /// relationship.
+ ///
+ /// When two `CoverageSpan`s have the same `Span`, dominated spans can be discarded; but if
+ /// neither `CoverageSpan` dominates the other, both (or possibly more than two) are held,
+ /// until their disposition is determined. In this latter case, the `prev` dup is moved into
+ /// `pending_dups` so the new `curr` dup can be moved to `prev` for the next iteration.
+ fn hold_pending_dups_unless_dominated(&mut self) {
+ // Equal coverage spans are ordered by dominators before dominated (if any), so it should be
+ // impossible for `curr` to dominate any previous `CoverageSpan`.
+ debug_assert!(!self.span_bcb_is_dominated_by(self.prev(), self.curr()));
+
+ let initial_pending_count = self.pending_dups.len();
+ if initial_pending_count > 0 {
+ let mut pending_dups = self.pending_dups.split_off(0);
+ pending_dups.retain(|dup| !self.span_bcb_is_dominated_by(self.curr(), dup));
+ self.pending_dups.append(&mut pending_dups);
+ if self.pending_dups.len() < initial_pending_count {
+ debug!(
+ " discarded {} of {} pending_dups that dominated curr",
+ initial_pending_count - self.pending_dups.len(),
+ initial_pending_count
+ );
+ }
+ }
+
+ if self.span_bcb_is_dominated_by(self.curr(), self.prev()) {
+ debug!(
+ " different bcbs but SAME spans, and prev dominates curr. Discard prev={:?}",
+ self.prev()
+ );
+ self.cutoff_prev_at_overlapping_curr();
+ // If one span dominates the other, associate the span with the code from the dominated
+ // block only (`curr`), and discard the overlapping portion of the `prev` span. (Note
+ // that if `prev.span` is wider than `prev_original_span`, a `CoverageSpan` will still
+ // be created for `prev`s block, for the non-overlapping portion, left of `curr.span`.)
+ //
+ // For example:
+ // match somenum {
+ // x if x < 1 => { ... }
+ // }...
+ //
+ // The span for the first `x` is referenced by both the pattern block (every time it is
+ // evaluated) and the arm code (only when matched). The counter will be applied only to
+ // the dominated block. This allows coverage to track and highlight things like the
+ // assignment of `x` above, if the branch is matched, making `x` available to the arm
+ // code; and to track and highlight the question mark `?` "try" operator at the end of
+ // a function call returning a `Result`, so the `?` is covered when the function returns
+ // an `Err`, and not counted as covered if the function always returns `Ok`.
+ } else {
+ // Save `prev` in `pending_dups`. (`curr` will become `prev` in the next iteration.)
+ // If the `curr` CoverageSpan is later discarded, `pending_dups` can be discarded as
+ // well; but if `curr` is added to refined_spans, the `pending_dups` will also be added.
+ debug!(
+ " different bcbs but SAME spans, and neither dominates, so keep curr for \
+ next iter, and, pending upcoming spans (unless overlapping) add prev={:?}",
+ self.prev()
+ );
+ let prev = self.take_prev();
+ self.pending_dups.push(prev);
+ }
+ }
+
+ /// `curr` overlaps `prev`. If `prev`'s span extends left of `curr`'s span, keep _only_
+ /// statements that end before `curr.lo()` (if any), and add the portion of the
+ /// combined span for those statements. Any other statements have overlapping spans
+ /// that can be ignored because `curr` and/or other upcoming statements/spans inside
+ /// the overlap area will produce their own counters. This disambiguation process
+ /// avoids injecting multiple counters for overlapping spans, and the potential for
+ /// double-counting.
+ fn cutoff_prev_at_overlapping_curr(&mut self) {
+ debug!(
+ " different bcbs, overlapping spans, so ignore/drop pending and only add prev \
+ if it has statements that end before curr; prev={:?}",
+ self.prev()
+ );
+ if self.pending_dups.is_empty() {
+ let curr_span = self.curr().span;
+ self.prev_mut().cutoff_statements_at(curr_span.lo());
+ if self.prev().coverage_statements.is_empty() {
+ debug!(" ... no non-overlapping statements to add");
+ } else {
+ debug!(" ... adding modified prev={:?}", self.prev());
+ let prev = self.take_prev();
+ self.push_refined_span(prev);
+ }
+ } else {
+ // with `pending_dups`, `prev` cannot have any statements that don't overlap
+ self.pending_dups.clear();
+ }
+ }
+
+ fn span_bcb_is_dominated_by(&self, covspan: &CoverageSpan, dom_covspan: &CoverageSpan) -> bool {
+ self.basic_coverage_blocks.is_dominated_by(covspan.bcb, dom_covspan.bcb)
+ }
+}
+
+/// If the MIR `Statement` has a span contributive to computing coverage spans,
+/// return it; otherwise return `None`.
+pub(super) fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> {
+ match statement.kind {
+ // These statements have spans that are often outside the scope of the executed source code
+ // for their parent `BasicBlock`.
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ // Coverage statements should not be encountered yet, but in any case don't derive spans from them
+ | StatementKind::Coverage(_)
+ // Ignore `Nop`s
+ | StatementKind::Nop => None,
+
+ // FIXME(#78546): MIR InstrumentCoverage - Can the source_info.span for `FakeRead`
+ // statements be more consistent?
+ //
+ // FakeReadCause::ForGuardBinding, in this example:
+ // match somenum {
+ // x if x < 1 => { ... }
+ // }...
+ // The BasicBlock within the match arm code included one of these statements, but the span
+ // for it covered the `1` in this source. The actual statements have nothing to do with that
+ // source span:
+ // FakeRead(ForGuardBinding, _4);
+ // where `_4` is:
+ // _4 = &_1; (at the span for the first `x`)
+ // and `_1` is the `Place` for `somenum`.
+ //
+ // If and when the Issue is resolved, remove this special case match pattern:
+ StatementKind::FakeRead(box (cause, _)) if cause == FakeReadCause::ForGuardBinding => None,
+
+ // Retain spans from all other statements
+ StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding`
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Assign(_)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::Retag(_, _)
+ | StatementKind::AscribeUserType(_, _) => {
+ Some(statement.source_info.span)
+ }
+ }
+}
+
+/// If the MIR `Terminator` has a span contributive to computing coverage spans,
+/// return it; otherwise return `None`.
+pub(super) fn filtered_terminator_span(terminator: &Terminator<'_>) -> Option<Span> {
+ match terminator.kind {
+ // These terminators have spans that don't positively contribute to computing a reasonable
+ // span of actually executed source code. (For example, a SwitchInt terminator extracted from
+ // an `if condition { block }` has a span that includes the executed block, if true,
+ // but for coverage, the code region executed up to *and* through the SwitchInt
+ // actually stops before the if's block.)
+ TerminatorKind::Unreachable // Unreachable blocks are not connected to the MIR CFG
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::SwitchInt { .. }
+ // For `FalseEdge`, only the `real` branch is taken, so it is similar to a `Goto`.
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::Goto { .. } => None,
+
+ // Call `func` operand can have a more specific span when part of a chain of calls
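+ // For example (hypothetical source), in `a.b().c()` the `Call` terminator's
+ // span for `.c()` may start back at `a`, while the callee constant's span
+ // starts at `c`; moving `lo` up to the callee yields the more specific span.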
+ | TerminatorKind::Call { ref func, .. } => {
+ let mut span = terminator.source_info.span;
+ if let mir::Operand::Constant(box constant) = func {
+ if constant.span.lo() > span.lo() {
+ span = span.with_lo(constant.span.lo());
+ }
+ }
+ Some(span)
+ }
+
+ // Retain spans from all other terminators
+ TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::InlineAsm { .. } => {
+ Some(terminator.source_info.span)
+ }
+ }
+}
+
+/// Returns an extrapolated span (pre-expansion[^1]) corresponding to a range
+/// within the function's body source. This span is guaranteed to be contained
+/// within, or equal to, the `body_span`. If the extrapolated span is not
+/// contained within the `body_span`, the `body_span` is returned.
+///
+/// [^1]: Expansions result from Rust syntax including macros, syntactic sugar,
+/// etc.
+#[inline]
+pub(super) fn function_source_span(span: Span, body_span: Span) -> Span {
+ let original_span = original_sp(span, body_span).with_ctxt(body_span.ctxt());
+ if body_span.contains(original_span) { original_span } else { body_span }
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml b/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml
new file mode 100644
index 000000000..f5e8b6565
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/test_macros/Cargo.toml
@@ -0,0 +1,8 @@
+[package]
+name = "coverage_test_macros"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+proc-macro = true
+doctest = false
diff --git a/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs b/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
new file mode 100644
index 000000000..3d6095d27
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/test_macros/src/lib.rs
@@ -0,0 +1,6 @@
+use proc_macro::TokenStream;
+
+#[proc_macro]
+pub fn let_bcb(item: TokenStream) -> TokenStream {
+ format!("let bcb{} = graph::BasicCoverageBlock::from_usize({});", item, item).parse().unwrap()
+}
diff --git a/compiler/rustc_mir_transform/src/coverage/tests.rs b/compiler/rustc_mir_transform/src/coverage/tests.rs
new file mode 100644
index 000000000..6380f0352
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/coverage/tests.rs
@@ -0,0 +1,710 @@
+//! This module hosts a selection of "unit tests" for components of the `InstrumentCoverage` MIR
+//! pass.
+//!
+//! ```shell
+//! ./x.py test --keep-stage 1 compiler/rustc_mir_transform --test-args '--show-output coverage'
+//! ```
+//!
+//! The tests construct a few "mock" objects, as needed, to support the `InstrumentCoverage`
+//! functions and algorithms. Mocked objects include instances of `mir::Body`, including
+//! `Terminator`s of various `kind`s, and `Span` objects. Some functions used by or used on
+//! real, runtime versions of these mocked-up objects have constraints (such as cross-thread
+//! limitations) and deep dependencies on other elements of the full Rust compiler (which is
+//! *not* constructed or mocked for these tests).
+//!
+//! Of particular note, attempting to simply print elements of the `mir::Body` with default
+//! `Debug` formatting can fail because some `Debug` format implementations require the
+//! `TyCtxt`, obtained via a static global variable that is *not* set for these tests.
+//! Initializing the global type context is prohibitively complex for the scope and scale of these
+//! tests (essentially requiring initializing the entire compiler).
+//!
+//! Also note, some basic features of `Span` also rely on `Span`'s own "session globals", which
+//! are unrelated to the `TyCtxt` global. Without initializing the `Span` session globals, some
+//! basic, coverage-specific features would be impossible to test, but thankfully initializing these
+//! globals is comparatively simple. The easiest way is to wrap the test in a closure argument
+//! to: `rustc_span::create_default_session_globals_then(|| { test_here(); })`.
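+//!
+//! For example, a minimal test body might look like this (a sketch; it assumes
+//! only that `Span::is_dummy` is available):
+//!
+//! ```ignore (illustrative)
+//! #[test]
+//! fn my_span_test() {
+//!     rustc_span::create_default_session_globals_then(|| {
+//!         let span = rustc_span::DUMMY_SP;
+//!         assert!(span.is_dummy());
+//!     });
+//! }
+//! ```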
+
+use super::counters;
+use super::debug;
+use super::graph;
+use super::spans;
+
+use coverage_test_macros::let_bcb;
+
+use itertools::Itertools;
+use rustc_data_structures::graph::WithNumNodes;
+use rustc_data_structures::graph::WithSuccessors;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::coverage::CoverageKind;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::{self, BytePos, Pos, Span, DUMMY_SP};
+
+// All `TEMP_BLOCK` targets should be replaced before calling `to_body() -> mir::Body`.
+const TEMP_BLOCK: BasicBlock = BasicBlock::MAX;
+
+struct MockBlocks<'tcx> {
+ blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ dummy_place: Place<'tcx>,
+ next_local: usize,
+ bool_ty: Ty<'tcx>,
+}
+
+impl<'tcx> MockBlocks<'tcx> {
+ fn new() -> Self {
+ Self {
+ blocks: IndexVec::new(),
+ dummy_place: Place { local: RETURN_PLACE, projection: ty::List::empty() },
+ next_local: 0,
+ bool_ty: TyCtxt::BOOL_TY_FOR_UNIT_TESTING,
+ }
+ }
+
+ fn new_temp(&mut self) -> Local {
+ let index = self.next_local;
+ self.next_local += 1;
+ Local::new(index)
+ }
+
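+ // Pushes a new block whose terminator gets a synthetic one-byte span placed
+ // immediately after the previous block's terminator span, so block spans are
+ // ordered and non-overlapping.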
+ fn push(&mut self, kind: TerminatorKind<'tcx>) -> BasicBlock {
+ let next_lo = if let Some(last) = self.blocks.last() {
+ self.blocks[last].terminator().source_info.span.hi()
+ } else {
+ BytePos(1)
+ };
+ let next_hi = next_lo + BytePos(1);
+ self.blocks.push(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator {
+ source_info: SourceInfo::outermost(Span::with_root_ctxt(next_lo, next_hi)),
+ kind,
+ }),
+ is_cleanup: false,
+ })
+ }
+
+ fn link(&mut self, from_block: BasicBlock, to_block: BasicBlock) {
+ match self.blocks[from_block].terminator_mut().kind {
+ TerminatorKind::Assert { ref mut target, .. }
+ | TerminatorKind::Call { target: Some(ref mut target), .. }
+ | TerminatorKind::Drop { ref mut target, .. }
+ | TerminatorKind::DropAndReplace { ref mut target, .. }
+ | TerminatorKind::FalseEdge { real_target: ref mut target, .. }
+ | TerminatorKind::FalseUnwind { real_target: ref mut target, .. }
+ | TerminatorKind::Goto { ref mut target }
+ | TerminatorKind::InlineAsm { destination: Some(ref mut target), .. }
+ | TerminatorKind::Yield { resume: ref mut target, .. } => *target = to_block,
+ ref invalid => bug!("Invalid from_block: {:?}", invalid),
+ }
+ }
+
+ fn add_block_from(
+ &mut self,
+ some_from_block: Option<BasicBlock>,
+ to_kind: TerminatorKind<'tcx>,
+ ) -> BasicBlock {
+ let new_block = self.push(to_kind);
+ if let Some(from_block) = some_from_block {
+ self.link(from_block, new_block);
+ }
+ new_block
+ }
+
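+ // Points branch `branch_index` of a `SwitchInt` terminator at `to_block`. An
+ // index equal to the number of value branches replaces the `otherwise` target;
+ // a larger index converts the old `otherwise` into a value branch, pads any gap
+ // with `TEMP_BLOCK`, and makes `to_block` the new `otherwise`.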
+ fn set_branch(&mut self, switchint: BasicBlock, branch_index: usize, to_block: BasicBlock) {
+ match self.blocks[switchint].terminator_mut().kind {
+ TerminatorKind::SwitchInt { ref mut targets, .. } => {
+ let mut branches = targets.iter().collect::<Vec<_>>();
+ let otherwise = if branch_index == branches.len() {
+ to_block
+ } else {
+ let old_otherwise = targets.otherwise();
+ if branch_index > branches.len() {
+ branches.push((branches.len() as u128, old_otherwise));
+ while branches.len() < branch_index {
+ branches.push((branches.len() as u128, TEMP_BLOCK));
+ }
+ to_block
+ } else {
+ branches[branch_index] = (branch_index as u128, to_block);
+ old_otherwise
+ }
+ };
+ *targets = SwitchTargets::new(branches.into_iter(), otherwise);
+ }
+ ref invalid => bug!("Invalid BasicBlock kind or no to_block: {:?}", invalid),
+ }
+ }
+
+ fn call(&mut self, some_from_block: Option<BasicBlock>) -> BasicBlock {
+ self.add_block_from(
+ some_from_block,
+ TerminatorKind::Call {
+ func: Operand::Copy(self.dummy_place.clone()),
+ args: vec![],
+ destination: self.dummy_place.clone(),
+ target: Some(TEMP_BLOCK),
+ cleanup: None,
+ from_hir_call: false,
+ fn_span: DUMMY_SP,
+ },
+ )
+ }
+
+ fn goto(&mut self, some_from_block: Option<BasicBlock>) -> BasicBlock {
+ self.add_block_from(some_from_block, TerminatorKind::Goto { target: TEMP_BLOCK })
+ }
+
+ fn switchint(&mut self, some_from_block: Option<BasicBlock>) -> BasicBlock {
+ let switchint_kind = TerminatorKind::SwitchInt {
+ discr: Operand::Move(Place::from(self.new_temp())),
+ switch_ty: self.bool_ty, // just a dummy value
+ targets: SwitchTargets::static_if(0, TEMP_BLOCK, TEMP_BLOCK),
+ };
+ self.add_block_from(some_from_block, switchint_kind)
+ }
+
+ fn return_(&mut self, some_from_block: Option<BasicBlock>) -> BasicBlock {
+ self.add_block_from(some_from_block, TerminatorKind::Return)
+ }
+
+ fn to_body(self) -> Body<'tcx> {
+ Body::new_cfg_only(self.blocks)
+ }
+}
+
+fn debug_basic_blocks<'tcx>(mir_body: &Body<'tcx>) -> String {
+ format!(
+ "{:?}",
+ mir_body
+ .basic_blocks()
+ .iter_enumerated()
+ .map(|(bb, data)| {
+ let term = &data.terminator();
+ let kind = &term.kind;
+ let span = term.source_info.span;
+ let sp = format!("(span:{},{})", span.lo().to_u32(), span.hi().to_u32());
+ match kind {
+ TerminatorKind::Assert { target, .. }
+ | TerminatorKind::Call { target: Some(target), .. }
+ | TerminatorKind::Drop { target, .. }
+ | TerminatorKind::DropAndReplace { target, .. }
+ | TerminatorKind::FalseEdge { real_target: target, .. }
+ | TerminatorKind::FalseUnwind { real_target: target, .. }
+ | TerminatorKind::Goto { target }
+ | TerminatorKind::InlineAsm { destination: Some(target), .. }
+ | TerminatorKind::Yield { resume: target, .. } => {
+ format!("{}{:?}:{} -> {:?}", sp, bb, debug::term_type(kind), target)
+ }
+ TerminatorKind::SwitchInt { targets, .. } => {
+ format!("{}{:?}:{} -> {:?}", sp, bb, debug::term_type(kind), targets)
+ }
+ _ => format!("{}{:?}:{}", sp, bb, debug::term_type(kind)),
+ }
+ })
+ .collect::<Vec<_>>()
+ )
+}
+
+static PRINT_GRAPHS: bool = false;
+
+fn print_mir_graphviz(name: &str, mir_body: &Body<'_>) {
+ if PRINT_GRAPHS {
+ println!(
+ "digraph {} {{\n{}\n}}",
+ name,
+ mir_body
+ .basic_blocks()
+ .iter_enumerated()
+ .map(|(bb, data)| {
+ format!(
+ " {:?} [label=\"{:?}: {}\"];\n{}",
+ bb,
+ bb,
+ debug::term_type(&data.terminator().kind),
+ mir_body
+ .basic_blocks
+ .successors(bb)
+ .map(|successor| { format!(" {:?} -> {:?};", bb, successor) })
+ .join("\n")
+ )
+ })
+ .join("\n")
+ );
+ }
+}
+
+fn print_coverage_graphviz(
+ name: &str,
+ mir_body: &Body<'_>,
+ basic_coverage_blocks: &graph::CoverageGraph,
+) {
+ if PRINT_GRAPHS {
+ println!(
+ "digraph {} {{\n{}\n}}",
+ name,
+ basic_coverage_blocks
+ .iter_enumerated()
+ .map(|(bcb, bcb_data)| {
+ format!(
+ " {:?} [label=\"{:?}: {}\"];\n{}",
+ bcb,
+ bcb,
+ debug::term_type(&bcb_data.terminator(mir_body).kind),
+ basic_coverage_blocks
+ .successors(bcb)
+ .map(|successor| { format!(" {:?} -> {:?};", bcb, successor) })
+ .join("\n")
+ )
+ })
+ .join("\n")
+ );
+ }
+}
+
+/// Create a mock `Body` with a simple flow.
+fn goto_switchint<'a>() -> Body<'a> {
+ let mut blocks = MockBlocks::new();
+ let start = blocks.call(None);
+ let goto = blocks.goto(Some(start));
+ let switchint = blocks.switchint(Some(goto));
+ let then_call = blocks.call(None);
+ let else_call = blocks.call(None);
+ blocks.set_branch(switchint, 0, then_call);
+ blocks.set_branch(switchint, 1, else_call);
+ blocks.return_(Some(then_call));
+ blocks.return_(Some(else_call));
+
+ let mir_body = blocks.to_body();
+ print_mir_graphviz("mir_goto_switchint", &mir_body);
+ /* Graphviz character plots created using: `graph-easy --as=boxart`:
+ ┌────────────────┐
+ │ bb0: Call │
+ └────────────────┘
+ │
+ │
+ ▼
+ ┌────────────────┐
+ │ bb1: Goto │
+ └────────────────┘
+ │
+ │
+ ▼
+ ┌─────────────┐ ┌────────────────┐
+ │ bb4: Call │ ◀── │ bb2: SwitchInt │
+ └─────────────┘ └────────────────┘
+ │ │
+ │ │
+ ▼ ▼
+ ┌─────────────┐ ┌────────────────┐
+ │ bb6: Return │ │ bb3: Call │
+ └─────────────┘ └────────────────┘
+ │
+ │
+ ▼
+ ┌────────────────┐
+ │ bb5: Return │
+ └────────────────┘
+ */
+ mir_body
+}
+
+macro_rules! assert_successors {
+ ($basic_coverage_blocks:ident, $i:ident, [$($successor:ident),*]) => {
+ let mut successors = $basic_coverage_blocks.successors[$i].clone();
+ successors.sort_unstable();
+ assert_eq!(successors, vec![$($successor),*]);
+ }
+}
+
+#[test]
+fn test_covgraph_goto_switchint() {
+ let mir_body = goto_switchint();
+ if false {
+ eprintln!("basic_blocks = {}", debug_basic_blocks(&mir_body));
+ }
+ let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+ print_coverage_graphviz("covgraph_goto_switchint ", &mir_body, &basic_coverage_blocks);
+ /*
+ ┌──────────────┐ ┌─────────────────┐
+ │ bcb2: Return │ ◀── │ bcb0: SwitchInt │
+ └──────────────┘ └─────────────────┘
+ │
+ │
+ ▼
+ ┌─────────────────┐
+ │ bcb1: Return │
+ └─────────────────┘
+ */
+ assert_eq!(
+ basic_coverage_blocks.num_nodes(),
+ 3,
+ "basic_coverage_blocks: {:?}",
+ basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
+ );
+
+ let_bcb!(0);
+ let_bcb!(1);
+ let_bcb!(2);
+
+ assert_successors!(basic_coverage_blocks, bcb0, [bcb1, bcb2]);
+ assert_successors!(basic_coverage_blocks, bcb1, []);
+ assert_successors!(basic_coverage_blocks, bcb2, []);
+}
+
+/// Create a mock `Body` with a loop.
+fn switchint_then_loop_else_return<'a>() -> Body<'a> {
+ let mut blocks = MockBlocks::new();
+ let start = blocks.call(None);
+ let switchint = blocks.switchint(Some(start));
+ let then_call = blocks.call(None);
+ blocks.set_branch(switchint, 0, then_call);
+ let backedge_goto = blocks.goto(Some(then_call));
+ blocks.link(backedge_goto, switchint);
+ let else_return = blocks.return_(None);
+ blocks.set_branch(switchint, 1, else_return);
+
+ let mir_body = blocks.to_body();
+ print_mir_graphviz("mir_switchint_then_loop_else_return", &mir_body);
+ /*
+ ┌────────────────┐
+ │ bb0: Call │
+ └────────────────┘
+ │
+ │
+ ▼
+ ┌─────────────┐ ┌────────────────┐
+ │ bb4: Return │ ◀── │ bb1: SwitchInt │ ◀┐
+ └─────────────┘ └────────────────┘ │
+ │ │
+ │ │
+ ▼ │
+ ┌────────────────┐ │
+ │ bb2: Call │ │
+ └────────────────┘ │
+ │ │
+ │ │
+ ▼ │
+ ┌────────────────┐ │
+ │ bb3: Goto │ ─┘
+ └────────────────┘
+ */
+ mir_body
+}
+
+#[test]
+fn test_covgraph_switchint_then_loop_else_return() {
+ let mir_body = switchint_then_loop_else_return();
+ let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+ print_coverage_graphviz(
+ "covgraph_switchint_then_loop_else_return",
+ &mir_body,
+ &basic_coverage_blocks,
+ );
+ /*
+ ┌─────────────────┐
+ │ bcb0: Call │
+ └─────────────────┘
+ │
+ │
+ ▼
+ ┌────────────┐ ┌─────────────────┐
+ │ bcb3: Goto │ ◀── │ bcb1: SwitchInt │ ◀┐
+ └────────────┘ └─────────────────┘ │
+ │ │ │
+ │ │ │
+ │ ▼ │
+ │ ┌─────────────────┐ │
+ │ │ bcb2: Return │ │
+ │ └─────────────────┘ │
+ │ │
+ └─────────────────────────────────────┘
+ */
+ assert_eq!(
+ basic_coverage_blocks.num_nodes(),
+ 4,
+ "basic_coverage_blocks: {:?}",
+ basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
+ );
+
+ let_bcb!(0);
+ let_bcb!(1);
+ let_bcb!(2);
+ let_bcb!(3);
+
+ assert_successors!(basic_coverage_blocks, bcb0, [bcb1]);
+ assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]);
+ assert_successors!(basic_coverage_blocks, bcb2, []);
+ assert_successors!(basic_coverage_blocks, bcb3, [bcb1]);
+}
+
+/// Create a mock `Body` with nested loops.
+fn switchint_loop_then_inner_loop_else_break<'a>() -> Body<'a> {
+ let mut blocks = MockBlocks::new();
+ let start = blocks.call(None);
+ let switchint = blocks.switchint(Some(start));
+ let then_call = blocks.call(None);
+ blocks.set_branch(switchint, 0, then_call);
+ let else_return = blocks.return_(None);
+ blocks.set_branch(switchint, 1, else_return);
+
+ let inner_start = blocks.call(Some(then_call));
+ let inner_switchint = blocks.switchint(Some(inner_start));
+ let inner_then_call = blocks.call(None);
+ blocks.set_branch(inner_switchint, 0, inner_then_call);
+ let inner_backedge_goto = blocks.goto(Some(inner_then_call));
+ blocks.link(inner_backedge_goto, inner_switchint);
+ let inner_else_break_goto = blocks.goto(None);
+ blocks.set_branch(inner_switchint, 1, inner_else_break_goto);
+
+ let backedge_goto = blocks.goto(Some(inner_else_break_goto));
+ blocks.link(backedge_goto, switchint);
+
+ let mir_body = blocks.to_body();
+ print_mir_graphviz("mir_switchint_loop_then_inner_loop_else_break", &mir_body);
+ /*
+ ┌────────────────┐
+ │ bb0: Call │
+ └────────────────┘
+ │
+ │
+ ▼
+ ┌─────────────┐ ┌────────────────┐
+ │ bb3: Return │ ◀── │ bb1: SwitchInt │ ◀─────┐
+ └─────────────┘ └────────────────┘ │
+ │ │
+ │ │
+ ▼ │
+ ┌────────────────┐ │
+ │ bb2: Call │ │
+ └────────────────┘ │
+ │ │
+ │ │
+ ▼ │
+ ┌────────────────┐ │
+ │ bb4: Call │ │
+ └────────────────┘ │
+ │ │
+ │ │
+ ▼ │
+ ┌─────────────┐ ┌────────────────┐ │
+ │ bb8: Goto │ ◀── │ bb5: SwitchInt │ ◀┐ │
+ └─────────────┘ └────────────────┘ │ │
+ │ │ │ │
+ │ │ │ │
+ ▼ ▼ │ │
+ ┌─────────────┐ ┌────────────────┐ │ │
+ │ bb9: Goto │ ─┐ │ bb6: Call │ │ │
+ └─────────────┘ │ └────────────────┘ │ │
+ │ │ │ │
+ │ │ │ │
+ │ ▼ │ │
+ │ ┌────────────────┐ │ │
+ │ │ bb7: Goto │ ─┘ │
+ │ └────────────────┘ │
+ │ │
+ └───────────────────────────┘
+ */
+ mir_body
+}
+
+#[test]
+fn test_covgraph_switchint_loop_then_inner_loop_else_break() {
+ let mir_body = switchint_loop_then_inner_loop_else_break();
+ let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+ print_coverage_graphviz(
+ "covgraph_switchint_loop_then_inner_loop_else_break",
+ &mir_body,
+ &basic_coverage_blocks,
+ );
+ /*
+ ┌─────────────────┐
+ │ bcb0: Call │
+ └─────────────────┘
+ │
+ │
+ ▼
+ ┌──────────────┐ ┌─────────────────┐
+ │ bcb2: Return │ ◀── │ bcb1: SwitchInt │ ◀┐
+ └──────────────┘ └─────────────────┘ │
+ │ │
+ │ │
+ ▼ │
+ ┌─────────────────┐ │
+ │ bcb3: Call │ │
+ └─────────────────┘ │
+ │ │
+ │ │
+ ▼ │
+ ┌──────────────┐ ┌─────────────────┐ │
+ │ bcb6: Goto │ ◀── │ bcb4: SwitchInt │ ◀┼────┐
+ └──────────────┘ └─────────────────┘ │ │
+ │ │ │ │
+ │ │ │ │
+ │ ▼ │ │
+ │ ┌─────────────────┐ │ │
+ │ │ bcb5: Goto │ ─┘ │
+ │ └─────────────────┘ │
+ │ │
+ └────────────────────────────────────────────┘
+ */
+ assert_eq!(
+ basic_coverage_blocks.num_nodes(),
+ 7,
+ "basic_coverage_blocks: {:?}",
+ basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
+ );
+
+ let_bcb!(0);
+ let_bcb!(1);
+ let_bcb!(2);
+ let_bcb!(3);
+ let_bcb!(4);
+ let_bcb!(5);
+ let_bcb!(6);
+
+ assert_successors!(basic_coverage_blocks, bcb0, [bcb1]);
+ assert_successors!(basic_coverage_blocks, bcb1, [bcb2, bcb3]);
+ assert_successors!(basic_coverage_blocks, bcb2, []);
+ assert_successors!(basic_coverage_blocks, bcb3, [bcb4]);
+ assert_successors!(basic_coverage_blocks, bcb4, [bcb5, bcb6]);
+ assert_successors!(basic_coverage_blocks, bcb5, [bcb1]);
+ assert_successors!(basic_coverage_blocks, bcb6, [bcb4]);
+}
+
+#[test]
+fn test_find_loop_backedges_none() {
+ let mir_body = goto_switchint();
+ let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+ if false {
+ eprintln!(
+ "basic_coverage_blocks = {:?}",
+ basic_coverage_blocks.iter_enumerated().collect::<Vec<_>>()
+ );
+ eprintln!("successors = {:?}", basic_coverage_blocks.successors);
+ }
+ let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
+ assert_eq!(
+ backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
+ 0,
+ "backedges: {:?}",
+ backedges
+ );
+}
+
+#[test]
+fn test_find_loop_backedges_one() {
+ let mir_body = switchint_then_loop_else_return();
+ let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+ let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
+ assert_eq!(
+ backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
+ 1,
+ "backedges: {:?}",
+ backedges
+ );
+
+ let_bcb!(1);
+ let_bcb!(3);
+
+ assert_eq!(backedges[bcb1], vec![bcb3]);
+}
+
+#[test]
+fn test_find_loop_backedges_two() {
+ let mir_body = switchint_loop_then_inner_loop_else_break();
+ let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+ let backedges = graph::find_loop_backedges(&basic_coverage_blocks);
+ assert_eq!(
+ backedges.iter_enumerated().map(|(_bcb, backedges)| backedges.len()).sum::<usize>(),
+ 2,
+ "backedges: {:?}",
+ backedges
+ );
+
+ let_bcb!(1);
+ let_bcb!(4);
+ let_bcb!(5);
+ let_bcb!(6);
+
+ assert_eq!(backedges[bcb1], vec![bcb5]);
+ assert_eq!(backedges[bcb4], vec![bcb6]);
+}
+
+#[test]
+fn test_traverse_coverage_with_loops() {
+ let mir_body = switchint_loop_then_inner_loop_else_break();
+ let basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+ let mut traversed_in_order = Vec::new();
+ let mut traversal = graph::TraverseCoverageGraphWithLoops::new(&basic_coverage_blocks);
+ while let Some(bcb) = traversal.next(&basic_coverage_blocks) {
+ traversed_in_order.push(bcb);
+ }
+
+ let_bcb!(6);
+
+ // bcb0 is visited first. Then bcb1 starts the first loop, and all remaining nodes, *except*
+ // bcb6, are inside the first loop.
+ assert_eq!(
+ *traversed_in_order.last().expect("should have elements"),
+ bcb6,
+ "bcb6 should not be visited until all nodes inside the first loop have been visited"
+ );
+}
+
+fn synthesize_body_span_from_terminators(mir_body: &Body<'_>) -> Span {
+ let mut some_span: Option<Span> = None;
+ for (_, data) in mir_body.basic_blocks().iter_enumerated() {
+ let term_span = data.terminator().source_info.span;
+ if let Some(span) = some_span.as_mut() {
+ *span = span.to(term_span);
+ } else {
+ some_span = Some(term_span)
+ }
+ }
+ some_span.expect("body must have at least one BasicBlock")
+}
+
+#[test]
+fn test_make_bcb_counters() {
+ rustc_span::create_default_session_globals_then(|| {
+ let mir_body = goto_switchint();
+ let body_span = synthesize_body_span_from_terminators(&mir_body);
+ let mut basic_coverage_blocks = graph::CoverageGraph::from_mir(&mir_body);
+ let mut coverage_spans = Vec::new();
+ for (bcb, data) in basic_coverage_blocks.iter_enumerated() {
+ if let Some(span) = spans::filtered_terminator_span(data.terminator(&mir_body)) {
+ coverage_spans.push(spans::CoverageSpan::for_terminator(
+ spans::function_source_span(span, body_span),
+ span,
+ bcb,
+ data.last_bb(),
+ ));
+ }
+ }
+ let mut coverage_counters = counters::CoverageCounters::new(0);
+ let intermediate_expressions = coverage_counters
+ .make_bcb_counters(&mut basic_coverage_blocks, &coverage_spans)
+ .expect("should be Ok");
+ assert_eq!(intermediate_expressions.len(), 0);
+
+ let_bcb!(1);
+ assert_eq!(
+ 1, // coincidentally, bcb1 has a `Counter` with id = 1
+ match basic_coverage_blocks[bcb1].counter().expect("should have a counter") {
+ CoverageKind::Counter { id, .. } => id,
+ _ => panic!("expected a Counter"),
+ }
+ .as_u32()
+ );
+
+ let_bcb!(2);
+ assert_eq!(
+ 2, // coincidentally, bcb2 has a `Counter` with id = 2
+ match basic_coverage_blocks[bcb2].counter().expect("should have a counter") {
+ CoverageKind::Counter { id, .. } => id,
+ _ => panic!("expected a Counter"),
+ }
+ .as_u32()
+ );
+ });
+}
diff --git a/compiler/rustc_mir_transform/src/dead_store_elimination.rs b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
new file mode 100644
index 000000000..9163672f5
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/dead_store_elimination.rs
@@ -0,0 +1,86 @@
+//! This module implements a dead store elimination (DSE) routine.
+//!
+//! This transformation was written specifically for the needs of dest prop. Although it is
+//! perfectly sound to use it in any context that might need it, its behavior should not be changed
+//! without analyzing the interaction this will have with dest prop. Specifically, in addition to
+//! the soundness of this pass in general, dest prop needs it to satisfy two additional conditions:
+//!
+//! 1. It's idempotent, meaning that running this pass a second time immediately after running it a
+//! first time will not cause any further changes.
+//! 2. This idempotence persists across dest prop's main transform; in other words, inserting any
+//! number of iterations of dest prop between the first and second application of this transform
+//! will still not cause any further changes.
+//!
+
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_mir_dataflow::impls::{borrowed_locals, MaybeTransitiveLiveLocals};
+use rustc_mir_dataflow::Analysis;
+
+/// Performs the optimization on the body.
+///
+/// The `borrowed` set must be a `BitSet` of all the locals that are ever borrowed in this body. It
+/// can be generated via the [`borrowed_locals`] function.
+pub fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, borrowed: &BitSet<Local>) {
+ let mut live = MaybeTransitiveLiveLocals::new(borrowed)
+ .into_engine(tcx, body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(body);
+
+ let mut patch = Vec::new();
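+ // Liveness is a backward dataflow analysis, so statements are visited in
+ // reverse order within each block.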
+ for (bb, bb_data) in traversal::preorder(body) {
+ for (statement_index, statement) in bb_data.statements.iter().enumerate().rev() {
+ let loc = Location { block: bb, statement_index };
+ if let StatementKind::Assign(assign) = &statement.kind {
+ if !assign.1.is_safe_to_remove() {
+ continue;
+ }
+ }
+ match &statement.kind {
+ StatementKind::Assign(box (place, _))
+ | StatementKind::SetDiscriminant { place: box place, .. }
+ | StatementKind::Deinit(box place) => {
+ if !place.is_indirect() && !borrowed.contains(place.local) {
+ live.seek_before_primary_effect(loc);
+ if !live.get().contains(place.local) {
+ patch.push(loc);
+ }
+ }
+ }
+ StatementKind::Retag(_, _)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Coverage(_)
+ | StatementKind::CopyNonOverlapping(_)
+ | StatementKind::Nop => (),
+
+ StatementKind::FakeRead(_) | StatementKind::AscribeUserType(_, _) => {
+ bug!("{:?} not found in this MIR phase!", &statement.kind)
+ }
+ }
+ }
+ }
+
+ if patch.is_empty() {
+ return;
+ }
+
+ let bbs = body.basic_blocks.as_mut_preserves_cfg();
+ for Location { block, statement_index } in patch {
+ bbs[block].statements[statement_index].make_nop();
+ }
+}
+
+pub struct DeadStoreElimination;
+
+impl<'tcx> MirPass<'tcx> for DeadStoreElimination {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 2
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let borrowed = borrowed_locals(body);
+ eliminate(tcx, body, &borrowed);
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/deaggregator.rs b/compiler/rustc_mir_transform/src/deaggregator.rs
new file mode 100644
index 000000000..b93fe5879
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/deaggregator.rs
@@ -0,0 +1,49 @@
+use crate::util::expand_aggregate;
+use crate::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
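+/// Expands aggregate assignments such as `_1 = Point { x: _2, y: _3 }` into a
+/// sequence of per-field assignments (a sketch of the effect; `Point` is
+/// hypothetical). Arrays are left aggregated, since deaggregating them is not
+/// always cheaper.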
+pub struct Deaggregator;
+
+impl<'tcx> MirPass<'tcx> for Deaggregator {
+ fn phase_change(&self) -> Option<MirPhase> {
+ Some(MirPhase::Deaggregated)
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
+ for bb in basic_blocks {
+ bb.expand_statements(|stmt| {
+ // FIXME(eddyb) don't match twice on `stmt.kind` (post-NLL).
+ match stmt.kind {
+ // FIXME(#48193) Deaggregate arrays when it's cheaper to do so.
+ StatementKind::Assign(box (
+ _,
+ Rvalue::Aggregate(box AggregateKind::Array(_), _),
+ )) => {
+ return None;
+ }
+ StatementKind::Assign(box (_, Rvalue::Aggregate(_, _))) => {}
+ _ => return None,
+ }
+
+ let stmt = stmt.replace_nop();
+ let source_info = stmt.source_info;
+ let StatementKind::Assign(box (lhs, Rvalue::Aggregate(kind, operands))) = stmt.kind else {
+ bug!();
+ };
+
+ Some(expand_aggregate(
+ lhs,
+ operands.into_iter().map(|op| {
+ let ty = op.ty(&body.local_decls, tcx);
+ (op, ty)
+ }),
+ *kind,
+ source_info,
+ tcx,
+ ))
+ });
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/deduplicate_blocks.rs b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
new file mode 100644
index 000000000..d1977ed49
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/deduplicate_blocks.rs
@@ -0,0 +1,190 @@
+//! This pass finds basic blocks that are completely equal,
+//! and replaces all uses with just one of them.
+
+use std::{collections::hash_map::Entry, hash::Hash, hash::Hasher, iter};
+
+use crate::MirPass;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::mir::visit::MutVisitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+use super::simplify::simplify_cfg;
+
+pub struct DeduplicateBlocks;
+
+impl<'tcx> MirPass<'tcx> for DeduplicateBlocks {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 4
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ debug!("Running DeduplicateBlocks on `{:?}`", body.source);
+ let duplicates = find_duplicates(body);
+ let has_opts_to_apply = !duplicates.is_empty();
+
+ if has_opts_to_apply {
+ let mut opt_applier = OptApplier { tcx, duplicates };
+ opt_applier.visit_body(body);
+ simplify_cfg(tcx, body);
+ }
+ }
+}
+
+struct OptApplier<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ duplicates: FxHashMap<BasicBlock, BasicBlock>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for OptApplier<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
+ for target in terminator.successors_mut() {
+ if let Some(replacement) = self.duplicates.get(target) {
+ debug!("SUCCESS: Replacing: `{:?}` with `{:?}`", target, replacement);
+ *target = *replacement;
+ }
+ }
+
+ self.super_terminator(terminator, location);
+ }
+}
+
+fn find_duplicates(body: &Body<'_>) -> FxHashMap<BasicBlock, BasicBlock> {
+ let mut duplicates = FxHashMap::default();
+
+ let bbs_to_go_through =
+ body.basic_blocks().iter_enumerated().filter(|(_, bbd)| !bbd.is_cleanup).count();
+
+ let mut same_hashes =
+ FxHashMap::with_capacity_and_hasher(bbs_to_go_through, Default::default());
+
+ // Go through the basic blocks backwards. This means that in case of duplicates,
+ // we can use the basic block with the highest index as the replacement for all lower ones.
+ // For example, if bb1, bb2 and bb3 are duplicates, we will first insert bb3 in same_hashes.
+ // Then we will see that bb2 is a duplicate of bb3,
+ // and insert bb2 with the replacement bb3 in the duplicates list.
+ // When we see bb1, we see that it is a duplicate of bb3, and therefore insert it in the duplicates list
+ // with replacement bb3.
+ // When the duplicates are removed, we will end up with only bb3.
+ for (bb, bbd) in body.basic_blocks().iter_enumerated().rev().filter(|(_, bbd)| !bbd.is_cleanup)
+ {
+ // Basic blocks can get really big, so to avoid checking for duplicates in basic blocks
+ // that are unlikely to have duplicates, we stop early. The early-bail threshold was
+ // found experimentally, via `eprintln!` instrumentation, while compiling the crates in
+ // the rustc-perf suite.
+ if bbd.statements.len() > 10 {
+ continue;
+ }
+
+ let to_hash = BasicBlockHashable { basic_block_data: bbd };
+ let entry = same_hashes.entry(to_hash);
+ match entry {
+ Entry::Occupied(occupied) => {
+ // The basic block was already in the hashmap, which means we have a duplicate
+ let value = *occupied.get();
+ debug!("Inserting {:?} -> {:?}", bb, value);
+ duplicates.try_insert(bb, value).expect("key was already inserted");
+ }
+ Entry::Vacant(vacant) => {
+ vacant.insert(bb);
+ }
+ }
+ }
+
+ duplicates
+}
+
+struct BasicBlockHashable<'tcx, 'a> {
+ basic_block_data: &'a BasicBlockData<'tcx>,
+}
+
+impl Hash for BasicBlockHashable<'_, '_> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ hash_statements(state, self.basic_block_data.statements.iter());
+        // Note that since we only hash the kind, we lose span information if we deduplicate
+        // the blocks.
+ self.basic_block_data.terminator().kind.hash(state);
+ }
+}
+
+impl Eq for BasicBlockHashable<'_, '_> {}
+
+impl PartialEq for BasicBlockHashable<'_, '_> {
+ fn eq(&self, other: &Self) -> bool {
+ self.basic_block_data.statements.len() == other.basic_block_data.statements.len()
+ && &self.basic_block_data.terminator().kind == &other.basic_block_data.terminator().kind
+ && iter::zip(&self.basic_block_data.statements, &other.basic_block_data.statements)
+ .all(|(x, y)| statement_eq(&x.kind, &y.kind))
+ }
+}
+
+fn hash_statements<'a, 'tcx, H: Hasher>(
+ hasher: &mut H,
+ iter: impl Iterator<Item = &'a Statement<'tcx>>,
+) where
+ 'tcx: 'a,
+{
+ for stmt in iter {
+ statement_hash(hasher, &stmt.kind);
+ }
+}
+
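+// The hash/eq helpers below deliberately compare only semantic content: assignments are
+// compared by place and rvalue, and constants by `literal` alone, ignoring spans and
+// `user_ty`, so that otherwise-identical blocks from different source locations still match.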
+fn statement_hash<H: Hasher>(hasher: &mut H, stmt: &StatementKind<'_>) {
+ match stmt {
+ StatementKind::Assign(box (place, rvalue)) => {
+ place.hash(hasher);
+ rvalue_hash(hasher, rvalue)
+ }
+ x => x.hash(hasher),
+ };
+}
+
+fn rvalue_hash<H: Hasher>(hasher: &mut H, rvalue: &Rvalue<'_>) {
+ match rvalue {
+ Rvalue::Use(op) => operand_hash(hasher, op),
+ x => x.hash(hasher),
+ };
+}
+
+fn operand_hash<H: Hasher>(hasher: &mut H, operand: &Operand<'_>) {
+ match operand {
+ Operand::Constant(box Constant { user_ty: _, literal, span: _ }) => literal.hash(hasher),
+ x => x.hash(hasher),
+ };
+}
+
+fn statement_eq<'tcx>(lhs: &StatementKind<'tcx>, rhs: &StatementKind<'tcx>) -> bool {
+ let res = match (lhs, rhs) {
+ (
+ StatementKind::Assign(box (place, rvalue)),
+ StatementKind::Assign(box (place2, rvalue2)),
+ ) => place == place2 && rvalue_eq(rvalue, rvalue2),
+ (x, y) => x == y,
+ };
+ debug!("statement_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
+ res
+}
+
+fn rvalue_eq<'tcx>(lhs: &Rvalue<'tcx>, rhs: &Rvalue<'tcx>) -> bool {
+ let res = match (lhs, rhs) {
+ (Rvalue::Use(op1), Rvalue::Use(op2)) => operand_eq(op1, op2),
+ (x, y) => x == y,
+ };
+ debug!("rvalue_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
+ res
+}
+
+fn operand_eq<'tcx>(lhs: &Operand<'tcx>, rhs: &Operand<'tcx>) -> bool {
+ let res = match (lhs, rhs) {
+ (
+ Operand::Constant(box Constant { user_ty: _, literal, span: _ }),
+ Operand::Constant(box Constant { user_ty: _, literal: literal2, span: _ }),
+ ) => literal == literal2,
+ (x, y) => x == y,
+ };
+ debug!("operand_eq lhs: `{:?}` rhs: `{:?}` result: {:?}", lhs, rhs, res);
+ res
+}
diff --git a/compiler/rustc_mir_transform/src/deref_separator.rs b/compiler/rustc_mir_transform/src/deref_separator.rs
new file mode 100644
index 000000000..a00bb16f7
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/deref_separator.rs
@@ -0,0 +1,105 @@
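+//! This pass rewrites places so that a deref only ever appears as the first projection,
+//! introducing `CopyForDeref` temporaries for the intermediate pointers.
+//!
+//! A hedged sketch of the effect (locals invented for illustration):
+//!
+//! ```text
+//! // before: a place with a nested deref
+//! _2 = (*(*_1));
+//!
+//! // after: the inner deref is hoisted into a fresh temp
+//! _3 = CopyForDeref((*_1));
+//! _2 = (*_3);
+//! ```
+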
+use crate::MirPass;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::patch::MirPatch;
+use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+pub struct Derefer;
+
+pub struct DerefChecker<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ patcher: MirPatch<'tcx>,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for DerefChecker<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_place(&mut self, place: &mut Place<'tcx>, cntxt: PlaceContext, loc: Location) {
+ if !place.projection.is_empty()
+ && cntxt != PlaceContext::NonUse(VarDebugInfo)
+ && place.projection[1..].contains(&ProjectionElem::Deref)
+ {
+ let mut place_local = place.local;
+ let mut last_len = 0;
+ let mut last_deref_idx = 0;
+
+ let mut prev_temp: Option<Local> = None;
+
+            for (idx, elem) in place.projection.iter().enumerate() {
+ if *elem == ProjectionElem::Deref {
+ last_deref_idx = idx;
+ }
+ }
+
+ for (idx, (p_ref, p_elem)) in place.iter_projections().enumerate() {
+ if !p_ref.projection.is_empty() && p_elem == ProjectionElem::Deref {
+ let ty = p_ref.ty(&self.local_decls, self.tcx).ty;
+ let temp = self.patcher.new_local_with_info(
+ ty,
+ self.local_decls[p_ref.local].source_info.span,
+ Some(Box::new(LocalInfo::DerefTemp)),
+ );
+
+ self.patcher.add_statement(loc, StatementKind::StorageLive(temp));
+
+                    // We add the current `p_ref`'s projections to our temp value,
+                    // excluding the projections we have already covered.
+ let deref_place = Place::from(place_local)
+ .project_deeper(&p_ref.projection[last_len..], self.tcx);
+
+ self.patcher.add_assign(
+ loc,
+ Place::from(temp),
+ Rvalue::CopyForDeref(deref_place),
+ );
+ place_local = temp;
+ last_len = p_ref.projection.len();
+
+ // Change `Place` only if we are actually at the Place's last deref
+ if idx == last_deref_idx {
+ let temp_place =
+ Place::from(temp).project_deeper(&place.projection[idx..], self.tcx);
+ *place = temp_place;
+ }
+
+ // We are destroying the previous temp since it's no longer used.
+ if let Some(prev_temp) = prev_temp {
+ self.patcher.add_statement(loc, StatementKind::StorageDead(prev_temp));
+ }
+
+ prev_temp = Some(temp);
+ }
+ }
+
+            // The loop above only kills the *previous* temp each iteration, so the final temp
+            // is still live; mark it dead here, just past the current statement.
+ if let Some(prev_temp) = prev_temp {
+ let last_loc =
+ Location { block: loc.block, statement_index: loc.statement_index + 1 };
+ self.patcher.add_statement(last_loc, StatementKind::StorageDead(prev_temp));
+ }
+ }
+ }
+}
+
+pub fn deref_finder<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let patch = MirPatch::new(body);
+ let mut checker = DerefChecker { tcx, patcher: patch, local_decls: body.local_decls.clone() };
+
+ for (bb, data) in body.basic_blocks_mut().iter_enumerated_mut() {
+ checker.visit_basic_block_data(bb, data);
+ }
+
+ checker.patcher.apply(body);
+}
+
+impl<'tcx> MirPass<'tcx> for Derefer {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ deref_finder(tcx, body);
+ body.phase = MirPhase::Derefered;
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs
new file mode 100644
index 000000000..33572068f
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/dest_prop.rs
@@ -0,0 +1,917 @@
+//! Propagates assignment destinations backwards in the CFG to eliminate redundant assignments.
+//!
+//! # Motivation
+//!
+//! MIR building can insert a lot of redundant copies, and Rust code in general often tends to move
+//! values around a lot. The result is a lot of assignments of the form `dest = {move} src;` in MIR.
+//! MIR building for constants in particular tends to create additional locals that are only used
+//! inside a single block to shuffle a value around unnecessarily.
+//!
+//! LLVM by itself is not good enough at eliminating these redundant copies (e.g. see
+//! <https://github.com/rust-lang/rust/issues/32966>), so this leaves some performance on the table
+//! that we can regain by implementing an optimization for removing these assign statements in rustc
+//! itself. When this optimization runs fast enough, it can also speed up the constant evaluation
+//! and code generation phases of rustc due to the reduced number of statements and locals.
+//!
+//! # The Optimization
+//!
+//! Conceptually, this optimization is "destination propagation". It is similar to the Named Return
+//! Value Optimization, or NRVO, known from the C++ world, except that it isn't limited to return
+//! values or the return place `_0`. On a very high level, independent of the actual implementation
+//! details, it does the following:
+//!
+//! 1) Identify `dest = src;` statements that can be soundly eliminated.
+//! 2) Replace all mentions of `src` with `dest` ("unifying" them and propagating the destination
+//! backwards).
+//! 3) Delete the `dest = src;` statement (by making it a `nop`).
+//!
+//! Step 1) is by far the hardest, so it is explained in more detail below.
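+//!
+//! As a hedged sketch of steps 2) and 3) (locals and statements invented for illustration):
+//!
+//! ```text
+//! // before: `_1 = move _2;` is a candidate with dest = _1, src = _2
+//! _2 = foo();
+//! _1 = move _2;
+//! use(_1);
+//!
+//! // after: `_2` is replaced by `_1` everywhere, turning the candidate into a
+//! // self-assignment, which is then deleted (made a `nop`)
+//! _1 = foo();
+//! nop;
+//! use(_1);
+//! ```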
+//!
+//! ## Soundness
+//!
+//! Given an `Assign` statement `dest = src;`, where `dest` is a `Place` and `src` is an `Rvalue`,
+//! there are a few requirements that must hold for the optimization to be sound:
+//!
+//! * `dest` must not contain any *indirection* through a pointer. It must access part of the base
+//! local. Otherwise it might point to arbitrary memory that is hard to track.
+//!
+//! It must also not contain any indexing projections, since those take an arbitrary `Local` as
+//! the index, and that local might only be initialized shortly before `dest` is used.
+//!
+//! * `src` must be a bare `Local` without any indirections or field projections (FIXME: Is this a
+//! fundamental restriction or just current impl state?). It can be copied or moved by the
+//! assignment.
+//!
+//! * The `dest` and `src` locals must never be [*live*][liveness] at the same time. If they are, it
+//! means that they both hold a (potentially different) value that is needed by a future use of
+//! the locals. Unifying them would overwrite one of the values.
+//!
+//! Note that computing liveness of locals that have had their address taken is more difficult:
+//! Short of doing full escape analysis on the address/pointer/reference, the pass would need to
+//! assume that any operation that can potentially involve opaque user code (such as function
+//! calls, destructors, and inline assembly) may access any local that had its address taken
+//! before that point.
+//!
+//! Here, the first two conditions are simple structural requirements on the `Assign` statements
+//! that can be trivially checked. The liveness requirement however is more difficult and costly to
+//! check.
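+//!
+//! As a hedged illustration of such a liveness conflict (locals invented):
+//!
+//! ```text
+//! _1 = foo();
+//! _2 = bar();
+//! _3 = Add(move _1, move _2); // _1 and _2 are live at the same time, so a
+//!                             // `_1 = move _2;` candidate could not merge them
+//! ```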
+//!
+//! ## Previous Work
+//!
+//! A [previous attempt] at implementing an optimization like this turned out to be a significant
+//! regression in compiler performance. Fixing the regressions introduced a lot of undesirable
+//! complexity to the implementation.
+//!
+//! A [subsequent approach] tried to avoid the costly computation by limiting itself to acyclic
+//! CFGs, but still turned out to be far too costly to run due to suboptimal performance within
+//! individual basic blocks, requiring a walk across the entire block for every assignment found
+//! within the block. For the `tuple-stress` benchmark, which has 458745 statements in a single
+//! block, this proved to be far too costly.
+//!
+//! Since the first attempt at this, the compiler has improved dramatically, and new analysis
+//! frameworks have been added that should make this approach viable without requiring a limited
+//! approach that only works for some classes of CFGs:
+//! - rustc now has a powerful dataflow analysis framework that can handle forwards and backwards
+//! analyses efficiently.
+//! - Layout optimizations for generators have been added to improve code generation for
+//! async/await, which are very similar in spirit to what this optimization does. Both walk the
+//! MIR and record conflicting uses of locals in a `BitMatrix`.
+//!
+//! Also, rustc now has a simple NRVO pass (see `nrvo.rs`), which handles a subset of the cases that
+//! this destination propagation pass handles, proving that similar optimizations can be performed
+//! on MIR.
+//!
+//! ## Pre/Post Optimization
+//!
+//! It is recommended to run `SimplifyCfg` and then `SimplifyLocals` some time after this pass, as
+//! it replaces the eliminated assign statements with `nop`s and leaves unused locals behind.
+//!
+//! [liveness]: https://en.wikipedia.org/wiki/Live_variable_analysis
+//! [previous attempt]: https://github.com/rust-lang/rust/pull/47954
+//! [subsequent approach]: https://github.com/rust-lang/rust/pull/71003
+
+use crate::MirPass;
+use itertools::Itertools;
+use rustc_data_structures::unify::{InPlaceUnificationTable, UnifyKey};
+use rustc_index::{
+ bit_set::{BitMatrix, BitSet},
+ vec::IndexVec,
+};
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
+use rustc_middle::mir::{dump_mir, PassWhere};
+use rustc_middle::mir::{
+ traversal, Body, InlineAsmOperand, Local, LocalKind, Location, Operand, Place, PlaceElem,
+ Rvalue, Statement, StatementKind, Terminator, TerminatorKind,
+};
+use rustc_middle::ty::TyCtxt;
+use rustc_mir_dataflow::impls::{borrowed_locals, MaybeInitializedLocals, MaybeLiveLocals};
+use rustc_mir_dataflow::Analysis;
+
+// Empirical measurements have resulted in some observations:
+// - Running on a body with a single block and 500 locals takes barely any time
+// - Running on a body with ~400 blocks and ~300 relevant locals takes "too long"
+// ...so we just limit both to somewhat reasonable-ish looking values.
+const MAX_LOCALS: usize = 500;
+const MAX_BLOCKS: usize = 250;
+
+pub struct DestinationPropagation;
+
+impl<'tcx> MirPass<'tcx> for DestinationPropagation {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ // FIXME(#79191, #82678): This is unsound.
+ //
+ // Only run at mir-opt-level=3 or higher for now (we don't fix up debuginfo and remove
+ // storage statements at the moment).
+ sess.opts.unstable_opts.unsound_mir_opts && sess.mir_opt_level() >= 3
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let def_id = body.source.def_id();
+
+ let candidates = find_candidates(body);
+ if candidates.is_empty() {
+ debug!("{:?}: no dest prop candidates, done", def_id);
+ return;
+ }
+
+ // Collect all locals we care about. We only compute conflicts for these to save time.
+ let mut relevant_locals = BitSet::new_empty(body.local_decls.len());
+ for CandidateAssignment { dest, src, loc: _ } in &candidates {
+ relevant_locals.insert(dest.local);
+ relevant_locals.insert(*src);
+ }
+
+ // This pass unfortunately has `O(l² * s)` performance, where `l` is the number of locals
+ // and `s` is the number of statements and terminators in the function.
+ // To prevent blowing up compile times too much, we bail out when there are too many locals.
+ let relevant = relevant_locals.count();
+ debug!(
+ "{:?}: {} locals ({} relevant), {} blocks",
+ def_id,
+ body.local_decls.len(),
+ relevant,
+ body.basic_blocks().len()
+ );
+ if relevant > MAX_LOCALS {
+ warn!(
+ "too many candidate locals in {:?} ({}, max is {}), not optimizing",
+ def_id, relevant, MAX_LOCALS
+ );
+ return;
+ }
+ if body.basic_blocks().len() > MAX_BLOCKS {
+ warn!(
+ "too many blocks in {:?} ({}, max is {}), not optimizing",
+ def_id,
+ body.basic_blocks().len(),
+ MAX_BLOCKS
+ );
+ return;
+ }
+
+ let mut conflicts = Conflicts::build(tcx, body, &relevant_locals);
+
+ let mut replacements = Replacements::new(body.local_decls.len());
+ for candidate @ CandidateAssignment { dest, src, loc } in candidates {
+ // Merge locals that don't conflict.
+ if !conflicts.can_unify(dest.local, src) {
+ debug!("at assignment {:?}, conflict {:?} vs. {:?}", loc, dest.local, src);
+ continue;
+ }
+
+ if replacements.for_src(candidate.src).is_some() {
+ debug!("src {:?} already has replacement", candidate.src);
+ continue;
+ }
+
+ if !tcx.consider_optimizing(|| {
+ format!("DestinationPropagation {:?} {:?}", def_id, candidate)
+ }) {
+ break;
+ }
+
+ replacements.push(candidate);
+ conflicts.unify(candidate.src, candidate.dest.local);
+ }
+
+ replacements.flatten(tcx);
+
+ debug!("replacements {:?}", replacements.map);
+
+ Replacer { tcx, replacements, place_elem_cache: Vec::new() }.visit_body(body);
+
+ // FIXME fix debug info
+ }
+}
+
+#[derive(Debug, Eq, PartialEq, Copy, Clone)]
+struct UnifyLocal(Local);
+
+impl From<Local> for UnifyLocal {
+ fn from(l: Local) -> Self {
+ Self(l)
+ }
+}
+
+impl UnifyKey for UnifyLocal {
+ type Value = ();
+ #[inline]
+ fn index(&self) -> u32 {
+ self.0.as_u32()
+ }
+ #[inline]
+ fn from_index(u: u32) -> Self {
+ Self(Local::from_u32(u))
+ }
+ fn tag() -> &'static str {
+ "UnifyLocal"
+ }
+}
+
+struct Replacements<'tcx> {
+ /// Maps locals to their replacement.
+ map: IndexVec<Local, Option<Place<'tcx>>>,
+
+    /// Locals whose live ranges should be killed.
+ kill: BitSet<Local>,
+}
+
+impl<'tcx> Replacements<'tcx> {
+ fn new(locals: usize) -> Self {
+ Self { map: IndexVec::from_elem_n(None, locals), kill: BitSet::new_empty(locals) }
+ }
+
+ fn push(&mut self, candidate: CandidateAssignment<'tcx>) {
+ trace!("Replacements::push({:?})", candidate);
+ let entry = &mut self.map[candidate.src];
+ assert!(entry.is_none());
+
+ *entry = Some(candidate.dest);
+ self.kill.insert(candidate.src);
+ self.kill.insert(candidate.dest.local);
+ }
+
+    /// Resolves chains of stored replacements, so that no replacement maps to a local that
+    /// itself has a replacement; afterwards, a single lookup fully resolves any local.
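+    ///
+    /// A hedged example (locals invented): if the map contains `_1 -> _2.field` and
+    /// `_2 -> _3`, flattening rewrites the first entry to `_1 -> _3.field`.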
+ fn flatten(&mut self, tcx: TyCtxt<'tcx>) {
+ // Note: This assumes that there are no cycles in the replacements, which is enforced via
+ // `self.unified_locals`. Otherwise this can cause an infinite loop.
+
+ for local in self.map.indices() {
+ if let Some(replacement) = self.map[local] {
+ // Substitute the base local of `replacement` until fixpoint.
+ let mut base = replacement.local;
+ let mut reversed_projection_slices = Vec::with_capacity(1);
+ while let Some(replacement_for_replacement) = self.map[base] {
+ base = replacement_for_replacement.local;
+ reversed_projection_slices.push(replacement_for_replacement.projection);
+ }
+
+ let projection: Vec<_> = reversed_projection_slices
+ .iter()
+ .rev()
+ .flat_map(|projs| projs.iter())
+ .chain(replacement.projection.iter())
+ .collect();
+ let projection = tcx.intern_place_elems(&projection);
+
+ // Replace with the final `Place`.
+ self.map[local] = Some(Place { local: base, projection });
+ }
+ }
+ }
+
+ fn for_src(&self, src: Local) -> Option<Place<'tcx>> {
+ self.map[src]
+ }
+}
+
+struct Replacer<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ replacements: Replacements<'tcx>,
+ place_elem_cache: Vec<PlaceElem<'tcx>>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for Replacer<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_local(&mut self, local: &mut Local, context: PlaceContext, location: Location) {
+ if context.is_use() && self.replacements.for_src(*local).is_some() {
+ bug!(
+ "use of local {:?} should have been replaced by visit_place; context={:?}, loc={:?}",
+ local,
+ context,
+ location,
+ );
+ }
+ }
+
+ fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
+ if let Some(replacement) = self.replacements.for_src(place.local) {
+ // Rebase `place`s projections onto `replacement`'s.
+ self.place_elem_cache.clear();
+ self.place_elem_cache.extend(replacement.projection.iter().chain(place.projection));
+ let projection = self.tcx.intern_place_elems(&self.place_elem_cache);
+ let new_place = Place { local: replacement.local, projection };
+
+ debug!("Replacer: {:?} -> {:?}", place, new_place);
+ *place = new_place;
+ }
+
+ self.super_place(place, context, location);
+ }
+
+ fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
+ self.super_statement(statement, location);
+
+ match &statement.kind {
+ // FIXME: Don't delete storage statements, merge the live ranges instead
+ StatementKind::StorageDead(local) | StatementKind::StorageLive(local)
+ if self.replacements.kill.contains(*local) =>
+ {
+ statement.make_nop()
+ }
+
+ StatementKind::Assign(box (dest, rvalue)) => {
+ match rvalue {
+ Rvalue::Use(Operand::Copy(place) | Operand::Move(place)) => {
+ // These might've been turned into self-assignments by the replacement
+ // (this includes the original statement we wanted to eliminate).
+ if dest == place {
+ debug!("{:?} turned into self-assignment, deleting", location);
+ statement.make_nop();
+ }
+ }
+ _ => {}
+ }
+ }
+
+ _ => {}
+ }
+ }
+}
+
+struct Conflicts<'a> {
+ relevant_locals: &'a BitSet<Local>,
+
+    /// The conflict matrix. It is always symmetric, and it is the adjacency matrix of the
+    /// corresponding conflict graph.
+ matrix: BitMatrix<Local, Local>,
+
+ /// Preallocated `BitSet` used by `unify`.
+ unify_cache: BitSet<Local>,
+
+ /// Tracks locals that have been merged together to prevent cycles and propagate conflicts.
+ unified_locals: InPlaceUnificationTable<UnifyLocal>,
+}
+
+impl<'a> Conflicts<'a> {
+ fn build<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &'_ Body<'tcx>,
+ relevant_locals: &'a BitSet<Local>,
+ ) -> Self {
+ // We don't have to look out for locals that have their address taken, since
+ // `find_candidates` already takes care of that.
+
+ let conflicts = BitMatrix::from_row_n(
+ &BitSet::new_empty(body.local_decls.len()),
+ body.local_decls.len(),
+ );
+
+ let mut init = MaybeInitializedLocals
+ .into_engine(tcx, body)
+ .iterate_to_fixpoint()
+ .into_results_cursor(body);
+ let mut live =
+ MaybeLiveLocals.into_engine(tcx, body).iterate_to_fixpoint().into_results_cursor(body);
+
+ let mut reachable = None;
+ dump_mir(tcx, None, "DestinationPropagation-dataflow", &"", body, |pass_where, w| {
+ let reachable = reachable.get_or_insert_with(|| traversal::reachable_as_bitset(body));
+
+ match pass_where {
+ PassWhere::BeforeLocation(loc) if reachable.contains(loc.block) => {
+ init.seek_before_primary_effect(loc);
+ live.seek_after_primary_effect(loc);
+
+ writeln!(w, " // init: {:?}", init.get())?;
+ writeln!(w, " // live: {:?}", live.get())?;
+ }
+ PassWhere::AfterTerminator(bb) if reachable.contains(bb) => {
+ let loc = body.terminator_loc(bb);
+ init.seek_after_primary_effect(loc);
+ live.seek_before_primary_effect(loc);
+
+ writeln!(w, " // init: {:?}", init.get())?;
+ writeln!(w, " // live: {:?}", live.get())?;
+ }
+
+ PassWhere::BeforeBlock(bb) if reachable.contains(bb) => {
+ init.seek_to_block_start(bb);
+ live.seek_to_block_start(bb);
+
+ writeln!(w, " // init: {:?}", init.get())?;
+ writeln!(w, " // live: {:?}", live.get())?;
+ }
+
+ PassWhere::BeforeCFG | PassWhere::AfterCFG | PassWhere::AfterLocation(_) => {}
+
+ PassWhere::BeforeLocation(_) | PassWhere::AfterTerminator(_) => {
+ writeln!(w, " // init: <unreachable>")?;
+ writeln!(w, " // live: <unreachable>")?;
+ }
+
+ PassWhere::BeforeBlock(_) => {
+ writeln!(w, " // init: <unreachable>")?;
+ writeln!(w, " // live: <unreachable>")?;
+ }
+ }
+
+ Ok(())
+ });
+
+ let mut this = Self {
+ relevant_locals,
+ matrix: conflicts,
+ unify_cache: BitSet::new_empty(body.local_decls.len()),
+ unified_locals: {
+ let mut table = InPlaceUnificationTable::new();
+ // Pre-fill table with all locals (this creates N nodes / "connected" components,
+ // "graph"-ically speaking).
+ for local in 0..body.local_decls.len() {
+ assert_eq!(table.new_key(()), UnifyLocal(Local::from_usize(local)));
+ }
+ table
+ },
+ };
+
+ let mut live_and_init_locals = Vec::new();
+
+ // Visit only reachable basic blocks. The exact order is not important.
+ for (block, data) in traversal::preorder(body) {
+ // We need to observe the dataflow state *before* all possible locations (statement or
+ // terminator) in each basic block, and then observe the state *after* the terminator
+            // effect is applied. As long as neither `init` nor `live` has a "before" effect,
+ // we will observe all possible dataflow states.
+
+            // Since liveness is a backwards analysis, we need to walk the results backwards.
+            // To do that, we first collect the `MaybeInitializedLocals` results in a forwards
+            // traversal.
+
+ live_and_init_locals.resize_with(data.statements.len() + 1, || {
+ BitSet::new_empty(body.local_decls.len())
+ });
+
+ // First, go forwards for `MaybeInitializedLocals` and apply intra-statement/terminator
+ // conflicts.
+ for (i, statement) in data.statements.iter().enumerate() {
+ this.record_statement_conflicts(statement);
+
+ let loc = Location { block, statement_index: i };
+ init.seek_before_primary_effect(loc);
+
+ live_and_init_locals[i].clone_from(init.get());
+ }
+
+ this.record_terminator_conflicts(data.terminator());
+ let term_loc = Location { block, statement_index: data.statements.len() };
+ init.seek_before_primary_effect(term_loc);
+ live_and_init_locals[term_loc.statement_index].clone_from(init.get());
+
+            // Now, go backwards and intersect with the liveness results.
+ for statement_index in (0..=data.statements.len()).rev() {
+ let loc = Location { block, statement_index };
+ live.seek_after_primary_effect(loc);
+
+ live_and_init_locals[statement_index].intersect(live.get());
+
+ trace!("record conflicts at {:?}", loc);
+
+ this.record_dataflow_conflicts(&mut live_and_init_locals[statement_index]);
+ }
+
+ init.seek_to_block_end(block);
+ live.seek_to_block_end(block);
+ let mut conflicts = init.get().clone();
+ conflicts.intersect(live.get());
+ trace!("record conflicts at end of {:?}", block);
+
+ this.record_dataflow_conflicts(&mut conflicts);
+ }
+
+ this
+ }
+
+ fn record_dataflow_conflicts(&mut self, new_conflicts: &mut BitSet<Local>) {
+ // Remove all locals that are not candidates.
+ new_conflicts.intersect(self.relevant_locals);
+
+ for local in new_conflicts.iter() {
+ self.matrix.union_row_with(&new_conflicts, local);
+ }
+ }
+
+ fn record_local_conflict(&mut self, a: Local, b: Local, why: &str) {
+ trace!("conflict {:?} <-> {:?} due to {}", a, b, why);
+ self.matrix.insert(a, b);
+ self.matrix.insert(b, a);
+ }
+
+ /// Records locals that must not overlap during the evaluation of `stmt`. These locals conflict
+ /// and must not be merged.
+ fn record_statement_conflicts(&mut self, stmt: &Statement<'_>) {
+ match &stmt.kind {
+ // While the left and right sides of an assignment must not overlap, we do not mark
+ // conflicts here as that would make this optimization useless. When we optimize, we
+ // eliminate the resulting self-assignments automatically.
+ StatementKind::Assign(_) => {}
+
+ StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::StorageLive(..)
+ | StatementKind::StorageDead(..)
+ | StatementKind::Retag(..)
+ | StatementKind::FakeRead(..)
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Nop => {}
+ }
+ }
+
+ fn record_terminator_conflicts(&mut self, term: &Terminator<'_>) {
+ match &term.kind {
+ TerminatorKind::DropAndReplace {
+ place: dropped_place,
+ value,
+ target: _,
+ unwind: _,
+ } => {
+ if let Some(place) = value.place()
+ && !place.is_indirect()
+ && !dropped_place.is_indirect()
+ {
+ self.record_local_conflict(
+ place.local,
+ dropped_place.local,
+ "DropAndReplace operand overlap",
+ );
+ }
+ }
+ TerminatorKind::Yield { value, resume: _, resume_arg, drop: _ } => {
+ if let Some(place) = value.place() {
+ if !place.is_indirect() && !resume_arg.is_indirect() {
+ self.record_local_conflict(
+ place.local,
+ resume_arg.local,
+ "Yield operand overlap",
+ );
+ }
+ }
+ }
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ target: _,
+ cleanup: _,
+ from_hir_call: _,
+ fn_span: _,
+ } => {
+ // No arguments may overlap with the destination.
+ for arg in args.iter().chain(Some(func)) {
+ if let Some(place) = arg.place() {
+ if !place.is_indirect() && !destination.is_indirect() {
+ self.record_local_conflict(
+ destination.local,
+ place.local,
+ "call dest/arg overlap",
+ );
+ }
+ }
+ }
+ }
+ TerminatorKind::InlineAsm {
+ template: _,
+ operands,
+ options: _,
+ line_spans: _,
+ destination: _,
+ cleanup: _,
+ } => {
+                // The intended semantics here aren't documented; we just assume that nothing
+                // the assembly could write to may overlap with any other operand.
+ for op in operands {
+ match op {
+ InlineAsmOperand::Out { reg: _, late: _, place: Some(dest_place) }
+ | InlineAsmOperand::InOut {
+ reg: _,
+ late: _,
+ in_value: _,
+ out_place: Some(dest_place),
+ } => {
+                            // For the output place `dest_place`, record conflicts with all
+                            // places accessed by the inline asm.
+ for op in operands {
+ match op {
+ InlineAsmOperand::In { reg: _, value } => {
+ if let Some(p) = value.place()
+ && !p.is_indirect()
+ && !dest_place.is_indirect()
+ {
+ self.record_local_conflict(
+ p.local,
+ dest_place.local,
+ "asm! operand overlap",
+ );
+ }
+ }
+ InlineAsmOperand::Out {
+ reg: _,
+ late: _,
+ place: Some(place),
+ } => {
+ if !place.is_indirect() && !dest_place.is_indirect() {
+ self.record_local_conflict(
+ place.local,
+ dest_place.local,
+ "asm! operand overlap",
+ );
+ }
+ }
+ InlineAsmOperand::InOut {
+ reg: _,
+ late: _,
+ in_value,
+ out_place,
+ } => {
+ if let Some(place) = in_value.place()
+ && !place.is_indirect()
+ && !dest_place.is_indirect()
+ {
+ self.record_local_conflict(
+ place.local,
+ dest_place.local,
+ "asm! operand overlap",
+ );
+ }
+
+ if let Some(place) = out_place
+ && !place.is_indirect()
+ && !dest_place.is_indirect()
+ {
+ self.record_local_conflict(
+ place.local,
+ dest_place.local,
+ "asm! operand overlap",
+ );
+ }
+ }
+ InlineAsmOperand::Out { reg: _, late: _, place: None }
+ | InlineAsmOperand::Const { value: _ }
+ | InlineAsmOperand::SymFn { value: _ }
+ | InlineAsmOperand::SymStatic { def_id: _ } => {}
+ }
+ }
+ }
+ InlineAsmOperand::InOut {
+ reg: _,
+ late: _,
+ in_value: _,
+ out_place: None,
+ }
+ | InlineAsmOperand::In { reg: _, value: _ }
+ | InlineAsmOperand::Out { reg: _, late: _, place: None }
+ | InlineAsmOperand::Const { value: _ }
+ | InlineAsmOperand::SymFn { value: _ }
+ | InlineAsmOperand::SymStatic { def_id: _ } => {}
+ }
+ }
+ }
+
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => {}
+ }
+ }
+
+ /// Checks whether `a` and `b` may be merged. Returns `false` if there's a conflict.
+ fn can_unify(&mut self, a: Local, b: Local) -> bool {
+ // After some locals have been unified, their conflicts are only tracked in the root key,
+ // so look that up.
+ let a = self.unified_locals.find(a).0;
+ let b = self.unified_locals.find(b).0;
+
+ if a == b {
+ // Already merged (part of the same connected component).
+ return false;
+ }
+
+ if self.matrix.contains(a, b) {
+ // Conflict (derived via dataflow, intra-statement conflicts, or inherited from another
+ // local during unification).
+ return false;
+ }
+
+ true
+ }
+
+ /// Merges the conflicts of `a` and `b`, so that each one inherits all conflicts of the other.
+ ///
+ /// `can_unify` must have returned `true` for the same locals, or this may panic or lead to
+ /// miscompiles.
+ ///
+ /// This is called when the pass makes the decision to unify `a` and `b` (or parts of `a` and
+ /// `b`) and is needed to ensure that future unification decisions take potentially newly
+ /// introduced conflicts into account.
+ ///
+ /// For an example, assume we have locals `_0`, `_1`, `_2`, and `_3`. There are these conflicts:
+ ///
+ /// * `_0` <-> `_1`
+ /// * `_1` <-> `_2`
+ /// * `_3` <-> `_0`
+ ///
+ /// We then decide to merge `_2` with `_3` since they don't conflict. Then we decide to merge
+ /// `_2` with `_0`, which also doesn't have a conflict in the above list. However `_2` is now
+ /// `_3`, which does conflict with `_0`.
+ fn unify(&mut self, a: Local, b: Local) {
+ trace!("unify({:?}, {:?})", a, b);
+
+ // Get the root local of the connected components. The root local stores the conflicts of
+ // all locals in the connected component (and *is stored* as the conflicting local of other
+ // locals).
+ let a = self.unified_locals.find(a).0;
+ let b = self.unified_locals.find(b).0;
+ assert_ne!(a, b);
+
+ trace!("roots: a={:?}, b={:?}", a, b);
+ trace!("{:?} conflicts: {:?}", a, self.matrix.iter(a).format(", "));
+ trace!("{:?} conflicts: {:?}", b, self.matrix.iter(b).format(", "));
+
+ self.unified_locals.union(a, b);
+
+ let root = self.unified_locals.find(a).0;
+ assert!(root == a || root == b);
+
+ // Make all locals that conflict with `a` also conflict with `b`, and vice versa.
+ self.unify_cache.clear();
+ for conflicts_with_a in self.matrix.iter(a) {
+ self.unify_cache.insert(conflicts_with_a);
+ }
+ for conflicts_with_b in self.matrix.iter(b) {
+ self.unify_cache.insert(conflicts_with_b);
+ }
+ for conflicts_with_a_or_b in self.unify_cache.iter() {
+ // Set both `a` and `b` for this local's row.
+ self.matrix.insert(conflicts_with_a_or_b, a);
+ self.matrix.insert(conflicts_with_a_or_b, b);
+ }
+
+ // Write the locals `a` conflicts with to `b`'s row.
+ self.matrix.union_rows(a, b);
+ // Write the locals `b` conflicts with to `a`'s row.
+ self.matrix.union_rows(b, a);
+ }
+}
+
+/// A `dest = {move} src;` statement at `loc`.
+///
+/// We want to consider merging `dest` and `src` due to this assignment.
+#[derive(Debug, Copy, Clone)]
+struct CandidateAssignment<'tcx> {
+ /// Does not contain indirection or indexing (so the only local it contains is the place base).
+ dest: Place<'tcx>,
+ src: Local,
+ loc: Location,
+}
+
+/// Scans the MIR for assignments between locals that we might want to consider merging.
+///
+/// This will filter out assignments that do not match the right form (as described in the top-level
+/// comment) and also throw out assignments that involve a local that has its address taken or is
+/// otherwise ineligible (e.g. locals used as array indices are ignored because we cannot propagate
+/// arbitrary places into array indices).
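+///
+/// A hedged example (locals invented): `_2 = move _3;` yields a candidate with `dest = _2`
+/// and `src = _3`; `(*_2) = move _3;` is rejected because `dest` is indirect, and
+/// `_2 = move (_3.0);` is rejected because `src` is not a plain local.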
+fn find_candidates<'tcx>(body: &Body<'tcx>) -> Vec<CandidateAssignment<'tcx>> {
+ let mut visitor = FindAssignments {
+ body,
+ candidates: Vec::new(),
+ ever_borrowed_locals: borrowed_locals(body),
+ locals_used_as_array_index: locals_used_as_array_index(body),
+ };
+ visitor.visit_body(body);
+ visitor.candidates
+}
+
+struct FindAssignments<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ candidates: Vec<CandidateAssignment<'tcx>>,
+ ever_borrowed_locals: BitSet<Local>,
+ locals_used_as_array_index: BitSet<Local>,
+}
+
+impl<'tcx> Visitor<'tcx> for FindAssignments<'_, 'tcx> {
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ if let StatementKind::Assign(box (
+ dest,
+ Rvalue::Use(Operand::Copy(src) | Operand::Move(src)),
+ )) = &statement.kind
+ {
+ // `dest` must not have pointer indirection.
+ if dest.is_indirect() {
+ return;
+ }
+
+ // `src` must be a plain local.
+ if !src.projection.is_empty() {
+ return;
+ }
+
+ // Since we want to replace `src` with `dest`, `src` must not be required.
+ if is_local_required(src.local, self.body) {
+ return;
+ }
+
+            // Can't optimize if either local ever has its address taken. This optimization
+            // does liveness analysis only based on assignments, and a local can be live even
+            // if it's never assigned to again, because a reference to it might be live.
+ // FIXME: This can be smarter and take `StorageDead` into account (which invalidates
+ // borrows).
+ if self.ever_borrowed_locals.contains(dest.local)
+ || self.ever_borrowed_locals.contains(src.local)
+ {
+ return;
+ }
+
+ assert_ne!(dest.local, src.local, "self-assignments are UB");
+
+ // We can't replace locals occurring in `PlaceElem::Index` for now.
+ if self.locals_used_as_array_index.contains(src.local) {
+ return;
+ }
+
+ for elem in dest.projection {
+ if let PlaceElem::Index(_) = elem {
+ // `dest` contains an indexing projection.
+ return;
+ }
+ }
+
+ self.candidates.push(CandidateAssignment {
+ dest: *dest,
+ src: src.local,
+ loc: location,
+ });
+ }
+ }
+}
+
+/// Some locals are part of the function's interface and cannot be removed.
+///
+/// Note that these locals *can* still be merged with non-required locals by removing that other
+/// local.
+fn is_local_required(local: Local, body: &Body<'_>) -> bool {
+ match body.local_kind(local) {
+ LocalKind::Arg | LocalKind::ReturnPointer => true,
+ LocalKind::Var | LocalKind::Temp => false,
+ }
+}
+
+/// `PlaceElem::Index` only stores a `Local`, so we can't replace that with a full `Place`.
+///
+/// Collect locals used as indices so we don't generate candidates that are impossible to apply
+/// later.
+fn locals_used_as_array_index(body: &Body<'_>) -> BitSet<Local> {
+ let mut visitor = IndexCollector { locals: BitSet::new_empty(body.local_decls.len()) };
+ visitor.visit_body(body);
+ visitor.locals
+}
+
+struct IndexCollector {
+ locals: BitSet<Local>,
+}
+
+impl<'tcx> Visitor<'tcx> for IndexCollector {
+ fn visit_projection_elem(
+ &mut self,
+ local: Local,
+ proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ if let PlaceElem::Index(i) = elem {
+ self.locals.insert(i);
+ }
+ self.super_projection_elem(local, proj_base, elem, context, location);
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/dump_mir.rs b/compiler/rustc_mir_transform/src/dump_mir.rs
new file mode 100644
index 000000000..6b995141a
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/dump_mir.rs
@@ -0,0 +1,28 @@
+//! This pass just dumps MIR at a specified point.
+
+use std::borrow::Cow;
+use std::fs::File;
+use std::io;
+
+use crate::MirPass;
+use rustc_middle::mir::write_mir_pretty;
+use rustc_middle::mir::Body;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::{OutputFilenames, OutputType};
+
+pub struct Marker(pub &'static str);
+
+impl<'tcx> MirPass<'tcx> for Marker {
+ fn name(&self) -> Cow<'_, str> {
+ Cow::Borrowed(self.0)
+ }
+
+ fn run_pass(&self, _tcx: TyCtxt<'tcx>, _body: &mut Body<'tcx>) {}
+}
+
+pub fn emit_mir(tcx: TyCtxt<'_>, outputs: &OutputFilenames) -> io::Result<()> {
+ let path = outputs.path(OutputType::Mir);
+ let mut f = io::BufWriter::new(File::create(&path)?);
+ write_mir_pretty(tcx, None, &mut f)?;
+ Ok(())
+}
diff --git a/compiler/rustc_mir_transform/src/early_otherwise_branch.rs b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
new file mode 100644
index 000000000..dba42f7af
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/early_otherwise_branch.rs
@@ -0,0 +1,429 @@
+use rustc_middle::mir::patch::MirPatch;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use std::fmt::Debug;
+
+use super::simplify::simplify_cfg;
+
+/// This pass optimizes something like
+/// ```ignore (syntax-highlighting-only)
+/// let x: Option<()>;
+/// let y: Option<()>;
+/// match (x,y) {
+/// (Some(_), Some(_)) => {0},
+/// _ => {1}
+/// }
+/// ```
+/// into something like
+/// ```ignore (syntax-highlighting-only)
+/// let x: Option<()>;
+/// let y: Option<()>;
+/// let discriminant_x = std::mem::discriminant(x);
+/// let discriminant_y = std::mem::discriminant(y);
+/// if discriminant_x == discriminant_y {
+/// match x {
+/// Some(_) => 0,
+/// _ => 1, // <----
+/// } // | Actually the same bb
+/// } else { // |
+/// 1 // <--------------
+/// }
+/// ```
+///
+/// Specifically, it looks for instances of control flow like this:
+/// ```text
+///
+/// =================
+/// | BB1 |
+/// |---------------| ============================
+/// | ... | /------> | BBC |
+/// |---------------| | |--------------------------|
+/// | switchInt(Q) | | | _cl = discriminant(P) |
+/// | c | --------/ |--------------------------|
+/// | d | -------\ | switchInt(_cl) |
+/// | ... | | | c | ---> BBC.2
+/// | otherwise | --\ | /--- | otherwise |
+/// ================= | | | ============================
+/// | | |
+/// ================= | | |
+/// | BBU | <-| | | ============================
+/// |---------------| | \-------> | BBD |
+/// |---------------| | | |--------------------------|
+/// | unreachable | | | | _dl = discriminant(P) |
+/// ================= | | |--------------------------|
+/// | | | switchInt(_dl) |
+/// ================= | | | d | ---> BBD.2
+/// | BB9 | <--------------- | otherwise |
+/// |---------------| ============================
+/// | ... |
+/// =================
+/// ```
+/// Where the `otherwise` branch on `BB1` is permitted to either go to `BBU` or to `BB9`. In the
+/// code:
+/// - `BB1` is `parent` and `BBC, BBD` are children
+/// - `P` is `child_place`
+/// - `child_ty` is the type of `_cl`.
+/// - `Q` is `parent_op`.
+/// - `parent_ty` is the type of `Q`.
+/// - `BB9` is `destination`
+/// All this is then transformed into:
+/// ```text
+///
+/// =======================
+/// | BB1 |
+/// |---------------------| ============================
+/// | ... | /------> | BBEq |
+/// | _s = discriminant(P)| | |--------------------------|
+/// | _t = Ne(Q, _s) | | |--------------------------|
+/// |---------------------| | | switchInt(Q) |
+/// | switchInt(_t) | | | c | ---> BBC.2
+/// | false | --------/ | d | ---> BBD.2
+/// | otherwise | ---------------- | otherwise |
+/// ======================= | ============================
+/// |
+/// ================= |
+/// | BB9 | <-----------/
+/// |---------------|
+/// | ... |
+/// =================
+/// ```
+///
+/// This is only correct for some `P`, since `P` is now computed outside the original `switchInt`.
+/// The filter on which `P` are allowed (together with discussion of its correctness) is found in
+/// `may_hoist`.
+pub struct EarlyOtherwiseBranch;
+
+impl<'tcx> MirPass<'tcx> for EarlyOtherwiseBranch {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 3 && sess.opts.unstable_opts.unsound_mir_opts
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ trace!("running EarlyOtherwiseBranch on {:?}", body.source);
+
+ let mut should_cleanup = false;
+
+ // Also consider newly generated bbs in the same pass
+ for i in 0..body.basic_blocks().len() {
+ let bbs = body.basic_blocks();
+ let parent = BasicBlock::from_usize(i);
+ let Some(opt_data) = evaluate_candidate(tcx, body, parent) else {
+ continue
+ };
+
+ if !tcx.consider_optimizing(|| format!("EarlyOtherwiseBranch {:?}", &opt_data)) {
+ break;
+ }
+
+ trace!("SUCCESS: found optimization possibility to apply: {:?}", &opt_data);
+
+ should_cleanup = true;
+
+ let TerminatorKind::SwitchInt {
+ discr: parent_op,
+ switch_ty: parent_ty,
+ targets: parent_targets
+ } = &bbs[parent].terminator().kind else {
+ unreachable!()
+ };
+ // Always correct since we can only switch on `Copy` types
+ let parent_op = match parent_op {
+ Operand::Move(x) => Operand::Copy(*x),
+ Operand::Copy(x) => Operand::Copy(*x),
+ Operand::Constant(x) => Operand::Constant(x.clone()),
+ };
+ let statements_before = bbs[parent].statements.len();
+ let parent_end = Location { block: parent, statement_index: statements_before };
+
+ let mut patch = MirPatch::new(body);
+
+ // create temp to store second discriminant in, `_s` in example above
+ let second_discriminant_temp =
+ patch.new_temp(opt_data.child_ty, opt_data.child_source.span);
+
+ patch.add_statement(parent_end, StatementKind::StorageLive(second_discriminant_temp));
+
+ // create assignment of discriminant
+ patch.add_assign(
+ parent_end,
+ Place::from(second_discriminant_temp),
+ Rvalue::Discriminant(opt_data.child_place),
+ );
+
+ // create temp to store inequality comparison between the two discriminants, `_t` in
+ // example above
+ let nequal = BinOp::Ne;
+ let comp_res_type = nequal.ty(tcx, *parent_ty, opt_data.child_ty);
+ let comp_temp = patch.new_temp(comp_res_type, opt_data.child_source.span);
+ patch.add_statement(parent_end, StatementKind::StorageLive(comp_temp));
+
+ // create inequality comparison between the two discriminants
+ let comp_rvalue = Rvalue::BinaryOp(
+ nequal,
+ Box::new((parent_op.clone(), Operand::Move(Place::from(second_discriminant_temp)))),
+ );
+ patch.add_statement(
+ parent_end,
+ StatementKind::Assign(Box::new((Place::from(comp_temp), comp_rvalue))),
+ );
+
+ let eq_new_targets = parent_targets.iter().map(|(value, child)| {
+ let TerminatorKind::SwitchInt{ targets, .. } = &bbs[child].terminator().kind else {
+ unreachable!()
+ };
+ (value, targets.target_for_value(value))
+ });
+ let eq_targets = SwitchTargets::new(eq_new_targets, opt_data.destination);
+
+ // Create `bbEq` in example above
+ let eq_switch = BasicBlockData::new(Some(Terminator {
+ source_info: bbs[parent].terminator().source_info,
+ kind: TerminatorKind::SwitchInt {
+ // switch on the first discriminant, so we can mark the second one as dead
+ discr: parent_op,
+ switch_ty: opt_data.child_ty,
+ targets: eq_targets,
+ },
+ }));
+
+ let eq_bb = patch.new_block(eq_switch);
+
+ // Jump to it on the basis of the inequality comparison
+ let true_case = opt_data.destination;
+ let false_case = eq_bb;
+ patch.patch_terminator(
+ parent,
+ TerminatorKind::if_(
+ tcx,
+ Operand::Move(Place::from(comp_temp)),
+ true_case,
+ false_case,
+ ),
+ );
+
+            // Generate a StorageDead for second_discriminant_temp, which is no longer in use.
+ patch.add_statement(parent_end, StatementKind::StorageDead(second_discriminant_temp));
+
+ // Generate a StorageDead for comp_temp in each of the targets, since we moved it into
+ // the switch
+ for bb in [false_case, true_case].iter() {
+ patch.add_statement(
+ Location { block: *bb, statement_index: 0 },
+ StatementKind::StorageDead(comp_temp),
+ );
+ }
+
+ patch.apply(body);
+ }
+
+ // Since this optimization adds new basic blocks and invalidates others,
+ // clean up the cfg to make it nicer for other passes
+ if should_cleanup {
+ simplify_cfg(tcx, body);
+ }
+ }
+}
+
+/// Returns true if computing the discriminant of `place` may be hoisted out of the branch
+fn may_hoist<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, place: Place<'tcx>) -> bool {
+ // FIXME(JakobDegen): This is unsound. Someone could write code like this:
+ // ```rust
+ // let Q = val;
+ // if discriminant(P) == otherwise {
+ // let ptr = &mut Q as *mut _ as *mut u8;
+ // unsafe { *ptr = 10; } // Any invalid value for the type
+ // }
+ //
+ // match P {
+ // A => match Q {
+ // A => {
+ // // code
+ // }
+ // _ => {
+ // // don't use Q
+ // }
+ // }
+ // _ => {
+ // // don't use Q
+ // }
+ // };
+ // ```
+ //
+ // Hoisting the `discriminant(Q)` out of the `A` arm causes us to compute the discriminant of an
+ // invalid value, which is UB.
+ //
+ // In order to fix this, we would either need to show that the discriminant computation of
+ // `place` is computed in all branches, including the `otherwise` branch, or we would need
+ // another analysis pass to determine that the place is fully initialized. It might even be best
+ // to have the hoisting be performed in a different pass and just do the CFG changing in this
+ // pass.
+ for (place, proj) in place.iter_projections() {
+ match proj {
+ // Dereferencing in the computation of `place` might cause issues from one of two
+ // categories. First, the referent might be invalid. We protect against this by
+ // dereferencing references only (not pointers). Second, the use of a reference may
+ // invalidate other references that are used later (for aliasing reasons). Consider
+ // where such an invalidated reference may appear:
+ // - In `Q`: Not possible since `Q` is used as the operand of a `SwitchInt` and so
+ // cannot contain referenced data.
+ // - In `BBU`: Not possible since that block contains only the `unreachable` terminator
+ // - In `BBC.2, BBD.2`: Not possible, since `discriminant(P)` was computed prior to
+ // reaching that block in the input to our transformation, and so any data
+ // invalidated by that computation could not have been used there.
+ // - In `BB9`: Not possible since control flow might have reached `BB9` via the
+ // `otherwise` branch in `BBC, BBD` in the input to our transformation, which would
+ // have invalidated the data when computing `discriminant(P)`
+ // So dereferencing here is correct.
+ ProjectionElem::Deref => match place.ty(body.local_decls(), tcx).ty.kind() {
+ ty::Ref(..) => {}
+ _ => return false,
+ },
+ // Field projections are always valid
+ ProjectionElem::Field(..) => {}
+            // We cannot allow downcasts either, since the correctness of the downcast may
+            // depend on the parent branch being taken. An easy example of this is
+ // ```
+ // Q = discriminant(_3)
+ // P = (_3 as Variant)
+ // ```
+ // However, checking if the child and parent place are the same and only erroring then
+ // is not sufficient either, since the `discriminant(_3) == 1` (or whatever) check may
+ // be replaced by another optimization pass with any other condition that can be proven
+ // equivalent.
+ ProjectionElem::Downcast(..) => {
+ return false;
+ }
+ // We cannot allow indexing since the index may be out of bounds.
+ _ => {
+ return false;
+ }
+ }
+ }
+ true
+}
+
+#[derive(Debug)]
+struct OptimizationData<'tcx> {
+ destination: BasicBlock,
+ child_place: Place<'tcx>,
+ child_ty: Ty<'tcx>,
+ child_source: SourceInfo,
+}
+
+fn evaluate_candidate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ parent: BasicBlock,
+) -> Option<OptimizationData<'tcx>> {
+ let bbs = body.basic_blocks();
+ let TerminatorKind::SwitchInt {
+ targets,
+ switch_ty: parent_ty,
+ ..
+ } = &bbs[parent].terminator().kind else {
+ return None
+ };
+ let parent_dest = {
+ let poss = targets.otherwise();
+ // If the fallthrough on the parent is trivially unreachable, we can let the
+ // children choose the destination
+        if bbs[poss].statements.is_empty()
+ && bbs[poss].terminator().kind == TerminatorKind::Unreachable
+ {
+ None
+ } else {
+ Some(poss)
+ }
+ };
+ let (_, child) = targets.iter().next()?;
+ let child_terminator = &bbs[child].terminator();
+ let TerminatorKind::SwitchInt {
+ switch_ty: child_ty,
+ targets: child_targets,
+ ..
+ } = &child_terminator.kind else {
+ return None
+ };
+ if child_ty != parent_ty {
+ return None;
+ }
+ let Some(StatementKind::Assign(boxed))
+ = &bbs[child].statements.first().map(|x| &x.kind) else {
+ return None;
+ };
+ let (_, Rvalue::Discriminant(child_place)) = &**boxed else {
+ return None;
+ };
+ let destination = parent_dest.unwrap_or(child_targets.otherwise());
+
+ // Verify that the optimization is legal in general
+ // We can hoist evaluating the child discriminant out of the branch
+ if !may_hoist(tcx, body, *child_place) {
+ return None;
+ }
+
+ // Verify that the optimization is legal for each branch
+ for (value, child) in targets.iter() {
+ if !verify_candidate_branch(&bbs[child], value, *child_place, destination) {
+ return None;
+ }
+ }
+ Some(OptimizationData {
+ destination,
+ child_place: *child_place,
+ child_ty: *child_ty,
+ child_source: child_terminator.source_info,
+ })
+}
+
+fn verify_candidate_branch<'tcx>(
+ branch: &BasicBlockData<'tcx>,
+ value: u128,
+ place: Place<'tcx>,
+ destination: BasicBlock,
+) -> bool {
+ // In order for the optimization to be correct, the branch must...
+ // ...have exactly one statement
+ if branch.statements.len() != 1 {
+ return false;
+ }
+ // ...assign the discriminant of `place` in that statement
+ let StatementKind::Assign(boxed) = &branch.statements[0].kind else {
+ return false
+ };
+ let (discr_place, Rvalue::Discriminant(from_place)) = &**boxed else {
+ return false
+ };
+ if *from_place != place {
+ return false;
+ }
+ // ...make that assignment to a local
+    if !discr_place.projection.is_empty() {
+ return false;
+ }
+ // ...terminate on a `SwitchInt` that invalidates that local
+ let TerminatorKind::SwitchInt{ discr: switch_op, targets, .. } = &branch.terminator().kind else {
+ return false
+ };
+ if *switch_op != Operand::Move(*discr_place) {
+ return false;
+ }
+ // ...fall through to `destination` if the switch misses
+ if destination != targets.otherwise() {
+ return false;
+ }
+ // ...have a branch for value `value`
+ let mut iter = targets.iter();
+ let Some((target_value, _)) = iter.next() else {
+ return false;
+ };
+ if target_value != value {
+ return false;
+ }
+ // ...and have no more branches
+    if iter.next().is_some() {
+        return false;
+    }
+    true
+}
diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
new file mode 100644
index 000000000..44e3945d6
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs
@@ -0,0 +1,184 @@
+//! This pass transforms derefs of Box into a deref of the pointer inside Box.
+//!
+//! Box is not actually a pointer, so it is incorrect to dereference it directly.
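+//!
+//! A hedged sketch of the rewrite (locals invented; the real field projection goes through
+//! `Unique` and `NonNull`, as built by `build_projection` below):
+//!
+//! ```text
+//! // before: _1 has type Box<T>
+//! _2 = (*_1);
+//!
+//! // after: copy the raw pointer out of the Box, then deref that instead
+//! _3 = (((_1.0).0).0);   // the *const T inside Box's Unique<T>/NonNull<T>
+//! _2 = (*_3);
+//! ```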
+
+use crate::MirPass;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::patch::MirPatch;
+use rustc_middle::mir::visit::MutVisitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::{Ty, TyCtxt};
+
+/// Constructs the types used when accessing a Box's pointer
+pub fn build_ptr_tys<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ pointee: Ty<'tcx>,
+ unique_did: DefId,
+ nonnull_did: DefId,
+) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) {
+ let substs = tcx.intern_substs(&[pointee.into()]);
+ let unique_ty = tcx.bound_type_of(unique_did).subst(tcx, substs);
+ let nonnull_ty = tcx.bound_type_of(nonnull_did).subst(tcx, substs);
+ let ptr_ty = tcx.mk_imm_ptr(pointee);
+
+ (unique_ty, nonnull_ty, ptr_ty)
+}
+
+/// Constructs the projection needed to access a Box's pointer
+pub fn build_projection<'tcx>(
+ unique_ty: Ty<'tcx>,
+ nonnull_ty: Ty<'tcx>,
+ ptr_ty: Ty<'tcx>,
+) -> [PlaceElem<'tcx>; 3] {
+ [
+ PlaceElem::Field(Field::new(0), unique_ty),
+ PlaceElem::Field(Field::new(0), nonnull_ty),
+ PlaceElem::Field(Field::new(0), ptr_ty),
+ ]
+}
+
+struct ElaborateBoxDerefVisitor<'tcx, 'a> {
+ tcx: TyCtxt<'tcx>,
+ unique_did: DefId,
+ nonnull_did: DefId,
+ local_decls: &'a mut LocalDecls<'tcx>,
+ patch: MirPatch<'tcx>,
+}
+
+impl<'tcx, 'a> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'tcx, 'a> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_place(
+ &mut self,
+ place: &mut Place<'tcx>,
+ context: visit::PlaceContext,
+ location: Location,
+ ) {
+ let tcx = self.tcx;
+
+ let base_ty = self.local_decls[place.local].ty;
+
+ // Derefer ensures that derefs are always the first projection
+ if place.projection.first() == Some(&PlaceElem::Deref) && base_ty.is_box() {
+ let source_info = self.local_decls[place.local].source_info;
+
+ let (unique_ty, nonnull_ty, ptr_ty) =
+ build_ptr_tys(tcx, base_ty.boxed_ty(), self.unique_did, self.nonnull_did);
+
+ let ptr_local = self.patch.new_temp(ptr_ty, source_info.span);
+ self.local_decls.push(LocalDecl::new(ptr_ty, source_info.span));
+
+ self.patch.add_statement(location, StatementKind::StorageLive(ptr_local));
+
+ self.patch.add_assign(
+ location,
+ Place::from(ptr_local),
+ Rvalue::Use(Operand::Copy(
+ Place::from(place.local)
+ .project_deeper(&build_projection(unique_ty, nonnull_ty, ptr_ty), tcx),
+ )),
+ );
+
+ place.local = ptr_local;
+
+ self.patch.add_statement(
+ Location { block: location.block, statement_index: location.statement_index + 1 },
+ StatementKind::StorageDead(ptr_local),
+ );
+ }
+
+ self.super_place(place, context, location);
+ }
+}
+
+pub struct ElaborateBoxDerefs;
+
+impl<'tcx> MirPass<'tcx> for ElaborateBoxDerefs {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ if let Some(def_id) = tcx.lang_items().owned_box() {
+ let unique_did = tcx.adt_def(def_id).non_enum_variant().fields[0].did;
+
+ let Some(nonnull_def) = tcx.type_of(unique_did).ty_adt_def() else {
+ span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique")
+ };
+
+ let nonnull_did = nonnull_def.non_enum_variant().fields[0].did;
+
+ let patch = MirPatch::new(body);
+
+ let local_decls = &mut body.local_decls;
+
+ let mut visitor =
+ ElaborateBoxDerefVisitor { tcx, unique_did, nonnull_did, local_decls, patch };
+
+ for (block, BasicBlockData { statements, terminator, .. }) in
+ body.basic_blocks.as_mut().iter_enumerated_mut()
+ {
+ let mut index = 0;
+ for statement in statements {
+ let location = Location { block, statement_index: index };
+ visitor.visit_statement(statement, location);
+ index += 1;
+ }
+
+ if let Some(terminator) = terminator
+ && !matches!(terminator.kind, TerminatorKind::Yield{..})
+ {
+ let location = Location { block, statement_index: index };
+ visitor.visit_terminator(terminator, location);
+ }
+
+ let location = Location { block, statement_index: index };
+ match terminator {
+ // yielding into a box is handled when lowering generators
+ Some(Terminator { kind: TerminatorKind::Yield { value, .. }, .. }) => {
+ visitor.visit_operand(value, location);
+ }
+ Some(terminator) => {
+ visitor.visit_terminator(terminator, location);
+ }
+ None => {}
+ }
+ }
+
+ visitor.patch.apply(body);
+
+ for debug_info in body.var_debug_info.iter_mut() {
+ if let VarDebugInfoContents::Place(place) = &mut debug_info.value {
+ let mut new_projections: Option<Vec<_>> = None;
+ let mut last_deref = 0;
+
+ for (i, (base, elem)) in place.iter_projections().enumerate() {
+ let base_ty = base.ty(&body.local_decls, tcx).ty;
+
+ if elem == PlaceElem::Deref && base_ty.is_box() {
+ let new_projections = new_projections.get_or_insert_default();
+
+ let (unique_ty, nonnull_ty, ptr_ty) =
+ build_ptr_tys(tcx, base_ty.boxed_ty(), unique_did, nonnull_did);
+
+ new_projections.extend_from_slice(&base.projection[last_deref..]);
+ new_projections.extend_from_slice(&build_projection(
+ unique_ty, nonnull_ty, ptr_ty,
+ ));
+ new_projections.push(PlaceElem::Deref);
+
+ last_deref = i;
+ }
+ }
+
+ if let Some(mut new_projections) = new_projections {
+ new_projections.extend_from_slice(&place.projection[last_deref..]);
+ place.projection = tcx.intern_place_elems(&new_projections);
+ }
+ }
+ }
+ } else {
+ // `Box` is not present, so this pass doesn't need to do anything
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs
new file mode 100644
index 000000000..9c1fcbaa6
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs
@@ -0,0 +1,613 @@
+use crate::deref_separator::deref_finder;
+use crate::MirPass;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::patch::MirPatch;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_mir_dataflow::elaborate_drops::{elaborate_drop, DropFlagState, Unwind};
+use rustc_mir_dataflow::elaborate_drops::{DropElaborator, DropFlagMode, DropStyle};
+use rustc_mir_dataflow::impls::{MaybeInitializedPlaces, MaybeUninitializedPlaces};
+use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
+use rustc_mir_dataflow::on_lookup_result_bits;
+use rustc_mir_dataflow::un_derefer::UnDerefer;
+use rustc_mir_dataflow::MoveDataParamEnv;
+use rustc_mir_dataflow::{on_all_children_bits, on_all_drop_children_bits};
+use rustc_mir_dataflow::{Analysis, ResultsCursor};
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+use std::fmt;
+
+pub struct ElaborateDrops;
+
+impl<'tcx> MirPass<'tcx> for ElaborateDrops {
+ fn phase_change(&self) -> Option<MirPhase> {
+ Some(MirPhase::DropsLowered)
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ debug!("elaborate_drops({:?} @ {:?})", body.source, body.span);
+
+ let def_id = body.source.def_id();
+ let param_env = tcx.param_env_reveal_all_normalized(def_id);
+ let (side_table, move_data) = match MoveData::gather_moves(body, tcx, param_env) {
+ Ok(move_data) => move_data,
+ Err((move_data, _)) => {
+ tcx.sess.delay_span_bug(
+ body.span,
+ "No `move_errors` should be allowed in MIR borrowck",
+ );
+ (Default::default(), move_data)
+ }
+ };
+ let un_derefer = UnDerefer { tcx, derefer_sidetable: side_table };
+ let elaborate_patch = {
+ let body = &*body;
+ let env = MoveDataParamEnv { move_data, param_env };
+ let dead_unwinds = find_dead_unwinds(tcx, body, &env, &un_derefer);
+
+ let inits = MaybeInitializedPlaces::new(tcx, body, &env)
+ .into_engine(tcx, body)
+ .dead_unwinds(&dead_unwinds)
+ .pass_name("elaborate_drops")
+ .iterate_to_fixpoint()
+ .into_results_cursor(body);
+
+ let uninits = MaybeUninitializedPlaces::new(tcx, body, &env)
+ .mark_inactive_variants_as_uninit()
+ .into_engine(tcx, body)
+ .dead_unwinds(&dead_unwinds)
+ .pass_name("elaborate_drops")
+ .iterate_to_fixpoint()
+ .into_results_cursor(body);
+
+ ElaborateDropsCtxt {
+ tcx,
+ body,
+ env: &env,
+ init_data: InitializationData { inits, uninits },
+ drop_flags: Default::default(),
+ patch: MirPatch::new(body),
+ un_derefer,
+ }
+ .elaborate()
+ };
+ elaborate_patch.apply(body);
+ deref_finder(tcx, body);
+ }
+}
+
+/// Returns the set of basic blocks whose unwind edges are known not to be
+/// reachable, because they are `drop` terminators that can't drop anything.
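+///
+/// For example (illustrative), a `drop(x)` terminator where `x` is statically
+/// known to be uninitialized at that point has no drop glue to run, so its
+/// unwind edge can never be taken.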
+fn find_dead_unwinds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ env: &MoveDataParamEnv<'tcx>,
+ und: &UnDerefer<'tcx>,
+) -> BitSet<BasicBlock> {
+ debug!("find_dead_unwinds({:?})", body.span);
+ // We only need to do this pass once, because unwind edges can only
+ // reach cleanup blocks, which can't have unwind edges themselves.
+ let mut dead_unwinds = BitSet::new_empty(body.basic_blocks().len());
+ let mut flow_inits = MaybeInitializedPlaces::new(tcx, body, &env)
+ .into_engine(tcx, body)
+ .pass_name("find_dead_unwinds")
+ .iterate_to_fixpoint()
+ .into_results_cursor(body);
+ for (bb, bb_data) in body.basic_blocks().iter_enumerated() {
+ let place = match bb_data.terminator().kind {
+ TerminatorKind::Drop { ref place, unwind: Some(_), .. }
+ | TerminatorKind::DropAndReplace { ref place, unwind: Some(_), .. } => {
+ und.derefer(place.as_ref(), body).unwrap_or(*place)
+ }
+ _ => continue,
+ };
+
+ debug!("find_dead_unwinds @ {:?}: {:?}", bb, bb_data);
+
+ let LookupResult::Exact(path) = env.move_data.rev_lookup.find(place.as_ref()) else {
+ debug!("find_dead_unwinds: has parent; skipping");
+ continue;
+ };
+
+ flow_inits.seek_before_primary_effect(body.terminator_loc(bb));
+ debug!(
+ "find_dead_unwinds @ {:?}: path({:?})={:?}; init_data={:?}",
+ bb,
+ place,
+ path,
+ flow_inits.get()
+ );
+
+ let mut maybe_live = false;
+ on_all_drop_children_bits(tcx, body, &env, path, |child| {
+ maybe_live |= flow_inits.contains(child);
+ });
+
+ debug!("find_dead_unwinds @ {:?}: maybe_live={}", bb, maybe_live);
+ if !maybe_live {
+ dead_unwinds.insert(bb);
+ }
+ }
+
+ dead_unwinds
+}
+
+struct InitializationData<'mir, 'tcx> {
+ inits: ResultsCursor<'mir, 'tcx, MaybeInitializedPlaces<'mir, 'tcx>>,
+ uninits: ResultsCursor<'mir, 'tcx, MaybeUninitializedPlaces<'mir, 'tcx>>,
+}
+
+impl InitializationData<'_, '_> {
+ fn seek_before(&mut self, loc: Location) {
+ self.inits.seek_before_primary_effect(loc);
+ self.uninits.seek_before_primary_effect(loc);
+ }
+
+ fn maybe_live_dead(&self, path: MovePathIndex) -> (bool, bool) {
+ (self.inits.contains(path), self.uninits.contains(path))
+ }
+}
+
+struct Elaborator<'a, 'b, 'tcx> {
+ ctxt: &'a mut ElaborateDropsCtxt<'b, 'tcx>,
+}
+
+impl fmt::Debug for Elaborator<'_, '_, '_> {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Ok(())
+ }
+}
+
+impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, 'tcx> {
+ type Path = MovePathIndex;
+
+ fn patch(&mut self) -> &mut MirPatch<'tcx> {
+ &mut self.ctxt.patch
+ }
+
+ fn body(&self) -> &'a Body<'tcx> {
+ self.ctxt.body
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.ctxt.tcx
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.ctxt.param_env()
+ }
+
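+ // For illustration (not from the original source): under `DropFlagMode::Deep`,
+ // if every drop-relevant child of the place is definitely initialized, the
+ // drop is `Static` (unconditional); if initialization is uncertain but there
+ // is exactly one child, it is `Conditional` (guarded by a drop flag); with
+ // several maybe-initialized children it is `Open` (expanded field by field);
+ // and if nothing may be initialized, the drop is `Dead` and removed.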
+ fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle {
+ let ((maybe_live, maybe_dead), multipart) = match mode {
+ DropFlagMode::Shallow => (self.ctxt.init_data.maybe_live_dead(path), false),
+ DropFlagMode::Deep => {
+ let mut some_live = false;
+ let mut some_dead = false;
+ let mut children_count = 0;
+ on_all_drop_children_bits(self.tcx(), self.body(), self.ctxt.env, path, |child| {
+ let (live, dead) = self.ctxt.init_data.maybe_live_dead(child);
+ debug!("elaborate_drop: state({:?}) = {:?}", child, (live, dead));
+ some_live |= live;
+ some_dead |= dead;
+ children_count += 1;
+ });
+ ((some_live, some_dead), children_count != 1)
+ }
+ };
+ match (maybe_live, maybe_dead, multipart) {
+ (false, _, _) => DropStyle::Dead,
+ (true, false, _) => DropStyle::Static,
+ (true, true, false) => DropStyle::Conditional,
+ (true, true, true) => DropStyle::Open,
+ }
+ }
+
+ fn clear_drop_flag(&mut self, loc: Location, path: Self::Path, mode: DropFlagMode) {
+ match mode {
+ DropFlagMode::Shallow => {
+ self.ctxt.set_drop_flag(loc, path, DropFlagState::Absent);
+ }
+ DropFlagMode::Deep => {
+ on_all_children_bits(
+ self.tcx(),
+ self.body(),
+ self.ctxt.move_data(),
+ path,
+ |child| self.ctxt.set_drop_flag(loc, child, DropFlagState::Absent),
+ );
+ }
+ }
+ }
+
+ fn field_subpath(&self, path: Self::Path, field: Field) -> Option<Self::Path> {
+ rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
+ ProjectionElem::Field(idx, _) => idx == field,
+ _ => false,
+ })
+ }
+
+ fn array_subpath(&self, path: Self::Path, index: u64, size: u64) -> Option<Self::Path> {
+ rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
+ ProjectionElem::ConstantIndex { offset, min_length, from_end } => {
+ debug_assert!(size == min_length, "min_length should be exact for arrays");
+ assert!(!from_end, "from_end should not be used for array element ConstantIndex");
+ offset == index
+ }
+ _ => false,
+ })
+ }
+
+ fn deref_subpath(&self, path: Self::Path) -> Option<Self::Path> {
+ rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| {
+ e == ProjectionElem::Deref
+ })
+ }
+
+ fn downcast_subpath(&self, path: Self::Path, variant: VariantIdx) -> Option<Self::Path> {
+ rustc_mir_dataflow::move_path_children_matching(self.ctxt.move_data(), path, |e| match e {
+ ProjectionElem::Downcast(_, idx) => idx == variant,
+ _ => false,
+ })
+ }
+
+ fn get_drop_flag(&mut self, path: Self::Path) -> Option<Operand<'tcx>> {
+ self.ctxt.drop_flag(path).map(Operand::Copy)
+ }
+}
+
+struct ElaborateDropsCtxt<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+ env: &'a MoveDataParamEnv<'tcx>,
+ init_data: InitializationData<'a, 'tcx>,
+ drop_flags: FxHashMap<MovePathIndex, Local>,
+ patch: MirPatch<'tcx>,
+ un_derefer: UnDerefer<'tcx>,
+}
+
+impl<'b, 'tcx> ElaborateDropsCtxt<'b, 'tcx> {
+ fn move_data(&self) -> &'b MoveData<'tcx> {
+ &self.env.move_data
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.env.param_env
+ }
+
+ fn create_drop_flag(&mut self, index: MovePathIndex, span: Span) {
+ let tcx = self.tcx;
+ let patch = &mut self.patch;
+ debug!("create_drop_flag({:?})", self.body.span);
+ self.drop_flags.entry(index).or_insert_with(|| patch.new_internal(tcx.types.bool, span));
+ }
+
+ fn drop_flag(&mut self, index: MovePathIndex) -> Option<Place<'tcx>> {
+ self.drop_flags.get(&index).map(|t| Place::from(*t))
+ }
+
+ /// Creates a patch that elaborates all drops in the input MIR.
+ fn elaborate(mut self) -> MirPatch<'tcx> {
+ self.collect_drop_flags();
+
+ self.elaborate_drops();
+
+ self.drop_flags_on_init();
+ self.drop_flags_for_fn_rets();
+ self.drop_flags_for_args();
+ self.drop_flags_for_locs();
+
+ self.patch
+ }
+
+ fn collect_drop_flags(&mut self) {
+ for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+ let terminator = data.terminator();
+ let place = match terminator.kind {
+ TerminatorKind::Drop { ref place, .. }
+ | TerminatorKind::DropAndReplace { ref place, .. } => {
+ self.un_derefer.derefer(place.as_ref(), self.body).unwrap_or(*place)
+ }
+ _ => continue,
+ };
+
+ self.init_data.seek_before(self.body.terminator_loc(bb));
+
+ let path = self.move_data().rev_lookup.find(place.as_ref());
+ debug!("collect_drop_flags: {:?}, place {:?} ({:?})", bb, place, path);
+
+ let path = match path {
+ LookupResult::Exact(e) => e,
+ LookupResult::Parent(None) => continue,
+ LookupResult::Parent(Some(parent)) => {
+ let (_maybe_live, maybe_dead) = self.init_data.maybe_live_dead(parent);
+
+ if self.body.local_decls[place.local].is_deref_temp() {
+ continue;
+ }
+
+ if maybe_dead {
+ self.tcx.sess.delay_span_bug(
+ terminator.source_info.span,
+ &format!(
+ "drop of untracked, uninitialized value {:?}, place {:?} ({:?})",
+ bb, place, path
+ ),
+ );
+ }
+ continue;
+ }
+ };
+
+ on_all_drop_children_bits(self.tcx, self.body, self.env, path, |child| {
+ let (maybe_live, maybe_dead) = self.init_data.maybe_live_dead(child);
+ debug!(
+ "collect_drop_flags: collecting {:?} from {:?}@{:?} - {:?}",
+ child,
+ place,
+ path,
+ (maybe_live, maybe_dead)
+ );
+ if maybe_live && maybe_dead {
+ self.create_drop_flag(child, terminator.source_info.span)
+ }
+ });
+ }
+ }
+
+ fn elaborate_drops(&mut self) {
+ for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+ let loc = Location { block: bb, statement_index: data.statements.len() };
+ let terminator = data.terminator();
+
+ let resume_block = self.patch.resume_block();
+ match terminator.kind {
+ TerminatorKind::Drop { mut place, target, unwind } => {
+ if let Some(new_place) = self.un_derefer.derefer(place.as_ref(), self.body) {
+ place = new_place;
+ }
+
+ self.init_data.seek_before(loc);
+ match self.move_data().rev_lookup.find(place.as_ref()) {
+ LookupResult::Exact(path) => elaborate_drop(
+ &mut Elaborator { ctxt: self },
+ terminator.source_info,
+ place,
+ path,
+ target,
+ if data.is_cleanup {
+ Unwind::InCleanup
+ } else {
+ Unwind::To(unwind.unwrap_or(resume_block))
+ },
+ bb,
+ ),
+ LookupResult::Parent(..) => {
+ self.tcx.sess.delay_span_bug(
+ terminator.source_info.span,
+ &format!("drop of untracked value {:?}", bb),
+ );
+ }
+ }
+ }
+ TerminatorKind::DropAndReplace { mut place, ref value, target, unwind } => {
+ assert!(!data.is_cleanup);
+
+ if let Some(new_place) = self.un_derefer.derefer(place.as_ref(), self.body) {
+ place = new_place;
+ }
+ self.elaborate_replace(loc, place, value, target, unwind);
+ }
+ _ => continue,
+ }
+ }
+ }
+
+ /// Elaborate a MIR `replace` terminator. This instruction
+ /// is not directly handled by codegen, and therefore
+ /// must be desugared.
+ ///
+ /// The desugaring drops the location if needed, and then writes
+ /// the value (including setting the drop flag) over it in *both* arms.
+ ///
+ /// The `replace` terminator can also be called on places that
+ /// are not tracked by elaboration (for example,
+ /// `replace x[i] <- tmp0`). The borrow checker requires that
+ /// these locations be initialized before the assignment,
+ /// so we just generate an unconditional drop.
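+ ///
+ /// For illustration (a sketch, not exact MIR), `replace p <- v` with target
+ /// `bb_t` and unwind `bb_u` becomes roughly:
+ ///
+ ///     bb: drop(p) -> [return: bb_t', unwind: bb_u']
+ ///     bb_t' (new): p = v; set drop flags; goto bb_t
+ ///     bb_u' (new, cleanup): p = v; goto bb_u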
+ fn elaborate_replace(
+ &mut self,
+ loc: Location,
+ place: Place<'tcx>,
+ value: &Operand<'tcx>,
+ target: BasicBlock,
+ unwind: Option<BasicBlock>,
+ ) {
+ let bb = loc.block;
+ let data = &self.body[bb];
+ let terminator = data.terminator();
+ assert!(!data.is_cleanup, "DropAndReplace in unwind path not supported");
+
+ let assign = Statement {
+ kind: StatementKind::Assign(Box::new((place, Rvalue::Use(value.clone())))),
+ source_info: terminator.source_info,
+ };
+
+ let unwind = unwind.unwrap_or_else(|| self.patch.resume_block());
+ let unwind = self.patch.new_block(BasicBlockData {
+ statements: vec![assign.clone()],
+ terminator: Some(Terminator {
+ kind: TerminatorKind::Goto { target: unwind },
+ ..*terminator
+ }),
+ is_cleanup: true,
+ });
+
+ let target = self.patch.new_block(BasicBlockData {
+ statements: vec![assign],
+ terminator: Some(Terminator { kind: TerminatorKind::Goto { target }, ..*terminator }),
+ is_cleanup: false,
+ });
+
+ match self.move_data().rev_lookup.find(place.as_ref()) {
+ LookupResult::Exact(path) => {
+ debug!("elaborate_drop_and_replace({:?}) - tracked {:?}", terminator, path);
+ self.init_data.seek_before(loc);
+ elaborate_drop(
+ &mut Elaborator { ctxt: self },
+ terminator.source_info,
+ place,
+ path,
+ target,
+ Unwind::To(unwind),
+ bb,
+ );
+ on_all_children_bits(self.tcx, self.body, self.move_data(), path, |child| {
+ self.set_drop_flag(
+ Location { block: target, statement_index: 0 },
+ child,
+ DropFlagState::Present,
+ );
+ self.set_drop_flag(
+ Location { block: unwind, statement_index: 0 },
+ child,
+ DropFlagState::Present,
+ );
+ });
+ }
+ LookupResult::Parent(parent) => {
+ // drop and replace behind a pointer/array/whatever. The location
+ // must be initialized.
+ debug!("elaborate_drop_and_replace({:?}) - untracked {:?}", terminator, parent);
+ self.patch.patch_terminator(
+ bb,
+ TerminatorKind::Drop { place, target, unwind: Some(unwind) },
+ );
+ }
+ }
+ }
+
+ fn constant_bool(&self, span: Span, val: bool) -> Rvalue<'tcx> {
+ Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span,
+ user_ty: None,
+ literal: ConstantKind::from_bool(self.tcx, val),
+ })))
+ }
+
+ fn set_drop_flag(&mut self, loc: Location, path: MovePathIndex, val: DropFlagState) {
+ if let Some(&flag) = self.drop_flags.get(&path) {
+ let span = self.patch.source_info_for_location(self.body, loc).span;
+ let val = self.constant_bool(span, val.value());
+ self.patch.add_assign(loc, Place::from(flag), val);
+ }
+ }
+
+ fn drop_flags_on_init(&mut self) {
+ let loc = Location::START;
+ let span = self.patch.source_info_for_location(self.body, loc).span;
+ let false_ = self.constant_bool(span, false);
+ for flag in self.drop_flags.values() {
+ self.patch.add_assign(loc, Place::from(*flag), false_.clone());
+ }
+ }
+
+ fn drop_flags_for_fn_rets(&mut self) {
+ for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+ if let TerminatorKind::Call {
+ destination, target: Some(tgt), cleanup: Some(_), ..
+ } = data.terminator().kind
+ {
+ assert!(!self.patch.is_patched(bb));
+
+ let loc = Location { block: tgt, statement_index: 0 };
+ let path = self.move_data().rev_lookup.find(destination.as_ref());
+ on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
+ self.set_drop_flag(loc, child, DropFlagState::Present)
+ });
+ }
+ }
+ }
+
+ fn drop_flags_for_args(&mut self) {
+ let loc = Location::START;
+ rustc_mir_dataflow::drop_flag_effects_for_function_entry(
+ self.tcx,
+ self.body,
+ self.env,
+ |path, ds| {
+ self.set_drop_flag(loc, path, ds);
+ },
+ )
+ }
+
+ fn drop_flags_for_locs(&mut self) {
+ // We intentionally iterate only over the *old* basic blocks.
+ //
+ // Basic blocks created by drop elaboration update their
+ // drop flags by themselves, to avoid the drop flags being
+ // clobbered before they are read.
+
+ for (bb, data) in self.body.basic_blocks().iter_enumerated() {
+ debug!("drop_flags_for_locs({:?})", data);
+ for i in 0..(data.statements.len() + 1) {
+ debug!("drop_flag_for_locs: stmt {}", i);
+ let mut allow_initializations = true;
+ if i == data.statements.len() {
+ match data.terminator().kind {
+ TerminatorKind::Drop { .. } => {
+ // drop elaboration should handle that by itself
+ continue;
+ }
+ TerminatorKind::DropAndReplace { .. } => {
+ // this contains the move of the source and
+ // the initialization of the destination. We
+ // only want the former - the latter is handled
+ // by the elaboration code and must be done
+ // *after* the destination is dropped.
+ assert!(self.patch.is_patched(bb));
+ allow_initializations = false;
+ }
+ TerminatorKind::Resume => {
+ // It is possible for `Resume` to be patched
+ // (in particular it can be patched to be replaced with
+ // a Goto; see `MirPatch::new`).
+ }
+ _ => {
+ assert!(!self.patch.is_patched(bb));
+ }
+ }
+ }
+ let loc = Location { block: bb, statement_index: i };
+ rustc_mir_dataflow::drop_flag_effects_for_location(
+ self.tcx,
+ self.body,
+ self.env,
+ loc,
+ |path, ds| {
+ if ds == DropFlagState::Absent || allow_initializations {
+ self.set_drop_flag(loc, path, ds)
+ }
+ },
+ )
+ }
+
+ // There may be a critical edge after this call,
+ // so mark the return as initialized *before* the
+ // call.
+ if let TerminatorKind::Call { destination, target: Some(_), cleanup: None, .. } =
+ data.terminator().kind
+ {
+ assert!(!self.patch.is_patched(bb));
+
+ let loc = Location { block: bb, statement_index: data.statements.len() };
+ let path = self.move_data().rev_lookup.find(destination.as_ref());
+ on_lookup_result_bits(self.tcx, self.body, self.move_data(), path, |child| {
+ self.set_drop_flag(loc, child, DropFlagState::Present)
+ });
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
new file mode 100644
index 000000000..7728fdaff
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs
@@ -0,0 +1,170 @@
+use rustc_hir::def_id::{CrateNum, LocalDefId, LOCAL_CRATE};
+use rustc_middle::mir::*;
+use rustc_middle::ty::layout;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_session::lint::builtin::FFI_UNWIND_CALLS;
+use rustc_target::spec::abi::Abi;
+use rustc_target::spec::PanicStrategy;
+
+fn abi_can_unwind(abi: Abi) -> bool {
+ use Abi::*;
+ match abi {
+ C { unwind }
+ | System { unwind }
+ | Cdecl { unwind }
+ | Stdcall { unwind }
+ | Fastcall { unwind }
+ | Vectorcall { unwind }
+ | Thiscall { unwind }
+ | Aapcs { unwind }
+ | Win64 { unwind }
+ | SysV64 { unwind } => unwind,
+ PtxKernel
+ | Msp430Interrupt
+ | X86Interrupt
+ | AmdGpuKernel
+ | EfiApi
+ | AvrInterrupt
+ | AvrNonBlockingInterrupt
+ | CCmseNonSecureCall
+ | Wasm
+ | RustIntrinsic
+ | PlatformIntrinsic
+ | Unadjusted => false,
+ Rust | RustCall | RustCold => true,
+ }
+}
+
+// Check if the body of this def_id can possibly leak a foreign unwind into Rust code.
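+//
+// For illustration (not part of the pass itself): assuming `-C panic=unwind`,
+// a body like the following would be flagged, because the callee uses an
+// FFI-unwind ABI and the caller can itself unwind:
+//
+//     extern "C-unwind" {
+//         fn may_unwind();
+//     }
+//     fn caller() {
+//         unsafe { may_unwind() } // `ffi_unwind_calls` lint fires here
+//     }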
+fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool {
+ debug!("has_ffi_unwind_calls({local_def_id:?})");
+
+ // Only perform the check on functions, because constants cannot call FFI functions.
+ let def_id = local_def_id.to_def_id();
+ let kind = tcx.def_kind(def_id);
+ if !kind.is_fn_like() {
+ return false;
+ }
+
+ let body = &*tcx.mir_built(ty::WithOptConstParam::unknown(local_def_id)).borrow();
+
+ let body_ty = tcx.type_of(def_id);
+ let body_abi = match body_ty.kind() {
+ ty::FnDef(..) => body_ty.fn_sig(tcx).abi(),
+ ty::Closure(..) => Abi::RustCall,
+ ty::Generator(..) => Abi::Rust,
+ _ => span_bug!(body.span, "unexpected body ty: {:?}", body_ty),
+ };
+ let body_can_unwind = layout::fn_can_unwind(tcx, Some(def_id), body_abi);
+
+ // Foreign unwinds cannot leak past functions that themselves cannot unwind.
+ if !body_can_unwind {
+ return false;
+ }
+
+ let mut tainted = false;
+
+ for block in body.basic_blocks() {
+ if block.is_cleanup {
+ continue;
+ }
+ let Some(terminator) = &block.terminator else { continue };
+ let TerminatorKind::Call { func, .. } = &terminator.kind else { continue };
+
+ let ty = func.ty(body, tcx);
+ let sig = ty.fn_sig(tcx);
+
+ // Rust calls cannot themselves create foreign unwinds.
+ if let Abi::Rust | Abi::RustCall | Abi::RustCold = sig.abi() {
+ continue;
+ };
+
+ let fn_def_id = match ty.kind() {
+ ty::FnPtr(_) => None,
+ &ty::FnDef(def_id, _) => {
+ // Rust calls cannot themselves create foreign unwinds.
+ if !tcx.is_foreign_item(def_id) {
+ continue;
+ }
+ Some(def_id)
+ }
+ _ => bug!("invalid callee of type {:?}", ty),
+ };
+
+ if layout::fn_can_unwind(tcx, fn_def_id, sig.abi()) && abi_can_unwind(sig.abi()) {
+ // We have detected a call that can possibly leak a foreign unwind.
+ //
+ // Because the function body itself can unwind, we are not aborting this function call
+ // upon unwind, so this call can possibly leak a foreign unwind into Rust code if the
+ // linked panic runtime is panic-abort.
+
+ let lint_root = body.source_scopes[terminator.source_info.scope]
+ .local_data
+ .as_ref()
+ .assert_crate_local()
+ .lint_root;
+ let span = terminator.source_info.span;
+
+ tcx.struct_span_lint_hir(FFI_UNWIND_CALLS, lint_root, span, |lint| {
+ let msg = match fn_def_id {
+ Some(_) => "call to foreign function with FFI-unwind ABI",
+ None => "call to function pointer with FFI-unwind ABI",
+ };
+ let mut db = lint.build(msg);
+ db.span_label(span, msg);
+ db.emit();
+ });
+
+ tainted = true;
+ }
+ }
+
+ tainted
+}
+
+fn required_panic_strategy(tcx: TyCtxt<'_>, cnum: CrateNum) -> Option<PanicStrategy> {
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ if tcx.is_panic_runtime(LOCAL_CRATE) {
+ return Some(tcx.sess.panic_strategy());
+ }
+
+ if tcx.sess.panic_strategy() == PanicStrategy::Abort {
+ return Some(PanicStrategy::Abort);
+ }
+
+ for def_id in tcx.hir().body_owners() {
+ if tcx.has_ffi_unwind_calls(def_id) {
+ // Given that this crate is compiled with `-C panic=unwind`, the `AbortUnwindingCalls`
+ // MIR pass will not be run on FFI-unwind call sites, so a foreign exception
+ // can enter Rust through these sites.
+ //
+ // On the other hand, crates compiled with `-C panic=abort` expect that all Rust
+ // functions cannot unwind (whether caused by a Rust panic or a foreign exception),
+ // and this expectation mismatch can cause unsoundness (#96926).
+ //
+ // To address this issue, we enforce that if FFI-unwind calls are used in a crate
+ // compiled with `panic=unwind`, then the final panic strategy must be `panic=unwind`.
+ // This ensures that no crate operates under a wrong unwindability assumption.
+ //
+ // It should be noted that it is okay to link `panic=unwind` into a `panic=abort`
+ // program if it contains no FFI-unwind calls. In that case a foreign exception can only
+ // enter Rust in a `panic=abort` crate, which will lead to an abort. There will also
+ // be no exceptions generated from Rust, so the assumption which `panic=abort` crates
+ // make, that no Rust function can unwind, indeed holds for crates compiled with
+ // `panic=unwind` as well. In that case this function returns `None`, indicating that
+ // the crate does not require a particular final panic strategy, and can be freely
+ // linked to crates with either strategy (we need this ability for libstd and its
+ // dependencies).
+ return Some(PanicStrategy::Unwind);
+ }
+ }
+
+ // This crate can be linked with either runtime.
+ None
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { has_ffi_unwind_calls, required_panic_strategy, ..*providers };
+}
diff --git a/compiler/rustc_mir_transform/src/function_item_references.rs b/compiler/rustc_mir_transform/src/function_item_references.rs
new file mode 100644
index 000000000..2e4fe1e3e
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/function_item_references.rs
@@ -0,0 +1,205 @@
+use itertools::Itertools;
+use rustc_errors::Applicability;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{
+ self,
+ subst::{GenericArgKind, Subst, SubstsRef},
+ EarlyBinder, PredicateKind, Ty, TyCtxt,
+};
+use rustc_session::lint::builtin::FUNCTION_ITEM_REFERENCES;
+use rustc_span::{symbol::sym, Span};
+use rustc_target::spec::abi::Abi;
+
+use crate::MirLint;
+
+pub struct FunctionItemReferences;
+
+impl<'tcx> MirLint<'tcx> for FunctionItemReferences {
+ fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+ let mut checker = FunctionItemRefChecker { tcx, body };
+ checker.visit_body(&body);
+ }
+}
+
+struct FunctionItemRefChecker<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a Body<'tcx>,
+}
+
+impl<'tcx> Visitor<'tcx> for FunctionItemRefChecker<'_, 'tcx> {
+ /// Emits a lint for function reference arguments bound by `fmt::Pointer` or passed to
+ /// `transmute`. This only handles arguments in calls outside macro expansions to avoid double
+ /// counting function references formatted as pointers by macros.
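+ ///
+ /// For illustration (not from the original source):
+ /// ```ignore (illustrative)
+ /// fn foo() {}
+ /// fn print_ptr<T: std::fmt::Pointer>(p: T) { /* prints the address */ }
+ /// print_ptr(&foo); // lint fires: suggests `foo as fn()` instead of `&foo`
+ /// ```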
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ if let TerminatorKind::Call {
+ func,
+ args,
+ destination: _,
+ target: _,
+ cleanup: _,
+ from_hir_call: _,
+ fn_span: _,
+ } = &terminator.kind
+ {
+ let source_info = *self.body.source_info(location);
+ let func_ty = func.ty(self.body, self.tcx);
+ if let ty::FnDef(def_id, substs_ref) = *func_ty.kind() {
+ // Handle calls to `transmute`
+ if self.tcx.is_diagnostic_item(sym::transmute, def_id) {
+ let arg_ty = args[0].ty(self.body, self.tcx);
+ for generic_inner_ty in arg_ty.walk() {
+ if let GenericArgKind::Type(inner_ty) = generic_inner_ty.unpack() {
+ if let Some((fn_id, fn_substs)) =
+ FunctionItemRefChecker::is_fn_ref(inner_ty)
+ {
+ let span = self.nth_arg_span(&args, 0);
+ self.emit_lint(fn_id, fn_substs, source_info, span);
+ }
+ }
+ }
+ } else {
+ self.check_bound_args(def_id, substs_ref, &args, source_info);
+ }
+ }
+ }
+ self.super_terminator(terminator, location);
+ }
+}
+
+impl<'tcx> FunctionItemRefChecker<'_, 'tcx> {
+ /// Emits a lint for function reference arguments bound by `fmt::Pointer` in calls to the
+ /// function defined by `def_id` with the substitutions `substs_ref`.
+ fn check_bound_args(
+ &self,
+ def_id: DefId,
+ substs_ref: SubstsRef<'tcx>,
+ args: &[Operand<'tcx>],
+ source_info: SourceInfo,
+ ) {
+ let param_env = self.tcx.param_env(def_id);
+ let bounds = param_env.caller_bounds();
+ for bound in bounds {
+ if let Some(bound_ty) = self.is_pointer_trait(&bound.kind().skip_binder()) {
+ // Get the argument types as they appear in the function signature.
+ let arg_defs = self.tcx.fn_sig(def_id).skip_binder().inputs();
+ for (arg_num, arg_def) in arg_defs.iter().enumerate() {
+ // For all types reachable from the argument type in the fn sig
+ for generic_inner_ty in arg_def.walk() {
+ if let GenericArgKind::Type(inner_ty) = generic_inner_ty.unpack() {
+ // If the inner type matches the type bound by `Pointer`
+ if inner_ty == bound_ty {
+ // Do a substitution using the parameters from the callsite
+ let subst_ty = EarlyBinder(inner_ty).subst(self.tcx, substs_ref);
+ if let Some((fn_id, fn_substs)) =
+ FunctionItemRefChecker::is_fn_ref(subst_ty)
+ {
+ let mut span = self.nth_arg_span(args, arg_num);
+ if span.from_expansion() {
+ // The operand's ctxt wouldn't display the lint since it's inside a macro, so
+ // we have to use the callsite's ctxt.
+ let callsite_ctxt = span.source_callsite().ctxt();
+ span = span.with_ctxt(callsite_ctxt);
+ }
+ self.emit_lint(fn_id, fn_substs, source_info, span);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /// If the given predicate is the trait `fmt::Pointer`, returns the bound parameter type.
+ fn is_pointer_trait(&self, bound: &PredicateKind<'tcx>) -> Option<Ty<'tcx>> {
+ if let ty::PredicateKind::Trait(predicate) = bound {
+ if self.tcx.is_diagnostic_item(sym::Pointer, predicate.def_id()) {
+ Some(predicate.trait_ref.self_ty())
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
+
+ /// If a type is a reference or raw pointer to the anonymous type of a function definition,
+ /// returns that function's `DefId` and `SubstsRef`.
+ fn is_fn_ref(ty: Ty<'tcx>) -> Option<(DefId, SubstsRef<'tcx>)> {
+ let referent_ty = match ty.kind() {
+ ty::Ref(_, referent_ty, _) => Some(referent_ty),
+ ty::RawPtr(ty_and_mut) => Some(&ty_and_mut.ty),
+ _ => None,
+ };
+ referent_ty.and_then(|ref_ty| {
+ if let ty::FnDef(def_id, substs_ref) = *ref_ty.kind() {
+ Some((def_id, substs_ref))
+ } else {
+ None
+ }
+ })
+ }
+
+ fn nth_arg_span(&self, args: &[Operand<'tcx>], n: usize) -> Span {
+ match &args[n] {
+ Operand::Copy(place) | Operand::Move(place) => {
+ self.body.local_decls[place.local].source_info.span
+ }
+ Operand::Constant(constant) => constant.span,
+ }
+ }
+
+ fn emit_lint(
+ &self,
+ fn_id: DefId,
+ fn_substs: SubstsRef<'tcx>,
+ source_info: SourceInfo,
+ span: Span,
+ ) {
+ let lint_root = self.body.source_scopes[source_info.scope]
+ .local_data
+ .as_ref()
+ .assert_crate_local()
+ .lint_root;
+ let fn_sig = self.tcx.fn_sig(fn_id);
+ let unsafety = fn_sig.unsafety().prefix_str();
+ let abi = match fn_sig.abi() {
+ Abi::Rust => String::new(),
+ other_abi => {
+ let mut s = String::from("extern \"");
+ s.push_str(other_abi.name());
+ s.push_str("\" ");
+ s
+ }
+ };
+ let ident = self.tcx.item_name(fn_id).to_ident_string();
+ let ty_params = fn_substs.types().map(|ty| format!("{}", ty));
+ let const_params = fn_substs.consts().map(|c| format!("{}", c));
+ let params = ty_params.chain(const_params).join(", ");
+ let num_args = fn_sig.inputs().map_bound(|inputs| inputs.len()).skip_binder();
+ let variadic = if fn_sig.c_variadic() { ", ..." } else { "" };
+ let ret = if fn_sig.output().skip_binder().is_unit() { "" } else { " -> _" };
+ self.tcx.struct_span_lint_hir(FUNCTION_ITEM_REFERENCES, lint_root, span, |lint| {
+ lint.build("taking a reference to a function item does not give a function pointer")
+ .span_suggestion(
+ span,
+ &format!("cast `{}` to obtain a function pointer", ident),
+ format!(
+ "{} as {}{}fn({}{}){}",
+ if params.is_empty() { ident } else { format!("{}::<{}>", ident, params) },
+ unsafety,
+ abi,
+ vec!["_"; num_args].join(", "),
+ variadic,
+ ret,
+ ),
+ Applicability::Unspecified,
+ )
+ .emit();
+ });
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/generator.rs b/compiler/rustc_mir_transform/src/generator.rs
new file mode 100644
index 000000000..91ecf3879
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/generator.rs
@@ -0,0 +1,1581 @@
+//! This is the implementation of the pass which transforms generators into state machines.
+//!
+//! MIR generation for generators creates a function which has a self argument that is
+//! passed by value. This argument is effectively a generator type which only contains upvars
+//! and is only used for this argument inside the MIR for the generator.
+//! It is passed by value to enable upvars to be moved out of it. Drop elaboration runs on that
+//! MIR before this pass and creates drop flags for MIR locals.
+//! It will also drop the generator argument (which consists only of upvars) if any of the
+//! upvars are moved out of. This pass elaborates the drops of the upvars / generator argument
+//! in the case that none of the upvars were moved out of. This is because we cannot have any
+//! drops of this generator in the MIR, since it is used to create the drop glue for the
+//! generator. We'd get infinite recursion otherwise.
+//!
+//! This pass creates the implementation for the Generator::resume function and the drop shim
+//! for the generator based on the MIR input. It converts the generator argument from Self to
+//! &mut Self, adding derefs in the MIR as needed. It computes the final layout of the
+//! generator struct, which looks like this:
+//! First, the upvars are stored.
+//! They are followed by the generator state field.
+//! Finally, the MIR locals which are live across a suspension point are stored.
+//! ```ignore (illustrative)
+//! struct Generator {
+//! upvars...,
+//! state: u32,
+//! mir_locals...,
+//! }
+//! ```
+//! This pass computes the meaning of the state field and the MIR locals which are live
+//! across a suspension point. There are, however, three hardcoded generator states:
+//! 0 - Generator has not been resumed yet
+//! 1 - Generator has returned / is completed
+//! 2 - Generator has been poisoned
+//!
+//! It also rewrites `return x` and `yield y` as setting a new generator state and returning
+//! `GeneratorState::Complete(x)` and `GeneratorState::Yielded(y)` respectively.
+//! MIR locals which are live across a suspension point are moved to the generator struct
+//! with references to them being updated with references to the generator struct.
+//!
+//! The pass creates two functions which have a switch on the generator state giving
+//! the action to take.
+//!
+//! One of them is the implementation of Generator::resume.
+//! For generators with state 0 (unresumed) it starts the execution of the generator.
+//! For generators with state 1 (returned) and state 2 (poisoned) it panics.
+//! Otherwise it continues the execution from the last suspension point.
+//!
+//! The other function is the drop glue for the generator.
+//! For generators with state 0 (unresumed) it drops the upvars of the generator.
+//! For generators with state 1 (returned) and state 2 (poisoned) it does nothing.
+//! Otherwise it drops all the values in scope at the last suspension point.
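+//!
+//! A rough sketch of the generated `resume` method, for illustration only
+//! (the actual lowering works on MIR, not on surface Rust):
+//! ```ignore (illustrative)
+//! fn resume(self: Pin<&mut Generator>, arg: ResumeArg) -> GeneratorState<Yield, Return> {
+//!     match self.state {
+//!         0 => { /* start execution from the beginning */ }
+//!         1 => panic!("generator resumed after completion"),
+//!         2 => panic!("generator resumed after panicking"),
+//!         n => { /* resume from the suspension point recorded as state `n` */ }
+//!     }
+//! }
+//! ```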
+
+use crate::deref_separator::deref_finder;
+use crate::simplify;
+use crate::util::expand_aggregate;
+use crate::MirPass;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::bit_set::{BitMatrix, BitSet, GrowableBitSet};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::dump_mir;
+use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::{Subst, SubstsRef};
+use rustc_middle::ty::GeneratorSubsts;
+use rustc_middle::ty::{self, AdtDef, Ty, TyCtxt};
+use rustc_mir_dataflow::impls::{
+ MaybeBorrowedLocals, MaybeLiveLocals, MaybeRequiresStorage, MaybeStorageLive,
+};
+use rustc_mir_dataflow::storage::always_storage_live_locals;
+use rustc_mir_dataflow::{self, Analysis};
+use rustc_target::abi::VariantIdx;
+use rustc_target::spec::PanicStrategy;
+use std::{iter, ops};
+
+pub struct StateTransform;
+
+struct RenameLocalVisitor<'tcx> {
+ from: Local,
+ to: Local,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for RenameLocalVisitor<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+ if *local == self.from {
+ *local = self.to;
+ }
+ }
+
+ fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, location: Location) {
+ match terminator.kind {
+ TerminatorKind::Return => {
+ // Do not replace the implicit `_0` access here, as that's not possible. The
+ // transform already handles `return` correctly.
+ }
+ _ => self.super_terminator(terminator, location),
+ }
+ }
+}
+
+struct DerefArgVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for DerefArgVisitor<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+ assert_ne!(*local, SELF_ARG);
+ }
+
+ fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
+ if place.local == SELF_ARG {
+ replace_base(
+ place,
+ Place {
+ local: SELF_ARG,
+ projection: self.tcx().intern_place_elems(&[ProjectionElem::Deref]),
+ },
+ self.tcx,
+ );
+ } else {
+ self.visit_local(&mut place.local, context, location);
+
+ for elem in place.projection.iter() {
+ if let PlaceElem::Index(local) = elem {
+ assert_ne!(local, SELF_ARG);
+ }
+ }
+ }
+ }
+}
+
+struct PinArgVisitor<'tcx> {
+ ref_gen_ty: Ty<'tcx>,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for PinArgVisitor<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+ assert_ne!(*local, SELF_ARG);
+ }
+
+ fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
+ if place.local == SELF_ARG {
+ replace_base(
+ place,
+ Place {
+ local: SELF_ARG,
+ projection: self.tcx().intern_place_elems(&[ProjectionElem::Field(
+ Field::new(0),
+ self.ref_gen_ty,
+ )]),
+ },
+ self.tcx,
+ );
+ } else {
+ self.visit_local(&mut place.local, context, location);
+
+ for elem in place.projection.iter() {
+ if let PlaceElem::Index(local) = elem {
+ assert_ne!(local, SELF_ARG);
+ }
+ }
+ }
+ }
+}
+
+fn replace_base<'tcx>(place: &mut Place<'tcx>, new_base: Place<'tcx>, tcx: TyCtxt<'tcx>) {
+ place.local = new_base.local;
+
+ let mut new_projection = new_base.projection.to_vec();
+ new_projection.append(&mut place.projection.to_vec());
+
+ place.projection = tcx.intern_place_elems(&new_projection);
+}
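+
+// For illustration (not from the original source): with `new_base = (*_1)`,
+// `replace_base` rewrites a place such as `_1.f` into `(*_1).f`, i.e. the new
+// base's projections are prepended to the place's existing projections.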
+
+const SELF_ARG: Local = Local::from_u32(1);
+
+/// Generator has not been resumed yet.
+const UNRESUMED: usize = GeneratorSubsts::UNRESUMED;
+/// Generator has returned / is completed.
+const RETURNED: usize = GeneratorSubsts::RETURNED;
+/// Generator has panicked and is poisoned.
+const POISONED: usize = GeneratorSubsts::POISONED;
+
+/// Number of variants to reserve in generator state. Corresponds to
+/// `UNRESUMED` (beginning of a generator) and `RETURNED`/`POISONED`
+/// (end of a generator) states.
+const RESERVED_VARIANTS: usize = 3;
+
+/// A `yield` point in the generator.
+struct SuspensionPoint<'tcx> {
+ /// State discriminant used when suspending or resuming at this point.
+ state: usize,
+ /// The block to jump to after resumption.
+ resume: BasicBlock,
+ /// Where to move the resume argument after resumption.
+ resume_arg: Place<'tcx>,
+ /// Which block to jump to if the generator is dropped in this state.
+ drop: Option<BasicBlock>,
+ /// Set of locals that have live storage while at this suspension point.
+ storage_liveness: GrowableBitSet<Local>,
+}
+
+struct TransformVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ state_adt_ref: AdtDef<'tcx>,
+ state_substs: SubstsRef<'tcx>,
+
+ // The type of the discriminant in the generator struct
+ discr_ty: Ty<'tcx>,
+
+ // Mapping from Local to (type of local, generator struct index)
+ // FIXME(eddyb) This should use `IndexVec<Local, Option<_>>`.
+ remap: FxHashMap<Local, (Ty<'tcx>, VariantIdx, usize)>,
+
+ // A map from a suspension point in a block to the locals which have live storage at that point
+ storage_liveness: IndexVec<BasicBlock, Option<BitSet<Local>>>,
+
+ // A list of suspension points, generated during the transform
+ suspension_points: Vec<SuspensionPoint<'tcx>>,
+
+ // The set of locals that have no `StorageLive`/`StorageDead` annotations.
+ always_live_locals: BitSet<Local>,
+
+ // The new local which replaced the original RETURN_PLACE; it holds the return value
+ new_ret_local: Local,
+}
+
+impl<'tcx> TransformVisitor<'tcx> {
+ // Make a GeneratorState variant assignment. `core::ops::GeneratorState` only has
+ // single-element tuple variants, so we can just write to the downcasted first field and then set the
+ // discriminant to the appropriate variant.
+ fn make_state(
+ &self,
+ idx: VariantIdx,
+ val: Operand<'tcx>,
+ source_info: SourceInfo,
+ ) -> impl Iterator<Item = Statement<'tcx>> {
+ let kind = AggregateKind::Adt(self.state_adt_ref.did(), idx, self.state_substs, None, None);
+ assert_eq!(self.state_adt_ref.variant(idx).fields.len(), 1);
+ let ty = self
+ .tcx
+ .bound_type_of(self.state_adt_ref.variant(idx).fields[0].did)
+ .subst(self.tcx, self.state_substs);
+ expand_aggregate(
+ Place::return_place(),
+ std::iter::once((val, ty)),
+ kind,
+ source_info,
+ self.tcx,
+ )
+ }
+
+ // Create a Place referencing a generator struct field
+ fn make_field(&self, variant_index: VariantIdx, idx: usize, ty: Ty<'tcx>) -> Place<'tcx> {
+ let self_place = Place::from(SELF_ARG);
+ let base = self.tcx.mk_place_downcast_unnamed(self_place, variant_index);
+ let mut projection = base.projection.to_vec();
+ projection.push(ProjectionElem::Field(Field::new(idx), ty));
+
+ Place { local: base.local, projection: self.tcx.intern_place_elems(&projection) }
+ }
+
+ // Create a statement which changes the discriminant
+ fn set_discr(&self, state_disc: VariantIdx, source_info: SourceInfo) -> Statement<'tcx> {
+ let self_place = Place::from(SELF_ARG);
+ Statement {
+ source_info,
+ kind: StatementKind::SetDiscriminant {
+ place: Box::new(self_place),
+ variant_index: state_disc,
+ },
+ }
+ }
+
+ // Create a statement which reads the discriminant into a temporary
+ fn get_discr(&self, body: &mut Body<'tcx>) -> (Statement<'tcx>, Place<'tcx>) {
+ let temp_decl = LocalDecl::new(self.discr_ty, body.span).internal();
+ let local_decls_len = body.local_decls.push(temp_decl);
+ let temp = Place::from(local_decls_len);
+
+ let self_place = Place::from(SELF_ARG);
+ let assign = Statement {
+ source_info: SourceInfo::outermost(body.span),
+ kind: StatementKind::Assign(Box::new((temp, Rvalue::Discriminant(self_place)))),
+ };
+ (assign, temp)
+ }
+}
+
+impl<'tcx> MutVisitor<'tcx> for TransformVisitor<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_local(&mut self, local: &mut Local, _: PlaceContext, _: Location) {
+ assert_eq!(self.remap.get(local), None);
+ }
+
+ fn visit_place(
+ &mut self,
+ place: &mut Place<'tcx>,
+ _context: PlaceContext,
+ _location: Location,
+ ) {
+ // Replace a Local in the remap with a generator struct access
+ if let Some(&(ty, variant_index, idx)) = self.remap.get(&place.local) {
+ replace_base(place, self.make_field(variant_index, idx, ty), self.tcx);
+ }
+ }
+
+ fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
+ // Remove StorageLive and StorageDead statements for remapped locals
+ data.retain_statements(|s| match s.kind {
+ StatementKind::StorageLive(l) | StatementKind::StorageDead(l) => {
+ !self.remap.contains_key(&l)
+ }
+ _ => true,
+ });
+
+ let ret_val = match data.terminator().kind {
+ TerminatorKind::Return => Some((
+ VariantIdx::new(1),
+ None,
+ Operand::Move(Place::from(self.new_ret_local)),
+ None,
+ )),
+ TerminatorKind::Yield { ref value, resume, resume_arg, drop } => {
+ Some((VariantIdx::new(0), Some((resume, resume_arg)), value.clone(), drop))
+ }
+ _ => None,
+ };
+
+ if let Some((state_idx, resume, v, drop)) = ret_val {
+ let source_info = data.terminator().source_info;
+ // We must assign the value first in case it gets declared dead below
+ data.statements.extend(self.make_state(state_idx, v, source_info));
+ let state = if let Some((resume, mut resume_arg)) = resume {
+ // Yield
+ let state = RESERVED_VARIANTS + self.suspension_points.len();
+
+ // The resume arg target location might itself be remapped if its base local is
+ // live across a yield.
+ let resume_arg =
+ if let Some(&(ty, variant, idx)) = self.remap.get(&resume_arg.local) {
+ replace_base(&mut resume_arg, self.make_field(variant, idx, ty), self.tcx);
+ resume_arg
+ } else {
+ resume_arg
+ };
+
+ self.suspension_points.push(SuspensionPoint {
+ state,
+ resume,
+ resume_arg,
+ drop,
+ storage_liveness: self.storage_liveness[block].clone().unwrap().into(),
+ });
+
+ VariantIdx::new(state)
+ } else {
+ // Return
+ VariantIdx::new(RETURNED) // state for returned
+ };
+ data.statements.push(self.set_discr(state, source_info));
+ data.terminator_mut().kind = TerminatorKind::Return;
+ }
+
+ self.super_basic_block_data(block, data);
+ }
+}
+
+fn make_generator_state_argument_indirect<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let gen_ty = body.local_decls.raw[1].ty;
+
+ let ref_gen_ty =
+ tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty: gen_ty, mutbl: Mutability::Mut });
+
+ // Replace the by-value generator argument
+ body.local_decls.raw[1].ty = ref_gen_ty;
+
+ // Add a deref to accesses of the generator state
+ DerefArgVisitor { tcx }.visit_body(body);
+}
+
+fn make_generator_state_argument_pinned<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let ref_gen_ty = body.local_decls.raw[1].ty;
+
+ let pin_did = tcx.require_lang_item(LangItem::Pin, Some(body.span));
+ let pin_adt_ref = tcx.adt_def(pin_did);
+ let substs = tcx.intern_substs(&[ref_gen_ty.into()]);
+ let pin_ref_gen_ty = tcx.mk_adt(pin_adt_ref, substs);
+
+ // Replace the by-ref generator argument
+ body.local_decls.raw[1].ty = pin_ref_gen_ty;
+
+ // Add the Pin field access to accesses of the generator state
+ PinArgVisitor { ref_gen_ty, tcx }.visit_body(body);
+}
+
+/// Allocates a new local and replaces all references of `local` with it. Returns the new local.
+///
+/// `local` will be changed to a new local decl with type `ty`.
+///
+/// Note that the new local will be uninitialized. It is the caller's responsibility to assign some
+/// valid value to it before its first use.
+fn replace_local<'tcx>(
+ local: Local,
+ ty: Ty<'tcx>,
+ body: &mut Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+) -> Local {
+ let new_decl = LocalDecl::new(ty, body.span);
+ let new_local = body.local_decls.push(new_decl);
+ body.local_decls.swap(local, new_local);
+
+ RenameLocalVisitor { from: local, to: new_local, tcx }.visit_body(body);
+
+ new_local
+}
+
+struct LivenessInfo {
+ /// Which locals are live across any suspension point.
+ saved_locals: GeneratorSavedLocals,
+
+ /// The set of saved locals live at each suspension point.
+ live_locals_at_suspension_points: Vec<BitSet<GeneratorSavedLocal>>,
+
+ /// Parallel vec to the above with SourceInfo for each yield terminator.
+ source_info_at_suspension_points: Vec<SourceInfo>,
+
+ /// For every saved local, the set of other saved locals that are
+ /// storage-live at the same time as this local. We cannot overlap locals in
+ /// the layout which have conflicting storage.
+ storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+
+ /// For every suspending block, the locals which are storage-live across
+ /// that suspension point.
+ storage_liveness: IndexVec<BasicBlock, Option<BitSet<Local>>>,
+}
+
+fn locals_live_across_suspend_points<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ always_live_locals: &BitSet<Local>,
+ movable: bool,
+) -> LivenessInfo {
+ let body_ref: &Body<'_> = &body;
+
+ // Calculate when MIR locals have live storage. This gives us an upper bound of their
+ // lifetimes.
+ let mut storage_live = MaybeStorageLive::new(always_live_locals.clone())
+ .into_engine(tcx, body_ref)
+ .iterate_to_fixpoint()
+ .into_results_cursor(body_ref);
+
+ // Calculate the MIR locals which have been previously
+ // borrowed (even if they are still active).
+ let borrowed_locals_results =
+ MaybeBorrowedLocals.into_engine(tcx, body_ref).pass_name("generator").iterate_to_fixpoint();
+
+ let mut borrowed_locals_cursor =
+ rustc_mir_dataflow::ResultsCursor::new(body_ref, &borrowed_locals_results);
+
+ // Calculate the MIR locals that we actually need to keep storage around
+ // for.
+ let requires_storage_results = MaybeRequiresStorage::new(body, &borrowed_locals_results)
+ .into_engine(tcx, body_ref)
+ .iterate_to_fixpoint();
+ let mut requires_storage_cursor =
+ rustc_mir_dataflow::ResultsCursor::new(body_ref, &requires_storage_results);
+
+ // Calculate the liveness of MIR locals ignoring borrows.
+ let mut liveness = MaybeLiveLocals
+ .into_engine(tcx, body_ref)
+ .pass_name("generator")
+ .iterate_to_fixpoint()
+ .into_results_cursor(body_ref);
+
+ let mut storage_liveness_map = IndexVec::from_elem(None, body.basic_blocks());
+ let mut live_locals_at_suspension_points = Vec::new();
+ let mut source_info_at_suspension_points = Vec::new();
+ let mut live_locals_at_any_suspension_point = BitSet::new_empty(body.local_decls.len());
+
+ for (block, data) in body.basic_blocks().iter_enumerated() {
+ if let TerminatorKind::Yield { .. } = data.terminator().kind {
+ let loc = Location { block, statement_index: data.statements.len() };
+
+ liveness.seek_to_block_end(block);
+ let mut live_locals: BitSet<_> = BitSet::new_empty(body.local_decls.len());
+ live_locals.union(liveness.get());
+
+ if !movable {
+ // The `liveness` variable contains the liveness of MIR locals ignoring borrows.
+ // This is correct for movable generators since borrows cannot live across
+ // suspension points. However for immovable generators we need to account for
+ // borrows, so we conservatively assume that all borrowed locals are live until
+ // we find a StorageDead statement referencing the locals.
+ // To do this we just union our `liveness` result with `borrowed_locals`, which
+ // contains all the locals which have been borrowed before this suspension point.
+ // If a borrow is converted to a raw reference, we must also assume that it lives
+ // forever. Note that the final liveness is still bounded by the storage liveness
+ // of the local, which happens using the `intersect` operation below.
+ borrowed_locals_cursor.seek_before_primary_effect(loc);
+ live_locals.union(borrowed_locals_cursor.get());
+ }
+
+ // Store the storage liveness for later use so we can restore the state
+ // after a suspension point
+ storage_live.seek_before_primary_effect(loc);
+ storage_liveness_map[block] = Some(storage_live.get().clone());
+
+ // Locals are live at this point only if they are used across
+ // suspension points (the `liveness` variable)
+ // and their storage is required (the `requires_storage_cursor` variable)
+ requires_storage_cursor.seek_before_primary_effect(loc);
+ live_locals.intersect(requires_storage_cursor.get());
+
+ // The generator argument is ignored.
+ live_locals.remove(SELF_ARG);
+
+ debug!("loc = {:?}, live_locals = {:?}", loc, live_locals);
+
+ // Add the locals live at this suspension point to the set of locals which live across
+ // any suspension points
+ live_locals_at_any_suspension_point.union(&live_locals);
+
+ live_locals_at_suspension_points.push(live_locals);
+ source_info_at_suspension_points.push(data.terminator().source_info);
+ }
+ }
+
+ debug!("live_locals_anywhere = {:?}", live_locals_at_any_suspension_point);
+ let saved_locals = GeneratorSavedLocals(live_locals_at_any_suspension_point);
+
+ // Renumber our liveness_map bitsets to include only the locals we are
+ // saving.
+ let live_locals_at_suspension_points = live_locals_at_suspension_points
+ .iter()
+ .map(|live_here| saved_locals.renumber_bitset(&live_here))
+ .collect();
+
+ let storage_conflicts = compute_storage_conflicts(
+ body_ref,
+ &saved_locals,
+ always_live_locals.clone(),
+ requires_storage_results,
+ );
+
+ LivenessInfo {
+ saved_locals,
+ live_locals_at_suspension_points,
+ source_info_at_suspension_points,
+ storage_conflicts,
+ storage_liveness: storage_liveness_map,
+ }
+}
+
+/// The set of `Local`s that must be saved across yield points.
+///
+/// `GeneratorSavedLocal` is indexed in terms of the elements in this set;
+/// i.e. `GeneratorSavedLocal::new(1)` corresponds to the second local
+/// included in this set.
+struct GeneratorSavedLocals(BitSet<Local>);
+
+impl GeneratorSavedLocals {
+ /// Returns an iterator over each `GeneratorSavedLocal` along with the `Local` it corresponds
+ /// to.
+ fn iter_enumerated(&self) -> impl '_ + Iterator<Item = (GeneratorSavedLocal, Local)> {
+ self.iter().enumerate().map(|(i, l)| (GeneratorSavedLocal::from(i), l))
+ }
+
+ /// Transforms a `BitSet<Local>` that contains only locals saved across yield points to the
+ /// equivalent `BitSet<GeneratorSavedLocal>`.
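+ ///
+ /// For example (illustrative): if the saved locals are `{_1, _3, _4}`, the
+ /// input `{_3}` is renumbered to the second saved local, i.e. bit 1.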
+ fn renumber_bitset(&self, input: &BitSet<Local>) -> BitSet<GeneratorSavedLocal> {
+ assert!(self.superset(&input), "{:?} not a superset of {:?}", self.0, input);
+ let mut out = BitSet::new_empty(self.count());
+ for (saved_local, local) in self.iter_enumerated() {
+ if input.contains(local) {
+ out.insert(saved_local);
+ }
+ }
+ out
+ }
+
+ fn get(&self, local: Local) -> Option<GeneratorSavedLocal> {
+ if !self.contains(local) {
+ return None;
+ }
+
+ let idx = self.iter().take_while(|&l| l < local).count();
+ Some(GeneratorSavedLocal::new(idx))
+ }
+}
+
+impl ops::Deref for GeneratorSavedLocals {
+ type Target = BitSet<Local>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+/// For every saved local, looks for which locals are StorageLive at the same
+/// time. Generates a bitset for every local of all the other locals that may be
+/// StorageLive simultaneously with that local. This is used in the layout
+/// computation; see `GeneratorLayout` for more.
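+///
+/// For illustration (not from the original source): if `_3` and `_5` may both
+/// be StorageLive at some point, each records the other as a conflict, so the
+/// layout code will not overlap their slots in the generator.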
+fn compute_storage_conflicts<'mir, 'tcx>(
+ body: &'mir Body<'tcx>,
+ saved_locals: &GeneratorSavedLocals,
+ always_live_locals: BitSet<Local>,
+ requires_storage: rustc_mir_dataflow::Results<'tcx, MaybeRequiresStorage<'mir, 'tcx>>,
+) -> BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal> {
+ assert_eq!(body.local_decls.len(), saved_locals.domain_size());
+
+ debug!("compute_storage_conflicts({:?})", body.span);
+ debug!("always_live = {:?}", always_live_locals);
+
+ // Locals that are always live or ones that need to be stored across
+ // suspension points are not eligible for overlap.
+ let mut ineligible_locals = always_live_locals;
+ ineligible_locals.intersect(&**saved_locals);
+
+ // Compute the storage conflicts for all eligible locals.
+ let mut visitor = StorageConflictVisitor {
+ body,
+ saved_locals: &saved_locals,
+ local_conflicts: BitMatrix::from_row_n(&ineligible_locals, body.local_decls.len()),
+ };
+
+ requires_storage.visit_reachable_with(body, &mut visitor);
+
+ let local_conflicts = visitor.local_conflicts;
+
+ // Compress the matrix using only stored locals (Local -> GeneratorSavedLocal).
+ //
+ // NOTE: Today we store a full conflict bitset for every local. Technically
+ // this is twice as many bits as we need, since the relation is symmetric.
+ // However, in practice these bitsets are not usually large. The layout code
+ // also needs to keep track of how many conflicts each local has, so it's
+ // simpler to keep it this way for now.
+ let mut storage_conflicts = BitMatrix::new(saved_locals.count(), saved_locals.count());
+ for (saved_local_a, local_a) in saved_locals.iter_enumerated() {
+ if ineligible_locals.contains(local_a) {
+ // Conflicts with everything.
+ storage_conflicts.insert_all_into_row(saved_local_a);
+ } else {
+ // Keep overlap information only for stored locals.
+ for (saved_local_b, local_b) in saved_locals.iter_enumerated() {
+ if local_conflicts.contains(local_a, local_b) {
+ storage_conflicts.insert(saved_local_a, saved_local_b);
+ }
+ }
+ }
+ }
+ storage_conflicts
+}
+
+struct StorageConflictVisitor<'mir, 'tcx, 's> {
+ body: &'mir Body<'tcx>,
+ saved_locals: &'s GeneratorSavedLocals,
+ // FIXME(tmandry): Consider using sparse bitsets here once we have good
+ // benchmarks for generators.
+ local_conflicts: BitMatrix<Local, Local>,
+}
+
+impl<'mir, 'tcx> rustc_mir_dataflow::ResultsVisitor<'mir, 'tcx>
+ for StorageConflictVisitor<'mir, 'tcx, '_>
+{
+ type FlowState = BitSet<Local>;
+
+ fn visit_statement_before_primary_effect(
+ &mut self,
+ state: &Self::FlowState,
+ _statement: &'mir Statement<'tcx>,
+ loc: Location,
+ ) {
+ self.apply_state(state, loc);
+ }
+
+ fn visit_terminator_before_primary_effect(
+ &mut self,
+ state: &Self::FlowState,
+ _terminator: &'mir Terminator<'tcx>,
+ loc: Location,
+ ) {
+ self.apply_state(state, loc);
+ }
+}
+
+impl StorageConflictVisitor<'_, '_, '_> {
+ fn apply_state(&mut self, flow_state: &BitSet<Local>, loc: Location) {
+ // Ignore unreachable blocks.
+ if self.body.basic_blocks()[loc.block].terminator().kind == TerminatorKind::Unreachable {
+ return;
+ }
+
+ let mut eligible_storage_live = flow_state.clone();
+ eligible_storage_live.intersect(&**self.saved_locals);
+
+ for local in eligible_storage_live.iter() {
+ self.local_conflicts.union_row_with(&eligible_storage_live, local);
+ }
+
+ if eligible_storage_live.count() > 1 {
+ trace!("at {:?}, eligible_storage_live={:?}", loc, eligible_storage_live);
+ }
+ }
+}
+
+/// Validates the typeck view of the generator against the actual set of types saved between
+/// yield points.
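+///
+/// Hypothetical example: if a non-internal local of type `String` is saved
+/// across a `yield`, then `String` must appear in the witness (or among the
+/// upvars); otherwise this raises a "Broken MIR" bug below.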
+fn sanitize_witness<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ witness: Ty<'tcx>,
+ upvars: Vec<Ty<'tcx>>,
+ saved_locals: &GeneratorSavedLocals,
+) {
+ let did = body.source.def_id();
+ let param_env = tcx.param_env(did);
+
+ let allowed_upvars = tcx.normalize_erasing_regions(param_env, upvars);
+ let allowed = match witness.kind() {
+ &ty::GeneratorWitness(interior_tys) => {
+ tcx.normalize_erasing_late_bound_regions(param_env, interior_tys)
+ }
+ _ => {
+ tcx.sess.delay_span_bug(
+ body.span,
+ &format!("unexpected generator witness type {:?}", witness.kind()),
+ );
+ return;
+ }
+ };
+
+ for (local, decl) in body.local_decls.iter_enumerated() {
+ // Ignore locals which are internal or not saved between yields.
+ if !saved_locals.contains(local) || decl.internal {
+ continue;
+ }
+ let decl_ty = tcx.normalize_erasing_regions(param_env, decl.ty);
+
+ // Sanity check that typeck knows about the types of locals which are
+ // live across a suspension point.
+ if !allowed.contains(&decl_ty) && !allowed_upvars.contains(&decl_ty) {
+ span_bug!(
+ body.span,
+ "Broken MIR: generator contains type {} in MIR, \
+ but typeck only knows about {} and {:?}",
+ decl_ty,
+ allowed,
+ allowed_upvars
+ );
+ }
+ }
+}
+
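+/// Computes the generator layout: the types of all saved locals, the
+/// assignment of saved locals to variant fields (one variant per suspension
+/// point, after the reserved UNRESUMED/RETURNED/POISONED variants), a remap
+/// from `Local` to `(ty, variant_index, field_index)`, and the storage
+/// liveness recorded at each suspension point.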
+fn compute_layout<'tcx>(
+ liveness: LivenessInfo,
+ body: &mut Body<'tcx>,
+) -> (
+ FxHashMap<Local, (Ty<'tcx>, VariantIdx, usize)>,
+ GeneratorLayout<'tcx>,
+ IndexVec<BasicBlock, Option<BitSet<Local>>>,
+) {
+ let LivenessInfo {
+ saved_locals,
+ live_locals_at_suspension_points,
+ source_info_at_suspension_points,
+ storage_conflicts,
+ storage_liveness,
+ } = liveness;
+
+ // Gather live local types and their indices.
+ let mut locals = IndexVec::<GeneratorSavedLocal, _>::new();
+ let mut tys = IndexVec::<GeneratorSavedLocal, _>::new();
+ for (saved_local, local) in saved_locals.iter_enumerated() {
+ locals.push(local);
+ tys.push(body.local_decls[local].ty);
+ debug!("generator saved local {:?} => {:?}", saved_local, local);
+ }
+
+ // Leave empty variants for the UNRESUMED, RETURNED, and POISONED states.
+ // In debuginfo, these will correspond to the beginning (UNRESUMED) or end
+ // (RETURNED, POISONED) of the function.
+ let body_span = body.source_scopes[OUTERMOST_SOURCE_SCOPE].span;
+ let mut variant_source_info: IndexVec<VariantIdx, SourceInfo> = [
+ SourceInfo::outermost(body_span.shrink_to_lo()),
+ SourceInfo::outermost(body_span.shrink_to_hi()),
+ SourceInfo::outermost(body_span.shrink_to_hi()),
+ ]
+ .iter()
+ .copied()
+ .collect();
+
+ // Build the generator variant field list.
+ // Create a map from local indices to generator struct indices.
+ let mut variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>> =
+ iter::repeat(IndexVec::new()).take(RESERVED_VARIANTS).collect();
+ let mut remap = FxHashMap::default();
+ for (suspension_point_idx, live_locals) in live_locals_at_suspension_points.iter().enumerate() {
+ let variant_index = VariantIdx::from(RESERVED_VARIANTS + suspension_point_idx);
+ let mut fields = IndexVec::new();
+ for (idx, saved_local) in live_locals.iter().enumerate() {
+ fields.push(saved_local);
+ // Note that if a field is included in multiple variants, we will
+ // just use the first one here. That's fine; fields do not move
+ // around inside generators, so it doesn't matter which variant
+ // index we access them by.
+ remap.entry(locals[saved_local]).or_insert((tys[saved_local], variant_index, idx));
+ }
+ variant_fields.push(fields);
+ variant_source_info.push(source_info_at_suspension_points[suspension_point_idx]);
+ }
+ debug!("generator variant_fields = {:?}", variant_fields);
+ debug!("generator storage_conflicts = {:#?}", storage_conflicts);
+
+ let layout =
+ GeneratorLayout { field_tys: tys, variant_fields, variant_source_info, storage_conflicts };
+
+ (remap, layout, storage_liveness)
+}
+
+/// Replaces the entry point of `body` with a block that switches on the generator discriminant and
+/// dispatches to blocks according to `cases`.
+///
+/// After this function, the former entry point of the function will be bb1.
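+///
+/// Schematically, the new entry block looks like this (illustrative only):
+///
+/// ```text
+/// bb0: {
+///     _tmp = discriminant(self);
+///     switchInt(move _tmp) -> [s0: case0, s1: case1, ..., otherwise: default];
+/// }
+/// ```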
+fn insert_switch<'tcx>(
+ body: &mut Body<'tcx>,
+ cases: Vec<(usize, BasicBlock)>,
+ transform: &TransformVisitor<'tcx>,
+ default: TerminatorKind<'tcx>,
+) {
+ let default_block = insert_term_block(body, default);
+ let (assign, discr) = transform.get_discr(body);
+ let switch_targets =
+ SwitchTargets::new(cases.iter().map(|(i, bb)| ((*i) as u128, *bb)), default_block);
+ let switch = TerminatorKind::SwitchInt {
+ discr: Operand::Move(discr),
+ switch_ty: transform.discr_ty,
+ targets: switch_targets,
+ };
+
+ let source_info = SourceInfo::outermost(body.span);
+ body.basic_blocks_mut().raw.insert(
+ 0,
+ BasicBlockData {
+ statements: vec![assign],
+ terminator: Some(Terminator { source_info, kind: switch }),
+ is_cleanup: false,
+ },
+ );
+
+ let blocks = body.basic_blocks_mut().iter_mut();
+
+ for target in blocks.flat_map(|b| b.terminator_mut().successors_mut()) {
+ *target = BasicBlock::new(target.index() + 1);
+ }
+}
+
+fn elaborate_generator_drops<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ use crate::shim::DropShimElaborator;
+ use rustc_middle::mir::patch::MirPatch;
+ use rustc_mir_dataflow::elaborate_drops::{elaborate_drop, Unwind};
+
+ // Note that `elaborate_drops` only drops the upvars of a generator, and
+ // this is ok because `open_drop` can only be reached within that
+ // generator's own resume function.
+
+ let def_id = body.source.def_id();
+ let param_env = tcx.param_env(def_id);
+
+ let mut elaborator = DropShimElaborator { body, patch: MirPatch::new(body), tcx, param_env };
+
+ for (block, block_data) in body.basic_blocks().iter_enumerated() {
+ let (target, unwind, source_info) = match block_data.terminator() {
+ Terminator { source_info, kind: TerminatorKind::Drop { place, target, unwind } } => {
+ if let Some(local) = place.as_local() {
+ if local == SELF_ARG {
+ (target, unwind, source_info)
+ } else {
+ continue;
+ }
+ } else {
+ continue;
+ }
+ }
+ _ => continue,
+ };
+ let unwind = if block_data.is_cleanup {
+ Unwind::InCleanup
+ } else {
+ Unwind::To(unwind.unwrap_or_else(|| elaborator.patch.resume_block()))
+ };
+ elaborate_drop(
+ &mut elaborator,
+ *source_info,
+ Place::from(SELF_ARG),
+ (),
+ *target,
+ unwind,
+ block,
+ );
+ }
+ elaborator.patch.apply(body);
+}
+
+fn create_generator_drop_shim<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ transform: &TransformVisitor<'tcx>,
+ gen_ty: Ty<'tcx>,
+ body: &mut Body<'tcx>,
+ drop_clean: BasicBlock,
+) -> Body<'tcx> {
+ let mut body = body.clone();
+ body.arg_count = 1; // make sure the resume argument is not included here
+
+ let source_info = SourceInfo::outermost(body.span);
+
+ let mut cases = create_cases(&mut body, transform, Operation::Drop);
+
+ cases.insert(0, (UNRESUMED, drop_clean));
+
+ // The returned state and the poisoned state fall through to the default
+ // case, which simply returns.
+
+ insert_switch(&mut body, cases, &transform, TerminatorKind::Return);
+
+ for block in body.basic_blocks_mut() {
+ let kind = &mut block.terminator_mut().kind;
+ if let TerminatorKind::GeneratorDrop = *kind {
+ *kind = TerminatorKind::Return;
+ }
+ }
+
+ // Replace the return variable
+ body.local_decls[RETURN_PLACE] = LocalDecl::with_source_info(tcx.mk_unit(), source_info);
+
+ make_generator_state_argument_indirect(tcx, &mut body);
+
+ // Change the generator argument from &mut to *mut
+ body.local_decls[SELF_ARG] = LocalDecl::with_source_info(
+ tcx.mk_ptr(ty::TypeAndMut { ty: gen_ty, mutbl: hir::Mutability::Mut }),
+ source_info,
+ );
+ if tcx.sess.opts.unstable_opts.mir_emit_retag {
+ // Alias tracking must know we changed the type
+ body.basic_blocks_mut()[START_BLOCK].statements.insert(
+ 0,
+ Statement {
+ source_info,
+ kind: StatementKind::Retag(RetagKind::Raw, Box::new(Place::from(SELF_ARG))),
+ },
+ )
+ }
+
+ // Remove dead blocks so that unrelated code belonging to the resume part
+ // of the function is stripped from the drop shim.
+ simplify::remove_dead_blocks(tcx, &mut body);
+
+ dump_mir(tcx, None, "generator_drop", &0, &body, |_, _| Ok(()));
+
+ body
+}
+
+fn insert_term_block<'tcx>(body: &mut Body<'tcx>, kind: TerminatorKind<'tcx>) -> BasicBlock {
+ let source_info = SourceInfo::outermost(body.span);
+ body.basic_blocks_mut().push(BasicBlockData {
+ statements: Vec::new(),
+ terminator: Some(Terminator { source_info, kind }),
+ is_cleanup: false,
+ })
+}
+
+fn insert_panic_block<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &mut Body<'tcx>,
+ message: AssertMessage<'tcx>,
+) -> BasicBlock {
+ let assert_block = BasicBlock::new(body.basic_blocks().len());
+ let term = TerminatorKind::Assert {
+ cond: Operand::Constant(Box::new(Constant {
+ span: body.span,
+ user_ty: None,
+ literal: ConstantKind::from_bool(tcx, false),
+ })),
+ expected: true,
+ msg: message,
+ target: assert_block,
+ cleanup: None,
+ };
+
+ let source_info = SourceInfo::outermost(body.span);
+ body.basic_blocks_mut().push(BasicBlockData {
+ statements: Vec::new(),
+ terminator: Some(Terminator { source_info, kind: term }),
+ is_cleanup: false,
+ });
+
+ assert_block
+}
+
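+/// Determines whether the generator body may ever return: `false` if the
+/// return type is (conservatively) uninhabited or if no `Return` terminator
+/// exists. Used to decide whether the resume switch needs a panic case for
+/// the RETURNED state.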
+fn can_return<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ // Returning from a function with an uninhabited return type is undefined behavior.
+ if tcx.conservative_is_privately_uninhabited(param_env.and(body.return_ty())) {
+ return false;
+ }
+
+ // If there's a return terminator, the function may return.
+ for block in body.basic_blocks() {
+ if let TerminatorKind::Return = block.terminator().kind {
+ return true;
+ }
+ }
+
+ // Otherwise the function can't return.
+ false
+}
+
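+/// Determines whether the generator body can unwind: always `false` under
+/// `-C panic=abort`, otherwise `true` iff some terminator (`Drop`, `Call`,
+/// `Assert`, `InlineAsm`, ...) can start an unwind.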
+fn can_unwind<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
+ // Nothing can unwind when landing pads are off.
+ if tcx.sess.panic_strategy() == PanicStrategy::Abort {
+ return false;
+ }
+
+ // Unwinds can only start at certain terminators.
+ for block in body.basic_blocks() {
+ match block.terminator().kind {
+ // These never unwind.
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => {}
+
+ // Resume will *continue* unwinding, but if there's no other unwinding terminator it
+ // will never be reached.
+ TerminatorKind::Resume => {}
+
+ TerminatorKind::Yield { .. } => {
+ unreachable!("`can_unwind` called before generator transform")
+ }
+
+ // These may unwind.
+ TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::InlineAsm { .. }
+ | TerminatorKind::Assert { .. } => return true,
+ }
+ }
+
+ // If we didn't find an unwinding terminator, the function cannot unwind.
+ false
+}
+
+fn create_generator_resume_function<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ transform: TransformVisitor<'tcx>,
+ body: &mut Body<'tcx>,
+ can_return: bool,
+) {
+ let can_unwind = can_unwind(tcx, body);
+
+ // Poison the generator when it unwinds
+ if can_unwind {
+ let source_info = SourceInfo::outermost(body.span);
+ let poison_block = body.basic_blocks_mut().push(BasicBlockData {
+ statements: vec![transform.set_discr(VariantIdx::new(POISONED), source_info)],
+ terminator: Some(Terminator { source_info, kind: TerminatorKind::Resume }),
+ is_cleanup: true,
+ });
+
+ for (idx, block) in body.basic_blocks_mut().iter_enumerated_mut() {
+ let source_info = block.terminator().source_info;
+
+ if let TerminatorKind::Resume = block.terminator().kind {
+ // An existing `Resume` terminator is redirected to jump to our dedicated
+ // "poisoning block" above.
+ if idx != poison_block {
+ *block.terminator_mut() = Terminator {
+ source_info,
+ kind: TerminatorKind::Goto { target: poison_block },
+ };
+ }
+ } else if !block.is_cleanup {
+ // Any terminators that *can* unwind but don't have an unwind target set are also
+ // pointed at our poisoning block (unless they're part of the cleanup path).
+ if let Some(unwind @ None) = block.terminator_mut().unwind_mut() {
+ *unwind = Some(poison_block);
+ }
+ }
+ }
+ }
+
+ let mut cases = create_cases(body, &transform, Operation::Resume);
+
+ use rustc_middle::mir::AssertKind::{ResumedAfterPanic, ResumedAfterReturn};
+
+ // Jump to the entry point on the unresumed state.
+ cases.insert(0, (UNRESUMED, BasicBlock::new(0)));
+
+ // Panic when resumed on the returned or poisoned state
+ let generator_kind = body.generator_kind().unwrap();
+
+ if can_unwind {
+ cases.insert(
+ 1,
+ (POISONED, insert_panic_block(tcx, body, ResumedAfterPanic(generator_kind))),
+ );
+ }
+
+ if can_return {
+ cases.insert(
+ 1,
+ (RETURNED, insert_panic_block(tcx, body, ResumedAfterReturn(generator_kind))),
+ );
+ }
+
+ insert_switch(body, cases, &transform, TerminatorKind::Unreachable);
+
+ make_generator_state_argument_indirect(tcx, body);
+ make_generator_state_argument_pinned(tcx, body);
+
+ // Remove dead blocks so that unrelated code belonging to the drop part
+ // of the function is stripped from the resume function.
+ simplify::remove_dead_blocks(tcx, body);
+
+ dump_mir(tcx, None, "generator_resume", &0, body, |_, _| Ok(()));
+}
+
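+/// Inserts a block that drops the generator struct itself and then returns.
+/// Dropping an unresumed generator can only drop its upvars; the drop ladder
+/// for those is expanded later by `elaborate_generator_drops`.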
+fn insert_clean_drop(body: &mut Body<'_>) -> BasicBlock {
+ let return_block = insert_term_block(body, TerminatorKind::Return);
+
+ let term =
+ TerminatorKind::Drop { place: Place::from(SELF_ARG), target: return_block, unwind: None };
+ let source_info = SourceInfo::outermost(body.span);
+
+ // Create a block to destroy an unresumed generator. This can only destroy upvars.
+ body.basic_blocks_mut().push(BasicBlockData {
+ statements: Vec::new(),
+ terminator: Some(Terminator { source_info, kind: term }),
+ is_cleanup: false,
+ })
+}
+
+/// An operation that can be performed on a generator.
+#[derive(PartialEq, Copy, Clone)]
+enum Operation {
+ Resume,
+ Drop,
+}
+
+impl Operation {
+ fn target_block(self, point: &SuspensionPoint<'_>) -> Option<BasicBlock> {
+ match self {
+ Operation::Resume => Some(point.resume),
+ Operation::Drop => point.drop,
+ }
+ }
+}
+
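+/// For every suspension point that has a target block for `operation`,
+/// creates a block that re-establishes `StorageLive` for locals whose storage
+/// was live at that point, moves the resume argument into the `yield`
+/// destination (for `Operation::Resume`), and jumps to the real target.
+/// Returns the `(state, block)` pairs used to build the dispatch switch.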
+fn create_cases<'tcx>(
+ body: &mut Body<'tcx>,
+ transform: &TransformVisitor<'tcx>,
+ operation: Operation,
+) -> Vec<(usize, BasicBlock)> {
+ let tcx = transform.tcx;
+
+ let source_info = SourceInfo::outermost(body.span);
+
+ transform
+ .suspension_points
+ .iter()
+ .filter_map(|point| {
+ // Find the target for this suspension point, if applicable
+ operation.target_block(point).map(|target| {
+ let mut statements = Vec::new();
+
+ // Create StorageLive instructions for locals with live storage
+ for i in 0..(body.local_decls.len()) {
+ if i == 2 {
+ // The resume argument is live on function entry. Don't insert a
+ // `StorageLive`, or the following `Assign` will read from uninitialized
+ // memory.
+ continue;
+ }
+
+ let l = Local::new(i);
+ let needs_storage_live = point.storage_liveness.contains(l)
+ && !transform.remap.contains_key(&l)
+ && !transform.always_live_locals.contains(l);
+ if needs_storage_live {
+ statements
+ .push(Statement { source_info, kind: StatementKind::StorageLive(l) });
+ }
+ }
+
+ if operation == Operation::Resume {
+ // Move the resume argument to the destination place of the `Yield` terminator
+ let resume_arg = Local::new(2); // 0 = return, 1 = self
+
+ // handle `box yield` properly
+ let box_place = if let [projection @ .., ProjectionElem::Deref] =
+ &**point.resume_arg.projection
+ {
+ let box_place =
+ Place::from(point.resume_arg.local).project_deeper(projection, tcx);
+
+ let box_ty = box_place.ty(&body.local_decls, tcx).ty;
+
+ if box_ty.is_box() { Some((box_place, box_ty)) } else { None }
+ } else {
+ None
+ };
+
+ if let Some((box_place, box_ty)) = box_place {
+ let unique_did = box_ty
+ .ty_adt_def()
+ .expect("expected Box to be an Adt")
+ .non_enum_variant()
+ .fields[0]
+ .did;
+
+ let Some(nonnull_def) = tcx.type_of(unique_did).ty_adt_def() else {
+ span_bug!(tcx.def_span(unique_did), "expected Box to contain Unique")
+ };
+
+ let nonnull_did = nonnull_def.non_enum_variant().fields[0].did;
+
+ let (unique_ty, nonnull_ty, ptr_ty) =
+ crate::elaborate_box_derefs::build_ptr_tys(
+ tcx,
+ box_ty.boxed_ty(),
+ unique_did,
+ nonnull_did,
+ );
+
+ let ptr_local = body.local_decls.push(LocalDecl::new(ptr_ty, body.span));
+
+ statements.push(Statement {
+ source_info,
+ kind: StatementKind::StorageLive(ptr_local),
+ });
+
+ statements.push(Statement {
+ source_info,
+ kind: StatementKind::Assign(Box::new((
+ Place::from(ptr_local),
+ Rvalue::Use(Operand::Copy(box_place.project_deeper(
+ &crate::elaborate_box_derefs::build_projection(
+ unique_ty, nonnull_ty, ptr_ty,
+ ),
+ tcx,
+ ))),
+ ))),
+ });
+
+ statements.push(Statement {
+ source_info,
+ kind: StatementKind::Assign(Box::new((
+ Place::from(ptr_local)
+ .project_deeper(&[ProjectionElem::Deref], tcx),
+ Rvalue::Use(Operand::Move(resume_arg.into())),
+ ))),
+ });
+
+ statements.push(Statement {
+ source_info,
+ kind: StatementKind::StorageDead(ptr_local),
+ });
+ } else {
+ statements.push(Statement {
+ source_info,
+ kind: StatementKind::Assign(Box::new((
+ point.resume_arg,
+ Rvalue::Use(Operand::Move(resume_arg.into())),
+ ))),
+ });
+ }
+ }
+
+ // Then jump to the real target
+ let block = body.basic_blocks_mut().push(BasicBlockData {
+ statements,
+ terminator: Some(Terminator {
+ source_info,
+ kind: TerminatorKind::Goto { target },
+ }),
+ is_cleanup: false,
+ });
+
+ (point.state, block)
+ })
+ })
+ .collect()
+}
+
+impl<'tcx> MirPass<'tcx> for StateTransform {
+ fn phase_change(&self) -> Option<MirPhase> {
+ Some(MirPhase::GeneratorsLowered)
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let Some(yield_ty) = body.yield_ty() else {
+ // This only applies to generators
+ return;
+ };
+
+ assert!(body.generator_drop().is_none());
+
+ // The first argument is the generator type passed by value
+ let gen_ty = body.local_decls.raw[1].ty;
+
+ // Get the interior types and substs which typeck computed
+ let (upvars, interior, discr_ty, movable) = match *gen_ty.kind() {
+ ty::Generator(_, substs, movability) => {
+ let substs = substs.as_generator();
+ (
+ substs.upvar_tys().collect(),
+ substs.witness(),
+ substs.discr_ty(tcx),
+ movability == hir::Movability::Movable,
+ )
+ }
+ _ => {
+ tcx.sess
+ .delay_span_bug(body.span, &format!("unexpected generator type {}", gen_ty));
+ return;
+ }
+ };
+
+ // Compute GeneratorState<yield_ty, return_ty>
+ let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
+ let state_adt_ref = tcx.adt_def(state_did);
+ let state_substs = tcx.intern_substs(&[yield_ty.into(), body.return_ty().into()]);
+ let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
+
+ // We rename RETURN_PLACE, which has type `mir.return_ty`, to `new_ret_local`;
+ // RETURN_PLACE then becomes a fresh, unused local with type `ret_ty`.
+ let new_ret_local = replace_local(RETURN_PLACE, ret_ty, body, tcx);
+
+ // We also replace the resume argument and insert an `Assign`.
+ // This is needed because the resume argument `_2` might be live across a `yield`, in which
+ // case there is no `Assign` to it that the transform can turn into a store to the generator
+ // state. After the yield the slot in the generator state would then be uninitialized.
+ let resume_local = Local::new(2);
+ let new_resume_local =
+ replace_local(resume_local, body.local_decls[resume_local].ty, body, tcx);
+
+ // When first entering the generator, move the resume argument into its new local.
+ let source_info = SourceInfo::outermost(body.span);
+ let stmts = &mut body.basic_blocks_mut()[BasicBlock::new(0)].statements;
+ stmts.insert(
+ 0,
+ Statement {
+ source_info,
+ kind: StatementKind::Assign(Box::new((
+ new_resume_local.into(),
+ Rvalue::Use(Operand::Move(resume_local.into())),
+ ))),
+ },
+ );
+
+ let always_live_locals = always_storage_live_locals(&body);
+
+ let liveness_info =
+ locals_live_across_suspend_points(tcx, body, &always_live_locals, movable);
+
+ sanitize_witness(tcx, body, interior, upvars, &liveness_info.saved_locals);
+
+ if tcx.sess.opts.unstable_opts.validate_mir {
+ let mut vis = EnsureGeneratorFieldAssignmentsNeverAlias {
+ assigned_local: None,
+ saved_locals: &liveness_info.saved_locals,
+ storage_conflicts: &liveness_info.storage_conflicts,
+ };
+
+ vis.visit_body(body);
+ }
+
+ // Extract locals which are live across suspension point into `layout`
+ // `remap` gives a mapping from local indices onto generator struct indices
+ // `storage_liveness` tells us which locals have live storage at suspension points
+ let (remap, layout, storage_liveness) = compute_layout(liveness_info, body);
+
+ let can_return = can_return(tcx, body, tcx.param_env(body.source.def_id()));
+
+ // Run the transformation which converts Places from Local to generator struct
+ // accesses for locals in `remap`.
+ // It also rewrites `return x` and `yield y` as writing a new generator state and returning
+ // GeneratorState::Complete(x) and GeneratorState::Yielded(y) respectively.
+ let mut transform = TransformVisitor {
+ tcx,
+ state_adt_ref,
+ state_substs,
+ remap,
+ storage_liveness,
+ always_live_locals,
+ suspension_points: Vec::new(),
+ new_ret_local,
+ discr_ty,
+ };
+ transform.visit_body(body);
+
+ // Update our MIR struct to reflect the changes we've made
+ body.arg_count = 2; // self, resume arg
+ body.spread_arg = None;
+
+ body.generator.as_mut().unwrap().yield_ty = None;
+ body.generator.as_mut().unwrap().generator_layout = Some(layout);
+
+ // Insert `drop(generator_struct)` which is used to drop upvars for generators in
+ // the unresumed state.
+ // This is expanded to a drop ladder in `elaborate_generator_drops`.
+ let drop_clean = insert_clean_drop(body);
+
+ dump_mir(tcx, None, "generator_pre-elab", &0, body, |_, _| Ok(()));
+
+ // Expand `drop(generator_struct)` to a drop ladder which destroys upvars.
+ // If any upvars are moved out of, drop elaboration will handle upvar destruction.
+ // However we need to also elaborate the code generated by `insert_clean_drop`.
+ elaborate_generator_drops(tcx, body);
+
+ dump_mir(tcx, None, "generator_post-transform", &0, body, |_, _| Ok(()));
+
+ // Create a copy of our MIR and use it to create the drop shim for the generator
+ let drop_shim = create_generator_drop_shim(tcx, &transform, gen_ty, body, drop_clean);
+
+ body.generator.as_mut().unwrap().generator_drop = Some(drop_shim);
+
+ // Create the Generator::resume function
+ create_generator_resume_function(tcx, transform, body, can_return);
+
+ // Run the derefer to fix `Deref`s that do not appear as the first projection of a place.
+ deref_finder(tcx, body);
+ }
+}
+
+/// Looks for any assignments between locals (e.g., `_4 = _5`) that will both be converted to fields
+/// in the generator state machine but whose storage is not marked as conflicting.
+///
+/// Validation needs to happen immediately *before* `TransformVisitor` is invoked, not after.
+///
+/// This condition would arise when the assignment is the last use of `_5` but the initial
+/// definition of `_4` if we weren't extra careful to mark all locals used inside a statement as
+/// conflicting. Non-conflicting generator saved locals may be stored at the same location within
+/// the generator state machine, which would result in ill-formed MIR: the left-hand and right-hand
+/// sides of an assignment may not alias. This caused a miscompilation in [#73137].
+///
+/// [#73137]: https://github.com/rust-lang/rust/issues/73137
+struct EnsureGeneratorFieldAssignmentsNeverAlias<'a> {
+ saved_locals: &'a GeneratorSavedLocals,
+ storage_conflicts: &'a BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+ assigned_local: Option<GeneratorSavedLocal>,
+}
+
+impl EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
+ fn saved_local_for_direct_place(&self, place: Place<'_>) -> Option<GeneratorSavedLocal> {
+ if place.is_indirect() {
+ return None;
+ }
+
+ self.saved_locals.get(place.local)
+ }
+
+ fn check_assigned_place(&mut self, place: Place<'_>, f: impl FnOnce(&mut Self)) {
+ if let Some(assigned_local) = self.saved_local_for_direct_place(place) {
+ assert!(self.assigned_local.is_none(), "`check_assigned_place` must not recurse");
+
+ self.assigned_local = Some(assigned_local);
+ f(self);
+ self.assigned_local = None;
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for EnsureGeneratorFieldAssignmentsNeverAlias<'_> {
+ fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
+ let Some(lhs) = self.assigned_local else {
+ // This visitor only invokes `visit_place` for the right-hand side of an assignment
+ // and only after setting `self.assigned_local`. However, the default impl of
+ // `Visitor::super_body` may call `visit_place` with a `NonUseContext` for places
+ // with debuginfo. Ignore them here.
+ assert!(!context.is_use());
+ return;
+ };
+
+ let Some(rhs) = self.saved_local_for_direct_place(*place) else { return };
+
+ if !self.storage_conflicts.contains(lhs, rhs) {
+ bug!(
+ "Assignment between generator saved locals whose storage is not \
+ marked as conflicting: {:?}: {:?} = {:?}",
+ location,
+ lhs,
+ rhs,
+ );
+ }
+ }
+
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ match &statement.kind {
+ StatementKind::Assign(box (lhs, rhs)) => {
+ self.check_assigned_place(*lhs, |this| this.visit_rvalue(rhs, location));
+ }
+
+ StatementKind::FakeRead(..)
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Retag(..)
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Nop => {}
+ }
+ }
+
+ fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
+ // Checking for aliasing in terminators is probably overkill, but until we have actual
+ // semantics, we should be conservative here.
+ match &terminator.kind {
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ target: Some(_),
+ cleanup: _,
+ from_hir_call: _,
+ fn_span: _,
+ } => {
+ self.check_assigned_place(*destination, |this| {
+ this.visit_operand(func, location);
+ for arg in args {
+ this.visit_operand(arg, location);
+ }
+ });
+ }
+
+ TerminatorKind::Yield { value, resume: _, resume_arg, drop: _ } => {
+ self.check_assigned_place(*resume_arg, |this| this.visit_operand(value, location));
+ }
+
+ // FIXME: Does `asm!` have any aliasing requirements?
+ TerminatorKind::InlineAsm { .. } => {}
+
+ TerminatorKind::Call { .. }
+ | TerminatorKind::Goto { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => {}
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs
new file mode 100644
index 000000000..76b1522f3
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/inline.rs
@@ -0,0 +1,1006 @@
+//! Inlining pass for MIR functions
+use crate::deref_separator::deref_finder;
+use rustc_attr::InlineAttr;
+use rustc_const_eval::transform::validate::equal_up_to_regions;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::Idx;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::visit::*;
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::{self, ConstKind, Instance, InstanceDef, ParamEnv, Ty, TyCtxt};
+use rustc_span::{hygiene::ExpnKind, ExpnData, LocalExpnId, Span};
+use rustc_target::spec::abi::Abi;
+
+use super::simplify::{remove_dead_blocks, CfgSimplifier};
+use crate::MirPass;
+use std::iter;
+use std::ops::{Range, RangeFrom};
+
+pub(crate) mod cycle;
+
+const INSTR_COST: usize = 5;
+const CALL_PENALTY: usize = 25;
+const LANDINGPAD_PENALTY: usize = 50;
+const RESUME_PENALTY: usize = 45;
+
+const UNKNOWN_SIZE_COST: usize = 10;
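+
+// Rough worked example of the cost model (thresholds are set in
+// `check_mir_body` below): a callee with four counted statements and one
+// ordinary call costs 4 * INSTR_COST + CALL_PENALTY = 45, under the default
+// threshold of 50 for functions without an `#[inline]` hint (before the
+// small-function bonus).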
+
+pub struct Inline;
+
+#[derive(Copy, Clone, Debug)]
+struct CallSite<'tcx> {
+ callee: Instance<'tcx>,
+ fn_sig: ty::PolyFnSig<'tcx>,
+ block: BasicBlock,
+ target: Option<BasicBlock>,
+ source_info: SourceInfo,
+}
+
+impl<'tcx> MirPass<'tcx> for Inline {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ if let Some(enabled) = sess.opts.unstable_opts.inline_mir {
+ return enabled;
+ }
+
+ // rust-lang/rust#101004: reverted to old inlining decision logic
+ sess.mir_opt_level() >= 3
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id()));
+ let _guard = span.enter();
+ if inline(tcx, body) {
+ debug!("running simplify cfg on {:?}", body.source);
+ CfgSimplifier::new(body).simplify();
+ remove_dead_blocks(tcx, body);
+ deref_finder(tcx, body);
+ }
+ }
+}
+
+fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool {
+ let def_id = body.source.def_id().expect_local();
+
+ // Only do inlining into fn bodies.
+ if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() {
+ return false;
+ }
+ if body.source.promoted.is_some() {
+ return false;
+ }
+ // Avoid inlining into generators, since their `optimized_mir` is used for layout computation,
+ // which can create a cycle, even when no attempt is made to inline the function in the other
+ // direction.
+ if body.generator.is_some() {
+ return false;
+ }
+
+ let param_env = tcx.param_env_reveal_all_normalized(def_id);
+
+ let mut this = Inliner {
+ tcx,
+ param_env,
+ codegen_fn_attrs: tcx.codegen_fn_attrs(def_id),
+ history: Vec::new(),
+ changed: false,
+ };
+ let blocks = BasicBlock::new(0)..body.basic_blocks().next_index();
+ this.process_blocks(body, blocks);
+ this.changed
+}
+
+struct Inliner<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ /// Caller codegen attributes.
+ codegen_fn_attrs: &'tcx CodegenFnAttrs,
+ /// Stack of inlined Instances.
+ history: Vec<ty::Instance<'tcx>>,
+ /// Indicates that the caller body has been modified.
+ changed: bool,
+}
+
+impl<'tcx> Inliner<'tcx> {
+ fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) {
+ for bb in blocks {
+ let bb_data = &caller_body[bb];
+ if bb_data.is_cleanup {
+ continue;
+ }
+
+ let Some(callsite) = self.resolve_callsite(caller_body, bb, bb_data) else {
+ continue;
+ };
+
+ let span = trace_span!("process_blocks", %callsite.callee, ?bb);
+ let _guard = span.enter();
+
+ match self.try_inlining(caller_body, &callsite) {
+ Err(reason) => {
+ debug!("not-inlined {} [{}]", callsite.callee, reason);
+ continue;
+ }
+ Ok(new_blocks) => {
+ debug!("inlined {}", callsite.callee);
+ self.changed = true;
+ self.history.push(callsite.callee);
+ self.process_blocks(caller_body, new_blocks);
+ self.history.pop();
+ }
+ }
+ }
+ }
+
+ /// Attempts to inline a callsite into the caller body. When successful, returns the basic
+ /// blocks containing the inlined body. Otherwise, returns an error describing why inlining
+ /// didn't take place.
+ fn try_inlining(
+ &self,
+ caller_body: &mut Body<'tcx>,
+ callsite: &CallSite<'tcx>,
+ ) -> Result<std::ops::Range<BasicBlock>, &'static str> {
+ let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id());
+ self.check_codegen_attributes(callsite, callee_attrs)?;
+ self.check_mir_is_available(caller_body, &callsite.callee)?;
+ let callee_body = self.tcx.instance_mir(callsite.callee.def);
+ self.check_mir_body(callsite, callee_body, callee_attrs)?;
+
+ if !self.tcx.consider_optimizing(|| {
+ format!("Inline {:?} into {:?}", callsite.callee, caller_body.source)
+ }) {
+ return Err("optimization fuel exhausted");
+ }
+
+ let Ok(callee_body) = callsite.callee.try_subst_mir_and_normalize_erasing_regions(
+ self.tcx,
+ self.param_env,
+ callee_body.clone(),
+ ) else {
+ return Err("failed to normalize callee body");
+ };
+
+ // Check call signature compatibility.
+ // Normally, this shouldn't be required, but trait normalization failure can create a
+ // validation ICE.
+ let terminator = caller_body[callsite.block].terminator.as_ref().unwrap();
+ let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() };
+ let destination_ty = destination.ty(&caller_body.local_decls, self.tcx).ty;
+ let output_type = callee_body.return_ty();
+ if !equal_up_to_regions(self.tcx, self.param_env, output_type, destination_ty) {
+ trace!(?output_type, ?destination_ty);
+ return Err("failed to normalize return type");
+ }
+ if callsite.fn_sig.abi() == Abi::RustCall {
+ let (arg_tuple, skipped_args) = match &args[..] {
+ [arg_tuple] => (arg_tuple, 0),
+ [_, arg_tuple] => (arg_tuple, 1),
+ _ => bug!("Expected `rust-call` to have 1 or 2 args"),
+ };
+
+ let arg_tuple_ty = arg_tuple.ty(&caller_body.local_decls, self.tcx);
+ let ty::Tuple(arg_tuple_tys) = arg_tuple_ty.kind() else {
+ bug!("Closure arguments are not passed as a tuple");
+ };
+
+ for (arg_ty, input) in
+ arg_tuple_tys.iter().zip(callee_body.args_iter().skip(skipped_args))
+ {
+ let input_type = callee_body.local_decls[input].ty;
+ if !equal_up_to_regions(self.tcx, self.param_env, arg_ty, input_type) {
+ trace!(?arg_ty, ?input_type);
+ return Err("failed to normalize tuple argument type");
+ }
+ }
+ } else {
+ for (arg, input) in args.iter().zip(callee_body.args_iter()) {
+ let input_type = callee_body.local_decls[input].ty;
+ let arg_ty = arg.ty(&caller_body.local_decls, self.tcx);
+ if !equal_up_to_regions(self.tcx, self.param_env, arg_ty, input_type) {
+ trace!(?arg_ty, ?input_type);
+ return Err("failed to normalize argument type");
+ }
+ }
+ }
+
+ let old_blocks = caller_body.basic_blocks().next_index();
+ self.inline_call(caller_body, &callsite, callee_body);
+ let new_blocks = old_blocks..caller_body.basic_blocks().next_index();
+
+ Ok(new_blocks)
+ }
+
+ fn check_mir_is_available(
+ &self,
+ caller_body: &Body<'tcx>,
+ callee: &Instance<'tcx>,
+ ) -> Result<(), &'static str> {
+ let caller_def_id = caller_body.source.def_id();
+ let callee_def_id = callee.def_id();
+ if callee_def_id == caller_def_id {
+ return Err("self-recursion");
+ }
+
+ match callee.def {
+ InstanceDef::Item(_) => {
+ // If there is no MIR available (either because it was not in metadata
+ // or because it's an extern function with no MIR), then the inliner
+ // won't cause cycles on this.
+ if !self.tcx.is_mir_available(callee_def_id) {
+ return Err("item MIR unavailable");
+ }
+ }
+ // These have no callable MIR of their own.
+ InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => {
+ return Err("instance without MIR (intrinsic / virtual)");
+ }
+ // This cannot result in an immediate cycle since the callee MIR is a shim, which does
+ // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we
+ // do not need to catch this here, we can wait until the inliner decides to continue
+ // inlining a second time.
+ InstanceDef::VTableShim(_)
+ | InstanceDef::ReifyShim(_)
+ | InstanceDef::FnPtrShim(..)
+ | InstanceDef::ClosureOnceShim { .. }
+ | InstanceDef::DropGlue(..)
+ | InstanceDef::CloneShim(..) => return Ok(()),
+ }
+
+ if self.tcx.is_constructor(callee_def_id) {
+ trace!("constructors always have MIR");
+ // Constructor functions cannot cause a query cycle.
+ return Ok(());
+ }
+
+ if callee_def_id.is_local() {
+ // Avoid a cycle here by using `instance_mir` only if we have
+ // a lower `DefPathHash` than the callee. This ensures that the callee will
+ // not inline us. This trick even works with incremental compilation,
+ // since `DefPathHash` is stable.
+ if self.tcx.def_path_hash(caller_def_id).local_hash()
+ < self.tcx.def_path_hash(callee_def_id).local_hash()
+ {
+ return Ok(());
+ }
+
+ // If we know for sure that the function we're calling will itself try to
+ // call us, then we avoid inlining that function.
+ if self.tcx.mir_callgraph_reachable((*callee, caller_def_id.expect_local())) {
+ return Err("caller might be reachable from callee (query cycle avoidance)");
+ }
+
+ Ok(())
+ } else {
+ // This cannot result in an immediate cycle since the callee MIR is from another crate
+ // and is already optimized. Any subsequent inlining may cause cycles, but we do
+ // not need to catch this here, we can wait until the inliner decides to continue
+ // inlining a second time.
+ trace!("functions from other crates always have MIR");
+ Ok(())
+ }
+ }
+
+ fn resolve_callsite(
+ &self,
+ caller_body: &Body<'tcx>,
+ bb: BasicBlock,
+ bb_data: &BasicBlockData<'tcx>,
+ ) -> Option<CallSite<'tcx>> {
+ // Only consider direct calls to functions
+ let terminator = bb_data.terminator();
+ if let TerminatorKind::Call { ref func, target, .. } = terminator.kind {
+ let func_ty = func.ty(caller_body, self.tcx);
+ if let ty::FnDef(def_id, substs) = *func_ty.kind() {
+ // To resolve an instance, its substs have to be fully normalized.
+ let substs = self.tcx.try_normalize_erasing_regions(self.param_env, substs).ok()?;
+ let callee =
+ Instance::resolve(self.tcx, self.param_env, def_id, substs).ok().flatten()?;
+
+ if let InstanceDef::Virtual(..) | InstanceDef::Intrinsic(_) = callee.def {
+ return None;
+ }
+
+ if self.history.contains(&callee) {
+ return None;
+ }
+
+ let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, substs);
+
+ return Some(CallSite {
+ callee,
+ fn_sig,
+ block: bb,
+ target,
+ source_info: terminator.source_info,
+ });
+ }
+ }
+
+ None
+ }
+
+ /// Returns an error if inlining is not possible based on codegen attributes alone. A success
+ /// indicates that the inlining decision should be based on other criteria.
+ fn check_codegen_attributes(
+ &self,
+ callsite: &CallSite<'tcx>,
+ callee_attrs: &CodegenFnAttrs,
+ ) -> Result<(), &'static str> {
+ match callee_attrs.inline {
+ InlineAttr::Never => return Err("never inline hint"),
+ InlineAttr::Always | InlineAttr::Hint => {}
+ InlineAttr::None => {
+ if self.tcx.sess.mir_opt_level() <= 2 {
+ return Err("at mir-opt-level=2, only #[inline] is inlined");
+ }
+ }
+ }
+
+ // Only inline local functions if they would be eligible for cross-crate
+ // inlining. This is to ensure that the final crate doesn't have MIR that
+ // references unexported symbols.
+ if callsite.callee.def_id().is_local() {
+ let is_generic = callsite.callee.substs.non_erasable_generics().next().is_some();
+ if !is_generic && !callee_attrs.requests_inline() {
+ return Err("not exported");
+ }
+ }
+
+ if callsite.fn_sig.c_variadic() {
+ return Err("C variadic");
+ }
+
+ if callee_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+ return Err("naked");
+ }
+
+ if callee_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
+ return Err("cold");
+ }
+
+ if callee_attrs.no_sanitize != self.codegen_fn_attrs.no_sanitize {
+ return Err("incompatible sanitizer set");
+ }
+
+ if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set {
+ return Err("incompatible instruction set");
+ }
+
+ for feature in &callee_attrs.target_features {
+ if !self.codegen_fn_attrs.target_features.contains(feature) {
+ return Err("incompatible target feature");
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Returns the inlining decision based on an examination of the callee MIR body.
+ /// Assumes that codegen attributes have been checked for compatibility already.
+ #[instrument(level = "debug", skip(self, callee_body))]
+ fn check_mir_body(
+ &self,
+ callsite: &CallSite<'tcx>,
+ callee_body: &Body<'tcx>,
+ callee_attrs: &CodegenFnAttrs,
+ ) -> Result<(), &'static str> {
+ let tcx = self.tcx;
+
+ let mut threshold = if callee_attrs.requests_inline() {
+ self.tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100)
+ } else {
+ self.tcx.sess.opts.unstable_opts.inline_mir_threshold.unwrap_or(50)
+ };
+
+ // Give a bonus to functions with a small number of blocks; we normally
+ // have two or three blocks for even very small functions.
+ if callee_body.basic_blocks().len() <= 3 {
+ threshold += threshold / 4;
+ }
+ debug!(" final inline threshold = {}", threshold);
+
+ // FIXME: Give a bonus to functions with only a single caller
+ let mut first_block = true;
+ let mut cost = 0;
+
+ // Traverse the MIR manually so we can account for the effects of
+ // inlining on the CFG.
+ let mut work_list = vec![START_BLOCK];
+ let mut visited = BitSet::new_empty(callee_body.basic_blocks().len());
+ while let Some(bb) = work_list.pop() {
+ if !visited.insert(bb.index()) {
+ continue;
+ }
+ let blk = &callee_body.basic_blocks()[bb];
+
+ for stmt in &blk.statements {
+ // Don't count StorageLive/StorageDead in the inlining cost.
+ match stmt.kind {
+ StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Deinit(_)
+ | StatementKind::Nop => {}
+ _ => cost += INSTR_COST,
+ }
+ }
+ let term = blk.terminator();
+ let mut is_drop = false;
+ match term.kind {
+ TerminatorKind::Drop { ref place, target, unwind }
+ | TerminatorKind::DropAndReplace { ref place, target, unwind, .. } => {
+ is_drop = true;
+ work_list.push(target);
+ // If the place doesn't actually need dropping, treat it like
+ // a regular goto.
+ let ty = callsite.callee.subst_mir(self.tcx, &place.ty(callee_body, tcx).ty);
+ if ty.needs_drop(tcx, self.param_env) {
+ cost += CALL_PENALTY;
+ if let Some(unwind) = unwind {
+ cost += LANDINGPAD_PENALTY;
+ work_list.push(unwind);
+ }
+ } else {
+ cost += INSTR_COST;
+ }
+ }
+
+ TerminatorKind::Unreachable | TerminatorKind::Call { target: None, .. }
+ if first_block =>
+ {
+ // If the function always diverges, don't inline
+ // unless the cost is zero
+ threshold = 0;
+ }
+
+ TerminatorKind::Call { func: Operand::Constant(ref f), cleanup, .. } => {
+ if let ty::FnDef(def_id, _) =
+ *callsite.callee.subst_mir(self.tcx, &f.literal.ty()).kind()
+ {
+ // Don't give intrinsics the extra penalty for calls
+ if tcx.is_intrinsic(def_id) {
+ cost += INSTR_COST;
+ } else {
+ cost += CALL_PENALTY;
+ }
+ } else {
+ cost += CALL_PENALTY;
+ }
+ if cleanup.is_some() {
+ cost += LANDINGPAD_PENALTY;
+ }
+ }
+ TerminatorKind::Assert { cleanup, .. } => {
+ cost += CALL_PENALTY;
+
+ if cleanup.is_some() {
+ cost += LANDINGPAD_PENALTY;
+ }
+ }
+ TerminatorKind::Resume => cost += RESUME_PENALTY,
+ TerminatorKind::InlineAsm { cleanup, .. } => {
+ cost += INSTR_COST;
+
+ if cleanup.is_some() {
+ cost += LANDINGPAD_PENALTY;
+ }
+ }
+ _ => cost += INSTR_COST,
+ }
+
+ if !is_drop {
+ for succ in term.successors() {
+ work_list.push(succ);
+ }
+ }
+
+ first_block = false;
+ }
+
+ // Count up the cost of local variables and temps. If we know the size,
+ // use that; otherwise use a moderately-large dummy cost.
+
+ let ptr_size = tcx.data_layout.pointer_size.bytes();
+
+ for v in callee_body.vars_and_temps_iter() {
+ let ty = callsite.callee.subst_mir(self.tcx, &callee_body.local_decls[v].ty);
+ // Cost of the var is its size in machine words, if we know it.
+ if let Some(size) = type_size_of(tcx, self.param_env, ty) {
+ cost += ((size + ptr_size - 1) / ptr_size) as usize;
+ } else {
+ cost += UNKNOWN_SIZE_COST;
+ }
+ }
+
+ if let InlineAttr::Always = callee_attrs.inline {
+ debug!("INLINING {:?} because inline(always) [cost={}]", callsite, cost);
+ Ok(())
+ } else if cost <= threshold {
+ debug!("INLINING {:?} [cost={} <= threshold={}]", callsite, cost, threshold);
+ Ok(())
+ } else {
+ debug!("NOT inlining {:?} [cost={} > threshold={}]", callsite, cost, threshold);
+ Err("cost above threshold")
+ }
+ }
+
+ fn inline_call(
+ &self,
+ caller_body: &mut Body<'tcx>,
+ callsite: &CallSite<'tcx>,
+ mut callee_body: Body<'tcx>,
+ ) {
+ let terminator = caller_body[callsite.block].terminator.take().unwrap();
+ match terminator.kind {
+ TerminatorKind::Call { args, destination, cleanup, .. } => {
+ // If the call is something like `a[*i] = f(i)`, where
+ // `i : &mut usize`, then just duplicating the `a[*i]`
+ // Place could result in two different locations if `f`
+ // writes to `i`. To prevent this we need to create a temporary
+ // borrow of the place and pass the destination as `*temp` instead.
+ fn dest_needs_borrow(place: Place<'_>) -> bool {
+ for elem in place.projection.iter() {
+ match elem {
+ ProjectionElem::Deref | ProjectionElem::Index(_) => return true,
+ _ => {}
+ }
+ }
+
+ false
+ }
+
+ let dest = if dest_needs_borrow(destination) {
+ trace!("creating temp for return destination");
+ let dest = Rvalue::Ref(
+ self.tcx.lifetimes.re_erased,
+ BorrowKind::Mut { allow_two_phase_borrow: false },
+ destination,
+ );
+ let dest_ty = dest.ty(caller_body, self.tcx);
+ let temp = Place::from(self.new_call_temp(caller_body, &callsite, dest_ty));
+ caller_body[callsite.block].statements.push(Statement {
+ source_info: callsite.source_info,
+ kind: StatementKind::Assign(Box::new((temp, dest))),
+ });
+ self.tcx.mk_place_deref(temp)
+ } else {
+ destination
+ };
+
+ // Copy the arguments if needed.
+ let args: Vec<_> = self.make_call_args(args, &callsite, caller_body, &callee_body);
+
+ let mut expn_data = ExpnData::default(
+ ExpnKind::Inlined,
+ callsite.source_info.span,
+ self.tcx.sess.edition(),
+ None,
+ None,
+ );
+ expn_data.def_site = callee_body.span;
+ let expn_data =
+ self.tcx.with_stable_hashing_context(|hcx| LocalExpnId::fresh(expn_data, hcx));
+ let mut integrator = Integrator {
+ args: &args,
+ new_locals: Local::new(caller_body.local_decls.len())..,
+ new_scopes: SourceScope::new(caller_body.source_scopes.len())..,
+ new_blocks: BasicBlock::new(caller_body.basic_blocks().len())..,
+ destination: dest,
+ callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(),
+ callsite,
+ cleanup_block: cleanup,
+ in_cleanup_block: false,
+ tcx: self.tcx,
+ expn_data,
+ always_live_locals: BitSet::new_filled(callee_body.local_decls.len()),
+ };
+
+ // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones
+ // (or existing ones, in a few special cases) in the caller.
+ integrator.visit_body(&mut callee_body);
+
+ // If there are any locals without storage markers, give them storage only for the
+ // duration of the call.
+ for local in callee_body.vars_and_temps_iter() {
+ if integrator.always_live_locals.contains(local) {
+ let new_local = integrator.map_local(local);
+ caller_body[callsite.block].statements.push(Statement {
+ source_info: callsite.source_info,
+ kind: StatementKind::StorageLive(new_local),
+ });
+ }
+ }
+ if let Some(block) = callsite.target {
+ // To avoid repeated O(n) insert, push any new statements to the end and rotate
+ // the slice once.
+ let mut n = 0;
+ for local in callee_body.vars_and_temps_iter().rev() {
+ if integrator.always_live_locals.contains(local) {
+ let new_local = integrator.map_local(local);
+ caller_body[block].statements.push(Statement {
+ source_info: callsite.source_info,
+ kind: StatementKind::StorageDead(new_local),
+ });
+ n += 1;
+ }
+ }
+ caller_body[block].statements.rotate_right(n);
+ }
+
+ // Insert all of the (mapped) parts of the callee body into the caller.
+ caller_body.local_decls.extend(callee_body.drain_vars_and_temps());
+ caller_body.source_scopes.extend(&mut callee_body.source_scopes.drain(..));
+ caller_body.var_debug_info.append(&mut callee_body.var_debug_info);
+ caller_body.basic_blocks_mut().extend(callee_body.basic_blocks_mut().drain(..));
+
+ caller_body[callsite.block].terminator = Some(Terminator {
+ source_info: callsite.source_info,
+ kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) },
+ });
+
+ // Copy only unevaluated constants from the callee_body into the caller_body.
+ // Although we are only pushing `ConstKind::Unevaluated` consts to
+ // `required_consts`, here we may not only have `ConstKind::Unevaluated`
+ // because we are calling `subst_and_normalize_erasing_regions`.
+ caller_body.required_consts.extend(
+ callee_body.required_consts.iter().copied().filter(|&ct| {
+ match ct.literal.const_for_ty() {
+ Some(ct) => matches!(ct.kind(), ConstKind::Unevaluated(_)),
+ None => true,
+ }
+ }),
+ );
+ }
+ kind => bug!("unexpected terminator kind {:?}", kind),
+ }
+ }
+
+ fn make_call_args(
+ &self,
+ args: Vec<Operand<'tcx>>,
+ callsite: &CallSite<'tcx>,
+ caller_body: &mut Body<'tcx>,
+ callee_body: &Body<'tcx>,
+ ) -> Vec<Local> {
+ let tcx = self.tcx;
+
+ // There is a bit of a mismatch between the *caller* of a closure and the *callee*.
+ // The caller provides the arguments wrapped up in a tuple:
+ //
+ // tuple_tmp = (a, b, c)
+ // Fn::call(closure_ref, tuple_tmp)
+ //
+ // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`)
+ // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has
+ // the job of unpacking this tuple. But here, we are codegen. =) So we want to create
+ // a vector like
+ //
+ // [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2]
+ //
+ // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient
+ // if we "spill" that into *another* temporary, so that we can map the argument
+ // variable in the callee MIR directly to an argument variable on our side.
+ // So we introduce temporaries like:
+ //
+ // tmp0 = tuple_tmp.0
+ // tmp1 = tuple_tmp.1
+ // tmp2 = tuple_tmp.2
+ //
+ // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`.
+ if callsite.fn_sig.abi() == Abi::RustCall && callee_body.spread_arg.is_none() {
+ let mut args = args.into_iter();
+ let self_ = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
+ let tuple = self.create_temp_if_necessary(args.next().unwrap(), callsite, caller_body);
+ assert!(args.next().is_none());
+
+ let tuple = Place::from(tuple);
+ let ty::Tuple(tuple_tys) = tuple.ty(caller_body, tcx).ty.kind() else {
+ bug!("Closure arguments are not passed as a tuple");
+ };
+
+ // The `closure_ref` in our example above.
+ let closure_ref_arg = iter::once(self_);
+
+ // The `tmp0`, `tmp1`, and `tmp2` in our example above.
+ let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| {
+ // This is e.g., `tuple_tmp.0` in our example above.
+ let tuple_field = Operand::Move(tcx.mk_place_field(tuple, Field::new(i), ty));
+
+ // Spill to a local to make e.g., `tmp0`.
+ self.create_temp_if_necessary(tuple_field, callsite, caller_body)
+ });
+
+ closure_ref_arg.chain(tuple_tmp_args).collect()
+ } else {
+ args.into_iter()
+ .map(|a| self.create_temp_if_necessary(a, callsite, caller_body))
+ .collect()
+ }
+ }
+
+ /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh
+ /// temporary `T` and an instruction `T = arg`, and returns `T`.
+ fn create_temp_if_necessary(
+ &self,
+ arg: Operand<'tcx>,
+ callsite: &CallSite<'tcx>,
+ caller_body: &mut Body<'tcx>,
+ ) -> Local {
+ // Reuse the operand if it is a moved temporary.
+ if let Operand::Move(place) = &arg
+ && let Some(local) = place.as_local()
+ && caller_body.local_kind(local) == LocalKind::Temp
+ {
+ return local;
+ }
+
+ // Otherwise, create a temporary for the argument.
+ trace!("creating temp for argument {:?}", arg);
+ let arg_ty = arg.ty(caller_body, self.tcx);
+ let local = self.new_call_temp(caller_body, callsite, arg_ty);
+ caller_body[callsite.block].statements.push(Statement {
+ source_info: callsite.source_info,
+ kind: StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))),
+ });
+ local
+ }
+
+ /// Introduces a new temporary into the caller body that is live for the duration of the call.
+ fn new_call_temp(
+ &self,
+ caller_body: &mut Body<'tcx>,
+ callsite: &CallSite<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Local {
+ let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span));
+
+ caller_body[callsite.block].statements.push(Statement {
+ source_info: callsite.source_info,
+ kind: StatementKind::StorageLive(local),
+ });
+
+ if let Some(block) = callsite.target {
+ caller_body[block].statements.insert(
+ 0,
+ Statement {
+ source_info: callsite.source_info,
+ kind: StatementKind::StorageDead(local),
+ },
+ );
+ }
+
+ local
+ }
+}
+
+fn type_size_of<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+) -> Option<u64> {
+ tcx.layout_of(param_env.and(ty)).ok().map(|layout| layout.size.bytes())
+}
+
+/// Integrates blocks from the callee function into the calling function,
+/// updating block indices, references to locals, and other control-flow
+/// bookkeeping.
+struct Integrator<'a, 'tcx> {
+ args: &'a [Local],
+ new_locals: RangeFrom<Local>,
+ new_scopes: RangeFrom<SourceScope>,
+ new_blocks: RangeFrom<BasicBlock>,
+ destination: Place<'tcx>,
+ callsite_scope: SourceScopeData<'tcx>,
+ callsite: &'a CallSite<'tcx>,
+ cleanup_block: Option<BasicBlock>,
+ in_cleanup_block: bool,
+ tcx: TyCtxt<'tcx>,
+ expn_data: LocalExpnId,
+ always_live_locals: BitSet<Local>,
+}
+
+impl Integrator<'_, '_> {
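+ /// Maps a callee local into the caller body: `_0` (the return place)
+ /// becomes the call destination's local, argument locals map to the
+ /// prepared argument temporaries, and all remaining locals are shifted
+ /// past the caller's preexisting locals.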
+ fn map_local(&self, local: Local) -> Local {
+ let new = if local == RETURN_PLACE {
+ self.destination.local
+ } else {
+ let idx = local.index() - 1;
+ if idx < self.args.len() {
+ self.args[idx]
+ } else {
+ Local::new(self.new_locals.start.index() + (idx - self.args.len()))
+ }
+ };
+ trace!("mapping local `{:?}` to `{:?}`", local, new);
+ new
+ }
+
+ fn map_scope(&self, scope: SourceScope) -> SourceScope {
+ let new = SourceScope::new(self.new_scopes.start.index() + scope.index());
+ trace!("mapping scope `{:?}` to `{:?}`", scope, new);
+ new
+ }
+
+ fn map_block(&self, block: BasicBlock) -> BasicBlock {
+ let new = BasicBlock::new(self.new_blocks.start.index() + block.index());
+ trace!("mapping block `{:?}` to `{:?}`", block, new);
+ new
+ }
+}
+
+impl<'tcx> MutVisitor<'tcx> for Integrator<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_local(&mut self, local: &mut Local, _ctxt: PlaceContext, _location: Location) {
+ *local = self.map_local(*local);
+ }
+
+ fn visit_source_scope_data(&mut self, scope_data: &mut SourceScopeData<'tcx>) {
+ self.super_source_scope_data(scope_data);
+ if scope_data.parent_scope.is_none() {
+ // Attach the outermost callee scope as a child of the callsite
+ // scope, via the `parent_scope` and `inlined_parent_scope` chains.
+ scope_data.parent_scope = Some(self.callsite.source_info.scope);
+ assert_eq!(scope_data.inlined_parent_scope, None);
+ scope_data.inlined_parent_scope = if self.callsite_scope.inlined.is_some() {
+ Some(self.callsite.source_info.scope)
+ } else {
+ self.callsite_scope.inlined_parent_scope
+ };
+
+ // Mark the outermost callee scope as an inlined one.
+ assert_eq!(scope_data.inlined, None);
+ scope_data.inlined = Some((self.callsite.callee, self.callsite.source_info.span));
+ } else if scope_data.inlined_parent_scope.is_none() {
+ // Make it easy to find the scope with `inlined` set above.
+ scope_data.inlined_parent_scope = Some(self.map_scope(OUTERMOST_SOURCE_SCOPE));
+ }
+ }
+
+ fn visit_source_scope(&mut self, scope: &mut SourceScope) {
+ *scope = self.map_scope(*scope);
+ }
+
+ fn visit_span(&mut self, span: &mut Span) {
+ // Make sure that all spans track the fact that they were inlined.
+ *span = span.fresh_expansion(self.expn_data);
+ }
+
+ fn visit_place(&mut self, place: &mut Place<'tcx>, context: PlaceContext, location: Location) {
+ for elem in place.projection {
+ // FIXME: Make sure that return place is not used in an indexing projection, since it
+ // won't be rebased as it is supposed to be.
+ assert_ne!(ProjectionElem::Index(RETURN_PLACE), elem);
+ }
+
+ // If this is the `RETURN_PLACE`, we need to rebase any projections onto it.
+ let dest_proj_len = self.destination.projection.len();
+ if place.local == RETURN_PLACE && dest_proj_len > 0 {
+ let mut projs = Vec::with_capacity(dest_proj_len + place.projection.len());
+ projs.extend(self.destination.projection);
+ projs.extend(place.projection);
+
+ place.projection = self.tcx.intern_place_elems(&*projs);
+ }
+ // Handles integrating any locals that occur in the base or projections.
+ self.super_place(place, context, location)
+ }
+
+ fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
+ self.in_cleanup_block = data.is_cleanup;
+ self.super_basic_block_data(block, data);
+ self.in_cleanup_block = false;
+ }
+
+ fn visit_retag(&mut self, kind: &mut RetagKind, place: &mut Place<'tcx>, loc: Location) {
+ self.super_retag(kind, place, loc);
+
+ // We have to patch all inlined retags to be aware that they are no longer
+ // happening on function entry.
+ if *kind == RetagKind::FnEntry {
+ *kind = RetagKind::Default;
+ }
+ }
+
+ fn visit_statement(&mut self, statement: &mut Statement<'tcx>, location: Location) {
+ if let StatementKind::StorageLive(local) | StatementKind::StorageDead(local) =
+ statement.kind
+ {
+ self.always_live_locals.remove(local);
+ }
+ self.super_statement(statement, location);
+ }
+
+ fn visit_terminator(&mut self, terminator: &mut Terminator<'tcx>, loc: Location) {
+ // Don't try to modify the implicit `_0` access on return (`return` terminators are
+ // replaced down below anyway).
+ if !matches!(terminator.kind, TerminatorKind::Return) {
+ self.super_terminator(terminator, loc);
+ }
+
+ match terminator.kind {
+ TerminatorKind::GeneratorDrop | TerminatorKind::Yield { .. } => bug!(),
+ TerminatorKind::Goto { ref mut target } => {
+ *target = self.map_block(*target);
+ }
+ TerminatorKind::SwitchInt { ref mut targets, .. } => {
+ for tgt in targets.all_targets_mut() {
+ *tgt = self.map_block(*tgt);
+ }
+ }
+ TerminatorKind::Drop { ref mut target, ref mut unwind, .. }
+ | TerminatorKind::DropAndReplace { ref mut target, ref mut unwind, .. } => {
+ *target = self.map_block(*target);
+ if let Some(tgt) = *unwind {
+ *unwind = Some(self.map_block(tgt));
+ } else if !self.in_cleanup_block {
+ // Unless this drop is in a cleanup block, add an unwind edge to
+ // the original call's cleanup block
+ *unwind = self.cleanup_block;
+ }
+ }
+ TerminatorKind::Call { ref mut target, ref mut cleanup, .. } => {
+ if let Some(ref mut tgt) = *target {
+ *tgt = self.map_block(*tgt);
+ }
+ if let Some(tgt) = *cleanup {
+ *cleanup = Some(self.map_block(tgt));
+ } else if !self.in_cleanup_block {
+ // Unless this call is in a cleanup block, add an unwind edge to
+ // the original call's cleanup block
+ *cleanup = self.cleanup_block;
+ }
+ }
+ TerminatorKind::Assert { ref mut target, ref mut cleanup, .. } => {
+ *target = self.map_block(*target);
+ if let Some(tgt) = *cleanup {
+ *cleanup = Some(self.map_block(tgt));
+ } else if !self.in_cleanup_block {
+ // Unless this assert is in a cleanup block, add an unwind edge to
+ // the original call's cleanup block
+ *cleanup = self.cleanup_block;
+ }
+ }
+ TerminatorKind::Return => {
+ terminator.kind = if let Some(tgt) = self.callsite.target {
+ TerminatorKind::Goto { target: tgt }
+ } else {
+ TerminatorKind::Unreachable
+ }
+ }
+ TerminatorKind::Resume => {
+ if let Some(tgt) = self.cleanup_block {
+ terminator.kind = TerminatorKind::Goto { target: tgt }
+ }
+ }
+ TerminatorKind::Abort => {}
+ TerminatorKind::Unreachable => {}
+ TerminatorKind::FalseEdge { ref mut real_target, ref mut imaginary_target } => {
+ *real_target = self.map_block(*real_target);
+ *imaginary_target = self.map_block(*imaginary_target);
+ }
+ TerminatorKind::FalseUnwind { real_target: _, unwind: _ } =>
+ // see the ordering of passes in the optimized_mir query.
+ {
+ bug!("False unwinds should have been removed before inlining")
+ }
+ TerminatorKind::InlineAsm { ref mut destination, ref mut cleanup, .. } => {
+ if let Some(ref mut tgt) = *destination {
+ *tgt = self.map_block(*tgt);
+ } else if !self.in_cleanup_block {
+ // Unless this inline asm is in a cleanup block, add an unwind edge to
+ // the original call's cleanup block
+ *cleanup = self.cleanup_block;
+ }
+ }
+ }
+ }
+}
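For intuition, a minimal standalone sketch of the offset-based renumbering the `Integrator` performs: callee blocks (and, analogously, scopes and locals) are appended to the caller's vectors, so mapping a callee-local index is a fixed shift. The `Remap` and `BasicBlock` types below are invented stand-ins, not the compiler's types.

```rust
// Illustrative only: offset-based remapping as used by `Integrator` above.
#[derive(Clone, Copy, Debug, PartialEq)]
struct BasicBlock(usize);

struct Remap {
    /// Caller index at which the callee's first block was appended.
    new_blocks_start: usize,
}

impl Remap {
    fn map_block(&self, callee_local: BasicBlock) -> BasicBlock {
        BasicBlock(self.new_blocks_start + callee_local.0)
    }
}

fn main() {
    // The caller already had 5 blocks, so callee blocks land at index 5+.
    let remap = Remap { new_blocks_start: 5 };
    // Callee-local bb2 becomes caller bb7 after integration.
    assert_eq!(remap.map_block(BasicBlock(2)), BasicBlock(7));
}
```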
diff --git a/compiler/rustc_mir_transform/src/inline/cycle.rs b/compiler/rustc_mir_transform/src/inline/cycle.rs
new file mode 100644
index 000000000..7810218fd
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/inline/cycle.rs
@@ -0,0 +1,168 @@
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::mir::TerminatorKind;
+use rustc_middle::ty::TypeVisitable;
+use rustc_middle::ty::{self, subst::SubstsRef, InstanceDef, TyCtxt};
+use rustc_session::Limit;
+
+// FIXME: check whether it is cheaper to precompute the entire call graph instead of invoking
+// this query ridiculously often.
+#[instrument(level = "debug", skip(tcx, root, target))]
+pub(crate) fn mir_callgraph_reachable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (root, target): (ty::Instance<'tcx>, LocalDefId),
+) -> bool {
+ trace!(%root, target = %tcx.def_path_str(target.to_def_id()));
+ let param_env = tcx.param_env_reveal_all_normalized(target);
+ assert_ne!(
+ root.def_id().expect_local(),
+ target,
+ "you should not call `mir_callgraph_reachable` on immediate self recursion"
+ );
+ assert!(
+ matches!(root.def, InstanceDef::Item(_)),
+ "you should not call `mir_callgraph_reachable` on shims"
+ );
+ assert!(
+ !tcx.is_constructor(root.def_id()),
+ "you should not call `mir_callgraph_reachable` on enum/struct constructor functions"
+ );
+ #[instrument(
+ level = "debug",
+ skip(tcx, param_env, target, stack, seen, recursion_limiter, caller, recursion_limit)
+ )]
+ fn process<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ caller: ty::Instance<'tcx>,
+ target: LocalDefId,
+ stack: &mut Vec<ty::Instance<'tcx>>,
+ seen: &mut FxHashSet<ty::Instance<'tcx>>,
+ recursion_limiter: &mut FxHashMap<DefId, usize>,
+ recursion_limit: Limit,
+ ) -> bool {
+ trace!(%caller);
+ for &(callee, substs) in tcx.mir_inliner_callees(caller.def) {
+ let Ok(substs) = caller.try_subst_mir_and_normalize_erasing_regions(tcx, param_env, substs) else {
+ trace!(?caller, ?param_env, ?substs, "cannot normalize, skipping");
+ continue;
+ };
+ let Ok(Some(callee)) = ty::Instance::resolve(tcx, param_env, callee, substs) else {
+ trace!(?callee, "cannot resolve, skipping");
+ continue;
+ };
+
+ // Found a path.
+ if callee.def_id() == target.to_def_id() {
+ return true;
+ }
+
+ if tcx.is_constructor(callee.def_id()) {
+ trace!("constructors always have MIR");
+ // Constructor functions cannot cause a query cycle.
+ continue;
+ }
+
+ match callee.def {
+ InstanceDef::Item(_) => {
+ // If there is no MIR available (either because it was not in metadata or
+ // because it has no MIR because it's an extern function), then the inliner
+ // won't cause cycles on this.
+ if !tcx.is_mir_available(callee.def_id()) {
+ trace!(?callee, "no mir available, skipping");
+ continue;
+ }
+ }
+ // These have no callable MIR of their own.
+ InstanceDef::Intrinsic(_) | InstanceDef::Virtual(..) => continue,
+ // These have MIR, and if that MIR is inlined, substituted, and inlining is then run
+ // again, a function item can end up getting inlined. Thus we could cause a cycle
+ // that way.
+ InstanceDef::VTableShim(_)
+ | InstanceDef::ReifyShim(_)
+ | InstanceDef::FnPtrShim(..)
+ | InstanceDef::ClosureOnceShim { .. }
+ | InstanceDef::CloneShim(..) => {}
+ InstanceDef::DropGlue(..) => {
+ // FIXME: A not fully substituted drop shim can cause ICEs if one attempts to
+ // have its MIR built. Likely oli-obk just screwed up the `ParamEnv`s, so this
+ // needs some more analysis.
+ if callee.needs_subst() {
+ continue;
+ }
+ }
+ }
+
+ if seen.insert(callee) {
+ let recursion = recursion_limiter.entry(callee.def_id()).or_default();
+ trace!(?callee, recursion = *recursion);
+ if recursion_limit.value_within_limit(*recursion) {
+ *recursion += 1;
+ stack.push(callee);
+ let found_recursion = ensure_sufficient_stack(|| {
+ process(
+ tcx,
+ param_env,
+ callee,
+ target,
+ stack,
+ seen,
+ recursion_limiter,
+ recursion_limit,
+ )
+ });
+ if found_recursion {
+ return true;
+ }
+ stack.pop();
+ } else {
+ // Pessimistically assume that there could be recursion.
+ return true;
+ }
+ }
+ }
+ false
+ }
+ process(
+ tcx,
+ param_env,
+ root,
+ target,
+ &mut Vec::new(),
+ &mut FxHashSet::default(),
+ &mut FxHashMap::default(),
+ tcx.recursion_limit(),
+ )
+}
+
+pub(crate) fn mir_inliner_callees<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: ty::InstanceDef<'tcx>,
+) -> &'tcx [(DefId, SubstsRef<'tcx>)] {
+ let steal;
+ let guard;
+ let body = match (instance, instance.def_id().as_local()) {
+ (InstanceDef::Item(_), Some(def_id)) => {
+ let def = ty::WithOptConstParam::unknown(def_id);
+ steal = tcx.mir_promoted(def).0;
+ guard = steal.borrow();
+ &*guard
+ }
+ // Functions from other crates and MIR shims
+ _ => tcx.instance_mir(instance),
+ };
+ let mut calls = FxIndexSet::default();
+ for bb_data in body.basic_blocks() {
+ let terminator = bb_data.terminator();
+ if let TerminatorKind::Call { func, .. } = &terminator.kind {
+ let ty = func.ty(&body.local_decls, tcx);
+ let call = match ty.kind() {
+ ty::FnDef(def_id, substs) => (*def_id, *substs),
+ _ => continue,
+ };
+ calls.insert(call);
+ }
+ }
+ tcx.arena.alloc_from_iter(calls.iter().copied())
+}
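The walk above is easier to see on a toy model: a depth-first search over a call graph with a `seen` set and a per-callee recursion budget, pessimistically reporting reachability when the budget runs out. The `CallGraph` representation below is invented for illustration and does not mirror the query API.

```rust
use std::collections::{HashMap, HashSet};

// Each function id maps to the ids of the functions it calls.
type CallGraph = HashMap<u32, Vec<u32>>;

fn reachable(
    graph: &CallGraph,
    caller: u32,
    target: u32,
    seen: &mut HashSet<u32>,
    recursion: &mut HashMap<u32, usize>,
    limit: usize,
) -> bool {
    for &callee in graph.get(&caller).into_iter().flatten() {
        // Found a path back to the target.
        if callee == target {
            return true;
        }
        if seen.insert(callee) {
            let depth = recursion.entry(callee).or_default();
            if *depth < limit {
                *depth += 1;
                if reachable(graph, callee, target, seen, recursion, limit) {
                    return true;
                }
            } else {
                // Pessimistically assume that there could be recursion.
                return true;
            }
        }
    }
    false
}

fn main() {
    // 1 -> 2 -> 3 -> 1: inlining along this chain could re-enter function 1.
    let graph: CallGraph = HashMap::from([(1, vec![2]), (2, vec![3]), (3, vec![1])]);
    assert!(reachable(&graph, 1, 1, &mut HashSet::new(), &mut HashMap::new(), 32));
}
```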
diff --git a/compiler/rustc_mir_transform/src/instcombine.rs b/compiler/rustc_mir_transform/src/instcombine.rs
new file mode 100644
index 000000000..2f3c65869
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/instcombine.rs
@@ -0,0 +1,203 @@
+//! Performs various peephole optimizations.
+
+use crate::MirPass;
+use rustc_hir::Mutability;
+use rustc_middle::mir::{
+ BinOp, Body, Constant, ConstantKind, LocalDecls, Operand, Place, ProjectionElem, Rvalue,
+ SourceInfo, Statement, StatementKind, Terminator, TerminatorKind, UnOp,
+};
+use rustc_middle::ty::{self, TyCtxt};
+
+pub struct InstCombine;
+
+impl<'tcx> MirPass<'tcx> for InstCombine {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() > 0
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let ctx = InstCombineContext { tcx, local_decls: &body.local_decls };
+ for block in body.basic_blocks.as_mut() {
+ for statement in block.statements.iter_mut() {
+ match statement.kind {
+ StatementKind::Assign(box (_place, ref mut rvalue)) => {
+ ctx.combine_bool_cmp(&statement.source_info, rvalue);
+ ctx.combine_ref_deref(&statement.source_info, rvalue);
+ ctx.combine_len(&statement.source_info, rvalue);
+ }
+ _ => {}
+ }
+ }
+
+ ctx.combine_primitive_clone(
+ &mut block.terminator.as_mut().unwrap(),
+ &mut block.statements,
+ );
+ }
+ }
+}
+
+struct InstCombineContext<'tcx, 'a> {
+ tcx: TyCtxt<'tcx>,
+ local_decls: &'a LocalDecls<'tcx>,
+}
+
+impl<'tcx> InstCombineContext<'tcx, '_> {
+ fn should_combine(&self, source_info: &SourceInfo, rvalue: &Rvalue<'tcx>) -> bool {
+ self.tcx.consider_optimizing(|| {
+ format!("InstCombine - Rvalue: {:?} SourceInfo: {:?}", rvalue, source_info)
+ })
+ }
+
+ /// Transform boolean comparisons into logical operations.
+ fn combine_bool_cmp(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
+ match rvalue {
+ Rvalue::BinaryOp(op @ (BinOp::Eq | BinOp::Ne), box (a, b)) => {
+ let new = match (op, self.try_eval_bool(a), self.try_eval_bool(b)) {
+ // Transform "Eq(a, true)" ==> "a"
+ (BinOp::Eq, _, Some(true)) => Some(Rvalue::Use(a.clone())),
+
+ // Transform "Ne(a, false)" ==> "a"
+ (BinOp::Ne, _, Some(false)) => Some(Rvalue::Use(a.clone())),
+
+ // Transform "Eq(true, b)" ==> "b"
+ (BinOp::Eq, Some(true), _) => Some(Rvalue::Use(b.clone())),
+
+ // Transform "Ne(false, b)" ==> "b"
+ (BinOp::Ne, Some(false), _) => Some(Rvalue::Use(b.clone())),
+
+ // Transform "Eq(false, b)" ==> "Not(b)"
+ (BinOp::Eq, Some(false), _) => Some(Rvalue::UnaryOp(UnOp::Not, b.clone())),
+
+ // Transform "Ne(true, b)" ==> "Not(b)"
+ (BinOp::Ne, Some(true), _) => Some(Rvalue::UnaryOp(UnOp::Not, b.clone())),
+
+ // Transform "Eq(a, false)" ==> "Not(a)"
+ (BinOp::Eq, _, Some(false)) => Some(Rvalue::UnaryOp(UnOp::Not, a.clone())),
+
+ // Transform "Ne(a, true)" ==> "Not(a)"
+ (BinOp::Ne, _, Some(true)) => Some(Rvalue::UnaryOp(UnOp::Not, a.clone())),
+
+ _ => None,
+ };
+
+ if let Some(new) = new && self.should_combine(source_info, rvalue) {
+ *rvalue = new;
+ }
+ }
+
+ _ => {}
+ }
+ }
+
+ fn try_eval_bool(&self, a: &Operand<'_>) -> Option<bool> {
+ let a = a.constant()?;
+ if a.literal.ty().is_bool() { a.literal.try_to_bool() } else { None }
+ }
+
+ /// Transform "&(*a)" ==> "a".
+ fn combine_ref_deref(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
+ if let Rvalue::Ref(_, _, place) = rvalue {
+ if let Some((base, ProjectionElem::Deref)) = place.as_ref().last_projection() {
+ if let ty::Ref(_, _, Mutability::Not) =
+ base.ty(self.local_decls, self.tcx).ty.kind()
+ {
+ // The dereferenced place must have type `&_`, so that we don't copy `&mut _`.
+ } else {
+ return;
+ }
+
+ if !self.should_combine(source_info, rvalue) {
+ return;
+ }
+
+ *rvalue = Rvalue::Use(Operand::Copy(Place {
+ local: base.local,
+ projection: self.tcx.intern_place_elems(base.projection),
+ }));
+ }
+ }
+ }
+
+ /// Transform "Len([_; N])" ==> "N".
+ fn combine_len(&self, source_info: &SourceInfo, rvalue: &mut Rvalue<'tcx>) {
+ if let Rvalue::Len(ref place) = *rvalue {
+ let place_ty = place.ty(self.local_decls, self.tcx).ty;
+ if let ty::Array(_, len) = *place_ty.kind() {
+ if !self.should_combine(source_info, rvalue) {
+ return;
+ }
+
+ let literal = ConstantKind::from_const(len, self.tcx);
+ let constant = Constant { span: source_info.span, literal, user_ty: None };
+ *rvalue = Rvalue::Use(Operand::Constant(Box::new(constant)));
+ }
+ }
+ }
+
+ fn combine_primitive_clone(
+ &self,
+ terminator: &mut Terminator<'tcx>,
+ statements: &mut Vec<Statement<'tcx>>,
+ ) {
+ let TerminatorKind::Call { func, args, destination, target, .. } = &mut terminator.kind
+ else { return };
+
+ // It's definitely not a clone if there are multiple arguments
+ if args.len() != 1 {
+ return;
+ }
+
+ let Some(destination_block) = *target
+ else { return };
+
+ // Only bother looking more if it's easy to know what we're calling
+ let Some((fn_def_id, fn_substs)) = func.const_fn_def()
+ else { return };
+
+ // Clone needs one subst, so we can cheaply rule out other stuff
+ if fn_substs.len() != 1 {
+ return;
+ }
+
+ // These types are easily available from locals, so check that before
+ // doing DefId lookups to figure out what we're actually calling.
+ let arg_ty = args[0].ty(self.local_decls, self.tcx);
+
+ let ty::Ref(_region, inner_ty, Mutability::Not) = *arg_ty.kind()
+ else { return };
+
+ if !inner_ty.is_trivially_pure_clone_copy() {
+ return;
+ }
+
+ let trait_def_id = self.tcx.trait_of_item(fn_def_id);
+ if trait_def_id.is_none() || trait_def_id != self.tcx.lang_items().clone_trait() {
+ return;
+ }
+
+ if !self.tcx.consider_optimizing(|| {
+ format!(
+ "InstCombine - Call: {:?} SourceInfo: {:?}",
+ (fn_def_id, fn_substs),
+ terminator.source_info
+ )
+ }) {
+ return;
+ }
+
+ let Some(arg_place) = args.pop().unwrap().place()
+ else { return };
+
+ statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::Assign(Box::new((
+ *destination,
+ Rvalue::Use(Operand::Copy(
+ arg_place.project_deeper(&[ProjectionElem::Deref], self.tcx),
+ )),
+ ))),
+ });
+ terminator.kind = TerminatorKind::Goto { target: destination_block };
+ }
+}
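The bool-comparison combine is the same rewrite one would apply to any expression tree. Here is a self-contained sketch on an invented `Expr` type; the real pass operates on MIR rvalues, not this toy.

```rust
// A toy expression rewriter with the same shape as `combine_bool_cmp` above:
// a comparison against a known boolean constant collapses to a plain use or a
// negation.
#[derive(Clone, Debug, PartialEq)]
enum Expr {
    Var(&'static str),
    Const(bool),
    Eq(Box<Expr>, Box<Expr>),
    Ne(Box<Expr>, Box<Expr>),
    Not(Box<Expr>),
}

fn combine_bool_cmp(e: Expr) -> Expr {
    use Expr::*;
    match e {
        // Eq(a, true) and Ne(a, false) are just `a`.
        Eq(a, b) if *b == Const(true) => *a,
        Ne(a, b) if *b == Const(false) => *a,
        // Eq(a, false) and Ne(a, true) are `Not(a)`.
        Eq(a, b) if *b == Const(false) => Not(a),
        Ne(a, b) if *b == Const(true) => Not(a),
        other => other,
    }
}

fn main() {
    let e = Expr::Ne(Box::new(Expr::Var("x")), Box::new(Expr::Const(false)));
    assert_eq!(combine_bool_cmp(e), Expr::Var("x"));
}
```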
diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs
new file mode 100644
index 000000000..d968a4885
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/lib.rs
@@ -0,0 +1,575 @@
+#![allow(rustc::potential_query_instability)]
+#![feature(box_patterns)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(map_try_insert)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(once_cell)]
+#![feature(option_get_or_insert_default)]
+#![feature(trusted_step)]
+#![feature(try_blocks)]
+#![feature(yeet_expr)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+use required_consts::RequiredConstsVisitor;
+use rustc_const_eval::util;
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_data_structures::steal::Steal;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::visit::Visitor as _;
+use rustc_middle::mir::{traversal, Body, ConstQualifs, MirPass, MirPhase, Promoted};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+use rustc_span::{Span, Symbol};
+
+#[macro_use]
+mod pass_manager;
+
+use pass_manager::{self as pm, Lint, MirLint, WithMinOptLevel};
+
+mod abort_unwinding_calls;
+mod add_call_guards;
+mod add_moves_for_packed_drops;
+mod add_retag;
+mod check_const_item_mutation;
+mod check_packed_ref;
+pub mod check_unsafety;
+// This pass is public to allow external drivers to perform MIR cleanup
+pub mod cleanup_post_borrowck;
+mod const_debuginfo;
+mod const_goto;
+mod const_prop;
+mod const_prop_lint;
+mod coverage;
+mod dead_store_elimination;
+mod deaggregator;
+mod deduplicate_blocks;
+mod deref_separator;
+mod dest_prop;
+pub mod dump_mir;
+mod early_otherwise_branch;
+mod elaborate_box_derefs;
+mod elaborate_drops;
+mod ffi_unwind_calls;
+mod function_item_references;
+mod generator;
+mod inline;
+mod instcombine;
+mod lower_intrinsics;
+mod lower_slice_len;
+mod marker;
+mod match_branches;
+mod multiple_return_terminators;
+mod normalize_array_len;
+mod nrvo;
+// This pass is public to allow external drivers to perform MIR cleanup
+pub mod remove_false_edges;
+mod remove_noop_landing_pads;
+mod remove_storage_markers;
+mod remove_uninit_drops;
+mod remove_unneeded_drops;
+mod remove_zsts;
+mod required_consts;
+mod reveal_all;
+mod separate_const_switch;
+mod shim;
+// This pass is public to allow external drivers to perform MIR cleanup
+pub mod simplify;
+mod simplify_branches;
+mod simplify_comparison_integral;
+mod simplify_try;
+mod uninhabited_enum_branching;
+mod unreachable_prop;
+
+use rustc_const_eval::transform::check_consts::{self, ConstCx};
+use rustc_const_eval::transform::promote_consts;
+use rustc_const_eval::transform::validate;
+use rustc_mir_dataflow::rustc_peek;
+
+pub fn provide(providers: &mut Providers) {
+ check_unsafety::provide(providers);
+ check_packed_ref::provide(providers);
+ coverage::query::provide(providers);
+ ffi_unwind_calls::provide(providers);
+ shim::provide(providers);
+ *providers = Providers {
+ mir_keys,
+ mir_const,
+ mir_const_qualif: |tcx, def_id| {
+ let def_id = def_id.expect_local();
+ if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
+ tcx.mir_const_qualif_const_arg(def)
+ } else {
+ mir_const_qualif(tcx, ty::WithOptConstParam::unknown(def_id))
+ }
+ },
+ mir_const_qualif_const_arg: |tcx, (did, param_did)| {
+ mir_const_qualif(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
+ },
+ mir_promoted,
+ mir_drops_elaborated_and_const_checked,
+ mir_for_ctfe,
+ mir_for_ctfe_of_const_arg,
+ optimized_mir,
+ is_mir_available,
+ is_ctfe_mir_available: |tcx, did| is_mir_available(tcx, did),
+ mir_callgraph_reachable: inline::cycle::mir_callgraph_reachable,
+ mir_inliner_callees: inline::cycle::mir_inliner_callees,
+ promoted_mir: |tcx, def_id| {
+ let def_id = def_id.expect_local();
+ if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
+ tcx.promoted_mir_of_const_arg(def)
+ } else {
+ promoted_mir(tcx, ty::WithOptConstParam::unknown(def_id))
+ }
+ },
+ promoted_mir_of_const_arg: |tcx, (did, param_did)| {
+ promoted_mir(tcx, ty::WithOptConstParam { did, const_param_did: Some(param_did) })
+ },
+ ..*providers
+ };
+}
+
+fn is_mir_available(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ let def_id = def_id.expect_local();
+ tcx.mir_keys(()).contains(&def_id)
+}
+
+/// Finds the full set of `DefId`s within the current crate that have
+/// MIR associated with them.
+fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> {
+ let mut set = FxIndexSet::default();
+
+ // All body-owners have MIR associated with them.
+ set.extend(tcx.hir().body_owners());
+
+ // Additionally, tuple struct/variant constructors have MIR, but
+ // they don't have a BodyId, so we need to build them separately.
+ struct GatherCtors<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ set: &'a mut FxIndexSet<LocalDefId>,
+ }
+ impl<'tcx> Visitor<'tcx> for GatherCtors<'_, 'tcx> {
+ fn visit_variant_data(
+ &mut self,
+ v: &'tcx hir::VariantData<'tcx>,
+ _: Symbol,
+ _: &'tcx hir::Generics<'tcx>,
+ _: hir::HirId,
+ _: Span,
+ ) {
+ if let hir::VariantData::Tuple(_, hir_id) = *v {
+ self.set.insert(self.tcx.hir().local_def_id(hir_id));
+ }
+ intravisit::walk_struct_def(self, v)
+ }
+ }
+ tcx.hir().visit_all_item_likes_in_crate(&mut GatherCtors { tcx, set: &mut set });
+
+ set
+}
+
+fn mir_const_qualif(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> ConstQualifs {
+ let const_kind = tcx.hir().body_const_context(def.did);
+
+ // No need to const-check a non-const `fn`.
+ if const_kind.is_none() {
+ return Default::default();
+ }
+
+ // N.B., this `borrow()` is guaranteed to be valid (i.e., the value
+ // cannot yet be stolen), because `mir_promoted()`, which steals
+ // from `mir_const()`, forces this query to execute before
+ // performing the steal.
+ let body = &tcx.mir_const(def).borrow();
+
+ if body.return_ty().references_error() {
+ tcx.sess.delay_span_bug(body.span, "mir_const_qualif: MIR had errors");
+ return Default::default();
+ }
+
+ let ccx = check_consts::ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def.did) };
+
+ let mut validator = check_consts::check::Checker::new(&ccx);
+ validator.check_body();
+
+ // We return the qualifs in the return place for every MIR body, even though it is only used
+ // when deciding to promote a reference to a `const` for now.
+ validator.qualifs_in_return_place()
+}
+
+/// Make MIR ready for const evaluation. This is run on all MIR, not just on consts!
+fn mir_const<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx Steal<Body<'tcx>> {
+ if let Some(def) = def.try_upgrade(tcx) {
+ return tcx.mir_const(def);
+ }
+
+ // Unsafety check uses the raw mir, so make sure it is run.
+ if !tcx.sess.opts.unstable_opts.thir_unsafeck {
+ if let Some(param_did) = def.const_param_did {
+ tcx.ensure().unsafety_check_result_for_const_arg((def.did, param_did));
+ } else {
+ tcx.ensure().unsafety_check_result(def.did);
+ }
+ }
+
+ // has_ffi_unwind_calls query uses the raw mir, so make sure it is run.
+ tcx.ensure().has_ffi_unwind_calls(def.did);
+
+ let mut body = tcx.mir_built(def).steal();
+
+ rustc_middle::mir::dump_mir(tcx, None, "mir_map", &0, &body, |_, _| Ok(()));
+
+ pm::run_passes(
+ tcx,
+ &mut body,
+ &[
+ // MIR-level lints.
+ &Lint(check_packed_ref::CheckPackedRef),
+ &Lint(check_const_item_mutation::CheckConstItemMutation),
+ &Lint(function_item_references::FunctionItemReferences),
+ // What we need to do constant evaluation.
+ &simplify::SimplifyCfg::new("initial"),
+ &rustc_peek::SanityCheck, // Just a lint
+ &marker::PhaseChange(MirPhase::Const),
+ ],
+ );
+ tcx.alloc_steal_mir(body)
+}
+
+/// Compute the main MIR body and the list of MIR bodies of the promoteds.
+fn mir_promoted<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> (&'tcx Steal<Body<'tcx>>, &'tcx Steal<IndexVec<Promoted, Body<'tcx>>>) {
+ if let Some(def) = def.try_upgrade(tcx) {
+ return tcx.mir_promoted(def);
+ }
+
+ // Ensure that we compute the `mir_const_qualif` for constants at
+ // this point, before we steal the mir-const result.
+ // Also this means promotion can rely on all const checks having been done.
+ let const_qualifs = tcx.mir_const_qualif_opt_const_arg(def);
+ let mut body = tcx.mir_const(def).steal();
+ if let Some(error_reported) = const_qualifs.tainted_by_errors {
+ body.tainted_by_errors = Some(error_reported);
+ }
+
+ let mut required_consts = Vec::new();
+ let mut required_consts_visitor = RequiredConstsVisitor::new(&mut required_consts);
+ for (bb, bb_data) in traversal::reverse_postorder(&body) {
+ required_consts_visitor.visit_basic_block_data(bb, bb_data);
+ }
+ body.required_consts = required_consts;
+
+ // What we need to run borrowck etc.
+ let promote_pass = promote_consts::PromoteTemps::default();
+ pm::run_passes(
+ tcx,
+ &mut body,
+ &[
+ &promote_pass,
+ &simplify::SimplifyCfg::new("promote-consts"),
+ &coverage::InstrumentCoverage,
+ ],
+ );
+
+ let promoted = promote_pass.promoted_fragments.into_inner();
+ (tcx.alloc_steal_mir(body), tcx.alloc_steal_promoted(promoted))
+}
+
+/// Compute the MIR that is used during CTFE (and thus has no optimizations run on it)
+fn mir_for_ctfe<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> &'tcx Body<'tcx> {
+ let did = def_id.expect_local();
+ if let Some(def) = ty::WithOptConstParam::try_lookup(did, tcx) {
+ tcx.mir_for_ctfe_of_const_arg(def)
+ } else {
+ tcx.arena.alloc(inner_mir_for_ctfe(tcx, ty::WithOptConstParam::unknown(did)))
+ }
+}
+
+/// Same as `mir_for_ctfe`, but used to get the MIR of a const generic parameter.
+/// The docs on `WithOptConstParam` explain this a bit more, but the TLDR is that
+/// we'd get cycle errors with `mir_for_ctfe`, because typeck would need to typeck
+/// the const parameter while type checking the main body, which in turn would try
+/// to type check the main body again.
+fn mir_for_ctfe_of_const_arg<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (did, param_did): (LocalDefId, DefId),
+) -> &'tcx Body<'tcx> {
+ tcx.arena.alloc(inner_mir_for_ctfe(
+ tcx,
+ ty::WithOptConstParam { did, const_param_did: Some(param_did) },
+ ))
+}
+
+fn inner_mir_for_ctfe(tcx: TyCtxt<'_>, def: ty::WithOptConstParam<LocalDefId>) -> Body<'_> {
+ // FIXME: don't duplicate this between the optimized_mir/mir_for_ctfe queries
+ if tcx.is_constructor(def.did.to_def_id()) {
+ // There's no reason to run all of the MIR passes on constructors when
+ // we can just output the MIR we want directly. This also saves const
+ // qualification and borrow checking the trouble of special casing
+ // constructors.
+ return shim::build_adt_ctor(tcx, def.did.to_def_id());
+ }
+
+ let context = tcx
+ .hir()
+ .body_const_context(def.did)
+ .expect("mir_for_ctfe should not be used for runtime functions");
+
+ let mut body = tcx.mir_drops_elaborated_and_const_checked(def).borrow().clone();
+
+ match context {
+ // Do not const prop functions: either they get executed at runtime or exported to
+ // metadata, in which case we run const prop on them, or they don't, in which case we
+ // const evaluate some control flow paths of the function and any errors in those
+ // paths will get emitted as const eval errors.
+ hir::ConstContext::ConstFn => {}
+ // Static items always get evaluated, so we can just let const eval see if any erroneous
+ // control flow paths get executed.
+ hir::ConstContext::Static(_) => {}
+ // Associated constants get const prop run so we detect common failure situations in the
+ // crate that defined the constant.
+ // Technically we want to not run on regular const items, but oli-obk doesn't know how to
+ // conveniently detect that at this point without looking at the HIR.
+ hir::ConstContext::Const => {
+ pm::run_passes(
+ tcx,
+ &mut body,
+ &[&const_prop::ConstProp, &marker::PhaseChange(MirPhase::Optimized)],
+ );
+ }
+ }
+
+ debug_assert!(!body.has_free_regions(), "Free regions in MIR for CTFE");
+
+ body
+}
+
+/// Obtain just the main MIR (no promoteds) and run some cleanups on it. This also runs
+/// mir borrowck *before* doing so in order to ensure that borrowck can be run and doesn't
+/// end up missing the source MIR due to stealing happening.
+fn mir_drops_elaborated_and_const_checked<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx Steal<Body<'tcx>> {
+ if let Some(def) = def.try_upgrade(tcx) {
+ return tcx.mir_drops_elaborated_and_const_checked(def);
+ }
+
+ let mir_borrowck = tcx.mir_borrowck_opt_const_arg(def);
+
+ let is_fn_like = tcx.def_kind(def.did).is_fn_like();
+ if is_fn_like {
+ let did = def.did.to_def_id();
+ let def = ty::WithOptConstParam::unknown(did);
+
+ // Do not compute the mir call graph without said call graph actually being used.
+ if inline::Inline.is_enabled(&tcx.sess) {
+ let _ = tcx.mir_inliner_callees(ty::InstanceDef::Item(def));
+ }
+ }
+
+ let (body, _) = tcx.mir_promoted(def);
+ let mut body = body.steal();
+ if let Some(error_reported) = mir_borrowck.tainted_by_errors {
+ body.tainted_by_errors = Some(error_reported);
+ }
+
+ // IMPORTANT
+ pm::run_passes(tcx, &mut body, &[&remove_false_edges::RemoveFalseEdges]);
+
+ // Do a little drop elaboration before const-checking if `const_precise_live_drops` is enabled.
+ if check_consts::post_drop_elaboration::checking_enabled(&ConstCx::new(tcx, &body)) {
+ pm::run_passes(
+ tcx,
+ &mut body,
+ &[
+ &simplify::SimplifyCfg::new("remove-false-edges"),
+ &remove_uninit_drops::RemoveUninitDrops,
+ ],
+ );
+ check_consts::post_drop_elaboration::check_live_drops(tcx, &body); // FIXME: make this a MIR lint
+ }
+
+ run_post_borrowck_cleanup_passes(tcx, &mut body);
+ assert!(body.phase == MirPhase::Deaggregated);
+ tcx.alloc_steal_mir(body)
+}
+
+/// After this series of passes, no lifetime analysis based on borrowing can be done.
+fn run_post_borrowck_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ debug!("post_borrowck_cleanup({:?})", body.source.def_id());
+
+ let post_borrowck_cleanup: &[&dyn MirPass<'tcx>] = &[
+ // Remove all things only needed by analysis
+ &simplify_branches::SimplifyConstCondition::new("initial"),
+ &remove_noop_landing_pads::RemoveNoopLandingPads,
+ &cleanup_post_borrowck::CleanupNonCodegenStatements,
+ &simplify::SimplifyCfg::new("early-opt"),
+ &deref_separator::Derefer,
+ // These next passes must be executed together
+ &add_call_guards::CriticalCallEdges,
+ &elaborate_drops::ElaborateDrops,
+ // This will remove extraneous landing pads which are no longer
+ // necessary as well as forcing any call in a non-unwinding
+ // function calling a possibly-unwinding function to abort the process.
+ &abort_unwinding_calls::AbortUnwindingCalls,
+ // AddMovesForPackedDrops needs to run after drop
+ // elaboration.
+ &add_moves_for_packed_drops::AddMovesForPackedDrops,
+ // `AddRetag` needs to run after `ElaborateDrops`. Otherwise it should run fairly late,
+ // but before optimizations begin.
+ &elaborate_box_derefs::ElaborateBoxDerefs,
+ &add_retag::AddRetag,
+ &lower_intrinsics::LowerIntrinsics,
+ &simplify::SimplifyCfg::new("elaborate-drops"),
+ // `Deaggregator` is conceptually part of MIR building, some backends rely on it happening
+ // and it can help optimizations.
+ &deaggregator::Deaggregator,
+ &Lint(const_prop_lint::ConstProp),
+ ];
+
+ pm::run_passes(tcx, body, post_borrowck_cleanup);
+}
+
+fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ fn o1<T>(x: T) -> WithMinOptLevel<T> {
+ WithMinOptLevel(1, x)
+ }
+
+ // Lowering generator control-flow and variables has to happen before we do anything else
+ // to them. We run some optimizations before that, because they may be harder to do on the state
+ // machine than on MIR with async primitives.
+ pm::run_passes(
+ tcx,
+ body,
+ &[
+ &reveal_all::RevealAll, // has to be done before inlining, since inlined code is in RevealAll mode.
+ &lower_slice_len::LowerSliceLenCalls, // has to be done before inlining, otherwise the actual call will almost always be inlined. Also simple, so it can just run first
+ &normalize_array_len::NormalizeArrayLen, // has to run after `slice::len` lowering
+ &unreachable_prop::UnreachablePropagation,
+ &uninhabited_enum_branching::UninhabitedEnumBranching,
+ &o1(simplify::SimplifyCfg::new("after-uninhabited-enum-branching")),
+ &inline::Inline,
+ &generator::StateTransform,
+ ],
+ );
+
+ assert!(body.phase == MirPhase::GeneratorsLowered);
+
+ // The main optimizations that we do on MIR.
+ pm::run_passes(
+ tcx,
+ body,
+ &[
+ &remove_storage_markers::RemoveStorageMarkers,
+ &remove_zsts::RemoveZsts,
+ &const_goto::ConstGoto,
+ &remove_unneeded_drops::RemoveUnneededDrops,
+ &match_branches::MatchBranchSimplification,
+ &multiple_return_terminators::MultipleReturnTerminators,
+ // InstCombine runs after MatchBranchSimplification to clean up `Ne(_1, false)`
+ &instcombine::InstCombine,
+ &separate_const_switch::SeparateConstSwitch,
+ // FIXME(#70073): This pass is responsible for both optimization and some lints.
+ // Const-prop runs unconditionally, but doesn't mutate the MIR at mir-opt-level=0.
+ &const_prop::ConstProp,
+ &const_debuginfo::ConstDebugInfo,
+ &o1(simplify_branches::SimplifyConstCondition::new("after-const-prop")),
+ &early_otherwise_branch::EarlyOtherwiseBranch,
+ &simplify_comparison_integral::SimplifyComparisonIntegral,
+ &simplify_try::SimplifyArmIdentity,
+ &simplify_try::SimplifyBranchSame,
+ &dead_store_elimination::DeadStoreElimination,
+ &dest_prop::DestinationPropagation,
+ &o1(simplify_branches::SimplifyConstCondition::new("final")),
+ &o1(remove_noop_landing_pads::RemoveNoopLandingPads),
+ &o1(simplify::SimplifyCfg::new("final")),
+ &nrvo::RenameReturnPlace,
+ &simplify::SimplifyLocals,
+ &multiple_return_terminators::MultipleReturnTerminators,
+ &deduplicate_blocks::DeduplicateBlocks,
+ // Some cleanup necessary at least for LLVM and potentially other codegen backends.
+ &add_call_guards::CriticalCallEdges,
+ &marker::PhaseChange(MirPhase::Optimized),
+ // Dump the end result for testing and debugging purposes.
+ &dump_mir::Marker("PreCodegen"),
+ ],
+ );
+}
+
+/// Optimize the MIR and prepare it for codegen.
+fn optimized_mir<'tcx>(tcx: TyCtxt<'tcx>, did: DefId) -> &'tcx Body<'tcx> {
+ let did = did.expect_local();
+ assert_eq!(ty::WithOptConstParam::try_lookup(did, tcx), None);
+ tcx.arena.alloc(inner_optimized_mir(tcx, did))
+}
+
+fn inner_optimized_mir(tcx: TyCtxt<'_>, did: LocalDefId) -> Body<'_> {
+ if tcx.is_constructor(did.to_def_id()) {
+ // There's no reason to run all of the MIR passes on constructors when
+ // we can just output the MIR we want directly. This also saves const
+ // qualification and borrow checking the trouble of special casing
+ // constructors.
+ return shim::build_adt_ctor(tcx, did.to_def_id());
+ }
+
+ match tcx.hir().body_const_context(did) {
+ // Run the `mir_for_ctfe` query, which depends on `mir_drops_elaborated_and_const_checked`
+ // which we are going to steal below. Thus we need to run `mir_for_ctfe` first, so it
+ // computes and caches its result.
+ Some(hir::ConstContext::ConstFn) => tcx.ensure().mir_for_ctfe(did),
+ None => {}
+ Some(other) => panic!("do not use `optimized_mir` for constants: {:?}", other),
+ }
+ debug!("about to call mir_drops_elaborated...");
+ let mut body =
+ tcx.mir_drops_elaborated_and_const_checked(ty::WithOptConstParam::unknown(did)).steal();
+ debug!("body: {:#?}", body);
+ run_optimization_passes(tcx, &mut body);
+
+ debug_assert!(!body.has_free_regions(), "Free regions in optimized MIR");
+
+ body
+}
+
+/// Fetch all the promoteds of an item and prepare their MIR bodies to be ready for
+/// constant evaluation once all substitutions become known.
+fn promoted_mir<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
+ if tcx.is_constructor(def.did.to_def_id()) {
+ return tcx.arena.alloc(IndexVec::new());
+ }
+
+ let tainted_by_errors = tcx.mir_borrowck_opt_const_arg(def).tainted_by_errors;
+ let mut promoted = tcx.mir_promoted(def).1.steal();
+
+ for body in &mut promoted {
+ if let Some(error_reported) = tainted_by_errors {
+ body.tainted_by_errors = Some(error_reported);
+ }
+ run_post_borrowck_cleanup_passes(tcx, body);
+ }
+
+ debug_assert!(!promoted.has_free_regions(), "Free regions in promoted MIR");
+
+ tcx.arena.alloc(promoted)
+}
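The pipelines above all flow through the pass manager. Below is a hypothetical miniature of that pattern, with an enablement gate analogous to `is_enabled`/`WithMinOptLevel`; the `Pass` trait and string-based body are stand-ins, not the real `MirPass` interface.

```rust
// Hypothetical miniature of the pass-manager pattern: each pass carries an
// enablement gate, and a driver runs the enabled passes in order.
trait Pass {
    fn name(&self) -> &'static str;
    /// Minimum opt level at which this pass runs (cf. `WithMinOptLevel`).
    fn min_opt_level(&self) -> u8 {
        0
    }
    fn run(&self, body: &mut Vec<String>);
}

struct RemoveNops;

impl Pass for RemoveNops {
    fn name(&self) -> &'static str {
        "RemoveNops"
    }
    fn run(&self, body: &mut Vec<String>) {
        // Drop statements that do nothing.
        body.retain(|stmt| stmt != "nop");
    }
}

fn run_passes(opt_level: u8, body: &mut Vec<String>, passes: &[&dyn Pass]) {
    for pass in passes {
        if opt_level >= pass.min_opt_level() {
            pass.run(body);
            println!("ran {}", pass.name());
        }
    }
}

fn main() {
    let mut body = vec!["stmt".to_string(), "nop".to_string()];
    run_passes(1, &mut body, &[&RemoveNops]);
    assert_eq!(body, vec!["stmt".to_string()]);
}
```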
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
new file mode 100644
index 000000000..b7ba61651
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -0,0 +1,156 @@
+//! Lowers intrinsic calls
+
+use crate::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+
+pub struct LowerIntrinsics;
+
+impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let local_decls = &body.local_decls;
+ for block in body.basic_blocks.as_mut() {
+ let terminator = block.terminator.as_mut().unwrap();
+ if let TerminatorKind::Call { func, args, destination, target, .. } =
+ &mut terminator.kind
+ {
+ let func_ty = func.ty(local_decls, tcx);
+ let Some((intrinsic_name, substs)) = resolve_rust_intrinsic(tcx, func_ty) else {
+ continue;
+ };
+ match intrinsic_name {
+ sym::unreachable => {
+ terminator.kind = TerminatorKind::Unreachable;
+ }
+ sym::forget => {
+ if let Some(target) = *target {
+ block.statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::Assign(Box::new((
+ *destination,
+ Rvalue::Use(Operand::Constant(Box::new(Constant {
+ span: terminator.source_info.span,
+ user_ty: None,
+ literal: ConstantKind::zero_sized(tcx.types.unit),
+ }))),
+ ))),
+ });
+ terminator.kind = TerminatorKind::Goto { target };
+ }
+ }
+ sym::copy_nonoverlapping => {
+ let target = target.unwrap();
+ let mut args = args.drain(..);
+ block.statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::CopyNonOverlapping(Box::new(
+ rustc_middle::mir::CopyNonOverlapping {
+ src: args.next().unwrap(),
+ dst: args.next().unwrap(),
+ count: args.next().unwrap(),
+ },
+ )),
+ });
+ assert_eq!(
+ args.next(),
+ None,
+ "Extra argument for copy_non_overlapping intrinsic"
+ );
+ drop(args);
+ terminator.kind = TerminatorKind::Goto { target };
+ }
+ sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => {
+ if let Some(target) = *target {
+ let lhs;
+ let rhs;
+ {
+ let mut args = args.drain(..);
+ lhs = args.next().unwrap();
+ rhs = args.next().unwrap();
+ }
+ let bin_op = match intrinsic_name {
+ sym::wrapping_add => BinOp::Add,
+ sym::wrapping_sub => BinOp::Sub,
+ sym::wrapping_mul => BinOp::Mul,
+ _ => bug!("unexpected intrinsic"),
+ };
+ block.statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::Assign(Box::new((
+ *destination,
+ Rvalue::BinaryOp(bin_op, Box::new((lhs, rhs))),
+ ))),
+ });
+ terminator.kind = TerminatorKind::Goto { target };
+ }
+ }
+ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+ // The checked binary operations are not suitable target for lowering here,
+ // since their semantics depend on the value of overflow-checks flag used
+ // during codegen. Issue #35310.
+ }
+ sym::size_of | sym::min_align_of => {
+ if let Some(target) = *target {
+ let tp_ty = substs.type_at(0);
+ let null_op = match intrinsic_name {
+ sym::size_of => NullOp::SizeOf,
+ sym::min_align_of => NullOp::AlignOf,
+ _ => bug!("unexpected intrinsic"),
+ };
+ block.statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::Assign(Box::new((
+ *destination,
+ Rvalue::NullaryOp(null_op, tp_ty),
+ ))),
+ });
+ terminator.kind = TerminatorKind::Goto { target };
+ }
+ }
+ sym::discriminant_value => {
+ if let (Some(target), Some(arg)) = (*target, args[0].place()) {
+ let arg = tcx.mk_place_deref(arg);
+ block.statements.push(Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::Assign(Box::new((
+ *destination,
+ Rvalue::Discriminant(arg),
+ ))),
+ });
+ terminator.kind = TerminatorKind::Goto { target };
+ }
+ }
+ _ if intrinsic_name.as_str().starts_with("simd_shuffle") => {
+ validate_simd_shuffle(tcx, args, terminator.source_info.span);
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+}
+
+fn resolve_rust_intrinsic<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ func_ty: Ty<'tcx>,
+) -> Option<(Symbol, SubstsRef<'tcx>)> {
+ if let ty::FnDef(def_id, substs) = *func_ty.kind() {
+ if tcx.is_intrinsic(def_id) {
+ return Some((tcx.item_name(def_id), substs));
+ }
+ }
+ None
+}
+
+fn validate_simd_shuffle<'tcx>(tcx: TyCtxt<'tcx>, args: &[Operand<'tcx>], span: Span) {
+ match &args[2] {
+ Operand::Constant(_) => {} // all good
+ _ => {
+ let msg = "last argument of `simd_shuffle` is required to be a `const` item";
+ tcx.sess.span_err(span, msg);
+ }
+ }
+}
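The `wrapping_add` arm above has a simple shape: turn a call terminator into an assignment plus a goto. Here is a toy model of that rewrite; the `Terminator` and `Statement` types are invented for the sketch and do not mirror the MIR types.

```rust
// A toy model of lowering an intrinsic call terminator into an ordinary
// assignment statement followed by a goto.
#[derive(Debug, PartialEq)]
enum Terminator {
    Call { func: &'static str, args: Vec<u32>, dest: &'static str, target: usize },
    Goto { target: usize },
}

#[derive(Debug, PartialEq)]
struct Statement(String);

fn lower(term: Terminator, stmts: &mut Vec<Statement>) -> Terminator {
    match term {
        Terminator::Call { func: "wrapping_add", args, dest, target } => {
            // The call becomes a plain binary-op assignment...
            stmts.push(Statement(format!("{dest} = Add({}, {})", args[0], args[1])));
            // ...and control flow continues to the call's target block.
            Terminator::Goto { target }
        }
        other => other, // leave every other terminator untouched
    }
}

fn main() {
    let mut stmts = Vec::new();
    let call = Terminator::Call { func: "wrapping_add", args: vec![1, 2], dest: "_0", target: 1 };
    assert_eq!(lower(call, &mut stmts), Terminator::Goto { target: 1 });
    assert_eq!(stmts[0].0, "_0 = Add(1, 2)");
}
```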
diff --git a/compiler/rustc_mir_transform/src/lower_slice_len.rs b/compiler/rustc_mir_transform/src/lower_slice_len.rs
new file mode 100644
index 000000000..2f02d00ec
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/lower_slice_len.rs
@@ -0,0 +1,99 @@
+//! This pass lowers calls to core::slice::len to just the Len operation.
+//! It should run before inlining!
+
+use crate::MirPass;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+
+pub struct LowerSliceLenCalls;
+
+impl<'tcx> MirPass<'tcx> for LowerSliceLenCalls {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() > 0
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ lower_slice_len_calls(tcx, body)
+ }
+}
+
+pub fn lower_slice_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let language_items = tcx.lang_items();
+ let Some(slice_len_fn_item_def_id) = language_items.slice_len_fn() else {
+ // there is no language item to compare to :)
+ return;
+ };
+
+ // The call's one successor remains unchanged, so no need to invalidate the CFG cache
+ let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
+ for block in basic_blocks {
+ // lower `<[_]>::len` calls
+ lower_slice_len_call(tcx, block, &body.local_decls, slice_len_fn_item_def_id);
+ }
+}
+
+struct SliceLenPatchInformation<'tcx> {
+ add_statement: Statement<'tcx>,
+ new_terminator_kind: TerminatorKind<'tcx>,
+}
+
+fn lower_slice_len_call<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ block: &mut BasicBlockData<'tcx>,
+ local_decls: &IndexVec<Local, LocalDecl<'tcx>>,
+ slice_len_fn_item_def_id: DefId,
+) {
+ let mut patch_found: Option<SliceLenPatchInformation<'_>> = None;
+
+ let terminator = block.terminator();
+ match &terminator.kind {
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ target: Some(bb),
+ cleanup: None,
+ from_hir_call: true,
+ ..
+ } => {
+ // some heuristics for fast rejection
+ if args.len() != 1 {
+ return;
+ }
+ let Some(arg) = args[0].place() else { return };
+ let func_ty = func.ty(local_decls, tcx);
+ match func_ty.kind() {
+ ty::FnDef(fn_def_id, _) if fn_def_id == &slice_len_fn_item_def_id => {
+ // perform modifications
+ // from something like `_5 = core::slice::<impl [u8]>::len(move _6) -> bb1`
+ // into `_5 = Len(*_6)` followed by `goto bb1`
+
+ // make new RValue for Len
+ let deref_arg = tcx.mk_place_deref(arg);
+ let r_value = Rvalue::Len(deref_arg);
+ let len_statement_kind =
+ StatementKind::Assign(Box::new((*destination, r_value)));
+ let add_statement =
+ Statement { kind: len_statement_kind, source_info: terminator.source_info };
+
+ // modify terminator into simple Goto
+ let new_terminator_kind = TerminatorKind::Goto { target: *bb };
+
+ let patch = SliceLenPatchInformation { add_statement, new_terminator_kind };
+
+ patch_found = Some(patch);
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+
+ if let Some(SliceLenPatchInformation { add_statement, new_terminator_kind }) = patch_found {
+ block.statements.push(add_statement);
+ block.terminator_mut().kind = new_terminator_kind;
+ }
+}
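For source-level intuition (the rewrite itself happens on MIR, not surface Rust): the pass turns a call such as `_5 = core::slice::<impl [u8]>::len(move _6) -> bb1` into `_5 = Len(*_6)` plus a goto. At the language level the two forms below are observably equivalent.

```rust
// Source-level intuition only; the actual rewrite replaces a MIR `Call`
// terminator with the primitive `Len` rvalue, as the comments above describe.
fn len_via_ufcs(s: &[u8]) -> usize {
    <[u8]>::len(s) // the kind of call the pass recognizes
}

fn len_via_method(s: &[u8]) -> usize {
    s.len() // observably the same operation
}

fn main() {
    let data = [1u8, 2, 3];
    assert_eq!(len_via_ufcs(&data), len_via_method(&data));
}
```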
diff --git a/compiler/rustc_mir_transform/src/marker.rs b/compiler/rustc_mir_transform/src/marker.rs
new file mode 100644
index 000000000..06819fc1d
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/marker.rs
@@ -0,0 +1,20 @@
+use std::borrow::Cow;
+
+use crate::MirPass;
+use rustc_middle::mir::{Body, MirPhase};
+use rustc_middle::ty::TyCtxt;
+
+/// Changes the MIR phase without changing the MIR itself.
+pub struct PhaseChange(pub MirPhase);
+
+impl<'tcx> MirPass<'tcx> for PhaseChange {
+ fn phase_change(&self) -> Option<MirPhase> {
+ Some(self.0)
+ }
+
+ fn name(&self) -> Cow<'_, str> {
+ Cow::from(format!("PhaseChange-{:?}", self.0))
+ }
+
+ fn run_pass(&self, _: TyCtxt<'tcx>, _body: &mut Body<'tcx>) {}
+}
diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs
new file mode 100644
index 000000000..a0ba69c89
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/match_branches.rs
@@ -0,0 +1,176 @@
+use crate::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use std::iter;
+
+use super::simplify::simplify_cfg;
+
+pub struct MatchBranchSimplification;
+
+/// If a source block is found that switches between two blocks that are exactly
+/// the same modulo const bool assignments (e.g., one assigns true and the other
+/// false to the same place), merge the target blocks' statements into the source
+/// block, using an Eq / Ne comparison with the switch value where the const bool
+/// values differ.
+///
+/// For example:
+///
+/// ```ignore (MIR)
+/// bb0: {
+/// switchInt(move _3) -> [42_isize: bb1, otherwise: bb2];
+/// }
+///
+/// bb1: {
+/// _2 = const true;
+/// goto -> bb3;
+/// }
+///
+/// bb2: {
+/// _2 = const false;
+/// goto -> bb3;
+/// }
+/// ```
+///
+/// into:
+///
+/// ```ignore (MIR)
+/// bb0: {
+/// _2 = Eq(move _3, const 42_isize);
+/// goto -> bb3;
+/// }
+/// ```
+
+impl<'tcx> MirPass<'tcx> for MatchBranchSimplification {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 3
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let def_id = body.source.def_id();
+ let param_env = tcx.param_env(def_id);
+
+ let bbs = body.basic_blocks.as_mut();
+ let mut should_cleanup = false;
+ 'outer: for bb_idx in bbs.indices() {
+ if !tcx.consider_optimizing(|| format!("MatchBranchSimplification {:?} ", def_id)) {
+ continue;
+ }
+
+ let (discr, val, switch_ty, first, second) = match bbs[bb_idx].terminator().kind {
+ TerminatorKind::SwitchInt {
+ discr: ref discr @ (Operand::Copy(_) | Operand::Move(_)),
+ switch_ty,
+ ref targets,
+ ..
+ } if targets.iter().len() == 1 => {
+ let (value, target) = targets.iter().next().unwrap();
+ if target == targets.otherwise() {
+ continue;
+ }
+ (discr, value, switch_ty, target, targets.otherwise())
+ }
+ // Only optimize SwitchInt terminators
+ _ => continue,
+ };
+
+ // Check that destinations are identical, and if not, then don't optimize this block
+ if bbs[first].terminator().kind != bbs[second].terminator().kind {
+ continue;
+ }
+
+ // Check that the statements of the two blocks are either identical or are const
+ // bool assignments to the same place, matching up 1-1; if not, don't optimize this block.
+ let first_stmts = &bbs[first].statements;
+ let scnd_stmts = &bbs[second].statements;
+ if first_stmts.len() != scnd_stmts.len() {
+ continue;
+ }
+ for (f, s) in iter::zip(first_stmts, scnd_stmts) {
+ match (&f.kind, &s.kind) {
+ // If two statements are exactly the same, we can optimize.
+ (f_s, s_s) if f_s == s_s => {}
+
+ // If two statements are const bool assignments to the same place, we can optimize.
+ (
+ StatementKind::Assign(box (lhs_f, Rvalue::Use(Operand::Constant(f_c)))),
+ StatementKind::Assign(box (lhs_s, Rvalue::Use(Operand::Constant(s_c)))),
+ ) if lhs_f == lhs_s
+ && f_c.literal.ty().is_bool()
+ && s_c.literal.ty().is_bool()
+ && f_c.literal.try_eval_bool(tcx, param_env).is_some()
+ && s_c.literal.try_eval_bool(tcx, param_env).is_some() => {}
+
+ // Otherwise we cannot optimize. Try another block.
+ _ => continue 'outer,
+ }
+ }
+ // Take ownership of items now that we know we can optimize.
+ let discr = discr.clone();
+
+ // Introduce a temporary for the discriminant value.
+ let source_info = bbs[bb_idx].terminator().source_info;
+ let discr_local = body.local_decls.push(LocalDecl::new(switch_ty, source_info.span));
+
+ // We already checked that first and second are different blocks,
+ // and bb_idx has a different terminator from both of them.
+ let (from, first, second) = bbs.pick3_mut(bb_idx, first, second);
+
+ let new_stmts = iter::zip(&first.statements, &second.statements).map(|(f, s)| {
+ match (&f.kind, &s.kind) {
+ (f_s, s_s) if f_s == s_s => (*f).clone(),
+
+ (
+ StatementKind::Assign(box (lhs, Rvalue::Use(Operand::Constant(f_c)))),
+ StatementKind::Assign(box (_, Rvalue::Use(Operand::Constant(s_c)))),
+ ) => {
+ // From earlier loop we know that we are dealing with bool constants only:
+ let f_b = f_c.literal.try_eval_bool(tcx, param_env).unwrap();
+ let s_b = s_c.literal.try_eval_bool(tcx, param_env).unwrap();
+ if f_b == s_b {
+ // Same value in both blocks. Use statement as is.
+ (*f).clone()
+ } else {
+ // Different value between blocks. Make value conditional on switch condition.
+ let size = tcx.layout_of(param_env.and(switch_ty)).unwrap().size;
+ let const_cmp = Operand::const_from_scalar(
+ tcx,
+ switch_ty,
+ rustc_const_eval::interpret::Scalar::from_uint(val, size),
+ rustc_span::DUMMY_SP,
+ );
+ let op = if f_b { BinOp::Eq } else { BinOp::Ne };
+ let rhs = Rvalue::BinaryOp(
+ op,
+ Box::new((Operand::Copy(Place::from(discr_local)), const_cmp)),
+ );
+ Statement {
+ source_info: f.source_info,
+ kind: StatementKind::Assign(Box::new((*lhs, rhs))),
+ }
+ }
+ }
+
+ _ => unreachable!(),
+ }
+ });
+
+ from.statements
+ .push(Statement { source_info, kind: StatementKind::StorageLive(discr_local) });
+ from.statements.push(Statement {
+ source_info,
+ kind: StatementKind::Assign(Box::new((
+ Place::from(discr_local),
+ Rvalue::Use(discr),
+ ))),
+ });
+ from.statements.extend(new_stmts);
+ from.statements
+ .push(Statement { source_info, kind: StatementKind::StorageDead(discr_local) });
+ from.terminator_mut().kind = first.terminator().kind.clone();
+ should_cleanup = true;
+ }
+
+ if should_cleanup {
+ simplify_cfg(tcx, body);
+ }
+ }
+}
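The doc comment's MIR example corresponds to the following source-level shape (illustrative only; the pass rewrites MIR, not surface Rust): two arms assigning opposite booleans collapse into a single comparison.

```rust
// Both functions compute the same result; MatchBranchSimplification merges the
// two constant-assignment arms of the first into the single Eq of the second.
fn before(x: isize) -> bool {
    match x {
        42 => true,
        _ => false,
    }
}

fn after(x: isize) -> bool {
    x == 42
}

fn main() {
    for x in [41isize, 42, 43] {
        assert_eq!(before(x), after(x));
    }
}
```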
diff --git a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
new file mode 100644
index 000000000..22b6dead9
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs
@@ -0,0 +1,43 @@
+//! This pass removes jumps to basic blocks containing only a return, and replaces them with a
+//! return instead.
+
+use crate::{simplify, MirPass};
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+pub struct MultipleReturnTerminators;
+
+impl<'tcx> MirPass<'tcx> for MultipleReturnTerminators {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 4
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // find basic blocks with no statement and a return terminator
+ let mut bbs_simple_returns = BitSet::new_empty(body.basic_blocks().len());
+ let def_id = body.source.def_id();
+ let bbs = body.basic_blocks_mut();
+ for idx in bbs.indices() {
+ if bbs[idx].statements.is_empty()
+ && bbs[idx].terminator().kind == TerminatorKind::Return
+ {
+ bbs_simple_returns.insert(idx);
+ }
+ }
+
+ for bb in bbs {
+ if !tcx.consider_optimizing(|| format!("MultipleReturnTerminators {:?} ", def_id)) {
+ break;
+ }
+
+ if let TerminatorKind::Goto { target } = bb.terminator().kind {
+ if bbs_simple_returns.contains(target) {
+ bb.terminator_mut().kind = TerminatorKind::Return;
+ }
+ }
+ }
+
+ simplify::remove_dead_blocks(tcx, body)
+ }
+}
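A toy CFG version of the rewrite, with an invented `Term` type standing in for MIR terminators: a goto whose target block consists of nothing but a return becomes a return itself.

```rust
#[derive(Debug, PartialEq)]
enum Term {
    Goto(usize),
    Return,
}

fn simplify(blocks: &mut [Term]) {
    // Indices of blocks that consist of nothing but a return (in this toy,
    // a block is just its terminator).
    let simple_returns: Vec<usize> = blocks
        .iter()
        .enumerate()
        .filter_map(|(i, t)| (*t == Term::Return).then_some(i))
        .collect();
    for t in blocks.iter_mut() {
        if let Term::Goto(target) = *t {
            if simple_returns.contains(&target) {
                *t = Term::Return;
            }
        }
    }
}

fn main() {
    let mut blocks = vec![Term::Goto(1), Term::Return];
    simplify(&mut blocks);
    assert_eq!(blocks, vec![Term::Return, Term::Return]);
}
```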
diff --git a/compiler/rustc_mir_transform/src/normalize_array_len.rs b/compiler/rustc_mir_transform/src/normalize_array_len.rs
new file mode 100644
index 000000000..c0217a105
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/normalize_array_len.rs
@@ -0,0 +1,287 @@
+//! This pass eliminates casting of arrays into slices when their length
+//! is taken using the `.len()` method. Handy for preserving information in MIR for const prop.
+
+use crate::MirPass;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_data_structures::intern::Interned;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, ReErased, Region, TyCtxt};
+
+const MAX_NUM_BLOCKS: usize = 800;
+const MAX_NUM_LOCALS: usize = 3000;
+
+pub struct NormalizeArrayLen;
+
+impl<'tcx> MirPass<'tcx> for NormalizeArrayLen {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 4
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // early returns for edge cases of highly unrolled functions
+ if body.basic_blocks().len() > MAX_NUM_BLOCKS {
+ return;
+ }
+ if body.local_decls().len() > MAX_NUM_LOCALS {
+ return;
+ }
+ normalize_array_len_calls(tcx, body)
+ }
+}
+
+pub fn normalize_array_len_calls<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // We don't ever touch terminators, so no need to invalidate the CFG cache
+ let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
+ let local_decls = &mut body.local_decls;
+
+ // do a preliminary analysis to see if we ever have locals of type `[T;N]` or `&[T;N]`
+ let mut interesting_locals = BitSet::new_empty(local_decls.len());
+ for (local, decl) in local_decls.iter_enumerated() {
+ match decl.ty.kind() {
+ ty::Array(..) => {
+ interesting_locals.insert(local);
+ }
+ ty::Ref(.., ty, Mutability::Not) => match ty.kind() {
+ ty::Array(..) => {
+ interesting_locals.insert(local);
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ }
+ if interesting_locals.is_empty() {
+ // we have found nothing to analyze
+ return;
+ }
+ let num_interesting_locals = interesting_locals.count();
+ let mut state = FxIndexMap::with_capacity_and_hasher(num_interesting_locals, Default::default());
+ let mut patches_scratchpad =
+ FxIndexMap::with_capacity_and_hasher(num_interesting_locals, Default::default());
+ let mut replacements_scratchpad =
+ FxIndexMap::with_capacity_and_hasher(num_interesting_locals, Default::default());
+ for block in basic_blocks {
+ // prevent length calls for arrays [T; N] from decaying into length calls for &[T],
+ // which would forbid constant propagation
+ normalize_array_len_call(
+ tcx,
+ block,
+ local_decls,
+ &interesting_locals,
+ &mut state,
+ &mut patches_scratchpad,
+ &mut replacements_scratchpad,
+ );
+ state.clear();
+ patches_scratchpad.clear();
+ replacements_scratchpad.clear();
+ }
+}
+
+struct Patcher<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ patches_scratchpad: &'a FxIndexMap<usize, usize>,
+ replacements_scratchpad: &'a mut FxIndexMap<usize, Local>,
+ local_decls: &'a mut IndexVec<Local, LocalDecl<'tcx>>,
+ statement_idx: usize,
+}
+
+impl<'tcx> Patcher<'_, 'tcx> {
+ fn patch_expand_statement(
+ &mut self,
+ statement: &mut Statement<'tcx>,
+ ) -> Option<std::vec::IntoIter<Statement<'tcx>>> {
+ let idx = self.statement_idx;
+ if let Some(len_statement_idx) = self.patches_scratchpad.get(&idx).copied() {
+ let mut statements = Vec::with_capacity(2);
+
+ // we are at a statement that performs a cast. The only sound way is to create
+ // another local that makes a similar copy without the cast and then use that
+ // copy in the Len operation
+
+ match &statement.kind {
+ StatementKind::Assign(box (
+ ..,
+ Rvalue::Cast(
+ CastKind::Pointer(ty::adjustment::PointerCast::Unsize),
+ operand,
+ _,
+ ),
+ )) => {
+ match operand {
+ Operand::Copy(place) | Operand::Move(place) => {
+ // create new local
+ let ty = operand.ty(self.local_decls, self.tcx);
+ let local_decl = LocalDecl::with_source_info(ty, statement.source_info);
+ let local = self.local_decls.push(local_decl);
+ // make it live
+ let mut make_live_statement = statement.clone();
+ make_live_statement.kind = StatementKind::StorageLive(local);
+ statements.push(make_live_statement);
+ // copy into it
+
+ let operand = Operand::Copy(*place);
+ let mut make_copy_statement = statement.clone();
+ let assign_to = Place::from(local);
+ let rvalue = Rvalue::Use(operand);
+ make_copy_statement.kind =
+ StatementKind::Assign(Box::new((assign_to, rvalue)));
+ statements.push(make_copy_statement);
+
+ // to reorder we have to copy and make NOP
+ statements.push(statement.clone());
+ statement.make_nop();
+
+ self.replacements_scratchpad.insert(len_statement_idx, local);
+ }
+ _ => {
+ unreachable!("it's a bug in the implementation")
+ }
+ }
+ }
+ _ => {
+ unreachable!("it's a bug in the implementation")
+ }
+ }
+
+ self.statement_idx += 1;
+
+ Some(statements.into_iter())
+ } else if let Some(local) = self.replacements_scratchpad.get(&idx).copied() {
+ let mut statements = Vec::with_capacity(2);
+
+ match &statement.kind {
+ StatementKind::Assign(box (into, Rvalue::Len(place))) => {
+ let add_deref = if let Some(..) = place.as_local() {
+ false
+ } else if let Some(..) = place.local_or_deref_local() {
+ true
+ } else {
+ unreachable!("it's a bug in the implementation")
+ };
+ // replace len statement
+ let mut len_statement = statement.clone();
+ let mut place = Place::from(local);
+ if add_deref {
+ place = self.tcx.mk_place_deref(place);
+ }
+ len_statement.kind =
+ StatementKind::Assign(Box::new((*into, Rvalue::Len(place))));
+ statements.push(len_statement);
+
+ // make temporary dead
+ let mut make_dead_statement = statement.clone();
+ make_dead_statement.kind = StatementKind::StorageDead(local);
+ statements.push(make_dead_statement);
+
+ // make original statement NOP
+ statement.make_nop();
+ }
+ _ => {
+ unreachable!("it's a bug in the implementation")
+ }
+ }
+
+ self.statement_idx += 1;
+
+ Some(statements.into_iter())
+ } else {
+ self.statement_idx += 1;
+ None
+ }
+ }
+}
+
+fn normalize_array_len_call<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ block: &mut BasicBlockData<'tcx>,
+ local_decls: &mut IndexVec<Local, LocalDecl<'tcx>>,
+ interesting_locals: &BitSet<Local>,
+ state: &mut FxIndexMap<Local, usize>,
+ patches_scratchpad: &mut FxIndexMap<usize, usize>,
+ replacements_scratchpad: &mut FxIndexMap<usize, Local>,
+) {
+ for (statement_idx, statement) in block.statements.iter_mut().enumerate() {
+ match &mut statement.kind {
+ StatementKind::Assign(box (place, rvalue)) => {
+ match rvalue {
+ Rvalue::Cast(
+ CastKind::Pointer(ty::adjustment::PointerCast::Unsize),
+ operand,
+ cast_ty,
+ ) => {
+ let Some(local) = place.as_local() else { return };
+ match operand {
+ Operand::Copy(place) | Operand::Move(place) => {
+ let Some(operand_local) = place.local_or_deref_local() else { return; };
+ if !interesting_locals.contains(operand_local) {
+ return;
+ }
+ let operand_ty = local_decls[operand_local].ty;
+ match (operand_ty.kind(), cast_ty.kind()) {
+ (ty::Array(of_ty_src, ..), ty::Slice(of_ty_dst)) => {
+ if of_ty_src == of_ty_dst {
+ // this is a cast from [T; N] into [T], so we are good
+ state.insert(local, statement_idx);
+ }
+ }
+                                    // the current way of patching doesn't support `mut` references
+ (
+ ty::Ref(
+ Region(Interned(ReErased, _)),
+ operand_ty,
+ Mutability::Not,
+ ),
+ ty::Ref(
+ Region(Interned(ReErased, _)),
+ cast_ty,
+ Mutability::Not,
+ ),
+ ) => {
+ match (operand_ty.kind(), cast_ty.kind()) {
+                                            // the current way of patching doesn't support `mut` references
+ (ty::Array(of_ty_src, ..), ty::Slice(of_ty_dst)) => {
+ if of_ty_src == of_ty_dst {
+ // this is a cast from [T; N] into [T], so we are good
+ state.insert(local, statement_idx);
+ }
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+ }
+ Rvalue::Len(place) => {
+ let Some(local) = place.local_or_deref_local() else {
+ return;
+ };
+ if let Some(cast_statement_idx) = state.get(&local).copied() {
+ patches_scratchpad.insert(cast_statement_idx, statement_idx);
+ }
+ }
+ _ => {
+ // invalidate
+ state.remove(&place.local);
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+ let mut patcher = Patcher {
+ tcx,
+ patches_scratchpad: &*patches_scratchpad,
+ replacements_scratchpad,
+ local_decls,
+ statement_idx: 0,
+ };
+
+ block.expand_statements(|st| patcher.patch_expand_statement(st));
+}
diff --git a/compiler/rustc_mir_transform/src/nrvo.rs b/compiler/rustc_mir_transform/src/nrvo.rs
new file mode 100644
index 000000000..bb063915f
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/nrvo.rs
@@ -0,0 +1,236 @@
+//! See the docs for [`RenameReturnPlace`].
+
+use rustc_hir::Mutability;
+use rustc_index::bit_set::HybridBitSet;
+use rustc_middle::mir::visit::{MutVisitor, NonUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::{self, BasicBlock, Local, Location};
+use rustc_middle::ty::TyCtxt;
+
+use crate::MirPass;
+
+/// This pass looks for MIR that always copies the same local into the return place and eliminates
+/// the copy by renaming all uses of that local to `_0`.
+///
+/// This allows LLVM to perform an optimization similar to the named return value optimization
+/// (NRVO) that is guaranteed in C++. This avoids a stack allocation and `memcpy` for the
+/// relatively common pattern of allocating a buffer on the stack, mutating it, and returning it by
+/// value like so:
+///
+/// ```rust
+/// fn foo(init: fn(&mut [u8; 1024])) -> [u8; 1024] {
+/// let mut buf = [0; 1024];
+/// init(&mut buf);
+/// buf
+/// }
+/// ```
+///
+/// For now, this pass is very simple and only capable of eliminating a single copy. A more general
+/// version of copy propagation, such as the one based on non-overlapping live ranges in [#47954] and
+/// [#71003], could yield even more benefits.
+///
+/// [#47954]: https://github.com/rust-lang/rust/pull/47954
+/// [#71003]: https://github.com/rust-lang/rust/pull/71003
+pub struct RenameReturnPlace;
+
+impl<'tcx> MirPass<'tcx> for RenameReturnPlace {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() > 0
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut mir::Body<'tcx>) {
+ let def_id = body.source.def_id();
+ let Some(returned_local) = local_eligible_for_nrvo(body) else {
+ debug!("`{:?}` was ineligible for NRVO", def_id);
+ return;
+ };
+
+ if !tcx.consider_optimizing(|| format!("RenameReturnPlace {:?}", def_id)) {
+ return;
+ }
+
+ debug!(
+ "`{:?}` was eligible for NRVO, making {:?} the return place",
+ def_id, returned_local
+ );
+
+ RenameToReturnPlace { tcx, to_rename: returned_local }.visit_body(body);
+
+ // Clean up the `NOP`s we inserted for statements made useless by our renaming.
+ for block_data in body.basic_blocks_mut() {
+ block_data.statements.retain(|stmt| stmt.kind != mir::StatementKind::Nop);
+ }
+
+ // Overwrite the debuginfo of `_0` with that of the renamed local.
+ let (renamed_decl, ret_decl) =
+ body.local_decls.pick2_mut(returned_local, mir::RETURN_PLACE);
+
+ // Sometimes, the return place is assigned a local of a different but coercible type, for
+ // example `&mut T` instead of `&T`. Overwriting the `LocalInfo` for the return place means
+ // its type may no longer match the return type of its function. This doesn't cause a
+ // problem in codegen because these two types are layout-compatible, but may be unexpected.
+ debug!("_0: {:?} = {:?}: {:?}", ret_decl.ty, returned_local, renamed_decl.ty);
+ ret_decl.clone_from(renamed_decl);
+
+ // The return place is always mutable.
+ ret_decl.mutability = Mutability::Mut;
+ }
+}
+
+/// MIR that is eligible for the NRVO must fulfill two conditions:
+/// 1. The return place must not be read prior to the `Return` terminator.
+/// 2. A simple assignment of a whole local to the return place (e.g., `_0 = _1`) must be the
+/// only definition of the return place reaching the `Return` terminator.
+///
+/// If the MIR fulfills both these conditions, this function returns the `Local` that is assigned
+/// to the return place along all possible paths through the control-flow graph.
+fn local_eligible_for_nrvo(body: &mut mir::Body<'_>) -> Option<Local> {
+ if IsReturnPlaceRead::run(body) {
+ return None;
+ }
+
+ let mut copied_to_return_place = None;
+ for block in body.basic_blocks().indices() {
+ // Look for blocks with a `Return` terminator.
+ if !matches!(body[block].terminator().kind, mir::TerminatorKind::Return) {
+ continue;
+ }
+
+ // Look for an assignment of a single local to the return place prior to the `Return`.
+ let returned_local = find_local_assigned_to_return_place(block, body)?;
+ match body.local_kind(returned_local) {
+ // FIXME: Can we do this for arguments as well?
+ mir::LocalKind::Arg => return None,
+
+ mir::LocalKind::ReturnPointer => bug!("Return place was assigned to itself?"),
+ mir::LocalKind::Var | mir::LocalKind::Temp => {}
+ }
+
+        // If multiple different locals are copied to the return place, we can't pick a
+        // single one to rename.
+ if copied_to_return_place.map_or(false, |old| old != returned_local) {
+ return None;
+ }
+
+ copied_to_return_place = Some(returned_local);
+ }
+
+ copied_to_return_place
+}
+
+fn find_local_assigned_to_return_place(
+ start: BasicBlock,
+ body: &mut mir::Body<'_>,
+) -> Option<Local> {
+ let mut block = start;
+ let mut seen = HybridBitSet::new_empty(body.basic_blocks().len());
+
+    // Walk backwards through the CFG, following each block's unique predecessor;
+    // stop if a block has already been seen or does not have exactly one predecessor.
+ while seen.insert(block) {
+ trace!("Looking for assignments to `_0` in {:?}", block);
+
+ let local = body[block].statements.iter().rev().find_map(as_local_assigned_to_return_place);
+ if local.is_some() {
+ return local;
+ }
+
+ match body.basic_blocks.predecessors()[block].as_slice() {
+ &[pred] => block = pred,
+ _ => return None,
+ }
+ }
+
+ None
+}
+
+// If this statement is an assignment of an unprojected local to the return place,
+// return that local.
+fn as_local_assigned_to_return_place(stmt: &mir::Statement<'_>) -> Option<Local> {
+ if let mir::StatementKind::Assign(box (lhs, rhs)) = &stmt.kind {
+ if lhs.as_local() == Some(mir::RETURN_PLACE) {
+ if let mir::Rvalue::Use(mir::Operand::Copy(rhs) | mir::Operand::Move(rhs)) = rhs {
+ return rhs.as_local();
+ }
+ }
+ }
+
+ None
+}
+
+struct RenameToReturnPlace<'tcx> {
+ to_rename: Local,
+ tcx: TyCtxt<'tcx>,
+}
+
+/// Replaces all uses of `self.to_rename` with `_0`.
+impl<'tcx> MutVisitor<'tcx> for RenameToReturnPlace<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_statement(&mut self, stmt: &mut mir::Statement<'tcx>, loc: Location) {
+ // Remove assignments of the local being replaced to the return place, since it is now the
+ // return place:
+ // _0 = _1
+ if as_local_assigned_to_return_place(stmt) == Some(self.to_rename) {
+ stmt.kind = mir::StatementKind::Nop;
+ return;
+ }
+
+ // Remove storage annotations for the local being replaced:
+ // StorageLive(_1)
+ if let mir::StatementKind::StorageLive(local) | mir::StatementKind::StorageDead(local) =
+ stmt.kind
+ {
+ if local == self.to_rename {
+ stmt.kind = mir::StatementKind::Nop;
+ return;
+ }
+ }
+
+ self.super_statement(stmt, loc)
+ }
+
+ fn visit_terminator(&mut self, terminator: &mut mir::Terminator<'tcx>, loc: Location) {
+        // Ignore the implicit "use" of the return place in a `Return` terminator.
+ if let mir::TerminatorKind::Return = terminator.kind {
+ return;
+ }
+
+ self.super_terminator(terminator, loc);
+ }
+
+ fn visit_local(&mut self, l: &mut Local, ctxt: PlaceContext, _: Location) {
+ if *l == mir::RETURN_PLACE {
+ assert_eq!(ctxt, PlaceContext::NonUse(NonUseContext::VarDebugInfo));
+ } else if *l == self.to_rename {
+ *l = mir::RETURN_PLACE;
+ }
+ }
+}
+
+struct IsReturnPlaceRead(bool);
+
+impl IsReturnPlaceRead {
+ fn run(body: &mir::Body<'_>) -> bool {
+ let mut vis = IsReturnPlaceRead(false);
+ vis.visit_body(body);
+ vis.0
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for IsReturnPlaceRead {
+ fn visit_local(&mut self, l: Local, ctxt: PlaceContext, _: Location) {
+ if l == mir::RETURN_PLACE && ctxt.is_use() && !ctxt.is_place_assignment() {
+ self.0 = true;
+ }
+ }
+
+ fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, loc: Location) {
+        // Ignore the implicit "use" of the return place in a `Return` terminator.
+ if let mir::TerminatorKind::Return = terminator.kind {
+ return;
+ }
+
+ self.super_terminator(terminator, loc);
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs
new file mode 100644
index 000000000..e27d4ab16
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/pass_manager.rs
@@ -0,0 +1,157 @@
+use std::borrow::Cow;
+
+use rustc_middle::mir::{self, Body, MirPhase};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::Session;
+
+use crate::{validate, MirPass};
+
+/// Just like `MirPass`, except it cannot mutate `Body`.
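+///
+/// A minimal sketch of an implementor (illustrative; `CountBlocks` is not a real
+/// lint):
+///
+/// ```ignore (illustrative)
+/// struct CountBlocks;
+/// impl<'tcx> MirLint<'tcx> for CountBlocks {
+///     fn run_lint(&self, _tcx: TyCtxt<'tcx>, body: &Body<'tcx>) {
+///         debug!("{:?} has {} basic blocks", body.source, body.basic_blocks().len());
+///     }
+/// }
+/// ```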
+pub trait MirLint<'tcx> {
+ fn name(&self) -> Cow<'_, str> {
+ let name = std::any::type_name::<Self>();
+ if let Some(tail) = name.rfind(':') {
+ Cow::from(&name[tail + 1..])
+ } else {
+ Cow::from(name)
+ }
+ }
+
+ fn is_enabled(&self, _sess: &Session) -> bool {
+ true
+ }
+
+ fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>);
+}
+
+/// An adapter for `MirLint`s that implements `MirPass`.
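+///
+/// With this adapter, a read-only lint can be registered in an ordinary pass
+/// list, e.g. as `&Lint(CountBlocks)` (reusing the hypothetical lint above).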
+#[derive(Debug, Clone)]
+pub struct Lint<T>(pub T);
+
+impl<'tcx, T> MirPass<'tcx> for Lint<T>
+where
+ T: MirLint<'tcx>,
+{
+ fn name(&self) -> Cow<'_, str> {
+ self.0.name()
+ }
+
+ fn is_enabled(&self, sess: &Session) -> bool {
+ self.0.is_enabled(sess)
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ self.0.run_lint(tcx, body)
+ }
+
+ fn is_mir_dump_enabled(&self) -> bool {
+ false
+ }
+}
+
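+/// An adapter that enables the wrapped pass only when the session's
+/// `mir-opt-level` is at least the given value.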
+pub struct WithMinOptLevel<T>(pub u32, pub T);
+
+impl<'tcx, T> MirPass<'tcx> for WithMinOptLevel<T>
+where
+ T: MirPass<'tcx>,
+{
+ fn name(&self) -> Cow<'_, str> {
+ self.1.name()
+ }
+
+ fn is_enabled(&self, sess: &Session) -> bool {
+ sess.mir_opt_level() >= self.0 as usize
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ self.1.run_pass(tcx, body)
+ }
+
+ fn phase_change(&self) -> Option<MirPhase> {
+ self.1.phase_change()
+ }
+}
+
+pub fn run_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, passes: &[&dyn MirPass<'tcx>]) {
+ let start_phase = body.phase;
+ let mut cnt = 0;
+
+ let validate = tcx.sess.opts.unstable_opts.validate_mir;
+ let overridden_passes = &tcx.sess.opts.unstable_opts.mir_enable_passes;
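+    // Overrides come from `-Z mir-enable-passes`, e.g.
+    // `-Zmir-enable-passes=+PassOne,-PassTwo` (pass names illustrative). The
+    // lookup below searches in reverse so the last matching entry wins.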
+ trace!(?overridden_passes);
+
+ if validate {
+ validate_body(tcx, body, format!("start of phase transition from {:?}", start_phase));
+ }
+
+ for pass in passes {
+ let name = pass.name();
+
+ if let Some((_, polarity)) = overridden_passes.iter().rev().find(|(s, _)| s == &*name) {
+ trace!(
+ pass = %name,
+ "{} as requested by flag",
+ if *polarity { "Running" } else { "Not running" },
+ );
+ if !polarity {
+ continue;
+ }
+        } else if !pass.is_enabled(&tcx.sess) {
+            continue;
+        }
+ let dump_enabled = pass.is_mir_dump_enabled();
+
+ if dump_enabled {
+ dump_mir(tcx, body, start_phase, &name, cnt, false);
+ }
+
+ pass.run_pass(tcx, body);
+
+ if dump_enabled {
+ dump_mir(tcx, body, start_phase, &name, cnt, true);
+ cnt += 1;
+ }
+
+ if let Some(new_phase) = pass.phase_change() {
+ if body.phase >= new_phase {
+ panic!("Invalid MIR phase transition from {:?} to {:?}", body.phase, new_phase);
+ }
+
+ body.phase = new_phase;
+ }
+
+ if validate {
+ validate_body(tcx, body, format!("after pass {}", pass.name()));
+ }
+ }
+
+ if validate || body.phase == MirPhase::Optimized {
+ validate_body(tcx, body, format!("end of phase transition to {:?}", body.phase));
+ }
+}
+
+pub fn validate_body<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, when: String) {
+ validate::Validator { when, mir_phase: body.phase }.run_pass(tcx, body);
+}
+
+pub fn dump_mir<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ phase: MirPhase,
+ pass_name: &str,
+ cnt: usize,
+ is_after: bool,
+) {
+ let phase_index = phase as u32;
+
+ mir::dump_mir(
+ tcx,
+ Some(&format_args!("{:03}-{:03}", phase_index, cnt)),
+ pass_name,
+ if is_after { &"after" } else { &"before" },
+ body,
+ |_, _| Ok(()),
+ );
+}
diff --git a/compiler/rustc_mir_transform/src/remove_false_edges.rs b/compiler/rustc_mir_transform/src/remove_false_edges.rs
new file mode 100644
index 000000000..71f5ccf7e
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/remove_false_edges.rs
@@ -0,0 +1,29 @@
+use rustc_middle::mir::{Body, TerminatorKind};
+use rustc_middle::ty::TyCtxt;
+
+use crate::MirPass;
+
+/// Removes `FalseEdge` and `FalseUnwind` terminators from the MIR.
+///
+/// These are only needed for borrow checking, and can be removed afterwards.
+///
+/// FIXME: This should probably have its own MIR phase.
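+///
+/// A sketch of the rewrite (illustrative MIR):
+///
+/// ```ignore (illustrative)
+/// falseEdge -> [real: bb3, imaginary: bb4]   // becomes: goto -> bb3
+/// falseUnwind -> [real: bb5, unwind: bb6]    // becomes: goto -> bb5
+/// ```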
+pub struct RemoveFalseEdges;
+
+impl<'tcx> MirPass<'tcx> for RemoveFalseEdges {
+ fn run_pass(&self, _: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ for block in body.basic_blocks_mut() {
+ let terminator = block.terminator_mut();
+ terminator.kind = match terminator.kind {
+ TerminatorKind::FalseEdge { real_target, .. } => {
+ TerminatorKind::Goto { target: real_target }
+ }
+ TerminatorKind::FalseUnwind { real_target, .. } => {
+ TerminatorKind::Goto { target: real_target }
+ }
+
+ _ => continue,
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
new file mode 100644
index 000000000..5c441c5b1
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs
@@ -0,0 +1,131 @@
+use crate::MirPass;
+use rustc_index::bit_set::BitSet;
+use rustc_middle::mir::patch::MirPatch;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_target::spec::PanicStrategy;
+
+/// A pass that removes noop landing pads and replaces the unwind edges that
+/// jump to them with `None`. This is important because otherwise LLVM
+/// generates terrible code for these.
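+///
+/// A sketch of the effect (illustrative MIR): an unwind edge that targets a
+/// block doing nothing but storage bookkeeping and `resume` is useless, so
+///
+/// ```ignore (illustrative)
+/// drop(_1) -> [return: bb2, unwind: bb7]   // bb7 is a noop landing pad
+/// // becomes
+/// drop(_1) -> [return: bb2]
+/// ```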
+pub struct RemoveNoopLandingPads;
+
+impl<'tcx> MirPass<'tcx> for RemoveNoopLandingPads {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.panic_strategy() != PanicStrategy::Abort
+ }
+
+ fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ debug!("remove_noop_landing_pads({:?})", body);
+ self.remove_nop_landing_pads(body)
+ }
+}
+
+impl RemoveNoopLandingPads {
+ fn is_nop_landing_pad(
+ &self,
+ bb: BasicBlock,
+ body: &Body<'_>,
+ nop_landing_pads: &BitSet<BasicBlock>,
+ ) -> bool {
+ for stmt in &body[bb].statements {
+ match &stmt.kind {
+ StatementKind::FakeRead(..)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::AscribeUserType(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::Nop => {
+ // These are all noops in a landing pad
+ }
+
+ StatementKind::Assign(box (place, Rvalue::Use(_) | Rvalue::Discriminant(_))) => {
+ if place.as_local().is_some() {
+                        // Writing to a local (e.g., a drop flag) does not
+                        // turn a landing pad into a non-nop
+ } else {
+ return false;
+ }
+ }
+
+ StatementKind::Assign { .. }
+ | StatementKind::SetDiscriminant { .. }
+ | StatementKind::Deinit(..)
+ | StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Retag { .. } => {
+ return false;
+ }
+ }
+ }
+
+ let terminator = body[bb].terminator();
+ match terminator.kind {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::FalseEdge { .. }
+ | TerminatorKind::FalseUnwind { .. } => {
+ terminator.successors().all(|succ| nop_landing_pads.contains(succ))
+ }
+ TerminatorKind::GeneratorDrop
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::Return
+ | TerminatorKind::Abort
+ | TerminatorKind::Unreachable
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::InlineAsm { .. } => false,
+ }
+ }
+
+ fn remove_nop_landing_pads(&self, body: &mut Body<'_>) {
+ debug!("body: {:#?}", body);
+
+ // make sure there's a resume block
+ let resume_block = {
+ let mut patch = MirPatch::new(body);
+ let resume_block = patch.resume_block();
+ patch.apply(body);
+ resume_block
+ };
+ debug!("remove_noop_landing_pads: resume block is {:?}", resume_block);
+
+ let mut jumps_folded = 0;
+ let mut landing_pads_removed = 0;
+ let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks().len());
+
+ // This is a post-order traversal, so that if A post-dominates B
+ // then A will be visited before B.
+ let postorder: Vec<_> = traversal::postorder(body).map(|(bb, _)| bb).collect();
+ for bb in postorder {
+ debug!(" processing {:?}", bb);
+ if let Some(unwind) = body[bb].terminator_mut().unwind_mut() {
+ if let Some(unwind_bb) = *unwind {
+ if nop_landing_pads.contains(unwind_bb) {
+ debug!(" removing noop landing pad");
+ landing_pads_removed += 1;
+ *unwind = None;
+ }
+ }
+ }
+
+ for target in body[bb].terminator_mut().successors_mut() {
+ if *target != resume_block && nop_landing_pads.contains(*target) {
+ debug!(" folding noop jump to {:?} to resume block", target);
+ *target = resume_block;
+ jumps_folded += 1;
+ }
+ }
+
+ let is_nop_landing_pad = self.is_nop_landing_pad(bb, body, &nop_landing_pads);
+ if is_nop_landing_pad {
+ nop_landing_pads.insert(bb);
+ }
+ debug!(" is_nop_landing_pad({:?}) = {}", bb, is_nop_landing_pad);
+ }
+
+ debug!("removed {:?} jumps and {:?} landing pads", jumps_folded, landing_pads_removed);
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/remove_storage_markers.rs b/compiler/rustc_mir_transform/src/remove_storage_markers.rs
new file mode 100644
index 000000000..dbe082e90
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/remove_storage_markers.rs
@@ -0,0 +1,29 @@
+//! This pass removes storage markers if they won't be emitted during codegen.
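+//!
+//! Storage markers only matter for emitting LLVM lifetime intrinsics; when those
+//! are not emitted (the exact condition lives in `Session::emit_lifetime_markers`,
+//! roughly: not optimizing and no sanitizer that needs them), the markers are
+//! dead weight and can be dropped.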
+
+use crate::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+pub struct RemoveStorageMarkers;
+
+impl<'tcx> MirPass<'tcx> for RemoveStorageMarkers {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() > 0
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ if tcx.sess.emit_lifetime_markers() {
+ return;
+ }
+
+ trace!("Running RemoveStorageMarkers on {:?}", body.source);
+ for data in body.basic_blocks.as_mut_preserves_cfg() {
+ data.statements.retain(|statement| match statement.kind {
+ StatementKind::StorageLive(..)
+ | StatementKind::StorageDead(..)
+ | StatementKind::Nop => false,
+ _ => true,
+ })
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/remove_uninit_drops.rs b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
new file mode 100644
index 000000000..96b715402
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/remove_uninit_drops.rs
@@ -0,0 +1,171 @@
+use rustc_index::bit_set::ChunkedBitSet;
+use rustc_middle::mir::{Body, Field, Rvalue, Statement, StatementKind, TerminatorKind};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt, VariantDef};
+use rustc_mir_dataflow::impls::MaybeInitializedPlaces;
+use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex};
+use rustc_mir_dataflow::{self, move_path_children_matching, Analysis, MoveDataParamEnv};
+
+use crate::MirPass;
+
+/// Removes `Drop` and `DropAndReplace` terminators whose target is known to be uninitialized at
+/// that point.
+///
+/// This is redundant with drop elaboration, but we need to do it prior to const-checking, and
+/// running const-checking after drop elaboration makes it optimization dependent, causing issues
+/// like [#90770].
+///
+/// [#90770]: https://github.com/rust-lang/rust/issues/90770
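+///
+/// A sketch of the kind of code this helps (illustrative; the motivating cases
+/// are in [#90770]):
+///
+/// ```ignore (illustrative)
+/// const fn into_inner<T>(v: (T,)) -> T {
+///     v.0 // moving out leaves `v` fully uninitialized, so the scope-end
+///         // `Drop(v)` is dead and can be removed before const-checking
+/// }
+/// ```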
+pub struct RemoveUninitDrops;
+
+impl<'tcx> MirPass<'tcx> for RemoveUninitDrops {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let param_env = tcx.param_env(body.source.def_id());
+        let Ok((_, move_data)) = MoveData::gather_moves(body, tcx, param_env) else {
+ // We could continue if there are move errors, but there's not much point since our
+ // init data isn't complete.
+ return;
+ };
+
+ let mdpe = MoveDataParamEnv { move_data, param_env };
+ let mut maybe_inits = MaybeInitializedPlaces::new(tcx, body, &mdpe)
+ .into_engine(tcx, body)
+ .pass_name("remove_uninit_drops")
+ .iterate_to_fixpoint()
+ .into_results_cursor(body);
+
+ let mut to_remove = vec![];
+ for (bb, block) in body.basic_blocks().iter_enumerated() {
+ let terminator = block.terminator();
+ let (TerminatorKind::Drop { place, .. } | TerminatorKind::DropAndReplace { place, .. })
+ = &terminator.kind
+ else { continue };
+
+ maybe_inits.seek_before_primary_effect(body.terminator_loc(bb));
+
+            // If there's no move path for the dropped place, it's probably a `Deref`. Leave it alone.
+ let LookupResult::Exact(mpi) = mdpe.move_data.rev_lookup.find(place.as_ref()) else {
+ continue;
+ };
+
+ let should_keep = is_needs_drop_and_init(
+ tcx,
+ param_env,
+ maybe_inits.get(),
+ &mdpe.move_data,
+ place.ty(body, tcx).ty,
+ mpi,
+ );
+ if !should_keep {
+ to_remove.push(bb)
+ }
+ }
+
+ for bb in to_remove {
+ let block = &mut body.basic_blocks_mut()[bb];
+
+ let (TerminatorKind::Drop { target, .. } | TerminatorKind::DropAndReplace { target, .. })
+ = &block.terminator().kind
+ else { unreachable!() };
+
+ // Replace block terminator with `Goto`.
+ let target = *target;
+ let old_terminator_kind = std::mem::replace(
+ &mut block.terminator_mut().kind,
+ TerminatorKind::Goto { target },
+ );
+
+ // If this is a `DropAndReplace`, we need to emulate the assignment to the return place.
+ if let TerminatorKind::DropAndReplace { place, value, .. } = old_terminator_kind {
+ block.statements.push(Statement {
+ source_info: block.terminator().source_info,
+ kind: StatementKind::Assign(Box::new((place, Rvalue::Use(value)))),
+ });
+ }
+ }
+ }
+}
+
+fn is_needs_drop_and_init<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ maybe_inits: &ChunkedBitSet<MovePathIndex>,
+ move_data: &MoveData<'tcx>,
+ ty: Ty<'tcx>,
+ mpi: MovePathIndex,
+) -> bool {
+ // No need to look deeper if the root is definitely uninit or if it has no `Drop` impl.
+ if !maybe_inits.contains(mpi) || !ty.needs_drop(tcx, param_env) {
+ return false;
+ }
+
+ let field_needs_drop_and_init = |(f, f_ty, mpi)| {
+ let child = move_path_children_matching(move_data, mpi, |x| x.is_field_to(f));
+ let Some(mpi) = child else {
+ return Ty::needs_drop(f_ty, tcx, param_env);
+ };
+
+ is_needs_drop_and_init(tcx, param_env, maybe_inits, move_data, f_ty, mpi)
+ };
+
+ // This pass is only needed for const-checking, so it doesn't handle as many cases as
+ // `DropCtxt::open_drop`, since they aren't relevant in a const-context.
+ match ty.kind() {
+ ty::Adt(adt, substs) => {
+ let dont_elaborate = adt.is_union() || adt.is_manually_drop() || adt.has_dtor(tcx);
+ if dont_elaborate {
+ return true;
+ }
+
+ // Look at all our fields, or if we are an enum all our variants and their fields.
+ //
+ // If a field's projection *is not* present in `MoveData`, it has the same
+ // initializedness as its parent (maybe init).
+ //
+            // If its projection *is* present in `MoveData`, then the field may have been
+            // moved out separately from its parent. Recurse.
+ adt.variants().iter_enumerated().any(|(vid, variant)| {
+ // Enums have multiple variants, which are discriminated with a `Downcast` projection.
+ // Structs have a single variant, and don't use a `Downcast` projection.
+ let mpi = if adt.is_enum() {
+ let downcast =
+ move_path_children_matching(move_data, mpi, |x| x.is_downcast_to(vid));
+ let Some(dc_mpi) = downcast else {
+ return variant_needs_drop(tcx, param_env, substs, variant);
+ };
+
+ dc_mpi
+ } else {
+ mpi
+ };
+
+ variant
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(f, field)| (Field::from_usize(f), field.ty(tcx, substs), mpi))
+ .any(field_needs_drop_and_init)
+ })
+ }
+
+ ty::Tuple(fields) => fields
+ .iter()
+ .enumerate()
+ .map(|(f, f_ty)| (Field::from_usize(f), f_ty, mpi))
+ .any(field_needs_drop_and_init),
+
+ _ => true,
+ }
+}
+
+fn variant_needs_drop<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ substs: SubstsRef<'tcx>,
+ variant: &VariantDef,
+) -> bool {
+ variant.fields.iter().any(|field| {
+ let f_ty = field.ty(tcx, substs);
+ f_ty.needs_drop(tcx, param_env)
+ })
+}
diff --git a/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
new file mode 100644
index 000000000..84ccf6e1f
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/remove_unneeded_drops.rs
@@ -0,0 +1,45 @@
+//! This pass replaces a drop of a type that does not need dropping with a goto.
+//!
+//! When the MIR is built, we check `needs_drop` before emitting a `Drop` for a place. This pass is
+//! useful because (unlike MIR building) it runs after type checking, so it can make use of
+//! `Reveal::All` to provide more precise type information.
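+//!
+//! A sketch of a case this catches (illustrative):
+//!
+//! ```ignore (illustrative)
+//! fn make() -> impl Sized { 0_u32 }
+//! fn f() { let _x = make(); }
+//! // When `f` is lowered, `impl Sized` is opaque and conservatively treated as
+//! // needing drop; under `Reveal::All` it normalizes to `u32`, so the `Drop`
+//! // terminator becomes a `Goto`.
+//! ```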
+
+use crate::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+use super::simplify::simplify_cfg;
+
+pub struct RemoveUnneededDrops;
+
+impl<'tcx> MirPass<'tcx> for RemoveUnneededDrops {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ trace!("Running RemoveUnneededDrops on {:?}", body.source);
+
+ let did = body.source.def_id();
+ let param_env = tcx.param_env_reveal_all_normalized(did);
+ let mut should_simplify = false;
+
+ for block in body.basic_blocks.as_mut() {
+ let terminator = block.terminator_mut();
+ if let TerminatorKind::Drop { place, target, .. } = terminator.kind {
+ let ty = place.ty(&body.local_decls, tcx);
+ if ty.ty.needs_drop(tcx, param_env) {
+ continue;
+ }
+ if !tcx.consider_optimizing(|| format!("RemoveUnneededDrops {:?} ", did)) {
+ continue;
+ }
+ debug!("SUCCESS: replacing `drop` with goto({:?})", target);
+ terminator.kind = TerminatorKind::Goto { target };
+ should_simplify = true;
+ }
+ }
+
+ // if we applied optimizations, we potentially have some cfg to cleanup to
+ // make it easier for further passes
+ if should_simplify {
+ simplify_cfg(tcx, body);
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/remove_zsts.rs b/compiler/rustc_mir_transform/src/remove_zsts.rs
new file mode 100644
index 000000000..40be4f146
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/remove_zsts.rs
@@ -0,0 +1,86 @@
+//! Removes assignments to ZST places.
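+//!
+//! A sketch of the effect (illustrative MIR):
+//!
+//! ```ignore (illustrative)
+//! _1 = const ()       // assigns a ZST: replaced with a `nop` by this pass
+//! _2 = const 5_u32    // not a ZST: left alone
+//! ```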
+
+use crate::MirPass;
+use rustc_middle::mir::tcx::PlaceTy;
+use rustc_middle::mir::{Body, LocalDecls, Place, StatementKind};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+pub struct RemoveZsts;
+
+impl<'tcx> MirPass<'tcx> for RemoveZsts {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() > 0
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // Avoid query cycles (generators require optimized MIR for layout).
+ if tcx.type_of(body.source.def_id()).is_generator() {
+ return;
+ }
+ let param_env = tcx.param_env(body.source.def_id());
+ let basic_blocks = body.basic_blocks.as_mut_preserves_cfg();
+ let local_decls = &body.local_decls;
+ for block in basic_blocks {
+ for statement in block.statements.iter_mut() {
+ if let StatementKind::Assign(box (place, _)) | StatementKind::Deinit(box place) =
+ statement.kind
+ {
+ let place_ty = place.ty(local_decls, tcx).ty;
+ if !maybe_zst(place_ty) {
+ continue;
+ }
+ let Ok(layout) = tcx.layout_of(param_env.and(place_ty)) else {
+ continue;
+ };
+ if !layout.is_zst() {
+ continue;
+ }
+ if involves_a_union(place, local_decls, tcx) {
+ continue;
+ }
+ if tcx.consider_optimizing(|| {
+ format!(
+ "RemoveZsts - Place: {:?} SourceInfo: {:?}",
+ place, statement.source_info
+ )
+ }) {
+ statement.make_nop();
+ }
+ }
+ }
+ }
+ }
+}
+
+/// A cheap, approximate check to avoid unnecessary `layout_of` calls.
+fn maybe_zst(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ // maybe ZST (could be more precise)
+ ty::Adt(..) | ty::Array(..) | ty::Closure(..) | ty::Tuple(..) | ty::Opaque(..) => true,
+ // definitely ZST
+ ty::FnDef(..) | ty::Never => true,
+ // unreachable or can't be ZST
+ _ => false,
+ }
+}
+
+/// Miri lazily allocates memory for locals on assignment,
+/// so we must preserve writes to unions and union fields,
+/// or it will ICE on reads of those fields.
+fn involves_a_union<'tcx>(
+ place: Place<'tcx>,
+ local_decls: &LocalDecls<'tcx>,
+ tcx: TyCtxt<'tcx>,
+) -> bool {
+ let mut place_ty = PlaceTy::from_ty(local_decls[place.local].ty);
+ if place_ty.ty.is_union() {
+ return true;
+ }
+ for elem in place.projection {
+ place_ty = place_ty.projection_ty(tcx, elem);
+ if place_ty.ty.is_union() {
+ return true;
+ }
+ }
+    false
+}
diff --git a/compiler/rustc_mir_transform/src/required_consts.rs b/compiler/rustc_mir_transform/src/required_consts.rs
new file mode 100644
index 000000000..827ce0c02
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/required_consts.rs
@@ -0,0 +1,22 @@
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::{Constant, Location};
+use rustc_middle::ty::ConstKind;
+
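+/// Collects every constant in a body that is still `ConstKind::Unevaluated`, so
+/// callers can ensure those constants are evaluated (and any errors reported)
+/// before the body is used.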
+pub struct RequiredConstsVisitor<'a, 'tcx> {
+ required_consts: &'a mut Vec<Constant<'tcx>>,
+}
+
+impl<'a, 'tcx> RequiredConstsVisitor<'a, 'tcx> {
+ pub fn new(required_consts: &'a mut Vec<Constant<'tcx>>) -> Self {
+ RequiredConstsVisitor { required_consts }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for RequiredConstsVisitor<'_, 'tcx> {
+ fn visit_constant(&mut self, constant: &Constant<'tcx>, _: Location) {
+ let literal = constant.literal;
+ if let Some(ct) = literal.const_for_ty() && let ConstKind::Unevaluated(_) = ct.kind() {
+ self.required_consts.push(*constant);
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs
new file mode 100644
index 000000000..4919ad400
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/reveal_all.rs
@@ -0,0 +1,44 @@
+//! Normalizes MIR in RevealAll mode.
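+//!
+//! Under `Reveal::All`, opaque types and associated-type projections are
+//! normalized to their concrete types; later passes (notably inlining, per
+//! `is_enabled` below) rely on seeing those concrete types.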
+
+use crate::MirPass;
+use rustc_middle::mir::visit::*;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+pub struct RevealAll;
+
+impl<'tcx> MirPass<'tcx> for RevealAll {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 3 || super::inline::Inline.is_enabled(sess)
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // Do not apply this transformation to generators.
+ if body.generator.is_some() {
+ return;
+ }
+
+ let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id());
+ RevealAllVisitor { tcx, param_env }.visit_body(body);
+ }
+}
+
+struct RevealAllVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ #[inline]
+ fn visit_ty(&mut self, ty: &mut Ty<'tcx>, _: TyContext) {
+ // We have to use `try_normalize_erasing_regions` here, since it's
+ // possible that we visit impossible-to-satisfy where clauses here,
+ // see #91745
+ *ty = self.tcx.try_normalize_erasing_regions(self.param_env, *ty).unwrap_or(*ty);
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/separate_const_switch.rs b/compiler/rustc_mir_transform/src/separate_const_switch.rs
new file mode 100644
index 000000000..925eb10a1
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/separate_const_switch.rs
@@ -0,0 +1,341 @@
+//! A pass that duplicates switch-terminated blocks
+//! into a new copy for each predecessor, provided
+//! the predecessor sets the value being switched
+//! over to a constant.
+//!
+//! The purpose of this pass is to help constant
+//! propagation passes to simplify the switch terminator
+//! of the copied blocks into gotos when some predecessors
+//! statically determine the output of switches.
+//!
+//! ```text
+//! x = 12 --- ---> something
+//! \ / 12
+//! --> switch x
+//! / \ otherwise
+//! x = y --- ---> something else
+//! ```
+//! becomes
+//! ```text
+//! x = 12 ---> switch x ------> something
+//! \ / 12
+//! X
+//! / \ otherwise
+//! x = y ---> switch x ------> something else
+//! ```
+//! so it can hopefully later be turned by another pass into
+//! ```text
+//! x = 12 --------------------> something
+//! / 12
+//! /
+//! / otherwise
+//! x = y ---- switch x ------> something else
+//! ```
+//!
+//! This optimization is meant to cover simple cases
+//! like `?` desugaring. For now, it thus focuses on
+//! simplicity rather than completeness (notably, it sometimes
+//! duplicates blocks more aggressively than necessary).
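+//!
+//! A sketch of the motivating `?` case (illustrative):
+//!
+//! ```ignore (illustrative)
+//! fn f(x: Result<u32, ()>) -> Result<u32, ()> {
+//!     let y = x?; // the `Ok` continuation and the early `Err` return both feed
+//!     Ok(y + 1)   // one switch on the discriminant; duplicating that switch into
+//! }               // each predecessor lets const-prop fold each copy into a goto
+//! ```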
+
+use crate::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use smallvec::SmallVec;
+
+pub struct SeparateConstSwitch;
+
+impl<'tcx> MirPass<'tcx> for SeparateConstSwitch {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() >= 4
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+        // If the pass duplicated any blocks, applying a simplification layer
+        // helps later passes optimize the copies away.
+ if separate_const_switch(body) > 0 {
+ super::simplify::simplify_cfg(tcx, body);
+ }
+ }
+}
+
+/// Returns the number of blocks that were duplicated.
+pub fn separate_const_switch(body: &mut Body<'_>) -> usize {
+ let mut new_blocks: SmallVec<[(BasicBlock, BasicBlock); 6]> = SmallVec::new();
+ let predecessors = body.basic_blocks.predecessors();
+ 'block_iter: for (block_id, block) in body.basic_blocks().iter_enumerated() {
+ if let TerminatorKind::SwitchInt {
+ discr: Operand::Copy(switch_place) | Operand::Move(switch_place),
+ ..
+ } = block.terminator().kind
+ {
+            // If the block is on an unwind path, do not apply the optimization,
+            // as unwind paths rely on a unique-parent invariant.
+ if block.is_cleanup {
+ continue 'block_iter;
+ }
+
+            // If the block has fewer than 2 predecessors, ignore it; we could
+            // maybe chain blocks that have exactly one predecessor, but for now
+            // we ignore that.
+ if predecessors[block_id].len() < 2 {
+ continue 'block_iter;
+ }
+
+ // First, let's find a non-const place
+ // that determines the result of the switch
+ if let Some(switch_place) = find_determining_place(switch_place, block) {
+ // We now have an input place for which it would
+ // be interesting if predecessors assigned it from a const
+
+ let mut predecessors_left = predecessors[block_id].len();
+ 'predec_iter: for predecessor_id in predecessors[block_id].iter().copied() {
+ let predecessor = &body.basic_blocks()[predecessor_id];
+
+ // First we make sure the predecessor jumps
+ // in a reasonable way
+ match &predecessor.terminator().kind {
+ // The following terminators are
+ // unconditionally valid
+ TerminatorKind::Goto { .. } | TerminatorKind::SwitchInt { .. } => {}
+
+ TerminatorKind::FalseEdge { real_target, .. } => {
+ if *real_target != block_id {
+ continue 'predec_iter;
+ }
+ }
+
+ // The following terminators are not allowed
+ TerminatorKind::Resume
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::InlineAsm { .. }
+ | TerminatorKind::GeneratorDrop => {
+ continue 'predec_iter;
+ }
+ }
+
+ if is_likely_const(switch_place, predecessor) {
+ new_blocks.push((predecessor_id, block_id));
+ predecessors_left -= 1;
+ if predecessors_left < 2 {
+ // If the original block only has one predecessor left,
+ // we have nothing left to do
+ break 'predec_iter;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Once the analysis is done, perform the duplication
+ let body_span = body.span;
+ let copied_blocks = new_blocks.len();
+ let blocks = body.basic_blocks_mut();
+ for (pred_id, target_id) in new_blocks {
+ let new_block = blocks[target_id].clone();
+ let new_block_id = blocks.push(new_block);
+ let terminator = blocks[pred_id].terminator_mut();
+
+ match terminator.kind {
+ TerminatorKind::Goto { ref mut target } => {
+ *target = new_block_id;
+ }
+
+ TerminatorKind::FalseEdge { ref mut real_target, .. } => {
+ if *real_target == target_id {
+ *real_target = new_block_id;
+ }
+ }
+
+ TerminatorKind::SwitchInt { ref mut targets, .. } => {
+ targets.all_targets_mut().iter_mut().for_each(|x| {
+ if *x == target_id {
+ *x = new_block_id;
+ }
+ });
+ }
+
+ TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Assert { .. }
+ | TerminatorKind::DropAndReplace { .. }
+ | TerminatorKind::FalseUnwind { .. }
+ | TerminatorKind::Drop { .. }
+ | TerminatorKind::Call { .. }
+ | TerminatorKind::InlineAsm { .. }
+ | TerminatorKind::Yield { .. } => {
+ span_bug!(
+ body_span,
+ "basic block terminator had unexpected kind {:?}",
+ &terminator.kind
+ )
+ }
+ }
+ }
+
+ copied_blocks
+}
+
+/// A rough heuristic that guesses whether a place was last assigned a constant
+/// within the block. Notably, it is overly pessimistic in cases that are
+/// already not handled by `separate_const_switch`.
+fn is_likely_const<'tcx>(mut tracked_place: Place<'tcx>, block: &BasicBlockData<'tcx>) -> bool {
+ for statement in block.statements.iter().rev() {
+ match &statement.kind {
+ StatementKind::Assign(assign) => {
+ if assign.0 == tracked_place {
+ match assign.1 {
+ // These rvalues are definitely constant
+ Rvalue::Use(Operand::Constant(_))
+ | Rvalue::Ref(_, _, _)
+ | Rvalue::AddressOf(_, _)
+ | Rvalue::Cast(_, Operand::Constant(_), _)
+ | Rvalue::NullaryOp(_, _)
+ | Rvalue::ShallowInitBox(_, _)
+ | Rvalue::UnaryOp(_, Operand::Constant(_)) => return true,
+
+ // These rvalues make things ambiguous
+ Rvalue::Repeat(_, _)
+ | Rvalue::ThreadLocalRef(_)
+ | Rvalue::Len(_)
+ | Rvalue::BinaryOp(_, _)
+ | Rvalue::CheckedBinaryOp(_, _)
+ | Rvalue::Aggregate(_, _) => return false,
+
+ // These rvalues move the place to track
+ Rvalue::Cast(_, Operand::Copy(place) | Operand::Move(place), _)
+ | Rvalue::Use(Operand::Copy(place) | Operand::Move(place))
+ | Rvalue::CopyForDeref(place)
+ | Rvalue::UnaryOp(_, Operand::Copy(place) | Operand::Move(place))
+ | Rvalue::Discriminant(place) => tracked_place = place,
+ }
+ }
+ }
+
+ // If the discriminant is set, it is always set
+ // as a constant, so the job is done.
+ // As we are **ignoring projections**, if the place
+ // we are tracking sees its discriminant be set,
+ // that means we had to be tracking the discriminant
+ // specifically (as it is impossible to switch over
+ // an enum directly, and if we were switching over
+ // its content, we would have had to at least cast it to
+ // some variant first)
+ StatementKind::SetDiscriminant { place, .. } => {
+ if **place == tracked_place {
+ return true;
+ }
+ }
+
+ // These statements have no influence on the place
+ // we are interested in
+ StatementKind::FakeRead(_)
+ | StatementKind::Deinit(_)
+ | StatementKind::StorageLive(_)
+ | StatementKind::Retag(_, _)
+ | StatementKind::AscribeUserType(_, _)
+ | StatementKind::Coverage(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::CopyNonOverlapping(_)
+ | StatementKind::Nop => {}
+ }
+ }
+
+ // If no good reason for the place to be const is found,
+ // give up. We could maybe go up predecessors, but in
+ // most cases giving up now should be sufficient.
+ false
+}
+
+/// Finds a unique place that entirely determines the value
+/// of `switch_place`, if it exists. This is only a heuristic.
+/// Ideally we would like to track multiple determining places
+/// for some edge cases, but one is enough for a lot of situations.
+fn find_determining_place<'tcx>(
+ mut switch_place: Place<'tcx>,
+ block: &BasicBlockData<'tcx>,
+) -> Option<Place<'tcx>> {
+ for statement in block.statements.iter().rev() {
+ match &statement.kind {
+ StatementKind::Assign(op) => {
+ if op.0 != switch_place {
+ continue;
+ }
+
+ match op.1 {
+ // The following rvalues move the place
+ // that may be const in the predecessor
+ Rvalue::Use(Operand::Move(new) | Operand::Copy(new))
+ | Rvalue::UnaryOp(_, Operand::Copy(new) | Operand::Move(new))
+ | Rvalue::CopyForDeref(new)
+ | Rvalue::Cast(_, Operand::Move(new) | Operand::Copy(new), _)
+ | Rvalue::Repeat(Operand::Move(new) | Operand::Copy(new), _)
+ | Rvalue::Discriminant(new)
+ => switch_place = new,
+
+ // The following rvalues might still make the block
+ // be valid but for now we reject them
+ Rvalue::Len(_)
+ | Rvalue::Ref(_, _, _)
+ | Rvalue::BinaryOp(_, _)
+ | Rvalue::CheckedBinaryOp(_, _)
+ | Rvalue::Aggregate(_, _)
+
+ // The following rvalues definitely mean we cannot
+ // or should not apply this optimization
+ | Rvalue::Use(Operand::Constant(_))
+ | Rvalue::Repeat(Operand::Constant(_), _)
+ | Rvalue::ThreadLocalRef(_)
+ | Rvalue::AddressOf(_, _)
+ | Rvalue::NullaryOp(_, _)
+ | Rvalue::ShallowInitBox(_, _)
+ | Rvalue::UnaryOp(_, Operand::Constant(_))
+ | Rvalue::Cast(_, Operand::Constant(_), _)
+ => return None,
+ }
+ }
+
+ // These statements have no influence on the place
+ // we are interested in
+ StatementKind::FakeRead(_)
+ | StatementKind::Deinit(_)
+ | StatementKind::StorageLive(_)
+ | StatementKind::StorageDead(_)
+ | StatementKind::Retag(_, _)
+ | StatementKind::AscribeUserType(_, _)
+ | StatementKind::Coverage(_)
+ | StatementKind::CopyNonOverlapping(_)
+ | StatementKind::Nop => {}
+
+ // If the discriminant is set, it is always set
+ // as a constant, so the job is already done.
+ // As we are **ignoring projections**, if the place
+ // we are tracking sees its discriminant be set,
+ // that means we had to be tracking the discriminant
+ // specifically (as it is impossible to switch over
+ // an enum directly, and if we were switching over
+ // its content, we would have had to at least cast it to
+ // some variant first)
+ StatementKind::SetDiscriminant { place, .. } => {
+ if **place == switch_place {
+ return None;
+ }
+ }
+ }
+ }
+
+ Some(switch_place)
+}
diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs
new file mode 100644
index 000000000..3620e94be
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/shim.rs
@@ -0,0 +1,790 @@
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::mir::*;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::{self, EarlyBinder, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+
+use rustc_index::vec::{Idx, IndexVec};
+
+use rustc_span::Span;
+use rustc_target::spec::abi::Abi;
+
+use std::fmt;
+use std::iter;
+
+use crate::util::expand_aggregate;
+use crate::{
+ abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, marker, pass_manager as pm,
+ remove_noop_landing_pads, simplify,
+};
+use rustc_middle::mir::patch::MirPatch;
+use rustc_mir_dataflow::elaborate_drops::{self, DropElaborator, DropFlagMode, DropStyle};
+
+pub fn provide(providers: &mut Providers) {
+ providers.mir_shims = make_shim;
+}
+
+fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> Body<'tcx> {
+ debug!("make_shim({:?})", instance);
+
+ let mut result = match instance {
+ ty::InstanceDef::Item(..) => bug!("item {:?} passed to make_shim", instance),
+ ty::InstanceDef::VTableShim(def_id) => {
+ build_call_shim(tcx, instance, Some(Adjustment::Deref), CallKind::Direct(def_id))
+ }
+ ty::InstanceDef::FnPtrShim(def_id, ty) => {
+ let trait_ = tcx.trait_of_item(def_id).unwrap();
+ let adjustment = match tcx.fn_trait_kind_from_lang_item(trait_) {
+ Some(ty::ClosureKind::FnOnce) => Adjustment::Identity,
+ Some(ty::ClosureKind::FnMut | ty::ClosureKind::Fn) => Adjustment::Deref,
+ None => bug!("fn pointer {:?} is not an fn", ty),
+ };
+
+ build_call_shim(tcx, instance, Some(adjustment), CallKind::Indirect(ty))
+ }
+ // We are generating a call back to our def-id, which the
+    // codegen backend knows to turn into an actual call, be it
+ // a virtual call, or a direct call to a function for which
+ // indirect calls must be codegen'd differently than direct ones
+ // (such as `#[track_caller]`).
+ ty::InstanceDef::ReifyShim(def_id) => {
+ build_call_shim(tcx, instance, None, CallKind::Direct(def_id))
+ }
+ ty::InstanceDef::ClosureOnceShim { call_once: _, track_caller: _ } => {
+ let fn_mut = tcx.require_lang_item(LangItem::FnMut, None);
+ let call_mut = tcx
+ .associated_items(fn_mut)
+ .in_definition_order()
+ .find(|it| it.kind == ty::AssocKind::Fn)
+ .unwrap()
+ .def_id;
+
+ build_call_shim(tcx, instance, Some(Adjustment::RefMut), CallKind::Direct(call_mut))
+ }
+
+ ty::InstanceDef::DropGlue(def_id, ty) => {
+ // FIXME(#91576): Drop shims for generators aren't subject to the MIR passes at the end
+ // of this function. Is this intentional?
+ if let Some(ty::Generator(gen_def_id, substs, _)) = ty.map(Ty::kind) {
+ let body = tcx.optimized_mir(*gen_def_id).generator_drop().unwrap();
+ let body = EarlyBinder(body.clone()).subst(tcx, substs);
+ debug!("make_shim({:?}) = {:?}", instance, body);
+ return body;
+ }
+
+ build_drop_shim(tcx, def_id, ty)
+ }
+ ty::InstanceDef::CloneShim(def_id, ty) => build_clone_shim(tcx, def_id, ty),
+ ty::InstanceDef::Virtual(..) => {
+ bug!("InstanceDef::Virtual ({:?}) is for direct calls only", instance)
+ }
+ ty::InstanceDef::Intrinsic(_) => {
+ bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
+ }
+ };
+ debug!("make_shim({:?}) = untransformed {:?}", instance, result);
+
+ pm::run_passes(
+ tcx,
+ &mut result,
+ &[
+ &add_moves_for_packed_drops::AddMovesForPackedDrops,
+ &remove_noop_landing_pads::RemoveNoopLandingPads,
+ &simplify::SimplifyCfg::new("make_shim"),
+ &add_call_guards::CriticalCallEdges,
+ &abort_unwinding_calls::AbortUnwindingCalls,
+ &marker::PhaseChange(MirPhase::Const),
+ ],
+ );
+
+ debug!("make_shim({:?}) = {:?}", instance, result);
+
+ result
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum Adjustment {
+ /// Pass the receiver as-is.
+ Identity,
+
+ /// We get passed `&[mut] self` and call the target with `*self`.
+ ///
+    /// This either copies `self` (if `Self: Copy`, e.g. for function items), or moves out of it
+ /// (for `VTableShim`, which effectively is passed `&own Self`).
+ Deref,
+
+ /// We get passed `self: Self` and call the target with `&mut self`.
+ ///
+ /// In this case we need to ensure that the `Self` is dropped after the call, as the callee
+ /// won't do it for us.
+ RefMut,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum CallKind<'tcx> {
+ /// Call the `FnPtr` that was passed as the receiver.
+ Indirect(Ty<'tcx>),
+
+ /// Call a known `FnDef`.
+ Direct(DefId),
+}
+
+fn local_decls_for_sig<'tcx>(
+ sig: &ty::FnSig<'tcx>,
+ span: Span,
+) -> IndexVec<Local, LocalDecl<'tcx>> {
+ iter::once(LocalDecl::new(sig.output(), span))
+ .chain(sig.inputs().iter().map(|ity| LocalDecl::new(*ity, span).immutable()))
+ .collect()
+}
+
+fn build_drop_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, ty: Option<Ty<'tcx>>) -> Body<'tcx> {
+ debug!("build_drop_shim(def_id={:?}, ty={:?})", def_id, ty);
+
+ assert!(!matches!(ty, Some(ty) if ty.is_generator()));
+
+ let substs = if let Some(ty) = ty {
+ tcx.intern_substs(&[ty.into()])
+ } else {
+ InternalSubsts::identity_for_item(tcx, def_id)
+ };
+ let sig = tcx.bound_fn_sig(def_id).subst(tcx, substs);
+ let sig = tcx.erase_late_bound_regions(sig);
+ let span = tcx.def_span(def_id);
+
+ let source_info = SourceInfo::outermost(span);
+
+ let return_block = BasicBlock::new(1);
+ let mut blocks = IndexVec::with_capacity(2);
+ let block = |blocks: &mut IndexVec<_, _>, kind| {
+ blocks.push(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator { source_info, kind }),
+ is_cleanup: false,
+ })
+ };
+ block(&mut blocks, TerminatorKind::Goto { target: return_block });
+ block(&mut blocks, TerminatorKind::Return);
+
+ let source = MirSource::from_instance(ty::InstanceDef::DropGlue(def_id, ty));
+ let mut body =
+ new_body(source, blocks, local_decls_for_sig(&sig, span), sig.inputs().len(), span);
+
+ if ty.is_some() {
+ // The first argument (index 0), but add 1 for the return value.
+ let dropee_ptr = Place::from(Local::new(1 + 0));
+ if tcx.sess.opts.unstable_opts.mir_emit_retag {
+ // Function arguments should be retagged, and we make this one raw.
+ body.basic_blocks_mut()[START_BLOCK].statements.insert(
+ 0,
+ Statement {
+ source_info,
+ kind: StatementKind::Retag(RetagKind::Raw, Box::new(dropee_ptr)),
+ },
+ );
+ }
+ let patch = {
+ let param_env = tcx.param_env_reveal_all_normalized(def_id);
+ let mut elaborator =
+ DropShimElaborator { body: &body, patch: MirPatch::new(&body), tcx, param_env };
+ let dropee = tcx.mk_place_deref(dropee_ptr);
+ let resume_block = elaborator.patch.resume_block();
+ elaborate_drops::elaborate_drop(
+ &mut elaborator,
+ source_info,
+ dropee,
+ (),
+ return_block,
+ elaborate_drops::Unwind::To(resume_block),
+ START_BLOCK,
+ );
+ elaborator.patch
+ };
+ patch.apply(&mut body);
+ }
+
+ body
+}
+
+fn new_body<'tcx>(
+ source: MirSource<'tcx>,
+ basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+ arg_count: usize,
+ span: Span,
+) -> Body<'tcx> {
+ Body::new(
+ source,
+ basic_blocks,
+ IndexVec::from_elem_n(
+ SourceScopeData {
+ span,
+ parent_scope: None,
+ inlined: None,
+ inlined_parent_scope: None,
+ local_data: ClearCrossCrate::Clear,
+ },
+ 1,
+ ),
+ local_decls,
+ IndexVec::new(),
+ arg_count,
+ vec![],
+ span,
+ None,
+ // FIXME(compiler-errors): is this correct?
+ None,
+ )
+}
+
+pub struct DropShimElaborator<'a, 'tcx> {
+ pub body: &'a Body<'tcx>,
+ pub patch: MirPatch<'tcx>,
+ pub tcx: TyCtxt<'tcx>,
+ pub param_env: ty::ParamEnv<'tcx>,
+}
+
+impl fmt::Debug for DropShimElaborator<'_, '_> {
+ fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+ Ok(())
+ }
+}
+
+impl<'a, 'tcx> DropElaborator<'a, 'tcx> for DropShimElaborator<'a, 'tcx> {
+ type Path = ();
+
+ fn patch(&mut self) -> &mut MirPatch<'tcx> {
+ &mut self.patch
+ }
+ fn body(&self) -> &'a Body<'tcx> {
+ self.body
+ }
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+
+ fn drop_style(&self, _path: Self::Path, mode: DropFlagMode) -> DropStyle {
+ match mode {
+ DropFlagMode::Shallow => {
+ // Drops for the contained fields are "shallow" and "static" - they will simply call
+ // the field's own drop glue.
+ DropStyle::Static
+ }
+ DropFlagMode::Deep => {
+ // The top-level drop is "deep" and "open" - it will be elaborated to a drop ladder
+ // dropping each field contained in the value.
+ DropStyle::Open
+ }
+ }
+ }
+
+ fn get_drop_flag(&mut self, _path: Self::Path) -> Option<Operand<'tcx>> {
+ None
+ }
+
+ fn clear_drop_flag(&mut self, _location: Location, _path: Self::Path, _mode: DropFlagMode) {}
+
+ fn field_subpath(&self, _path: Self::Path, _field: Field) -> Option<Self::Path> {
+ None
+ }
+ fn deref_subpath(&self, _path: Self::Path) -> Option<Self::Path> {
+ None
+ }
+ fn downcast_subpath(&self, _path: Self::Path, _variant: VariantIdx) -> Option<Self::Path> {
+ Some(())
+ }
+ fn array_subpath(&self, _path: Self::Path, _index: u64, _size: u64) -> Option<Self::Path> {
+ None
+ }
+}
+
+/// Builds a `Clone::clone` shim for `self_ty`. Here, `def_id` is `Clone::clone`.
+fn build_clone_shim<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Body<'tcx> {
+ debug!("build_clone_shim(def_id={:?})", def_id);
+
+ let param_env = tcx.param_env(def_id);
+
+ let mut builder = CloneShimBuilder::new(tcx, def_id, self_ty);
+ let is_copy = self_ty.is_copy_modulo_regions(tcx.at(builder.span), param_env);
+
+ let dest = Place::return_place();
+ let src = tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
+
+ match self_ty.kind() {
+ _ if is_copy => builder.copy_shim(),
+ ty::Closure(_, substs) => {
+ builder.tuple_like_shim(dest, src, substs.as_closure().upvar_tys())
+ }
+ ty::Tuple(..) => builder.tuple_like_shim(dest, src, self_ty.tuple_fields()),
+ _ => bug!("clone shim for `{:?}` which is not `Copy` and is not an aggregate", self_ty),
+ };
+
+ builder.into_mir()
+}
+
+struct CloneShimBuilder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ local_decls: IndexVec<Local, LocalDecl<'tcx>>,
+ blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ span: Span,
+ sig: ty::FnSig<'tcx>,
+}
+
+impl<'tcx> CloneShimBuilder<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, def_id: DefId, self_ty: Ty<'tcx>) -> Self {
+        // We must substitute `self_ty` because the signature is otherwise
+        // expressed in terms of `TySelf`, and we can't index into or access
+        // fields of a `Place` whose type is `TySelf`.
+ let substs = tcx.mk_substs_trait(self_ty, &[]);
+ let sig = tcx.bound_fn_sig(def_id).subst(tcx, substs);
+ let sig = tcx.erase_late_bound_regions(sig);
+ let span = tcx.def_span(def_id);
+
+ CloneShimBuilder {
+ tcx,
+ def_id,
+ local_decls: local_decls_for_sig(&sig, span),
+ blocks: IndexVec::new(),
+ span,
+ sig,
+ }
+ }
+
+ fn into_mir(self) -> Body<'tcx> {
+ let source = MirSource::from_instance(ty::InstanceDef::CloneShim(
+ self.def_id,
+ self.sig.inputs_and_output[0],
+ ));
+ new_body(source, self.blocks, self.local_decls, self.sig.inputs().len(), self.span)
+ }
+
+ fn source_info(&self) -> SourceInfo {
+ SourceInfo::outermost(self.span)
+ }
+
+ fn block(
+ &mut self,
+ statements: Vec<Statement<'tcx>>,
+ kind: TerminatorKind<'tcx>,
+ is_cleanup: bool,
+ ) -> BasicBlock {
+ let source_info = self.source_info();
+ self.blocks.push(BasicBlockData {
+ statements,
+ terminator: Some(Terminator { source_info, kind }),
+ is_cleanup,
+ })
+ }
+
+    /// Gives the index of an upcoming BasicBlock, with an offset.
+    /// offset=0 will give you the index of the next BasicBlock,
+    /// offset=1 the index of the block after that, and so on.
+    /// (The offset is a `usize`, so already-created blocks cannot be addressed.)
+ fn block_index_offset(&mut self, offset: usize) -> BasicBlock {
+ BasicBlock::new(self.blocks.len() + offset)
+ }
+
+ fn make_statement(&self, kind: StatementKind<'tcx>) -> Statement<'tcx> {
+ Statement { source_info: self.source_info(), kind }
+ }
+
+ fn copy_shim(&mut self) {
+ let rcvr = self.tcx.mk_place_deref(Place::from(Local::new(1 + 0)));
+ let ret_statement = self.make_statement(StatementKind::Assign(Box::new((
+ Place::return_place(),
+ Rvalue::Use(Operand::Copy(rcvr)),
+ ))));
+ self.block(vec![ret_statement], TerminatorKind::Return, false);
+ }
+
+ fn make_place(&mut self, mutability: Mutability, ty: Ty<'tcx>) -> Place<'tcx> {
+ let span = self.span;
+ let mut local = LocalDecl::new(ty, span);
+ if mutability == Mutability::Not {
+ local = local.immutable();
+ }
+ Place::from(self.local_decls.push(local))
+ }
+
+ fn make_clone_call(
+ &mut self,
+ dest: Place<'tcx>,
+ src: Place<'tcx>,
+ ty: Ty<'tcx>,
+ next: BasicBlock,
+ cleanup: BasicBlock,
+ ) {
+ let tcx = self.tcx;
+
+ let substs = tcx.mk_substs_trait(ty, &[]);
+
+ // `func == Clone::clone(&ty) -> ty`
+ let func_ty = tcx.mk_fn_def(self.def_id, substs);
+ let func = Operand::Constant(Box::new(Constant {
+ span: self.span,
+ user_ty: None,
+ literal: ConstantKind::zero_sized(func_ty),
+ }));
+
+ let ref_loc = self.make_place(
+ Mutability::Not,
+ tcx.mk_ref(tcx.lifetimes.re_erased, ty::TypeAndMut { ty, mutbl: hir::Mutability::Not }),
+ );
+
+ // `let ref_loc: &ty = &src;`
+ let statement = self.make_statement(StatementKind::Assign(Box::new((
+ ref_loc,
+ Rvalue::Ref(tcx.lifetimes.re_erased, BorrowKind::Shared, src),
+ ))));
+
+ // `let loc = Clone::clone(ref_loc);`
+ self.block(
+ vec![statement],
+ TerminatorKind::Call {
+ func,
+ args: vec![Operand::Move(ref_loc)],
+ destination: dest,
+ target: Some(next),
+ cleanup: Some(cleanup),
+ from_hir_call: true,
+ fn_span: self.span,
+ },
+ false,
+ );
+ }
+
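+ /// Clones each field of a tuple-like value with successive `Clone::clone`
+ /// calls, chaining the cleanup blocks so that already-cloned fields are
+ /// dropped on unwind. A rough sketch of the resulting CFG for a 2-tuple:
+ ///
+ /// ```ignore (MIR)
+ /// bb0: dest.0 = Clone::clone(&src.0) -> [return: bb2, unwind: bb1];
+ /// bb1 (cleanup): resume;
+ /// bb2: dest.1 = Clone::clone(&src.1) -> [return: bb4, unwind: bb3];
+ /// bb3 (cleanup): drop(dest.0) -> bb1;
+ /// bb4: return;
+ /// ```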
+ fn tuple_like_shim<I>(&mut self, dest: Place<'tcx>, src: Place<'tcx>, tys: I)
+ where
+ I: IntoIterator<Item = Ty<'tcx>>,
+ {
+ let mut previous_field = None;
+ for (i, ity) in tys.into_iter().enumerate() {
+ let field = Field::new(i);
+ let src_field = self.tcx.mk_place_field(src, field, ity);
+
+ let dest_field = self.tcx.mk_place_field(dest, field, ity);
+
+ // #(2i + 1) is the cleanup block for the previous clone operation
+ let cleanup_block = self.block_index_offset(1);
+ // #(2i + 2) is the next cloning block
+ // (or the Return terminator if this is the last block)
+ let next_block = self.block_index_offset(2);
+
+ // BB #(2i)
+ // `dest.i = Clone::clone(&src.i);`
+ // Goto #(2i + 2) if ok, #(2i + 1) if unwinding happens.
+ self.make_clone_call(dest_field, src_field, ity, next_block, cleanup_block);
+
+ // BB #(2i + 1) (cleanup)
+ if let Some((previous_field, previous_cleanup)) = previous_field.take() {
+ // Drop previous field and goto previous cleanup block.
+ self.block(
+ vec![],
+ TerminatorKind::Drop {
+ place: previous_field,
+ target: previous_cleanup,
+ unwind: None,
+ },
+ true,
+ );
+ } else {
+ // Nothing to drop, just resume.
+ self.block(vec![], TerminatorKind::Resume, true);
+ }
+
+ previous_field = Some((dest_field, cleanup_block));
+ }
+
+ self.block(vec![], TerminatorKind::Return, false);
+ }
+}
+
+/// Builds a "call" shim for `instance`. The shim calls the function specified by `call_kind`,
+/// first adjusting its first argument according to `rcvr_adjustment`.
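+///
+/// As a rough sketch (not necessarily the exact MIR), with
+/// `rcvr_adjustment == Some(Adjustment::RefMut)` the generated body has the
+/// shape built below:
+///
+/// ```ignore (MIR)
+/// bb0: ref_rcvr = &mut rcvr;
+///      _0 = callee(move ref_rcvr, ...) -> [return: bb1, unwind: bb3];
+/// bb1: drop(rcvr) -> bb2;
+/// bb2: return;
+/// bb3 (cleanup): drop(rcvr) -> bb4;
+/// bb4 (cleanup): resume;
+/// ```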
+fn build_call_shim<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: ty::InstanceDef<'tcx>,
+ rcvr_adjustment: Option<Adjustment>,
+ call_kind: CallKind<'tcx>,
+) -> Body<'tcx> {
+ debug!(
+ "build_call_shim(instance={:?}, rcvr_adjustment={:?}, call_kind={:?})",
+ instance, rcvr_adjustment, call_kind
+ );
+
+ // `FnPtrShim` contains the fn pointer type that a call shim is being built for - this is used
+ // to substitute into the signature of the shim. It is not necessary for users of this
+ // MIR body to perform further substitutions (see `InstanceDef::has_polymorphic_mir_body`).
+ let (sig_substs, untuple_args) = if let ty::InstanceDef::FnPtrShim(_, ty) = instance {
+ let sig = tcx.erase_late_bound_regions(ty.fn_sig(tcx));
+
+ let untuple_args = sig.inputs();
+
+ // Create substitutions for the `Self` and `Args` generic parameters of the shim body.
+ let arg_tup = tcx.mk_tup(untuple_args.iter());
+ let sig_substs = tcx.mk_substs_trait(ty, &[ty::subst::GenericArg::from(arg_tup)]);
+
+ (Some(sig_substs), Some(untuple_args))
+ } else {
+ (None, None)
+ };
+
+ let def_id = instance.def_id();
+ let sig = tcx.bound_fn_sig(def_id);
+ let sig = sig.map_bound(|sig| tcx.erase_late_bound_regions(sig));
+
+ assert_eq!(sig_substs.is_some(), !instance.has_polymorphic_mir_body());
+ let mut sig =
+ if let Some(sig_substs) = sig_substs { sig.subst(tcx, sig_substs) } else { sig.0 };
+
+ if let CallKind::Indirect(fnty) = call_kind {
+ // `sig` determines our local decls, and thus the callee type in the `Call` terminator. This
+ // can only be an `FnDef` or `FnPtr`, but currently will be `Self` since the types come from
+ // the implemented `FnX` trait.
+
+ // Apply the opposite adjustment to the MIR input.
+ let mut inputs_and_output = sig.inputs_and_output.to_vec();
+
+ // Initial signature is `fn(&? Self, Args) -> Self::Output` where `Args` is a tuple of the
+ // fn arguments. `Self` may be passed via (im)mutable reference or by-value.
+ assert_eq!(inputs_and_output.len(), 3);
+
+ // `Self` is always the original fn type `ty`. The MIR call terminator is only defined for
+ // `FnDef` and `FnPtr` callees, not the `Self` type param.
+ let self_arg = &mut inputs_and_output[0];
+ *self_arg = match rcvr_adjustment.unwrap() {
+ Adjustment::Identity => fnty,
+ Adjustment::Deref => tcx.mk_imm_ptr(fnty),
+ Adjustment::RefMut => tcx.mk_mut_ptr(fnty),
+ };
+ sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
+ }
+
+ // FIXME(eddyb) avoid having this snippet both here and in
+ // `Instance::fn_sig` (introduce `InstanceDef::fn_sig`?).
+ if let ty::InstanceDef::VTableShim(..) = instance {
+ // Modify fn(self, ...) to fn(self: *mut Self, ...)
+ let mut inputs_and_output = sig.inputs_and_output.to_vec();
+ let self_arg = &mut inputs_and_output[0];
+ debug_assert!(tcx.generics_of(def_id).has_self && *self_arg == tcx.types.self_param);
+ *self_arg = tcx.mk_mut_ptr(*self_arg);
+ sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
+ }
+
+ let span = tcx.def_span(def_id);
+
+ debug!("build_call_shim: sig={:?}", sig);
+
+ let mut local_decls = local_decls_for_sig(&sig, span);
+ let source_info = SourceInfo::outermost(span);
+
+ let rcvr_place = || {
+ assert!(rcvr_adjustment.is_some());
+ Place::from(Local::new(1 + 0))
+ };
+ let mut statements = vec![];
+
+ let rcvr = rcvr_adjustment.map(|rcvr_adjustment| match rcvr_adjustment {
+ Adjustment::Identity => Operand::Move(rcvr_place()),
+ Adjustment::Deref => Operand::Move(tcx.mk_place_deref(rcvr_place())),
+ Adjustment::RefMut => {
+ // let rcvr = &mut rcvr;
+ let ref_rcvr = local_decls.push(
+ LocalDecl::new(
+ tcx.mk_ref(
+ tcx.lifetimes.re_erased,
+ ty::TypeAndMut { ty: sig.inputs()[0], mutbl: hir::Mutability::Mut },
+ ),
+ span,
+ )
+ .immutable(),
+ );
+ let borrow_kind = BorrowKind::Mut { allow_two_phase_borrow: false };
+ statements.push(Statement {
+ source_info,
+ kind: StatementKind::Assign(Box::new((
+ Place::from(ref_rcvr),
+ Rvalue::Ref(tcx.lifetimes.re_erased, borrow_kind, rcvr_place()),
+ ))),
+ });
+ Operand::Move(Place::from(ref_rcvr))
+ }
+ });
+
+ let (callee, mut args) = match call_kind {
+ // `FnPtr` call has no receiver. Args are untupled below.
+ CallKind::Indirect(_) => (rcvr.unwrap(), vec![]),
+
+ // `FnDef` call with optional receiver.
+ CallKind::Direct(def_id) => {
+ let ty = tcx.type_of(def_id);
+ (
+ Operand::Constant(Box::new(Constant {
+ span,
+ user_ty: None,
+ literal: ConstantKind::zero_sized(ty),
+ })),
+ rcvr.into_iter().collect::<Vec<_>>(),
+ )
+ }
+ };
+
+ let mut arg_range = 0..sig.inputs().len();
+
+ // Take the `self` ("receiver") argument out of the range (it's adjusted above).
+ if rcvr_adjustment.is_some() {
+ arg_range.start += 1;
+ }
+
+ // Take the last argument, if we need to untuple it (handled below).
+ if untuple_args.is_some() {
+ arg_range.end -= 1;
+ }
+
+ // Pass all of the non-special arguments directly.
+ args.extend(arg_range.map(|i| Operand::Move(Place::from(Local::new(1 + i)))));
+
+ // Untuple the last argument, if we have to.
+ if let Some(untuple_args) = untuple_args {
+ let tuple_arg = Local::new(1 + (sig.inputs().len() - 1));
+ args.extend(untuple_args.iter().enumerate().map(|(i, ity)| {
+ Operand::Move(tcx.mk_place_field(Place::from(tuple_arg), Field::new(i), *ity))
+ }));
+ }
+
+ let n_blocks = if let Some(Adjustment::RefMut) = rcvr_adjustment { 5 } else { 2 };
+ let mut blocks = IndexVec::with_capacity(n_blocks);
+ let block = |blocks: &mut IndexVec<_, _>, statements, kind, is_cleanup| {
+ blocks.push(BasicBlockData {
+ statements,
+ terminator: Some(Terminator { source_info, kind }),
+ is_cleanup,
+ })
+ };
+
+ // BB #0
+ block(
+ &mut blocks,
+ statements,
+ TerminatorKind::Call {
+ func: callee,
+ args,
+ destination: Place::return_place(),
+ target: Some(BasicBlock::new(1)),
+ cleanup: if let Some(Adjustment::RefMut) = rcvr_adjustment {
+ Some(BasicBlock::new(3))
+ } else {
+ None
+ },
+ from_hir_call: true,
+ fn_span: span,
+ },
+ false,
+ );
+
+ if let Some(Adjustment::RefMut) = rcvr_adjustment {
+ // BB #1 - drop for Self
+ block(
+ &mut blocks,
+ vec![],
+ TerminatorKind::Drop { place: rcvr_place(), target: BasicBlock::new(2), unwind: None },
+ false,
+ );
+ }
+ // BB #1/#2 - return
+ block(&mut blocks, vec![], TerminatorKind::Return, false);
+ if let Some(Adjustment::RefMut) = rcvr_adjustment {
+ // BB #3 - drop if closure panics
+ block(
+ &mut blocks,
+ vec![],
+ TerminatorKind::Drop { place: rcvr_place(), target: BasicBlock::new(4), unwind: None },
+ true,
+ );
+
+ // BB #4 - resume
+ block(&mut blocks, vec![], TerminatorKind::Resume, true);
+ }
+
+ let mut body =
+ new_body(MirSource::from_instance(instance), blocks, local_decls, sig.inputs().len(), span);
+
+ if let Abi::RustCall = sig.abi {
+ body.spread_arg = Some(Local::new(sig.inputs().len()));
+ }
+
+ body
+}
+
+pub fn build_adt_ctor(tcx: TyCtxt<'_>, ctor_id: DefId) -> Body<'_> {
+ debug_assert!(tcx.is_constructor(ctor_id));
+
+ let param_env = tcx.param_env(ctor_id);
+
+ // Normalize the sig.
+ let sig = tcx.fn_sig(ctor_id).no_bound_vars().expect("LBR in ADT constructor signature");
+ let sig = tcx.normalize_erasing_regions(param_env, sig);
+
+ let ty::Adt(adt_def, substs) = sig.output().kind() else {
+ bug!("unexpected type for ADT ctor {:?}", sig.output());
+ };
+
+ debug!("build_ctor: ctor_id={:?} sig={:?}", ctor_id, sig);
+
+ let span = tcx.def_span(ctor_id);
+
+ let local_decls = local_decls_for_sig(&sig, span);
+
+ let source_info = SourceInfo::outermost(span);
+
+ let variant_index = if adt_def.is_enum() {
+ adt_def.variant_index_with_ctor_id(ctor_id)
+ } else {
+ VariantIdx::new(0)
+ };
+
+ // Generate the following MIR:
+ //
+ // (return as Variant).field0 = arg0;
+ // (return as Variant).field1 = arg1;
+ //
+ // return;
+ debug!("build_ctor: variant_index={:?}", variant_index);
+
+ let statements = expand_aggregate(
+ Place::return_place(),
+ adt_def.variant(variant_index).fields.iter().enumerate().map(|(idx, field_def)| {
+ (Operand::Move(Place::from(Local::new(idx + 1))), field_def.ty(tcx, substs))
+ }),
+ AggregateKind::Adt(adt_def.did(), variant_index, substs, None, None),
+ source_info,
+ tcx,
+ )
+ .collect();
+
+ let start_block = BasicBlockData {
+ statements,
+ terminator: Some(Terminator { source_info, kind: TerminatorKind::Return }),
+ is_cleanup: false,
+ };
+
+ let source = MirSource::item(ctor_id);
+ let body = new_body(
+ source,
+ IndexVec::from_elem_n(start_block, 1),
+ local_decls,
+ sig.inputs().len(),
+ span,
+ );
+
+ rustc_middle::mir::dump_mir(tcx, None, "mir_map", &0, &body, |_, _| Ok(()));
+
+ body
+}
diff --git a/compiler/rustc_mir_transform/src/simplify.rs b/compiler/rustc_mir_transform/src/simplify.rs
new file mode 100644
index 000000000..180f4c7dc
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/simplify.rs
@@ -0,0 +1,590 @@
+//! A number of passes which remove various redundancies in the CFG.
+//!
+//! The `SimplifyCfg` pass gets rid of unnecessary blocks in the CFG, whereas the
+//! `SimplifyLocals` pass gets rid of unnecessary local variable declarations.
+//!
+//! The `SimplifyLocals` pass is fairly expensive and therefore not well suited to being run
+//! often. Most passes should neither care about nor be meaningfully impacted by extra locals,
+//! so running it once, right before codegen, should suffice.
+//!
+//! At the other end of the spectrum, the `SimplifyCfg` pass is quite cheap to run, so it should
+//! be run after every pass which may modify the CFG in significant ways. This pass must also be
+//! run before any analysis passes, because it removes dead blocks, and some of these can be
+//! ill-typed.
+//!
+//! The cause of this typing issue is that typeck allows most blocks whose end is not reachable
+//! to have an arbitrary return type, rather than the usual `()` return type (as a note, typeck's
+//! notion of reachability is in fact slightly weaker than MIR CFG reachability - see #31617). A
+//! standard example of the situation is:
+//!
+//! ```rust
+//! fn example() {
+//! let _a: char = { return; };
+//! }
+//! ```
+//!
+//! Here the block (`{ return; }`) has the return type `char`, rather than `()`, but the MIR we
+//! naively generate still contains the `_a = ()` write in the unreachable block "after" the
+//! return.
+
+use crate::MirPass;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::visit::{MutVisitor, MutatingUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use smallvec::SmallVec;
+use std::borrow::Cow;
+use std::convert::TryInto;
+
+pub struct SimplifyCfg {
+ label: String,
+}
+
+impl SimplifyCfg {
+ pub fn new(label: &str) -> Self {
+ SimplifyCfg { label: format!("SimplifyCfg-{}", label) }
+ }
+}
+
+pub fn simplify_cfg<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ CfgSimplifier::new(body).simplify();
+ remove_dead_blocks(tcx, body);
+
+ // FIXME: Should probably be moved into some kind of pass manager
+ body.basic_blocks_mut().raw.shrink_to_fit();
+}
+
+impl<'tcx> MirPass<'tcx> for SimplifyCfg {
+ fn name(&self) -> Cow<'_, str> {
+ Cow::Borrowed(&self.label)
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ debug!("SimplifyCfg({:?}) - simplifying {:?}", self.label, body.source);
+ simplify_cfg(tcx, body);
+ }
+}
+
+pub struct CfgSimplifier<'a, 'tcx> {
+ basic_blocks: &'a mut IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ pred_count: IndexVec<BasicBlock, u32>,
+}
+
+impl<'a, 'tcx> CfgSimplifier<'a, 'tcx> {
+ pub fn new(body: &'a mut Body<'tcx>) -> Self {
+ let mut pred_count = IndexVec::from_elem(0u32, body.basic_blocks());
+
+ // We can't use mir.predecessors() here, because that counts
+ // dead blocks, which we don't want.
+ pred_count[START_BLOCK] = 1;
+
+ for (_, data) in traversal::preorder(body) {
+ if let Some(ref term) = data.terminator {
+ for tgt in term.successors() {
+ pred_count[tgt] += 1;
+ }
+ }
+ }
+
+ let basic_blocks = body.basic_blocks_mut();
+
+ CfgSimplifier { basic_blocks, pred_count }
+ }
+
+ pub fn simplify(mut self) {
+ self.strip_nops();
+
+ // Vec of the blocks that should be merged. We store the indices here, instead of the
+ // statements themselves, to avoid moving the (relatively) large statements twice.
+ // We do not push the statements directly into the target block (`bb`), as that is slower
+ // due to additional reallocations.
+ let mut merged_blocks = Vec::new();
+ loop {
+ let mut changed = false;
+
+ for bb in self.basic_blocks.indices() {
+ if self.pred_count[bb] == 0 {
+ continue;
+ }
+
+ debug!("simplifying {:?}", bb);
+
+ let mut terminator =
+ self.basic_blocks[bb].terminator.take().expect("invalid terminator state");
+
+ for successor in terminator.successors_mut() {
+ self.collapse_goto_chain(successor, &mut changed);
+ }
+
+ let mut inner_changed = true;
+ merged_blocks.clear();
+ while inner_changed {
+ inner_changed = false;
+ inner_changed |= self.simplify_branch(&mut terminator);
+ inner_changed |= self.merge_successor(&mut merged_blocks, &mut terminator);
+ changed |= inner_changed;
+ }
+
+ let statements_to_merge =
+ merged_blocks.iter().map(|&i| self.basic_blocks[i].statements.len()).sum();
+
+ if statements_to_merge > 0 {
+ let mut statements = std::mem::take(&mut self.basic_blocks[bb].statements);
+ statements.reserve(statements_to_merge);
+ for &from in &merged_blocks {
+ statements.append(&mut self.basic_blocks[from].statements);
+ }
+ self.basic_blocks[bb].statements = statements;
+ }
+
+ self.basic_blocks[bb].terminator = Some(terminator);
+ }
+
+ if !changed {
+ break;
+ }
+ }
+ }
+
+ /// This function will return `None` if
+ /// * the block has statements
+ /// * the block has a terminator other than `goto`
+ /// * the block has no terminator (meaning some other part of the current optimization stole it)
+ fn take_terminator_if_simple_goto(&mut self, bb: BasicBlock) -> Option<Terminator<'tcx>> {
+ match self.basic_blocks[bb] {
+ BasicBlockData {
+ ref statements,
+ terminator:
+ ref mut terminator @ Some(Terminator { kind: TerminatorKind::Goto { .. }, .. }),
+ ..
+ } if statements.is_empty() => terminator.take(),
+ // If `terminator` is None, this means we are in a loop. In that
+ // case, let the whole loop collapse to its entry.
+ _ => None,
+ }
+ }
+
+ /// Collapse a goto chain starting from `start`
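+ ///
+ /// A sketch: given
+ ///
+ /// ```ignore (MIR)
+ /// bb0: goto -> bb1;
+ /// bb1: goto -> bb2;
+ /// ```
+ ///
+ /// a `start` edge pointing at `bb0` is redirected to `bb2`, and the
+ /// terminators along the chain are rewritten to `goto -> bb2` as well,
+ /// so the intermediate blocks lose their predecessors and can be removed.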
+ fn collapse_goto_chain(&mut self, start: &mut BasicBlock, changed: &mut bool) {
+ // Using `SmallVec` here, because in some logs on libcore oli-obk saw many single-element
+ // goto chains. We should probably benchmark different sizes.
+ let mut terminators: SmallVec<[_; 1]> = Default::default();
+ let mut current = *start;
+ while let Some(terminator) = self.take_terminator_if_simple_goto(current) {
+ let Terminator { kind: TerminatorKind::Goto { target }, .. } = terminator else {
+ unreachable!();
+ };
+ terminators.push((current, terminator));
+ current = target;
+ }
+ let last = current;
+ *start = last;
+ while let Some((current, mut terminator)) = terminators.pop() {
+ let Terminator { kind: TerminatorKind::Goto { ref mut target }, .. } = terminator else {
+ unreachable!();
+ };
+ *changed |= *target != last;
+ *target = last;
+ debug!("collapsing goto chain from {:?} to {:?}", current, target);
+
+ if self.pred_count[current] == 1 {
+ // This is the last reference to current, so the pred-count to
+ // target is moved into the current block.
+ self.pred_count[current] = 0;
+ } else {
+ self.pred_count[*target] += 1;
+ self.pred_count[current] -= 1;
+ }
+ self.basic_blocks[current].terminator = Some(terminator);
+ }
+ }
+
+ // Merge a block with a single `goto` predecessor into its parent.
+ fn merge_successor(
+ &mut self,
+ merged_blocks: &mut Vec<BasicBlock>,
+ terminator: &mut Terminator<'tcx>,
+ ) -> bool {
+ let target = match terminator.kind {
+ TerminatorKind::Goto { target } if self.pred_count[target] == 1 => target,
+ _ => return false,
+ };
+
+ debug!("merging block {:?} into {:?}", target, terminator);
+ *terminator = match self.basic_blocks[target].terminator.take() {
+ Some(terminator) => terminator,
+ None => {
+ // unreachable loop - this should not be possible, as we
+ // don't strand blocks, but handle it correctly.
+ return false;
+ }
+ };
+
+ merged_blocks.push(target);
+ self.pred_count[target] = 0;
+
+ true
+ }
+
+ // Turn a branch whose successors are all identical into a goto.
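+ //
+ // A sketch: `switchInt(_1) -> [0: bb2, otherwise: bb2]` becomes
+ // `goto -> bb2`, and `bb2`'s predecessor count drops by one.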
+ fn simplify_branch(&mut self, terminator: &mut Terminator<'tcx>) -> bool {
+ match terminator.kind {
+ TerminatorKind::SwitchInt { .. } => {}
+ _ => return false,
+ };
+
+ let first_succ = {
+ if let Some(first_succ) = terminator.successors().next() {
+ if terminator.successors().all(|s| s == first_succ) {
+ let count = terminator.successors().count();
+ self.pred_count[first_succ] -= (count - 1) as u32;
+ first_succ
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ }
+ };
+
+ debug!("simplifying branch {:?}", terminator);
+ terminator.kind = TerminatorKind::Goto { target: first_succ };
+ true
+ }
+
+ fn strip_nops(&mut self) {
+ for blk in self.basic_blocks.iter_mut() {
+ blk.statements.retain(|stmt| !matches!(stmt.kind, StatementKind::Nop))
+ }
+ }
+}
+
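+/// Removes blocks that are unreachable from `START_BLOCK`, compacting the block
+/// vector in place and renumbering every successor edge. A sketch of the
+/// compaction, assuming only `bb0` and `bb2` are reachable:
+///
+/// ```ignore (illustrative)
+/// // before: blocks = [bb0, bb1 (dead), bb2]
+/// // after:  blocks = [bb0, bb2], and edges to bb2 now point at index 1
+/// ```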
+pub fn remove_dead_blocks<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let reachable = traversal::reachable_as_bitset(body);
+ let num_blocks = body.basic_blocks().len();
+ if num_blocks == reachable.count() {
+ return;
+ }
+
+ let basic_blocks = body.basic_blocks.as_mut();
+ let source_scopes = &body.source_scopes;
+ let mut replacements: Vec<_> = (0..num_blocks).map(BasicBlock::new).collect();
+ let mut used_blocks = 0;
+ for alive_index in reachable.iter() {
+ let alive_index = alive_index.index();
+ replacements[alive_index] = BasicBlock::new(used_blocks);
+ if alive_index != used_blocks {
+ // Swap the next alive block data with the current available slot. Since
+ // alive_index is non-decreasing this is a valid operation.
+ basic_blocks.raw.swap(alive_index, used_blocks);
+ }
+ used_blocks += 1;
+ }
+
+ if tcx.sess.instrument_coverage() {
+ save_unreachable_coverage(basic_blocks, source_scopes, used_blocks);
+ }
+
+ basic_blocks.raw.truncate(used_blocks);
+
+ for block in basic_blocks {
+ for target in block.terminator_mut().successors_mut() {
+ *target = replacements[target.index()];
+ }
+ }
+}
+
+/// Some MIR transforms can determine at compile time that a sequence of
+/// statements will never be executed, so they can be dropped from the MIR.
+/// For example, an `if` or `else` block that is guaranteed to never be executed
+/// because its condition can be evaluated at compile time, such as by const
+/// evaluation: `if false { ... }`.
+///
+/// Those statements are bypassed by redirecting paths in the CFG around the
+/// `dead blocks`; but with `-C instrument-coverage`, the dead blocks usually
+/// include `Coverage` statements representing the Rust source code regions to
+/// be counted at runtime. Without these `Coverage` statements, the regions are
+/// lost, and the Rust source code will show no coverage information.
+///
+/// What we want to show in a coverage report is the dead code with coverage
+/// counts of `0`. To do this, we need to save the code regions, by injecting
+/// `Unreachable` coverage statements. These are non-executable statements whose
+/// code regions are still recorded in the coverage map, representing regions
+/// with `0` executions.
+///
+/// If there are no live `Counter` `Coverage` statements remaining, we remove
+/// `Coverage` statements along with the dead blocks, since at least one
+/// counter per function is required by LLVM (and is necessary to add the
+/// `function_hash` to the counter's call to the LLVM intrinsic
+/// `instrprof.increment()`).
+///
+/// The `generator::StateTransform` MIR pass and MIR inlining can create
+/// atypical conditions, where all live `Counter`s are dropped from the MIR.
+///
+/// With MIR inlining we can have coverage counters belonging to different
+/// instances in a single body, so the strategy described above is applied to
+/// coverage counters from each instance individually.
+fn save_unreachable_coverage(
+ basic_blocks: &mut IndexVec<BasicBlock, BasicBlockData<'_>>,
+ source_scopes: &IndexVec<SourceScope, SourceScopeData<'_>>,
+ first_dead_block: usize,
+) {
+ // Identify instances that still have some live coverage counters left.
+ let mut live = FxHashSet::default();
+ for basic_block in &basic_blocks.raw[0..first_dead_block] {
+ for statement in &basic_block.statements {
+ let StatementKind::Coverage(coverage) = &statement.kind else { continue };
+ let CoverageKind::Counter { .. } = coverage.kind else { continue };
+ let instance = statement.source_info.scope.inlined_instance(source_scopes);
+ live.insert(instance);
+ }
+ }
+
+ for block in &mut basic_blocks.raw[..first_dead_block] {
+ for statement in &mut block.statements {
+ let StatementKind::Coverage(_) = &statement.kind else { continue };
+ let instance = statement.source_info.scope.inlined_instance(source_scopes);
+ if !live.contains(&instance) {
+ statement.make_nop();
+ }
+ }
+ }
+
+ if live.is_empty() {
+ return;
+ }
+
+ // Retain coverage for instances that still have some live counters left.
+ let mut retained_coverage = Vec::new();
+ for dead_block in &basic_blocks.raw[first_dead_block..] {
+ for statement in &dead_block.statements {
+ let StatementKind::Coverage(coverage) = &statement.kind else { continue };
+ let Some(code_region) = &coverage.code_region else { continue };
+ let instance = statement.source_info.scope.inlined_instance(source_scopes);
+ if live.contains(&instance) {
+ retained_coverage.push((statement.source_info, code_region.clone()));
+ }
+ }
+ }
+
+ let start_block = &mut basic_blocks[START_BLOCK];
+ start_block.statements.extend(retained_coverage.into_iter().map(
+ |(source_info, code_region)| Statement {
+ source_info,
+ kind: StatementKind::Coverage(Box::new(Coverage {
+ kind: CoverageKind::Unreachable,
+ code_region: Some(code_region),
+ })),
+ },
+ ));
+}
+
+pub struct SimplifyLocals;
+
+impl<'tcx> MirPass<'tcx> for SimplifyLocals {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() > 0
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ trace!("running SimplifyLocals on {:?}", body.source);
+ simplify_locals(body, tcx);
+ }
+}
+
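+/// Removes unused locals, iterating to a fixed point.
+///
+/// A sketch of the cascade described below: if `_2` is never read, then in
+///
+/// ```ignore (MIR)
+/// _2 = discriminant(_1);
+/// ```
+///
+/// the assignment is removed, which also removes a use of `_1`; if that was
+/// `_1`'s last use, `_1` itself becomes removable on a later iteration.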
+pub fn simplify_locals<'tcx>(body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>) {
+ // First, we're going to get a count of *actual* uses for every `Local`.
+ let mut used_locals = UsedLocals::new(body);
+
+ // Next, we're going to remove any `Local` with zero actual uses. When we remove those
+ // `Locals`, we're also going to subtract any uses of other `Locals` from the `used_locals`
+ // count. For example, if we removed `_2 = discriminant(_1)`, then we'll subtract one from
+ // `use_counts[_1]`. That in turn might make `_1` unused, so we loop until we hit a
+ // fixedpoint where there are no more unused locals.
+ remove_unused_definitions(&mut used_locals, body);
+
+ // Finally, we'll actually do the work of shrinking `body.local_decls` and remapping the `Local`s.
+ let map = make_local_map(&mut body.local_decls, &used_locals);
+
+ // Only bother running the `LocalUpdater` if we actually found locals to remove.
+ if map.iter().any(Option::is_none) {
+ // Update references to all vars and tmps now
+ let mut updater = LocalUpdater { map, tcx };
+ updater.visit_body(body);
+
+ body.local_decls.shrink_to_fit();
+ }
+}
+
+/// Construct the mapping while swapping unused entries out of the `vec`.
+fn make_local_map<V>(
+ local_decls: &mut IndexVec<Local, V>,
+ used_locals: &UsedLocals,
+) -> IndexVec<Local, Option<Local>> {
+ let mut map: IndexVec<Local, Option<Local>> = IndexVec::from_elem(None, &*local_decls);
+ let mut used = Local::new(0);
+
+ for alive_index in local_decls.indices() {
+ // `is_used` treats the `RETURN_PLACE` and arguments as used.
+ if !used_locals.is_used(alive_index) {
+ continue;
+ }
+
+ map[alive_index] = Some(used);
+ if alive_index != used {
+ local_decls.swap(alive_index, used);
+ }
+ used.increment_by(1);
+ }
+ local_decls.truncate(used.index());
+ map
+}
+
+/// Keeps track of used & unused locals.
+struct UsedLocals {
+ increment: bool,
+ arg_count: u32,
+ use_count: IndexVec<Local, u32>,
+}
+
+impl UsedLocals {
+ /// Determines which locals are used & unused in the given body.
+ fn new(body: &Body<'_>) -> Self {
+ let mut this = Self {
+ increment: true,
+ arg_count: body.arg_count.try_into().unwrap(),
+ use_count: IndexVec::from_elem(0, &body.local_decls),
+ };
+ this.visit_body(body);
+ this
+ }
+
+ /// Checks if local is used.
+ ///
+ /// Return place and arguments are always considered used.
+ fn is_used(&self, local: Local) -> bool {
+ trace!("is_used({:?}): use_count: {:?}", local, self.use_count[local]);
+ local.as_u32() <= self.arg_count || self.use_count[local] != 0
+ }
+
+ /// Updates the use counts to reflect the removal of given statement.
+ fn statement_removed(&mut self, statement: &Statement<'_>) {
+ self.increment = false;
+
+ // The location of the statement is irrelevant.
+ let location = Location { block: START_BLOCK, statement_index: 0 };
+ self.visit_statement(statement, location);
+ }
+
+ /// Visits a left-hand side of an assignment.
+ fn visit_lhs(&mut self, place: &Place<'_>, location: Location) {
+ if place.is_indirect() {
+ // A use, not a definition.
+ self.visit_place(place, PlaceContext::MutatingUse(MutatingUseContext::Store), location);
+ } else {
+ // A definition. The base local itself is not visited, so this occurrence is not counted
+ // toward its use count. There might still be other locals used in an indexing
+ // projection.
+ self.super_projection(
+ place.as_ref(),
+ PlaceContext::MutatingUse(MutatingUseContext::Projection),
+ location,
+ );
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for UsedLocals {
+ fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
+ match statement.kind {
+ StatementKind::CopyNonOverlapping(..)
+ | StatementKind::Retag(..)
+ | StatementKind::Coverage(..)
+ | StatementKind::FakeRead(..)
+ | StatementKind::AscribeUserType(..) => {
+ self.super_statement(statement, location);
+ }
+
+ StatementKind::Nop => {}
+
+ StatementKind::StorageLive(_local) | StatementKind::StorageDead(_local) => {}
+
+ StatementKind::Assign(box (ref place, ref rvalue)) => {
+ if rvalue.is_safe_to_remove() {
+ self.visit_lhs(place, location);
+ self.visit_rvalue(rvalue, location);
+ } else {
+ self.super_statement(statement, location);
+ }
+ }
+
+ StatementKind::SetDiscriminant { ref place, variant_index: _ }
+ | StatementKind::Deinit(ref place) => {
+ self.visit_lhs(place, location);
+ }
+ }
+ }
+
+ fn visit_local(&mut self, local: Local, _ctx: PlaceContext, _location: Location) {
+ if self.increment {
+ self.use_count[local] += 1;
+ } else {
+ assert_ne!(self.use_count[local], 0);
+ self.use_count[local] -= 1;
+ }
+ }
+}
+
+/// Removes unused definitions. Updates the used locals to reflect the changes made.
+fn remove_unused_definitions(used_locals: &mut UsedLocals, body: &mut Body<'_>) {
+ // The use counts are updated as we remove the statements. A local might become unused
+ // during the retain operation, leading to a temporary inconsistency (storage statements or
+ // definitions referencing the local might remain). For correctness it is crucial that this
+ // computation reaches a fixed point.
+
+ let mut modified = true;
+ while modified {
+ modified = false;
+
+ for data in body.basic_blocks_mut() {
+ // Remove unnecessary StorageLive and StorageDead annotations.
+ data.statements.retain(|statement| {
+ let keep = match &statement.kind {
+ StatementKind::StorageLive(local) | StatementKind::StorageDead(local) => {
+ used_locals.is_used(*local)
+ }
+ StatementKind::Assign(box (place, _)) => used_locals.is_used(place.local),
+
+ StatementKind::SetDiscriminant { ref place, .. }
+ | StatementKind::Deinit(ref place) => used_locals.is_used(place.local),
+ _ => true,
+ };
+
+ if !keep {
+ trace!("removing statement {:?}", statement);
+ modified = true;
+ used_locals.statement_removed(statement);
+ }
+
+ keep
+ });
+ }
+ }
+}
+
+struct LocalUpdater<'tcx> {
+ map: IndexVec<Local, Option<Local>>,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> MutVisitor<'tcx> for LocalUpdater<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn visit_local(&mut self, l: &mut Local, _: PlaceContext, _: Location) {
+ *l = self.map[*l].unwrap();
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/simplify_branches.rs b/compiler/rustc_mir_transform/src/simplify_branches.rs
new file mode 100644
index 000000000..3bbae5b89
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/simplify_branches.rs
@@ -0,0 +1,52 @@
+use crate::MirPass;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+use std::borrow::Cow;
+
+/// A pass that replaces a branch with a goto when its condition is known.
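+///
+/// A sketch:
+///
+/// ```ignore (MIR)
+/// switchInt(const true) -> [false: bb1, otherwise: bb2];
+/// ```
+///
+/// becomes `goto -> bb2`; likewise an `assert` whose constant condition
+/// matches its expected value is replaced by a `goto` to its target.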
+pub struct SimplifyConstCondition {
+ label: String,
+}
+
+impl SimplifyConstCondition {
+ pub fn new(label: &str) -> Self {
+ SimplifyConstCondition { label: format!("SimplifyConstCondition-{}", label) }
+ }
+}
+
+impl<'tcx> MirPass<'tcx> for SimplifyConstCondition {
+ fn name(&self) -> Cow<'_, str> {
+ Cow::Borrowed(&self.label)
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let param_env = tcx.param_env(body.source.def_id());
+ for block in body.basic_blocks_mut() {
+ let terminator = block.terminator_mut();
+ terminator.kind = match terminator.kind {
+ TerminatorKind::SwitchInt {
+ discr: Operand::Constant(ref c),
+ switch_ty,
+ ref targets,
+ ..
+ } => {
+ let constant = c.literal.try_eval_bits(tcx, param_env, switch_ty);
+ if let Some(constant) = constant {
+ let target = targets.target_for_value(constant);
+ TerminatorKind::Goto { target }
+ } else {
+ continue;
+ }
+ }
+ TerminatorKind::Assert {
+ target, cond: Operand::Constant(ref c), expected, ..
+ } => match c.literal.try_eval_bool(tcx, param_env) {
+ Some(v) if v == expected => TerminatorKind::Goto { target },
+ _ => continue,
+ },
+ _ => continue,
+ };
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
new file mode 100644
index 000000000..bbfaace70
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/simplify_comparison_integral.rs
@@ -0,0 +1,242 @@
+use std::iter;
+
+use super::MirPass;
+use rustc_middle::{
+ mir::{
+ interpret::Scalar, BasicBlock, BinOp, Body, Operand, Place, Rvalue, Statement,
+ StatementKind, SwitchTargets, TerminatorKind,
+ },
+ ty::{Ty, TyCtxt},
+};
+
+/// Pass to convert `if` conditions on integrals into switches on the integral.
+/// For an example, it turns something like
+///
+/// ```ignore (MIR)
+/// _3 = Eq(move _4, const 43i32);
+/// StorageDead(_4);
+/// switchInt(_3) -> [false: bb2, otherwise: bb3];
+/// ```
+///
+/// into:
+///
+/// ```ignore (MIR)
+/// switchInt(_4) -> [43i32: bb3, otherwise: bb2];
+/// ```
+pub struct SimplifyComparisonIntegral;
+
+impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() > 0
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ trace!("Running SimplifyComparisonIntegral on {:?}", body.source);
+
+ let helper = OptimizationFinder { body };
+ let opts = helper.find_optimizations();
+ let mut storage_deads_to_insert = vec![];
+ let mut storage_deads_to_remove: Vec<(usize, BasicBlock)> = vec![];
+ let param_env = tcx.param_env(body.source.def_id());
+ for opt in opts {
+ trace!("SUCCESS: Applying {:?}", opt);
+ // replace terminator with a switchInt that switches on the integer directly
+ let bbs = &mut body.basic_blocks_mut();
+ let bb = &mut bbs[opt.bb_idx];
+ let new_value = match opt.branch_value_scalar {
+ Scalar::Int(int) => {
+ let layout = tcx
+ .layout_of(param_env.and(opt.branch_value_ty))
+ .expect("if we have an evaluated constant we must know the layout");
+ int.assert_bits(layout.size)
+ }
+ Scalar::Ptr(..) => continue,
+ };
+ const FALSE: u128 = 0;
+
+ let mut new_targets = opt.targets;
+ let first_value = new_targets.iter().next().unwrap().0;
+ let first_is_false_target = first_value == FALSE;
+ match opt.op {
+ BinOp::Eq => {
+ // if the assignment was Eq we want the true case to be first
+ if first_is_false_target {
+ new_targets.all_targets_mut().swap(0, 1);
+ }
+ }
+ BinOp::Ne => {
+ // if the assignment was Ne we want the false case to be first
+ if !first_is_false_target {
+ new_targets.all_targets_mut().swap(0, 1);
+ }
+ }
+ _ => unreachable!(),
+ }
+
+ // Delete the comparison statement if the value being switched on was moved,
+ // which means it cannot be used later on.
+ if opt.can_remove_bin_op_stmt {
+ bb.statements[opt.bin_op_stmt_idx].make_nop();
+ } else {
+ // If the integer being compared to a const integral is being moved into the comparison,
+ // e.g. `_2 = Eq(move _3, const 'x');`,
+ // we want to avoid making a double move later on in the switchInt on _3.
+ // So to avoid `switchInt(move _3) -> ['x': bb2, otherwise: bb1];`,
+ // we convert the move in the comparison statement to a copy.
+
+ // unwrap is safe as we know this statement is an assign
+ let (_, rhs) = bb.statements[opt.bin_op_stmt_idx].kind.as_assign_mut().unwrap();
+
+ use Operand::*;
+ match rhs {
+ Rvalue::BinaryOp(_, box (ref mut left @ Move(_), Constant(_))) => {
+ *left = Copy(opt.to_switch_on);
+ }
+ Rvalue::BinaryOp(_, box (Constant(_), ref mut right @ Move(_))) => {
+ *right = Copy(opt.to_switch_on);
+ }
+ _ => (),
+ }
+ }
+
+ let terminator = bb.terminator();
+
+ // remove StorageDead (if it exists) being used in the assign of the comparison
+ for (stmt_idx, stmt) in bb.statements.iter().enumerate() {
+ if !matches!(stmt.kind, StatementKind::StorageDead(local) if local == opt.to_switch_on.local)
+ {
+ continue;
+ }
+ storage_deads_to_remove.push((stmt_idx, opt.bb_idx));
+ // if we have StorageDeads to remove then make sure to insert them at the top of each target
+ for bb_idx in new_targets.all_targets() {
+ storage_deads_to_insert.push((
+ *bb_idx,
+ Statement {
+ source_info: terminator.source_info,
+ kind: StatementKind::StorageDead(opt.to_switch_on.local),
+ },
+ ));
+ }
+ }
+
+ let [bb_cond, bb_otherwise] = match new_targets.all_targets() {
+ [a, b] => [*a, *b],
+ e => bug!("expected 2 switch targets, got: {:?}", e),
+ };
+
+ let targets = SwitchTargets::new(iter::once((new_value, bb_cond)), bb_otherwise);
+
+ let terminator = bb.terminator_mut();
+ terminator.kind = TerminatorKind::SwitchInt {
+ discr: Operand::Move(opt.to_switch_on),
+ switch_ty: opt.branch_value_ty,
+ targets,
+ };
+ }
+
+ for (idx, bb_idx) in storage_deads_to_remove {
+ body.basic_blocks_mut()[bb_idx].statements[idx].make_nop();
+ }
+
+ for (idx, stmt) in storage_deads_to_insert {
+ body.basic_blocks_mut()[idx].statements.insert(0, stmt);
+ }
+ }
+}
+
+struct OptimizationFinder<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+}
+
+impl<'tcx> OptimizationFinder<'_, 'tcx> {
+ fn find_optimizations(&self) -> Vec<OptimizationInfo<'tcx>> {
+ self.body
+ .basic_blocks()
+ .iter_enumerated()
+ .filter_map(|(bb_idx, bb)| {
+ // find switch
+ let (place_switched_on, targets, place_switched_on_moved) =
+ match &bb.terminator().kind {
+ rustc_middle::mir::TerminatorKind::SwitchInt { discr, targets, .. } => {
+ Some((discr.place()?, targets, discr.is_move()))
+ }
+ _ => None,
+ }?;
+
+ // find the statement that assigns the place being switched on
+ bb.statements.iter().enumerate().rev().find_map(|(stmt_idx, stmt)| {
+ match &stmt.kind {
+ rustc_middle::mir::StatementKind::Assign(box (lhs, rhs))
+ if *lhs == place_switched_on =>
+ {
+ match rhs {
+ Rvalue::BinaryOp(
+ op @ (BinOp::Eq | BinOp::Ne),
+ box (left, right),
+ ) => {
+ let (branch_value_scalar, branch_value_ty, to_switch_on) =
+ find_branch_value_info(left, right)?;
+
+ Some(OptimizationInfo {
+ bin_op_stmt_idx: stmt_idx,
+ bb_idx,
+ can_remove_bin_op_stmt: place_switched_on_moved,
+ to_switch_on,
+ branch_value_scalar,
+ branch_value_ty,
+ op: *op,
+ targets: targets.clone(),
+ })
+ }
+ _ => None,
+ }
+ }
+ _ => None,
+ }
+ })
+ })
+ .collect()
+ }
+}
+
+fn find_branch_value_info<'tcx>(
+ left: &Operand<'tcx>,
+ right: &Operand<'tcx>,
+) -> Option<(Scalar, Ty<'tcx>, Place<'tcx>)> {
+ // check that either left or right is a constant.
+ // if any are, we can use the other to switch on, and the constant as a value in a switch
+ use Operand::*;
+ match (left, right) {
+ (Constant(branch_value), Copy(to_switch_on) | Move(to_switch_on))
+ | (Copy(to_switch_on) | Move(to_switch_on), Constant(branch_value)) => {
+ let branch_value_ty = branch_value.literal.ty();
+ // we only want to apply this optimization if we are matching on integrals (and chars), as it is not possible to switch on floats
+ if !branch_value_ty.is_integral() && !branch_value_ty.is_char() {
+ return None;
+ };
+ let branch_value_scalar = branch_value.literal.try_to_scalar()?;
+ Some((branch_value_scalar, branch_value_ty, *to_switch_on))
+ }
+ _ => None,
+ }
+}
+
+#[derive(Debug)]
+struct OptimizationInfo<'tcx> {
+ /// Basic block to apply the optimization
+ bb_idx: BasicBlock,
+ /// Statement index of the Eq/Ne assignment; whether it can actually be removed is
+ /// tracked by `can_remove_bin_op_stmt` (it cannot be removed if its result is used later on)
+ bin_op_stmt_idx: usize,
+ /// Can remove Eq/Ne assignment
+ can_remove_bin_op_stmt: bool,
+ /// Place that needs to be switched on. This place is of type integral
+ to_switch_on: Place<'tcx>,
+ /// Constant to use in switch target value
+ branch_value_scalar: Scalar,
+ /// Type of the constant value
+ branch_value_ty: Ty<'tcx>,
+ /// Either Eq or Ne
+ op: BinOp,
+ /// Current targets used in the switch
+ targets: SwitchTargets,
+}
diff --git a/compiler/rustc_mir_transform/src/simplify_try.rs b/compiler/rustc_mir_transform/src/simplify_try.rs
new file mode 100644
index 000000000..d52f1261b
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/simplify_try.rs
@@ -0,0 +1,822 @@
+//! The general point of the optimizations provided here is to simplify something like:
+//!
+//! ```rust
+//! # fn foo<T, E>(x: Result<T, E>) -> Result<T, E> {
+//! match x {
+//! Ok(x) => Ok(x),
+//! Err(x) => Err(x)
+//! }
+//! # }
+//! ```
+//!
+//! into just `x`.
+
+use crate::{simplify, MirPass};
+use itertools::Itertools as _;
+use rustc_index::{bit_set::BitSet, vec::IndexVec};
+use rustc_middle::mir::visit::{NonUseContext, PlaceContext, Visitor};
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, List, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+use std::iter::{once, Enumerate, Peekable};
+use std::slice::Iter;
+
+/// Simplifies arms of form `Variant(x) => Variant(x)` to just a move.
+///
+/// This is done by transforming basic blocks where the statements match:
+///
+/// ```ignore (MIR)
+/// _LOCAL_TMP = ((_LOCAL_1 as Variant ).FIELD: TY );
+/// _TMP_2 = _LOCAL_TMP;
+/// ((_LOCAL_0 as Variant).FIELD: TY) = move _TMP_2;
+/// discriminant(_LOCAL_0) = VAR_IDX;
+/// ```
+///
+/// into:
+///
+/// ```ignore (MIR)
+/// _LOCAL_0 = move _LOCAL_1
+/// ```
+pub struct SimplifyArmIdentity;
+
+#[derive(Debug)]
+struct ArmIdentityInfo<'tcx> {
+ /// Storage location for the variant's field
+ local_temp_0: Local,
+ /// Storage location holding the variant being read from
+ local_1: Local,
+ /// The variant field being read from
+ vf_s0: VarField<'tcx>,
+ /// Index of the statement which loads the variant being read
+ get_variant_field_stmt: usize,
+
+ /// Tracks each assignment to a temporary of the variant's field
+ field_tmp_assignments: Vec<(Local, Local)>,
+
+ /// Storage location holding the variant's field that was read from
+ local_tmp_s1: Local,
+ /// Storage location holding the enum that we are writing to
+ local_0: Local,
+ /// The variant field being written to
+ vf_s1: VarField<'tcx>,
+
+ /// Storage location that the discriminant is being written to
+ set_discr_local: Local,
+ /// The variant being written
+ set_discr_var_idx: VariantIdx,
+
+ /// Index of the statement that should be overwritten as a move
+ stmt_to_overwrite: usize,
+ /// SourceInfo for the new move
+ source_info: SourceInfo,
+
+ /// Indices of matching Storage{Live,Dead} statements encountered.
+ /// (StorageLive index, StorageDead index, Local)
+ storage_stmts: Vec<(usize, usize, Local)>,
+
+ /// The statements that should be removed (turned into nops)
+ stmts_to_remove: Vec<usize>,
+
+ /// Indices of debug variables that need to be adjusted to point to
+ /// `{local_0}.{dbg_projection}`.
+ dbg_info_to_adjust: Vec<usize>,
+
+ /// The projection used to rewrite debug info.
+ dbg_projection: &'tcx List<PlaceElem<'tcx>>,
+}
+
+fn get_arm_identity_info<'a, 'tcx>(
+ stmts: &'a [Statement<'tcx>],
+ locals_count: usize,
+ debug_info: &'a [VarDebugInfo<'tcx>],
+) -> Option<ArmIdentityInfo<'tcx>> {
+ // This can't possibly match unless there are at least 3 statements in the block
+ // so fail fast on tiny blocks.
+ if stmts.len() < 3 {
+ return None;
+ }
+
+ let mut tmp_assigns = Vec::new();
+ let mut nop_stmts = Vec::new();
+ let mut storage_stmts = Vec::new();
+ let mut storage_live_stmts = Vec::new();
+ let mut storage_dead_stmts = Vec::new();
+
+ type StmtIter<'a, 'tcx> = Peekable<Enumerate<Iter<'a, Statement<'tcx>>>>;
+
+ fn is_storage_stmt(stmt: &Statement<'_>) -> bool {
+ matches!(stmt.kind, StatementKind::StorageLive(_) | StatementKind::StorageDead(_))
+ }
+
+ /// Eats consecutive Statements which match `test`, performing the specified `action` for each.
+ /// The iterator `stmt_iter` is not advanced if none were matched.
+ fn try_eat<'a, 'tcx>(
+ stmt_iter: &mut StmtIter<'a, 'tcx>,
+ test: impl Fn(&'a Statement<'tcx>) -> bool,
+ mut action: impl FnMut(usize, &'a Statement<'tcx>),
+ ) {
+ while stmt_iter.peek().map_or(false, |(_, stmt)| test(stmt)) {
+ let (idx, stmt) = stmt_iter.next().unwrap();
+
+ action(idx, stmt);
+ }
+ }
+
+ /// Eats consecutive `StorageLive` and `StorageDead` Statements.
+ /// The iterator `stmt_iter` is not advanced if none were found.
+ fn try_eat_storage_stmts(
+ stmt_iter: &mut StmtIter<'_, '_>,
+ storage_live_stmts: &mut Vec<(usize, Local)>,
+ storage_dead_stmts: &mut Vec<(usize, Local)>,
+ ) {
+ try_eat(stmt_iter, is_storage_stmt, |idx, stmt| {
+ if let StatementKind::StorageLive(l) = stmt.kind {
+ storage_live_stmts.push((idx, l));
+ } else if let StatementKind::StorageDead(l) = stmt.kind {
+ storage_dead_stmts.push((idx, l));
+ }
+ })
+ }
+
+ fn is_tmp_storage_stmt(stmt: &Statement<'_>) -> bool {
+ use rustc_middle::mir::StatementKind::Assign;
+ if let Assign(box (place, Rvalue::Use(Operand::Copy(p) | Operand::Move(p)))) = &stmt.kind {
+ place.as_local().is_some() && p.as_local().is_some()
+ } else {
+ false
+ }
+ }
+
+ /// Eats consecutive `Assign` Statements.
+ /// The iterator `stmt_iter` is not advanced if none were found.
+ fn try_eat_assign_tmp_stmts(
+ stmt_iter: &mut StmtIter<'_, '_>,
+ tmp_assigns: &mut Vec<(Local, Local)>,
+ nop_stmts: &mut Vec<usize>,
+ ) {
+ try_eat(stmt_iter, is_tmp_storage_stmt, |idx, stmt| {
+ use rustc_middle::mir::StatementKind::Assign;
+ if let Assign(box (place, Rvalue::Use(Operand::Copy(p) | Operand::Move(p)))) =
+ &stmt.kind
+ {
+ tmp_assigns.push((place.as_local().unwrap(), p.as_local().unwrap()));
+ nop_stmts.push(idx);
+ }
+ })
+ }
+
+ fn find_storage_live_dead_stmts_for_local(
+ local: Local,
+ stmts: &[Statement<'_>],
+ ) -> Option<(usize, usize)> {
+ trace!("looking for {:?}", local);
+ let mut storage_live_stmt = None;
+ let mut storage_dead_stmt = None;
+ for (idx, stmt) in stmts.iter().enumerate() {
+ if stmt.kind == StatementKind::StorageLive(local) {
+ storage_live_stmt = Some(idx);
+ } else if stmt.kind == StatementKind::StorageDead(local) {
+ storage_dead_stmt = Some(idx);
+ }
+ }
+
+ Some((storage_live_stmt?, storage_dead_stmt.unwrap_or(usize::MAX)))
+ }
+
+ // Try to match the expected MIR structure with the basic block we're processing.
+ // We want to see something that looks like:
+ // ```
+ // (StorageLive(_) | StorageDead(_));*
+ // _LOCAL_INTO = ((_LOCAL_FROM as Variant).FIELD: TY);
+ // (StorageLive(_) | StorageDead(_));*
+ // (tmp_n+1 = tmp_n);*
+ // (StorageLive(_) | StorageDead(_));*
+ // (tmp_n+1 = tmp_n);*
+ // ((LOCAL_FROM as Variant).FIELD: TY) = move tmp;
+ // discriminant(LOCAL_FROM) = VariantIdx;
+ // (StorageLive(_) | StorageDead(_));*
+ // ```
+ let mut stmt_iter = stmts.iter().enumerate().peekable();
+
+ try_eat_storage_stmts(&mut stmt_iter, &mut storage_live_stmts, &mut storage_dead_stmts);
+
+ let (get_variant_field_stmt, stmt) = stmt_iter.next()?;
+ let (local_tmp_s0, local_1, vf_s0, dbg_projection) = match_get_variant_field(stmt)?;
+
+ try_eat_storage_stmts(&mut stmt_iter, &mut storage_live_stmts, &mut storage_dead_stmts);
+
+ try_eat_assign_tmp_stmts(&mut stmt_iter, &mut tmp_assigns, &mut nop_stmts);
+
+ try_eat_storage_stmts(&mut stmt_iter, &mut storage_live_stmts, &mut storage_dead_stmts);
+
+ try_eat_assign_tmp_stmts(&mut stmt_iter, &mut tmp_assigns, &mut nop_stmts);
+
+ let (idx, stmt) = stmt_iter.next()?;
+ let (local_tmp_s1, local_0, vf_s1) = match_set_variant_field(stmt)?;
+ nop_stmts.push(idx);
+
+ let (idx, stmt) = stmt_iter.next()?;
+ let (set_discr_local, set_discr_var_idx) = match_set_discr(stmt)?;
+ let discr_stmt_source_info = stmt.source_info;
+ nop_stmts.push(idx);
+
+ try_eat_storage_stmts(&mut stmt_iter, &mut storage_live_stmts, &mut storage_dead_stmts);
+
+ for (live_idx, live_local) in storage_live_stmts {
+ if let Some(i) = storage_dead_stmts.iter().rposition(|(_, l)| *l == live_local) {
+ let (dead_idx, _) = storage_dead_stmts.swap_remove(i);
+ storage_stmts.push((live_idx, dead_idx, live_local));
+
+ if live_local == local_tmp_s0 {
+ nop_stmts.push(get_variant_field_stmt);
+ }
+ }
+ }
+ // We sort primitive `usize`s here, so we can use an unstable sort.
+ nop_stmts.sort_unstable();
+
+ // Use one of the statements we're going to discard, located between the
+ // point where the storage location for the variant field becomes live
+ // and the point where it is killed.
+ let (live_idx, dead_idx) = find_storage_live_dead_stmts_for_local(local_tmp_s0, stmts)?;
+ let stmt_to_overwrite =
+ nop_stmts.iter().find(|stmt_idx| live_idx < **stmt_idx && **stmt_idx < dead_idx);
+
+ let mut tmp_assigned_vars = BitSet::new_empty(locals_count);
+ for (l, r) in &tmp_assigns {
+ tmp_assigned_vars.insert(*l);
+ tmp_assigned_vars.insert(*r);
+ }
+
+ let dbg_info_to_adjust: Vec<_> = debug_info
+ .iter()
+ .enumerate()
+ .filter_map(|(i, var_info)| {
+ if let VarDebugInfoContents::Place(p) = var_info.value {
+ if tmp_assigned_vars.contains(p.local) {
+ return Some(i);
+ }
+ }
+
+ None
+ })
+ .collect();
+
+ Some(ArmIdentityInfo {
+ local_temp_0: local_tmp_s0,
+ local_1,
+ vf_s0,
+ get_variant_field_stmt,
+ field_tmp_assignments: tmp_assigns,
+ local_tmp_s1,
+ local_0,
+ vf_s1,
+ set_discr_local,
+ set_discr_var_idx,
+ stmt_to_overwrite: *stmt_to_overwrite?,
+ source_info: discr_stmt_source_info,
+ storage_stmts,
+ stmts_to_remove: nop_stmts,
+ dbg_info_to_adjust,
+ dbg_projection,
+ })
+}
+
+fn optimization_applies<'tcx>(
+ opt_info: &ArmIdentityInfo<'tcx>,
+ local_decls: &IndexVec<Local, LocalDecl<'tcx>>,
+ local_uses: &IndexVec<Local, usize>,
+ var_debug_info: &[VarDebugInfo<'tcx>],
+) -> bool {
+ trace!("testing if optimization applies...");
+
+ // FIXME(wesleywiser): possibly relax this restriction?
+ if opt_info.local_0 == opt_info.local_1 {
+ trace!("NO: moving into ourselves");
+ return false;
+ } else if opt_info.vf_s0 != opt_info.vf_s1 {
+ trace!("NO: the field-and-variant information do not match");
+ return false;
+ } else if local_decls[opt_info.local_0].ty != local_decls[opt_info.local_1].ty {
+ // FIXME(Centril,oli-obk): possibly relax to same layout?
+ trace!("NO: source and target locals have different types");
+ return false;
+ } else if (opt_info.local_0, opt_info.vf_s0.var_idx)
+ != (opt_info.set_discr_local, opt_info.set_discr_var_idx)
+ {
+ trace!("NO: the discriminants do not match");
+ return false;
+ }
+
+ // Verify the assignment chain consists of the form b = a; c = b; d = c; etc...
+ if opt_info.field_tmp_assignments.is_empty() {
+ trace!("NO: no assignments found");
+ return false;
+ }
+ let mut last_assigned_to = opt_info.field_tmp_assignments[0].1;
+ let source_local = last_assigned_to;
+ for (l, r) in &opt_info.field_tmp_assignments {
+ if *r != last_assigned_to {
+ trace!("NO: found unexpected assignment {:?} = {:?}", l, r);
+ return false;
+ }
+
+ last_assigned_to = *l;
+ }
+
+ // Check that the first and last used locals are only used twice
+ // since they are of the form:
+ //
+ // ```
+ // _first = ((_x as Variant).n: ty);
+ // _n = _first;
+ // ...
+ // ((_y as Variant).n: ty) = _n;
+ // discriminant(_y) = z;
+ // ```
+ for (l, r) in &opt_info.field_tmp_assignments {
+ if local_uses[*l] != 2 {
+ warn!("NO: FAILED assignment chain local {:?} was used more than twice", l);
+ return false;
+ } else if local_uses[*r] != 2 {
+ warn!("NO: FAILED assignment chain local {:?} was used more than twice", r);
+ return false;
+ }
+ }
+
+ // Check that debug info only points to full Locals and not projections.
+ for dbg_idx in &opt_info.dbg_info_to_adjust {
+ let dbg_info = &var_debug_info[*dbg_idx];
+ if let VarDebugInfoContents::Place(p) = dbg_info.value {
+ if !p.projection.is_empty() {
+ trace!("NO: debug info for {:?} had a projection {:?}", dbg_info.name, p);
+ return false;
+ }
+ }
+ }
+
+ if source_local != opt_info.local_temp_0 {
+ trace!(
+ "NO: start of assignment chain does not match enum variant temp: {:?} != {:?}",
+ source_local,
+ opt_info.local_temp_0
+ );
+ return false;
+ } else if last_assigned_to != opt_info.local_tmp_s1 {
+ trace!(
+ "NO: end of assignment chain does not match written enum temp: {:?} != {:?}",
+ last_assigned_to,
+ opt_info.local_tmp_s1
+ );
+ return false;
+ }
+
+ trace!("SUCCESS: optimization applies!");
+ true
+}
+
+impl<'tcx> MirPass<'tcx> for SimplifyArmIdentity {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // FIXME(77359): This optimization can result in unsoundness.
+ if !tcx.sess.opts.unstable_opts.unsound_mir_opts {
+ return;
+ }
+
+ let source = body.source;
+ trace!("running SimplifyArmIdentity on {:?}", source);
+
+ let local_uses = LocalUseCounter::get_local_uses(body);
+ for bb in body.basic_blocks.as_mut() {
+ if let Some(opt_info) =
+ get_arm_identity_info(&bb.statements, body.local_decls.len(), &body.var_debug_info)
+ {
+ trace!("got opt_info = {:#?}", opt_info);
+ if !optimization_applies(
+ &opt_info,
+ &body.local_decls,
+ &local_uses,
+ &body.var_debug_info,
+ ) {
+ debug!("optimization skipped for {:?}", source);
+ continue;
+ }
+
+ // Also remove unused Storage{Live,Dead} statements which correspond
+ // to temps used previously.
+ for (live_idx, dead_idx, local) in &opt_info.storage_stmts {
+ // The temporary that we've read the variant field into is scoped to this block,
+ // so we can remove the assignment.
+ if *local == opt_info.local_temp_0 {
+ bb.statements[opt_info.get_variant_field_stmt].make_nop();
+ }
+
+ for (left, right) in &opt_info.field_tmp_assignments {
+ if local == left || local == right {
+ bb.statements[*live_idx].make_nop();
+ bb.statements[*dead_idx].make_nop();
+ }
+ }
+ }
+
+ // Right shape; transform
+ for stmt_idx in opt_info.stmts_to_remove {
+ bb.statements[stmt_idx].make_nop();
+ }
+
+ let stmt = &mut bb.statements[opt_info.stmt_to_overwrite];
+ stmt.source_info = opt_info.source_info;
+ stmt.kind = StatementKind::Assign(Box::new((
+ opt_info.local_0.into(),
+ Rvalue::Use(Operand::Move(opt_info.local_1.into())),
+ )));
+
+ bb.statements.retain(|stmt| stmt.kind != StatementKind::Nop);
+
+ // Fix the debug info to point to the right local
+ for dbg_index in opt_info.dbg_info_to_adjust {
+ let dbg_info = &mut body.var_debug_info[dbg_index];
+ assert!(
+ matches!(dbg_info.value, VarDebugInfoContents::Place(_)),
+ "value was not a Place"
+ );
+ if let VarDebugInfoContents::Place(p) = &mut dbg_info.value {
+ assert!(p.projection.is_empty());
+ p.local = opt_info.local_0;
+ p.projection = opt_info.dbg_projection;
+ }
+ }
+
+ trace!("block is now {:?}", bb.statements);
+ }
+ }
+ }
+}
+
+struct LocalUseCounter {
+ local_uses: IndexVec<Local, usize>,
+}
+
+impl LocalUseCounter {
+ fn get_local_uses(body: &Body<'_>) -> IndexVec<Local, usize> {
+ let mut counter = LocalUseCounter { local_uses: IndexVec::from_elem(0, &body.local_decls) };
+ counter.visit_body(body);
+ counter.local_uses
+ }
+}
+
+impl Visitor<'_> for LocalUseCounter {
+ fn visit_local(&mut self, local: Local, context: PlaceContext, _location: Location) {
+ if context.is_storage_marker()
+ || context == PlaceContext::NonUse(NonUseContext::VarDebugInfo)
+ {
+ return;
+ }
+
+ self.local_uses[local] += 1;
+ }
+}
+
+/// Match on:
+/// ```ignore (MIR)
+/// _LOCAL_INTO = ((_LOCAL_FROM as Variant).FIELD: TY);
+/// ```
+fn match_get_variant_field<'tcx>(
+ stmt: &Statement<'tcx>,
+) -> Option<(Local, Local, VarField<'tcx>, &'tcx List<PlaceElem<'tcx>>)> {
+ match &stmt.kind {
+ StatementKind::Assign(box (
+ place_into,
+ Rvalue::Use(Operand::Copy(pf) | Operand::Move(pf)),
+ )) => {
+ let local_into = place_into.as_local()?;
+ let (local_from, vf) = match_variant_field_place(*pf)?;
+ Some((local_into, local_from, vf, pf.projection))
+ }
+ _ => None,
+ }
+}
+
+/// Match on:
+/// ```ignore (MIR)
+/// ((_LOCAL_FROM as Variant).FIELD: TY) = move _LOCAL_INTO;
+/// ```
+fn match_set_variant_field<'tcx>(stmt: &Statement<'tcx>) -> Option<(Local, Local, VarField<'tcx>)> {
+ match &stmt.kind {
+ StatementKind::Assign(box (place_from, Rvalue::Use(Operand::Move(place_into)))) => {
+ let local_into = place_into.as_local()?;
+ let (local_from, vf) = match_variant_field_place(*place_from)?;
+ Some((local_into, local_from, vf))
+ }
+ _ => None,
+ }
+}
+
+/// Match on:
+/// ```ignore (MIR)
+/// discriminant(_LOCAL_TO_SET) = VAR_IDX;
+/// ```
+fn match_set_discr(stmt: &Statement<'_>) -> Option<(Local, VariantIdx)> {
+ match &stmt.kind {
+ StatementKind::SetDiscriminant { place, variant_index } => {
+ Some((place.as_local()?, *variant_index))
+ }
+ _ => None,
+ }
+}
+
+#[derive(PartialEq, Debug)]
+struct VarField<'tcx> {
+ field: Field,
+ field_ty: Ty<'tcx>,
+ var_idx: VariantIdx,
+}
+
+/// Match on `((_LOCAL as Variant).FIELD: TY)`.
+fn match_variant_field_place<'tcx>(place: Place<'tcx>) -> Option<(Local, VarField<'tcx>)> {
+ match place.as_ref() {
+ PlaceRef {
+ local,
+ projection: &[ProjectionElem::Downcast(_, var_idx), ProjectionElem::Field(field, ty)],
+ } => Some((local, VarField { field, field_ty: ty, var_idx })),
+ _ => None,
+ }
+}
+
+/// Simplifies `SwitchInt(_) -> [targets]`,
+/// where all the `targets` have the same form,
+/// into `goto -> target_first`.
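+///
+/// A sketch: for
+///
+/// ```ignore (MIR)
+/// switchInt(move _2) -> [0: bb1, 1: bb2, otherwise: bb3];
+/// ```
+///
+/// where `bb1`, `bb2` and `bb3` contain pairwise-equivalent statements, the
+/// terminator is replaced by `goto -> bb1` and the now-unreachable blocks are
+/// removed via `simplify::remove_dead_blocks`.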
+pub struct SimplifyBranchSame;
+
+impl<'tcx> MirPass<'tcx> for SimplifyBranchSame {
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ // This optimization is disabled by default for now due to
+ // soundness concerns; see issue #89485 and PR #89489.
+ if !tcx.sess.opts.unstable_opts.unsound_mir_opts {
+ return;
+ }
+
+ trace!("Running SimplifyBranchSame on {:?}", body.source);
+ let finder = SimplifyBranchSameOptimizationFinder { body, tcx };
+ let opts = finder.find();
+
+ let did_remove_blocks = !opts.is_empty();
+ for opt in opts.iter() {
+ trace!("SUCCESS: Applying optimization {:?}", opt);
+ // Replace `SwitchInt(..) -> [bb_first, ..];` with a `goto -> bb_first;`.
+ body.basic_blocks_mut()[opt.bb_to_opt_terminator].terminator_mut().kind =
+ TerminatorKind::Goto { target: opt.bb_to_goto };
+ }
+
+ if did_remove_blocks {
+ // We have dead blocks now, so remove those.
+ simplify::remove_dead_blocks(tcx, body);
+ }
+ }
+}
+
+#[derive(Debug)]
+struct SimplifyBranchSameOptimization {
+ /// All of the target basic blocks are equivalent, so jump to this one
+ bb_to_goto: BasicBlock,
+ /// Basic block where the terminator can be simplified to a goto
+ bb_to_opt_terminator: BasicBlock,
+}
+
+struct SwitchTargetAndValue {
+ target: BasicBlock,
+ // `None` for the `otherwise` branch
+ value: Option<u128>,
+}
+
+struct SimplifyBranchSameOptimizationFinder<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> SimplifyBranchSameOptimizationFinder<'_, 'tcx> {
+ fn find(&self) -> Vec<SimplifyBranchSameOptimization> {
+ self.body
+ .basic_blocks()
+ .iter_enumerated()
+ .filter_map(|(bb_idx, bb)| {
+ let (discr_switched_on, targets_and_values) = match &bb.terminator().kind {
+ TerminatorKind::SwitchInt { targets, discr, .. } => {
+ let targets_and_values: Vec<_> = targets.iter()
+ .map(|(val, target)| SwitchTargetAndValue { target, value: Some(val) })
+ .chain(once(SwitchTargetAndValue { target: targets.otherwise(), value: None }))
+ .collect();
+ (discr, targets_and_values)
+ },
+ _ => return None,
+ };
+
+ // Find the ADT whose discriminant is read; the read is assumed to be
+ // the last statement of the block.
+ let adt_matched_on = match &bb.statements.last()?.kind {
+ StatementKind::Assign(box (place, rhs))
+ if Some(*place) == discr_switched_on.place() =>
+ {
+ match rhs {
+ Rvalue::Discriminant(adt_place) if adt_place.ty(self.body, self.tcx).ty.is_enum() => adt_place,
+ _ => {
+ trace!("NO: expected a discriminant read of an enum instead of: {:?}", rhs);
+ return None;
+ }
+ }
+ }
+ other => {
+ trace!("NO: expected an assignment of a discriminant read to a place. Found: {:?}", other);
+ return None
+ },
+ };
+
+ let mut iter_bbs_reachable = targets_and_values
+ .iter()
+ .map(|target_and_value| (target_and_value, &self.body.basic_blocks()[target_and_value.target]))
+ .filter(|(_, bb)| {
+ // Reaching `unreachable` is UB so assume it doesn't happen.
+ bb.terminator().kind != TerminatorKind::Unreachable
+ })
+ .peekable();
+
+ let bb_first = iter_bbs_reachable.peek().map_or(&targets_and_values[0], |(idx, _)| *idx);
+ let mut all_successors_equivalent = StatementEquality::TrivialEqual;
+
+ // All successor basic blocks must be equal or contain statements that are pairwise considered equal.
+ for ((target_and_value_l,bb_l), (target_and_value_r,bb_r)) in iter_bbs_reachable.tuple_windows() {
+ let trivial_checks = bb_l.is_cleanup == bb_r.is_cleanup
+ && bb_l.terminator().kind == bb_r.terminator().kind
+ && bb_l.statements.len() == bb_r.statements.len();
+ let statement_check = || {
+ bb_l.statements.iter().zip(&bb_r.statements).try_fold(StatementEquality::TrivialEqual, |acc,(l,r)| {
+ let stmt_equality = self.statement_equality(*adt_matched_on, &l, target_and_value_l, &r, target_and_value_r);
+ if matches!(stmt_equality, StatementEquality::NotEqual) {
+ // short circuit
+ None
+ } else {
+ Some(acc.combine(&stmt_equality))
+ }
+ })
+ .unwrap_or(StatementEquality::NotEqual)
+ };
+ if !trivial_checks {
+ all_successors_equivalent = StatementEquality::NotEqual;
+ break;
+ }
+ all_successors_equivalent = all_successors_equivalent.combine(&statement_check());
+ }
+
+ match all_successors_equivalent {
+ StatementEquality::TrivialEqual => {
+ // statements are trivially equal, so just take the first
+ trace!("Statements are trivially equal");
+ Some(SimplifyBranchSameOptimization {
+ bb_to_goto: bb_first.target,
+ bb_to_opt_terminator: bb_idx,
+ })
+ }
+ StatementEquality::ConsideredEqual(bb_to_choose) => {
+ trace!("Statements are considered equal");
+ Some(SimplifyBranchSameOptimization {
+ bb_to_goto: bb_to_choose,
+ bb_to_opt_terminator: bb_idx,
+ })
+ }
+ StatementEquality::NotEqual => {
+ trace!("NO: not all successors of basic block {:?} were equivalent", bb_idx);
+ None
+ }
+ }
+ })
+ .collect()
+ }
+
+ /// Tests if two statements can be considered equal
+ ///
+ /// Statements can be trivially equal if the kinds match.
+ /// But they can also be considered equal in the following case A:
+ /// ```ignore (MIR)
+ /// discriminant(_0) = 0; // bb1
+ /// _0 = move _1; // bb2
+ /// ```
+ /// In this case the two statements are equal iff
+ /// - `_0` is an enum where the variant index 0 is fieldless, and
+ /// - bb1 was targeted by a switch where the discriminant of `_1` was switched on
+ fn statement_equality(
+ &self,
+ adt_matched_on: Place<'tcx>,
+ x: &Statement<'tcx>,
+ x_target_and_value: &SwitchTargetAndValue,
+ y: &Statement<'tcx>,
+ y_target_and_value: &SwitchTargetAndValue,
+ ) -> StatementEquality {
+ let helper = |rhs: &Rvalue<'tcx>,
+ place: &Place<'tcx>,
+ variant_index: VariantIdx,
+ switch_value: u128,
+ side_to_choose| {
+ let place_type = place.ty(self.body, self.tcx).ty;
+ let adt = match *place_type.kind() {
+ ty::Adt(adt, _) if adt.is_enum() => adt,
+ _ => return StatementEquality::NotEqual,
+ };
+ // We need to make sure that the switch value that targets the bb with
+ // SetDiscriminant is the same as the variant discriminant.
+ let variant_discr = adt.discriminant_for_variant(self.tcx, variant_index).val;
+ if variant_discr != switch_value {
+ trace!(
+ "NO: variant discriminant {} does not equal switch value {}",
+ variant_discr,
+ switch_value
+ );
+ return StatementEquality::NotEqual;
+ }
+ let variant_is_fieldless = adt.variant(variant_index).fields.is_empty();
+ if !variant_is_fieldless {
+ trace!("NO: variant {:?} was not fieldless", variant_index);
+ return StatementEquality::NotEqual;
+ }
+
+ match rhs {
+ Rvalue::Use(operand) if operand.place() == Some(adt_matched_on) => {
+ StatementEquality::ConsideredEqual(side_to_choose)
+ }
+ _ => {
+ trace!(
+ "NO: RHS of assignment was {:?}, but expected it to match the adt being matched on in the switch, which is {:?}",
+ rhs,
+ adt_matched_on
+ );
+ StatementEquality::NotEqual
+ }
+ }
+ };
+ match (&x.kind, &y.kind) {
+ // trivial case
+ (x, y) if x == y => StatementEquality::TrivialEqual,
+
+ // check for case A
+ (
+ StatementKind::Assign(box (_, rhs)),
+ &StatementKind::SetDiscriminant { ref place, variant_index },
+ ) if y_target_and_value.value.is_some() => {
+ // choose basic block of x, as that has the assign
+ helper(
+ rhs,
+ place,
+ variant_index,
+ y_target_and_value.value.unwrap(),
+ x_target_and_value.target,
+ )
+ }
+ (
+ &StatementKind::SetDiscriminant { ref place, variant_index },
+ &StatementKind::Assign(box (_, ref rhs)),
+ ) if x_target_and_value.value.is_some() => {
+ // choose basic block of y, as that has the assign
+ helper(
+ rhs,
+ place,
+ variant_index,
+ x_target_and_value.value.unwrap(),
+ y_target_and_value.target,
+ )
+ }
+ _ => {
+ trace!("NO: statements `{:?}` and `{:?}` not considered equal", x, y);
+ StatementEquality::NotEqual
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, Eq, PartialEq)]
+enum StatementEquality {
+ /// The two statements are trivially equal (identical kinds and contents)
+ TrivialEqual,
+ /// The two statements are considered equal, but may be of different kinds.
+ /// The `BasicBlock` field is the basic block to jump to when performing the
+ /// branch-same optimization.
+ /// For example, `_0 = _1` and `discriminant(_0) = 0` are considered equal if
+ /// variant 0 is a fieldless variant of an enum. But we don't want to jump to
+ /// the basic block with the `SetDiscriminant`, as that is not legal if `_1`
+ /// does not already hold variant 0.
+ ConsideredEqual(BasicBlock),
+ /// The two statements are not equal
+ NotEqual,
+}
+
+impl StatementEquality {
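+ /// Combines two equality judgements: `TrivialEqual` is the identity,
+ /// `NotEqual` is absorbing, and two `ConsideredEqual` values only combine
+ /// when they agree on the target basic block.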
+ fn combine(&self, other: &StatementEquality) -> StatementEquality {
+ use StatementEquality::*;
+ match (self, other) {
+ (TrivialEqual, TrivialEqual) => TrivialEqual,
+ (TrivialEqual, ConsideredEqual(b)) | (ConsideredEqual(b), TrivialEqual) => {
+ ConsideredEqual(*b)
+ }
+ (ConsideredEqual(b1), ConsideredEqual(b2)) => {
+ if b1 == b2 {
+ ConsideredEqual(*b1)
+ } else {
+ NotEqual
+ }
+ }
+ (_, NotEqual) | (NotEqual, _) => NotEqual,
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
new file mode 100644
index 000000000..30be64f5b
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/uninhabited_enum_branching.rs
@@ -0,0 +1,149 @@
+//! A pass that eliminates branches on uninhabited enum variants.
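+//!
+//! A minimal sketch of the effect (illustrative MIR, not real compiler
+//! output): for `enum E { A, B(!) }`, variant `B` is uninhabited, so a switch
+//! on the discriminant of an `E` never needs a target for it:
+//! ```ignore (MIR)
+//! switchInt(_2) -> [0: bb1, 1: bb2, otherwise: bb3];
+//! // becomes
+//! switchInt(_2) -> [0: bb1, otherwise: bb3];
+//! ```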
+
+use crate::MirPass;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::mir::{
+ BasicBlockData, Body, Local, Operand, Rvalue, StatementKind, SwitchTargets, Terminator,
+ TerminatorKind,
+};
+use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::ty::{Ty, TyCtxt};
+use rustc_target::abi::{Abi, Variants};
+
+pub struct UninhabitedEnumBranching;
+
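+/// If the terminator is a `SwitchInt` whose discriminant is a moved local,
+/// returns that local.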
+fn get_discriminant_local(terminator: &TerminatorKind<'_>) -> Option<Local> {
+ if let TerminatorKind::SwitchInt { discr: Operand::Move(p), .. } = terminator {
+ p.as_local()
+ } else {
+ None
+ }
+}
+
+/// If the basic block terminates by switching on a discriminant, this returns the `Ty` the
+/// discriminant is read from. Otherwise, returns None.
+fn get_switched_on_type<'tcx>(
+ block_data: &BasicBlockData<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+) -> Option<Ty<'tcx>> {
+ let terminator = block_data.terminator();
+
+ // Only bother checking blocks which terminate by switching on a local.
+ if let Some(local) = get_discriminant_local(&terminator.kind) {
+ let stmt_before_term = (!block_data.statements.is_empty())
+ .then(|| &block_data.statements[block_data.statements.len() - 1].kind);
+
+ if let Some(StatementKind::Assign(box (l, Rvalue::Discriminant(place)))) = stmt_before_term
+ {
+ if l.as_local() == Some(local) {
+ let ty = place.ty(body, tcx).ty;
+ if ty.is_enum() {
+ return Some(ty);
+ }
+ }
+ }
+ }
+
+ None
+}
+
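+/// Returns the discriminant values of the inhabited variants of `ty`: the
+/// single variant's discriminant for `Variants::Single`, and the
+/// discriminants of all variants whose ABI is not `Uninhabited` for
+/// `Variants::Multiple`.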
+fn variant_discriminants<'tcx>(
+ layout: &TyAndLayout<'tcx>,
+ ty: Ty<'tcx>,
+ tcx: TyCtxt<'tcx>,
+) -> FxHashSet<u128> {
+ match &layout.variants {
+ Variants::Single { index } => {
+ let mut res = FxHashSet::default();
+ res.insert(
+ ty.discriminant_for_variant(tcx, *index)
+ .map_or(index.as_u32() as u128, |discr| discr.val),
+ );
+ res
+ }
+ Variants::Multiple { variants, .. } => variants
+ .iter_enumerated()
+ .filter_map(|(idx, layout)| {
+ (layout.abi() != Abi::Uninhabited)
+ .then(|| ty.discriminant_for_variant(tcx, idx).unwrap().val)
+ })
+ .collect(),
+ }
+}
+
+/// Checks whether the `otherwise` branch already leads to an unreachable bb,
+/// returning `None` if so, and otherwise a fresh bb (terminated by
+/// `Unreachable`) to use as the new target.
+fn ensure_otherwise_unreachable<'tcx>(
+ body: &Body<'tcx>,
+ targets: &SwitchTargets,
+) -> Option<BasicBlockData<'tcx>> {
+ let otherwise = targets.otherwise();
+ let bb = &body.basic_blocks()[otherwise];
+ if bb.terminator().kind == TerminatorKind::Unreachable
+ && bb.statements.iter().all(|s| matches!(&s.kind, StatementKind::StorageDead(_)))
+ {
+ return None;
+ }
+
+ let mut new_block = BasicBlockData::new(Some(Terminator {
+ source_info: bb.terminator().source_info,
+ kind: TerminatorKind::Unreachable,
+ }));
+ new_block.is_cleanup = bb.is_cleanup;
+ Some(new_block)
+}
+
+impl<'tcx> MirPass<'tcx> for UninhabitedEnumBranching {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ sess.mir_opt_level() > 0
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ trace!("UninhabitedEnumBranching starting for {:?}", body.source);
+
+ for bb in body.basic_blocks().indices() {
+ trace!("processing block {:?}", bb);
+
+ let Some(discriminant_ty) = get_switched_on_type(&body.basic_blocks()[bb], tcx, body) else {
+ continue;
+ };
+
+ let layout = tcx.layout_of(tcx.param_env(body.source.def_id()).and(discriminant_ty));
+
+ let allowed_variants = if let Ok(layout) = layout {
+ variant_discriminants(&layout, discriminant_ty, tcx)
+ } else {
+ continue;
+ };
+
+ trace!("allowed_variants = {:?}", allowed_variants);
+
+ if let TerminatorKind::SwitchInt { targets, .. } =
+ &mut body.basic_blocks_mut()[bb].terminator_mut().kind
+ {
+ let mut new_targets = SwitchTargets::new(
+ targets.iter().filter(|(val, _)| allowed_variants.contains(val)),
+ targets.otherwise(),
+ );
+
+ if new_targets.iter().count() == allowed_variants.len() {
+ if let Some(updated) = ensure_otherwise_unreachable(body, &new_targets) {
+ let new_otherwise = body.basic_blocks_mut().push(updated);
+ *new_targets.all_targets_mut().last_mut().unwrap() = new_otherwise;
+ }
+ }
+
+ if let TerminatorKind::SwitchInt { targets, .. } =
+ &mut body.basic_blocks_mut()[bb].terminator_mut().kind
+ {
+ *targets = new_targets;
+ } else {
+ unreachable!()
+ }
+ } else {
+ unreachable!()
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_mir_transform/src/unreachable_prop.rs b/compiler/rustc_mir_transform/src/unreachable_prop.rs
new file mode 100644
index 000000000..f916ca362
--- /dev/null
+++ b/compiler/rustc_mir_transform/src/unreachable_prop.rs
@@ -0,0 +1,102 @@
+//! A pass that propagates the unreachable terminator of a block to its predecessors
+//! when all of their successors are unreachable. This is achieved through a
+//! post-order traversal of the blocks.
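+//!
+//! A minimal sketch of the idea (illustrative MIR; block numbers are
+//! hypothetical):
+//! ```ignore (MIR)
+//! bb0: { switchInt(_1) -> [0: bb1, otherwise: bb2]; }
+//! bb1: { unreachable; }
+//! bb2: { unreachable; }
+//! // every successor of bb0 is unreachable, so bb0 itself becomes:
+//! bb0: { unreachable; }
+//! ```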
+
+use crate::simplify;
+use crate::MirPass;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+pub struct UnreachablePropagation;
+
+impl MirPass<'_> for UnreachablePropagation {
+ fn is_enabled(&self, sess: &rustc_session::Session) -> bool {
+ // Enable only under -Zmir-opt-level=4 as in some cases (check the deeply-nested-opt
+ // perf benchmark) LLVM may spend quite a lot of time optimizing the generated code.
+ sess.mir_opt_level() >= 4
+ }
+
+ fn run_pass<'tcx>(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+ let mut unreachable_blocks = FxHashSet::default();
+ let mut replacements = FxHashMap::default();
+
+ for (bb, bb_data) in traversal::postorder(body) {
+ let terminator = bb_data.terminator();
+ if terminator.kind == TerminatorKind::Unreachable {
+ unreachable_blocks.insert(bb);
+ } else {
+ let is_unreachable = |succ: BasicBlock| unreachable_blocks.contains(&succ);
+ let terminator_kind_opt = remove_successors(&terminator.kind, is_unreachable);
+
+ if let Some(terminator_kind) = terminator_kind_opt {
+ if terminator_kind == TerminatorKind::Unreachable {
+ unreachable_blocks.insert(bb);
+ }
+ replacements.insert(bb, terminator_kind);
+ }
+ }
+ }
+
+ let replaced = !replacements.is_empty();
+ for (bb, terminator_kind) in replacements {
+ if !tcx.consider_optimizing(|| {
+ format!("UnreachablePropagation {:?} ", body.source.def_id())
+ }) {
+ break;
+ }
+
+ body.basic_blocks_mut()[bb].terminator_mut().kind = terminator_kind;
+ }
+
+ if replaced {
+ simplify::remove_dead_blocks(tcx, body);
+ }
+ }
+}
+
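+/// Rewrites `terminator_kind`, dropping every successor for which `predicate`
+/// returns true (i.e. every successor known to be unreachable). Returns the
+/// simplified terminator kind, or `None` if nothing changed.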
+fn remove_successors<'tcx, F>(
+ terminator_kind: &TerminatorKind<'tcx>,
+ predicate: F,
+) -> Option<TerminatorKind<'tcx>>
+where
+ F: Fn(BasicBlock) -> bool,
+{
+ let terminator = match *terminator_kind {
+ TerminatorKind::Goto { target } if predicate(target) => TerminatorKind::Unreachable,
+ TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
+ let otherwise = targets.otherwise();
+
+ let original_targets_len = targets.iter().len() + 1;
+ let (mut values, mut targets): (Vec<_>, Vec<_>) =
+ targets.iter().filter(|(_, bb)| !predicate(*bb)).unzip();
+
+ if !predicate(otherwise) {
+ targets.push(otherwise);
+ } else {
+ values.pop();
+ }
+
+ let retained_targets_len = targets.len();
+
+ if targets.is_empty() {
+ TerminatorKind::Unreachable
+ } else if targets.len() == 1 {
+ TerminatorKind::Goto { target: targets[0] }
+ } else if original_targets_len != retained_targets_len {
+ TerminatorKind::SwitchInt {
+ discr: discr.clone(),
+ switch_ty,
+ targets: SwitchTargets::new(
+ values.iter().copied().zip(targets.iter().copied()),
+ *targets.last().unwrap(),
+ ),
+ }
+ } else {
+ return None;
+ }
+ }
+ _ => return None,
+ };
+ Some(terminator)
+}
diff --git a/compiler/rustc_monomorphize/Cargo.toml b/compiler/rustc_monomorphize/Cargo.toml
new file mode 100644
index 000000000..41ba4d4b6
--- /dev/null
+++ b/compiler/rustc_monomorphize/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "rustc_monomorphize"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+tracing = "0.1"
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs
new file mode 100644
index 000000000..68b65658c
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/collector.rs
@@ -0,0 +1,1463 @@
+//! Mono Item Collection
+//! ====================
+//!
+//! This module is responsible for discovering all items that will contribute
+//! to code generation of the crate. The important part here is that it not only
+//! needs to find syntax-level items (functions, structs, etc) but also all
+//! their monomorphized instantiations. Every non-generic, non-const function
+//! maps to one LLVM artifact. Every generic function can produce
+//! from zero to N artifacts, depending on the sets of type arguments it
+//! is instantiated with.
+//! This also applies to generic items from other crates: A generic definition
+//! in crate X might produce monomorphizations that are compiled into crate Y.
+//! We also have to collect these here.
+//!
+//! The following kinds of "mono items" are handled here:
+//!
+//! - Functions
+//! - Methods
+//! - Closures
+//! - Statics
+//! - Drop glue
+//!
+//! The following things also result in LLVM artifacts, but are not collected
+//! here, since we instantiate them locally on demand when needed in a given
+//! codegen unit:
+//!
+//! - Constants
+//! - VTables
+//! - Object Shims
+//!
+//!
+//! General Algorithm
+//! -----------------
+//! Let's define some terms first:
+//!
+//! - A "mono item" is something that results in a function or global in
+//! the LLVM IR of a codegen unit. Mono items do not stand on their
+//! own, they can reference other mono items. For example, if function
+//! `foo()` calls function `bar()` then the mono item for `foo()`
+//! references the mono item for function `bar()`. In general, the
+//! definition for mono item A referencing a mono item B is that
+//! the LLVM artifact produced for A references the LLVM artifact produced
+//! for B.
+//!
+//! - Mono items and the references between them form a directed graph,
+//! where the mono items are the nodes and references form the edges.
+//! Let's call this graph the "mono item graph".
+//!
+//! - The mono item graph for a program contains all mono items
+//! that are needed in order to produce the complete LLVM IR of the program.
+//!
+//! The purpose of the algorithm implemented in this module is to build the
+//! mono item graph for the current crate. It runs in two phases:
+//!
+//! 1. Discover the roots of the graph by traversing the HIR of the crate.
+//! 2. Starting from the roots, find neighboring nodes by inspecting the MIR
+//! representation of the item corresponding to a given node, until no more
+//! new nodes are found.
+//!
+//! ### Discovering roots
+//!
+//! The roots of the mono item graph correspond to the public non-generic
+//! syntactic items in the source code. We find them by walking the HIR of the
+//! crate, and whenever we hit upon a public function, method, or static item,
+//! we create a mono item consisting of the item's `DefId` and, since we only
+//! consider non-generic items, an empty type-substitution set. (In eager
+//! collection mode, during incremental compilation, all non-generic functions
+//! are considered as roots, as well as when the `-Clink-dead-code` option is
+//! specified. Functions marked `#[no_mangle]` and functions called by inlinable
+//! functions also always act as roots.)
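+//!
+//! For instance (an illustrative example, not taken from the compiler):
+//! ```ignore (illustrative)
+//! pub fn root() {}            // a root: public and non-generic
+//! pub fn generic<T>(_x: T) {} // not a root: only its instantiations are collected
+//! ```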
+//!
+//! ### Finding neighbor nodes
+//! Given a mono item node, we can discover neighbors by inspecting its
+//! MIR. We walk the MIR and any time we hit upon something that signifies a
+//! reference to another mono item, we have found a neighbor. Since the
+//! mono item we are currently at is always monomorphic, we also know the
+//! concrete type arguments of its neighbors, and so all neighbors again will be
+//! monomorphic. The specific forms a reference to a neighboring node can take
+//! in MIR are quite diverse. Here is an overview:
+//!
+//! #### Calling Functions/Methods
+//! The most obvious form of one mono item referencing another is a
+//! function or method call (represented by a CALL terminator in MIR). But
+//! calls are not the only thing that might introduce a reference between two
+//! function mono items, and as we will see below, they are just a
+//! specialization of the form described next, and consequently will not get any
+//! special treatment in the algorithm.
+//!
+//! #### Taking a reference to a function or method
+//! A function does not need to actually be called in order to be a neighbor of
+//! another function. It suffices to just take a reference in order to introduce
+//! an edge. Consider the following example:
+//!
+//! ```
+//! # use core::fmt::Display;
+//! fn print_val<T: Display>(x: T) {
+//! println!("{}", x);
+//! }
+//!
+//! fn call_fn(f: &dyn Fn(i32), x: i32) {
+//! f(x);
+//! }
+//!
+//! fn main() {
+//! let print_i32 = print_val::<i32>;
+//! call_fn(&print_i32, 0);
+//! }
+//! ```
+//! The MIR of none of these functions will contain an explicit call to
+//! `print_val::<i32>`. Nonetheless, in order to monomorphize this program, we need
+//! an instance of this function. Thus, whenever we encounter a function or
+//! method in operand position, we treat it as a neighbor of the current
+//! mono item. Calls are just a special case of that.
+//!
+//! #### Closures
+//! In a way, closures are a simple case. Since every closure object needs to be
+//! constructed somewhere, we can reliably discover them by observing
+//! `Rvalue::Aggregate` expressions with `AggregateKind::Closure`. This is also
+//! true for closures inlined from other crates.
+//!
+//! #### Drop glue
+//! Drop glue mono items are introduced by MIR drop-statements. The
+//! generated mono item will again have drop-glue item neighbors if the
+//! type to be dropped contains nested values that also need to be dropped. It
+//! might also have a function item neighbor for the explicit `Drop::drop`
+//! implementation of its type.
+//!
+//! #### Unsizing Casts
+//! A subtle way of introducing neighbor edges is by casting to a trait object.
+//! Since the resulting fat-pointer contains a reference to a vtable, we need to
+//! instantiate all object-safe methods of the trait, as we need to store
+//! pointers to these functions even if they never get called anywhere. This can
+//! be seen as a special case of taking a function reference.
+//!
+//! #### Boxes
+//! Since `Box` expressions have special compiler support, no explicit calls to
+//! `exchange_malloc()` and `box_free()` may show up in MIR, even if the
+//! compiler will generate them. We have to observe `Rvalue::Box` expressions
+//! and Box-typed drop-statements for that purpose.
+//!
+//!
+//! Interaction with Cross-Crate Inlining
+//! -------------------------------------
+//! The binary of a crate will not only contain machine code for the items
+//! defined in the source code of that crate. It will also contain monomorphic
+//! instantiations of any extern generic functions and of functions marked with
+//! `#[inline]`.
+//! The collection algorithm handles this more or less transparently. If it is
+//! about to create a mono item for something with an external `DefId`,
+//! it will take a look if the MIR for that item is available, and if so just
+//! proceed normally. If the MIR is not available, it assumes that the item is
+//! just linked to and no node is created, which is exactly what we want, since
+//! no machine code should be generated in the current crate for such an item.
+//!
+//! Eager and Lazy Collection Mode
+//! ------------------------------
+//! Mono item collection can be performed in one of two modes:
+//!
+//! - Lazy mode means that items will only be instantiated when actually
+//! referenced. The goal is to produce the least amount of machine code
+//! possible.
+//!
+//! - Eager mode is meant to be used in conjunction with incremental compilation
+//! where a stable set of mono items is more important than a minimal
+//! one. Thus, eager mode will instantiate drop-glue for every drop-able type
+//! in the crate, even if no drop call for that type exists (yet). It will
+//! also instantiate default implementations of trait methods, something that
+//! otherwise is only done on demand.
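+//!
+//! (The mode in use can be observed with the unstable `-Zprint-mono-items`
+//! flag; mentioned here only as a pointer, not as a stable interface.)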
+//!
+//!
+//! Open Issues
+//! -----------
+//! Some things are not yet fully implemented in the current version of this
+//! module.
+//!
+//! ### Const Fns
+//! Ideally, no mono item should be generated for const fns unless there
+//! is a call to them that cannot be evaluated at compile time. At the moment
+//! this is not implemented however: a mono item will be produced
+//! regardless of whether it is actually needed or not.
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::{par_for_each_in, MTLock, MTRef};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, DefIdMap, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_index::bit_set::GrowableBitSet;
+use rustc_middle::mir::interpret::{AllocId, ConstValue};
+use rustc_middle::mir::interpret::{ErrorHandled, GlobalAlloc, Scalar};
+use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
+use rustc_middle::mir::visit::Visitor as MirVisitor;
+use rustc_middle::mir::{self, Local, Location};
+use rustc_middle::ty::adjustment::{CustomCoerceUnsized, PointerCast};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts};
+use rustc_middle::ty::{
+ self, GenericParamDefKind, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitable, VtblEntry,
+};
+use rustc_middle::{middle::codegen_fn_attrs::CodegenFnAttrFlags, mir::visit::TyContext};
+use rustc_session::config::EntryFnType;
+use rustc_session::lint::builtin::LARGE_ASSIGNMENTS;
+use rustc_session::Limit;
+use rustc_span::source_map::{dummy_spanned, respan, Span, Spanned, DUMMY_SP};
+use rustc_target::abi::Size;
+use std::iter;
+use std::ops::Range;
+use std::path::PathBuf;
+
+#[derive(PartialEq)]
+pub enum MonoItemCollectionMode {
+ Eager,
+ Lazy,
+}
+
+/// Maps every mono item to all mono items it references in its
+/// body.
+pub struct InliningMap<'tcx> {
+ // Maps a source mono item to the range of mono items
+ // accessed by it.
+ // The range selects elements within the `targets` vec.
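+ //
+ // For example (hypothetical contents): if `index` maps a mono item `A`
+ // to the range `2..4`, then `targets[2]` and `targets[3]` are the mono
+ // items accessed by `A`, and bits 2 and 3 of `inlines` record whether
+ // each of them must be inlined into every CGU.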
+ index: FxHashMap<MonoItem<'tcx>, Range<usize>>,
+ targets: Vec<MonoItem<'tcx>>,
+
+ // Contains one bit per mono item in the `targets` field. That bit
+ // is true if that mono item needs to be inlined into every CGU.
+ inlines: GrowableBitSet<usize>,
+}
+
+/// Struct to store the mono items found during each collection step, along with
+/// whether they should be inlined. We call `instantiation_mode` to get their inlining
+/// status when inserting new elements, which avoids calling it in
+/// `inlining_map.lock_mut()`. See the `collect_items_rec` implementation
+/// below.
+struct MonoItems<'tcx> {
+ // If this is false, we do not need to compute whether items
+ // will need to be inlined.
+ compute_inlining: bool,
+
+ // The TyCtxt used to determine whether an item should
+ // be inlined.
+ tcx: TyCtxt<'tcx>,
+
+ // The collected mono items. The bool field in each element
+ // indicates whether this element should be inlined.
+ items: Vec<(Spanned<MonoItem<'tcx>>, bool /*inlined*/)>,
+}
+
+impl<'tcx> MonoItems<'tcx> {
+ #[inline]
+ fn push(&mut self, item: Spanned<MonoItem<'tcx>>) {
+ self.extend([item]);
+ }
+
+ #[inline]
+ fn extend<T: IntoIterator<Item = Spanned<MonoItem<'tcx>>>>(&mut self, iter: T) {
+ self.items.extend(iter.into_iter().map(|mono_item| {
+ let inlined = if !self.compute_inlining {
+ false
+ } else {
+ mono_item.node.instantiation_mode(self.tcx) == InstantiationMode::LocalCopy
+ };
+ (mono_item, inlined)
+ }))
+ }
+}
+
+impl<'tcx> InliningMap<'tcx> {
+ fn new() -> InliningMap<'tcx> {
+ InliningMap {
+ index: FxHashMap::default(),
+ targets: Vec::new(),
+ inlines: GrowableBitSet::with_capacity(1024),
+ }
+ }
+
+ fn record_accesses<'a>(
+ &mut self,
+ source: MonoItem<'tcx>,
+ new_targets: &'a [(Spanned<MonoItem<'tcx>>, bool)],
+ ) where
+ 'tcx: 'a,
+ {
+ let start_index = self.targets.len();
+ let new_items_count = new_targets.len();
+ let new_items_count_total = new_items_count + self.targets.len();
+
+ self.targets.reserve(new_items_count);
+ self.inlines.ensure(new_items_count_total);
+
+ for (i, (Spanned { node: mono_item, .. }, inlined)) in new_targets.into_iter().enumerate() {
+ self.targets.push(*mono_item);
+ if *inlined {
+ self.inlines.insert(i + start_index);
+ }
+ }
+
+ let end_index = self.targets.len();
+ assert!(self.index.insert(source, start_index..end_index).is_none());
+ }
+
+ // Internally iterate over all items referenced by `source` which will be
+ // made available for inlining.
+ pub fn with_inlining_candidates<F>(&self, source: MonoItem<'tcx>, mut f: F)
+ where
+ F: FnMut(MonoItem<'tcx>),
+ {
+ if let Some(range) = self.index.get(&source) {
+ for (i, candidate) in self.targets[range.clone()].iter().enumerate() {
+ if self.inlines.contains(range.start + i) {
+ f(*candidate);
+ }
+ }
+ }
+ }
+
+ // Internally iterate over all items and the things each accesses.
+ pub fn iter_accesses<F>(&self, mut f: F)
+ where
+ F: FnMut(MonoItem<'tcx>, &[MonoItem<'tcx>]),
+ {
+ for (&accessor, range) in &self.index {
+ f(accessor, &self.targets[range.clone()])
+ }
+ }
+}
+
+#[instrument(skip(tcx, mode), level = "debug")]
+pub fn collect_crate_mono_items(
+ tcx: TyCtxt<'_>,
+ mode: MonoItemCollectionMode,
+) -> (FxHashSet<MonoItem<'_>>, InliningMap<'_>) {
+ let _prof_timer = tcx.prof.generic_activity("monomorphization_collector");
+
+ let roots =
+ tcx.sess.time("monomorphization_collector_root_collections", || collect_roots(tcx, mode));
+
+ debug!("building mono item graph, beginning at roots");
+
+ let mut visited = MTLock::new(FxHashSet::default());
+ let mut inlining_map = MTLock::new(InliningMap::new());
+ let recursion_limit = tcx.recursion_limit();
+
+ {
+ let visited: MTRef<'_, _> = &mut visited;
+ let inlining_map: MTRef<'_, _> = &mut inlining_map;
+
+ tcx.sess.time("monomorphization_collector_graph_walk", || {
+ par_for_each_in(roots, |root| {
+ let mut recursion_depths = DefIdMap::default();
+ collect_items_rec(
+ tcx,
+ dummy_spanned(root),
+ visited,
+ &mut recursion_depths,
+ recursion_limit,
+ inlining_map,
+ );
+ });
+ });
+ }
+
+ (visited.into_inner(), inlining_map.into_inner())
+}
+
+// Find all non-generic items by walking the HIR. These items serve as roots to
+// start monomorphizing from.
+#[instrument(skip(tcx, mode), level = "debug")]
+fn collect_roots(tcx: TyCtxt<'_>, mode: MonoItemCollectionMode) -> Vec<MonoItem<'_>> {
+ debug!("collecting roots");
+ let mut roots = MonoItems { compute_inlining: false, tcx, items: Vec::new() };
+
+ {
+ let entry_fn = tcx.entry_fn(());
+
+ debug!("collect_roots: entry_fn = {:?}", entry_fn);
+
+ let mut collector = RootCollector { tcx, mode, entry_fn, output: &mut roots };
+
+ let crate_items = tcx.hir_crate_items(());
+
+ for id in crate_items.items() {
+ collector.process_item(id);
+ }
+
+ for id in crate_items.impl_items() {
+ collector.process_impl_item(id);
+ }
+
+ collector.push_extra_entry_roots();
+ }
+
+ // We can only codegen items that are instantiable, i.e. items all of
+ // whose predicates hold. Luckily, items that aren't instantiable
+ // can't actually be used, so we can just skip codegenning them.
+ roots
+ .items
+ .into_iter()
+ .filter_map(|(Spanned { node: mono_item, .. }, _)| {
+ mono_item.is_instantiable(tcx).then_some(mono_item)
+ })
+ .collect()
+}
+
+/// Collect all monomorphized items reachable from `starting_point`, and emit a note diagnostic if a
+/// post-monomorphization error is encountered during a collection step.
+#[instrument(skip(tcx, visited, recursion_depths, recursion_limit, inlining_map), level = "debug")]
+fn collect_items_rec<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ starting_point: Spanned<MonoItem<'tcx>>,
+ visited: MTRef<'_, MTLock<FxHashSet<MonoItem<'tcx>>>>,
+ recursion_depths: &mut DefIdMap<usize>,
+ recursion_limit: Limit,
+ inlining_map: MTRef<'_, MTLock<InliningMap<'tcx>>>,
+) {
+ if !visited.lock_mut().insert(starting_point.node) {
+ // We've been here already, no need to search again.
+ return;
+ }
+ debug!("BEGIN collect_items_rec({})", starting_point.node);
+
+ let mut neighbors = MonoItems { compute_inlining: true, tcx, items: Vec::new() };
+ let recursion_depth_reset;
+
+ //
+ // Post-monomorphization errors MVP
+ //
+ // We can encounter errors while monomorphizing an item, but we don't have a good way of
+ // showing a complete stack of spans ultimately leading to collecting the erroneous one yet.
+ // (It's also currently unclear exactly which diagnostics and information would be interesting
+ // to report in such cases)
+ //
+ // This leads to suboptimal error reporting: a post-monomorphization error (PME) will be
+ // shown with just a spanned piece of code causing the error, without information on where
+ // it was called from. This is especially obscure if the erroneous mono item is in a
+ // dependency. See for example issue #85155, where, before minimization, a PME happened two
+ // crates downstream from libcore's stdarch, without a way to know which dependency was the
+ // cause.
+ //
+ // If such an error occurs in the current crate, its span will be enough to locate the
+ // source. If the cause is in another crate, the goal here is to quickly locate which mono
+ // item in the current crate is ultimately responsible for causing the error.
+ //
+ // To give at least _some_ context to the user: while collecting mono items, we check the
+ // error count. If it has changed, a PME occurred, and we trigger some diagnostics about the
+ // current step of mono items collection.
+ //
+ // FIXME: don't rely on global state, instead bubble up errors. Note: this is very hard to do.
+ let error_count = tcx.sess.diagnostic().err_count();
+
+ match starting_point.node {
+ MonoItem::Static(def_id) => {
+ let instance = Instance::mono(tcx, def_id);
+
+ // Sanity check whether this ended up being collected accidentally
+ debug_assert!(should_codegen_locally(tcx, &instance));
+
+ let ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
+ visit_drop_use(tcx, ty, true, starting_point.span, &mut neighbors);
+
+ recursion_depth_reset = None;
+
+ if let Ok(alloc) = tcx.eval_static_initializer(def_id) {
+ for &id in alloc.inner().relocations().values() {
+ collect_miri(tcx, id, &mut neighbors);
+ }
+ }
+ }
+ MonoItem::Fn(instance) => {
+ // Sanity check whether this ended up being collected accidentally
+ debug_assert!(should_codegen_locally(tcx, &instance));
+
+ // Keep track of the monomorphization recursion depth
+ recursion_depth_reset = Some(check_recursion_limit(
+ tcx,
+ instance,
+ starting_point.span,
+ recursion_depths,
+ recursion_limit,
+ ));
+ check_type_length_limit(tcx, instance);
+
+ rustc_data_structures::stack::ensure_sufficient_stack(|| {
+ collect_neighbours(tcx, instance, &mut neighbors);
+ });
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ recursion_depth_reset = None;
+
+ let item = tcx.hir().item(item_id);
+ if let hir::ItemKind::GlobalAsm(asm) = item.kind {
+ for (op, op_sp) in asm.operands {
+ match op {
+ hir::InlineAsmOperand::Const { .. } => {
+ // Only constants which resolve to a plain integer
+ // are supported. Therefore the value should not
+ // depend on any other items.
+ }
+ hir::InlineAsmOperand::SymFn { anon_const } => {
+ let fn_ty =
+ tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
+ visit_fn_use(tcx, fn_ty, false, *op_sp, &mut neighbors);
+ }
+ hir::InlineAsmOperand::SymStatic { path: _, def_id } => {
+ let instance = Instance::mono(tcx, *def_id);
+ if should_codegen_locally(tcx, &instance) {
+ trace!("collecting static {:?}", def_id);
+ neighbors.push(dummy_spanned(MonoItem::Static(*def_id)));
+ }
+ }
+ hir::InlineAsmOperand::In { .. }
+ | hir::InlineAsmOperand::Out { .. }
+ | hir::InlineAsmOperand::InOut { .. }
+ | hir::InlineAsmOperand::SplitInOut { .. } => {
+ span_bug!(*op_sp, "invalid operand type for global_asm!")
+ }
+ }
+ }
+ } else {
+ span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
+ }
+ }
+ }
+
+ // Check for PMEs and emit a diagnostic if one happened, to try to show
+ // the relevant edges of the mono item graph.
+ if tcx.sess.diagnostic().err_count() > error_count
+ && starting_point.node.is_generic_fn()
+ && starting_point.node.is_user_defined()
+ {
+ let formatted_item = with_no_trimmed_paths!(starting_point.node.to_string());
+ tcx.sess.span_note_without_error(
+ starting_point.span,
+ &format!("the above error was encountered while instantiating `{}`", formatted_item),
+ );
+ }
+ inlining_map.lock_mut().record_accesses(starting_point.node, &neighbors.items);
+
+ for (neighbour, _) in neighbors.items {
+ collect_items_rec(tcx, neighbour, visited, recursion_depths, recursion_limit, inlining_map);
+ }
+
+ if let Some((def_id, depth)) = recursion_depth_reset {
+ recursion_depths.insert(def_id, depth);
+ }
+
+ debug!("END collect_items_rec({})", starting_point.node);
+}
+
+/// Formats an instance name that is already known to be too long for rustc,
+/// showing only the first and last 32 characters to avoid blasting the
+/// user's terminal with thousands of lines of type name.
+///
+/// If the type name is longer than before+after, it will be written to a file.
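+///
+/// For example (illustrative): with `before = 2` and `after = 2`, the name
+/// `abcdefgh` would be shrunk to `ab...gh`.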
+fn shrunk_instance_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: &Instance<'tcx>,
+ before: usize,
+ after: usize,
+) -> (String, Option<PathBuf>) {
+ let s = instance.to_string();
+
+ // Only use the shrunk version if it's really shorter.
+ // This also avoids the case where before and after slices overlap.
+ if s.chars().nth(before + after + 1).is_some() {
+ // An iterator of all byte positions including the end of the string.
+ let positions = || s.char_indices().map(|(i, _)| i).chain(iter::once(s.len()));
+
+ let shrunk = format!(
+ "{before}...{after}",
+ before = &s[..positions().nth(before).unwrap_or(s.len())],
+ after = &s[positions().rev().nth(after).unwrap_or(0)..],
+ );
+
+ let path = tcx.output_filenames(()).temp_path_ext("long-type.txt", None);
+ let written_to_path = std::fs::write(&path, s).ok().map(|_| path);
+
+ (shrunk, written_to_path)
+ } else {
+ (s, None)
+ }
+}
+
+fn check_recursion_limit<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: Instance<'tcx>,
+ span: Span,
+ recursion_depths: &mut DefIdMap<usize>,
+ recursion_limit: Limit,
+) -> (DefId, usize) {
+ let def_id = instance.def_id();
+ let recursion_depth = recursion_depths.get(&def_id).cloned().unwrap_or(0);
+ debug!(" => recursion depth={}", recursion_depth);
+
+ let adjusted_recursion_depth = if Some(def_id) == tcx.lang_items().drop_in_place_fn() {
+ // HACK: drop_in_place creates tight monomorphization loops. Give
+ // it more margin.
+ recursion_depth / 4
+ } else {
+ recursion_depth
+ };
+
+ // Code that needs to instantiate the same function recursively
+ // more than the recursion limit is assumed to be causing an
+ // infinite expansion.
+ if !recursion_limit.value_within_limit(adjusted_recursion_depth) {
+ let (shrunk, written_to_path) = shrunk_instance_name(tcx, &instance, 32, 32);
+ let error = format!("reached the recursion limit while instantiating `{}`", shrunk);
+ let mut err = tcx.sess.struct_span_fatal(span, &error);
+ err.span_note(
+ tcx.def_span(def_id),
+ &format!("`{}` defined here", tcx.def_path_str(def_id)),
+ );
+ if let Some(path) = written_to_path {
+ err.note(&format!("the full type name has been written to '{}'", path.display()));
+ }
+ err.emit()
+ }
+
+ recursion_depths.insert(def_id, recursion_depth + 1);
+
+ (def_id, recursion_depth)
+}
+
+fn check_type_length_limit<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) {
+ let type_length = instance
+ .substs
+ .iter()
+ .flat_map(|arg| arg.walk())
+ .filter(|arg| match arg.unpack() {
+ GenericArgKind::Type(_) | GenericArgKind::Const(_) => true,
+ GenericArgKind::Lifetime(_) => false,
+ })
+ .count();
+ debug!(" => type length={}", type_length);
+
+ // Rust code can easily create exponentially-long types using only a
+ // polynomial recursion depth. Even with the default recursion
+ // depth, you can easily get cases that take >2^60 steps to run,
+ // which means that rustc basically hangs.
+ //
+ // Bail out in these cases to avoid that bad user experience.
+ if !tcx.type_length_limit().value_within_limit(type_length) {
+ let (shrunk, written_to_path) = shrunk_instance_name(tcx, &instance, 32, 32);
+ let msg = format!("reached the type-length limit while instantiating `{}`", shrunk);
+ let mut diag = tcx.sess.struct_span_fatal(tcx.def_span(instance.def_id()), &msg);
+ if let Some(path) = written_to_path {
+ diag.note(&format!("the full type name has been written to '{}'", path.display()));
+ }
+ diag.help(&format!(
+ "consider adding a `#![type_length_limit=\"{}\"]` attribute to your crate",
+ type_length
+ ));
+ diag.emit()
+ }
+}
+
+struct MirNeighborCollector<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body: &'a mir::Body<'tcx>,
+ output: &'a mut MonoItems<'tcx>,
+ instance: Instance<'tcx>,
+}
+
+impl<'a, 'tcx> MirNeighborCollector<'a, 'tcx> {
+ pub fn monomorphize<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!("monomorphize: self.instance={:?}", self.instance);
+ self.instance.subst_mir_and_normalize_erasing_regions(
+ self.tcx,
+ ty::ParamEnv::reveal_all(),
+ value,
+ )
+ }
+}
+
+impl<'a, 'tcx> MirVisitor<'tcx> for MirNeighborCollector<'a, 'tcx> {
+ fn visit_rvalue(&mut self, rvalue: &mir::Rvalue<'tcx>, location: Location) {
+ debug!("visiting rvalue {:?}", *rvalue);
+
+ let span = self.body.source_info(location).span;
+
+ match *rvalue {
+ // When doing a cast from a regular pointer to a fat pointer, we
+ // have to instantiate all methods of the trait being cast to, so we
+ // can build the appropriate vtable.
+ mir::Rvalue::Cast(
+ mir::CastKind::Pointer(PointerCast::Unsize),
+ ref operand,
+ target_ty,
+ ) => {
+ let target_ty = self.monomorphize(target_ty);
+ let source_ty = operand.ty(self.body, self.tcx);
+ let source_ty = self.monomorphize(source_ty);
+ let (source_ty, target_ty) =
+ find_vtable_types_for_unsizing(self.tcx, source_ty, target_ty);
+ // This could also be a different Unsize instruction, like
+ // from a fixed-size array to a slice. But we are only
+ // interested in things that produce a vtable.
+ if target_ty.is_trait() && !source_ty.is_trait() {
+ create_mono_items_for_vtable_methods(
+ self.tcx,
+ target_ty,
+ source_ty,
+ span,
+ self.output,
+ );
+ }
+ }
+ mir::Rvalue::Cast(
+ mir::CastKind::Pointer(PointerCast::ReifyFnPointer),
+ ref operand,
+ _,
+ ) => {
+ let fn_ty = operand.ty(self.body, self.tcx);
+ let fn_ty = self.monomorphize(fn_ty);
+ visit_fn_use(self.tcx, fn_ty, false, span, &mut self.output);
+ }
+ mir::Rvalue::Cast(
+ mir::CastKind::Pointer(PointerCast::ClosureFnPointer(_)),
+ ref operand,
+ _,
+ ) => {
+ let source_ty = operand.ty(self.body, self.tcx);
+ let source_ty = self.monomorphize(source_ty);
+ match *source_ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let instance = Instance::resolve_closure(
+ self.tcx,
+ def_id,
+ substs,
+ ty::ClosureKind::FnOnce,
+ )
+ .expect("failed to normalize and resolve closure during codegen");
+ if should_codegen_locally(self.tcx, &instance) {
+ self.output.push(create_fn_mono_item(self.tcx, instance, span));
+ }
+ }
+ _ => bug!(),
+ }
+ }
+ mir::Rvalue::ThreadLocalRef(def_id) => {
+ assert!(self.tcx.is_thread_local_static(def_id));
+ let instance = Instance::mono(self.tcx, def_id);
+ if should_codegen_locally(self.tcx, &instance) {
+ trace!("collecting thread-local static {:?}", def_id);
+ self.output.push(respan(span, MonoItem::Static(def_id)));
+ }
+ }
+ _ => { /* not interesting */ }
+ }
+
+ self.super_rvalue(rvalue, location);
+ }
+
+ /// This does not walk the constant, as it has been handled entirely here and trying
+ /// to walk it would attempt to evaluate the `ty::Const` inside, which doesn't necessarily
+ /// work, as some constants cannot be represented in the type system.
+ #[instrument(skip(self), level = "debug")]
+ fn visit_constant(&mut self, constant: &mir::Constant<'tcx>, location: Location) {
+ let literal = self.monomorphize(constant.literal);
+ let val = match literal {
+ mir::ConstantKind::Val(val, _) => val,
+ mir::ConstantKind::Ty(ct) => match ct.kind() {
+ ty::ConstKind::Value(val) => self.tcx.valtree_to_const_val((ct.ty(), val)),
+ ty::ConstKind::Unevaluated(ct) => {
+ debug!(?ct);
+ let param_env = ty::ParamEnv::reveal_all();
+ match self.tcx.const_eval_resolve(param_env, ct, None) {
+ // The `monomorphize` call should have evaluated that constant already.
+ Ok(val) => val,
+ Err(ErrorHandled::Reported(_) | ErrorHandled::Linted) => return,
+ Err(ErrorHandled::TooGeneric) => span_bug!(
+ self.body.source_info(location).span,
+ "collection encountered polymorphic constant: {:?}",
+ literal
+ ),
+ }
+ }
+ _ => return,
+ },
+ };
+ collect_const_value(self.tcx, val, self.output);
+ self.visit_ty(literal.ty(), TyContext::Location(location));
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn visit_const(&mut self, constant: ty::Const<'tcx>, location: Location) {
+ debug!("visiting const {:?} @ {:?}", constant, location);
+
+ let substituted_constant = self.monomorphize(constant);
+ let param_env = ty::ParamEnv::reveal_all();
+
+ match substituted_constant.kind() {
+ ty::ConstKind::Value(val) => {
+ let const_val = self.tcx.valtree_to_const_val((constant.ty(), val));
+ collect_const_value(self.tcx, const_val, self.output)
+ }
+ ty::ConstKind::Unevaluated(unevaluated) => {
+ match self.tcx.const_eval_resolve(param_env, unevaluated, None) {
+ // The `monomorphize` call should have evaluated that constant already.
+ Ok(val) => span_bug!(
+ self.body.source_info(location).span,
+ "collection encountered the unevaluated constant {} which evaluated to {:?}",
+ substituted_constant,
+ val
+ ),
+ Err(ErrorHandled::Reported(_) | ErrorHandled::Linted) => {}
+ Err(ErrorHandled::TooGeneric) => span_bug!(
+ self.body.source_info(location).span,
+ "collection encountered polymorphic constant: {}",
+ substituted_constant
+ ),
+ }
+ }
+ _ => {}
+ }
+
+ self.super_const(constant);
+ }
+
+ fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
+ debug!("visiting terminator {:?} @ {:?}", terminator, location);
+ let source = self.body.source_info(location).span;
+
+ let tcx = self.tcx;
+ match terminator.kind {
+ mir::TerminatorKind::Call { ref func, .. } => {
+ let callee_ty = func.ty(self.body, tcx);
+ let callee_ty = self.monomorphize(callee_ty);
+ visit_fn_use(self.tcx, callee_ty, true, source, &mut self.output);
+ }
+ mir::TerminatorKind::Drop { ref place, .. }
+ | mir::TerminatorKind::DropAndReplace { ref place, .. } => {
+ let ty = place.ty(self.body, self.tcx).ty;
+ let ty = self.monomorphize(ty);
+ visit_drop_use(self.tcx, ty, true, source, self.output);
+ }
+ mir::TerminatorKind::InlineAsm { ref operands, .. } => {
+ for op in operands {
+ match *op {
+ mir::InlineAsmOperand::SymFn { ref value } => {
+ let fn_ty = self.monomorphize(value.literal.ty());
+ visit_fn_use(self.tcx, fn_ty, false, source, &mut self.output);
+ }
+ mir::InlineAsmOperand::SymStatic { def_id } => {
+ let instance = Instance::mono(self.tcx, def_id);
+ if should_codegen_locally(self.tcx, &instance) {
+ trace!("collecting asm sym static {:?}", def_id);
+ self.output.push(respan(source, MonoItem::Static(def_id)));
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+ mir::TerminatorKind::Assert { ref msg, .. } => {
+ let lang_item = match msg {
+ mir::AssertKind::BoundsCheck { .. } => LangItem::PanicBoundsCheck,
+ _ => LangItem::Panic,
+ };
+ let instance = Instance::mono(tcx, tcx.require_lang_item(lang_item, Some(source)));
+ if should_codegen_locally(tcx, &instance) {
+ self.output.push(create_fn_mono_item(tcx, instance, source));
+ }
+ }
+ mir::TerminatorKind::Abort { .. } => {
+ let instance = Instance::mono(
+ tcx,
+ tcx.require_lang_item(LangItem::PanicNoUnwind, Some(source)),
+ );
+ if should_codegen_locally(tcx, &instance) {
+ self.output.push(create_fn_mono_item(tcx, instance, source));
+ }
+ }
+ mir::TerminatorKind::Goto { .. }
+ | mir::TerminatorKind::SwitchInt { .. }
+ | mir::TerminatorKind::Resume
+ | mir::TerminatorKind::Return
+ | mir::TerminatorKind::Unreachable => {}
+ mir::TerminatorKind::GeneratorDrop
+ | mir::TerminatorKind::Yield { .. }
+ | mir::TerminatorKind::FalseEdge { .. }
+ | mir::TerminatorKind::FalseUnwind { .. } => bug!(),
+ }
+
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
+ self.super_operand(operand, location);
+ let limit = self.tcx.move_size_limit().0;
+ if limit == 0 {
+ return;
+ }
+ let limit = Size::from_bytes(limit);
+ let ty = operand.ty(self.body, self.tcx);
+ let ty = self.monomorphize(ty);
+ let layout = self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty));
+ if let Ok(layout) = layout {
+ if layout.size > limit {
+ debug!(?layout);
+ let source_info = self.body.source_info(location);
+ debug!(?source_info);
+ let lint_root = source_info.scope.lint_root(&self.body.source_scopes);
+ debug!(?lint_root);
+ let Some(lint_root) = lint_root else {
+ // This happens when the issue is in a function from a foreign crate that
+ // we monomorphized in the current crate. We can't get a `HirId` for things
+ // in other crates.
+ // FIXME: Find out where to report the lint on. Maybe simply crate-level lint root
+ // but correct span? This would make the lint at least accept crate-level lint attributes.
+ return;
+ };
+ self.tcx.struct_span_lint_hir(
+ LARGE_ASSIGNMENTS,
+ lint_root,
+ source_info.span,
+ |lint| {
+ let mut err = lint.build(&format!("moving {} bytes", layout.size.bytes()));
+ err.span_label(source_info.span, "value moved from here");
+ err.note(&format!(r#"The current maximum size is {}, but it can be customized with the move_size_limit attribute: `#![move_size_limit = "..."]`"#, limit.bytes()));
+ err.emit();
+ },
+ );
+ }
+ }
+ }
+
+ fn visit_local(
+ &mut self,
+ _place_local: Local,
+ _context: mir::visit::PlaceContext,
+ _location: Location,
+ ) {
+ }
+}
+
+fn visit_drop_use<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ is_direct_call: bool,
+ source: Span,
+ output: &mut MonoItems<'tcx>,
+) {
+ let instance = Instance::resolve_drop_in_place(tcx, ty);
+ visit_instance_use(tcx, instance, is_direct_call, source, output);
+}
+
+fn visit_fn_use<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ is_direct_call: bool,
+ source: Span,
+ output: &mut MonoItems<'tcx>,
+) {
+ if let ty::FnDef(def_id, substs) = *ty.kind() {
+ let instance = if is_direct_call {
+ ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, substs).unwrap().unwrap()
+ } else {
+ ty::Instance::resolve_for_fn_ptr(tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ .unwrap()
+ };
+ visit_instance_use(tcx, instance, is_direct_call, source, output);
+ }
+}
+
+fn visit_instance_use<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: ty::Instance<'tcx>,
+ is_direct_call: bool,
+ source: Span,
+ output: &mut MonoItems<'tcx>,
+) {
+ debug!("visit_item_use({:?}, is_direct_call={:?})", instance, is_direct_call);
+ if !should_codegen_locally(tcx, &instance) {
+ return;
+ }
+
+ match instance.def {
+ ty::InstanceDef::Virtual(..) | ty::InstanceDef::Intrinsic(_) => {
+ if !is_direct_call {
+ bug!("{:?} being reified", instance);
+ }
+ }
+ ty::InstanceDef::DropGlue(_, None) => {
+ // Don't need to emit noop drop glue if we are calling directly.
+ if !is_direct_call {
+ output.push(create_fn_mono_item(tcx, instance, source));
+ }
+ }
+ ty::InstanceDef::DropGlue(_, Some(_))
+ | ty::InstanceDef::VTableShim(..)
+ | ty::InstanceDef::ReifyShim(..)
+ | ty::InstanceDef::ClosureOnceShim { .. }
+ | ty::InstanceDef::Item(..)
+ | ty::InstanceDef::FnPtrShim(..)
+ | ty::InstanceDef::CloneShim(..) => {
+ output.push(create_fn_mono_item(tcx, instance, source));
+ }
+ }
+}
+
+/// Returns `true` if we should codegen an instance in the local crate, or returns `false` if we
+/// can just link to the upstream crate and therefore don't need a mono item.
+fn should_codegen_locally<'tcx>(tcx: TyCtxt<'tcx>, instance: &Instance<'tcx>) -> bool {
+ let Some(def_id) = instance.def.def_id_if_not_guaranteed_local_codegen() else {
+ return true;
+ };
+
+ if tcx.is_foreign_item(def_id) {
+ // Foreign items are always linked against; there's no way of instantiating them.
+ return false;
+ }
+
+ if def_id.is_local() {
+ // Local items cannot be referred to locally without monomorphizing them locally.
+ return true;
+ }
+
+ if tcx.is_reachable_non_generic(def_id)
+ || instance.polymorphize(tcx).upstream_monomorphization(tcx).is_some()
+ {
+ // We can link to the item in question, no instance needed in this crate.
+ return false;
+ }
+
+ if !tcx.is_mir_available(def_id) {
+ bug!("no MIR available for {:?}", def_id);
+ }
+
+ true
+}
+
+/// For a given pair of source and target type that occur in an unsizing coercion,
+/// this function finds the pair of types that determines the vtable linking
+/// them.
+///
+/// For example, the source type might be `&SomeStruct` and the target type
+/// might be `&SomeTrait` in a cast like:
+///
+/// let src: &SomeStruct = ...;
+/// let target = src as &SomeTrait;
+///
+/// Then the output of this function would be (SomeStruct, SomeTrait) since for
+/// constructing the `target` fat-pointer we need the vtable for that pair.
+///
+/// Things can get more complicated though because there's also the case where
+/// the unsized type occurs as a field:
+///
+/// ```rust
+/// struct ComplexStruct<T: ?Sized> {
+/// a: u32,
+/// b: f64,
+/// c: T
+/// }
+/// ```
+///
+/// In this case, if `T` is sized, `&ComplexStruct<T>` is a thin pointer. If `T`
+/// is unsized, `&ComplexStruct<T>` is a fat pointer, and the vtable it points to is
+/// for the pair of `T` (which is a trait) and the concrete type that `T` was
+/// originally coerced from:
+///
+/// let src: &ComplexStruct<SomeStruct> = ...;
+/// let target = src as &ComplexStruct<SomeTrait>;
+///
+/// Again, we want this `find_vtable_types_for_unsizing()` to provide the pair
+/// `(SomeStruct, SomeTrait)`.
+///
+/// Finally, there is also the case of custom unsizing coercions, e.g., for
+/// smart pointers such as `Rc` and `Arc`.
+fn find_vtable_types_for_unsizing<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ source_ty: Ty<'tcx>,
+ target_ty: Ty<'tcx>,
+) -> (Ty<'tcx>, Ty<'tcx>) {
+ let ptr_vtable = |inner_source: Ty<'tcx>, inner_target: Ty<'tcx>| {
+ let param_env = ty::ParamEnv::reveal_all();
+ let type_has_metadata = |ty: Ty<'tcx>| -> bool {
+ if ty.is_sized(tcx.at(DUMMY_SP), param_env) {
+ return false;
+ }
+ let tail = tcx.struct_tail_erasing_lifetimes(ty, param_env);
+ match tail.kind() {
+ ty::Foreign(..) => false,
+ ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
+ _ => bug!("unexpected unsized tail: {:?}", tail),
+ }
+ };
+ if type_has_metadata(inner_source) {
+ (inner_source, inner_target)
+ } else {
+ tcx.struct_lockstep_tails_erasing_lifetimes(inner_source, inner_target, param_env)
+ }
+ };
+
+ match (&source_ty.kind(), &target_ty.kind()) {
+ (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
+ | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
+ ptr_vtable(*a, *b)
+ }
+ (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
+ ptr_vtable(source_ty.boxed_ty(), target_ty.boxed_ty())
+ }
+
+ (&ty::Adt(source_adt_def, source_substs), &ty::Adt(target_adt_def, target_substs)) => {
+ assert_eq!(source_adt_def, target_adt_def);
+
+ let CustomCoerceUnsized::Struct(coerce_index) =
+ crate::custom_coerce_unsize_info(tcx, source_ty, target_ty);
+
+ let source_fields = &source_adt_def.non_enum_variant().fields;
+ let target_fields = &target_adt_def.non_enum_variant().fields;
+
+ assert!(
+ coerce_index < source_fields.len() && source_fields.len() == target_fields.len()
+ );
+
+ find_vtable_types_for_unsizing(
+ tcx,
+ source_fields[coerce_index].ty(tcx, source_substs),
+ target_fields[coerce_index].ty(tcx, target_substs),
+ )
+ }
+ _ => bug!(
+ "find_vtable_types_for_unsizing: invalid coercion {:?} -> {:?}",
+ source_ty,
+ target_ty
+ ),
+ }
+}
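
The custom-coercion arm above bottoms out in user-written `CoerceUnsized` impls. As a hedged, nightly-only sketch (the type `MyPtr` and all other names are invented for illustration), this is the kind of type that takes the `ty::Adt` path and drives the recursion into the coerced field:

```rust
#![feature(coerce_unsized, unsize)]

use std::marker::Unsize;
use std::ops::CoerceUnsized;

struct MyPtr<T: ?Sized> {
    // The pointee pointer; for `T = dyn Trait` this field makes `MyPtr` fat.
    value: *const T,
}

// The user-defined impl that `custom_coerce_unsize_info` would resolve to:
// the `CoerceUnsized` obligation picks out exactly this impl.
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<MyPtr<U>> for MyPtr<T> {}

trait SomeTrait {}
struct SomeStruct;
impl SomeTrait for SomeStruct {}

fn main() {
    let concrete = MyPtr { value: &SomeStruct as *const SomeStruct };
    // This coercion needs the vtable for the pair (SomeStruct, SomeTrait).
    let _erased: MyPtr<dyn SomeTrait> = concrete;
}
```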
+
+#[instrument(skip(tcx), level = "debug")]
+fn create_fn_mono_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: Instance<'tcx>,
+ source: Span,
+) -> Spanned<MonoItem<'tcx>> {
+ debug!("create_fn_mono_item(instance={})", instance);
+
+ let def_id = instance.def_id();
+ if tcx.sess.opts.unstable_opts.profile_closures && def_id.is_local() && tcx.is_closure(def_id) {
+ crate::util::dump_closure_profile(tcx, instance);
+ }
+
+ let respanned = respan(source, MonoItem::Fn(instance.polymorphize(tcx)));
+ debug!(?respanned);
+
+ respanned
+}
+
+/// Creates a `MonoItem` for each method that is referenced by the vtable for
+/// the given trait/impl pair.
+fn create_mono_items_for_vtable_methods<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ty: Ty<'tcx>,
+ impl_ty: Ty<'tcx>,
+ source: Span,
+ output: &mut MonoItems<'tcx>,
+) {
+ assert!(!trait_ty.has_escaping_bound_vars() && !impl_ty.has_escaping_bound_vars());
+
+ if let ty::Dynamic(ref trait_ty, ..) = trait_ty.kind() {
+ if let Some(principal) = trait_ty.principal() {
+ let poly_trait_ref = principal.with_self_ty(tcx, impl_ty);
+ assert!(!poly_trait_ref.has_escaping_bound_vars());
+
+ // Walk all methods of the trait, including those of its supertraits
+ let entries = tcx.vtable_entries(poly_trait_ref);
+ let methods = entries
+ .iter()
+ .filter_map(|entry| match entry {
+ VtblEntry::MetadataDropInPlace
+ | VtblEntry::MetadataSize
+ | VtblEntry::MetadataAlign
+ | VtblEntry::Vacant => None,
+ VtblEntry::TraitVPtr(_) => {
+                        // All supertrait items are already covered, so skip them.
+ None
+ }
+ VtblEntry::Method(instance) => {
+ Some(*instance).filter(|instance| should_codegen_locally(tcx, instance))
+ }
+ })
+ .map(|item| create_fn_mono_item(tcx, item, source));
+ output.extend(methods);
+ }
+
+ // Also add the destructor.
+ visit_drop_use(tcx, impl_ty, false, source, output);
+ }
+}
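
For orientation, a hedged source-level example (names invented) of the situation this function handles: a single unsizing coercion to a trait object forces collection of every vtable method plus the drop-glue entry:

```rust
trait Animal {
    fn speak(&self); // ends up as a `VtblEntry::Method` in the vtable
}

struct Dog;
impl Animal for Dog {
    fn speak(&self) {}
}

fn main() {
    // The coercion below requires the (Dog, Animal) vtable, so the collector
    // records mono items for `<Dog as Animal>::speak` and `Dog`'s drop-glue.
    let _a: &dyn Animal = &Dog;
}
```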
+
+//=-----------------------------------------------------------------------------
+// Root Collection
+//=-----------------------------------------------------------------------------
+
+struct RootCollector<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ mode: MonoItemCollectionMode,
+ output: &'a mut MonoItems<'tcx>,
+ entry_fn: Option<(DefId, EntryFnType)>,
+}
+
+impl<'v> RootCollector<'_, 'v> {
+ fn process_item(&mut self, id: hir::ItemId) {
+ match self.tcx.def_kind(id.def_id) {
+ DefKind::Enum | DefKind::Struct | DefKind::Union => {
+ let item = self.tcx.hir().item(id);
+ match item.kind {
+ hir::ItemKind::Enum(_, ref generics)
+ | hir::ItemKind::Struct(_, ref generics)
+ | hir::ItemKind::Union(_, ref generics) => {
+ if generics.params.is_empty() {
+ if self.mode == MonoItemCollectionMode::Eager {
+ debug!(
+ "RootCollector: ADT drop-glue for {}",
+ self.tcx.def_path_str(item.def_id.to_def_id())
+ );
+
+ let ty =
+ Instance::new(item.def_id.to_def_id(), InternalSubsts::empty())
+ .ty(self.tcx, ty::ParamEnv::reveal_all());
+ visit_drop_use(self.tcx, ty, true, DUMMY_SP, self.output);
+ }
+ }
+ }
+ _ => bug!(),
+ }
+ }
+ DefKind::GlobalAsm => {
+ debug!(
+ "RootCollector: ItemKind::GlobalAsm({})",
+ self.tcx.def_path_str(id.def_id.to_def_id())
+ );
+ self.output.push(dummy_spanned(MonoItem::GlobalAsm(id)));
+ }
+ DefKind::Static(..) => {
+ debug!(
+ "RootCollector: ItemKind::Static({})",
+ self.tcx.def_path_str(id.def_id.to_def_id())
+ );
+ self.output.push(dummy_spanned(MonoItem::Static(id.def_id.to_def_id())));
+ }
+ DefKind::Const => {
+                // Const items only generate mono items if they are actually
+                // used somewhere. Just declaring them is insufficient.
+                //
+                // However, even an otherwise unused const must have the items
+                // it refers to collected, so we evaluate it here.
+ if let Ok(val) = self.tcx.const_eval_poly(id.def_id.to_def_id()) {
+ collect_const_value(self.tcx, val, &mut self.output);
+ }
+ }
+ DefKind::Impl => {
+ if self.mode == MonoItemCollectionMode::Eager {
+ let item = self.tcx.hir().item(id);
+ create_mono_items_for_default_impls(self.tcx, item, self.output);
+ }
+ }
+ DefKind::Fn => {
+ self.push_if_root(id.def_id);
+ }
+ _ => {}
+ }
+ }
+
+ fn process_impl_item(&mut self, id: hir::ImplItemId) {
+ if matches!(self.tcx.def_kind(id.def_id), DefKind::AssocFn) {
+ self.push_if_root(id.def_id);
+ }
+ }
+
+ fn is_root(&self, def_id: LocalDefId) -> bool {
+ !item_requires_monomorphization(self.tcx, def_id)
+ && match self.mode {
+ MonoItemCollectionMode::Eager => true,
+ MonoItemCollectionMode::Lazy => {
+ self.entry_fn.and_then(|(id, _)| id.as_local()) == Some(def_id)
+ || self.tcx.is_reachable_non_generic(def_id)
+ || self
+ .tcx
+ .codegen_fn_attrs(def_id)
+ .flags
+ .contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL)
+ }
+ }
+ }
+
+ /// If `def_id` represents a root, pushes it onto the list of
+ /// outputs. (Note that all roots must be monomorphic.)
+ #[instrument(skip(self), level = "debug")]
+ fn push_if_root(&mut self, def_id: LocalDefId) {
+ if self.is_root(def_id) {
+ debug!("RootCollector::push_if_root: found root def_id={:?}", def_id);
+
+ let instance = Instance::mono(self.tcx, def_id.to_def_id());
+ self.output.push(create_fn_mono_item(self.tcx, instance, DUMMY_SP));
+ }
+ }
+
+ /// As a special case, when/if we encounter the
+ /// `main()` function, we also have to generate a
+ /// monomorphized copy of the start lang item based on
+ /// the return type of `main`. This is not needed when
+ /// the user writes their own `start` manually.
+ fn push_extra_entry_roots(&mut self) {
+ let Some((main_def_id, EntryFnType::Main)) = self.entry_fn else {
+ return;
+ };
+
+ let start_def_id = match self.tcx.lang_items().require(LangItem::Start) {
+ Ok(s) => s,
+ Err(err) => self.tcx.sess.fatal(&err),
+ };
+ let main_ret_ty = self.tcx.fn_sig(main_def_id).output();
+
+        // Since `main()` has no arguments, its return type cannot have
+        // late-bound regions: late-bound regions must appear in the
+        // argument listing.
+ let main_ret_ty = self.tcx.normalize_erasing_regions(
+ ty::ParamEnv::reveal_all(),
+ main_ret_ty.no_bound_vars().unwrap(),
+ );
+
+ let start_instance = Instance::resolve(
+ self.tcx,
+ ty::ParamEnv::reveal_all(),
+ start_def_id,
+ self.tcx.intern_substs(&[main_ret_ty.into()]),
+ )
+ .unwrap()
+ .unwrap();
+
+ self.output.push(create_fn_mono_item(self.tcx, start_instance, DUMMY_SP));
+ }
+}
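
A short hedged illustration of the special case above (ordinary user code): the `start` lang item is generic over `main`'s return type, so the collector needs a different instantiation of it depending on what `main` returns.

```rust
// With this signature the collector needs (conceptually)
// `start::<Result<(), std::io::Error>>` rather than `start::<()>`.
use std::io;

fn main() -> Result<(), io::Error> {
    Ok(())
}
```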
+
+fn item_requires_monomorphization(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+ let generics = tcx.generics_of(def_id);
+ generics.requires_monomorphization(tcx)
+}
+
+fn create_mono_items_for_default_impls<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ item: &'tcx hir::Item<'tcx>,
+ output: &mut MonoItems<'tcx>,
+) {
+ match item.kind {
+ hir::ItemKind::Impl(ref impl_) => {
+ for param in impl_.generics.params {
+ match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => {}
+ hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => {
+ return;
+ }
+ }
+ }
+
+ debug!(
+ "create_mono_items_for_default_impls(item={})",
+ tcx.def_path_str(item.def_id.to_def_id())
+ );
+
+ if let Some(trait_ref) = tcx.impl_trait_ref(item.def_id) {
+ let param_env = ty::ParamEnv::reveal_all();
+ let trait_ref = tcx.normalize_erasing_regions(param_env, trait_ref);
+ let overridden_methods = tcx.impl_item_implementor_ids(item.def_id);
+ for method in tcx.provided_trait_methods(trait_ref.def_id) {
+ if overridden_methods.contains_key(&method.def_id) {
+ continue;
+ }
+
+ if tcx.generics_of(method.def_id).own_requires_monomorphization() {
+ continue;
+ }
+
+ let substs =
+ InternalSubsts::for_item(tcx, method.def_id, |param, _| match param.kind {
+ GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
+ GenericParamDefKind::Type { .. }
+ | GenericParamDefKind::Const { .. } => {
+ trait_ref.substs[param.index as usize]
+ }
+ });
+ let instance = ty::Instance::resolve(tcx, param_env, method.def_id, substs)
+ .unwrap()
+ .unwrap();
+
+ let mono_item = create_fn_mono_item(tcx, instance, DUMMY_SP);
+ if mono_item.node.is_instantiable(tcx) && should_codegen_locally(tcx, &instance)
+ {
+ output.push(mono_item);
+ }
+ }
+ }
+ }
+ _ => bug!(),
+ }
+}
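
A hedged sketch (invented names) of the shape of code this function cares about in eager mode: a provided trait method that an impl does not override still needs a mono item for that impl's `Self` type.

```rust
trait Greet {
    // Provided (default) method: not overridden below, so the impl for
    // `Unit` reuses this body, and eager collection creates a mono item
    // for `<Unit as Greet>::hello`.
    fn hello(&self) {}
}

struct Unit;
impl Greet for Unit {}

fn main() {
    Unit.hello();
}
```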
+
+/// Scans the given miri allocation in order to find function calls, closures, and drop-glue.
+fn collect_miri<'tcx>(tcx: TyCtxt<'tcx>, alloc_id: AllocId, output: &mut MonoItems<'tcx>) {
+ match tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Static(def_id) => {
+ assert!(!tcx.is_thread_local_static(def_id));
+ let instance = Instance::mono(tcx, def_id);
+ if should_codegen_locally(tcx, &instance) {
+ trace!("collecting static {:?}", def_id);
+ output.push(dummy_spanned(MonoItem::Static(def_id)));
+ }
+ }
+ GlobalAlloc::Memory(alloc) => {
+ trace!("collecting {:?} with {:#?}", alloc_id, alloc);
+ for &inner in alloc.inner().relocations().values() {
+ rustc_data_structures::stack::ensure_sufficient_stack(|| {
+ collect_miri(tcx, inner, output);
+ });
+ }
+ }
+ GlobalAlloc::Function(fn_instance) => {
+ if should_codegen_locally(tcx, &fn_instance) {
+ trace!("collecting {:?} with {:#?}", alloc_id, fn_instance);
+ output.push(create_fn_mono_item(tcx, fn_instance, DUMMY_SP));
+ }
+ }
+ GlobalAlloc::VTable(ty, trait_ref) => {
+ let alloc_id = tcx.vtable_allocation((ty, trait_ref));
+ collect_miri(tcx, alloc_id, output)
+ }
+ }
+}
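
A hedged source-level sketch (invented names) of allocations whose relocations this function chases:

```rust
fn callback() {}

// The allocation behind `HOOK` stores a function pointer, i.e. a relocation
// to `callback`, so `callback` is collected as a mono item.
static HOOK: fn() = callback;

// Nested references mean nested allocations, found by recursing through
// each allocation's relocations.
static CHAIN: &&u32 = &&42;

fn main() {
    (HOOK)();
    let _ = **CHAIN;
}
```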
+
+/// Scans the MIR in order to find function calls, closures, and drop-glue.
+#[instrument(skip(tcx, output), level = "debug")]
+fn collect_neighbours<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: Instance<'tcx>,
+ output: &mut MonoItems<'tcx>,
+) {
+ let body = tcx.instance_mir(instance.def);
+ MirNeighborCollector { tcx, body: &body, output, instance }.visit_body(&body);
+}
+
+#[instrument(skip(tcx, output), level = "debug")]
+fn collect_const_value<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ value: ConstValue<'tcx>,
+ output: &mut MonoItems<'tcx>,
+) {
+ match value {
+ ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_miri(tcx, ptr.provenance, output),
+ ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
+ for &id in alloc.inner().relocations().values() {
+ collect_miri(tcx, id, output);
+ }
+ }
+ _ => {}
+ }
+}
diff --git a/compiler/rustc_monomorphize/src/lib.rs b/compiler/rustc_monomorphize/src/lib.rs
new file mode 100644
index 000000000..ef4560b5e
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/lib.rs
@@ -0,0 +1,49 @@
+#![feature(array_windows)]
+#![feature(control_flow_enum)]
+#![feature(let_else)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::traits;
+use rustc_middle::ty::adjustment::CustomCoerceUnsized;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+mod collector;
+mod partitioning;
+mod polymorphize;
+mod util;
+
+fn custom_coerce_unsize_info<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ source_ty: Ty<'tcx>,
+ target_ty: Ty<'tcx>,
+) -> CustomCoerceUnsized {
+ let def_id = tcx.require_lang_item(LangItem::CoerceUnsized, None);
+
+ let trait_ref = ty::Binder::dummy(ty::TraitRef {
+ def_id,
+ substs: tcx.mk_substs_trait(source_ty, &[target_ty.into()]),
+ });
+
+ match tcx.codegen_fulfill_obligation((ty::ParamEnv::reveal_all(), trait_ref)) {
+ Ok(traits::ImplSource::UserDefined(traits::ImplSourceUserDefinedData {
+ impl_def_id,
+ ..
+ })) => tcx.coerce_unsized_info(impl_def_id).custom_kind.unwrap(),
+ impl_source => {
+ bug!("invalid `CoerceUnsized` impl_source: {:?}", impl_source);
+ }
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ partitioning::provide(providers);
+ polymorphize::provide(providers);
+}
diff --git a/compiler/rustc_monomorphize/src/partitioning/default.rs b/compiler/rustc_monomorphize/src/partitioning/default.rs
new file mode 100644
index 000000000..15276569c
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/partitioning/default.rs
@@ -0,0 +1,560 @@
+use std::collections::hash_map::Entry;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_hir::definitions::DefPathDataName;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
+use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, Linkage, Visibility};
+use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
+use rustc_middle::ty::print::characteristic_def_id_of_type;
+use rustc_middle::ty::{self, visit::TypeVisitable, DefIdTree, InstanceDef, TyCtxt};
+use rustc_span::symbol::Symbol;
+
+use super::PartitioningCx;
+use crate::collector::InliningMap;
+use crate::partitioning::merging;
+use crate::partitioning::{
+ MonoItemPlacement, Partitioner, PostInliningPartitioning, PreInliningPartitioning,
+};
+
+pub struct DefaultPartitioning;
+
+impl<'tcx> Partitioner<'tcx> for DefaultPartitioning {
+ fn place_root_mono_items(
+ &mut self,
+ cx: &PartitioningCx<'_, 'tcx>,
+ mono_items: &mut dyn Iterator<Item = MonoItem<'tcx>>,
+ ) -> PreInliningPartitioning<'tcx> {
+ let mut roots = FxHashSet::default();
+ let mut codegen_units = FxHashMap::default();
+ let is_incremental_build = cx.tcx.sess.opts.incremental.is_some();
+ let mut internalization_candidates = FxHashSet::default();
+
+ // Determine if monomorphizations instantiated in this crate will be made
+ // available to downstream crates. This depends on whether we are in
+ // share-generics mode and whether the current crate can even have
+ // downstream crates.
+ let export_generics =
+ cx.tcx.sess.opts.share_generics() && cx.tcx.local_crate_exports_generics();
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx);
+ let cgu_name_cache = &mut FxHashMap::default();
+
+ for mono_item in mono_items {
+ match mono_item.instantiation_mode(cx.tcx) {
+ InstantiationMode::GloballyShared { .. } => {}
+ InstantiationMode::LocalCopy => continue,
+ }
+
+ let characteristic_def_id = characteristic_def_id_of_mono_item(cx.tcx, mono_item);
+ let is_volatile = is_incremental_build && mono_item.is_generic_fn();
+
+ let codegen_unit_name = match characteristic_def_id {
+ Some(def_id) => compute_codegen_unit_name(
+ cx.tcx,
+ cgu_name_builder,
+ def_id,
+ is_volatile,
+ cgu_name_cache,
+ ),
+ None => fallback_cgu_name(cgu_name_builder),
+ };
+
+ let codegen_unit = codegen_units
+ .entry(codegen_unit_name)
+ .or_insert_with(|| CodegenUnit::new(codegen_unit_name));
+
+ let mut can_be_internalized = true;
+ let (linkage, visibility) = mono_item_linkage_and_visibility(
+ cx.tcx,
+ &mono_item,
+ &mut can_be_internalized,
+ export_generics,
+ );
+ if visibility == Visibility::Hidden && can_be_internalized {
+ internalization_candidates.insert(mono_item);
+ }
+
+ codegen_unit.items_mut().insert(mono_item, (linkage, visibility));
+ roots.insert(mono_item);
+ }
+
+ // Always ensure we have at least one CGU; otherwise, if we have a
+ // crate with just types (for example), we could wind up with no CGU.
+ if codegen_units.is_empty() {
+ let codegen_unit_name = fallback_cgu_name(cgu_name_builder);
+ codegen_units.insert(codegen_unit_name, CodegenUnit::new(codegen_unit_name));
+ }
+
+ PreInliningPartitioning {
+ codegen_units: codegen_units
+ .into_iter()
+ .map(|(_, codegen_unit)| codegen_unit)
+ .collect(),
+ roots,
+ internalization_candidates,
+ }
+ }
+
+ fn merge_codegen_units(
+ &mut self,
+ cx: &PartitioningCx<'_, 'tcx>,
+ initial_partitioning: &mut PreInliningPartitioning<'tcx>,
+ ) {
+ merging::merge_codegen_units(cx, initial_partitioning);
+ }
+
+ fn place_inlined_mono_items(
+ &mut self,
+ cx: &PartitioningCx<'_, 'tcx>,
+ initial_partitioning: PreInliningPartitioning<'tcx>,
+ ) -> PostInliningPartitioning<'tcx> {
+ let mut new_partitioning = Vec::new();
+ let mut mono_item_placements = FxHashMap::default();
+
+ let PreInliningPartitioning {
+ codegen_units: initial_cgus,
+ roots,
+ internalization_candidates,
+ } = initial_partitioning;
+
+ let single_codegen_unit = initial_cgus.len() == 1;
+
+ for old_codegen_unit in initial_cgus {
+ // Collect all items that need to be available in this codegen unit.
+ let mut reachable = FxHashSet::default();
+ for root in old_codegen_unit.items().keys() {
+ follow_inlining(*root, cx.inlining_map, &mut reachable);
+ }
+
+ let mut new_codegen_unit = CodegenUnit::new(old_codegen_unit.name());
+
+ // Add all monomorphizations that are not already there.
+ for mono_item in reachable {
+ if let Some(linkage) = old_codegen_unit.items().get(&mono_item) {
+ // This is a root, just copy it over.
+ new_codegen_unit.items_mut().insert(mono_item, *linkage);
+ } else {
+ if roots.contains(&mono_item) {
+ bug!(
+ "GloballyShared mono-item inlined into other CGU: \
+ {:?}",
+ mono_item
+ );
+ }
+
+ // This is a CGU-private copy.
+ new_codegen_unit
+ .items_mut()
+ .insert(mono_item, (Linkage::Internal, Visibility::Default));
+ }
+
+ if !single_codegen_unit {
+ // If there is more than one codegen unit, we need to keep track
+ // in which codegen units each monomorphization is placed.
+ match mono_item_placements.entry(mono_item) {
+ Entry::Occupied(e) => {
+ let placement = e.into_mut();
+ debug_assert!(match *placement {
+ MonoItemPlacement::SingleCgu { cgu_name } => {
+ cgu_name != new_codegen_unit.name()
+ }
+ MonoItemPlacement::MultipleCgus => true,
+ });
+ *placement = MonoItemPlacement::MultipleCgus;
+ }
+ Entry::Vacant(e) => {
+ e.insert(MonoItemPlacement::SingleCgu {
+ cgu_name: new_codegen_unit.name(),
+ });
+ }
+ }
+ }
+ }
+
+ new_partitioning.push(new_codegen_unit);
+ }
+
+ return PostInliningPartitioning {
+ codegen_units: new_partitioning,
+ mono_item_placements,
+ internalization_candidates,
+ };
+
+ fn follow_inlining<'tcx>(
+ mono_item: MonoItem<'tcx>,
+ inlining_map: &InliningMap<'tcx>,
+ visited: &mut FxHashSet<MonoItem<'tcx>>,
+ ) {
+ if !visited.insert(mono_item) {
+ return;
+ }
+
+ inlining_map.with_inlining_candidates(mono_item, |target| {
+ follow_inlining(target, inlining_map, visited);
+ });
+ }
+ }
+
+ fn internalize_symbols(
+ &mut self,
+ cx: &PartitioningCx<'_, 'tcx>,
+ partitioning: &mut PostInliningPartitioning<'tcx>,
+ ) {
+ if partitioning.codegen_units.len() == 1 {
+ // Fast path for when there is only one codegen unit. In this case we
+ // can internalize all candidates, since there is nowhere else they
+ // could be accessed from.
+ for cgu in &mut partitioning.codegen_units {
+ for candidate in &partitioning.internalization_candidates {
+ cgu.items_mut().insert(*candidate, (Linkage::Internal, Visibility::Default));
+ }
+ }
+
+ return;
+ }
+
+ // Build a map from every monomorphization to all the monomorphizations that
+ // reference it.
+ let mut accessor_map: FxHashMap<MonoItem<'tcx>, Vec<MonoItem<'tcx>>> = Default::default();
+ cx.inlining_map.iter_accesses(|accessor, accessees| {
+ for accessee in accessees {
+ accessor_map.entry(*accessee).or_default().push(accessor);
+ }
+ });
+
+ let mono_item_placements = &partitioning.mono_item_placements;
+
+        // For each internalization candidate in each codegen unit, check whether it
+        // is accessed from outside its defining codegen unit.
+ for cgu in &mut partitioning.codegen_units {
+ let home_cgu = MonoItemPlacement::SingleCgu { cgu_name: cgu.name() };
+
+ for (accessee, linkage_and_visibility) in cgu.items_mut() {
+ if !partitioning.internalization_candidates.contains(accessee) {
+                    // This item is not a candidate for internalization, so skip it.
+ continue;
+ }
+ debug_assert_eq!(mono_item_placements[accessee], home_cgu);
+
+ if let Some(accessors) = accessor_map.get(accessee) {
+ if accessors
+ .iter()
+ .filter_map(|accessor| {
+ // Some accessors might not have been
+ // instantiated. We can safely ignore those.
+ mono_item_placements.get(accessor)
+ })
+ .any(|placement| *placement != home_cgu)
+ {
+ // Found an accessor from another CGU, so skip to the next
+ // item without marking this one as internal.
+ continue;
+ }
+ }
+
+ // If we got here, we did not find any accesses from other CGUs,
+ // so it's fine to make this monomorphization internal.
+ *linkage_and_visibility = (Linkage::Internal, Visibility::Default);
+ }
+ }
+ }
+}
+
+fn characteristic_def_id_of_mono_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mono_item: MonoItem<'tcx>,
+) -> Option<DefId> {
+ match mono_item {
+ MonoItem::Fn(instance) => {
+ let def_id = match instance.def {
+ ty::InstanceDef::Item(def) => def.did,
+ ty::InstanceDef::VTableShim(..)
+ | ty::InstanceDef::ReifyShim(..)
+ | ty::InstanceDef::FnPtrShim(..)
+ | ty::InstanceDef::ClosureOnceShim { .. }
+ | ty::InstanceDef::Intrinsic(..)
+ | ty::InstanceDef::DropGlue(..)
+ | ty::InstanceDef::Virtual(..)
+ | ty::InstanceDef::CloneShim(..) => return None,
+ };
+
+ // If this is a method, we want to put it into the same module as
+ // its self-type. If the self-type does not provide a characteristic
+ // DefId, we use the location of the impl after all.
+
+ if tcx.trait_of_item(def_id).is_some() {
+ let self_ty = instance.substs.type_at(0);
+ // This is a default implementation of a trait method.
+ return characteristic_def_id_of_type(self_ty).or(Some(def_id));
+ }
+
+ if let Some(impl_def_id) = tcx.impl_of_method(def_id) {
+ if tcx.sess.opts.incremental.is_some()
+ && tcx.trait_id_of_impl(impl_def_id) == tcx.lang_items().drop_trait()
+ {
+ // Put `Drop::drop` into the same cgu as `drop_in_place`
+ // since `drop_in_place` is the only thing that can
+ // call it.
+ return None;
+ }
+
+            // When polymorphization is enabled, methods which do not depend on their own
+            // generic parameters, but whose impl block's self-type does, will fail to
+            // normalize.
+ if !tcx.sess.opts.unstable_opts.polymorphize || !instance.needs_subst() {
+ // This is a method within an impl, find out what the self-type is:
+ let impl_self_ty = tcx.subst_and_normalize_erasing_regions(
+ instance.substs,
+ ty::ParamEnv::reveal_all(),
+ tcx.type_of(impl_def_id),
+ );
+ if let Some(def_id) = characteristic_def_id_of_type(impl_self_ty) {
+ return Some(def_id);
+ }
+ }
+ }
+
+ Some(def_id)
+ }
+ MonoItem::Static(def_id) => Some(def_id),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.def_id.to_def_id()),
+ }
+}
+
+fn compute_codegen_unit_name(
+ tcx: TyCtxt<'_>,
+ name_builder: &mut CodegenUnitNameBuilder<'_>,
+ def_id: DefId,
+ volatile: bool,
+ cache: &mut CguNameCache,
+) -> Symbol {
+ // Find the innermost module that is not nested within a function.
+ let mut current_def_id = def_id;
+ let mut cgu_def_id = None;
+ // Walk backwards from the item we want to find the module for.
+ loop {
+ if current_def_id.is_crate_root() {
+ if cgu_def_id.is_none() {
+ // If we have not found a module yet, take the crate root.
+ cgu_def_id = Some(def_id.krate.as_def_id());
+ }
+ break;
+ } else if tcx.def_kind(current_def_id) == DefKind::Mod {
+ if cgu_def_id.is_none() {
+ cgu_def_id = Some(current_def_id);
+ }
+ } else {
+ // If we encounter something that is not a module, throw away
+ // any module that we've found so far because we now know that
+ // it is nested within something else.
+ cgu_def_id = None;
+ }
+
+ current_def_id = tcx.parent(current_def_id);
+ }
+
+ let cgu_def_id = cgu_def_id.unwrap();
+
+ *cache.entry((cgu_def_id, volatile)).or_insert_with(|| {
+ let def_path = tcx.def_path(cgu_def_id);
+
+ let components = def_path.data.iter().map(|part| match part.data.name() {
+ DefPathDataName::Named(name) => name,
+ DefPathDataName::Anon { .. } => unreachable!(),
+ });
+
+ let volatile_suffix = volatile.then_some("volatile");
+
+ name_builder.build_cgu_name(def_path.krate, components, volatile_suffix)
+ })
+}
+
+// Anything we can't find a proper codegen unit for goes into this.
+fn fallback_cgu_name(name_builder: &mut CodegenUnitNameBuilder<'_>) -> Symbol {
+ name_builder.build_cgu_name(LOCAL_CRATE, &["fallback"], Some("cgu"))
+}
+
+fn mono_item_linkage_and_visibility<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mono_item: &MonoItem<'tcx>,
+ can_be_internalized: &mut bool,
+ export_generics: bool,
+) -> (Linkage, Visibility) {
+ if let Some(explicit_linkage) = mono_item.explicit_linkage(tcx) {
+ return (explicit_linkage, Visibility::Default);
+ }
+ let vis = mono_item_visibility(tcx, mono_item, can_be_internalized, export_generics);
+ (Linkage::External, vis)
+}
+
+type CguNameCache = FxHashMap<(DefId, bool), Symbol>;
+
+fn mono_item_visibility<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mono_item: &MonoItem<'tcx>,
+ can_be_internalized: &mut bool,
+ export_generics: bool,
+) -> Visibility {
+ let instance = match mono_item {
+ // This is pretty complicated; see below.
+ MonoItem::Fn(instance) => instance,
+
+ // Misc handling for generics and such, but otherwise:
+ MonoItem::Static(def_id) => {
+ return if tcx.is_reachable_non_generic(*def_id) {
+ *can_be_internalized = false;
+ default_visibility(tcx, *def_id, false)
+ } else {
+ Visibility::Hidden
+ };
+ }
+ MonoItem::GlobalAsm(item_id) => {
+ return if tcx.is_reachable_non_generic(item_id.def_id) {
+ *can_be_internalized = false;
+ default_visibility(tcx, item_id.def_id.to_def_id(), false)
+ } else {
+ Visibility::Hidden
+ };
+ }
+ };
+
+ let def_id = match instance.def {
+ InstanceDef::Item(def) => def.did,
+ InstanceDef::DropGlue(def_id, Some(_)) => def_id,
+
+ // These are all compiler glue and such, never exported, always hidden.
+ InstanceDef::VTableShim(..)
+ | InstanceDef::ReifyShim(..)
+ | InstanceDef::FnPtrShim(..)
+ | InstanceDef::Virtual(..)
+ | InstanceDef::Intrinsic(..)
+ | InstanceDef::ClosureOnceShim { .. }
+ | InstanceDef::DropGlue(..)
+ | InstanceDef::CloneShim(..) => return Visibility::Hidden,
+ };
+
+ // The `start_fn` lang item is actually a monomorphized instance of a
+ // function in the standard library, used for the `main` function. We don't
+    // want to export it, so we tag it with `Hidden` visibility; but this symbol
+    // is only referenced from the actual `main` symbol, which we unfortunately
+ // don't know anything about during partitioning/collection. As a result we
+ // forcibly keep this symbol out of the `internalization_candidates` set.
+ //
+ // FIXME: eventually we don't want to always force this symbol to have
+ // hidden visibility, it should indeed be a candidate for
+ // internalization, but we have to understand that it's referenced
+ // from the `main` symbol we'll generate later.
+ //
+ // This may be fixable with a new `InstanceDef` perhaps? Unsure!
+ if tcx.lang_items().start_fn() == Some(def_id) {
+ *can_be_internalized = false;
+ return Visibility::Hidden;
+ }
+
+ let is_generic = instance.substs.non_erasable_generics().next().is_some();
+
+ // Upstream `DefId` instances get different handling than local ones.
+ let Some(def_id) = def_id.as_local() else {
+ return if export_generics && is_generic {
+ // If it is an upstream monomorphization and we export generics, we must make
+ // it available to downstream crates.
+ *can_be_internalized = false;
+ default_visibility(tcx, def_id, true)
+ } else {
+ Visibility::Hidden
+ };
+ };
+
+ if is_generic {
+ if export_generics {
+ if tcx.is_unreachable_local_definition(def_id) {
+ // This instance cannot be used from another crate.
+ Visibility::Hidden
+ } else {
+ // This instance might be useful in a downstream crate.
+ *can_be_internalized = false;
+ default_visibility(tcx, def_id.to_def_id(), true)
+ }
+ } else {
+            // If we are not exporting generics, or the definition is not reachable
+            // by downstream crates, we can internalize its instantiations.
+ Visibility::Hidden
+ }
+ } else {
+        // If this isn't a generic function, then we mark it `Default` when it is
+        // a reachable item, meaning that it's a symbol other crates may access
+        // when they link to us.
+ if tcx.is_reachable_non_generic(def_id.to_def_id()) {
+ *can_be_internalized = false;
+ debug_assert!(!is_generic);
+ return default_visibility(tcx, def_id.to_def_id(), false);
+ }
+
+    // If this isn't reachable then we're going to tag this with `Hidden`
+    // visibility. In some situations, though, we'll want to prevent this
+    // symbol from being internalized.
+    //
+    // There are two categories of items here:
+ //
+ // * First is weak lang items. These are basically mechanisms for
+ // libcore to forward-reference symbols defined later in crates like
+ // the standard library or `#[panic_handler]` definitions. The
+    //   definition of these weak lang items needs to be referenceable by
+ // libcore, so we're no longer a candidate for internalization.
+ // Removal of these functions can't be done by LLVM but rather must be
+ // done by the linker as it's a non-local decision.
+ //
+ // * Second is "std internal symbols". Currently this is primarily used
+ // for allocator symbols. Allocators are a little weird in their
+ // implementation, but the idea is that the compiler, at the last
+ // minute, defines an allocator with an injected object file. The
+ // `alloc` crate references these symbols (`__rust_alloc`) and the
+ // definition doesn't get hooked up until a linked crate artifact is
+ // generated.
+ //
+ // The symbols synthesized by the compiler (`__rust_alloc`) are thin
+ // veneers around the actual implementation, some other symbol which
+ // implements the same ABI. These symbols (things like `__rg_alloc`,
+ // `__rdl_alloc`, `__rde_alloc`, etc), are all tagged with "std
+ // internal symbols".
+ //
+    // The std-internal symbols here **should not show up in a DLL as an
+ // exported interface**, so they return `false` from
+ // `is_reachable_non_generic` above and we'll give them `Hidden`
+ // visibility below. Like the weak lang items, though, we can't let
+ // LLVM internalize them as this decision is left up to the linker to
+ // omit them, so prevent them from being internalized.
+ let attrs = tcx.codegen_fn_attrs(def_id);
+ if attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
+ *can_be_internalized = false;
+ }
+
+ Visibility::Hidden
+ }
+}
+
+fn default_visibility(tcx: TyCtxt<'_>, id: DefId, is_generic: bool) -> Visibility {
+ if !tcx.sess.target.default_hidden_visibility {
+ return Visibility::Default;
+ }
+
+ // Generic functions never have export-level C.
+ if is_generic {
+ return Visibility::Hidden;
+ }
+
+ // Things with export level C don't get instantiated in
+ // downstream crates.
+ if !id.is_local() {
+ return Visibility::Hidden;
+ }
+
+    // C-export level items remain at `Default`; all other internal
+    // items become `Hidden`.
+ match tcx.reachable_non_generics(id.krate).get(&id) {
+ Some(SymbolExportInfo { level: SymbolExportLevel::C, .. }) => Visibility::Default,
+ _ => Visibility::Hidden,
+ }
+}
diff --git a/compiler/rustc_monomorphize/src/partitioning/merging.rs b/compiler/rustc_monomorphize/src/partitioning/merging.rs
new file mode 100644
index 000000000..02bb8dea0
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/partitioning/merging.rs
@@ -0,0 +1,111 @@
+use std::cmp;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::LOCAL_CRATE;
+use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder};
+use rustc_span::symbol::Symbol;
+
+use super::PartitioningCx;
+use crate::partitioning::PreInliningPartitioning;
+
+pub fn merge_codegen_units<'tcx>(
+ cx: &PartitioningCx<'_, 'tcx>,
+ initial_partitioning: &mut PreInliningPartitioning<'tcx>,
+) {
+ assert!(cx.target_cgu_count >= 1);
+ let codegen_units = &mut initial_partitioning.codegen_units;
+
+ // Note that at this point in time the `codegen_units` here may not be in a
+ // deterministic order (but we know they're deterministically the same set).
+ // We want this merging to produce a deterministic ordering of codegen units
+ // from the input.
+ //
+    // Because the merging below always merges the two smallest units into each
+    // other, starting from a deterministic order (sorted by name) means that if
+    // two CGUs have the same size, the stable sort below keeps everything nice
+    // and deterministic.
+ codegen_units.sort_by(|a, b| a.name().as_str().partial_cmp(b.name().as_str()).unwrap());
+
+ // This map keeps track of what got merged into what.
+ let mut cgu_contents: FxHashMap<Symbol, Vec<Symbol>> =
+ codegen_units.iter().map(|cgu| (cgu.name(), vec![cgu.name()])).collect();
+
+    // Merge the two smallest codegen units until the target count is reached.
+ while codegen_units.len() > cx.target_cgu_count {
+ // Sort small cgus to the back
+ codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
+ let mut smallest = codegen_units.pop().unwrap();
+ let second_smallest = codegen_units.last_mut().unwrap();
+
+ // Move the mono-items from `smallest` to `second_smallest`
+ second_smallest.modify_size_estimate(smallest.size_estimate());
+ for (k, v) in smallest.items_mut().drain() {
+ second_smallest.items_mut().insert(k, v);
+ }
+
+ // Record that `second_smallest` now contains all the stuff that was in
+ // `smallest` before.
+ let mut consumed_cgu_names = cgu_contents.remove(&smallest.name()).unwrap();
+ cgu_contents.get_mut(&second_smallest.name()).unwrap().append(&mut consumed_cgu_names);
+
+ debug!(
+ "CodegenUnit {} merged into CodegenUnit {}",
+ smallest.name(),
+ second_smallest.name()
+ );
+ }
+
+ let cgu_name_builder = &mut CodegenUnitNameBuilder::new(cx.tcx);
+
+ if cx.tcx.sess.opts.incremental.is_some() {
+ // If we are doing incremental compilation, we want CGU names to
+ // reflect the path of the source level module they correspond to.
+ // For CGUs that contain the code of multiple modules because of the
+ // merging done above, we use a concatenation of the names of
+ // all contained CGUs.
+ let new_cgu_names: FxHashMap<Symbol, String> = cgu_contents
+ .into_iter()
+ // This `filter` makes sure we only update the name of CGUs that
+ // were actually modified by merging.
+ .filter(|(_, cgu_contents)| cgu_contents.len() > 1)
+ .map(|(current_cgu_name, cgu_contents)| {
+ let mut cgu_contents: Vec<&str> = cgu_contents.iter().map(|s| s.as_str()).collect();
+
+ // Sort the names, so things are deterministic and easy to
+ // predict.
+
+ // We are sorting primitive &strs here so we can use unstable sort
+ cgu_contents.sort_unstable();
+
+ (current_cgu_name, cgu_contents.join("--"))
+ })
+ .collect();
+
+ for cgu in codegen_units.iter_mut() {
+ if let Some(new_cgu_name) = new_cgu_names.get(&cgu.name()) {
+ if cx.tcx.sess.opts.unstable_opts.human_readable_cgu_names {
+ cgu.set_name(Symbol::intern(&new_cgu_name));
+ } else {
+ // If we don't require CGU names to be human-readable, we
+ // use a fixed length hash of the composite CGU name
+ // instead.
+ let new_cgu_name = CodegenUnit::mangle_name(&new_cgu_name);
+ cgu.set_name(Symbol::intern(&new_cgu_name));
+ }
+ }
+ }
+ } else {
+ // If we are compiling non-incrementally we just generate simple CGU
+ // names containing an index.
+ for (index, cgu) in codegen_units.iter_mut().enumerate() {
+ cgu.set_name(numbered_codegen_unit_name(cgu_name_builder, index));
+ }
+ }
+}
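
As a hedged, self-contained model of the loop above (a toy `(name, size)` pair stands in for the real `CodegenUnit`; the real code also moves items and records `cgu_contents`):

```rust
use std::cmp::Reverse;

// Toy stand-in for the merge strategy: repeatedly fold the smallest "CGU"
// into the second-smallest until only `target_count` remain.
fn merge_smallest(mut cgus: Vec<(String, usize)>, target_count: usize) -> Vec<(String, usize)> {
    assert!(target_count >= 1);
    // Start from a deterministic order, exactly like the real code.
    cgus.sort_by(|a, b| a.0.cmp(&b.0));
    while cgus.len() > target_count {
        // Sort the smallest CGUs to the back...
        cgus.sort_by_cached_key(|(_, size)| Reverse(*size));
        // ...then fold the smallest into the second-smallest.
        let smallest = cgus.pop().unwrap();
        let second_smallest = cgus.last_mut().unwrap();
        second_smallest.1 += smallest.1;
    }
    cgus
}

fn main() {
    let cgus = vec![
        ("a".to_string(), 10),
        ("b".to_string(), 1),
        ("c".to_string(), 2),
        ("d".to_string(), 7),
    ];
    // `b` folds into `c`, then that result folds into `d`: sizes end as [10, 10].
    let merged = merge_smallest(cgus, 2);
    assert_eq!(merged.len(), 2);
}
```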
+
+fn numbered_codegen_unit_name(
+ name_builder: &mut CodegenUnitNameBuilder<'_>,
+ index: usize,
+) -> Symbol {
+ name_builder.build_cgu_name_no_mangle(LOCAL_CRATE, &["cgu"], Some(index))
+}
diff --git a/compiler/rustc_monomorphize/src/partitioning/mod.rs b/compiler/rustc_monomorphize/src/partitioning/mod.rs
new file mode 100644
index 000000000..ff2d38693
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/partitioning/mod.rs
@@ -0,0 +1,515 @@
+//! Partitioning Codegen Units for Incremental Compilation
+//! ======================================================
+//!
+//! The task of this module is to take the complete set of monomorphizations of
+//! a crate and produce a set of codegen units from it, where a codegen unit
+//! is a named set of (mono-item, linkage) pairs. That is, this module
+//! decides which monomorphization appears in which codegen units with which
+//! linkage. The following paragraphs describe some of the background on the
+//! partitioning scheme.
+//!
+//! The most important opportunity for saving on compilation time with
+//! incremental compilation is to avoid re-codegenning and re-optimizing code.
+//! Since the unit of codegen and optimization for LLVM is "modules" or, as we
+//! call them, "codegen units", the particulars of how much time can be saved
+//! by incremental compilation are tightly linked to how the output program is
+//! partitioned into these codegen units prior to passing it to LLVM --
+//! especially because we have to treat codegen units as opaque entities once
+//! they are created: There is no way for us to incrementally update an existing
+//! LLVM module and so we have to build any such module from scratch if it was
+//! affected by some change in the source code.
+//!
+//! From that point of view it would make sense to maximize the number of
+//! codegen units by, for example, putting each function into its own module.
+//! That way, only those modules that were actually affected by some change
+//! would have to be re-compiled, minimizing the number of functions that could
+//! have been re-used but just happened to be located in a module that is
+//! re-compiled.
+//!
+//! However, since LLVM optimization does not work across module boundaries,
+//! using such a highly granular partitioning would lead to very slow runtime
+//! code since it would effectively prohibit inlining and other inter-procedure
+//! optimizations. We want to avoid that as much as possible.
+//!
+//! Thus we end up with a trade-off: The bigger the codegen units, the better
+//! LLVM's optimizer can do its work, but also the smaller the compilation time
+//! reduction we get from incremental compilation.
+//!
+//! Ideally, we would create a partitioning such that there are few big codegen
+//! units with few interdependencies between them. For now though, we use the
+//! following heuristic to determine the partitioning:
+//!
+//! - There are two codegen units for every source-level module:
+//! - One for "stable", that is non-generic, code
+//! - One for more "volatile" code, i.e., monomorphized instances of functions
+//! defined in that module
+//!
+//! In order to see why this heuristic makes sense, let's take a look at when a
+//! codegen unit can get invalidated:
+//!
+//! 1. The most straightforward case is when the BODY of a function or global
+//! changes. Then any codegen unit containing the code for that item has to be
+//! re-compiled. Note that this includes all codegen units where the function
+//! has been inlined.
+//!
+//! 2. The next case is when the SIGNATURE of a function or global changes. In
+//! this case, all codegen units containing a REFERENCE to that item have to be
+//! re-compiled. This is a superset of case 1.
+//!
+//! 3. The final and most subtle case is when a REFERENCE to a generic function
+//! is added or removed somewhere. Even though the definition of the function
+//! might be unchanged, a new REFERENCE might introduce a new monomorphized
+//! instance of this function which has to be placed and compiled somewhere.
+//! Conversely, when removing a REFERENCE, it might have been the last one with
+//! that particular set of generic arguments and thus we have to remove it.
+//!
+//! From the above we see that just using one codegen unit per source-level
+//! module is not such a good idea, since just adding a REFERENCE to some
+//! generic item somewhere else would invalidate everything within the module
+//! containing the generic item. The heuristic above reduces this detrimental
+//! side-effect of references a little by at least not touching the non-generic
+//! code of the module.
+//!
+//! A Note on Inlining
+//! ------------------
+//! As briefly mentioned above, in order for LLVM to be able to inline a
+//! function call, the body of the function has to be available in the LLVM
+//! module where the call is made. This has a few consequences for partitioning:
+//!
+//! - The partitioning algorithm has to take care of placing functions into all
+//! codegen units where they should be available for inlining. It also has to
+//! decide on the correct linkage for these functions.
+//!
+//! - The partitioning algorithm has to know which functions are likely to get
+//! inlined, so it can distribute function instantiations accordingly. Since
+//! there is no way of knowing for sure which functions LLVM will decide to
+//! inline in the end, we apply a heuristic here: Only functions marked with
+//! `#[inline]` are considered for inlining by the partitioner. The current
+//! implementation will not try to determine if a function is likely to be
+//! inlined by looking at the function's definition.
+//!
+//! Note though that as a side-effect of creating codegen units per
+//! source-level module, functions from the same module will be available for
+//! inlining, even when they are not marked `#[inline]`.
+
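
A hedged illustration of the stable/volatile split described above (user code with invented names; the resulting CGU names are conceptual, not guaranteed):

```rust
mod utils {
    // Non-generic: placed in the "stable" CGU for `utils`.
    pub fn helper() -> u32 {
        41
    }

    // Generic: each monomorphization (e.g. `generic::<u32>`) is placed in
    // the "volatile" companion CGU for `utils`.
    pub fn generic<T>(t: T) -> T {
        t
    }
}

fn main() {
    let x = utils::helper();
    let _ = utils::generic(x + 1);
}
```

Editing only `generic`'s body would then invalidate the volatile CGU while leaving the stable one reusable.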
+mod default;
+mod merging;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync;
+use rustc_hir::def_id::DefIdSet;
+use rustc_middle::mir;
+use rustc_middle::mir::mono::MonoItem;
+use rustc_middle::mir::mono::{CodegenUnit, Linkage};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::Symbol;
+
+use crate::collector::InliningMap;
+use crate::collector::{self, MonoItemCollectionMode};
+
+pub struct PartitioningCx<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ target_cgu_count: usize,
+ inlining_map: &'a InliningMap<'tcx>,
+}
+
+trait Partitioner<'tcx> {
+ fn place_root_mono_items(
+ &mut self,
+ cx: &PartitioningCx<'_, 'tcx>,
+ mono_items: &mut dyn Iterator<Item = MonoItem<'tcx>>,
+ ) -> PreInliningPartitioning<'tcx>;
+
+ fn merge_codegen_units(
+ &mut self,
+ cx: &PartitioningCx<'_, 'tcx>,
+ initial_partitioning: &mut PreInliningPartitioning<'tcx>,
+ );
+
+ fn place_inlined_mono_items(
+ &mut self,
+ cx: &PartitioningCx<'_, 'tcx>,
+ initial_partitioning: PreInliningPartitioning<'tcx>,
+ ) -> PostInliningPartitioning<'tcx>;
+
+ fn internalize_symbols(
+ &mut self,
+ cx: &PartitioningCx<'_, 'tcx>,
+ partitioning: &mut PostInliningPartitioning<'tcx>,
+ );
+}
+
+fn get_partitioner<'tcx>(tcx: TyCtxt<'tcx>) -> Box<dyn Partitioner<'tcx>> {
+ let strategy = match &tcx.sess.opts.unstable_opts.cgu_partitioning_strategy {
+ None => "default",
+ Some(s) => &s[..],
+ };
+
+ match strategy {
+ "default" => Box::new(default::DefaultPartitioning),
+ _ => tcx.sess.fatal("unknown partitioning strategy"),
+ }
+}
+
+pub fn partition<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mono_items: &mut dyn Iterator<Item = MonoItem<'tcx>>,
+ max_cgu_count: usize,
+ inlining_map: &InliningMap<'tcx>,
+) -> Vec<CodegenUnit<'tcx>> {
+ let _prof_timer = tcx.prof.generic_activity("cgu_partitioning");
+
+ let mut partitioner = get_partitioner(tcx);
+ let cx = &PartitioningCx { tcx, target_cgu_count: max_cgu_count, inlining_map };
+ // In the first step, we place all regular monomorphizations into their
+ // respective 'home' codegen unit. Regular monomorphizations are all
+ // functions and statics defined in the local crate.
+ let mut initial_partitioning = {
+ let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_roots");
+ partitioner.place_root_mono_items(cx, mono_items)
+ };
+
+ initial_partitioning.codegen_units.iter_mut().for_each(|cgu| cgu.estimate_size(tcx));
+
+ debug_dump(tcx, "INITIAL PARTITIONING:", initial_partitioning.codegen_units.iter());
+
+ // Merge until we have at most `max_cgu_count` codegen units.
+ {
+ let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_merge_cgus");
+ partitioner.merge_codegen_units(cx, &mut initial_partitioning);
+ debug_dump(tcx, "POST MERGING:", initial_partitioning.codegen_units.iter());
+ }
+
+ // In the next step, we use the inlining map to determine which additional
+ // monomorphizations have to go into each codegen unit. These additional
+ // monomorphizations can be drop-glue, functions from external crates, and
+ // local functions the definition of which is marked with `#[inline]`.
+ let mut post_inlining = {
+ let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_place_inline_items");
+ partitioner.place_inlined_mono_items(cx, initial_partitioning)
+ };
+
+ post_inlining.codegen_units.iter_mut().for_each(|cgu| cgu.estimate_size(tcx));
+
+ debug_dump(tcx, "POST INLINING:", post_inlining.codegen_units.iter());
+
+ // Next we try to make as many symbols "internal" as possible, so LLVM has
+ // more freedom to optimize.
+ if !tcx.sess.link_dead_code() {
+ let _prof_timer = tcx.prof.generic_activity("cgu_partitioning_internalize_symbols");
+ partitioner.internalize_symbols(cx, &mut post_inlining);
+ }
+
+ let instrument_dead_code =
+ tcx.sess.instrument_coverage() && !tcx.sess.instrument_coverage_except_unused_functions();
+
+ if instrument_dead_code {
+ assert!(
+ post_inlining.codegen_units.len() > 0,
+ "There must be at least one CGU that code coverage data can be generated in."
+ );
+
+ // Find the smallest CGU that has exported symbols and put the dead
+ // function stubs in that CGU. We look for exported symbols to increase
+ // the likelihood the linker won't throw away the dead functions.
+ // FIXME(#92165): In order to truly resolve this, we need to make sure
+ // the object file (CGU) containing the dead function stubs is included
+ // in the final binary. This will probably require forcing these
+ // function symbols to be included via `-u` or `/include` linker args.
+ let mut cgus: Vec<_> = post_inlining.codegen_units.iter_mut().collect();
+ cgus.sort_by_key(|cgu| cgu.size_estimate());
+
+ let dead_code_cgu =
+ if let Some(cgu) = cgus.into_iter().rev().find(|cgu| {
+ cgu.items().iter().any(|(_, (linkage, _))| *linkage == Linkage::External)
+ }) {
+ cgu
+ } else {
+ // If there are no CGUs that have externally linked items,
+ // then we just pick the first CGU as a fallback.
+ &mut post_inlining.codegen_units[0]
+ };
+ dead_code_cgu.make_code_coverage_dead_code_cgu();
+ }
+
+ // Finally, sort by codegen unit name, so that we get deterministic results.
+ let PostInliningPartitioning {
+ codegen_units: mut result,
+ mono_item_placements: _,
+ internalization_candidates: _,
+ } = post_inlining;
+
+ result.sort_by(|a, b| a.name().as_str().partial_cmp(b.name().as_str()).unwrap());
+
+ result
+}
+
+pub struct PreInliningPartitioning<'tcx> {
+ codegen_units: Vec<CodegenUnit<'tcx>>,
+ roots: FxHashSet<MonoItem<'tcx>>,
+ internalization_candidates: FxHashSet<MonoItem<'tcx>>,
+}
+
+/// For symbol internalization, we need to know whether a symbol/mono-item is
+/// accessed from outside the codegen unit it is defined in. This type is used
+/// to keep track of that.
+#[derive(Clone, PartialEq, Eq, Debug)]
+enum MonoItemPlacement {
+ SingleCgu { cgu_name: Symbol },
+ MultipleCgus,
+}
+
+struct PostInliningPartitioning<'tcx> {
+ codegen_units: Vec<CodegenUnit<'tcx>>,
+ mono_item_placements: FxHashMap<MonoItem<'tcx>, MonoItemPlacement>,
+ internalization_candidates: FxHashSet<MonoItem<'tcx>>,
+}
+
+fn debug_dump<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, label: &str, cgus: I)
+where
+ I: Iterator<Item = &'a CodegenUnit<'tcx>>,
+ 'tcx: 'a,
+{
+ let dump = move || {
+ use std::fmt::Write;
+
+ let s = &mut String::new();
+ let _ = writeln!(s, "{}", label);
+ for cgu in cgus {
+ let _ =
+ writeln!(s, "CodegenUnit {} estimated size {} :", cgu.name(), cgu.size_estimate());
+
+ for (mono_item, linkage) in cgu.items() {
+ let symbol_name = mono_item.symbol_name(tcx).name;
+ let symbol_hash_start = symbol_name.rfind('h');
+ let symbol_hash = symbol_hash_start.map_or("<no hash>", |i| &symbol_name[i..]);
+
+ let _ = writeln!(
+ s,
+ " - {} [{:?}] [{}] estimated size {}",
+ mono_item,
+ linkage,
+ symbol_hash,
+ mono_item.size_estimate(tcx)
+ );
+ }
+
+ let _ = writeln!(s, "");
+ }
+
+ std::mem::take(s)
+ };
+
+ debug!("{}", dump());
+}
+
+#[inline(never)] // give this a place in the profiler
+fn assert_symbols_are_distinct<'a, 'tcx, I>(tcx: TyCtxt<'tcx>, mono_items: I)
+where
+ I: Iterator<Item = &'a MonoItem<'tcx>>,
+ 'tcx: 'a,
+{
+ let _prof_timer = tcx.prof.generic_activity("assert_symbols_are_distinct");
+
+ let mut symbols: Vec<_> =
+ mono_items.map(|mono_item| (mono_item, mono_item.symbol_name(tcx))).collect();
+
+ symbols.sort_by_key(|sym| sym.1);
+
+ for &[(mono_item1, ref sym1), (mono_item2, ref sym2)] in symbols.array_windows() {
+ if sym1 == sym2 {
+ let span1 = mono_item1.local_span(tcx);
+ let span2 = mono_item2.local_span(tcx);
+
+ // Deterministically select one of the spans for error reporting
+ let span = match (span1, span2) {
+ (Some(span1), Some(span2)) => {
+ Some(if span1.lo().0 > span2.lo().0 { span1 } else { span2 })
+ }
+ (span1, span2) => span1.or(span2),
+ };
+
+ let error_message = format!("symbol `{}` is already defined", sym1);
+
+ if let Some(span) = span {
+ tcx.sess.span_fatal(span, &error_message)
+ } else {
+ tcx.sess.fatal(&error_message)
+ }
+ }
+ }
+}
+
+fn collect_and_partition_mono_items<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (): (),
+) -> (&'tcx DefIdSet, &'tcx [CodegenUnit<'tcx>]) {
+ let collection_mode = match tcx.sess.opts.unstable_opts.print_mono_items {
+ Some(ref s) => {
+ let mode_string = s.to_lowercase();
+ let mode_string = mode_string.trim();
+ if mode_string == "eager" {
+ MonoItemCollectionMode::Eager
+ } else {
+ if mode_string != "lazy" {
+ let message = format!(
+ "Unknown codegen-item collection mode '{}'. \
+ Falling back to 'lazy' mode.",
+ mode_string
+ );
+ tcx.sess.warn(&message);
+ }
+
+ MonoItemCollectionMode::Lazy
+ }
+ }
+ None => {
+ if tcx.sess.link_dead_code() {
+ MonoItemCollectionMode::Eager
+ } else {
+ MonoItemCollectionMode::Lazy
+ }
+ }
+ };
+
+ let (items, inlining_map) = collector::collect_crate_mono_items(tcx, collection_mode);
+
+ tcx.sess.abort_if_errors();
+
+ let (codegen_units, _) = tcx.sess.time("partition_and_assert_distinct_symbols", || {
+ sync::join(
+ || {
+ let mut codegen_units = partition(
+ tcx,
+ &mut items.iter().cloned(),
+ tcx.sess.codegen_units(),
+ &inlining_map,
+ );
+ codegen_units[0].make_primary();
+ &*tcx.arena.alloc_from_iter(codegen_units)
+ },
+ || assert_symbols_are_distinct(tcx, items.iter()),
+ )
+ });
+
+ if tcx.prof.enabled() {
+ // Record CGU size estimates for self-profiling.
+ for cgu in codegen_units {
+ tcx.prof.artifact_size(
+ "codegen_unit_size_estimate",
+ cgu.name().as_str(),
+ cgu.size_estimate() as u64,
+ );
+ }
+ }
+
+ let mono_items: DefIdSet = items
+ .iter()
+ .filter_map(|mono_item| match *mono_item {
+ MonoItem::Fn(ref instance) => Some(instance.def_id()),
+ MonoItem::Static(def_id) => Some(def_id),
+ _ => None,
+ })
+ .collect();
+
+ if tcx.sess.opts.unstable_opts.print_mono_items.is_some() {
+ let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default();
+
+ for cgu in codegen_units {
+ for (&mono_item, &linkage) in cgu.items() {
+ item_to_cgus.entry(mono_item).or_default().push((cgu.name(), linkage));
+ }
+ }
+
+ let mut item_keys: Vec<_> = items
+ .iter()
+ .map(|i| {
+ let mut output = with_no_trimmed_paths!(i.to_string());
+ output.push_str(" @@");
+ let mut empty = Vec::new();
+ let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
+ cgus.sort_by_key(|(name, _)| *name);
+ cgus.dedup();
+ for &(ref cgu_name, (linkage, _)) in cgus.iter() {
+ output.push(' ');
+ output.push_str(cgu_name.as_str());
+
+ let linkage_abbrev = match linkage {
+ Linkage::External => "External",
+ Linkage::AvailableExternally => "Available",
+ Linkage::LinkOnceAny => "OnceAny",
+ Linkage::LinkOnceODR => "OnceODR",
+ Linkage::WeakAny => "WeakAny",
+ Linkage::WeakODR => "WeakODR",
+ Linkage::Appending => "Appending",
+ Linkage::Internal => "Internal",
+ Linkage::Private => "Private",
+ Linkage::ExternalWeak => "ExternalWeak",
+ Linkage::Common => "Common",
+ };
+
+ output.push('[');
+ output.push_str(linkage_abbrev);
+ output.push(']');
+ }
+ output
+ })
+ .collect();
+
+ item_keys.sort();
+
+ for item in item_keys {
+ println!("MONO_ITEM {}", item);
+ }
+ }
+
+ (tcx.arena.alloc(mono_items), codegen_units)
+}
+
+fn codegened_and_inlined_items<'tcx>(tcx: TyCtxt<'tcx>, (): ()) -> &'tcx DefIdSet {
+ let (items, cgus) = tcx.collect_and_partition_mono_items(());
+ let mut visited = DefIdSet::default();
+ let mut result = items.clone();
+
+ for cgu in cgus {
+ for (item, _) in cgu.items() {
+ if let MonoItem::Fn(ref instance) = item {
+ let did = instance.def_id();
+ if !visited.insert(did) {
+ continue;
+ }
+ let body = tcx.instance_mir(instance.def);
+ for block in body.basic_blocks() {
+ for statement in &block.statements {
+ let mir::StatementKind::Coverage(_) = statement.kind else { continue };
+ let scope = statement.source_info.scope;
+ if let Some(inlined) = scope.inlined_instance(&body.source_scopes) {
+ result.insert(inlined.def_id());
+ }
+ }
+ }
+ }
+ }
+ }
+
+ tcx.arena.alloc(result)
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.collect_and_partition_mono_items = collect_and_partition_mono_items;
+ providers.codegened_and_inlined_items = codegened_and_inlined_items;
+
+ providers.is_codegened_item = |tcx, def_id| {
+ let (all_mono_items, _) = tcx.collect_and_partition_mono_items(());
+ all_mono_items.contains(&def_id)
+ };
+
+ providers.codegen_unit = |tcx, name| {
+ let (_, all) = tcx.collect_and_partition_mono_items(());
+ all.iter()
+ .find(|cgu| cgu.name() == name)
+ .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name))
+ };
+}
diff --git a/compiler/rustc_monomorphize/src/polymorphize.rs b/compiler/rustc_monomorphize/src/polymorphize.rs
new file mode 100644
index 000000000..394843e51
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/polymorphize.rs
@@ -0,0 +1,385 @@
+//! Polymorphization Analysis
+//! =========================
+//!
+//! This module implements an analysis of functions, methods and closures to determine which
+//! generic parameters are unused (and eventually, in what ways generic parameters are used - only
+//! for their size, offset of a field, etc.).
+
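
A hedged sketch of what the analysis detects (illustrative user code; polymorphization itself is gated behind the unstable `-Zpolymorphize=on` flag seen below):

```rust
fn uses_t<T>(t: T) -> T {
    t // `T` is used: the value flows through the body
}

fn ignores_t<T>(x: u32) -> u32 {
    x + 1 // `T` never appears: one shared instantiation would suffice
}

fn main() {
    let _ = uses_t(0u8);
    // Without polymorphization these are two distinct monomorphizations,
    // even though their bodies are identical.
    let _ = ignores_t::<String>(1);
    let _ = ignores_t::<Vec<u8>>(2);
}
```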
+use rustc_hir::{def::DefKind, def_id::DefId, ConstContext};
+use rustc_index::bit_set::FiniteBitSet;
+use rustc_middle::mir::{
+ visit::{TyContext, Visitor},
+ Local, LocalDecl, Location,
+};
+use rustc_middle::ty::{
+ self,
+ query::Providers,
+ subst::SubstsRef,
+ visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor},
+ Const, Ty, TyCtxt,
+};
+use rustc_span::symbol::sym;
+use std::convert::TryInto;
+use std::ops::ControlFlow;
+
+/// Provide implementations of queries relating to polymorphization analysis.
+pub fn provide(providers: &mut Providers) {
+ providers.unused_generic_params = unused_generic_params;
+}
+
+/// Determine which generic parameters are used by the instance.
+///
+/// Returns a bitset where bits representing unused parameters are set (`is_empty` indicates all
+/// parameters are used).
+#[instrument(level = "debug", skip(tcx))]
+fn unused_generic_params<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: ty::InstanceDef<'tcx>,
+) -> FiniteBitSet<u32> {
+ if !tcx.sess.opts.unstable_opts.polymorphize {
+        // If polymorphization is disabled, then all parameters are considered used.
+ return FiniteBitSet::new_empty();
+ }
+
+ let def_id = instance.def_id();
+ // Exit early if this instance should not be polymorphized.
+ if !should_polymorphize(tcx, def_id, instance) {
+ return FiniteBitSet::new_empty();
+ }
+
+ let generics = tcx.generics_of(def_id);
+ debug!(?generics);
+
+ // Exit early when there are no parameters to be unused.
+ if generics.count() == 0 {
+ return FiniteBitSet::new_empty();
+ }
+
+    // Create a bitset with the N rightmost bits set, one bit per parameter.
+ let generics_count: u32 =
+ generics.count().try_into().expect("more generic parameters than can fit into a `u32`");
+ let mut unused_parameters = FiniteBitSet::<u32>::new_empty();
+ unused_parameters.set_range(0..generics_count);
+ debug!(?unused_parameters, "(start)");
+
+ mark_used_by_default_parameters(tcx, def_id, generics, &mut unused_parameters);
+ debug!(?unused_parameters, "(after default)");
+
+ // Visit MIR and accumulate used generic parameters.
+ let body = match tcx.hir().body_const_context(def_id.expect_local()) {
+ // Const functions are actually called and should thus be considered for polymorphization
+ // via their runtime MIR.
+ Some(ConstContext::ConstFn) | None => tcx.optimized_mir(def_id),
+ Some(_) => tcx.mir_for_ctfe(def_id),
+ };
+ let mut vis = MarkUsedGenericParams { tcx, def_id, unused_parameters: &mut unused_parameters };
+ vis.visit_body(body);
+ debug!(?unused_parameters, "(end)");
+
+ // Emit errors for debugging and testing if enabled.
+ if !unused_parameters.is_empty() {
+ emit_unused_generic_params_error(tcx, def_id, generics, &unused_parameters);
+ }
+
+ unused_parameters
+}
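
A toy model of the initial "all unused" mask built above, assuming a plain `u32` in place of `FiniteBitSet<u32>` (illustrative only; the helper name is invented and it deliberately rejects 32 or more parameters):

```rust
fn initial_unused_mask(generics_count: u32) -> u32 {
    assert!(generics_count < 32);
    // Set the `generics_count` rightmost bits: every parameter starts out
    // presumed unused, and visiting the MIR clears bits as uses are found.
    (1u32 << generics_count) - 1
}

fn main() {
    assert_eq!(initial_unused_mask(3), 0b111);
    assert_eq!(initial_unused_mask(0), 0); // no parameters, nothing unused
}
```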
+
+/// Returns `true` if the instance should be polymorphized.
+fn should_polymorphize<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ instance: ty::InstanceDef<'tcx>,
+) -> bool {
+ // If an instance's MIR body is not polymorphic then the modified substitutions that are
+ // derived from polymorphization's result won't make any difference.
+ if !instance.has_polymorphic_mir_body() {
+ return false;
+ }
+
+ // Don't polymorphize intrinsics or virtual calls - calling `instance_mir` will panic.
+ if matches!(instance, ty::InstanceDef::Intrinsic(..) | ty::InstanceDef::Virtual(..)) {
+ return false;
+ }
+
+ // Polymorphization results are stored in cross-crate metadata only when there are unused
+ // parameters, so assume that non-local items have only used parameters (otherwise this
+ // query would not be invoked and the cross-crate metadata would be used instead).
+ if !def_id.is_local() {
+ return false;
+ }
+
+ // Foreign items have no bodies to analyze.
+ if tcx.is_foreign_item(def_id) {
+ return false;
+ }
+
+ // Make sure there is MIR available.
+ match tcx.hir().body_const_context(def_id.expect_local()) {
+ Some(ConstContext::ConstFn) | None if !tcx.is_mir_available(def_id) => {
+ debug!("no mir available");
+ return false;
+ }
+ Some(_) if !tcx.is_ctfe_mir_available(def_id) => {
+ debug!("no ctfe mir available");
+ return false;
+ }
+ _ => true,
+ }
+}
+
+/// Some parameters are considered used-by-default, such as non-generic parameters and the dummy
+/// generic parameters from closures; this function marks them as used.
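+///
+/// For example, the `DefKind::Closure | DefKind::Generator` arm below marks all of a closure's
+/// own parameters as used, and the recursion on `generics.parent` then applies the same
+/// defaults to the parameters inherited from the enclosing item.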
+#[instrument(level = "debug", skip(tcx, def_id, generics, unused_parameters))]
+fn mark_used_by_default_parameters<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ generics: &'tcx ty::Generics,
+ unused_parameters: &mut FiniteBitSet<u32>,
+) {
+ match tcx.def_kind(def_id) {
+ DefKind::Closure | DefKind::Generator => {
+ for param in &generics.params {
+ debug!(?param, "(closure/gen)");
+ unused_parameters.clear(param.index);
+ }
+ }
+ DefKind::Mod
+ | DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Variant
+ | DefKind::Trait
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy
+ | DefKind::TyParam
+ | DefKind::Fn
+ | DefKind::Const
+ | DefKind::ConstParam
+ | DefKind::Static(_)
+ | DefKind::Ctor(_, _)
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::Macro(_)
+ | DefKind::ExternCrate
+ | DefKind::Use
+ | DefKind::ForeignMod
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::OpaqueTy
+ | DefKind::Field
+ | DefKind::LifetimeParam
+ | DefKind::GlobalAsm
+ | DefKind::Impl => {
+ for param in &generics.params {
+ debug!(?param, "(other)");
+ if let ty::GenericParamDefKind::Lifetime = param.kind {
+ unused_parameters.clear(param.index);
+ }
+ }
+ }
+ }
+
+ if let Some(parent) = generics.parent {
+ mark_used_by_default_parameters(tcx, parent, tcx.generics_of(parent), unused_parameters);
+ }
+}
+
+/// Emit errors for the function annotated by `#[rustc_polymorphize_error]`, labelling each generic
+/// parameter which was unused.
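+///
+/// (Illustrative) a test might annotate `#[rustc_polymorphize_error] fn foo<T>() {}` and expect
+/// the "item has unused generic parameters" error with `T` labelled as unused.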
+#[instrument(level = "debug", skip(tcx, generics))]
+fn emit_unused_generic_params_error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ generics: &'tcx ty::Generics,
+ unused_parameters: &FiniteBitSet<u32>,
+) {
+ let base_def_id = tcx.typeck_root_def_id(def_id);
+ if !tcx.has_attr(base_def_id, sym::rustc_polymorphize_error) {
+ return;
+ }
+
+ let fn_span = match tcx.opt_item_ident(def_id) {
+ Some(ident) => ident.span,
+ _ => tcx.def_span(def_id),
+ };
+
+ let mut err = tcx.sess.struct_span_err(fn_span, "item has unused generic parameters");
+
+ let mut next_generics = Some(generics);
+ while let Some(generics) = next_generics {
+ for param in &generics.params {
+ if unused_parameters.contains(param.index).unwrap_or(false) {
+ debug!(?param);
+ let def_span = tcx.def_span(param.def_id);
+ err.span_label(def_span, &format!("generic parameter `{}` is unused", param.name));
+ }
+ }
+
+ next_generics = generics.parent.map(|did| tcx.generics_of(did));
+ }
+
+ err.emit();
+}
+
+/// Visitor used to aggregate generic parameter uses.
+struct MarkUsedGenericParams<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ unused_parameters: &'a mut FiniteBitSet<u32>,
+}
+
+impl<'a, 'tcx> MarkUsedGenericParams<'a, 'tcx> {
+ /// Invoke `unused_generic_params` on a body contained within the current item (e.g.
+ /// a closure, generator or constant).
+ #[instrument(level = "debug", skip(self, def_id, substs))]
+ fn visit_child_body(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) {
+ let instance = ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id));
+ let unused = self.tcx.unused_generic_params(instance);
+ debug!(?self.unused_parameters, ?unused);
+ for (i, arg) in substs.iter().enumerate() {
+ let i = i.try_into().unwrap();
+ if !unused.contains(i).unwrap_or(false) {
+ arg.visit_with(self);
+ }
+ }
+ debug!(?self.unused_parameters);
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
+ #[instrument(level = "debug", skip(self, local))]
+ fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) {
+ if local == Local::from_usize(1) {
+ let def_kind = self.tcx.def_kind(self.def_id);
+ if matches!(def_kind, DefKind::Closure | DefKind::Generator) {
+ // Skip visiting the closure/generator that is currently being processed. This only
+ // happens because the first argument to the closure is a reference to itself, and
+ // visiting it would call `visit_substs`, resulting in every generic parameter
+ // captured by the closure being considered used by default.
+ debug!("skipping closure substs");
+ return;
+ }
+ }
+
+ self.super_local_decl(local, local_decl);
+ }
+
+ fn visit_const(&mut self, c: Const<'tcx>, _: Location) {
+ c.visit_with(self);
+ }
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>, _: TyContext) {
+ ty.visit_with(self);
+ }
+}
+
+impl<'a, 'tcx> TypeVisitor<'tcx> for MarkUsedGenericParams<'a, 'tcx> {
+ #[instrument(level = "debug", skip(self))]
+ fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if !c.has_param_types_or_consts() {
+ return ControlFlow::CONTINUE;
+ }
+
+ match c.kind() {
+ ty::ConstKind::Param(param) => {
+ debug!(?param);
+ self.unused_parameters.clear(param.index);
+ ControlFlow::CONTINUE
+ }
+ ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs: _, promoted: Some(p) })
+ // Avoid considering `T` unused when constants are of the form:
+ // `<Self as Foo<T>>::foo::promoted[p]`
+ if self.def_id == def.did && !self.tcx.generics_of(def.did).has_self =>
+ {
+ // If there is a promoted, don't look at the substs - since it will always contain
+ // the generic parameters, instead, traverse the promoted MIR.
+ let promoted = self.tcx.promoted_mir(def.did);
+ self.visit_body(&promoted[p]);
+ ControlFlow::CONTINUE
+ }
+ ty::ConstKind::Unevaluated(uv)
+ if matches!(self.tcx.def_kind(uv.def.did), DefKind::AnonConst | DefKind::InlineConst) =>
+ {
+ self.visit_child_body(uv.def.did, uv.substs);
+ ControlFlow::CONTINUE
+ }
+ _ => c.super_visit_with(self),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if !ty.has_param_types_or_consts() {
+ return ControlFlow::CONTINUE;
+ }
+
+ match *ty.kind() {
+ ty::Closure(def_id, substs) | ty::Generator(def_id, substs, ..) => {
+ debug!(?def_id);
+ // Avoid cycle errors with generators.
+ if def_id == self.def_id {
+ return ControlFlow::CONTINUE;
+ }
+
+ // Consider any generic parameters used by any closures/generators as used in the
+ // parent.
+ self.visit_child_body(def_id, substs);
+ ControlFlow::CONTINUE
+ }
+ ty::Param(param) => {
+ debug!(?param);
+ self.unused_parameters.clear(param.index);
+ ControlFlow::CONTINUE
+ }
+ _ => ty.super_visit_with(self),
+ }
+ }
+}
+
+/// Visitor used to check if a generic parameter is used.
+struct HasUsedGenericParams<'a> {
+ unused_parameters: &'a FiniteBitSet<u32>,
+}
+
+impl<'a, 'tcx> TypeVisitor<'tcx> for HasUsedGenericParams<'a> {
+ type BreakTy = ();
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if !c.has_param_types_or_consts() {
+ return ControlFlow::CONTINUE;
+ }
+
+ match c.kind() {
+ ty::ConstKind::Param(param) => {
+ if self.unused_parameters.contains(param.index).unwrap_or(false) {
+ ControlFlow::CONTINUE
+ } else {
+ ControlFlow::BREAK
+ }
+ }
+ _ => c.super_visit_with(self),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if !ty.has_param_types_or_consts() {
+ return ControlFlow::CONTINUE;
+ }
+
+ match ty.kind() {
+ ty::Param(param) => {
+ if self.unused_parameters.contains(param.index).unwrap_or(false) {
+ ControlFlow::CONTINUE
+ } else {
+ ControlFlow::BREAK
+ }
+ }
+ _ => ty.super_visit_with(self),
+ }
+ }
+}
diff --git a/compiler/rustc_monomorphize/src/util.rs b/compiler/rustc_monomorphize/src/util.rs
new file mode 100644
index 000000000..847e64dc2
--- /dev/null
+++ b/compiler/rustc_monomorphize/src/util.rs
@@ -0,0 +1,70 @@
+use rustc_middle::ty::{self, ClosureSizeProfileData, Instance, TyCtxt};
+use std::fs::OpenOptions;
+use std::io::prelude::*;
+
+/// For a given closure, writes out the data for profiling the impact of RFC 2229 on
+/// closure size into a CSV.
+///
+/// During a single compilation, every closure appends its row to the same file,
+/// "closure_profile_XXXXX.csv" (suffixed with the process id), which is created in the
+/// directory where the compiler is invoked.
+pub(crate) fn dump_closure_profile<'tcx>(tcx: TyCtxt<'tcx>, closure_instance: Instance<'tcx>) {
+ let Ok(mut file) = OpenOptions::new()
+ .create(true)
+ .append(true)
+ .open(&format!("closure_profile_{}.csv", std::process::id()))
+ else {
+ eprintln!("Cound't open file for writing closure profile");
+ return;
+ };
+
+ let closure_def_id = closure_instance.def_id().expect_local();
+ let typeck_results = tcx.typeck(closure_def_id);
+
+ if typeck_results.closure_size_eval.contains_key(&closure_def_id) {
+ let param_env = ty::ParamEnv::reveal_all();
+
+ let ClosureSizeProfileData { before_feature_tys, after_feature_tys } =
+ typeck_results.closure_size_eval[&closure_def_id];
+
+ let before_feature_tys = tcx.subst_and_normalize_erasing_regions(
+ closure_instance.substs,
+ param_env,
+ before_feature_tys,
+ );
+ let after_feature_tys = tcx.subst_and_normalize_erasing_regions(
+ closure_instance.substs,
+ param_env,
+ after_feature_tys,
+ );
+
+ let new_size = tcx
+ .layout_of(param_env.and(after_feature_tys))
+ .map(|l| format!("{:?}", l.size.bytes()))
+ .unwrap_or_else(|e| format!("Failed {:?}", e));
+
+ let old_size = tcx
+ .layout_of(param_env.and(before_feature_tys))
+ .map(|l| format!("{:?}", l.size.bytes()))
+ .unwrap_or_else(|e| format!("Failed {:?}", e));
+
+ let closure_span = tcx.def_span(closure_def_id);
+ let src_file = tcx.sess.source_map().span_to_filename(closure_span);
+ let line_nos = tcx
+ .sess
+ .source_map()
+ .span_to_lines(closure_span)
+ .map(|l| format!("{:?} {:?}", l.lines.first(), l.lines.last()))
+ .unwrap_or_else(|e| format!("{:?}", e));
+
+ if let Err(e) = writeln!(
+ file,
+ "{}, {}, {}, {:?}",
+ old_size,
+ new_size,
+ src_file.prefer_local(),
+ line_nos
+ ) {
+ eprintln!("Error writing to file {}", e)
+ }
+ }
+}
diff --git a/compiler/rustc_parse/Cargo.toml b/compiler/rustc_parse/Cargo.toml
new file mode 100644
index 000000000..c6ca260e9
--- /dev/null
+++ b/compiler/rustc_parse/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "rustc_parse"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+bitflags = "1.0"
+tracing = "0.1"
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_lexer = { path = "../rustc_lexer" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_ast = { path = "../rustc_ast" }
+unicode-normalization = "0.1.11"
+unicode-width = "0.1.4"
diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs
new file mode 100644
index 000000000..848e142e5
--- /dev/null
+++ b/compiler/rustc_parse/src/lexer/mod.rs
@@ -0,0 +1,717 @@
+use crate::lexer::unicode_chars::UNICODE_ARRAY;
+use rustc_ast::ast::{self, AttrStyle};
+use rustc_ast::token::{self, CommentKind, Delimiter, Token, TokenKind};
+use rustc_ast::tokenstream::{Spacing, TokenStream};
+use rustc_ast::util::unicode::contains_text_flow_control_chars;
+use rustc_errors::{error_code, Applicability, DiagnosticBuilder, ErrorGuaranteed, PResult};
+use rustc_lexer::unescape::{self, Mode};
+use rustc_lexer::{Base, DocStyle, RawStrError};
+use rustc_session::lint::builtin::{
+ RUST_2021_PREFIXES_INCOMPATIBLE_SYNTAX, TEXT_DIRECTION_CODEPOINT_IN_COMMENT,
+};
+use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_session::parse::ParseSess;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{edition::Edition, BytePos, Pos, Span};
+
+use tracing::debug;
+
+mod tokentrees;
+mod unescape_error_reporting;
+mod unicode_chars;
+
+use unescape_error_reporting::{emit_unescape_error, escaped_char};
+
+// This type is used a lot. Make sure it doesn't unintentionally get bigger.
+//
+// This assertion is in this crate, rather than in `rustc_lexer`, because that
+// crate cannot depend on `rustc_data_structures`.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(rustc_lexer::Token, 12);
+
+#[derive(Clone, Debug)]
+pub struct UnmatchedBrace {
+ pub expected_delim: Delimiter,
+ pub found_delim: Option<Delimiter>,
+ pub found_span: Span,
+ pub unclosed_span: Option<Span>,
+ pub candidate_span: Option<Span>,
+}
+
+pub(crate) fn parse_token_trees<'a>(
+ sess: &'a ParseSess,
+ src: &'a str,
+ start_pos: BytePos,
+ override_span: Option<Span>,
+) -> (PResult<'a, TokenStream>, Vec<UnmatchedBrace>) {
+ StringReader { sess, start_pos, pos: start_pos, src, override_span }.into_token_trees()
+}
+
+struct StringReader<'a> {
+ sess: &'a ParseSess,
+ /// Initial position, read-only.
+ start_pos: BytePos,
+ /// The absolute offset within the source_map of the current character.
+ pos: BytePos,
+ /// Source text to tokenize.
+ src: &'a str,
+ override_span: Option<Span>,
+}
+
+impl<'a> StringReader<'a> {
+ fn mk_sp(&self, lo: BytePos, hi: BytePos) -> Span {
+ self.override_span.unwrap_or_else(|| Span::with_root_ctxt(lo, hi))
+ }
+
+ /// Returns the next token, and info about preceding whitespace, if any.
+ fn next_token(&mut self) -> (Spacing, Token) {
+ let mut spacing = Spacing::Joint;
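+ // `spacing` records whether this token directly abuts the previous one: it stays
+ // `Joint` unless trivia (whitespace or a comment) is skipped below. E.g. in `+=`
+ // the `=` is returned as `Joint`, while in `+ =` it is `Alone`.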
+
+ // Skip `#!` at the start of the file
+ if self.pos == self.start_pos
+ && let Some(shebang_len) = rustc_lexer::strip_shebang(self.src)
+ {
+ self.pos = self.pos + BytePos::from_usize(shebang_len);
+ spacing = Spacing::Alone;
+ }
+
+ // Skip trivial (whitespace & comments) tokens
+ loop {
+ let start_src_index = self.src_index(self.pos);
+ let text: &str = &self.src[start_src_index..];
+
+ if text.is_empty() {
+ let span = self.mk_sp(self.pos, self.pos);
+ return (spacing, Token::new(token::Eof, span));
+ }
+
+ let token = rustc_lexer::first_token(text);
+
+ let start = self.pos;
+ self.pos = self.pos + BytePos(token.len);
+
+ debug!("next_token: {:?}({:?})", token.kind, self.str_from(start));
+
+ match self.cook_lexer_token(token.kind, start) {
+ Some(kind) => {
+ let span = self.mk_sp(start, self.pos);
+ return (spacing, Token::new(kind, span));
+ }
+ None => spacing = Spacing::Alone,
+ }
+ }
+ }
+
+ /// Report a fatal lexical error with a given span.
+ fn fatal_span(&self, sp: Span, m: &str) -> ! {
+ self.sess.span_diagnostic.span_fatal(sp, m)
+ }
+
+ /// Report a lexical error with a given span.
+ fn err_span(&self, sp: Span, m: &str) {
+ self.sess.span_diagnostic.struct_span_err(sp, m).emit();
+ }
+
+ /// Report a fatal error spanning [`from_pos`, `to_pos`).
+ fn fatal_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) -> ! {
+ self.fatal_span(self.mk_sp(from_pos, to_pos), m)
+ }
+
+ /// Report a lexical error spanning [`from_pos`, `to_pos`).
+ fn err_span_(&self, from_pos: BytePos, to_pos: BytePos, m: &str) {
+ self.err_span(self.mk_sp(from_pos, to_pos), m)
+ }
+
+ fn struct_fatal_span_char(
+ &self,
+ from_pos: BytePos,
+ to_pos: BytePos,
+ m: &str,
+ c: char,
+ ) -> DiagnosticBuilder<'a, !> {
+ self.sess
+ .span_diagnostic
+ .struct_span_fatal(self.mk_sp(from_pos, to_pos), &format!("{}: {}", m, escaped_char(c)))
+ }
+
+ fn struct_err_span_char(
+ &self,
+ from_pos: BytePos,
+ to_pos: BytePos,
+ m: &str,
+ c: char,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ self.sess
+ .span_diagnostic
+ .struct_span_err(self.mk_sp(from_pos, to_pos), &format!("{}: {}", m, escaped_char(c)))
+ }
+
+ /// Detect usages of Unicode codepoints changing the direction of the text on screen and loudly
+ /// complain about it.
+ fn lint_unicode_text_flow(&self, start: BytePos) {
+ // The opening delimiter (of length 2) is not included in the comment text.
+ let content_start = start + BytePos(2);
+ let content = self.str_from(content_start);
+ if contains_text_flow_control_chars(content) {
+ let span = self.mk_sp(start, self.pos);
+ self.sess.buffer_lint_with_diagnostic(
+ &TEXT_DIRECTION_CODEPOINT_IN_COMMENT,
+ span,
+ ast::CRATE_NODE_ID,
+ "unicode codepoint changing visible direction of text present in comment",
+ BuiltinLintDiagnostics::UnicodeTextFlow(span, content.to_string()),
+ );
+ }
+ }
+
+ /// Turns simple `rustc_lexer::TokenKind` enum into a rich
+ /// `rustc_ast::TokenKind`. This turns strings into interned
+ /// symbols and runs additional validation.
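+ /// Returning `None` means the token is trivia (whitespace or a non-doc comment)
+ /// and produces no token, e.g. a plain `// note` comment is dropped here.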
+ fn cook_lexer_token(&self, token: rustc_lexer::TokenKind, start: BytePos) -> Option<TokenKind> {
+ Some(match token {
+ rustc_lexer::TokenKind::LineComment { doc_style } => {
+ // Skip non-doc comments
+ let Some(doc_style) = doc_style else {
+ self.lint_unicode_text_flow(start);
+ return None;
+ };
+
+ // The opening delimiter (of length 3) is not included in the symbol.
+ let content_start = start + BytePos(3);
+ let content = self.str_from(content_start);
+ self.cook_doc_comment(content_start, content, CommentKind::Line, doc_style)
+ }
+ rustc_lexer::TokenKind::BlockComment { doc_style, terminated } => {
+ if !terminated {
+ self.report_unterminated_block_comment(start, doc_style);
+ }
+
+ // Skip non-doc comments
+ let Some(doc_style) = doc_style else {
+ self.lint_unicode_text_flow(start);
+ return None;
+ };
+
+ // The opening delimiter (of length 3) and the closing delimiter (of length 2)
+ // are not included in the symbol.
+ let content_start = start + BytePos(3);
+ let content_end = self.pos - BytePos(if terminated { 2 } else { 0 });
+ let content = self.str_from_to(content_start, content_end);
+ self.cook_doc_comment(content_start, content, CommentKind::Block, doc_style)
+ }
+ rustc_lexer::TokenKind::Whitespace => return None,
+ rustc_lexer::TokenKind::Ident
+ | rustc_lexer::TokenKind::RawIdent
+ | rustc_lexer::TokenKind::UnknownPrefix => {
+ let is_raw_ident = token == rustc_lexer::TokenKind::RawIdent;
+ let is_unknown_prefix = token == rustc_lexer::TokenKind::UnknownPrefix;
+ let mut ident_start = start;
+ if is_raw_ident {
+ ident_start = ident_start + BytePos(2);
+ }
+ if is_unknown_prefix {
+ self.report_unknown_prefix(start);
+ }
+ let sym = nfc_normalize(self.str_from(ident_start));
+ let span = self.mk_sp(start, self.pos);
+ self.sess.symbol_gallery.insert(sym, span);
+ if is_raw_ident {
+ if !sym.can_be_raw() {
+ self.err_span(span, &format!("`{}` cannot be a raw identifier", sym));
+ }
+ self.sess.raw_identifier_spans.borrow_mut().push(span);
+ }
+ token::Ident(sym, is_raw_ident)
+ }
+ rustc_lexer::TokenKind::InvalidIdent
+ // Do not recover an identifier with emoji if the codepoint is a confusable
+ // with a recoverable substitution token, like `➖`.
+ if !UNICODE_ARRAY
+ .iter()
+ .any(|&(c, _, _)| {
+ let sym = self.str_from(start);
+ sym.chars().count() == 1 && c == sym.chars().next().unwrap()
+ })
+ =>
+ {
+ let sym = nfc_normalize(self.str_from(start));
+ let span = self.mk_sp(start, self.pos);
+ self.sess.bad_unicode_identifiers.borrow_mut().entry(sym).or_default().push(span);
+ token::Ident(sym, false)
+ }
+ rustc_lexer::TokenKind::Literal { kind, suffix_start } => {
+ let suffix_start = start + BytePos(suffix_start);
+ let (kind, symbol) = self.cook_lexer_literal(start, suffix_start, kind);
+ let suffix = if suffix_start < self.pos {
+ let string = self.str_from(suffix_start);
+ if string == "_" {
+ self.sess
+ .span_diagnostic
+ .struct_span_warn(
+ self.mk_sp(suffix_start, self.pos),
+ "underscore literal suffix is not allowed",
+ )
+ .warn(
+ "this was previously accepted by the compiler but is \
+ being phased out; it will become a hard error in \
+ a future release!",
+ )
+ .note(
+ "see issue #42326 \
+ <https://github.com/rust-lang/rust/issues/42326> \
+ for more information",
+ )
+ .emit();
+ None
+ } else {
+ Some(Symbol::intern(string))
+ }
+ } else {
+ None
+ };
+ token::Literal(token::Lit { kind, symbol, suffix })
+ }
+ rustc_lexer::TokenKind::Lifetime { starts_with_number } => {
+ // Include the leading `'` in the real identifier, for macro
+ // expansion purposes. See #12512 for the gory details of why
+ // this is necessary.
+ let lifetime_name = self.str_from(start);
+ if starts_with_number {
+ self.err_span_(start, self.pos, "lifetimes cannot start with a number");
+ }
+ let ident = Symbol::intern(lifetime_name);
+ token::Lifetime(ident)
+ }
+ rustc_lexer::TokenKind::Semi => token::Semi,
+ rustc_lexer::TokenKind::Comma => token::Comma,
+ rustc_lexer::TokenKind::Dot => token::Dot,
+ rustc_lexer::TokenKind::OpenParen => token::OpenDelim(Delimiter::Parenthesis),
+ rustc_lexer::TokenKind::CloseParen => token::CloseDelim(Delimiter::Parenthesis),
+ rustc_lexer::TokenKind::OpenBrace => token::OpenDelim(Delimiter::Brace),
+ rustc_lexer::TokenKind::CloseBrace => token::CloseDelim(Delimiter::Brace),
+ rustc_lexer::TokenKind::OpenBracket => token::OpenDelim(Delimiter::Bracket),
+ rustc_lexer::TokenKind::CloseBracket => token::CloseDelim(Delimiter::Bracket),
+ rustc_lexer::TokenKind::At => token::At,
+ rustc_lexer::TokenKind::Pound => token::Pound,
+ rustc_lexer::TokenKind::Tilde => token::Tilde,
+ rustc_lexer::TokenKind::Question => token::Question,
+ rustc_lexer::TokenKind::Colon => token::Colon,
+ rustc_lexer::TokenKind::Dollar => token::Dollar,
+ rustc_lexer::TokenKind::Eq => token::Eq,
+ rustc_lexer::TokenKind::Bang => token::Not,
+ rustc_lexer::TokenKind::Lt => token::Lt,
+ rustc_lexer::TokenKind::Gt => token::Gt,
+ rustc_lexer::TokenKind::Minus => token::BinOp(token::Minus),
+ rustc_lexer::TokenKind::And => token::BinOp(token::And),
+ rustc_lexer::TokenKind::Or => token::BinOp(token::Or),
+ rustc_lexer::TokenKind::Plus => token::BinOp(token::Plus),
+ rustc_lexer::TokenKind::Star => token::BinOp(token::Star),
+ rustc_lexer::TokenKind::Slash => token::BinOp(token::Slash),
+ rustc_lexer::TokenKind::Caret => token::BinOp(token::Caret),
+ rustc_lexer::TokenKind::Percent => token::BinOp(token::Percent),
+
+ rustc_lexer::TokenKind::Unknown | rustc_lexer::TokenKind::InvalidIdent => {
+ let c = self.str_from(start).chars().next().unwrap();
+ let mut err =
+ self.struct_err_span_char(start, self.pos, "unknown start of token", c);
+ // FIXME: the lexer could turn unicode homoglyphs into the token for their ASCII
+ // version directly, instead of keeping a table in `check_for_substitution`.
+ // Ideally, this should be inside `rustc_lexer`. However, we should first remove
+ // compound tokens like `<<` from `rustc_lexer`, and then add fancier error
+ // recovery to it, as there will be less overall work to do this way.
+ let token = unicode_chars::check_for_substitution(self, start, c, &mut err);
+ if c == '\x00' {
+ err.help("source files must contain UTF-8 encoded text, unexpected null bytes might occur when a different encoding is used");
+ }
+ err.emit();
+ token?
+ }
+ })
+ }
+
+ fn cook_doc_comment(
+ &self,
+ content_start: BytePos,
+ content: &str,
+ comment_kind: CommentKind,
+ doc_style: DocStyle,
+ ) -> TokenKind {
+ if content.contains('\r') {
+ for (idx, _) in content.char_indices().filter(|&(_, c)| c == '\r') {
+ self.err_span_(
+ content_start + BytePos(idx as u32),
+ content_start + BytePos(idx as u32 + 1),
+ match comment_kind {
+ CommentKind::Line => "bare CR not allowed in doc-comment",
+ CommentKind::Block => "bare CR not allowed in block doc-comment",
+ },
+ );
+ }
+ }
+
+ let attr_style = match doc_style {
+ DocStyle::Outer => AttrStyle::Outer,
+ DocStyle::Inner => AttrStyle::Inner,
+ };
+
+ token::DocComment(comment_kind, attr_style, Symbol::intern(content))
+ }
+
+ fn cook_lexer_literal(
+ &self,
+ start: BytePos,
+ suffix_start: BytePos,
+ kind: rustc_lexer::LiteralKind,
+ ) -> (token::LitKind, Symbol) {
+ // prefix means `"` or `br"` or `r###"`, ...
+ let (lit_kind, mode, prefix_len, postfix_len) = match kind {
+ rustc_lexer::LiteralKind::Char { terminated } => {
+ if !terminated {
+ self.sess.span_diagnostic.span_fatal_with_code(
+ self.mk_sp(start, suffix_start),
+ "unterminated character literal",
+ error_code!(E0762),
+ )
+ }
+ (token::Char, Mode::Char, 1, 1) // ' '
+ }
+ rustc_lexer::LiteralKind::Byte { terminated } => {
+ if !terminated {
+ self.sess.span_diagnostic.span_fatal_with_code(
+ self.mk_sp(start + BytePos(1), suffix_start),
+ "unterminated byte constant",
+ error_code!(E0763),
+ )
+ }
+ (token::Byte, Mode::Byte, 2, 1) // b' '
+ }
+ rustc_lexer::LiteralKind::Str { terminated } => {
+ if !terminated {
+ self.sess.span_diagnostic.span_fatal_with_code(
+ self.mk_sp(start, suffix_start),
+ "unterminated double quote string",
+ error_code!(E0765),
+ )
+ }
+ (token::Str, Mode::Str, 1, 1) // " "
+ }
+ rustc_lexer::LiteralKind::ByteStr { terminated } => {
+ if !terminated {
+ self.sess.span_diagnostic.span_fatal_with_code(
+ self.mk_sp(start + BytePos(1), suffix_start),
+ "unterminated double quote byte string",
+ error_code!(E0766),
+ )
+ }
+ (token::ByteStr, Mode::ByteStr, 2, 1) // b" "
+ }
+ rustc_lexer::LiteralKind::RawStr { n_hashes } => {
+ if let Some(n_hashes) = n_hashes {
+ let n = u32::from(n_hashes);
+ (token::StrRaw(n_hashes), Mode::RawStr, 2 + n, 1 + n) // r##" "##
+ } else {
+ self.report_raw_str_error(start, 1);
+ }
+ }
+ rustc_lexer::LiteralKind::RawByteStr { n_hashes } => {
+ if let Some(n_hashes) = n_hashes {
+ let n = u32::from(n_hashes);
+ (token::ByteStrRaw(n_hashes), Mode::RawByteStr, 3 + n, 1 + n) // br##" "##
+ } else {
+ self.report_raw_str_error(start, 2);
+ }
+ }
+ rustc_lexer::LiteralKind::Int { base, empty_int } => {
+ return if empty_int {
+ self.sess
+ .span_diagnostic
+ .struct_span_err_with_code(
+ self.mk_sp(start, suffix_start),
+ "no valid digits found for number",
+ error_code!(E0768),
+ )
+ .emit();
+ (token::Integer, sym::integer(0))
+ } else {
+ self.validate_int_literal(base, start, suffix_start);
+ (token::Integer, self.symbol_from_to(start, suffix_start))
+ };
+ }
+ rustc_lexer::LiteralKind::Float { base, empty_exponent } => {
+ if empty_exponent {
+ self.err_span_(start, self.pos, "expected at least one digit in exponent");
+ }
+
+ match base {
+ Base::Hexadecimal => self.err_span_(
+ start,
+ suffix_start,
+ "hexadecimal float literal is not supported",
+ ),
+ Base::Octal => {
+ self.err_span_(start, suffix_start, "octal float literal is not supported")
+ }
+ Base::Binary => {
+ self.err_span_(start, suffix_start, "binary float literal is not supported")
+ }
+ _ => (),
+ }
+
+ let id = self.symbol_from_to(start, suffix_start);
+ return (token::Float, id);
+ }
+ };
+ let content_start = start + BytePos(prefix_len);
+ let content_end = suffix_start - BytePos(postfix_len);
+ let id = self.symbol_from_to(content_start, content_end);
+ self.validate_literal_escape(mode, content_start, content_end, prefix_len, postfix_len);
+ (lit_kind, id)
+ }
+
+ #[inline]
+ fn src_index(&self, pos: BytePos) -> usize {
+ (pos - self.start_pos).to_usize()
+ }
+
+ /// Slice of the source text from `start` up to but excluding `self.pos`.
+ fn str_from(&self, start: BytePos) -> &str {
+ self.str_from_to(start, self.pos)
+ }
+
+ /// Interns the source text spanning from `start` up to but excluding `end`.
+ fn symbol_from_to(&self, start: BytePos, end: BytePos) -> Symbol {
+ debug!("taking an ident from {:?} to {:?}", start, end);
+ Symbol::intern(self.str_from_to(start, end))
+ }
+
+ /// Slice of the source text spanning from `start` up to but excluding `end`.
+ fn str_from_to(&self, start: BytePos, end: BytePos) -> &str {
+ &self.src[self.src_index(start)..self.src_index(end)]
+ }
+
+ fn report_raw_str_error(&self, start: BytePos, prefix_len: u32) -> ! {
+ match rustc_lexer::validate_raw_str(self.str_from(start), prefix_len) {
+ Err(RawStrError::InvalidStarter { bad_char }) => {
+ self.report_non_started_raw_string(start, bad_char)
+ }
+ Err(RawStrError::NoTerminator { expected, found, possible_terminator_offset }) => self
+ .report_unterminated_raw_string(start, expected, possible_terminator_offset, found),
+ Err(RawStrError::TooManyDelimiters { found }) => {
+ self.report_too_many_hashes(start, found)
+ }
+ Ok(()) => panic!("no error found for supposedly invalid raw string literal"),
+ }
+ }
+
+ fn report_non_started_raw_string(&self, start: BytePos, bad_char: char) -> ! {
+ self.struct_fatal_span_char(
+ start,
+ self.pos,
+ "found invalid character; only `#` is allowed in raw string delimitation",
+ bad_char,
+ )
+ .emit()
+ }
+
+ fn report_unterminated_raw_string(
+ &self,
+ start: BytePos,
+ n_hashes: u32,
+ possible_offset: Option<u32>,
+ found_terminators: u32,
+ ) -> ! {
+ let mut err = self.sess.span_diagnostic.struct_span_fatal_with_code(
+ self.mk_sp(start, start),
+ "unterminated raw string",
+ error_code!(E0748),
+ );
+
+ err.span_label(self.mk_sp(start, start), "unterminated raw string");
+
+ if n_hashes > 0 {
+ err.note(&format!(
+ "this raw string should be terminated with `\"{}`",
+ "#".repeat(n_hashes as usize)
+ ));
+ }
+
+ if let Some(possible_offset) = possible_offset {
+ let lo = start + BytePos(possible_offset as u32);
+ let hi = lo + BytePos(found_terminators as u32);
+ let span = self.mk_sp(lo, hi);
+ err.span_suggestion(
+ span,
+ "consider terminating the string here",
+ "#".repeat(n_hashes as usize),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ err.emit()
+ }
+
+ fn report_unterminated_block_comment(&self, start: BytePos, doc_style: Option<DocStyle>) {
+ let msg = match doc_style {
+ Some(_) => "unterminated block doc-comment",
+ None => "unterminated block comment",
+ };
+ let last_bpos = self.pos;
+ let mut err = self.sess.span_diagnostic.struct_span_fatal_with_code(
+ self.mk_sp(start, last_bpos),
+ msg,
+ error_code!(E0758),
+ );
+ let mut nested_block_comment_open_idxs = vec![];
+ let mut last_nested_block_comment_idxs = None;
+ let mut content_chars = self.str_from(start).char_indices().peekable();
+
+ while let Some((idx, current_char)) = content_chars.next() {
+ match content_chars.peek() {
+ Some((_, '*')) if current_char == '/' => {
+ nested_block_comment_open_idxs.push(idx);
+ }
+ Some((_, '/')) if current_char == '*' => {
+ last_nested_block_comment_idxs =
+ nested_block_comment_open_idxs.pop().map(|open_idx| (open_idx, idx));
+ }
+ _ => {}
+ };
+ }
+
+ if let Some((nested_open_idx, nested_close_idx)) = last_nested_block_comment_idxs {
+ err.span_label(self.mk_sp(start, start + BytePos(2)), msg)
+ .span_label(
+ self.mk_sp(
+ start + BytePos(nested_open_idx as u32),
+ start + BytePos(nested_open_idx as u32 + 2),
+ ),
+ "...as last nested comment starts here, maybe you want to close this instead?",
+ )
+ .span_label(
+ self.mk_sp(
+ start + BytePos(nested_close_idx as u32),
+ start + BytePos(nested_close_idx as u32 + 2),
+ ),
+ "...and last nested comment terminates here.",
+ );
+ }
+
+ err.emit();
+ }
+
+ // RFC 3101 introduced the idea of (reserved) prefixes. As of Rust 2021,
+ // using an (unknown) prefix is an error. In earlier editions, however, it
+ // only results in an (allowed-by-default) lint, and the prefix is treated
+ // as a regular identifier token.
+ fn report_unknown_prefix(&self, start: BytePos) {
+ let prefix_span = self.mk_sp(start, self.pos);
+ let prefix_str = self.str_from_to(start, self.pos);
+ let msg = format!("prefix `{}` is unknown", prefix_str);
+
+ let expn_data = prefix_span.ctxt().outer_expn_data();
+
+ if expn_data.edition >= Edition::Edition2021 {
+ // In Rust 2021, this is a hard error.
+ let mut err = self.sess.span_diagnostic.struct_span_err(prefix_span, &msg);
+ err.span_label(prefix_span, "unknown prefix");
+ if prefix_str == "rb" {
+ err.span_suggestion_verbose(
+ prefix_span,
+ "use `br` for a raw byte string",
+ "br",
+ Applicability::MaybeIncorrect,
+ );
+ } else if expn_data.is_root() {
+ err.span_suggestion_verbose(
+ prefix_span.shrink_to_hi(),
+ "consider inserting whitespace here",
+ " ",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.note("prefixed identifiers and literals are reserved since Rust 2021");
+ err.emit();
+ } else {
+ // Before Rust 2021, only emit a lint for migration.
+ self.sess.buffer_lint_with_diagnostic(
+ &RUST_2021_PREFIXES_INCOMPATIBLE_SYNTAX,
+ prefix_span,
+ ast::CRATE_NODE_ID,
+ &msg,
+ BuiltinLintDiagnostics::ReservedPrefix(prefix_span),
+ );
+ }
+ }
+
+ fn report_too_many_hashes(&self, start: BytePos, found: u32) -> ! {
+ self.fatal_span_(
+ start,
+ self.pos,
+ &format!(
+ "too many `#` symbols: raw strings may be delimited \
+ by up to 255 `#` symbols, but found {}",
+ found
+ ),
+ )
+ }
+
+ fn validate_literal_escape(
+ &self,
+ mode: Mode,
+ content_start: BytePos,
+ content_end: BytePos,
+ prefix_len: u32,
+ postfix_len: u32,
+ ) {
+ let lit_content = self.str_from_to(content_start, content_end);
+ unescape::unescape_literal(lit_content, mode, &mut |range, result| {
+ // Here we only check for errors. The actual unescaping is done later.
+ if let Err(err) = result {
+ let span_with_quotes = self
+ .mk_sp(content_start - BytePos(prefix_len), content_end + BytePos(postfix_len));
+ let (start, end) = (range.start as u32, range.end as u32);
+ let lo = content_start + BytePos(start);
+ let hi = lo + BytePos(end - start);
+ let span = self.mk_sp(lo, hi);
+ emit_unescape_error(
+ &self.sess.span_diagnostic,
+ lit_content,
+ span_with_quotes,
+ span,
+ mode,
+ range,
+ err,
+ );
+ }
+ });
+ }
+
+ fn validate_int_literal(&self, base: Base, content_start: BytePos, content_end: BytePos) {
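+ // Reject digits that are invalid for the base, e.g. `0b102` errors on the `2`;
+ // underscores are always allowed as separators.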
+ let base = match base {
+ Base::Binary => 2,
+ Base::Octal => 8,
+ _ => return,
+ };
+ let s = self.str_from_to(content_start + BytePos(2), content_end);
+ for (idx, c) in s.char_indices() {
+ let idx = idx as u32;
+ if c != '_' && c.to_digit(base).is_none() {
+ let lo = content_start + BytePos(2 + idx);
+ let hi = content_start + BytePos(2 + idx + c.len_utf8() as u32);
+ self.err_span_(lo, hi, &format!("invalid digit for a base {} literal", base));
+ }
+ }
+ }
+}
+
+pub fn nfc_normalize(string: &str) -> Symbol {
+ use unicode_normalization::{is_nfc_quick, IsNormalized, UnicodeNormalization};
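+ // NFC-normalize the identifier so that visually identical spellings intern to the same
+ // symbol: e.g. `e` followed by U+0301 (combining acute accent) becomes the single
+ // codepoint U+00E9 (`é`). Already-normalized strings are interned unchanged.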
+ match is_nfc_quick(string.chars()) {
+ IsNormalized::Yes => Symbol::intern(string),
+ _ => {
+ let normalized_str: String = string.chars().nfc().collect();
+ Symbol::intern(&normalized_str)
+ }
+ }
+}
diff --git a/compiler/rustc_parse/src/lexer/tokentrees.rs b/compiler/rustc_parse/src/lexer/tokentrees.rs
new file mode 100644
index 000000000..aa70912dc
--- /dev/null
+++ b/compiler/rustc_parse/src/lexer/tokentrees.rs
@@ -0,0 +1,296 @@
+use super::{StringReader, UnmatchedBrace};
+
+use rustc_ast::token::{self, Delimiter, Token};
+use rustc_ast::tokenstream::{DelimSpan, Spacing, TokenStream, TokenTree};
+use rustc_ast_pretty::pprust::token_to_string;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::PResult;
+use rustc_span::Span;
+
+impl<'a> StringReader<'a> {
+ pub(super) fn into_token_trees(self) -> (PResult<'a, TokenStream>, Vec<UnmatchedBrace>) {
+ let mut tt_reader = TokenTreesReader {
+ string_reader: self,
+ token: Token::dummy(),
+ open_braces: Vec::new(),
+ unmatched_braces: Vec::new(),
+ matching_delim_spans: Vec::new(),
+ last_unclosed_found_span: None,
+ last_delim_empty_block_spans: FxHashMap::default(),
+ matching_block_spans: Vec::new(),
+ };
+ let res = tt_reader.parse_all_token_trees();
+ (res, tt_reader.unmatched_braces)
+ }
+}
+
+struct TokenTreesReader<'a> {
+ string_reader: StringReader<'a>,
+ token: Token,
+ /// Stack of open delimiters and their spans. Used for error messages.
+ open_braces: Vec<(Delimiter, Span)>,
+ unmatched_braces: Vec<UnmatchedBrace>,
+ /// The type and spans for all braces
+ ///
+ /// Used only for error recovery when arriving at EOF with mismatched braces.
+ matching_delim_spans: Vec<(Delimiter, Span, Span)>,
+ last_unclosed_found_span: Option<Span>,
+ /// Collect empty block spans that might have been auto-inserted by editors.
+ last_delim_empty_block_spans: FxHashMap<Delimiter, Span>,
+ /// Collect the spans of braces (open, close). Used only
+ /// for detecting whether blocks are empty and contain only braces.
+ matching_block_spans: Vec<(Span, Span)>,
+}
+
+impl<'a> TokenTreesReader<'a> {
+ // Parse a stream of tokens into a list of `TokenTree`s, up to an `Eof`.
+ fn parse_all_token_trees(&mut self) -> PResult<'a, TokenStream> {
+ let mut buf = TokenStreamBuilder::default();
+
+ self.bump();
+ while self.token != token::Eof {
+ buf.push(self.parse_token_tree()?);
+ }
+
+ Ok(buf.into_token_stream())
+ }
+
+ // Parse a stream of tokens into a list of `TokenTree`s, up to a `CloseDelim`.
+ fn parse_token_trees_until_close_delim(&mut self) -> TokenStream {
+ let mut buf = TokenStreamBuilder::default();
+ loop {
+ if let token::CloseDelim(..) = self.token.kind {
+ return buf.into_token_stream();
+ }
+
+ match self.parse_token_tree() {
+ Ok(tree) => buf.push(tree),
+ Err(mut e) => {
+ e.emit();
+ return buf.into_token_stream();
+ }
+ }
+ }
+ }
+
+ fn parse_token_tree(&mut self) -> PResult<'a, TokenTree> {
+ let sm = self.string_reader.sess.source_map();
+
+ match self.token.kind {
+ token::Eof => {
+ let msg = "this file contains an unclosed delimiter";
+ let mut err =
+ self.string_reader.sess.span_diagnostic.struct_span_err(self.token.span, msg);
+ for &(_, sp) in &self.open_braces {
+ err.span_label(sp, "unclosed delimiter");
+ self.unmatched_braces.push(UnmatchedBrace {
+ expected_delim: Delimiter::Brace,
+ found_delim: None,
+ found_span: self.token.span,
+ unclosed_span: Some(sp),
+ candidate_span: None,
+ });
+ }
+
+ if let Some((delim, _)) = self.open_braces.last() {
+ if let Some((_, open_sp, close_sp)) =
+ self.matching_delim_spans.iter().find(|(d, open_sp, close_sp)| {
+ if let Some(close_padding) = sm.span_to_margin(*close_sp) {
+ if let Some(open_padding) = sm.span_to_margin(*open_sp) {
+ return delim == d && close_padding != open_padding;
+ }
+ }
+ false
+ })
+ // these are in reverse order as they get inserted on close, but
+ // we want the last open/first close
+ {
+ err.span_label(*open_sp, "this delimiter might not be properly closed...");
+ err.span_label(
+ *close_sp,
+ "...as it matches this but it has different indentation",
+ );
+ }
+ }
+ Err(err)
+ }
+ token::OpenDelim(delim) => {
+ // The span for beginning of the delimited section
+ let pre_span = self.token.span;
+
+ // Parse the open delimiter.
+ self.open_braces.push((delim, self.token.span));
+ self.bump();
+
+ // Parse the token trees within the delimiters.
+ // We stop at any delimiter so we can try to recover if the user
+ // uses an incorrect delimiter.
+ let tts = self.parse_token_trees_until_close_delim();
+
+ // Expand to cover the entire delimited token tree
+ let delim_span = DelimSpan::from_pair(pre_span, self.token.span);
+
+ match self.token.kind {
+ // Correct delimiter.
+ token::CloseDelim(d) if d == delim => {
+ let (open_brace, open_brace_span) = self.open_braces.pop().unwrap();
+ let close_brace_span = self.token.span;
+
+ if tts.is_empty() {
+ let empty_block_span = open_brace_span.to(close_brace_span);
+ if !sm.is_multiline(empty_block_span) {
+ // Only track if the block is in the form of `{}`, otherwise it is
+ // likely that it was written on purpose.
+ self.last_delim_empty_block_spans.insert(delim, empty_block_span);
+ }
+ }
+
+ // Only add braces.
+ if let (Delimiter::Brace, Delimiter::Brace) = (open_brace, delim) {
+ self.matching_block_spans.push((open_brace_span, close_brace_span));
+ }
+
+ if self.open_braces.is_empty() {
+ // Clear up these spans to avoid suggesting them as we've found
+ // properly matched delimiters so far for an entire block.
+ self.matching_delim_spans.clear();
+ } else {
+ self.matching_delim_spans.push((
+ open_brace,
+ open_brace_span,
+ close_brace_span,
+ ));
+ }
+ // Parse the closing delimiter.
+ self.bump();
+ }
+ // Incorrect delimiter.
+ token::CloseDelim(other) => {
+ let mut unclosed_delimiter = None;
+ let mut candidate = None;
+
+ if self.last_unclosed_found_span != Some(self.token.span) {
+ // do not complain about the same unclosed delimiter multiple times
+ self.last_unclosed_found_span = Some(self.token.span);
+ // This is a conservative error: only report the last unclosed
+ // delimiter. The previous unclosed delimiters could actually be
+ // closed! The parser just hasn't gotten to them yet.
+ if let Some(&(_, sp)) = self.open_braces.last() {
+ unclosed_delimiter = Some(sp);
+ };
+ if let Some(current_padding) = sm.span_to_margin(self.token.span) {
+ for (brace, brace_span) in &self.open_braces {
+ if let Some(padding) = sm.span_to_margin(*brace_span) {
+ // high likelihood of these two corresponding
+ if current_padding == padding && brace == &other {
+ candidate = Some(*brace_span);
+ }
+ }
+ }
+ }
+ let (tok, _) = self.open_braces.pop().unwrap();
+ self.unmatched_braces.push(UnmatchedBrace {
+ expected_delim: tok,
+ found_delim: Some(other),
+ found_span: self.token.span,
+ unclosed_span: unclosed_delimiter,
+ candidate_span: candidate,
+ });
+ } else {
+ self.open_braces.pop();
+ }
+
+ // If the incorrect delimiter matches an earlier opening
+ // delimiter, then don't consume it (it can be used to
+ // close the earlier one). Otherwise, consume it.
+ // E.g., we try to recover from:
+ // fn foo() {
+ // bar(baz(
+ // } // Incorrect delimiter but matches the earlier `{`
+ if !self.open_braces.iter().any(|&(b, _)| b == other) {
+ self.bump();
+ }
+ }
+ token::Eof => {
+ // Silently recover, the EOF token will be seen again
+ // and an error emitted then. Thus we don't pop from
+ // self.open_braces here.
+ }
+ _ => {}
+ }
+
+ Ok(TokenTree::Delimited(delim_span, delim, tts))
+ }
+ token::CloseDelim(delim) => {
+ // An unexpected closing delimiter (i.e., there is no
+ // matching opening delimiter).
+ let token_str = token_to_string(&self.token);
+ let msg = format!("unexpected closing delimiter: `{}`", token_str);
+ let mut err =
+ self.string_reader.sess.span_diagnostic.struct_span_err(self.token.span, &msg);
+
+ // Braces are added at the end, so the last element is the biggest block
+ if let Some(parent) = self.matching_block_spans.last() {
+ if let Some(span) = self.last_delim_empty_block_spans.remove(&delim) {
+ // Check if the (empty block) is in the last properly closed block
+ if (parent.0.to(parent.1)).contains(span) {
+ err.span_label(
+ span,
+ "block is empty, you might have not meant to close it",
+ );
+ } else {
+ err.span_label(parent.0, "this opening brace...");
+
+ err.span_label(parent.1, "...matches this closing brace");
+ }
+ } else {
+ err.span_label(parent.0, "this opening brace...");
+
+ err.span_label(parent.1, "...matches this closing brace");
+ }
+ }
+
+ err.span_label(self.token.span, "unexpected closing delimiter");
+ Err(err)
+ }
+ _ => {
+ let tok = self.token.take();
+ let mut spacing = self.bump();
+ if !self.token.is_op() {
+ spacing = Spacing::Alone;
+ }
+ Ok(TokenTree::Token(tok, spacing))
+ }
+ }
+ }
+
+ fn bump(&mut self) -> Spacing {
+ let (spacing, token) = self.string_reader.next_token();
+ self.token = token;
+ spacing
+ }
+}
+
+#[derive(Default)]
+struct TokenStreamBuilder {
+ buf: Vec<TokenTree>,
+}
+
+impl TokenStreamBuilder {
+ #[inline(always)]
+ fn push(&mut self, tree: TokenTree) {
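+ // If the previous tree is a token marked `Joint` and it can be glued to the incoming
+ // token (e.g. `<` glued with `=` becomes `<=`), replace it with the glued token.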
+ if let Some(TokenTree::Token(prev_token, Spacing::Joint)) = self.buf.last()
+ && let TokenTree::Token(token, joint) = &tree
+ && let Some(glued) = prev_token.glue(token)
+ {
+ self.buf.pop();
+ self.buf.push(TokenTree::Token(glued, *joint));
+ } else {
+ self.buf.push(tree)
+ }
+ }
+
+ fn into_token_stream(self) -> TokenStream {
+ TokenStream::new(self.buf)
+ }
+}
diff --git a/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
new file mode 100644
index 000000000..273827864
--- /dev/null
+++ b/compiler/rustc_parse/src/lexer/unescape_error_reporting.rs
@@ -0,0 +1,381 @@
+//! Utilities for rendering escape sequence errors as diagnostics.
+
+use std::iter::once;
+use std::ops::Range;
+
+use rustc_errors::{pluralize, Applicability, Handler};
+use rustc_lexer::unescape::{EscapeError, Mode};
+use rustc_span::{BytePos, Span};
+
+pub(crate) fn emit_unescape_error(
+ handler: &Handler,
+ // interior part of the literal, without quotes
+ lit: &str,
+ // full span of the literal, including quotes
+ span_with_quotes: Span,
+ // interior span of the literal, without quotes
+ span: Span,
+ mode: Mode,
+ // range of the error inside `lit`
+ range: Range<usize>,
+ error: EscapeError,
+) {
+ tracing::debug!(
+ "emit_unescape_error: {:?}, {:?}, {:?}, {:?}, {:?}",
+ lit,
+ span_with_quotes,
+ mode,
+ range,
+ error
+ );
+ let last_char = || {
+ let c = lit[range.clone()].chars().rev().next().unwrap();
+ let span = span.with_lo(span.hi() - BytePos(c.len_utf8() as u32));
+ (c, span)
+ };
+ match error {
+ EscapeError::LoneSurrogateUnicodeEscape => {
+ handler
+ .struct_span_err(span, "invalid unicode character escape")
+ .span_label(span, "invalid escape")
+ .help("unicode escape must not be a surrogate")
+ .emit();
+ }
+ EscapeError::OutOfRangeUnicodeEscape => {
+ handler
+ .struct_span_err(span, "invalid unicode character escape")
+ .span_label(span, "invalid escape")
+ .help("unicode escape must be at most 10FFFF")
+ .emit();
+ }
+ EscapeError::MoreThanOneChar => {
+ use unicode_normalization::{char::is_combining_mark, UnicodeNormalization};
+
+ let mut has_help = false;
+ let mut handler = handler.struct_span_err(
+ span_with_quotes,
+ "character literal may only contain one codepoint",
+ );
+
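+ // E.g. (illustrative) `'e\u{301}'` (`e` plus a combining acute accent) contains two
+ // codepoints but normalizes to the single codepoint `é`; the branch below detects
+ // this and suggests the NFC-normalized form.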
+ if lit.chars().skip(1).all(|c| is_combining_mark(c)) {
+ let escaped_marks =
+ lit.chars().skip(1).map(|c| c.escape_default().to_string()).collect::<Vec<_>>();
+ handler.span_note(
+ span,
+ &format!(
+ "this `{}` is followed by the combining mark{} `{}`",
+ lit.chars().next().unwrap(),
+ pluralize!(escaped_marks.len()),
+ escaped_marks.join(""),
+ ),
+ );
+ let normalized = lit.nfc().to_string();
+ if normalized.chars().count() == 1 {
+ has_help = true;
+ handler.span_suggestion(
+ span,
+ &format!(
+ "consider using the normalized form `{}` of this character",
+ normalized.chars().next().unwrap().escape_default()
+ ),
+ normalized,
+ Applicability::MachineApplicable,
+ );
+ }
+ } else {
+ let printable: Vec<char> = lit
+ .chars()
+ .filter(|&x| {
+ unicode_width::UnicodeWidthChar::width(x).unwrap_or(0) != 0
+ && !x.is_whitespace()
+ })
+ .collect();
+
+ if let [ch] = printable.as_slice() {
+ has_help = true;
+
+ handler.span_note(
+ span,
+ &format!(
+ "there are non-printing characters, the full sequence is `{}`",
+ lit.escape_default(),
+ ),
+ );
+
+ handler.span_suggestion(
+ span,
+ "consider removing the non-printing characters",
+ ch,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ if !has_help {
+ let (prefix, msg) = if mode.is_bytes() {
+ ("b", "if you meant to write a byte string literal, use double quotes")
+ } else {
+ ("", "if you meant to write a `str` literal, use double quotes")
+ };
+
+ handler.span_suggestion(
+ span_with_quotes,
+ msg,
+ format!("{}\"{}\"", prefix, lit),
+ Applicability::MachineApplicable,
+ );
+ }
+
+ handler.emit();
+ }
+ EscapeError::EscapeOnlyChar => {
+ let (c, char_span) = last_char();
+
+ let msg = if mode.is_bytes() {
+ "byte constant must be escaped"
+ } else {
+ "character constant must be escaped"
+ };
+ handler
+ .struct_span_err(span, &format!("{}: `{}`", msg, escaped_char(c)))
+ .span_suggestion(
+ char_span,
+ "escape the character",
+ c.escape_default(),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ EscapeError::BareCarriageReturn => {
+ let msg = if mode.in_double_quotes() {
+ "bare CR not allowed in string, use `\\r` instead"
+ } else {
+ "character constant must be escaped: `\\r`"
+ };
+ handler
+ .struct_span_err(span, msg)
+ .span_suggestion(
+ span,
+ "escape the character",
+ "\\r",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ EscapeError::BareCarriageReturnInRawString => {
+ assert!(mode.in_double_quotes());
+ let msg = "bare CR not allowed in raw string";
+ handler.span_err(span, msg);
+ }
+ EscapeError::InvalidEscape => {
+ let (c, span) = last_char();
+
+ let label =
+ if mode.is_bytes() { "unknown byte escape" } else { "unknown character escape" };
+ let ec = escaped_char(c);
+ let mut diag = handler.struct_span_err(span, &format!("{}: `{}`", label, ec));
+ diag.span_label(span, label);
+ if (c == '{' || c == '}') && !mode.is_bytes() {
+ diag.help(
+ "if used in a formatting string, curly braces are escaped with `{{` and `}}`",
+ );
+ } else if c == '\r' {
+ diag.help(
+ "this is an isolated carriage return; consider checking your editor and \
+ version control settings",
+ );
+ } else {
+ if !mode.is_bytes() {
+ diag.span_suggestion(
+ span_with_quotes,
+ "if you meant to write a literal backslash (perhaps escaping in a regular expression), consider a raw string literal",
+ format!("r\"{}\"", lit),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ diag.help(
+ "for more information, visit \
+ <https://static.rust-lang.org/doc/master/reference.html#literals>",
+ );
+ }
+ diag.emit();
+ }
+ EscapeError::TooShortHexEscape => {
+ handler.span_err(span, "numeric character escape is too short");
+ }
+ EscapeError::InvalidCharInHexEscape | EscapeError::InvalidCharInUnicodeEscape => {
+ let (c, span) = last_char();
+
+ let msg = if error == EscapeError::InvalidCharInHexEscape {
+ "invalid character in numeric character escape"
+ } else {
+ "invalid character in unicode escape"
+ };
+ let c = escaped_char(c);
+
+ handler
+ .struct_span_err(span, &format!("{}: `{}`", msg, c))
+ .span_label(span, msg)
+ .emit();
+ }
+ EscapeError::NonAsciiCharInByte => {
+ assert!(mode.is_bytes());
+ let (c, span) = last_char();
+ let mut err = handler.struct_span_err(span, "non-ASCII character in byte constant");
+ let postfix = if unicode_width::UnicodeWidthChar::width(c).unwrap_or(1) == 0 {
+ format!(" but is {:?}", c)
+ } else {
+ String::new()
+ };
+ err.span_label(span, &format!("byte constant must be ASCII{}", postfix));
+ if (c as u32) <= 0xFF {
+ err.span_suggestion(
+ span,
+ &format!(
+ "if you meant to use the unicode code point for {:?}, use a \\xHH escape",
+ c
+ ),
+ format!("\\x{:X}", c as u32),
+ Applicability::MaybeIncorrect,
+ );
+ } else if matches!(mode, Mode::Byte) {
+ err.span_label(span, "this multibyte character does not fit into a single byte");
+ } else if matches!(mode, Mode::ByteStr) {
+ let mut utf8 = String::new();
+ utf8.push(c);
+ err.span_suggestion(
+ span,
+ &format!(
+ "if you meant to use the UTF-8 encoding of {:?}, use \\xHH escapes",
+ c
+ ),
+ utf8.as_bytes()
+ .iter()
+ .map(|b: &u8| format!("\\x{:X}", *b))
+ .fold("".to_string(), |a, c| a + &c),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+ EscapeError::NonAsciiCharInByteString => {
+ assert!(mode.is_bytes());
+ let (c, span) = last_char();
+ let postfix = if unicode_width::UnicodeWidthChar::width(c).unwrap_or(1) == 0 {
+ format!(" but is {:?}", c)
+ } else {
+ String::new()
+ };
+ handler
+ .struct_span_err(span, "raw byte string must be ASCII")
+ .span_label(span, &format!("must be ASCII{}", postfix))
+ .emit();
+ }
+ EscapeError::OutOfRangeHexEscape => {
+ handler
+ .struct_span_err(span, "out of range hex escape")
+ .span_label(span, "must be a character in the range [\\x00-\\x7f]")
+ .emit();
+ }
+ EscapeError::LeadingUnderscoreUnicodeEscape => {
+ let (c, span) = last_char();
+ let msg = "invalid start of unicode escape";
+ handler
+ .struct_span_err(span, &format!("{}: `{}`", msg, c))
+ .span_label(span, msg)
+ .emit();
+ }
+ EscapeError::OverlongUnicodeEscape => {
+ handler
+ .struct_span_err(span, "overlong unicode escape")
+ .span_label(span, "must have at most 6 hex digits")
+ .emit();
+ }
+ EscapeError::UnclosedUnicodeEscape => {
+ handler
+ .struct_span_err(span, "unterminated unicode escape")
+ .span_label(span, "missing a closing `}`")
+ .span_suggestion_verbose(
+ span.shrink_to_hi(),
+ "terminate the unicode escape",
+ "}",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+ EscapeError::NoBraceInUnicodeEscape => {
+ let msg = "incorrect unicode escape sequence";
+ let mut diag = handler.struct_span_err(span, msg);
+
+ let mut suggestion = "\\u{".to_owned();
+ let mut suggestion_len = 0;
+ let (c, char_span) = last_char();
+ let chars = once(c).chain(lit[range.end..].chars());
+ for c in chars.take(6).take_while(|c| c.is_digit(16)) {
+ suggestion.push(c);
+ suggestion_len += c.len_utf8();
+ }
+
+ if suggestion_len > 0 {
+ suggestion.push('}');
+ let hi = char_span.lo() + BytePos(suggestion_len as u32);
+ diag.span_suggestion(
+ span.with_hi(hi),
+ "format of unicode escape sequences uses braces",
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ diag.span_label(span, msg);
+ diag.help("format of unicode escape sequences is `\\u{...}`");
+ }
+
+ diag.emit();
+ }
+ EscapeError::UnicodeEscapeInByte => {
+ let msg = "unicode escape in byte string";
+ handler
+ .struct_span_err(span, msg)
+ .span_label(span, msg)
+ .help("unicode escape sequences cannot be used as a byte or in a byte string")
+ .emit();
+ }
+ EscapeError::EmptyUnicodeEscape => {
+ handler
+ .struct_span_err(span, "empty unicode escape")
+ .span_label(span, "this escape must have at least 1 hex digit")
+ .emit();
+ }
+ EscapeError::ZeroChars => {
+ let msg = "empty character literal";
+ handler.struct_span_err(span, msg).span_label(span, msg).emit();
+ }
+ EscapeError::LoneSlash => {
+ let msg = "invalid trailing slash in literal";
+ handler.struct_span_err(span, msg).span_label(span, msg).emit();
+ }
+ EscapeError::UnskippedWhitespaceWarning => {
+ let (c, char_span) = last_char();
+ let msg =
+ format!("non-ASCII whitespace symbol '{}' is not skipped", c.escape_unicode());
+ handler.struct_span_warn(span, &msg).span_label(char_span, &msg).emit();
+ }
+ EscapeError::MultipleSkippedLinesWarning => {
+ let msg = "multiple lines skipped by escaped newline";
+ let bottom_msg = "skipping everything up to and including this point";
+ handler.struct_span_warn(span, msg).span_label(span, bottom_msg).emit();
+ }
+ }
+}
+
+/// Renders a character for inclusion in an error message, escaping it unless it is
+/// printable ASCII.
+pub(crate) fn escaped_char(c: char) -> String {
+ match c {
+ '\u{20}'..='\u{7e}' => {
+ // Don't escape \, ' or " for user-facing messages
+ c.to_string()
+ }
+ _ => c.escape_default().to_string(),
+ }
+}
diff --git a/compiler/rustc_parse/src/lexer/unicode_chars.rs b/compiler/rustc_parse/src/lexer/unicode_chars.rs
new file mode 100644
index 000000000..2c68cc589
--- /dev/null
+++ b/compiler/rustc_parse/src/lexer/unicode_chars.rs
@@ -0,0 +1,386 @@
+// Characters and their corresponding confusables were collected from
+// https://www.unicode.org/Public/security/10.0.0/confusables.txt
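+//
+// Each entry is `(confusable character, its Unicode name, the ASCII character it resembles)`;
+// `check_for_substitution` uses this table to suggest the ASCII replacement.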
+
+use super::StringReader;
+use crate::token::{self, Delimiter};
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_span::{symbol::kw, BytePos, Pos, Span};
+
+#[rustfmt::skip] // for line breaks
+pub(crate) const UNICODE_ARRAY: &[(char, &str, char)] = &[
+ ('\u{2028}', "Line Separator", ' '),
+ ('\u{2029}', "Paragraph Separator", ' '),
+ (' ', "Ogham Space mark", ' '),
+ (' ', "En Quad", ' '),
+ (' ', "Em Quad", ' '),
+ (' ', "En Space", ' '),
+ (' ', "Em Space", ' '),
+ (' ', "Three-Per-Em Space", ' '),
+ (' ', "Four-Per-Em Space", ' '),
+ (' ', "Six-Per-Em Space", ' '),
+ (' ', "Punctuation Space", ' '),
+ (' ', "Thin Space", ' '),
+ (' ', "Hair Space", ' '),
+ (' ', "Medium Mathematical Space", ' '),
+ (' ', "No-Break Space", ' '),
+ (' ', "Figure Space", ' '),
+ (' ', "Narrow No-Break Space", ' '),
+ (' ', "Ideographic Space", ' '),
+
+ ('ߺ', "Nko Lajanyalan", '_'),
+ ('﹍', "Dashed Low Line", '_'),
+ ('﹎', "Centreline Low Line", '_'),
+ ('﹏', "Wavy Low Line", '_'),
+ ('_', "Fullwidth Low Line", '_'),
+
+ ('‐', "Hyphen", '-'),
+ ('‑', "Non-Breaking Hyphen", '-'),
+ ('‒', "Figure Dash", '-'),
+ ('–', "En Dash", '-'),
+ ('—', "Em Dash", '-'),
+ ('﹘', "Small Em Dash", '-'),
+ ('۔', "Arabic Full Stop", '-'),
+ ('⁃', "Hyphen Bullet", '-'),
+ ('˗', "Modifier Letter Minus Sign", '-'),
+ ('−', "Minus Sign", '-'),
+ ('➖', "Heavy Minus Sign", '-'),
+ ('Ⲻ', "Coptic Letter Dialect-P Ni", '-'),
+ ('ー', "Katakana-Hiragana Prolonged Sound Mark", '-'),
+ ('-', "Fullwidth Hyphen-Minus", '-'),
+ ('―', "Horizontal Bar", '-'),
+ ('─', "Box Drawings Light Horizontal", '-'),
+ ('━', "Box Drawings Heavy Horizontal", '-'),
+ ('㇐', "CJK Stroke H", '-'),
+ ('ꟷ', "Latin Epigraphic Letter Sideways I", '-'),
+ ('ᅳ', "Hangul Jungseong Eu", '-'),
+ ('ㅡ', "Hangul Letter Eu", '-'),
+ ('一', "CJK Unified Ideograph-4E00", '-'),
+ ('⼀', "Kangxi Radical One", '-'),
+
+ ('؍', "Arabic Date Separator", ','),
+ ('٫', "Arabic Decimal Separator", ','),
+ ('‚', "Single Low-9 Quotation Mark", ','),
+ ('¸', "Cedilla", ','),
+ ('ꓹ', "Lisu Letter Tone Na Po", ','),
+ (',', "Fullwidth Comma", ','),
+
+ (';', "Greek Question Mark", ';'),
+ (';', "Fullwidth Semicolon", ';'),
+ ('︔', "Presentation Form For Vertical Semicolon", ';'),
+
+ ('ः', "Devanagari Sign Visarga", ':'),
+ ('ઃ', "Gujarati Sign Visarga", ':'),
+ (':', "Fullwidth Colon", ':'),
+ ('։', "Armenian Full Stop", ':'),
+ ('܃', "Syriac Supralinear Colon", ':'),
+ ('܄', "Syriac Sublinear Colon", ':'),
+ ('᛬', "Runic Multiple Punctuation", ':'),
+ ('︰', "Presentation Form For Vertical Two Dot Leader", ':'),
+ ('᠃', "Mongolian Full Stop", ':'),
+ ('᠉', "Mongolian Manchu Full Stop", ':'),
+ ('⁚', "Two Dot Punctuation", ':'),
+ ('׃', "Hebrew Punctuation Sof Pasuq", ':'),
+ ('˸', "Modifier Letter Raised Colon", ':'),
+ ('꞉', "Modifier Letter Colon", ':'),
+ ('∶', "Ratio", ':'),
+ ('ː', "Modifier Letter Triangular Colon", ':'),
+ ('ꓽ', "Lisu Letter Tone Mya Jeu", ':'),
+ ('︓', "Presentation Form For Vertical Colon", ':'),
+
+ ('!', "Fullwidth Exclamation Mark", '!'),
+ ('ǃ', "Latin Letter Retroflex Click", '!'),
+ ('ⵑ', "Tifinagh Letter Tuareg Yang", '!'),
+ ('︕', "Presentation Form For Vertical Exclamation Mark", '!'),
+
+ ('ʔ', "Latin Letter Glottal Stop", '?'),
+ ('Ɂ', "Latin Capital Letter Glottal Stop", '?'),
+ ('ॽ', "Devanagari Letter Glottal Stop", '?'),
+ ('Ꭾ', "Cherokee Letter He", '?'),
+ ('ꛫ', "Bamum Letter Ntuu", '?'),
+ ('?', "Fullwidth Question Mark", '?'),
+ ('︖', "Presentation Form For Vertical Question Mark", '?'),
+
+ ('𝅭', "Musical Symbol Combining Augmentation Dot", '.'),
+ ('․', "One Dot Leader", '.'),
+ ('܁', "Syriac Supralinear Full Stop", '.'),
+ ('܂', "Syriac Sublinear Full Stop", '.'),
+ ('꘎', "Vai Full Stop", '.'),
+ ('𐩐', "Kharoshthi Punctuation Dot", '.'),
+ ('٠', "Arabic-Indic Digit Zero", '.'),
+ ('۰', "Extended Arabic-Indic Digit Zero", '.'),
+ ('ꓸ', "Lisu Letter Tone Mya Ti", '.'),
+ ('·', "Middle Dot", '.'),
+ ('・', "Katakana Middle Dot", '.'),
+ ('・', "Halfwidth Katakana Middle Dot", '.'),
+ ('᛫', "Runic Single Punctuation", '.'),
+ ('·', "Greek Ano Teleia", '.'),
+ ('⸱', "Word Separator Middle Dot", '.'),
+ ('𐄁', "Aegean Word Separator Dot", '.'),
+ ('•', "Bullet", '.'),
+ ('‧', "Hyphenation Point", '.'),
+ ('∙', "Bullet Operator", '.'),
+ ('⋅', "Dot Operator", '.'),
+ ('ꞏ', "Latin Letter Sinological Dot", '.'),
+ ('ᐧ', "Canadian Syllabics Final Middle Dot", '.'),
+ ('ᐧ', "Canadian Syllabics Final Middle Dot", '.'),
+ ('.', "Fullwidth Full Stop", '.'),
+ ('。', "Ideographic Full Stop", '.'),
+ ('︒', "Presentation Form For Vertical Ideographic Full Stop", '.'),
+
+ ('՝', "Armenian Comma", '\''),
+ (''', "Fullwidth Apostrophe", '\''),
+ ('‘', "Left Single Quotation Mark", '\''),
+ ('’', "Right Single Quotation Mark", '\''),
+ ('‛', "Single High-Reversed-9 Quotation Mark", '\''),
+ ('′', "Prime", '\''),
+ ('‵', "Reversed Prime", '\''),
+ ('՚', "Armenian Apostrophe", '\''),
+ ('׳', "Hebrew Punctuation Geresh", '\''),
+ ('`', "Grave Accent", '\''),
+ ('`', "Greek Varia", '\''),
+ ('`', "Fullwidth Grave Accent", '\''),
+ ('´', "Acute Accent", '\''),
+ ('΄', "Greek Tonos", '\''),
+ ('´', "Greek Oxia", '\''),
+ ('᾽', "Greek Koronis", '\''),
+ ('᾿', "Greek Psili", '\''),
+ ('῾', "Greek Dasia", '\''),
+ ('ʹ', "Modifier Letter Prime", '\''),
+ ('ʹ', "Greek Numeral Sign", '\''),
+ ('ˈ', "Modifier Letter Vertical Line", '\''),
+ ('ˊ', "Modifier Letter Acute Accent", '\''),
+ ('ˋ', "Modifier Letter Grave Accent", '\''),
+ ('˴', "Modifier Letter Middle Grave Accent", '\''),
+ ('ʻ', "Modifier Letter Turned Comma", '\''),
+ ('ʽ', "Modifier Letter Reversed Comma", '\''),
+ ('ʼ', "Modifier Letter Apostrophe", '\''),
+ ('ʾ', "Modifier Letter Right Half Ring", '\''),
+ ('ꞌ', "Latin Small Letter Saltillo", '\''),
+ ('י', "Hebrew Letter Yod", '\''),
+ ('ߴ', "Nko High Tone Apostrophe", '\''),
+ ('ߵ', "Nko Low Tone Apostrophe", '\''),
+ ('ᑊ', "Canadian Syllabics West-Cree P", '\''),
+ ('ᛌ', "Runic Letter Short-Twig-Sol S", '\''),
+ ('𖽑', "Miao Sign Aspiration", '\''),
+ ('𖽒', "Miao Sign Reformed Voicing", '\''),
+
+ ('᳓', "Vedic Sign Nihshvasa", '"'),
+ ('"', "Fullwidth Quotation Mark", '"'),
+ ('“', "Left Double Quotation Mark", '"'),
+ ('”', "Right Double Quotation Mark", '"'),
+ ('‟', "Double High-Reversed-9 Quotation Mark", '"'),
+ ('″', "Double Prime", '"'),
+ ('‶', "Reversed Double Prime", '"'),
+ ('〃', "Ditto Mark", '"'),
+ ('״', "Hebrew Punctuation Gershayim", '"'),
+ ('˝', "Double Acute Accent", '"'),
+ ('ʺ', "Modifier Letter Double Prime", '"'),
+ ('˶', "Modifier Letter Middle Double Acute Accent", '"'),
+ ('˵', "Modifier Letter Middle Double Grave Accent", '"'),
+ ('ˮ', "Modifier Letter Double Apostrophe", '"'),
+ ('ײ', "Hebrew Ligature Yiddish Double Yod", '"'),
+ ('❞', "Heavy Double Comma Quotation Mark Ornament", '"'),
+ ('❝', "Heavy Double Turned Comma Quotation Mark Ornament", '"'),
+
+ ('(', "Fullwidth Left Parenthesis", '('),
+ ('❨', "Medium Left Parenthesis Ornament", '('),
+ ('﴾', "Ornate Left Parenthesis", '('),
+
+ (')', "Fullwidth Right Parenthesis", ')'),
+ ('❩', "Medium Right Parenthesis Ornament", ')'),
+ ('﴿', "Ornate Right Parenthesis", ')'),
+
+ ('[', "Fullwidth Left Square Bracket", '['),
+ ('❲', "Light Left Tortoise Shell Bracket Ornament", '['),
+ ('「', "Left Corner Bracket", '['),
+ ('『', "Left White Corner Bracket", '['),
+ ('【', "Left Black Lenticular Bracket", '['),
+ ('〔', "Left Tortoise Shell Bracket", '['),
+ ('〖', "Left White Lenticular Bracket", '['),
+ ('〘', "Left White Tortoise Shell Bracket", '['),
+ ('〚', "Left White Square Bracket", '['),
+
+ (']', "Fullwidth Right Square Bracket", ']'),
+ ('❳', "Light Right Tortoise Shell Bracket Ornament", ']'),
+ ('」', "Right Corner Bracket", ']'),
+ ('』', "Right White Corner Bracket", ']'),
+ ('】', "Right Black Lenticular Bracket", ']'),
+ ('〕', "Right Tortoise Shell Bracket", ']'),
+ ('〗', "Right White Lenticular Bracket", ']'),
+ ('〙', "Right White Tortoise Shell Bracket", ']'),
+ ('〛', "Right White Square Bracket", ']'),
+
+ ('❴', "Medium Left Curly Bracket Ornament", '{'),
+ ('𝄔', "Musical Symbol Brace", '{'),
+ ('{', "Fullwidth Left Curly Bracket", '{'),
+
+ ('❵', "Medium Right Curly Bracket Ornament", '}'),
+ ('}', "Fullwidth Right Curly Bracket", '}'),
+
+ ('⁎', "Low Asterisk", '*'),
+ ('٭', "Arabic Five Pointed Star", '*'),
+ ('∗', "Asterisk Operator", '*'),
+ ('𐌟', "Old Italic Letter Ess", '*'),
+ ('*', "Fullwidth Asterisk", '*'),
+
+ ('᜵', "Philippine Single Punctuation", '/'),
+ ('⁁', "Caret Insertion Point", '/'),
+ ('∕', "Division Slash", '/'),
+ ('⁄', "Fraction Slash", '/'),
+ ('╱', "Box Drawings Light Diagonal Upper Right To Lower Left", '/'),
+ ('⟋', "Mathematical Rising Diagonal", '/'),
+ ('⧸', "Big Solidus", '/'),
+ ('𝈺', "Greek Instrumental Notation Symbol-47", '/'),
+ ('㇓', "CJK Stroke Sp", '/'),
+ ('〳', "Vertical Kana Repeat Mark Upper Half", '/'),
+ ('Ⳇ', "Coptic Capital Letter Old Coptic Esh", '/'),
+ ('ノ', "Katakana Letter No", '/'),
+ ('丿', "CJK Unified Ideograph-4E3F", '/'),
+ ('⼃', "Kangxi Radical Slash", '/'),
+ ('/', "Fullwidth Solidus", '/'),
+
+ ('\', "Fullwidth Reverse Solidus", '\\'),
+ ('﹨', "Small Reverse Solidus", '\\'),
+ ('∖', "Set Minus", '\\'),
+ ('⟍', "Mathematical Falling Diagonal", '\\'),
+ ('⧵', "Reverse Solidus Operator", '\\'),
+ ('⧹', "Big Reverse Solidus", '\\'),
+ ('⧹', "Greek Vocal Notation Symbol-16", '\\'),
+ ('⧹', "Greek Instrumental Symbol-48", '\\'),
+ ('㇔', "CJK Stroke D", '\\'),
+ ('丶', "CJK Unified Ideograph-4E36", '\\'),
+ ('⼂', "Kangxi Radical Dot", '\\'),
+ ('、', "Ideographic Comma", '\\'),
+ ('ヽ', "Katakana Iteration Mark", '\\'),
+
+ ('ꝸ', "Latin Small Letter Um", '&'),
+ ('&', "Fullwidth Ampersand", '&'),
+
+ ('᛭', "Runic Cross Punctuation", '+'),
+ ('➕', "Heavy Plus Sign", '+'),
+ ('𐊛', "Lycian Letter H", '+'),
+ ('﬩', "Hebrew Letter Alternative Plus Sign", '+'),
+ ('+', "Fullwidth Plus Sign", '+'),
+
+ ('‹', "Single Left-Pointing Angle Quotation Mark", '<'),
+ ('❮', "Heavy Left-Pointing Angle Quotation Mark Ornament", '<'),
+ ('˂', "Modifier Letter Left Arrowhead", '<'),
+ ('𝈶', "Greek Instrumental Symbol-40", '<'),
+ ('ᐸ', "Canadian Syllabics Pa", '<'),
+ ('ᚲ', "Runic Letter Kauna", '<'),
+ ('❬', "Medium Left-Pointing Angle Bracket Ornament", '<'),
+ ('⟨', "Mathematical Left Angle Bracket", '<'),
+ ('〈', "Left-Pointing Angle Bracket", '<'),
+ ('〈', "Left Angle Bracket", '<'),
+ ('㇛', "CJK Stroke Pd", '<'),
+ ('く', "Hiragana Letter Ku", '<'),
+ ('𡿨', "CJK Unified Ideograph-21FE8", '<'),
+ ('《', "Left Double Angle Bracket", '<'),
+ ('<', "Fullwidth Less-Than Sign", '<'),
+
+ ('᐀', "Canadian Syllabics Hyphen", '='),
+ ('⹀', "Double Hyphen", '='),
+ ('゠', "Katakana-Hiragana Double Hyphen", '='),
+ ('꓿', "Lisu Punctuation Full Stop", '='),
+ ('=', "Fullwidth Equals Sign", '='),
+
+ ('›', "Single Right-Pointing Angle Quotation Mark", '>'),
+ ('❯', "Heavy Right-Pointing Angle Quotation Mark Ornament", '>'),
+ ('˃', "Modifier Letter Right Arrowhead", '>'),
+ ('𝈷', "Greek Instrumental Symbol-42", '>'),
+ ('ᐳ', "Canadian Syllabics Po", '>'),
+ ('𖼿', "Miao Letter Archaic Zza", '>'),
+ ('❭', "Medium Right-Pointing Angle Bracket Ornament", '>'),
+ ('⟩', "Mathematical Right Angle Bracket", '>'),
+ ('〉', "Right-Pointing Angle Bracket", '>'),
+ ('〉', "Right Angle Bracket", '>'),
+ ('》', "Right Double Angle Bracket", '>'),
+ ('>', "Fullwidth Greater-Than Sign", '>'),
+];
+
+// FIXME: the lexer could be used to turn unicode homoglyphs into their ASCII
+// equivalents, instead of keeping the substitution token in this table. Ideally, this
+// should be inside `rustc_lexer`. However, we should first remove compound tokens like
+// `<<` from `rustc_lexer`, and then add fancier error recovery to it, as there will be
+// less overall work to do this way.
+const ASCII_ARRAY: &[(char, &str, Option<token::TokenKind>)] = &[
+ (' ', "Space", None),
+ ('_', "Underscore", Some(token::Ident(kw::Underscore, false))),
+ ('-', "Minus/Hyphen", Some(token::BinOp(token::Minus))),
+ (',', "Comma", Some(token::Comma)),
+ (';', "Semicolon", Some(token::Semi)),
+ (':', "Colon", Some(token::Colon)),
+ ('!', "Exclamation Mark", Some(token::Not)),
+ ('?', "Question Mark", Some(token::Question)),
+ ('.', "Period", Some(token::Dot)),
+ ('(', "Left Parenthesis", Some(token::OpenDelim(Delimiter::Parenthesis))),
+ (')', "Right Parenthesis", Some(token::CloseDelim(Delimiter::Parenthesis))),
+ ('[', "Left Square Bracket", Some(token::OpenDelim(Delimiter::Bracket))),
+ (']', "Right Square Bracket", Some(token::CloseDelim(Delimiter::Bracket))),
+ ('{', "Left Curly Brace", Some(token::OpenDelim(Delimiter::Brace))),
+ ('}', "Right Curly Brace", Some(token::CloseDelim(Delimiter::Brace))),
+ ('*', "Asterisk", Some(token::BinOp(token::Star))),
+ ('/', "Slash", Some(token::BinOp(token::Slash))),
+ ('\\', "Backslash", None),
+ ('&', "Ampersand", Some(token::BinOp(token::And))),
+ ('+', "Plus Sign", Some(token::BinOp(token::Plus))),
+ ('<', "Less-Than Sign", Some(token::Lt)),
+ ('=', "Equals Sign", Some(token::Eq)),
+ ('>', "Greater-Than Sign", Some(token::Gt)),
+ // FIXME: Literals are already lexed by this point, so we can't recover gracefully just by
+ // spitting the correct token out.
+ ('\'', "Single Quote", None),
+ ('"', "Quotation Mark", None),
+];
+
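+/// Checks whether `ch` is a Unicode confusable for an ASCII character. If so,
+/// attaches a replacement suggestion to `err` and returns the `TokenKind` that
+/// the intended ASCII character would have produced, when `ASCII_ARRAY` records one.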
+pub(super) fn check_for_substitution<'a>(
+ reader: &StringReader<'a>,
+ pos: BytePos,
+ ch: char,
+ err: &mut Diagnostic,
+) -> Option<token::TokenKind> {
+ let &(_u_char, u_name, ascii_char) = UNICODE_ARRAY.iter().find(|&&(c, _, _)| c == ch)?;
+
+ let span = Span::with_root_ctxt(pos, pos + Pos::from_usize(ch.len_utf8()));
+
+ let Some((_ascii_char, ascii_name, token)) = ASCII_ARRAY.iter().find(|&&(c, _, _)| c == ascii_char) else {
+ let msg = format!("substitution character not found for '{}'", ch);
+ reader.sess.span_diagnostic.span_bug_no_panic(span, &msg);
+ return None;
+ };
+
+ // special help suggestion for "directed" double quotes
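+ // e.g. for the input `“foo”`, suggest replacing the entire quoted span with `"foo"`.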
+ if let Some(s) = peek_delimited(&reader.src[reader.src_index(pos)..], '“', '”') {
+ let msg = format!(
+ "Unicode characters '“' (Left Double Quotation Mark) and \
+ '”' (Right Double Quotation Mark) look like '{}' ({}), but are not",
+ ascii_char, ascii_name
+ );
+ err.span_suggestion(
+ Span::with_root_ctxt(
+ pos,
+ pos + Pos::from_usize('“'.len_utf8() + s.len() + '”'.len_utf8()),
+ ),
+ &msg,
+ format!("\"{}\"", s),
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ let msg = format!(
+ "Unicode character '{}' ({}) looks like '{}' ({}), but it is not",
+ ch, u_name, ascii_char, ascii_name
+ );
+ err.span_suggestion(span, &msg, ascii_char, Applicability::MaybeIncorrect);
+ }
+ token.clone()
+}
+
+/// Extracts the text between the given delimiters, if `text` starts with `from_ch` and a matching `to_ch` follows.
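+/// For example, `peek_delimited("“abc” tail", '“', '”')` returns `Some("abc")`,
+/// while text that does not start with `from_ch` returns `None`.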
+fn peek_delimited(text: &str, from_ch: char, to_ch: char) -> Option<&str> {
+ let mut chars = text.chars();
+ let first_char = chars.next()?;
+ if first_char != from_ch {
+ return None;
+ }
+ let last_char_idx = chars.as_str().find(to_ch)?;
+ Some(&chars.as_str()[..last_char_idx])
+}
diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs
new file mode 100644
index 000000000..8c087c65c
--- /dev/null
+++ b/compiler/rustc_parse/src/lib.rs
@@ -0,0 +1,291 @@
+//! The main parser interface.
+
+#![feature(array_windows)]
+#![feature(box_patterns)]
+#![feature(if_let_guard)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(never_type)]
+#![feature(rustc_attrs)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+
+use rustc_ast as ast;
+use rustc_ast::token;
+use rustc_ast::tokenstream::TokenStream;
+use rustc_ast::Attribute;
+use rustc_ast::{AttrItem, MetaItem};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Applicability, Diagnostic, FatalError, Level, PResult};
+use rustc_session::parse::ParseSess;
+use rustc_span::{FileName, SourceFile, Span};
+
+use std::path::Path;
+
+pub const MACRO_ARGUMENTS: Option<&str> = Some("macro arguments");
+
+#[macro_use]
+pub mod parser;
+use parser::{emit_unclosed_delims, make_unclosed_delims_error, Parser};
+pub mod lexer;
+pub mod validate_attr;
+
+// A bunch of utility functions of the form `parse_<thing>_from_<source>`
+// where <thing> includes crate, expr, item, stmt, tts, and one that
+// uses a HOF to parse anything, and <source> includes file and
+// `source_str`.
+
+/// A variant of `panictry!` that works on a `Vec<Diagnostic>` instead of a single `DiagnosticBuilder`.
+macro_rules! panictry_buffer {
+ ($handler:expr, $e:expr) => {{
+ use rustc_errors::FatalError;
+ use std::result::Result::{Err, Ok};
+ match $e {
+ Ok(e) => e,
+ Err(errs) => {
+ for mut e in errs {
+ $handler.emit_diagnostic(&mut e);
+ }
+ FatalError.raise()
+ }
+ }
+ }};
+}
+
+pub fn parse_crate_from_file<'a>(input: &Path, sess: &'a ParseSess) -> PResult<'a, ast::Crate> {
+ let mut parser = new_parser_from_file(sess, input, None);
+ parser.parse_crate_mod()
+}
+
+pub fn parse_crate_attrs_from_file<'a>(
+ input: &Path,
+ sess: &'a ParseSess,
+) -> PResult<'a, Vec<ast::Attribute>> {
+ let mut parser = new_parser_from_file(sess, input, None);
+ parser.parse_inner_attributes()
+}
+
+pub fn parse_crate_from_source_str(
+ name: FileName,
+ source: String,
+ sess: &ParseSess,
+) -> PResult<'_, ast::Crate> {
+ new_parser_from_source_str(sess, name, source).parse_crate_mod()
+}
+
+pub fn parse_crate_attrs_from_source_str(
+ name: FileName,
+ source: String,
+ sess: &ParseSess,
+) -> PResult<'_, Vec<ast::Attribute>> {
+ new_parser_from_source_str(sess, name, source).parse_inner_attributes()
+}
+
+pub fn parse_stream_from_source_str(
+ name: FileName,
+ source: String,
+ sess: &ParseSess,
+ override_span: Option<Span>,
+) -> TokenStream {
+ let (stream, mut errors) =
+ source_file_to_stream(sess, sess.source_map().new_source_file(name, source), override_span);
+ emit_unclosed_delims(&mut errors, &sess);
+ stream
+}
+
+/// Creates a new parser from a source string.
+pub fn new_parser_from_source_str(sess: &ParseSess, name: FileName, source: String) -> Parser<'_> {
+ panictry_buffer!(&sess.span_diagnostic, maybe_new_parser_from_source_str(sess, name, source))
+}
+
+/// Creates a new parser from a source string. Returns any buffered errors from lexing the initial
+/// token stream.
+pub fn maybe_new_parser_from_source_str(
+ sess: &ParseSess,
+ name: FileName,
+ source: String,
+) -> Result<Parser<'_>, Vec<Diagnostic>> {
+ maybe_source_file_to_parser(sess, sess.source_map().new_source_file(name, source))
+}
+
+/// Creates a new parser, handling errors as appropriate if the file doesn't exist.
+/// If a span is given, that is used on an error as the source of the problem.
+pub fn new_parser_from_file<'a>(sess: &'a ParseSess, path: &Path, sp: Option<Span>) -> Parser<'a> {
+ source_file_to_parser(sess, file_to_source_file(sess, path, sp))
+}
+
+/// Given a session and a `source_file`, returns a parser.
+fn source_file_to_parser(sess: &ParseSess, source_file: Lrc<SourceFile>) -> Parser<'_> {
+ panictry_buffer!(&sess.span_diagnostic, maybe_source_file_to_parser(sess, source_file))
+}
+
+/// Given a session and a `source_file`, returns a parser. Returns any buffered errors from
+/// lexing the initial token stream.
+fn maybe_source_file_to_parser(
+ sess: &ParseSess,
+ source_file: Lrc<SourceFile>,
+) -> Result<Parser<'_>, Vec<Diagnostic>> {
+ let end_pos = source_file.end_pos;
+ let (stream, unclosed_delims) = maybe_file_to_stream(sess, source_file, None)?;
+ let mut parser = stream_to_parser(sess, stream, None);
+ parser.unclosed_delims = unclosed_delims;
+ if parser.token == token::Eof {
+ parser.token.span = Span::new(end_pos, end_pos, parser.token.span.ctxt(), None);
+ }
+
+ Ok(parser)
+}
+
+// Base abstractions
+
+/// Given a session and a path and an optional span (for error reporting),
+/// adds the path to the session's `source_map` and returns the new `source_file`,
+/// or errors when the file can't be read.
+fn try_file_to_source_file(
+ sess: &ParseSess,
+ path: &Path,
+ spanopt: Option<Span>,
+) -> Result<Lrc<SourceFile>, Diagnostic> {
+ sess.source_map().load_file(path).map_err(|e| {
+ let msg = format!("couldn't read {}: {}", path.display(), e);
+ let mut diag = Diagnostic::new(Level::Fatal, &msg);
+ if let Some(sp) = spanopt {
+ diag.set_span(sp);
+ }
+ diag
+ })
+}
+
+/// Given a session and a path and an optional span (for error reporting),
+/// adds the path to the session's `source_map` and returns the new `source_file`.
+fn file_to_source_file(sess: &ParseSess, path: &Path, spanopt: Option<Span>) -> Lrc<SourceFile> {
+ match try_file_to_source_file(sess, path, spanopt) {
+ Ok(source_file) => source_file,
+ Err(mut d) => {
+ sess.span_diagnostic.emit_diagnostic(&mut d);
+ FatalError.raise();
+ }
+ }
+}
+
+/// Given a `source_file`, produces a sequence of token trees.
+pub fn source_file_to_stream(
+ sess: &ParseSess,
+ source_file: Lrc<SourceFile>,
+ override_span: Option<Span>,
+) -> (TokenStream, Vec<lexer::UnmatchedBrace>) {
+ panictry_buffer!(&sess.span_diagnostic, maybe_file_to_stream(sess, source_file, override_span))
+}
+
+/// Given a source file, produces a sequence of token trees. Returns any buffered errors from
+/// parsing the token stream.
+pub fn maybe_file_to_stream(
+ sess: &ParseSess,
+ source_file: Lrc<SourceFile>,
+ override_span: Option<Span>,
+) -> Result<(TokenStream, Vec<lexer::UnmatchedBrace>), Vec<Diagnostic>> {
+ let src = source_file.src.as_ref().unwrap_or_else(|| {
+ sess.span_diagnostic.bug(&format!(
+ "cannot lex `source_file` without source: {}",
+ sess.source_map().filename_for_diagnostics(&source_file.name)
+ ));
+ });
+
+ let (token_trees, unmatched_braces) =
+ lexer::parse_token_trees(sess, src.as_str(), source_file.start_pos, override_span);
+
+ match token_trees {
+ Ok(stream) => Ok((stream, unmatched_braces)),
+ Err(err) => {
+ let mut buffer = Vec::with_capacity(1);
+ err.buffer(&mut buffer);
+ // Not using `emit_unclosed_delims` because we buffer the errors via `err.buffer` instead of emitting them
+ for unmatched in unmatched_braces {
+ if let Some(err) = make_unclosed_delims_error(unmatched, &sess) {
+ err.buffer(&mut buffer);
+ }
+ }
+ Err(buffer)
+ }
+ }
+}
+
+/// Given a stream and the `ParseSess`, produces a parser.
+pub fn stream_to_parser<'a>(
+ sess: &'a ParseSess,
+ stream: TokenStream,
+ subparser_name: Option<&'static str>,
+) -> Parser<'a> {
+ Parser::new(sess, stream, false, subparser_name)
+}
+
+/// Runs the given subparser `f` on the tokens of the given `attr`'s item.
+pub fn parse_in<'a, T>(
+ sess: &'a ParseSess,
+ tts: TokenStream,
+ name: &'static str,
+ mut f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
+) -> PResult<'a, T> {
+ let mut parser = Parser::new(sess, tts, false, Some(name));
+ let result = f(&mut parser)?;
+ if parser.token != token::Eof {
+ parser.unexpected()?;
+ }
+ Ok(result)
+}
+
+pub fn fake_token_stream_for_item(sess: &ParseSess, item: &ast::Item) -> TokenStream {
+ let source = pprust::item_to_string(item);
+ let filename = FileName::macro_expansion_source_code(&source);
+ parse_stream_from_source_str(filename, source, sess, Some(item.span))
+}
+
+pub fn fake_token_stream_for_crate(sess: &ParseSess, krate: &ast::Crate) -> TokenStream {
+ let source = pprust::crate_to_string_for_macros(krate);
+ let filename = FileName::macro_expansion_source_code(&source);
+ parse_stream_from_source_str(filename, source, sess, Some(krate.spans.inner_span))
+}
+
+pub fn parse_cfg_attr(
+ attr: &Attribute,
+ parse_sess: &ParseSess,
+) -> Option<(MetaItem, Vec<(AttrItem, Span)>)> {
+ match attr.get_normal_item().args {
+ ast::MacArgs::Delimited(dspan, delim, ref tts) if !tts.is_empty() => {
+ let msg = "wrong `cfg_attr` delimiters";
+ crate::validate_attr::check_meta_bad_delim(parse_sess, dspan, delim, msg);
+ match parse_in(parse_sess, tts.clone(), "`cfg_attr` input", |p| p.parse_cfg_attr()) {
+ Ok(r) => return Some(r),
+ Err(mut e) => {
+ e.help(&format!("the valid syntax is `{}`", CFG_ATTR_GRAMMAR_HELP))
+ .note(CFG_ATTR_NOTE_REF)
+ .emit();
+ }
+ }
+ }
+ _ => error_malformed_cfg_attr_missing(attr.span, parse_sess),
+ }
+ None
+}
+
+const CFG_ATTR_GRAMMAR_HELP: &str = "#[cfg_attr(condition, attribute, other_attribute, ...)]";
+const CFG_ATTR_NOTE_REF: &str = "for more information, visit \
+ <https://doc.rust-lang.org/reference/conditional-compilation.html\
+ #the-cfg_attr-attribute>";
+
+fn error_malformed_cfg_attr_missing(span: Span, parse_sess: &ParseSess) {
+ parse_sess
+ .span_diagnostic
+ .struct_span_err(span, "malformed `cfg_attr` attribute input")
+ .span_suggestion(
+ span,
+ "missing condition and attribute",
+ CFG_ATTR_GRAMMAR_HELP,
+ Applicability::HasPlaceholders,
+ )
+ .note(CFG_ATTR_NOTE_REF)
+ .emit();
+}
diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs
new file mode 100644
index 000000000..acdbddf40
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/attr.rs
@@ -0,0 +1,444 @@
+use super::{AttrWrapper, Capturing, FnParseMode, ForceCollect, Parser, PathStyle};
+use rustc_ast as ast;
+use rustc_ast::attr;
+use rustc_ast::token::{self, Delimiter, Nonterminal};
+use rustc_ast_pretty::pprust;
+use rustc_errors::{error_code, Diagnostic, PResult};
+use rustc_span::{sym, BytePos, Span};
+use std::convert::TryInto;
+
+use tracing::debug;
+
+// Public for rustfmt usage
+#[derive(Debug)]
+pub enum InnerAttrPolicy<'a> {
+ Permitted,
+ Forbidden { reason: &'a str, saw_doc_comment: bool, prev_outer_attr_sp: Option<Span> },
+}
+
+const DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG: &str = "an inner attribute is not \
+ permitted in this context";
+
+pub(super) const DEFAULT_INNER_ATTR_FORBIDDEN: InnerAttrPolicy<'_> = InnerAttrPolicy::Forbidden {
+ reason: DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG,
+ saw_doc_comment: false,
+ prev_outer_attr_sp: None,
+};
+
+enum OuterAttributeType {
+ DocComment,
+ DocBlockComment,
+ Attribute,
+}
+
+impl<'a> Parser<'a> {
+ /// Parses attributes that appear before an item.
+ pub(super) fn parse_outer_attributes(&mut self) -> PResult<'a, AttrWrapper> {
+ let mut outer_attrs: Vec<ast::Attribute> = Vec::new();
+ let mut just_parsed_doc_comment = false;
+ let start_pos = self.token_cursor.num_next_calls;
+ loop {
+ let attr = if self.check(&token::Pound) {
+ let prev_outer_attr_sp = outer_attrs.last().map(|attr| attr.span);
+
+ let inner_error_reason = if just_parsed_doc_comment {
+ "an inner attribute is not permitted following an outer doc comment"
+ } else if prev_outer_attr_sp.is_some() {
+ "an inner attribute is not permitted following an outer attribute"
+ } else {
+ DEFAULT_UNEXPECTED_INNER_ATTR_ERR_MSG
+ };
+ let inner_parse_policy = InnerAttrPolicy::Forbidden {
+ reason: inner_error_reason,
+ saw_doc_comment: just_parsed_doc_comment,
+ prev_outer_attr_sp,
+ };
+ just_parsed_doc_comment = false;
+ Some(self.parse_attribute(inner_parse_policy)?)
+ } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
+ if attr_style != ast::AttrStyle::Outer {
+ let span = self.token.span;
+ let mut err = self.sess.span_diagnostic.struct_span_err_with_code(
+ span,
+ "expected outer doc comment",
+ error_code!(E0753),
+ );
+ if let Some(replacement_span) = self.annotate_following_item_if_applicable(
+ &mut err,
+ span,
+ match comment_kind {
+ token::CommentKind::Line => OuterAttributeType::DocComment,
+ token::CommentKind::Block => OuterAttributeType::DocBlockComment,
+ },
+ ) {
+ err.note(
+ "inner doc comments like this (starting with `//!` or `/*!`) can \
+ only appear before items",
+ );
+ err.span_suggestion_verbose(
+ replacement_span,
+ "you might have meant to write a regular comment",
+ "",
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ }
+ self.bump();
+ just_parsed_doc_comment = true;
+ // Always make an outer attribute - this allows us to recover from a misplaced
+ // inner attribute.
+ Some(attr::mk_doc_comment(
+ comment_kind,
+ ast::AttrStyle::Outer,
+ data,
+ self.prev_token.span,
+ ))
+ } else {
+ None
+ };
+
+ if let Some(attr) = attr {
+ if attr.style == ast::AttrStyle::Outer {
+ outer_attrs.push(attr);
+ }
+ } else {
+ break;
+ }
+ }
+ Ok(AttrWrapper::new(outer_attrs.into(), start_pos))
+ }
+
+ /// Matches `attribute = # ! [ meta_item ]`.
+ /// `inner_parse_policy` prescribes how to handle inner attributes.
+ // Public for rustfmt usage.
+ pub fn parse_attribute(
+ &mut self,
+ inner_parse_policy: InnerAttrPolicy<'_>,
+ ) -> PResult<'a, ast::Attribute> {
+ debug!(
+ "parse_attribute: inner_parse_policy={:?} self.token={:?}",
+ inner_parse_policy, self.token
+ );
+ let lo = self.token.span;
+ // Attributes can't have attributes of their own [Editor's note: not with that attitude]
+ self.collect_tokens_no_attrs(|this| {
+ if this.eat(&token::Pound) {
+ let style = if this.eat(&token::Not) {
+ ast::AttrStyle::Inner
+ } else {
+ ast::AttrStyle::Outer
+ };
+
+ this.expect(&token::OpenDelim(Delimiter::Bracket))?;
+ let item = this.parse_attr_item(false)?;
+ this.expect(&token::CloseDelim(Delimiter::Bracket))?;
+ let attr_sp = lo.to(this.prev_token.span);
+
+ // Emit error if inner attribute is encountered and forbidden.
+ if style == ast::AttrStyle::Inner {
+ this.error_on_forbidden_inner_attr(attr_sp, inner_parse_policy);
+ }
+
+ Ok(attr::mk_attr_from_item(item, None, style, attr_sp))
+ } else {
+ let token_str = pprust::token_to_string(&this.token);
+ let msg = &format!("expected `#`, found `{token_str}`");
+ Err(this.struct_span_err(this.token.span, msg))
+ }
+ })
+ }
+
+ fn annotate_following_item_if_applicable(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ attr_type: OuterAttributeType,
+ ) -> Option<Span> {
+ let mut snapshot = self.create_snapshot_for_diagnostic();
+ let lo = span.lo()
+ + BytePos(match attr_type {
+ OuterAttributeType::Attribute => 1,
+ _ => 2,
+ });
+ let hi = lo + BytePos(1);
+ let replacement_span = span.with_lo(lo).with_hi(hi);
+ if let OuterAttributeType::DocBlockComment | OuterAttributeType::DocComment = attr_type {
+ snapshot.bump();
+ }
+ loop {
+ // skip any other attributes, we want the item
+ if snapshot.token.kind == token::Pound {
+ if let Err(err) = snapshot.parse_attribute(InnerAttrPolicy::Permitted) {
+ err.cancel();
+ return Some(replacement_span);
+ }
+ } else {
+ break;
+ }
+ }
+ match snapshot.parse_item_common(
+ AttrWrapper::empty(),
+ true,
+ false,
+ FnParseMode { req_name: |_| true, req_body: true },
+ ForceCollect::No,
+ ) {
+ Ok(Some(item)) => {
+ let attr_name = match attr_type {
+ OuterAttributeType::Attribute => "attribute",
+ _ => "doc comment",
+ };
+ err.span_label(
+ item.span,
+ &format!("the inner {} doesn't annotate this {}", attr_name, item.kind.descr()),
+ );
+ err.span_suggestion_verbose(
+ replacement_span,
+ &format!(
+ "to annotate the {}, change the {} from inner to outer style",
+ item.kind.descr(),
+ attr_name
+ ),
+ match attr_type {
+ OuterAttributeType::Attribute => "",
+ OuterAttributeType::DocBlockComment => "*",
+ OuterAttributeType::DocComment => "/",
+ },
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ return None;
+ }
+ Err(item_err) => {
+ item_err.cancel();
+ }
+ Ok(None) => {}
+ }
+ Some(replacement_span)
+ }
+
+ pub(super) fn error_on_forbidden_inner_attr(&self, attr_sp: Span, policy: InnerAttrPolicy<'_>) {
+ if let InnerAttrPolicy::Forbidden { reason, saw_doc_comment, prev_outer_attr_sp } = policy {
+ let prev_outer_attr_note =
+ if saw_doc_comment { "previous doc comment" } else { "previous outer attribute" };
+
+ let mut diag = self.struct_span_err(attr_sp, reason);
+
+ if let Some(prev_outer_attr_sp) = prev_outer_attr_sp {
+ diag.span_label(attr_sp, "not permitted following an outer attribute")
+ .span_label(prev_outer_attr_sp, prev_outer_attr_note);
+ }
+
+ diag.note(
+ "inner attributes, like `#![no_std]`, annotate the item enclosing them, and \
+ are usually found at the beginning of source files",
+ );
+ if self
+ .annotate_following_item_if_applicable(
+ &mut diag,
+ attr_sp,
+ OuterAttributeType::Attribute,
+ )
+ .is_some()
+ {
+ diag.note("outer attributes, like `#[test]`, annotate the item following them");
+ };
+ diag.emit();
+ }
+ }
+
+ /// Parses an inner part of an attribute (the path and following tokens).
+ /// The tokens must be either a delimited token stream, or empty token stream,
+ /// or the "legacy" key-value form.
+ /// PATH `(` TOKEN_STREAM `)`
+ /// PATH `[` TOKEN_STREAM `]`
+ /// PATH `{` TOKEN_STREAM `}`
+ /// PATH
+ /// PATH `=` UNSUFFIXED_LIT
+ /// The delimiters or `=` are still put into the resulting token stream.
+ pub fn parse_attr_item(&mut self, capture_tokens: bool) -> PResult<'a, ast::AttrItem> {
+ let item = match self.token.kind {
+ token::Interpolated(ref nt) => match **nt {
+ Nonterminal::NtMeta(ref item) => Some(item.clone().into_inner()),
+ _ => None,
+ },
+ _ => None,
+ };
+ Ok(if let Some(item) = item {
+ self.bump();
+ item
+ } else {
+ let do_parse = |this: &mut Self| {
+ let path = this.parse_path(PathStyle::Mod)?;
+ let args = this.parse_attr_args()?;
+ Ok(ast::AttrItem { path, args, tokens: None })
+ };
+ // Attr items don't have attributes
+ if capture_tokens { self.collect_tokens_no_attrs(do_parse) } else { do_parse(self) }?
+ })
+ }
+
+ /// Parses attributes that appear after the opening of an item. These should
+ /// be preceded by an exclamation mark, but we accept and warn about one
+ /// terminated by a semicolon.
+ ///
+ /// Matches `inner_attrs*`.
+ pub(crate) fn parse_inner_attributes(&mut self) -> PResult<'a, Vec<ast::Attribute>> {
+ let mut attrs: Vec<ast::Attribute> = vec![];
+ loop {
+ let start_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
+ // Only try to parse if it is an inner attribute (has `!`).
+ let attr = if self.check(&token::Pound) && self.look_ahead(1, |t| t == &token::Not) {
+ Some(self.parse_attribute(InnerAttrPolicy::Permitted)?)
+ } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind {
+ if attr_style == ast::AttrStyle::Inner {
+ self.bump();
+ Some(attr::mk_doc_comment(comment_kind, attr_style, data, self.prev_token.span))
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+ if let Some(attr) = attr {
+ let end_pos: u32 = self.token_cursor.num_next_calls.try_into().unwrap();
+ // If we are currently capturing tokens, mark the location of this inner attribute.
+ // If capturing ends up creating a `LazyTokenStream`, we will include
+ // this replace range with it, removing the inner attribute from the final
+ // `AttrAnnotatedTokenStream`. Inner attributes are stored in the parsed AST node.
+ // During macro expansion, they are selectively inserted back into the
+ // token stream (the first inner attribute is removed each time we invoke the
+ // corresponding macro).
+ let range = start_pos..end_pos;
+ if let Capturing::Yes = self.capture_state.capturing {
+ self.capture_state.inner_attr_ranges.insert(attr.id, (range, vec![]));
+ }
+ attrs.push(attr);
+ } else {
+ break;
+ }
+ }
+ Ok(attrs)
+ }
+
+ pub(crate) fn parse_unsuffixed_lit(&mut self) -> PResult<'a, ast::Lit> {
+ let lit = self.parse_lit()?;
+ debug!("checking if {:?} is unusuffixed", lit);
+
+ if !lit.kind.is_unsuffixed() {
+ self.struct_span_err(lit.span, "suffixed literals are not allowed in attributes")
+ .help(
+ "instead of using a suffixed literal (`1u8`, `1.0f32`, etc.), \
+ use an unsuffixed version (`1`, `1.0`, etc.)",
+ )
+ .emit();
+ }
+
+ Ok(lit)
+ }
+
+ /// Parses `cfg_attr(pred, attr_item_list)` where `attr_item_list` is comma-delimited.
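+ /// For example, for `#[cfg_attr(unix, path = "unix.rs")]` the input here is
+ /// `unix, path = "unix.rs"`, yielding the predicate `unix` and one attr item.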
+ pub fn parse_cfg_attr(&mut self) -> PResult<'a, (ast::MetaItem, Vec<(ast::AttrItem, Span)>)> {
+ let cfg_predicate = self.parse_meta_item()?;
+ self.expect(&token::Comma)?;
+
+ // Presumably, the majority of the time there will only be one attr.
+ let mut expanded_attrs = Vec::with_capacity(1);
+ while self.token.kind != token::Eof {
+ let lo = self.token.span;
+ let item = self.parse_attr_item(true)?;
+ expanded_attrs.push((item, lo.to(self.prev_token.span)));
+ if !self.eat(&token::Comma) {
+ break;
+ }
+ }
+
+ Ok((cfg_predicate, expanded_attrs))
+ }
+
+ /// Matches `COMMASEP(meta_item_inner)`.
+ pub(crate) fn parse_meta_seq_top(&mut self) -> PResult<'a, Vec<ast::NestedMetaItem>> {
+ // Presumably, the majority of the time there will only be one attr.
+ let mut nmis = Vec::with_capacity(1);
+ while self.token.kind != token::Eof {
+ nmis.push(self.parse_meta_item_inner()?);
+ if !self.eat(&token::Comma) {
+ break;
+ }
+ }
+ Ok(nmis)
+ }
+
+ /// Matches the following grammar (per RFC 1559).
+ /// ```ebnf
+ /// meta_item : PATH ( '=' UNSUFFIXED_LIT | '(' meta_item_inner? ')' )? ;
+ /// meta_item_inner : (meta_item | UNSUFFIXED_LIT) (',' meta_item_inner)? ;
+ /// ```
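+ /// For example, `test`, `derive(Debug, Clone)`, and `since = "1.0.0"` all
+ /// parse as meta items.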
+ pub fn parse_meta_item(&mut self) -> PResult<'a, ast::MetaItem> {
+ let nt_meta = match self.token.kind {
+ token::Interpolated(ref nt) => match **nt {
+ token::NtMeta(ref e) => Some(e.clone()),
+ _ => None,
+ },
+ _ => None,
+ };
+
+ if let Some(item) = nt_meta {
+ return match item.meta(item.path.span) {
+ Some(meta) => {
+ self.bump();
+ Ok(meta)
+ }
+ None => self.unexpected(),
+ };
+ }
+
+ let lo = self.token.span;
+ let path = self.parse_path(PathStyle::Mod)?;
+ let kind = self.parse_meta_item_kind()?;
+ let span = lo.to(self.prev_token.span);
+ Ok(ast::MetaItem { path, kind, span })
+ }
+
+ pub(crate) fn parse_meta_item_kind(&mut self) -> PResult<'a, ast::MetaItemKind> {
+ Ok(if self.eat(&token::Eq) {
+ ast::MetaItemKind::NameValue(self.parse_unsuffixed_lit()?)
+ } else if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ // Matches `meta_seq = ( COMMASEP(meta_item_inner) )`.
+ let (list, _) = self.parse_paren_comma_seq(|p| p.parse_meta_item_inner())?;
+ ast::MetaItemKind::List(list)
+ } else {
+ ast::MetaItemKind::Word
+ })
+ }
+
+ /// Matches `meta_item_inner : (meta_item | UNSUFFIXED_LIT) ;`.
+ fn parse_meta_item_inner(&mut self) -> PResult<'a, ast::NestedMetaItem> {
+ match self.parse_unsuffixed_lit() {
+ Ok(lit) => return Ok(ast::NestedMetaItem::Literal(lit)),
+ Err(err) => err.cancel(),
+ }
+
+ match self.parse_meta_item() {
+ Ok(mi) => return Ok(ast::NestedMetaItem::MetaItem(mi)),
+ Err(err) => err.cancel(),
+ }
+
+ let found = pprust::token_to_string(&self.token);
+ let msg = format!("expected unsuffixed literal or identifier, found `{found}`");
+ Err(self.struct_span_err(self.token.span, &msg))
+ }
+}
+
+pub fn maybe_needs_tokens(attrs: &[ast::Attribute]) -> bool {
+ // One of the attributes may either itself be a macro,
+ // or expand to macro attributes (`cfg_attr`).
+ attrs.iter().any(|attr| {
+ if attr.is_doc_comment() {
+ return false;
+ }
+ attr.ident().map_or(true, |ident| {
+ ident.name == sym::cfg_attr || !rustc_feature::is_builtin_attr_name(ident.name)
+ })
+ })
+}
diff --git a/compiler/rustc_parse/src/parser/attr_wrapper.rs b/compiler/rustc_parse/src/parser/attr_wrapper.rs
new file mode 100644
index 000000000..6c750ff42
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/attr_wrapper.rs
@@ -0,0 +1,464 @@
+use super::{Capturing, FlatToken, ForceCollect, Parser, ReplaceRange, TokenCursor, TrailingToken};
+use rustc_ast::token::{self, Delimiter, Token, TokenKind};
+use rustc_ast::tokenstream::{AttrAnnotatedTokenStream, AttributesData, CreateTokenStream};
+use rustc_ast::tokenstream::{AttrAnnotatedTokenTree, DelimSpan, LazyTokenStream, Spacing};
+use rustc_ast::{self as ast};
+use rustc_ast::{AttrVec, Attribute, HasAttrs, HasTokens};
+use rustc_errors::PResult;
+use rustc_span::{sym, Span};
+
+use std::convert::TryInto;
+use std::ops::Range;
+
+/// A wrapper type to ensure that the parser handles outer attributes correctly.
+/// When we parse outer attributes, we need to ensure that we capture tokens
+/// for the attribute target. This allows us to perform cfg-expansion on
+/// a token stream before we invoke a derive proc-macro.
+///
+/// This wrapper prevents direct access to the underlying `Vec<ast::Attribute>`.
+/// Parsing code can only get access to the underlying attributes
+/// by passing an `AttrWrapper` to `collect_tokens_trailing_token`.
+/// This makes it difficult to accidentally construct an AST node
+/// (which stores a `Vec<ast::Attribute>`) without first collecting tokens.
+///
+/// This struct has its own module, to ensure that the parser code
+/// cannot directly access the `attrs` field.
+#[derive(Debug, Clone)]
+pub struct AttrWrapper {
+ attrs: AttrVec,
+ // The start of the outer attributes in the token cursor.
+ // This allows us to create a `ReplaceRange` for the entire attribute
+ // target, including outer attributes.
+ start_pos: usize,
+}
+
+// This struct is passed around very frequently,
+// so make sure it doesn't accidentally get larger
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(AttrWrapper, 16);
+
+impl AttrWrapper {
+ pub(super) fn new(attrs: AttrVec, start_pos: usize) -> AttrWrapper {
+ AttrWrapper { attrs, start_pos }
+ }
+ pub fn empty() -> AttrWrapper {
+ AttrWrapper { attrs: AttrVec::new(), start_pos: usize::MAX }
+ }
+ // FIXME: Delay span bug here?
+ pub(crate) fn take_for_recovery(self) -> AttrVec {
+ self.attrs
+ }
+
+ // FIXME: require passing an NT to prevent misuse of this method
+ pub(crate) fn prepend_to_nt_inner(self, attrs: &mut Vec<Attribute>) {
+ let mut self_attrs: Vec<_> = self.attrs.into();
+ std::mem::swap(attrs, &mut self_attrs);
+ attrs.extend(self_attrs);
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.attrs.is_empty()
+ }
+
+ pub fn maybe_needs_tokens(&self) -> bool {
+ crate::parser::attr::maybe_needs_tokens(&self.attrs)
+ }
+}
+
+/// Returns `true` if `attrs` contains a `cfg` or `cfg_attr` attribute
+fn has_cfg_or_cfg_attr(attrs: &[Attribute]) -> bool {
+ // NOTE: Builtin attributes like `cfg` and `cfg_attr` cannot be renamed via imports.
+ // Therefore, the absence of a literal `cfg` or `cfg_attr` guarantees that
+ // we don't need to do any eager expansion.
+ attrs.iter().any(|attr| {
+ attr.ident().map_or(false, |ident| ident.name == sym::cfg || ident.name == sym::cfg_attr)
+ })
+}
+
+// Produces a `TokenStream` on-demand. Using `cursor_snapshot`
+// and `num_calls`, we can reconstruct the `TokenStream` seen
+// by the callback. This allows us to avoid producing a `TokenStream`
+// if it is never needed - for example, a captured `macro_rules!`
+// argument that is never passed to a proc macro.
+// In practice token stream creation happens rarely compared to
+// calls to `collect_tokens` (see some statistics in #78736),
+// so we are doing as little up-front work as possible.
+//
+// This also makes `Parser` very cheap to clone, since
+// there is no intermediate collection buffer to clone.
+#[derive(Clone)]
+struct LazyTokenStreamImpl {
+ start_token: (Token, Spacing),
+ cursor_snapshot: TokenCursor,
+ num_calls: usize,
+ break_last_token: bool,
+ replace_ranges: Box<[ReplaceRange]>,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(LazyTokenStreamImpl, 144);
+
+impl CreateTokenStream for LazyTokenStreamImpl {
+ fn create_token_stream(&self) -> AttrAnnotatedTokenStream {
+ // The token produced by the final call to `{,inlined_}next` was not
+ // actually consumed by the callback. The combination of chaining the
+ // initial token and using `take` produces the desired result - we
+ // produce an empty `TokenStream` if no calls were made, and omit the
+ // final token otherwise.
+ let mut cursor_snapshot = self.cursor_snapshot.clone();
+ let tokens =
+ std::iter::once((FlatToken::Token(self.start_token.0.clone()), self.start_token.1))
+ .chain((0..self.num_calls).map(|_| {
+ let token = cursor_snapshot.next(cursor_snapshot.desugar_doc_comments);
+ (FlatToken::Token(token.0), token.1)
+ }))
+ .take(self.num_calls);
+
+ if !self.replace_ranges.is_empty() {
+ let mut tokens: Vec<_> = tokens.collect();
+ let mut replace_ranges = self.replace_ranges.clone();
+ replace_ranges.sort_by_key(|(range, _)| range.start);
+
+ #[cfg(debug_assertions)]
+ {
+ for [(range, tokens), (next_range, next_tokens)] in replace_ranges.array_windows() {
+ assert!(
+ range.end <= next_range.start || range.end >= next_range.end,
+ "Replace ranges should either be disjoint or nested: ({:?}, {:?}) ({:?}, {:?})",
+ range,
+ tokens,
+ next_range,
+ next_tokens,
+ );
+ }
+ }
+
+ // Process the replace ranges, starting from the highest start
+ // position and working our way back. If we have tokens like:
+ //
+ // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`
+ //
+ // Then we will generate replace ranges for both
+ // the `#[cfg(FALSE)] field: bool` and the entire
+ // `#[cfg(FALSE)] struct Foo { #[cfg(FALSE)] field: bool }`.
+ //
+ // By starting processing from the replace range with the greatest
+ // start position, we ensure that any replace range which encloses
+ // another replace range will capture the *replaced* tokens for the inner
+ // range, not the original tokens.
+ for (range, new_tokens) in replace_ranges.iter().rev() {
+ assert!(!range.is_empty(), "Cannot replace an empty range: {:?}", range);
+ // Replace ranges are only allowed to decrease the number of tokens.
+ assert!(
+ range.len() >= new_tokens.len(),
+ "Range {:?} has greater len than {:?}",
+ range,
+ new_tokens
+ );
+
+ // Replace any removed tokens with `FlatToken::Empty`.
+ // This keeps the total length of `tokens` constant throughout the
+ // replacement process, allowing us to use all of the `ReplaceRanges` entries
+ // without adjusting indices.
+ let filler = std::iter::repeat((FlatToken::Empty, Spacing::Alone))
+ .take(range.len() - new_tokens.len());
+
+ tokens.splice(
+ (range.start as usize)..(range.end as usize),
+ new_tokens.clone().into_iter().chain(filler),
+ );
+ }
+ make_token_stream(tokens.into_iter(), self.break_last_token)
+ } else {
+ make_token_stream(tokens, self.break_last_token)
+ }
+ }
+}
+
+impl<'a> Parser<'a> {
+ /// Records all tokens consumed by the provided callback,
+ /// including the current token. These tokens are collected
+ /// into a `LazyTokenStream`, and returned along with the result
+ /// of the callback.
+ ///
+ /// Note: If your callback consumes an opening delimiter
+ /// (including the case where you call `collect_tokens`
+ /// when the current token is an opening delimiter),
+ /// you must also consume the corresponding closing delimiter.
+ ///
+ /// That is, you can consume
+ /// `something ([{ }])` or `([{}])`, but not `([{}]`
+ ///
+ /// This restriction shouldn't be an issue in practice,
+ /// since this function is used to record the tokens for
+ /// a parsed AST item, which always has matching delimiters.
+ pub fn collect_tokens_trailing_token<R: HasAttrs + HasTokens>(
+ &mut self,
+ attrs: AttrWrapper,
+ force_collect: ForceCollect,
+ f: impl FnOnce(&mut Self, Vec<ast::Attribute>) -> PResult<'a, (R, TrailingToken)>,
+ ) -> PResult<'a, R> {
+ // We only bail out when nothing could possibly observe the collected tokens:
+ // 1. We cannot be force collecting tokens (since force-collecting requires tokens
+ // by definition)
+ if matches!(force_collect, ForceCollect::No)
+ // None of our outer attributes can require tokens (e.g. a proc-macro)
+ && !attrs.maybe_needs_tokens()
+ // If our target supports custom inner attributes, then we cannot bail
+ // out early, since we may need to capture tokens for a custom inner attribute
+ // invocation.
+ && !R::SUPPORTS_CUSTOM_INNER_ATTRS
+ // Never bail out early in `capture_cfg` mode, since there might be `#[cfg]`
+ // or `#[cfg_attr]` attributes.
+ && !self.capture_cfg
+ {
+ return Ok(f(self, attrs.attrs.into())?.0);
+ }
+
+ let start_token = (self.token.clone(), self.token_spacing);
+ let cursor_snapshot = self.token_cursor.clone();
+
+ let has_outer_attrs = !attrs.attrs.is_empty();
+ let prev_capturing = std::mem::replace(&mut self.capture_state.capturing, Capturing::Yes);
+ let replace_ranges_start = self.capture_state.replace_ranges.len();
+
+ let ret = f(self, attrs.attrs.into());
+
+ self.capture_state.capturing = prev_capturing;
+
+ let (mut ret, trailing) = ret?;
+
+ // When we're not in `capture-cfg` mode, then bail out early if:
+ // 1. Our target doesn't support tokens at all (e.g. we're parsing an `NtIdent`)
+ // so there's nothing for us to do.
+ // 2. Our target already has tokens set (e.g. we've parsed something
+ // like `#[my_attr] $item`. The actual parsing code takes care of prepending
+ // any attributes to the nonterminal, so we don't need to modify the
+ // already captured tokens.
+ // Note that this check is independent of `force_collect`: if we already
+ // have tokens, or can't even store them, then there's never a need to
+ // force collection of new tokens.
+ if !self.capture_cfg && matches!(ret.tokens_mut(), None | Some(Some(_))) {
+ return Ok(ret);
+ }
+
+ // This is very similar to the bail out check at the start of this function.
+ // Now that we've parsed an AST node, we have more information available.
+ if matches!(force_collect, ForceCollect::No)
+ // We now have inner attributes available, so this check is more precise
+ // than `attrs.maybe_needs_tokens()` at the start of the function.
+ // As a result, we don't need to check `R::SUPPORTS_CUSTOM_INNER_ATTRS`
+ && !crate::parser::attr::maybe_needs_tokens(ret.attrs())
+ // Subtle: We call `has_cfg_or_cfg_attr` with the attrs from `ret`.
+ // This ensures that we consider inner attributes (e.g. `#![cfg]`),
+ // which require us to have tokens available
+ // We also call `has_cfg_or_cfg_attr` at the beginning of this function,
+ // but we only bail out if there's no possibility of inner attributes
+ // (!R::SUPPORTS_CUSTOM_INNER_ATTRS)
+ // We only care about `#[cfg]` or `#[cfg_attr]` in `capture_cfg`
+ // mode: during normal parsing, we don't need any special capturing
+ // for those attributes, since they're builtin.
+ && !(self.capture_cfg && has_cfg_or_cfg_attr(ret.attrs()))
+ {
+ return Ok(ret);
+ }
+
+ let mut inner_attr_replace_ranges = Vec::new();
+ // Take the captured ranges for any inner attributes that we parsed.
+ for inner_attr in ret.attrs().iter().filter(|a| a.style == ast::AttrStyle::Inner) {
+ if let Some(attr_range) = self.capture_state.inner_attr_ranges.remove(&inner_attr.id) {
+ inner_attr_replace_ranges.push(attr_range);
+ } else {
+ self.sess
+ .span_diagnostic
+ .delay_span_bug(inner_attr.span, "Missing token range for attribute");
+ }
+ }
+
+ let replace_ranges_end = self.capture_state.replace_ranges.len();
+
+ let cursor_snapshot_next_calls = cursor_snapshot.num_next_calls;
+ let mut end_pos = self.token_cursor.num_next_calls;
+
+ // Capture a trailing token if requested by the callback 'f'
+ match trailing {
+ TrailingToken::None => {}
+ TrailingToken::Semi => {
+ assert_eq!(self.token.kind, token::Semi);
+ end_pos += 1;
+ }
+ TrailingToken::MaybeComma => {
+ if self.token.kind == token::Comma {
+ end_pos += 1;
+ }
+ }
+ }
+
+ // If we 'broke' the last token (e.g. breaking a '>>' token into two '>' tokens),
+ // then extend the range of captured tokens to include it, since the parser
+ // was not actually bumped past it. When the `LazyTokenStream` gets converted
+ // into an `AttrAnnotatedTokenStream`, we will create the proper token.
+ if self.token_cursor.break_last_token {
+ assert_eq!(
+ trailing,
+ TrailingToken::None,
+ "Cannot set `break_last_token` and have trailing token"
+ );
+ end_pos += 1;
+ }
+
+ let num_calls = end_pos - cursor_snapshot_next_calls;
+
+ // If we have no attributes, then we will never need to
+ // use any replace ranges.
+ let replace_ranges: Box<[ReplaceRange]> = if ret.attrs().is_empty() && !self.capture_cfg {
+ Box::new([])
+ } else {
+ // Grab any replace ranges that occur *inside* the current AST node.
+ // We will perform the actual replacement when we convert the `LazyTokenStream`
+ // to an `AttrAnnotatedTokenStream`
+ let start_calls: u32 = cursor_snapshot_next_calls.try_into().unwrap();
+ self.capture_state.replace_ranges[replace_ranges_start..replace_ranges_end]
+ .iter()
+ .cloned()
+ .chain(inner_attr_replace_ranges.clone().into_iter())
+ .map(|(range, tokens)| {
+ ((range.start - start_calls)..(range.end - start_calls), tokens)
+ })
+ .collect()
+ };
+
+ let tokens = LazyTokenStream::new(LazyTokenStreamImpl {
+ start_token,
+ num_calls,
+ cursor_snapshot,
+ break_last_token: self.token_cursor.break_last_token,
+ replace_ranges,
+ });
+
+ // If we support tokens at all
+ if let Some(target_tokens) = ret.tokens_mut() {
+ if target_tokens.is_none() {
+ // Store our newly captured tokens into the AST node.
+ *target_tokens = Some(tokens.clone());
+ }
+ }
+
+ let final_attrs = ret.attrs();
+
+ // If `capture_cfg` is set and we're inside a recursive call to
+ // `collect_tokens_trailing_token`, then we need to register a replace range
+ // if we have `#[cfg]` or `#[cfg_attr]`. This allows us to run eager cfg-expansion
+ // on the captured token stream.
+ if self.capture_cfg
+ && matches!(self.capture_state.capturing, Capturing::Yes)
+ && has_cfg_or_cfg_attr(&final_attrs)
+ {
+ let attr_data = AttributesData { attrs: final_attrs.to_vec().into(), tokens };
+
+ // Replace the entire AST node that we just parsed, including attributes,
+ // with a `FlatToken::AttrTarget`. If this AST node is inside an item
+ // that has `#[derive]`, then this will allow us to cfg-expand this
+ // AST node.
+ let start_pos =
+ if has_outer_attrs { attrs.start_pos } else { cursor_snapshot_next_calls };
+ let new_tokens = vec![(FlatToken::AttrTarget(attr_data), Spacing::Alone)];
+
+ assert!(
+ !self.token_cursor.break_last_token,
+ "Should not have unglued last token with cfg attr"
+ );
+ let range: Range<u32> = (start_pos.try_into().unwrap())..(end_pos.try_into().unwrap());
+ self.capture_state.replace_ranges.push((range, new_tokens));
+ self.capture_state.replace_ranges.extend(inner_attr_replace_ranges);
+ }
+
+ // Only clear our `replace_ranges` when we're finished capturing entirely.
+ if matches!(self.capture_state.capturing, Capturing::No) {
+ self.capture_state.replace_ranges.clear();
+ // We don't clear `inner_attr_ranges`, as doing so repeatedly
+ // had a measurable performance impact. Most inner attributes that
+ // we insert will get removed; when we drop the parser, we'll free
+ // up the memory used by any attributes that we didn't remove from the map.
+ }
+ Ok(ret)
+ }
+}
+
+/// Converts a flattened iterator of tokens (including open and close delimiter tokens)
+/// into a `TokenStream`, creating a `TokenTree::Delimited` for each matching pair
+/// of open and close delims.
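+/// For example, the flat sequence `(` `a` `)` `b` becomes a two-element stream:
+/// a `Delimited` tree containing `a`, followed by the plain token `b`.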
+fn make_token_stream(
+ mut iter: impl Iterator<Item = (FlatToken, Spacing)>,
+ break_last_token: bool,
+) -> AttrAnnotatedTokenStream {
+ #[derive(Debug)]
+ struct FrameData {
+ // This is `None` for the first frame, `Some` for all others.
+ open_delim_sp: Option<(Delimiter, Span)>,
+ inner: Vec<(AttrAnnotatedTokenTree, Spacing)>,
+ }
+ let mut stack = vec![FrameData { open_delim_sp: None, inner: vec![] }];
+ let mut token_and_spacing = iter.next();
+ while let Some((token, spacing)) = token_and_spacing {
+ match token {
+ FlatToken::Token(Token { kind: TokenKind::OpenDelim(delim), span }) => {
+ stack.push(FrameData { open_delim_sp: Some((delim, span)), inner: vec![] });
+ }
+ FlatToken::Token(Token { kind: TokenKind::CloseDelim(delim), span }) => {
+ let frame_data = stack
+ .pop()
+ .unwrap_or_else(|| panic!("Token stack was empty for token: {:?}", token));
+
+ let (open_delim, open_sp) = frame_data.open_delim_sp.unwrap();
+ assert_eq!(
+ open_delim, delim,
+ "Mismatched open/close delims: open={:?} close={:?}",
+ open_delim, span
+ );
+ let dspan = DelimSpan::from_pair(open_sp, span);
+ let stream = AttrAnnotatedTokenStream::new(frame_data.inner);
+ let delimited = AttrAnnotatedTokenTree::Delimited(dspan, delim, stream);
+ stack
+ .last_mut()
+ .unwrap_or_else(|| {
+ panic!("Bottom token frame is missing for token: {:?}", token)
+ })
+ .inner
+ .push((delimited, Spacing::Alone));
+ }
+ FlatToken::Token(token) => stack
+ .last_mut()
+ .expect("Bottom token frame is missing!")
+ .inner
+ .push((AttrAnnotatedTokenTree::Token(token), spacing)),
+ FlatToken::AttrTarget(data) => stack
+ .last_mut()
+ .expect("Bottom token frame is missing!")
+ .inner
+ .push((AttrAnnotatedTokenTree::Attributes(data), spacing)),
+ FlatToken::Empty => {}
+ }
+ token_and_spacing = iter.next();
+ }
+ let mut final_buf = stack.pop().expect("Missing final buf!");
+ if break_last_token {
+ let (last_token, spacing) = final_buf.inner.pop().unwrap();
+ if let AttrAnnotatedTokenTree::Token(last_token) = last_token {
+ let unglued_first = last_token.kind.break_two_token_op().unwrap().0;
+
+ // An 'unglued' token is always two ASCII characters
+ let mut first_span = last_token.span.shrink_to_lo();
+ first_span = first_span.with_hi(first_span.lo() + rustc_span::BytePos(1));
+
+ final_buf.inner.push((
+ AttrAnnotatedTokenTree::Token(Token::new(unglued_first, first_span)),
+ spacing,
+ ));
+ } else {
+ panic!("Unexpected last token {:?}", last_token)
+ }
+ }
+ assert!(stack.is_empty(), "Stack should be empty: final_buf={:?} stack={:?}", final_buf, stack);
+ AttrAnnotatedTokenStream::new(final_buf.inner)
+}
diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs
new file mode 100644
index 000000000..a2155ac1d
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/diagnostics.rs
@@ -0,0 +1,2740 @@
+use super::pat::Expected;
+use super::{
+ BlockMode, CommaRecoveryMode, Parser, PathStyle, Restrictions, SemiColonMode, SeqSep,
+ TokenExpectType, TokenType,
+};
+
+use crate::lexer::UnmatchedBrace;
+use rustc_ast as ast;
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter, Lit, LitKind, TokenKind};
+use rustc_ast::util::parser::AssocOp;
+use rustc_ast::{
+ AngleBracketedArg, AngleBracketedArgs, AnonConst, AttrVec, BinOpKind, BindingMode, Block,
+ BlockCheckMode, Expr, ExprKind, GenericArg, Generics, Item, ItemKind, Mutability, Param, Pat,
+ PatKind, Path, PathSegment, QSelf, Ty, TyKind,
+};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{
+ fluent, Applicability, DiagnosticBuilder, DiagnosticMessage, Handler, MultiSpan, PResult,
+};
+use rustc_errors::{pluralize, struct_span_err, Diagnostic, EmissionGuarantee, ErrorGuaranteed};
+use rustc_macros::{SessionDiagnostic, SessionSubdiagnostic};
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::{Span, SpanSnippetError, DUMMY_SP};
+use std::ops::{Deref, DerefMut};
+
+use std::mem::take;
+
+use crate::parser;
+use tracing::{debug, trace};
+
+const TURBOFISH_SUGGESTION_STR: &str =
+ "use `::<...>` instead of `<...>` to specify lifetime, type, or const arguments";
+
+/// Creates a placeholder argument.
+pub(super) fn dummy_arg(ident: Ident) -> Param {
+ let pat = P(Pat {
+ id: ast::DUMMY_NODE_ID,
+ kind: PatKind::Ident(BindingMode::ByValue(Mutability::Not), ident, None),
+ span: ident.span,
+ tokens: None,
+ });
+ let ty = Ty { kind: TyKind::Err, span: ident.span, id: ast::DUMMY_NODE_ID, tokens: None };
+ Param {
+ attrs: AttrVec::default(),
+ id: ast::DUMMY_NODE_ID,
+ pat,
+ span: ident.span,
+ ty: P(ty),
+ is_placeholder: false,
+ }
+}
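+
+// For example, when recovering from a C-style parameter declaration such as
+// `fn foo(String s) {}`, the parser can substitute a `dummy_arg` so the rest of
+// the signature can still be parsed and checked.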
+
+pub enum Error {
+ UselessDocComment,
+}
+
+impl Error {
+ fn span_err(
+ self,
+ sp: impl Into<MultiSpan>,
+ handler: &Handler,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ match self {
+ Error::UselessDocComment => {
+ let mut err = struct_span_err!(
+ handler,
+ sp,
+ E0585,
+ "found a documentation comment that doesn't document anything",
+ );
+ err.help(
+ "doc comments must come before what they document, maybe a comment was \
+ intended with `//`?",
+ );
+ err
+ }
+ }
+ }
+}
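+
+// E0585 fires on a doc comment with nothing to document, e.g.:
+//
+//     fn main() {
+//         /// This comment documents nothing and is rejected.
+//     }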
+
+pub(super) trait RecoverQPath: Sized + 'static {
+ const PATH_STYLE: PathStyle = PathStyle::Expr;
+ fn to_ty(&self) -> Option<P<Ty>>;
+ fn recovered(qself: Option<QSelf>, path: ast::Path) -> Self;
+}
+
+impl RecoverQPath for Ty {
+ const PATH_STYLE: PathStyle = PathStyle::Type;
+ fn to_ty(&self) -> Option<P<Ty>> {
+ Some(P(self.clone()))
+ }
+ fn recovered(qself: Option<QSelf>, path: ast::Path) -> Self {
+ Self {
+ span: path.span,
+ kind: TyKind::Path(qself, path),
+ id: ast::DUMMY_NODE_ID,
+ tokens: None,
+ }
+ }
+}
+
+impl RecoverQPath for Pat {
+ fn to_ty(&self) -> Option<P<Ty>> {
+ self.to_ty()
+ }
+ fn recovered(qself: Option<QSelf>, path: ast::Path) -> Self {
+ Self {
+ span: path.span,
+ kind: PatKind::Path(qself, path),
+ id: ast::DUMMY_NODE_ID,
+ tokens: None,
+ }
+ }
+}
+
+impl RecoverQPath for Expr {
+ fn to_ty(&self) -> Option<P<Ty>> {
+ self.to_ty()
+ }
+ fn recovered(qself: Option<QSelf>, path: ast::Path) -> Self {
+ Self {
+ span: path.span,
+ kind: ExprKind::Path(qself, path),
+ attrs: AttrVec::new(),
+ id: ast::DUMMY_NODE_ID,
+ tokens: None,
+ }
+ }
+}
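+
+// For example, these impls let the parser recover `[T]::AssocItem` or
+// `(T, U)::AssocItem` as the valid qualified paths `<[T]>::AssocItem` and
+// `<(T, U)>::AssocItem` (see `maybe_recover_from_bad_qpath` below).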
+
+/// Control whether the closing delimiter should be consumed when calling `Parser::consume_block`.
+pub(crate) enum ConsumeClosingDelim {
+ Yes,
+ No,
+}
+
+#[derive(Clone, Copy)]
+pub enum AttemptLocalParseRecovery {
+ Yes,
+ No,
+}
+
+impl AttemptLocalParseRecovery {
+ pub fn yes(&self) -> bool {
+ match self {
+ AttemptLocalParseRecovery::Yes => true,
+ AttemptLocalParseRecovery::No => false,
+ }
+ }
+
+ pub fn no(&self) -> bool {
+ match self {
+ AttemptLocalParseRecovery::Yes => false,
+ AttemptLocalParseRecovery::No => true,
+ }
+ }
+}
+
+/// Information for emitting suggestions and recovering from
+/// C-style `i++`, `--i`, etc.
+#[derive(Debug, Copy, Clone)]
+struct IncDecRecovery {
+ /// Is this increment/decrement its own statement?
+ standalone: IsStandalone,
+ /// Is this an increment or decrement?
+ op: IncOrDec,
+ /// Is this pre- or postfix?
+ fixity: UnaryFixity,
+}
+
+/// Is an increment or decrement expression its own statement?
+#[derive(Debug, Copy, Clone)]
+enum IsStandalone {
+ /// It's standalone, i.e., its own statement.
+ Standalone,
+ /// It's a subexpression, i.e., *not* standalone.
+ Subexpr,
+ /// It's maybe standalone; we're not sure.
+ Maybe,
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum IncOrDec {
+ Inc,
+ // FIXME: `i--` recovery isn't implemented yet
+ #[allow(dead_code)]
+ Dec,
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+enum UnaryFixity {
+ Pre,
+ Post,
+}
+
+impl IncOrDec {
+ fn chr(&self) -> char {
+ match self {
+ Self::Inc => '+',
+ Self::Dec => '-',
+ }
+ }
+
+ fn name(&self) -> &'static str {
+ match self {
+ Self::Inc => "increment",
+ Self::Dec => "decrement",
+ }
+ }
+}
+
+impl std::fmt::Display for UnaryFixity {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ Self::Pre => write!(f, "prefix"),
+ Self::Post => write!(f, "postfix"),
+ }
+ }
+}
+
+struct MultiSugg {
+ msg: String,
+ patches: Vec<(Span, String)>,
+ applicability: Applicability,
+}
+
+impl MultiSugg {
+ fn emit<G: EmissionGuarantee>(self, err: &mut DiagnosticBuilder<'_, G>) {
+ err.multipart_suggestion(&self.msg, self.patches, self.applicability);
+ }
+
+ /// Overrides individual messages and applicabilities.
+ fn emit_many<G: EmissionGuarantee>(
+ err: &mut DiagnosticBuilder<'_, G>,
+ msg: &str,
+ applicability: Applicability,
+ suggestions: impl Iterator<Item = Self>,
+ ) {
+ err.multipart_suggestions(msg, suggestions.map(|s| s.patches), applicability);
+ }
+}
+
+#[derive(SessionDiagnostic)]
+#[error(parser::maybe_report_ambiguous_plus)]
+struct AmbiguousPlus {
+ pub sum_ty: String,
+ #[primary_span]
+ #[suggestion(code = "({sum_ty})")]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(parser::maybe_recover_from_bad_type_plus, code = "E0178")]
+struct BadTypePlus {
+ pub ty: String,
+ #[primary_span]
+ pub span: Span,
+ #[subdiagnostic]
+ pub sub: BadTypePlusSub,
+}
+
+#[derive(SessionSubdiagnostic)]
+pub enum BadTypePlusSub {
+ #[suggestion(
+ parser::add_paren,
+ code = "{sum_with_parens}",
+ applicability = "machine-applicable"
+ )]
+ AddParen {
+ sum_with_parens: String,
+ #[primary_span]
+ span: Span,
+ },
+ #[label(parser::forgot_paren)]
+ ForgotParen {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(parser::expect_path)]
+ ExpectPath {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(SessionDiagnostic)]
+#[error(parser::maybe_recover_from_bad_qpath_stage_2)]
+struct BadQPathStage2 {
+ #[primary_span]
+ #[suggestion(applicability = "maybe-incorrect")]
+ span: Span,
+ ty: String,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(parser::incorrect_semicolon)]
+struct IncorrectSemicolon<'a> {
+ #[primary_span]
+ #[suggestion_short(applicability = "machine-applicable")]
+ span: Span,
+ #[help]
+ opt_help: Option<()>,
+ name: &'a str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(parser::incorrect_use_of_await)]
+struct IncorrectUseOfAwait {
+ #[primary_span]
+ #[suggestion(parser::parentheses_suggestion, applicability = "machine-applicable")]
+ span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(parser::incorrect_use_of_await)]
+struct IncorrectAwait {
+ #[primary_span]
+ span: Span,
+ #[suggestion(parser::postfix_suggestion, code = "{expr}.await{question_mark}")]
+ sugg_span: (Span, Applicability),
+ expr: String,
+ question_mark: &'static str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(parser::in_in_typo)]
+struct InInTypo {
+ #[primary_span]
+ span: Span,
+ #[suggestion(applicability = "machine-applicable")]
+ sugg_span: Span,
+}
+
+// `SnapshotParser` is used to create a snapshot of the parser
+// without causing duplicate errors to be emitted when the `Parser`
+// is dropped.
+pub struct SnapshotParser<'a> {
+ parser: Parser<'a>,
+ unclosed_delims: Vec<UnmatchedBrace>,
+}
+
+impl<'a> Deref for SnapshotParser<'a> {
+ type Target = Parser<'a>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.parser
+ }
+}
+
+impl<'a> DerefMut for SnapshotParser<'a> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.parser
+ }
+}
+
+impl<'a> Parser<'a> {
+ #[rustc_lint_diagnostics]
+ pub(super) fn span_err<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ err: Error,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ err.span_err(sp, self.diagnostic())
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_err<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ m: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ self.sess.span_diagnostic.struct_span_err(sp, m)
+ }
+
+ pub fn span_bug<S: Into<MultiSpan>>(&self, sp: S, m: impl Into<DiagnosticMessage>) -> ! {
+ self.sess.span_diagnostic.span_bug(sp, m)
+ }
+
+ pub(super) fn diagnostic(&self) -> &'a Handler {
+ &self.sess.span_diagnostic
+ }
+
+    /// Replaces `self` with `snapshot.parser` and extends `unclosed_delims` with `snapshot.unclosed_delims`.
+    /// This avoids losing the unclosed delimiter errors that `create_snapshot_for_diagnostic` clears.
+ pub(super) fn restore_snapshot(&mut self, snapshot: SnapshotParser<'a>) {
+ *self = snapshot.parser;
+ self.unclosed_delims.extend(snapshot.unclosed_delims.clone());
+ }
+
+ pub fn unclosed_delims(&self) -> &[UnmatchedBrace] {
+ &self.unclosed_delims
+ }
+
+ /// Create a snapshot of the `Parser`.
+ pub fn create_snapshot_for_diagnostic(&self) -> SnapshotParser<'a> {
+ let mut snapshot = self.clone();
+ let unclosed_delims = self.unclosed_delims.clone();
+        // Clear `unclosed_delims` in the snapshot to avoid duplicate errors
+        // being emitted when the `Parser` is dropped (which may or may not
+        // happen, depending on whether the parsing the snapshot was created
+        // for succeeds).
+ snapshot.unclosed_delims.clear();
+ SnapshotParser { parser: snapshot, unclosed_delims }
+ }
+
+ pub(super) fn span_to_snippet(&self, span: Span) -> Result<String, SpanSnippetError> {
+ self.sess.source_map().span_to_snippet(span)
+ }
+
+ pub(super) fn expected_ident_found(&self) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut err = self.struct_span_err(
+ self.token.span,
+ &format!("expected identifier, found {}", super::token_descr(&self.token)),
+ );
+ let valid_follow = &[
+ TokenKind::Eq,
+ TokenKind::Colon,
+ TokenKind::Comma,
+ TokenKind::Semi,
+ TokenKind::ModSep,
+ TokenKind::OpenDelim(Delimiter::Brace),
+ TokenKind::OpenDelim(Delimiter::Parenthesis),
+ TokenKind::CloseDelim(Delimiter::Brace),
+ TokenKind::CloseDelim(Delimiter::Parenthesis),
+ ];
+ match self.token.ident() {
+ Some((ident, false))
+ if ident.is_raw_guess()
+ && self.look_ahead(1, |t| valid_follow.contains(&t.kind)) =>
+ {
+ err.span_suggestion_verbose(
+ ident.span.shrink_to_lo(),
+ &format!("escape `{}` to use it as an identifier", ident.name),
+ "r#",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ }
+ if let Some(token_descr) = super::token_descr_opt(&self.token) {
+ err.span_label(self.token.span, format!("expected identifier, found {}", token_descr));
+ } else {
+ err.span_label(self.token.span, "expected identifier");
+ if self.token == token::Comma && self.look_ahead(1, |t| t.is_ident()) {
+ err.span_suggestion(
+ self.token.span,
+ "remove this comma",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ err
+ }
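+
+    // For example, `let type = 3;` reports "expected identifier, found keyword `type`"
+    // and, because `=` is in `valid_follow`, suggests escaping it as `r#type`.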
+
+ pub(super) fn expected_one_of_not_found(
+ &mut self,
+ edible: &[TokenKind],
+ inedible: &[TokenKind],
+ ) -> PResult<'a, bool /* recovered */> {
+ debug!("expected_one_of_not_found(edible: {:?}, inedible: {:?})", edible, inedible);
+ fn tokens_to_string(tokens: &[TokenType]) -> String {
+ let mut i = tokens.iter();
+ // This might be a sign we need a connect method on `Iterator`.
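+            // e.g. ["`;`", "`}`"] becomes "`;` or `}`", and three or more choices get
+            // an Oxford comma: ["`,`", "`;`", "`}`"] becomes "`,`, `;`, or `}`".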
+ let b = i.next().map_or_else(String::new, |t| t.to_string());
+ i.enumerate().fold(b, |mut b, (i, a)| {
+ if tokens.len() > 2 && i == tokens.len() - 2 {
+ b.push_str(", or ");
+ } else if tokens.len() == 2 && i == tokens.len() - 2 {
+ b.push_str(" or ");
+ } else {
+ b.push_str(", ");
+ }
+ b.push_str(&a.to_string());
+ b
+ })
+ }
+
+ let mut expected = edible
+ .iter()
+ .map(|x| TokenType::Token(x.clone()))
+ .chain(inedible.iter().map(|x| TokenType::Token(x.clone())))
+ .chain(self.expected_tokens.iter().cloned())
+ .filter_map(|token| {
+                // Filter out suggestions that suggest the same token that was found and deemed incorrect.
+ fn is_ident_eq_keyword(found: &TokenKind, expected: &TokenType) -> bool {
+ if let TokenKind::Ident(current_sym, _) = found {
+ if let TokenType::Keyword(suggested_sym) = expected {
+ return current_sym == suggested_sym;
+ }
+ }
+ false
+ }
+ if token != parser::TokenType::Token(self.token.kind.clone()) {
+ let eq = is_ident_eq_keyword(&self.token.kind, &token);
+                    // If the suggestion is a keyword and the found token is an ident whose
+                    // content is equal to the suggestion's content, we can remove that
+                    // suggestion (see the `return None` statement below).
+
+                    // If this isn't the case, however, and the suggestion is a token whose
+                    // content is the same as the found token's, we remove it as well.
+ if !eq {
+ if let TokenType::Token(kind) = &token {
+ if kind == &self.token.kind {
+ return None;
+ }
+ }
+ return Some(token);
+ }
+ }
+ return None;
+ })
+ .collect::<Vec<_>>();
+ expected.sort_by_cached_key(|x| x.to_string());
+ expected.dedup();
+
+ let sm = self.sess.source_map();
+ let msg = format!("expected `;`, found {}", super::token_descr(&self.token));
+ let appl = Applicability::MachineApplicable;
+ if expected.contains(&TokenType::Token(token::Semi)) {
+ if self.token.span == DUMMY_SP || self.prev_token.span == DUMMY_SP {
+ // Likely inside a macro, can't provide meaningful suggestions.
+ } else if !sm.is_multiline(self.prev_token.span.until(self.token.span)) {
+                // The current token is on the same line as the prior token; not recoverable.
+ } else if [token::Comma, token::Colon].contains(&self.token.kind)
+ && self.prev_token.kind == token::CloseDelim(Delimiter::Parenthesis)
+ {
+ // Likely typo: The current token is on a new line and is expected to be
+ // `.`, `;`, `?`, or an operator after a close delimiter token.
+ //
+ // let a = std::process::Command::new("echo")
+ // .arg("1")
+ // ,arg("2")
+ // ^
+ // https://github.com/rust-lang/rust/issues/72253
+ } else if self.look_ahead(1, |t| {
+ t == &token::CloseDelim(Delimiter::Brace)
+ || t.can_begin_expr() && t.kind != token::Colon
+ }) && [token::Comma, token::Colon].contains(&self.token.kind)
+ {
+ // Likely typo: `,` → `;` or `:` → `;`. This is triggered if the current token is
+ // either `,` or `:`, and the next token could either start a new statement or is a
+ // block close. For example:
+ //
+ // let x = 32:
+ // let y = 42;
+ self.bump();
+ let sp = self.prev_token.span;
+ self.struct_span_err(sp, &msg)
+ .span_suggestion_short(sp, "change this to `;`", ";", appl)
+ .emit();
+ return Ok(true);
+ } else if self.look_ahead(0, |t| {
+ t == &token::CloseDelim(Delimiter::Brace)
+ || (t.can_begin_expr() && t != &token::Semi && t != &token::Pound)
+ // Avoid triggering with too many trailing `#` in raw string.
+ || (sm.is_multiline(
+ self.prev_token.span.shrink_to_hi().until(self.token.span.shrink_to_lo())
+ ) && t == &token::Pound)
+ }) && !expected.contains(&TokenType::Token(token::Comma))
+ {
+ // Missing semicolon typo. This is triggered if the next token could either start a
+ // new statement or is a block close. For example:
+ //
+ // let x = 32
+ // let y = 42;
+ let sp = self.prev_token.span.shrink_to_hi();
+ self.struct_span_err(sp, &msg)
+ .span_label(self.token.span, "unexpected token")
+ .span_suggestion_short(sp, "add `;` here", ";", appl)
+ .emit();
+ return Ok(true);
+ }
+ }
+
+ let expect = tokens_to_string(&expected);
+ let actual = super::token_descr(&self.token);
+ let (msg_exp, (label_sp, label_exp)) = if expected.len() > 1 {
+ let short_expect = if expected.len() > 6 {
+ format!("{} possible tokens", expected.len())
+ } else {
+ expect.clone()
+ };
+ (
+ format!("expected one of {expect}, found {actual}"),
+ (self.prev_token.span.shrink_to_hi(), format!("expected one of {short_expect}")),
+ )
+ } else if expected.is_empty() {
+ (
+ format!("unexpected token: {}", actual),
+ (self.prev_token.span, "unexpected token after this".to_string()),
+ )
+ } else {
+ (
+ format!("expected {expect}, found {actual}"),
+ (self.prev_token.span.shrink_to_hi(), format!("expected {expect}")),
+ )
+ };
+ self.last_unexpected_token_span = Some(self.token.span);
+ let mut err = self.struct_span_err(self.token.span, &msg_exp);
+
+ if let TokenKind::Ident(symbol, _) = &self.prev_token.kind {
+ if symbol.as_str() == "public" {
+ err.span_suggestion_short(
+ self.prev_token.span,
+ "write `pub` instead of `public` to make the item public",
+ "pub",
+ appl,
+ );
+ }
+ }
+
+        // Add a suggestion for a missing closing angle bracket if '>' is included in
+        // `expected_tokens` and there are unclosed angle brackets.
+ if self.unmatched_angle_bracket_count > 0
+ && self.token.kind == TokenKind::Eq
+ && expected.iter().any(|tok| matches!(tok, TokenType::Token(TokenKind::Gt)))
+ {
+ err.span_label(self.prev_token.span, "maybe try to close unmatched angle bracket");
+ }
+
+ let sp = if self.token == token::Eof {
+ // This is EOF; don't want to point at the following char, but rather the last token.
+ self.prev_token.span
+ } else {
+ label_sp
+ };
+ match self.recover_closing_delimiter(
+ &expected
+ .iter()
+ .filter_map(|tt| match tt {
+ TokenType::Token(t) => Some(t.clone()),
+ _ => None,
+ })
+ .collect::<Vec<_>>(),
+ err,
+ ) {
+ Err(e) => err = e,
+ Ok(recovered) => {
+ return Ok(recovered);
+ }
+ }
+
+ if self.check_too_many_raw_str_terminators(&mut err) {
+ if expected.contains(&TokenType::Token(token::Semi)) && self.eat(&token::Semi) {
+ err.emit();
+ return Ok(true);
+ } else {
+ return Err(err);
+ }
+ }
+
+ if self.prev_token.span == DUMMY_SP {
+ // Account for macro context where the previous span might not be
+ // available to avoid incorrect output (#54841).
+ err.span_label(self.token.span, label_exp);
+ } else if !sm.is_multiline(self.token.span.shrink_to_hi().until(sp.shrink_to_lo())) {
+ // When the spans are in the same line, it means that the only content between
+ // them is whitespace, point at the found token in that case:
+ //
+ // X | () => { syntax error };
+ // | ^^^^^ expected one of 8 possible tokens here
+ //
+ // instead of having:
+ //
+ // X | () => { syntax error };
+ // | -^^^^^ unexpected token
+ // | |
+ // | expected one of 8 possible tokens here
+ err.span_label(self.token.span, label_exp);
+ } else {
+ err.span_label(sp, label_exp);
+ err.span_label(self.token.span, "unexpected token");
+ }
+ self.maybe_annotate_with_ascription(&mut err, false);
+ Err(err)
+ }
+
+ fn check_too_many_raw_str_terminators(&mut self, err: &mut Diagnostic) -> bool {
+ let sm = self.sess.source_map();
+ match (&self.prev_token.kind, &self.token.kind) {
+ (
+ TokenKind::Literal(Lit {
+ kind: LitKind::StrRaw(n_hashes) | LitKind::ByteStrRaw(n_hashes),
+ ..
+ }),
+ TokenKind::Pound,
+ ) if !sm.is_multiline(
+ self.prev_token.span.shrink_to_hi().until(self.token.span.shrink_to_lo()),
+ ) =>
+ {
+ let n_hashes: u8 = *n_hashes;
+ err.set_primary_message("too many `#` when terminating raw string");
+ let str_span = self.prev_token.span;
+ let mut span = self.token.span;
+ let mut count = 0;
+ while self.token.kind == TokenKind::Pound
+ && !sm.is_multiline(span.shrink_to_hi().until(self.token.span.shrink_to_lo()))
+ {
+ span = span.with_hi(self.token.span.hi());
+ self.bump();
+ count += 1;
+ }
+ err.set_span(span);
+ err.span_suggestion(
+ span,
+ &format!("remove the extra `#`{}", pluralize!(count)),
+ "",
+ Applicability::MachineApplicable,
+ );
+ err.span_label(
+ str_span,
+ &format!("this raw string started with {n_hashes} `#`{}", pluralize!(n_hashes)),
+ );
+ true
+ }
+ _ => false,
+ }
+ }
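+
+    // For example, `r#"abc"##` terminates the raw string at `"#` and leaves a stray
+    // `#` behind; the check above reports "too many `#` when terminating raw string"
+    // with a machine-applicable suggestion to remove it.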
+
+ pub fn maybe_suggest_struct_literal(
+ &mut self,
+ lo: Span,
+ s: BlockCheckMode,
+ ) -> Option<PResult<'a, P<Block>>> {
+ if self.token.is_ident() && self.look_ahead(1, |t| t == &token::Colon) {
+ // We might be having a struct literal where people forgot to include the path:
+ // fn foo() -> Foo {
+ // field: value,
+ // }
+ let mut snapshot = self.create_snapshot_for_diagnostic();
+ let path =
+ Path { segments: vec![], span: self.prev_token.span.shrink_to_lo(), tokens: None };
+ let struct_expr = snapshot.parse_struct_expr(None, path, AttrVec::new(), false);
+ let block_tail = self.parse_block_tail(lo, s, AttemptLocalParseRecovery::No);
+ return Some(match (struct_expr, block_tail) {
+ (Ok(expr), Err(mut err)) => {
+ // We have encountered the following:
+ // fn foo() -> Foo {
+ // field: value,
+ // }
+ // Suggest:
+ // fn foo() -> Foo { Path {
+ // field: value,
+ // } }
+ err.delay_as_bug();
+ self.struct_span_err(
+ expr.span,
+ fluent::parser::struct_literal_body_without_path,
+ )
+ .multipart_suggestion(
+ fluent::parser::suggestion,
+ vec![
+ (expr.span.shrink_to_lo(), "{ SomeStruct ".to_string()),
+ (expr.span.shrink_to_hi(), " }".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ self.restore_snapshot(snapshot);
+ let mut tail = self.mk_block(
+ vec![self.mk_stmt_err(expr.span)],
+ s,
+ lo.to(self.prev_token.span),
+ );
+ tail.could_be_bare_literal = true;
+ Ok(tail)
+ }
+ (Err(err), Ok(tail)) => {
+ // We have a block tail that contains a somehow valid type ascription expr.
+                // We have a block tail that somehow contains a valid type ascription expr.
+ Ok(tail)
+ }
+ (Err(snapshot_err), Err(err)) => {
+ // We don't know what went wrong, emit the normal error.
+ snapshot_err.cancel();
+ self.consume_block(Delimiter::Brace, ConsumeClosingDelim::Yes);
+ Err(err)
+ }
+ (Ok(_), Ok(mut tail)) => {
+ tail.could_be_bare_literal = true;
+ Ok(tail)
+ }
+ });
+ }
+ None
+ }
+
+ pub fn maybe_annotate_with_ascription(
+ &mut self,
+ err: &mut Diagnostic,
+ maybe_expected_semicolon: bool,
+ ) {
+ if let Some((sp, likely_path)) = self.last_type_ascription.take() {
+ let sm = self.sess.source_map();
+ let next_pos = sm.lookup_char_pos(self.token.span.lo());
+ let op_pos = sm.lookup_char_pos(sp.hi());
+
+ let allow_unstable = self.sess.unstable_features.is_nightly_build();
+
+ if likely_path {
+ err.span_suggestion(
+ sp,
+ "maybe write a path separator here",
+ "::",
+ if allow_unstable {
+ Applicability::MaybeIncorrect
+ } else {
+ Applicability::MachineApplicable
+ },
+ );
+ self.sess.type_ascription_path_suggestions.borrow_mut().insert(sp);
+ } else if op_pos.line != next_pos.line && maybe_expected_semicolon {
+ err.span_suggestion(
+ sp,
+ "try using a semicolon",
+ ";",
+ Applicability::MaybeIncorrect,
+ );
+ } else if allow_unstable {
+ err.span_label(sp, "tried to parse a type due to this type ascription");
+ } else {
+ err.span_label(sp, "tried to parse a type due to this");
+ }
+ if allow_unstable {
+ // Give extra information about type ascription only if it's a nightly compiler.
+ err.note(
+ "`#![feature(type_ascription)]` lets you annotate an expression with a type: \
+ `<expr>: <type>`",
+ );
+ if !likely_path {
+ // Avoid giving too much info when it was likely an unrelated typo.
+ err.note(
+ "see issue #23416 <https://github.com/rust-lang/rust/issues/23416> \
+ for more information",
+ );
+ }
+ }
+ }
+ }
+
+ /// Eats and discards tokens until one of `kets` is encountered. Respects token trees,
+ /// passes through any errors encountered. Used for error recovery.
+ pub(super) fn eat_to_tokens(&mut self, kets: &[&TokenKind]) {
+ if let Err(err) =
+ self.parse_seq_to_before_tokens(kets, SeqSep::none(), TokenExpectType::Expect, |p| {
+ Ok(p.parse_token_tree())
+ })
+ {
+ err.cancel();
+ }
+ }
+
+ /// This function checks if there are trailing angle brackets and produces
+ /// a diagnostic to suggest removing them.
+ ///
+ /// ```ignore (diagnostic)
+ /// let _ = [1, 2, 3].into_iter().collect::<Vec<usize>>>>();
+ /// ^^ help: remove extra angle brackets
+ /// ```
+ ///
+ /// If `true` is returned, then trailing brackets were recovered, tokens were consumed
+    /// up until one of the tokens in `end` was encountered, and an error was emitted.
+ pub(super) fn check_trailing_angle_brackets(
+ &mut self,
+ segment: &PathSegment,
+ end: &[&TokenKind],
+ ) -> bool {
+ // This function is intended to be invoked after parsing a path segment where there are two
+ // cases:
+ //
+ // 1. A specific token is expected after the path segment.
+        //    e.g. `x.foo(`, `x.foo::<u32>(` (parenthesis - method call),
+ // `Foo::`, or `Foo::<Bar>::` (mod sep - continued path).
+ // 2. No specific token is expected after the path segment.
+        //    e.g. `x.foo` (field access)
+ //
+ // This function is called after parsing `.foo` and before parsing the token `end` (if
+ // present). This includes any angle bracket arguments, such as `.foo::<u32>` or
+ // `Foo::<Bar>`.
+
+ // We only care about trailing angle brackets if we previously parsed angle bracket
+ // arguments. This helps stop us incorrectly suggesting that extra angle brackets be
+ // removed in this case:
+ //
+ // `x.foo >> (3)` (where `x.foo` is a `u32` for example)
+ //
+ // This case is particularly tricky as we won't notice it just looking at the tokens -
+ // it will appear the same (in terms of upcoming tokens) as below (since the `::<u32>` will
+ // have already been parsed):
+ //
+ // `x.foo::<u32>>>(3)`
+ let parsed_angle_bracket_args =
+ segment.args.as_ref().map_or(false, |args| args.is_angle_bracketed());
+
+ debug!(
+ "check_trailing_angle_brackets: parsed_angle_bracket_args={:?}",
+ parsed_angle_bracket_args,
+ );
+ if !parsed_angle_bracket_args {
+ return false;
+ }
+
+ // Keep the span at the start so we can highlight the sequence of `>` characters to be
+ // removed.
+ let lo = self.token.span;
+
+ // We need to look-ahead to see if we have `>` characters without moving the cursor forward
+ // (since we might have the field access case and the characters we're eating are
+        // actual operators and not trailing characters - i.e., `x.foo >> 3`).
+ let mut position = 0;
+
+ // We can encounter `>` or `>>` tokens in any order, so we need to keep track of how
+ // many of each (so we can correctly pluralize our error messages) and continue to
+ // advance.
+ let mut number_of_shr = 0;
+ let mut number_of_gt = 0;
+ while self.look_ahead(position, |t| {
+ trace!("check_trailing_angle_brackets: t={:?}", t);
+ if *t == token::BinOp(token::BinOpToken::Shr) {
+ number_of_shr += 1;
+ true
+ } else if *t == token::Gt {
+ number_of_gt += 1;
+ true
+ } else {
+ false
+ }
+ }) {
+ position += 1;
+ }
+
+ // If we didn't find any trailing `>` characters, then we have nothing to error about.
+ debug!(
+ "check_trailing_angle_brackets: number_of_gt={:?} number_of_shr={:?}",
+ number_of_gt, number_of_shr,
+ );
+ if number_of_gt < 1 && number_of_shr < 1 {
+ return false;
+ }
+
+ // Finally, double check that we have our end token as otherwise this is the
+ // second case.
+ if self.look_ahead(position, |t| {
+ trace!("check_trailing_angle_brackets: t={:?}", t);
+ end.contains(&&t.kind)
+ }) {
+ // Eat from where we started until the end token so that parsing can continue
+ // as if we didn't have those extra angle brackets.
+ self.eat_to_tokens(end);
+ let span = lo.until(self.token.span);
+
+ let total_num_of_gt = number_of_gt + number_of_shr * 2;
+ self.struct_span_err(
+ span,
+ &format!("unmatched angle bracket{}", pluralize!(total_num_of_gt)),
+ )
+ .span_suggestion(
+ span,
+ &format!("remove extra angle bracket{}", pluralize!(total_num_of_gt)),
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ return true;
+ }
+ false
+ }
+
+ /// Check if a method call with an intended turbofish has been written without surrounding
+ /// angle brackets.
+ pub(super) fn check_turbofish_missing_angle_brackets(&mut self, segment: &mut PathSegment) {
+ if token::ModSep == self.token.kind && segment.args.is_none() {
+ let snapshot = self.create_snapshot_for_diagnostic();
+ self.bump();
+ let lo = self.token.span;
+ match self.parse_angle_args(None) {
+ Ok(args) => {
+ let span = lo.to(self.prev_token.span);
+ // Detect trailing `>` like in `x.collect::Vec<_>>()`.
+ let mut trailing_span = self.prev_token.span.shrink_to_hi();
+ while self.token.kind == token::BinOp(token::Shr)
+ || self.token.kind == token::Gt
+ {
+ trailing_span = trailing_span.to(self.token.span);
+ self.bump();
+ }
+ if self.token.kind == token::OpenDelim(Delimiter::Parenthesis) {
+ // Recover from bad turbofish: `foo.collect::Vec<_>()`.
+ let args = AngleBracketedArgs { args, span }.into();
+ segment.args = args;
+
+ self.struct_span_err(
+ span,
+ "generic parameters without surrounding angle brackets",
+ )
+ .multipart_suggestion(
+ "surround the type parameters with angle brackets",
+ vec![
+ (span.shrink_to_lo(), "<".to_string()),
+ (trailing_span, ">".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ } else {
+ // This doesn't look like an invalid turbofish, can't recover parse state.
+ self.restore_snapshot(snapshot);
+ }
+ }
+ Err(err) => {
+ // We couldn't parse generic parameters, unlikely to be a turbofish. Rely on
+ // generic parse error instead.
+ err.cancel();
+ self.restore_snapshot(snapshot);
+ }
+ }
+ }
+ }
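+
+    // e.g. `(0..5).collect::Vec<i32>()` is recovered with the suggestion to surround
+    // the type parameters with angle brackets: `(0..5).collect::<Vec<i32>>()`.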
+
+    /// When writing a turbofish with multiple type parameters missing the leading `::`,
+    /// we will encounter a parse error at the first `,`.
+ pub(super) fn check_mistyped_turbofish_with_multiple_type_params(
+ &mut self,
+ mut e: DiagnosticBuilder<'a, ErrorGuaranteed>,
+ expr: &mut P<Expr>,
+ ) -> PResult<'a, ()> {
+ if let ExprKind::Binary(binop, _, _) = &expr.kind
+ && let ast::BinOpKind::Lt = binop.node
+ && self.eat(&token::Comma)
+ {
+ let x = self.parse_seq_to_before_end(
+ &token::Gt,
+ SeqSep::trailing_allowed(token::Comma),
+ |p| p.parse_generic_arg(None),
+ );
+ match x {
+ Ok((_, _, false)) => {
+ if self.eat(&token::Gt) {
+ e.span_suggestion_verbose(
+ binop.span.shrink_to_lo(),
+ TURBOFISH_SUGGESTION_STR,
+ "::",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ match self.parse_expr() {
+ Ok(_) => {
+ *expr =
+ self.mk_expr_err(expr.span.to(self.prev_token.span));
+ return Ok(());
+ }
+ Err(err) => {
+ *expr = self.mk_expr_err(expr.span);
+ err.cancel();
+ }
+ }
+ }
+ }
+ Err(err) => {
+ err.cancel();
+ }
+ _ => {}
+ }
+ }
+ Err(e)
+ }
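+
+    // e.g. `f<u32, i32>(x)` first parses as the comparison `f < u32`; the `,` lands
+    // here and we suggest the turbofish `f::<u32, i32>(x)`.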
+
+ /// Check to see if a pair of chained operators looks like an attempt at chained comparison,
+ /// e.g. `1 < x <= 3`. If so, suggest either splitting the comparison into two, or
+    /// parenthesizing the leftmost comparison.
+ fn attempt_chained_comparison_suggestion(
+ &mut self,
+ err: &mut Diagnostic,
+ inner_op: &Expr,
+ outer_op: &Spanned<AssocOp>,
+ ) -> bool /* advanced the cursor */ {
+ if let ExprKind::Binary(op, ref l1, ref r1) = inner_op.kind {
+ if let ExprKind::Field(_, ident) = l1.kind
+ && ident.as_str().parse::<i32>().is_err()
+ && !matches!(r1.kind, ExprKind::Lit(_))
+ {
+ // The parser has encountered `foo.bar<baz`, the likelihood of the turbofish
+ // suggestion being the only one to apply is high.
+ return false;
+ }
+ let mut enclose = |left: Span, right: Span| {
+ err.multipart_suggestion(
+ "parenthesize the comparison",
+ vec![
+ (left.shrink_to_lo(), "(".to_string()),
+ (right.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ };
+ return match (op.node, &outer_op.node) {
+ // `x == y == z`
+ (BinOpKind::Eq, AssocOp::Equal) |
+ // `x < y < z` and friends.
+ (BinOpKind::Lt, AssocOp::Less | AssocOp::LessEqual) |
+ (BinOpKind::Le, AssocOp::LessEqual | AssocOp::Less) |
+ // `x > y > z` and friends.
+ (BinOpKind::Gt, AssocOp::Greater | AssocOp::GreaterEqual) |
+ (BinOpKind::Ge, AssocOp::GreaterEqual | AssocOp::Greater) => {
+ let expr_to_str = |e: &Expr| {
+ self.span_to_snippet(e.span)
+ .unwrap_or_else(|_| pprust::expr_to_string(&e))
+ };
+ err.span_suggestion_verbose(
+ inner_op.span.shrink_to_hi(),
+ "split the comparison into two",
+ format!(" && {}", expr_to_str(&r1)),
+ Applicability::MaybeIncorrect,
+ );
+ false // Keep the current parse behavior, where the AST is `(x < y) < z`.
+ }
+ // `x == y < z`
+ (BinOpKind::Eq, AssocOp::Less | AssocOp::LessEqual | AssocOp::Greater | AssocOp::GreaterEqual) => {
+ // Consume `z`/outer-op-rhs.
+ let snapshot = self.create_snapshot_for_diagnostic();
+ match self.parse_expr() {
+ Ok(r2) => {
+ // We are sure that outer-op-rhs could be consumed, the suggestion is
+ // likely correct.
+ enclose(r1.span, r2.span);
+ true
+ }
+ Err(expr_err) => {
+ expr_err.cancel();
+ self.restore_snapshot(snapshot);
+ false
+ }
+ }
+ }
+ // `x > y == z`
+ (BinOpKind::Lt | BinOpKind::Le | BinOpKind::Gt | BinOpKind::Ge, AssocOp::Equal) => {
+ let snapshot = self.create_snapshot_for_diagnostic();
+ // At this point it is always valid to enclose the lhs in parentheses, no
+ // further checks are necessary.
+ match self.parse_expr() {
+ Ok(_) => {
+ enclose(l1.span, r1.span);
+ true
+ }
+ Err(expr_err) => {
+ expr_err.cancel();
+ self.restore_snapshot(snapshot);
+ false
+ }
+ }
+ }
+ _ => false,
+ };
+ }
+ false
+ }
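+
+    // e.g. `1 < x <= 3` gets "split the comparison into two" (`1 < x && x <= 3`),
+    // while `x == y < z` gets "parenthesize the comparison" (`x == (y < z)`).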
+
+ /// Produces an error if comparison operators are chained (RFC #558).
+    /// We only need to check the LHS, not the RHS, because all comparison ops have the
+    /// same precedence (see `fn precedence`) and are left-associative (see `fn fixity`).
+ ///
+ /// This can also be hit if someone incorrectly writes `foo<bar>()` when they should have used
+ /// the turbofish (`foo::<bar>()`) syntax. We attempt some heuristic recovery if that is the
+ /// case.
+ ///
+ /// Keep in mind that given that `outer_op.is_comparison()` holds and comparison ops are left
+ /// associative we can infer that we have:
+ ///
+ /// ```text
+ /// outer_op
+ /// / \
+ /// inner_op r2
+ /// / \
+ /// l1 r1
+ /// ```
+ pub(super) fn check_no_chained_comparison(
+ &mut self,
+ inner_op: &Expr,
+ outer_op: &Spanned<AssocOp>,
+ ) -> PResult<'a, Option<P<Expr>>> {
+ debug_assert!(
+ outer_op.node.is_comparison(),
+ "check_no_chained_comparison: {:?} is not comparison",
+ outer_op.node,
+ );
+
+ let mk_err_expr =
+ |this: &Self, span| Ok(Some(this.mk_expr(span, ExprKind::Err, AttrVec::new())));
+
+ match inner_op.kind {
+ ExprKind::Binary(op, ref l1, ref r1) if op.node.is_comparison() => {
+ let mut err = self.struct_span_err(
+ vec![op.span, self.prev_token.span],
+ "comparison operators cannot be chained",
+ );
+
+ let suggest = |err: &mut Diagnostic| {
+ err.span_suggestion_verbose(
+ op.span.shrink_to_lo(),
+ TURBOFISH_SUGGESTION_STR,
+ "::",
+ Applicability::MaybeIncorrect,
+ );
+ };
+
+ // Include `<` to provide this recommendation even in a case like
+ // `Foo<Bar<Baz<Qux, ()>>>`
+ if op.node == BinOpKind::Lt && outer_op.node == AssocOp::Less
+ || outer_op.node == AssocOp::Greater
+ {
+ if outer_op.node == AssocOp::Less {
+ let snapshot = self.create_snapshot_for_diagnostic();
+ self.bump();
+ // So far we have parsed `foo<bar<`, consume the rest of the type args.
+ let modifiers =
+ [(token::Lt, 1), (token::Gt, -1), (token::BinOp(token::Shr), -2)];
+ self.consume_tts(1, &modifiers);
+
+ if !&[token::OpenDelim(Delimiter::Parenthesis), token::ModSep]
+ .contains(&self.token.kind)
+ {
+ // We don't have `foo< bar >(` or `foo< bar >::`, so we rewind the
+ // parser and bail out.
+ self.restore_snapshot(snapshot);
+ }
+ }
+ return if token::ModSep == self.token.kind {
+ // We have some certainty that this was a bad turbofish at this point.
+ // `foo< bar >::`
+ suggest(&mut err);
+
+ let snapshot = self.create_snapshot_for_diagnostic();
+ self.bump(); // `::`
+
+ // Consume the rest of the likely `foo<bar>::new()` or return at `foo<bar>`.
+ match self.parse_expr() {
+ Ok(_) => {
+ // 99% certain that the suggestion is correct, continue parsing.
+ err.emit();
+ // FIXME: actually check that the two expressions in the binop are
+ // paths and resynthesize new fn call expression instead of using
+ // `ExprKind::Err` placeholder.
+ mk_err_expr(self, inner_op.span.to(self.prev_token.span))
+ }
+ Err(expr_err) => {
+ expr_err.cancel();
+ // Not entirely sure now, but we bubble the error up with the
+ // suggestion.
+ self.restore_snapshot(snapshot);
+ Err(err)
+ }
+ }
+ } else if token::OpenDelim(Delimiter::Parenthesis) == self.token.kind {
+ // We have high certainty that this was a bad turbofish at this point.
+ // `foo< bar >(`
+ suggest(&mut err);
+ // Consume the fn call arguments.
+ match self.consume_fn_args() {
+ Err(()) => Err(err),
+ Ok(()) => {
+ err.emit();
+ // FIXME: actually check that the two expressions in the binop are
+ // paths and resynthesize new fn call expression instead of using
+ // `ExprKind::Err` placeholder.
+ mk_err_expr(self, inner_op.span.to(self.prev_token.span))
+ }
+ }
+ } else {
+ if !matches!(l1.kind, ExprKind::Lit(_))
+ && !matches!(r1.kind, ExprKind::Lit(_))
+ {
+ // All we know is that this is `foo < bar >` and *nothing* else. Try to
+ // be helpful, but don't attempt to recover.
+ err.help(TURBOFISH_SUGGESTION_STR);
+ err.help("or use `(...)` if you meant to specify fn arguments");
+ }
+
+ // If it looks like a genuine attempt to chain operators (as opposed to a
+ // misformatted turbofish, for instance), suggest a correct form.
+ if self.attempt_chained_comparison_suggestion(&mut err, inner_op, outer_op)
+ {
+ err.emit();
+ mk_err_expr(self, inner_op.span.to(self.prev_token.span))
+ } else {
+ // These cases cause too many knock-down errors, bail out (#61329).
+ Err(err)
+ }
+ };
+ }
+ let recover =
+ self.attempt_chained_comparison_suggestion(&mut err, inner_op, outer_op);
+ err.emit();
+ if recover {
+ return mk_err_expr(self, inner_op.span.to(self.prev_token.span));
+ }
+ }
+ _ => {}
+ }
+ Ok(None)
+ }
+
+ fn consume_fn_args(&mut self) -> Result<(), ()> {
+ let snapshot = self.create_snapshot_for_diagnostic();
+ self.bump(); // `(`
+
+ // Consume the fn call arguments.
+ let modifiers = [
+ (token::OpenDelim(Delimiter::Parenthesis), 1),
+ (token::CloseDelim(Delimiter::Parenthesis), -1),
+ ];
+ self.consume_tts(1, &modifiers);
+
+ if self.token.kind == token::Eof {
+            // Not entirely sure that what we consumed were fn arguments; roll back.
+ self.restore_snapshot(snapshot);
+ Err(())
+ } else {
+ // 99% certain that the suggestion is correct, continue parsing.
+ Ok(())
+ }
+ }
+
+ pub(super) fn maybe_report_ambiguous_plus(&mut self, impl_dyn_multi: bool, ty: &Ty) {
+ if impl_dyn_multi {
+ self.sess.emit_err(AmbiguousPlus { sum_ty: pprust::ty_to_string(&ty), span: ty.span });
+ }
+ }
+
+ /// Swift lets users write `Ty?` to mean `Option<Ty>`. Parse the construct and recover from it.
+ pub(super) fn maybe_recover_from_question_mark(&mut self, ty: P<Ty>) -> P<Ty> {
+ if self.token == token::Question {
+ self.bump();
+ self.struct_span_err(self.prev_token.span, "invalid `?` in type")
+ .span_label(self.prev_token.span, "`?` is only allowed on expressions, not types")
+ .multipart_suggestion(
+ "if you meant to express that the type might not contain a value, use the `Option` wrapper type",
+ vec![
+ (ty.span.shrink_to_lo(), "Option<".to_string()),
+ (self.prev_token.span, ">".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ self.mk_ty(ty.span.to(self.prev_token.span), TyKind::Err)
+ } else {
+ ty
+ }
+ }
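+
+    // e.g. `fn f(x: i32?) {}` is recovered with the machine-applicable suggestion
+    // `fn f(x: Option<i32>) {}`.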
+
+ pub(super) fn maybe_recover_from_bad_type_plus(&mut self, ty: &Ty) -> PResult<'a, ()> {
+ // Do not add `+` to expected tokens.
+ if !self.token.is_like_plus() {
+ return Ok(());
+ }
+
+ self.bump(); // `+`
+ let bounds = self.parse_generic_bounds(None)?;
+ let sum_span = ty.span.to(self.prev_token.span);
+
+ let sub = match ty.kind {
+ TyKind::Rptr(ref lifetime, ref mut_ty) => {
+ let sum_with_parens = pprust::to_string(|s| {
+ s.s.word("&");
+ s.print_opt_lifetime(lifetime);
+ s.print_mutability(mut_ty.mutbl, false);
+ s.popen();
+ s.print_type(&mut_ty.ty);
+ if !bounds.is_empty() {
+ s.word(" + ");
+ s.print_type_bounds(&bounds);
+ }
+ s.pclose()
+ });
+
+ BadTypePlusSub::AddParen { sum_with_parens, span: sum_span }
+ }
+ TyKind::Ptr(..) | TyKind::BareFn(..) => BadTypePlusSub::ForgotParen { span: sum_span },
+ _ => BadTypePlusSub::ExpectPath { span: sum_span },
+ };
+
+ self.sess.emit_err(BadTypePlus { ty: pprust::ty_to_string(ty), span: sum_span, sub });
+
+ Ok(())
+ }
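+
+    // e.g. `&dyn Trait + Send` emits E0178 with the suggestion `&(dyn Trait + Send)`.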
+
+ pub(super) fn recover_from_prefix_increment(
+ &mut self,
+ operand_expr: P<Expr>,
+ op_span: Span,
+ prev_is_semi: bool,
+ ) -> PResult<'a, P<Expr>> {
+ let standalone =
+ if prev_is_semi { IsStandalone::Standalone } else { IsStandalone::Subexpr };
+ let kind = IncDecRecovery { standalone, op: IncOrDec::Inc, fixity: UnaryFixity::Pre };
+
+ self.recover_from_inc_dec(operand_expr, kind, op_span)
+ }
+
+ pub(super) fn recover_from_postfix_increment(
+ &mut self,
+ operand_expr: P<Expr>,
+ op_span: Span,
+ ) -> PResult<'a, P<Expr>> {
+ let kind = IncDecRecovery {
+ standalone: IsStandalone::Maybe,
+ op: IncOrDec::Inc,
+ fixity: UnaryFixity::Post,
+ };
+
+ self.recover_from_inc_dec(operand_expr, kind, op_span)
+ }
+
+ fn recover_from_inc_dec(
+ &mut self,
+ base: P<Expr>,
+ kind: IncDecRecovery,
+ op_span: Span,
+ ) -> PResult<'a, P<Expr>> {
+ let mut err = self.struct_span_err(
+ op_span,
+ &format!("Rust has no {} {} operator", kind.fixity, kind.op.name()),
+ );
+ err.span_label(op_span, &format!("not a valid {} operator", kind.fixity));
+
+ let help_base_case = |mut err: DiagnosticBuilder<'_, _>, base| {
+ err.help(&format!("use `{}= 1` instead", kind.op.chr()));
+ err.emit();
+ Ok(base)
+ };
+
+ // (pre, post)
+ let spans = match kind.fixity {
+ UnaryFixity::Pre => (op_span, base.span.shrink_to_hi()),
+ UnaryFixity::Post => (base.span.shrink_to_lo(), op_span),
+ };
+
+ match kind.standalone {
+ IsStandalone::Standalone => self.inc_dec_standalone_suggest(kind, spans).emit(&mut err),
+ IsStandalone::Subexpr => {
+ let Ok(base_src) = self.span_to_snippet(base.span)
+ else { return help_base_case(err, base) };
+ match kind.fixity {
+ UnaryFixity::Pre => {
+ self.prefix_inc_dec_suggest(base_src, kind, spans).emit(&mut err)
+ }
+ UnaryFixity::Post => {
+ self.postfix_inc_dec_suggest(base_src, kind, spans).emit(&mut err)
+ }
+ }
+ }
+ IsStandalone::Maybe => {
+ let Ok(base_src) = self.span_to_snippet(base.span)
+ else { return help_base_case(err, base) };
+ let sugg1 = match kind.fixity {
+ UnaryFixity::Pre => self.prefix_inc_dec_suggest(base_src, kind, spans),
+ UnaryFixity::Post => self.postfix_inc_dec_suggest(base_src, kind, spans),
+ };
+ let sugg2 = self.inc_dec_standalone_suggest(kind, spans);
+ MultiSugg::emit_many(
+ &mut err,
+ "use `+= 1` instead",
+ Applicability::Unspecified,
+ [sugg1, sugg2].into_iter(),
+ )
+ }
+ }
+ Err(err)
+ }
+
+ fn prefix_inc_dec_suggest(
+ &mut self,
+ base_src: String,
+ kind: IncDecRecovery,
+ (pre_span, post_span): (Span, Span),
+ ) -> MultiSugg {
+ MultiSugg {
+ msg: format!("use `{}= 1` instead", kind.op.chr()),
+ patches: vec![
+ (pre_span, "{ ".to_string()),
+ (post_span, format!(" {}= 1; {} }}", kind.op.chr(), base_src)),
+ ],
+ applicability: Applicability::MachineApplicable,
+ }
+ }
+
+ fn postfix_inc_dec_suggest(
+ &mut self,
+ base_src: String,
+ kind: IncDecRecovery,
+ (pre_span, post_span): (Span, Span),
+ ) -> MultiSugg {
+ let tmp_var = if base_src.trim() == "tmp" { "tmp_" } else { "tmp" };
+ MultiSugg {
+ msg: format!("use `{}= 1` instead", kind.op.chr()),
+ patches: vec![
+ (pre_span, format!("{{ let {} = ", tmp_var)),
+ (post_span, format!("; {} {}= 1; {} }}", base_src, kind.op.chr(), tmp_var)),
+ ],
+ applicability: Applicability::HasPlaceholders,
+ }
+ }
+
+ fn inc_dec_standalone_suggest(
+ &mut self,
+ kind: IncDecRecovery,
+ (pre_span, post_span): (Span, Span),
+ ) -> MultiSugg {
+ MultiSugg {
+ msg: format!("use `{}= 1` instead", kind.op.chr()),
+ patches: vec![(pre_span, String::new()), (post_span, format!(" {}= 1", kind.op.chr()))],
+ applicability: Applicability::MachineApplicable,
+ }
+ }
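+
+    // e.g. a standalone `i++;` is suggested as `i += 1;`, while a postfix use such as
+    // `let x = i++;` becomes `let x = { let tmp = i; i += 1; tmp };`.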
+
+ /// Tries to recover from associated item paths like `[T]::AssocItem` / `(T, U)::AssocItem`.
+ /// Attempts to convert the base expression/pattern/type into a type, parses the `::AssocItem`
+ /// tail, and combines them into a `<Ty>::AssocItem` expression/pattern/type.
+ pub(super) fn maybe_recover_from_bad_qpath<T: RecoverQPath>(
+ &mut self,
+ base: P<T>,
+ ) -> PResult<'a, P<T>> {
+ // Do not add `::` to expected tokens.
+ if self.token == token::ModSep {
+ if let Some(ty) = base.to_ty() {
+ return self.maybe_recover_from_bad_qpath_stage_2(ty.span, ty);
+ }
+ }
+ Ok(base)
+ }
+
+ /// Given an already parsed `Ty`, parses the `::AssocItem` tail and
+ /// combines them into a `<Ty>::AssocItem` expression/pattern/type.
+ pub(super) fn maybe_recover_from_bad_qpath_stage_2<T: RecoverQPath>(
+ &mut self,
+ ty_span: Span,
+ ty: P<Ty>,
+ ) -> PResult<'a, P<T>> {
+ self.expect(&token::ModSep)?;
+
+ let mut path = ast::Path { segments: Vec::new(), span: DUMMY_SP, tokens: None };
+ self.parse_path_segments(&mut path.segments, T::PATH_STYLE, None)?;
+ path.span = ty_span.to(self.prev_token.span);
+
+ let ty_str = self.span_to_snippet(ty_span).unwrap_or_else(|_| pprust::ty_to_string(&ty));
+ self.sess.emit_err(BadQPathStage2 {
+ span: path.span,
+ ty: format!("<{}>::{}", ty_str, pprust::path_to_string(&path)),
+ });
+
+ let path_span = ty_span.shrink_to_hi(); // Use an empty path since `position == 0`.
+ Ok(P(T::recovered(Some(QSelf { ty, path_span, position: 0 }), path)))
+ }
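+
+    // e.g. `(u8, u16)::default()` is recovered as `<(u8, u16)>::default()`.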
+
+ pub fn maybe_consume_incorrect_semicolon(&mut self, items: &[P<Item>]) -> bool {
+ if self.token.kind == TokenKind::Semi {
+ self.bump();
+
+ let mut err =
+ IncorrectSemicolon { span: self.prev_token.span, opt_help: None, name: "" };
+
+ if !items.is_empty() {
+ let previous_item = &items[items.len() - 1];
+ let previous_item_kind_name = match previous_item.kind {
+ // Say "braced struct" because tuple-structs and
+ // braceless-empty-struct declarations do take a semicolon.
+ ItemKind::Struct(..) => Some("braced struct"),
+ ItemKind::Enum(..) => Some("enum"),
+ ItemKind::Trait(..) => Some("trait"),
+ ItemKind::Union(..) => Some("union"),
+ _ => None,
+ };
+ if let Some(name) = previous_item_kind_name {
+ err.opt_help = Some(());
+ err.name = name;
+ }
+ }
+ self.sess.emit_err(err);
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Creates a `DiagnosticBuilder` for an unexpected token `t` and tries to recover if it is a
+ /// closing delimiter.
+ pub(super) fn unexpected_try_recover(
+ &mut self,
+ t: &TokenKind,
+ ) -> PResult<'a, bool /* recovered */> {
+ let token_str = pprust::token_kind_to_string(t);
+ let this_token_str = super::token_descr(&self.token);
+ let (prev_sp, sp) = match (&self.token.kind, self.subparser_name) {
+ // Point at the end of the macro call when reaching end of macro arguments.
+ (token::Eof, Some(_)) => {
+ let sp = self.sess.source_map().next_point(self.prev_token.span);
+ (sp, sp)
+ }
+ // We don't want to point at the following span after DUMMY_SP.
+ // This happens when the parser finds an empty TokenStream.
+ _ if self.prev_token.span == DUMMY_SP => (self.token.span, self.token.span),
+ // EOF, don't want to point at the following char, but rather the last token.
+ (token::Eof, None) => (self.prev_token.span, self.token.span),
+ _ => (self.prev_token.span.shrink_to_hi(), self.token.span),
+ };
+ let msg = format!(
+ "expected `{}`, found {}",
+ token_str,
+ match (&self.token.kind, self.subparser_name) {
+ (token::Eof, Some(origin)) => format!("end of {origin}"),
+ _ => this_token_str,
+ },
+ );
+ let mut err = self.struct_span_err(sp, &msg);
+ let label_exp = format!("expected `{token_str}`");
+ match self.recover_closing_delimiter(&[t.clone()], err) {
+ Err(e) => err = e,
+ Ok(recovered) => {
+ return Ok(recovered);
+ }
+ }
+ let sm = self.sess.source_map();
+ if !sm.is_multiline(prev_sp.until(sp)) {
+ // When the spans are in the same line, it means that the only content
+ // between them is whitespace, point only at the found token.
+ err.span_label(sp, label_exp);
+ } else {
+ err.span_label(prev_sp, label_exp);
+ err.span_label(sp, "unexpected token");
+ }
+ Err(err)
+ }
+
+ pub(super) fn expect_semi(&mut self) -> PResult<'a, ()> {
+ if self.eat(&token::Semi) {
+ return Ok(());
+ }
+ self.expect(&token::Semi).map(drop) // Error unconditionally
+ }
+
+ /// Consumes alternative await syntaxes like `await!(<expr>)`, `await <expr>`,
+ /// `await? <expr>`, `await(<expr>)`, and `await { <expr> }`.
+ pub(super) fn recover_incorrect_await_syntax(
+ &mut self,
+ lo: Span,
+ await_sp: Span,
+ attrs: AttrVec,
+ ) -> PResult<'a, P<Expr>> {
+ let (hi, expr, is_question) = if self.token == token::Not {
+ // Handle `await!(<expr>)`.
+ self.recover_await_macro()?
+ } else {
+ self.recover_await_prefix(await_sp)?
+ };
+ let sp = self.error_on_incorrect_await(lo, hi, &expr, is_question);
+ let kind = match expr.kind {
+ // Avoid knock-down errors as we don't know whether to interpret this as `foo().await?`
+ // or `foo()?.await` (the very reason we went with postfix syntax 😅).
+ ExprKind::Try(_) => ExprKind::Err,
+ _ => ExprKind::Await(expr),
+ };
+ let expr = self.mk_expr(lo.to(sp), kind, attrs);
+ self.maybe_recover_from_bad_qpath(expr)
+ }
+
+ fn recover_await_macro(&mut self) -> PResult<'a, (Span, P<Expr>, bool)> {
+ self.expect(&token::Not)?;
+ self.expect(&token::OpenDelim(Delimiter::Parenthesis))?;
+ let expr = self.parse_expr()?;
+ self.expect(&token::CloseDelim(Delimiter::Parenthesis))?;
+ Ok((self.prev_token.span, expr, false))
+ }
+
+ fn recover_await_prefix(&mut self, await_sp: Span) -> PResult<'a, (Span, P<Expr>, bool)> {
+ let is_question = self.eat(&token::Question); // Handle `await? <expr>`.
+ let expr = if self.token == token::OpenDelim(Delimiter::Brace) {
+ // Handle `await { <expr> }`.
+ // This needs to be handled separately from the next arm to avoid
+ // interpreting `await { <expr> }?` as `<expr>?.await`.
+ self.parse_block_expr(None, self.token.span, BlockCheckMode::Default, AttrVec::new())
+ } else {
+ self.parse_expr()
+ }
+ .map_err(|mut err| {
+ err.span_label(await_sp, "while parsing this incorrect await expression");
+ err
+ })?;
+ Ok((expr.span, expr, is_question))
+ }
+
+ fn error_on_incorrect_await(&self, lo: Span, hi: Span, expr: &Expr, is_question: bool) -> Span {
+ let span = lo.to(hi);
+ let applicability = match expr.kind {
+ ExprKind::Try(_) => Applicability::MaybeIncorrect, // `await <expr>?`
+ _ => Applicability::MachineApplicable,
+ };
+
+ self.sess.emit_err(IncorrectAwait {
+ span,
+ sugg_span: (span, applicability),
+ expr: self.span_to_snippet(expr.span).unwrap_or_else(|_| pprust::expr_to_string(&expr)),
+ question_mark: if is_question { "?" } else { "" },
+ });
+
+ span
+ }
+
+    /// If `future.await()` is encountered, consumes the parentheses and emits an error.
+ pub(super) fn recover_from_await_method_call(&mut self) {
+ if self.token == token::OpenDelim(Delimiter::Parenthesis)
+ && self.look_ahead(1, |t| t == &token::CloseDelim(Delimiter::Parenthesis))
+ {
+ // future.await()
+ let lo = self.token.span;
+ self.bump(); // (
+ let span = lo.to(self.token.span);
+ self.bump(); // )
+
+ self.sess.emit_err(IncorrectUseOfAwait { span });
+ }
+ }
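+
+    // e.g. `fut.await()` is consumed and reported, with the suggestion to drop the
+    // parentheses and write `fut.await`.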
+
+ pub(super) fn try_macro_suggestion(&mut self) -> PResult<'a, P<Expr>> {
+ let is_try = self.token.is_keyword(kw::Try);
+        let is_questionmark = self.look_ahead(1, |t| t == &token::Not); // check for `!`
+        let is_open = self.look_ahead(2, |t| t == &token::OpenDelim(Delimiter::Parenthesis)); // check for `(`
+
+ if is_try && is_questionmark && is_open {
+ let lo = self.token.span;
+            self.bump(); // remove `try`
+            self.bump(); // remove `!`
+            let try_span = lo.to(self.token.span); // the span of `try!(`
+            self.bump(); // remove `(`
+            let is_empty = self.token == token::CloseDelim(Delimiter::Parenthesis); // check if the block is empty
+            self.consume_block(Delimiter::Parenthesis, ConsumeClosingDelim::No); // eat the block
+            let hi = self.token.span;
+            self.bump(); // remove `)`
+ let mut err = self.struct_span_err(lo.to(hi), "use of deprecated `try` macro");
+ err.note("in the 2018 edition `try` is a reserved keyword, and the `try!()` macro is deprecated");
+ let prefix = if is_empty { "" } else { "alternatively, " };
+ if !is_empty {
+ err.multipart_suggestion(
+ "you can use the `?` operator instead",
+ vec![(try_span, "".to_owned()), (hi, "?".to_owned())],
+ Applicability::MachineApplicable,
+ );
+ }
+            err.span_suggestion(
+                lo.shrink_to_lo(),
+                &format!(
+                    "{prefix}you can still access the deprecated `try!()` macro using the \
+                     \"raw identifier\" syntax"
+                ),
+                "r#",
+                Applicability::MachineApplicable,
+            );
+ err.emit();
+ Ok(self.mk_expr_err(lo.to(hi)))
+ } else {
+            Err(self.expected_expression_found()) // The user isn't trying to invoke the `try!` macro.
+ }
+ }
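+
+    // e.g. `try!(f())` in edition 2018+ is reported as a use of the deprecated `try`
+    // macro, suggesting `f()?` or the raw-identifier form `r#try!(f())`.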
+
+    /// Recovers a situation like `for ( $pat in $expr )`
+    /// and suggests writing `for $pat in $expr` instead.
+ ///
+ /// This should be called before parsing the `$block`.
+ pub(super) fn recover_parens_around_for_head(
+ &mut self,
+ pat: P<Pat>,
+ begin_paren: Option<Span>,
+ ) -> P<Pat> {
+ match (&self.token.kind, begin_paren) {
+ (token::CloseDelim(Delimiter::Parenthesis), Some(begin_par_sp)) => {
+ self.bump();
+
+ self.struct_span_err(
+ MultiSpan::from_spans(vec![begin_par_sp, self.prev_token.span]),
+ "unexpected parentheses surrounding `for` loop head",
+ )
+ .multipart_suggestion(
+ "remove parentheses in `for` loop",
+ vec![(begin_par_sp, String::new()), (self.prev_token.span, String::new())],
+ // With e.g. `for (x) in y)` this would replace `(x) in y)`
+ // with `x) in y)` which is syntactically invalid.
+ // However, this is prevented before we get here.
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ // Unwrap `(pat)` into `pat` to avoid the `unused_parens` lint.
+ pat.and_then(|pat| match pat.kind {
+ PatKind::Paren(pat) => pat,
+ _ => P(pat),
+ })
+ }
+ _ => pat,
+ }
+ }
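+
+    // e.g. `for (x in 0..3) {}` reports "unexpected parentheses surrounding `for`
+    // loop head" and suggests `for x in 0..3 {}`.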
+
+ pub(super) fn could_ascription_be_path(&self, node: &ast::ExprKind) -> bool {
+ (self.token == token::Lt && // `foo:<bar`, likely a typoed turbofish.
+ self.look_ahead(1, |t| t.is_ident() && !t.is_reserved_ident()))
+ || self.token.is_ident() &&
+ matches!(node, ast::ExprKind::Path(..) | ast::ExprKind::Field(..)) &&
+ !self.token.is_reserved_ident() && // v `foo:bar(baz)`
+ self.look_ahead(1, |t| t == &token::OpenDelim(Delimiter::Parenthesis))
+ || self.look_ahead(1, |t| t == &token::OpenDelim(Delimiter::Brace)) // `foo:bar {`
+ || self.look_ahead(1, |t| t == &token::Colon) && // `foo:bar::<baz`
+ self.look_ahead(2, |t| t == &token::Lt) &&
+ self.look_ahead(3, |t| t.is_ident())
+ || self.look_ahead(1, |t| t == &token::Colon) && // `foo:bar:baz`
+ self.look_ahead(2, |t| t.is_ident())
+ || self.look_ahead(1, |t| t == &token::ModSep)
+ && (self.look_ahead(2, |t| t.is_ident()) || // `foo:bar::baz`
+ self.look_ahead(2, |t| t == &token::Lt)) // `foo:bar::<baz>`
+ }
+
+ pub(super) fn recover_seq_parse_error(
+ &mut self,
+ delim: Delimiter,
+ lo: Span,
+ result: PResult<'a, P<Expr>>,
+ ) -> P<Expr> {
+ match result {
+ Ok(x) => x,
+ Err(mut err) => {
+ err.emit();
+ // Recover from parse error, callers expect the closing delim to be consumed.
+ self.consume_block(delim, ConsumeClosingDelim::Yes);
+ self.mk_expr(lo.to(self.prev_token.span), ExprKind::Err, AttrVec::new())
+ }
+ }
+ }
+
+ pub(super) fn recover_closing_delimiter(
+ &mut self,
+ tokens: &[TokenKind],
+ mut err: DiagnosticBuilder<'a, ErrorGuaranteed>,
+ ) -> PResult<'a, bool> {
+ let mut pos = None;
+ // We want to use the last closing delim that would apply.
+ for (i, unmatched) in self.unclosed_delims.iter().enumerate().rev() {
+ if tokens.contains(&token::CloseDelim(unmatched.expected_delim))
+ && Some(self.token.span) > unmatched.unclosed_span
+ {
+ pos = Some(i);
+ }
+ }
+ match pos {
+ Some(pos) => {
+ // Recover and assume that the detected unclosed delimiter was meant for
+ // this location. Emit the diagnostic and act as if the delimiter was
+ // present for the parser's sake.
+
+ // Don't attempt to recover from this unclosed delimiter more than once.
+ let unmatched = self.unclosed_delims.remove(pos);
+ let delim = TokenType::Token(token::CloseDelim(unmatched.expected_delim));
+ if unmatched.found_delim.is_none() {
+                    // We encountered `Eof`; set this fact here to avoid complaining about a
+                    // missing `fn main()` when we found a place to suggest the closing brace.
+ *self.sess.reached_eof.borrow_mut() = true;
+ }
+
+ // We want to suggest the inclusion of the closing delimiter where it makes
+ // the most sense, which is immediately after the last token:
+ //
+ // {foo(bar {}}
+ // ^ ^
+ // | |
+ // | help: `)` may belong here
+ // |
+ // unclosed delimiter
+ if let Some(sp) = unmatched.unclosed_span {
+ let mut primary_span: Vec<Span> =
+ err.span.primary_spans().iter().cloned().collect();
+ primary_span.push(sp);
+ let mut primary_span: MultiSpan = primary_span.into();
+ for span_label in err.span.span_labels() {
+ if let Some(label) = span_label.label {
+ primary_span.push_span_label(span_label.span, label);
+ }
+ }
+ err.set_span(primary_span);
+ err.span_label(sp, "unclosed delimiter");
+ }
+ // Backticks should be removed to apply suggestions.
+ let mut delim = delim.to_string();
+ delim.retain(|c| c != '`');
+ err.span_suggestion_short(
+ self.prev_token.span.shrink_to_hi(),
+ &format!("`{delim}` may belong here"),
+ delim,
+ Applicability::MaybeIncorrect,
+ );
+ if unmatched.found_delim.is_none() {
+ // Encountered `Eof` when lexing blocks. Do not recover here to avoid knockdown
+ // errors which would be emitted elsewhere in the parser and let other error
+ // recovery consume the rest of the file.
+ Err(err)
+ } else {
+ err.emit();
+ self.expected_tokens.clear(); // Reduce the number of errors.
+ Ok(true)
+ }
+ }
+ _ => Err(err),
+ }
+ }
+
+ /// Eats tokens until we can be relatively sure we reached the end of the
+ /// statement. This is something of a best-effort heuristic.
+ ///
+ /// We terminate when we find an unmatched `}` (without consuming it).
+ pub(super) fn recover_stmt(&mut self) {
+ self.recover_stmt_(SemiColonMode::Ignore, BlockMode::Ignore)
+ }
+
+ /// If `break_on_semi` is `Break`, then we will stop consuming tokens after
+ /// finding (and consuming) a `;` outside of `{}` or `[]` (note that this is
+ /// approximate -- it can mean we break too early due to macros, but that
+ /// should only lead to sub-optimal recovery, not inaccurate parsing).
+ ///
+ /// If `break_on_block` is `Break`, then we will stop consuming tokens
+ /// after finding (and consuming) a brace-delimited block.
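+ ///
+ /// For example, with `break_on_semi` set to `Break`, recovery over the
+ /// malformed statement `let x = 1 + ; let y = 2;` consumes tokens up to and
+ /// including the first top-level `;`, so parsing can resume at `let y = 2;`.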
+ pub(super) fn recover_stmt_(
+ &mut self,
+ break_on_semi: SemiColonMode,
+ break_on_block: BlockMode,
+ ) {
+ let mut brace_depth = 0;
+ let mut bracket_depth = 0;
+ let mut in_block = false;
+ debug!("recover_stmt_ enter loop (semi={:?}, block={:?})", break_on_semi, break_on_block);
+ loop {
+ debug!("recover_stmt_ loop {:?}", self.token);
+ match self.token.kind {
+ token::OpenDelim(Delimiter::Brace) => {
+ brace_depth += 1;
+ self.bump();
+ if break_on_block == BlockMode::Break && brace_depth == 1 && bracket_depth == 0
+ {
+ in_block = true;
+ }
+ }
+ token::OpenDelim(Delimiter::Bracket) => {
+ bracket_depth += 1;
+ self.bump();
+ }
+ token::CloseDelim(Delimiter::Brace) => {
+ if brace_depth == 0 {
+ debug!("recover_stmt_ return - close delim {:?}", self.token);
+ break;
+ }
+ brace_depth -= 1;
+ self.bump();
+ if in_block && bracket_depth == 0 && brace_depth == 0 {
+ debug!("recover_stmt_ return - block end {:?}", self.token);
+ break;
+ }
+ }
+ token::CloseDelim(Delimiter::Bracket) => {
+ bracket_depth -= 1;
+ if bracket_depth < 0 {
+ bracket_depth = 0;
+ }
+ self.bump();
+ }
+ token::Eof => {
+ debug!("recover_stmt_ return - Eof");
+ break;
+ }
+ token::Semi => {
+ self.bump();
+ if break_on_semi == SemiColonMode::Break
+ && brace_depth == 0
+ && bracket_depth == 0
+ {
+ debug!("recover_stmt_ return - Semi");
+ break;
+ }
+ }
+ token::Comma
+ if break_on_semi == SemiColonMode::Comma
+ && brace_depth == 0
+ && bracket_depth == 0 =>
+ {
+ debug!("recover_stmt_ return - Comma");
+ break;
+ }
+ _ => self.bump(),
+ }
+ }
+ }
+
+ pub(super) fn check_for_for_in_in_typo(&mut self, in_span: Span) {
+ if self.eat_keyword(kw::In) {
+ // a common typo: `for _ in in bar {}`
+ self.sess.emit_err(InInTypo {
+ span: self.prev_token.span,
+ sugg_span: in_span.until(self.prev_token.span),
+ });
+ }
+ }
+
+ pub(super) fn eat_incorrect_doc_comment_for_param_type(&mut self) {
+ if let token::DocComment(..) = self.token.kind {
+ self.struct_span_err(
+ self.token.span,
+ "documentation comments cannot be applied to a function parameter's type",
+ )
+ .span_label(self.token.span, "doc comments are not allowed here")
+ .emit();
+ self.bump();
+ } else if self.token == token::Pound
+ && self.look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Bracket))
+ {
+ let lo = self.token.span;
+ // Skip every token until next possible arg.
+ while self.token != token::CloseDelim(Delimiter::Bracket) {
+ self.bump();
+ }
+ let sp = lo.to(self.token.span);
+ self.bump();
+ self.struct_span_err(sp, "attributes cannot be applied to a function parameter's type")
+ .span_label(sp, "attributes are not allowed here")
+ .emit();
+ }
+ }
+
+ pub(super) fn parameter_without_type(
+ &mut self,
+ err: &mut Diagnostic,
+ pat: P<ast::Pat>,
+ require_name: bool,
+ first_param: bool,
+ ) -> Option<Ident> {
+ // If we find a pattern followed by an identifier, it could be an (incorrect)
+ // C-style parameter declaration.
+ if self.check_ident()
+ && self.look_ahead(1, |t| {
+ *t == token::Comma || *t == token::CloseDelim(Delimiter::Parenthesis)
+ })
+ {
+ // `fn foo(String s) {}`
+ let ident = self.parse_ident().unwrap();
+ let span = pat.span.with_hi(ident.span.hi());
+
+ err.span_suggestion(
+ span,
+ "declare the type after the parameter binding",
+ "<identifier>: <type>",
+ Applicability::HasPlaceholders,
+ );
+ return Some(ident);
+ } else if require_name
+ && (self.token == token::Comma
+ || self.token == token::Lt
+ || self.token == token::CloseDelim(Delimiter::Parenthesis))
+ {
+ let rfc_note = "anonymous parameters are removed in the 2018 edition (see RFC 1685)";
+
+ let (ident, self_sugg, param_sugg, type_sugg, self_span, param_span, type_span) =
+ match pat.kind {
+ PatKind::Ident(_, ident, _) => (
+ ident,
+ "self: ",
+ ": TypeName".to_string(),
+ "_: ",
+ pat.span.shrink_to_lo(),
+ pat.span.shrink_to_hi(),
+ pat.span.shrink_to_lo(),
+ ),
+ // Also catches `fn foo(&a)`.
+ PatKind::Ref(ref inner_pat, mutab)
+ if matches!(inner_pat.clone().into_inner().kind, PatKind::Ident(..)) =>
+ {
+ match inner_pat.clone().into_inner().kind {
+ PatKind::Ident(_, ident, _) => {
+ let mutab = mutab.prefix_str();
+ (
+ ident,
+ "self: ",
+ format!("{ident}: &{mutab}TypeName"),
+ "_: ",
+ pat.span.shrink_to_lo(),
+ pat.span,
+ pat.span.shrink_to_lo(),
+ )
+ }
+ _ => unreachable!(),
+ }
+ }
+ _ => {
+ // Otherwise, try to get a type and emit a suggestion.
+ if let Some(ty) = pat.to_ty() {
+ err.span_suggestion_verbose(
+ pat.span,
+ "explicitly ignore the parameter name",
+ format!("_: {}", pprust::ty_to_string(&ty)),
+ Applicability::MachineApplicable,
+ );
+ err.note(rfc_note);
+ }
+
+ return None;
+ }
+ };
+
+ // `fn foo(a, b) {}`, `fn foo(a<x>, b<y>) {}` or `fn foo(usize, usize) {}`
+ if first_param {
+ err.span_suggestion(
+ self_span,
+ "if this is a `self` type, give it a parameter name",
+ self_sugg,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ // Avoid suggesting that `fn foo(HashMap<u32>)` is fixed with a change to
+ // `fn foo(HashMap: TypeName<u32>)`.
+ if self.token != token::Lt {
+ err.span_suggestion(
+ param_span,
+ "if this is a parameter name, give it a type",
+ param_sugg,
+ Applicability::HasPlaceholders,
+ );
+ }
+ err.span_suggestion(
+ type_span,
+ "if this is a type, explicitly ignore the parameter name",
+ type_sugg,
+ Applicability::MachineApplicable,
+ );
+ err.note(rfc_note);
+
+ // Don't attempt to recover by using the `X` in `X<Y>` as the parameter name.
+ return if self.token == token::Lt { None } else { Some(ident) };
+ }
+ None
+ }
+
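+ /// Recover from a pattern written in place of a plain parameter name in a
+ /// method signature without a body, e.g. `fn f((a, b): (u8, u8));`, by
+ /// emitting E0642 and replacing the pattern with `_`.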
+ pub(super) fn recover_arg_parse(&mut self) -> PResult<'a, (P<ast::Pat>, P<ast::Ty>)> {
+ let pat = self.parse_pat_no_top_alt(Some("argument name"))?;
+ self.expect(&token::Colon)?;
+ let ty = self.parse_ty()?;
+
+ struct_span_err!(
+ self.diagnostic(),
+ pat.span,
+ E0642,
+ "patterns aren't allowed in methods without bodies",
+ )
+ .span_suggestion_short(
+ pat.span,
+ "give this argument a name or use an underscore to ignore it",
+ "_",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ // Pretend the pattern is `_`, to avoid duplicate errors from AST validation.
+ let pat =
+ P(Pat { kind: PatKind::Wild, span: pat.span, id: ast::DUMMY_NODE_ID, tokens: None });
+ Ok((pat, ty))
+ }
+
+ pub(super) fn recover_bad_self_param(&mut self, mut param: Param) -> PResult<'a, Param> {
+ let sp = param.pat.span;
+ param.ty.kind = TyKind::Err;
+ self.struct_span_err(sp, "unexpected `self` parameter in function")
+ .span_label(sp, "must be the first parameter of an associated function")
+ .emit();
+ Ok(param)
+ }
+
+ pub(super) fn consume_block(&mut self, delim: Delimiter, consume_close: ConsumeClosingDelim) {
+ let mut brace_depth = 0;
+ loop {
+ if self.eat(&token::OpenDelim(delim)) {
+ brace_depth += 1;
+ } else if self.check(&token::CloseDelim(delim)) {
+ if brace_depth == 0 {
+ if let ConsumeClosingDelim::Yes = consume_close {
+ // Some of the callers of this method expect to be able to parse the
+ // closing delimiter themselves, so we leave it alone. Otherwise we advance
+ // the parser.
+ self.bump();
+ }
+ return;
+ } else {
+ self.bump();
+ brace_depth -= 1;
+ continue;
+ }
+ } else if self.token == token::Eof {
+ return;
+ } else {
+ self.bump();
+ }
+ }
+ }
+
+ pub(super) fn expected_expression_found(&self) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let (span, msg) = match (&self.token.kind, self.subparser_name) {
+ (&token::Eof, Some(origin)) => {
+ let sp = self.sess.source_map().next_point(self.prev_token.span);
+ (sp, format!("expected expression, found end of {origin}"))
+ }
+ _ => (
+ self.token.span,
+ format!("expected expression, found {}", super::token_descr(&self.token),),
+ ),
+ };
+ let mut err = self.struct_span_err(span, &msg);
+ let sp = self.sess.source_map().start_point(self.token.span);
+ if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&sp) {
+ self.sess.expr_parentheses_needed(&mut err, *sp);
+ }
+ err.span_label(span, "expected expression");
+ err
+ }
+
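+ // Illustrative: calling `consume_tts(1, &[(token::Lt, 1), (token::Gt, -1)])`
+ // skips over a `<...>`-delimited run, raising the count on each `<`, lowering
+ // it on each `>`, and stopping once the count returns to zero (or at `Eof`).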
+ fn consume_tts(
+ &mut self,
+ mut acc: i64, // `i64` because malformed code can have more closing delims than opening.
+ // Not using `FxHashMap` due to `token::TokenKind: !Eq + !Hash`.
+ modifier: &[(token::TokenKind, i64)],
+ ) {
+ while acc > 0 {
+ if let Some((_, val)) = modifier.iter().find(|(t, _)| *t == self.token.kind) {
+ acc += *val;
+ }
+ if self.token.kind == token::Eof {
+ break;
+ }
+ self.bump();
+ }
+ }
+
+ /// Replace duplicated recovered parameters with `_` pattern to avoid unnecessary errors.
+ ///
+ /// This is necessary because at this point we don't know whether we parsed a function with
+ /// anonymous parameters or a function with names but no types. In order to minimize
+ /// unnecessary errors, we assume the parameters are in the shape of `fn foo(a, b, c)` where
+ /// the parameters are *names* (so we don't emit errors about not being able to find `b` in
+ /// the local scope), but if we find the same name multiple times, like in `fn foo(i8, i8)`,
+ /// we deduplicate them so as not to complain about duplicated parameter names.
+ pub(super) fn deduplicate_recovered_params_names(&self, fn_inputs: &mut Vec<Param>) {
+ let mut seen_inputs = FxHashSet::default();
+ for input in fn_inputs.iter_mut() {
+ let opt_ident = if let (PatKind::Ident(_, ident, _), TyKind::Err) =
+ (&input.pat.kind, &input.ty.kind)
+ {
+ Some(*ident)
+ } else {
+ None
+ };
+ if let Some(ident) = opt_ident {
+ if seen_inputs.contains(&ident) {
+ input.pat.kind = PatKind::Wild;
+ }
+ seen_inputs.insert(ident);
+ }
+ }
+ }
+
+ /// Handle encountering a symbol in a generic argument list that is not a `,` or `>`. In this
+ /// case, we emit an error and try to suggest enclosing a const argument in braces if it looks
+ /// like the user has forgotten them.
+ pub fn handle_ambiguous_unbraced_const_arg(
+ &mut self,
+ args: &mut Vec<AngleBracketedArg>,
+ ) -> PResult<'a, bool> {
+ // If we haven't encountered a closing `>`, then the argument is malformed.
+ // It's likely that the user has written a const expression without enclosing it
+ // in braces, so we try to recover here.
+ let arg = args.pop().unwrap();
+ // FIXME: for some reason using `unexpected` or `expected_one_of_not_found` has
+ // adverse side-effects on subsequent errors and seems to advance the parser.
+ // We cause this error here exclusively in case a `const` expression could be
+ // recovered from the current parser state, even if it is followed by more
+ // arguments after a comma.
+ let mut err = self.struct_span_err(
+ self.token.span,
+ &format!("expected one of `,` or `>`, found {}", super::token_descr(&self.token)),
+ );
+ err.span_label(self.token.span, "expected one of `,` or `>`");
+ match self.recover_const_arg(arg.span(), err) {
+ Ok(arg) => {
+ args.push(AngleBracketedArg::Arg(arg));
+ if self.eat(&token::Comma) {
+ return Ok(true); // Continue
+ }
+ }
+ Err(mut err) => {
+ args.push(arg);
+ // We will emit a more generic error later.
+ err.delay_as_bug();
+ }
+ }
+ return Ok(false); // Don't continue.
+ }
+
+ /// Attempt to parse a generic const argument that has not been enclosed in braces.
+ /// There are a limited number of expressions that are permitted without being enclosed
+ /// in braces:
+ /// - Literals.
+ /// - Single-segment paths (i.e. standalone generic const parameters).
+ /// All other expressions that can be parsed will emit an error suggesting the expression be
+ /// wrapped in braces.
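+ ///
+ /// For example, `foo::<3>` and `foo::<BAR>` are accepted as-is, while an
+ /// expression like `foo::<3 + 1>` parses but triggers the suggestion
+ /// `foo::<{ 3 + 1 }>`.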
+ pub fn handle_unambiguous_unbraced_const_arg(&mut self) -> PResult<'a, P<Expr>> {
+ let start = self.token.span;
+ let expr = self.parse_expr_res(Restrictions::CONST_EXPR, None).map_err(|mut err| {
+ err.span_label(
+ start.shrink_to_lo(),
+ "while parsing a const generic argument starting here",
+ );
+ err
+ })?;
+ if !self.expr_is_valid_const_arg(&expr) {
+ self.struct_span_err(
+ expr.span,
+ "expressions must be enclosed in braces to be used as const generic \
+ arguments",
+ )
+ .multipart_suggestion(
+ "enclose the `const` expression in braces",
+ vec![
+ (expr.span.shrink_to_lo(), "{ ".to_string()),
+ (expr.span.shrink_to_hi(), " }".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ Ok(expr)
+ }
+
+ fn recover_const_param_decl(&mut self, ty_generics: Option<&Generics>) -> Option<GenericArg> {
+ let snapshot = self.create_snapshot_for_diagnostic();
+ let param = match self.parse_const_param(vec![]) {
+ Ok(param) => param,
+ Err(err) => {
+ err.cancel();
+ self.restore_snapshot(snapshot);
+ return None;
+ }
+ };
+ let mut err =
+ self.struct_span_err(param.span(), "unexpected `const` parameter declaration");
+ err.span_label(param.span(), "expected a `const` expression, not a parameter declaration");
+ if let (Some(generics), Ok(snippet)) =
+ (ty_generics, self.sess.source_map().span_to_snippet(param.span()))
+ {
+ let (span, sugg) = match &generics.params[..] {
+ [] => (generics.span, format!("<{snippet}>")),
+ [.., generic] => (generic.span().shrink_to_hi(), format!(", {snippet}")),
+ };
+ err.multipart_suggestion(
+ "`const` parameters must be declared for the `impl`",
+ vec![(span, sugg), (param.span(), param.ident.to_string())],
+ Applicability::MachineApplicable,
+ );
+ }
+ let value = self.mk_expr_err(param.span());
+ err.emit();
+ Some(GenericArg::Const(AnonConst { id: ast::DUMMY_NODE_ID, value }))
+ }
+
+ pub fn recover_const_param_declaration(
+ &mut self,
+ ty_generics: Option<&Generics>,
+ ) -> PResult<'a, Option<GenericArg>> {
+ // We have to check for a few different cases.
+ if let Some(arg) = self.recover_const_param_decl(ty_generics) {
+ return Ok(Some(arg));
+ }
+
+ // We haven't consumed `const` yet.
+ let start = self.token.span;
+ self.bump(); // `const`
+
+ // Detect and recover from the old, pre-RFC2000 syntax for const generics.
+ let mut err = self
+ .struct_span_err(start, "expected lifetime, type, or constant, found keyword `const`");
+ if self.check_const_arg() {
+ err.span_suggestion_verbose(
+ start.until(self.token.span),
+ "the `const` keyword is only needed in the definition of the type",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+ Ok(Some(GenericArg::Const(self.parse_const_arg()?)))
+ } else {
+ let after_kw_const = self.token.span;
+ self.recover_const_arg(after_kw_const, err).map(Some)
+ }
+ }
+
+ /// Try to recover from a possible generic const argument without `{` and `}`.
+ ///
+ /// When encountering code like `foo::< bar + 3 >` or `foo::< bar - baz >` we suggest
+ /// `foo::<{ bar + 3 }>` and `foo::<{ bar - baz }>`, respectively. We only provide a suggestion
+ /// if we think that the resulting expression would be well formed.
+ pub fn recover_const_arg(
+ &mut self,
+ start: Span,
+ mut err: DiagnosticBuilder<'a, ErrorGuaranteed>,
+ ) -> PResult<'a, GenericArg> {
+ let is_op_or_dot = AssocOp::from_token(&self.token)
+ .and_then(|op| {
+ if let AssocOp::Greater
+ | AssocOp::Less
+ | AssocOp::ShiftRight
+ | AssocOp::GreaterEqual
+ // Don't recover from `foo::<bar = baz>`, because this could be an attempt to
+ // assign a value to a defaulted generic parameter.
+ | AssocOp::Assign
+ | AssocOp::AssignOp(_) = op
+ {
+ None
+ } else {
+ Some(op)
+ }
+ })
+ .is_some()
+ || self.token.kind == TokenKind::Dot;
+ // This will be true when a trait object type `Foo +` or a path that was a
+ // `const fn` with type params has been parsed.
+ let was_op =
+ matches!(self.prev_token.kind, token::BinOp(token::Plus | token::Shr) | token::Gt);
+ if !is_op_or_dot && !was_op {
+ // We perform these checks and early return to avoid taking a snapshot unnecessarily.
+ return Err(err);
+ }
+ let snapshot = self.create_snapshot_for_diagnostic();
+ if is_op_or_dot {
+ self.bump();
+ }
+ match self.parse_expr_res(Restrictions::CONST_EXPR, None) {
+ Ok(expr) => {
+ // Find a mistake like `MyTrait<Assoc == S::Assoc>`.
+ if token::EqEq == snapshot.token.kind {
+ err.span_suggestion(
+ snapshot.token.span,
+ "if you meant to use an associated type binding, replace `==` with `=`",
+ "=",
+ Applicability::MaybeIncorrect,
+ );
+ let value = self.mk_expr_err(start.to(expr.span));
+ err.emit();
+ return Ok(GenericArg::Const(AnonConst { id: ast::DUMMY_NODE_ID, value }));
+ } else if token::Colon == snapshot.token.kind
+ && expr.span.lo() == snapshot.token.span.hi()
+ && matches!(expr.kind, ExprKind::Path(..))
+ {
+ // Find a mistake like `foo::var:A`.
+ err.span_suggestion(
+ snapshot.token.span,
+ "write a path separator here",
+ "::",
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+ return Ok(GenericArg::Type(self.mk_ty(start.to(expr.span), TyKind::Err)));
+ } else if token::Comma == self.token.kind || self.token.kind.should_end_const_arg()
+ {
+ // Avoid the following output by checking that we consumed a full const arg:
+ // help: expressions must be enclosed in braces to be used as const generic
+ // arguments
+ // |
+ // LL | let sr: Vec<{ (u32, _, _) = vec![] };
+ // | ^ ^
+ return Ok(self.dummy_const_arg_needs_braces(err, start.to(expr.span)));
+ }
+ }
+ Err(err) => {
+ err.cancel();
+ }
+ }
+ self.restore_snapshot(snapshot);
+ Err(err)
+ }
+
+ /// Creates a dummy const argument, and reports that the expression must be enclosed in braces.
+ pub fn dummy_const_arg_needs_braces(
+ &self,
+ mut err: DiagnosticBuilder<'a, ErrorGuaranteed>,
+ span: Span,
+ ) -> GenericArg {
+ err.multipart_suggestion(
+ "expressions must be enclosed in braces to be used as const generic \
+ arguments",
+ vec![(span.shrink_to_lo(), "{ ".to_string()), (span.shrink_to_hi(), " }".to_string())],
+ Applicability::MaybeIncorrect,
+ );
+ let value = self.mk_expr_err(span);
+ err.emit();
+ GenericArg::Const(AnonConst { id: ast::DUMMY_NODE_ID, value })
+ }
+
+ /// Get the diagnostic for the case where `move async` is found.
+ ///
+ /// `move_async_span` starts at the 'm' of the `move` keyword and ends with the 'c' of the `async` keyword.
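+ ///
+ /// For example, `move async { .. }` is reported with the suggestion to write
+ /// `async move { .. }` instead.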
+ pub(super) fn incorrect_move_async_order_found(
+ &self,
+ move_async_span: Span,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut err =
+ self.struct_span_err(move_async_span, "the order of `move` and `async` is incorrect");
+ err.span_suggestion_verbose(
+ move_async_span,
+ "try switching the order",
+ "async move",
+ Applicability::MaybeIncorrect,
+ );
+ err
+ }
+
+ /// Some special error handling for the "top-level" patterns in a match arm,
+ /// `for` loop, `let`, &c. (in contrast to subpatterns within such).
+ pub(crate) fn maybe_recover_colon_colon_in_pat_typo(
+ &mut self,
+ mut first_pat: P<Pat>,
+ expected: Expected,
+ ) -> P<Pat> {
+ if token::Colon != self.token.kind {
+ return first_pat;
+ }
+ if !matches!(first_pat.kind, PatKind::Ident(_, _, None) | PatKind::Path(..))
+ || !self.look_ahead(1, |token| token.is_ident() && !token.is_reserved_ident())
+ {
+ return first_pat;
+ }
+ // The pattern looks like it might be a path with a `::` -> `:` typo:
+ // `match foo { bar:baz => {} }`
+ let span = self.token.span;
+ // We only emit an "unexpected `:`" error here if we can successfully parse the
+ // whole pattern afterwards.
+ let snapshot = self.create_snapshot_for_diagnostic();
+
+ // Create error for "unexpected `:`".
+ match self.expected_one_of_not_found(&[], &[]) {
+ Err(mut err) => {
+ self.bump(); // Skip the `:`.
+ match self.parse_pat_no_top_alt(expected) {
+ Err(inner_err) => {
+ // Carry on as if we had not done anything, callers will emit a
+ // reasonable error.
+ inner_err.cancel();
+ err.cancel();
+ self.restore_snapshot(snapshot);
+ }
+ Ok(mut pat) => {
+ // We've parsed the rest of the pattern.
+ let new_span = first_pat.span.to(pat.span);
+ let mut show_sugg = false;
+ // Try to construct a recovered pattern.
+ match &mut pat.kind {
+ PatKind::Struct(qself @ None, path, ..)
+ | PatKind::TupleStruct(qself @ None, path, _)
+ | PatKind::Path(qself @ None, path) => match &first_pat.kind {
+ PatKind::Ident(_, ident, _) => {
+ path.segments.insert(0, PathSegment::from_ident(*ident));
+ path.span = new_span;
+ show_sugg = true;
+ first_pat = pat;
+ }
+ PatKind::Path(old_qself, old_path) => {
+ path.segments = old_path
+ .segments
+ .iter()
+ .cloned()
+ .chain(take(&mut path.segments))
+ .collect();
+ path.span = new_span;
+ *qself = old_qself.clone();
+ first_pat = pat;
+ show_sugg = true;
+ }
+ _ => {}
+ },
+ PatKind::Ident(BindingMode::ByValue(Mutability::Not), ident, None) => {
+ match &first_pat.kind {
+ PatKind::Ident(_, old_ident, _) => {
+ let path = PatKind::Path(
+ None,
+ Path {
+ span: new_span,
+ segments: vec![
+ PathSegment::from_ident(*old_ident),
+ PathSegment::from_ident(*ident),
+ ],
+ tokens: None,
+ },
+ );
+ first_pat = self.mk_pat(new_span, path);
+ show_sugg = true;
+ }
+ PatKind::Path(old_qself, old_path) => {
+ let mut segments = old_path.segments.clone();
+ segments.push(PathSegment::from_ident(*ident));
+ let path = PatKind::Path(
+ old_qself.clone(),
+ Path { span: new_span, segments, tokens: None },
+ );
+ first_pat = self.mk_pat(new_span, path);
+ show_sugg = true;
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+ if show_sugg {
+ err.span_suggestion(
+ span,
+ "maybe write a path separator here",
+ "::",
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ first_pat = self.mk_pat(new_span, PatKind::Wild);
+ }
+ err.emit();
+ }
+ }
+ }
+ _ => {
+ // Carry on as if we had not done anything. This should be unreachable.
+ self.restore_snapshot(snapshot);
+ }
+ };
+ first_pat
+ }
+
+ pub(crate) fn maybe_recover_unexpected_block_label(&mut self) -> bool {
+ let Some(label) = self.eat_label().filter(|_| {
+ self.eat(&token::Colon) && self.token.kind == token::OpenDelim(Delimiter::Brace)
+ }) else {
+ return false;
+ };
+ let span = label.ident.span.to(self.prev_token.span);
+ let mut err = self.struct_span_err(span, "block label not supported here");
+ err.span_label(span, "not supported here");
+ err.tool_only_span_suggestion(
+ label.ident.span.until(self.token.span),
+ "remove this block label",
+ "",
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ true
+ }
+
+ /// Some special error handling for the "top-level" patterns in a match arm,
+ /// `for` loop, `let`, &c. (in contrast to subpatterns within such).
+ pub(crate) fn maybe_recover_unexpected_comma(
+ &mut self,
+ lo: Span,
+ rt: CommaRecoveryMode,
+ ) -> PResult<'a, ()> {
+ if self.token != token::Comma {
+ return Ok(());
+ }
+
+ // An unexpected comma after a top-level pattern is a clue that the
+ // user (perhaps more accustomed to some other language) forgot the
+ // parentheses in what should have been a tuple pattern; return a
+ // suggestion-enhanced error here rather than choking on the comma later.
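+ // For example, in `match x { 1, 2 => {} }` we suggest the tuple pattern
+ // `(1, 2)`, and, under `CommaRecoveryMode::EitherTupleOrPipe`, also the
+ // alternative `1 | 2`.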
+ let comma_span = self.token.span;
+ self.bump();
+ if let Err(err) = self.skip_pat_list() {
+ // We didn't expect this to work anyway; we just wanted to advance to the
+ // end of the comma-sequence so we know the span to suggest parenthesizing.
+ err.cancel();
+ }
+ let seq_span = lo.to(self.prev_token.span);
+ let mut err = self.struct_span_err(comma_span, "unexpected `,` in pattern");
+ if let Ok(seq_snippet) = self.span_to_snippet(seq_span) {
+ err.multipart_suggestion(
+ &format!(
+ "try adding parentheses to match on a tuple{}",
+ if let CommaRecoveryMode::LikelyTuple = rt { "" } else { "..." },
+ ),
+ vec![
+ (seq_span.shrink_to_lo(), "(".to_string()),
+ (seq_span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ if let CommaRecoveryMode::EitherTupleOrPipe = rt {
+ err.span_suggestion(
+ seq_span,
+ "...or a vertical bar to match on multiple alternatives",
+ seq_snippet.replace(',', " |"),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ Err(err)
+ }
+
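+ /// Check for `::` written where a single `:` introducing a bound was likely
+ /// intended, e.g. `Ty:: Bound` instead of `Ty: Bound`, and suggest using a
+ /// single colon.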
+ pub(crate) fn maybe_recover_bounds_doubled_colon(&mut self, ty: &Ty) -> PResult<'a, ()> {
+ let TyKind::Path(qself, path) = &ty.kind else { return Ok(()) };
+ let qself_position = qself.as_ref().map(|qself| qself.position);
+ for (i, segments) in path.segments.windows(2).enumerate() {
+ if qself_position.map(|pos| i < pos).unwrap_or(false) {
+ continue;
+ }
+ if let [a, b] = segments {
+ let (a_span, b_span) = (a.span(), b.span());
+ let between_span = a_span.shrink_to_hi().to(b_span.shrink_to_lo());
+ if self.span_to_snippet(between_span).as_ref().map(|a| &a[..]) == Ok(":: ") {
+ let mut err = self.struct_span_err(
+ path.span.shrink_to_hi(),
+ "expected `:` followed by trait or lifetime",
+ );
+ err.span_suggestion(
+ between_span,
+ "use single colon",
+ ": ",
+ Applicability::MachineApplicable,
+ );
+ return Err(err);
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Parse and throw away a parenthesized comma-separated
+ /// sequence of patterns until `)` is reached.
+ fn skip_pat_list(&mut self) -> PResult<'a, ()> {
+ while !self.check(&token::CloseDelim(Delimiter::Parenthesis)) {
+ self.parse_pat_no_top_alt(None)?;
+ if !self.eat(&token::Comma) {
+ return Ok(());
+ }
+ }
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs
new file mode 100644
index 000000000..0719a0ef0
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/expr.rs
@@ -0,0 +1,3288 @@
+use super::diagnostics::SnapshotParser;
+use super::pat::{CommaRecoveryMode, RecoverColon, RecoverComma, PARAM_EXPECTED};
+use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
+use super::{
+ AttrWrapper, BlockMode, ClosureSpans, ForceCollect, Parser, PathStyle, Restrictions,
+ SemiColonMode, SeqSep, TokenExpectType, TokenType, TrailingToken,
+};
+use crate::maybe_recover_from_interpolated_ty_qpath;
+
+use core::mem;
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter, Token, TokenKind};
+use rustc_ast::tokenstream::Spacing;
+use rustc_ast::util::classify;
+use rustc_ast::util::literal::LitError;
+use rustc_ast::util::parser::{prec_let_scrutinee_needs_par, AssocOp, Fixity};
+use rustc_ast::visit::Visitor;
+use rustc_ast::{self as ast, AttrStyle, AttrVec, CaptureBy, ExprField, Lit, UnOp, DUMMY_NODE_ID};
+use rustc_ast::{AnonConst, BinOp, BinOpKind, FnDecl, FnRetTy, MacCall, Param, Ty, TyKind};
+use rustc_ast::{Arm, Async, BlockCheckMode, Expr, ExprKind, Label, Movability, RangeLimits};
+use rustc_ast::{ClosureBinder, StmtKind};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, PResult};
+use rustc_session::lint::builtin::BREAK_WITH_LABEL_AND_LOOP;
+use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_span::source_map::{self, Span, Spanned};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{BytePos, Pos};
+
+/// Possibly accepts a `token::Interpolated` expression (a pre-parsed expression
+/// dropped into the token stream, which happens while parsing the result of
+/// macro expansion). Placement of these is not as complex as I feared it would
+/// be. The important thing is to make sure that lookahead doesn't balk at
+/// `token::Interpolated` tokens.
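+///
+/// For instance, after `macro_rules! m { ($e:expr) => { $e }; }` expands, the
+/// captured `$e` reaches the parser as a single `token::Interpolated(NtExpr)`
+/// token rather than as the raw tokens it was built from.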
+macro_rules! maybe_whole_expr {
+ ($p:expr) => {
+ if let token::Interpolated(nt) = &$p.token.kind {
+ match &**nt {
+ token::NtExpr(e) | token::NtLiteral(e) => {
+ let e = e.clone();
+ $p.bump();
+ return Ok(e);
+ }
+ token::NtPath(path) => {
+ let path = (**path).clone();
+ $p.bump();
+ return Ok($p.mk_expr(
+ $p.prev_token.span,
+ ExprKind::Path(None, path),
+ AttrVec::new(),
+ ));
+ }
+ token::NtBlock(block) => {
+ let block = block.clone();
+ $p.bump();
+ return Ok($p.mk_expr(
+ $p.prev_token.span,
+ ExprKind::Block(block, None),
+ AttrVec::new(),
+ ));
+ }
+ _ => {}
+ };
+ }
+ };
+}
+
+#[derive(Debug)]
+pub(super) enum LhsExpr {
+ NotYetParsed,
+ AttributesParsed(AttrWrapper),
+ AlreadyParsed(P<Expr>),
+}
+
+impl From<Option<AttrWrapper>> for LhsExpr {
+ /// Converts `Some(attrs)` into `LhsExpr::AttributesParsed(attrs)`
+ /// and `None` into `LhsExpr::NotYetParsed`.
+ ///
+ /// This conversion does not allocate.
+ fn from(o: Option<AttrWrapper>) -> Self {
+ if let Some(attrs) = o { LhsExpr::AttributesParsed(attrs) } else { LhsExpr::NotYetParsed }
+ }
+}
+
+impl From<P<Expr>> for LhsExpr {
+ /// Converts the `expr: P<Expr>` into `LhsExpr::AlreadyParsed(expr)`.
+ ///
+ /// This conversion does not allocate.
+ fn from(expr: P<Expr>) -> Self {
+ LhsExpr::AlreadyParsed(expr)
+ }
+}
+
+impl<'a> Parser<'a> {
+ /// Parses an expression.
+ #[inline]
+ pub fn parse_expr(&mut self) -> PResult<'a, P<Expr>> {
+ self.current_closure.take();
+
+ self.parse_expr_res(Restrictions::empty(), None)
+ }
+
+ /// Parses an expression, forcing tokens to be collected
+ pub fn parse_expr_force_collect(&mut self) -> PResult<'a, P<Expr>> {
+ self.collect_tokens_no_attrs(|this| this.parse_expr())
+ }
+
+ pub fn parse_anon_const_expr(&mut self) -> PResult<'a, AnonConst> {
+ self.parse_expr().map(|value| AnonConst { id: DUMMY_NODE_ID, value })
+ }
+
+ fn parse_expr_catch_underscore(&mut self) -> PResult<'a, P<Expr>> {
+ match self.parse_expr() {
+ Ok(expr) => Ok(expr),
+ Err(mut err) => match self.token.ident() {
+ Some((Ident { name: kw::Underscore, .. }, false))
+ if self.look_ahead(1, |t| t == &token::Comma) =>
+ {
+ // Special-case handling of `foo(_, _, _)`
+ err.emit();
+ self.bump();
+ Ok(self.mk_expr(self.prev_token.span, ExprKind::Err, AttrVec::new()))
+ }
+ _ => Err(err),
+ },
+ }
+ }
+
+ /// Parses a sequence of expressions delimited by parentheses.
+ fn parse_paren_expr_seq(&mut self) -> PResult<'a, Vec<P<Expr>>> {
+ self.parse_paren_comma_seq(|p| p.parse_expr_catch_underscore()).map(|(r, _)| r)
+ }
+
+ /// Parses an expression, subject to the given restrictions.
+ #[inline]
+ pub(super) fn parse_expr_res(
+ &mut self,
+ r: Restrictions,
+ already_parsed_attrs: Option<AttrWrapper>,
+ ) -> PResult<'a, P<Expr>> {
+ self.with_res(r, |this| this.parse_assoc_expr(already_parsed_attrs))
+ }
+
+ /// Parses an associative expression.
+ ///
+ /// This parses an expression accounting for associativity and precedence of the operators in
+ /// the expression.
+ #[inline]
+ fn parse_assoc_expr(
+ &mut self,
+ already_parsed_attrs: Option<AttrWrapper>,
+ ) -> PResult<'a, P<Expr>> {
+ self.parse_assoc_expr_with(0, already_parsed_attrs.into())
+ }
+
+ /// Parses an associative expression with operators of at least `min_prec` precedence.
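+ ///
+ /// For example, when parsing `1 + 2 * 3`, the right-hand side of the `+` is
+ /// parsed with a higher `min_prec`, so `2 * 3` binds tighter and the whole
+ /// expression groups as `1 + (2 * 3)`.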
+ pub(super) fn parse_assoc_expr_with(
+ &mut self,
+ min_prec: usize,
+ lhs: LhsExpr,
+ ) -> PResult<'a, P<Expr>> {
+ let mut lhs = if let LhsExpr::AlreadyParsed(expr) = lhs {
+ expr
+ } else {
+ let attrs = match lhs {
+ LhsExpr::AttributesParsed(attrs) => Some(attrs),
+ _ => None,
+ };
+ if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind) {
+ return self.parse_prefix_range_expr(attrs);
+ } else {
+ self.parse_prefix_expr(attrs)?
+ }
+ };
+ let last_type_ascription_set = self.last_type_ascription.is_some();
+
+ if !self.should_continue_as_assoc_expr(&lhs) {
+ self.last_type_ascription = None;
+ return Ok(lhs);
+ }
+
+ self.expected_tokens.push(TokenType::Operator);
+ while let Some(op) = self.check_assoc_op() {
+ // Adjust the span for interpolated LHS to point to the `$lhs` token
+ // and not to what it refers to.
+ let lhs_span = match self.prev_token.kind {
+ TokenKind::Interpolated(..) => self.prev_token.span,
+ _ => lhs.span,
+ };
+
+ let cur_op_span = self.token.span;
+ let restrictions = if op.node.is_assign_like() {
+ self.restrictions & Restrictions::NO_STRUCT_LITERAL
+ } else {
+ self.restrictions
+ };
+ let prec = op.node.precedence();
+ if prec < min_prec {
+ break;
+ }
+ // Check for deprecated `...` syntax
+ if self.token == token::DotDotDot && op.node == AssocOp::DotDotEq {
+ self.err_dotdotdot_syntax(self.token.span);
+ }
+
+ if self.token == token::LArrow {
+ self.err_larrow_operator(self.token.span);
+ }
+
+ self.bump();
+ if op.node.is_comparison() {
+ if let Some(expr) = self.check_no_chained_comparison(&lhs, &op)? {
+ return Ok(expr);
+ }
+ }
+
+ // Look for JS' `===` and `!==` and recover
+ if (op.node == AssocOp::Equal || op.node == AssocOp::NotEqual)
+ && self.token.kind == token::Eq
+ && self.prev_token.span.hi() == self.token.span.lo()
+ {
+ let sp = op.span.to(self.token.span);
+ let sugg = match op.node {
+ AssocOp::Equal => "==",
+ AssocOp::NotEqual => "!=",
+ _ => unreachable!(),
+ };
+ self.struct_span_err(sp, &format!("invalid comparison operator `{sugg}=`"))
+ .span_suggestion_short(
+ sp,
+ &format!("`{s}=` is not a valid comparison operator, use `{s}`", s = sugg),
+ sugg,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ self.bump();
+ }
+
+ // Look for PHP's `<>` and recover
+ if op.node == AssocOp::Less
+ && self.token.kind == token::Gt
+ && self.prev_token.span.hi() == self.token.span.lo()
+ {
+ let sp = op.span.to(self.token.span);
+ self.struct_span_err(sp, "invalid comparison operator `<>`")
+ .span_suggestion_short(
+ sp,
+ "`<>` is not a valid comparison operator, use `!=`",
+ "!=",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ self.bump();
+ }
+
+ // Look for C++'s `<=>` and recover
+ if op.node == AssocOp::LessEqual
+ && self.token.kind == token::Gt
+ && self.prev_token.span.hi() == self.token.span.lo()
+ {
+ let sp = op.span.to(self.token.span);
+ self.struct_span_err(sp, "invalid comparison operator `<=>`")
+ .span_label(
+ sp,
+ "`<=>` is not a valid comparison operator, use `std::cmp::Ordering`",
+ )
+ .emit();
+ self.bump();
+ }
+
+ if self.prev_token == token::BinOp(token::Plus)
+ && self.token == token::BinOp(token::Plus)
+ && self.prev_token.span.between(self.token.span).is_empty()
+ {
+ let op_span = self.prev_token.span.to(self.token.span);
+ // Eat the second `+`
+ self.bump();
+ lhs = self.recover_from_postfix_increment(lhs, op_span)?;
+ continue;
+ }
+
+ let op = op.node;
+ // Special cases:
+ if op == AssocOp::As {
+ lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?;
+ continue;
+ } else if op == AssocOp::Colon {
+ lhs = self.parse_assoc_op_ascribe(lhs, lhs_span)?;
+ continue;
+ } else if op == AssocOp::DotDot || op == AssocOp::DotDotEq {
+ // If we didn't have to handle `x..`/`x..=`, it would be pretty easy to
+ // generalise it to the Fixity::None code.
+ lhs = self.parse_range_expr(prec, lhs, op, cur_op_span)?;
+ break;
+ }
+
+ let fixity = op.fixity();
+ let prec_adjustment = match fixity {
+ Fixity::Right => 0,
+ Fixity::Left => 1,
+ // We currently have no non-associative operators that are not handled above by
+ // the special cases. The code is here only for future convenience.
+ Fixity::None => 1,
+ };
+ let rhs = self.with_res(restrictions - Restrictions::STMT_EXPR, |this| {
+ this.parse_assoc_expr_with(prec + prec_adjustment, LhsExpr::NotYetParsed)
+ })?;
+
+ let span = self.mk_expr_sp(&lhs, lhs_span, rhs.span);
+ lhs = match op {
+ AssocOp::Add
+ | AssocOp::Subtract
+ | AssocOp::Multiply
+ | AssocOp::Divide
+ | AssocOp::Modulus
+ | AssocOp::LAnd
+ | AssocOp::LOr
+ | AssocOp::BitXor
+ | AssocOp::BitAnd
+ | AssocOp::BitOr
+ | AssocOp::ShiftLeft
+ | AssocOp::ShiftRight
+ | AssocOp::Equal
+ | AssocOp::Less
+ | AssocOp::LessEqual
+ | AssocOp::NotEqual
+ | AssocOp::Greater
+ | AssocOp::GreaterEqual => {
+ let ast_op = op.to_ast_binop().unwrap();
+ let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs);
+ self.mk_expr(span, binary, AttrVec::new())
+ }
+ AssocOp::Assign => {
+ self.mk_expr(span, ExprKind::Assign(lhs, rhs, cur_op_span), AttrVec::new())
+ }
+ AssocOp::AssignOp(k) => {
+ let aop = match k {
+ token::Plus => BinOpKind::Add,
+ token::Minus => BinOpKind::Sub,
+ token::Star => BinOpKind::Mul,
+ token::Slash => BinOpKind::Div,
+ token::Percent => BinOpKind::Rem,
+ token::Caret => BinOpKind::BitXor,
+ token::And => BinOpKind::BitAnd,
+ token::Or => BinOpKind::BitOr,
+ token::Shl => BinOpKind::Shl,
+ token::Shr => BinOpKind::Shr,
+ };
+ let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs);
+ self.mk_expr(span, aopexpr, AttrVec::new())
+ }
+ AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => {
+ self.span_bug(span, "AssocOp should have been handled by special case")
+ }
+ };
+
+ if let Fixity::None = fixity {
+ break;
+ }
+ }
+ if last_type_ascription_set {
+ self.last_type_ascription = None;
+ }
+ Ok(lhs)
+ }
+
+ fn should_continue_as_assoc_expr(&mut self, lhs: &Expr) -> bool {
+ match (self.expr_is_complete(lhs), AssocOp::from_token(&self.token)) {
+ // Semi-statement forms are odd:
+ // See https://github.com/rust-lang/rust/issues/29071
+ (true, None) => false,
+ (false, _) => true, // Continue parsing the expression.
+ // An exhaustive check is done in the following block, but these are checked first
+ // because they *are* ambiguous but also reasonable looking incorrect syntax, so we
+ // want to keep their span info to improve diagnostics in these cases in a later stage.
+ (true, Some(AssocOp::Multiply)) | // `{ 42 } *foo = bar;` or `{ 42 } * 3`
+ (true, Some(AssocOp::Subtract)) | // `{ 42 } -5`
+ (true, Some(AssocOp::Add)) // `{ 42 } + 42`
+ // If the next token is a keyword, then the tokens above *are* unambiguously incorrect:
+ // `if x { a } else { b } && if y { c } else { d }`
+ if !self.look_ahead(1, |t| t.is_used_keyword()) => {
+ // These cases are ambiguous and can't be identified in the parser alone.
+ let sp = self.sess.source_map().start_point(self.token.span);
+ self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span);
+ false
+ }
+ (true, Some(AssocOp::LAnd)) |
+ (true, Some(AssocOp::LOr)) |
+ (true, Some(AssocOp::BitOr)) => {
+ // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`. Separated from the
+ // above due to #74233.
+ // These cases are ambiguous and can't be identified in the parser alone.
+ //
+ // Bitwise AND is left out because guessing intent is hard. We can make
+ // suggestions based on the assumption that double-refs are rarely intentional,
+ // and closures are distinct enough that they don't get mixed up with their
+ // return value.
+ let sp = self.sess.source_map().start_point(self.token.span);
+ self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span);
+ false
+ }
+ (true, Some(ref op)) if !op.can_continue_expr_unambiguously() => false,
+ (true, Some(_)) => {
+ self.error_found_expr_would_be_stmt(lhs);
+ true
+ }
+ }
+ }
+
+ /// We've found an expression that would be parsed as a statement,
+ /// but the next token implies this should be parsed as an expression.
+ /// For example: `if let Some(x) = x { x } else { 0 } / 2`.
+ fn error_found_expr_would_be_stmt(&self, lhs: &Expr) {
+ let mut err = self.struct_span_err(
+ self.token.span,
+ &format!("expected expression, found `{}`", pprust::token_to_string(&self.token),),
+ );
+ err.span_label(self.token.span, "expected expression");
+ self.sess.expr_parentheses_needed(&mut err, lhs.span);
+ err.emit();
+ }
+
+ /// Possibly translate the current token to an associative operator.
+ /// The method does not advance the current token.
+ ///
+ /// Also performs recovery for `and` / `or` which are mistaken for `&&` and `||` respectively.
+ fn check_assoc_op(&self) -> Option<Spanned<AssocOp>> {
+ let (op, span) = match (AssocOp::from_token(&self.token), self.token.ident()) {
+ // When parsing const expressions, stop parsing when encountering `>`.
+ (
+ Some(
+ AssocOp::ShiftRight
+ | AssocOp::Greater
+ | AssocOp::GreaterEqual
+ | AssocOp::AssignOp(token::BinOpToken::Shr),
+ ),
+ _,
+ ) if self.restrictions.contains(Restrictions::CONST_EXPR) => {
+ return None;
+ }
+ (Some(op), _) => (op, self.token.span),
+ (None, Some((Ident { name: sym::and, span }, false))) => {
+ self.error_bad_logical_op("and", "&&", "conjunction");
+ (AssocOp::LAnd, span)
+ }
+ (None, Some((Ident { name: sym::or, span }, false))) => {
+ self.error_bad_logical_op("or", "||", "disjunction");
+ (AssocOp::LOr, span)
+ }
+ _ => return None,
+ };
+ Some(source_map::respan(span, op))
+ }
+
+ /// Error on `and` and `or` suggesting `&&` and `||` respectively.
+ fn error_bad_logical_op(&self, bad: &str, good: &str, english: &str) {
+ self.struct_span_err(self.token.span, &format!("`{bad}` is not a logical operator"))
+ .span_suggestion_short(
+ self.token.span,
+ &format!("use `{good}` to perform logical {english}"),
+ good,
+ Applicability::MachineApplicable,
+ )
+ .note("unlike in e.g., python and PHP, `&&` and `||` are used for logical operators")
+ .emit();
+ }
+
+ /// Checks if this expression is a successfully parsed statement.
+ fn expr_is_complete(&self, e: &Expr) -> bool {
+ self.restrictions.contains(Restrictions::STMT_EXPR)
+ && !classify::expr_requires_semi_to_be_stmt(e)
+ }
+
+ /// Parses `x..y`, `x..=y`, and `x..`/`x..=`.
+ /// The other two variants are handled in `parse_prefix_range_expr` below.
+ fn parse_range_expr(
+ &mut self,
+ prec: usize,
+ lhs: P<Expr>,
+ op: AssocOp,
+ cur_op_span: Span,
+ ) -> PResult<'a, P<Expr>> {
+ let rhs = if self.is_at_start_of_range_notation_rhs() {
+ Some(self.parse_assoc_expr_with(prec + 1, LhsExpr::NotYetParsed)?)
+ } else {
+ None
+ };
+ let rhs_span = rhs.as_ref().map_or(cur_op_span, |x| x.span);
+ let span = self.mk_expr_sp(&lhs, lhs.span, rhs_span);
+ let limits =
+ if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed };
+ let range = self.mk_range(Some(lhs), rhs, limits);
+ Ok(self.mk_expr(span, range, AttrVec::new()))
+ }
+
+ fn is_at_start_of_range_notation_rhs(&self) -> bool {
+ if self.token.can_begin_expr() {
+ // Parse `for i in 1.. { }` as an infinite loop, not as `for i in (1..{})`.
+ if self.token == token::OpenDelim(Delimiter::Brace) {
+ return !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL);
+ }
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Parses prefix-forms of range notation: `..expr`, `..`, `..=expr`.
+ fn parse_prefix_range_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
+ // Check for deprecated `...` syntax.
+ if self.token == token::DotDotDot {
+ self.err_dotdotdot_syntax(self.token.span);
+ }
+
+ debug_assert!(
+ [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind),
+ "parse_prefix_range_expr: token {:?} is not DotDot/DotDotDot/DotDotEq",
+ self.token
+ );
+
+ let limits = match self.token.kind {
+ token::DotDot => RangeLimits::HalfOpen,
+ _ => RangeLimits::Closed,
+ };
+ let op = AssocOp::from_token(&self.token);
+ // FIXME: `parse_prefix_range_expr` is called when the current
+ // token is `DotDot`, `DotDotDot`, or `DotDotEq`. If we haven't already
+ // parsed attributes, then trying to parse them here will always fail.
+ // We should figure out how we want attributes on range expressions to work.
+ let attrs = self.parse_or_use_outer_attributes(attrs)?;
+ self.collect_tokens_for_expr(attrs, |this, attrs| {
+ let lo = this.token.span;
+ this.bump();
+ let (span, opt_end) = if this.is_at_start_of_range_notation_rhs() {
+ // RHS must be parsed with more associativity than the dots.
+ this.parse_assoc_expr_with(op.unwrap().precedence() + 1, LhsExpr::NotYetParsed)
+ .map(|x| (lo.to(x.span), Some(x)))?
+ } else {
+ (lo, None)
+ };
+ let range = this.mk_range(None, opt_end, limits);
+ Ok(this.mk_expr(span, range, attrs.into()))
+ })
+ }
+
+ /// Parses a prefix-unary-operator expr.
+ fn parse_prefix_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
+ let attrs = self.parse_or_use_outer_attributes(attrs)?;
+ let lo = self.token.span;
+
+ macro_rules! make_it {
+ ($this:ident, $attrs:expr, |this, _| $body:expr) => {
+ $this.collect_tokens_for_expr($attrs, |$this, attrs| {
+ let (hi, ex) = $body?;
+ Ok($this.mk_expr(lo.to(hi), ex, attrs.into()))
+ })
+ };
+ }
+
+ let this = self;
+
+ // Note: when adding new unary operators, don't forget to adjust `TokenKind::can_begin_expr()`.
+ match this.token.uninterpolate().kind {
+ token::Not => make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Not)), // `!expr`
+ token::Tilde => make_it!(this, attrs, |this, _| this.recover_tilde_expr(lo)), // `~expr`
+ token::BinOp(token::Minus) => {
+ make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Neg))
+ } // `-expr`
+ token::BinOp(token::Star) => {
+ make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Deref))
+ } // `*expr`
+ token::BinOp(token::And) | token::AndAnd => {
+ make_it!(this, attrs, |this, _| this.parse_borrow_expr(lo))
+ }
+ token::BinOp(token::Plus) if this.look_ahead(1, |tok| tok.is_numeric_lit()) => {
+ let mut err = this.struct_span_err(lo, "leading `+` is not supported");
+ err.span_label(lo, "unexpected `+`");
+
+ // a block on the LHS might have been intended to be an expression instead
+ if let Some(sp) = this.sess.ambiguous_block_expr_parse.borrow().get(&lo) {
+ this.sess.expr_parentheses_needed(&mut err, *sp);
+ } else {
+ err.span_suggestion_verbose(
+ lo,
+ "try removing the `+`",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+
+ this.bump();
+ this.parse_prefix_expr(None)
+ } // `+expr`
+ // Recover from `++x`:
+ token::BinOp(token::Plus)
+ if this.look_ahead(1, |t| *t == token::BinOp(token::Plus)) =>
+ {
+ let prev_is_semi = this.prev_token == token::Semi;
+ let pre_span = this.token.span.to(this.look_ahead(1, |t| t.span));
+ // Eat both `+`s.
+ this.bump();
+ this.bump();
+
+ let operand_expr = this.parse_dot_or_call_expr(Default::default())?;
+ this.recover_from_prefix_increment(operand_expr, pre_span, prev_is_semi)
+ }
+ token::Ident(..) if this.token.is_keyword(kw::Box) => {
+ make_it!(this, attrs, |this, _| this.parse_box_expr(lo))
+ }
+ token::Ident(..) if this.is_mistaken_not_ident_negation() => {
+ make_it!(this, attrs, |this, _| this.recover_not_expr(lo))
+ }
+ _ => return this.parse_dot_or_call_expr(Some(attrs)),
+ }
+ }
+
+ fn parse_prefix_expr_common(&mut self, lo: Span) -> PResult<'a, (Span, P<Expr>)> {
+ self.bump();
+ let expr = self.parse_prefix_expr(None);
+ let (span, expr) = self.interpolated_or_expr_span(expr)?;
+ Ok((lo.to(span), expr))
+ }
+
+ fn parse_unary_expr(&mut self, lo: Span, op: UnOp) -> PResult<'a, (Span, ExprKind)> {
+ let (span, expr) = self.parse_prefix_expr_common(lo)?;
+ Ok((span, self.mk_unary(op, expr)))
+ }
+
+ // Recover on `~`, suggesting `!` for bitwise negation instead.
+ fn recover_tilde_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
+ self.struct_span_err(lo, "`~` cannot be used as a unary operator")
+ .span_suggestion_short(
+ lo,
+ "use `!` to perform bitwise not",
+ "!",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ self.parse_unary_expr(lo, UnOp::Not)
+ }
+
+ /// Parse `box expr`.
+ fn parse_box_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
+ let (span, expr) = self.parse_prefix_expr_common(lo)?;
+ self.sess.gated_spans.gate(sym::box_syntax, span);
+ Ok((span, ExprKind::Box(expr)))
+ }
+
+ fn is_mistaken_not_ident_negation(&self) -> bool {
+ let token_cannot_continue_expr = |t: &Token| match t.uninterpolate().kind {
+ // These tokens can start an expression after `!`, but
+ // can't continue an expression after an ident
+ token::Ident(name, is_raw) => token::ident_can_begin_expr(name, t.span, is_raw),
+ token::Literal(..) | token::Pound => true,
+ _ => t.is_whole_expr(),
+ };
+ self.token.is_ident_named(sym::not) && self.look_ahead(1, token_cannot_continue_expr)
+ }
+
+ /// Recover on `not expr` in favor of `!expr`.
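+ ///
+ /// For example, `if not x {}` is reported with the suggestion `if !x {}`.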
+ fn recover_not_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
+ // Emit the error...
+ let not_token = self.look_ahead(1, |t| t.clone());
+ self.struct_span_err(
+ not_token.span,
+ &format!("unexpected {} after identifier", super::token_descr(&not_token)),
+ )
+ .span_suggestion_short(
+ // Span the `not` plus trailing whitespace to avoid
+ // trailing whitespace after the `!` in our suggestion
+ self.sess.source_map().span_until_non_whitespace(lo.to(not_token.span)),
+ "use `!` to perform logical negation",
+ "!",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ // ...and recover!
+ self.parse_unary_expr(lo, UnOp::Not)
+ }
+
+ /// Returns the span of `expr` if it was not interpolated, or the span of the interpolated token.
+ fn interpolated_or_expr_span(
+ &self,
+ expr: PResult<'a, P<Expr>>,
+ ) -> PResult<'a, (Span, P<Expr>)> {
+ expr.map(|e| {
+ (
+ match self.prev_token.kind {
+ TokenKind::Interpolated(..) => self.prev_token.span,
+ _ => e.span,
+ },
+ e,
+ )
+ })
+ }
+
+ fn parse_assoc_op_cast(
+ &mut self,
+ lhs: P<Expr>,
+ lhs_span: Span,
+ expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind,
+ ) -> PResult<'a, P<Expr>> {
+ let mk_expr = |this: &mut Self, lhs: P<Expr>, rhs: P<Ty>| {
+ this.mk_expr(
+ this.mk_expr_sp(&lhs, lhs_span, rhs.span),
+ expr_kind(lhs, rhs),
+ AttrVec::new(),
+ )
+ };
+
+ // Save the state of the parser before parsing type normally, in case there is a
+ // LessThan comparison after this cast.
+ let parser_snapshot_before_type = self.clone();
+ let cast_expr = match self.parse_as_cast_ty() {
+ Ok(rhs) => mk_expr(self, lhs, rhs),
+ Err(type_err) => {
+ // Rewind to before attempting to parse the type with generics, to recover
+ // from situations like `x as usize < y` in which we first tried to parse
+ // `usize < y` as a type with generic arguments.
+ let parser_snapshot_after_type = mem::replace(self, parser_snapshot_before_type);
+
+ // Check for typo of `'a: loop { break 'a }` with a missing `'`.
+ match (&lhs.kind, &self.token.kind) {
+ (
+ // `foo: `
+ ExprKind::Path(None, ast::Path { segments, .. }),
+ TokenKind::Ident(kw::For | kw::Loop | kw::While, false),
+ ) if segments.len() == 1 => {
+ let snapshot = self.create_snapshot_for_diagnostic();
+ let label = Label {
+ ident: Ident::from_str_and_span(
+ &format!("'{}", segments[0].ident),
+ segments[0].ident.span,
+ ),
+ };
+ match self.parse_labeled_expr(label, AttrVec::new(), false) {
+ Ok(expr) => {
+ type_err.cancel();
+ self.struct_span_err(label.ident.span, "malformed loop label")
+ .span_suggestion(
+ label.ident.span,
+ "use the correct loop label format",
+ label.ident,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ return Ok(expr);
+ }
+ Err(err) => {
+ err.cancel();
+ self.restore_snapshot(snapshot);
+ }
+ }
+ }
+ _ => {}
+ }
+
+ match self.parse_path(PathStyle::Expr) {
+ Ok(path) => {
+ let (op_noun, op_verb) = match self.token.kind {
+ token::Lt => ("comparison", "comparing"),
+ token::BinOp(token::Shl) => ("shift", "shifting"),
+ _ => {
+ // We can end up here even without `<` being the next token, for
+ // example because `parse_ty_no_plus` returns `Err` on keywords,
+ // but `parse_path` returns `Ok` on them due to error recovery.
+ // Return original error and parser state.
+ *self = parser_snapshot_after_type;
+ return Err(type_err);
+ }
+ };
+
+ // Successfully parsed the type path leaving a `<` yet to parse.
+ type_err.cancel();
+
+ // Report non-fatal diagnostics, keep `x as usize` as an expression
+ // in AST and continue parsing.
+ let msg = format!(
+ "`<` is interpreted as a start of generic arguments for `{}`, not a {}",
+ pprust::path_to_string(&path),
+ op_noun,
+ );
+ let span_after_type = parser_snapshot_after_type.token.span;
+ let expr =
+ mk_expr(self, lhs, self.mk_ty(path.span, TyKind::Path(None, path)));
+
+ self.struct_span_err(self.token.span, &msg)
+ .span_label(
+ self.look_ahead(1, |t| t.span).to(span_after_type),
+ "interpreted as generic arguments",
+ )
+ .span_label(self.token.span, format!("not interpreted as {op_noun}"))
+ .multipart_suggestion(
+ &format!("try {op_verb} the cast value"),
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ expr
+ }
+ Err(path_err) => {
+ // Couldn't parse as a path, return original error and parser state.
+ path_err.cancel();
+ *self = parser_snapshot_after_type;
+ return Err(type_err);
+ }
+ }
+ }
+ };
+
+ self.parse_and_disallow_postfix_after_cast(cast_expr)
+ }
+
+ /// Parses postfix operators such as `.`, `?`, or index (`[]`) after a cast,
+ /// then emits an error and returns the newly parsed tree.
+ /// The resulting parse tree for `&x as T[0]` has a precedence of `((&x) as T)[0]`.
+ fn parse_and_disallow_postfix_after_cast(
+ &mut self,
+ cast_expr: P<Expr>,
+ ) -> PResult<'a, P<Expr>> {
+ let span = cast_expr.span;
+ let (cast_kind, maybe_ascription_span) =
+ if let ExprKind::Type(ascripted_expr, _) = &cast_expr.kind {
+ ("type ascription", Some(ascripted_expr.span.shrink_to_hi().with_hi(span.hi())))
+ } else {
+ ("cast", None)
+ };
+
+ // Save the memory location of expr before parsing any following postfix operators.
+ // This will be compared with the memory location of the output expression.
+ // If they differ, we can assume we parsed another expression because the existing
+ // expression is not reallocated.
+ let addr_before = &*cast_expr as *const _ as usize;
+ let with_postfix = self.parse_dot_or_call_expr_with_(cast_expr, span)?;
+ let changed = addr_before != &*with_postfix as *const _ as usize;
+
+ // Check if an illegal postfix operator has been added after the cast.
+ // If the resulting expression is not a cast, or has a different memory location, it is an illegal postfix operator.
+ if !matches!(with_postfix.kind, ExprKind::Cast(_, _) | ExprKind::Type(_, _)) || changed {
+ let msg = format!(
+ "{cast_kind} cannot be followed by {}",
+ match with_postfix.kind {
+ ExprKind::Index(_, _) => "indexing",
+ ExprKind::Try(_) => "`?`",
+ ExprKind::Field(_, _) => "a field access",
+ ExprKind::MethodCall(_, _, _) => "a method call",
+ ExprKind::Call(_, _) => "a function call",
+ ExprKind::Await(_) => "`.await`",
+ ExprKind::Err => return Ok(with_postfix),
+ _ => unreachable!("parse_dot_or_call_expr_with_ shouldn't produce this"),
+ }
+ );
+ let mut err = self.struct_span_err(span, &msg);
+
+ let suggest_parens = |err: &mut DiagnosticBuilder<'_, _>| {
+ let suggestions = vec![
+ (span.shrink_to_lo(), "(".to_string()),
+ (span.shrink_to_hi(), ")".to_string()),
+ ];
+ err.multipart_suggestion(
+ "try surrounding the expression in parentheses",
+ suggestions,
+ Applicability::MachineApplicable,
+ );
+ };
+
+ // If type ascription is "likely an error", the user will already be getting a useful
+ // help message, and doesn't need a second.
+ if self.last_type_ascription.map_or(false, |last_ascription| last_ascription.1) {
+ self.maybe_annotate_with_ascription(&mut err, false);
+ } else if let Some(ascription_span) = maybe_ascription_span {
+ let is_nightly = self.sess.unstable_features.is_nightly_build();
+ if is_nightly {
+ suggest_parens(&mut err);
+ }
+ err.span_suggestion(
+ ascription_span,
+ &format!(
+ "{}remove the type ascription",
+ if is_nightly { "alternatively, " } else { "" }
+ ),
+ "",
+ if is_nightly {
+ Applicability::MaybeIncorrect
+ } else {
+ Applicability::MachineApplicable
+ },
+ );
+ } else {
+ suggest_parens(&mut err);
+ }
+ err.emit();
+ };
+ Ok(with_postfix)
+ }
+
+ fn parse_assoc_op_ascribe(&mut self, lhs: P<Expr>, lhs_span: Span) -> PResult<'a, P<Expr>> {
+ let maybe_path = self.could_ascription_be_path(&lhs.kind);
+ self.last_type_ascription = Some((self.prev_token.span, maybe_path));
+ let lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type)?;
+ self.sess.gated_spans.gate(sym::type_ascription, lhs.span);
+ Ok(lhs)
+ }
+
+ /// Parse `& mut? <expr>` or `& raw [ const | mut ] <expr>`.
+ fn parse_borrow_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
+ self.expect_and()?;
+ let has_lifetime = self.token.is_lifetime() && self.look_ahead(1, |t| t != &token::Colon);
+ let lifetime = has_lifetime.then(|| self.expect_lifetime()); // For recovery, see below.
+ let (borrow_kind, mutbl) = self.parse_borrow_modifiers(lo);
+ let expr = self.parse_prefix_expr(None);
+ let (hi, expr) = self.interpolated_or_expr_span(expr)?;
+ let span = lo.to(hi);
+ if let Some(lt) = lifetime {
+ self.error_remove_borrow_lifetime(span, lt.ident.span);
+ }
+ Ok((span, ExprKind::AddrOf(borrow_kind, mutbl, expr)))
+ }
+
+ fn error_remove_borrow_lifetime(&self, span: Span, lt_span: Span) {
+ self.struct_span_err(span, "borrow expressions cannot be annotated with lifetimes")
+ .span_label(lt_span, "annotated with lifetime here")
+ .span_suggestion(
+ lt_span,
+ "remove the lifetime annotation",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+ /// Parse `mut?` or `raw [ const | mut ]`.
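+ ///
+ /// For example, `&raw const x` takes the `raw [ const | mut ]` path and is
+ /// gated behind the `raw_ref_op` feature, while `&mut x` takes the plain
+ /// `mut?` path.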
+ fn parse_borrow_modifiers(&mut self, lo: Span) -> (ast::BorrowKind, ast::Mutability) {
+ if self.check_keyword(kw::Raw) && self.look_ahead(1, Token::is_mutability) {
+ // `raw [ const | mut ]`.
+ let found_raw = self.eat_keyword(kw::Raw);
+ assert!(found_raw);
+ let mutability = self.parse_const_or_mut().unwrap();
+ self.sess.gated_spans.gate(sym::raw_ref_op, lo.to(self.prev_token.span));
+ (ast::BorrowKind::Raw, mutability)
+ } else {
+ // `mut?`
+ (ast::BorrowKind::Ref, self.parse_mutability())
+ }
+ }
+
+ /// Parses `a.b` or `a(13)` or `a[4]` or just `a`.
+ fn parse_dot_or_call_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
+ let attrs = self.parse_or_use_outer_attributes(attrs)?;
+ self.collect_tokens_for_expr(attrs, |this, attrs| {
+ let base = this.parse_bottom_expr();
+ let (span, base) = this.interpolated_or_expr_span(base)?;
+ this.parse_dot_or_call_expr_with(base, span, attrs)
+ })
+ }
+
+ pub(super) fn parse_dot_or_call_expr_with(
+ &mut self,
+ e0: P<Expr>,
+ lo: Span,
+ mut attrs: Vec<ast::Attribute>,
+ ) -> PResult<'a, P<Expr>> {
+ // Stitch the list of outer attributes onto the return value.
+ // A little bit ugly, but it's the best way given the current code
+ // structure.
+ self.parse_dot_or_call_expr_with_(e0, lo).map(|expr| {
+ expr.map(|mut expr| {
+ attrs.extend::<Vec<_>>(expr.attrs.into());
+ expr.attrs = attrs.into();
+ expr
+ })
+ })
+ }
+
+ fn parse_dot_or_call_expr_with_(&mut self, mut e: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
+ loop {
+ let has_question = if self.prev_token.kind == TokenKind::Ident(kw::Return, false) {
+ // We use `eat_noexpect` here because we don't expect a `?` directly after
+ // a `return`, which could otherwise be suggested.
+ self.eat_noexpect(&token::Question)
+ } else {
+ self.eat(&token::Question)
+ };
+ if has_question {
+ // `expr?`
+ e = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Try(e), AttrVec::new());
+ continue;
+ }
+ let has_dot = if self.prev_token.kind == TokenKind::Ident(kw::Return, false) {
+ // We use `eat_noexpect` here because we don't expect a `.` directly after
+ // a `return`, which could otherwise be suggested.
+ self.eat_noexpect(&token::Dot)
+ } else {
+ self.eat(&token::Dot)
+ };
+ if has_dot {
+ // expr.f
+ e = self.parse_dot_suffix_expr(lo, e)?;
+ continue;
+ }
+ if self.expr_is_complete(&e) {
+ return Ok(e);
+ }
+ e = match self.token.kind {
+ token::OpenDelim(Delimiter::Parenthesis) => self.parse_fn_call_expr(lo, e),
+ token::OpenDelim(Delimiter::Bracket) => self.parse_index_expr(lo, e)?,
+ _ => return Ok(e),
+ }
+ }
+ }
+
+ fn look_ahead_type_ascription_as_field(&mut self) -> bool {
+ self.look_ahead(1, |t| t.is_ident())
+ && self.look_ahead(2, |t| t == &token::Colon)
+ && self.look_ahead(3, |t| t.can_begin_expr())
+ }
+
+ fn parse_dot_suffix_expr(&mut self, lo: Span, base: P<Expr>) -> PResult<'a, P<Expr>> {
+ match self.token.uninterpolate().kind {
+ token::Ident(..) => self.parse_dot_suffix(base, lo),
+ token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) => {
+ Ok(self.parse_tuple_field_access_expr(lo, base, symbol, suffix, None))
+ }
+ token::Literal(token::Lit { kind: token::Float, symbol, suffix }) => {
+ Ok(self.parse_tuple_field_access_expr_float(lo, base, symbol, suffix))
+ }
+ _ => {
+ self.error_unexpected_after_dot();
+ Ok(base)
+ }
+ }
+ }
+
+ fn error_unexpected_after_dot(&self) {
+ // FIXME: Could factor this out into `non_fatal_unexpected` or something.
+ let actual = pprust::token_to_string(&self.token);
+ self.struct_span_err(self.token.span, &format!("unexpected token: `{actual}`")).emit();
+ }
+
+ // We need an identifier or integer, but the next token is a float.
+ // Break the float into components to extract the identifier or integer.
+ // FIXME: With the current `TokenCursor` it's hard to break tokens into more than 2
+ // parts unless those parts are processed immediately. `TokenCursor` should either
+ // support pushing "future tokens" (which would also be helpful to `break_and_eat`), or
+ // we should break everything including floats into more basic proc-macro style
+ // tokens in the lexer (probably preferable).
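+ //
+ // As an illustrative sketch: `x.1.2` reaches this function with the single
+ // float token `1.2`, which is split into the components
+ // `[IdentLike("1"), Punct('.'), IdentLike("2")]` and then re-parsed as the
+ // nested tuple field accesses `(x.1).2`.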
+ fn parse_tuple_field_access_expr_float(
+ &mut self,
+ lo: Span,
+ base: P<Expr>,
+ float: Symbol,
+ suffix: Option<Symbol>,
+ ) -> P<Expr> {
+ #[derive(Debug)]
+ enum FloatComponent {
+ IdentLike(String),
+ Punct(char),
+ }
+ use FloatComponent::*;
+
+ let float_str = float.as_str();
+ let mut components = Vec::new();
+ let mut ident_like = String::new();
+ for c in float_str.chars() {
+ if c == '_' || c.is_ascii_alphanumeric() {
+ ident_like.push(c);
+ } else if matches!(c, '.' | '+' | '-') {
+ if !ident_like.is_empty() {
+ components.push(IdentLike(mem::take(&mut ident_like)));
+ }
+ components.push(Punct(c));
+ } else {
+ panic!("unexpected character in a float token: {:?}", c)
+ }
+ }
+ if !ident_like.is_empty() {
+ components.push(IdentLike(ident_like));
+ }
+
+ // With proc macros, the span can refer to anything: the source may be too short,
+ // too long, or non-ASCII. It only makes sense to break our span into components
+ // if its underlying text is identical to our float literal.
+ let span = self.token.span;
+ let can_take_span_apart =
+ || self.span_to_snippet(span).as_deref() == Ok(float_str).as_deref();
+
+ match &*components {
+ // 1e2
+ [IdentLike(i)] => {
+ self.parse_tuple_field_access_expr(lo, base, Symbol::intern(&i), suffix, None)
+ }
+ // 1.
+ [IdentLike(i), Punct('.')] => {
+ let (ident_span, dot_span) = if can_take_span_apart() {
+ let (span, ident_len) = (span.data(), BytePos::from_usize(i.len()));
+ let ident_span = span.with_hi(span.lo + ident_len);
+ let dot_span = span.with_lo(span.lo + ident_len);
+ (ident_span, dot_span)
+ } else {
+ (span, span)
+ };
+ assert!(suffix.is_none());
+ let symbol = Symbol::intern(&i);
+ self.token = Token::new(token::Ident(symbol, false), ident_span);
+ let next_token = (Token::new(token::Dot, dot_span), self.token_spacing);
+ self.parse_tuple_field_access_expr(lo, base, symbol, None, Some(next_token))
+ }
+ // 1.2 | 1.2e3
+ [IdentLike(i1), Punct('.'), IdentLike(i2)] => {
+ let (ident1_span, dot_span, ident2_span) = if can_take_span_apart() {
+ let (span, ident1_len) = (span.data(), BytePos::from_usize(i1.len()));
+ let ident1_span = span.with_hi(span.lo + ident1_len);
+ let dot_span = span
+ .with_lo(span.lo + ident1_len)
+ .with_hi(span.lo + ident1_len + BytePos(1));
+ let ident2_span = self.token.span.with_lo(span.lo + ident1_len + BytePos(1));
+ (ident1_span, dot_span, ident2_span)
+ } else {
+ (span, span, span)
+ };
+ let symbol1 = Symbol::intern(&i1);
+ self.token = Token::new(token::Ident(symbol1, false), ident1_span);
+ // This needs to be `Spacing::Alone` to prevent regressions.
+ // See issue #76399 and PR #76285 for more details.
+ let next_token1 = (Token::new(token::Dot, dot_span), Spacing::Alone);
+ let base1 =
+ self.parse_tuple_field_access_expr(lo, base, symbol1, None, Some(next_token1));
+ let symbol2 = Symbol::intern(&i2);
+ let next_token2 = Token::new(token::Ident(symbol2, false), ident2_span);
+ self.bump_with((next_token2, self.token_spacing)); // `.`
+ self.parse_tuple_field_access_expr(lo, base1, symbol2, suffix, None)
+ }
+ // 1e+ | 1e- (recovered)
+ [IdentLike(_), Punct('+' | '-')] |
+ // 1e+2 | 1e-2
+ [IdentLike(_), Punct('+' | '-'), IdentLike(_)] |
+ // 1.2e+ | 1.2e-
+ [IdentLike(_), Punct('.'), IdentLike(_), Punct('+' | '-')] |
+ // 1.2e+3 | 1.2e-3
+ [IdentLike(_), Punct('.'), IdentLike(_), Punct('+' | '-'), IdentLike(_)] => {
+ // See the FIXME about `TokenCursor` above.
+ self.error_unexpected_after_dot();
+ base
+ }
+ _ => panic!("unexpected components in a float token: {:?}", components),
+ }
+ }
+
+ fn parse_tuple_field_access_expr(
+ &mut self,
+ lo: Span,
+ base: P<Expr>,
+ field: Symbol,
+ suffix: Option<Symbol>,
+ next_token: Option<(Token, Spacing)>,
+ ) -> P<Expr> {
+ match next_token {
+ Some(next_token) => self.bump_with(next_token),
+ None => self.bump(),
+ }
+ let span = self.prev_token.span;
+ let field = ExprKind::Field(base, Ident::new(field, span));
+ self.expect_no_suffix(span, "a tuple index", suffix);
+ self.mk_expr(lo.to(span), field, AttrVec::new())
+ }
+
+ /// Parse a function call expression, `expr(...)`.
+ fn parse_fn_call_expr(&mut self, lo: Span, fun: P<Expr>) -> P<Expr> {
+ let snapshot = if self.token.kind == token::OpenDelim(Delimiter::Parenthesis)
+ && self.look_ahead_type_ascription_as_field()
+ {
+ Some((self.create_snapshot_for_diagnostic(), fun.kind.clone()))
+ } else {
+ None
+ };
+ let open_paren = self.token.span;
+
+ let mut seq = self.parse_paren_expr_seq().map(|args| {
+ self.mk_expr(lo.to(self.prev_token.span), self.mk_call(fun, args), AttrVec::new())
+ });
+ if let Some(expr) =
+ self.maybe_recover_struct_lit_bad_delims(lo, open_paren, &mut seq, snapshot)
+ {
+ return expr;
+ }
+ self.recover_seq_parse_error(Delimiter::Parenthesis, lo, seq)
+ }
+
+ /// If we encounter a parser state that looks like the user has written a `struct` literal with
+ /// parentheses instead of braces, recover the parser state and provide suggestions.
+ #[instrument(skip(self, seq, snapshot), level = "trace")]
+ fn maybe_recover_struct_lit_bad_delims(
+ &mut self,
+ lo: Span,
+ open_paren: Span,
+ seq: &mut PResult<'a, P<Expr>>,
+ snapshot: Option<(SnapshotParser<'a>, ExprKind)>,
+ ) -> Option<P<Expr>> {
+ match (seq.as_mut(), snapshot) {
+ (Err(err), Some((mut snapshot, ExprKind::Path(None, path)))) => {
+ let name = pprust::path_to_string(&path);
+ snapshot.bump(); // `(`
+ match snapshot.parse_struct_fields(path, false, Delimiter::Parenthesis) {
+ Ok((fields, ..))
+ if snapshot.eat(&token::CloseDelim(Delimiter::Parenthesis)) =>
+ {
+ // We are certain we have `Enum::Foo(a: 3, b: 4)`, suggest
+ // `Enum::Foo { a: 3, b: 4 }` or `Enum::Foo(3, 4)`.
+ self.restore_snapshot(snapshot);
+ let close_paren = self.prev_token.span;
+ let span = lo.to(self.prev_token.span);
+ if !fields.is_empty() {
+ let replacement_err = self.struct_span_err(
+ span,
+ "invalid `struct` delimiters or `fn` call arguments",
+ );
+ mem::replace(err, replacement_err).cancel();
+
+ err.multipart_suggestion(
+ &format!("if `{name}` is a struct, use braces as delimiters"),
+ vec![
+ (open_paren, " { ".to_string()),
+ (close_paren, " }".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ err.multipart_suggestion(
+ &format!("if `{name}` is a function, use the arguments directly"),
+ fields
+ .into_iter()
+ .map(|field| (field.span.until(field.expr.span), String::new()))
+ .collect(),
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+ } else {
+ err.emit();
+ }
+ return Some(self.mk_expr_err(span));
+ }
+ Ok(_) => {}
+ Err(mut err) => {
+ err.emit();
+ }
+ }
+ }
+ _ => {}
+ }
+ None
+ }
+
+ /// Parse an indexing expression `expr[...]`.
+ fn parse_index_expr(&mut self, lo: Span, base: P<Expr>) -> PResult<'a, P<Expr>> {
+ self.bump(); // `[`
+ let index = self.parse_expr()?;
+ self.expect(&token::CloseDelim(Delimiter::Bracket))?;
+ Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_index(base, index), AttrVec::new()))
+ }
+
+ /// Assuming we have just parsed `.`, continue parsing into an expression.
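+ ///
+ /// An illustrative sketch of the suffixes handled here:
+ ///
+ /// ```ignore (illustrative)
+ /// fut.await;     // `.await` (edition 2018+)
+ /// x.f::<T>(arg); // method call, including turbofish
+ /// s.field;       // field access
+ /// ```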
+ fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
+ if self.token.uninterpolated_span().rust_2018() && self.eat_keyword(kw::Await) {
+ return Ok(self.mk_await_expr(self_arg, lo));
+ }
+
+ let fn_span_lo = self.token.span;
+ let mut segment = self.parse_path_segment(PathStyle::Expr, None)?;
+ self.check_trailing_angle_brackets(&segment, &[&token::OpenDelim(Delimiter::Parenthesis)]);
+ self.check_turbofish_missing_angle_brackets(&mut segment);
+
+ if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ // Method call `expr.f()`
+ let mut args = self.parse_paren_expr_seq()?;
+ args.insert(0, self_arg);
+
+ let fn_span = fn_span_lo.to(self.prev_token.span);
+ let span = lo.to(self.prev_token.span);
+ Ok(self.mk_expr(span, ExprKind::MethodCall(segment, args, fn_span), AttrVec::new()))
+ } else {
+ // Field access `expr.f`
+ if let Some(args) = segment.args {
+ self.struct_span_err(
+ args.span(),
+ "field expressions cannot have generic arguments",
+ )
+ .emit();
+ }
+
+ let span = lo.to(self.prev_token.span);
+ Ok(self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), AttrVec::new()))
+ }
+ }
+
+ /// At the bottom (top?) of the precedence hierarchy,
+ /// parses things like parenthesized exprs, macros, `return`, etc.
+ ///
+ /// N.B., this does not parse outer attributes, and is private because it only works
+ /// correctly if called from `parse_dot_or_call_expr()`.
+ fn parse_bottom_expr(&mut self) -> PResult<'a, P<Expr>> {
+ maybe_recover_from_interpolated_ty_qpath!(self, true);
+ maybe_whole_expr!(self);
+
+ // Outer attributes are already parsed and will be
+ // added to the return value after the fact.
+ //
+ // Therefore, prevent sub-parser from parsing
+ // attributes by giving them an empty "already-parsed" list.
+ let attrs = AttrVec::new();
+
+ // Note: when adding new syntax here, don't forget to adjust `TokenKind::can_begin_expr()`.
+ let lo = self.token.span;
+ if let token::Literal(_) = self.token.kind {
+ // This match arm is a special-case of the `_` match arm below and
+ // could be removed without changing functionality, but it's faster
+ // to have it here, especially for programs with large constants.
+ self.parse_lit_expr(attrs)
+ } else if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ self.parse_tuple_parens_expr(attrs)
+ } else if self.check(&token::OpenDelim(Delimiter::Brace)) {
+ self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs)
+ } else if self.check(&token::BinOp(token::Or)) || self.check(&token::OrOr) {
+ self.parse_closure_expr(attrs).map_err(|mut err| {
+ // If the input is something like `if a { 1 } else { 2 } | if a { 3 } else { 4 }`
+ // then suggest parens around the lhs.
+ if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&lo) {
+ self.sess.expr_parentheses_needed(&mut err, *sp);
+ }
+ err
+ })
+ } else if self.check(&token::OpenDelim(Delimiter::Bracket)) {
+ self.parse_array_or_repeat_expr(attrs, Delimiter::Bracket)
+ } else if self.check_path() {
+ self.parse_path_start_expr(attrs)
+ } else if self.check_keyword(kw::Move) || self.check_keyword(kw::Static) {
+ self.parse_closure_expr(attrs)
+ } else if self.eat_keyword(kw::If) {
+ self.parse_if_expr(attrs)
+ } else if self.check_keyword(kw::For) {
+ if self.choose_generics_over_qpath(1) {
+ self.parse_closure_expr(attrs)
+ } else {
+ assert!(self.eat_keyword(kw::For));
+ self.parse_for_expr(None, self.prev_token.span, attrs)
+ }
+ } else if self.eat_keyword(kw::While) {
+ self.parse_while_expr(None, self.prev_token.span, attrs)
+ } else if let Some(label) = self.eat_label() {
+ self.parse_labeled_expr(label, attrs, true)
+ } else if self.eat_keyword(kw::Loop) {
+ let sp = self.prev_token.span;
+ self.parse_loop_expr(None, self.prev_token.span, attrs).map_err(|mut err| {
+ err.span_label(sp, "while parsing this `loop` expression");
+ err
+ })
+ } else if self.eat_keyword(kw::Continue) {
+ let kind = ExprKind::Continue(self.eat_label());
+ Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
+ } else if self.eat_keyword(kw::Match) {
+ let match_sp = self.prev_token.span;
+ self.parse_match_expr(attrs).map_err(|mut err| {
+ err.span_label(match_sp, "while parsing this `match` expression");
+ err
+ })
+ } else if self.eat_keyword(kw::Unsafe) {
+ let sp = self.prev_token.span;
+ self.parse_block_expr(None, lo, BlockCheckMode::Unsafe(ast::UserProvided), attrs)
+ .map_err(|mut err| {
+ err.span_label(sp, "while parsing this `unsafe` expression");
+ err
+ })
+ } else if self.check_inline_const(0) {
+ self.parse_const_block(lo.to(self.token.span), false)
+ } else if self.is_do_catch_block() {
+ self.recover_do_catch(attrs)
+ } else if self.is_try_block() {
+ self.expect_keyword(kw::Try)?;
+ self.parse_try_block(lo, attrs)
+ } else if self.eat_keyword(kw::Return) {
+ self.parse_return_expr(attrs)
+ } else if self.eat_keyword(kw::Break) {
+ self.parse_break_expr(attrs)
+ } else if self.eat_keyword(kw::Yield) {
+ self.parse_yield_expr(attrs)
+ } else if self.is_do_yeet() {
+ self.parse_yeet_expr(attrs)
+ } else if self.check_keyword(kw::Let) {
+ self.parse_let_expr(attrs)
+ } else if self.eat_keyword(kw::Underscore) {
+ Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore, attrs))
+ } else if !self.unclosed_delims.is_empty() && self.check(&token::Semi) {
+ // Don't complain about bare semicolons after unclosed braces
+ // recovery in order to keep the error count down. Fixing the
+ // delimiters will possibly also fix the bare semicolon found in
+ // expression context. For example, silence the following error:
+ //
+ // error: expected expression, found `;`
+ // --> file.rs:2:13
+ // |
+ // 2 | foo(bar(;
+ // | ^ expected expression
+ self.bump();
+ Ok(self.mk_expr_err(self.token.span))
+ } else if self.token.uninterpolated_span().rust_2018() {
+ // `Span::rust_2018()` is somewhat expensive; don't get it repeatedly.
+ if self.check_keyword(kw::Async) {
+ if self.is_async_block() {
+ // Check for `async {` and `async move {`.
+ self.parse_async_block(attrs)
+ } else {
+ self.parse_closure_expr(attrs)
+ }
+ } else if self.eat_keyword(kw::Await) {
+ self.recover_incorrect_await_syntax(lo, self.prev_token.span, attrs)
+ } else {
+ self.parse_lit_expr(attrs)
+ }
+ } else {
+ self.parse_lit_expr(attrs)
+ }
+ }
+
+ fn parse_lit_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.token.span;
+ match self.parse_opt_lit() {
+ Some(literal) => {
+ let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Lit(literal), attrs);
+ self.maybe_recover_from_bad_qpath(expr)
+ }
+ None => self.try_macro_suggestion(),
+ }
+ }
+
+ fn parse_tuple_parens_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.token.span;
+ self.expect(&token::OpenDelim(Delimiter::Parenthesis))?;
+ let (es, trailing_comma) = match self.parse_seq_to_end(
+ &token::CloseDelim(Delimiter::Parenthesis),
+ SeqSep::trailing_allowed(token::Comma),
+ |p| p.parse_expr_catch_underscore(),
+ ) {
+ Ok(x) => x,
+ Err(err) => {
+ return Ok(self.recover_seq_parse_error(Delimiter::Parenthesis, lo, Err(err)));
+ }
+ };
+ let kind = if es.len() == 1 && !trailing_comma {
+ // `(e)` is parenthesized `e`.
+ ExprKind::Paren(es.into_iter().next().unwrap())
+ } else {
+ // `(e,)` is a tuple with only one field, `e`.
+ ExprKind::Tup(es)
+ };
+ let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
+ self.maybe_recover_from_bad_qpath(expr)
+ }
+
+ fn parse_array_or_repeat_expr(
+ &mut self,
+ attrs: AttrVec,
+ close_delim: Delimiter,
+ ) -> PResult<'a, P<Expr>> {
+ let lo = self.token.span;
+ self.bump(); // `[` or other open delim
+
+ let close = &token::CloseDelim(close_delim);
+ let kind = if self.eat(close) {
+ // Empty array.
+ ExprKind::Array(Vec::new())
+ } else {
+ // Non-empty array.
+ let first_expr = self.parse_expr()?;
+ if self.eat(&token::Semi) {
+ // Repeating array syntax: `[ 0; 512 ]`
+ let count = self.parse_anon_const_expr()?;
+ self.expect(close)?;
+ ExprKind::Repeat(first_expr, count)
+ } else if self.eat(&token::Comma) {
+ // Array with two or more elements.
+ let sep = SeqSep::trailing_allowed(token::Comma);
+ let (remaining_exprs, _) = self.parse_seq_to_end(close, sep, |p| p.parse_expr())?;
+ let mut exprs = vec![first_expr];
+ exprs.extend(remaining_exprs);
+ ExprKind::Array(exprs)
+ } else {
+ // Array with one element.
+ self.expect(close)?;
+ ExprKind::Array(vec![first_expr])
+ }
+ };
+ let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
+ self.maybe_recover_from_bad_qpath(expr)
+ }
+
+ fn parse_path_start_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let (qself, path) = if self.eat_lt() {
+ let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
+ (Some(qself), path)
+ } else {
+ (None, self.parse_path(PathStyle::Expr)?)
+ };
+ let lo = path.span;
+
+ // `!`, as an operator, is prefix, so we know this isn't that.
+ let (hi, kind) = if self.eat(&token::Not) {
+ // MACRO INVOCATION expression
+ if qself.is_some() {
+ self.struct_span_err(path.span, "macros cannot use qualified paths").emit();
+ }
+ let mac = MacCall {
+ path,
+ args: self.parse_mac_args()?,
+ prior_type_ascription: self.last_type_ascription,
+ };
+ (self.prev_token.span, ExprKind::MacCall(mac))
+ } else if self.check(&token::OpenDelim(Delimiter::Brace)) {
+ if let Some(expr) = self.maybe_parse_struct_expr(qself.as_ref(), &path, &attrs) {
+ if qself.is_some() {
+ self.sess.gated_spans.gate(sym::more_qualified_paths, path.span);
+ }
+ return expr;
+ } else {
+ (path.span, ExprKind::Path(qself, path))
+ }
+ } else {
+ (path.span, ExprKind::Path(qself, path))
+ };
+
+ let expr = self.mk_expr(lo.to(hi), kind, attrs);
+ self.maybe_recover_from_bad_qpath(expr)
+ }
+
+ /// Parse `'label: $expr`. The label is already parsed.
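+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// 'outer: loop { break 'outer; }
+ /// 'block: { break 'block 1; } // gated: `label_break_value`
+ /// ```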
+ fn parse_labeled_expr(
+ &mut self,
+ label: Label,
+ attrs: AttrVec,
+ mut consume_colon: bool,
+ ) -> PResult<'a, P<Expr>> {
+ let lo = label.ident.span;
+ let label = Some(label);
+ let ate_colon = self.eat(&token::Colon);
+ let expr = if self.eat_keyword(kw::While) {
+ self.parse_while_expr(label, lo, attrs)
+ } else if self.eat_keyword(kw::For) {
+ self.parse_for_expr(label, lo, attrs)
+ } else if self.eat_keyword(kw::Loop) {
+ self.parse_loop_expr(label, lo, attrs)
+ } else if self.check_noexpect(&token::OpenDelim(Delimiter::Brace))
+ || self.token.is_whole_block()
+ {
+ self.parse_block_expr(label, lo, BlockCheckMode::Default, attrs)
+ } else if !ate_colon
+ && (self.check_noexpect(&TokenKind::Comma) || self.check_noexpect(&TokenKind::Gt))
+ {
+ // We're probably inside of a `Path<'a>` that needs a turbofish
+ let msg = "expected `while`, `for`, `loop` or `{` after a label";
+ self.struct_span_err(self.token.span, msg).span_label(self.token.span, msg).emit();
+ consume_colon = false;
+ Ok(self.mk_expr_err(lo))
+ } else {
+ let msg = "expected `while`, `for`, `loop` or `{` after a label";
+
+ let mut err = self.struct_span_err(self.token.span, msg);
+ err.span_label(self.token.span, msg);
+
+ // Continue as an expression in an effort to recover on `'label: non_block_expr`.
+ let expr = self.parse_expr().map(|expr| {
+ let span = expr.span;
+
+ let found_labeled_breaks = {
+ struct FindLabeledBreaksVisitor(bool);
+
+ impl<'ast> Visitor<'ast> for FindLabeledBreaksVisitor {
+ fn visit_expr_post(&mut self, ex: &'ast Expr) {
+ if let ExprKind::Break(Some(_label), _) = ex.kind {
+ self.0 = true;
+ }
+ }
+ }
+
+ let mut vis = FindLabeledBreaksVisitor(false);
+ vis.visit_expr(&expr);
+ vis.0
+ };
+
+ // The suggestion involves adding a labeled block, which is unstable as of this writing.
+ //
+ // If there are no breaks that may use this label, suggest removing the label and
+ // recover to the unmodified expression.
+ if !found_labeled_breaks {
+ let msg = "consider removing the label";
+ err.span_suggestion_verbose(
+ lo.until(span),
+ msg,
+ "",
+ Applicability::MachineApplicable,
+ );
+
+ return expr;
+ }
+
+ let sugg_msg = "consider enclosing expression in a block";
+ let suggestions = vec![
+ (span.shrink_to_lo(), "{ ".to_owned()),
+ (span.shrink_to_hi(), " }".to_owned()),
+ ];
+
+ err.multipart_suggestion_verbose(
+ sugg_msg,
+ suggestions,
+ Applicability::MachineApplicable,
+ );
+
+ // Replace `'label: non_block_expr` with `'label: {non_block_expr}` in order to suppress future errors about `break 'label`.
+ let stmt = self.mk_stmt(span, StmtKind::Expr(expr));
+ let blk = self.mk_block(vec![stmt], BlockCheckMode::Default, span);
+ self.mk_expr(span, ExprKind::Block(blk, label), ThinVec::new())
+ });
+
+ err.emit();
+ expr
+ }?;
+
+ if !ate_colon && consume_colon {
+ self.error_labeled_expr_must_be_followed_by_colon(lo, expr.span);
+ }
+
+ Ok(expr)
+ }
+
+ fn error_labeled_expr_must_be_followed_by_colon(&self, lo: Span, span: Span) {
+ self.struct_span_err(span, "labeled expression must be followed by `:`")
+ .span_label(lo, "the label")
+ .span_suggestion_short(
+ lo.shrink_to_hi(),
+ "add `:` after the label",
+ ": ",
+ Applicability::MachineApplicable,
+ )
+ .note("labels are used before loops and blocks, allowing e.g., `break 'label` to them")
+ .emit();
+ }
+
+ /// Recover on the syntax `do catch { ... }` suggesting `try { ... }` instead.
+ fn recover_do_catch(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.token.span;
+
+ self.bump(); // `do`
+ self.bump(); // `catch`
+
+ let span_dc = lo.to(self.prev_token.span);
+ self.struct_span_err(span_dc, "found removed `do catch` syntax")
+ .span_suggestion(
+ span_dc,
+ "replace with the new syntax",
+ "try",
+ Applicability::MachineApplicable,
+ )
+ .note("following RFC #2388, the new non-placeholder syntax is `try`")
+ .emit();
+
+ self.parse_try_block(lo, attrs)
+ }
+
+ /// Parse an expression if the token can begin one.
+ fn parse_expr_opt(&mut self) -> PResult<'a, Option<P<Expr>>> {
+ Ok(if self.token.can_begin_expr() { Some(self.parse_expr()?) } else { None })
+ }
+
+ /// Parse `"return" expr?`.
+ fn parse_return_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.prev_token.span;
+ let kind = ExprKind::Ret(self.parse_expr_opt()?);
+ let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
+ self.maybe_recover_from_bad_qpath(expr)
+ }
+
+ /// Parse `"do" "yeet" expr?`.
+ fn parse_yeet_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.token.span;
+
+ self.bump(); // `do`
+ self.bump(); // `yeet`
+
+ let kind = ExprKind::Yeet(self.parse_expr_opt()?);
+
+ let span = lo.to(self.prev_token.span);
+ self.sess.gated_spans.gate(sym::yeet_expr, span);
+ let expr = self.mk_expr(span, kind, attrs);
+ self.maybe_recover_from_bad_qpath(expr)
+ }
+
+ /// Parse `"break" (('label (:? expr)?) | expr?)` with `"break"` token already eaten.
+ /// If the label is followed immediately by a `:` token, the label and `:` are
+ /// parsed as part of the expression (i.e. a labeled loop). The language team has
+ /// decided in #87026 to require parentheses as a visual aid to avoid confusion if
+ /// the break expression of an unlabeled break is a labeled loop (as in
+ /// `break 'lbl: loop {}`); a labeled break with an unlabeled loop as its value
+ /// expression only gets a warning for compatibility reasons; and a labeled break
+ /// with a labeled loop does not even get a warning because there is no ambiguity.
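+ ///
+ /// A sketch of the three cases described above (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// break 'lbl: loop {};         // error: suggests `break ('lbl: loop {})`
+ /// break 'out loop {};          // warned via the `BREAK_WITH_LABEL_AND_LOOP` lint
+ /// break 'out 'inner: loop {};  // unambiguous: no warning
+ /// ```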
+ fn parse_break_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.prev_token.span;
+ let mut label = self.eat_label();
+ let kind = if label.is_some() && self.token == token::Colon {
+ // The value expression can be a labeled loop, see issue #86948, e.g.:
+ // `loop { break 'label: loop { break 'label 42; }; }`
+ let lexpr = self.parse_labeled_expr(label.take().unwrap(), AttrVec::new(), true)?;
+ self.struct_span_err(
+ lexpr.span,
+ "parentheses are required around this expression to avoid confusion with a labeled break expression",
+ )
+ .multipart_suggestion(
+ "wrap the expression in parentheses",
+ vec![
+ (lexpr.span.shrink_to_lo(), "(".to_string()),
+ (lexpr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ Some(lexpr)
+ } else if self.token != token::OpenDelim(Delimiter::Brace)
+ || !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
+ {
+ let expr = self.parse_expr_opt()?;
+ if let Some(ref expr) = expr {
+ if label.is_some()
+ && matches!(
+ expr.kind,
+ ExprKind::While(_, _, None)
+ | ExprKind::ForLoop(_, _, _, None)
+ | ExprKind::Loop(_, None)
+ | ExprKind::Block(_, None)
+ )
+ {
+ self.sess.buffer_lint_with_diagnostic(
+ BREAK_WITH_LABEL_AND_LOOP,
+ lo.to(expr.span),
+ ast::CRATE_NODE_ID,
+ "this labeled break expression is easy to confuse with an unlabeled break with a labeled value expression",
+ BuiltinLintDiagnostics::BreakWithLabelAndLoop(expr.span),
+ );
+ }
+ }
+ expr
+ } else {
+ None
+ };
+ let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Break(label, kind), attrs);
+ self.maybe_recover_from_bad_qpath(expr)
+ }
+
+ /// Parse `"yield" expr?`.
+ fn parse_yield_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.prev_token.span;
+ let kind = ExprKind::Yield(self.parse_expr_opt()?);
+ let span = lo.to(self.prev_token.span);
+ self.sess.gated_spans.gate(sym::generators, span);
+ let expr = self.mk_expr(span, kind, attrs);
+ self.maybe_recover_from_bad_qpath(expr)
+ }
+
+ /// Returns a string literal if the next token is a string literal.
+ /// In case of error, returns `Err(Some(lit))` if the next token is a literal of the wrong kind,
+ /// and returns `Err(None)` if the next token is not a literal at all.
+ pub fn parse_str_lit(&mut self) -> Result<ast::StrLit, Option<Lit>> {
+ match self.parse_opt_lit() {
+ Some(lit) => match lit.kind {
+ ast::LitKind::Str(symbol_unescaped, style) => Ok(ast::StrLit {
+ style,
+ symbol: lit.token.symbol,
+ suffix: lit.token.suffix,
+ span: lit.span,
+ symbol_unescaped,
+ }),
+ _ => Err(Some(lit)),
+ },
+ None => Err(None),
+ }
+ }
+
+ pub(super) fn parse_lit(&mut self) -> PResult<'a, Lit> {
+ self.parse_opt_lit().ok_or_else(|| {
+ if let token::Interpolated(inner) = &self.token.kind {
+ let expr = match inner.as_ref() {
+ token::NtExpr(expr) => Some(expr),
+ token::NtLiteral(expr) => Some(expr),
+ _ => None,
+ };
+ if let Some(expr) = expr {
+ if matches!(expr.kind, ExprKind::Err) {
+ let mut err = self
+ .diagnostic()
+ .struct_span_err(self.token.span, "invalid interpolated expression");
+ err.downgrade_to_delayed_bug();
+ return err;
+ }
+ }
+ }
+ let msg = format!("unexpected token: {}", super::token_descr(&self.token));
+ self.struct_span_err(self.token.span, &msg)
+ })
+ }
+
+ /// Matches `lit = true | false | token_lit`.
+ /// Returns `None` if the next token is not a literal.
+ pub(super) fn parse_opt_lit(&mut self) -> Option<Lit> {
+ let mut recovered = None;
+ if self.token == token::Dot {
+ // Attempt to recover `.4` as `0.4`. We don't currently have any syntax where
+ // dot would follow an optional literal, so we do this unconditionally.
+ recovered = self.look_ahead(1, |next_token| {
+ if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) =
+ next_token.kind
+ {
+ if self.token.span.hi() == next_token.span.lo() {
+ let s = String::from("0.") + symbol.as_str();
+ let kind = TokenKind::lit(token::Float, Symbol::intern(&s), suffix);
+ return Some(Token::new(kind, self.token.span.to(next_token.span)));
+ }
+ }
+ None
+ });
+ if let Some(token) = &recovered {
+ self.bump();
+ self.error_float_lits_must_have_int_part(&token);
+ }
+ }
+
+ let token = recovered.as_ref().unwrap_or(&self.token);
+ match Lit::from_token(token) {
+ Ok(lit) => {
+ self.bump();
+ Some(lit)
+ }
+ Err(LitError::NotLiteral) => None,
+ Err(err) => {
+ let span = token.span;
+ let token::Literal(lit) = token.kind else {
+ unreachable!();
+ };
+ self.bump();
+ self.report_lit_error(err, lit, span);
+ // Pack possible quotes and prefixes from the original literal into
+ // the error literal's symbol so they can be pretty-printed faithfully.
+ let suffixless_lit = token::Lit::new(lit.kind, lit.symbol, None);
+ let symbol = Symbol::intern(&suffixless_lit.to_string());
+ let lit = token::Lit::new(token::Err, symbol, lit.suffix);
+ Some(Lit::from_lit_token(lit, span).unwrap_or_else(|_| unreachable!()))
+ }
+ }
+ }
+
+ fn error_float_lits_must_have_int_part(&self, token: &Token) {
+ self.struct_span_err(token.span, "float literals must have an integer part")
+ .span_suggestion(
+ token.span,
+ "must have an integer part",
+ pprust::token_to_string(token),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+ fn report_lit_error(&self, err: LitError, lit: token::Lit, span: Span) {
+ // Checks if `s` looks like i32 or u1234 etc.
+ fn looks_like_width_suffix(first_chars: &[char], s: &str) -> bool {
+ s.len() > 1 && s.starts_with(first_chars) && s[1..].chars().all(|c| c.is_ascii_digit())
+ }
+
+ // Try to lowercase the prefix if it's a valid base prefix.
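+ // For example (illustrative): `0B1010` is lexed as the literal `0` with the
+ // suffix `B1010`, which this helper turns into the suggestion `0b1010`.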
+ fn fix_base_capitalisation(s: &str) -> Option<String> {
+ if let Some(stripped) = s.strip_prefix('B') {
+ Some(format!("0b{stripped}"))
+ } else if let Some(stripped) = s.strip_prefix('O') {
+ Some(format!("0o{stripped}"))
+ } else if let Some(stripped) = s.strip_prefix('X') {
+ Some(format!("0x{stripped}"))
+ } else {
+ None
+ }
+ }
+
+ let token::Lit { kind, suffix, .. } = lit;
+ match err {
+ // `NotLiteral` is not an error by itself, so we don't report it and
+ // instead give the parser an opportunity to try something else.
+ LitError::NotLiteral => {}
+ // `LexerError` *is* an error, but it was already reported
+ // by the lexer, so we don't report it a second time here.
+ LitError::LexerError => {}
+ LitError::InvalidSuffix => {
+ self.expect_no_suffix(
+ span,
+ &format!("{} {} literal", kind.article(), kind.descr()),
+ suffix,
+ );
+ }
+ LitError::InvalidIntSuffix => {
+ let suf = suffix.expect("suffix error with no suffix");
+ let suf = suf.as_str();
+ if looks_like_width_suffix(&['i', 'u'], &suf) {
+ // If it looks like a width, try to be helpful.
+ let msg = format!("invalid width `{}` for integer literal", &suf[1..]);
+ self.struct_span_err(span, &msg)
+ .help("valid widths are 8, 16, 32, 64 and 128")
+ .emit();
+ } else if let Some(fixed) = fix_base_capitalisation(suf) {
+ let msg = "invalid base prefix for number literal";
+
+ self.struct_span_err(span, msg)
+ .note("base prefixes (`0xff`, `0b1010`, `0o755`) are lowercase")
+ .span_suggestion(
+ span,
+ "try making the prefix lowercase",
+ fixed,
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ } else {
+ let msg = format!("invalid suffix `{suf}` for number literal");
+ self.struct_span_err(span, &msg)
+ .span_label(span, format!("invalid suffix `{suf}`"))
+ .help("the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.)")
+ .emit();
+ }
+ }
+ LitError::InvalidFloatSuffix => {
+ let suf = suffix.expect("suffix error with no suffix");
+ let suf = suf.as_str();
+ if looks_like_width_suffix(&['f'], suf) {
+ // If it looks like a width, try to be helpful.
+ let msg = format!("invalid width `{}` for float literal", &suf[1..]);
+ self.struct_span_err(span, &msg).help("valid widths are 32 and 64").emit();
+ } else {
+ let msg = format!("invalid suffix `{suf}` for float literal");
+ self.struct_span_err(span, &msg)
+ .span_label(span, format!("invalid suffix `{suf}`"))
+ .help("valid suffixes are `f32` and `f64`")
+ .emit();
+ }
+ }
+ LitError::NonDecimalFloat(base) => {
+ let descr = match base {
+ 16 => "hexadecimal",
+ 8 => "octal",
+ 2 => "binary",
+ _ => unreachable!(),
+ };
+ self.struct_span_err(span, &format!("{descr} float literal is not supported"))
+ .span_label(span, "not supported")
+ .emit();
+ }
+ LitError::IntTooLarge => {
+ self.struct_span_err(span, "integer literal is too large").emit();
+ }
+ }
+ }
+
+ pub(super) fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<Symbol>) {
+ if let Some(suf) = suffix {
+ let mut err = if kind == "a tuple index"
+ && [sym::i32, sym::u32, sym::isize, sym::usize].contains(&suf)
+ {
+ // #59553: warn instead of rejecting out of hand to allow the fix to percolate
+ // through the ecosystem when people fix their macros
+ let mut err = self
+ .sess
+ .span_diagnostic
+ .struct_span_warn(sp, &format!("suffixes on {kind} are invalid"));
+ err.note(&format!(
+ "`{}` is *temporarily* accepted on tuple index fields as it was \
+ incorrectly accepted on stable for a few releases",
+ suf,
+ ));
+ err.help(
+ "on proc macros, you'll want to use `syn::Index::from` or \
+ `proc_macro::Literal::*_unsuffixed` for code that will desugar \
+ to tuple field access",
+ );
+ err.note(
+ "see issue #60210 <https://github.com/rust-lang/rust/issues/60210> \
+ for more information",
+ );
+ err
+ } else {
+ self.struct_span_err(sp, &format!("suffixes on {kind} are invalid"))
+ .forget_guarantee()
+ };
+ err.span_label(sp, format!("invalid suffix `{suf}`"));
+ err.emit();
+ }
+ }
+
+ /// Matches `'-' lit | lit` (cf. `ast_validation::AstValidator::check_expr_within_pat`).
+ /// Keep this in sync with `Token::can_begin_literal_maybe_minus`.
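+ ///
+ /// For example (illustrative), this accepts the `-1` and `2` in
+ /// `match x { -1 => {}, 2 => {}, _ => {} }`.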
+ pub fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> {
+ maybe_whole_expr!(self);
+
+ let lo = self.token.span;
+ let minus_present = self.eat(&token::BinOp(token::Minus));
+ let lit = self.parse_lit()?;
+ let expr = self.mk_expr(lit.span, ExprKind::Lit(lit), AttrVec::new());
+
+ if minus_present {
+ Ok(self.mk_expr(
+ lo.to(self.prev_token.span),
+ self.mk_unary(UnOp::Neg, expr),
+ AttrVec::new(),
+ ))
+ } else {
+ Ok(expr)
+ }
+ }
+
+ fn is_array_like_block(&mut self) -> bool {
+ self.look_ahead(1, |t| matches!(t.kind, TokenKind::Ident(..) | TokenKind::Literal(_)))
+ && self.look_ahead(2, |t| t == &token::Comma)
+ && self.look_ahead(3, |t| t.can_begin_expr())
+ }
+
+ /// Emits a suggestion if it looks like the user meant an array but
+ /// accidentally used braces, causing the code to be interpreted as a block
+ /// expression.
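+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// let x = {1, 2, 3}; // suggests `[1, 2, 3]`
+ /// ```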
+ fn maybe_suggest_brackets_instead_of_braces(
+ &mut self,
+ lo: Span,
+ attrs: AttrVec,
+ ) -> Option<P<Expr>> {
+ let mut snapshot = self.create_snapshot_for_diagnostic();
+ match snapshot.parse_array_or_repeat_expr(attrs, Delimiter::Brace) {
+ Ok(arr) => {
+ let hi = snapshot.prev_token.span;
+ self.struct_span_err(arr.span, "this is a block expression, not an array")
+ .multipart_suggestion(
+ "to make an array, use square brackets instead of curly braces",
+ vec![(lo, "[".to_owned()), (hi, "]".to_owned())],
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+
+ self.restore_snapshot(snapshot);
+ Some(self.mk_expr_err(arr.span))
+ }
+ Err(e) => {
+ e.cancel();
+ None
+ }
+ }
+ }
+
+ /// Parses a block or unsafe block.
+ pub(super) fn parse_block_expr(
+ &mut self,
+ opt_label: Option<Label>,
+ lo: Span,
+ blk_mode: BlockCheckMode,
+ mut attrs: AttrVec,
+ ) -> PResult<'a, P<Expr>> {
+ if self.is_array_like_block() {
+ if let Some(arr) = self.maybe_suggest_brackets_instead_of_braces(lo, attrs.clone()) {
+ return Ok(arr);
+ }
+ }
+
+ if let Some(label) = opt_label {
+ self.sess.gated_spans.gate(sym::label_break_value, label.ident.span);
+ }
+
+ if self.token.is_whole_block() {
+ self.struct_span_err(self.token.span, "cannot use a `block` macro fragment here")
+ .span_label(lo.to(self.token.span), "the `block` fragment is within this context")
+ .emit();
+ }
+
+ let (inner_attrs, blk) = self.parse_block_common(lo, blk_mode)?;
+ attrs.extend(inner_attrs);
+ Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs))
+ }
+
+ /// Parse a block that takes no attributes and has no label.
+ fn parse_simple_block(&mut self) -> PResult<'a, P<Expr>> {
+ let blk = self.parse_block()?;
+ Ok(self.mk_expr(blk.span, ExprKind::Block(blk, None), AttrVec::new()))
+ }
+
+ /// Parses a closure expression (e.g., `move |args| expr`).
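+ ///
+ /// An illustrative sketch of other accepted forms:
+ ///
+ /// ```ignore (illustrative)
+ /// move |x: u32| -> u32 { x + 1 }        // explicit return type requires a block (RFC 968)
+ /// async || { fut.await }                // gated: `async_closure`
+ /// for<'a> |x: &'a str| -> &'a str { x } // gated: `closure_lifetime_binder`
+ /// ```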
+ fn parse_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.token.span;
+
+ let binder = if self.check_keyword(kw::For) {
+ let lo = self.token.span;
+ let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
+ let span = lo.to(self.prev_token.span);
+
+ self.sess.gated_spans.gate(sym::closure_lifetime_binder, span);
+
+ ClosureBinder::For { span, generic_params: P::from_vec(lifetime_defs) }
+ } else {
+ ClosureBinder::NotPresent
+ };
+
+ let movability =
+ if self.eat_keyword(kw::Static) { Movability::Static } else { Movability::Movable };
+
+ let asyncness = if self.token.uninterpolated_span().rust_2018() {
+ self.parse_asyncness()
+ } else {
+ Async::No
+ };
+
+ let capture_clause = self.parse_capture_clause()?;
+ let decl = self.parse_fn_block_decl()?;
+ let decl_hi = self.prev_token.span;
+ let mut body = match decl.output {
+ FnRetTy::Default(_) => {
+ let restrictions = self.restrictions - Restrictions::STMT_EXPR;
+ self.parse_expr_res(restrictions, None)?
+ }
+ _ => {
+ // If an explicit return type is given, require a block to appear (RFC 968).
+ let body_lo = self.token.span;
+ self.parse_block_expr(None, body_lo, BlockCheckMode::Default, AttrVec::new())?
+ }
+ };
+
+ if let Async::Yes { span, .. } = asyncness {
+ // Feature-gate `async ||` closures.
+ self.sess.gated_spans.gate(sym::async_closure, span);
+ }
+
+ if self.token.kind == TokenKind::Semi
+ && matches!(self.token_cursor.frame.delim_sp, Some((Delimiter::Parenthesis, _)))
+ {
+ // It is likely that the closure body is a block whose braces
+ // have been removed. We will recover and eat the next
+ // statements later in the parsing process.
+ body = self.mk_expr_err(body.span);
+ }
+
+ let body_span = body.span;
+
+ let closure = self.mk_expr(
+ lo.to(body.span),
+ ExprKind::Closure(
+ binder,
+ capture_clause,
+ asyncness,
+ movability,
+ decl,
+ body,
+ lo.to(decl_hi),
+ ),
+ attrs,
+ );
+
+ // Disable recovery for closure body
+ let spans =
+ ClosureSpans { whole_closure: closure.span, closing_pipe: decl_hi, body: body_span };
+ self.current_closure = Some(spans);
+
+ Ok(closure)
+ }
+
+ /// Parses an optional `move` prefix to a closure-like construct.
+ fn parse_capture_clause(&mut self) -> PResult<'a, CaptureBy> {
+ if self.eat_keyword(kw::Move) {
+ // Check for `move async` and recover
+ if self.check_keyword(kw::Async) {
+ let move_async_span = self.token.span.with_lo(self.prev_token.span.data().lo);
+ Err(self.incorrect_move_async_order_found(move_async_span))
+ } else {
+ Ok(CaptureBy::Value)
+ }
+ } else {
+ Ok(CaptureBy::Ref)
+ }
+ }
+
+ /// Parses the `|arg, arg|` header of a closure.
+ fn parse_fn_block_decl(&mut self) -> PResult<'a, P<FnDecl>> {
+ let inputs = if self.eat(&token::OrOr) {
+ Vec::new()
+ } else {
+ self.expect(&token::BinOp(token::Or))?;
+ let args = self
+ .parse_seq_to_before_tokens(
+ &[&token::BinOp(token::Or), &token::OrOr],
+ SeqSep::trailing_allowed(token::Comma),
+ TokenExpectType::NoExpect,
+ |p| p.parse_fn_block_param(),
+ )?
+ .0;
+ self.expect_or()?;
+ args
+ };
+ let output =
+ self.parse_ret_ty(AllowPlus::Yes, RecoverQPath::Yes, RecoverReturnSign::Yes)?;
+
+ Ok(P(FnDecl { inputs, output }))
+ }
+
+ /// Parses a parameter in a closure header (e.g., `|arg, arg|`).
+ fn parse_fn_block_param(&mut self) -> PResult<'a, Param> {
+ let lo = self.token.span;
+ let attrs = self.parse_outer_attributes()?;
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let pat = this.parse_pat_no_top_alt(PARAM_EXPECTED)?;
+ let ty = if this.eat(&token::Colon) {
+ this.parse_ty()?
+ } else {
+ this.mk_ty(this.prev_token.span, TyKind::Infer)
+ };
+
+ Ok((
+ Param {
+ attrs: attrs.into(),
+ ty,
+ pat,
+ span: lo.to(this.token.span),
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
+ },
+ TrailingToken::MaybeComma,
+ ))
+ })
+ }
+
+ /// Parses an `if` expression (`if` token already eaten).
+ fn parse_if_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.prev_token.span;
+ let cond = self.parse_cond_expr()?;
+
+ self.parse_if_after_cond(attrs, lo, cond)
+ }
+
+ fn parse_if_after_cond(
+ &mut self,
+ attrs: AttrVec,
+ lo: Span,
+ mut cond: P<Expr>,
+ ) -> PResult<'a, P<Expr>> {
+ let cond_span = cond.span;
+ // Tries to interpret `cond` either as a missing expression (if it's a block)
+ // or as an unfinished expression (if it's a binop whose RHS is a block).
+ // We could probably add more recoveries here too...
+ let mut recover_block_from_condition = |this: &mut Self| {
+ let block = match &mut cond.kind {
+ ExprKind::Binary(Spanned { span: binop_span, .. }, _, right)
+ if let ExprKind::Block(_, None) = right.kind => {
+ this.error_missing_if_then_block(lo, cond_span.shrink_to_lo().to(*binop_span), true).emit();
+ std::mem::replace(right, this.mk_expr_err(binop_span.shrink_to_hi()))
+ },
+ ExprKind::Block(_, None) => {
+ this.error_missing_if_cond(lo, cond_span).emit();
+ std::mem::replace(&mut cond, this.mk_expr_err(cond_span.shrink_to_hi()))
+ }
+ _ => {
+ return None;
+ }
+ };
+ if let ExprKind::Block(block, _) = &block.kind {
+ Some(block.clone())
+ } else {
+ unreachable!()
+ }
+ };
+ // Parse the `then` block.
+ let thn = if self.token.is_keyword(kw::Else) {
+ if let Some(block) = recover_block_from_condition(self) {
+ block
+ } else {
+ self.error_missing_if_then_block(lo, cond_span, false).emit();
+ self.mk_block_err(cond_span.shrink_to_hi())
+ }
+ } else {
+ let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery.
+ let block = if self.check(&token::OpenDelim(Delimiter::Brace)) {
+ self.parse_block()?
+ } else {
+ if let Some(block) = recover_block_from_condition(self) {
+ block
+ } else {
+ // Parse the block, which will always fail, but we can add a nice note to the error.
+ self.parse_block().map_err(|mut err| {
+ err.span_note(
+ cond_span,
+ "the `if` expression is missing a block after this condition",
+ );
+ err
+ })?
+ }
+ };
+ self.error_on_if_block_attrs(lo, false, block.span, &attrs);
+ block
+ };
+ let els = if self.eat_keyword(kw::Else) { Some(self.parse_else_expr()?) } else { None };
+ Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::If(cond, thn, els), attrs))
+ }
+
+ fn error_missing_if_then_block(
+ &self,
+ if_span: Span,
+ cond_span: Span,
+ is_unfinished: bool,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut err = self.struct_span_err(
+ if_span,
+ "this `if` expression is missing a block after the condition",
+ );
+ if is_unfinished {
+ err.span_help(cond_span, "this binary operation is possibly unfinished");
+ } else {
+ err.span_help(cond_span.shrink_to_hi(), "add a block here");
+ }
+ err
+ }
+
+ fn error_missing_if_cond(
+ &self,
+ lo: Span,
+ span: Span,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let next_span = self.sess.source_map().next_point(lo);
+ let mut err = self.struct_span_err(next_span, "missing condition for `if` expression");
+ err.span_label(next_span, "expected condition here");
+ err.span_label(
+ self.sess.source_map().start_point(span),
+ "if this block is the condition of the `if` expression, then it must be followed by another block"
+ );
+ err
+ }
+
+ /// Parses the condition of an `if` or `while` expression.
+ fn parse_cond_expr(&mut self) -> PResult<'a, P<Expr>> {
+ let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL | Restrictions::ALLOW_LET, None)?;
+
+ if let ExprKind::Let(..) = cond.kind {
+ // Remove the last feature gating of a `let` expression since it's stable.
+ self.sess.gated_spans.ungate_last(sym::let_chains, cond.span);
+ }
+
+ Ok(cond)
+ }
+
+ /// Parses a `let $pat = $expr` pseudo-expression.
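+ ///
+ /// `let` expressions are only legal in `if`/`while`-style conditions; an
+ /// illustrative sketch:
+ ///
+ /// ```ignore (illustrative)
+ /// if let Some(x) = opt && x > 0 { /* ... */ } // chained `let` is gated: `let_chains`
+ /// while let Some(x) = it.next() { /* ... */ }
+ /// ```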
+ fn parse_let_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ // This is an *approximate* heuristic that detects whether `let` chains are
+ // being parsed in the right position. It's approximate because it
+ // doesn't deny all invalid `let` expressions, just completely wrong usages.
+ let not_in_chain = !matches!(
+ self.prev_token.kind,
+ TokenKind::AndAnd | TokenKind::Ident(kw::If, _) | TokenKind::Ident(kw::While, _)
+ );
+ if !self.restrictions.contains(Restrictions::ALLOW_LET) || not_in_chain {
+ self.struct_span_err(self.token.span, "expected expression, found `let` statement")
+ .emit();
+ }
+
+ self.bump(); // Eat `let` token
+ let lo = self.prev_token.span;
+ let pat = self.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::Yes,
+ RecoverColon::Yes,
+ CommaRecoveryMode::LikelyTuple,
+ )?;
+ self.expect(&token::Eq)?;
+ let expr = self.with_res(self.restrictions | Restrictions::NO_STRUCT_LITERAL, |this| {
+ this.parse_assoc_expr_with(1 + prec_let_scrutinee_needs_par(), None.into())
+ })?;
+ let span = lo.to(expr.span);
+ self.sess.gated_spans.gate(sym::let_chains, span);
+ Ok(self.mk_expr(span, ExprKind::Let(pat, expr, span), attrs))
+ }
+
+ /// Parses an `else { ... }` expression (`else` token already eaten).
+ fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> {
+ let else_span = self.prev_token.span; // `else`
+ let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery.
+ let expr = if self.eat_keyword(kw::If) {
+ self.parse_if_expr(AttrVec::new())?
+ } else if self.check(&TokenKind::OpenDelim(Delimiter::Brace)) {
+ self.parse_simple_block()?
+ } else {
+ let snapshot = self.create_snapshot_for_diagnostic();
+ let first_tok = super::token_descr(&self.token);
+ let first_tok_span = self.token.span;
+ match self.parse_expr() {
+ Ok(cond)
+ // If it's not a free-standing expression, and is followed by a block,
+ // then it's very likely the condition of an `else if`.
+ if self.check(&TokenKind::OpenDelim(Delimiter::Brace))
+ && classify::expr_requires_semi_to_be_stmt(&cond) =>
+ {
+ self.struct_span_err(first_tok_span, format!("expected `{{`, found {first_tok}"))
+ .span_label(else_span, "expected an `if` or a block after this `else`")
+ .span_suggestion(
+ cond.span.shrink_to_lo(),
+ "add an `if` if this is the condition of a chained `else if` statement",
+ "if ",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ self.parse_if_after_cond(AttrVec::new(), cond.span.shrink_to_lo(), cond)?
+ }
+ Err(e) => {
+ e.cancel();
+ self.restore_snapshot(snapshot);
+ self.parse_simple_block()?
+ },
+ Ok(_) => {
+ self.restore_snapshot(snapshot);
+ self.parse_simple_block()?
+ },
+ }
+ };
+ self.error_on_if_block_attrs(else_span, true, expr.span, &attrs);
+ Ok(expr)
+ }
+
+ fn error_on_if_block_attrs(
+ &self,
+ ctx_span: Span,
+ is_ctx_else: bool,
+ branch_span: Span,
+ attrs: &[ast::Attribute],
+ ) {
+ let (span, last) = match attrs {
+ [] => return,
+ [x0 @ xn] | [x0, .., xn] => (x0.span.to(xn.span), xn.span),
+ };
+ let ctx = if is_ctx_else { "else" } else { "if" };
+ self.struct_span_err(last, "outer attributes are not allowed on `if` and `else` branches")
+ .span_label(branch_span, "the attributes are attached to this branch")
+ .span_label(ctx_span, format!("the branch belongs to this `{ctx}`"))
+ .span_suggestion(span, "remove the attributes", "", Applicability::MachineApplicable)
+ .emit();
+ }
+
+ /// Parses `for <src_pat> in <src_expr> <src_loop_block>` (`for` token already eaten).
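+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// for x in 0..10 { println!("{x}") }
+ /// for (x in xs) {} // recovered; the parentheses are suggested away
+ /// ```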
+ fn parse_for_expr(
+ &mut self,
+ opt_label: Option<Label>,
+ lo: Span,
+ mut attrs: AttrVec,
+ ) -> PResult<'a, P<Expr>> {
+ // Record whether we are about to parse `for (`.
+ // This is used below for recovery in case of `for ( $stuff ) $block`
+ // in which case we will suggest `for $stuff $block`.
+ let begin_paren = match self.token.kind {
+ token::OpenDelim(Delimiter::Parenthesis) => Some(self.token.span),
+ _ => None,
+ };
+
+ let pat = self.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::Yes,
+ RecoverColon::Yes,
+ CommaRecoveryMode::LikelyTuple,
+ )?;
+ if !self.eat_keyword(kw::In) {
+ self.error_missing_in_for_loop();
+ }
+ self.check_for_for_in_in_typo(self.prev_token.span);
+ let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
+
+ let pat = self.recover_parens_around_for_head(pat, begin_paren);
+
+ let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?;
+ attrs.extend(iattrs);
+
+ let kind = ExprKind::ForLoop(pat, expr, loop_block, opt_label);
+ Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
+ }
+
+ fn error_missing_in_for_loop(&mut self) {
+ let (span, msg, sugg) = if self.token.is_ident_named(sym::of) {
+ // Possibly using JS syntax (#75311).
+ let span = self.token.span;
+ self.bump();
+ (span, "try using `in` here instead", "in")
+ } else {
+ (self.prev_token.span.between(self.token.span), "try adding `in` here", " in ")
+ };
+ self.struct_span_err(span, "missing `in` in `for` loop")
+ .span_suggestion_short(
+ span,
+ msg,
+ sugg,
+ // Has been misleading, at least in the past (closed Issue #48492).
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+
+ /// Parses a `while` or `while let` expression (`while` token already eaten).
+ fn parse_while_expr(
+ &mut self,
+ opt_label: Option<Label>,
+ lo: Span,
+ mut attrs: AttrVec,
+ ) -> PResult<'a, P<Expr>> {
+ let cond = self.parse_cond_expr().map_err(|mut err| {
+ err.span_label(lo, "while parsing the condition of this `while` expression");
+ err
+ })?;
+ let (iattrs, body) = self.parse_inner_attrs_and_block().map_err(|mut err| {
+ err.span_label(lo, "while parsing the body of this `while` expression");
+ err.span_label(cond.span, "this `while` condition successfully parsed");
+ err
+ })?;
+ attrs.extend(iattrs);
+ Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::While(cond, body, opt_label), attrs))
+ }
+
+ /// Parses `loop { ... }` (`loop` token already eaten).
+ fn parse_loop_expr(
+ &mut self,
+ opt_label: Option<Label>,
+ lo: Span,
+ mut attrs: AttrVec,
+ ) -> PResult<'a, P<Expr>> {
+ let (iattrs, body) = self.parse_inner_attrs_and_block()?;
+ attrs.extend(iattrs);
+ Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::Loop(body, opt_label), attrs))
+ }
+
+ pub(crate) fn eat_label(&mut self) -> Option<Label> {
+ self.token.lifetime().map(|ident| {
+ self.bump();
+ Label { ident }
+ })
+ }
+
+ /// Parses a `match ... { ... }` expression (`match` token already eaten).
+ fn parse_match_expr(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let match_span = self.prev_token.span;
+ let lo = self.prev_token.span;
+ let scrutinee = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
+ if let Err(mut e) = self.expect(&token::OpenDelim(Delimiter::Brace)) {
+ if self.token == token::Semi {
+ e.span_suggestion_short(
+ match_span,
+ "try removing this `match`",
+ "",
+ Applicability::MaybeIncorrect, // speculative
+ );
+ }
+ if self.maybe_recover_unexpected_block_label() {
+ e.cancel();
+ self.bump();
+ } else {
+ return Err(e);
+ }
+ }
+ attrs.extend(self.parse_inner_attributes()?);
+
+ let mut arms: Vec<Arm> = Vec::new();
+ while self.token != token::CloseDelim(Delimiter::Brace) {
+ match self.parse_arm() {
+ Ok(arm) => arms.push(arm),
+ Err(mut e) => {
+ // Recover by skipping to the end of the block.
+ e.emit();
+ self.recover_stmt();
+ let span = lo.to(self.token.span);
+ if self.token == token::CloseDelim(Delimiter::Brace) {
+ self.bump();
+ }
+ return Ok(self.mk_expr(span, ExprKind::Match(scrutinee, arms), attrs));
+ }
+ }
+ }
+ let hi = self.token.span;
+ self.bump();
+ Ok(self.mk_expr(lo.to(hi), ExprKind::Match(scrutinee, arms), attrs))
+ }
+
+ /// Attempts to recover from a `match` arm body that contains statements but no surrounding braces.
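+ ///
+ /// An illustrative sketch (with placeholder statements) of what gets recovered:
+ ///
+ /// ```ignore (illustrative)
+ /// match n {
+ ///     0 => stmt_one();
+ ///          stmt_two(); // error: `match` arm body without braces
+ ///     _ => {}
+ /// }
+ /// ```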
+ fn parse_arm_body_missing_braces(
+ &mut self,
+ first_expr: &P<Expr>,
+ arrow_span: Span,
+ ) -> Option<P<Expr>> {
+ if self.token.kind != token::Semi {
+ return None;
+ }
+ let start_snapshot = self.create_snapshot_for_diagnostic();
+ let semi_sp = self.token.span;
+ self.bump(); // `;`
+ let mut stmts =
+ vec![self.mk_stmt(first_expr.span, ast::StmtKind::Expr(first_expr.clone()))];
+ let err = |this: &mut Parser<'_>, stmts: Vec<ast::Stmt>| {
+ let span = stmts[0].span.to(stmts[stmts.len() - 1].span);
+ let mut err = this.struct_span_err(span, "`match` arm body without braces");
+ let (these, s, are) =
+ if stmts.len() > 1 { ("these", "s", "are") } else { ("this", "", "is") };
+ err.span_label(
+ span,
+ &format!(
+ "{these} statement{s} {are} not surrounded by a body",
+ these = these,
+ s = s,
+ are = are
+ ),
+ );
+ err.span_label(arrow_span, "while parsing the `match` arm starting here");
+ if stmts.len() > 1 {
+ err.multipart_suggestion(
+ &format!("surround the statement{s} with a body"),
+ vec![
+ (span.shrink_to_lo(), "{ ".to_string()),
+ (span.shrink_to_hi(), " }".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion(
+ semi_sp,
+ "use a comma to end a `match` arm expression",
+ ",",
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ this.mk_expr_err(span)
+ };
+ // We might have either a `,` -> `;` typo, or a block without braces. We need
+ // a more subtle parsing strategy.
+ loop {
+ if self.token.kind == token::CloseDelim(Delimiter::Brace) {
+ // We have reached the closing brace of the `match` expression.
+ return Some(err(self, stmts));
+ }
+ if self.token.kind == token::Comma {
+ self.restore_snapshot(start_snapshot);
+ return None;
+ }
+ let pre_pat_snapshot = self.create_snapshot_for_diagnostic();
+ match self.parse_pat_no_top_alt(None) {
+ Ok(_pat) => {
+ if self.token.kind == token::FatArrow {
+ // Reached arm end.
+ self.restore_snapshot(pre_pat_snapshot);
+ return Some(err(self, stmts));
+ }
+ }
+ Err(err) => {
+ err.cancel();
+ }
+ }
+
+ self.restore_snapshot(pre_pat_snapshot);
+ match self.parse_stmt_without_recovery(true, ForceCollect::No) {
+ // Consume statements for as long as possible.
+ Ok(Some(stmt)) => {
+ stmts.push(stmt);
+ }
+ Ok(None) => {
+ self.restore_snapshot(start_snapshot);
+ break;
+ }
+ // We could parse neither yet another statement missing its
+ // enclosing block nor the next arm's pattern or closing brace.
+ Err(stmt_err) => {
+ stmt_err.cancel();
+ self.restore_snapshot(start_snapshot);
+ break;
+ }
+ }
+ }
+ None
+ }
+
+ pub(super) fn parse_arm(&mut self) -> PResult<'a, Arm> {
+ // Used to check the `let_chains` and `if_let_guard` features mostly by scanning
+ // `&&` tokens.
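+ //
+ // For example (illustrative), in the arm `Some(x) if let Ok(y) = f(x) => ...`
+ // the guard contains a `let`, so its span is gated under `if_let_guard`.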
+ fn check_let_expr(expr: &Expr) -> (bool, bool) {
+ match expr.kind {
+ ExprKind::Binary(BinOp { node: BinOpKind::And, .. }, ref lhs, ref rhs) => {
+ let lhs_rslt = check_let_expr(lhs);
+ let rhs_rslt = check_let_expr(rhs);
+ (lhs_rslt.0 || rhs_rslt.0, false)
+ }
+ ExprKind::Let(..) => (true, true),
+ _ => (false, true),
+ }
+ }
+ let attrs = self.parse_outer_attributes()?;
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let lo = this.token.span;
+ let pat = this.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::Yes,
+ RecoverColon::Yes,
+ CommaRecoveryMode::EitherTupleOrPipe,
+ )?;
+ let guard = if this.eat_keyword(kw::If) {
+ let if_span = this.prev_token.span;
+ let cond = this.parse_expr_res(Restrictions::ALLOW_LET, None)?;
+ let (has_let_expr, does_not_have_bin_op) = check_let_expr(&cond);
+ if has_let_expr {
+ if does_not_have_bin_op {
+ // Remove the last feature gating of a `let` expression since it's stable.
+ this.sess.gated_spans.ungate_last(sym::let_chains, cond.span);
+ }
+ let span = if_span.to(cond.span);
+ this.sess.gated_spans.gate(sym::if_let_guard, span);
+ }
+ Some(cond)
+ } else {
+ None
+ };
+ let arrow_span = this.token.span;
+ if let Err(mut err) = this.expect(&token::FatArrow) {
+ // We might have a `=>` -> `=` or `->` typo (issue #89396).
+ if TokenKind::FatArrow
+ .similar_tokens()
+ .map_or(false, |similar_tokens| similar_tokens.contains(&this.token.kind))
+ {
+ err.span_suggestion(
+ this.token.span,
+ "try using a fat arrow here",
+ "=>",
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+ this.bump();
+ } else {
+ return Err(err);
+ }
+ }
+ let arm_start_span = this.token.span;
+
+ let expr = this.parse_expr_res(Restrictions::STMT_EXPR, None).map_err(|mut err| {
+ err.span_label(arrow_span, "while parsing the `match` arm starting here");
+ err
+ })?;
+
+ let require_comma = classify::expr_requires_semi_to_be_stmt(&expr)
+ && this.token != token::CloseDelim(Delimiter::Brace);
+
+ let hi = this.prev_token.span;
+
+ if require_comma {
+ let sm = this.sess.source_map();
+ if let Some(body) = this.parse_arm_body_missing_braces(&expr, arrow_span) {
+ let span = body.span;
+ return Ok((
+ ast::Arm {
+ attrs: attrs.into(),
+ pat,
+ guard,
+ body,
+ span,
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
+ },
+ TrailingToken::None,
+ ));
+ }
+ this.expect_one_of(&[token::Comma], &[token::CloseDelim(Delimiter::Brace)])
+ .or_else(|mut err| {
+ if this.token == token::FatArrow {
+ if let Ok(expr_lines) = sm.span_to_lines(expr.span)
+ && let Ok(arm_start_lines) = sm.span_to_lines(arm_start_span)
+ && arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col
+ && expr_lines.lines.len() == 2
+ {
+ // We check whether there's any trailing code in the parse span;
+ // if there isn't, we very likely have the following:
+ //
+ // X | &Y => "y"
+ // | -- - missing comma
+ // | |
+ // | arrow_span
+ // X | &X => "x"
+ // | - ^^ self.token.span
+ // | |
+ // | parsed until here as `"y" & X`
+ err.span_suggestion_short(
+ arm_start_span.shrink_to_hi(),
+ "missing a comma here to end this `match` arm",
+ ",",
+ Applicability::MachineApplicable,
+ );
+ return Err(err);
+ }
+ } else {
+ // FIXME(compiler-errors): We could also recover `; PAT =>` here
+
+ // Try to parse a following `PAT =>`, if successful
+ // then we should recover.
+ let mut snapshot = this.create_snapshot_for_diagnostic();
+ let pattern_follows = snapshot
+ .parse_pat_allow_top_alt(
+ None,
+ RecoverComma::Yes,
+ RecoverColon::Yes,
+ CommaRecoveryMode::EitherTupleOrPipe,
+ )
+ .map_err(|err| err.cancel())
+ .is_ok();
+ if pattern_follows && snapshot.check(&TokenKind::FatArrow) {
+ err.cancel();
+ this.struct_span_err(
+ hi.shrink_to_hi(),
+ "expected `,` following `match` arm",
+ )
+ .span_suggestion(
+ hi.shrink_to_hi(),
+ "missing a comma here to end this `match` arm",
+ ",",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ return Ok(true);
+ }
+ }
+ err.span_label(arrow_span, "while parsing the `match` arm starting here");
+ Err(err)
+ })?;
+ } else {
+ this.eat(&token::Comma);
+ }
+
+ Ok((
+ ast::Arm {
+ attrs: attrs.into(),
+ pat,
+ guard,
+ body: expr,
+ span: lo.to(hi),
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
+ },
+ TrailingToken::None,
+ ))
+ })
+ }
+
+ /// Parses a `try {...}` expression (`try` token already eaten).
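+ ///
+ /// For illustration (gated behind the unstable `try_blocks` feature):
+ ///
+ /// ```ignore (illustrative)
+ /// let result: Result<i32, std::num::ParseIntError> = try {
+ ///     "1".parse::<i32>()? + "2".parse::<i32>()?
+ /// };
+ /// ```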
+ fn parse_try_block(&mut self, span_lo: Span, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let (iattrs, body) = self.parse_inner_attrs_and_block()?;
+ attrs.extend(iattrs);
+ if self.eat_keyword(kw::Catch) {
+ let mut error = self.struct_span_err(
+ self.prev_token.span,
+ "keyword `catch` cannot follow a `try` block",
+ );
+ error.help("try using `match` on the result of the `try` block instead");
+ error.emit();
+ Err(error)
+ } else {
+ let span = span_lo.to(body.span);
+ self.sess.gated_spans.gate(sym::try_blocks, span);
+ Ok(self.mk_expr(span, ExprKind::TryBlock(body), attrs))
+ }
+ }
+
+ fn is_do_catch_block(&self) -> bool {
+ self.token.is_keyword(kw::Do)
+ && self.is_keyword_ahead(1, &[kw::Catch])
+ && self.look_ahead(2, |t| *t == token::OpenDelim(Delimiter::Brace))
+ && !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
+ }
+
+ fn is_do_yeet(&self) -> bool {
+ self.token.is_keyword(kw::Do) && self.is_keyword_ahead(1, &[kw::Yeet])
+ }
+
+ fn is_try_block(&self) -> bool {
+ self.token.is_keyword(kw::Try)
+ && self.look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Brace))
+ && self.token.uninterpolated_span().rust_2018()
+ }
+
+ /// Parses an `async move? {...}` expression.
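+ ///
+ /// For illustration, both capture forms are accepted:
+ ///
+ /// ```ignore (illustrative)
+ /// let fut = async { 1 + 1 };
+ /// let fut_move = async move { x + 1 };
+ /// ```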
+ fn parse_async_block(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
+ let lo = self.token.span;
+ self.expect_keyword(kw::Async)?;
+ let capture_clause = self.parse_capture_clause()?;
+ let (iattrs, body) = self.parse_inner_attrs_and_block()?;
+ attrs.extend(iattrs);
+ let kind = ExprKind::Async(capture_clause, DUMMY_NODE_ID, body);
+ Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
+ }
+
+ fn is_async_block(&self) -> bool {
+ self.token.is_keyword(kw::Async)
+ && ((
+ // `async move {`
+ self.is_keyword_ahead(1, &[kw::Move])
+ && self.look_ahead(2, |t| *t == token::OpenDelim(Delimiter::Brace))
+ ) || (
+ // `async {`
+ self.look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Brace))
+ ))
+ }
+
+ fn is_certainly_not_a_block(&self) -> bool {
+ self.look_ahead(1, |t| t.is_ident())
+ && (
+ // `{ ident, ` cannot start a block.
+ self.look_ahead(2, |t| t == &token::Comma)
+ || self.look_ahead(2, |t| t == &token::Colon)
+ && (
+ // `{ ident: token, ` cannot start a block.
+ self.look_ahead(4, |t| t == &token::Comma) ||
+ // `{ ident: ` cannot start a block unless it's a type ascription `ident: Type`.
+ self.look_ahead(3, |t| !t.can_begin_type())
+ )
+ )
+ }
+
+ fn maybe_parse_struct_expr(
+ &mut self,
+ qself: Option<&ast::QSelf>,
+ path: &ast::Path,
+ attrs: &AttrVec,
+ ) -> Option<PResult<'a, P<Expr>>> {
+ let struct_allowed = !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL);
+ if struct_allowed || self.is_certainly_not_a_block() {
+ if let Err(err) = self.expect(&token::OpenDelim(Delimiter::Brace)) {
+ return Some(Err(err));
+ }
+ let expr = self.parse_struct_expr(qself.cloned(), path.clone(), attrs.clone(), true);
+ if let (Ok(expr), false) = (&expr, struct_allowed) {
+ // This is a struct literal, but we can't accept it here.
+ self.error_struct_lit_not_allowed_here(path.span, expr.span);
+ }
+ return Some(expr);
+ }
+ None
+ }
+
+ fn error_struct_lit_not_allowed_here(&self, lo: Span, sp: Span) {
+ self.struct_span_err(sp, "struct literals are not allowed here")
+ .multipart_suggestion(
+ "surround the struct literal with parentheses",
+ vec![(lo.shrink_to_lo(), "(".to_string()), (sp.shrink_to_hi(), ")".to_string())],
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+ pub(super) fn parse_struct_fields(
+ &mut self,
+ pth: ast::Path,
+ recover: bool,
+ close_delim: Delimiter,
+ ) -> PResult<'a, (Vec<ExprField>, ast::StructRest, bool)> {
+ let mut fields = Vec::new();
+ let mut base = ast::StructRest::None;
+ let mut recover_async = false;
+
+ let mut async_block_err = |e: &mut Diagnostic, span: Span| {
+ recover_async = true;
+ e.span_label(span, "`async` blocks are only allowed in Rust 2018 or later");
+ e.help_use_latest_edition();
+ };
+
+ while self.token != token::CloseDelim(close_delim) {
+ if self.eat(&token::DotDot) {
+ let exp_span = self.prev_token.span;
+ // We permit `.. }` on the left-hand side of a destructuring assignment.
+ if self.check(&token::CloseDelim(close_delim)) {
+ base = ast::StructRest::Rest(self.prev_token.span.shrink_to_hi());
+ break;
+ }
+ match self.parse_expr() {
+ Ok(e) => base = ast::StructRest::Base(e),
+ Err(mut e) if recover => {
+ e.emit();
+ self.recover_stmt();
+ }
+ Err(e) => return Err(e),
+ }
+ self.recover_struct_comma_after_dotdot(exp_span);
+ break;
+ }
+
+ let recovery_field = self.find_struct_error_after_field_looking_code();
+ let parsed_field = match self.parse_expr_field() {
+ Ok(f) => Some(f),
+ Err(mut e) => {
+ if pth == kw::Async {
+ async_block_err(&mut e, pth.span);
+ } else {
+ e.span_label(pth.span, "while parsing this struct");
+ }
+ e.emit();
+
+ // If the next token is a comma, then try to parse
+ // what comes next as additional fields, rather than
+ // bailing out until next `}`.
+ if self.token != token::Comma {
+ self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
+ if self.token != token::Comma {
+ break;
+ }
+ }
+ None
+ }
+ };
+
+ let is_shorthand = parsed_field.as_ref().map_or(false, |f| f.is_shorthand);
+ // A shorthand field can be turned into a full field with `:`.
+ // We should point this out.
+ self.check_or_expected(!is_shorthand, TokenType::Token(token::Colon));
+
+ match self.expect_one_of(&[token::Comma], &[token::CloseDelim(close_delim)]) {
+ Ok(_) => {
+ if let Some(f) = parsed_field.or(recovery_field) {
+ // Only include the field if there's no parse error for the field name.
+ fields.push(f);
+ }
+ }
+ Err(mut e) => {
+ if pth == kw::Async {
+ async_block_err(&mut e, pth.span);
+ } else {
+ e.span_label(pth.span, "while parsing this struct");
+ if let Some(f) = recovery_field {
+ fields.push(f);
+ e.span_suggestion(
+ self.prev_token.span.shrink_to_hi(),
+ "try adding a comma",
+ ",",
+ Applicability::MachineApplicable,
+ );
+ } else if is_shorthand
+ && (AssocOp::from_token(&self.token).is_some()
+ || matches!(&self.token.kind, token::OpenDelim(_))
+ || self.token.kind == token::Dot)
+ {
+ // Looks like they tried to write a shorthand, complex expression.
+ let ident = parsed_field.expect("is_shorthand implies Some").ident;
+ e.span_suggestion(
+ ident.span.shrink_to_lo(),
+ "try naming a field",
+ &format!("{ident}: "),
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ if !recover {
+ return Err(e);
+ }
+ e.emit();
+ self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
+ self.eat(&token::Comma);
+ }
+ }
+ }
+ Ok((fields, base, recover_async))
+ }
+
+ /// Precondition: already parsed the '{'.
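+ ///
+ /// For illustration, with the `{` already consumed, this parses the rest of:
+ ///
+ /// ```ignore (illustrative)
+ /// Foo { x: 1, y, ..base }
+ /// ```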
+ pub(super) fn parse_struct_expr(
+ &mut self,
+ qself: Option<ast::QSelf>,
+ pth: ast::Path,
+ attrs: AttrVec,
+ recover: bool,
+ ) -> PResult<'a, P<Expr>> {
+ let lo = pth.span;
+ let (fields, base, recover_async) =
+ self.parse_struct_fields(pth.clone(), recover, Delimiter::Brace)?;
+ let span = lo.to(self.token.span);
+ self.expect(&token::CloseDelim(Delimiter::Brace))?;
+ let expr = if recover_async {
+ ExprKind::Err
+ } else {
+ ExprKind::Struct(P(ast::StructExpr { qself, path: pth, fields, rest: base }))
+ };
+ Ok(self.mk_expr(span, expr, attrs))
+ }
+
+ /// Used in case of an error after field-looking code: `S { foo: () with a }`.
+ fn find_struct_error_after_field_looking_code(&self) -> Option<ExprField> {
+ match self.token.ident() {
+ Some((ident, is_raw))
+ if (is_raw || !ident.is_reserved())
+ && self.look_ahead(1, |t| *t == token::Colon) =>
+ {
+ Some(ast::ExprField {
+ ident,
+ span: self.token.span,
+ expr: self.mk_expr_err(self.token.span),
+ is_shorthand: false,
+ attrs: AttrVec::new(),
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
+ })
+ }
+ _ => None,
+ }
+ }
+
+ fn recover_struct_comma_after_dotdot(&mut self, span: Span) {
+ if self.token != token::Comma {
+ return;
+ }
+ self.struct_span_err(
+ span.to(self.prev_token.span),
+ "cannot use a comma after the base struct",
+ )
+ .span_suggestion_short(
+ self.token.span,
+ "remove this comma",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .note("the base struct must always be the last field")
+ .emit();
+ self.recover_stmt();
+ }
+
+ /// Parses `ident (COLON expr)?`.
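+ ///
+ /// For illustration, `x` below uses the shorthand form and `y` the full form
+ /// (`y_value` stands in for any expression):
+ ///
+ /// ```ignore (illustrative)
+ /// Foo { x, y: y_value }
+ /// ```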
+ fn parse_expr_field(&mut self) -> PResult<'a, ExprField> {
+ let attrs = self.parse_outer_attributes()?;
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let lo = this.token.span;
+
+ // Check if a colon (or `=`) exists one token ahead; if not, we're parsing a field shorthand.
+ let is_shorthand = !this.look_ahead(1, |t| t == &token::Colon || t == &token::Eq);
+ let (ident, expr) = if is_shorthand {
+ // Mimic `x: x` for the `x` field shorthand.
+ let ident = this.parse_ident_common(false)?;
+ let path = ast::Path::from_ident(ident);
+ (ident, this.mk_expr(ident.span, ExprKind::Path(None, path), AttrVec::new()))
+ } else {
+ let ident = this.parse_field_name()?;
+ this.error_on_eq_field_init(ident);
+ this.bump(); // `:`
+ (ident, this.parse_expr()?)
+ };
+
+ Ok((
+ ast::ExprField {
+ ident,
+ span: lo.to(expr.span),
+ expr,
+ is_shorthand,
+ attrs: attrs.into(),
+ id: DUMMY_NODE_ID,
+ is_placeholder: false,
+ },
+ TrailingToken::MaybeComma,
+ ))
+ })
+ }
+
+ /// Checks for `=`, which means the source incorrectly attempts to
+ /// initialize a field with an `=` rather than a colon.
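+ ///
+ /// For illustration, this fires on `Foo { x = 1 }` and suggests `Foo { x: 1 }`.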
+ fn error_on_eq_field_init(&self, field_name: Ident) {
+ if self.token != token::Eq {
+ return;
+ }
+
+ self.struct_span_err(self.token.span, "expected `:`, found `=`")
+ .span_suggestion(
+ field_name.span.shrink_to_hi().to(self.token.span),
+ "replace equals symbol with a colon",
+ ":",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+ fn err_dotdotdot_syntax(&self, span: Span) {
+ self.struct_span_err(span, "unexpected token: `...`")
+ .span_suggestion(
+ span,
+ "use `..` for an exclusive range",
+ "..",
+ Applicability::MaybeIncorrect,
+ )
+ .span_suggestion(
+ span,
+ "or `..=` for an inclusive range",
+ "..=",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+
+ fn err_larrow_operator(&self, span: Span) {
+ self.struct_span_err(span, "unexpected token: `<-`")
+ .span_suggestion(
+ span,
+ "if you meant to write a comparison against a negative value, add a \
+ space in between `<` and `-`",
+ "< -",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+
+ fn mk_assign_op(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind {
+ ExprKind::AssignOp(binop, lhs, rhs)
+ }
+
+ fn mk_range(
+ &mut self,
+ start: Option<P<Expr>>,
+ end: Option<P<Expr>>,
+ limits: RangeLimits,
+ ) -> ExprKind {
+ if end.is_none() && limits == RangeLimits::Closed {
+ self.inclusive_range_with_incorrect_end(self.prev_token.span);
+ ExprKind::Err
+ } else {
+ ExprKind::Range(start, end, limits)
+ }
+ }
+
+ fn mk_unary(&self, unop: UnOp, expr: P<Expr>) -> ExprKind {
+ ExprKind::Unary(unop, expr)
+ }
+
+ fn mk_binary(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind {
+ ExprKind::Binary(binop, lhs, rhs)
+ }
+
+ fn mk_index(&self, expr: P<Expr>, idx: P<Expr>) -> ExprKind {
+ ExprKind::Index(expr, idx)
+ }
+
+ fn mk_call(&self, f: P<Expr>, args: Vec<P<Expr>>) -> ExprKind {
+ ExprKind::Call(f, args)
+ }
+
+ fn mk_await_expr(&mut self, self_arg: P<Expr>, lo: Span) -> P<Expr> {
+ let span = lo.to(self.prev_token.span);
+ let await_expr = self.mk_expr(span, ExprKind::Await(self_arg), AttrVec::new());
+ self.recover_from_await_method_call();
+ await_expr
+ }
+
+ pub(crate) fn mk_expr(&self, span: Span, kind: ExprKind, attrs: AttrVec) -> P<Expr> {
+ P(Expr { kind, span, attrs, id: DUMMY_NODE_ID, tokens: None })
+ }
+
+ pub(super) fn mk_expr_err(&self, span: Span) -> P<Expr> {
+ self.mk_expr(span, ExprKind::Err, AttrVec::new())
+ }
+
+ /// Creates an expression span, ensuring the span of the parent node
+ /// covers the spans of the lhs and rhs, including the attributes.
+ fn mk_expr_sp(&self, lhs: &P<Expr>, lhs_span: Span, rhs_span: Span) -> Span {
+ lhs.attrs
+ .iter()
+ .find(|a| a.style == AttrStyle::Outer)
+ .map_or(lhs_span, |a| a.span)
+ .to(rhs_span)
+ }
+
+ fn collect_tokens_for_expr(
+ &mut self,
+ attrs: AttrWrapper,
+ f: impl FnOnce(&mut Self, Vec<ast::Attribute>) -> PResult<'a, P<Expr>>,
+ ) -> PResult<'a, P<Expr>> {
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let res = f(this, attrs)?;
+ let trailing = if this.restrictions.contains(Restrictions::STMT_EXPR)
+ && this.token.kind == token::Semi
+ {
+ TrailingToken::Semi
+ } else {
+ // FIXME - pass this through from the place where we know
+ // we need a comma, rather than assuming that `#[attr] expr,`
+ // always captures a trailing comma
+ TrailingToken::MaybeComma
+ };
+ Ok((res, trailing))
+ })
+ }
+}
diff --git a/compiler/rustc_parse/src/parser/generics.rs b/compiler/rustc_parse/src/parser/generics.rs
new file mode 100644
index 000000000..1acfd93d8
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/generics.rs
@@ -0,0 +1,350 @@
+use super::{ForceCollect, Parser, TrailingToken};
+
+use rustc_ast::token;
+use rustc_ast::{
+ self as ast, Attribute, GenericBounds, GenericParam, GenericParamKind, WhereClause,
+};
+use rustc_errors::{Applicability, PResult};
+use rustc_span::symbol::kw;
+
+impl<'a> Parser<'a> {
+ /// Parses bounds of a lifetime parameter `BOUND + BOUND + BOUND`, possibly with trailing `+`.
+ ///
+ /// ```text
+ /// BOUND = LT_BOUND (e.g., `'a`)
+ /// ```
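+ ///
+ /// For example, the `'b + 'c` in `fn f<'a: 'b + 'c>()`.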
+ fn parse_lt_param_bounds(&mut self) -> GenericBounds {
+ let mut lifetimes = Vec::new();
+ while self.check_lifetime() {
+ lifetimes.push(ast::GenericBound::Outlives(self.expect_lifetime()));
+
+ if !self.eat_plus() {
+ break;
+ }
+ }
+ lifetimes
+ }
+
+ /// Matches `typaram = IDENT (`?` unbound)? optbounds ( EQ ty )?`.
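+ ///
+ /// For illustration, this accepts a parameter such as (with `DefaultTy`
+ /// standing in for any default type):
+ ///
+ /// ```ignore (illustrative)
+ /// T: Clone + Default = DefaultTy
+ /// ```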
+ fn parse_ty_param(&mut self, preceding_attrs: Vec<Attribute>) -> PResult<'a, GenericParam> {
+ let ident = self.parse_ident()?;
+
+ // Parse optional colon and param bounds.
+ let mut colon_span = None;
+ let bounds = if self.eat(&token::Colon) {
+ colon_span = Some(self.prev_token.span);
+ self.parse_generic_bounds(colon_span)?
+ } else {
+ Vec::new()
+ };
+
+ let default = if self.eat(&token::Eq) { Some(self.parse_ty()?) } else { None };
+
+ Ok(GenericParam {
+ ident,
+ id: ast::DUMMY_NODE_ID,
+ attrs: preceding_attrs.into(),
+ bounds,
+ kind: GenericParamKind::Type { default },
+ is_placeholder: false,
+ colon_span,
+ })
+ }
+
+ pub(crate) fn parse_const_param(
+ &mut self,
+ preceding_attrs: Vec<Attribute>,
+ ) -> PResult<'a, GenericParam> {
+ let const_span = self.token.span;
+
+ self.expect_keyword(kw::Const)?;
+ let ident = self.parse_ident()?;
+ self.expect(&token::Colon)?;
+ let ty = self.parse_ty()?;
+
+ // Parse optional const generics default value.
+ let default = if self.eat(&token::Eq) { Some(self.parse_const_arg()?) } else { None };
+
+ Ok(GenericParam {
+ ident,
+ id: ast::DUMMY_NODE_ID,
+ attrs: preceding_attrs.into(),
+ bounds: Vec::new(),
+ kind: GenericParamKind::Const { ty, kw_span: const_span, default },
+ is_placeholder: false,
+ colon_span: None,
+ })
+ }
+
+ /// Parses a (possibly empty) list of lifetime and type parameters, possibly including
+ /// a trailing comma and erroneous trailing attributes.
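+ ///
+ /// For illustration, the parameters between `<` and `>` in:
+ ///
+ /// ```ignore (illustrative)
+ /// fn f<'a, T: Clone, const N: usize>() {}
+ /// ```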
+ pub(super) fn parse_generic_params(&mut self) -> PResult<'a, Vec<ast::GenericParam>> {
+ let mut params = Vec::new();
+ let mut done = false;
+ while !done {
+ let attrs = self.parse_outer_attributes()?;
+ let param =
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ if this.eat_keyword_noexpect(kw::SelfUpper) {
+ // `Self` as a generic param is invalid. Here we emit the diagnostic and continue parsing
+ // as if `Self` never existed.
+ this.struct_span_err(
+ this.prev_token.span,
+ "unexpected keyword `Self` in generic parameters",
+ )
+ .note("you cannot use `Self` as a generic parameter because it is reserved for associated items")
+ .emit();
+
+ this.eat(&token::Comma);
+ }
+
+ let param = if this.check_lifetime() {
+ let lifetime = this.expect_lifetime();
+ // Parse lifetime parameter.
+ let (colon_span, bounds) = if this.eat(&token::Colon) {
+ (Some(this.prev_token.span), this.parse_lt_param_bounds())
+ } else {
+ (None, Vec::new())
+ };
+ Some(ast::GenericParam {
+ ident: lifetime.ident,
+ id: lifetime.id,
+ attrs: attrs.into(),
+ bounds,
+ kind: ast::GenericParamKind::Lifetime,
+ is_placeholder: false,
+ colon_span,
+ })
+ } else if this.check_keyword(kw::Const) {
+ // Parse const parameter.
+ Some(this.parse_const_param(attrs)?)
+ } else if this.check_ident() {
+ // Parse type parameter.
+ Some(this.parse_ty_param(attrs)?)
+ } else if this.token.can_begin_type() {
+ // Trying to write an associated type bound? (#26271)
+ let snapshot = this.create_snapshot_for_diagnostic();
+ match this.parse_ty_where_predicate() {
+ Ok(where_predicate) => {
+ this.struct_span_err(
+ where_predicate.span(),
+ "bounds on associated types do not belong here",
+ )
+ .span_label(where_predicate.span(), "belongs in `where` clause")
+ .emit();
+ // FIXME - try to continue parsing other generics?
+ return Ok((None, TrailingToken::None));
+ }
+ Err(err) => {
+ err.cancel();
+ // FIXME - maybe we should overwrite 'self' outside of `collect_tokens`?
+ this.restore_snapshot(snapshot);
+ return Ok((None, TrailingToken::None));
+ }
+ }
+ } else {
+ // Check for trailing attributes and stop parsing.
+ if !attrs.is_empty() {
+ if !params.is_empty() {
+ this.struct_span_err(
+ attrs[0].span,
+ "trailing attribute after generic parameter",
+ )
+ .span_label(attrs[0].span, "attributes must go before parameters")
+ .emit();
+ } else {
+ this.struct_span_err(
+ attrs[0].span,
+ "attribute without generic parameters",
+ )
+ .span_label(
+ attrs[0].span,
+ "attributes are only permitted when preceding parameters",
+ )
+ .emit();
+ }
+ }
+ return Ok((None, TrailingToken::None));
+ };
+
+ if !this.eat(&token::Comma) {
+ done = true;
+ }
+ // We just ate the comma, so no need to use `TrailingToken`
+ Ok((param, TrailingToken::None))
+ })?;
+
+ if let Some(param) = param {
+ params.push(param);
+ } else {
+ break;
+ }
+ }
+ Ok(params)
+ }
+
+ /// Parses a set of optional generic type parameter declarations. Where
+ /// clauses are not parsed here, and must be added later via
+ /// `parse_where_clause()`.
+ ///
+ /// matches generics = ( ) | ( < > ) | ( < typaramseq ( , )? > ) | ( < lifetimes ( , )? > )
+ /// | ( < lifetimes , typaramseq ( , )? > )
+ /// where typaramseq = ( typaram ) | ( typaram , typaramseq )
+ pub(super) fn parse_generics(&mut self) -> PResult<'a, ast::Generics> {
+ let span_lo = self.token.span;
+ let (params, span) = if self.eat_lt() {
+ let params = self.parse_generic_params()?;
+ self.expect_gt()?;
+ (params, span_lo.to(self.prev_token.span))
+ } else {
+ (vec![], self.prev_token.span.shrink_to_hi())
+ };
+ Ok(ast::Generics {
+ params,
+ where_clause: WhereClause {
+ has_where_token: false,
+ predicates: Vec::new(),
+ span: self.prev_token.span.shrink_to_hi(),
+ },
+ span,
+ })
+ }
+
+ /// Parses an optional where-clause and places it in `generics`.
+ ///
+ /// ```ignore (only-for-syntax-highlight)
+ /// where T : Trait<U, V> + 'b, 'a : 'b
+ /// ```
+ pub(super) fn parse_where_clause(&mut self) -> PResult<'a, WhereClause> {
+ let mut where_clause = WhereClause {
+ has_where_token: false,
+ predicates: Vec::new(),
+ span: self.prev_token.span.shrink_to_hi(),
+ };
+
+ if !self.eat_keyword(kw::Where) {
+ return Ok(where_clause);
+ }
+ where_clause.has_where_token = true;
+ let lo = self.prev_token.span;
+
+ // We are considering adding generics to the `where` keyword as an alternative higher-rank
+ // parameter syntax (as in `where<'a>` or `where<T>`). To avoid that being a breaking
+ // change, we parse those generics now, but report an error.
+ if self.choose_generics_over_qpath(0) {
+ let generics = self.parse_generics()?;
+ self.struct_span_err(
+ generics.span,
+ "generic parameters on `where` clauses are reserved for future use",
+ )
+ .span_label(generics.span, "currently unsupported")
+ .emit();
+ }
+
+ loop {
+ let lo = self.token.span;
+ if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
+ let lifetime = self.expect_lifetime();
+ // Bounds starting with a colon are mandatory, but possibly empty.
+ self.expect(&token::Colon)?;
+ let bounds = self.parse_lt_param_bounds();
+ where_clause.predicates.push(ast::WherePredicate::RegionPredicate(
+ ast::WhereRegionPredicate {
+ span: lo.to(self.prev_token.span),
+ lifetime,
+ bounds,
+ },
+ ));
+ } else if self.check_type() {
+ where_clause.predicates.push(self.parse_ty_where_predicate()?);
+ } else {
+ break;
+ }
+
+ let prev_token = self.prev_token.span;
+ let ate_comma = self.eat(&token::Comma);
+
+ if self.eat_keyword_noexpect(kw::Where) {
+ let msg = "cannot define duplicate `where` clauses on an item";
+ let mut err = self.struct_span_err(self.token.span, msg);
+ err.span_label(lo, "previous `where` clause starts here");
+ err.span_suggestion_verbose(
+ prev_token.shrink_to_hi().to(self.prev_token.span),
+ "consider joining the two `where` clauses into one",
+ ",",
+ Applicability::MaybeIncorrect,
+ );
+ err.emit();
+ } else if !ate_comma {
+ break;
+ }
+ }
+
+ where_clause.span = lo.to(self.prev_token.span);
+ Ok(where_clause)
+ }
+
+ fn parse_ty_where_predicate(&mut self) -> PResult<'a, ast::WherePredicate> {
+ let lo = self.token.span;
+ // Parse optional `for<'a, 'b>`.
+ // This `for` is parsed greedily and applies to the whole predicate,
+ // the bounded type can have its own `for` applying only to it.
+ // Examples:
+ // * `for<'a> Trait1<'a>: Trait2<'a /* ok */>`
+ // * `(for<'a> Trait1<'a>): Trait2<'a /* not ok */>`
+ // * `for<'a> for<'b> Trait1<'a, 'b>: Trait2<'a /* ok */, 'b /* not ok */>`
+ let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
+
+ // Parse type with mandatory colon and (possibly empty) bounds,
+ // or with mandatory equality sign and the second type.
+ let ty = self.parse_ty_for_where_clause()?;
+ if self.eat(&token::Colon) {
+ let bounds = self.parse_generic_bounds(Some(self.prev_token.span))?;
+ Ok(ast::WherePredicate::BoundPredicate(ast::WhereBoundPredicate {
+ span: lo.to(self.prev_token.span),
+ bound_generic_params: lifetime_defs,
+ bounded_ty: ty,
+ bounds,
+ }))
+ // FIXME: Decide what should be used here, `=` or `==`.
+ // FIXME: We are just dropping the binders in lifetime_defs on the floor here.
+ } else if self.eat(&token::Eq) || self.eat(&token::EqEq) {
+ let rhs_ty = self.parse_ty()?;
+ Ok(ast::WherePredicate::EqPredicate(ast::WhereEqPredicate {
+ span: lo.to(self.prev_token.span),
+ lhs_ty: ty,
+ rhs_ty,
+ id: ast::DUMMY_NODE_ID,
+ }))
+ } else {
+ self.maybe_recover_bounds_doubled_colon(&ty)?;
+ self.unexpected()
+ }
+ }
+
+ pub(super) fn choose_generics_over_qpath(&self, start: usize) -> bool {
+ // There's an ambiguity between generic parameters and qualified paths in impls.
+ // If we see `<` it may start both, so we have to inspect some following tokens.
+ // The following combinations can only start generics,
+ // but not qualified paths (with one exception):
+ // `<` `>` - empty generic parameters
+ // `<` `#` - generic parameters with attributes
+ // `<` (LIFETIME|IDENT) `>` - single generic parameter
+ // `<` (LIFETIME|IDENT) `,` - first generic parameter in a list
+ // `<` (LIFETIME|IDENT) `:` - generic parameter with bounds
+ // `<` (LIFETIME|IDENT) `=` - generic parameter with a default
+ // `<` const - generic const parameter
+ // The only truly ambiguous case is
+ // `<` IDENT `>` `::` IDENT ...
+ // we disambiguate it in favor of generics (`impl<T> ::absolute::Path<T> { ... }`)
+ // because this is what is almost always expected in practice; qualified paths in impls
+ // (`impl <Type>::AssocTy { ... }`) aren't even allowed by the type checker at the moment.
+ self.look_ahead(start, |t| t == &token::Lt)
+ && (self.look_ahead(start + 1, |t| t == &token::Pound || t == &token::Gt)
+ || self.look_ahead(start + 1, |t| t.is_lifetime() || t.is_ident())
+ && self.look_ahead(start + 2, |t| {
+ matches!(t.kind, token::Gt | token::Comma | token::Colon | token::Eq)
+ })
+ || self.is_keyword_ahead(start + 1, &[kw::Const]))
+ }
+}
diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs
new file mode 100644
index 000000000..567072925
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/item.rs
@@ -0,0 +1,2426 @@
+use super::diagnostics::{dummy_arg, ConsumeClosingDelim, Error};
+use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
+use super::{AttrWrapper, FollowedByType, ForceCollect, Parser, PathStyle, TrailingToken};
+
+use rustc_ast::ast::*;
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter, TokenKind};
+use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree};
+use rustc_ast::{self as ast, AttrVec, Attribute, DUMMY_NODE_ID};
+use rustc_ast::{Async, Const, Defaultness, IsAuto, Mutability, Unsafe, UseTree, UseTreeKind};
+use rustc_ast::{BindingMode, Block, FnDecl, FnSig, Param, SelfKind};
+use rustc_ast::{EnumDef, FieldDef, Generics, TraitRef, Ty, TyKind, Variant, VariantData};
+use rustc_ast::{FnHeader, ForeignItem, Path, PathSegment, Visibility, VisibilityKind};
+use rustc_ast::{MacArgs, MacCall, MacDelimiter};
+use rustc_ast_pretty::pprust;
+use rustc_errors::{struct_span_err, Applicability, PResult, StashKey};
+use rustc_span::edition::Edition;
+use rustc_span::lev_distance::lev_distance;
+use rustc_span::source_map::{self, Span};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::DUMMY_SP;
+
+use std::convert::TryFrom;
+use std::mem;
+use tracing::debug;
+
+impl<'a> Parser<'a> {
+ /// Parses a source module as a crate. This is the main entry point for the parser.
+ pub fn parse_crate_mod(&mut self) -> PResult<'a, ast::Crate> {
+ let (attrs, items, spans) = self.parse_mod(&token::Eof)?;
+ Ok(ast::Crate { attrs, items, spans, id: DUMMY_NODE_ID, is_placeholder: false })
+ }
+
+ /// Parses a `mod <foo> { ... }` or `mod <foo>;` item.
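+ ///
+ /// For illustration:
+ ///
+ /// ```ignore (illustrative)
+ /// mod foo;               // parsed as `ModKind::Unloaded`
+ /// mod bar { fn f() {} }  // parsed as `ModKind::Loaded`
+ /// ```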
+ fn parse_item_mod(&mut self, attrs: &mut Vec<Attribute>) -> PResult<'a, ItemInfo> {
+ let unsafety = self.parse_unsafety();
+ self.expect_keyword(kw::Mod)?;
+ let id = self.parse_ident()?;
+ let mod_kind = if self.eat(&token::Semi) {
+ ModKind::Unloaded
+ } else {
+ self.expect(&token::OpenDelim(Delimiter::Brace))?;
+ let (mut inner_attrs, items, inner_span) =
+ self.parse_mod(&token::CloseDelim(Delimiter::Brace))?;
+ attrs.append(&mut inner_attrs);
+ ModKind::Loaded(items, Inline::Yes, inner_span)
+ };
+ Ok((id, ItemKind::Mod(unsafety, mod_kind)))
+ }
+
+ /// Parses the contents of a module (inner attributes followed by module items).
+ pub fn parse_mod(
+ &mut self,
+ term: &TokenKind,
+ ) -> PResult<'a, (Vec<Attribute>, Vec<P<Item>>, ModSpans)> {
+ let lo = self.token.span;
+ let attrs = self.parse_inner_attributes()?;
+
+ let post_attr_lo = self.token.span;
+ let mut items = vec![];
+ while let Some(item) = self.parse_item(ForceCollect::No)? {
+ items.push(item);
+ self.maybe_consume_incorrect_semicolon(&items);
+ }
+
+ if !self.eat(term) {
+ let token_str = super::token_descr(&self.token);
+ if !self.maybe_consume_incorrect_semicolon(&items) {
+ let msg = &format!("expected item, found {token_str}");
+ let mut err = self.struct_span_err(self.token.span, msg);
+ err.span_label(self.token.span, "expected item");
+ return Err(err);
+ }
+ }
+
+ let inject_use_span = post_attr_lo.data().with_hi(post_attr_lo.lo());
+ let mod_spans = ModSpans { inner_span: lo.to(self.prev_token.span), inject_use_span };
+ Ok((attrs, items, mod_spans))
+ }
+}
+
+pub(super) type ItemInfo = (Ident, ItemKind);
+
+impl<'a> Parser<'a> {
+ pub fn parse_item(&mut self, force_collect: ForceCollect) -> PResult<'a, Option<P<Item>>> {
+ let fn_parse_mode = FnParseMode { req_name: |_| true, req_body: true };
+ self.parse_item_(fn_parse_mode, force_collect).map(|i| i.map(P))
+ }
+
+ fn parse_item_(
+ &mut self,
+ fn_parse_mode: FnParseMode,
+ force_collect: ForceCollect,
+ ) -> PResult<'a, Option<Item>> {
+ let attrs = self.parse_outer_attributes()?;
+ self.parse_item_common(attrs, true, false, fn_parse_mode, force_collect)
+ }
+
+ pub(super) fn parse_item_common(
+ &mut self,
+ attrs: AttrWrapper,
+ mac_allowed: bool,
+ attrs_allowed: bool,
+ fn_parse_mode: FnParseMode,
+ force_collect: ForceCollect,
+ ) -> PResult<'a, Option<Item>> {
+ // Don't use `maybe_whole` so that we have precise control
+ // over when we bump the parser
+ if let token::Interpolated(nt) = &self.token.kind && let token::NtItem(item) = &**nt {
+ let mut item = item.clone();
+ self.bump();
+
+ attrs.prepend_to_nt_inner(&mut item.attrs);
+ return Ok(Some(item.into_inner()));
+ };
+
+ let mut unclosed_delims = vec![];
+ let item =
+ self.collect_tokens_trailing_token(attrs, force_collect, |this: &mut Self, attrs| {
+ let item =
+ this.parse_item_common_(attrs, mac_allowed, attrs_allowed, fn_parse_mode);
+ unclosed_delims.append(&mut this.unclosed_delims);
+ Ok((item?, TrailingToken::None))
+ })?;
+
+ self.unclosed_delims.append(&mut unclosed_delims);
+ Ok(item)
+ }
+
+ fn parse_item_common_(
+ &mut self,
+ mut attrs: Vec<Attribute>,
+ mac_allowed: bool,
+ attrs_allowed: bool,
+ fn_parse_mode: FnParseMode,
+ ) -> PResult<'a, Option<Item>> {
+ let lo = self.token.span;
+ let vis = self.parse_visibility(FollowedByType::No)?;
+ let mut def = self.parse_defaultness();
+ let kind =
+ self.parse_item_kind(&mut attrs, mac_allowed, lo, &vis, &mut def, fn_parse_mode)?;
+ if let Some((ident, kind)) = kind {
+ self.error_on_unconsumed_default(def, &kind);
+ let span = lo.to(self.prev_token.span);
+ let id = DUMMY_NODE_ID;
+ let item = Item { ident, attrs, id, kind, vis, span, tokens: None };
+ return Ok(Some(item));
+ }
+
+ // At this point, we have failed to parse an item.
+ self.error_on_unmatched_vis(&vis);
+ self.error_on_unmatched_defaultness(def);
+ if !attrs_allowed {
+ self.recover_attrs_no_item(&attrs)?;
+ }
+ Ok(None)
+ }
+
+ /// Errors in case a non-inherited visibility was parsed but no item followed.
+ fn error_on_unmatched_vis(&self, vis: &Visibility) {
+ if let VisibilityKind::Inherited = vis.kind {
+ return;
+ }
+ let vs = pprust::vis_to_string(&vis);
+ let vs = vs.trim_end();
+ self.struct_span_err(vis.span, &format!("visibility `{vs}` is not followed by an item"))
+ .span_label(vis.span, "the visibility")
+ .help(&format!("you likely meant to define an item, e.g., `{vs} fn foo() {{}}`"))
+ .emit();
+ }
+
+ /// Errors in case a `default` was parsed but no item followed.
+ fn error_on_unmatched_defaultness(&self, def: Defaultness) {
+ if let Defaultness::Default(sp) = def {
+ self.struct_span_err(sp, "`default` is not followed by an item")
+ .span_label(sp, "the `default` qualifier")
+ .note("only `fn`, `const`, `type`, or `impl` items may be prefixed by `default`")
+ .emit();
+ }
+ }
+
+ /// Errors in case `default` was parsed in an inappropriate context.
+ fn error_on_unconsumed_default(&self, def: Defaultness, kind: &ItemKind) {
+ if let Defaultness::Default(span) = def {
+ let msg = format!("{} {} cannot be `default`", kind.article(), kind.descr());
+ self.struct_span_err(span, &msg)
+ .span_label(span, "`default` because of this")
+ .note("only associated `fn`, `const`, and `type` items can be `default`")
+ .emit();
+ }
+ }
+
+ /// Parses one of the items allowed by the flags.
+ fn parse_item_kind(
+ &mut self,
+ attrs: &mut Vec<Attribute>,
+ macros_allowed: bool,
+ lo: Span,
+ vis: &Visibility,
+ def: &mut Defaultness,
+ fn_parse_mode: FnParseMode,
+ ) -> PResult<'a, Option<ItemInfo>> {
+ let def_final = def == &Defaultness::Final;
+ let mut def = || mem::replace(def, Defaultness::Final);
+
+ let info = if self.eat_keyword(kw::Use) {
+ self.parse_use_item()?
+ } else if self.check_fn_front_matter(def_final) {
+ // FUNCTION ITEM
+ let (ident, sig, generics, body) = self.parse_fn(attrs, fn_parse_mode, lo, vis)?;
+ (ident, ItemKind::Fn(Box::new(Fn { defaultness: def(), sig, generics, body })))
+ } else if self.eat_keyword(kw::Extern) {
+ if self.eat_keyword(kw::Crate) {
+ // EXTERN CRATE
+ self.parse_item_extern_crate()?
+ } else {
+ // EXTERN BLOCK
+ self.parse_item_foreign_mod(attrs, Unsafe::No)?
+ }
+ } else if self.is_unsafe_foreign_mod() {
+ // EXTERN BLOCK
+ let unsafety = self.parse_unsafety();
+ self.expect_keyword(kw::Extern)?;
+ self.parse_item_foreign_mod(attrs, unsafety)?
+ } else if self.is_static_global() {
+ // STATIC ITEM
+ self.bump(); // `static`
+ let m = self.parse_mutability();
+ let (ident, ty, expr) = self.parse_item_global(Some(m))?;
+ (ident, ItemKind::Static(ty, m, expr))
+ } else if let Const::Yes(const_span) = self.parse_constness() {
+ // CONST ITEM
+ if self.token.is_keyword(kw::Impl) {
+ // recover from `const impl`, suggest `impl const`
+ self.recover_const_impl(const_span, attrs, def())?
+ } else {
+ self.recover_const_mut(const_span);
+ let (ident, ty, expr) = self.parse_item_global(None)?;
+ (ident, ItemKind::Const(def(), ty, expr))
+ }
+ } else if self.check_keyword(kw::Trait) || self.check_auto_or_unsafe_trait_item() {
+ // TRAIT ITEM
+ self.parse_item_trait(attrs, lo)?
+ } else if self.check_keyword(kw::Impl)
+ || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Impl])
+ {
+ // IMPL ITEM
+ self.parse_item_impl(attrs, def())?
+ } else if self.check_keyword(kw::Mod)
+ || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Mod])
+ {
+ // MODULE ITEM
+ self.parse_item_mod(attrs)?
+ } else if self.eat_keyword(kw::Type) {
+ // TYPE ITEM
+ self.parse_type_alias(def())?
+ } else if self.eat_keyword(kw::Enum) {
+ // ENUM ITEM
+ self.parse_item_enum()?
+ } else if self.eat_keyword(kw::Struct) {
+ // STRUCT ITEM
+ self.parse_item_struct()?
+ } else if self.is_kw_followed_by_ident(kw::Union) {
+ // UNION ITEM
+ self.bump(); // `union`
+ self.parse_item_union()?
+ } else if self.eat_keyword(kw::Macro) {
+ // MACROS 2.0 ITEM
+ self.parse_item_decl_macro(lo)?
+ } else if let IsMacroRulesItem::Yes { has_bang } = self.is_macro_rules_item() {
+ // MACRO_RULES ITEM
+ self.parse_item_macro_rules(vis, has_bang)?
+ } else if self.isnt_macro_invocation()
+ && (self.token.is_ident_named(sym::import) || self.token.is_ident_named(sym::using))
+ {
+ return self.recover_import_as_use();
+ } else if self.isnt_macro_invocation() && vis.kind.is_pub() {
+ self.recover_missing_kw_before_item()?;
+ return Ok(None);
+ } else if macros_allowed && self.check_path() {
+ // MACRO INVOCATION ITEM
+ (Ident::empty(), ItemKind::MacCall(self.parse_item_macro(vis)?))
+ } else {
+ return Ok(None);
+ };
+ Ok(Some(info))
+ }
+
+ fn recover_import_as_use(&mut self) -> PResult<'a, Option<(Ident, ItemKind)>> {
+ let span = self.token.span;
+ let token_name = super::token_descr(&self.token);
+ let snapshot = self.create_snapshot_for_diagnostic();
+ self.bump();
+ match self.parse_use_item() {
+ Ok(u) => {
+ self.struct_span_err(span, format!("expected item, found {token_name}"))
+ .span_suggestion_short(
+ span,
+ "items are imported using the `use` keyword",
+ "use",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ Ok(Some(u))
+ }
+ Err(e) => {
+ e.cancel();
+ self.restore_snapshot(snapshot);
+ Ok(None)
+ }
+ }
+ }
+
+ fn parse_use_item(&mut self) -> PResult<'a, (Ident, ItemKind)> {
+ let tree = self.parse_use_tree()?;
+ if let Err(mut e) = self.expect_semi() {
+ match tree.kind {
+ UseTreeKind::Glob => {
+ e.note("the wildcard token must be last on the path");
+ }
+ UseTreeKind::Nested(..) => {
+ e.note("glob-like brace syntax must be last on the path");
+ }
+ _ => (),
+ }
+ return Err(e);
+ }
+ Ok((Ident::empty(), ItemKind::Use(tree)))
+ }
+
+ /// When parsing a statement, would the start of a path be an item?
+ pub(super) fn is_path_start_item(&mut self) -> bool {
+ self.is_kw_followed_by_ident(kw::Union) // no: `union::b`, yes: `union U { .. }`
+ || self.check_auto_or_unsafe_trait_item() // no: `auto::b`, yes: `auto trait X { .. }`
+ || self.is_async_fn() // no(2015): `async::b`, yes: `async fn`
+ || matches!(self.is_macro_rules_item(), IsMacroRulesItem::Yes{..}) // no: `macro_rules::b`, yes: `macro_rules! mac`
+ }
+
+ /// Are we sure this could not possibly be a macro invocation?
+ fn isnt_macro_invocation(&mut self) -> bool {
+ self.check_ident() && self.look_ahead(1, |t| *t != token::Not && *t != token::ModSep)
+ }
+
+ /// Recover on encountering a struct or method definition where the user
+ /// forgot to add the `struct` or `fn` keyword after writing `pub`: `pub S {}`.
+ fn recover_missing_kw_before_item(&mut self) -> PResult<'a, ()> {
+ // Space between `pub` keyword and the identifier
+ //
+ // pub S {}
+ // ^^^ `sp` points here
+ let sp = self.prev_token.span.between(self.token.span);
+ let full_sp = self.prev_token.span.to(self.token.span);
+ let ident_sp = self.token.span;
+ if self.look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Brace)) {
+ // possible public struct definition where `struct` was forgotten
+ let ident = self.parse_ident().unwrap();
+ let msg = format!("add `struct` here to parse `{ident}` as a public struct");
+ let mut err = self.struct_span_err(sp, "missing `struct` for struct definition");
+ err.span_suggestion_short(
+ sp,
+ &msg,
+ " struct ",
+ Applicability::MaybeIncorrect, // speculative
+ );
+ Err(err)
+ } else if self.look_ahead(1, |t| *t == token::OpenDelim(Delimiter::Parenthesis)) {
+ let ident = self.parse_ident().unwrap();
+ self.bump(); // `(`
+ let kw_name = self.recover_first_param();
+ self.consume_block(Delimiter::Parenthesis, ConsumeClosingDelim::Yes);
+ let (kw, kw_name, ambiguous) = if self.check(&token::RArrow) {
+ self.eat_to_tokens(&[&token::OpenDelim(Delimiter::Brace)]);
+ self.bump(); // `{`
+ ("fn", kw_name, false)
+ } else if self.check(&token::OpenDelim(Delimiter::Brace)) {
+ self.bump(); // `{`
+ ("fn", kw_name, false)
+ } else if self.check(&token::Colon) {
+ let kw = "struct";
+ (kw, kw, false)
+ } else {
+ ("fn` or `struct", "function or struct", true)
+ };
+
+ let msg = format!("missing `{kw}` for {kw_name} definition");
+ let mut err = self.struct_span_err(sp, &msg);
+ if !ambiguous {
+ self.consume_block(Delimiter::Brace, ConsumeClosingDelim::Yes);
+ let suggestion =
+ format!("add `{kw}` here to parse `{ident}` as a public {kw_name}");
+ err.span_suggestion_short(
+ sp,
+ &suggestion,
+ format!(" {kw} "),
+ Applicability::MachineApplicable,
+ );
+ } else if let Ok(snippet) = self.span_to_snippet(ident_sp) {
+ err.span_suggestion(
+ full_sp,
+ "if you meant to call a macro, try",
+ format!("{}!", snippet),
+ // this is the `ambiguous` conditional branch
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.help(
+ "if you meant to call a macro, remove the `pub` \
+ and add a trailing `!` after the identifier",
+ );
+ }
+ Err(err)
+ } else if self.look_ahead(1, |t| *t == token::Lt) {
+ let ident = self.parse_ident().unwrap();
+ self.eat_to_tokens(&[&token::Gt]);
+ self.bump(); // `>`
+ let (kw, kw_name, ambiguous) = if self.eat(&token::OpenDelim(Delimiter::Parenthesis)) {
+ ("fn", self.recover_first_param(), false)
+ } else if self.check(&token::OpenDelim(Delimiter::Brace)) {
+ ("struct", "struct", false)
+ } else {
+ ("fn` or `struct", "function or struct", true)
+ };
+ let msg = format!("missing `{kw}` for {kw_name} definition");
+ let mut err = self.struct_span_err(sp, &msg);
+ if !ambiguous {
+ err.span_suggestion_short(
+ sp,
+ &format!("add `{kw}` here to parse `{ident}` as a public {kw_name}"),
+ format!(" {} ", kw),
+ Applicability::MachineApplicable,
+ );
+ }
+ Err(err)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Parses an item macro, e.g., `item!();`.
+ fn parse_item_macro(&mut self, vis: &Visibility) -> PResult<'a, MacCall> {
+ let path = self.parse_path(PathStyle::Mod)?; // `foo::bar`
+ self.expect(&token::Not)?; // `!`
+ match self.parse_mac_args() {
+ // `( .. )` or `[ .. ]` (followed by `;`), or `{ .. }`.
+ Ok(args) => {
+ self.eat_semi_for_macro_if_needed(&args);
+ self.complain_if_pub_macro(vis, false);
+ Ok(MacCall { path, args, prior_type_ascription: self.last_type_ascription })
+ }
+
+ Err(mut err) => {
+ // Maybe the user misspelled `macro_rules` (issue #91227)
+ if self.token.is_ident()
+ && path.segments.len() == 1
+ && lev_distance("macro_rules", &path.segments[0].ident.to_string(), 3).is_some()
+ {
+ err.span_suggestion(
+ path.span,
+ "perhaps you meant to define a macro",
+ "macro_rules",
+ Applicability::MachineApplicable,
+ );
+ }
+ Err(err)
+ }
+ }
+ }
+
+ /// Recover if we parsed attributes and expected an item but there was none.
+ fn recover_attrs_no_item(&mut self, attrs: &[Attribute]) -> PResult<'a, ()> {
+ let ([start @ end] | [start, .., end]) = attrs else {
+ return Ok(());
+ };
+ let msg = if end.is_doc_comment() {
+ "expected item after doc comment"
+ } else {
+ "expected item after attributes"
+ };
+ let mut err = self.struct_span_err(end.span, msg);
+ if end.is_doc_comment() {
+ err.span_label(end.span, "this doc comment doesn't document anything");
+ }
+ if end.meta_kind().is_some() {
+ if self.token.kind == TokenKind::Semi {
+ err.span_suggestion_verbose(
+ self.token.span,
+ "consider removing this semicolon",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ if let [.., penultimate, _] = attrs {
+ err.span_label(start.span.to(penultimate.span), "other attributes here");
+ }
+ Err(err)
+ }
+
+ fn is_async_fn(&self) -> bool {
+ self.token.is_keyword(kw::Async) && self.is_keyword_ahead(1, &[kw::Fn])
+ }
+
+ fn parse_polarity(&mut self) -> ast::ImplPolarity {
+ // Disambiguate `impl !Trait for Type { ... }` and `impl ! { ... }` for the never type.
+ if self.check(&token::Not) && self.look_ahead(1, |t| t.can_begin_type()) {
+ self.bump(); // `!`
+ ast::ImplPolarity::Negative(self.prev_token.span)
+ } else {
+ ast::ImplPolarity::Positive
+ }
+ }
+
+ /// Parses an implementation item.
+ ///
+ /// ```ignore (illustrative)
+ /// impl<'a, T> TYPE { /* impl items */ }
+ /// impl<'a, T> TRAIT for TYPE { /* impl items */ }
+ /// impl<'a, T> !TRAIT for TYPE { /* impl items */ }
+ /// impl<'a, T> const TRAIT for TYPE { /* impl items */ }
+ /// ```
+ ///
+ /// We actually parse a slightly more relaxed grammar for better error reporting and recovery.
+ /// ```ebnf
+ /// "impl" GENERICS "const"? "!"? TYPE "for"? (TYPE | "..") ("where" PREDICATES)? "{" BODY "}"
+ /// "impl" GENERICS "const"? "!"? TYPE ("where" PREDICATES)? "{" BODY "}"
+ /// ```
+ fn parse_item_impl(
+ &mut self,
+ attrs: &mut Vec<Attribute>,
+ defaultness: Defaultness,
+ ) -> PResult<'a, ItemInfo> {
+ let unsafety = self.parse_unsafety();
+ self.expect_keyword(kw::Impl)?;
+
+ // First, parse generic parameters if necessary.
+ let mut generics = if self.choose_generics_over_qpath(0) {
+ self.parse_generics()?
+ } else {
+ let mut generics = Generics::default();
+ // impl A for B {}
+ // /\ this is where `generics.span` should point when there are no type params.
+ generics.span = self.prev_token.span.shrink_to_hi();
+ generics
+ };
+
+ let constness = self.parse_constness();
+ if let Const::Yes(span) = constness {
+ self.sess.gated_spans.gate(sym::const_trait_impl, span);
+ }
+
+ let polarity = self.parse_polarity();
+
+ // Parse both types and traits as a type, then reinterpret if necessary.
+ let err_path = |span| ast::Path::from_ident(Ident::new(kw::Empty, span));
+ let ty_first = if self.token.is_keyword(kw::For) && self.look_ahead(1, |t| t != &token::Lt)
+ {
+ let span = self.prev_token.span.between(self.token.span);
+ self.struct_span_err(span, "missing trait in a trait impl")
+ .span_suggestion(
+ span,
+ "add a trait here",
+ " Trait ",
+ Applicability::HasPlaceholders,
+ )
+ .span_suggestion(
+ span.to(self.token.span),
+ "for an inherent impl, drop this `for`",
+ "",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ P(Ty {
+ kind: TyKind::Path(None, err_path(span)),
+ span,
+ id: DUMMY_NODE_ID,
+ tokens: None,
+ })
+ } else {
+ self.parse_ty_with_generics_recovery(&generics)?
+ };
+
+ // If `for` is missing we try to recover.
+ let has_for = self.eat_keyword(kw::For);
+ let missing_for_span = self.prev_token.span.between(self.token.span);
+
+ let ty_second = if self.token == token::DotDot {
+ // We need to report this error after `cfg` expansion for compatibility reasons
+ self.bump(); // `..`, do not add it to expected tokens
+ Some(self.mk_ty(self.prev_token.span, TyKind::Err))
+ } else if has_for || self.token.can_begin_type() {
+ Some(self.parse_ty()?)
+ } else {
+ None
+ };
+
+ generics.where_clause = self.parse_where_clause()?;
+
+ let impl_items = self.parse_item_list(attrs, |p| p.parse_impl_item(ForceCollect::No))?;
+
+ let item_kind = match ty_second {
+ Some(ty_second) => {
+ // impl Trait for Type
+ if !has_for {
+ self.struct_span_err(missing_for_span, "missing `for` in a trait impl")
+ .span_suggestion_short(
+ missing_for_span,
+ "add `for` here",
+ " for ",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+ let ty_first = ty_first.into_inner();
+ let path = match ty_first.kind {
+ // This notably includes paths passed through `ty` macro fragments (#46438).
+ TyKind::Path(None, path) => path,
+ _ => {
+ self.struct_span_err(ty_first.span, "expected a trait, found type").emit();
+ err_path(ty_first.span)
+ }
+ };
+ let trait_ref = TraitRef { path, ref_id: ty_first.id };
+
+ ItemKind::Impl(Box::new(Impl {
+ unsafety,
+ polarity,
+ defaultness,
+ constness,
+ generics,
+ of_trait: Some(trait_ref),
+ self_ty: ty_second,
+ items: impl_items,
+ }))
+ }
+ None => {
+ // impl Type
+ ItemKind::Impl(Box::new(Impl {
+ unsafety,
+ polarity,
+ defaultness,
+ constness,
+ generics,
+ of_trait: None,
+ self_ty: ty_first,
+ items: impl_items,
+ }))
+ }
+ };
+
+ Ok((Ident::empty(), item_kind))
+ }
+
+ fn parse_item_list<T>(
+ &mut self,
+ attrs: &mut Vec<Attribute>,
+ mut parse_item: impl FnMut(&mut Parser<'a>) -> PResult<'a, Option<Option<T>>>,
+ ) -> PResult<'a, Vec<T>> {
+ let open_brace_span = self.token.span;
+ self.expect(&token::OpenDelim(Delimiter::Brace))?;
+ attrs.append(&mut self.parse_inner_attributes()?);
+
+ let mut items = Vec::new();
+ while !self.eat(&token::CloseDelim(Delimiter::Brace)) {
+ if self.recover_doc_comment_before_brace() {
+ continue;
+ }
+ match parse_item(self) {
+ Ok(None) => {
+ // We have to bail or we'll potentially never make progress.
+ let non_item_span = self.token.span;
+ self.consume_block(Delimiter::Brace, ConsumeClosingDelim::Yes);
+ self.struct_span_err(non_item_span, "non-item in item list")
+ .span_label(open_brace_span, "item list starts here")
+ .span_label(non_item_span, "non-item starts here")
+ .span_label(self.prev_token.span, "item list ends here")
+ .emit();
+ break;
+ }
+ Ok(Some(item)) => items.extend(item),
+ Err(mut err) => {
+ self.consume_block(Delimiter::Brace, ConsumeClosingDelim::Yes);
+ err.span_label(open_brace_span, "while parsing this item list starting here")
+ .span_label(self.prev_token.span, "the item list ends here")
+ .emit();
+ break;
+ }
+ }
+ }
+ Ok(items)
+ }
+
+ /// Recover on a doc comment before `}`.
+ fn recover_doc_comment_before_brace(&mut self) -> bool {
+ if let token::DocComment(..) = self.token.kind {
+ if self.look_ahead(1, |tok| tok == &token::CloseDelim(Delimiter::Brace)) {
+ struct_span_err!(
+ self.diagnostic(),
+ self.token.span,
+ E0584,
+ "found a documentation comment that doesn't document anything",
+ )
+ .span_label(self.token.span, "this doc comment doesn't document anything")
+ .help(
+ "doc comments must come before what they document, maybe a \
+ comment was intended with `//`?",
+ )
+ .emit();
+ self.bump();
+ return true;
+ }
+ }
+ false
+ }
+
+ /// Parses defaultness (i.e., `default` or nothing).
+ fn parse_defaultness(&mut self) -> Defaultness {
+ // We are interested in `default` followed by another identifier.
+ // However, we must avoid keywords that occur as binary operators.
+ // Currently, the only applicable keyword is `as` (`default as Ty`).
+ if self.check_keyword(kw::Default)
+ && self.look_ahead(1, |t| t.is_non_raw_ident_where(|i| i.name != kw::As))
+ {
+ self.bump(); // `default`
+ Defaultness::Default(self.prev_token.uninterpolated_span())
+ } else {
+ Defaultness::Final
+ }
+ }
+
+ /// Is this an `(unsafe auto? | auto) trait` item?
+ fn check_auto_or_unsafe_trait_item(&mut self) -> bool {
+ // auto trait
+ self.check_keyword(kw::Auto) && self.is_keyword_ahead(1, &[kw::Trait])
+ // unsafe auto trait
+ || self.check_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Trait, kw::Auto])
+ }
+
+ /// Parses `unsafe? auto? trait Foo { ... }` or `trait Foo = Bar;`.
+ fn parse_item_trait(&mut self, attrs: &mut Vec<Attribute>, lo: Span) -> PResult<'a, ItemInfo> {
+ let unsafety = self.parse_unsafety();
+ // Parse optional `auto` prefix.
+ let is_auto = if self.eat_keyword(kw::Auto) { IsAuto::Yes } else { IsAuto::No };
+
+ self.expect_keyword(kw::Trait)?;
+ let ident = self.parse_ident()?;
+ let mut generics = self.parse_generics()?;
+
+ // Parse optional colon and supertrait bounds.
+ let had_colon = self.eat(&token::Colon);
+ let span_at_colon = self.prev_token.span;
+ let bounds = if had_colon {
+ self.parse_generic_bounds(Some(self.prev_token.span))?
+ } else {
+ Vec::new()
+ };
+
+ let span_before_eq = self.prev_token.span;
+ if self.eat(&token::Eq) {
+ // It's a trait alias.
+ if had_colon {
+ let span = span_at_colon.to(span_before_eq);
+ self.struct_span_err(span, "bounds are not allowed on trait aliases").emit();
+ }
+
+ let bounds = self.parse_generic_bounds(None)?;
+ generics.where_clause = self.parse_where_clause()?;
+ self.expect_semi()?;
+
+ let whole_span = lo.to(self.prev_token.span);
+ if is_auto == IsAuto::Yes {
+ let msg = "trait aliases cannot be `auto`";
+ self.struct_span_err(whole_span, msg).span_label(whole_span, msg).emit();
+ }
+ if let Unsafe::Yes(_) = unsafety {
+ let msg = "trait aliases cannot be `unsafe`";
+ self.struct_span_err(whole_span, msg).span_label(whole_span, msg).emit();
+ }
+
+ self.sess.gated_spans.gate(sym::trait_alias, whole_span);
+
+ Ok((ident, ItemKind::TraitAlias(generics, bounds)))
+ } else {
+ // It's a normal trait.
+ generics.where_clause = self.parse_where_clause()?;
+ let items = self.parse_item_list(attrs, |p| p.parse_trait_item(ForceCollect::No))?;
+ Ok((
+ ident,
+ ItemKind::Trait(Box::new(Trait { is_auto, unsafety, generics, bounds, items })),
+ ))
+ }
+ }
+
+ pub fn parse_impl_item(
+ &mut self,
+ force_collect: ForceCollect,
+ ) -> PResult<'a, Option<Option<P<AssocItem>>>> {
+ let fn_parse_mode = FnParseMode { req_name: |_| true, req_body: true };
+ self.parse_assoc_item(fn_parse_mode, force_collect)
+ }
+
+ pub fn parse_trait_item(
+ &mut self,
+ force_collect: ForceCollect,
+ ) -> PResult<'a, Option<Option<P<AssocItem>>>> {
+ let fn_parse_mode =
+ FnParseMode { req_name: |edition| edition >= Edition::Edition2018, req_body: false };
+ self.parse_assoc_item(fn_parse_mode, force_collect)
+ }
+
+ /// Parses associated items.
+ fn parse_assoc_item(
+ &mut self,
+ fn_parse_mode: FnParseMode,
+ force_collect: ForceCollect,
+ ) -> PResult<'a, Option<Option<P<AssocItem>>>> {
+ Ok(self.parse_item_(fn_parse_mode, force_collect)?.map(
+ |Item { attrs, id, span, vis, ident, kind, tokens }| {
+ let kind = match AssocItemKind::try_from(kind) {
+ Ok(kind) => kind,
+ Err(kind) => match kind {
+ ItemKind::Static(a, _, b) => {
+ self.struct_span_err(span, "associated `static` items are not allowed")
+ .emit();
+ AssocItemKind::Const(Defaultness::Final, a, b)
+ }
+ _ => return self.error_bad_item_kind(span, &kind, "`trait`s or `impl`s"),
+ },
+ };
+ Some(P(Item { attrs, id, span, vis, ident, kind, tokens }))
+ },
+ ))
+ }
+
+ /// Parses a `type` alias with the following grammar:
+ /// ```ebnf
+ /// TypeAlias = "type" Ident Generics {":" GenericBounds}? {"=" Ty}? ";" ;
+ /// ```
+ /// The `"type"` has already been eaten.
+ fn parse_type_alias(&mut self, defaultness: Defaultness) -> PResult<'a, ItemInfo> {
+ let ident = self.parse_ident()?;
+ let mut generics = self.parse_generics()?;
+
+ // Parse optional colon and param bounds.
+ let bounds =
+ if self.eat(&token::Colon) { self.parse_generic_bounds(None)? } else { Vec::new() };
+ let before_where_clause = self.parse_where_clause()?;
+
+ let ty = if self.eat(&token::Eq) { Some(self.parse_ty()?) } else { None };
+
+ let after_where_clause = self.parse_where_clause()?;
+
+ let where_clauses = (
+ TyAliasWhereClause(before_where_clause.has_where_token, before_where_clause.span),
+ TyAliasWhereClause(after_where_clause.has_where_token, after_where_clause.span),
+ );
+ let where_predicates_split = before_where_clause.predicates.len();
+ let mut predicates = before_where_clause.predicates;
+ predicates.extend(after_where_clause.predicates.into_iter());
+ let where_clause = WhereClause {
+ has_where_token: before_where_clause.has_where_token
+ || after_where_clause.has_where_token,
+ predicates,
+ span: DUMMY_SP,
+ };
+ generics.where_clause = where_clause;
+
+ self.expect_semi()?;
+
+ Ok((
+ ident,
+ ItemKind::TyAlias(Box::new(TyAlias {
+ defaultness,
+ generics,
+ where_clauses,
+ where_predicates_split,
+ bounds,
+ ty,
+ })),
+ ))
+ }
+
+ /// Parses a `UseTree`.
+ ///
+ /// ```text
+ /// USE_TREE = [`::`] `*` |
+ /// [`::`] `{` USE_TREE_LIST `}` |
+ /// PATH `::` `*` |
+ /// PATH `::` `{` USE_TREE_LIST `}` |
+ /// PATH [`as` IDENT]
+ /// ```
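+ ///
+ /// For illustration, each `use` below contains a single `UseTree`:
+ ///
+ /// ```ignore (illustrative)
+ /// use std::collections::HashMap;
+ /// use std::io::{self, Read};
+ /// use std::fmt::Result as FmtResult;
+ /// use crate::module::*;
+ /// ```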
+ fn parse_use_tree(&mut self) -> PResult<'a, UseTree> {
+ let lo = self.token.span;
+
+ let mut prefix = ast::Path { segments: Vec::new(), span: lo.shrink_to_lo(), tokens: None };
+ let kind = if self.check(&token::OpenDelim(Delimiter::Brace))
+ || self.check(&token::BinOp(token::Star))
+ || self.is_import_coupler()
+ {
+ // `use *;` or `use ::*;` or `use {...};` or `use ::{...};`
+ let mod_sep_ctxt = self.token.span.ctxt();
+ if self.eat(&token::ModSep) {
+ prefix
+ .segments
+ .push(PathSegment::path_root(lo.shrink_to_lo().with_ctxt(mod_sep_ctxt)));
+ }
+
+ self.parse_use_tree_glob_or_nested()?
+ } else {
+ // `use path::*;` or `use path::{...};` or `use path;` or `use path as bar;`
+ prefix = self.parse_path(PathStyle::Mod)?;
+
+ if self.eat(&token::ModSep) {
+ self.parse_use_tree_glob_or_nested()?
+ } else {
+ UseTreeKind::Simple(self.parse_rename()?, DUMMY_NODE_ID, DUMMY_NODE_ID)
+ }
+ };
+
+ Ok(UseTree { prefix, kind, span: lo.to(self.prev_token.span) })
+ }
+
+ /// Parses `*` or `{...}`.
+ fn parse_use_tree_glob_or_nested(&mut self) -> PResult<'a, UseTreeKind> {
+ Ok(if self.eat(&token::BinOp(token::Star)) {
+ UseTreeKind::Glob
+ } else {
+ UseTreeKind::Nested(self.parse_use_tree_list()?)
+ })
+ }
+
+ /// Parses a `UseTreeKind::Nested(list)`.
+ ///
+ /// ```text
+ /// USE_TREE_LIST = Ø | (USE_TREE `,`)* USE_TREE [`,`]
+ /// ```
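+    ///
+    /// For illustration (examples ours), `{}`, `{a}`, and `{a, b::c,}` all match
+    /// this production, including the optional trailing comma.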
+ fn parse_use_tree_list(&mut self) -> PResult<'a, Vec<(UseTree, ast::NodeId)>> {
+ self.parse_delim_comma_seq(Delimiter::Brace, |p| Ok((p.parse_use_tree()?, DUMMY_NODE_ID)))
+ .map(|(r, _)| r)
+ }
+
+ fn parse_rename(&mut self) -> PResult<'a, Option<Ident>> {
+ if self.eat_keyword(kw::As) { self.parse_ident_or_underscore().map(Some) } else { Ok(None) }
+ }
+
+ fn parse_ident_or_underscore(&mut self) -> PResult<'a, Ident> {
+ match self.token.ident() {
+ Some((ident @ Ident { name: kw::Underscore, .. }, false)) => {
+ self.bump();
+ Ok(ident)
+ }
+ _ => self.parse_ident(),
+ }
+ }
+
+ /// Parses `extern crate` links.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (illustrative)
+ /// extern crate foo;
+ /// extern crate bar as foo;
+ /// ```
+ fn parse_item_extern_crate(&mut self) -> PResult<'a, ItemInfo> {
+ // Accept `extern crate name-like-this` for better diagnostics
+ let orig_name = self.parse_crate_name_with_dashes()?;
+ let (item_name, orig_name) = if let Some(rename) = self.parse_rename()? {
+ (rename, Some(orig_name.name))
+ } else {
+ (orig_name, None)
+ };
+ self.expect_semi()?;
+ Ok((item_name, ItemKind::ExternCrate(orig_name)))
+ }
+
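+    /// Parses a crate name, additionally accepting dash-separated names such as
+    /// `extern crate foo-bar;` for recovery; the parts are glued into `foo_bar`
+    /// and a machine-applicable suggestion replacing each `-` with `_` is emitted.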
+ fn parse_crate_name_with_dashes(&mut self) -> PResult<'a, Ident> {
+ let error_msg = "crate name using dashes are not valid in `extern crate` statements";
+ let suggestion_msg = "if the original crate name uses dashes you need to use underscores \
+ in the code";
+ let mut ident = if self.token.is_keyword(kw::SelfLower) {
+ self.parse_path_segment_ident()
+ } else {
+ self.parse_ident()
+ }?;
+ let mut idents = vec![];
+ let mut replacement = vec![];
+ let mut fixed_crate_name = false;
+ // Accept `extern crate name-like-this` for better diagnostics.
+ let dash = token::BinOp(token::BinOpToken::Minus);
+ if self.token == dash {
+ // Do not include `-` as part of the expected tokens list.
+ while self.eat(&dash) {
+ fixed_crate_name = true;
+ replacement.push((self.prev_token.span, "_".to_string()));
+ idents.push(self.parse_ident()?);
+ }
+ }
+ if fixed_crate_name {
+ let fixed_name_sp = ident.span.to(idents.last().unwrap().span);
+ let mut fixed_name = ident.name.to_string();
+ for part in idents {
+ fixed_name.push_str(&format!("_{}", part.name));
+ }
+ ident = Ident::from_str_and_span(&fixed_name, fixed_name_sp);
+
+ self.struct_span_err(fixed_name_sp, error_msg)
+ .span_label(fixed_name_sp, "dash-separated idents are not valid")
+ .multipart_suggestion(suggestion_msg, replacement, Applicability::MachineApplicable)
+ .emit();
+ }
+ Ok(ident)
+ }
+
+    /// Parses an `extern` block for a foreign module, with an optional ABI.
+ ///
+ /// `extern` is expected to have been consumed before calling this method.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (only-for-syntax-highlight)
+ /// extern "C" {}
+ /// extern {}
+ /// ```
+ fn parse_item_foreign_mod(
+ &mut self,
+ attrs: &mut Vec<Attribute>,
+ mut unsafety: Unsafe,
+ ) -> PResult<'a, ItemInfo> {
+ let abi = self.parse_abi(); // ABI?
+ if unsafety == Unsafe::No
+ && self.token.is_keyword(kw::Unsafe)
+ && self.look_ahead(1, |t| t.kind == token::OpenDelim(Delimiter::Brace))
+ {
+ let mut err = self.expect(&token::OpenDelim(Delimiter::Brace)).unwrap_err();
+ err.emit();
+ unsafety = Unsafe::Yes(self.token.span);
+ self.eat_keyword(kw::Unsafe);
+ }
+ let module = ast::ForeignMod {
+ unsafety,
+ abi,
+ items: self.parse_item_list(attrs, |p| p.parse_foreign_item(ForceCollect::No))?,
+ };
+ Ok((Ident::empty(), ItemKind::ForeignMod(module)))
+ }
+
+ /// Parses a foreign item (one in an `extern { ... }` block).
+ pub fn parse_foreign_item(
+ &mut self,
+ force_collect: ForceCollect,
+ ) -> PResult<'a, Option<Option<P<ForeignItem>>>> {
+ let fn_parse_mode = FnParseMode { req_name: |_| true, req_body: false };
+ Ok(self.parse_item_(fn_parse_mode, force_collect)?.map(
+ |Item { attrs, id, span, vis, ident, kind, tokens }| {
+ let kind = match ForeignItemKind::try_from(kind) {
+ Ok(kind) => kind,
+ Err(kind) => match kind {
+ ItemKind::Const(_, a, b) => {
+ self.error_on_foreign_const(span, ident);
+ ForeignItemKind::Static(a, Mutability::Not, b)
+ }
+ _ => return self.error_bad_item_kind(span, &kind, "`extern` blocks"),
+ },
+ };
+ Some(P(Item { attrs, id, span, vis, ident, kind, tokens }))
+ },
+ ))
+ }
+
+ fn error_bad_item_kind<T>(&self, span: Span, kind: &ItemKind, ctx: &str) -> Option<T> {
+ let span = self.sess.source_map().guess_head_span(span);
+ let descr = kind.descr();
+ self.struct_span_err(span, &format!("{descr} is not supported in {ctx}"))
+ .help(&format!("consider moving the {descr} out to a nearby module scope"))
+ .emit();
+ None
+ }
+
+ fn error_on_foreign_const(&self, span: Span, ident: Ident) {
+ self.struct_span_err(ident.span, "extern items cannot be `const`")
+ .span_suggestion(
+ span.with_hi(ident.span.lo()),
+ "try using a static value",
+ "static ",
+ Applicability::MachineApplicable,
+ )
+ .note("for more information, visit https://doc.rust-lang.org/std/keyword.extern.html")
+ .emit();
+ }
+
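+    /// Does the token sequence ahead begin an `unsafe extern` block, e.g.
+    /// `unsafe extern "C" {}` or `unsafe extern {}`? The optional ABI literal
+    /// is skipped by the extra lookahead.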
+ fn is_unsafe_foreign_mod(&self) -> bool {
+ self.token.is_keyword(kw::Unsafe)
+ && self.is_keyword_ahead(1, &[kw::Extern])
+ && self.look_ahead(
+ 2 + self.look_ahead(2, |t| t.can_begin_literal_maybe_minus() as usize),
+ |t| t.kind == token::OpenDelim(Delimiter::Brace),
+ )
+ }
+
+ fn is_static_global(&mut self) -> bool {
+ if self.check_keyword(kw::Static) {
+ // Check if this could be a closure.
+ !self.look_ahead(1, |token| {
+ if token.is_keyword(kw::Move) {
+ return true;
+ }
+ matches!(token.kind, token::BinOp(token::Or) | token::OrOr)
+ })
+ } else {
+ false
+ }
+ }
+
+ /// Recover on `const mut` with `const` already eaten.
+ fn recover_const_mut(&mut self, const_span: Span) {
+ if self.eat_keyword(kw::Mut) {
+ let span = self.prev_token.span;
+ self.struct_span_err(span, "const globals cannot be mutable")
+ .span_label(span, "cannot be mutable")
+ .span_suggestion(
+ const_span,
+ "you might want to declare a static instead",
+ "static",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+ }
+
+ /// Recover on `const impl` with `const` already eaten.
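+    ///
+    /// For illustration (example ours), `const impl Trait for Ty {}` is recovered
+    /// with a suggestion to write `impl const Trait for Ty {}` instead.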
+ fn recover_const_impl(
+ &mut self,
+ const_span: Span,
+ attrs: &mut Vec<Attribute>,
+ defaultness: Defaultness,
+ ) -> PResult<'a, ItemInfo> {
+ let impl_span = self.token.span;
+ let mut err = self.expected_ident_found();
+
+ // Only try to recover if this is implementing a trait for a type
+ let mut impl_info = match self.parse_item_impl(attrs, defaultness) {
+ Ok(impl_info) => impl_info,
+ Err(recovery_error) => {
+ // Recovery failed, raise the "expected identifier" error
+ recovery_error.cancel();
+ return Err(err);
+ }
+ };
+
+ match impl_info.1 {
+ ItemKind::Impl(box Impl { of_trait: Some(ref trai), ref mut constness, .. }) => {
+ *constness = Const::Yes(const_span);
+
+ let before_trait = trai.path.span.shrink_to_lo();
+ let const_up_to_impl = const_span.with_hi(impl_span.lo());
+ err.multipart_suggestion(
+ "you might have meant to write a const trait impl",
+ vec![(const_up_to_impl, "".to_owned()), (before_trait, "const ".to_owned())],
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+ ItemKind::Impl { .. } => return Err(err),
+ _ => unreachable!(),
+ }
+
+ Ok(impl_info)
+ }
+
+ /// Parse `["const" | ("static" "mut"?)] $ident ":" $ty (= $expr)?` with
+ /// `["const" | ("static" "mut"?)]` already parsed and stored in `m`.
+ ///
+ /// When `m` is `"const"`, `$ident` may also be `"_"`.
+ fn parse_item_global(
+ &mut self,
+ m: Option<Mutability>,
+ ) -> PResult<'a, (Ident, P<Ty>, Option<P<ast::Expr>>)> {
+ let id = if m.is_none() { self.parse_ident_or_underscore() } else { self.parse_ident() }?;
+
+ // Parse the type of a `const` or `static mut?` item.
+ // That is, the `":" $ty` fragment.
+ let ty = if self.eat(&token::Colon) {
+ self.parse_ty()?
+ } else {
+ self.recover_missing_const_type(id, m)
+ };
+
+ let expr = if self.eat(&token::Eq) { Some(self.parse_expr()?) } else { None };
+ self.expect_semi()?;
+ Ok((id, ty, expr))
+ }
+
+ /// We were supposed to parse `:` but the `:` was missing.
+ /// This means that the type is missing.
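+    ///
+    /// For example (illustrative), `const A = 5;` is recovered as if the user had
+    /// written `const A: _ = 5;`, and the error is stashed so that typeck can
+    /// later enrich it with the inferred type.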
+ fn recover_missing_const_type(&mut self, id: Ident, m: Option<Mutability>) -> P<Ty> {
+ // Construct the error and stash it away with the hope
+ // that typeck will later enrich the error with a type.
+ let kind = match m {
+ Some(Mutability::Mut) => "static mut",
+ Some(Mutability::Not) => "static",
+ None => "const",
+ };
+ let mut err = self.struct_span_err(id.span, &format!("missing type for `{kind}` item"));
+ err.span_suggestion(
+ id.span,
+ "provide a type for the item",
+ format!("{id}: <type>"),
+ Applicability::HasPlaceholders,
+ );
+ err.stash(id.span, StashKey::ItemNoType);
+
+ // The user intended that the type be inferred,
+ // so treat this as if the user wrote e.g. `const A: _ = expr;`.
+ P(Ty { kind: TyKind::Infer, span: id.span, id: ast::DUMMY_NODE_ID, tokens: None })
+ }
+
+ /// Parses an enum declaration.
+ fn parse_item_enum(&mut self) -> PResult<'a, ItemInfo> {
+ if self.token.is_keyword(kw::Struct) {
+ let mut err = self.struct_span_err(
+ self.prev_token.span.to(self.token.span),
+ "`enum` and `struct` are mutually exclusive",
+ );
+ err.span_suggestion(
+ self.prev_token.span.to(self.token.span),
+ "replace `enum struct` with",
+ "enum",
+ Applicability::MachineApplicable,
+ );
+ if self.look_ahead(1, |t| t.is_ident()) {
+ self.bump();
+ err.emit();
+ } else {
+ return Err(err);
+ }
+ }
+
+ let id = self.parse_ident()?;
+ let mut generics = self.parse_generics()?;
+ generics.where_clause = self.parse_where_clause()?;
+
+ let (variants, _) = self
+ .parse_delim_comma_seq(Delimiter::Brace, |p| p.parse_enum_variant())
+ .map_err(|e| {
+ self.recover_stmt();
+ e
+ })?;
+
+ let enum_definition = EnumDef { variants: variants.into_iter().flatten().collect() };
+ Ok((id, ItemKind::Enum(enum_definition, generics)))
+ }
+
+ fn parse_enum_variant(&mut self) -> PResult<'a, Option<Variant>> {
+ let variant_attrs = self.parse_outer_attributes()?;
+ self.collect_tokens_trailing_token(
+ variant_attrs,
+ ForceCollect::No,
+ |this, variant_attrs| {
+ let vlo = this.token.span;
+
+ let vis = this.parse_visibility(FollowedByType::No)?;
+ if !this.recover_nested_adt_item(kw::Enum)? {
+ return Ok((None, TrailingToken::None));
+ }
+ let ident = this.parse_field_ident("enum", vlo)?;
+
+ let struct_def = if this.check(&token::OpenDelim(Delimiter::Brace)) {
+ // Parse a struct variant.
+ let (fields, recovered) = this.parse_record_struct_body("struct", false)?;
+ VariantData::Struct(fields, recovered)
+ } else if this.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ VariantData::Tuple(this.parse_tuple_struct_body()?, DUMMY_NODE_ID)
+ } else {
+ VariantData::Unit(DUMMY_NODE_ID)
+ };
+
+ let disr_expr =
+ if this.eat(&token::Eq) { Some(this.parse_anon_const_expr()?) } else { None };
+
+ let vr = ast::Variant {
+ ident,
+ vis,
+ id: DUMMY_NODE_ID,
+ attrs: variant_attrs.into(),
+ data: struct_def,
+ disr_expr,
+ span: vlo.to(this.prev_token.span),
+ is_placeholder: false,
+ };
+
+ Ok((Some(vr), TrailingToken::MaybeComma))
+ },
+ )
+ }
+
+ /// Parses `struct Foo { ... }`.
+ fn parse_item_struct(&mut self) -> PResult<'a, ItemInfo> {
+ let class_name = self.parse_ident()?;
+
+ let mut generics = self.parse_generics()?;
+
+ // There is a special case worth noting here, as reported in issue #17904.
+ // If we are parsing a tuple struct it is the case that the where clause
+ // should follow the field list. Like so:
+ //
+ // struct Foo<T>(T) where T: Copy;
+ //
+ // If we are parsing a normal record-style struct it is the case
+ // that the where clause comes before the body, and after the generics.
+ // So if we look ahead and see a brace or a where-clause we begin
+ // parsing a record style struct.
+ //
+ // Otherwise if we look ahead and see a paren we parse a tuple-style
+ // struct.
+
+ let vdata = if self.token.is_keyword(kw::Where) {
+ generics.where_clause = self.parse_where_clause()?;
+ if self.eat(&token::Semi) {
+ // If we see a: `struct Foo<T> where T: Copy;` style decl.
+ VariantData::Unit(DUMMY_NODE_ID)
+ } else {
+ // If we see: `struct Foo<T> where T: Copy { ... }`
+ let (fields, recovered) =
+ self.parse_record_struct_body("struct", generics.where_clause.has_where_token)?;
+ VariantData::Struct(fields, recovered)
+ }
+ // No `where` so: `struct Foo<T>;`
+ } else if self.eat(&token::Semi) {
+ VariantData::Unit(DUMMY_NODE_ID)
+ // Record-style struct definition
+ } else if self.token == token::OpenDelim(Delimiter::Brace) {
+ let (fields, recovered) =
+ self.parse_record_struct_body("struct", generics.where_clause.has_where_token)?;
+ VariantData::Struct(fields, recovered)
+ // Tuple-style struct definition with optional where-clause.
+ } else if self.token == token::OpenDelim(Delimiter::Parenthesis) {
+ let body = VariantData::Tuple(self.parse_tuple_struct_body()?, DUMMY_NODE_ID);
+ generics.where_clause = self.parse_where_clause()?;
+ self.expect_semi()?;
+ body
+ } else {
+ let token_str = super::token_descr(&self.token);
+ let msg = &format!(
+ "expected `where`, `{{`, `(`, or `;` after struct name, found {token_str}"
+ );
+ let mut err = self.struct_span_err(self.token.span, msg);
+ err.span_label(self.token.span, "expected `where`, `{`, `(`, or `;` after struct name");
+ return Err(err);
+ };
+
+ Ok((class_name, ItemKind::Struct(vdata, generics)))
+ }
+
+ /// Parses `union Foo { ... }`.
+ fn parse_item_union(&mut self) -> PResult<'a, ItemInfo> {
+ let class_name = self.parse_ident()?;
+
+ let mut generics = self.parse_generics()?;
+
+ let vdata = if self.token.is_keyword(kw::Where) {
+ generics.where_clause = self.parse_where_clause()?;
+ let (fields, recovered) =
+ self.parse_record_struct_body("union", generics.where_clause.has_where_token)?;
+ VariantData::Struct(fields, recovered)
+ } else if self.token == token::OpenDelim(Delimiter::Brace) {
+ let (fields, recovered) =
+ self.parse_record_struct_body("union", generics.where_clause.has_where_token)?;
+ VariantData::Struct(fields, recovered)
+ } else {
+ let token_str = super::token_descr(&self.token);
+ let msg = &format!("expected `where` or `{{` after union name, found {token_str}");
+ let mut err = self.struct_span_err(self.token.span, msg);
+ err.span_label(self.token.span, "expected `where` or `{` after union name");
+ return Err(err);
+ };
+
+ Ok((class_name, ItemKind::Union(vdata, generics)))
+ }
+
+ fn parse_record_struct_body(
+ &mut self,
+ adt_ty: &str,
+ parsed_where: bool,
+ ) -> PResult<'a, (Vec<FieldDef>, /* recovered */ bool)> {
+ let mut fields = Vec::new();
+ let mut recovered = false;
+ if self.eat(&token::OpenDelim(Delimiter::Brace)) {
+ while self.token != token::CloseDelim(Delimiter::Brace) {
+ let field = self.parse_field_def(adt_ty).map_err(|e| {
+ self.consume_block(Delimiter::Brace, ConsumeClosingDelim::No);
+ recovered = true;
+ e
+ });
+ match field {
+ Ok(field) => fields.push(field),
+ Err(mut err) => {
+ err.emit();
+ break;
+ }
+ }
+ }
+ self.eat(&token::CloseDelim(Delimiter::Brace));
+ } else {
+ let token_str = super::token_descr(&self.token);
+ let msg = &format!(
+ "expected {}`{{` after struct name, found {}",
+ if parsed_where { "" } else { "`where`, or " },
+ token_str
+ );
+ let mut err = self.struct_span_err(self.token.span, msg);
+ err.span_label(
+ self.token.span,
+ format!(
+ "expected {}`{{` after struct name",
+ if parsed_where { "" } else { "`where`, or " }
+ ),
+ );
+ return Err(err);
+ }
+
+ Ok((fields, recovered))
+ }
+
+ fn parse_tuple_struct_body(&mut self) -> PResult<'a, Vec<FieldDef>> {
+        // This is the case where we find `struct Foo<T>(T) where T: Copy;`.
+        // Unit-like structs are handled in the `parse_item_struct` function.
+ self.parse_paren_comma_seq(|p| {
+ let attrs = p.parse_outer_attributes()?;
+ p.collect_tokens_trailing_token(attrs, ForceCollect::No, |p, attrs| {
+ let lo = p.token.span;
+ let vis = p.parse_visibility(FollowedByType::Yes)?;
+ let ty = p.parse_ty()?;
+
+ Ok((
+ FieldDef {
+ span: lo.to(ty.span),
+ vis,
+ ident: None,
+ id: DUMMY_NODE_ID,
+ ty,
+ attrs: attrs.into(),
+ is_placeholder: false,
+ },
+ TrailingToken::MaybeComma,
+ ))
+ })
+ })
+ .map(|(r, _)| r)
+ }
+
+ /// Parses an element of a struct declaration.
+ fn parse_field_def(&mut self, adt_ty: &str) -> PResult<'a, FieldDef> {
+ let attrs = self.parse_outer_attributes()?;
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let lo = this.token.span;
+ let vis = this.parse_visibility(FollowedByType::No)?;
+ Ok((this.parse_single_struct_field(adt_ty, lo, vis, attrs)?, TrailingToken::None))
+ })
+ }
+
+ /// Parses a structure field declaration.
+ fn parse_single_struct_field(
+ &mut self,
+ adt_ty: &str,
+ lo: Span,
+ vis: Visibility,
+ attrs: Vec<Attribute>,
+ ) -> PResult<'a, FieldDef> {
+ let mut seen_comma: bool = false;
+ let a_var = self.parse_name_and_ty(adt_ty, lo, vis, attrs)?;
+ if self.token == token::Comma {
+ seen_comma = true;
+ }
+ match self.token.kind {
+ token::Comma => {
+ self.bump();
+ }
+ token::CloseDelim(Delimiter::Brace) => {}
+ token::DocComment(..) => {
+ let previous_span = self.prev_token.span;
+ let mut err = self.span_err(self.token.span, Error::UselessDocComment);
+ self.bump(); // consume the doc comment
+ let comma_after_doc_seen = self.eat(&token::Comma);
+                // `seen_comma` is always false here, because we are inside a doc
+                // comment block; the condition is only here for readability.
+ if !seen_comma && comma_after_doc_seen {
+ seen_comma = true;
+ }
+ if comma_after_doc_seen || self.token == token::CloseDelim(Delimiter::Brace) {
+ err.emit();
+ } else {
+ if !seen_comma {
+ let sp = self.sess.source_map().next_point(previous_span);
+ err.span_suggestion(
+ sp,
+ "missing comma here",
+ ",",
+ Applicability::MachineApplicable,
+ );
+ }
+ return Err(err);
+ }
+ }
+ _ => {
+ let sp = self.prev_token.span.shrink_to_hi();
+ let mut err = self.struct_span_err(
+ sp,
+ &format!("expected `,`, or `}}`, found {}", super::token_descr(&self.token)),
+ );
+
+ // Try to recover extra trailing angle brackets
+ let mut recovered = false;
+ if let TyKind::Path(_, Path { segments, .. }) = &a_var.ty.kind {
+ if let Some(last_segment) = segments.last() {
+ recovered = self.check_trailing_angle_brackets(
+ last_segment,
+ &[&token::Comma, &token::CloseDelim(Delimiter::Brace)],
+ );
+ if recovered {
+ // Handle a case like `Vec<u8>>,` where we can continue parsing fields
+ // after the comma
+ self.eat(&token::Comma);
+ // `check_trailing_angle_brackets` already emitted a nicer error
+ // NOTE(eddyb) this was `.cancel()`, but `err`
+ // gets returned, so we can't fully defuse it.
+ err.delay_as_bug();
+ }
+ }
+ }
+
+ if self.token.is_ident() {
+ // This is likely another field; emit the diagnostic and keep going
+ err.span_suggestion(
+ sp,
+ "try adding a comma",
+ ",",
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ recovered = true;
+ }
+
+ if recovered {
+ // Make sure an error was emitted (either by recovering an angle bracket,
+ // or by finding an identifier as the next token), since we're
+ // going to continue parsing
+ assert!(self.sess.span_diagnostic.has_errors().is_some());
+ } else {
+ return Err(err);
+ }
+ }
+ }
+ Ok(a_var)
+ }
+
+ fn expect_field_ty_separator(&mut self) -> PResult<'a, ()> {
+ if let Err(mut err) = self.expect(&token::Colon) {
+ let sm = self.sess.source_map();
+ let eq_typo = self.token.kind == token::Eq && self.look_ahead(1, |t| t.is_path_start());
+ let semi_typo = self.token.kind == token::Semi
+ && self.look_ahead(1, |t| {
+ t.is_path_start()
+ // We check that we are in a situation like `foo; bar` to avoid bad suggestions
+ // when there's no type and `;` was used instead of a comma.
+ && match (sm.lookup_line(self.token.span.hi()), sm.lookup_line(t.span.lo())) {
+ (Ok(l), Ok(r)) => l.line == r.line,
+ _ => true,
+ }
+ });
+ if eq_typo || semi_typo {
+ self.bump();
+ // Gracefully handle small typos.
+ err.span_suggestion_short(
+ self.prev_token.span,
+ "field names and their types are separated with `:`",
+ ":",
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ } else {
+ return Err(err);
+ }
+ }
+ Ok(())
+ }
+
+ /// Parses a structure field.
+ fn parse_name_and_ty(
+ &mut self,
+ adt_ty: &str,
+ lo: Span,
+ vis: Visibility,
+ attrs: Vec<Attribute>,
+ ) -> PResult<'a, FieldDef> {
+ let name = self.parse_field_ident(adt_ty, lo)?;
+ self.expect_field_ty_separator()?;
+ let ty = self.parse_ty()?;
+ if self.token.kind == token::Colon && self.look_ahead(1, |tok| tok.kind != token::Colon) {
+ self.struct_span_err(self.token.span, "found single colon in a struct field type path")
+ .span_suggestion_verbose(
+ self.token.span,
+ "write a path separator here",
+ "::",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+ if self.token.kind == token::Eq {
+ self.bump();
+ let const_expr = self.parse_anon_const_expr()?;
+ let sp = ty.span.shrink_to_hi().to(const_expr.value.span);
+ self.struct_span_err(sp, "default values on `struct` fields aren't supported")
+ .span_suggestion(
+ sp,
+ "remove this unsupported default value",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ Ok(FieldDef {
+ span: lo.to(self.prev_token.span),
+ ident: Some(name),
+ vis,
+ id: DUMMY_NODE_ID,
+ ty,
+ attrs: attrs.into(),
+ is_placeholder: false,
+ })
+ }
+
+ /// Parses a field identifier. Specialized version of `parse_ident_common`
+ /// for better diagnostics and suggestions.
+ fn parse_field_ident(&mut self, adt_ty: &str, lo: Span) -> PResult<'a, Ident> {
+ let (ident, is_raw) = self.ident_or_err()?;
+ if !is_raw && ident.is_reserved() {
+ let err = if self.check_fn_front_matter(false) {
+ let inherited_vis = Visibility {
+ span: rustc_span::DUMMY_SP,
+ kind: VisibilityKind::Inherited,
+ tokens: None,
+ };
+ // We use `parse_fn` to get a span for the function
+ let fn_parse_mode = FnParseMode { req_name: |_| true, req_body: true };
+ if let Err(mut db) =
+ self.parse_fn(&mut Vec::new(), fn_parse_mode, lo, &inherited_vis)
+ {
+ db.delay_as_bug();
+ }
+ let mut err = self.struct_span_err(
+ lo.to(self.prev_token.span),
+ &format!("functions are not allowed in {adt_ty} definitions"),
+ );
+ err.help("unlike in C++, Java, and C#, functions are declared in `impl` blocks");
+ err.help("see https://doc.rust-lang.org/book/ch05-03-method-syntax.html for more information");
+ err
+ } else {
+ self.expected_ident_found()
+ };
+ return Err(err);
+ }
+ self.bump();
+ Ok(ident)
+ }
+
+ /// Parses a declarative macro 2.0 definition.
+ /// The `macro` keyword has already been parsed.
+ /// ```ebnf
+ /// MacBody = "{" TOKEN_STREAM "}" ;
+ /// MacParams = "(" TOKEN_STREAM ")" ;
+ /// DeclMac = "macro" Ident MacParams? MacBody ;
+ /// ```
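+    ///
+    /// For illustration (examples ours):
+    /// ```ignore (illustrative)
+    /// macro id($e:expr) { $e }          // `MacParams` and `MacBody`
+    /// macro id2 { ($e:expr) => { $e } } // `MacBody` only
+    /// ```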
+ fn parse_item_decl_macro(&mut self, lo: Span) -> PResult<'a, ItemInfo> {
+ let ident = self.parse_ident()?;
+ let body = if self.check(&token::OpenDelim(Delimiter::Brace)) {
+ self.parse_mac_args()? // `MacBody`
+ } else if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ let params = self.parse_token_tree(); // `MacParams`
+ let pspan = params.span();
+ if !self.check(&token::OpenDelim(Delimiter::Brace)) {
+ return self.unexpected();
+ }
+ let body = self.parse_token_tree(); // `MacBody`
+ // Convert `MacParams MacBody` into `{ MacParams => MacBody }`.
+ let bspan = body.span();
+ let arrow = TokenTree::token_alone(token::FatArrow, pspan.between(bspan)); // `=>`
+ let tokens = TokenStream::new(vec![params, arrow, body]);
+ let dspan = DelimSpan::from_pair(pspan.shrink_to_lo(), bspan.shrink_to_hi());
+ P(MacArgs::Delimited(dspan, MacDelimiter::Brace, tokens))
+ } else {
+ return self.unexpected();
+ };
+
+ self.sess.gated_spans.gate(sym::decl_macro, lo.to(self.prev_token.span));
+ Ok((ident, ItemKind::MacroDef(ast::MacroDef { body, macro_rules: false })))
+ }
+
+ /// Is this a possibly malformed start of a `macro_rules! foo` item definition?
+ fn is_macro_rules_item(&mut self) -> IsMacroRulesItem {
+ if self.check_keyword(kw::MacroRules) {
+ let macro_rules_span = self.token.span;
+
+ if self.look_ahead(1, |t| *t == token::Not) && self.look_ahead(2, |t| t.is_ident()) {
+ return IsMacroRulesItem::Yes { has_bang: true };
+ } else if self.look_ahead(1, |t| (t.is_ident())) {
+ // macro_rules foo
+ self.struct_span_err(macro_rules_span, "expected `!` after `macro_rules`")
+ .span_suggestion(
+ macro_rules_span,
+ "add a `!`",
+ "macro_rules!",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ return IsMacroRulesItem::Yes { has_bang: false };
+ }
+ }
+
+ IsMacroRulesItem::No
+ }
+
+ /// Parses a `macro_rules! foo { ... }` declarative macro.
+ fn parse_item_macro_rules(
+ &mut self,
+ vis: &Visibility,
+ has_bang: bool,
+ ) -> PResult<'a, ItemInfo> {
+ self.expect_keyword(kw::MacroRules)?; // `macro_rules`
+
+ if has_bang {
+ self.expect(&token::Not)?; // `!`
+ }
+ let ident = self.parse_ident()?;
+
+ if self.eat(&token::Not) {
+ // Handle macro_rules! foo!
+ let span = self.prev_token.span;
+ self.struct_span_err(span, "macro names aren't followed by a `!`")
+ .span_suggestion(span, "remove the `!`", "", Applicability::MachineApplicable)
+ .emit();
+ }
+
+ let body = self.parse_mac_args()?;
+ self.eat_semi_for_macro_if_needed(&body);
+ self.complain_if_pub_macro(vis, true);
+
+ Ok((ident, ItemKind::MacroDef(ast::MacroDef { body, macro_rules: true })))
+ }
+
+ /// Item macro invocations or `macro_rules!` definitions need inherited visibility.
+ /// If that's not the case, emit an error.
+ fn complain_if_pub_macro(&self, vis: &Visibility, macro_rules: bool) {
+ if let VisibilityKind::Inherited = vis.kind {
+ return;
+ }
+
+ let vstr = pprust::vis_to_string(vis);
+ let vstr = vstr.trim_end();
+ if macro_rules {
+ let msg = format!("can't qualify macro_rules invocation with `{vstr}`");
+ self.struct_span_err(vis.span, &msg)
+ .span_suggestion(
+ vis.span,
+ "try exporting the macro",
+ "#[macro_export]",
+ Applicability::MaybeIncorrect, // speculative
+ )
+ .emit();
+ } else {
+ self.struct_span_err(vis.span, "can't qualify macro invocation with `pub`")
+ .span_suggestion(
+ vis.span,
+ "remove the visibility",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .help(&format!("try adjusting the macro to put `{vstr}` inside the invocation"))
+ .emit();
+ }
+ }
+
+ fn eat_semi_for_macro_if_needed(&mut self, args: &MacArgs) {
+ if args.need_semicolon() && !self.eat(&token::Semi) {
+ self.report_invalid_macro_expansion_item(args);
+ }
+ }
+
+ fn report_invalid_macro_expansion_item(&self, args: &MacArgs) {
+ let span = args.span().expect("undelimited macro call");
+ let mut err = self.struct_span_err(
+ span,
+ "macros that expand to items must be delimited with braces or followed by a semicolon",
+ );
+ // FIXME: This will make us not emit the help even for declarative
+ // macros within the same crate (that we can fix), which is sad.
+ if !span.from_expansion() {
+ if self.unclosed_delims.is_empty() {
+ let DelimSpan { open, close } = match args {
+ MacArgs::Empty | MacArgs::Eq(..) => unreachable!(),
+ MacArgs::Delimited(dspan, ..) => *dspan,
+ };
+ err.multipart_suggestion(
+ "change the delimiters to curly braces",
+ vec![(open, "{".to_string()), (close, '}'.to_string())],
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_suggestion(
+ span,
+ "change the delimiters to curly braces",
+ " { /* items */ }",
+ Applicability::HasPlaceholders,
+ );
+ }
+ err.span_suggestion(
+ span.shrink_to_hi(),
+ "add a semicolon",
+ ';',
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+
+    /// Checks if the current token is one of the keywords that cannot be nested, like
+    /// `kw::Enum`. If it is, we try to parse the item anyway and report an error about
+    /// the nested definition.
+ fn recover_nested_adt_item(&mut self, keyword: Symbol) -> PResult<'a, bool> {
+ if (self.token.is_keyword(kw::Enum)
+ || self.token.is_keyword(kw::Struct)
+ || self.token.is_keyword(kw::Union))
+ && self.look_ahead(1, |t| t.is_ident())
+ {
+ let kw_token = self.token.clone();
+ let kw_str = pprust::token_to_string(&kw_token);
+ let item = self.parse_item(ForceCollect::No)?;
+
+ self.struct_span_err(
+ kw_token.span,
+ &format!("`{kw_str}` definition cannot be nested inside `{keyword}`"),
+ )
+ .span_suggestion(
+ item.unwrap().span,
+ &format!("consider creating a new `{kw_str}` definition instead of nesting"),
+ "",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ // We successfully parsed the item but we must inform the caller about nested problem.
+ return Ok(false);
+ }
+ Ok(true)
+ }
+}
+
+/// The parsing configuration used to parse a parameter list (see `parse_fn_params`).
+///
+/// For each parameter `p`, the function decides whether `p` must have a pattern or may be just a type.
+///
+/// This function pointer accepts an edition, because in edition 2015, trait declarations
+/// were allowed to omit parameter names. In 2018, they became required.
+type ReqName = fn(Edition) -> bool;
+
+/// Parsing configuration for functions.
+///
+/// The syntax of function items is slightly different within trait definitions,
+/// impl blocks, and modules. It is still parsed using the same code, just with
+/// different flags set, so that even when the input is wrong and produces a parse
+/// error, it still gets into the AST and the rest of the parser and
+/// type checker can run.
+#[derive(Clone, Copy)]
+pub(crate) struct FnParseMode {
+    /// A function pointer that decides, for each parameter `p`, whether `p` must have
+    /// a pattern or may be just a type. This field affects parsing of the parameter list.
+ ///
+ /// ```text
+ /// fn foo(alef: A) -> X { X::new() }
+ /// -----^^ affects parsing this part of the function signature
+ /// |
+ /// if req_name returns false, then this name is optional
+ ///
+ /// fn bar(A) -> X;
+ /// ^
+ /// |
+ /// if req_name returns true, this is an error
+ /// ```
+ ///
+    /// This function pointer should only return `false` if:
+ ///
+ /// * The item is being parsed inside of a trait definition.
+ /// Within an impl block or a module, it should always evaluate
+ /// to true.
+ /// * The span is from Edition 2015. In particular, you can get a
+ /// 2015 span inside a 2021 crate using macros.
+ pub req_name: ReqName,
+ /// If this flag is set to `true`, then plain, semicolon-terminated function
+ /// prototypes are not allowed here.
+ ///
+ /// ```text
+ /// fn foo(alef: A) -> X { X::new() }
+ /// ^^^^^^^^^^^^
+ /// |
+ /// this is always allowed
+ ///
+ /// fn bar(alef: A, bet: B) -> X;
+ /// ^
+ /// |
+ /// if req_body is set to true, this is an error
+ /// ```
+ ///
+ /// This field should only be set to false if the item is inside of a trait
+ /// definition or extern block. Within an impl block or a module, it should
+ /// always be set to true.
+ pub req_body: bool,
+}
+
+/// Parsing of functions and methods.
+impl<'a> Parser<'a> {
+ /// Parse a function starting from the front matter (`const ...`) to the body `{ ... }` or `;`.
+ fn parse_fn(
+ &mut self,
+ attrs: &mut Vec<Attribute>,
+ fn_parse_mode: FnParseMode,
+ sig_lo: Span,
+ vis: &Visibility,
+ ) -> PResult<'a, (Ident, FnSig, Generics, Option<P<Block>>)> {
+ let header = self.parse_fn_front_matter(vis)?; // `const ... fn`
+ let ident = self.parse_ident()?; // `foo`
+ let mut generics = self.parse_generics()?; // `<'a, T, ...>`
+ let decl =
+ self.parse_fn_decl(fn_parse_mode.req_name, AllowPlus::Yes, RecoverReturnSign::Yes)?; // `(p: u8, ...)`
+ generics.where_clause = self.parse_where_clause()?; // `where T: Ord`
+
+ let mut sig_hi = self.prev_token.span;
+ let body = self.parse_fn_body(attrs, &ident, &mut sig_hi, fn_parse_mode.req_body)?; // `;` or `{ ... }`.
+ let fn_sig_span = sig_lo.to(sig_hi);
+ Ok((ident, FnSig { header, decl, span: fn_sig_span }, generics, body))
+ }
+
+ /// Parse the "body" of a function.
+ /// This can either be `;` when there's no body,
+ /// or e.g. a block when the function is a provided one.
+ fn parse_fn_body(
+ &mut self,
+ attrs: &mut Vec<Attribute>,
+ ident: &Ident,
+ sig_hi: &mut Span,
+ req_body: bool,
+ ) -> PResult<'a, Option<P<Block>>> {
+ let has_semi = if req_body {
+ self.token.kind == TokenKind::Semi
+ } else {
+ // Only include `;` in list of expected tokens if body is not required
+ self.check(&TokenKind::Semi)
+ };
+ let (inner_attrs, body) = if has_semi {
+ // Include the trailing semicolon in the span of the signature
+ self.expect_semi()?;
+ *sig_hi = self.prev_token.span;
+ (Vec::new(), None)
+ } else if self.check(&token::OpenDelim(Delimiter::Brace)) || self.token.is_whole_block() {
+ self.parse_inner_attrs_and_block().map(|(attrs, body)| (attrs, Some(body)))?
+ } else if self.token.kind == token::Eq {
+ // Recover `fn foo() = $expr;`.
+ self.bump(); // `=`
+ let eq_sp = self.prev_token.span;
+ let _ = self.parse_expr()?;
+ self.expect_semi()?; // `;`
+ let span = eq_sp.to(self.prev_token.span);
+ self.struct_span_err(span, "function body cannot be `= expression;`")
+ .multipart_suggestion(
+ "surround the expression with `{` and `}` instead of `=` and `;`",
+ vec![(eq_sp, "{".to_string()), (self.prev_token.span, " }".to_string())],
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ (Vec::new(), Some(self.mk_block_err(span)))
+ } else {
+ let expected = if req_body {
+ &[token::OpenDelim(Delimiter::Brace)][..]
+ } else {
+ &[token::Semi, token::OpenDelim(Delimiter::Brace)]
+ };
+ if let Err(mut err) = self.expected_one_of_not_found(&[], &expected) {
+ if self.token.kind == token::CloseDelim(Delimiter::Brace) {
+ // The enclosing `mod`, `trait` or `impl` is being closed, so keep the `fn` in
+ // the AST for typechecking.
+ err.span_label(ident.span, "while parsing this `fn`");
+ err.emit();
+ } else {
+ return Err(err);
+ }
+ }
+ (Vec::new(), None)
+ };
+ attrs.extend(inner_attrs);
+ Ok(body)
+ }
+
+    /// Is the current token the start of an `FnHeader`, even a malformed one?
+    ///
+    /// `check_pub` adds an additional `pub` to the checks, in case users placed it
+    /// wrongly; it can be used to ensure `pub` never comes after `default`.
+ pub(super) fn check_fn_front_matter(&mut self, check_pub: bool) -> bool {
+ // We use an over-approximation here.
+ // `const const`, `fn const` won't parse, but we're not stepping over other syntax either.
+        // `pub` is added in case users got confused with the ordering like `async pub fn`,
+        // but only if it wasn't preceded by `default`, as `default pub` is invalid.
+ let quals: &[Symbol] = if check_pub {
+ &[kw::Pub, kw::Const, kw::Async, kw::Unsafe, kw::Extern]
+ } else {
+ &[kw::Const, kw::Async, kw::Unsafe, kw::Extern]
+ };
+ self.check_keyword(kw::Fn) // Definitely an `fn`.
+ // `$qual fn` or `$qual $qual`:
+ || quals.iter().any(|&kw| self.check_keyword(kw))
+ && self.look_ahead(1, |t| {
+ // `$qual fn`, e.g. `const fn` or `async fn`.
+ t.is_keyword(kw::Fn)
+ // Two qualifiers `$qual $qual` is enough, e.g. `async unsafe`.
+ || t.is_non_raw_ident_where(|i| quals.contains(&i.name)
+ // Rule out 2015 `const async: T = val`.
+ && i.is_reserved()
+ // Rule out unsafe extern block.
+ && !self.is_unsafe_foreign_mod())
+ })
+ // `extern ABI fn`
+ || self.check_keyword(kw::Extern)
+ && self.look_ahead(1, |t| t.can_begin_literal_maybe_minus())
+ && self.look_ahead(2, |t| t.is_keyword(kw::Fn))
+ }
+
+ /// Parses all the "front matter" (or "qualifiers") for a `fn` declaration,
+ /// up to and including the `fn` keyword. The formal grammar is:
+ ///
+ /// ```text
+ /// Extern = "extern" StringLit? ;
+ /// FnQual = "const"? "async"? "unsafe"? Extern? ;
+ /// FnFrontMatter = FnQual "fn" ;
+ /// ```
+ ///
+ /// `vis` represents the visibility that was already parsed, if any. Use
+ /// `Visibility::Inherited` when no visibility is known.
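+    ///
+    /// For illustration (example ours), the grammar accepts front matter such as
+    /// `const async unsafe extern "C" fn`; some combinations (e.g. `const async`)
+    /// are rejected only by later validation, not by this parser.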
+ pub(super) fn parse_fn_front_matter(&mut self, orig_vis: &Visibility) -> PResult<'a, FnHeader> {
+ let sp_start = self.token.span;
+ let constness = self.parse_constness();
+
+ let async_start_sp = self.token.span;
+ let asyncness = self.parse_asyncness();
+
+ let unsafe_start_sp = self.token.span;
+ let unsafety = self.parse_unsafety();
+
+ let ext_start_sp = self.token.span;
+ let ext = self.parse_extern();
+
+ if let Async::Yes { span, .. } = asyncness {
+ self.ban_async_in_2015(span);
+ }
+
+ if !self.eat_keyword(kw::Fn) {
+ // It is possible for `expect_one_of` to recover given the contents of
+ // `self.expected_tokens`, therefore, do not use `self.unexpected()` which doesn't
+ // account for this.
+ match self.expect_one_of(&[], &[]) {
+ Ok(true) => {}
+ Ok(false) => unreachable!(),
+ Err(mut err) => {
+ // Qualifier keywords ordering check
+ enum WrongKw {
+ Duplicated(Span),
+ Misplaced(Span),
+ }
+
+                    // This will allow a machine-applicable fix either to place the keyword in
+                    // the correct position or to indicate that it is already present and the
+                    // second instance should be removed.
+ let wrong_kw = if self.check_keyword(kw::Const) {
+ match constness {
+ Const::Yes(sp) => Some(WrongKw::Duplicated(sp)),
+ Const::No => Some(WrongKw::Misplaced(async_start_sp)),
+ }
+ } else if self.check_keyword(kw::Async) {
+ match asyncness {
+ Async::Yes { span, .. } => Some(WrongKw::Duplicated(span)),
+ Async::No => Some(WrongKw::Misplaced(unsafe_start_sp)),
+ }
+ } else if self.check_keyword(kw::Unsafe) {
+ match unsafety {
+ Unsafe::Yes(sp) => Some(WrongKw::Duplicated(sp)),
+ Unsafe::No => Some(WrongKw::Misplaced(ext_start_sp)),
+ }
+ } else {
+ None
+ };
+
+ // The keyword is already present, suggest removal of the second instance
+ if let Some(WrongKw::Duplicated(original_sp)) = wrong_kw {
+ let original_kw = self
+ .span_to_snippet(original_sp)
+ .expect("Span extracted directly from keyword should always work");
+
+ err.span_suggestion(
+ self.token.uninterpolated_span(),
+ &format!("`{original_kw}` already used earlier, remove this one"),
+ "",
+ Applicability::MachineApplicable,
+ )
+ .span_note(original_sp, &format!("`{original_kw}` first seen here"));
+ }
+ // The keyword has not been seen yet, suggest correct placement in the function front matter
+ else if let Some(WrongKw::Misplaced(correct_pos_sp)) = wrong_kw {
+ let correct_pos_sp = correct_pos_sp.to(self.prev_token.span);
+ if let Ok(current_qual) = self.span_to_snippet(correct_pos_sp) {
+ let misplaced_qual_sp = self.token.uninterpolated_span();
+ let misplaced_qual = self.span_to_snippet(misplaced_qual_sp).unwrap();
+
+ err.span_suggestion(
+ correct_pos_sp.to(misplaced_qual_sp),
+ &format!("`{misplaced_qual}` must come before `{current_qual}`"),
+ format!("{misplaced_qual} {current_qual}"),
+ Applicability::MachineApplicable,
+ ).note("keyword order for functions declaration is `pub`, `default`, `const`, `async`, `unsafe`, `extern`");
+ }
+ }
+ // Recover incorrect visibility order such as `async pub`
+ else if self.check_keyword(kw::Pub) {
+ let sp = sp_start.to(self.prev_token.span);
+ if let Ok(snippet) = self.span_to_snippet(sp) {
+ let current_vis = match self.parse_visibility(FollowedByType::No) {
+ Ok(v) => v,
+ Err(d) => {
+ d.cancel();
+ return Err(err);
+ }
+ };
+ let vs = pprust::vis_to_string(&current_vis);
+ let vs = vs.trim_end();
+
+ // There was no explicit visibility
+ if matches!(orig_vis.kind, VisibilityKind::Inherited) {
+ err.span_suggestion(
+ sp_start.to(self.prev_token.span),
+ &format!("visibility `{vs}` must come before `{snippet}`"),
+ format!("{vs} {snippet}"),
+ Applicability::MachineApplicable,
+ );
+ }
+ // There was an explicit visibility
+ else {
+ err.span_suggestion(
+ current_vis.span,
+ "there is already a visibility modifier, remove one",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .span_note(orig_vis.span, "explicit visibility first seen here");
+ }
+ }
+ }
+ return Err(err);
+ }
+ }
+ }
+
+ Ok(FnHeader { constness, unsafety, asyncness, ext })
+ }
+
+ /// We are parsing `async fn`. If we are on Rust 2015, emit an error.
+ fn ban_async_in_2015(&self, span: Span) {
+ if span.rust_2015() {
+ let diag = self.diagnostic();
+ struct_span_err!(diag, span, E0670, "`async fn` is not permitted in Rust 2015")
+ .span_label(span, "to use `async fn`, switch to Rust 2018 or later")
+ .help_use_latest_edition()
+ .emit();
+ }
+ }
+
+ /// Parses the parameter list and result type of a function declaration.
+ pub(super) fn parse_fn_decl(
+ &mut self,
+ req_name: ReqName,
+ ret_allow_plus: AllowPlus,
+ recover_return_sign: RecoverReturnSign,
+ ) -> PResult<'a, P<FnDecl>> {
+ Ok(P(FnDecl {
+ inputs: self.parse_fn_params(req_name)?,
+ output: self.parse_ret_ty(ret_allow_plus, RecoverQPath::Yes, recover_return_sign)?,
+ }))
+ }
+
+ /// Parses the parameter list of a function, including the `(` and `)` delimiters.
+ fn parse_fn_params(&mut self, req_name: ReqName) -> PResult<'a, Vec<Param>> {
+ let mut first_param = true;
+ // Parse the arguments, starting out with `self` being allowed...
+ let (mut params, _) = self.parse_paren_comma_seq(|p| {
+ let param = p.parse_param_general(req_name, first_param).or_else(|mut e| {
+ e.emit();
+ let lo = p.prev_token.span;
+ // Skip every token until next possible arg or end.
+ p.eat_to_tokens(&[&token::Comma, &token::CloseDelim(Delimiter::Parenthesis)]);
+ // Create a placeholder argument for proper arg count (issue #34264).
+ Ok(dummy_arg(Ident::new(kw::Empty, lo.to(p.prev_token.span))))
+ });
+ // ...now that we've parsed the first argument, `self` is no longer allowed.
+ first_param = false;
+ param
+ })?;
+ // Replace duplicated recovered params with `_` pattern to avoid unnecessary errors.
+ self.deduplicate_recovered_params_names(&mut params);
+ Ok(params)
+ }
+
+ /// Parses a single function parameter.
+ ///
+ /// - `self` is syntactically allowed when `first_param` holds.
+ fn parse_param_general(&mut self, req_name: ReqName, first_param: bool) -> PResult<'a, Param> {
+ let lo = self.token.span;
+ let attrs = self.parse_outer_attributes()?;
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ // Possibly parse `self`. Recover if we parsed it and it wasn't allowed here.
+ if let Some(mut param) = this.parse_self_param()? {
+ param.attrs = attrs.into();
+ let res = if first_param { Ok(param) } else { this.recover_bad_self_param(param) };
+ return Ok((res?, TrailingToken::None));
+ }
+
+ let is_name_required = match this.token.kind {
+ token::DotDotDot => false,
+ _ => req_name(this.token.span.edition()),
+ };
+ let (pat, ty) = if is_name_required || this.is_named_param() {
+ debug!("parse_param_general parse_pat (is_name_required:{})", is_name_required);
+
+ let (pat, colon) = this.parse_fn_param_pat_colon()?;
+ if !colon {
+ let mut err = this.unexpected::<()>().unwrap_err();
+ return if let Some(ident) =
+ this.parameter_without_type(&mut err, pat, is_name_required, first_param)
+ {
+ err.emit();
+ Ok((dummy_arg(ident), TrailingToken::None))
+ } else {
+ Err(err)
+ };
+ }
+
+ this.eat_incorrect_doc_comment_for_param_type();
+ (pat, this.parse_ty_for_param()?)
+ } else {
+ debug!("parse_param_general ident_to_pat");
+ let parser_snapshot_before_ty = this.clone();
+ this.eat_incorrect_doc_comment_for_param_type();
+ let mut ty = this.parse_ty_for_param();
+ if ty.is_ok()
+ && this.token != token::Comma
+ && this.token != token::CloseDelim(Delimiter::Parenthesis)
+ {
+ // This wasn't actually a type, but a pattern looking like a type,
+ // so we are going to rollback and re-parse for recovery.
+ ty = this.unexpected();
+ }
+ match ty {
+ Ok(ty) => {
+ let ident = Ident::new(kw::Empty, this.prev_token.span);
+ let bm = BindingMode::ByValue(Mutability::Not);
+ let pat = this.mk_pat_ident(ty.span, bm, ident);
+ (pat, ty)
+ }
+ // If this is a C-variadic argument and we hit an error, return the error.
+ Err(err) if this.token == token::DotDotDot => return Err(err),
+ // Recover from attempting to parse the argument as a type without pattern.
+ Err(err) => {
+ err.cancel();
+ *this = parser_snapshot_before_ty;
+ this.recover_arg_parse()?
+ }
+ }
+ };
+
+ let span = lo.until(this.token.span);
+
+ Ok((
+ Param {
+ attrs: attrs.into(),
+ id: ast::DUMMY_NODE_ID,
+ is_placeholder: false,
+ pat,
+ span,
+ ty,
+ },
+ TrailingToken::None,
+ ))
+ })
+ }
+
+ /// Returns the parsed optional self parameter and whether a self shortcut was used.
+ fn parse_self_param(&mut self) -> PResult<'a, Option<Param>> {
+ // Extract an identifier *after* having confirmed that the token is one.
+ let expect_self_ident = |this: &mut Self| match this.token.ident() {
+ Some((ident, false)) => {
+ this.bump();
+ ident
+ }
+ _ => unreachable!(),
+ };
+ // Is `self` `n` tokens ahead?
+ let is_isolated_self = |this: &Self, n| {
+ this.is_keyword_ahead(n, &[kw::SelfLower])
+ && this.look_ahead(n + 1, |t| t != &token::ModSep)
+ };
+ // Is `mut self` `n` tokens ahead?
+ let is_isolated_mut_self =
+ |this: &Self, n| this.is_keyword_ahead(n, &[kw::Mut]) && is_isolated_self(this, n + 1);
+ // Parse `self` or `self: TYPE`. We already know the current token is `self`.
+ let parse_self_possibly_typed = |this: &mut Self, m| {
+ let eself_ident = expect_self_ident(this);
+ let eself_hi = this.prev_token.span;
+ let eself = if this.eat(&token::Colon) {
+ SelfKind::Explicit(this.parse_ty()?, m)
+ } else {
+ SelfKind::Value(m)
+ };
+ Ok((eself, eself_ident, eself_hi))
+ };
+ // Recover for the grammar `*self`, `*const self`, and `*mut self`.
+ let recover_self_ptr = |this: &mut Self| {
+ let msg = "cannot pass `self` by raw pointer";
+ let span = this.token.span;
+ this.struct_span_err(span, msg).span_label(span, msg).emit();
+
+ Ok((SelfKind::Value(Mutability::Not), expect_self_ident(this), this.prev_token.span))
+ };
+
+ // Parse optional `self` parameter of a method.
+ // Only a limited set of initial token sequences is considered `self` parameters; anything
+ // else is parsed as a normal function parameter list, so some lookahead is required.
+ let eself_lo = self.token.span;
+ let (eself, eself_ident, eself_hi) = match self.token.uninterpolate().kind {
+ token::BinOp(token::And) => {
+ let eself = if is_isolated_self(self, 1) {
+ // `&self`
+ self.bump();
+ SelfKind::Region(None, Mutability::Not)
+ } else if is_isolated_mut_self(self, 1) {
+ // `&mut self`
+ self.bump();
+ self.bump();
+ SelfKind::Region(None, Mutability::Mut)
+ } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_self(self, 2) {
+ // `&'lt self`
+ self.bump();
+ let lt = self.expect_lifetime();
+ SelfKind::Region(Some(lt), Mutability::Not)
+ } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_mut_self(self, 2) {
+ // `&'lt mut self`
+ self.bump();
+ let lt = self.expect_lifetime();
+ self.bump();
+ SelfKind::Region(Some(lt), Mutability::Mut)
+ } else {
+ // `&not_self`
+ return Ok(None);
+ };
+ (eself, expect_self_ident(self), self.prev_token.span)
+ }
+ // `*self`
+ token::BinOp(token::Star) if is_isolated_self(self, 1) => {
+ self.bump();
+ recover_self_ptr(self)?
+ }
+ // `*mut self` and `*const self`
+ token::BinOp(token::Star)
+ if self.look_ahead(1, |t| t.is_mutability()) && is_isolated_self(self, 2) =>
+ {
+ self.bump();
+ self.bump();
+ recover_self_ptr(self)?
+ }
+ // `self` and `self: TYPE`
+ token::Ident(..) if is_isolated_self(self, 0) => {
+ parse_self_possibly_typed(self, Mutability::Not)?
+ }
+ // `mut self` and `mut self: TYPE`
+ token::Ident(..) if is_isolated_mut_self(self, 0) => {
+ self.bump();
+ parse_self_possibly_typed(self, Mutability::Mut)?
+ }
+ _ => return Ok(None),
+ };
+
+ let eself = source_map::respan(eself_lo.to(eself_hi), eself);
+ Ok(Some(Param::from_self(AttrVec::default(), eself, eself_ident)))
+ }
+
+ fn is_named_param(&self) -> bool {
+ let offset = match self.token.kind {
+ token::Interpolated(ref nt) => match **nt {
+ token::NtPat(..) => return self.look_ahead(1, |t| t == &token::Colon),
+ _ => 0,
+ },
+ token::BinOp(token::And) | token::AndAnd => 1,
+ _ if self.token.is_keyword(kw::Mut) => 1,
+ _ => 0,
+ };
+
+ self.look_ahead(offset, |t| t.is_ident())
+ && self.look_ahead(offset + 1, |t| t == &token::Colon)
+ }
+
+ fn recover_first_param(&mut self) -> &'static str {
+ match self
+ .parse_outer_attributes()
+ .and_then(|_| self.parse_self_param())
+ .map_err(|e| e.cancel())
+ {
+ Ok(Some(_)) => "method",
+ _ => "function",
+ }
+ }
+}
+
+enum IsMacroRulesItem {
+ Yes { has_bang: bool },
+ No,
+}
diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs
new file mode 100644
index 000000000..0c523ad22
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/mod.rs
@@ -0,0 +1,1481 @@
+pub mod attr;
+mod attr_wrapper;
+mod diagnostics;
+mod expr;
+mod generics;
+mod item;
+mod nonterminal;
+mod pat;
+mod path;
+mod stmt;
+mod ty;
+
+use crate::lexer::UnmatchedBrace;
+pub use attr_wrapper::AttrWrapper;
+pub use diagnostics::AttemptLocalParseRecovery;
+use diagnostics::Error;
+pub(crate) use item::FnParseMode;
+pub use pat::{CommaRecoveryMode, RecoverColon, RecoverComma};
+pub use path::PathStyle;
+
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter, Nonterminal, Token, TokenKind};
+use rustc_ast::tokenstream::AttributesData;
+use rustc_ast::tokenstream::{self, DelimSpan, Spacing};
+use rustc_ast::tokenstream::{TokenStream, TokenTree};
+use rustc_ast::AttrId;
+use rustc_ast::DUMMY_NODE_ID;
+use rustc_ast::{self as ast, AnonConst, AttrStyle, AttrVec, Const, Extern};
+use rustc_ast::{Async, Expr, ExprKind, MacArgs, MacArgsEq, MacDelimiter, Mutability, StrLit};
+use rustc_ast::{HasAttrs, HasTokens, Unsafe, Visibility, VisibilityKind};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::PResult;
+use rustc_errors::{
+ struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, FatalError, MultiSpan,
+};
+use rustc_session::parse::ParseSess;
+use rustc_span::source_map::{Span, DUMMY_SP};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use tracing::debug;
+
+use std::ops::Range;
+use std::{cmp, mem, slice};
+
+bitflags::bitflags! {
+ struct Restrictions: u8 {
+ const STMT_EXPR = 1 << 0;
+ const NO_STRUCT_LITERAL = 1 << 1;
+ const CONST_EXPR = 1 << 2;
+ const ALLOW_LET = 1 << 3;
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+enum SemiColonMode {
+ Break,
+ Ignore,
+ Comma,
+}
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+enum BlockMode {
+ Break,
+ Ignore,
+}
+
+/// Whether we should force collection of tokens for an AST node,
+/// regardless of whether it has attributes.
+#[derive(Clone, Copy, PartialEq)]
+pub enum ForceCollect {
+ Yes,
+ No,
+}
+
+#[derive(Debug, Eq, PartialEq)]
+pub enum TrailingToken {
+ None,
+ Semi,
+    /// If the trailing token is a comma, capture it;
+    /// otherwise, ignore the trailing token.
+ MaybeComma,
+}
+
+/// Like `maybe_whole_expr`, but for things other than expressions.
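+///
+/// For illustration (usage sketch, mirroring call sites elsewhere in the parser):
+/// `maybe_whole!(self, NtTy, |ty| ty);` returns early with the interpolated type
+/// when the current token is an `Interpolated` `NtTy`.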
+#[macro_export]
+macro_rules! maybe_whole {
+ ($p:expr, $constructor:ident, |$x:ident| $e:expr) => {
+ if let token::Interpolated(nt) = &$p.token.kind {
+ if let token::$constructor(x) = &**nt {
+ let $x = x.clone();
+ $p.bump();
+ return Ok($e);
+ }
+ }
+ };
+}
+
+/// If the next tokens are ill-formed `$ty::`, recover them as `<$ty>::`.
+#[macro_export]
+macro_rules! maybe_recover_from_interpolated_ty_qpath {
+ ($self: expr, $allow_qpath_recovery: expr) => {
+ if $allow_qpath_recovery
+ && $self.look_ahead(1, |t| t == &token::ModSep)
+ && let token::Interpolated(nt) = &$self.token.kind
+ && let token::NtTy(ty) = &**nt
+ {
+ let ty = ty.clone();
+ $self.bump();
+ return $self.maybe_recover_from_bad_qpath_stage_2($self.prev_token.span, ty);
+ }
+ };
+}
+
+#[derive(Clone)]
+pub struct Parser<'a> {
+ pub sess: &'a ParseSess,
+ /// The current token.
+ pub token: Token,
+ /// The spacing for the current token
+ pub token_spacing: Spacing,
+ /// The previous token.
+ pub prev_token: Token,
+ pub capture_cfg: bool,
+ restrictions: Restrictions,
+ expected_tokens: Vec<TokenType>,
+ // Important: This must only be advanced from `bump` to ensure that
+ // `token_cursor.num_next_calls` is updated properly.
+ token_cursor: TokenCursor,
+ desugar_doc_comments: bool,
+ /// This field is used to keep track of how many left angle brackets we have seen. This is
+ /// required in order to detect extra leading left angle brackets (`<` characters) and error
+ /// appropriately.
+ ///
+ /// See the comments in the `parse_path_segment` function for more details.
+ unmatched_angle_bracket_count: u32,
+ max_angle_bracket_count: u32,
+ /// A list of all unclosed delimiters found by the lexer. If an entry is used for error recovery
+ /// it gets removed from here. Every entry left at the end gets emitted as an independent
+ /// error.
+ pub(super) unclosed_delims: Vec<UnmatchedBrace>,
+ last_unexpected_token_span: Option<Span>,
+    /// Span pointing at the `:` for the last type ascription the parser has seen, and whether it
+    /// looked like it could have been a mistyped path or literal (e.g. `Option:Some(42)`).
+ pub last_type_ascription: Option<(Span, bool /* likely path typo */)>,
+ /// If present, this `Parser` is not parsing Rust code but rather a macro call.
+ subparser_name: Option<&'static str>,
+ capture_state: CaptureState,
+    /// This allows us to recover when the user forgets to add braces around
+    /// multiple statements in the closure body.
+ pub current_closure: Option<ClosureSpans>,
+}
+
+// This type is used a lot, e.g. it's cloned when matching many declarative macro rules. Make sure
+// it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(Parser<'_>, 328);
+
+/// Stores span information about a closure.
+#[derive(Clone)]
+pub struct ClosureSpans {
+ pub whole_closure: Span,
+ pub closing_pipe: Span,
+ pub body: Span,
+}
+
+/// Indicates a range of tokens that should be replaced by
+/// the tokens in the provided vector. This is used in two
+/// places during token collection:
+///
+/// 1. During the parsing of an AST node that may have a `#[derive]`
+/// attribute, we parse a nested AST node that has `#[cfg]` or `#[cfg_attr]`
+/// In this case, we use a `ReplaceRange` to replace the entire inner AST node
+/// with `FlatToken::AttrTarget`, allowing us to perform eager cfg-expansion
+/// on an `AttrAnnotatedTokenStream`
+///
+/// 2. When we parse an inner attribute while collecting tokens. We
+/// remove inner attributes from the token stream entirely, and
+/// instead track them through the `attrs` field on the AST node.
+/// This allows us to easily manipulate them (for example, removing
+/// the first macro inner attribute to invoke a proc-macro).
+/// When we create a `TokenStream`, the inner attributes get inserted
+/// into the proper place in the token stream.
+pub type ReplaceRange = (Range<u32>, Vec<(FlatToken, Spacing)>);
+
+/// Controls how we capture tokens. Capturing can be expensive, so we try to
+/// avoid capturing in cases where we will never need an
+/// `AttrAnnotatedTokenStream`.
+#[derive(Copy, Clone)]
+pub enum Capturing {
+ /// We aren't performing any capturing - this is the default mode.
+ No,
+ /// We are capturing tokens
+ Yes,
+}
+
+#[derive(Clone)]
+struct CaptureState {
+ capturing: Capturing,
+ replace_ranges: Vec<ReplaceRange>,
+ inner_attr_ranges: FxHashMap<AttrId, ReplaceRange>,
+}
+
+impl<'a> Drop for Parser<'a> {
+ fn drop(&mut self) {
+ emit_unclosed_delims(&mut self.unclosed_delims, &self.sess);
+ }
+}
+
+#[derive(Clone)]
+struct TokenCursor {
+ // The current (innermost) frame. `frame` and `stack` could be combined,
+ // but it's faster to have them separately to access `frame` directly
+ // rather than via something like `stack.last().unwrap()` or
+ // `stack[stack.len() - 1]`.
+ frame: TokenCursorFrame,
+ // Additional frames that enclose `frame`.
+ stack: Vec<TokenCursorFrame>,
+ desugar_doc_comments: bool,
+ // Counts the number of calls to `{,inlined_}next`.
+ num_next_calls: usize,
+ // During parsing, we may sometimes need to 'unglue' a
+ // glued token into two component tokens
+    // (e.g. '>>' into '>' and '>'), so that the parser
+    // can consume them one at a time. This process
+    // bypasses the normal capturing mechanism
+    // (e.g. `num_next_calls` will not be incremented),
+    // since the 'unglued' tokens do not exist in
+ // the original `TokenStream`.
+ //
+ // If we end up consuming both unglued tokens,
+ // then this is not an issue - we'll end up
+ // capturing the single 'glued' token.
+ //
+ // However, in certain circumstances, we may
+ // want to capture just the first 'unglued' token.
+ // For example, capturing the `Vec<u8>`
+ // in `Option<Vec<u8>>` requires us to unglue
+ // the trailing `>>` token. The `break_last_token`
+ // field is used to track this token - it gets
+ // appended to the captured stream when
+ // we evaluate a `LazyTokenStream`
+ break_last_token: bool,
+}
+
+#[derive(Clone)]
+struct TokenCursorFrame {
+ delim_sp: Option<(Delimiter, DelimSpan)>,
+ tree_cursor: tokenstream::Cursor,
+}
+
+impl TokenCursorFrame {
+ fn new(delim_sp: Option<(Delimiter, DelimSpan)>, tts: TokenStream) -> Self {
+ TokenCursorFrame { delim_sp, tree_cursor: tts.into_trees() }
+ }
+}
+
+impl TokenCursor {
+ fn next(&mut self, desugar_doc_comments: bool) -> (Token, Spacing) {
+ self.inlined_next(desugar_doc_comments)
+ }
+
+ /// This always-inlined version should only be used on hot code paths.
+ #[inline(always)]
+ fn inlined_next(&mut self, desugar_doc_comments: bool) -> (Token, Spacing) {
+ loop {
+ // FIXME: we currently don't return `Delimiter::Invisible` open/close delims. To fix #67062 we will
+ // need to, whereupon the `delim != Delimiter::Invisible` conditions below can be
+ // removed.
+ if let Some(tree) = self.frame.tree_cursor.next_ref() {
+ match tree {
+ &TokenTree::Token(ref token, spacing) => match (desugar_doc_comments, token) {
+ (true, &Token { kind: token::DocComment(_, attr_style, data), span }) => {
+ return self.desugar(attr_style, data, span);
+ }
+ _ => return (token.clone(), spacing),
+ },
+ &TokenTree::Delimited(sp, delim, ref tts) => {
+ // Descend into this delimited sequence and return its open delimiter immediately (unless it is invisible).
+ let frame = TokenCursorFrame::new(Some((delim, sp)), tts.clone());
+ self.stack.push(mem::replace(&mut self.frame, frame));
+ if delim != Delimiter::Invisible {
+ return (Token::new(token::OpenDelim(delim), sp.open), Spacing::Alone);
+ }
+ // No open delimiter to return; continue on to the next iteration.
+ }
+ };
+ } else if let Some(frame) = self.stack.pop() {
+ if let Some((delim, span)) = self.frame.delim_sp && delim != Delimiter::Invisible {
+ self.frame = frame;
+ return (Token::new(token::CloseDelim(delim), span.close), Spacing::Alone);
+ }
+ self.frame = frame;
+ // No close delimiter to return; continue on to the next iteration.
+ } else {
+ return (Token::new(token::Eof, DUMMY_SP), Spacing::Alone);
+ }
+ }
+ }
+
+ fn desugar(&mut self, attr_style: AttrStyle, data: Symbol, span: Span) -> (Token, Spacing) {
+ // Searches for occurrences of `"` followed by a run of `#`s and returns the minimum
+ // number of `#`s required to wrap the text in a raw string literal.
+ let mut num_of_hashes = 0;
+ let mut count = 0;
+ for ch in data.as_str().chars() {
+ count = match ch {
+ '"' => 1,
+ '#' if count > 0 => count + 1,
+ _ => 0,
+ };
+ num_of_hashes = cmp::max(num_of_hashes, count);
+ }
+
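+ // Illustrative example: for the doc text `He said "#hi"`, the loop above computes 2
+ // (the text contains `"#`), so the literal below becomes the raw string
+ // `r##"He said "#hi""##`.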
+ let delim_span = DelimSpan::from_single(span);
+ let body = TokenTree::Delimited(
+ delim_span,
+ Delimiter::Bracket,
+ [
+ TokenTree::token_alone(token::Ident(sym::doc, false), span),
+ TokenTree::token_alone(token::Eq, span),
+ TokenTree::token_alone(
+ TokenKind::lit(token::StrRaw(num_of_hashes), data, None),
+ span,
+ ),
+ ]
+ .into_iter()
+ .collect::<TokenStream>(),
+ );
+
+ self.stack.push(mem::replace(
+ &mut self.frame,
+ TokenCursorFrame::new(
+ None,
+ if attr_style == AttrStyle::Inner {
+ [
+ TokenTree::token_alone(token::Pound, span),
+ TokenTree::token_alone(token::Not, span),
+ body,
+ ]
+ .into_iter()
+ .collect::<TokenStream>()
+ } else {
+ [TokenTree::token_alone(token::Pound, span), body]
+ .into_iter()
+ .collect::<TokenStream>()
+ },
+ ),
+ ));
+
+ self.next(/* desugar_doc_comments */ false)
+ }
+}
+
+#[derive(Debug, Clone, PartialEq)]
+enum TokenType {
+ Token(TokenKind),
+ Keyword(Symbol),
+ Operator,
+ Lifetime,
+ Ident,
+ Path,
+ Type,
+ Const,
+}
+
+impl TokenType {
+ fn to_string(&self) -> String {
+ match *self {
+ TokenType::Token(ref t) => format!("`{}`", pprust::token_kind_to_string(t)),
+ TokenType::Keyword(kw) => format!("`{}`", kw),
+ TokenType::Operator => "an operator".to_string(),
+ TokenType::Lifetime => "lifetime".to_string(),
+ TokenType::Ident => "identifier".to_string(),
+ TokenType::Path => "path".to_string(),
+ TokenType::Type => "type".to_string(),
+ TokenType::Const => "a const expression".to_string(),
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+enum TokenExpectType {
+ Expect,
+ NoExpect,
+}
+
+/// A sequence separator.
+struct SeqSep {
+ /// The separator token.
+ sep: Option<TokenKind>,
+ /// `true` if a trailing separator is allowed.
+ trailing_sep_allowed: bool,
+}
+
+impl SeqSep {
+ fn trailing_allowed(t: TokenKind) -> SeqSep {
+ SeqSep { sep: Some(t), trailing_sep_allowed: true }
+ }
+
+ fn none() -> SeqSep {
+ SeqSep { sep: None, trailing_sep_allowed: false }
+ }
+}
+
+pub enum FollowedByType {
+ Yes,
+ No,
+}
+
+fn token_descr_opt(token: &Token) -> Option<&'static str> {
+ Some(match token.kind {
+ _ if token.is_special_ident() => "reserved identifier",
+ _ if token.is_used_keyword() => "keyword",
+ _ if token.is_unused_keyword() => "reserved keyword",
+ token::DocComment(..) => "doc comment",
+ _ => return None,
+ })
+}
+
+pub(super) fn token_descr(token: &Token) -> String {
+ let token_str = pprust::token_to_string(token);
+ match token_descr_opt(token) {
+ Some(prefix) => format!("{} `{}`", prefix, token_str),
+ _ => format!("`{}`", token_str),
+ }
+}
+
+impl<'a> Parser<'a> {
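+ /// Constructs a new parser over `tokens`. A minimal illustrative sketch of driving it
+ /// (assumes a `ParseSess` and a `TokenStream` are already in hand):
+ ///
+ /// ```ignore (illustrative - requires internal compiler state)
+ /// let mut parser = Parser::new(&sess, tokens, false, None);
+ /// let ident = parser.parse_ident()?;
+ /// ```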
+ pub fn new(
+ sess: &'a ParseSess,
+ tokens: TokenStream,
+ desugar_doc_comments: bool,
+ subparser_name: Option<&'static str>,
+ ) -> Self {
+ let mut parser = Parser {
+ sess,
+ token: Token::dummy(),
+ token_spacing: Spacing::Alone,
+ prev_token: Token::dummy(),
+ capture_cfg: false,
+ restrictions: Restrictions::empty(),
+ expected_tokens: Vec::new(),
+ token_cursor: TokenCursor {
+ frame: TokenCursorFrame::new(None, tokens),
+ stack: Vec::new(),
+ num_next_calls: 0,
+ desugar_doc_comments,
+ break_last_token: false,
+ },
+ desugar_doc_comments,
+ unmatched_angle_bracket_count: 0,
+ max_angle_bracket_count: 0,
+ unclosed_delims: Vec::new(),
+ last_unexpected_token_span: None,
+ last_type_ascription: None,
+ subparser_name,
+ capture_state: CaptureState {
+ capturing: Capturing::No,
+ replace_ranges: Vec::new(),
+ inner_attr_ranges: Default::default(),
+ },
+ current_closure: None,
+ };
+
+ // Make parser point to the first token.
+ parser.bump();
+
+ parser
+ }
+
+ pub fn unexpected<T>(&mut self) -> PResult<'a, T> {
+ match self.expect_one_of(&[], &[]) {
+ Err(e) => Err(e),
+ // We can get `Ok(true)` from `recover_closing_delimiter`
+ // which is called in `expected_one_of_not_found`.
+ Ok(_) => FatalError.raise(),
+ }
+ }
+
+ /// Expects and consumes the token `t`. Signals an error if the next token is not `t`.
+ pub fn expect(&mut self, t: &TokenKind) -> PResult<'a, bool /* recovered */> {
+ if self.expected_tokens.is_empty() {
+ if self.token == *t {
+ self.bump();
+ Ok(false)
+ } else {
+ self.unexpected_try_recover(t)
+ }
+ } else {
+ self.expect_one_of(slice::from_ref(t), &[])
+ }
+ }
+
+ /// Expects the next token to be an edible or inedible token. If edible, consumes it;
+ /// if inedible, returns without consuming anything. Signals a fatal error if the next
+ /// token is unexpected.
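+ ///
+ /// Illustrative example: a caller parsing a comma-separated list inside braces might pass
+ /// `edible = &[token::Comma]` and `inedible = &[token::CloseDelim(Delimiter::Brace)]`,
+ /// so that a `,` is consumed here while a closing `}` is left for the caller to eat.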
+ pub fn expect_one_of(
+ &mut self,
+ edible: &[TokenKind],
+ inedible: &[TokenKind],
+ ) -> PResult<'a, bool /* recovered */> {
+ if edible.contains(&self.token.kind) {
+ self.bump();
+ Ok(false)
+ } else if inedible.contains(&self.token.kind) {
+ // leave it in the input
+ Ok(false)
+ } else if self.last_unexpected_token_span == Some(self.token.span) {
+ FatalError.raise();
+ } else {
+ self.expected_one_of_not_found(edible, inedible)
+ }
+ }
+
+ // Public for rustfmt usage.
+ pub fn parse_ident(&mut self) -> PResult<'a, Ident> {
+ self.parse_ident_common(true)
+ }
+
+ fn ident_or_err(&mut self) -> PResult<'a, (Ident, /* is_raw */ bool)> {
+ self.token.ident().ok_or_else(|| match self.prev_token.kind {
+ TokenKind::DocComment(..) => {
+ self.span_err(self.prev_token.span, Error::UselessDocComment)
+ }
+ _ => self.expected_ident_found(),
+ })
+ }
+
+ fn parse_ident_common(&mut self, recover: bool) -> PResult<'a, Ident> {
+ let (ident, is_raw) = self.ident_or_err()?;
+ if !is_raw && ident.is_reserved() {
+ let mut err = self.expected_ident_found();
+ if recover {
+ err.emit();
+ } else {
+ return Err(err);
+ }
+ }
+ self.bump();
+ Ok(ident)
+ }
+
+ /// Checks if the next token is `tok`, and returns `true` if so.
+ ///
+ /// This method will automatically add `tok` to `expected_tokens` if `tok` is not
+ /// encountered.
+ fn check(&mut self, tok: &TokenKind) -> bool {
+ let is_present = self.token == *tok;
+ if !is_present {
+ self.expected_tokens.push(TokenType::Token(tok.clone()));
+ }
+ is_present
+ }
+
+ fn check_noexpect(&self, tok: &TokenKind) -> bool {
+ self.token == *tok
+ }
+
+ /// Consumes a token `tok` if it exists. Returns whether the given token was present.
+ ///
+ /// The main purpose of this function is to reduce the clutter in the suggestions list
+ /// that the normal `eat` method could introduce in some cases.
+ pub fn eat_noexpect(&mut self, tok: &TokenKind) -> bool {
+ let is_present = self.check_noexpect(tok);
+ if is_present {
+ self.bump()
+ }
+ is_present
+ }
+
+ /// Consumes a token `tok` if it exists. Returns whether the given token was present.
+ pub fn eat(&mut self, tok: &TokenKind) -> bool {
+ let is_present = self.check(tok);
+ if is_present {
+ self.bump()
+ }
+ is_present
+ }
+
+ /// If the next token is the given keyword, returns `true` without eating it.
+ /// An expectation is also added for diagnostics purposes.
+ fn check_keyword(&mut self, kw: Symbol) -> bool {
+ self.expected_tokens.push(TokenType::Keyword(kw));
+ self.token.is_keyword(kw)
+ }
+
+ /// If the next token is the given keyword, eats it and returns `true`.
+ /// Otherwise, returns `false`. An expectation is also added for diagnostics purposes.
+ // Public for rustfmt usage.
+ pub fn eat_keyword(&mut self, kw: Symbol) -> bool {
+ if self.check_keyword(kw) {
+ self.bump();
+ true
+ } else {
+ false
+ }
+ }
+
+ fn eat_keyword_noexpect(&mut self, kw: Symbol) -> bool {
+ if self.token.is_keyword(kw) {
+ self.bump();
+ true
+ } else {
+ false
+ }
+ }
+
+ /// If the next token is the given keyword, eats it.
+ /// Otherwise, signals an error.
+ fn expect_keyword(&mut self, kw: Symbol) -> PResult<'a, ()> {
+ if !self.eat_keyword(kw) { self.unexpected() } else { Ok(()) }
+ }
+
+ /// Is the given keyword `kw` followed by a non-reserved identifier?
+ fn is_kw_followed_by_ident(&self, kw: Symbol) -> bool {
+ self.token.is_keyword(kw) && self.look_ahead(1, |t| t.is_ident() && !t.is_reserved_ident())
+ }
+
+ fn check_or_expected(&mut self, ok: bool, typ: TokenType) -> bool {
+ if ok {
+ true
+ } else {
+ self.expected_tokens.push(typ);
+ false
+ }
+ }
+
+ fn check_ident(&mut self) -> bool {
+ self.check_or_expected(self.token.is_ident(), TokenType::Ident)
+ }
+
+ fn check_path(&mut self) -> bool {
+ self.check_or_expected(self.token.is_path_start(), TokenType::Path)
+ }
+
+ fn check_type(&mut self) -> bool {
+ self.check_or_expected(self.token.can_begin_type(), TokenType::Type)
+ }
+
+ fn check_const_arg(&mut self) -> bool {
+ self.check_or_expected(self.token.can_begin_const_arg(), TokenType::Const)
+ }
+
+ fn check_inline_const(&self, dist: usize) -> bool {
+ self.is_keyword_ahead(dist, &[kw::Const])
+ && self.look_ahead(dist + 1, |t| match t.kind {
+ token::Interpolated(ref nt) => matches!(**nt, token::NtBlock(..)),
+ token::OpenDelim(Delimiter::Brace) => true,
+ _ => false,
+ })
+ }
+
+ /// Checks whether the next token is either `+` or `+=`,
+ /// returning `true` if so and `false` otherwise.
+ fn check_plus(&mut self) -> bool {
+ self.check_or_expected(
+ self.token.is_like_plus(),
+ TokenType::Token(token::BinOp(token::Plus)),
+ )
+ }
+
+ /// Eats the expected token if it's present, possibly breaking
+ /// compound tokens like multi-character operators in the process.
+ /// Returns `true` if the token was eaten.
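+ ///
+ /// Illustrative example: when the current token is `>>` (as at the end of `Vec<Vec<u8>>`),
+ /// `break_and_eat(token::Gt)` eats a single `>` and leaves the remaining `>` as the
+ /// current token.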
+ fn break_and_eat(&mut self, expected: TokenKind) -> bool {
+ if self.token.kind == expected {
+ self.bump();
+ return true;
+ }
+ match self.token.kind.break_two_token_op() {
+ Some((first, second)) if first == expected => {
+ let first_span = self.sess.source_map().start_point(self.token.span);
+ let second_span = self.token.span.with_lo(first_span.hi());
+ self.token = Token::new(first, first_span);
+ // Keep track of this token - if we end token capturing now,
+ // we'll want to append this token to the captured stream.
+ //
+ // If we consume any additional tokens, then this token
+ // is not needed (we'll capture the entire 'glued' token),
+ // and `bump` will reset this field to `false`.
+ self.token_cursor.break_last_token = true;
+ // Use the spacing of the glued token as the spacing
+ // of the unglued second token.
+ self.bump_with((Token::new(second, second_span), self.token_spacing));
+ true
+ }
+ _ => {
+ self.expected_tokens.push(TokenType::Token(expected));
+ false
+ }
+ }
+ }
+
+ /// Eats `+`, possibly breaking tokens like `+=` in the process.
+ fn eat_plus(&mut self) -> bool {
+ self.break_and_eat(token::BinOp(token::Plus))
+ }
+
+ /// Eats `&`, possibly breaking tokens like `&&` in the process.
+ /// Signals an error if `&` is not eaten.
+ fn expect_and(&mut self) -> PResult<'a, ()> {
+ if self.break_and_eat(token::BinOp(token::And)) { Ok(()) } else { self.unexpected() }
+ }
+
+ /// Eats `|`, possibly breaking tokens like `||` in the process.
+ /// Signals an error if `|` was not eaten.
+ fn expect_or(&mut self) -> PResult<'a, ()> {
+ if self.break_and_eat(token::BinOp(token::Or)) { Ok(()) } else { self.unexpected() }
+ }
+
+ /// Eats `<`, possibly breaking tokens like `<<` in the process.
+ fn eat_lt(&mut self) -> bool {
+ let ate = self.break_and_eat(token::Lt);
+ if ate {
+ // See doc comment for `unmatched_angle_bracket_count`.
+ self.unmatched_angle_bracket_count += 1;
+ self.max_angle_bracket_count += 1;
+ debug!("eat_lt: (increment) count={:?}", self.unmatched_angle_bracket_count);
+ }
+ ate
+ }
+
+ /// Eats `<`, possibly breaking tokens like `<<` in the process.
+ /// Signals an error if `<` was not eaten.
+ fn expect_lt(&mut self) -> PResult<'a, ()> {
+ if self.eat_lt() { Ok(()) } else { self.unexpected() }
+ }
+
+ /// Eats `>`, possibly breaking tokens like `>>` in the process.
+ /// Signals an error if `>` was not eaten.
+ fn expect_gt(&mut self) -> PResult<'a, ()> {
+ if self.break_and_eat(token::Gt) {
+ // See doc comment for `unmatched_angle_bracket_count`.
+ if self.unmatched_angle_bracket_count > 0 {
+ self.unmatched_angle_bracket_count -= 1;
+ debug!("expect_gt: (decrement) count={:?}", self.unmatched_angle_bracket_count);
+ }
+ Ok(())
+ } else {
+ self.unexpected()
+ }
+ }
+
+ fn expect_any_with_type(&mut self, kets: &[&TokenKind], expect: TokenExpectType) -> bool {
+ kets.iter().any(|k| match expect {
+ TokenExpectType::Expect => self.check(k),
+ TokenExpectType::NoExpect => self.token == **k,
+ })
+ }
+
+ fn parse_seq_to_before_tokens<T>(
+ &mut self,
+ kets: &[&TokenKind],
+ sep: SeqSep,
+ expect: TokenExpectType,
+ mut f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
+ ) -> PResult<'a, (Vec<T>, bool /* trailing */, bool /* recovered */)> {
+ let mut first = true;
+ let mut recovered = false;
+ let mut trailing = false;
+ let mut v = vec![];
+ let unclosed_delims = !self.unclosed_delims.is_empty();
+
+ while !self.expect_any_with_type(kets, expect) {
+ if let token::CloseDelim(..) | token::Eof = self.token.kind {
+ break;
+ }
+ if let Some(ref t) = sep.sep {
+ if first {
+ first = false;
+ } else {
+ match self.expect(t) {
+ Ok(false) => {
+ self.current_closure.take();
+ }
+ Ok(true) => {
+ self.current_closure.take();
+ recovered = true;
+ break;
+ }
+ Err(mut expect_err) => {
+ let sp = self.prev_token.span.shrink_to_hi();
+ let token_str = pprust::token_kind_to_string(t);
+
+ match self.current_closure.take() {
+ Some(closure_spans) if self.token.kind == TokenKind::Semi => {
+ // Finding a semicolon instead of a comma
+ // after a closure body indicates that the
+ // closure body may be a block but the user
+ // forgot to put braces around its
+ // statements.
+
+ self.recover_missing_braces_around_closure_body(
+ closure_spans,
+ expect_err,
+ )?;
+
+ continue;
+ }
+
+ _ => {
+ // Attempt to keep parsing if it was a similar separator.
+ if let Some(ref tokens) = t.similar_tokens() {
+ if tokens.contains(&self.token.kind) && !unclosed_delims {
+ self.bump();
+ }
+ }
+ }
+ }
+
+ // If this was a missing `@` in a binding pattern, bail
+ // with a suggestion.
+ // https://github.com/rust-lang/rust/issues/72373
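+ // Illustrative example: `let [a, rest ..] = v;` is recovered here by
+ // suggesting `rest @ ..`.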
+ if self.prev_token.is_ident() && self.token.kind == token::DotDot {
+ let msg = format!(
+ "if you meant to bind the contents of \
+ the rest of the array pattern into `{}`, use `@`",
+ pprust::token_to_string(&self.prev_token)
+ );
+ expect_err
+ .span_suggestion_verbose(
+ self.prev_token.span.shrink_to_hi().until(self.token.span),
+ &msg,
+ " @ ",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ break;
+ }
+
+ // Attempt to keep parsing if it was an omitted separator.
+ match f(self) {
+ Ok(t) => {
+ // Parsed successfully; therefore most probably the code is only
+ // missing a separator.
+ expect_err
+ .span_suggestion_short(
+ sp,
+ &format!("missing `{}`", token_str),
+ token_str,
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+
+ v.push(t);
+ continue;
+ }
+ Err(e) => {
+ // Parsing failed; therefore it must be something more serious
+ // than just a missing separator.
+ expect_err.emit();
+
+ e.cancel();
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ if sep.trailing_sep_allowed && self.expect_any_with_type(kets, expect) {
+ trailing = true;
+ break;
+ }
+
+ let t = f(self)?;
+ v.push(t);
+ }
+
+ Ok((v, trailing, recovered))
+ }
+
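+ /// Recovers when a closure body containing statements was written without braces, e.g.
+ /// (illustrative) `iter.map(|x| x + 1; x)`: we point at the `;` and suggest wrapping the
+ /// body as `iter.map(|x| { x + 1; x })`.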
+ fn recover_missing_braces_around_closure_body(
+ &mut self,
+ closure_spans: ClosureSpans,
+ mut expect_err: DiagnosticBuilder<'_, ErrorGuaranteed>,
+ ) -> PResult<'a, ()> {
+ let initial_semicolon = self.token.span;
+
+ while self.eat(&TokenKind::Semi) {
+ let _ = self.parse_stmt(ForceCollect::Yes)?;
+ }
+
+ expect_err.set_primary_message(
+ "closure bodies that contain statements must be surrounded by braces",
+ );
+
+ let preceding_pipe_span = closure_spans.closing_pipe;
+ let following_token_span = self.token.span;
+
+ let mut first_note = MultiSpan::from(vec![initial_semicolon]);
+ first_note.push_span_label(
+ initial_semicolon,
+ "this `;` turns the preceding closure into a statement",
+ );
+ first_note.push_span_label(
+ closure_spans.body,
+ "this expression is a statement because of the trailing semicolon",
+ );
+ expect_err.span_note(first_note, "statement found outside of a block");
+
+ let mut second_note = MultiSpan::from(vec![closure_spans.whole_closure]);
+ second_note.push_span_label(closure_spans.whole_closure, "this is the parsed closure...");
+ second_note.push_span_label(
+ following_token_span,
+ "...but likely you meant the closure to end here",
+ );
+ expect_err.span_note(second_note, "the closure body may be incorrectly delimited");
+
+ expect_err.set_span(vec![preceding_pipe_span, following_token_span]);
+
+ let opening_suggestion_str = " {".to_string();
+ let closing_suggestion_str = "}".to_string();
+
+ expect_err.multipart_suggestion(
+ "try adding braces",
+ vec![
+ (preceding_pipe_span.shrink_to_hi(), opening_suggestion_str),
+ (following_token_span.shrink_to_lo(), closing_suggestion_str),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+
+ expect_err.emit();
+
+ Ok(())
+ }
+
+ /// Parses a sequence, not including the closing delimiter. The function
+ /// `f` must consume tokens until reaching the next separator or
+ /// closing bracket.
+ fn parse_seq_to_before_end<T>(
+ &mut self,
+ ket: &TokenKind,
+ sep: SeqSep,
+ f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
+ ) -> PResult<'a, (Vec<T>, bool, bool)> {
+ self.parse_seq_to_before_tokens(&[ket], sep, TokenExpectType::Expect, f)
+ }
+
+ /// Parses a sequence, including the closing delimiter. The function
+ /// `f` must consume tokens until reaching the next separator or
+ /// closing bracket.
+ fn parse_seq_to_end<T>(
+ &mut self,
+ ket: &TokenKind,
+ sep: SeqSep,
+ f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
+ ) -> PResult<'a, (Vec<T>, bool /* trailing */)> {
+ let (val, trailing, recovered) = self.parse_seq_to_before_end(ket, sep, f)?;
+ if !recovered {
+ self.eat(ket);
+ }
+ Ok((val, trailing))
+ }
+
+ /// Parses a sequence, including the closing delimiter. The function
+ /// `f` must consume tokens until reaching the next separator or
+ /// closing bracket.
+ fn parse_unspanned_seq<T>(
+ &mut self,
+ bra: &TokenKind,
+ ket: &TokenKind,
+ sep: SeqSep,
+ f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
+ ) -> PResult<'a, (Vec<T>, bool)> {
+ self.expect(bra)?;
+ self.parse_seq_to_end(ket, sep, f)
+ }
+
+ fn parse_delim_comma_seq<T>(
+ &mut self,
+ delim: Delimiter,
+ f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
+ ) -> PResult<'a, (Vec<T>, bool)> {
+ self.parse_unspanned_seq(
+ &token::OpenDelim(delim),
+ &token::CloseDelim(delim),
+ SeqSep::trailing_allowed(token::Comma),
+ f,
+ )
+ }
+
+ fn parse_paren_comma_seq<T>(
+ &mut self,
+ f: impl FnMut(&mut Parser<'a>) -> PResult<'a, T>,
+ ) -> PResult<'a, (Vec<T>, bool)> {
+ self.parse_delim_comma_seq(Delimiter::Parenthesis, f)
+ }
+
+ /// Advance the parser by one token using the provided token as the next one.
+ fn bump_with(&mut self, next: (Token, Spacing)) {
+ self.inlined_bump_with(next)
+ }
+
+ /// This always-inlined version should only be used on hot code paths.
+ #[inline(always)]
+ fn inlined_bump_with(&mut self, (next_token, next_spacing): (Token, Spacing)) {
+ // Update the current and previous tokens.
+ self.prev_token = mem::replace(&mut self.token, next_token);
+ self.token_spacing = next_spacing;
+
+ // Diagnostics.
+ self.expected_tokens.clear();
+ }
+
+ /// Advance the parser by one token.
+ pub fn bump(&mut self) {
+ // Note: destructuring here would give nicer code, but it was found in #96210 to be slower
+ // than `.0`/`.1` access.
+ let mut next = self.token_cursor.inlined_next(self.desugar_doc_comments);
+ self.token_cursor.num_next_calls += 1;
+ // We've retrieved a token from the underlying
+ // cursor, so we no longer need to worry about
+ // an unglued token. See `break_and_eat` for more details.
+ self.token_cursor.break_last_token = false;
+ if next.0.span.is_dummy() {
+ // Tweak the location for better diagnostics, but keep syntactic context intact.
+ let fallback_span = self.token.span;
+ next.0.span = fallback_span.with_ctxt(next.0.span.ctxt());
+ }
+ debug_assert!(!matches!(
+ next.0.kind,
+ token::OpenDelim(Delimiter::Invisible) | token::CloseDelim(Delimiter::Invisible)
+ ));
+ self.inlined_bump_with(next)
+ }
+
+ /// Looks ahead `dist` tokens past `self.token`, passing the token found there to `looker`.
+ /// When `dist == 0`, the current token itself is looked at.
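+ ///
+ /// Illustrative example: with the remaining input `+ 1` and `self.token` at `+`,
+ /// `look_ahead(1, |t| t.clone())` observes the `1` without advancing the parser.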
+ pub fn look_ahead<R>(&self, dist: usize, looker: impl FnOnce(&Token) -> R) -> R {
+ if dist == 0 {
+ return looker(&self.token);
+ }
+
+ let frame = &self.token_cursor.frame;
+ if let Some((delim, span)) = frame.delim_sp && delim != Delimiter::Invisible {
+ let all_normal = (0..dist).all(|i| {
+ let token = frame.tree_cursor.look_ahead(i);
+ !matches!(token, Some(TokenTree::Delimited(_, Delimiter::Invisible, _)))
+ });
+ if all_normal {
+ return match frame.tree_cursor.look_ahead(dist - 1) {
+ Some(tree) => match tree {
+ TokenTree::Token(token, _) => looker(token),
+ TokenTree::Delimited(dspan, delim, _) => {
+ looker(&Token::new(token::OpenDelim(*delim), dspan.open))
+ }
+ },
+ None => looker(&Token::new(token::CloseDelim(delim), span.close)),
+ };
+ }
+ }
+
+ let mut cursor = self.token_cursor.clone();
+ let mut i = 0;
+ let mut token = Token::dummy();
+ while i < dist {
+ token = cursor.next(/* desugar_doc_comments */ false).0;
+ if matches!(
+ token.kind,
+ token::OpenDelim(Delimiter::Invisible) | token::CloseDelim(Delimiter::Invisible)
+ ) {
+ continue;
+ }
+ i += 1;
+ }
+ return looker(&token);
+ }
+
+ /// Returns whether any of the given keywords are `dist` tokens ahead of the current one.
+ fn is_keyword_ahead(&self, dist: usize, kws: &[Symbol]) -> bool {
+ self.look_ahead(dist, |t| kws.iter().any(|&kw| t.is_keyword(kw)))
+ }
+
+ /// Parses asyncness: `async` or nothing.
+ fn parse_asyncness(&mut self) -> Async {
+ if self.eat_keyword(kw::Async) {
+ let span = self.prev_token.uninterpolated_span();
+ Async::Yes { span, closure_id: DUMMY_NODE_ID, return_impl_trait_id: DUMMY_NODE_ID }
+ } else {
+ Async::No
+ }
+ }
+
+ /// Parses unsafety: `unsafe` or nothing.
+ fn parse_unsafety(&mut self) -> Unsafe {
+ if self.eat_keyword(kw::Unsafe) {
+ Unsafe::Yes(self.prev_token.uninterpolated_span())
+ } else {
+ Unsafe::No
+ }
+ }
+
+ /// Parses constness: `const` or nothing.
+ fn parse_constness(&mut self) -> Const {
+ // Avoid parsing const blocks as const items.
+ if self.look_ahead(1, |t| t != &token::OpenDelim(Delimiter::Brace))
+ && self.eat_keyword(kw::Const)
+ {
+ Const::Yes(self.prev_token.uninterpolated_span())
+ } else {
+ Const::No
+ }
+ }
+
+ /// Parses inline const expressions.
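+ ///
+ /// For example (illustrative): `const { 1 + 2 }` in expression position, or, when `pat`
+ /// is `true`, a `const { N }` pattern. Both forms are feature-gated here.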
+ fn parse_const_block(&mut self, span: Span, pat: bool) -> PResult<'a, P<Expr>> {
+ if pat {
+ self.sess.gated_spans.gate(sym::inline_const_pat, span);
+ } else {
+ self.sess.gated_spans.gate(sym::inline_const, span);
+ }
+ self.eat_keyword(kw::Const);
+ let (attrs, blk) = self.parse_inner_attrs_and_block()?;
+ let anon_const = AnonConst {
+ id: DUMMY_NODE_ID,
+ value: self.mk_expr(blk.span, ExprKind::Block(blk, None), AttrVec::new()),
+ };
+ let blk_span = anon_const.value.span;
+ Ok(self.mk_expr(span.to(blk_span), ExprKind::ConstBlock(anon_const), AttrVec::from(attrs)))
+ }
+
+ /// Parses mutability (`mut` or nothing).
+ fn parse_mutability(&mut self) -> Mutability {
+ if self.eat_keyword(kw::Mut) { Mutability::Mut } else { Mutability::Not }
+ }
+
+ /// Possibly parses mutability (`const` or `mut`).
+ fn parse_const_or_mut(&mut self) -> Option<Mutability> {
+ if self.eat_keyword(kw::Mut) {
+ Some(Mutability::Mut)
+ } else if self.eat_keyword(kw::Const) {
+ Some(Mutability::Not)
+ } else {
+ None
+ }
+ }
+
+ fn parse_field_name(&mut self) -> PResult<'a, Ident> {
+ if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) = self.token.kind
+ {
+ self.expect_no_suffix(self.token.span, "a tuple index", suffix);
+ self.bump();
+ Ok(Ident::new(symbol, self.prev_token.span))
+ } else {
+ self.parse_ident_common(true)
+ }
+ }
+
+ fn parse_mac_args(&mut self) -> PResult<'a, P<MacArgs>> {
+ self.parse_mac_args_common(true).map(P)
+ }
+
+ fn parse_attr_args(&mut self) -> PResult<'a, MacArgs> {
+ self.parse_mac_args_common(false)
+ }
+
+ fn parse_mac_args_common(&mut self, delimited_only: bool) -> PResult<'a, MacArgs> {
+ Ok(
+ if self.check(&token::OpenDelim(Delimiter::Parenthesis))
+ || self.check(&token::OpenDelim(Delimiter::Bracket))
+ || self.check(&token::OpenDelim(Delimiter::Brace))
+ {
+ match self.parse_token_tree() {
+ TokenTree::Delimited(dspan, delim, tokens) =>
+ // We've confirmed above that there is a delimiter, so unwrapping is OK.
+ {
+ MacArgs::Delimited(dspan, MacDelimiter::from_token(delim).unwrap(), tokens)
+ }
+ _ => unreachable!(),
+ }
+ } else if !delimited_only {
+ if self.eat(&token::Eq) {
+ let eq_span = self.prev_token.span;
+ MacArgs::Eq(eq_span, MacArgsEq::Ast(self.parse_expr_force_collect()?))
+ } else {
+ MacArgs::Empty
+ }
+ } else {
+ return self.unexpected();
+ },
+ )
+ }
+
+ fn parse_or_use_outer_attributes(
+ &mut self,
+ already_parsed_attrs: Option<AttrWrapper>,
+ ) -> PResult<'a, AttrWrapper> {
+ if let Some(attrs) = already_parsed_attrs {
+ Ok(attrs)
+ } else {
+ self.parse_outer_attributes()
+ }
+ }
+
+ /// Parses a single token tree from the input.
+ pub(crate) fn parse_token_tree(&mut self) -> TokenTree {
+ match self.token.kind {
+ token::OpenDelim(..) => {
+ // Grab the tokens from this frame.
+ let frame = &self.token_cursor.frame;
+ let stream = frame.tree_cursor.stream.clone();
+ let (delim, span) = frame.delim_sp.unwrap();
+
+ // Advance the token cursor through the entire delimited
+ // sequence. After getting the `OpenDelim` we are *within* the
+ // delimited sequence, i.e. at depth `d`. After getting the
+ // matching `CloseDelim` we are *after* the delimited sequence,
+ // i.e. at depth `d - 1`.
+ let target_depth = self.token_cursor.stack.len() - 1;
+ loop {
+ // Advance one token at a time, so `TokenCursor::next()`
+ // can capture these tokens if necessary.
+ self.bump();
+ if self.token_cursor.stack.len() == target_depth {
+ debug_assert!(matches!(self.token.kind, token::CloseDelim(_)));
+ break;
+ }
+ }
+
+ // Consume close delimiter
+ self.bump();
+ TokenTree::Delimited(span, delim, stream)
+ }
+ token::CloseDelim(_) | token::Eof => unreachable!(),
+ _ => {
+ self.bump();
+ TokenTree::Token(self.prev_token.clone(), Spacing::Alone)
+ }
+ }
+ }
+
+ /// Parses a stream of tokens into a list of `TokenTree`s, up to EOF.
+ pub fn parse_all_token_trees(&mut self) -> PResult<'a, Vec<TokenTree>> {
+ let mut tts = Vec::new();
+ while self.token != token::Eof {
+ tts.push(self.parse_token_tree());
+ }
+ Ok(tts)
+ }
+
+ pub fn parse_tokens(&mut self) -> TokenStream {
+ let mut result = Vec::new();
+ loop {
+ match self.token.kind {
+ token::Eof | token::CloseDelim(..) => break,
+ _ => result.push(self.parse_token_tree()),
+ }
+ }
+ TokenStream::new(result)
+ }
+
+ /// Evaluates the closure with restrictions in place.
+ ///
+ /// After the closure is evaluated, restrictions are reset.
+ fn with_res<T>(&mut self, res: Restrictions, f: impl FnOnce(&mut Self) -> T) -> T {
+ let old = self.restrictions;
+ self.restrictions = res;
+ let res = f(self);
+ self.restrictions = old;
+ res
+ }
+
+ /// Parses `pub` and `pub(in path)` plus shortcuts `pub(crate)` for `pub(in crate)`, `pub(self)`
+ /// for `pub(in self)` and `pub(super)` for `pub(in super)`.
+ /// If the following element can't be a tuple (i.e., it's a function definition), then
+ /// it's not a tuple struct field, and the contents within the parentheses aren't valid,
+ /// so emit a proper diagnostic.
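+ ///
+ /// Illustrative forms accepted here:
+ ///
+ /// ```text
+ /// pub            -> VisibilityKind::Public
+ /// pub(crate)     -> VisibilityKind::Restricted
+ /// pub(in a::b)   -> VisibilityKind::Restricted
+ /// (none)         -> VisibilityKind::Inherited
+ /// ```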
+ // Public for rustfmt usage.
+ pub fn parse_visibility(&mut self, fbt: FollowedByType) -> PResult<'a, Visibility> {
+ maybe_whole!(self, NtVis, |x| x.into_inner());
+
+ if !self.eat_keyword(kw::Pub) {
+ // We need a span for our `Spanned<VisibilityKind>`, but there's inherently no
+ // keyword to grab a span from for inherited visibility; an empty span at the
+ // beginning of the current token would seem to be the "Schelling span".
+ return Ok(Visibility {
+ span: self.token.span.shrink_to_lo(),
+ kind: VisibilityKind::Inherited,
+ tokens: None,
+ });
+ }
+ let lo = self.prev_token.span;
+
+ if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ // We don't `self.bump()` the `(` yet because this might be a struct definition where
+ // `()` or a tuple might be allowed. For example, `struct Struct(pub (), pub (usize));`.
+ // Because of this, we only `bump` the `(` if we're assured it is appropriate to do so
+ // by the following tokens.
+ if self.is_keyword_ahead(1, &[kw::In]) {
+ // Parse `pub(in path)`.
+ self.bump(); // `(`
+ self.bump(); // `in`
+ let path = self.parse_path(PathStyle::Mod)?; // `path`
+ self.expect(&token::CloseDelim(Delimiter::Parenthesis))?; // `)`
+ let vis = VisibilityKind::Restricted { path: P(path), id: ast::DUMMY_NODE_ID };
+ return Ok(Visibility {
+ span: lo.to(self.prev_token.span),
+ kind: vis,
+ tokens: None,
+ });
+ } else if self.look_ahead(2, |t| t == &token::CloseDelim(Delimiter::Parenthesis))
+ && self.is_keyword_ahead(1, &[kw::Crate, kw::Super, kw::SelfLower])
+ {
+ // Parse `pub(crate)`, `pub(self)`, or `pub(super)`.
+ self.bump(); // `(`
+ let path = self.parse_path(PathStyle::Mod)?; // `crate`/`super`/`self`
+ self.expect(&token::CloseDelim(Delimiter::Parenthesis))?; // `)`
+ let vis = VisibilityKind::Restricted { path: P(path), id: ast::DUMMY_NODE_ID };
+ return Ok(Visibility {
+ span: lo.to(self.prev_token.span),
+ kind: vis,
+ tokens: None,
+ });
+ } else if let FollowedByType::No = fbt {
+ // Provide this diagnostic if a type cannot follow;
+ // in particular, if this is not a tuple struct.
+ self.recover_incorrect_vis_restriction()?;
+ // Emit diagnostic, but continue with public visibility.
+ }
+ }
+
+ Ok(Visibility { span: lo, kind: VisibilityKind::Public, tokens: None })
+ }
+
+ /// Recovery for e.g. `pub(something) fn ...` or `struct X { pub(something) y: Z }`
+ fn recover_incorrect_vis_restriction(&mut self) -> PResult<'a, ()> {
+ self.bump(); // `(`
+ let path = self.parse_path(PathStyle::Mod)?;
+ self.expect(&token::CloseDelim(Delimiter::Parenthesis))?; // `)`
+
+ let msg = "incorrect visibility restriction";
+ let suggestion = r##"some possible visibility restrictions are:
+`pub(crate)`: visible only in the current crate
+`pub(super)`: visible only in the current module's parent
+`pub(in path::to::module)`: visible only in the specified path"##;
+
+ let path_str = pprust::path_to_string(&path);
+
+ struct_span_err!(self.sess.span_diagnostic, path.span, E0704, "{}", msg)
+ .help(suggestion)
+ .span_suggestion(
+ path.span,
+ &format!("make this visible only to module `{}` with `in`", path_str),
+ format!("in {}", path_str),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ Ok(())
+ }
+
+ /// Parses `extern string_literal?`.
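+ ///
+ /// For example (illustrative): `extern "C"` yields an explicit ABI, a bare `extern`
+ /// yields the implicit default ABI, and the absence of `extern` yields `Extern::None`.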
+ fn parse_extern(&mut self) -> Extern {
+ if self.eat_keyword(kw::Extern) {
+ let mut extern_span = self.prev_token.span;
+ let abi = self.parse_abi();
+ if let Some(abi) = abi {
+ extern_span = extern_span.to(abi.span);
+ }
+ Extern::from_abi(abi, extern_span)
+ } else {
+ Extern::None
+ }
+ }
+
+ /// Parses a string literal as an ABI spec.
+ fn parse_abi(&mut self) -> Option<StrLit> {
+ match self.parse_str_lit() {
+ Ok(str_lit) => Some(str_lit),
+ Err(Some(lit)) => match lit.kind {
+ ast::LitKind::Err(_) => None,
+ _ => {
+ self.struct_span_err(lit.span, "non-string ABI literal")
+ .span_suggestion(
+ lit.span,
+ "specify the ABI with a string literal",
+ "\"C\"",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ None
+ }
+ },
+ Err(None) => None,
+ }
+ }
+
+ pub fn collect_tokens_no_attrs<R: HasAttrs + HasTokens>(
+ &mut self,
+ f: impl FnOnce(&mut Self) -> PResult<'a, R>,
+ ) -> PResult<'a, R> {
+ // The only reason to call `collect_tokens_no_attrs` is if you want tokens, so use
+ // `ForceCollect::Yes`.
+ self.collect_tokens_trailing_token(
+ AttrWrapper::empty(),
+ ForceCollect::Yes,
+ |this, _attrs| Ok((f(this)?, TrailingToken::None)),
+ )
+ }
+
+ /// `::{` or `::*`
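+ ///
+ /// For example (illustrative): after parsing `std::collections` in
+ /// `use std::collections::{HashMap, HashSet};`, the parser sits at `::` with `{` ahead,
+ /// so this returns `true`.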
+ fn is_import_coupler(&mut self) -> bool {
+ self.check(&token::ModSep)
+ && self.look_ahead(1, |t| {
+ *t == token::OpenDelim(Delimiter::Brace) || *t == token::BinOp(token::Star)
+ })
+ }
+
+ pub fn clear_expected_tokens(&mut self) {
+ self.expected_tokens.clear();
+ }
+}
+
+pub(crate) fn make_unclosed_delims_error(
+ unmatched: UnmatchedBrace,
+ sess: &ParseSess,
+) -> Option<DiagnosticBuilder<'_, ErrorGuaranteed>> {
+ // `None` here means an `Eof` was found. We already emit those errors elsewhere; we add
+ // them to `unmatched_braces` only for error recovery in the `Parser`.
+ let found_delim = unmatched.found_delim?;
+ let span: MultiSpan = if let Some(sp) = unmatched.unclosed_span {
+ vec![unmatched.found_span, sp].into()
+ } else {
+ unmatched.found_span.into()
+ };
+ let mut err = sess.span_diagnostic.struct_span_err(
+ span,
+ &format!(
+ "mismatched closing delimiter: `{}`",
+ pprust::token_kind_to_string(&token::CloseDelim(found_delim)),
+ ),
+ );
+ err.span_label(unmatched.found_span, "mismatched closing delimiter");
+ if let Some(sp) = unmatched.candidate_span {
+ err.span_label(sp, "closing delimiter possibly meant for this");
+ }
+ if let Some(sp) = unmatched.unclosed_span {
+ err.span_label(sp, "unclosed delimiter");
+ }
+ Some(err)
+}
+
+pub fn emit_unclosed_delims(unclosed_delims: &mut Vec<UnmatchedBrace>, sess: &ParseSess) {
+ *sess.reached_eof.borrow_mut() |=
+ unclosed_delims.iter().any(|unmatched_delim| unmatched_delim.found_delim.is_none());
+ for unmatched in unclosed_delims.drain(..) {
+ if let Some(mut e) = make_unclosed_delims_error(unmatched, sess) {
+ e.emit();
+ }
+ }
+}
+
+/// A helper type used when building an `AttrAnnotatedTokenStream` from
+/// a `LazyTokenStream`. Both delimiter and non-delimiter tokens
+/// are stored as `FlatToken::Token`. A vector of `FlatToken`s
+/// is then 'parsed' to build up an `AttrAnnotatedTokenStream` with nested
+/// `AttrAnnotatedTokenTree::Delimited` tokens.
+#[derive(Debug, Clone)]
+pub enum FlatToken {
+ /// A token - this holds both delimiter (e.g. '{' and '}')
+ /// and non-delimiter tokens.
+ Token(Token),
+ /// Holds the `AttributesData` for an AST node. The
+ /// `AttributesData` is inserted directly into the
+ /// constructed `AttrAnnotatedTokenStream` as
+ /// an `AttrAnnotatedTokenTree::Attributes`.
+ AttrTarget(AttributesData),
+ /// A special 'empty' token that is ignored during the conversion
+ /// to an `AttrAnnotatedTokenStream`. This is used to simplify the
+ /// handling of replace ranges.
+ Empty,
+}
+
+#[derive(Debug)]
+pub enum NtOrTt {
+ Nt(Nonterminal),
+ Tt(TokenTree),
+}
diff --git a/compiler/rustc_parse/src/parser/nonterminal.rs b/compiler/rustc_parse/src/parser/nonterminal.rs
new file mode 100644
index 000000000..e215b6872
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/nonterminal.rs
@@ -0,0 +1,203 @@
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter, NonterminalKind, Token};
+use rustc_ast::HasTokens;
+use rustc_ast_pretty::pprust;
+use rustc_errors::PResult;
+use rustc_span::symbol::{kw, Ident};
+
+use crate::parser::pat::{CommaRecoveryMode, RecoverColon, RecoverComma};
+use crate::parser::{FollowedByType, ForceCollect, NtOrTt, Parser, PathStyle};
+
+impl<'a> Parser<'a> {
+ /// Checks whether a non-terminal may begin with a particular token.
+ ///
+ /// Returning `false` is a *stability guarantee* that such a matcher will *never* begin with
+ /// that token. Be conservative (return true) if not sure. Inlined because it has a single call
+ /// site.
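+ ///
+ /// Illustrative examples: an `expr` matcher may begin with an `if` token, so this must
+ /// return `true` for it; it will never begin with `let` (excluded for backwards
+ /// compatibility), so returning `false` there is part of the guarantee.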
+ #[inline]
+ pub fn nonterminal_may_begin_with(kind: NonterminalKind, token: &Token) -> bool {
+ /// Checks whether the non-terminal may contain a single (non-keyword) identifier.
+ fn may_be_ident(nt: &token::Nonterminal) -> bool {
+ match *nt {
+ token::NtItem(_) | token::NtBlock(_) | token::NtVis(_) | token::NtLifetime(_) => {
+ false
+ }
+ _ => true,
+ }
+ }
+
+ match kind {
+ NonterminalKind::Expr => {
+ token.can_begin_expr()
+ // This exception is here for backwards compatibility.
+ && !token.is_keyword(kw::Let)
+ // This exception is here for backwards compatibility.
+ && !token.is_keyword(kw::Const)
+ }
+ NonterminalKind::Ty => token.can_begin_type(),
+ NonterminalKind::Ident => get_macro_ident(token).is_some(),
+ NonterminalKind::Literal => token.can_begin_literal_maybe_minus(),
+ NonterminalKind::Vis => match token.kind {
+ // The follow-set of `:vis`: a comma, an identifier (including the "priv" keyword), or an interpolated token.
+ token::Comma | token::Ident(..) | token::Interpolated(..) => true,
+ _ => token.can_begin_type(),
+ },
+ NonterminalKind::Block => match token.kind {
+ token::OpenDelim(Delimiter::Brace) => true,
+ token::Interpolated(ref nt) => !matches!(
+ **nt,
+ token::NtItem(_)
+ | token::NtPat(_)
+ | token::NtTy(_)
+ | token::NtIdent(..)
+ | token::NtMeta(_)
+ | token::NtPath(_)
+ | token::NtVis(_)
+ ),
+ _ => false,
+ },
+ NonterminalKind::Path | NonterminalKind::Meta => match token.kind {
+ token::ModSep | token::Ident(..) => true,
+ token::Interpolated(ref nt) => match **nt {
+ token::NtPath(_) | token::NtMeta(_) => true,
+ _ => may_be_ident(&nt),
+ },
+ _ => false,
+ },
+ NonterminalKind::PatParam { .. } | NonterminalKind::PatWithOr { .. } => {
+ match token.kind {
+ token::Ident(..) | // box, ref, mut, and other identifiers (could be made stricter)
+ token::OpenDelim(Delimiter::Parenthesis) | // tuple pattern
+ token::OpenDelim(Delimiter::Bracket) | // slice pattern
+ token::BinOp(token::And) | // reference
+ token::BinOp(token::Minus) | // negative literal
+ token::AndAnd | // double reference
+ token::Literal(..) | // literal
+ token::DotDot | // range pattern (future compat)
+ token::DotDotDot | // range pattern (future compat)
+ token::ModSep | // path
+ token::Lt | // path (UFCS constant)
+ token::BinOp(token::Shl) => true, // path (double UFCS)
+ // leading vert `|` or-pattern
+ token::BinOp(token::Or) => matches!(kind, NonterminalKind::PatWithOr {..}),
+ token::Interpolated(ref nt) => may_be_ident(nt),
+ _ => false,
+ }
+ }
+ NonterminalKind::Lifetime => match token.kind {
+ token::Lifetime(_) => true,
+ token::Interpolated(ref nt) => {
+ matches!(**nt, token::NtLifetime(_))
+ }
+ _ => false,
+ },
+ NonterminalKind::TT | NonterminalKind::Item | NonterminalKind::Stmt => {
+ !matches!(token.kind, token::CloseDelim(_))
+ }
+ }
+ }
+
+ /// Parse a non-terminal (e.g. MBE `:pat` or `:ident`). Inlined because there is only one call
+ /// site.
+ #[inline]
+ pub fn parse_nonterminal(&mut self, kind: NonterminalKind) -> PResult<'a, NtOrTt> {
+ // Any `Nonterminal` which stores its tokens (currently `NtItem` and `NtExpr`)
+ // needs to have them force-captured here.
+ // A `macro_rules!` invocation may pass a captured item/expr to a proc-macro,
+ // which requires having captured tokens available. Since we cannot determine
+ // in advance whether or not a proc-macro will be (transitively) invoked,
+ // we always capture tokens for any `Nonterminal` which needs them.
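+ // Illustrative example: in `macro_rules! m { ($e:expr) => { some_proc_macro!($e) } }`
+ // (`some_proc_macro` being a hypothetical proc macro), the captured `$e` is forwarded
+ // onward, so the `NtExpr` built here must carry its original tokens.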
+ let mut nt = match kind {
+ // Note that TT is treated differently to all the others.
+ NonterminalKind::TT => return Ok(NtOrTt::Tt(self.parse_token_tree())),
+ NonterminalKind::Item => match self.parse_item(ForceCollect::Yes)? {
+ Some(item) => token::NtItem(item),
+ None => {
+ return Err(self.struct_span_err(self.token.span, "expected an item keyword"));
+ }
+ },
+ NonterminalKind::Block => {
+ // While a block *expression* may have attributes (e.g. `#[my_attr] { ... }`),
+ // the `:block` matcher does not support them.
+ token::NtBlock(self.collect_tokens_no_attrs(|this| this.parse_block())?)
+ }
+ NonterminalKind::Stmt => match self.parse_stmt(ForceCollect::Yes)? {
+ Some(s) => token::NtStmt(P(s)),
+ None => {
+ return Err(self.struct_span_err(self.token.span, "expected a statement"));
+ }
+ },
+ NonterminalKind::PatParam { .. } | NonterminalKind::PatWithOr { .. } => {
+ token::NtPat(self.collect_tokens_no_attrs(|this| match kind {
+ NonterminalKind::PatParam { .. } => this.parse_pat_no_top_alt(None),
+ NonterminalKind::PatWithOr { .. } => this.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::No,
+ RecoverColon::No,
+ CommaRecoveryMode::EitherTupleOrPipe,
+ ),
+ _ => unreachable!(),
+ })?)
+ }
+
+ NonterminalKind::Expr => token::NtExpr(self.parse_expr_force_collect()?),
+ NonterminalKind::Literal => {
+ // The `:literal` matcher does not support attributes
+ token::NtLiteral(
+ self.collect_tokens_no_attrs(|this| this.parse_literal_maybe_minus())?,
+ )
+ }
+
+ NonterminalKind::Ty => token::NtTy(
+ self.collect_tokens_no_attrs(|this| this.parse_no_question_mark_recover())?,
+ ),
+
+ // This could be handled like a token, since it is one.
+ NonterminalKind::Ident
+ if let Some((ident, is_raw)) = get_macro_ident(&self.token) =>
+ {
+ self.bump();
+ token::NtIdent(ident, is_raw)
+ }
+ NonterminalKind::Ident => {
+ let token_str = pprust::token_to_string(&self.token);
+ let msg = &format!("expected ident, found {}", &token_str);
+ return Err(self.struct_span_err(self.token.span, msg));
+ }
+ NonterminalKind::Path => token::NtPath(
+ P(self.collect_tokens_no_attrs(|this| this.parse_path(PathStyle::Type))?),
+ ),
+ NonterminalKind::Meta => token::NtMeta(P(self.parse_attr_item(true)?)),
+ NonterminalKind::Vis => token::NtVis(
+ P(self.collect_tokens_no_attrs(|this| this.parse_visibility(FollowedByType::Yes))?),
+ ),
+ NonterminalKind::Lifetime => {
+ if self.check_lifetime() {
+ token::NtLifetime(self.expect_lifetime().ident)
+ } else {
+ let token_str = pprust::token_to_string(&self.token);
+ let msg = &format!("expected a lifetime, found `{}`", &token_str);
+ return Err(self.struct_span_err(self.token.span, msg));
+ }
+ }
+ };
+
+ // If tokens are supported at all, they should be collected.
+ if matches!(nt.tokens_mut(), Some(None)) {
+ panic!(
+ "Missing tokens for nt {:?} at {:?}: {:?}",
+ nt,
+ nt.span(),
+ pprust::nonterminal_to_string(&nt)
+ );
+ }
+
+ Ok(NtOrTt::Nt(nt))
+ }
+}
+
+/// The token is an identifier, but not `_`.
+/// We prohibit passing `_` to macros expecting `ident` for now.
+fn get_macro_ident(token: &Token) -> Option<(Ident, bool)> {
+ token.ident().filter(|(ident, _)| ident.name != kw::Underscore)
+}
diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs
new file mode 100644
index 000000000..ba77a3958
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/pat.rs
@@ -0,0 +1,1151 @@
+use super::{ForceCollect, Parser, PathStyle, TrailingToken};
+use crate::{maybe_recover_from_interpolated_ty_qpath, maybe_whole};
+use rustc_ast::mut_visit::{noop_visit_pat, MutVisitor};
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter};
+use rustc_ast::{
+ self as ast, AttrVec, Attribute, BindingMode, Expr, ExprKind, MacCall, Mutability, Pat,
+ PatField, PatKind, Path, QSelf, RangeEnd, RangeSyntax,
+};
+use rustc_ast_pretty::pprust;
+use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, PResult};
+use rustc_span::source_map::{respan, Span, Spanned};
+use rustc_span::symbol::{kw, sym, Ident};
+
+pub(super) type Expected = Option<&'static str>;
+
+/// `Expected` for function and lambda parameter patterns.
+pub(super) const PARAM_EXPECTED: Expected = Some("parameter name");
+
+const WHILE_PARSING_OR_MSG: &str = "while parsing this or-pattern starting here";
+
+/// Whether or not to recover a `,` when parsing or-patterns.
+#[derive(PartialEq, Copy, Clone)]
+pub enum RecoverComma {
+ Yes,
+ No,
+}
+
+/// Whether or not to recover a `:` when parsing patterns that were meant to be paths.
+#[derive(PartialEq, Copy, Clone)]
+pub enum RecoverColon {
+ Yes,
+ No,
+}
+
+/// When recovering from an unexpected `,` in a pattern (e.g. `a, b`), whether to suggest
+/// only the tuple form `(a, b)` or that *and* the or-pattern form `a | b`.
+#[derive(PartialEq, Copy, Clone)]
+pub enum CommaRecoveryMode {
+ LikelyTuple,
+ EitherTupleOrPipe,
+}
+
+/// The result of `eat_or_separator`. We want to distinguish which case we are in to avoid
+/// emitting duplicate diagnostics.
+#[derive(Debug, Clone, Copy)]
+enum EatOrResult {
+ /// We recovered from a trailing vert.
+ TrailingVert,
+ /// We ate an `|` (or `||` and recovered).
+ AteOr,
+ /// We did not eat anything (i.e. the current token is not `|` or `||`).
+ None,
+}
+
+impl<'a> Parser<'a> {
+ /// Parses a pattern.
+ ///
+ /// Corresponds to `pat<no_top_alt>` in RFC 2535 and does not admit or-patterns
+ /// at the top level. Used when parsing the parameters of lambda expressions,
+ /// functions, function pointers, and `pat` macro fragments.
+ pub fn parse_pat_no_top_alt(&mut self, expected: Expected) -> PResult<'a, P<Pat>> {
+ self.parse_pat_with_range_pat(true, expected)
+ }
+
+ /// Parses a pattern.
+ ///
+ /// Corresponds to `top_pat` in RFC 2535 and allows or-pattern at the top level.
+ /// Used for parsing patterns in all cases when `pat<no_top_alt>` is not used.
+ ///
+ /// Note that after the FCP in <https://github.com/rust-lang/rust/issues/81415>,
+ /// a leading vert is allowed in nested or-patterns, too. This allows us to
+ /// simplify the grammar somewhat.
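+ ///
+ /// Illustrative example: in `match x { Some(0) | None => {} }` the arm pattern is parsed
+ /// here as a single or-pattern; a leading vert, as in `| Some(0) | None`, is accepted too.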
+ pub fn parse_pat_allow_top_alt(
+ &mut self,
+ expected: Expected,
+ rc: RecoverComma,
+ ra: RecoverColon,
+ rt: CommaRecoveryMode,
+ ) -> PResult<'a, P<Pat>> {
+ self.parse_pat_allow_top_alt_inner(expected, rc, ra, rt).map(|(pat, _)| pat)
+ }
+
+ /// Returns the pattern and a bool indicating whether we recovered from a trailing vert (true =
+ /// recovered).
+ fn parse_pat_allow_top_alt_inner(
+ &mut self,
+ expected: Expected,
+ rc: RecoverComma,
+ ra: RecoverColon,
+ rt: CommaRecoveryMode,
+ ) -> PResult<'a, (P<Pat>, bool)> {
+ // Keep track of whether we recovered from a trailing vert so that we can avoid duplicated
+ // suggestions (which bothers rustfix).
+ //
+ // Allow a '|' before the pats (RFCs 1925, 2530, and 2535).
+ let (leading_vert_span, mut trailing_vert) = match self.eat_or_separator(None) {
+ EatOrResult::AteOr => (Some(self.prev_token.span), false),
+ EatOrResult::TrailingVert => (None, true),
+ EatOrResult::None => (None, false),
+ };
+
+ // Parse the first pattern (`p_0`).
+ let mut first_pat = self.parse_pat_no_top_alt(expected)?;
+ if rc == RecoverComma::Yes {
+ self.maybe_recover_unexpected_comma(first_pat.span, rt)?;
+ }
+
+ // If the next token is not a `|`,
+ // this is not an or-pattern and we should exit here.
+ if !self.check(&token::BinOp(token::Or)) && self.token != token::OrOr {
+ // If we parsed a leading `|` which should be gated,
+ // then we should really gate the leading `|`.
+ // This complicated procedure is done purely for diagnostics UX.
+
+ // Check if the user wrote `foo:bar` instead of `foo::bar`.
+ if ra == RecoverColon::Yes {
+ first_pat = self.maybe_recover_colon_colon_in_pat_typo(first_pat, expected);
+ }
+
+ if let Some(leading_vert_span) = leading_vert_span {
+ // If there was a leading vert, treat this as an or-pattern. This improves
+ // diagnostics.
+ let span = leading_vert_span.to(self.prev_token.span);
+ return Ok((self.mk_pat(span, PatKind::Or(vec![first_pat])), trailing_vert));
+ }
+
+ return Ok((first_pat, trailing_vert));
+ }
+
+ // Parse the patterns `p_1 | ... | p_n` where `n > 0`.
+ let lo = leading_vert_span.unwrap_or(first_pat.span);
+ let mut pats = vec![first_pat];
+ loop {
+ match self.eat_or_separator(Some(lo)) {
+ EatOrResult::AteOr => {}
+ EatOrResult::None => break,
+ EatOrResult::TrailingVert => {
+ trailing_vert = true;
+ break;
+ }
+ }
+ let pat = self.parse_pat_no_top_alt(expected).map_err(|mut err| {
+ err.span_label(lo, WHILE_PARSING_OR_MSG);
+ err
+ })?;
+ if rc == RecoverComma::Yes {
+ self.maybe_recover_unexpected_comma(pat.span, rt)?;
+ }
+ pats.push(pat);
+ }
+ let or_pattern_span = lo.to(self.prev_token.span);
+
+ Ok((self.mk_pat(or_pattern_span, PatKind::Or(pats)), trailing_vert))
+ }
+
+ /// Parse a pattern and (maybe) a `Colon` in positions where a pattern may be followed by a
+ /// type annotation (e.g. for `let` bindings or `fn` params).
+ ///
+ /// Generally, this corresponds to `pat_no_top_alt` followed by an optional `Colon`. It will
+ /// eat the `Colon` token if one is present.
+ ///
+ /// The return value represents the parsed pattern and `true` if a `Colon` was parsed (`false`
+ /// otherwise).
+ pub(super) fn parse_pat_before_ty(
+ &mut self,
+ expected: Expected,
+ rc: RecoverComma,
+ syntax_loc: &str,
+ ) -> PResult<'a, (P<Pat>, bool)> {
+ // We use `parse_pat_allow_top_alt` regardless of whether we actually want top-level
+ // or-patterns so that we can detect when a user tries to use it. This allows us to print a
+ // better error message.
+ let (pat, trailing_vert) = self.parse_pat_allow_top_alt_inner(
+ expected,
+ rc,
+ RecoverColon::No,
+ CommaRecoveryMode::LikelyTuple,
+ )?;
+ let colon = self.eat(&token::Colon);
+
+ if let PatKind::Or(pats) = &pat.kind {
+ let msg = format!("top-level or-patterns are not allowed in {}", syntax_loc);
+ let (help, fix) = if pats.len() == 1 {
+ // If all we have is a leading vert, then print a special message. This is the case
+ // if `parse_pat_allow_top_alt` returns an or-pattern with one variant.
+ let msg = "remove the `|`";
+ let fix = pprust::pat_to_string(&pat);
+ (msg, fix)
+ } else {
+ let msg = "wrap the pattern in parentheses";
+ let fix = format!("({})", pprust::pat_to_string(&pat));
+ (msg, fix)
+ };
+
+ if trailing_vert {
+ // We already emitted an error and suggestion to remove the trailing vert. Don't
+ // emit again.
+ self.sess.span_diagnostic.delay_span_bug(pat.span, &msg);
+ } else {
+ self.struct_span_err(pat.span, &msg)
+ .span_suggestion(pat.span, help, fix, Applicability::MachineApplicable)
+ .emit();
+ }
+ }
+
+ Ok((pat, colon))
+ }
+
+ /// Parse the pattern for a function or function pointer parameter, followed by a colon.
+ ///
+ /// The return value represents the parsed pattern and `true` if a `Colon` was parsed (`false`
+ /// otherwise).
+ pub(super) fn parse_fn_param_pat_colon(&mut self) -> PResult<'a, (P<Pat>, bool)> {
+ // In order to get good UX, we first recover in the case of a leading vert for an illegal
+ // top-level or-pat. Normally, this means recovering both `|` and `||`, but in this case,
+ // a leading `||` probably doesn't indicate an or-pattern attempt, so we handle that
+ // separately.
+ if let token::OrOr = self.token.kind {
+ let span = self.token.span;
+ let mut err = self.struct_span_err(span, "unexpected `||` before function parameter");
+ err.span_suggestion(span, "remove the `||`", "", Applicability::MachineApplicable);
+ err.note("alternatives in or-patterns are separated with `|`, not `||`");
+ err.emit();
+ self.bump();
+ }
+
+ self.parse_pat_before_ty(PARAM_EXPECTED, RecoverComma::No, "function parameters")
+ }
+
+ /// Eat the or-pattern `|` separator.
+ /// If instead a `||` token is encountered, recover and pretend we parsed `|`.
+ fn eat_or_separator(&mut self, lo: Option<Span>) -> EatOrResult {
+ if self.recover_trailing_vert(lo) {
+ EatOrResult::TrailingVert
+ } else if matches!(self.token.kind, token::OrOr) {
+ // Found `||`; Recover and pretend we parsed `|`.
+ self.ban_unexpected_or_or(lo);
+ self.bump();
+ EatOrResult::AteOr
+ } else if self.eat(&token::BinOp(token::Or)) {
+ EatOrResult::AteOr
+ } else {
+ EatOrResult::None
+ }
+ }
+
+ /// Recover if `|` or `||` is the current token and we have one of the
+ /// tokens `=>`, `if`, `=`, `:`, `;`, `,`, `]`, `)`, or `}` ahead of us.
+ ///
+ /// These tokens all indicate that we reached the end of the or-pattern
+ /// list and can now reliably say that the `|` was an illegal trailing vert.
+ /// Note that there are more tokens such as `@` for which we know that the `|`
+ /// is an illegal parse. However, the user's intent is less clear in that case.
+ fn recover_trailing_vert(&mut self, lo: Option<Span>) -> bool {
+ let is_end_ahead = self.look_ahead(1, |token| {
+ matches!(
+ &token.uninterpolate().kind,
+ token::FatArrow // e.g. `a | => 0,`.
+ | token::Ident(kw::If, false) // e.g. `a | if expr`.
+ | token::Eq // e.g. `let a | = 0`.
+ | token::Semi // e.g. `let a |;`.
+ | token::Colon // e.g. `let a | :`.
+ | token::Comma // e.g. `let (a |,)`.
+ | token::CloseDelim(Delimiter::Bracket) // e.g. `let [a | ]`.
+ | token::CloseDelim(Delimiter::Parenthesis) // e.g. `let (a | )`.
+ | token::CloseDelim(Delimiter::Brace) // e.g. `let A { f: a | }`.
+ )
+ });
+ match (is_end_ahead, &self.token.kind) {
+ (true, token::BinOp(token::Or) | token::OrOr) => {
+ self.ban_illegal_vert(lo, "trailing", "not allowed in an or-pattern");
+ self.bump();
+ true
+ }
+ _ => false,
+ }
+ }
+
+ /// We have parsed `||` instead of `|`. Error and suggest `|` instead.
+ fn ban_unexpected_or_or(&mut self, lo: Option<Span>) {
+ let mut err = self.struct_span_err(self.token.span, "unexpected token `||` in pattern");
+ err.span_suggestion(
+ self.token.span,
+ "use a single `|` to separate multiple alternative patterns",
+ "|",
+ Applicability::MachineApplicable,
+ );
+ if let Some(lo) = lo {
+ err.span_label(lo, WHILE_PARSING_OR_MSG);
+ }
+ err.emit();
+ }
+
+ /// A `|` or possibly `||` token shouldn't be here. Ban it.
+ fn ban_illegal_vert(&mut self, lo: Option<Span>, pos: &str, ctx: &str) {
+ let span = self.token.span;
+ let mut err = self.struct_span_err(span, &format!("a {} `|` is {}", pos, ctx));
+ err.span_suggestion(
+ span,
+ &format!("remove the `{}`", pprust::token_to_string(&self.token)),
+ "",
+ Applicability::MachineApplicable,
+ );
+ if let Some(lo) = lo {
+ err.span_label(lo, WHILE_PARSING_OR_MSG);
+ }
+ if let token::OrOr = self.token.kind {
+ err.note("alternatives in or-patterns are separated with `|`, not `||`");
+ }
+ err.emit();
+ }
+
+ /// Parses a pattern, with a setting for whether modern range patterns (e.g., `a..=b`,
+ /// `a..b`) are allowed.
+ fn parse_pat_with_range_pat(
+ &mut self,
+ allow_range_pat: bool,
+ expected: Expected,
+ ) -> PResult<'a, P<Pat>> {
+ maybe_recover_from_interpolated_ty_qpath!(self, true);
+ maybe_whole!(self, NtPat, |x| x);
+
+ let lo = self.token.span;
+
+ let pat = if self.check(&token::BinOp(token::And)) || self.token.kind == token::AndAnd {
+ self.parse_pat_deref(expected)?
+ } else if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ self.parse_pat_tuple_or_parens()?
+ } else if self.check(&token::OpenDelim(Delimiter::Bracket)) {
+ // Parse `[pat, pat,...]` as a slice pattern.
+ let (pats, _) = self.parse_delim_comma_seq(Delimiter::Bracket, |p| {
+ p.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::No,
+ RecoverColon::No,
+ CommaRecoveryMode::EitherTupleOrPipe,
+ )
+ })?;
+ PatKind::Slice(pats)
+ } else if self.check(&token::DotDot) && !self.is_pat_range_end_start(1) {
+ // A rest pattern `..`.
+ self.bump(); // `..`
+ PatKind::Rest
+ } else if self.check(&token::DotDotDot) && !self.is_pat_range_end_start(1) {
+ self.recover_dotdotdot_rest_pat(lo)
+ } else if let Some(form) = self.parse_range_end() {
+ self.parse_pat_range_to(form)? // `..=X`, `...X`, or `..X`.
+ } else if self.eat_keyword(kw::Underscore) {
+ // Parse _
+ PatKind::Wild
+ } else if self.eat_keyword(kw::Mut) {
+ self.parse_pat_ident_mut()?
+ } else if self.eat_keyword(kw::Ref) {
+ // Parse ref ident @ pat / ref mut ident @ pat
+ let mutbl = self.parse_mutability();
+ self.parse_pat_ident(BindingMode::ByRef(mutbl))?
+ } else if self.eat_keyword(kw::Box) {
+ self.parse_pat_box()?
+ } else if self.check_inline_const(0) {
+ // Parse `const pat`
+ let const_expr = self.parse_const_block(lo.to(self.token.span), true)?;
+
+ if let Some(re) = self.parse_range_end() {
+ self.parse_pat_range_begin_with(const_expr, re)?
+ } else {
+ PatKind::Lit(const_expr)
+ }
+ } else if self.can_be_ident_pat() {
+ // Parse `ident @ pat`
+ // This can give false positives and parse nullary enums,
+ // they are dealt with later in resolve.
+ self.parse_pat_ident(BindingMode::ByValue(Mutability::Not))?
+ } else if self.is_start_of_pat_with_path() {
+ // Parse pattern starting with a path
+ let (qself, path) = if self.eat_lt() {
+ // Parse a qualified path
+ let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
+ (Some(qself), path)
+ } else {
+ // Parse an unqualified path
+ (None, self.parse_path(PathStyle::Expr)?)
+ };
+ let span = lo.to(self.prev_token.span);
+
+ if qself.is_none() && self.check(&token::Not) {
+ self.parse_pat_mac_invoc(path)?
+ } else if let Some(form) = self.parse_range_end() {
+ let begin = self.mk_expr(span, ExprKind::Path(qself, path), AttrVec::new());
+ self.parse_pat_range_begin_with(begin, form)?
+ } else if self.check(&token::OpenDelim(Delimiter::Brace)) {
+ self.parse_pat_struct(qself, path)?
+ } else if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ self.parse_pat_tuple_struct(qself, path)?
+ } else {
+ PatKind::Path(qself, path)
+ }
+ } else {
+ // Try to parse everything else as literal with optional minus
+ match self.parse_literal_maybe_minus() {
+ Ok(begin) => match self.parse_range_end() {
+ Some(form) => self.parse_pat_range_begin_with(begin, form)?,
+ None => PatKind::Lit(begin),
+ },
+ Err(err) => return self.fatal_unexpected_non_pat(err, expected),
+ }
+ };
+
+ let pat = self.mk_pat(lo.to(self.prev_token.span), pat);
+ let pat = self.maybe_recover_from_bad_qpath(pat)?;
+ let pat = self.recover_intersection_pat(pat)?;
+
+ if !allow_range_pat {
+ self.ban_pat_range_if_ambiguous(&pat)
+ }
+
+ Ok(pat)
+ }
+
+    /// Recover from a typoed `...` pattern that was encountered.
+    /// Ref: issue #70388.
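+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// let [a, ...] = v; // error: unexpected `...`
+    ///                   // help: for a rest pattern, use `..` instead of `...`
+    /// ```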
+ fn recover_dotdotdot_rest_pat(&mut self, lo: Span) -> PatKind {
+ // A typoed rest pattern `...`.
+ self.bump(); // `...`
+
+ // The user probably mistook `...` for a rest pattern `..`.
+ self.struct_span_err(lo, "unexpected `...`")
+ .span_label(lo, "not a valid pattern")
+ .span_suggestion_short(
+ lo,
+ "for a rest pattern, use `..` instead of `...`",
+ "..",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ PatKind::Rest
+ }
+
+ /// Try to recover the more general form `intersect ::= $pat_lhs @ $pat_rhs`.
+ ///
+ /// Allowed binding patterns generated by `binding ::= ref? mut? $ident @ $pat_rhs`
+    /// should already have been parsed by this point;
+    /// if the next token is `@`, then we can try to parse the more general form.
+ ///
+ /// Consult `parse_pat_ident` for the `binding` grammar.
+ ///
+    /// The notion of intersection patterns is found in
+    /// e.g. [F#][and], where they are called AND-patterns.
+ ///
+ /// [and]: https://docs.microsoft.com/en-us/dotnet/fsharp/language-reference/pattern-matching
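+    ///
+    /// For illustration, a sketch of the two recoveries performed here
+    /// (exact rendering may vary):
+    ///
+    /// ```ignore (illustrative)
+    /// Some(x) @ y  // error: pattern on wrong side of `@`
+    ///              // help: switch the order: `y @ Some(x)`
+    /// A(x) @ B(y)  // error: left-hand side of `@` must be a binding
+    /// ```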
+ fn recover_intersection_pat(&mut self, lhs: P<Pat>) -> PResult<'a, P<Pat>> {
+ if self.token.kind != token::At {
+ // Next token is not `@` so it's not going to be an intersection pattern.
+ return Ok(lhs);
+ }
+
+ // At this point we attempt to parse `@ $pat_rhs` and emit an error.
+ self.bump(); // `@`
+ let mut rhs = self.parse_pat_no_top_alt(None)?;
+ let sp = lhs.span.to(rhs.span);
+
+ if let PatKind::Ident(_, _, ref mut sub @ None) = rhs.kind {
+ // The user inverted the order, so help them fix that.
+ let mut applicability = Applicability::MachineApplicable;
+ // FIXME(bindings_after_at): Remove this code when stabilizing the feature.
+ lhs.walk(&mut |p| match p.kind {
+ // `check_match` is unhappy if the subpattern has a binding anywhere.
+ PatKind::Ident(..) => {
+ applicability = Applicability::MaybeIncorrect;
+ false // Short-circuit.
+ }
+ _ => true,
+ });
+
+ let lhs_span = lhs.span;
+ // Move the LHS into the RHS as a subpattern.
+ // The RHS is now the full pattern.
+ *sub = Some(lhs);
+
+ self.struct_span_err(sp, "pattern on wrong side of `@`")
+ .span_label(lhs_span, "pattern on the left, should be on the right")
+ .span_label(rhs.span, "binding on the right, should be on the left")
+ .span_suggestion(sp, "switch the order", pprust::pat_to_string(&rhs), applicability)
+ .emit();
+ } else {
+ // The special case above doesn't apply so we may have e.g. `A(x) @ B(y)`.
+ rhs.kind = PatKind::Wild;
+ self.struct_span_err(sp, "left-hand side of `@` must be a binding")
+ .span_label(lhs.span, "interpreted as a pattern, not a binding")
+ .span_label(rhs.span, "also a pattern")
+ .note("bindings are `x`, `mut x`, `ref x`, and `ref mut x`")
+ .emit();
+ }
+
+ rhs.span = sp;
+ Ok(rhs)
+ }
+
+ /// Ban a range pattern if it has an ambiguous interpretation.
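+    ///
+    /// For illustration (a non-exhaustive sketch; exact rendering may vary):
+    ///
+    /// ```ignore (illustrative)
+    /// match x {
+    ///     &0..=5 => {} // error: the range pattern here has ambiguous interpretation
+    ///                  // help: add parentheses to clarify the precedence: `&(0..=5)`
+    /// }
+    /// ```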
+ fn ban_pat_range_if_ambiguous(&self, pat: &Pat) {
+ match pat.kind {
+ PatKind::Range(
+ ..,
+ Spanned { node: RangeEnd::Included(RangeSyntax::DotDotDot), .. },
+ ) => return,
+ PatKind::Range(..) => {}
+ _ => return,
+ }
+
+ self.struct_span_err(pat.span, "the range pattern here has ambiguous interpretation")
+ .span_suggestion(
+ pat.span,
+ "add parentheses to clarify the precedence",
+ format!("({})", pprust::pat_to_string(&pat)),
+ // "ambiguous interpretation" implies that we have to be guessing
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+
+ /// Parse `&pat` / `&mut pat`.
+ fn parse_pat_deref(&mut self, expected: Expected) -> PResult<'a, PatKind> {
+ self.expect_and()?;
+ self.recover_lifetime_in_deref_pat();
+ let mutbl = self.parse_mutability();
+ let subpat = self.parse_pat_with_range_pat(false, expected)?;
+ Ok(PatKind::Ref(subpat, mutbl))
+ }
+
+ fn recover_lifetime_in_deref_pat(&mut self) {
+ if let token::Lifetime(name) = self.token.kind {
+ self.bump(); // `'a`
+
+ let span = self.prev_token.span;
+ self.struct_span_err(span, &format!("unexpected lifetime `{}` in pattern", name))
+ .span_suggestion(span, "remove the lifetime", "", Applicability::MachineApplicable)
+ .emit();
+ }
+ }
+
+ /// Parse a tuple or parenthesis pattern.
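+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// let (x) = 1;       // A parenthesized pattern, `PatKind::Paren`.
+    /// let (x,) = (1,);   // A one-element tuple pattern, `PatKind::Tuple`.
+    /// let (..) = (1, 2); // Also a tuple pattern, for backward compatibility.
+    /// ```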
+ fn parse_pat_tuple_or_parens(&mut self) -> PResult<'a, PatKind> {
+ let (fields, trailing_comma) = self.parse_paren_comma_seq(|p| {
+ p.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::No,
+ RecoverColon::No,
+ CommaRecoveryMode::LikelyTuple,
+ )
+ })?;
+
+ // Here, `(pat,)` is a tuple pattern.
+ // For backward compatibility, `(..)` is a tuple pattern as well.
+ Ok(if fields.len() == 1 && !(trailing_comma || fields[0].is_rest()) {
+ PatKind::Paren(fields.into_iter().next().unwrap())
+ } else {
+ PatKind::Tuple(fields)
+ })
+ }
+
+ /// Parse a mutable binding with the `mut` token already eaten.
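+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// let mut x = 0;           // A mutable binding.
+    /// let mut (a, b) = (0, 1); // error: `mut` must be attached to each individual binding
+    /// ```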
+ fn parse_pat_ident_mut(&mut self) -> PResult<'a, PatKind> {
+ let mut_span = self.prev_token.span;
+
+ if self.eat_keyword(kw::Ref) {
+ return self.recover_mut_ref_ident(mut_span);
+ }
+
+ self.recover_additional_muts();
+
+ // Make sure we don't allow e.g. `let mut $p;` where `$p:pat`.
+ if let token::Interpolated(ref nt) = self.token.kind {
+ if let token::NtPat(_) = **nt {
+ self.expected_ident_found().emit();
+ }
+ }
+
+ // Parse the pattern we hope to be an identifier.
+ let mut pat = self.parse_pat_no_top_alt(Some("identifier"))?;
+
+ // If we don't have `mut $ident (@ pat)?`, error.
+ if let PatKind::Ident(BindingMode::ByValue(m @ Mutability::Not), ..) = &mut pat.kind {
+ // Don't recurse into the subpattern.
+ // `mut` on the outer binding doesn't affect the inner bindings.
+ *m = Mutability::Mut;
+ } else {
+ // Add `mut` to any binding in the parsed pattern.
+ let changed_any_binding = Self::make_all_value_bindings_mutable(&mut pat);
+ self.ban_mut_general_pat(mut_span, &pat, changed_any_binding);
+ }
+
+ Ok(pat.into_inner().kind)
+ }
+
+ /// Recover on `mut ref? ident @ pat` and suggest
+ /// that the order of `mut` and `ref` is incorrect.
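+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// let mut ref x = 0; // error: the order of `mut` and `ref` is incorrect
+    ///                    // help: try switching the order: `ref mut`
+    /// ```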
+ fn recover_mut_ref_ident(&mut self, lo: Span) -> PResult<'a, PatKind> {
+ let mutref_span = lo.to(self.prev_token.span);
+ self.struct_span_err(mutref_span, "the order of `mut` and `ref` is incorrect")
+ .span_suggestion(
+ mutref_span,
+ "try switching the order",
+ "ref mut",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ self.parse_pat_ident(BindingMode::ByRef(Mutability::Mut))
+ }
+
+ /// Turn all by-value immutable bindings in a pattern into mutable bindings.
+ /// Returns `true` if any change was made.
+ fn make_all_value_bindings_mutable(pat: &mut P<Pat>) -> bool {
+ struct AddMut(bool);
+ impl MutVisitor for AddMut {
+ fn visit_pat(&mut self, pat: &mut P<Pat>) {
+ if let PatKind::Ident(BindingMode::ByValue(m @ Mutability::Not), ..) = &mut pat.kind
+ {
+ self.0 = true;
+ *m = Mutability::Mut;
+ }
+ noop_visit_pat(pat, self);
+ }
+ }
+
+ let mut add_mut = AddMut(false);
+ add_mut.visit_pat(pat);
+ add_mut.0
+ }
+
+ /// Error on `mut $pat` where `$pat` is not an ident.
+ fn ban_mut_general_pat(&self, lo: Span, pat: &Pat, changed_any_binding: bool) {
+ let span = lo.to(pat.span);
+ let fix = pprust::pat_to_string(&pat);
+ let (problem, suggestion) = if changed_any_binding {
+ ("`mut` must be attached to each individual binding", "add `mut` to each binding")
+ } else {
+ ("`mut` must be followed by a named binding", "remove the `mut` prefix")
+ };
+ self.struct_span_err(span, problem)
+ .span_suggestion(span, suggestion, fix, Applicability::MachineApplicable)
+ .note("`mut` may be followed by `variable` and `variable @ pattern`")
+ .emit();
+ }
+
+ /// Eat any extraneous `mut`s and error + recover if we ate any.
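+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// let mut mut x = 0; // error: `mut` on a binding may not be repeated
+    ///                    // help: remove the additional `mut`s
+    /// ```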
+ fn recover_additional_muts(&mut self) {
+ let lo = self.token.span;
+ while self.eat_keyword(kw::Mut) {}
+ if lo == self.token.span {
+ return;
+ }
+
+ let span = lo.to(self.prev_token.span);
+ self.struct_span_err(span, "`mut` on a binding may not be repeated")
+ .span_suggestion(
+ span,
+ "remove the additional `mut`s",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+    /// Parse a macro invocation in pattern position.
+ fn parse_pat_mac_invoc(&mut self, path: Path) -> PResult<'a, PatKind> {
+ self.bump();
+ let args = self.parse_mac_args()?;
+ let mac = MacCall { path, args, prior_type_ascription: self.last_type_ascription };
+ Ok(PatKind::MacCall(mac))
+ }
+
+ fn fatal_unexpected_non_pat(
+ &mut self,
+ err: DiagnosticBuilder<'a, ErrorGuaranteed>,
+ expected: Expected,
+ ) -> PResult<'a, P<Pat>> {
+ err.cancel();
+
+ let expected = expected.unwrap_or("pattern");
+ let msg = format!("expected {}, found {}", expected, super::token_descr(&self.token));
+
+ let mut err = self.struct_span_err(self.token.span, &msg);
+ err.span_label(self.token.span, format!("expected {}", expected));
+
+ let sp = self.sess.source_map().start_point(self.token.span);
+ if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&sp) {
+ self.sess.expr_parentheses_needed(&mut err, *sp);
+ }
+
+ Err(err)
+ }
+
+ /// Parses the range pattern end form `".." | "..." | "..=" ;`.
+ fn parse_range_end(&mut self) -> Option<Spanned<RangeEnd>> {
+ let re = if self.eat(&token::DotDotDot) {
+ RangeEnd::Included(RangeSyntax::DotDotDot)
+ } else if self.eat(&token::DotDotEq) {
+ RangeEnd::Included(RangeSyntax::DotDotEq)
+ } else if self.eat(&token::DotDot) {
+ RangeEnd::Excluded
+ } else {
+ return None;
+ };
+ Some(respan(self.prev_token.span, re))
+ }
+
+ /// Parse a range pattern `$begin $form $end?` where `$form = ".." | "..." | "..=" ;`.
+ /// `$begin $form` has already been parsed.
+ fn parse_pat_range_begin_with(
+ &mut self,
+ begin: P<Expr>,
+ re: Spanned<RangeEnd>,
+ ) -> PResult<'a, PatKind> {
+ let end = if self.is_pat_range_end_start(0) {
+ // Parsing e.g. `X..=Y`.
+ Some(self.parse_pat_range_end()?)
+ } else {
+ // Parsing e.g. `X..`.
+ if let RangeEnd::Included(_) = re.node {
+ // FIXME(Centril): Consider semantic errors instead in `ast_validation`.
+ self.inclusive_range_with_incorrect_end(re.span);
+ }
+ None
+ };
+ Ok(PatKind::Range(Some(begin), end, re))
+ }
+
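+    /// Report an inclusive range pattern that lacks a valid end.
+    /// For illustration (a non-exhaustive sketch; exact rendering may vary):
+    ///
+    /// ```ignore (illustrative)
+    /// 0..==5 // error: unexpected `=` after inclusive range
+    ///        // help: use `..=` instead
+    /// 0..=   // error[E0586]: inclusive range with no end
+    /// ```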
+ pub(super) fn inclusive_range_with_incorrect_end(&mut self, span: Span) {
+ let tok = &self.token;
+
+ // If the user typed "..==" instead of "..=", we want to give them
+ // a specific error message telling them to use "..=".
+ // Otherwise, we assume that they meant to type a half open exclusive
+ // range and give them an error telling them to do that instead.
+ if matches!(tok.kind, token::Eq) && tok.span.lo() == span.hi() {
+ let span_with_eq = span.to(tok.span);
+
+ // Ensure the user doesn't receive unhelpful unexpected token errors
+ self.bump();
+ if self.is_pat_range_end_start(0) {
+ let _ = self.parse_pat_range_end().map_err(|e| e.cancel());
+ }
+
+ self.error_inclusive_range_with_extra_equals(span_with_eq);
+ } else {
+ self.error_inclusive_range_with_no_end(span);
+ }
+ }
+
+ fn error_inclusive_range_with_extra_equals(&self, span: Span) {
+ self.struct_span_err(span, "unexpected `=` after inclusive range")
+ .span_suggestion_short(span, "use `..=` instead", "..=", Applicability::MaybeIncorrect)
+ .note("inclusive ranges end with a single equals sign (`..=`)")
+ .emit();
+ }
+
+ fn error_inclusive_range_with_no_end(&self, span: Span) {
+ struct_span_err!(self.sess.span_diagnostic, span, E0586, "inclusive range with no end")
+ .span_suggestion_short(span, "use `..` instead", "..", Applicability::MachineApplicable)
+ .note("inclusive ranges must be bounded at the end (`..=b` or `a..=b`)")
+ .emit();
+ }
+
+ /// Parse a range-to pattern, `..X` or `..=X` where `X` remains to be parsed.
+ ///
+ /// The form `...X` is prohibited to reduce confusion with the potential
+ /// expression syntax `...expr` for splatting in expressions.
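+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// ..=5 // A range-to pattern, gated under `half_open_range_patterns`.
+    /// ...5 // error: range-to patterns with `...` are not allowed
+    /// ```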
+ fn parse_pat_range_to(&mut self, mut re: Spanned<RangeEnd>) -> PResult<'a, PatKind> {
+ let end = self.parse_pat_range_end()?;
+ self.sess.gated_spans.gate(sym::half_open_range_patterns, re.span.to(self.prev_token.span));
+ if let RangeEnd::Included(ref mut syn @ RangeSyntax::DotDotDot) = &mut re.node {
+ *syn = RangeSyntax::DotDotEq;
+ self.struct_span_err(re.span, "range-to patterns with `...` are not allowed")
+ .span_suggestion_short(
+ re.span,
+ "use `..=` instead",
+ "..=",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ Ok(PatKind::Range(None, Some(end), re))
+ }
+
+    /// Is the token `dist` away from the current one suitable as the start of a range pattern's end?
+ fn is_pat_range_end_start(&self, dist: usize) -> bool {
+ self.check_inline_const(dist)
+ || self.look_ahead(dist, |t| {
+ t.is_path_start() // e.g. `MY_CONST`;
+ || t.kind == token::Dot // e.g. `.5` for recovery;
+ || t.can_begin_literal_maybe_minus() // e.g. `42`.
+ || t.is_whole_expr()
+ })
+ }
+
+ fn parse_pat_range_end(&mut self) -> PResult<'a, P<Expr>> {
+ if self.check_inline_const(0) {
+ self.parse_const_block(self.token.span, true)
+ } else if self.check_path() {
+ let lo = self.token.span;
+ let (qself, path) = if self.eat_lt() {
+ // Parse a qualified path
+ let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
+ (Some(qself), path)
+ } else {
+ // Parse an unqualified path
+ (None, self.parse_path(PathStyle::Expr)?)
+ };
+ let hi = self.prev_token.span;
+ Ok(self.mk_expr(lo.to(hi), ExprKind::Path(qself, path), AttrVec::new()))
+ } else {
+ self.parse_literal_maybe_minus()
+ }
+ }
+
+ /// Is this the start of a pattern beginning with a path?
+ fn is_start_of_pat_with_path(&mut self) -> bool {
+ self.check_path()
+        // Just for recovery (see `can_be_ident_pat`).
+ || self.token.is_ident() && !self.token.is_bool_lit() && !self.token.is_keyword(kw::In)
+ }
+
+ /// Would `parse_pat_ident` be appropriate here?
+ fn can_be_ident_pat(&mut self) -> bool {
+ self.check_ident()
+ && !self.token.is_bool_lit() // Avoid `true` or `false` as a binding as it is a literal.
+ && !self.token.is_path_segment_keyword() // Avoid e.g. `Self` as it is a path.
+ // Avoid `in`. Due to recovery in the list parser this messes with `for ( $pat in $expr )`.
+ && !self.token.is_keyword(kw::In)
+        // Look one token ahead to rule out patterns that merely start with an identifier:
+ && self.look_ahead(1, |t| !matches!(t.kind, token::OpenDelim(Delimiter::Parenthesis) // A tuple struct pattern.
+ | token::OpenDelim(Delimiter::Brace) // A struct pattern.
+ | token::DotDotDot | token::DotDotEq | token::DotDot // A range pattern.
+ | token::ModSep // A tuple / struct variant pattern.
+ | token::Not)) // A macro expanding to a pattern.
+ }
+
+ /// Parses `ident` or `ident @ pat`.
+    /// Used by binding patterns such as `foo` and `ref foo` to give a good
+    /// error message for parsing mistakes like `ref foo(a, b)`.
+ fn parse_pat_ident(&mut self, binding_mode: BindingMode) -> PResult<'a, PatKind> {
+ let ident = self.parse_ident()?;
+ let sub = if self.eat(&token::At) {
+ Some(self.parse_pat_no_top_alt(Some("binding pattern"))?)
+ } else {
+ None
+ };
+
+ // Just to be friendly, if they write something like `ref Some(i)`,
+ // we end up here with `(` as the current token.
+ // This shortly leads to a parse error. Note that if there is no explicit
+ // binding mode then we do not end up here, because the lookahead
+ // will direct us over to `parse_enum_variant()`.
+ if self.token == token::OpenDelim(Delimiter::Parenthesis) {
+ return Err(self
+ .struct_span_err(self.prev_token.span, "expected identifier, found enum pattern"));
+ }
+
+ Ok(PatKind::Ident(binding_mode, ident, sub))
+ }
+
+ /// Parse a struct ("record") pattern (e.g. `Foo { ... }` or `Foo::Bar { ... }`).
+ fn parse_pat_struct(&mut self, qself: Option<QSelf>, path: Path) -> PResult<'a, PatKind> {
+ if qself.is_some() {
+ // Feature gate the use of qualified paths in patterns
+ self.sess.gated_spans.gate(sym::more_qualified_paths, path.span);
+ }
+ self.bump();
+ let (fields, etc) = self.parse_pat_fields().unwrap_or_else(|mut e| {
+ e.span_label(path.span, "while parsing the fields for this pattern");
+ e.emit();
+ self.recover_stmt();
+ (vec![], true)
+ });
+ self.bump();
+ Ok(PatKind::Struct(qself, path, fields, etc))
+ }
+
+ /// Parse tuple struct or tuple variant pattern (e.g. `Foo(...)` or `Foo::Bar(...)`).
+ fn parse_pat_tuple_struct(&mut self, qself: Option<QSelf>, path: Path) -> PResult<'a, PatKind> {
+ let (fields, _) = self.parse_paren_comma_seq(|p| {
+ p.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::No,
+ RecoverColon::No,
+ CommaRecoveryMode::EitherTupleOrPipe,
+ )
+ })?;
+ if qself.is_some() {
+ self.sess.gated_spans.gate(sym::more_qualified_paths, path.span);
+ }
+ Ok(PatKind::TupleStruct(qself, path, fields))
+ }
+
+ /// Are we sure this could not possibly be the start of a pattern?
+ ///
+ /// Currently, this only accounts for tokens that can follow identifiers
+ /// in patterns, but this can be extended as necessary.
+ fn isnt_pattern_start(&self) -> bool {
+ [
+ token::Eq,
+ token::Colon,
+ token::Comma,
+ token::Semi,
+ token::At,
+ token::OpenDelim(Delimiter::Brace),
+ token::CloseDelim(Delimiter::Brace),
+ token::CloseDelim(Delimiter::Parenthesis),
+ ]
+ .contains(&self.token.kind)
+ }
+
+ /// Parses `box pat`
+ fn parse_pat_box(&mut self) -> PResult<'a, PatKind> {
+ let box_span = self.prev_token.span;
+
+ if self.isnt_pattern_start() {
+ self.struct_span_err(
+ self.token.span,
+ format!("expected pattern, found {}", super::token_descr(&self.token)),
+ )
+ .span_note(box_span, "`box` is a reserved keyword")
+ .span_suggestion_verbose(
+ box_span.shrink_to_lo(),
+ "escape `box` to use it as an identifier",
+ "r#",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+
+ // We cannot use `parse_pat_ident()` since it will complain `box`
+ // is not an identifier.
+ let sub = if self.eat(&token::At) {
+ Some(self.parse_pat_no_top_alt(Some("binding pattern"))?)
+ } else {
+ None
+ };
+
+ Ok(PatKind::Ident(
+ BindingMode::ByValue(Mutability::Not),
+ Ident::new(kw::Box, box_span),
+ sub,
+ ))
+ } else {
+ let pat = self.parse_pat_with_range_pat(false, None)?;
+ self.sess.gated_spans.gate(sym::box_patterns, box_span.to(self.prev_token.span));
+ Ok(PatKind::Box(pat))
+ }
+ }
+
+ /// Parses the fields of a struct-like pattern.
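+    ///
+    /// For illustration, one recovery performed here (a non-exhaustive sketch;
+    /// exact rendering may vary):
+    ///
+    /// ```ignore (illustrative)
+    /// let Foo { .., x } = foo; // error: expected `}`, found `,`
+    ///                          // help: move the `..` to the end of the field list
+    /// ```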
+ fn parse_pat_fields(&mut self) -> PResult<'a, (Vec<PatField>, bool)> {
+ let mut fields = Vec::new();
+ let mut etc = false;
+ let mut ate_comma = true;
+ let mut delayed_err: Option<DiagnosticBuilder<'a, ErrorGuaranteed>> = None;
+ let mut etc_span = None;
+
+ while self.token != token::CloseDelim(Delimiter::Brace) {
+ let attrs = match self.parse_outer_attributes() {
+ Ok(attrs) => attrs,
+ Err(err) => {
+ if let Some(mut delayed) = delayed_err {
+ delayed.emit();
+ }
+ return Err(err);
+ }
+ };
+ let lo = self.token.span;
+
+            // Check that a comma comes after every field.
+ if !ate_comma {
+ let err = self.struct_span_err(self.token.span, "expected `,`");
+ if let Some(mut delayed) = delayed_err {
+ delayed.emit();
+ }
+ return Err(err);
+ }
+ ate_comma = false;
+
+ if self.check(&token::DotDot) || self.token == token::DotDotDot {
+ etc = true;
+ let mut etc_sp = self.token.span;
+
+ self.recover_one_fewer_dotdot();
+                self.bump(); // `..` or `...`
+
+ if self.token == token::CloseDelim(Delimiter::Brace) {
+ etc_span = Some(etc_sp);
+ break;
+ }
+ let token_str = super::token_descr(&self.token);
+ let msg = &format!("expected `}}`, found {}", token_str);
+ let mut err = self.struct_span_err(self.token.span, msg);
+
+ err.span_label(self.token.span, "expected `}`");
+ let mut comma_sp = None;
+ if self.token == token::Comma {
+ // Issue #49257
+ let nw_span = self.sess.source_map().span_until_non_whitespace(self.token.span);
+ etc_sp = etc_sp.to(nw_span);
+ err.span_label(
+ etc_sp,
+ "`..` must be at the end and cannot have a trailing comma",
+ );
+ comma_sp = Some(self.token.span);
+ self.bump();
+ ate_comma = true;
+ }
+
+ etc_span = Some(etc_sp.until(self.token.span));
+ if self.token == token::CloseDelim(Delimiter::Brace) {
+ // If the struct looks otherwise well formed, recover and continue.
+ if let Some(sp) = comma_sp {
+ err.span_suggestion_short(
+ sp,
+ "remove this comma",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ break;
+ } else if self.token.is_ident() && ate_comma {
+ // Accept fields coming after `..,`.
+ // This way we avoid "pattern missing fields" errors afterwards.
+ // We delay this error until the end in order to have a span for a
+ // suggested fix.
+ if let Some(mut delayed_err) = delayed_err {
+ delayed_err.emit();
+ return Err(err);
+ } else {
+ delayed_err = Some(err);
+ }
+ } else {
+ if let Some(mut err) = delayed_err {
+ err.emit();
+ }
+ return Err(err);
+ }
+ }
+
+ let field =
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let field = match this.parse_pat_field(lo, attrs) {
+ Ok(field) => Ok(field),
+ Err(err) => {
+ if let Some(mut delayed_err) = delayed_err.take() {
+ delayed_err.emit();
+ }
+ return Err(err);
+ }
+ }?;
+ ate_comma = this.eat(&token::Comma);
+ // We just ate a comma, so there's no need to use
+ // `TrailingToken::Comma`
+ Ok((field, TrailingToken::None))
+ })?;
+
+ fields.push(field)
+ }
+
+ if let Some(mut err) = delayed_err {
+ if let Some(etc_span) = etc_span {
+ err.multipart_suggestion(
+ "move the `..` to the end of the field list",
+ vec![
+ (etc_span, String::new()),
+ (self.token.span, format!("{}.. }}", if ate_comma { "" } else { ", " })),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ }
+ Ok((fields, etc))
+ }
+
+ /// Recover on `...` as if it were `..` to avoid further errors.
+ /// See issue #46718.
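+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// let Foo { x, ... } = foo; // error: expected field pattern, found `...`
+    ///                           // help: to omit remaining fields, use one fewer `.`
+    /// ```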
+ fn recover_one_fewer_dotdot(&self) {
+ if self.token != token::DotDotDot {
+ return;
+ }
+
+ self.struct_span_err(self.token.span, "expected field pattern, found `...`")
+ .span_suggestion(
+ self.token.span,
+ "to omit remaining fields, use one fewer `.`",
+ "..",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+
+ fn parse_pat_field(&mut self, lo: Span, attrs: Vec<Attribute>) -> PResult<'a, PatField> {
+ // Check if a colon exists one ahead. This means we're parsing a fieldname.
+ let hi;
+ let (subpat, fieldname, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
+ // Parsing a pattern of the form `fieldname: pat`.
+ let fieldname = self.parse_field_name()?;
+ self.bump();
+ let pat = self.parse_pat_allow_top_alt(
+ None,
+ RecoverComma::No,
+ RecoverColon::No,
+ CommaRecoveryMode::EitherTupleOrPipe,
+ )?;
+ hi = pat.span;
+ (pat, fieldname, false)
+ } else {
+ // Parsing a pattern of the form `(box) (ref) (mut) fieldname`.
+ let is_box = self.eat_keyword(kw::Box);
+ let boxed_span = self.token.span;
+ let is_ref = self.eat_keyword(kw::Ref);
+ let is_mut = self.eat_keyword(kw::Mut);
+ let fieldname = self.parse_field_name()?;
+ hi = self.prev_token.span;
+
+ let bind_type = match (is_ref, is_mut) {
+ (true, true) => BindingMode::ByRef(Mutability::Mut),
+ (true, false) => BindingMode::ByRef(Mutability::Not),
+ (false, true) => BindingMode::ByValue(Mutability::Mut),
+ (false, false) => BindingMode::ByValue(Mutability::Not),
+ };
+
+ let fieldpat = self.mk_pat_ident(boxed_span.to(hi), bind_type, fieldname);
+ let subpat =
+ if is_box { self.mk_pat(lo.to(hi), PatKind::Box(fieldpat)) } else { fieldpat };
+ (subpat, fieldname, true)
+ };
+
+ Ok(PatField {
+ ident: fieldname,
+ pat: subpat,
+ is_shorthand,
+ attrs: attrs.into(),
+ id: ast::DUMMY_NODE_ID,
+ span: lo.to(hi),
+ is_placeholder: false,
+ })
+ }
+
+ pub(super) fn mk_pat_ident(&self, span: Span, bm: BindingMode, ident: Ident) -> P<Pat> {
+ self.mk_pat(span, PatKind::Ident(bm, ident, None))
+ }
+
+ pub(super) fn mk_pat(&self, span: Span, kind: PatKind) -> P<Pat> {
+ P(Pat { kind, span, id: ast::DUMMY_NODE_ID, tokens: None })
+ }
+}
diff --git a/compiler/rustc_parse/src/parser/path.rs b/compiler/rustc_parse/src/parser/path.rs
new file mode 100644
index 000000000..5cf1758c3
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/path.rs
@@ -0,0 +1,754 @@
+use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
+use super::{Parser, Restrictions, TokenType};
+use crate::maybe_whole;
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter, Token, TokenKind};
+use rustc_ast::{
+ self as ast, AngleBracketedArg, AngleBracketedArgs, AnonConst, AssocConstraint,
+ AssocConstraintKind, BlockCheckMode, GenericArg, GenericArgs, Generics, ParenthesizedArgs,
+ Path, PathSegment, QSelf,
+};
+use rustc_errors::{pluralize, Applicability, PResult};
+use rustc_span::source_map::{BytePos, Span};
+use rustc_span::symbol::{kw, sym, Ident};
+
+use std::mem;
+use tracing::debug;
+
+/// Specifies how to parse a path.
+#[derive(Copy, Clone, PartialEq)]
+pub enum PathStyle {
+ /// In some contexts, notably in expressions, paths with generic arguments are ambiguous
+ /// with something else. For example, in expressions `segment < ....` can be interpreted
+ /// as a comparison and `segment ( ....` can be interpreted as a function call.
+ /// In all such contexts the non-path interpretation is preferred by default for practical
+ /// reasons, but the path interpretation can be forced by the disambiguator `::`, e.g.
+ /// `x<y>` - comparisons, `x::<y>` - unambiguously a path.
+ Expr,
+ /// In other contexts, notably in types, no ambiguity exists and paths can be written
+ /// without the disambiguator, e.g., `x<y>` - unambiguously a path.
+ /// Paths with disambiguators are still accepted, `x::<Y>` - unambiguously a path too.
+ Type,
+ /// A path with generic arguments disallowed, e.g., `foo::bar::Baz`, used in imports,
+ /// visibilities or attributes.
+ /// Technically, this variant is unnecessary and e.g., `Expr` can be used instead
+ /// (paths in "mod" contexts have to be checked later for absence of generic arguments
+ /// anyway, due to macros), but it is used to avoid weird suggestions about expected
+ /// tokens when something goes wrong.
+ Mod,
+}
+
+impl<'a> Parser<'a> {
+ /// Parses a qualified path.
+ /// Assumes that the leading `<` has been parsed already.
+ ///
+ /// `qualified_path = <type [as trait_ref]>::path`
+ ///
+ /// # Examples
+ /// `<T>::default`
+ /// `<T as U>::a`
+ /// `<T as U>::F::a<S>` (without disambiguator)
+ /// `<T as U>::F::a::<S>` (with disambiguator)
+ pub(super) fn parse_qpath(&mut self, style: PathStyle) -> PResult<'a, (QSelf, Path)> {
+ let lo = self.prev_token.span;
+ let ty = self.parse_ty()?;
+
+ // `path` will contain the prefix of the path up to the `>`,
+ // if any (e.g., `U` in the `<T as U>::*` examples
+ // above). `path_span` has the span of that path, or an empty
+ // span in the case of something like `<T>::Bar`.
+ let (mut path, path_span);
+ if self.eat_keyword(kw::As) {
+ let path_lo = self.token.span;
+ path = self.parse_path(PathStyle::Type)?;
+ path_span = path_lo.to(self.prev_token.span);
+ } else {
+ path_span = self.token.span.to(self.token.span);
+ path = ast::Path { segments: Vec::new(), span: path_span, tokens: None };
+ }
+
+ // See doc comment for `unmatched_angle_bracket_count`.
+ self.expect(&token::Gt)?;
+ if self.unmatched_angle_bracket_count > 0 {
+ self.unmatched_angle_bracket_count -= 1;
+ debug!("parse_qpath: (decrement) count={:?}", self.unmatched_angle_bracket_count);
+ }
+
+ if !self.recover_colon_before_qpath_proj() {
+ self.expect(&token::ModSep)?;
+ }
+
+ let qself = QSelf { ty, path_span, position: path.segments.len() };
+ self.parse_path_segments(&mut path.segments, style, None)?;
+
+ Ok((
+ qself,
+ Path { segments: path.segments, span: lo.to(self.prev_token.span), tokens: None },
+ ))
+ }
+
+ /// Recover from an invalid single colon, when the user likely meant a qualified path.
+ /// We avoid emitting this if not followed by an identifier, as our assumption that the user
+ /// intended this to be a qualified path may not be correct.
+ ///
+ /// ```ignore (diagnostics)
+ /// <Bar as Baz<T>>:Qux
+ /// ^ help: use double colon
+ /// ```
+ fn recover_colon_before_qpath_proj(&mut self) -> bool {
+ if !self.check_noexpect(&TokenKind::Colon)
+ || self.look_ahead(1, |t| !t.is_ident() || t.is_reserved_ident())
+ {
+ return false;
+ }
+
+ self.bump(); // colon
+
+ self.diagnostic()
+ .struct_span_err(
+ self.prev_token.span,
+ "found single colon before projection in qualified path",
+ )
+ .span_suggestion(
+ self.prev_token.span,
+ "use double colon",
+ "::",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ true
+ }
+
+ pub(super) fn parse_path(&mut self, style: PathStyle) -> PResult<'a, Path> {
+ self.parse_path_inner(style, None)
+ }
+
+ /// Parses simple paths.
+ ///
+ /// `path = [::] segment+`
+ /// `segment = ident | ident[::]<args> | ident[::](args) [-> type]`
+ ///
+ /// # Examples
+ /// `a::b::C<D>` (without disambiguator)
+ /// `a::b::C::<D>` (with disambiguator)
+ /// `Fn(Args)` (without disambiguator)
+ /// `Fn::(Args)` (with disambiguator)
+ pub(super) fn parse_path_inner(
+ &mut self,
+ style: PathStyle,
+ ty_generics: Option<&Generics>,
+ ) -> PResult<'a, Path> {
+ let reject_generics_if_mod_style = |parser: &Parser<'_>, path: &Path| {
+ // Ensure generic arguments don't end up in attribute paths, such as:
+ //
+ // macro_rules! m {
+ // ($p:path) => { #[$p] struct S; }
+ // }
+ //
+ // m!(inline<u8>); //~ ERROR: unexpected generic arguments in path
+ //
+ if style == PathStyle::Mod && path.segments.iter().any(|segment| segment.args.is_some())
+ {
+ parser
+ .struct_span_err(
+ path.segments
+ .iter()
+ .filter_map(|segment| segment.args.as_ref())
+ .map(|arg| arg.span())
+ .collect::<Vec<_>>(),
+ "unexpected generic arguments in path",
+ )
+ .emit();
+ }
+ };
+
+ maybe_whole!(self, NtPath, |path| {
+ reject_generics_if_mod_style(self, &path);
+ path.into_inner()
+ });
+
+ if let token::Interpolated(nt) = &self.token.kind {
+ if let token::NtTy(ty) = &**nt {
+ if let ast::TyKind::Path(None, path) = &ty.kind {
+ let path = path.clone();
+ self.bump();
+ reject_generics_if_mod_style(self, &path);
+ return Ok(path);
+ }
+ }
+ }
+
+ let lo = self.token.span;
+ let mut segments = Vec::new();
+ let mod_sep_ctxt = self.token.span.ctxt();
+ if self.eat(&token::ModSep) {
+ segments.push(PathSegment::path_root(lo.shrink_to_lo().with_ctxt(mod_sep_ctxt)));
+ }
+ self.parse_path_segments(&mut segments, style, ty_generics)?;
+
+ Ok(Path { segments, span: lo.to(self.prev_token.span), tokens: None })
+ }
+
+ pub(super) fn parse_path_segments(
+ &mut self,
+ segments: &mut Vec<PathSegment>,
+ style: PathStyle,
+ ty_generics: Option<&Generics>,
+ ) -> PResult<'a, ()> {
+ loop {
+ let segment = self.parse_path_segment(style, ty_generics)?;
+ if style == PathStyle::Expr {
+ // In order to check for trailing angle brackets, we must have finished
+ // recursing (`parse_path_segment` can indirectly call this function),
+ // that is, the next token must be the highlighted part of the below example:
+ //
+ // `Foo::<Bar as Baz<T>>::Qux`
+ // ^ here
+ //
+ // As opposed to the below highlight (if we had only finished the first
+ // recursion):
+ //
+ // `Foo::<Bar as Baz<T>>::Qux`
+ // ^ here
+ //
+ // `PathStyle::Expr` is only provided at the root invocation and never in
+ // `parse_path_segment` to recurse and therefore can be checked to maintain
+ // this invariant.
+ self.check_trailing_angle_brackets(&segment, &[&token::ModSep]);
+ }
+ segments.push(segment);
+
+ if self.is_import_coupler() || !self.eat(&token::ModSep) {
+ return Ok(());
+ }
+ }
+ }
+
+ pub(super) fn parse_path_segment(
+ &mut self,
+ style: PathStyle,
+ ty_generics: Option<&Generics>,
+ ) -> PResult<'a, PathSegment> {
+ let ident = self.parse_path_segment_ident()?;
+ let is_args_start = |token: &Token| {
+ matches!(
+ token.kind,
+ token::Lt
+ | token::BinOp(token::Shl)
+ | token::OpenDelim(Delimiter::Parenthesis)
+ | token::LArrow
+ )
+ };
+ let check_args_start = |this: &mut Self| {
+ this.expected_tokens.extend_from_slice(&[
+ TokenType::Token(token::Lt),
+ TokenType::Token(token::OpenDelim(Delimiter::Parenthesis)),
+ ]);
+ is_args_start(&this.token)
+ };
+
+ Ok(
+ if style == PathStyle::Type && check_args_start(self)
+ || style != PathStyle::Mod
+ && self.check(&token::ModSep)
+ && self.look_ahead(1, |t| is_args_start(t))
+ {
+ // We use `style == PathStyle::Expr` to check if this is in a recursion or not. If
+ // it isn't, then we reset the unmatched angle bracket count as we're about to start
+ // parsing a new path.
+ if style == PathStyle::Expr {
+ self.unmatched_angle_bracket_count = 0;
+ self.max_angle_bracket_count = 0;
+ }
+
+ // Generic arguments are found - `<`, `(`, `::<` or `::(`.
+ self.eat(&token::ModSep);
+ let lo = self.token.span;
+ let args = if self.eat_lt() {
+ // `<'a, T, A = U>`
+ let args = self.parse_angle_args_with_leading_angle_bracket_recovery(
+ style,
+ lo,
+ ty_generics,
+ )?;
+ self.expect_gt().map_err(|mut err| {
+ // Attempt to find places where a missing `>` might belong.
+ if let Some(arg) = args
+ .iter()
+ .rev()
+ .skip_while(|arg| matches!(arg, AngleBracketedArg::Constraint(_)))
+ .next()
+ {
+ err.span_suggestion_verbose(
+ arg.span().shrink_to_hi(),
+ "you might have meant to end the type parameters here",
+ ">",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err
+ })?;
+ let span = lo.to(self.prev_token.span);
+ AngleBracketedArgs { args, span }.into()
+ } else {
+ // `(T, U) -> R`
+ let (inputs, _) = self.parse_paren_comma_seq(|p| p.parse_ty())?;
+ let inputs_span = lo.to(self.prev_token.span);
+ let output =
+ self.parse_ret_ty(AllowPlus::No, RecoverQPath::No, RecoverReturnSign::No)?;
+ let span = ident.span.to(self.prev_token.span);
+ ParenthesizedArgs { span, inputs, inputs_span, output }.into()
+ };
+
+ PathSegment { ident, args, id: ast::DUMMY_NODE_ID }
+ } else {
+ // Generic arguments are not found.
+ PathSegment::from_ident(ident)
+ },
+ )
+ }
+
+ pub(super) fn parse_path_segment_ident(&mut self) -> PResult<'a, Ident> {
+ match self.token.ident() {
+ Some((ident, false)) if ident.is_path_segment_keyword() => {
+ self.bump();
+ Ok(ident)
+ }
+ _ => self.parse_ident(),
+ }
+ }
+
+ /// Parses generic args (within a path segment) with recovery for extra leading angle brackets.
+ /// For the purposes of understanding the parsing logic of generic arguments, this function
+ /// can be thought of being the same as just calling `self.parse_angle_args()` if the source
+ /// had the correct amount of leading angle brackets.
+ ///
+ /// ```ignore (diagnostics)
+ /// bar::<<<<T as Foo>::Output>();
+ /// ^^ help: remove extra angle brackets
+ /// ```
+ fn parse_angle_args_with_leading_angle_bracket_recovery(
+ &mut self,
+ style: PathStyle,
+ lo: Span,
+ ty_generics: Option<&Generics>,
+ ) -> PResult<'a, Vec<AngleBracketedArg>> {
+ // We need to detect whether there are extra leading left angle brackets and produce an
+ // appropriate error and suggestion. This cannot be implemented by looking ahead at
+ // upcoming tokens for a matching `>` character - if there are unmatched `<` tokens
+ // then there won't be matching `>` tokens to find.
+ //
+ // To explain how this detection works, consider the following example:
+ //
+ // ```ignore (diagnostics)
+ // bar::<<<<T as Foo>::Output>();
+ // ^^ help: remove extra angle brackets
+ // ```
+ //
+ // Parsing of the left angle brackets starts in this function. We start by parsing the
+ // `<` token (incrementing the counter of unmatched angle brackets on `Parser` via
+ // `eat_lt`):
+ //
+ // *Upcoming tokens:* `<<<<T as Foo>::Output>;`
+ // *Unmatched count:* 1
+ // *`parse_path_segment` calls deep:* 0
+ //
+ // This has the effect of recursing as this function is called if a `<` character
+ // is found within the expected generic arguments:
+ //
+ // *Upcoming tokens:* `<<<T as Foo>::Output>;`
+ // *Unmatched count:* 2
+ // *`parse_path_segment` calls deep:* 1
+ //
+ // Eventually we will have recursed until having consumed all of the `<` tokens and
+ // this will be reflected in the count:
+ //
+ // *Upcoming tokens:* `T as Foo>::Output>;`
+ // *Unmatched count:* 4
+        // *`parse_path_segment` calls deep:* 3
+ //
+ // The parser will continue until reaching the first `>` - this will decrement the
+ // unmatched angle bracket count and return to the parent invocation of this function
+ // having succeeded in parsing:
+ //
+ // *Upcoming tokens:* `::Output>;`
+ // *Unmatched count:* 3
+ // *`parse_path_segment` calls deep:* 2
+ //
+ // This will continue until the next `>` character which will also return successfully
+ // to the parent invocation of this function and decrement the count:
+ //
+ // *Upcoming tokens:* `;`
+ // *Unmatched count:* 2
+ // *`parse_path_segment` calls deep:* 1
+ //
+ // At this point, this function will expect to find another matching `>` character but
+ // won't be able to and will return an error. This will continue all the way up the
+ // call stack until the first invocation:
+ //
+ // *Upcoming tokens:* `;`
+ // *Unmatched count:* 2
+ // *`parse_path_segment` calls deep:* 0
+ //
+ // In doing this, we have managed to work out how many unmatched leading left angle
+ // brackets there are, but we cannot recover as the unmatched angle brackets have
+ // already been consumed. To remedy this, we keep a snapshot of the parser state
+ // before we do the above. We can then inspect whether we ended up with a parsing error
+ // and unmatched left angle brackets and if so, restore the parser state before we
+ // consumed any `<` characters to emit an error and consume the erroneous tokens to
+ // recover by attempting to parse again.
+ //
+ // In practice, the recursion of this function is indirect and there will be other
+ // locations that consume some `<` characters - as long as we update the count when
+ // this happens, it isn't an issue.
+
+ let is_first_invocation = style == PathStyle::Expr;
+ // Take a snapshot before attempting to parse - we can restore this later.
+ let snapshot = if is_first_invocation { Some(self.clone()) } else { None };
+
+ debug!("parse_generic_args_with_leading_angle_bracket_recovery: (snapshotting)");
+ match self.parse_angle_args(ty_generics) {
+ Ok(args) => Ok(args),
+ Err(e) if is_first_invocation && self.unmatched_angle_bracket_count > 0 => {
+ // Swap `self` with our backup of the parser state before attempting to parse
+ // generic arguments.
+ let snapshot = mem::replace(self, snapshot.unwrap());
+
+ // Eat the unmatched angle brackets.
+ let all_angle_brackets = (0..snapshot.unmatched_angle_bracket_count)
+ .fold(true, |a, _| a && self.eat_lt());
+
+ if !all_angle_brackets {
+ // If there are other tokens in between the extraneous `<`s, we cannot simply
+ // suggest to remove them. This check also prevents us from accidentally ending
+ // up in the middle of a multibyte character (issue #84104).
+ let _ = mem::replace(self, snapshot);
+ Err(e)
+ } else {
+ // Cancel error from being unable to find `>`. We know the error
+ // must have been this due to a non-zero unmatched angle bracket
+ // count.
+ e.cancel();
+
+ debug!(
+ "parse_generic_args_with_leading_angle_bracket_recovery: (snapshot failure) \
+ snapshot.count={:?}",
+ snapshot.unmatched_angle_bracket_count,
+ );
+
+ // Make a span over ${unmatched angle bracket count} characters.
+ // This is safe because `all_angle_brackets` ensures that there are only `<`s,
+ // i.e. no multibyte characters, in this range.
+ let span =
+ lo.with_hi(lo.lo() + BytePos(snapshot.unmatched_angle_bracket_count));
+ self.struct_span_err(
+ span,
+ &format!(
+ "unmatched angle bracket{}",
+ pluralize!(snapshot.unmatched_angle_bracket_count)
+ ),
+ )
+ .span_suggestion(
+ span,
+ &format!(
+ "remove extra angle bracket{}",
+ pluralize!(snapshot.unmatched_angle_bracket_count)
+ ),
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+
+ // Try again without unmatched angle bracket characters.
+ self.parse_angle_args(ty_generics)
+ }
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ /// Parses (possibly empty) list of generic arguments / associated item constraints,
+ /// possibly including trailing comma.
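+    ///
+    /// For illustration, one recovery performed here (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// Foo<A; B> // error on the `;`
+    ///           // help: use a comma to separate type parameters
+    /// ```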
+ pub(super) fn parse_angle_args(
+ &mut self,
+ ty_generics: Option<&Generics>,
+ ) -> PResult<'a, Vec<AngleBracketedArg>> {
+ let mut args = Vec::new();
+ while let Some(arg) = self.parse_angle_arg(ty_generics)? {
+ args.push(arg);
+ if !self.eat(&token::Comma) {
+ if self.check_noexpect(&TokenKind::Semi)
+ && self.look_ahead(1, |t| t.is_ident() || t.is_lifetime())
+ {
+ // Add `>` to the list of expected tokens.
+ self.check(&token::Gt);
+                    // Recover from a `;` typed in place of `,`.
+ let mut err = self.unexpected::<()>().unwrap_err();
+ self.bump();
+ err.span_suggestion_verbose(
+ self.prev_token.span.until(self.token.span),
+ "use a comma to separate type parameters",
+ ", ",
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ continue;
+ }
+ if !self.token.kind.should_end_const_arg() {
+ if self.handle_ambiguous_unbraced_const_arg(&mut args)? {
+ // We've managed to (partially) recover, so continue trying to parse
+ // arguments.
+ continue;
+ }
+ }
+ break;
+ }
+ }
+ Ok(args)
+ }
+
+ /// Parses a single argument in the angle arguments `<...>` of a path segment.
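+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// Iterator<Item = u32>  // An associated item constraint (equality).
+    /// Iterator<Item: Clone> // An associated type bound, gated under `associated_type_bounds`.
+    /// Vec<u8>               // A plain generic argument.
+    /// ```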
+ fn parse_angle_arg(
+ &mut self,
+ ty_generics: Option<&Generics>,
+ ) -> PResult<'a, Option<AngleBracketedArg>> {
+ let lo = self.token.span;
+ let arg = self.parse_generic_arg(ty_generics)?;
+ match arg {
+ Some(arg) => {
+                // We use `check_noexpect` here because we first want to find out whether `=` or
+                // `:` is present, and then use that info to push the other token onto the list of
+                // expected tokens.
+ let separated =
+ self.check_noexpect(&token::Colon) || self.check_noexpect(&token::Eq);
+ if separated && (self.check(&token::Colon) | self.check(&token::Eq)) {
+ let arg_span = arg.span();
+ let (binder, ident, gen_args) = match self.get_ident_from_generic_arg(&arg) {
+ Ok(ident_gen_args) => ident_gen_args,
+ Err(()) => return Ok(Some(AngleBracketedArg::Arg(arg))),
+ };
+ if binder.is_some() {
+ // FIXME(compiler-errors): this could be improved by suggesting lifting
+ // this up to the trait, at least before this becomes real syntax.
+ // e.g. `Trait<for<'a> Assoc = Ty>` -> `for<'a> Trait<Assoc = Ty>`
+ return Err(self.struct_span_err(
+ arg_span,
+ "`for<...>` is not allowed on associated type bounds",
+ ));
+ }
+ let kind = if self.eat(&token::Colon) {
+ // Parse associated type constraint bound.
+
+ let bounds = self.parse_generic_bounds(Some(self.prev_token.span))?;
+ AssocConstraintKind::Bound { bounds }
+ } else if self.eat(&token::Eq) {
+ self.parse_assoc_equality_term(ident, self.prev_token.span)?
+ } else {
+ unreachable!();
+ };
+
+ let span = lo.to(self.prev_token.span);
+
+ // Gate associated type bounds, e.g., `Iterator<Item: Ord>`.
+ if let AssocConstraintKind::Bound { .. } = kind {
+ self.sess.gated_spans.gate(sym::associated_type_bounds, span);
+ }
+ let constraint =
+ AssocConstraint { id: ast::DUMMY_NODE_ID, ident, gen_args, kind, span };
+ Ok(Some(AngleBracketedArg::Constraint(constraint)))
+ } else {
+                    // We only want to suggest `:` and `=` in contexts where the previous token
+                    // is an ident and the current token or the next token is an ident.
+ if self.prev_token.is_ident()
+ && (self.token.is_ident() || self.look_ahead(1, |token| token.is_ident()))
+ {
+ self.check(&token::Colon);
+ self.check(&token::Eq);
+ }
+ Ok(Some(AngleBracketedArg::Arg(arg)))
+ }
+ }
+ _ => Ok(None),
+ }
+ }
+
+ /// Parse the term to the right of an associated item equality constraint.
+ /// That is, parse `<term>` in `Item = <term>`.
+ /// Right now, this only admits types in `<term>`.
+ fn parse_assoc_equality_term(
+ &mut self,
+ ident: Ident,
+ eq: Span,
+ ) -> PResult<'a, AssocConstraintKind> {
+ let arg = self.parse_generic_arg(None)?;
+ let span = ident.span.to(self.prev_token.span);
+ let term = match arg {
+ Some(GenericArg::Type(ty)) => ty.into(),
+ Some(GenericArg::Const(c)) => {
+ self.sess.gated_spans.gate(sym::associated_const_equality, span);
+ c.into()
+ }
+ Some(GenericArg::Lifetime(lt)) => {
+ self.struct_span_err(span, "associated lifetimes are not supported")
+ .span_label(lt.ident.span, "the lifetime is given here")
+ .help("if you meant to specify a trait object, write `dyn Trait + 'lifetime`")
+ .emit();
+ self.mk_ty(span, ast::TyKind::Err).into()
+ }
+ None => {
+ let after_eq = eq.shrink_to_hi();
+ let before_next = self.token.span.shrink_to_lo();
+ let mut err = self
+ .struct_span_err(after_eq.to(before_next), "missing type to the right of `=`");
+ if matches!(self.token.kind, token::Comma | token::Gt) {
+ err.span_suggestion(
+ self.sess.source_map().next_point(eq).to(before_next),
+ "to constrain the associated type, add a type after `=`",
+ " TheType",
+ Applicability::HasPlaceholders,
+ );
+ err.span_suggestion(
+ eq.to(before_next),
+ &format!("remove the `=` if `{}` is a type", ident),
+ "",
+ Applicability::MaybeIncorrect,
+ )
+ } else {
+ err.span_label(
+ self.token.span,
+ &format!("expected type, found {}", super::token_descr(&self.token)),
+ )
+ };
+ return Err(err);
+ }
+ };
+ Ok(AssocConstraintKind::Equality { term })
+ }
+
+ /// We do not permit arbitrary expressions as const arguments. They must be one of:
+ /// - An expression surrounded in `{}`.
+ /// - A literal.
+ /// - A numeric literal prefixed by `-`.
+ /// - A single-segment path.
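+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// f::<{ N + 1 }>(); // A braced expression: valid.
+    /// f::<3>();         // A literal: valid.
+    /// f::<-3>();        // A `-`-prefixed numeric literal: valid.
+    /// f::<N>();         // A single-segment path: valid.
+    /// f::<N + 1>();     // An arbitrary expression: invalid without braces.
+    /// ```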
+ pub(super) fn expr_is_valid_const_arg(&self, expr: &P<rustc_ast::Expr>) -> bool {
+ match &expr.kind {
+ ast::ExprKind::Block(_, _) | ast::ExprKind::Lit(_) => true,
+ ast::ExprKind::Unary(ast::UnOp::Neg, expr) => {
+ matches!(expr.kind, ast::ExprKind::Lit(_))
+ }
+ // We can only resolve single-segment paths at the moment, because multi-segment paths
+ // require type-checking: see `visit_generic_arg` in `src/librustc_resolve/late.rs`.
+ ast::ExprKind::Path(None, path)
+ if path.segments.len() == 1 && path.segments[0].args.is_none() =>
+ {
+ true
+ }
+ _ => false,
+ }
+ }
+
+ /// Parse a const argument, e.g. `<3>`. It is assumed the angle brackets will be parsed by
+ /// the caller.
+ pub(super) fn parse_const_arg(&mut self) -> PResult<'a, AnonConst> {
+ // Parse const argument.
+ let value = if let token::OpenDelim(Delimiter::Brace) = self.token.kind {
+ self.parse_block_expr(
+ None,
+ self.token.span,
+ BlockCheckMode::Default,
+ ast::AttrVec::new(),
+ )?
+ } else {
+ self.handle_unambiguous_unbraced_const_arg()?
+ };
+ Ok(AnonConst { id: ast::DUMMY_NODE_ID, value })
+ }
+
+ /// Parse a generic argument in a path segment.
+ /// This does not include constraints, e.g., `Item = u8`, which is handled in `parse_angle_arg`.
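+    ///
+    /// For illustration (a non-exhaustive sketch):
+    ///
+    /// ```ignore (illustrative)
+    /// Foo<'a, T, { N + 1 }> // A lifetime, a type, and a const argument.
+    /// ```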
+ pub(super) fn parse_generic_arg(
+ &mut self,
+ ty_generics: Option<&Generics>,
+ ) -> PResult<'a, Option<GenericArg>> {
+ let start = self.token.span;
+ let arg = if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) {
+ // Parse lifetime argument.
+ GenericArg::Lifetime(self.expect_lifetime())
+ } else if self.check_const_arg() {
+ // Parse const argument.
+ GenericArg::Const(self.parse_const_arg()?)
+ } else if self.check_type() {
+ // Parse type argument.
+ let is_const_fn =
+ self.look_ahead(1, |t| t.kind == token::OpenDelim(Delimiter::Parenthesis));
+ let mut snapshot = self.create_snapshot_for_diagnostic();
+ match self.parse_ty() {
+ Ok(ty) => GenericArg::Type(ty),
+ Err(err) => {
+ if is_const_fn {
+ match (*snapshot).parse_expr_res(Restrictions::CONST_EXPR, None) {
+ Ok(expr) => {
+ self.restore_snapshot(snapshot);
+ return Ok(Some(self.dummy_const_arg_needs_braces(err, expr.span)));
+ }
+ Err(err) => {
+ err.cancel();
+ }
+ }
+ }
+ // Try to recover from possible `const` arg without braces.
+ return self.recover_const_arg(start, err).map(Some);
+ }
+ }
+ } else if self.token.is_keyword(kw::Const) {
+ return self.recover_const_param_declaration(ty_generics);
+ } else {
+ // Fall back by trying to parse a const-expr expression. If we successfully do so,
+ // then we should report an error that it needs to be wrapped in braces.
+ let snapshot = self.create_snapshot_for_diagnostic();
+ match self.parse_expr_res(Restrictions::CONST_EXPR, None) {
+ Ok(expr) => {
+ return Ok(Some(self.dummy_const_arg_needs_braces(
+ self.struct_span_err(expr.span, "invalid const generic expression"),
+ expr.span,
+ )));
+ }
+ Err(err) => {
+ self.restore_snapshot(snapshot);
+ err.cancel();
+ return Ok(None);
+ }
+ }
+ };
+ Ok(Some(arg))
+ }
+
+    /// Given an arg inside generics, we try to destructure it as if it were the LHS in
+ /// `LHS = ...`, i.e. an associated type binding.
+ /// This returns (optionally, if they are present) any `for<'a, 'b>` binder args, the
+ /// identifier, and any GAT arguments.
+ fn get_ident_from_generic_arg(
+ &self,
+ gen_arg: &GenericArg,
+ ) -> Result<(Option<Vec<ast::GenericParam>>, Ident, Option<GenericArgs>), ()> {
+ if let GenericArg::Type(ty) = gen_arg {
+ if let ast::TyKind::Path(qself, path) = &ty.kind
+ && qself.is_none()
+ && let [seg] = path.segments.as_slice()
+ {
+ return Ok((None, seg.ident, seg.args.as_deref().cloned()));
+ } else if let ast::TyKind::TraitObject(bounds, ast::TraitObjectSyntax::None) = &ty.kind
+ && let [ast::GenericBound::Trait(trait_ref, ast::TraitBoundModifier::None)] =
+ bounds.as_slice()
+ && let [seg] = trait_ref.trait_ref.path.segments.as_slice()
+ {
+ return Ok((
+ Some(trait_ref.bound_generic_params.clone()),
+ seg.ident,
+ seg.args.as_deref().cloned(),
+ ));
+ }
+ }
+ Err(())
+ }
+}
diff --git a/compiler/rustc_parse/src/parser/stmt.rs b/compiler/rustc_parse/src/parser/stmt.rs
new file mode 100644
index 000000000..51bd9d2d3
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/stmt.rs
@@ -0,0 +1,648 @@
+use super::attr::DEFAULT_INNER_ATTR_FORBIDDEN;
+use super::diagnostics::{AttemptLocalParseRecovery, Error};
+use super::expr::LhsExpr;
+use super::pat::RecoverComma;
+use super::path::PathStyle;
+use super::TrailingToken;
+use super::{
+ AttrWrapper, BlockMode, FnParseMode, ForceCollect, Parser, Restrictions, SemiColonMode,
+};
+use crate::maybe_whole;
+
+use rustc_ast as ast;
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter, TokenKind};
+use rustc_ast::util::classify;
+use rustc_ast::{AttrStyle, AttrVec, Attribute, LocalKind, MacCall, MacCallStmt, MacStmtStyle};
+use rustc_ast::{Block, BlockCheckMode, Expr, ExprKind, HasAttrs, Local, Stmt};
+use rustc_ast::{StmtKind, DUMMY_NODE_ID};
+use rustc_errors::{Applicability, DiagnosticBuilder, ErrorGuaranteed, PResult};
+use rustc_span::source_map::{BytePos, Span};
+use rustc_span::symbol::{kw, sym};
+
+use std::mem;
+
+impl<'a> Parser<'a> {
+ /// Parses a statement. This stops just before trailing semicolons on everything but items.
+ /// e.g., a `StmtKind::Semi` parses to a `StmtKind::Expr`, leaving the trailing `;` unconsumed.
+ // Public for rustfmt usage.
+ pub fn parse_stmt(&mut self, force_collect: ForceCollect) -> PResult<'a, Option<Stmt>> {
+ Ok(self.parse_stmt_without_recovery(false, force_collect).unwrap_or_else(|mut e| {
+ e.emit();
+ self.recover_stmt_(SemiColonMode::Break, BlockMode::Ignore);
+ None
+ }))
+ }
+
+    /// If `force_collect` is `ForceCollect::Yes`, forces collection of tokens regardless of
+    /// whether or not we have attributes.
+ pub(crate) fn parse_stmt_without_recovery(
+ &mut self,
+ capture_semi: bool,
+ force_collect: ForceCollect,
+ ) -> PResult<'a, Option<Stmt>> {
+ let attrs = self.parse_outer_attributes()?;
+ let lo = self.token.span;
+
+ // Don't use `maybe_whole` so that we have precise control
+ // over when we bump the parser
+ if let token::Interpolated(nt) = &self.token.kind && let token::NtStmt(stmt) = &**nt {
+ let mut stmt = stmt.clone();
+ self.bump();
+ stmt.visit_attrs(|stmt_attrs| {
+ attrs.prepend_to_nt_inner(stmt_attrs);
+ });
+ return Ok(Some(stmt.into_inner()));
+ }
+
+ Ok(Some(if self.token.is_keyword(kw::Let) {
+ self.parse_local_mk(lo, attrs, capture_semi, force_collect)?
+ } else if self.is_kw_followed_by_ident(kw::Mut) {
+ self.recover_stmt_local(lo, attrs, "missing keyword", "let mut")?
+ } else if self.is_kw_followed_by_ident(kw::Auto) {
+ self.bump(); // `auto`
+ let msg = "write `let` instead of `auto` to introduce a new variable";
+ self.recover_stmt_local(lo, attrs, msg, "let")?
+ } else if self.is_kw_followed_by_ident(sym::var) {
+ self.bump(); // `var`
+ let msg = "write `let` instead of `var` to introduce a new variable";
+ self.recover_stmt_local(lo, attrs, msg, "let")?
+ } else if self.check_path() && !self.token.is_qpath_start() && !self.is_path_start_item() {
+ // We have avoided contextual keywords like `union`, items with `crate` visibility,
+ // or `auto trait` items. We aim to parse an arbitrary path `a::b` but not something
+            // that starts like a path (1 token) but is in fact not a path.
+ // Also, we avoid stealing syntax from `parse_item_`.
+ if force_collect == ForceCollect::Yes {
+ self.collect_tokens_no_attrs(|this| this.parse_stmt_path_start(lo, attrs))
+ } else {
+ self.parse_stmt_path_start(lo, attrs)
+ }?
+ } else if let Some(item) = self.parse_item_common(
+ attrs.clone(),
+ false,
+ true,
+ FnParseMode { req_name: |_| true, req_body: true },
+ force_collect,
+ )? {
+ // FIXME: Bad copy of attrs
+ self.mk_stmt(lo.to(item.span), StmtKind::Item(P(item)))
+ } else if self.eat(&token::Semi) {
+ // Do not attempt to parse an expression if we're done here.
+ self.error_outer_attrs(&attrs.take_for_recovery());
+ self.mk_stmt(lo, StmtKind::Empty)
+ } else if self.token != token::CloseDelim(Delimiter::Brace) {
+ // Remainder are line-expr stmts.
+ let e = if force_collect == ForceCollect::Yes {
+ self.collect_tokens_no_attrs(|this| {
+ this.parse_expr_res(Restrictions::STMT_EXPR, Some(attrs))
+ })
+ } else {
+ self.parse_expr_res(Restrictions::STMT_EXPR, Some(attrs))
+ }?;
+ if matches!(e.kind, ExprKind::Assign(..)) && self.eat_keyword(kw::Else) {
+ let bl = self.parse_block()?;
+ // Destructuring assignment ... else.
+ // This is not allowed, but point it out in a nice way.
+ let mut err = self.struct_span_err(
+ e.span.to(bl.span),
+ "<assignment> ... else { ... } is not allowed",
+ );
+ err.emit();
+ }
+ self.mk_stmt(lo.to(e.span), StmtKind::Expr(e))
+ } else {
+ self.error_outer_attrs(&attrs.take_for_recovery());
+ return Ok(None);
+ }))
+ }
+
+ fn parse_stmt_path_start(&mut self, lo: Span, attrs: AttrWrapper) -> PResult<'a, Stmt> {
+ let stmt = self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let path = this.parse_path(PathStyle::Expr)?;
+
+ if this.eat(&token::Not) {
+ let stmt_mac = this.parse_stmt_mac(lo, attrs.into(), path)?;
+ if this.token == token::Semi {
+ return Ok((stmt_mac, TrailingToken::Semi));
+ } else {
+ return Ok((stmt_mac, TrailingToken::None));
+ }
+ }
+
+ let expr = if this.eat(&token::OpenDelim(Delimiter::Brace)) {
+ this.parse_struct_expr(None, path, AttrVec::new(), true)?
+ } else {
+ let hi = this.prev_token.span;
+ this.mk_expr(lo.to(hi), ExprKind::Path(None, path), AttrVec::new())
+ };
+
+ let expr = this.with_res(Restrictions::STMT_EXPR, |this| {
+ this.parse_dot_or_call_expr_with(expr, lo, attrs)
+ })?;
+ // `DUMMY_SP` will get overwritten later in this function
+ Ok((this.mk_stmt(rustc_span::DUMMY_SP, StmtKind::Expr(expr)), TrailingToken::None))
+ })?;
+
+ if let StmtKind::Expr(expr) = stmt.kind {
+ // Perform this outside of the `collect_tokens_trailing_token` closure,
+ // since our outer attributes do not apply to this part of the expression
+ let expr = self.with_res(Restrictions::STMT_EXPR, |this| {
+ this.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(expr))
+ })?;
+ Ok(self.mk_stmt(lo.to(self.prev_token.span), StmtKind::Expr(expr)))
+ } else {
+ Ok(stmt)
+ }
+ }
+
+ /// Parses a statement macro `mac!(args)` provided a `path` representing `mac`.
+ /// At this point, the `!` token after the path has already been eaten.
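+ ///
+ /// Illustrative examples of the statement/expression distinction made below:
+ /// ```ignore (illustrative)
+ /// mac!(arg); // `StmtKind::MacCall`
+ /// mac! { arg } // `StmtKind::MacCall` (braces, no `;` required)
+ /// mac! { arg }.method() // `StmtKind::Expr`, since `.` follows the braces
+ /// ```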
+ fn parse_stmt_mac(&mut self, lo: Span, attrs: AttrVec, path: ast::Path) -> PResult<'a, Stmt> {
+ let args = self.parse_mac_args()?;
+ let delim = args.delim();
+ let hi = self.prev_token.span;
+
+ let style = match delim {
+ Some(Delimiter::Brace) => MacStmtStyle::Braces,
+ Some(_) => MacStmtStyle::NoBraces,
+ None => unreachable!(),
+ };
+
+ let mac = MacCall { path, args, prior_type_ascription: self.last_type_ascription };
+
+ let kind = if (style == MacStmtStyle::Braces
+ && self.token != token::Dot
+ && self.token != token::Question)
+ || self.token == token::Semi
+ || self.token == token::Eof
+ {
+ StmtKind::MacCall(P(MacCallStmt { mac, style, attrs, tokens: None }))
+ } else {
+ // Since none of the above applied, this is an expression statement macro.
+ let e = self.mk_expr(lo.to(hi), ExprKind::MacCall(mac), AttrVec::new());
+ let e = self.maybe_recover_from_bad_qpath(e)?;
+ let e = self.parse_dot_or_call_expr_with(e, lo, attrs.into())?;
+ let e = self.parse_assoc_expr_with(0, LhsExpr::AlreadyParsed(e))?;
+ StmtKind::Expr(e)
+ };
+ Ok(self.mk_stmt(lo.to(hi), kind))
+ }
+
+ /// Error on outer attributes in this context.
+ /// Also error if the previous token was a doc comment.
+ fn error_outer_attrs(&self, attrs: &[Attribute]) {
+ if let [.., last] = attrs {
+ if last.is_doc_comment() {
+ self.span_err(last.span, Error::UselessDocComment).emit();
+ } else if attrs.iter().any(|a| a.style == AttrStyle::Outer) {
+ self.struct_span_err(last.span, "expected statement after outer attribute").emit();
+ }
+ }
+ }
+
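+ /// Recovers a local declaration that was introduced with a keyword from another
+ /// language, e.g. (illustrative) `auto x = 5;` or `var x = 5;`, by parsing it as
+ /// a `let` and emitting a suggestion.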
+ fn recover_stmt_local(
+ &mut self,
+ lo: Span,
+ attrs: AttrWrapper,
+ msg: &str,
+ sugg: &str,
+ ) -> PResult<'a, Stmt> {
+ let stmt = self.recover_local_after_let(lo, attrs)?;
+ self.struct_span_err(lo, "invalid variable declaration")
+ .span_suggestion(lo, msg, sugg, Applicability::MachineApplicable)
+ .emit();
+ Ok(stmt)
+ }
+
+ fn parse_local_mk(
+ &mut self,
+ lo: Span,
+ attrs: AttrWrapper,
+ capture_semi: bool,
+ force_collect: ForceCollect,
+ ) -> PResult<'a, Stmt> {
+ self.collect_tokens_trailing_token(attrs, force_collect, |this, attrs| {
+ this.expect_keyword(kw::Let)?;
+ let local = this.parse_local(attrs.into())?;
+ let trailing = if capture_semi && this.token.kind == token::Semi {
+ TrailingToken::Semi
+ } else {
+ TrailingToken::None
+ };
+ Ok((this.mk_stmt(lo.to(this.prev_token.span), StmtKind::Local(local)), trailing))
+ })
+ }
+
+ fn recover_local_after_let(&mut self, lo: Span, attrs: AttrWrapper) -> PResult<'a, Stmt> {
+ self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
+ let local = this.parse_local(attrs.into())?;
+ // FIXME - maybe capture semicolon in recovery?
+ Ok((
+ this.mk_stmt(lo.to(this.prev_token.span), StmtKind::Local(local)),
+ TrailingToken::None,
+ ))
+ })
+ }
+
+ /// Parses a local variable declaration.
+ fn parse_local(&mut self, attrs: AttrVec) -> PResult<'a, P<Local>> {
+ let lo = self.prev_token.span;
+ let (pat, colon) = self.parse_pat_before_ty(None, RecoverComma::Yes, "`let` bindings")?;
+
+ let (err, ty) = if colon {
+ // Save the state of the parser before parsing type normally, in case there is a `:`
+ // instead of an `=` typo.
+ let parser_snapshot_before_type = self.clone();
+ let colon_sp = self.prev_token.span;
+ match self.parse_ty() {
+ Ok(ty) => (None, Some(ty)),
+ Err(mut err) => {
+ if let Ok(snip) = self.span_to_snippet(pat.span) {
+ err.span_label(pat.span, format!("while parsing the type for `{}`", snip));
+ }
+ // We use `noexpect` here because we don't actually expect `Eq` to be here,
+ // but we still check for it so that we can handle it if it is there.
+ let err = if self.check_noexpect(&token::Eq) {
+ err.emit();
+ None
+ } else {
+ // Rewind to before attempting to parse the type and continue parsing.
+ let parser_snapshot_after_type =
+ mem::replace(self, parser_snapshot_before_type);
+ Some((parser_snapshot_after_type, colon_sp, err))
+ };
+ (err, None)
+ }
+ }
+ } else {
+ (None, None)
+ };
+ let init = match (self.parse_initializer(err.is_some()), err) {
+ (Ok(init), None) => {
+ // init parsed, ty parsed
+ init
+ }
+ (Ok(init), Some((_, colon_sp, mut err))) => {
+ // init parsed, ty error
+ // We could parse the type as if it were the initializer; it is likely there was
+ // a typo in the code: `:` instead of `=`. Add the suggestion and emit the error.
+ err.span_suggestion_short(
+ colon_sp,
+ "use `=` if you meant to assign",
+ " =",
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ // As this was parsed successfully, continue as if the code has been fixed for the
+ // rest of the file. It will still fail due to the emitted error, but we avoid
+ // extra noise.
+ init
+ }
+ (Err(init_err), Some((snapshot, _, ty_err))) => {
+ // init error, ty error
+ init_err.cancel();
+ // Couldn't parse either the type or the initializer; raise only the type error
+ // and restore the parser state to before the type was parsed.
+ // let x: <parse_error>;
+ *self = snapshot;
+ return Err(ty_err);
+ }
+ (Err(err), None) => {
+ // init error, ty parsed
+ // Couldn't parse the initializer and we're not attempting to recover a failed
+ // parse of the type, so return the error.
+ return Err(err);
+ }
+ };
+ let kind = match init {
+ None => LocalKind::Decl,
+ Some(init) => {
+ if self.eat_keyword(kw::Else) {
+ if self.token.is_keyword(kw::If) {
+ // `let...else if`. Emit the same error that `parse_block()` would,
+ // but explicitly point out that this pattern is not allowed.
+ let msg = "conditional `else if` is not supported for `let...else`";
+ return Err(self.error_block_no_opening_brace_msg(msg));
+ }
+ let els = self.parse_block()?;
+ self.check_let_else_init_bool_expr(&init);
+ self.check_let_else_init_trailing_brace(&init);
+ LocalKind::InitElse(init, els)
+ } else {
+ LocalKind::Init(init)
+ }
+ }
+ };
+ let hi = if self.token == token::Semi { self.token.span } else { self.prev_token.span };
+ Ok(P(ast::Local { ty, pat, kind, id: DUMMY_NODE_ID, span: lo.to(hi), attrs, tokens: None }))
+ }
+
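+ /// Errors on a lazy boolean operator (`&&`/`||`) used directly as the initializer
+ /// of `let...else`, e.g. (illustrative) `let _ = a && b else { return; };`,
+ /// suggesting parentheses around the initializer.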
+ fn check_let_else_init_bool_expr(&self, init: &ast::Expr) {
+ if let ast::ExprKind::Binary(op, ..) = init.kind {
+ if op.node.lazy() {
+ let suggs = vec![
+ (init.span.shrink_to_lo(), "(".to_string()),
+ (init.span.shrink_to_hi(), ")".to_string()),
+ ];
+ self.struct_span_err(
+ init.span,
+ &format!(
+ "a `{}` expression cannot be directly assigned in `let...else`",
+ op.node.to_string()
+ ),
+ )
+ .multipart_suggestion(
+ "wrap the expression in parentheses",
+ suggs,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ }
+ }
+
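+ /// Errors when the initializer of `let...else` ends in a `}`, e.g. (illustrative)
+ /// `let x = match e { _ => 1 } else { return; };`, because a `}` immediately
+ /// before `else` is not allowed here; wrapping the initializer in parentheses
+ /// is suggested.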
+ fn check_let_else_init_trailing_brace(&self, init: &ast::Expr) {
+ if let Some(trailing) = classify::expr_trailing_brace(init) {
+ let err_span = trailing.span.with_lo(trailing.span.hi() - BytePos(1));
+ let suggs = vec![
+ (trailing.span.shrink_to_lo(), "(".to_string()),
+ (trailing.span.shrink_to_hi(), ")".to_string()),
+ ];
+ self.struct_span_err(
+ err_span,
+ "right curly brace `}` before `else` in a `let...else` statement not allowed",
+ )
+ .multipart_suggestion(
+ "try wrapping the expression in parentheses",
+ suggs,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ }
+
+ /// Parses the RHS of a local variable declaration (e.g., `= 14;`).
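+ ///
+ /// An illustrative sketch, including the `<op>=` recovery handled below:
+ /// ```ignore (illustrative)
+ /// let x = 14; // initializer parsed
+ /// let x; // no initializer
+ /// let x += 1; // recovered as `let x = 1`, with an error
+ /// ```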
+ fn parse_initializer(&mut self, eq_optional: bool) -> PResult<'a, Option<P<Expr>>> {
+ let eq_consumed = match self.token.kind {
+ token::BinOpEq(..) => {
+ // Recover `let x <op>= 1` as `let x = 1`
+ self.struct_span_err(
+ self.token.span,
+ "can't reassign to an uninitialized variable",
+ )
+ .span_suggestion_short(
+ self.token.span,
+ "initialize the variable",
+ "=",
+ Applicability::MaybeIncorrect,
+ )
+ .help("if you meant to overwrite, remove the `let` binding")
+ .emit();
+ self.bump();
+ true
+ }
+ _ => self.eat(&token::Eq),
+ };
+
+ Ok(if eq_consumed || eq_optional { Some(self.parse_expr()?) } else { None })
+ }
+
+ /// Parses a block. No inner attributes are allowed.
+ pub(super) fn parse_block(&mut self) -> PResult<'a, P<Block>> {
+ let (attrs, block) = self.parse_inner_attrs_and_block()?;
+ if let [.., last] = &*attrs {
+ self.error_on_forbidden_inner_attr(last.span, DEFAULT_INNER_ATTR_FORBIDDEN);
+ }
+ Ok(block)
+ }
+
+ fn error_block_no_opening_brace_msg(
+ &mut self,
+ msg: &str,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let sp = self.token.span;
+ let mut e = self.struct_span_err(sp, msg);
+ let do_not_suggest_help = self.token.is_keyword(kw::In) || self.token == token::Colon;
+
+ // Check to see if the user has written something like
+ //
+ // if (cond)
+ // bar;
+ //
+ // which is valid in other languages, but not Rust.
+ match self.parse_stmt_without_recovery(false, ForceCollect::No) {
+ // If the next token is an open brace, e.g., we have:
+ //
+ // if expr other_expr {
+ // ^ ^ ^- lookahead(1) is a brace
+ // | |- current token is not "else"
+ // |- (statement we just parsed)
+ //
+ // the place-inside-a-block suggestion would be more likely wrong than right.
+ //
+ // FIXME(compiler-errors): this should probably parse an arbitrary expr and not
+ // just lookahead one token, so we can see if there's a brace after _that_,
+ // since we want to protect against:
+ // `if 1 1 + 1 {` being suggested as `if { 1 } 1 + 1 {`
+ // (braces inserted around the first `1`)
+ Ok(Some(_))
+ if (!self.token.is_keyword(kw::Else)
+ && self.look_ahead(1, |t| t == &token::OpenDelim(Delimiter::Brace)))
+ || do_not_suggest_help => {}
+ // Do not suggest `if foo println!("") {;}` (as would be seen in test for #46836).
+ Ok(Some(Stmt { kind: StmtKind::Empty, .. })) => {}
+ Ok(Some(stmt)) => {
+ let stmt_own_line = self.sess.source_map().is_line_before_span_empty(sp);
+ let stmt_span = if stmt_own_line && self.eat(&token::Semi) {
+ // Expand the span to include the semicolon.
+ stmt.span.with_hi(self.prev_token.span.hi())
+ } else {
+ stmt.span
+ };
+ e.multipart_suggestion(
+ "try placing this code inside a block",
+ vec![
+ (stmt_span.shrink_to_lo(), "{ ".to_string()),
+ (stmt_span.shrink_to_hi(), " }".to_string()),
+ ],
+ // Speculative; has been misleading in the past (#46836).
+ Applicability::MaybeIncorrect,
+ );
+ }
+ Err(e) => {
+ self.recover_stmt_(SemiColonMode::Break, BlockMode::Ignore);
+ e.cancel();
+ }
+ _ => {}
+ }
+ e.span_label(sp, "expected `{`");
+ e
+ }
+
+ fn error_block_no_opening_brace<T>(&mut self) -> PResult<'a, T> {
+ let tok = super::token_descr(&self.token);
+ let msg = format!("expected `{{`, found {}", tok);
+ Err(self.error_block_no_opening_brace_msg(&msg))
+ }
+
+ /// Parses a block. Inner attributes are allowed.
+ pub(super) fn parse_inner_attrs_and_block(
+ &mut self,
+ ) -> PResult<'a, (Vec<Attribute>, P<Block>)> {
+ self.parse_block_common(self.token.span, BlockCheckMode::Default)
+ }
+
+ /// Parses a block. Inner attributes are allowed.
+ pub(super) fn parse_block_common(
+ &mut self,
+ lo: Span,
+ blk_mode: BlockCheckMode,
+ ) -> PResult<'a, (Vec<Attribute>, P<Block>)> {
+ maybe_whole!(self, NtBlock, |x| (Vec::new(), x));
+
+ self.maybe_recover_unexpected_block_label();
+ if !self.eat(&token::OpenDelim(Delimiter::Brace)) {
+ return self.error_block_no_opening_brace();
+ }
+
+ let attrs = self.parse_inner_attributes()?;
+ let tail = match self.maybe_suggest_struct_literal(lo, blk_mode) {
+ Some(tail) => tail?,
+ None => self.parse_block_tail(lo, blk_mode, AttemptLocalParseRecovery::Yes)?,
+ };
+ Ok((attrs, tail))
+ }
+
+ /// Parses the rest of a block expression or function body.
+ /// Precondition: already parsed the '{'.
+ pub(crate) fn parse_block_tail(
+ &mut self,
+ lo: Span,
+ s: BlockCheckMode,
+ recover: AttemptLocalParseRecovery,
+ ) -> PResult<'a, P<Block>> {
+ let mut stmts = vec![];
+ while !self.eat(&token::CloseDelim(Delimiter::Brace)) {
+ if self.token == token::Eof {
+ break;
+ }
+ let stmt = match self.parse_full_stmt(recover) {
+ Err(mut err) if recover.yes() => {
+ self.maybe_annotate_with_ascription(&mut err, false);
+ err.emit();
+ self.recover_stmt_(SemiColonMode::Ignore, BlockMode::Ignore);
+ Some(self.mk_stmt_err(self.token.span))
+ }
+ Ok(stmt) => stmt,
+ Err(err) => return Err(err),
+ };
+ if let Some(stmt) = stmt {
+ stmts.push(stmt);
+ } else {
+ // Found only `;` or `}`.
+ continue;
+ };
+ }
+ Ok(self.mk_block(stmts, s, lo.to(self.prev_token.span)))
+ }
+
+ /// Parses a statement, including the trailing semicolon.
+ pub fn parse_full_stmt(
+ &mut self,
+ recover: AttemptLocalParseRecovery,
+ ) -> PResult<'a, Option<Stmt>> {
+ // Skip looking for a trailing semicolon when we have an interpolated statement.
+ maybe_whole!(self, NtStmt, |x| Some(x.into_inner()));
+
+ let Some(mut stmt) = self.parse_stmt_without_recovery(true, ForceCollect::No)? else {
+ return Ok(None);
+ };
+
+ let mut eat_semi = true;
+ match stmt.kind {
+ // Expression without semicolon.
+ StmtKind::Expr(ref mut expr)
+ if self.token != token::Eof && classify::expr_requires_semi_to_be_stmt(expr) =>
+ {
+ // Just check for errors and recover; do not eat semicolon yet.
+ if let Err(mut e) =
+ self.expect_one_of(&[], &[token::Semi, token::CloseDelim(Delimiter::Brace)])
+ {
+ if let TokenKind::DocComment(..) = self.token.kind {
+ if let Ok(snippet) = self.span_to_snippet(self.token.span) {
+ let sp = self.token.span;
+ let marker = &snippet[..3];
+ let (comment_marker, doc_comment_marker) = marker.split_at(2);
+
+ e.span_suggestion(
+ sp.with_hi(sp.lo() + BytePos(marker.len() as u32)),
+ &format!(
+ "add a space before `{}` to use a regular comment",
+ doc_comment_marker,
+ ),
+ format!("{} {}", comment_marker, doc_comment_marker),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ if let Err(mut e) =
+ self.check_mistyped_turbofish_with_multiple_type_params(e, expr)
+ {
+ if recover.no() {
+ return Err(e);
+ }
+ e.emit();
+ self.recover_stmt();
+ }
+ // Don't complain about type errors in body tail after parse error (#57383).
+ let sp = expr.span.to(self.prev_token.span);
+ *expr = self.mk_expr_err(sp);
+ }
+ }
+ StmtKind::Expr(_) | StmtKind::MacCall(_) => {}
+ StmtKind::Local(ref mut local) if let Err(e) = self.expect_semi() => {
+ // We might be at the `,` in `let x = foo<bar, baz>;`. Try to recover.
+ match &mut local.kind {
+ LocalKind::Init(expr) | LocalKind::InitElse(expr, _) => {
+ self.check_mistyped_turbofish_with_multiple_type_params(e, expr)?;
+ // We found `foo<bar, baz>`, have we fully recovered?
+ self.expect_semi()?;
+ }
+ LocalKind::Decl => return Err(e),
+ }
+ eat_semi = false;
+ }
+ StmtKind::Empty | StmtKind::Item(_) | StmtKind::Local(_) | StmtKind::Semi(_) => eat_semi = false,
+ }
+
+ if eat_semi && self.eat(&token::Semi) {
+ stmt = stmt.add_trailing_semicolon();
+ }
+ stmt.span = stmt.span.to(self.prev_token.span);
+ Ok(Some(stmt))
+ }
+
+ pub(super) fn mk_block(&self, stmts: Vec<Stmt>, rules: BlockCheckMode, span: Span) -> P<Block> {
+ P(Block {
+ stmts,
+ id: DUMMY_NODE_ID,
+ rules,
+ span,
+ tokens: None,
+ could_be_bare_literal: false,
+ })
+ }
+
+ pub(super) fn mk_stmt(&self, span: Span, kind: StmtKind) -> Stmt {
+ Stmt { id: DUMMY_NODE_ID, kind, span }
+ }
+
+ pub(super) fn mk_stmt_err(&self, span: Span) -> Stmt {
+ self.mk_stmt(span, StmtKind::Expr(self.mk_expr_err(span)))
+ }
+
+ pub(super) fn mk_block_err(&self, span: Span) -> P<Block> {
+ self.mk_block(vec![self.mk_stmt_err(span)], BlockCheckMode::Default, span)
+ }
+}
diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs
new file mode 100644
index 000000000..31b40a83e
--- /dev/null
+++ b/compiler/rustc_parse/src/parser/ty.rs
@@ -0,0 +1,891 @@
+use super::{Parser, PathStyle, TokenType};
+
+use crate::{maybe_recover_from_interpolated_ty_qpath, maybe_whole};
+
+use rustc_ast::ptr::P;
+use rustc_ast::token::{self, Delimiter, Token, TokenKind};
+use rustc_ast::{
+ self as ast, BareFnTy, FnRetTy, GenericBound, GenericBounds, GenericParam, Generics, Lifetime,
+ MacCall, MutTy, Mutability, PolyTraitRef, TraitBoundModifier, TraitObjectSyntax, Ty, TyKind,
+};
+use rustc_errors::{pluralize, struct_span_err, Applicability, PResult};
+use rustc_span::source_map::Span;
+use rustc_span::symbol::{kw, sym};
+
+/// Any `?` or `~const` modifiers that appear at the start of a bound.
+struct BoundModifiers {
+ /// `?Trait`.
+ maybe: Option<Span>,
+
+ /// `~const Trait`.
+ maybe_const: Option<Span>,
+}
+
+impl BoundModifiers {
+ fn to_trait_bound_modifier(&self) -> TraitBoundModifier {
+ match (self.maybe, self.maybe_const) {
+ (None, None) => TraitBoundModifier::None,
+ (Some(_), None) => TraitBoundModifier::Maybe,
+ (None, Some(_)) => TraitBoundModifier::MaybeConst,
+ (Some(_), Some(_)) => TraitBoundModifier::MaybeConstMaybe,
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq)]
+pub(super) enum AllowPlus {
+ Yes,
+ No,
+}
+
+#[derive(PartialEq)]
+pub(super) enum RecoverQPath {
+ Yes,
+ No,
+}
+
+pub(super) enum RecoverQuestionMark {
+ Yes,
+ No,
+}
+
+/// Signals whether parsing a type should recover `->`.
+///
+/// More specifically, when parsing a function like:
+/// ```compile_fail
+/// fn foo() => u8 { 0 }
+/// fn bar(): u8 { 0 }
+/// ```
+/// The compiler will try to recover by interpreting `foo() => u8` as `foo() -> u8` when calling
+/// `parse_ty` with anything except `RecoverReturnSign::No`, and it will try to recover `bar(): u8`
+/// as `bar() -> u8` when passing `RecoverReturnSign::Yes` to `parse_ty`.
+#[derive(Copy, Clone, PartialEq)]
+pub(super) enum RecoverReturnSign {
+ Yes,
+ OnlyFatArrow,
+ No,
+}
+
+impl RecoverReturnSign {
+ /// [RecoverReturnSign::Yes] allows for recovering `fn foo() => u8` and `fn foo(): u8`,
+ /// [RecoverReturnSign::OnlyFatArrow] allows for recovering only `fn foo() => u8` (recovering
+ /// colons can cause problems when parsing where clauses), and
+ /// [RecoverReturnSign::No] doesn't allow for any recovery of the return type arrow.
+ fn can_recover(self, token: &TokenKind) -> bool {
+ match self {
+ Self::Yes => matches!(token, token::FatArrow | token::Colon),
+ Self::OnlyFatArrow => matches!(token, token::FatArrow),
+ Self::No => false,
+ }
+ }
+}
+
+// Is `...` (`CVarArgs`) legal at this level of type parsing?
+#[derive(PartialEq)]
+enum AllowCVariadic {
+ Yes,
+ No,
+}
+
+/// Returns `true` if `IDENT t` can start a type -- `IDENT::a::b`, `IDENT<u8, u8>`,
+/// `IDENT<<u8 as Trait>::AssocTy>`.
+///
+/// Types can also be of the form `IDENT(u8, u8) -> u8`, however this assumes
+/// that `IDENT` is not the ident of a fn trait.
+fn can_continue_type_after_non_fn_ident(t: &Token) -> bool {
+ t == &token::ModSep || t == &token::Lt || t == &token::BinOp(token::Shl)
+}
+
+impl<'a> Parser<'a> {
+ /// Parses a type.
+ pub fn parse_ty(&mut self) -> PResult<'a, P<Ty>> {
+ self.parse_ty_common(
+ AllowPlus::Yes,
+ AllowCVariadic::No,
+ RecoverQPath::Yes,
+ RecoverReturnSign::Yes,
+ None,
+ RecoverQuestionMark::Yes,
+ )
+ }
+
+ pub(super) fn parse_ty_with_generics_recovery(
+ &mut self,
+ ty_params: &Generics,
+ ) -> PResult<'a, P<Ty>> {
+ self.parse_ty_common(
+ AllowPlus::Yes,
+ AllowCVariadic::No,
+ RecoverQPath::Yes,
+ RecoverReturnSign::Yes,
+ Some(ty_params),
+ RecoverQuestionMark::Yes,
+ )
+ }
+
+ /// Parse a type suitable for a function or function pointer parameter.
+ /// The difference from `parse_ty` is that this version allows `...`
+ /// (`CVarArgs`) at the top level of the type.
+ pub(super) fn parse_ty_for_param(&mut self) -> PResult<'a, P<Ty>> {
+ self.parse_ty_common(
+ AllowPlus::Yes,
+ AllowCVariadic::Yes,
+ RecoverQPath::Yes,
+ RecoverReturnSign::Yes,
+ None,
+ RecoverQuestionMark::Yes,
+ )
+ }
+
+ /// Parses a type in restricted contexts where `+` is not permitted.
+ ///
+ /// Example 1: `&'a TYPE`
+ /// `+` is prohibited to maintain operator priority (P(+) < P(&)).
+ /// Example 2: `value1 as TYPE + value2`
+ /// `+` is prohibited to avoid interactions with expression grammar.
+ pub(super) fn parse_ty_no_plus(&mut self) -> PResult<'a, P<Ty>> {
+ self.parse_ty_common(
+ AllowPlus::No,
+ AllowCVariadic::No,
+ RecoverQPath::Yes,
+ RecoverReturnSign::Yes,
+ None,
+ RecoverQuestionMark::Yes,
+ )
+ }
+
+ /// Parses a type following an `as` cast. Similar to `parse_ty_no_plus`, but signaling origin
+ /// for better diagnostics involving `?`.
+ pub(super) fn parse_as_cast_ty(&mut self) -> PResult<'a, P<Ty>> {
+ self.parse_ty_common(
+ AllowPlus::No,
+ AllowCVariadic::No,
+ RecoverQPath::Yes,
+ RecoverReturnSign::Yes,
+ None,
+ RecoverQuestionMark::No,
+ )
+ }
+
+ pub(super) fn parse_no_question_mark_recover(&mut self) -> PResult<'a, P<Ty>> {
+ self.parse_ty_common(
+ AllowPlus::Yes,
+ AllowCVariadic::No,
+ RecoverQPath::Yes,
+ RecoverReturnSign::Yes,
+ None,
+ RecoverQuestionMark::No,
+ )
+ }
+
+ /// Parse a type without recovering `:` as `->` to avoid breaking code such as `where fn() : for<'a>`
+ pub(super) fn parse_ty_for_where_clause(&mut self) -> PResult<'a, P<Ty>> {
+ self.parse_ty_common(
+ AllowPlus::Yes,
+ AllowCVariadic::Yes,
+ RecoverQPath::Yes,
+ RecoverReturnSign::OnlyFatArrow,
+ None,
+ RecoverQuestionMark::Yes,
+ )
+ }
+
+ /// Parses an optional return type `[ -> TY ]` in a function declaration.
+ pub(super) fn parse_ret_ty(
+ &mut self,
+ allow_plus: AllowPlus,
+ recover_qpath: RecoverQPath,
+ recover_return_sign: RecoverReturnSign,
+ ) -> PResult<'a, FnRetTy> {
+ Ok(if self.eat(&token::RArrow) {
+ // FIXME(Centril): Can we unconditionally `allow_plus`?
+ let ty = self.parse_ty_common(
+ allow_plus,
+ AllowCVariadic::No,
+ recover_qpath,
+ recover_return_sign,
+ None,
+ RecoverQuestionMark::Yes,
+ )?;
+ FnRetTy::Ty(ty)
+ } else if recover_return_sign.can_recover(&self.token.kind) {
+ // Don't `eat` to prevent `=>` from being added as an expected token which isn't
+ // actually expected and could only confuse users
+ self.bump();
+ self.struct_span_err(self.prev_token.span, "return types are denoted using `->`")
+ .span_suggestion_short(
+ self.prev_token.span,
+ "use `->` instead",
+ "->",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ let ty = self.parse_ty_common(
+ allow_plus,
+ AllowCVariadic::No,
+ recover_qpath,
+ recover_return_sign,
+ None,
+ RecoverQuestionMark::Yes,
+ )?;
+ FnRetTy::Ty(ty)
+ } else {
+ FnRetTy::Default(self.token.span.shrink_to_lo())
+ })
+ }
+
+ fn parse_ty_common(
+ &mut self,
+ allow_plus: AllowPlus,
+ allow_c_variadic: AllowCVariadic,
+ recover_qpath: RecoverQPath,
+ recover_return_sign: RecoverReturnSign,
+ ty_generics: Option<&Generics>,
+ recover_question_mark: RecoverQuestionMark,
+ ) -> PResult<'a, P<Ty>> {
+ let allow_qpath_recovery = recover_qpath == RecoverQPath::Yes;
+ maybe_recover_from_interpolated_ty_qpath!(self, allow_qpath_recovery);
+ maybe_whole!(self, NtTy, |x| x);
+
+ let lo = self.token.span;
+ let mut impl_dyn_multi = false;
+ let kind = if self.check(&token::OpenDelim(Delimiter::Parenthesis)) {
+ self.parse_ty_tuple_or_parens(lo, allow_plus)?
+ } else if self.eat(&token::Not) {
+ // Never type `!`
+ TyKind::Never
+ } else if self.eat(&token::BinOp(token::Star)) {
+ self.parse_ty_ptr()?
+ } else if self.eat(&token::OpenDelim(Delimiter::Bracket)) {
+ self.parse_array_or_slice_ty()?
+ } else if self.check(&token::BinOp(token::And)) || self.check(&token::AndAnd) {
+ // Reference
+ self.expect_and()?;
+ self.parse_borrowed_pointee()?
+ } else if self.eat_keyword_noexpect(kw::Typeof) {
+ self.parse_typeof_ty()?
+ } else if self.eat_keyword(kw::Underscore) {
+ // A type to be inferred `_`
+ TyKind::Infer
+ } else if self.check_fn_front_matter(false) {
+ // Function pointer type
+ self.parse_ty_bare_fn(lo, Vec::new(), recover_return_sign)?
+ } else if self.check_keyword(kw::For) {
+ // Function pointer type or bound list (trait object type) starting with a poly-trait.
+ // `for<'lt> [unsafe] [extern "ABI"] fn (&'lt S) -> T`
+ // `for<'lt> Trait1<'lt> + Trait2 + 'a`
+ let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
+ if self.check_fn_front_matter(false) {
+ self.parse_ty_bare_fn(lo, lifetime_defs, recover_return_sign)?
+ } else {
+ let path = self.parse_path(PathStyle::Type)?;
+ let parse_plus = allow_plus == AllowPlus::Yes && self.check_plus();
+ self.parse_remaining_bounds_path(lifetime_defs, path, lo, parse_plus)?
+ }
+ } else if self.eat_keyword(kw::Impl) {
+ self.parse_impl_ty(&mut impl_dyn_multi)?
+ } else if self.is_explicit_dyn_type() {
+ self.parse_dyn_ty(&mut impl_dyn_multi)?
+ } else if self.eat_lt() {
+ // Qualified path
+ let (qself, path) = self.parse_qpath(PathStyle::Type)?;
+ TyKind::Path(Some(qself), path)
+ } else if self.check_path() {
+ self.parse_path_start_ty(lo, allow_plus, ty_generics)?
+ } else if self.can_begin_bound() {
+ self.parse_bare_trait_object(lo, allow_plus)?
+ } else if self.eat(&token::DotDotDot) {
+ if allow_c_variadic == AllowCVariadic::Yes {
+ TyKind::CVarArgs
+ } else {
+ // FIXME(Centril): Should we just allow `...` syntactically
+ // anywhere in a type and use semantic restrictions instead?
+ self.error_illegal_c_variadic_ty(lo);
+ TyKind::Err
+ }
+ } else {
+ let msg = format!("expected type, found {}", super::token_descr(&self.token));
+ let mut err = self.struct_span_err(self.token.span, &msg);
+ err.span_label(self.token.span, "expected type");
+ self.maybe_annotate_with_ascription(&mut err, true);
+ return Err(err);
+ };
+
+ let span = lo.to(self.prev_token.span);
+ let mut ty = self.mk_ty(span, kind);
+
+ // Try to recover from use of `+` with incorrect priority.
+ if matches!(allow_plus, AllowPlus::Yes) {
+ self.maybe_recover_from_bad_type_plus(&ty)?;
+ } else {
+ self.maybe_report_ambiguous_plus(impl_dyn_multi, &ty);
+ }
+ if let RecoverQuestionMark::Yes = recover_question_mark {
+ ty = self.maybe_recover_from_question_mark(ty);
+ }
+ if allow_qpath_recovery { self.maybe_recover_from_bad_qpath(ty) } else { Ok(ty) }
+ }
+
+ /// Parses either:
+ /// - `(TYPE)`, a parenthesized type.
+ /// - `(TYPE,)`, a tuple with a single field of type TYPE.
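+ ///
+ /// For example (illustrative):
+ /// ```ignore (illustrative)
+ /// (u8) // parenthesized type, equivalent to `u8`
+ /// (u8,) // one-element tuple type
+ /// (u8, u16) // anything longer also parses here, as a tuple type
+ /// ```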
+ fn parse_ty_tuple_or_parens(&mut self, lo: Span, allow_plus: AllowPlus) -> PResult<'a, TyKind> {
+ let mut trailing_plus = false;
+ let (ts, trailing) = self.parse_paren_comma_seq(|p| {
+ let ty = p.parse_ty()?;
+ trailing_plus = p.prev_token.kind == TokenKind::BinOp(token::Plus);
+ Ok(ty)
+ })?;
+
+ if ts.len() == 1 && !trailing {
+ let ty = ts.into_iter().next().unwrap().into_inner();
+ let maybe_bounds = allow_plus == AllowPlus::Yes && self.token.is_like_plus();
+ match ty.kind {
+ // `(TY_BOUND_NOPAREN) + BOUND + ...`.
+ TyKind::Path(None, path) if maybe_bounds => {
+ self.parse_remaining_bounds_path(Vec::new(), path, lo, true)
+ }
+ TyKind::TraitObject(bounds, TraitObjectSyntax::None)
+ if maybe_bounds && bounds.len() == 1 && !trailing_plus =>
+ {
+ self.parse_remaining_bounds(bounds, true)
+ }
+ // `(TYPE)`
+ _ => Ok(TyKind::Paren(P(ty))),
+ }
+ } else {
+ Ok(TyKind::Tup(ts))
+ }
+ }
+
+ fn parse_bare_trait_object(&mut self, lo: Span, allow_plus: AllowPlus) -> PResult<'a, TyKind> {
+ let lt_no_plus = self.check_lifetime() && !self.look_ahead(1, |t| t.is_like_plus());
+ let bounds = self.parse_generic_bounds_common(allow_plus, None)?;
+ if lt_no_plus {
+ self.struct_span_err(lo, "lifetime in trait object type must be followed by `+`")
+ .emit();
+ }
+ Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::None))
+ }
+
+ fn parse_remaining_bounds_path(
+ &mut self,
+ generic_params: Vec<GenericParam>,
+ path: ast::Path,
+ lo: Span,
+ parse_plus: bool,
+ ) -> PResult<'a, TyKind> {
+ let poly_trait_ref = PolyTraitRef::new(generic_params, path, lo.to(self.prev_token.span));
+ let bounds = vec![GenericBound::Trait(poly_trait_ref, TraitBoundModifier::None)];
+ self.parse_remaining_bounds(bounds, parse_plus)
+ }
+
+ /// Parse the remainder of a bare trait object type given an already parsed list.
+ fn parse_remaining_bounds(
+ &mut self,
+ mut bounds: GenericBounds,
+ plus: bool,
+ ) -> PResult<'a, TyKind> {
+ if plus {
+ self.eat_plus(); // `+`, or `+=` gets split and `+` is discarded
+ bounds.append(&mut self.parse_generic_bounds(Some(self.prev_token.span))?);
+ }
+ Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::None))
+ }
+
+ /// Parses a raw pointer type: `*[const | mut] $type`.
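+ ///
+ /// ```ignore (illustrative)
+ /// *const u8 // immutable raw pointer
+ /// *mut Vec<u8> // mutable raw pointer
+ /// ```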
+ fn parse_ty_ptr(&mut self) -> PResult<'a, TyKind> {
+ let mutbl = self.parse_const_or_mut().unwrap_or_else(|| {
+ let span = self.prev_token.span;
+ let msg = "expected mut or const in raw pointer type";
+ self.struct_span_err(span, msg)
+ .span_label(span, msg)
+ .help("use `*mut T` or `*const T` as appropriate")
+ .emit();
+ Mutability::Not
+ });
+ let ty = self.parse_ty_no_plus()?;
+ Ok(TyKind::Ptr(MutTy { ty, mutbl }))
+ }
+
+ /// Parses an array (`[TYPE; EXPR]`) or slice (`[TYPE]`) type.
+ /// The opening `[` bracket is already eaten.
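+ ///
+ /// ```ignore (illustrative)
+ /// [u8; 4] // array type: element type, `;`, then a const length expression
+ /// [u8] // slice type
+ /// ```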
+ fn parse_array_or_slice_ty(&mut self) -> PResult<'a, TyKind> {
+ let elt_ty = match self.parse_ty() {
+ Ok(ty) => ty,
+ Err(mut err)
+ if self.look_ahead(1, |t| t.kind == token::CloseDelim(Delimiter::Bracket))
+ | self.look_ahead(1, |t| t.kind == token::Semi) =>
+ {
+ // Recover from `[LIT; EXPR]` and `[LIT]`
+ self.bump();
+ err.emit();
+ self.mk_ty(self.prev_token.span, TyKind::Err)
+ }
+ Err(err) => return Err(err),
+ };
+
+ let ty = if self.eat(&token::Semi) {
+ let mut length = self.parse_anon_const_expr()?;
+ if let Err(e) = self.expect(&token::CloseDelim(Delimiter::Bracket)) {
+ // Try to recover from `X<Y, ...>` when `X::<Y, ...>` works
+ self.check_mistyped_turbofish_with_multiple_type_params(e, &mut length.value)?;
+ self.expect(&token::CloseDelim(Delimiter::Bracket))?;
+ }
+ TyKind::Array(elt_ty, length)
+ } else {
+ self.expect(&token::CloseDelim(Delimiter::Bracket))?;
+ TyKind::Slice(elt_ty)
+ };
+
+ Ok(ty)
+ }
+
+ fn parse_borrowed_pointee(&mut self) -> PResult<'a, TyKind> {
+ let and_span = self.prev_token.span;
+ let mut opt_lifetime =
+ if self.check_lifetime() { Some(self.expect_lifetime()) } else { None };
+ let mut mutbl = self.parse_mutability();
+ if self.token.is_lifetime() && mutbl == Mutability::Mut && opt_lifetime.is_none() {
+ // A lifetime is invalid here: it would be part of a bare trait bound, which requires
+ // it to be followed by a plus, but we disallow plus in the pointee type.
+ // So we can handle this case as an error here, and suggest `'a mut`.
+ // If there *is* a plus next, though, handling the error later provides better
+ // suggestions (like adding parentheses).
+ if !self.look_ahead(1, |t| t.is_like_plus()) {
+ let lifetime_span = self.token.span;
+ let span = and_span.to(lifetime_span);
+
+ let mut err = self.struct_span_err(span, "lifetime must precede `mut`");
+ if let Ok(lifetime_src) = self.span_to_snippet(lifetime_span) {
+ err.span_suggestion(
+ span,
+ "place the lifetime before `mut`",
+ format!("&{} mut", lifetime_src),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+
+ opt_lifetime = Some(self.expect_lifetime());
+ }
+ } else if self.token.is_keyword(kw::Dyn)
+ && mutbl == Mutability::Not
+ && self.look_ahead(1, |t| t.is_keyword(kw::Mut))
+ {
+ // We have `&dyn mut ...`, which is invalid and should be `&mut dyn ...`.
+ let span = and_span.to(self.look_ahead(1, |t| t.span));
+ let mut err = self.struct_span_err(span, "`mut` must precede `dyn`");
+ err.span_suggestion(
+ span,
+ "place `mut` before `dyn`",
+ "&mut dyn",
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+
+ // Recovery
+ mutbl = Mutability::Mut;
+ let (dyn_tok, dyn_tok_sp) = (self.token.clone(), self.token_spacing);
+ self.bump();
+ self.bump_with((dyn_tok, dyn_tok_sp));
+ }
+ let ty = self.parse_ty_no_plus()?;
+ Ok(TyKind::Rptr(opt_lifetime, MutTy { ty, mutbl }))
+ }
+
+ /// Parses `typeof(EXPR)`.
+ /// To avoid ambiguity, the expression is surrounded by parentheses.
+ fn parse_typeof_ty(&mut self) -> PResult<'a, TyKind> {
+ self.expect(&token::OpenDelim(Delimiter::Parenthesis))?;
+ let expr = self.parse_anon_const_expr()?;
+ self.expect(&token::CloseDelim(Delimiter::Parenthesis))?;
+ Ok(TyKind::Typeof(expr))
+ }
+
+ /// Parses a function pointer type (`TyKind::BareFn`).
+ /// ```ignore (illustrative)
+ /// [unsafe] [extern "ABI"] fn (S) -> T
+ /// // ^~~~~^ ^~~~^ ^~^ ^
+ /// // | | | |
+ /// // | | | Return type
+ /// // Function Style ABI Parameter types
+ /// ```
+ /// We actually parse `FnHeader FnDecl`, but we error on `const` and `async` qualifiers.
+ fn parse_ty_bare_fn(
+ &mut self,
+ lo: Span,
+ params: Vec<GenericParam>,
+ recover_return_sign: RecoverReturnSign,
+ ) -> PResult<'a, TyKind> {
+ let inherited_vis = rustc_ast::Visibility {
+ span: rustc_span::DUMMY_SP,
+ kind: rustc_ast::VisibilityKind::Inherited,
+ tokens: None,
+ };
+ let span_start = self.token.span;
+ let ast::FnHeader { ext, unsafety, constness, asyncness } =
+ self.parse_fn_front_matter(&inherited_vis)?;
+ let decl = self.parse_fn_decl(|_| false, AllowPlus::No, recover_return_sign)?;
+ let whole_span = lo.to(self.prev_token.span);
+ if let ast::Const::Yes(span) = constness {
+ // If we ever start to allow `const fn()`, then update
+ // feature gating for `#![feature(const_extern_fn)]` to
+ // cover it.
+ self.error_fn_ptr_bad_qualifier(whole_span, span, "const");
+ }
+ if let ast::Async::Yes { span, .. } = asyncness {
+ self.error_fn_ptr_bad_qualifier(whole_span, span, "async");
+ }
+ let decl_span = span_start.to(self.token.span);
+ Ok(TyKind::BareFn(P(BareFnTy { ext, unsafety, generic_params: params, decl, decl_span })))
+ }
+
+ /// Emit an error for the given bad function pointer qualifier.
+ fn error_fn_ptr_bad_qualifier(&self, span: Span, qual_span: Span, qual: &str) {
+ self.struct_span_err(span, &format!("an `fn` pointer type cannot be `{}`", qual))
+ .span_label(qual_span, format!("`{}` because of this", qual))
+ .span_suggestion_short(
+ qual_span,
+ &format!("remove the `{}` qualifier", qual),
+ "",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ }
+
+ /// Parses an `impl B0 + ... + Bn` type.
+ fn parse_impl_ty(&mut self, impl_dyn_multi: &mut bool) -> PResult<'a, TyKind> {
+ // Always parse bounds greedily for better error recovery.
+ let bounds = self.parse_generic_bounds(None)?;
+ *impl_dyn_multi = bounds.len() > 1 || self.prev_token.kind == TokenKind::BinOp(token::Plus);
+ Ok(TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds))
+ }
+
+ /// Is a `dyn B0 + ... + Bn` type allowed here?
+ fn is_explicit_dyn_type(&mut self) -> bool {
+ self.check_keyword(kw::Dyn)
+ && (!self.token.uninterpolated_span().rust_2015()
+ || self.look_ahead(1, |t| {
+ t.can_begin_bound() && !can_continue_type_after_non_fn_ident(t)
+ }))
+ }
+
+ /// Parses a `dyn B0 + ... + Bn` type.
+ ///
+ /// Note that this does *not* parse bare trait objects.
+ fn parse_dyn_ty(&mut self, impl_dyn_multi: &mut bool) -> PResult<'a, TyKind> {
+ self.bump(); // `dyn`
+ // Always parse bounds greedily for better error recovery.
+ let bounds = self.parse_generic_bounds(None)?;
+ *impl_dyn_multi = bounds.len() > 1 || self.prev_token.kind == TokenKind::BinOp(token::Plus);
+ Ok(TyKind::TraitObject(bounds, TraitObjectSyntax::Dyn))
+ }
+
+ /// Parses a type starting with a path.
+ ///
+ /// This can be:
+ /// 1. a type macro, `mac!(...)`,
+ /// 2. a bare trait object, `B0 + ... + Bn`,
+ /// 3. or a path, `path::to::MyType`.
+ fn parse_path_start_ty(
+ &mut self,
+ lo: Span,
+ allow_plus: AllowPlus,
+ ty_generics: Option<&Generics>,
+ ) -> PResult<'a, TyKind> {
+ // Simple path
+ let path = self.parse_path_inner(PathStyle::Type, ty_generics)?;
+ if self.eat(&token::Not) {
+ // Macro invocation in type position
+ Ok(TyKind::MacCall(MacCall {
+ path,
+ args: self.parse_mac_args()?,
+ prior_type_ascription: self.last_type_ascription,
+ }))
+ } else if allow_plus == AllowPlus::Yes && self.check_plus() {
+ // `Trait1 + Trait2 + 'a`
+ self.parse_remaining_bounds_path(Vec::new(), path, lo, true)
+ } else {
+ // Just a type path.
+ Ok(TyKind::Path(None, path))
+ }
+ }
+
+ fn error_illegal_c_variadic_ty(&self, lo: Span) {
+ struct_span_err!(
+ self.sess.span_diagnostic,
+ lo.to(self.prev_token.span),
+ E0743,
+ "C-variadic type `...` may not be nested inside another type",
+ )
+ .emit();
+ }
+
+ pub(super) fn parse_generic_bounds(
+ &mut self,
+ colon_span: Option<Span>,
+ ) -> PResult<'a, GenericBounds> {
+ self.parse_generic_bounds_common(AllowPlus::Yes, colon_span)
+ }
+
+ /// Parses bounds of a type parameter `BOUND + BOUND + ...`, possibly with trailing `+`.
+ ///
+ /// See `parse_generic_bound` for the `BOUND` grammar.
+ fn parse_generic_bounds_common(
+ &mut self,
+ allow_plus: AllowPlus,
+ colon_span: Option<Span>,
+ ) -> PResult<'a, GenericBounds> {
+ let mut bounds = Vec::new();
+ let mut negative_bounds = Vec::new();
+
+ while self.can_begin_bound() || self.token.is_keyword(kw::Dyn) {
+ if self.token.is_keyword(kw::Dyn) {
+ // Account for `&dyn Trait + dyn Other`.
+ self.struct_span_err(self.token.span, "invalid `dyn` keyword")
+ .help("`dyn` is only needed at the start of a trait `+`-separated list")
+ .span_suggestion(
+ self.token.span,
+ "remove this keyword",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ self.bump();
+ }
+ match self.parse_generic_bound()? {
+ Ok(bound) => bounds.push(bound),
+ Err(neg_sp) => negative_bounds.push(neg_sp),
+ }
+ if allow_plus == AllowPlus::No || !self.eat_plus() {
+ break;
+ }
+ }
+
+ if !negative_bounds.is_empty() {
+ self.error_negative_bounds(colon_span, &bounds, negative_bounds);
+ }
+
+ Ok(bounds)
+ }
+
+ /// Can the current token begin a bound?
+ fn can_begin_bound(&mut self) -> bool {
+ // This needs to be synchronized with `TokenKind::can_begin_bound`.
+ self.check_path()
+ || self.check_lifetime()
+ || self.check(&token::Not) // Used for error reporting only.
+ || self.check(&token::Question)
+ || self.check(&token::Tilde)
+ || self.check_keyword(kw::For)
+ || self.check(&token::OpenDelim(Delimiter::Parenthesis))
+ }
+
+ fn error_negative_bounds(
+ &self,
+ colon_span: Option<Span>,
+ bounds: &[GenericBound],
+ negative_bounds: Vec<Span>,
+ ) {
+ let negative_bounds_len = negative_bounds.len();
+ let last_span = *negative_bounds.last().expect("no negative bounds, but still error?");
+ let mut err = self.struct_span_err(negative_bounds, "negative bounds are not supported");
+ err.span_label(last_span, "negative bounds are not supported");
+ if let Some(bound_list) = colon_span {
+ let bound_list = bound_list.to(self.prev_token.span);
+ let mut new_bound_list = String::new();
+ if !bounds.is_empty() {
+ let mut snippets = bounds.iter().map(|bound| self.span_to_snippet(bound.span()));
+ while let Some(Ok(snippet)) = snippets.next() {
+ new_bound_list.push_str(" + ");
+ new_bound_list.push_str(&snippet);
+ }
+ new_bound_list = new_bound_list.replacen(" +", ":", 1);
+ }
+ err.tool_only_span_suggestion(
+ bound_list,
+ &format!("remove the bound{}", pluralize!(negative_bounds_len)),
+ new_bound_list,
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ }
+
+ /// Parses a bound according to the grammar:
+ /// ```ebnf
+ /// BOUND = TY_BOUND | LT_BOUND
+ /// ```
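+ ///
+ /// For example (illustrative): `Clone`, `'a`, `?Sized`, or `~const Trait`.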
+ fn parse_generic_bound(&mut self) -> PResult<'a, Result<GenericBound, Span>> {
+ let anchor_lo = self.prev_token.span;
+ let lo = self.token.span;
+ let has_parens = self.eat(&token::OpenDelim(Delimiter::Parenthesis));
+ let inner_lo = self.token.span;
+ let is_negative = self.eat(&token::Not);
+
+ let modifiers = self.parse_ty_bound_modifiers()?;
+ let bound = if self.token.is_lifetime() {
+ self.error_lt_bound_with_modifiers(modifiers);
+ self.parse_generic_lt_bound(lo, inner_lo, has_parens)?
+ } else {
+ self.parse_generic_ty_bound(lo, has_parens, modifiers)?
+ };
+
+ Ok(if is_negative { Err(anchor_lo.to(self.prev_token.span)) } else { Ok(bound) })
+ }
+
+ /// Parses a lifetime ("outlives") bound, e.g. `'a`, according to:
+ /// ```ebnf
+ /// LT_BOUND = LIFETIME
+ /// ```
+ fn parse_generic_lt_bound(
+ &mut self,
+ lo: Span,
+ inner_lo: Span,
+ has_parens: bool,
+ ) -> PResult<'a, GenericBound> {
+ let bound = GenericBound::Outlives(self.expect_lifetime());
+ if has_parens {
+ // FIXME(Centril): Consider not erroring here and accepting `('lt)` instead,
+ // possibly introducing `GenericBound::Paren(P<GenericBound>)`?
+ self.recover_paren_lifetime(lo, inner_lo)?;
+ }
+ Ok(bound)
+ }
+
+ /// Emits an error if any trait bound modifiers were present.
+ fn error_lt_bound_with_modifiers(&self, modifiers: BoundModifiers) {
+ if let Some(span) = modifiers.maybe_const {
+ self.struct_span_err(
+ span,
+ "`~const` may only modify trait bounds, not lifetime bounds",
+ )
+ .emit();
+ }
+
+ if let Some(span) = modifiers.maybe {
+ self.struct_span_err(span, "`?` may only modify trait bounds, not lifetime bounds")
+ .emit();
+ }
+ }
+
+ /// Recover on `('lifetime)` with `(` already eaten.
+ fn recover_paren_lifetime(&mut self, lo: Span, inner_lo: Span) -> PResult<'a, ()> {
+ let inner_span = inner_lo.to(self.prev_token.span);
+ self.expect(&token::CloseDelim(Delimiter::Parenthesis))?;
+ let mut err = self.struct_span_err(
+ lo.to(self.prev_token.span),
+ "parenthesized lifetime bounds are not supported",
+ );
+ if let Ok(snippet) = self.span_to_snippet(inner_span) {
+ err.span_suggestion_short(
+ lo.to(self.prev_token.span),
+ "remove the parentheses",
+ snippet,
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ Ok(())
+ }
+
+ /// Parses the modifiers that may precede a trait in a bound, e.g. `?Trait` or `~const Trait`.
+ ///
+ /// If no modifiers are present, this does not consume any tokens.
+ ///
+ /// ```ebnf
+ /// TY_BOUND_MODIFIERS = ["~const"] ["?"]
+ /// ```
+ fn parse_ty_bound_modifiers(&mut self) -> PResult<'a, BoundModifiers> {
+ let maybe_const = if self.eat(&token::Tilde) {
+ let tilde = self.prev_token.span;
+ self.expect_keyword(kw::Const)?;
+ let span = tilde.to(self.prev_token.span);
+ self.sess.gated_spans.gate(sym::const_trait_impl, span);
+ Some(span)
+ } else {
+ None
+ };
+
+ let maybe = if self.eat(&token::Question) { Some(self.prev_token.span) } else { None };
+
+ Ok(BoundModifiers { maybe, maybe_const })
+ }
+
+ /// Parses a type bound according to:
+ /// ```ebnf
+ /// TY_BOUND = TY_BOUND_NOPAREN | (TY_BOUND_NOPAREN)
+ /// TY_BOUND_NOPAREN = [TY_BOUND_MODIFIERS] [for<LT_PARAM_DEFS>] SIMPLE_PATH
+ /// ```
+ ///
+ /// For example, this grammar accepts `~const ?for<'a: 'b> m::Trait<'a>`.
+ fn parse_generic_ty_bound(
+ &mut self,
+ lo: Span,
+ has_parens: bool,
+ modifiers: BoundModifiers,
+ ) -> PResult<'a, GenericBound> {
+ let lifetime_defs = self.parse_late_bound_lifetime_defs()?;
+ let path = self.parse_path(PathStyle::Type)?;
+ if has_parens {
+ if self.token.is_like_plus() {
+ // Someone has written something like `&dyn (Trait + Other)`. The correct code
+ // would be `&(dyn Trait + Other)`, but we don't have access to the appropriate
+ // span to suggest that. When written as `&dyn Trait + Other`, an appropriate
+ // suggestion is given.
+ let bounds = vec![];
+ self.parse_remaining_bounds(bounds, true)?;
+ self.expect(&token::CloseDelim(Delimiter::Parenthesis))?;
+ let sp = vec![lo, self.prev_token.span];
+ let sugg: Vec<_> = sp.iter().map(|sp| (*sp, String::new())).collect();
+ self.struct_span_err(sp, "incorrect braces around trait bounds")
+ .multipart_suggestion(
+ "remove the parentheses",
+ sugg,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ } else {
+ self.expect(&token::CloseDelim(Delimiter::Parenthesis))?;
+ }
+ }
+
+ let modifier = modifiers.to_trait_bound_modifier();
+ let poly_trait = PolyTraitRef::new(lifetime_defs, path, lo.to(self.prev_token.span));
+ Ok(GenericBound::Trait(poly_trait, modifier))
+ }
+
+ /// Optionally parses `for<$generic_params>`.
+ pub(super) fn parse_late_bound_lifetime_defs(&mut self) -> PResult<'a, Vec<GenericParam>> {
+ if self.eat_keyword(kw::For) {
+ self.expect_lt()?;
+ let params = self.parse_generic_params()?;
+ self.expect_gt()?;
+ // We rely on AST validation to rule out invalid cases: There must not be type
+ // parameters, and the lifetime parameters must not have bounds.
+ Ok(params)
+ } else {
+ Ok(Vec::new())
+ }
+ }
+
+ pub(super) fn check_lifetime(&mut self) -> bool {
+ self.expected_tokens.push(TokenType::Lifetime);
+ self.token.is_lifetime()
+ }
+
+ /// Parses a single lifetime `'a` or panics.
+ pub(super) fn expect_lifetime(&mut self) -> Lifetime {
+ if let Some(ident) = self.token.lifetime() {
+ self.bump();
+ Lifetime { ident, id: ast::DUMMY_NODE_ID }
+ } else {
+ self.span_bug(self.token.span, "not a lifetime")
+ }
+ }
+
+ pub(super) fn mk_ty(&self, span: Span, kind: TyKind) -> P<Ty> {
+ P(Ty { kind, span, id: ast::DUMMY_NODE_ID, tokens: None })
+ }
+}
diff --git a/compiler/rustc_parse/src/validate_attr.rs b/compiler/rustc_parse/src/validate_attr.rs
new file mode 100644
index 000000000..47477898b
--- /dev/null
+++ b/compiler/rustc_parse/src/validate_attr.rs
@@ -0,0 +1,200 @@
+//! Validation of the meta-item syntax of attributes, run post-expansion.
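+//!
+//! For example (illustrative), `#[inline = "never"]` does not match the built-in
+//! `inline` attribute template (`#[inline]` or `#[inline(..)]`), so it is reported
+//! via the `ILL_FORMED_ATTRIBUTE_INPUT` lint below.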
+
+use crate::parse_in;
+
+use rustc_ast::tokenstream::DelimSpan;
+use rustc_ast::{self as ast, Attribute, MacArgs, MacArgsEq, MacDelimiter, MetaItem, MetaItemKind};
+use rustc_ast_pretty::pprust;
+use rustc_errors::{Applicability, FatalError, PResult};
+use rustc_feature::{AttributeTemplate, BuiltinAttribute, BUILTIN_ATTRIBUTE_MAP};
+use rustc_session::lint::builtin::ILL_FORMED_ATTRIBUTE_INPUT;
+use rustc_session::parse::ParseSess;
+use rustc_span::{sym, Symbol};
+
+pub fn check_meta(sess: &ParseSess, attr: &Attribute) {
+ if attr.is_doc_comment() {
+ return;
+ }
+
+ let attr_info = attr.ident().and_then(|ident| BUILTIN_ATTRIBUTE_MAP.get(&ident.name));
+
+ // Check input tokens for built-in and key-value attributes.
+ match attr_info {
+ // `rustc_dummy` doesn't have any restrictions specific to built-in attributes.
+ Some(BuiltinAttribute { name, template, .. }) if *name != sym::rustc_dummy => {
+ check_builtin_attribute(sess, attr, *name, *template)
+ }
+ _ if let MacArgs::Eq(..) = attr.get_normal_item().args => {
+ // All key-value attributes are restricted to meta-item syntax.
+ parse_meta(sess, attr)
+ .map_err(|mut err| {
+ err.emit();
+ })
+ .ok();
+ }
+ _ => {}
+ }
+}
+
+pub fn parse_meta<'a>(sess: &'a ParseSess, attr: &Attribute) -> PResult<'a, MetaItem> {
+ let item = attr.get_normal_item();
+ Ok(MetaItem {
+ span: attr.span,
+ path: item.path.clone(),
+ kind: match &item.args {
+ MacArgs::Empty => MetaItemKind::Word,
+ MacArgs::Delimited(dspan, delim, t) => {
+ check_meta_bad_delim(sess, *dspan, *delim, "wrong meta list delimiters");
+ let nmis = parse_in(sess, t.clone(), "meta list", |p| p.parse_meta_seq_top())?;
+ MetaItemKind::List(nmis)
+ }
+ MacArgs::Eq(_, MacArgsEq::Ast(expr)) => {
+ if let ast::ExprKind::Lit(lit) = &expr.kind {
+ if !lit.kind.is_unsuffixed() {
+ let mut err = sess.span_diagnostic.struct_span_err(
+ lit.span,
+ "suffixed literals are not allowed in attributes",
+ );
+ err.help(
+ "instead of using a suffixed literal (`1u8`, `1.0f32`, etc.), \
+ use an unsuffixed version (`1`, `1.0`, etc.)",
+ );
+ return Err(err);
+ } else {
+ MetaItemKind::NameValue(lit.clone())
+ }
+ } else {
+ // The non-error case can happen with e.g. `#[foo = 1+1]`. The error case can
+ // happen with e.g. `#[foo = include_str!("non-existent-file.rs")]`; in that
+ // case we delay the error because an earlier error will have already been
+ // reported.
+ let msg = format!("unexpected expression: `{}`", pprust::expr_to_string(expr));
+ let mut err = sess.span_diagnostic.struct_span_err(expr.span, msg);
+ if let ast::ExprKind::Err = expr.kind {
+ err.downgrade_to_delayed_bug();
+ }
+ return Err(err);
+ }
+ }
+ MacArgs::Eq(_, MacArgsEq::Hir(lit)) => MetaItemKind::NameValue(lit.clone()),
+ },
+ })
+}
+
+pub fn check_meta_bad_delim(sess: &ParseSess, span: DelimSpan, delim: MacDelimiter, msg: &str) {
+ if let ast::MacDelimiter::Parenthesis = delim {
+ return;
+ }
+
+ sess.span_diagnostic
+ .struct_span_err(span.entire(), msg)
+ .multipart_suggestion(
+ "the delimiters should be `(` and `)`",
+ vec![(span.open, "(".to_string()), (span.close, ")".to_string())],
+ Applicability::MachineApplicable,
+ )
+ .emit();
+}
+
+/// Checks that the given meta-item is compatible with this `AttributeTemplate`.
+fn is_attr_template_compatible(template: &AttributeTemplate, meta: &ast::MetaItemKind) -> bool {
+ match meta {
+ MetaItemKind::Word => template.word,
+ MetaItemKind::List(..) => template.list.is_some(),
+ MetaItemKind::NameValue(lit) if lit.kind.is_str() => template.name_value_str.is_some(),
+ MetaItemKind::NameValue(..) => false,
+ }
+}
+
+pub fn check_builtin_attribute(
+ sess: &ParseSess,
+ attr: &Attribute,
+ name: Symbol,
+ template: AttributeTemplate,
+) {
+ // Some special attributes like `cfg` must be checked
+ // before the generic check, so we skip them here.
+ let should_skip = |name| name == sym::cfg;
+
+ match parse_meta(sess, attr) {
+ Ok(meta) => {
+ if !should_skip(name) && !is_attr_template_compatible(&template, &meta.kind) {
+ emit_malformed_attribute(sess, attr, name, template);
+ }
+ }
+ Err(mut err) => {
+ err.emit();
+ }
+ }
+}
+
+fn emit_malformed_attribute(
+ sess: &ParseSess,
+ attr: &Attribute,
+ name: Symbol,
+ template: AttributeTemplate,
+) {
+ // Some of the previously accepted forms were used in practice, so report them
+ // as warnings for now.
+ let should_warn = |name| {
+ matches!(name, sym::doc | sym::ignore | sym::inline | sym::link | sym::test | sym::bench)
+ };
+
+ let error_msg = format!("malformed `{}` attribute input", name);
+ let mut msg = "attribute must be of the form ".to_owned();
+ let mut suggestions = vec![];
+ let mut first = true;
+ let inner = if attr.style == ast::AttrStyle::Inner { "!" } else { "" };
+ if template.word {
+ first = false;
+ let code = format!("#{}[{}]", inner, name);
+ msg.push_str(&format!("`{}`", &code));
+ suggestions.push(code);
+ }
+ if let Some(descr) = template.list {
+ if !first {
+ msg.push_str(" or ");
+ }
+ first = false;
+ let code = format!("#{}[{}({})]", inner, name, descr);
+ msg.push_str(&format!("`{}`", &code));
+ suggestions.push(code);
+ }
+ if let Some(descr) = template.name_value_str {
+ if !first {
+ msg.push_str(" or ");
+ }
+ let code = format!("#{}[{} = \"{}\"]", inner, name, descr);
+ msg.push_str(&format!("`{}`", &code));
+ suggestions.push(code);
+ }
+ if should_warn(name) {
+ sess.buffer_lint(&ILL_FORMED_ATTRIBUTE_INPUT, attr.span, ast::CRATE_NODE_ID, &msg);
+ } else {
+ sess.span_diagnostic
+ .struct_span_err(attr.span, &error_msg)
+ .span_suggestions(
+ attr.span,
+ if suggestions.len() == 1 {
+ "must be of the form"
+ } else {
+ "the following are the possible correct uses"
+ },
+ suggestions.into_iter(),
+ Applicability::HasPlaceholders,
+ )
+ .emit();
+ }
+}
+
+pub fn emit_fatal_malformed_builtin_attribute(
+ sess: &ParseSess,
+ attr: &Attribute,
+ name: Symbol,
+) -> ! {
+ let template = BUILTIN_ATTRIBUTE_MAP.get(&name).expect("builtin attr defined").template;
+ emit_malformed_attribute(sess, attr, name, template);
+ // This is fatal, otherwise it will likely cause a cascade of other errors
+ // (and an error here is expected to be very rare).
+ FatalError.raise()
+}
diff --git a/compiler/rustc_parse_format/Cargo.toml b/compiler/rustc_parse_format/Cargo.toml
new file mode 100644
index 000000000..fcc68b3a2
--- /dev/null
+++ b/compiler/rustc_parse_format/Cargo.toml
@@ -0,0 +1,7 @@
+[package]
+name = "rustc_parse_format"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+rustc_lexer = { path = "../rustc_lexer" }
diff --git a/compiler/rustc_parse_format/src/lib.rs b/compiler/rustc_parse_format/src/lib.rs
new file mode 100644
index 000000000..4890fade5
--- /dev/null
+++ b/compiler/rustc_parse_format/src/lib.rs
@@ -0,0 +1,884 @@
+//! Macro support for format strings
+//!
+//! These structures are used when parsing format strings for the compiler.
+//! Parsing does not happen at runtime: structures of `std::fmt::rt` are
+//! generated instead.
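+//!
+//! As an illustrative sketch, parsing `"x = {x:>4}"` yields a
+//! `Piece::String("x = ")` followed by a `Piece::NextArgument(..)` whose format
+//! spec requests right alignment (`>`) with a width of 4.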
+
+#![doc(
+ html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
+ html_playground_url = "https://play.rust-lang.org/",
+ test(attr(deny(warnings)))
+)]
+// We want to be able to build this crate with a stable compiler, so no
+// `#![feature]` attributes should be added.
+
+pub use Alignment::*;
+pub use Count::*;
+pub use Flag::*;
+pub use Piece::*;
+pub use Position::*;
+
+use std::iter;
+use std::str;
+use std::string;
+
+// Note: copied from rustc_span
+/// Range inside of a `Span` used for diagnostics when we only have access to relative positions.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct InnerSpan {
+ pub start: usize,
+ pub end: usize,
+}
+
+impl InnerSpan {
+ pub fn new(start: usize, end: usize) -> InnerSpan {
+ InnerSpan { start, end }
+ }
+}
+
+/// The type of format string that we are parsing.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum ParseMode {
+ /// A normal format string as per `format_args!`.
+ Format,
+ /// An inline assembly template string for `asm!`.
+ InlineAsm,
+}
+
+#[derive(Copy, Clone)]
+struct InnerOffset(usize);
+
+impl InnerOffset {
+ fn to(self, end: InnerOffset) -> InnerSpan {
+ InnerSpan::new(self.0, end.0)
+ }
+}
+
+/// A piece is a portion of the format string which represents the next part
+/// to emit. These are emitted as a stream by the `Parser` iterator.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum Piece<'a> {
+ /// A literal string which should directly be emitted
+ String(&'a str),
+ /// This describes that formatting should process the next argument (as
+ /// specified inside) for emission.
+ NextArgument(Argument<'a>),
+}
+
+/// Representation of an argument specification.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub struct Argument<'a> {
+ /// Where to find this argument
+ pub position: Position<'a>,
+ /// The span of the position indicator. Includes any whitespace in implicit
+ /// positions (`{ }`).
+ pub position_span: InnerSpan,
+ /// How to format the argument
+ pub format: FormatSpec<'a>,
+}
+
+/// Specification for the formatting of an argument in the format string.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub struct FormatSpec<'a> {
+ /// Optionally specified character to fill alignment with.
+ pub fill: Option<char>,
+ /// Optionally specified alignment.
+ pub align: Alignment,
+ /// Packed version of various flags provided.
+ pub flags: u32,
+ /// The integer precision to use.
+ pub precision: Count<'a>,
+ /// The span of the precision formatting flag (for diagnostics).
+ pub precision_span: Option<InnerSpan>,
+ /// The string width requested for the resulting format.
+ pub width: Count<'a>,
+ /// The span of the width formatting flag (for diagnostics).
+ pub width_span: Option<InnerSpan>,
+ /// The descriptor string representing the name of the format desired for
+ /// this argument; this can be empty or any number of characters, although
+ /// it is required to be one word.
+ pub ty: &'a str,
+ /// The span of the descriptor string (for diagnostics).
+ pub ty_span: Option<InnerSpan>,
+}
+
+/// Enum describing where an argument for a format can be located.
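+///
+/// For example (illustrative): `{}` is `ArgumentImplicitlyIs(..)` with the next
+/// implicit index, `{0}` is `ArgumentIs(0)`, and `{name}` is `ArgumentNamed("name")`.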
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum Position<'a> {
+ /// The argument is implied to be located at an index
+ ArgumentImplicitlyIs(usize),
+ /// The argument is located at a specific index given in the format string.
+ ArgumentIs(usize),
+ /// The argument has a name.
+ ArgumentNamed(&'a str),
+}
+
+impl Position<'_> {
+ pub fn index(&self) -> Option<usize> {
+ match self {
+ ArgumentIs(i, ..) | ArgumentImplicitlyIs(i) => Some(*i),
+ _ => None,
+ }
+ }
+}
+
+/// Enum of alignments which are supported.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum Alignment {
+ /// The value will be aligned to the left.
+ AlignLeft,
+ /// The value will be aligned to the right.
+ AlignRight,
+ /// The value will be aligned in the center.
+ AlignCenter,
+ /// The value will take on a default alignment.
+ AlignUnknown,
+}
+
+/// Various flags which can be applied to format strings. The meaning of these
+/// flags is defined by the formatters themselves.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum Flag {
+ /// A `+` will be used to denote positive numbers.
+ FlagSignPlus,
+ /// A `-` will be used to denote negative numbers. This is the default.
+ FlagSignMinus,
+ /// An alternate form will be used for the value. In the case of numbers,
+ /// this means that the number will be prefixed with the supplied string.
+ FlagAlternate,
+ /// For numbers, this means that the number will be padded with zeroes,
+ /// and the sign (`+` or `-`) will precede them.
+ FlagSignAwareZeroPad,
+ /// For Debug / `?`, format integers in lower-case hexadecimal.
+ FlagDebugLowerHex,
+ /// For Debug / `?`, format integers in upper-case hexadecimal.
+ FlagDebugUpperHex,
+}
+
+/// A count is used for the precision and width parameters of a format
+/// specifier, and can reference either an argument or a literal integer.
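+///
+/// For example (illustrative; mirrors the `format_counts` test): `{:10}`
+/// parses the width as `CountIs(10)`, `{:1$}` as `CountIsParam(1)`, `{:a$}`
+/// as `CountIsName("a", ..)`, and `{:.*}` resolves the precision to
+/// `CountIsParam` of the next implicit argument.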
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum Count<'a> {
+ /// The count is specified explicitly.
+ CountIs(usize),
+ /// The count is specified by the argument with the given name.
+ CountIsName(&'a str, InnerSpan),
+ /// The count is specified by the argument at the given index.
+ CountIsParam(usize),
+ /// The count is implied and cannot be explicitly specified.
+ CountImplied,
+}
+
+pub struct ParseError {
+ pub description: string::String,
+ pub note: Option<string::String>,
+ pub label: string::String,
+ pub span: InnerSpan,
+ pub secondary_label: Option<(string::String, InnerSpan)>,
+ pub should_be_replaced_with_positional_argument: bool,
+}
+
+/// The parser structure for interpreting the input format string. This is
+/// modeled as an iterator over `Piece` structures to form a stream of tokens
+/// being output.
+///
+/// This is a recursive-descent parser for the sake of simplicity; if
+/// necessary, there is probably lots of room for performance improvement.
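+///
+/// A minimal usage sketch (mirrors the unit tests in `tests.rs`; illustrative
+/// only):
+///
+/// ```ignore (illustrative)
+/// let parser = Parser::new("hello {}", None, None, false, ParseMode::Format);
+/// let pieces: Vec<Piece<'_>> = parser.collect();
+/// // pieces == [String("hello "), NextArgument(..)]
+/// ```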
+pub struct Parser<'a> {
+ mode: ParseMode,
+ input: &'a str,
+ cur: iter::Peekable<str::CharIndices<'a>>,
+ /// Error messages accumulated during parsing
+ pub errors: Vec<ParseError>,
+ /// Current position of implicit positional argument pointer
+ pub curarg: usize,
+ /// `Some(raw count)` when the string is "raw", used to position spans correctly
+ style: Option<usize>,
+ /// Start and end byte offset of every successfully parsed argument
+ pub arg_places: Vec<InnerSpan>,
+ /// Characters that need to be shifted
+ skips: Vec<usize>,
+ /// Span of the last opening brace seen, used for error reporting
+ last_opening_brace: Option<InnerSpan>,
+ /// Whether the source string comes from `println!` as opposed to `format!` or `print!`
+ append_newline: bool,
+ /// Whether this format string is a literal or comes from a macro.
+ pub is_literal: bool,
+ /// Start position of the current line.
+ cur_line_start: usize,
+ /// Start and end byte offset of every line of the format string. Excludes
+ /// newline characters and leading whitespace.
+ pub line_spans: Vec<InnerSpan>,
+}
+
+impl<'a> Iterator for Parser<'a> {
+ type Item = Piece<'a>;
+
+ fn next(&mut self) -> Option<Piece<'a>> {
+ if let Some(&(pos, c)) = self.cur.peek() {
+ match c {
+ '{' => {
+ let curr_last_brace = self.last_opening_brace;
+ let byte_pos = self.to_span_index(pos);
+ let lbrace_end = InnerOffset(byte_pos.0 + 1);
+ self.last_opening_brace = Some(byte_pos.to(lbrace_end));
+ self.cur.next();
+ if self.consume('{') {
+ self.last_opening_brace = curr_last_brace;
+
+ Some(String(self.string(pos + 1)))
+ } else {
+ let arg = self.argument(lbrace_end);
+ if let Some(rbrace_byte_idx) = self.must_consume('}') {
+ let lbrace_inner_offset = self.to_span_index(pos);
+ let rbrace_inner_offset = self.to_span_index(rbrace_byte_idx);
+ if self.is_literal {
+ self.arg_places.push(
+ lbrace_inner_offset.to(InnerOffset(rbrace_inner_offset.0 + 1)),
+ );
+ }
+ } else {
+ self.suggest_positional_arg_instead_of_captured_arg(arg);
+ }
+ Some(NextArgument(arg))
+ }
+ }
+ '}' => {
+ self.cur.next();
+ if self.consume('}') {
+ Some(String(self.string(pos + 1)))
+ } else {
+ let err_pos = self.to_span_index(pos);
+ self.err_with_note(
+ "unmatched `}` found",
+ "unmatched `}`",
+ "if you intended to print `}`, you can escape it using `}}`",
+ err_pos.to(err_pos),
+ );
+ None
+ }
+ }
+ _ => Some(String(self.string(pos))),
+ }
+ } else {
+ if self.is_literal {
+ let start = self.to_span_index(self.cur_line_start);
+ let end = self.to_span_index(self.input.len());
+ let span = start.to(end);
+ if self.line_spans.last() != Some(&span) {
+ self.line_spans.push(span);
+ }
+ }
+ None
+ }
+ }
+}
+
+impl<'a> Parser<'a> {
+ /// Creates a new parser for the given format string
+ pub fn new(
+ s: &'a str,
+ style: Option<usize>,
+ snippet: Option<string::String>,
+ append_newline: bool,
+ mode: ParseMode,
+ ) -> Parser<'a> {
+ let (skips, is_literal) = find_skips_from_snippet(snippet, style);
+ Parser {
+ mode,
+ input: s,
+ cur: s.char_indices().peekable(),
+ errors: vec![],
+ curarg: 0,
+ style,
+ arg_places: vec![],
+ skips,
+ last_opening_brace: None,
+ append_newline,
+ is_literal,
+ cur_line_start: 0,
+ line_spans: vec![],
+ }
+ }
+
+ /// Notifies of an error. The message doesn't actually need to be of type
+ /// `String`, but it likely will be once this eventually uses conditions, so
+ /// it might as well start using it now.
+ fn err<S1: Into<string::String>, S2: Into<string::String>>(
+ &mut self,
+ description: S1,
+ label: S2,
+ span: InnerSpan,
+ ) {
+ self.errors.push(ParseError {
+ description: description.into(),
+ note: None,
+ label: label.into(),
+ span,
+ secondary_label: None,
+ should_be_replaced_with_positional_argument: false,
+ });
+ }
+
+ /// Notifies of an error. The message doesn't actually need to be of type
+ /// `String`, but it likely will be once this eventually uses conditions, so
+ /// it might as well start using it now.
+ fn err_with_note<
+ S1: Into<string::String>,
+ S2: Into<string::String>,
+ S3: Into<string::String>,
+ >(
+ &mut self,
+ description: S1,
+ label: S2,
+ note: S3,
+ span: InnerSpan,
+ ) {
+ self.errors.push(ParseError {
+ description: description.into(),
+ note: Some(note.into()),
+ label: label.into(),
+ span,
+ secondary_label: None,
+ should_be_replaced_with_positional_argument: false,
+ });
+ }
+
+ /// Optionally consumes the specified character. If the character is not at
+ /// the current position, then the current iterator isn't moved and `false` is
+ /// returned, otherwise the character is consumed and `true` is returned.
+ fn consume(&mut self, c: char) -> bool {
+ self.consume_pos(c).is_some()
+ }
+
+ /// Optionally consumes the specified character. If the character is not at
+ /// the current position, then the current iterator isn't moved and `None` is
+ /// returned, otherwise the character is consumed and the current position is
+ /// returned.
+ fn consume_pos(&mut self, c: char) -> Option<usize> {
+ if let Some(&(pos, maybe)) = self.cur.peek() {
+ if c == maybe {
+ self.cur.next();
+ return Some(pos);
+ }
+ }
+ None
+ }
+
+ fn to_span_index(&self, pos: usize) -> InnerOffset {
+ let mut pos = pos;
+ // This handles the raw string case; the raw argument is the number of `#`s
+ // in `r###"..."###` (we need to add one because of the `r`).
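+ // e.g. (illustrative): for `r#"..."#`, `style == Some(1)`, so `raw == 2`,
+ // covering the `r#` that precedes the opening quote.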
+ let raw = self.style.map_or(0, |raw| raw + 1);
+ for skip in &self.skips {
+ if pos > *skip {
+ pos += 1;
+ } else if pos == *skip && raw == 0 {
+ pos += 1;
+ } else {
+ break;
+ }
+ }
+ InnerOffset(raw + pos + 1)
+ }
+
+ /// Forces consumption of the specified character. If the character is not
+ /// found, an error is emitted.
+ fn must_consume(&mut self, c: char) -> Option<usize> {
+ self.ws();
+
+ if let Some(&(pos, maybe)) = self.cur.peek() {
+ if c == maybe {
+ self.cur.next();
+ Some(pos)
+ } else {
+ let pos = self.to_span_index(pos);
+ let description = format!("expected `'}}'`, found `{:?}`", maybe);
+ let label = "expected `}`".to_owned();
+ let (note, secondary_label) = if c == '}' {
+ (
+ Some(
+ "if you intended to print `{`, you can escape it using `{{`".to_owned(),
+ ),
+ self.last_opening_brace
+ .map(|sp| ("because of this opening brace".to_owned(), sp)),
+ )
+ } else {
+ (None, None)
+ };
+ self.errors.push(ParseError {
+ description,
+ note,
+ label,
+ span: pos.to(pos),
+ secondary_label,
+ should_be_replaced_with_positional_argument: false,
+ });
+ None
+ }
+ } else {
+ let description = format!("expected `{:?}` but string was terminated", c);
+ // point at closing `"`
+ let pos = self.input.len() - if self.append_newline { 1 } else { 0 };
+ let pos = self.to_span_index(pos);
+ if c == '}' {
+ let label = format!("expected `{:?}`", c);
+ let (note, secondary_label) = if c == '}' {
+ (
+ Some(
+ "if you intended to print `{`, you can escape it using `{{`".to_owned(),
+ ),
+ self.last_opening_brace
+ .map(|sp| ("because of this opening brace".to_owned(), sp)),
+ )
+ } else {
+ (None, None)
+ };
+ self.errors.push(ParseError {
+ description,
+ note,
+ label,
+ span: pos.to(pos),
+ secondary_label,
+ should_be_replaced_with_positional_argument: false,
+ });
+ } else {
+ self.err(description, format!("expected `{:?}`", c), pos.to(pos));
+ }
+ None
+ }
+ }
+
+ /// Consumes all whitespace characters until the first non-whitespace character
+ fn ws(&mut self) {
+ while let Some(&(_, c)) = self.cur.peek() {
+ if c.is_whitespace() {
+ self.cur.next();
+ } else {
+ break;
+ }
+ }
+ }
+
+ /// Parses all of a string which is to be considered a "raw literal" in a
+ /// format string. This is everything outside of the braces.
+ fn string(&mut self, start: usize) -> &'a str {
+ // We may not consume the character, so peek the iterator.
+ while let Some(&(pos, c)) = self.cur.peek() {
+ match c {
+ '{' | '}' => {
+ return &self.input[start..pos];
+ }
+ '\n' if self.is_literal => {
+ let start = self.to_span_index(self.cur_line_start);
+ let end = self.to_span_index(pos);
+ self.line_spans.push(start.to(end));
+ self.cur_line_start = pos + 1;
+ self.cur.next();
+ }
+ _ => {
+ if self.is_literal && pos == self.cur_line_start && c.is_whitespace() {
+ self.cur_line_start = pos + c.len_utf8();
+ }
+ self.cur.next();
+ }
+ }
+ }
+ &self.input[start..self.input.len()]
+ }
+
+ /// Parses an `Argument` structure, or what's contained within braces inside the format string.
+ fn argument(&mut self, start: InnerOffset) -> Argument<'a> {
+ let pos = self.position();
+
+ let end = self
+ .cur
+ .clone()
+ .find(|(_, ch)| !ch.is_whitespace())
+ .map_or(start, |(end, _)| self.to_span_index(end));
+ let position_span = start.to(end);
+
+ let format = match self.mode {
+ ParseMode::Format => self.format(),
+ ParseMode::InlineAsm => self.inline_asm(),
+ };
+
+ // Resolve position after parsing format spec.
+ let pos = match pos {
+ Some(position) => position,
+ None => {
+ let i = self.curarg;
+ self.curarg += 1;
+ ArgumentImplicitlyIs(i)
+ }
+ };
+
+ Argument { position: pos, position_span, format }
+ }
+
+ /// Parses a positional argument for a format. This could either be an
+ /// integer index of an argument, a named argument, or a blank string.
+ /// Returns `Some(parsed_position)` if the position is not implicitly
+ /// consuming a macro argument, or `None` if it is.
+ fn position(&mut self) -> Option<Position<'a>> {
+ if let Some(i) = self.integer() {
+ Some(ArgumentIs(i))
+ } else {
+ match self.cur.peek() {
+ Some(&(_, c)) if rustc_lexer::is_id_start(c) => Some(ArgumentNamed(self.word())),
+
+ // This is an `ArgumentNext`.
+ // Record the fact and do the resolution after parsing the
+ // format spec, to make things like `{:.*}` work.
+ _ => None,
+ }
+ }
+ }
+
+ /// Parses a format specifier at the current position, returning all of the
+ /// relevant information in the `FormatSpec` struct.
+ fn format(&mut self) -> FormatSpec<'a> {
+ let mut spec = FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountImplied,
+ precision_span: None,
+ width: CountImplied,
+ width_span: None,
+ ty: &self.input[..0],
+ ty_span: None,
+ };
+ if !self.consume(':') {
+ return spec;
+ }
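+
+ // The remainder is parsed roughly per the std::fmt grammar (paraphrased;
+ // illustrative, not normative):
+ //     [[fill]align][sign]['#']['0'][width]['.' precision][type]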
+
+ // fill character
+ if let Some(&(_, c)) = self.cur.peek() {
+ if let Some((_, '>' | '<' | '^')) = self.cur.clone().nth(1) {
+ spec.fill = Some(c);
+ self.cur.next();
+ }
+ }
+ // Alignment
+ if self.consume('<') {
+ spec.align = AlignLeft;
+ } else if self.consume('>') {
+ spec.align = AlignRight;
+ } else if self.consume('^') {
+ spec.align = AlignCenter;
+ }
+ // Sign flags
+ if self.consume('+') {
+ spec.flags |= 1 << (FlagSignPlus as u32);
+ } else if self.consume('-') {
+ spec.flags |= 1 << (FlagSignMinus as u32);
+ }
+ // Alternate marker
+ if self.consume('#') {
+ spec.flags |= 1 << (FlagAlternate as u32);
+ }
+ // Width and precision
+ let mut havewidth = false;
+
+ if self.consume('0') {
+ // Small ambiguity with '0$' as a format string. In theory this is a
+ // '0' flag followed by an ill-formed count with just a '$' and no
+ // number, but it is better to instead interpret this as no '0' flag
+ // and '0$' as the width.
+ if let Some(end) = self.consume_pos('$') {
+ spec.width = CountIsParam(0);
+
+ havewidth = true;
+ spec.width_span = Some(self.to_span_index(end - 1).to(self.to_span_index(end + 1)));
+ } else {
+ spec.flags |= 1 << (FlagSignAwareZeroPad as u32);
+ }
+ }
+ if !havewidth {
+ let width_span_start = if let Some((pos, _)) = self.cur.peek() { *pos } else { 0 };
+ let (w, sp) = self.count(width_span_start);
+ spec.width = w;
+ spec.width_span = sp;
+ }
+
+ if let Some(start) = self.consume_pos('.') {
+ if let Some(end) = self.consume_pos('*') {
+ // Resolve `CountIsNextParam`.
+ // We can do this immediately as `position` is resolved later.
+ let i = self.curarg;
+ self.curarg += 1;
+ spec.precision = CountIsParam(i);
+ spec.precision_span =
+ Some(self.to_span_index(start).to(self.to_span_index(end + 1)));
+ } else {
+ let (p, sp) = self.count(start);
+ spec.precision = p;
+ spec.precision_span = sp;
+ }
+ }
+ let ty_span_start = self.cur.peek().map(|(pos, _)| *pos);
+ // Optional radix followed by the actual format specifier
+ if self.consume('x') {
+ if self.consume('?') {
+ spec.flags |= 1 << (FlagDebugLowerHex as u32);
+ spec.ty = "?";
+ } else {
+ spec.ty = "x";
+ }
+ } else if self.consume('X') {
+ if self.consume('?') {
+ spec.flags |= 1 << (FlagDebugUpperHex as u32);
+ spec.ty = "?";
+ } else {
+ spec.ty = "X";
+ }
+ } else if self.consume('?') {
+ spec.ty = "?";
+ } else {
+ spec.ty = self.word();
+ let ty_span_end = self.cur.peek().map(|(pos, _)| *pos);
+ if !spec.ty.is_empty() {
+ spec.ty_span = ty_span_start
+ .and_then(|s| ty_span_end.map(|e| (s, e)))
+ .map(|(start, end)| self.to_span_index(start).to(self.to_span_index(end)));
+ }
+ }
+ spec
+ }
+
+ /// Parses an inline assembly template modifier at the current position, returning the modifier
+ /// in the `ty` field of the `FormatSpec` struct.
+ fn inline_asm(&mut self) -> FormatSpec<'a> {
+ let mut spec = FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountImplied,
+ precision_span: None,
+ width: CountImplied,
+ width_span: None,
+ ty: &self.input[..0],
+ ty_span: None,
+ };
+ if !self.consume(':') {
+ return spec;
+ }
+
+ let ty_span_start = self.cur.peek().map(|(pos, _)| *pos);
+ spec.ty = self.word();
+ let ty_span_end = self.cur.peek().map(|(pos, _)| *pos);
+ if !spec.ty.is_empty() {
+ spec.ty_span = ty_span_start
+ .and_then(|s| ty_span_end.map(|e| (s, e)))
+ .map(|(start, end)| self.to_span_index(start).to(self.to_span_index(end)));
+ }
+
+ spec
+ }
+
+ /// Parses a `Count` parameter at the current position. This does not check
+ /// for the `*` (next-argument) count because that is only used for
+ /// precision, not width.
+ fn count(&mut self, start: usize) -> (Count<'a>, Option<InnerSpan>) {
+ if let Some(i) = self.integer() {
+ if let Some(end) = self.consume_pos('$') {
+ let span = self.to_span_index(start).to(self.to_span_index(end + 1));
+ (CountIsParam(i), Some(span))
+ } else {
+ (CountIs(i), None)
+ }
+ } else {
+ let tmp = self.cur.clone();
+ let word = self.word();
+ if word.is_empty() {
+ self.cur = tmp;
+ (CountImplied, None)
+ } else if let Some(end) = self.consume_pos('$') {
+ let span = self.to_span_index(start + 1).to(self.to_span_index(end));
+ (CountIsName(word, span), None)
+ } else {
+ self.cur = tmp;
+ (CountImplied, None)
+ }
+ }
+ }
+
+ /// Parses a word starting at the current position. A word is the same as a
+ /// Rust identifier, except that it can't start with a `_` character.
+ fn word(&mut self) -> &'a str {
+ let start = match self.cur.peek() {
+ Some(&(pos, c)) if rustc_lexer::is_id_start(c) => {
+ self.cur.next();
+ pos
+ }
+ _ => {
+ return "";
+ }
+ };
+ let mut end = None;
+ while let Some(&(pos, c)) = self.cur.peek() {
+ if rustc_lexer::is_id_continue(c) {
+ self.cur.next();
+ } else {
+ end = Some(pos);
+ break;
+ }
+ }
+ let end = end.unwrap_or(self.input.len());
+ let word = &self.input[start..end];
+ if word == "_" {
+ self.err_with_note(
+ "invalid argument name `_`",
+ "invalid argument name",
+ "argument name cannot be a single underscore",
+ self.to_span_index(start).to(self.to_span_index(end)),
+ );
+ }
+ word
+ }
+
+ /// Optionally parses an integer at the current position. This doesn't deal
+ /// with overflow at all, it's just accumulating digits.
+ fn integer(&mut self) -> Option<usize> {
+ let mut cur = 0;
+ let mut found = false;
+ while let Some(&(_, c)) = self.cur.peek() {
+ if let Some(i) = c.to_digit(10) {
+ cur = cur * 10 + i as usize;
+ found = true;
+ self.cur.next();
+ } else {
+ break;
+ }
+ }
+ if found { Some(cur) } else { None }
+ }
+
+ fn suggest_positional_arg_instead_of_captured_arg(&mut self, arg: Argument<'a>) {
+ if let Some(end) = self.consume_pos('.') {
+ let byte_pos = self.to_span_index(end);
+ let start = InnerOffset(byte_pos.0 + 1);
+ let field = self.argument(start);
+ // We can only parse `foo.bar` field access; any deeper nesting or other
+ // kinds of expression, like method calls, are not supported.
+ if !self.consume('}') {
+ return;
+ }
+ if let ArgumentNamed(_) = arg.position {
+ if let ArgumentNamed(_) = field.position {
+ self.errors.insert(
+ 0,
+ ParseError {
+ description: "field access isn't supported".to_string(),
+ note: None,
+ label: "not supported".to_string(),
+ span: InnerSpan::new(arg.position_span.start, field.position_span.end),
+ secondary_label: None,
+ should_be_replaced_with_positional_argument: true,
+ },
+ );
+ }
+ }
+ }
+ }
+}
+
+/// Finds the indices of all characters that have been processed and differ between the actual
+/// written code (code snippet) and the `InternedString` that gets processed in the `Parser`
+/// in order to properly synthesise the intra-string `Span`s for error diagnostics.
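+///
+/// A sketch of the idea (illustrative, not upstream prose): in the written
+/// snippet `"a\nb"` the escape `\n` occupies two characters, while the
+/// processed string contains a single newline, so the position of the `n` is
+/// recorded as a skip and `to_span_index` can shift parser offsets back to
+/// source positions.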
+fn find_skips_from_snippet(
+ snippet: Option<string::String>,
+ str_style: Option<usize>,
+) -> (Vec<usize>, bool) {
+ let snippet = match snippet {
+ Some(ref s) if s.starts_with('"') || s.starts_with("r\"") || s.starts_with("r#") => s,
+ _ => return (vec![], false),
+ };
+
+ fn find_skips(snippet: &str, is_raw: bool) -> Vec<usize> {
+ let mut s = snippet.char_indices().peekable();
+ let mut skips = vec![];
+ while let Some((pos, c)) = s.next() {
+ match (c, s.peek()) {
+ // skip whitespace and empty lines ending in '\\'
+ ('\\', Some((next_pos, '\n'))) if !is_raw => {
+ skips.push(pos);
+ skips.push(*next_pos);
+ let _ = s.next();
+
+ while let Some((pos, c)) = s.peek() {
+ if matches!(c, ' ' | '\n' | '\t') {
+ skips.push(*pos);
+ let _ = s.next();
+ } else {
+ break;
+ }
+ }
+ }
+ ('\\', Some((next_pos, 'n' | 't' | 'r' | '0' | '\\' | '\'' | '\"'))) => {
+ skips.push(*next_pos);
+ let _ = s.next();
+ }
+ ('\\', Some((_, 'x'))) if !is_raw => {
+ for _ in 0..3 {
+ // consume `\xAB` literal
+ if let Some((pos, _)) = s.next() {
+ skips.push(pos);
+ } else {
+ break;
+ }
+ }
+ }
+ ('\\', Some((_, 'u'))) if !is_raw => {
+ if let Some((pos, _)) = s.next() {
+ skips.push(pos);
+ }
+ if let Some((next_pos, next_c)) = s.next() {
+ if next_c == '{' {
+ skips.push(next_pos);
+ let mut i = 0; // consume up to 6 hexadecimal chars + closing `}`
+ while let (Some((next_pos, c)), true) = (s.next(), i < 7) {
+ if c.is_digit(16) {
+ skips.push(next_pos);
+ } else if c == '}' {
+ skips.push(next_pos);
+ break;
+ } else {
+ break;
+ }
+ i += 1;
+ }
+ } else if next_c.is_digit(16) {
+ skips.push(next_pos);
+ // We suggest adding `{` and `}` when appropriate; accept it here as
+ // if it were correct.
+ let mut i = 0; // consume up to 6 hexadecimal chars
+ while let (Some((next_pos, c)), _) = (s.next(), i < 6) {
+ if c.is_digit(16) {
+ skips.push(next_pos);
+ } else {
+ break;
+ }
+ i += 1;
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ skips
+ }
+
+ let r_start = str_style.map_or(0, |r| r + 1);
+ let r_end = str_style.unwrap_or(0);
+ let s = &snippet[r_start + 1..snippet.len() - r_end - 1];
+ (find_skips(s, str_style.is_some()), true)
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/compiler/rustc_parse_format/src/tests.rs b/compiler/rustc_parse_format/src/tests.rs
new file mode 100644
index 000000000..578530696
--- /dev/null
+++ b/compiler/rustc_parse_format/src/tests.rs
@@ -0,0 +1,374 @@
+use super::*;
+
+fn same(fmt: &'static str, p: &[Piece<'static>]) {
+ let parser = Parser::new(fmt, None, None, false, ParseMode::Format);
+ assert_eq!(parser.collect::<Vec<Piece<'static>>>(), p);
+}
+
+fn fmtdflt() -> FormatSpec<'static> {
+ FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountImplied,
+ width: CountImplied,
+ precision_span: None,
+ width_span: None,
+ ty: "",
+ ty_span: None,
+ }
+}
+
+fn musterr(s: &str) {
+ let mut p = Parser::new(s, None, None, false, ParseMode::Format);
+ p.next();
+ assert!(!p.errors.is_empty());
+}
+
+#[test]
+fn simple() {
+ same("asdf", &[String("asdf")]);
+ same("a{{b", &[String("a"), String("{b")]);
+ same("a}}b", &[String("a"), String("}b")]);
+ same("a}}", &[String("a"), String("}")]);
+ same("}}", &[String("}")]);
+ same("\\}}", &[String("\\"), String("}")]);
+}
+
+#[test]
+fn invalid01() {
+ musterr("{")
+}
+#[test]
+fn invalid02() {
+ musterr("}")
+}
+#[test]
+fn invalid04() {
+ musterr("{3a}")
+}
+#[test]
+fn invalid05() {
+ musterr("{:|}")
+}
+#[test]
+fn invalid06() {
+ musterr("{:>>>}")
+}
+
+#[test]
+fn format_nothing() {
+ same(
+ "{}",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 2 },
+ format: fmtdflt(),
+ })],
+ );
+}
+#[test]
+fn format_position() {
+ same(
+ "{3}",
+ &[NextArgument(Argument {
+ position: ArgumentIs(3),
+ position_span: InnerSpan { start: 2, end: 3 },
+ format: fmtdflt(),
+ })],
+ );
+}
+#[test]
+fn format_position_nothing_else() {
+ same(
+ "{3:}",
+ &[NextArgument(Argument {
+ position: ArgumentIs(3),
+ position_span: InnerSpan { start: 2, end: 3 },
+ format: fmtdflt(),
+ })],
+ );
+}
+#[test]
+fn format_named() {
+ same(
+ "{name}",
+ &[NextArgument(Argument {
+ position: ArgumentNamed("name"),
+ position_span: InnerSpan { start: 2, end: 6 },
+ format: fmtdflt(),
+ })],
+ )
+}
+#[test]
+fn format_type() {
+ same(
+ "{3:x}",
+ &[NextArgument(Argument {
+ position: ArgumentIs(3),
+ position_span: InnerSpan { start: 2, end: 3 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountImplied,
+ width: CountImplied,
+ precision_span: None,
+ width_span: None,
+ ty: "x",
+ ty_span: None,
+ },
+ })],
+ );
+}
+#[test]
+fn format_align_fill() {
+ same(
+ "{3:>}",
+ &[NextArgument(Argument {
+ position: ArgumentIs(3),
+ position_span: InnerSpan { start: 2, end: 3 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignRight,
+ flags: 0,
+ precision: CountImplied,
+ width: CountImplied,
+ precision_span: None,
+ width_span: None,
+ ty: "",
+ ty_span: None,
+ },
+ })],
+ );
+ same(
+ "{3:0<}",
+ &[NextArgument(Argument {
+ position: ArgumentIs(3),
+ position_span: InnerSpan { start: 2, end: 3 },
+ format: FormatSpec {
+ fill: Some('0'),
+ align: AlignLeft,
+ flags: 0,
+ precision: CountImplied,
+ width: CountImplied,
+ precision_span: None,
+ width_span: None,
+ ty: "",
+ ty_span: None,
+ },
+ })],
+ );
+ same(
+ "{3:*<abcd}",
+ &[NextArgument(Argument {
+ position: ArgumentIs(3),
+ position_span: InnerSpan { start: 2, end: 3 },
+ format: FormatSpec {
+ fill: Some('*'),
+ align: AlignLeft,
+ flags: 0,
+ precision: CountImplied,
+ width: CountImplied,
+ precision_span: None,
+ width_span: None,
+ ty: "abcd",
+ ty_span: Some(InnerSpan::new(6, 10)),
+ },
+ })],
+ );
+}
+#[test]
+fn format_counts() {
+ same(
+ "{:10x}",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 2 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountImplied,
+ width: CountIs(10),
+ precision_span: None,
+ width_span: None,
+ ty: "x",
+ ty_span: None,
+ },
+ })],
+ );
+ same(
+ "{:10$.10x}",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 2 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountIs(10),
+ width: CountIsParam(10),
+ precision_span: None,
+ width_span: Some(InnerSpan::new(3, 6)),
+ ty: "x",
+ ty_span: None,
+ },
+ })],
+ );
+ same(
+ "{1:0$.10x}",
+ &[NextArgument(Argument {
+ position: ArgumentIs(1),
+ position_span: InnerSpan { start: 2, end: 3 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountIs(10),
+ width: CountIsParam(0),
+ precision_span: None,
+ width_span: Some(InnerSpan::new(4, 6)),
+ ty: "x",
+ ty_span: None,
+ },
+ })],
+ );
+ same(
+ "{:.*x}",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(1),
+ position_span: InnerSpan { start: 2, end: 2 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountIsParam(0),
+ width: CountImplied,
+ precision_span: Some(InnerSpan::new(3, 5)),
+ width_span: None,
+ ty: "x",
+ ty_span: None,
+ },
+ })],
+ );
+ same(
+ "{:.10$x}",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 2 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountIsParam(10),
+ width: CountImplied,
+ precision_span: Some(InnerSpan::new(3, 7)),
+ width_span: None,
+ ty: "x",
+ ty_span: None,
+ },
+ })],
+ );
+ same(
+ "{:a$.b$?}",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 2 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountIsName("b", InnerSpan::new(6, 7)),
+ width: CountIsName("a", InnerSpan::new(4, 4)),
+ precision_span: None,
+ width_span: None,
+ ty: "?",
+ ty_span: None,
+ },
+ })],
+ );
+}
+#[test]
+fn format_flags() {
+ same(
+ "{:-}",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 2 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: (1 << FlagSignMinus as u32),
+ precision: CountImplied,
+ width: CountImplied,
+ precision_span: None,
+ width_span: None,
+ ty: "",
+ ty_span: None,
+ },
+ })],
+ );
+ same(
+ "{:+#}",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 2 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: (1 << FlagSignPlus as u32) | (1 << FlagAlternate as u32),
+ precision: CountImplied,
+ width: CountImplied,
+ precision_span: None,
+ width_span: None,
+ ty: "",
+ ty_span: None,
+ },
+ })],
+ );
+}
+#[test]
+fn format_mixture() {
+ same(
+ "abcd {3:x} efg",
+ &[
+ String("abcd "),
+ NextArgument(Argument {
+ position: ArgumentIs(3),
+ position_span: InnerSpan { start: 7, end: 8 },
+ format: FormatSpec {
+ fill: None,
+ align: AlignUnknown,
+ flags: 0,
+ precision: CountImplied,
+ width: CountImplied,
+ precision_span: None,
+ width_span: None,
+ ty: "x",
+ ty_span: None,
+ },
+ }),
+ String(" efg"),
+ ],
+ );
+}
+#[test]
+fn format_whitespace() {
+ same(
+ "{ }",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 3 },
+ format: fmtdflt(),
+ })],
+ );
+ same(
+ "{ }",
+ &[NextArgument(Argument {
+ position: ArgumentImplicitlyIs(0),
+ position_span: InnerSpan { start: 2, end: 4 },
+ format: fmtdflt(),
+ })],
+ );
+}
diff --git a/compiler/rustc_passes/Cargo.toml b/compiler/rustc_passes/Cargo.toml
new file mode 100644
index 000000000..faa9c493d
--- /dev/null
+++ b/compiler/rustc_passes/Cargo.toml
@@ -0,0 +1,24 @@
+[package]
+name = "rustc_passes"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+tracing = "0.1"
+itertools = "0.10.1"
+rustc_middle = { path = "../rustc_middle" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_expand = { path = "../rustc_expand" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_session = { path = "../rustc_session" }
+rustc_target = { path = "../rustc_target" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_span = { path = "../rustc_span" }
+rustc_lexer = { path = "../rustc_lexer" }
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_feature = { path = "../rustc_feature" }
diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs
new file mode 100644
index 000000000..a2ac329f2
--- /dev/null
+++ b/compiler/rustc_passes/src/check_attr.rs
@@ -0,0 +1,2217 @@
+//! This module implements some validity checks for attributes.
+//! In particular it verifies that `#[inline]` and `#[repr]` attributes are
+//! attached to items that actually support them, and that there are no
+//! conflicts between multiple such attributes attached to the same
+//! item.
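+//!
+//! For example (illustrative only), this pass rejects the following, since
+//! `#[inline]` is only meaningful on functions and closures:
+//!
+//! ```ignore (illustrative)
+//! #[inline]
+//! struct S; // error: attribute should be applied to function or closure
+//! ```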
+
+use crate::errors;
+use rustc_ast::{ast, AttrStyle, Attribute, Lit, LitKind, MetaItemKind, NestedMetaItem};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{fluent, struct_span_err, Applicability, MultiSpan};
+use rustc_expand::base::resolve_path;
+use rustc_feature::{AttributeDuplicates, AttributeType, BuiltinAttribute, BUILTIN_ATTRIBUTE_MAP};
+use rustc_hir as hir;
+use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{self, FnSig, ForeignItem, HirId, Item, ItemKind, TraitItem, CRATE_HIR_ID};
+use rustc_hir::{MethodKind, Target};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::lint::builtin::{
+ CONFLICTING_REPR_HINTS, INVALID_DOC_ATTRIBUTES, UNUSED_ATTRIBUTES,
+};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::spec::abi::Abi;
+use std::collections::hash_map::Entry;
+
+pub(crate) fn target_from_impl_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_item: &hir::ImplItem<'_>,
+) -> Target {
+ match impl_item.kind {
+ hir::ImplItemKind::Const(..) => Target::AssocConst,
+ hir::ImplItemKind::Fn(..) => {
+ let parent_hir_id = tcx.hir().get_parent_item(impl_item.hir_id());
+ let containing_item = tcx.hir().expect_item(parent_hir_id);
+ let containing_impl_is_for_trait = match &containing_item.kind {
+ hir::ItemKind::Impl(impl_) => impl_.of_trait.is_some(),
+ _ => bug!("parent of an ImplItem must be an Impl"),
+ };
+ if containing_impl_is_for_trait {
+ Target::Method(MethodKind::Trait { body: true })
+ } else {
+ Target::Method(MethodKind::Inherent)
+ }
+ }
+ hir::ImplItemKind::TyAlias(..) => Target::AssocTy,
+ }
+}
+
+#[derive(Clone, Copy)]
+enum ItemLike<'tcx> {
+ Item(&'tcx Item<'tcx>),
+ ForeignItem,
+}
+
+struct CheckAttrVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl CheckAttrVisitor<'_> {
+ /// Checks any attribute.
+ fn check_attributes(
+ &self,
+ hir_id: HirId,
+ span: Span,
+ target: Target,
+ item: Option<ItemLike<'_>>,
+ ) {
+ let mut doc_aliases = FxHashMap::default();
+ let mut is_valid = true;
+ let mut specified_inline = None;
+ let mut seen = FxHashMap::default();
+ let attrs = self.tcx.hir().attrs(hir_id);
+ for attr in attrs {
+ let attr_is_valid = match attr.name_or_empty() {
+ sym::inline => self.check_inline(hir_id, attr, span, target),
+ sym::no_coverage => self.check_no_coverage(hir_id, attr, span, target),
+ sym::non_exhaustive => self.check_non_exhaustive(hir_id, attr, span, target),
+ sym::marker => self.check_marker(hir_id, attr, span, target),
+ sym::rustc_must_implement_one_of => {
+ self.check_rustc_must_implement_one_of(attr, span, target)
+ }
+ sym::target_feature => self.check_target_feature(hir_id, attr, span, target),
+ sym::thread_local => self.check_thread_local(attr, span, target),
+ sym::track_caller => {
+ self.check_track_caller(hir_id, attr.span, attrs, span, target)
+ }
+ sym::doc => self.check_doc_attrs(
+ attr,
+ hir_id,
+ target,
+ &mut specified_inline,
+ &mut doc_aliases,
+ ),
+ sym::no_link => self.check_no_link(hir_id, &attr, span, target),
+ sym::export_name => self.check_export_name(hir_id, &attr, span, target),
+ sym::rustc_layout_scalar_valid_range_start
+ | sym::rustc_layout_scalar_valid_range_end => {
+ self.check_rustc_layout_scalar_valid_range(&attr, span, target)
+ }
+ sym::allow_internal_unstable => {
+ self.check_allow_internal_unstable(hir_id, &attr, span, target, &attrs)
+ }
+ sym::debugger_visualizer => self.check_debugger_visualizer(&attr, target),
+ sym::rustc_allow_const_fn_unstable => {
+ self.check_rustc_allow_const_fn_unstable(hir_id, &attr, span, target)
+ }
+ sym::rustc_std_internal_symbol => {
+ self.check_rustc_std_internal_symbol(&attr, span, target)
+ }
+ sym::naked => self.check_naked(hir_id, attr, span, target),
+ sym::rustc_legacy_const_generics => {
+ self.check_rustc_legacy_const_generics(&attr, span, target, item)
+ }
+ sym::rustc_lint_query_instability => {
+ self.check_rustc_lint_query_instability(&attr, span, target)
+ }
+ sym::rustc_lint_diagnostics => {
+ self.check_rustc_lint_diagnostics(&attr, span, target)
+ }
+ sym::rustc_lint_opt_ty => self.check_rustc_lint_opt_ty(&attr, span, target),
+ sym::rustc_lint_opt_deny_field_access => {
+ self.check_rustc_lint_opt_deny_field_access(&attr, span, target)
+ }
+ sym::rustc_clean
+ | sym::rustc_dirty
+ | sym::rustc_if_this_changed
+ | sym::rustc_then_this_would_need => self.check_rustc_dirty_clean(&attr),
+ sym::cmse_nonsecure_entry => self.check_cmse_nonsecure_entry(attr, span, target),
+ sym::const_trait => self.check_const_trait(attr, span, target),
+ sym::must_not_suspend => self.check_must_not_suspend(&attr, span, target),
+ sym::must_use => self.check_must_use(hir_id, &attr, span, target),
+ sym::rustc_pass_by_value => self.check_pass_by_value(&attr, span, target),
+ sym::rustc_allow_incoherent_impl => {
+ self.check_allow_incoherent_impl(&attr, span, target)
+ }
+ sym::rustc_has_incoherent_inherent_impls => {
+ self.check_has_incoherent_inherent_impls(&attr, span, target)
+ }
+ sym::rustc_const_unstable
+ | sym::rustc_const_stable
+ | sym::unstable
+ | sym::stable
+ | sym::rustc_allowed_through_unstable_modules
+ | sym::rustc_promotable => self.check_stability_promotable(&attr, span, target),
+ _ => true,
+ };
+ is_valid &= attr_is_valid;
+
+ // lint-only checks
+ match attr.name_or_empty() {
+ sym::cold => self.check_cold(hir_id, attr, span, target),
+ sym::link => self.check_link(hir_id, attr, span, target),
+ sym::link_name => self.check_link_name(hir_id, attr, span, target),
+ sym::link_section => self.check_link_section(hir_id, attr, span, target),
+ sym::no_mangle => self.check_no_mangle(hir_id, attr, span, target),
+ sym::deprecated => self.check_deprecated(hir_id, attr, span, target),
+ sym::macro_use | sym::macro_escape => self.check_macro_use(hir_id, attr, target),
+ sym::path => self.check_generic_attr(hir_id, attr, target, &[Target::Mod]),
+ sym::plugin_registrar => self.check_plugin_registrar(hir_id, attr, target),
+ sym::macro_export => self.check_macro_export(hir_id, attr, target),
+ sym::ignore | sym::should_panic | sym::proc_macro_derive => {
+ self.check_generic_attr(hir_id, attr, target, &[Target::Fn])
+ }
+ sym::automatically_derived => {
+ self.check_generic_attr(hir_id, attr, target, &[Target::Impl])
+ }
+ sym::no_implicit_prelude => {
+ self.check_generic_attr(hir_id, attr, target, &[Target::Mod])
+ }
+ _ => {}
+ }
+
+ let builtin = attr.ident().and_then(|ident| BUILTIN_ATTRIBUTE_MAP.get(&ident.name));
+
+ if hir_id != CRATE_HIR_ID {
+ if let Some(BuiltinAttribute { type_: AttributeType::CrateLevel, .. }) =
+ attr.ident().and_then(|ident| BUILTIN_ATTRIBUTE_MAP.get(&ident.name))
+ {
+ match attr.style {
+ ast::AttrStyle::Outer => self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::OuterCrateLevelAttr,
+ ),
+ ast::AttrStyle::Inner => self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::InnerCrateLevelAttr,
+ ),
+ }
+ }
+ }
+
+ if let Some(BuiltinAttribute { duplicates, .. }) = builtin {
+ check_duplicates(self.tcx, attr, hir_id, *duplicates, &mut seen);
+ }
+
+ self.check_unused_attribute(hir_id, attr)
+ }
+
+ if !is_valid {
+ return;
+ }
+
+ // FIXME(@lcnr): this doesn't belong here.
+ if matches!(target, Target::Closure | Target::Fn | Target::Method(_) | Target::ForeignFn) {
+ self.tcx.ensure().codegen_fn_attrs(self.tcx.hir().local_def_id(hir_id));
+ }
+
+ self.check_repr(attrs, span, target, item, hir_id);
+ self.check_used(attrs, target);
+ }
+
+ fn inline_attr_str_error_with_macro_def(&self, hir_id: HirId, attr: &Attribute, sym: &str) {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::IgnoredAttrWithMacro { sym },
+ );
+ }
+
+ fn inline_attr_str_error_without_macro_def(&self, hir_id: HirId, attr: &Attribute, sym: &str) {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::IgnoredAttr { sym },
+ );
+ }
+
+ /// Checks if an `#[inline]` is applied to a function or a closure. Returns `true` if valid.
+ fn check_inline(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::Fn
+ | Target::Closure
+ | Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
+ Target::Method(MethodKind::Trait { body: false }) | Target::ForeignFn => {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::IgnoredInlineAttrFnProto,
+ );
+ true
+ }
+ // FIXME(#65833): We permit associated consts to have an `#[inline]` attribute with
+ // just a lint, because we previously erroneously allowed it and some crates used it
+ // accidentally; to be compatible with crates depending on them, we can't throw an
+ // error here.
+ Target::AssocConst => {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::IgnoredInlineAttrConstants,
+ );
+ true
+ }
+ // FIXME(#80564): Same for fields, arms, and macro defs
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "inline");
+ true
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::InlineNotFnOrClosure {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ }
+ }
+ }
+
+ /// Checks if a `#[no_coverage]` is applied directly to a function
+ fn check_no_coverage(
+ &self,
+ hir_id: HirId,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ // no_coverage on function is fine
+ Target::Fn
+ | Target::Closure
+ | Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
+
+ // function prototypes can't be covered
+ Target::Method(MethodKind::Trait { body: false }) | Target::ForeignFn => {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::IgnoredNoCoverageFnProto,
+ );
+ true
+ }
+
+ Target::Mod | Target::ForeignMod | Target::Impl | Target::Trait => {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::IgnoredNoCoveragePropagate,
+ );
+ true
+ }
+
+ Target::Expression | Target::Statement | Target::Arm => {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::IgnoredNoCoverageFnDefn,
+ );
+ true
+ }
+
+ _ => {
+ self.tcx.sess.emit_err(errors::IgnoredNoCoverageNotCoverable {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ }
+ }
+ }
+
+ fn check_generic_attr(
+ &self,
+ hir_id: HirId,
+ attr: &Attribute,
+ target: Target,
+ allowed_targets: &[Target],
+ ) {
+ if !allowed_targets.iter().any(|t| t == &target) {
+ let name = attr.name_or_empty();
+ let mut i = allowed_targets.iter();
+ // Pluralize
+ let b = i.next().map_or_else(String::new, |t| t.to_string() + "s");
+ let supported_names = i.enumerate().fold(b, |mut b, (i, allowed_target)| {
+ if allowed_targets.len() > 2 && i == allowed_targets.len() - 2 {
+ b.push_str(", and ");
+ } else if allowed_targets.len() == 2 && i == allowed_targets.len() - 2 {
+ b.push_str(" and ");
+ } else {
+ b.push_str(", ");
+ }
+ // Pluralize
+ b.push_str(&(allowed_target.to_string() + "s"));
+ b
+ });
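+ // e.g. (illustrative, assuming `Target::Fn` displays as "function"):
+ // for `allowed_targets == &[Target::Fn]`, `supported_names` is
+ // "functions", so the lint below reads
+ // "`#[ignore]` only has an effect on functions".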
+ self.tcx.struct_span_lint_hir(UNUSED_ATTRIBUTES, hir_id, attr.span, |lint| {
+ lint.build(&format!("`#[{name}]` only has an effect on {}", supported_names))
+ .emit();
+ });
+ }
+ }
+
+ /// Checks if `#[naked]` is applied to a function definition.
+ fn check_naked(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::Fn
+ | Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
+ // `#[naked]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to be compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "naked");
+ true
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::AttrShouldBeAppliedToFn {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ }
+ }
+ }
+
+ /// Checks if `#[cmse_nonsecure_entry]` is applied to a function definition.
+ fn check_cmse_nonsecure_entry(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::Fn
+ | Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
+ _ => {
+ self.tcx.sess.emit_err(errors::AttrShouldBeAppliedToFn {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ }
+ }
+ }
+
+ /// Checks if a `#[track_caller]` is applied to a non-naked function. Returns `true` if valid.
+ fn check_track_caller(
+ &self,
+ hir_id: HirId,
+ attr_span: Span,
+ attrs: &[Attribute],
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ _ if attrs.iter().any(|attr| attr.has_name(sym::naked)) => {
+ self.tcx.sess.emit_err(errors::NakedTrackedCaller { attr_span });
+ false
+ }
+ Target::Fn | Target::Method(..) | Target::ForeignFn | Target::Closure => true,
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
+ // `#[track_caller]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to be compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ for attr in attrs {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "track_caller");
+ }
+ true
+ }
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(errors::TrackedCallerWrongLocation { attr_span, defn_span: span });
+ false
+ }
+ }
+ }
+
+ /// Checks if the `#[non_exhaustive]` attribute on an `item` is valid. Returns `true` if valid.
+ fn check_non_exhaustive(
+ &self,
+ hir_id: HirId,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ Target::Struct | Target::Enum | Target::Variant => true,
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
+ // `#[non_exhaustive]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to be compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "non_exhaustive");
+ true
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::NonExhaustiveWrongLocation {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ }
+ }
+ }
+
+ /// Checks if the `#[marker]` attribute on an `item` is valid. Returns `true` if valid.
+ fn check_marker(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::Trait => true,
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
+ // `#[marker]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to be compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "marker");
+ true
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::AttrShouldBeAppliedToTrait {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ }
+ }
+ }
+
+ /// Checks if the `#[rustc_must_implement_one_of]` attribute on a `target` is valid. Returns `true` if valid.
+ fn check_rustc_must_implement_one_of(
+ &self,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ Target::Trait => true,
+ _ => {
+ self.tcx.sess.emit_err(errors::AttrShouldBeAppliedToTrait {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ }
+ }
+ }
+
+ /// Checks if the `#[target_feature]` attribute on `item` is valid. Returns `true` if valid.
+ fn check_target_feature(
+ &self,
+ hir_id: HirId,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ Target::Fn
+ | Target::Method(MethodKind::Trait { body: true } | MethodKind::Inherent) => true,
+ // FIXME: #[target_feature] was previously erroneously allowed on statements and some
+ // crates used this, so only emit a warning.
+ Target::Statement => {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::TargetFeatureOnStatement,
+ );
+ true
+ }
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have a
+ // `#[target_feature]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to be compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "target_feature");
+ true
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::AttrShouldBeAppliedToFn {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ }
+ }
+ }
+
+ /// Checks if the `#[thread_local]` attribute on `item` is valid. Returns `true` if valid.
+ fn check_thread_local(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::ForeignStatic | Target::Static => true,
+ _ => {
+ self.tcx.sess.emit_err(errors::AttrShouldBeAppliedToStatic {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ }
+ }
+ }
+
+ fn doc_attr_str_error(&self, meta: &NestedMetaItem, attr_name: &str) {
+ self.tcx.sess.emit_err(errors::DocExpectStr { attr_span: meta.span(), attr_name });
+ }
+
+ fn check_doc_alias_value(
+ &self,
+ meta: &NestedMetaItem,
+ doc_alias: Symbol,
+ hir_id: HirId,
+ target: Target,
+ is_list: bool,
+ aliases: &mut FxHashMap<String, Span>,
+ ) -> bool {
+ let tcx = self.tcx;
+ let span = meta.name_value_literal_span().unwrap_or_else(|| meta.span());
+ let attr_str =
+ &format!("`#[doc(alias{})]`", if is_list { "(\"...\")" } else { " = \"...\"" });
+ if doc_alias == kw::Empty {
+ tcx.sess.emit_err(errors::DocAliasEmpty { span, attr_str });
+ return false;
+ }
+
+ let doc_alias_str = doc_alias.as_str();
+ if let Some(c) = doc_alias_str
+ .chars()
+ .find(|&c| c == '"' || c == '\'' || (c.is_whitespace() && c != ' '))
+ {
+ tcx.sess.emit_err(errors::DocAliasBadChar { span, attr_str, char_: c });
+ return false;
+ }
+ if doc_alias_str.starts_with(' ') || doc_alias_str.ends_with(' ') {
+ tcx.sess.emit_err(errors::DocAliasStartEnd { span, attr_str });
+ return false;
+ }
+
+ let span = meta.span();
+ if let Some(location) = match target {
+ Target::AssocTy => {
+ let parent_hir_id = self.tcx.hir().get_parent_item(hir_id);
+ let containing_item = self.tcx.hir().expect_item(parent_hir_id);
+ if Target::from_item(containing_item) == Target::Impl {
+ Some("type alias in implementation block")
+ } else {
+ None
+ }
+ }
+ Target::AssocConst => {
+ let parent_hir_id = self.tcx.hir().get_parent_item(hir_id);
+ let containing_item = self.tcx.hir().expect_item(parent_hir_id);
+ // We can't link to trait impl's consts.
+ let err = "associated constant in trait implementation block";
+ match containing_item.kind {
+ ItemKind::Impl(hir::Impl { of_trait: Some(_), .. }) => Some(err),
+ _ => None,
+ }
+ }
+ // we check the validity of params elsewhere
+ Target::Param => return false,
+ Target::Expression
+ | Target::Statement
+ | Target::Arm
+ | Target::ForeignMod
+ | Target::Closure
+ | Target::Impl => Some(target.name()),
+ Target::ExternCrate
+ | Target::Use
+ | Target::Static
+ | Target::Const
+ | Target::Fn
+ | Target::Mod
+ | Target::GlobalAsm
+ | Target::TyAlias
+ | Target::OpaqueTy
+ | Target::Enum
+ | Target::Variant
+ | Target::Struct
+ | Target::Field
+ | Target::Union
+ | Target::Trait
+ | Target::TraitAlias
+ | Target::Method(..)
+ | Target::ForeignFn
+ | Target::ForeignStatic
+ | Target::ForeignTy
+ | Target::GenericParam(..)
+ | Target::MacroDef => None,
+ } {
+ tcx.sess.emit_err(errors::DocAliasBadLocation { span, attr_str, location });
+ return false;
+ }
+ let item_name = self.tcx.hir().name(hir_id);
+ if item_name == doc_alias {
+ tcx.sess.emit_err(errors::DocAliasNotAnAlias { span, attr_str });
+ return false;
+ }
+ if let Err(entry) = aliases.try_insert(doc_alias_str.to_owned(), span) {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ span,
+ errors::DocAliasDuplicated { first_defn: *entry.entry.get() },
+ );
+ }
+ true
+ }
+
+ fn check_doc_alias(
+ &self,
+ meta: &NestedMetaItem,
+ hir_id: HirId,
+ target: Target,
+ aliases: &mut FxHashMap<String, Span>,
+ ) -> bool {
+ if let Some(values) = meta.meta_item_list() {
+ let mut errors = 0;
+ for v in values {
+ match v.literal() {
+ Some(l) => match l.kind {
+ LitKind::Str(s, _) => {
+ if !self.check_doc_alias_value(v, s, hir_id, target, true, aliases) {
+ errors += 1;
+ }
+ }
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(errors::DocAliasNotStringLiteral { span: v.span() });
+ errors += 1;
+ }
+ },
+ None => {
+ self.tcx.sess.emit_err(errors::DocAliasNotStringLiteral { span: v.span() });
+ errors += 1;
+ }
+ }
+ }
+ errors == 0
+ } else if let Some(doc_alias) = meta.value_str() {
+ self.check_doc_alias_value(meta, doc_alias, hir_id, target, false, aliases)
+ } else {
+ self.tcx.sess.emit_err(errors::DocAliasMalformed { span: meta.span() });
+ false
+ }
+ }
+
+ fn check_doc_keyword(&self, meta: &NestedMetaItem, hir_id: HirId) -> bool {
+ let doc_keyword = meta.value_str().unwrap_or(kw::Empty);
+ if doc_keyword == kw::Empty {
+ self.doc_attr_str_error(meta, "keyword");
+ return false;
+ }
+ match self.tcx.hir().find(hir_id).and_then(|node| match node {
+ hir::Node::Item(item) => Some(&item.kind),
+ _ => None,
+ }) {
+ Some(ItemKind::Mod(ref module)) => {
+ if !module.item_ids.is_empty() {
+ self.tcx.sess.emit_err(errors::DocKeywordEmptyMod { span: meta.span() });
+ return false;
+ }
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::DocKeywordNotMod { span: meta.span() });
+ return false;
+ }
+ }
+ if !rustc_lexer::is_ident(doc_keyword.as_str()) {
+ self.tcx.sess.emit_err(errors::DocKeywordInvalidIdent {
+ span: meta.name_value_literal_span().unwrap_or_else(|| meta.span()),
+ doc_keyword,
+ });
+ return false;
+ }
+ true
+ }
+
+ fn check_doc_fake_variadic(&self, meta: &NestedMetaItem, hir_id: HirId) -> bool {
+ match self.tcx.hir().find(hir_id).and_then(|node| match node {
+ hir::Node::Item(item) => Some(&item.kind),
+ _ => None,
+ }) {
+ Some(ItemKind::Impl(ref i)) => {
+ let is_valid = matches!(&i.self_ty.kind, hir::TyKind::Tup([_]))
+ || if let hir::TyKind::BareFn(bare_fn_ty) = &i.self_ty.kind {
+ bare_fn_ty.decl.inputs.len() == 1
+ } else {
+ false
+ };
+ if !is_valid {
+ self.tcx.sess.emit_err(errors::DocFakeVariadicNotValid { span: meta.span() });
+ return false;
+ }
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::DocKeywordOnlyImpl { span: meta.span() });
+ return false;
+ }
+ }
+ true
+ }
+
+ /// Checks `#[doc(inline)]`/`#[doc(no_inline)]` attributes. Returns `true` if valid.
+ ///
+ /// A doc inlining attribute is invalid if it is applied to a non-`use` item, or
+ /// if there are conflicting attributes for one item.
+ ///
+ /// `specified_inline` is used to keep track of whether we have
+ /// already seen an inlining attribute for this item.
+ /// If so, `specified_inline` holds the value and the span of
+ /// the first `inline`/`no_inline` attribute.
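+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// #[doc(inline)]
+ /// #[doc(no_inline)]
+ /// pub use std::vec::Vec; // error: conflicting doc inlining attributes
+ /// ```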
+ fn check_doc_inline(
+ &self,
+ attr: &Attribute,
+ meta: &NestedMetaItem,
+ hir_id: HirId,
+ target: Target,
+ specified_inline: &mut Option<(bool, Span)>,
+ ) -> bool {
+ if target == Target::Use || target == Target::ExternCrate {
+ let do_inline = meta.name_or_empty() == sym::inline;
+ if let Some((prev_inline, prev_span)) = *specified_inline {
+ if do_inline != prev_inline {
+ let mut spans = MultiSpan::from_spans(vec![prev_span, meta.span()]);
+ spans.push_span_label(prev_span, fluent::passes::doc_inline_conflict_first);
+ spans.push_span_label(meta.span(), fluent::passes::doc_inline_conflict_second);
+ self.tcx.sess.emit_err(errors::DocKeywordConflict { spans });
+ return false;
+ }
+ true
+ } else {
+ *specified_inline = Some((do_inline, meta.span()));
+ true
+ }
+ } else {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ errors::DocInlineOnlyUse {
+ attr_span: meta.span(),
+ item_span: (attr.style == AttrStyle::Outer)
+ .then(|| self.tcx.hir().span(hir_id)),
+ },
+ );
+ false
+ }
+ }
+
+ /// Checks that an attribute is *not* used at the crate level. Returns `true` if valid.
+ fn check_attr_not_crate_level(
+ &self,
+ meta: &NestedMetaItem,
+ hir_id: HirId,
+ attr_name: &str,
+ ) -> bool {
+ if CRATE_HIR_ID == hir_id {
+ self.tcx.sess.emit_err(errors::DocAttrNotCrateLevel { span: meta.span(), attr_name });
+ return false;
+ }
+ true
+ }
+
+ /// Checks that an attribute is used at the crate level. Returns `true` if valid.
+ fn check_attr_crate_level(
+ &self,
+ attr: &Attribute,
+ meta: &NestedMetaItem,
+ hir_id: HirId,
+ ) -> bool {
+ if hir_id != CRATE_HIR_ID {
+ self.tcx.struct_span_lint_hir(INVALID_DOC_ATTRIBUTES, hir_id, meta.span(), |lint| {
+ let mut err = lint.build(fluent::passes::attr_crate_level);
+ if attr.style == AttrStyle::Outer
+ && self.tcx.hir().get_parent_item(hir_id) == CRATE_DEF_ID
+ {
+ if let Ok(mut src) = self.tcx.sess.source_map().span_to_snippet(attr.span) {
+ src.insert(1, '!');
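+ // e.g. (illustrative): this turns an outer `#[doc(...)]` snippet
+ // into the inner form `#![doc(...)]`.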
+ err.span_suggestion_verbose(
+ attr.span,
+ fluent::passes::suggestion,
+ src,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_help(attr.span, fluent::passes::help);
+ }
+ }
+ err.note(fluent::passes::note).emit();
+ });
+ return false;
+ }
+ true
+ }
+
+ /// Checks that `doc(test(...))` attribute contains only valid attributes. Returns `true` if
+ /// valid.
+ fn check_test_attr(&self, meta: &NestedMetaItem, hir_id: HirId) -> bool {
+ let mut is_valid = true;
+ if let Some(metas) = meta.meta_item_list() {
+ for i_meta in metas {
+ match i_meta.name_or_empty() {
+ sym::attr | sym::no_crate_inject => {}
+ _ => {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ i_meta.span(),
+ errors::DocTestUnknown {
+ path: rustc_ast_pretty::pprust::path_to_string(
+ &i_meta.meta_item().unwrap().path,
+ ),
+ },
+ );
+ is_valid = false;
+ }
+ }
+ }
+ } else {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ errors::DocTestTakesList,
+ );
+ is_valid = false;
+ }
+ is_valid
+ }
+
+ /// Runs various checks on `#[doc]` attributes. Returns `true` if valid.
+ ///
+ /// `specified_inline` should be initialized to `None` and kept for the scope
+ /// of one item. Read the documentation of [`check_doc_inline`] for more information.
+ ///
+ /// [`check_doc_inline`]: Self::check_doc_inline
+ fn check_doc_attrs(
+ &self,
+ attr: &Attribute,
+ hir_id: HirId,
+ target: Target,
+ specified_inline: &mut Option<(bool, Span)>,
+ aliases: &mut FxHashMap<String, Span>,
+ ) -> bool {
+ let mut is_valid = true;
+
+ if let Some(mi) = attr.meta() && let Some(list) = mi.meta_item_list() {
+ for meta in list {
+ if let Some(i_meta) = meta.meta_item() {
+ match i_meta.name_or_empty() {
+ sym::alias
+ if !self.check_attr_not_crate_level(meta, hir_id, "alias")
+ || !self.check_doc_alias(meta, hir_id, target, aliases) =>
+ {
+ is_valid = false
+ }
+
+ sym::keyword
+ if !self.check_attr_not_crate_level(meta, hir_id, "keyword")
+ || !self.check_doc_keyword(meta, hir_id) =>
+ {
+ is_valid = false
+ }
+
+ sym::fake_variadic
+ if !self.check_attr_not_crate_level(meta, hir_id, "fake_variadic")
+ || !self.check_doc_fake_variadic(meta, hir_id) =>
+ {
+ is_valid = false
+ }
+
+ sym::html_favicon_url
+ | sym::html_logo_url
+ | sym::html_playground_url
+ | sym::issue_tracker_base_url
+ | sym::html_root_url
+ | sym::html_no_source
+ | sym::test
+ if !self.check_attr_crate_level(attr, meta, hir_id) =>
+ {
+ is_valid = false;
+ }
+
+ sym::inline | sym::no_inline
+ if !self.check_doc_inline(
+ attr,
+ meta,
+ hir_id,
+ target,
+ specified_inline,
+ ) =>
+ {
+ is_valid = false;
+ }
+
+ // no_default_passes: deprecated
+ // passes: deprecated
+ // plugins: removed, but rustdoc warns about it itself
+ sym::alias
+ | sym::cfg
+ | sym::cfg_hide
+ | sym::hidden
+ | sym::html_favicon_url
+ | sym::html_logo_url
+ | sym::html_no_source
+ | sym::html_playground_url
+ | sym::html_root_url
+ | sym::inline
+ | sym::issue_tracker_base_url
+ | sym::keyword
+ | sym::masked
+ | sym::no_default_passes
+ | sym::no_inline
+ | sym::notable_trait
+ | sym::passes
+ | sym::plugins
+ | sym::fake_variadic => {}
+
+ sym::test => {
+ if !self.check_test_attr(meta, hir_id) {
+ is_valid = false;
+ }
+ }
+
+ sym::primitive => {
+ if !self.tcx.features().rustdoc_internals {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ i_meta.span,
+ errors::DocPrimitive,
+ );
+ }
+ }
+
+ _ => {
+ let path = rustc_ast_pretty::pprust::path_to_string(&i_meta.path);
+ if i_meta.has_name(sym::spotlight) {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ i_meta.span,
+ errors::DocTestUnknownSpotlight {
+ path,
+ span: i_meta.span
+ }
+ );
+ } else if i_meta.has_name(sym::include) &&
+ let Some(value) = i_meta.value_str() {
+ let applicability = if list.len() == 1 {
+ Applicability::MachineApplicable
+ } else {
+ Applicability::MaybeIncorrect
+ };
+ // If there are multiple attributes, the suggestion would suggest
+ // deleting all of them, which is incorrect.
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ i_meta.span,
+ errors::DocTestUnknownInclude {
+ path,
+ value: value.to_string(),
+ inner: (attr.style == AttrStyle::Inner)
+ .then_some("!")
+ .unwrap_or(""),
+ sugg: (attr.meta().unwrap().span, applicability),
+ }
+ );
+ } else {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ i_meta.span,
+ errors::DocTestUnknownAny { path }
+ );
+ }
+ is_valid = false;
+ }
+ }
+ } else {
+ self.tcx.emit_spanned_lint(
+ INVALID_DOC_ATTRIBUTES,
+ hir_id,
+ meta.span(),
+ errors::DocInvalid,
+ );
+ is_valid = false;
+ }
+ }
+ }
+
+ is_valid
+ }
+
+ /// Checks that `#[pass_by_value]` is only applied to a struct, enum, or type alias.
+ /// Returns `true` if valid.
+ fn check_pass_by_value(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::Struct | Target::Enum | Target::TyAlias => true,
+ _ => {
+ self.tcx.sess.emit_err(errors::PassByValue { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ fn check_allow_incoherent_impl(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::Method(MethodKind::Inherent) => true,
+ _ => {
+ self.tcx.sess.emit_err(errors::AllowIncoherentImpl { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ fn check_has_incoherent_inherent_impls(
+ &self,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ Target::Trait | Target::Struct | Target::Enum | Target::Union | Target::ForeignTy => {
+ true
+ }
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(errors::HasIncoherentInherentImpl { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ /// Warns against some misuses of `#[must_use]`
+ fn check_must_use(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
+ let node = self.tcx.hir().get(hir_id);
+ if let Some(kind) = node.fn_kind() && let rustc_hir::IsAsync::Async = kind.asyncness() {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::MustUseAsync { span }
+ );
+ }
+
+ if !matches!(
+ target,
+ Target::Fn
+ | Target::Enum
+ | Target::Struct
+ | Target::Union
+ | Target::Method(_)
+ | Target::ForeignFn
+ // `impl Trait` in return position can trip
+ // `unused_must_use` if `Trait` is marked as
+ // `#[must_use]`
+ | Target::Trait
+ ) {
+ let article = match target {
+ Target::ExternCrate
+ | Target::OpaqueTy
+ | Target::Enum
+ | Target::Impl
+ | Target::Expression
+ | Target::Arm
+ | Target::AssocConst
+ | Target::AssocTy => "an",
+ _ => "a",
+ };
+
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::MustUseNoEffect { article, target },
+ );
+ }
+
+ // For now, it's always valid.
+ true
+ }
+
+ /// Checks that `#[must_not_suspend]` is applied to a struct, enum, union, or trait.
+ /// Returns `true` if valid.
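+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// #[must_not_suspend = "holding a guard across `.await` is a bug"]
+ /// struct Guard<'a>(&'a ()); // OK: struct
+ ///
+ /// #[must_not_suspend] // error: not a struct, enum, union, or trait
+ /// fn f() {}
+ /// ```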
+ fn check_must_not_suspend(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::Struct | Target::Enum | Target::Union | Target::Trait => true,
+ _ => {
+ self.tcx.sess.emit_err(errors::MustNotSuspend { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ /// Checks if `#[cold]` is applied to a non-function.
+ fn check_cold(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) {
+ match target {
+ Target::Fn | Target::Method(..) | Target::ForeignFn | Target::Closure => {}
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have an
+ // `#[cold]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to stay compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "cold");
+ }
+ _ => {
+ // FIXME: #[cold] was previously allowed on non-functions and some crates used
+ // this, so only emit a warning.
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::Cold { span },
+ );
+ }
+ }
+ }
+
+ /// Checks if `#[link]` is applied to an item other than a foreign module.
+ fn check_link(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) {
+ if target == Target::ForeignMod
+ && let hir::Node::Item(item) = self.tcx.hir().get(hir_id)
+ && let Item { kind: ItemKind::ForeignMod { abi, .. }, .. } = item
+ && !matches!(abi, Abi::Rust | Abi::RustIntrinsic | Abi::PlatformIntrinsic)
+ {
+ return;
+ }
+
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::Link { span: (target != Target::ForeignMod).then_some(span) },
+ );
+ }
+
+ /// Checks if `#[link_name]` is applied to an item other than a foreign function or static.
+ fn check_link_name(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) {
+ match target {
+ Target::ForeignFn | Target::ForeignStatic => {}
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have an
+ // `#[link_name]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to stay compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "link_name");
+ }
+ _ => {
+ // FIXME: #[link_name] was previously allowed on non-functions/statics and some crates
+ // used this, so only emit a warning.
+ let attr_span = matches!(target, Target::ForeignMod).then_some(attr.span);
+ if let Some(s) = attr.value_str() {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::LinkName { span, attr_span, value: s.as_str() },
+ );
+ } else {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::LinkName { span, attr_span, value: "..." },
+ );
+ };
+ }
+ }
+ }
+
+ /// Checks if `#[no_link]` is applied to an `extern crate`. Returns `true` if valid.
+ fn check_no_link(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::ExternCrate => true,
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have an
+ // `#[no_link]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to stay compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "no_link");
+ true
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::NoLink { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ fn is_impl_item(&self, hir_id: HirId) -> bool {
+ matches!(self.tcx.hir().get(hir_id), hir::Node::ImplItem(..))
+ }
+
+ /// Checks if `#[export_name]` is applied to a function or static. Returns `true` if valid.
+ fn check_export_name(
+ &self,
+ hir_id: HirId,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ Target::Static | Target::Fn => true,
+ Target::Method(..) if self.is_impl_item(hir_id) => true,
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have an
+ // `#[export_name]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to stay compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "export_name");
+ true
+ }
+ _ => {
+ self.tcx.sess.emit_err(errors::ExportName { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ fn check_rustc_layout_scalar_valid_range(
+ &self,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ if target != Target::Struct {
+ self.tcx.sess.emit_err(errors::RustcLayoutScalarValidRangeNotStruct {
+ attr_span: attr.span,
+ span,
+ });
+ return false;
+ }
+
+ let Some(list) = attr.meta_item_list() else {
+ return false;
+ };
+
+ if matches!(&list[..], &[NestedMetaItem::Literal(Lit { kind: LitKind::Int(..), .. })]) {
+ true
+ } else {
+ self.tcx.sess.emit_err(errors::RustcLayoutScalarValidRangeArg { attr_span: attr.span });
+ false
+ }
+ }
+
+ /// Checks if `#[rustc_legacy_const_generics]` is applied to a function and has a valid argument.
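+ ///
+ /// A hedged sketch of a use this accepts (the function and index are made up):
+ ///
+ /// ```ignore (illustrative)
+ /// #[rustc_legacy_const_generics(1)]
+ /// pub fn foo<const N: usize>(a: usize) -> usize { a + N }
+ /// // Old callers can keep writing `foo(a, 7)`: the `1` records where the const
+ /// // argument sat in the legacy, pre-const-generics signature.
+ /// ```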
+ fn check_rustc_legacy_const_generics(
+ &self,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ item: Option<ItemLike<'_>>,
+ ) -> bool {
+ let is_function = matches!(target, Target::Fn);
+ if !is_function {
+ self.tcx.sess.emit_err(errors::AttrShouldBeAppliedToFn {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ return false;
+ }
+
+ let Some(list) = attr.meta_item_list() else {
+ // The attribute form is validated on AST.
+ return false;
+ };
+
+ let Some(ItemLike::Item(Item {
+ kind: ItemKind::Fn(FnSig { decl, .. }, generics, _),
+ ..
+ })) = item else {
+ bug!("should be a function item");
+ };
+
+ for param in generics.params {
+ match param.kind {
+ hir::GenericParamKind::Const { .. } => {}
+ _ => {
+ self.tcx.sess.emit_err(errors::RustcLegacyConstGenericsOnly {
+ attr_span: attr.span,
+ param_span: param.span,
+ });
+ return false;
+ }
+ }
+ }
+
+ if list.len() != generics.params.len() {
+ self.tcx.sess.emit_err(errors::RustcLegacyConstGenericsIndex {
+ attr_span: attr.span,
+ generics_span: generics.span,
+ });
+ return false;
+ }
+
+ let arg_count = decl.inputs.len() as u128 + generics.params.len() as u128;
+ let mut invalid_args = vec![];
+ for meta in list {
+ if let Some(LitKind::Int(val, _)) = meta.literal().map(|lit| &lit.kind) {
+ if *val >= arg_count {
+ let span = meta.span();
+ self.tcx.sess.emit_err(errors::RustcLegacyConstGenericsIndexExceed {
+ span,
+ arg_count: arg_count as usize,
+ });
+ return false;
+ }
+ } else {
+ invalid_args.push(meta.span());
+ }
+ }
+
+ if !invalid_args.is_empty() {
+ self.tcx.sess.emit_err(errors::RustcLegacyConstGenericsIndexNegative { invalid_args });
+ false
+ } else {
+ true
+ }
+ }
+
+ /// Helper function for checking that the provided attribute is only applied to a function or
+ /// method.
+ fn check_applied_to_fn_or_method(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ let is_function = matches!(target, Target::Fn | Target::Method(..));
+ if !is_function {
+ self.tcx.sess.emit_err(errors::AttrShouldBeAppliedToFn {
+ attr_span: attr.span,
+ defn_span: span,
+ });
+ false
+ } else {
+ true
+ }
+ }
+
+ /// Checks that the `#[rustc_lint_query_instability]` attribute is only applied to a function
+ /// or method.
+ fn check_rustc_lint_query_instability(
+ &self,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ self.check_applied_to_fn_or_method(attr, span, target)
+ }
+
+ /// Checks that the `#[rustc_lint_diagnostics]` attribute is only applied to a function or
+ /// method.
+ fn check_rustc_lint_diagnostics(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ self.check_applied_to_fn_or_method(attr, span, target)
+ }
+
+ /// Checks that the `#[rustc_lint_opt_ty]` attribute is only applied to a struct.
+ fn check_rustc_lint_opt_ty(&self, attr: &Attribute, span: Span, target: Target) -> bool {
+ match target {
+ Target::Struct => true,
+ _ => {
+ self.tcx.sess.emit_err(errors::RustcLintOptTy { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ /// Checks that the `#[rustc_lint_opt_deny_field_access]` attribute is only applied to a field.
+ fn check_rustc_lint_opt_deny_field_access(
+ &self,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ Target::Field => true,
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(errors::RustcLintOptDenyFieldAccess { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ /// Checks that the dep-graph debugging attributes are only present when the query-dep-graph
+ /// option is passed to the compiler.
+ fn check_rustc_dirty_clean(&self, attr: &Attribute) -> bool {
+ if self.tcx.sess.opts.unstable_opts.query_dep_graph {
+ true
+ } else {
+ self.tcx.sess.emit_err(errors::RustcDirtyClean { span: attr.span });
+ false
+ }
+ }
+
+ /// Checks if `#[link_section]` is applied to a function or static.
+ fn check_link_section(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) {
+ match target {
+ Target::Static | Target::Fn | Target::Method(..) => {}
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have an
+ // `#[link_section]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to stay compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "link_section");
+ }
+ _ => {
+ // FIXME: #[link_section] was previously allowed on non-functions/statics and some
+ // crates used this, so only emit a warning.
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::LinkSection { span },
+ );
+ }
+ }
+ }
+
+ /// Checks if `#[no_mangle]` is applied to a function or static.
+ fn check_no_mangle(&self, hir_id: HirId, attr: &Attribute, span: Span, target: Target) {
+ match target {
+ Target::Static | Target::Fn => {}
+ Target::Method(..) if self.is_impl_item(hir_id) => {}
+ // FIXME(#80564): We permit struct fields, match arms and macro defs to have an
+ // `#[no_mangle]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to stay compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "no_mangle");
+ }
+ // FIXME: #[no_mangle] was previously allowed on non-functions/statics; this should be an error.
+ // The error should specify that the offending item is specifically a *foreign* fn/static,
+ // otherwise the error seems odd.
+ Target::ForeignFn | Target::ForeignStatic => {
+ let foreign_item_kind = match target {
+ Target::ForeignFn => "function",
+ Target::ForeignStatic => "static",
+ _ => unreachable!(),
+ };
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::NoMangleForeign { span, attr_span: attr.span, foreign_item_kind },
+ );
+ }
+ _ => {
+ // FIXME: #[no_mangle] was previously allowed on non-functions/statics and some
+ // crates used this, so only emit a warning.
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::NoMangle { span },
+ );
+ }
+ }
+ }
+
+ /// Checks if the `#[repr]` attributes on `item` are valid.
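+ ///
+ /// For example (illustrative), `transparent` cannot be combined with other hints,
+ /// and integer reprs are rejected on non-enums:
+ ///
+ /// ```ignore (illustrative)
+ /// #[repr(transparent, C)] // error[E0692]: transparent struct cannot have other repr hints
+ /// struct Grams(f32);
+ ///
+ /// #[repr(u8)] // error[E0517]: attribute should be applied to an enum
+ /// struct Byteish(u8);
+ /// ```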
+ fn check_repr(
+ &self,
+ attrs: &[Attribute],
+ span: Span,
+ target: Target,
+ item: Option<ItemLike<'_>>,
+ hir_id: HirId,
+ ) {
+ // Extract the names of all repr hints, e.g., [foo, bar, align] for:
+ // ```
+ // #[repr(foo)]
+ // #[repr(bar, align(8))]
+ // ```
+ let hints: Vec<_> = attrs
+ .iter()
+ .filter(|attr| attr.has_name(sym::repr))
+ .filter_map(|attr| attr.meta_item_list())
+ .flatten()
+ .collect();
+
+ let mut int_reprs = 0;
+ let mut is_c = false;
+ let mut is_simd = false;
+ let mut is_transparent = false;
+
+ for hint in &hints {
+ if !hint.is_meta_item() {
+ self.tcx.sess.emit_err(errors::ReprIdent { span: hint.span() });
+ continue;
+ }
+
+ let (article, allowed_targets) = match hint.name_or_empty() {
+ sym::C => {
+ is_c = true;
+ match target {
+ Target::Struct | Target::Union | Target::Enum => continue,
+ _ => ("a", "struct, enum, or union"),
+ }
+ }
+ sym::align => {
+ if let (Target::Fn, false) = (target, self.tcx.features().fn_align) {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::fn_align,
+ hint.span(),
+ "`repr(align)` attributes on functions are unstable",
+ )
+ .emit();
+ }
+
+ match target {
+ Target::Struct | Target::Union | Target::Enum | Target::Fn => continue,
+ _ => ("a", "struct, enum, function, or union"),
+ }
+ }
+ sym::packed => {
+ if target != Target::Struct && target != Target::Union {
+ ("a", "struct or union")
+ } else {
+ continue;
+ }
+ }
+ sym::simd => {
+ is_simd = true;
+ if target != Target::Struct {
+ ("a", "struct")
+ } else {
+ continue;
+ }
+ }
+ sym::transparent => {
+ is_transparent = true;
+ match target {
+ Target::Struct | Target::Union | Target::Enum => continue,
+ _ => ("a", "struct, enum, or union"),
+ }
+ }
+ sym::i8
+ | sym::u8
+ | sym::i16
+ | sym::u16
+ | sym::i32
+ | sym::u32
+ | sym::i64
+ | sym::u64
+ | sym::i128
+ | sym::u128
+ | sym::isize
+ | sym::usize => {
+ int_reprs += 1;
+ if target != Target::Enum {
+ ("an", "enum")
+ } else {
+ continue;
+ }
+ }
+ _ => {
+ struct_span_err!(
+ self.tcx.sess,
+ hint.span(),
+ E0552,
+ "unrecognized representation hint"
+ )
+ .emit();
+
+ continue;
+ }
+ };
+
+ struct_span_err!(
+ self.tcx.sess,
+ hint.span(),
+ E0517,
+ "{}",
+ &format!("attribute should be applied to {article} {allowed_targets}")
+ )
+ .span_label(span, &format!("not {article} {allowed_targets}"))
+ .emit();
+ }
+
+ // Just point at all repr hints if there are any incompatibilities.
+ // This is not ideal, but tracking precisely which ones are at fault is a huge hassle.
+ let hint_spans = hints.iter().map(|hint| hint.span());
+
+ // Error on repr(transparent, <anything else>).
+ if is_transparent && hints.len() > 1 {
+ let hint_spans: Vec<_> = hint_spans.clone().collect();
+ struct_span_err!(
+ self.tcx.sess,
+ hint_spans,
+ E0692,
+ "transparent {} cannot have other repr hints",
+ target
+ )
+ .emit();
+ }
+ // Warn on repr(u8, u16), repr(C, simd), and c-like-enum-repr(C, u8)
+ if (int_reprs > 1)
+ || (is_simd && is_c)
+ || (int_reprs == 1
+ && is_c
+ && item.map_or(false, |item| {
+ if let ItemLike::Item(item) = item {
+ return is_c_like_enum(item);
+ }
+ return false;
+ }))
+ {
+ self.tcx.emit_spanned_lint(
+ CONFLICTING_REPR_HINTS,
+ hir_id,
+ hint_spans.collect::<Vec<Span>>(),
+ errors::ReprConflicting,
+ );
+ }
+ }
+
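+ /// Checks `#[used]`: it is only valid on statics, and the explicit
+ /// `used(compiler)`/`used(linker)` forms are mutually exclusive on one item.
+ /// A sketch of the conflict (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// #[used(compiler)]
+ /// #[used(linker)] // error: the two forms cannot be combined
+ /// static KEEP: [u8; 1] = [0];
+ /// ```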
+ fn check_used(&self, attrs: &[Attribute], target: Target) {
+ let mut used_linker_span = None;
+ let mut used_compiler_span = None;
+ for attr in attrs.iter().filter(|attr| attr.has_name(sym::used)) {
+ if target != Target::Static {
+ self.tcx.sess.emit_err(errors::UsedStatic { span: attr.span });
+ }
+ let inner = attr.meta_item_list();
+ match inner.as_deref() {
+ Some([item]) if item.has_name(sym::linker) => {
+ if used_linker_span.is_none() {
+ used_linker_span = Some(attr.span);
+ }
+ }
+ Some([item]) if item.has_name(sym::compiler) => {
+ if used_compiler_span.is_none() {
+ used_compiler_span = Some(attr.span);
+ }
+ }
+ Some(_) => {
+ // This error case is handled in rustc_typeck::collect.
+ }
+ None => {
+ // Default case (compiler) when arg isn't defined.
+ if used_compiler_span.is_none() {
+ used_compiler_span = Some(attr.span);
+ }
+ }
+ }
+ }
+ if let (Some(linker_span), Some(compiler_span)) = (used_linker_span, used_compiler_span) {
+ self.tcx
+ .sess
+ .emit_err(errors::UsedCompilerLinker { spans: vec![linker_span, compiler_span] });
+ }
+ }
+
+ /// Outputs an error for `#[allow_internal_unstable]`, which can only be applied to macro
+ /// definitions (functions that are proc macros are also allowed).
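+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// #[allow_internal_unstable(core_intrinsics)]
+ /// macro_rules! m { () => {} } // OK: macro definition
+ ///
+ /// #[allow_internal_unstable(core_intrinsics)] // error: not a macro
+ /// struct S;
+ /// ```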
+ fn check_allow_internal_unstable(
+ &self,
+ hir_id: HirId,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ attrs: &[Attribute],
+ ) -> bool {
+ debug!("Checking target: {:?}", target);
+ match target {
+ Target::Fn => {
+ for attr in attrs {
+ if self.tcx.sess.is_proc_macro_attr(attr) {
+ debug!("Is proc macro attr");
+ return true;
+ }
+ }
+ debug!("Is not proc macro attr");
+ false
+ }
+ Target::MacroDef => true,
+ // FIXME(#80564): We permit struct fields and match arms to have an
+ // `#[allow_internal_unstable]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to stay compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm => {
+ self.inline_attr_str_error_without_macro_def(
+ hir_id,
+ attr,
+ "allow_internal_unstable",
+ );
+ true
+ }
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(errors::AllowInternalUnstable { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ /// Checks if the items on the `#[debugger_visualizer]` attribute are valid.
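+ ///
+ /// A sketch of the accepted forms (the paths are made up; the file must exist and
+ /// be readable, resolved relative to the containing source file):
+ ///
+ /// ```ignore (illustrative)
+ /// #![debugger_visualizer(natvis_file = "../MyType.natvis")]
+ /// #![debugger_visualizer(gdb_script_file = "../my_type.py")]
+ /// ```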
+ fn check_debugger_visualizer(&self, attr: &Attribute, target: Target) -> bool {
+ match target {
+ Target::Mod => {}
+ _ => {
+ self.tcx.sess.emit_err(errors::DebugVisualizerPlacement { span: attr.span });
+ return false;
+ }
+ }
+
+ let Some(hints) = attr.meta_item_list() else {
+ self.tcx.sess.emit_err(errors::DebugVisualizerInvalid { span: attr.span });
+ return false;
+ };
+
+ let hint = match hints.len() {
+ 1 => &hints[0],
+ _ => {
+ self.tcx.sess.emit_err(errors::DebugVisualizerInvalid { span: attr.span });
+ return false;
+ }
+ };
+
+ let Some(meta_item) = hint.meta_item() else {
+ self.tcx.sess.emit_err(errors::DebugVisualizerInvalid { span: attr.span });
+ return false;
+ };
+
+ let visualizer_path = match (meta_item.name_or_empty(), meta_item.value_str()) {
+ (sym::natvis_file, Some(value)) => value,
+ (sym::gdb_script_file, Some(value)) => value,
+ (_, _) => {
+ self.tcx.sess.emit_err(errors::DebugVisualizerInvalid { span: meta_item.span });
+ return false;
+ }
+ };
+
+ let file =
+ match resolve_path(&self.tcx.sess.parse_sess, visualizer_path.as_str(), attr.span) {
+ Ok(file) => file,
+ Err(mut err) => {
+ err.emit();
+ return false;
+ }
+ };
+
+ match std::fs::File::open(&file) {
+ Ok(_) => true,
+ Err(err) => {
+ self.tcx
+ .sess
+ .struct_span_err(
+ meta_item.span,
+ &format!("couldn't read {}: {}", file.display(), err),
+ )
+ .emit();
+ false
+ }
+ }
+ }
+
+ /// Outputs an error for `#[rustc_allow_const_fn_unstable]` when it is not applied to a
+ /// `const fn`.
+ fn check_rustc_allow_const_fn_unstable(
+ &self,
+ hir_id: HirId,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ Target::Fn | Target::Method(_)
+ if self.tcx.is_const_fn_raw(self.tcx.hir().local_def_id(hir_id).to_def_id()) =>
+ {
+ true
+ }
+ // FIXME(#80564): We permit struct fields and match arms to have an
+ // `#[rustc_allow_const_fn_unstable]` attribute with just a lint, because we previously
+ // erroneously allowed it and some crates used it accidentally; to stay compatible
+ // with crates depending on them, we can't throw an error here.
+ Target::Field | Target::Arm | Target::MacroDef => {
+ self.inline_attr_str_error_with_macro_def(hir_id, attr, "allow_internal_unstable");
+ true
+ }
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(errors::RustcAllowConstFnUnstable { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ fn check_rustc_std_internal_symbol(
+ &self,
+ attr: &Attribute,
+ span: Span,
+ target: Target,
+ ) -> bool {
+ match target {
+ Target::Fn | Target::Static => true,
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(errors::RustcStdInternalSymbol { attr_span: attr.span, span });
+ false
+ }
+ }
+ }
+
+ /// `#[const_trait]` only applies to traits.
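+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// #[const_trait] // OK: trait
+ /// trait Resize {
+ ///     fn resize(&mut self, n: usize);
+ /// }
+ /// ```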
+ fn check_const_trait(&self, attr: &Attribute, _span: Span, target: Target) -> bool {
+ match target {
+ Target::Trait => true,
+ _ => {
+ self.tcx.sess.emit_err(errors::ConstTrait { attr_span: attr.span });
+ false
+ }
+ }
+ }
+
+ fn check_stability_promotable(&self, attr: &Attribute, _span: Span, target: Target) -> bool {
+ match target {
+ Target::Expression => {
+ self.tcx.sess.emit_err(errors::StabilityPromotable { attr_span: attr.span });
+ false
+ }
+ _ => true,
+ }
+ }
+
+ fn check_deprecated(&self, hir_id: HirId, attr: &Attribute, _span: Span, target: Target) {
+ match target {
+ Target::Closure | Target::Expression | Target::Statement | Target::Arm => {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::Deprecated,
+ );
+ }
+ _ => {}
+ }
+ }
+
+ fn check_macro_use(&self, hir_id: HirId, attr: &Attribute, target: Target) {
+ let name = attr.name_or_empty();
+ match target {
+ Target::ExternCrate | Target::Mod => {}
+ _ => {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::MacroUse { name },
+ );
+ }
+ }
+ }
+
+ fn check_macro_export(&self, hir_id: HirId, attr: &Attribute, target: Target) {
+ if target != Target::MacroDef {
+ self.tcx.emit_spanned_lint(UNUSED_ATTRIBUTES, hir_id, attr.span, errors::MacroExport);
+ }
+ }
+
+ fn check_plugin_registrar(&self, hir_id: HirId, attr: &Attribute, target: Target) {
+ if target != Target::Fn {
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::PluginRegistrar,
+ );
+ }
+ }
+
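+ /// Lints attributes that cannot have any effect as written, e.g. (illustrative)
+ /// an empty lint list or a lint level that names only a `reason`:
+ ///
+ /// ```ignore (illustrative)
+ /// #[allow()] // lint: unused attribute (empty list)
+ /// #[warn(reason = "just because")] // lint: unused attribute (no lints named)
+ /// fn f() {}
+ /// ```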
+ fn check_unused_attribute(&self, hir_id: HirId, attr: &Attribute) {
+ // Warn on useless empty attributes.
+ let note = if matches!(
+ attr.name_or_empty(),
+ sym::macro_use
+ | sym::allow
+ | sym::expect
+ | sym::warn
+ | sym::deny
+ | sym::forbid
+ | sym::feature
+ | sym::repr
+ | sym::target_feature
+ ) && attr.meta_item_list().map_or(false, |list| list.is_empty())
+ {
+ errors::UnusedNote::EmptyList { name: attr.name_or_empty() }
+ } else if matches!(
+ attr.name_or_empty(),
+ sym::allow | sym::warn | sym::deny | sym::forbid | sym::expect
+ ) && let Some(meta) = attr.meta_item_list()
+ && meta.len() == 1
+ && let Some(item) = meta[0].meta_item()
+ && let MetaItemKind::NameValue(_) = &item.kind
+ && item.path == sym::reason
+ {
+ errors::UnusedNote::NoLints { name: attr.name_or_empty() }
+ } else if attr.name_or_empty() == sym::default_method_body_is_const {
+ errors::UnusedNote::DefaultMethodBodyConst
+ } else {
+ return;
+ };
+
+ self.tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ attr.span,
+ errors::Unused { attr_span: attr.span, note },
+ );
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for CheckAttrVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'tcx Item<'tcx>) {
+ // Historically we've run more checks on non-exported than exported macros,
+ // so this lets us continue to run them while maintaining backwards compatibility.
+ // In the long run, the checks should be harmonized.
+ if let ItemKind::Macro(ref macro_def, _) = item.kind {
+ let def_id = item.def_id.to_def_id();
+ if macro_def.macro_rules && !self.tcx.has_attr(def_id, sym::macro_export) {
+ check_non_exported_macro_for_invalid_attrs(self.tcx, item);
+ }
+ }
+
+ let target = Target::from_item(item);
+ self.check_attributes(item.hir_id(), item.span, target, Some(ItemLike::Item(item)));
+ intravisit::walk_item(self, item)
+ }
+
+ fn visit_generic_param(&mut self, generic_param: &'tcx hir::GenericParam<'tcx>) {
+ let target = Target::from_generic_param(generic_param);
+ self.check_attributes(generic_param.hir_id, generic_param.span, target, None);
+ intravisit::walk_generic_param(self, generic_param)
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &'tcx TraitItem<'tcx>) {
+ let target = Target::from_trait_item(trait_item);
+ self.check_attributes(trait_item.hir_id(), trait_item.span, target, None);
+ intravisit::walk_trait_item(self, trait_item)
+ }
+
+ fn visit_field_def(&mut self, struct_field: &'tcx hir::FieldDef<'tcx>) {
+ self.check_attributes(struct_field.hir_id, struct_field.span, Target::Field, None);
+ intravisit::walk_field_def(self, struct_field);
+ }
+
+ fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
+ self.check_attributes(arm.hir_id, arm.span, Target::Arm, None);
+ intravisit::walk_arm(self, arm);
+ }
+
+ fn visit_foreign_item(&mut self, f_item: &'tcx ForeignItem<'tcx>) {
+ let target = Target::from_foreign_item(f_item);
+ self.check_attributes(f_item.hir_id(), f_item.span, target, Some(ItemLike::ForeignItem));
+ intravisit::walk_foreign_item(self, f_item)
+ }
+
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ let target = target_from_impl_item(self.tcx, impl_item);
+ self.check_attributes(impl_item.hir_id(), impl_item.span, target, None);
+ intravisit::walk_impl_item(self, impl_item)
+ }
+
+ fn visit_stmt(&mut self, stmt: &'tcx hir::Stmt<'tcx>) {
+ // When checking statements ignore expressions, they will be checked later.
+ if let hir::StmtKind::Local(ref l) = stmt.kind {
+ self.check_attributes(l.hir_id, stmt.span, Target::Statement, None);
+ }
+ intravisit::walk_stmt(self, stmt)
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ let target = match expr.kind {
+ hir::ExprKind::Closure { .. } => Target::Closure,
+ _ => Target::Expression,
+ };
+
+ self.check_attributes(expr.hir_id, expr.span, target, None);
+ intravisit::walk_expr(self, expr)
+ }
+
+ fn visit_variant(
+ &mut self,
+ variant: &'tcx hir::Variant<'tcx>,
+ generics: &'tcx hir::Generics<'tcx>,
+ item_id: HirId,
+ ) {
+ self.check_attributes(variant.id, variant.span, Target::Variant, None);
+ intravisit::walk_variant(self, variant, generics, item_id)
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ self.check_attributes(param.hir_id, param.span, Target::Param, None);
+
+ intravisit::walk_param(self, param);
+ }
+}
+
+fn is_c_like_enum(item: &Item<'_>) -> bool {
+ if let ItemKind::Enum(ref def, _) = item.kind {
+ for variant in def.variants {
+ match variant.data {
+ hir::VariantData::Unit(..) => { /* continue */ }
+ _ => return false,
+ }
+ }
+ true
+ } else {
+ false
+ }
+}
+
+// FIXME: Fix "Cannot determine resolution" error and remove built-in macros
+// from this check.
+fn check_invalid_crate_level_attr(tcx: TyCtxt<'_>, attrs: &[Attribute]) {
+ // Check for builtin attributes at the crate level
+ // which were unsuccessfully resolved due to a "cannot determine
+ // resolution for the attribute macro" error.
+ const ATTRS_TO_CHECK: &[Symbol] = &[
+ sym::macro_export,
+ sym::repr,
+ sym::path,
+ sym::automatically_derived,
+ sym::start,
+ sym::rustc_main,
+ sym::derive,
+ sym::test,
+ sym::test_case,
+ sym::global_allocator,
+ sym::bench,
+ ];
+
+ for attr in attrs {
+ // This function should only be called with crate attributes,
+ // which are always inner attributes, but let's check to make sure.
+ if attr.style == AttrStyle::Inner {
+ for attr_to_check in ATTRS_TO_CHECK {
+ if attr.has_name(*attr_to_check) {
+ let mut err = tcx.sess.struct_span_err(
+ attr.span,
+ &format!(
+ "`{}` attribute cannot be used at crate level",
+ attr_to_check.to_ident_string()
+ ),
+ );
+ // Only emit an error with a suggestion if we can create a
+ // string out of the attribute span
+ if let Ok(src) = tcx.sess.source_map().span_to_snippet(attr.span) {
+ let replacement = src.replace("#!", "#");
+ err.span_suggestion_verbose(
+ attr.span,
+ "perhaps you meant to use an outer attribute",
+ replacement,
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ }
+ }
+ }
+ }
+}
+
+fn check_non_exported_macro_for_invalid_attrs(tcx: TyCtxt<'_>, item: &Item<'_>) {
+ let attrs = tcx.hir().attrs(item.hir_id());
+
+ for attr in attrs {
+ if attr.has_name(sym::inline) {
+ tcx.sess.emit_err(errors::NonExportedMacroInvalidAttrs { attr_span: attr.span });
+ }
+ }
+}
+
+fn check_mod_attrs(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ let check_attr_visitor = &mut CheckAttrVisitor { tcx };
+ tcx.hir().visit_item_likes_in_module(module_def_id, check_attr_visitor);
+ if module_def_id.is_top_level_module() {
+ check_attr_visitor.check_attributes(CRATE_HIR_ID, DUMMY_SP, Target::Mod, None);
+ check_invalid_crate_level_attr(tcx, tcx.hir().krate_attrs());
+ }
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { check_mod_attrs, ..*providers };
+}
+
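+/// Applies the duplicate-handling policy (`AttributeDuplicates`) registered for a
+/// built-in attribute: duplicates may be allowed, linted as unused, or rejected,
+/// keeping either the first or the last occurrence. A hedged sketch of the linted
+/// case (we believe `#[ignore]` is registered as `WarnFollowing`):
+///
+/// ```ignore (illustrative)
+/// #[test]
+/// #[ignore = "flaky on CI"]
+/// #[ignore] // lint: unused attribute, duplicate of the one above
+/// fn flaky() {}
+/// ```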
+fn check_duplicates(
+ tcx: TyCtxt<'_>,
+ attr: &Attribute,
+ hir_id: HirId,
+ duplicates: AttributeDuplicates,
+ seen: &mut FxHashMap<Symbol, Span>,
+) {
+ use AttributeDuplicates::*;
+ if matches!(duplicates, WarnFollowingWordOnly) && !attr.is_word() {
+ return;
+ }
+ match duplicates {
+ DuplicatesOk => {}
+ WarnFollowing | FutureWarnFollowing | WarnFollowingWordOnly | FutureWarnPreceding => {
+ match seen.entry(attr.name_or_empty()) {
+ Entry::Occupied(mut entry) => {
+ let (this, other) = if matches!(duplicates, FutureWarnPreceding) {
+ let to_remove = entry.insert(attr.span);
+ (to_remove, attr.span)
+ } else {
+ (attr.span, *entry.get())
+ };
+ tcx.emit_spanned_lint(
+ UNUSED_ATTRIBUTES,
+ hir_id,
+ this,
+ errors::UnusedDuplicate {
+ this,
+ other,
+ warning: matches!(
+ duplicates,
+ FutureWarnFollowing | FutureWarnPreceding
+ )
+ .then_some(()),
+ },
+ );
+ }
+ Entry::Vacant(entry) => {
+ entry.insert(attr.span);
+ }
+ }
+ }
+ ErrorFollowing | ErrorPreceding => match seen.entry(attr.name_or_empty()) {
+ Entry::Occupied(mut entry) => {
+ let (this, other) = if matches!(duplicates, ErrorPreceding) {
+ let to_remove = entry.insert(attr.span);
+ (to_remove, attr.span)
+ } else {
+ (attr.span, *entry.get())
+ };
+ tcx.sess.emit_err(errors::UnusedMultiple {
+ this,
+ other,
+ name: attr.name_or_empty(),
+ });
+ }
+ Entry::Vacant(entry) => {
+ entry.insert(attr.span);
+ }
+ },
+ }
+}
diff --git a/compiler/rustc_passes/src/check_const.rs b/compiler/rustc_passes/src/check_const.rs
new file mode 100644
index 000000000..70518284c
--- /dev/null
+++ b/compiler/rustc_passes/src/check_const.rs
@@ -0,0 +1,236 @@
+//! This pass checks HIR bodies that may be evaluated at compile-time (e.g., `const`, `static`,
+//! `const fn`) for structured control flow (e.g. `if`, `while`), which is forbidden in a const
+//! context.
+//!
+//! By the time the MIR const-checker runs, these high-level constructs have been lowered to
+//! control-flow primitives (e.g., `Goto`, `SwitchInt`), making it tough to properly attribute
+//! errors. We still look for those primitives in the MIR const-checker to ensure nothing slips
+//! through, but errors for structured control flow in a `const` should be emitted here.
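+//!
+//! For example (an illustrative snippet), without `#![feature(const_for)]` this
+//! pass rejects the `for` loop even before MIR const-checking runs:
+//!
+//! ```ignore (illustrative)
+//! const fn sum(xs: &[u32]) -> u32 {
+//!     let mut total = 0;
+//!     let mut i = 0;
+//!     while i < xs.len() { // OK: `while` is allowed in const contexts
+//!         total += xs[i];
+//!         i += 1;
+//!     }
+//!     for _x in xs {} // error: `for` is not allowed in a `const fn`
+//!     total
+//! }
+//! ```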
+
+use rustc_attr as attr;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::parse::feature_err;
+use rustc_span::{sym, Span, Symbol};
+
+/// An expression that is not *always* legal in a const context.
+#[derive(Clone, Copy)]
+enum NonConstExpr {
+ Loop(hir::LoopSource),
+ Match(hir::MatchSource),
+}
+
+impl NonConstExpr {
+ fn name(self) -> String {
+ match self {
+ Self::Loop(src) => format!("`{}`", src.name()),
+ Self::Match(src) => format!("`{}`", src.name()),
+ }
+ }
+
+ fn required_feature_gates(self) -> Option<&'static [Symbol]> {
+ use hir::LoopSource::*;
+ use hir::MatchSource::*;
+
+ let gates: &[_] = match self {
+ Self::Match(AwaitDesugar) => {
+ return None;
+ }
+
+ Self::Loop(ForLoop) | Self::Match(ForLoopDesugar) => &[sym::const_for],
+
+ Self::Match(TryDesugar) => &[sym::const_try],
+
+ // All other expressions are allowed.
+ Self::Loop(Loop | While) | Self::Match(Normal) => &[],
+ };
+
+ Some(gates)
+ }
+}
+
+fn check_mod_const_bodies(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ let mut vis = CheckConstVisitor::new(tcx);
+ tcx.hir().visit_item_likes_in_module(module_def_id, &mut vis);
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { check_mod_const_bodies, ..*providers };
+}
+
+#[derive(Copy, Clone)]
+struct CheckConstVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ const_kind: Option<hir::ConstContext>,
+ def_id: Option<LocalDefId>,
+}
+
+impl<'tcx> CheckConstVisitor<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>) -> Self {
+ CheckConstVisitor { tcx, const_kind: None, def_id: None }
+ }
+
+ /// Emits an error when an unsupported expression is found in a const context.
+ fn const_check_violated(&self, expr: NonConstExpr, span: Span) {
+ let Self { tcx, def_id, const_kind } = *self;
+
+ let features = tcx.features();
+ let required_gates = expr.required_feature_gates();
+
+ let is_feature_allowed = |feature_gate| {
+ // All features require that the corresponding gate be enabled,
+ // even if the function has `#[rustc_allow_const_fn_unstable(the_gate)]`.
+ if !tcx.features().enabled(feature_gate) {
+ return false;
+ }
+
+ // If `def_id` is `None`, we don't need to consider stability attributes.
+ let def_id = match def_id {
+ Some(x) => x,
+ None => return true,
+ };
+
+ // If the function belongs to a trait, then it must enable the const_trait_impl
+ // feature to use that trait function (with a const default body).
+ if tcx.trait_of_item(def_id.to_def_id()).is_some() {
+ return true;
+ }
+
+ // If this crate is not using stability attributes, or this function is not claiming to be a
+ // stable `const fn`, that is all that is required.
+ if !tcx.features().staged_api
+ || tcx.has_attr(def_id.to_def_id(), sym::rustc_const_unstable)
+ {
+ return true;
+ }
+
+ // However, we cannot allow stable `const fn`s to use unstable features without an explicit
+ // opt-in via `rustc_allow_const_fn_unstable`.
+ let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(def_id));
+ attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs).any(|name| name == feature_gate)
+ };
+
+ match required_gates {
+ // Don't emit an error if the user has enabled the requisite feature gates.
+ Some(gates) if gates.iter().copied().all(is_feature_allowed) => return,
+
+ // `-Zunleash-the-miri-inside-of-you` only works for expressions that don't have a
+ // corresponding feature gate. This encourages nightly users to use feature gates when
+ // possible.
+ None if tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you => {
+ tcx.sess.span_warn(span, "skipping const checks");
+ return;
+ }
+
+ _ => {}
+ }
+
+ let const_kind =
+ const_kind.expect("`const_check_violated` may only be called inside a const context");
+
+ let msg = format!("{} is not allowed in a `{}`", expr.name(), const_kind.keyword_name());
+
+ let required_gates = required_gates.unwrap_or(&[]);
+ let missing_gates: Vec<_> =
+ required_gates.iter().copied().filter(|&g| !features.enabled(g)).collect();
+
+ match missing_gates.as_slice() {
+ [] => {
+ struct_span_err!(tcx.sess, span, E0744, "{}", msg).emit();
+ }
+
+ [missing_primary, ref missing_secondary @ ..] => {
+ let mut err = feature_err(&tcx.sess.parse_sess, *missing_primary, span, &msg);
+
+ // If multiple feature gates would be required to enable this expression, include
+ // them as help messages. Don't emit a separate error for each missing feature gate.
+ //
+ // FIXME(ecstaticmorse): Maybe this could be incorporated into `feature_err`? This
+ // is a pretty narrow case, however.
+ if tcx.sess.is_nightly_build() {
+ for gate in missing_secondary {
+ let note = format!(
+ "add `#![feature({})]` to the crate attributes to enable",
+ gate,
+ );
+ err.help(&note);
+ }
+ }
+
+ err.emit();
+ }
+ }
+ }
+
+ /// Saves the parent `const_kind` before calling `f` and restores it afterwards.
+ fn recurse_into(
+ &mut self,
+ kind: Option<hir::ConstContext>,
+ def_id: Option<LocalDefId>,
+ f: impl FnOnce(&mut Self),
+ ) {
+ let parent_def_id = self.def_id;
+ let parent_kind = self.const_kind;
+ self.def_id = def_id;
+ self.const_kind = kind;
+ f(self);
+ self.def_id = parent_def_id;
+ self.const_kind = parent_kind;
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for CheckConstVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ intravisit::walk_item(self, item);
+ }
+
+ fn visit_anon_const(&mut self, anon: &'tcx hir::AnonConst) {
+ let kind = Some(hir::ConstContext::Const);
+ self.recurse_into(kind, None, |this| intravisit::walk_anon_const(this, anon));
+ }
+
+ fn visit_body(&mut self, body: &'tcx hir::Body<'tcx>) {
+ let owner = self.tcx.hir().body_owner_def_id(body.id());
+ let kind = self.tcx.hir().body_const_context(owner);
+ self.recurse_into(kind, Some(owner), |this| intravisit::walk_body(this, body));
+ }
+
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ match &e.kind {
+ // Skip the following checks if we are not currently in a const context.
+ _ if self.const_kind.is_none() => {}
+
+ hir::ExprKind::Loop(_, _, source, _) => {
+ self.const_check_violated(NonConstExpr::Loop(*source), e.span);
+ }
+
+ hir::ExprKind::Match(_, _, source) => {
+ let non_const_expr = match source {
+ // These are handled by `ExprKind::Loop` above.
+ hir::MatchSource::ForLoopDesugar => None,
+
+ _ => Some(NonConstExpr::Match(*source)),
+ };
+
+ if let Some(expr) = non_const_expr {
+ self.const_check_violated(expr, e.span);
+ }
+ }
+
+ _ => {}
+ }
+
+ intravisit::walk_expr(self, e);
+ }
+}
diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs
new file mode 100644
index 000000000..1e2fbeb38
--- /dev/null
+++ b/compiler/rustc_passes/src/dead.rs
@@ -0,0 +1,960 @@
+// This implements the dead-code warning pass. It follows middle::reachable
+// closely. The idea is that all reachable symbols are live: code called
+// from live code is live, and everything else is dead.
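+//
+// For example (illustrative), assuming `used` is reachable (say, it is `pub` in
+// a library crate):
+//
+//     pub fn used() { helper() }
+//     fn helper() {} // live: called from live code
+//     fn orphan() {} // dead: reported by this pass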
+
+use itertools::Itertools;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{pluralize, Applicability, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Node, PatKind, TyKind};
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::middle::privacy;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, DefIdTree, TyCtxt};
+use rustc_session::lint;
+use rustc_span::symbol::{sym, Symbol};
+use std::mem;
+
+// Any local node that may call something in its body block should be
+// explored. For example, if it's a live Node::Item that is a
+// function, then we should explore its block to check for code that
+// may need to be marked as live.
+fn should_explore(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
+ matches!(
+ tcx.hir().find_by_def_id(def_id),
+ Some(
+ Node::Item(..)
+ | Node::ImplItem(..)
+ | Node::ForeignItem(..)
+ | Node::TraitItem(..)
+ | Node::Variant(..)
+ | Node::AnonConst(..)
+ )
+ )
+}
+
+struct MarkSymbolVisitor<'tcx> {
+ worklist: Vec<LocalDefId>,
+ tcx: TyCtxt<'tcx>,
+ maybe_typeck_results: Option<&'tcx ty::TypeckResults<'tcx>>,
+ live_symbols: FxHashSet<LocalDefId>,
+ repr_has_repr_c: bool,
+ repr_has_repr_simd: bool,
+ in_pat: bool,
+ ignore_variant_stack: Vec<DefId>,
+ // maps from tuple struct constructors to tuple struct items
+ struct_constructors: FxHashMap<LocalDefId, LocalDefId>,
+ // maps from ADTs to ignored derived traits (e.g. Debug and Clone)
+ // and the span of their respective impl (i.e., part of the derive
+ // macro)
+ ignored_derived_traits: FxHashMap<LocalDefId, Vec<(DefId, DefId)>>,
+}
+
+impl<'tcx> MarkSymbolVisitor<'tcx> {
+ /// Gets the type-checking results for the current body.
+ /// As this will ICE if called outside bodies, only call when working with
+ /// `Expr` or `Pat` nodes (they are guaranteed to be found only in bodies).
+ #[track_caller]
+ fn typeck_results(&self) -> &'tcx ty::TypeckResults<'tcx> {
+ self.maybe_typeck_results
+ .expect("`MarkSymbolVisitor::typeck_results` called outside of body")
+ }
+
+ fn check_def_id(&mut self, def_id: DefId) {
+ if let Some(def_id) = def_id.as_local() {
+ if should_explore(self.tcx, def_id) || self.struct_constructors.contains_key(&def_id) {
+ self.worklist.push(def_id);
+ }
+ self.live_symbols.insert(def_id);
+ }
+ }
+
+ fn insert_def_id(&mut self, def_id: DefId) {
+ if let Some(def_id) = def_id.as_local() {
+ debug_assert!(!should_explore(self.tcx, def_id));
+ self.live_symbols.insert(def_id);
+ }
+ }
+
+ fn handle_res(&mut self, res: Res) {
+ match res {
+ Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::TyAlias, def_id) => {
+ self.check_def_id(def_id);
+ }
+ _ if self.in_pat => {}
+ Res::PrimTy(..) | Res::SelfCtor(..) | Res::Local(..) => {}
+ Res::Def(DefKind::Ctor(CtorOf::Variant, ..), ctor_def_id) => {
+ let variant_id = self.tcx.parent(ctor_def_id);
+ let enum_id = self.tcx.parent(variant_id);
+ self.check_def_id(enum_id);
+ if !self.ignore_variant_stack.contains(&ctor_def_id) {
+ self.check_def_id(variant_id);
+ }
+ }
+ Res::Def(DefKind::Variant, variant_id) => {
+ let enum_id = self.tcx.parent(variant_id);
+ self.check_def_id(enum_id);
+ if !self.ignore_variant_stack.contains(&variant_id) {
+ self.check_def_id(variant_id);
+ }
+ }
+ Res::Def(_, def_id) => self.check_def_id(def_id),
+ Res::SelfTy { trait_: t, alias_to: i } => {
+ if let Some(t) = t {
+ self.check_def_id(t);
+ }
+ if let Some((i, _)) = i {
+ self.check_def_id(i);
+ }
+ }
+ Res::ToolMod | Res::NonMacroAttr(..) | Res::Err => {}
+ }
+ }
+
+ fn lookup_and_handle_method(&mut self, id: hir::HirId) {
+ if let Some(def_id) = self.typeck_results().type_dependent_def_id(id) {
+ self.check_def_id(def_id);
+ } else {
+ bug!("no type-dependent def for method");
+ }
+ }
+
+ fn handle_field_access(&mut self, lhs: &hir::Expr<'_>, hir_id: hir::HirId) {
+ match self.typeck_results().expr_ty_adjusted(lhs).kind() {
+ ty::Adt(def, _) => {
+ let index = self.tcx.field_index(hir_id, self.typeck_results());
+ self.insert_def_id(def.non_enum_variant().fields[index].did);
+ }
+ ty::Tuple(..) => {}
+ _ => span_bug!(lhs.span, "named field access on non-ADT"),
+ }
+ }
+
+ #[allow(dead_code)] // FIXME(81658): should be used + lint reinstated after #83171 relands.
+ fn handle_assign(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ if self
+ .typeck_results()
+ .expr_adjustments(expr)
+ .iter()
+ .any(|adj| matches!(adj.kind, ty::adjustment::Adjust::Deref(_)))
+ {
+ self.visit_expr(expr);
+ } else if let hir::ExprKind::Field(base, ..) = expr.kind {
+ // Ignore write to field
+ self.handle_assign(base);
+ } else {
+ self.visit_expr(expr);
+ }
+ }
+
+ #[allow(dead_code)] // FIXME(81658): should be used + lint reinstated after #83171 relands.
+ fn check_for_self_assign(&mut self, assign: &'tcx hir::Expr<'tcx>) {
+ fn check_for_self_assign_helper<'tcx>(
+ typeck_results: &'tcx ty::TypeckResults<'tcx>,
+ lhs: &'tcx hir::Expr<'tcx>,
+ rhs: &'tcx hir::Expr<'tcx>,
+ ) -> bool {
+ match (&lhs.kind, &rhs.kind) {
+ (hir::ExprKind::Path(ref qpath_l), hir::ExprKind::Path(ref qpath_r)) => {
+ if let (Res::Local(id_l), Res::Local(id_r)) = (
+ typeck_results.qpath_res(qpath_l, lhs.hir_id),
+ typeck_results.qpath_res(qpath_r, rhs.hir_id),
+ ) {
+ if id_l == id_r {
+ return true;
+ }
+ }
+ return false;
+ }
+ (hir::ExprKind::Field(lhs_l, ident_l), hir::ExprKind::Field(lhs_r, ident_r)) => {
+ if ident_l == ident_r {
+ return check_for_self_assign_helper(typeck_results, lhs_l, lhs_r);
+ }
+ return false;
+ }
+ _ => {
+ return false;
+ }
+ }
+ }
+
+ if let hir::ExprKind::Assign(lhs, rhs, _) = assign.kind
+ && check_for_self_assign_helper(self.typeck_results(), lhs, rhs)
+ && !assign.span.from_expansion()
+ {
+ let is_field_assign = matches!(lhs.kind, hir::ExprKind::Field(..));
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::DEAD_CODE,
+ assign.hir_id,
+ assign.span,
+ |lint| {
+ lint.build(&format!(
+ "useless assignment of {} of type `{}` to itself",
+ if is_field_assign { "field" } else { "variable" },
+ self.typeck_results().expr_ty(lhs),
+ ))
+ .emit();
+ },
+ )
+ }
+ }
+
+ fn handle_field_pattern_match(
+ &mut self,
+ lhs: &hir::Pat<'_>,
+ res: Res,
+ pats: &[hir::PatField<'_>],
+ ) {
+ let variant = match self.typeck_results().node_type(lhs.hir_id).kind() {
+ ty::Adt(adt, _) => adt.variant_of_res(res),
+ _ => span_bug!(lhs.span, "non-ADT in struct pattern"),
+ };
+ for pat in pats {
+ if let PatKind::Wild = pat.pat.kind {
+ continue;
+ }
+ let index = self.tcx.field_index(pat.hir_id, self.typeck_results());
+ self.insert_def_id(variant.fields[index].did);
+ }
+ }
+
+ fn handle_tuple_field_pattern_match(
+ &mut self,
+ lhs: &hir::Pat<'_>,
+ res: Res,
+ pats: &[hir::Pat<'_>],
+ dotdot: Option<usize>,
+ ) {
+ let variant = match self.typeck_results().node_type(lhs.hir_id).kind() {
+ ty::Adt(adt, _) => adt.variant_of_res(res),
+ _ => span_bug!(lhs.span, "non-ADT in tuple struct pattern"),
+ };
+ let first_n = pats.iter().enumerate().take(dotdot.unwrap_or(pats.len()));
+ let missing = variant.fields.len() - pats.len();
+ let last_n = pats
+ .iter()
+ .enumerate()
+ .skip(dotdot.unwrap_or(pats.len()))
+ .map(|(idx, pat)| (idx + missing, pat));
+ for (idx, pat) in first_n.chain(last_n) {
+ if let PatKind::Wild = pat.kind {
+ continue;
+ }
+ self.insert_def_id(variant.fields[idx].did);
+ }
+ }
+
+ fn mark_live_symbols(&mut self) {
+ let mut scanned = FxHashSet::default();
+ while let Some(id) = self.worklist.pop() {
+ if !scanned.insert(id) {
+ continue;
+ }
+
+ // in the case of tuple struct constructors we want to check the item, not the generated
+ // tuple struct constructor function
+ let id = self.struct_constructors.get(&id).copied().unwrap_or(id);
+
+ if let Some(node) = self.tcx.hir().find_by_def_id(id) {
+ self.live_symbols.insert(id);
+ self.visit_node(node);
+ }
+ }
+ }
+
+ /// Automatically generated items marked with `rustc_trivial_field_reads`
+ /// will be ignored for the purposes of dead code analysis (see PR #85200
+ /// for discussion).
+ fn should_ignore_item(&mut self, def_id: DefId) -> bool {
+ if let Some(impl_of) = self.tcx.impl_of_method(def_id) {
+ if !self.tcx.has_attr(impl_of, sym::automatically_derived) {
+ return false;
+ }
+
+ if let Some(trait_of) = self.tcx.trait_id_of_impl(impl_of)
+ && self.tcx.has_attr(trait_of, sym::rustc_trivial_field_reads)
+ {
+ let trait_ref = self.tcx.impl_trait_ref(impl_of).unwrap();
+ if let ty::Adt(adt_def, _) = trait_ref.self_ty().kind()
+ && let Some(adt_def_id) = adt_def.did().as_local()
+ {
+ self.ignored_derived_traits
+ .entry(adt_def_id)
+ .or_default()
+ .push((trait_of, impl_of));
+ }
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ fn visit_node(&mut self, node: Node<'tcx>) {
+ if let Node::ImplItem(hir::ImplItem { def_id, .. }) = node
+ && self.should_ignore_item(def_id.to_def_id())
+ {
+ return;
+ }
+
+ let had_repr_c = self.repr_has_repr_c;
+ let had_repr_simd = self.repr_has_repr_simd;
+ self.repr_has_repr_c = false;
+ self.repr_has_repr_simd = false;
+ match node {
+ Node::Item(item) => match item.kind {
+ hir::ItemKind::Struct(..) | hir::ItemKind::Union(..) => {
+ let def = self.tcx.adt_def(item.def_id);
+ self.repr_has_repr_c = def.repr().c();
+ self.repr_has_repr_simd = def.repr().simd();
+
+ intravisit::walk_item(self, &item)
+ }
+ hir::ItemKind::ForeignMod { .. } => {}
+ _ => intravisit::walk_item(self, &item),
+ },
+ Node::TraitItem(trait_item) => {
+ intravisit::walk_trait_item(self, trait_item);
+ }
+ Node::ImplItem(impl_item) => {
+ let item = self.tcx.local_parent(impl_item.def_id);
+ if self.tcx.impl_trait_ref(item).is_none() {
+ // If it's a type whose items are live, then it's live, too.
+ // This is done to handle the case where, for example, the static
+ // method of a private type is used, but the type itself is never
+ // called directly.
+ let self_ty = self.tcx.type_of(item);
+ match *self_ty.kind() {
+ ty::Adt(def, _) => self.check_def_id(def.did()),
+ ty::Foreign(did) => self.check_def_id(did),
+ ty::Dynamic(data, ..) => {
+ if let Some(def_id) = data.principal_def_id() {
+ self.check_def_id(def_id)
+ }
+ }
+ _ => {}
+ }
+ }
+ intravisit::walk_impl_item(self, impl_item);
+ }
+ Node::ForeignItem(foreign_item) => {
+ intravisit::walk_foreign_item(self, &foreign_item);
+ }
+ _ => {}
+ }
+ self.repr_has_repr_simd = had_repr_simd;
+ self.repr_has_repr_c = had_repr_c;
+ }
+
+ fn mark_as_used_if_union(&mut self, adt: ty::AdtDef<'tcx>, fields: &[hir::ExprField<'_>]) {
+ if adt.is_union() && adt.non_enum_variant().fields.len() > 1 && adt.did().is_local() {
+ for field in fields {
+ let index = self.tcx.field_index(field.hir_id, self.typeck_results());
+ self.insert_def_id(adt.non_enum_variant().fields[index].did);
+ }
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for MarkSymbolVisitor<'tcx> {
+ fn visit_nested_body(&mut self, body: hir::BodyId) {
+ let old_maybe_typeck_results =
+ self.maybe_typeck_results.replace(self.tcx.typeck_body(body));
+ let body = self.tcx.hir().body(body);
+ self.visit_body(body);
+ self.maybe_typeck_results = old_maybe_typeck_results;
+ }
+
+ fn visit_variant_data(
+ &mut self,
+ def: &'tcx hir::VariantData<'tcx>,
+ _: Symbol,
+ _: &hir::Generics<'_>,
+ _: hir::HirId,
+ _: rustc_span::Span,
+ ) {
+ let tcx = self.tcx;
+ let has_repr_c = self.repr_has_repr_c;
+ let has_repr_simd = self.repr_has_repr_simd;
+ let live_fields = def.fields().iter().filter_map(|f| {
+ let def_id = tcx.hir().local_def_id(f.hir_id);
+ if has_repr_c || (f.is_positional() && has_repr_simd) {
+ return Some(def_id);
+ }
+ if !tcx.visibility(f.hir_id.owner).is_public() {
+ return None;
+ }
+ if tcx.visibility(def_id).is_public() { Some(def_id) } else { None }
+ });
+ self.live_symbols.extend(live_fields);
+
+ intravisit::walk_struct_def(self, def);
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ match expr.kind {
+ hir::ExprKind::Path(ref qpath @ hir::QPath::TypeRelative(..)) => {
+ let res = self.typeck_results().qpath_res(qpath, expr.hir_id);
+ self.handle_res(res);
+ }
+ hir::ExprKind::MethodCall(..) => {
+ self.lookup_and_handle_method(expr.hir_id);
+ }
+ hir::ExprKind::Field(ref lhs, ..) => {
+ self.handle_field_access(&lhs, expr.hir_id);
+ }
+ hir::ExprKind::Struct(ref qpath, ref fields, _) => {
+ let res = self.typeck_results().qpath_res(qpath, expr.hir_id);
+ self.handle_res(res);
+ if let ty::Adt(adt, _) = self.typeck_results().expr_ty(expr).kind() {
+ self.mark_as_used_if_union(*adt, fields);
+ }
+ }
+ _ => (),
+ }
+
+ intravisit::walk_expr(self, expr);
+ }
+
+ fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
+ // Inside the body, ignore constructions of variants
+ // necessary for the pattern to match. Those construction sites
+ // can't be reached unless the variant is constructed elsewhere.
+ let len = self.ignore_variant_stack.len();
+ self.ignore_variant_stack.extend(arm.pat.necessary_variants());
+ intravisit::walk_arm(self, arm);
+ self.ignore_variant_stack.truncate(len);
+ }
+
+ fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
+ self.in_pat = true;
+ match pat.kind {
+ PatKind::Struct(ref path, ref fields, _) => {
+ let res = self.typeck_results().qpath_res(path, pat.hir_id);
+ self.handle_field_pattern_match(pat, res, fields);
+ }
+ PatKind::Path(ref qpath) => {
+ let res = self.typeck_results().qpath_res(qpath, pat.hir_id);
+ self.handle_res(res);
+ }
+ PatKind::TupleStruct(ref qpath, ref fields, dotdot) => {
+ let res = self.typeck_results().qpath_res(qpath, pat.hir_id);
+ self.handle_tuple_field_pattern_match(pat, res, fields, dotdot);
+ }
+ _ => (),
+ }
+
+ intravisit::walk_pat(self, pat);
+ self.in_pat = false;
+ }
+
+ fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
+ self.handle_res(path.res);
+ intravisit::walk_path(self, path);
+ }
+
+ fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
+ if let TyKind::OpaqueDef(item_id, _) = ty.kind {
+ let item = self.tcx.hir().item(item_id);
+ intravisit::walk_item(self, item);
+ }
+ intravisit::walk_ty(self, ty);
+ }
+
+ fn visit_anon_const(&mut self, c: &'tcx hir::AnonConst) {
+ // When inline const blocks are used in pattern position, the paths
+ // referenced by them should be considered used.
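+ // For example (illustrative):
+ //
+ //     match n {
+ //         const { LIMIT / 2 } => { /* `LIMIT` is marked live by this pattern */ }
+ //         _ => {}
+ //     }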
+ let in_pat = mem::replace(&mut self.in_pat, false);
+
+ self.live_symbols.insert(self.tcx.hir().local_def_id(c.hir_id));
+ intravisit::walk_anon_const(self, c);
+
+ self.in_pat = in_pat;
+ }
+}
+
+fn has_allow_dead_code_or_lang_attr_helper(
+ tcx: TyCtxt<'_>,
+ id: hir::HirId,
+ lint: &'static lint::Lint,
+) -> bool {
+ let attrs = tcx.hir().attrs(id);
+ if tcx.sess.contains_name(attrs, sym::lang) {
+ return true;
+ }
+
+ // Stable attribute for #[lang = "panic_impl"]
+ if tcx.sess.contains_name(attrs, sym::panic_handler) {
+ return true;
+ }
+
+ // (To be) stable attribute for #[lang = "oom"]
+ if tcx.sess.contains_name(attrs, sym::alloc_error_handler) {
+ return true;
+ }
+
+ let def_id = tcx.hir().local_def_id(id);
+ if tcx.def_kind(def_id).has_codegen_attrs() {
+ let cg_attrs = tcx.codegen_fn_attrs(def_id);
+
+ // #[used], #[no_mangle], #[export_name], etc. also keep the item alive
+ // forcefully, e.g., for placing it in a specific section.
+ if cg_attrs.contains_extern_indicator()
+ || cg_attrs.flags.contains(CodegenFnAttrFlags::USED)
+ || cg_attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER)
+ {
+ return true;
+ }
+ }
+
+ tcx.lint_level_at_node(lint, id).0 == lint::Allow
+}
+
+fn has_allow_dead_code_or_lang_attr(tcx: TyCtxt<'_>, id: hir::HirId) -> bool {
+ has_allow_dead_code_or_lang_attr_helper(tcx, id, lint::builtin::DEAD_CODE)
+}
+
+// These check_* functions seed items that
+// 1) we want to explicitly consider as live:
+// * items annotated with #[allow(dead_code)]
+// - This is done so that if we want to suppress warnings for a
+// group of dead functions, we only have to annotate the "root".
+// For example, if both `f` and `g` are dead and `f` calls `g`,
+// then annotating `f` with `#[allow(dead_code)]` will suppress
+// the warning for both `f` and `g`.
+// * items annotated with #[lang = ".."]
+// - This is because lang items are always callable from elsewhere.
+// or
+// 2) we are not sure whether they are live or not:
+// * implementations of traits and trait methods
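+//
+// For example (illustrative):
+//
+//     #[allow(dead_code)]
+//     fn f() { g() } // no dead-code warning for `f`...
+//     fn g() {}      // ...and none for `g`, since it is reachable from `f`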
+fn check_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ worklist: &mut Vec<LocalDefId>,
+ struct_constructors: &mut FxHashMap<LocalDefId, LocalDefId>,
+ id: hir::ItemId,
+) {
+ let allow_dead_code = has_allow_dead_code_or_lang_attr(tcx, id.hir_id());
+ if allow_dead_code {
+ worklist.push(id.def_id);
+ }
+
+ match tcx.def_kind(id.def_id) {
+ DefKind::Enum => {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::Enum(ref enum_def, _) = item.kind {
+ let hir = tcx.hir();
+ if allow_dead_code {
+ worklist.extend(
+ enum_def.variants.iter().map(|variant| hir.local_def_id(variant.id)),
+ );
+ }
+
+ for variant in enum_def.variants {
+ if let Some(ctor_hir_id) = variant.data.ctor_hir_id() {
+ struct_constructors
+ .insert(hir.local_def_id(ctor_hir_id), hir.local_def_id(variant.id));
+ }
+ }
+ }
+ }
+ DefKind::Impl => {
+ let of_trait = tcx.impl_trait_ref(id.def_id);
+
+ if of_trait.is_some() {
+ worklist.push(id.def_id);
+ }
+
+ // Get the `DefId`s of the impl's associated items from another query.
+ let local_def_ids = tcx
+ .associated_item_def_ids(id.def_id)
+ .iter()
+ .filter_map(|def_id| def_id.as_local());
+
+ // Then use the HIR map to turn each `LocalDefId` back into a `HirId`.
+ for id in local_def_ids {
+ if of_trait.is_some()
+ || has_allow_dead_code_or_lang_attr(tcx, tcx.hir().local_def_id_to_hir_id(id))
+ {
+ worklist.push(id);
+ }
+ }
+ }
+ DefKind::Struct => {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::Struct(ref variant_data, _) = item.kind
+ && let Some(ctor_hir_id) = variant_data.ctor_hir_id()
+ {
+ struct_constructors.insert(tcx.hir().local_def_id(ctor_hir_id), item.def_id);
+ }
+ }
+ DefKind::GlobalAsm => {
+ // global_asm! is always live.
+ worklist.push(id.def_id);
+ }
+ _ => {}
+ }
+}
+
+fn check_trait_item<'tcx>(tcx: TyCtxt<'tcx>, worklist: &mut Vec<LocalDefId>, id: hir::TraitItemId) {
+ use hir::TraitItemKind::{Const, Fn};
+ if matches!(tcx.def_kind(id.def_id), DefKind::AssocConst | DefKind::AssocFn) {
+ let trait_item = tcx.hir().trait_item(id);
+ if matches!(trait_item.kind, Const(_, Some(_)) | Fn(_, hir::TraitFn::Provided(_)))
+ && has_allow_dead_code_or_lang_attr(tcx, trait_item.hir_id())
+ {
+ worklist.push(trait_item.def_id);
+ }
+ }
+}
+
+fn check_foreign_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ worklist: &mut Vec<LocalDefId>,
+ id: hir::ForeignItemId,
+) {
+ if matches!(tcx.def_kind(id.def_id), DefKind::Static(_) | DefKind::Fn)
+ && has_allow_dead_code_or_lang_attr(tcx, id.hir_id())
+ {
+ worklist.push(id.def_id);
+ }
+}
+
+fn create_and_seed_worklist<'tcx>(
+ tcx: TyCtxt<'tcx>,
+) -> (Vec<LocalDefId>, FxHashMap<LocalDefId, LocalDefId>) {
+ let access_levels = &tcx.privacy_access_levels(());
+ // see `MarkSymbolVisitor::struct_constructors`
+ let mut struct_constructors = Default::default();
+ let mut worklist = access_levels
+ .map
+ .iter()
+ .filter_map(
+ |(&id, &level)| {
+ if level >= privacy::AccessLevel::Reachable { Some(id) } else { None }
+ },
+ )
+ // Seed entry point
+ .chain(tcx.entry_fn(()).and_then(|(def_id, _)| def_id.as_local()))
+ .collect::<Vec<_>>();
+
+ let crate_items = tcx.hir_crate_items(());
+ for id in crate_items.items() {
+ check_item(tcx, &mut worklist, &mut struct_constructors, id);
+ }
+
+ for id in crate_items.trait_items() {
+ check_trait_item(tcx, &mut worklist, id);
+ }
+
+ for id in crate_items.foreign_items() {
+ check_foreign_item(tcx, &mut worklist, id);
+ }
+
+ (worklist, struct_constructors)
+}
+
+fn live_symbols_and_ignored_derived_traits<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (): (),
+) -> (FxHashSet<LocalDefId>, FxHashMap<LocalDefId, Vec<(DefId, DefId)>>) {
+ let (worklist, struct_constructors) = create_and_seed_worklist(tcx);
+ let mut symbol_visitor = MarkSymbolVisitor {
+ worklist,
+ tcx,
+ maybe_typeck_results: None,
+ live_symbols: Default::default(),
+ repr_has_repr_c: false,
+ repr_has_repr_simd: false,
+ in_pat: false,
+ ignore_variant_stack: vec![],
+ struct_constructors,
+ ignored_derived_traits: FxHashMap::default(),
+ };
+ symbol_visitor.mark_live_symbols();
+ (symbol_visitor.live_symbols, symbol_visitor.ignored_derived_traits)
+}
+
+struct DeadVariant {
+ def_id: LocalDefId,
+ name: Symbol,
+ level: lint::Level,
+}
+
+struct DeadVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ live_symbols: &'tcx FxHashSet<LocalDefId>,
+ ignored_derived_traits: &'tcx FxHashMap<LocalDefId, Vec<(DefId, DefId)>>,
+}
+
+enum ShouldWarnAboutField {
+ Yes(bool), // whether the field is positional
+ No,
+}
+
+impl<'tcx> DeadVisitor<'tcx> {
+ fn should_warn_about_field(&mut self, field: &ty::FieldDef) -> ShouldWarnAboutField {
+ if self.live_symbols.contains(&field.did.expect_local()) {
+ return ShouldWarnAboutField::No;
+ }
+ let field_type = self.tcx.type_of(field.did);
+ if field_type.is_phantom_data() {
+ return ShouldWarnAboutField::No;
+ }
+ let is_positional = field.name.as_str().starts_with(|c: char| c.is_ascii_digit());
+ if is_positional
+ && self
+ .tcx
+ .layout_of(self.tcx.param_env(field.did).and(field_type))
+ .map_or(true, |layout| layout.is_zst())
+ {
+ return ShouldWarnAboutField::No;
+ }
+ ShouldWarnAboutField::Yes(is_positional)
+ }
+
+ fn warn_multiple_dead_codes(
+ &self,
+ dead_codes: &[LocalDefId],
+ participle: &str,
+ parent_item: Option<LocalDefId>,
+ is_positional: bool,
+ ) {
+ if let Some(&first_id) = dead_codes.first() {
+ let tcx = self.tcx;
+ let names: Vec<_> = dead_codes
+ .iter()
+ .map(|&def_id| tcx.item_name(def_id.to_def_id()).to_string())
+ .collect();
+ let spans: Vec<_> = dead_codes
+ .iter()
+ .map(|&def_id| match tcx.def_ident_span(def_id) {
+ Some(s) => s.with_ctxt(tcx.def_span(def_id).ctxt()),
+ None => tcx.def_span(def_id),
+ })
+ .collect();
+
+ tcx.struct_span_lint_hir(
+ if is_positional {
+ lint::builtin::UNUSED_TUPLE_STRUCT_FIELDS
+ } else {
+ lint::builtin::DEAD_CODE
+ },
+ tcx.hir().local_def_id_to_hir_id(first_id),
+ MultiSpan::from_spans(spans.clone()),
+ |lint| {
+ let descr = tcx.def_kind(first_id).descr(first_id.to_def_id());
+ let span_len = dead_codes.len();
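+ // Build a readable name list such as "`a`, `b` and `c` " (illustrative);
+ // it is omitted entirely when more than 6 items are reported at once.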
+ let names = match &names[..] {
+ _ if span_len > 6 => String::new(),
+ [name] => format!("`{name}` "),
+ [names @ .., last] => {
+ format!(
+ "{} and `{last}` ",
+ names.iter().map(|name| format!("`{name}`")).join(", ")
+ )
+ }
+ [] => unreachable!(),
+ };
+ let mut err = lint.build(&format!(
+ "{these}{descr}{s} {names}{are} never {participle}",
+ these = if span_len > 6 { "multiple " } else { "" },
+ s = pluralize!(span_len),
+ are = pluralize!("is", span_len),
+ ));
+
+ if is_positional {
+ err.multipart_suggestion(
+ &format!(
+ "consider changing the field{s} to be of unit type to \
+ suppress this warning while preserving the field \
+ numbering, or remove the field{s}",
+ s = pluralize!(span_len)
+ ),
+ spans.iter().map(|sp| (*sp, "()".to_string())).collect(),
+ // "HasPlaceholders" because applying this fix by itself isn't
+ // enough: All constructor calls have to be adjusted as well
+ Applicability::HasPlaceholders,
+ );
+ }
+
+ if let Some(parent_item) = parent_item {
+ let parent_descr = tcx.def_kind(parent_item).descr(parent_item.to_def_id());
+ err.span_label(
+ tcx.def_ident_span(parent_item).unwrap(),
+ format!("{descr}{s} in this {parent_descr}", s = pluralize!(span_len)),
+ );
+ }
+
+ let encl_def_id = parent_item.unwrap_or(first_id);
+ if let Some(ign_traits) = self.ignored_derived_traits.get(&encl_def_id) {
+ let traits_str = ign_traits
+ .iter()
+ .map(|(trait_id, _)| format!("`{}`", self.tcx.item_name(*trait_id)))
+ .collect::<Vec<_>>()
+ .join(" and ");
+ let plural_s = pluralize!(ign_traits.len());
+ let article = if ign_traits.len() > 1 { "" } else { "a " };
+ let is_are = if ign_traits.len() > 1 { "these are" } else { "this is" };
+ let msg = format!(
+ "`{}` has {}derived impl{} for the trait{} {}, but {} \
+ intentionally ignored during dead code analysis",
+ self.tcx.item_name(encl_def_id.to_def_id()),
+ article,
+ plural_s,
+ plural_s,
+ traits_str,
+ is_are
+ );
+ err.note(&msg);
+ }
+ err.emit();
+ },
+ );
+ }
+ }
+
+ fn warn_dead_fields_and_variants(
+ &self,
+ def_id: LocalDefId,
+ participle: &str,
+ dead_codes: Vec<DeadVariant>,
+ is_positional: bool,
+ ) {
+ let mut dead_codes = dead_codes
+ .iter()
+ .filter(|v| !v.name.as_str().starts_with('_'))
+ .collect::<Vec<&DeadVariant>>();
+ if dead_codes.is_empty() {
+ return;
+ }
+ dead_codes.sort_by_key(|v| v.level);
+ for (_, group) in &dead_codes.into_iter().group_by(|v| v.level) {
+ self.warn_multiple_dead_codes(
+ &group.map(|v| v.def_id).collect::<Vec<_>>(),
+ participle,
+ Some(def_id),
+ is_positional,
+ );
+ }
+ }
+
+ fn warn_dead_code(&mut self, id: LocalDefId, participle: &str) {
+ self.warn_multiple_dead_codes(&[id], participle, None, false);
+ }
+
+ fn check_definition(&mut self, def_id: LocalDefId) {
+ if self.live_symbols.contains(&def_id) {
+ return;
+ }
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ if has_allow_dead_code_or_lang_attr(self.tcx, hir_id) {
+ return;
+ }
+ let Some(name) = self.tcx.opt_item_name(def_id.to_def_id()) else {
+ return
+ };
+ if name.as_str().starts_with('_') {
+ return;
+ }
+ match self.tcx.def_kind(def_id) {
+ DefKind::AssocConst
+ | DefKind::AssocFn
+ | DefKind::Fn
+ | DefKind::Static(_)
+ | DefKind::Const
+ | DefKind::TyAlias
+ | DefKind::Enum
+ | DefKind::Union
+ | DefKind::ForeignTy => self.warn_dead_code(def_id, "used"),
+ DefKind::Struct => self.warn_dead_code(def_id, "constructed"),
+ DefKind::Variant | DefKind::Field => bug!("should be handled specially"),
+ _ => {}
+ }
+ }
+}
+
+fn check_mod_deathness(tcx: TyCtxt<'_>, module: LocalDefId) {
+ let (live_symbols, ignored_derived_traits) = tcx.live_symbols_and_ignored_derived_traits(());
+ let mut visitor = DeadVisitor { tcx, live_symbols, ignored_derived_traits };
+
+ let module_items = tcx.hir_module_items(module);
+
+ for item in module_items.items() {
+ if !live_symbols.contains(&item.def_id) {
+ let parent = tcx.local_parent(item.def_id);
+ if parent != module && !live_symbols.contains(&parent) {
+ // We have already diagnosed something.
+ continue;
+ }
+ visitor.check_definition(item.def_id);
+ continue;
+ }
+
+ let def_kind = tcx.def_kind(item.def_id);
+ if let DefKind::Struct | DefKind::Union | DefKind::Enum = def_kind {
+ let adt = tcx.adt_def(item.def_id);
+ let mut dead_variants = Vec::new();
+
+ for variant in adt.variants() {
+ let def_id = variant.def_id.expect_local();
+ if !live_symbols.contains(&def_id) {
+ // Record to group diagnostics.
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let level = tcx.lint_level_at_node(lint::builtin::DEAD_CODE, hir_id).0;
+ dead_variants.push(DeadVariant { def_id, name: variant.name, level });
+ continue;
+ }
+
+ let mut is_positional = false;
+ let dead_fields = variant
+ .fields
+ .iter()
+ .filter_map(|field| {
+ let def_id = field.did.expect_local();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ if let ShouldWarnAboutField::Yes(is_pos) =
+ visitor.should_warn_about_field(&field)
+ {
+ let level = tcx
+ .lint_level_at_node(
+ if is_pos {
+ is_positional = true;
+ lint::builtin::UNUSED_TUPLE_STRUCT_FIELDS
+ } else {
+ lint::builtin::DEAD_CODE
+ },
+ hir_id,
+ )
+ .0;
+ Some(DeadVariant { def_id, name: field.name, level })
+ } else {
+ None
+ }
+ })
+ .collect();
+ visitor.warn_dead_fields_and_variants(def_id, "read", dead_fields, is_positional)
+ }
+
+ visitor.warn_dead_fields_and_variants(item.def_id, "constructed", dead_variants, false);
+ }
+ }
+
+ for impl_item in module_items.impl_items() {
+ visitor.check_definition(impl_item.def_id);
+ }
+
+ for foreign_item in module_items.foreign_items() {
+ visitor.check_definition(foreign_item.def_id);
+ }
+
+ // We do not warn about trait items.
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers =
+ Providers { live_symbols_and_ignored_derived_traits, check_mod_deathness, ..*providers };
+}
diff --git a/compiler/rustc_passes/src/debugger_visualizer.rs b/compiler/rustc_passes/src/debugger_visualizer.rs
new file mode 100644
index 000000000..e08683fe2
--- /dev/null
+++ b/compiler/rustc_passes/src/debugger_visualizer.rs
@@ -0,0 +1,99 @@
+//! Detecting usage of the `#[debugger_visualizer]` attribute.
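+//!
+//! A crate opts in with crate-level attributes like these (an illustrative
+//! sketch; the file paths are made up):
+//!
+//! ```ignore (illustrative)
+//! #![debugger_visualizer(natvis_file = "../my_types.natvis")]
+//! #![debugger_visualizer(gdb_script_file = "../my_types.py")]
+//! ```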
+
+use hir::CRATE_HIR_ID;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_expand::base::resolve_path;
+use rustc_hir as hir;
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::HirId;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::def_id::LOCAL_CRATE;
+use rustc_span::{sym, DebuggerVisualizerFile, DebuggerVisualizerType};
+
+use std::sync::Arc;
+
+fn check_for_debugger_visualizer<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ hir_id: HirId,
+ debugger_visualizers: &mut FxHashSet<DebuggerVisualizerFile>,
+) {
+ let attrs = tcx.hir().attrs(hir_id);
+ for attr in attrs {
+ if attr.has_name(sym::debugger_visualizer) {
+ let Some(list) = attr.meta_item_list() else {
+ continue
+ };
+
+ let meta_item = match list.len() {
+ 1 => match list[0].meta_item() {
+ Some(meta_item) => meta_item,
+ _ => continue,
+ },
+ _ => continue,
+ };
+
+ let visualizer_type = match meta_item.name_or_empty() {
+ sym::natvis_file => DebuggerVisualizerType::Natvis,
+ sym::gdb_script_file => DebuggerVisualizerType::GdbPrettyPrinter,
+ _ => continue,
+ };
+
+ let file = match meta_item.value_str() {
+ Some(value) => {
+ match resolve_path(&tcx.sess.parse_sess, value.as_str(), attr.span) {
+ Ok(file) => file,
+ _ => continue,
+ }
+ }
+ None => continue,
+ };
+
+ match std::fs::read(&file) {
+ Ok(contents) => {
+ debugger_visualizers
+ .insert(DebuggerVisualizerFile::new(Arc::from(contents), visualizer_type));
+ }
+ Err(err) => {
+ tcx.sess
+ .struct_span_err(
+ meta_item.span,
+ &format!("couldn't read {}: {}", file.display(), err),
+ )
+ .emit();
+ }
+ }
+ }
+ }
+}
+
+/// Traverses and collects the debugger visualizers for a specific crate.
+fn debugger_visualizers<'tcx>(tcx: TyCtxt<'tcx>, cnum: CrateNum) -> Vec<DebuggerVisualizerFile> {
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ // Initialize the collector.
+ let mut debugger_visualizers = FxHashSet::default();
+
+ // Collect debugger visualizers in this crate.
+ tcx.hir().for_each_module(|id| {
+ check_for_debugger_visualizer(
+ tcx,
+ tcx.hir().local_def_id_to_hir_id(id),
+ &mut debugger_visualizers,
+ )
+ });
+
+ // Collect debugger visualizers on the crate attributes.
+ check_for_debugger_visualizer(tcx, CRATE_HIR_ID, &mut debugger_visualizers);
+
+ // Extract the found debugger_visualizer items.
+ let mut visualizers = debugger_visualizers.into_iter().collect::<Vec<_>>();
+
+ // Sort the visualizers so we always get a deterministic query result.
+ visualizers.sort();
+ visualizers
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.debugger_visualizers = debugger_visualizers;
+}
diff --git a/compiler/rustc_passes/src/diagnostic_items.rs b/compiler/rustc_passes/src/diagnostic_items.rs
new file mode 100644
index 000000000..e6b69d898
--- /dev/null
+++ b/compiler/rustc_passes/src/diagnostic_items.rs
@@ -0,0 +1,113 @@
+//! Detecting diagnostic items.
+//!
+//! Diagnostic items are items that are not language-inherent, but can reasonably be expected to
+//! exist for diagnostic purposes. This allows diagnostic authors to refer to specific items
+//! directly, without having to guess module paths and crates.
+//! Examples are:
+//!
+//! * Traits like `Debug`, that have no bearing on language semantics
+//!
+//! * Compiler internal types like `Ty` and `TyCtxt`
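+//!
+//! An item becomes a diagnostic item by being tagged with the
+//! `rustc_diagnostic_item` attribute (an illustrative sketch):
+//!
+//! ```ignore (illustrative)
+//! #[rustc_diagnostic_item = "Option"]
+//! pub enum Option<T> { /* ... */ }
+//! ```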
+
+use rustc_ast as ast;
+use rustc_hir::diagnostic_items::DiagnosticItems;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
+use rustc_span::symbol::{sym, Symbol};
+
+fn observe_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ diagnostic_items: &mut DiagnosticItems,
+ def_id: LocalDefId,
+) {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let attrs = tcx.hir().attrs(hir_id);
+ if let Some(name) = extract(attrs) {
+ // insert into our table
+ collect_item(tcx, diagnostic_items, name, def_id.to_def_id());
+ }
+}
+
+fn collect_item(tcx: TyCtxt<'_>, items: &mut DiagnosticItems, name: Symbol, item_def_id: DefId) {
+ items.id_to_name.insert(item_def_id, name);
+ if let Some(original_def_id) = items.name_to_id.insert(name, item_def_id) {
+ if original_def_id != item_def_id {
+ let mut err = match tcx.hir().span_if_local(item_def_id) {
+ Some(span) => tcx
+ .sess
+ .struct_span_err(span, &format!("duplicate diagnostic item found: `{name}`.")),
+ None => tcx.sess.struct_err(&format!(
+ "duplicate diagnostic item in crate `{}`: `{}`.",
+ tcx.crate_name(item_def_id.krate),
+ name
+ )),
+ };
+ if let Some(span) = tcx.hir().span_if_local(original_def_id) {
+ err.span_note(span, "the diagnostic item is first defined here");
+ } else {
+ err.note(&format!(
+ "the diagnostic item is first defined in crate `{}`.",
+ tcx.crate_name(original_def_id.krate)
+ ));
+ }
+ err.emit();
+ }
+ }
+}
+
+/// Extract the first `rustc_diagnostic_item = "$name"` out of a list of attributes.
+fn extract(attrs: &[ast::Attribute]) -> Option<Symbol> {
+ attrs.iter().find_map(|attr| {
+ if attr.has_name(sym::rustc_diagnostic_item) { attr.value_str() } else { None }
+ })
+}
+
+/// Traverse and collect the diagnostic items in the current crate.
+fn diagnostic_items<'tcx>(tcx: TyCtxt<'tcx>, cnum: CrateNum) -> DiagnosticItems {
+ assert_eq!(cnum, LOCAL_CRATE);
+
+ // Initialize the collector.
+ let mut diagnostic_items = DiagnosticItems::default();
+
+ // Collect diagnostic items in this crate.
+ let crate_items = tcx.hir_crate_items(());
+
+ for id in crate_items.items() {
+ observe_item(tcx, &mut diagnostic_items, id.def_id);
+ }
+
+ for id in crate_items.trait_items() {
+ observe_item(tcx, &mut diagnostic_items, id.def_id);
+ }
+
+ for id in crate_items.impl_items() {
+ observe_item(tcx, &mut diagnostic_items, id.def_id);
+ }
+
+ for id in crate_items.foreign_items() {
+ observe_item(tcx, &mut diagnostic_items, id.def_id);
+ }
+
+ diagnostic_items
+}
+
+/// Traverse and collect all the diagnostic items in all crates.
+fn all_diagnostic_items<'tcx>(tcx: TyCtxt<'tcx>, (): ()) -> DiagnosticItems {
+ // Initialize the collector.
+ let mut items = DiagnosticItems::default();
+
+ // Collect diagnostic items in other crates.
+ for &cnum in tcx.crates(()).iter().chain(std::iter::once(&LOCAL_CRATE)) {
+ for (&name, &def_id) in &tcx.diagnostic_items(cnum).name_to_id {
+ collect_item(tcx, &mut items, name, def_id);
+ }
+ }
+
+ items
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.diagnostic_items = diagnostic_items;
+ providers.all_diagnostic_items = all_diagnostic_items;
+}
diff --git a/compiler/rustc_passes/src/entry.rs b/compiler/rustc_passes/src/entry.rs
new file mode 100644
index 000000000..7381019a6
--- /dev/null
+++ b/compiler/rustc_passes/src/entry.rs
@@ -0,0 +1,231 @@
+use rustc_ast::{entry::EntryPointType, Attribute};
+use rustc_errors::struct_span_err;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::{ItemId, Node, CRATE_HIR_ID};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{DefIdTree, TyCtxt};
+use rustc_session::config::{CrateType, EntryFnType};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::sym;
+use rustc_span::{Span, Symbol, DUMMY_SP};
+
+struct EntryContext<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ /// The function annotated with the `#[rustc_main]` attribute, if any.
+ attr_main_fn: Option<(LocalDefId, Span)>,
+
+ /// The function annotated with the `#[start]` attribute, if any.
+ start_fn: Option<(LocalDefId, Span)>,
+
+ /// The functions that one might think are `main` but aren't, e.g.
+ /// main functions not defined at the top level. For diagnostics.
+ non_main_fns: Vec<Span>,
+}
+
+fn entry_fn(tcx: TyCtxt<'_>, (): ()) -> Option<(DefId, EntryFnType)> {
+ let any_exe = tcx.sess.crate_types().iter().any(|ty| *ty == CrateType::Executable);
+ if !any_exe {
+ // No need to find a main function.
+ return None;
+ }
+
+ // If the user wants no main function at all, then stop here.
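+ // (That is, the crate carries the `#![no_main]` attribute.)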
+ if tcx.sess.contains_name(&tcx.hir().attrs(CRATE_HIR_ID), sym::no_main) {
+ return None;
+ }
+
+ let mut ctxt =
+ EntryContext { tcx, attr_main_fn: None, start_fn: None, non_main_fns: Vec::new() };
+
+ for id in tcx.hir().items() {
+ find_item(id, &mut ctxt);
+ }
+
+ configure_main(tcx, &ctxt)
+}
+
+// Beware, this is duplicated in `librustc_builtin_macros/test_harness.rs`
+// (with `ast::Item`), so make sure to keep them in sync.
+// A small optimization was added so that hir::Item is fetched only when needed.
+// An equivalent optimization was not applied to the duplicated code in test_harness.rs.
+fn entry_point_type(ctxt: &EntryContext<'_>, id: ItemId, at_root: bool) -> EntryPointType {
+ let attrs = ctxt.tcx.hir().attrs(id.hir_id());
+ if ctxt.tcx.sess.contains_name(attrs, sym::start) {
+ EntryPointType::Start
+ } else if ctxt.tcx.sess.contains_name(attrs, sym::rustc_main) {
+ EntryPointType::RustcMainAttr
+ } else if let Some(name) = ctxt.tcx.opt_item_name(id.def_id.to_def_id())
+ && name == sym::main
+ {
+ if at_root {
+ // This is a top-level function, so it can be `main`.
+ EntryPointType::MainNamed
+ } else {
+ EntryPointType::OtherMain
+ }
+ } else {
+ EntryPointType::None
+ }
+}
+
+fn err_if_attr_found(ctxt: &EntryContext<'_>, attrs: &[Attribute], sym: Symbol) {
+ if let Some(attr) = ctxt.tcx.sess.find_by_name(attrs, sym) {
+ ctxt.tcx
+ .sess
+ .struct_span_err(
+ attr.span,
+ &format!("`{}` attribute can only be used on functions", sym),
+ )
+ .emit();
+ }
+}
+
+fn find_item(id: ItemId, ctxt: &mut EntryContext<'_>) {
+ let at_root = ctxt.tcx.opt_local_parent(id.def_id) == Some(CRATE_DEF_ID);
+
+ match entry_point_type(ctxt, id, at_root) {
+ EntryPointType::None => (),
+ _ if !matches!(ctxt.tcx.def_kind(id.def_id), DefKind::Fn) => {
+ let attrs = ctxt.tcx.hir().attrs(id.hir_id());
+ err_if_attr_found(ctxt, attrs, sym::start);
+ err_if_attr_found(ctxt, attrs, sym::rustc_main);
+ }
+ EntryPointType::MainNamed => (),
+ EntryPointType::OtherMain => {
+ ctxt.non_main_fns.push(ctxt.tcx.def_span(id.def_id));
+ }
+ EntryPointType::RustcMainAttr => {
+ if ctxt.attr_main_fn.is_none() {
+ ctxt.attr_main_fn = Some((id.def_id, ctxt.tcx.def_span(id.def_id.to_def_id())));
+ } else {
+ struct_span_err!(
+ ctxt.tcx.sess,
+ ctxt.tcx.def_span(id.def_id.to_def_id()),
+ E0137,
+ "multiple functions with a `#[rustc_main]` attribute"
+ )
+ .span_label(
+ ctxt.tcx.def_span(id.def_id.to_def_id()),
+ "additional `#[rustc_main]` function",
+ )
+ .span_label(ctxt.attr_main_fn.unwrap().1, "first `#[rustc_main]` function")
+ .emit();
+ }
+ }
+ EntryPointType::Start => {
+ if ctxt.start_fn.is_none() {
+ ctxt.start_fn = Some((id.def_id, ctxt.tcx.def_span(id.def_id.to_def_id())));
+ } else {
+ struct_span_err!(
+ ctxt.tcx.sess,
+ ctxt.tcx.def_span(id.def_id.to_def_id()),
+ E0138,
+ "multiple `start` functions"
+ )
+ .span_label(ctxt.start_fn.unwrap().1, "previous `#[start]` function here")
+ .span_label(ctxt.tcx.def_span(id.def_id.to_def_id()), "multiple `start` functions")
+ .emit();
+ }
+ }
+ }
+}
+
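+/// Picks the entry point with the following precedence: a `#[start]`
+/// function, then a `#[rustc_main]` function, then a function resolved
+/// as `crate::main`.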
+fn configure_main(tcx: TyCtxt<'_>, visitor: &EntryContext<'_>) -> Option<(DefId, EntryFnType)> {
+ if let Some((def_id, _)) = visitor.start_fn {
+ Some((def_id.to_def_id(), EntryFnType::Start))
+ } else if let Some((def_id, _)) = visitor.attr_main_fn {
+ Some((def_id.to_def_id(), EntryFnType::Main))
+ } else {
+ if let Some(main_def) = tcx.resolutions(()).main_def
+ && let Some(def_id) = main_def.opt_fn_def_id()
+ {
+ // non-local main imports are handled below
+ if let Some(def_id) = def_id.as_local()
+ && matches!(tcx.hir().find_by_def_id(def_id), Some(Node::ForeignItem(_)))
+ {
+ tcx.sess
+ .struct_span_err(
+ tcx.def_span(def_id),
+ "the `main` function cannot be declared in an `extern` block",
+ )
+ .emit();
+ return None;
+ }
+
+ if main_def.is_import && !tcx.features().imported_main {
+ let span = main_def.span;
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::imported_main,
+ span,
+ "using an imported function as entry point `main` is experimental",
+ )
+ .emit();
+ }
+ return Some((def_id, EntryFnType::Main));
+ }
+ no_main_err(tcx, visitor);
+ None
+ }
+}
+
+fn no_main_err(tcx: TyCtxt<'_>, visitor: &EntryContext<'_>) {
+ let sp = tcx.def_span(CRATE_DEF_ID);
+ if *tcx.sess.parse_sess.reached_eof.borrow() {
+ // There's an unclosed brace that made the parser reach `Eof`, we shouldn't complain about
+ // the missing `fn main()` then as it might have been hidden inside an unclosed block.
+ tcx.sess.delay_span_bug(sp, "`main` not found, but expected unclosed brace error");
+ return;
+ }
+
+ // There is no main function.
+ let mut err = struct_span_err!(
+ tcx.sess,
+ DUMMY_SP,
+ E0601,
+ "`main` function not found in crate `{}`",
+ tcx.crate_name(LOCAL_CRATE)
+ );
+ let filename = &tcx.sess.local_crate_source_file;
+ let note = if !visitor.non_main_fns.is_empty() {
+ for &span in &visitor.non_main_fns {
+ err.span_note(span, "here is a function named `main`");
+ }
+ err.note("you have one or more functions named `main` not defined at the crate level");
+ err.help("consider moving the `main` function definitions");
+ // There were some functions named `main` though. Try to give the user a hint.
+ format!(
+ "the main function must be defined at the crate level{}",
+ filename.as_ref().map(|f| format!(" (in `{}`)", f.display())).unwrap_or_default()
+ )
+ } else if let Some(filename) = filename {
+ format!("consider adding a `main` function to `{}`", filename.display())
+ } else {
+ String::from("consider adding a `main` function at the crate level")
+ };
+ // The file may be empty, which leads to the diagnostic machinery not emitting this
+ // note. This is a relatively simple way to detect that case and emit a span-less
+ // note instead.
+ if tcx.sess.source_map().lookup_line(sp.hi()).is_ok() {
+ err.set_span(sp.shrink_to_hi());
+ err.span_label(sp.shrink_to_hi(), &note);
+ } else {
+ err.note(&note);
+ }
+
+ if let Some(main_def) = tcx.resolutions(()).main_def
+ && main_def.opt_fn_def_id().is_none()
+ {
+ // There is something at `crate::main`, but it is not a function definition.
+ err.span_label(main_def.span, "non-function item at `crate::main` is found");
+ }
+
+ if tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "If you don't know the basics of Rust, you can go look to the Rust Book \
+ to get started: https://doc.rust-lang.org/book/",
+ );
+ }
+ err.emit();
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { entry_fn, ..*providers };
+}
diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs
new file mode 100644
index 000000000..5feb0e295
--- /dev/null
+++ b/compiler/rustc_passes/src/errors.rs
@@ -0,0 +1,645 @@
+use rustc_errors::{Applicability, MultiSpan};
+use rustc_macros::{LintDiagnostic, SessionDiagnostic, SessionSubdiagnostic};
+use rustc_span::{Span, Symbol};
+
+#[derive(LintDiagnostic)]
+#[lint(passes::outer_crate_level_attr)]
+pub struct OuterCrateLevelAttr;
+
+#[derive(LintDiagnostic)]
+#[lint(passes::inner_crate_level_attr)]
+pub struct InnerCrateLevelAttr;
+
+#[derive(LintDiagnostic)]
+#[lint(passes::ignored_attr_with_macro)]
+pub struct IgnoredAttrWithMacro<'a> {
+ pub sym: &'a str,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::ignored_attr)]
+pub struct IgnoredAttr<'a> {
+ pub sym: &'a str,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::inline_ignored_function_prototype)]
+pub struct IgnoredInlineAttrFnProto;
+
+#[derive(LintDiagnostic)]
+#[lint(passes::inline_ignored_constants)]
+#[warn_]
+#[note]
+pub struct IgnoredInlineAttrConstants;
+
+#[derive(SessionDiagnostic)]
+#[error(passes::inline_not_fn_or_closure, code = "E0518")]
+pub struct InlineNotFnOrClosure {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub defn_span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::no_coverage_ignored_function_prototype)]
+pub struct IgnoredNoCoverageFnProto;
+
+#[derive(LintDiagnostic)]
+#[lint(passes::no_coverage_propagate)]
+pub struct IgnoredNoCoveragePropagate;
+
+#[derive(LintDiagnostic)]
+#[lint(passes::no_coverage_fn_defn)]
+pub struct IgnoredNoCoverageFnDefn;
+
+#[derive(SessionDiagnostic)]
+#[error(passes::no_coverage_not_coverable, code = "E0788")]
+pub struct IgnoredNoCoverageNotCoverable {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub defn_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::should_be_applied_to_fn)]
+pub struct AttrShouldBeAppliedToFn {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub defn_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::naked_tracked_caller, code = "E0736")]
+pub struct NakedTrackedCaller {
+ #[primary_span]
+ pub attr_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::should_be_applied_to_fn, code = "E0739")]
+pub struct TrackedCallerWrongLocation {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub defn_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::should_be_applied_to_struct_enum, code = "E0701")]
+pub struct NonExhaustiveWrongLocation {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub defn_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::should_be_applied_to_trait)]
+pub struct AttrShouldBeAppliedToTrait {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub defn_span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::target_feature_on_statement)]
+pub struct TargetFeatureOnStatement;
+
+#[derive(SessionDiagnostic)]
+#[error(passes::should_be_applied_to_static)]
+pub struct AttrShouldBeAppliedToStatic {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub defn_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_expect_str)]
+pub struct DocExpectStr<'a> {
+ #[primary_span]
+ pub attr_span: Span,
+ pub attr_name: &'a str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_alias_empty)]
+pub struct DocAliasEmpty<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub attr_str: &'a str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_alias_bad_char)]
+pub struct DocAliasBadChar<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub attr_str: &'a str,
+ pub char_: char,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_alias_start_end)]
+pub struct DocAliasStartEnd<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub attr_str: &'a str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_alias_bad_location)]
+pub struct DocAliasBadLocation<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub attr_str: &'a str,
+ pub location: &'a str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_alias_not_an_alias)]
+pub struct DocAliasNotAnAlias<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub attr_str: &'a str,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::doc_alias_duplicated)]
+pub struct DocAliasDuplicated {
+ #[label]
+ pub first_defn: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_alias_not_string_literal)]
+pub struct DocAliasNotStringLiteral {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_alias_malformed)]
+pub struct DocAliasMalformed {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_keyword_empty_mod)]
+pub struct DocKeywordEmptyMod {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_keyword_not_mod)]
+pub struct DocKeywordNotMod {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_keyword_invalid_ident)]
+pub struct DocKeywordInvalidIdent {
+ #[primary_span]
+ pub span: Span,
+ pub doc_keyword: Symbol,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_fake_variadic_not_valid)]
+pub struct DocFakeVariadicNotValid {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_keyword_only_impl)]
+pub struct DocKeywordOnlyImpl {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_inline_conflict)]
+#[help]
+pub struct DocKeywordConflict {
+ #[primary_span]
+ pub spans: MultiSpan,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::doc_inline_only_use)]
+#[note]
+pub struct DocInlineOnlyUse {
+ #[label]
+ pub attr_span: Span,
+ #[label(passes::not_a_use_item_label)]
+ pub item_span: Option<Span>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::doc_attr_not_crate_level)]
+pub struct DocAttrNotCrateLevel<'a> {
+ #[primary_span]
+ pub span: Span,
+ pub attr_name: &'a str,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::doc_test_unknown)]
+pub struct DocTestUnknown {
+ pub path: String,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::doc_test_takes_list)]
+pub struct DocTestTakesList;
+
+#[derive(LintDiagnostic)]
+#[lint(passes::doc_primitive)]
+pub struct DocPrimitive;
+
+#[derive(LintDiagnostic)]
+#[lint(passes::doc_test_unknown_any)]
+pub struct DocTestUnknownAny {
+ pub path: String,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::doc_test_unknown_spotlight)]
+#[note]
+#[note(passes::no_op_note)]
+pub struct DocTestUnknownSpotlight {
+ pub path: String,
+ #[suggestion_short(applicability = "machine-applicable", code = "notable_trait")]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::doc_test_unknown_include)]
+pub struct DocTestUnknownInclude {
+ pub path: String,
+ pub value: String,
+ pub inner: &'static str,
+ #[suggestion(code = "#{inner}[doc = include_str!(\"{value}\")]")]
+ pub sugg: (Span, Applicability),
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::doc_invalid)]
+pub struct DocInvalid;
+
+#[derive(SessionDiagnostic)]
+#[error(passes::pass_by_value)]
+pub struct PassByValue {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::allow_incoherent_impl)]
+pub struct AllowIncoherentImpl {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::has_incoherent_inherent_impl)]
+pub struct HasIncoherentInherentImpl {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::must_use_async)]
+pub struct MustUseAsync {
+ #[label]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::must_use_no_effect)]
+pub struct MustUseNoEffect {
+ pub article: &'static str,
+ pub target: rustc_hir::Target,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::must_not_suspend)]
+pub struct MustNotSuspend {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::cold)]
+#[warn_]
+pub struct Cold {
+ #[label]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::link)]
+#[warn_]
+pub struct Link {
+ #[label]
+ pub span: Option<Span>,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::link_name)]
+#[warn_]
+pub struct LinkName<'a> {
+ #[help]
+ pub attr_span: Option<Span>,
+ #[label]
+ pub span: Span,
+ pub value: &'a str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::no_link)]
+pub struct NoLink {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::export_name)]
+pub struct ExportName {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_layout_scalar_valid_range_not_struct)]
+pub struct RustcLayoutScalarValidRangeNotStruct {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_layout_scalar_valid_range_arg)]
+pub struct RustcLayoutScalarValidRangeArg {
+ #[primary_span]
+ pub attr_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_legacy_const_generics_only)]
+pub struct RustcLegacyConstGenericsOnly {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub param_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_legacy_const_generics_index)]
+pub struct RustcLegacyConstGenericsIndex {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub generics_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_legacy_const_generics_index_exceed)]
+pub struct RustcLegacyConstGenericsIndexExceed {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub arg_count: usize,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_legacy_const_generics_index_negative)]
+pub struct RustcLegacyConstGenericsIndexNegative {
+ #[primary_span]
+ pub invalid_args: Vec<Span>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_dirty_clean)]
+pub struct RustcDirtyClean {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::link_section)]
+#[warn_]
+pub struct LinkSection {
+ #[label]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::no_mangle_foreign)]
+#[warn_]
+#[note]
+pub struct NoMangleForeign {
+ #[label]
+ pub span: Span,
+ #[suggestion(applicability = "machine-applicable")]
+ pub attr_span: Span,
+ pub foreign_item_kind: &'static str,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::no_mangle)]
+#[warn_]
+pub struct NoMangle {
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::repr_ident, code = "E0565")]
+pub struct ReprIdent {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::repr_conflicting, code = "E0566")]
+pub struct ReprConflicting;
+
+#[derive(SessionDiagnostic)]
+#[error(passes::used_static)]
+pub struct UsedStatic {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::used_compiler_linker)]
+pub struct UsedCompilerLinker {
+ #[primary_span]
+ pub spans: Vec<Span>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::allow_internal_unstable)]
+pub struct AllowInternalUnstable {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::debug_visualizer_placement)]
+pub struct DebugVisualizerPlacement {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::debug_visualizer_invalid)]
+#[note(passes::note_1)]
+#[note(passes::note_2)]
+#[note(passes::note_3)]
+pub struct DebugVisualizerInvalid {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_allow_const_fn_unstable)]
+pub struct RustcAllowConstFnUnstable {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_std_internal_symbol)]
+pub struct RustcStdInternalSymbol {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::const_trait)]
+pub struct ConstTrait {
+ #[primary_span]
+ pub attr_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::stability_promotable)]
+pub struct StabilityPromotable {
+ #[primary_span]
+ pub attr_span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::deprecated)]
+pub struct Deprecated;
+
+#[derive(LintDiagnostic)]
+#[lint(passes::macro_use)]
+pub struct MacroUse {
+ pub name: Symbol,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::macro_export)]
+pub struct MacroExport;
+
+#[derive(LintDiagnostic)]
+#[lint(passes::plugin_registrar)]
+pub struct PluginRegistrar;
+
+#[derive(SessionSubdiagnostic)]
+pub enum UnusedNote {
+ #[note(passes::unused_empty_lints_note)]
+ EmptyList { name: Symbol },
+ #[note(passes::unused_no_lints_note)]
+ NoLints { name: Symbol },
+ #[note(passes::unused_default_method_body_const_note)]
+ DefaultMethodBodyConst,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::unused)]
+pub struct Unused {
+ #[suggestion(applicability = "machine-applicable")]
+ pub attr_span: Span,
+ #[subdiagnostic]
+ pub note: UnusedNote,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::non_exported_macro_invalid_attrs, code = "E0518")]
+pub struct NonExportedMacroInvalidAttrs {
+ #[primary_span]
+ #[label]
+ pub attr_span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(passes::unused_duplicate)]
+pub struct UnusedDuplicate {
+ #[primary_span]
+ #[suggestion(code = "", applicability = "machine-applicable")]
+ pub this: Span,
+ #[note]
+ pub other: Span,
+ #[warn_]
+ pub warning: Option<()>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::unused_multiple)]
+pub struct UnusedMultiple {
+ #[primary_span]
+ #[suggestion(code = "", applicability = "machine-applicable")]
+ pub this: Span,
+ #[note]
+ pub other: Span,
+ pub name: Symbol,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_lint_opt_ty)]
+pub struct RustcLintOptTy {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(passes::rustc_lint_opt_deny_field_access)]
+pub struct RustcLintOptDenyFieldAccess {
+ #[primary_span]
+ pub attr_span: Span,
+ #[label]
+ pub span: Span,
+}
diff --git a/compiler/rustc_passes/src/hir_id_validator.rs b/compiler/rustc_passes/src/hir_id_validator.rs
new file mode 100644
index 000000000..212ea9e57
--- /dev/null
+++ b/compiler/rustc_passes/src/hir_id_validator.rs
@@ -0,0 +1,164 @@
+use rustc_data_structures::sync::Lock;
+use rustc_hir as hir;
+use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
+use rustc_hir::intravisit;
+use rustc_hir::{HirId, ItemLocalId};
+use rustc_index::bit_set::GrowableBitSet;
+use rustc_middle::hir::map::Map;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::TyCtxt;
+
+pub fn check_crate(tcx: TyCtxt<'_>) {
+ tcx.dep_graph.assert_ignored();
+
+ if tcx.sess.opts.unstable_opts.hir_stats {
+ crate::hir_stats::print_hir_stats(tcx);
+ }
+
+ #[cfg(debug_assertions)]
+ {
+ let errors = Lock::new(Vec::new());
+ let hir_map = tcx.hir();
+
+ hir_map.par_for_each_module(|module_id| {
+ let mut v = HirIdValidator {
+ hir_map,
+ owner: None,
+ hir_ids_seen: Default::default(),
+ errors: &errors,
+ };
+
+ tcx.hir().visit_item_likes_in_module(module_id, &mut v);
+ });
+
+ let errors = errors.into_inner();
+
+ if !errors.is_empty() {
+ let message = errors.iter().fold(String::new(), |s1, s2| s1 + "\n" + s2);
+ tcx.sess.delay_span_bug(rustc_span::DUMMY_SP, &message);
+ }
+ }
+}
+
+struct HirIdValidator<'a, 'hir> {
+ hir_map: Map<'hir>,
+ owner: Option<LocalDefId>,
+ hir_ids_seen: GrowableBitSet<ItemLocalId>,
+ errors: &'a Lock<Vec<String>>,
+}
+
+impl<'a, 'hir> HirIdValidator<'a, 'hir> {
+ fn new_visitor(&self, hir_map: Map<'hir>) -> HirIdValidator<'a, 'hir> {
+ HirIdValidator {
+ hir_map,
+ owner: None,
+ hir_ids_seen: Default::default(),
+ errors: self.errors,
+ }
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn error(&self, f: impl FnOnce() -> String) {
+ self.errors.lock().push(f());
+ }
+
+ fn check<F: FnOnce(&mut HirIdValidator<'a, 'hir>)>(&mut self, owner: LocalDefId, walk: F) {
+ assert!(self.owner.is_none());
+ self.owner = Some(owner);
+ walk(self);
+
+ if owner == CRATE_DEF_ID {
+ return;
+ }
+
+ // There's always at least one entry for the owning item itself
+ let max = self
+ .hir_ids_seen
+ .iter()
+ .map(|local_id| local_id.as_usize())
+ .max()
+ .expect("owning item has no entry");
+
+ if max != self.hir_ids_seen.len() - 1 {
+ // Collect the missing ItemLocalIds
+ let missing: Vec<_> = (0..=max as u32)
+ .filter(|&i| !self.hir_ids_seen.contains(ItemLocalId::from_u32(i)))
+ .collect();
+
+ // Try to map those to something more useful
+ let mut missing_items = Vec::with_capacity(missing.len());
+
+ for local_id in missing {
+ let hir_id = HirId { owner, local_id: ItemLocalId::from_u32(local_id) };
+
+ trace!("missing hir id {:#?}", hir_id);
+
+ missing_items.push(format!(
+ "[local_id: {}, owner: {}]",
+ local_id,
+ self.hir_map.def_path(owner).to_string_no_crate_verbose()
+ ));
+ }
+ self.error(|| {
+ format!(
+ "ItemLocalIds not assigned densely in {}. \
+ Max ItemLocalId = {}, missing IDs = {:?}; seen IDs = {:?}",
+ self.hir_map.def_path(owner).to_string_no_crate_verbose(),
+ max,
+ missing_items,
+ self.hir_ids_seen
+ .iter()
+ .map(|local_id| HirId { owner, local_id })
+ .map(|h| format!("({:?} {})", h, self.hir_map.node_to_string(h)))
+ .collect::<Vec<_>>()
+ )
+ });
+ }
+ }
+}
+
+impl<'a, 'hir> intravisit::Visitor<'hir> for HirIdValidator<'a, 'hir> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.hir_map
+ }
+
+ fn visit_item(&mut self, i: &'hir hir::Item<'hir>) {
+ let mut inner_visitor = self.new_visitor(self.hir_map);
+ inner_visitor.check(i.def_id, |this| intravisit::walk_item(this, i));
+ }
+
+ fn visit_id(&mut self, hir_id: HirId) {
+ let owner = self.owner.expect("no owner");
+
+ if owner != hir_id.owner {
+ self.error(|| {
+ format!(
+ "HirIdValidator: The recorded owner of {} is {} instead of {}",
+ self.hir_map.node_to_string(hir_id),
+ self.hir_map.def_path(hir_id.owner).to_string_no_crate_verbose(),
+ self.hir_map.def_path(owner).to_string_no_crate_verbose()
+ )
+ });
+ }
+
+ self.hir_ids_seen.insert(hir_id.local_id);
+ }
+
+ fn visit_foreign_item(&mut self, i: &'hir hir::ForeignItem<'hir>) {
+ let mut inner_visitor = self.new_visitor(self.hir_map);
+ inner_visitor.check(i.def_id, |this| intravisit::walk_foreign_item(this, i));
+ }
+
+ fn visit_trait_item(&mut self, i: &'hir hir::TraitItem<'hir>) {
+ let mut inner_visitor = self.new_visitor(self.hir_map);
+ inner_visitor.check(i.def_id, |this| intravisit::walk_trait_item(this, i));
+ }
+
+ fn visit_impl_item(&mut self, i: &'hir hir::ImplItem<'hir>) {
+ let mut inner_visitor = self.new_visitor(self.hir_map);
+ inner_visitor.check(i.def_id, |this| intravisit::walk_impl_item(this, i));
+ }
+}
diff --git a/compiler/rustc_passes/src/hir_stats.rs b/compiler/rustc_passes/src/hir_stats.rs
new file mode 100644
index 000000000..a3be827a7
--- /dev/null
+++ b/compiler/rustc_passes/src/hir_stats.rs
@@ -0,0 +1,344 @@
+// The visitors in this module collect sizes and counts of the most important
+// pieces of AST and HIR. The resulting numbers are good approximations but not
+// completely accurate (some things might be counted twice, others missed).
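+//
+// A typical line of the printed table looks like this (illustrative numbers):
+//
+//     Expr                       1_024_000        16_000            64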
+
+use rustc_ast::visit as ast_visit;
+use rustc_ast::visit::BoundKind;
+use rustc_ast::{self as ast, AttrId, NodeId};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir as hir;
+use rustc_hir::intravisit as hir_visit;
+use rustc_hir::HirId;
+use rustc_middle::hir::map::Map;
+use rustc_middle::ty::TyCtxt;
+use rustc_middle::util::common::to_readable_str;
+use rustc_span::Span;
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+enum Id {
+ Node(HirId),
+ Attr(AttrId),
+ None,
+}
+
+struct NodeData {
+ count: usize,
+ size: usize,
+}
+
+struct StatCollector<'k> {
+ krate: Option<Map<'k>>,
+ data: FxHashMap<&'static str, NodeData>,
+ seen: FxHashSet<Id>,
+}
+
+pub fn print_hir_stats(tcx: TyCtxt<'_>) {
+ let mut collector = StatCollector {
+ krate: Some(tcx.hir()),
+ data: FxHashMap::default(),
+ seen: FxHashSet::default(),
+ };
+ tcx.hir().walk_toplevel_module(&mut collector);
+ tcx.hir().walk_attributes(&mut collector);
+ collector.print("HIR STATS");
+}
+
+pub fn print_ast_stats(krate: &ast::Crate, title: &str) {
+ let mut collector =
+ StatCollector { krate: None, data: FxHashMap::default(), seen: FxHashSet::default() };
+ ast_visit::walk_crate(&mut collector, krate);
+ collector.print(title);
+}
+
+impl<'k> StatCollector<'k> {
+ fn record<T>(&mut self, label: &'static str, id: Id, node: &T) {
+ if id != Id::None && !self.seen.insert(id) {
+ return;
+ }
+
+ let entry = self.data.entry(label).or_insert(NodeData { count: 0, size: 0 });
+
+ entry.count += 1;
+ entry.size = std::mem::size_of_val(node);
+ }
+
+ fn print(&self, title: &str) {
+ let mut stats: Vec<_> = self.data.iter().collect();
+
+ stats.sort_by_key(|&(_, ref d)| d.count * d.size);
+
+ let mut total_size = 0;
+
+ eprintln!("\n{}\n", title);
+
+ eprintln!("{:<18}{:>18}{:>14}{:>14}", "Name", "Accumulated Size", "Count", "Item Size");
+ eprintln!("----------------------------------------------------------------");
+
+ for (label, data) in stats {
+ eprintln!(
+ "{:<18}{:>18}{:>14}{:>14}",
+ label,
+ to_readable_str(data.count * data.size),
+ to_readable_str(data.count),
+ to_readable_str(data.size)
+ );
+
+ total_size += data.count * data.size;
+ }
+ eprintln!("----------------------------------------------------------------");
+ eprintln!("{:<18}{:>18}\n", "Total", to_readable_str(total_size));
+ }
+}
+
+impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> {
+ fn visit_param(&mut self, param: &'v hir::Param<'v>) {
+ self.record("Param", Id::Node(param.hir_id), param);
+ hir_visit::walk_param(self, param)
+ }
+
+ fn visit_nested_item(&mut self, id: hir::ItemId) {
+ let nested_item = self.krate.unwrap().item(id);
+ self.visit_item(nested_item)
+ }
+
+ fn visit_nested_trait_item(&mut self, trait_item_id: hir::TraitItemId) {
+ let nested_trait_item = self.krate.unwrap().trait_item(trait_item_id);
+ self.visit_trait_item(nested_trait_item)
+ }
+
+ fn visit_nested_impl_item(&mut self, impl_item_id: hir::ImplItemId) {
+ let nested_impl_item = self.krate.unwrap().impl_item(impl_item_id);
+ self.visit_impl_item(nested_impl_item)
+ }
+
+ fn visit_nested_foreign_item(&mut self, id: hir::ForeignItemId) {
+ let nested_foreign_item = self.krate.unwrap().foreign_item(id);
+ self.visit_foreign_item(nested_foreign_item);
+ }
+
+ fn visit_nested_body(&mut self, body_id: hir::BodyId) {
+ let nested_body = self.krate.unwrap().body(body_id);
+ self.visit_body(nested_body)
+ }
+
+ fn visit_item(&mut self, i: &'v hir::Item<'v>) {
+ self.record("Item", Id::Node(i.hir_id()), i);
+ hir_visit::walk_item(self, i)
+ }
+
+ fn visit_foreign_item(&mut self, i: &'v hir::ForeignItem<'v>) {
+ self.record("ForeignItem", Id::Node(i.hir_id()), i);
+ hir_visit::walk_foreign_item(self, i)
+ }
+
+ fn visit_local(&mut self, l: &'v hir::Local<'v>) {
+ self.record("Local", Id::Node(l.hir_id), l);
+ hir_visit::walk_local(self, l)
+ }
+
+ fn visit_block(&mut self, b: &'v hir::Block<'v>) {
+ self.record("Block", Id::Node(b.hir_id), b);
+ hir_visit::walk_block(self, b)
+ }
+
+ fn visit_stmt(&mut self, s: &'v hir::Stmt<'v>) {
+ self.record("Stmt", Id::Node(s.hir_id), s);
+ hir_visit::walk_stmt(self, s)
+ }
+
+ fn visit_arm(&mut self, a: &'v hir::Arm<'v>) {
+ self.record("Arm", Id::Node(a.hir_id), a);
+ hir_visit::walk_arm(self, a)
+ }
+
+ fn visit_pat(&mut self, p: &'v hir::Pat<'v>) {
+ self.record("Pat", Id::Node(p.hir_id), p);
+ hir_visit::walk_pat(self, p)
+ }
+
+ fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) {
+ self.record("Expr", Id::Node(ex.hir_id), ex);
+ hir_visit::walk_expr(self, ex)
+ }
+
+ fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
+ self.record("Ty", Id::Node(t.hir_id), t);
+ hir_visit::walk_ty(self, t)
+ }
+
+ fn visit_fn(
+ &mut self,
+ fk: hir_visit::FnKind<'v>,
+ fd: &'v hir::FnDecl<'v>,
+ b: hir::BodyId,
+ s: Span,
+ id: hir::HirId,
+ ) {
+ self.record("FnDecl", Id::None, fd);
+ hir_visit::walk_fn(self, fk, fd, b, s, id)
+ }
+
+ fn visit_where_predicate(&mut self, predicate: &'v hir::WherePredicate<'v>) {
+ self.record("WherePredicate", Id::None, predicate);
+ hir_visit::walk_where_predicate(self, predicate)
+ }
+
+ fn visit_trait_item(&mut self, ti: &'v hir::TraitItem<'v>) {
+ self.record("TraitItem", Id::Node(ti.hir_id()), ti);
+ hir_visit::walk_trait_item(self, ti)
+ }
+
+ fn visit_impl_item(&mut self, ii: &'v hir::ImplItem<'v>) {
+ self.record("ImplItem", Id::Node(ii.hir_id()), ii);
+ hir_visit::walk_impl_item(self, ii)
+ }
+
+ fn visit_param_bound(&mut self, bounds: &'v hir::GenericBound<'v>) {
+ self.record("GenericBound", Id::None, bounds);
+ hir_visit::walk_param_bound(self, bounds)
+ }
+
+ fn visit_field_def(&mut self, s: &'v hir::FieldDef<'v>) {
+ self.record("FieldDef", Id::Node(s.hir_id), s);
+ hir_visit::walk_field_def(self, s)
+ }
+
+ fn visit_variant(
+ &mut self,
+ v: &'v hir::Variant<'v>,
+ g: &'v hir::Generics<'v>,
+ item_id: hir::HirId,
+ ) {
+ self.record("Variant", Id::None, v);
+ hir_visit::walk_variant(self, v, g, item_id)
+ }
+
+ fn visit_lifetime(&mut self, lifetime: &'v hir::Lifetime) {
+ self.record("Lifetime", Id::Node(lifetime.hir_id), lifetime);
+ hir_visit::walk_lifetime(self, lifetime)
+ }
+
+ fn visit_qpath(&mut self, qpath: &'v hir::QPath<'v>, id: hir::HirId, span: Span) {
+ self.record("QPath", Id::None, qpath);
+ hir_visit::walk_qpath(self, qpath, id, span)
+ }
+
+ fn visit_path(&mut self, path: &'v hir::Path<'v>, _id: hir::HirId) {
+ self.record("Path", Id::None, path);
+ hir_visit::walk_path(self, path)
+ }
+
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v hir::PathSegment<'v>) {
+ self.record("PathSegment", Id::None, path_segment);
+ hir_visit::walk_path_segment(self, path_span, path_segment)
+ }
+
+ fn visit_assoc_type_binding(&mut self, type_binding: &'v hir::TypeBinding<'v>) {
+ self.record("TypeBinding", Id::Node(type_binding.hir_id), type_binding);
+ hir_visit::walk_assoc_type_binding(self, type_binding)
+ }
+
+ fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
+ self.record("Attribute", Id::Attr(attr.id), attr);
+ }
+}
+
+impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> {
+ fn visit_foreign_item(&mut self, i: &'v ast::ForeignItem) {
+ self.record("ForeignItem", Id::None, i);
+ ast_visit::walk_foreign_item(self, i)
+ }
+
+ fn visit_item(&mut self, i: &'v ast::Item) {
+ self.record("Item", Id::None, i);
+ ast_visit::walk_item(self, i)
+ }
+
+ fn visit_local(&mut self, l: &'v ast::Local) {
+ self.record("Local", Id::None, l);
+ ast_visit::walk_local(self, l)
+ }
+
+ fn visit_block(&mut self, b: &'v ast::Block) {
+ self.record("Block", Id::None, b);
+ ast_visit::walk_block(self, b)
+ }
+
+ fn visit_stmt(&mut self, s: &'v ast::Stmt) {
+ self.record("Stmt", Id::None, s);
+ ast_visit::walk_stmt(self, s)
+ }
+
+ fn visit_arm(&mut self, a: &'v ast::Arm) {
+ self.record("Arm", Id::None, a);
+ ast_visit::walk_arm(self, a)
+ }
+
+ fn visit_pat(&mut self, p: &'v ast::Pat) {
+ self.record("Pat", Id::None, p);
+ ast_visit::walk_pat(self, p)
+ }
+
+ fn visit_expr(&mut self, ex: &'v ast::Expr) {
+ self.record("Expr", Id::None, ex);
+ ast_visit::walk_expr(self, ex)
+ }
+
+ fn visit_ty(&mut self, t: &'v ast::Ty) {
+ self.record("Ty", Id::None, t);
+ ast_visit::walk_ty(self, t)
+ }
+
+ fn visit_fn(&mut self, fk: ast_visit::FnKind<'v>, s: Span, _: NodeId) {
+ self.record("FnDecl", Id::None, fk.decl());
+ ast_visit::walk_fn(self, fk, s)
+ }
+
+ fn visit_assoc_item(&mut self, item: &'v ast::AssocItem, ctxt: ast_visit::AssocCtxt) {
+ let label = match ctxt {
+ ast_visit::AssocCtxt::Trait => "TraitItem",
+ ast_visit::AssocCtxt::Impl => "ImplItem",
+ };
+ self.record(label, Id::None, item);
+ ast_visit::walk_assoc_item(self, item, ctxt);
+ }
+
+ fn visit_param_bound(&mut self, bounds: &'v ast::GenericBound, _ctxt: BoundKind) {
+ self.record("GenericBound", Id::None, bounds);
+ ast_visit::walk_param_bound(self, bounds)
+ }
+
+ fn visit_field_def(&mut self, s: &'v ast::FieldDef) {
+ self.record("FieldDef", Id::None, s);
+ ast_visit::walk_field_def(self, s)
+ }
+
+ fn visit_variant(&mut self, v: &'v ast::Variant) {
+ self.record("Variant", Id::None, v);
+ ast_visit::walk_variant(self, v)
+ }
+
+ fn visit_lifetime(&mut self, lifetime: &'v ast::Lifetime, _: ast_visit::LifetimeCtxt) {
+ self.record("Lifetime", Id::None, lifetime);
+ ast_visit::walk_lifetime(self, lifetime)
+ }
+
+ fn visit_mac_call(&mut self, mac: &'v ast::MacCall) {
+ self.record("MacCall", Id::None, mac);
+ ast_visit::walk_mac(self, mac)
+ }
+
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'v ast::PathSegment) {
+ self.record("PathSegment", Id::None, path_segment);
+ ast_visit::walk_path_segment(self, path_span, path_segment)
+ }
+
+ fn visit_assoc_constraint(&mut self, constraint: &'v ast::AssocConstraint) {
+ self.record("AssocConstraint", Id::None, constraint);
+ ast_visit::walk_assoc_constraint(self, constraint)
+ }
+
+ fn visit_attribute(&mut self, attr: &'v ast::Attribute) {
+ self.record("Attribute", Id::None, attr);
+ }
+}
diff --git a/compiler/rustc_passes/src/lang_items.rs b/compiler/rustc_passes/src/lang_items.rs
new file mode 100644
index 000000000..79900a90a
--- /dev/null
+++ b/compiler/rustc_passes/src/lang_items.rs
@@ -0,0 +1,278 @@
+//! Detecting language items.
+//!
+//! Language items are items that represent concepts intrinsic to the language
+//! itself. Examples are:
+//!
+//! * Traits that specify "kinds"; e.g., `Sync`, `Send`.
+//! * Traits that represent operators; e.g., `Add`, `Sub`, `Index`.
+//! * Functions called by the compiler itself.
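+//!
+//! As an illustrative sketch (not the exact library source), a lang item is
+//! declared by attaching `#[lang = "..."]` to the defining item; e.g. the
+//! `add` lang item ties the `+` operator to the `Add` trait:
+//!
+//! ```ignore (illustrative)
+//! #[lang = "add"]
+//! trait Add<Rhs = Self> {
+//!     type Output;
+//!     fn add(self, rhs: Rhs) -> Self::Output;
+//! }
+//! ```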
+
+use crate::check_attr::target_from_impl_item;
+use crate::weak_lang_items;
+
+use rustc_errors::{pluralize, struct_span_err};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::{extract, GenericRequirement, ITEM_REFS};
+use rustc_hir::{HirId, LangItem, LanguageItems, Target};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::cstore::ExternCrate;
+use rustc_span::Span;
+
+use rustc_middle::ty::query::Providers;
+
+struct LanguageItemCollector<'tcx> {
+ items: LanguageItems,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> LanguageItemCollector<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>) -> LanguageItemCollector<'tcx> {
+ LanguageItemCollector { tcx, items: LanguageItems::new() }
+ }
+
+ fn check_for_lang(&mut self, actual_target: Target, hir_id: HirId) {
+ let attrs = self.tcx.hir().attrs(hir_id);
+ if let Some((value, span)) = extract(&attrs) {
+ match ITEM_REFS.get(&value).cloned() {
+ // Known lang item with attribute on correct target.
+ Some((item_index, expected_target)) if actual_target == expected_target => {
+ self.collect_item_extended(item_index, hir_id, span);
+ }
+ // Known lang item with attribute on incorrect target.
+ Some((_, expected_target)) => {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0718,
+ "`{}` language item must be applied to a {}",
+ value,
+ expected_target,
+ )
+ .span_label(
+ span,
+ format!(
+ "attribute should be applied to a {}, not a {}",
+ expected_target, actual_target,
+ ),
+ )
+ .emit();
+ }
+ // Unknown lang item.
+ _ => {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0522,
+ "definition of an unknown language item: `{}`",
+ value
+ )
+ .span_label(span, format!("definition of unknown language item `{}`", value))
+ .emit();
+ }
+ }
+ }
+ }
+
+ fn collect_item(&mut self, item_index: usize, item_def_id: DefId) {
+ // Check for duplicates.
+ if let Some(original_def_id) = self.items.items[item_index] {
+ if original_def_id != item_def_id {
+ let lang_item = LangItem::from_u32(item_index as u32).unwrap();
+ let name = lang_item.name();
+ let mut err = match self.tcx.hir().span_if_local(item_def_id) {
+ Some(span) => struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0152,
+ "found duplicate lang item `{}`",
+ name
+ ),
+ None => match self.tcx.extern_crate(item_def_id) {
+ Some(ExternCrate { dependency_of, .. }) => {
+ self.tcx.sess.struct_err(&format!(
+ "duplicate lang item in crate `{}` (which `{}` depends on): `{}`.",
+ self.tcx.crate_name(item_def_id.krate),
+ self.tcx.crate_name(*dependency_of),
+ name
+ ))
+ }
+ _ => self.tcx.sess.struct_err(&format!(
+ "duplicate lang item in crate `{}`: `{}`.",
+ self.tcx.crate_name(item_def_id.krate),
+ name
+ )),
+ },
+ };
+ if let Some(span) = self.tcx.hir().span_if_local(original_def_id) {
+ err.span_note(span, "the lang item is first defined here");
+ } else {
+ match self.tcx.extern_crate(original_def_id) {
+ Some(ExternCrate { dependency_of, .. }) => {
+ err.note(&format!(
+ "the lang item is first defined in crate `{}` (which `{}` depends on)",
+ self.tcx.crate_name(original_def_id.krate),
+ self.tcx.crate_name(*dependency_of)
+ ));
+ }
+ _ => {
+ err.note(&format!(
+ "the lang item is first defined in crate `{}`.",
+ self.tcx.crate_name(original_def_id.krate)
+ ));
+ }
+ }
+ let mut note_def = |which, def_id: DefId| {
+ let crate_name = self.tcx.crate_name(def_id.krate);
+ let note = if def_id.is_local() {
+ format!("{} definition in the local crate (`{}`)", which, crate_name)
+ } else {
+ let paths: Vec<_> = self
+ .tcx
+ .crate_extern_paths(def_id.krate)
+ .iter()
+ .map(|p| p.display().to_string())
+ .collect();
+ format!(
+ "{} definition in `{}` loaded from {}",
+ which,
+ crate_name,
+ paths.join(", ")
+ )
+ };
+ err.note(&note);
+ };
+ note_def("first", original_def_id);
+ note_def("second", item_def_id);
+ }
+ err.emit();
+ }
+ }
+
+ // Matched.
+ self.items.items[item_index] = Some(item_def_id);
+ if let Some(group) = LangItem::from_u32(item_index as u32).unwrap().group() {
+ self.items.groups[group as usize].push(item_def_id);
+ }
+ }
+
+ // Like collect_item() above, but also checks whether the lang item is declared
+ // with the right number of generic arguments.
+ fn collect_item_extended(&mut self, item_index: usize, hir_id: HirId, span: Span) {
+ let item_def_id = self.tcx.hir().local_def_id(hir_id).to_def_id();
+ let lang_item = LangItem::from_u32(item_index as u32).unwrap();
+ let name = lang_item.name();
+
+ // Now check whether the lang_item has the expected number of generic
+ // arguments. Generally speaking, binary and indexing operations have
+ // one (for the RHS/index), unary operations have none, the closure
+ // traits have one for the argument list, generators have one for the
+        // resume argument, and ordering/equality relations have one for the RHS.
+        // Some other types like Box and various functions like drop_in_place
+ // have minimum requirements.
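+        //
+        // For example (illustrative): the `add` lang item is a trait with
+        // exactly one generic parameter (`Add<Rhs>`), so declaring it with
+        // any other number trips the `GenericRequirement::Exact` check and
+        // is reported via E0718 below.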
+
+ if let hir::Node::Item(hir::Item { kind, span: item_span, .. }) = self.tcx.hir().get(hir_id)
+ {
+ let (actual_num, generics_span) = match kind.generics() {
+ Some(generics) => (generics.params.len(), generics.span),
+ None => (0, *item_span),
+ };
+
+ let required = match lang_item.required_generics() {
+ GenericRequirement::Exact(num) if num != actual_num => {
+ Some((format!("{}", num), pluralize!(num)))
+ }
+ GenericRequirement::Minimum(num) if actual_num < num => {
+ Some((format!("at least {}", num), pluralize!(num)))
+ }
+ // If the number matches, or there is no requirement, handle it normally
+ _ => None,
+ };
+
+ if let Some((range_str, pluralized)) = required {
+ // We are issuing E0718 "incorrect target" here, because while the
+ // item kind of the target is correct, the target is still wrong
+ // because of the wrong number of generic arguments.
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0718,
+ "`{}` language item must be applied to a {} with {} generic argument{}",
+ name,
+ kind.descr(),
+ range_str,
+ pluralized,
+ )
+ .span_label(
+ generics_span,
+ format!(
+ "this {} has {} generic argument{}",
+ kind.descr(),
+ actual_num,
+ pluralize!(actual_num),
+ ),
+ )
+ .emit();
+
+ // return early to not collect the lang item
+ return;
+ }
+ }
+
+ self.collect_item(item_index, item_def_id);
+ }
+}
+
+/// Traverses and collects all the lang items in all crates.
+fn get_lang_items(tcx: TyCtxt<'_>, (): ()) -> LanguageItems {
+ // Initialize the collector.
+ let mut collector = LanguageItemCollector::new(tcx);
+
+ // Collect lang items in other crates.
+ for &cnum in tcx.crates(()).iter() {
+ for &(def_id, item_index) in tcx.defined_lang_items(cnum).iter() {
+ collector.collect_item(item_index, def_id);
+ }
+ }
+
+ // Collect lang items in this crate.
+ let crate_items = tcx.hir_crate_items(());
+
+ for id in crate_items.items() {
+ collector.check_for_lang(Target::from_def_kind(tcx.def_kind(id.def_id)), id.hir_id());
+
+ if matches!(tcx.def_kind(id.def_id), DefKind::Enum) {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::Enum(def, ..) = &item.kind {
+ for variant in def.variants {
+ collector.check_for_lang(Target::Variant, variant.id);
+ }
+ }
+ }
+ }
+
+ // FIXME: avoid calling trait_item() when possible
+ for id in crate_items.trait_items() {
+ let item = tcx.hir().trait_item(id);
+ collector.check_for_lang(Target::from_trait_item(item), item.hir_id())
+ }
+
+ // FIXME: avoid calling impl_item() when possible
+ for id in crate_items.impl_items() {
+ let item = tcx.hir().impl_item(id);
+ collector.check_for_lang(target_from_impl_item(tcx, item), item.hir_id())
+ }
+
+ // Extract out the found lang items.
+ let LanguageItemCollector { mut items, .. } = collector;
+
+ // Find all required but not-yet-defined lang items.
+ weak_lang_items::check_crate(tcx, &mut items);
+
+ items
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.get_lang_items = get_lang_items;
+}
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
new file mode 100644
index 000000000..fd03f6571
--- /dev/null
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -0,0 +1,132 @@
+use rustc_ast::Attribute;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
+use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_target::abi::{HasDataLayout, TargetDataLayout};
+
+pub fn test_layout(tcx: TyCtxt<'_>) {
+ if tcx.features().rustc_attrs {
+        // Only test layout when the `rustc_attrs` feature is enabled.
+ for id in tcx.hir().items() {
+ if matches!(
+ tcx.def_kind(id.def_id),
+ DefKind::TyAlias | DefKind::Enum | DefKind::Struct | DefKind::Union
+ ) {
+ for attr in tcx.get_attrs(id.def_id.to_def_id(), sym::rustc_layout) {
+ dump_layout_of(tcx, id.def_id, attr);
+ }
+ }
+ }
+ }
+}
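+
+// Illustrative usage of the attribute this pass handles (in test code,
+// under `#![feature(rustc_attrs)]`; the struct here is hypothetical):
+//
+//     #[rustc_layout(size, align)]
+//     struct S(u8, u32);
+//
+// Each requested field is reported through `span_err`, so the values show
+// up in the test's expected stderr output.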
+
+fn dump_layout_of<'tcx>(tcx: TyCtxt<'tcx>, item_def_id: LocalDefId, attr: &Attribute) {
+ let param_env = tcx.param_env(item_def_id);
+ let ty = tcx.type_of(item_def_id);
+ match tcx.layout_of(param_env.and(ty)) {
+ Ok(ty_layout) => {
+            // Inspect the `#[rustc_layout(..)]` attribute to tell what to dump;
+            // the `..` are the names of the fields to dump.
+ let meta_items = attr.meta_item_list().unwrap_or_default();
+ for meta_item in meta_items {
+ match meta_item.name_or_empty() {
+ sym::abi => {
+ tcx.sess.span_err(
+ tcx.def_span(item_def_id.to_def_id()),
+ &format!("abi: {:?}", ty_layout.abi),
+ );
+ }
+
+ sym::align => {
+ tcx.sess.span_err(
+ tcx.def_span(item_def_id.to_def_id()),
+ &format!("align: {:?}", ty_layout.align),
+ );
+ }
+
+ sym::size => {
+ tcx.sess.span_err(
+ tcx.def_span(item_def_id.to_def_id()),
+ &format!("size: {:?}", ty_layout.size),
+ );
+ }
+
+ sym::homogeneous_aggregate => {
+ tcx.sess.span_err(
+ tcx.def_span(item_def_id.to_def_id()),
+ &format!(
+ "homogeneous_aggregate: {:?}",
+ ty_layout.homogeneous_aggregate(&UnwrapLayoutCx { tcx, param_env }),
+ ),
+ );
+ }
+
+ sym::debug => {
+ let normalized_ty = tcx.normalize_erasing_regions(
+ param_env.with_reveal_all_normalized(tcx),
+ ty,
+ );
+ tcx.sess.span_err(
+ tcx.def_span(item_def_id.to_def_id()),
+ &format!("layout_of({:?}) = {:#?}", normalized_ty, *ty_layout),
+ );
+ }
+
+ name => {
+ tcx.sess.span_err(
+ meta_item.span(),
+ &format!("unrecognized field name `{}`", name),
+ );
+ }
+ }
+ }
+ }
+
+ Err(layout_error) => {
+ tcx.sess.span_err(
+ tcx.def_span(item_def_id.to_def_id()),
+ &format!("layout error: {:?}", layout_error),
+ );
+ }
+ }
+}
+
+struct UnwrapLayoutCx<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for UnwrapLayoutCx<'tcx> {
+ type LayoutOfResult = TyAndLayout<'tcx>;
+
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
+ span_bug!(
+ span,
+ "`#[rustc_layout(..)]` test resulted in `layout_of({}) = Err({})`",
+ ty,
+ err
+ );
+ }
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for UnwrapLayoutCx<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
+
+impl<'tcx> HasParamEnv<'tcx> for UnwrapLayoutCx<'tcx> {
+ fn param_env(&self) -> ParamEnv<'tcx> {
+ self.param_env
+ }
+}
+
+impl<'tcx> HasDataLayout for UnwrapLayoutCx<'tcx> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ self.tcx.data_layout()
+ }
+}
diff --git a/compiler/rustc_passes/src/lib.rs b/compiler/rustc_passes/src/lib.rs
new file mode 100644
index 000000000..7b2f83958
--- /dev/null
+++ b/compiler/rustc_passes/src/lib.rs
@@ -0,0 +1,59 @@
+//! Various checks
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![allow(rustc::potential_query_instability)]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(iter_intersperse)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(map_try_insert)]
+#![feature(min_specialization)]
+#![feature(try_blocks)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate rustc_middle;
+#[macro_use]
+extern crate tracing;
+
+use rustc_middle::ty::query::Providers;
+
+mod check_attr;
+mod check_const;
+pub mod dead;
+mod debugger_visualizer;
+mod diagnostic_items;
+pub mod entry;
+mod errors;
+pub mod hir_id_validator;
+pub mod hir_stats;
+mod lang_items;
+pub mod layout_test;
+mod lib_features;
+mod liveness;
+pub mod loops;
+mod naked_functions;
+mod reachable;
+pub mod stability;
+mod upvars;
+mod weak_lang_items;
+
+pub fn provide(providers: &mut Providers) {
+ check_attr::provide(providers);
+ check_const::provide(providers);
+ dead::provide(providers);
+ debugger_visualizer::provide(providers);
+ diagnostic_items::provide(providers);
+ entry::provide(providers);
+ lang_items::provide(providers);
+ lib_features::provide(providers);
+ loops::provide(providers);
+ naked_functions::provide(providers);
+ liveness::provide(providers);
+ reachable::provide(providers);
+ stability::provide(providers);
+ upvars::provide(providers);
+}
diff --git a/compiler/rustc_passes/src/lib_features.rs b/compiler/rustc_passes/src/lib_features.rs
new file mode 100644
index 000000000..e05994f13
--- /dev/null
+++ b/compiler/rustc_passes/src/lib_features.rs
@@ -0,0 +1,138 @@
+//! Detecting lib features (i.e., features that are not lang features).
+//!
+//! These are declared using stability attributes (e.g., `#[stable(..)]` and `#[unstable(..)]`),
+//! but are not declared in one single location (unlike lang features), which means we need to
+//! collect them instead.
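+//!
+//! As an illustrative sketch, an item annotated like
+//!
+//! ```ignore (illustrative)
+//! #[stable(feature = "rust1", since = "1.0.0")]
+//! pub fn size_of<T>() -> usize { /* ... */ }
+//! ```
+//!
+//! contributes the pair (`rust1`, `1.0.0`) to the stable lib-feature map,
+//! while an `#[unstable(feature = "...", issue = "...")]` item records an
+//! unstable feature.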
+
+use rustc_ast::{Attribute, MetaItemKind};
+use rustc_errors::struct_span_err;
+use rustc_hir::intravisit::Visitor;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::lib_features::LibFeatures;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::Symbol;
+use rustc_span::{sym, Span};
+
+fn new_lib_features() -> LibFeatures {
+ LibFeatures { stable: Default::default(), unstable: Default::default() }
+}
+
+pub struct LibFeatureCollector<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ lib_features: LibFeatures,
+}
+
+impl<'tcx> LibFeatureCollector<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>) -> LibFeatureCollector<'tcx> {
+ LibFeatureCollector { tcx, lib_features: new_lib_features() }
+ }
+
+ fn extract(&self, attr: &Attribute) -> Option<(Symbol, Option<Symbol>, Span)> {
+ let stab_attrs =
+ [sym::stable, sym::unstable, sym::rustc_const_stable, sym::rustc_const_unstable];
+
+ // Find a stability attribute: one of #[stable(…)], #[unstable(…)],
+ // #[rustc_const_stable(…)], or #[rustc_const_unstable(…)].
+ if let Some(stab_attr) = stab_attrs.iter().find(|stab_attr| attr.has_name(**stab_attr)) {
+ let meta_kind = attr.meta_kind();
+ if let Some(MetaItemKind::List(ref metas)) = meta_kind {
+ let mut feature = None;
+ let mut since = None;
+ for meta in metas {
+ if let Some(mi) = meta.meta_item() {
+ // Find the `feature = ".."` meta-item.
+ match (mi.name_or_empty(), mi.value_str()) {
+ (sym::feature, val) => feature = val,
+ (sym::since, val) => since = val,
+ _ => {}
+ }
+ }
+ }
+ if let Some(feature) = feature {
+                    // This extra check for stability makes sure we don't
+                    // emit additional, irrelevant errors for malformed
+                    // attributes.
+ let is_unstable =
+ matches!(*stab_attr, sym::unstable | sym::rustc_const_unstable);
+ if since.is_some() || is_unstable {
+ return Some((feature, since, attr.span));
+ }
+ }
+ // We need to iterate over the other attributes, because
+ // `rustc_const_unstable` is not mutually exclusive with
+ // the other stability attributes, so we can't just `break`
+ // here.
+ }
+ }
+
+ None
+ }
+
+ fn collect_feature(&mut self, feature: Symbol, since: Option<Symbol>, span: Span) {
+ let already_in_stable = self.lib_features.stable.contains_key(&feature);
+ let already_in_unstable = self.lib_features.unstable.contains_key(&feature);
+
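+        // A descriptive sketch of the arms below: a stable declaration that
+        // was never seen as unstable is recorded (erroring if an earlier one
+        // gave a different `since`); an unstable declaration never seen as
+        // stable is recorded; anything else means the feature was previously
+        // declared with the opposite stability, which is an error.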
+ match (since, already_in_stable, already_in_unstable) {
+ (Some(since), _, false) => {
+ if let Some((prev_since, _)) = self.lib_features.stable.get(&feature) {
+ if *prev_since != since {
+ self.span_feature_error(
+ span,
+ &format!(
+ "feature `{}` is declared stable since {}, \
+ but was previously declared stable since {}",
+ feature, since, prev_since,
+ ),
+ );
+ return;
+ }
+ }
+
+ self.lib_features.stable.insert(feature, (since, span));
+ }
+ (None, false, _) => {
+ self.lib_features.unstable.insert(feature, span);
+ }
+ (Some(_), _, true) | (None, true, _) => {
+ self.span_feature_error(
+ span,
+ &format!(
+ "feature `{}` is declared {}, but was previously declared {}",
+ feature,
+ if since.is_some() { "stable" } else { "unstable" },
+ if since.is_none() { "stable" } else { "unstable" },
+ ),
+ );
+ }
+ }
+ }
+
+ fn span_feature_error(&self, span: Span, msg: &str) {
+ struct_span_err!(self.tcx.sess, span, E0711, "{}", &msg).emit();
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for LibFeatureCollector<'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_attribute(&mut self, attr: &'tcx Attribute) {
+ if let Some((feature, stable, span)) = self.extract(attr) {
+ self.collect_feature(feature, stable, span);
+ }
+ }
+}
+
+fn lib_features(tcx: TyCtxt<'_>, (): ()) -> LibFeatures {
+ let mut collector = LibFeatureCollector::new(tcx);
+ tcx.hir().walk_attributes(&mut collector);
+ collector.lib_features
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.lib_features = lib_features;
+}
diff --git a/compiler/rustc_passes/src/liveness.rs b/compiler/rustc_passes/src/liveness.rs
new file mode 100644
index 000000000..461dd52b9
--- /dev/null
+++ b/compiler/rustc_passes/src/liveness.rs
@@ -0,0 +1,1687 @@
+//! A classic liveness analysis based on dataflow over the AST. Computes,
+//! for each local variable in a function, whether that variable is live
+//! at a given point. Program execution points are identified by their
+//! IDs.
+//!
+//! # Basic idea
+//!
+//! The basic model is that each local variable is assigned an index. We
+//! represent sets of local variables using a vector indexed by this
+//! index. The value in the vector is either 0, indicating the variable
+//! is dead, or the ID of an expression that uses the variable.
+//!
+//! We conceptually walk over the AST in reverse execution order. If we
+//! find a use of a variable, we add it to the set of live variables. If
+//! we find an assignment to a variable, we remove it from the set of live
+//! variables. When we have to merge two flows, we take the union of
+//! those two flows -- if the variable is live on both paths, we simply
+//! pick one ID. In the event of loops, we continue doing this until a
+//! fixed point is reached.
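+//!
+//! For example (an illustrative sketch), walking this body in reverse
+//! execution order:
+//!
+//! ```ignore (illustrative)
+//! let x;
+//! x = 1;            // assignment: `x` is removed from the live set
+//! if c { use(x); }  // use on one branch: `x` is live there
+//! ```
+//!
+//! Merging the branches of the `if` takes the union, so `x` is live (tagged
+//! with the ID of `use(x)`) between the assignment and the `if`; the
+//! assignment then removes it, leaving `x` dead at the function entry, as
+//! required.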
+//!
+//! ## Checking initialization
+//!
+//! At the function entry point, all variables must be dead. If this is
+//! not the case, we can report an error using the ID found in the set of
+//! live variables, which identifies a use of the variable which is not
+//! dominated by an assignment.
+//!
+//! ## Checking moves
+//!
+//! After each explicit move, the variable must be dead.
+//!
+//! ## Computing last uses
+//!
+//! Any use of the variable where the variable is dead afterwards is a
+//! last use.
+//!
+//! # Implementation details
+//!
+//! The actual implementation contains two (nested) walks over the AST.
+//! The outer walk has the job of building up the ir_maps instance for the
+//! enclosing function. On the way down the tree, it identifies those AST
+//! nodes and variable IDs that will be needed for the liveness analysis
+//! and assigns them contiguous IDs. The liveness ID for an AST node is
+//! called a `live_node` (it's a newtype'd `u32`) and the ID for a variable
+//! is called a `variable` (another newtype'd `u32`).
+//!
+//! On the way back up the tree, as we are about to exit from a function
+//! declaration we allocate a `liveness` instance. Now that we know
+//! precisely how many nodes and variables we need, we can allocate all
+//! the various arrays that we will need to precisely the right size. We then
+//! perform the actual propagation on the `liveness` instance.
+//!
+//! This propagation is encoded in the various `propagate_through_*()`
+//! methods. It effectively does a reverse walk of the AST; whenever we
+//! reach a loop node, we iterate until a fixed point is reached.
+//!
+//! ## The `RWU` struct
+//!
+//! At each live node `N`, we track three pieces of information for each
+//! variable `V` (these are encapsulated in the `RWU` struct):
+//!
+//! - `reader`: the `LiveNode` ID of some node which will read the value
+//! that `V` holds on entry to `N`. Formally: a node `M` such
+//! that there exists a path `P` from `N` to `M` where `P` does not
+//! write `V`. If the `reader` is `None`, then the current
+//! value will never be read (the variable is dead, essentially).
+//!
+//! - `writer`: the `LiveNode` ID of some node which will write the
+//! variable `V` and which is reachable from `N`. Formally: a node `M`
+//! such that there exists a path `P` from `N` to `M` and `M` writes
+//! `V`. If the `writer` is `None`, then there is no writer
+//! of `V` that follows `N`.
+//!
+//! - `used`: a boolean value indicating whether `V` is *used*. We
+//! distinguish a *read* from a *use* in that a *use* is some read that
+//! is not just used to generate a new value. For example, `x += 1` is
+//! a read but not a use. This is used to generate better warnings.
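+//!
+//! For instance (an illustrative sketch), in `v = f(); g(v);`, at the point
+//! `N` just before `v = f()`: `reader` is `None`, because the value `v`
+//! holds on entry to `N` is overwritten before `g(v)` can read it; `writer`
+//! is the `v = f()` node; and `used` is true, since `g(v)` does eventually
+//! use `v`.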
+//!
+//! ## Special nodes and variables
+//!
+//! We generate various special nodes for various, well, special purposes.
+//! These are described in the `Liveness` struct.
+
+use self::LiveNodeKind::*;
+use self::VarKind::*;
+
+use rustc_ast::InlineAsmOptions;
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_hir::def::*;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Expr, HirId, HirIdMap, HirIdSet};
+use rustc_index::vec::IndexVec;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, DefIdTree, RootVariableMinCaptureList, Ty, TyCtxt};
+use rustc_session::lint;
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::Span;
+
+use std::collections::VecDeque;
+use std::io;
+use std::io::prelude::*;
+use std::rc::Rc;
+
+mod rwu_table;
+
+rustc_index::newtype_index! {
+ pub struct Variable {
+ DEBUG_FORMAT = "v({})",
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct LiveNode {
+ DEBUG_FORMAT = "ln({})",
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+enum LiveNodeKind {
+ UpvarNode(Span),
+ ExprNode(Span, HirId),
+ VarDefNode(Span, HirId),
+ ClosureNode,
+ ExitNode,
+}
+
+fn live_node_kind_to_string(lnk: LiveNodeKind, tcx: TyCtxt<'_>) -> String {
+ let sm = tcx.sess.source_map();
+ match lnk {
+ UpvarNode(s) => format!("Upvar node [{}]", sm.span_to_diagnostic_string(s)),
+ ExprNode(s, _) => format!("Expr node [{}]", sm.span_to_diagnostic_string(s)),
+ VarDefNode(s, _) => format!("Var def node [{}]", sm.span_to_diagnostic_string(s)),
+ ClosureNode => "Closure node".to_owned(),
+ ExitNode => "Exit node".to_owned(),
+ }
+}
+
+fn check_mod_liveness(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ tcx.hir().visit_item_likes_in_module(module_def_id, &mut IrMaps::new(tcx));
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { check_mod_liveness, ..*providers };
+}
+
+// ______________________________________________________________________
+// Creating ir_maps
+//
+// This is the first pass and the one that drives the main
+// computation. It walks up and down the IR once. On the way down,
+// we count for each function the number of variables as well as
+// liveness nodes. A liveness node is basically an expression or
+// capture clause that does something of interest: either it has
+// interesting control flow or it uses/defines a local variable.
+//
+// On the way back up, at each function node we create liveness sets
+// (we now know precisely how big to make our various vectors and so
+// forth) and then do the data-flow propagation to compute the set
+// of live variables at each program point.
+//
+// Finally, we run back over the IR one last time and, using the
+// computed liveness, check various safety conditions. For example,
+// there must be no live nodes at the definition site for a variable
+// unless it has an initializer. Similarly, each non-mutable local
+// variable must not be assigned if there is some successor
+// assignment. And so forth.
+
+struct CaptureInfo {
+ ln: LiveNode,
+ var_hid: HirId,
+}
+
+#[derive(Copy, Clone, Debug)]
+struct LocalInfo {
+ id: HirId,
+ name: Symbol,
+ is_shorthand: bool,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum VarKind {
+ Param(HirId, Symbol),
+ Local(LocalInfo),
+ Upvar(HirId, Symbol),
+}
+
+struct IrMaps<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ live_node_map: HirIdMap<LiveNode>,
+ variable_map: HirIdMap<Variable>,
+ capture_info_map: HirIdMap<Rc<Vec<CaptureInfo>>>,
+ var_kinds: IndexVec<Variable, VarKind>,
+ lnks: IndexVec<LiveNode, LiveNodeKind>,
+}
+
+impl<'tcx> IrMaps<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>) -> IrMaps<'tcx> {
+ IrMaps {
+ tcx,
+ live_node_map: HirIdMap::default(),
+ variable_map: HirIdMap::default(),
+ capture_info_map: Default::default(),
+ var_kinds: IndexVec::new(),
+ lnks: IndexVec::new(),
+ }
+ }
+
+ fn add_live_node(&mut self, lnk: LiveNodeKind) -> LiveNode {
+ let ln = self.lnks.push(lnk);
+
+ debug!("{:?} is of kind {}", ln, live_node_kind_to_string(lnk, self.tcx));
+
+ ln
+ }
+
+ fn add_live_node_for_node(&mut self, hir_id: HirId, lnk: LiveNodeKind) {
+ let ln = self.add_live_node(lnk);
+ self.live_node_map.insert(hir_id, ln);
+
+ debug!("{:?} is node {:?}", ln, hir_id);
+ }
+
+ fn add_variable(&mut self, vk: VarKind) -> Variable {
+ let v = self.var_kinds.push(vk);
+
+ match vk {
+ Local(LocalInfo { id: node_id, .. }) | Param(node_id, _) | Upvar(node_id, _) => {
+ self.variable_map.insert(node_id, v);
+ }
+ }
+
+ debug!("{:?} is {:?}", v, vk);
+
+ v
+ }
+
+ fn variable(&self, hir_id: HirId, span: Span) -> Variable {
+ match self.variable_map.get(&hir_id) {
+ Some(&var) => var,
+ None => {
+ span_bug!(span, "no variable registered for id {:?}", hir_id);
+ }
+ }
+ }
+
+ fn variable_name(&self, var: Variable) -> Symbol {
+ match self.var_kinds[var] {
+ Local(LocalInfo { name, .. }) | Param(_, name) | Upvar(_, name) => name,
+ }
+ }
+
+ fn variable_is_shorthand(&self, var: Variable) -> bool {
+ match self.var_kinds[var] {
+ Local(LocalInfo { is_shorthand, .. }) => is_shorthand,
+ Param(..) | Upvar(..) => false,
+ }
+ }
+
+ fn set_captures(&mut self, hir_id: HirId, cs: Vec<CaptureInfo>) {
+ self.capture_info_map.insert(hir_id, Rc::new(cs));
+ }
+
+ fn collect_shorthand_field_ids(&self, pat: &hir::Pat<'tcx>) -> HirIdSet {
+ // For struct patterns, take note of which fields used shorthand
+ // (`x` rather than `x: x`).
+ let mut shorthand_field_ids = HirIdSet::default();
+ let mut pats = VecDeque::new();
+ pats.push_back(pat);
+
+ while let Some(pat) = pats.pop_front() {
+ use rustc_hir::PatKind::*;
+ match &pat.kind {
+ Binding(.., inner_pat) => {
+ pats.extend(inner_pat.iter());
+ }
+ Struct(_, fields, _) => {
+ let (short, not_short): (Vec<_>, _) =
+ fields.iter().partition(|f| f.is_shorthand);
+ shorthand_field_ids.extend(short.iter().map(|f| f.pat.hir_id));
+ pats.extend(not_short.iter().map(|f| f.pat));
+ }
+ Ref(inner_pat, _) | Box(inner_pat) => {
+ pats.push_back(inner_pat);
+ }
+ TupleStruct(_, inner_pats, _) | Tuple(inner_pats, _) | Or(inner_pats) => {
+ pats.extend(inner_pats.iter());
+ }
+ Slice(pre_pats, inner_pat, post_pats) => {
+ pats.extend(pre_pats.iter());
+ pats.extend(inner_pat.iter());
+ pats.extend(post_pats.iter());
+ }
+ _ => {}
+ }
+ }
+
+ shorthand_field_ids
+ }
+
+ fn add_from_pat(&mut self, pat: &hir::Pat<'tcx>) {
+ let shorthand_field_ids = self.collect_shorthand_field_ids(pat);
+
+ pat.each_binding(|_, hir_id, _, ident| {
+ self.add_live_node_for_node(hir_id, VarDefNode(ident.span, hir_id));
+ self.add_variable(Local(LocalInfo {
+ id: hir_id,
+ name: ident.name,
+ is_shorthand: shorthand_field_ids.contains(&hir_id),
+ }));
+ });
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for IrMaps<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_body(&mut self, body: &'tcx hir::Body<'tcx>) {
+ debug!("visit_body {:?}", body.id());
+
+ // swap in a new set of IR maps for this body
+ let mut maps = IrMaps::new(self.tcx);
+ let hir_id = maps.tcx.hir().body_owner(body.id());
+ let local_def_id = maps.tcx.hir().local_def_id(hir_id);
+ let def_id = local_def_id.to_def_id();
+
+ // Don't run unused pass for #[derive()]
+ let parent = self.tcx.local_parent(local_def_id);
+ if let DefKind::Impl = self.tcx.def_kind(parent)
+ && self.tcx.has_attr(parent.to_def_id(), sym::automatically_derived)
+ {
+ return;
+ }
+
+ // Don't run unused pass for #[naked]
+ if self.tcx.has_attr(def_id, sym::naked) {
+ return;
+ }
+
+ if let Some(upvars) = maps.tcx.upvars_mentioned(def_id) {
+ for &var_hir_id in upvars.keys() {
+ let var_name = maps.tcx.hir().name(var_hir_id);
+ maps.add_variable(Upvar(var_hir_id, var_name));
+ }
+ }
+
+ // gather up the various local variables, significant expressions,
+ // and so forth:
+ intravisit::walk_body(&mut maps, body);
+
+ // compute liveness
+ let mut lsets = Liveness::new(&mut maps, local_def_id);
+ let entry_ln = lsets.compute(&body, hir_id);
+ lsets.log_liveness(entry_ln, body.id().hir_id);
+
+ // check for various error conditions
+ lsets.visit_body(body);
+ lsets.warn_about_unused_upvars(entry_ln);
+ lsets.warn_about_unused_args(body, entry_ln);
+ }
+
+ fn visit_local(&mut self, local: &'tcx hir::Local<'tcx>) {
+ self.add_from_pat(&local.pat);
+ if local.els.is_some() {
+ self.add_live_node_for_node(local.hir_id, ExprNode(local.span, local.hir_id));
+ }
+ intravisit::walk_local(self, local);
+ }
+
+ fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
+ self.add_from_pat(&arm.pat);
+ if let Some(hir::Guard::IfLet(ref let_expr)) = arm.guard {
+ self.add_from_pat(let_expr.pat);
+ }
+ intravisit::walk_arm(self, arm);
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ let shorthand_field_ids = self.collect_shorthand_field_ids(param.pat);
+ param.pat.each_binding(|_bm, hir_id, _x, ident| {
+ let var = match param.pat.kind {
+ rustc_hir::PatKind::Struct(..) => Local(LocalInfo {
+ id: hir_id,
+ name: ident.name,
+ is_shorthand: shorthand_field_ids.contains(&hir_id),
+ }),
+ _ => Param(hir_id, ident.name),
+ };
+ self.add_variable(var);
+ });
+ intravisit::walk_param(self, param);
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ match expr.kind {
+ // live nodes required for uses or definitions of variables:
+ hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => {
+ debug!("expr {}: path that leads to {:?}", expr.hir_id, path.res);
+ if let Res::Local(_var_hir_id) = path.res {
+ self.add_live_node_for_node(expr.hir_id, ExprNode(expr.span, expr.hir_id));
+ }
+ intravisit::walk_expr(self, expr);
+ }
+ hir::ExprKind::Closure { .. } => {
+                // Make a live node for the closure expression itself; the
+                // variables it captures are handled just below.
+ self.add_live_node_for_node(expr.hir_id, ExprNode(expr.span, expr.hir_id));
+
+ // Make a live_node for each mentioned variable, with the span
+ // being the location that the variable is used. This results
+ // in better error messages than just pointing at the closure
+ // construction site.
+ let mut call_caps = Vec::new();
+ let closure_def_id = self.tcx.hir().local_def_id(expr.hir_id);
+ if let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) {
+ call_caps.extend(upvars.keys().map(|var_id| {
+ let upvar = upvars[var_id];
+ let upvar_ln = self.add_live_node(UpvarNode(upvar.span));
+ CaptureInfo { ln: upvar_ln, var_hid: *var_id }
+ }));
+ }
+ self.set_captures(expr.hir_id, call_caps);
+ intravisit::walk_expr(self, expr);
+ }
+
+ hir::ExprKind::Let(let_expr) => {
+ self.add_from_pat(let_expr.pat);
+ intravisit::walk_expr(self, expr);
+ }
+
+ // live nodes required for interesting control flow:
+ hir::ExprKind::If(..)
+ | hir::ExprKind::Match(..)
+ | hir::ExprKind::Loop(..)
+ | hir::ExprKind::Yield(..) => {
+ self.add_live_node_for_node(expr.hir_id, ExprNode(expr.span, expr.hir_id));
+ intravisit::walk_expr(self, expr);
+ }
+ hir::ExprKind::Binary(op, ..) if op.node.is_lazy() => {
+ self.add_live_node_for_node(expr.hir_id, ExprNode(expr.span, expr.hir_id));
+ intravisit::walk_expr(self, expr);
+ }
+
+ // otherwise, live nodes are not required:
+ hir::ExprKind::Index(..)
+ | hir::ExprKind::Field(..)
+ | hir::ExprKind::Array(..)
+ | hir::ExprKind::Call(..)
+ | hir::ExprKind::MethodCall(..)
+ | hir::ExprKind::Tup(..)
+ | hir::ExprKind::Binary(..)
+ | hir::ExprKind::AddrOf(..)
+ | hir::ExprKind::Cast(..)
+ | hir::ExprKind::DropTemps(..)
+ | hir::ExprKind::Unary(..)
+ | hir::ExprKind::Break(..)
+ | hir::ExprKind::Continue(_)
+ | hir::ExprKind::Lit(_)
+ | hir::ExprKind::ConstBlock(..)
+ | hir::ExprKind::Ret(..)
+ | hir::ExprKind::Block(..)
+ | hir::ExprKind::Assign(..)
+ | hir::ExprKind::AssignOp(..)
+ | hir::ExprKind::Struct(..)
+ | hir::ExprKind::Repeat(..)
+ | hir::ExprKind::InlineAsm(..)
+ | hir::ExprKind::Box(..)
+ | hir::ExprKind::Type(..)
+ | hir::ExprKind::Err
+ | hir::ExprKind::Path(hir::QPath::TypeRelative(..))
+ | hir::ExprKind::Path(hir::QPath::LangItem(..)) => {
+ intravisit::walk_expr(self, expr);
+ }
+ }
+ }
+}
+
+// ______________________________________________________________________
+// Computing liveness sets
+//
+// Actually we compute just a bit more than just liveness, but we use
+// the same basic propagation framework in all cases.
+
+const ACC_READ: u32 = 1;
+const ACC_WRITE: u32 = 2;
+const ACC_USE: u32 = 4;
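+
+// Accesses combine these flags: e.g. the non-overloaded `AssignOp` arm of
+// `propagate_through_expr` writes the place with `ACC_WRITE | ACC_READ`,
+// while an ordinary read of a path uses `ACC_READ | ACC_USE`.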
+
+struct Liveness<'a, 'tcx> {
+ ir: &'a mut IrMaps<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ closure_min_captures: Option<&'tcx RootVariableMinCaptureList<'tcx>>,
+ successors: IndexVec<LiveNode, Option<LiveNode>>,
+ rwu_table: rwu_table::RWUTable,
+
+ /// A live node representing a point of execution before closure entry &
+ /// after closure exit. Used to calculate liveness of captured variables
+ /// through calls to the same closure. Used for Fn & FnMut closures only.
+ closure_ln: LiveNode,
+ /// A live node representing every 'exit' from the function, whether it be
+ /// by explicit return, panic, or other means.
+ exit_ln: LiveNode,
+
+ // mappings from loop node ID to LiveNode
+ // ("break" label should map to loop node ID,
+ // it probably doesn't now)
+ break_ln: HirIdMap<LiveNode>,
+ cont_ln: HirIdMap<LiveNode>,
+}
+
+impl<'a, 'tcx> Liveness<'a, 'tcx> {
+ fn new(ir: &'a mut IrMaps<'tcx>, body_owner: LocalDefId) -> Liveness<'a, 'tcx> {
+ let typeck_results = ir.tcx.typeck(body_owner);
+ let param_env = ir.tcx.param_env(body_owner);
+ let closure_min_captures = typeck_results.closure_min_captures.get(&body_owner);
+ let closure_ln = ir.add_live_node(ClosureNode);
+ let exit_ln = ir.add_live_node(ExitNode);
+
+ let num_live_nodes = ir.lnks.len();
+ let num_vars = ir.var_kinds.len();
+
+ Liveness {
+ ir,
+ typeck_results,
+ param_env,
+ closure_min_captures,
+ successors: IndexVec::from_elem_n(None, num_live_nodes),
+ rwu_table: rwu_table::RWUTable::new(num_live_nodes, num_vars),
+ closure_ln,
+ exit_ln,
+ break_ln: Default::default(),
+ cont_ln: Default::default(),
+ }
+ }
+
+ fn live_node(&self, hir_id: HirId, span: Span) -> LiveNode {
+ match self.ir.live_node_map.get(&hir_id) {
+ Some(&ln) => ln,
+ None => {
+ // This must be a mismatch between the ir_map construction
+ // above and the propagation code below; the two sets of
+ // code have to agree about which AST nodes are worth
+ // creating liveness nodes for.
+ span_bug!(span, "no live node registered for node {:?}", hir_id);
+ }
+ }
+ }
+
+ fn variable(&self, hir_id: HirId, span: Span) -> Variable {
+ self.ir.variable(hir_id, span)
+ }
+
+ fn define_bindings_in_pat(&mut self, pat: &hir::Pat<'_>, mut succ: LiveNode) -> LiveNode {
+ // In an or-pattern, only consider the first pattern; any later patterns
+ // must have the same bindings, and we also consider the first pattern
+ // to be the "authoritative" set of ids.
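+        // (For example, in `Ok(x) | Err(x)`, only the `x` inside `Ok`
+        // defines the binding here.)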
+ pat.each_binding_or_first(&mut |_, hir_id, pat_sp, ident| {
+ let ln = self.live_node(hir_id, pat_sp);
+ let var = self.variable(hir_id, ident.span);
+ self.init_from_succ(ln, succ);
+ self.define(ln, var);
+ succ = ln;
+ });
+ succ
+ }
+
+ fn live_on_entry(&self, ln: LiveNode, var: Variable) -> bool {
+ self.rwu_table.get_reader(ln, var)
+ }
+
+    // Is this variable live on entry to its successor node?
+ fn live_on_exit(&self, ln: LiveNode, var: Variable) -> bool {
+ let successor = self.successors[ln].unwrap();
+ self.live_on_entry(successor, var)
+ }
+
+ fn used_on_entry(&self, ln: LiveNode, var: Variable) -> bool {
+ self.rwu_table.get_used(ln, var)
+ }
+
+ fn assigned_on_entry(&self, ln: LiveNode, var: Variable) -> bool {
+ self.rwu_table.get_writer(ln, var)
+ }
+
+ fn assigned_on_exit(&self, ln: LiveNode, var: Variable) -> bool {
+ let successor = self.successors[ln].unwrap();
+ self.assigned_on_entry(successor, var)
+ }
+
+ fn write_vars<F>(&self, wr: &mut dyn Write, mut test: F) -> io::Result<()>
+ where
+ F: FnMut(Variable) -> bool,
+ {
+ for var_idx in 0..self.ir.var_kinds.len() {
+ let var = Variable::from(var_idx);
+ if test(var) {
+ write!(wr, " {:?}", var)?;
+ }
+ }
+ Ok(())
+ }
+
+ #[allow(unused_must_use)]
+ fn ln_str(&self, ln: LiveNode) -> String {
+ let mut wr = Vec::new();
+ {
+ let wr = &mut wr as &mut dyn Write;
+ write!(wr, "[{:?} of kind {:?} reads", ln, self.ir.lnks[ln]);
+ self.write_vars(wr, |var| self.rwu_table.get_reader(ln, var));
+ write!(wr, " writes");
+ self.write_vars(wr, |var| self.rwu_table.get_writer(ln, var));
+ write!(wr, " uses");
+ self.write_vars(wr, |var| self.rwu_table.get_used(ln, var));
+
+ write!(wr, " precedes {:?}]", self.successors[ln]);
+ }
+ String::from_utf8(wr).unwrap()
+ }
+
+ fn log_liveness(&self, entry_ln: LiveNode, hir_id: hir::HirId) {
+ // hack to skip the loop unless debug! is enabled:
+ debug!(
+ "^^ liveness computation results for body {} (entry={:?})",
+ {
+ for ln_idx in 0..self.ir.lnks.len() {
+ debug!("{:?}", self.ln_str(LiveNode::from(ln_idx)));
+ }
+ hir_id
+ },
+ entry_ln
+ );
+ }
+
+ fn init_empty(&mut self, ln: LiveNode, succ_ln: LiveNode) {
+ self.successors[ln] = Some(succ_ln);
+
+ // It is not necessary to initialize the RWUs here because they are all
+ // empty when created, and the sets only grow during iterations.
+ }
+
+ fn init_from_succ(&mut self, ln: LiveNode, succ_ln: LiveNode) {
+ // more efficient version of init_empty() / merge_from_succ()
+ self.successors[ln] = Some(succ_ln);
+ self.rwu_table.copy(ln, succ_ln);
+ debug!("init_from_succ(ln={}, succ={})", self.ln_str(ln), self.ln_str(succ_ln));
+ }
+
+ fn merge_from_succ(&mut self, ln: LiveNode, succ_ln: LiveNode) -> bool {
+ if ln == succ_ln {
+ return false;
+ }
+
+ let changed = self.rwu_table.union(ln, succ_ln);
+ debug!("merge_from_succ(ln={:?}, succ={}, changed={})", ln, self.ln_str(succ_ln), changed);
+ changed
+ }
+
+ // Indicates that a local variable was *defined*; we know that no
+ // uses of the variable can precede the definition (resolve checks
+ // this) so we just clear out all the data.
+ fn define(&mut self, writer: LiveNode, var: Variable) {
+ let used = self.rwu_table.get_used(writer, var);
+ self.rwu_table.set(writer, var, rwu_table::RWU { reader: false, writer: false, used });
+ debug!("{:?} defines {:?}: {}", writer, var, self.ln_str(writer));
+ }
+
+ // Either read, write, or both depending on the acc bitset
+ fn acc(&mut self, ln: LiveNode, var: Variable, acc: u32) {
+ debug!("{:?} accesses[{:x}] {:?}: {}", ln, acc, var, self.ln_str(ln));
+
+ let mut rwu = self.rwu_table.get(ln, var);
+
+ if (acc & ACC_WRITE) != 0 {
+ rwu.reader = false;
+ rwu.writer = true;
+ }
+
+        // Important: if we both read and write, the read must be applied
+        // second, or else the write would override it.
+ if (acc & ACC_READ) != 0 {
+ rwu.reader = true;
+ }
+
+ if (acc & ACC_USE) != 0 {
+ rwu.used = true;
+ }
+
+ self.rwu_table.set(ln, var, rwu);
+ }
+
+ fn compute(&mut self, body: &hir::Body<'_>, hir_id: HirId) -> LiveNode {
+ debug!("compute: for body {:?}", body.id().hir_id);
+
+ // # Liveness of captured variables
+ //
+ // When computing the liveness for captured variables we take into
+        // account how the variable is captured (ByRef vs ByValue) and what the
+        // closure kind is (Generator / FnOnce vs Fn / FnMut).
+ //
+ // Variables captured by reference are assumed to be used on the exit
+ // from the closure.
+ //
+ // In FnOnce closures, variables captured by value are known to be dead
+ // on exit since it is impossible to call the closure again.
+ //
+ // In Fn / FnMut closures, variables captured by value are live on exit
+ // if they are live on the entry to the closure, since only the closure
+ // itself can access them on subsequent calls.
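+        //
+        // E.g. (an illustrative sketch): in `let mut inc = || count += 1;`,
+        // `count` is captured by mutable reference, so it is marked below as
+        // read and used at the exit node, since a later call to `inc` can
+        // still observe it.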
+
+ if let Some(closure_min_captures) = self.closure_min_captures {
+ // Mark upvars captured by reference as used after closure exits.
+ for (&var_hir_id, min_capture_list) in closure_min_captures {
+ for captured_place in min_capture_list {
+ match captured_place.info.capture_kind {
+ ty::UpvarCapture::ByRef(_) => {
+ let var = self.variable(
+ var_hir_id,
+ captured_place.get_capture_kind_span(self.ir.tcx),
+ );
+ self.acc(self.exit_ln, var, ACC_READ | ACC_USE);
+ }
+ ty::UpvarCapture::ByValue => {}
+ }
+ }
+ }
+ }
+
+ let succ = self.propagate_through_expr(&body.value, self.exit_ln);
+
+ if self.closure_min_captures.is_none() {
+ // Either not a closure, or closure without any captured variables.
+ // No need to determine liveness of captured variables, since there
+ // are none.
+ return succ;
+ }
+
+ let ty = self.typeck_results.node_type(hir_id);
+ match ty.kind() {
+ ty::Closure(_def_id, substs) => match substs.as_closure().kind() {
+ ty::ClosureKind::Fn => {}
+ ty::ClosureKind::FnMut => {}
+ ty::ClosureKind::FnOnce => return succ,
+ },
+ ty::Generator(..) => return succ,
+ _ => {
+ span_bug!(
+ body.value.span,
+ "{} has upvars so it should have a closure type: {:?}",
+ hir_id,
+ ty
+ );
+ }
+ };
+
+ // Propagate through calls to the closure.
+ loop {
+ self.init_from_succ(self.closure_ln, succ);
+ for param in body.params {
+ param.pat.each_binding(|_bm, hir_id, _x, ident| {
+ let var = self.variable(hir_id, ident.span);
+ self.define(self.closure_ln, var);
+ })
+ }
+
+ if !self.merge_from_succ(self.exit_ln, self.closure_ln) {
+ break;
+ }
+ assert_eq!(succ, self.propagate_through_expr(&body.value, self.exit_ln));
+ }
+
+ succ
+ }
+
+ fn propagate_through_block(&mut self, blk: &hir::Block<'_>, succ: LiveNode) -> LiveNode {
+ if blk.targeted_by_break {
+ self.break_ln.insert(blk.hir_id, succ);
+ }
+ let succ = self.propagate_through_opt_expr(blk.expr, succ);
+ blk.stmts.iter().rev().fold(succ, |succ, stmt| self.propagate_through_stmt(stmt, succ))
+ }
+
+ fn propagate_through_stmt(&mut self, stmt: &hir::Stmt<'_>, succ: LiveNode) -> LiveNode {
+ match stmt.kind {
+ hir::StmtKind::Local(ref local) => {
+ // Note: we mark the variable as defined regardless of whether
+ // there is an initializer. Initially I had thought to only mark
+ // the live variable as defined if it was initialized, and then we
+ // could check for uninit variables just by scanning what is live
+ // at the start of the function. But that doesn't work so well for
+ // immutable variables defined in a loop:
+ // loop { let x; x = 5; }
+ // because the "assignment" loops back around and generates an error.
+ //
+ // So now we just check that variables defined w/o an
+ // initializer are not live at the point of their
+ // initialization, which is mildly more complex than checking
+ // once at the func header but otherwise equivalent.
+
+ if let Some(els) = local.els {
+                    // Conceptually, `let pat: ty = init else { els };` is mostly equivalent to
+ // `let (bindings, ...) = match init { pat => (bindings, ...), _ => els };`
+ // except that extended lifetime applies at the `init` location.
+ //
+ // (e)
+ // |
+ // v
+ // (expr)
+ // / \
+ // | |
+ // v v
+ // bindings els
+ // |
+ // v
+ // ( succ )
+ //
+ if let Some(init) = local.init {
+ let else_ln = self.propagate_through_block(els, succ);
+ let ln = self.live_node(local.hir_id, local.span);
+ self.init_from_succ(ln, succ);
+ self.merge_from_succ(ln, else_ln);
+ let succ = self.propagate_through_expr(init, ln);
+ self.define_bindings_in_pat(&local.pat, succ)
+ } else {
+ span_bug!(
+ stmt.span,
+ "variable is uninitialized but an unexpected else branch is found"
+ )
+ }
+ } else {
+ let succ = self.propagate_through_opt_expr(local.init, succ);
+ self.define_bindings_in_pat(&local.pat, succ)
+ }
+ }
+ hir::StmtKind::Item(..) => succ,
+ hir::StmtKind::Expr(ref expr) | hir::StmtKind::Semi(ref expr) => {
+ self.propagate_through_expr(&expr, succ)
+ }
+ }
+ }
+
+ fn propagate_through_exprs(&mut self, exprs: &[Expr<'_>], succ: LiveNode) -> LiveNode {
+ exprs.iter().rev().fold(succ, |succ, expr| self.propagate_through_expr(&expr, succ))
+ }
+
+ fn propagate_through_opt_expr(
+ &mut self,
+ opt_expr: Option<&Expr<'_>>,
+ succ: LiveNode,
+ ) -> LiveNode {
+ opt_expr.map_or(succ, |expr| self.propagate_through_expr(expr, succ))
+ }
+
+ fn propagate_through_expr(&mut self, expr: &Expr<'_>, succ: LiveNode) -> LiveNode {
+ debug!("propagate_through_expr: {:?}", expr);
+
+ match expr.kind {
+ // Interesting cases with control flow or which gen/kill
+ hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => {
+ self.access_path(expr.hir_id, path, succ, ACC_READ | ACC_USE)
+ }
+
+ hir::ExprKind::Field(ref e, _) => self.propagate_through_expr(&e, succ),
+
+ hir::ExprKind::Closure { .. } => {
+ debug!("{:?} is an ExprKind::Closure", expr);
+
+ // the construction of a closure itself is not important,
+ // but we have to consider the closed over variables.
+ let caps = self
+ .ir
+ .capture_info_map
+ .get(&expr.hir_id)
+ .cloned()
+ .unwrap_or_else(|| span_bug!(expr.span, "no registered caps"));
+
+ caps.iter().rev().fold(succ, |succ, cap| {
+ self.init_from_succ(cap.ln, succ);
+ let var = self.variable(cap.var_hid, expr.span);
+ self.acc(cap.ln, var, ACC_READ | ACC_USE);
+ cap.ln
+ })
+ }
+
+ hir::ExprKind::Let(let_expr) => {
+ let succ = self.propagate_through_expr(let_expr.init, succ);
+ self.define_bindings_in_pat(let_expr.pat, succ)
+ }
+
+ // Note that labels have been resolved, so we don't need to look
+ // at the label ident
+ hir::ExprKind::Loop(ref blk, ..) => self.propagate_through_loop(expr, &blk, succ),
+
+ hir::ExprKind::Yield(ref e, ..) => {
+ let yield_ln = self.live_node(expr.hir_id, expr.span);
+ self.init_from_succ(yield_ln, succ);
+ self.merge_from_succ(yield_ln, self.exit_ln);
+ self.propagate_through_expr(e, yield_ln)
+ }
+
+ hir::ExprKind::If(ref cond, ref then, ref else_opt) => {
+ //
+ // (cond)
+ // |
+ // v
+ // (expr)
+ // / \
+ // | |
+ // v v
+ // (then)(els)
+ // | |
+ // v v
+ // ( succ )
+ //
+ let else_ln =
+ self.propagate_through_opt_expr(else_opt.as_ref().map(|e| &**e), succ);
+ let then_ln = self.propagate_through_expr(&then, succ);
+ let ln = self.live_node(expr.hir_id, expr.span);
+ self.init_from_succ(ln, else_ln);
+ self.merge_from_succ(ln, then_ln);
+ self.propagate_through_expr(&cond, ln)
+ }
+
+ hir::ExprKind::Match(ref e, arms, _) => {
+ //
+ // (e)
+ // |
+ // v
+ // (expr)
+ // / | \
+ // | | |
+ // v v v
+ // (..arms..)
+ // | | |
+ // v v v
+ // ( succ )
+ //
+ //
+ let ln = self.live_node(expr.hir_id, expr.span);
+ self.init_empty(ln, succ);
+ for arm in arms {
+ let body_succ = self.propagate_through_expr(&arm.body, succ);
+
+ let guard_succ = arm.guard.as_ref().map_or(body_succ, |g| match g {
+ hir::Guard::If(e) => self.propagate_through_expr(e, body_succ),
+ hir::Guard::IfLet(let_expr) => {
+ let let_bind = self.define_bindings_in_pat(let_expr.pat, body_succ);
+ self.propagate_through_expr(let_expr.init, let_bind)
+ }
+ });
+ let arm_succ = self.define_bindings_in_pat(&arm.pat, guard_succ);
+ self.merge_from_succ(ln, arm_succ);
+ }
+ self.propagate_through_expr(&e, ln)
+ }
+
+ hir::ExprKind::Ret(ref o_e) => {
+                // Ignore `succ` and substitute `exit_ln` instead.
+ self.propagate_through_opt_expr(o_e.as_ref().map(|e| &**e), self.exit_ln)
+ }
+
+ hir::ExprKind::Break(label, ref opt_expr) => {
+ // Find which label this break jumps to
+ let target = match label.target_id {
+ Ok(hir_id) => self.break_ln.get(&hir_id),
+ Err(err) => span_bug!(expr.span, "loop scope error: {}", err),
+ }
+ .cloned();
+
+ // Now that we know the label we're going to,
+ // look it up in the break loop nodes table
+
+ match target {
+ Some(b) => self.propagate_through_opt_expr(opt_expr.as_ref().map(|e| &**e), b),
+ None => span_bug!(expr.span, "`break` to unknown label"),
+ }
+ }
+
+ hir::ExprKind::Continue(label) => {
+ // Find which label this expr continues to
+ let sc = label
+ .target_id
+ .unwrap_or_else(|err| span_bug!(expr.span, "loop scope error: {}", err));
+
+ // Now that we know the label we're going to,
+ // look it up in the continue loop nodes table
+ self.cont_ln
+ .get(&sc)
+ .cloned()
+ .unwrap_or_else(|| span_bug!(expr.span, "continue to unknown label"))
+ }
+
+ hir::ExprKind::Assign(ref l, ref r, _) => {
+ // see comment on places in
+ // propagate_through_place_components()
+ let succ = self.write_place(&l, succ, ACC_WRITE);
+ let succ = self.propagate_through_place_components(&l, succ);
+ self.propagate_through_expr(&r, succ)
+ }
+
+ hir::ExprKind::AssignOp(_, ref l, ref r) => {
+ // an overloaded assign op is like a method call
+ if self.typeck_results.is_method_call(expr) {
+ let succ = self.propagate_through_expr(&l, succ);
+ self.propagate_through_expr(&r, succ)
+ } else {
+ // see comment on places in
+ // propagate_through_place_components()
+ let succ = self.write_place(&l, succ, ACC_WRITE | ACC_READ);
+ let succ = self.propagate_through_expr(&r, succ);
+ self.propagate_through_place_components(&l, succ)
+ }
+ }
+
+ // Uninteresting cases: just propagate in rev exec order
+ hir::ExprKind::Array(ref exprs) => self.propagate_through_exprs(exprs, succ),
+
+ hir::ExprKind::Struct(_, ref fields, ref with_expr) => {
+ let succ = self.propagate_through_opt_expr(with_expr.as_ref().map(|e| &**e), succ);
+ fields
+ .iter()
+ .rev()
+ .fold(succ, |succ, field| self.propagate_through_expr(&field.expr, succ))
+ }
+
+ hir::ExprKind::Call(ref f, ref args) => {
+ let succ = self.check_is_ty_uninhabited(expr, succ);
+ let succ = self.propagate_through_exprs(args, succ);
+ self.propagate_through_expr(&f, succ)
+ }
+
+ hir::ExprKind::MethodCall(.., ref args, _) => {
+ let succ = self.check_is_ty_uninhabited(expr, succ);
+ self.propagate_through_exprs(args, succ)
+ }
+
+ hir::ExprKind::Tup(ref exprs) => self.propagate_through_exprs(exprs, succ),
+
+ hir::ExprKind::Binary(op, ref l, ref r) if op.node.is_lazy() => {
+ let r_succ = self.propagate_through_expr(&r, succ);
+
+ let ln = self.live_node(expr.hir_id, expr.span);
+ self.init_from_succ(ln, succ);
+ self.merge_from_succ(ln, r_succ);
+
+ self.propagate_through_expr(&l, ln)
+ }
+
+ hir::ExprKind::Index(ref l, ref r) | hir::ExprKind::Binary(_, ref l, ref r) => {
+ let r_succ = self.propagate_through_expr(&r, succ);
+ self.propagate_through_expr(&l, r_succ)
+ }
+
+ hir::ExprKind::Box(ref e)
+ | hir::ExprKind::AddrOf(_, _, ref e)
+ | hir::ExprKind::Cast(ref e, _)
+ | hir::ExprKind::Type(ref e, _)
+ | hir::ExprKind::DropTemps(ref e)
+ | hir::ExprKind::Unary(_, ref e)
+ | hir::ExprKind::Repeat(ref e, _) => self.propagate_through_expr(&e, succ),
+
+ hir::ExprKind::InlineAsm(ref asm) => {
+ // Handle non-returning asm
+ let mut succ = if asm.options.contains(InlineAsmOptions::NORETURN) {
+ self.exit_ln
+ } else {
+ succ
+ };
+
+ // Do a first pass for writing outputs only
+ for (op, _op_sp) in asm.operands.iter().rev() {
+ match op {
+ hir::InlineAsmOperand::In { .. }
+ | hir::InlineAsmOperand::Const { .. }
+ | hir::InlineAsmOperand::SymFn { .. }
+ | hir::InlineAsmOperand::SymStatic { .. } => {}
+ hir::InlineAsmOperand::Out { expr, .. } => {
+ if let Some(expr) = expr {
+ succ = self.write_place(expr, succ, ACC_WRITE);
+ }
+ }
+ hir::InlineAsmOperand::InOut { expr, .. } => {
+ succ = self.write_place(expr, succ, ACC_READ | ACC_WRITE | ACC_USE);
+ }
+ hir::InlineAsmOperand::SplitInOut { out_expr, .. } => {
+ if let Some(expr) = out_expr {
+ succ = self.write_place(expr, succ, ACC_WRITE);
+ }
+ }
+ }
+ }
+
+ // Then do a second pass for inputs
+ let mut succ = succ;
+ for (op, _op_sp) in asm.operands.iter().rev() {
+ match op {
+ hir::InlineAsmOperand::In { expr, .. } => {
+ succ = self.propagate_through_expr(expr, succ)
+ }
+ hir::InlineAsmOperand::Out { expr, .. } => {
+ if let Some(expr) = expr {
+ succ = self.propagate_through_place_components(expr, succ);
+ }
+ }
+ hir::InlineAsmOperand::InOut { expr, .. } => {
+ succ = self.propagate_through_place_components(expr, succ);
+ }
+ hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ if let Some(expr) = out_expr {
+ succ = self.propagate_through_place_components(expr, succ);
+ }
+ succ = self.propagate_through_expr(in_expr, succ);
+ }
+ hir::InlineAsmOperand::Const { .. }
+ | hir::InlineAsmOperand::SymFn { .. }
+ | hir::InlineAsmOperand::SymStatic { .. } => {}
+ }
+ }
+ succ
+ }
+
+ hir::ExprKind::Lit(..)
+ | hir::ExprKind::ConstBlock(..)
+ | hir::ExprKind::Err
+ | hir::ExprKind::Path(hir::QPath::TypeRelative(..))
+ | hir::ExprKind::Path(hir::QPath::LangItem(..)) => succ,
+
+ // Note that labels have been resolved, so we don't need to look
+ // at the label ident
+ hir::ExprKind::Block(ref blk, _) => self.propagate_through_block(&blk, succ),
+ }
+ }
+
+ fn propagate_through_place_components(&mut self, expr: &Expr<'_>, succ: LiveNode) -> LiveNode {
+ // # Places
+ //
+ // In general, the full flow graph structure for an
+ // assignment/move/etc can be handled in one of two ways,
+ // depending on whether what is being assigned is a "tracked
+ // value" or not. A tracked value is basically a local
+ // variable or argument.
+ //
+ // The two kinds of graphs are:
+ //
+ // Tracked place Untracked place
+ // ----------------------++-----------------------
+ // ||
+ // | || |
+ // v || v
+ // (rvalue) || (rvalue)
+ // | || |
+ // v || v
+ // (write of place) || (place components)
+ // | || |
+ // v || v
+ // (succ) || (succ)
+ // ||
+ // ----------------------++-----------------------
+ //
+ // I will cover the two cases in turn:
+ //
+ // # Tracked places
+ //
+ // A tracked place is a local variable/argument `x`. In
+ // these cases, the link_node where the write occurs is linked
+ // to node id of `x`. The `write_place()` routine generates
+ // the contents of this node. There are no subcomponents to
+ // consider.
+ //
+ // # Non-tracked places
+ //
+ // These are places like `x[5]` or `x.f`. In that case, we
+ // basically ignore the place which is written to but generate
+ // reads for its components---`x` in these two examples. The
+ // component reads are generated by
+ // `propagate_through_place_components()` (this fn).
+ //
+ // # Illegal places
+ //
+ // It is still possible to observe assignments to non-places;
+ // these errors are detected in the later borrowck pass. We
+ // just ignore such cases and treat them as reads.
+
+ match expr.kind {
+ hir::ExprKind::Path(_) => succ,
+ hir::ExprKind::Field(ref e, _) => self.propagate_through_expr(&e, succ),
+ _ => self.propagate_through_expr(expr, succ),
+ }
+ }
+
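
As a concrete illustration of the two graph shapes (a hypothetical snippet): assigning to a local is a tracked write that can be reported as dead, while assigning through a projection only generates reads of its components:

    fn demo(mut pair: (i32, i32)) {
        let mut x = 0; // warning: value assigned to `x` is never read (dead write)
        x = 1;         // tracked place: a pure write to the local `x`
        pair.0 = x;    // untracked place: generates a read of the component `pair`
        let _ = pair;
    }

    fn main() {
        demo((0, 0));
    }
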
+ // see comment on propagate_through_place_components()
+ fn write_place(&mut self, expr: &Expr<'_>, succ: LiveNode, acc: u32) -> LiveNode {
+ match expr.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => {
+ self.access_path(expr.hir_id, path, succ, acc)
+ }
+
+ // We do not track other places, so just propagate through
+ // to their subcomponents. Also, it may happen that
+ // non-places occur here, because those are detected in the
+ // later borrowck pass.
+ _ => succ,
+ }
+ }
+
+ fn access_var(
+ &mut self,
+ hir_id: HirId,
+ var_hid: HirId,
+ succ: LiveNode,
+ acc: u32,
+ span: Span,
+ ) -> LiveNode {
+ let ln = self.live_node(hir_id, span);
+ if acc != 0 {
+ self.init_from_succ(ln, succ);
+ let var = self.variable(var_hid, span);
+ self.acc(ln, var, acc);
+ }
+ ln
+ }
+
+ fn access_path(
+ &mut self,
+ hir_id: HirId,
+ path: &hir::Path<'_>,
+ succ: LiveNode,
+ acc: u32,
+ ) -> LiveNode {
+ match path.res {
+ Res::Local(hid) => self.access_var(hir_id, hid, succ, acc, path.span),
+ _ => succ,
+ }
+ }
+
+ fn propagate_through_loop(
+ &mut self,
+ expr: &Expr<'_>,
+ body: &hir::Block<'_>,
+ succ: LiveNode,
+ ) -> LiveNode {
+ /*
+ We model control flow like this:
+
+ (expr) <-+
+ | |
+ v |
+ (body) --+
+
+ Note that a `continue` expression targeting the `loop` will have a successor of `expr`.
+ Meanwhile, a `break` expression will have a successor of `succ`.
+ */
+
+ // first iteration:
+ let ln = self.live_node(expr.hir_id, expr.span);
+ self.init_empty(ln, succ);
+ debug!("propagate_through_loop: using id for loop body {} {:?}", expr.hir_id, body);
+
+ self.break_ln.insert(expr.hir_id, succ);
+
+ self.cont_ln.insert(expr.hir_id, ln);
+
+ let body_ln = self.propagate_through_block(body, ln);
+
+ // repeat until fixed point is reached:
+ while self.merge_from_succ(ln, body_ln) {
+ assert_eq!(body_ln, self.propagate_through_block(body, ln));
+ }
+
+ ln
+ }
+
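
The `merge_from_succ` loop above is a standard fixed-point iteration: monotone unions over a finite variable set must converge. A self-contained sketch with `u32` bitsets standing in for liveness rows (an assumed simplification, not the pass's real types):

    // Re-apply the body's transfer function until the loop-head set stops
    // changing; monotone unions over a finite set guarantee termination.
    fn fixed_point(body: impl Fn(u32) -> u32, exit: u32) -> u32 {
        let mut head = exit; // seed with the loop's exit liveness
        loop {
            let next = head | body(head); // cf. merge_from_succ(ln, body_ln)
            if next == head {
                return head;
            }
            head = next;
        }
    }

    fn main() {
        // A body whose transfer always generates bit 1 and copies bit 1 into
        // bit 0: converges to both bits live after two rounds.
        let result = fixed_point(|s| s | 0b10 | ((s & 0b10) >> 1), 0b00);
        assert_eq!(result, 0b11);
    }
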
+ fn check_is_ty_uninhabited(&mut self, expr: &Expr<'_>, succ: LiveNode) -> LiveNode {
+ let ty = self.typeck_results.expr_ty(expr);
+ let m = self.ir.tcx.parent_module(expr.hir_id).to_def_id();
+ if self.ir.tcx.is_ty_uninhabited_from(m, ty, self.param_env) {
+ match self.ir.lnks[succ] {
+ LiveNodeKind::ExprNode(succ_span, succ_id) => {
+ self.warn_about_unreachable(expr.span, ty, succ_span, succ_id, "expression");
+ }
+ LiveNodeKind::VarDefNode(succ_span, succ_id) => {
+ self.warn_about_unreachable(expr.span, ty, succ_span, succ_id, "definition");
+ }
+ _ => {}
+ };
+ self.exit_ln
+ } else {
+ succ
+ }
+ }
+
+ fn warn_about_unreachable(
+ &mut self,
+ orig_span: Span,
+ orig_ty: Ty<'tcx>,
+ expr_span: Span,
+ expr_id: HirId,
+ descr: &str,
+ ) {
+ if !orig_ty.is_never() {
+ // Unreachable code warnings are already emitted during type checking.
+ // However, during type checking, full type information is being
+ // calculated but not yet available, so the check for diverging
+ // expressions due to uninhabited result types is pretty crude and
+ // only checks whether `ty.is_never()`. Here, we have full type
+ // information available and can issue warnings for less obviously
+ // uninhabited types (e.g. empty enums). The check above is used so
+ // that we do not emit the same warning twice if the uninhabited type
+ // is indeed `!`.
+
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNREACHABLE_CODE,
+ expr_id,
+ expr_span,
+ |lint| {
+ let msg = format!("unreachable {}", descr);
+ lint.build(&msg)
+ .span_label(expr_span, &msg)
+ .span_label(orig_span, "any code following this expression is unreachable")
+ .span_note(
+ orig_span,
+ &format!(
+ "this expression has type `{}`, which is uninhabited",
+ orig_ty
+ ),
+ )
+ .emit();
+ },
+ );
+ }
+ }
+}
+
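
A hypothetical program this catches where plain type checking does not, since `Never` below is uninhabited without being literally `!`:

    enum Never {}

    fn obtain() -> Never {
        loop {}
    }

    fn main() {
        obtain();
        println!("after"); // warning: unreachable expression
    }
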
+// _______________________________________________________________________
+// Checking for error conditions
+
+impl<'a, 'tcx> Visitor<'tcx> for Liveness<'a, 'tcx> {
+ fn visit_local(&mut self, local: &'tcx hir::Local<'tcx>) {
+ self.check_unused_vars_in_pat(&local.pat, None, |spans, hir_id, ln, var| {
+ if local.init.is_some() {
+ self.warn_about_dead_assign(spans, hir_id, ln, var);
+ }
+ });
+
+ intravisit::walk_local(self, local);
+ }
+
+ fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
+ check_expr(self, ex);
+ intravisit::walk_expr(self, ex);
+ }
+
+ fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
+ self.check_unused_vars_in_pat(&arm.pat, None, |_, _, _, _| {});
+ intravisit::walk_arm(self, arm);
+ }
+}
+
+fn check_expr<'tcx>(this: &mut Liveness<'_, 'tcx>, expr: &'tcx Expr<'tcx>) {
+ match expr.kind {
+ hir::ExprKind::Assign(ref l, ..) => {
+ this.check_place(&l);
+ }
+
+ hir::ExprKind::AssignOp(_, ref l, _) => {
+ if !this.typeck_results.is_method_call(expr) {
+ this.check_place(&l);
+ }
+ }
+
+ hir::ExprKind::InlineAsm(ref asm) => {
+ for (op, _op_sp) in asm.operands {
+ match op {
+ hir::InlineAsmOperand::Out { expr, .. } => {
+ if let Some(expr) = expr {
+ this.check_place(expr);
+ }
+ }
+ hir::InlineAsmOperand::InOut { expr, .. } => {
+ this.check_place(expr);
+ }
+ hir::InlineAsmOperand::SplitInOut { out_expr, .. } => {
+ if let Some(out_expr) = out_expr {
+ this.check_place(out_expr);
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+
+ hir::ExprKind::Let(let_expr) => {
+ this.check_unused_vars_in_pat(let_expr.pat, None, |_, _, _, _| {});
+ }
+
+ // no correctness conditions related to liveness
+ hir::ExprKind::Call(..)
+ | hir::ExprKind::MethodCall(..)
+ | hir::ExprKind::Match(..)
+ | hir::ExprKind::Loop(..)
+ | hir::ExprKind::Index(..)
+ | hir::ExprKind::Field(..)
+ | hir::ExprKind::Array(..)
+ | hir::ExprKind::Tup(..)
+ | hir::ExprKind::Binary(..)
+ | hir::ExprKind::Cast(..)
+ | hir::ExprKind::If(..)
+ | hir::ExprKind::DropTemps(..)
+ | hir::ExprKind::Unary(..)
+ | hir::ExprKind::Ret(..)
+ | hir::ExprKind::Break(..)
+ | hir::ExprKind::Continue(..)
+ | hir::ExprKind::Lit(_)
+ | hir::ExprKind::ConstBlock(..)
+ | hir::ExprKind::Block(..)
+ | hir::ExprKind::AddrOf(..)
+ | hir::ExprKind::Struct(..)
+ | hir::ExprKind::Repeat(..)
+ | hir::ExprKind::Closure { .. }
+ | hir::ExprKind::Path(_)
+ | hir::ExprKind::Yield(..)
+ | hir::ExprKind::Box(..)
+ | hir::ExprKind::Type(..)
+ | hir::ExprKind::Err => {}
+ }
+}
+
+impl<'tcx> Liveness<'_, 'tcx> {
+ fn check_place(&mut self, expr: &'tcx Expr<'tcx>) {
+ match expr.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) => {
+ if let Res::Local(var_hid) = path.res {
+ // Assignment to an immutable variable or argument: only legal
+ // if there is no later assignment. If this local is actually
+ // mutable, then check for a reassignment to flag the mutability
+ // as being used.
+ let ln = self.live_node(expr.hir_id, expr.span);
+ let var = self.variable(var_hid, expr.span);
+ self.warn_about_dead_assign(vec![expr.span], expr.hir_id, ln, var);
+ }
+ }
+ _ => {
+ // For other kinds of places, no checks are required,
+ // and any embedded expressions are actually rvalues
+ intravisit::walk_expr(self, expr);
+ }
+ }
+ }
+
+ fn should_warn(&self, var: Variable) -> Option<String> {
+ let name = self.ir.variable_name(var);
+ if name == kw::Empty {
+ return None;
+ }
+ let name = name.as_str();
+ if name.as_bytes()[0] == b'_' {
+ return None;
+ }
+ Some(name.to_owned())
+ }
+
+ fn warn_about_unused_upvars(&self, entry_ln: LiveNode) {
+ let Some(closure_min_captures) = self.closure_min_captures else {
+ return;
+ };
+
+ // If closure_min_captures is Some(), upvars must be Some() too.
+ for (&var_hir_id, min_capture_list) in closure_min_captures {
+ for captured_place in min_capture_list {
+ match captured_place.info.capture_kind {
+ ty::UpvarCapture::ByValue => {}
+ ty::UpvarCapture::ByRef(..) => continue,
+ };
+ let span = captured_place.get_capture_kind_span(self.ir.tcx);
+ let var = self.variable(var_hir_id, span);
+ if self.used_on_entry(entry_ln, var) {
+ if !self.live_on_entry(entry_ln, var) {
+ if let Some(name) = self.should_warn(var) {
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_ASSIGNMENTS,
+ var_hir_id,
+ vec![span],
+ |lint| {
+ lint.build(&format!(
+ "value captured by `{}` is never read",
+ name
+ ))
+ .help("did you mean to capture by reference instead?")
+ .emit();
+ },
+ );
+ }
+ }
+ } else {
+ if let Some(name) = self.should_warn(var) {
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_VARIABLES,
+ var_hir_id,
+ vec![span],
+ |lint| {
+ lint.build(&format!("unused variable: `{}`", name))
+ .help("did you mean to capture by reference instead?")
+ .emit();
+ },
+ );
+ }
+ }
+ }
+ }
+ }
+
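
One shape of program that trips the capture warning above is a `move` closure that only ever writes to its by-value capture (a hypothetical snippet):

    fn main() {
        let mut last = 0;
        let mut record = move |v: i32| {
            last = v; // warning: value captured by `last` is never read
        };            // help: did you mean to capture by reference instead?
        record(1);
    }
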
+ fn warn_about_unused_args(&self, body: &hir::Body<'_>, entry_ln: LiveNode) {
+ for p in body.params {
+ self.check_unused_vars_in_pat(&p.pat, Some(entry_ln), |spans, hir_id, ln, var| {
+ if !self.live_on_entry(ln, var) {
+ self.report_unused_assign(hir_id, spans, var, |name| {
+ format!("value passed to `{}` is never read", name)
+ });
+ }
+ });
+ }
+ }
+
+ fn check_unused_vars_in_pat(
+ &self,
+ pat: &hir::Pat<'_>,
+ entry_ln: Option<LiveNode>,
+ on_used_on_entry: impl Fn(Vec<Span>, HirId, LiveNode, Variable),
+ ) {
+ // In an or-pattern, only consider the first binding of each variable; any later patterns
+ // must have the same bindings, and we take the first pattern to be the "authoritative"
+ // set of ids.
+ // However, we should take the ids and spans of variables with the same name from the later
+ // patterns so the suggestions to prefix with underscores will apply to those too.
+ let mut vars: FxIndexMap<Symbol, (LiveNode, Variable, Vec<(HirId, Span, Span)>)> =
+ <_>::default();
+
+ pat.each_binding(|_, hir_id, pat_sp, ident| {
+ let ln = entry_ln.unwrap_or_else(|| self.live_node(hir_id, pat_sp));
+ let var = self.variable(hir_id, ident.span);
+ let id_and_sp = (hir_id, pat_sp, ident.span);
+ vars.entry(self.ir.variable_name(var))
+ .and_modify(|(.., hir_ids_and_spans)| hir_ids_and_spans.push(id_and_sp))
+ .or_insert_with(|| (ln, var, vec![id_and_sp]));
+ });
+
+ for (_, (ln, var, hir_ids_and_spans)) in vars {
+ if self.used_on_entry(ln, var) {
+ let id = hir_ids_and_spans[0].0;
+ let spans =
+ hir_ids_and_spans.into_iter().map(|(_, _, ident_span)| ident_span).collect();
+ on_used_on_entry(spans, id, ln, var);
+ } else {
+ self.report_unused(hir_ids_and_spans, ln, var);
+ }
+ }
+ }
+
+ fn report_unused(
+ &self,
+ hir_ids_and_spans: Vec<(HirId, Span, Span)>,
+ ln: LiveNode,
+ var: Variable,
+ ) {
+ let first_hir_id = hir_ids_and_spans[0].0;
+
+ if let Some(name) = self.should_warn(var).filter(|name| name != "self") {
+ // Annoying: for parameters in functions like `fn(x: i32)
+ // { ret }`, there is only one node, so asking about
+ // `assigned_on_exit()` is not meaningful.
+ let is_assigned =
+ if ln == self.exit_ln { false } else { self.assigned_on_exit(ln, var) };
+
+ if is_assigned {
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_VARIABLES,
+ first_hir_id,
+ hir_ids_and_spans
+ .into_iter()
+ .map(|(_, _, ident_span)| ident_span)
+ .collect::<Vec<_>>(),
+ |lint| {
+ lint.build(&format!("variable `{}` is assigned to, but never used", name))
+ .note(&format!("consider using `_{}` instead", name))
+ .emit();
+ },
+ )
+ } else {
+ let (shorthands, non_shorthands): (Vec<_>, Vec<_>) =
+ hir_ids_and_spans.iter().copied().partition(|(hir_id, _, ident_span)| {
+ let var = self.variable(*hir_id, *ident_span);
+ self.ir.variable_is_shorthand(var)
+ });
+
+ // If we have both shorthand and non-shorthand, prefer the "try ignoring
+ // the field" message, and suggest `_` for the non-shorthands. If we only
+ // have non-shorthand, then prefix with an underscore instead.
+ if !shorthands.is_empty() {
+ let shorthands = shorthands
+ .into_iter()
+ .map(|(_, pat_span, _)| (pat_span, format!("{}: _", name)))
+ .chain(
+ non_shorthands
+ .into_iter()
+ .map(|(_, pat_span, _)| (pat_span, "_".to_string())),
+ )
+ .collect::<Vec<_>>();
+
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_VARIABLES,
+ first_hir_id,
+ hir_ids_and_spans
+ .iter()
+ .map(|(_, pat_span, _)| *pat_span)
+ .collect::<Vec<_>>(),
+ |lint| {
+ let mut err = lint.build(&format!("unused variable: `{}`", name));
+ err.multipart_suggestion(
+ "try ignoring the field",
+ shorthands,
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ },
+ );
+ } else {
+ let non_shorthands = non_shorthands
+ .into_iter()
+ .map(|(_, _, ident_span)| (ident_span, format!("_{}", name)))
+ .collect::<Vec<_>>();
+
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_VARIABLES,
+ first_hir_id,
+ hir_ids_and_spans
+ .iter()
+ .map(|(_, _, ident_span)| *ident_span)
+ .collect::<Vec<_>>(),
+ |lint| {
+ let mut err = lint.build(&format!("unused variable: `{}`", name));
+ err.multipart_suggestion(
+ "if this is intentional, prefix it with an underscore",
+ non_shorthands,
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ },
+ );
+ }
+ }
+ }
+ }
+
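
The two suggestion paths above, on a hypothetical snippet: an unused shorthand field binding gets the "try ignoring the field" rewrite, while a plain unused binding gets the underscore prefix:

    struct Point { x: i32, y: i32 }

    fn main() {
        let p = Point { x: 1, y: 2 };
        let Point { x, y } = p; // unused `y`: suggests `y: _`
        let count = x;          // unused `count`: suggests `_count`
    }
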
+ fn warn_about_dead_assign(&self, spans: Vec<Span>, hir_id: HirId, ln: LiveNode, var: Variable) {
+ if !self.live_on_exit(ln, var) {
+ self.report_unused_assign(hir_id, spans, var, |name| {
+ format!("value assigned to `{}` is never read", name)
+ });
+ }
+ }
+
+ fn report_unused_assign(
+ &self,
+ hir_id: HirId,
+ spans: Vec<Span>,
+ var: Variable,
+ message: impl Fn(&str) -> String,
+ ) {
+ if let Some(name) = self.should_warn(var) {
+ self.ir.tcx.struct_span_lint_hir(
+ lint::builtin::UNUSED_ASSIGNMENTS,
+ hir_id,
+ spans,
+ |lint| {
+ lint.build(&message(&name))
+ .help("maybe it is overwritten before being read?")
+ .emit();
+ },
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_passes/src/liveness/rwu_table.rs b/compiler/rustc_passes/src/liveness/rwu_table.rs
new file mode 100644
index 000000000..6d5983f53
--- /dev/null
+++ b/compiler/rustc_passes/src/liveness/rwu_table.rs
@@ -0,0 +1,145 @@
+use crate::liveness::{LiveNode, Variable};
+use std::iter;
+
+#[derive(Clone, Copy)]
+pub(super) struct RWU {
+ pub(super) reader: bool,
+ pub(super) writer: bool,
+ pub(super) used: bool,
+}
+
+/// Conceptually, this is like a `Vec<Vec<RWU>>`. But the number of
+/// `RWU`s can get very large, so it uses a more compact representation.
+pub(super) struct RWUTable {
+ /// Total number of live nodes.
+ live_nodes: usize,
+ /// Total number of variables.
+ vars: usize,
+
+ /// A compressed representation of `RWU`s.
+ ///
+ /// Each word represents 2 different `RWU`s packed together. Each packed RWU
+ /// is stored in 4 bits: a reader bit, a writer bit, a used bit and a
+ /// padding bit.
+ ///
+ /// The data for each live node is contiguous and starts at a word boundary,
+ /// so there may be unused padding at the end of each row.
+ words: Vec<u8>,
+ /// Number of words per each live node.
+ live_node_words: usize,
+}
+
+impl RWUTable {
+ const RWU_READER: u8 = 0b0001;
+ const RWU_WRITER: u8 = 0b0010;
+ const RWU_USED: u8 = 0b0100;
+ const RWU_MASK: u8 = 0b1111;
+
+ /// Size of packed RWU in bits.
+ const RWU_BITS: usize = 4;
+ /// Size of a word in bits.
+ const WORD_BITS: usize = std::mem::size_of::<u8>() * 8;
+ /// Number of packed RWUs that fit into a single word.
+ const WORD_RWU_COUNT: usize = Self::WORD_BITS / Self::RWU_BITS;
+
+ pub(super) fn new(live_nodes: usize, vars: usize) -> RWUTable {
+ let live_node_words = (vars + Self::WORD_RWU_COUNT - 1) / Self::WORD_RWU_COUNT;
+ Self { live_nodes, vars, live_node_words, words: vec![0u8; live_node_words * live_nodes] }
+ }
+
+ fn word_and_shift(&self, ln: LiveNode, var: Variable) -> (usize, u32) {
+ assert!(ln.index() < self.live_nodes);
+ assert!(var.index() < self.vars);
+
+ let var = var.index();
+ let word = var / Self::WORD_RWU_COUNT;
+ let shift = Self::RWU_BITS * (var % Self::WORD_RWU_COUNT);
+ (ln.index() * self.live_node_words + word, shift as u32)
+ }
+
+ fn pick2_rows_mut(&mut self, a: LiveNode, b: LiveNode) -> (&mut [u8], &mut [u8]) {
+ assert!(a.index() < self.live_nodes);
+ assert!(b.index() < self.live_nodes);
+ assert!(a != b);
+
+ let a_start = a.index() * self.live_node_words;
+ let b_start = b.index() * self.live_node_words;
+
+ unsafe {
+ let ptr = self.words.as_mut_ptr();
+ (
+ std::slice::from_raw_parts_mut(ptr.add(a_start), self.live_node_words),
+ std::slice::from_raw_parts_mut(ptr.add(b_start), self.live_node_words),
+ )
+ }
+ }
+
+ pub(super) fn copy(&mut self, dst: LiveNode, src: LiveNode) {
+ if dst == src {
+ return;
+ }
+
+ let (dst_row, src_row) = self.pick2_rows_mut(dst, src);
+ dst_row.copy_from_slice(src_row);
+ }
+
+ /// Sets `dst` to the union of `dst` and `src`, returns true if `dst` was
+ /// changed.
+ pub(super) fn union(&mut self, dst: LiveNode, src: LiveNode) -> bool {
+ if dst == src {
+ return false;
+ }
+
+ let mut changed = false;
+ let (dst_row, src_row) = self.pick2_rows_mut(dst, src);
+ for (dst_word, src_word) in iter::zip(dst_row, &*src_row) {
+ let old = *dst_word;
+ let new = *dst_word | src_word;
+ *dst_word = new;
+ changed |= old != new;
+ }
+ changed
+ }
+
+ pub(super) fn get_reader(&self, ln: LiveNode, var: Variable) -> bool {
+ let (word, shift) = self.word_and_shift(ln, var);
+ (self.words[word] >> shift) & Self::RWU_READER != 0
+ }
+
+ pub(super) fn get_writer(&self, ln: LiveNode, var: Variable) -> bool {
+ let (word, shift) = self.word_and_shift(ln, var);
+ (self.words[word] >> shift) & Self::RWU_WRITER != 0
+ }
+
+ pub(super) fn get_used(&self, ln: LiveNode, var: Variable) -> bool {
+ let (word, shift) = self.word_and_shift(ln, var);
+ (self.words[word] >> shift) & Self::RWU_USED != 0
+ }
+
+ pub(super) fn get(&self, ln: LiveNode, var: Variable) -> RWU {
+ let (word, shift) = self.word_and_shift(ln, var);
+ let rwu_packed = self.words[word] >> shift;
+ RWU {
+ reader: rwu_packed & Self::RWU_READER != 0,
+ writer: rwu_packed & Self::RWU_WRITER != 0,
+ used: rwu_packed & Self::RWU_USED != 0,
+ }
+ }
+
+ pub(super) fn set(&mut self, ln: LiveNode, var: Variable, rwu: RWU) {
+ let mut packed = 0;
+ if rwu.reader {
+ packed |= Self::RWU_READER;
+ }
+ if rwu.writer {
+ packed |= Self::RWU_WRITER;
+ }
+ if rwu.used {
+ packed |= Self::RWU_USED;
+ }
+
+ let (word, shift) = self.word_and_shift(ln, var);
+ let word = &mut self.words[word];
+ *word = (*word & !(Self::RWU_MASK << shift)) | (packed << shift)
+ }
+}
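
A standalone sketch of the 4-bit packing arithmetic (two `RWU`s per `u8` word; the constants mirror the ones above, the values are hypothetical):

    const RWU_READER: u8 = 0b0001;
    const RWU_USED: u8 = 0b0100;
    const RWU_MASK: u8 = 0b1111;
    const RWU_BITS: usize = 4;
    const WORD_RWU_COUNT: usize = 8 / RWU_BITS; // 2 packed RWUs per u8

    fn main() {
        let mut words = vec![0u8; 1];
        // Set {reader, used} for var 1: it lives in word 0 at shift 4.
        let (word, shift) = (1 / WORD_RWU_COUNT, RWU_BITS * (1 % WORD_RWU_COUNT));
        words[word] = (words[word] & !(RWU_MASK << shift)) | ((RWU_READER | RWU_USED) << shift);
        assert_eq!(words[0], 0b0101_0000);
        assert!((words[word] >> shift) & RWU_READER != 0);
    }
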
diff --git a/compiler/rustc_passes/src/loops.rs b/compiler/rustc_passes/src/loops.rs
new file mode 100644
index 000000000..cdda0e388
--- /dev/null
+++ b/compiler/rustc_passes/src/loops.rs
@@ -0,0 +1,287 @@
+use Context::*;
+
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Destination, Movability, Node};
+use rustc_middle::hir::map::Map;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::Session;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::Span;
+
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum Context {
+ Normal,
+ Loop(hir::LoopSource),
+ Closure(Span),
+ AsyncClosure(Span),
+ LabeledBlock,
+ AnonConst,
+}
+
+#[derive(Copy, Clone)]
+struct CheckLoopVisitor<'a, 'hir> {
+ sess: &'a Session,
+ hir_map: Map<'hir>,
+ cx: Context,
+}
+
+fn check_mod_loops(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ tcx.hir().visit_item_likes_in_module(
+ module_def_id,
+ &mut CheckLoopVisitor { sess: &tcx.sess, hir_map: tcx.hir(), cx: Normal },
+ );
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { check_mod_loops, ..*providers };
+}
+
+impl<'a, 'hir> Visitor<'hir> for CheckLoopVisitor<'a, 'hir> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.hir_map
+ }
+
+ fn visit_anon_const(&mut self, c: &'hir hir::AnonConst) {
+ self.with_context(AnonConst, |v| intravisit::walk_anon_const(v, c));
+ }
+
+ fn visit_expr(&mut self, e: &'hir hir::Expr<'hir>) {
+ match e.kind {
+ hir::ExprKind::Loop(ref b, _, source, _) => {
+ self.with_context(Loop(source), |v| v.visit_block(&b));
+ }
+ hir::ExprKind::Closure(&hir::Closure {
+ ref fn_decl,
+ body,
+ fn_decl_span,
+ movability,
+ ..
+ }) => {
+ let cx = if let Some(Movability::Static) = movability {
+ AsyncClosure(fn_decl_span)
+ } else {
+ Closure(fn_decl_span)
+ };
+ self.visit_fn_decl(&fn_decl);
+ self.with_context(cx, |v| v.visit_nested_body(body));
+ }
+ hir::ExprKind::Block(ref b, Some(_label)) => {
+ self.with_context(LabeledBlock, |v| v.visit_block(&b));
+ }
+ hir::ExprKind::Break(break_label, ref opt_expr) => {
+ if let Some(e) = opt_expr {
+ self.visit_expr(e);
+ }
+
+ if self.require_label_in_labeled_block(e.span, &break_label, "break") {
+ // If we emitted an error about an unlabeled break in a labeled
+ // block, we don't need any further checking for this break any more
+ return;
+ }
+
+ let loop_id = match break_label.target_id {
+ Ok(loop_id) => Some(loop_id),
+ Err(hir::LoopIdError::OutsideLoopScope) => None,
+ Err(hir::LoopIdError::UnlabeledCfInWhileCondition) => {
+ self.emit_unlabeled_cf_in_while_condition(e.span, "break");
+ None
+ }
+ Err(hir::LoopIdError::UnresolvedLabel) => None,
+ };
+
+ if let Some(Node::Block(_)) = loop_id.and_then(|id| self.hir_map.find(id)) {
+ return;
+ }
+
+ if let Some(break_expr) = opt_expr {
+ let (head, loop_label, loop_kind) = if let Some(loop_id) = loop_id {
+ match self.hir_map.expect_expr(loop_id).kind {
+ hir::ExprKind::Loop(_, label, source, sp) => {
+ (Some(sp), label, Some(source))
+ }
+ ref r => {
+ span_bug!(e.span, "break label resolved to a non-loop: {:?}", r)
+ }
+ }
+ } else {
+ (None, None, None)
+ };
+ match loop_kind {
+ None | Some(hir::LoopSource::Loop) => (),
+ Some(kind) => {
+ let mut err = struct_span_err!(
+ self.sess,
+ e.span,
+ E0571,
+ "`break` with value from a `{}` loop",
+ kind.name()
+ );
+ err.span_label(
+ e.span,
+ "can only break with a value inside `loop` or breakable block",
+ );
+ if let Some(head) = head {
+ err.span_label(
+ head,
+ &format!(
+ "you can't `break` with a value in a `{}` loop",
+ kind.name()
+ ),
+ );
+ }
+ err.span_suggestion(
+ e.span,
+ &format!(
+ "use `break` on its own without a value inside this `{}` loop",
+ kind.name(),
+ ),
+ format!(
+ "break{}",
+ break_label
+ .label
+ .map_or_else(String::new, |l| format!(" {}", l.ident))
+ ),
+ Applicability::MaybeIncorrect,
+ );
+ if let (Some(label), None) = (loop_label, break_label.label) {
+ match break_expr.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path {
+ segments: [segment],
+ res: hir::def::Res::Err,
+ ..
+ },
+ )) if label.ident.to_string()
+ == format!("'{}", segment.ident) =>
+ {
+ // This error is redundant, we will have already emitted a
+ // suggestion to use the label when `segment` wasn't found
+ // (hence the `Res::Err` check).
+ err.delay_as_bug();
+ }
+ _ => {
+ err.span_suggestion(
+ break_expr.span,
+ "alternatively, you might have meant to use the \
+ available loop label",
+ label.ident,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ err.emit();
+ }
+ }
+ }
+
+ self.require_break_cx("break", e.span);
+ }
+ hir::ExprKind::Continue(destination) => {
+ self.require_label_in_labeled_block(e.span, &destination, "continue");
+
+ match destination.target_id {
+ Ok(loop_id) => {
+ if let Node::Block(block) = self.hir_map.find(loop_id).unwrap() {
+ struct_span_err!(
+ self.sess,
+ e.span,
+ E0696,
+ "`continue` pointing to a labeled block"
+ )
+ .span_label(e.span, "labeled blocks cannot be `continue`'d")
+ .span_label(block.span, "labeled block the `continue` points to")
+ .emit();
+ }
+ }
+ Err(hir::LoopIdError::UnlabeledCfInWhileCondition) => {
+ self.emit_unlabeled_cf_in_while_condition(e.span, "continue");
+ }
+ Err(_) => {}
+ }
+ self.require_break_cx("continue", e.span)
+ }
+ _ => intravisit::walk_expr(self, e),
+ }
+ }
+}
+
+impl<'a, 'hir> CheckLoopVisitor<'a, 'hir> {
+ fn with_context<F>(&mut self, cx: Context, f: F)
+ where
+ F: FnOnce(&mut CheckLoopVisitor<'a, 'hir>),
+ {
+ let old_cx = self.cx;
+ self.cx = cx;
+ f(self);
+ self.cx = old_cx;
+ }
+
+ fn require_break_cx(&self, name: &str, span: Span) {
+ let err_inside_of = |article, ty, closure_span| {
+ struct_span_err!(self.sess, span, E0267, "`{}` inside of {} {}", name, article, ty)
+ .span_label(span, format!("cannot `{}` inside of {} {}", name, article, ty))
+ .span_label(closure_span, &format!("enclosing {}", ty))
+ .emit();
+ };
+
+ match self.cx {
+ LabeledBlock | Loop(_) => {}
+ Closure(closure_span) => err_inside_of("a", "closure", closure_span),
+ AsyncClosure(closure_span) => err_inside_of("an", "`async` block", closure_span),
+ Normal | AnonConst => {
+ struct_span_err!(self.sess, span, E0268, "`{}` outside of a loop", name)
+ .span_label(span, format!("cannot `{}` outside of a loop", name))
+ .emit();
+ }
+ }
+ }
+
+ fn require_label_in_labeled_block(
+ &mut self,
+ span: Span,
+ label: &Destination,
+ cf_type: &str,
+ ) -> bool {
+ if !span.is_desugaring(DesugaringKind::QuestionMark) && self.cx == LabeledBlock {
+ if label.label.is_none() {
+ struct_span_err!(
+ self.sess,
+ span,
+ E0695,
+ "unlabeled `{}` inside of a labeled block",
+ cf_type
+ )
+ .span_label(
+ span,
+ format!(
+ "`{}` statements that would diverge to or through \
+ a labeled block need to bear a label",
+ cf_type
+ ),
+ )
+ .emit();
+ return true;
+ }
+ }
+ false
+ }
+
+ fn emit_unlabeled_cf_in_while_condition(&mut self, span: Span, cf_type: &str) {
+ struct_span_err!(
+ self.sess,
+ span,
+ E0590,
+ "`break` or `continue` with no label in the condition of a `while` loop"
+ )
+ .span_label(span, format!("unlabeled `{}` in the condition of a `while` loop", cf_type))
+ .emit();
+ }
+}
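
Hypothetical programs this visitor rejects; none of these compile, and each line notes the diagnostic it draws:

    fn demo(n: i32) {
        break;            // E0268: `break` outside of a loop
        while n > 0 {
            break n;      // E0571: `break` with value from a `while` loop
        }
        let _ = || break; // E0267: `break` inside of a closure
        let _ = loop {
            break 42;     // ok: only `loop` (or a breakable block) takes a value
        };
    }
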
diff --git a/compiler/rustc_passes/src/naked_functions.rs b/compiler/rustc_passes/src/naked_functions.rs
new file mode 100644
index 000000000..20765abf3
--- /dev/null
+++ b/compiler/rustc_passes/src/naked_functions.rs
@@ -0,0 +1,335 @@
+//! Checks validity of naked functions.
+
+use rustc_ast::{Attribute, InlineAsmOptions};
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::intravisit::{FnKind, Visitor};
+use rustc_hir::{ExprKind, HirId, InlineAsmOperand, StmtKind};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::lint::builtin::UNDEFINED_NAKED_FUNCTION_ABI;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_target::spec::abi::Abi;
+
+fn check_mod_naked_functions(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ tcx.hir().visit_item_likes_in_module(module_def_id, &mut CheckNakedFunctions { tcx });
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers { check_mod_naked_functions, ..*providers };
+}
+
+struct CheckNakedFunctions<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> Visitor<'tcx> for CheckNakedFunctions<'tcx> {
+ fn visit_fn(
+ &mut self,
+ fk: FnKind<'_>,
+ _fd: &'tcx hir::FnDecl<'tcx>,
+ body_id: hir::BodyId,
+ span: Span,
+ hir_id: HirId,
+ ) {
+ let ident_span;
+ let fn_header;
+
+ match fk {
+ FnKind::Closure => {
+ // Closures with a naked attribute are rejected during attribute
+ // check. Don't validate them any further.
+ return;
+ }
+ FnKind::ItemFn(ident, _, ref header, ..) => {
+ ident_span = ident.span;
+ fn_header = header;
+ }
+
+ FnKind::Method(ident, ref sig, ..) => {
+ ident_span = ident.span;
+ fn_header = &sig.header;
+ }
+ }
+
+ let attrs = self.tcx.hir().attrs(hir_id);
+ let naked = attrs.iter().any(|attr| attr.has_name(sym::naked));
+ if naked {
+ let body = self.tcx.hir().body(body_id);
+ check_abi(self.tcx, hir_id, fn_header.abi, ident_span);
+ check_no_patterns(self.tcx, body.params);
+ check_no_parameters_use(self.tcx, body);
+ check_asm(self.tcx, body, span);
+ check_inline(self.tcx, attrs);
+ }
+ }
+}
+
+/// Check that the function isn't inlined.
+fn check_inline(tcx: TyCtxt<'_>, attrs: &[Attribute]) {
+ for attr in attrs.iter().filter(|attr| attr.has_name(sym::inline)) {
+ tcx.sess.struct_span_err(attr.span, "naked functions cannot be inlined").emit();
+ }
+}
+
+/// Checks that the function uses a non-Rust ABI.
+fn check_abi(tcx: TyCtxt<'_>, hir_id: HirId, abi: Abi, fn_ident_span: Span) {
+ if abi == Abi::Rust {
+ tcx.struct_span_lint_hir(UNDEFINED_NAKED_FUNCTION_ABI, hir_id, fn_ident_span, |lint| {
+ lint.build("Rust ABI is unsupported in naked functions").emit();
+ });
+ }
+}
+
+/// Checks that parameters don't use patterns. Mirrors the checks for function declarations.
+fn check_no_patterns(tcx: TyCtxt<'_>, params: &[hir::Param<'_>]) {
+ for param in params {
+ match param.pat.kind {
+ hir::PatKind::Wild
+ | hir::PatKind::Binding(hir::BindingAnnotation::Unannotated, _, _, None) => {}
+ _ => {
+ tcx.sess
+ .struct_span_err(
+ param.pat.span,
+ "patterns not allowed in naked function parameters",
+ )
+ .emit();
+ }
+ }
+ }
+}
+
+/// Checks that function parameters aren't used in the function body.
+fn check_no_parameters_use<'tcx>(tcx: TyCtxt<'tcx>, body: &'tcx hir::Body<'tcx>) {
+ let mut params = hir::HirIdSet::default();
+ for param in body.params {
+ param.pat.each_binding(|_binding_mode, hir_id, _span, _ident| {
+ params.insert(hir_id);
+ });
+ }
+ CheckParameters { tcx, params }.visit_body(body);
+}
+
+struct CheckParameters<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ params: hir::HirIdSet,
+}
+
+impl<'tcx> Visitor<'tcx> for CheckParameters<'tcx> {
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ if let hir::ExprKind::Path(hir::QPath::Resolved(
+ _,
+ hir::Path { res: hir::def::Res::Local(var_hir_id), .. },
+ )) = expr.kind
+ {
+ if self.params.contains(var_hir_id) {
+ self.tcx
+ .sess
+ .struct_span_err(
+ expr.span,
+ "referencing function parameters is not allowed in naked functions",
+ )
+ .help("follow the calling convention in asm block to use parameters")
+ .emit();
+ return;
+ }
+ }
+ hir::intravisit::walk_expr(self, expr);
+ }
+}
+
+/// Checks that the function body contains a single inline assembly block.
+fn check_asm<'tcx>(tcx: TyCtxt<'tcx>, body: &'tcx hir::Body<'tcx>, fn_span: Span) {
+ let mut this = CheckInlineAssembly { tcx, items: Vec::new() };
+ this.visit_body(body);
+ if let [(ItemKind::Asm | ItemKind::Err, _)] = this.items[..] {
+ // Ok.
+ } else {
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ fn_span,
+ E0787,
+ "naked functions must contain a single asm block"
+ );
+
+ let mut must_show_error = false;
+ let mut has_asm = false;
+ let mut has_err = false;
+ for &(kind, span) in &this.items {
+ match kind {
+ ItemKind::Asm if has_asm => {
+ must_show_error = true;
+ diag.span_label(span, "multiple asm blocks are unsupported in naked functions");
+ }
+ ItemKind::Asm => has_asm = true,
+ ItemKind::NonAsm => {
+ must_show_error = true;
+ diag.span_label(span, "non-asm is unsupported in naked functions");
+ }
+ ItemKind::Err => has_err = true,
+ }
+ }
+
+ // If the naked function only contains a single asm block and a non-zero number of
+ // errors, then don't show an additional error. This allows for appending/prepending
+ // `compile_error!("...")` statements and reduces error noise.
+ if must_show_error || !has_err {
+ diag.emit();
+ } else {
+ diag.cancel();
+ }
+ }
+}
+
+struct CheckInlineAssembly<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ items: Vec<(ItemKind, Span)>,
+}
+
+#[derive(Copy, Clone)]
+enum ItemKind {
+ Asm,
+ NonAsm,
+ Err,
+}
+
+impl<'tcx> CheckInlineAssembly<'tcx> {
+ fn check_expr(&mut self, expr: &'tcx hir::Expr<'tcx>, span: Span) {
+ match expr.kind {
+ ExprKind::Box(..)
+ | ExprKind::ConstBlock(..)
+ | ExprKind::Array(..)
+ | ExprKind::Call(..)
+ | ExprKind::MethodCall(..)
+ | ExprKind::Tup(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Unary(..)
+ | ExprKind::Lit(..)
+ | ExprKind::Cast(..)
+ | ExprKind::Type(..)
+ | ExprKind::Loop(..)
+ | ExprKind::Match(..)
+ | ExprKind::If(..)
+ | ExprKind::Closure { .. }
+ | ExprKind::Assign(..)
+ | ExprKind::AssignOp(..)
+ | ExprKind::Field(..)
+ | ExprKind::Index(..)
+ | ExprKind::Path(..)
+ | ExprKind::AddrOf(..)
+ | ExprKind::Let(..)
+ | ExprKind::Break(..)
+ | ExprKind::Continue(..)
+ | ExprKind::Ret(..)
+ | ExprKind::Struct(..)
+ | ExprKind::Repeat(..)
+ | ExprKind::Yield(..) => {
+ self.items.push((ItemKind::NonAsm, span));
+ }
+
+ ExprKind::InlineAsm(ref asm) => {
+ self.items.push((ItemKind::Asm, span));
+ self.check_inline_asm(asm, span);
+ }
+
+ ExprKind::DropTemps(..) | ExprKind::Block(..) => {
+ hir::intravisit::walk_expr(self, expr);
+ }
+
+ ExprKind::Err => {
+ self.items.push((ItemKind::Err, span));
+ }
+ }
+ }
+
+ fn check_inline_asm(&self, asm: &'tcx hir::InlineAsm<'tcx>, span: Span) {
+ let unsupported_operands: Vec<Span> = asm
+ .operands
+ .iter()
+ .filter_map(|&(ref op, op_sp)| match op {
+ InlineAsmOperand::Const { .. }
+ | InlineAsmOperand::SymFn { .. }
+ | InlineAsmOperand::SymStatic { .. } => None,
+ InlineAsmOperand::In { .. }
+ | InlineAsmOperand::Out { .. }
+ | InlineAsmOperand::InOut { .. }
+ | InlineAsmOperand::SplitInOut { .. } => Some(op_sp),
+ })
+ .collect();
+ if !unsupported_operands.is_empty() {
+ struct_span_err!(
+ self.tcx.sess,
+ unsupported_operands,
+ E0787,
+ "only `const` and `sym` operands are supported in naked functions",
+ )
+ .emit();
+ }
+
+ let unsupported_options: Vec<&'static str> = [
+ (InlineAsmOptions::MAY_UNWIND, "`may_unwind`"),
+ (InlineAsmOptions::NOMEM, "`nomem`"),
+ (InlineAsmOptions::NOSTACK, "`nostack`"),
+ (InlineAsmOptions::PRESERVES_FLAGS, "`preserves_flags`"),
+ (InlineAsmOptions::PURE, "`pure`"),
+ (InlineAsmOptions::READONLY, "`readonly`"),
+ ]
+ .iter()
+ .filter_map(|&(option, name)| if asm.options.contains(option) { Some(name) } else { None })
+ .collect();
+
+ if !unsupported_options.is_empty() {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0787,
+ "asm options unsupported in naked functions: {}",
+ unsupported_options.join(", ")
+ )
+ .emit();
+ }
+
+ if !asm.options.contains(InlineAsmOptions::NORETURN) {
+ let last_span = asm
+ .operands
+ .last()
+ .map_or_else(|| asm.template_strs.last().unwrap().2, |op| op.1)
+ .shrink_to_hi();
+
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0787,
+ "asm in naked functions must use `noreturn` option"
+ )
+ .span_suggestion(
+ last_span,
+ "consider specifying that the asm block is responsible \
+ for returning from the function",
+ ", options(noreturn)",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for CheckInlineAssembly<'tcx> {
+ fn visit_stmt(&mut self, stmt: &'tcx hir::Stmt<'tcx>) {
+ match stmt.kind {
+ StmtKind::Item(..) => {}
+ StmtKind::Local(..) => {
+ self.items.push((ItemKind::NonAsm, stmt.span));
+ }
+ StmtKind::Expr(ref expr) | StmtKind::Semi(ref expr) => {
+ self.check_expr(expr, stmt.span);
+ }
+ }
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ self.check_expr(&expr, expr.span);
+ }
+}
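
For contrast, a sketch of a function that passes every check above, assuming an x86-64 target and a nightly toolchain (`naked_functions` is still feature-gated in this version of the compiler):

    #![feature(naked_functions)]

    use core::arch::asm;

    #[naked]
    pub unsafe extern "C" fn add_one(_x: u64) -> u64 {
        // One asm block, a non-Rust ABI, no Rust-level use of `_x` (the
        // argument is read from rdi per the C calling convention), and the
        // mandatory `noreturn` option.
        asm!("lea rax, [rdi + 1]", "ret", options(noreturn));
    }
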
diff --git a/compiler/rustc_passes/src/reachable.rs b/compiler/rustc_passes/src/reachable.rs
new file mode 100644
index 000000000..f7e3fac6b
--- /dev/null
+++ b/compiler/rustc_passes/src/reachable.rs
@@ -0,0 +1,417 @@
+// Finds items that are externally reachable, to determine which items
+// need to have their metadata (and possibly their AST) serialized.
+// All items that can be referred to through an exported name are
+// reachable, and when a reachable item is inlinable or generic, it
+// makes every generic or inlinable function that it references
+// reachable as well.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::Node;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::middle::privacy;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, DefIdTree, TyCtxt};
+use rustc_session::config::CrateType;
+use rustc_target::spec::abi::Abi;
+
+// Returns true if the given item must be inlined because it may be
+// monomorphized or it was marked with `#[inline]`. This will only return
+// true for functions.
+fn item_might_be_inlined(tcx: TyCtxt<'_>, item: &hir::Item<'_>, attrs: &CodegenFnAttrs) -> bool {
+ if attrs.requests_inline() {
+ return true;
+ }
+
+ match item.kind {
+ hir::ItemKind::Fn(ref sig, ..) if sig.header.is_const() => true,
+ hir::ItemKind::Impl { .. } | hir::ItemKind::Fn(..) => {
+ let generics = tcx.generics_of(item.def_id);
+ generics.requires_monomorphization(tcx)
+ }
+ _ => false,
+ }
+}
+
+fn method_might_be_inlined(
+ tcx: TyCtxt<'_>,
+ impl_item: &hir::ImplItem<'_>,
+ impl_src: LocalDefId,
+) -> bool {
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(impl_item.hir_id().owner.to_def_id());
+ let generics = tcx.generics_of(impl_item.def_id);
+ if codegen_fn_attrs.requests_inline() || generics.requires_monomorphization(tcx) {
+ return true;
+ }
+ if let hir::ImplItemKind::Fn(method_sig, _) = &impl_item.kind {
+ if method_sig.header.is_const() {
+ return true;
+ }
+ }
+ match tcx.hir().find_by_def_id(impl_src) {
+ Some(Node::Item(item)) => item_might_be_inlined(tcx, &item, codegen_fn_attrs),
+ Some(..) | None => span_bug!(impl_item.span, "impl def-id is not an item"),
+ }
+}
+
+// Information needed while computing reachability.
+struct ReachableContext<'tcx> {
+ // The type context.
+ tcx: TyCtxt<'tcx>,
+ maybe_typeck_results: Option<&'tcx ty::TypeckResults<'tcx>>,
+ // The set of items which must be exported in the linkage sense.
+ reachable_symbols: FxHashSet<LocalDefId>,
+ // A worklist of item IDs. Each item ID in this worklist will be inlined
+ // and will be scanned for further references.
+ // FIXME(eddyb) benchmark if this would be faster as a `VecDeque`.
+ worklist: Vec<LocalDefId>,
+ // Whether any output of this compilation is a library
+ any_library: bool,
+}
+
+impl<'tcx> Visitor<'tcx> for ReachableContext<'tcx> {
+ fn visit_nested_body(&mut self, body: hir::BodyId) {
+ let old_maybe_typeck_results =
+ self.maybe_typeck_results.replace(self.tcx.typeck_body(body));
+ let body = self.tcx.hir().body(body);
+ self.visit_body(body);
+ self.maybe_typeck_results = old_maybe_typeck_results;
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ let res = match expr.kind {
+ hir::ExprKind::Path(ref qpath) => {
+ Some(self.typeck_results().qpath_res(qpath, expr.hir_id))
+ }
+ hir::ExprKind::MethodCall(..) => self
+ .typeck_results()
+ .type_dependent_def(expr.hir_id)
+ .map(|(kind, def_id)| Res::Def(kind, def_id)),
+ _ => None,
+ };
+
+ if let Some(res) = res && let Some(def_id) = res.opt_def_id().and_then(|el| el.as_local()) {
+ if self.def_id_represents_local_inlined_item(def_id.to_def_id()) {
+ self.worklist.push(def_id);
+ } else {
+ match res {
+ // If this path leads to a constant, then we need to
+ // recurse into the constant to continue finding
+ // items that are reachable.
+ Res::Def(DefKind::Const | DefKind::AssocConst, _) => {
+ self.worklist.push(def_id);
+ }
+
+ // If this wasn't a static, then the destination is
+ // surely reachable.
+ _ => {
+ self.reachable_symbols.insert(def_id);
+ }
+ }
+ }
+ }
+
+ intravisit::walk_expr(self, expr)
+ }
+}
+
+impl<'tcx> ReachableContext<'tcx> {
+ /// Gets the type-checking results for the current body.
+ /// As this will ICE if called outside bodies, only call when working with
+ /// `Expr` or `Pat` nodes (they are guaranteed to be found only in bodies).
+ #[track_caller]
+ fn typeck_results(&self) -> &'tcx ty::TypeckResults<'tcx> {
+ self.maybe_typeck_results
+ .expect("`ReachableContext::typeck_results` called outside of body")
+ }
+
+ // Returns true if the given def ID represents a local item that is
+ // eligible for inlining and false otherwise.
+ fn def_id_represents_local_inlined_item(&self, def_id: DefId) -> bool {
+ let Some(def_id) = def_id.as_local() else {
+ return false;
+ };
+
+ match self.tcx.hir().find_by_def_id(def_id) {
+ Some(Node::Item(item)) => match item.kind {
+ hir::ItemKind::Fn(..) => {
+ item_might_be_inlined(self.tcx, &item, self.tcx.codegen_fn_attrs(def_id))
+ }
+ _ => false,
+ },
+ Some(Node::TraitItem(trait_method)) => match trait_method.kind {
+ hir::TraitItemKind::Const(_, ref default) => default.is_some(),
+ hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)) => true,
+ hir::TraitItemKind::Fn(_, hir::TraitFn::Required(_))
+ | hir::TraitItemKind::Type(..) => false,
+ },
+ Some(Node::ImplItem(impl_item)) => match impl_item.kind {
+ hir::ImplItemKind::Const(..) => true,
+ hir::ImplItemKind::Fn(..) => {
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let impl_did = self.tcx.hir().get_parent_item(hir_id);
+ method_might_be_inlined(self.tcx, impl_item, impl_did)
+ }
+ hir::ImplItemKind::TyAlias(_) => false,
+ },
+ Some(_) => false,
+ None => false, // This will happen for default methods.
+ }
+ }
+
+ // Step 2: Mark all symbols that the symbols on the worklist touch.
+ fn propagate(&mut self) {
+ let mut scanned = FxHashSet::default();
+ while let Some(search_item) = self.worklist.pop() {
+ if !scanned.insert(search_item) {
+ continue;
+ }
+
+ if let Some(ref item) = self.tcx.hir().find_by_def_id(search_item) {
+ self.propagate_node(item, search_item);
+ }
+ }
+ }
+
+ fn propagate_node(&mut self, node: &Node<'tcx>, search_item: LocalDefId) {
+ if !self.any_library {
+ // If we are building an executable, only explicitly extern
+ // items need to be exported.
+ let reachable =
+ if let Node::Item(hir::Item { kind: hir::ItemKind::Fn(sig, ..), .. })
+ | Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(sig, ..), ..
+ }) = *node
+ {
+ sig.header.abi != Abi::Rust
+ } else {
+ false
+ };
+ let codegen_attrs = if self.tcx.def_kind(search_item).has_codegen_attrs() {
+ self.tcx.codegen_fn_attrs(search_item)
+ } else {
+ CodegenFnAttrs::EMPTY
+ };
+ let is_extern = codegen_attrs.contains_extern_indicator();
+ let std_internal =
+ codegen_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL);
+ if reachable || is_extern || std_internal {
+ self.reachable_symbols.insert(search_item);
+ }
+ } else {
+ // If we are building a library, then reachable symbols will
+ // continue to participate in linkage after this product is
+ // produced. In this case, we traverse the ast node, recursing on
+ // all reachable nodes from this one.
+ self.reachable_symbols.insert(search_item);
+ }
+
+ match *node {
+ Node::Item(item) => {
+ match item.kind {
+ hir::ItemKind::Fn(.., body) => {
+ if item_might_be_inlined(
+ self.tcx,
+ &item,
+ self.tcx.codegen_fn_attrs(item.def_id),
+ ) {
+ self.visit_nested_body(body);
+ }
+ }
+
+ // Reachable constants will be inlined into other crates
+ // unconditionally, so we need to make sure that their
+ // contents are also reachable.
+ hir::ItemKind::Const(_, init) | hir::ItemKind::Static(_, _, init) => {
+ self.visit_nested_body(init);
+ }
+
+ // These are normal: nothing is inherently reachable about
+ // them, and their children are already in the worklist, as
+ // determined by the privacy pass
+ hir::ItemKind::ExternCrate(_)
+ | hir::ItemKind::Use(..)
+ | hir::ItemKind::OpaqueTy(..)
+ | hir::ItemKind::TyAlias(..)
+ | hir::ItemKind::Macro(..)
+ | hir::ItemKind::Mod(..)
+ | hir::ItemKind::ForeignMod { .. }
+ | hir::ItemKind::Impl { .. }
+ | hir::ItemKind::Trait(..)
+ | hir::ItemKind::TraitAlias(..)
+ | hir::ItemKind::Struct(..)
+ | hir::ItemKind::Enum(..)
+ | hir::ItemKind::Union(..)
+ | hir::ItemKind::GlobalAsm(..) => {}
+ }
+ }
+ Node::TraitItem(trait_method) => {
+ match trait_method.kind {
+ hir::TraitItemKind::Const(_, None)
+ | hir::TraitItemKind::Fn(_, hir::TraitFn::Required(_)) => {
+ // Keep going, nothing to get exported
+ }
+ hir::TraitItemKind::Const(_, Some(body_id))
+ | hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(body_id)) => {
+ self.visit_nested_body(body_id);
+ }
+ hir::TraitItemKind::Type(..) => {}
+ }
+ }
+ Node::ImplItem(impl_item) => match impl_item.kind {
+ hir::ImplItemKind::Const(_, body) => {
+ self.visit_nested_body(body);
+ }
+ hir::ImplItemKind::Fn(_, body) => {
+ let impl_def_id = self.tcx.local_parent(search_item);
+ if method_might_be_inlined(self.tcx, impl_item, impl_def_id) {
+ self.visit_nested_body(body)
+ }
+ }
+ hir::ImplItemKind::TyAlias(_) => {}
+ },
+ Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { body, .. }),
+ ..
+ }) => {
+ self.visit_nested_body(body);
+ }
+ // Nothing to recurse on for these
+ Node::ForeignItem(_)
+ | Node::Variant(_)
+ | Node::Ctor(..)
+ | Node::Field(_)
+ | Node::Ty(_)
+ | Node::Crate(_) => {}
+ _ => {
+ bug!(
+ "found unexpected node kind in worklist: {} ({:?})",
+ self.tcx
+ .hir()
+ .node_to_string(self.tcx.hir().local_def_id_to_hir_id(search_item)),
+ node,
+ );
+ }
+ }
+ }
+}
+
+fn check_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ id: hir::ItemId,
+ worklist: &mut Vec<LocalDefId>,
+ access_levels: &privacy::AccessLevels,
+) {
+ if has_custom_linkage(tcx, id.def_id) {
+ worklist.push(id.def_id);
+ }
+
+ if !matches!(tcx.def_kind(id.def_id), DefKind::Impl) {
+ return;
+ }
+
+ // We need only trait impls here, not inherent impls, and only non-exported ones
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::Impl(hir::Impl { of_trait: Some(ref trait_ref), ref items, .. }) =
+ item.kind
+ {
+ if !access_levels.is_reachable(item.def_id) {
+ worklist.extend(items.iter().map(|ii_ref| ii_ref.id.def_id));
+
+ let Res::Def(DefKind::Trait, trait_def_id) = trait_ref.path.res else {
+ unreachable!();
+ };
+
+ if !trait_def_id.is_local() {
+ return;
+ }
+
+ worklist.extend(
+ tcx.provided_trait_methods(trait_def_id).map(|assoc| assoc.def_id.expect_local()),
+ );
+ }
+ }
+}
+
+fn has_custom_linkage<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> bool {
+ // Anything which has custom linkage gets thrown on the worklist no
+ // matter where it is in the crate, along with "special std symbols"
+ // which are currently akin to allocator symbols.
+ if !tcx.def_kind(def_id).has_codegen_attrs() {
+ return false;
+ }
+ let codegen_attrs = tcx.codegen_fn_attrs(def_id);
+ codegen_attrs.contains_extern_indicator()
+ || codegen_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL)
+ // FIXME(nbdd0121): `#[used]` are marked as reachable here so it's picked up by
+ // `linked_symbols` in cg_ssa. They won't be exported in binary or cdylib due to their
+ // `SymbolExportLevel::Rust` export level but may end up being exported in dylibs.
+ || codegen_attrs.flags.contains(CodegenFnAttrFlags::USED)
+ || codegen_attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER)
+}
+
+fn reachable_set<'tcx>(tcx: TyCtxt<'tcx>, (): ()) -> FxHashSet<LocalDefId> {
+ let access_levels = &tcx.privacy_access_levels(());
+
+ let any_library =
+ tcx.sess.crate_types().iter().any(|ty| {
+ *ty == CrateType::Rlib || *ty == CrateType::Dylib || *ty == CrateType::ProcMacro
+ });
+ let mut reachable_context = ReachableContext {
+ tcx,
+ maybe_typeck_results: None,
+ reachable_symbols: Default::default(),
+ worklist: Vec::new(),
+ any_library,
+ };
+
+ // Step 1: Seed the worklist with all nodes which were found to be public as
+ // a result of the privacy pass along with all local lang items and impl items.
+ // If other crates link to us, they're going to expect to be able to
+ // use the lang items, so we need to be sure to mark them as
+ // exported.
+ reachable_context.worklist.extend(access_levels.map.keys());
+ for item in tcx.lang_items().items().iter() {
+ if let Some(def_id) = *item {
+ if let Some(def_id) = def_id.as_local() {
+ reachable_context.worklist.push(def_id);
+ }
+ }
+ }
+ {
+ // Some methods from non-exported (completely private) trait impls still have to be
+ // reachable if they are called from inlinable code. Generally, it's not known until
+ // monomorphization if a specific trait impl item can be reachable or not. So, we
+ // conservatively mark all of them as reachable.
+ // FIXME: One possible strategy for pruning the reachable set is to avoid marking impl
+ // items of non-exported traits (or maybe all local traits?) unless their respective
+ // trait items are used from inlinable code through method call syntax or UFCS, or their
+ // trait is a lang item.
+ let crate_items = tcx.hir_crate_items(());
+
+ for id in crate_items.items() {
+ check_item(tcx, id, &mut reachable_context.worklist, access_levels);
+ }
+
+ for id in crate_items.impl_items() {
+ if has_custom_linkage(tcx, id.def_id) {
+ reachable_context.worklist.push(id.def_id);
+ }
+ }
+ }
+
+ // Step 2: Mark all symbols that the symbols on the worklist touch.
+ reachable_context.propagate();
+
+ debug!("Inline reachability shows: {:?}", reachable_context.reachable_symbols);
+
+ // Return the set of reachable symbols.
+ reachable_context.reachable_symbols
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { reachable_set, ..*providers };
+}
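
A sketch of why `has_custom_linkage` seeds the worklist independently of privacy: an item can be unreachable through any public Rust path yet still need its symbol exported:

    mod private {
        // Not nameable from outside this crate, but the extern indicator
        // (`#[no_mangle]`) forces the symbol to stay exported anyway.
        #[no_mangle]
        pub extern "C" fn ffi_entry() {}
    }
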
diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs
new file mode 100644
index 000000000..ca6a2ac3d
--- /dev/null
+++ b/compiler/rustc_passes/src/stability.rs
@@ -0,0 +1,1063 @@
+//! A pass that annotates every item and method with its stability level,
+//! propagating default levels lexically from parent to child AST nodes.
+
+use attr::StabilityLevel;
+use rustc_attr::{self as attr, ConstStability, Stability, Unstable, UnstableReason};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{LocalDefId, CRATE_DEF_ID};
+use rustc_hir::hir_id::CRATE_HIR_ID;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{FieldDef, Generics, HirId, Item, ItemKind, TraitRef, Ty, TyKind, Variant};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::privacy::AccessLevels;
+use rustc_middle::middle::stability::{AllowUnstable, DeprecationEntry, Index};
+use rustc_middle::ty::{query::Providers, TyCtxt};
+use rustc_session::lint;
+use rustc_session::lint::builtin::{INEFFECTIVE_UNSTABLE_TRAIT_IMPL, USELESS_DEPRECATED};
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+use rustc_target::spec::abi::Abi;
+
+use std::cmp::Ordering;
+use std::iter;
+use std::mem::replace;
+use std::num::NonZeroU32;
+
+#[derive(PartialEq)]
+enum AnnotationKind {
+ /// Annotation is required if not inherited from unstable parents.
+ Required,
+ /// Annotation is useless, reject it.
+ Prohibited,
+ /// Deprecation annotation is useless, reject it. (Stability attribute is still required.)
+ DeprecationProhibited,
+ /// Annotation itself is useless, but it can be propagated to children.
+ Container,
+}
+
+/// Whether to inherit deprecation flags for nested items. In most cases, we do want to inherit
+/// deprecation, because nested items rarely have individual deprecation attributes, and so
+/// should be treated as deprecated if their parent is. However, default generic parameters
+/// have separate deprecation attributes from their parents, so we do not wish to inherit
+/// deprecation in this case. For example, inheriting deprecation for `T` in `Foo<T>`
+/// would cause a duplicate warning arising from both `Foo` and `T` being deprecated.
+#[derive(Clone)]
+enum InheritDeprecation {
+ Yes,
+ No,
+}
+
+impl InheritDeprecation {
+ fn yes(&self) -> bool {
+ matches!(self, InheritDeprecation::Yes)
+ }
+}
+
+/// Whether to inherit const stability flags for nested items. In most cases, we do not want to
+/// inherit const stability: just because an enclosing `fn` is const-stable does not mean
+/// all `extern` imports declared in it should be const-stable! However, trait methods
+/// inherit const stability attributes from their parent and do not have their own.
+enum InheritConstStability {
+ Yes,
+ No,
+}
+
+impl InheritConstStability {
+ fn yes(&self) -> bool {
+ matches!(self, InheritConstStability::Yes)
+ }
+}
+
+enum InheritStability {
+ Yes,
+ No,
+}
+
+impl InheritStability {
+ fn yes(&self) -> bool {
+ matches!(self, InheritStability::Yes)
+ }
+}
+
+/// A private tree-walker for producing an `Index`.
+struct Annotator<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ index: &'a mut Index,
+ parent_stab: Option<Stability>,
+ parent_const_stab: Option<ConstStability>,
+ parent_depr: Option<DeprecationEntry>,
+ in_trait_impl: bool,
+}
+
+impl<'a, 'tcx> Annotator<'a, 'tcx> {
+ /// Determine the stability for a node based on its attributes and inherited stability. The
+ /// stability is recorded in the index and used as the parent. If the node is a function,
+ /// `fn_sig` is its signature.
+ fn annotate<F>(
+ &mut self,
+ def_id: LocalDefId,
+ item_sp: Span,
+ fn_sig: Option<&'tcx hir::FnSig<'tcx>>,
+ kind: AnnotationKind,
+ inherit_deprecation: InheritDeprecation,
+ inherit_const_stability: InheritConstStability,
+ inherit_from_parent: InheritStability,
+ visit_children: F,
+ ) where
+ F: FnOnce(&mut Self),
+ {
+ let attrs = self.tcx.hir().attrs(self.tcx.hir().local_def_id_to_hir_id(def_id));
+ debug!("annotate(id = {:?}, attrs = {:?})", def_id, attrs);
+
+ let depr = attr::find_deprecation(&self.tcx.sess, attrs);
+ let mut is_deprecated = false;
+ if let Some((depr, span)) = &depr {
+ is_deprecated = true;
+
+ if kind == AnnotationKind::Prohibited || kind == AnnotationKind::DeprecationProhibited {
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ self.tcx.struct_span_lint_hir(USELESS_DEPRECATED, hir_id, *span, |lint| {
+ lint.build("this `#[deprecated]` annotation has no effect")
+ .span_suggestion_short(
+ *span,
+ "remove the unnecessary deprecation attribute",
+ "",
+ rustc_errors::Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ }
+
+ // `Deprecation` is just two pointers, no need to intern it
+ let depr_entry = DeprecationEntry::local(*depr, def_id);
+ self.index.depr_map.insert(def_id, depr_entry);
+ } else if let Some(parent_depr) = self.parent_depr {
+ if inherit_deprecation.yes() {
+ is_deprecated = true;
+ info!("tagging child {:?} as deprecated from parent", def_id);
+ self.index.depr_map.insert(def_id, parent_depr);
+ }
+ }
+
+ if !self.tcx.features().staged_api {
+ // Propagate unstability. This can happen even for non-staged-api crates in case
+ // -Zforce-unstable-if-unmarked is set.
+ if let Some(stab) = self.parent_stab {
+ if inherit_deprecation.yes() && stab.is_unstable() {
+ self.index.stab_map.insert(def_id, stab);
+ }
+ }
+
+ self.recurse_with_stability_attrs(
+ depr.map(|(d, _)| DeprecationEntry::local(d, def_id)),
+ None,
+ None,
+ visit_children,
+ );
+ return;
+ }
+
+ let (stab, const_stab) = attr::find_stability(&self.tcx.sess, attrs, item_sp);
+ let mut const_span = None;
+
+ let const_stab = const_stab.map(|(const_stab, const_span_node)| {
+ self.index.const_stab_map.insert(def_id, const_stab);
+ const_span = Some(const_span_node);
+ const_stab
+ });
+
+ // If the current node is a function with const stability attributes and a
+ // non-intrinsic ABI, check that the function/method is const, or that the
+ // parent impl block is const.
+ if let (Some(const_span), Some(fn_sig)) = (const_span, fn_sig) {
+ if fn_sig.header.abi != Abi::RustIntrinsic
+ && fn_sig.header.abi != Abi::PlatformIntrinsic
+ && !fn_sig.header.is_const()
+ {
+ if !self.in_trait_impl
+ || (self.in_trait_impl && !self.tcx.is_const_fn_raw(def_id.to_def_id()))
+ {
+ missing_const_err(&self.tcx.sess, fn_sig.span, const_span);
+ }
+ }
+ }
+
+ // `impl const Trait for Type` items forward their const stability to their
+ // immediate children.
+ if const_stab.is_none() {
+ debug!("annotate: const_stab not found, parent = {:?}", self.parent_const_stab);
+ if let Some(parent) = self.parent_const_stab {
+ if parent.is_const_unstable() {
+ self.index.const_stab_map.insert(def_id, parent);
+ }
+ }
+ }
+
+ if let Some((rustc_attr::Deprecation { is_since_rustc_version: true, .. }, span)) = &depr {
+ if stab.is_none() {
+ struct_span_err!(
+ self.tcx.sess,
+ *span,
+ E0549,
+ "deprecated attribute must be paired with \
+ either stable or unstable attribute"
+ )
+ .emit();
+ }
+ }
+
+ let stab = stab.map(|(stab, span)| {
+ // Error if prohibited, or can't inherit anything from a container.
+ if kind == AnnotationKind::Prohibited
+ || (kind == AnnotationKind::Container && stab.level.is_stable() && is_deprecated)
+ {
+ self.tcx.sess.struct_span_err(span, "this stability annotation is useless")
+ .span_label(span, "useless stability annotation")
+ .span_label(item_sp, "the stability attribute annotates this item")
+ .emit();
+ }
+
+ debug!("annotate: found {:?}", stab);
+
+ // Check if deprecated_since < stable_since. If it is,
+ // this is *almost surely* an accident.
+ if let (&Some(dep_since), &attr::Stable { since: stab_since, .. }) =
+ (&depr.as_ref().and_then(|(d, _)| d.since), &stab.level)
+ {
+ // Explicit version of iter::order::lt to handle parse errors properly
+ for (dep_v, stab_v) in
+ iter::zip(dep_since.as_str().split('.'), stab_since.as_str().split('.'))
+ {
+ match stab_v.parse::<u64>() {
+ Err(_) => {
+ self.tcx.sess.struct_span_err(span, "invalid stability version found")
+ .span_label(span, "invalid stability version")
+ .span_label(item_sp, "the stability attribute annotates this item")
+ .emit();
+ break;
+ }
+ Ok(stab_vp) => match dep_v.parse::<u64>() {
+ Ok(dep_vp) => match dep_vp.cmp(&stab_vp) {
+ Ordering::Less => {
+ self.tcx.sess.struct_span_err(span, "an API can't be stabilized after it is deprecated")
+ .span_label(span, "invalid version")
+ .span_label(item_sp, "the stability attribute annotates this item")
+ .emit();
+ break;
+ }
+ Ordering::Equal => continue,
+ Ordering::Greater => break,
+ },
+ Err(_) => {
+ if dep_v != "TBD" {
+ self.tcx.sess.struct_span_err(span, "invalid deprecation version found")
+ .span_label(span, "invalid deprecation version")
+ .span_label(item_sp, "the stability attribute annotates this item")
+ .emit();
+ }
+ break;
+ }
+ },
+ }
+ }
+ }
+
+ if let Stability { level: Unstable { implied_by: Some(implied_by), .. }, feature } = stab {
+ self.index.implications.insert(implied_by, feature);
+ }
+
+ self.index.stab_map.insert(def_id, stab);
+ stab
+ });
+
+ if stab.is_none() {
+ debug!("annotate: stab not found, parent = {:?}", self.parent_stab);
+ if let Some(stab) = self.parent_stab {
+ if inherit_deprecation.yes() && stab.is_unstable() || inherit_from_parent.yes() {
+ self.index.stab_map.insert(def_id, stab);
+ }
+ }
+ }
+
+ self.recurse_with_stability_attrs(
+ depr.map(|(d, _)| DeprecationEntry::local(d, def_id)),
+ stab,
+ if inherit_const_stability.yes() { const_stab } else { None },
+ visit_children,
+ );
+ }
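The version check above compares the `deprecated(since = ...)` and `stable(since = ...)` strings component by component, so `1.9.0` orders before `1.10.0` where a plain string comparison would not. A standalone sketch of the same ordering logic (hypothetical helper, not part of this patch):

    use std::cmp::Ordering;

    // Returns true when the deprecation version precedes the stabilization
    // version, mirroring the component-wise loop in `annotate` above.
    fn deprecated_before_stable(dep_since: &str, stab_since: &str) -> bool {
        for (d, s) in dep_since.split('.').zip(stab_since.split('.')) {
            match (d.parse::<u64>(), s.parse::<u64>()) {
                (Ok(d), Ok(s)) => match d.cmp(&s) {
                    Ordering::Less => return true,
                    Ordering::Greater => return false,
                    Ordering::Equal => continue,
                },
                // Parse failures ("TBD", malformed numbers) are reported as
                // separate errors in the real code; stop comparing here.
                _ => return false,
            }
        }
        false
    }

    fn main() {
        assert!(deprecated_before_stable("1.9.0", "1.10.0"));
        assert!(!deprecated_before_stable("1.10.0", "1.9.0"));
    }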
+
+ fn recurse_with_stability_attrs(
+ &mut self,
+ depr: Option<DeprecationEntry>,
+ stab: Option<Stability>,
+ const_stab: Option<ConstStability>,
+ f: impl FnOnce(&mut Self),
+ ) {
+ // These will be `Some` if this item changes the corresponding stability attribute.
+ let mut replaced_parent_depr = None;
+ let mut replaced_parent_stab = None;
+ let mut replaced_parent_const_stab = None;
+
+ if let Some(depr) = depr {
+ replaced_parent_depr = Some(replace(&mut self.parent_depr, Some(depr)));
+ }
+ if let Some(stab) = stab {
+ replaced_parent_stab = Some(replace(&mut self.parent_stab, Some(stab)));
+ }
+ if let Some(const_stab) = const_stab {
+ replaced_parent_const_stab =
+ Some(replace(&mut self.parent_const_stab, Some(const_stab)));
+ }
+
+ f(self);
+
+ if let Some(orig_parent_depr) = replaced_parent_depr {
+ self.parent_depr = orig_parent_depr;
+ }
+ if let Some(orig_parent_stab) = replaced_parent_stab {
+ self.parent_stab = orig_parent_stab;
+ }
+ if let Some(orig_parent_const_stab) = replaced_parent_const_stab {
+ self.parent_const_stab = orig_parent_const_stab;
+ }
+ }
+}
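`recurse_with_stability_attrs` saves each parent attribute with `mem::replace`, runs the child walk, and restores only what it replaced. The same push/walk/pop shape in isolation (a minimal sketch with simplified types):

    use std::mem::replace;

    struct Walker {
        parent_stab: Option<&'static str>,
    }

    impl Walker {
        // Install `stab` as the parent value for the duration of `f`, then
        // restore the previous value, like the method above.
        fn recurse_with(&mut self, stab: Option<&'static str>, f: impl FnOnce(&mut Self)) {
            let replaced = stab.map(|s| replace(&mut self.parent_stab, Some(s)));
            f(self);
            if let Some(orig) = replaced {
                self.parent_stab = orig;
            }
        }
    }

    fn main() {
        let mut w = Walker { parent_stab: Some("1.0.0") };
        w.recurse_with(Some("unstable"), |w| {
            assert_eq!(w.parent_stab, Some("unstable"));
        });
        assert_eq!(w.parent_stab, Some("1.0.0"));
    }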
+
+impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> {
+ /// Because stability levels are scoped lexically, we want to walk
+ /// nested items in the context of the outer item, so enable
+ /// deep-walking.
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, i: &'tcx Item<'tcx>) {
+ let orig_in_trait_impl = self.in_trait_impl;
+ let mut kind = AnnotationKind::Required;
+ let mut const_stab_inherit = InheritConstStability::No;
+ let mut fn_sig = None;
+
+ match i.kind {
+ // Inherent impls and foreign modules serve only as containers for other items,
+ // they don't have their own stability. They still can be annotated as unstable
+ // and propagate this instability to children, but this annotation is completely
+ // optional. They inherit stability from their parents when unannotated.
+ hir::ItemKind::Impl(hir::Impl { of_trait: None, .. })
+ | hir::ItemKind::ForeignMod { .. } => {
+ self.in_trait_impl = false;
+ kind = AnnotationKind::Container;
+ }
+ hir::ItemKind::Impl(hir::Impl { of_trait: Some(_), .. }) => {
+ self.in_trait_impl = true;
+ kind = AnnotationKind::DeprecationProhibited;
+ const_stab_inherit = InheritConstStability::Yes;
+ }
+ hir::ItemKind::Struct(ref sd, _) => {
+ if let Some(ctor_hir_id) = sd.ctor_hir_id() {
+ self.annotate(
+ self.tcx.hir().local_def_id(ctor_hir_id),
+ i.span,
+ None,
+ AnnotationKind::Required,
+ InheritDeprecation::Yes,
+ InheritConstStability::No,
+ InheritStability::Yes,
+ |_| {},
+ )
+ }
+ }
+ hir::ItemKind::Fn(ref item_fn_sig, _, _) => {
+ fn_sig = Some(item_fn_sig);
+ }
+ _ => {}
+ }
+
+ self.annotate(
+ i.def_id,
+ i.span,
+ fn_sig,
+ kind,
+ InheritDeprecation::Yes,
+ const_stab_inherit,
+ InheritStability::No,
+ |v| intravisit::walk_item(v, i),
+ );
+ self.in_trait_impl = orig_in_trait_impl;
+ }
+
+ fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem<'tcx>) {
+ let fn_sig = match ti.kind {
+ hir::TraitItemKind::Fn(ref fn_sig, _) => Some(fn_sig),
+ _ => None,
+ };
+
+ self.annotate(
+ ti.def_id,
+ ti.span,
+ fn_sig,
+ AnnotationKind::Required,
+ InheritDeprecation::Yes,
+ InheritConstStability::No,
+ InheritStability::No,
+ |v| {
+ intravisit::walk_trait_item(v, ti);
+ },
+ );
+ }
+
+ fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem<'tcx>) {
+ let kind =
+ if self.in_trait_impl { AnnotationKind::Prohibited } else { AnnotationKind::Required };
+
+ let fn_sig = match ii.kind {
+ hir::ImplItemKind::Fn(ref fn_sig, _) => Some(fn_sig),
+ _ => None,
+ };
+
+ self.annotate(
+ ii.def_id,
+ ii.span,
+ fn_sig,
+ kind,
+ InheritDeprecation::Yes,
+ InheritConstStability::No,
+ InheritStability::No,
+ |v| {
+ intravisit::walk_impl_item(v, ii);
+ },
+ );
+ }
+
+ fn visit_variant(&mut self, var: &'tcx Variant<'tcx>, g: &'tcx Generics<'tcx>, item_id: HirId) {
+ self.annotate(
+ self.tcx.hir().local_def_id(var.id),
+ var.span,
+ None,
+ AnnotationKind::Required,
+ InheritDeprecation::Yes,
+ InheritConstStability::No,
+ InheritStability::Yes,
+ |v| {
+ if let Some(ctor_hir_id) = var.data.ctor_hir_id() {
+ v.annotate(
+ v.tcx.hir().local_def_id(ctor_hir_id),
+ var.span,
+ None,
+ AnnotationKind::Required,
+ InheritDeprecation::Yes,
+ InheritConstStability::No,
+ InheritStability::No,
+ |_| {},
+ );
+ }
+
+ intravisit::walk_variant(v, var, g, item_id)
+ },
+ )
+ }
+
+ fn visit_field_def(&mut self, s: &'tcx FieldDef<'tcx>) {
+ self.annotate(
+ self.tcx.hir().local_def_id(s.hir_id),
+ s.span,
+ None,
+ AnnotationKind::Required,
+ InheritDeprecation::Yes,
+ InheritConstStability::No,
+ InheritStability::Yes,
+ |v| {
+ intravisit::walk_field_def(v, s);
+ },
+ );
+ }
+
+ fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem<'tcx>) {
+ self.annotate(
+ i.def_id,
+ i.span,
+ None,
+ AnnotationKind::Required,
+ InheritDeprecation::Yes,
+ InheritConstStability::No,
+ InheritStability::No,
+ |v| {
+ intravisit::walk_foreign_item(v, i);
+ },
+ );
+ }
+
+ fn visit_generic_param(&mut self, p: &'tcx hir::GenericParam<'tcx>) {
+ let kind = match &p.kind {
+ // Allow stability attributes on default generic arguments.
+ hir::GenericParamKind::Type { default: Some(_), .. }
+ | hir::GenericParamKind::Const { default: Some(_), .. } => AnnotationKind::Container,
+ _ => AnnotationKind::Prohibited,
+ };
+
+ self.annotate(
+ self.tcx.hir().local_def_id(p.hir_id),
+ p.span,
+ None,
+ kind,
+ InheritDeprecation::No,
+ InheritConstStability::No,
+ InheritStability::No,
+ |v| {
+ intravisit::walk_generic_param(v, p);
+ },
+ );
+ }
+}
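As a concrete illustration of the `AnnotationKind::Prohibited` path chosen in `visit_impl_item`: deprecating a single item of a trait impl has no effect, because its stability is governed by the trait definition, so the `USELESS_DEPRECATED` lint above fires (user-code sketch):

    trait Speak {
        fn speak(&self);
    }

    struct Dog;

    impl Speak for Dog {
        #[deprecated] // warning: this `#[deprecated]` annotation has no effect
        fn speak(&self) {}
    }

    fn main() {}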
+
+struct MissingStabilityAnnotations<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ access_levels: &'tcx AccessLevels,
+}
+
+impl<'tcx> MissingStabilityAnnotations<'tcx> {
+ fn check_missing_stability(&self, def_id: LocalDefId, span: Span) {
+ let stab = self.tcx.stability().local_stability(def_id);
+ if !self.tcx.sess.opts.test && stab.is_none() && self.access_levels.is_reachable(def_id) {
+ let descr = self.tcx.def_kind(def_id).descr(def_id.to_def_id());
+ self.tcx.sess.span_err(span, &format!("{} has missing stability attribute", descr));
+ }
+ }
+
+ fn check_missing_const_stability(&self, def_id: LocalDefId, span: Span) {
+ if !self.tcx.features().staged_api {
+ return;
+ }
+
+ let is_const = self.tcx.is_const_fn(def_id.to_def_id())
+ || self.tcx.is_const_trait_impl_raw(def_id.to_def_id());
+ let is_stable = self
+ .tcx
+ .lookup_stability(def_id)
+ .map_or(false, |stability| stability.level.is_stable());
+ let missing_const_stability_attribute = self.tcx.lookup_const_stability(def_id).is_none();
+ let is_reachable = self.access_levels.is_reachable(def_id);
+
+ if is_const && is_stable && missing_const_stability_attribute && is_reachable {
+ let descr = self.tcx.def_kind(def_id).descr(def_id.to_def_id());
+ self.tcx.sess.span_err(span, &format!("{descr} has missing const stability attribute"));
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for MissingStabilityAnnotations<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, i: &'tcx Item<'tcx>) {
+ // Inherent impls and foreign modules serve only as containers for other items,
+ // they don't have their own stability. They still can be annotated as unstable
+ // and propagate this instability to children, but this annotation is completely
+ // optional. They inherit stability from their parents when unannotated.
+ if !matches!(
+ i.kind,
+ hir::ItemKind::Impl(hir::Impl { of_trait: None, .. })
+ | hir::ItemKind::ForeignMod { .. }
+ ) {
+ self.check_missing_stability(i.def_id, i.span);
+ }
+
+ // Ensure stable `const fn` have a const stability attribute.
+ self.check_missing_const_stability(i.def_id, i.span);
+
+ intravisit::walk_item(self, i)
+ }
+
+ fn visit_trait_item(&mut self, ti: &'tcx hir::TraitItem<'tcx>) {
+ self.check_missing_stability(ti.def_id, ti.span);
+ intravisit::walk_trait_item(self, ti);
+ }
+
+ fn visit_impl_item(&mut self, ii: &'tcx hir::ImplItem<'tcx>) {
+ let impl_def_id = self.tcx.hir().get_parent_item(ii.hir_id());
+ if self.tcx.impl_trait_ref(impl_def_id).is_none() {
+ self.check_missing_stability(ii.def_id, ii.span);
+ self.check_missing_const_stability(ii.def_id, ii.span);
+ }
+ intravisit::walk_impl_item(self, ii);
+ }
+
+ fn visit_variant(&mut self, var: &'tcx Variant<'tcx>, g: &'tcx Generics<'tcx>, item_id: HirId) {
+ self.check_missing_stability(self.tcx.hir().local_def_id(var.id), var.span);
+ intravisit::walk_variant(self, var, g, item_id);
+ }
+
+ fn visit_field_def(&mut self, s: &'tcx FieldDef<'tcx>) {
+ self.check_missing_stability(self.tcx.hir().local_def_id(s.hir_id), s.span);
+ intravisit::walk_field_def(self, s);
+ }
+
+ fn visit_foreign_item(&mut self, i: &'tcx hir::ForeignItem<'tcx>) {
+ self.check_missing_stability(i.def_id, i.span);
+ intravisit::walk_foreign_item(self, i);
+ }
+ // Note that we don't need to `check_missing_stability` for default generic parameters,
+ // as we assume that any default generic parameters without attributes are automatically
+ // stable (assuming they have not inherited instability from their parent).
+}
+
+fn stability_index(tcx: TyCtxt<'_>, (): ()) -> Index {
+ let mut index = Index {
+ stab_map: Default::default(),
+ const_stab_map: Default::default(),
+ depr_map: Default::default(),
+ implications: Default::default(),
+ };
+
+ {
+ let mut annotator = Annotator {
+ tcx,
+ index: &mut index,
+ parent_stab: None,
+ parent_const_stab: None,
+ parent_depr: None,
+ in_trait_impl: false,
+ };
+
+ // If the `-Z force-unstable-if-unmarked` flag is passed then we provide
+ // a parent stability annotation which indicates that this is private
+ // with the `rustc_private` feature. This is intended for use when
+ // compiling `librustc_*` crates themselves so we can leverage crates.io
+ // while maintaining the invariant that all sysroot crates are unstable
+ // by default and cannot be used without the `rustc_private` feature.
+ if tcx.sess.opts.unstable_opts.force_unstable_if_unmarked {
+ let stability = Stability {
+ level: attr::StabilityLevel::Unstable {
+ reason: UnstableReason::Default,
+ issue: NonZeroU32::new(27812),
+ is_soft: false,
+ implied_by: None,
+ },
+ feature: sym::rustc_private,
+ };
+ annotator.parent_stab = Some(stability);
+ }
+
+ annotator.annotate(
+ CRATE_DEF_ID,
+ tcx.hir().span(CRATE_HIR_ID),
+ None,
+ AnnotationKind::Required,
+ InheritDeprecation::Yes,
+ InheritConstStability::No,
+ InheritStability::No,
+ |v| tcx.hir().walk_toplevel_module(v),
+ );
+ }
+ index
+}
+
+/// Cross-references the feature names of unstable APIs with enabled
+/// features and possibly prints errors.
+fn check_mod_unstable_api_usage(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ tcx.hir().visit_item_likes_in_module(module_def_id, &mut Checker { tcx });
+}
+
+pub(crate) fn provide(providers: &mut Providers) {
+ *providers = Providers {
+ check_mod_unstable_api_usage,
+ stability_index,
+ stability_implications: |tcx, _| tcx.stability().implications.clone(),
+ lookup_stability: |tcx, id| tcx.stability().local_stability(id.expect_local()),
+ lookup_const_stability: |tcx, id| tcx.stability().local_const_stability(id.expect_local()),
+ lookup_deprecation_entry: |tcx, id| {
+ tcx.stability().local_deprecation_entry(id.expect_local())
+ },
+ ..*providers
+ };
+}
+
+struct Checker<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> Visitor<'tcx> for Checker<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ /// Because stability levels are scoped lexically, we want to walk
+ /// nested items in the context of the outer item, so enable
+ /// deep-walking.
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ match item.kind {
+ hir::ItemKind::ExternCrate(_) => {
+ // compiler-generated `extern crate` items have a dummy span.
+ // `std` is still checked for the `restricted-std` feature.
+ if item.span.is_dummy() && item.ident.name != sym::std {
+ return;
+ }
+
+ let Some(cnum) = self.tcx.extern_mod_stmt_cnum(item.def_id) else {
+ return;
+ };
+ let def_id = cnum.as_def_id();
+ self.tcx.check_stability(def_id, Some(item.hir_id()), item.span, None);
+ }
+
+ // For implementations of traits, check the stability of each item
+ // individually as it's possible to have a stable trait with unstable
+ // items.
+ hir::ItemKind::Impl(hir::Impl {
+ of_trait: Some(ref t),
+ self_ty,
+ items,
+ constness,
+ ..
+ }) => {
+ let features = self.tcx.features();
+ if features.staged_api {
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+ let (stab, const_stab) = attr::find_stability(&self.tcx.sess, attrs, item.span);
+
+ // If this impl block has an #[unstable] attribute, give an
+ // error if all involved types and traits are stable, because
+ // it will have no effect.
+ // See: https://github.com/rust-lang/rust/issues/55436
+ if let Some((Stability { level: attr::Unstable { .. }, .. }, span)) = stab {
+ let mut c = CheckTraitImplStable { tcx: self.tcx, fully_stable: true };
+ c.visit_ty(self_ty);
+ c.visit_trait_ref(t);
+ if c.fully_stable {
+ self.tcx.struct_span_lint_hir(
+ INEFFECTIVE_UNSTABLE_TRAIT_IMPL,
+ item.hir_id(),
+ span,
+ |lint| {
+ lint.build("an `#[unstable]` annotation here has no effect")
+ .note("see issue #55436 <https://github.com/rust-lang/rust/issues/55436> for more information")
+ .emit();
+ }
+ );
+ }
+ }
+
+ // `#![feature(const_trait_impl)]` is unstable, so any impl declared stable
+ // needs to have an error emitted.
+ if features.const_trait_impl
+ && *constness == hir::Constness::Const
+ && const_stab.map_or(false, |(stab, _)| stab.is_const_stable())
+ {
+ self.tcx
+ .sess
+ .struct_span_err(item.span, "trait implementations cannot be const stable yet")
+ .note("see issue #67792 <https://github.com/rust-lang/rust/issues/67792> for more information")
+ .emit();
+ }
+ }
+
+ for impl_item_ref in *items {
+ let impl_item = self.tcx.associated_item(impl_item_ref.id.def_id);
+
+ if let Some(def_id) = impl_item.trait_item_def_id {
+ // Pass `None` to skip deprecation warnings.
+ self.tcx.check_stability(def_id, None, impl_item_ref.span, None);
+ }
+ }
+ }
+
+ _ => (/* pass */),
+ }
+ intravisit::walk_item(self, item);
+ }
+
+ fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, id: hir::HirId) {
+ if let Some(def_id) = path.res.opt_def_id() {
+ let method_span = path.segments.last().map(|s| s.ident.span);
+ let item_is_allowed = self.tcx.check_stability_allow_unstable(
+ def_id,
+ Some(id),
+ path.span,
+ method_span,
+ if is_unstable_reexport(self.tcx, id) {
+ AllowUnstable::Yes
+ } else {
+ AllowUnstable::No
+ },
+ );
+
+ let is_allowed_through_unstable_modules = |def_id| {
+ self.tcx
+ .lookup_stability(def_id)
+ .map(|stab| match stab.level {
+ StabilityLevel::Stable { allowed_through_unstable_modules, .. } => {
+ allowed_through_unstable_modules
+ }
+ _ => false,
+ })
+ .unwrap_or(false)
+ };
+
+ if item_is_allowed && !is_allowed_through_unstable_modules(def_id) {
+ // Check parent modules' stability as well if the item the path refers to is itself
+ // stable. We only emit warnings for unstable path segments if the item is stable
+ // or allowed, because stability is often inherited, so the most common case is that
+ // both the segments and the item are unstable behind the same feature flag.
+ //
+ // We check here rather than in `visit_path_segment` to prevent visiting the last
+ // path segment twice.
+ //
+ // We include special cases via `#[rustc_allowed_through_unstable_modules]` for items
+ // that were accidentally stabilized through unstable paths before this check was
+ // added, such as `core::intrinsics::transmute`.
+ let parents = path.segments.iter().rev().skip(1);
+ for path_segment in parents {
+ if let Some(def_id) = path_segment.res.as_ref().and_then(Res::opt_def_id) {
+ // use `None` for id to prevent deprecation check
+ self.tcx.check_stability_allow_unstable(
+ def_id,
+ None,
+ path.span,
+ None,
+ if is_unstable_reexport(self.tcx, id) {
+ AllowUnstable::Yes
+ } else {
+ AllowUnstable::No
+ },
+ );
+ }
+ }
+ }
+ }
+
+ intravisit::walk_path(self, path)
+ }
+}
+
+/// Check whether a path is a `use` item that has been marked as unstable.
+///
+/// See issue #94972 for details on why this is a special case.
+fn is_unstable_reexport<'tcx>(tcx: TyCtxt<'tcx>, id: hir::HirId) -> bool {
+ // Get the LocalDefId so we can lookup the item to check the kind.
+ let Some(def_id) = tcx.hir().opt_local_def_id(id) else { return false; };
+
+ let Some(stab) = tcx.stability().local_stability(def_id) else {
+ return false;
+ };
+
+ if stab.level.is_stable() {
+ // The re-export is not marked as unstable, don't override
+ return false;
+ }
+
+ // If this is a path that isn't a use, we don't need to do anything special
+ if !matches!(tcx.hir().item(hir::ItemId { def_id }).kind, ItemKind::Use(..)) {
+ return false;
+ }
+
+ true
+}
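For intuition, the shape of re-export this function recognizes: the `use` item is itself `#[unstable]`, so naming an unstable item in it is allowed (`AllowUnstable::Yes` above) and the gate is carried by the re-export instead. A sketch of such a staged-api crate; the attribute details are illustrative:

    #[unstable(feature = "inner_api", issue = "none")]
    pub mod inner {
        #[unstable(feature = "inner_api", issue = "none")]
        pub struct Thing;
    }

    // The re-export is marked unstable itself, so this `use` may name the
    // unstable `Thing` without tripping the stability check here; users of
    // the re-exported path still need the feature gate.
    #[unstable(feature = "inner_api", issue = "none")]
    pub use inner::Thing;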
+
+struct CheckTraitImplStable<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ fully_stable: bool,
+}
+
+impl<'tcx> Visitor<'tcx> for CheckTraitImplStable<'tcx> {
+ fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _id: hir::HirId) {
+ if let Some(def_id) = path.res.opt_def_id() {
+ if let Some(stab) = self.tcx.lookup_stability(def_id) {
+ self.fully_stable &= stab.level.is_stable();
+ }
+ }
+ intravisit::walk_path(self, path)
+ }
+
+ fn visit_trait_ref(&mut self, t: &'tcx TraitRef<'tcx>) {
+ if let Res::Def(DefKind::Trait, trait_did) = t.path.res {
+ if let Some(stab) = self.tcx.lookup_stability(trait_did) {
+ self.fully_stable &= stab.level.is_stable();
+ }
+ }
+ intravisit::walk_trait_ref(self, t)
+ }
+
+ fn visit_ty(&mut self, t: &'tcx Ty<'tcx>) {
+ if let TyKind::Never = t.kind {
+ self.fully_stable = false;
+ }
+ intravisit::walk_ty(self, t)
+ }
+}
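`CheckTraitImplStable` feeds the `INEFFECTIVE_UNSTABLE_TRAIT_IMPL` lint shown earlier: if every type and trait mentioned in the impl header is stable, an `#[unstable]` attribute on the impl cannot gate anything. User-code sketch for a staged-api crate:

    #[stable(feature = "demo", since = "1.0.0")]
    pub trait Greet {}

    #[stable(feature = "demo", since = "1.0.0")]
    pub struct Person;

    // warning: an `#[unstable]` annotation here has no effect
    #[unstable(feature = "greet_impl", issue = "none")]
    impl Greet for Person {}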
+
+/// Given the list of enabled features that were not language features (i.e., that
+/// were expected to be library features), and the list of features used from
+/// libraries, identify activated features that don't exist and error about them.
+pub fn check_unused_or_stable_features(tcx: TyCtxt<'_>) {
+ let is_staged_api =
+ tcx.sess.opts.unstable_opts.force_unstable_if_unmarked || tcx.features().staged_api;
+ if is_staged_api {
+ let access_levels = &tcx.privacy_access_levels(());
+ let mut missing = MissingStabilityAnnotations { tcx, access_levels };
+ missing.check_missing_stability(CRATE_DEF_ID, tcx.hir().span(CRATE_HIR_ID));
+ tcx.hir().walk_toplevel_module(&mut missing);
+ tcx.hir().visit_all_item_likes_in_crate(&mut missing);
+ }
+
+ let declared_lang_features = &tcx.features().declared_lang_features;
+ let mut lang_features = FxHashSet::default();
+ for &(feature, span, since) in declared_lang_features {
+ if let Some(since) = since {
+ // Warn if the user has enabled an already-stable lang feature.
+ unnecessary_stable_feature_lint(tcx, span, feature, since);
+ }
+ if !lang_features.insert(feature) {
+ // Warn if the user enables a lang feature multiple times.
+ duplicate_feature_err(tcx.sess, span, feature);
+ }
+ }
+
+ let declared_lib_features = &tcx.features().declared_lib_features;
+ let mut remaining_lib_features = FxIndexMap::default();
+ for (feature, span) in declared_lib_features {
+ if !tcx.sess.opts.unstable_features.is_nightly_build() {
+ struct_span_err!(
+ tcx.sess,
+ *span,
+ E0554,
+ "`#![feature]` may not be used on the {} release channel",
+ env!("CFG_RELEASE_CHANNEL")
+ )
+ .emit();
+ }
+ if remaining_lib_features.contains_key(&feature) {
+ // Warn if the user enables a lib feature multiple times.
+ duplicate_feature_err(tcx.sess, *span, *feature);
+ }
+ remaining_lib_features.insert(feature, *span);
+ }
+ // `stdbuild` has special handling for `libc`, so we need to
+ // recognise the feature when building std.
+ // Likewise, libtest is handled specially, so `test` isn't
+ // available as we'd like it to be.
+ // FIXME: only remove `libc` when `stdbuild` is active.
+ // FIXME: remove special casing for `test`.
+ remaining_lib_features.remove(&sym::libc);
+ remaining_lib_features.remove(&sym::test);
+
+ // We always collect the lib features declared in the current crate, even if there are
+ // no unknown features, because the collection also does feature attribute validation.
+ let local_defined_features = tcx.lib_features(());
+ let mut all_lib_features: FxHashMap<_, _> =
+ local_defined_features.to_vec().iter().map(|el| *el).collect();
+ let mut implications = tcx.stability_implications(rustc_hir::def_id::LOCAL_CRATE).clone();
+ for &cnum in tcx.crates(()) {
+ implications.extend(tcx.stability_implications(cnum));
+ all_lib_features.extend(tcx.defined_lib_features(cnum).iter().map(|el| *el));
+ }
+
+ // Check that every feature referenced by an `implied_by` exists (for features defined in the
+ // local crate).
+ for (implied_by, feature) in tcx.stability_implications(rustc_hir::def_id::LOCAL_CRATE) {
+ // Only `implied_by` needs to be checked, `feature` is guaranteed to exist.
+ if !all_lib_features.contains_key(implied_by) {
+ let span = local_defined_features
+ .stable
+ .get(feature)
+ .map(|(_, span)| span)
+ .or_else(|| local_defined_features.unstable.get(feature))
+ .expect("feature that implied another does not exist");
+ tcx.sess
+ .struct_span_err(
+ *span,
+ format!("feature `{implied_by}` implying `{feature}` does not exist"),
+ )
+ .emit();
+ }
+ }
+
+ if !remaining_lib_features.is_empty() {
+ for (feature, since) in all_lib_features.iter() {
+ if let Some(since) = since && let Some(span) = remaining_lib_features.get(&feature) {
+ // Warn if the user has enabled an already-stable lib feature.
+ if let Some(implies) = implications.get(&feature) {
+ unnecessary_partially_stable_feature_lint(tcx, *span, *feature, *implies, *since);
+ } else {
+ unnecessary_stable_feature_lint(tcx, *span, *feature, *since);
+ }
+ }
+ remaining_lib_features.remove(&feature);
+ if remaining_lib_features.is_empty() {
+ break;
+ }
+ }
+ }
+
+ for (feature, span) in remaining_lib_features {
+ struct_span_err!(tcx.sess, span, E0635, "unknown feature `{}`", feature).emit();
+ }
+
+ // FIXME(#44232): the `used_features` table no longer exists, so we
+ // don't lint about unused features. We should re-enable this one day!
+}
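On nightly, the already-stable checks above surface through the `stable_features` lint rather than an error (on other channels `#![feature]` itself is rejected with E0554, as the code shows). A sketch; the feature name and `since` version are illustrative:

    // warning: the feature `copied` has been stable since 1.36.0 and no
    // longer requires an attribute to enable
    #![feature(copied)]

    fn main() {}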
+
+fn unnecessary_partially_stable_feature_lint(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ feature: Symbol,
+ implies: Symbol,
+ since: Symbol,
+) {
+ tcx.struct_span_lint_hir(lint::builtin::STABLE_FEATURES, hir::CRATE_HIR_ID, span, |lint| {
+ lint.build(&format!(
+ "the feature `{feature}` has been partially stabilized since {since} and is succeeded \
+ by the feature `{implies}`"
+ ))
+ .span_suggestion(
+ span,
+ &format!(
+ "if you are using features which are still unstable, change to using `{implies}`"
+ ),
+ implies,
+ Applicability::MaybeIncorrect,
+ )
+ .span_suggestion(
+ tcx.sess.source_map().span_extend_to_line(span),
+ "if you are using features which are now stable, remove this line",
+ "",
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ });
+}
+
+fn unnecessary_stable_feature_lint(tcx: TyCtxt<'_>, span: Span, feature: Symbol, since: Symbol) {
+ tcx.struct_span_lint_hir(lint::builtin::STABLE_FEATURES, hir::CRATE_HIR_ID, span, |lint| {
+ lint.build(&format!(
+ "the feature `{feature}` has been stable since {since} and no longer requires an \
+ attribute to enable",
+ ))
+ .emit();
+ });
+}
+
+fn duplicate_feature_err(sess: &Session, span: Span, feature: Symbol) {
+ struct_span_err!(sess, span, E0636, "the feature `{}` has already been declared", feature)
+ .emit();
+}
+
+fn missing_const_err(session: &Session, fn_sig_span: Span, const_span: Span) {
+ const ERROR_MSG: &'static str = "attributes `#[rustc_const_unstable]` \
+ and `#[rustc_const_stable]` require \
+ the function or method to be `const`";
+
+ session
+ .struct_span_err(fn_sig_span, ERROR_MSG)
+ .span_help(fn_sig_span, "make the function or method const")
+ .span_label(const_span, "attribute specified here")
+ .emit();
+}
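`missing_const_err` is reached when a staged-api crate places a const stability attribute on a function that is not `const` and has a non-intrinsic ABI. User-code sketch (assumes a crate with `#![feature(staged_api)]` and a stability attribute on the crate root):

    #[stable(feature = "demo", since = "1.0.0")]
    #[rustc_const_unstable(feature = "demo_const", issue = "none")]
    pub fn not_const() {}
    // error: attributes `#[rustc_const_unstable]` and `#[rustc_const_stable]`
    // require the function or method to be `const`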
diff --git a/compiler/rustc_passes/src/upvars.rs b/compiler/rustc_passes/src/upvars.rs
new file mode 100644
index 000000000..68d9bf22b
--- /dev/null
+++ b/compiler/rustc_passes/src/upvars.rs
@@ -0,0 +1,97 @@
+//! Upvar (closure capture) collection from cross-body HIR uses of `Res::Local`s.
+
+use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{self, HirId};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::Span;
+
+pub fn provide(providers: &mut Providers) {
+ providers.upvars_mentioned = |tcx, def_id| {
+ if !tcx.is_closure(def_id) {
+ return None;
+ }
+
+ let local_def_id = def_id.expect_local();
+ let body = tcx.hir().body(tcx.hir().maybe_body_owned_by(local_def_id)?);
+
+ let mut local_collector = LocalCollector::default();
+ local_collector.visit_body(body);
+
+ let mut capture_collector = CaptureCollector {
+ tcx,
+ locals: &local_collector.locals,
+ upvars: FxIndexMap::default(),
+ };
+ capture_collector.visit_body(body);
+
+ if !capture_collector.upvars.is_empty() {
+ Some(tcx.arena.alloc(capture_collector.upvars))
+ } else {
+ None
+ }
+ };
+}
+
+#[derive(Default)]
+struct LocalCollector {
+ // FIXME(eddyb) perhaps use `ItemLocalId` instead?
+ locals: FxHashSet<HirId>,
+}
+
+impl<'tcx> Visitor<'tcx> for LocalCollector {
+ fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
+ if let hir::PatKind::Binding(_, hir_id, ..) = pat.kind {
+ self.locals.insert(hir_id);
+ }
+ intravisit::walk_pat(self, pat);
+ }
+}
+
+struct CaptureCollector<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ locals: &'a FxHashSet<HirId>,
+ upvars: FxIndexMap<HirId, hir::Upvar>,
+}
+
+impl CaptureCollector<'_, '_> {
+ fn visit_local_use(&mut self, var_id: HirId, span: Span) {
+ if !self.locals.contains(&var_id) {
+ self.upvars.entry(var_id).or_insert(hir::Upvar { span });
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for CaptureCollector<'_, 'tcx> {
+ fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
+ if let Res::Local(var_id) = path.res {
+ self.visit_local_use(var_id, path.span);
+ }
+
+ intravisit::walk_path(self, path);
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ if let hir::ExprKind::Closure { .. } = expr.kind {
+ let closure_def_id = self.tcx.hir().local_def_id(expr.hir_id);
+ if let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) {
+ // Every capture of a closure expression is a local in scope
+ // that is moved/copied/borrowed into the closure value, and
+ // for this analysis it is like any other access to a local.
+ //
+ // E.g. in `|b| |c| (a, b, c)`, the upvars of the inner closure
+ // are `a` and `b`, and while `a` is not directly used in the
+ // outer closure, it needs to be an upvar there too, so that
+ // the inner closure can take it (from the outer closure's env).
+ for (&var_id, upvar) in upvars {
+ self.visit_local_use(var_id, upvar.span);
+ }
+ }
+ }
+
+ intravisit::walk_expr(self, expr);
+ }
+}
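The nested-closure comment in `visit_expr` can be made concrete: the outer closure captures `a` only because its inner closure needs it, which is exactly what propagating the inner closure's upvars into the enclosing body achieves. Runnable sketch:

    fn main() {
        let a = 1;
        // Inner closure's upvars: `a` and `b`. The outer closure must also
        // capture `a` so the inner one can take it from the outer environment.
        let f = move |b: i32| move |c: i32| (a, b, c);
        assert_eq!(f(2)(3), (1, 2, 3));
    }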
diff --git a/compiler/rustc_passes/src/weak_lang_items.rs b/compiler/rustc_passes/src/weak_lang_items.rs
new file mode 100644
index 000000000..c48b4ecf8
--- /dev/null
+++ b/compiler/rustc_passes/src/weak_lang_items.rs
@@ -0,0 +1,91 @@
+//! Validity checking for weak lang items
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::struct_span_err;
+use rustc_hir::lang_items::{self, LangItem};
+use rustc_hir::weak_lang_items::WEAK_ITEMS_REFS;
+use rustc_middle::middle::lang_items::required;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::CrateType;
+
+/// Checks the crate for usage of weak lang items, appending to `items.missing`
+/// all the language items required by this crate but not yet defined.
+pub fn check_crate<'tcx>(tcx: TyCtxt<'tcx>, items: &mut lang_items::LanguageItems) {
+ // These are never called by user code, they're generated by the compiler.
+ // They will never implicitly be added to the `missing` array unless we do
+ // so here.
+ if items.eh_personality().is_none() {
+ items.missing.push(LangItem::EhPersonality);
+ }
+ if tcx.sess.target.os == "emscripten" && items.eh_catch_typeinfo().is_none() {
+ items.missing.push(LangItem::EhCatchTypeinfo);
+ }
+
+ let crate_items = tcx.hir_crate_items(());
+ for id in crate_items.foreign_items() {
+ let attrs = tcx.hir().attrs(id.hir_id());
+ if let Some((lang_item, _)) = lang_items::extract(attrs) {
+ if let Some(&item) = WEAK_ITEMS_REFS.get(&lang_item) {
+ if items.require(item).is_err() {
+ items.missing.push(item);
+ }
+ } else {
+ let span = tcx.def_span(id.def_id);
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0264,
+ "unknown external lang item: `{}`",
+ lang_item
+ )
+ .emit();
+ }
+ }
+ }
+
+ verify(tcx, items);
+}
+
+fn verify<'tcx>(tcx: TyCtxt<'tcx>, items: &lang_items::LanguageItems) {
+ // We only need to check for the presence of weak lang items if we're
+ // emitting something that's not an rlib.
+ let needs_check = tcx.sess.crate_types().iter().any(|kind| match *kind {
+ CrateType::Dylib
+ | CrateType::ProcMacro
+ | CrateType::Cdylib
+ | CrateType::Executable
+ | CrateType::Staticlib => true,
+ CrateType::Rlib => false,
+ });
+ if !needs_check {
+ return;
+ }
+
+ let mut missing = FxHashSet::default();
+ for &cnum in tcx.crates(()).iter() {
+ for &item in tcx.missing_lang_items(cnum).iter() {
+ missing.insert(item);
+ }
+ }
+
+ for (name, &item) in WEAK_ITEMS_REFS.iter() {
+ if missing.contains(&item) && required(tcx, item) && items.require(item).is_err() {
+ if item == LangItem::PanicImpl {
+ tcx.sess.err("`#[panic_handler]` function required, but not found");
+ } else if item == LangItem::Oom {
+ if !tcx.features().default_alloc_error_handler {
+ tcx.sess.err("`#[alloc_error_handler]` function required, but not found");
+ tcx.sess.note_without_error("use `#![feature(default_alloc_error_handler)]` for a default error handler");
+ }
+ } else {
+ tcx
+ .sess
+ .diagnostic()
+ .struct_err(&format!("language item required, but not found: `{}`", name))
+ .note(&format!("this can occur when a binary crate with `#![no_std]` is compiled for a target where `{}` is defined in the standard library", name))
+ .help(&format!("you may be able to compile for a target that doesn't need `{}`, specify a target with `--target` or in `.cargo/config`", name))
+ .emit();
+ }
+ }
+ }
+}
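Of the weak lang items verified above, `PanicImpl` is the one most commonly missing: a `#![no_std]` crate built as an executable must define `#[panic_handler]` or the "function required, but not found" error fires. A minimal definition (sketch for a bare-metal style build):

    #![no_std]
    #![no_main]

    use core::panic::PanicInfo;

    // Satisfies the `PanicImpl` weak lang item for non-rlib crate types.
    #[panic_handler]
    fn panic(_info: &PanicInfo) -> ! {
        loop {}
    }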
diff --git a/compiler/rustc_plugin_impl/Cargo.toml b/compiler/rustc_plugin_impl/Cargo.toml
new file mode 100644
index 000000000..b6ea533c8
--- /dev/null
+++ b/compiler/rustc_plugin_impl/Cargo.toml
@@ -0,0 +1,17 @@
+[package]
+name = "rustc_plugin_impl"
+version = "0.0.0"
+build = false
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+libloading = "0.7.1"
+rustc_errors = { path = "../rustc_errors" }
+rustc_lint = { path = "../rustc_lint" }
+rustc_metadata = { path = "../rustc_metadata" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_plugin_impl/src/lib.rs b/compiler/rustc_plugin_impl/src/lib.rs
new file mode 100644
index 000000000..1195045bd
--- /dev/null
+++ b/compiler/rustc_plugin_impl/src/lib.rs
@@ -0,0 +1,23 @@
+//! Infrastructure for compiler plugins.
+//!
+//! Plugins are a deprecated way to extend the behavior of `rustc` in various ways.
+//!
+//! See the [`plugin`
+//! feature](https://doc.rust-lang.org/nightly/unstable-book/language-features/plugin.html)
+//! of the Unstable Book for some examples.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![recursion_limit = "256"]
+
+use rustc_lint::LintStore;
+
+pub mod load;
+
+/// Structure used to register plugins.
+///
+/// A plugin registrar function takes an `&mut Registry` and should call
+/// methods to register its plugins.
+pub struct Registry<'a> {
+ /// The `LintStore` allows plugins to register new lints.
+ pub lint_store: &'a mut LintStore,
+}
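A plugin crate pairs with this `Registry` by exporting a registrar under the exact symbol the loader resolves, `__rustc_plugin_registrar` (see `load.rs` below), with the `fn(&mut Registry<'_>)` shape. Plugin-side sketch, assuming a nightly toolchain and a `dylib` crate type:

    #![feature(rustc_private)]

    extern crate rustc_plugin_impl;

    use rustc_plugin_impl::Registry;

    // `#[no_mangle]` preserves the symbol name that `dylink_registrar` looks up.
    #[no_mangle]
    pub fn __rustc_plugin_registrar(reg: &mut Registry<'_>) {
        // Register custom lints through `reg.lint_store` here.
        let _ = reg;
    }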
diff --git a/compiler/rustc_plugin_impl/src/load.rs b/compiler/rustc_plugin_impl/src/load.rs
new file mode 100644
index 000000000..618682da4
--- /dev/null
+++ b/compiler/rustc_plugin_impl/src/load.rs
@@ -0,0 +1,84 @@
+//! Used by `rustc` when loading a plugin.
+
+use crate::Registry;
+use libloading::Library;
+use rustc_ast::Crate;
+use rustc_errors::struct_span_err;
+use rustc_metadata::locator;
+use rustc_session::cstore::MetadataLoader;
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+
+use std::borrow::ToOwned;
+use std::env;
+use std::mem;
+use std::path::PathBuf;
+
+/// Pointer to a registrar function.
+type PluginRegistrarFn = fn(&mut Registry<'_>);
+
+fn call_malformed_plugin_attribute(sess: &Session, span: Span) {
+ struct_span_err!(sess, span, E0498, "malformed `plugin` attribute")
+ .span_label(span, "malformed attribute")
+ .emit();
+}
+
+/// Read plugin metadata and dynamically load registrar functions.
+pub fn load_plugins(
+ sess: &Session,
+ metadata_loader: &dyn MetadataLoader,
+ krate: &Crate,
+) -> Vec<PluginRegistrarFn> {
+ let mut plugins = Vec::new();
+
+ for attr in &krate.attrs {
+ if !attr.has_name(sym::plugin) {
+ continue;
+ }
+
+ for plugin in attr.meta_item_list().unwrap_or_default() {
+ match plugin.ident() {
+ Some(ident) if plugin.is_word() => {
+ load_plugin(&mut plugins, sess, metadata_loader, ident)
+ }
+ _ => call_malformed_plugin_attribute(sess, plugin.span()),
+ }
+ }
+ }
+
+ plugins
+}
+
+fn load_plugin(
+ plugins: &mut Vec<PluginRegistrarFn>,
+ sess: &Session,
+ metadata_loader: &dyn MetadataLoader,
+ ident: Ident,
+) {
+ let lib = locator::find_plugin_registrar(sess, metadata_loader, ident.span, ident.name);
+ let fun = dylink_registrar(lib).unwrap_or_else(|err| {
+ // This is fatal: there are almost certainly macros we need inside this crate, so
+ // continuing would spew "macro undefined" errors.
+ sess.span_fatal(ident.span, &err.to_string());
+ });
+ plugins.push(fun);
+}
+
+/// Dynamically link a registrar function into the compiler process.
+fn dylink_registrar(lib_path: PathBuf) -> Result<PluginRegistrarFn, libloading::Error> {
+ // Make sure the path contains a `/`, or the dynamic loader will search its
+ // library paths for it.
+ let lib_path = env::current_dir().unwrap().join(&lib_path);
+
+ let lib = unsafe { Library::new(&lib_path) }?;
+
+ let registrar_sym = unsafe { lib.get::<PluginRegistrarFn>(b"__rustc_plugin_registrar") }?;
+
+ // Intentionally leak the dynamic library. We can't ever unload it
+ // since the library can make things that will live arbitrarily long
+ // (e.g., an Rc cycle or a thread).
+ let registrar_sym = unsafe { registrar_sym.into_raw() };
+ mem::forget(lib);
+
+ Ok(*registrar_sym)
+}
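The deliberate leak in `dylink_registrar` is what lets the returned function pointer outlive the `Library` handle: `into_raw` detaches the symbol's borrow of the library, and `mem::forget` keeps the mapping alive for the rest of the process. The same libloading 0.7 pattern in isolation (hypothetical `plugin.so` path and `entry` symbol):

    use libloading::Library;
    use std::mem;

    type Entry = fn();

    fn load_entry(path: &str) -> Result<Entry, libloading::Error> {
        // SAFETY: loading arbitrary native code is inherently unsafe; the
        // caller must trust `path`, just as rustc trusts its plugin crates.
        let lib = unsafe { Library::new(path) }?;
        let sym = unsafe { lib.get::<Entry>(b"entry") }?;
        let raw = unsafe { sym.into_raw() };
        mem::forget(lib); // leak intentionally: the code must stay mapped
        Ok(*raw)
    }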
diff --git a/compiler/rustc_privacy/Cargo.toml b/compiler/rustc_privacy/Cargo.toml
new file mode 100644
index 000000000..5785921fb
--- /dev/null
+++ b/compiler/rustc_privacy/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "rustc_privacy"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+rustc_ast = { path = "../rustc_ast" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_typeck = { path = "../rustc_typeck" }
+tracing = "0.1"
diff --git a/compiler/rustc_privacy/src/errors.rs b/compiler/rustc_privacy/src/errors.rs
new file mode 100644
index 000000000..aca7d770f
--- /dev/null
+++ b/compiler/rustc_privacy/src/errors.rs
@@ -0,0 +1,92 @@
+use rustc_errors::DiagnosticArgFromDisplay;
+use rustc_macros::{LintDiagnostic, SessionDiagnostic, SessionSubdiagnostic};
+use rustc_span::{Span, Symbol};
+
+#[derive(SessionDiagnostic)]
+#[error(privacy::field_is_private, code = "E0451")]
+pub struct FieldIsPrivate {
+ #[primary_span]
+ pub span: Span,
+ pub field_name: Symbol,
+ pub variant_descr: &'static str,
+ pub def_path_str: String,
+ #[subdiagnostic]
+ pub label: FieldIsPrivateLabel,
+}
+
+#[derive(SessionSubdiagnostic)]
+pub enum FieldIsPrivateLabel {
+ #[label(privacy::field_is_private_is_update_syntax_label)]
+ IsUpdateSyntax {
+ #[primary_span]
+ span: Span,
+ field_name: Symbol,
+ },
+ #[label(privacy::field_is_private_label)]
+ Other {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(SessionDiagnostic)]
+#[error(privacy::item_is_private)]
+pub struct ItemIsPrivate<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub kind: &'a str,
+ pub descr: DiagnosticArgFromDisplay<'a>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(privacy::unnamed_item_is_private)]
+pub struct UnnamedItemIsPrivate {
+ #[primary_span]
+ pub span: Span,
+ pub kind: &'static str,
+}
+
+// Duplicate of `InPublicInterface` but with a different error code, shares the same slug.
+#[derive(SessionDiagnostic)]
+#[error(privacy::in_public_interface, code = "E0445")]
+pub struct InPublicInterfaceTraits<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub vis_descr: &'static str,
+ pub kind: &'a str,
+ pub descr: DiagnosticArgFromDisplay<'a>,
+ #[label(privacy::visibility_label)]
+ pub vis_span: Span,
+}
+
+// Duplicate of `InPublicInterfaceTraits` but with a different error code, shares the same slug.
+#[derive(SessionDiagnostic)]
+#[error(privacy::in_public_interface, code = "E0446")]
+pub struct InPublicInterface<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub vis_descr: &'static str,
+ pub kind: &'a str,
+ pub descr: DiagnosticArgFromDisplay<'a>,
+ #[label(privacy::visibility_label)]
+ pub vis_span: Span,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(privacy::from_private_dep_in_public_interface)]
+pub struct FromPrivateDependencyInPublicInterface<'a> {
+ pub kind: &'a str,
+ pub descr: DiagnosticArgFromDisplay<'a>,
+ pub krate: Symbol,
+}
+
+#[derive(LintDiagnostic)]
+#[lint(privacy::private_in_public_lint)]
+pub struct PrivateInPublicLint<'a> {
+ pub vis_descr: &'static str,
+ pub kind: &'a str,
+ pub descr: DiagnosticArgFromDisplay<'a>,
+}
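These derived structs are emitted through the session rather than assembled field by field; a sketch of a call site, assuming the `Session::emit_err` entry point used by this diagnostics migration:

    // With `tcx: TyCtxt<'_>` and `span: Span` in scope:
    tcx.sess.emit_err(UnnamedItemIsPrivate { span, kind: "type" });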
diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs
new file mode 100644
index 000000000..c28d0569d
--- /dev/null
+++ b/compiler/rustc_privacy/src/lib.rs
@@ -0,0 +1,2093 @@
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(associated_type_defaults)]
+#![feature(control_flow_enum)]
+#![feature(rustc_private)]
+#![feature(try_blocks)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+#![cfg_attr(not(bootstrap), deny(rustc::untranslatable_diagnostic))]
+#![cfg_attr(not(bootstrap), deny(rustc::diagnostic_outside_of_impl))]
+
+mod errors;
+
+use rustc_ast::MacroDef;
+use rustc_attr as attr;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::intern::Interned;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{DefId, LocalDefId, LocalDefIdSet, CRATE_DEF_ID};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{AssocItemKind, HirIdSet, ItemId, Node, PatKind};
+use rustc_middle::bug;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::privacy::{AccessLevel, AccessLevels};
+use rustc_middle::span_bug;
+use rustc_middle::ty::abstract_const::{walk_abstract_const, AbstractConst, Node as ACNode};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{self, Const, DefIdTree, GenericParamDefKind};
+use rustc_middle::ty::{TraitRef, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use rustc_session::lint;
+use rustc_span::hygiene::Transparency;
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::Span;
+
+use std::marker::PhantomData;
+use std::ops::ControlFlow;
+use std::{cmp, fmt, mem};
+
+use errors::{
+ FieldIsPrivate, FieldIsPrivateLabel, FromPrivateDependencyInPublicInterface, InPublicInterface,
+ InPublicInterfaceTraits, ItemIsPrivate, PrivateInPublicLint, UnnamedItemIsPrivate,
+};
+
+////////////////////////////////////////////////////////////////////////////////
+/// Generic infrastructure used to implement specific visitors below.
+////////////////////////////////////////////////////////////////////////////////
+
+/// Implemented to visit all `DefId`s in a type.
+/// Visiting `DefId`s is useful because visibilities and reachabilities are attached to them.
+/// The idea is to visit "all components of a type", as documented in
+/// <https://github.com/rust-lang/rfcs/blob/master/text/2145-type-privacy.md#how-to-determine-visibility-of-a-type>.
+/// The default type visitor (`TypeVisitor`) does most of the job, but it has some shortcomings.
+/// First, it doesn't have overridable `fn visit_trait_ref`, so we have to catch trait `DefId`s
+/// manually. Second, it doesn't visit some type components like signatures of fn types, or traits
+/// in `impl Trait`, see individual comments in `DefIdVisitorSkeleton::visit_ty`.
+trait DefIdVisitor<'tcx> {
+ type BreakTy = ();
+
+ fn tcx(&self) -> TyCtxt<'tcx>;
+ fn shallow(&self) -> bool {
+ false
+ }
+ fn skip_assoc_tys(&self) -> bool {
+ false
+ }
+ fn visit_def_id(
+ &mut self,
+ def_id: DefId,
+ kind: &str,
+ descr: &dyn fmt::Display,
+ ) -> ControlFlow<Self::BreakTy>;
+
+ /// Not overridden, but used to actually visit types and traits.
+ fn skeleton(&mut self) -> DefIdVisitorSkeleton<'_, 'tcx, Self> {
+ DefIdVisitorSkeleton {
+ def_id_visitor: self,
+ visited_opaque_tys: Default::default(),
+ dummy: Default::default(),
+ }
+ }
+ fn visit(&mut self, ty_fragment: impl TypeVisitable<'tcx>) -> ControlFlow<Self::BreakTy> {
+ ty_fragment.visit_with(&mut self.skeleton())
+ }
+ fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow<Self::BreakTy> {
+ self.skeleton().visit_trait(trait_ref)
+ }
+ fn visit_projection_ty(
+ &mut self,
+ projection: ty::ProjectionTy<'tcx>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.skeleton().visit_projection_ty(projection)
+ }
+ fn visit_predicates(
+ &mut self,
+ predicates: ty::GenericPredicates<'tcx>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.skeleton().visit_predicates(predicates)
+ }
+}
+
+struct DefIdVisitorSkeleton<'v, 'tcx, V: ?Sized> {
+ def_id_visitor: &'v mut V,
+ visited_opaque_tys: FxHashSet<DefId>,
+ dummy: PhantomData<TyCtxt<'tcx>>,
+}
+
+impl<'tcx, V> DefIdVisitorSkeleton<'_, 'tcx, V>
+where
+ V: DefIdVisitor<'tcx> + ?Sized,
+{
+ fn visit_trait(&mut self, trait_ref: TraitRef<'tcx>) -> ControlFlow<V::BreakTy> {
+ let TraitRef { def_id, substs } = trait_ref;
+ self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref.print_only_trait_path())?;
+ if self.def_id_visitor.shallow() { ControlFlow::CONTINUE } else { substs.visit_with(self) }
+ }
+
+ fn visit_projection_ty(
+ &mut self,
+ projection: ty::ProjectionTy<'tcx>,
+ ) -> ControlFlow<V::BreakTy> {
+ let (trait_ref, assoc_substs) =
+ projection.trait_ref_and_own_substs(self.def_id_visitor.tcx());
+ self.visit_trait(trait_ref)?;
+ if self.def_id_visitor.shallow() {
+ ControlFlow::CONTINUE
+ } else {
+ assoc_substs.iter().try_for_each(|subst| subst.visit_with(self))
+ }
+ }
+
+ fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<V::BreakTy> {
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(ty::TraitPredicate {
+ trait_ref,
+ constness: _,
+ polarity: _,
+ }) => self.visit_trait(trait_ref),
+ ty::PredicateKind::Projection(ty::ProjectionPredicate { projection_ty, term }) => {
+ term.visit_with(self)?;
+ self.visit_projection_ty(projection_ty)
+ }
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty, _region)) => {
+ ty.visit_with(self)
+ }
+ ty::PredicateKind::RegionOutlives(..) => ControlFlow::CONTINUE,
+ ty::PredicateKind::ConstEvaluatable(uv)
+ if self.def_id_visitor.tcx().features().generic_const_exprs =>
+ {
+ let tcx = self.def_id_visitor.tcx();
+ if let Ok(Some(ct)) = AbstractConst::new(tcx, uv) {
+ self.visit_abstract_const_expr(tcx, ct)?;
+ }
+ ControlFlow::CONTINUE
+ }
+ ty::PredicateKind::WellFormed(arg) => arg.visit_with(self),
+ _ => bug!("unexpected predicate: {:?}", predicate),
+ }
+ }
+
+ fn visit_abstract_const_expr(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ ct: AbstractConst<'tcx>,
+ ) -> ControlFlow<V::BreakTy> {
+ walk_abstract_const(tcx, ct, |node| match node.root(tcx) {
+ ACNode::Leaf(leaf) => self.visit_const(leaf),
+ ACNode::Cast(_, _, ty) => self.visit_ty(ty),
+ ACNode::Binop(..) | ACNode::UnaryOp(..) | ACNode::FunctionCall(_, _) => {
+ ControlFlow::CONTINUE
+ }
+ })
+ }
+
+ fn visit_predicates(
+ &mut self,
+ predicates: ty::GenericPredicates<'tcx>,
+ ) -> ControlFlow<V::BreakTy> {
+ let ty::GenericPredicates { parent: _, predicates } = predicates;
+ predicates.iter().try_for_each(|&(predicate, _span)| self.visit_predicate(predicate))
+ }
+}
+
+impl<'tcx, V> TypeVisitor<'tcx> for DefIdVisitorSkeleton<'_, 'tcx, V>
+where
+ V: DefIdVisitor<'tcx> + ?Sized,
+{
+ type BreakTy = V::BreakTy;
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<V::BreakTy> {
+ let tcx = self.def_id_visitor.tcx();
+ // InternalSubsts are not visited here because they are visited below
+ // in `super_visit_with`.
+ match *ty.kind() {
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), ..)
+ | ty::Foreign(def_id)
+ | ty::FnDef(def_id, ..)
+ | ty::Closure(def_id, ..)
+ | ty::Generator(def_id, ..) => {
+ self.def_id_visitor.visit_def_id(def_id, "type", &ty)?;
+ if self.def_id_visitor.shallow() {
+ return ControlFlow::CONTINUE;
+ }
+ // Default type visitor doesn't visit signatures of fn types.
+ // Something like `fn() -> Priv {my_func}` is considered a private type even if
+ // `my_func` is public, so we need to visit signatures.
+ if let ty::FnDef(..) = ty.kind() {
+ tcx.fn_sig(def_id).visit_with(self)?;
+ }
+ // Inherent static methods don't have the self type in their substs.
+ // For example, the `fn() {my_method}` type of the method in
+ // `impl Pub<Priv> { pub fn my_method() {} }` is considered a private type,
+ // so we need to visit the self type additionally.
+ if let Some(assoc_item) = tcx.opt_associated_item(def_id) {
+ if let Some(impl_def_id) = assoc_item.impl_container(tcx) {
+ tcx.type_of(impl_def_id).visit_with(self)?;
+ }
+ }
+ }
+ ty::Projection(proj) => {
+ if self.def_id_visitor.skip_assoc_tys() {
+ // Visitors searching for minimal visibility/reachability want to
+ // conservatively approximate associated types like `<Type as Trait>::Alias`
+ // as visible/reachable even if both `Type` and `Trait` are private.
+ // Ideally, associated types should be substituted in the same way as
+ // free type aliases, but this isn't done yet.
+ return ControlFlow::CONTINUE;
+ }
+ // This will also visit substs if necessary, so we don't need to recurse.
+ return self.visit_projection_ty(proj);
+ }
+ ty::Dynamic(predicates, ..) => {
+ // All traits in the list are considered the "primary" part of the type
+ // and are visited by shallow visitors.
+ for predicate in predicates {
+ let trait_ref = match predicate.skip_binder() {
+ ty::ExistentialPredicate::Trait(trait_ref) => trait_ref,
+ ty::ExistentialPredicate::Projection(proj) => proj.trait_ref(tcx),
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ ty::ExistentialTraitRef { def_id, substs: InternalSubsts::empty() }
+ }
+ };
+ let ty::ExistentialTraitRef { def_id, substs: _ } = trait_ref;
+ self.def_id_visitor.visit_def_id(def_id, "trait", &trait_ref)?;
+ }
+ }
+ ty::Opaque(def_id, ..) => {
+ // Skip repeated `Opaque`s to avoid infinite recursion.
+ if self.visited_opaque_tys.insert(def_id) {
+ // The intent is to treat `impl Trait1 + Trait2` identically to
+ // `dyn Trait1 + Trait2`. Therefore we ignore def-id of the opaque type itself
+ // (it either has no visibility, or its visibility is insignificant, like
+ // visibilities of type aliases) and recurse into bounds instead to go
+ // through the trait list (default type visitor doesn't visit those traits).
+ // All traits in the list are considered the "primary" part of the type
+ // and are visited by shallow visitors.
+ self.visit_predicates(ty::GenericPredicates {
+ parent: None,
+ predicates: tcx.explicit_item_bounds(def_id),
+ })?;
+ }
+ }
+ // These types don't have their own def-ids (but may have subcomponents
+ // with def-ids that should be visited recursively).
+ ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::Str
+ | ty::Never
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::Tuple(..)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnPtr(..)
+ | ty::Param(..)
+ | ty::Error(_)
+ | ty::GeneratorWitness(..) => {}
+ ty::Bound(..) | ty::Placeholder(..) | ty::Infer(..) => {
+ bug!("unexpected type: {:?}", ty)
+ }
+ }
+
+ if self.def_id_visitor.shallow() {
+ ControlFlow::CONTINUE
+ } else {
+ ty.super_visit_with(self)
+ }
+ }
+
+ fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ self.visit_ty(c.ty())?;
+ let tcx = self.def_id_visitor.tcx();
+ if let Ok(Some(ct)) = AbstractConst::from_const(tcx, c) {
+ self.visit_abstract_const_expr(tcx, ct)?;
+ }
+ ControlFlow::CONTINUE
+ }
+}
+
+fn min(vis1: ty::Visibility, vis2: ty::Visibility, tcx: TyCtxt<'_>) -> ty::Visibility {
+ if vis1.is_at_least(vis2, tcx) { vis2 } else { vis1 }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// Visitor used to determine impl visibility and reachability.
+////////////////////////////////////////////////////////////////////////////////
+
+struct FindMin<'a, 'tcx, VL: VisibilityLike> {
+ tcx: TyCtxt<'tcx>,
+ access_levels: &'a AccessLevels,
+ min: VL,
+}
+
+impl<'a, 'tcx, VL: VisibilityLike> DefIdVisitor<'tcx> for FindMin<'a, 'tcx, VL> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn shallow(&self) -> bool {
+ VL::SHALLOW
+ }
+ fn skip_assoc_tys(&self) -> bool {
+ true
+ }
+ fn visit_def_id(
+ &mut self,
+ def_id: DefId,
+ _kind: &str,
+ _descr: &dyn fmt::Display,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.min = VL::new_min(self, def_id);
+ ControlFlow::CONTINUE
+ }
+}
+
+trait VisibilityLike: Sized {
+ const MAX: Self;
+ const SHALLOW: bool = false;
+ fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self;
+
+ // Returns an over-approximation (`skip_assoc_tys` = true) of visibility due to
+ // associated types for which we can't determine visibility precisely.
+ fn of_impl(def_id: LocalDefId, tcx: TyCtxt<'_>, access_levels: &AccessLevels) -> Self {
+ let mut find = FindMin { tcx, access_levels, min: Self::MAX };
+ find.visit(tcx.type_of(def_id));
+ if let Some(trait_ref) = tcx.impl_trait_ref(def_id) {
+ find.visit_trait(trait_ref);
+ }
+ find.min
+ }
+}
+impl VisibilityLike for ty::Visibility {
+ const MAX: Self = ty::Visibility::Public;
+ fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self {
+ min(find.tcx.visibility(def_id), find.min, find.tcx)
+ }
+}
+impl VisibilityLike for Option<AccessLevel> {
+ const MAX: Self = Some(AccessLevel::Public);
+ // Type inference is very smart sometimes.
+ // It can make an impl reachable even when some components of its type or trait are unreachable.
+ // E.g. methods of `impl ReachableTrait<UnreachableTy> for ReachableTy<UnreachableTy> { ... }`
+ // can be usable from other crates (#57264). So we skip substs when calculating reachability
+ // and consider an impl reachable if its "shallow" type and trait are reachable.
+ //
+ // The assumption we make here is that type inference won't let you use an impl without knowing
+ // both the "shallow" version of its self type and the "shallow" version of its trait, if it
+ // exists (which requires reaching the `DefId`s in them).
+ const SHALLOW: bool = true;
+ fn new_min(find: &FindMin<'_, '_, Self>, def_id: DefId) -> Self {
+ cmp::min(
+ if let Some(def_id) = def_id.as_local() {
+ find.access_levels.map.get(&def_id).copied()
+ } else {
+ Self::MAX
+ },
+ find.min,
+ )
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// The embargo visitor, used to determine the exports of the AST.
+////////////////////////////////////////////////////////////////////////////////
+
+struct EmbargoVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ /// Accessibility levels for reachable nodes.
+ access_levels: AccessLevels,
+ /// A set of pairs corresponding to modules, where the first module is
+ /// reachable via a macro that's defined in the second module. This cannot
+ /// be represented as reachable because it can't handle the following case:
+ ///
+ /// pub mod n { // Should be `Public`
+ /// pub(crate) mod p { // Should *not* be accessible
+ /// pub fn f() -> i32 { 12 } // Must be `Reachable`
+ /// }
+ /// }
+ /// pub macro m() {
+ /// n::p::f()
+ /// }
+ macro_reachable: FxHashSet<(LocalDefId, LocalDefId)>,
+ /// Previous accessibility level; `None` means unreachable.
+ prev_level: Option<AccessLevel>,
+ /// Has something changed in the level map?
+ changed: bool,
+}
+
+struct ReachEverythingInTheInterfaceVisitor<'a, 'tcx> {
+ access_level: Option<AccessLevel>,
+ item_def_id: LocalDefId,
+ ev: &'a mut EmbargoVisitor<'tcx>,
+}
+
+impl<'tcx> EmbargoVisitor<'tcx> {
+ fn get(&self, def_id: LocalDefId) -> Option<AccessLevel> {
+ self.access_levels.map.get(&def_id).copied()
+ }
+
+ fn update_with_hir_id(
+ &mut self,
+ hir_id: hir::HirId,
+ level: Option<AccessLevel>,
+ ) -> Option<AccessLevel> {
+ let def_id = self.tcx.hir().local_def_id(hir_id);
+ self.update(def_id, level)
+ }
+
+ /// Updates node level and returns the updated level.
+ fn update(&mut self, def_id: LocalDefId, level: Option<AccessLevel>) -> Option<AccessLevel> {
+ let old_level = self.get(def_id);
+ // Accessibility levels can only grow.
+ if level > old_level {
+ self.access_levels.map.insert(def_id, level.unwrap());
+ self.changed = true;
+ level
+ } else {
+ old_level
+ }
+ }
+
+ fn reach(
+ &mut self,
+ def_id: LocalDefId,
+ access_level: Option<AccessLevel>,
+ ) -> ReachEverythingInTheInterfaceVisitor<'_, 'tcx> {
+ ReachEverythingInTheInterfaceVisitor {
+ access_level: cmp::min(access_level, Some(AccessLevel::Reachable)),
+ item_def_id: def_id,
+ ev: self,
+ }
+ }
+
+ // We have to make sure that the items that macros might reference
+ // are reachable, since they might be exported transitively.
+ fn update_reachability_from_macro(&mut self, local_def_id: LocalDefId, md: &MacroDef) {
+ // Non-opaque macros cannot make other items more accessible than they already are.
+
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(local_def_id);
+ let attrs = self.tcx.hir().attrs(hir_id);
+ if attr::find_transparency(attrs, md.macro_rules).0 != Transparency::Opaque {
+ return;
+ }
+
+ let macro_module_def_id = self.tcx.local_parent(local_def_id);
+ if self.tcx.opt_def_kind(macro_module_def_id) != Some(DefKind::Mod) {
+ // The macro's parent doesn't correspond to a `mod`, return early (#63164, #65252).
+ return;
+ }
+
+ if self.get(local_def_id).is_none() {
+ return;
+ }
+
+ // Since we are starting from an externally visible module,
+ // all the parents in the loop below are also guaranteed to be modules.
+ let mut module_def_id = macro_module_def_id;
+ loop {
+ let changed_reachability =
+ self.update_macro_reachable(module_def_id, macro_module_def_id);
+ if changed_reachability || module_def_id == CRATE_DEF_ID {
+ break;
+ }
+ module_def_id = self.tcx.local_parent(module_def_id);
+ }
+ }
+
+ /// Updates the item as being reachable through a macro defined in the given
+ /// module. Returns `true` if the level has changed.
+ fn update_macro_reachable(
+ &mut self,
+ module_def_id: LocalDefId,
+ defining_mod: LocalDefId,
+ ) -> bool {
+ if self.macro_reachable.insert((module_def_id, defining_mod)) {
+ self.update_macro_reachable_mod(module_def_id, defining_mod);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn update_macro_reachable_mod(&mut self, module_def_id: LocalDefId, defining_mod: LocalDefId) {
+ let module = self.tcx.hir().get_module(module_def_id).0;
+ for item_id in module.item_ids {
+ let def_kind = self.tcx.def_kind(item_id.def_id);
+ let vis = self.tcx.visibility(item_id.def_id);
+ self.update_macro_reachable_def(item_id.def_id, def_kind, vis, defining_mod);
+ }
+ if let Some(exports) = self.tcx.module_reexports(module_def_id) {
+ for export in exports {
+ if export.vis.is_accessible_from(defining_mod.to_def_id(), self.tcx) {
+ if let Res::Def(def_kind, def_id) = export.res {
+ if let Some(def_id) = def_id.as_local() {
+ let vis = self.tcx.visibility(def_id.to_def_id());
+ self.update_macro_reachable_def(def_id, def_kind, vis, defining_mod);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ fn update_macro_reachable_def(
+ &mut self,
+ def_id: LocalDefId,
+ def_kind: DefKind,
+ vis: ty::Visibility,
+ module: LocalDefId,
+ ) {
+ let level = Some(AccessLevel::Reachable);
+ if vis.is_public() {
+ self.update(def_id, level);
+ }
+ match def_kind {
+ // No type privacy, so can be directly marked as reachable.
+ DefKind::Const | DefKind::Static(_) | DefKind::TraitAlias | DefKind::TyAlias => {
+ if vis.is_accessible_from(module.to_def_id(), self.tcx) {
+ self.update(def_id, level);
+ }
+ }
+
+ // Hygiene isn't really implemented for `macro_rules!` macros at the
+ // moment. Accordingly, marking them as reachable is unwise. `macro` macros
+ // have normal hygiene, so we can treat them like other items without type
+ // privacy and mark them reachable.
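+ // Illustrative example (mirroring the `macro_reachable` doc above): for an
+ // opaque `pub macro m() { n::p::f() }`, the private `f` must be marked
+ // `Reachable`, since downstream crates can expand `m` and end up calling `f`.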
+ DefKind::Macro(_) => {
+ let item = self.tcx.hir().expect_item(def_id);
+ if let hir::ItemKind::Macro(MacroDef { macro_rules: false, .. }, _) = item.kind {
+ if vis.is_accessible_from(module.to_def_id(), self.tcx) {
+ self.update(def_id, level);
+ }
+ }
+ }
+
+ // We can't use a module name as the final segment of a path, except
+ // in `use` statements. Since re-export checking doesn't consider
+ // hygiene, these don't need to be marked reachable. The contents of
+ // the module, however, may be reachable.
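+ // Illustrative: `use n::p;` may name the module `p` as the final path
+ // segment, but an expression cannot, so only the module's contents (not
+ // the module item itself) may need a reachability mark here.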
+ DefKind::Mod => {
+ if vis.is_accessible_from(module.to_def_id(), self.tcx) {
+ self.update_macro_reachable(def_id, module);
+ }
+ }
+
+ DefKind::Struct | DefKind::Union => {
+ // While structs and unions have type privacy, their fields do not.
+ if vis.is_public() {
+ let item = self.tcx.hir().expect_item(def_id);
+ if let hir::ItemKind::Struct(ref struct_def, _)
+ | hir::ItemKind::Union(ref struct_def, _) = item.kind
+ {
+ for field in struct_def.fields() {
+ let def_id = self.tcx.hir().local_def_id(field.hir_id);
+ let field_vis = self.tcx.visibility(def_id);
+ if field_vis.is_accessible_from(module.to_def_id(), self.tcx) {
+ self.reach(def_id, level).ty();
+ }
+ }
+ } else {
+ bug!("item {:?} with DefKind {:?}", item, def_kind);
+ }
+ }
+ }
+
+ // These have type privacy, so are not reachable unless they're
+ // public, or are not namespaced at all.
+ DefKind::AssocConst
+ | DefKind::AssocTy
+ | DefKind::ConstParam
+ | DefKind::Ctor(_, _)
+ | DefKind::Enum
+ | DefKind::ForeignTy
+ | DefKind::Fn
+ | DefKind::OpaqueTy
+ | DefKind::AssocFn
+ | DefKind::Trait
+ | DefKind::TyParam
+ | DefKind::Variant
+ | DefKind::LifetimeParam
+ | DefKind::ExternCrate
+ | DefKind::Use
+ | DefKind::ForeignMod
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::Field
+ | DefKind::GlobalAsm
+ | DefKind::Impl
+ | DefKind::Closure
+ | DefKind::Generator => (),
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ /// We want to visit items in the context of their containing
+ /// module and so forth, so supply a crate for doing a deep walk.
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ let item_level = match item.kind {
+ hir::ItemKind::Impl { .. } => {
+ let impl_level =
+ Option::<AccessLevel>::of_impl(item.def_id, self.tcx, &self.access_levels);
+ self.update(item.def_id, impl_level)
+ }
+ _ => self.get(item.def_id),
+ };
+
+ // Update levels of nested things.
+ match item.kind {
+ hir::ItemKind::Enum(ref def, _) => {
+ for variant in def.variants {
+ let variant_level = self.update_with_hir_id(variant.id, item_level);
+ if let Some(ctor_hir_id) = variant.data.ctor_hir_id() {
+ self.update_with_hir_id(ctor_hir_id, item_level);
+ }
+ for field in variant.data.fields() {
+ self.update_with_hir_id(field.hir_id, variant_level);
+ }
+ }
+ }
+ hir::ItemKind::Impl(ref impl_) => {
+ for impl_item_ref in impl_.items {
+ if impl_.of_trait.is_some()
+ || self.tcx.visibility(impl_item_ref.id.def_id) == ty::Visibility::Public
+ {
+ self.update(impl_item_ref.id.def_id, item_level);
+ }
+ }
+ }
+ hir::ItemKind::Trait(.., trait_item_refs) => {
+ for trait_item_ref in trait_item_refs {
+ self.update(trait_item_ref.id.def_id, item_level);
+ }
+ }
+ hir::ItemKind::Struct(ref def, _) | hir::ItemKind::Union(ref def, _) => {
+ if let Some(ctor_hir_id) = def.ctor_hir_id() {
+ self.update_with_hir_id(ctor_hir_id, item_level);
+ }
+ for field in def.fields() {
+ let def_id = self.tcx.hir().local_def_id(field.hir_id);
+ let vis = self.tcx.visibility(def_id);
+ if vis.is_public() {
+ self.update_with_hir_id(field.hir_id, item_level);
+ }
+ }
+ }
+ hir::ItemKind::Macro(ref macro_def, _) => {
+ self.update_reachability_from_macro(item.def_id, macro_def);
+ }
+ hir::ItemKind::ForeignMod { items, .. } => {
+ for foreign_item in items {
+ if self.tcx.visibility(foreign_item.id.def_id) == ty::Visibility::Public {
+ self.update(foreign_item.id.def_id, item_level);
+ }
+ }
+ }
+
+ hir::ItemKind::OpaqueTy(..)
+ | hir::ItemKind::Use(..)
+ | hir::ItemKind::Static(..)
+ | hir::ItemKind::Const(..)
+ | hir::ItemKind::GlobalAsm(..)
+ | hir::ItemKind::TyAlias(..)
+ | hir::ItemKind::Mod(..)
+ | hir::ItemKind::TraitAlias(..)
+ | hir::ItemKind::Fn(..)
+ | hir::ItemKind::ExternCrate(..) => {}
+ }
+
+ // Mark all items in interfaces of reachable items as reachable.
+ match item.kind {
+ // The interface is empty.
+ hir::ItemKind::Macro(..) | hir::ItemKind::ExternCrate(..) => {}
+ // All nested items are checked by `visit_item`.
+ hir::ItemKind::Mod(..) => {}
+ // Handled in the access level computation in rustc_resolve.
+ hir::ItemKind::Use(..) => {}
+ // The interface is empty.
+ hir::ItemKind::GlobalAsm(..) => {}
+ hir::ItemKind::OpaqueTy(..) => {
+ // HACK(jynelson): trying to infer the type of `impl trait` breaks `async-std` (and `pub async fn` in general)
+ // Since rustdoc never needs to do codegen and doesn't care about link-time reachability,
+ // mark this as unreachable.
+ // See https://github.com/rust-lang/rust/issues/75100
+ if !self.tcx.sess.opts.actually_rustdoc {
+ // FIXME: This is some serious pessimization intended to work around deficiencies
+ // in the reachability pass (`middle/reachable.rs`). Types are marked as link-time
+ // reachable if they are returned via `impl Trait`, even from private functions.
+ let exist_level =
+ cmp::max(item_level, Some(AccessLevel::ReachableFromImplTrait));
+ self.reach(item.def_id, exist_level).generics().predicates().ty();
+ }
+ }
+ // Visit everything.
+ hir::ItemKind::Const(..)
+ | hir::ItemKind::Static(..)
+ | hir::ItemKind::Fn(..)
+ | hir::ItemKind::TyAlias(..) => {
+ if item_level.is_some() {
+ self.reach(item.def_id, item_level).generics().predicates().ty();
+ }
+ }
+ hir::ItemKind::Trait(.., trait_item_refs) => {
+ if item_level.is_some() {
+ self.reach(item.def_id, item_level).generics().predicates();
+
+ for trait_item_ref in trait_item_refs {
+ let tcx = self.tcx;
+ let mut reach = self.reach(trait_item_ref.id.def_id, item_level);
+ reach.generics().predicates();
+
+ if trait_item_ref.kind == AssocItemKind::Type
+ && !tcx.impl_defaultness(trait_item_ref.id.def_id).has_value()
+ {
+ // No type to visit.
+ } else {
+ reach.ty();
+ }
+ }
+ }
+ }
+ hir::ItemKind::TraitAlias(..) => {
+ if item_level.is_some() {
+ self.reach(item.def_id, item_level).generics().predicates();
+ }
+ }
+ // Visit everything except for private impl items.
+ hir::ItemKind::Impl(ref impl_) => {
+ if item_level.is_some() {
+ self.reach(item.def_id, item_level).generics().predicates().ty().trait_ref();
+
+ for impl_item_ref in impl_.items {
+ let impl_item_level = self.get(impl_item_ref.id.def_id);
+ if impl_item_level.is_some() {
+ self.reach(impl_item_ref.id.def_id, impl_item_level)
+ .generics()
+ .predicates()
+ .ty();
+ }
+ }
+ }
+ }
+
+ // Visit everything, but enum variants have their own levels.
+ hir::ItemKind::Enum(ref def, _) => {
+ if item_level.is_some() {
+ self.reach(item.def_id, item_level).generics().predicates();
+ }
+ for variant in def.variants {
+ let variant_level = self.get(self.tcx.hir().local_def_id(variant.id));
+ if variant_level.is_some() {
+ for field in variant.data.fields() {
+ self.reach(self.tcx.hir().local_def_id(field.hir_id), variant_level)
+ .ty();
+ }
+ // Corner case: if the variant is reachable, but its
+ // enum is not, make the enum reachable as well.
+ self.reach(item.def_id, variant_level).ty();
+ }
+ if let Some(hir_id) = variant.data.ctor_hir_id() {
+ let ctor_def_id = self.tcx.hir().local_def_id(hir_id);
+ let ctor_level = self.get(ctor_def_id);
+ if ctor_level.is_some() {
+ self.reach(item.def_id, ctor_level).ty();
+ }
+ }
+ }
+ }
+ // Visit everything, but foreign items have their own levels.
+ hir::ItemKind::ForeignMod { items, .. } => {
+ for foreign_item in items {
+ let foreign_item_level = self.get(foreign_item.id.def_id);
+ if foreign_item_level.is_some() {
+ self.reach(foreign_item.id.def_id, foreign_item_level)
+ .generics()
+ .predicates()
+ .ty();
+ }
+ }
+ }
+ // Visit everything except for private fields.
+ hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
+ if item_level.is_some() {
+ self.reach(item.def_id, item_level).generics().predicates();
+ for field in struct_def.fields() {
+ let def_id = self.tcx.hir().local_def_id(field.hir_id);
+ let field_level = self.get(def_id);
+ if field_level.is_some() {
+ self.reach(def_id, field_level).ty();
+ }
+ }
+ }
+ if let Some(hir_id) = struct_def.ctor_hir_id() {
+ let ctor_def_id = self.tcx.hir().local_def_id(hir_id);
+ let ctor_level = self.get(ctor_def_id);
+ if ctor_level.is_some() {
+ self.reach(item.def_id, ctor_level).ty();
+ }
+ }
+ }
+ }
+
+ let orig_level = mem::replace(&mut self.prev_level, item_level);
+ intravisit::walk_item(self, item);
+ self.prev_level = orig_level;
+ }
+
+ fn visit_block(&mut self, b: &'tcx hir::Block<'tcx>) {
+ // Blocks can have public items, for example impls, but they always
+ // start as completely private regardless of the publicity of the
+ // function, constant, type, or field in which the block resides.
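+ // Illustrative (hypothetical): in `fn f() { pub struct S; }`, the `pub`
+ // on `S` grants no external visibility; `S` starts at the private level.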
+ let orig_level = mem::replace(&mut self.prev_level, None);
+ intravisit::walk_block(self, b);
+ self.prev_level = orig_level;
+ }
+}
+
+impl ReachEverythingInTheInterfaceVisitor<'_, '_> {
+ fn generics(&mut self) -> &mut Self {
+ for param in &self.ev.tcx.generics_of(self.item_def_id).params {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {}
+ GenericParamDefKind::Type { has_default, .. } => {
+ if has_default {
+ self.visit(self.ev.tcx.type_of(param.def_id));
+ }
+ }
+ GenericParamDefKind::Const { has_default } => {
+ self.visit(self.ev.tcx.type_of(param.def_id));
+ if has_default {
+ self.visit(self.ev.tcx.const_param_default(param.def_id));
+ }
+ }
+ }
+ }
+ self
+ }
+
+ fn predicates(&mut self) -> &mut Self {
+ self.visit_predicates(self.ev.tcx.predicates_of(self.item_def_id));
+ self
+ }
+
+ fn ty(&mut self) -> &mut Self {
+ self.visit(self.ev.tcx.type_of(self.item_def_id));
+ self
+ }
+
+ fn trait_ref(&mut self) -> &mut Self {
+ if let Some(trait_ref) = self.ev.tcx.impl_trait_ref(self.item_def_id) {
+ self.visit_trait(trait_ref);
+ }
+ self
+ }
+}
+
+impl<'tcx> DefIdVisitor<'tcx> for ReachEverythingInTheInterfaceVisitor<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.ev.tcx
+ }
+ fn visit_def_id(
+ &mut self,
+ def_id: DefId,
+ _kind: &str,
+ _descr: &dyn fmt::Display,
+ ) -> ControlFlow<Self::BreakTy> {
+ if let Some(def_id) = def_id.as_local() {
+ if let (ty::Visibility::Public, _) | (_, Some(AccessLevel::ReachableFromImplTrait)) =
+ (self.tcx().visibility(def_id.to_def_id()), self.access_level)
+ {
+ self.ev.update(def_id, self.access_level);
+ }
+ }
+ ControlFlow::CONTINUE
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////////////////
+/// Name privacy visitor, checks privacy and reports violations.
+/// Most of name privacy checks are performed during the main resolution phase,
+/// or later in type checking when field accesses and associated items are resolved.
+/// This pass performs remaining checks for fields in struct expressions and patterns.
+//////////////////////////////////////////////////////////////////////////////////////
+
+struct NamePrivacyVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ maybe_typeck_results: Option<&'tcx ty::TypeckResults<'tcx>>,
+ current_item: LocalDefId,
+}
+
+impl<'tcx> NamePrivacyVisitor<'tcx> {
+ /// Gets the type-checking results for the current body.
+ /// As this will ICE if called outside bodies, only call when working with
+ /// `Expr` or `Pat` nodes (they are guaranteed to be found only in bodies).
+ #[track_caller]
+ fn typeck_results(&self) -> &'tcx ty::TypeckResults<'tcx> {
+ self.maybe_typeck_results
+ .expect("`NamePrivacyVisitor::typeck_results` called outside of body")
+ }
+
+ // Checks that a field in a struct constructor (expression or pattern) is accessible.
+ fn check_field(
+ &mut self,
+ use_ctxt: Span, // syntax context of the field name at the use site
+ span: Span, // span of the field pattern, e.g., `x: 0`
+ def: ty::AdtDef<'tcx>, // definition of the struct or enum
+ field: &'tcx ty::FieldDef,
+ in_update_syntax: bool,
+ ) {
+ if def.is_enum() {
+ return;
+ }
+
+ // definition of the field
+ let ident = Ident::new(kw::Empty, use_ctxt);
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(self.current_item);
+ let def_id = self.tcx.adjust_ident_and_get_scope(ident, def.did(), hir_id).1;
+ if !field.vis.is_accessible_from(def_id, self.tcx) {
+ self.tcx.sess.emit_err(FieldIsPrivate {
+ span,
+ field_name: field.name,
+ variant_descr: def.variant_descr(),
+ def_path_str: self.tcx.def_path_str(def.did()),
+ label: if in_update_syntax {
+ FieldIsPrivateLabel::IsUpdateSyntax { span, field_name: field.name }
+ } else {
+ FieldIsPrivateLabel::Other { span }
+ },
+ });
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for NamePrivacyVisitor<'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ /// We want to visit items in the context of their containing
+ /// module and so forth, so supply a crate for doing a deep walk.
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_mod(&mut self, _m: &'tcx hir::Mod<'tcx>, _s: Span, _n: hir::HirId) {
+ // Don't visit nested modules, since we run a separate visitor walk
+ // for each module in `privacy_access_levels`
+ }
+
+ fn visit_nested_body(&mut self, body: hir::BodyId) {
+ let old_maybe_typeck_results =
+ self.maybe_typeck_results.replace(self.tcx.typeck_body(body));
+ let body = self.tcx.hir().body(body);
+ self.visit_body(body);
+ self.maybe_typeck_results = old_maybe_typeck_results;
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ let orig_current_item = mem::replace(&mut self.current_item, item.def_id);
+ intravisit::walk_item(self, item);
+ self.current_item = orig_current_item;
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ if let hir::ExprKind::Struct(qpath, fields, ref base) = expr.kind {
+ let res = self.typeck_results().qpath_res(qpath, expr.hir_id);
+ let adt = self.typeck_results().expr_ty(expr).ty_adt_def().unwrap();
+ let variant = adt.variant_of_res(res);
+ if let Some(base) = *base {
+ // If the expression uses FRU we need to make sure all the unmentioned fields
+ // are checked for privacy (RFC 736). Rather than computing the set of
+ // unmentioned fields, just check them all.
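+ // E.g. for `S { x: 1, ..base }`, every field of `S`, not just `x`,
+ // gets a privacy check below.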
+ for (vf_index, variant_field) in variant.fields.iter().enumerate() {
+ let field = fields.iter().find(|f| {
+ self.tcx.field_index(f.hir_id, self.typeck_results()) == vf_index
+ });
+ let (use_ctxt, span) = match field {
+ Some(field) => (field.ident.span, field.span),
+ None => (base.span, base.span),
+ };
+ self.check_field(use_ctxt, span, adt, variant_field, true);
+ }
+ } else {
+ for field in fields {
+ let use_ctxt = field.ident.span;
+ let index = self.tcx.field_index(field.hir_id, self.typeck_results());
+ self.check_field(use_ctxt, field.span, adt, &variant.fields[index], false);
+ }
+ }
+ }
+
+ intravisit::walk_expr(self, expr);
+ }
+
+ fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
+ if let PatKind::Struct(ref qpath, fields, _) = pat.kind {
+ let res = self.typeck_results().qpath_res(qpath, pat.hir_id);
+ let adt = self.typeck_results().pat_ty(pat).ty_adt_def().unwrap();
+ let variant = adt.variant_of_res(res);
+ for field in fields {
+ let use_ctxt = field.ident.span;
+ let index = self.tcx.field_index(field.hir_id, self.typeck_results());
+ self.check_field(use_ctxt, field.span, adt, &variant.fields[index], false);
+ }
+ }
+
+ intravisit::walk_pat(self, pat);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////////////////
+/// Type privacy visitor, checks types for privacy and reports violations.
+/// Both explicitly written types and inferred types of expressions and patterns are checked.
+/// Checks are performed on "semantic" types regardless of names and their hygiene.
+////////////////////////////////////////////////////////////////////////////////////////////
+
+struct TypePrivacyVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ maybe_typeck_results: Option<&'tcx ty::TypeckResults<'tcx>>,
+ current_item: LocalDefId,
+ span: Span,
+}
+
+impl<'tcx> TypePrivacyVisitor<'tcx> {
+ /// Gets the type-checking results for the current body.
+ /// As this will ICE if called outside bodies, only call when working with
+ /// `Expr` or `Pat` nodes (they are guaranteed to be found only in bodies).
+ #[track_caller]
+ fn typeck_results(&self) -> &'tcx ty::TypeckResults<'tcx> {
+ self.maybe_typeck_results
+ .expect("`TypePrivacyVisitor::typeck_results` called outside of body")
+ }
+
+ fn item_is_accessible(&self, did: DefId) -> bool {
+ self.tcx.visibility(did).is_accessible_from(self.current_item.to_def_id(), self.tcx)
+ }
+
+ // Takes the `HirId` of an expression or pattern and checks its type for privacy.
+ fn check_expr_pat_type(&mut self, id: hir::HirId, span: Span) -> bool {
+ self.span = span;
+ let typeck_results = self.typeck_results();
+ let result: ControlFlow<()> = try {
+ self.visit(typeck_results.node_type(id))?;
+ self.visit(typeck_results.node_substs(id))?;
+ if let Some(adjustments) = typeck_results.adjustments().get(id) {
+ adjustments.iter().try_for_each(|adjustment| self.visit(adjustment.target))?;
+ }
+ };
+ result.is_break()
+ }
+
+ fn check_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
+ let is_error = !self.item_is_accessible(def_id);
+ if is_error {
+ self.tcx.sess.emit_err(ItemIsPrivate { span: self.span, kind, descr: descr.into() });
+ }
+ is_error
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for TypePrivacyVisitor<'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ /// We want to visit items in the context of their containing
+ /// module and so forth, so supply a crate for doing a deep walk.
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_mod(&mut self, _m: &'tcx hir::Mod<'tcx>, _s: Span, _n: hir::HirId) {
+ // Don't visit nested modules, since we run a separate visitor walk
+ // for each module in `privacy_access_levels`
+ }
+
+ fn visit_nested_body(&mut self, body: hir::BodyId) {
+ let old_maybe_typeck_results =
+ self.maybe_typeck_results.replace(self.tcx.typeck_body(body));
+ let body = self.tcx.hir().body(body);
+ self.visit_body(body);
+ self.maybe_typeck_results = old_maybe_typeck_results;
+ }
+
+ fn visit_generic_arg(&mut self, generic_arg: &'tcx hir::GenericArg<'tcx>) {
+ match generic_arg {
+ hir::GenericArg::Type(t) => self.visit_ty(t),
+ hir::GenericArg::Infer(inf) => self.visit_infer(inf),
+ hir::GenericArg::Lifetime(_) | hir::GenericArg::Const(_) => {}
+ }
+ }
+
+ fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty<'tcx>) {
+ self.span = hir_ty.span;
+ if let Some(typeck_results) = self.maybe_typeck_results {
+ // Types in bodies.
+ if self.visit(typeck_results.node_type(hir_ty.hir_id)).is_break() {
+ return;
+ }
+ } else {
+ // Types in signatures.
+ // FIXME: This is very inefficient. Ideally each HIR type should be converted
+ // into a semantic type only once and the result should be cached somehow.
+ if self.visit(rustc_typeck::hir_ty_to_ty(self.tcx, hir_ty)).is_break() {
+ return;
+ }
+ }
+
+ intravisit::walk_ty(self, hir_ty);
+ }
+
+ fn visit_infer(&mut self, inf: &'tcx hir::InferArg) {
+ self.span = inf.span;
+ if let Some(typeck_results) = self.maybe_typeck_results {
+ if let Some(ty) = typeck_results.node_type_opt(inf.hir_id) {
+ if self.visit(ty).is_break() {
+ return;
+ }
+ } else {
+ // We don't do anything for const infers here.
+ }
+ } else {
+ bug!("visit_infer without typeck_results");
+ }
+ intravisit::walk_inf(self, inf);
+ }
+
+ fn visit_trait_ref(&mut self, trait_ref: &'tcx hir::TraitRef<'tcx>) {
+ self.span = trait_ref.path.span;
+ if self.maybe_typeck_results.is_none() {
+ // Avoid calling `hir_trait_to_predicates` in bodies, it will ICE.
+ // The traits' privacy in bodies is already checked as a part of trait object types.
+ let bounds = rustc_typeck::hir_trait_to_predicates(
+ self.tcx,
+ trait_ref,
+ // NOTE: This isn't really right, but the actual type doesn't matter here. It's
+ // just required by `ty::TraitRef`.
+ self.tcx.types.never,
+ );
+
+ for (trait_predicate, _, _) in bounds.trait_bounds {
+ if self.visit_trait(trait_predicate.skip_binder()).is_break() {
+ return;
+ }
+ }
+
+ for (poly_predicate, _) in bounds.projection_bounds {
+ let pred = poly_predicate.skip_binder();
+ let poly_pred_term = self.visit(pred.term);
+ if poly_pred_term.is_break()
+ || self.visit_projection_ty(pred.projection_ty).is_break()
+ {
+ return;
+ }
+ }
+ }
+
+ intravisit::walk_trait_ref(self, trait_ref);
+ }
+
+ // Check types of expressions.
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ if self.check_expr_pat_type(expr.hir_id, expr.span) {
+ // Do not check nested expressions if the error already happened.
+ return;
+ }
+ match expr.kind {
+ hir::ExprKind::Assign(_, rhs, _) | hir::ExprKind::Match(rhs, ..) => {
+ // Do not report duplicate errors for `x = y` and `match x { ... }`.
+ if self.check_expr_pat_type(rhs.hir_id, rhs.span) {
+ return;
+ }
+ }
+ hir::ExprKind::MethodCall(segment, ..) => {
+ // Method calls have to be checked specially.
+ self.span = segment.ident.span;
+ if let Some(def_id) = self.typeck_results().type_dependent_def_id(expr.hir_id) {
+ if self.visit(self.tcx.type_of(def_id)).is_break() {
+ return;
+ }
+ } else {
+ self.tcx
+ .sess
+ .delay_span_bug(expr.span, "no type-dependent def for method call");
+ }
+ }
+ _ => {}
+ }
+
+ intravisit::walk_expr(self, expr);
+ }
+
+ // Prohibit access to associated items with insufficient nominal visibility.
+ //
+ // Additionally, until better reachability analysis for macros 2.0 is available,
+ // we prohibit access to private statics from other crates; this allows giving
+ // more code internal visibility at link time. (Access to private functions
+ // is already prohibited by type privacy for function types.)
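+ // Illustrative (hypothetical): a path such as `dep::private_mod::STATIC`
+ // from another crate is rejected here, while private statics in the local
+ // crate are exempted via `is_local_static` below.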
+ fn visit_qpath(&mut self, qpath: &'tcx hir::QPath<'tcx>, id: hir::HirId, span: Span) {
+ let def = match qpath {
+ hir::QPath::Resolved(_, path) => match path.res {
+ Res::Def(kind, def_id) => Some((kind, def_id)),
+ _ => None,
+ },
+ hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => self
+ .maybe_typeck_results
+ .and_then(|typeck_results| typeck_results.type_dependent_def(id)),
+ };
+ let def = def.filter(|(kind, _)| {
+ matches!(
+ kind,
+ DefKind::AssocFn | DefKind::AssocConst | DefKind::AssocTy | DefKind::Static(_)
+ )
+ });
+ if let Some((kind, def_id)) = def {
+ let is_local_static =
+ if let DefKind::Static(_) = kind { def_id.is_local() } else { false };
+ if !self.item_is_accessible(def_id) && !is_local_static {
+ let sess = self.tcx.sess;
+ let sm = sess.source_map();
+ let name = match qpath {
+ hir::QPath::Resolved(..) | hir::QPath::LangItem(..) => {
+ sm.span_to_snippet(qpath.span()).ok()
+ }
+ hir::QPath::TypeRelative(_, segment) => Some(segment.ident.to_string()),
+ };
+ let kind = kind.descr(def_id);
+ let _ = match name {
+ Some(name) => {
+ sess.emit_err(ItemIsPrivate { span, kind, descr: (&name).into() })
+ }
+ None => sess.emit_err(UnnamedItemIsPrivate { span, kind }),
+ };
+ return;
+ }
+ }
+
+ intravisit::walk_qpath(self, qpath, id, span);
+ }
+
+ // Check types of patterns.
+ fn visit_pat(&mut self, pattern: &'tcx hir::Pat<'tcx>) {
+ if self.check_expr_pat_type(pattern.hir_id, pattern.span) {
+ // Do not check nested patterns if the error already happened.
+ return;
+ }
+
+ intravisit::walk_pat(self, pattern);
+ }
+
+ fn visit_local(&mut self, local: &'tcx hir::Local<'tcx>) {
+ if let Some(init) = local.init {
+ if self.check_expr_pat_type(init.hir_id, init.span) {
+ // Do not report duplicate errors for `let x = y`.
+ return;
+ }
+ }
+
+ intravisit::walk_local(self, local);
+ }
+
+ // Check types in item interfaces.
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ let orig_current_item = mem::replace(&mut self.current_item, item.def_id);
+ let old_maybe_typeck_results = self.maybe_typeck_results.take();
+ intravisit::walk_item(self, item);
+ self.maybe_typeck_results = old_maybe_typeck_results;
+ self.current_item = orig_current_item;
+ }
+}
+
+impl<'tcx> DefIdVisitor<'tcx> for TypePrivacyVisitor<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn visit_def_id(
+ &mut self,
+ def_id: DefId,
+ kind: &str,
+ descr: &dyn fmt::Display,
+ ) -> ControlFlow<Self::BreakTy> {
+ if self.check_def_id(def_id, kind, descr) {
+ ControlFlow::BREAK
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+/// Obsolete visitors for checking for private items in public interfaces.
+/// These visitors are supposed to be kept in frozen state and produce an
+/// "old error node set". For backward compatibility the new visitor reports
+/// warnings instead of hard errors when the erroneous node is not in this old set.
+///////////////////////////////////////////////////////////////////////////////
+
+struct ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ access_levels: &'a AccessLevels,
+ in_variant: bool,
+ // Set of errors produced by this obsolete visitor.
+ old_error_set: HirIdSet,
+}
+
+struct ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
+ inner: &'a ObsoleteVisiblePrivateTypesVisitor<'b, 'tcx>,
+ /// Whether the type refers to private types.
+ contains_private: bool,
+ /// Whether we've recursed at all (i.e., whether we're still pointing at the
+ /// first type on which `visit_ty` was called).
+ at_outer_type: bool,
+ /// Whether that first type is a public path.
+ outer_type_is_public_path: bool,
+}
+
+impl<'a, 'tcx> ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
+ fn path_is_private_type(&self, path: &hir::Path<'_>) -> bool {
+ let did = match path.res {
+ Res::PrimTy(..) | Res::SelfTy { .. } | Res::Err => return false,
+ res => res.def_id(),
+ };
+
+ // A path can only be private if:
+ // it's in this crate...
+ if let Some(did) = did.as_local() {
+ // .. and it corresponds to a private type in the AST (this returns
+ // `None` for type parameters).
+ match self.tcx.hir().find(self.tcx.hir().local_def_id_to_hir_id(did)) {
+ Some(Node::Item(_)) => !self.tcx.visibility(did).is_public(),
+ Some(_) | None => false,
+ }
+ } else {
+ false
+ }
+ }
+
+ fn trait_is_public(&self, trait_id: LocalDefId) -> bool {
+ // FIXME: this would preferably be using `exported_items`, but all
+ // traits are exported currently (see `EmbargoVisitor.exported_trait`).
+ self.access_levels.is_public(trait_id)
+ }
+
+ fn check_generic_bound(&mut self, bound: &hir::GenericBound<'_>) {
+ if let hir::GenericBound::Trait(ref trait_ref, _) = *bound {
+ if self.path_is_private_type(trait_ref.trait_ref.path) {
+ self.old_error_set.insert(trait_ref.trait_ref.hir_ref_id);
+ }
+ }
+ }
+
+ fn item_is_public(&self, def_id: LocalDefId) -> bool {
+ self.access_levels.is_reachable(def_id) || self.tcx.visibility(def_id).is_public()
+ }
+}
+
+impl<'a, 'b, 'tcx, 'v> Visitor<'v> for ObsoleteCheckTypeForPrivatenessVisitor<'a, 'b, 'tcx> {
+ fn visit_generic_arg(&mut self, generic_arg: &'v hir::GenericArg<'v>) {
+ match generic_arg {
+ hir::GenericArg::Type(t) => self.visit_ty(t),
+ hir::GenericArg::Infer(inf) => self.visit_ty(&inf.to_ty()),
+ hir::GenericArg::Lifetime(_) | hir::GenericArg::Const(_) => {}
+ }
+ }
+
+ fn visit_ty(&mut self, ty: &hir::Ty<'_>) {
+ if let hir::TyKind::Path(hir::QPath::Resolved(_, path)) = ty.kind {
+ if self.inner.path_is_private_type(path) {
+ self.contains_private = true;
+ // Found what we're looking for, so let's stop working.
+ return;
+ }
+ }
+ if let hir::TyKind::Path(_) = ty.kind {
+ if self.at_outer_type {
+ self.outer_type_is_public_path = true;
+ }
+ }
+ self.at_outer_type = false;
+ intravisit::walk_ty(self, ty)
+ }
+
+ // Don't want to recurse into array length expressions like `[T; expr]`.
+ fn visit_expr(&mut self, _: &hir::Expr<'_>) {}
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for ObsoleteVisiblePrivateTypesVisitor<'a, 'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ /// We want to visit items in the context of their containing
+ /// module and so forth, so supply a crate for doing a deep walk.
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ match item.kind {
+ // Contents of a private mod can be re-exported, so we need
+ // to check internals.
+ hir::ItemKind::Mod(_) => {}
+
+ // An `extern {}` doesn't introduce a new privacy
+ // namespace (the contents have their own privacies).
+ hir::ItemKind::ForeignMod { .. } => {}
+
+ hir::ItemKind::Trait(.., bounds, _) => {
+ if !self.trait_is_public(item.def_id) {
+ return;
+ }
+
+ for bound in bounds.iter() {
+ self.check_generic_bound(bound)
+ }
+ }
+
+ // Impls need some special handling to try to offer useful
+ // error messages without (too many) false positives
+ // (the alternatives being to skip them entirely, or to use
+ // some cruder estimate of whether an impl is publicly
+ // visible).
+ hir::ItemKind::Impl(ref impl_) => {
+ // `impl [... for] Private` is never visible.
+ let self_contains_private;
+ // `impl [... for] Public<...>`, but not `impl [... for]
+ // Vec<Public>` or `(Public,)`, etc.
+ let self_is_public_path;
+
+ // Check the properties of the `Self` type:
+ {
+ let mut visitor = ObsoleteCheckTypeForPrivatenessVisitor {
+ inner: self,
+ contains_private: false,
+ at_outer_type: true,
+ outer_type_is_public_path: false,
+ };
+ visitor.visit_ty(impl_.self_ty);
+ self_contains_private = visitor.contains_private;
+ self_is_public_path = visitor.outer_type_is_public_path;
+ }
+
+ // Miscellaneous info about the impl:
+
+ // `true` iff this is *not* `impl Private for ...`, i.e. the trait, if any, is public.
+ let not_private_trait = impl_.of_trait.as_ref().map_or(
+ true, // no trait counts as public trait
+ |tr| {
+ if let Some(def_id) = tr.path.res.def_id().as_local() {
+ self.trait_is_public(def_id)
+ } else {
+ true // external traits must be public
+ }
+ },
+ );
+
+ // `true` iff this is a trait impl or at least one method is public.
+ //
+ // `impl Public { $( fn ...() {} )* }` is not visible.
+ //
+ // This is required over just using the methods' privacy
+ // directly because we might have `impl<T: Foo<Private>> ...`,
+ // and we shouldn't warn about the generics if all the methods
+ // are private (because `T` won't be visible externally).
+ let trait_or_some_public_method = impl_.of_trait.is_some()
+ || impl_.items.iter().any(|impl_item_ref| {
+ let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
+ match impl_item.kind {
+ hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..) => {
+ self.access_levels.is_reachable(impl_item_ref.id.def_id)
+ }
+ hir::ImplItemKind::TyAlias(_) => false,
+ }
+ });
+
+ if !self_contains_private && not_private_trait && trait_or_some_public_method {
+ intravisit::walk_generics(self, &impl_.generics);
+
+ match impl_.of_trait {
+ None => {
+ for impl_item_ref in impl_.items {
+ // This is where we choose whether to walk down
+ // further into the impl to check its items. We
+ // should only walk into public items so that we
+ // don't erroneously report errors for private
+ // types in private items.
+ let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
+ match impl_item.kind {
+ hir::ImplItemKind::Const(..) | hir::ImplItemKind::Fn(..)
+ if self.item_is_public(impl_item.def_id) =>
+ {
+ intravisit::walk_impl_item(self, impl_item)
+ }
+ hir::ImplItemKind::TyAlias(..) => {
+ intravisit::walk_impl_item(self, impl_item)
+ }
+ _ => {}
+ }
+ }
+ }
+ Some(ref tr) => {
+ // Any private types in a trait impl fall into three
+ // categories.
+ // 1. mentioned in the trait definition
+ // 2. mentioned in the type params/generics
+ // 3. mentioned in the associated types of the impl
+ //
+ // Those in 1. can only occur if the trait is in
+ // this crate and will have been warned about on the
+ // trait definition (there's no need to warn twice
+ // so we don't check the methods).
+ //
+ // Those in 2. are warned via walk_generics and this
+ // call here.
+ intravisit::walk_path(self, tr.path);
+
+ // Those in 3. are warned with this call.
+ for impl_item_ref in impl_.items {
+ let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
+ if let hir::ImplItemKind::TyAlias(ty) = impl_item.kind {
+ self.visit_ty(ty);
+ }
+ }
+ }
+ }
+ } else if impl_.of_trait.is_none() && self_is_public_path {
+ // `impl Public<Private> { ... }`. Any public associated
+ // constants or methods without a `self` receiver will be
+ // visible as `Public::foo`.
+ let mut found_pub_static = false;
+ for impl_item_ref in impl_.items {
+ if self.access_levels.is_reachable(impl_item_ref.id.def_id)
+ || self.tcx.visibility(impl_item_ref.id.def_id)
+ == ty::Visibility::Public
+ {
+ let impl_item = self.tcx.hir().impl_item(impl_item_ref.id);
+ match impl_item_ref.kind {
+ AssocItemKind::Const => {
+ found_pub_static = true;
+ intravisit::walk_impl_item(self, impl_item);
+ }
+ AssocItemKind::Fn { has_self: false } => {
+ found_pub_static = true;
+ intravisit::walk_impl_item(self, impl_item);
+ }
+ _ => {}
+ }
+ }
+ }
+ if found_pub_static {
+ intravisit::walk_generics(self, &impl_.generics)
+ }
+ }
+ return;
+ }
+
+ // `type ... = ...;` can contain private types, because
+ // we're introducing a new name.
+ hir::ItemKind::TyAlias(..) => return,
+
+ // Not at all public, so we don't care.
+ _ if !self.item_is_public(item.def_id) => {
+ return;
+ }
+
+ _ => {}
+ }
+
+ // We've carefully constructed it so that if we're here, then
+ // any `visit_ty`'s will be called on things that are in
+ // public signatures, i.e., things that we're interested in for
+ // this visitor.
+ intravisit::walk_item(self, item);
+ }
+
+ fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
+ for predicate in generics.predicates {
+ match predicate {
+ hir::WherePredicate::BoundPredicate(bound_pred) => {
+ for bound in bound_pred.bounds.iter() {
+ self.check_generic_bound(bound)
+ }
+ }
+ hir::WherePredicate::RegionPredicate(_) => {}
+ hir::WherePredicate::EqPredicate(eq_pred) => {
+ self.visit_ty(eq_pred.rhs_ty);
+ }
+ }
+ }
+ }
+
+ fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
+ if self.access_levels.is_reachable(item.def_id) {
+ intravisit::walk_foreign_item(self, item)
+ }
+ }
+
+ fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx>) {
+ if let hir::TyKind::Path(hir::QPath::Resolved(_, path)) = t.kind {
+ if self.path_is_private_type(path) {
+ self.old_error_set.insert(t.hir_id);
+ }
+ }
+ intravisit::walk_ty(self, t)
+ }
+
+ fn visit_variant(
+ &mut self,
+ v: &'tcx hir::Variant<'tcx>,
+ g: &'tcx hir::Generics<'tcx>,
+ item_id: hir::HirId,
+ ) {
+ if self.access_levels.is_reachable(self.tcx.hir().local_def_id(v.id)) {
+ self.in_variant = true;
+ intravisit::walk_variant(self, v, g, item_id);
+ self.in_variant = false;
+ }
+ }
+
+ fn visit_field_def(&mut self, s: &'tcx hir::FieldDef<'tcx>) {
+ let def_id = self.tcx.hir().local_def_id(s.hir_id);
+ let vis = self.tcx.visibility(def_id);
+ if vis.is_public() || self.in_variant {
+ intravisit::walk_field_def(self, s);
+ }
+ }
+
+ // We don't need to introspect into these at all: an
+ // expression/block context can't possibly contain exported things.
+ // (Making them no-ops stops us from traversing the whole AST without
+ // having to be super careful about our `walk_...` calls above.)
+ fn visit_block(&mut self, _: &'tcx hir::Block<'tcx>) {}
+ fn visit_expr(&mut self, _: &'tcx hir::Expr<'tcx>) {}
+}
+
+///////////////////////////////////////////////////////////////////////////////
+/// SearchInterfaceForPrivateItemsVisitor traverses an item's interface and
+/// finds any private components in it.
+/// PrivateItemsInPublicInterfacesVisitor ensures there are no private types
+/// and traits in public interfaces.
+///////////////////////////////////////////////////////////////////////////////
+
+struct SearchInterfaceForPrivateItemsVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ item_def_id: LocalDefId,
+ /// The visitor checks that each component type is at least this visible.
+ required_visibility: ty::Visibility,
+ has_old_errors: bool,
+ in_assoc_ty: bool,
+}
+
+impl SearchInterfaceForPrivateItemsVisitor<'_> {
+ fn generics(&mut self) -> &mut Self {
+ for param in &self.tcx.generics_of(self.item_def_id).params {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {}
+ GenericParamDefKind::Type { has_default, .. } => {
+ if has_default {
+ self.visit(self.tcx.type_of(param.def_id));
+ }
+ }
+ // FIXME(generic_const_exprs): May want to look inside const here
+ GenericParamDefKind::Const { .. } => {
+ self.visit(self.tcx.type_of(param.def_id));
+ }
+ }
+ }
+ self
+ }
+
+ fn predicates(&mut self) -> &mut Self {
+ // N.B., we use `explicit_predicates_of` and not `predicates_of`
+ // because we don't want to report privacy errors due to where
+ // clauses that the compiler inferred. We only want to
+ // consider the ones that the user wrote. This is important
+ // for the inferred outlives rules; see
+ // `src/test/ui/rfc-2093-infer-outlives/privacy.rs`.
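+ // Illustrative: inferred outlives bounds such as `T: 'a` can be derived
+ // from private fields, so visiting them could flag privacy errors for
+ // predicates the user never wrote.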
+ self.visit_predicates(self.tcx.explicit_predicates_of(self.item_def_id));
+ self
+ }
+
+ fn bounds(&mut self) -> &mut Self {
+ self.visit_predicates(ty::GenericPredicates {
+ parent: None,
+ predicates: self.tcx.explicit_item_bounds(self.item_def_id),
+ });
+ self
+ }
+
+ fn ty(&mut self) -> &mut Self {
+ self.visit(self.tcx.type_of(self.item_def_id));
+ self
+ }
+
+ fn check_def_id(&mut self, def_id: DefId, kind: &str, descr: &dyn fmt::Display) -> bool {
+ if self.leaks_private_dep(def_id) {
+ self.tcx.emit_spanned_lint(
+ lint::builtin::EXPORTED_PRIVATE_DEPENDENCIES,
+ self.tcx.hir().local_def_id_to_hir_id(self.item_def_id),
+ self.tcx.def_span(self.item_def_id.to_def_id()),
+ FromPrivateDependencyInPublicInterface {
+ kind,
+ descr: descr.into(),
+ krate: self.tcx.crate_name(def_id.krate),
+ },
+ );
+ }
+
+ let hir_id = match def_id.as_local() {
+ Some(def_id) => self.tcx.hir().local_def_id_to_hir_id(def_id),
+ None => return false,
+ };
+
+ let vis = self.tcx.visibility(def_id);
+ if !vis.is_at_least(self.required_visibility, self.tcx) {
+ let vis_descr = match vis {
+ ty::Visibility::Public => "public",
+ ty::Visibility::Invisible => "private",
+ ty::Visibility::Restricted(vis_def_id) => {
+ if vis_def_id == self.tcx.parent_module(hir_id).to_def_id() {
+ "private"
+ } else if vis_def_id.is_top_level_module() {
+ "crate-private"
+ } else {
+ "restricted"
+ }
+ }
+ };
+ let span = self.tcx.def_span(self.item_def_id.to_def_id());
+ if self.has_old_errors
+ || self.in_assoc_ty
+ || self.tcx.resolutions(()).has_pub_restricted
+ {
+ let vis_span = self.tcx.def_span(def_id);
+ if kind == "trait" {
+ self.tcx.sess.emit_err(InPublicInterfaceTraits {
+ span,
+ vis_descr,
+ kind,
+ descr: descr.into(),
+ vis_span,
+ });
+ } else {
+ self.tcx.sess.emit_err(InPublicInterface {
+ span,
+ vis_descr,
+ kind,
+ descr: descr.into(),
+ vis_span,
+ });
+ }
+ } else {
+ self.tcx.emit_spanned_lint(
+ lint::builtin::PRIVATE_IN_PUBLIC,
+ hir_id,
+ span,
+ PrivateInPublicLint { vis_descr, kind, descr: descr.into() },
+ );
+ }
+ }
+
+ false
+ }
+
+ /// An item is 'leaked' from a private dependency if all
+ /// of the following are true:
+ /// 1. It's contained within a public type.
+ /// 2. It comes from a private crate.
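+ /// For illustration (hypothetical): `pub fn f() -> private_dep::Widget`
+ /// leaks `Widget` when the `private_dep` crate is marked as a private
+ /// dependency.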
+ fn leaks_private_dep(&self, item_id: DefId) -> bool {
+ let ret = self.required_visibility.is_public() && self.tcx.is_private_dep(item_id.krate);
+
+ tracing::debug!("leaks_private_dep(item_id={:?})={}", item_id, ret);
+ ret
+ }
+}
+
+impl<'tcx> DefIdVisitor<'tcx> for SearchInterfaceForPrivateItemsVisitor<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn visit_def_id(
+ &mut self,
+ def_id: DefId,
+ kind: &str,
+ descr: &dyn fmt::Display,
+ ) -> ControlFlow<Self::BreakTy> {
+ if self.check_def_id(def_id, kind, descr) {
+ ControlFlow::BREAK
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+}
+
+struct PrivateItemsInPublicInterfacesChecker<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ old_error_set_ancestry: LocalDefIdSet,
+}
+
+impl<'tcx> PrivateItemsInPublicInterfacesChecker<'tcx> {
+ fn check(
+ &self,
+ def_id: LocalDefId,
+ required_visibility: ty::Visibility,
+ ) -> SearchInterfaceForPrivateItemsVisitor<'tcx> {
+ SearchInterfaceForPrivateItemsVisitor {
+ tcx: self.tcx,
+ item_def_id: def_id,
+ required_visibility,
+ has_old_errors: self.old_error_set_ancestry.contains(&def_id),
+ in_assoc_ty: false,
+ }
+ }
+
+ fn check_assoc_item(
+ &self,
+ def_id: LocalDefId,
+ assoc_item_kind: AssocItemKind,
+ vis: ty::Visibility,
+ ) {
+ let mut check = self.check(def_id, vis);
+
+ let (check_ty, is_assoc_ty) = match assoc_item_kind {
+ AssocItemKind::Const | AssocItemKind::Fn { .. } => (true, false),
+ AssocItemKind::Type => (self.tcx.impl_defaultness(def_id).has_value(), true),
+ };
+ check.in_assoc_ty = is_assoc_ty;
+ check.generics().predicates();
+ if check_ty {
+ check.ty();
+ }
+ }
+
+ pub fn check_item(&mut self, id: ItemId) {
+ let tcx = self.tcx;
+ let item_visibility = tcx.visibility(id.def_id);
+ let def_kind = tcx.def_kind(id.def_id);
+
+ match def_kind {
+ DefKind::Const | DefKind::Static(_) | DefKind::Fn | DefKind::TyAlias => {
+ self.check(id.def_id, item_visibility).generics().predicates().ty();
+ }
+ DefKind::OpaqueTy => {
+ // `ty()` for opaque types is the underlying type;
+ // it's not part of the interface, so we skip it.
+ self.check(id.def_id, item_visibility).generics().bounds();
+ }
+ DefKind::Trait => {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::Trait(.., trait_item_refs) = item.kind {
+ self.check(item.def_id, item_visibility).generics().predicates();
+
+ for trait_item_ref in trait_item_refs {
+ self.check_assoc_item(
+ trait_item_ref.id.def_id,
+ trait_item_ref.kind,
+ item_visibility,
+ );
+
+ if let AssocItemKind::Type = trait_item_ref.kind {
+ self.check(trait_item_ref.id.def_id, item_visibility).bounds();
+ }
+ }
+ }
+ }
+ DefKind::TraitAlias => {
+ self.check(id.def_id, item_visibility).generics().predicates();
+ }
+ DefKind::Enum => {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::Enum(ref def, _) = item.kind {
+ self.check(item.def_id, item_visibility).generics().predicates();
+
+ for variant in def.variants {
+ for field in variant.data.fields() {
+ self.check(self.tcx.hir().local_def_id(field.hir_id), item_visibility)
+ .ty();
+ }
+ }
+ }
+ }
+ // Subitems of foreign modules have their own publicity.
+ DefKind::ForeignMod => {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::ForeignMod { items, .. } = item.kind {
+ for foreign_item in items {
+ let vis = tcx.visibility(foreign_item.id.def_id);
+ self.check(foreign_item.id.def_id, vis).generics().predicates().ty();
+ }
+ }
+ }
+ // Subitems of structs and unions have their own publicity.
+ DefKind::Struct | DefKind::Union => {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::Struct(ref struct_def, _)
+ | hir::ItemKind::Union(ref struct_def, _) = item.kind
+ {
+ self.check(item.def_id, item_visibility).generics().predicates();
+
+ for field in struct_def.fields() {
+ let def_id = tcx.hir().local_def_id(field.hir_id);
+ let field_visibility = tcx.visibility(def_id);
+ self.check(def_id, min(item_visibility, field_visibility, tcx)).ty();
+ }
+ }
+ }
+ // An inherent impl is public when its type is public.
+ // Subitems of inherent impls have their own publicity.
+ // A trait impl is public when both its type and its trait are public.
+ // Subitems of trait impls have inherited publicity.
+ DefKind::Impl => {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::Impl(ref impl_) = item.kind {
+ let impl_vis = ty::Visibility::of_impl(item.def_id, tcx, &Default::default());
+ // Check that private components do not appear in the generics or predicates of inherent impls.
+ // This check is intentionally NOT performed for impls of traits, per #90586.
+ if impl_.of_trait.is_none() {
+ self.check(item.def_id, impl_vis).generics().predicates();
+ }
+ for impl_item_ref in impl_.items {
+ let impl_item_vis = if impl_.of_trait.is_none() {
+ min(tcx.visibility(impl_item_ref.id.def_id), impl_vis, tcx)
+ } else {
+ impl_vis
+ };
+ self.check_assoc_item(
+ impl_item_ref.id.def_id,
+ impl_item_ref.kind,
+ impl_item_vis,
+ );
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers {
+ visibility,
+ privacy_access_levels,
+ check_private_in_public,
+ check_mod_privacy,
+ ..*providers
+ };
+}
+
+fn visibility(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Visibility {
+ let def_id = def_id.expect_local();
+ match tcx.resolutions(()).visibilities.get(&def_id) {
+ Some(vis) => *vis,
+ None => {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ match tcx.hir().get(hir_id) {
+ // Unique types created for closures participate in type privacy checking.
+ // They have visibilities inherited from the module they are defined in.
+ Node::Expr(hir::Expr { kind: hir::ExprKind::Closure{..}, .. })
+ // - AST lowering creates dummy `use` items which don't
+ // get their entries in the resolver's visibility table.
+ // - AST lowering also creates opaque type items with inherited visibilities.
+ // Visibility on them should have no effect, but to avoid the visibility
+ // query failing on some items, we provide it for opaque types as well.
+ | Node::Item(hir::Item {
+ kind: hir::ItemKind::Use(_, hir::UseKind::ListStem) | hir::ItemKind::OpaqueTy(..),
+ ..
+ }) => ty::Visibility::Restricted(tcx.parent_module(hir_id).to_def_id()),
+ // Visibilities of trait impl items are inherited from their traits
+ // and are not filled in resolve.
+ Node::ImplItem(impl_item) => {
+ match tcx.hir().get_by_def_id(tcx.hir().get_parent_item(hir_id)) {
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { of_trait: Some(tr), .. }),
+ ..
+ }) => tr.path.res.opt_def_id().map_or_else(
+ || {
+ tcx.sess.delay_span_bug(tr.path.span, "trait without a def-id");
+ ty::Visibility::Public
+ },
+ |def_id| tcx.visibility(def_id),
+ ),
+ _ => span_bug!(impl_item.span, "the parent is not a trait impl"),
+ }
+ }
+ _ => span_bug!(
+ tcx.def_span(def_id),
+ "visibility table unexpectedly missing a def-id: {:?}",
+ def_id,
+ ),
+ }
+ }
+ }
+}
+
+fn check_mod_privacy(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ // Check privacy of names not checked in previous compilation stages.
+ let mut visitor =
+ NamePrivacyVisitor { tcx, maybe_typeck_results: None, current_item: module_def_id };
+ let (module, span, hir_id) = tcx.hir().get_module(module_def_id);
+
+ intravisit::walk_mod(&mut visitor, module, hir_id);
+
+ // Check privacy of explicitly written types and traits as well as
+ // inferred types of expressions and patterns.
+ let mut visitor =
+ TypePrivacyVisitor { tcx, maybe_typeck_results: None, current_item: module_def_id, span };
+ intravisit::walk_mod(&mut visitor, module, hir_id);
+}
+
+fn privacy_access_levels(tcx: TyCtxt<'_>, (): ()) -> &AccessLevels {
+ // Build up a set of all exported items in the AST. This is a set of all
+ // items which are reachable from external crates based on visibility.
+ let mut visitor = EmbargoVisitor {
+ tcx,
+ access_levels: tcx.resolutions(()).access_levels.clone(),
+ macro_reachable: Default::default(),
+ prev_level: Some(AccessLevel::Public),
+ changed: false,
+ };
+
+ loop {
+ tcx.hir().walk_toplevel_module(&mut visitor);
+ if visitor.changed {
+ visitor.changed = false;
+ } else {
+ break;
+ }
+ }
+
+ tcx.arena.alloc(visitor.access_levels)
+}
+
+fn check_private_in_public(tcx: TyCtxt<'_>, (): ()) {
+ let access_levels = tcx.privacy_access_levels(());
+
+ let mut visitor = ObsoleteVisiblePrivateTypesVisitor {
+ tcx,
+ access_levels,
+ in_variant: false,
+ old_error_set: Default::default(),
+ };
+ tcx.hir().walk_toplevel_module(&mut visitor);
+
+ let mut old_error_set_ancestry = HirIdSet::default();
+ for mut id in visitor.old_error_set.iter().copied() {
+ loop {
+ if !old_error_set_ancestry.insert(id) {
+ break;
+ }
+ let parent = tcx.hir().get_parent_node(id);
+ if parent == id {
+ break;
+ }
+ id = parent;
+ }
+ }
+
+ // Check for private types and traits in public interfaces.
+ let mut checker = PrivateItemsInPublicInterfacesChecker {
+ tcx,
+ // Only definition IDs are ever searched in `old_error_set_ancestry`,
+ // so we can filter away all non-definition IDs at this point.
+ old_error_set_ancestry: old_error_set_ancestry
+ .into_iter()
+ .filter_map(|hir_id| tcx.hir().opt_local_def_id(hir_id))
+ .collect(),
+ };
+
+ for id in tcx.hir().items() {
+ checker.check_item(id);
+ }
+}
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml
new file mode 100644
index 000000000..5673bb83b
--- /dev/null
+++ b/compiler/rustc_query_impl/Cargo.toml
@@ -0,0 +1,26 @@
+[package]
+name = "rustc_query_impl"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+measureme = "10.0.0"
+rustc-rayon-core = { version = "0.4.0", optional = true }
+rustc_ast = { path = "../rustc_ast" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+tracing = "0.1"
+
+[features]
+rustc_use_parallel_compiler = ["rustc-rayon-core", "rustc_query_system/rustc_use_parallel_compiler"]
diff --git a/compiler/rustc_query_impl/src/README.md b/compiler/rustc_query_impl/src/README.md
new file mode 100644
index 000000000..8ec07b9fd
--- /dev/null
+++ b/compiler/rustc_query_impl/src/README.md
@@ -0,0 +1,3 @@
+For more information about how the query system works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html
diff --git a/compiler/rustc_query_impl/src/keys.rs b/compiler/rustc_query_impl/src/keys.rs
new file mode 100644
index 000000000..49175e97f
--- /dev/null
+++ b/compiler/rustc_query_impl/src/keys.rs
@@ -0,0 +1,545 @@
+//! Defines the set of legal keys that can be used in queries.
+
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LOCAL_CRATE};
+use rustc_middle::infer::canonical::Canonical;
+use rustc_middle::mir;
+use rustc_middle::traits;
+use rustc_middle::ty::fast_reject::SimplifiedType;
+use rustc_middle::ty::subst::{GenericArg, SubstsRef};
+use rustc_middle::ty::{self, layout::TyAndLayout, Ty, TyCtxt};
+use rustc_span::symbol::{Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+
+/// The `Key` trait controls what types can legally be used as the key
+/// for a query.
+pub trait Key {
+ /// Given an instance of this key, what crate is it referring to?
+ /// This is used to find the provider.
+ fn query_crate_is_local(&self) -> bool;
+
+ /// In the event that a cycle occurs, if no explicit span has been
+ /// given for a query with key `self`, what span should we use?
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span;
+
+ /// If the key is a [`DefId`] or `DefId`-equivalent, return that `DefId`.
+ /// Otherwise, return `None`.
+ fn key_as_def_id(&self) -> Option<DefId> {
+ None
+ }
+}
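+
+// Illustration (hypothetical key, not part of the query system): a new key
+// type embedding a `DefId` would implement `Key` along the same lines as the
+// impls below, e.g.
+//
+//     impl Key for MyDefIdKey {
+//         fn query_crate_is_local(&self) -> bool {
+//             self.def_id.krate == LOCAL_CRATE
+//         }
+//         fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+//             tcx.def_span(self.def_id)
+//         }
+//     }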
+
+impl Key for () {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for ty::InstanceDef<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(self.def_id())
+ }
+}
+
+impl<'tcx> Key for ty::Instance<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(self.def_id())
+ }
+}
+
+impl<'tcx> Key for mir::interpret::GlobalId<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.instance.default_span(tcx)
+ }
+}
+
+impl<'tcx> Key for (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for mir::interpret::LitToConstInput<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl Key for CrateNum {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ *self == LOCAL_CRATE
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl Key for LocalDefId {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.to_def_id().default_span(tcx)
+ }
+ fn key_as_def_id(&self) -> Option<DefId> {
+ Some(self.to_def_id())
+ }
+}
+
+impl Key for DefId {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(*self)
+ }
+ #[inline(always)]
+ fn key_as_def_id(&self) -> Option<DefId> {
+ Some(*self)
+ }
+}
+
+impl Key for ty::WithOptConstParam<LocalDefId> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.did.default_span(tcx)
+ }
+}
+
+impl Key for SimplifiedType {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl Key for (DefId, DefId) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.0.krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.1.default_span(tcx)
+ }
+}
+
+impl<'tcx> Key for (ty::Instance<'tcx>, LocalDefId) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.0.default_span(tcx)
+ }
+}
+
+impl Key for (DefId, LocalDefId) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.0.krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.1.default_span(tcx)
+ }
+}
+
+impl Key for (LocalDefId, DefId) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.0.default_span(tcx)
+ }
+}
+
+impl Key for (LocalDefId, LocalDefId) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.0.default_span(tcx)
+ }
+}
+
+impl Key for (DefId, Option<Ident>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.0.krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(self.0)
+ }
+ #[inline(always)]
+ fn key_as_def_id(&self) -> Option<DefId> {
+ Some(self.0)
+ }
+}
+
+impl Key for (DefId, LocalDefId, Ident) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.0.krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.1.default_span(tcx)
+ }
+}
+
+impl Key for (CrateNum, DefId) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.0 == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.1.default_span(tcx)
+ }
+}
+
+impl Key for (CrateNum, SimplifiedType) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.0 == LOCAL_CRATE
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl Key for (DefId, SimplifiedType) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.0.krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.0.default_span(tcx)
+ }
+}
+
+impl<'tcx> Key for SubstsRef<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for (DefId, SubstsRef<'tcx>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.0.krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.0.default_span(tcx)
+ }
+}
+
+impl<'tcx> Key for (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ (self.0).def.did.krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ (self.0).def.did.default_span(tcx)
+ }
+}
+
+impl<'tcx> Key for (LocalDefId, DefId, SubstsRef<'tcx>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.0.default_span(tcx)
+ }
+}
+
+impl<'tcx> Key for (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.1.def_id().krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(self.1.def_id())
+ }
+}
+
+impl<'tcx> Key for (ty::Const<'tcx>, mir::Field) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for mir::interpret::ConstAlloc<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for ty::PolyTraitRef<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.def_id().krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(self.def_id())
+ }
+}
+
+impl<'tcx> Key for ty::PolyExistentialTraitRef<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.def_id().krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(self.def_id())
+ }
+}
+
+impl<'tcx> Key for (ty::PolyTraitRef<'tcx>, ty::PolyTraitRef<'tcx>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.0.def_id().krate == LOCAL_CRATE
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.def_span(self.0.def_id())
+ }
+}
+
+impl<'tcx> Key for GenericArg<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for mir::ConstantKind<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for ty::Const<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for Ty<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for TyAndLayout<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for (Ty<'tcx>, Ty<'tcx>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for &'tcx ty::List<ty::Predicate<'tcx>> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for ty::ParamEnv<'tcx> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx, T: Key> Key for ty::ParamEnvAnd<'tcx, T> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ self.value.query_crate_is_local()
+ }
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.value.default_span(tcx)
+ }
+}
+
+impl Key for Symbol {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl Key for Option<Symbol> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+ fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+/// Canonical query goals correspond to abstract trait operations that
+/// are not tied to any crate in particular.
+impl<'tcx, T> Key for Canonical<'tcx, T> {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl Key for (Symbol, u32, u32) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for (DefId, Ty<'tcx>, SubstsRef<'tcx>, ty::ParamEnv<'tcx>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for (ty::Predicate<'tcx>, traits::WellFormedLoc) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, _tcx: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
+
+impl<'tcx> Key for (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, tcx: TyCtxt<'_>) -> Span {
+ self.0.default_span(tcx)
+ }
+}
+
+impl<'tcx> Key for (Ty<'tcx>, ty::ValTree<'tcx>) {
+ #[inline(always)]
+ fn query_crate_is_local(&self) -> bool {
+ true
+ }
+
+ fn default_span(&self, _: TyCtxt<'_>) -> Span {
+ DUMMY_SP
+ }
+}
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs
new file mode 100644
index 000000000..eda61df77
--- /dev/null
+++ b/compiler/rustc_query_impl/src/lib.rs
@@ -0,0 +1,63 @@
+//! Support for serializing the dep-graph and reloading it.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(once_cell)]
+#![feature(rustc_attrs)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate rustc_macros;
+#[macro_use]
+extern crate rustc_middle;
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::AtomicU64;
+use rustc_middle::arena::Arena;
+use rustc_middle::dep_graph::{self, DepKindStruct, SerializedDepNodeIndex};
+use rustc_middle::ty::query::{query_keys, query_storage, query_stored, query_values};
+use rustc_middle::ty::query::{ExternProviders, Providers, QueryEngine};
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::def_id::{LocalDefId, LOCAL_CRATE};
+use rustc_span::Span;
+
+#[macro_use]
+mod plumbing;
+pub use plumbing::QueryCtxt;
+use rustc_query_system::query::*;
+#[cfg(parallel_compiler)]
+pub use rustc_query_system::query::{deadlock, QueryContext};
+
+mod keys;
+use keys::Key;
+
+mod values;
+use self::values::Value;
+
+pub use rustc_query_system::query::QueryConfig;
+pub(crate) use rustc_query_system::query::{QueryDescription, QueryVTable};
+
+mod on_disk_cache;
+pub use on_disk_cache::OnDiskCache;
+
+mod profiling_support;
+pub use self::profiling_support::alloc_self_profile_query_strings;
+
+fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
+ if def_id.is_top_level_module() {
+ "top-level module".to_string()
+ } else {
+ format!("module `{}`", tcx.def_path_str(def_id.to_def_id()))
+ }
+}
+
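+// For instance (illustrative, assuming `CRATE_DEF_ID` is in scope):
+//
+//     assert_eq!(describe_as_module(CRATE_DEF_ID, tcx), "top-level module");
+//     // any other module yields e.g. "module `foo::bar`"
+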
+rustc_query_append! { [define_queries!][<'tcx>] }
+
+impl<'tcx> Queries<'tcx> {
+    // Force codegen of the `dyn QueryEngine` vtable to happen in this crate.
+ pub fn as_dyn(&'tcx self) -> &'tcx dyn QueryEngine<'tcx> {
+ self
+ }
+}
diff --git a/compiler/rustc_query_impl/src/on_disk_cache.rs b/compiler/rustc_query_impl/src/on_disk_cache.rs
new file mode 100644
index 000000000..56fd90c98
--- /dev/null
+++ b/compiler/rustc_query_impl/src/on_disk_cache.rs
@@ -0,0 +1,1012 @@
+use crate::QueryCtxt;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::sync::{HashMapExt, Lock, Lrc, RwLock};
+use rustc_data_structures::unhash::UnhashMap;
+use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, StableCrateId, LOCAL_CRATE};
+use rustc_hir::definitions::DefPathHash;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
+use rustc_middle::mir::interpret::{AllocDecodingSession, AllocDecodingState};
+use rustc_middle::mir::{self, interpret};
+use rustc_middle::ty::codec::{RefDecodable, TyDecoder, TyEncoder};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_query_system::dep_graph::DepContext;
+use rustc_query_system::query::{QueryCache, QueryContext, QuerySideEffects};
+use rustc_serialize::{
+ opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder},
+ Decodable, Decoder, Encodable, Encoder,
+};
+use rustc_session::Session;
+use rustc_span::hygiene::{
+ ExpnId, HygieneDecodeContext, HygieneEncodeContext, SyntaxContext, SyntaxContextData,
+};
+use rustc_span::source_map::{SourceMap, StableSourceFileId};
+use rustc_span::CachingSourceMapView;
+use rustc_span::{BytePos, ExpnData, ExpnHash, Pos, SourceFile, Span};
+use std::io;
+use std::mem;
+
+const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE;
+
+// A normal span encoded with both location information and a `SyntaxContext`
+const TAG_FULL_SPAN: u8 = 0;
+// A partial span with no location information, encoded only with a `SyntaxContext`
+const TAG_PARTIAL_SPAN: u8 = 1;
+const TAG_RELATIVE_SPAN: u8 = 2;
+
+const TAG_SYNTAX_CONTEXT: u8 = 0;
+const TAG_EXPN_DATA: u8 = 1;
+
+/// Provides an interface to incremental compilation data cached from the
+/// previous compilation session. This data will eventually include the results
+/// of a few selected queries (like `typeck` and `optimized_mir`) and
+/// any side effects that have been emitted during a query.
+pub struct OnDiskCache<'sess> {
+ // The complete cache data in serialized form.
+ serialized_data: RwLock<Option<Mmap>>,
+
+ // Collects all `QuerySideEffects` created during the current compilation
+ // session.
+ current_side_effects: Lock<FxHashMap<DepNodeIndex, QuerySideEffects>>,
+
+ source_map: &'sess SourceMap,
+ file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>,
+
+ // Caches that are populated lazily during decoding.
+ file_index_to_file: Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>,
+
+ // A map from dep-node to the position of the cached query result in
+ // `serialized_data`.
+ query_result_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
+
+ // A map from dep-node to the position of any associated `QuerySideEffects` in
+ // `serialized_data`.
+ prev_side_effects_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
+
+ alloc_decoding_state: AllocDecodingState,
+
+ // A map from syntax context ids to the position of their associated
+ // `SyntaxContextData`. We use a `u32` instead of a `SyntaxContext`
+ // to represent the fact that we are storing *encoded* ids. When we decode
+ // a `SyntaxContext`, a new id will be allocated from the global `HygieneData`,
+    // which will almost certainly be different from the serialized id.
+ syntax_contexts: FxHashMap<u32, AbsoluteBytePos>,
+ // A map from the `DefPathHash` of an `ExpnId` to the position
+ // of their associated `ExpnData`. Ideally, we would store a `DefId`,
+ // but we need to decode this before we've constructed a `TyCtxt` (which
+ // makes it difficult to decode a `DefId`).
+
+ // Note that these `DefPathHashes` correspond to both local and foreign
+    // `ExpnData` (e.g. `ExpnData.krate` may not be `LOCAL_CRATE`). Alternatively,
+ // we could look up the `ExpnData` from the metadata of foreign crates,
+ // but it seemed easier to have `OnDiskCache` be independent of the `CStore`.
+ expn_data: UnhashMap<ExpnHash, AbsoluteBytePos>,
+ // Additional information used when decoding hygiene data.
+ hygiene_context: HygieneDecodeContext,
+ // Maps `ExpnHash`es to their raw value from the *previous*
+ // compilation session. This is used as an initial 'guess' when
+ // we try to map an `ExpnHash` to its value in the current
+ // compilation session.
+ foreign_expn_data: UnhashMap<ExpnHash, u32>,
+}
+
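+// A sketch of the on-disk layout, reconstructed from `serialize` below
+// (illustrative, not a normative format description):
+//
+//     [cached query results, each tagged with its SerializedDepNodeIndex]
+//     [QuerySideEffects entries]
+//     [interpret allocations]
+//     [hygiene data: SyntaxContextData and ExpnData entries]
+//     [Footer: all the lookup tables below]
+//     [footer position: last 8 bytes, IntEncodedWithFixedSize]
+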
+// This type is used only for serialization and deserialization.
+#[derive(Encodable, Decodable)]
+struct Footer {
+ file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>,
+ query_result_index: EncodedDepNodeIndex,
+ side_effects_index: EncodedDepNodeIndex,
+ // The location of all allocations.
+ interpret_alloc_index: Vec<u32>,
+ // See `OnDiskCache.syntax_contexts`
+ syntax_contexts: FxHashMap<u32, AbsoluteBytePos>,
+ // See `OnDiskCache.expn_data`
+ expn_data: UnhashMap<ExpnHash, AbsoluteBytePos>,
+ foreign_expn_data: UnhashMap<ExpnHash, u32>,
+}
+
+pub type EncodedDepNodeIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)]
+struct SourceFileIndex(u32);
+
+#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Encodable, Decodable)]
+pub struct AbsoluteBytePos(u32);
+
+impl AbsoluteBytePos {
+ fn new(pos: usize) -> AbsoluteBytePos {
+ debug_assert!(pos <= u32::MAX as usize);
+ AbsoluteBytePos(pos as u32)
+ }
+
+ fn to_usize(self) -> usize {
+ self.0 as usize
+ }
+}
+
+/// An `EncodedSourceFileId` is the same as a `StableSourceFileId` except that
+/// the source crate is represented as a [StableCrateId] instead of as a
+/// `CrateNum`. This way `EncodedSourceFileId` can be encoded and decoded
+/// without any additional context, i.e. with a simple `opaque::Decoder` (which
+/// is the only thing available when decoding the cache's [Footer]).
+#[derive(Encodable, Decodable, Clone, Debug)]
+struct EncodedSourceFileId {
+ file_name_hash: u64,
+ stable_crate_id: StableCrateId,
+}
+
+impl EncodedSourceFileId {
+ fn translate(&self, tcx: TyCtxt<'_>) -> StableSourceFileId {
+ let cnum = tcx.stable_crate_id_to_crate_num(self.stable_crate_id);
+ StableSourceFileId { file_name_hash: self.file_name_hash, cnum }
+ }
+
+ fn new(tcx: TyCtxt<'_>, file: &SourceFile) -> EncodedSourceFileId {
+ let source_file_id = StableSourceFileId::new(file);
+ EncodedSourceFileId {
+ file_name_hash: source_file_id.file_name_hash,
+ stable_crate_id: tcx.stable_crate_id(source_file_id.cnum),
+ }
+ }
+}
+
+impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> {
+ /// Creates a new `OnDiskCache` instance from the serialized data in `data`.
+ fn new(sess: &'sess Session, data: Mmap, start_pos: usize) -> Self {
+ debug_assert!(sess.opts.incremental.is_some());
+
+ // Wrap in a scope so we can borrow `data`.
+ let footer: Footer = {
+ let mut decoder = MemDecoder::new(&data, start_pos);
+
+ // Decode the *position* of the footer, which can be found in the
+ // last 8 bytes of the file.
+ decoder.set_position(data.len() - IntEncodedWithFixedSize::ENCODED_SIZE);
+ let footer_pos = IntEncodedWithFixedSize::decode(&mut decoder).0 as usize;
+
+ // Decode the file footer, which contains all the lookup tables, etc.
+ decoder.set_position(footer_pos);
+
+ decode_tagged(&mut decoder, TAG_FILE_FOOTER)
+ };
+
+ Self {
+ serialized_data: RwLock::new(Some(data)),
+ file_index_to_stable_id: footer.file_index_to_stable_id,
+ file_index_to_file: Default::default(),
+ source_map: sess.source_map(),
+ current_side_effects: Default::default(),
+ query_result_index: footer.query_result_index.into_iter().collect(),
+ prev_side_effects_index: footer.side_effects_index.into_iter().collect(),
+ alloc_decoding_state: AllocDecodingState::new(footer.interpret_alloc_index),
+ syntax_contexts: footer.syntax_contexts,
+ expn_data: footer.expn_data,
+ foreign_expn_data: footer.foreign_expn_data,
+ hygiene_context: Default::default(),
+ }
+ }
+
+ fn new_empty(source_map: &'sess SourceMap) -> Self {
+ Self {
+ serialized_data: RwLock::new(None),
+ file_index_to_stable_id: Default::default(),
+ file_index_to_file: Default::default(),
+ source_map,
+ current_side_effects: Default::default(),
+ query_result_index: Default::default(),
+ prev_side_effects_index: Default::default(),
+ alloc_decoding_state: AllocDecodingState::new(Vec::new()),
+ syntax_contexts: FxHashMap::default(),
+ expn_data: UnhashMap::default(),
+ foreign_expn_data: UnhashMap::default(),
+ hygiene_context: Default::default(),
+ }
+ }
+
+ /// Execute all cache promotions and release the serialized backing Mmap.
+ ///
+ /// Cache promotions require invoking queries, which needs to read the serialized data.
+ /// In order to serialize the new on-disk cache, the former on-disk cache file needs to be
+ /// deleted, hence we won't be able to refer to its memmapped data.
+ fn drop_serialized_data(&self, tcx: TyCtxt<'_>) {
+ // Load everything into memory so we can write it out to the on-disk
+ // cache. The vast majority of cacheable query results should already
+ // be in memory, so this should be a cheap operation.
+        // Do this *before* we release the serialized data below, since
+        // executing the cache promotions may load cached query results,
+        // which requires reading from the memmapped data.
+ tcx.dep_graph.exec_cache_promotions(tcx);
+
+ *self.serialized_data.write() = None;
+ }
+
+ fn serialize<'tcx>(&self, tcx: TyCtxt<'tcx>, encoder: FileEncoder) -> FileEncodeResult {
+ // Serializing the `DepGraph` should not modify it.
+ tcx.dep_graph.with_ignore(|| {
+ // Allocate `SourceFileIndex`es.
+ let (file_to_file_index, file_index_to_stable_id) = {
+ let files = tcx.sess.source_map().files();
+ let mut file_to_file_index =
+ FxHashMap::with_capacity_and_hasher(files.len(), Default::default());
+ let mut file_index_to_stable_id =
+ FxHashMap::with_capacity_and_hasher(files.len(), Default::default());
+
+ for (index, file) in files.iter().enumerate() {
+ let index = SourceFileIndex(index as u32);
+ let file_ptr: *const SourceFile = &**file as *const _;
+ file_to_file_index.insert(file_ptr, index);
+ let source_file_id = EncodedSourceFileId::new(tcx, &file);
+ file_index_to_stable_id.insert(index, source_file_id);
+ }
+
+ (file_to_file_index, file_index_to_stable_id)
+ };
+
+ let hygiene_encode_context = HygieneEncodeContext::default();
+
+ let mut encoder = CacheEncoder {
+ tcx,
+ encoder,
+ type_shorthands: Default::default(),
+ predicate_shorthands: Default::default(),
+ interpret_allocs: Default::default(),
+ source_map: CachingSourceMapView::new(tcx.sess.source_map()),
+ file_to_file_index,
+ hygiene_context: &hygiene_encode_context,
+ };
+
+ // Encode query results.
+ let mut query_result_index = EncodedDepNodeIndex::new();
+
+ tcx.sess.time("encode_query_results", || {
+ let enc = &mut encoder;
+ let qri = &mut query_result_index;
+ QueryCtxt::from_tcx(tcx).encode_query_results(enc, qri);
+ });
+
+ // Encode side effects.
+ let side_effects_index: EncodedDepNodeIndex = self
+ .current_side_effects
+ .borrow()
+ .iter()
+ .map(|(dep_node_index, side_effects)| {
+ let pos = AbsoluteBytePos::new(encoder.position());
+ let dep_node_index = SerializedDepNodeIndex::new(dep_node_index.index());
+ encoder.encode_tagged(dep_node_index, side_effects);
+
+ (dep_node_index, pos)
+ })
+ .collect();
+
+ let interpret_alloc_index = {
+ let mut interpret_alloc_index = Vec::new();
+ let mut n = 0;
+ loop {
+ let new_n = encoder.interpret_allocs.len();
+                // If encoding the previous batch produced new allocation IDs,
+                // serialize those too; once no new IDs appear, we have
+                // reached a fixed point and can stop.
+                if n == new_n {
+                    break;
+ }
+ interpret_alloc_index.reserve(new_n - n);
+ for idx in n..new_n {
+ let id = encoder.interpret_allocs[idx];
+ let pos = encoder.position() as u32;
+ interpret_alloc_index.push(pos);
+ interpret::specialized_encode_alloc_id(&mut encoder, tcx, id);
+ }
+ n = new_n;
+ }
+ interpret_alloc_index
+ };
+
+ let mut syntax_contexts = FxHashMap::default();
+ let mut expn_data = UnhashMap::default();
+ let mut foreign_expn_data = UnhashMap::default();
+
+ // Encode all hygiene data (`SyntaxContextData` and `ExpnData`) from the current
+ // session.
+
+ hygiene_encode_context.encode(
+ &mut encoder,
+ |encoder, index, ctxt_data| {
+ let pos = AbsoluteBytePos::new(encoder.position());
+ encoder.encode_tagged(TAG_SYNTAX_CONTEXT, ctxt_data);
+ syntax_contexts.insert(index, pos);
+ },
+ |encoder, expn_id, data, hash| {
+ if expn_id.krate == LOCAL_CRATE {
+ let pos = AbsoluteBytePos::new(encoder.position());
+ encoder.encode_tagged(TAG_EXPN_DATA, data);
+ expn_data.insert(hash, pos);
+ } else {
+ foreign_expn_data.insert(hash, expn_id.local_id.as_u32());
+ }
+ },
+ );
+
+            // Encode the file footer.
+ let footer_pos = encoder.position() as u64;
+ encoder.encode_tagged(
+ TAG_FILE_FOOTER,
+ &Footer {
+ file_index_to_stable_id,
+ query_result_index,
+ side_effects_index,
+ interpret_alloc_index,
+ syntax_contexts,
+ expn_data,
+ foreign_expn_data,
+ },
+ );
+
+ // Encode the position of the footer as the last 8 bytes of the
+ // file so we know where to look for it.
+ IntEncodedWithFixedSize(footer_pos).encode(&mut encoder.encoder);
+
+ // DO NOT WRITE ANYTHING TO THE ENCODER AFTER THIS POINT! The address
+ // of the footer must be the last thing in the data stream.
+
+ encoder.finish()
+ })
+ }
+}
+
+impl<'sess> OnDiskCache<'sess> {
+ pub fn as_dyn(&self) -> &dyn rustc_middle::ty::OnDiskCache<'sess> {
+ self as _
+ }
+
+ /// Loads a `QuerySideEffects` created during the previous compilation session.
+ pub fn load_side_effects(
+ &self,
+ tcx: TyCtxt<'_>,
+ dep_node_index: SerializedDepNodeIndex,
+ ) -> QuerySideEffects {
+ let side_effects: Option<QuerySideEffects> =
+ self.load_indexed(tcx, dep_node_index, &self.prev_side_effects_index);
+
+ side_effects.unwrap_or_default()
+ }
+
+ /// Stores a `QuerySideEffects` emitted during the current compilation session.
+ /// Anything stored like this will be available via `load_side_effects` in
+ /// the next compilation session.
+ #[inline(never)]
+ #[cold]
+ pub fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) {
+ let mut current_side_effects = self.current_side_effects.borrow_mut();
+ let prev = current_side_effects.insert(dep_node_index, side_effects);
+ debug_assert!(prev.is_none());
+ }
+
+ /// Returns the cached query result if there is something in the cache for
+ /// the given `SerializedDepNodeIndex`; otherwise returns `None`.
+ pub fn try_load_query_result<'tcx, T>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ dep_node_index: SerializedDepNodeIndex,
+ ) -> Option<T>
+ where
+ T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
+ {
+ self.load_indexed(tcx, dep_node_index, &self.query_result_index)
+ }
+
+    /// Stores side effects emitted during computation of an anonymous query.
+ /// Since many anonymous queries can share the same `DepNode`, we aggregate
+ /// them -- as opposed to regular queries where we assume that there is a
+ /// 1:1 relationship between query-key and `DepNode`.
+ #[inline(never)]
+ #[cold]
+ pub fn store_side_effects_for_anon_node(
+ &self,
+ dep_node_index: DepNodeIndex,
+ side_effects: QuerySideEffects,
+ ) {
+ let mut current_side_effects = self.current_side_effects.borrow_mut();
+
+ let x = current_side_effects.entry(dep_node_index).or_default();
+ x.append(side_effects);
+ }
+
+ fn load_indexed<'tcx, T>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ dep_node_index: SerializedDepNodeIndex,
+ index: &FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,
+ ) -> Option<T>
+ where
+ T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>,
+ {
+ let pos = index.get(&dep_node_index).cloned()?;
+
+ self.with_decoder(tcx, pos, |decoder| Some(decode_tagged(decoder, dep_node_index)))
+ }
+
+ fn with_decoder<'a, 'tcx, T, F: for<'s> FnOnce(&mut CacheDecoder<'s, 'tcx>) -> T>(
+ &'sess self,
+ tcx: TyCtxt<'tcx>,
+ pos: AbsoluteBytePos,
+ f: F,
+ ) -> T
+ where
+ T: Decodable<CacheDecoder<'a, 'tcx>>,
+ {
+ let serialized_data = self.serialized_data.read();
+ let mut decoder = CacheDecoder {
+ tcx,
+ opaque: MemDecoder::new(serialized_data.as_deref().unwrap_or(&[]), pos.to_usize()),
+ source_map: self.source_map,
+ file_index_to_file: &self.file_index_to_file,
+ file_index_to_stable_id: &self.file_index_to_stable_id,
+ alloc_decoding_session: self.alloc_decoding_state.new_decoding_session(),
+ syntax_contexts: &self.syntax_contexts,
+ expn_data: &self.expn_data,
+ foreign_expn_data: &self.foreign_expn_data,
+ hygiene_context: &self.hygiene_context,
+ };
+ f(&mut decoder)
+ }
+}
+
+//- DECODING -------------------------------------------------------------------
+
+/// A decoder that can read from the incremental compilation cache. It is similar to the one
+/// we use for crate metadata decoding in that it can rebase spans and eventually
+/// will also handle things that contain `Ty` instances.
+pub struct CacheDecoder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ opaque: MemDecoder<'a>,
+ source_map: &'a SourceMap,
+ file_index_to_file: &'a Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>,
+ file_index_to_stable_id: &'a FxHashMap<SourceFileIndex, EncodedSourceFileId>,
+ alloc_decoding_session: AllocDecodingSession<'a>,
+ syntax_contexts: &'a FxHashMap<u32, AbsoluteBytePos>,
+ expn_data: &'a UnhashMap<ExpnHash, AbsoluteBytePos>,
+ foreign_expn_data: &'a UnhashMap<ExpnHash, u32>,
+ hygiene_context: &'a HygieneDecodeContext,
+}
+
+impl<'a, 'tcx> CacheDecoder<'a, 'tcx> {
+ fn file_index_to_file(&self, index: SourceFileIndex) -> Lrc<SourceFile> {
+ let CacheDecoder {
+ tcx,
+ ref file_index_to_file,
+ ref file_index_to_stable_id,
+ ref source_map,
+ ..
+ } = *self;
+
+ file_index_to_file
+ .borrow_mut()
+ .entry(index)
+ .or_insert_with(|| {
+ let stable_id = file_index_to_stable_id[&index].translate(tcx);
+
+ // If this `SourceFile` is from a foreign crate, then make sure
+ // that we've imported all of the source files from that crate.
+ // This has usually already been done during macro invocation.
+ // However, when encoding query results like `TypeckResults`,
+ // we might encode an `AdtDef` for a foreign type (because it
+ // was referenced in the body of the function). There is no guarantee
+ // that we will load the source files from that crate during macro
+ // expansion, so we use `import_source_files` to ensure that the foreign
+ // source files are actually imported before we call `source_file_by_stable_id`.
+ if stable_id.cnum != LOCAL_CRATE {
+ self.tcx.cstore_untracked().import_source_files(self.tcx.sess, stable_id.cnum);
+ }
+
+ source_map
+ .source_file_by_stable_id(stable_id)
+ .expect("failed to lookup `SourceFile` in new context")
+ })
+ .clone()
+ }
+}
+
+trait DecoderWithPosition: Decoder {
+ fn position(&self) -> usize;
+}
+
+impl<'a> DecoderWithPosition for MemDecoder<'a> {
+ fn position(&self) -> usize {
+ self.position()
+ }
+}
+
+impl<'a, 'tcx> DecoderWithPosition for CacheDecoder<'a, 'tcx> {
+ fn position(&self) -> usize {
+ self.opaque.position()
+ }
+}
+
+// Decodes something that was encoded with `encode_tagged()` and verifies that
+// the tag matches and the correct number of bytes was read.
+fn decode_tagged<D, T, V>(decoder: &mut D, expected_tag: T) -> V
+where
+ T: Decodable<D> + Eq + std::fmt::Debug,
+ V: Decodable<D>,
+ D: DecoderWithPosition,
+{
+ let start_pos = decoder.position();
+
+ let actual_tag = T::decode(decoder);
+ assert_eq!(actual_tag, expected_tag);
+ let value = V::decode(decoder);
+ let end_pos = decoder.position();
+
+ let expected_len: u64 = Decodable::decode(decoder);
+ assert_eq!((end_pos - start_pos) as u64, expected_len);
+
+ value
+}
+
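+// Illustrative byte layout of a tagged entry, as produced by
+// `CacheEncoder::encode_tagged` below (a sketch, not a normative format):
+//
+//     [tag: T] [value: V] [length in bytes of tag + value: u64]
+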
+impl<'a, 'tcx> TyDecoder for CacheDecoder<'a, 'tcx> {
+ type I = TyCtxt<'tcx>;
+ const CLEAR_CROSS_CRATE: bool = false;
+
+ #[inline]
+ fn interner(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ #[inline]
+ fn position(&self) -> usize {
+ self.opaque.position()
+ }
+
+ #[inline]
+ fn peek_byte(&self) -> u8 {
+ self.opaque.data[self.opaque.position()]
+ }
+
+ fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx>
+ where
+ F: FnOnce(&mut Self) -> Ty<'tcx>,
+ {
+ let tcx = self.tcx;
+
+ let cache_key = ty::CReaderCacheKey { cnum: None, pos: shorthand };
+
+ if let Some(&ty) = tcx.ty_rcache.borrow().get(&cache_key) {
+ return ty;
+ }
+
+ let ty = or_insert_with(self);
+ // This may overwrite the entry, but it should overwrite with the same value.
+ tcx.ty_rcache.borrow_mut().insert_same(cache_key, ty);
+ ty
+ }
+
+ fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
+ where
+ F: FnOnce(&mut Self) -> R,
+ {
+ debug_assert!(pos < self.opaque.data.len());
+
+ let new_opaque = MemDecoder::new(self.opaque.data, pos);
+ let old_opaque = mem::replace(&mut self.opaque, new_opaque);
+ let r = f(self);
+ self.opaque = old_opaque;
+ r
+ }
+
+ fn decode_alloc_id(&mut self) -> interpret::AllocId {
+ let alloc_decoding_session = self.alloc_decoding_session;
+ alloc_decoding_session.decode_alloc_id(self)
+ }
+}
+
+rustc_middle::implement_ty_decoder!(CacheDecoder<'a, 'tcx>);
+
+// This ensures that the `Decodable<opaque::Decoder>::decode` specialization for `Vec<u8>` is used
+// when a `CacheDecoder` is passed to `Decodable::decode`. Unfortunately, we have to manually opt
+// into specializations this way, given how `CacheDecoder` and the decoding traits currently work.
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Vec<u8> {
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ Decodable::decode(&mut d.opaque)
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for SyntaxContext {
+ fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ let syntax_contexts = decoder.syntax_contexts;
+ rustc_span::hygiene::decode_syntax_context(decoder, decoder.hygiene_context, |this, id| {
+ // This closure is invoked if we haven't already decoded the data for the `SyntaxContext` we are deserializing.
+        // We look up the position of the associated `SyntaxContextData` and decode it.
+ let pos = syntax_contexts.get(&id).unwrap();
+ this.with_position(pos.to_usize(), |decoder| {
+ let data: SyntaxContextData = decode_tagged(decoder, TAG_SYNTAX_CONTEXT);
+ data
+ })
+ })
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for ExpnId {
+ fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ let hash = ExpnHash::decode(decoder);
+ if hash.is_root() {
+ return ExpnId::root();
+ }
+
+ if let Some(expn_id) = ExpnId::from_hash(hash) {
+ return expn_id;
+ }
+
+ let krate = decoder.tcx.stable_crate_id_to_crate_num(hash.stable_crate_id());
+
+ let expn_id = if krate == LOCAL_CRATE {
+ // We look up the position of the associated `ExpnData` and decode it.
+ let pos = decoder
+ .expn_data
+ .get(&hash)
+ .unwrap_or_else(|| panic!("Bad hash {:?} (map {:?})", hash, decoder.expn_data));
+
+ let data: ExpnData = decoder
+ .with_position(pos.to_usize(), |decoder| decode_tagged(decoder, TAG_EXPN_DATA));
+ let expn_id = rustc_span::hygiene::register_local_expn_id(data, hash);
+
+ #[cfg(debug_assertions)]
+ {
+ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+ let local_hash: u64 = decoder.tcx.with_stable_hashing_context(|mut hcx| {
+ let mut hasher = StableHasher::new();
+ expn_id.expn_data().hash_stable(&mut hcx, &mut hasher);
+ hasher.finish()
+ });
+ debug_assert_eq!(hash.local_hash(), local_hash);
+ }
+
+ expn_id
+ } else {
+ let index_guess = decoder.foreign_expn_data[&hash];
+ decoder.tcx.cstore_untracked().expn_hash_to_expn_id(
+ decoder.tcx.sess,
+ krate,
+ index_guess,
+ hash,
+ )
+ };
+
+ debug_assert_eq!(expn_id.krate, krate);
+ expn_id
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Span {
+ fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ let ctxt = SyntaxContext::decode(decoder);
+ let parent = Option::<LocalDefId>::decode(decoder);
+ let tag: u8 = Decodable::decode(decoder);
+
+ if tag == TAG_PARTIAL_SPAN {
+ return Span::new(BytePos(0), BytePos(0), ctxt, parent);
+ } else if tag == TAG_RELATIVE_SPAN {
+ let dlo = u32::decode(decoder);
+ let dto = u32::decode(decoder);
+
+ let enclosing = decoder.tcx.source_span_untracked(parent.unwrap()).data_untracked();
+ let span = Span::new(
+ enclosing.lo + BytePos::from_u32(dlo),
+ enclosing.lo + BytePos::from_u32(dto),
+ ctxt,
+ parent,
+ );
+
+ return span;
+ } else {
+ debug_assert_eq!(tag, TAG_FULL_SPAN);
+ }
+
+ let file_lo_index = SourceFileIndex::decode(decoder);
+ let line_lo = usize::decode(decoder);
+ let col_lo = BytePos::decode(decoder);
+ let len = BytePos::decode(decoder);
+
+ let file_lo = decoder.file_index_to_file(file_lo_index);
+ let lo = file_lo.lines(|lines| lines[line_lo - 1] + col_lo);
+ let hi = lo + len;
+
+ Span::new(lo, hi, ctxt, parent)
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for CrateNum {
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ let stable_id = StableCrateId::decode(d);
+ let cnum = d.tcx.stable_crate_id_to_crate_num(stable_id);
+ cnum
+ }
+}
+
+// This impl makes sure that we get a runtime error when we try decode a
+// `DefIndex` that is not contained in a `DefId`. Such a case would be problematic
+// because we would not know how to transform the `DefIndex` to the current
+// context.
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefIndex {
+ fn decode(_d: &mut CacheDecoder<'a, 'tcx>) -> DefIndex {
+ panic!("trying to decode `DefIndex` outside the context of a `DefId`")
+ }
+}
+
+// Both the `CrateNum` and the `DefIndex` of a `DefId` can change in between two
+// compilation sessions. We use the `DefPathHash`, which is stable across
+// sessions, to map the old `DefId` to the new one.
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId {
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+        // Load the `DefPathHash`, which is what we encoded the `DefId` as.
+ let def_path_hash = DefPathHash::decode(d);
+
+        // Using the `DefPathHash`, we can look up the new `DefId`.
+ // Subtle: We only encode a `DefId` as part of a query result.
+ // If we get to this point, then all of the query inputs were green,
+ // which means that the definition with this hash is guaranteed to
+ // still exist in the current compilation session.
+ d.tcx.def_path_hash_to_def_id(def_path_hash, &mut || {
+ panic!("Failed to convert DefPathHash {:?}", def_path_hash)
+ })
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx FxHashSet<LocalDefId> {
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ RefDecodable::decode(d)
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>>
+ for &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>>
+{
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ RefDecodable::decode(d)
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [ty::abstract_const::Node<'tcx>] {
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ RefDecodable::decode(d)
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx>, Span)] {
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ RefDecodable::decode(d)
+ }
+}
+
+impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [rustc_ast::InlineAsmTemplatePiece] {
+ fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+ RefDecodable::decode(d)
+ }
+}
+
+macro_rules! impl_ref_decoder {
+ (<$tcx:tt> $($ty:ty,)*) => {
+ $(impl<'a, $tcx> Decodable<CacheDecoder<'a, $tcx>> for &$tcx [$ty] {
+ fn decode(d: &mut CacheDecoder<'a, $tcx>) -> Self {
+ RefDecodable::decode(d)
+ }
+ })*
+ };
+}
+
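+// Illustrative expansion (sketch): `impl_ref_decoder! {<'tcx> Span,}` emits
+//
+//     impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [Span] {
+//         fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self {
+//             RefDecodable::decode(d)
+//         }
+//     }
+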
+impl_ref_decoder! {<'tcx>
+ Span,
+ rustc_ast::Attribute,
+ rustc_span::symbol::Ident,
+ ty::Variance,
+ rustc_span::def_id::DefId,
+ rustc_span::def_id::LocalDefId,
+ (rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo),
+}
+
+//- ENCODING -------------------------------------------------------------------
+
+/// An encoder that can write to the incremental compilation cache.
+pub struct CacheEncoder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ encoder: FileEncoder,
+ type_shorthands: FxHashMap<Ty<'tcx>, usize>,
+ predicate_shorthands: FxHashMap<ty::PredicateKind<'tcx>, usize>,
+ interpret_allocs: FxIndexSet<interpret::AllocId>,
+ source_map: CachingSourceMapView<'tcx>,
+ file_to_file_index: FxHashMap<*const SourceFile, SourceFileIndex>,
+ hygiene_context: &'a HygieneEncodeContext,
+}
+
+impl<'a, 'tcx> CacheEncoder<'a, 'tcx> {
+ fn source_file_index(&mut self, source_file: Lrc<SourceFile>) -> SourceFileIndex {
+ self.file_to_file_index[&(&*source_file as *const SourceFile)]
+ }
+
+    /// Encode something with additional information that allows us to do some
+ /// sanity checks when decoding the data again. This method will first
+ /// encode the specified tag, then the given value, then the number of
+ /// bytes taken up by tag and value. On decoding, we can then verify that
+ /// we get the expected tag and read the expected number of bytes.
+ fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(&mut self, tag: T, value: &V) {
+ let start_pos = self.position();
+
+ tag.encode(self);
+ value.encode(self);
+
+ let end_pos = self.position();
+ ((end_pos - start_pos) as u64).encode(self);
+ }
+
+ fn finish(self) -> Result<usize, io::Error> {
+ self.encoder.finish()
+ }
+}
+
+impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for SyntaxContext {
+ fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
+ rustc_span::hygiene::raw_encode_syntax_context(*self, s.hygiene_context, s);
+ }
+}
+
+impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for ExpnId {
+ fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
+ s.hygiene_context.schedule_expn_data_for_encoding(*self);
+ self.expn_hash().encode(s);
+ }
+}
+
+impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for Span {
+ fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
+ let span_data = self.data_untracked();
+ span_data.ctxt.encode(s);
+ span_data.parent.encode(s);
+
+ if span_data.is_dummy() {
+ return TAG_PARTIAL_SPAN.encode(s);
+ }
+
+ if let Some(parent) = span_data.parent {
+ let enclosing = s.tcx.source_span(parent).data_untracked();
+ if enclosing.contains(span_data) {
+ TAG_RELATIVE_SPAN.encode(s);
+ (span_data.lo - enclosing.lo).to_u32().encode(s);
+ (span_data.hi - enclosing.lo).to_u32().encode(s);
+ return;
+ }
+ }
+
+ let pos = s.source_map.byte_pos_to_line_and_col(span_data.lo);
+ let partial_span = match &pos {
+ Some((file_lo, _, _)) => !file_lo.contains(span_data.hi),
+ None => true,
+ };
+
+ if partial_span {
+ return TAG_PARTIAL_SPAN.encode(s);
+ }
+
+ let (file_lo, line_lo, col_lo) = pos.unwrap();
+
+ let len = span_data.hi - span_data.lo;
+
+ let source_file_index = s.source_file_index(file_lo);
+
+ TAG_FULL_SPAN.encode(s);
+ source_file_index.encode(s);
+ line_lo.encode(s);
+ col_lo.encode(s);
+ len.encode(s);
+ }
+}
+
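+// Sketch of the three span encodings used above (illustrative):
+//
+//     ctxt, parent, TAG_PARTIAL_SPAN                        // dummy or unmappable span
+//     ctxt, parent, TAG_RELATIVE_SPAN, dlo: u32, dhi: u32   // offsets from the parent's lo
+//     ctxt, parent, TAG_FULL_SPAN, file, line, col, len     // absolute position
+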
+impl<'a, 'tcx> TyEncoder for CacheEncoder<'a, 'tcx> {
+ type I = TyCtxt<'tcx>;
+ const CLEAR_CROSS_CRATE: bool = false;
+
+ fn position(&self) -> usize {
+ self.encoder.position()
+ }
+ fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize> {
+ &mut self.type_shorthands
+ }
+ fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::PredicateKind<'tcx>, usize> {
+ &mut self.predicate_shorthands
+ }
+ fn encode_alloc_id(&mut self, alloc_id: &interpret::AllocId) {
+ let (index, _) = self.interpret_allocs.insert_full(*alloc_id);
+
+ index.encode(self);
+ }
+}
+
+impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for CrateNum {
+ fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
+ s.tcx.stable_crate_id(*self).encode(s);
+ }
+}
+
+impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for DefId {
+ fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) {
+ s.tcx.def_path_hash(*self).encode(s);
+ }
+}
+
+impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for DefIndex {
+ fn encode(&self, _: &mut CacheEncoder<'a, 'tcx>) {
+ bug!("encoding `DefIndex` without context");
+ }
+}
+
+macro_rules! encoder_methods {
+ ($($name:ident($ty:ty);)*) => {
+ #[inline]
+ $(fn $name(&mut self, value: $ty) {
+ self.encoder.$name(value)
+ })*
+ }
+}
+
+impl<'a, 'tcx> Encoder for CacheEncoder<'a, 'tcx> {
+ encoder_methods! {
+ emit_usize(usize);
+ emit_u128(u128);
+ emit_u64(u64);
+ emit_u32(u32);
+ emit_u16(u16);
+ emit_u8(u8);
+
+ emit_isize(isize);
+ emit_i128(i128);
+ emit_i64(i64);
+ emit_i32(i32);
+ emit_i16(i16);
+ emit_i8(i8);
+
+ emit_bool(bool);
+ emit_f64(f64);
+ emit_f32(f32);
+ emit_char(char);
+ emit_str(&str);
+ emit_raw_bytes(&[u8]);
+ }
+}
+
+// This ensures that the `Encodable<opaque::FileEncoder>::encode` specialization for byte slices
+// is used when a `CacheEncoder` having an `opaque::FileEncoder` is passed to `Encodable::encode`.
+// Unfortunately, we have to manually opt into specializations this way, given how `CacheEncoder`
+// and the encoding traits currently work.
+impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for [u8] {
+ fn encode(&self, e: &mut CacheEncoder<'a, 'tcx>) {
+ self.encode(&mut e.encoder);
+ }
+}
+
+pub fn encode_query_results<'a, 'tcx, CTX, Q>(
+ tcx: CTX,
+ encoder: &mut CacheEncoder<'a, 'tcx>,
+ query_result_index: &mut EncodedDepNodeIndex,
+) where
+ CTX: QueryContext + 'tcx,
+ Q: super::QueryDescription<CTX>,
+ Q::Value: Encodable<CacheEncoder<'a, 'tcx>>,
+{
+ let _timer = tcx
+ .dep_context()
+ .profiler()
+ .extra_verbose_generic_activity("encode_query_results_for", std::any::type_name::<Q>());
+
+ assert!(Q::query_state(tcx).all_inactive());
+ let cache = Q::query_cache(tcx);
+ cache.iter(&mut |key, value, dep_node| {
+ if Q::cache_on_disk(*tcx.dep_context(), &key) {
+ let dep_node = SerializedDepNodeIndex::new(dep_node.index());
+
+ // Record position of the cache entry.
+ query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.encoder.position())));
+
+ // Encode the type check tables with the `SerializedDepNodeIndex`
+ // as tag.
+ encoder.encode_tagged(dep_node, value);
+ }
+ });
+}
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs
new file mode 100644
index 000000000..eda4401c8
--- /dev/null
+++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -0,0 +1,558 @@
+//! The implementation of the query system itself. This defines the macros that
+//! generate the actual methods on tcx which find and execute the provider,
+//! manage the caches, and so forth.
+
+use crate::{on_disk_cache, Queries};
+use rustc_middle::dep_graph::{DepNodeIndex, SerializedDepNodeIndex};
+use rustc_middle::ty::tls::{self, ImplicitCtxt};
+use rustc_middle::ty::TyCtxt;
+use rustc_query_system::dep_graph::HasDepContext;
+use rustc_query_system::query::{QueryContext, QueryJobId, QueryMap, QuerySideEffects};
+
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::{Diagnostic, Handler};
+
+use std::any::Any;
+use std::num::NonZeroU64;
+
+#[derive(Copy, Clone)]
+pub struct QueryCtxt<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub queries: &'tcx Queries<'tcx>,
+}
+
+impl<'tcx> std::ops::Deref for QueryCtxt<'tcx> {
+ type Target = TyCtxt<'tcx>;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ &self.tcx
+ }
+}
+
+impl<'tcx> HasDepContext for QueryCtxt<'tcx> {
+ type DepKind = rustc_middle::dep_graph::DepKind;
+ type DepContext = TyCtxt<'tcx>;
+
+ #[inline]
+ fn dep_context(&self) -> &Self::DepContext {
+ &self.tcx
+ }
+}
+
+impl QueryContext for QueryCtxt<'_> {
+ fn next_job_id(&self) -> QueryJobId {
+ QueryJobId(
+ NonZeroU64::new(
+ self.queries.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed),
+ )
+ .unwrap(),
+ )
+ }
+
+ fn current_query_job(&self) -> Option<QueryJobId> {
+ tls::with_related_context(**self, |icx| icx.query)
+ }
+
+ fn try_collect_active_jobs(&self) -> Option<QueryMap> {
+ self.queries.try_collect_active_jobs(**self)
+ }
+
+ // Interactions with on_disk_cache
+ fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects {
+ self.queries
+ .on_disk_cache
+ .as_ref()
+ .map(|c| c.load_side_effects(**self, prev_dep_node_index))
+ .unwrap_or_default()
+ }
+
+ fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) {
+ if let Some(c) = self.queries.on_disk_cache.as_ref() {
+ c.store_side_effects(dep_node_index, side_effects)
+ }
+ }
+
+ fn store_side_effects_for_anon_node(
+ &self,
+ dep_node_index: DepNodeIndex,
+ side_effects: QuerySideEffects,
+ ) {
+ if let Some(c) = self.queries.on_disk_cache.as_ref() {
+ c.store_side_effects_for_anon_node(dep_node_index, side_effects)
+ }
+ }
+
+ /// Executes a job by changing the `ImplicitCtxt` to point to the
+ /// new query job while it executes. It returns the diagnostics
+ /// captured during execution and the actual result.
+ #[inline(always)]
+ fn start_query<R>(
+ &self,
+ token: QueryJobId,
+ diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+ compute: impl FnOnce() -> R,
+ ) -> R {
+ // The `TyCtxt` stored in TLS has the same global interner lifetime
+ // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes
+ // when accessing the `ImplicitCtxt`.
+ tls::with_related_context(**self, move |current_icx| {
+ // Update the `ImplicitCtxt` to point to our new query job.
+ let new_icx = ImplicitCtxt {
+ tcx: **self,
+ query: Some(token),
+ diagnostics,
+ layout_depth: current_icx.layout_depth,
+ task_deps: current_icx.task_deps,
+ };
+
+ // Use the `ImplicitCtxt` while we execute the query.
+ tls::enter_context(&new_icx, |_| {
+ rustc_data_structures::stack::ensure_sufficient_stack(compute)
+ })
+ })
+ }
+}
+
+impl<'tcx> QueryCtxt<'tcx> {
+ #[inline]
+ pub fn from_tcx(tcx: TyCtxt<'tcx>) -> Self {
+ let queries = tcx.queries.as_any();
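+        // SAFETY sketch (comment added for exposition, not from upstream):
+        // `tcx.queries` is always the `Queries<'tcx>` constructed alongside
+        // this `TyCtxt`; the transmutes only erase and then reattach the
+        // `'tcx` lifetime, which the `'static`-bound `dyn Any` downcast
+        // cannot carry through on its own.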
+ let queries = unsafe {
+ let queries = std::mem::transmute::<&dyn Any, &dyn Any>(queries);
+ let queries = queries.downcast_ref().unwrap();
+ let queries = std::mem::transmute::<&Queries<'_>, &Queries<'_>>(queries);
+ queries
+ };
+ QueryCtxt { tcx, queries }
+ }
+
+ pub(crate) fn on_disk_cache(self) -> Option<&'tcx on_disk_cache::OnDiskCache<'tcx>> {
+ self.queries.on_disk_cache.as_ref()
+ }
+
+ pub(super) fn encode_query_results(
+ self,
+ encoder: &mut on_disk_cache::CacheEncoder<'_, 'tcx>,
+ query_result_index: &mut on_disk_cache::EncodedDepNodeIndex,
+ ) {
+ macro_rules! encode_queries {
+ ($($query:ident,)*) => {
+ $(
+ on_disk_cache::encode_query_results::<_, super::queries::$query<'_>>(
+ self,
+ encoder,
+ query_result_index
+ );
+ )*
+ }
+ }
+
+ rustc_cached_queries!(encode_queries!);
+ }
+
+ pub fn try_print_query_stack(
+ self,
+ query: Option<QueryJobId>,
+ handler: &Handler,
+ num_frames: Option<usize>,
+ ) -> usize {
+ rustc_query_system::query::print_query_stack(self, query, handler, num_frames)
+ }
+}
+
+macro_rules! handle_cycle_error {
+ ([][$tcx: expr, $error:expr]) => {{
+ $error.emit();
+ Value::from_cycle_error($tcx)
+ }};
+ ([(fatal_cycle) $($rest:tt)*][$tcx:expr, $error:expr]) => {{
+ $error.emit();
+ $tcx.sess.abort_if_errors();
+ unreachable!()
+ }};
+ ([(cycle_delay_bug) $($rest:tt)*][$tcx:expr, $error:expr]) => {{
+ $error.delay_as_bug();
+ Value::from_cycle_error($tcx)
+ }};
+ ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
+ handle_cycle_error!([$($modifiers)*][$($args)*])
+ };
+}
+
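+// For example (hypothetical invocations): a query with no modifiers falls
+// through to the first arm, while one declared with `fatal_cycle` aborts:
+//
+//     handle_cycle_error!([][tcx, error]);                // emit + from_cycle_error
+//     handle_cycle_error!([(fatal_cycle)][tcx, error]);   // emit + abort
+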
+macro_rules! is_anon {
+ ([]) => {{
+ false
+ }};
+ ([(anon) $($rest:tt)*]) => {{
+ true
+ }};
+ ([$other:tt $($modifiers:tt)*]) => {
+ is_anon!([$($modifiers)*])
+ };
+}
+
+macro_rules! is_eval_always {
+ ([]) => {{
+ false
+ }};
+ ([(eval_always) $($rest:tt)*]) => {{
+ true
+ }};
+ ([$other:tt $($modifiers:tt)*]) => {
+ is_eval_always!([$($modifiers)*])
+ };
+}
+
+macro_rules! hash_result {
+ ([]) => {{
+ Some(dep_graph::hash_result)
+ }};
+ ([(no_hash) $($rest:tt)*]) => {{
+ None
+ }};
+ ([$other:tt $($modifiers:tt)*]) => {
+ hash_result!([$($modifiers)*])
+ };
+}
+
+macro_rules! get_provider {
+ ([][$tcx:expr, $name:ident, $key:expr]) => {{
+ $tcx.queries.local_providers.$name
+ }};
+ ([(separate_provide_extern) $($rest:tt)*][$tcx:expr, $name:ident, $key:expr]) => {{
+ if $key.query_crate_is_local() {
+ $tcx.queries.local_providers.$name
+ } else {
+ $tcx.queries.extern_providers.$name
+ }
+ }};
+ ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
+ get_provider!([$($modifiers)*][$($args)*])
+ };
+}
+
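+// Illustrative expansion (hypothetical query name): for a query marked
+// `separate_provide_extern`, the provider is chosen per key, e.g.
+//
+//     if key.query_crate_is_local() {
+//         tcx.queries.local_providers.type_of
+//     } else {
+//         tcx.queries.extern_providers.type_of
+//     }
+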
+macro_rules! opt_remap_env_constness {
+ ([][$name:ident]) => {};
+ ([(remap_env_constness) $($rest:tt)*][$name:ident]) => {
+ let $name = $name.without_const();
+ };
+ ([$other:tt $($modifiers:tt)*][$name:ident]) => {
+ opt_remap_env_constness!([$($modifiers)*][$name])
+ };
+}
+
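+// E.g. (sketch): for a query tagged `remap_env_constness`, the key binding is
+// shadowed with its constness stripped before any lookup happens:
+//
+//     let key = key.without_const();
+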
+macro_rules! define_queries {
+ (<$tcx:tt>
+ $($(#[$attr:meta])*
+ [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
+
+ define_queries_struct! {
+ tcx: $tcx,
+ input: ($(([$($modifiers)*] [$($attr)*] [$name]))*)
+ }
+
+ mod make_query {
+ use super::*;
+
+ // Create an eponymous constructor for each query.
+ $(#[allow(nonstandard_style)] $(#[$attr])*
+ pub fn $name<$tcx>(tcx: QueryCtxt<$tcx>, key: query_keys::$name<$tcx>) -> QueryStackFrame {
+ opt_remap_env_constness!([$($modifiers)*][key]);
+ let kind = dep_graph::DepKind::$name;
+ let name = stringify!($name);
+                // Disable visible-path printing for performance reasons.
+                // Showing the visible path rather than any path is not that
+                // important in production.
+ let description = ty::print::with_no_visible_paths!(
+ // Force filename-line mode to avoid invoking `type_of` query.
+ ty::print::with_forced_impl_filename_line!(
+ queries::$name::describe(tcx, key)
+ )
+ );
+ let description = if tcx.sess.verbose() {
+ format!("{} [{}]", description, name)
+ } else {
+ description
+ };
+ let span = if kind == dep_graph::DepKind::def_span {
+ // The `def_span` query is used to calculate `default_span`,
+ // so exit to avoid infinite recursion.
+ None
+ } else {
+ Some(key.default_span(*tcx))
+ };
+ let def_kind = if kind == dep_graph::DepKind::opt_def_kind {
+ // Try to avoid infinite recursion.
+ None
+ } else {
+ key.key_as_def_id()
+ .and_then(|def_id| def_id.as_local())
+ .and_then(|def_id| tcx.opt_def_kind(def_id))
+ };
+ let hash = || {
+ tcx.with_stable_hashing_context(|mut hcx|{
+ let mut hasher = StableHasher::new();
+ std::mem::discriminant(&kind).hash_stable(&mut hcx, &mut hasher);
+ key.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish::<u64>()
+ })
+ };
+
+ QueryStackFrame::new(name, description, span, def_kind, hash)
+ })*
+ }
+
+ #[allow(nonstandard_style)]
+ mod queries {
+ use std::marker::PhantomData;
+
+ $(pub struct $name<$tcx> {
+ data: PhantomData<&$tcx ()>
+ })*
+ }
+
+ $(impl<$tcx> QueryConfig for queries::$name<$tcx> {
+ type Key = query_keys::$name<$tcx>;
+ type Value = query_values::$name<$tcx>;
+ type Stored = query_stored::$name<$tcx>;
+ const NAME: &'static str = stringify!($name);
+ }
+
+ impl<$tcx> QueryDescription<QueryCtxt<$tcx>> for queries::$name<$tcx> {
+ rustc_query_description! { $name<$tcx> }
+
+ type Cache = query_storage::$name<$tcx>;
+
+ #[inline(always)]
+ fn query_state<'a>(tcx: QueryCtxt<$tcx>) -> &'a QueryState<Self::Key>
+ where QueryCtxt<$tcx>: 'a
+ {
+ &tcx.queries.$name
+ }
+
+ #[inline(always)]
+ fn query_cache<'a>(tcx: QueryCtxt<$tcx>) -> &'a Self::Cache
+                where 'tcx: 'a
+ {
+ &tcx.query_caches.$name
+ }
+
+ #[inline]
+ fn make_vtable(tcx: QueryCtxt<'tcx>, key: &Self::Key) ->
+ QueryVTable<QueryCtxt<$tcx>, Self::Key, Self::Value>
+ {
+ let compute = get_provider!([$($modifiers)*][tcx, $name, key]);
+ let cache_on_disk = Self::cache_on_disk(tcx.tcx, key);
+ QueryVTable {
+ anon: is_anon!([$($modifiers)*]),
+ eval_always: is_eval_always!([$($modifiers)*]),
+ dep_kind: dep_graph::DepKind::$name,
+ hash_result: hash_result!([$($modifiers)*]),
+ handle_cycle_error: |tcx, mut error| handle_cycle_error!([$($modifiers)*][tcx, error]),
+ compute,
+ cache_on_disk,
+ try_load_from_disk: Self::TRY_LOAD_FROM_DISK,
+ }
+ }
+ })*
+
+ #[allow(nonstandard_style)]
+ mod query_callbacks {
+ use super::*;
+ use rustc_middle::dep_graph::DepNode;
+ use rustc_middle::ty::query::query_keys;
+ use rustc_query_system::dep_graph::DepNodeParams;
+ use rustc_query_system::query::{force_query, QueryDescription};
+ use rustc_query_system::dep_graph::FingerprintStyle;
+
+ // We use this for most things when incr. comp. is turned off.
+ pub fn Null() -> DepKindStruct {
+ DepKindStruct {
+ is_anon: false,
+ is_eval_always: false,
+ fingerprint_style: FingerprintStyle::Unit,
+ force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)),
+ try_load_from_on_disk_cache: None,
+ }
+ }
+
+ // We use this for the forever-red node.
+ pub fn Red() -> DepKindStruct {
+ DepKindStruct {
+ is_anon: false,
+ is_eval_always: false,
+ fingerprint_style: FingerprintStyle::Unit,
+ force_from_dep_node: Some(|_, dep_node| bug!("force_from_dep_node: encountered {:?}", dep_node)),
+ try_load_from_on_disk_cache: None,
+ }
+ }
+
+ pub fn TraitSelect() -> DepKindStruct {
+ DepKindStruct {
+ is_anon: true,
+ is_eval_always: false,
+ fingerprint_style: FingerprintStyle::Unit,
+ force_from_dep_node: None,
+ try_load_from_on_disk_cache: None,
+ }
+ }
+
+ pub fn CompileCodegenUnit() -> DepKindStruct {
+ DepKindStruct {
+ is_anon: false,
+ is_eval_always: false,
+ fingerprint_style: FingerprintStyle::Opaque,
+ force_from_dep_node: None,
+ try_load_from_on_disk_cache: None,
+ }
+ }
+
+ pub fn CompileMonoItem() -> DepKindStruct {
+ DepKindStruct {
+ is_anon: false,
+ is_eval_always: false,
+ fingerprint_style: FingerprintStyle::Opaque,
+ force_from_dep_node: None,
+ try_load_from_on_disk_cache: None,
+ }
+ }
+
+            $(pub(crate) fn $name() -> DepKindStruct {
+ let is_anon = is_anon!([$($modifiers)*]);
+ let is_eval_always = is_eval_always!([$($modifiers)*]);
+
+ let fingerprint_style =
+ <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::fingerprint_style();
+
+ if is_anon || !fingerprint_style.reconstructible() {
+ return DepKindStruct {
+ is_anon,
+ is_eval_always,
+ fingerprint_style,
+ force_from_dep_node: None,
+ try_load_from_on_disk_cache: None,
+ }
+ }
+
+ #[inline(always)]
+ fn recover<'tcx>(tcx: TyCtxt<'tcx>, dep_node: DepNode) -> Option<query_keys::$name<'tcx>> {
+ <query_keys::$name<'_> as DepNodeParams<TyCtxt<'_>>>::recover(tcx, &dep_node)
+ }
+
+ fn force_from_dep_node(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool {
+ if let Some(key) = recover(tcx, dep_node) {
+ #[cfg(debug_assertions)]
+ let _guard = tracing::span!(tracing::Level::TRACE, stringify!($name), ?key).entered();
+ let tcx = QueryCtxt::from_tcx(tcx);
+ force_query::<queries::$name<'_>, _>(tcx, key, dep_node);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn try_load_from_on_disk_cache(tcx: TyCtxt<'_>, dep_node: DepNode) {
+ debug_assert!(tcx.dep_graph.is_green(&dep_node));
+
+ let key = recover(tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash));
+ if queries::$name::cache_on_disk(tcx, &key) {
+ let _ = tcx.$name(key);
+ }
+ }
+
+ DepKindStruct {
+ is_anon,
+ is_eval_always,
+ fingerprint_style,
+ force_from_dep_node: Some(force_from_dep_node),
+ try_load_from_on_disk_cache: Some(try_load_from_on_disk_cache),
+ }
+ })*
+ }
+
+ pub fn query_callbacks<'tcx>(arena: &'tcx Arena<'tcx>) -> &'tcx [DepKindStruct] {
+ arena.alloc_from_iter(make_dep_kind_array!(query_callbacks))
+ }
+ }
+}
+
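+// Illustrative, heavily simplified sketch (hypothetical query): for a
+// declaration like `[] fn type_of(DefId) -> Ty<'tcx>`, `define_queries!`
+// generates roughly
+//
+//     impl<'tcx> QueryConfig for queries::type_of<'tcx> {
+//         type Key = query_keys::type_of<'tcx>;
+//         type Value = query_values::type_of<'tcx>;
+//         ...
+//     }
+//
+// plus a `make_query::type_of` constructor for query-stack frames and a
+// `query_callbacks::type_of` entry describing its dep-graph node.
+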
+// FIXME(eddyb) this macro (and others?) use `$tcx` and `'tcx` interchangeably.
+// We should either not take `$tcx` at all and use `'tcx` everywhere, or use
+// `$tcx` everywhere (even if that isn't necessary due to lack of hygiene).
+macro_rules! define_queries_struct {
+ (tcx: $tcx:tt,
+ input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => {
+ pub struct Queries<$tcx> {
+ local_providers: Box<Providers>,
+ extern_providers: Box<ExternProviders>,
+
+ pub on_disk_cache: Option<OnDiskCache<$tcx>>,
+
+ jobs: AtomicU64,
+
+ $($(#[$attr])* $name: QueryState<query_keys::$name<$tcx>>,)*
+ }
+
+ impl<$tcx> Queries<$tcx> {
+ pub fn new(
+ local_providers: Providers,
+ extern_providers: ExternProviders,
+ on_disk_cache: Option<OnDiskCache<$tcx>>,
+ ) -> Self {
+ Queries {
+ local_providers: Box::new(local_providers),
+ extern_providers: Box::new(extern_providers),
+ on_disk_cache,
+ jobs: AtomicU64::new(1),
+ $($name: Default::default()),*
+ }
+ }
+
+ pub(crate) fn try_collect_active_jobs(
+ &$tcx self,
+ tcx: TyCtxt<$tcx>,
+ ) -> Option<QueryMap> {
+ let tcx = QueryCtxt { tcx, queries: self };
+ let mut jobs = QueryMap::default();
+
+ $(
+ self.$name.try_collect_active_jobs(
+ tcx,
+ make_query::$name,
+ &mut jobs,
+ )?;
+ )*
+
+ Some(jobs)
+ }
+ }
+
+ impl<'tcx> QueryEngine<'tcx> for Queries<'tcx> {
+ fn as_any(&'tcx self) -> &'tcx dyn std::any::Any {
+ let this = unsafe { std::mem::transmute::<&Queries<'_>, &Queries<'_>>(self) };
+ this as _
+ }
+
+ fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool {
+ let qcx = QueryCtxt { tcx, queries: self };
+ tcx.dep_graph.try_mark_green(qcx, dep_node).is_some()
+ }
+
+ $($(#[$attr])*
+ #[inline(always)]
+ #[tracing::instrument(level = "trace", skip(self, tcx))]
+ fn $name(
+ &'tcx self,
+ tcx: TyCtxt<$tcx>,
+ span: Span,
+ key: query_keys::$name<$tcx>,
+ mode: QueryMode,
+ ) -> Option<query_stored::$name<$tcx>> {
+ opt_remap_env_constness!([$($modifiers)*][key]);
+ let qcx = QueryCtxt { tcx, queries: self };
+ get_query::<queries::$name<$tcx>, _>(qcx, span, key, mode)
+ })*
+ }
+ };
+}
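+
+// Illustrative expansion (a sketch; `type_of` stands in for any query): the
+// generated `QueryEngine` method is roughly
+//
+// ```ignore (illustrative)
+// fn type_of(&'tcx self, tcx: TyCtxt<'tcx>, span: Span, key: DefId, mode: QueryMode)
+//     -> Option<Ty<'tcx>>
+// {
+//     let qcx = QueryCtxt { tcx, queries: self };
+//     get_query::<queries::type_of<'tcx>, _>(qcx, span, key, mode)
+// }
+// ```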
diff --git a/compiler/rustc_query_impl/src/profiling_support.rs b/compiler/rustc_query_impl/src/profiling_support.rs
new file mode 100644
index 000000000..551f09420
--- /dev/null
+++ b/compiler/rustc_query_impl/src/profiling_support.rs
@@ -0,0 +1,324 @@
+use measureme::{StringComponent, StringId};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::profiling::SelfProfiler;
+use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, LOCAL_CRATE};
+use rustc_hir::definitions::DefPathData;
+use rustc_middle::ty::{TyCtxt, WithOptConstParam};
+use rustc_query_system::query::QueryCache;
+use std::fmt::Debug;
+use std::io::Write;
+
+struct QueryKeyStringCache {
+ def_id_cache: FxHashMap<DefId, StringId>,
+}
+
+impl QueryKeyStringCache {
+ fn new() -> QueryKeyStringCache {
+ QueryKeyStringCache { def_id_cache: Default::default() }
+ }
+}
+
+struct QueryKeyStringBuilder<'p, 'c, 'tcx> {
+ profiler: &'p SelfProfiler,
+ tcx: TyCtxt<'tcx>,
+ string_cache: &'c mut QueryKeyStringCache,
+}
+
+impl<'p, 'c, 'tcx> QueryKeyStringBuilder<'p, 'c, 'tcx> {
+ fn new(
+ profiler: &'p SelfProfiler,
+ tcx: TyCtxt<'tcx>,
+ string_cache: &'c mut QueryKeyStringCache,
+ ) -> QueryKeyStringBuilder<'p, 'c, 'tcx> {
+ QueryKeyStringBuilder { profiler, tcx, string_cache }
+ }
+
+ // The current implementation is rather crude. In the future it might be a
+ // good idea to base this on `ty::print` in order to get nicer and more
+ // efficient query keys.
+ fn def_id_to_string_id(&mut self, def_id: DefId) -> StringId {
+ if let Some(&string_id) = self.string_cache.def_id_cache.get(&def_id) {
+ return string_id;
+ }
+
+ let def_key = self.tcx.def_key(def_id);
+
+ let (parent_string_id, start_index) = match def_key.parent {
+ Some(parent_index) => {
+ let parent_def_id = DefId { index: parent_index, krate: def_id.krate };
+
+ (self.def_id_to_string_id(parent_def_id), 0)
+ }
+ None => (StringId::INVALID, 2),
+ };
+
+ let dis_buffer = &mut [0u8; 16];
+ let crate_name;
+ let other_name;
+ let name;
+ let dis;
+ let end_index;
+
+ match def_key.disambiguated_data.data {
+ DefPathData::CrateRoot => {
+ crate_name = self.tcx.crate_name(def_id.krate);
+ name = crate_name.as_str();
+ dis = "";
+ end_index = 3;
+ }
+ other => {
+ other_name = other.to_string();
+ name = other_name.as_str();
+ if def_key.disambiguated_data.disambiguator == 0 {
+ dis = "";
+ end_index = 3;
+ } else {
+ write!(&mut dis_buffer[..], "[{}]", def_key.disambiguated_data.disambiguator)
+ .unwrap();
+ let end_of_dis = dis_buffer.iter().position(|&c| c == b']').unwrap();
+ dis = std::str::from_utf8(&dis_buffer[..end_of_dis + 1]).unwrap();
+ end_index = 4;
+ }
+ }
+ }
+
+ let components = [
+ StringComponent::Ref(parent_string_id),
+ StringComponent::Value("::"),
+ StringComponent::Value(name),
+ StringComponent::Value(dis),
+ ];
+
+ let string_id = self.profiler.alloc_string(&components[start_index..end_index]);
+
+ self.string_cache.def_id_cache.insert(def_id, string_id);
+
+ string_id
+ }
+}
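+
+// Illustrative example (names made up): given the component layout
+// `[Ref(parent), "::", name, dis]` built above, the slices allocated are:
+//
+// ```ignore (illustrative)
+// "my_crate"               // crate root:                components[2..3]
+// parent + "::" + "foo"    // plain child:               components[0..3]
+// parent + "::" + "foo[2]" // child with disambiguator 2: components[0..4]
+// ```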
+
+trait IntoSelfProfilingString {
+ fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId;
+}
+
+// The default implementation of `IntoSelfProfilingString` just uses `Debug`
+// which is slow and causes lots of duplication of string data.
+// The specialized impls below take care of making the `DefId` case more
+// efficient.
+impl<T: Debug> IntoSelfProfilingString for T {
+ default fn to_self_profile_string(
+ &self,
+ builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+ ) -> StringId {
+ let s = format!("{:?}", self);
+ builder.profiler.alloc_string(&s[..])
+ }
+}
+
+impl<T: SpecIntoSelfProfilingString> IntoSelfProfilingString for T {
+ fn to_self_profile_string(&self, builder: &mut QueryKeyStringBuilder<'_, '_, '_>) -> StringId {
+ self.spec_to_self_profile_string(builder)
+ }
+}
+
+#[rustc_specialization_trait]
+trait SpecIntoSelfProfilingString: Debug {
+ fn spec_to_self_profile_string(
+ &self,
+ builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+ ) -> StringId;
+}
+
+impl SpecIntoSelfProfilingString for DefId {
+ fn spec_to_self_profile_string(
+ &self,
+ builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+ ) -> StringId {
+ builder.def_id_to_string_id(*self)
+ }
+}
+
+impl SpecIntoSelfProfilingString for CrateNum {
+ fn spec_to_self_profile_string(
+ &self,
+ builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+ ) -> StringId {
+ builder.def_id_to_string_id(self.as_def_id())
+ }
+}
+
+impl SpecIntoSelfProfilingString for DefIndex {
+ fn spec_to_self_profile_string(
+ &self,
+ builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+ ) -> StringId {
+ builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: *self })
+ }
+}
+
+impl SpecIntoSelfProfilingString for LocalDefId {
+ fn spec_to_self_profile_string(
+ &self,
+ builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+ ) -> StringId {
+ builder.def_id_to_string_id(DefId { krate: LOCAL_CRATE, index: self.local_def_index })
+ }
+}
+
+impl<T: SpecIntoSelfProfilingString> SpecIntoSelfProfilingString for WithOptConstParam<T> {
+ fn spec_to_self_profile_string(
+ &self,
+ builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+ ) -> StringId {
+ // We print `WithOptConstParam` values as tuples to make them shorter
+ // and more readable, without losing information:
+ //
+ // "WithOptConstParam { did: foo::bar, const_param_did: Some(foo::baz) }"
+ // becomes "(foo::bar, foo::baz)" and
+ // "WithOptConstParam { did: foo::bar, const_param_did: None }"
+ // becomes "(foo::bar, _)".
+
+ let did = StringComponent::Ref(self.did.to_self_profile_string(builder));
+
+ let const_param_did = if let Some(const_param_did) = self.const_param_did {
+ let const_param_did = builder.def_id_to_string_id(const_param_did);
+ StringComponent::Ref(const_param_did)
+ } else {
+ StringComponent::Value("_")
+ };
+
+ let components = [
+ StringComponent::Value("("),
+ did,
+ StringComponent::Value(", "),
+ const_param_did,
+ StringComponent::Value(")"),
+ ];
+
+ builder.profiler.alloc_string(&components[..])
+ }
+}
+
+impl<T0, T1> SpecIntoSelfProfilingString for (T0, T1)
+where
+ T0: SpecIntoSelfProfilingString,
+ T1: SpecIntoSelfProfilingString,
+{
+ fn spec_to_self_profile_string(
+ &self,
+ builder: &mut QueryKeyStringBuilder<'_, '_, '_>,
+ ) -> StringId {
+ let val0 = self.0.to_self_profile_string(builder);
+ let val1 = self.1.to_self_profile_string(builder);
+
+ let components = &[
+ StringComponent::Value("("),
+ StringComponent::Ref(val0),
+ StringComponent::Value(","),
+ StringComponent::Ref(val1),
+ StringComponent::Value(")"),
+ ];
+
+ builder.profiler.alloc_string(components)
+ }
+}
+
+/// Allocate the self-profiling query strings for a single query cache. This
+/// method is called from `alloc_self_profile_query_strings` which knows all
+/// the queries via macro magic.
+fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
+ tcx: TyCtxt<'tcx>,
+ query_name: &'static str,
+ query_cache: &C,
+ string_cache: &mut QueryKeyStringCache,
+) where
+ C: QueryCache,
+ C::Key: Debug + Clone,
+{
+ tcx.prof.with_profiler(|profiler| {
+ let event_id_builder = profiler.event_id_builder();
+
+ // Walk the entire query cache and allocate the appropriate
+ // string representations. Each cache entry is uniquely
+ // identified by its dep_node_index.
+ if profiler.query_key_recording_enabled() {
+ let mut query_string_builder = QueryKeyStringBuilder::new(profiler, tcx, string_cache);
+
+ let query_name = profiler.get_or_alloc_cached_string(query_name);
+
+ // Since building the string representation of query keys might
+ // need to invoke queries itself, we cannot keep the query caches
+ // locked while doing so. Instead we copy out the
+ // `(query_key, dep_node_index)` pairs and release the lock again.
+ let mut query_keys_and_indices = Vec::new();
+ query_cache.iter(&mut |k, _, i| query_keys_and_indices.push((k.clone(), i)));
+
+ // Now actually allocate the strings. If allocating the strings
+ // generates new entries in the query cache, we'll miss them but
+ // we don't actually care.
+ for (query_key, dep_node_index) in query_keys_and_indices {
+ // Translate the DepNodeIndex into a QueryInvocationId
+ let query_invocation_id = dep_node_index.into();
+
+ // Create the string version of the query-key
+ let query_key = query_key.to_self_profile_string(&mut query_string_builder);
+ let event_id = event_id_builder.from_label_and_arg(query_name, query_key);
+
+ // Doing this in bulk might be a good idea:
+ profiler.map_query_invocation_id_to_string(
+ query_invocation_id,
+ event_id.to_string_id(),
+ );
+ }
+ } else {
+ // In this branch we don't allocate query keys
+ let query_name = profiler.get_or_alloc_cached_string(query_name);
+ let event_id = event_id_builder.from_label(query_name).to_string_id();
+
+ // FIXME(eddyb) make this O(1) by using a pre-cached query name `EventId`,
+ // instead of passing the `DepNodeIndex` to `finish_with_query_invocation_id`,
+ // when recording the event in the first place.
+ let mut query_invocation_ids = Vec::new();
+ query_cache.iter(&mut |_, _, i| {
+ query_invocation_ids.push(i.into());
+ });
+
+ profiler.bulk_map_query_invocation_id_to_single_string(
+ query_invocation_ids.into_iter(),
+ event_id,
+ );
+ }
+ });
+}
+
+/// All self-profiling events generated by the query engine use
+/// virtual `StringId`s for their `event_id`. This method makes all
+/// those virtual `StringId`s point to actual strings.
+///
+/// If we are recording only summary data, the ids will point to
+/// just the query names. If we are recording query keys too, we
+/// allocate the corresponding strings here.
+pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) {
+ if !tcx.prof.enabled() {
+ return;
+ }
+
+ let mut string_cache = QueryKeyStringCache::new();
+
+ macro_rules! alloc_once {
+ (<$tcx:tt>
+ $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($K:ty) -> $V:ty,)*
+ ) => {
+ $({
+ alloc_self_profile_query_strings_for_query_cache(
+ tcx,
+ stringify!($name),
+ &tcx.query_caches.$name,
+ &mut string_cache,
+ );
+ })*
+ }
+ }
+
+ rustc_query_append! { [alloc_once!][<'tcx>] }
+}
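+
+// Illustrative usage (the exact call site in the driver is an assumption,
+// not shown in this file): this is intended to run once, late in the
+// session, after queries have executed:
+//
+// ```ignore (illustrative)
+// rustc_query_impl::alloc_self_profile_query_strings(tcx);
+// ```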
diff --git a/compiler/rustc_query_impl/src/values.rs b/compiler/rustc_query_impl/src/values.rs
new file mode 100644
index 000000000..718a2971c
--- /dev/null
+++ b/compiler/rustc_query_impl/src/values.rs
@@ -0,0 +1,45 @@
+use super::QueryCtxt;
+use rustc_middle::ty::{self, AdtSizedConstraint, Ty};
+
+pub(super) trait Value<'tcx>: Sized {
+ fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self;
+}
+
+impl<'tcx, T> Value<'tcx> for T {
+ default fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> T {
+ tcx.sess.abort_if_errors();
+ bug!("Value::from_cycle_error called without errors");
+ }
+}
+
+impl<'tcx> Value<'tcx> for Ty<'_> {
+ fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
+ // SAFETY: This is never called when `Self` is not `Ty<'tcx>`.
+ // FIXME: Represent the above fact in the trait system somehow.
+ unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(tcx.ty_error()) }
+ }
+}
+
+impl<'tcx> Value<'tcx> for ty::SymbolName<'_> {
+ fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
+ // SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`.
+ // FIXME: Represent the above fact in the trait system somehow.
+ unsafe {
+ std::mem::transmute::<ty::SymbolName<'tcx>, ty::SymbolName<'_>>(ty::SymbolName::new(
+ *tcx, "<error>",
+ ))
+ }
+ }
+}
+
+impl<'tcx> Value<'tcx> for AdtSizedConstraint<'_> {
+ fn from_cycle_error(tcx: QueryCtxt<'tcx>) -> Self {
+ // SAFETY: This is never called when `Self` is not `AdtSizedConstraint<'tcx>`.
+ // FIXME: Represent the above fact in the trait system somehow.
+ unsafe {
+ std::mem::transmute::<AdtSizedConstraint<'tcx>, AdtSizedConstraint<'_>>(
+ AdtSizedConstraint(tcx.intern_type_list(&[tcx.ty_error()])),
+ )
+ }
+ }
+}
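+
+// Illustrative effect (a sketch, not a call site in this file): on a query
+// cycle, the query system substitutes an error value through this trait
+// instead of looping, e.g. for a query returning `Ty<'tcx>`:
+//
+// ```ignore (illustrative)
+// let recovered: Ty<'_> = Value::from_cycle_error(qcx); // yields `tcx.ty_error()`
+// ```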
diff --git a/compiler/rustc_query_system/Cargo.toml b/compiler/rustc_query_system/Cargo.toml
new file mode 100644
index 000000000..b7787aeb8
--- /dev/null
+++ b/compiler/rustc_query_system/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "rustc_query_system"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_arena = { path = "../rustc_arena" }
+tracing = "0.1"
+rustc-rayon-core = { version = "0.4.0", optional = true }
+rustc_ast = { path = "../rustc_ast" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+parking_lot = "0.11"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+
+[features]
+rustc_use_parallel_compiler = ["rustc-rayon-core"]
diff --git a/compiler/rustc_query_system/src/cache.rs b/compiler/rustc_query_system/src/cache.rs
new file mode 100644
index 000000000..d592812f7
--- /dev/null
+++ b/compiler/rustc_query_system/src/cache.rs
@@ -0,0 +1,53 @@
+//! Cache for candidate selection.
+
+use crate::dep_graph::{DepContext, DepNodeIndex};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lock;
+
+use std::hash::Hash;
+
+#[derive(Clone)]
+pub struct Cache<Key, Value> {
+ hashmap: Lock<FxHashMap<Key, WithDepNode<Value>>>,
+}
+
+impl<Key, Value> Default for Cache<Key, Value> {
+ fn default() -> Self {
+ Self { hashmap: Default::default() }
+ }
+}
+
+impl<Key, Value> Cache<Key, Value> {
+ /// Actually frees the underlying memory in contrast to what stdlib containers do on `clear`
+ pub fn clear(&self) {
+ *self.hashmap.borrow_mut() = Default::default();
+ }
+}
+
+impl<Key: Eq + Hash, Value: Clone> Cache<Key, Value> {
+ pub fn get<CTX: DepContext>(&self, key: &Key, tcx: CTX) -> Option<Value> {
+ Some(self.hashmap.borrow().get(key)?.get(tcx))
+ }
+
+ pub fn insert(&self, key: Key, dep_node: DepNodeIndex, value: Value) {
+ self.hashmap.borrow_mut().insert(key, WithDepNode::new(dep_node, value));
+ }
+}
+
+#[derive(Clone, Eq, PartialEq)]
+pub struct WithDepNode<T> {
+ dep_node: DepNodeIndex,
+ cached_value: T,
+}
+
+impl<T: Clone> WithDepNode<T> {
+ pub fn new(dep_node: DepNodeIndex, cached_value: T) -> Self {
+ WithDepNode { dep_node, cached_value }
+ }
+
+ pub fn get<CTX: DepContext>(&self, tcx: CTX) -> T {
+ tcx.dep_graph().read_index(self.dep_node);
+ self.cached_value.clone()
+ }
+}
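+
+// Illustrative usage (`MyKey`, `MyValue`, and the surrounding bindings are
+// made-up stand-ins):
+//
+// ```ignore (illustrative)
+// let cache: Cache<MyKey, MyValue> = Cache::default();
+// cache.insert(key, dep_node_index, value);
+// // `get` replays a read of the stored `DepNodeIndex`, so the cached value
+// // stays visible to dependency tracking:
+// if let Some(value) = cache.get(&key, tcx) { /* ... */ }
+// ```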
diff --git a/compiler/rustc_query_system/src/dep_graph/README.md b/compiler/rustc_query_system/src/dep_graph/README.md
new file mode 100644
index 000000000..b9d91cd35
--- /dev/null
+++ b/compiler/rustc_query_system/src/dep_graph/README.md
@@ -0,0 +1,4 @@
+To learn more about how dependency tracking works in rustc, see the [rustc
+dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html
diff --git a/compiler/rustc_query_system/src/dep_graph/debug.rs b/compiler/rustc_query_system/src/dep_graph/debug.rs
new file mode 100644
index 000000000..f9f3169af
--- /dev/null
+++ b/compiler/rustc_query_system/src/dep_graph/debug.rs
@@ -0,0 +1,63 @@
+//! Code for debugging the dep-graph.
+
+use super::{DepKind, DepNode, DepNodeIndex};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::Lock;
+use std::error::Error;
+
+/// A dep-node filter goes from a user-defined string to a query over
+/// nodes. Right now the format is like this:
+/// ```ignore (illustrative)
+/// x & y & z
+/// ```
+/// where the format-string of the dep-node must contain `x`, `y`, and
+/// `z`.
+#[derive(Debug)]
+pub struct DepNodeFilter {
+ text: String,
+}
+
+impl DepNodeFilter {
+ pub fn new(text: &str) -> Self {
+ DepNodeFilter { text: text.trim().to_string() }
+ }
+
+ /// Returns `true` if all nodes always pass the filter.
+ pub fn accepts_all(&self) -> bool {
+ self.text.is_empty()
+ }
+
+ /// Tests whether `node` meets the filter, returning true if so.
+ pub fn test<K: DepKind>(&self, node: &DepNode<K>) -> bool {
+ let debug_str = format!("{:?}", node);
+ self.text.split('&').map(|s| s.trim()).all(|f| debug_str.contains(f))
+ }
+}
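+
+// Illustrative example (node names made up): a filter accepts a node when
+// every `&`-separated fragment occurs in the node's `{:?}` output:
+//
+// ```ignore (illustrative)
+// let filter = DepNodeFilter::new("typeck & foo");
+// // Accepts `typeck(foo::bar)`; rejects `typeck(baz)` and `mir_built(foo)`.
+// ```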
+
+/// A filter like `F -> G` where `F` and `G` are valid dep-node
+/// filters. This can be used to test the source/target independently.
+pub struct EdgeFilter<K: DepKind> {
+ pub source: DepNodeFilter,
+ pub target: DepNodeFilter,
+ pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode<K>>>,
+}
+
+impl<K: DepKind> EdgeFilter<K> {
+ pub fn new(test: &str) -> Result<EdgeFilter<K>, Box<dyn Error>> {
+ let parts: Vec<_> = test.split("->").collect();
+ if parts.len() != 2 {
+ Err(format!("expected a filter like `a&b -> c&d`, not `{}`", test).into())
+ } else {
+ Ok(EdgeFilter {
+ source: DepNodeFilter::new(parts[0]),
+ target: DepNodeFilter::new(parts[1]),
+ index_to_node: Lock::new(FxHashMap::default()),
+ })
+ }
+ }
+
+ #[cfg(debug_assertions)]
+ pub fn test(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool {
+ self.source.test(source) && self.target.test(target)
+ }
+}
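+
+// Illustrative example (query names made up): the string is split on `->`
+// into independent source and target filters:
+//
+// ```ignore (illustrative)
+// let filter: EdgeFilter<K> = EdgeFilter::new("typeck -> mir_built")?;
+// // In debug builds, `filter.test(src, tgt)` trips only when `src` matches
+// // "typeck" and `tgt` matches "mir_built".
+// ```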
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
new file mode 100644
index 000000000..162c274d8
--- /dev/null
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -0,0 +1,176 @@
+//! This module defines the `DepNode` type which the compiler uses to represent
+//! nodes in the dependency graph. A `DepNode` consists of a `DepKind` (which
+//! specifies the kind of thing it represents, like a piece of HIR, MIR, etc)
+//! and a `Fingerprint`, a 128 bit hash value the exact meaning of which
+//! depends on the node's `DepKind`. Together, the kind and the fingerprint
+//! fully identify a dependency node, even across multiple compilation sessions.
+//! In other words, the value of the fingerprint does not depend on anything
+//! that is specific to a given compilation session, like an unpredictable
+//! interning key (e.g., NodeId, DefId, Symbol) or the numeric value of a
+//! pointer. The concept behind this could be compared to how git commit hashes
+//! uniquely identify a given commit, and it has a few advantages:
+//!
+//! * A `DepNode` can simply be serialized to disk and loaded in another session
+//! without the need to do any "rebasing" (like we have to do for Spans and
+//! NodeIds) or "retracing" like we had to do for `DefId` in earlier
+//! implementations of the dependency graph.
+//! * A `Fingerprint` is just a bunch of bits, which allows `DepNode` to
+//! implement `Copy`, `Sync`, `Send`, `Freeze`, etc.
+//! * Since we just have a bit pattern, `DepNode` can be mapped from disk into
+//! memory without any post-processing (e.g., "abomination-style" pointer
+//! reconstruction).
+//! * Because a `DepNode` is self-contained, we can instantiate `DepNodes` that
+//! refer to things that do not exist anymore. In previous implementations
+//! `DepNode` contained a `DefId`. A `DepNode` referring to something that
+//! had been removed between the previous and the current compilation session
+//! could not be instantiated because the current compilation session
+//! contained no `DefId` for the thing that had been removed.
+//!
+//! `DepNode` definition happens in `rustc_middle` with the `define_dep_nodes!()` macro.
+//! This macro defines the `DepKind` enum and a corresponding `DepConstructor` enum. The
+//! `DepConstructor` enum links a `DepKind` to the parameters that are needed at runtime in order
+//! to construct a valid `DepNode` fingerprint.
+//!
+//! Because the macro sees what parameters a given `DepKind` requires, it can
+//! "infer" some properties for each kind of `DepNode`:
+//!
+//! * Whether a `DepNode` of a given kind has any parameters at all. Some
+//! `DepNode`s could represent global concepts with only one value.
+//! * Whether it is possible, in principle, to reconstruct a query key from a
+//! given `DepNode`. Many `DepKind`s only require a single `DefId` parameter,
+//! in which case it is possible to map the node's fingerprint back to the
+//! `DefId` it was computed from. In other cases, too much information gets
+//! lost during fingerprint computation.
+
+use super::{DepContext, DepKind, FingerprintStyle};
+use crate::ich::StableHashingContext;
+
+use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use std::fmt;
+use std::hash::Hash;
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+pub struct DepNode<K> {
+ pub kind: K,
+ pub hash: PackedFingerprint,
+}
+
+impl<K: DepKind> DepNode<K> {
+ /// Creates a new, parameterless DepNode. This method will assert
+ /// that the DepNode corresponding to the given DepKind actually
+ /// does not require any parameters.
+ pub fn new_no_params<Ctxt>(tcx: Ctxt, kind: K) -> DepNode<K>
+ where
+ Ctxt: super::DepContext<DepKind = K>,
+ {
+ debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit);
+ DepNode { kind, hash: Fingerprint::ZERO.into() }
+ }
+
+ pub fn construct<Ctxt, Key>(tcx: Ctxt, kind: K, arg: &Key) -> DepNode<K>
+ where
+ Ctxt: super::DepContext<DepKind = K>,
+ Key: DepNodeParams<Ctxt>,
+ {
+ let hash = arg.to_fingerprint(tcx);
+ let dep_node = DepNode { kind, hash: hash.into() };
+
+ #[cfg(debug_assertions)]
+ {
+ if !tcx.fingerprint_style(kind).reconstructible()
+ && (tcx.sess().opts.unstable_opts.incremental_info
+ || tcx.sess().opts.unstable_opts.query_dep_graph)
+ {
+ tcx.dep_graph().register_dep_node_debug_str(dep_node, || arg.to_debug_str(tcx));
+ }
+ }
+
+ dep_node
+ }
+}
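+
+// Illustrative example (a sketch; `DepKind::RED` is the trait constant used
+// elsewhere in this crate for the forever-red node): parameterless nodes
+// always carry the zero fingerprint:
+//
+// ```ignore (illustrative)
+// let red = DepNode::new_no_params(tcx, DepKind::RED);
+// assert_eq!(red.hash, Fingerprint::ZERO.into());
+// ```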
+
+impl<K: DepKind> fmt::Debug for DepNode<K> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ K::debug_node(self, f)
+ }
+}
+
+pub trait DepNodeParams<Ctxt: DepContext>: fmt::Debug + Sized {
+ fn fingerprint_style() -> FingerprintStyle;
+
+ /// This method turns the parameters of a DepNodeConstructor into an opaque
+ /// Fingerprint to be used in DepNode.
+ /// Not all DepNodeParams support being turned into a Fingerprint (they
+ /// don't need to if the corresponding DepNode is anonymous).
+ fn to_fingerprint(&self, _: Ctxt) -> Fingerprint {
+ panic!("Not implemented. Accidentally called on anonymous node?")
+ }
+
+ fn to_debug_str(&self, _: Ctxt) -> String {
+ format!("{:?}", self)
+ }
+
+ /// This method tries to recover the query key from the given `DepNode`,
+ /// something which is needed when forcing `DepNode`s during red-green
+ /// evaluation. The query system will only call this method if
+ /// `fingerprint_style()` is not `FingerprintStyle::Opaque`.
+ /// It is always valid to return `None` here, in which case incremental
+ /// compilation will treat the query as having changed instead of forcing it.
+ fn recover(tcx: Ctxt, dep_node: &DepNode<Ctxt::DepKind>) -> Option<Self>;
+}
+
+impl<Ctxt: DepContext, T> DepNodeParams<Ctxt> for T
+where
+ T: for<'a> HashStable<StableHashingContext<'a>> + fmt::Debug,
+{
+ #[inline(always)]
+ default fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::Opaque
+ }
+
+ #[inline(always)]
+ default fn to_fingerprint(&self, tcx: Ctxt) -> Fingerprint {
+ tcx.with_stable_hashing_context(|mut hcx| {
+ let mut hasher = StableHasher::new();
+ self.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish()
+ })
+ }
+
+ #[inline(always)]
+ default fn to_debug_str(&self, _: Ctxt) -> String {
+ format!("{:?}", *self)
+ }
+
+ #[inline(always)]
+ default fn recover(_: Ctxt, _: &DepNode<Ctxt::DepKind>) -> Option<Self> {
+ None
+ }
+}
+
+/// A "work product" corresponds to a `.o` (or other) file that we
+/// save in between runs. These IDs do not have a `DefId` but rather
+/// some independent path or string that persists between runs without
+/// the need to be mapped or unmapped. (This ensures we can serialize
+/// them even in the absence of a tcx.)
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(Encodable, Decodable)]
+pub struct WorkProductId {
+ hash: Fingerprint,
+}
+
+impl WorkProductId {
+ pub fn from_cgu_name(cgu_name: &str) -> WorkProductId {
+ let mut hasher = StableHasher::new();
+ cgu_name.hash(&mut hasher);
+ WorkProductId { hash: hasher.finish() }
+ }
+}
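+
+// Illustrative usage (CGU name made up): equal names hash to equal ids
+// across sessions, which is what lets work products be re-identified:
+//
+// ```ignore (illustrative)
+// let id = WorkProductId::from_cgu_name("my_crate.f9a1b2-cgu.0");
+// assert_eq!(id, WorkProductId::from_cgu_name("my_crate.f9a1b2-cgu.0"));
+// ```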
+
+impl<HCX> HashStable<HCX> for WorkProductId {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
+ self.hash.hash_stable(hcx, hasher)
+ }
+}
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
new file mode 100644
index 000000000..8ff561327
--- /dev/null
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -0,0 +1,1288 @@
+use parking_lot::Mutex;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
+use rustc_data_structures::sharded::{self, Sharded};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::steal::Steal;
+use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
+use rustc_index::vec::IndexVec;
+use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
+use smallvec::{smallvec, SmallVec};
+use std::assert_matches::assert_matches;
+use std::collections::hash_map::Entry;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::marker::PhantomData;
+use std::sync::atomic::Ordering::Relaxed;
+
+use super::query::DepGraphQuery;
+use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
+use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId};
+use crate::ich::StableHashingContext;
+use crate::query::{QueryContext, QuerySideEffects};
+
+#[cfg(debug_assertions)]
+use {super::debug::EdgeFilter, std::env};
+
+#[derive(Clone)]
+pub struct DepGraph<K: DepKind> {
+ data: Option<Lrc<DepGraphData<K>>>,
+
+ /// This field is used for assigning DepNodeIndices when running in
+ /// non-incremental mode. Even in non-incremental mode we make sure that
+ /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
+ /// ID is used for self-profiling.
+ virtual_dep_node_index: Lrc<AtomicU32>,
+}
+
+rustc_index::newtype_index! {
+ pub struct DepNodeIndex { .. }
+}
+
+impl DepNodeIndex {
+ pub const INVALID: DepNodeIndex = DepNodeIndex::MAX;
+ pub const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::from_u32(0);
+ pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
+}
+
+impl std::convert::From<DepNodeIndex> for QueryInvocationId {
+ #[inline]
+ fn from(dep_node_index: DepNodeIndex) -> Self {
+ QueryInvocationId(dep_node_index.as_u32())
+ }
+}
+
+#[derive(PartialEq)]
+pub enum DepNodeColor {
+ Red,
+ Green(DepNodeIndex),
+}
+
+impl DepNodeColor {
+ #[inline]
+ pub fn is_green(self) -> bool {
+ match self {
+ DepNodeColor::Red => false,
+ DepNodeColor::Green(_) => true,
+ }
+ }
+}
+
+struct DepGraphData<K: DepKind> {
+ /// The new encoding of the dependency graph, optimized for red/green
+ /// tracking. The `current` field is the dependency graph of only the
+ /// current compilation session: We don't merge the previous dep-graph into
+ /// the current one anymore, but we do reference shared data to save space.
+ current: CurrentDepGraph<K>,
+
+ /// The dep-graph from the previous compilation session. It contains all
+ /// nodes and edges as well as all fingerprints of nodes that have them.
+ previous: SerializedDepGraph<K>,
+
+ colors: DepNodeColorMap,
+
+ processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>,
+
+ /// When we load, there may be `.o` files, cached MIR, or other such
+ /// things available to us. If we find that they are not dirty, we
+ /// load the path to the file storing those work-products here into
+ /// this map. We can later look for and extract that data.
+ previous_work_products: FxHashMap<WorkProductId, WorkProduct>,
+
+ dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
+
+ /// Used by incremental compilation tests to assert that
+ /// a particular query result was decoded from disk
+ /// (not just marked green)
+ debug_loaded_from_disk: Lock<FxHashSet<DepNode<K>>>,
+}
+
+pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
+where
+ R: for<'a> HashStable<StableHashingContext<'a>>,
+{
+ let mut stable_hasher = StableHasher::new();
+ result.hash_stable(hcx, &mut stable_hasher);
+ stable_hasher.finish()
+}
+
+impl<K: DepKind> DepGraph<K> {
+ pub fn new(
+ profiler: &SelfProfilerRef,
+ prev_graph: SerializedDepGraph<K>,
+ prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
+ encoder: FileEncoder,
+ record_graph: bool,
+ record_stats: bool,
+ ) -> DepGraph<K> {
+ let prev_graph_node_count = prev_graph.node_count();
+
+ let current = CurrentDepGraph::new(
+ profiler,
+ prev_graph_node_count,
+ encoder,
+ record_graph,
+ record_stats,
+ );
+
+ let colors = DepNodeColorMap::new(prev_graph_node_count);
+
+ // Instantiate a dependency-less node only once for anonymous queries.
+ let _green_node_index = current.intern_new_node(
+ profiler,
+ DepNode { kind: DepKind::NULL, hash: current.anon_id_seed.into() },
+ smallvec![],
+ Fingerprint::ZERO,
+ );
+ assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);
+
+ // Instantiate a dependency-less red node only once for anonymous queries.
+ let (_red_node_index, _prev_and_index) = current.intern_node(
+ profiler,
+ &prev_graph,
+ DepNode { kind: DepKind::RED, hash: Fingerprint::ZERO.into() },
+ smallvec![],
+ None,
+ false,
+ );
+ assert_eq!(_red_node_index, DepNodeIndex::FOREVER_RED_NODE);
+ assert!(matches!(_prev_and_index, None | Some((_, DepNodeColor::Red))));
+
+ DepGraph {
+ data: Some(Lrc::new(DepGraphData {
+ previous_work_products: prev_work_products,
+ dep_node_debug: Default::default(),
+ current,
+ processed_side_effects: Default::default(),
+ previous: prev_graph,
+ colors,
+ debug_loaded_from_disk: Default::default(),
+ })),
+ virtual_dep_node_index: Lrc::new(AtomicU32::new(0)),
+ }
+ }
+
+ pub fn new_disabled() -> DepGraph<K> {
+ DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
+ }
+
+ /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
+ #[inline]
+ pub fn is_fully_enabled(&self) -> bool {
+ self.data.is_some()
+ }
+
+ pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
+ if let Some(data) = &self.data {
+ data.current.encoder.borrow().with_query(f)
+ }
+ }
+
+ pub fn assert_ignored(&self) {
+ if let Some(..) = self.data {
+ K::read_deps(|task_deps| {
+ assert_matches!(
+ task_deps,
+ TaskDepsRef::Ignore,
+ "expected no task dependency tracking"
+ );
+ })
+ }
+ }
+
+ pub fn with_ignore<OP, R>(&self, op: OP) -> R
+ where
+ OP: FnOnce() -> R,
+ {
+ K::with_deps(TaskDepsRef::Ignore, op)
+ }
+
+ /// Used to wrap the deserialization of a query result from disk.
+ /// This method enforces that no new `DepNodes` are created during
+ /// query result deserialization.
+ ///
+ /// Enforcing this makes the query dep graph simpler - all nodes
+ /// must be created during the query execution, and should be
+ /// created from inside the 'body' of a query (the implementation
+ /// provided by a particular compiler crate).
+ ///
+ /// Consider the case of three queries `A`, `B`, and `C`, where
+ /// `A` invokes `B` and `B` invokes `C`:
+ ///
+ /// `A -> B -> C`
+ ///
+ /// Suppose that decoding the result of query `B` required re-computing
+ /// the query `C`. If we did not create a fresh `TaskDeps` when
+ /// decoding `B`, we would still be using the `TaskDeps` for query `A`
+ /// (if we needed to re-execute `A`). This would cause us to create
+ /// a new edge `A -> C`. If this edge did not previously
+ /// exist in the `DepGraph`, then we could end up with a different
+ /// `DepGraph` at the end of compilation, even if there were no
+ /// meaningful changes to the overall program (e.g. a newline was added).
+ /// In addition, this edge might cause a subsequent compilation run
+ /// to try to force `C` before marking other necessary nodes green. If
+ /// `C` did not exist in the new compilation session, then we could
+ /// get an ICE. Normally, we would have tried (and failed) to mark
+ /// some other query green (e.g. `item_children`) which was used
+ /// to obtain `C`, which would prevent us from ever trying to force
+ /// a non-existent `C`.
+ ///
+ /// It might be possible to enforce that all `DepNode`s read during
+ /// deserialization already exist in the previous `DepGraph`. In
+ /// the above example, we would invoke `C` during the deserialization
+ /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
+ /// of `B`, this would result in an edge `B -> C`. If that edge already
+ /// existed (with the same `DepPathHash`es), then it should be correct
+ /// to allow the invocation of the query to proceed during deserialization
+ /// of a query result. We would merely assert that the dep-graph fragment
+ /// that would have been added by invoking `C` while decoding `B`
+ /// is equivalent to the dep-graph fragment that we already instantiated for B
+ /// (at the point where we successfully marked B as green).
+ ///
+ /// However, this would require additional complexity
+ /// in the query infrastructure, and is not currently needed by the
+ /// decoding of any query results. Should the need arise in the future,
+ /// we should consider extending the query system with this functionality.
+ pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
+ where
+ OP: FnOnce() -> R,
+ {
+ K::with_deps(TaskDepsRef::Forbid, op)
+ }
+
+ /// Starts a new dep-graph task. Dep-graph tasks are specified
+ /// using a free function (`task`) and **not** a closure -- this
+ /// is intentional because we want to exercise tight control over
+ /// what state they have access to. In particular, we want to
+ /// prevent implicit 'leaks' of tracked state into the task (which
+ /// could then be read without generating correct edges in the
+ /// dep-graph -- see the [rustc dev guide] for more details on
+ /// the dep-graph). To this end, the task function gets exactly two
+ /// pieces of state: the context `cx` and an argument `arg`. Both
+ /// of these bits of state must be of some type that implements
+ /// `DepGraphSafe` and hence does not leak.
+ ///
+ /// The choice of two arguments is not fundamental. One argument
+ /// would work just as well, since multiple values can be
+ /// collected using tuples. However, using two arguments works out
+ /// to be quite convenient, since it is common to need a context
+ /// (`cx`) and some argument (e.g., a `DefId` identifying what
+ /// item to process).
+ ///
+ /// For cases where you need some other number of arguments:
+ ///
+ /// - If you only need one argument, just use `()` for the `arg`
+ /// parameter.
+ /// - If you need 3+ arguments, use a tuple for the
+ /// `arg` parameter.
+ ///
+ /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/incremental-compilation.html
+ pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+ &self,
+ key: DepNode<K>,
+ cx: Ctxt,
+ arg: A,
+ task: fn(Ctxt, A) -> R,
+ hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
+ ) -> (R, DepNodeIndex) {
+ if self.is_fully_enabled() {
+ self.with_task_impl(key, cx, arg, task, hash_result)
+ } else {
+ // Incremental compilation is turned off. We just execute the task
+ // without tracking. We still provide a dep-node index that uniquely
+ // identifies the task so that we have a cheap way of referring to
+ // the query for self-profiling.
+ (task(cx, arg), self.next_virtual_depnode_index())
+ }
+ }
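+
+ // Illustrative call (a sketch; `compute_type_of`, `def_id`, and the use
+ // of `DepKind::type_of` are made-up stand-ins). Note the task is a free
+ // `fn`, not a capturing closure:
+ //
+ // ```ignore (illustrative)
+ // fn compute_type_of<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Ty<'tcx> { /* ... */ }
+ //
+ // let (ty, dep_node_index) = tcx.dep_graph.with_task(
+ //     DepNode::construct(tcx, DepKind::type_of, &def_id),
+ //     tcx,
+ //     def_id,
+ //     compute_type_of,
+ //     Some(dep_graph::hash_result),
+ // );
+ // ```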
+
+ fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+ &self,
+ key: DepNode<K>,
+ cx: Ctxt,
+ arg: A,
+ task: fn(Ctxt, A) -> R,
+ hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
+ ) -> (R, DepNodeIndex) {
+ // This function is only called when the graph is enabled.
+ let data = self.data.as_ref().unwrap();
+
+ // If the following assertion triggers, it can have two reasons:
+ // 1. Something is wrong with DepNode creation, either here or
+ // in `DepGraph::try_mark_green()`.
+ // 2. Two distinct query keys get mapped to the same `DepNode`
+ // (see for example #48923).
+ assert!(
+ !self.dep_node_exists(&key),
+ "forcing query with already existing `DepNode`\n\
+ - query-key: {:?}\n\
+ - dep-node: {:?}",
+ arg,
+ key
+ );
+
+ let task_deps = if cx.dep_context().is_eval_always(key.kind) {
+ None
+ } else {
+ Some(Lock::new(TaskDeps {
+ #[cfg(debug_assertions)]
+ node: Some(key),
+ reads: SmallVec::new(),
+ read_set: Default::default(),
+ phantom_data: PhantomData,
+ }))
+ };
+
+ let task_deps_ref = match &task_deps {
+ Some(deps) => TaskDepsRef::Allow(deps),
+ None => TaskDepsRef::Ignore,
+ };
+
+ let result = K::with_deps(task_deps_ref, || task(cx, arg));
+ let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
+
+ let dcx = cx.dep_context();
+ let hashing_timer = dcx.profiler().incr_result_hashing();
+ let current_fingerprint =
+ hash_result.map(|f| dcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, &result)));
+
+ let print_status = cfg!(debug_assertions) && dcx.sess().opts.unstable_opts.dep_tasks;
+
+ // Intern the new `DepNode`.
+ let (dep_node_index, prev_and_color) = data.current.intern_node(
+ dcx.profiler(),
+ &data.previous,
+ key,
+ edges,
+ current_fingerprint,
+ print_status,
+ );
+
+ hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+ if let Some((prev_index, color)) = prev_and_color {
+ debug_assert!(
+ data.colors.get(prev_index).is_none(),
+ "DepGraph::with_task() - Duplicate DepNodeColor \
+ insertion for {:?}",
+ key
+ );
+
+ data.colors.insert(prev_index, color);
+ }
+
+ (result, dep_node_index)
+ }
+
+ /// Executes something within an "anonymous" task, that is, a task whose
+ /// `DepNode` is determined by the list of inputs it read from.
+ pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
+ &self,
+ cx: Ctxt,
+ dep_kind: K,
+ op: OP,
+ ) -> (R, DepNodeIndex)
+ where
+ OP: FnOnce() -> R,
+ {
+ debug_assert!(!cx.is_eval_always(dep_kind));
+
+ if let Some(ref data) = self.data {
+ let task_deps = Lock::new(TaskDeps::default());
+ let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
+ let task_deps = task_deps.into_inner();
+ let task_deps = task_deps.reads;
+
+ let dep_node_index = match task_deps.len() {
+ 0 => {
+ // Because the dep-node id of anon nodes is computed from the sets of its
+ // dependencies we already know what the ID of this dependency-less node is
+ // going to be (i.e. equal to the precomputed
+ // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
+ // a `StableHasher` and sending the node through interning.
+ DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
+ }
+ 1 => {
+ // When there is only one dependency, don't bother creating a node.
+ task_deps[0]
+ }
+ _ => {
+ // The dep node indices are hashed here instead of hashing the dep nodes of the
+ // dependencies. These indices may refer to different nodes per session, but this isn't
+ // a problem here because we ensure that the final dep node hash is per-session only by
+ // combining it with the per-session random number `anon_id_seed`. This hash only needs
+ // to map the dependencies to a single value on a per-session basis.
+ let mut hasher = StableHasher::new();
+ task_deps.hash(&mut hasher);
+
+ let target_dep_node = DepNode {
+ kind: dep_kind,
+ // Fingerprint::combine() is faster than sending Fingerprint
+ // through the StableHasher (at least as long as StableHasher
+ // is so slow).
+ hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
+ };
+
+ data.current.intern_new_node(
+ cx.profiler(),
+ target_dep_node,
+ task_deps,
+ Fingerprint::ZERO,
+ )
+ }
+ };
+
+ (result, dep_node_index)
+ } else {
+ (op(), self.next_virtual_depnode_index())
+ }
+ }
+
+ #[inline]
+ pub fn read_index(&self, dep_node_index: DepNodeIndex) {
+ if let Some(ref data) = self.data {
+ K::read_deps(|task_deps| {
+ let mut task_deps = match task_deps {
+ TaskDepsRef::Allow(deps) => deps.lock(),
+ TaskDepsRef::Ignore => return,
+ TaskDepsRef::Forbid => {
+ panic!("Illegal read of: {:?}", dep_node_index)
+ }
+ };
+ let task_deps = &mut *task_deps;
+
+ if cfg!(debug_assertions) {
+ data.current.total_read_count.fetch_add(1, Relaxed);
+ }
+
+ // As long as we only have a low number of reads we can avoid doing a hash
+ // insert and potentially allocating/reallocating the hashmap
+ let new_read = if task_deps.reads.len() < TASK_DEPS_READS_CAP {
+ task_deps.reads.iter().all(|other| *other != dep_node_index)
+ } else {
+ task_deps.read_set.insert(dep_node_index)
+ };
+ if new_read {
+ task_deps.reads.push(dep_node_index);
+ if task_deps.reads.len() == TASK_DEPS_READS_CAP {
+ // Fill `read_set` with what we have so far so we can use the hashset
+ // next time
+ task_deps.read_set.extend(task_deps.reads.iter().copied());
+ }
+
+ #[cfg(debug_assertions)]
+ {
+ if let Some(target) = task_deps.node {
+ if let Some(ref forbidden_edge) = data.current.forbidden_edge {
+ let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
+ if forbidden_edge.test(&src, &target) {
+ panic!("forbidden edge {:?} -> {:?} created", src, target)
+ }
+ }
+ }
+ }
+ } else if cfg!(debug_assertions) {
+ data.current.total_duplicate_read_count.fetch_add(1, Relaxed);
+ }
+ })
+ }
+ }
+
+ #[inline]
+ pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
+ self.dep_node_index_of_opt(dep_node).unwrap()
+ }
+
+ #[inline]
+ pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
+ let data = self.data.as_ref().unwrap();
+ let current = &data.current;
+
+ if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
+ current.prev_index_to_index.lock()[prev_index]
+ } else {
+ current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
+ }
+ }
+
+ #[inline]
+ pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+ self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
+ }
+
+ pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
+ self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
+ }
+
+ /// Checks whether a previous work product exists for `v` and, if
+ /// so, returns the path that leads to it. Used to skip doing work.
+ pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
+ self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
+ }
+
+ /// Access the map of work-products created during the cached run. Only
+ /// used during saving of the dep-graph.
+ pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
+ &self.data.as_ref().unwrap().previous_work_products
+ }
+
+ pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
+ self.data.as_ref().unwrap().debug_loaded_from_disk.lock().insert(dep_node);
+ }
+
+ pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
+ self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
+ }
+
+ #[inline(always)]
+ pub fn register_dep_node_debug_str<F>(&self, dep_node: DepNode<K>, debug_str_gen: F)
+ where
+ F: FnOnce() -> String,
+ {
+ let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;
+
+ if dep_node_debug.borrow().contains_key(&dep_node) {
+ return;
+ }
+ let debug_str = debug_str_gen();
+ dep_node_debug.borrow_mut().insert(dep_node, debug_str);
+ }
+
+ pub fn dep_node_debug_str(&self, dep_node: DepNode<K>) -> Option<String> {
+ self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
+ }
+
+ fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+ if let Some(ref data) = self.data {
+ if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
+ return data.colors.get(prev_index);
+ } else {
+ // This is a node that did not exist in the previous compilation session.
+ return None;
+ }
+ }
+
+ None
+ }
+
+ /// Tries to find a node index for the node `dep_node`.
+ ///
+ /// A node will have an index when it has already been marked green, or when we can mark it
+ /// green. This function will mark the current task as a reader of the specified node when
+ /// a node index can be found for that node.
+ pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
+ &self,
+ tcx: Ctxt,
+ dep_node: &DepNode<K>,
+ ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
+ debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
+
+ // Return None if the dep graph is disabled
+ let data = self.data.as_ref()?;
+
+ // Return None if the dep node didn't exist in the previous session
+ let prev_index = data.previous.node_to_index_opt(dep_node)?;
+
+ match data.colors.get(prev_index) {
+ Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
+ Some(DepNodeColor::Red) => None,
+ None => {
+ // This DepNode and the corresponding query invocation existed
+ // in the previous compilation session too, so we can try to
+ // mark it as green by recursively marking all of its
+ // dependencies green.
+ self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
+ .map(|dep_node_index| (prev_index, dep_node_index))
+ }
+ }
+ }
+
+ fn try_mark_parent_green<Ctxt: QueryContext<DepKind = K>>(
+ &self,
+ tcx: Ctxt,
+ data: &DepGraphData<K>,
+ parent_dep_node_index: SerializedDepNodeIndex,
+ dep_node: &DepNode<K>,
+ ) -> Option<()> {
+ let dep_dep_node_color = data.colors.get(parent_dep_node_index);
+ let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);
+
+ match dep_dep_node_color {
+ Some(DepNodeColor::Green(_)) => {
+ // This dependency has been marked as green before, we are
+ // still fine and can continue with checking the other
+ // dependencies.
+ debug!(
+ "try_mark_previous_green({:?}) --- found dependency {:?} to \
+ be immediately green",
+ dep_node, dep_dep_node,
+ );
+ return Some(());
+ }
+ Some(DepNodeColor::Red) => {
+ // We found a dependency the value of which has changed
+ // compared to the previous compilation session. We cannot
+ // mark the DepNode as green and also don't need to bother
+ // with checking any of the other dependencies.
+ debug!(
+ "try_mark_previous_green({:?}) - END - dependency {:?} was immediately red",
+ dep_node, dep_dep_node,
+ );
+ return None;
+ }
+ None => {}
+ }
+
+ // We don't know the state of this dependency. If it isn't
+ // an eval_always node, let's try to mark it green recursively.
+ if !tcx.dep_context().is_eval_always(dep_dep_node.kind) {
+ debug!(
+ "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
+ is unknown, trying to mark it green",
+ dep_node, dep_dep_node, dep_dep_node.hash,
+ );
+
+ let node_index =
+ self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node);
+ if node_index.is_some() {
+ debug!(
+ "try_mark_previous_green({:?}) --- managed to MARK dependency {:?} as green",
+ dep_node, dep_dep_node
+ );
+ return Some(());
+ }
+ }
+
+ // We failed to mark it green, so we try to force the query.
+ debug!(
+ "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
+ dep_node, dep_dep_node
+ );
+ if !tcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
+ // The DepNode could not be forced.
+ debug!(
+ "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
+ dep_node, dep_dep_node
+ );
+ return None;
+ }
+
+ let dep_dep_node_color = data.colors.get(parent_dep_node_index);
+
+ match dep_dep_node_color {
+ Some(DepNodeColor::Green(_)) => {
+ debug!(
+ "try_mark_previous_green({:?}) --- managed to FORCE dependency {:?} to green",
+ dep_node, dep_dep_node
+ );
+ return Some(());
+ }
+ Some(DepNodeColor::Red) => {
+ debug!(
+ "try_mark_previous_green({:?}) - END - dependency {:?} was red after forcing",
+ dep_node, dep_dep_node
+ );
+ return None;
+ }
+ None => {}
+ }
+
+ if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
+ panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
+ }
+
+ // If the query we just forced has resulted in
+ // some kind of compilation error, we cannot rely on
+ // the dep-node color having been properly updated.
+ // This means that the query system has reached an
+ // invalid state. We let the compiler continue (by
+ // returning `None`) so it can emit error messages
+ // and wind down, but rely on the fact that this
+ // invalid state will not be persisted to the
+ // incremental compilation cache because of
+ // compilation errors being present.
+ debug!(
+ "try_mark_previous_green({:?}) - END - dependency {:?} resulted in compilation error",
+ dep_node, dep_dep_node
+ );
+ return None;
+ }
+
+ /// Try to mark a dep-node which existed in the previous compilation session as green.
+ fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
+ &self,
+ tcx: Ctxt,
+ data: &DepGraphData<K>,
+ prev_dep_node_index: SerializedDepNodeIndex,
+ dep_node: &DepNode<K>,
+ ) -> Option<DepNodeIndex> {
+ debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);
+
+ #[cfg(not(parallel_compiler))]
+ {
+ debug_assert!(!self.dep_node_exists(dep_node));
+ debug_assert!(data.colors.get(prev_dep_node_index).is_none());
+ }
+
+ // We never try to mark eval_always nodes as green
+ debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
+
+ debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
+
+ let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
+
+ for &dep_dep_node_index in prev_deps {
+ self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)?
+ }
+
+ // If we got here without hitting a `return` that means that all
+ // dependencies of this DepNode could be marked as green. Therefore we
+ // can also mark this DepNode as green.
+
+ // There may be multiple threads trying to mark the same dep node green concurrently
+
+ // We allocate an entry for the node in the current dependency graph and
+ // add all the appropriate edges imported from the previous graph.
+ let dep_node_index = data.current.promote_node_and_deps_to_current(
+ tcx.dep_context().profiler(),
+ &data.previous,
+ prev_dep_node_index,
+ );
+
+ // ... emitting any stored diagnostic ...
+
+ // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
+ // Maybe store a list on disk and encode this fact in the DepNodeState
+ let side_effects = tcx.load_side_effects(prev_dep_node_index);
+
+ #[cfg(not(parallel_compiler))]
+ debug_assert!(
+ data.colors.get(prev_dep_node_index).is_none(),
+ "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
+ insertion for {:?}",
+ dep_node
+ );
+
+ if !side_effects.is_empty() {
+ self.emit_side_effects(tcx, data, dep_node_index, side_effects);
+ }
+
+ // ... and finally storing a "Green" entry in the color map.
+ // Multiple threads can all write the same color here
+ data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
+
+ debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
+ Some(dep_node_index)
+ }
+
+ /// Atomically emits some loaded diagnostics.
+ /// This may be called concurrently on multiple threads for the same dep node.
+ #[cold]
+ #[inline(never)]
+ fn emit_side_effects<Ctxt: QueryContext<DepKind = K>>(
+ &self,
+ tcx: Ctxt,
+ data: &DepGraphData<K>,
+ dep_node_index: DepNodeIndex,
+ side_effects: QuerySideEffects,
+ ) {
+ let mut processed = data.processed_side_effects.lock();
+
+ if processed.insert(dep_node_index) {
+ // We were the first to insert the node in the set so this thread
+ // must process side effects
+
+ // Promote the previous diagnostics to the current session.
+ tcx.store_side_effects(dep_node_index, side_effects.clone());
+
+ let handle = tcx.dep_context().sess().diagnostic();
+
+ for mut diagnostic in side_effects.diagnostics {
+ handle.emit_diagnostic(&mut diagnostic);
+ }
+ }
+ }
+
+ // Returns true if the given node has been marked as red during the
+ // current compilation session. Used in various assertions
+ pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
+ self.node_color(dep_node) == Some(DepNodeColor::Red)
+ }
+
+ // Returns true if the given node has been marked as green during the
+ // current compilation session. Used in various assertions
+ pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
+ self.node_color(dep_node).map_or(false, |c| c.is_green())
+ }
+
+ // This method loads all on-disk cacheable query results into memory, so
+ // they can be written out to the new cache file again. Most query results
+ // will already be in memory but in the case where we marked something as
+ // green but then did not need the value, that value will never have been
+ // loaded from disk.
+ //
+ // This method will only load queries that will end up in the disk cache.
+ // Other queries will not be executed.
+ pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
+ let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
+
+ let data = self.data.as_ref().unwrap();
+ for prev_index in data.colors.values.indices() {
+ match data.colors.get(prev_index) {
+ Some(DepNodeColor::Green(_)) => {
+ let dep_node = data.previous.index_to_node(prev_index);
+ tcx.try_load_from_on_disk_cache(dep_node);
+ }
+ None | Some(DepNodeColor::Red) => {
+ // We can skip red nodes because a node can only be marked
+ // as red if the query result was recomputed and thus is
+ // already in memory.
+ }
+ }
+ }
+ }
+
+ pub fn print_incremental_info(&self) {
+ if let Some(data) = &self.data {
+ data.current.encoder.borrow().print_incremental_info(
+ data.current.total_read_count.load(Relaxed),
+ data.current.total_duplicate_read_count.load(Relaxed),
+ )
+ }
+ }
+
+ pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult {
+ if let Some(data) = &self.data {
+ data.current.encoder.steal().finish(profiler)
+ } else {
+ Ok(0)
+ }
+ }
+
+ pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
+ let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
+ DepNodeIndex::from_u32(index)
+ }
+}
+
+/// A "work product" is an intermediate result that we save into the
+/// incremental directory for later re-use. The primary example are
+/// the object files that we save for each partition at code
+/// generation time.
+///
+/// Each work product is associated with a dep-node, representing the
+/// process that produced the work-product. If that dep-node is found
+/// to be dirty when we load up, then we will delete the work-product
+/// at load time. If the work-product is found to be clean, then we
+/// will keep a record in the `previous_work_products` list.
+///
+/// In addition, work products have an associated hash. This hash is
+/// an extra hash that can be used to decide if the work-product from
+/// a previous compilation can be re-used (in addition to the dirty
+/// edges check).
+///
+/// As the primary example, consider the object files we generate for
+/// each partition. In the first run, we create partitions based on
+/// the symbols that need to be compiled. For each partition P, we
+/// hash the symbols in P and create a `WorkProduct` record associated
+/// with `DepNode::CodegenUnit(P)`; the hash is the set of symbols
+/// in P.
+///
+/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
+/// judged to be clean (which means none of the things we read to
+/// generate the partition were found to be dirty), it will be loaded
+/// into previous work products. We will then regenerate the set of
+/// symbols in the partition P and hash them (note that new symbols
+/// may be added -- for example, new monomorphizations -- even if
+/// nothing in P changed!). We will compare that hash against the
+/// previous hash. If it matches up, we can reuse the object file.
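+///
+/// For illustration only (hypothetical CGU name and file names), a saved
+/// work product for a codegen unit might look like:
+///
+/// ```ignore (illustrative)
+/// WorkProduct {
+///     cgu_name: "regex.f10ba03eb5ec7975-cgu.0".to_string(),
+///     saved_files: FxHashMap::from_iter([
+///         ("o".to_string(), "regex.f10ba03eb5ec7975-cgu.0.o".to_string()),
+///         ("dwo".to_string(), "regex.f10ba03eb5ec7975-cgu.0.dwo".to_string()),
+///     ]),
+/// }
+/// ```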
+#[derive(Clone, Debug, Encodable, Decodable)]
+pub struct WorkProduct {
+ pub cgu_name: String,
+ /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
+ /// saved file and the key is some identifier for the type of file being saved.
+ ///
+ /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
+ /// the object file's path, and "dwo" to the dwarf object file's path.
+ pub saved_files: FxHashMap<String, String>,
+}
+
+// Index type for `DepNodeData`'s edges.
+rustc_index::newtype_index! {
+ struct EdgeIndex { .. }
+}
+
+/// `CurrentDepGraph` stores the dependency graph for the current session. It
+/// will be populated as we run queries or tasks. We never remove nodes from the
+/// graph: they are only added.
+///
+/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
+/// in memory. This is important, because these graph structures are some of the
+/// largest in the compiler.
+///
+/// For this reason, we avoid storing `DepNode`s more than once as map
+/// keys. The `new_node_to_index` map only contains nodes not in the previous
+/// graph, and we map nodes in the previous graph to indices via a two-step
+/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
+/// and the `prev_index_to_index` vector (which is more compact and faster than
+/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
+///
+/// This struct uses three locks internally. The `data`, `new_node_to_index`,
+/// and `prev_index_to_index` fields are locked separately. Operations that take
+/// a `DepNodeIndex` typically just access the `data` field.
+///
+/// We only need to manipulate at most two locks simultaneously:
+/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
+/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
+/// first, and `data` second.
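+///
+/// As a sketch (not actual code; `prev_graph` and `current` are stand-ins for
+/// the serialized graph and this struct), resolving a node that already
+/// existed in the previous session therefore takes two steps:
+///
+/// ```ignore (illustrative)
+/// let prev_index = prev_graph.node_to_index_opt(&dep_node).unwrap();
+/// let index: Option<DepNodeIndex> = current.prev_index_to_index.lock()[prev_index];
+/// ```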
+pub(super) struct CurrentDepGraph<K: DepKind> {
+ encoder: Steal<GraphEncoder<K>>,
+ new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
+ prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
+
+ /// Used to trap when a specific edge is added to the graph.
+ /// This is used for debug purposes and is only active with `debug_assertions`.
+ #[cfg(debug_assertions)]
+ forbidden_edge: Option<EdgeFilter<K>>,
+
+ /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
+ /// their edges. This has the beneficial side-effect that multiple anonymous
+ /// nodes can be coalesced into one without changing the semantics of the
+ /// dependency graph. However, the merging of nodes can lead to a subtle
+ /// problem during red-green marking: The color of an anonymous node from
+ /// the current session might "shadow" the color of the node with the same
+ /// ID from the previous session. In order to side-step this problem, we make
+ /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
+ /// This is implemented by mixing a session-key into the ID fingerprint of
+ /// each anon node. The session-key is just a random number generated when
+ /// the `DepGraph` is created.
+ anon_id_seed: Fingerprint,
+
+ /// These are simple counters used for profiling and debugging; they are
+ /// only active with `debug_assertions`.
+ total_read_count: AtomicU64,
+ total_duplicate_read_count: AtomicU64,
+
+ /// The cached event id for profiling node interning. This saves us
+ /// from having to look up the event id every time we intern a node,
+ /// which may incur too much overhead.
+ /// This will be `None` if self-profiling is disabled.
+ node_intern_event_id: Option<EventId>,
+}
+
+impl<K: DepKind> CurrentDepGraph<K> {
+ fn new(
+ profiler: &SelfProfilerRef,
+ prev_graph_node_count: usize,
+ encoder: FileEncoder,
+ record_graph: bool,
+ record_stats: bool,
+ ) -> CurrentDepGraph<K> {
+ use std::time::{SystemTime, UNIX_EPOCH};
+
+ let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
+ let nanos = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
+ let mut stable_hasher = StableHasher::new();
+ nanos.hash(&mut stable_hasher);
+ let anon_id_seed = stable_hasher.finish();
+
+ #[cfg(debug_assertions)]
+ let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
+ Ok(s) => match EdgeFilter::new(&s) {
+ Ok(f) => Some(f),
+ Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
+ },
+ Err(_) => None,
+ };
+
+ // We store a large collection of these in `prev_index_to_index` during
+ // non-full incremental builds, and want to ensure that the element size
+ // doesn't inadvertently increase.
+ static_assert_size!(Option<DepNodeIndex>, 4);
+
+ let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;
+
+ let node_intern_event_id = profiler
+ .get_or_alloc_cached_string("incr_comp_intern_dep_graph_node")
+ .map(EventId::from_label);
+
+ CurrentDepGraph {
+ encoder: Steal::new(GraphEncoder::new(
+ encoder,
+ prev_graph_node_count,
+ record_graph,
+ record_stats,
+ )),
+ new_node_to_index: Sharded::new(|| {
+ FxHashMap::with_capacity_and_hasher(
+ new_node_count_estimate / sharded::SHARDS,
+ Default::default(),
+ )
+ }),
+ prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
+ anon_id_seed,
+ #[cfg(debug_assertions)]
+ forbidden_edge,
+ total_read_count: AtomicU64::new(0),
+ total_duplicate_read_count: AtomicU64::new(0),
+ node_intern_event_id,
+ }
+ }
+
+ #[cfg(debug_assertions)]
+ fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) {
+ if let Some(forbidden_edge) = &self.forbidden_edge {
+ forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
+ }
+ }
+
+ /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
+ /// Assumes that this is a node that has no equivalent in the previous dep-graph.
+ fn intern_new_node(
+ &self,
+ profiler: &SelfProfilerRef,
+ key: DepNode<K>,
+ edges: EdgesVec,
+ current_fingerprint: Fingerprint,
+ ) -> DepNodeIndex {
+ match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) {
+ Entry::Occupied(entry) => *entry.get(),
+ Entry::Vacant(entry) => {
+ let dep_node_index =
+ self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
+ entry.insert(dep_node_index);
+ #[cfg(debug_assertions)]
+ self.record_edge(dep_node_index, key);
+ dep_node_index
+ }
+ }
+ }
+
+ fn intern_node(
+ &self,
+ profiler: &SelfProfilerRef,
+ prev_graph: &SerializedDepGraph<K>,
+ key: DepNode<K>,
+ edges: EdgesVec,
+ fingerprint: Option<Fingerprint>,
+ print_status: bool,
+ ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
+ let print_status = cfg!(debug_assertions) && print_status;
+
+ // Get timer for profiling `DepNode` interning
+ let _node_intern_timer =
+ self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));
+
+ if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
+ // Determine the color and index of the new `DepNode`.
+ if let Some(fingerprint) = fingerprint {
+ if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
+ if print_status {
+ eprintln!("[task::green] {:?}", key);
+ }
+
+ // This is a green node: it existed in the previous compilation,
+ // its query was re-executed, and it has the same result as before.
+ let mut prev_index_to_index = self.prev_index_to_index.lock();
+
+ let dep_node_index = match prev_index_to_index[prev_index] {
+ Some(dep_node_index) => dep_node_index,
+ None => {
+ let dep_node_index =
+ self.encoder.borrow().send(profiler, key, fingerprint, edges);
+ prev_index_to_index[prev_index] = Some(dep_node_index);
+ dep_node_index
+ }
+ };
+
+ #[cfg(debug_assertions)]
+ self.record_edge(dep_node_index, key);
+ (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
+ } else {
+ if print_status {
+ eprintln!("[task::red] {:?}", key);
+ }
+
+ // This is a red node: it existed in the previous compilation, its query
+ // was re-executed, but it has a different result from before.
+ let mut prev_index_to_index = self.prev_index_to_index.lock();
+
+ let dep_node_index = match prev_index_to_index[prev_index] {
+ Some(dep_node_index) => dep_node_index,
+ None => {
+ let dep_node_index =
+ self.encoder.borrow().send(profiler, key, fingerprint, edges);
+ prev_index_to_index[prev_index] = Some(dep_node_index);
+ dep_node_index
+ }
+ };
+
+ #[cfg(debug_assertions)]
+ self.record_edge(dep_node_index, key);
+ (dep_node_index, Some((prev_index, DepNodeColor::Red)))
+ }
+ } else {
+ if print_status {
+ eprintln!("[task::unknown] {:?}", key);
+ }
+
+ // This is a red node, effectively: it existed in the previous compilation
+ // session, its query was re-executed, but it doesn't compute a result hash
+ // (i.e. it represents a `no_hash` query), so we have no way of determining
+ // whether or not the result was the same as before.
+ let mut prev_index_to_index = self.prev_index_to_index.lock();
+
+ let dep_node_index = match prev_index_to_index[prev_index] {
+ Some(dep_node_index) => dep_node_index,
+ None => {
+ let dep_node_index =
+ self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges);
+ prev_index_to_index[prev_index] = Some(dep_node_index);
+ dep_node_index
+ }
+ };
+
+ #[cfg(debug_assertions)]
+ self.record_edge(dep_node_index, key);
+ (dep_node_index, Some((prev_index, DepNodeColor::Red)))
+ }
+ } else {
+ if print_status {
+ eprintln!("[task::new] {:?}", key);
+ }
+
+ let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);
+
+ // This is a new node: it didn't exist in the previous compilation session.
+ let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint);
+
+ (dep_node_index, None)
+ }
+ }
+
+ fn promote_node_and_deps_to_current(
+ &self,
+ profiler: &SelfProfilerRef,
+ prev_graph: &SerializedDepGraph<K>,
+ prev_index: SerializedDepNodeIndex,
+ ) -> DepNodeIndex {
+ self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
+
+ let mut prev_index_to_index = self.prev_index_to_index.lock();
+
+ match prev_index_to_index[prev_index] {
+ Some(dep_node_index) => dep_node_index,
+ None => {
+ let key = prev_graph.index_to_node(prev_index);
+ let dep_node_index = self.encoder.borrow().send(
+ profiler,
+ key,
+ prev_graph.fingerprint_by_index(prev_index),
+ prev_graph
+ .edge_targets_from(prev_index)
+ .iter()
+ .map(|i| prev_index_to_index[*i].unwrap())
+ .collect(),
+ );
+ prev_index_to_index[prev_index] = Some(dep_node_index);
+ #[cfg(debug_assertions)]
+ self.record_edge(dep_node_index, key);
+ dep_node_index
+ }
+ }
+ }
+
+ #[inline]
+ fn debug_assert_not_in_new_nodes(
+ &self,
+ prev_graph: &SerializedDepGraph<K>,
+ prev_index: SerializedDepNodeIndex,
+ ) {
+ let node = &prev_graph.index_to_node(prev_index);
+ debug_assert!(
+ !self.new_node_to_index.get_shard_by_value(node).lock().contains_key(node),
+ "node from previous graph present in new node collection"
+ );
+ }
+}
+
+/// The capacity of the `reads` field's `SmallVec`.
+const TASK_DEPS_READS_CAP: usize = 8;
+type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>;
+
+#[derive(Debug, Clone, Copy)]
+pub enum TaskDepsRef<'a, K: DepKind> {
+ /// New dependencies can be added to the
+ /// `TaskDeps`. This is used when executing a 'normal' query
+ /// (no `eval_always` modifier).
+ Allow(&'a Lock<TaskDeps<K>>),
+ /// New dependencies are ignored. This is used when
+ /// executing an `eval_always` query, since there's no
+ /// need to track dependencies for a query that's always
+ /// re-executed. This is also used for `dep_graph.with_ignore`.
+ Ignore,
+ /// Any attempt to add new dependencies will cause a panic.
+ /// This is used when decoding a query result from disk,
+ /// to ensure that the decoding process doesn't itself
+ /// require the execution of any queries.
+ Forbid,
+}
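+
+// As an illustration (a sketch; the real call sites live in `DepGraph`), a
+// task would be run under one of these modes via `DepKind::with_deps`:
+//
+//     // Track reads into `deps` while executing a normal query:
+//     K::with_deps(TaskDepsRef::Allow(&deps), || compute());
+//     // Execute without tracking, e.g. for `eval_always` queries:
+//     K::with_deps(TaskDepsRef::Ignore, || compute());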
+
+#[derive(Debug)]
+pub struct TaskDeps<K: DepKind> {
+ #[cfg(debug_assertions)]
+ node: Option<DepNode<K>>,
+ reads: EdgesVec,
+ read_set: FxHashSet<DepNodeIndex>,
+ phantom_data: PhantomData<DepNode<K>>,
+}
+
+impl<K: DepKind> Default for TaskDeps<K> {
+ fn default() -> Self {
+ Self {
+ #[cfg(debug_assertions)]
+ node: None,
+ reads: EdgesVec::new(),
+ read_set: FxHashSet::default(),
+ phantom_data: PhantomData,
+ }
+ }
+}
+
+// A data structure that stores `Option<DepNodeColor>` values as a contiguous
+// array, using one `u32` per entry.
+struct DepNodeColorMap {
+ values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
+}
+
+const COMPRESSED_NONE: u32 = 0;
+const COMPRESSED_RED: u32 = 1;
+const COMPRESSED_FIRST_GREEN: u32 = 2;
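+
+// A worked example of the packing (sketch): `None` is stored as `0`, `Red` as
+// `1`, and `Green(DepNodeIndex(5))` as `5 + COMPRESSED_FIRST_GREEN == 7`;
+// `get` subtracts `COMPRESSED_FIRST_GREEN` again to recover the index.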
+
+impl DepNodeColorMap {
+ fn new(size: usize) -> DepNodeColorMap {
+ DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
+ }
+
+ #[inline]
+ fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
+ match self.values[index].load(Ordering::Acquire) {
+ COMPRESSED_NONE => None,
+ COMPRESSED_RED => Some(DepNodeColor::Red),
+ value => {
+ Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
+ }
+ }
+ }
+
+ fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
+ self.values[index].store(
+ match color {
+ DepNodeColor::Red => COMPRESSED_RED,
+ DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
+ },
+ Ordering::Release,
+ )
+ }
+}
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
new file mode 100644
index 000000000..342d95ca4
--- /dev/null
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -0,0 +1,106 @@
+pub mod debug;
+mod dep_node;
+mod graph;
+mod query;
+mod serialized;
+
+pub use dep_node::{DepNode, DepNodeParams, WorkProductId};
+pub use graph::{
+ hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef, WorkProduct,
+};
+pub use query::DepGraphQuery;
+pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
+
+use crate::ich::StableHashingContext;
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_serialize::{opaque::FileEncoder, Encodable};
+use rustc_session::Session;
+
+use std::fmt;
+use std::hash::Hash;
+
+pub trait DepContext: Copy {
+ type DepKind: self::DepKind;
+
+ /// Create a hashing context for hashing new results.
+ fn with_stable_hashing_context<R>(&self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R;
+
+ /// Access the DepGraph.
+ fn dep_graph(&self) -> &DepGraph<Self::DepKind>;
+
+ /// Access the profiler.
+ fn profiler(&self) -> &SelfProfilerRef;
+
+ /// Access the compiler session.
+ fn sess(&self) -> &Session;
+
+ /// Returns whether this kind always requires evaluation.
+ fn is_eval_always(&self, kind: Self::DepKind) -> bool;
+
+ fn fingerprint_style(&self, kind: Self::DepKind) -> FingerprintStyle;
+
+ /// Try to force a dep node to execute and see if it's green.
+ fn try_force_from_dep_node(&self, dep_node: DepNode<Self::DepKind>) -> bool;
+
+ /// Load data from the on-disk cache.
+ fn try_load_from_on_disk_cache(&self, dep_node: DepNode<Self::DepKind>);
+}
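+
+// For illustration: in rustc itself it is `TyCtxt` (in `rustc_middle`) that
+// implements `DepContext`, which is what gives queries access to the
+// dep-graph, profiler and session declared above.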
+
+pub trait HasDepContext: Copy {
+ type DepKind: self::DepKind;
+ type DepContext: self::DepContext<DepKind = Self::DepKind>;
+
+ fn dep_context(&self) -> &Self::DepContext;
+}
+
+impl<T: DepContext> HasDepContext for T {
+ type DepKind = T::DepKind;
+ type DepContext = Self;
+
+ fn dep_context(&self) -> &Self::DepContext {
+ self
+ }
+}
+
+/// Describes the contents of the fingerprint generated by a given query.
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+pub enum FingerprintStyle {
+ /// The fingerprint is actually a DefPathHash.
+ DefPathHash,
+ /// Query key was `()` or equivalent, so fingerprint is just zero.
+ Unit,
+ /// Some opaque hash.
+ Opaque,
+}
+
+impl FingerprintStyle {
+ #[inline]
+ pub fn reconstructible(self) -> bool {
+ match self {
+ FingerprintStyle::DefPathHash | FingerprintStyle::Unit => true,
+ FingerprintStyle::Opaque => false,
+ }
+ }
+}
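+
+// For example: a `DefPathHash` or `Unit` fingerprint carries enough
+// information to rebuild the query key when a dep node must be forced,
+// whereas an `Opaque` hash cannot be mapped back to its key.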
+
+/// Describes the different families of dependency nodes.
+pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static {
+ /// DepKind to use when incr. comp. is turned off.
+ const NULL: Self;
+
+ /// DepKind to use to create the initial forever-red node.
+ const RED: Self;
+
+ /// Implementation of `std::fmt::Debug` for `DepNode`.
+ fn debug_node(node: &DepNode<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result;
+
+ /// Execute the operation with provided dependencies.
+ fn with_deps<OP, R>(deps: TaskDepsRef<'_, Self>, op: OP) -> R
+ where
+ OP: FnOnce() -> R;
+
+ /// Access dependencies from current implicit context.
+ fn read_deps<OP>(op: OP)
+ where
+ OP: for<'a> FnOnce(TaskDepsRef<'a, Self>);
+}
diff --git a/compiler/rustc_query_system/src/dep_graph/query.rs b/compiler/rustc_query_system/src/dep_graph/query.rs
new file mode 100644
index 000000000..27b3b5e13
--- /dev/null
+++ b/compiler/rustc_query_system/src/dep_graph/query.rs
@@ -0,0 +1,68 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
+use rustc_index::vec::IndexVec;
+
+use super::{DepKind, DepNode, DepNodeIndex};
+
+pub struct DepGraphQuery<K> {
+ pub graph: Graph<DepNode<K>, ()>,
+ pub indices: FxHashMap<DepNode<K>, NodeIndex>,
+ pub dep_index_to_index: IndexVec<DepNodeIndex, Option<NodeIndex>>,
+}
+
+impl<K: DepKind> DepGraphQuery<K> {
+ pub fn new(prev_node_count: usize) -> DepGraphQuery<K> {
+ let node_count = prev_node_count + prev_node_count / 4;
+ let edge_count = 6 * node_count;
+
+ let graph = Graph::with_capacity(node_count, edge_count);
+ let indices = FxHashMap::default();
+ let dep_index_to_index = IndexVec::new();
+
+ DepGraphQuery { graph, indices, dep_index_to_index }
+ }
+
+ pub fn push(&mut self, index: DepNodeIndex, node: DepNode<K>, edges: &[DepNodeIndex]) {
+ let source = self.graph.add_node(node);
+ if index.index() >= self.dep_index_to_index.len() {
+ self.dep_index_to_index.resize(index.index() + 1, None);
+ }
+ self.dep_index_to_index[index] = Some(source);
+ self.indices.insert(node, source);
+
+ for &target in edges.iter() {
+ let target = self.dep_index_to_index[target];
+ // We may miss the edges that are pushed while the `DepGraphQuery` is being accessed.
+ // Skip them to avoid issues.
+ if let Some(target) = target {
+ self.graph.add_edge(source, target, ());
+ }
+ }
+ }
+
+ pub fn nodes(&self) -> Vec<&DepNode<K>> {
+ self.graph.all_nodes().iter().map(|n| &n.data).collect()
+ }
+
+ pub fn edges(&self) -> Vec<(&DepNode<K>, &DepNode<K>)> {
+ self.graph
+ .all_edges()
+ .iter()
+ .map(|edge| (edge.source(), edge.target()))
+ .map(|(s, t)| (self.graph.node_data(s), self.graph.node_data(t)))
+ .collect()
+ }
+
+ fn reachable_nodes(&self, node: &DepNode<K>, direction: Direction) -> Vec<&DepNode<K>> {
+ if let Some(&index) = self.indices.get(node) {
+ self.graph.depth_traverse(index, direction).map(|s| self.graph.node_data(s)).collect()
+ } else {
+ vec![]
+ }
+ }
+
+ /// All nodes that can reach `node`.
+ pub fn transitive_predecessors(&self, node: &DepNode<K>) -> Vec<&DepNode<K>> {
+ self.reachable_nodes(node, INCOMING)
+ }
+}
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
new file mode 100644
index 000000000..3b20ec70d
--- /dev/null
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -0,0 +1,330 @@
+//! The data that we will serialize and deserialize.
+//!
+//! The dep-graph is serialized as a sequence of NodeInfo, with the dependencies
+//! specified inline. The total node and edge counts are stored as the last
+//! 16 bytes of the file, so we can find them easily at decoding time.
+//!
+//! The serialization is performed on-demand when each node is emitted. Using this
+//! scheme, we do not need to keep the current graph in memory.
+//!
+//! The deserialization is performed manually, in order to convert from the stored
+//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the
+//! node and edge counts are stored at the end of the file, all the arrays can be
+//! pre-allocated with the right length.
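+//!
+//! As a sketch, the resulting file layout is roughly:
+//!
+//! ```text
+//! NodeInfo_0 | NodeInfo_1 | ... | NodeInfo_{n-1} | node_count: u64 | edge_count: u64
+//! ```
+//!
+//! where each NodeInfo carries a DepNode, its Fingerprint and its edge list.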
+
+use super::query::DepGraphQuery;
+use super::{DepKind, DepNode, DepNodeIndex};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::sync::Lock;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder};
+use rustc_serialize::{Decodable, Decoder, Encodable};
+use smallvec::SmallVec;
+use std::convert::TryInto;
+
+// The maximum value of `SerializedDepNodeIndex` leaves the upper bit
+// unused so that we can store two index types in `CompressedHybridIndex`,
+// and use that bit to encode which index type it contains.
+rustc_index::newtype_index! {
+ pub struct SerializedDepNodeIndex {
+ MAX = 0x7FFF_FFFF
+ }
+}
+
+/// Data for use when recompiling the **current crate**.
+#[derive(Debug)]
+pub struct SerializedDepGraph<K: DepKind> {
+ /// The set of all DepNodes in the graph
+ nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>>,
+ /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
+ /// the DepNode at the same index in the nodes vector.
+ fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
+ /// For each DepNode, stores the list of edges originating from that
+ /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
+ /// which holds the actual DepNodeIndices of the target nodes.
+ edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)>,
+ /// A flattened list of all edge targets in the graph. Edge sources are
+ /// implicit in edge_list_indices.
+ edge_list_data: Vec<SerializedDepNodeIndex>,
+ /// Reciprocal map to `nodes`.
+ index: FxHashMap<DepNode<K>, SerializedDepNodeIndex>,
+}
+
+impl<K: DepKind> Default for SerializedDepGraph<K> {
+ fn default() -> Self {
+ SerializedDepGraph {
+ nodes: Default::default(),
+ fingerprints: Default::default(),
+ edge_list_indices: Default::default(),
+ edge_list_data: Default::default(),
+ index: Default::default(),
+ }
+ }
+}
+
+impl<K: DepKind> SerializedDepGraph<K> {
+ #[inline]
+ pub fn edge_targets_from(&self, source: SerializedDepNodeIndex) -> &[SerializedDepNodeIndex] {
+ let targets = self.edge_list_indices[source];
+ &self.edge_list_data[targets.0 as usize..targets.1 as usize]
+ }
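+
+ // Example for `edge_targets_from` (sketch): if `edge_list_indices[source]`
+ // is `(3, 5)`, the returned slice is `edge_list_data[3..5]`, i.e. the two
+ // targets stored at positions 3 and 4 of the flattened edge list.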
+
+ #[inline]
+ pub fn index_to_node(&self, dep_node_index: SerializedDepNodeIndex) -> DepNode<K> {
+ self.nodes[dep_node_index]
+ }
+
+ #[inline]
+ pub fn node_to_index_opt(&self, dep_node: &DepNode<K>) -> Option<SerializedDepNodeIndex> {
+ self.index.get(dep_node).cloned()
+ }
+
+ #[inline]
+ pub fn fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
+ self.index.get(dep_node).map(|&node_index| self.fingerprints[node_index])
+ }
+
+ #[inline]
+ pub fn fingerprint_by_index(&self, dep_node_index: SerializedDepNodeIndex) -> Fingerprint {
+ self.fingerprints[dep_node_index]
+ }
+
+ pub fn node_count(&self) -> usize {
+ self.index.len()
+ }
+}
+
+impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
+ for SerializedDepGraph<K>
+{
+ #[instrument(level = "debug", skip(d))]
+ fn decode(d: &mut MemDecoder<'a>) -> SerializedDepGraph<K> {
+ let start_position = d.position();
+
+ // The last 16 bytes are the node count and edge count.
+ debug!("position: {:?}", d.position());
+ d.set_position(d.data.len() - 2 * IntEncodedWithFixedSize::ENCODED_SIZE);
+ debug!("position: {:?}", d.position());
+
+ let node_count = IntEncodedWithFixedSize::decode(d).0 as usize;
+ let edge_count = IntEncodedWithFixedSize::decode(d).0 as usize;
+ debug!(?node_count, ?edge_count);
+
+ debug!("position: {:?}", d.position());
+ d.set_position(start_position);
+ debug!("position: {:?}", d.position());
+
+ let mut nodes = IndexVec::with_capacity(node_count);
+ let mut fingerprints = IndexVec::with_capacity(node_count);
+ let mut edge_list_indices = IndexVec::with_capacity(node_count);
+ let mut edge_list_data = Vec::with_capacity(edge_count);
+
+ for _index in 0..node_count {
+ let dep_node: DepNode<K> = Decodable::decode(d);
+ let _i: SerializedDepNodeIndex = nodes.push(dep_node);
+ debug_assert_eq!(_i.index(), _index);
+
+ let fingerprint: Fingerprint = Decodable::decode(d);
+ let _i: SerializedDepNodeIndex = fingerprints.push(fingerprint);
+ debug_assert_eq!(_i.index(), _index);
+
+ // Deserialize edges -- sequence of DepNodeIndex
+ let len = d.read_usize();
+ let start = edge_list_data.len().try_into().unwrap();
+ for _ in 0..len {
+ let edge = Decodable::decode(d);
+ edge_list_data.push(edge);
+ }
+ let end = edge_list_data.len().try_into().unwrap();
+ let _i: SerializedDepNodeIndex = edge_list_indices.push((start, end));
+ debug_assert_eq!(_i.index(), _index);
+ }
+
+ let index: FxHashMap<_, _> =
+ nodes.iter_enumerated().map(|(idx, &dep_node)| (dep_node, idx)).collect();
+
+ SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index }
+ }
+}
+
+#[derive(Debug, Encodable, Decodable)]
+pub struct NodeInfo<K: DepKind> {
+ node: DepNode<K>,
+ fingerprint: Fingerprint,
+ edges: SmallVec<[DepNodeIndex; 8]>,
+}
+
+struct Stat<K: DepKind> {
+ kind: K,
+ node_counter: u64,
+ edge_counter: u64,
+}
+
+struct EncoderState<K: DepKind> {
+ encoder: FileEncoder,
+ total_node_count: usize,
+ total_edge_count: usize,
+ stats: Option<FxHashMap<K, Stat<K>>>,
+}
+
+impl<K: DepKind> EncoderState<K> {
+ fn new(encoder: FileEncoder, record_stats: bool) -> Self {
+ Self {
+ encoder,
+ total_edge_count: 0,
+ total_node_count: 0,
+ stats: record_stats.then(FxHashMap::default),
+ }
+ }
+
+ fn encode_node(
+ &mut self,
+ node: &NodeInfo<K>,
+ record_graph: &Option<Lock<DepGraphQuery<K>>>,
+ ) -> DepNodeIndex {
+ let index = DepNodeIndex::new(self.total_node_count);
+ self.total_node_count += 1;
+
+ let edge_count = node.edges.len();
+ self.total_edge_count += edge_count;
+
+ if let Some(record_graph) = &record_graph {
+ // Do not ICE when a query is called from within `with_query`.
+ if let Some(record_graph) = &mut record_graph.try_lock() {
+ record_graph.push(index, node.node, &node.edges);
+ }
+ }
+
+ if let Some(stats) = &mut self.stats {
+ let kind = node.node.kind;
+
+ let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 });
+ stat.node_counter += 1;
+ stat.edge_counter += edge_count as u64;
+ }
+
+ let encoder = &mut self.encoder;
+ node.encode(encoder);
+ index
+ }
+
+ fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult {
+ let Self { mut encoder, total_node_count, total_edge_count, stats: _ } = self;
+
+ let node_count = total_node_count.try_into().unwrap();
+ let edge_count = total_edge_count.try_into().unwrap();
+
+ debug!(?node_count, ?edge_count);
+ debug!("position: {:?}", encoder.position());
+ IntEncodedWithFixedSize(node_count).encode(&mut encoder);
+ IntEncodedWithFixedSize(edge_count).encode(&mut encoder);
+ debug!("position: {:?}", encoder.position());
+ // Drop the encoder so that nothing is written after the counts.
+ let result = encoder.finish();
+ if let Ok(position) = result {
+ // FIXME(rylev): we hardcode the dep graph file name so we
+ // don't need a dependency on rustc_incremental just for that.
+ profiler.artifact_size("dep_graph", "dep-graph.bin", position as u64);
+ }
+ result
+ }
+}
+
+pub struct GraphEncoder<K: DepKind> {
+ status: Lock<EncoderState<K>>,
+ record_graph: Option<Lock<DepGraphQuery<K>>>,
+}
+
+impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> {
+ pub fn new(
+ encoder: FileEncoder,
+ prev_node_count: usize,
+ record_graph: bool,
+ record_stats: bool,
+ ) -> Self {
+ let record_graph =
+ if record_graph { Some(Lock::new(DepGraphQuery::new(prev_node_count))) } else { None };
+ let status = Lock::new(EncoderState::new(encoder, record_stats));
+ GraphEncoder { status, record_graph }
+ }
+
+ pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) {
+ if let Some(record_graph) = &self.record_graph {
+ f(&record_graph.lock())
+ }
+ }
+
+ pub(crate) fn print_incremental_info(
+ &self,
+ total_read_count: u64,
+ total_duplicate_read_count: u64,
+ ) {
+ let status = self.status.lock();
+ if let Some(record_stats) = &status.stats {
+ let mut stats: Vec<_> = record_stats.values().collect();
+ stats.sort_by_key(|s| -(s.node_counter as i64));
+
+ const SEPARATOR: &str = "[incremental] --------------------------------\
+ ----------------------------------------------\
+ ------------";
+
+ eprintln!("[incremental]");
+ eprintln!("[incremental] DepGraph Statistics");
+ eprintln!("{}", SEPARATOR);
+ eprintln!("[incremental]");
+ eprintln!("[incremental] Total Node Count: {}", status.total_node_count);
+ eprintln!("[incremental] Total Edge Count: {}", status.total_edge_count);
+
+ if cfg!(debug_assertions) {
+ eprintln!("[incremental] Total Edge Reads: {}", total_read_count);
+ eprintln!(
+ "[incremental] Total Duplicate Edge Reads: {}",
+ total_duplicate_read_count
+ );
+ }
+
+ eprintln!("[incremental]");
+ eprintln!(
+ "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|",
+ "Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count"
+ );
+ eprintln!("{}", SEPARATOR);
+
+ for stat in stats {
+ let node_kind_ratio =
+ (100.0 * (stat.node_counter as f64)) / (status.total_node_count as f64);
+ let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64);
+
+ eprintln!(
+ "[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
+ format!("{:?}", stat.kind),
+ node_kind_ratio,
+ stat.node_counter,
+ node_kind_avg_edges,
+ );
+ }
+
+ eprintln!("{}", SEPARATOR);
+ eprintln!("[incremental]");
+ }
+ }
+
+ pub(crate) fn send(
+ &self,
+ profiler: &SelfProfilerRef,
+ node: DepNode<K>,
+ fingerprint: Fingerprint,
+ edges: SmallVec<[DepNodeIndex; 8]>,
+ ) -> DepNodeIndex {
+ let _prof_timer = profiler.generic_activity("incr_comp_encode_dep_graph");
+ let node = NodeInfo { node, fingerprint, edges };
+ self.status.lock().encode_node(&node, &self.record_graph)
+ }
+
+ pub fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult {
+ let _prof_timer = profiler.generic_activity("incr_comp_encode_dep_graph");
+ self.status.into_inner().finish(profiler)
+ }
+}
diff --git a/compiler/rustc_query_system/src/ich/hcx.rs b/compiler/rustc_query_system/src/ich/hcx.rs
new file mode 100644
index 000000000..217fac341
--- /dev/null
+++ b/compiler/rustc_query_system/src/ich/hcx.rs
@@ -0,0 +1,223 @@
+use crate::ich;
+
+use rustc_ast as ast;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_data_structures::stable_hasher::{HashStable, HashingControls, StableHasher};
+use rustc_data_structures::sync::Lrc;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::definitions::{DefPathHash, Definitions};
+use rustc_index::vec::IndexVec;
+use rustc_session::cstore::CrateStore;
+use rustc_session::Session;
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::Symbol;
+use rustc_span::{BytePos, CachingSourceMapView, SourceFile, Span, SpanData};
+
+/// This is the context state available during incr. comp. hashing. It contains
+/// enough information to transform `DefId`s and `HirId`s into stable `DefPath`s (i.e.,
+/// a reference to the `TyCtxt`) and it holds a few caches for speeding up various
+/// things (e.g., each `DefId`/`DefPath` is only hashed once).
+#[derive(Clone)]
+pub struct StableHashingContext<'a> {
+ definitions: &'a Definitions,
+ cstore: &'a dyn CrateStore,
+ source_span: &'a IndexVec<LocalDefId, Span>,
+ // The value of `-Z incremental-ignore-spans`.
+ // This field should only be used by `unstable_opts_incremental_ignore_spans`.
+ incremental_ignore_spans: bool,
+ pub(super) body_resolver: BodyResolver<'a>,
+ // Very often, we are hashing something that does not need the
+ // `CachingSourceMapView`, so we initialize it lazily.
+ raw_source_map: &'a SourceMap,
+ caching_source_map: Option<CachingSourceMapView<'a>>,
+ pub(super) hashing_controls: HashingControls,
+}
+
+/// The `BodyResolver` allows mapping a `BodyId` to the corresponding `hir::Body`.
+/// We could also just store a plain reference to the `hir::Crate`, but we want
+/// to avoid the crate being used to gain untracked access to all of the HIR.
+#[derive(Clone, Copy)]
+pub(super) enum BodyResolver<'tcx> {
+ Forbidden,
+ Traverse {
+ hash_bodies: bool,
+ owner: LocalDefId,
+ bodies: &'tcx SortedMap<hir::ItemLocalId, &'tcx hir::Body<'tcx>>,
+ },
+}
+
+impl<'a> StableHashingContext<'a> {
+ #[inline]
+ fn new_with_or_without_spans(
+ sess: &'a Session,
+ definitions: &'a Definitions,
+ cstore: &'a dyn CrateStore,
+ source_span: &'a IndexVec<LocalDefId, Span>,
+ always_ignore_spans: bool,
+ ) -> Self {
+ let hash_spans_initial =
+ !always_ignore_spans && !sess.opts.unstable_opts.incremental_ignore_spans;
+
+ StableHashingContext {
+ body_resolver: BodyResolver::Forbidden,
+ definitions,
+ cstore,
+ source_span,
+ incremental_ignore_spans: sess.opts.unstable_opts.incremental_ignore_spans,
+ caching_source_map: None,
+ raw_source_map: sess.source_map(),
+ hashing_controls: HashingControls { hash_spans: hash_spans_initial },
+ }
+ }
+
+ #[inline]
+ pub fn new(
+ sess: &'a Session,
+ definitions: &'a Definitions,
+ cstore: &'a dyn CrateStore,
+ source_span: &'a IndexVec<LocalDefId, Span>,
+ ) -> Self {
+ Self::new_with_or_without_spans(
+ sess,
+ definitions,
+ cstore,
+ source_span,
+ /*always_ignore_spans=*/ false,
+ )
+ }
+
+ #[inline]
+ pub fn ignore_spans(
+ sess: &'a Session,
+ definitions: &'a Definitions,
+ cstore: &'a dyn CrateStore,
+ source_span: &'a IndexVec<LocalDefId, Span>,
+ ) -> Self {
+ let always_ignore_spans = true;
+ Self::new_with_or_without_spans(sess, definitions, cstore, source_span, always_ignore_spans)
+ }
+
+ /// Runs `f` with hashing of HIR bodies enabled or disabled according to
+ /// `hb`, restoring the previous setting afterwards.
+ #[inline]
+ pub fn while_hashing_hir_bodies(&mut self, hb: bool, f: impl FnOnce(&mut Self)) {
+ let prev = match &mut self.body_resolver {
+ BodyResolver::Forbidden => panic!("Hashing HIR bodies is forbidden."),
+ BodyResolver::Traverse { ref mut hash_bodies, .. } => {
+ std::mem::replace(hash_bodies, hb)
+ }
+ };
+ f(self);
+ match &mut self.body_resolver {
+ BodyResolver::Forbidden => unreachable!(),
+ BodyResolver::Traverse { ref mut hash_bodies, .. } => *hash_bodies = prev,
+ }
+ }
+
+ #[inline]
+ pub fn with_hir_bodies(
+ &mut self,
+ hash_bodies: bool,
+ owner: LocalDefId,
+ bodies: &SortedMap<hir::ItemLocalId, &hir::Body<'_>>,
+ f: impl FnOnce(&mut StableHashingContext<'_>),
+ ) {
+ f(&mut StableHashingContext {
+ body_resolver: BodyResolver::Traverse { hash_bodies, owner, bodies },
+ ..self.clone()
+ });
+ }
+
+ #[inline]
+ pub fn while_hashing_spans<F: FnOnce(&mut Self)>(&mut self, hash_spans: bool, f: F) {
+ let prev_hash_spans = self.hashing_controls.hash_spans;
+ self.hashing_controls.hash_spans = hash_spans;
+ f(self);
+ self.hashing_controls.hash_spans = prev_hash_spans;
+ }
+
+ #[inline]
+ pub fn def_path_hash(&self, def_id: DefId) -> DefPathHash {
+ if let Some(def_id) = def_id.as_local() {
+ self.local_def_path_hash(def_id)
+ } else {
+ self.cstore.def_path_hash(def_id)
+ }
+ }
+
+ #[inline]
+ pub fn local_def_path_hash(&self, def_id: LocalDefId) -> DefPathHash {
+ self.definitions.def_path_hash(def_id)
+ }
+
+ #[inline]
+ pub fn source_map(&mut self) -> &mut CachingSourceMapView<'a> {
+ match self.caching_source_map {
+ Some(ref mut sm) => sm,
+ ref mut none => {
+ *none = Some(CachingSourceMapView::new(self.raw_source_map));
+ none.as_mut().unwrap()
+ }
+ }
+ }
+
+ #[inline]
+ pub fn is_ignored_attr(&self, name: Symbol) -> bool {
+ ich::IGNORED_ATTRIBUTES.contains(&name)
+ }
+
+ #[inline]
+ pub fn hashing_controls(&self) -> HashingControls {
+ self.hashing_controls.clone()
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ast::NodeId {
+ #[inline]
+ fn hash_stable(&self, _: &mut StableHashingContext<'a>, _: &mut StableHasher) {
+ panic!("Node IDs should not appear in incremental state");
+ }
+}
+
+impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> {
+ #[inline]
+ fn hash_spans(&self) -> bool {
+ self.hashing_controls.hash_spans
+ }
+
+ #[inline]
+ fn unstable_opts_incremental_ignore_spans(&self) -> bool {
+ self.incremental_ignore_spans
+ }
+
+ #[inline]
+ fn def_path_hash(&self, def_id: DefId) -> DefPathHash {
+ self.def_path_hash(def_id)
+ }
+
+ #[inline]
+ fn def_span(&self, def_id: LocalDefId) -> Span {
+ self.source_span[def_id]
+ }
+
+ #[inline]
+ fn span_data_to_lines_and_cols(
+ &mut self,
+ span: &SpanData,
+ ) -> Option<(Lrc<SourceFile>, usize, BytePos, usize, BytePos)> {
+ self.source_map().span_data_to_lines_and_cols(span)
+ }
+
+ #[inline]
+ fn hashing_controls(&self) -> HashingControls {
+ self.hashing_controls.clone()
+ }
+}
+
+impl<'a> rustc_data_structures::intern::InternedHashingContext for StableHashingContext<'a> {
+ fn with_def_path_and_no_spans(&mut self, f: impl FnOnce(&mut Self)) {
+ self.while_hashing_spans(false, f);
+ }
+}
+
+impl<'a> rustc_session::HashStableContext for StableHashingContext<'a> {}
diff --git a/compiler/rustc_query_system/src/ich/impls_hir.rs b/compiler/rustc_query_system/src/ich/impls_hir.rs
new file mode 100644
index 000000000..3390ed9eb
--- /dev/null
+++ b/compiler/rustc_query_system/src/ich/impls_hir.rs
@@ -0,0 +1,42 @@
+//! This module contains `HashStable` implementations for various HIR data
+//! types in no particular order.
+
+use crate::ich::hcx::BodyResolver;
+use crate::ich::StableHashingContext;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir as hir;
+
+impl<'ctx> rustc_hir::HashStableContext for StableHashingContext<'ctx> {
+ #[inline]
+ fn hash_body_id(&mut self, id: hir::BodyId, hasher: &mut StableHasher) {
+ let hcx = self;
+ match hcx.body_resolver {
+ BodyResolver::Forbidden => panic!("Hashing HIR bodies is forbidden."),
+ BodyResolver::Traverse { hash_bodies: false, .. } => {}
+ BodyResolver::Traverse { hash_bodies: true, owner, bodies } => {
+ assert_eq!(id.hir_id.owner, owner);
+ bodies[&id.hir_id.local_id].hash_stable(hcx, hasher);
+ }
+ }
+ }
+
+ fn hash_hir_expr(&mut self, expr: &hir::Expr<'_>, hasher: &mut StableHasher) {
+ self.while_hashing_hir_bodies(true, |hcx| {
+ let hir::Expr { hir_id, ref span, ref kind } = *expr;
+
+ hir_id.hash_stable(hcx, hasher);
+ span.hash_stable(hcx, hasher);
+ kind.hash_stable(hcx, hasher);
+ })
+ }
+
+ fn hash_hir_ty(&mut self, ty: &hir::Ty<'_>, hasher: &mut StableHasher) {
+ self.while_hashing_hir_bodies(true, |hcx| {
+ let hir::Ty { hir_id, ref kind, ref span } = *ty;
+
+ hir_id.hash_stable(hcx, hasher);
+ kind.hash_stable(hcx, hasher);
+ span.hash_stable(hcx, hasher);
+ })
+ }
+}
diff --git a/compiler/rustc_query_system/src/ich/impls_syntax.rs b/compiler/rustc_query_system/src/ich/impls_syntax.rs
new file mode 100644
index 000000000..1fa085926
--- /dev/null
+++ b/compiler/rustc_query_system/src/ich/impls_syntax.rs
@@ -0,0 +1,150 @@
+//! This module contains `HashStable` implementations for various data types
+//! from `rustc_ast` in no particular order.
+
+use crate::ich::StableHashingContext;
+
+use rustc_ast as ast;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_span::{BytePos, NormalizedPos, SourceFile};
+use std::assert_matches::assert_matches;
+
+use smallvec::SmallVec;
+
+impl<'ctx> rustc_target::HashStableContext for StableHashingContext<'ctx> {}
+
+impl<'a> HashStable<StableHashingContext<'a>> for [ast::Attribute] {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ if self.is_empty() {
+ self.len().hash_stable(hcx, hasher);
+ return;
+ }
+
+ // Some attributes are always ignored during hashing.
+ let filtered: SmallVec<[&ast::Attribute; 8]> = self
+ .iter()
+ .filter(|attr| {
+ !attr.is_doc_comment()
+ && !attr.ident().map_or(false, |ident| hcx.is_ignored_attr(ident.name))
+ })
+ .collect();
+
+ filtered.len().hash_stable(hcx, hasher);
+ for attr in filtered {
+ attr.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+impl<'ctx> rustc_ast::HashStableContext for StableHashingContext<'ctx> {
+ fn hash_attr(&mut self, attr: &ast::Attribute, hasher: &mut StableHasher) {
+ // Make sure that these have been filtered out.
+ debug_assert!(!attr.ident().map_or(false, |ident| self.is_ignored_attr(ident.name)));
+ debug_assert!(!attr.is_doc_comment());
+
+ let ast::Attribute { kind, id: _, style, span } = attr;
+ if let ast::AttrKind::Normal(item, tokens) = kind {
+ item.hash_stable(self, hasher);
+ style.hash_stable(self, hasher);
+ span.hash_stable(self, hasher);
+ assert_matches!(
+ tokens.as_ref(),
+ None,
+ "Tokens should have been removed during lowering!"
+ );
+ } else {
+ unreachable!();
+ }
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for SourceFile {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let SourceFile {
+ name: _, // We hash the smaller name_hash instead of this
+ name_hash,
+ cnum,
+ // Do not hash the source as it is not encoded
+ src: _,
+ ref src_hash,
+ external_src: _,
+ start_pos,
+ end_pos: _,
+ lines: _,
+ ref multibyte_chars,
+ ref non_narrow_chars,
+ ref normalized_pos,
+ } = *self;
+
+ (name_hash as u64).hash_stable(hcx, hasher);
+
+ src_hash.hash_stable(hcx, hasher);
+
+ // We are always in `Lines` form by the time we reach here.
+ assert!(self.lines.borrow().is_lines());
+ self.lines(|lines| {
+ // We only hash the relative position within this source_file
+ lines.len().hash_stable(hcx, hasher);
+ for &line in lines.iter() {
+ stable_byte_pos(line, start_pos).hash_stable(hcx, hasher);
+ }
+ });
+
+ // We only hash the relative position within this source_file
+ multibyte_chars.len().hash_stable(hcx, hasher);
+ for &char_pos in multibyte_chars.iter() {
+ stable_multibyte_char(char_pos, start_pos).hash_stable(hcx, hasher);
+ }
+
+ non_narrow_chars.len().hash_stable(hcx, hasher);
+ for &char_pos in non_narrow_chars.iter() {
+ stable_non_narrow_char(char_pos, start_pos).hash_stable(hcx, hasher);
+ }
+
+ normalized_pos.len().hash_stable(hcx, hasher);
+ for &char_pos in normalized_pos.iter() {
+ stable_normalized_pos(char_pos, start_pos).hash_stable(hcx, hasher);
+ }
+
+ cnum.hash_stable(hcx, hasher);
+ }
+}
+
+fn stable_byte_pos(pos: BytePos, source_file_start: BytePos) -> u32 {
+ pos.0 - source_file_start.0
+}
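+
+// Example for `stable_byte_pos` (sketch): a line starting at absolute
+// `BytePos(110)` in a file whose `start_pos` is `BytePos(100)` hashes as the
+// relative offset `10`, so the hash stays stable when unrelated changes shift
+// the file's absolute position in the source map.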
+
+fn stable_multibyte_char(mbc: rustc_span::MultiByteChar, source_file_start: BytePos) -> (u32, u32) {
+ let rustc_span::MultiByteChar { pos, bytes } = mbc;
+
+ (pos.0 - source_file_start.0, bytes as u32)
+}
+
+fn stable_non_narrow_char(
+ swc: rustc_span::NonNarrowChar,
+ source_file_start: BytePos,
+) -> (u32, u32) {
+ let pos = swc.pos();
+ let width = swc.width();
+
+ (pos.0 - source_file_start.0, width as u32)
+}
+
+fn stable_normalized_pos(np: NormalizedPos, source_file_start: BytePos) -> (u32, u32) {
+ let NormalizedPos { pos, diff } = np;
+
+ (pos.0 - source_file_start.0, diff)
+}
+
+impl<'tcx> HashStable<StableHashingContext<'tcx>> for rustc_feature::Features {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'tcx>, hasher: &mut StableHasher) {
+ // Unfortunately we cannot exhaustively list fields here, since the
+ // struct is macro generated.
+ self.declared_lang_features.hash_stable(hcx, hasher);
+ self.declared_lib_features.hash_stable(hcx, hasher);
+
+ self.walk_feature_fields(|feature_name, value| {
+ feature_name.hash_stable(hcx, hasher);
+ value.hash_stable(hcx, hasher);
+ });
+ }
+}
diff --git a/compiler/rustc_query_system/src/ich/mod.rs b/compiler/rustc_query_system/src/ich/mod.rs
new file mode 100644
index 000000000..0a1c350b2
--- /dev/null
+++ b/compiler/rustc_query_system/src/ich/mod.rs
@@ -0,0 +1,19 @@
+//! ICH - Incremental Compilation Hash
+
+pub use self::hcx::StableHashingContext;
+use rustc_span::symbol::{sym, Symbol};
+
+mod hcx;
+mod impls_hir;
+mod impls_syntax;
+
+pub const IGNORED_ATTRIBUTES: &[Symbol] = &[
+ sym::cfg,
+ sym::rustc_if_this_changed,
+ sym::rustc_then_this_would_need,
+ sym::rustc_dirty,
+ sym::rustc_clean,
+ sym::rustc_partition_reused,
+ sym::rustc_partition_codegened,
+ sym::rustc_expected_cgu_reuse,
+];
diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs
new file mode 100644
index 000000000..68284dcaa
--- /dev/null
+++ b/compiler/rustc_query_system/src/lib.rs
@@ -0,0 +1,19 @@
+#![feature(assert_matches)]
+#![feature(core_intrinsics)]
+#![feature(hash_raw_entry)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(extern_types)]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_data_structures;
+#[macro_use]
+extern crate rustc_macros;
+
+pub mod cache;
+pub mod dep_graph;
+pub mod ich;
+pub mod query;
diff --git a/compiler/rustc_query_system/src/query/README.md b/compiler/rustc_query_system/src/query/README.md
new file mode 100644
index 000000000..8ec07b9fd
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/README.md
@@ -0,0 +1,3 @@
+For more information about how the query system works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
new file mode 100644
index 000000000..85c5af72e
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -0,0 +1,226 @@
+use crate::dep_graph::DepNodeIndex;
+
+use rustc_arena::TypedArena;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::sharded::Sharded;
+#[cfg(not(parallel_compiler))]
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::sync::WorkerLocal;
+use std::default::Default;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::marker::PhantomData;
+
+pub trait CacheSelector<K, V> {
+ type Cache;
+}
+
+pub trait QueryStorage {
+ type Value: Debug;
+ type Stored: Clone;
+
+ /// Store a value without putting it in the cache.
+ /// This is meant to be used with cycle errors.
+ fn store_nocache(&self, value: Self::Value) -> Self::Stored;
+}
+
+pub trait QueryCache: QueryStorage + Sized {
+ type Key: Hash + Eq + Clone + Debug;
+
+ /// Checks if the query is already computed and in the cache.
+ /// It returns the shard index and a lock guard to the shard,
+ /// which will be used if the query is not in the cache and we need
+ /// to compute it.
+ fn lookup<R, OnHit>(
+ &self,
+ key: &Self::Key,
+ // `on_hit` can be called while holding a lock to the query state shard.
+ on_hit: OnHit,
+ ) -> Result<R, ()>
+ where
+ OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R;
+
+ fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex) -> Self::Stored;
+
+ fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex));
+}
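+
+// For illustration (a sketch with hypothetical `cache`, `key`, `compute` and
+// `index`; the real driver lives in the query plumbing):
+//
+//     match cache.lookup(&key, |value, index| (value.clone(), index)) {
+//         Ok((value, _index)) => { /* cache hit */ }
+//         Err(()) => {
+//             let value = compute(key.clone());
+//             let stored = cache.complete(key, value, index);
+//         }
+//     }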
+
+pub struct DefaultCacheSelector;
+
+impl<K: Eq + Hash, V: Clone> CacheSelector<K, V> for DefaultCacheSelector {
+ type Cache = DefaultCache<K, V>;
+}
+
+pub struct DefaultCache<K, V> {
+ #[cfg(parallel_compiler)]
+ cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
+ #[cfg(not(parallel_compiler))]
+ cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
+}
+
+impl<K, V> Default for DefaultCache<K, V> {
+ fn default() -> Self {
+ DefaultCache { cache: Default::default() }
+ }
+}
+
+impl<K: Eq + Hash, V: Clone + Debug> QueryStorage for DefaultCache<K, V> {
+ type Value = V;
+ type Stored = V;
+
+ #[inline]
+ fn store_nocache(&self, value: Self::Value) -> Self::Stored {
+ // We have no dedicated storage
+ value
+ }
+}
+
+impl<K, V> QueryCache for DefaultCache<K, V>
+where
+ K: Eq + Hash + Clone + Debug,
+ V: Clone + Debug,
+{
+ type Key = K;
+
+ #[inline(always)]
+ fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
+ where
+ OnHit: FnOnce(&V, DepNodeIndex) -> R,
+ {
+ let key_hash = sharded::make_hash(key);
+ #[cfg(parallel_compiler)]
+ let lock = self.cache.get_shard_by_hash(key_hash).lock();
+ #[cfg(not(parallel_compiler))]
+ let lock = self.cache.lock();
+ let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
+
+ if let Some((_, value)) = result {
+ let hit_result = on_hit(&value.0, value.1);
+ Ok(hit_result)
+ } else {
+ Err(())
+ }
+ }
+
+ #[inline]
+ fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
+ #[cfg(parallel_compiler)]
+ let mut lock = self.cache.get_shard_by_value(&key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut lock = self.cache.lock();
+ lock.insert(key, (value.clone(), index));
+ value
+ }
+
+ fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
+ #[cfg(parallel_compiler)]
+ {
+ let shards = self.cache.lock_shards();
+ for shard in shards.iter() {
+ for (k, v) in shard.iter() {
+ f(k, &v.0, v.1);
+ }
+ }
+ }
+ #[cfg(not(parallel_compiler))]
+ {
+ let map = self.cache.lock();
+ for (k, v) in map.iter() {
+ f(k, &v.0, v.1);
+ }
+ }
+ }
+}
+
+pub struct ArenaCacheSelector<'tcx>(PhantomData<&'tcx ()>);
+
+impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<K, V> for ArenaCacheSelector<'tcx> {
+ type Cache = ArenaCache<'tcx, K, V>;
+}
+
+pub struct ArenaCache<'tcx, K, V> {
+ arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
+ #[cfg(parallel_compiler)]
+ cache: Sharded<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
+ #[cfg(not(parallel_compiler))]
+ cache: Lock<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
+}
+
+impl<'tcx, K, V> Default for ArenaCache<'tcx, K, V> {
+ fn default() -> Self {
+ ArenaCache { arena: WorkerLocal::new(|_| TypedArena::default()), cache: Default::default() }
+ }
+}
+
+impl<'tcx, K: Eq + Hash, V: Debug + 'tcx> QueryStorage for ArenaCache<'tcx, K, V> {
+ type Value = V;
+ type Stored = &'tcx V;
+
+ #[inline]
+ fn store_nocache(&self, value: Self::Value) -> Self::Stored {
+ let value = self.arena.alloc((value, DepNodeIndex::INVALID));
+ let value = unsafe { &*(value as *const _) };
+ &value.0
+ }
+}
+
+impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
+where
+ K: Eq + Hash + Clone + Debug,
+ V: Debug,
+{
+ type Key = K;
+
+ #[inline(always)]
+ fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
+ where
+ OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
+ {
+ let key_hash = sharded::make_hash(key);
+ #[cfg(parallel_compiler)]
+ let lock = self.cache.get_shard_by_hash(key_hash).lock();
+ #[cfg(not(parallel_compiler))]
+ let lock = self.cache.lock();
+ let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
+
+ if let Some((_, value)) = result {
+ let hit_result = on_hit(&&value.0, value.1);
+ Ok(hit_result)
+ } else {
+ Err(())
+ }
+ }
+
+ #[inline]
+ fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
+ let value = self.arena.alloc((value, index));
+ let value = unsafe { &*(value as *const _) };
+ #[cfg(parallel_compiler)]
+ let mut lock = self.cache.get_shard_by_value(&key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut lock = self.cache.lock();
+ lock.insert(key, value);
+ &value.0
+ }
+
+ fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
+ #[cfg(parallel_compiler)]
+ {
+ let shards = self.cache.lock_shards();
+ for shard in shards.iter() {
+ for (k, v) in shard.iter() {
+ f(k, &v.0, v.1);
+ }
+ }
+ }
+ #[cfg(not(parallel_compiler))]
+ {
+ let map = self.cache.lock();
+ for (k, v) in map.iter() {
+ f(k, &v.0, v.1);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
new file mode 100644
index 000000000..964914a13
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -0,0 +1,75 @@
+//! Query configuration and description traits.
+
+use crate::dep_graph::DepNode;
+use crate::dep_graph::SerializedDepNodeIndex;
+use crate::ich::StableHashingContext;
+use crate::query::caches::QueryCache;
+use crate::query::{QueryContext, QueryState};
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
+use std::fmt::Debug;
+use std::hash::Hash;
+
+pub trait QueryConfig {
+ const NAME: &'static str;
+
+ type Key: Eq + Hash + Clone + Debug;
+ type Value;
+ type Stored: Clone;
+}
+
+pub struct QueryVTable<CTX: QueryContext, K, V> {
+ pub anon: bool,
+ pub dep_kind: CTX::DepKind,
+ pub eval_always: bool,
+ pub cache_on_disk: bool,
+
+ pub compute: fn(CTX::DepContext, K) -> V,
+ pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
+ pub handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
+ pub try_load_from_disk: Option<fn(CTX, SerializedDepNodeIndex) -> Option<V>>,
+}
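+
+// For illustration, a vtable for a hypothetical disk-cached query might be
+// assembled roughly like this (a sketch; in rustc the construction is
+// generated by the query macros):
+//
+//     QueryVTable {
+//         anon: false,
+//         dep_kind: /* this query's DepKind */,
+//         eval_always: false,
+//         cache_on_disk: true,
+//         compute: |tcx, key| /* run the provider */,
+//         hash_result: Some(crate::dep_graph::hash_result),
+//         handle_cycle_error: |tcx, diag| /* report and recover */,
+//         try_load_from_disk: Some(|tcx, index| /* decode the cached value */),
+//     }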
+
+impl<CTX: QueryContext, K, V> QueryVTable<CTX, K, V> {
+ pub(crate) fn to_dep_node(&self, tcx: CTX::DepContext, key: &K) -> DepNode<CTX::DepKind>
+ where
+ K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+ {
+ DepNode::construct(tcx, self.dep_kind, key)
+ }
+
+ pub(crate) fn compute(&self, tcx: CTX::DepContext, key: K) -> V {
+ (self.compute)(tcx, key)
+ }
+
+ pub(crate) fn try_load_from_disk(&self, tcx: CTX, index: SerializedDepNodeIndex) -> Option<V> {
+ self.try_load_from_disk
+ .expect("QueryDescription::load_from_disk() called for an unsupported query.")(
+ tcx, index,
+ )
+ }
+}
+
+pub trait QueryDescription<CTX: QueryContext>: QueryConfig {
+ const TRY_LOAD_FROM_DISK: Option<fn(CTX, SerializedDepNodeIndex) -> Option<Self::Value>>;
+
+ type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
+
+ fn describe(tcx: CTX, key: Self::Key) -> String;
+
+ // Don't use this method to access query results; instead, use the methods on `TyCtxt`.
+ fn query_state<'a>(tcx: CTX) -> &'a QueryState<Self::Key>
+ where
+ CTX: 'a;
+
+ // Don't use this method to access query results; instead, use the methods on `TyCtxt`.
+ fn query_cache<'a>(tcx: CTX) -> &'a Self::Cache
+ where
+ CTX: 'a;
+
+ // Don't use this method to compute query results; instead, use the methods on `TyCtxt`.
+ fn make_vtable(tcx: CTX, key: &Self::Key) -> QueryVTable<CTX, Self::Key, Self::Value>;
+
+ fn cache_on_disk(tcx: CTX::DepContext, key: &Self::Key) -> bool;
+}
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
new file mode 100644
index 000000000..6d2aff381
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -0,0 +1,612 @@
+use crate::query::plumbing::CycleError;
+use crate::query::{QueryContext, QueryStackFrame};
+use rustc_hir::def::DefKind;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{
+ struct_span_err, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, Level,
+};
+use rustc_session::Session;
+use rustc_span::Span;
+
+use std::hash::Hash;
+use std::num::NonZeroU64;
+
+#[cfg(parallel_compiler)]
+use {
+ parking_lot::{Condvar, Mutex},
+ rustc_data_structures::fx::FxHashSet,
+ rustc_data_structures::sync::Lock,
+ rustc_data_structures::sync::Lrc,
+ rustc_data_structures::{jobserver, OnDrop},
+ rustc_rayon_core as rayon_core,
+ rustc_span::DUMMY_SP,
+ std::iter::{self, FromIterator},
+ std::{mem, process},
+};
+
+/// Represents a span and a query key.
+#[derive(Clone, Debug)]
+pub struct QueryInfo {
+ /// The span corresponding to the reason for which this query was required.
+ pub span: Span,
+ pub query: QueryStackFrame,
+}
+
+pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
+
+/// A value uniquely identifying an active query job.
+#[derive(Copy, Clone, Eq, PartialEq, Hash)]
+pub struct QueryJobId(pub NonZeroU64);
+
+impl QueryJobId {
+ fn query(self, map: &QueryMap) -> QueryStackFrame {
+ map.get(&self).unwrap().query.clone()
+ }
+
+ #[cfg(parallel_compiler)]
+ fn span(self, map: &QueryMap) -> Span {
+ map.get(&self).unwrap().job.span
+ }
+
+ #[cfg(parallel_compiler)]
+ fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
+ map.get(&self).unwrap().job.parent
+ }
+
+ #[cfg(parallel_compiler)]
+ fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
+ map.get(&self).unwrap().job.latch.as_ref()
+ }
+}
+
+pub struct QueryJobInfo {
+ pub query: QueryStackFrame,
+ pub job: QueryJob,
+}
+
+/// Represents an active query job.
+#[derive(Clone)]
+pub struct QueryJob {
+ pub id: QueryJobId,
+
+ /// The span corresponding to the reason for which this query was required.
+ pub span: Span,
+
+ /// The parent query job which created this job and is implicitly waiting on it.
+ pub parent: Option<QueryJobId>,
+
+ /// The latch that is used to wait on this job.
+ #[cfg(parallel_compiler)]
+ latch: Option<QueryLatch>,
+}
+
+impl QueryJob {
+ /// Creates a new query job.
+ #[inline]
+ pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
+ QueryJob {
+ id,
+ span,
+ parent,
+ #[cfg(parallel_compiler)]
+ latch: None,
+ }
+ }
+
+ #[cfg(parallel_compiler)]
+ pub(super) fn latch(&mut self) -> QueryLatch {
+ if self.latch.is_none() {
+ self.latch = Some(QueryLatch::new());
+ }
+ self.latch.as_ref().unwrap().clone()
+ }
+
+ /// Signals to waiters that the query is complete.
+ ///
+    /// This does nothing for single-threaded rustc,
+    /// as there are no concurrent jobs which could be waiting on us.
+ #[inline]
+ pub fn signal_complete(self) {
+ #[cfg(parallel_compiler)]
+ {
+ if let Some(latch) = self.latch {
+ latch.set();
+ }
+ }
+ }
+}
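+
+// A sketch of a job's lifecycle (cf. `JobOwner::try_start` and
+// `JobOwner::complete` in `plumbing.rs`): a `QueryJob` is created when a
+// query starts executing, other threads may block on its latch while it
+// runs, and `signal_complete` wakes them once the result has been written
+// to the query cache.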
+
+#[cfg(not(parallel_compiler))]
+impl QueryJobId {
+ #[cold]
+ #[inline(never)]
+ pub(super) fn find_cycle_in_stack(
+ &self,
+ query_map: QueryMap,
+ current_job: &Option<QueryJobId>,
+ span: Span,
+ ) -> CycleError {
+        // Find the waitee amongst `current_job`'s parents.
+ let mut cycle = Vec::new();
+ let mut current_job = Option::clone(current_job);
+
+ while let Some(job) = current_job {
+ let info = query_map.get(&job).unwrap();
+ cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });
+
+ if job == *self {
+ cycle.reverse();
+
+                // This is the end of the cycle.
+                // The span entry we included was for the usage
+                // of the cycle itself, and not part of the cycle.
+                // Replace it with the span which caused the cycle to form.
+ cycle[0].span = span;
+ // Find out why the cycle itself was used
+ let usage = info
+ .job
+ .parent
+ .as_ref()
+ .map(|parent| (info.job.span, parent.query(&query_map)));
+ return CycleError { usage, cycle };
+ }
+
+ current_job = info.job.parent;
+ }
+
+ panic!("did not find a cycle")
+ }
+}
+
+#[cfg(parallel_compiler)]
+struct QueryWaiter {
+ query: Option<QueryJobId>,
+ condvar: Condvar,
+ span: Span,
+ cycle: Lock<Option<CycleError>>,
+}
+
+#[cfg(parallel_compiler)]
+impl QueryWaiter {
+ fn notify(&self, registry: &rayon_core::Registry) {
+ rayon_core::mark_unblocked(registry);
+ self.condvar.notify_one();
+ }
+}
+
+#[cfg(parallel_compiler)]
+struct QueryLatchInfo {
+ complete: bool,
+ waiters: Vec<Lrc<QueryWaiter>>,
+}
+
+#[cfg(parallel_compiler)]
+#[derive(Clone)]
+pub(super) struct QueryLatch {
+ info: Lrc<Mutex<QueryLatchInfo>>,
+}
+
+#[cfg(parallel_compiler)]
+impl QueryLatch {
+ fn new() -> Self {
+ QueryLatch {
+ info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
+ }
+ }
+
+    /// Waits for the query job to complete.
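+    ///
+    /// A sketch of the caller side (cf. `JobOwner::try_start` in
+    /// `plumbing.rs`; illustrative only):
+    ///
+    /// ```ignore (illustrative)
+    /// match latch.wait_on(tcx.current_query_job(), span) {
+    ///     Ok(()) => { /* another thread finished the query; read the cache */ }
+    ///     Err(cycle) => { /* the deadlock handler broke a cycle through us */ }
+    /// }
+    /// ```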
+ pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
+ let waiter =
+ Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
+ self.wait_on_inner(&waiter);
+ // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
+ // although another thread may still have a Lrc reference so we cannot
+ // use Lrc::get_mut
+ let mut cycle = waiter.cycle.lock();
+ match cycle.take() {
+ None => Ok(()),
+ Some(cycle) => Err(cycle),
+ }
+ }
+
+    /// Blocks the current thread on this latch until the query completes.
+ fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
+ let mut info = self.info.lock();
+ if !info.complete {
+ // We push the waiter on to the `waiters` list. It can be accessed inside
+ // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
+ // Both of these will remove it from the `waiters` list before resuming
+ // this thread.
+ info.waiters.push(waiter.clone());
+
+ // If this detects a deadlock and the deadlock handler wants to resume this thread
+ // we have to be in the `wait` call. This is ensured by the deadlock handler
+ // getting the self.info lock.
+ rayon_core::mark_blocked();
+ jobserver::release_thread();
+ waiter.condvar.wait(&mut info);
+ // Release the lock before we potentially block in `acquire_thread`
+ mem::drop(info);
+ jobserver::acquire_thread();
+ }
+ }
+
+    /// Sets the latch and resumes all waiters on it.
+ fn set(&self) {
+ let mut info = self.info.lock();
+ debug_assert!(!info.complete);
+ info.complete = true;
+ let registry = rayon_core::Registry::current();
+ for waiter in info.waiters.drain(..) {
+ waiter.notify(&registry);
+ }
+ }
+
+ /// Removes a single waiter from the list of waiters.
+ /// This is used to break query cycles.
+ fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
+ let mut info = self.info.lock();
+ debug_assert!(!info.complete);
+ // Remove the waiter from the list of waiters
+ info.waiters.remove(waiter)
+ }
+}
+
+/// A resumable waiter of a query. The `usize` is the index into `waiters` in the query's latch.
+#[cfg(parallel_compiler)]
+type Waiter = (QueryJobId, usize);
+
+/// Visits all the non-resumable and resumable waiters of a query.
+/// Only waiters in a query are visited.
+/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
+/// and a span indicating the reason the query waited on `query_ref`.
+/// If `visit` returns `Some`, this function returns.
+/// For visits of non-resumable waiters it returns the return value of `visit`.
+/// For visits of resumable waiters it returns `Some(Some(Waiter))`, which carries
+/// the information required to resume the waiter.
+/// If all `visit` calls return `None`, this function also returns `None`.
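+///
+/// A sketch of a `visit` callback (illustrative) that merely records edges and
+/// never terminates the search early (cf. its use in `remove_cycle` below):
+///
+/// ```ignore (illustrative)
+/// let mut edges = Vec::new();
+/// visit_waiters(&query_map, query, |span, waiter| {
+///     edges.push((span, waiter)); // record the edge
+///     None                        // keep visiting
+/// });
+/// ```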
+#[cfg(parallel_compiler)]
+fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
+where
+ F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
+{
+ // Visit the parent query which is a non-resumable waiter since it's on the same stack
+ if let Some(parent) = query.parent(query_map) {
+ if let Some(cycle) = visit(query.span(query_map), parent) {
+ return Some(cycle);
+ }
+ }
+
+ // Visit the explicit waiters which use condvars and are resumable
+ if let Some(latch) = query.latch(query_map) {
+ for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
+ if let Some(waiter_query) = waiter.query {
+ if visit(waiter.span, waiter_query).is_some() {
+ // Return a value which indicates that this waiter can be resumed
+ return Some(Some((query, i)));
+ }
+ }
+ }
+ }
+
+ None
+}
+
+/// Look for query cycles by doing a depth first search starting at `query`.
+/// `span` is the reason that `query` was executed. It is initially `DUMMY_SP`.
+/// If a cycle is detected, this initial value is replaced with the span causing
+/// the cycle.
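+///
+/// For example (illustrative): if query `A` waits on `B` and `B` waits on `A`,
+/// a search starting at `A` pushes `(DUMMY_SP, A)` and then `(span, B)` onto
+/// `stack`; when it re-encounters `A`, it truncates the stack to just the
+/// cycle, patches the first span with the cause, and returns `Some`.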
+#[cfg(parallel_compiler)]
+fn cycle_check(
+ query_map: &QueryMap,
+ query: QueryJobId,
+ span: Span,
+ stack: &mut Vec<(Span, QueryJobId)>,
+ visited: &mut FxHashSet<QueryJobId>,
+) -> Option<Option<Waiter>> {
+ if !visited.insert(query) {
+ return if let Some(p) = stack.iter().position(|q| q.1 == query) {
+ // We detected a query cycle, fix up the initial span and return Some
+
+ // Remove previous stack entries
+ stack.drain(0..p);
+ // Replace the span for the first query with the cycle cause
+ stack[0].0 = span;
+ Some(None)
+ } else {
+ None
+ };
+ }
+
+    // A query marked as visited is added to the stack.
+ stack.push((span, query));
+
+ // Visit all the waiters
+ let r = visit_waiters(query_map, query, |span, successor| {
+ cycle_check(query_map, successor, span, stack, visited)
+ });
+
+ // Remove the entry in our stack if we didn't find a cycle
+ if r.is_none() {
+ stack.pop();
+ }
+
+ r
+}
+
+/// Finds out if there's a path to the compiler root (i.e. code which isn't in a query)
+/// from `query` without going through any of the queries in `visited`.
+/// This is achieved with a depth first search.
+#[cfg(parallel_compiler)]
+fn connected_to_root(
+ query_map: &QueryMap,
+ query: QueryJobId,
+ visited: &mut FxHashSet<QueryJobId>,
+) -> bool {
+ // We already visited this or we're deliberately ignoring it
+ if !visited.insert(query) {
+ return false;
+ }
+
+ // This query is connected to the root (it has no query parent), return true
+ if query.parent(query_map).is_none() {
+ return true;
+ }
+
+ visit_waiters(query_map, query, |_, successor| {
+ connected_to_root(query_map, successor, visited).then_some(None)
+ })
+ .is_some()
+}
+
+// Deterministically pick a query from a list.
+#[cfg(parallel_compiler)]
+fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
+where
+ F: Fn(&T) -> (Span, QueryJobId),
+{
+ // Deterministically pick an entry point
+ // FIXME: Sort this instead
+ queries
+ .iter()
+ .min_by_key(|v| {
+ let (span, query) = f(v);
+ let hash = query.query(query_map).hash;
+ // Prefer entry points which have valid spans for nicer error messages
+ // We add an integer to the tuple ensuring that entry points
+ // with valid spans are picked first
+ let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
+ (span_cmp, hash)
+ })
+ .unwrap()
+}
+
+/// Looks for query cycles starting from the last query in `jobs`.
+/// If a cycle is found, all queries in the cycle are removed from `jobs` and
+/// the function returns true.
+/// If no cycle is found, the starting query is removed from `jobs` and
+/// the function returns false.
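+///
+/// Note: after reversing the detected cycle, the spans are rotated by one
+/// (`spans.rotate_right(1)` below) so that each query ends up paired with the
+/// span explaining why its waitee was needed, which reads better in the final
+/// cycle error.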
+#[cfg(parallel_compiler)]
+fn remove_cycle(
+ query_map: &QueryMap,
+ jobs: &mut Vec<QueryJobId>,
+ wakelist: &mut Vec<Lrc<QueryWaiter>>,
+) -> bool {
+ let mut visited = FxHashSet::default();
+ let mut stack = Vec::new();
+ // Look for a cycle starting with the last query in `jobs`
+ if let Some(waiter) =
+ cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
+ {
+ // The stack is a vector of pairs of spans and queries; reverse it so that
+ // the earlier entries require later entries
+ let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
+
+ // Shift the spans so that queries are matched with the span for their waitee
+ spans.rotate_right(1);
+
+ // Zip them back together
+ let mut stack: Vec<_> = iter::zip(spans, queries).collect();
+
+ // Remove the queries in our cycle from the list of jobs to look at
+ for r in &stack {
+ if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
+ jobs.remove(pos);
+ }
+ }
+
+ // Find the queries in the cycle which are
+ // connected to queries outside the cycle
+ let entry_points = stack
+ .iter()
+ .filter_map(|&(span, query)| {
+ if query.parent(query_map).is_none() {
+ // This query is connected to the root (it has no query parent)
+ Some((span, query, None))
+ } else {
+ let mut waiters = Vec::new();
+ // Find all the direct waiters who lead to the root
+ visit_waiters(query_map, query, |span, waiter| {
+ // Mark all the other queries in the cycle as already visited
+ let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));
+
+ if connected_to_root(query_map, waiter, &mut visited) {
+ waiters.push((span, waiter));
+ }
+
+ None
+ });
+ if waiters.is_empty() {
+ None
+ } else {
+ // Deterministically pick one of the waiters to show to the user
+ let waiter = *pick_query(query_map, &waiters, |s| *s);
+ Some((span, query, Some(waiter)))
+ }
+ }
+ })
+ .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();
+
+ // Deterministically pick an entry point
+ let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
+
+ // Shift the stack so that our entry point is first
+ let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
+ if let Some(pos) = entry_point_pos {
+ stack.rotate_left(pos);
+ }
+
+ let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));
+
+ // Create the cycle error
+ let error = CycleError {
+ usage,
+ cycle: stack
+ .iter()
+ .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
+ .collect(),
+ };
+
+        // We unwrap `waiter` here since there must always be one
+        // edge which is resumable, i.e. waited on using a query latch.
+ let (waitee_query, waiter_idx) = waiter.unwrap();
+
+ // Extract the waiter we want to resume
+ let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);
+
+ // Set the cycle error so it will be picked up when resumed
+ *waiter.cycle.lock() = Some(error);
+
+ // Put the waiter on the list of things to resume
+ wakelist.push(waiter);
+
+ true
+ } else {
+ false
+ }
+}
+
+/// Detects query cycles by using depth first search over all active query jobs.
+/// If a query cycle is found it will break the cycle by finding an edge which
+/// uses a query latch and then resuming that waiter.
+/// There may be multiple cycles involved in a deadlock, so this searches
+/// all active queries for cycles before finally resuming all the waiters at once.
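+///
+/// A sketch of the intended setup (hypothetical caller code): the driver
+/// registers this function as Rayon's deadlock handler, roughly as
+///
+/// ```ignore (illustrative)
+/// deadlock(tcx.try_collect_active_jobs().unwrap(), &rayon_core::Registry::current());
+/// ```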
+#[cfg(parallel_compiler)]
+pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
+ let on_panic = OnDrop(|| {
+ eprintln!("deadlock handler panicked, aborting process");
+ process::abort();
+ });
+
+ let mut wakelist = Vec::new();
+ let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
+
+ let mut found_cycle = false;
+
+    while !jobs.is_empty() {
+ if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
+ found_cycle = true;
+ }
+ }
+
+ // Check that a cycle was found. It is possible for a deadlock to occur without
+ // a query cycle if a query which can be waited on uses Rayon to do multithreading
+ // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
+ // wait using Rayon on B. Rayon may then switch to executing another query (Y)
+ // which in turn will wait on X causing a deadlock. We have a false dependency from
+ // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
+ // only considers the true dependency and won't detect a cycle.
+ assert!(found_cycle);
+
+ // FIXME: Ensure this won't cause a deadlock before we return
+ for waiter in wakelist.into_iter() {
+ waiter.notify(registry);
+ }
+
+ on_panic.disable();
+}
+
+#[inline(never)]
+#[cold]
+pub(crate) fn report_cycle<'a>(
+ sess: &'a Session,
+ CycleError { usage, cycle: stack }: CycleError,
+) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ assert!(!stack.is_empty());
+
+ let span = stack[0].query.default_span(stack[1 % stack.len()].span);
+ let mut err =
+ struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);
+
+ for i in 1..stack.len() {
+ let query = &stack[i].query;
+ let span = query.default_span(stack[(i + 1) % stack.len()].span);
+ err.span_note(span, &format!("...which requires {}...", query.description));
+ }
+
+ if stack.len() == 1 {
+ err.note(&format!("...which immediately requires {} again", stack[0].query.description));
+ } else {
+ err.note(&format!(
+ "...which again requires {}, completing the cycle",
+ stack[0].query.description
+ ));
+ }
+
+ if stack.iter().all(|entry| {
+ entry
+ .query
+ .def_kind
+ .map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias | DefKind::TraitAlias))
+ }) {
+ if stack.iter().all(|entry| {
+ entry.query.def_kind.map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias))
+ }) {
+ err.note("type aliases cannot be recursive");
+ err.help("consider using a struct, enum, or union instead to break the cycle");
+ err.help("see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information");
+ } else {
+ err.note("trait aliases cannot be recursive");
+ }
+ }
+
+ if let Some((span, query)) = usage {
+ err.span_note(query.default_span(span), &format!("cycle used when {}", query.description));
+ }
+
+ err
+}
+
+pub fn print_query_stack<CTX: QueryContext>(
+ tcx: CTX,
+ mut current_query: Option<QueryJobId>,
+ handler: &Handler,
+ num_frames: Option<usize>,
+) -> usize {
+ // Be careful relying on global state here: this code is called from
+ // a panic hook, which means that the global `Handler` may be in a weird
+ // state if it was responsible for triggering the panic.
+ let mut i = 0;
+ let query_map = tcx.try_collect_active_jobs();
+
+ while let Some(query) = current_query {
+ if Some(i) == num_frames {
+ break;
+ }
+ let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
+ break;
+ };
+ let mut diag = Diagnostic::new(
+ Level::FailureNote,
+ &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description),
+ );
+ diag.span = query_info.job.span.into();
+ handler.force_print_diagnostic(diag);
+
+ current_query = query_info.job.parent;
+ i += 1;
+ }
+
+ i
+}
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
new file mode 100644
index 000000000..fb2258434
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -0,0 +1,125 @@
+mod plumbing;
+pub use self::plumbing::*;
+
+mod job;
+#[cfg(parallel_compiler)]
+pub use self::job::deadlock;
+pub use self::job::{print_query_stack, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap};
+
+mod caches;
+pub use self::caches::{
+ ArenaCacheSelector, CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage,
+};
+
+mod config;
+pub use self::config::{QueryConfig, QueryDescription, QueryVTable};
+
+use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
+
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::Diagnostic;
+use rustc_hir::def::DefKind;
+use rustc_span::Span;
+
+/// Description of a frame in the query stack.
+///
+/// This is mostly used for error reporting in the case of cycles.
+#[derive(Clone, Debug)]
+pub struct QueryStackFrame {
+ pub name: &'static str,
+ pub description: String,
+ span: Option<Span>,
+ def_kind: Option<DefKind>,
+ /// This hash is used to deterministically pick
+ /// a query to remove cycles in the parallel compiler.
+ #[cfg(parallel_compiler)]
+ hash: u64,
+}
+
+impl QueryStackFrame {
+ #[inline]
+ pub fn new(
+ name: &'static str,
+ description: String,
+ span: Option<Span>,
+ def_kind: Option<DefKind>,
+ _hash: impl FnOnce() -> u64,
+ ) -> Self {
+ Self {
+ name,
+ description,
+ span,
+ def_kind,
+ #[cfg(parallel_compiler)]
+ hash: _hash(),
+ }
+ }
+
+ // FIXME(eddyb) Get more valid `Span`s on queries.
+ #[inline]
+ pub fn default_span(&self, span: Span) -> Span {
+ if !span.is_dummy() {
+ return span;
+ }
+ self.span.unwrap_or(span)
+ }
+}
+
+/// Tracks 'side effects' for a particular query.
+/// This struct is saved to disk along with the query result,
+/// and loaded from disk if we mark the query as green.
+/// This allows us to 'replay' changes to global state
+/// that would otherwise only occur if we actually
+/// executed the query method.
+#[derive(Debug, Clone, Default, Encodable, Decodable)]
+pub struct QuerySideEffects {
+ /// Stores any diagnostics emitted during query execution.
+ /// These diagnostics will be re-emitted if we mark
+ /// the query as green.
+ pub(super) diagnostics: ThinVec<Diagnostic>,
+}
+
+impl QuerySideEffects {
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ let QuerySideEffects { diagnostics } = self;
+ diagnostics.is_empty()
+ }
+ pub fn append(&mut self, other: QuerySideEffects) {
+ let QuerySideEffects { diagnostics } = self;
+ diagnostics.extend(other.diagnostics);
+ }
+}
+
+pub trait QueryContext: HasDepContext {
+ fn next_job_id(&self) -> QueryJobId;
+
+ /// Get the query information from the TLS context.
+ fn current_query_job(&self) -> Option<QueryJobId>;
+
+ fn try_collect_active_jobs(&self) -> Option<QueryMap>;
+
+    /// Load side effects associated with the node in the previous session.
+ fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
+
+    /// Register diagnostics for the given node, for use in the next session.
+ fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects);
+
+    /// Register diagnostics for the given node, for use in the next session.
+ fn store_side_effects_for_anon_node(
+ &self,
+ dep_node_index: DepNodeIndex,
+ side_effects: QuerySideEffects,
+ );
+
+ /// Executes a job by changing the `ImplicitCtxt` to point to the
+ /// new query job while it executes. It returns the diagnostics
+ /// captured during execution and the actual result.
+ fn start_query<R>(
+ &self,
+ token: QueryJobId,
+ diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+ compute: impl FnOnce() -> R,
+ ) -> R;
+}
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
new file mode 100644
index 000000000..5e8ea07d0
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -0,0 +1,742 @@
+//! The implementation of the query system itself. This defines the macros that
+//! generate the actual methods on tcx which find and execute the provider,
+//! manage the caches, and so forth.
+
+use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
+use crate::query::caches::QueryCache;
+use crate::query::config::{QueryDescription, QueryVTable};
+use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
+use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::profiling::TimingGuard;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
+use rustc_session::Session;
+use rustc_span::{Span, DUMMY_SP};
+use std::cell::Cell;
+use std::collections::hash_map::Entry;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::mem;
+use std::ptr;
+
+pub struct QueryState<K> {
+ #[cfg(parallel_compiler)]
+ active: Sharded<FxHashMap<K, QueryResult>>,
+ #[cfg(not(parallel_compiler))]
+ active: Lock<FxHashMap<K, QueryResult>>,
+}
+
+/// Indicates the state of a query for a given key in a query map.
+enum QueryResult {
+ /// An already executing query. The query job can be used to await for its completion.
+ Started(QueryJob),
+
+ /// The query panicked. Queries trying to wait on this will raise a fatal error which will
+ /// silently panic.
+ Poisoned,
+}
+
+impl<K> QueryState<K>
+where
+ K: Eq + Hash + Clone + Debug,
+{
+ pub fn all_inactive(&self) -> bool {
+ #[cfg(parallel_compiler)]
+ {
+ let shards = self.active.lock_shards();
+ shards.iter().all(|shard| shard.is_empty())
+ }
+ #[cfg(not(parallel_compiler))]
+ {
+ self.active.lock().is_empty()
+ }
+ }
+
+ pub fn try_collect_active_jobs<CTX: Copy>(
+ &self,
+ tcx: CTX,
+ make_query: fn(CTX, K) -> QueryStackFrame,
+ jobs: &mut QueryMap,
+ ) -> Option<()> {
+ #[cfg(parallel_compiler)]
+ {
+ // We use try_lock_shards here since we are called from the
+ // deadlock handler, and this shouldn't be locked.
+ let shards = self.active.try_lock_shards()?;
+ for shard in shards.iter() {
+ for (k, v) in shard.iter() {
+ if let QueryResult::Started(ref job) = *v {
+ let query = make_query(tcx, k.clone());
+ jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
+ }
+ }
+ }
+ }
+ #[cfg(not(parallel_compiler))]
+ {
+ // We use try_lock here since we are called from the
+ // deadlock handler, and this shouldn't be locked.
+ // (FIXME: Is this relevant for non-parallel compilers? It doesn't
+ // really hurt much.)
+ for (k, v) in self.active.try_lock()?.iter() {
+ if let QueryResult::Started(ref job) = *v {
+ let query = make_query(tcx, k.clone());
+ jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
+ }
+ }
+ }
+
+ Some(())
+ }
+}
+
+impl<K> Default for QueryState<K> {
+ fn default() -> QueryState<K> {
+ QueryState { active: Default::default() }
+ }
+}
+
+/// A type representing the responsibility to execute the query job identified
+/// by the `id` field. This will poison the relevant query if dropped.
+struct JobOwner<'tcx, K>
+where
+ K: Eq + Hash + Clone,
+{
+ state: &'tcx QueryState<K>,
+ key: K,
+ id: QueryJobId,
+}
+
+#[cold]
+#[inline(never)]
+fn mk_cycle<CTX, V, R>(
+ tcx: CTX,
+ error: CycleError,
+ handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
+ cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
+) -> R
+where
+ CTX: QueryContext,
+ V: std::fmt::Debug,
+ R: Clone,
+{
+ let error = report_cycle(tcx.dep_context().sess(), error);
+ let value = handle_cycle_error(tcx, error);
+ cache.store_nocache(value)
+}
+
+impl<'tcx, K> JobOwner<'tcx, K>
+where
+ K: Eq + Hash + Clone,
+{
+    /// Either gets a `JobOwner` corresponding to the query, allowing us to
+ /// start executing the query, or returns with the result of the query.
+    /// This function assumes that `try_get_cached` has already been called and returned `lookup`.
+ /// If the query is executing elsewhere, this will wait for it and return the result.
+ /// If the query panicked, this will silently panic.
+ ///
+ /// This function is inlined because that results in a noticeable speed-up
+ /// for some compile-time benchmarks.
+ #[inline(always)]
+ fn try_start<'b, CTX>(
+ tcx: &'b CTX,
+ state: &'b QueryState<K>,
+ span: Span,
+ key: K,
+ ) -> TryGetJob<'b, K>
+ where
+ CTX: QueryContext,
+ {
+ #[cfg(parallel_compiler)]
+ let mut state_lock = state.active.get_shard_by_value(&key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut state_lock = state.active.lock();
+ let lock = &mut *state_lock;
+
+ match lock.entry(key) {
+ Entry::Vacant(entry) => {
+ let id = tcx.next_job_id();
+ let job = tcx.current_query_job();
+ let job = QueryJob::new(id, span, job);
+
+ let key = entry.key().clone();
+ entry.insert(QueryResult::Started(job));
+
+ let owner = JobOwner { state, id, key };
+ return TryGetJob::NotYetStarted(owner);
+ }
+ Entry::Occupied(mut entry) => {
+ match entry.get_mut() {
+ #[cfg(not(parallel_compiler))]
+ QueryResult::Started(job) => {
+ let id = job.id;
+ drop(state_lock);
+
+                    // If we are single-threaded we know that we have a cycle error,
+ // so we just return the error.
+ return TryGetJob::Cycle(id.find_cycle_in_stack(
+ tcx.try_collect_active_jobs().unwrap(),
+ &tcx.current_query_job(),
+ span,
+ ));
+ }
+ #[cfg(parallel_compiler)]
+ QueryResult::Started(job) => {
+ // For parallel queries, we'll block and wait until the query running
+ // in another thread has completed. Record how long we wait in the
+ // self-profiler.
+ let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();
+
+ // Get the latch out
+ let latch = job.latch();
+
+ drop(state_lock);
+
+ // With parallel queries we might just have to wait on some other
+ // thread.
+ let result = latch.wait_on(tcx.current_query_job(), span);
+
+ match result {
+ Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
+ Err(cycle) => TryGetJob::Cycle(cycle),
+ }
+ }
+ QueryResult::Poisoned => FatalError.raise(),
+ }
+ }
+ }
+ }
+
+ /// Completes the query by updating the query cache with the `result`,
+    /// signaling the waiters, and forgetting the `JobOwner` so that it won't poison the query.
+ fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored
+ where
+ C: QueryCache<Key = K>,
+ {
+ // We can move out of `self` here because we `mem::forget` it below
+ let key = unsafe { ptr::read(&self.key) };
+ let state = self.state;
+
+ // Forget ourself so our destructor won't poison the query
+ mem::forget(self);
+
+ let (job, result) = {
+ let job = {
+ #[cfg(parallel_compiler)]
+ let mut lock = state.active.get_shard_by_value(&key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut lock = state.active.lock();
+ match lock.remove(&key).unwrap() {
+ QueryResult::Started(job) => job,
+ QueryResult::Poisoned => panic!(),
+ }
+ };
+ let result = cache.complete(key, result, dep_node_index);
+ (job, result)
+ };
+
+ job.signal_complete();
+ result
+ }
+}
+
+impl<'tcx, K> Drop for JobOwner<'tcx, K>
+where
+ K: Eq + Hash + Clone,
+{
+ #[inline(never)]
+ #[cold]
+ fn drop(&mut self) {
+ // Poison the query so jobs waiting on it panic.
+ let state = self.state;
+ let job = {
+ #[cfg(parallel_compiler)]
+ let mut shard = state.active.get_shard_by_value(&self.key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut shard = state.active.lock();
+ let job = match shard.remove(&self.key).unwrap() {
+ QueryResult::Started(job) => job,
+ QueryResult::Poisoned => panic!(),
+ };
+ shard.insert(self.key.clone(), QueryResult::Poisoned);
+ job
+ };
+ // Also signal the completion of the job, so waiters
+ // will continue execution.
+ job.signal_complete();
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct CycleError {
+ /// The query and related span that uses the cycle.
+ pub usage: Option<(Span, QueryStackFrame)>,
+ pub cycle: Vec<QueryInfo>,
+}
+
+/// The result of `try_start`.
+enum TryGetJob<'tcx, K>
+where
+ K: Eq + Hash + Clone,
+{
+ /// The query is not yet started. Contains a guard to the cache eventually used to start it.
+ NotYetStarted(JobOwner<'tcx, K>),
+
+ /// The query was already completed.
+    /// The caller should fetch the result and its dep-node index from the
+    /// query cache (a failed wait is reported via `Cycle` instead).
+ #[cfg(parallel_compiler)]
+ JobCompleted(TimingGuard<'tcx>),
+
+ /// Trying to execute the query resulted in a cycle.
+ Cycle(CycleError),
+}
+
+/// Checks if the query is already computed and in the cache.
+/// If so, calls `on_hit` with the cached value, records the cache hit, and
+/// marks the dep-node index as read; otherwise returns `Err(())` so that the
+/// caller can go on to compute the query.
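+///
+/// A sketch of the intended call pattern (hypothetical caller):
+///
+/// ```ignore (illustrative)
+/// match try_get_cached(tcx, cache, &key, |value| value.clone()) {
+///     Ok(value) => return value,                   // cache hit
+///     Err(()) => { /* fall through and execute the query */ }
+/// }
+/// ```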
+#[inline]
+pub fn try_get_cached<'a, CTX, C, R, OnHit>(
+ tcx: CTX,
+ cache: &'a C,
+ key: &C::Key,
+ // `on_hit` can be called while holding a lock to the query cache
+ on_hit: OnHit,
+) -> Result<R, ()>
+where
+ C: QueryCache,
+ CTX: DepContext,
+ OnHit: FnOnce(&C::Stored) -> R,
+{
+ cache.lookup(&key, |value, index| {
+ if std::intrinsics::unlikely(tcx.profiler().enabled()) {
+ tcx.profiler().query_cache_hit(index.into());
+ }
+ tcx.dep_graph().read_index(index);
+ on_hit(value)
+ })
+}
+
+fn try_execute_query<CTX, C>(
+ tcx: CTX,
+ state: &QueryState<C::Key>,
+ cache: &C,
+ span: Span,
+ key: C::Key,
+ dep_node: Option<DepNode<CTX::DepKind>>,
+ query: &QueryVTable<CTX, C::Key, C::Value>,
+) -> (C::Stored, Option<DepNodeIndex>)
+where
+ C: QueryCache,
+ C::Key: Clone + DepNodeParams<CTX::DepContext>,
+ CTX: QueryContext,
+{
+ match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone()) {
+ TryGetJob::NotYetStarted(job) => {
+ let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
+ let result = job.complete(cache, result, dep_node_index);
+ (result, Some(dep_node_index))
+ }
+ TryGetJob::Cycle(error) => {
+ let result = mk_cycle(tcx, error, query.handle_cycle_error, cache);
+ (result, None)
+ }
+ #[cfg(parallel_compiler)]
+ TryGetJob::JobCompleted(query_blocked_prof_timer) => {
+ let (v, index) = cache
+ .lookup(&key, |value, index| (value.clone(), index))
+ .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
+
+ if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
+ tcx.dep_context().profiler().query_cache_hit(index.into());
+ }
+ query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
+
+ (v, Some(index))
+ }
+ }
+}
+
+fn execute_job<CTX, K, V>(
+ tcx: CTX,
+ key: K,
+ mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
+ query: &QueryVTable<CTX, K, V>,
+ job_id: QueryJobId,
+) -> (V, DepNodeIndex)
+where
+ K: Clone + DepNodeParams<CTX::DepContext>,
+ V: Debug,
+ CTX: QueryContext,
+{
+ let dep_graph = tcx.dep_context().dep_graph();
+
+ // Fast path for when incr. comp. is off.
+ if !dep_graph.is_fully_enabled() {
+ let prof_timer = tcx.dep_context().profiler().query_provider();
+ let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
+ let dep_node_index = dep_graph.next_virtual_depnode_index();
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+ return (result, dep_node_index);
+ }
+
+ if !query.anon && !query.eval_always {
+ // `to_dep_node` is expensive for some `DepKind`s.
+ let dep_node =
+ dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key));
+
+ // The diagnostics for this query will be promoted to the current session during
+ // `try_mark_green()`, so we can ignore them here.
+ if let Some(ret) = tcx.start_query(job_id, None, || {
+ try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
+ }) {
+ return ret;
+ }
+ }
+
+ let prof_timer = tcx.dep_context().profiler().query_provider();
+ let diagnostics = Lock::new(ThinVec::new());
+
+ let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
+ if query.anon {
+ return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
+ query.compute(*tcx.dep_context(), key)
+ });
+ }
+
+ // `to_dep_node` is expensive for some `DepKind`s.
+ let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
+
+ dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
+ });
+
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+ let diagnostics = diagnostics.into_inner();
+ let side_effects = QuerySideEffects { diagnostics };
+
+ if std::intrinsics::unlikely(!side_effects.is_empty()) {
+ if query.anon {
+ tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
+ } else {
+ tcx.store_side_effects(dep_node_index, side_effects);
+ }
+ }
+
+ (result, dep_node_index)
+}
+
+fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
+ tcx: CTX,
+ key: &K,
+ dep_node: &DepNode<CTX::DepKind>,
+ query: &QueryVTable<CTX, K, V>,
+) -> Option<(V, DepNodeIndex)>
+where
+ K: Clone,
+ CTX: QueryContext,
+ V: Debug,
+{
+    // Note: this function can be called concurrently from the same query.
+    // We must ensure that this is handled correctly.
+
+ let dep_graph = tcx.dep_context().dep_graph();
+ let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(tcx, &dep_node)?;
+
+ debug_assert!(dep_graph.is_green(dep_node));
+
+ // First we try to load the result from the on-disk cache.
+ // Some things are never cached on disk.
+ if query.cache_on_disk {
+ let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
+
+ // The call to `with_query_deserialization` enforces that no new `DepNodes`
+ // are created during deserialization. See the docs of that method for more
+ // details.
+ let result = dep_graph
+ .with_query_deserialization(|| query.try_load_from_disk(tcx, prev_dep_node_index));
+
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+ if let Some(result) = result {
+ if std::intrinsics::unlikely(
+ tcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
+ ) {
+ dep_graph.mark_debug_loaded_from_disk(*dep_node)
+ }
+
+ let prev_fingerprint = tcx
+ .dep_context()
+ .dep_graph()
+ .prev_fingerprint_of(dep_node)
+ .unwrap_or(Fingerprint::ZERO);
+ // If `-Zincremental-verify-ich` is specified, re-hash results from
+ // the cache and make sure that they have the expected fingerprint.
+ //
+ // If not, we still seek to verify a subset of fingerprints loaded
+ // from disk. Re-hashing results is fairly expensive, so we can't
+ // currently afford to verify every hash. This subset should still
+ // give us some coverage of potential bugs though.
+ let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
+ if std::intrinsics::unlikely(
+ try_verify || tcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
+ ) {
+ incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
+ }
+
+ return Some((result, dep_node_index));
+ }
+
+ // We always expect to find a cached result for things that
+ // can be forced from `DepNode`.
+ debug_assert!(
+ !tcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
+ "missing on-disk cache entry for {:?}",
+ dep_node
+ );
+ }
+
+ // We could not load a result from the on-disk cache, so
+ // recompute.
+ let prof_timer = tcx.dep_context().profiler().query_provider();
+
+ // The dep-graph for this computation is already in-place.
+ let result = dep_graph.with_ignore(|| query.compute(*tcx.dep_context(), key.clone()));
+
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+ // Verify that re-running the query produced a result with the expected hash
+ // This catches bugs in query implementations, turning them into ICEs.
+ // For example, a query might sort its result by `DefId` - since `DefId`s are
+    // not stable across compilation sessions, the result could end up getting sorted
+ // in a different order when the query is re-run, even though all of the inputs
+ // (e.g. `DefPathHash` values) were green.
+ //
+ // See issue #82920 for an example of a miscompilation that would get turned into
+ // an ICE by this check
+ incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
+
+ Some((result, dep_node_index))
+}
+
+fn incremental_verify_ich<CTX, K, V: Debug>(
+ tcx: CTX::DepContext,
+ result: &V,
+ dep_node: &DepNode<CTX::DepKind>,
+ query: &QueryVTable<CTX, K, V>,
+) where
+ CTX: QueryContext,
+{
+ assert!(
+ tcx.dep_graph().is_green(dep_node),
+ "fingerprint for green query instance not loaded from cache: {:?}",
+ dep_node,
+ );
+
+ debug!("BEGIN verify_ich({:?})", dep_node);
+ let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| {
+ tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
+ });
+ let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
+ debug!("END verify_ich({:?})", dep_node);
+
+ if Some(new_hash) != old_hash {
+ incremental_verify_ich_cold(tcx.sess(), DebugArg::from(&dep_node), DebugArg::from(&result));
+ }
+}
+
+// This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
+// currently not exposed publicly.
+//
+// The PR which added this attempted to use `&dyn Debug` instead, but that
+// showed statistically significant worse compiler performance. It's not
+// actually clear what the cause there was -- the code should be cold. If this
+// can be replaced with `&dyn Debug` with no perf impact, then it probably
+// should be.
+extern "C" {
+ type Opaque;
+}
+
+struct DebugArg<'a> {
+ value: &'a Opaque,
+ fmt: fn(&Opaque, &mut std::fmt::Formatter<'_>) -> std::fmt::Result,
+}
+
+impl<'a, T> From<&'a T> for DebugArg<'a>
+where
+ T: std::fmt::Debug,
+{
+ fn from(value: &'a T) -> DebugArg<'a> {
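+        // Erase the concrete type: keep the value as an opaque pointer and
+        // pair it with its (transmuted) `Debug::fmt` function pointer, much
+        // like `std::fmt::ArgumentV1` does internally.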
+ DebugArg {
+ value: unsafe { std::mem::transmute(value) },
+ fmt: unsafe {
+ std::mem::transmute(<T as std::fmt::Debug>::fmt as fn(_, _) -> std::fmt::Result)
+ },
+ }
+ }
+}
+
+impl std::fmt::Debug for DebugArg<'_> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ (self.fmt)(self.value, f)
+ }
+}
+
+// Note that this is marked #[cold] and intentionally takes the equivalent of
+// `dyn Debug` for its arguments, as we want to avoid generating a bunch of
+// different implementations for LLVM to chew on (and filling up the final
+// binary, too).
+#[cold]
+fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
+ let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
+ format!("`cargo clean -p {}` or `cargo clean`", crate_name)
+ } else {
+ "`cargo clean`".to_string()
+ };
+
+ // When we emit an error message and panic, we try to debug-print the `DepNode`
+ // and query result. Unfortunately, this can cause us to run additional queries,
+ // which may result in another fingerprint mismatch while we're in the middle
+ // of processing this one. To avoid a double-panic (which kills the process
+    // before we can print out the query stack), we print out a terse
+ // but 'safe' message if we detect a re-entrant call to this method.
+ thread_local! {
+ static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
+ };
+
+ let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));
+
+ if old_in_panic {
+ sess.struct_err(
+ "internal compiler error: re-entrant incremental verify failure, suppressing message",
+ )
+ .emit();
+ } else {
+ sess.struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
+ .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
+ .note("Please follow the instructions below to create a bug report with the provided information")
+ .note("See <https://github.com/rust-lang/rust/issues/84970> for more information")
+ .emit();
+ panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
+ }
+
+ INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
+}
+
+/// Ensure that either this query has all green inputs or has been executed.
+/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
+/// Returns true if the query should still run.
+///
+/// This function is particularly useful when executing passes for their
+/// side-effects -- e.g., in order to report errors for erroneous programs.
+///
+/// Note: The optimization is only available during incr. comp.
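+///
+/// A sketch of how `get_query` (below) consults this for `QueryMode::Ensure`:
+///
+/// ```ignore (illustrative)
+/// let (must_run, dep_node) = ensure_must_run(tcx, &key, &query);
+/// if !must_run {
+///     return None; // all inputs are green; skip executing the query
+/// }
+/// ```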
+#[inline(never)]
+fn ensure_must_run<CTX, K, V>(
+ tcx: CTX,
+ key: &K,
+ query: &QueryVTable<CTX, K, V>,
+) -> (bool, Option<DepNode<CTX::DepKind>>)
+where
+ K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+ CTX: QueryContext,
+{
+ if query.eval_always {
+ return (true, None);
+ }
+
+ // Ensuring an anonymous query makes no sense
+ assert!(!query.anon);
+
+ let dep_node = query.to_dep_node(*tcx.dep_context(), key);
+
+ let dep_graph = tcx.dep_context().dep_graph();
+ match dep_graph.try_mark_green(tcx, &dep_node) {
+ None => {
+ // A None return from `try_mark_green` means that this is either
+ // a new dep node or that the dep node has already been marked red.
+ // Either way, we can't call `dep_graph.read()` as we don't have the
+ // DepNodeIndex. We must invoke the query itself. The performance cost
+ // this introduces should be negligible as we'll immediately hit the
+ // in-memory cache, or another query down the line will.
+ (true, Some(dep_node))
+ }
+ Some((_, dep_node_index)) => {
+ dep_graph.read_index(dep_node_index);
+ tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
+ (false, None)
+ }
+ }
+}
+
+#[derive(Debug)]
+pub enum QueryMode {
+ Get,
+ Ensure,
+}
+
+pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
+where
+ Q: QueryDescription<CTX>,
+ Q::Key: DepNodeParams<CTX::DepContext>,
+ CTX: QueryContext,
+{
+ let query = Q::make_vtable(tcx, &key);
+ let dep_node = if let QueryMode::Ensure = mode {
+ let (must_run, dep_node) = ensure_must_run(tcx, &key, &query);
+ if !must_run {
+ return None;
+ }
+ dep_node
+ } else {
+ None
+ };
+
+ let (result, dep_node_index) = try_execute_query(
+ tcx,
+ Q::query_state(tcx),
+ Q::query_cache(tcx),
+ span,
+ key,
+ dep_node,
+ &query,
+ );
+ if let Some(dep_node_index) = dep_node_index {
+ tcx.dep_context().dep_graph().read_index(dep_node_index)
+ }
+ Some(result)
+}
+
+pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind>)
+where
+ Q: QueryDescription<CTX>,
+ Q::Key: DepNodeParams<CTX::DepContext>,
+ CTX: QueryContext,
+{
+    // We may be concurrently trying to both execute and force a query.
+ // Ensure that only one of them runs the query.
+ let cache = Q::query_cache(tcx);
+ let cached = cache.lookup(&key, |_, index| {
+ if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
+ tcx.dep_context().profiler().query_cache_hit(index.into());
+ }
+ });
+
+ match cached {
+ Ok(()) => return,
+ Err(()) => {}
+ }
+
+ let query = Q::make_vtable(tcx, &key);
+ let state = Q::query_state(tcx);
+ debug_assert!(!query.anon);
+
+ try_execute_query(tcx, state, cache, DUMMY_SP, key, Some(dep_node), &query);
+}
diff --git a/compiler/rustc_resolve/Cargo.toml b/compiler/rustc_resolve/Cargo.toml
new file mode 100644
index 000000000..5d2b606b4
--- /dev/null
+++ b/compiler/rustc_resolve/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "rustc_resolve"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+bitflags = "1.2.1"
+tracing = "0.1"
+rustc_ast = { path = "../rustc_ast" }
+rustc_arena = { path = "../rustc_arena" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_expand = { path = "../rustc_expand" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_metadata = { path = "../rustc_metadata" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_resolve/src/access_levels.rs b/compiler/rustc_resolve/src/access_levels.rs
new file mode 100644
index 000000000..3fba923d9
--- /dev/null
+++ b/compiler/rustc_resolve/src/access_levels.rs
@@ -0,0 +1,237 @@
+use rustc_ast::ast;
+use rustc_ast::visit;
+use rustc_ast::visit::Visitor;
+use rustc_ast::Crate;
+use rustc_ast::EnumDef;
+use rustc_ast::ForeignMod;
+use rustc_ast::NodeId;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::def_id::CRATE_DEF_ID;
+use rustc_middle::middle::privacy::AccessLevel;
+use rustc_middle::ty::Visibility;
+use rustc_span::sym;
+
+use crate::imports::ImportKind;
+use crate::BindingKey;
+use crate::NameBinding;
+use crate::NameBindingKind;
+use crate::Resolver;
+
+pub struct AccessLevelsVisitor<'r, 'a> {
+ r: &'r mut Resolver<'a>,
+ prev_level: Option<AccessLevel>,
+ changed: bool,
+}
+
+impl<'r, 'a> AccessLevelsVisitor<'r, 'a> {
+    /// Fills the `Resolver::access_levels` table with public & exported items.
+    /// For now, this doesn't resolve macros (FIXME) and cannot resolve `Impl`s, as we
+    /// need access to a `TyCtxt` for that.
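+    ///
+    /// The visitor iterates to a fixed point: each `walk_crate` pass may raise
+    /// the access level of some items (setting `changed`), which can in turn
+    /// raise the levels of items reached through them on the next pass, until
+    /// a pass completes without any change.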
+ pub fn compute_access_levels<'c>(r: &'r mut Resolver<'a>, krate: &'c Crate) {
+ let mut visitor =
+ AccessLevelsVisitor { r, changed: false, prev_level: Some(AccessLevel::Public) };
+
+ visitor.set_access_level_def_id(CRATE_DEF_ID, Some(AccessLevel::Public));
+ visitor.set_exports_access_level(CRATE_DEF_ID);
+
+ while visitor.changed {
+ visitor.reset();
+ visit::walk_crate(&mut visitor, krate);
+ }
+
+ tracing::info!("resolve::access_levels: {:#?}", r.access_levels);
+ }
+
+ fn reset(&mut self) {
+ self.changed = false;
+ self.prev_level = Some(AccessLevel::Public);
+ }
+
+    /// Updates the access level of the exports of the given module accordingly. The module access
+    /// level has to be `Exported` or `Public`.
+    /// This will also follow `use` chains (see `PrivacyVisitor::set_import_binding_access_level`).
+ fn set_exports_access_level(&mut self, module_id: LocalDefId) {
+        assert!(self.r.module_map.contains_key(&module_id.to_def_id()));
+
+        // Sets the given binding's access level to `AccessLevel::Public` and
+ // sets the rest of the `use` chain to `AccessLevel::Exported` until
+ // we hit the actual exported item.
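+        // For example (illustrative): given `pub use a::b;` where `b` is itself
+        // `pub use c::item;`, the outermost import binding becomes `Public` and
+        // each nested `use` link is marked `Exported` until the item is reached.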
+ let set_import_binding_access_level =
+ |this: &mut Self, mut binding: &NameBinding<'a>, mut access_level| {
+ while let NameBindingKind::Import { binding: nested_binding, import, .. } =
+ binding.kind
+ {
+ this.set_access_level(import.id, access_level);
+ if let ImportKind::Single { additional_ids, .. } = import.kind {
+ this.set_access_level(additional_ids.0, access_level);
+ this.set_access_level(additional_ids.1, access_level);
+ }
+
+ access_level = Some(AccessLevel::Exported);
+ binding = nested_binding;
+ }
+ };
+
+ let module_level = self.r.access_levels.map.get(&module_id).copied();
+ assert!(module_level >= Some(AccessLevel::Exported));
+
+ if let Some(exports) = self.r.reexport_map.get(&module_id) {
+ let pub_exports = exports
+ .iter()
+ .filter(|ex| ex.vis == Visibility::Public)
+ .cloned()
+ .collect::<Vec<_>>();
+
+ let module = self.r.get_module(module_id.to_def_id()).unwrap();
+ for export in pub_exports.into_iter() {
+ if let Some(export_def_id) = export.res.opt_def_id().and_then(|id| id.as_local()) {
+ self.set_access_level_def_id(export_def_id, Some(AccessLevel::Exported));
+ }
+
+ if let Some(ns) = export.res.ns() {
+ let key = BindingKey { ident: export.ident, ns, disambiguator: 0 };
+ let name_res = self.r.resolution(module, key);
+ if let Some(binding) = name_res.borrow().binding() {
+ set_import_binding_access_level(self, binding, module_level)
+ }
+ }
+ }
+ }
+ }
+
+ /// Sets the access level of the `LocalDefId` corresponding to the given `NodeId`.
+    /// This function will panic if the `NodeId` does not have a `LocalDefId`.
+ fn set_access_level(
+ &mut self,
+ node_id: NodeId,
+ access_level: Option<AccessLevel>,
+ ) -> Option<AccessLevel> {
+ self.set_access_level_def_id(self.r.local_def_id(node_id), access_level)
+ }
+
+ fn set_access_level_def_id(
+ &mut self,
+ def_id: LocalDefId,
+ access_level: Option<AccessLevel>,
+ ) -> Option<AccessLevel> {
+ let old_level = self.r.access_levels.map.get(&def_id).copied();
+ if old_level < access_level {
+ self.r.access_levels.map.insert(def_id, access_level.unwrap());
+ self.changed = true;
+ access_level
+ } else {
+ old_level
+ }
+ }
+}
+
+impl<'r, 'ast> Visitor<'ast> for AccessLevelsVisitor<'ast, 'r> {
+ fn visit_item(&mut self, item: &'ast ast::Item) {
+ let inherited_item_level = match item.kind {
+ // Resolved in rustc_privacy when types are available
+ ast::ItemKind::Impl(..) => return,
+
+            // `macro_rules!` items are public only if exported via `#[macro_export]`.
+ ast::ItemKind::MacroDef(ref macro_def) if macro_def.macro_rules => {
+ let is_macro_export =
+ item.attrs.iter().any(|attr| attr.has_name(sym::macro_export));
+ if is_macro_export { Some(AccessLevel::Public) } else { None }
+ }
+
+ // Foreign modules inherit level from parents.
+ ast::ItemKind::ForeignMod(..) => self.prev_level,
+
+ // Other `pub` items inherit levels from parents.
+ ast::ItemKind::ExternCrate(..)
+ | ast::ItemKind::Use(..)
+ | ast::ItemKind::Static(..)
+ | ast::ItemKind::Const(..)
+ | ast::ItemKind::Fn(..)
+ | ast::ItemKind::Mod(..)
+ | ast::ItemKind::GlobalAsm(..)
+ | ast::ItemKind::TyAlias(..)
+ | ast::ItemKind::Enum(..)
+ | ast::ItemKind::Struct(..)
+ | ast::ItemKind::Union(..)
+ | ast::ItemKind::Trait(..)
+ | ast::ItemKind::TraitAlias(..)
+ | ast::ItemKind::MacroDef(..) => {
+ if item.vis.kind.is_pub() {
+ self.prev_level
+ } else {
+ None
+ }
+ }
+
+ // Should be unreachable at this stage
+ ast::ItemKind::MacCall(..) => panic!(
+                "ast::ItemKind::MacCall encountered, this should no longer appear at this stage"
+ ),
+ };
+
+ let access_level = self.set_access_level(item.id, inherited_item_level);
+
+        // Set the access level of nested items.
+        // If it's a mod, also make the visitor walk all of its items.
+ match item.kind {
+ ast::ItemKind::Mod(..) => {
+ if access_level.is_some() {
+ self.set_exports_access_level(self.r.local_def_id(item.id));
+ }
+
+ let orig_level = std::mem::replace(&mut self.prev_level, access_level);
+ visit::walk_item(self, item);
+ self.prev_level = orig_level;
+ }
+
+ ast::ItemKind::ForeignMod(ForeignMod { ref items, .. }) => {
+ for nested in items {
+ if nested.vis.kind.is_pub() {
+ self.set_access_level(nested.id, access_level);
+ }
+ }
+ }
+ ast::ItemKind::Enum(EnumDef { ref variants }, _) => {
+ for variant in variants {
+ let variant_level = self.set_access_level(variant.id, access_level);
+ if let Some(ctor_id) = variant.data.ctor_id() {
+ self.set_access_level(ctor_id, access_level);
+ }
+
+ for field in variant.data.fields() {
+ self.set_access_level(field.id, variant_level);
+ }
+ }
+ }
+ ast::ItemKind::Struct(ref def, _) | ast::ItemKind::Union(ref def, _) => {
+ if let Some(ctor_id) = def.ctor_id() {
+ self.set_access_level(ctor_id, access_level);
+ }
+
+ for field in def.fields() {
+ if field.vis.kind.is_pub() {
+ self.set_access_level(field.id, access_level);
+ }
+ }
+ }
+ ast::ItemKind::Trait(ref trait_kind) => {
+ for nested in trait_kind.items.iter() {
+ self.set_access_level(nested.id, access_level);
+ }
+ }
+
+ ast::ItemKind::ExternCrate(..)
+ | ast::ItemKind::Use(..)
+ | ast::ItemKind::Static(..)
+ | ast::ItemKind::Const(..)
+ | ast::ItemKind::GlobalAsm(..)
+ | ast::ItemKind::TyAlias(..)
+ | ast::ItemKind::TraitAlias(..)
+ | ast::ItemKind::MacroDef(..)
+ | ast::ItemKind::Fn(..) => return,
+
+ // Unreachable kinds
+ ast::ItemKind::Impl(..) | ast::ItemKind::MacCall(..) => unreachable!(),
+ }
+ }
+}
diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs
new file mode 100644
index 000000000..e955a1798
--- /dev/null
+++ b/compiler/rustc_resolve/src/build_reduced_graph.rs
@@ -0,0 +1,1546 @@
+//! After we obtain a fresh AST fragment from a macro, code in this module helps to integrate
+//! that fragment into the module structures that are already partially built.
+//!
+//! Items from the fragment are placed into modules,
+//! unexpanded macros in the fragment are visited and registered.
+//! Imports are also considered items and placed into modules here, but not resolved yet.
+
+use crate::def_collector::collect_definitions;
+use crate::imports::{Import, ImportKind};
+use crate::macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef};
+use crate::Namespace::{self, MacroNS, TypeNS, ValueNS};
+use crate::{Determinacy, ExternPreludeEntry, Finalize, Module, ModuleKind, ModuleOrUniformRoot};
+use crate::{
+ MacroData, NameBinding, NameBindingKind, ParentScope, PathResult, PerNS, ResolutionError,
+};
+use crate::{Resolver, ResolverArenas, Segment, ToNameBinding, VisResolutionError};
+
+use rustc_ast::visit::{self, AssocCtxt, Visitor};
+use rustc_ast::{self as ast, AssocItem, AssocItemKind, MetaItemKind, StmtKind};
+use rustc_ast::{Block, Fn, ForeignItem, ForeignItemKind, Impl, Item, ItemKind, NodeId};
+use rustc_attr as attr;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{struct_span_err, Applicability};
+use rustc_expand::expand::AstFragment;
+use rustc_hir::def::{self, *};
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_metadata::creader::LoadedMacro;
+use rustc_middle::bug;
+use rustc_middle::metadata::ModChild;
+use rustc_middle::ty::{self, DefIdTree};
+use rustc_session::cstore::CrateStore;
+use rustc_span::hygiene::{ExpnId, LocalExpnId, MacroKind};
+use rustc_span::source_map::{respan, Spanned};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::Span;
+
+use std::cell::Cell;
+use std::ptr;
+use tracing::debug;
+
+type Res = def::Res<NodeId>;
+
+impl<'a> ToNameBinding<'a> for (Module<'a>, ty::Visibility, Span, LocalExpnId) {
+ fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> {
+ arenas.alloc_name_binding(NameBinding {
+ kind: NameBindingKind::Module(self.0),
+ ambiguity: None,
+ vis: self.1,
+ span: self.2,
+ expansion: self.3,
+ })
+ }
+}
+
+impl<'a> ToNameBinding<'a> for (Res, ty::Visibility, Span, LocalExpnId) {
+ fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> {
+ arenas.alloc_name_binding(NameBinding {
+ kind: NameBindingKind::Res(self.0, false),
+ ambiguity: None,
+ vis: self.1,
+ span: self.2,
+ expansion: self.3,
+ })
+ }
+}
+
+struct IsMacroExport;
+
+impl<'a> ToNameBinding<'a> for (Res, ty::Visibility, Span, LocalExpnId, IsMacroExport) {
+ fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> {
+ arenas.alloc_name_binding(NameBinding {
+ kind: NameBindingKind::Res(self.0, true),
+ ambiguity: None,
+ vis: self.1,
+ span: self.2,
+ expansion: self.3,
+ })
+ }
+}
+
+impl<'a> Resolver<'a> {
+ /// Defines `name` in namespace `ns` of module `parent` to be `def` if it is not yet defined;
+ /// otherwise, reports an error.
+ pub(crate) fn define<T>(&mut self, parent: Module<'a>, ident: Ident, ns: Namespace, def: T)
+ where
+ T: ToNameBinding<'a>,
+ {
+ let binding = def.to_name_binding(self.arenas);
+ let key = self.new_key(ident, ns);
+ if let Err(old_binding) = self.try_define(parent, key, binding) {
+ self.report_conflict(parent, ident, ns, old_binding, &binding);
+ }
+ }
+
+ /// Walks up the tree of definitions starting at `def_id`,
+ /// stopping at the first encountered module.
+ /// Parent block modules for arbitrary def-ids are not recorded for the local crate,
+ /// and are not preserved in metadata for foreign crates, so block modules are never
+ /// returned by this function.
+ ///
+ /// For the local crate ignoring block modules may be incorrect, so use this method with care.
+ ///
+ /// For foreign crates block modules can be ignored without introducing observable differences,
+    /// moreover they have to be ignored right now because they are not kept in metadata.
+ /// Foreign parent modules are used for resolving names used by foreign macros with def-site
+ /// hygiene, therefore block module ignorability relies on macros with def-site hygiene and
+ /// block module parents being unreachable from other crates.
+ /// Reachable macros with block module parents exist due to `#[macro_export] macro_rules!`,
+ /// but they cannot use def-site hygiene, so the assumption holds
+ /// (<https://github.com/rust-lang/rust/pull/77984#issuecomment-712445508>).
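+ ///
+ /// For example, for an item defined inside a function body this walks past the
+ /// enclosing block and function, returning the module for the surrounding
+ /// `mod` item, enum, trait, or crate root.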
+ pub fn get_nearest_non_block_module(&mut self, mut def_id: DefId) -> Module<'a> {
+ loop {
+ match self.get_module(def_id) {
+ Some(module) => return module,
+ None => def_id = self.parent(def_id),
+ }
+ }
+ }
+
+ pub fn expect_module(&mut self, def_id: DefId) -> Module<'a> {
+ self.get_module(def_id).expect("argument `DefId` is not a module")
+ }
+
+ /// If `def_id` refers to a module (in resolver's sense, i.e. a module item, crate root, enum,
+ /// or trait), then this function returns that module's resolver representation, otherwise it
+ /// returns `None`.
+ pub(crate) fn get_module(&mut self, def_id: DefId) -> Option<Module<'a>> {
+ if let module @ Some(..) = self.module_map.get(&def_id) {
+ return module.copied();
+ }
+
+ if !def_id.is_local() {
+ let def_kind = self.cstore().def_kind(def_id);
+ match def_kind {
+ DefKind::Mod | DefKind::Enum | DefKind::Trait => {
+ let def_key = self.cstore().def_key(def_id);
+ let parent = def_key.parent.map(|index| {
+ self.get_nearest_non_block_module(DefId { index, krate: def_id.krate })
+ });
+ let name = if let Some(cnum) = def_id.as_crate_root() {
+ self.cstore().crate_name(cnum)
+ } else {
+ def_key.disambiguated_data.data.get_opt_name().expect("module without name")
+ };
+
+ Some(self.new_module(
+ parent,
+ ModuleKind::Def(def_kind, def_id, name),
+ self.cstore().module_expansion_untracked(def_id, &self.session),
+ self.cstore().get_span_untracked(def_id, &self.session),
+ // FIXME: Account for `#[no_implicit_prelude]` attributes.
+ parent.map_or(false, |module| module.no_implicit_prelude),
+ ))
+ }
+ _ => None,
+ }
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn expn_def_scope(&mut self, expn_id: ExpnId) -> Module<'a> {
+ match expn_id.expn_data().macro_def_id {
+ Some(def_id) => self.macro_def_scope(def_id),
+ None => expn_id
+ .as_local()
+ .and_then(|expn_id| self.ast_transform_scopes.get(&expn_id))
+ .unwrap_or(&self.graph_root),
+ }
+ }
+
+ pub(crate) fn macro_def_scope(&mut self, def_id: DefId) -> Module<'a> {
+ if let Some(id) = def_id.as_local() {
+ self.local_macro_def_scopes[&id]
+ } else {
+ self.get_nearest_non_block_module(def_id)
+ }
+ }
+
+ pub(crate) fn get_macro(&mut self, res: Res) -> Option<MacroData> {
+ match res {
+ Res::Def(DefKind::Macro(..), def_id) => Some(self.get_macro_by_def_id(def_id)),
+ Res::NonMacroAttr(_) => {
+ Some(MacroData { ext: self.non_macro_attr.clone(), macro_rules: false })
+ }
+ _ => None,
+ }
+ }
+
+ pub(crate) fn get_macro_by_def_id(&mut self, def_id: DefId) -> MacroData {
+ if let Some(macro_data) = self.macro_map.get(&def_id) {
+ return macro_data.clone();
+ }
+
+ let (ext, macro_rules) = match self.cstore().load_macro_untracked(def_id, &self.session) {
+ LoadedMacro::MacroDef(item, edition) => (
+ Lrc::new(self.compile_macro(&item, edition).0),
+ matches!(item.kind, ItemKind::MacroDef(def) if def.macro_rules),
+ ),
+ LoadedMacro::ProcMacro(ext) => (Lrc::new(ext), false),
+ };
+
+ let macro_data = MacroData { ext, macro_rules };
+ self.macro_map.insert(def_id, macro_data.clone());
+ macro_data
+ }
+
+ pub(crate) fn build_reduced_graph(
+ &mut self,
+ fragment: &AstFragment,
+ parent_scope: ParentScope<'a>,
+ ) -> MacroRulesScopeRef<'a> {
+ collect_definitions(self, fragment, parent_scope.expansion);
+ let mut visitor = BuildReducedGraphVisitor { r: self, parent_scope };
+ fragment.visit_with(&mut visitor);
+ visitor.parent_scope.macro_rules
+ }
+
+ pub(crate) fn build_reduced_graph_external(&mut self, module: Module<'a>) {
+ for child in self.cstore().module_children_untracked(module.def_id(), self.session) {
+ let parent_scope = ParentScope::module(module, self);
+ BuildReducedGraphVisitor { r: self, parent_scope }
+ .build_reduced_graph_for_external_crate_res(child);
+ }
+ }
+}
+
+struct BuildReducedGraphVisitor<'a, 'b> {
+ r: &'b mut Resolver<'a>,
+ parent_scope: ParentScope<'a>,
+}
+
+impl<'a> AsMut<Resolver<'a>> for BuildReducedGraphVisitor<'a, '_> {
+ fn as_mut(&mut self) -> &mut Resolver<'a> {
+ self.r
+ }
+}
+
+impl<'a, 'b> BuildReducedGraphVisitor<'a, 'b> {
+ fn resolve_visibility(&mut self, vis: &ast::Visibility) -> ty::Visibility {
+ self.try_resolve_visibility(vis, true).unwrap_or_else(|err| {
+ self.r.report_vis_error(err);
+ ty::Visibility::Public
+ })
+ }
+
+ fn try_resolve_visibility<'ast>(
+ &mut self,
+ vis: &'ast ast::Visibility,
+ finalize: bool,
+ ) -> Result<ty::Visibility, VisResolutionError<'ast>> {
+ let parent_scope = &self.parent_scope;
+ match vis.kind {
+ ast::VisibilityKind::Public => Ok(ty::Visibility::Public),
+ ast::VisibilityKind::Inherited => {
+ Ok(match self.parent_scope.module.kind {
+ // Any inherited visibility resolved directly inside an enum or trait
+ // (i.e. variants, fields, and trait items) inherits from the visibility
+ // of the enum or trait.
+ ModuleKind::Def(DefKind::Enum | DefKind::Trait, def_id, _) => {
+ self.r.visibilities[&def_id.expect_local()]
+ }
+ // Otherwise, the visibility is restricted to the nearest parent `mod` item.
+ _ => ty::Visibility::Restricted(self.parent_scope.module.nearest_parent_mod()),
+ })
+ }
+ ast::VisibilityKind::Restricted { ref path, id, .. } => {
+ // Make `PRIVATE_IN_PUBLIC` lint a hard error.
+ self.r.has_pub_restricted = true;
+ // For visibilities we are not yet ready to provide a correct implementation
+ // of "uniform paths", so on the 2018 edition we only allow module-relative
+ // paths for now.
+ // On 2015 edition visibilities are resolved as crate-relative by default,
+ // so we are prepending a root segment if necessary.
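+ // E.g. on 2015 edition `pub(in foo::bar)` is resolved as if it were written
+ // `pub(in ::foo::bar)`.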
+ let ident = path.segments.get(0).expect("empty path in visibility").ident;
+ let crate_root = if ident.is_path_segment_keyword() {
+ None
+ } else if ident.span.rust_2015() {
+ Some(Segment::from_ident(Ident::new(
+ kw::PathRoot,
+ path.span.shrink_to_lo().with_ctxt(ident.span.ctxt()),
+ )))
+ } else {
+ return Err(VisResolutionError::Relative2018(ident.span, path));
+ };
+
+ let segments = crate_root
+ .into_iter()
+ .chain(path.segments.iter().map(|seg| seg.into()))
+ .collect::<Vec<_>>();
+ let expected_found_error = |res| {
+ Err(VisResolutionError::ExpectedFound(
+ path.span,
+ Segment::names_to_string(&segments),
+ res,
+ ))
+ };
+ match self.r.resolve_path(
+ &segments,
+ Some(TypeNS),
+ parent_scope,
+ finalize.then(|| Finalize::new(id, path.span)),
+ None,
+ ) {
+ PathResult::Module(ModuleOrUniformRoot::Module(module)) => {
+ let res = module.res().expect("visibility resolved to unnamed block");
+ if finalize {
+ self.r.record_partial_res(id, PartialRes::new(res));
+ }
+ if module.is_normal() {
+ if res == Res::Err {
+ Ok(ty::Visibility::Public)
+ } else {
+ let vis = ty::Visibility::Restricted(res.def_id());
+ if self.r.is_accessible_from(vis, parent_scope.module) {
+ Ok(vis)
+ } else {
+ Err(VisResolutionError::AncestorOnly(path.span))
+ }
+ }
+ } else {
+ expected_found_error(res)
+ }
+ }
+ PathResult::Module(..) => Err(VisResolutionError::ModuleOnly(path.span)),
+ PathResult::NonModule(partial_res) => {
+ expected_found_error(partial_res.base_res())
+ }
+ PathResult::Failed { span, label, suggestion, .. } => {
+ Err(VisResolutionError::FailedToResolve(span, label, suggestion))
+ }
+ PathResult::Indeterminate => Err(VisResolutionError::Indeterminate(path.span)),
+ }
+ }
+ }
+ }
+
+ fn insert_field_names_local(&mut self, def_id: DefId, vdata: &ast::VariantData) {
+ let field_names = vdata
+ .fields()
+ .iter()
+ .map(|field| respan(field.span, field.ident.map_or(kw::Empty, |ident| ident.name)))
+ .collect();
+ self.insert_field_names(def_id, field_names);
+ }
+
+ fn insert_field_names(&mut self, def_id: DefId, field_names: Vec<Spanned<Symbol>>) {
+ self.r.field_names.insert(def_id, field_names);
+ }
+
+ fn block_needs_anonymous_module(&mut self, block: &Block) -> bool {
+ // If any statements are items, we need to create an anonymous module
+ block
+ .stmts
+ .iter()
+ .any(|statement| matches!(statement.kind, StmtKind::Item(_) | StmtKind::MacCall(_)))
+ }
+
+ // Add an import to the current module.
+ fn add_import(
+ &mut self,
+ module_path: Vec<Segment>,
+ kind: ImportKind<'a>,
+ span: Span,
+ id: NodeId,
+ item: &ast::Item,
+ root_span: Span,
+ root_id: NodeId,
+ vis: ty::Visibility,
+ ) {
+ let current_module = self.parent_scope.module;
+ let import = self.r.arenas.alloc_import(Import {
+ kind,
+ parent_scope: self.parent_scope,
+ module_path,
+ imported_module: Cell::new(None),
+ span,
+ id,
+ use_span: item.span,
+ use_span_with_attributes: item.span_with_attributes(),
+ has_attributes: !item.attrs.is_empty(),
+ root_span,
+ root_id,
+ vis: Cell::new(vis),
+ used: Cell::new(false),
+ });
+
+ self.r.indeterminate_imports.push(import);
+ match import.kind {
+ // Don't add unresolved underscore imports to modules
+ ImportKind::Single { target: Ident { name: kw::Underscore, .. }, .. } => {}
+ ImportKind::Single { target, type_ns_only, .. } => {
+ self.r.per_ns(|this, ns| {
+ if !type_ns_only || ns == TypeNS {
+ let key = this.new_key(target, ns);
+ let mut resolution = this.resolution(current_module, key).borrow_mut();
+ resolution.add_single_import(import);
+ }
+ });
+ }
+ // We don't add prelude imports to the globs since they only affect lexical scopes,
+ // which are not relevant to import resolution.
+ ImportKind::Glob { is_prelude: true, .. } => {}
+ ImportKind::Glob { .. } => current_module.globs.borrow_mut().push(import),
+ _ => unreachable!(),
+ }
+ }
+
+ fn build_reduced_graph_for_use_tree(
+ &mut self,
+ // This particular use tree
+ use_tree: &ast::UseTree,
+ id: NodeId,
+ parent_prefix: &[Segment],
+ nested: bool,
+ // The whole `use` item
+ item: &Item,
+ vis: ty::Visibility,
+ root_span: Span,
+ ) {
+ debug!(
+ "build_reduced_graph_for_use_tree(parent_prefix={:?}, use_tree={:?}, nested={})",
+ parent_prefix, use_tree, nested
+ );
+
+ let mut prefix_iter = parent_prefix
+ .iter()
+ .cloned()
+ .chain(use_tree.prefix.segments.iter().map(|seg| seg.into()))
+ .peekable();
+
+ // On 2015 edition imports are resolved as crate-relative by default,
+ // so prefixes are prepended with crate root segment if necessary.
+ // The root is prepended lazily, when the first non-empty prefix or terminating glob
+ // appears, so imports in braced groups can have roots prepended independently.
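+ // E.g. in `use {a::b, c::d};` on 2015 edition, `a::b` and `c::d` each get their
+ // own implicit root segment: `::a::b` and `::c::d`.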
+ let is_glob = matches!(use_tree.kind, ast::UseTreeKind::Glob);
+ let crate_root = match prefix_iter.peek() {
+ Some(seg) if !seg.ident.is_path_segment_keyword() && seg.ident.span.rust_2015() => {
+ Some(seg.ident.span.ctxt())
+ }
+ None if is_glob && use_tree.span.rust_2015() => Some(use_tree.span.ctxt()),
+ _ => None,
+ }
+ .map(|ctxt| {
+ Segment::from_ident(Ident::new(
+ kw::PathRoot,
+ use_tree.prefix.span.shrink_to_lo().with_ctxt(ctxt),
+ ))
+ });
+
+ let prefix = crate_root.into_iter().chain(prefix_iter).collect::<Vec<_>>();
+ debug!("build_reduced_graph_for_use_tree: prefix={:?}", prefix);
+
+ let empty_for_self = |prefix: &[Segment]| {
+ prefix.is_empty() || prefix.len() == 1 && prefix[0].ident.name == kw::PathRoot
+ };
+ match use_tree.kind {
+ ast::UseTreeKind::Simple(rename, id1, id2) => {
+ let mut ident = use_tree.ident();
+ let mut module_path = prefix;
+ let mut source = module_path.pop().unwrap();
+ let mut type_ns_only = false;
+
+ self.r.visibilities.insert(self.r.local_def_id(id), vis);
+ if id1 != ast::DUMMY_NODE_ID {
+ self.r.visibilities.insert(self.r.local_def_id(id1), vis);
+ }
+ if id2 != ast::DUMMY_NODE_ID {
+ self.r.visibilities.insert(self.r.local_def_id(id2), vis);
+ }
+
+ if nested {
+ // Correctly handle `self`
+ if source.ident.name == kw::SelfLower {
+ type_ns_only = true;
+
+ if empty_for_self(&module_path) {
+ self.r.report_error(
+ use_tree.span,
+ ResolutionError::SelfImportOnlyInImportListWithNonEmptyPrefix,
+ );
+ return;
+ }
+
+ // Replace `use foo::{ self };` with `use foo;`
+ source = module_path.pop().unwrap();
+ if rename.is_none() {
+ ident = source.ident;
+ }
+ }
+ } else {
+ // Disallow `self`
+ if source.ident.name == kw::SelfLower {
+ let parent = module_path.last();
+
+ let span = match parent {
+ // only `::self` from `use foo::self as bar`
+ Some(seg) => seg.ident.span.shrink_to_hi().to(source.ident.span),
+ None => source.ident.span,
+ };
+ let span_with_rename = match rename {
+ // only `self as bar` from `use foo::self as bar`
+ Some(rename) => source.ident.span.to(rename.span),
+ None => source.ident.span,
+ };
+ self.r.report_error(
+ span,
+ ResolutionError::SelfImportsOnlyAllowedWithin {
+ root: parent.is_none(),
+ span_with_rename,
+ },
+ );
+
+ // Error recovery: replace `use foo::self;` with `use foo;`
+ if let Some(parent) = module_path.pop() {
+ source = parent;
+ if rename.is_none() {
+ ident = source.ident;
+ }
+ }
+ }
+
+ // Disallow `use $crate;`
+ if source.ident.name == kw::DollarCrate && module_path.is_empty() {
+ let crate_root = self.r.resolve_crate_root(source.ident);
+ let crate_name = match crate_root.kind {
+ ModuleKind::Def(.., name) => name,
+ ModuleKind::Block => unreachable!(),
+ };
+ // HACK(eddyb) unclear how good this is, but keeping `$crate`
+ // in `source` breaks `src/test/ui/imports/import-crate-var.rs`,
+ // while the current crate doesn't have a valid `crate_name`.
+ if crate_name != kw::Empty {
+ // `crate_name` should not be interpreted as relative.
+ module_path.push(Segment::from_ident_and_id(
+ Ident { name: kw::PathRoot, span: source.ident.span },
+ self.r.next_node_id(),
+ ));
+ source.ident.name = crate_name;
+ }
+ if rename.is_none() {
+ ident.name = crate_name;
+ }
+
+ self.r
+ .session
+ .struct_span_err(item.span, "`$crate` may not be imported")
+ .emit();
+ }
+ }
+
+ if ident.name == kw::Crate {
+ self.r.session.span_err(
+ ident.span,
+ "crate root imports need to be explicitly named: \
+ `use crate as name;`",
+ );
+ }
+
+ let kind = ImportKind::Single {
+ source: source.ident,
+ target: ident,
+ source_bindings: PerNS {
+ type_ns: Cell::new(Err(Determinacy::Undetermined)),
+ value_ns: Cell::new(Err(Determinacy::Undetermined)),
+ macro_ns: Cell::new(Err(Determinacy::Undetermined)),
+ },
+ target_bindings: PerNS {
+ type_ns: Cell::new(None),
+ value_ns: Cell::new(None),
+ macro_ns: Cell::new(None),
+ },
+ type_ns_only,
+ nested,
+ additional_ids: (id1, id2),
+ };
+
+ self.add_import(
+ module_path,
+ kind,
+ use_tree.span,
+ id,
+ item,
+ root_span,
+ item.id,
+ vis,
+ );
+ }
+ ast::UseTreeKind::Glob => {
+ let kind = ImportKind::Glob {
+ is_prelude: self.r.session.contains_name(&item.attrs, sym::prelude_import),
+ max_vis: Cell::new(ty::Visibility::Invisible),
+ };
+ self.r.visibilities.insert(self.r.local_def_id(id), vis);
+ self.add_import(prefix, kind, use_tree.span, id, item, root_span, item.id, vis);
+ }
+ ast::UseTreeKind::Nested(ref items) => {
+ // Ensure there is at most one `self` in the list
+ let self_spans = items
+ .iter()
+ .filter_map(|&(ref use_tree, _)| {
+ if let ast::UseTreeKind::Simple(..) = use_tree.kind {
+ if use_tree.ident().name == kw::SelfLower {
+ return Some(use_tree.span);
+ }
+ }
+
+ None
+ })
+ .collect::<Vec<_>>();
+ if self_spans.len() > 1 {
+ let mut e = self.r.into_struct_error(
+ self_spans[0],
+ ResolutionError::SelfImportCanOnlyAppearOnceInTheList,
+ );
+
+ for other_span in self_spans.iter().skip(1) {
+ e.span_label(*other_span, "another `self` import appears here");
+ }
+
+ e.emit();
+ }
+
+ for &(ref tree, id) in items {
+ self.build_reduced_graph_for_use_tree(
+ // This particular use tree
+ tree, id, &prefix, true, // The whole `use` item
+ item, vis, root_span,
+ );
+ }
+
+ // Empty groups `a::b::{}` are turned into synthetic `self` imports
+ // `a::b::{self as _}`, so that their prefixes are correctly
+ // resolved and checked for privacy/stability/etc.
+ if items.is_empty() && !empty_for_self(&prefix) {
+ let new_span = prefix[prefix.len() - 1].ident.span;
+ let tree = ast::UseTree {
+ prefix: ast::Path::from_ident(Ident::new(kw::SelfLower, new_span)),
+ kind: ast::UseTreeKind::Simple(
+ Some(Ident::new(kw::Underscore, new_span)),
+ ast::DUMMY_NODE_ID,
+ ast::DUMMY_NODE_ID,
+ ),
+ span: use_tree.span,
+ };
+ self.build_reduced_graph_for_use_tree(
+ // This particular use tree
+ &tree,
+ id,
+ &prefix,
+ true,
+ // The whole `use` item
+ item,
+ ty::Visibility::Invisible,
+ root_span,
+ );
+ }
+ }
+ }
+ }
+
+ /// Constructs the reduced graph for one item.
+ fn build_reduced_graph_for_item(&mut self, item: &'b Item) {
+ let parent_scope = &self.parent_scope;
+ let parent = parent_scope.module;
+ let expansion = parent_scope.expansion;
+ let ident = item.ident;
+ let sp = item.span;
+ let vis = self.resolve_visibility(&item.vis);
+ let local_def_id = self.r.local_def_id(item.id);
+ let def_id = local_def_id.to_def_id();
+
+ self.r.visibilities.insert(local_def_id, vis);
+
+ match item.kind {
+ ItemKind::Use(ref use_tree) => {
+ self.build_reduced_graph_for_use_tree(
+ // This particular use tree
+ use_tree,
+ item.id,
+ &[],
+ false,
+ // The whole `use` item
+ item,
+ vis,
+ use_tree.span,
+ );
+ }
+
+ ItemKind::ExternCrate(orig_name) => {
+ self.build_reduced_graph_for_extern_crate(
+ orig_name,
+ item,
+ local_def_id,
+ vis,
+ parent,
+ );
+ }
+
+ ItemKind::Mod(..) => {
+ let module = self.r.new_module(
+ Some(parent),
+ ModuleKind::Def(DefKind::Mod, def_id, ident.name),
+ expansion.to_expn_id(),
+ item.span,
+ parent.no_implicit_prelude
+ || self.r.session.contains_name(&item.attrs, sym::no_implicit_prelude),
+ );
+ self.r.define(parent, ident, TypeNS, (module, vis, sp, expansion));
+
+ // Descend into the module.
+ self.parent_scope.module = module;
+ }
+
+ // These items live in the value namespace.
+ ItemKind::Static(_, mt, _) => {
+ let res = Res::Def(DefKind::Static(mt), def_id);
+ self.r.define(parent, ident, ValueNS, (res, vis, sp, expansion));
+ }
+ ItemKind::Const(..) => {
+ let res = Res::Def(DefKind::Const, def_id);
+ self.r.define(parent, ident, ValueNS, (res, vis, sp, expansion));
+ }
+ ItemKind::Fn(..) => {
+ let res = Res::Def(DefKind::Fn, def_id);
+ self.r.define(parent, ident, ValueNS, (res, vis, sp, expansion));
+
+ // Functions introducing procedural macros reserve a slot
+ // in the macro namespace as well (see #52225).
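+ // E.g. `#[proc_macro] pub fn foo(..)` reserves `foo` as a bang-macro stub in
+ // addition to defining the function `foo` above.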
+ self.define_macro(item);
+ }
+
+ // These items live in the type namespace.
+ ItemKind::TyAlias(..) => {
+ let res = Res::Def(DefKind::TyAlias, def_id);
+ self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
+ }
+
+ ItemKind::Enum(_, _) => {
+ let module = self.r.new_module(
+ Some(parent),
+ ModuleKind::Def(DefKind::Enum, def_id, ident.name),
+ expansion.to_expn_id(),
+ item.span,
+ parent.no_implicit_prelude,
+ );
+ self.r.define(parent, ident, TypeNS, (module, vis, sp, expansion));
+ self.parent_scope.module = module;
+ }
+
+ ItemKind::TraitAlias(..) => {
+ let res = Res::Def(DefKind::TraitAlias, def_id);
+ self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
+ }
+
+ // These items live in both the type and value namespaces.
+ ItemKind::Struct(ref vdata, _) => {
+ // Define a name in the type namespace.
+ let res = Res::Def(DefKind::Struct, def_id);
+ self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
+
+ // Record field names for error reporting.
+ self.insert_field_names_local(def_id, vdata);
+
+ // If this is a tuple or unit struct, define a name
+ // in the value namespace as well.
+ if let Some(ctor_node_id) = vdata.ctor_id() {
+ // If the structure is marked as non_exhaustive then lower the visibility
+ // to within the crate.
+ let mut ctor_vis = if vis == ty::Visibility::Public
+ && self.r.session.contains_name(&item.attrs, sym::non_exhaustive)
+ {
+ ty::Visibility::Restricted(CRATE_DEF_ID.to_def_id())
+ } else {
+ vis
+ };
+
+ let mut ret_fields = Vec::with_capacity(vdata.fields().len());
+
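+ // The constructor is only usable where all of its fields are visible, so
+ // its effective visibility is lowered to that of the least visible field.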
+ for field in vdata.fields() {
+ // NOTE: The field may be an expansion placeholder, but expansion sets
+ // correct visibilities for unnamed field placeholders specifically, so the
+ // constructor visibility should still be determined correctly.
+ let field_vis = self
+ .try_resolve_visibility(&field.vis, false)
+ .unwrap_or(ty::Visibility::Public);
+ if ctor_vis.is_at_least(field_vis, &*self.r) {
+ ctor_vis = field_vis;
+ }
+ ret_fields.push(field_vis);
+ }
+ let ctor_def_id = self.r.local_def_id(ctor_node_id);
+ let ctor_res = Res::Def(
+ DefKind::Ctor(CtorOf::Struct, CtorKind::from_ast(vdata)),
+ ctor_def_id.to_def_id(),
+ );
+ self.r.define(parent, ident, ValueNS, (ctor_res, ctor_vis, sp, expansion));
+ self.r.visibilities.insert(ctor_def_id, ctor_vis);
+
+ self.r.struct_constructors.insert(def_id, (ctor_res, ctor_vis, ret_fields));
+ }
+ }
+
+ ItemKind::Union(ref vdata, _) => {
+ let res = Res::Def(DefKind::Union, def_id);
+ self.r.define(parent, ident, TypeNS, (res, vis, sp, expansion));
+
+ // Record field names for error reporting.
+ self.insert_field_names_local(def_id, vdata);
+ }
+
+ ItemKind::Trait(..) => {
+ // Add all the items within to a new module.
+ let module = self.r.new_module(
+ Some(parent),
+ ModuleKind::Def(DefKind::Trait, def_id, ident.name),
+ expansion.to_expn_id(),
+ item.span,
+ parent.no_implicit_prelude,
+ );
+ self.r.define(parent, ident, TypeNS, (module, vis, sp, expansion));
+ self.parent_scope.module = module;
+ }
+
+ // These items do not add names to modules.
+ ItemKind::Impl(box Impl { of_trait: Some(..), .. }) => {
+ self.r.trait_impl_items.insert(local_def_id);
+ }
+ ItemKind::Impl { .. } | ItemKind::ForeignMod(..) | ItemKind::GlobalAsm(..) => {}
+
+ ItemKind::MacroDef(..) | ItemKind::MacCall(_) => unreachable!(),
+ }
+ }
+
+ fn build_reduced_graph_for_extern_crate(
+ &mut self,
+ orig_name: Option<Symbol>,
+ item: &Item,
+ local_def_id: LocalDefId,
+ vis: ty::Visibility,
+ parent: Module<'a>,
+ ) {
+ let ident = item.ident;
+ let sp = item.span;
+ let parent_scope = self.parent_scope;
+ let expansion = parent_scope.expansion;
+
+ let (used, module, binding) = if orig_name.is_none() && ident.name == kw::SelfLower {
+ self.r
+ .session
+ .struct_span_err(item.span, "`extern crate self;` requires renaming")
+ .span_suggestion(
+ item.span,
+ "rename the `self` crate to be able to import it",
+ "extern crate self as name;",
+ Applicability::HasPlaceholders,
+ )
+ .emit();
+ return;
+ } else if orig_name == Some(kw::SelfLower) {
+ Some(self.r.graph_root)
+ } else {
+ self.r.crate_loader.process_extern_crate(item, &self.r.definitions, local_def_id).map(
+ |crate_id| {
+ self.r.extern_crate_map.insert(local_def_id, crate_id);
+ self.r.expect_module(crate_id.as_def_id())
+ },
+ )
+ }
+ .map(|module| {
+ let used = self.process_macro_use_imports(item, module);
+ let binding =
+ (module, ty::Visibility::Public, sp, expansion).to_name_binding(self.r.arenas);
+ (used, Some(ModuleOrUniformRoot::Module(module)), binding)
+ })
+ .unwrap_or((true, None, self.r.dummy_binding));
+ let import = self.r.arenas.alloc_import(Import {
+ kind: ImportKind::ExternCrate { source: orig_name, target: ident },
+ root_id: item.id,
+ id: item.id,
+ parent_scope: self.parent_scope,
+ imported_module: Cell::new(module),
+ has_attributes: !item.attrs.is_empty(),
+ use_span_with_attributes: item.span_with_attributes(),
+ use_span: item.span,
+ root_span: item.span,
+ span: item.span,
+ module_path: Vec::new(),
+ vis: Cell::new(vis),
+ used: Cell::new(used),
+ });
+ self.r.potentially_unused_imports.push(import);
+ let imported_binding = self.r.import(binding, import);
+ if ptr::eq(parent, self.r.graph_root) {
+ if let Some(entry) = self.r.extern_prelude.get(&ident.normalize_to_macros_2_0()) {
+ if expansion != LocalExpnId::ROOT
+ && orig_name.is_some()
+ && entry.extern_crate_item.is_none()
+ {
+ let msg = "macro-expanded `extern crate` items cannot \
+ shadow names passed with `--extern`";
+ self.r.session.span_err(item.span, msg);
+ }
+ }
+ let entry = self.r.extern_prelude.entry(ident.normalize_to_macros_2_0()).or_insert(
+ ExternPreludeEntry { extern_crate_item: None, introduced_by_item: true },
+ );
+ entry.extern_crate_item = Some(imported_binding);
+ if orig_name.is_some() {
+ entry.introduced_by_item = true;
+ }
+ }
+ self.r.define(parent, ident, TypeNS, imported_binding);
+ }
+
+ /// Constructs the reduced graph for one foreign item.
+ fn build_reduced_graph_for_foreign_item(&mut self, item: &ForeignItem) {
+ let local_def_id = self.r.local_def_id(item.id);
+ let def_id = local_def_id.to_def_id();
+ let (def_kind, ns) = match item.kind {
+ ForeignItemKind::Fn(..) => (DefKind::Fn, ValueNS),
+ ForeignItemKind::Static(_, mt, _) => (DefKind::Static(mt), ValueNS),
+ ForeignItemKind::TyAlias(..) => (DefKind::ForeignTy, TypeNS),
+ ForeignItemKind::MacCall(_) => unreachable!(),
+ };
+ let parent = self.parent_scope.module;
+ let expansion = self.parent_scope.expansion;
+ let vis = self.resolve_visibility(&item.vis);
+ let res = Res::Def(def_kind, def_id);
+ self.r.define(parent, item.ident, ns, (res, vis, item.span, expansion));
+ self.r.visibilities.insert(local_def_id, vis);
+ }
+
+ fn build_reduced_graph_for_block(&mut self, block: &Block) {
+ let parent = self.parent_scope.module;
+ let expansion = self.parent_scope.expansion;
+ if self.block_needs_anonymous_module(block) {
+ let module = self.r.new_module(
+ Some(parent),
+ ModuleKind::Block,
+ expansion.to_expn_id(),
+ block.span,
+ parent.no_implicit_prelude,
+ );
+ self.r.block_map.insert(block.id, module);
+ self.parent_scope.module = module; // Descend into the block.
+ }
+ }
+
+ /// Builds the reduced graph for a single item in an external crate.
+ fn build_reduced_graph_for_external_crate_res(&mut self, child: ModChild) {
+ let parent = self.parent_scope.module;
+ let ModChild { ident, res, vis, span, macro_rules } = child;
+ let res = res.expect_non_local();
+ let expansion = self.parent_scope.expansion;
+ // Record primary definitions.
+ match res {
+ Res::Def(DefKind::Mod | DefKind::Enum | DefKind::Trait, def_id) => {
+ let module = self.r.expect_module(def_id);
+ self.r.define(parent, ident, TypeNS, (module, vis, span, expansion));
+ }
+ Res::Def(
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Variant
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::OpaqueTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy,
+ _,
+ )
+ | Res::PrimTy(..)
+ | Res::ToolMod => self.r.define(parent, ident, TypeNS, (res, vis, span, expansion)),
+ Res::Def(
+ DefKind::Fn
+ | DefKind::AssocFn
+ | DefKind::Static(_)
+ | DefKind::Const
+ | DefKind::AssocConst
+ | DefKind::Ctor(..),
+ _,
+ ) => self.r.define(parent, ident, ValueNS, (res, vis, span, expansion)),
+ Res::Def(DefKind::Macro(..), _) | Res::NonMacroAttr(..) => {
+ if !macro_rules {
+ self.r.define(parent, ident, MacroNS, (res, vis, span, expansion))
+ }
+ }
+ Res::Def(
+ DefKind::TyParam
+ | DefKind::ConstParam
+ | DefKind::ExternCrate
+ | DefKind::Use
+ | DefKind::ForeignMod
+ | DefKind::AnonConst
+ | DefKind::InlineConst
+ | DefKind::Field
+ | DefKind::LifetimeParam
+ | DefKind::GlobalAsm
+ | DefKind::Closure
+ | DefKind::Impl
+ | DefKind::Generator,
+ _,
+ )
+ | Res::Local(..)
+ | Res::SelfTy { .. }
+ | Res::SelfCtor(..)
+ | Res::Err => bug!("unexpected resolution: {:?}", res),
+ }
+ // Record some extra data for better diagnostics.
+ let cstore = self.r.cstore();
+ match res {
+ Res::Def(DefKind::Struct, def_id) => {
+ let field_names =
+ cstore.struct_field_names_untracked(def_id, self.r.session).collect();
+ let ctor = cstore.ctor_def_id_and_kind_untracked(def_id);
+ if let Some((ctor_def_id, ctor_kind)) = ctor {
+ let ctor_res = Res::Def(DefKind::Ctor(CtorOf::Struct, ctor_kind), ctor_def_id);
+ let ctor_vis = cstore.visibility_untracked(ctor_def_id);
+ let field_visibilities =
+ cstore.struct_field_visibilities_untracked(def_id).collect();
+ self.r
+ .struct_constructors
+ .insert(def_id, (ctor_res, ctor_vis, field_visibilities));
+ }
+ self.insert_field_names(def_id, field_names);
+ }
+ Res::Def(DefKind::Union, def_id) => {
+ let field_names =
+ cstore.struct_field_names_untracked(def_id, self.r.session).collect();
+ self.insert_field_names(def_id, field_names);
+ }
+ Res::Def(DefKind::AssocFn, def_id) => {
+ if cstore.fn_has_self_parameter_untracked(def_id) {
+ self.r.has_self.insert(def_id);
+ }
+ }
+ _ => {}
+ }
+ }
+
+ fn add_macro_use_binding(
+ &mut self,
+ name: Symbol,
+ binding: &'a NameBinding<'a>,
+ span: Span,
+ allow_shadowing: bool,
+ ) {
+ if self.r.macro_use_prelude.insert(name, binding).is_some() && !allow_shadowing {
+ let msg = format!("`{}` is already in scope", name);
+ let note =
+ "macro-expanded `#[macro_use]`s may not shadow existing macros (see RFC 1560)";
+ self.r.session.struct_span_err(span, &msg).note(note).emit();
+ }
+ }
+
+ /// Returns `true` if we should consider the underlying `extern crate` to be used.
+ fn process_macro_use_imports(&mut self, item: &Item, module: Module<'a>) -> bool {
+ let mut import_all = None;
+ let mut single_imports = Vec::new();
+ for attr in &item.attrs {
+ if attr.has_name(sym::macro_use) {
+ if self.parent_scope.module.parent.is_some() {
+ struct_span_err!(
+ self.r.session,
+ item.span,
+ E0468,
+ "an `extern crate` loading macros must be at the crate root"
+ )
+ .emit();
+ }
+ if let ItemKind::ExternCrate(Some(orig_name)) = item.kind {
+ if orig_name == kw::SelfLower {
+ self.r
+ .session
+ .struct_span_err(
+ attr.span,
+ "`#[macro_use]` is not supported on `extern crate self`",
+ )
+ .emit();
+ }
+ }
+ let ill_formed = |span| {
+ struct_span_err!(self.r.session, span, E0466, "bad macro import").emit();
+ };
+ match attr.meta() {
+ Some(meta) => match meta.kind {
+ MetaItemKind::Word => {
+ import_all = Some(meta.span);
+ break;
+ }
+ MetaItemKind::List(nested_metas) => {
+ for nested_meta in nested_metas {
+ match nested_meta.ident() {
+ Some(ident) if nested_meta.is_word() => {
+ single_imports.push(ident)
+ }
+ _ => ill_formed(nested_meta.span()),
+ }
+ }
+ }
+ MetaItemKind::NameValue(..) => ill_formed(meta.span),
+ },
+ None => ill_formed(attr.span),
+ }
+ }
+ }
+
+ let macro_use_import = |this: &Self, span| {
+ this.r.arenas.alloc_import(Import {
+ kind: ImportKind::MacroUse,
+ root_id: item.id,
+ id: item.id,
+ parent_scope: this.parent_scope,
+ imported_module: Cell::new(Some(ModuleOrUniformRoot::Module(module))),
+ use_span_with_attributes: item.span_with_attributes(),
+ has_attributes: !item.attrs.is_empty(),
+ use_span: item.span,
+ root_span: span,
+ span,
+ module_path: Vec::new(),
+ vis: Cell::new(ty::Visibility::Restricted(CRATE_DEF_ID.to_def_id())),
+ used: Cell::new(false),
+ })
+ };
+
+ let allow_shadowing = self.parent_scope.expansion == LocalExpnId::ROOT;
+ if let Some(span) = import_all {
+ let import = macro_use_import(self, span);
+ self.r.potentially_unused_imports.push(import);
+ module.for_each_child(self, |this, ident, ns, binding| {
+ if ns == MacroNS {
+ let imported_binding = this.r.import(binding, import);
+ this.add_macro_use_binding(ident.name, imported_binding, span, allow_shadowing);
+ }
+ });
+ } else {
+ for ident in single_imports.iter().cloned() {
+ let result = self.r.maybe_resolve_ident_in_module(
+ ModuleOrUniformRoot::Module(module),
+ ident,
+ MacroNS,
+ &self.parent_scope,
+ );
+ if let Ok(binding) = result {
+ let import = macro_use_import(self, ident.span);
+ self.r.potentially_unused_imports.push(import);
+ let imported_binding = self.r.import(binding, import);
+ self.add_macro_use_binding(
+ ident.name,
+ imported_binding,
+ ident.span,
+ allow_shadowing,
+ );
+ } else {
+ struct_span_err!(self.r.session, ident.span, E0469, "imported macro not found")
+ .emit();
+ }
+ }
+ }
+ import_all.is_some() || !single_imports.is_empty()
+ }
+
+ /// Returns `true` if this attribute list contains `macro_use`.
+ fn contains_macro_use(&mut self, attrs: &[ast::Attribute]) -> bool {
+ for attr in attrs {
+ if attr.has_name(sym::macro_escape) {
+ let msg = "`#[macro_escape]` is a deprecated synonym for `#[macro_use]`";
+ let mut err = self.r.session.struct_span_warn(attr.span, msg);
+ if let ast::AttrStyle::Inner = attr.style {
+ err.help("try an outer attribute: `#[macro_use]`").emit();
+ } else {
+ err.emit();
+ }
+ } else if !attr.has_name(sym::macro_use) {
+ continue;
+ }
+
+ if !attr.is_word() {
+ self.r.session.span_err(attr.span, "arguments to `macro_use` are not allowed here");
+ }
+ return true;
+ }
+
+ false
+ }
+
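+ /// Records the current parent scope for a macro invocation placeholder, so the
+ /// invocation can be expanded and resolved in that scope later.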
+ fn visit_invoc(&mut self, id: NodeId) -> LocalExpnId {
+ let invoc_id = id.placeholder_to_expn_id();
+ let old_parent_scope = self.r.invocation_parent_scopes.insert(invoc_id, self.parent_scope);
+ assert!(old_parent_scope.is_none(), "invocation data is reset for an invocation");
+ invoc_id
+ }
+
+ /// Visits an invocation in a context in which it can emit a named item (possibly
+ /// `macro_rules`) directly into its parent scope's module.
+ fn visit_invoc_in_module(&mut self, id: NodeId) -> MacroRulesScopeRef<'a> {
+ let invoc_id = self.visit_invoc(id);
+ self.parent_scope.module.unexpanded_invocations.borrow_mut().insert(invoc_id);
+ self.r.arenas.alloc_macro_rules_scope(MacroRulesScope::Invocation(invoc_id))
+ }
+
+ fn proc_macro_stub(&self, item: &ast::Item) -> Option<(MacroKind, Ident, Span)> {
+ if self.r.session.contains_name(&item.attrs, sym::proc_macro) {
+ return Some((MacroKind::Bang, item.ident, item.span));
+ } else if self.r.session.contains_name(&item.attrs, sym::proc_macro_attribute) {
+ return Some((MacroKind::Attr, item.ident, item.span));
+ } else if let Some(attr) = self.r.session.find_by_name(&item.attrs, sym::proc_macro_derive)
+ {
+ if let Some(nested_meta) = attr.meta_item_list().and_then(|list| list.get(0).cloned()) {
+ if let Some(ident) = nested_meta.ident() {
+ return Some((MacroKind::Derive, ident, ident.span));
+ }
+ }
+ }
+ None
+ }
+
+ // Mark the given macro as unused unless its name starts with `_`.
+ // Macro uses will remove items from this set, and the remaining
+ // items will be reported as `unused_macros`.
+ fn insert_unused_macro(
+ &mut self,
+ ident: Ident,
+ def_id: LocalDefId,
+ node_id: NodeId,
+ rule_spans: &[(usize, Span)],
+ ) {
+ if !ident.as_str().starts_with('_') {
+ self.r.unused_macros.insert(def_id, (node_id, ident));
+ for (rule_i, rule_span) in rule_spans.iter() {
+ self.r.unused_macro_rules.insert((def_id, *rule_i), (ident, *rule_span));
+ }
+ }
+ }
+
+ fn define_macro(&mut self, item: &ast::Item) -> MacroRulesScopeRef<'a> {
+ let parent_scope = self.parent_scope;
+ let expansion = parent_scope.expansion;
+ let def_id = self.r.local_def_id(item.id);
+ let (ext, ident, span, macro_rules, rule_spans) = match &item.kind {
+ ItemKind::MacroDef(def) => {
+ let (ext, rule_spans) = self.r.compile_macro(item, self.r.session.edition());
+ let ext = Lrc::new(ext);
+ (ext, item.ident, item.span, def.macro_rules, rule_spans)
+ }
+ ItemKind::Fn(..) => match self.proc_macro_stub(item) {
+ Some((macro_kind, ident, span)) => {
+ self.r.proc_macro_stubs.insert(def_id);
+ (self.r.dummy_ext(macro_kind), ident, span, false, Vec::new())
+ }
+ None => return parent_scope.macro_rules,
+ },
+ _ => unreachable!(),
+ };
+
+ let res = Res::Def(DefKind::Macro(ext.macro_kind()), def_id.to_def_id());
+ self.r.macro_map.insert(def_id.to_def_id(), MacroData { ext, macro_rules });
+ self.r.local_macro_def_scopes.insert(def_id, parent_scope.module);
+
+ if macro_rules {
+ let ident = ident.normalize_to_macros_2_0();
+ self.r.macro_names.insert(ident);
+ let is_macro_export = self.r.session.contains_name(&item.attrs, sym::macro_export);
+ let vis = if is_macro_export {
+ ty::Visibility::Public
+ } else {
+ ty::Visibility::Restricted(CRATE_DEF_ID.to_def_id())
+ };
+ let binding = (res, vis, span, expansion).to_name_binding(self.r.arenas);
+ self.r.set_binding_parent_module(binding, parent_scope.module);
+ if is_macro_export {
+ let module = self.r.graph_root;
+ self.r.define(module, ident, MacroNS, (res, vis, span, expansion, IsMacroExport));
+ } else {
+ self.r.check_reserved_macro_name(ident, res);
+ self.insert_unused_macro(ident, def_id, item.id, &rule_spans);
+ }
+ self.r.visibilities.insert(def_id, vis);
+ let scope = self.r.arenas.alloc_macro_rules_scope(MacroRulesScope::Binding(
+ self.r.arenas.alloc_macro_rules_binding(MacroRulesBinding {
+ parent_macro_rules_scope: parent_scope.macro_rules,
+ binding,
+ ident,
+ }),
+ ));
+ self.r.macro_rules_scopes.insert(def_id, scope);
+ scope
+ } else {
+ let module = parent_scope.module;
+ let vis = match item.kind {
+ // Visibilities must not be resolved non-speculatively twice,
+ // and we already resolved this one as a `fn` item visibility.
+ ItemKind::Fn(..) => {
+ self.try_resolve_visibility(&item.vis, false).unwrap_or(ty::Visibility::Public)
+ }
+ _ => self.resolve_visibility(&item.vis),
+ };
+ if vis != ty::Visibility::Public {
+ self.insert_unused_macro(ident, def_id, item.id, &rule_spans);
+ }
+ self.r.define(module, ident, MacroNS, (res, vis, span, expansion));
+ self.r.visibilities.insert(def_id, vis);
+ self.parent_scope.macro_rules
+ }
+ }
+}
+
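+// Generates a visitor method that registers a macro-call node as a pending
+// invocation and walks any other node normally.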
+macro_rules! method {
+ ($visit:ident: $ty:ty, $invoc:path, $walk:ident) => {
+ fn $visit(&mut self, node: &'b $ty) {
+ if let $invoc(..) = node.kind {
+ self.visit_invoc(node.id);
+ } else {
+ visit::$walk(self, node);
+ }
+ }
+ };
+}
+
+impl<'a, 'b> Visitor<'b> for BuildReducedGraphVisitor<'a, 'b> {
+ method!(visit_expr: ast::Expr, ast::ExprKind::MacCall, walk_expr);
+ method!(visit_pat: ast::Pat, ast::PatKind::MacCall, walk_pat);
+ method!(visit_ty: ast::Ty, ast::TyKind::MacCall, walk_ty);
+
+ fn visit_item(&mut self, item: &'b Item) {
+ let orig_module_scope = self.parent_scope.module;
+ self.parent_scope.macro_rules = match item.kind {
+ ItemKind::MacroDef(..) => {
+ let macro_rules_scope = self.define_macro(item);
+ visit::walk_item(self, item);
+ macro_rules_scope
+ }
+ ItemKind::MacCall(..) => {
+ let macro_rules_scope = self.visit_invoc_in_module(item.id);
+ visit::walk_item(self, item);
+ macro_rules_scope
+ }
+ _ => {
+ let orig_macro_rules_scope = self.parent_scope.macro_rules;
+ self.build_reduced_graph_for_item(item);
+ visit::walk_item(self, item);
+ match item.kind {
+ ItemKind::Mod(..) if self.contains_macro_use(&item.attrs) => {
+ self.parent_scope.macro_rules
+ }
+ _ => orig_macro_rules_scope,
+ }
+ }
+ };
+ self.parent_scope.module = orig_module_scope;
+ }
+
+ fn visit_stmt(&mut self, stmt: &'b ast::Stmt) {
+ if let ast::StmtKind::MacCall(..) = stmt.kind {
+ self.parent_scope.macro_rules = self.visit_invoc_in_module(stmt.id);
+ } else {
+ visit::walk_stmt(self, stmt);
+ }
+ }
+
+ fn visit_foreign_item(&mut self, foreign_item: &'b ForeignItem) {
+ if let ForeignItemKind::MacCall(_) = foreign_item.kind {
+ self.visit_invoc_in_module(foreign_item.id);
+ return;
+ }
+
+ self.build_reduced_graph_for_foreign_item(foreign_item);
+ visit::walk_foreign_item(self, foreign_item);
+ }
+
+ fn visit_block(&mut self, block: &'b Block) {
+ let orig_current_module = self.parent_scope.module;
+ let orig_current_macro_rules_scope = self.parent_scope.macro_rules;
+ self.build_reduced_graph_for_block(block);
+ visit::walk_block(self, block);
+ self.parent_scope.module = orig_current_module;
+ self.parent_scope.macro_rules = orig_current_macro_rules_scope;
+ }
+
+ fn visit_assoc_item(&mut self, item: &'b AssocItem, ctxt: AssocCtxt) {
+ if let AssocItemKind::MacCall(_) = item.kind {
+ match ctxt {
+ AssocCtxt::Trait => {
+ self.visit_invoc_in_module(item.id);
+ }
+ AssocCtxt::Impl => {
+ self.visit_invoc(item.id);
+ }
+ }
+ return;
+ }
+
+ let vis = self.resolve_visibility(&item.vis);
+ let local_def_id = self.r.local_def_id(item.id);
+ let def_id = local_def_id.to_def_id();
+
+ if !(ctxt == AssocCtxt::Impl
+ && matches!(item.vis.kind, ast::VisibilityKind::Inherited)
+ && self
+ .r
+ .trait_impl_items
+ .contains(&ty::DefIdTree::local_parent(&*self.r, local_def_id)))
+ {
+ // Trait impl item visibility is inherited from its trait when not specified
+ // explicitly. In that case we cannot determine it here in early resolve,
+ // so we leave a hole in the visibility table to be filled later.
+ self.r.visibilities.insert(local_def_id, vis);
+ }
+
+ if ctxt == AssocCtxt::Trait {
+ let (def_kind, ns) = match item.kind {
+ AssocItemKind::Const(..) => (DefKind::AssocConst, ValueNS),
+ AssocItemKind::Fn(box Fn { ref sig, .. }) => {
+ if sig.decl.has_self() {
+ self.r.has_self.insert(def_id);
+ }
+ (DefKind::AssocFn, ValueNS)
+ }
+ AssocItemKind::TyAlias(..) => (DefKind::AssocTy, TypeNS),
+ AssocItemKind::MacCall(_) => bug!(), // handled above
+ };
+
+ let parent = self.parent_scope.module;
+ let expansion = self.parent_scope.expansion;
+ let res = Res::Def(def_kind, def_id);
+ self.r.define(parent, item.ident, ns, (res, vis, item.span, expansion));
+ }
+
+ visit::walk_assoc_item(self, item, ctxt);
+ }
+
+ fn visit_attribute(&mut self, attr: &'b ast::Attribute) {
+ if !attr.is_doc_comment() && attr::is_builtin_attr(attr) {
+ self.r
+ .builtin_attrs
+ .push((attr.get_normal_item().path.segments[0].ident, self.parent_scope));
+ }
+ visit::walk_attribute(self, attr);
+ }
+
+ fn visit_arm(&mut self, arm: &'b ast::Arm) {
+ if arm.is_placeholder {
+ self.visit_invoc(arm.id);
+ } else {
+ visit::walk_arm(self, arm);
+ }
+ }
+
+ fn visit_expr_field(&mut self, f: &'b ast::ExprField) {
+ if f.is_placeholder {
+ self.visit_invoc(f.id);
+ } else {
+ visit::walk_expr_field(self, f);
+ }
+ }
+
+ fn visit_pat_field(&mut self, fp: &'b ast::PatField) {
+ if fp.is_placeholder {
+ self.visit_invoc(fp.id);
+ } else {
+ visit::walk_pat_field(self, fp);
+ }
+ }
+
+ fn visit_generic_param(&mut self, param: &'b ast::GenericParam) {
+ if param.is_placeholder {
+ self.visit_invoc(param.id);
+ } else {
+ visit::walk_generic_param(self, param);
+ }
+ }
+
+ fn visit_param(&mut self, p: &'b ast::Param) {
+ if p.is_placeholder {
+ self.visit_invoc(p.id);
+ } else {
+ visit::walk_param(self, p);
+ }
+ }
+
+ fn visit_field_def(&mut self, sf: &'b ast::FieldDef) {
+ if sf.is_placeholder {
+ self.visit_invoc(sf.id);
+ } else {
+ let vis = self.resolve_visibility(&sf.vis);
+ self.r.visibilities.insert(self.r.local_def_id(sf.id), vis);
+ visit::walk_field_def(self, sf);
+ }
+ }
+
+ // Constructs the reduced graph for one variant. Variants exist in the
+ // type and value namespaces.
+ fn visit_variant(&mut self, variant: &'b ast::Variant) {
+ if variant.is_placeholder {
+ self.visit_invoc_in_module(variant.id);
+ return;
+ }
+
+ let parent = self.parent_scope.module;
+ let expn_id = self.parent_scope.expansion;
+ let ident = variant.ident;
+
+ // Define a name in the type namespace.
+ let def_id = self.r.local_def_id(variant.id);
+ let res = Res::Def(DefKind::Variant, def_id.to_def_id());
+ let vis = self.resolve_visibility(&variant.vis);
+ self.r.define(parent, ident, TypeNS, (res, vis, variant.span, expn_id));
+ self.r.visibilities.insert(def_id, vis);
+
+ // If the variant is marked as non_exhaustive then lower the visibility to within the crate.
+ let ctor_vis = if vis == ty::Visibility::Public
+ && self.r.session.contains_name(&variant.attrs, sym::non_exhaustive)
+ {
+ ty::Visibility::Restricted(CRATE_DEF_ID.to_def_id())
+ } else {
+ vis
+ };
+
+ // Define a constructor name in the value namespace.
+ // Braced variants, unlike structs, generate unusable names in the
+ // value namespace; they are reserved for possible future use.
+ // It's ok to use the variant's id as a ctor id since an
+ // error will be reported on any use of such resolution anyway.
+ let ctor_node_id = variant.data.ctor_id().unwrap_or(variant.id);
+ let ctor_def_id = self.r.local_def_id(ctor_node_id);
+ let ctor_kind = CtorKind::from_ast(&variant.data);
+ let ctor_res = Res::Def(DefKind::Ctor(CtorOf::Variant, ctor_kind), ctor_def_id.to_def_id());
+ self.r.define(parent, ident, ValueNS, (ctor_res, ctor_vis, variant.span, expn_id));
+ if ctor_def_id != def_id {
+ self.r.visibilities.insert(ctor_def_id, ctor_vis);
+ }
+ // Record field names for error reporting.
+ self.insert_field_names_local(ctor_def_id.to_def_id(), &variant.data);
+
+ visit::walk_variant(self, variant);
+ }
+
+ fn visit_crate(&mut self, krate: &'b ast::Crate) {
+ if krate.is_placeholder {
+ self.visit_invoc_in_module(krate.id);
+ } else {
+ visit::walk_crate(self, krate);
+ self.contains_macro_use(&krate.attrs);
+ }
+ }
+}
diff --git a/compiler/rustc_resolve/src/check_unused.rs b/compiler/rustc_resolve/src/check_unused.rs
new file mode 100644
index 000000000..f2f6f1d89
--- /dev/null
+++ b/compiler/rustc_resolve/src/check_unused.rs
@@ -0,0 +1,350 @@
+//
+// Unused import checking
+//
+// Although this is mostly a lint pass, it lives here because it depends on
+// resolve data structures and because it finalises the privacy information for
+// `use` items.
+//
+// Unused trait imports can't be checked until method resolution. We save
+// candidates here, and do the actual check in librustc_typeck/check_unused.rs.
+//
+// Checking for unused imports is split into three steps:
+//
+// - `UnusedImportCheckVisitor` walks the AST to find all the unused imports
+// inside of `UseTree`s, recording their `NodeId`s and grouping them by
+// the parent `use` item
+//
+// - `calc_unused_spans` then walks over all the `use` items marked in the
+// previous step to collect the spans associated with the `NodeId`s and to
+// calculate the spans that can be removed by rustfix; This is done in a
+// separate step to be able to collapse the adjacent spans that rustfix
+// will remove
+//
+// - `check_crate` finally emits the diagnostics based on the data generated
+// in the last step
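+//
+// For example, given `use foo::{bar, baz};` where only `bar` is used, the first
+// step records `baz` as unused under the parent `use` item, the second computes
+// the removable span `, baz` (including the separating comma), and the third
+// emits a single `unused_imports` lint for the item.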
+
+use crate::imports::ImportKind;
+use crate::module_to_string;
+use crate::Resolver;
+
+use rustc_ast as ast;
+use rustc_ast::node_id::NodeMap;
+use rustc_ast::visit::{self, Visitor};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{pluralize, MultiSpan};
+use rustc_session::lint::builtin::{MACRO_USE_EXTERN_CRATE, UNUSED_IMPORTS};
+use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_span::{Span, DUMMY_SP};
+
+struct UnusedImport<'a> {
+ use_tree: &'a ast::UseTree,
+ use_tree_id: ast::NodeId,
+ item_span: Span,
+ unused: FxHashSet<ast::NodeId>,
+}
+
+impl<'a> UnusedImport<'a> {
+ fn add(&mut self, id: ast::NodeId) {
+ self.unused.insert(id);
+ }
+}
+
+struct UnusedImportCheckVisitor<'a, 'b> {
+ r: &'a mut Resolver<'b>,
+ /// All the (so far) unused imports, grouped by the parent `use` item
+ unused_imports: NodeMap<UnusedImport<'a>>,
+ base_use_tree: Option<&'a ast::UseTree>,
+ base_id: ast::NodeId,
+ item_span: Span,
+}
+
+impl<'a, 'b> UnusedImportCheckVisitor<'a, 'b> {
+ // We have information about whether `use` (import) items are actually
+ // used now. If an import is not used at all, we signal a lint error.
+ fn check_import(&mut self, id: ast::NodeId) {
+ let used = self.r.used_imports.contains(&id);
+ let def_id = self.r.local_def_id(id);
+ if !used {
+ if self.r.maybe_unused_trait_imports.contains(&def_id) {
+ // Check later.
+ return;
+ }
+ self.unused_import(self.base_id).add(id);
+ } else {
+ // This trait import is definitely used, in a way other than
+ // method resolution.
+ self.r.maybe_unused_trait_imports.remove(&def_id);
+ if let Some(i) = self.unused_imports.get_mut(&self.base_id) {
+ i.unused.remove(&id);
+ }
+ }
+ }
+
+ fn unused_import(&mut self, id: ast::NodeId) -> &mut UnusedImport<'a> {
+ let use_tree_id = self.base_id;
+ let use_tree = self.base_use_tree.unwrap();
+ let item_span = self.item_span;
+
+ self.unused_imports.entry(id).or_insert_with(|| UnusedImport {
+ use_tree,
+ use_tree_id,
+ item_span,
+ unused: FxHashSet::default(),
+ })
+ }
+}
+
+impl<'a, 'b> Visitor<'a> for UnusedImportCheckVisitor<'a, 'b> {
+ fn visit_item(&mut self, item: &'a ast::Item) {
+ self.item_span = item.span_with_attributes();
+
+ // Ignore public (`pub use`) import statements because there's no way to be sure
+ // whether they're used or not. Also ignore imports with a dummy span
+ // because this means that they were generated in some fashion by the
+ // compiler and we don't need to consider them.
+ if let ast::ItemKind::Use(..) = item.kind {
+ if item.vis.kind.is_pub() || item.span.is_dummy() {
+ return;
+ }
+ }
+
+ visit::walk_item(self, item);
+ }
+
+ fn visit_use_tree(&mut self, use_tree: &'a ast::UseTree, id: ast::NodeId, nested: bool) {
+ // Use the base `UseTree`'s `NodeId` as the item id. This allows grouping
+ // all the lints from the same `use` item together.
+ if !nested {
+ self.base_id = id;
+ self.base_use_tree = Some(use_tree);
+ }
+
+ if let ast::UseTreeKind::Nested(ref items) = use_tree.kind {
+ if items.is_empty() {
+ self.unused_import(self.base_id).add(id);
+ }
+ } else {
+ self.check_import(id);
+ }
+
+ visit::walk_use_tree(self, use_tree, id);
+ }
+}
+
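+/// The result of walking one use tree: the first payload of each unused variant
+/// holds the spans to report, the second what rustfix should remove (one span
+/// when the whole tree is unused, several when only parts of a nested tree are).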
+enum UnusedSpanResult {
+ Used,
+ FlatUnused(Span, Span),
+ NestedFullUnused(Vec<Span>, Span),
+ NestedPartialUnused(Vec<Span>, Vec<Span>),
+}
+
+fn calc_unused_spans(
+ unused_import: &UnusedImport<'_>,
+ use_tree: &ast::UseTree,
+ use_tree_id: ast::NodeId,
+) -> UnusedSpanResult {
+ // The full span is the whole item's span if this current tree is not nested inside another
+ // This tells rustfix to remove the whole item if all the imports are unused
+ let full_span = if unused_import.use_tree.span == use_tree.span {
+ unused_import.item_span
+ } else {
+ use_tree.span
+ };
+ match use_tree.kind {
+ ast::UseTreeKind::Simple(..) | ast::UseTreeKind::Glob => {
+ if unused_import.unused.contains(&use_tree_id) {
+ UnusedSpanResult::FlatUnused(use_tree.span, full_span)
+ } else {
+ UnusedSpanResult::Used
+ }
+ }
+ ast::UseTreeKind::Nested(ref nested) => {
+ if nested.is_empty() {
+ return UnusedSpanResult::FlatUnused(use_tree.span, full_span);
+ }
+
+ let mut unused_spans = Vec::new();
+ let mut to_remove = Vec::new();
+ let mut all_nested_unused = true;
+ let mut previous_unused = false;
+ for (pos, (use_tree, use_tree_id)) in nested.iter().enumerate() {
+ let remove = match calc_unused_spans(unused_import, use_tree, *use_tree_id) {
+ UnusedSpanResult::Used => {
+ all_nested_unused = false;
+ None
+ }
+ UnusedSpanResult::FlatUnused(span, remove) => {
+ unused_spans.push(span);
+ Some(remove)
+ }
+ UnusedSpanResult::NestedFullUnused(mut spans, remove) => {
+ unused_spans.append(&mut spans);
+ Some(remove)
+ }
+ UnusedSpanResult::NestedPartialUnused(mut spans, mut to_remove_extra) => {
+ all_nested_unused = false;
+ unused_spans.append(&mut spans);
+ to_remove.append(&mut to_remove_extra);
+ None
+ }
+ };
+ if let Some(remove) = remove {
+ let remove_span = if nested.len() == 1 {
+ remove
+ } else if pos == nested.len() - 1 || !all_nested_unused {
+ // Delete everything from the end of the previous import to the end
+ // of this one, so the preceding comma is removed as well.
+ nested[pos - 1].0.span.shrink_to_hi().to(use_tree.span)
+ } else {
+ // Delete everything until the next import, to delete the trailing commas
+ use_tree.span.to(nested[pos + 1].0.span.shrink_to_lo())
+ };
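+ // E.g. when only `b` of `use foo::{a, b};` is unused this removes `, b`,
+ // and when only `a` is unused it removes `a, `, so no dangling comma is
+ // left behind in either case.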
+
+ // Try to collapse adjacent spans into a single one. This prevents all cases of
+ // overlapping removals, which are not supported by rustfix
+ if previous_unused && !to_remove.is_empty() {
+ let previous = to_remove.pop().unwrap();
+ to_remove.push(previous.to(remove_span));
+ } else {
+ to_remove.push(remove_span);
+ }
+ }
+ previous_unused = remove.is_some();
+ }
+ if unused_spans.is_empty() {
+ UnusedSpanResult::Used
+ } else if all_nested_unused {
+ UnusedSpanResult::NestedFullUnused(unused_spans, full_span)
+ } else {
+ UnusedSpanResult::NestedPartialUnused(unused_spans, to_remove)
+ }
+ }
+ }
+}
+
+impl Resolver<'_> {
+ pub(crate) fn check_unused(&mut self, krate: &ast::Crate) {
+ for import in self.potentially_unused_imports.iter() {
+ match import.kind {
+ _ if import.used.get()
+ || import.vis.get().is_public()
+ || import.span.is_dummy() =>
+ {
+ if let ImportKind::MacroUse = import.kind {
+ if !import.span.is_dummy() {
+ self.lint_buffer.buffer_lint(
+ MACRO_USE_EXTERN_CRATE,
+ import.id,
+ import.span,
+ "deprecated `#[macro_use]` attribute used to \
+ import macros should be replaced at use sites \
+ with a `use` item to import the macro \
+ instead",
+ );
+ }
+ }
+ }
+ ImportKind::ExternCrate { .. } => {
+ let def_id = self.local_def_id(import.id);
+ self.maybe_unused_extern_crates.push((def_id, import.span));
+ }
+ ImportKind::MacroUse => {
+ let msg = "unused `#[macro_use]` import";
+ self.lint_buffer.buffer_lint(UNUSED_IMPORTS, import.id, import.span, msg);
+ }
+ _ => {}
+ }
+ }
+
+ let mut visitor = UnusedImportCheckVisitor {
+ r: self,
+ unused_imports: Default::default(),
+ base_use_tree: None,
+ base_id: ast::DUMMY_NODE_ID,
+ item_span: DUMMY_SP,
+ };
+ visit::walk_crate(&mut visitor, krate);
+
+ for unused in visitor.unused_imports.values() {
+ let mut fixes = Vec::new();
+ let mut spans = match calc_unused_spans(unused, unused.use_tree, unused.use_tree_id) {
+ UnusedSpanResult::Used => continue,
+ UnusedSpanResult::FlatUnused(span, remove) => {
+ fixes.push((remove, String::new()));
+ vec![span]
+ }
+ UnusedSpanResult::NestedFullUnused(spans, remove) => {
+ fixes.push((remove, String::new()));
+ spans
+ }
+ UnusedSpanResult::NestedPartialUnused(spans, remove) => {
+ for fix in &remove {
+ fixes.push((*fix, String::new()));
+ }
+ spans
+ }
+ };
+
+ let len = spans.len();
+ spans.sort();
+ let ms = MultiSpan::from_spans(spans.clone());
+ let mut span_snippets = spans
+ .iter()
+ .filter_map(|s| match visitor.r.session.source_map().span_to_snippet(*s) {
+ Ok(s) => Some(format!("`{}`", s)),
+ _ => None,
+ })
+ .collect::<Vec<String>>();
+ span_snippets.sort();
+ let msg = format!(
+ "unused import{}{}",
+ pluralize!(len),
+ if !span_snippets.is_empty() {
+ format!(": {}", span_snippets.join(", "))
+ } else {
+ String::new()
+ }
+ );
+
+ let fix_msg = if fixes.len() == 1 && fixes[0].0 == unused.item_span {
+ "remove the whole `use` item"
+ } else if spans.len() > 1 {
+ "remove the unused imports"
+ } else {
+ "remove the unused import"
+ };
+
+ // If we are in `--test` mode, suppress the help that suggests adding the
+ // `#[cfg(test)]` attribute; otherwise, suggest adding it. There is no way to
+ // retrieve attributes here because we do not have a `TyCtxt` yet.
+ let test_module_span = if visitor.r.session.opts.test {
+ None
+ } else {
+ let parent_module = visitor.r.get_nearest_non_block_module(
+ visitor.r.local_def_id(unused.use_tree_id).to_def_id(),
+ );
+ match module_to_string(parent_module) {
+ Some(module)
+ if module == "test"
+ || module == "tests"
+ || module.starts_with("test_")
+ || module.starts_with("tests_")
+ || module.ends_with("_test")
+ || module.ends_with("_tests") =>
+ {
+ Some(parent_module.span)
+ }
+ _ => None,
+ }
+ };
+
+ visitor.r.lint_buffer.buffer_lint_with_diagnostic(
+ UNUSED_IMPORTS,
+ unused.use_tree_id,
+ ms,
+ &msg,
+ BuiltinLintDiagnostics::UnusedImports(fix_msg.into(), fixes, test_module_span),
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_resolve/src/def_collector.rs b/compiler/rustc_resolve/src/def_collector.rs
new file mode 100644
index 000000000..66641fb2c
--- /dev/null
+++ b/compiler/rustc_resolve/src/def_collector.rs
@@ -0,0 +1,354 @@
+use crate::{ImplTraitContext, Resolver};
+use rustc_ast::visit::{self, FnKind};
+use rustc_ast::walk_list;
+use rustc_ast::*;
+use rustc_expand::expand::AstFragment;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::definitions::*;
+use rustc_span::hygiene::LocalExpnId;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use tracing::debug;
+
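+/// Creates `DefId`s for all def-creating nodes in the expanded fragment, rooted
+/// at the def of the invocation's parent.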
+pub(crate) fn collect_definitions(
+ resolver: &mut Resolver<'_>,
+ fragment: &AstFragment,
+ expansion: LocalExpnId,
+) {
+ let (parent_def, impl_trait_context) = resolver.invocation_parents[&expansion];
+ fragment.visit_with(&mut DefCollector { resolver, parent_def, expansion, impl_trait_context });
+}
+
+/// Creates `DefId`s for nodes in the AST.
+struct DefCollector<'a, 'b> {
+ resolver: &'a mut Resolver<'b>,
+ parent_def: LocalDefId,
+ impl_trait_context: ImplTraitContext,
+ expansion: LocalExpnId,
+}
+
+impl<'a, 'b> DefCollector<'a, 'b> {
+ fn create_def(&mut self, node_id: NodeId, data: DefPathData, span: Span) -> LocalDefId {
+ let parent_def = self.parent_def;
+ debug!("create_def(node_id={:?}, data={:?}, parent_def={:?})", node_id, data, parent_def);
+ self.resolver.create_def(
+ parent_def,
+ node_id,
+ data,
+ self.expansion.to_expn_id(),
+ span.with_parent(None),
+ )
+ }
+
+ fn with_parent<F: FnOnce(&mut Self)>(&mut self, parent_def: LocalDefId, f: F) {
+ let orig_parent_def = std::mem::replace(&mut self.parent_def, parent_def);
+ f(self);
+ self.parent_def = orig_parent_def;
+ }
+
+ fn with_impl_trait<F: FnOnce(&mut Self)>(
+ &mut self,
+ impl_trait_context: ImplTraitContext,
+ f: F,
+ ) {
+ let orig_itc = std::mem::replace(&mut self.impl_trait_context, impl_trait_context);
+ f(self);
+ self.impl_trait_context = orig_itc;
+ }
+
+ fn collect_field(&mut self, field: &'a FieldDef, index: Option<usize>) {
+ let index = |this: &Self| {
+ index.unwrap_or_else(|| {
+ let node_id = NodeId::placeholder_from_expn_id(this.expansion);
+ this.resolver.placeholder_field_indices[&node_id]
+ })
+ };
+
+ if field.is_placeholder {
+ let old_index = self.resolver.placeholder_field_indices.insert(field.id, index(self));
+ assert!(old_index.is_none(), "placeholder field index is reset for a node ID");
+ self.visit_macro_invoc(field.id);
+ } else {
+ let name = field.ident.map_or_else(|| sym::integer(index(self)), |ident| ident.name);
+ let def = self.create_def(field.id, DefPathData::ValueNs(name), field.span);
+ self.with_parent(def, |this| visit::walk_field_def(this, field));
+ }
+ }
+
+ fn visit_macro_invoc(&mut self, id: NodeId) {
+ let id = id.placeholder_to_expn_id();
+ let old_parent =
+ self.resolver.invocation_parents.insert(id, (self.parent_def, self.impl_trait_context));
+ assert!(old_parent.is_none(), "parent `LocalDefId` is reset for an invocation");
+ }
+}
+
+impl<'a, 'b> visit::Visitor<'a> for DefCollector<'a, 'b> {
+ fn visit_item(&mut self, i: &'a Item) {
+ debug!("visit_item: {:?}", i);
+
+ // Pick the def data. This need not be unique, but the more
+ // information we encapsulate, the better.
+ let def_data = match &i.kind {
+ ItemKind::Impl { .. } => DefPathData::Impl,
+ ItemKind::ForeignMod(..) => DefPathData::ForeignMod,
+ ItemKind::Mod(..)
+ | ItemKind::Trait(..)
+ | ItemKind::TraitAlias(..)
+ | ItemKind::Enum(..)
+ | ItemKind::Struct(..)
+ | ItemKind::Union(..)
+ | ItemKind::ExternCrate(..)
+ | ItemKind::TyAlias(..) => DefPathData::TypeNs(i.ident.name),
+ ItemKind::Static(..) | ItemKind::Const(..) | ItemKind::Fn(..) => {
+ DefPathData::ValueNs(i.ident.name)
+ }
+ ItemKind::MacroDef(..) => DefPathData::MacroNs(i.ident.name),
+ ItemKind::MacCall(..) => {
+ visit::walk_item(self, i);
+ return self.visit_macro_invoc(i.id);
+ }
+ ItemKind::GlobalAsm(..) => DefPathData::GlobalAsm,
+ ItemKind::Use(..) => {
+ return visit::walk_item(self, i);
+ }
+ };
+ let def = self.create_def(i.id, def_data, i.span);
+
+ self.with_parent(def, |this| {
+ this.with_impl_trait(ImplTraitContext::Existential, |this| {
+ match i.kind {
+ ItemKind::Struct(ref struct_def, _) | ItemKind::Union(ref struct_def, _) => {
+ // If this is a unit or tuple-like struct, register the constructor.
+ if let Some(ctor_hir_id) = struct_def.ctor_id() {
+ this.create_def(ctor_hir_id, DefPathData::Ctor, i.span);
+ }
+ }
+ _ => {}
+ }
+ visit::walk_item(this, i);
+ })
+ });
+ }
+
+ fn visit_fn(&mut self, fn_kind: FnKind<'a>, span: Span, _: NodeId) {
+ if let FnKind::Fn(_, _, sig, _, generics, body) = fn_kind {
+ if let Async::Yes { closure_id, return_impl_trait_id, .. } = sig.header.asyncness {
+ self.visit_generics(generics);
+
+ let return_impl_trait_id =
+ self.create_def(return_impl_trait_id, DefPathData::ImplTrait, span);
+
+ // For async functions, we need to create their inner defs inside of a
+ // closure to match their desugared representation. Besides that,
+ // we must mirror everything that `visit::walk_fn` below does.
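+ // Sketch of the assumed desugaring being mirrored (not literal output):
+ // `async fn f(x: u8) -> T { body }` is treated like
+ // `fn f(x: u8) -> impl Future<Output = T> { /* closure running `body` */ }`,
+ // so the return type is parented to the new `impl Trait` def and the body
+ // to the new closure def.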
+ self.visit_fn_header(&sig.header);
+ for param in &sig.decl.inputs {
+ self.visit_param(param);
+ }
+ self.with_parent(return_impl_trait_id, |this| {
+ this.visit_fn_ret_ty(&sig.decl.output)
+ });
+ let closure_def = self.create_def(closure_id, DefPathData::ClosureExpr, span);
+ self.with_parent(closure_def, |this| walk_list!(this, visit_block, body));
+ return;
+ }
+ }
+
+ visit::walk_fn(self, fn_kind, span);
+ }
+
+ fn visit_use_tree(&mut self, use_tree: &'a UseTree, id: NodeId, _nested: bool) {
+ self.create_def(id, DefPathData::Use, use_tree.span);
+ match use_tree.kind {
+ UseTreeKind::Simple(_, id1, id2) => {
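+ // A simple `use` can resolve in up to three namespaces, so two extra
+ // defs are reserved for the additional namespaces (assumed rationale).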
+ self.create_def(id1, DefPathData::Use, use_tree.prefix.span);
+ self.create_def(id2, DefPathData::Use, use_tree.prefix.span);
+ }
+ UseTreeKind::Glob => (),
+ UseTreeKind::Nested(..) => {}
+ }
+ visit::walk_use_tree(self, use_tree, id);
+ }
+
+ fn visit_foreign_item(&mut self, foreign_item: &'a ForeignItem) {
+ if let ForeignItemKind::MacCall(_) = foreign_item.kind {
+ return self.visit_macro_invoc(foreign_item.id);
+ }
+
+ let def = self.create_def(
+ foreign_item.id,
+ DefPathData::ValueNs(foreign_item.ident.name),
+ foreign_item.span,
+ );
+
+ self.with_parent(def, |this| {
+ visit::walk_foreign_item(this, foreign_item);
+ });
+ }
+
+ fn visit_variant(&mut self, v: &'a Variant) {
+ if v.is_placeholder {
+ return self.visit_macro_invoc(v.id);
+ }
+ let def = self.create_def(v.id, DefPathData::TypeNs(v.ident.name), v.span);
+ self.with_parent(def, |this| {
+ if let Some(ctor_hir_id) = v.data.ctor_id() {
+ this.create_def(ctor_hir_id, DefPathData::Ctor, v.span);
+ }
+ visit::walk_variant(this, v)
+ });
+ }
+
+ fn visit_variant_data(&mut self, data: &'a VariantData) {
+ // The assumption here is that non-`cfg` macro expansion cannot change field indices.
+ // It currently holds because only inert attributes are accepted on fields,
+ // and every such attribute expands into a single field after it's resolved.
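+ // For example (illustrative): in `struct S { #[attr] a: u8, b: u8 }`,
+ // expanding the inert `#[attr]` still yields exactly one field in `a`'s
+ // position, so `b` keeps index 1.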
+ for (index, field) in data.fields().iter().enumerate() {
+ self.collect_field(field, Some(index));
+ }
+ }
+
+ fn visit_generic_param(&mut self, param: &'a GenericParam) {
+ if param.is_placeholder {
+ self.visit_macro_invoc(param.id);
+ return;
+ }
+ let name = param.ident.name;
+ let def_path_data = match param.kind {
+ GenericParamKind::Lifetime { .. } => DefPathData::LifetimeNs(name),
+ GenericParamKind::Type { .. } => DefPathData::TypeNs(name),
+ GenericParamKind::Const { .. } => DefPathData::ValueNs(name),
+ };
+ self.create_def(param.id, def_path_data, param.ident.span);
+
+ // impl-Trait can happen inside generic parameters, like
+ // ```
+ // fn foo<U: Iterator<Item = impl Clone>>() {}
+ // ```
+ //
+ // In that case, the impl-trait is lowered as an additional generic parameter.
+ self.with_impl_trait(ImplTraitContext::Universal(self.parent_def), |this| {
+ visit::walk_generic_param(this, param)
+ });
+ }
+
+ fn visit_assoc_item(&mut self, i: &'a AssocItem, ctxt: visit::AssocCtxt) {
+ let def_data = match &i.kind {
+ AssocItemKind::Fn(..) | AssocItemKind::Const(..) => DefPathData::ValueNs(i.ident.name),
+ AssocItemKind::TyAlias(..) => DefPathData::TypeNs(i.ident.name),
+ AssocItemKind::MacCall(..) => return self.visit_macro_invoc(i.id),
+ };
+
+ let def = self.create_def(i.id, def_data, i.span);
+ self.with_parent(def, |this| visit::walk_assoc_item(this, i, ctxt));
+ }
+
+ fn visit_pat(&mut self, pat: &'a Pat) {
+ match pat.kind {
+ PatKind::MacCall(..) => self.visit_macro_invoc(pat.id),
+ _ => visit::walk_pat(self, pat),
+ }
+ }
+
+ fn visit_anon_const(&mut self, constant: &'a AnonConst) {
+ let def = self.create_def(constant.id, DefPathData::AnonConst, constant.value.span);
+ self.with_parent(def, |this| visit::walk_anon_const(this, constant));
+ }
+
+ fn visit_expr(&mut self, expr: &'a Expr) {
+ let parent_def = match expr.kind {
+ ExprKind::MacCall(..) => return self.visit_macro_invoc(expr.id),
+ ExprKind::Closure(_, _, asyncness, ..) => {
+ // Async closures desugar to closures inside of closures, so
+ // we must create two defs.
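+ // Illustrative sketch: `async || body` acts like `|| async { body }`,
+ // hence one def for the outer closure and one for the inner async body.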
+ let closure_def = self.create_def(expr.id, DefPathData::ClosureExpr, expr.span);
+ match asyncness {
+ Async::Yes { closure_id, .. } => {
+ self.create_def(closure_id, DefPathData::ClosureExpr, expr.span)
+ }
+ Async::No => closure_def,
+ }
+ }
+ ExprKind::Async(_, async_id, _) => {
+ self.create_def(async_id, DefPathData::ClosureExpr, expr.span)
+ }
+ _ => self.parent_def,
+ };
+
+ self.with_parent(parent_def, |this| visit::walk_expr(this, expr));
+ }
+
+ fn visit_ty(&mut self, ty: &'a Ty) {
+ match ty.kind {
+ TyKind::MacCall(..) => self.visit_macro_invoc(ty.id),
+ TyKind::ImplTrait(node_id, _) => {
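+ // Sketch of the assumed lowering: universal `impl Trait` (argument
+ // position) becomes an extra generic parameter on the enclosing item
+ // and is parented to it; existential `impl Trait` (e.g. return
+ // position) gets its own def under the current parent.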
+ let parent_def = match self.impl_trait_context {
+ ImplTraitContext::Universal(item_def) => self.resolver.create_def(
+ item_def,
+ node_id,
+ DefPathData::ImplTrait,
+ self.expansion.to_expn_id(),
+ ty.span,
+ ),
+ ImplTraitContext::Existential => {
+ self.create_def(node_id, DefPathData::ImplTrait, ty.span)
+ }
+ };
+ self.with_parent(parent_def, |this| visit::walk_ty(this, ty))
+ }
+ _ => visit::walk_ty(self, ty),
+ }
+ }
+
+ fn visit_stmt(&mut self, stmt: &'a Stmt) {
+ match stmt.kind {
+ StmtKind::MacCall(..) => self.visit_macro_invoc(stmt.id),
+ _ => visit::walk_stmt(self, stmt),
+ }
+ }
+
+ fn visit_arm(&mut self, arm: &'a Arm) {
+ if arm.is_placeholder { self.visit_macro_invoc(arm.id) } else { visit::walk_arm(self, arm) }
+ }
+
+ fn visit_expr_field(&mut self, f: &'a ExprField) {
+ if f.is_placeholder {
+ self.visit_macro_invoc(f.id)
+ } else {
+ visit::walk_expr_field(self, f)
+ }
+ }
+
+ fn visit_pat_field(&mut self, fp: &'a PatField) {
+ if fp.is_placeholder {
+ self.visit_macro_invoc(fp.id)
+ } else {
+ visit::walk_pat_field(self, fp)
+ }
+ }
+
+ fn visit_param(&mut self, p: &'a Param) {
+ if p.is_placeholder {
+ self.visit_macro_invoc(p.id)
+ } else {
+ self.with_impl_trait(ImplTraitContext::Universal(self.parent_def), |this| {
+ visit::walk_param(this, p)
+ })
+ }
+ }
+
+ // This method is called only when we are visiting an individual field
+ // after expanding an attribute on it.
+ fn visit_field_def(&mut self, field: &'a FieldDef) {
+ self.collect_field(field, None);
+ }
+
+ fn visit_crate(&mut self, krate: &'a Crate) {
+ if krate.is_placeholder {
+ self.visit_macro_invoc(krate.id)
+ } else {
+ visit::walk_crate(self, krate)
+ }
+ }
+}
diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs
new file mode 100644
index 000000000..8839fb1a1
--- /dev/null
+++ b/compiler/rustc_resolve/src/diagnostics.rs
@@ -0,0 +1,2714 @@
+use std::ptr;
+
+use rustc_ast::ptr::P;
+use rustc_ast::visit::{self, Visitor};
+use rustc_ast::{self as ast, Crate, ItemKind, ModKind, NodeId, Path, CRATE_NODE_ID};
+use rustc_ast_pretty::pprust;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::struct_span_err;
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, MultiSpan};
+use rustc_feature::BUILTIN_ATTRIBUTES;
+use rustc_hir::def::Namespace::{self, *};
+use rustc_hir::def::{self, CtorKind, CtorOf, DefKind, NonMacroAttrKind, PerNS};
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::PrimTy;
+use rustc_index::vec::IndexVec;
+use rustc_middle::bug;
+use rustc_middle::ty::DefIdTree;
+use rustc_session::lint::builtin::ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE;
+use rustc_session::lint::builtin::MACRO_EXPANDED_MACRO_EXPORTS_ACCESSED_BY_ABSOLUTE_PATHS;
+use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_session::Session;
+use rustc_span::edition::Edition;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{BytePos, Span};
+use tracing::debug;
+
+use crate::imports::{Import, ImportKind, ImportResolver};
+use crate::late::{PatternSource, Rib};
+use crate::path_names_to_string;
+use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BindingError, Finalize};
+use crate::{HasGenericParams, MacroRulesScope, Module, ModuleKind, ModuleOrUniformRoot};
+use crate::{LexicalScopeBinding, NameBinding, NameBindingKind, PrivacyError, VisResolutionError};
+use crate::{ParentScope, PathResult, ResolutionError, Resolver, Scope, ScopeSet};
+use crate::{Segment, UseError};
+
+#[cfg(test)]
+mod tests;
+
+type Res = def::Res<ast::NodeId>;
+
+/// A vector of spans and replacements, a message, and an applicability.
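+///
+/// For example (illustrative):
+/// `(vec![(span, "crate::foo".to_string())], "consider importing foo".to_string(),
+/// Applicability::MaybeIncorrect)`.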
+pub(crate) type Suggestion = (Vec<(Span, String)>, String, Applicability);
+
+/// Potential candidate for an undeclared or out-of-scope label - contains the ident of a
+/// similarly named label and whether or not it is reachable.
+pub(crate) type LabelSuggestion = (Ident, bool);
+
+pub(crate) enum SuggestionTarget {
+ /// The target has a similar name as the name used by the programmer (probably a typo)
+ SimilarlyNamed,
+ /// The target is the only valid item that can be used in the corresponding context
+ SingleItem,
+}
+
+pub(crate) struct TypoSuggestion {
+ pub candidate: Symbol,
+ pub res: Res,
+ pub target: SuggestionTarget,
+}
+
+impl TypoSuggestion {
+ pub(crate) fn typo_from_res(candidate: Symbol, res: Res) -> TypoSuggestion {
+ Self { candidate, res, target: SuggestionTarget::SimilarlyNamed }
+ }
+ pub(crate) fn single_item_from_res(candidate: Symbol, res: Res) -> TypoSuggestion {
+ Self { candidate, res, target: SuggestionTarget::SingleItem }
+ }
+}
+
+/// A freely importable item suggested in case of resolution failure.
+pub(crate) struct ImportSuggestion {
+ pub did: Option<DefId>,
+ pub descr: &'static str,
+ pub path: Path,
+ pub accessible: bool,
+ /// An extra note that should be issued if this item is suggested
+ pub note: Option<String>,
+}
+
+/// Adjust the impl span so that just the `impl` keyword is taken by removing
+/// everything after `<` (`"impl<T> Iterator for A<T> {}" -> "impl"`) and
+/// everything after the first whitespace (`"impl Iterator for A" -> "impl"`).
+///
+/// *Attention*: the method used is very fragile since it essentially duplicates the work of the
+/// parser. If you need to use this function or something similar, please consider updating the
+/// `source_map` functions and this function to something more robust.
+fn reduce_impl_span_to_impl_keyword(sm: &SourceMap, impl_span: Span) -> Span {
+ let impl_span = sm.span_until_char(impl_span, '<');
+ sm.span_until_whitespace(impl_span)
+}
+
+impl<'a> Resolver<'a> {
+ pub(crate) fn report_errors(&mut self, krate: &Crate) {
+ self.report_with_use_injections(krate);
+
+ for &(span_use, span_def) in &self.macro_expanded_macro_export_errors {
+ let msg = "macro-expanded `macro_export` macros from the current crate \
+ cannot be referred to by absolute paths";
+ self.lint_buffer.buffer_lint_with_diagnostic(
+ MACRO_EXPANDED_MACRO_EXPORTS_ACCESSED_BY_ABSOLUTE_PATHS,
+ CRATE_NODE_ID,
+ span_use,
+ msg,
+ BuiltinLintDiagnostics::MacroExpandedMacroExportsAccessedByAbsolutePaths(span_def),
+ );
+ }
+
+ for ambiguity_error in &self.ambiguity_errors {
+ self.report_ambiguity_error(ambiguity_error);
+ }
+
+ let mut reported_spans = FxHashSet::default();
+ for error in &self.privacy_errors {
+ if reported_spans.insert(error.dedup_span) {
+ self.report_privacy_error(error);
+ }
+ }
+ }
+
+ fn report_with_use_injections(&mut self, krate: &Crate) {
+ for UseError { mut err, candidates, def_id, instead, suggestion, path } in
+ self.use_injections.drain(..)
+ {
+ let (span, found_use) = if let Some(def_id) = def_id.as_local() {
+ UsePlacementFinder::check(krate, self.def_id_to_node_id[def_id])
+ } else {
+ (None, FoundUse::No)
+ };
+ if !candidates.is_empty() {
+ show_candidates(
+ &self.session,
+ &self.source_span,
+ &mut err,
+ span,
+ &candidates,
+ if instead { Instead::Yes } else { Instead::No },
+ found_use,
+ IsPattern::No,
+ path,
+ );
+ } else if let Some((span, msg, sugg, appl)) = suggestion {
+ err.span_suggestion(span, msg, sugg, appl);
+ }
+ err.emit();
+ }
+ }
+
+ pub(crate) fn report_conflict<'b>(
+ &mut self,
+ parent: Module<'_>,
+ ident: Ident,
+ ns: Namespace,
+ new_binding: &NameBinding<'b>,
+ old_binding: &NameBinding<'b>,
+ ) {
+ // Error on the second of two conflicting names
+ if old_binding.span.lo() > new_binding.span.lo() {
+ return self.report_conflict(parent, ident, ns, old_binding, new_binding);
+ }
+
+ let container = match parent.kind {
+ ModuleKind::Def(kind, _, _) => kind.descr(parent.def_id()),
+ ModuleKind::Block => "block",
+ };
+
+ let old_noun = match old_binding.is_import() {
+ true => "import",
+ false => "definition",
+ };
+
+ let new_participle = match new_binding.is_import() {
+ true => "imported",
+ false => "defined",
+ };
+
+ let (name, span) =
+ (ident.name, self.session.source_map().guess_head_span(new_binding.span));
+
+ if let Some(s) = self.name_already_seen.get(&name) {
+ if s == &span {
+ return;
+ }
+ }
+
+ let old_kind = match (ns, old_binding.module()) {
+ (ValueNS, _) => "value",
+ (MacroNS, _) => "macro",
+ (TypeNS, _) if old_binding.is_extern_crate() => "extern crate",
+ (TypeNS, Some(module)) if module.is_normal() => "module",
+ (TypeNS, Some(module)) if module.is_trait() => "trait",
+ (TypeNS, _) => "type",
+ };
+
+ let msg = format!("the name `{}` is defined multiple times", name);
+
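+ // Error-code sketch (assumed mapping): E0259/E0254/E0260 involve an
+ // `extern crate`; otherwise E0428 is two definitions, E0252 two imports,
+ // and E0255 an import clashing with a definition.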
+ let mut err = match (old_binding.is_extern_crate(), new_binding.is_extern_crate()) {
+ (true, true) => struct_span_err!(self.session, span, E0259, "{}", msg),
+ (true, _) | (_, true) => match new_binding.is_import() && old_binding.is_import() {
+ true => struct_span_err!(self.session, span, E0254, "{}", msg),
+ false => struct_span_err!(self.session, span, E0260, "{}", msg),
+ },
+ _ => match (old_binding.is_import(), new_binding.is_import()) {
+ (false, false) => struct_span_err!(self.session, span, E0428, "{}", msg),
+ (true, true) => struct_span_err!(self.session, span, E0252, "{}", msg),
+ _ => struct_span_err!(self.session, span, E0255, "{}", msg),
+ },
+ };
+
+ err.note(&format!(
+ "`{}` must be defined only once in the {} namespace of this {}",
+ name,
+ ns.descr(),
+ container
+ ));
+
+ err.span_label(span, format!("`{}` re{} here", name, new_participle));
+ err.span_label(
+ self.session.source_map().guess_head_span(old_binding.span),
+ format!("previous {} of the {} `{}` here", old_noun, old_kind, name),
+ );
+
+ // See https://github.com/rust-lang/rust/issues/32354
+ use NameBindingKind::Import;
+ let import = match (&new_binding.kind, &old_binding.kind) {
+ // If there are two imports where one or both have attributes then prefer removing the
+ // import without attributes.
+ (Import { import: new, .. }, Import { import: old, .. })
+ if {
+ !new_binding.span.is_dummy()
+ && !old_binding.span.is_dummy()
+ && (new.has_attributes || old.has_attributes)
+ } =>
+ {
+ if old.has_attributes {
+ Some((new, new_binding.span, true))
+ } else {
+ Some((old, old_binding.span, true))
+ }
+ }
+ // Otherwise prioritize the new binding.
+ (Import { import, .. }, other) if !new_binding.span.is_dummy() => {
+ Some((import, new_binding.span, other.is_import()))
+ }
+ (other, Import { import, .. }) if !old_binding.span.is_dummy() => {
+ Some((import, old_binding.span, other.is_import()))
+ }
+ _ => None,
+ };
+
+ // Check if the target of the use for both bindings is the same.
+ let duplicate = new_binding.res().opt_def_id() == old_binding.res().opt_def_id();
+ let has_dummy_span = new_binding.span.is_dummy() || old_binding.span.is_dummy();
+ let from_item =
+ self.extern_prelude.get(&ident).map_or(true, |entry| entry.introduced_by_item);
+ // Only suggest removing an import if both bindings are to the same def, if both spans
+ // aren't dummy spans. Further, if both bindings are imports, then the ident must have
+ // been introduced by an item.
+ let should_remove_import = duplicate
+ && !has_dummy_span
+ && ((new_binding.is_extern_crate() || old_binding.is_extern_crate()) || from_item);
+
+ match import {
+ Some((import, span, true)) if should_remove_import && import.is_nested() => {
+ self.add_suggestion_for_duplicate_nested_use(&mut err, import, span)
+ }
+ Some((import, _, true)) if should_remove_import && !import.is_glob() => {
+ // Simple case - remove the entire import. Due to the above match arm, this can
+ // only be a single use so just remove it entirely.
+ err.tool_only_span_suggestion(
+ import.use_span_with_attributes,
+ "remove unnecessary import",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ Some((import, span, _)) => {
+ self.add_suggestion_for_rename_of_use(&mut err, name, import, span)
+ }
+ _ => {}
+ }
+
+ err.emit();
+ self.name_already_seen.insert(name, span);
+ }
+
+ /// This function adds a suggestion to change the binding name of a new import that conflicts
+ /// with an existing import.
+ ///
+ /// ```text,ignore (diagnostic)
+ /// help: you can use `as` to change the binding name of the import
+ /// |
+ /// LL | use foo::bar as other_bar;
+ /// | ^^^^^^^^^^^^^^^^^^^^^
+ /// ```
+ fn add_suggestion_for_rename_of_use(
+ &self,
+ err: &mut Diagnostic,
+ name: Symbol,
+ import: &Import<'_>,
+ binding_span: Span,
+ ) {
+ let suggested_name = if name.as_str().chars().next().unwrap().is_uppercase() {
+ format!("Other{}", name)
+ } else {
+ format!("other_{}", name)
+ };
+
+ let mut suggestion = None;
+ match import.kind {
+ ImportKind::Single { type_ns_only: true, .. } => {
+ suggestion = Some(format!("self as {}", suggested_name))
+ }
+ ImportKind::Single { source, .. } => {
+ if let Some(pos) =
+ source.span.hi().0.checked_sub(binding_span.lo().0).map(|pos| pos as usize)
+ {
+ if let Ok(snippet) = self.session.source_map().span_to_snippet(binding_span) {
+ if pos <= snippet.len() {
+ suggestion = Some(format!(
+ "{} as {}{}",
+ &snippet[..pos],
+ suggested_name,
+ if snippet.ends_with(';') { ";" } else { "" }
+ ))
+ }
+ }
+ }
+ }
+ ImportKind::ExternCrate { source, target } => {
+ suggestion = Some(format!(
+ "extern crate {} as {};",
+ source.unwrap_or(target.name),
+ suggested_name,
+ ))
+ }
+ _ => unreachable!(),
+ }
+
+ let rename_msg = "you can use `as` to change the binding name of the import";
+ if let Some(suggestion) = suggestion {
+ err.span_suggestion(
+ binding_span,
+ rename_msg,
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_label(binding_span, rename_msg);
+ }
+ }
+
+ /// This function adds a suggestion to remove an unnecessary binding from an import that is
+ /// nested. In the following example, this function will be invoked to remove the `a` binding
+ /// in the second use statement:
+ ///
+ /// ```ignore (diagnostic)
+ /// use issue_52891::a;
+ /// use issue_52891::{d, a, e};
+ /// ```
+ ///
+ /// The following suggestion will be added:
+ ///
+ /// ```ignore (diagnostic)
+ /// use issue_52891::{d, a, e};
+ /// ^-- help: remove unnecessary import
+ /// ```
+ ///
+ /// If the nested use contains only one import then the suggestion will remove the entire
+ /// line.
+ ///
+ /// It is expected that the provided import is nested - this isn't checked by the
+ /// function. If this invariant is not upheld, this function's behaviour will be unexpected
+ /// as characters expected by span manipulations won't be present.
+ fn add_suggestion_for_duplicate_nested_use(
+ &self,
+ err: &mut Diagnostic,
+ import: &Import<'_>,
+ binding_span: Span,
+ ) {
+ assert!(import.is_nested());
+ let message = "remove unnecessary import";
+
+ // Two examples will be used to illustrate the span manipulations we're doing:
+ //
+ // - Given `use issue_52891::{d, a, e};` where `a` is a duplicate then `binding_span` is
+ // `a` and `import.use_span` is `issue_52891::{d, a, e};`.
+ // - Given `use issue_52891::{d, e, a};` where `a` is a duplicate then `binding_span` is
+ // `a` and `import.use_span` is `issue_52891::{d, e, a};`.
+
+ let (found_closing_brace, span) =
+ find_span_of_binding_until_next_binding(self.session, binding_span, import.use_span);
+
+ // If there was a closing brace then identify the span to remove any trailing commas from
+ // previous imports.
+ if found_closing_brace {
+ if let Some(span) = extend_span_to_previous_binding(self.session, span) {
+ err.tool_only_span_suggestion(span, message, "", Applicability::MaybeIncorrect);
+ } else {
+ // Remove the entire line if we cannot extend the span back, this indicates an
+ // `issue_52891::{self}` case.
+ err.span_suggestion(
+ import.use_span_with_attributes,
+ message,
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ return;
+ }
+
+ err.span_suggestion(span, message, "", Applicability::MachineApplicable);
+ }
+
+ pub(crate) fn lint_if_path_starts_with_module(
+ &mut self,
+ finalize: Option<Finalize>,
+ path: &[Segment],
+ second_binding: Option<&NameBinding<'_>>,
+ ) {
+ let Some(Finalize { node_id, root_span, .. }) = finalize else {
+ return;
+ };
+
+ let first_name = match path.get(0) {
+ // In the 2018 edition this lint is a hard error, so nothing to do
+ Some(seg) if seg.ident.span.rust_2015() && self.session.rust_2015() => seg.ident.name,
+ _ => return,
+ };
+
+ // Currently we're only interested in `use` paths, which should start
+ // with `{{root}}`.
+ if first_name != kw::PathRoot {
+ return;
+ }
+
+ match path.get(1) {
+ // If this import looks like `crate::...` it's already good
+ Some(Segment { ident, .. }) if ident.name == kw::Crate => return,
+ // Otherwise go below to see if it's an extern crate
+ Some(_) => {}
+ // If the path has length one (and it's most likely `PathRoot`),
+ // then we don't know whether we're going to be importing a crate or an
+ // item in our crate. Defer this lint to be emitted elsewhere.
+ None => return,
+ }
+
+ // If the first element of our path was actually resolved to an
+ // `ExternCrate` (also used for `crate::...`) then no need to issue a
+ // warning, this looks all good!
+ if let Some(binding) = second_binding {
+ if let NameBindingKind::Import { import, .. } = binding.kind {
+ // Careful: we still want to rewrite paths from renamed extern crates.
+ if let ImportKind::ExternCrate { source: None, .. } = import.kind {
+ return;
+ }
+ }
+ }
+
+ let diag = BuiltinLintDiagnostics::AbsPathWithModule(root_span);
+ self.lint_buffer.buffer_lint_with_diagnostic(
+ ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE,
+ node_id,
+ root_span,
+ "absolute paths must start with `self`, `super`, \
+ `crate`, or an external crate name in the 2018 edition",
+ diag,
+ );
+ }
+
+ pub(crate) fn add_module_candidates(
+ &mut self,
+ module: Module<'a>,
+ names: &mut Vec<TypoSuggestion>,
+ filter_fn: &impl Fn(Res) -> bool,
+ ) {
+ for (key, resolution) in self.resolutions(module).borrow().iter() {
+ if let Some(binding) = resolution.borrow().binding {
+ let res = binding.res();
+ if filter_fn(res) {
+ names.push(TypoSuggestion::typo_from_res(key.ident.name, res));
+ }
+ }
+ }
+ }
+
+ /// Combines an error with provided span and emits it.
+ ///
+ /// This takes the error provided, combines it with the span and any additional spans inside the
+ /// error and emits it.
+ pub(crate) fn report_error(&mut self, span: Span, resolution_error: ResolutionError<'a>) {
+ self.into_struct_error(span, resolution_error).emit();
+ }
+
+ pub(crate) fn into_struct_error(
+ &mut self,
+ span: Span,
+ resolution_error: ResolutionError<'a>,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ match resolution_error {
+ ResolutionError::GenericParamsFromOuterFunction(outer_res, has_generic_params) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0401,
+ "can't use generic parameters from outer function",
+ );
+ err.span_label(span, "use of generic parameter from outer function");
+
+ let sm = self.session.source_map();
+ match outer_res {
+ Res::SelfTy { trait_: maybe_trait_defid, alias_to: maybe_impl_defid } => {
+ if let Some(impl_span) =
+ maybe_impl_defid.and_then(|(def_id, _)| self.opt_span(def_id))
+ {
+ err.span_label(
+ reduce_impl_span_to_impl_keyword(sm, impl_span),
+ "`Self` type implicitly declared here, by this `impl`",
+ );
+ }
+ match (maybe_trait_defid, maybe_impl_defid) {
+ (Some(_), None) => {
+ err.span_label(span, "can't use `Self` here");
+ }
+ (_, Some(_)) => {
+ err.span_label(span, "use a type here instead");
+ }
+ (None, None) => bug!("`impl` without trait nor type?"),
+ }
+ return err;
+ }
+ Res::Def(DefKind::TyParam, def_id) => {
+ if let Some(span) = self.opt_span(def_id) {
+ err.span_label(span, "type parameter from outer function");
+ }
+ }
+ Res::Def(DefKind::ConstParam, def_id) => {
+ if let Some(span) = self.opt_span(def_id) {
+ err.span_label(span, "const parameter from outer function");
+ }
+ }
+ _ => {
+ bug!(
+ "GenericParamsFromOuterFunction should only be used with Res::SelfTy, \
+ DefKind::TyParam or DefKind::ConstParam"
+ );
+ }
+ }
+
+ if has_generic_params == HasGenericParams::Yes {
+ // Try to retrieve the span of the function signature and generate a new
+ // message with a local type or const parameter.
+ let sugg_msg = "try using a local generic parameter instead";
+ if let Some((sugg_span, snippet)) = sm.generate_local_type_param_snippet(span) {
+ // Suggest the modification to the user
+ err.span_suggestion(
+ sugg_span,
+ sugg_msg,
+ snippet,
+ Applicability::MachineApplicable,
+ );
+ } else if let Some(sp) = sm.generate_fn_name_span(span) {
+ err.span_label(
+ sp,
+ "try adding a local generic parameter in this method instead",
+ );
+ } else {
+ err.help("try using a local generic parameter instead");
+ }
+ }
+
+ err
+ }
+ ResolutionError::NameAlreadyUsedInParameterList(name, first_use_span) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0403,
+ "the name `{}` is already used for a generic \
+ parameter in this item's generic parameters",
+ name,
+ );
+ err.span_label(span, "already used");
+ err.span_label(first_use_span, format!("first use of `{}`", name));
+ err
+ }
+ ResolutionError::MethodNotMemberOfTrait(method, trait_, candidate) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0407,
+ "method `{}` is not a member of trait `{}`",
+ method,
+ trait_
+ );
+ err.span_label(span, format!("not a member of trait `{}`", trait_));
+ if let Some(candidate) = candidate {
+ err.span_suggestion(
+ method.span,
+ "there is an associated function with a similar name",
+ candidate.to_ident_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err
+ }
+ ResolutionError::TypeNotMemberOfTrait(type_, trait_, candidate) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0437,
+ "type `{}` is not a member of trait `{}`",
+ type_,
+ trait_
+ );
+ err.span_label(span, format!("not a member of trait `{}`", trait_));
+ if let Some(candidate) = candidate {
+ err.span_suggestion(
+ type_.span,
+ "there is an associated type with a similar name",
+ candidate.to_ident_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err
+ }
+ ResolutionError::ConstNotMemberOfTrait(const_, trait_, candidate) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0438,
+ "const `{}` is not a member of trait `{}`",
+ const_,
+ trait_
+ );
+ err.span_label(span, format!("not a member of trait `{}`", trait_));
+ if let Some(candidate) = candidate {
+ err.span_suggestion(
+ const_.span,
+ "there is an associated constant with a similar name",
+ candidate.to_ident_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err
+ }
+ ResolutionError::VariableNotBoundInPattern(binding_error, parent_scope) => {
+ let BindingError { name, target, origin, could_be_path } = binding_error;
+
+ let target_sp = target.iter().copied().collect::<Vec<_>>();
+ let origin_sp = origin.iter().copied().collect::<Vec<_>>();
+
+ let msp = MultiSpan::from_spans(target_sp.clone());
+ let mut err = struct_span_err!(
+ self.session,
+ msp,
+ E0408,
+ "variable `{}` is not bound in all patterns",
+ name,
+ );
+ for sp in target_sp {
+ err.span_label(sp, format!("pattern doesn't bind `{}`", name));
+ }
+ for sp in origin_sp {
+ err.span_label(sp, "variable not in all patterns");
+ }
+ if could_be_path {
+ let import_suggestions = self.lookup_import_candidates(
+ Ident::with_dummy_span(name),
+ Namespace::ValueNS,
+ &parent_scope,
+ &|res: Res| match res {
+ Res::Def(
+ DefKind::Ctor(CtorOf::Variant, CtorKind::Const)
+ | DefKind::Ctor(CtorOf::Struct, CtorKind::Const)
+ | DefKind::Const
+ | DefKind::AssocConst,
+ _,
+ ) => true,
+ _ => false,
+ },
+ );
+
+ if import_suggestions.is_empty() {
+ let help_msg = format!(
+ "if you meant to match on a variant or a `const` item, consider \
+ making the path in the pattern qualified: `path::to::ModOrType::{}`",
+ name,
+ );
+ err.span_help(span, &help_msg);
+ }
+ show_candidates(
+ &self.session,
+ &self.source_span,
+ &mut err,
+ Some(span),
+ &import_suggestions,
+ Instead::No,
+ FoundUse::Yes,
+ IsPattern::Yes,
+ vec![],
+ );
+ }
+ err
+ }
+ ResolutionError::VariableBoundWithDifferentMode(variable_name, first_binding_span) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0409,
+ "variable `{}` is bound inconsistently across alternatives separated by `|`",
+ variable_name
+ );
+ err.span_label(span, "bound in different ways");
+ err.span_label(first_binding_span, "first binding");
+ err
+ }
+ ResolutionError::IdentifierBoundMoreThanOnceInParameterList(identifier) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0415,
+ "identifier `{}` is bound more than once in this parameter list",
+ identifier
+ );
+ err.span_label(span, "used as parameter more than once");
+ err
+ }
+ ResolutionError::IdentifierBoundMoreThanOnceInSamePattern(identifier) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0416,
+ "identifier `{}` is bound more than once in the same pattern",
+ identifier
+ );
+ err.span_label(span, "used in a pattern more than once");
+ err
+ }
+ ResolutionError::UndeclaredLabel { name, suggestion } => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0426,
+ "use of undeclared label `{}`",
+ name
+ );
+
+ err.span_label(span, format!("undeclared label `{}`", name));
+
+ match suggestion {
+ // A reachable label with a similar name exists.
+ Some((ident, true)) => {
+ err.span_label(ident.span, "a label with a similar name is reachable");
+ err.span_suggestion(
+ span,
+ "try using similarly named label",
+ ident.name,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ // An unreachable label with a similar name exists.
+ Some((ident, false)) => {
+ err.span_label(
+ ident.span,
+ "a label with a similar name exists but is unreachable",
+ );
+ }
+ // No similarly-named labels exist.
+ None => (),
+ }
+
+ err
+ }
+ ResolutionError::SelfImportsOnlyAllowedWithin { root, span_with_rename } => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0429,
+ "{}",
+ "`self` imports are only allowed within a { } list"
+ );
+
+ // None of the suggestions below would help with a case like `use self`.
+ if !root {
+ // use foo::bar::self -> foo::bar
+ // use foo::bar::self as abc -> foo::bar as abc
+ err.span_suggestion(
+ span,
+ "consider importing the module directly",
+ "",
+ Applicability::MachineApplicable,
+ );
+
+ // use foo::bar::self -> foo::bar::{self}
+ // use foo::bar::self as abc -> foo::bar::{self as abc}
+ let braces = vec![
+ (span_with_rename.shrink_to_lo(), "{".to_string()),
+ (span_with_rename.shrink_to_hi(), "}".to_string()),
+ ];
+ err.multipart_suggestion(
+ "alternatively, use the multi-path `use` syntax to import `self`",
+ braces,
+ Applicability::MachineApplicable,
+ );
+ }
+ err
+ }
+ ResolutionError::SelfImportCanOnlyAppearOnceInTheList => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0430,
+ "`self` import can only appear once in an import list"
+ );
+ err.span_label(span, "can only appear once in an import list");
+ err
+ }
+ ResolutionError::SelfImportOnlyInImportListWithNonEmptyPrefix => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0431,
+ "`self` import can only appear in an import list with \
+ a non-empty prefix"
+ );
+ err.span_label(span, "can only appear in an import list with a non-empty prefix");
+ err
+ }
+ ResolutionError::FailedToResolve { label, suggestion } => {
+ let mut err =
+ struct_span_err!(self.session, span, E0433, "failed to resolve: {}", &label);
+ err.span_label(span, label);
+
+ if let Some((suggestions, msg, applicability)) = suggestion {
+ if suggestions.is_empty() {
+ err.help(&msg);
+ return err;
+ }
+ err.multipart_suggestion(&msg, suggestions, applicability);
+ }
+
+ err
+ }
+ ResolutionError::CannotCaptureDynamicEnvironmentInFnItem => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0434,
+ "{}",
+ "can't capture dynamic environment in a fn item"
+ );
+ err.help("use the `|| { ... }` closure form instead");
+ err
+ }
+ ResolutionError::AttemptToUseNonConstantValueInConstant(ident, sugg, current) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0435,
+ "attempt to use a non-constant value in a constant"
+ );
+ // let foo =...
+ // ^^^ given this Span
+ // ------- get this Span to have an applicable suggestion
+ //
+ // Only do this if the const and the usage of the non-constant value are on
+ // the same line; the further apart the two are, the higher the chance of
+ // the suggestion being wrong.
+
+ let sp = self
+ .session
+ .source_map()
+ .span_extend_to_prev_str(ident.span, current, true, false);
+
+ match sp {
+ Some(sp) if !self.session.source_map().is_multiline(sp) => {
+ let sp = sp.with_lo(BytePos(sp.lo().0 - (current.len() as u32)));
+ err.span_suggestion(
+ sp,
+ &format!("consider using `{}` instead of `{}`", sugg, current),
+ format!("{} {}", sugg, ident),
+ Applicability::MaybeIncorrect,
+ );
+ err.span_label(span, "non-constant value");
+ }
+ _ => {
+ err.span_label(ident.span, &format!("this would need to be a `{}`", sugg));
+ }
+ }
+
+ err
+ }
+ ResolutionError::BindingShadowsSomethingUnacceptable {
+ shadowing_binding,
+ name,
+ participle,
+ article,
+ shadowed_binding,
+ shadowed_binding_span,
+ } => {
+ let shadowed_binding_descr = shadowed_binding.descr();
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0530,
+ "{}s cannot shadow {}s",
+ shadowing_binding.descr(),
+ shadowed_binding_descr,
+ );
+ err.span_label(
+ span,
+ format!("cannot be named the same as {} {}", article, shadowed_binding_descr),
+ );
+ match (shadowing_binding, shadowed_binding) {
+ (
+ PatternSource::Match,
+ Res::Def(DefKind::Ctor(CtorOf::Variant | CtorOf::Struct, CtorKind::Fn), _),
+ ) => {
+ err.span_suggestion(
+ span,
+ "try specify the pattern arguments",
+ format!("{}(..)", name),
+ Applicability::Unspecified,
+ );
+ }
+ _ => (),
+ }
+ let msg =
+ format!("the {} `{}` is {} here", shadowed_binding_descr, name, participle);
+ err.span_label(shadowed_binding_span, msg);
+ err
+ }
+ ResolutionError::ForwardDeclaredGenericParam => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0128,
+ "generic parameters with a default cannot use \
+ forward declared identifiers"
+ );
+ err.span_label(span, "defaulted generic parameters cannot be forward declared");
+ err
+ }
+ ResolutionError::ParamInTyOfConstParam(name) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0770,
+ "the type of const parameters must not depend on other generic parameters"
+ );
+ err.span_label(
+ span,
+ format!("the type must not depend on the parameter `{}`", name),
+ );
+ err
+ }
+ ResolutionError::ParamInNonTrivialAnonConst { name, is_type } => {
+ let mut err = self.session.struct_span_err(
+ span,
+ "generic parameters may not be used in const operations",
+ );
+ err.span_label(span, &format!("cannot perform const operation using `{}`", name));
+
+ if is_type {
+ err.note("type parameters may not be used in const expressions");
+ } else {
+ err.help(&format!(
+ "const parameters may only be used as standalone arguments, i.e. `{}`",
+ name
+ ));
+ }
+
+ if self.session.is_nightly_build() {
+ err.help(
+ "use `#![feature(generic_const_exprs)]` to allow generic const expressions",
+ );
+ }
+
+ err
+ }
+ ResolutionError::SelfInGenericParamDefault => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0735,
+ "generic parameters cannot use `Self` in their defaults"
+ );
+ err.span_label(span, "`Self` in generic parameter default");
+ err
+ }
+ ResolutionError::UnreachableLabel { name, definition_span, suggestion } => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0767,
+ "use of unreachable label `{}`",
+ name,
+ );
+
+ err.span_label(definition_span, "unreachable label defined here");
+ err.span_label(span, format!("unreachable label `{}`", name));
+ err.note(
+ "labels are unreachable through functions, closures, async blocks and modules",
+ );
+
+ match suggestion {
+ // A reachable label with a similar name exists.
+ Some((ident, true)) => {
+ err.span_label(ident.span, "a label with a similar name is reachable");
+ err.span_suggestion(
+ span,
+ "try using similarly named label",
+ ident.name,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ // An unreachable label with a similar name exists.
+ Some((ident, false)) => {
+ err.span_label(
+ ident.span,
+ "a label with a similar name exists but is also unreachable",
+ );
+ }
+ // No similarly-named labels exist.
+ None => (),
+ }
+
+ err
+ }
+ ResolutionError::TraitImplMismatch {
+ name,
+ kind,
+ code,
+ trait_item_span,
+ trait_path,
+ } => {
+ let mut err = self.session.struct_span_err_with_code(
+ span,
+ &format!(
+ "item `{}` is an associated {}, which doesn't match its trait `{}`",
+ name, kind, trait_path,
+ ),
+ code,
+ );
+ err.span_label(span, "does not match trait");
+ err.span_label(trait_item_span, "item in trait");
+ err
+ }
+ ResolutionError::InvalidAsmSym => {
+ let mut err = self.session.struct_span_err(span, "invalid `sym` operand");
+ err.span_label(span, "is a local variable");
+ err.help("`sym` operands must refer to either a function or a static");
+ err
+ }
+ }
+ }
+
+ pub(crate) fn report_vis_error(
+ &mut self,
+ vis_resolution_error: VisResolutionError<'_>,
+ ) -> ErrorGuaranteed {
+ match vis_resolution_error {
+ VisResolutionError::Relative2018(span, path) => {
+ let mut err = self.session.struct_span_err(
+ span,
+ "relative paths are not supported in visibilities in 2018 edition or later",
+ );
+ err.span_suggestion(
+ path.span,
+ "try",
+ format!("crate::{}", pprust::path_to_string(&path)),
+ Applicability::MaybeIncorrect,
+ );
+ err
+ }
+ VisResolutionError::AncestorOnly(span) => struct_span_err!(
+ self.session,
+ span,
+ E0742,
+ "visibilities can only be restricted to ancestor modules"
+ ),
+ VisResolutionError::FailedToResolve(span, label, suggestion) => {
+ self.into_struct_error(span, ResolutionError::FailedToResolve { label, suggestion })
+ }
+ VisResolutionError::ExpectedFound(span, path_str, res) => {
+ let mut err = struct_span_err!(
+ self.session,
+ span,
+ E0577,
+ "expected module, found {} `{}`",
+ res.descr(),
+ path_str
+ );
+ err.span_label(span, "not a module");
+ err
+ }
+ VisResolutionError::Indeterminate(span) => struct_span_err!(
+ self.session,
+ span,
+ E0578,
+ "cannot determine resolution for the visibility"
+ ),
+ VisResolutionError::ModuleOnly(span) => {
+ self.session.struct_span_err(span, "visibility must resolve to a module")
+ }
+ }
+ .emit()
+ }
+
+ /// Look up a typo candidate in scope for a macro or import.
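+ ///
+ /// E.g. (illustrative): a call to a misspelled `printn!` may yield the
+ /// `println` macro from the prelude as the suggested candidate.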
+ fn early_lookup_typo_candidate(
+ &mut self,
+ scope_set: ScopeSet<'a>,
+ parent_scope: &ParentScope<'a>,
+ ident: Ident,
+ filter_fn: &impl Fn(Res) -> bool,
+ ) -> Option<TypoSuggestion> {
+ let mut suggestions = Vec::new();
+ let ctxt = ident.span.ctxt();
+ self.visit_scopes(scope_set, parent_scope, ctxt, |this, scope, use_prelude, _| {
+ match scope {
+ Scope::DeriveHelpers(expn_id) => {
+ let res = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper);
+ if filter_fn(res) {
+ suggestions.extend(
+ this.helper_attrs
+ .get(&expn_id)
+ .into_iter()
+ .flatten()
+ .map(|ident| TypoSuggestion::typo_from_res(ident.name, res)),
+ );
+ }
+ }
+ Scope::DeriveHelpersCompat => {
+ let res = Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat);
+ if filter_fn(res) {
+ for derive in parent_scope.derives {
+ let parent_scope = &ParentScope { derives: &[], ..*parent_scope };
+ if let Ok((Some(ext), _)) = this.resolve_macro_path(
+ derive,
+ Some(MacroKind::Derive),
+ parent_scope,
+ false,
+ false,
+ ) {
+ suggestions.extend(
+ ext.helper_attrs
+ .iter()
+ .map(|name| TypoSuggestion::typo_from_res(*name, res)),
+ );
+ }
+ }
+ }
+ }
+ Scope::MacroRules(macro_rules_scope) => {
+ if let MacroRulesScope::Binding(macro_rules_binding) = macro_rules_scope.get() {
+ let res = macro_rules_binding.binding.res();
+ if filter_fn(res) {
+ suggestions.push(TypoSuggestion::typo_from_res(
+ macro_rules_binding.ident.name,
+ res,
+ ))
+ }
+ }
+ }
+ Scope::CrateRoot => {
+ let root_ident = Ident::new(kw::PathRoot, ident.span);
+ let root_module = this.resolve_crate_root(root_ident);
+ this.add_module_candidates(root_module, &mut suggestions, filter_fn);
+ }
+ Scope::Module(module, _) => {
+ this.add_module_candidates(module, &mut suggestions, filter_fn);
+ }
+ Scope::RegisteredAttrs => {
+ let res = Res::NonMacroAttr(NonMacroAttrKind::Registered);
+ if filter_fn(res) {
+ suggestions.extend(
+ this.registered_attrs
+ .iter()
+ .map(|ident| TypoSuggestion::typo_from_res(ident.name, res)),
+ );
+ }
+ }
+ Scope::MacroUsePrelude => {
+ suggestions.extend(this.macro_use_prelude.iter().filter_map(
+ |(name, binding)| {
+ let res = binding.res();
+ filter_fn(res).then_some(TypoSuggestion::typo_from_res(*name, res))
+ },
+ ));
+ }
+ Scope::BuiltinAttrs => {
+ let res = Res::NonMacroAttr(NonMacroAttrKind::Builtin(kw::Empty));
+ if filter_fn(res) {
+ suggestions.extend(
+ BUILTIN_ATTRIBUTES
+ .iter()
+ .map(|attr| TypoSuggestion::typo_from_res(attr.name, res)),
+ );
+ }
+ }
+ Scope::ExternPrelude => {
+ suggestions.extend(this.extern_prelude.iter().filter_map(|(ident, _)| {
+ let res = Res::Def(DefKind::Mod, CRATE_DEF_ID.to_def_id());
+ filter_fn(res).then_some(TypoSuggestion::typo_from_res(ident.name, res))
+ }));
+ }
+ Scope::ToolPrelude => {
+ let res = Res::NonMacroAttr(NonMacroAttrKind::Tool);
+ suggestions.extend(
+ this.registered_tools
+ .iter()
+ .map(|ident| TypoSuggestion::typo_from_res(ident.name, res)),
+ );
+ }
+ Scope::StdLibPrelude => {
+ if let Some(prelude) = this.prelude {
+ let mut tmp_suggestions = Vec::new();
+ this.add_module_candidates(prelude, &mut tmp_suggestions, filter_fn);
+ suggestions.extend(
+ tmp_suggestions
+ .into_iter()
+ .filter(|s| use_prelude || this.is_builtin_macro(s.res)),
+ );
+ }
+ }
+ Scope::BuiltinTypes => {
+ suggestions.extend(PrimTy::ALL.iter().filter_map(|prim_ty| {
+ let res = Res::PrimTy(*prim_ty);
+ filter_fn(res).then_some(TypoSuggestion::typo_from_res(prim_ty.name(), res))
+ }))
+ }
+ }
+
+ None::<()>
+ });
+
+ // Make sure error reporting is deterministic.
+ suggestions.sort_by(|a, b| a.candidate.as_str().partial_cmp(b.candidate.as_str()).unwrap());
+
+ match find_best_match_for_name(
+ &suggestions.iter().map(|suggestion| suggestion.candidate).collect::<Vec<Symbol>>(),
+ ident.name,
+ None,
+ ) {
+ Some(found) if found != ident.name => {
+ suggestions.into_iter().find(|suggestion| suggestion.candidate == found)
+ }
+ _ => None,
+ }
+ }
+
+ fn lookup_import_candidates_from_module<FilterFn>(
+ &mut self,
+ lookup_ident: Ident,
+ namespace: Namespace,
+ parent_scope: &ParentScope<'a>,
+ start_module: Module<'a>,
+ crate_name: Ident,
+ filter_fn: FilterFn,
+ ) -> Vec<ImportSuggestion>
+ where
+ FilterFn: Fn(Res) -> bool,
+ {
+ let mut candidates = Vec::new();
+ let mut seen_modules = FxHashSet::default();
+ let mut worklist = vec![(start_module, Vec::<ast::PathSegment>::new(), true)];
+ let mut worklist_via_import = vec![];
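+ // Modules reachable only through re-exports are explored after all
+ // directly reachable ones, so direct paths are preferred in the
+ // reported suggestions (assumed intent).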
+
+ while let Some((in_module, path_segments, accessible)) = match worklist.pop() {
+ None => worklist_via_import.pop(),
+ Some(x) => Some(x),
+ } {
+ let in_module_is_extern = !in_module.def_id().is_local();
+ // We have to visit module children in deterministic order to avoid
+ // instabilities in reported imports (#43552).
+ in_module.for_each_child(self, |this, ident, ns, name_binding| {
+ // avoid non-importable candidates
+ if !name_binding.is_importable() {
+ return;
+ }
+
+ let child_accessible =
+ accessible && this.is_accessible_from(name_binding.vis, parent_scope.module);
+
+ // do not venture inside inaccessible items of other crates
+ if in_module_is_extern && !child_accessible {
+ return;
+ }
+
+ let via_import = name_binding.is_import() && !name_binding.is_extern_crate();
+
+ // There is an assumption elsewhere that paths of variants are in the enum's
+ // declaration and not imported. With this assumption, the variant component is
+ // chopped and the rest of the path is assumed to be the enum's own path. For
+ // errors where a variant is used as the type instead of the enum, this causes
+ // funny-looking invalid suggestions, e.g. `foo` instead of `foo::MyEnum`.
+ if via_import && name_binding.is_possibly_imported_variant() {
+ return;
+ }
+
+ // #90113: Do not count an inaccessible reexported item as a candidate.
+ if let NameBindingKind::Import { binding, .. } = name_binding.kind {
+ if this.is_accessible_from(binding.vis, parent_scope.module)
+ && !this.is_accessible_from(name_binding.vis, parent_scope.module)
+ {
+ return;
+ }
+ }
+
+ // collect results based on the filter function
+ // avoid suggesting anything from the same module in which we are resolving
+ // avoid suggesting anything with a hygienic name
+ if ident.name == lookup_ident.name
+ && ns == namespace
+ && !ptr::eq(in_module, parent_scope.module)
+ && !ident.span.normalize_to_macros_2_0().from_expansion()
+ {
+ let res = name_binding.res();
+ if filter_fn(res) {
+ // create the path
+ let mut segms = path_segments.clone();
+ if lookup_ident.span.rust_2018() {
+ // crate-local absolute paths start with `crate::` in edition 2018
+ // FIXME: may also be stabilized for Rust 2015 (Issues #45477, #44660)
+ segms.insert(0, ast::PathSegment::from_ident(crate_name));
+ }
+
+ segms.push(ast::PathSegment::from_ident(ident));
+ let path = Path { span: name_binding.span, segments: segms, tokens: None };
+ let did = match res {
+ Res::Def(DefKind::Ctor(..), did) => this.opt_parent(did),
+ _ => res.opt_def_id(),
+ };
+
+ if child_accessible {
+ // Remove a previously recorded inaccessible candidate for the same
+ // def, if one exists.
+ if let Some(idx) = candidates
+ .iter()
+ .position(|v: &ImportSuggestion| v.did == did && !v.accessible)
+ {
+ candidates.remove(idx);
+ }
+ }
+
+ if candidates.iter().all(|v: &ImportSuggestion| v.did != did) {
+ // See if we're recommending TryFrom, TryInto, or FromIterator and add
+ // a note about editions
+ let note = if let Some(did) = did {
+ let requires_note = !did.is_local()
+ && this.cstore().item_attrs_untracked(did, this.session).any(
+ |attr| {
+ if attr.has_name(sym::rustc_diagnostic_item) {
+ [sym::TryInto, sym::TryFrom, sym::FromIterator]
+ .map(|x| Some(x))
+ .contains(&attr.value_str())
+ } else {
+ false
+ }
+ },
+ );
+
+ requires_note.then(|| {
+ format!(
+ "'{}' is included in the prelude starting in Edition 2021",
+ path_names_to_string(&path)
+ )
+ })
+ } else {
+ None
+ };
+
+ candidates.push(ImportSuggestion {
+ did,
+ descr: res.descr(),
+ path,
+ accessible: child_accessible,
+ note,
+ });
+ }
+ }
+ }
+
+ // collect submodules to explore
+ if let Some(module) = name_binding.module() {
+ // form the path
+ let mut path_segments = path_segments.clone();
+ path_segments.push(ast::PathSegment::from_ident(ident));
+
+ let is_extern_crate_that_also_appears_in_prelude =
+ name_binding.is_extern_crate() && lookup_ident.span.rust_2018();
+
+ if !is_extern_crate_that_also_appears_in_prelude {
+ // add the module to the lookup
+ if seen_modules.insert(module.def_id()) {
+ if via_import { &mut worklist_via_import } else { &mut worklist }
+ .push((module, path_segments, child_accessible));
+ }
+ }
+ }
+ })
+ }
+
+ // If only some candidates are accessible, keep just those.
+ if !candidates.iter().all(|v: &ImportSuggestion| !v.accessible) {
+ candidates = candidates.into_iter().filter(|x| x.accessible).collect();
+ }
+
+ candidates
+ }
+
+ /// When name resolution fails, this method can be used to look up candidate
+ /// entities with the expected name. It allows filtering them using the
+ /// supplied predicate (which should be used to only accept the types of
+ /// definitions expected, e.g., traits). The lookup spans across all crates.
+ ///
+ /// N.B., the method does not look into imports, but this is not a problem,
+ /// since we report the definitions (thus, the de-aliased imports).
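+ ///
+ /// For example (illustrative), a failed resolution of `HashMap` may yield a
+ /// candidate whose path is `std::collections::HashMap`, subject to `filter_fn`.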
+ pub(crate) fn lookup_import_candidates<FilterFn>(
+ &mut self,
+ lookup_ident: Ident,
+ namespace: Namespace,
+ parent_scope: &ParentScope<'a>,
+ filter_fn: FilterFn,
+ ) -> Vec<ImportSuggestion>
+ where
+ FilterFn: Fn(Res) -> bool,
+ {
+ let mut suggestions = self.lookup_import_candidates_from_module(
+ lookup_ident,
+ namespace,
+ parent_scope,
+ self.graph_root,
+ Ident::with_dummy_span(kw::Crate),
+ &filter_fn,
+ );
+
+ if lookup_ident.span.rust_2018() {
+ let extern_prelude_names = self.extern_prelude.clone();
+ for (ident, _) in extern_prelude_names.into_iter() {
+ if ident.span.from_expansion() {
+ // Idents are adjusted to the root context before being
+ // resolved in the extern prelude, so reporting this to the
+ // user is no help. This skips the injected
+ // `extern crate std` in the 2018 edition, which would
+ // otherwise cause duplicate suggestions.
+ continue;
+ }
+ if let Some(crate_id) = self.crate_loader.maybe_process_path_extern(ident.name) {
+ let crate_root = self.expect_module(crate_id.as_def_id());
+ suggestions.extend(self.lookup_import_candidates_from_module(
+ lookup_ident,
+ namespace,
+ parent_scope,
+ crate_root,
+ ident,
+ &filter_fn,
+ ));
+ }
+ }
+ }
+
+ suggestions
+ }
+
+ pub(crate) fn unresolved_macro_suggestions(
+ &mut self,
+ err: &mut Diagnostic,
+ macro_kind: MacroKind,
+ parent_scope: &ParentScope<'a>,
+ ident: Ident,
+ ) {
+ let is_expected = &|res: Res| res.macro_kind() == Some(macro_kind);
+ let suggestion = self.early_lookup_typo_candidate(
+ ScopeSet::Macro(macro_kind),
+ parent_scope,
+ ident,
+ is_expected,
+ );
+ self.add_typo_suggestion(err, suggestion, ident.span);
+
+ let import_suggestions =
+ self.lookup_import_candidates(ident, Namespace::MacroNS, parent_scope, is_expected);
+ show_candidates(
+ &self.session,
+ &self.source_span,
+ err,
+ None,
+ &import_suggestions,
+ Instead::No,
+ FoundUse::Yes,
+ IsPattern::No,
+ vec![],
+ );
+
+ if macro_kind == MacroKind::Derive && (ident.name == sym::Send || ident.name == sym::Sync) {
+ let msg = format!("unsafe traits like `{}` should be implemented explicitly", ident);
+ err.span_note(ident.span, &msg);
+ return;
+ }
+ if self.macro_names.contains(&ident.normalize_to_macros_2_0()) {
+ err.help("have you added the `#[macro_use]` on the module/import?");
+ return;
+ }
+ if ident.name == kw::Default
+ && let ModuleKind::Def(DefKind::Enum, def_id, _) = parent_scope.module.kind
+ && let Some(span) = self.opt_span(def_id)
+ {
+ let source_map = self.session.source_map();
+ let head_span = source_map.guess_head_span(span);
+ if let Ok(head) = source_map.span_to_snippet(head_span) {
+ err.span_suggestion(head_span, "consider adding a derive", format!("#[derive(Default)]\n{head}"), Applicability::MaybeIncorrect);
+ } else {
+ err.span_help(
+ head_span,
+ "consider adding `#[derive(Default)]` to this enum",
+ );
+ }
+ }
+ for ns in [Namespace::MacroNS, Namespace::TypeNS, Namespace::ValueNS] {
+ if let Ok(binding) = self.early_resolve_ident_in_lexical_scope(
+ ident,
+ ScopeSet::All(ns, false),
+ &parent_scope,
+ None,
+ false,
+ None,
+ ) {
+ let desc = match binding.res() {
+ Res::Def(DefKind::Macro(MacroKind::Bang), _) => {
+ "a function-like macro".to_string()
+ }
+ Res::Def(DefKind::Macro(MacroKind::Attr), _) | Res::NonMacroAttr(..) => {
+ format!("an attribute: `#[{}]`", ident)
+ }
+ Res::Def(DefKind::Macro(MacroKind::Derive), _) => {
+ format!("a derive macro: `#[derive({})]`", ident)
+ }
+ Res::ToolMod => {
+ // Don't confuse the user with tool modules.
+ continue;
+ }
+ Res::Def(DefKind::Trait, _) if macro_kind == MacroKind::Derive => {
+ "only a trait, without a derive macro".to_string()
+ }
+ res => format!(
+ "{} {}, not {} {}",
+ res.article(),
+ res.descr(),
+ macro_kind.article(),
+ macro_kind.descr_expected(),
+ ),
+ };
+ if let crate::NameBindingKind::Import { import, .. } = binding.kind {
+ if !import.span.is_dummy() {
+ err.span_note(
+ import.span,
+ &format!("`{}` is imported here, but it is {}", ident, desc),
+ );
+ // Silence the 'unused import' warning we might get,
+ // since this diagnostic already covers that import.
+ self.record_use(ident, binding, false);
+ return;
+ }
+ }
+ err.note(&format!("`{}` is in scope, but it is {}", ident, desc));
+ return;
+ }
+ }
+ }
+
+ pub(crate) fn add_typo_suggestion(
+ &self,
+ err: &mut Diagnostic,
+ suggestion: Option<TypoSuggestion>,
+ span: Span,
+ ) -> bool {
+ let suggestion = match suggestion {
+ None => return false,
+ // We shouldn't suggest underscore.
+ Some(suggestion) if suggestion.candidate == kw::Underscore => return false,
+ Some(suggestion) => suggestion,
+ };
+ let def_span = suggestion.res.opt_def_id().and_then(|def_id| match def_id.krate {
+ LOCAL_CRATE => self.opt_span(def_id),
+ _ => Some(self.cstore().get_span_untracked(def_id, self.session)),
+ });
+ if let Some(def_span) = def_span {
+ if span.overlaps(def_span) {
+ // Don't suggest typo suggestion for itself like in the following:
+ // error[E0423]: expected function, tuple struct or tuple variant, found struct `X`
+ // --> $DIR/issue-64792-bad-unicode-ctor.rs:3:14
+ // |
+ // LL | struct X {}
+ // | ----------- `X` defined here
+ // LL |
+ // LL | const Y: X = X("ö");
+ // | -------------^^^^^^- similarly named constant `Y` defined here
+ // |
+ // help: use struct literal syntax instead
+ // |
+ // LL | const Y: X = X {};
+ // | ^^^^
+ // help: a constant with a similar name exists
+ // |
+ // LL | const Y: X = Y("ö");
+ // | ^
+ return false;
+ }
+ let prefix = match suggestion.target {
+ SuggestionTarget::SimilarlyNamed => "similarly named ",
+ SuggestionTarget::SingleItem => "",
+ };
+
+ err.span_label(
+ self.session.source_map().guess_head_span(def_span),
+ &format!(
+ "{}{} `{}` defined here",
+ prefix,
+ suggestion.res.descr(),
+ suggestion.candidate,
+ ),
+ );
+ }
+ let msg = match suggestion.target {
+ SuggestionTarget::SimilarlyNamed => format!(
+ "{} {} with a similar name exists",
+ suggestion.res.article(),
+ suggestion.res.descr()
+ ),
+ SuggestionTarget::SingleItem => {
+ format!("maybe you meant this {}", suggestion.res.descr())
+ }
+ };
+ err.span_suggestion(span, &msg, suggestion.candidate, Applicability::MaybeIncorrect);
+ true
+ }
+
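+ /// Produces a human-readable description of the binding `b` for use in
+ /// ambiguity notes, e.g. "a built-in attribute" or "the trait imported here".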
+ fn binding_description(&self, b: &NameBinding<'_>, ident: Ident, from_prelude: bool) -> String {
+ let res = b.res();
+ if b.span.is_dummy() || !self.session.source_map().is_span_accessible(b.span) {
+ // These already contain the "built-in" prefix or look bad with it.
+ let add_built_in =
+ !matches!(b.res(), Res::NonMacroAttr(..) | Res::PrimTy(..) | Res::ToolMod);
+ let (built_in, from) = if from_prelude {
+ ("", " from prelude")
+ } else if b.is_extern_crate()
+ && !b.is_import()
+ && self.session.opts.externs.get(ident.as_str()).is_some()
+ {
+ ("", " passed with `--extern`")
+ } else if add_built_in {
+ (" built-in", "")
+ } else {
+ ("", "")
+ };
+
+ let a = if built_in.is_empty() { res.article() } else { "a" };
+ format!("{a}{built_in} {thing}{from}", thing = res.descr())
+ } else {
+ let introduced = if b.is_import() { "imported" } else { "defined" };
+ format!("the {thing} {introduced} here", thing = res.descr())
+ }
+ }
+
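+ /// Reports an E0659 ambiguity error, e.g. (illustrative):
+ ///
+ /// ```text
+ /// error[E0659]: `foo` is ambiguous
+ ///    |
+ /// LL |     foo!();
+ ///    |     ^^^ ambiguous name
+ /// ```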
+ fn report_ambiguity_error(&self, ambiguity_error: &AmbiguityError<'_>) {
+ let AmbiguityError { kind, ident, b1, b2, misc1, misc2 } = *ambiguity_error;
+ let (b1, b2, misc1, misc2, swapped) = if b2.span.is_dummy() && !b1.span.is_dummy() {
+ // We have to print the span-less alternative first, otherwise formatting looks bad.
+ (b2, b1, misc2, misc1, true)
+ } else {
+ (b1, b2, misc1, misc2, false)
+ };
+
+ let mut err = struct_span_err!(self.session, ident.span, E0659, "`{ident}` is ambiguous");
+ err.span_label(ident.span, "ambiguous name");
+ err.note(&format!("ambiguous because of {}", kind.descr()));
+
+ let mut could_refer_to = |b: &NameBinding<'_>, misc: AmbiguityErrorMisc, also: &str| {
+ let what = self.binding_description(b, ident, misc == AmbiguityErrorMisc::FromPrelude);
+ let note_msg = format!("`{ident}` could{also} refer to {what}");
+
+ let thing = b.res().descr();
+ let mut help_msgs = Vec::new();
+ if b.is_glob_import()
+ && (kind == AmbiguityKind::GlobVsGlob
+ || kind == AmbiguityKind::GlobVsExpanded
+ || kind == AmbiguityKind::GlobVsOuter && swapped != also.is_empty())
+ {
+ help_msgs.push(format!(
+ "consider adding an explicit import of `{ident}` to disambiguate"
+ ))
+ }
+ if b.is_extern_crate() && ident.span.rust_2018() {
+ help_msgs.push(format!("use `::{ident}` to refer to this {thing} unambiguously"))
+ }
+ if misc == AmbiguityErrorMisc::SuggestCrate {
+ help_msgs
+ .push(format!("use `crate::{ident}` to refer to this {thing} unambiguously"))
+ } else if misc == AmbiguityErrorMisc::SuggestSelf {
+ help_msgs
+ .push(format!("use `self::{ident}` to refer to this {thing} unambiguously"))
+ }
+
+ err.span_note(b.span, &note_msg);
+ for (i, help_msg) in help_msgs.iter().enumerate() {
+ let or = if i == 0 { "" } else { "or " };
+ err.help(&format!("{}{}", or, help_msg));
+ }
+ };
+
+ could_refer_to(b1, misc1, "");
+ could_refer_to(b2, misc2, " also");
+ err.emit();
+ }
+
+ /// If the binding refers to a tuple struct constructor with fields,
+ /// returns the span of its fields.
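+ ///
+ /// ```ignore (illustrative)
+ /// struct Foo(usize, usize);
+ /// //         ^^^^^^^^^^^^ returned span
+ /// ```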
+ fn ctor_fields_span(&self, binding: &NameBinding<'_>) -> Option<Span> {
+ if let NameBindingKind::Res(
+ Res::Def(DefKind::Ctor(CtorOf::Struct, CtorKind::Fn), ctor_def_id),
+ _,
+ ) = binding.kind
+ {
+ let def_id = self.parent(ctor_def_id);
+ let fields = self.field_names.get(&def_id)?;
+ return fields.iter().map(|name| name.span).reduce(Span::to); // None for `struct Foo()`
+ }
+ None
+ }
+
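+ /// Reports an E0603 "private item" error, printing the whole import chain
+ /// that leads to the private binding.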
+ fn report_privacy_error(&self, privacy_error: &PrivacyError<'_>) {
+ let PrivacyError { ident, binding, .. } = *privacy_error;
+
+ let res = binding.res();
+ let ctor_fields_span = self.ctor_fields_span(binding);
+ let plain_descr = res.descr().to_string();
+ let nonimport_descr =
+ if ctor_fields_span.is_some() { plain_descr + " constructor" } else { plain_descr };
+ let import_descr = nonimport_descr.clone() + " import";
+ let get_descr =
+ |b: &NameBinding<'_>| if b.is_import() { &import_descr } else { &nonimport_descr };
+
+ // Print the primary message.
+ let descr = get_descr(binding);
+ let mut err =
+ struct_span_err!(self.session, ident.span, E0603, "{} `{}` is private", descr, ident);
+ err.span_label(ident.span, &format!("private {}", descr));
+ if let Some(span) = ctor_fields_span {
+ err.span_label(span, "a constructor is private if any of the fields is private");
+ }
+
+ // Print the whole import chain to make it easier to see what happens.
+ let first_binding = binding;
+ let mut next_binding = Some(binding);
+ let mut next_ident = ident;
+ while let Some(binding) = next_binding {
+ let name = next_ident;
+ next_binding = match binding.kind {
+ _ if res == Res::Err => None,
+ NameBindingKind::Import { binding, import, .. } => match import.kind {
+ _ if binding.span.is_dummy() => None,
+ ImportKind::Single { source, .. } => {
+ next_ident = source;
+ Some(binding)
+ }
+ ImportKind::Glob { .. } | ImportKind::MacroUse => Some(binding),
+ ImportKind::ExternCrate { .. } => None,
+ },
+ _ => None,
+ };
+
+ let first = ptr::eq(binding, first_binding);
+ let msg = format!(
+ "{and_refers_to}the {item} `{name}`{which} is defined here{dots}",
+ and_refers_to = if first { "" } else { "...and refers to " },
+ item = get_descr(binding),
+ which = if first { "" } else { " which" },
+ dots = if next_binding.is_some() { "..." } else { "" },
+ );
+ let def_span = self.session.source_map().guess_head_span(binding.span);
+ let mut note_span = MultiSpan::from_span(def_span);
+ if !first && binding.vis.is_public() {
+ note_span.push_span_label(def_span, "consider importing it directly");
+ }
+ err.span_note(note_span, &msg);
+ }
+
+ err.emit();
+ }
+
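+ /// Looks for a crate in the extern prelude, or a module nested below
+ /// `current_module`, whose name is similar to `ident`; returns `None` if
+ /// the best match is `ident` itself.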
+ pub(crate) fn find_similarly_named_module_or_crate(
+ &mut self,
+ ident: Symbol,
+ current_module: &Module<'a>,
+ ) -> Option<Symbol> {
+ let mut candidates = self
+ .extern_prelude
+ .iter()
+ .map(|(ident, _)| ident.name)
+ .chain(
+ self.module_map
+ .iter()
+ .filter(|(_, module)| {
+ current_module.is_ancestor_of(module) && !ptr::eq(current_module, *module)
+ })
+ .flat_map(|(_, module)| module.kind.name()),
+ )
+ .filter(|c| !c.to_string().is_empty())
+ .collect::<Vec<_>>();
+ candidates.sort();
+ candidates.dedup();
+ match find_best_match_for_name(&candidates, ident, None) {
+ Some(sugg) if sugg == ident => None,
+ sugg => sugg,
+ }
+ }
+
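+ /// Builds the failure message (and an optional suggestion) for the path
+ /// segment `ident` at index `i` of `path` that could not be resolved.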
+ pub(crate) fn report_path_resolution_error(
+ &mut self,
+ path: &[Segment],
+ opt_ns: Option<Namespace>, // `None` indicates a module path in import
+ parent_scope: &ParentScope<'a>,
+ ribs: Option<&PerNS<Vec<Rib<'a>>>>,
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ module: Option<ModuleOrUniformRoot<'a>>,
+ i: usize,
+ ident: Ident,
+ ) -> (String, Option<Suggestion>) {
+ let is_last = i == path.len() - 1;
+ let ns = if is_last { opt_ns.unwrap_or(TypeNS) } else { TypeNS };
+ let module_res = match module {
+ Some(ModuleOrUniformRoot::Module(module)) => module.res(),
+ _ => None,
+ };
+ if module_res == self.graph_root.res() {
+ let is_mod = |res| matches!(res, Res::Def(DefKind::Mod, _));
+ let mut candidates = self.lookup_import_candidates(ident, TypeNS, parent_scope, is_mod);
+ candidates
+ .sort_by_cached_key(|c| (c.path.segments.len(), pprust::path_to_string(&c.path)));
+ if let Some(candidate) = candidates.get(0) {
+ (
+ String::from("unresolved import"),
+ Some((
+ vec![(ident.span, pprust::path_to_string(&candidate.path))],
+ String::from("a similar path exists"),
+ Applicability::MaybeIncorrect,
+ )),
+ )
+ } else if self.session.edition() == Edition::Edition2015 {
+ (
+ format!("maybe a missing crate `{ident}`?"),
+ Some((
+ vec![],
+ format!(
+ "consider adding `extern crate {ident}` to use the `{ident}` crate"
+ ),
+ Applicability::MaybeIncorrect,
+ )),
+ )
+ } else {
+ (format!("could not find `{ident}` in the crate root"), None)
+ }
+ } else if i > 0 {
+ let parent = path[i - 1].ident.name;
+ let parent = match parent {
+ // ::foo is mounted at the crate root for 2015, and is the extern
+ // prelude for 2018+
+ kw::PathRoot if self.session.edition() > Edition::Edition2015 => {
+ "the list of imported crates".to_owned()
+ }
+ kw::PathRoot | kw::Crate => "the crate root".to_owned(),
+ _ => format!("`{parent}`"),
+ };
+
+ let mut msg = format!("could not find `{}` in {}", ident, parent);
+ if ns == TypeNS || ns == ValueNS {
+ let ns_to_try = if ns == TypeNS { ValueNS } else { TypeNS };
+ let binding = if let Some(module) = module {
+ self.resolve_ident_in_module(
+ module,
+ ident,
+ ns_to_try,
+ parent_scope,
+ None,
+ ignore_binding,
+ ).ok()
+ } else if let Some(ribs) = ribs
+ && let Some(TypeNS | ValueNS) = opt_ns
+ {
+ match self.resolve_ident_in_lexical_scope(
+ ident,
+ ns_to_try,
+ parent_scope,
+ None,
+ &ribs[ns_to_try],
+ ignore_binding,
+ ) {
+ // we found a locally-imported or available item/module
+ Some(LexicalScopeBinding::Item(binding)) => Some(binding),
+ _ => None,
+ }
+ } else {
+ let scopes = ScopeSet::All(ns_to_try, opt_ns.is_none());
+ self.early_resolve_ident_in_lexical_scope(
+ ident,
+ scopes,
+ parent_scope,
+ None,
+ false,
+ ignore_binding,
+ ).ok()
+ };
+ if let Some(binding) = binding {
+ let mut found = |what| {
+ msg = format!(
+ "expected {}, found {} `{}` in {}",
+ ns.descr(),
+ what,
+ ident,
+ parent
+ )
+ };
+ if binding.module().is_some() {
+ found("module")
+ } else {
+ match binding.res() {
+ Res::Def(kind, id) => found(kind.descr(id)),
+ _ => found(ns_to_try.descr()),
+ }
+ }
+ };
+ }
+ (msg, None)
+ } else if ident.name == kw::SelfUpper {
+ ("`Self` is only available in impls, traits, and type definitions".to_string(), None)
+ } else if ident.name.as_str().chars().next().map_or(false, |c| c.is_ascii_uppercase()) {
+ // Check whether the name refers to an item in the value namespace.
+ let binding = if let Some(ribs) = ribs {
+ self.resolve_ident_in_lexical_scope(
+ ident,
+ ValueNS,
+ parent_scope,
+ None,
+ &ribs[ValueNS],
+ ignore_binding,
+ )
+ } else {
+ None
+ };
+ let match_span = match binding {
+ // Name matches a local variable. For example:
+ // ```
+ // fn f() {
+ // let Foo: &str = "";
+ // println!("{}", Foo::Bar); // Name refers to local
+ // // variable `Foo`.
+ // }
+ // ```
+ Some(LexicalScopeBinding::Res(Res::Local(id))) => {
+ Some(*self.pat_span_map.get(&id).unwrap())
+ }
+ // Name matches item from a local name binding
+ // created by `use` declaration. For example:
+ // ```
+ // pub const Foo: &str = "";
+ //
+ // mod submod {
+ // use super::Foo;
+ // println!("{}", Foo::Bar); // Name refers to local
+ // // binding `Foo`.
+ // }
+ // ```
+ Some(LexicalScopeBinding::Item(name_binding)) => Some(name_binding.span),
+ _ => None,
+ };
+ let suggestion = if let Some(span) = match_span {
+ Some((
+ vec![(span, String::from(""))],
+ format!("`{}` is defined here, but is not a type", ident),
+ Applicability::MaybeIncorrect,
+ ))
+ } else {
+ None
+ };
+
+ (format!("use of undeclared type `{}`", ident), suggestion)
+ } else {
+ let suggestion = if ident.name == sym::alloc {
+ Some((
+ vec![],
+ String::from("add `extern crate alloc` to use the `alloc` crate"),
+ Applicability::MaybeIncorrect,
+ ))
+ } else {
+ self.find_similarly_named_module_or_crate(ident.name, &parent_scope.module).map(
+ |sugg| {
+ (
+ vec![(ident.span, sugg.to_string())],
+ String::from("there is a crate or module with a similar name"),
+ Applicability::MaybeIncorrect,
+ )
+ },
+ )
+ };
+ (format!("use of undeclared crate or module `{}`", ident), suggestion)
+ }
+ }
+}
+
+impl<'a, 'b> ImportResolver<'a, 'b> {
+ /// Adds suggestions for a path that cannot be resolved.
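+ ///
+ /// Tries prepending `self::`, `crate::`, `super::`, and extern crate names
+ /// in turn, e.g. (illustrative):
+ ///
+ /// ```text
+ /// |
+ /// LL | use foo::Bar;
+ /// | ^^^ did you mean `self::foo`?
+ /// ```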
+ pub(crate) fn make_path_suggestion(
+ &mut self,
+ span: Span,
+ mut path: Vec<Segment>,
+ parent_scope: &ParentScope<'b>,
+ ) -> Option<(Vec<Segment>, Option<String>)> {
+ debug!("make_path_suggestion: span={:?} path={:?}", span, path);
+
+ match (path.get(0), path.get(1)) {
+ // `{{root}}::ident::...` on both editions.
+ // On 2015 `{{root}}` is usually added implicitly.
+ (Some(fst), Some(snd))
+ if fst.ident.name == kw::PathRoot && !snd.ident.is_path_segment_keyword() => {}
+ // `ident::...` on 2018.
+ (Some(fst), _)
+ if fst.ident.span.rust_2018() && !fst.ident.is_path_segment_keyword() =>
+ {
+ // Insert a placeholder that's later replaced by `self`/`super`/etc.
+ path.insert(0, Segment::from_ident(Ident::empty()));
+ }
+ _ => return None,
+ }
+
+ self.make_missing_self_suggestion(path.clone(), parent_scope)
+ .or_else(|| self.make_missing_crate_suggestion(path.clone(), parent_scope))
+ .or_else(|| self.make_missing_super_suggestion(path.clone(), parent_scope))
+ .or_else(|| self.make_external_crate_suggestion(path, parent_scope))
+ }
+
+ /// Suggests a missing `self::` if that resolves to a correct module.
+ ///
+ /// ```text
+ /// |
+ /// LL | use foo::Bar;
+ /// | ^^^ did you mean `self::foo`?
+ /// ```
+ fn make_missing_self_suggestion(
+ &mut self,
+ mut path: Vec<Segment>,
+ parent_scope: &ParentScope<'b>,
+ ) -> Option<(Vec<Segment>, Option<String>)> {
+ // Replace first ident with `self` and check if that is valid.
+ path[0].ident.name = kw::SelfLower;
+ let result = self.r.maybe_resolve_path(&path, None, parent_scope);
+ debug!("make_missing_self_suggestion: path={:?} result={:?}", path, result);
+ if let PathResult::Module(..) = result { Some((path, None)) } else { None }
+ }
+
+ /// Suggests a missing `crate::` if that resolves to a correct module.
+ ///
+ /// ```text
+ /// |
+ /// LL | use foo::Bar;
+ /// | ^^^ did you mean `crate::foo`?
+ /// ```
+ fn make_missing_crate_suggestion(
+ &mut self,
+ mut path: Vec<Segment>,
+ parent_scope: &ParentScope<'b>,
+ ) -> Option<(Vec<Segment>, Option<String>)> {
+ // Replace first ident with `crate` and check if that is valid.
+ path[0].ident.name = kw::Crate;
+ let result = self.r.maybe_resolve_path(&path, None, parent_scope);
+ debug!("make_missing_crate_suggestion: path={:?} result={:?}", path, result);
+ if let PathResult::Module(..) = result {
+ Some((
+ path,
+ Some(
+ "`use` statements changed in Rust 2018; read more at \
+ <https://doc.rust-lang.org/edition-guide/rust-2018/module-system/path-\
+ clarity.html>"
+ .to_string(),
+ ),
+ ))
+ } else {
+ None
+ }
+ }
+
+ /// Suggests a missing `super::` if that resolves to a correct module.
+ ///
+ /// ```text
+ /// |
+ /// LL | use foo::Bar;
+ /// | ^^^ did you mean `super::foo`?
+ /// ```
+ fn make_missing_super_suggestion(
+ &mut self,
+ mut path: Vec<Segment>,
+ parent_scope: &ParentScope<'b>,
+ ) -> Option<(Vec<Segment>, Option<String>)> {
+ // Replace first ident with `super` and check if that is valid.
+ path[0].ident.name = kw::Super;
+ let result = self.r.maybe_resolve_path(&path, None, parent_scope);
+ debug!("make_missing_super_suggestion: path={:?} result={:?}", path, result);
+ if let PathResult::Module(..) = result { Some((path, None)) } else { None }
+ }
+
+ /// Suggests a missing external crate name if that resolves to a correct module.
+ ///
+ /// ```text
+ /// |
+ /// LL | use foobar::Baz;
+ /// | ^^^^^^ did you mean `baz::foobar`?
+ /// ```
+ ///
+ /// Used when importing a submodule of an external crate but missing that crate's
+ /// name as the first part of the path.
+ fn make_external_crate_suggestion(
+ &mut self,
+ mut path: Vec<Segment>,
+ parent_scope: &ParentScope<'b>,
+ ) -> Option<(Vec<Segment>, Option<String>)> {
+ if path[1].ident.span.rust_2015() {
+ return None;
+ }
+
+ // Sort extern crate names in *reverse* order to get
+ // 1) some consistent ordering for emitted diagnostics, and
+ // 2) `std` suggestions before `core` suggestions.
+ let mut extern_crate_names =
+ self.r.extern_prelude.iter().map(|(ident, _)| ident.name).collect::<Vec<_>>();
+ extern_crate_names.sort_by(|a, b| b.as_str().partial_cmp(a.as_str()).unwrap());
+
+ for name in extern_crate_names.into_iter() {
+ // Replace first ident with a crate name and check if that is valid.
+ path[0].ident.name = name;
+ let result = self.r.maybe_resolve_path(&path, None, parent_scope);
+ debug!(
+ "make_external_crate_suggestion: name={:?} path={:?} result={:?}",
+ name, path, result
+ );
+ if let PathResult::Module(..) = result {
+ return Some((path, None));
+ }
+ }
+
+ None
+ }
+
+ /// Suggests importing a macro from the root of the crate rather than a module within
+ /// the crate.
+ ///
+ /// ```text
+ /// help: a macro with this name exists at the root of the crate
+ /// |
+ /// LL | use issue_59764::makro;
+ /// | ^^^^^^^^^^^^^^^^^^
+ /// |
+ /// = note: this could be because a macro annotated with `#[macro_export]` will be exported
+ /// at the root of the crate instead of the module where it is defined
+ /// ```
+ pub(crate) fn check_for_module_export_macro(
+ &mut self,
+ import: &'b Import<'b>,
+ module: ModuleOrUniformRoot<'b>,
+ ident: Ident,
+ ) -> Option<(Option<Suggestion>, Option<String>)> {
+ let ModuleOrUniformRoot::Module(mut crate_module) = module else {
+ return None;
+ };
+
+ while let Some(parent) = crate_module.parent {
+ crate_module = parent;
+ }
+
+ if ModuleOrUniformRoot::same_def(ModuleOrUniformRoot::Module(crate_module), module) {
+ // Don't make a suggestion if the import was already from the root of the
+ // crate.
+ return None;
+ }
+
+ let resolutions = self.r.resolutions(crate_module).borrow();
+ let resolution = resolutions.get(&self.r.new_key(ident, MacroNS))?;
+ let binding = resolution.borrow().binding()?;
+ if let Res::Def(DefKind::Macro(MacroKind::Bang), _) = binding.res() {
+ let module_name = crate_module.kind.name().unwrap();
+ let import_snippet = match import.kind {
+ ImportKind::Single { source, target, .. } if source != target => {
+ format!("{} as {}", source, target)
+ }
+ _ => format!("{}", ident),
+ };
+
+ let mut corrections: Vec<(Span, String)> = Vec::new();
+ if !import.is_nested() {
+ // Assume this is the easy case of `use issue_59764::foo::makro;` and just remove
+ // intermediate segments.
+ corrections.push((import.span, format!("{}::{}", module_name, import_snippet)));
+ } else {
+ // Find the binding span (and any trailing commas and spaces).
+ // ie. `use a::b::{c, d, e};`
+ // ^^^
+ let (found_closing_brace, binding_span) = find_span_of_binding_until_next_binding(
+ self.r.session,
+ import.span,
+ import.use_span,
+ );
+ debug!(
+ "check_for_module_export_macro: found_closing_brace={:?} binding_span={:?}",
+ found_closing_brace, binding_span
+ );
+
+ let mut removal_span = binding_span;
+ if found_closing_brace {
+ // If the binding span ended with a closing brace, as in the below example:
+ // ie. `use a::b::{c, d};`
+ // ^
+ // Then expand the span of characters to remove to include the previous
+ // binding's trailing comma.
+ // ie. `use a::b::{c, d};`
+ // ^^^
+ if let Some(previous_span) =
+ extend_span_to_previous_binding(self.r.session, binding_span)
+ {
+ debug!("check_for_module_export_macro: previous_span={:?}", previous_span);
+ removal_span = removal_span.with_lo(previous_span.lo());
+ }
+ }
+ debug!("check_for_module_export_macro: removal_span={:?}", removal_span);
+
+ // Remove the `removal_span`.
+ corrections.push((removal_span, "".to_string()));
+
+ // Find the span after the crate name and if it has nested imports immediately
+ // after the crate name already.
+ // ie. `use a::b::{c, d};`
+ // ^^^^^^^^^
+ // or `use a::{b, c, d}};`
+ // ^^^^^^^^^^^
+ let (has_nested, after_crate_name) = find_span_immediately_after_crate_name(
+ self.r.session,
+ module_name,
+ import.use_span,
+ );
+ debug!(
+ "check_for_module_export_macro: has_nested={:?} after_crate_name={:?}",
+ has_nested, after_crate_name
+ );
+
+ let source_map = self.r.session.source_map();
+
+ // Add the import to the start, with a `{` if required.
+ let start_point = source_map.start_point(after_crate_name);
+ if let Ok(start_snippet) = source_map.span_to_snippet(start_point) {
+ corrections.push((
+ start_point,
+ if has_nested {
+ // In this case, `start_snippet` must equal '{'.
+ format!("{}{}, ", start_snippet, import_snippet)
+ } else {
+ // In this case, add a `{`, then the moved import, then whatever
+ // was there before.
+ format!("{{{}, {}", import_snippet, start_snippet)
+ },
+ ));
+ }
+
+ // Add a `};` to the end if nested, matching the `{` added at the start.
+ if !has_nested {
+ corrections.push((source_map.end_point(after_crate_name), "};".to_string()));
+ }
+ }
+
+ let suggestion = Some((
+ corrections,
+ String::from("a macro with this name exists at the root of the crate"),
+ Applicability::MaybeIncorrect,
+ ));
+ Some((suggestion, Some("this could be because a macro annotated with `#[macro_export]` will be exported \
+ at the root of the crate instead of the module where it is defined"
+ .to_string())))
+ } else {
+ None
+ }
+ }
+}
+
+/// Given a `binding_span` of a binding within a use statement:
+///
+/// ```ignore (illustrative)
+/// use foo::{a, b, c};
+/// // ^
+/// ```
+///
+/// then return the span until the next binding or the end of the statement:
+///
+/// ```ignore (illustrative)
+/// use foo::{a, b, c};
+/// // ^^^
+/// ```
+fn find_span_of_binding_until_next_binding(
+ sess: &Session,
+ binding_span: Span,
+ use_span: Span,
+) -> (bool, Span) {
+ let source_map = sess.source_map();
+
+ // Find the span of everything after the binding.
+ // ie. `a, e};` or `a};`
+ let binding_until_end = binding_span.with_hi(use_span.hi());
+
+ // Find everything after the binding but not including the binding.
+ // ie. `, e};` or `};`
+ let after_binding_until_end = binding_until_end.with_lo(binding_span.hi());
+
+ // Keep characters in the span until we encounter something that isn't a comma or
+ // whitespace.
+ // ie. `, ` or ``.
+ //
+ // Also note whether a closing brace character was encountered. If there
+ // was, then later go backwards to remove any trailing commas that are left.
+ let mut found_closing_brace = false;
+ let after_binding_until_next_binding =
+ source_map.span_take_while(after_binding_until_end, |&ch| {
+ if ch == '}' {
+ found_closing_brace = true;
+ }
+ ch == ' ' || ch == ','
+ });
+
+ // Combine the two spans.
+ // ie. `a, ` or `a`.
+ //
+ // Removing these would leave `issue_52891::{d, e};` or `issue_52891::{d, e, };`
+ let span = binding_span.with_hi(after_binding_until_next_binding.hi());
+
+ (found_closing_brace, span)
+}
+
+/// Given a `binding_span`, return the span through to the comma or opening brace of the previous
+/// binding.
+///
+/// ```ignore (illustrative)
+/// use foo::a::{a, b, c};
+/// // ^^--- binding span
+/// // |
+/// // returned span
+///
+/// use foo::{a, b, c};
+/// // --- binding span
+/// ```
+fn extend_span_to_previous_binding(sess: &Session, binding_span: Span) -> Option<Span> {
+ let source_map = sess.source_map();
+
+ // `prev_source` will contain all of the source that came before the span.
+ // Then split based on a comma and take the first (ie. closest to our span)
+ // snippet. In the example, this is a space.
+ let prev_source = source_map.span_to_prev_source(binding_span).ok()?;
+
+ let prev_comma = prev_source.rsplit(',').collect::<Vec<_>>();
+ let prev_starting_brace = prev_source.rsplit('{').collect::<Vec<_>>();
+ if prev_comma.len() <= 1 || prev_starting_brace.len() <= 1 {
+ return None;
+ }
+
+ let prev_comma = prev_comma.first().unwrap();
+ let prev_starting_brace = prev_starting_brace.first().unwrap();
+
+ // If the amount of source code before the comma is greater than
+ // the amount of source code before the starting brace then we've only
+ // got one item in the nested item (eg. `issue_52891::{self}`).
+ if prev_comma.len() > prev_starting_brace.len() {
+ return None;
+ }
+
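+ // Illustrative: for `use foo::{a, b};` with `binding_span` covering `b`,
+ // `prev_comma` is `" "`, so the span is extended backwards by two bytes
+ // to also cover the `, ` that separates it from the previous binding.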
+ Some(binding_span.with_lo(BytePos(
+ // Take away the number of bytes for the characters we've found and an
+ // extra for the comma.
+ binding_span.lo().0 - (prev_comma.as_bytes().len() as u32) - 1,
+ )))
+}
+
+/// Given a `use_span` of a binding within a use statement, returns the highlighted span and if
+/// it is a nested use tree.
+///
+/// ```ignore (illustrative)
+/// use foo::a::{b, c};
+/// // ^^^^^^^^^^ -- false
+///
+/// use foo::{a, b, c};
+/// // ^^^^^^^^^^ -- true
+///
+/// use foo::{a, b::{c, d}};
+/// // ^^^^^^^^^^^^^^^ -- true
+/// ```
+fn find_span_immediately_after_crate_name(
+ sess: &Session,
+ module_name: Symbol,
+ use_span: Span,
+) -> (bool, Span) {
+ debug!(
+ "find_span_immediately_after_crate_name: module_name={:?} use_span={:?}",
+ module_name, use_span
+ );
+ let source_map = sess.source_map();
+
+ // Using `use issue_59764::foo::{baz, makro};` as an example throughout..
+ let mut num_colons = 0;
+ // Find second colon.. `use issue_59764:`
+ let until_second_colon = source_map.span_take_while(use_span, |c| {
+ if *c == ':' {
+ num_colons += 1;
+ }
+ !matches!(c, ':' if num_colons == 2)
+ });
+ // Find everything after the second colon.. `foo::{baz, makro};`
+ let from_second_colon = use_span.with_lo(until_second_colon.hi() + BytePos(1));
+
+ let mut found_a_non_whitespace_character = false;
+ // Find the first non-whitespace character in `from_second_colon`.. `f`
+ let after_second_colon = source_map.span_take_while(from_second_colon, |c| {
+ if found_a_non_whitespace_character {
+ return false;
+ }
+ if !c.is_whitespace() {
+ found_a_non_whitespace_character = true;
+ }
+ true
+ });
+
+ // Find the first `{` in from_second_colon.. `foo::{`
+ let next_left_bracket = source_map.span_through_char(from_second_colon, '{');
+
+ (next_left_bracket == after_second_colon, from_second_colon)
+}
+
+/// Whether a suggestion has already been emitted; if so, the wording is changed
+/// slightly to clarify that the two are independent options.
+enum Instead {
+ Yes,
+ No,
+}
+
+/// Whether an existing place with an `use` item was found.
+enum FoundUse {
+ Yes,
+ No,
+}
+
+/// Whether a binding is part of a pattern or an expression. Used for diagnostics.
+enum IsPattern {
+ /// The binding is part of a pattern
+ Yes,
+ /// The binding is part of an expression
+ No,
+}
+
+/// When an entity with a given name is not available in scope, we search for
+/// entities with that name in all crates. This function allows outputting the
+/// results of this search in a programmer-friendly way.
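+///
+/// For example (illustrative):
+///
+/// ```text
+/// help: consider importing this struct
+///    |
+/// LL | use foo::Bar;
+///    |
+/// ```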
+fn show_candidates(
+ session: &Session,
+ source_span: &IndexVec<LocalDefId, Span>,
+ err: &mut Diagnostic,
+ // This is `None` if all placement locations are inside expansions
+ use_placement_span: Option<Span>,
+ candidates: &[ImportSuggestion],
+ instead: Instead,
+ found_use: FoundUse,
+ is_pattern: IsPattern,
+ path: Vec<Segment>,
+) {
+ if candidates.is_empty() {
+ return;
+ }
+
+ let mut accessible_path_strings: Vec<(String, &str, Option<DefId>, &Option<String>)> =
+ Vec::new();
+ let mut inaccessible_path_strings: Vec<(String, &str, Option<DefId>, &Option<String>)> =
+ Vec::new();
+
+ candidates.iter().for_each(|c| {
+ (if c.accessible { &mut accessible_path_strings } else { &mut inaccessible_path_strings })
+ .push((path_names_to_string(&c.path), c.descr, c.did, &c.note))
+ });
+
+ // we want consistent results across executions, but candidates are produced
+ // by iterating through a hash map, so make sure they are ordered:
+ for path_strings in [&mut accessible_path_strings, &mut inaccessible_path_strings] {
+ path_strings.sort_by(|a, b| a.0.cmp(&b.0));
+ let core_path_strings =
+ path_strings.drain_filter(|p| p.0.starts_with("core::")).collect::<Vec<_>>();
+ path_strings.extend(core_path_strings);
+ path_strings.dedup_by(|a, b| a.0 == b.0);
+ }
+
+ if !accessible_path_strings.is_empty() {
+ let (determiner, kind, name) = if accessible_path_strings.len() == 1 {
+ ("this", accessible_path_strings[0].1, format!(" `{}`", accessible_path_strings[0].0))
+ } else {
+ ("one of these", "items", String::new())
+ };
+
+ let instead = if let Instead::Yes = instead { " instead" } else { "" };
+ let mut msg = if let IsPattern::Yes = is_pattern {
+ format!(
+ "if you meant to match on {}{}{}, use the full path in the pattern",
+ kind, instead, name
+ )
+ } else {
+ format!("consider importing {} {}{}", determiner, kind, instead)
+ };
+
+ for note in accessible_path_strings.iter().flat_map(|cand| cand.3.as_ref()) {
+ err.note(note);
+ }
+
+ if let (IsPattern::Yes, Some(span)) = (is_pattern, use_placement_span) {
+ err.span_suggestions(
+ span,
+ &msg,
+ accessible_path_strings.into_iter().map(|a| a.0),
+ Applicability::MaybeIncorrect,
+ );
+ } else if let Some(span) = use_placement_span {
+ for candidate in &mut accessible_path_strings {
+ // produce an additional newline to separate the new use statement
+ // from the directly following item.
+ let additional_newline = if let FoundUse::Yes = found_use { "" } else { "\n" };
+ candidate.0 = format!("use {};\n{}", &candidate.0, additional_newline);
+ }
+
+ err.span_suggestions(
+ span,
+ &msg,
+ accessible_path_strings.into_iter().map(|a| a.0),
+ Applicability::MaybeIncorrect,
+ );
+ if let [first, .., last] = &path[..] {
+ err.span_suggestion_verbose(
+ first.ident.span.until(last.ident.span),
+ &format!("if you import `{}`, refer to it directly", last.ident),
+ "",
+ Applicability::Unspecified,
+ );
+ }
+ } else {
+ msg.push(':');
+
+ for candidate in accessible_path_strings {
+ msg.push('\n');
+ msg.push_str(&candidate.0);
+ }
+
+ err.note(&msg);
+ }
+ } else {
+ assert!(!inaccessible_path_strings.is_empty());
+
+ let prefix =
+ if let IsPattern::Yes = is_pattern { "you might have meant to match on " } else { "" };
+ if inaccessible_path_strings.len() == 1 {
+ let (name, descr, def_id, note) = &inaccessible_path_strings[0];
+ let msg = format!(
+ "{}{} `{}`{} exists but is inaccessible",
+ prefix,
+ descr,
+ name,
+ if let IsPattern::Yes = is_pattern { ", which" } else { "" }
+ );
+
+ if let Some(local_def_id) = def_id.and_then(|did| did.as_local()) {
+ let span = source_span[local_def_id];
+ let span = session.source_map().guess_head_span(span);
+ let mut multi_span = MultiSpan::from_span(span);
+ multi_span.push_span_label(span, "not accessible");
+ err.span_note(multi_span, &msg);
+ } else {
+ err.note(&msg);
+ }
+ if let Some(note) = (*note).as_deref() {
+ err.note(note);
+ }
+ } else {
+ let (_, descr_first, _, _) = &inaccessible_path_strings[0];
+ let descr = if inaccessible_path_strings
+ .iter()
+ .skip(1)
+ .all(|(_, descr, _, _)| descr == descr_first)
+ {
+ descr_first
+ } else {
+ "item"
+ };
+ let plural_descr =
+ if descr.ends_with('s') { format!("{}es", descr) } else { format!("{}s", descr) };
+
+ let mut msg = format!("{}these {} exist but are inaccessible", prefix, plural_descr);
+ let mut has_colon = false;
+
+ let mut spans = Vec::new();
+ for (name, _, def_id, _) in &inaccessible_path_strings {
+ if let Some(local_def_id) = def_id.and_then(|did| did.as_local()) {
+ let span = source_span[local_def_id];
+ let span = session.source_map().guess_head_span(span);
+ spans.push((name, span));
+ } else {
+ if !has_colon {
+ msg.push(':');
+ has_colon = true;
+ }
+ msg.push('\n');
+ msg.push_str(name);
+ }
+ }
+
+ let mut multi_span = MultiSpan::from_spans(spans.iter().map(|(_, sp)| *sp).collect());
+ for (name, span) in spans {
+ multi_span.push_span_label(span, format!("`{}`: not accessible", name));
+ }
+
+ for note in inaccessible_path_strings.iter().flat_map(|cand| cand.3.as_ref()) {
+ err.note(note);
+ }
+
+ err.span_note(multi_span, &msg);
+ }
+ }
+}
+
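+/// AST visitor that finds a place to suggest inserting a `use` item into
+/// `target_module`: `first_use_span` points just before the first existing
+/// `use` item, and `first_legal_span` is the module's use-injection point.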
+#[derive(Debug)]
+struct UsePlacementFinder {
+ target_module: NodeId,
+ first_legal_span: Option<Span>,
+ first_use_span: Option<Span>,
+}
+
+impl UsePlacementFinder {
+ fn check(krate: &Crate, target_module: NodeId) -> (Option<Span>, FoundUse) {
+ let mut finder =
+ UsePlacementFinder { target_module, first_legal_span: None, first_use_span: None };
+ finder.visit_crate(krate);
+ if let Some(use_span) = finder.first_use_span {
+ (Some(use_span), FoundUse::Yes)
+ } else {
+ (finder.first_legal_span, FoundUse::No)
+ }
+ }
+}
+
+impl<'tcx> visit::Visitor<'tcx> for UsePlacementFinder {
+ fn visit_crate(&mut self, c: &Crate) {
+ if self.target_module == CRATE_NODE_ID {
+ let inject = c.spans.inject_use_span;
+ if is_span_suitable_for_use_injection(inject) {
+ self.first_legal_span = Some(inject);
+ }
+ self.first_use_span = search_for_any_use_in_items(&c.items);
+ return;
+ } else {
+ visit::walk_crate(self, c);
+ }
+ }
+
+ fn visit_item(&mut self, item: &'tcx ast::Item) {
+ if self.target_module == item.id {
+ if let ItemKind::Mod(_, ModKind::Loaded(items, _inline, mod_spans)) = &item.kind {
+ let inject = mod_spans.inject_use_span;
+ if is_span_suitable_for_use_injection(inject) {
+ self.first_legal_span = Some(inject);
+ }
+ self.first_use_span = search_for_any_use_in_items(items);
+ return;
+ }
+ } else {
+ visit::walk_item(self, item);
+ }
+ }
+}
+
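+/// Returns the position just before the first suitable `use` item in `items`, if any.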
+fn search_for_any_use_in_items(items: &[P<ast::Item>]) -> Option<Span> {
+ for item in items {
+ if let ItemKind::Use(..) = item.kind {
+ if is_span_suitable_for_use_injection(item.span) {
+ return Some(item.span.shrink_to_lo());
+ }
+ }
+ }
+ None
+}
+
+fn is_span_suitable_for_use_injection(s: Span) -> bool {
+ // don't suggest placing a use before the prelude
+ // import or other generated ones
+ !s.from_expansion()
+}
+
+/// Convert the given number into the corresponding ordinal
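+///
+/// For example (values mirrored from the accompanying tests):
+///
+/// ```ignore (illustrative)
+/// assert_eq!(ordinalize(1), "1st");
+/// assert_eq!(ordinalize(12), "12th"); // 11-13 always take "th"
+/// assert_eq!(ordinalize(22), "22nd");
+/// ```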
+pub(crate) fn ordinalize(v: usize) -> String {
+ let suffix = match ((11..=13).contains(&(v % 100)), v % 10) {
+ (false, 1) => "st",
+ (false, 2) => "nd",
+ (false, 3) => "rd",
+ _ => "th",
+ };
+ format!("{v}{suffix}")
+}
diff --git a/compiler/rustc_resolve/src/diagnostics/tests.rs b/compiler/rustc_resolve/src/diagnostics/tests.rs
new file mode 100644
index 000000000..2aa6cc61e
--- /dev/null
+++ b/compiler/rustc_resolve/src/diagnostics/tests.rs
@@ -0,0 +1,40 @@
+use super::ordinalize;
+
+#[test]
+fn test_ordinalize() {
+ assert_eq!(ordinalize(1), "1st");
+ assert_eq!(ordinalize(2), "2nd");
+ assert_eq!(ordinalize(3), "3rd");
+ assert_eq!(ordinalize(4), "4th");
+ assert_eq!(ordinalize(5), "5th");
+ // ...
+ assert_eq!(ordinalize(10), "10th");
+ assert_eq!(ordinalize(11), "11th");
+ assert_eq!(ordinalize(12), "12th");
+ assert_eq!(ordinalize(13), "13th");
+ assert_eq!(ordinalize(14), "14th");
+ // ...
+ assert_eq!(ordinalize(20), "20th");
+ assert_eq!(ordinalize(21), "21st");
+ assert_eq!(ordinalize(22), "22nd");
+ assert_eq!(ordinalize(23), "23rd");
+ assert_eq!(ordinalize(24), "24th");
+ // ...
+ assert_eq!(ordinalize(30), "30th");
+ assert_eq!(ordinalize(31), "31st");
+ assert_eq!(ordinalize(32), "32nd");
+ assert_eq!(ordinalize(33), "33rd");
+ assert_eq!(ordinalize(34), "34th");
+ // ...
+ assert_eq!(ordinalize(7010), "7010th");
+ assert_eq!(ordinalize(7011), "7011th");
+ assert_eq!(ordinalize(7012), "7012th");
+ assert_eq!(ordinalize(7013), "7013th");
+ assert_eq!(ordinalize(7014), "7014th");
+ // ...
+ assert_eq!(ordinalize(7020), "7020th");
+ assert_eq!(ordinalize(7021), "7021st");
+ assert_eq!(ordinalize(7022), "7022nd");
+ assert_eq!(ordinalize(7023), "7023rd");
+ assert_eq!(ordinalize(7024), "7024th");
+}
diff --git a/compiler/rustc_resolve/src/ident.rs b/compiler/rustc_resolve/src/ident.rs
new file mode 100644
index 000000000..6e6782881
--- /dev/null
+++ b/compiler/rustc_resolve/src/ident.rs
@@ -0,0 +1,1556 @@
+use rustc_ast::{self as ast, NodeId};
+use rustc_feature::is_builtin_attr_name;
+use rustc_hir::def::{DefKind, Namespace, NonMacroAttrKind, PartialRes, PerNS};
+use rustc_hir::PrimTy;
+use rustc_middle::bug;
+use rustc_middle::ty;
+use rustc_session::lint::builtin::PROC_MACRO_DERIVE_RESOLUTION_FALLBACK;
+use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_span::edition::Edition;
+use rustc_span::hygiene::{ExpnId, ExpnKind, LocalExpnId, MacroKind, SyntaxContext};
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::{Span, DUMMY_SP};
+
+use std::ptr;
+
+use crate::late::{ConstantItemKind, HasGenericParams, PathSource, Rib, RibKind};
+use crate::macros::{sub_namespace_match, MacroRulesScope};
+use crate::{AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, Determinacy, Finalize};
+use crate::{ImportKind, LexicalScopeBinding, Module, ModuleKind, ModuleOrUniformRoot};
+use crate::{NameBinding, NameBindingKind, ParentScope, PathResult, PrivacyError, Res};
+use crate::{ResolutionError, Resolver, Scope, ScopeSet, Segment, ToNameBinding, Weak};
+
+use Determinacy::*;
+use Namespace::*;
+use RibKind::*;
+
+impl<'a> Resolver<'a> {
+ /// A generic scope visitor.
+ /// Visits scopes in order to resolve some identifier in them or perform other actions.
+ /// If the callback returns `Some` result, we stop visiting scopes and return it.
+ pub(crate) fn visit_scopes<T>(
+ &mut self,
+ scope_set: ScopeSet<'a>,
+ parent_scope: &ParentScope<'a>,
+ ctxt: SyntaxContext,
+ mut visitor: impl FnMut(
+ &mut Self,
+ Scope<'a>,
+ /*use_prelude*/ bool,
+ SyntaxContext,
+ ) -> Option<T>,
+ ) -> Option<T> {
+ // General principles:
+ // 1. Uncontrolled (i.e., user-defined) names should have higher priority than controlled names
+ // built into the language or standard library. This way we can add new names into the
+ // language or standard library without breaking user code.
+ // 2. "Closed set" below means new names cannot appear after the current resolution attempt.
+ // Places to search (in order of decreasing priority):
+ // (Type NS)
+ // 1. FIXME: Ribs (type parameters), there's no necessary infrastructure yet
+ // (open set, not controlled).
+ // 2. Names in modules (both normal `mod`ules and blocks), loop through hygienic parents
+ // (open, not controlled).
+ // 3. Extern prelude (open, the open part is from macro expansions, not controlled).
+ // 4. Tool modules (closed, controlled right now, but not in the future).
+ // 5. Standard library prelude (de-facto closed, controlled).
+ // 6. Language prelude (closed, controlled).
+ // (Value NS)
+ // 1. FIXME: Ribs (local variables), there's no necessary infrastructure yet
+ // (open set, not controlled).
+ // 2. Names in modules (both normal `mod`ules and blocks), loop through hygienic parents
+ // (open, not controlled).
+ // 3. Standard library prelude (de-facto closed, controlled).
+ // (Macro NS)
+ // 1-3. Derive helpers (open, not controlled). All ambiguities with other names
+ // are currently reported as errors. They should be higher in priority than preludes
+ // and probably even names in modules according to the "general principles" above. They
+ // also should be subject to restricted shadowing because they are effectively produced by
+ // derives (you need to resolve the derive first to add helpers into scope), but they
+ // should be available before the derive is expanded for compatibility.
+ // It's a mess in general, so we are being conservative for now.
+ // 1-3. `macro_rules` (open, not controlled), loop through `macro_rules` scopes. Have higher
+ // priority than prelude macros, but create ambiguities with macros in modules.
+ // 1-3. Names in modules (both normal `mod`ules and blocks), loop through hygienic parents
+ // (open, not controlled). Have higher priority than prelude macros, but create
+ // ambiguities with `macro_rules`.
+ // 4. `macro_use` prelude (open, the open part is from macro expansions, not controlled).
+ // 4a. User-defined prelude from macro-use
+ // (open, the open part is from macro expansions, not controlled).
+ // 4b. "Standard library prelude" part implemented through `macro-use` (closed, controlled).
+ // 4c. Standard library prelude (de-facto closed, controlled).
+ // 6. Language prelude: builtin attributes (closed, controlled).
+
+ let rust_2015 = ctxt.edition() == Edition::Edition2015;
+ let (ns, macro_kind, is_absolute_path) = match scope_set {
+ ScopeSet::All(ns, _) => (ns, None, false),
+ ScopeSet::AbsolutePath(ns) => (ns, None, true),
+ ScopeSet::Macro(macro_kind) => (MacroNS, Some(macro_kind), false),
+ ScopeSet::Late(ns, ..) => (ns, None, false),
+ };
+ let module = match scope_set {
+ // Start with the specified module.
+ ScopeSet::Late(_, module, _) => module,
+ // Jump out of trait or enum modules, they do not act as scopes.
+ _ => parent_scope.module.nearest_item_scope(),
+ };
+ let mut scope = match ns {
+ _ if is_absolute_path => Scope::CrateRoot,
+ TypeNS | ValueNS => Scope::Module(module, None),
+ MacroNS => Scope::DeriveHelpers(parent_scope.expansion),
+ };
+ let mut ctxt = ctxt.normalize_to_macros_2_0();
+ let mut use_prelude = !module.no_implicit_prelude;
+
+ loop {
+ let visit = match scope {
+ // Derive helpers are not in scope when resolving derives in the same container.
+ Scope::DeriveHelpers(expn_id) => {
+ !(expn_id == parent_scope.expansion && macro_kind == Some(MacroKind::Derive))
+ }
+ Scope::DeriveHelpersCompat => true,
+ Scope::MacroRules(macro_rules_scope) => {
+ // Use "path compression" on `macro_rules` scope chains. This is an optimization
+ // used to avoid long scope chains, see the comments on `MacroRulesScopeRef`.
+ // As another consequence of this optimization visitors never observe invocation
+ // scopes for macros that were already expanded.
+ while let MacroRulesScope::Invocation(invoc_id) = macro_rules_scope.get() {
+ if let Some(next_scope) = self.output_macro_rules_scopes.get(&invoc_id) {
+ macro_rules_scope.set(next_scope.get());
+ } else {
+ break;
+ }
+ }
+ true
+ }
+ Scope::CrateRoot => true,
+ Scope::Module(..) => true,
+ Scope::RegisteredAttrs => use_prelude,
+ Scope::MacroUsePrelude => use_prelude || rust_2015,
+ Scope::BuiltinAttrs => true,
+ Scope::ExternPrelude => use_prelude || is_absolute_path,
+ Scope::ToolPrelude => use_prelude,
+ Scope::StdLibPrelude => use_prelude || ns == MacroNS,
+ Scope::BuiltinTypes => true,
+ };
+
+ if visit {
+ if let break_result @ Some(..) = visitor(self, scope, use_prelude, ctxt) {
+ return break_result;
+ }
+ }
+
+ scope = match scope {
+ Scope::DeriveHelpers(LocalExpnId::ROOT) => Scope::DeriveHelpersCompat,
+ Scope::DeriveHelpers(expn_id) => {
+ // Derive helpers are not visible to code generated by bang or derive macros.
+ let expn_data = expn_id.expn_data();
+ match expn_data.kind {
+ ExpnKind::Root
+ | ExpnKind::Macro(MacroKind::Bang | MacroKind::Derive, _) => {
+ Scope::DeriveHelpersCompat
+ }
+ _ => Scope::DeriveHelpers(expn_data.parent.expect_local()),
+ }
+ }
+ Scope::DeriveHelpersCompat => Scope::MacroRules(parent_scope.macro_rules),
+ Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() {
+ MacroRulesScope::Binding(binding) => {
+ Scope::MacroRules(binding.parent_macro_rules_scope)
+ }
+ MacroRulesScope::Invocation(invoc_id) => {
+ Scope::MacroRules(self.invocation_parent_scopes[&invoc_id].macro_rules)
+ }
+ MacroRulesScope::Empty => Scope::Module(module, None),
+ },
+ Scope::CrateRoot => match ns {
+ TypeNS => {
+ ctxt.adjust(ExpnId::root());
+ Scope::ExternPrelude
+ }
+ ValueNS | MacroNS => break,
+ },
+ Scope::Module(module, prev_lint_id) => {
+ use_prelude = !module.no_implicit_prelude;
+ let derive_fallback_lint_id = match scope_set {
+ ScopeSet::Late(.., lint_id) => lint_id,
+ _ => None,
+ };
+ match self.hygienic_lexical_parent(module, &mut ctxt, derive_fallback_lint_id) {
+ Some((parent_module, lint_id)) => {
+ Scope::Module(parent_module, lint_id.or(prev_lint_id))
+ }
+ None => {
+ ctxt.adjust(ExpnId::root());
+ match ns {
+ TypeNS => Scope::ExternPrelude,
+ ValueNS => Scope::StdLibPrelude,
+ MacroNS => Scope::RegisteredAttrs,
+ }
+ }
+ }
+ }
+ Scope::RegisteredAttrs => Scope::MacroUsePrelude,
+ Scope::MacroUsePrelude => Scope::StdLibPrelude,
+ Scope::BuiltinAttrs => break, // nowhere else to search
+ Scope::ExternPrelude if is_absolute_path => break,
+ Scope::ExternPrelude => Scope::ToolPrelude,
+ Scope::ToolPrelude => Scope::StdLibPrelude,
+ Scope::StdLibPrelude => match ns {
+ TypeNS => Scope::BuiltinTypes,
+ ValueNS => break, // nowhere else to search
+ MacroNS => Scope::BuiltinAttrs,
+ },
+ Scope::BuiltinTypes => break, // nowhere else to search
+ };
+ }
+
+ None
+ }
+
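+ /// Returns the next module to search during lexical resolution, together
+ /// with a lint node id when the proc-macro-derive fallback (see the
+ /// comment in the body) applies.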
+ fn hygienic_lexical_parent(
+ &mut self,
+ module: Module<'a>,
+ ctxt: &mut SyntaxContext,
+ derive_fallback_lint_id: Option<NodeId>,
+ ) -> Option<(Module<'a>, Option<NodeId>)> {
+ if !module.expansion.outer_expn_is_descendant_of(*ctxt) {
+ return Some((self.expn_def_scope(ctxt.remove_mark()), None));
+ }
+
+ if let ModuleKind::Block = module.kind {
+ return Some((module.parent.unwrap().nearest_item_scope(), None));
+ }
+
+ // We need to support the next case under a deprecation warning
+ // ```
+ // struct MyStruct;
+ // ---- begin: this comes from a proc macro derive
+ // mod implementation_details {
+ // // Note that `MyStruct` is not in scope here.
+ // impl SomeTrait for MyStruct { ... }
+ // }
+ // ---- end
+ // ```
+ // So we have to fall back to the module's parent during lexical resolution in this case.
+ if derive_fallback_lint_id.is_some() {
+ if let Some(parent) = module.parent {
+ // Inner module is inside the macro, parent module is outside of the macro.
+ if module.expansion != parent.expansion
+ && module.expansion.is_descendant_of(parent.expansion)
+ {
+ // The macro is a proc macro derive
+ if let Some(def_id) = module.expansion.expn_data().macro_def_id {
+ let ext = self.get_macro_by_def_id(def_id).ext;
+ if ext.builtin_name.is_none()
+ && ext.macro_kind() == MacroKind::Derive
+ && parent.expansion.outer_expn_is_descendant_of(*ctxt)
+ {
+ return Some((parent, derive_fallback_lint_id));
+ }
+ }
+ }
+ }
+ }
+
+ None
+ }
+
+ /// This resolves the identifier `ident` in the namespace `ns` in the current lexical scope.
+ /// More specifically, we proceed up the hierarchy of scopes and return the binding for
+ /// `ident` in the first scope that defines it (or None if no scopes define it).
+ ///
+ /// A block's items are above its local variables in the scope hierarchy, regardless of where
+ /// the items are defined in the block. For example,
+ /// ```rust
+ /// fn f() {
+ /// g(); // Since there are no local variables in scope yet, this resolves to the item.
+ /// let g = || {};
+ /// fn g() {}
+ /// g(); // This resolves to the local variable `g` since it shadows the item.
+ /// }
+ /// ```
+ ///
+ /// Invariant: This must only be called during main resolution, not during
+ /// import resolution.
+ #[tracing::instrument(level = "debug", skip(self, ribs))]
+ pub(crate) fn resolve_ident_in_lexical_scope(
+ &mut self,
+ mut ident: Ident,
+ ns: Namespace,
+ parent_scope: &ParentScope<'a>,
+ finalize: Option<Finalize>,
+ ribs: &[Rib<'a>],
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ ) -> Option<LexicalScopeBinding<'a>> {
+ assert!(ns == TypeNS || ns == ValueNS);
+ let orig_ident = ident;
+ if ident.name == kw::Empty {
+ return Some(LexicalScopeBinding::Res(Res::Err));
+ }
+ let (general_span, normalized_span) = if ident.name == kw::SelfUpper {
+ // FIXME(jseyfried) improve `Self` hygiene
+ let empty_span = ident.span.with_ctxt(SyntaxContext::root());
+ (empty_span, empty_span)
+ } else if ns == TypeNS {
+ let normalized_span = ident.span.normalize_to_macros_2_0();
+ (normalized_span, normalized_span)
+ } else {
+ (ident.span.normalize_to_macro_rules(), ident.span.normalize_to_macros_2_0())
+ };
+ ident.span = general_span;
+ let normalized_ident = Ident { span: normalized_span, ..ident };
+
+ // Walk backwards up the ribs in scope.
+ let mut module = self.graph_root;
+ for i in (0..ribs.len()).rev() {
+ debug!("walk rib\n{:?}", ribs[i].bindings);
+ // Use the rib kind to determine whether we are resolving parameters
+ // (macro 2.0 hygiene) or local variables (`macro_rules` hygiene).
+ let rib_ident = if ribs[i].kind.contains_params() { normalized_ident } else { ident };
+ if let Some((original_rib_ident_def, res)) = ribs[i].bindings.get_key_value(&rib_ident)
+ {
+ // The ident resolves to a type parameter or local variable.
+ return Some(LexicalScopeBinding::Res(self.validate_res_from_ribs(
+ i,
+ rib_ident,
+ *res,
+ finalize.map(|finalize| finalize.path_span),
+ *original_rib_ident_def,
+ ribs,
+ )));
+ }
+
+ module = match ribs[i].kind {
+ ModuleRibKind(module) => module,
+ MacroDefinition(def) if def == self.macro_def(ident.span.ctxt()) => {
+ // If an invocation of this macro created `ident`, give up on `ident`
+ // and switch to `ident`'s source from the macro definition.
+ ident.span.remove_mark();
+ continue;
+ }
+ _ => continue,
+ };
+
+ match module.kind {
+ ModuleKind::Block => {} // We can see through blocks
+ _ => break,
+ }
+
+ let item = self.resolve_ident_in_module_unadjusted(
+ ModuleOrUniformRoot::Module(module),
+ ident,
+ ns,
+ parent_scope,
+ finalize,
+ ignore_binding,
+ );
+ if let Ok(binding) = item {
+ // The ident resolves to an item.
+ return Some(LexicalScopeBinding::Item(binding));
+ }
+ }
+ self.early_resolve_ident_in_lexical_scope(
+ orig_ident,
+ ScopeSet::Late(ns, module, finalize.map(|finalize| finalize.node_id)),
+ parent_scope,
+ finalize,
+ finalize.is_some(),
+ ignore_binding,
+ )
+ .ok()
+ .map(LexicalScopeBinding::Item)
+ }
+
+ /// Resolve an identifier in lexical scope.
+ /// This is a variation of `fn resolve_ident_in_lexical_scope` that can be run during
+ /// expansion and import resolution (perhaps they can be merged in the future).
+ /// The function is used for resolving initial segments of macro paths (e.g., `foo` in
+ /// `foo::bar!();` or `foo!();`) and also for import paths on the 2018 edition.
+ #[tracing::instrument(level = "debug", skip(self, scope_set))]
+ pub(crate) fn early_resolve_ident_in_lexical_scope(
+ &mut self,
+ orig_ident: Ident,
+ scope_set: ScopeSet<'a>,
+ parent_scope: &ParentScope<'a>,
+ finalize: Option<Finalize>,
+ force: bool,
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ ) -> Result<&'a NameBinding<'a>, Determinacy> {
+ bitflags::bitflags! {
+ struct Flags: u8 {
+ const MACRO_RULES = 1 << 0;
+ const MODULE = 1 << 1;
+ const MISC_SUGGEST_CRATE = 1 << 2;
+ const MISC_SUGGEST_SELF = 1 << 3;
+ const MISC_FROM_PRELUDE = 1 << 4;
+ }
+ }
+
+ assert!(force || finalize.is_none()); // `finalize` implies `force`
+
+ // Make sure `self`, `super` etc produce an error when passed to here.
+ if orig_ident.is_path_segment_keyword() {
+ return Err(Determinacy::Determined);
+ }
+
+ let (ns, macro_kind, is_import) = match scope_set {
+ ScopeSet::All(ns, is_import) => (ns, None, is_import),
+ ScopeSet::AbsolutePath(ns) => (ns, None, false),
+ ScopeSet::Macro(macro_kind) => (MacroNS, Some(macro_kind), false),
+ ScopeSet::Late(ns, ..) => (ns, None, false),
+ };
+
+ // This is *the* result, resolution from the scope closest to the resolved identifier.
+ // However, sometimes this result is "weak" because it comes from a glob import or
+ // a macro expansion, and in this case it cannot shadow names from outer scopes, e.g.
+ // mod m { ... } // solution in outer scope
+ // {
+ // use prefix::*; // imports another `m` - innermost solution
+ // // weak, cannot shadow the outer `m`, need to report ambiguity error
+ // m::mac!();
+ // }
+ // So we have to save the innermost solution and continue searching in outer scopes
+ // to detect potential ambiguities.
+ let mut innermost_result: Option<(&NameBinding<'_>, Flags)> = None;
+ let mut determinacy = Determinacy::Determined;
+
+ // Go through all the scopes and try to resolve the name.
+ let break_result = self.visit_scopes(
+ scope_set,
+ parent_scope,
+ orig_ident.span.ctxt(),
+ |this, scope, use_prelude, ctxt| {
+ let ident = Ident::new(orig_ident.name, orig_ident.span.with_ctxt(ctxt));
+ let ok = |res, span, arenas| {
+ Ok((
+ (res, ty::Visibility::Public, span, LocalExpnId::ROOT)
+ .to_name_binding(arenas),
+ Flags::empty(),
+ ))
+ };
+ let result = match scope {
+ Scope::DeriveHelpers(expn_id) => {
+ if let Some(attr) = this
+ .helper_attrs
+ .get(&expn_id)
+ .and_then(|attrs| attrs.iter().rfind(|i| ident == **i))
+ {
+ let binding = (
+ Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper),
+ ty::Visibility::Public,
+ attr.span,
+ expn_id,
+ )
+ .to_name_binding(this.arenas);
+ Ok((binding, Flags::empty()))
+ } else {
+ Err(Determinacy::Determined)
+ }
+ }
+ Scope::DeriveHelpersCompat => {
+ let mut result = Err(Determinacy::Determined);
+ for derive in parent_scope.derives {
+ let parent_scope = &ParentScope { derives: &[], ..*parent_scope };
+ match this.resolve_macro_path(
+ derive,
+ Some(MacroKind::Derive),
+ parent_scope,
+ true,
+ force,
+ ) {
+ Ok((Some(ext), _)) => {
+ if ext.helper_attrs.contains(&ident.name) {
+ result = ok(
+ Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat),
+ derive.span,
+ this.arenas,
+ );
+ break;
+ }
+ }
+ Ok(_) | Err(Determinacy::Determined) => {}
+ Err(Determinacy::Undetermined) => {
+ result = Err(Determinacy::Undetermined)
+ }
+ }
+ }
+ result
+ }
+ Scope::MacroRules(macro_rules_scope) => match macro_rules_scope.get() {
+ MacroRulesScope::Binding(macro_rules_binding)
+ if ident == macro_rules_binding.ident =>
+ {
+ Ok((macro_rules_binding.binding, Flags::MACRO_RULES))
+ }
+ MacroRulesScope::Invocation(_) => Err(Determinacy::Undetermined),
+ _ => Err(Determinacy::Determined),
+ },
+ Scope::CrateRoot => {
+ let root_ident = Ident::new(kw::PathRoot, ident.span);
+ let root_module = this.resolve_crate_root(root_ident);
+ let binding = this.resolve_ident_in_module_ext(
+ ModuleOrUniformRoot::Module(root_module),
+ ident,
+ ns,
+ parent_scope,
+ finalize,
+ ignore_binding,
+ );
+ match binding {
+ Ok(binding) => Ok((binding, Flags::MODULE | Flags::MISC_SUGGEST_CRATE)),
+ Err((Determinacy::Undetermined, Weak::No)) => {
+ return Some(Err(Determinacy::determined(force)));
+ }
+ Err((Determinacy::Undetermined, Weak::Yes)) => {
+ Err(Determinacy::Undetermined)
+ }
+ Err((Determinacy::Determined, _)) => Err(Determinacy::Determined),
+ }
+ }
+ Scope::Module(module, derive_fallback_lint_id) => {
+ let adjusted_parent_scope = &ParentScope { module, ..*parent_scope };
+ let binding = this.resolve_ident_in_module_unadjusted_ext(
+ ModuleOrUniformRoot::Module(module),
+ ident,
+ ns,
+ adjusted_parent_scope,
+ !matches!(scope_set, ScopeSet::Late(..)),
+ finalize,
+ ignore_binding,
+ );
+ match binding {
+ Ok(binding) => {
+ if let Some(lint_id) = derive_fallback_lint_id {
+ this.lint_buffer.buffer_lint_with_diagnostic(
+ PROC_MACRO_DERIVE_RESOLUTION_FALLBACK,
+ lint_id,
+ orig_ident.span,
+ &format!(
+ "cannot find {} `{}` in this scope",
+ ns.descr(),
+ ident
+ ),
+ BuiltinLintDiagnostics::ProcMacroDeriveResolutionFallback(
+ orig_ident.span,
+ ),
+ );
+ }
+ let misc_flags = if ptr::eq(module, this.graph_root) {
+ Flags::MISC_SUGGEST_CRATE
+ } else if module.is_normal() {
+ Flags::MISC_SUGGEST_SELF
+ } else {
+ Flags::empty()
+ };
+ Ok((binding, Flags::MODULE | misc_flags))
+ }
+ Err((Determinacy::Undetermined, Weak::No)) => {
+ return Some(Err(Determinacy::determined(force)));
+ }
+ Err((Determinacy::Undetermined, Weak::Yes)) => {
+ Err(Determinacy::Undetermined)
+ }
+ Err((Determinacy::Determined, _)) => Err(Determinacy::Determined),
+ }
+ }
+ Scope::RegisteredAttrs => match this.registered_attrs.get(&ident).cloned() {
+ Some(ident) => ok(
+ Res::NonMacroAttr(NonMacroAttrKind::Registered),
+ ident.span,
+ this.arenas,
+ ),
+ None => Err(Determinacy::Determined),
+ },
+ Scope::MacroUsePrelude => {
+ match this.macro_use_prelude.get(&ident.name).cloned() {
+ Some(binding) => Ok((binding, Flags::MISC_FROM_PRELUDE)),
+ None => Err(Determinacy::determined(
+ this.graph_root.unexpanded_invocations.borrow().is_empty(),
+ )),
+ }
+ }
+ Scope::BuiltinAttrs => {
+ if is_builtin_attr_name(ident.name) {
+ ok(
+ Res::NonMacroAttr(NonMacroAttrKind::Builtin(ident.name)),
+ DUMMY_SP,
+ this.arenas,
+ )
+ } else {
+ Err(Determinacy::Determined)
+ }
+ }
+ Scope::ExternPrelude => {
+ match this.extern_prelude_get(ident, finalize.is_some()) {
+ Some(binding) => Ok((binding, Flags::empty())),
+ None => Err(Determinacy::determined(
+ this.graph_root.unexpanded_invocations.borrow().is_empty(),
+ )),
+ }
+ }
+ Scope::ToolPrelude => match this.registered_tools.get(&ident).cloned() {
+ Some(ident) => ok(Res::ToolMod, ident.span, this.arenas),
+ None => Err(Determinacy::Determined),
+ },
+ Scope::StdLibPrelude => {
+ let mut result = Err(Determinacy::Determined);
+ if let Some(prelude) = this.prelude {
+ if let Ok(binding) = this.resolve_ident_in_module_unadjusted(
+ ModuleOrUniformRoot::Module(prelude),
+ ident,
+ ns,
+ parent_scope,
+ None,
+ ignore_binding,
+ ) {
+ if use_prelude || this.is_builtin_macro(binding.res()) {
+ result = Ok((binding, Flags::MISC_FROM_PRELUDE));
+ }
+ }
+ }
+ result
+ }
+ Scope::BuiltinTypes => match PrimTy::from_name(ident.name) {
+ Some(prim_ty) => ok(Res::PrimTy(prim_ty), DUMMY_SP, this.arenas),
+ None => Err(Determinacy::Determined),
+ },
+ };
+
+ match result {
+ Ok((binding, flags))
+ if sub_namespace_match(binding.macro_kind(), macro_kind) =>
+ {
+ if finalize.is_none() || matches!(scope_set, ScopeSet::Late(..)) {
+ return Some(Ok(binding));
+ }
+
+ if let Some((innermost_binding, innermost_flags)) = innermost_result {
+                        // Found another solution; if the first one was "weak", report an error.
+ let (res, innermost_res) = (binding.res(), innermost_binding.res());
+ if res != innermost_res {
+ let is_builtin = |res| {
+ matches!(res, Res::NonMacroAttr(NonMacroAttrKind::Builtin(..)))
+ };
+ let derive_helper =
+ Res::NonMacroAttr(NonMacroAttrKind::DeriveHelper);
+ let derive_helper_compat =
+ Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat);
+
+ let ambiguity_error_kind = if is_import {
+ Some(AmbiguityKind::Import)
+ } else if is_builtin(innermost_res) || is_builtin(res) {
+ Some(AmbiguityKind::BuiltinAttr)
+ } else if innermost_res == derive_helper_compat
+ || res == derive_helper_compat && innermost_res != derive_helper
+ {
+ Some(AmbiguityKind::DeriveHelper)
+ } else if innermost_flags.contains(Flags::MACRO_RULES)
+ && flags.contains(Flags::MODULE)
+ && !this.disambiguate_macro_rules_vs_modularized(
+ innermost_binding,
+ binding,
+ )
+ || flags.contains(Flags::MACRO_RULES)
+ && innermost_flags.contains(Flags::MODULE)
+ && !this.disambiguate_macro_rules_vs_modularized(
+ binding,
+ innermost_binding,
+ )
+ {
+ Some(AmbiguityKind::MacroRulesVsModularized)
+ } else if innermost_binding.is_glob_import() {
+ Some(AmbiguityKind::GlobVsOuter)
+ } else if innermost_binding
+ .may_appear_after(parent_scope.expansion, binding)
+ {
+ Some(AmbiguityKind::MoreExpandedVsOuter)
+ } else {
+ None
+ };
+ if let Some(kind) = ambiguity_error_kind {
+ let misc = |f: Flags| {
+ if f.contains(Flags::MISC_SUGGEST_CRATE) {
+ AmbiguityErrorMisc::SuggestCrate
+ } else if f.contains(Flags::MISC_SUGGEST_SELF) {
+ AmbiguityErrorMisc::SuggestSelf
+ } else if f.contains(Flags::MISC_FROM_PRELUDE) {
+ AmbiguityErrorMisc::FromPrelude
+ } else {
+ AmbiguityErrorMisc::None
+ }
+ };
+ this.ambiguity_errors.push(AmbiguityError {
+ kind,
+ ident: orig_ident,
+ b1: innermost_binding,
+ b2: binding,
+ misc1: misc(innermost_flags),
+ misc2: misc(flags),
+ });
+ return Some(Ok(innermost_binding));
+ }
+ }
+ } else {
+ // Found the first solution.
+ innermost_result = Some((binding, flags));
+ }
+ }
+ Ok(..) | Err(Determinacy::Determined) => {}
+ Err(Determinacy::Undetermined) => determinacy = Determinacy::Undetermined,
+ }
+
+ None
+ },
+ );
+
+ if let Some(break_result) = break_result {
+ return break_result;
+ }
+
+        // The first found solution was the only one; return it.
+ if let Some((binding, _)) = innermost_result {
+ return Ok(binding);
+ }
+
+ Err(Determinacy::determined(determinacy == Determinacy::Determined || force))
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ pub(crate) fn maybe_resolve_ident_in_module(
+ &mut self,
+ module: ModuleOrUniformRoot<'a>,
+ ident: Ident,
+ ns: Namespace,
+ parent_scope: &ParentScope<'a>,
+ ) -> Result<&'a NameBinding<'a>, Determinacy> {
+ self.resolve_ident_in_module_ext(module, ident, ns, parent_scope, None, None)
+ .map_err(|(determinacy, _)| determinacy)
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ pub(crate) fn resolve_ident_in_module(
+ &mut self,
+ module: ModuleOrUniformRoot<'a>,
+ ident: Ident,
+ ns: Namespace,
+ parent_scope: &ParentScope<'a>,
+ finalize: Option<Finalize>,
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ ) -> Result<&'a NameBinding<'a>, Determinacy> {
+ self.resolve_ident_in_module_ext(module, ident, ns, parent_scope, finalize, ignore_binding)
+ .map_err(|(determinacy, _)| determinacy)
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn resolve_ident_in_module_ext(
+ &mut self,
+ module: ModuleOrUniformRoot<'a>,
+ mut ident: Ident,
+ ns: Namespace,
+ parent_scope: &ParentScope<'a>,
+ finalize: Option<Finalize>,
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ ) -> Result<&'a NameBinding<'a>, (Determinacy, Weak)> {
+ let tmp_parent_scope;
+ let mut adjusted_parent_scope = parent_scope;
+ match module {
+ ModuleOrUniformRoot::Module(m) => {
+ if let Some(def) = ident.span.normalize_to_macros_2_0_and_adjust(m.expansion) {
+ tmp_parent_scope =
+ ParentScope { module: self.expn_def_scope(def), ..*parent_scope };
+ adjusted_parent_scope = &tmp_parent_scope;
+ }
+ }
+ ModuleOrUniformRoot::ExternPrelude => {
+ ident.span.normalize_to_macros_2_0_and_adjust(ExpnId::root());
+ }
+ ModuleOrUniformRoot::CrateRootAndExternPrelude | ModuleOrUniformRoot::CurrentScope => {
+ // No adjustments
+ }
+ }
+ self.resolve_ident_in_module_unadjusted_ext(
+ module,
+ ident,
+ ns,
+ adjusted_parent_scope,
+ false,
+ finalize,
+ ignore_binding,
+ )
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn resolve_ident_in_module_unadjusted(
+ &mut self,
+ module: ModuleOrUniformRoot<'a>,
+ ident: Ident,
+ ns: Namespace,
+ parent_scope: &ParentScope<'a>,
+ finalize: Option<Finalize>,
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ ) -> Result<&'a NameBinding<'a>, Determinacy> {
+ self.resolve_ident_in_module_unadjusted_ext(
+ module,
+ ident,
+ ns,
+ parent_scope,
+ false,
+ finalize,
+ ignore_binding,
+ )
+ .map_err(|(determinacy, _)| determinacy)
+ }
+
+    /// Attempts to resolve `ident` in the namespace `ns` of `module`.
+ /// Invariant: if `finalize` is `Some`, expansion and import resolution must be complete.
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn resolve_ident_in_module_unadjusted_ext(
+ &mut self,
+ module: ModuleOrUniformRoot<'a>,
+ ident: Ident,
+ ns: Namespace,
+ parent_scope: &ParentScope<'a>,
+ restricted_shadowing: bool,
+ finalize: Option<Finalize>,
+ // This binding should be ignored during in-module resolution, so that we don't get
+ // "self-confirming" import resolutions during import validation and checking.
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ ) -> Result<&'a NameBinding<'a>, (Determinacy, Weak)> {
+ let module = match module {
+ ModuleOrUniformRoot::Module(module) => module,
+ ModuleOrUniformRoot::CrateRootAndExternPrelude => {
+ assert!(!restricted_shadowing);
+ let binding = self.early_resolve_ident_in_lexical_scope(
+ ident,
+ ScopeSet::AbsolutePath(ns),
+ parent_scope,
+ finalize,
+ finalize.is_some(),
+ ignore_binding,
+ );
+ return binding.map_err(|determinacy| (determinacy, Weak::No));
+ }
+ ModuleOrUniformRoot::ExternPrelude => {
+ assert!(!restricted_shadowing);
+ return if ns != TypeNS {
+ Err((Determined, Weak::No))
+ } else if let Some(binding) = self.extern_prelude_get(ident, finalize.is_some()) {
+ Ok(binding)
+ } else if !self.graph_root.unexpanded_invocations.borrow().is_empty() {
+ // Macro-expanded `extern crate` items can add names to extern prelude.
+ Err((Undetermined, Weak::No))
+ } else {
+ Err((Determined, Weak::No))
+ };
+ }
+ ModuleOrUniformRoot::CurrentScope => {
+ assert!(!restricted_shadowing);
+ if ns == TypeNS {
+ if ident.name == kw::Crate || ident.name == kw::DollarCrate {
+ let module = self.resolve_crate_root(ident);
+ let binding =
+ (module, ty::Visibility::Public, module.span, LocalExpnId::ROOT)
+ .to_name_binding(self.arenas);
+ return Ok(binding);
+ } else if ident.name == kw::Super || ident.name == kw::SelfLower {
+ // FIXME: Implement these with renaming requirements so that e.g.
+ // `use super;` doesn't work, but `use super as name;` does.
+ // Fall through here to get an error from `early_resolve_...`.
+ }
+ }
+
+ let scopes = ScopeSet::All(ns, true);
+ let binding = self.early_resolve_ident_in_lexical_scope(
+ ident,
+ scopes,
+ parent_scope,
+ finalize,
+ finalize.is_some(),
+ ignore_binding,
+ );
+ return binding.map_err(|determinacy| (determinacy, Weak::No));
+ }
+ };
+
+ let key = self.new_key(ident, ns);
+ let resolution =
+ self.resolution(module, key).try_borrow_mut().map_err(|_| (Determined, Weak::No))?; // This happens when there is a cycle of imports.
+
+ if let Some(Finalize { path_span, report_private, .. }) = finalize {
+            // If the primary binding is unusable, search further and return the shadowed glob
+            // binding if it exists. What we really want here is to have two separate scopes in
+            // a module - one for non-globs and one for globs - but until that's done, use this
+            // hack to avoid inconsistent resolution ICEs during import validation.
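+            //
+            // For instance (a hypothetical sketch, not code from this file),
+            // `S` below ends up with both a non-glob binding and a shadowed
+            // glob binding for the same name:
+            //
+            //     use inner::*; // glob binding for `S` (recorded as shadowed)
+            //     use other::S; // non-glob binding that shadows the glob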
+ let binding = [resolution.binding, resolution.shadowed_glob]
+ .into_iter()
+ .filter_map(|binding| match (binding, ignore_binding) {
+ (Some(binding), Some(ignored)) if ptr::eq(binding, ignored) => None,
+ _ => binding,
+ })
+ .next();
+ let Some(binding) = binding else {
+ return Err((Determined, Weak::No));
+ };
+
+ if !self.is_accessible_from(binding.vis, parent_scope.module) {
+ if report_private {
+ self.privacy_errors.push(PrivacyError {
+ ident,
+ binding,
+ dedup_span: path_span,
+ });
+ } else {
+ return Err((Determined, Weak::No));
+ }
+ }
+
+ // Forbid expanded shadowing to avoid time travel.
+ if let Some(shadowed_glob) = resolution.shadowed_glob
+ && restricted_shadowing
+ && binding.expansion != LocalExpnId::ROOT
+ && binding.res() != shadowed_glob.res()
+ {
+ self.ambiguity_errors.push(AmbiguityError {
+ kind: AmbiguityKind::GlobVsExpanded,
+ ident,
+ b1: binding,
+ b2: shadowed_glob,
+ misc1: AmbiguityErrorMisc::None,
+ misc2: AmbiguityErrorMisc::None,
+ });
+ }
+
+ if !restricted_shadowing && binding.expansion != LocalExpnId::ROOT {
+ if let NameBindingKind::Res(_, true) = binding.kind {
+ self.macro_expanded_macro_export_errors.insert((path_span, binding.span));
+ }
+ }
+
+ self.record_use(ident, binding, restricted_shadowing);
+ return Ok(binding);
+ }
+
+ let check_usable = |this: &mut Self, binding: &'a NameBinding<'a>| {
+ if let Some(ignored) = ignore_binding && ptr::eq(binding, ignored) {
+ return Err((Determined, Weak::No));
+ }
+ let usable = this.is_accessible_from(binding.vis, parent_scope.module);
+ if usable { Ok(binding) } else { Err((Determined, Weak::No)) }
+ };
+
+        // Items and single imports are not shadowable; if we have one, then it's determined.
+ if let Some(binding) = resolution.binding {
+ if !binding.is_glob_import() {
+ return check_usable(self, binding);
+ }
+ }
+
+ // --- From now on we either have a glob resolution or no resolution. ---
+
+        // Check if one of the single imports can still define the name;
+        // if it can, then our result is not determined and can be invalidated.
+ for single_import in &resolution.single_imports {
+ if !self.is_accessible_from(single_import.vis.get(), parent_scope.module) {
+ continue;
+ }
+ let Some(module) = single_import.imported_module.get() else {
+ return Err((Undetermined, Weak::No));
+ };
+ let ImportKind::Single { source: ident, .. } = single_import.kind else {
+ unreachable!();
+ };
+ match self.resolve_ident_in_module(
+ module,
+ ident,
+ ns,
+ &single_import.parent_scope,
+ None,
+ ignore_binding,
+ ) {
+ Err(Determined) => continue,
+ Ok(binding)
+ if !self.is_accessible_from(binding.vis, single_import.parent_scope.module) =>
+ {
+ continue;
+ }
+ Ok(_) | Err(Undetermined) => return Err((Undetermined, Weak::No)),
+ }
+ }
+
+        // So we have a resolution that comes from a glob import. This resolution is determined
+        // if it cannot be shadowed by some new item/import expanded from a macro.
+        // This happens either if there are no unexpanded macros, or if expanded names cannot
+        // shadow globs (which happens in the macro namespace or with restricted shadowing).
+        //
+        // Additionally, any macro in any module can plant names in the root module if it creates
+        // `macro_export` macros, so the root module effectively has unresolved invocations if any
+        // module has unresolved invocations.
+        // However, that causes resolution/expansion to get stuck too often (#53144), so, to make
+        // progress, we have to ignore those potential unresolved invocations from other modules
+        // and prohibit access to macro-expanded `macro_export` macros instead (unless restricted
+        // shadowing is enabled, see `macro_expanded_macro_export_errors`).
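+        //
+        // As an illustration (hypothetical user code, not part of this file),
+        // a glob resolution like the one for `name` below stays undetermined
+        // in the type namespace while `m!` is unexpanded, since the expansion
+        // may still define an item `name` that shadows the glob:
+        //
+        //     use inner::*; // may already provide `name`
+        //     m!();         // may expand to `struct name;`, shadowing the glob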
+ let unexpanded_macros = !module.unexpanded_invocations.borrow().is_empty();
+ if let Some(binding) = resolution.binding {
+ if !unexpanded_macros || ns == MacroNS || restricted_shadowing {
+ return check_usable(self, binding);
+ } else {
+ return Err((Undetermined, Weak::No));
+ }
+ }
+
+ // --- From now on we have no resolution. ---
+
+        // Now we are in a situation where a new item/import can appear only from a glob or a
+        // macro expansion. With restricted shadowing, names from globs and macro expansions
+        // cannot shadow names from outer scopes, so we can freely fall back from module search
+        // to search in outer scopes. For `early_resolve_ident_in_lexical_scope` to continue the
+        // search in outer scopes we return `Undetermined` with `Weak::Yes`.
+
+        // Check if one of the unexpanded macros can still define the name;
+        // if it can, then our "no resolution" result is not determined and can be invalidated.
+ if unexpanded_macros {
+ return Err((Undetermined, Weak::Yes));
+ }
+
+        // Check if one of the glob imports can still define the name;
+        // if it can, then our "no resolution" result is not determined and can be invalidated.
+ for glob_import in module.globs.borrow().iter() {
+ if !self.is_accessible_from(glob_import.vis.get(), parent_scope.module) {
+ continue;
+ }
+ let module = match glob_import.imported_module.get() {
+ Some(ModuleOrUniformRoot::Module(module)) => module,
+ Some(_) => continue,
+ None => return Err((Undetermined, Weak::Yes)),
+ };
+ let tmp_parent_scope;
+ let (mut adjusted_parent_scope, mut ident) =
+ (parent_scope, ident.normalize_to_macros_2_0());
+ match ident.span.glob_adjust(module.expansion, glob_import.span) {
+ Some(Some(def)) => {
+ tmp_parent_scope =
+ ParentScope { module: self.expn_def_scope(def), ..*parent_scope };
+ adjusted_parent_scope = &tmp_parent_scope;
+ }
+ Some(None) => {}
+ None => continue,
+ };
+ let result = self.resolve_ident_in_module_unadjusted(
+ ModuleOrUniformRoot::Module(module),
+ ident,
+ ns,
+ adjusted_parent_scope,
+ None,
+ ignore_binding,
+ );
+
+ match result {
+ Err(Determined) => continue,
+ Ok(binding)
+ if !self.is_accessible_from(binding.vis, glob_import.parent_scope.module) =>
+ {
+ continue;
+ }
+ Ok(_) | Err(Undetermined) => return Err((Undetermined, Weak::Yes)),
+ }
+ }
+
+ // No resolution and no one else can define the name - determinate error.
+ Err((Determined, Weak::No))
+ }
+
+ /// Validate a local resolution (from ribs).
+ #[tracing::instrument(level = "debug", skip(self, all_ribs))]
+ fn validate_res_from_ribs(
+ &mut self,
+ rib_index: usize,
+ rib_ident: Ident,
+ mut res: Res,
+ finalize: Option<Span>,
+ original_rib_ident_def: Ident,
+ all_ribs: &[Rib<'a>],
+ ) -> Res {
+ const CG_BUG_STR: &str = "min_const_generics resolve check didn't stop compilation";
+ debug!("validate_res_from_ribs({:?})", res);
+ let ribs = &all_ribs[rib_index + 1..];
+
+ // An invalid forward use of a generic parameter from a previous default.
+ if let ForwardGenericParamBanRibKind = all_ribs[rib_index].kind {
+ if let Some(span) = finalize {
+ let res_error = if rib_ident.name == kw::SelfUpper {
+ ResolutionError::SelfInGenericParamDefault
+ } else {
+ ResolutionError::ForwardDeclaredGenericParam
+ };
+ self.report_error(span, res_error);
+ }
+ assert_eq!(res, Res::Err);
+ return Res::Err;
+ }
+
+ match res {
+ Res::Local(_) => {
+ use ResolutionError::*;
+ let mut res_err = None;
+
+ for rib in ribs {
+ match rib.kind {
+ NormalRibKind
+ | ClosureOrAsyncRibKind
+ | ModuleRibKind(..)
+ | MacroDefinition(..)
+ | ForwardGenericParamBanRibKind => {
+ // Nothing to do. Continue.
+ }
+ ItemRibKind(_) | FnItemRibKind | AssocItemRibKind => {
+ // This was an attempt to access an upvar inside a
+ // named function item. This is not allowed, so we
+ // report an error.
+ if let Some(span) = finalize {
+ // We don't immediately trigger a resolve error, because
+ // we want certain other resolution errors (namely those
+ // emitted for `ConstantItemRibKind` below) to take
+ // precedence.
+ res_err = Some((span, CannotCaptureDynamicEnvironmentInFnItem));
+ }
+ }
+ ConstantItemRibKind(_, item) => {
+ // Still doesn't deal with upvars
+ if let Some(span) = finalize {
+ let (span, resolution_error) =
+ if let Some((ident, constant_item_kind)) = item {
+ let kind_str = match constant_item_kind {
+ ConstantItemKind::Const => "const",
+ ConstantItemKind::Static => "static",
+ };
+ (
+ span,
+ AttemptToUseNonConstantValueInConstant(
+ ident, "let", kind_str,
+ ),
+ )
+ } else {
+ (
+ rib_ident.span,
+ AttemptToUseNonConstantValueInConstant(
+ original_rib_ident_def,
+ "const",
+ "let",
+ ),
+ )
+ };
+ self.report_error(span, resolution_error);
+ }
+ return Res::Err;
+ }
+ ConstParamTyRibKind => {
+ if let Some(span) = finalize {
+ self.report_error(span, ParamInTyOfConstParam(rib_ident.name));
+ }
+ return Res::Err;
+ }
+ InlineAsmSymRibKind => {
+ if let Some(span) = finalize {
+ self.report_error(span, InvalidAsmSym);
+ }
+ return Res::Err;
+ }
+ }
+ }
+ if let Some((span, res_err)) = res_err {
+ self.report_error(span, res_err);
+ return Res::Err;
+ }
+ }
+ Res::Def(DefKind::TyParam, _) | Res::SelfTy { .. } => {
+ for rib in ribs {
+ let has_generic_params: HasGenericParams = match rib.kind {
+ NormalRibKind
+ | ClosureOrAsyncRibKind
+ | AssocItemRibKind
+ | ModuleRibKind(..)
+ | MacroDefinition(..)
+ | InlineAsmSymRibKind
+ | ForwardGenericParamBanRibKind => {
+ // Nothing to do. Continue.
+ continue;
+ }
+
+ ConstantItemRibKind(trivial, _) => {
+ let features = self.session.features_untracked();
+ // HACK(min_const_generics): We currently only allow `N` or `{ N }`.
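+                            // That is (an illustrative sketch, not code from this
+                            // file): array types like `[u8; N]` or `[u8; { N }]`
+                            // are accepted, while an expression such as
+                            // `[u8; N + 1]` needs `generic_const_exprs`.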
+ if !(trivial == HasGenericParams::Yes || features.generic_const_exprs) {
+ // HACK(min_const_generics): If we encounter `Self` in an anonymous constant
+ // we can't easily tell if it's generic at this stage, so we instead remember
+ // this and then enforce the self type to be concrete later on.
+ if let Res::SelfTy { trait_, alias_to: Some((def, _)) } = res {
+ res = Res::SelfTy { trait_, alias_to: Some((def, true)) }
+ } else {
+ if let Some(span) = finalize {
+ self.report_error(
+ span,
+ ResolutionError::ParamInNonTrivialAnonConst {
+ name: rib_ident.name,
+ is_type: true,
+ },
+ );
+ self.session.delay_span_bug(span, CG_BUG_STR);
+ }
+
+ return Res::Err;
+ }
+ }
+
+ continue;
+ }
+
+ // This was an attempt to use a type parameter outside its scope.
+ ItemRibKind(has_generic_params) => has_generic_params,
+ FnItemRibKind => HasGenericParams::Yes,
+ ConstParamTyRibKind => {
+ if let Some(span) = finalize {
+ self.report_error(
+ span,
+ ResolutionError::ParamInTyOfConstParam(rib_ident.name),
+ );
+ }
+ return Res::Err;
+ }
+ };
+
+ if let Some(span) = finalize {
+ self.report_error(
+ span,
+ ResolutionError::GenericParamsFromOuterFunction(
+ res,
+ has_generic_params,
+ ),
+ );
+ }
+ return Res::Err;
+ }
+ }
+ Res::Def(DefKind::ConstParam, _) => {
+ let mut ribs = ribs.iter().peekable();
+ if let Some(Rib { kind: FnItemRibKind, .. }) = ribs.peek() {
+ // When declaring const parameters inside function signatures, the first rib
+ // is always a `FnItemRibKind`. In this case, we can skip it, to avoid it
+ // (spuriously) conflicting with the const param.
+ ribs.next();
+ }
+
+ for rib in ribs {
+ let has_generic_params = match rib.kind {
+ NormalRibKind
+ | ClosureOrAsyncRibKind
+ | AssocItemRibKind
+ | ModuleRibKind(..)
+ | MacroDefinition(..)
+ | InlineAsmSymRibKind
+ | ForwardGenericParamBanRibKind => continue,
+
+ ConstantItemRibKind(trivial, _) => {
+ let features = self.session.features_untracked();
+ // HACK(min_const_generics): We currently only allow `N` or `{ N }`.
+ if !(trivial == HasGenericParams::Yes || features.generic_const_exprs) {
+ if let Some(span) = finalize {
+ self.report_error(
+ span,
+ ResolutionError::ParamInNonTrivialAnonConst {
+ name: rib_ident.name,
+ is_type: false,
+ },
+ );
+ self.session.delay_span_bug(span, CG_BUG_STR);
+ }
+
+ return Res::Err;
+ }
+
+ continue;
+ }
+
+ ItemRibKind(has_generic_params) => has_generic_params,
+ FnItemRibKind => HasGenericParams::Yes,
+ ConstParamTyRibKind => {
+ if let Some(span) = finalize {
+ self.report_error(
+ span,
+ ResolutionError::ParamInTyOfConstParam(rib_ident.name),
+ );
+ }
+ return Res::Err;
+ }
+ };
+
+ // This was an attempt to use a const parameter outside its scope.
+ if let Some(span) = finalize {
+ self.report_error(
+ span,
+ ResolutionError::GenericParamsFromOuterFunction(
+ res,
+ has_generic_params,
+ ),
+ );
+ }
+ return Res::Err;
+ }
+ }
+ _ => {}
+ }
+ res
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ pub(crate) fn maybe_resolve_path(
+ &mut self,
+ path: &[Segment],
+        opt_ns: Option<Namespace>, // `None` indicates a module path in an import
+ parent_scope: &ParentScope<'a>,
+ ) -> PathResult<'a> {
+ self.resolve_path_with_ribs(path, opt_ns, parent_scope, None, None, None)
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ pub(crate) fn resolve_path(
+ &mut self,
+ path: &[Segment],
+        opt_ns: Option<Namespace>, // `None` indicates a module path in an import
+ parent_scope: &ParentScope<'a>,
+ finalize: Option<Finalize>,
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ ) -> PathResult<'a> {
+ self.resolve_path_with_ribs(path, opt_ns, parent_scope, finalize, None, ignore_binding)
+ }
+
+ pub(crate) fn resolve_path_with_ribs(
+ &mut self,
+ path: &[Segment],
+        opt_ns: Option<Namespace>, // `None` indicates a module path in an import
+ parent_scope: &ParentScope<'a>,
+ finalize: Option<Finalize>,
+ ribs: Option<&PerNS<Vec<Rib<'a>>>>,
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ ) -> PathResult<'a> {
+ debug!("resolve_path(path={:?}, opt_ns={:?}, finalize={:?})", path, opt_ns, finalize);
+
+ let mut module = None;
+ let mut allow_super = true;
+ let mut second_binding = None;
+
+ for (i, &Segment { ident, id, .. }) in path.iter().enumerate() {
+ debug!("resolve_path ident {} {:?} {:?}", i, ident, id);
+ let record_segment_res = |this: &mut Self, res| {
+ if finalize.is_some() {
+ if let Some(id) = id {
+ if !this.partial_res_map.contains_key(&id) {
+ assert!(id != ast::DUMMY_NODE_ID, "Trying to resolve dummy id");
+ this.record_partial_res(id, PartialRes::new(res));
+ }
+ }
+ }
+ };
+
+ let is_last = i == path.len() - 1;
+ let ns = if is_last { opt_ns.unwrap_or(TypeNS) } else { TypeNS };
+ let name = ident.name;
+
+ allow_super &= ns == TypeNS && (name == kw::SelfLower || name == kw::Super);
+
+ if ns == TypeNS {
+ if allow_super && name == kw::Super {
+ let mut ctxt = ident.span.ctxt().normalize_to_macros_2_0();
+ let self_module = match i {
+ 0 => Some(self.resolve_self(&mut ctxt, parent_scope.module)),
+ _ => match module {
+ Some(ModuleOrUniformRoot::Module(module)) => Some(module),
+ _ => None,
+ },
+ };
+ if let Some(self_module) = self_module {
+ if let Some(parent) = self_module.parent {
+ module = Some(ModuleOrUniformRoot::Module(
+ self.resolve_self(&mut ctxt, parent),
+ ));
+ continue;
+ }
+ }
+ return PathResult::failed(ident.span, false, finalize.is_some(), || {
+ ("there are too many leading `super` keywords".to_string(), None)
+ });
+ }
+ if i == 0 {
+ if name == kw::SelfLower {
+ let mut ctxt = ident.span.ctxt().normalize_to_macros_2_0();
+ module = Some(ModuleOrUniformRoot::Module(
+ self.resolve_self(&mut ctxt, parent_scope.module),
+ ));
+ continue;
+ }
+ if name == kw::PathRoot && ident.span.rust_2018() {
+ module = Some(ModuleOrUniformRoot::ExternPrelude);
+ continue;
+ }
+ if name == kw::PathRoot && ident.span.rust_2015() && self.session.rust_2018() {
+ // `::a::b` from 2015 macro on 2018 global edition
+ module = Some(ModuleOrUniformRoot::CrateRootAndExternPrelude);
+ continue;
+ }
+ if name == kw::PathRoot || name == kw::Crate || name == kw::DollarCrate {
+ // `::a::b`, `crate::a::b` or `$crate::a::b`
+ module = Some(ModuleOrUniformRoot::Module(self.resolve_crate_root(ident)));
+ continue;
+ }
+ }
+ }
+
+ // Report special messages for path segment keywords in wrong positions.
+ if ident.is_path_segment_keyword() && i != 0 {
+ return PathResult::failed(ident.span, false, finalize.is_some(), || {
+ let name_str = if name == kw::PathRoot {
+ "crate root".to_string()
+ } else {
+ format!("`{}`", name)
+ };
+ let label = if i == 1 && path[0].ident.name == kw::PathRoot {
+ format!("global paths cannot start with {}", name_str)
+ } else {
+ format!("{} in paths can only be used in start position", name_str)
+ };
+ (label, None)
+ });
+ }
+
+ enum FindBindingResult<'a> {
+ Binding(Result<&'a NameBinding<'a>, Determinacy>),
+ Res(Res),
+ }
+ let find_binding_in_ns = |this: &mut Self, ns| {
+ let binding = if let Some(module) = module {
+ this.resolve_ident_in_module(
+ module,
+ ident,
+ ns,
+ parent_scope,
+ finalize,
+ ignore_binding,
+ )
+ } else if let Some(ribs) = ribs
+ && let Some(TypeNS | ValueNS) = opt_ns
+ {
+ match this.resolve_ident_in_lexical_scope(
+ ident,
+ ns,
+ parent_scope,
+ finalize,
+ &ribs[ns],
+ ignore_binding,
+ ) {
+ // we found a locally-imported or available item/module
+ Some(LexicalScopeBinding::Item(binding)) => Ok(binding),
+ // we found a local variable or type param
+ Some(LexicalScopeBinding::Res(res)) => return FindBindingResult::Res(res),
+ _ => Err(Determinacy::determined(finalize.is_some())),
+ }
+ } else {
+ let scopes = ScopeSet::All(ns, opt_ns.is_none());
+ this.early_resolve_ident_in_lexical_scope(
+ ident,
+ scopes,
+ parent_scope,
+ finalize,
+ finalize.is_some(),
+ ignore_binding,
+ )
+ };
+ FindBindingResult::Binding(binding)
+ };
+ let binding = match find_binding_in_ns(self, ns) {
+ FindBindingResult::Res(res) => {
+ record_segment_res(self, res);
+ return PathResult::NonModule(PartialRes::with_unresolved_segments(
+ res,
+ path.len() - 1,
+ ));
+ }
+ FindBindingResult::Binding(binding) => binding,
+ };
+ match binding {
+ Ok(binding) => {
+ if i == 1 {
+ second_binding = Some(binding);
+ }
+ let res = binding.res();
+ let maybe_assoc = opt_ns != Some(MacroNS) && PathSource::Type.is_expected(res);
+ if let Some(next_module) = binding.module() {
+ module = Some(ModuleOrUniformRoot::Module(next_module));
+ record_segment_res(self, res);
+ } else if res == Res::ToolMod && i + 1 != path.len() {
+ if binding.is_import() {
+ self.session
+ .struct_span_err(
+ ident.span,
+ "cannot use a tool module through an import",
+ )
+ .span_note(binding.span, "the tool module imported here")
+ .emit();
+ }
+ let res = Res::NonMacroAttr(NonMacroAttrKind::Tool);
+ return PathResult::NonModule(PartialRes::new(res));
+ } else if res == Res::Err {
+ return PathResult::NonModule(PartialRes::new(Res::Err));
+ } else if opt_ns.is_some() && (is_last || maybe_assoc) {
+ self.lint_if_path_starts_with_module(finalize, path, second_binding);
+ record_segment_res(self, res);
+ return PathResult::NonModule(PartialRes::with_unresolved_segments(
+ res,
+ path.len() - i - 1,
+ ));
+ } else {
+ return PathResult::failed(ident.span, is_last, finalize.is_some(), || {
+ let label = format!(
+ "`{ident}` is {} {}, not a module",
+ res.article(),
+ res.descr()
+ );
+ (label, None)
+ });
+ }
+ }
+ Err(Undetermined) => return PathResult::Indeterminate,
+ Err(Determined) => {
+ if let Some(ModuleOrUniformRoot::Module(module)) = module {
+ if opt_ns.is_some() && !module.is_normal() {
+ return PathResult::NonModule(PartialRes::with_unresolved_segments(
+ module.res().unwrap(),
+ path.len() - i,
+ ));
+ }
+ }
+
+ return PathResult::failed(ident.span, is_last, finalize.is_some(), || {
+ self.report_path_resolution_error(
+ path,
+ opt_ns,
+ parent_scope,
+ ribs,
+ ignore_binding,
+ module,
+ i,
+ ident,
+ )
+ });
+ }
+ }
+ }
+
+ self.lint_if_path_starts_with_module(finalize, path, second_binding);
+
+ PathResult::Module(match module {
+ Some(module) => module,
+ None if path.is_empty() => ModuleOrUniformRoot::CurrentScope,
+ _ => bug!("resolve_path: non-empty path `{:?}` has no module", path),
+ })
+ }
+}
diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs
new file mode 100644
index 000000000..b89273990
--- /dev/null
+++ b/compiler/rustc_resolve/src/imports.rs
@@ -0,0 +1,1151 @@
+//! A bunch of methods and structures more or less related to resolving imports.
+
+use crate::diagnostics::Suggestion;
+use crate::Determinacy::{self, *};
+use crate::Namespace::{MacroNS, TypeNS};
+use crate::{module_to_string, names_to_string};
+use crate::{AmbiguityKind, BindingKey, ModuleKind, ResolutionError, Resolver, Segment};
+use crate::{Finalize, Module, ModuleOrUniformRoot, ParentScope, PerNS, ScopeSet};
+use crate::{NameBinding, NameBindingKind, PathResult};
+
+use rustc_ast::NodeId;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::intern::Interned;
+use rustc_errors::{pluralize, struct_span_err, Applicability, MultiSpan};
+use rustc_hir::def::{self, DefKind, PartialRes};
+use rustc_middle::metadata::ModChild;
+use rustc_middle::span_bug;
+use rustc_middle::ty;
+use rustc_session::lint::builtin::{PUB_USE_OF_PRIVATE_EXTERN_CRATE, UNUSED_IMPORTS};
+use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_span::hygiene::LocalExpnId;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_span::Span;
+
+use tracing::*;
+
+use std::cell::Cell;
+use std::{mem, ptr};
+
+type Res = def::Res<NodeId>;
+
+/// Contains data for specific kinds of imports.
+#[derive(Clone, Debug)]
+pub enum ImportKind<'a> {
+ Single {
+ /// `source` in `use prefix::source as target`.
+ source: Ident,
+ /// `target` in `use prefix::source as target`.
+ target: Ident,
+        /// Bindings to which `source` refers.
+ source_bindings: PerNS<Cell<Result<&'a NameBinding<'a>, Determinacy>>>,
+ /// Bindings introduced by `target`.
+ target_bindings: PerNS<Cell<Option<&'a NameBinding<'a>>>>,
+ /// `true` for `...::{self [as target]}` imports, `false` otherwise.
+ type_ns_only: bool,
+        /// Did this import result from a nested import, i.e., `use foo::{bar, baz};`?
+ nested: bool,
+        /// Additional `NodeId`s allocated to an `ast::UseTree` for an automatically generated `use` statement
+        /// (e.g., implicit struct constructors).
+ additional_ids: (NodeId, NodeId),
+ },
+ Glob {
+ is_prelude: bool,
+ max_vis: Cell<ty::Visibility>, // The visibility of the greatest re-export.
+ // n.b. `max_vis` is only used in `finalize_import` to check for re-export errors.
+ },
+ ExternCrate {
+ source: Option<Symbol>,
+ target: Ident,
+ },
+ MacroUse,
+}
+
+/// One import.
+#[derive(Debug, Clone)]
+pub(crate) struct Import<'a> {
+ pub kind: ImportKind<'a>,
+
+ /// The ID of the `extern crate`, `UseTree` etc that imported this `Import`.
+ ///
+ /// In the case where the `Import` was expanded from a "nested" use tree,
+ /// this id is the ID of the leaf tree. For example:
+ ///
+ /// ```ignore (pacify the merciless tidy)
+ /// use foo::bar::{a, b}
+ /// ```
+ ///
+ /// If this is the import for `foo::bar::a`, we would have the ID of the `UseTree`
+ /// for `a` in this field.
+ pub id: NodeId,
+
+ /// The `id` of the "root" use-kind -- this is always the same as
+ /// `id` except in the case of "nested" use trees, in which case
+ /// it will be the `id` of the root use tree. e.g., in the example
+ /// from `id`, this would be the ID of the `use foo::bar`
+ /// `UseTree` node.
+ pub root_id: NodeId,
+
+ /// Span of the entire use statement.
+ pub use_span: Span,
+
+ /// Span of the entire use statement with attributes.
+ pub use_span_with_attributes: Span,
+
+ /// Did the use statement have any attributes?
+ pub has_attributes: bool,
+
+ /// Span of this use tree.
+ pub span: Span,
+
+ /// Span of the *root* use tree (see `root_id`).
+ pub root_span: Span,
+
+ pub parent_scope: ParentScope<'a>,
+ pub module_path: Vec<Segment>,
+ /// The resolution of `module_path`.
+ pub imported_module: Cell<Option<ModuleOrUniformRoot<'a>>>,
+ pub vis: Cell<ty::Visibility>,
+ pub used: Cell<bool>,
+}
+
+impl<'a> Import<'a> {
+ pub fn is_glob(&self) -> bool {
+ matches!(self.kind, ImportKind::Glob { .. })
+ }
+
+ pub fn is_nested(&self) -> bool {
+ match self.kind {
+ ImportKind::Single { nested, .. } => nested,
+ _ => false,
+ }
+ }
+}
+
+/// Records information about the resolution of a name in a namespace of a module.
+#[derive(Clone, Default, Debug)]
+pub(crate) struct NameResolution<'a> {
+ /// Single imports that may define the name in the namespace.
+ /// Imports are arena-allocated, so it's ok to use pointers as keys.
+ pub single_imports: FxHashSet<Interned<'a, Import<'a>>>,
+ /// The least shadowable known binding for this name, or None if there are no known bindings.
+ pub binding: Option<&'a NameBinding<'a>>,
+ pub shadowed_glob: Option<&'a NameBinding<'a>>,
+}
+
+impl<'a> NameResolution<'a> {
+    // Returns the binding for the name if it is known, or `None` if it is not.
+ pub(crate) fn binding(&self) -> Option<&'a NameBinding<'a>> {
+ self.binding.and_then(|binding| {
+ if !binding.is_glob_import() || self.single_imports.is_empty() {
+ Some(binding)
+ } else {
+ None
+ }
+ })
+ }
+
+ pub(crate) fn add_single_import(&mut self, import: &'a Import<'a>) {
+ self.single_imports.insert(Interned::new_unchecked(import));
+ }
+}
+
+// Reexports of the form `pub use foo as bar;` where `foo` is `extern crate foo;`
+// are permitted for backward-compatibility under a deprecation lint.
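+//
+// In user code this looks like (a hypothetical sketch, not part of this file):
+//
+//     extern crate foo;   // the crate itself stays private
+//     pub use foo as bar; // accepted, but fires `pub_use_of_private_extern_crate`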
+fn pub_use_of_private_extern_crate_hack(import: &Import<'_>, binding: &NameBinding<'_>) -> bool {
+ match (&import.kind, &binding.kind) {
+ (
+ ImportKind::Single { .. },
+ NameBindingKind::Import {
+ import: Import { kind: ImportKind::ExternCrate { .. }, .. },
+ ..
+ },
+ ) => import.vis.get().is_public(),
+ _ => false,
+ }
+}
+
+impl<'a> Resolver<'a> {
+ // Given a binding and an import that resolves to it,
+ // return the corresponding binding defined by the import.
+ pub(crate) fn import(
+ &self,
+ binding: &'a NameBinding<'a>,
+ import: &'a Import<'a>,
+ ) -> &'a NameBinding<'a> {
+ let vis = if binding.vis.is_at_least(import.vis.get(), self)
+ || pub_use_of_private_extern_crate_hack(import, binding)
+ {
+ import.vis.get()
+ } else {
+ binding.vis
+ };
+
+ if let ImportKind::Glob { ref max_vis, .. } = import.kind {
+ if vis == import.vis.get() || vis.is_at_least(max_vis.get(), self) {
+ max_vis.set(vis)
+ }
+ }
+
+ self.arenas.alloc_name_binding(NameBinding {
+ kind: NameBindingKind::Import { binding, import, used: Cell::new(false) },
+ ambiguity: None,
+ span: import.span,
+ vis,
+ expansion: import.parent_scope.expansion,
+ })
+ }
+
+ // Define the name or return the existing binding if there is a collision.
+ pub(crate) fn try_define(
+ &mut self,
+ module: Module<'a>,
+ key: BindingKey,
+ binding: &'a NameBinding<'a>,
+ ) -> Result<(), &'a NameBinding<'a>> {
+ let res = binding.res();
+ self.check_reserved_macro_name(key.ident, res);
+ self.set_binding_parent_module(binding, module);
+ self.update_resolution(module, key, |this, resolution| {
+ if let Some(old_binding) = resolution.binding {
+ if res == Res::Err {
+ // Do not override real bindings with `Res::Err`s from error recovery.
+ return Ok(());
+ }
+ match (old_binding.is_glob_import(), binding.is_glob_import()) {
+ (true, true) => {
+ if res != old_binding.res() {
+ resolution.binding = Some(this.ambiguity(
+ AmbiguityKind::GlobVsGlob,
+ old_binding,
+ binding,
+ ));
+ } else if !old_binding.vis.is_at_least(binding.vis, &*this) {
+ // We are glob-importing the same item but with greater visibility.
+ resolution.binding = Some(binding);
+ }
+ }
+ (old_glob @ true, false) | (old_glob @ false, true) => {
+ let (glob_binding, nonglob_binding) =
+ if old_glob { (old_binding, binding) } else { (binding, old_binding) };
+ if glob_binding.res() != nonglob_binding.res()
+ && key.ns == MacroNS
+ && nonglob_binding.expansion != LocalExpnId::ROOT
+ {
+ resolution.binding = Some(this.ambiguity(
+ AmbiguityKind::GlobVsExpanded,
+ nonglob_binding,
+ glob_binding,
+ ));
+ } else {
+ resolution.binding = Some(nonglob_binding);
+ }
+ resolution.shadowed_glob = Some(glob_binding);
+ }
+ (false, false) => {
+ return Err(old_binding);
+ }
+ }
+ } else {
+ resolution.binding = Some(binding);
+ }
+
+ Ok(())
+ })
+ }
+
+ fn ambiguity(
+ &self,
+ kind: AmbiguityKind,
+ primary_binding: &'a NameBinding<'a>,
+ secondary_binding: &'a NameBinding<'a>,
+ ) -> &'a NameBinding<'a> {
+ self.arenas.alloc_name_binding(NameBinding {
+ ambiguity: Some((secondary_binding, kind)),
+ ..primary_binding.clone()
+ })
+ }
+
+ // Use `f` to mutate the resolution of the name in the module.
+ // If the resolution becomes a success, define it in the module's glob importers.
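+    //
+    // The glob cycles guarded against below can arise from mutually glob-importing
+    // modules, e.g. (a hypothetical sketch, not code from this file):
+    //
+    //     mod a { pub use crate::b::*; pub struct S; }
+    //     mod b { pub use crate::a::*; }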
+ fn update_resolution<T, F>(&mut self, module: Module<'a>, key: BindingKey, f: F) -> T
+ where
+ F: FnOnce(&mut Resolver<'a>, &mut NameResolution<'a>) -> T,
+ {
+ // Ensure that `resolution` isn't borrowed when defining in the module's glob importers,
+ // during which the resolution might end up getting re-defined via a glob cycle.
+ let (binding, t) = {
+ let resolution = &mut *self.resolution(module, key).borrow_mut();
+ let old_binding = resolution.binding();
+
+ let t = f(self, resolution);
+
+ match resolution.binding() {
+ _ if old_binding.is_some() => return t,
+ None => return t,
+ Some(binding) => match old_binding {
+ Some(old_binding) if ptr::eq(old_binding, binding) => return t,
+ _ => (binding, t),
+ },
+ }
+ };
+
+        // Define `binding` in `module`'s glob importers.
+ for import in module.glob_importers.borrow_mut().iter() {
+ let mut ident = key.ident;
+ let scope = match ident.span.reverse_glob_adjust(module.expansion, import.span) {
+ Some(Some(def)) => self.expn_def_scope(def),
+ Some(None) => import.parent_scope.module,
+ None => continue,
+ };
+ if self.is_accessible_from(binding.vis, scope) {
+ let imported_binding = self.import(binding, import);
+ let key = BindingKey { ident, ..key };
+ let _ = self.try_define(import.parent_scope.module, key, imported_binding);
+ }
+ }
+
+ t
+ }
+
+    // Define a dummy resolution containing a `Res::Err` as a placeholder for a failed resolution;
+    // also mark such failed imports as used to avoid duplicate diagnostics.
+ fn import_dummy_binding(&mut self, import: &'a Import<'a>) {
+ if let ImportKind::Single { target, ref target_bindings, .. } = import.kind {
+ if target_bindings.iter().any(|binding| binding.get().is_some()) {
+ return; // Has resolution, do not create the dummy binding
+ }
+ let dummy_binding = self.dummy_binding;
+ let dummy_binding = self.import(dummy_binding, import);
+ self.per_ns(|this, ns| {
+ let key = this.new_key(target, ns);
+ let _ = this.try_define(import.parent_scope.module, key, dummy_binding);
+ });
+ self.record_use(target, dummy_binding, false);
+ } else if import.imported_module.get().is_none() {
+ import.used.set(true);
+ self.used_imports.insert(import.id);
+ }
+ }
+}
+
+/// An error that may be transformed into a diagnostic later. Used to combine multiple unresolved
+/// import errors within the same use tree into a single diagnostic.
+#[derive(Debug, Clone)]
+struct UnresolvedImportError {
+ span: Span,
+ label: Option<String>,
+ note: Option<String>,
+ suggestion: Option<Suggestion>,
+}
+
+pub struct ImportResolver<'a, 'b> {
+ pub r: &'a mut Resolver<'b>,
+}
+
+impl<'a, 'b> ImportResolver<'a, 'b> {
+ // Import resolution
+ //
+ // This is a fixed-point algorithm. We resolve imports until our efforts
+ // are stymied by an unresolved import; then we bail out of the current
+ // module and continue. We terminate successfully once no more imports
+ // remain or unsuccessfully when no forward progress in resolving imports
+ // is made.
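+    //
+    // A sketch of why iteration is needed (hypothetical user code, not part of
+    // this file): the second import below is indeterminate until the first one
+    // has resolved, because `b` only becomes visible through the glob:
+    //
+    //     use a::*;        // brings module `b` into scope
+    //     use self::b::Y;  // resolvable only on a later pass
+    //     mod a { pub mod b { pub struct Y; } }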
+
+ /// Resolves all imports for the crate. This method performs the fixed-
+ /// point iteration.
+ pub fn resolve_imports(&mut self) {
+ let mut prev_num_indeterminates = self.r.indeterminate_imports.len() + 1;
+ while self.r.indeterminate_imports.len() < prev_num_indeterminates {
+ prev_num_indeterminates = self.r.indeterminate_imports.len();
+ for import in mem::take(&mut self.r.indeterminate_imports) {
+ match self.resolve_import(&import) {
+ true => self.r.determined_imports.push(import),
+ false => self.r.indeterminate_imports.push(import),
+ }
+ }
+ }
+ }
+
+ pub fn finalize_imports(&mut self) {
+ for module in self.r.arenas.local_modules().iter() {
+ self.finalize_resolutions_in(module);
+ }
+
+ let mut seen_spans = FxHashSet::default();
+ let mut errors = vec![];
+ let mut prev_root_id: NodeId = NodeId::from_u32(0);
+ let determined_imports = mem::take(&mut self.r.determined_imports);
+ let indeterminate_imports = mem::take(&mut self.r.indeterminate_imports);
+
+ for (is_indeterminate, import) in determined_imports
+ .into_iter()
+ .map(|i| (false, i))
+ .chain(indeterminate_imports.into_iter().map(|i| (true, i)))
+ {
+ let unresolved_import_error = self.finalize_import(import);
+
+ // If this import is unresolved then create a dummy import
+ // resolution for it so that later resolve stages won't complain.
+ self.r.import_dummy_binding(import);
+
+ if let Some(err) = unresolved_import_error {
+ if let ImportKind::Single { source, ref source_bindings, .. } = import.kind {
+ if source.name == kw::SelfLower {
+ // Silence `unresolved import` error if E0429 is already emitted
+ if let Err(Determined) = source_bindings.value_ns.get() {
+ continue;
+ }
+ }
+ }
+
+ if prev_root_id.as_u32() != 0
+ && prev_root_id.as_u32() != import.root_id.as_u32()
+ && !errors.is_empty()
+ {
+ // In the case of a new import line, throw a diagnostic message
+ // for the previous line.
+ self.throw_unresolved_import_error(errors, None);
+ errors = vec![];
+ }
+ if seen_spans.insert(err.span) {
+ let path = import_path_to_string(
+ &import.module_path.iter().map(|seg| seg.ident).collect::<Vec<_>>(),
+ &import.kind,
+ err.span,
+ );
+ errors.push((path, err));
+ prev_root_id = import.root_id;
+ }
+ } else if is_indeterminate {
+ let path = import_path_to_string(
+ &import.module_path.iter().map(|seg| seg.ident).collect::<Vec<_>>(),
+ &import.kind,
+ import.span,
+ );
+ let err = UnresolvedImportError {
+ span: import.span,
+ label: None,
+ note: None,
+ suggestion: None,
+ };
+ if path.contains("::") {
+ errors.push((path, err))
+ }
+ }
+ }
+
+ if !errors.is_empty() {
+ self.throw_unresolved_import_error(errors, None);
+ }
+ }
+
+ fn throw_unresolved_import_error(
+ &self,
+ errors: Vec<(String, UnresolvedImportError)>,
+ span: Option<MultiSpan>,
+ ) {
+ /// Upper limit on the number of `span_label` messages.
+ const MAX_LABEL_COUNT: usize = 10;
+
+ let (span, msg) = if errors.is_empty() {
+ (span.unwrap(), "unresolved import".to_string())
+ } else {
+ let span = MultiSpan::from_spans(errors.iter().map(|(_, err)| err.span).collect());
+
+ let paths = errors.iter().map(|(path, _)| format!("`{}`", path)).collect::<Vec<_>>();
+
+ let msg = format!("unresolved import{} {}", pluralize!(paths.len()), paths.join(", "),);
+
+ (span, msg)
+ };
+
+ let mut diag = struct_span_err!(self.r.session, span, E0432, "{}", &msg);
+
+ if let Some((_, UnresolvedImportError { note: Some(note), .. })) = errors.iter().last() {
+ diag.note(note);
+ }
+
+ for (_, err) in errors.into_iter().take(MAX_LABEL_COUNT) {
+ if let Some(label) = err.label {
+ diag.span_label(err.span, label);
+ }
+
+ if let Some((suggestions, msg, applicability)) = err.suggestion {
+ if suggestions.is_empty() {
+ diag.help(&msg);
+ continue;
+ }
+ diag.multipart_suggestion(&msg, suggestions, applicability);
+ }
+ }
+
+ diag.emit();
+ }
+
+ /// Attempts to resolve the given import, returning true if its resolution is determined.
+ /// If successful, the resolved bindings are written into the module.
+ fn resolve_import(&mut self, import: &'b Import<'b>) -> bool {
+ debug!(
+ "(resolving import for module) resolving import `{}::...` in `{}`",
+ Segment::names_to_string(&import.module_path),
+ module_to_string(import.parent_scope.module).unwrap_or_else(|| "???".to_string()),
+ );
+
+ let module = if let Some(module) = import.imported_module.get() {
+ module
+ } else {
+ // For better failure detection, pretend that the import will
+ // not define any names while resolving its module path.
+ let orig_vis = import.vis.replace(ty::Visibility::Invisible);
+ let path_res =
+ self.r.maybe_resolve_path(&import.module_path, None, &import.parent_scope);
+ import.vis.set(orig_vis);
+
+ match path_res {
+ PathResult::Module(module) => module,
+ PathResult::Indeterminate => return false,
+ PathResult::NonModule(..) | PathResult::Failed { .. } => return true,
+ }
+ };
+
+ import.imported_module.set(Some(module));
+ let (source, target, source_bindings, target_bindings, type_ns_only) = match import.kind {
+ ImportKind::Single {
+ source,
+ target,
+ ref source_bindings,
+ ref target_bindings,
+ type_ns_only,
+ ..
+ } => (source, target, source_bindings, target_bindings, type_ns_only),
+ ImportKind::Glob { .. } => {
+ self.resolve_glob_import(import);
+ return true;
+ }
+ _ => unreachable!(),
+ };
+
+ let mut indeterminate = false;
+ self.r.per_ns(|this, ns| {
+ if !type_ns_only || ns == TypeNS {
+ if let Err(Undetermined) = source_bindings[ns].get() {
+ // For better failure detection, pretend that the import will
+ // not define any names while resolving its module path.
+ let orig_vis = import.vis.replace(ty::Visibility::Invisible);
+ let binding = this.resolve_ident_in_module(
+ module,
+ source,
+ ns,
+ &import.parent_scope,
+ None,
+ None,
+ );
+ import.vis.set(orig_vis);
+ source_bindings[ns].set(binding);
+ } else {
+ return;
+ };
+
+ let parent = import.parent_scope.module;
+ match source_bindings[ns].get() {
+ Err(Undetermined) => indeterminate = true,
+ // Don't update the resolution, because it was never added.
+ Err(Determined) if target.name == kw::Underscore => {}
+ Ok(binding) if binding.is_importable() => {
+ let imported_binding = this.import(binding, import);
+ target_bindings[ns].set(Some(imported_binding));
+ this.define(parent, target, ns, imported_binding);
+ }
+ source_binding @ (Ok(..) | Err(Determined)) => {
+ if source_binding.is_ok() {
+ let msg = format!("`{}` is not directly importable", target);
+ struct_span_err!(this.session, import.span, E0253, "{}", &msg)
+ .span_label(import.span, "cannot be imported directly")
+ .emit();
+ }
+ let key = this.new_key(target, ns);
+ this.update_resolution(parent, key, |_, resolution| {
+ resolution.single_imports.remove(&Interned::new_unchecked(import));
+ });
+ }
+ }
+ }
+ });
+
+ !indeterminate
+ }
+
+ /// Performs final import resolution, consistency checks and error reporting.
+ ///
+ /// Optionally returns an unresolved import error. This error is buffered and used to
+ /// consolidate multiple unresolved import errors into a single diagnostic.
+ fn finalize_import(&mut self, import: &'b Import<'b>) -> Option<UnresolvedImportError> {
+ let orig_vis = import.vis.replace(ty::Visibility::Invisible);
+ let ignore_binding = match &import.kind {
+ ImportKind::Single { target_bindings, .. } => target_bindings[TypeNS].get(),
+ _ => None,
+ };
+ let prev_ambiguity_errors_len = self.r.ambiguity_errors.len();
+ let finalize = Finalize::with_root_span(import.root_id, import.span, import.root_span);
+ let path_res = self.r.resolve_path(
+ &import.module_path,
+ None,
+ &import.parent_scope,
+ Some(finalize),
+ ignore_binding,
+ );
+ let no_ambiguity = self.r.ambiguity_errors.len() == prev_ambiguity_errors_len;
+ import.vis.set(orig_vis);
+ let module = match path_res {
+ PathResult::Module(module) => {
+ // Consistency checks, analogous to `finalize_macro_resolutions`.
+ if let Some(initial_module) = import.imported_module.get() {
+ if !ModuleOrUniformRoot::same_def(module, initial_module) && no_ambiguity {
+ span_bug!(import.span, "inconsistent resolution for an import");
+ }
+ } else if self.r.privacy_errors.is_empty() {
+ let msg = "cannot determine resolution for the import";
+ let msg_note = "import resolution is stuck, try simplifying other imports";
+ self.r.session.struct_span_err(import.span, msg).note(msg_note).emit();
+ }
+
+ module
+ }
+ PathResult::Failed { is_error_from_last_segment: false, span, label, suggestion } => {
+ if no_ambiguity {
+ assert!(import.imported_module.get().is_none());
+ self.r
+ .report_error(span, ResolutionError::FailedToResolve { label, suggestion });
+ }
+ return None;
+ }
+ PathResult::Failed { is_error_from_last_segment: true, span, label, suggestion } => {
+ if no_ambiguity {
+ assert!(import.imported_module.get().is_none());
+ let err = match self.make_path_suggestion(
+ span,
+ import.module_path.clone(),
+ &import.parent_scope,
+ ) {
+ Some((suggestion, note)) => UnresolvedImportError {
+ span,
+ label: None,
+ note,
+ suggestion: Some((
+ vec![(span, Segment::names_to_string(&suggestion))],
+ String::from("a similar path exists"),
+ Applicability::MaybeIncorrect,
+ )),
+ },
+ None => UnresolvedImportError {
+ span,
+ label: Some(label),
+ note: None,
+ suggestion,
+ },
+ };
+ return Some(err);
+ }
+ return None;
+ }
+ PathResult::NonModule(_) => {
+ if no_ambiguity {
+ assert!(import.imported_module.get().is_none());
+ }
+ // The error was already reported earlier.
+ return None;
+ }
+ PathResult::Indeterminate => unreachable!(),
+ };
+
+ let (ident, target, source_bindings, target_bindings, type_ns_only) = match import.kind {
+ ImportKind::Single {
+ source,
+ target,
+ ref source_bindings,
+ ref target_bindings,
+ type_ns_only,
+ ..
+ } => (source, target, source_bindings, target_bindings, type_ns_only),
+ ImportKind::Glob { is_prelude, ref max_vis } => {
+ if import.module_path.len() <= 1 {
+ // HACK(eddyb) `lint_if_path_starts_with_module` needs at least
+ // 2 segments, so the `resolve_path` above won't trigger it.
+ let mut full_path = import.module_path.clone();
+ full_path.push(Segment::from_ident(Ident::empty()));
+ self.r.lint_if_path_starts_with_module(Some(finalize), &full_path, None);
+ }
+
+ if let ModuleOrUniformRoot::Module(module) = module {
+ if ptr::eq(module, import.parent_scope.module) {
+ // Importing a module into itself is not allowed.
+ return Some(UnresolvedImportError {
+ span: import.span,
+ label: Some(String::from("cannot glob-import a module into itself")),
+ note: None,
+ suggestion: None,
+ });
+ }
+ }
+ if !is_prelude &&
+ max_vis.get() != ty::Visibility::Invisible && // Allow empty globs.
+ !max_vis.get().is_at_least(import.vis.get(), &*self.r)
+ {
+ let msg = "glob import doesn't reexport anything because no candidate is public enough";
+ self.r.lint_buffer.buffer_lint(UNUSED_IMPORTS, import.id, import.span, msg);
+ }
+ return None;
+ }
+ _ => unreachable!(),
+ };
+
+ let mut all_ns_err = true;
+ self.r.per_ns(|this, ns| {
+ if !type_ns_only || ns == TypeNS {
+ let orig_vis = import.vis.replace(ty::Visibility::Invisible);
+ let binding = this.resolve_ident_in_module(
+ module,
+ ident,
+ ns,
+ &import.parent_scope,
+ Some(Finalize { report_private: false, ..finalize }),
+ target_bindings[ns].get(),
+ );
+ import.vis.set(orig_vis);
+
+ match binding {
+ Ok(binding) => {
+ // Consistency checks, analogous to `finalize_macro_resolutions`.
+ let initial_res = source_bindings[ns].get().map(|initial_binding| {
+ all_ns_err = false;
+ if let Some(target_binding) = target_bindings[ns].get() {
+ if target.name == kw::Underscore
+ && initial_binding.is_extern_crate()
+ && !initial_binding.is_import()
+ {
+ this.record_use(
+ ident,
+ target_binding,
+ import.module_path.is_empty(),
+ );
+ }
+ }
+ initial_binding.res()
+ });
+ let res = binding.res();
+ if let Ok(initial_res) = initial_res {
+ if res != initial_res && this.ambiguity_errors.is_empty() {
+ span_bug!(import.span, "inconsistent resolution for an import");
+ }
+ } else if res != Res::Err
+ && this.ambiguity_errors.is_empty()
+ && this.privacy_errors.is_empty()
+ {
+ let msg = "cannot determine resolution for the import";
+ let msg_note =
+ "import resolution is stuck, try simplifying other imports";
+ this.session.struct_span_err(import.span, msg).note(msg_note).emit();
+ }
+ }
+ Err(..) => {
+                        // FIXME: This assert may fire if a public glob is later shadowed by a private
+ // single import (see test `issue-55884-2.rs`). In theory single imports should
+ // always block globs, even if they are not yet resolved, so that this kind of
+ // self-inconsistent resolution never happens.
+ // Re-enable the assert when the issue is fixed.
+ // assert!(result[ns].get().is_err());
+ }
+ }
+ }
+ });
+
+ if all_ns_err {
+ let mut all_ns_failed = true;
+ self.r.per_ns(|this, ns| {
+ if !type_ns_only || ns == TypeNS {
+ let binding = this.resolve_ident_in_module(
+ module,
+ ident,
+ ns,
+ &import.parent_scope,
+ Some(finalize),
+ None,
+ );
+ if binding.is_ok() {
+ all_ns_failed = false;
+ }
+ }
+ });
+
+ return if all_ns_failed {
+ let resolutions = match module {
+ ModuleOrUniformRoot::Module(module) => {
+ Some(self.r.resolutions(module).borrow())
+ }
+ _ => None,
+ };
+ let resolutions = resolutions.as_ref().into_iter().flat_map(|r| r.iter());
+ let names = resolutions
+ .filter_map(|(BindingKey { ident: i, .. }, resolution)| {
+ if *i == ident {
+ return None;
+ } // Never suggest the same name
+ match *resolution.borrow() {
+ NameResolution { binding: Some(name_binding), .. } => {
+ match name_binding.kind {
+ NameBindingKind::Import { binding, .. } => {
+ match binding.kind {
+                                            // Never suggest a name with a binding error,
+                                            // i.e., a name that could not previously be resolved.
+ NameBindingKind::Res(Res::Err, _) => None,
+ _ => Some(i.name),
+ }
+ }
+ _ => Some(i.name),
+ }
+ }
+ NameResolution { ref single_imports, .. }
+ if single_imports.is_empty() =>
+ {
+ None
+ }
+ _ => Some(i.name),
+ }
+ })
+ .collect::<Vec<Symbol>>();
+
+ let lev_suggestion =
+ find_best_match_for_name(&names, ident.name, None).map(|suggestion| {
+ (
+ vec![(ident.span, suggestion.to_string())],
+ String::from("a similar name exists in the module"),
+ Applicability::MaybeIncorrect,
+ )
+ });
+
+ let (suggestion, note) =
+ match self.check_for_module_export_macro(import, module, ident) {
+ Some((suggestion, note)) => (suggestion.or(lev_suggestion), note),
+ _ => (lev_suggestion, None),
+ };
+
+ let label = match module {
+ ModuleOrUniformRoot::Module(module) => {
+ let module_str = module_to_string(module);
+ if let Some(module_str) = module_str {
+ format!("no `{}` in `{}`", ident, module_str)
+ } else {
+ format!("no `{}` in the root", ident)
+ }
+ }
+ _ => {
+ if !ident.is_path_segment_keyword() {
+ format!("no external crate `{}`", ident)
+ } else {
+ // HACK(eddyb) this shows up for `self` & `super`, which
+ // should work instead - for now keep the same error message.
+ format!("no `{}` in the root", ident)
+ }
+ }
+ };
+
+ Some(UnresolvedImportError {
+ span: import.span,
+ label: Some(label),
+ note,
+ suggestion,
+ })
+ } else {
+ // `resolve_ident_in_module` reported a privacy error.
+ None
+ };
+ }
+
+ let mut reexport_error = None;
+ let mut any_successful_reexport = false;
+ let mut crate_private_reexport = false;
+ self.r.per_ns(|this, ns| {
+ if let Ok(binding) = source_bindings[ns].get() {
+ let vis = import.vis.get();
+ if !binding.vis.is_at_least(vis, &*this) {
+ reexport_error = Some((ns, binding));
+ if let ty::Visibility::Restricted(binding_def_id) = binding.vis {
+ if binding_def_id.is_top_level_module() {
+ crate_private_reexport = true;
+ }
+ }
+ } else {
+ any_successful_reexport = true;
+ }
+ }
+ });
+
+ // All namespaces must be re-exported with extra visibility for an error to occur.
+ if !any_successful_reexport {
+ let (ns, binding) = reexport_error.unwrap();
+ if pub_use_of_private_extern_crate_hack(import, binding) {
+ let msg = format!(
+ "extern crate `{}` is private, and cannot be \
+ re-exported (error E0365), consider declaring with \
+ `pub`",
+ ident
+ );
+ self.r.lint_buffer.buffer_lint(
+ PUB_USE_OF_PRIVATE_EXTERN_CRATE,
+ import.id,
+ import.span,
+ &msg,
+ );
+ } else {
+ let error_msg = if crate_private_reexport {
+ format!(
+ "`{}` is only public within the crate, and cannot be re-exported outside",
+ ident
+ )
+ } else {
+ format!("`{}` is private, and cannot be re-exported", ident)
+ };
+
+ if ns == TypeNS {
+ let label_msg = if crate_private_reexport {
+ format!("re-export of crate public `{}`", ident)
+ } else {
+ format!("re-export of private `{}`", ident)
+ };
+
+ struct_span_err!(self.r.session, import.span, E0365, "{}", error_msg)
+ .span_label(import.span, label_msg)
+ .note(&format!("consider declaring type or module `{}` with `pub`", ident))
+ .emit();
+ } else {
+ let mut err =
+ struct_span_err!(self.r.session, import.span, E0364, "{error_msg}");
+ match binding.kind {
+ NameBindingKind::Res(Res::Def(DefKind::Macro(_), def_id), _)
+ // exclude decl_macro
+ if self.r.get_macro_by_def_id(def_id).macro_rules =>
+ {
+ err.span_help(
+ binding.span,
+ "consider adding a `#[macro_export]` to the macro in the imported module",
+ );
+ }
+ _ => {
+ err.span_note(
+ import.span,
+ &format!(
+ "consider marking `{ident}` as `pub` in the imported module"
+ ),
+ );
+ }
+ }
+ err.emit();
+ }
+ }
+ }
+
+ if import.module_path.len() <= 1 {
+ // HACK(eddyb) `lint_if_path_starts_with_module` needs at least
+ // 2 segments, so the `resolve_path` above won't trigger it.
+ let mut full_path = import.module_path.clone();
+ full_path.push(Segment::from_ident(ident));
+ self.r.per_ns(|this, ns| {
+ if let Ok(binding) = source_bindings[ns].get() {
+ this.lint_if_path_starts_with_module(Some(finalize), &full_path, Some(binding));
+ }
+ });
+ }
+
+        // Record what this import resolves to for later uses in documentation;
+ // this may resolve to either a value or a type, but for documentation
+ // purposes it's good enough to just favor one over the other.
+ self.r.per_ns(|this, ns| {
+ if let Ok(binding) = source_bindings[ns].get() {
+ this.import_res_map.entry(import.id).or_default()[ns] = Some(binding.res());
+ }
+ });
+
+ self.check_for_redundant_imports(ident, import, source_bindings, target_bindings, target);
+
+ debug!("(resolving single import) successfully resolved import");
+ None
+ }
+
+ fn check_for_redundant_imports(
+ &mut self,
+ ident: Ident,
+ import: &'b Import<'b>,
+ source_bindings: &PerNS<Cell<Result<&'b NameBinding<'b>, Determinacy>>>,
+ target_bindings: &PerNS<Cell<Option<&'b NameBinding<'b>>>>,
+ target: Ident,
+ ) {
+ // Skip if the import was produced by a macro.
+ if import.parent_scope.expansion != LocalExpnId::ROOT {
+ return;
+ }
+
+ // Skip if we are inside a named module (in contrast to an anonymous
+ // module defined by a block).
+ if let ModuleKind::Def(..) = import.parent_scope.module.kind {
+ return;
+ }
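+        // E.g. `fn f() { use std::vec::Vec; }` reaches this point: the import
+        // lives in the block's anonymous module, and since `Vec` also resolves
+        // through the prelude to the same item, the lint below fires.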
+
+ let mut is_redundant = PerNS { value_ns: None, type_ns: None, macro_ns: None };
+
+ let mut redundant_span = PerNS { value_ns: None, type_ns: None, macro_ns: None };
+
+ self.r.per_ns(|this, ns| {
+ if let Ok(binding) = source_bindings[ns].get() {
+ if binding.res() == Res::Err {
+ return;
+ }
+
+ match this.early_resolve_ident_in_lexical_scope(
+ target,
+ ScopeSet::All(ns, false),
+ &import.parent_scope,
+ None,
+ false,
+ target_bindings[ns].get(),
+ ) {
+ Ok(other_binding) => {
+ is_redundant[ns] = Some(
+ binding.res() == other_binding.res() && !other_binding.is_ambiguity(),
+ );
+ redundant_span[ns] = Some((other_binding.span, other_binding.is_import()));
+ }
+ Err(_) => is_redundant[ns] = Some(false),
+ }
+ }
+ });
+
+ if !is_redundant.is_empty() && is_redundant.present_items().all(|is_redundant| is_redundant)
+ {
+ let mut redundant_spans: Vec<_> = redundant_span.present_items().collect();
+ redundant_spans.sort();
+ redundant_spans.dedup();
+ self.r.lint_buffer.buffer_lint_with_diagnostic(
+ UNUSED_IMPORTS,
+ import.id,
+ import.span,
+ &format!("the item `{}` is imported redundantly", ident),
+ BuiltinLintDiagnostics::RedundantImport(redundant_spans, ident),
+ );
+ }
+ }
+
+ fn resolve_glob_import(&mut self, import: &'b Import<'b>) {
+ let ModuleOrUniformRoot::Module(module) = import.imported_module.get().unwrap() else {
+ self.r.session.span_err(import.span, "cannot glob-import all possible crates");
+ return;
+ };
+
+ if module.is_trait() {
+ self.r.session.span_err(import.span, "items in traits are not importable");
+ return;
+ } else if ptr::eq(module, import.parent_scope.module) {
+ return;
+ } else if let ImportKind::Glob { is_prelude: true, .. } = import.kind {
+ self.r.prelude = Some(module);
+ return;
+ }
+
+ // Add to module's glob_importers
+ module.glob_importers.borrow_mut().push(import);
+
+ // Ensure that `resolutions` isn't borrowed during `try_define`,
+ // since it might get updated via a glob cycle.
+ let bindings = self
+ .r
+ .resolutions(module)
+ .borrow()
+ .iter()
+ .filter_map(|(key, resolution)| {
+ resolution.borrow().binding().map(|binding| (*key, binding))
+ })
+ .collect::<Vec<_>>();
+ for (mut key, binding) in bindings {
+ let scope = match key.ident.span.reverse_glob_adjust(module.expansion, import.span) {
+ Some(Some(def)) => self.r.expn_def_scope(def),
+ Some(None) => import.parent_scope.module,
+ None => continue,
+ };
+ if self.r.is_accessible_from(binding.vis, scope) {
+ let imported_binding = self.r.import(binding, import);
+ let _ = self.r.try_define(import.parent_scope.module, key, imported_binding);
+ }
+ }
+
+ // Record the destination of this import
+ self.r.record_partial_res(import.id, PartialRes::new(module.res().unwrap()));
+ }
+
+ // Miscellaneous post-processing, including recording re-exports,
+ // reporting conflicts, and reporting unresolved imports.
+ fn finalize_resolutions_in(&mut self, module: Module<'b>) {
+ // Since import resolution is finished, globs will not define any more names.
+ *module.globs.borrow_mut() = Vec::new();
+
+ if let Some(def_id) = module.opt_def_id() {
+ let mut reexports = Vec::new();
+
+ module.for_each_child(self.r, |_, ident, _, binding| {
+ // FIXME: Consider changing the binding inserted by `#[macro_export] macro_rules`
+ // into the crate root to actual `NameBindingKind::Import`.
+ if binding.is_import()
+ || matches!(binding.kind, NameBindingKind::Res(_, _is_macro_export @ true))
+ {
+ let res = binding.res().expect_non_local();
+ // Ambiguous imports are treated as errors at this point and are
+ // not exposed to other crates (see #36837 for more details).
+ if res != def::Res::Err && !binding.is_ambiguity() {
+ reexports.push(ModChild {
+ ident,
+ res,
+ vis: binding.vis,
+ span: binding.span,
+ macro_rules: false,
+ });
+ }
+ }
+ });
+
+ if !reexports.is_empty() {
+                // Call to `expect_local` should be fine because this
+                // code is only called for local modules.
+ self.r.reexport_map.insert(def_id.expect_local(), reexports);
+ }
+ }
+ }
+}
+
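+// E.g. for a glob import written as `use std::collections::*;`, `names` is
+// `[std, collections]`, no segment span matches `span`, and the result is
+// "std::collections::*".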
+fn import_path_to_string(names: &[Ident], import_kind: &ImportKind<'_>, span: Span) -> String {
+ let pos = names.iter().position(|p| span == p.span && p.name != kw::PathRoot);
+ let global = !names.is_empty() && names[0].name == kw::PathRoot;
+ if let Some(pos) = pos {
+ let names = if global { &names[1..pos + 1] } else { &names[..pos + 1] };
+ names_to_string(&names.iter().map(|ident| ident.name).collect::<Vec<_>>())
+ } else {
+ let names = if global { &names[1..] } else { names };
+ if names.is_empty() {
+ import_kind_to_string(import_kind)
+ } else {
+ format!(
+ "{}::{}",
+ names_to_string(&names.iter().map(|ident| ident.name).collect::<Vec<_>>()),
+ import_kind_to_string(import_kind),
+ )
+ }
+ }
+}
+
+fn import_kind_to_string(import_kind: &ImportKind<'_>) -> String {
+ match import_kind {
+ ImportKind::Single { source, .. } => source.to_string(),
+ ImportKind::Glob { .. } => "*".to_string(),
+ ImportKind::ExternCrate { .. } => "<extern crate>".to_string(),
+ ImportKind::MacroUse => "#[macro_use]".to_string(),
+ }
+}
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs
new file mode 100644
index 000000000..dea3eaecd
--- /dev/null
+++ b/compiler/rustc_resolve/src/late.rs
@@ -0,0 +1,3984 @@
+// ignore-tidy-filelength
+//! "Late resolution" is the pass that resolves most names in a crate besides imports and macros.
+//! It runs when the crate is fully expanded and its module structure is fully built.
+//! So it just walks through the crate and resolves all the expressions, types, etc.
+//!
+//! If you wonder why there's no `early.rs`, that's because it's split into three files -
+//! `build_reduced_graph.rs`, `macros.rs` and `imports.rs`.
+
+use RibKind::*;
+
+use crate::{path_names_to_string, BindingError, Finalize, LexicalScopeBinding};
+use crate::{Module, ModuleOrUniformRoot, NameBinding, ParentScope, PathResult};
+use crate::{ResolutionError, Resolver, Segment, UseError};
+
+use rustc_ast::ptr::P;
+use rustc_ast::visit::{self, AssocCtxt, BoundKind, FnCtxt, FnKind, Visitor};
+use rustc_ast::*;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
+use rustc_errors::DiagnosticId;
+use rustc_hir::def::Namespace::{self, *};
+use rustc_hir::def::{self, CtorKind, DefKind, LifetimeRes, PartialRes, PerNS};
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_hir::{PrimTy, TraitCandidate};
+use rustc_middle::middle::resolve_lifetime::Set1;
+use rustc_middle::ty::DefIdTree;
+use rustc_middle::{bug, span_bug};
+use rustc_session::lint;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{BytePos, Span};
+use smallvec::{smallvec, SmallVec};
+
+use rustc_span::source_map::{respan, Spanned};
+use std::collections::{hash_map::Entry, BTreeSet};
+use std::mem::{replace, take};
+use tracing::debug;
+
+mod diagnostics;
+pub(crate) mod lifetimes;
+
+type Res = def::Res<NodeId>;
+
+type IdentMap<T> = FxHashMap<Ident, T>;
+
+/// Map from the name in a pattern to its binding mode.
+type BindingMap = IdentMap<BindingInfo>;
+
+use diagnostics::{
+ ElisionFnParameter, LifetimeElisionCandidate, MissingLifetime, MissingLifetimeKind,
+};
+
+#[derive(Copy, Clone, Debug)]
+struct BindingInfo {
+ span: Span,
+ binding_mode: BindingMode,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum PatternSource {
+ Match,
+ Let,
+ For,
+ FnParam,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+enum IsRepeatExpr {
+ No,
+ Yes,
+}
+
+impl PatternSource {
+ pub fn descr(self) -> &'static str {
+ match self {
+ PatternSource::Match => "match binding",
+ PatternSource::Let => "let binding",
+ PatternSource::For => "for binding",
+ PatternSource::FnParam => "function parameter",
+ }
+ }
+}
+
+/// Denotes whether the context for the set of already bound bindings is a `Product`
+/// or `Or` context. This is used in e.g., `fresh_binding` and `resolve_pattern_inner`.
+/// See those functions for more information.
+#[derive(PartialEq)]
+enum PatBoundCtx {
+ /// A product pattern context, e.g., `Variant(a, b)`.
+ Product,
+ /// An or-pattern context, e.g., `p_0 | ... | p_n`.
+ Or,
+}
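+// For instance, the sub-patterns of `(a, b)` are resolved in a `Product`
+// context (the bound names must be disjoint), while the alternatives of an
+// or-pattern are resolved in an `Or` context (each alternative must bind the
+// same names with the same modes):
+//
+//     fn unwrap_either<T>(r: Result<T, T>) -> T {
+//         match r {
+//             Ok(x) | Err(x) => x, // `x` must be bound by both alternatives
+//         }
+//     }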
+
+/// Does the item (from the item rib scope) allow generic parameters?
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum HasGenericParams {
+ Yes,
+ No,
+}
+
+impl HasGenericParams {
+ fn force_yes_if(self, b: bool) -> Self {
+ if b { Self::Yes } else { self }
+ }
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum ConstantItemKind {
+ Const,
+ Static,
+}
+
+/// The rib kind restricts certain accesses,
+/// e.g. to a `Res::Local` of an outer item.
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum RibKind<'a> {
+ /// No restriction needs to be applied.
+ NormalRibKind,
+
+ /// We passed through an impl or trait and are now in one of its
+ /// methods or associated types. Allow references to ty params that impl or trait
+    /// methods or associated types. Allow references to ty params that the impl or
+    /// trait binds. Disallow any other upvars (including other ty params that are
+ AssocItemRibKind,
+
+ /// We passed through a closure. Disallow labels.
+ ClosureOrAsyncRibKind,
+
+ /// We passed through a function definition. Disallow upvars.
+ /// Permit only those const parameters that are specified in the function's generics.
+ FnItemRibKind,
+
+ /// We passed through an item scope. Disallow upvars.
+ ItemRibKind(HasGenericParams),
+
+ /// We're in a constant item. Can't refer to dynamic stuff.
+ ///
+ /// The item may reference generic parameters in trivial constant expressions.
+ /// All other constants aren't allowed to use generic params at all.
+ ConstantItemRibKind(HasGenericParams, Option<(Ident, ConstantItemKind)>),
+
+ /// We passed through a module.
+ ModuleRibKind(Module<'a>),
+
+ /// We passed through a `macro_rules!` statement
+ MacroDefinition(DefId),
+
+ /// All bindings in this rib are generic parameters that can't be used
+ /// from the default of a generic parameter because they're not declared
+ /// before said generic parameter. Also see the `visit_generics` override.
+ ForwardGenericParamBanRibKind,
+
+ /// We are inside of the type of a const parameter. Can't refer to any
+ /// parameters.
+ ConstParamTyRibKind,
+
+ /// We are inside a `sym` inline assembly operand. Can only refer to
+ /// globals.
+ InlineAsmSymRibKind,
+}
+
+impl RibKind<'_> {
+ /// Whether this rib kind contains generic parameters, as opposed to local
+ /// variables.
+ pub(crate) fn contains_params(&self) -> bool {
+ match self {
+ NormalRibKind
+ | ClosureOrAsyncRibKind
+ | FnItemRibKind
+ | ConstantItemRibKind(..)
+ | ModuleRibKind(_)
+ | MacroDefinition(_)
+ | ConstParamTyRibKind
+ | InlineAsmSymRibKind => false,
+ AssocItemRibKind | ItemRibKind(_) | ForwardGenericParamBanRibKind => true,
+ }
+ }
+
+ /// This rib forbids referring to labels defined in upwards ribs.
+ fn is_label_barrier(self) -> bool {
+ match self {
+ NormalRibKind | MacroDefinition(..) => false,
+
+ AssocItemRibKind
+ | ClosureOrAsyncRibKind
+ | FnItemRibKind
+ | ItemRibKind(..)
+ | ConstantItemRibKind(..)
+ | ModuleRibKind(..)
+ | ForwardGenericParamBanRibKind
+ | ConstParamTyRibKind
+ | InlineAsmSymRibKind => true,
+ }
+ }
+}
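+// For example, `ClosureOrAsyncRibKind` is a label barrier, which is why a
+// label defined outside a closure cannot be used from inside it:
+//
+//     'outer: loop {
+//         let _c = || break 'outer; // error: use of unreachable label
+//     }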
+
+/// A single local scope.
+///
+/// A rib represents a scope names can live in. Note that these appear in many places, not just
+/// around braces. At any place where the list of accessible names (of the given namespace)
+/// changes or new restrictions on name accessibility are introduced, a new rib is put onto a
+/// stack. This may be, for example, a `let` statement (because it introduces variables), a macro,
+/// etc.
+///
+/// Different [rib kinds](enum@RibKind) are transparent for different names.
+///
+/// The resolution keeps a separate stack of ribs as it traverses the AST for each namespace. When
+/// resolving, the name is looked up from inside out.
+#[derive(Debug)]
+pub(crate) struct Rib<'a, R = Res> {
+ pub bindings: IdentMap<R>,
+ pub kind: RibKind<'a>,
+}
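+// A rough illustration of the value-namespace rib stack while resolving `x`
+// in the innermost block:
+//
+//     fn f(x: u8) {       // rib for the function body: binds `x`
+//         {               // rib for the block
+//             let y = x;  // `x` is found by walking the ribs inside out
+//         }
+//     }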
+
+impl<'a, R> Rib<'a, R> {
+ fn new(kind: RibKind<'a>) -> Rib<'a, R> {
+ Rib { bindings: Default::default(), kind }
+ }
+}
+
+#[derive(Clone, Copy, Debug)]
+enum LifetimeUseSet {
+ One { use_span: Span, use_ctxt: visit::LifetimeCtxt },
+ Many,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum LifetimeRibKind {
+ /// This rib acts as a barrier to forbid reference to lifetimes of a parent item.
+ Item,
+
+ /// This rib declares generic parameters.
+ Generics { binder: NodeId, span: Span, kind: LifetimeBinderKind },
+
+ /// FIXME(const_generics): This patches over an ICE caused by non-'static lifetimes in const
+ /// generics. We are disallowing this until we can decide on how we want to handle non-'static
+ /// lifetimes in const generics. See issue #74052 for discussion.
+ ConstGeneric,
+
+ /// Non-static lifetimes are prohibited in anonymous constants under `min_const_generics`.
+    /// An error is emitted if `generic_const_exprs` is not enabled and a
+    /// non-static lifetime is referenced from inside such an anonymous constant.
+ AnonConst,
+
+ /// Create a new anonymous lifetime parameter and reference it.
+ ///
+ /// If `report_in_path`, report an error when encountering lifetime elision in a path:
+ /// ```compile_fail
+ /// struct Foo<'a> { x: &'a () }
+ /// async fn foo(x: Foo) {}
+ /// ```
+ ///
+ /// Note: the error should not trigger when the elided lifetime is in a pattern or
+ /// expression-position path:
+ /// ```
+ /// struct Foo<'a> { x: &'a () }
+ /// async fn foo(Foo { x: _ }: Foo<'_>) {}
+ /// ```
+ AnonymousCreateParameter { binder: NodeId, report_in_path: bool },
+
+ /// Give a hard error when either `&` or `'_` is written. Used to
+ /// rule out things like `where T: Foo<'_>`. Does not imply an
+ /// error on default object bounds (e.g., `Box<dyn Foo>`).
+ AnonymousReportError,
+
+    /// Replace all anonymous lifetimes with the provided lifetime.
+ Elided(LifetimeRes),
+
+    /// Signals that we cannot determine what the anonymous lifetime should resolve to.
+ ElisionFailure,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum LifetimeBinderKind {
+ BareFnType,
+ PolyTrait,
+ WhereBound,
+ Item,
+ Function,
+ Closure,
+ ImplBlock,
+}
+
+impl LifetimeBinderKind {
+ fn descr(self) -> &'static str {
+ use LifetimeBinderKind::*;
+ match self {
+ BareFnType => "type",
+ PolyTrait => "bound",
+ WhereBound => "bound",
+ Item => "item",
+ ImplBlock => "impl block",
+ Function => "function",
+ Closure => "closure",
+ }
+ }
+}
+
+#[derive(Debug)]
+struct LifetimeRib {
+ kind: LifetimeRibKind,
+ // We need to preserve insertion order for async fns.
+ bindings: FxIndexMap<Ident, (NodeId, LifetimeRes)>,
+}
+
+impl LifetimeRib {
+ fn new(kind: LifetimeRibKind) -> LifetimeRib {
+ LifetimeRib { bindings: Default::default(), kind }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub(crate) enum AliasPossibility {
+ No,
+ Maybe,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub(crate) enum PathSource<'a> {
+ // Type paths `Path`.
+ Type,
+ // Trait paths in bounds or impls.
+ Trait(AliasPossibility),
+ // Expression paths `path`, with optional parent context.
+ Expr(Option<&'a Expr>),
+ // Paths in path patterns `Path`.
+ Pat,
+ // Paths in struct expressions and patterns `Path { .. }`.
+ Struct,
+ // Paths in tuple struct patterns `Path(..)`.
+ TupleStruct(Span, &'a [Span]),
+ // `m::A::B` in `<T as m::A>::B::C`.
+ TraitItem(Namespace),
+}
+
+impl<'a> PathSource<'a> {
+ fn namespace(self) -> Namespace {
+ match self {
+ PathSource::Type | PathSource::Trait(_) | PathSource::Struct => TypeNS,
+ PathSource::Expr(..) | PathSource::Pat | PathSource::TupleStruct(..) => ValueNS,
+ PathSource::TraitItem(ns) => ns,
+ }
+ }
+
+ fn defer_to_typeck(self) -> bool {
+ match self {
+ PathSource::Type
+ | PathSource::Expr(..)
+ | PathSource::Pat
+ | PathSource::Struct
+ | PathSource::TupleStruct(..) => true,
+ PathSource::Trait(_) | PathSource::TraitItem(..) => false,
+ }
+ }
+
+ fn descr_expected(self) -> &'static str {
+ match &self {
+ PathSource::Type => "type",
+ PathSource::Trait(_) => "trait",
+ PathSource::Pat => "unit struct, unit variant or constant",
+ PathSource::Struct => "struct, variant or union type",
+ PathSource::TupleStruct(..) => "tuple struct or tuple variant",
+ PathSource::TraitItem(ns) => match ns {
+ TypeNS => "associated type",
+ ValueNS => "method or associated constant",
+ MacroNS => bug!("associated macro"),
+ },
+ PathSource::Expr(parent) => match parent.as_ref().map(|p| &p.kind) {
+ // "function" here means "anything callable" rather than `DefKind::Fn`,
+ // this is not precise but usually more helpful than just "value".
+ Some(ExprKind::Call(call_expr, _)) => match &call_expr.kind {
+ // the case of `::some_crate()`
+ ExprKind::Path(_, path)
+ if path.segments.len() == 2
+ && path.segments[0].ident.name == kw::PathRoot =>
+ {
+ "external crate"
+ }
+ ExprKind::Path(_, path) => {
+ let mut msg = "function";
+ if let Some(segment) = path.segments.iter().last() {
+ if let Some(c) = segment.ident.to_string().chars().next() {
+ if c.is_uppercase() {
+ msg = "function, tuple struct or tuple variant";
+ }
+ }
+ }
+ msg
+ }
+ _ => "function",
+ },
+ _ => "value",
+ },
+ }
+ }
+
+ fn is_call(self) -> bool {
+ matches!(self, PathSource::Expr(Some(&Expr { kind: ExprKind::Call(..), .. })))
+ }
+
+ pub(crate) fn is_expected(self, res: Res) -> bool {
+ match self {
+ PathSource::Type => matches!(
+ res,
+ Res::Def(
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Trait
+ | DefKind::TraitAlias
+ | DefKind::TyAlias
+ | DefKind::AssocTy
+ | DefKind::TyParam
+ | DefKind::OpaqueTy
+ | DefKind::ForeignTy,
+ _,
+ ) | Res::PrimTy(..)
+ | Res::SelfTy { .. }
+ ),
+ PathSource::Trait(AliasPossibility::No) => matches!(res, Res::Def(DefKind::Trait, _)),
+ PathSource::Trait(AliasPossibility::Maybe) => {
+ matches!(res, Res::Def(DefKind::Trait | DefKind::TraitAlias, _))
+ }
+ PathSource::Expr(..) => matches!(
+ res,
+ Res::Def(
+ DefKind::Ctor(_, CtorKind::Const | CtorKind::Fn)
+ | DefKind::Const
+ | DefKind::Static(_)
+ | DefKind::Fn
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::ConstParam,
+ _,
+ ) | Res::Local(..)
+ | Res::SelfCtor(..)
+ ),
+ PathSource::Pat => {
+ res.expected_in_unit_struct_pat()
+ || matches!(res, Res::Def(DefKind::Const | DefKind::AssocConst, _))
+ }
+ PathSource::TupleStruct(..) => res.expected_in_tuple_struct_pat(),
+ PathSource::Struct => matches!(
+ res,
+ Res::Def(
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Variant
+ | DefKind::TyAlias
+ | DefKind::AssocTy,
+ _,
+ ) | Res::SelfTy { .. }
+ ),
+ PathSource::TraitItem(ns) => match res {
+ Res::Def(DefKind::AssocConst | DefKind::AssocFn, _) if ns == ValueNS => true,
+ Res::Def(DefKind::AssocTy, _) if ns == TypeNS => true,
+ _ => false,
+ },
+ }
+ }
+
+ fn error_code(self, has_unexpected_resolution: bool) -> DiagnosticId {
+ use rustc_errors::error_code;
+ match (self, has_unexpected_resolution) {
+ (PathSource::Trait(_), true) => error_code!(E0404),
+ (PathSource::Trait(_), false) => error_code!(E0405),
+ (PathSource::Type, true) => error_code!(E0573),
+ (PathSource::Type, false) => error_code!(E0412),
+ (PathSource::Struct, true) => error_code!(E0574),
+ (PathSource::Struct, false) => error_code!(E0422),
+ (PathSource::Expr(..), true) => error_code!(E0423),
+ (PathSource::Expr(..), false) => error_code!(E0425),
+ (PathSource::Pat | PathSource::TupleStruct(..), true) => error_code!(E0532),
+ (PathSource::Pat | PathSource::TupleStruct(..), false) => error_code!(E0531),
+ (PathSource::TraitItem(..), true) => error_code!(E0575),
+ (PathSource::TraitItem(..), false) => error_code!(E0576),
+ }
+ }
+}
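+// For example, in `let v: Vec<u8> = Vec::new();` the annotation `Vec<u8>` is
+// resolved with `PathSource::Type` and the call path `Vec::new` with
+// `PathSource::Expr(..)`; if an expression path resolves to something that is
+// not a value (say, a module), `error_code` above yields E0423.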
+
+#[derive(Default)]
+struct DiagnosticMetadata<'ast> {
+ /// The current trait's associated items' ident, used for diagnostic suggestions.
+ current_trait_assoc_items: Option<&'ast [P<AssocItem>]>,
+
+ /// The current self type if inside an impl (used for better errors).
+ current_self_type: Option<Ty>,
+
+ /// The current self item if inside an ADT (used for better errors).
+ current_self_item: Option<NodeId>,
+
+ /// The current trait (used to suggest).
+ current_item: Option<&'ast Item>,
+
+ /// When processing generics and encountering a type not found, suggest introducing a type
+ /// param.
+ currently_processing_generics: bool,
+
+ /// The current enclosing (non-closure) function (used for better errors).
+ current_function: Option<(FnKind<'ast>, Span)>,
+
+    /// A list of labels as yet unused. Labels will be removed from this map when
+    /// they are used (in a `break` or `continue` statement).
+ unused_labels: FxHashMap<NodeId, Span>,
+
+ /// Only used for better errors on `fn(): fn()`.
+ current_type_ascription: Vec<Span>,
+
+ /// Only used for better errors on `let x = { foo: bar };`.
+    /// In the case of a parse error with `let x = { foo: bar, };`, this isn't needed; it's only
+    /// needed for cases where this parses as a correct type ascription.
+ current_block_could_be_bare_struct_literal: Option<Span>,
+
+ /// Only used for better errors on `let <pat>: <expr, not type>;`.
+ current_let_binding: Option<(Span, Option<Span>, Option<Span>)>,
+
+ /// Used to detect possible `if let` written without `let` and to provide structured suggestion.
+ in_if_condition: Option<&'ast Expr>,
+
+    /// Set if we are currently in a trait object definition. Used to point at the bounds when
+ /// encountering a struct or enum.
+ current_trait_object: Option<&'ast [ast::GenericBound]>,
+
+ /// Given `where <T as Bar>::Baz: String`, suggest `where T: Bar<Baz = String>`.
+ current_where_predicate: Option<&'ast WherePredicate>,
+
+ current_type_path: Option<&'ast Ty>,
+
+ /// The current impl items (used to suggest).
+ current_impl_items: Option<&'ast [P<AssocItem>]>,
+
+ /// When processing impl trait
+ currently_processing_impl_trait: Option<(TraitRef, Ty)>,
+
+ /// Accumulate the errors due to missed lifetime elision,
+ /// and report them all at once for each function.
+ current_elision_failures: Vec<MissingLifetime>,
+}
+
+struct LateResolutionVisitor<'a, 'b, 'ast> {
+ r: &'b mut Resolver<'a>,
+
+ /// The module that represents the current item scope.
+ parent_scope: ParentScope<'a>,
+
+ /// The current set of local scopes for types and values.
+ /// FIXME #4948: Reuse ribs to avoid allocation.
+ ribs: PerNS<Vec<Rib<'a>>>,
+
+ /// The current set of local scopes, for labels.
+ label_ribs: Vec<Rib<'a, NodeId>>,
+
+ /// The current set of local scopes for lifetimes.
+ lifetime_ribs: Vec<LifetimeRib>,
+
+ /// We are looking for lifetimes in an elision context.
+ /// The set contains all the resolutions that we encountered so far.
+ /// They will be used to determine the correct lifetime for the fn return type.
+ /// The `LifetimeElisionCandidate` is used for diagnostics, to suggest introducing named
+ /// lifetimes.
+ lifetime_elision_candidates: Option<FxIndexMap<LifetimeRes, LifetimeElisionCandidate>>,
+
+ /// The trait that the current context can refer to.
+ current_trait_ref: Option<(Module<'a>, TraitRef)>,
+
+ /// Fields used to add information to diagnostic errors.
+ diagnostic_metadata: Box<DiagnosticMetadata<'ast>>,
+
+ /// State used to know whether to ignore resolution errors for function bodies.
+ ///
+ /// In particular, rustdoc uses this to avoid giving errors for `cfg()` items.
+    /// In most cases this will be `false`, in which case errors will always be reported.
+    /// It is set to `true` when entering a nested function or trait body.
+ in_func_body: bool,
+
+ /// Count the number of places a lifetime is used.
+ lifetime_uses: FxHashMap<LocalDefId, LifetimeUseSet>,
+}
+
+/// Walks the whole crate in DFS order, visiting each item, resolving names as it goes.
+impl<'a: 'ast, 'ast> Visitor<'ast> for LateResolutionVisitor<'a, '_, 'ast> {
+ fn visit_attribute(&mut self, _: &'ast Attribute) {
+ // We do not want to resolve expressions that appear in attributes,
+ // as they do not correspond to actual code.
+ }
+ fn visit_item(&mut self, item: &'ast Item) {
+ let prev = replace(&mut self.diagnostic_metadata.current_item, Some(item));
+ // Always report errors in items we just entered.
+ let old_ignore = replace(&mut self.in_func_body, false);
+ self.with_lifetime_rib(LifetimeRibKind::Item, |this| this.resolve_item(item));
+ self.in_func_body = old_ignore;
+ self.diagnostic_metadata.current_item = prev;
+ }
+ fn visit_arm(&mut self, arm: &'ast Arm) {
+ self.resolve_arm(arm);
+ }
+ fn visit_block(&mut self, block: &'ast Block) {
+ self.resolve_block(block);
+ }
+ fn visit_anon_const(&mut self, constant: &'ast AnonConst) {
+ // We deal with repeat expressions explicitly in `resolve_expr`.
+ self.with_lifetime_rib(LifetimeRibKind::AnonConst, |this| {
+ this.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Static), |this| {
+ this.resolve_anon_const(constant, IsRepeatExpr::No);
+ })
+ })
+ }
+ fn visit_expr(&mut self, expr: &'ast Expr) {
+ self.resolve_expr(expr, None);
+ }
+ fn visit_local(&mut self, local: &'ast Local) {
+ let local_spans = match local.pat.kind {
+ // We check for this to avoid tuple struct fields.
+ PatKind::Wild => None,
+ _ => Some((
+ local.pat.span,
+ local.ty.as_ref().map(|ty| ty.span),
+ local.kind.init().map(|init| init.span),
+ )),
+ };
+ let original = replace(&mut self.diagnostic_metadata.current_let_binding, local_spans);
+ self.resolve_local(local);
+ self.diagnostic_metadata.current_let_binding = original;
+ }
+ fn visit_ty(&mut self, ty: &'ast Ty) {
+ let prev = self.diagnostic_metadata.current_trait_object;
+ let prev_ty = self.diagnostic_metadata.current_type_path;
+ match ty.kind {
+ TyKind::Rptr(None, _) => {
+ // Elided lifetime in reference: we resolve as if there was some lifetime `'_` with
+ // NodeId `ty.id`.
+ // This span will be used in case of elision failure.
+ let span = self.r.session.source_map().next_point(ty.span.shrink_to_lo());
+ self.resolve_elided_lifetime(ty.id, span);
+ visit::walk_ty(self, ty);
+ }
+ TyKind::Path(ref qself, ref path) => {
+ self.diagnostic_metadata.current_type_path = Some(ty);
+ self.smart_resolve_path(ty.id, qself.as_ref(), path, PathSource::Type);
+
+ // Check whether we should interpret this as a bare trait object.
+ if qself.is_none()
+ && let Some(partial_res) = self.r.partial_res_map.get(&ty.id)
+ && partial_res.unresolved_segments() == 0
+ && let Res::Def(DefKind::Trait | DefKind::TraitAlias, _) = partial_res.base_res()
+ {
+ // This path is actually a bare trait object. In case of a bare `Fn`-trait
+ // object with anonymous lifetimes, we need this rib to correctly place the
+ // synthetic lifetimes.
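+                    // E.g. `type T = Box<Fn(&u8)>;` (note the missing `dyn`):
+                    // the path `Fn(&u8)` resolves to a trait, and this rib
+                    // hosts the anonymous lifetime of `&u8`.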
+ let span = ty.span.shrink_to_lo().to(path.span.shrink_to_lo());
+ self.with_generic_param_rib(
+ &[],
+ NormalRibKind,
+ LifetimeRibKind::Generics {
+ binder: ty.id,
+ kind: LifetimeBinderKind::PolyTrait,
+ span,
+ },
+ |this| this.visit_path(&path, ty.id),
+ );
+ } else {
+ visit::walk_ty(self, ty)
+ }
+ }
+ TyKind::ImplicitSelf => {
+ let self_ty = Ident::with_dummy_span(kw::SelfUpper);
+ let res = self
+ .resolve_ident_in_lexical_scope(
+ self_ty,
+ TypeNS,
+ Some(Finalize::new(ty.id, ty.span)),
+ None,
+ )
+ .map_or(Res::Err, |d| d.res());
+ self.r.record_partial_res(ty.id, PartialRes::new(res));
+ visit::walk_ty(self, ty)
+ }
+ TyKind::ImplTrait(..) => {
+ let candidates = self.lifetime_elision_candidates.take();
+ visit::walk_ty(self, ty);
+ self.lifetime_elision_candidates = candidates;
+ }
+ TyKind::TraitObject(ref bounds, ..) => {
+ self.diagnostic_metadata.current_trait_object = Some(&bounds[..]);
+ visit::walk_ty(self, ty)
+ }
+ TyKind::BareFn(ref bare_fn) => {
+ let span = ty.span.shrink_to_lo().to(bare_fn.decl_span.shrink_to_lo());
+ self.with_generic_param_rib(
+ &bare_fn.generic_params,
+ NormalRibKind,
+ LifetimeRibKind::Generics {
+ binder: ty.id,
+ kind: LifetimeBinderKind::BareFnType,
+ span,
+ },
+ |this| {
+ this.visit_generic_params(&bare_fn.generic_params, false);
+ this.with_lifetime_rib(
+ LifetimeRibKind::AnonymousCreateParameter {
+ binder: ty.id,
+ report_in_path: false,
+ },
+ |this| {
+ this.resolve_fn_signature(
+ ty.id,
+ false,
+ // We don't need to deal with patterns in parameters, because
+ // they are not possible for foreign or bodiless functions.
+ bare_fn
+ .decl
+ .inputs
+ .iter()
+ .map(|Param { ty, .. }| (None, &**ty)),
+ &bare_fn.decl.output,
+ )
+ },
+ );
+ },
+ )
+ }
+ _ => visit::walk_ty(self, ty),
+ }
+ self.diagnostic_metadata.current_trait_object = prev;
+ self.diagnostic_metadata.current_type_path = prev_ty;
+ }
+ fn visit_poly_trait_ref(&mut self, tref: &'ast PolyTraitRef, _: &'ast TraitBoundModifier) {
+ let span = tref.span.shrink_to_lo().to(tref.trait_ref.path.span.shrink_to_lo());
+ self.with_generic_param_rib(
+ &tref.bound_generic_params,
+ NormalRibKind,
+ LifetimeRibKind::Generics {
+ binder: tref.trait_ref.ref_id,
+ kind: LifetimeBinderKind::PolyTrait,
+ span,
+ },
+ |this| {
+ this.visit_generic_params(&tref.bound_generic_params, false);
+ this.smart_resolve_path(
+ tref.trait_ref.ref_id,
+ None,
+ &tref.trait_ref.path,
+ PathSource::Trait(AliasPossibility::Maybe),
+ );
+ this.visit_trait_ref(&tref.trait_ref);
+ },
+ );
+ }
+ fn visit_foreign_item(&mut self, foreign_item: &'ast ForeignItem) {
+ match foreign_item.kind {
+ ForeignItemKind::TyAlias(box TyAlias { ref generics, .. }) => {
+ self.with_lifetime_rib(LifetimeRibKind::Item, |this| {
+ this.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes),
+ LifetimeRibKind::Generics {
+ binder: foreign_item.id,
+ kind: LifetimeBinderKind::Item,
+ span: generics.span,
+ },
+ |this| visit::walk_foreign_item(this, foreign_item),
+ )
+ });
+ }
+ ForeignItemKind::Fn(box Fn { ref generics, .. }) => {
+ self.with_lifetime_rib(LifetimeRibKind::Item, |this| {
+ this.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes),
+ LifetimeRibKind::Generics {
+ binder: foreign_item.id,
+ kind: LifetimeBinderKind::Function,
+ span: generics.span,
+ },
+ |this| visit::walk_foreign_item(this, foreign_item),
+ )
+ });
+ }
+ ForeignItemKind::Static(..) => {
+ self.with_item_rib(|this| {
+ visit::walk_foreign_item(this, foreign_item);
+ });
+ }
+ ForeignItemKind::MacCall(..) => {
+ panic!("unexpanded macro in resolve!")
+ }
+ }
+ }
+ fn visit_fn(&mut self, fn_kind: FnKind<'ast>, sp: Span, fn_id: NodeId) {
+ let rib_kind = match fn_kind {
+ // Bail if the function is foreign, and thus cannot validly have
+ // a body, or if there's no body for some other reason.
+ FnKind::Fn(FnCtxt::Foreign, _, sig, _, generics, _)
+ | FnKind::Fn(_, _, sig, _, generics, None) => {
+ self.visit_fn_header(&sig.header);
+ self.visit_generics(generics);
+ self.with_lifetime_rib(
+ LifetimeRibKind::AnonymousCreateParameter {
+ binder: fn_id,
+ report_in_path: false,
+ },
+ |this| {
+ this.resolve_fn_signature(
+ fn_id,
+ sig.decl.has_self(),
+ sig.decl.inputs.iter().map(|Param { ty, .. }| (None, &**ty)),
+ &sig.decl.output,
+ )
+ },
+ );
+ return;
+ }
+ FnKind::Fn(FnCtxt::Free, ..) => FnItemRibKind,
+ FnKind::Fn(FnCtxt::Assoc(_), ..) => NormalRibKind,
+ FnKind::Closure(..) => ClosureOrAsyncRibKind,
+ };
+ let previous_value = self.diagnostic_metadata.current_function;
+ if matches!(fn_kind, FnKind::Fn(..)) {
+ self.diagnostic_metadata.current_function = Some((fn_kind, sp));
+ }
+ debug!("(resolving function) entering function");
+
+ // Create a value rib for the function.
+ self.with_rib(ValueNS, rib_kind, |this| {
+ // Create a label rib for the function.
+ this.with_label_rib(FnItemRibKind, |this| {
+ match fn_kind {
+ FnKind::Fn(_, _, sig, _, generics, body) => {
+ this.visit_generics(generics);
+
+ let declaration = &sig.decl;
+ let async_node_id = sig.header.asyncness.opt_return_id();
+
+ this.with_lifetime_rib(
+ LifetimeRibKind::AnonymousCreateParameter {
+ binder: fn_id,
+ report_in_path: async_node_id.is_some(),
+ },
+ |this| {
+ this.resolve_fn_signature(
+ fn_id,
+ declaration.has_self(),
+ declaration
+ .inputs
+ .iter()
+ .map(|Param { pat, ty, .. }| (Some(&**pat), &**ty)),
+ &declaration.output,
+ )
+ },
+ );
+
+ // Construct the list of in-scope lifetime parameters for async lowering.
+ // We include all lifetime parameters, either named or "Fresh".
+ // The order of those parameters does not matter, as long as it is
+ // deterministic.
+ if let Some(async_node_id) = async_node_id {
+ let mut extra_lifetime_params = this
+ .r
+ .extra_lifetime_params_map
+ .get(&fn_id)
+ .cloned()
+ .unwrap_or_default();
+ for rib in this.lifetime_ribs.iter().rev() {
+ extra_lifetime_params.extend(
+ rib.bindings
+ .iter()
+ .map(|(&ident, &(node_id, res))| (ident, node_id, res)),
+ );
+ match rib.kind {
+ LifetimeRibKind::Item => break,
+ LifetimeRibKind::AnonymousCreateParameter {
+ binder, ..
+ } => {
+ if let Some(earlier_fresh) =
+ this.r.extra_lifetime_params_map.get(&binder)
+ {
+ extra_lifetime_params.extend(earlier_fresh);
+ }
+ }
+ _ => {}
+ }
+ }
+ this.r
+ .extra_lifetime_params_map
+ .insert(async_node_id, extra_lifetime_params);
+ }
+
+ if let Some(body) = body {
+ // Ignore errors in function bodies if this is rustdoc
+ // Be sure not to set this until the function signature has been resolved.
+ let previous_state = replace(&mut this.in_func_body, true);
+ // Resolve the function body, potentially inside the body of an async closure
+ this.with_lifetime_rib(
+ LifetimeRibKind::Elided(LifetimeRes::Infer),
+ |this| this.visit_block(body),
+ );
+
+ debug!("(resolving function) leaving function");
+ this.in_func_body = previous_state;
+ }
+ }
+ FnKind::Closure(binder, declaration, body) => {
+ this.visit_closure_binder(binder);
+
+ this.with_lifetime_rib(
+ match binder {
+ // We do not have any explicit generic lifetime parameter.
+ ClosureBinder::NotPresent => {
+ LifetimeRibKind::AnonymousCreateParameter {
+ binder: fn_id,
+ report_in_path: false,
+ }
+ }
+ ClosureBinder::For { .. } => LifetimeRibKind::AnonymousReportError,
+ },
+ // Add each argument to the rib.
+ |this| this.resolve_params(&declaration.inputs),
+ );
+ this.with_lifetime_rib(
+ match binder {
+ ClosureBinder::NotPresent => {
+ LifetimeRibKind::Elided(LifetimeRes::Infer)
+ }
+ ClosureBinder::For { .. } => LifetimeRibKind::AnonymousReportError,
+ },
+ |this| visit::walk_fn_ret_ty(this, &declaration.output),
+ );
+
+ // Ignore errors in function bodies if this is rustdoc
+ // Be sure not to set this until the function signature has been resolved.
+ let previous_state = replace(&mut this.in_func_body, true);
+ // Resolve the function body, potentially inside the body of an async closure
+ this.with_lifetime_rib(
+ LifetimeRibKind::Elided(LifetimeRes::Infer),
+ |this| this.visit_expr(body),
+ );
+
+ debug!("(resolving function) leaving function");
+ this.in_func_body = previous_state;
+ }
+ }
+ })
+ });
+ self.diagnostic_metadata.current_function = previous_value;
+ }
+ fn visit_lifetime(&mut self, lifetime: &'ast Lifetime, use_ctxt: visit::LifetimeCtxt) {
+ self.resolve_lifetime(lifetime, use_ctxt)
+ }
+
+ fn visit_generics(&mut self, generics: &'ast Generics) {
+ self.visit_generic_params(
+ &generics.params,
+ self.diagnostic_metadata.current_self_item.is_some(),
+ );
+ for p in &generics.where_clause.predicates {
+ self.visit_where_predicate(p);
+ }
+ }
+
+ fn visit_closure_binder(&mut self, b: &'ast ClosureBinder) {
+ match b {
+ ClosureBinder::NotPresent => {}
+ ClosureBinder::For { generic_params, .. } => {
+ self.visit_generic_params(
+ &generic_params,
+ self.diagnostic_metadata.current_self_item.is_some(),
+ );
+ }
+ }
+ }
+
+ fn visit_generic_arg(&mut self, arg: &'ast GenericArg) {
+ debug!("visit_generic_arg({:?})", arg);
+ let prev = replace(&mut self.diagnostic_metadata.currently_processing_generics, true);
+ match arg {
+ GenericArg::Type(ref ty) => {
+ // We parse const arguments as path types as we cannot distinguish them during
+                // parsing. We try to resolve that ambiguity by attempting resolution in the type
+                // namespace first, and if that fails we try again in the value namespace. If
+                // resolution in the value namespace succeeds, we have a generic const argument on
+                // our hands.
+ if let TyKind::Path(ref qself, ref path) = ty.kind {
+ // We cannot disambiguate multi-segment paths right now as that requires type
+ // checking.
+ if path.segments.len() == 1 && path.segments[0].args.is_none() {
+ let mut check_ns = |ns| {
+ self.maybe_resolve_ident_in_lexical_scope(path.segments[0].ident, ns)
+ .is_some()
+ };
+ if !check_ns(TypeNS) && check_ns(ValueNS) {
+ // This must be equivalent to `visit_anon_const`, but we cannot call it
+ // directly due to visitor lifetimes so we have to copy-paste some code.
+ //
+                        // Note that we might not be inside of a repeat expression here,
+                        // but considering that `IsRepeatExpr` is only relevant for
+                        // non-trivial constants this doesn't matter.
+ self.with_constant_rib(
+ IsRepeatExpr::No,
+ HasGenericParams::Yes,
+ None,
+ |this| {
+ this.smart_resolve_path(
+ ty.id,
+ qself.as_ref(),
+ path,
+ PathSource::Expr(None),
+ );
+
+ if let Some(ref qself) = *qself {
+ this.visit_ty(&qself.ty);
+ }
+ this.visit_path(path, ty.id);
+ },
+ );
+
+ self.diagnostic_metadata.currently_processing_generics = prev;
+ return;
+ }
+ }
+ }
+
+ self.visit_ty(ty);
+ }
+ GenericArg::Lifetime(lt) => self.visit_lifetime(lt, visit::LifetimeCtxt::GenericArg),
+ GenericArg::Const(ct) => self.visit_anon_const(ct),
+ }
+ self.diagnostic_metadata.currently_processing_generics = prev;
+ }
+
+ fn visit_assoc_constraint(&mut self, constraint: &'ast AssocConstraint) {
+ self.visit_ident(constraint.ident);
+ if let Some(ref gen_args) = constraint.gen_args {
+ // Forbid anonymous lifetimes in GAT parameters until proper semantics are decided.
+ self.with_lifetime_rib(LifetimeRibKind::AnonymousReportError, |this| {
+ this.visit_generic_args(gen_args.span(), gen_args)
+ });
+ }
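+        // E.g. an explicit `'_` in GAT argument position, as in
+        // `T: Trait<Assoc<'_> = u8>`, hits the AnonymousReportError rib above
+        // and is rejected with E0637.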
+ match constraint.kind {
+ AssocConstraintKind::Equality { ref term } => match term {
+ Term::Ty(ty) => self.visit_ty(ty),
+ Term::Const(c) => self.visit_anon_const(c),
+ },
+ AssocConstraintKind::Bound { ref bounds } => {
+ walk_list!(self, visit_param_bound, bounds, BoundKind::Bound);
+ }
+ }
+ }
+
+ fn visit_path_segment(&mut self, path_span: Span, path_segment: &'ast PathSegment) {
+ if let Some(ref args) = path_segment.args {
+ match &**args {
+ GenericArgs::AngleBracketed(..) => visit::walk_generic_args(self, path_span, args),
+ GenericArgs::Parenthesized(p_args) => {
+ // Probe the lifetime ribs to know how to behave.
+ for rib in self.lifetime_ribs.iter().rev() {
+ match rib.kind {
+ // We are inside a `PolyTraitRef`. The lifetimes are
+                            // to be introduced in that (maybe implicit) `for<>` binder.
+ LifetimeRibKind::Generics {
+ binder,
+ kind: LifetimeBinderKind::PolyTrait,
+ ..
+ } => {
+ self.with_lifetime_rib(
+ LifetimeRibKind::AnonymousCreateParameter {
+ binder,
+ report_in_path: false,
+ },
+ |this| {
+ this.resolve_fn_signature(
+ binder,
+ false,
+ p_args.inputs.iter().map(|ty| (None, &**ty)),
+ &p_args.output,
+ )
+ },
+ );
+ break;
+ }
+ // We have nowhere to introduce generics. Code is malformed,
+ // so use regular lifetime resolution to avoid spurious errors.
+ LifetimeRibKind::Item | LifetimeRibKind::Generics { .. } => {
+ visit::walk_generic_args(self, path_span, args);
+ break;
+ }
+ LifetimeRibKind::AnonymousCreateParameter { .. }
+ | LifetimeRibKind::AnonymousReportError
+ | LifetimeRibKind::Elided(_)
+ | LifetimeRibKind::ElisionFailure
+ | LifetimeRibKind::AnonConst
+ | LifetimeRibKind::ConstGeneric => {}
+ }
+ }
+ }
+ }
+ }
+ }
+
+ fn visit_where_predicate(&mut self, p: &'ast WherePredicate) {
+ debug!("visit_where_predicate {:?}", p);
+ let previous_value =
+ replace(&mut self.diagnostic_metadata.current_where_predicate, Some(p));
+ self.with_lifetime_rib(LifetimeRibKind::AnonymousReportError, |this| {
+ if let WherePredicate::BoundPredicate(WhereBoundPredicate {
+ ref bounded_ty,
+ ref bounds,
+ ref bound_generic_params,
+ span: predicate_span,
+ ..
+ }) = p
+ {
+ let span = predicate_span.shrink_to_lo().to(bounded_ty.span.shrink_to_lo());
+ this.with_generic_param_rib(
+ &bound_generic_params,
+ NormalRibKind,
+ LifetimeRibKind::Generics {
+ binder: bounded_ty.id,
+ kind: LifetimeBinderKind::WhereBound,
+ span,
+ },
+ |this| {
+ this.visit_generic_params(&bound_generic_params, false);
+ this.visit_ty(bounded_ty);
+ for bound in bounds {
+ this.visit_param_bound(bound, BoundKind::Bound)
+ }
+ },
+ );
+ } else {
+ visit::walk_where_predicate(this, p);
+ }
+ });
+ self.diagnostic_metadata.current_where_predicate = previous_value;
+ }
+
+ fn visit_inline_asm(&mut self, asm: &'ast InlineAsm) {
+ for (op, _) in &asm.operands {
+ match op {
+ InlineAsmOperand::In { expr, .. }
+ | InlineAsmOperand::Out { expr: Some(expr), .. }
+ | InlineAsmOperand::InOut { expr, .. } => self.visit_expr(expr),
+ InlineAsmOperand::Out { expr: None, .. } => {}
+ InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ self.visit_expr(in_expr);
+ if let Some(out_expr) = out_expr {
+ self.visit_expr(out_expr);
+ }
+ }
+ InlineAsmOperand::Const { anon_const, .. } => {
+ // Although this is `DefKind::AnonConst`, it is allowed to reference outer
+ // generic parameters like an inline const.
+ self.resolve_inline_const(anon_const);
+ }
+ InlineAsmOperand::Sym { sym } => self.visit_inline_asm_sym(sym),
+ }
+ }
+ }
+
+ fn visit_inline_asm_sym(&mut self, sym: &'ast InlineAsmSym) {
+ // This is similar to the code for AnonConst.
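+        // E.g. `asm!("call {}", sym my_fn)` resolves `my_fn` here as an
+        // expression path; the InlineAsmSymRibKind ribs pushed below restrict
+        // it to globals, so a local variable in `sym` position is an error.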
+ self.with_rib(ValueNS, InlineAsmSymRibKind, |this| {
+ this.with_rib(TypeNS, InlineAsmSymRibKind, |this| {
+ this.with_label_rib(InlineAsmSymRibKind, |this| {
+ this.smart_resolve_path(
+ sym.id,
+ sym.qself.as_ref(),
+ &sym.path,
+ PathSource::Expr(None),
+ );
+ visit::walk_inline_asm_sym(this, sym);
+ });
+ })
+ });
+ }
+}
+
+impl<'a: 'ast, 'b, 'ast> LateResolutionVisitor<'a, 'b, 'ast> {
+ fn new(resolver: &'b mut Resolver<'a>) -> LateResolutionVisitor<'a, 'b, 'ast> {
+ // During late resolution we only track the module component of the parent scope,
+ // although it may be useful to track other components as well for diagnostics.
+ let graph_root = resolver.graph_root;
+ let parent_scope = ParentScope::module(graph_root, resolver);
+ let start_rib_kind = ModuleRibKind(graph_root);
+ LateResolutionVisitor {
+ r: resolver,
+ parent_scope,
+ ribs: PerNS {
+ value_ns: vec![Rib::new(start_rib_kind)],
+ type_ns: vec![Rib::new(start_rib_kind)],
+ macro_ns: vec![Rib::new(start_rib_kind)],
+ },
+ label_ribs: Vec::new(),
+ lifetime_ribs: Vec::new(),
+ lifetime_elision_candidates: None,
+ current_trait_ref: None,
+ diagnostic_metadata: Box::new(DiagnosticMetadata::default()),
+ // errors at module scope should always be reported
+ in_func_body: false,
+ lifetime_uses: Default::default(),
+ }
+ }
+
+ fn maybe_resolve_ident_in_lexical_scope(
+ &mut self,
+ ident: Ident,
+ ns: Namespace,
+ ) -> Option<LexicalScopeBinding<'a>> {
+ self.r.resolve_ident_in_lexical_scope(
+ ident,
+ ns,
+ &self.parent_scope,
+ None,
+ &self.ribs[ns],
+ None,
+ )
+ }
+
+ fn resolve_ident_in_lexical_scope(
+ &mut self,
+ ident: Ident,
+ ns: Namespace,
+ finalize: Option<Finalize>,
+ ignore_binding: Option<&'a NameBinding<'a>>,
+ ) -> Option<LexicalScopeBinding<'a>> {
+ self.r.resolve_ident_in_lexical_scope(
+ ident,
+ ns,
+ &self.parent_scope,
+ finalize,
+ &self.ribs[ns],
+ ignore_binding,
+ )
+ }
+
+ fn resolve_path(
+ &mut self,
+ path: &[Segment],
+ opt_ns: Option<Namespace>, // `None` indicates a module path in import
+ finalize: Option<Finalize>,
+ ) -> PathResult<'a> {
+ self.r.resolve_path_with_ribs(
+ path,
+ opt_ns,
+ &self.parent_scope,
+ finalize,
+ Some(&self.ribs),
+ None,
+ )
+ }
+
+ // AST resolution
+ //
+ // We maintain a list of value ribs and type ribs.
+ //
+ // Simultaneously, we keep track of the current position in the module
+ // graph in the `parent_scope.module` pointer. When we go to resolve a name in
+ // the value or type namespaces, we first look through all the ribs and
+ // then query the module graph. When we resolve a name in the module
+ // namespace, we can skip all the ribs (since nested modules are not
+ // allowed within blocks in Rust) and jump straight to the current module
+ // graph node.
+ //
+ // Named implementations are handled separately. When we find a method
+ // call, we consult the module node to find all of the implementations in
+ // scope. This information is lazily cached in the module node. We then
+ // generate a fake "implementation scope" containing all the
+    // implementations thus found, for compatibility with the old resolve pass.
+
+ /// Do some `work` within a new innermost rib of the given `kind` in the given namespace (`ns`).
+ fn with_rib<T>(
+ &mut self,
+ ns: Namespace,
+ kind: RibKind<'a>,
+ work: impl FnOnce(&mut Self) -> T,
+ ) -> T {
+ self.ribs[ns].push(Rib::new(kind));
+ let ret = work(self);
+ self.ribs[ns].pop();
+ ret
+ }
+
+ fn with_scope<T>(&mut self, id: NodeId, f: impl FnOnce(&mut Self) -> T) -> T {
+ if let Some(module) = self.r.get_module(self.r.local_def_id(id).to_def_id()) {
+ // Move down in the graph.
+ let orig_module = replace(&mut self.parent_scope.module, module);
+ self.with_rib(ValueNS, ModuleRibKind(module), |this| {
+ this.with_rib(TypeNS, ModuleRibKind(module), |this| {
+ let ret = f(this);
+ this.parent_scope.module = orig_module;
+ ret
+ })
+ })
+ } else {
+ f(self)
+ }
+ }
+
+ fn visit_generic_params(&mut self, params: &'ast [GenericParam], add_self_upper: bool) {
+ // For type parameter defaults, we have to ban access
+ // to following type parameters, as the InternalSubsts can only
+ // provide previous type parameters as they're built. We
+ // put all the parameters on the ban list and then remove
+ // them one by one as they are processed and become available.
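+        // E.g. `struct S<T = U, U = ()>(T, U);` is rejected (E0128): when the
+        // default of `T` is resolved, `U` is still in the forward ban rib.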
+ let mut forward_ty_ban_rib = Rib::new(ForwardGenericParamBanRibKind);
+ let mut forward_const_ban_rib = Rib::new(ForwardGenericParamBanRibKind);
+ for param in params.iter() {
+ match param.kind {
+ GenericParamKind::Type { .. } => {
+ forward_ty_ban_rib
+ .bindings
+ .insert(Ident::with_dummy_span(param.ident.name), Res::Err);
+ }
+ GenericParamKind::Const { .. } => {
+ forward_const_ban_rib
+ .bindings
+ .insert(Ident::with_dummy_span(param.ident.name), Res::Err);
+ }
+ GenericParamKind::Lifetime => {}
+ }
+ }
+
+ // rust-lang/rust#61631: The type `Self` is essentially
+ // another type parameter. For ADTs, we consider it
+ // well-defined only after all of the ADT type parameters have
+ // been provided. Therefore, we do not allow use of `Self`
+ // anywhere in ADT type parameter defaults.
+ //
+ // (We however cannot ban `Self` for defaults on *all* generic
+ // lists; e.g. trait generics can usefully refer to `Self`,
+ // such as in the case of `trait Add<Rhs = Self>`.)
+ if add_self_upper {
+        // (`add_self_upper` is `true` if and only if we are in an ADT's generics.)
+ forward_ty_ban_rib.bindings.insert(Ident::with_dummy_span(kw::SelfUpper), Res::Err);
+ }
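+        // E.g. `struct Foo<T = Self>(T);` is an error, while
+        // `trait Add<Rhs = Self> { /* ... */ }` remains accepted, since the
+        // ban only applies to an ADT's generics.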
+
+ self.with_lifetime_rib(LifetimeRibKind::AnonymousReportError, |this| {
+ for param in params {
+ match param.kind {
+ GenericParamKind::Lifetime => {
+ for bound in &param.bounds {
+ this.visit_param_bound(bound, BoundKind::Bound);
+ }
+ }
+ GenericParamKind::Type { ref default } => {
+ for bound in &param.bounds {
+ this.visit_param_bound(bound, BoundKind::Bound);
+ }
+
+ if let Some(ref ty) = default {
+ this.ribs[TypeNS].push(forward_ty_ban_rib);
+ this.ribs[ValueNS].push(forward_const_ban_rib);
+ this.visit_ty(ty);
+ forward_const_ban_rib = this.ribs[ValueNS].pop().unwrap();
+ forward_ty_ban_rib = this.ribs[TypeNS].pop().unwrap();
+ }
+
+ // Allow all following defaults to refer to this type parameter.
+ forward_ty_ban_rib
+ .bindings
+ .remove(&Ident::with_dummy_span(param.ident.name));
+ }
+ GenericParamKind::Const { ref ty, kw_span: _, ref default } => {
+ // Const parameters can't have param bounds.
+ assert!(param.bounds.is_empty());
+
+ this.ribs[TypeNS].push(Rib::new(ConstParamTyRibKind));
+ this.ribs[ValueNS].push(Rib::new(ConstParamTyRibKind));
+ this.with_lifetime_rib(LifetimeRibKind::ConstGeneric, |this| {
+ this.visit_ty(ty)
+ });
+ this.ribs[TypeNS].pop().unwrap();
+ this.ribs[ValueNS].pop().unwrap();
+
+ if let Some(ref expr) = default {
+ this.ribs[TypeNS].push(forward_ty_ban_rib);
+ this.ribs[ValueNS].push(forward_const_ban_rib);
+ this.with_lifetime_rib(LifetimeRibKind::ConstGeneric, |this| {
+ this.resolve_anon_const(expr, IsRepeatExpr::No)
+ });
+ forward_const_ban_rib = this.ribs[ValueNS].pop().unwrap();
+ forward_ty_ban_rib = this.ribs[TypeNS].pop().unwrap();
+ }
+
+ // Allow all following defaults to refer to this const parameter.
+ forward_const_ban_rib
+ .bindings
+ .remove(&Ident::with_dummy_span(param.ident.name));
+ }
+ }
+ }
+ })
+ }
+
+ #[tracing::instrument(level = "debug", skip(self, work))]
+ fn with_lifetime_rib<T>(
+ &mut self,
+ kind: LifetimeRibKind,
+ work: impl FnOnce(&mut Self) -> T,
+ ) -> T {
+ self.lifetime_ribs.push(LifetimeRib::new(kind));
+ let outer_elision_candidates = self.lifetime_elision_candidates.take();
+ let ret = work(self);
+ self.lifetime_elision_candidates = outer_elision_candidates;
+ self.lifetime_ribs.pop();
+ ret
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn resolve_lifetime(&mut self, lifetime: &'ast Lifetime, use_ctxt: visit::LifetimeCtxt) {
+ let ident = lifetime.ident;
+
+ if ident.name == kw::StaticLifetime {
+ self.record_lifetime_res(
+ lifetime.id,
+ LifetimeRes::Static,
+ LifetimeElisionCandidate::Named,
+ );
+ return;
+ }
+
+ if ident.name == kw::UnderscoreLifetime {
+ return self.resolve_anonymous_lifetime(lifetime, false);
+ }
+
+ let mut indices = (0..self.lifetime_ribs.len()).rev();
+ for i in &mut indices {
+ let rib = &self.lifetime_ribs[i];
+ let normalized_ident = ident.normalize_to_macros_2_0();
+ if let Some(&(_, res)) = rib.bindings.get(&normalized_ident) {
+ self.record_lifetime_res(lifetime.id, res, LifetimeElisionCandidate::Named);
+
+ if let LifetimeRes::Param { param, .. } = res {
+ match self.lifetime_uses.entry(param) {
+ Entry::Vacant(v) => {
+ debug!("First use of {:?} at {:?}", res, ident.span);
+ let use_set = self
+ .lifetime_ribs
+ .iter()
+ .rev()
+ .find_map(|rib| match rib.kind {
+ // Do not suggest eliding a lifetime where an anonymous
+ // lifetime would be illegal.
+ LifetimeRibKind::Item
+ | LifetimeRibKind::AnonymousReportError
+ | LifetimeRibKind::ElisionFailure => Some(LifetimeUseSet::Many),
+ // An anonymous lifetime is legal here, go ahead.
+ LifetimeRibKind::AnonymousCreateParameter { .. } => {
+ Some(LifetimeUseSet::One { use_span: ident.span, use_ctxt })
+ }
+ // Only report if eliding the lifetime would have the same
+ // semantics.
+ LifetimeRibKind::Elided(r) => Some(if res == r {
+ LifetimeUseSet::One { use_span: ident.span, use_ctxt }
+ } else {
+ LifetimeUseSet::Many
+ }),
+ LifetimeRibKind::Generics { .. }
+ | LifetimeRibKind::ConstGeneric
+ | LifetimeRibKind::AnonConst => None,
+ })
+ .unwrap_or(LifetimeUseSet::Many);
+ debug!(?use_ctxt, ?use_set);
+ v.insert(use_set);
+ }
+ Entry::Occupied(mut o) => {
+ debug!("Many uses of {:?} at {:?}", res, ident.span);
+ *o.get_mut() = LifetimeUseSet::Many;
+ }
+ }
+ }
+ return;
+ }
+
+ match rib.kind {
+ LifetimeRibKind::Item => break,
+ LifetimeRibKind::ConstGeneric => {
+ self.emit_non_static_lt_in_const_generic_error(lifetime);
+ self.record_lifetime_res(
+ lifetime.id,
+ LifetimeRes::Error,
+ LifetimeElisionCandidate::Ignore,
+ );
+ return;
+ }
+ LifetimeRibKind::AnonConst => {
+ self.maybe_emit_forbidden_non_static_lifetime_error(lifetime);
+ self.record_lifetime_res(
+ lifetime.id,
+ LifetimeRes::Error,
+ LifetimeElisionCandidate::Ignore,
+ );
+ return;
+ }
+ _ => {}
+ }
+ }
+
+ let mut outer_res = None;
+ for i in indices {
+ let rib = &self.lifetime_ribs[i];
+ let normalized_ident = ident.normalize_to_macros_2_0();
+ if let Some((&outer, _)) = rib.bindings.get_key_value(&normalized_ident) {
+ outer_res = Some(outer);
+ break;
+ }
+ }
+
+ self.emit_undeclared_lifetime_error(lifetime, outer_res);
+ self.record_lifetime_res(lifetime.id, LifetimeRes::Error, LifetimeElisionCandidate::Named);
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn resolve_anonymous_lifetime(&mut self, lifetime: &Lifetime, elided: bool) {
+ debug_assert_eq!(lifetime.ident.name, kw::UnderscoreLifetime);
+
+ let missing_lifetime = MissingLifetime {
+ id: lifetime.id,
+ span: lifetime.ident.span,
+ kind: if elided {
+ MissingLifetimeKind::Ampersand
+ } else {
+ MissingLifetimeKind::Underscore
+ },
+ count: 1,
+ };
+ let elision_candidate = LifetimeElisionCandidate::Missing(missing_lifetime);
+ for i in (0..self.lifetime_ribs.len()).rev() {
+ let rib = &mut self.lifetime_ribs[i];
+ debug!(?rib.kind);
+ match rib.kind {
+ LifetimeRibKind::AnonymousCreateParameter { binder, .. } => {
+ let res = self.create_fresh_lifetime(lifetime.id, lifetime.ident, binder);
+ self.record_lifetime_res(lifetime.id, res, elision_candidate);
+ return;
+ }
+ LifetimeRibKind::AnonymousReportError => {
+ let (msg, note) = if elided {
+ (
+ "`&` without an explicit lifetime name cannot be used here",
+ "explicit lifetime name needed here",
+ )
+ } else {
+ ("`'_` cannot be used here", "`'_` is a reserved lifetime name")
+ };
+ rustc_errors::struct_span_err!(
+ self.r.session,
+ lifetime.ident.span,
+ E0637,
+ "{}",
+ msg,
+ )
+ .span_label(lifetime.ident.span, note)
+ .emit();
+
+ self.record_lifetime_res(lifetime.id, LifetimeRes::Error, elision_candidate);
+ return;
+ }
+ LifetimeRibKind::Elided(res) => {
+ self.record_lifetime_res(lifetime.id, res, elision_candidate);
+ return;
+ }
+ LifetimeRibKind::ElisionFailure => {
+ self.diagnostic_metadata.current_elision_failures.push(missing_lifetime);
+ self.record_lifetime_res(lifetime.id, LifetimeRes::Error, elision_candidate);
+ return;
+ }
+ LifetimeRibKind::Item => break,
+ LifetimeRibKind::Generics { .. }
+ | LifetimeRibKind::ConstGeneric
+ | LifetimeRibKind::AnonConst => {}
+ }
+ }
+ self.record_lifetime_res(lifetime.id, LifetimeRes::Error, elision_candidate);
+ self.report_missing_lifetime_specifiers(vec![missing_lifetime], None);
+ }
+
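+    // Called for each `&` written without an explicit lifetime, e.g. both
+    // references in `fn f(x: &u8) -> &u8`: a synthetic `'_` lifetime is
+    // created at `span` and then resolved as an anonymous lifetime.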
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn resolve_elided_lifetime(&mut self, anchor_id: NodeId, span: Span) {
+ let id = self.r.next_node_id();
+ let lt = Lifetime { id, ident: Ident::new(kw::UnderscoreLifetime, span) };
+
+ self.record_lifetime_res(
+ anchor_id,
+ LifetimeRes::ElidedAnchor { start: id, end: NodeId::from_u32(id.as_u32() + 1) },
+ LifetimeElisionCandidate::Ignore,
+ );
+ self.resolve_anonymous_lifetime(&lt, true);
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn create_fresh_lifetime(&mut self, id: NodeId, ident: Ident, binder: NodeId) -> LifetimeRes {
+ debug_assert_eq!(ident.name, kw::UnderscoreLifetime);
+ debug!(?ident.span);
+
+ // Leave the responsibility to create the `LocalDefId` to lowering.
+ let param = self.r.next_node_id();
+ let res = LifetimeRes::Fresh { param, binder };
+
+ // Record the created lifetime parameter so lowering can pick it up and add it to HIR.
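+        // E.g. for `async fn f(x: &u8) {}` the elided lifetime of `&u8` ends
+        // up here, and lowering later materializes the recorded entry as a
+        // fresh generic lifetime parameter.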
+ self.r
+ .extra_lifetime_params_map
+ .entry(binder)
+ .or_insert_with(Vec::new)
+ .push((ident, param, res));
+ res
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn resolve_elided_lifetimes_in_path(
+ &mut self,
+ path_id: NodeId,
+ partial_res: PartialRes,
+ path: &[Segment],
+ source: PathSource<'_>,
+ path_span: Span,
+ ) {
+ let proj_start = path.len() - partial_res.unresolved_segments();
+ for (i, segment) in path.iter().enumerate() {
+ if segment.has_lifetime_args {
+ continue;
+ }
+ let Some(segment_id) = segment.id else {
+ continue;
+ };
+
+ // Figure out if this is a type/trait segment,
+ // which may need lifetime elision performed.
+ let type_def_id = match partial_res.base_res() {
+ Res::Def(DefKind::AssocTy, def_id) if i + 2 == proj_start => self.r.parent(def_id),
+ Res::Def(DefKind::Variant, def_id) if i + 1 == proj_start => self.r.parent(def_id),
+ Res::Def(DefKind::Struct, def_id)
+ | Res::Def(DefKind::Union, def_id)
+ | Res::Def(DefKind::Enum, def_id)
+ | Res::Def(DefKind::TyAlias, def_id)
+ | Res::Def(DefKind::Trait, def_id)
+ if i + 1 == proj_start =>
+ {
+ def_id
+ }
+ _ => continue,
+ };
+
+ let expected_lifetimes = self.r.item_generics_num_lifetimes(type_def_id);
+ if expected_lifetimes == 0 {
+ continue;
+ }
+
+ let node_ids = self.r.next_node_ids(expected_lifetimes);
+ self.record_lifetime_res(
+ segment_id,
+ LifetimeRes::ElidedAnchor { start: node_ids.start, end: node_ids.end },
+ LifetimeElisionCandidate::Ignore,
+ );
+
+ let inferred = match source {
+ PathSource::Trait(..) | PathSource::TraitItem(..) | PathSource::Type => false,
+ PathSource::Expr(..)
+ | PathSource::Pat
+ | PathSource::Struct
+ | PathSource::TupleStruct(..) => true,
+ };
+ if inferred {
+ // Do not create a parameter for patterns and expressions: type checking can infer
+ // the appropriate lifetime for us.
+ for id in node_ids {
+ self.record_lifetime_res(
+ id,
+ LifetimeRes::Infer,
+ LifetimeElisionCandidate::Named,
+ );
+ }
+ continue;
+ }
+
+ let elided_lifetime_span = if segment.has_generic_args {
+ // If there are brackets but no generic arguments, use the opening bracket.
+ segment.args_span.with_hi(segment.args_span.lo() + BytePos(1))
+ } else {
+ // If there are no brackets, use the identifier span.
+ // HACK: we use find_ancestor_inside to properly suggest elided spans in paths
+ // originating from macros, since the segment's span might be from a macro arg.
+ segment.ident.span.find_ancestor_inside(path_span).unwrap_or(path_span)
+ };
+ let ident = Ident::new(kw::UnderscoreLifetime, elided_lifetime_span);
+
+ let missing_lifetime = MissingLifetime {
+ id: node_ids.start,
+ span: elided_lifetime_span,
+ kind: if segment.has_generic_args {
+ MissingLifetimeKind::Comma
+ } else {
+ MissingLifetimeKind::Brackets
+ },
+ count: expected_lifetimes,
+ };
+ let mut should_lint = true;
+ for rib in self.lifetime_ribs.iter().rev() {
+ match rib.kind {
+ // In create-parameter mode we error here, because we don't want to support
+ // the deprecated elided-lifetime-in-path syntax in new features like
+ // impl-header lifetime elision and `async fn`, both of which use the
+ // `CreateParameter` mode:
+ //
+ // impl Foo for std::cell::Ref<u32> // note lack of '_
+ // async fn foo(_: std::cell::Ref<u32>) { ... }
+ LifetimeRibKind::AnonymousCreateParameter { report_in_path: true, .. } => {
+ let sess = self.r.session;
+ let mut err = rustc_errors::struct_span_err!(
+ sess,
+ path_span,
+ E0726,
+ "implicit elided lifetime not allowed here"
+ );
+ rustc_errors::add_elided_lifetime_in_path_suggestion(
+ sess.source_map(),
+ &mut err,
+ expected_lifetimes,
+ path_span,
+ !segment.has_generic_args,
+ elided_lifetime_span,
+ );
+ err.note("assuming a `'static` lifetime...");
+ err.emit();
+ should_lint = false;
+
+ for id in node_ids {
+ self.record_lifetime_res(
+ id,
+ LifetimeRes::Error,
+ LifetimeElisionCandidate::Named,
+ );
+ }
+ break;
+ }
+ // Create a fresh lifetime parameter for each elided lifetime.
+ LifetimeRibKind::AnonymousCreateParameter { binder, .. } => {
+ // Group all suggestions into the first record.
+ let mut candidate = LifetimeElisionCandidate::Missing(missing_lifetime);
+ for id in node_ids {
+ let res = self.create_fresh_lifetime(id, ident, binder);
+ self.record_lifetime_res(
+ id,
+ res,
+ replace(&mut candidate, LifetimeElisionCandidate::Named),
+ );
+ }
+ break;
+ }
+ LifetimeRibKind::Elided(res) => {
+ let mut candidate = LifetimeElisionCandidate::Missing(missing_lifetime);
+ for id in node_ids {
+ self.record_lifetime_res(
+ id,
+ res,
+ replace(&mut candidate, LifetimeElisionCandidate::Ignore),
+ );
+ }
+ break;
+ }
+ LifetimeRibKind::ElisionFailure => {
+ self.diagnostic_metadata.current_elision_failures.push(missing_lifetime);
+ for id in node_ids {
+ self.record_lifetime_res(
+ id,
+ LifetimeRes::Error,
+ LifetimeElisionCandidate::Ignore,
+ );
+ }
+ break;
+ }
+ // Neither rib kind can supply a lifetime for the elided segments here,
+ // so we record `LifetimeRes::Error` for each of them and immediately
+ // emit a "missing lifetime specifier" error below.
+ LifetimeRibKind::AnonymousReportError | LifetimeRibKind::Item => {
+ for id in node_ids {
+ self.record_lifetime_res(
+ id,
+ LifetimeRes::Error,
+ LifetimeElisionCandidate::Ignore,
+ );
+ }
+ self.report_missing_lifetime_specifiers(vec![missing_lifetime], None);
+ break;
+ }
+ LifetimeRibKind::Generics { .. }
+ | LifetimeRibKind::ConstGeneric
+ | LifetimeRibKind::AnonConst => {}
+ }
+ }
+
+ if should_lint {
+ self.r.lint_buffer.buffer_lint_with_diagnostic(
+ lint::builtin::ELIDED_LIFETIMES_IN_PATHS,
+ segment_id,
+ elided_lifetime_span,
+ "hidden lifetime parameters in types are deprecated",
+ lint::BuiltinLintDiagnostics::ElidedLifetimesInPaths(
+ expected_lifetimes,
+ path_span,
+ !segment.has_generic_args,
+ elided_lifetime_span,
+ ),
+ );
+ }
+ }
+ }
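+
+ // A sketch of what the lint above fires on:
+ //
+ //     struct Wrapper<'a>(&'a u8);
+ //     fn f(w: Wrapper) {}     // lint: hidden lifetime parameters in types
+ //     fn g(w: Wrapper<'_>) {} // writing `'_` explicitly silences the lint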
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn record_lifetime_res(
+ &mut self,
+ id: NodeId,
+ res: LifetimeRes,
+ candidate: LifetimeElisionCandidate,
+ ) {
+ if let Some(prev_res) = self.r.lifetimes_res_map.insert(id, res) {
+ panic!(
+ "lifetime {:?} resolved multiple times ({:?} before, {:?} now)",
+ id, prev_res, res
+ )
+ }
+ match res {
+ LifetimeRes::Param { .. } | LifetimeRes::Fresh { .. } | LifetimeRes::Static => {
+ if let Some(ref mut candidates) = self.lifetime_elision_candidates {
+ candidates.insert(res, candidate);
+ }
+ }
+ LifetimeRes::Infer | LifetimeRes::Error | LifetimeRes::ElidedAnchor { .. } => {}
+ }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn record_lifetime_param(&mut self, id: NodeId, res: LifetimeRes) {
+ if let Some(prev_res) = self.r.lifetimes_res_map.insert(id, res) {
+ panic!(
+ "lifetime parameter {:?} resolved multiple times ({:?} before, {:?} now)",
+ id, prev_res, res
+ )
+ }
+ }
+
+ /// Perform resolution of a function signature, accounting for lifetime elision.
+ #[tracing::instrument(level = "debug", skip(self, inputs))]
+ fn resolve_fn_signature(
+ &mut self,
+ fn_id: NodeId,
+ has_self: bool,
+ inputs: impl Iterator<Item = (Option<&'ast Pat>, &'ast Ty)> + Clone,
+ output_ty: &'ast FnRetTy,
+ ) {
+ // Add each argument to the rib.
+ let elision_lifetime = self.resolve_fn_params(has_self, inputs);
+ debug!(?elision_lifetime);
+
+ let outer_failures = take(&mut self.diagnostic_metadata.current_elision_failures);
+ let output_rib = if let Ok(res) = elision_lifetime.as_ref() {
+ LifetimeRibKind::Elided(*res)
+ } else {
+ LifetimeRibKind::ElisionFailure
+ };
+ self.with_lifetime_rib(output_rib, |this| visit::walk_fn_ret_ty(this, &output_ty));
+ let elision_failures =
+ replace(&mut self.diagnostic_metadata.current_elision_failures, outer_failures);
+ if !elision_failures.is_empty() {
+ let Err(failure_info) = elision_lifetime else { bug!() };
+ self.report_missing_lifetime_specifiers(elision_failures, Some(failure_info));
+ }
+ }
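+
+ // The classic elision cases this implements, for example:
+ //
+ //     fn ok(x: &u8) -> &u8 { x }           // single input: output elides to it
+ //     fn bad(x: &u8, y: &u8) -> &u8 { x }  // error[E0106]: missing lifetime specifier
+ //
+ // In the second case `resolve_fn_params` returns `Err(..)`, and the failures
+ // collected while walking the return type feed the diagnostic above.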
+
+ /// Resolve inside function parameters and parameter types.
+ /// Returns the lifetime for elision in the fn return type,
+ /// or diagnostic information in case of elision failure.
+ fn resolve_fn_params(
+ &mut self,
+ has_self: bool,
+ inputs: impl Iterator<Item = (Option<&'ast Pat>, &'ast Ty)>,
+ ) -> Result<LifetimeRes, (Vec<MissingLifetime>, Vec<ElisionFnParameter>)> {
+ let outer_candidates =
+ replace(&mut self.lifetime_elision_candidates, Some(Default::default()));
+
+ let mut elision_lifetime = None;
+ let mut lifetime_count = 0;
+ let mut parameter_info = Vec::new();
+
+ let mut bindings = smallvec![(PatBoundCtx::Product, Default::default())];
+ for (index, (pat, ty)) in inputs.enumerate() {
+ debug!(?pat, ?ty);
+ if let Some(pat) = pat {
+ self.resolve_pattern(pat, PatternSource::FnParam, &mut bindings);
+ }
+ self.visit_ty(ty);
+
+ if let Some(ref candidates) = self.lifetime_elision_candidates {
+ let new_count = candidates.len();
+ let local_count = new_count - lifetime_count;
+ if local_count != 0 {
+ parameter_info.push(ElisionFnParameter {
+ index,
+ ident: if let Some(pat) = pat && let PatKind::Ident(_, ident, _) = pat.kind {
+ Some(ident)
+ } else {
+ None
+ },
+ lifetime_count: local_count,
+ span: ty.span,
+ });
+ }
+ lifetime_count = new_count;
+ }
+
+ // Handle `self` specially.
+ if index == 0 && has_self {
+ let self_lifetime = self.find_lifetime_for_self(ty);
+ if let Set1::One(lifetime) = self_lifetime {
+ elision_lifetime = Some(lifetime);
+ self.lifetime_elision_candidates = None;
+ } else {
+ self.lifetime_elision_candidates = Some(Default::default());
+ lifetime_count = 0;
+ }
+ }
+ debug!("(resolving function / closure) recorded parameter");
+ }
+
+ let all_candidates = replace(&mut self.lifetime_elision_candidates, outer_candidates);
+ debug!(?all_candidates);
+
+ if let Some(res) = elision_lifetime {
+ return Ok(res);
+ }
+
+ // We do not have a `self` candidate, look at the full list.
+ let all_candidates = all_candidates.unwrap();
+ if all_candidates.len() == 1 {
+ Ok(*all_candidates.first().unwrap().0)
+ } else {
+ let all_candidates = all_candidates
+ .into_iter()
+ .filter_map(|(_, candidate)| match candidate {
+ LifetimeElisionCandidate::Ignore | LifetimeElisionCandidate::Named => None,
+ LifetimeElisionCandidate::Missing(missing) => Some(missing),
+ })
+ .collect();
+ Err((all_candidates, parameter_info))
+ }
+ }
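+
+ // The `self` special case above implements the rule that a method taking
+ // `&self` elides its output lifetime to `self`'s borrow, for example:
+ //
+ //     struct S(u8);
+ //     impl S {
+ //         fn get(&self, _key: &str) -> &u8 { &self.0 } // borrows from `&self`
+ //     }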
+
+ /// List all the lifetimes that appear on `&Self`-like references in the provided type.
+ fn find_lifetime_for_self(&self, ty: &'ast Ty) -> Set1<LifetimeRes> {
+ struct SelfVisitor<'r, 'a> {
+ r: &'r Resolver<'a>,
+ impl_self: Option<Res>,
+ lifetime: Set1<LifetimeRes>,
+ }
+
+ impl SelfVisitor<'_, '_> {
+ // Look for `self: &'a Self`, which is also what `&'a self` desugars to;
+ // when it matches, its lifetime is the one used for elision.
+ fn is_self_ty(&self, ty: &Ty) -> bool {
+ match ty.kind {
+ TyKind::ImplicitSelf => true,
+ TyKind::Path(None, _) => {
+ let path_res = self.r.partial_res_map[&ty.id].base_res();
+ if let Res::SelfTy { .. } = path_res {
+ return true;
+ }
+ Some(path_res) == self.impl_self
+ }
+ _ => false,
+ }
+ }
+ }
+
+ impl<'a> Visitor<'a> for SelfVisitor<'_, '_> {
+ fn visit_ty(&mut self, ty: &'a Ty) {
+ trace!("SelfVisitor considering ty={:?}", ty);
+ if let TyKind::Rptr(lt, ref mt) = ty.kind && self.is_self_ty(&mt.ty) {
+ let lt_id = if let Some(lt) = lt {
+ lt.id
+ } else {
+ let res = self.r.lifetimes_res_map[&ty.id];
+ let LifetimeRes::ElidedAnchor { start, .. } = res else { bug!() };
+ start
+ };
+ let lt_res = self.r.lifetimes_res_map[&lt_id];
+ trace!("SelfVisitor inserting res={:?}", lt_res);
+ self.lifetime.insert(lt_res);
+ }
+ visit::walk_ty(self, ty)
+ }
+ }
+
+ let impl_self = self
+ .diagnostic_metadata
+ .current_self_type
+ .as_ref()
+ .and_then(|ty| {
+ if let TyKind::Path(None, _) = ty.kind {
+ self.r.partial_res_map.get(&ty.id)
+ } else {
+ None
+ }
+ })
+ .map(|res| res.base_res())
+ .filter(|res| {
+ // Permit only the types that unambiguously always
+ // resolve to the same type constructor
+ // (it can't differ between `Self` and `self`).
+ matches!(
+ res,
+ Res::Def(DefKind::Struct | DefKind::Union | DefKind::Enum, _) | Res::PrimTy(_)
+ )
+ });
+ let mut visitor = SelfVisitor { r: self.r, impl_self, lifetime: Set1::Empty };
+ visitor.visit_ty(ty);
+ trace!("SelfVisitor found={:?}", visitor.lifetime);
+ visitor.lifetime
+ }
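+
+ // Illustration: `SelfVisitor::is_self_ty` accepts the usual spellings of a
+ // `self` receiver type, e.g.
+ //
+ //     impl S {
+ //         fn a(&self) {}       // desugars to `self: &Self` (`ImplicitSelf`)
+ //         fn b(self: &Self) {} // `Self` path resolving to `Res::SelfTy`
+ //         fn c(self: &S) {}    // named type matching `impl_self`
+ //     }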
+
+ /// Searches the current set of local scopes for labels. Returns the `NodeId` of the resolved
+ /// label and reports an error if the label is not found or is unreachable.
+ fn resolve_label(&mut self, mut label: Ident) -> Result<(NodeId, Span), ResolutionError<'a>> {
+ let mut suggestion = None;
+
+ for i in (0..self.label_ribs.len()).rev() {
+ let rib = &self.label_ribs[i];
+
+ if let MacroDefinition(def) = rib.kind {
+ // If an invocation of this macro created the label, give up on it
+ // and switch to the label's source from the macro definition.
+ if def == self.r.macro_def(label.span.ctxt()) {
+ label.span.remove_mark();
+ }
+ }
+
+ let ident = label.normalize_to_macro_rules();
+ if let Some((ident, id)) = rib.bindings.get_key_value(&ident) {
+ let definition_span = ident.span;
+ return if self.is_label_valid_from_rib(i) {
+ Ok((*id, definition_span))
+ } else {
+ Err(ResolutionError::UnreachableLabel {
+ name: label.name,
+ definition_span,
+ suggestion,
+ })
+ };
+ }
+
+ // Diagnostics: Check if this rib contains a label with a similar name, keep track of
+ // the first such label that is encountered.
+ suggestion = suggestion.or_else(|| self.suggestion_for_label_in_rib(i, label));
+ }
+
+ Err(ResolutionError::UndeclaredLabel { name: label.name, suggestion })
+ }
+
+ /// Determine whether or not a label from the `rib_index`th label rib is reachable.
+ fn is_label_valid_from_rib(&self, rib_index: usize) -> bool {
+ let ribs = &self.label_ribs[rib_index + 1..];
+
+ for rib in ribs {
+ if rib.kind.is_label_barrier() {
+ return false;
+ }
+ }
+
+ true
+ }
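+
+ // For example, for the two functions above:
+ //
+ //     'outer: loop { break 'outer; }  // ok: label found and reachable
+ //     'a: loop {
+ //         let f = || break 'a;        // error[E0767]: closures are label
+ //         f();                        // barriers, so `'a` is unreachable
+ //     }
+ //     loop { break 'b; }              // error[E0426]: undeclared label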
+
+ fn resolve_adt(&mut self, item: &'ast Item, generics: &'ast Generics) {
+ debug!("resolve_adt");
+ self.with_current_self_item(item, |this| {
+ this.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes),
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ kind: LifetimeBinderKind::Item,
+ span: generics.span,
+ },
+ |this| {
+ let item_def_id = this.r.local_def_id(item.id).to_def_id();
+ this.with_self_rib(
+ Res::SelfTy { trait_: None, alias_to: Some((item_def_id, false)) },
+ |this| {
+ visit::walk_item(this, item);
+ },
+ );
+ },
+ );
+ });
+ }
+
+ fn future_proof_import(&mut self, use_tree: &UseTree) {
+ let segments = &use_tree.prefix.segments;
+ if !segments.is_empty() {
+ let ident = segments[0].ident;
+ if ident.is_path_segment_keyword() || ident.span.rust_2015() {
+ return;
+ }
+
+ let nss = match use_tree.kind {
+ UseTreeKind::Simple(..) if segments.len() == 1 => &[TypeNS, ValueNS][..],
+ _ => &[TypeNS],
+ };
+ let report_error = |this: &Self, ns| {
+ let what = if ns == TypeNS { "type parameters" } else { "local variables" };
+ if this.should_report_errs() {
+ this.r
+ .session
+ .span_err(ident.span, &format!("imports cannot refer to {}", what));
+ }
+ };
+
+ for &ns in nss {
+ match self.maybe_resolve_ident_in_lexical_scope(ident, ns) {
+ Some(LexicalScopeBinding::Res(..)) => {
+ report_error(self, ns);
+ }
+ Some(LexicalScopeBinding::Item(binding)) => {
+ if let Some(LexicalScopeBinding::Res(..)) =
+ self.resolve_ident_in_lexical_scope(ident, ns, None, Some(binding))
+ {
+ report_error(self, ns);
+ }
+ }
+ None => {}
+ }
+ }
+ } else if let UseTreeKind::Nested(use_trees) = &use_tree.kind {
+ for (use_tree, _) in use_trees {
+ self.future_proof_import(use_tree);
+ }
+ }
+ }
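+
+ // A sketch of what this check rejects (it only applies outside the 2015
+ // edition, due to the `rust_2015` early return above):
+ //
+ //     fn f<T>() {
+ //         use T; // error: imports cannot refer to type parameters
+ //     }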
+
+ fn resolve_item(&mut self, item: &'ast Item) {
+ let name = item.ident.name;
+ debug!("(resolving item) resolving {} ({:?})", name, item.kind);
+
+ match item.kind {
+ ItemKind::TyAlias(box TyAlias { ref generics, .. }) => {
+ self.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes),
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ kind: LifetimeBinderKind::Item,
+ span: generics.span,
+ },
+ |this| visit::walk_item(this, item),
+ );
+ }
+
+ ItemKind::Fn(box Fn { ref generics, .. }) => {
+ self.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes),
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ kind: LifetimeBinderKind::Function,
+ span: generics.span,
+ },
+ |this| visit::walk_item(this, item),
+ );
+ }
+
+ ItemKind::Enum(_, ref generics)
+ | ItemKind::Struct(_, ref generics)
+ | ItemKind::Union(_, ref generics) => {
+ self.resolve_adt(item, generics);
+ }
+
+ ItemKind::Impl(box Impl {
+ ref generics,
+ ref of_trait,
+ ref self_ty,
+ items: ref impl_items,
+ ..
+ }) => {
+ self.diagnostic_metadata.current_impl_items = Some(impl_items);
+ self.resolve_implementation(generics, of_trait, &self_ty, item.id, impl_items);
+ self.diagnostic_metadata.current_impl_items = None;
+ }
+
+ ItemKind::Trait(box Trait { ref generics, ref bounds, ref items, .. }) => {
+ // Create a new rib for the trait-wide type parameters.
+ self.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes),
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ kind: LifetimeBinderKind::Item,
+ span: generics.span,
+ },
+ |this| {
+ let local_def_id = this.r.local_def_id(item.id).to_def_id();
+ this.with_self_rib(
+ Res::SelfTy { trait_: Some(local_def_id), alias_to: None },
+ |this| {
+ this.visit_generics(generics);
+ walk_list!(this, visit_param_bound, bounds, BoundKind::SuperTraits);
+ this.resolve_trait_items(items);
+ },
+ );
+ },
+ );
+ }
+
+ ItemKind::TraitAlias(ref generics, ref bounds) => {
+ // Create a new rib for the trait-wide type parameters.
+ self.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes),
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ kind: LifetimeBinderKind::Item,
+ span: generics.span,
+ },
+ |this| {
+ let local_def_id = this.r.local_def_id(item.id).to_def_id();
+ this.with_self_rib(
+ Res::SelfTy { trait_: Some(local_def_id), alias_to: None },
+ |this| {
+ this.visit_generics(generics);
+ walk_list!(this, visit_param_bound, bounds, BoundKind::Bound);
+ },
+ );
+ },
+ );
+ }
+
+ ItemKind::Mod(..) | ItemKind::ForeignMod(_) => {
+ self.with_scope(item.id, |this| {
+ visit::walk_item(this, item);
+ });
+ }
+
+ ItemKind::Static(ref ty, _, ref expr) | ItemKind::Const(_, ref ty, ref expr) => {
+ self.with_item_rib(|this| {
+ this.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Static), |this| {
+ this.visit_ty(ty);
+ });
+ this.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Infer), |this| {
+ if let Some(expr) = expr {
+ let constant_item_kind = match item.kind {
+ ItemKind::Const(..) => ConstantItemKind::Const,
+ ItemKind::Static(..) => ConstantItemKind::Static,
+ _ => unreachable!(),
+ };
+ // We already forbid generic params because of the above item rib,
+ // so it doesn't matter whether this is a trivial constant.
+ this.with_constant_rib(
+ IsRepeatExpr::No,
+ HasGenericParams::Yes,
+ Some((item.ident, constant_item_kind)),
+ |this| this.visit_expr(expr),
+ );
+ }
+ });
+ });
+ }
+
+ ItemKind::Use(ref use_tree) => {
+ self.future_proof_import(use_tree);
+ }
+
+ ItemKind::ExternCrate(..) | ItemKind::MacroDef(..) => {
+ // do nothing, these are just around to be encoded
+ }
+
+ ItemKind::GlobalAsm(_) => {
+ visit::walk_item(self, item);
+ }
+
+ ItemKind::MacCall(_) => panic!("unexpanded macro in resolve!"),
+ }
+ }
+
+ fn with_generic_param_rib<'c, F>(
+ &'c mut self,
+ params: &'c [GenericParam],
+ kind: RibKind<'a>,
+ lifetime_kind: LifetimeRibKind,
+ f: F,
+ ) where
+ F: FnOnce(&mut Self),
+ {
+ debug!("with_generic_param_rib");
+ let LifetimeRibKind::Generics { binder, span: generics_span, kind: generics_kind, .. }
+ = lifetime_kind else { panic!() };
+
+ let mut function_type_rib = Rib::new(kind);
+ let mut function_value_rib = Rib::new(kind);
+ let mut function_lifetime_rib = LifetimeRib::new(lifetime_kind);
+ let mut seen_bindings = FxHashMap::default();
+ // Store all lifetime names seen in outer scopes.
+ let mut seen_lifetimes = FxHashSet::default();
+
+ // We also can't shadow bindings from the parent item
+ if let AssocItemRibKind = kind {
+ let mut add_bindings_for_ns = |ns| {
+ let parent_rib = self.ribs[ns]
+ .iter()
+ .rfind(|r| matches!(r.kind, ItemRibKind(_)))
+ .expect("associated item outside of an item");
+ seen_bindings
+ .extend(parent_rib.bindings.iter().map(|(ident, _)| (*ident, ident.span)));
+ };
+ add_bindings_for_ns(ValueNS);
+ add_bindings_for_ns(TypeNS);
+ }
+
+ // Forbid shadowing lifetime bindings
+ for rib in self.lifetime_ribs.iter().rev() {
+ seen_lifetimes.extend(rib.bindings.iter().map(|(ident, _)| *ident));
+ if let LifetimeRibKind::Item = rib.kind {
+ break;
+ }
+ }
+
+ for param in params {
+ let ident = param.ident.normalize_to_macros_2_0();
+ debug!("with_generic_param_rib: {}", param.id);
+
+ if let GenericParamKind::Lifetime = param.kind
+ && let Some(&original) = seen_lifetimes.get(&ident)
+ {
+ diagnostics::signal_lifetime_shadowing(self.r.session, original, param.ident);
+ // Record lifetime res, so lowering knows there is something fishy.
+ self.record_lifetime_param(param.id, LifetimeRes::Error);
+ continue;
+ }
+
+ match seen_bindings.entry(ident) {
+ Entry::Occupied(entry) => {
+ let span = *entry.get();
+ let err = ResolutionError::NameAlreadyUsedInParameterList(ident.name, span);
+ self.report_error(param.ident.span, err);
+ if let GenericParamKind::Lifetime = param.kind {
+ // Record lifetime res, so lowering knows there is something fishy.
+ self.record_lifetime_param(param.id, LifetimeRes::Error);
+ continue;
+ }
+ }
+ Entry::Vacant(entry) => {
+ entry.insert(param.ident.span);
+ }
+ }
+
+ if param.ident.name == kw::UnderscoreLifetime {
+ rustc_errors::struct_span_err!(
+ self.r.session,
+ param.ident.span,
+ E0637,
+ "`'_` cannot be used here"
+ )
+ .span_label(param.ident.span, "`'_` is a reserved lifetime name")
+ .emit();
+ // Record lifetime res, so lowering knows there is something fishy.
+ self.record_lifetime_param(param.id, LifetimeRes::Error);
+ continue;
+ }
+
+ if param.ident.name == kw::StaticLifetime {
+ rustc_errors::struct_span_err!(
+ self.r.session,
+ param.ident.span,
+ E0262,
+ "invalid lifetime parameter name: `{}`",
+ param.ident,
+ )
+ .span_label(param.ident.span, "'static is a reserved lifetime name")
+ .emit();
+ // Record lifetime res, so lowering knows there is something fishy.
+ self.record_lifetime_param(param.id, LifetimeRes::Error);
+ continue;
+ }
+
+ let def_id = self.r.local_def_id(param.id);
+
+ // Plain insert (no renaming).
+ let (rib, def_kind) = match param.kind {
+ GenericParamKind::Type { .. } => (&mut function_type_rib, DefKind::TyParam),
+ GenericParamKind::Const { .. } => (&mut function_value_rib, DefKind::ConstParam),
+ GenericParamKind::Lifetime => {
+ let res = LifetimeRes::Param { param: def_id, binder };
+ self.record_lifetime_param(param.id, res);
+ function_lifetime_rib.bindings.insert(ident, (param.id, res));
+ continue;
+ }
+ };
+
+ let res = match kind {
+ ItemRibKind(..) | AssocItemRibKind => Res::Def(def_kind, def_id.to_def_id()),
+ NormalRibKind => Res::Err,
+ _ => span_bug!(param.ident.span, "Unexpected rib kind {:?}", kind),
+ };
+ self.r.record_partial_res(param.id, PartialRes::new(res));
+ rib.bindings.insert(ident, res);
+ }
+
+ self.lifetime_ribs.push(function_lifetime_rib);
+ self.ribs[ValueNS].push(function_value_rib);
+ self.ribs[TypeNS].push(function_type_rib);
+
+ f(self);
+
+ self.ribs[TypeNS].pop();
+ self.ribs[ValueNS].pop();
+ let function_lifetime_rib = self.lifetime_ribs.pop().unwrap();
+
+ // Do not account for the parameters we just bound for function lifetime elision.
+ if let Some(ref mut candidates) = self.lifetime_elision_candidates {
+ for (_, res) in function_lifetime_rib.bindings.values() {
+ candidates.remove(res);
+ }
+ }
+
+ if let LifetimeBinderKind::BareFnType
+ | LifetimeBinderKind::WhereBound
+ | LifetimeBinderKind::Function
+ | LifetimeBinderKind::ImplBlock = generics_kind
+ {
+ self.maybe_report_lifetime_uses(generics_span, params)
+ }
+ }
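+
+ // The parameter checks above reject, for example:
+ //
+ //     fn f<'a, 'a>() {}  // error: name already used in the parameter list
+ //     fn g<'_>() {}      // error[E0637]: `'_` cannot be used here
+ //     fn h<'static>() {} // error[E0262]: invalid lifetime parameter name
+ //
+ // while the shadowing check stops at `LifetimeRibKind::Item`, so a lifetime
+ // on a nested item does not conflict with one on its parent.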
+
+ fn with_label_rib(&mut self, kind: RibKind<'a>, f: impl FnOnce(&mut Self)) {
+ self.label_ribs.push(Rib::new(kind));
+ f(self);
+ self.label_ribs.pop();
+ }
+
+ fn with_item_rib(&mut self, f: impl FnOnce(&mut Self)) {
+ let kind = ItemRibKind(HasGenericParams::No);
+ self.with_lifetime_rib(LifetimeRibKind::Item, |this| {
+ this.with_rib(ValueNS, kind, |this| this.with_rib(TypeNS, kind, f))
+ })
+ }
+
+ // HACK(min_const_generics,const_evaluatable_unchecked): We
+ // want to keep allowing `[0; std::mem::size_of::<*mut T>()]`
+ // with a future compat lint for now. We do this by adding an
+ // additional special case for repeat expressions.
+ //
+ // Note that we intentionally still forbid `[0; N + 1]` during
+ // name resolution so that we don't extend the future
+ // compat lint to new cases.
+ #[instrument(level = "debug", skip(self, f))]
+ fn with_constant_rib(
+ &mut self,
+ is_repeat: IsRepeatExpr,
+ may_use_generics: HasGenericParams,
+ item: Option<(Ident, ConstantItemKind)>,
+ f: impl FnOnce(&mut Self),
+ ) {
+ self.with_rib(ValueNS, ConstantItemRibKind(may_use_generics, item), |this| {
+ this.with_rib(
+ TypeNS,
+ ConstantItemRibKind(
+ may_use_generics.force_yes_if(is_repeat == IsRepeatExpr::Yes),
+ item,
+ ),
+ |this| {
+ this.with_label_rib(ConstantItemRibKind(may_use_generics, item), f);
+ },
+ )
+ });
+ }
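+
+ // The repeat-expression carve-out distinguishes, for example:
+ //
+ //     fn f<T>() {
+ //         [0u8; std::mem::size_of::<*mut T>()]; // allowed, future-compat lint
+ //     }
+ //     fn g<const N: usize>() {
+ //         [0u8; N + 1]; // rejected: generic parameters may not be used
+ //     }                 // in const operations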
+
+ fn with_current_self_type<T>(&mut self, self_type: &Ty, f: impl FnOnce(&mut Self) -> T) -> T {
+ // Handle nested impls (inside fn bodies)
+ let previous_value =
+ replace(&mut self.diagnostic_metadata.current_self_type, Some(self_type.clone()));
+ let result = f(self);
+ self.diagnostic_metadata.current_self_type = previous_value;
+ result
+ }
+
+ fn with_current_self_item<T>(&mut self, self_item: &Item, f: impl FnOnce(&mut Self) -> T) -> T {
+ let previous_value =
+ replace(&mut self.diagnostic_metadata.current_self_item, Some(self_item.id));
+ let result = f(self);
+ self.diagnostic_metadata.current_self_item = previous_value;
+ result
+ }
+
+ /// When evaluating a `trait`, use its associated types' idents for suggestions in E0412.
+ fn resolve_trait_items(&mut self, trait_items: &'ast [P<AssocItem>]) {
+ let trait_assoc_items =
+ replace(&mut self.diagnostic_metadata.current_trait_assoc_items, Some(&trait_items));
+
+ let walk_assoc_item =
+ |this: &mut Self, generics: &Generics, kind, item: &'ast AssocItem| {
+ this.with_generic_param_rib(
+ &generics.params,
+ AssocItemRibKind,
+ LifetimeRibKind::Generics { binder: item.id, span: generics.span, kind },
+ |this| visit::walk_assoc_item(this, item, AssocCtxt::Trait),
+ );
+ };
+
+ for item in trait_items {
+ match &item.kind {
+ AssocItemKind::Const(_, ty, default) => {
+ self.visit_ty(ty);
+ // Only impose the restrictions of `ConstantItemRibKind` for an
+ // actual constant expression in a provided default.
+ if let Some(expr) = default {
+ // We allow arbitrary const expressions inside of associated consts,
+ // even if they are potentially not const evaluatable.
+ //
+ // Type parameters can already be used and, as associated consts are
+ // not used as part of the type system, this is far less surprising.
+ self.with_lifetime_rib(
+ LifetimeRibKind::Elided(LifetimeRes::Infer),
+ |this| {
+ this.with_constant_rib(
+ IsRepeatExpr::No,
+ HasGenericParams::Yes,
+ None,
+ |this| this.visit_expr(expr),
+ )
+ },
+ );
+ }
+ }
+ AssocItemKind::Fn(box Fn { generics, .. }) => {
+ walk_assoc_item(self, generics, LifetimeBinderKind::Function, item);
+ }
+ AssocItemKind::TyAlias(box TyAlias { generics, .. }) => self
+ .with_lifetime_rib(LifetimeRibKind::AnonymousReportError, |this| {
+ walk_assoc_item(this, generics, LifetimeBinderKind::Item, item)
+ }),
+ AssocItemKind::MacCall(_) => {
+ panic!("unexpanded macro in resolve!")
+ }
+ };
+ }
+
+ self.diagnostic_metadata.current_trait_assoc_items = trait_assoc_items;
+ }
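+
+ // The associated-const handling above is what allows a default body to
+ // mention `Self` and type parameters, e.g.
+ //
+ //     trait Counter {
+ //         const BASE: usize;
+ //         const NEXT: usize = Self::BASE + 1; // resolved here, evaluated per impl
+ //     }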
+
+ /// This is called to resolve a trait reference from an `impl` (i.e., `impl Trait for Foo`).
+ fn with_optional_trait_ref<T>(
+ &mut self,
+ opt_trait_ref: Option<&TraitRef>,
+ self_type: &'ast Ty,
+ f: impl FnOnce(&mut Self, Option<DefId>) -> T,
+ ) -> T {
+ let mut new_val = None;
+ let mut new_id = None;
+ if let Some(trait_ref) = opt_trait_ref {
+ let path: Vec<_> = Segment::from_path(&trait_ref.path);
+ self.diagnostic_metadata.currently_processing_impl_trait =
+ Some((trait_ref.clone(), self_type.clone()));
+ let res = self.smart_resolve_path_fragment(
+ None,
+ &path,
+ PathSource::Trait(AliasPossibility::No),
+ Finalize::new(trait_ref.ref_id, trait_ref.path.span),
+ );
+ self.diagnostic_metadata.currently_processing_impl_trait = None;
+ if let Some(def_id) = res.base_res().opt_def_id() {
+ new_id = Some(def_id);
+ new_val = Some((self.r.expect_module(def_id), trait_ref.clone()));
+ }
+ }
+ let original_trait_ref = replace(&mut self.current_trait_ref, new_val);
+ let result = f(self, new_id);
+ self.current_trait_ref = original_trait_ref;
+ result
+ }
+
+ fn with_self_rib_ns(&mut self, ns: Namespace, self_res: Res, f: impl FnOnce(&mut Self)) {
+ let mut self_type_rib = Rib::new(NormalRibKind);
+
+ // Plain insert (no renaming, since types are not currently hygienic)
+ self_type_rib.bindings.insert(Ident::with_dummy_span(kw::SelfUpper), self_res);
+ self.ribs[ns].push(self_type_rib);
+ f(self);
+ self.ribs[ns].pop();
+ }
+
+ fn with_self_rib(&mut self, self_res: Res, f: impl FnOnce(&mut Self)) {
+ self.with_self_rib_ns(TypeNS, self_res, f)
+ }
+
+ fn resolve_implementation(
+ &mut self,
+ generics: &'ast Generics,
+ opt_trait_reference: &'ast Option<TraitRef>,
+ self_type: &'ast Ty,
+ item_id: NodeId,
+ impl_items: &'ast [P<AssocItem>],
+ ) {
+ debug!("resolve_implementation");
+ // If applicable, create a rib for the type parameters.
+ self.with_generic_param_rib(
+ &generics.params,
+ ItemRibKind(HasGenericParams::Yes),
+ LifetimeRibKind::Generics {
+ span: generics.span,
+ binder: item_id,
+ kind: LifetimeBinderKind::ImplBlock,
+ },
+ |this| {
+ // Dummy self type for better errors if `Self` is used in the trait path.
+ this.with_self_rib(Res::SelfTy { trait_: None, alias_to: None }, |this| {
+ this.with_lifetime_rib(
+ LifetimeRibKind::AnonymousCreateParameter {
+ binder: item_id,
+ report_in_path: true
+ },
+ |this| {
+ // Resolve the trait reference, if necessary.
+ this.with_optional_trait_ref(
+ opt_trait_reference.as_ref(),
+ self_type,
+ |this, trait_id| {
+ let item_def_id = this.r.local_def_id(item_id);
+
+ // Register the trait definitions from here.
+ if let Some(trait_id) = trait_id {
+ this.r
+ .trait_impls
+ .entry(trait_id)
+ .or_default()
+ .push(item_def_id);
+ }
+
+ let item_def_id = item_def_id.to_def_id();
+ let res = Res::SelfTy {
+ trait_: trait_id,
+ alias_to: Some((item_def_id, false)),
+ };
+ this.with_self_rib(res, |this| {
+ if let Some(trait_ref) = opt_trait_reference.as_ref() {
+ // Resolve type arguments in the trait path.
+ visit::walk_trait_ref(this, trait_ref);
+ }
+ // Resolve the self type.
+ this.visit_ty(self_type);
+ // Resolve the generic parameters.
+ this.visit_generics(generics);
+
+ // Resolve the items within the impl.
+ this.with_current_self_type(self_type, |this| {
+ this.with_self_rib_ns(ValueNS, Res::SelfCtor(item_def_id), |this| {
+ debug!("resolve_implementation with_self_rib_ns(ValueNS, ...)");
+ for item in impl_items {
+ this.resolve_impl_item(&**item);
+ }
+ });
+ });
+ });
+ },
+ )
+ },
+ );
+ });
+ },
+ );
+ }
+
+ fn resolve_impl_item(&mut self, item: &'ast AssocItem) {
+ use crate::ResolutionError::*;
+ match &item.kind {
+ AssocItemKind::Const(_, ty, default) => {
+ debug!("resolve_implementation AssocItemKind::Const");
+ // If this is a trait impl, ensure that the const
+ // exists in the trait.
+ self.check_trait_item(
+ item.id,
+ item.ident,
+ &item.kind,
+ ValueNS,
+ item.span,
+ |i, s, c| ConstNotMemberOfTrait(i, s, c),
+ );
+
+ self.visit_ty(ty);
+ if let Some(expr) = default {
+ // We allow arbitrary const expressions inside of associated consts,
+ // even if they are potentially not const evaluatable.
+ //
+ // Type parameters can already be used and, as associated consts are
+ // not used as part of the type system, this is far less surprising.
+ self.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Infer), |this| {
+ this.with_constant_rib(
+ IsRepeatExpr::No,
+ HasGenericParams::Yes,
+ None,
+ |this| this.visit_expr(expr),
+ )
+ });
+ }
+ }
+ AssocItemKind::Fn(box Fn { generics, .. }) => {
+ debug!("resolve_implementation AssocItemKind::Fn");
+ // We also need a new scope for the impl item type parameters.
+ self.with_generic_param_rib(
+ &generics.params,
+ AssocItemRibKind,
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ span: generics.span,
+ kind: LifetimeBinderKind::Function,
+ },
+ |this| {
+ // If this is a trait impl, ensure that the method
+ // exists in the trait.
+ this.check_trait_item(
+ item.id,
+ item.ident,
+ &item.kind,
+ ValueNS,
+ item.span,
+ |i, s, c| MethodNotMemberOfTrait(i, s, c),
+ );
+
+ visit::walk_assoc_item(this, item, AssocCtxt::Impl)
+ },
+ );
+ }
+ AssocItemKind::TyAlias(box TyAlias { generics, .. }) => {
+ debug!("resolve_implementation AssocItemKind::TyAlias");
+ // We also need a new scope for the impl item type parameters.
+ self.with_generic_param_rib(
+ &generics.params,
+ AssocItemRibKind,
+ LifetimeRibKind::Generics {
+ binder: item.id,
+ span: generics.span,
+ kind: LifetimeBinderKind::Item,
+ },
+ |this| {
+ this.with_lifetime_rib(LifetimeRibKind::AnonymousReportError, |this| {
+ // If this is a trait impl, ensure that the type
+ // exists in the trait.
+ this.check_trait_item(
+ item.id,
+ item.ident,
+ &item.kind,
+ TypeNS,
+ item.span,
+ |i, s, c| TypeNotMemberOfTrait(i, s, c),
+ );
+
+ visit::walk_assoc_item(this, item, AssocCtxt::Impl)
+ });
+ },
+ );
+ }
+ AssocItemKind::MacCall(_) => {
+ panic!("unexpanded macro in resolve!")
+ }
+ }
+ }
+
+ fn check_trait_item<F>(
+ &mut self,
+ id: NodeId,
+ mut ident: Ident,
+ kind: &AssocItemKind,
+ ns: Namespace,
+ span: Span,
+ err: F,
+ ) where
+ F: FnOnce(Ident, String, Option<Symbol>) -> ResolutionError<'a>,
+ {
+ // If there is a TraitRef in scope for an impl, then the method must be in the trait.
+ let Some((module, _)) = &self.current_trait_ref else { return; };
+ ident.span.normalize_to_macros_2_0_and_adjust(module.expansion);
+ let key = self.r.new_key(ident, ns);
+ let mut binding = self.r.resolution(module, key).try_borrow().ok().and_then(|r| r.binding);
+ debug!(?binding);
+ if binding.is_none() {
+ // We could not find the trait item in the correct namespace.
+ // Check the other namespace to report an error.
+ let ns = match ns {
+ ValueNS => TypeNS,
+ TypeNS => ValueNS,
+ _ => ns,
+ };
+ let key = self.r.new_key(ident, ns);
+ binding = self.r.resolution(module, key).try_borrow().ok().and_then(|r| r.binding);
+ debug!(?binding);
+ }
+ let Some(binding) = binding else {
+ // We could not find the method: report an error.
+ let candidate = self.find_similarly_named_assoc_item(ident.name, kind);
+ let path = &self.current_trait_ref.as_ref().unwrap().1.path;
+ let path_names = path_names_to_string(path);
+ self.report_error(span, err(ident, path_names, candidate));
+ return;
+ };
+
+ let res = binding.res();
+ let Res::Def(def_kind, _) = res else { bug!() };
+ match (def_kind, kind) {
+ (DefKind::AssocTy, AssocItemKind::TyAlias(..))
+ | (DefKind::AssocFn, AssocItemKind::Fn(..))
+ | (DefKind::AssocConst, AssocItemKind::Const(..)) => {
+ self.r.record_partial_res(id, PartialRes::new(res));
+ return;
+ }
+ _ => {}
+ }
+
+ // The method kind does not correspond to what appeared in the trait, report.
+ let path = &self.current_trait_ref.as_ref().unwrap().1.path;
+ let (code, kind) = match kind {
+ AssocItemKind::Const(..) => (rustc_errors::error_code!(E0323), "const"),
+ AssocItemKind::Fn(..) => (rustc_errors::error_code!(E0324), "method"),
+ AssocItemKind::TyAlias(..) => (rustc_errors::error_code!(E0325), "type"),
+ AssocItemKind::MacCall(..) => span_bug!(span, "unexpanded macro"),
+ };
+ let trait_path = path_names_to_string(path);
+ self.report_error(
+ span,
+ ResolutionError::TraitImplMismatch {
+ name: ident.name,
+ kind,
+ code,
+ trait_path,
+ trait_item_span: binding.span,
+ },
+ );
+ }
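+
+ // The kind-mismatch report above corresponds to code like:
+ //
+ //     trait T { const K: u8; }
+ //     struct S;
+ //     impl T for S {
+ //         fn K() {} // error[E0324]: implemented as a method, but the
+ //     }             // trait declares `K` as a const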
+
+ fn resolve_params(&mut self, params: &'ast [Param]) {
+ let mut bindings = smallvec![(PatBoundCtx::Product, Default::default())];
+ for Param { pat, ty, .. } in params {
+ self.resolve_pattern(pat, PatternSource::FnParam, &mut bindings);
+ self.visit_ty(ty);
+ debug!("(resolving function / closure) recorded parameter");
+ }
+ }
+
+ fn resolve_local(&mut self, local: &'ast Local) {
+ debug!("resolving local ({:?})", local);
+ // Resolve the type.
+ walk_list!(self, visit_ty, &local.ty);
+
+ // Resolve the initializer.
+ if let Some((init, els)) = local.kind.init_else_opt() {
+ self.visit_expr(init);
+
+ // Resolve the `else` block
+ if let Some(els) = els {
+ self.visit_block(els);
+ }
+ }
+
+ // Resolve the pattern.
+ self.resolve_pattern_top(&local.pat, PatternSource::Let);
+ }
+
+ /// Build a map from pattern identifiers to binding-infos.
+ /// This is done hygienically: two distinct bindings with the same
+ /// name can arise when a macro expands into an or-pattern where one
+ /// `x` was from the user and one `x` came from the macro.
+ fn binding_mode_map(&mut self, pat: &Pat) -> BindingMap {
+ let mut binding_map = FxHashMap::default();
+
+ pat.walk(&mut |pat| {
+ match pat.kind {
+ PatKind::Ident(binding_mode, ident, ref sub_pat)
+ if sub_pat.is_some() || self.is_base_res_local(pat.id) =>
+ {
+ binding_map.insert(ident, BindingInfo { span: ident.span, binding_mode });
+ }
+ PatKind::Or(ref ps) => {
+ // Check the consistency of this or-pattern and
+ // then add all bindings to the larger map.
+ for bm in self.check_consistent_bindings(ps) {
+ binding_map.extend(bm);
+ }
+ return false;
+ }
+ _ => {}
+ }
+
+ true
+ });
+
+ binding_map
+ }
+
+ fn is_base_res_local(&self, nid: NodeId) -> bool {
+ matches!(self.r.partial_res_map.get(&nid).map(|res| res.base_res()), Some(Res::Local(..)))
+ }
+
+ /// Checks that all of the arms in an or-pattern have exactly the
+ /// same set of bindings, with the same binding modes for each.
+ fn check_consistent_bindings(&mut self, pats: &[P<Pat>]) -> Vec<BindingMap> {
+ let mut missing_vars = FxHashMap::default();
+ let mut inconsistent_vars = FxHashMap::default();
+
+ // 1) Compute the binding maps of all arms.
+ let maps = pats.iter().map(|pat| self.binding_mode_map(pat)).collect::<Vec<_>>();
+
+ // 2) Record any missing bindings or binding mode inconsistencies.
+ for (map_outer, pat_outer) in pats.iter().enumerate().map(|(idx, pat)| (&maps[idx], pat)) {
+ // Check against all arms except for the same pattern which is always self-consistent.
+ let inners = pats
+ .iter()
+ .enumerate()
+ .filter(|(_, pat)| pat.id != pat_outer.id)
+ .flat_map(|(idx, _)| maps[idx].iter())
+ .map(|(key, binding)| (key.name, map_outer.get(&key), binding));
+
+ for (name, info, &binding_inner) in inners {
+ match info {
+ None => {
+ // The inner binding is missing in the outer.
+ let binding_error =
+ missing_vars.entry(name).or_insert_with(|| BindingError {
+ name,
+ origin: BTreeSet::new(),
+ target: BTreeSet::new(),
+ could_be_path: name.as_str().starts_with(char::is_uppercase),
+ });
+ binding_error.origin.insert(binding_inner.span);
+ binding_error.target.insert(pat_outer.span);
+ }
+ Some(binding_outer) => {
+ if binding_outer.binding_mode != binding_inner.binding_mode {
+ // The binding modes in the outer and inner bindings differ.
+ inconsistent_vars
+ .entry(name)
+ .or_insert((binding_inner.span, binding_outer.span));
+ }
+ }
+ }
+ }
+ }
+
+ // 3) Report all missing variables we found.
+ let mut missing_vars = missing_vars.into_iter().collect::<Vec<_>>();
+ missing_vars.sort_by_key(|&(sym, ref _err)| sym);
+
+ for (name, mut v) in missing_vars.into_iter() {
+ if inconsistent_vars.contains_key(&name) {
+ v.could_be_path = false;
+ }
+ self.report_error(
+ *v.origin.iter().next().unwrap(),
+ ResolutionError::VariableNotBoundInPattern(v, self.parent_scope),
+ );
+ }
+
+ // 4) Report all inconsistencies in binding modes we found.
+ let mut inconsistent_vars = inconsistent_vars.iter().collect::<Vec<_>>();
+ inconsistent_vars.sort();
+ for (name, v) in inconsistent_vars {
+ self.report_error(v.0, ResolutionError::VariableBoundWithDifferentMode(*name, v.1));
+ }
+
+ // 5) Finally bubble up all the binding maps.
+ maps
+ }
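+
+ // The two classes of error collected above, concretely:
+ //
+ //     match opt {
+ //         Some(x) | None => {}  // error[E0408]: variable `x` is not bound
+ //     }                         // in all patterns
+ //     match pair {
+ //         (0, ref y) | (y, 0) => {} // error[E0409]: `y` is bound
+ //     }                             // inconsistently across the `|` arms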
+
+ /// Check the consistency of the outermost or-patterns.
+ fn check_consistent_bindings_top(&mut self, pat: &'ast Pat) {
+ pat.walk(&mut |pat| match pat.kind {
+ PatKind::Or(ref ps) => {
+ self.check_consistent_bindings(ps);
+ false
+ }
+ _ => true,
+ })
+ }
+
+ fn resolve_arm(&mut self, arm: &'ast Arm) {
+ self.with_rib(ValueNS, NormalRibKind, |this| {
+ this.resolve_pattern_top(&arm.pat, PatternSource::Match);
+ walk_list!(this, visit_expr, &arm.guard);
+ this.visit_expr(&arm.body);
+ });
+ }
+
+ /// Arising from `source`, resolve a top-level pattern.
+ fn resolve_pattern_top(&mut self, pat: &'ast Pat, pat_src: PatternSource) {
+ let mut bindings = smallvec![(PatBoundCtx::Product, Default::default())];
+ self.resolve_pattern(pat, pat_src, &mut bindings);
+ }
+
+ fn resolve_pattern(
+ &mut self,
+ pat: &'ast Pat,
+ pat_src: PatternSource,
+ bindings: &mut SmallVec<[(PatBoundCtx, FxHashSet<Ident>); 1]>,
+ ) {
+ // We walk the pattern before declaring the pattern's inner bindings,
+ // so that we avoid resolving a literal expression to a binding defined
+ // by the pattern.
+ visit::walk_pat(self, pat);
+ self.resolve_pattern_inner(pat, pat_src, bindings);
+ // This has to happen *after* we determine which pat_idents are variants:
+ self.check_consistent_bindings_top(pat);
+ }
+
+ /// Resolve bindings in a pattern. This is a helper to `resolve_pattern`.
+ ///
+ /// ### `bindings`
+ ///
+ /// A stack of sets of bindings accumulated.
+ ///
+ /// In each set, `PatBoundCtx::Product` denotes that a found binding in it should
+ /// be interpreted as re-binding an already bound binding. This results in an error.
+ /// Meanwhile, `PatBoundCtx::Or` denotes that a found binding in the set should result
+ /// in reusing this binding rather than creating a fresh one.
+ ///
+ /// When called at the top level, the stack must have a single element
+ /// with `PatBoundCtx::Product`. Otherwise, pushing to the stack happens as
+ /// or-patterns (`p_0 | ... | p_n`) are encountered and the context needs
+ /// to be switched to `PatBoundCtx::Or` and then `PatBoundCtx::Product` for each `p_i`.
+ /// When each `p_i` has been dealt with, the top set is merged with its parent.
+ /// When the whole or-pattern has been dealt with, its set is merged with its parent in the same way.
+ ///
+ /// See the implementation and `fresh_binding` for more details.
+ fn resolve_pattern_inner(
+ &mut self,
+ pat: &Pat,
+ pat_src: PatternSource,
+ bindings: &mut SmallVec<[(PatBoundCtx, FxHashSet<Ident>); 1]>,
+ ) {
+ // Visit all direct subpatterns of this pattern.
+ pat.walk(&mut |pat| {
+ debug!("resolve_pattern pat={:?} node={:?}", pat, pat.kind);
+ match pat.kind {
+ PatKind::Ident(bmode, ident, ref sub) => {
+ // First try to resolve the identifier as some existing entity,
+ // then fall back to a fresh binding.
+ let has_sub = sub.is_some();
+ let res = self
+ .try_resolve_as_non_binding(pat_src, bmode, ident, has_sub)
+ .unwrap_or_else(|| self.fresh_binding(ident, pat.id, pat_src, bindings));
+ self.r.record_partial_res(pat.id, PartialRes::new(res));
+ self.r.record_pat_span(pat.id, pat.span);
+ }
+ PatKind::TupleStruct(ref qself, ref path, ref sub_patterns) => {
+ self.smart_resolve_path(
+ pat.id,
+ qself.as_ref(),
+ path,
+ PathSource::TupleStruct(
+ pat.span,
+ self.r.arenas.alloc_pattern_spans(sub_patterns.iter().map(|p| p.span)),
+ ),
+ );
+ }
+ PatKind::Path(ref qself, ref path) => {
+ self.smart_resolve_path(pat.id, qself.as_ref(), path, PathSource::Pat);
+ }
+ PatKind::Struct(ref qself, ref path, ..) => {
+ self.smart_resolve_path(pat.id, qself.as_ref(), path, PathSource::Struct);
+ }
+ PatKind::Or(ref ps) => {
+ // Add a new set of bindings to the stack. `Or` here records that when a
+ // binding already exists in this set, it should not result in an error because
+ // `V1(a) | V2(a)` must be allowed and are checked for consistency later.
+ bindings.push((PatBoundCtx::Or, Default::default()));
+ for p in ps {
+ // Now we need to switch back to a product context so that each
+ // part of the or-pattern internally rejects already bound names.
+ // For example, `V1(a) | V2(a, a)` and `V1(a, a) | V2(a)` are bad.
+ bindings.push((PatBoundCtx::Product, Default::default()));
+ self.resolve_pattern_inner(p, pat_src, bindings);
+ // Move up the non-overlapping bindings to the or-pattern.
+ // Existing bindings just get "merged".
+ let collected = bindings.pop().unwrap().1;
+ bindings.last_mut().unwrap().1.extend(collected);
+ }
+ // This or-pattern can itself be part of a product,
+ // e.g. `(V1(a) | V2(a), a)` or `(a, V1(a) | V2(a))`.
+ // Both cases bind `a` again in a product pattern and must be rejected.
+ let collected = bindings.pop().unwrap().1;
+ bindings.last_mut().unwrap().1.extend(collected);
+
+ // Prevent visiting `ps` as we've already done so above.
+ return false;
+ }
+ _ => {}
+ }
+ true
+ });
+ }
+
+ fn fresh_binding(
+ &mut self,
+ ident: Ident,
+ pat_id: NodeId,
+ pat_src: PatternSource,
+ bindings: &mut SmallVec<[(PatBoundCtx, FxHashSet<Ident>); 1]>,
+ ) -> Res {
+ // Add the binding to the local ribs, if it doesn't already exist in the bindings map.
+ // (We must not add it if it's in the bindings map because that breaks the assumptions
+ // later passes make about or-patterns.)
+ let ident = ident.normalize_to_macro_rules();
+
+ let mut bound_iter = bindings.iter().filter(|(_, set)| set.contains(&ident));
+ // Already bound in a product pattern? e.g. `(a, a)` which is not allowed.
+ let already_bound_and = bound_iter.clone().any(|(ctx, _)| *ctx == PatBoundCtx::Product);
+ // Already bound in an or-pattern? e.g. `V1(a) | V2(a)`.
+ // This is *required* for consistency which is checked later.
+ let already_bound_or = bound_iter.any(|(ctx, _)| *ctx == PatBoundCtx::Or);
+
+ if already_bound_and {
+ // Overlap in a product pattern somewhere; report an error.
+ use ResolutionError::*;
+ let error = match pat_src {
+ // `fn f(a: u8, a: u8)`:
+ PatternSource::FnParam => IdentifierBoundMoreThanOnceInParameterList,
+ // `Variant(a, a)`:
+ _ => IdentifierBoundMoreThanOnceInSamePattern,
+ };
+ self.report_error(ident.span, error(ident.name));
+ }
+
+ // Record as bound if it's valid:
+ let ident_valid = ident.name != kw::Empty;
+ if ident_valid {
+ bindings.last_mut().unwrap().1.insert(ident);
+ }
+
+ if already_bound_or {
+ // `Variant1(a) | Variant2(a)`, ok
+ // Reuse definition from the first `a`.
+ self.innermost_rib_bindings(ValueNS)[&ident]
+ } else {
+ let res = Res::Local(pat_id);
+ if ident_valid {
+ // A completely fresh binding; add it to the set if it's valid.
+ self.innermost_rib_bindings(ValueNS).insert(ident, res);
+ }
+ res
+ }
+ }
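+
+ // The product-pattern overlap errors above, concretely:
+ //
+ //     fn f(a: u8, a: u8) {} // error[E0415]: identifier `a` is bound more
+ //                           // than once in this parameter list
+ //     let (a, a) = (1, 2);  // error[E0416]: identifier `a` is bound more
+ //                           // than once in the same pattern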
+
+ fn innermost_rib_bindings(&mut self, ns: Namespace) -> &mut IdentMap<Res> {
+ &mut self.ribs[ns].last_mut().unwrap().bindings
+ }
+
+ fn try_resolve_as_non_binding(
+ &mut self,
+ pat_src: PatternSource,
+ bm: BindingMode,
+ ident: Ident,
+ has_sub: bool,
+ ) -> Option<Res> {
+ // An immutable (no `mut`) by-value (no `ref`) binding pattern without
+ // a sub pattern (no `@ $pat`) is syntactically ambiguous as it could
+ // also be interpreted as a path to e.g. a constant, variant, etc.
+ let is_syntactic_ambiguity = !has_sub && bm == BindingMode::ByValue(Mutability::Not);
+
+ let ls_binding = self.maybe_resolve_ident_in_lexical_scope(ident, ValueNS)?;
+ let (res, binding) = match ls_binding {
+ LexicalScopeBinding::Item(binding)
+ if is_syntactic_ambiguity && binding.is_ambiguity() =>
+ {
+ // For ambiguous bindings we don't know all their definitions and cannot check
+ // whether they can be shadowed by fresh bindings or not, so force an error.
+ // issues/33118#issuecomment-233962221 (see below) still applies here,
+ // but we have to ignore it for backward compatibility.
+ self.r.record_use(ident, binding, false);
+ return None;
+ }
+ LexicalScopeBinding::Item(binding) => (binding.res(), Some(binding)),
+ LexicalScopeBinding::Res(res) => (res, None),
+ };
+
+ match res {
+ Res::SelfCtor(_) // See #70549.
+ | Res::Def(
+ DefKind::Ctor(_, CtorKind::Const) | DefKind::Const | DefKind::ConstParam,
+ _,
+ ) if is_syntactic_ambiguity => {
+ // Disambiguate in favor of a unit struct/variant or constant pattern.
+ if let Some(binding) = binding {
+ self.r.record_use(ident, binding, false);
+ }
+ Some(res)
+ }
+ Res::Def(DefKind::Ctor(..) | DefKind::Const | DefKind::Static(_), _) => {
+ // This is unambiguously a fresh binding, either syntactically
+ // (e.g., `IDENT @ PAT` or `ref IDENT`) or because `IDENT` resolves
+ // to something unusable as a pattern (e.g., constructor function),
+ // but we still conservatively report an error, see
+ // issues/33118#issuecomment-233962221 for one reason why.
+ let binding = binding.expect("no binding for a ctor or static");
+ self.report_error(
+ ident.span,
+ ResolutionError::BindingShadowsSomethingUnacceptable {
+ shadowing_binding: pat_src,
+ name: ident.name,
+ participle: if binding.is_import() { "imported" } else { "defined" },
+ article: binding.res().article(),
+ shadowed_binding: binding.res(),
+ shadowed_binding_span: binding.span,
+ },
+ );
+ None
+ }
+ Res::Def(DefKind::ConstParam, def_id) => {
+ // Same as for DefKind::Const above, but here, `binding` is `None`, so we
+ // have to construct the error differently
+ self.report_error(
+ ident.span,
+ ResolutionError::BindingShadowsSomethingUnacceptable {
+ shadowing_binding: pat_src,
+ name: ident.name,
+ participle: "defined",
+ article: res.article(),
+ shadowed_binding: res,
+ shadowed_binding_span: self.r.opt_span(def_id).expect("const parameter defined outside of local crate"),
+ }
+ );
+ None
+ }
+ Res::Def(DefKind::Fn, _) | Res::Local(..) | Res::Err => {
+ // These entities are explicitly allowed to be shadowed by fresh bindings.
+ None
+ }
+ Res::SelfCtor(_) => {
+ // We resolve `Self` in pattern position as an ident sometimes during recovery,
+ // so delay a bug instead of ICEing.
+ self.r.session.delay_span_bug(
+ ident.span,
+ "unexpected `SelfCtor` in pattern, expected identifier"
+ );
+ None
+ }
+ _ => span_bug!(
+ ident.span,
+ "unexpected resolution for an identifier in pattern: {:?}",
+ res,
+ ),
+ }
+ }
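+
+ // How the disambiguation above plays out in source code, e.g.
+ //
+ //     const ZERO: u8 = 0;
+ //     match n {
+ //         ZERO => {}  // resolves to the constant, not a fresh binding
+ //         other => {} // fresh binding
+ //     }
+ //     static FOUR: u8 = 4;
+ //     match n {
+ //         FOUR => {}  // error[E0530]: match bindings cannot shadow statics
+ //     }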
+
+ // High-level and context-dependent path resolution routine.
+ // Resolves the path and records the resolution into the definition map.
+ // If resolution fails, it tries several techniques to find likely
+ // resolution candidates, suggests imports or other help, and reports
+ // errors in a user-friendly way.
+ fn smart_resolve_path(
+ &mut self,
+ id: NodeId,
+ qself: Option<&QSelf>,
+ path: &Path,
+ source: PathSource<'ast>,
+ ) {
+ self.smart_resolve_path_fragment(
+ qself,
+ &Segment::from_path(path),
+ source,
+ Finalize::new(id, path.span),
+ );
+ }
+
+ fn smart_resolve_path_fragment(
+ &mut self,
+ qself: Option<&QSelf>,
+ path: &[Segment],
+ source: PathSource<'ast>,
+ finalize: Finalize,
+ ) -> PartialRes {
+ tracing::debug!(
+ "smart_resolve_path_fragment(qself={:?}, path={:?}, finalize={:?})",
+ qself,
+ path,
+ finalize,
+ );
+ let ns = source.namespace();
+
+ let Finalize { node_id, path_span, .. } = finalize;
+ let report_errors = |this: &mut Self, res: Option<Res>| {
+ if this.should_report_errs() {
+ let (err, candidates) =
+ this.smart_resolve_report_errors(path, path_span, source, res);
+
+ let def_id = this.parent_scope.module.nearest_parent_mod();
+ let instead = res.is_some();
+ let suggestion =
+ if res.is_none() { this.report_missing_type_error(path) } else { None };
+
+ this.r.use_injections.push(UseError {
+ err,
+ candidates,
+ def_id,
+ instead,
+ suggestion,
+ path: path.into(),
+ });
+ }
+
+ PartialRes::new(Res::Err)
+ };
+
+ // For paths originating from calls (like in `HashMap::new()`), this
+ // tries to enrich the plain `failed to resolve: ...` message with hints
+ // about possible missing imports.
+ //
+ // Something similar happens for types in `report_errors` above.
+ let report_errors_for_call = |this: &mut Self, parent_err: Spanned<ResolutionError<'a>>| {
+ if !source.is_call() {
+ return Some(parent_err);
+ }
+
+ // Before we start looking for candidates, we have to get our hands
+ // on the type the user is trying to invoke something on; basically,
+ // we're transforming `HashMap::new` into just `HashMap`.
+ let path = match path.split_last() {
+ Some((_, path)) if !path.is_empty() => path,
+ _ => return Some(parent_err),
+ };
+
+ let (mut err, candidates) =
+ this.smart_resolve_report_errors(path, path_span, PathSource::Type, None);
+
+ if candidates.is_empty() {
+ err.cancel();
+ return Some(parent_err);
+ }
+
+ // There are two different error messages the user might receive at
+ // this point:
+ // - E0412 cannot find type `{}` in this scope
+ // - E0433 failed to resolve: use of undeclared type or module `{}`
+ //
+ // The first one is emitted for paths in type position, and the
+ // latter one for paths in expression position.
+ //
+ // Thus (since we're in expression position at this point), not to
+ // confuse the user, we want to keep the *message* from E0433 (so
+ // `parent_err`), but we want the *hints* from E0412 (so `err`).
+ //
+ // And that's what happens below - we're just mixing both messages
+ // into a single one.
+ let mut parent_err = this.r.into_struct_error(parent_err.span, parent_err.node);
+
+ err.message = take(&mut parent_err.message);
+ err.code = take(&mut parent_err.code);
+ err.children = take(&mut parent_err.children);
+
+ parent_err.cancel();
+
+ let def_id = this.parent_scope.module.nearest_parent_mod();
+
+ if this.should_report_errs() {
+ this.r.use_injections.push(UseError {
+ err,
+ candidates,
+ def_id,
+ instead: false,
+ suggestion: None,
+ path: path.into(),
+ });
+ } else {
+ err.cancel();
+ }
+
+ // We don't return `Some(parent_err)` here, because the error will
+ // already be printed as part of the `use` injections.
+ None
+ };
+
+ let partial_res = match self.resolve_qpath_anywhere(
+ qself,
+ path,
+ ns,
+ path_span,
+ source.defer_to_typeck(),
+ finalize,
+ ) {
+ Ok(Some(partial_res)) if partial_res.unresolved_segments() == 0 => {
+ if source.is_expected(partial_res.base_res()) || partial_res.base_res() == Res::Err
+ {
+ partial_res
+ } else {
+ report_errors(self, Some(partial_res.base_res()))
+ }
+ }
+
+ Ok(Some(partial_res)) if source.defer_to_typeck() => {
+ // Not fully resolved associated item `T::A::B` or `<T as Tr>::A::B`
+ // or `<T>::A::B`. If `B` should be resolved in value namespace then
+ // it needs to be added to the trait map.
+ if ns == ValueNS {
+ let item_name = path.last().unwrap().ident;
+ let traits = self.traits_in_scope(item_name, ns);
+ self.r.trait_map.insert(node_id, traits);
+ }
+
+ if PrimTy::from_name(path[0].ident.name).is_some() {
+ let mut std_path = Vec::with_capacity(1 + path.len());
+
+ std_path.push(Segment::from_ident(Ident::with_dummy_span(sym::std)));
+ std_path.extend(path);
+ if let PathResult::Module(_) | PathResult::NonModule(_) =
+ self.resolve_path(&std_path, Some(ns), None)
+ {
+ // Check if we wrote `str::from_utf8` instead of `std::str::from_utf8`
+ let item_span =
+ path.iter().last().map_or(path_span, |segment| segment.ident.span);
+
+ self.r.confused_type_with_std_module.insert(item_span, path_span);
+ self.r.confused_type_with_std_module.insert(path_span, path_span);
+ }
+ }
+
+ partial_res
+ }
+
+ Err(err) => {
+ if let Some(err) = report_errors_for_call(self, err) {
+ self.report_error(err.span, err.node);
+ }
+
+ PartialRes::new(Res::Err)
+ }
+
+ _ => report_errors(self, None),
+ };
+
+ if !matches!(source, PathSource::TraitItem(..)) {
+ // Avoid recording definition of `A::B` in `<T as A>::B::C`.
+ self.r.record_partial_res(node_id, partial_res);
+ self.resolve_elided_lifetimes_in_path(node_id, partial_res, path, source, path_span);
+ }
+
+ partial_res
+ }
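+
+ // For instance, a call like `str::from_utf8(b"hi")`: `str` is a primitive,
+ // so the `std_path` probe above finds `std::str::from_utf8` and records the
+ // spans in `confused_type_with_std_module`, which later diagnostics can use
+ // to point at the `std` module rather than the primitive type (an
+ // illustrative reading of the code above, not a quoted message).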
+
+ fn self_type_is_available(&mut self) -> bool {
+ let binding = self
+ .maybe_resolve_ident_in_lexical_scope(Ident::with_dummy_span(kw::SelfUpper), TypeNS);
+ if let Some(LexicalScopeBinding::Res(res)) = binding { res != Res::Err } else { false }
+ }
+
+ fn self_value_is_available(&mut self, self_span: Span) -> bool {
+ let ident = Ident::new(kw::SelfLower, self_span);
+ let binding = self.maybe_resolve_ident_in_lexical_scope(ident, ValueNS);
+ if let Some(LexicalScopeBinding::Res(res)) = binding { res != Res::Err } else { false }
+ }
+
+ /// A wrapper around [`Resolver::report_error`].
+ ///
+ /// This doesn't emit errors for function bodies if this is rustdoc.
+ fn report_error(&mut self, span: Span, resolution_error: ResolutionError<'a>) {
+ if self.should_report_errs() {
+ self.r.report_error(span, resolution_error);
+ }
+ }
+
+ #[inline]
+ /// If we're actually rustdoc then avoid giving a name resolution error for `cfg()` items.
+ fn should_report_errs(&self) -> bool {
+ !(self.r.session.opts.actually_rustdoc && self.in_func_body)
+ }
+
+ // Resolve in alternative namespaces if resolution in the primary namespace fails.
+ fn resolve_qpath_anywhere(
+ &mut self,
+ qself: Option<&QSelf>,
+ path: &[Segment],
+ primary_ns: Namespace,
+ span: Span,
+ defer_to_typeck: bool,
+ finalize: Finalize,
+ ) -> Result<Option<PartialRes>, Spanned<ResolutionError<'a>>> {
+ let mut fin_res = None;
+
+ for (i, &ns) in [primary_ns, TypeNS, ValueNS].iter().enumerate() {
+ if i == 0 || ns != primary_ns {
+ match self.resolve_qpath(qself, path, ns, finalize)? {
+ Some(partial_res)
+ if partial_res.unresolved_segments() == 0 || defer_to_typeck =>
+ {
+ return Ok(Some(partial_res));
+ }
+ partial_res => {
+ if fin_res.is_none() {
+ fin_res = partial_res;
+ }
+ }
+ }
+ }
+ }
+
+ assert!(primary_ns != MacroNS);
+
+ if qself.is_none() {
+ let path_seg = |seg: &Segment| PathSegment::from_ident(seg.ident);
+ let path = Path { segments: path.iter().map(path_seg).collect(), span, tokens: None };
+ if let Ok((_, res)) =
+ self.r.resolve_macro_path(&path, None, &self.parent_scope, false, false)
+ {
+ return Ok(Some(PartialRes::new(res)));
+ }
+ }
+
+ Ok(fin_res)
+ }
+
+ /// Handles paths that may refer to associated items.
+ fn resolve_qpath(
+ &mut self,
+ qself: Option<&QSelf>,
+ path: &[Segment],
+ ns: Namespace,
+ finalize: Finalize,
+ ) -> Result<Option<PartialRes>, Spanned<ResolutionError<'a>>> {
+ debug!(
+ "resolve_qpath(qself={:?}, path={:?}, ns={:?}, finalize={:?})",
+ qself, path, ns, finalize,
+ );
+
+ if let Some(qself) = qself {
+ if qself.position == 0 {
+ // This is a case like `<T>::B`, where there is no
+ // trait to resolve. In that case, we leave the `B`
+ // segment to be resolved by type-check.
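+ //
+ // For illustration (our note): in `<Vec<u8>>::len`, everything after the
+ // qualified self type is deferred to type-check in exactly this way.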
+ return Ok(Some(PartialRes::with_unresolved_segments(
+ Res::Def(DefKind::Mod, CRATE_DEF_ID.to_def_id()),
+ path.len(),
+ )));
+ }
+
+ // Make sure `A::B` in `<T as A::B>::C` is a trait item.
+ //
+ // Currently, `path` names the full item (`A::B::C`, in
+ // our example), so we extract the prefix of it that is
+ // the trait (the slice up to and including
+ // `qself.position`), and then we recursively resolve that,
+ // but with `qself` set to `None`.
+ let ns = if qself.position + 1 == path.len() { ns } else { TypeNS };
+ let partial_res = self.smart_resolve_path_fragment(
+ None,
+ &path[..=qself.position],
+ PathSource::TraitItem(ns),
+ Finalize::with_root_span(finalize.node_id, finalize.path_span, qself.path_span),
+ );
+
+ // The remaining segments (the `C` in our example) will
+ // have to be resolved by type-check, since that requires doing
+ // trait resolution.
+ return Ok(Some(PartialRes::with_unresolved_segments(
+ partial_res.base_res(),
+ partial_res.unresolved_segments() + path.len() - qself.position - 1,
+ )));
+ }
+
+ let result = match self.resolve_path(&path, Some(ns), Some(finalize)) {
+ PathResult::NonModule(path_res) => path_res,
+ PathResult::Module(ModuleOrUniformRoot::Module(module)) if !module.is_normal() => {
+ PartialRes::new(module.res().unwrap())
+ }
+ // In `a(::assoc_item)*`, `a` cannot be a module. If `a` does resolve to a module,
+ // we don't report an error right away, but try to fall back to a primitive type.
+ // So, we are still able to successfully resolve something like
+ //
+ // use std::u8; // bring module u8 in scope
+ // fn f() -> u8 { // OK, resolves to primitive u8, not to std::u8
+ // u8::max_value() // OK, resolves to associated function <u8>::max_value,
+ // // not to non-existent std::u8::max_value
+ // }
+ //
+ // Such behavior is required for backward compatibility.
+ // The same fallback is used when `a` resolves to nothing.
+ PathResult::Module(ModuleOrUniformRoot::Module(_)) | PathResult::Failed { .. }
+ if (ns == TypeNS || path.len() > 1)
+ && PrimTy::from_name(path[0].ident.name).is_some() =>
+ {
+ let prim = PrimTy::from_name(path[0].ident.name).unwrap();
+ PartialRes::with_unresolved_segments(Res::PrimTy(prim), path.len() - 1)
+ }
+ PathResult::Module(ModuleOrUniformRoot::Module(module)) => {
+ PartialRes::new(module.res().unwrap())
+ }
+ PathResult::Failed { is_error_from_last_segment: false, span, label, suggestion } => {
+ return Err(respan(span, ResolutionError::FailedToResolve { label, suggestion }));
+ }
+ PathResult::Module(..) | PathResult::Failed { .. } => return Ok(None),
+ PathResult::Indeterminate => bug!("indeterminate path result in resolve_qpath"),
+ };
+
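+ // Illustrative example (our note, not from the original source): with
+ // `use std::fmt::Debug;` in scope, the qualified path in
+ //
+ // fn f<T: std::fmt::Debug>(_: T) {}
+ //
+ // resolves to the same trait as plain `Debug`, so the check below buffers
+ // the `unused_qualifications` lint.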
+ if path.len() > 1
+ && result.base_res() != Res::Err
+ && path[0].ident.name != kw::PathRoot
+ && path[0].ident.name != kw::DollarCrate
+ {
+ let unqualified_result = {
+ match self.resolve_path(&[*path.last().unwrap()], Some(ns), None) {
+ PathResult::NonModule(path_res) => path_res.base_res(),
+ PathResult::Module(ModuleOrUniformRoot::Module(module)) => {
+ module.res().unwrap()
+ }
+ _ => return Ok(Some(result)),
+ }
+ };
+ if result.base_res() == unqualified_result {
+ let lint = lint::builtin::UNUSED_QUALIFICATIONS;
+ self.r.lint_buffer.buffer_lint(
+ lint,
+ finalize.node_id,
+ finalize.path_span,
+ "unnecessary qualification",
+ )
+ }
+ }
+
+ Ok(Some(result))
+ }
+
+ fn with_resolved_label(&mut self, label: Option<Label>, id: NodeId, f: impl FnOnce(&mut Self)) {
+ if let Some(label) = label {
+ if label.ident.as_str().as_bytes()[1] != b'_' {
+ self.diagnostic_metadata.unused_labels.insert(id, label.ident.span);
+ }
+
+ if let Ok((_, orig_span)) = self.resolve_label(label.ident) {
+ diagnostics::signal_label_shadowing(self.r.session, orig_span, label.ident)
+ }
+
+ self.with_label_rib(NormalRibKind, |this| {
+ let ident = label.ident.normalize_to_macro_rules();
+ this.label_ribs.last_mut().unwrap().bindings.insert(ident, id);
+ f(this);
+ });
+ } else {
+ f(self);
+ }
+ }
+
+ fn resolve_labeled_block(&mut self, label: Option<Label>, id: NodeId, block: &'ast Block) {
+ self.with_resolved_label(label, id, |this| this.visit_block(block));
+ }
+
+ fn resolve_block(&mut self, block: &'ast Block) {
+ debug!("(resolving block) entering block");
+ // Move down in the graph, if there's an anonymous module rooted here.
+ let orig_module = self.parent_scope.module;
+ let anonymous_module = self.r.block_map.get(&block.id).cloned(); // clones a reference
+
+ let mut num_macro_definition_ribs = 0;
+ if let Some(anonymous_module) = anonymous_module {
+ debug!("(resolving block) found anonymous module, moving down");
+ self.ribs[ValueNS].push(Rib::new(ModuleRibKind(anonymous_module)));
+ self.ribs[TypeNS].push(Rib::new(ModuleRibKind(anonymous_module)));
+ self.parent_scope.module = anonymous_module;
+ } else {
+ self.ribs[ValueNS].push(Rib::new(NormalRibKind));
+ }
+
+ let prev = self.diagnostic_metadata.current_block_could_be_bare_struct_literal.take();
+ if let (true, [Stmt { kind: StmtKind::Expr(expr), .. }]) =
+ (block.could_be_bare_literal, &block.stmts[..])
+ && let ExprKind::Type(..) = expr.kind
+ {
+ self.diagnostic_metadata.current_block_could_be_bare_struct_literal =
+ Some(block.span);
+ }
+ // Descend into the block.
+ for stmt in &block.stmts {
+ if let StmtKind::Item(ref item) = stmt.kind
+ && let ItemKind::MacroDef(..) = item.kind {
+ num_macro_definition_ribs += 1;
+ let res = self.r.local_def_id(item.id).to_def_id();
+ self.ribs[ValueNS].push(Rib::new(MacroDefinition(res)));
+ self.label_ribs.push(Rib::new(MacroDefinition(res)));
+ }
+
+ self.visit_stmt(stmt);
+ }
+ self.diagnostic_metadata.current_block_could_be_bare_struct_literal = prev;
+
+ // Move back up.
+ self.parent_scope.module = orig_module;
+ for _ in 0..num_macro_definition_ribs {
+ self.ribs[ValueNS].pop();
+ self.label_ribs.pop();
+ }
+ self.ribs[ValueNS].pop();
+ if anonymous_module.is_some() {
+ self.ribs[TypeNS].pop();
+ }
+ debug!("(resolving block) leaving block");
+ }
+
+ fn resolve_anon_const(&mut self, constant: &'ast AnonConst, is_repeat: IsRepeatExpr) {
+ debug!("resolve_anon_const {:?} is_repeat: {:?}", constant, is_repeat);
+ self.with_constant_rib(
+ is_repeat,
+ if constant.value.is_potential_trivial_const_param() {
+ HasGenericParams::Yes
+ } else {
+ HasGenericParams::No
+ },
+ None,
+ |this| visit::walk_anon_const(this, constant),
+ );
+ }
+
+ fn resolve_inline_const(&mut self, constant: &'ast AnonConst) {
+ debug!("resolve_anon_const {constant:?}");
+ self.with_constant_rib(IsRepeatExpr::No, HasGenericParams::Yes, None, |this| {
+ visit::walk_anon_const(this, constant);
+ });
+ }
+
+ fn resolve_expr(&mut self, expr: &'ast Expr, parent: Option<&'ast Expr>) {
+ // First, record candidate traits for this expression if it could
+ // result in the invocation of a method call.
+
+ self.record_candidate_traits_for_expr_if_necessary(expr);
+
+ // Next, resolve the node.
+ match expr.kind {
+ ExprKind::Path(ref qself, ref path) => {
+ self.smart_resolve_path(expr.id, qself.as_ref(), path, PathSource::Expr(parent));
+ visit::walk_expr(self, expr);
+ }
+
+ ExprKind::Struct(ref se) => {
+ self.smart_resolve_path(expr.id, se.qself.as_ref(), &se.path, PathSource::Struct);
+ visit::walk_expr(self, expr);
+ }
+
+ ExprKind::Break(Some(label), _) | ExprKind::Continue(Some(label)) => {
+ match self.resolve_label(label.ident) {
+ Ok((node_id, _)) => {
+ // Since this res is a label, it is never read.
+ self.r.label_res_map.insert(expr.id, node_id);
+ self.diagnostic_metadata.unused_labels.remove(&node_id);
+ }
+ Err(error) => {
+ self.report_error(label.ident.span, error);
+ }
+ }
+
+ // visit `break` argument if any
+ visit::walk_expr(self, expr);
+ }
+
+ ExprKind::Break(None, Some(ref e)) => {
+ // We use this instead of `visit::walk_expr` to keep the parent expr around for
+ // better diagnostics.
+ self.resolve_expr(e, Some(&expr));
+ }
+
+ ExprKind::Let(ref pat, ref scrutinee, _) => {
+ self.visit_expr(scrutinee);
+ self.resolve_pattern_top(pat, PatternSource::Let);
+ }
+
+ ExprKind::If(ref cond, ref then, ref opt_else) => {
+ self.with_rib(ValueNS, NormalRibKind, |this| {
+ let old = this.diagnostic_metadata.in_if_condition.replace(cond);
+ this.visit_expr(cond);
+ this.diagnostic_metadata.in_if_condition = old;
+ this.visit_block(then);
+ });
+ if let Some(expr) = opt_else {
+ self.visit_expr(expr);
+ }
+ }
+
+ ExprKind::Loop(ref block, label) => self.resolve_labeled_block(label, expr.id, &block),
+
+ ExprKind::While(ref cond, ref block, label) => {
+ self.with_resolved_label(label, expr.id, |this| {
+ this.with_rib(ValueNS, NormalRibKind, |this| {
+ let old = this.diagnostic_metadata.in_if_condition.replace(cond);
+ this.visit_expr(cond);
+ this.diagnostic_metadata.in_if_condition = old;
+ this.visit_block(block);
+ })
+ });
+ }
+
+ ExprKind::ForLoop(ref pat, ref iter_expr, ref block, label) => {
+ self.visit_expr(iter_expr);
+ self.with_rib(ValueNS, NormalRibKind, |this| {
+ this.resolve_pattern_top(pat, PatternSource::For);
+ this.resolve_labeled_block(label, expr.id, block);
+ });
+ }
+
+ ExprKind::Block(ref block, label) => self.resolve_labeled_block(label, block.id, block),
+
+ // Equivalent to `visit::walk_expr` + passing some context to children.
+ ExprKind::Field(ref subexpression, _) => {
+ self.resolve_expr(subexpression, Some(expr));
+ }
+ ExprKind::MethodCall(ref segment, ref arguments, _) => {
+ let mut arguments = arguments.iter();
+ self.resolve_expr(arguments.next().unwrap(), Some(expr));
+ for argument in arguments {
+ self.resolve_expr(argument, None);
+ }
+ self.visit_path_segment(expr.span, segment);
+ }
+
+ ExprKind::Call(ref callee, ref arguments) => {
+ self.resolve_expr(callee, Some(expr));
+ let const_args = self.r.legacy_const_generic_args(callee).unwrap_or_default();
+ for (idx, argument) in arguments.iter().enumerate() {
+ // Constant arguments need to be treated as AnonConst since
+ // that is how they will be later lowered to HIR.
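+ // For illustration (our note): this is how `std::arch` intrinsics
+ // annotated with `#[rustc_legacy_const_generics]` keep accepting const
+ // operands positionally, e.g. the shuffle mask in
+ // `_mm_shuffle_ps(a, b, MASK)`.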
+ if const_args.contains(&idx) {
+ self.with_constant_rib(
+ IsRepeatExpr::No,
+ if argument.is_potential_trivial_const_param() {
+ HasGenericParams::Yes
+ } else {
+ HasGenericParams::No
+ },
+ None,
+ |this| {
+ this.resolve_expr(argument, None);
+ },
+ );
+ } else {
+ self.resolve_expr(argument, None);
+ }
+ }
+ }
+ ExprKind::Type(ref type_expr, ref ty) => {
+ // `ParseSess::type_ascription_path_suggestions` keeps spans of colon tokens in
+ // type ascription. Here we are trying to retrieve the span of the colon token as
+ // well, but only if it's written without spaces `expr:Ty` and therefore confusable
+ // with `expr::Ty`, only in this case it will match the span from
+ // `type_ascription_path_suggestions`.
+ self.diagnostic_metadata
+ .current_type_ascription
+ .push(type_expr.span.between(ty.span));
+ visit::walk_expr(self, expr);
+ self.diagnostic_metadata.current_type_ascription.pop();
+ }
+ // `async |x| ...` gets desugared to `|x| future_from_generator(|| ...)`, so we need to
+ // resolve the arguments within the proper scopes so that usages of them inside the
+ // closure are detected as upvars rather than normal closure arg usages.
+ ExprKind::Closure(_, _, Async::Yes { .. }, _, ref fn_decl, ref body, _span) => {
+ self.with_rib(ValueNS, NormalRibKind, |this| {
+ this.with_label_rib(ClosureOrAsyncRibKind, |this| {
+ // Resolve arguments:
+ this.resolve_params(&fn_decl.inputs);
+ // No need to resolve return type --
+ // the outer closure return type is `FnRetTy::Default`.
+
+ // Now resolve the inner closure
+ {
+ // No need to resolve arguments: the inner closure has none.
+ // Resolve the return type:
+ visit::walk_fn_ret_ty(this, &fn_decl.output);
+ // Resolve the body
+ this.visit_expr(body);
+ }
+ })
+ });
+ }
+ // For closures, ClosureOrAsyncRibKind is added in visit_fn
+ ExprKind::Closure(ClosureBinder::For { ref generic_params, span }, ..) => {
+ self.with_generic_param_rib(
+ &generic_params,
+ NormalRibKind,
+ LifetimeRibKind::Generics {
+ binder: expr.id,
+ kind: LifetimeBinderKind::Closure,
+ span,
+ },
+ |this| visit::walk_expr(this, expr),
+ );
+ }
+ ExprKind::Closure(..) => visit::walk_expr(self, expr),
+ ExprKind::Async(..) => {
+ self.with_label_rib(ClosureOrAsyncRibKind, |this| visit::walk_expr(this, expr));
+ }
+ ExprKind::Repeat(ref elem, ref ct) => {
+ self.visit_expr(elem);
+ self.with_lifetime_rib(LifetimeRibKind::AnonConst, |this| {
+ this.with_lifetime_rib(LifetimeRibKind::Elided(LifetimeRes::Static), |this| {
+ this.resolve_anon_const(ct, IsRepeatExpr::Yes)
+ })
+ });
+ }
+ ExprKind::ConstBlock(ref ct) => {
+ self.resolve_inline_const(ct);
+ }
+ ExprKind::Index(ref elem, ref idx) => {
+ self.resolve_expr(elem, Some(expr));
+ self.visit_expr(idx);
+ }
+ _ => {
+ visit::walk_expr(self, expr);
+ }
+ }
+ }
+
+ fn record_candidate_traits_for_expr_if_necessary(&mut self, expr: &'ast Expr) {
+ match expr.kind {
+ ExprKind::Field(_, ident) => {
+ // FIXME(#6890): Even though you can't treat a method like a
+ // field, we need to add any trait methods we find that match
+ // the field name so that we can do some nice error reporting
+ // later on in typeck.
+ let traits = self.traits_in_scope(ident, ValueNS);
+ self.r.trait_map.insert(expr.id, traits);
+ }
+ ExprKind::MethodCall(ref segment, ..) => {
+ debug!("(recording candidate traits for expr) recording traits for {}", expr.id);
+ let traits = self.traits_in_scope(segment.ident, ValueNS);
+ self.r.trait_map.insert(expr.id, traits);
+ }
+ _ => {
+ // Nothing to do.
+ }
+ }
+ }
+
+ fn traits_in_scope(&mut self, ident: Ident, ns: Namespace) -> Vec<TraitCandidate> {
+ self.r.traits_in_scope(
+ self.current_trait_ref.as_ref().map(|(module, _)| *module),
+ &self.parent_scope,
+ ident.span.ctxt(),
+ Some((ident.name, ns)),
+ )
+ }
+}
+
+struct LifetimeCountVisitor<'a, 'b> {
+ r: &'b mut Resolver<'a>,
+}
+
+/// Walks the whole crate in DFS order, visiting each item, counting the declared number of
+/// lifetime generic parameters.
+impl<'ast> Visitor<'ast> for LifetimeCountVisitor<'_, '_> {
+ fn visit_item(&mut self, item: &'ast Item) {
+ match &item.kind {
+ ItemKind::TyAlias(box TyAlias { ref generics, .. })
+ | ItemKind::Fn(box Fn { ref generics, .. })
+ | ItemKind::Enum(_, ref generics)
+ | ItemKind::Struct(_, ref generics)
+ | ItemKind::Union(_, ref generics)
+ | ItemKind::Impl(box Impl { ref generics, .. })
+ | ItemKind::Trait(box Trait { ref generics, .. })
+ | ItemKind::TraitAlias(ref generics, _) => {
+ let def_id = self.r.local_def_id(item.id);
+ let count = generics
+ .params
+ .iter()
+ .filter(|param| matches!(param.kind, ast::GenericParamKind::Lifetime { .. }))
+ .count();
+ self.r.item_generics_num_lifetimes.insert(def_id, count);
+ }
+
+ ItemKind::Mod(..)
+ | ItemKind::ForeignMod(..)
+ | ItemKind::Static(..)
+ | ItemKind::Const(..)
+ | ItemKind::Use(..)
+ | ItemKind::ExternCrate(..)
+ | ItemKind::MacroDef(..)
+ | ItemKind::GlobalAsm(..)
+ | ItemKind::MacCall(..) => {}
+ }
+ visit::walk_item(self, item)
+ }
+}
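+
+// For illustration (our note, not from the original source): visiting
+// `struct Foo<'a, 'b, T>(&'a T, &'b T);` records a count of two lifetimes for
+// `Foo` in `item_generics_num_lifetimes`; the type parameter `T` is ignored.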
+
+impl<'a> Resolver<'a> {
+ pub(crate) fn late_resolve_crate(&mut self, krate: &Crate) {
+ visit::walk_crate(&mut LifetimeCountVisitor { r: self }, krate);
+ let mut late_resolution_visitor = LateResolutionVisitor::new(self);
+ visit::walk_crate(&mut late_resolution_visitor, krate);
+ for (id, span) in late_resolution_visitor.diagnostic_metadata.unused_labels.iter() {
+ self.lint_buffer.buffer_lint(lint::builtin::UNUSED_LABELS, *id, *span, "unused label");
+ }
+ }
+}
diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs
new file mode 100644
index 000000000..2b1f2b88e
--- /dev/null
+++ b/compiler/rustc_resolve/src/late/diagnostics.rs
@@ -0,0 +1,2369 @@
+use crate::diagnostics::{ImportSuggestion, LabelSuggestion, TypoSuggestion};
+use crate::late::{AliasPossibility, LateResolutionVisitor, RibKind};
+use crate::late::{LifetimeBinderKind, LifetimeRes, LifetimeRibKind, LifetimeUseSet};
+use crate::path_names_to_string;
+use crate::{Module, ModuleKind, ModuleOrUniformRoot};
+use crate::{PathResult, PathSource, Segment};
+
+use rustc_ast::visit::{FnCtxt, FnKind, LifetimeCtxt};
+use rustc_ast::{
+ self as ast, AssocItemKind, Expr, ExprKind, GenericParam, GenericParamKind, Item, ItemKind,
+ NodeId, Path, Ty, TyKind, DUMMY_NODE_ID,
+};
+use rustc_ast_pretty::pprust::path_segment_to_string;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
+ MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def::Namespace::{self, *};
+use rustc_hir::def::{self, CtorKind, CtorOf, DefKind};
+use rustc_hir::def_id::{DefId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::PrimTy;
+use rustc_session::lint;
+use rustc_session::parse::feature_err;
+use rustc_session::Session;
+use rustc_span::edition::Edition;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{BytePos, Span};
+
+use std::iter;
+use std::ops::Deref;
+
+use tracing::debug;
+
+type Res = def::Res<ast::NodeId>;
+
+/// A field or associated item from self type suggested in case of resolution failure.
+enum AssocSuggestion {
+ Field,
+ MethodWithSelf,
+ AssocFn,
+ AssocType,
+ AssocConst,
+}
+
+impl AssocSuggestion {
+ fn action(&self) -> &'static str {
+ match self {
+ AssocSuggestion::Field => "use the available field",
+ AssocSuggestion::MethodWithSelf => "call the method with the fully-qualified path",
+ AssocSuggestion::AssocFn => "call the associated function",
+ AssocSuggestion::AssocConst => "use the associated `const`",
+ AssocSuggestion::AssocType => "use the associated type",
+ }
+ }
+}
+
+fn is_self_type(path: &[Segment], namespace: Namespace) -> bool {
+ namespace == TypeNS && path.len() == 1 && path[0].ident.name == kw::SelfUpper
+}
+
+fn is_self_value(path: &[Segment], namespace: Namespace) -> bool {
+ namespace == ValueNS && path.len() == 1 && path[0].ident.name == kw::SelfLower
+}
+
+/// Gets the stringified path for an enum from an `ImportSuggestion` for an enum variant.
+fn import_candidate_to_enum_paths(suggestion: &ImportSuggestion) -> (String, String) {
+ let variant_path = &suggestion.path;
+ let variant_path_string = path_names_to_string(variant_path);
+
+ let path_len = suggestion.path.segments.len();
+ let enum_path = ast::Path {
+ span: suggestion.path.span,
+ segments: suggestion.path.segments[0..path_len - 1].to_vec(),
+ tokens: None,
+ };
+ let enum_path_string = path_names_to_string(&enum_path);
+
+ (variant_path_string, enum_path_string)
+}
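+
+// For illustration (our note): a suggestion whose path is
+// `std::option::Option::None` yields the pair
+// `("std::option::Option::None", "std::option::Option")`.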
+
+/// Description of an elided lifetime.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
+pub(super) struct MissingLifetime {
+ /// Used to overwrite the resolution with the suggestion, to avoid cascading errors.
+ pub id: NodeId,
+ /// Where to suggest adding the lifetime.
+ pub span: Span,
+ /// How the lifetime was introduced, to have the correct space and comma.
+ pub kind: MissingLifetimeKind,
+ /// Number of elided lifetimes, used for elision in path.
+ pub count: usize,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
+pub(super) enum MissingLifetimeKind {
+ /// An explicit `'_`.
+ Underscore,
+ /// An elided lifetime `&ty`.
+ Ampersand,
+ /// An elided lifetime in brackets with written brackets.
+ Comma,
+ /// An elided lifetime with elided brackets.
+ Brackets,
+}
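+
+// For illustration (our note, not from the original source): `&'_ str` is
+// `Underscore`, `&str` is `Ampersand`, a `Ref<T>` missing its lifetime inside
+// written angle brackets is `Comma`, and a bare `Ref` that needs `Ref<'a>` is
+// `Brackets`.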
+
+/// Description of the lifetimes appearing in a function parameter.
+/// This is used to provide a literal explanation to the elision failure.
+#[derive(Clone, Debug)]
+pub(super) struct ElisionFnParameter {
+ /// The index of the argument in the original definition.
+ pub index: usize,
+ /// The name of the argument if it's a simple ident.
+ pub ident: Option<Ident>,
+ /// The number of lifetimes in the parameter.
+ pub lifetime_count: usize,
+ /// The span of the parameter.
+ pub span: Span,
+}
+
+/// Description of lifetimes that appear as candidates for elision.
+/// This is used to suggest introducing an explicit lifetime.
+#[derive(Debug)]
+pub(super) enum LifetimeElisionCandidate {
+ /// This is not a real lifetime.
+ Ignore,
+ /// There is a named lifetime, we won't suggest anything.
+ Named,
+ Missing(MissingLifetime),
+}
+
+impl<'a: 'ast, 'ast> LateResolutionVisitor<'a, '_, 'ast> {
+ fn def_span(&self, def_id: DefId) -> Option<Span> {
+ match def_id.krate {
+ LOCAL_CRATE => self.r.opt_span(def_id),
+ _ => Some(self.r.cstore().get_span_untracked(def_id, self.r.session)),
+ }
+ }
+
+ /// Handles error reporting for the `smart_resolve_path_fragment` function.
+ /// Creates base error and amends it with one short label and possibly some longer helps/notes.
+ pub(crate) fn smart_resolve_report_errors(
+ &mut self,
+ path: &[Segment],
+ span: Span,
+ source: PathSource<'_>,
+ res: Option<Res>,
+ ) -> (DiagnosticBuilder<'a, ErrorGuaranteed>, Vec<ImportSuggestion>) {
+ let ident_span = path.last().map_or(span, |ident| ident.ident.span);
+ let ns = source.namespace();
+ let is_expected = &|res| source.is_expected(res);
+ let is_enum_variant = &|res| matches!(res, Res::Def(DefKind::Variant, _));
+
+ debug!(?res, ?source);
+
+ // Make the base error.
+ struct BaseError<'a> {
+ msg: String,
+ fallback_label: String,
+ span: Span,
+ could_be_expr: bool,
+ suggestion: Option<(Span, &'a str, String)>,
+ }
+ let mut expected = source.descr_expected();
+ let path_str = Segment::names_to_string(path);
+ let item_str = path.last().unwrap().ident;
+ let base_error = if let Some(res) = res {
+ BaseError {
+ msg: format!("expected {}, found {} `{}`", expected, res.descr(), path_str),
+ fallback_label: format!("not a {expected}"),
+ span,
+ could_be_expr: match res {
+ Res::Def(DefKind::Fn, _) => {
+ // Verify whether this is a fn call or an Fn used as a type.
+ self.r
+ .session
+ .source_map()
+ .span_to_snippet(span)
+ .map(|snippet| snippet.ends_with(')'))
+ .unwrap_or(false)
+ }
+ Res::Def(
+ DefKind::Ctor(..) | DefKind::AssocFn | DefKind::Const | DefKind::AssocConst,
+ _,
+ )
+ | Res::SelfCtor(_)
+ | Res::PrimTy(_)
+ | Res::Local(_) => true,
+ _ => false,
+ },
+ suggestion: None,
+ }
+ } else {
+ let item_span = path.last().unwrap().ident.span;
+ let (mod_prefix, mod_str, suggestion) = if path.len() == 1 {
+ debug!(?self.diagnostic_metadata.current_impl_items);
+ debug!(?self.diagnostic_metadata.current_function);
+ let suggestion = if let Some(items) = self.diagnostic_metadata.current_impl_items
+ && let Some((fn_kind, _)) = self.diagnostic_metadata.current_function
+ && self.current_trait_ref.is_none()
+ && let Some(FnCtxt::Assoc(_)) = fn_kind.ctxt()
+ && let Some(item) = items.iter().find(|i| {
+ if let AssocItemKind::Fn(fn_) = &i.kind
+ && !fn_.sig.decl.has_self()
+ && i.ident.name == item_str.name
+ {
+ debug!(?item_str.name);
+ debug!(?fn_.sig.decl.inputs);
+ return true
+ }
+ false
+ })
+ {
+ Some((
+ item_span,
+ "consider using the associated function",
+ format!("Self::{}", item.ident)
+ ))
+ } else {
+ None
+ };
+ (String::new(), "this scope".to_string(), suggestion)
+ } else if path.len() == 2 && path[0].ident.name == kw::PathRoot {
+ if self.r.session.edition() > Edition::Edition2015 {
+ // In edition 2018 onwards, the `::foo` syntax may only pull from the extern prelude
+ // which overrides all other expectations of item type
+ expected = "crate";
+ (String::new(), "the list of imported crates".to_string(), None)
+ } else {
+ (String::new(), "the crate root".to_string(), None)
+ }
+ } else if path.len() == 2 && path[0].ident.name == kw::Crate {
+ (String::new(), "the crate root".to_string(), None)
+ } else {
+ let mod_path = &path[..path.len() - 1];
+ let mod_prefix = match self.resolve_path(mod_path, Some(TypeNS), None) {
+ PathResult::Module(ModuleOrUniformRoot::Module(module)) => module.res(),
+ _ => None,
+ }
+ .map_or_else(String::new, |res| format!("{} ", res.descr()));
+ (mod_prefix, format!("`{}`", Segment::names_to_string(mod_path)), None)
+ };
+ BaseError {
+ msg: format!("cannot find {expected} `{item_str}` in {mod_prefix}{mod_str}"),
+ fallback_label: if path_str == "async" && expected.starts_with("struct") {
+ "`async` blocks are only allowed in Rust 2018 or later".to_string()
+ } else {
+ format!("not found in {mod_str}")
+ },
+ span: item_span,
+ could_be_expr: false,
+ suggestion,
+ }
+ };
+
+ let code = source.error_code(res.is_some());
+ let mut err =
+ self.r.session.struct_span_err_with_code(base_error.span, &base_error.msg, code);
+
+ self.suggest_swapping_misplaced_self_ty_and_trait(&mut err, source, res, base_error.span);
+
+ if let Some(sugg) = base_error.suggestion {
+ err.span_suggestion_verbose(sugg.0, sugg.1, sugg.2, Applicability::MaybeIncorrect);
+ }
+
+ if let Some(span) = self.diagnostic_metadata.current_block_could_be_bare_struct_literal {
+ err.multipart_suggestion(
+ "you might have meant to write a `struct` literal",
+ vec![
+ (span.shrink_to_lo(), "{ SomeStruct ".to_string()),
+ (span.shrink_to_hi(), "}".to_string()),
+ ],
+ Applicability::HasPlaceholders,
+ );
+ }
+ match (source, self.diagnostic_metadata.in_if_condition) {
+ (
+ PathSource::Expr(_),
+ Some(Expr { span: expr_span, kind: ExprKind::Assign(lhs, _, _), .. }),
+ ) => {
+ // Icky heuristic so we don't suggest:
+ // `if (i + 2) = 2` => `if let (i + 2) = 2` (approximately pattern)
+ // `if 2 = i` => `if let 2 = i` (lhs needs to contain error span)
+ if lhs.is_approximately_pattern() && lhs.span.contains(span) {
+ err.span_suggestion_verbose(
+ expr_span.shrink_to_lo(),
+ "you might have meant to use pattern matching",
+ "let ",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ _ => {}
+ }
+
+ let is_assoc_fn = self.self_type_is_available();
+ // Emit help message for fake-self from other languages (e.g., `this` in JavaScript).
+ if ["this", "my"].contains(&item_str.as_str()) && is_assoc_fn {
+ err.span_suggestion_short(
+ span,
+ "you might have meant to use `self` here instead",
+ "self",
+ Applicability::MaybeIncorrect,
+ );
+ if !self.self_value_is_available(path[0].ident.span) {
+ if let Some((FnKind::Fn(_, _, sig, ..), fn_span)) =
+ &self.diagnostic_metadata.current_function
+ {
+ let (span, sugg) = if let Some(param) = sig.decl.inputs.get(0) {
+ (param.span.shrink_to_lo(), "&self, ")
+ } else {
+ (
+ self.r
+ .session
+ .source_map()
+ .span_through_char(*fn_span, '(')
+ .shrink_to_hi(),
+ "&self",
+ )
+ };
+ err.span_suggestion_verbose(
+ span,
+ "if you meant to use `self`, you are also missing a `self` receiver \
+ argument",
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
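+ // For illustration (our note): `fn get(&self) -> u32 { this.x }` in an
+ // `impl` block lands in the branch above and gets the `self` suggestion.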
+
+ self.detect_assoc_type_constraint_meant_as_path(base_error.span, &mut err);
+
+ // Emit special messages for unresolved `Self` and `self`.
+ if is_self_type(path, ns) {
+ err.code(rustc_errors::error_code!(E0411));
+ err.span_label(
+ span,
+ "`Self` is only available in impls, traits, and type definitions".to_string(),
+ );
+ if let Some(item_kind) = self.diagnostic_metadata.current_item {
+ err.span_label(
+ item_kind.ident.span,
+ format!(
+ "`Self` not allowed in {} {}",
+ item_kind.kind.article(),
+ item_kind.kind.descr()
+ ),
+ );
+ }
+ return (err, Vec::new());
+ }
+ if is_self_value(path, ns) {
+ debug!("smart_resolve_path_fragment: E0424, source={:?}", source);
+
+ err.code(rustc_errors::error_code!(E0424));
+ err.span_label(span, match source {
+ PathSource::Pat => "`self` value is a keyword and may not be bound to variables or shadowed",
+ _ => "`self` value is a keyword only available in methods with a `self` parameter",
+ });
+ if let Some((fn_kind, span)) = &self.diagnostic_metadata.current_function {
+ // The current function has a `self` parameter, but we were unable to resolve
+ // a reference to `self`. This can only happen if the `self` identifier we
+ // are resolving came from a different hygiene context.
+ if fn_kind.decl().inputs.get(0).map_or(false, |p| p.is_self()) {
+ err.span_label(*span, "this function has a `self` parameter, but a macro invocation can only access identifiers it receives from parameters");
+ } else {
+ let doesnt = if is_assoc_fn {
+ let (span, sugg) = fn_kind
+ .decl()
+ .inputs
+ .get(0)
+ .map(|p| (p.span.shrink_to_lo(), "&self, "))
+ .unwrap_or_else(|| {
+ // Try to look for the "(" after the function name, if possible.
+ // This avoids placing the suggestion into the visibility specifier.
+ let span = fn_kind
+ .ident()
+ .map_or(*span, |ident| span.with_lo(ident.span.hi()));
+ (
+ self.r
+ .session
+ .source_map()
+ .span_through_char(span, '(')
+ .shrink_to_hi(),
+ "&self",
+ )
+ });
+ err.span_suggestion_verbose(
+ span,
+ "add a `self` receiver parameter to make the associated `fn` a method",
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ "doesn't"
+ } else {
+ "can't"
+ };
+ if let Some(ident) = fn_kind.ident() {
+ err.span_label(
+ ident.span,
+ &format!("this function {} have a `self` parameter", doesnt),
+ );
+ }
+ }
+ } else if let Some(item_kind) = self.diagnostic_metadata.current_item {
+ err.span_label(
+ item_kind.ident.span,
+ format!(
+ "`self` not allowed in {} {}",
+ item_kind.kind.article(),
+ item_kind.kind.descr()
+ ),
+ );
+ }
+ return (err, Vec::new());
+ }
+
+ // Try to look up the name in a more relaxed fashion for better error reporting.
+ let ident = path.last().unwrap().ident;
+ let mut candidates = self
+ .r
+ .lookup_import_candidates(ident, ns, &self.parent_scope, is_expected)
+ .into_iter()
+ .filter(|ImportSuggestion { did, .. }| {
+ match (did, res.and_then(|res| res.opt_def_id())) {
+ (Some(suggestion_did), Some(actual_did)) => *suggestion_did != actual_did,
+ _ => true,
+ }
+ })
+ .collect::<Vec<_>>();
+ let crate_def_id = CRATE_DEF_ID.to_def_id();
+ // Try to filter out intrinsics candidates, as long as we have
+ // some other candidates to suggest.
+ let intrinsic_candidates: Vec<_> = candidates
+ .drain_filter(|sugg| {
+ let path = path_names_to_string(&sugg.path);
+ path.starts_with("core::intrinsics::") || path.starts_with("std::intrinsics::")
+ })
+ .collect();
+ if candidates.is_empty() {
+ // Put them back if we have no more candidates to suggest...
+ candidates.extend(intrinsic_candidates);
+ }
+ if candidates.is_empty() && is_expected(Res::Def(DefKind::Enum, crate_def_id)) {
+ let mut enum_candidates: Vec<_> = self
+ .r
+ .lookup_import_candidates(ident, ns, &self.parent_scope, is_enum_variant)
+ .into_iter()
+ .map(|suggestion| import_candidate_to_enum_paths(&suggestion))
+ .filter(|(_, enum_ty_path)| !enum_ty_path.starts_with("std::prelude::"))
+ .collect();
+ if !enum_candidates.is_empty() {
+ if let (PathSource::Type, Some(span)) =
+ (source, self.diagnostic_metadata.current_type_ascription.last())
+ {
+ if self
+ .r
+ .session
+ .parse_sess
+ .type_ascription_path_suggestions
+ .borrow()
+ .contains(span)
+ {
+ // Already reported this issue on the lhs of the type ascription.
+ err.delay_as_bug();
+ return (err, candidates);
+ }
+ }
+
+ enum_candidates.sort();
+
+ // Contextualize for E0412 "cannot find type", but don't belabor the point
+ // (that it's a variant) for E0573 "expected type, found variant".
+ let preamble = if res.is_none() {
+ let others = match enum_candidates.len() {
+ 1 => String::new(),
+ 2 => " and 1 other".to_owned(),
+ n => format!(" and {} others", n),
+ };
+ format!("there is an enum variant `{}`{}; ", enum_candidates[0].0, others)
+ } else {
+ String::new()
+ };
+ let msg = format!("{}try using the variant's enum", preamble);
+
+ err.span_suggestions(
+ span,
+ &msg,
+ enum_candidates.into_iter().map(|(_variant_path, enum_ty_path)| enum_ty_path),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ // Try Levenshtein algorithm.
+ let typo_sugg = self.lookup_typo_candidate(path, ns, is_expected);
+ if path.len() == 1 && self.self_type_is_available() {
+ if let Some(candidate) = self.lookup_assoc_candidate(ident, ns, is_expected) {
+ let self_is_available = self.self_value_is_available(path[0].ident.span);
+ match candidate {
+ AssocSuggestion::Field => {
+ if self_is_available {
+ err.span_suggestion(
+ span,
+ "you might have meant to use the available field",
+ format!("self.{path_str}"),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_label(span, "a field by this name exists in `Self`");
+ }
+ }
+ AssocSuggestion::MethodWithSelf if self_is_available => {
+ err.span_suggestion(
+ span,
+ "you might have meant to call the method",
+ format!("self.{path_str}"),
+ Applicability::MachineApplicable,
+ );
+ }
+ AssocSuggestion::MethodWithSelf
+ | AssocSuggestion::AssocFn
+ | AssocSuggestion::AssocConst
+ | AssocSuggestion::AssocType => {
+ err.span_suggestion(
+ span,
+ &format!("you might have meant to {}", candidate.action()),
+ format!("Self::{path_str}"),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ self.r.add_typo_suggestion(&mut err, typo_sugg, ident_span);
+ return (err, candidates);
+ }
+
+ // If the first argument in the call is `self`, suggest calling a method.
+ if let Some((call_span, args_span)) = self.call_has_self_arg(source) {
+ let mut args_snippet = String::new();
+ if let Some(args_span) = args_span {
+ if let Ok(snippet) = self.r.session.source_map().span_to_snippet(args_span) {
+ args_snippet = snippet;
+ }
+ }
+
+ err.span_suggestion(
+ call_span,
+ &format!("try calling `{ident}` as a method"),
+ format!("self.{path_str}({args_snippet})"),
+ Applicability::MachineApplicable,
+ );
+ return (err, candidates);
+ }
+ }
+
+ // Try context-dependent help if relaxed lookup didn't work.
+ if let Some(res) = res {
+ if self.smart_resolve_context_dependent_help(
+ &mut err,
+ span,
+ source,
+ res,
+ &path_str,
+ &base_error.fallback_label,
+ ) {
+ // We do this to avoid losing a secondary span when we override the main error span.
+ self.r.add_typo_suggestion(&mut err, typo_sugg, ident_span);
+ return (err, candidates);
+ }
+ }
+
+ let is_macro =
+ base_error.span.from_expansion() && base_error.span.desugaring_kind().is_none();
+ if !self.type_ascription_suggestion(&mut err, base_error.span) {
+ let mut fallback = false;
+ if let (
+ PathSource::Trait(AliasPossibility::Maybe),
+ Some(Res::Def(DefKind::Struct | DefKind::Enum | DefKind::Union, _)),
+ false,
+ ) = (source, res, is_macro)
+ {
+ if let Some(bounds @ [_, .., _]) = self.diagnostic_metadata.current_trait_object {
+ fallback = true;
+ let spans: Vec<Span> = bounds
+ .iter()
+ .map(|bound| bound.span())
+ .filter(|&sp| sp != base_error.span)
+ .collect();
+
+ let start_span = bounds.iter().map(|bound| bound.span()).next().unwrap();
+ // `end_span` is the end of the poly trait ref (Foo + 'baz + Bar><)
+ let end_span = bounds.iter().map(|bound| bound.span()).last().unwrap();
+ // `last_bound_span` is the last bound of the poly trait ref (Foo + >'baz< + Bar)
+ let last_bound_span = spans.last().cloned().unwrap();
+ let mut multi_span: MultiSpan = spans.clone().into();
+ for sp in spans {
+ let msg = if sp == last_bound_span {
+ format!(
+ "...because of {these} bound{s}",
+ these = pluralize!("this", bounds.len() - 1),
+ s = pluralize!(bounds.len() - 1),
+ )
+ } else {
+ String::new()
+ };
+ multi_span.push_span_label(sp, msg);
+ }
+ multi_span
+ .push_span_label(base_error.span, "expected this type to be a trait...");
+ err.span_help(
+ multi_span,
+ "`+` is used to constrain a \"trait object\" type with lifetimes or \
+ auto-traits; structs and enums can't be bound in that way",
+ );
+ if bounds.iter().all(|bound| match bound {
+ ast::GenericBound::Outlives(_) => true,
+ ast::GenericBound::Trait(tr, _) => tr.span == base_error.span,
+ }) {
+ let mut sugg = vec![];
+ if base_error.span != start_span {
+ sugg.push((start_span.until(base_error.span), String::new()));
+ }
+ if base_error.span != end_span {
+ sugg.push((base_error.span.shrink_to_hi().to(end_span), String::new()));
+ }
+
+ err.multipart_suggestion(
+ "if you meant to use a type and not a trait here, remove the bounds",
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ fallback |= self.restrict_assoc_type_in_where_clause(span, &mut err);
+
+ if !self.r.add_typo_suggestion(&mut err, typo_sugg, ident_span) {
+ fallback = true;
+ match self.diagnostic_metadata.current_let_binding {
+ Some((pat_sp, Some(ty_sp), None))
+ if ty_sp.contains(base_error.span) && base_error.could_be_expr =>
+ {
+ err.span_suggestion_short(
+ pat_sp.between(ty_sp),
+ "use `=` if you meant to assign",
+ " = ",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ }
+
+ // If the trait has a single item (which wasn't matched by Levenshtein), suggest it
+ let suggestion = self.get_single_associated_item(&path, &source, is_expected);
+ self.r.add_typo_suggestion(&mut err, suggestion, ident_span);
+ }
+ if fallback {
+ // Fallback label.
+ err.span_label(base_error.span, base_error.fallback_label);
+ }
+ }
+ if let Some(err_code) = &err.code {
+ if err_code == &rustc_errors::error_code!(E0425) {
+ for label_rib in &self.label_ribs {
+ for (label_ident, node_id) in &label_rib.bindings {
+ if format!("'{}", ident) == label_ident.to_string() {
+ err.span_label(label_ident.span, "a label with a similar name exists");
+ if let PathSource::Expr(Some(Expr {
+ kind: ExprKind::Break(None, Some(_)),
+ ..
+ })) = source
+ {
+ err.span_suggestion(
+ span,
+ "use the similarly named label",
+ label_ident.name,
+ Applicability::MaybeIncorrect,
+ );
+ // Do not lint against unused label when we suggest them.
+ self.diagnostic_metadata.unused_labels.remove(node_id);
+ }
+ }
+ }
+ }
+ } else if err_code == &rustc_errors::error_code!(E0412) {
+ if let Some(correct) = Self::likely_rust_type(path) {
+ err.span_suggestion(
+ span,
+ "perhaps you intended to use this type",
+ correct,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ (err, candidates)
+ }
+
+ fn detect_assoc_type_constraint_meant_as_path(&self, base_span: Span, err: &mut Diagnostic) {
+ let Some(ty) = self.diagnostic_metadata.current_type_path else { return; };
+ let TyKind::Path(_, path) = &ty.kind else { return; };
+ for segment in &path.segments {
+ let Some(params) = &segment.args else { continue; };
+ let ast::GenericArgs::AngleBracketed(ref params) = params.deref() else { continue; };
+ for param in &params.args {
+ let ast::AngleBracketedArg::Constraint(constraint) = param else { continue; };
+ let ast::AssocConstraintKind::Bound { bounds } = &constraint.kind else {
+ continue;
+ };
+ for bound in bounds {
+ let ast::GenericBound::Trait(trait_ref, ast::TraitBoundModifier::None)
+ = bound else
+ {
+ continue;
+ };
+ if base_span == trait_ref.span {
+ err.span_suggestion_verbose(
+ constraint.ident.span.between(trait_ref.span),
+ "you might have meant to write a path instead of an associated type bound",
+ "::",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ }
+ }
+
+ fn suggest_swapping_misplaced_self_ty_and_trait(
+ &mut self,
+ err: &mut Diagnostic,
+ source: PathSource<'_>,
+ res: Option<Res>,
+ span: Span,
+ ) {
+ if let Some((trait_ref, self_ty)) =
+ self.diagnostic_metadata.currently_processing_impl_trait.clone()
+ && let TyKind::Path(_, self_ty_path) = &self_ty.kind
+ && let PathResult::Module(ModuleOrUniformRoot::Module(module)) =
+ self.resolve_path(&Segment::from_path(self_ty_path), Some(TypeNS), None)
+ && let ModuleKind::Def(DefKind::Trait, ..) = module.kind
+ && trait_ref.path.span == span
+ && let PathSource::Trait(_) = source
+ && let Some(Res::Def(DefKind::Struct | DefKind::Enum | DefKind::Union, _)) = res
+ && let Ok(self_ty_str) =
+ self.r.session.source_map().span_to_snippet(self_ty.span)
+ && let Ok(trait_ref_str) =
+ self.r.session.source_map().span_to_snippet(trait_ref.path.span)
+ {
+ err.multipart_suggestion(
+ "`impl` items mention the trait being implemented first and the type it is being implemented for second",
+ vec![(trait_ref.path.span, self_ty_str), (self_ty.span, trait_ref_str)],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ fn get_single_associated_item(
+ &mut self,
+ path: &[Segment],
+ source: &PathSource<'_>,
+ filter_fn: &impl Fn(Res) -> bool,
+ ) -> Option<TypoSuggestion> {
+ if let crate::PathSource::TraitItem(_) = source {
+ let mod_path = &path[..path.len() - 1];
+ if let PathResult::Module(ModuleOrUniformRoot::Module(module)) =
+ self.resolve_path(mod_path, None, None)
+ {
+ let resolutions = self.r.resolutions(module).borrow();
+ let targets: Vec<_> =
+ resolutions
+ .iter()
+ .filter_map(|(key, resolution)| {
+ resolution.borrow().binding.map(|binding| binding.res()).and_then(
+ |res| if filter_fn(res) { Some((key, res)) } else { None },
+ )
+ })
+ .collect();
+ if targets.len() == 1 {
+ let target = targets[0];
+ return Some(TypoSuggestion::single_item_from_res(
+ target.0.ident.name,
+ target.1,
+ ));
+ }
+ }
+ }
+ None
+ }
+
+ /// Given `where <T as Bar>::Baz: String`, suggest `where T: Bar<Baz = String>`.
+ fn restrict_assoc_type_in_where_clause(&mut self, span: Span, err: &mut Diagnostic) -> bool {
+ // Detect that we are actually in a `where` predicate.
+ let (bounded_ty, bounds, where_span) =
+ if let Some(ast::WherePredicate::BoundPredicate(ast::WhereBoundPredicate {
+ bounded_ty,
+ bound_generic_params,
+ bounds,
+ span,
+ })) = self.diagnostic_metadata.current_where_predicate
+ {
+ if !bound_generic_params.is_empty() {
+ return false;
+ }
+ (bounded_ty, bounds, span)
+ } else {
+ return false;
+ };
+
+ // Confirm that the target is an associated type.
+ let (ty, position, path) = if let ast::TyKind::Path(
+ Some(ast::QSelf { ty, position, .. }),
+ path,
+ ) = &bounded_ty.kind
+ {
+ // use this to verify that ident is a type param.
+ let Some(partial_res) = self.r.partial_res_map.get(&bounded_ty.id) else {
+ return false;
+ };
+ if !(matches!(
+ partial_res.base_res(),
+ hir::def::Res::Def(hir::def::DefKind::AssocTy, _)
+ ) && partial_res.unresolved_segments() == 0)
+ {
+ return false;
+ }
+ (ty, position, path)
+ } else {
+ return false;
+ };
+
+ let peeled_ty = ty.peel_refs();
+ if let ast::TyKind::Path(None, type_param_path) = &peeled_ty.kind {
+ // Confirm that the `SelfTy` is a type parameter.
+ let Some(partial_res) = self.r.partial_res_map.get(&peeled_ty.id) else {
+ return false;
+ };
+ if !(matches!(
+ partial_res.base_res(),
+ hir::def::Res::Def(hir::def::DefKind::TyParam, _)
+ ) && partial_res.unresolved_segments() == 0)
+ {
+ return false;
+ }
+ if let (
+ [ast::PathSegment { ident: constrain_ident, args: None, .. }],
+ [ast::GenericBound::Trait(poly_trait_ref, ast::TraitBoundModifier::None)],
+ ) = (&type_param_path.segments[..], &bounds[..])
+ {
+ if let [ast::PathSegment { ident, args: None, .. }] =
+ &poly_trait_ref.trait_ref.path.segments[..]
+ {
+ if ident.span == span {
+ err.span_suggestion_verbose(
+ *where_span,
+ &format!("constrain the associated type to `{}`", ident),
+ format!(
+ "{}: {}<{} = {}>",
+ self.r
+ .session
+ .source_map()
+ .span_to_snippet(ty.span) // Account for `<&'a T as Foo>::Bar`.
+ .unwrap_or_else(|_| constrain_ident.to_string()),
+ path.segments[..*position]
+ .iter()
+ .map(|segment| path_segment_to_string(segment))
+ .collect::<Vec<_>>()
+ .join("::"),
+ path.segments[*position..]
+ .iter()
+ .map(|segment| path_segment_to_string(segment))
+ .collect::<Vec<_>>()
+ .join("::"),
+ ident,
+ ),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ return true;
+ }
+ }
+ }
+ false
+ }
+
+ /// Check if the source is a call expression and the first argument is `self`. If true,
+ /// return the span of the whole call and the span for all arguments except the first one (`self`).
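+ /// For illustration (our note): `foo(self, a, b)` returns the span of the
+ /// whole call plus a span covering `a, b`; `foo(&self, a)` also matches,
+ /// since `&`-references around the first argument are peeled.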
+ fn call_has_self_arg(&self, source: PathSource<'_>) -> Option<(Span, Option<Span>)> {
+ let mut has_self_arg = None;
+ if let PathSource::Expr(Some(parent)) = source {
+ match &parent.kind {
+ ExprKind::Call(_, args) if !args.is_empty() => {
+ let mut expr_kind = &args[0].kind;
+ loop {
+ match expr_kind {
+ ExprKind::Path(_, arg_name) if arg_name.segments.len() == 1 => {
+ if arg_name.segments[0].ident.name == kw::SelfLower {
+ let call_span = parent.span;
+ let tail_args_span = if args.len() > 1 {
+ Some(Span::new(
+ args[1].span.lo(),
+ args.last().unwrap().span.hi(),
+ call_span.ctxt(),
+ None,
+ ))
+ } else {
+ None
+ };
+ has_self_arg = Some((call_span, tail_args_span));
+ }
+ break;
+ }
+ ExprKind::AddrOf(_, _, expr) => expr_kind = &expr.kind,
+ _ => break,
+ }
+ }
+ }
+ _ => (),
+ }
+ };
+ has_self_arg
+ }
+
+ fn followed_by_brace(&self, span: Span) -> (bool, Option<Span>) {
+ // HACK(estebank): find a better way to figure out that this was a
+ // parser issue where a struct literal is being used on an expression
+ // where a brace being opened means a block is being started. Look
+ // ahead for the next text to see if `span` is followed by a `{`.
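+ //
+ // For illustration (our note): for `S { x: 1 }` written where a block
+ // could start, this finds the `{` after `S` and, if the matching `}` is
+ // close enough, a span the caller can wrap in parentheses as
+ // `(S { x: 1 })`.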
+ let sm = self.r.session.source_map();
+ let mut sp = span;
+ loop {
+ sp = sm.next_point(sp);
+ match sm.span_to_snippet(sp) {
+ Ok(ref snippet) => {
+ if snippet.chars().any(|c| !c.is_whitespace()) {
+ break;
+ }
+ }
+ _ => break,
+ }
+ }
+ let followed_by_brace = matches!(sm.span_to_snippet(sp), Ok(ref snippet) if snippet == "{");
+ // In case this could be a struct literal that needs to be surrounded
+ // by parentheses, find the appropriate span.
+ let mut i = 0;
+ let mut closing_brace = None;
+ loop {
+ sp = sm.next_point(sp);
+ match sm.span_to_snippet(sp) {
+ Ok(ref snippet) => {
+ if snippet == "}" {
+ closing_brace = Some(span.to(sp));
+ break;
+ }
+ }
+ _ => break,
+ }
+ i += 1;
+ // The bigger the span, the more likely we're incorrect --
+ // bound it to 100 chars long.
+ if i > 100 {
+ break;
+ }
+ }
+ (followed_by_brace, closing_brace)
+ }
+
+ /// Provides context-dependent help for errors reported by the `smart_resolve_path_fragment`
+ /// function.
+ /// Returns `true` if able to provide context-dependent help.
+ fn smart_resolve_context_dependent_help(
+ &mut self,
+ err: &mut Diagnostic,
+ span: Span,
+ source: PathSource<'_>,
+ res: Res,
+ path_str: &str,
+ fallback_label: &str,
+ ) -> bool {
+ let ns = source.namespace();
+ let is_expected = &|res| source.is_expected(res);
+
+ let path_sep = |err: &mut Diagnostic, expr: &Expr| match expr.kind {
+ ExprKind::Field(_, ident) => {
+ err.span_suggestion(
+ expr.span,
+ "use the path separator to refer to an item",
+ format!("{}::{}", path_str, ident),
+ Applicability::MaybeIncorrect,
+ );
+ true
+ }
+ ExprKind::MethodCall(ref segment, ..) => {
+ let span = expr.span.with_hi(segment.ident.span.hi());
+ err.span_suggestion(
+ span,
+ "use the path separator to refer to an item",
+ format!("{}::{}", path_str, segment.ident),
+ Applicability::MaybeIncorrect,
+ );
+ true
+ }
+ _ => false,
+ };
+
+ let find_span = |source: &PathSource<'_>, err: &mut Diagnostic| {
+ match source {
+ PathSource::Expr(Some(Expr { span, kind: ExprKind::Call(_, _), .. }))
+ | PathSource::TupleStruct(span, _) => {
+ // We want the main underline to cover the suggested code as well for
+ // cleaner output.
+ err.set_span(*span);
+ *span
+ }
+ _ => span,
+ }
+ };
+
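+ // For illustration (our note, not from the original source): using a
+ // struct `S { x: u8 }` bare as `S` in expression position makes the
+ // closure below suggest "use struct literal syntax instead" with the
+ // placeholder `S { x: val }`.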
+ let mut bad_struct_syntax_suggestion = |def_id: DefId| {
+ let (followed_by_brace, closing_brace) = self.followed_by_brace(span);
+
+ match source {
+ PathSource::Expr(Some(
+ parent @ Expr { kind: ExprKind::Field(..) | ExprKind::MethodCall(..), .. },
+ )) if path_sep(err, &parent) => {}
+ PathSource::Expr(
+ None
+ | Some(Expr {
+ kind:
+ ExprKind::Path(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Unary(..)
+ | ExprKind::If(..)
+ | ExprKind::While(..)
+ | ExprKind::ForLoop(..)
+ | ExprKind::Match(..),
+ ..
+ }),
+ ) if followed_by_brace => {
+ if let Some(sp) = closing_brace {
+ err.span_label(span, fallback_label);
+ err.multipart_suggestion(
+ "surround the struct literal with parentheses",
+ vec![
+ (sp.shrink_to_lo(), "(".to_string()),
+ (sp.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_label(
+ span, // Note the parentheses surrounding the suggestion below
+ format!(
+ "you might want to surround a struct literal with parentheses: \
+ `({} {{ /* fields */ }})`?",
+ path_str
+ ),
+ );
+ }
+ }
+ PathSource::Expr(_) | PathSource::TupleStruct(..) | PathSource::Pat => {
+ let span = find_span(&source, err);
+ if let Some(span) = self.def_span(def_id) {
+ err.span_label(span, &format!("`{}` defined here", path_str));
+ }
+ let (tail, descr, applicability) = match source {
+ PathSource::Pat | PathSource::TupleStruct(..) => {
+ ("", "pattern", Applicability::MachineApplicable)
+ }
+ _ => (": val", "literal", Applicability::HasPlaceholders),
+ };
+ let (fields, applicability) = match self.r.field_names.get(&def_id) {
+ Some(fields) => (
+ fields
+ .iter()
+ .map(|f| format!("{}{}", f.node, tail))
+ .collect::<Vec<String>>()
+ .join(", "),
+ applicability,
+ ),
+ None => ("/* fields */".to_string(), Applicability::HasPlaceholders),
+ };
+ let pad = match self.r.field_names.get(&def_id) {
+ Some(fields) if fields.is_empty() => "",
+ _ => " ",
+ };
+ err.span_suggestion(
+ span,
+ &format!("use struct {} syntax instead", descr),
+ format!("{path_str} {{{pad}{fields}{pad}}}"),
+ applicability,
+ );
+ }
+ _ => {
+ err.span_label(span, fallback_label);
+ }
+ }
+ };
+
+ match (res, source) {
+ (
+ Res::Def(DefKind::Macro(MacroKind::Bang), _),
+ PathSource::Expr(Some(Expr {
+ kind: ExprKind::Index(..) | ExprKind::Call(..), ..
+ }))
+ | PathSource::Struct,
+ ) => {
+ err.span_label(span, fallback_label);
+ err.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ "use `!` to invoke the macro",
+ "!",
+ Applicability::MaybeIncorrect,
+ );
+ if path_str == "try" && span.rust_2015() {
+ err.note("if you want the `try` keyword, you need Rust 2018 or later");
+ }
+ }
+ (Res::Def(DefKind::Macro(MacroKind::Bang), _), _) => {
+ err.span_label(span, fallback_label);
+ }
+ (Res::Def(DefKind::TyAlias, def_id), PathSource::Trait(_)) => {
+ err.span_label(span, "type aliases cannot be used as traits");
+ if self.r.session.is_nightly_build() {
+ let msg = "you might have meant to use `#![feature(trait_alias)]` instead of a \
+ `type` alias";
+ if let Some(span) = self.def_span(def_id) {
+ if let Ok(snip) = self.r.session.source_map().span_to_snippet(span) {
+ // The span contains a type alias so we should be able to
+ // replace `type` with `trait`.
+ let snip = snip.replacen("type", "trait", 1);
+ err.span_suggestion(span, msg, snip, Applicability::MaybeIncorrect);
+ } else {
+ err.span_help(span, msg);
+ }
+ } else {
+ err.help(msg);
+ }
+ }
+ }
+ (Res::Def(DefKind::Mod, _), PathSource::Expr(Some(parent))) => {
+ if !path_sep(err, &parent) {
+ return false;
+ }
+ }
+ (
+ Res::Def(DefKind::Enum, def_id),
+ PathSource::TupleStruct(..) | PathSource::Expr(..),
+ ) => {
+ if self
+ .diagnostic_metadata
+ .current_type_ascription
+ .last()
+ .map(|sp| {
+ self.r
+ .session
+ .parse_sess
+ .type_ascription_path_suggestions
+ .borrow()
+ .contains(&sp)
+ })
+ .unwrap_or(false)
+ {
+ err.downgrade_to_delayed_bug();
+ // We already suggested changing `:` into `::` during parsing.
+ return false;
+ }
+
+ self.suggest_using_enum_variant(err, source, def_id, span);
+ }
+ (Res::Def(DefKind::Struct, def_id), source) if ns == ValueNS => {
+ let (ctor_def, ctor_vis, fields) =
+ if let Some(struct_ctor) = self.r.struct_constructors.get(&def_id).cloned() {
+ if let PathSource::Expr(Some(parent)) = source {
+ if let ExprKind::Field(..) | ExprKind::MethodCall(..) = parent.kind {
+ bad_struct_syntax_suggestion(def_id);
+ return true;
+ }
+ }
+ struct_ctor
+ } else {
+ bad_struct_syntax_suggestion(def_id);
+ return true;
+ };
+
+ let is_accessible = self.r.is_accessible_from(ctor_vis, self.parent_scope.module);
+ if !is_expected(ctor_def) || is_accessible {
+ return true;
+ }
+
+ let field_spans = match source {
+ // e.g. `if let Enum::TupleVariant(field1, field2) = _`
+ PathSource::TupleStruct(_, pattern_spans) => {
+ err.set_primary_message(
+ "cannot match against a tuple struct which contains private fields",
+ );
+
+ // Use spans of the tuple struct pattern.
+ Some(Vec::from(pattern_spans))
+ }
+ // e.g. `let _ = Enum::TupleVariant(field1, field2);`
+ _ if source.is_call() => {
+ err.set_primary_message(
+ "cannot initialize a tuple struct which contains private fields",
+ );
+
+ // Use spans of the tuple struct definition.
+ self.r
+ .field_names
+ .get(&def_id)
+ .map(|fields| fields.iter().map(|f| f.span).collect::<Vec<_>>())
+ }
+ _ => None,
+ };
+
+ if let Some(spans) =
+ field_spans.filter(|spans| spans.len() > 0 && fields.len() == spans.len())
+ {
+ let non_visible_spans: Vec<Span> = iter::zip(&fields, &spans)
+ .filter(|(vis, _)| {
+ !self.r.is_accessible_from(**vis, self.parent_scope.module)
+ })
+ .map(|(_, span)| *span)
+ .collect();
+
+ if non_visible_spans.len() > 0 {
+ let mut m: MultiSpan = non_visible_spans.clone().into();
+ non_visible_spans
+ .into_iter()
+ .for_each(|s| m.push_span_label(s, "private field"));
+ err.span_note(m, "constructor is not visible here due to private fields");
+ }
+
+ return true;
+ }
+
+ err.span_label(span, "constructor is not visible here due to private fields");
+ }
+ (
+ Res::Def(
+ DefKind::Union | DefKind::Variant | DefKind::Ctor(_, CtorKind::Fictive),
+ def_id,
+ ),
+ _,
+ ) if ns == ValueNS => {
+ bad_struct_syntax_suggestion(def_id);
+ }
+ (Res::Def(DefKind::Ctor(_, CtorKind::Const), def_id), _) if ns == ValueNS => {
+ match source {
+ PathSource::Expr(_) | PathSource::TupleStruct(..) | PathSource::Pat => {
+ let span = find_span(&source, err);
+ if let Some(span) = self.def_span(def_id) {
+ err.span_label(span, &format!("`{}` defined here", path_str));
+ }
+ err.span_suggestion(
+ span,
+ "use this syntax instead",
+ path_str,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => return false,
+ }
+ }
+ (Res::Def(DefKind::Ctor(_, CtorKind::Fn), def_id), _) if ns == ValueNS => {
+ if let Some(span) = self.def_span(def_id) {
+ err.span_label(span, &format!("`{}` defined here", path_str));
+ }
+ let fields = self.r.field_names.get(&def_id).map_or_else(
+ || "/* fields */".to_string(),
+ |fields| vec!["_"; fields.len()].join(", "),
+ );
+ err.span_suggestion(
+ span,
+ "use the tuple variant pattern syntax instead",
+ format!("{}({})", path_str, fields),
+ Applicability::HasPlaceholders,
+ );
+ }
+ (Res::SelfTy { .. }, _) if ns == ValueNS => {
+ err.span_label(span, fallback_label);
+ err.note("can't use `Self` as a constructor, you must use the implemented struct");
+ }
+ (Res::Def(DefKind::TyAlias | DefKind::AssocTy, _), _) if ns == ValueNS => {
+ err.note("can't use a type alias as a constructor");
+ }
+ _ => return false,
+ }
+ true
+ }
+
+ /// Given the target `ident` and `kind`, search for the similarly named associated item
+ /// in `self.current_trait_ref`.
+ pub(crate) fn find_similarly_named_assoc_item(
+ &mut self,
+ ident: Symbol,
+ kind: &AssocItemKind,
+ ) -> Option<Symbol> {
+ let (module, _) = self.current_trait_ref.as_ref()?;
+ if ident == kw::Underscore {
+ // We do nothing for `_`.
+ return None;
+ }
+
+ let resolutions = self.r.resolutions(module);
+ let targets = resolutions
+ .borrow()
+ .iter()
+ .filter_map(|(key, res)| res.borrow().binding.map(|binding| (key, binding.res())))
+ .filter(|(_, res)| match (kind, res) {
+ (AssocItemKind::Const(..), Res::Def(DefKind::AssocConst, _)) => true,
+ (AssocItemKind::Fn(_), Res::Def(DefKind::AssocFn, _)) => true,
+ (AssocItemKind::TyAlias(..), Res::Def(DefKind::AssocTy, _)) => true,
+ _ => false,
+ })
+ .map(|(key, _)| key.ident.name)
+ .collect::<Vec<_>>();
+
+ find_best_match_for_name(&targets, ident, None)
+ }
+
+ fn lookup_assoc_candidate<FilterFn>(
+ &mut self,
+ ident: Ident,
+ ns: Namespace,
+ filter_fn: FilterFn,
+ ) -> Option<AssocSuggestion>
+ where
+ FilterFn: Fn(Res) -> bool,
+ {
+ fn extract_node_id(t: &Ty) -> Option<NodeId> {
+ match t.kind {
+ TyKind::Path(None, _) => Some(t.id),
+ TyKind::Rptr(_, ref mut_ty) => extract_node_id(&mut_ty.ty),
+                // This doesn't handle the remaining `Ty` variants, as they are
+                // not commonly used as the self type; it might be worthwhile to
+                // support them in the future.
+ _ => None,
+ }
+ }
+
+ // Fields are generally expected in the same contexts as locals.
+ if filter_fn(Res::Local(ast::DUMMY_NODE_ID)) {
+ if let Some(node_id) =
+ self.diagnostic_metadata.current_self_type.as_ref().and_then(extract_node_id)
+ {
+ // Look for a field with the same name in the current self_type.
+ if let Some(resolution) = self.r.partial_res_map.get(&node_id) {
+ match resolution.base_res() {
+ Res::Def(DefKind::Struct | DefKind::Union, did)
+ if resolution.unresolved_segments() == 0 =>
+ {
+ if let Some(field_names) = self.r.field_names.get(&did) {
+ if field_names
+ .iter()
+ .any(|&field_name| ident.name == field_name.node)
+ {
+ return Some(AssocSuggestion::Field);
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+
+ if let Some(items) = self.diagnostic_metadata.current_trait_assoc_items {
+ for assoc_item in items {
+ if assoc_item.ident == ident {
+ return Some(match &assoc_item.kind {
+ ast::AssocItemKind::Const(..) => AssocSuggestion::AssocConst,
+ ast::AssocItemKind::Fn(box ast::Fn { sig, .. }) if sig.decl.has_self() => {
+ AssocSuggestion::MethodWithSelf
+ }
+ ast::AssocItemKind::Fn(..) => AssocSuggestion::AssocFn,
+ ast::AssocItemKind::TyAlias(..) => AssocSuggestion::AssocType,
+ ast::AssocItemKind::MacCall(_) => continue,
+ });
+ }
+ }
+ }
+
+ // Look for associated items in the current trait.
+ if let Some((module, _)) = self.current_trait_ref {
+ if let Ok(binding) = self.r.maybe_resolve_ident_in_module(
+ ModuleOrUniformRoot::Module(module),
+ ident,
+ ns,
+ &self.parent_scope,
+ ) {
+ let res = binding.res();
+ if filter_fn(res) {
+ if self.r.has_self.contains(&res.def_id()) {
+ return Some(AssocSuggestion::MethodWithSelf);
+ } else {
+ match res {
+ Res::Def(DefKind::AssocFn, _) => return Some(AssocSuggestion::AssocFn),
+ Res::Def(DefKind::AssocConst, _) => {
+ return Some(AssocSuggestion::AssocConst);
+ }
+ Res::Def(DefKind::AssocTy, _) => {
+ return Some(AssocSuggestion::AssocType);
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ }
+
+ None
+ }
+
+ fn lookup_typo_candidate(
+ &mut self,
+ path: &[Segment],
+ ns: Namespace,
+ filter_fn: &impl Fn(Res) -> bool,
+ ) -> Option<TypoSuggestion> {
+ let mut names = Vec::new();
+ if path.len() == 1 {
+ // Search in lexical scope.
+ // Walk backwards up the ribs in scope and collect candidates.
+ for rib in self.ribs[ns].iter().rev() {
+ // Locals and type parameters
+ for (ident, &res) in &rib.bindings {
+ if filter_fn(res) {
+ names.push(TypoSuggestion::typo_from_res(ident.name, res));
+ }
+ }
+ // Items in scope
+ if let RibKind::ModuleRibKind(module) = rib.kind {
+ // Items from this module
+ self.r.add_module_candidates(module, &mut names, &filter_fn);
+
+ if let ModuleKind::Block = module.kind {
+ // We can see through blocks
+ } else {
+ // Items from the prelude
+ if !module.no_implicit_prelude {
+ let extern_prelude = self.r.extern_prelude.clone();
+ names.extend(extern_prelude.iter().flat_map(|(ident, _)| {
+ self.r.crate_loader.maybe_process_path_extern(ident.name).and_then(
+ |crate_id| {
+ let crate_mod =
+ Res::Def(DefKind::Mod, crate_id.as_def_id());
+
+ if filter_fn(crate_mod) {
+ Some(TypoSuggestion::typo_from_res(
+ ident.name, crate_mod,
+ ))
+ } else {
+ None
+ }
+ },
+ )
+ }));
+
+ if let Some(prelude) = self.r.prelude {
+ self.r.add_module_candidates(prelude, &mut names, &filter_fn);
+ }
+ }
+ break;
+ }
+ }
+ }
+ // Add primitive types to the mix
+ if filter_fn(Res::PrimTy(PrimTy::Bool)) {
+ names.extend(PrimTy::ALL.iter().map(|prim_ty| {
+ TypoSuggestion::typo_from_res(prim_ty.name(), Res::PrimTy(*prim_ty))
+ }))
+ }
+ } else {
+ // Search in module.
+ let mod_path = &path[..path.len() - 1];
+ if let PathResult::Module(ModuleOrUniformRoot::Module(module)) =
+ self.resolve_path(mod_path, Some(TypeNS), None)
+ {
+ self.r.add_module_candidates(module, &mut names, &filter_fn);
+ }
+ }
+
+ let name = path[path.len() - 1].ident.name;
+ // Make sure error reporting is deterministic.
+        names.sort_by(|a, b| a.candidate.as_str().cmp(b.candidate.as_str()));
+
+ match find_best_match_for_name(
+ &names.iter().map(|suggestion| suggestion.candidate).collect::<Vec<Symbol>>(),
+ name,
+ None,
+ ) {
+ Some(found) if found != name => {
+ names.into_iter().find(|suggestion| suggestion.candidate == found)
+ }
+ _ => None,
+ }
+ }
+
+ // Returns the name of the Rust type approximately corresponding to
+ // a type name in another programming language.
+ fn likely_rust_type(path: &[Segment]) -> Option<Symbol> {
+ let name = path[path.len() - 1].ident.as_str();
+ // Common Java types
+ Some(match name {
+ "byte" => sym::u8, // In Java, bytes are signed, but in practice one almost always wants unsigned bytes.
+ "short" => sym::i16,
+ "Bool" => sym::bool,
+ "Boolean" => sym::bool,
+ "boolean" => sym::bool,
+ "int" => sym::i32,
+ "long" => sym::i64,
+ "float" => sym::f32,
+ "double" => sym::f64,
+ _ => return None,
+ })
+ }
+
+ /// Only used in a specific case of type ascription suggestions
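+    /// (e.g., when suggesting `::` in place of the lone `:` in `std:fmt`).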
+ fn get_colon_suggestion_span(&self, start: Span) -> Span {
+ let sm = self.r.session.source_map();
+ start.to(sm.next_point(start))
+ }
+
+ fn type_ascription_suggestion(&self, err: &mut Diagnostic, base_span: Span) -> bool {
+ let sm = self.r.session.source_map();
+ let base_snippet = sm.span_to_snippet(base_span);
+ if let Some(&sp) = self.diagnostic_metadata.current_type_ascription.last() {
+ if let Ok(snippet) = sm.span_to_snippet(sp) {
+ let len = snippet.trim_end().len() as u32;
+ if snippet.trim() == ":" {
+ let colon_sp =
+ sp.with_lo(sp.lo() + BytePos(len - 1)).with_hi(sp.lo() + BytePos(len));
+ let mut show_label = true;
+ if sm.is_multiline(sp) {
+ err.span_suggestion_short(
+ colon_sp,
+ "maybe you meant to write `;` here",
+ ";",
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ let after_colon_sp =
+ self.get_colon_suggestion_span(colon_sp.shrink_to_hi());
+ if snippet.len() == 1 {
+ // `foo:bar`
+ err.span_suggestion(
+ colon_sp,
+ "maybe you meant to write a path separator here",
+ "::",
+ Applicability::MaybeIncorrect,
+ );
+ show_label = false;
+ if !self
+ .r
+ .session
+ .parse_sess
+ .type_ascription_path_suggestions
+ .borrow_mut()
+ .insert(colon_sp)
+ {
+ err.downgrade_to_delayed_bug();
+ }
+ }
+ if let Ok(base_snippet) = base_snippet {
+ let mut sp = after_colon_sp;
+ for _ in 0..100 {
+ // Try to find an assignment
+ sp = sm.next_point(sp);
+ let snippet = sm.span_to_snippet(sp.to(sm.next_point(sp)));
+ match snippet {
+ Ok(ref x) if x.as_str() == "=" => {
+ err.span_suggestion(
+ base_span,
+ "maybe you meant to write an assignment here",
+ format!("let {}", base_snippet),
+ Applicability::MaybeIncorrect,
+ );
+ show_label = false;
+ break;
+ }
+ Ok(ref x) if x.as_str() == "\n" => break,
+ Err(_) => break,
+ Ok(_) => {}
+ }
+ }
+ }
+ }
+ if show_label {
+ err.span_label(
+ base_span,
+ "expecting a type here because of type ascription",
+ );
+ }
+ return show_label;
+ }
+ }
+ }
+ false
+ }
+
+ fn find_module(&mut self, def_id: DefId) -> Option<(Module<'a>, ImportSuggestion)> {
+ let mut result = None;
+ let mut seen_modules = FxHashSet::default();
+ let mut worklist = vec![(self.r.graph_root, Vec::new())];
+
+ while let Some((in_module, path_segments)) = worklist.pop() {
+ // abort if the module is already found
+ if result.is_some() {
+ break;
+ }
+
+ in_module.for_each_child(self.r, |_, ident, _, name_binding| {
+ // abort if the module is already found or if name_binding is private external
+ if result.is_some() || !name_binding.vis.is_visible_locally() {
+ return;
+ }
+ if let Some(module) = name_binding.module() {
+ // form the path
+ let mut path_segments = path_segments.clone();
+ path_segments.push(ast::PathSegment::from_ident(ident));
+ let module_def_id = module.def_id();
+ if module_def_id == def_id {
+ let path =
+ Path { span: name_binding.span, segments: path_segments, tokens: None };
+ result = Some((
+ module,
+ ImportSuggestion {
+ did: Some(def_id),
+ descr: "module",
+ path,
+ accessible: true,
+ note: None,
+ },
+ ));
+ } else {
+ // add the module to the lookup
+ if seen_modules.insert(module_def_id) {
+ worklist.push((module, path_segments));
+ }
+ }
+ }
+ });
+ }
+
+ result
+ }
+
+ fn collect_enum_ctors(&mut self, def_id: DefId) -> Option<Vec<(Path, DefId, CtorKind)>> {
+ self.find_module(def_id).map(|(enum_module, enum_import_suggestion)| {
+ let mut variants = Vec::new();
+ enum_module.for_each_child(self.r, |_, ident, _, name_binding| {
+ if let Res::Def(DefKind::Ctor(CtorOf::Variant, kind), def_id) = name_binding.res() {
+ let mut segms = enum_import_suggestion.path.segments.clone();
+ segms.push(ast::PathSegment::from_ident(ident));
+ let path = Path { span: name_binding.span, segments: segms, tokens: None };
+ variants.push((path, def_id, kind));
+ }
+ });
+ variants
+ })
+ }
+
+    /// Adds a suggestion to use one of an enum's variants when the enum itself
+    /// is used in place of a variant.
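+    ///
+    /// For example (illustrative): for `let x = Option(3);`, where `Option`
+    /// resolves to the enum itself, this can suggest constructing the tuple
+    /// variant `Some` instead.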
+ fn suggest_using_enum_variant(
+ &mut self,
+ err: &mut Diagnostic,
+ source: PathSource<'_>,
+ def_id: DefId,
+ span: Span,
+ ) {
+ let Some(variants) = self.collect_enum_ctors(def_id) else {
+ err.note("you might have meant to use one of the enum's variants");
+ return;
+ };
+
+ let suggest_only_tuple_variants =
+ matches!(source, PathSource::TupleStruct(..)) || source.is_call();
+ if suggest_only_tuple_variants {
+            // Suggest only tuple variants regardless of whether they have fields, and do
+            // not suggest a path with added parentheses.
+ let suggestable_variants = variants
+ .iter()
+ .filter(|(.., kind)| *kind == CtorKind::Fn)
+ .map(|(variant, ..)| path_names_to_string(variant))
+ .collect::<Vec<_>>();
+
+ let non_suggestable_variant_count = variants.len() - suggestable_variants.len();
+
+ let source_msg = if source.is_call() {
+ "to construct"
+ } else if matches!(source, PathSource::TupleStruct(..)) {
+ "to match against"
+ } else {
+ unreachable!()
+ };
+
+ if !suggestable_variants.is_empty() {
+ let msg = if non_suggestable_variant_count == 0 && suggestable_variants.len() == 1 {
+ format!("try {} the enum's variant", source_msg)
+ } else {
+ format!("try {} one of the enum's variants", source_msg)
+ };
+
+ err.span_suggestions(
+ span,
+ &msg,
+ suggestable_variants.into_iter(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+            // If the enum has no tuple variants...
+ if non_suggestable_variant_count == variants.len() {
+ err.help(&format!("the enum has no tuple variants {}", source_msg));
+ }
+
+            // If there are also non-tuple variants...
+ if non_suggestable_variant_count == 1 {
+ err.help(&format!(
+ "you might have meant {} the enum's non-tuple variant",
+ source_msg
+ ));
+ } else if non_suggestable_variant_count >= 1 {
+ err.help(&format!(
+ "you might have meant {} one of the enum's non-tuple variants",
+ source_msg
+ ));
+ }
+ } else {
+ let needs_placeholder = |def_id: DefId, kind: CtorKind| {
+ let has_no_fields = self.r.field_names.get(&def_id).map_or(false, |f| f.is_empty());
+ match kind {
+ CtorKind::Const => false,
+ CtorKind::Fn | CtorKind::Fictive if has_no_fields => false,
+ _ => true,
+ }
+ };
+
+            let suggestable_variants = variants
+ .iter()
+ .filter(|(_, def_id, kind)| !needs_placeholder(*def_id, *kind))
+ .map(|(variant, _, kind)| (path_names_to_string(variant), kind))
+ .map(|(variant, kind)| match kind {
+ CtorKind::Const => variant,
+ CtorKind::Fn => format!("({}())", variant),
+ CtorKind::Fictive => format!("({} {{}})", variant),
+ })
+ .collect::<Vec<_>>();
+
+ if !suggestable_variants.is_empty() {
+ let msg = if suggestable_variants.len() == 1 {
+ "you might have meant to use the following enum variant"
+ } else {
+ "you might have meant to use one of the following enum variants"
+ };
+
+                err.span_suggestions(
+                    span,
+                    msg,
+                    // Don't drain here: the `is_empty` check further below must still
+                    // see the original contents to pick the right message.
+                    suggestable_variants.iter().cloned(),
+                    Applicability::MaybeIncorrect,
+                );
+ }
+
+ let suggestable_variants_with_placeholders = variants
+ .iter()
+ .filter(|(_, def_id, kind)| needs_placeholder(*def_id, *kind))
+ .map(|(variant, _, kind)| (path_names_to_string(variant), kind))
+ .filter_map(|(variant, kind)| match kind {
+ CtorKind::Fn => Some(format!("({}(/* fields */))", variant)),
+ CtorKind::Fictive => Some(format!("({} {{ /* fields */ }})", variant)),
+ _ => None,
+ })
+ .collect::<Vec<_>>();
+
+ if !suggestable_variants_with_placeholders.is_empty() {
+ let msg = match (
+ suggestable_variants.is_empty(),
+ suggestable_variants_with_placeholders.len(),
+ ) {
+ (true, 1) => "the following enum variant is available",
+ (true, _) => "the following enum variants are available",
+ (false, 1) => "alternatively, the following enum variant is available",
+ (false, _) => "alternatively, the following enum variants are also available",
+ };
+
+ err.span_suggestions(
+ span,
+ msg,
+ suggestable_variants_with_placeholders.into_iter(),
+ Applicability::HasPlaceholders,
+ );
+ }
+ };
+
+ if def_id.is_local() {
+ if let Some(span) = self.def_span(def_id) {
+ err.span_note(span, "the enum is defined here");
+ }
+ }
+ }
+
+ pub(crate) fn report_missing_type_error(
+ &self,
+ path: &[Segment],
+ ) -> Option<(Span, &'static str, String, Applicability)> {
+ let (ident, span) = match path {
+ [segment] if !segment.has_generic_args && segment.ident.name != kw::SelfUpper => {
+ (segment.ident.to_string(), segment.ident.span)
+ }
+ _ => return None,
+ };
+ let mut iter = ident.chars().map(|c| c.is_uppercase());
+ let single_uppercase_char =
+ matches!(iter.next(), Some(true)) && matches!(iter.next(), None);
+ if !self.diagnostic_metadata.currently_processing_generics && !single_uppercase_char {
+ return None;
+ }
+        match (
+            self.diagnostic_metadata.current_item,
+            single_uppercase_char,
+            self.diagnostic_metadata.currently_processing_generics,
+        ) {
+ (Some(Item { kind: ItemKind::Fn(..), ident, .. }), _, _) if ident.name == sym::main => {
+ // Ignore `fn main()` as we don't want to suggest `fn main<T>()`
+ }
+ (
+ Some(Item {
+ kind:
+ kind @ ItemKind::Fn(..)
+ | kind @ ItemKind::Enum(..)
+ | kind @ ItemKind::Struct(..)
+ | kind @ ItemKind::Union(..),
+ ..
+ }),
+ true, _
+ )
+ // Without the 2nd `true`, we'd suggest `impl <T>` for `impl T` when a type `T` isn't found
+ | (Some(Item { kind: kind @ ItemKind::Impl(..), .. }), true, true)
+ | (Some(Item { kind, .. }), false, _) => {
+ // Likely missing type parameter.
+ if let Some(generics) = kind.generics() {
+ if span.overlaps(generics.span) {
+ // Avoid the following:
+ // error[E0405]: cannot find trait `A` in this scope
+ // --> $DIR/typo-suggestion-named-underscore.rs:CC:LL
+ // |
+ // L | fn foo<T: A>(x: T) {} // Shouldn't suggest underscore
+ // | ^- help: you might be missing a type parameter: `, A`
+ // | |
+ // | not found in this scope
+ return None;
+ }
+ let msg = "you might be missing a type parameter";
+ let (span, sugg) = if let [.., param] = &generics.params[..] {
+ let span = if let [.., bound] = &param.bounds[..] {
+ bound.span()
+ } else if let GenericParam {
+ kind: GenericParamKind::Const { ty, kw_span: _, default }, ..
+ } = param {
+ default.as_ref().map(|def| def.value.span).unwrap_or(ty.span)
+ } else {
+ param.ident.span
+ };
+ (span, format!(", {}", ident))
+ } else {
+ (generics.span, format!("<{}>", ident))
+ };
+ // Do not suggest if this is coming from macro expansion.
+ if span.can_be_used_for_suggestions() {
+ return Some((
+ span.shrink_to_hi(),
+ msg,
+ sugg,
+ Applicability::MaybeIncorrect,
+ ));
+ }
+ }
+ }
+ _ => {}
+ }
+ None
+ }
+
+ /// Given the target `label`, search the `rib_index`th label rib for similarly named labels,
+ /// optionally returning the closest match and whether it is reachable.
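+    ///
+    /// For example (illustrative): in `'bar: loop { break 'bra; }`, looking up
+    /// `'bra` can return `Some(('bar, true))`.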
+ pub(crate) fn suggestion_for_label_in_rib(
+ &self,
+ rib_index: usize,
+ label: Ident,
+ ) -> Option<LabelSuggestion> {
+ // Are ribs from this `rib_index` within scope?
+ let within_scope = self.is_label_valid_from_rib(rib_index);
+
+ let rib = &self.label_ribs[rib_index];
+ let names = rib
+ .bindings
+ .iter()
+ .filter(|(id, _)| id.span.eq_ctxt(label.span))
+ .map(|(id, _)| id.name)
+ .collect::<Vec<Symbol>>();
+
+ find_best_match_for_name(&names, label.name, None).map(|symbol| {
+ // Upon finding a similar name, get the ident that it was from - the span
+ // contained within helps make a useful diagnostic. In addition, determine
+ // whether this candidate is within scope.
+ let (ident, _) = rib.bindings.iter().find(|(ident, _)| ident.name == symbol).unwrap();
+ (*ident, within_scope)
+ })
+ }
+
+ pub(crate) fn maybe_report_lifetime_uses(
+ &mut self,
+ generics_span: Span,
+ params: &[ast::GenericParam],
+ ) {
+ for (param_index, param) in params.iter().enumerate() {
+ let GenericParamKind::Lifetime = param.kind else { continue };
+
+ let def_id = self.r.local_def_id(param.id);
+
+ let use_set = self.lifetime_uses.remove(&def_id);
+ debug!(
+ "Use set for {:?}({:?} at {:?}) is {:?}",
+ def_id, param.ident, param.ident.span, use_set
+ );
+
+ let deletion_span = || {
+ if params.len() == 1 {
+ // if sole lifetime, remove the entire `<>` brackets
+ generics_span
+                } else if param_index == 0 {
+                    // if removing the first param, also delete the trailing comma
+                    // that separates it from the next param
+                    param.span().to(params[param_index + 1].span().shrink_to_lo())
+                } else {
+                    // otherwise, also delete the leading comma that separates the
+                    // param from the previous one
+                    params[param_index - 1].span().shrink_to_hi().to(param.span())
+ }
+ };
+ match use_set {
+ Some(LifetimeUseSet::Many) => {}
+ Some(LifetimeUseSet::One { use_span, use_ctxt }) => {
+ debug!(?param.ident, ?param.ident.span, ?use_span);
+
+ let elidable = matches!(use_ctxt, LifetimeCtxt::Rptr);
+
+ let deletion_span = deletion_span();
+ self.r.lint_buffer.buffer_lint_with_diagnostic(
+ lint::builtin::SINGLE_USE_LIFETIMES,
+ param.id,
+ param.ident.span,
+ &format!("lifetime parameter `{}` only used once", param.ident),
+ lint::BuiltinLintDiagnostics::SingleUseLifetime {
+ param_span: param.ident.span,
+ use_span: Some((use_span, elidable)),
+ deletion_span,
+ },
+ );
+ }
+ None => {
+ debug!(?param.ident, ?param.ident.span);
+
+ let deletion_span = deletion_span();
+ self.r.lint_buffer.buffer_lint_with_diagnostic(
+ lint::builtin::UNUSED_LIFETIMES,
+ param.id,
+ param.ident.span,
+ &format!("lifetime parameter `{}` never used", param.ident),
+ lint::BuiltinLintDiagnostics::SingleUseLifetime {
+ param_span: param.ident.span,
+ use_span: None,
+ deletion_span,
+ },
+ );
+ }
+ }
+ }
+ }
+
+ pub(crate) fn emit_undeclared_lifetime_error(
+ &self,
+ lifetime_ref: &ast::Lifetime,
+ outer_lifetime_ref: Option<Ident>,
+ ) {
+ debug_assert_ne!(lifetime_ref.ident.name, kw::UnderscoreLifetime);
+ let mut err = if let Some(outer) = outer_lifetime_ref {
+ let mut err = struct_span_err!(
+ self.r.session,
+ lifetime_ref.ident.span,
+ E0401,
+ "can't use generic parameters from outer item",
+ );
+ err.span_label(lifetime_ref.ident.span, "use of generic parameter from outer item");
+ err.span_label(outer.span, "lifetime parameter from outer item");
+ err
+ } else {
+ let mut err = struct_span_err!(
+ self.r.session,
+ lifetime_ref.ident.span,
+ E0261,
+ "use of undeclared lifetime name `{}`",
+ lifetime_ref.ident
+ );
+ err.span_label(lifetime_ref.ident.span, "undeclared lifetime");
+ err
+ };
+ self.suggest_introducing_lifetime(
+ &mut err,
+ Some(lifetime_ref.ident.name.as_str()),
+ |err, _, span, message, suggestion| {
+ err.span_suggestion(span, message, suggestion, Applicability::MaybeIncorrect);
+ true
+ },
+ );
+ err.emit();
+ }
+
+ fn suggest_introducing_lifetime(
+ &self,
+ err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ name: Option<&str>,
+ suggest: impl Fn(&mut DiagnosticBuilder<'_, ErrorGuaranteed>, bool, Span, &str, String) -> bool,
+ ) {
+ let mut suggest_note = true;
+ for rib in self.lifetime_ribs.iter().rev() {
+ let mut should_continue = true;
+ match rib.kind {
+ LifetimeRibKind::Generics { binder: _, span, kind } => {
+ if !span.can_be_used_for_suggestions() && suggest_note && let Some(name) = name {
+ suggest_note = false; // Avoid displaying the same help multiple times.
+ err.span_label(
+ span,
+ &format!(
+ "lifetime `{}` is missing in item created through this procedural macro",
+ name,
+ ),
+ );
+ continue;
+ }
+
+ let higher_ranked = matches!(
+ kind,
+ LifetimeBinderKind::BareFnType
+ | LifetimeBinderKind::PolyTrait
+ | LifetimeBinderKind::WhereBound
+ );
+ let (span, sugg) = if span.is_empty() {
+ let sugg = format!(
+ "{}<{}>{}",
+ if higher_ranked { "for" } else { "" },
+ name.unwrap_or("'a"),
+ if higher_ranked { " " } else { "" },
+ );
+ (span, sugg)
+ } else {
+ let span =
+ self.r.session.source_map().span_through_char(span, '<').shrink_to_hi();
+ let sugg = format!("{}, ", name.unwrap_or("'a"));
+ (span, sugg)
+ };
+ if higher_ranked {
+ let message = format!(
+ "consider making the {} lifetime-generic with a new `{}` lifetime",
+ kind.descr(),
+ name.unwrap_or("'a"),
+ );
+ should_continue = suggest(err, true, span, &message, sugg);
+ err.note_once(
+ "for more information on higher-ranked polymorphism, visit \
+ https://doc.rust-lang.org/nomicon/hrtb.html",
+ );
+ } else if let Some(name) = name {
+ let message = format!("consider introducing lifetime `{}` here", name);
+ should_continue = suggest(err, false, span, &message, sugg);
+ } else {
+                    let message = "consider introducing a named lifetime parameter".to_string();
+ should_continue = suggest(err, false, span, &message, sugg);
+ }
+ }
+ LifetimeRibKind::Item => break,
+ _ => {}
+ }
+ if !should_continue {
+ break;
+ }
+ }
+ }
+
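+    /// Illustrative trigger (mirroring the E0771 error-index example):
+    /// `fn f<'a, const S: &'a str>() {}` uses the non-static `'a` in a const
+    /// parameter's type.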
+ pub(crate) fn emit_non_static_lt_in_const_generic_error(&self, lifetime_ref: &ast::Lifetime) {
+ struct_span_err!(
+ self.r.session,
+ lifetime_ref.ident.span,
+ E0771,
+ "use of non-static lifetime `{}` in const generic",
+ lifetime_ref.ident
+ )
+ .note(
+ "for more information, see issue #74052 \
+ <https://github.com/rust-lang/rust/issues/74052>",
+ )
+ .emit();
+ }
+
+ /// Non-static lifetimes are prohibited in anonymous constants under `min_const_generics`.
+    /// This function will emit an error if `generic_const_exprs` is not enabled; the caller
+    /// ensures that the enclosing body is an anonymous constant and that `lifetime_ref` is non-static.
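+    ///
+    /// Illustrative trigger (a sketch, assuming the feature is off):
+    /// `fn f<'a>(x: &'a u8) { let _: [u8; std::mem::size_of::<&'a u8>()]; }`
+    /// names `'a` inside an anonymous constant.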
+ pub(crate) fn maybe_emit_forbidden_non_static_lifetime_error(
+ &self,
+ lifetime_ref: &ast::Lifetime,
+ ) {
+ let feature_active = self.r.session.features_untracked().generic_const_exprs;
+ if !feature_active {
+ feature_err(
+ &self.r.session.parse_sess,
+ sym::generic_const_exprs,
+ lifetime_ref.ident.span,
+ "a non-static lifetime is not allowed in a `const`",
+ )
+ .emit();
+ }
+ }
+
+ pub(crate) fn report_missing_lifetime_specifiers(
+ &mut self,
+ lifetime_refs: Vec<MissingLifetime>,
+ function_param_lifetimes: Option<(Vec<MissingLifetime>, Vec<ElisionFnParameter>)>,
+ ) -> ErrorGuaranteed {
+ let num_lifetimes: usize = lifetime_refs.iter().map(|lt| lt.count).sum();
+ let spans: Vec<_> = lifetime_refs.iter().map(|lt| lt.span).collect();
+
+ let mut err = struct_span_err!(
+ self.r.session,
+ spans,
+ E0106,
+ "missing lifetime specifier{}",
+ pluralize!(num_lifetimes)
+ );
+ self.add_missing_lifetime_specifiers_label(
+ &mut err,
+ lifetime_refs,
+ function_param_lifetimes,
+ );
+ err.emit()
+ }
+
+ pub(crate) fn add_missing_lifetime_specifiers_label(
+ &mut self,
+ err: &mut DiagnosticBuilder<'_, ErrorGuaranteed>,
+ lifetime_refs: Vec<MissingLifetime>,
+ function_param_lifetimes: Option<(Vec<MissingLifetime>, Vec<ElisionFnParameter>)>,
+ ) {
+ for &lt in &lifetime_refs {
+ err.span_label(
+ lt.span,
+ format!(
+ "expected {} lifetime parameter{}",
+ if lt.count == 1 { "named".to_string() } else { lt.count.to_string() },
+ pluralize!(lt.count),
+ ),
+ );
+ }
+
+ let mut in_scope_lifetimes: Vec<_> = self
+ .lifetime_ribs
+ .iter()
+ .rev()
+ .take_while(|rib| !matches!(rib.kind, LifetimeRibKind::Item))
+ .flat_map(|rib| rib.bindings.iter())
+ .map(|(&ident, &res)| (ident, res))
+ .filter(|(ident, _)| ident.name != kw::UnderscoreLifetime)
+ .collect();
+ debug!(?in_scope_lifetimes);
+
+ debug!(?function_param_lifetimes);
+ if let Some((param_lifetimes, params)) = &function_param_lifetimes {
+ let elided_len = param_lifetimes.len();
+ let num_params = params.len();
+
+ let mut m = String::new();
+
+ for (i, info) in params.iter().enumerate() {
+ let ElisionFnParameter { ident, index, lifetime_count, span } = *info;
+ debug_assert_ne!(lifetime_count, 0);
+
+ err.span_label(span, "");
+
+ if i != 0 {
+ if i + 1 < num_params {
+ m.push_str(", ");
+ } else if num_params == 2 {
+ m.push_str(" or ");
+ } else {
+ m.push_str(", or ");
+ }
+ }
+
+ let help_name = if let Some(ident) = ident {
+ format!("`{}`", ident)
+ } else {
+ format!("argument {}", index + 1)
+ };
+
+ if lifetime_count == 1 {
+ m.push_str(&help_name[..])
+ } else {
+ m.push_str(&format!("one of {}'s {} lifetimes", help_name, lifetime_count)[..])
+ }
+ }
+
+ if num_params == 0 {
+ err.help(
+ "this function's return type contains a borrowed value, \
+ but there is no value for it to be borrowed from",
+ );
+ if in_scope_lifetimes.is_empty() {
+ in_scope_lifetimes = vec![(
+ Ident::with_dummy_span(kw::StaticLifetime),
+ (DUMMY_NODE_ID, LifetimeRes::Static),
+ )];
+ }
+ } else if elided_len == 0 {
+ err.help(
+ "this function's return type contains a borrowed value with \
+ an elided lifetime, but the lifetime cannot be derived from \
+ the arguments",
+ );
+ if in_scope_lifetimes.is_empty() {
+ in_scope_lifetimes = vec![(
+ Ident::with_dummy_span(kw::StaticLifetime),
+ (DUMMY_NODE_ID, LifetimeRes::Static),
+ )];
+ }
+ } else if num_params == 1 {
+ err.help(&format!(
+ "this function's return type contains a borrowed value, \
+ but the signature does not say which {} it is borrowed from",
+ m
+ ));
+ } else {
+ err.help(&format!(
+ "this function's return type contains a borrowed value, \
+ but the signature does not say whether it is borrowed from {}",
+ m
+ ));
+ }
+ }
+
+ let existing_name = match &in_scope_lifetimes[..] {
+ [] => Symbol::intern("'a"),
+ [(existing, _)] => existing.name,
+ _ => Symbol::intern("'lifetime"),
+ };
+
+ let mut spans_suggs: Vec<_> = Vec::new();
+ let build_sugg = |lt: MissingLifetime| match lt.kind {
+ MissingLifetimeKind::Underscore => {
+ debug_assert_eq!(lt.count, 1);
+ (lt.span, existing_name.to_string())
+ }
+ MissingLifetimeKind::Ampersand => {
+ debug_assert_eq!(lt.count, 1);
+ (lt.span.shrink_to_hi(), format!("{} ", existing_name))
+ }
+ MissingLifetimeKind::Comma => {
+ let sugg: String = std::iter::repeat([existing_name.as_str(), ", "])
+ .take(lt.count)
+ .flatten()
+ .collect();
+ (lt.span.shrink_to_hi(), sugg)
+ }
+ MissingLifetimeKind::Brackets => {
+ let sugg: String = std::iter::once("<")
+ .chain(
+ std::iter::repeat(existing_name.as_str()).take(lt.count).intersperse(", "),
+ )
+ .chain([">"])
+ .collect();
+ (lt.span.shrink_to_hi(), sugg)
+ }
+ };
+ for &lt in &lifetime_refs {
+ spans_suggs.push(build_sugg(lt));
+ }
+ debug!(?spans_suggs);
+ match in_scope_lifetimes.len() {
+ 0 => {
+ if let Some((param_lifetimes, _)) = function_param_lifetimes {
+ for lt in param_lifetimes {
+ spans_suggs.push(build_sugg(lt))
+ }
+ }
+ self.suggest_introducing_lifetime(
+ err,
+ None,
+ |err, higher_ranked, span, message, intro_sugg| {
+ err.multipart_suggestion_verbose(
+ message,
+ std::iter::once((span, intro_sugg))
+ .chain(spans_suggs.clone())
+ .collect(),
+ Applicability::MaybeIncorrect,
+ );
+ higher_ranked
+ },
+ );
+ }
+ 1 => {
+ err.multipart_suggestion_verbose(
+ &format!("consider using the `{}` lifetime", existing_name),
+ spans_suggs,
+ Applicability::MaybeIncorrect,
+ );
+
+ // Record as using the suggested resolution.
+ let (_, (_, res)) = in_scope_lifetimes[0];
+ for &lt in &lifetime_refs {
+ self.r.lifetimes_res_map.insert(lt.id, res);
+ }
+ }
+ _ => {
+ let lifetime_spans: Vec<_> =
+ in_scope_lifetimes.iter().map(|(ident, _)| ident.span).collect();
+ err.span_note(lifetime_spans, "these named lifetimes are available to use");
+
+                if !spans_suggs.is_empty() {
+ // This happens when we have `Foo<T>` where we point at the space before `T`,
+ // but this can be confusing so we give a suggestion with placeholders.
+ err.multipart_suggestion_verbose(
+ "consider using one of the available lifetimes here",
+ spans_suggs,
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ }
+ }
+}
+
+/// Report lifetime/lifetime shadowing as an error.
+pub fn signal_lifetime_shadowing(sess: &Session, orig: Ident, shadower: Ident) {
+ let mut err = struct_span_err!(
+ sess,
+ shadower.span,
+ E0496,
+ "lifetime name `{}` shadows a lifetime name that is already in scope",
+ orig.name,
+ );
+ err.span_label(orig.span, "first declared here");
+ err.span_label(shadower.span, format!("lifetime `{}` already in scope", orig.name));
+ err.emit();
+}
+
+/// Shadowing involving a label is only a warning for historical reasons.
+// FIXME: make this a proper lint.
+pub fn signal_label_shadowing(sess: &Session, orig: Span, shadower: Ident) {
+ let name = shadower.name;
+ let shadower = shadower.span;
+ let mut err = sess.struct_span_warn(
+ shadower,
+ &format!("label name `{}` shadows a label name that is already in scope", name),
+ );
+ err.span_label(orig, "first declared here");
+ err.span_label(shadower, format!("label `{}` already in scope", name));
+ err.emit();
+}
diff --git a/compiler/rustc_resolve/src/late/lifetimes.rs b/compiler/rustc_resolve/src/late/lifetimes.rs
new file mode 100644
index 000000000..94460e33d
--- /dev/null
+++ b/compiler/rustc_resolve/src/late/lifetimes.rs
@@ -0,0 +1,2144 @@
+//! Resolution of early vs late bound lifetimes.
+//!
+//! Name resolution for lifetimes is performed on the AST and embedded into HIR. From this
+//! information, typechecking needs to transform the lifetime parameters into bound lifetimes.
+//! Lifetimes can be early-bound or late-bound. Construction of typechecking terms needs to visit
+//! the types in HIR to identify late-bound lifetimes and assign their Debruijn indices. This file
+//! is also responsible for assigning their semantics to implicit lifetimes in trait objects.
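+//!
+//! As a rough illustration (simplified; `Trait` is a stand-in name): in
+//! `fn f<'a>(x: &'a u32) -> &'a u32`, `'a` can be late-bound because it appears
+//! only in the signature types, whereas in `fn g<'a, T: Trait<'a>>(t: T)` the
+//! use of `'a` in a bound forces it to be early-bound.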
+
+use rustc_ast::walk_list;
+use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet};
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{DefIdMap, LocalDefId};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{GenericArg, GenericParam, GenericParamKind, HirIdMap, LifetimeName, Node};
+use rustc_middle::bug;
+use rustc_middle::hir::map::Map;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::resolve_lifetime::*;
+use rustc_middle::ty::{self, GenericParamDefKind, TyCtxt};
+use rustc_span::def_id::DefId;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use std::borrow::Cow;
+use std::fmt;
+use std::mem::take;
+
+trait RegionExt {
+ fn early(hir_map: Map<'_>, index: &mut u32, param: &GenericParam<'_>) -> (LocalDefId, Region);
+
+ fn late(index: u32, hir_map: Map<'_>, param: &GenericParam<'_>) -> (LocalDefId, Region);
+
+ fn id(&self) -> Option<DefId>;
+
+ fn shifted(self, amount: u32) -> Region;
+
+ fn shifted_out_to_binder(self, binder: ty::DebruijnIndex) -> Region;
+
+ fn subst<'a, L>(self, params: L, map: &NamedRegionMap) -> Option<Region>
+ where
+ L: Iterator<Item = &'a hir::Lifetime>;
+}
+
+impl RegionExt for Region {
+ fn early(hir_map: Map<'_>, index: &mut u32, param: &GenericParam<'_>) -> (LocalDefId, Region) {
+ let i = *index;
+ *index += 1;
+ let def_id = hir_map.local_def_id(param.hir_id);
+ debug!("Region::early: index={} def_id={:?}", i, def_id);
+ (def_id, Region::EarlyBound(i, def_id.to_def_id()))
+ }
+
+ fn late(idx: u32, hir_map: Map<'_>, param: &GenericParam<'_>) -> (LocalDefId, Region) {
+ let depth = ty::INNERMOST;
+ let def_id = hir_map.local_def_id(param.hir_id);
+ debug!(
+ "Region::late: idx={:?}, param={:?} depth={:?} def_id={:?}",
+ idx, param, depth, def_id,
+ );
+ (def_id, Region::LateBound(depth, idx, def_id.to_def_id()))
+ }
+
+ fn id(&self) -> Option<DefId> {
+ match *self {
+ Region::Static => None,
+
+ Region::EarlyBound(_, id) | Region::LateBound(_, _, id) | Region::Free(_, id) => {
+ Some(id)
+ }
+ }
+ }
+
+ fn shifted(self, amount: u32) -> Region {
+ match self {
+ Region::LateBound(debruijn, idx, id) => {
+ Region::LateBound(debruijn.shifted_in(amount), idx, id)
+ }
+ _ => self,
+ }
+ }
+
+ fn shifted_out_to_binder(self, binder: ty::DebruijnIndex) -> Region {
+ match self {
+ Region::LateBound(debruijn, index, id) => {
+ Region::LateBound(debruijn.shifted_out_to_binder(binder), index, id)
+ }
+ _ => self,
+ }
+ }
+
+ fn subst<'a, L>(self, mut params: L, map: &NamedRegionMap) -> Option<Region>
+ where
+ L: Iterator<Item = &'a hir::Lifetime>,
+ {
+ if let Region::EarlyBound(index, _) = self {
+ params.nth(index as usize).and_then(|lifetime| map.defs.get(&lifetime.hir_id).cloned())
+ } else {
+ Some(self)
+ }
+ }
+}
+
+/// Maps the id of each lifetime reference to the lifetime decl
+/// that it corresponds to.
+///
+/// FIXME. This struct gets converted to a `ResolveLifetimes` for
+/// actual use. It has the same data, but indexed by `LocalDefId`. This
+/// is silly.
+#[derive(Debug, Default)]
+struct NamedRegionMap {
+ // maps from every use of a named (not anonymous) lifetime to a
+ // `Region` describing how that region is bound
+ defs: HirIdMap<Region>,
+
+ // Maps relevant hir items to the bound vars on them. These include:
+ // - function defs
+ // - function pointers
+ // - closures
+ // - trait refs
+ // - bound types (like `T` in `for<'a> T<'a>: Foo`)
+ late_bound_vars: HirIdMap<Vec<ty::BoundVariableKind>>,
+}
+
+pub(crate) struct LifetimeContext<'a, 'tcx> {
+ pub(crate) tcx: TyCtxt<'tcx>,
+ map: &'a mut NamedRegionMap,
+ scope: ScopeRef<'a>,
+
+ /// Indicates that we only care about the definition of a trait. This should
+ /// be false if the `Item` we are resolving lifetimes for is not a trait or
+    /// we eventually need lifetimes resolved for trait items.
+ trait_definition_only: bool,
+
+ /// Cache for cross-crate per-definition object lifetime defaults.
+ xcrate_object_lifetime_defaults: DefIdMap<Vec<ObjectLifetimeDefault>>,
+}
+
+#[derive(Debug)]
+enum Scope<'a> {
+ /// Declares lifetimes, and each can be early-bound or late-bound.
+ /// The `DebruijnIndex` of late-bound lifetimes starts at `1` and
+ /// it should be shifted by the number of `Binder`s in between the
+ /// declaration `Binder` and the location it's referenced from.
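+    /// For example (following the convention above): in
+    /// `for<'a> fn(for<'b> fn(&'a u8))`, the use of `'a` has one intervening
+    /// `Binder`, so it is referenced with index `2`.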
+ Binder {
+ /// We use an IndexMap here because we want these lifetimes in order
+ /// for diagnostics.
+ lifetimes: FxIndexMap<LocalDefId, Region>,
+
+ /// if we extend this scope with another scope, what is the next index
+ /// we should use for an early-bound region?
+ next_early_index: u32,
+
+ /// Whether or not this binder would serve as the parent
+ /// binder for opaque types introduced within. For example:
+ ///
+ /// ```text
+ /// fn foo<'a>() -> impl for<'b> Trait<Item = impl Trait2<'a>>
+ /// ```
+ ///
+ /// Here, the opaque types we create for the `impl Trait`
+ /// and `impl Trait2` references will both have the `foo` item
+ /// as their parent. When we get to `impl Trait2`, we find
+ /// that it is nested within the `for<>` binder -- this flag
+ /// allows us to skip that when looking for the parent binder
+ /// of the resulting opaque type.
+ opaque_type_parent: bool,
+
+ scope_type: BinderScopeType,
+
+ /// The late bound vars for a given item are stored by `HirId` to be
+ /// queried later. However, if we enter an elision scope, we have to
+ /// later append the elided bound vars to the list and need to know what
+ /// to append to.
+ hir_id: hir::HirId,
+
+ s: ScopeRef<'a>,
+
+ /// If this binder comes from a where clause, specify how it was created.
+ /// This is used to diagnose inaccessible lifetimes in APIT:
+ /// ```ignore (illustrative)
+ /// fn foo(x: impl for<'a> Trait<'a, Assoc = impl Copy + 'a>) {}
+ /// ```
+ where_bound_origin: Option<hir::PredicateOrigin>,
+ },
+
+ /// Lifetimes introduced by a fn are scoped to the call-site for that fn,
+ /// if this is a fn body, otherwise the original definitions are used.
+ /// Unspecified lifetimes are inferred, unless an elision scope is nested,
+ /// e.g., `(&T, fn(&T) -> &T);` becomes `(&'_ T, for<'a> fn(&'a T) -> &'a T)`.
+ Body {
+ id: hir::BodyId,
+ s: ScopeRef<'a>,
+ },
+
+ /// A scope which either determines unspecified lifetimes or errors
+ /// on them (e.g., due to ambiguity).
+ Elision {
+ s: ScopeRef<'a>,
+ },
+
+ /// Use a specific lifetime (if `Some`) or leave it unset (to be
+ /// inferred in a function body or potentially error outside one),
+ /// for the default choice of lifetime in a trait object type.
+ ObjectLifetimeDefault {
+ lifetime: Option<Region>,
+ s: ScopeRef<'a>,
+ },
+
+ /// When we have nested trait refs, we concatenate late bound vars for inner
+ /// trait refs from outer ones. But we also need to include any HRTB
+ /// lifetimes encountered when identifying the trait that an associated type
+ /// is declared on.
+ Supertrait {
+ lifetimes: Vec<ty::BoundVariableKind>,
+ s: ScopeRef<'a>,
+ },
+
+ TraitRefBoundary {
+ s: ScopeRef<'a>,
+ },
+
+ Root,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum BinderScopeType {
+ /// Any non-concatenating binder scopes.
+ Normal,
+ /// Within a syntactic trait ref, there may be multiple poly trait refs that
+ /// are nested (under the `associated_type_bounds` feature). The binders of
+ /// the inner poly trait refs are extended from the outer poly trait refs
+ /// and don't increase the late bound depth. If you had
+ /// `T: for<'a> Foo<Bar: for<'b> Baz<'a, 'b>>`, then the `for<'b>` scope
+    /// would be `Concatenating`. This is also used in trait refs in where clauses
+ /// where we have two binders `for<> T: for<> Foo` (I've intentionally left
+ /// out any lifetimes because they aren't needed to show the two scopes).
+ /// The inner `for<>` has a scope of `Concatenating`.
+ Concatenating,
+}
+
+// A helper struct for debugging scopes without printing parent scopes
+struct TruncatedScopeDebug<'a>(&'a Scope<'a>);
+
+impl<'a> fmt::Debug for TruncatedScopeDebug<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.0 {
+ Scope::Binder {
+ lifetimes,
+ next_early_index,
+ opaque_type_parent,
+ scope_type,
+ hir_id,
+ where_bound_origin,
+ s: _,
+ } => f
+ .debug_struct("Binder")
+ .field("lifetimes", lifetimes)
+ .field("next_early_index", next_early_index)
+ .field("opaque_type_parent", opaque_type_parent)
+ .field("scope_type", scope_type)
+ .field("hir_id", hir_id)
+ .field("where_bound_origin", where_bound_origin)
+ .field("s", &"..")
+ .finish(),
+ Scope::Body { id, s: _ } => {
+ f.debug_struct("Body").field("id", id).field("s", &"..").finish()
+ }
+ Scope::Elision { s: _ } => f.debug_struct("Elision").field("s", &"..").finish(),
+ Scope::ObjectLifetimeDefault { lifetime, s: _ } => f
+ .debug_struct("ObjectLifetimeDefault")
+ .field("lifetime", lifetime)
+ .field("s", &"..")
+ .finish(),
+ Scope::Supertrait { lifetimes, s: _ } => f
+ .debug_struct("Supertrait")
+ .field("lifetimes", lifetimes)
+ .field("s", &"..")
+ .finish(),
+ Scope::TraitRefBoundary { s: _ } => f.debug_struct("TraitRefBoundary").finish(),
+ Scope::Root => f.debug_struct("Root").finish(),
+ }
+ }
+}
+
+type ScopeRef<'a> = &'a Scope<'a>;
+
+const ROOT_SCOPE: ScopeRef<'static> = &Scope::Root;
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers {
+ resolve_lifetimes_trait_definition,
+ resolve_lifetimes,
+
+ named_region_map: |tcx, id| resolve_lifetimes_for(tcx, id).defs.get(&id),
+ is_late_bound_map,
+ object_lifetime_defaults: |tcx, id| match tcx.hir().find_by_def_id(id) {
+ Some(Node::Item(item)) => compute_object_lifetime_defaults(tcx, item),
+ _ => None,
+ },
+ late_bound_vars_map: |tcx, id| resolve_lifetimes_for(tcx, id).late_bound_vars.get(&id),
+
+ ..*providers
+ };
+}
+
+/// Like `resolve_lifetimes`, but does not resolve lifetimes for trait items.
+/// Also does not generate any diagnostics.
+///
+/// This is ultimately a subset of the `resolve_lifetimes` work. It effectively
+/// resolves lifetimes only within the trait "header" -- that is, the trait
+/// and supertrait list. In contrast, `resolve_lifetimes` resolves all the
+/// lifetimes within the trait and its items. There is room to refactor this,
+/// for example to resolve lifetimes for each trait item in separate queries,
+/// but it's convenient to do the entire trait at once because the lifetimes
+/// from the trait definition are in scope within the trait items as well.
+///
+/// The reason for this separate call is to resolve what would otherwise
+/// be a cycle. Consider this example:
+///
+/// ```ignore UNSOLVED (maybe @jackh726 knows what lifetime parameter to give Sub)
+/// trait Base<'a> {
+/// type BaseItem;
+/// }
+/// trait Sub<'b>: for<'a> Base<'a> {
+/// type SubItem: Sub<BaseItem = &'b u32>;
+/// }
+/// ```
+///
+/// When we resolve `Sub` and all its items, we also have to resolve `Sub<BaseItem = &'b u32>`.
+/// To figure out the index of `'b`, we have to know about the supertraits
+/// of `Sub` so that we can determine that the `for<'a>` will be in scope.
+/// (This is because we -- currently at least -- flatten all the late-bound
+/// lifetimes into a single binder.) This requires us to resolve the
+/// *trait definition* of `Sub`; basically just enough lifetime information
+/// to look at the supertraits.
+#[tracing::instrument(level = "debug", skip(tcx))]
+fn resolve_lifetimes_trait_definition(
+ tcx: TyCtxt<'_>,
+ local_def_id: LocalDefId,
+) -> ResolveLifetimes {
+ convert_named_region_map(do_resolve(tcx, local_def_id, true))
+}
+
+/// Computes the `ResolveLifetimes` map that contains data for an entire `Item`.
+/// You should not read the result of this query directly, but rather use
+/// `named_region_map`, `is_late_bound_map`, etc.
+#[tracing::instrument(level = "debug", skip(tcx))]
+fn resolve_lifetimes(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> ResolveLifetimes {
+ convert_named_region_map(do_resolve(tcx, local_def_id, false))
+}
+
+fn do_resolve(
+ tcx: TyCtxt<'_>,
+ local_def_id: LocalDefId,
+ trait_definition_only: bool,
+) -> NamedRegionMap {
+ let item = tcx.hir().expect_item(local_def_id);
+ let mut named_region_map =
+ NamedRegionMap { defs: Default::default(), late_bound_vars: Default::default() };
+ let mut visitor = LifetimeContext {
+ tcx,
+ map: &mut named_region_map,
+ scope: ROOT_SCOPE,
+ trait_definition_only,
+ xcrate_object_lifetime_defaults: Default::default(),
+ };
+ visitor.visit_item(item);
+
+ named_region_map
+}
+
+fn convert_named_region_map(named_region_map: NamedRegionMap) -> ResolveLifetimes {
+ let mut rl = ResolveLifetimes::default();
+
+ for (hir_id, v) in named_region_map.defs {
+ let map = rl.defs.entry(hir_id.owner).or_default();
+ map.insert(hir_id.local_id, v);
+ }
+ for (hir_id, v) in named_region_map.late_bound_vars {
+ let map = rl.late_bound_vars.entry(hir_id.owner).or_default();
+ map.insert(hir_id.local_id, v);
+ }
+
+ debug!(?rl.defs);
+ rl
+}
+
+/// Given any owner (structs, traits, trait methods, etc.), does lifetime resolution.
+/// There are two important things this does.
+/// First, we have to resolve lifetimes for
+/// the entire *`Item`* that contains this owner, because that's the largest "scope"
+/// where we can have relevant lifetimes.
+/// Second, if we are asking for lifetimes in a trait *definition*, we use `resolve_lifetimes_trait_definition`
+/// instead of `resolve_lifetimes`, which does not descend into the trait items and does not emit diagnostics.
+/// This allows us to avoid cycles. Importantly, if we ask for lifetimes in items that have an owner
+/// other than the trait itself (like the trait methods or associated types), then we just use the regular
+/// `resolve_lifetimes`.
+fn resolve_lifetimes_for<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx ResolveLifetimes {
+ let item_id = item_for(tcx, def_id);
+ if item_id == def_id {
+ let item = tcx.hir().item(hir::ItemId { def_id: item_id });
+ match item.kind {
+ hir::ItemKind::Trait(..) => tcx.resolve_lifetimes_trait_definition(item_id),
+ _ => tcx.resolve_lifetimes(item_id),
+ }
+ } else {
+ tcx.resolve_lifetimes(item_id)
+ }
+}
+
+/// Finds the `Item` that contains the given `LocalDefId`
+fn item_for(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> LocalDefId {
+ match tcx.hir().find_by_def_id(local_def_id) {
+ Some(Node::Item(item)) => {
+ return item.def_id;
+ }
+ _ => {}
+ }
+ let item = {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(local_def_id);
+ let mut parent_iter = tcx.hir().parent_iter(hir_id);
+ loop {
+ let node = parent_iter.next().map(|n| n.1);
+ match node {
+ Some(hir::Node::Item(item)) => break item.def_id,
+ Some(hir::Node::Crate(_)) | None => bug!("Called `item_for` on an Item."),
+ _ => {}
+ }
+ }
+ };
+ item
+}
+
+/// In traits, there is an implicit `Self` type parameter which comes before the generics.
+/// We have to account for this when computing the index of the other generic parameters.
+/// This function returns whether there is such an implicit parameter defined on the given item.
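+/// (Illustrative: in `trait Tr<'a> {}`, the implicit `Self` takes index 0, so
+/// `'a` gets early-bound index 1.)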
+fn sub_items_have_self_param(node: &hir::ItemKind<'_>) -> bool {
+ matches!(*node, hir::ItemKind::Trait(..) | hir::ItemKind::TraitAlias(..))
+}
+
+fn late_region_as_bound_region<'tcx>(tcx: TyCtxt<'tcx>, region: &Region) -> ty::BoundVariableKind {
+ match region {
+ Region::LateBound(_, _, def_id) => {
+ let name = tcx.hir().name(tcx.hir().local_def_id_to_hir_id(def_id.expect_local()));
+ ty::BoundVariableKind::Region(ty::BrNamed(*def_id, name))
+ }
+ _ => bug!("{:?} is not a late region", region),
+ }
+}
+
+impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
+ /// Returns the binders in scope and the type of `Binder` that should be created for a poly trait ref.
+ fn poly_trait_ref_binder_info(&mut self) -> (Vec<ty::BoundVariableKind>, BinderScopeType) {
+ let mut scope = self.scope;
+ let mut supertrait_lifetimes = vec![];
+ loop {
+ match scope {
+ Scope::Body { .. } | Scope::Root => {
+ break (vec![], BinderScopeType::Normal);
+ }
+
+ Scope::Elision { s, .. } | Scope::ObjectLifetimeDefault { s, .. } => {
+ scope = s;
+ }
+
+ Scope::Supertrait { s, lifetimes } => {
+ supertrait_lifetimes = lifetimes.clone();
+ scope = s;
+ }
+
+ Scope::TraitRefBoundary { .. } => {
+ // We should only see super trait lifetimes if there is a `Binder` above
+ assert!(supertrait_lifetimes.is_empty());
+ break (vec![], BinderScopeType::Normal);
+ }
+
+ Scope::Binder { hir_id, .. } => {
+ // Nested poly trait refs have the binders concatenated
+ let mut full_binders =
+ self.map.late_bound_vars.entry(*hir_id).or_default().clone();
+ full_binders.extend(supertrait_lifetimes.into_iter());
+ break (full_binders, BinderScopeType::Concatenating);
+ }
+ }
+ }
+ }
+}
+impl<'a, 'tcx> Visitor<'tcx> for LifetimeContext<'a, 'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ // We want to nest trait/impl items in their parent, but nothing else.
+ fn visit_nested_item(&mut self, _: hir::ItemId) {}
+
+ fn visit_trait_item_ref(&mut self, ii: &'tcx hir::TraitItemRef) {
+ if !self.trait_definition_only {
+ intravisit::walk_trait_item_ref(self, ii)
+ }
+ }
+
+ fn visit_nested_body(&mut self, body: hir::BodyId) {
+ let body = self.tcx.hir().body(body);
+ self.with(Scope::Body { id: body.id(), s: self.scope }, |this| {
+ this.visit_body(body);
+ });
+ }
+
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ if let hir::ExprKind::Closure(hir::Closure {
+ binder, bound_generic_params, fn_decl, ..
+ }) = e.kind
+ {
+ if let &hir::ClosureBinder::For { span: for_sp, .. } = binder {
+ fn span_of_infer(ty: &hir::Ty<'_>) -> Option<Span> {
+ struct V(Option<Span>);
+
+ impl<'v> Visitor<'v> for V {
+ fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
+ match t.kind {
+ _ if self.0.is_some() => (),
+ hir::TyKind::Infer => {
+ self.0 = Some(t.span);
+ }
+ _ => intravisit::walk_ty(self, t),
+ }
+ }
+ }
+
+ let mut v = V(None);
+ v.visit_ty(ty);
+ v.0
+ }
+
+ let infer_in_rt_sp = match fn_decl.output {
+ hir::FnRetTy::DefaultReturn(sp) => Some(sp),
+ hir::FnRetTy::Return(ty) => span_of_infer(ty),
+ };
+
+ let infer_spans = fn_decl
+ .inputs
+ .into_iter()
+ .filter_map(span_of_infer)
+ .chain(infer_in_rt_sp)
+ .collect::<Vec<_>>();
+
+ if !infer_spans.is_empty() {
+ self.tcx.sess
+ .struct_span_err(
+ infer_spans,
+ "implicit types in closure signatures are forbidden when `for<...>` is present",
+ )
+ .span_label(for_sp, "`for<...>` is here")
+ .emit();
+ }
+ }
+
+ let next_early_index = self.next_early_index();
+ let (lifetimes, binders): (FxIndexMap<LocalDefId, Region>, Vec<_>) =
+ bound_generic_params
+ .iter()
+ .filter(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair = Region::late(late_bound_idx as u32, self.tcx.hir(), param);
+ let r = late_region_as_bound_region(self.tcx, &pair.1);
+ (pair, r)
+ })
+ .unzip();
+
+ self.map.late_bound_vars.insert(e.hir_id, binders);
+ let scope = Scope::Binder {
+ hir_id: e.hir_id,
+ lifetimes,
+ s: self.scope,
+ next_early_index,
+ opaque_type_parent: false,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+
+ self.with(scope, |this| {
+ // a closure has no bounds, so everything
+ // contained within is scoped within its binder.
+ intravisit::walk_expr(this, e)
+ });
+ } else {
+ intravisit::walk_expr(self, e)
+ }
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ match &item.kind {
+ hir::ItemKind::Impl(hir::Impl { of_trait, .. }) => {
+ if let Some(of_trait) = of_trait {
+ self.map.late_bound_vars.insert(of_trait.hir_ref_id, Vec::default());
+ }
+ }
+ _ => {}
+ }
+ match item.kind {
+ hir::ItemKind::Fn(_, ref generics, _) => {
+ self.visit_early_late(None, item.hir_id(), generics, |this| {
+ intravisit::walk_item(this, item);
+ });
+ }
+
+ hir::ItemKind::ExternCrate(_)
+ | hir::ItemKind::Use(..)
+ | hir::ItemKind::Macro(..)
+ | hir::ItemKind::Mod(..)
+ | hir::ItemKind::ForeignMod { .. }
+ | hir::ItemKind::GlobalAsm(..) => {
+ // These sorts of items have no lifetime parameters at all.
+ intravisit::walk_item(self, item);
+ }
+ hir::ItemKind::Static(..) | hir::ItemKind::Const(..) => {
+ // No lifetime parameters, but implied 'static.
+ self.with(Scope::Elision { s: self.scope }, |this| {
+ intravisit::walk_item(this, item)
+ });
+ }
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy { .. }) => {
+ // Opaque types are visited when we visit the
+ // `TyKind::OpaqueDef`, so that they have the lifetimes from
+ // their parent opaque_ty in scope.
+ //
+ // The core idea here is that since OpaqueTys are generated with the impl Trait as
+ // their owner, we can keep going until we find the Item that owns that. We then
+ // conservatively add all resolved lifetimes. Otherwise we run into problems in
+ // cases like `type Foo<'a> = impl Bar<As = impl Baz + 'a>`.
+ for (_hir_id, node) in
+ self.tcx.hir().parent_iter(self.tcx.hir().local_def_id_to_hir_id(item.def_id))
+ {
+ match node {
+ hir::Node::Item(parent_item) => {
+ let resolved_lifetimes: &ResolveLifetimes =
+ self.tcx.resolve_lifetimes(item_for(self.tcx, parent_item.def_id));
+ // We need to add *all* deps, since opaque tys may want them from *us*
+ for (&owner, defs) in resolved_lifetimes.defs.iter() {
+ defs.iter().for_each(|(&local_id, region)| {
+ self.map.defs.insert(hir::HirId { owner, local_id }, *region);
+ });
+ }
+ for (&owner, late_bound_vars) in
+ resolved_lifetimes.late_bound_vars.iter()
+ {
+ late_bound_vars.iter().for_each(|(&local_id, late_bound_vars)| {
+ self.map.late_bound_vars.insert(
+ hir::HirId { owner, local_id },
+ late_bound_vars.clone(),
+ );
+ });
+ }
+ break;
+ }
+ hir::Node::Crate(_) => bug!("No Item about an OpaqueTy"),
+ _ => {}
+ }
+ }
+ }
+ hir::ItemKind::TyAlias(_, ref generics)
+ | hir::ItemKind::Enum(_, ref generics)
+ | hir::ItemKind::Struct(_, ref generics)
+ | hir::ItemKind::Union(_, ref generics)
+ | hir::ItemKind::Trait(_, _, ref generics, ..)
+ | hir::ItemKind::TraitAlias(ref generics, ..)
+ | hir::ItemKind::Impl(hir::Impl { ref generics, .. }) => {
+ // These kinds of items have only early-bound lifetime parameters.
+ let mut index = if sub_items_have_self_param(&item.kind) {
+ 1 // Self comes before lifetimes
+ } else {
+ 0
+ };
+ let mut non_lifetime_count = 0;
+ let lifetimes = generics
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ Some(Region::early(self.tcx.hir(), &mut index, param))
+ }
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
+ non_lifetime_count += 1;
+ None
+ }
+ })
+ .collect();
+ self.map.late_bound_vars.insert(item.hir_id(), vec![]);
+ let scope = Scope::Binder {
+ hir_id: item.hir_id(),
+ lifetimes,
+ next_early_index: index + non_lifetime_count,
+ opaque_type_parent: true,
+ scope_type: BinderScopeType::Normal,
+ s: ROOT_SCOPE,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ let scope = Scope::TraitRefBoundary { s: this.scope };
+ this.with(scope, |this| {
+ intravisit::walk_item(this, item);
+ });
+ });
+ }
+ }
+ }
+
+ fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
+ match item.kind {
+ hir::ForeignItemKind::Fn(_, _, ref generics) => {
+ self.visit_early_late(None, item.hir_id(), generics, |this| {
+ intravisit::walk_foreign_item(this, item);
+ })
+ }
+ hir::ForeignItemKind::Static(..) => {
+ intravisit::walk_foreign_item(self, item);
+ }
+ hir::ForeignItemKind::Type => {
+ intravisit::walk_foreign_item(self, item);
+ }
+ }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
+ match ty.kind {
+ hir::TyKind::BareFn(ref c) => {
+ let next_early_index = self.next_early_index();
+ let (lifetimes, binders): (FxIndexMap<LocalDefId, Region>, Vec<_>) = c
+ .generic_params
+ .iter()
+ .filter(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair = Region::late(late_bound_idx as u32, self.tcx.hir(), param);
+ let r = late_region_as_bound_region(self.tcx, &pair.1);
+ (pair, r)
+ })
+ .unzip();
+ self.map.late_bound_vars.insert(ty.hir_id, binders);
+ let scope = Scope::Binder {
+ hir_id: ty.hir_id,
+ lifetimes,
+ s: self.scope,
+ next_early_index,
+ opaque_type_parent: false,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ // a bare fn has no bounds, so everything
+ // contained within is scoped within its binder.
+ intravisit::walk_ty(this, ty);
+ });
+ }
+ hir::TyKind::TraitObject(bounds, ref lifetime, _) => {
+ debug!(?bounds, ?lifetime, "TraitObject");
+ let scope = Scope::TraitRefBoundary { s: self.scope };
+ self.with(scope, |this| {
+ for bound in bounds {
+ this.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None);
+ }
+ });
+ match lifetime.name {
+ LifetimeName::ImplicitObjectLifetimeDefault => {
+ // If the user does not write *anything*, we
+ // use the object lifetime defaulting
+ // rules. So e.g., `Box<dyn Debug>` becomes
+ // `Box<dyn Debug + 'static>`.
+ self.resolve_object_lifetime_default(lifetime)
+ }
+ LifetimeName::Infer => {
+ // If the user writes `'_`, we use the *ordinary* elision
+ // rules. So the `'_` in e.g., `Box<dyn Debug + '_>` will be
+ // resolved the same as the `'_` in `&'_ Foo`.
+ //
+ // cc #48468
+ }
+ LifetimeName::Param(..) | LifetimeName::Static => {
+ // If the user wrote an explicit name, use that.
+ self.visit_lifetime(lifetime);
+ }
+ LifetimeName::Error => {}
+ }
+ }
+ hir::TyKind::Rptr(ref lifetime_ref, ref mt) => {
+ self.visit_lifetime(lifetime_ref);
+ let scope = Scope::ObjectLifetimeDefault {
+ lifetime: self.map.defs.get(&lifetime_ref.hir_id).cloned(),
+ s: self.scope,
+ };
+ self.with(scope, |this| this.visit_ty(&mt.ty));
+ }
+ hir::TyKind::OpaqueDef(item_id, lifetimes) => {
+ // Resolve the lifetimes in the bounds to the lifetime defs in the generics.
+ // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
+ // `type MyAnonTy<'b> = impl MyTrait<'b>;`
+ // ^ ^ this gets resolved in the scope of
+ // the opaque_ty generics
+ let opaque_ty = self.tcx.hir().item(item_id);
+ let (generics, bounds) = match opaque_ty.kind {
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin: hir::OpaqueTyOrigin::TyAlias,
+ ..
+ }) => {
+ intravisit::walk_ty(self, ty);
+
+ // Elided lifetimes are not allowed in non-return
+ // position impl Trait
+ let scope = Scope::TraitRefBoundary { s: self.scope };
+ self.with(scope, |this| {
+ let scope = Scope::Elision { s: this.scope };
+ this.with(scope, |this| {
+ intravisit::walk_item(this, opaque_ty);
+ })
+ });
+
+ return;
+ }
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin: hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..),
+ ref generics,
+ bounds,
+ ..
+ }) => (generics, bounds),
+ ref i => bug!("`impl Trait` pointed to non-opaque type?? {:#?}", i),
+ };
+
+ // Resolve the lifetimes that are applied to the opaque type.
+ // These are resolved in the current scope.
+ // `fn foo<'a>() -> impl MyTrait<'a> { ... }` desugars to
+ // `fn foo<'a>() -> MyAnonTy<'a> { ... }`
+ // ^ ^this gets resolved in the current scope
+ for lifetime in lifetimes {
+ let hir::GenericArg::Lifetime(lifetime) = lifetime else {
+ continue
+ };
+ self.visit_lifetime(lifetime);
+
+ // Check for predicates like `impl for<'a> Trait<impl OtherTrait<'a>>`
+ // and ban them. Type variables instantiated inside binders aren't
+ // well-supported at the moment, so this doesn't work.
+ // In the future, this should be fixed and this error should be removed.
+ let def = self.map.defs.get(&lifetime.hir_id).cloned();
+ let Some(Region::LateBound(_, _, def_id)) = def else {
+ continue
+ };
+ let Some(def_id) = def_id.as_local() else {
+ continue
+ };
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ // Ensure that the parent of the def is an item, not HRTB
+ let parent_id = self.tcx.hir().get_parent_node(hir_id);
+ if !parent_id.is_owner() {
+ if !self.trait_definition_only {
+ struct_span_err!(
+ self.tcx.sess,
+ lifetime.span,
+ E0657,
+ "`impl Trait` can only capture lifetimes \
+ bound at the fn or impl level"
+ )
+ .emit();
+ }
+ self.uninsert_lifetime_on_error(lifetime, def.unwrap());
+ }
+ if let hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::OpaqueTy { .. }, ..
+ }) = self.tcx.hir().get(parent_id)
+ {
+ if !self.trait_definition_only {
+ let mut err = self.tcx.sess.struct_span_err(
+ lifetime.span,
+ "higher kinded lifetime bounds on nested opaque types are not supported yet",
+ );
+ err.span_note(self.tcx.def_span(def_id), "lifetime declared here");
+ err.emit();
+ }
+ self.uninsert_lifetime_on_error(lifetime, def.unwrap());
+ }
+ }
+
+ // We want to start our early-bound indices at the end of the parent scope,
+ // not including any parent `impl Trait`s.
+ let mut index = self.next_early_index_for_opaque_type();
+ debug!(?index);
+
+ let mut lifetimes = FxIndexMap::default();
+ let mut non_lifetime_count = 0;
+ debug!(?generics.params);
+ for param in generics.params {
+ match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ let (def_id, reg) = Region::early(self.tcx.hir(), &mut index, &param);
+ lifetimes.insert(def_id, reg);
+ }
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
+ non_lifetime_count += 1;
+ }
+ }
+ }
+ let next_early_index = index + non_lifetime_count;
+ self.map.late_bound_vars.insert(ty.hir_id, vec![]);
+
+ let scope = Scope::Binder {
+ hir_id: ty.hir_id,
+ lifetimes,
+ next_early_index,
+ s: self.scope,
+ opaque_type_parent: false,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ let scope = Scope::TraitRefBoundary { s: this.scope };
+ this.with(scope, |this| {
+ this.visit_generics(generics);
+ for bound in bounds {
+ this.visit_param_bound(bound);
+ }
+ })
+ });
+ }
+ _ => intravisit::walk_ty(self, ty),
+ }
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
+ use self::hir::TraitItemKind::*;
+ match trait_item.kind {
+ Fn(_, _) => {
+ let tcx = self.tcx;
+ self.visit_early_late(
+ Some(tcx.hir().get_parent_item(trait_item.hir_id())),
+ trait_item.hir_id(),
+ &trait_item.generics,
+ |this| intravisit::walk_trait_item(this, trait_item),
+ );
+ }
+ Type(bounds, ref ty) => {
+ let generics = &trait_item.generics;
+ let mut index = self.next_early_index();
+ debug!("visit_ty: index = {}", index);
+ let mut non_lifetime_count = 0;
+ let lifetimes = generics
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ Some(Region::early(self.tcx.hir(), &mut index, param))
+ }
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
+ non_lifetime_count += 1;
+ None
+ }
+ })
+ .collect();
+ self.map.late_bound_vars.insert(trait_item.hir_id(), vec![]);
+ let scope = Scope::Binder {
+ hir_id: trait_item.hir_id(),
+ lifetimes,
+ next_early_index: index + non_lifetime_count,
+ s: self.scope,
+ opaque_type_parent: true,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ let scope = Scope::TraitRefBoundary { s: this.scope };
+ this.with(scope, |this| {
+ this.visit_generics(generics);
+ for bound in bounds {
+ this.visit_param_bound(bound);
+ }
+ if let Some(ty) = ty {
+ this.visit_ty(ty);
+ }
+ })
+ });
+ }
+ Const(_, _) => {
+ // Only methods and types support generics.
+ assert!(trait_item.generics.params.is_empty());
+ intravisit::walk_trait_item(self, trait_item);
+ }
+ }
+ }
+
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ use self::hir::ImplItemKind::*;
+ match impl_item.kind {
+ Fn(..) => {
+ let tcx = self.tcx;
+ self.visit_early_late(
+ Some(tcx.hir().get_parent_item(impl_item.hir_id())),
+ impl_item.hir_id(),
+ &impl_item.generics,
+ |this| intravisit::walk_impl_item(this, impl_item),
+ );
+ }
+ TyAlias(ref ty) => {
+ let generics = &impl_item.generics;
+ let mut index = self.next_early_index();
+ let mut non_lifetime_count = 0;
+ debug!("visit_ty: index = {}", index);
+ let lifetimes: FxIndexMap<LocalDefId, Region> = generics
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ Some(Region::early(self.tcx.hir(), &mut index, param))
+ }
+ GenericParamKind::Const { .. } | GenericParamKind::Type { .. } => {
+ non_lifetime_count += 1;
+ None
+ }
+ })
+ .collect();
+ self.map.late_bound_vars.insert(ty.hir_id, vec![]);
+ let scope = Scope::Binder {
+ hir_id: ty.hir_id,
+ lifetimes,
+ next_early_index: index + non_lifetime_count,
+ s: self.scope,
+ opaque_type_parent: true,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ let scope = Scope::TraitRefBoundary { s: this.scope };
+ this.with(scope, |this| {
+ this.visit_generics(generics);
+ this.visit_ty(ty);
+ })
+ });
+ }
+ Const(_, _) => {
+ // Only methods and types support generics.
+ assert!(impl_item.generics.params.is_empty());
+ intravisit::walk_impl_item(self, impl_item);
+ }
+ }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
+ match lifetime_ref.name {
+ hir::LifetimeName::Static => self.insert_lifetime(lifetime_ref, Region::Static),
+ hir::LifetimeName::Param(param_def_id, _) => {
+ self.resolve_lifetime_ref(param_def_id, lifetime_ref)
+ }
+ // If we've already reported an error, just ignore `lifetime_ref`.
+ hir::LifetimeName::Error => {}
+ // Those will be resolved by typechecking.
+ hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Infer => {}
+ }
+ }
+
+ fn visit_path(&mut self, path: &'tcx hir::Path<'tcx>, _: hir::HirId) {
+ for (i, segment) in path.segments.iter().enumerate() {
+ let depth = path.segments.len() - i - 1;
+ if let Some(ref args) = segment.args {
+ self.visit_segment_args(path.res, depth, args);
+ }
+ }
+ }
+
+ fn visit_fn(
+ &mut self,
+ fk: intravisit::FnKind<'tcx>,
+ fd: &'tcx hir::FnDecl<'tcx>,
+ body_id: hir::BodyId,
+ _: Span,
+ _: hir::HirId,
+ ) {
+ let output = match fd.output {
+ hir::FnRetTy::DefaultReturn(_) => None,
+ hir::FnRetTy::Return(ref ty) => Some(&**ty),
+ };
+ self.visit_fn_like_elision(&fd.inputs, output, matches!(fk, intravisit::FnKind::Closure));
+ intravisit::walk_fn_kind(self, fk);
+ self.visit_nested_body(body_id)
+ }
+
+ fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
+ let scope = Scope::TraitRefBoundary { s: self.scope };
+ self.with(scope, |this| {
+ for param in generics.params {
+ match param.kind {
+ GenericParamKind::Lifetime { .. } => {}
+ GenericParamKind::Type { ref default, .. } => {
+ if let Some(ref ty) = default {
+ this.visit_ty(&ty);
+ }
+ }
+ GenericParamKind::Const { ref ty, default } => {
+ this.visit_ty(&ty);
+ if let Some(default) = default {
+ this.visit_body(this.tcx.hir().body(default.body));
+ }
+ }
+ }
+ }
+ for predicate in generics.predicates {
+ match predicate {
+ &hir::WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ ref bounded_ty,
+ bounds,
+ ref bound_generic_params,
+ origin,
+ ..
+ }) => {
+ let (lifetimes, binders): (FxIndexMap<LocalDefId, Region>, Vec<_>) =
+ bound_generic_params
+ .iter()
+ .filter(|param| {
+ matches!(param.kind, GenericParamKind::Lifetime { .. })
+ })
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair =
+ Region::late(late_bound_idx as u32, this.tcx.hir(), param);
+ let r = late_region_as_bound_region(this.tcx, &pair.1);
+ (pair, r)
+ })
+ .unzip();
+ this.map.late_bound_vars.insert(bounded_ty.hir_id, binders.clone());
+ let next_early_index = this.next_early_index();
+ // Even if there are no lifetimes defined here, we still wrap it in a binder
+ // scope. If there happens to be a nested poly trait ref (an error), that
+                        // will be `Concatenating` anyway, so we don't have to worry about the depth
+ // being wrong.
+ let scope = Scope::Binder {
+ hir_id: bounded_ty.hir_id,
+ lifetimes,
+ s: this.scope,
+ next_early_index,
+ opaque_type_parent: false,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: Some(origin),
+ };
+ this.with(scope, |this| {
+ this.visit_ty(&bounded_ty);
+ walk_list!(this, visit_param_bound, bounds);
+ })
+ }
+ &hir::WherePredicate::RegionPredicate(hir::WhereRegionPredicate {
+ ref lifetime,
+ bounds,
+ ..
+ }) => {
+ this.visit_lifetime(lifetime);
+ walk_list!(this, visit_param_bound, bounds);
+
+ if lifetime.name != hir::LifetimeName::Static {
+ for bound in bounds {
+ let hir::GenericBound::Outlives(ref lt) = bound else {
+ continue;
+ };
+ if lt.name != hir::LifetimeName::Static {
+ continue;
+ }
+ this.insert_lifetime(lt, Region::Static);
+ this.tcx
+ .sess
+ .struct_span_warn(
+ lifetime.span,
+ &format!(
+ "unnecessary lifetime parameter `{}`",
+ lifetime.name.ident(),
+ ),
+ )
+ .help(&format!(
+ "you can use the `'static` lifetime directly, in place of `{}`",
+ lifetime.name.ident(),
+ ))
+ .emit();
+ }
+ }
+ }
+ &hir::WherePredicate::EqPredicate(hir::WhereEqPredicate {
+ ref lhs_ty,
+ ref rhs_ty,
+ ..
+ }) => {
+ this.visit_ty(lhs_ty);
+ this.visit_ty(rhs_ty);
+ }
+ }
+ }
+ })
+ }
+
+ fn visit_param_bound(&mut self, bound: &'tcx hir::GenericBound<'tcx>) {
+ match bound {
+ hir::GenericBound::LangItemTrait(_, _, hir_id, _) => {
+ // FIXME(jackh726): This is pretty weird. `LangItemTrait` doesn't go
+ // through the regular poly trait ref code, so we don't get another
+ // chance to introduce a binder. For now, I'm keeping the existing logic
+ // of "if there isn't a Binder scope above us, add one", but I
+ // imagine there's a better way to go about this.
+ let (binders, scope_type) = self.poly_trait_ref_binder_info();
+
+ self.map.late_bound_vars.insert(*hir_id, binders);
+ let scope = Scope::Binder {
+ hir_id: *hir_id,
+ lifetimes: FxIndexMap::default(),
+ s: self.scope,
+ next_early_index: self.next_early_index(),
+ opaque_type_parent: false,
+ scope_type,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ intravisit::walk_param_bound(this, bound);
+ });
+ }
+ _ => intravisit::walk_param_bound(self, bound),
+ }
+ }
+
+ fn visit_poly_trait_ref(
+ &mut self,
+ trait_ref: &'tcx hir::PolyTraitRef<'tcx>,
+ _modifier: hir::TraitBoundModifier,
+ ) {
+ debug!("visit_poly_trait_ref(trait_ref={:?})", trait_ref);
+
+ let next_early_index = self.next_early_index();
+ let (mut binders, scope_type) = self.poly_trait_ref_binder_info();
+
+ let initial_bound_vars = binders.len() as u32;
+ let mut lifetimes: FxIndexMap<LocalDefId, Region> = FxIndexMap::default();
+ let binders_iter = trait_ref
+ .bound_generic_params
+ .iter()
+ .filter(|param| matches!(param.kind, GenericParamKind::Lifetime { .. }))
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair =
+ Region::late(initial_bound_vars + late_bound_idx as u32, self.tcx.hir(), param);
+ let r = late_region_as_bound_region(self.tcx, &pair.1);
+ lifetimes.insert(pair.0, pair.1);
+ r
+ });
+ binders.extend(binders_iter);
+
+ debug!(?binders);
+ self.map.late_bound_vars.insert(trait_ref.trait_ref.hir_ref_id, binders);
+
+ // Always introduce a scope here, even if this is in a where clause and
+ // we introduced the binders around the bounded Ty. In that case, we
+ // just reuse the concatenation functionality also present in nested trait
+ // refs.
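+        // e.g. in `where T: for<'a> Tr<'a>`, the binder scope introduced around
+        // the bounded type `T` and the `for<'a>` binder of the trait ref
+        // concatenate into a single binder level rather than nesting.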
+ let scope = Scope::Binder {
+ hir_id: trait_ref.trait_ref.hir_ref_id,
+ lifetimes,
+ s: self.scope,
+ next_early_index,
+ opaque_type_parent: false,
+ scope_type,
+ where_bound_origin: None,
+ };
+ self.with(scope, |this| {
+ walk_list!(this, visit_generic_param, trait_ref.bound_generic_params);
+ this.visit_trait_ref(&trait_ref.trait_ref);
+ });
+ }
+}
+
+fn compute_object_lifetime_defaults<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ item: &hir::Item<'_>,
+) -> Option<&'tcx [ObjectLifetimeDefault]> {
+ match item.kind {
+ hir::ItemKind::Struct(_, ref generics)
+ | hir::ItemKind::Union(_, ref generics)
+ | hir::ItemKind::Enum(_, ref generics)
+ | hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ ref generics,
+ origin: hir::OpaqueTyOrigin::TyAlias,
+ ..
+ })
+ | hir::ItemKind::TyAlias(_, ref generics)
+ | hir::ItemKind::Trait(_, _, ref generics, ..) => {
+ let result = object_lifetime_defaults_for_item(tcx, generics);
+
+ // Debugging aid.
+ let attrs = tcx.hir().attrs(item.hir_id());
+ if tcx.sess.contains_name(attrs, sym::rustc_object_lifetime_default) {
+ let object_lifetime_default_reprs: String = result
+ .iter()
+ .map(|set| match *set {
+ Set1::Empty => "BaseDefault".into(),
+ Set1::One(Region::Static) => "'static".into(),
+ Set1::One(Region::EarlyBound(mut i, _)) => generics
+ .params
+ .iter()
+ .find_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ if i == 0 {
+ return Some(param.name.ident().to_string().into());
+ }
+ i -= 1;
+ None
+ }
+ _ => None,
+ })
+ .unwrap(),
+ Set1::One(_) => bug!(),
+ Set1::Many => "Ambiguous".into(),
+ })
+ .collect::<Vec<Cow<'static, str>>>()
+ .join(",");
+ tcx.sess.span_err(item.span, &object_lifetime_default_reprs);
+ }
+
+ Some(result)
+ }
+ _ => None,
+ }
+}
+
+/// Scan the bounds and where-clauses on parameters to extract bounds
+/// of the form `T:'a` so as to determine the `ObjectLifetimeDefault`
+/// for each type parameter.
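+///
+/// For example (illustrative): given `struct Ref<'a, T: 'a>(&'a T);`, the
+/// object lifetime default for `T` is `'a`, so `Ref<'x, dyn Debug>` is read
+/// as `Ref<'x, dyn Debug + 'x>`.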
+fn object_lifetime_defaults_for_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: &hir::Generics<'_>,
+) -> &'tcx [ObjectLifetimeDefault] {
+ fn add_bounds(set: &mut Set1<hir::LifetimeName>, bounds: &[hir::GenericBound<'_>]) {
+ for bound in bounds {
+ if let hir::GenericBound::Outlives(ref lifetime) = *bound {
+ set.insert(lifetime.name.normalize_to_macros_2_0());
+ }
+ }
+ }
+
+ let process_param = |param: &hir::GenericParam<'_>| match param.kind {
+ GenericParamKind::Lifetime { .. } => None,
+ GenericParamKind::Type { .. } => {
+ let mut set = Set1::Empty;
+
+ let param_def_id = tcx.hir().local_def_id(param.hir_id);
+ for predicate in generics.predicates {
+ // Look for `type: ...` where clauses.
+ let hir::WherePredicate::BoundPredicate(ref data) = *predicate else { continue };
+
+ // Ignore `for<'a> type: ...` as they can change what
+ // lifetimes mean (although we could "just" handle it).
+ if !data.bound_generic_params.is_empty() {
+ continue;
+ }
+
+ let res = match data.bounded_ty.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => path.res,
+ _ => continue,
+ };
+
+ if res == Res::Def(DefKind::TyParam, param_def_id.to_def_id()) {
+ add_bounds(&mut set, &data.bounds);
+ }
+ }
+
+ Some(match set {
+ Set1::Empty => Set1::Empty,
+ Set1::One(name) => {
+ if name == hir::LifetimeName::Static {
+ Set1::One(Region::Static)
+ } else {
+ generics
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ let param_def_id = tcx.hir().local_def_id(param.hir_id);
+ Some((
+ param_def_id,
+ hir::LifetimeName::Param(param_def_id, param.name),
+ ))
+ }
+ _ => None,
+ })
+ .enumerate()
+ .find(|&(_, (_, lt_name))| lt_name == name)
+ .map_or(Set1::Many, |(i, (def_id, _))| {
+ Set1::One(Region::EarlyBound(i as u32, def_id.to_def_id()))
+ })
+ }
+ }
+ Set1::Many => Set1::Many,
+ })
+ }
+ GenericParamKind::Const { .. } => {
+ // Generic consts don't impose any constraints.
+ //
+            // We still store a dummy value here to allow generic parameters
+            // to appear in an arbitrary order.
+ Some(Set1::Empty)
+ }
+ };
+
+ tcx.arena.alloc_from_iter(generics.params.iter().filter_map(process_param))
+}
+
+impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
+ fn with<F>(&mut self, wrap_scope: Scope<'_>, f: F)
+ where
+ F: for<'b> FnOnce(&mut LifetimeContext<'b, 'tcx>),
+ {
+ let LifetimeContext { tcx, map, .. } = self;
+ let xcrate_object_lifetime_defaults = take(&mut self.xcrate_object_lifetime_defaults);
+ let mut this = LifetimeContext {
+ tcx: *tcx,
+ map,
+ scope: &wrap_scope,
+ trait_definition_only: self.trait_definition_only,
+ xcrate_object_lifetime_defaults,
+ };
+ let span = tracing::debug_span!("scope", scope = ?TruncatedScopeDebug(&this.scope));
+ {
+ let _enter = span.enter();
+ f(&mut this);
+ }
+ self.xcrate_object_lifetime_defaults = this.xcrate_object_lifetime_defaults;
+ }
+
+    /// Visits self by adding a scope and handling the recursive walk over the contents with `walk`.
+    ///
+    /// Handles visiting fns and methods. These are a bit complicated because we must distinguish
+    /// early- vs late-bound lifetime parameters. We do this by checking which lifetimes appear
+    /// within type bounds; those are early-bound lifetimes, and the rest are late-bound.
+ ///
+ /// For example:
+ ///
+    /// fn foo<'a, 'b, 'c, T: Trait<'b>>(...)
+ ///
+    /// Here `'a` and `'c` are late-bound but `'b` is early-bound. Note that early- and late-bound
+    /// lifetimes may be interspersed.
+    ///
+    /// If early-bound lifetimes are present, we separate them into their own list (and likewise
+    /// for late-bound ones). They will be numbered sequentially, starting from the lowest index
+    /// that is already in scope (for a fn item, that will be 0, but for a method it might not
+    /// be). Late-bound lifetimes are resolved by name and associated with a binder ID
+    /// (`binder_id`), so the ordering is not important there.
+ fn visit_early_late<F>(
+ &mut self,
+ parent_id: Option<LocalDefId>,
+ hir_id: hir::HirId,
+ generics: &'tcx hir::Generics<'tcx>,
+ walk: F,
+ ) where
+ F: for<'b, 'c> FnOnce(&'b mut LifetimeContext<'c, 'tcx>),
+ {
+ // Find the start of nested early scopes, e.g., in methods.
+ let mut next_early_index = 0;
+ if let Some(parent_id) = parent_id {
+ let parent = self.tcx.hir().expect_item(parent_id);
+ if sub_items_have_self_param(&parent.kind) {
+ next_early_index += 1; // Self comes before lifetimes
+ }
+ match parent.kind {
+ hir::ItemKind::Trait(_, _, ref generics, ..)
+ | hir::ItemKind::Impl(hir::Impl { ref generics, .. }) => {
+ next_early_index += generics.params.len() as u32;
+ }
+ _ => {}
+ }
+ }
+
+ let mut non_lifetime_count = 0;
+ let mut named_late_bound_vars = 0;
+ let lifetimes: FxIndexMap<LocalDefId, Region> = generics
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => {
+ if self.tcx.is_late_bound(param.hir_id) {
+ let late_bound_idx = named_late_bound_vars;
+ named_late_bound_vars += 1;
+ Some(Region::late(late_bound_idx, self.tcx.hir(), param))
+ } else {
+ Some(Region::early(self.tcx.hir(), &mut next_early_index, param))
+ }
+ }
+ GenericParamKind::Type { .. } | GenericParamKind::Const { .. } => {
+ non_lifetime_count += 1;
+ None
+ }
+ })
+ .collect();
+ let next_early_index = next_early_index + non_lifetime_count;
+
+ let binders: Vec<_> = generics
+ .params
+ .iter()
+ .filter(|param| {
+ matches!(param.kind, GenericParamKind::Lifetime { .. })
+ && self.tcx.is_late_bound(param.hir_id)
+ })
+ .enumerate()
+ .map(|(late_bound_idx, param)| {
+ let pair = Region::late(late_bound_idx as u32, self.tcx.hir(), param);
+ late_region_as_bound_region(self.tcx, &pair.1)
+ })
+ .collect();
+ self.map.late_bound_vars.insert(hir_id, binders);
+ let scope = Scope::Binder {
+ hir_id,
+ lifetimes,
+ next_early_index,
+ s: self.scope,
+ opaque_type_parent: true,
+ scope_type: BinderScopeType::Normal,
+ where_bound_origin: None,
+ };
+ self.with(scope, walk);
+ }
+
+ fn next_early_index_helper(&self, only_opaque_type_parent: bool) -> u32 {
+ let mut scope = self.scope;
+ loop {
+ match *scope {
+ Scope::Root => return 0,
+
+ Scope::Binder { next_early_index, opaque_type_parent, .. }
+ if (!only_opaque_type_parent || opaque_type_parent) =>
+ {
+ return next_early_index;
+ }
+
+ Scope::Binder { s, .. }
+ | Scope::Body { s, .. }
+ | Scope::Elision { s, .. }
+ | Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Supertrait { s, .. }
+ | Scope::TraitRefBoundary { s, .. } => scope = s,
+ }
+ }
+ }
+
+    /// Returns the next index one would use for an early-bound region
+ /// if extending the current scope.
+ fn next_early_index(&self) -> u32 {
+ self.next_early_index_helper(true)
+ }
+
+ /// Returns the next index one would use for an `impl Trait` that
+ /// is being converted into an opaque type alias `impl Trait`. This will be the
+ /// next early index from the enclosing item, for the most
+ /// part. See the `opaque_type_parent` field for more info.
+ fn next_early_index_for_opaque_type(&self) -> u32 {
+ self.next_early_index_helper(false)
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn resolve_lifetime_ref(
+ &mut self,
+ region_def_id: LocalDefId,
+ lifetime_ref: &'tcx hir::Lifetime,
+ ) {
+        // Walk up the scope chain, tracking the number of fn scopes
+        // that we pass through, until we find a lifetime with the
+        // given name or we run out of scopes.
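+        // e.g. resolving the `'a` in `for<'a> fn(for<'b> fn(&'a u8))` crosses
+        // one `Normal` binder (`for<'b>`), so the found region is shifted by
+        // `late_depth = 1` to get the right De Bruijn index.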
+ let mut late_depth = 0;
+ let mut scope = self.scope;
+ let mut outermost_body = None;
+ let result = loop {
+ match *scope {
+ Scope::Body { id, s } => {
+ outermost_body = Some(id);
+ scope = s;
+ }
+
+ Scope::Root => {
+ break None;
+ }
+
+ Scope::Binder { ref lifetimes, scope_type, s, where_bound_origin, .. } => {
+ if let Some(&def) = lifetimes.get(&region_def_id) {
+ break Some(def.shifted(late_depth));
+ }
+ match scope_type {
+ BinderScopeType::Normal => late_depth += 1,
+ BinderScopeType::Concatenating => {}
+ }
+ // Fresh lifetimes in APIT used to be allowed in async fns and forbidden in
+ // regular fns.
+ if let Some(hir::PredicateOrigin::ImplTrait) = where_bound_origin
+ && let hir::LifetimeName::Param(_, hir::ParamName::Fresh) = lifetime_ref.name
+ && let hir::IsAsync::NotAsync = self.tcx.asyncness(lifetime_ref.hir_id.owner)
+ && !self.tcx.features().anonymous_lifetime_in_impl_trait
+ {
+ rustc_session::parse::feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::anonymous_lifetime_in_impl_trait,
+ lifetime_ref.span,
+ "anonymous lifetimes in `impl Trait` are unstable",
+ ).emit();
+ return;
+ }
+ scope = s;
+ }
+
+ Scope::Elision { s, .. }
+ | Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Supertrait { s, .. }
+ | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+ }
+ };
+
+ if let Some(mut def) = result {
+ if let Region::EarlyBound(..) = def {
+ // Do not free early-bound regions, only late-bound ones.
+ } else if let Some(body_id) = outermost_body {
+ let fn_id = self.tcx.hir().body_owner(body_id);
+ match self.tcx.hir().get(fn_id) {
+ Node::Item(&hir::Item { kind: hir::ItemKind::Fn(..), .. })
+ | Node::TraitItem(&hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(..), ..
+ })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) => {
+ let scope = self.tcx.hir().local_def_id(fn_id);
+ def = Region::Free(scope.to_def_id(), def.id().unwrap());
+ }
+ _ => {}
+ }
+ }
+
+ self.insert_lifetime(lifetime_ref, def);
+ return;
+ }
+
+        // We may fail to resolve higher-ranked lifetimes that are mentioned by APIT.
+        // AST-based resolution does not care about impl-trait desugaring, which is the
+        // responsibility of lowering. This may create a mismatch between the resolution
+        // the AST found (`region_def_id`), which points to an HRTB, and what HIR allows.
+ // ```
+ // fn foo(x: impl for<'a> Trait<'a, Assoc = impl Copy + 'a>) {}
+ // ```
+ //
+        // In such a case, walk back through the binders to diagnose it properly.
+ let mut scope = self.scope;
+ loop {
+ match *scope {
+ Scope::Binder {
+ where_bound_origin: Some(hir::PredicateOrigin::ImplTrait), ..
+ } => {
+ let mut err = self.tcx.sess.struct_span_err(
+ lifetime_ref.span,
+ "`impl Trait` can only mention lifetimes bound at the fn or impl level",
+ );
+ err.span_note(self.tcx.def_span(region_def_id), "lifetime declared here");
+ err.emit();
+ return;
+ }
+ Scope::Root => break,
+ Scope::Binder { s, .. }
+ | Scope::Body { s, .. }
+ | Scope::Elision { s, .. }
+ | Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Supertrait { s, .. }
+ | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+ }
+ }
+
+ self.tcx.sess.delay_span_bug(
+ lifetime_ref.span,
+ &format!("Could not resolve {:?} in scope {:#?}", lifetime_ref, self.scope,),
+ );
+ }
+
+ fn visit_segment_args(
+ &mut self,
+ res: Res,
+ depth: usize,
+ generic_args: &'tcx hir::GenericArgs<'tcx>,
+ ) {
+ debug!(
+ "visit_segment_args(res={:?}, depth={:?}, generic_args={:?})",
+ res, depth, generic_args,
+ );
+
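+        // Parenthesized args are the `Fn(A, B) -> C` sugar: treat the inputs
+        // and the `Output` associated type binding like a fn signature for
+        // lifetime elision purposes.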
+ if generic_args.parenthesized {
+ self.visit_fn_like_elision(
+ generic_args.inputs(),
+ Some(generic_args.bindings[0].ty()),
+ false,
+ );
+ return;
+ }
+
+ for arg in generic_args.args {
+ if let hir::GenericArg::Lifetime(lt) = arg {
+ self.visit_lifetime(lt);
+ }
+ }
+
+ // Figure out if this is a type/trait segment,
+ // which requires object lifetime defaults.
+ let parent_def_id = |this: &mut Self, def_id: DefId| {
+ let def_key = this.tcx.def_key(def_id);
+ DefId { krate: def_id.krate, index: def_key.parent.expect("missing parent") }
+ };
+ let type_def_id = match res {
+ Res::Def(DefKind::AssocTy, def_id) if depth == 1 => Some(parent_def_id(self, def_id)),
+ Res::Def(DefKind::Variant, def_id) if depth == 0 => Some(parent_def_id(self, def_id)),
+ Res::Def(
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::TyAlias
+ | DefKind::Trait,
+ def_id,
+ ) if depth == 0 => Some(def_id),
+ _ => None,
+ };
+
+ debug!("visit_segment_args: type_def_id={:?}", type_def_id);
+
+ // Compute a vector of defaults, one for each type parameter,
+ // per the rules given in RFCs 599 and 1156. Example:
+ //
+ // ```rust
+ // struct Foo<'a, T: 'a, U> { }
+ // ```
+ //
+ // If you have `Foo<'x, dyn Bar, dyn Baz>`, we want to default
+ // `dyn Bar` to `dyn Bar + 'x` (because of the `T: 'a` bound)
+ // and `dyn Baz` to `dyn Baz + 'static` (because there is no
+ // such bound).
+ //
+ // Therefore, we would compute `object_lifetime_defaults` to a
+ // vector like `['x, 'static]`. Note that the vector only
+ // includes type parameters.
+ let object_lifetime_defaults = type_def_id.map_or_else(Vec::new, |def_id| {
+ let in_body = {
+ let mut scope = self.scope;
+ loop {
+ match *scope {
+ Scope::Root => break false,
+
+ Scope::Body { .. } => break true,
+
+ Scope::Binder { s, .. }
+ | Scope::Elision { s, .. }
+ | Scope::ObjectLifetimeDefault { s, .. }
+ | Scope::Supertrait { s, .. }
+ | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+ }
+ }
+ };
+
+ let map = &self.map;
+ let set_to_region = |set: &ObjectLifetimeDefault| match *set {
+ Set1::Empty => {
+ if in_body {
+ None
+ } else {
+ Some(Region::Static)
+ }
+ }
+ Set1::One(r) => {
+ let lifetimes = generic_args.args.iter().filter_map(|arg| match arg {
+ GenericArg::Lifetime(lt) => Some(lt),
+ _ => None,
+ });
+ r.subst(lifetimes, map)
+ }
+ Set1::Many => None,
+ };
+ if let Some(def_id) = def_id.as_local() {
+ let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ self.tcx
+ .object_lifetime_defaults(id.owner)
+ .unwrap()
+ .iter()
+ .map(set_to_region)
+ .collect()
+ } else {
+ let tcx = self.tcx;
+ self.xcrate_object_lifetime_defaults
+ .entry(def_id)
+ .or_insert_with(|| {
+ tcx.generics_of(def_id)
+ .params
+ .iter()
+ .filter_map(|param| match param.kind {
+ GenericParamDefKind::Type { object_lifetime_default, .. } => {
+ Some(object_lifetime_default)
+ }
+ GenericParamDefKind::Const { .. } => Some(Set1::Empty),
+ GenericParamDefKind::Lifetime => None,
+ })
+ .collect()
+ })
+ .iter()
+ .map(set_to_region)
+ .collect()
+ }
+ });
+
+ debug!("visit_segment_args: object_lifetime_defaults={:?}", object_lifetime_defaults);
+
+ let mut i = 0;
+ for arg in generic_args.args {
+ match arg {
+ GenericArg::Lifetime(_) => {}
+ GenericArg::Type(ty) => {
+ if let Some(&lt) = object_lifetime_defaults.get(i) {
+ let scope = Scope::ObjectLifetimeDefault { lifetime: lt, s: self.scope };
+ self.with(scope, |this| this.visit_ty(ty));
+ } else {
+ self.visit_ty(ty);
+ }
+ i += 1;
+ }
+ GenericArg::Const(ct) => {
+ self.visit_anon_const(&ct.value);
+ i += 1;
+ }
+ GenericArg::Infer(inf) => {
+ self.visit_id(inf.hir_id);
+ i += 1;
+ }
+ }
+ }
+
+ // Hack: when resolving the type `XX` in binding like `dyn
+ // Foo<'b, Item = XX>`, the current object-lifetime default
+ // would be to examine the trait `Foo` to check whether it has
+ // a lifetime bound declared on `Item`. e.g., if `Foo` is
+ // declared like so, then the default object lifetime bound in
+ // `XX` should be `'b`:
+ //
+ // ```rust
+ // trait Foo<'a> {
+ // type Item: 'a;
+ // }
+ // ```
+ //
+ // but if we just have `type Item;`, then it would be
+ // `'static`. However, we don't get all of this logic correct.
+ //
+ // Instead, we do something hacky: if there are no lifetime parameters
+ // to the trait, then we simply use a default object lifetime
+ // bound of `'static`, because there is no other possibility. On the other hand,
+ // if there ARE lifetime parameters, then we require the user to give an
+ // explicit bound for now.
+ //
+ // This is intended to leave room for us to implement the
+ // correct behavior in the future.
+ let has_lifetime_parameter =
+ generic_args.args.iter().any(|arg| matches!(arg, GenericArg::Lifetime(_)));
+
+ // Resolve lifetimes found in the bindings, so either in the type `XX` in `Item = XX` or
+ // in the trait ref `YY<...>` in `Item: YY<...>`.
+ for binding in generic_args.bindings {
+ let scope = Scope::ObjectLifetimeDefault {
+ lifetime: if has_lifetime_parameter { None } else { Some(Region::Static) },
+ s: self.scope,
+ };
+ if let Some(type_def_id) = type_def_id {
+ let lifetimes = LifetimeContext::supertrait_hrtb_lifetimes(
+ self.tcx,
+ type_def_id,
+ binding.ident,
+ );
+ self.with(scope, |this| {
+ let scope = Scope::Supertrait {
+ lifetimes: lifetimes.unwrap_or_default(),
+ s: this.scope,
+ };
+ this.with(scope, |this| this.visit_assoc_type_binding(binding));
+ });
+ } else {
+ self.with(scope, |this| this.visit_assoc_type_binding(binding));
+ }
+ }
+ }
+
+ /// Returns all the late-bound vars that come into scope from supertrait HRTBs, based on the
+ /// associated type name and starting trait.
+ /// For example, imagine we have
+ /// ```ignore (illustrative)
+ /// trait Foo<'a, 'b> {
+ /// type As;
+ /// }
+ /// trait Bar<'b>: for<'a> Foo<'a, 'b> {}
+    /// trait Baz: for<'b> Bar<'b> {}
+ /// ```
+    /// In this case, if we wanted the supertrait HRTB lifetimes for `As` on
+    /// the starting trait `Baz`, we would return `Some(['b, 'a])`.
+ fn supertrait_hrtb_lifetimes(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ assoc_name: Ident,
+ ) -> Option<Vec<ty::BoundVariableKind>> {
+ let trait_defines_associated_type_named = |trait_def_id: DefId| {
+ tcx.associated_items(trait_def_id)
+ .find_by_name_and_kind(tcx, assoc_name, ty::AssocKind::Type, trait_def_id)
+ .is_some()
+ };
+
+ use smallvec::{smallvec, SmallVec};
+ let mut stack: SmallVec<[(DefId, SmallVec<[ty::BoundVariableKind; 8]>); 8]> =
+ smallvec![(def_id, smallvec![])];
+ let mut visited: FxHashSet<DefId> = FxHashSet::default();
+ loop {
+ let Some((def_id, bound_vars)) = stack.pop() else {
+ break None;
+ };
+ // See issue #83753. If someone writes an associated type on a non-trait, just treat it as
+ // there being no supertrait HRTBs.
+ match tcx.def_kind(def_id) {
+ DefKind::Trait | DefKind::TraitAlias | DefKind::Impl => {}
+ _ => break None,
+ }
+
+ if trait_defines_associated_type_named(def_id) {
+ break Some(bound_vars.into_iter().collect());
+ }
+ let predicates =
+ tcx.super_predicates_that_define_assoc_type((def_id, Some(assoc_name)));
+ let obligations = predicates.predicates.iter().filter_map(|&(pred, _)| {
+ let bound_predicate = pred.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(data) => {
+ // The order here needs to match what we would get from `subst_supertrait`
+ let pred_bound_vars = bound_predicate.bound_vars();
+ let mut all_bound_vars = bound_vars.clone();
+ all_bound_vars.extend(pred_bound_vars.iter());
+ let super_def_id = data.trait_ref.def_id;
+ Some((super_def_id, all_bound_vars))
+ }
+ _ => None,
+ }
+ });
+
+ let obligations = obligations.filter(|o| visited.insert(o.0));
+ stack.extend(obligations);
+ }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn visit_fn_like_elision(
+ &mut self,
+ inputs: &'tcx [hir::Ty<'tcx>],
+ output: Option<&'tcx hir::Ty<'tcx>>,
+ in_closure: bool,
+ ) {
+ self.with(Scope::Elision { s: self.scope }, |this| {
+ for input in inputs {
+ this.visit_ty(input);
+ }
+ if !in_closure && let Some(output) = output {
+ this.visit_ty(output);
+ }
+ });
+ if in_closure && let Some(output) = output {
+ self.visit_ty(output);
+ }
+ }
+
+ fn resolve_object_lifetime_default(&mut self, lifetime_ref: &'tcx hir::Lifetime) {
+ debug!("resolve_object_lifetime_default(lifetime_ref={:?})", lifetime_ref);
+ let mut late_depth = 0;
+ let mut scope = self.scope;
+ let lifetime = loop {
+ match *scope {
+ Scope::Binder { s, scope_type, .. } => {
+ match scope_type {
+ BinderScopeType::Normal => late_depth += 1,
+ BinderScopeType::Concatenating => {}
+ }
+ scope = s;
+ }
+
+ Scope::Root | Scope::Elision { .. } => break Region::Static,
+
+ Scope::Body { .. } | Scope::ObjectLifetimeDefault { lifetime: None, .. } => return,
+
+ Scope::ObjectLifetimeDefault { lifetime: Some(l), .. } => break l,
+
+ Scope::Supertrait { s, .. } | Scope::TraitRefBoundary { s, .. } => {
+ scope = s;
+ }
+ }
+ };
+ self.insert_lifetime(lifetime_ref, lifetime.shifted(late_depth));
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn insert_lifetime(&mut self, lifetime_ref: &'tcx hir::Lifetime, def: Region) {
+ debug!(
+ node = ?self.tcx.hir().node_to_string(lifetime_ref.hir_id),
+ span = ?self.tcx.sess.source_map().span_to_diagnostic_string(lifetime_ref.span)
+ );
+ self.map.defs.insert(lifetime_ref.hir_id, def);
+ }
+
+ /// Sometimes we resolve a lifetime, but later find that it is an
+ /// error (esp. around impl trait). In that case, we remove the
+ /// entry into `map.defs` so as not to confuse later code.
+ fn uninsert_lifetime_on_error(&mut self, lifetime_ref: &'tcx hir::Lifetime, bad_def: Region) {
+ let old_value = self.map.defs.remove(&lifetime_ref.hir_id);
+ assert_eq!(old_value, Some(bad_def));
+ }
+}
+
+/// Detects late-bound lifetimes and inserts them into
+/// `late_bound`.
+///
+/// A region declared on a fn is **late-bound** if:
+/// - it is constrained by an argument type;
+/// - it does not appear in a where-clause.
+///
+/// "Constrained" basically means that it appears in any type but
+/// not amongst the inputs to a projection. In other words, `<&'a
+/// T as Trait<'b>>::Foo` does not constrain `'a` or `'b`.
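+///
+/// For example (illustrative): in `fn f<'a, 'b>(x: &'a u32) where 'b: 'a {}`,
+/// both `'a` and `'b` appear in the where-clause and are early-bound, while in
+/// `fn g<'c>(x: &'c u32) {}` the lifetime `'c` is late-bound.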
+fn is_late_bound_map(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<&FxIndexSet<LocalDefId>> {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let decl = tcx.hir().fn_decl_by_hir_id(hir_id)?;
+ let generics = tcx.hir().get_generics(def_id)?;
+
+ let mut late_bound = FxIndexSet::default();
+
+ let mut constrained_by_input = ConstrainedCollector::default();
+ for arg_ty in decl.inputs {
+ constrained_by_input.visit_ty(arg_ty);
+ }
+
+ let mut appears_in_output = AllCollector::default();
+ intravisit::walk_fn_ret_ty(&mut appears_in_output, &decl.output);
+
+ debug!(?constrained_by_input.regions);
+
+ // Walk the lifetimes that appear in where clauses.
+ //
+ // Subtle point: because we disallow nested bindings, we can just
+ // ignore binders here and scrape up all names we see.
+ let mut appears_in_where_clause = AllCollector::default();
+ appears_in_where_clause.visit_generics(generics);
+ debug!(?appears_in_where_clause.regions);
+
+ // Late bound regions are those that:
+ // - appear in the inputs
+ // - do not appear in the where-clauses
+ // - are not implicitly captured by `impl Trait`
+ for param in generics.params {
+ match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => { /* fall through */ }
+
+ // Neither types nor consts are late-bound.
+ hir::GenericParamKind::Type { .. } | hir::GenericParamKind::Const { .. } => continue,
+ }
+
+ let param_def_id = tcx.hir().local_def_id(param.hir_id);
+
+ // appears in the where clauses? early-bound.
+ if appears_in_where_clause.regions.contains(&param_def_id) {
+ continue;
+ }
+
+ // does not appear in the inputs, but appears in the return type? early-bound.
+ if !constrained_by_input.regions.contains(&param_def_id)
+ && appears_in_output.regions.contains(&param_def_id)
+ {
+ continue;
+ }
+
+ debug!("lifetime {:?} with id {:?} is late-bound", param.name.ident(), param.hir_id);
+
+ let inserted = late_bound.insert(param_def_id);
+ assert!(inserted, "visited lifetime {:?} twice", param.hir_id);
+ }
+
+ debug!(?late_bound);
+ return Some(tcx.arena.alloc(late_bound));
+
+ #[derive(Default)]
+ struct ConstrainedCollector {
+ regions: FxHashSet<LocalDefId>,
+ }
+
+ impl<'v> Visitor<'v> for ConstrainedCollector {
+ fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
+ match ty.kind {
+ hir::TyKind::Path(
+ hir::QPath::Resolved(Some(_), _) | hir::QPath::TypeRelative(..),
+ ) => {
+ // ignore lifetimes appearing in associated type
+ // projections, as they are not *constrained*
+ // (defined above)
+ }
+
+ hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => {
+ // consider only the lifetimes on the final
+ // segment; I am not sure it's even currently
+ // valid to have them elsewhere, but even if it
+                    // is, those would potentially be inputs to
+ // projections
+ if let Some(last_segment) = path.segments.last() {
+ self.visit_path_segment(path.span, last_segment);
+ }
+ }
+
+ _ => {
+ intravisit::walk_ty(self, ty);
+ }
+ }
+ }
+
+ fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
+ if let hir::LifetimeName::Param(def_id, _) = lifetime_ref.name {
+ self.regions.insert(def_id);
+ }
+ }
+ }
+
+ #[derive(Default)]
+ struct AllCollector {
+ regions: FxHashSet<LocalDefId>,
+ }
+
+ impl<'v> Visitor<'v> for AllCollector {
+ fn visit_lifetime(&mut self, lifetime_ref: &'v hir::Lifetime) {
+ if let hir::LifetimeName::Param(def_id, _) = lifetime_ref.name {
+ self.regions.insert(def_id);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs
new file mode 100644
index 000000000..62843c651
--- /dev/null
+++ b/compiler/rustc_resolve/src/lib.rs
@@ -0,0 +1,2089 @@
+//! This crate is responsible for the part of name resolution that doesn't require the type checker.
+//!
+//! Module structure of the crate is built here.
+//! Paths in macros, imports, expressions, types, patterns are resolved here.
+//! Label and lifetime names are resolved here as well.
+//!
+//! Type-relative name resolution (methods, fields, associated items) happens in `rustc_typeck`.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(box_patterns)]
+#![feature(drain_filter)]
+#![feature(if_let_guard)]
+#![feature(iter_intersperse)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(never_type)]
+#![recursion_limit = "256"]
+#![allow(rustdoc::private_intra_doc_links)]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate tracing;
+
+pub use rustc_hir::def::{Namespace, PerNS};
+
+use rustc_arena::{DroplessArena, TypedArena};
+use rustc_ast::node_id::NodeMap;
+use rustc_ast::{self as ast, NodeId, CRATE_NODE_ID};
+use rustc_ast::{AngleBracketedArg, Crate, Expr, ExprKind, GenericArg, GenericArgs, LitKind, Path};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
+use rustc_data_structures::intern::Interned;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_expand::base::{DeriveResolutions, SyntaxExtension, SyntaxExtensionKind};
+use rustc_hir::def::Namespace::*;
+use rustc_hir::def::{self, CtorOf, DefKind, LifetimeRes, PartialRes};
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId};
+use rustc_hir::def_id::{CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::definitions::{DefPathData, Definitions};
+use rustc_hir::TraitCandidate;
+use rustc_index::vec::IndexVec;
+use rustc_metadata::creader::{CStore, CrateLoader};
+use rustc_middle::metadata::ModChild;
+use rustc_middle::middle::privacy::AccessLevels;
+use rustc_middle::span_bug;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, DefIdTree, MainDefinition, RegisteredTools, ResolverOutputs};
+use rustc_query_system::ich::StableHashingContext;
+use rustc_session::cstore::{CrateStore, CrateStoreDyn, MetadataLoaderDyn};
+use rustc_session::lint::LintBuffer;
+use rustc_session::Session;
+use rustc_span::hygiene::{ExpnId, LocalExpnId, MacroKind, SyntaxContext, Transparency};
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+
+use smallvec::{smallvec, SmallVec};
+use std::cell::{Cell, RefCell};
+use std::collections::BTreeSet;
+use std::{cmp, fmt, ptr};
+use tracing::debug;
+
+use diagnostics::{ImportSuggestion, LabelSuggestion, Suggestion};
+use imports::{Import, ImportKind, ImportResolver, NameResolution};
+use late::{HasGenericParams, PathSource, PatternSource};
+use macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef};
+
+use crate::access_levels::AccessLevelsVisitor;
+
+type Res = def::Res<NodeId>;
+
+mod access_levels;
+mod build_reduced_graph;
+mod check_unused;
+mod def_collector;
+mod diagnostics;
+mod ident;
+mod imports;
+mod late;
+mod macros;
+
+enum Weak {
+ Yes,
+ No,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum Determinacy {
+ Determined,
+ Undetermined,
+}
+
+impl Determinacy {
+ fn determined(determined: bool) -> Determinacy {
+ if determined { Determinacy::Determined } else { Determinacy::Undetermined }
+ }
+}
+
+/// A specific scope in which a name can be looked up.
+/// This enum is currently used only for early resolution (imports and macros),
+/// but not for late resolution yet.
+#[derive(Clone, Copy)]
+enum Scope<'a> {
+ DeriveHelpers(LocalExpnId),
+ DeriveHelpersCompat,
+ MacroRules(MacroRulesScopeRef<'a>),
+ CrateRoot,
+ // The node ID is for reporting the `PROC_MACRO_DERIVE_RESOLUTION_FALLBACK`
+ // lint if it should be reported.
+ Module(Module<'a>, Option<NodeId>),
+ RegisteredAttrs,
+ MacroUsePrelude,
+ BuiltinAttrs,
+ ExternPrelude,
+ ToolPrelude,
+ StdLibPrelude,
+ BuiltinTypes,
+}
+
+/// Names from different contexts may want to visit different subsets of all specific scopes
+/// with different restrictions when looking up the resolution.
+/// This enum is currently used only for early resolution (imports and macros),
+/// but not for late resolution yet.
+#[derive(Clone, Copy)]
+enum ScopeSet<'a> {
+ /// All scopes with the given namespace.
+ All(Namespace, /*is_import*/ bool),
+ /// Crate root, then extern prelude (used for mixed 2015-2018 mode in macros).
+ AbsolutePath(Namespace),
+ /// All scopes with macro namespace and the given macro kind restriction.
+ Macro(MacroKind),
+ /// All scopes with the given namespace, used for partially performing late resolution.
+ /// The node id enables lints and is used for reporting them.
+ Late(Namespace, Module<'a>, Option<NodeId>),
+}
+
+/// Everything you need to know about a name's location to resolve it.
+/// Serves as a starting point for the scope visitor.
+/// This struct is currently used only for early resolution (imports and macros),
+/// but not for late resolution yet.
+#[derive(Clone, Copy, Debug)]
+pub struct ParentScope<'a> {
+ pub module: Module<'a>,
+ expansion: LocalExpnId,
+ pub macro_rules: MacroRulesScopeRef<'a>,
+ derives: &'a [ast::Path],
+}
+
+impl<'a> ParentScope<'a> {
+ /// Creates a parent scope with the passed argument used as the module scope component,
+ /// and other scope components set to default empty values.
+ pub fn module(module: Module<'a>, resolver: &Resolver<'a>) -> ParentScope<'a> {
+ ParentScope {
+ module,
+ expansion: LocalExpnId::ROOT,
+ macro_rules: resolver.arenas.alloc_macro_rules_scope(MacroRulesScope::Empty),
+ derives: &[],
+ }
+ }
+}
+
+#[derive(Copy, Debug, Clone)]
+enum ImplTraitContext {
+ Existential,
+ Universal(LocalDefId),
+}
+
+#[derive(Eq)]
+struct BindingError {
+ name: Symbol,
+ origin: BTreeSet<Span>,
+ target: BTreeSet<Span>,
+ could_be_path: bool,
+}
+
+impl PartialOrd for BindingError {
+ fn partial_cmp(&self, other: &BindingError) -> Option<cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl PartialEq for BindingError {
+ fn eq(&self, other: &BindingError) -> bool {
+ self.name == other.name
+ }
+}
+
+impl Ord for BindingError {
+ fn cmp(&self, other: &BindingError) -> cmp::Ordering {
+ self.name.cmp(&other.name)
+ }
+}
+
+enum ResolutionError<'a> {
+ /// Error E0401: can't use type or const parameters from outer function.
+ GenericParamsFromOuterFunction(Res, HasGenericParams),
+ /// Error E0403: the name is already used for a type or const parameter in this generic
+ /// parameter list.
+ NameAlreadyUsedInParameterList(Symbol, Span),
+ /// Error E0407: method is not a member of trait.
+ MethodNotMemberOfTrait(Ident, String, Option<Symbol>),
+ /// Error E0437: type is not a member of trait.
+ TypeNotMemberOfTrait(Ident, String, Option<Symbol>),
+ /// Error E0438: const is not a member of trait.
+ ConstNotMemberOfTrait(Ident, String, Option<Symbol>),
+ /// Error E0408: variable `{}` is not bound in all patterns.
+ VariableNotBoundInPattern(BindingError, ParentScope<'a>),
+ /// Error E0409: variable `{}` is bound in inconsistent ways within the same match arm.
+ VariableBoundWithDifferentMode(Symbol, Span),
+ /// Error E0415: identifier is bound more than once in this parameter list.
+ IdentifierBoundMoreThanOnceInParameterList(Symbol),
+ /// Error E0416: identifier is bound more than once in the same pattern.
+ IdentifierBoundMoreThanOnceInSamePattern(Symbol),
+ /// Error E0426: use of undeclared label.
+ UndeclaredLabel { name: Symbol, suggestion: Option<LabelSuggestion> },
+ /// Error E0429: `self` imports are only allowed within a `{ }` list.
+ SelfImportsOnlyAllowedWithin { root: bool, span_with_rename: Span },
+ /// Error E0430: `self` import can only appear once in the list.
+ SelfImportCanOnlyAppearOnceInTheList,
+ /// Error E0431: `self` import can only appear in an import list with a non-empty prefix.
+ SelfImportOnlyInImportListWithNonEmptyPrefix,
+ /// Error E0433: failed to resolve.
+ FailedToResolve { label: String, suggestion: Option<Suggestion> },
+ /// Error E0434: can't capture dynamic environment in a fn item.
+ CannotCaptureDynamicEnvironmentInFnItem,
+ /// Error E0435: attempt to use a non-constant value in a constant.
+ AttemptToUseNonConstantValueInConstant(
+ Ident,
+ /* suggestion */ &'static str,
+ /* current */ &'static str,
+ ),
+ /// Error E0530: `X` bindings cannot shadow `Y`s.
+ BindingShadowsSomethingUnacceptable {
+ shadowing_binding: PatternSource,
+ name: Symbol,
+ participle: &'static str,
+ article: &'static str,
+ shadowed_binding: Res,
+ shadowed_binding_span: Span,
+ },
+ /// Error E0128: generic parameters with a default cannot use forward-declared identifiers.
+ ForwardDeclaredGenericParam,
+    /// Error E0770: the type of const parameters must not depend on other generic parameters.
+    ParamInTyOfConstParam(Symbol),
+    /// Generic parameters must not be used inside const evaluations.
+ ///
+ /// This error is only emitted when using `min_const_generics`.
+ ParamInNonTrivialAnonConst { name: Symbol, is_type: bool },
+    /// Error E0735: generic parameters with a default cannot use `Self`.
+    SelfInGenericParamDefault,
+    /// Error E0767: use of unreachable label.
+ UnreachableLabel { name: Symbol, definition_span: Span, suggestion: Option<LabelSuggestion> },
+ /// Error E0323, E0324, E0325: mismatch between trait item and impl item.
+ TraitImplMismatch {
+ name: Symbol,
+ kind: &'static str,
+ trait_path: String,
+ trait_item_span: Span,
+ code: rustc_errors::DiagnosticId,
+ },
+ /// Inline asm `sym` operand must refer to a `fn` or `static`.
+ InvalidAsmSym,
+}
+
+enum VisResolutionError<'a> {
+ Relative2018(Span, &'a ast::Path),
+ AncestorOnly(Span),
+ FailedToResolve(Span, String, Option<Suggestion>),
+ ExpectedFound(Span, String, Res),
+ Indeterminate(Span),
+ ModuleOnly(Span),
+}
+
+/// A minimal representation of a path segment. We use this in resolve because we synthesize 'path
+/// segments' which don't have the rest of an AST or HIR `PathSegment`.
+#[derive(Clone, Copy, Debug)]
+pub struct Segment {
+ ident: Ident,
+ id: Option<NodeId>,
+ /// Signals whether this `PathSegment` has generic arguments. Used to avoid providing
+ /// nonsensical suggestions.
+ has_generic_args: bool,
+ /// Signals whether this `PathSegment` has lifetime arguments.
+ has_lifetime_args: bool,
+ args_span: Span,
+}
+
+impl Segment {
+ fn from_path(path: &Path) -> Vec<Segment> {
+ path.segments.iter().map(|s| s.into()).collect()
+ }
+
+ fn from_ident(ident: Ident) -> Segment {
+ Segment {
+ ident,
+ id: None,
+ has_generic_args: false,
+ has_lifetime_args: false,
+ args_span: DUMMY_SP,
+ }
+ }
+
+ fn from_ident_and_id(ident: Ident, id: NodeId) -> Segment {
+ Segment {
+ ident,
+ id: Some(id),
+ has_generic_args: false,
+ has_lifetime_args: false,
+ args_span: DUMMY_SP,
+ }
+ }
+
+ fn names_to_string(segments: &[Segment]) -> String {
+ names_to_string(&segments.iter().map(|seg| seg.ident.name).collect::<Vec<_>>())
+ }
+}
+
+impl<'a> From<&'a ast::PathSegment> for Segment {
+ fn from(seg: &'a ast::PathSegment) -> Segment {
+ let has_generic_args = seg.args.is_some();
+ let (args_span, has_lifetime_args) = if let Some(args) = seg.args.as_deref() {
+ match args {
+ GenericArgs::AngleBracketed(args) => {
+ let found_lifetimes = args
+ .args
+ .iter()
+ .any(|arg| matches!(arg, AngleBracketedArg::Arg(GenericArg::Lifetime(_))));
+ (args.span, found_lifetimes)
+ }
+ GenericArgs::Parenthesized(args) => (args.span, true),
+ }
+ } else {
+ (DUMMY_SP, false)
+ };
+ Segment {
+ ident: seg.ident,
+ id: Some(seg.id),
+ has_generic_args,
+ has_lifetime_args,
+ args_span,
+ }
+ }
+}
+
+/// An intermediate resolution result.
+///
+/// This refers to the thing referred to by a name. The difference between `Res` and `Item` is
+/// that items are visible in their whole block, while a `Res` is visible only from the place it
+/// is defined onward.
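+///
+/// For example:
+/// ```ignore (illustrative)
+/// fn f() {
+///     g(); // ok: `g` is an `Item`, visible in the whole block
+///     fn g() {}
+///     let x = 0; // `x` is a `Res`, visible only from this point on
+/// }
+/// ```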
+#[derive(Debug)]
+enum LexicalScopeBinding<'a> {
+ Item(&'a NameBinding<'a>),
+ Res(Res),
+}
+
+impl<'a> LexicalScopeBinding<'a> {
+ fn res(self) -> Res {
+ match self {
+ LexicalScopeBinding::Item(binding) => binding.res(),
+ LexicalScopeBinding::Res(res) => res,
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+enum ModuleOrUniformRoot<'a> {
+ /// Regular module.
+ Module(Module<'a>),
+
+ /// Virtual module that denotes resolution in crate root with fallback to extern prelude.
+ CrateRootAndExternPrelude,
+
+ /// Virtual module that denotes resolution in extern prelude.
+ /// Used for paths starting with `::` on 2018 edition.
+ ExternPrelude,
+
+ /// Virtual module that denotes resolution in current scope.
+ /// Used only for resolving single-segment imports. The reason it exists is that import paths
+ /// are always split into two parts, the first of which should be some kind of module.
+ CurrentScope,
+}
+
+impl ModuleOrUniformRoot<'_> {
+ fn same_def(lhs: Self, rhs: Self) -> bool {
+ match (lhs, rhs) {
+ (ModuleOrUniformRoot::Module(lhs), ModuleOrUniformRoot::Module(rhs)) => {
+ ptr::eq(lhs, rhs)
+ }
+ (
+ ModuleOrUniformRoot::CrateRootAndExternPrelude,
+ ModuleOrUniformRoot::CrateRootAndExternPrelude,
+ )
+ | (ModuleOrUniformRoot::ExternPrelude, ModuleOrUniformRoot::ExternPrelude)
+ | (ModuleOrUniformRoot::CurrentScope, ModuleOrUniformRoot::CurrentScope) => true,
+ _ => false,
+ }
+ }
+}
+
+#[derive(Clone, Debug)]
+enum PathResult<'a> {
+ Module(ModuleOrUniformRoot<'a>),
+ NonModule(PartialRes),
+ Indeterminate,
+ Failed {
+ span: Span,
+ label: String,
+ suggestion: Option<Suggestion>,
+ is_error_from_last_segment: bool,
+ },
+}
+
+impl<'a> PathResult<'a> {
+ fn failed(
+ span: Span,
+ is_error_from_last_segment: bool,
+ finalize: bool,
+ label_and_suggestion: impl FnOnce() -> (String, Option<Suggestion>),
+ ) -> PathResult<'a> {
+ let (label, suggestion) =
+ if finalize { label_and_suggestion() } else { (String::new(), None) };
+ PathResult::Failed { span, label, suggestion, is_error_from_last_segment }
+ }
+}
+
+#[derive(Debug)]
+enum ModuleKind {
+ /// An anonymous module; e.g., just a block.
+ ///
+ /// ```
+ /// fn main() {
+ /// fn f() {} // (1)
+ /// { // This is an anonymous module
+ /// f(); // This resolves to (2) as we are inside the block.
+ /// fn f() {} // (2)
+ /// }
+ /// f(); // Resolves to (1)
+ /// }
+ /// ```
+ Block,
+ /// Any module with a name.
+ ///
+ /// This could be:
+ ///
+ /// * A normal module – either `mod from_file;` or `mod from_block { }` –
+ /// or the crate root (which is conceptually a top-level module).
+ /// Note that the crate root's [name][Self::name] will be [`kw::Empty`].
+ /// * A trait or an enum (it implicitly contains associated types, methods and variant
+ /// constructors).
+ Def(DefKind, DefId, Symbol),
+}
+
+impl ModuleKind {
+ /// Get name of the module.
+ pub fn name(&self) -> Option<Symbol> {
+ match self {
+ ModuleKind::Block => None,
+ ModuleKind::Def(.., name) => Some(*name),
+ }
+ }
+}
+
+/// A key that identifies a binding in a given `Module`.
+///
+/// Multiple bindings in the same module can have the same key (in a valid
+/// program) if all but one of them come from glob imports.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+struct BindingKey {
+ /// The identifier for the binding, always the `normalize_to_macros_2_0` version of the
+ /// identifier.
+ ident: Ident,
+ ns: Namespace,
+ /// 0 if ident is not `_`, otherwise a value that's unique to the specific
+ /// `_` in the expanded AST that introduced this binding.
+ disambiguator: u32,
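+    /// e.g. two separate `use foo::Bar as _;` items in one module must not
+    /// collide, so each underscore gets its own disambiguator.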
+}
+
+type Resolutions<'a> = RefCell<FxIndexMap<BindingKey, &'a RefCell<NameResolution<'a>>>>;
+
+/// One node in the tree of modules.
+///
+/// Note that a "module" in resolve is broader than a `mod` that you declare in Rust code. It may be one of these:
+///
+/// * `mod`
+/// * crate root (aka, top-level anonymous module)
+/// * `enum`
+/// * `trait`
+/// * curly-braced block with statements
+///
+/// You can use [`ModuleData::kind`] to determine the kind of module this is.
+pub struct ModuleData<'a> {
+ /// The direct parent module (it may not be a `mod`, however).
+ parent: Option<Module<'a>>,
+ /// What kind of module this is, because this may not be a `mod`.
+ kind: ModuleKind,
+
+ /// Mapping between names and their (possibly in-progress) resolutions in this module.
+ /// Resolutions in modules from other crates are not populated until accessed.
+ lazy_resolutions: Resolutions<'a>,
+    /// True if this is a module from another crate that needs to be populated on access.
+ populate_on_access: Cell<bool>,
+
+ /// Macro invocations that can expand into items in this module.
+ unexpanded_invocations: RefCell<FxHashSet<LocalExpnId>>,
+
+ /// Whether `#[no_implicit_prelude]` is active.
+ no_implicit_prelude: bool,
+
+ glob_importers: RefCell<Vec<&'a Import<'a>>>,
+ globs: RefCell<Vec<&'a Import<'a>>>,
+
+ /// Used to memoize the traits in this module for faster searches through all traits in scope.
+ traits: RefCell<Option<Box<[(Ident, &'a NameBinding<'a>)]>>>,
+
+ /// Span of the module itself. Used for error reporting.
+ span: Span,
+
+ expansion: ExpnId,
+}
+
+type Module<'a> = &'a ModuleData<'a>;
+
+impl<'a> ModuleData<'a> {
+ fn new(
+ parent: Option<Module<'a>>,
+ kind: ModuleKind,
+ expansion: ExpnId,
+ span: Span,
+ no_implicit_prelude: bool,
+ ) -> Self {
+ let is_foreign = match kind {
+ ModuleKind::Def(_, def_id, _) => !def_id.is_local(),
+ ModuleKind::Block => false,
+ };
+ ModuleData {
+ parent,
+ kind,
+ lazy_resolutions: Default::default(),
+ populate_on_access: Cell::new(is_foreign),
+ unexpanded_invocations: Default::default(),
+ no_implicit_prelude,
+ glob_importers: RefCell::new(Vec::new()),
+ globs: RefCell::new(Vec::new()),
+ traits: RefCell::new(None),
+ span,
+ expansion,
+ }
+ }
+
+ fn for_each_child<R, F>(&'a self, resolver: &mut R, mut f: F)
+ where
+ R: AsMut<Resolver<'a>>,
+ F: FnMut(&mut R, Ident, Namespace, &'a NameBinding<'a>),
+ {
+ for (key, name_resolution) in resolver.as_mut().resolutions(self).borrow().iter() {
+ if let Some(binding) = name_resolution.borrow().binding {
+ f(resolver, key.ident, key.ns, binding);
+ }
+ }
+ }
+
+ /// This modifies `self` in place. The traits will be stored in `self.traits`.
+ fn ensure_traits<R>(&'a self, resolver: &mut R)
+ where
+ R: AsMut<Resolver<'a>>,
+ {
+ let mut traits = self.traits.borrow_mut();
+ if traits.is_none() {
+ let mut collected_traits = Vec::new();
+ self.for_each_child(resolver, |_, name, ns, binding| {
+ if ns != TypeNS {
+ return;
+ }
+ if let Res::Def(DefKind::Trait | DefKind::TraitAlias, _) = binding.res() {
+ collected_traits.push((name, binding))
+ }
+ });
+ *traits = Some(collected_traits.into_boxed_slice());
+ }
+ }
+
+ fn res(&self) -> Option<Res> {
+ match self.kind {
+ ModuleKind::Def(kind, def_id, _) => Some(Res::Def(kind, def_id)),
+ _ => None,
+ }
+ }
+
+ // Public for rustdoc.
+ pub fn def_id(&self) -> DefId {
+ self.opt_def_id().expect("`ModuleData::def_id` is called on a block module")
+ }
+
+ fn opt_def_id(&self) -> Option<DefId> {
+ match self.kind {
+ ModuleKind::Def(_, def_id, _) => Some(def_id),
+ _ => None,
+ }
+ }
+
+ // Whether this is a normal (`mod`) module; paths like `self` resolve to the
+ // first module ancestor that `is_normal`.
+ fn is_normal(&self) -> bool {
+ matches!(self.kind, ModuleKind::Def(DefKind::Mod, _, _))
+ }
+
+ fn is_trait(&self) -> bool {
+ matches!(self.kind, ModuleKind::Def(DefKind::Trait, _, _))
+ }
+
+ fn nearest_item_scope(&'a self) -> Module<'a> {
+ match self.kind {
+ ModuleKind::Def(DefKind::Enum | DefKind::Trait, ..) => {
+ self.parent.expect("enum or trait module without a parent")
+ }
+ _ => self,
+ }
+ }
+
+ /// The [`DefId`] of the nearest `mod` item ancestor (which may be this module).
+ /// This may be the crate root.
+ fn nearest_parent_mod(&self) -> DefId {
+ match self.kind {
+ ModuleKind::Def(DefKind::Mod, def_id, _) => def_id,
+ _ => self.parent.expect("non-root module without parent").nearest_parent_mod(),
+ }
+ }
+
+ fn is_ancestor_of(&self, mut other: &Self) -> bool {
+ while !ptr::eq(self, other) {
+ if let Some(parent) = other.parent {
+ other = parent;
+ } else {
+ return false;
+ }
+ }
+ true
+ }
+}
+
+impl<'a> fmt::Debug for ModuleData<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self.res())
+ }
+}
+
+/// Records a possibly-private value, type, or module definition.
+#[derive(Clone, Debug)]
+pub struct NameBinding<'a> {
+ kind: NameBindingKind<'a>,
+ ambiguity: Option<(&'a NameBinding<'a>, AmbiguityKind)>,
+ expansion: LocalExpnId,
+ span: Span,
+ vis: ty::Visibility,
+}
+
+pub trait ToNameBinding<'a> {
+ fn to_name_binding(self, arenas: &'a ResolverArenas<'a>) -> &'a NameBinding<'a>;
+}
+
+impl<'a> ToNameBinding<'a> for &'a NameBinding<'a> {
+ fn to_name_binding(self, _: &'a ResolverArenas<'a>) -> &'a NameBinding<'a> {
+ self
+ }
+}
+
+#[derive(Clone, Debug)]
+enum NameBindingKind<'a> {
+ Res(Res, /* is_macro_export */ bool),
+ Module(Module<'a>),
+ Import { binding: &'a NameBinding<'a>, import: &'a Import<'a>, used: Cell<bool> },
+}
+
+impl<'a> NameBindingKind<'a> {
+ /// Is this a name binding of an import?
+ fn is_import(&self) -> bool {
+ matches!(*self, NameBindingKind::Import { .. })
+ }
+}
+
+struct PrivacyError<'a> {
+ ident: Ident,
+ binding: &'a NameBinding<'a>,
+ dedup_span: Span,
+}
+
+struct UseError<'a> {
+ err: DiagnosticBuilder<'a, ErrorGuaranteed>,
+ /// Candidates which user could `use` to access the missing type.
+ candidates: Vec<ImportSuggestion>,
+ /// The `DefId` of the module to place the use-statements in.
+ def_id: DefId,
+ /// Whether the diagnostic should say "instead" (as in `consider importing ... instead`).
+ instead: bool,
+ /// Extra free-form suggestion.
+ suggestion: Option<(Span, &'static str, String, Applicability)>,
+ /// Path `Segment`s at the place of use that failed. Used for accurate suggestions after
+ /// telling the user to import the item directly.
+ path: Vec<Segment>,
+}
+
+#[derive(Clone, Copy, PartialEq, Debug)]
+enum AmbiguityKind {
+ Import,
+ BuiltinAttr,
+ DeriveHelper,
+ MacroRulesVsModularized,
+ GlobVsOuter,
+ GlobVsGlob,
+ GlobVsExpanded,
+ MoreExpandedVsOuter,
+}
+
+impl AmbiguityKind {
+ fn descr(self) -> &'static str {
+ match self {
+ AmbiguityKind::Import => "multiple potential import sources",
+ AmbiguityKind::BuiltinAttr => "a name conflict with a builtin attribute",
+ AmbiguityKind::DeriveHelper => "a name conflict with a derive helper attribute",
+ AmbiguityKind::MacroRulesVsModularized => {
+ "a conflict between a `macro_rules` name and a non-`macro_rules` name from another module"
+ }
+ AmbiguityKind::GlobVsOuter => {
+ "a conflict between a name from a glob import and an outer scope during import or macro resolution"
+ }
+ AmbiguityKind::GlobVsGlob => "multiple glob imports of a name in the same module",
+ AmbiguityKind::GlobVsExpanded => {
+ "a conflict between a name from a glob import and a macro-expanded name in the same module during import or macro resolution"
+ }
+ AmbiguityKind::MoreExpandedVsOuter => {
+ "a conflict between a macro-expanded name and a less macro-expanded name from outer scope during import or macro resolution"
+ }
+ }
+ }
+}
+
+/// Miscellaneous bits of metadata for better ambiguity error reporting.
+#[derive(Clone, Copy, PartialEq)]
+enum AmbiguityErrorMisc {
+ SuggestCrate,
+ SuggestSelf,
+ FromPrelude,
+ None,
+}
+
+struct AmbiguityError<'a> {
+ kind: AmbiguityKind,
+ ident: Ident,
+ b1: &'a NameBinding<'a>,
+ b2: &'a NameBinding<'a>,
+ misc1: AmbiguityErrorMisc,
+ misc2: AmbiguityErrorMisc,
+}
+
+impl<'a> NameBinding<'a> {
+ fn module(&self) -> Option<Module<'a>> {
+ match self.kind {
+ NameBindingKind::Module(module) => Some(module),
+ NameBindingKind::Import { binding, .. } => binding.module(),
+ _ => None,
+ }
+ }
+
+ fn res(&self) -> Res {
+ match self.kind {
+ NameBindingKind::Res(res, _) => res,
+ NameBindingKind::Module(module) => module.res().unwrap(),
+ NameBindingKind::Import { binding, .. } => binding.res(),
+ }
+ }
+
+ fn is_ambiguity(&self) -> bool {
+ self.ambiguity.is_some()
+ || match self.kind {
+ NameBindingKind::Import { binding, .. } => binding.is_ambiguity(),
+ _ => false,
+ }
+ }
+
+ fn is_possibly_imported_variant(&self) -> bool {
+ match self.kind {
+ NameBindingKind::Import { binding, .. } => binding.is_possibly_imported_variant(),
+ NameBindingKind::Res(
+ Res::Def(DefKind::Variant | DefKind::Ctor(CtorOf::Variant, ..), _),
+ _,
+ ) => true,
+ NameBindingKind::Res(..) | NameBindingKind::Module(..) => false,
+ }
+ }
+
+ fn is_extern_crate(&self) -> bool {
+ match self.kind {
+ NameBindingKind::Import {
+ import: &Import { kind: ImportKind::ExternCrate { .. }, .. },
+ ..
+ } => true,
+ NameBindingKind::Module(&ModuleData {
+ kind: ModuleKind::Def(DefKind::Mod, def_id, _),
+ ..
+ }) => def_id.is_crate_root(),
+ _ => false,
+ }
+ }
+
+ fn is_import(&self) -> bool {
+ matches!(self.kind, NameBindingKind::Import { .. })
+ }
+
+ fn is_glob_import(&self) -> bool {
+ match self.kind {
+ NameBindingKind::Import { import, .. } => import.is_glob(),
+ _ => false,
+ }
+ }
+
+ fn is_importable(&self) -> bool {
+ !matches!(
+ self.res(),
+ Res::Def(DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy, _)
+ )
+ }
+
+ fn macro_kind(&self) -> Option<MacroKind> {
+ self.res().macro_kind()
+ }
+
+ // Suppose that we resolved a macro invocation with `invoc_parent_expansion` to the binding
+ // `binding` at some expansion round `max(invoc, binding)` when they both emerged from macros.
+ // Then this function returns `true` if `self` may emerge from a macro *after* that,
+ // in some later round, and screw up our previously found resolution.
+ // See the more detailed explanation at
+ // https://github.com/rust-lang/rust/pull/53778#issuecomment-419224049
+ fn may_appear_after(
+ &self,
+ invoc_parent_expansion: LocalExpnId,
+ binding: &NameBinding<'_>,
+ ) -> bool {
+ // self > max(invoc, binding) => !(self <= invoc || self <= binding)
+ // Expansions are partially ordered, so "may appear after" is an inversion of
+ // "certainly appears before or simultaneously" and includes unordered cases.
+ let self_parent_expansion = self.expansion;
+ let other_parent_expansion = binding.expansion;
+ let certainly_before_other_or_simultaneously =
+ other_parent_expansion.is_descendant_of(self_parent_expansion);
+ let certainly_before_invoc_or_simultaneously =
+ invoc_parent_expansion.is_descendant_of(self_parent_expansion);
+ !(certainly_before_other_or_simultaneously || certainly_before_invoc_or_simultaneously)
+ }
+}
+
+#[derive(Debug, Default, Clone)]
+pub struct ExternPreludeEntry<'a> {
+ extern_crate_item: Option<&'a NameBinding<'a>>,
+ pub introduced_by_item: bool,
+}
+
+/// Used for better errors for E0773
+enum BuiltinMacroState {
+ NotYetSeen(SyntaxExtensionKind),
+ AlreadySeen(Span),
+}
+
+struct DeriveData {
+ resolutions: DeriveResolutions,
+ helper_attrs: Vec<(usize, Ident)>,
+ has_derive_copy: bool,
+}
+
+#[derive(Clone)]
+struct MacroData {
+ ext: Lrc<SyntaxExtension>,
+ macro_rules: bool,
+}
+
+/// The main resolver class.
+///
+/// This is the visitor that walks the whole crate.
+pub struct Resolver<'a> {
+ session: &'a Session,
+
+ definitions: Definitions,
+ /// Item with a given `LocalDefId` was defined during macro expansion with ID `ExpnId`.
+ expn_that_defined: FxHashMap<LocalDefId, ExpnId>,
+ /// Reference span for definitions.
+ source_span: IndexVec<LocalDefId, Span>,
+
+ graph_root: Module<'a>,
+
+ prelude: Option<Module<'a>>,
+ extern_prelude: FxHashMap<Ident, ExternPreludeEntry<'a>>,
+
+ /// N.B., this is used only for better diagnostics, not name resolution itself.
+ has_self: FxHashSet<DefId>,
+
+ /// Names of fields of an item `DefId` accessible with dot syntax.
+ /// Used for hints during error reporting.
+ field_names: FxHashMap<DefId, Vec<Spanned<Symbol>>>,
+
+ /// All imports known to succeed or fail.
+ determined_imports: Vec<&'a Import<'a>>,
+
+ /// All non-determined imports.
+ indeterminate_imports: Vec<&'a Import<'a>>,
+
+ // Spans for local variables found during pattern resolution.
+ // Used for suggestions during error reporting.
+ pat_span_map: NodeMap<Span>,
+
+ /// Resolutions for nodes that have a single resolution.
+ partial_res_map: NodeMap<PartialRes>,
+ /// Resolutions for import nodes, which have multiple resolutions in different namespaces.
+ import_res_map: NodeMap<PerNS<Option<Res>>>,
+ /// Resolutions for labels (node IDs of their corresponding blocks or loops).
+ label_res_map: NodeMap<NodeId>,
+ /// Resolutions for lifetimes.
+ lifetimes_res_map: NodeMap<LifetimeRes>,
+ /// Mapping from generics `def_id`s to TAIT generics `def_id`s.
+ /// For each captured lifetime (e.g., `'a`), we create a new lifetime parameter that is a generic
+ /// defined on the TAIT, so we have `type Foo<'a1> = ...` and we establish a mapping in this
+ /// field from the original parameter `'a` to the new parameter `'a1`.
+ generics_def_id_map: Vec<FxHashMap<LocalDefId, LocalDefId>>,
+ /// Lifetime parameters that lowering will have to introduce.
+ extra_lifetime_params_map: NodeMap<Vec<(Ident, NodeId, LifetimeRes)>>,
+
+ /// `CrateNum` resolutions of `extern crate` items.
+ extern_crate_map: FxHashMap<LocalDefId, CrateNum>,
+ reexport_map: FxHashMap<LocalDefId, Vec<ModChild>>,
+ trait_map: NodeMap<Vec<TraitCandidate>>,
+
+ /// A map from nodes to anonymous modules.
+ /// Anonymous modules are pseudo-modules that are implicitly created around items
+ /// contained within blocks.
+ ///
+ /// For example, if we have this:
+ ///
+ /// ```
+ /// fn f() {
+ ///     fn g() {
+ ///         ...
+ ///     }
+ /// }
+ /// ```
+ ///
+ /// There will be an anonymous module created around `g` with the ID of the
+ /// entry block for `f`.
+ block_map: NodeMap<Module<'a>>,
+ /// A fake module that contains no definition and no prelude. Used so that
+ /// some AST passes can generate identifiers that only resolve to local or
+ /// language items.
+ empty_module: Module<'a>,
+ module_map: FxHashMap<DefId, Module<'a>>,
+ binding_parent_modules: FxHashMap<Interned<'a, NameBinding<'a>>, Module<'a>>,
+ underscore_disambiguator: u32,
+
+ /// Maps glob imports to the names of items actually imported.
+ glob_map: FxHashMap<LocalDefId, FxHashSet<Symbol>>,
+ /// Visibilities in "lowered" form, for all entities that have them.
+ visibilities: FxHashMap<LocalDefId, ty::Visibility>,
+ has_pub_restricted: bool,
+ used_imports: FxHashSet<NodeId>,
+ maybe_unused_trait_imports: FxIndexSet<LocalDefId>,
+ maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
+
+ /// Privacy errors are delayed until the end in order to deduplicate them.
+ privacy_errors: Vec<PrivacyError<'a>>,
+ /// Ambiguity errors are delayed for deduplication.
+ ambiguity_errors: Vec<AmbiguityError<'a>>,
+ /// `use` injections are delayed for better placement and deduplication.
+ use_injections: Vec<UseError<'a>>,
+ /// Crate-local macro expanded `macro_export` referred to by a module-relative path.
+ macro_expanded_macro_export_errors: BTreeSet<(Span, Span)>,
+
+ arenas: &'a ResolverArenas<'a>,
+ dummy_binding: &'a NameBinding<'a>,
+
+ crate_loader: CrateLoader<'a>,
+ macro_names: FxHashSet<Ident>,
+ builtin_macros: FxHashMap<Symbol, BuiltinMacroState>,
+ /// A small map keeping the true kinds of built-in macros that appear to be fn-like on
+ /// the surface (`macro` items in libcore), but are actually attributes or derives.
+ builtin_macro_kinds: FxHashMap<LocalDefId, MacroKind>,
+ registered_attrs: FxHashSet<Ident>,
+ registered_tools: RegisteredTools,
+ macro_use_prelude: FxHashMap<Symbol, &'a NameBinding<'a>>,
+ macro_map: FxHashMap<DefId, MacroData>,
+ dummy_ext_bang: Lrc<SyntaxExtension>,
+ dummy_ext_derive: Lrc<SyntaxExtension>,
+ non_macro_attr: Lrc<SyntaxExtension>,
+ local_macro_def_scopes: FxHashMap<LocalDefId, Module<'a>>,
+ ast_transform_scopes: FxHashMap<LocalExpnId, Module<'a>>,
+ unused_macros: FxHashMap<LocalDefId, (NodeId, Ident)>,
+ unused_macro_rules: FxHashMap<(LocalDefId, usize), (Ident, Span)>,
+ proc_macro_stubs: FxHashSet<LocalDefId>,
+ /// Traces collected during macro resolution and validated when it's complete.
+ single_segment_macro_resolutions:
+ Vec<(Ident, MacroKind, ParentScope<'a>, Option<&'a NameBinding<'a>>)>,
+ multi_segment_macro_resolutions:
+ Vec<(Vec<Segment>, Span, MacroKind, ParentScope<'a>, Option<Res>)>,
+ builtin_attrs: Vec<(Ident, ParentScope<'a>)>,
+ /// `derive(Copy)` marks the items it is applied to so they are treated specially later.
+ /// Derive macros cannot modify the items themselves and have to store the markers in the global
+ /// context, so they attach the markers to derive container IDs using this resolver table.
+ containers_deriving_copy: FxHashSet<LocalExpnId>,
+ /// Parent scopes in which the macros were invoked.
+ /// FIXME: `derives` are missing in these parent scopes and need to be taken from elsewhere.
+ invocation_parent_scopes: FxHashMap<LocalExpnId, ParentScope<'a>>,
+ /// `macro_rules` scopes *produced* by expanding the macro invocations;
+ /// they include all the `macro_rules` items and other invocations generated by them.
+ output_macro_rules_scopes: FxHashMap<LocalExpnId, MacroRulesScopeRef<'a>>,
+ /// `macro_rules` scopes produced by `macro_rules` item definitions.
+ macro_rules_scopes: FxHashMap<LocalDefId, MacroRulesScopeRef<'a>>,
+ /// Helper attributes that are in scope for the given expansion.
+ helper_attrs: FxHashMap<LocalExpnId, Vec<Ident>>,
+ /// Ready or in-progress results of resolving paths inside the `#[derive(...)]` attribute
+ /// with the given `ExpnId`.
+ derive_data: FxHashMap<LocalExpnId, DeriveData>,
+
+ /// Avoid duplicated errors for "name already defined".
+ name_already_seen: FxHashMap<Symbol, Span>,
+
+ potentially_unused_imports: Vec<&'a Import<'a>>,
+
+ /// Table for mapping struct IDs into struct constructor IDs;
+ /// it's not used during normal resolution, only for better error reporting.
+ /// Also includes a list of each field's visibility.
+ struct_constructors: DefIdMap<(Res, ty::Visibility, Vec<ty::Visibility>)>,
+
+ /// Features enabled for this crate.
+ active_features: FxHashSet<Symbol>,
+
+ lint_buffer: LintBuffer,
+
+ next_node_id: NodeId,
+
+ node_id_to_def_id: FxHashMap<ast::NodeId, LocalDefId>,
+ def_id_to_node_id: IndexVec<LocalDefId, ast::NodeId>,
+
+ /// Indices of unnamed struct or variant fields with unresolved attributes.
+ placeholder_field_indices: FxHashMap<NodeId, usize>,
+ /// When collecting definitions from an AST fragment produced by a macro invocation `ExpnId`
+ /// we know what parent node that fragment should be attached to thanks to this table,
+ /// and how the `impl Trait` fragments were introduced.
+ invocation_parents: FxHashMap<LocalExpnId, (LocalDefId, ImplTraitContext)>,
+
+ /// Some way to know that we are in a *trait* impl in `visit_assoc_item`.
+ /// FIXME: Replace with a more general AST map (together with some other fields).
+ trait_impl_items: FxHashSet<LocalDefId>,
+
+ legacy_const_generic_args: FxHashMap<DefId, Option<Vec<usize>>>,
+ /// Amount of lifetime parameters for each item in the crate.
+ item_generics_num_lifetimes: FxHashMap<LocalDefId, usize>,
+
+ main_def: Option<MainDefinition>,
+ trait_impls: FxIndexMap<DefId, Vec<LocalDefId>>,
+ /// A list of proc macro `LocalDefId`s, written out in the order in which
+ /// they are declared in the static array generated by `proc_macro_harness`.
+ proc_macros: Vec<NodeId>,
+ confused_type_with_std_module: FxHashMap<Span, Span>,
+
+ access_levels: AccessLevels,
+}
+
+/// Nothing really interesting here; it just provides memory for the rest of the crate.
+#[derive(Default)]
+pub struct ResolverArenas<'a> {
+ modules: TypedArena<ModuleData<'a>>,
+ local_modules: RefCell<Vec<Module<'a>>>,
+ imports: TypedArena<Import<'a>>,
+ name_resolutions: TypedArena<RefCell<NameResolution<'a>>>,
+ ast_paths: TypedArena<ast::Path>,
+ dropless: DroplessArena,
+}
+
+impl<'a> ResolverArenas<'a> {
+ fn new_module(
+ &'a self,
+ parent: Option<Module<'a>>,
+ kind: ModuleKind,
+ expn_id: ExpnId,
+ span: Span,
+ no_implicit_prelude: bool,
+ module_map: &mut FxHashMap<DefId, Module<'a>>,
+ ) -> Module<'a> {
+ let module =
+ self.modules.alloc(ModuleData::new(parent, kind, expn_id, span, no_implicit_prelude));
+ let def_id = module.opt_def_id();
+ if def_id.map_or(true, |def_id| def_id.is_local()) {
+ self.local_modules.borrow_mut().push(module);
+ }
+ if let Some(def_id) = def_id {
+ module_map.insert(def_id, module);
+ }
+ module
+ }
+ fn local_modules(&'a self) -> std::cell::Ref<'a, Vec<Module<'a>>> {
+ self.local_modules.borrow()
+ }
+ fn alloc_name_binding(&'a self, name_binding: NameBinding<'a>) -> &'a NameBinding<'a> {
+ self.dropless.alloc(name_binding)
+ }
+ fn alloc_import(&'a self, import: Import<'a>) -> &'a Import<'_> {
+ self.imports.alloc(import)
+ }
+ fn alloc_name_resolution(&'a self) -> &'a RefCell<NameResolution<'a>> {
+ self.name_resolutions.alloc(Default::default())
+ }
+ fn alloc_macro_rules_scope(&'a self, scope: MacroRulesScope<'a>) -> MacroRulesScopeRef<'a> {
+ Interned::new_unchecked(self.dropless.alloc(Cell::new(scope)))
+ }
+ fn alloc_macro_rules_binding(
+ &'a self,
+ binding: MacroRulesBinding<'a>,
+ ) -> &'a MacroRulesBinding<'a> {
+ self.dropless.alloc(binding)
+ }
+ fn alloc_ast_paths(&'a self, paths: &[ast::Path]) -> &'a [ast::Path] {
+ self.ast_paths.alloc_from_iter(paths.iter().cloned())
+ }
+ fn alloc_pattern_spans(&'a self, spans: impl Iterator<Item = Span>) -> &'a [Span] {
+ self.dropless.alloc_from_iter(spans)
+ }
+}
+
+impl<'a> AsMut<Resolver<'a>> for Resolver<'a> {
+ fn as_mut(&mut self) -> &mut Resolver<'a> {
+ self
+ }
+}
+
+impl<'a, 'b> DefIdTree for &'a Resolver<'b> {
+ #[inline]
+ fn opt_parent(self, id: DefId) -> Option<DefId> {
+ match id.as_local() {
+ Some(id) => self.definitions.def_key(id).parent,
+ None => self.cstore().def_key(id).parent,
+ }
+ .map(|index| DefId { index, ..id })
+ }
+}
+
+impl Resolver<'_> {
+ fn opt_local_def_id(&self, node: NodeId) -> Option<LocalDefId> {
+ self.node_id_to_def_id.get(&node).copied()
+ }
+
+ pub fn local_def_id(&self, node: NodeId) -> LocalDefId {
+ self.opt_local_def_id(node).unwrap_or_else(|| panic!("no entry for node id: `{:?}`", node))
+ }
+
+ /// Adds a definition with a parent definition.
+ fn create_def(
+ &mut self,
+ parent: LocalDefId,
+ node_id: ast::NodeId,
+ data: DefPathData,
+ expn_id: ExpnId,
+ span: Span,
+ ) -> LocalDefId {
+ assert!(
+ !self.node_id_to_def_id.contains_key(&node_id),
+ "adding a def'n for node-id {:?} and data {:?} but a previous def'n exists: {:?}",
+ node_id,
+ data,
+ self.definitions.def_key(self.node_id_to_def_id[&node_id]),
+ );
+
+ let def_id = self.definitions.create_def(parent, data);
+
+ // Record the expansion the definition was created in, if it wasn't the root.
+ if expn_id != ExpnId::root() {
+ self.expn_that_defined.insert(def_id, expn_id);
+ }
+
+ // A relative span's parent must be an absolute span.
+ debug_assert_eq!(span.data_untracked().parent, None);
+ let _id = self.source_span.push(span);
+ debug_assert_eq!(_id, def_id);
+
+ // Some things for which we allocate `LocalDefId`s don't correspond to
+ // anything in the AST, so they don't have a `NodeId`. For these cases
+ // we don't need a mapping from `NodeId` to `LocalDefId`.
+ if node_id != ast::DUMMY_NODE_ID {
+ debug!("create_def: def_id_to_node_id[{:?}] <-> {:?}", def_id, node_id);
+ self.node_id_to_def_id.insert(node_id, def_id);
+ }
+ assert_eq!(self.def_id_to_node_id.push(node_id), def_id);
+
+ def_id
+ }
+
+ fn item_generics_num_lifetimes(&self, def_id: DefId) -> usize {
+ if let Some(def_id) = def_id.as_local() {
+ self.item_generics_num_lifetimes[&def_id]
+ } else {
+ self.cstore().item_generics_num_lifetimes(def_id, self.session)
+ }
+ }
+}
+
+impl<'a> Resolver<'a> {
+ pub fn new(
+ session: &'a Session,
+ krate: &Crate,
+ crate_name: &str,
+ metadata_loader: Box<MetadataLoaderDyn>,
+ arenas: &'a ResolverArenas<'a>,
+ ) -> Resolver<'a> {
+ let root_def_id = CRATE_DEF_ID.to_def_id();
+ let mut module_map = FxHashMap::default();
+ let graph_root = arenas.new_module(
+ None,
+ ModuleKind::Def(DefKind::Mod, root_def_id, kw::Empty),
+ ExpnId::root(),
+ krate.spans.inner_span,
+ session.contains_name(&krate.attrs, sym::no_implicit_prelude),
+ &mut module_map,
+ );
+ let empty_module = arenas.new_module(
+ None,
+ ModuleKind::Def(DefKind::Mod, root_def_id, kw::Empty),
+ ExpnId::root(),
+ DUMMY_SP,
+ true,
+ &mut FxHashMap::default(),
+ );
+
+ let definitions = Definitions::new(session.local_stable_crate_id());
+
+ let mut visibilities = FxHashMap::default();
+ visibilities.insert(CRATE_DEF_ID, ty::Visibility::Public);
+
+ let mut def_id_to_node_id = IndexVec::default();
+ assert_eq!(def_id_to_node_id.push(CRATE_NODE_ID), CRATE_DEF_ID);
+ let mut node_id_to_def_id = FxHashMap::default();
+ node_id_to_def_id.insert(CRATE_NODE_ID, CRATE_DEF_ID);
+
+ let mut invocation_parents = FxHashMap::default();
+ invocation_parents.insert(LocalExpnId::ROOT, (CRATE_DEF_ID, ImplTraitContext::Existential));
+
+ let mut source_span = IndexVec::default();
+ let _id = source_span.push(krate.spans.inner_span);
+ debug_assert_eq!(_id, CRATE_DEF_ID);
+
+ let mut extern_prelude: FxHashMap<Ident, ExternPreludeEntry<'_>> = session
+ .opts
+ .externs
+ .iter()
+ .filter(|(_, entry)| entry.add_prelude)
+ .map(|(name, _)| (Ident::from_str(name), Default::default()))
+ .collect();
+
+ if !session.contains_name(&krate.attrs, sym::no_core) {
+ extern_prelude.insert(Ident::with_dummy_span(sym::core), Default::default());
+ if !session.contains_name(&krate.attrs, sym::no_std) {
+ extern_prelude.insert(Ident::with_dummy_span(sym::std), Default::default());
+ }
+ }
+
+ let (registered_attrs, registered_tools) =
+ macros::registered_attrs_and_tools(session, &krate.attrs);
+
+ let features = session.features_untracked();
+
+ let mut resolver = Resolver {
+ session,
+
+ definitions,
+ expn_that_defined: Default::default(),
+ source_span,
+
+ // The outermost module has def ID 0; this is not reflected in the
+ // AST.
+ graph_root,
+ prelude: None,
+ extern_prelude,
+
+ has_self: FxHashSet::default(),
+ field_names: FxHashMap::default(),
+
+ determined_imports: Vec::new(),
+ indeterminate_imports: Vec::new(),
+
+ pat_span_map: Default::default(),
+ partial_res_map: Default::default(),
+ import_res_map: Default::default(),
+ label_res_map: Default::default(),
+ lifetimes_res_map: Default::default(),
+ generics_def_id_map: Vec::new(),
+ extra_lifetime_params_map: Default::default(),
+ extern_crate_map: Default::default(),
+ reexport_map: FxHashMap::default(),
+ trait_map: NodeMap::default(),
+ underscore_disambiguator: 0,
+ empty_module,
+ module_map,
+ block_map: Default::default(),
+ binding_parent_modules: FxHashMap::default(),
+ ast_transform_scopes: FxHashMap::default(),
+
+ glob_map: Default::default(),
+ visibilities,
+ has_pub_restricted: false,
+ used_imports: FxHashSet::default(),
+ maybe_unused_trait_imports: Default::default(),
+ maybe_unused_extern_crates: Vec::new(),
+
+ privacy_errors: Vec::new(),
+ ambiguity_errors: Vec::new(),
+ use_injections: Vec::new(),
+ macro_expanded_macro_export_errors: BTreeSet::new(),
+
+ arenas,
+ dummy_binding: arenas.alloc_name_binding(NameBinding {
+ kind: NameBindingKind::Res(Res::Err, false),
+ ambiguity: None,
+ expansion: LocalExpnId::ROOT,
+ span: DUMMY_SP,
+ vis: ty::Visibility::Public,
+ }),
+
+ crate_loader: CrateLoader::new(session, metadata_loader, crate_name),
+ macro_names: FxHashSet::default(),
+ builtin_macros: Default::default(),
+ builtin_macro_kinds: Default::default(),
+ registered_attrs,
+ registered_tools,
+ macro_use_prelude: FxHashMap::default(),
+ macro_map: FxHashMap::default(),
+ dummy_ext_bang: Lrc::new(SyntaxExtension::dummy_bang(session.edition())),
+ dummy_ext_derive: Lrc::new(SyntaxExtension::dummy_derive(session.edition())),
+ non_macro_attr: Lrc::new(SyntaxExtension::non_macro_attr(session.edition())),
+ invocation_parent_scopes: Default::default(),
+ output_macro_rules_scopes: Default::default(),
+ macro_rules_scopes: Default::default(),
+ helper_attrs: Default::default(),
+ derive_data: Default::default(),
+ local_macro_def_scopes: FxHashMap::default(),
+ name_already_seen: FxHashMap::default(),
+ potentially_unused_imports: Vec::new(),
+ struct_constructors: Default::default(),
+ unused_macros: Default::default(),
+ unused_macro_rules: Default::default(),
+ proc_macro_stubs: Default::default(),
+ single_segment_macro_resolutions: Default::default(),
+ multi_segment_macro_resolutions: Default::default(),
+ builtin_attrs: Default::default(),
+ containers_deriving_copy: Default::default(),
+ active_features: features
+ .declared_lib_features
+ .iter()
+ .map(|(feat, ..)| *feat)
+ .chain(features.declared_lang_features.iter().map(|(feat, ..)| *feat))
+ .collect(),
+ lint_buffer: LintBuffer::default(),
+ next_node_id: CRATE_NODE_ID,
+ node_id_to_def_id,
+ def_id_to_node_id,
+ placeholder_field_indices: Default::default(),
+ invocation_parents,
+ trait_impl_items: Default::default(),
+ legacy_const_generic_args: Default::default(),
+ item_generics_num_lifetimes: Default::default(),
+ main_def: Default::default(),
+ trait_impls: Default::default(),
+ proc_macros: Default::default(),
+ confused_type_with_std_module: Default::default(),
+ access_levels: Default::default(),
+ };
+
+ let root_parent_scope = ParentScope::module(graph_root, &resolver);
+ resolver.invocation_parent_scopes.insert(LocalExpnId::ROOT, root_parent_scope);
+
+ resolver
+ }
+
+ fn new_module(
+ &mut self,
+ parent: Option<Module<'a>>,
+ kind: ModuleKind,
+ expn_id: ExpnId,
+ span: Span,
+ no_implicit_prelude: bool,
+ ) -> Module<'a> {
+ let module_map = &mut self.module_map;
+ self.arenas.new_module(parent, kind, expn_id, span, no_implicit_prelude, module_map)
+ }
+
+ pub fn next_node_id(&mut self) -> NodeId {
+ let start = self.next_node_id;
+ let next = start.as_u32().checked_add(1).expect("input too large; ran out of NodeIds");
+ self.next_node_id = ast::NodeId::from_u32(next);
+ start
+ }
+
+ pub fn next_node_ids(&mut self, count: usize) -> std::ops::Range<NodeId> {
+ let start = self.next_node_id;
+ let end = start.as_usize().checked_add(count).expect("input too large; ran out of NodeIds");
+ self.next_node_id = ast::NodeId::from_usize(end);
+ start..self.next_node_id
+ }
+
+ pub fn lint_buffer(&mut self) -> &mut LintBuffer {
+ &mut self.lint_buffer
+ }
+
+ pub fn arenas() -> ResolverArenas<'a> {
+ Default::default()
+ }
+
+ pub fn into_outputs(
+ self,
+ ) -> (Definitions, Box<CrateStoreDyn>, ResolverOutputs, ty::ResolverAstLowering) {
+ let proc_macros = self.proc_macros.iter().map(|id| self.local_def_id(*id)).collect();
+ let definitions = self.definitions;
+ let cstore = Box::new(self.crate_loader.into_cstore());
+ let source_span = self.source_span;
+ let expn_that_defined = self.expn_that_defined;
+ let visibilities = self.visibilities;
+ let has_pub_restricted = self.has_pub_restricted;
+ let extern_crate_map = self.extern_crate_map;
+ let reexport_map = self.reexport_map;
+ let maybe_unused_trait_imports = self.maybe_unused_trait_imports;
+ let maybe_unused_extern_crates = self.maybe_unused_extern_crates;
+ let glob_map = self.glob_map;
+ let main_def = self.main_def;
+ let confused_type_with_std_module = self.confused_type_with_std_module;
+ let access_levels = self.access_levels;
+ let resolutions = ResolverOutputs {
+ source_span,
+ expn_that_defined,
+ visibilities,
+ has_pub_restricted,
+ access_levels,
+ extern_crate_map,
+ reexport_map,
+ glob_map,
+ maybe_unused_trait_imports,
+ maybe_unused_extern_crates,
+ extern_prelude: self
+ .extern_prelude
+ .iter()
+ .map(|(ident, entry)| (ident.name, entry.introduced_by_item))
+ .collect(),
+ main_def,
+ trait_impls: self.trait_impls,
+ proc_macros,
+ confused_type_with_std_module,
+ registered_tools: self.registered_tools,
+ };
+ let resolutions_lowering = ty::ResolverAstLowering {
+ legacy_const_generic_args: self.legacy_const_generic_args,
+ partial_res_map: self.partial_res_map,
+ import_res_map: self.import_res_map,
+ label_res_map: self.label_res_map,
+ lifetimes_res_map: self.lifetimes_res_map,
+ generics_def_id_map: self.generics_def_id_map,
+ extra_lifetime_params_map: self.extra_lifetime_params_map,
+ next_node_id: self.next_node_id,
+ node_id_to_def_id: self.node_id_to_def_id,
+ def_id_to_node_id: self.def_id_to_node_id,
+ trait_map: self.trait_map,
+ builtin_macro_kinds: self.builtin_macro_kinds,
+ };
+ (definitions, cstore, resolutions, resolutions_lowering)
+ }
+
+ pub fn clone_outputs(
+ &self,
+ ) -> (Definitions, Box<CrateStoreDyn>, ResolverOutputs, ty::ResolverAstLowering) {
+ let proc_macros = self.proc_macros.iter().map(|id| self.local_def_id(*id)).collect();
+ let definitions = self.definitions.clone();
+ let cstore = Box::new(self.cstore().clone());
+ let resolutions = ResolverOutputs {
+ source_span: self.source_span.clone(),
+ expn_that_defined: self.expn_that_defined.clone(),
+ visibilities: self.visibilities.clone(),
+ has_pub_restricted: self.has_pub_restricted,
+ extern_crate_map: self.extern_crate_map.clone(),
+ reexport_map: self.reexport_map.clone(),
+ glob_map: self.glob_map.clone(),
+ maybe_unused_trait_imports: self.maybe_unused_trait_imports.clone(),
+ maybe_unused_extern_crates: self.maybe_unused_extern_crates.clone(),
+ extern_prelude: self
+ .extern_prelude
+ .iter()
+ .map(|(ident, entry)| (ident.name, entry.introduced_by_item))
+ .collect(),
+ main_def: self.main_def,
+ trait_impls: self.trait_impls.clone(),
+ proc_macros,
+ confused_type_with_std_module: self.confused_type_with_std_module.clone(),
+ registered_tools: self.registered_tools.clone(),
+ access_levels: self.access_levels.clone(),
+ };
+ let resolutions_lowering = ty::ResolverAstLowering {
+ legacy_const_generic_args: self.legacy_const_generic_args.clone(),
+ partial_res_map: self.partial_res_map.clone(),
+ import_res_map: self.import_res_map.clone(),
+ label_res_map: self.label_res_map.clone(),
+ lifetimes_res_map: self.lifetimes_res_map.clone(),
+ generics_def_id_map: self.generics_def_id_map.clone(),
+ extra_lifetime_params_map: self.extra_lifetime_params_map.clone(),
+ next_node_id: self.next_node_id.clone(),
+ node_id_to_def_id: self.node_id_to_def_id.clone(),
+ def_id_to_node_id: self.def_id_to_node_id.clone(),
+ trait_map: self.trait_map.clone(),
+ builtin_macro_kinds: self.builtin_macro_kinds.clone(),
+ };
+ (definitions, cstore, resolutions, resolutions_lowering)
+ }
+
+ fn create_stable_hashing_context(&self) -> StableHashingContext<'_> {
+ StableHashingContext::new(
+ self.session,
+ &self.definitions,
+ self.crate_loader.cstore(),
+ &self.source_span,
+ )
+ }
+
+ pub fn cstore(&self) -> &CStore {
+ self.crate_loader.cstore()
+ }
+
+ fn dummy_ext(&self, macro_kind: MacroKind) -> Lrc<SyntaxExtension> {
+ match macro_kind {
+ MacroKind::Bang => self.dummy_ext_bang.clone(),
+ MacroKind::Derive => self.dummy_ext_derive.clone(),
+ MacroKind::Attr => self.non_macro_attr.clone(),
+ }
+ }
+
+ /// Runs the function on each namespace.
+ fn per_ns<F: FnMut(&mut Self, Namespace)>(&mut self, mut f: F) {
+ f(self, TypeNS);
+ f(self, ValueNS);
+ f(self, MacroNS);
+ }
+
+ fn is_builtin_macro(&mut self, res: Res) -> bool {
+ self.get_macro(res).map_or(false, |macro_data| macro_data.ext.builtin_name.is_some())
+ }
+
+ fn macro_def(&self, mut ctxt: SyntaxContext) -> DefId {
+ loop {
+ match ctxt.outer_expn_data().macro_def_id {
+ Some(def_id) => return def_id,
+ None => ctxt.remove_mark(),
+ };
+ }
+ }
+
+ /// Entry point to crate resolution.
+ pub fn resolve_crate(&mut self, krate: &Crate) {
+ self.session.time("resolve_crate", || {
+ self.session.time("finalize_imports", || ImportResolver { r: self }.finalize_imports());
+ self.session.time("resolve_access_levels", || {
+ AccessLevelsVisitor::compute_access_levels(self, krate)
+ });
+ self.session.time("finalize_macro_resolutions", || self.finalize_macro_resolutions());
+ self.session.time("late_resolve_crate", || self.late_resolve_crate(krate));
+ self.session.time("resolve_main", || self.resolve_main());
+ self.session.time("resolve_check_unused", || self.check_unused(krate));
+ self.session.time("resolve_report_errors", || self.report_errors(krate));
+ self.session.time("resolve_postprocess", || self.crate_loader.postprocess(krate));
+ });
+ }
+
+ pub fn traits_in_scope(
+ &mut self,
+ current_trait: Option<Module<'a>>,
+ parent_scope: &ParentScope<'a>,
+ ctxt: SyntaxContext,
+ assoc_item: Option<(Symbol, Namespace)>,
+ ) -> Vec<TraitCandidate> {
+ let mut found_traits = Vec::new();
+
+ if let Some(module) = current_trait {
+ if self.trait_may_have_item(Some(module), assoc_item) {
+ let def_id = module.def_id();
+ found_traits.push(TraitCandidate { def_id, import_ids: smallvec![] });
+ }
+ }
+
+ self.visit_scopes(ScopeSet::All(TypeNS, false), parent_scope, ctxt, |this, scope, _, _| {
+ match scope {
+ Scope::Module(module, _) => {
+ this.traits_in_module(module, assoc_item, &mut found_traits);
+ }
+ Scope::StdLibPrelude => {
+ if let Some(module) = this.prelude {
+ this.traits_in_module(module, assoc_item, &mut found_traits);
+ }
+ }
+ Scope::ExternPrelude | Scope::ToolPrelude | Scope::BuiltinTypes => {}
+ _ => unreachable!(),
+ }
+ None::<()>
+ });
+
+ found_traits
+ }
+
+ fn traits_in_module(
+ &mut self,
+ module: Module<'a>,
+ assoc_item: Option<(Symbol, Namespace)>,
+ found_traits: &mut Vec<TraitCandidate>,
+ ) {
+ module.ensure_traits(self);
+ let traits = module.traits.borrow();
+ for (trait_name, trait_binding) in traits.as_ref().unwrap().iter() {
+ if self.trait_may_have_item(trait_binding.module(), assoc_item) {
+ let def_id = trait_binding.res().def_id();
+ let import_ids = self.find_transitive_imports(&trait_binding.kind, *trait_name);
+ found_traits.push(TraitCandidate { def_id, import_ids });
+ }
+ }
+ }
+
+ // The list of traits in scope is pruned on a best-effort basis. We reject traits that don't
+ // have an associated item with the given name and namespace (if specified). This is a
+ // conservative optimization; proper hygienic type-based resolution of associated items is
+ // done in typeck. We don't reject trait aliases (`trait_module == None`) because we don't
+ // have access to their associated items.
+ fn trait_may_have_item(
+ &mut self,
+ trait_module: Option<Module<'a>>,
+ assoc_item: Option<(Symbol, Namespace)>,
+ ) -> bool {
+ match (trait_module, assoc_item) {
+ (Some(trait_module), Some((name, ns))) => {
+ self.resolutions(trait_module).borrow().iter().any(|resolution| {
+ let (&BindingKey { ident: assoc_ident, ns: assoc_ns, .. }, _) = resolution;
+ assoc_ns == ns && assoc_ident.name == name
+ })
+ }
+ _ => true,
+ }
+ }
+
+ fn find_transitive_imports(
+ &mut self,
+ mut kind: &NameBindingKind<'_>,
+ trait_name: Ident,
+ ) -> SmallVec<[LocalDefId; 1]> {
+ let mut import_ids = smallvec![];
+ while let NameBindingKind::Import { import, binding, .. } = kind {
+ let id = self.local_def_id(import.id);
+ self.maybe_unused_trait_imports.insert(id);
+ self.add_to_glob_map(&import, trait_name);
+ import_ids.push(id);
+ kind = &binding.kind;
+ }
+ import_ids
+ }
+
+ fn new_key(&mut self, ident: Ident, ns: Namespace) -> BindingKey {
+ let ident = ident.normalize_to_macros_2_0();
+ let disambiguator = if ident.name == kw::Underscore {
+ self.underscore_disambiguator += 1;
+ self.underscore_disambiguator
+ } else {
+ 0
+ };
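+ // E.g. (illustrative) two `use some::item as _;` imports in the same module get
+ // keys that differ only in `disambiguator`, so the second binding does not
+ // overwrite the first one in the resolution map.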
+ BindingKey { ident, ns, disambiguator }
+ }
+
+ fn resolutions(&mut self, module: Module<'a>) -> &'a Resolutions<'a> {
+ if module.populate_on_access.get() {
+ module.populate_on_access.set(false);
+ self.build_reduced_graph_external(module);
+ }
+ &module.lazy_resolutions
+ }
+
+ fn resolution(
+ &mut self,
+ module: Module<'a>,
+ key: BindingKey,
+ ) -> &'a RefCell<NameResolution<'a>> {
+ *self
+ .resolutions(module)
+ .borrow_mut()
+ .entry(key)
+ .or_insert_with(|| self.arenas.alloc_name_resolution())
+ }
+
+ fn record_use(
+ &mut self,
+ ident: Ident,
+ used_binding: &'a NameBinding<'a>,
+ is_lexical_scope: bool,
+ ) {
+ if let Some((b2, kind)) = used_binding.ambiguity {
+ self.ambiguity_errors.push(AmbiguityError {
+ kind,
+ ident,
+ b1: used_binding,
+ b2,
+ misc1: AmbiguityErrorMisc::None,
+ misc2: AmbiguityErrorMisc::None,
+ });
+ }
+ if let NameBindingKind::Import { import, binding, ref used } = used_binding.kind {
+ // If an `extern crate` item refers to a name from the extern prelude without
+ // introducing it, avoid marking the item as used when it is accessed from the
+ // lexical scope.
+ if is_lexical_scope {
+ if let Some(entry) = self.extern_prelude.get(&ident.normalize_to_macros_2_0()) {
+ if let Some(crate_item) = entry.extern_crate_item {
+ if ptr::eq(used_binding, crate_item) && !entry.introduced_by_item {
+ return;
+ }
+ }
+ }
+ }
+ used.set(true);
+ import.used.set(true);
+ self.used_imports.insert(import.id);
+ self.add_to_glob_map(&import, ident);
+ self.record_use(ident, binding, false);
+ }
+ }
+
+ #[inline]
+ fn add_to_glob_map(&mut self, import: &Import<'_>, ident: Ident) {
+ if import.is_glob() {
+ let def_id = self.local_def_id(import.id);
+ self.glob_map.entry(def_id).or_default().insert(ident.name);
+ }
+ }
+
+ fn resolve_crate_root(&mut self, ident: Ident) -> Module<'a> {
+ debug!("resolve_crate_root({:?})", ident);
+ let mut ctxt = ident.span.ctxt();
+ let mark = if ident.name == kw::DollarCrate {
+ // When resolving `$crate` from a `macro_rules!` invoked in a `macro`,
+ // we don't want to pretend that the `macro_rules!` definition is in the `macro`
+ // as described in `SyntaxContext::apply_mark`, so we ignore prepended opaque marks.
+ // FIXME: This is only a guess and it doesn't work correctly for `macro_rules!`
+ // definitions actually produced by `macro` and `macro` definitions produced by
+ // `macro_rules!`, but at least such configurations are not stable yet.
+ ctxt = ctxt.normalize_to_macro_rules();
+ debug!(
+ "resolve_crate_root: marks={:?}",
+ ctxt.marks().into_iter().map(|(i, t)| (i.expn_data(), t)).collect::<Vec<_>>()
+ );
+ let mut iter = ctxt.marks().into_iter().rev().peekable();
+ let mut result = None;
+ // Find the last opaque mark from the end if it exists.
+ while let Some(&(mark, transparency)) = iter.peek() {
+ if transparency == Transparency::Opaque {
+ result = Some(mark);
+ iter.next();
+ } else {
+ break;
+ }
+ }
+ debug!(
+ "resolve_crate_root: found opaque mark {:?} {:?}",
+ result,
+ result.map(|r| r.expn_data())
+ );
+ // Then find the last semi-transparent mark from the end if it exists.
+ for (mark, transparency) in iter {
+ if transparency == Transparency::SemiTransparent {
+ result = Some(mark);
+ } else {
+ break;
+ }
+ }
+ debug!(
+ "resolve_crate_root: found semi-transparent mark {:?} {:?}",
+ result,
+ result.map(|r| r.expn_data())
+ );
+ result
+ } else {
+ debug!("resolve_crate_root: not DollarCrate");
+ ctxt = ctxt.normalize_to_macros_2_0();
+ ctxt.adjust(ExpnId::root())
+ };
+ let module = match mark {
+ Some(def) => self.expn_def_scope(def),
+ None => {
+ debug!(
+ "resolve_crate_root({:?}): found no mark (ident.span = {:?})",
+ ident, ident.span
+ );
+ return self.graph_root;
+ }
+ };
+ let module = self.expect_module(
+ module.opt_def_id().map_or(LOCAL_CRATE, |def_id| def_id.krate).as_def_id(),
+ );
+ debug!(
+ "resolve_crate_root({:?}): got module {:?} ({:?}) (ident.span = {:?})",
+ ident,
+ module,
+ module.kind.name(),
+ ident.span
+ );
+ module
+ }
+
+ fn resolve_self(&mut self, ctxt: &mut SyntaxContext, module: Module<'a>) -> Module<'a> {
+ let mut module = self.expect_module(module.nearest_parent_mod());
+ while module.span.ctxt().normalize_to_macros_2_0() != *ctxt {
+ let parent = module.parent.unwrap_or_else(|| self.expn_def_scope(ctxt.remove_mark()));
+ module = self.expect_module(parent.nearest_parent_mod());
+ }
+ module
+ }
+
+ fn record_partial_res(&mut self, node_id: NodeId, resolution: PartialRes) {
+ debug!("(recording res) recording {:?} for {}", resolution, node_id);
+ if let Some(prev_res) = self.partial_res_map.insert(node_id, resolution) {
+ panic!("path resolved multiple times ({:?} before, {:?} now)", prev_res, resolution);
+ }
+ }
+
+ fn record_pat_span(&mut self, node: NodeId, span: Span) {
+ debug!("(recording pat) recording {:?} for {:?}", node, span);
+ self.pat_span_map.insert(node, span);
+ }
+
+ fn is_accessible_from(&self, vis: ty::Visibility, module: Module<'a>) -> bool {
+ vis.is_accessible_from(module.nearest_parent_mod(), self)
+ }
+
+ fn set_binding_parent_module(&mut self, binding: &'a NameBinding<'a>, module: Module<'a>) {
+ if let Some(old_module) =
+ self.binding_parent_modules.insert(Interned::new_unchecked(binding), module)
+ {
+ if !ptr::eq(module, old_module) {
+ span_bug!(binding.span, "parent module is reset for binding");
+ }
+ }
+ }
+
+ fn disambiguate_macro_rules_vs_modularized(
+ &self,
+ macro_rules: &'a NameBinding<'a>,
+ modularized: &'a NameBinding<'a>,
+ ) -> bool {
+ // A non-controversial subset of "modularized macro name" vs `macro_rules` ambiguities
+ // is disambiguated to mitigate regressions from macro modularization.
+ // In general, scoping for `macro_rules` behaves like scoping for `let` at module level.
+ match (
+ self.binding_parent_modules.get(&Interned::new_unchecked(macro_rules)),
+ self.binding_parent_modules.get(&Interned::new_unchecked(modularized)),
+ ) {
+ (Some(macro_rules), Some(modularized)) => {
+ macro_rules.nearest_parent_mod() == modularized.nearest_parent_mod()
+ && modularized.is_ancestor_of(macro_rules)
+ }
+ _ => false,
+ }
+ }
+
+ fn extern_prelude_get(&mut self, ident: Ident, finalize: bool) -> Option<&'a NameBinding<'a>> {
+ if ident.is_path_segment_keyword() {
+ // Make sure `self`, `super`, etc. produce an error when passed here.
+ return None;
+ }
+ self.extern_prelude.get(&ident.normalize_to_macros_2_0()).cloned().and_then(|entry| {
+ if let Some(binding) = entry.extern_crate_item {
+ if finalize && entry.introduced_by_item {
+ self.record_use(ident, binding, false);
+ }
+ Some(binding)
+ } else {
+ let crate_id = if finalize {
+ let Some(crate_id) =
+ self.crate_loader.process_path_extern(ident.name, ident.span) else { return Some(self.dummy_binding); };
+ crate_id
+ } else {
+ self.crate_loader.maybe_process_path_extern(ident.name)?
+ };
+ let crate_root = self.expect_module(crate_id.as_def_id());
+ Some(
+ (crate_root, ty::Visibility::Public, DUMMY_SP, LocalExpnId::ROOT)
+ .to_name_binding(self.arenas),
+ )
+ }
+ })
+ }
+
+ /// Rustdoc uses this to resolve doc link paths in a recoverable way. `PathResult<'a>`
+ /// isn't something that can be returned because it can't be made to live that long,
+ /// and also it's a private type. Fortunately rustdoc doesn't need to know the error,
+ /// just that an error occurred.
+ pub fn resolve_rustdoc_path(
+ &mut self,
+ path_str: &str,
+ ns: Namespace,
+ mut parent_scope: ParentScope<'a>,
+ ) -> Option<Res> {
+ let mut segments =
+ Vec::from_iter(path_str.split("::").map(Ident::from_str).map(Segment::from_ident));
+ if let Some(segment) = segments.first_mut() {
+ if segment.ident.name == kw::Crate {
+ // FIXME: `resolve_path` always resolves `crate` to the current crate root, but
+ // rustdoc wants it to resolve to the `parent_scope`'s crate root. This trick of
+ // replacing `crate` with `self` and changing the current module should achieve
+ // the same effect.
+ segment.ident.name = kw::SelfLower;
+ parent_scope.module =
+ self.expect_module(parent_scope.module.def_id().krate.as_def_id());
+ } else if segment.ident.name == kw::Empty {
+ segment.ident.name = kw::PathRoot;
+ }
+ }
+
+ match self.maybe_resolve_path(&segments, Some(ns), &parent_scope) {
+ PathResult::Module(ModuleOrUniformRoot::Module(module)) => Some(module.res().unwrap()),
+ PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => {
+ Some(path_res.base_res())
+ }
+ PathResult::Module(ModuleOrUniformRoot::ExternPrelude)
+ | PathResult::NonModule(..)
+ | PathResult::Failed { .. } => None,
+ PathResult::Module(..) | PathResult::Indeterminate => unreachable!(),
+ }
+ }
+
+ /// For rustdoc.
+ /// For local modules this returns only reexports; for external modules it returns all children.
+ pub fn module_children_or_reexports(&self, def_id: DefId) -> Vec<ModChild> {
+ if let Some(def_id) = def_id.as_local() {
+ self.reexport_map.get(&def_id).cloned().unwrap_or_default()
+ } else {
+ self.cstore().module_children_untracked(def_id, self.session)
+ }
+ }
+
+ /// For rustdoc.
+ pub fn macro_rules_scope(&self, def_id: LocalDefId) -> (MacroRulesScopeRef<'a>, Res) {
+ let scope = *self.macro_rules_scopes.get(&def_id).expect("not a `macro_rules` item");
+ match scope.get() {
+ MacroRulesScope::Binding(mb) => (scope, mb.binding.res()),
+ _ => unreachable!(),
+ }
+ }
+
+ /// Retrieves the span of the given `DefId` if `DefId` is in the local crate.
+ #[inline]
+ pub fn opt_span(&self, def_id: DefId) -> Option<Span> {
+ def_id.as_local().map(|def_id| self.source_span[def_id])
+ }
+
+ /// Checks if an expression refers to a function marked with
+ /// `#[rustc_legacy_const_generics]` and returns the argument index list
+ /// from the attribute.
+ pub fn legacy_const_generic_args(&mut self, expr: &Expr) -> Option<Vec<usize>> {
+ if let ExprKind::Path(None, path) = &expr.kind {
+ // Don't perform legacy const generics rewriting if the path already
+ // has generic arguments.
+ if path.segments.last().unwrap().args.is_some() {
+ return None;
+ }
+
+ let partial_res = self.partial_res_map.get(&expr.id)?;
+ if partial_res.unresolved_segments() != 0 {
+ return None;
+ }
+
+ if let Res::Def(def::DefKind::Fn, def_id) = partial_res.base_res() {
+ // We only support cross-crate argument rewriting. Uses
+ // within the same crate should be updated to use the new
+ // const generics style.
+ if def_id.is_local() {
+ return None;
+ }
+
+ if let Some(v) = self.legacy_const_generic_args.get(&def_id) {
+ return v.clone();
+ }
+
+ let attr = self
+ .cstore()
+ .item_attrs_untracked(def_id, self.session)
+ .find(|a| a.has_name(sym::rustc_legacy_const_generics))?;
+ let mut ret = Vec::new();
+ for meta in attr.meta_item_list()? {
+ match meta.literal()?.kind {
+ LitKind::Int(a, _) => ret.push(a as usize),
+ _ => panic!("invalid arg index"),
+ }
+ }
+ // Cache the lookup to avoid parsing attributes for an item multiple times.
+ self.legacy_const_generic_args.insert(def_id, Some(ret.clone()));
+ return Some(ret);
+ }
+ }
+ None
+ }
+
+ fn resolve_main(&mut self) {
+ let module = self.graph_root;
+ let ident = Ident::with_dummy_span(sym::main);
+ let parent_scope = &ParentScope::module(module, self);
+
+ let Ok(name_binding) = self.maybe_resolve_ident_in_module(
+ ModuleOrUniformRoot::Module(module),
+ ident,
+ ValueNS,
+ parent_scope,
+ ) else {
+ return;
+ };
+
+ let res = name_binding.res();
+ let is_import = name_binding.is_import();
+ let span = name_binding.span;
+ if let Res::Def(DefKind::Fn, _) = res {
+ self.record_use(ident, name_binding, false);
+ }
+ self.main_def = Some(MainDefinition { res, is_import, span });
+ }
+}
+
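+// Example (illustrative): `names_to_string(&[kw::PathRoot, sym::std, sym::mem])`
+// yields "std::mem"; `PathRoot` segments are skipped, and identifiers that would
+// need raw form are printed with an `r#` prefix.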
+fn names_to_string(names: &[Symbol]) -> String {
+ let mut result = String::new();
+ for (i, name) in names.iter().filter(|name| **name != kw::PathRoot).enumerate() {
+ if i > 0 {
+ result.push_str("::");
+ }
+ if Ident::with_dummy_span(*name).is_raw_guess() {
+ result.push_str("r#");
+ }
+ result.push_str(name.as_str());
+ }
+ result
+}
+
+fn path_names_to_string(path: &Path) -> String {
+ names_to_string(&path.segments.iter().map(|seg| seg.ident.name).collect::<Vec<_>>())
+}
+
+/// A somewhat inefficient routine to obtain the name of a module.
+fn module_to_string(module: Module<'_>) -> Option<String> {
+ let mut names = Vec::new();
+
+ fn collect_mod(names: &mut Vec<Symbol>, module: Module<'_>) {
+ if let ModuleKind::Def(.., name) = module.kind {
+ if let Some(parent) = module.parent {
+ names.push(name);
+ collect_mod(names, parent);
+ }
+ } else {
+ names.push(Symbol::intern("<opaque>"));
+ collect_mod(names, module.parent.unwrap());
+ }
+ }
+ collect_mod(&mut names, module);
+
+ if names.is_empty() {
+ return None;
+ }
+ names.reverse();
+ Some(names_to_string(&names))
+}
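+// E.g. (illustrative) a module `b` nested inside `a` prints as "a::b", a block
+// module contributes an "<opaque>" segment, and the crate root yields `None`.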
+
+#[derive(Copy, Clone, Debug)]
+struct Finalize {
+ /// Node ID for linting.
+ node_id: NodeId,
+ /// Span of the whole path or of some characteristic fragment of it.
+ /// E.g. the span of `b` in `foo::{a, b, c}`, or the full span for regular paths.
+ path_span: Span,
+ /// Span of the path start, suitable for prepending something to it.
+ /// E.g. the span of `foo` in `foo::{a, b, c}`, or the full span for regular paths.
+ root_span: Span,
+ /// Whether to report privacy errors or silently return "no resolution" for them,
+ /// similarly to speculative resolution.
+ report_private: bool,
+}
+
+impl Finalize {
+ fn new(node_id: NodeId, path_span: Span) -> Finalize {
+ Finalize::with_root_span(node_id, path_span, path_span)
+ }
+
+ fn with_root_span(node_id: NodeId, path_span: Span, root_span: Span) -> Finalize {
+ Finalize { node_id, path_span, root_span, report_private: true }
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ late::lifetimes::provide(providers);
+}
diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs
new file mode 100644
index 000000000..070fb9c72
--- /dev/null
+++ b/compiler/rustc_resolve/src/macros.rs
@@ -0,0 +1,921 @@
+//! A bunch of methods and structures more or less related to resolving macros and
+//! to the interface provided by `Resolver` to the macro expander.
+
+use crate::imports::ImportResolver;
+use crate::Namespace::*;
+use crate::{BuiltinMacroState, Determinacy};
+use crate::{DeriveData, Finalize, ParentScope, ResolutionError, Resolver, ScopeSet};
+use crate::{ModuleKind, ModuleOrUniformRoot, NameBinding, PathResult, Segment};
+use rustc_ast::{self as ast, Inline, ItemKind, ModKind, NodeId};
+use rustc_ast_pretty::pprust;
+use rustc_attr::StabilityLevel;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::intern::Interned;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::struct_span_err;
+use rustc_expand::base::{Annotatable, DeriveResolutions, Indeterminate, ResolverExpand};
+use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind};
+use rustc_expand::compile_declarative_macro;
+use rustc_expand::expand::{AstFragment, Invocation, InvocationKind, SupportsMacroExpansion};
+use rustc_hir::def::{self, DefKind, NonMacroAttrKind};
+use rustc_hir::def_id::{CrateNum, LocalDefId};
+use rustc_middle::middle::stability;
+use rustc_middle::ty::RegisteredTools;
+use rustc_session::lint::builtin::{LEGACY_DERIVE_HELPERS, SOFT_UNSTABLE};
+use rustc_session::lint::builtin::{UNUSED_MACROS, UNUSED_MACRO_RULES};
+use rustc_session::lint::BuiltinLintDiagnostics;
+use rustc_session::parse::feature_err;
+use rustc_session::Session;
+use rustc_span::edition::Edition;
+use rustc_span::hygiene::{self, ExpnData, ExpnKind, LocalExpnId};
+use rustc_span::hygiene::{AstPass, MacroKind};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use std::cell::Cell;
+use std::mem;
+
+type Res = def::Res<NodeId>;
+
+/// Binding produced by a `macro_rules` item.
+/// Not modularized, can shadow previous `macro_rules` bindings, etc.
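+///
+/// For example (an illustrative sketch), the second definition shadows the
+/// first one, and the invocation resolves to the most recent binding:
+///
+/// ```
+/// macro_rules! m { () => { 1 } }
+/// macro_rules! m { () => { 2 } } // shadows the previous `m`
+/// fn f() -> i32 { m!() } // returns 2
+/// ```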
+#[derive(Debug)]
+pub struct MacroRulesBinding<'a> {
+ pub(crate) binding: &'a NameBinding<'a>,
+ /// `macro_rules` scope into which the `macro_rules` item was planted.
+ pub(crate) parent_macro_rules_scope: MacroRulesScopeRef<'a>,
+ pub(crate) ident: Ident,
+}
+
+/// The scope introduced by a `macro_rules!` macro.
+/// This starts at the macro's definition and ends at the end of the macro's parent
+/// module (named or unnamed), or even further if it escapes with `#[macro_use]`.
+/// Some macro invocations need to introduce `macro_rules` scopes too because they
+/// can potentially expand into macro definitions.
+#[derive(Copy, Clone, Debug)]
+pub enum MacroRulesScope<'a> {
+ /// Empty "root" scope at the crate start containing no names.
+ Empty,
+ /// The scope introduced by a `macro_rules!` macro definition.
+ Binding(&'a MacroRulesBinding<'a>),
+ /// The scope introduced by a macro invocation that can potentially
+ /// create a `macro_rules!` macro definition.
+ Invocation(LocalExpnId),
+}
+
+/// `macro_rules!` scopes are always kept by reference and inside a cell.
+/// The reason is that we update scopes with value `MacroRulesScope::Invocation(invoc_id)`
+/// in-place after `invoc_id` gets expanded.
+/// This helps to avoid uncontrollable growth of `macro_rules!` scope chains,
+/// which usually grow linearly with the number of macro invocations
+/// in a module (including derives) and hurt performance.
+pub(crate) type MacroRulesScopeRef<'a> = Interned<'a, Cell<MacroRulesScope<'a>>>;
+
+/// Macro namespace is separated into two sub-namespaces, one for bang macros and
+/// one for attribute-like macros (attributes, derives).
+/// We ignore resolutions from one sub-namespace when searching names in scope for another.
+pub(crate) fn sub_namespace_match(
+ candidate: Option<MacroKind>,
+ requirement: Option<MacroKind>,
+) -> bool {
+ #[derive(PartialEq)]
+ enum SubNS {
+ Bang,
+ AttrLike,
+ }
+ let sub_ns = |kind| match kind {
+ MacroKind::Bang => SubNS::Bang,
+ MacroKind::Attr | MacroKind::Derive => SubNS::AttrLike,
+ };
+ let candidate = candidate.map(sub_ns);
+ let requirement = requirement.map(sub_ns);
+ // "No specific sub-namespace" means "matches anything" for both requirements and candidates.
+ candidate.is_none() || requirement.is_none() || candidate == requirement
+}
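+// E.g. (illustrative) `sub_namespace_match(Some(MacroKind::Bang), Some(MacroKind::Attr))`
+// is `false`, while `sub_namespace_match(None, Some(MacroKind::Attr))` is `true`,
+// because an unspecified sub-namespace matches anything.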
+
+// We don't want to format a path using pretty-printing,
+// `format!("{}", path)`, because that tries to insert
+// line-breaks and is slow.
+fn fast_print_path(path: &ast::Path) -> Symbol {
+ if path.segments.len() == 1 {
+ path.segments[0].ident.name
+ } else {
+ let mut path_str = String::with_capacity(64);
+ for (i, segment) in path.segments.iter().enumerate() {
+ if i != 0 {
+ path_str.push_str("::");
+ }
+ if segment.ident.name != kw::PathRoot {
+ path_str.push_str(segment.ident.as_str())
+ }
+ }
+ Symbol::intern(&path_str)
+ }
+}
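+// E.g. a single-segment path `foo` reuses its interned symbol directly, while a
+// path like `std::mem::swap` is concatenated once into a freshly interned string.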
+
+/// The code common between processing `#![register_tool]` and `#![register_attr]`.
+fn registered_idents(
+ sess: &Session,
+ attrs: &[ast::Attribute],
+ attr_name: Symbol,
+ descr: &str,
+) -> FxHashSet<Ident> {
+ let mut registered = FxHashSet::default();
+ for attr in sess.filter_by_name(attrs, attr_name) {
+ for nested_meta in attr.meta_item_list().unwrap_or_default() {
+ match nested_meta.ident() {
+ Some(ident) => {
+ if let Some(old_ident) = registered.replace(ident) {
+ let msg = format!("{} `{}` was already registered", descr, ident);
+ sess.struct_span_err(ident.span, &msg)
+ .span_label(old_ident.span, "already registered here")
+ .emit();
+ }
+ }
+ None => {
+ let msg = format!("`{}` only accepts identifiers", attr_name);
+ let span = nested_meta.span();
+ sess.struct_span_err(span, &msg).span_label(span, "not an identifier").emit();
+ }
+ }
+ }
+ }
+ registered
+}
+
+pub(crate) fn registered_attrs_and_tools(
+ sess: &Session,
+ attrs: &[ast::Attribute],
+) -> (FxHashSet<Ident>, FxHashSet<Ident>) {
+ let registered_attrs = registered_idents(sess, attrs, sym::register_attr, "attribute");
+ let mut registered_tools = registered_idents(sess, attrs, sym::register_tool, "tool");
+ // We implicitly add `rustfmt` and `clippy` to known tools,
+ // but it's not an error to register them explicitly.
+ let predefined_tools = [sym::clippy, sym::rustfmt];
+ registered_tools.extend(predefined_tools.iter().cloned().map(Ident::with_dummy_span));
+ (registered_attrs, registered_tools)
+}
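+
+// Illustrative sketch (not part of the upstream source) of the crate-level
+// attributes processed above; `my_tool` and `my_attr` are hypothetical:
+//
+//     #![feature(register_tool, register_attr)]
+//     #![register_tool(my_tool)] // -> registered_tools, alongside the
+//                                //    implicit `rustfmt` and `clippy`
+//     #![register_attr(my_attr)] // -> registered_attrs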
+
+// Some feature gates for inner attributes are reported as lints for backward compatibility.
+fn soft_custom_inner_attributes_gate(path: &ast::Path, invoc: &Invocation) -> bool {
+ match &path.segments[..] {
+ // `#![test]`
+ [seg] if seg.ident.name == sym::test => return true,
+ // `#![rustfmt::skip]` on out-of-line modules
+ [seg1, seg2] if seg1.ident.name == sym::rustfmt && seg2.ident.name == sym::skip => {
+ if let InvocationKind::Attr { item, .. } = &invoc.kind {
+ if let Annotatable::Item(item) = item {
+ if let ItemKind::Mod(_, ModKind::Loaded(_, Inline::No, _)) = item.kind {
+ return true;
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ false
+}
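+
+// Illustrative sketch (not part of the upstream source) of the two soft-gated
+// cases above:
+//
+//     #![test] // as a crate-level inner attribute: buffered as a
+//              // `SOFT_UNSTABLE` lint instead of a hard feature error
+//
+//     // inside foo.rs, an out-of-line module:
+//     #![rustfmt::skip] // likewise reported via `SOFT_UNSTABLE`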
+
+impl<'a> ResolverExpand for Resolver<'a> {
+ fn next_node_id(&mut self) -> NodeId {
+ self.next_node_id()
+ }
+
+ fn invocation_parent(&self, id: LocalExpnId) -> LocalDefId {
+ self.invocation_parents[&id].0
+ }
+
+ fn resolve_dollar_crates(&mut self) {
+ hygiene::update_dollar_crate_names(|ctxt| {
+ let ident = Ident::new(kw::DollarCrate, DUMMY_SP.with_ctxt(ctxt));
+ match self.resolve_crate_root(ident).kind {
+ ModuleKind::Def(.., name) if name != kw::Empty => name,
+ _ => kw::Crate,
+ }
+ });
+ }
+
+ fn visit_ast_fragment_with_placeholders(
+ &mut self,
+ expansion: LocalExpnId,
+ fragment: &AstFragment,
+ ) {
+ // Integrate the new AST fragment into all the definition and module structures.
+ // We are inside the `expansion` now, but other parent scope components are still the same.
+ let parent_scope = ParentScope { expansion, ..self.invocation_parent_scopes[&expansion] };
+ let output_macro_rules_scope = self.build_reduced_graph(fragment, parent_scope);
+ self.output_macro_rules_scopes.insert(expansion, output_macro_rules_scope);
+
+ parent_scope.module.unexpanded_invocations.borrow_mut().remove(&expansion);
+ }
+
+ fn register_builtin_macro(&mut self, name: Symbol, ext: SyntaxExtensionKind) {
+ if self.builtin_macros.insert(name, BuiltinMacroState::NotYetSeen(ext)).is_some() {
+ self.session
+ .diagnostic()
+ .bug(&format!("built-in macro `{}` was already registered", name));
+ }
+ }
+
+ // Create a new expansion whose definition site is the provided module, or
+ // a fake empty `#[no_implicit_prelude]` module if no module is provided.
+ fn expansion_for_ast_pass(
+ &mut self,
+ call_site: Span,
+ pass: AstPass,
+ features: &[Symbol],
+ parent_module_id: Option<NodeId>,
+ ) -> LocalExpnId {
+ let parent_module =
+ parent_module_id.map(|module_id| self.local_def_id(module_id).to_def_id());
+ let expn_id = LocalExpnId::fresh(
+ ExpnData::allow_unstable(
+ ExpnKind::AstPass(pass),
+ call_site,
+ self.session.edition(),
+ features.into(),
+ None,
+ parent_module,
+ ),
+ self.create_stable_hashing_context(),
+ );
+
+ let parent_scope =
+ parent_module.map_or(self.empty_module, |def_id| self.expect_module(def_id));
+ self.ast_transform_scopes.insert(expn_id, parent_scope);
+
+ expn_id
+ }
+
+ fn resolve_imports(&mut self) {
+ ImportResolver { r: self }.resolve_imports()
+ }
+
+ fn resolve_macro_invocation(
+ &mut self,
+ invoc: &Invocation,
+ eager_expansion_root: LocalExpnId,
+ force: bool,
+ ) -> Result<Lrc<SyntaxExtension>, Indeterminate> {
+ let invoc_id = invoc.expansion_data.id;
+ let parent_scope = match self.invocation_parent_scopes.get(&invoc_id) {
+ Some(parent_scope) => *parent_scope,
+ None => {
+ // If there's no entry in the table, then we are resolving an eagerly expanded
+ // macro, which should inherit its parent scope from its eager expansion root -
+ // the macro that requested this eager expansion.
+ let parent_scope = *self
+ .invocation_parent_scopes
+ .get(&eager_expansion_root)
+ .expect("non-eager expansion without a parent scope");
+ self.invocation_parent_scopes.insert(invoc_id, parent_scope);
+ parent_scope
+ }
+ };
+
+ let (path, kind, inner_attr, derives) = match invoc.kind {
+ InvocationKind::Attr { ref attr, ref derives, .. } => (
+ &attr.get_normal_item().path,
+ MacroKind::Attr,
+ attr.style == ast::AttrStyle::Inner,
+ self.arenas.alloc_ast_paths(derives),
+ ),
+ InvocationKind::Bang { ref mac, .. } => (&mac.path, MacroKind::Bang, false, &[][..]),
+ InvocationKind::Derive { ref path, .. } => (path, MacroKind::Derive, false, &[][..]),
+ };
+
+ // Derives are not included when `invocations` are collected, so we have to add them here.
+ let parent_scope = &ParentScope { derives, ..parent_scope };
+ let supports_macro_expansion = invoc.fragment_kind.supports_macro_expansion();
+ let node_id = invoc.expansion_data.lint_node_id;
+ let (ext, res) = self.smart_resolve_macro_path(
+ path,
+ kind,
+ supports_macro_expansion,
+ inner_attr,
+ parent_scope,
+ node_id,
+ force,
+ soft_custom_inner_attributes_gate(path, invoc),
+ )?;
+
+ let span = invoc.span();
+ let def_id = res.opt_def_id();
+ invoc_id.set_expn_data(
+ ext.expn_data(
+ parent_scope.expansion,
+ span,
+ fast_print_path(path),
+ def_id,
+ def_id.map(|def_id| self.macro_def_scope(def_id).nearest_parent_mod()),
+ ),
+ self.create_stable_hashing_context(),
+ );
+
+ Ok(ext)
+ }
+
+ fn record_macro_rule_usage(&mut self, id: NodeId, rule_i: usize) {
+ let did = self.local_def_id(id);
+ self.unused_macro_rules.remove(&(did, rule_i));
+ }
+
+ fn check_unused_macros(&mut self) {
+ for (_, &(node_id, ident)) in self.unused_macros.iter() {
+ self.lint_buffer.buffer_lint(
+ UNUSED_MACROS,
+ node_id,
+ ident.span,
+ &format!("unused macro definition: `{}`", ident.name),
+ );
+ }
+ for (&(def_id, arm_i), &(ident, rule_span)) in self.unused_macro_rules.iter() {
+ if self.unused_macros.contains_key(&def_id) {
+ // We already lint the entire macro as unused
+ continue;
+ }
+ let node_id = self.def_id_to_node_id[def_id];
+ self.lint_buffer.buffer_lint(
+ UNUSED_MACRO_RULES,
+ node_id,
+ rule_span,
+ &format!(
+ "{} rule of macro `{}` is never used",
+ crate::diagnostics::ordinalize(arm_i + 1),
+ ident.name
+ ),
+ );
+ }
+ }
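+
+ // Illustrative sketch (not part of the upstream source) of code that
+ // triggers the two lints above:
+ //
+ //     macro_rules! never_used { () => {} } // "unused macro definition"
+ //
+ //     macro_rules! partly_used {
+ //         (a) => {}; // used below
+ //         (b) => {}; // "2nd rule of macro `partly_used` is never used"
+ //     }
+ //     partly_used!(a);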
+
+ fn has_derive_copy(&self, expn_id: LocalExpnId) -> bool {
+ self.containers_deriving_copy.contains(&expn_id)
+ }
+
+ fn resolve_derives(
+ &mut self,
+ expn_id: LocalExpnId,
+ force: bool,
+ derive_paths: &dyn Fn() -> DeriveResolutions,
+ ) -> Result<(), Indeterminate> {
+ // Block expansion of the container until we resolve all derives in it.
+ // This is required for two reasons:
+ // - Derive helper attributes are in scope for the item to which the `#[derive]`
+ // is applied, so they have to be produced by the container's expansion rather
+ // than by individual derives.
+ // - Derives in the container need to know whether one of them is a built-in `Copy`.
+ // Temporarily take the data to avoid borrow checker conflicts.
+ let mut derive_data = mem::take(&mut self.derive_data);
+ let entry = derive_data.entry(expn_id).or_insert_with(|| DeriveData {
+ resolutions: derive_paths(),
+ helper_attrs: Vec::new(),
+ has_derive_copy: false,
+ });
+ let parent_scope = self.invocation_parent_scopes[&expn_id];
+ for (i, (path, _, opt_ext)) in entry.resolutions.iter_mut().enumerate() {
+ if opt_ext.is_none() {
+ *opt_ext = Some(
+ match self.resolve_macro_path(
+ &path,
+ Some(MacroKind::Derive),
+ &parent_scope,
+ true,
+ force,
+ ) {
+ Ok((Some(ext), _)) => {
+ if !ext.helper_attrs.is_empty() {
+ let last_seg = path.segments.last().unwrap();
+ let span = last_seg.ident.span.normalize_to_macros_2_0();
+ entry.helper_attrs.extend(
+ ext.helper_attrs
+ .iter()
+ .map(|name| (i, Ident::new(*name, span))),
+ );
+ }
+ entry.has_derive_copy |= ext.builtin_name == Some(sym::Copy);
+ ext
+ }
+ Ok(_) | Err(Determinacy::Determined) => self.dummy_ext(MacroKind::Derive),
+ Err(Determinacy::Undetermined) => {
+ assert!(self.derive_data.is_empty());
+ self.derive_data = derive_data;
+ return Err(Indeterminate);
+ }
+ },
+ );
+ }
+ }
+ // Sort helpers in a stable way, independent of the derive resolution order.
+ entry.helper_attrs.sort_by_key(|(i, _)| *i);
+ self.helper_attrs
+ .insert(expn_id, entry.helper_attrs.iter().map(|(_, ident)| *ident).collect());
+ // Mark this derive container as deriving `Copy` if one of its own derives is `Copy` or if
+ // its parent container derives `Copy`, to support cases like `#[derive(Clone, Copy)] #[derive(Debug)]`.
+ if entry.has_derive_copy || self.has_derive_copy(parent_scope.expansion) {
+ self.containers_deriving_copy.insert(expn_id);
+ }
+ assert!(self.derive_data.is_empty());
+ self.derive_data = derive_data;
+ Ok(())
+ }
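+
+ // Illustrative sketch (not part of the upstream source) of the two container
+ // behaviors above; `MyDerive` with helper attribute `helper` is hypothetical:
+ //
+ //     #[derive(MyDerive)] // `helper` must be in scope for the whole item,
+ //     struct S {          // so the container's expansion produces it.
+ //         #[helper]
+ //         field: u32,
+ //     }
+ //
+ //     #[derive(Clone, Copy)]
+ //     #[derive(Debug)] // sees "has `Copy`" through its parent expansion.
+ //     struct T;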
+
+ fn take_derive_resolutions(&mut self, expn_id: LocalExpnId) -> Option<DeriveResolutions> {
+ self.derive_data.remove(&expn_id).map(|data| data.resolutions)
+ }
+
+ // The function that implements the resolution logic of `#[cfg_accessible(path)]`.
+ // Returns `Ok(true)` if the path can certainly be resolved in one of the three
+ // namespaces, `Ok(false)` if it certainly cannot be resolved in any of them, and
+ // `Err(Indeterminate)` if we cannot give a certain answer yet.
+ fn cfg_accessible(
+ &mut self,
+ expn_id: LocalExpnId,
+ path: &ast::Path,
+ ) -> Result<bool, Indeterminate> {
+ let span = path.span;
+ let path = &Segment::from_path(path);
+ let parent_scope = self.invocation_parent_scopes[&expn_id];
+
+ let mut indeterminate = false;
+ for ns in [TypeNS, ValueNS, MacroNS].iter().copied() {
+ match self.maybe_resolve_path(path, Some(ns), &parent_scope) {
+ PathResult::Module(ModuleOrUniformRoot::Module(_)) => return Ok(true),
+ PathResult::NonModule(partial_res) if partial_res.unresolved_segments() == 0 => {
+ return Ok(true);
+ }
+ PathResult::NonModule(..) |
+ // HACK(Urgau): This shouldn't be necessary
+ PathResult::Failed { is_error_from_last_segment: false, .. } => {
+ self.session
+ .struct_span_err(span, "not sure whether the path is accessible or not")
+ .note("the type may have associated items, but we are currently not checking them")
+ .emit();
+
+ // If we get a partially resolved NonModule in one namespace, we should get the
+ // same result in any other namespace, so we can return early.
+ return Ok(false);
+ }
+ PathResult::Indeterminate => indeterminate = true,
+ // We can only be sure that a path doesn't exist after having tested all the
+ // possibilities; only then can we return false.
+ PathResult::Failed { .. } => {}
+ PathResult::Module(_) => panic!("unexpected path resolution"),
+ }
+ }
+
+ if indeterminate {
+ return Err(Indeterminate);
+ }
+
+ Ok(false)
+ }
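+
+ // Illustrative sketch (not part of the upstream source) of
+ // `#[cfg_accessible]` from the user's side (a nightly feature):
+ //
+ //     #![feature(cfg_accessible)]
+ //
+ //     #[cfg_accessible(std::mem::take)] // path resolves -> item is kept
+ //     fn present() {}
+ //
+ //     #[cfg_accessible(std::mem::no_such_item)] // does not resolve -> dropped
+ //     fn absent() {}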
+
+ fn get_proc_macro_quoted_span(&self, krate: CrateNum, id: usize) -> Span {
+ self.crate_loader.cstore().get_proc_macro_quoted_span_untracked(krate, id, self.session)
+ }
+
+ fn declare_proc_macro(&mut self, id: NodeId) {
+ self.proc_macros.push(id)
+ }
+
+ fn registered_tools(&self) -> &RegisteredTools {
+ &self.registered_tools
+ }
+}
+
+impl<'a> Resolver<'a> {
+ /// Resolve macro path with error reporting and recovery.
+ /// Uses dummy syntax extensions for unresolved macros or macros with unexpected resolutions
+ /// for better error recovery.
+ fn smart_resolve_macro_path(
+ &mut self,
+ path: &ast::Path,
+ kind: MacroKind,
+ supports_macro_expansion: SupportsMacroExpansion,
+ inner_attr: bool,
+ parent_scope: &ParentScope<'a>,
+ node_id: NodeId,
+ force: bool,
+ soft_custom_inner_attributes_gate: bool,
+ ) -> Result<(Lrc<SyntaxExtension>, Res), Indeterminate> {
+ let (ext, res) = match self.resolve_macro_path(path, Some(kind), parent_scope, true, force)
+ {
+ Ok((Some(ext), res)) => (ext, res),
+ Ok((None, res)) => (self.dummy_ext(kind), res),
+ Err(Determinacy::Determined) => (self.dummy_ext(kind), Res::Err),
+ Err(Determinacy::Undetermined) => return Err(Indeterminate),
+ };
+
+ // Report errors for the resolved macro.
+ for segment in &path.segments {
+ if let Some(args) = &segment.args {
+ self.session.span_err(args.span(), "generic arguments in macro path");
+ }
+ if kind == MacroKind::Attr && segment.ident.as_str().starts_with("rustc") {
+ self.session.span_err(
+ segment.ident.span,
+ "attributes starting with `rustc` are reserved for use by the `rustc` compiler",
+ );
+ }
+ }
+
+ match res {
+ Res::Def(DefKind::Macro(_), def_id) => {
+ if let Some(def_id) = def_id.as_local() {
+ self.unused_macros.remove(&def_id);
+ if self.proc_macro_stubs.contains(&def_id) {
+ self.session.span_err(
+ path.span,
+ "can't use a procedural macro from the same crate that defines it",
+ );
+ }
+ }
+ }
+ Res::NonMacroAttr(..) | Res::Err => {}
+ _ => panic!("expected `DefKind::Macro` or `Res::NonMacroAttr`"),
+ };
+
+ self.check_stability_and_deprecation(&ext, path, node_id);
+
+ let unexpected_res = if ext.macro_kind() != kind {
+ Some((kind.article(), kind.descr_expected()))
+ } else if matches!(res, Res::Def(..)) {
+ match supports_macro_expansion {
+ SupportsMacroExpansion::No => Some(("a", "non-macro attribute")),
+ SupportsMacroExpansion::Yes { supports_inner_attrs } => {
+ if inner_attr && !supports_inner_attrs {
+ Some(("a", "non-macro inner attribute"))
+ } else {
+ None
+ }
+ }
+ }
+ } else {
+ None
+ };
+ if let Some((article, expected)) = unexpected_res {
+ let path_str = pprust::path_to_string(path);
+ let msg = format!("expected {}, found {} `{}`", expected, res.descr(), path_str);
+ self.session
+ .struct_span_err(path.span, &msg)
+ .span_label(path.span, format!("not {} {}", article, expected))
+ .emit();
+ return Ok((self.dummy_ext(kind), Res::Err));
+ }
+
+ // Try to avoid reporting this error if other related errors were already reported.
+ if res != Res::Err
+ && inner_attr
+ && !self.session.features_untracked().custom_inner_attributes
+ {
+ let msg = match res {
+ Res::Def(..) => "inner macro attributes are unstable",
+ Res::NonMacroAttr(..) => "custom inner attributes are unstable",
+ _ => unreachable!(),
+ };
+ if soft_custom_inner_attributes_gate {
+ self.session.parse_sess.buffer_lint(SOFT_UNSTABLE, path.span, node_id, msg);
+ } else {
+ feature_err(&self.session.parse_sess, sym::custom_inner_attributes, path.span, msg)
+ .emit();
+ }
+ }
+
+ Ok((ext, res))
+ }
+
+ pub fn resolve_macro_path(
+ &mut self,
+ path: &ast::Path,
+ kind: Option<MacroKind>,
+ parent_scope: &ParentScope<'a>,
+ trace: bool,
+ force: bool,
+ ) -> Result<(Option<Lrc<SyntaxExtension>>, Res), Determinacy> {
+ let path_span = path.span;
+ let mut path = Segment::from_path(path);
+
+ // Possibly apply the `local_inner_macros` macro helper hack.
+ if kind == Some(MacroKind::Bang)
+ && path.len() == 1
+ && path[0].ident.span.ctxt().outer_expn_data().local_inner_macros
+ {
+ let root = Ident::new(kw::DollarCrate, path[0].ident.span);
+ path.insert(0, Segment::from_ident(root));
+ }
+
+ let res = if path.len() > 1 {
+ let res = match self.maybe_resolve_path(&path, Some(MacroNS), parent_scope) {
+ PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => {
+ Ok(path_res.base_res())
+ }
+ PathResult::Indeterminate if !force => return Err(Determinacy::Undetermined),
+ PathResult::NonModule(..)
+ | PathResult::Indeterminate
+ | PathResult::Failed { .. } => Err(Determinacy::Determined),
+ PathResult::Module(..) => unreachable!(),
+ };
+
+ if trace {
+ let kind = kind.expect("macro kind must be specified if tracing is enabled");
+ self.multi_segment_macro_resolutions.push((
+ path,
+ path_span,
+ kind,
+ *parent_scope,
+ res.ok(),
+ ));
+ }
+
+ self.prohibit_imported_non_macro_attrs(None, res.ok(), path_span);
+ res
+ } else {
+ let scope_set = kind.map_or(ScopeSet::All(MacroNS, false), ScopeSet::Macro);
+ let binding = self.early_resolve_ident_in_lexical_scope(
+ path[0].ident,
+ scope_set,
+ parent_scope,
+ None,
+ force,
+ None,
+ );
+ if let Err(Determinacy::Undetermined) = binding {
+ return Err(Determinacy::Undetermined);
+ }
+
+ if trace {
+ let kind = kind.expect("macro kind must be specified if tracing is enabled");
+ self.single_segment_macro_resolutions.push((
+ path[0].ident,
+ kind,
+ *parent_scope,
+ binding.ok(),
+ ));
+ }
+
+ let res = binding.map(|binding| binding.res());
+ self.prohibit_imported_non_macro_attrs(binding.ok(), res.ok(), path_span);
+ res
+ };
+
+ res.map(|res| (self.get_macro(res).map(|macro_data| macro_data.ext), res))
+ }
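+
+ // Illustrative sketch (not part of the upstream source) of the
+ // `local_inner_macros` hack applied above: inside a macro exported with
+ // `#[macro_export(local_inner_macros)]`, single-segment bang calls resolve
+ // as if prefixed with `$crate::`:
+ //
+ //     #[macro_export(local_inner_macros)]
+ //     macro_rules! outer {
+ //         () => { inner!() }; // resolved as `$crate::inner!()`
+ //     }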
+
+ pub(crate) fn finalize_macro_resolutions(&mut self) {
+ let check_consistency = |this: &mut Self,
+ path: &[Segment],
+ span,
+ kind: MacroKind,
+ initial_res: Option<Res>,
+ res: Res| {
+ if let Some(initial_res) = initial_res {
+ if res != initial_res {
+ // Make sure compilation does not succeed if preferred macro resolution
+ // has changed after the macro had been expanded. In theory all such
+ // situations should be reported as errors, so this is a bug.
+ this.session.delay_span_bug(span, "inconsistent resolution for a macro");
+ }
+ } else {
+ // It's possible that the macro was unresolved (indeterminate) and silently
+ // expanded into a dummy fragment for recovery during expansion.
+ // Now, post-expansion, the resolution may succeed, but we can't change the
+ // past and need to report an error.
+ // However, non-speculative `resolve_path` can successfully return private items
+ // even if speculative `resolve_path` returned nothing previously, so we skip this
+ // less informative error if the privacy error is reported elsewhere.
+ if this.privacy_errors.is_empty() {
+ let msg = format!(
+ "cannot determine resolution for the {} `{}`",
+ kind.descr(),
+ Segment::names_to_string(path)
+ );
+ let msg_note = "import resolution is stuck, try simplifying macro imports";
+ this.session.struct_span_err(span, &msg).note(msg_note).emit();
+ }
+ }
+ };
+
+ let macro_resolutions = mem::take(&mut self.multi_segment_macro_resolutions);
+ for (mut path, path_span, kind, parent_scope, initial_res) in macro_resolutions {
+ // FIXME: Path resolution will ICE if segment IDs are present.
+ for seg in &mut path {
+ seg.id = None;
+ }
+ match self.resolve_path(
+ &path,
+ Some(MacroNS),
+ &parent_scope,
+ Some(Finalize::new(ast::CRATE_NODE_ID, path_span)),
+ None,
+ ) {
+ PathResult::NonModule(path_res) if path_res.unresolved_segments() == 0 => {
+ let res = path_res.base_res();
+ check_consistency(self, &path, path_span, kind, initial_res, res);
+ }
+ path_res @ PathResult::NonModule(..) | path_res @ PathResult::Failed { .. } => {
+ let (span, label) = if let PathResult::Failed { span, label, .. } = path_res {
+ (span, label)
+ } else {
+ (
+ path_span,
+ format!(
+ "partially resolved path in {} {}",
+ kind.article(),
+ kind.descr()
+ ),
+ )
+ };
+ self.report_error(
+ span,
+ ResolutionError::FailedToResolve { label, suggestion: None },
+ );
+ }
+ PathResult::Module(..) | PathResult::Indeterminate => unreachable!(),
+ }
+ }
+
+ let macro_resolutions = mem::take(&mut self.single_segment_macro_resolutions);
+ for (ident, kind, parent_scope, initial_binding) in macro_resolutions {
+ match self.early_resolve_ident_in_lexical_scope(
+ ident,
+ ScopeSet::Macro(kind),
+ &parent_scope,
+ Some(Finalize::new(ast::CRATE_NODE_ID, ident.span)),
+ true,
+ None,
+ ) {
+ Ok(binding) => {
+ let initial_res = initial_binding.map(|initial_binding| {
+ self.record_use(ident, initial_binding, false);
+ initial_binding.res()
+ });
+ let res = binding.res();
+ let seg = Segment::from_ident(ident);
+ check_consistency(self, &[seg], ident.span, kind, initial_res, res);
+ if res == Res::NonMacroAttr(NonMacroAttrKind::DeriveHelperCompat) {
+ let node_id = self
+ .invocation_parents
+ .get(&parent_scope.expansion)
+ .map_or(ast::CRATE_NODE_ID, |id| self.def_id_to_node_id[id.0]);
+ self.lint_buffer.buffer_lint_with_diagnostic(
+ LEGACY_DERIVE_HELPERS,
+ node_id,
+ ident.span,
+ "derive helper attribute is used before it is introduced",
+ BuiltinLintDiagnostics::LegacyDeriveHelpers(binding.span),
+ );
+ }
+ }
+ Err(..) => {
+ let expected = kind.descr_expected();
+ let msg = format!("cannot find {} `{}` in this scope", expected, ident);
+ let mut err = self.session.struct_span_err(ident.span, &msg);
+ self.unresolved_macro_suggestions(&mut err, kind, &parent_scope, ident);
+ err.emit();
+ }
+ }
+ }
+
+ let builtin_attrs = mem::take(&mut self.builtin_attrs);
+ for (ident, parent_scope) in builtin_attrs {
+ let _ = self.early_resolve_ident_in_lexical_scope(
+ ident,
+ ScopeSet::Macro(MacroKind::Attr),
+ &parent_scope,
+ Some(Finalize::new(ast::CRATE_NODE_ID, ident.span)),
+ true,
+ None,
+ );
+ }
+ }
+
+ fn check_stability_and_deprecation(
+ &mut self,
+ ext: &SyntaxExtension,
+ path: &ast::Path,
+ node_id: NodeId,
+ ) {
+ let span = path.span;
+ if let Some(stability) = &ext.stability {
+ if let StabilityLevel::Unstable { reason, issue, is_soft, implied_by } = stability.level
+ {
+ let feature = stability.feature;
+
+ let is_allowed = |feature| {
+ self.active_features.contains(&feature) || span.allows_unstable(feature)
+ };
+ let allowed_by_implication =
+ implied_by.map(|feature| is_allowed(feature)).unwrap_or(false);
+ if !is_allowed(feature) && !allowed_by_implication {
+ let lint_buffer = &mut self.lint_buffer;
+ let soft_handler =
+ |lint, span, msg: &_| lint_buffer.buffer_lint(lint, node_id, span, msg);
+ stability::report_unstable(
+ self.session,
+ feature,
+ reason.to_opt_reason(),
+ issue,
+ None,
+ is_soft,
+ span,
+ soft_handler,
+ );
+ }
+ }
+ }
+ if let Some(depr) = &ext.deprecation {
+ let path = pprust::path_to_string(&path);
+ let (message, lint) = stability::deprecation_message_and_lint(depr, "macro", &path);
+ stability::early_report_deprecation(
+ &mut self.lint_buffer,
+ &message,
+ depr.suggestion,
+ lint,
+ span,
+ node_id,
+ );
+ }
+ }
+
+ fn prohibit_imported_non_macro_attrs(
+ &self,
+ binding: Option<&'a NameBinding<'a>>,
+ res: Option<Res>,
+ span: Span,
+ ) {
+ if let Some(Res::NonMacroAttr(kind)) = res {
+ if kind != NonMacroAttrKind::Tool && binding.map_or(true, |b| b.is_import()) {
+ let msg =
+ format!("cannot use {} {} through an import", kind.article(), kind.descr());
+ let mut err = self.session.struct_span_err(span, &msg);
+ if let Some(binding) = binding {
+ err.span_note(binding.span, &format!("the {} imported here", kind.descr()));
+ }
+ err.emit();
+ }
+ }
+ }
+
+ pub(crate) fn check_reserved_macro_name(&mut self, ident: Ident, res: Res) {
+ // Reserve some names that are not quite covered by the general check
+ // performed on `Resolver::builtin_attrs`.
+ if ident.name == sym::cfg || ident.name == sym::cfg_attr {
+ let macro_kind = self.get_macro(res).map(|macro_data| macro_data.ext.macro_kind());
+ if macro_kind.is_some() && sub_namespace_match(macro_kind, Some(MacroKind::Attr)) {
+ self.session.span_err(
+ ident.span,
+ &format!("name `{}` is reserved in attribute namespace", ident),
+ );
+ }
+ }
+ }
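+
+ // Illustrative sketch (not part of the upstream source) of a definition the
+ // reservation above rejects, assuming a proc-macro crate:
+ //
+ //     #[proc_macro_attribute]
+ //     pub fn cfg(_attr: TokenStream, item: TokenStream) -> TokenStream { item }
+ //
+ // error: name `cfg` is reserved in attribute namespace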
+
+ /// Compile the macro into a `SyntaxExtension` and its rule spans.
+ ///
+ /// Possibly replace its expander with a pre-defined one for built-in macros.
+ pub(crate) fn compile_macro(
+ &mut self,
+ item: &ast::Item,
+ edition: Edition,
+ ) -> (SyntaxExtension, Vec<(usize, Span)>) {
+ let (mut result, mut rule_spans) = compile_declarative_macro(
+ &self.session,
+ self.session.features_untracked(),
+ item,
+ edition,
+ );
+
+ if let Some(builtin_name) = result.builtin_name {
+ // The macro was marked with `#[rustc_builtin_macro]`.
+ if let Some(builtin_macro) = self.builtin_macros.get_mut(&builtin_name) {
+ // The macro is a built-in, replace its expander function
+ // while still taking everything else from the source code.
+ // If we already loaded this builtin macro, give a better error message than 'no such builtin macro'.
+ match mem::replace(builtin_macro, BuiltinMacroState::AlreadySeen(item.span)) {
+ BuiltinMacroState::NotYetSeen(ext) => {
+ result.kind = ext;
+ rule_spans = Vec::new();
+ if item.id != ast::DUMMY_NODE_ID {
+ self.builtin_macro_kinds
+ .insert(self.local_def_id(item.id), result.macro_kind());
+ }
+ }
+ BuiltinMacroState::AlreadySeen(span) => {
+ struct_span_err!(
+ self.session,
+ item.span,
+ E0773,
+ "attempted to define built-in macro more than once"
+ )
+ .span_note(span, "previously defined here")
+ .emit();
+ }
+ }
+ } else {
+ let msg = format!("cannot find a built-in macro with name `{}`", item.ident);
+ self.session.span_err(item.span, &msg);
+ }
+ }
+
+ (result, rule_spans)
+ }
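+
+ // Illustrative sketch (not part of the upstream source): the
+ // `#[rustc_builtin_macro]` pattern handled above, as used by
+ // standard-library macros such as `line!`:
+ //
+ //     #[rustc_builtin_macro]
+ //     macro_rules! line {
+ //         () => { /* compiler built-in */ };
+ //     }
+ //
+ // The stub body satisfies the parser; `compile_macro` swaps in the expander
+ // registered via `register_builtin_macro`.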
+}
diff --git a/compiler/rustc_save_analysis/Cargo.toml b/compiler/rustc_save_analysis/Cargo.toml
new file mode 100644
index 000000000..15a89d82f
--- /dev/null
+++ b/compiler/rustc_save_analysis/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "rustc_save_analysis"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+tracing = "0.1"
+rustc_middle = { path = "../rustc_middle" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_ast_pretty = { path = "../rustc_ast_pretty" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_hir_pretty = { path = "../rustc_hir_pretty" }
+rustc_lexer = { path = "../rustc_lexer" }
+serde_json = "1"
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rls-data = "0.19"
+rls-span = "0.5"
diff --git a/compiler/rustc_save_analysis/src/dump_visitor.rs b/compiler/rustc_save_analysis/src/dump_visitor.rs
new file mode 100644
index 000000000..e2e0e1f5b
--- /dev/null
+++ b/compiler/rustc_save_analysis/src/dump_visitor.rs
@@ -0,0 +1,1463 @@
+//! Write the output of rustc's analysis to an implementor of `Dump`.
+//!
+//! Dumping the analysis is implemented by walking the AST and getting a bunch of
+//! info out from all over the place. We use `DefId`s to identify objects. The
+//! tricky part is getting syntactic (span, source text) and semantic (reference
+//! `DefId`s) information for parts of expressions which the compiler has discarded.
+//! E.g., in a path `foo::bar::baz`, the compiler only keeps a span for the whole
+//! path and a reference to `baz`, but we want spans and references for all three
+//! idents.
+//!
+//! `SpanUtils` is used to manipulate spans. In particular, to extract sub-spans
+//! from spans (e.g., the span for `bar` from the above example path).
+//! `DumpVisitor` walks the AST and processes it, and `Dumper` is used for
+//! recording the output.
+
+use rustc_ast as ast;
+use rustc_ast::walk_list;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind as HirDefKind, Res};
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir_pretty::{bounds_to_string, fn_to_string, generic_params_to_string, ty_to_string};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::span_bug;
+use rustc_middle::ty::{self, DefIdTree, TyCtxt};
+use rustc_session::config::Input;
+use rustc_span::symbol::Ident;
+use rustc_span::*;
+
+use std::env;
+use std::path::Path;
+
+use crate::dumper::{Access, Dumper};
+use crate::sig;
+use crate::span_utils::SpanUtils;
+use crate::{
+ escape, generated_code, id_from_def_id, id_from_hir_id, lower_attributes, PathCollector,
+ SaveContext,
+};
+
+use rls_data::{
+ CompilationOptions, CratePreludeData, Def, DefKind, GlobalCrateId, Import, ImportKind, Ref,
+ RefKind, Relation, RelationKind, SpanData,
+};
+
+use tracing::{debug, error};
+
+#[rustfmt::skip] // https://github.com/rust-lang/rustfmt/issues/5213
+macro_rules! down_cast_data {
+ ($id:ident, $kind:ident, $sp:expr) => {
+ let super::Data::$kind($id) = $id else {
+ span_bug!($sp, "unexpected data kind: {:?}", $id);
+ };
+ };
+}
+
+macro_rules! access_from {
+ ($save_ctxt:expr, $id:expr) => {
+ Access {
+ public: $save_ctxt.tcx.visibility($id).is_public(),
+ reachable: $save_ctxt.access_levels.is_reachable($id),
+ }
+ };
+}
+
+pub struct DumpVisitor<'tcx> {
+ pub save_ctxt: SaveContext<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ dumper: Dumper,
+
+ span: SpanUtils<'tcx>,
+ // Set of macro definition (callee) spans, and the set
+ // of macro use (callsite) spans. We store these to ensure
+ // we only write one macro def per unique macro definition, and
+ // one macro use per unique callsite span.
+ // mac_defs: FxHashSet<Span>,
+ // macro_calls: FxHashSet<Span>,
+}
+
+impl<'tcx> DumpVisitor<'tcx> {
+ pub fn new(save_ctxt: SaveContext<'tcx>) -> DumpVisitor<'tcx> {
+ let span_utils = SpanUtils::new(&save_ctxt.tcx.sess);
+ let dumper = Dumper::new(save_ctxt.config.clone());
+ DumpVisitor { tcx: save_ctxt.tcx, save_ctxt, dumper, span: span_utils }
+ }
+
+ pub fn analysis(&self) -> &rls_data::Analysis {
+ self.dumper.analysis()
+ }
+
+ fn nest_typeck_results<F>(&mut self, item_def_id: LocalDefId, f: F)
+ where
+ F: FnOnce(&mut Self),
+ {
+ let typeck_results = if self.tcx.has_typeck_results(item_def_id) {
+ Some(self.tcx.typeck(item_def_id))
+ } else {
+ None
+ };
+
+ let old_maybe_typeck_results = self.save_ctxt.maybe_typeck_results;
+ self.save_ctxt.maybe_typeck_results = typeck_results;
+ f(self);
+ self.save_ctxt.maybe_typeck_results = old_maybe_typeck_results;
+ }
+
+ fn span_from_span(&self, span: Span) -> SpanData {
+ self.save_ctxt.span_from_span(span)
+ }
+
+ fn lookup_def_id(&self, ref_id: hir::HirId) -> Option<DefId> {
+ self.save_ctxt.lookup_def_id(ref_id)
+ }
+
+ pub fn dump_crate_info(&mut self, name: &str) {
+ let source_file = self.tcx.sess.local_crate_source_file.as_ref();
+ let crate_root = source_file.map(|source_file| {
+ let source_file = Path::new(source_file);
+ match source_file.file_name() {
+ Some(_) => source_file.parent().unwrap().display(),
+ None => source_file.display(),
+ }
+ .to_string()
+ });
+
+ let data = CratePreludeData {
+ crate_id: GlobalCrateId {
+ name: name.into(),
+ disambiguator: (self.tcx.sess.local_stable_crate_id().to_u64(), 0),
+ },
+ crate_root: crate_root.unwrap_or_else(|| "<no source>".to_owned()),
+ external_crates: self.save_ctxt.get_external_crates(),
+ span: self.span_from_span(self.tcx.def_span(CRATE_DEF_ID)),
+ };
+
+ self.dumper.crate_prelude(data);
+ }
+
+ pub fn dump_compilation_options(&mut self, input: &Input, crate_name: &str) {
+ // Apply possible `remap-path-prefix` remapping to the input source file
+ // (and don't include remapping args anymore)
+ let (program, arguments) = {
+ let remap_arg_indices = {
+ let mut indices = FxHashSet::default();
+ // Args are guaranteed to be valid UTF-8 (checked early)
+ for (i, e) in env::args().enumerate() {
+ if e.starts_with("--remap-path-prefix=") {
+ indices.insert(i);
+ } else if e == "--remap-path-prefix" {
+ indices.insert(i);
+ indices.insert(i + 1);
+ }
+ }
+ indices
+ };
+
+ let mut args = env::args()
+ .enumerate()
+ .filter(|(i, _)| !remap_arg_indices.contains(i))
+ .map(|(_, arg)| match input {
+ Input::File(ref path) if path == Path::new(&arg) => {
+ let mapped = &self.tcx.sess.local_crate_source_file;
+ mapped.as_ref().unwrap().to_string_lossy().into()
+ }
+ _ => arg,
+ });
+
+ (args.next().unwrap(), args.collect())
+ };
+
+ let data = CompilationOptions {
+ directory: self.tcx.sess.opts.working_dir.remapped_path_if_available().into(),
+ program,
+ arguments,
+ output: self.save_ctxt.compilation_output(crate_name),
+ };
+
+ self.dumper.compilation_opts(data);
+ }
+
+ fn write_segments(&mut self, segments: impl IntoIterator<Item = &'tcx hir::PathSegment<'tcx>>) {
+ for seg in segments {
+ if let Some(data) = self.save_ctxt.get_path_segment_data(seg) {
+ self.dumper.dump_ref(data);
+ }
+ }
+ }
+
+ fn write_sub_paths(&mut self, path: &'tcx hir::Path<'tcx>) {
+ self.write_segments(path.segments)
+ }
+
+ // As `write_sub_paths`, but does not process the last ident in the path (assuming it
+ // will be processed elsewhere).
+ fn write_sub_paths_truncated(&mut self, path: &'tcx hir::Path<'tcx>) {
+ if let [segments @ .., _] = path.segments {
+ self.write_segments(segments)
+ }
+ }
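+
+ // Illustrative sketch (not part of the upstream source): for a path
+ // `foo::bar::baz`, the two helpers above emit refs for
+ //
+ //     foo::bar::baz
+ //     ^^^  ^^^  ^^^   write_sub_paths
+ //     ^^^  ^^^        write_sub_paths_truncated (last ident left to the caller)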
+
+ fn process_formals(&mut self, formals: &'tcx [hir::Param<'tcx>], qualname: &str) {
+ for arg in formals {
+ self.visit_pat(&arg.pat);
+ let mut collector = PathCollector::new(self.tcx);
+ collector.visit_pat(&arg.pat);
+
+ for (hir_id, ident, ..) in collector.collected_idents {
+ let typ = match self.save_ctxt.typeck_results().node_type_opt(hir_id) {
+ Some(s) => s.to_string(),
+ None => continue,
+ };
+ if !self.span.filter_generated(ident.span) {
+ let id = id_from_hir_id(hir_id, &self.save_ctxt);
+ let span = self.span_from_span(ident.span);
+
+ self.dumper.dump_def(
+ &Access { public: false, reachable: false },
+ Def {
+ kind: DefKind::Local,
+ id,
+ span,
+ name: ident.to_string(),
+ qualname: format!("{}::{}", qualname, ident),
+ value: typ,
+ parent: None,
+ children: vec![],
+ decl_id: None,
+ docs: String::new(),
+ sig: None,
+ attributes: vec![],
+ },
+ );
+ }
+ }
+ }
+ }
+
+ fn process_method(
+ &mut self,
+ sig: &'tcx hir::FnSig<'tcx>,
+ body: Option<hir::BodyId>,
+ def_id: LocalDefId,
+ ident: Ident,
+ generics: &'tcx hir::Generics<'tcx>,
+ span: Span,
+ ) {
+ debug!("process_method: {:?}:{}", def_id, ident);
+
+ let map = self.tcx.hir();
+ let hir_id = map.local_def_id_to_hir_id(def_id);
+ self.nest_typeck_results(def_id, |v| {
+ if let Some(mut method_data) = v.save_ctxt.get_method_data(hir_id, ident, span) {
+ if let Some(body) = body {
+ v.process_formals(map.body(body).params, &method_data.qualname);
+ }
+ v.process_generic_params(&generics, &method_data.qualname, hir_id);
+
+ method_data.value =
+ fn_to_string(sig.decl, sig.header, Some(ident.name), generics, &[], None);
+ method_data.sig = sig::method_signature(hir_id, ident, generics, sig, &v.save_ctxt);
+
+ v.dumper.dump_def(&access_from!(v.save_ctxt, def_id), method_data);
+ }
+
+ // walk arg and return types
+ for arg in sig.decl.inputs {
+ v.visit_ty(arg);
+ }
+
+ if let hir::FnRetTy::Return(ref ret_ty) = sig.decl.output {
+ v.visit_ty(ret_ty)
+ }
+
+ // walk the fn body
+ if let Some(body) = body {
+ v.visit_expr(&map.body(body).value);
+ }
+ });
+ }
+
+ fn process_struct_field_def(
+ &mut self,
+ field: &'tcx hir::FieldDef<'tcx>,
+ parent_id: hir::HirId,
+ ) {
+ let field_data = self.save_ctxt.get_field_data(field, parent_id);
+ if let Some(field_data) = field_data {
+ self.dumper.dump_def(
+ &access_from!(self.save_ctxt, self.tcx.hir().local_def_id(field.hir_id)),
+ field_data,
+ );
+ }
+ }
+
+ // Dump generic param bindings, then call `visit_generics`.
+ fn process_generic_params(
+ &mut self,
+ generics: &'tcx hir::Generics<'tcx>,
+ prefix: &str,
+ id: hir::HirId,
+ ) {
+ for param in generics.params {
+ match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => {}
+ hir::GenericParamKind::Type { .. } => {
+ let param_ss = param.name.ident().span;
+ let name = escape(self.span.snippet(param_ss));
+ // Append $id to name to make sure each one is unique.
+ let qualname = format!("{}::{}${}", prefix, name, id);
+ if !self.span.filter_generated(param_ss) {
+ let id = id_from_hir_id(param.hir_id, &self.save_ctxt);
+ let span = self.span_from_span(param_ss);
+
+ self.dumper.dump_def(
+ &Access { public: false, reachable: false },
+ Def {
+ kind: DefKind::Type,
+ id,
+ span,
+ name,
+ qualname,
+ value: String::new(),
+ parent: None,
+ children: vec![],
+ decl_id: None,
+ docs: String::new(),
+ sig: None,
+ attributes: vec![],
+ },
+ );
+ }
+ }
+ hir::GenericParamKind::Const { .. } => {}
+ }
+ }
+
+ self.visit_generics(generics)
+ }
+
+ fn process_fn(
+ &mut self,
+ item: &'tcx hir::Item<'tcx>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ _header: &'tcx hir::FnHeader,
+ ty_params: &'tcx hir::Generics<'tcx>,
+ body: hir::BodyId,
+ ) {
+ let map = self.tcx.hir();
+ self.nest_typeck_results(item.def_id, |v| {
+ let body = map.body(body);
+ if let Some(fn_data) = v.save_ctxt.get_item_data(item) {
+ down_cast_data!(fn_data, DefData, item.span);
+ v.process_formals(body.params, &fn_data.qualname);
+ v.process_generic_params(ty_params, &fn_data.qualname, item.hir_id());
+
+ v.dumper.dump_def(&access_from!(v.save_ctxt, item.def_id), fn_data);
+ }
+
+ for arg in decl.inputs {
+ v.visit_ty(arg)
+ }
+
+ if let hir::FnRetTy::Return(ref ret_ty) = decl.output {
+ v.visit_ty(ret_ty)
+ }
+
+ v.visit_expr(&body.value);
+ });
+ }
+
+ fn process_static_or_const_item(
+ &mut self,
+ item: &'tcx hir::Item<'tcx>,
+ typ: &'tcx hir::Ty<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) {
+ self.nest_typeck_results(item.def_id, |v| {
+ if let Some(var_data) = v.save_ctxt.get_item_data(item) {
+ down_cast_data!(var_data, DefData, item.span);
+ v.dumper.dump_def(&access_from!(v.save_ctxt, item.def_id), var_data);
+ }
+ v.visit_ty(&typ);
+ v.visit_expr(expr);
+ });
+ }
+
+ fn process_assoc_const(
+ &mut self,
+ def_id: LocalDefId,
+ ident: Ident,
+ typ: &'tcx hir::Ty<'tcx>,
+ expr: Option<&'tcx hir::Expr<'tcx>>,
+ parent_id: DefId,
+ attrs: &'tcx [ast::Attribute],
+ ) {
+ let qualname = format!("::{}", self.tcx.def_path_str(def_id.to_def_id()));
+
+ if !self.span.filter_generated(ident.span) {
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(def_id);
+ let sig = sig::assoc_const_signature(hir_id, ident.name, typ, expr, &self.save_ctxt);
+ let span = self.span_from_span(ident.span);
+
+ self.dumper.dump_def(
+ &access_from!(self.save_ctxt, def_id),
+ Def {
+ kind: DefKind::Const,
+ id: id_from_hir_id(hir_id, &self.save_ctxt),
+ span,
+ name: ident.name.to_string(),
+ qualname,
+ value: ty_to_string(&typ),
+ parent: Some(id_from_def_id(parent_id)),
+ children: vec![],
+ decl_id: None,
+ docs: self.save_ctxt.docs_for_attrs(attrs),
+ sig,
+ attributes: lower_attributes(attrs.to_owned(), &self.save_ctxt),
+ },
+ );
+ }
+
+ // walk type and init value
+ self.nest_typeck_results(def_id, |v| {
+ v.visit_ty(typ);
+ if let Some(expr) = expr {
+ v.visit_expr(expr);
+ }
+ });
+ }
+
+ // FIXME tuple structs should generate tuple-specific data.
+ fn process_struct(
+ &mut self,
+ item: &'tcx hir::Item<'tcx>,
+ def: &'tcx hir::VariantData<'tcx>,
+ ty_params: &'tcx hir::Generics<'tcx>,
+ ) {
+ debug!("process_struct {:?} {:?}", item, item.span);
+ let name = item.ident.to_string();
+ let qualname = format!("::{}", self.tcx.def_path_str(item.def_id.to_def_id()));
+
+ let kind = match item.kind {
+ hir::ItemKind::Struct(_, _) => DefKind::Struct,
+ hir::ItemKind::Union(_, _) => DefKind::Union,
+ _ => unreachable!(),
+ };
+
+ let (value, fields) = match item.kind {
+ hir::ItemKind::Struct(hir::VariantData::Struct(ref fields, ..), ..)
+ | hir::ItemKind::Union(hir::VariantData::Struct(ref fields, ..), ..) => {
+ let include_priv_fields = !self.save_ctxt.config.pub_only;
+ let fields_str = fields
+ .iter()
+ .filter_map(|f| {
+ if include_priv_fields {
+ return Some(f.ident.to_string());
+ }
+ let def_id = self.save_ctxt.tcx.hir().local_def_id(f.hir_id);
+ if self.save_ctxt.tcx.visibility(def_id).is_public() {
+ Some(f.ident.to_string())
+ } else {
+ None
+ }
+ })
+ .collect::<Vec<_>>()
+ .join(", ");
+ let value = format!("{} {{ {} }}", name, fields_str);
+ (value, fields.iter().map(|f| id_from_hir_id(f.hir_id, &self.save_ctxt)).collect())
+ }
+ _ => (String::new(), vec![]),
+ };
+
+ if !self.span.filter_generated(item.ident.span) {
+ let span = self.span_from_span(item.ident.span);
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+ self.dumper.dump_def(
+ &access_from!(self.save_ctxt, item.def_id),
+ Def {
+ kind,
+ id: id_from_def_id(item.def_id.to_def_id()),
+ span,
+ name,
+ qualname: qualname.clone(),
+ value,
+ parent: None,
+ children: fields,
+ decl_id: None,
+ docs: self.save_ctxt.docs_for_attrs(attrs),
+ sig: sig::item_signature(item, &self.save_ctxt),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
+ },
+ );
+ }
+
+ self.nest_typeck_results(item.def_id, |v| {
+ for field in def.fields() {
+ v.process_struct_field_def(field, item.hir_id());
+ v.visit_ty(&field.ty);
+ }
+
+ v.process_generic_params(ty_params, &qualname, item.hir_id());
+ });
+ }
+
+ fn process_enum(
+ &mut self,
+ item: &'tcx hir::Item<'tcx>,
+ enum_definition: &'tcx hir::EnumDef<'tcx>,
+ ty_params: &'tcx hir::Generics<'tcx>,
+ ) {
+ let enum_data = self.save_ctxt.get_item_data(item);
+ let Some(enum_data) = enum_data else {
+ return;
+ };
+ down_cast_data!(enum_data, DefData, item.span);
+
+ let access = access_from!(self.save_ctxt, item.def_id);
+
+ for variant in enum_definition.variants {
+ let name = variant.ident.name.to_string();
+ let qualname = format!("{}::{}", enum_data.qualname, name);
+ let name_span = variant.ident.span;
+
+ match variant.data {
+ hir::VariantData::Struct(ref fields, ..) => {
+ let fields_str =
+ fields.iter().map(|f| f.ident.to_string()).collect::<Vec<_>>().join(", ");
+ let value = format!("{}::{} {{ {} }}", enum_data.name, name, fields_str);
+ if !self.span.filter_generated(name_span) {
+ let span = self.span_from_span(name_span);
+ let id = id_from_hir_id(variant.id, &self.save_ctxt);
+ let parent = Some(id_from_def_id(item.def_id.to_def_id()));
+ let attrs = self.tcx.hir().attrs(variant.id);
+
+ self.dumper.dump_def(
+ &access,
+ Def {
+ kind: DefKind::StructVariant,
+ id,
+ span,
+ name,
+ qualname,
+ value,
+ parent,
+ children: vec![],
+ decl_id: None,
+ docs: self.save_ctxt.docs_for_attrs(attrs),
+ sig: sig::variant_signature(variant, &self.save_ctxt),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
+ },
+ );
+ }
+ }
+ ref v => {
+ let mut value = format!("{}::{}", enum_data.name, name);
+ if let hir::VariantData::Tuple(fields, _) = v {
+ value.push('(');
+ value.push_str(
+ &fields
+ .iter()
+ .map(|f| ty_to_string(&f.ty))
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+ value.push(')');
+ }
+ if !self.span.filter_generated(name_span) {
+ let span = self.span_from_span(name_span);
+ let id = id_from_hir_id(variant.id, &self.save_ctxt);
+ let parent = Some(id_from_def_id(item.def_id.to_def_id()));
+ let attrs = self.tcx.hir().attrs(variant.id);
+
+ self.dumper.dump_def(
+ &access,
+ Def {
+ kind: DefKind::TupleVariant,
+ id,
+ span,
+ name,
+ qualname,
+ value,
+ parent,
+ children: vec![],
+ decl_id: None,
+ docs: self.save_ctxt.docs_for_attrs(attrs),
+ sig: sig::variant_signature(variant, &self.save_ctxt),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
+ },
+ );
+ }
+ }
+ }
+
+ for field in variant.data.fields() {
+ self.process_struct_field_def(field, variant.id);
+ self.visit_ty(field.ty);
+ }
+ }
+ self.process_generic_params(ty_params, &enum_data.qualname, item.hir_id());
+ self.dumper.dump_def(&access, enum_data);
+ }
+
+ fn process_impl(&mut self, item: &'tcx hir::Item<'tcx>, impl_: &'tcx hir::Impl<'tcx>) {
+ if let Some(impl_data) = self.save_ctxt.get_item_data(item) {
+ if !self.span.filter_generated(item.span) {
+ if let super::Data::RelationData(rel, imp) = impl_data {
+ self.dumper.dump_relation(rel);
+ self.dumper.dump_impl(imp);
+ } else {
+ span_bug!(item.span, "unexpected data kind: {:?}", impl_data);
+ }
+ }
+ }
+
+ let map = self.tcx.hir();
+ self.nest_typeck_results(item.def_id, |v| {
+ v.visit_ty(&impl_.self_ty);
+ if let Some(trait_ref) = &impl_.of_trait {
+ v.process_path(trait_ref.hir_ref_id, &hir::QPath::Resolved(None, &trait_ref.path));
+ }
+ v.process_generic_params(&impl_.generics, "", item.hir_id());
+ for impl_item in impl_.items {
+ v.process_impl_item(map.impl_item(impl_item.id), item.def_id.to_def_id());
+ }
+ });
+ }
+
+ fn process_trait(
+ &mut self,
+ item: &'tcx hir::Item<'tcx>,
+ generics: &'tcx hir::Generics<'tcx>,
+ trait_refs: hir::GenericBounds<'tcx>,
+ methods: &'tcx [hir::TraitItemRef],
+ ) {
+ let name = item.ident.to_string();
+ let qualname = format!("::{}", self.tcx.def_path_str(item.def_id.to_def_id()));
+ let mut val = name.clone();
+ if !generics.params.is_empty() {
+ val.push_str(&generic_params_to_string(generics.params));
+ }
+ if !trait_refs.is_empty() {
+ val.push_str(": ");
+ val.push_str(&bounds_to_string(trait_refs));
+ }
+ if !self.span.filter_generated(item.ident.span) {
+ let id = id_from_def_id(item.def_id.to_def_id());
+ let span = self.span_from_span(item.ident.span);
+ let children =
+ methods.iter().map(|i| id_from_def_id(i.id.def_id.to_def_id())).collect();
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+ self.dumper.dump_def(
+ &access_from!(self.save_ctxt, item.def_id),
+ Def {
+ kind: DefKind::Trait,
+ id,
+ span,
+ name,
+ qualname: qualname.clone(),
+ value: val,
+ parent: None,
+ children,
+ decl_id: None,
+ docs: self.save_ctxt.docs_for_attrs(attrs),
+ sig: sig::item_signature(item, &self.save_ctxt),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
+ },
+ );
+ }
+
+ // supertraits
+ for super_bound in trait_refs.iter() {
+ let (def_id, sub_span) = match *super_bound {
+ hir::GenericBound::Trait(ref trait_ref, _) => (
+ self.lookup_def_id(trait_ref.trait_ref.hir_ref_id),
+ trait_ref.trait_ref.path.segments.last().unwrap().ident.span,
+ ),
+ hir::GenericBound::LangItemTrait(lang_item, span, _, _) => {
+ (Some(self.tcx.require_lang_item(lang_item, Some(span))), span)
+ }
+ hir::GenericBound::Outlives(..) => continue,
+ };
+
+ if let Some(id) = def_id {
+ if !self.span.filter_generated(sub_span) {
+ let span = self.span_from_span(sub_span);
+ self.dumper.dump_ref(Ref {
+ kind: RefKind::Type,
+ span: span.clone(),
+ ref_id: id_from_def_id(id),
+ });
+
+ self.dumper.dump_relation(Relation {
+ kind: RelationKind::SuperTrait,
+ span,
+ from: id_from_def_id(id),
+ to: id_from_def_id(item.def_id.to_def_id()),
+ });
+ }
+ }
+ }
+
+ // walk generics and methods
+ self.process_generic_params(generics, &qualname, item.hir_id());
+ for method in methods {
+ let map = self.tcx.hir();
+ self.process_trait_item(map.trait_item(method.id), item.def_id.to_def_id())
+ }
+ }
+
+ // `item` is the module in question, represented as an item.
+ fn process_mod(&mut self, item: &'tcx hir::Item<'tcx>) {
+ if let Some(mod_data) = self.save_ctxt.get_item_data(item) {
+ down_cast_data!(mod_data, DefData, item.span);
+ self.dumper.dump_def(&access_from!(self.save_ctxt, item.def_id), mod_data);
+ }
+ }
+
+ fn dump_path_ref(&mut self, id: hir::HirId, path: &hir::QPath<'tcx>) {
+ let path_data = self.save_ctxt.get_path_data(id, path);
+ if let Some(path_data) = path_data {
+ self.dumper.dump_ref(path_data);
+ }
+ }
+
+ fn dump_path_segment_ref(&mut self, id: hir::HirId, segment: &hir::PathSegment<'tcx>) {
+ let segment_data = self.save_ctxt.get_path_segment_data_with_id(segment, id);
+ if let Some(segment_data) = segment_data {
+ self.dumper.dump_ref(segment_data);
+ }
+ }
+
+ fn process_path(&mut self, id: hir::HirId, path: &hir::QPath<'tcx>) {
+ if self.span.filter_generated(path.span()) {
+ return;
+ }
+ self.dump_path_ref(id, path);
+
+ // Type arguments
+ let segments = match path {
+ hir::QPath::Resolved(ty, path) => {
+ if let Some(ty) = ty {
+ self.visit_ty(ty);
+ }
+ path.segments
+ }
+ hir::QPath::TypeRelative(ty, segment) => {
+ self.visit_ty(ty);
+ std::slice::from_ref(*segment)
+ }
+ hir::QPath::LangItem(..) => return,
+ };
+ for seg in segments {
+ if let Some(ref generic_args) = seg.args {
+ for arg in generic_args.args {
+ if let hir::GenericArg::Type(ref ty) = arg {
+ self.visit_ty(ty);
+ }
+ }
+ }
+ }
+
+ if let hir::QPath::Resolved(_, path) = path {
+ self.write_sub_paths_truncated(path);
+ }
+ }
+
+ fn process_struct_lit(
+ &mut self,
+ ex: &'tcx hir::Expr<'tcx>,
+ path: &'tcx hir::QPath<'tcx>,
+ fields: &'tcx [hir::ExprField<'tcx>],
+ variant: &'tcx ty::VariantDef,
+ rest: Option<&'tcx hir::Expr<'tcx>>,
+ ) {
+ if let Some(_ex_res_data) = self.save_ctxt.get_expr_data(ex) {
+ if let hir::QPath::Resolved(_, path) = path {
+ self.write_sub_paths_truncated(path);
+ }
+ // For MyEnum::MyVariant, get_expr_data gives us MyEnum, not MyVariant.
+ // For recording the span's ref id, we want MyVariant.
+ if !generated_code(ex.span) {
+ let sub_span = path.last_segment_span();
+ let span = self.save_ctxt.span_from_span(sub_span);
+ let reff =
+ Ref { kind: RefKind::Type, span, ref_id: id_from_def_id(variant.def_id) };
+ self.dumper.dump_ref(reff);
+ }
+
+ for field in fields {
+ if let Some(field_data) = self.save_ctxt.get_field_ref_data(field, variant) {
+ self.dumper.dump_ref(field_data);
+ }
+
+ self.visit_expr(&field.expr)
+ }
+ }
+
+ if let Some(base) = rest {
+ self.visit_expr(&base);
+ }
+ }
+
+ fn process_method_call(
+ &mut self,
+ ex: &'tcx hir::Expr<'tcx>,
+ seg: &'tcx hir::PathSegment<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) {
+ debug!("process_method_call {:?} {:?}", ex, ex.span);
+ if let Some(mcd) = self.save_ctxt.get_expr_data(ex) {
+ down_cast_data!(mcd, RefData, ex.span);
+ if !generated_code(ex.span) {
+ self.dumper.dump_ref(mcd);
+ }
+ }
+
+ // Explicit types in the turbo-fish.
+ if let Some(generic_args) = seg.args {
+ for arg in generic_args.args {
+ if let hir::GenericArg::Type(ty) = arg {
+ self.visit_ty(&ty)
+ };
+ }
+ }
+
+ // walk receiver and args
+ walk_list!(self, visit_expr, args);
+ }
+
+ fn process_pat(&mut self, p: &'tcx hir::Pat<'tcx>) {
+ match p.kind {
+ hir::PatKind::Struct(ref _path, fields, _) => {
+ // FIXME do something with _path?
+ let adt = match self.save_ctxt.typeck_results().node_type_opt(p.hir_id) {
+ Some(ty) if ty.ty_adt_def().is_some() => ty.ty_adt_def().unwrap(),
+ _ => {
+ intravisit::walk_pat(self, p);
+ return;
+ }
+ };
+ let variant = adt.variant_of_res(self.save_ctxt.get_path_res(p.hir_id));
+
+ for field in fields {
+ if let Some(index) = self.tcx.find_field_index(field.ident, variant) {
+ if !self.span.filter_generated(field.ident.span) {
+ let span = self.span_from_span(field.ident.span);
+ self.dumper.dump_ref(Ref {
+ kind: RefKind::Variable,
+ span,
+ ref_id: id_from_def_id(variant.fields[index].did),
+ });
+ }
+ }
+ self.visit_pat(&field.pat);
+ }
+ }
+ _ => intravisit::walk_pat(self, p),
+ }
+ }
+
+ fn process_var_decl(&mut self, pat: &'tcx hir::Pat<'tcx>) {
+ // The pattern could declare multiple new vars,
+ // so we must walk the pattern and collect them all.
+ let mut collector = PathCollector::new(self.tcx);
+ collector.visit_pat(&pat);
+ self.visit_pat(&pat);
+
+ // Process collected paths.
+ for (id, ident, _) in collector.collected_idents {
+ let res = self.save_ctxt.get_path_res(id);
+ match res {
+ Res::Local(hir_id) => {
+ let typ = self
+ .save_ctxt
+ .typeck_results()
+ .node_type_opt(hir_id)
+ .map(|t| t.to_string())
+ .unwrap_or_default();
+
+ // Rust uses the id of the pattern for var lookups, so we'll use it too.
+ if !self.span.filter_generated(ident.span) {
+ let qualname = format!("{}${}", ident, hir_id);
+ let id = id_from_hir_id(hir_id, &self.save_ctxt);
+ let span = self.span_from_span(ident.span);
+
+ self.dumper.dump_def(
+ &Access { public: false, reachable: false },
+ Def {
+ kind: DefKind::Local,
+ id,
+ span,
+ name: ident.to_string(),
+ qualname,
+ value: typ,
+ parent: None,
+ children: vec![],
+ decl_id: None,
+ docs: String::new(),
+ sig: None,
+ attributes: vec![],
+ },
+ );
+ }
+ }
+ Res::Def(
+ HirDefKind::Ctor(..)
+ | HirDefKind::Const
+ | HirDefKind::AssocConst
+ | HirDefKind::Struct
+ | HirDefKind::Variant
+ | HirDefKind::TyAlias
+ | HirDefKind::AssocTy,
+ _,
+ )
+ | Res::SelfTy { .. } => {
+ self.dump_path_segment_ref(id, &hir::PathSegment::from_ident(ident));
+ }
+ def => {
+ error!("unexpected definition kind when processing collected idents: {:?}", def)
+ }
+ }
+ }
+
+ for (id, ref path) in collector.collected_paths {
+ self.process_path(id, path);
+ }
+ }
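+
+ // Illustrative sketch (not part of the upstream source): one pattern can
+ // declare several vars, all of which the collector above must pick up
+ // (`Wrapper` and `make_pair` are hypothetical):
+ //
+ //     let (a, Wrapper { b, .. }) = make_pair(); // collects `a` and `b`,
+ //                                               // each dumped as a `Local` def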
+
+ /// Extracts macro use and definition information from the AST node defined
+ /// by the given NodeId, using the expansion information from the node's
+ /// span.
+ ///
+ /// If the span is not macro-generated, do nothing, else use callee and
+ /// callsite spans to record macro definition and use data, using the
+ /// mac_uses and mac_defs sets to prevent multiples.
+ fn process_macro_use(&mut self, _span: Span) {
+ // FIXME if we're not dumping the defs (see below), there is no point
+ // dumping refs either.
+ // let source_span = span.source_callsite();
+ // if !self.macro_calls.insert(source_span) {
+ // return;
+ // }
+
+ // let data = match self.save_ctxt.get_macro_use_data(span) {
+ // None => return,
+ // Some(data) => data,
+ // };
+
+ // self.dumper.macro_use(data);
+
+ // FIXME write the macro def
+ // let mut hasher = DefaultHasher::new();
+ // data.callee_span.hash(&mut hasher);
+ // let hash = hasher.finish();
+ // let qualname = format!("{}::{}", data.name, hash);
+ // Don't write macro definition for imported macros
+ // if !self.mac_defs.contains(&data.callee_span)
+ // && !data.imported {
+ // self.mac_defs.insert(data.callee_span);
+ // if let Some(sub_span) = self.span.span_for_macro_def_name(data.callee_span) {
+ // self.dumper.macro_data(MacroData {
+ // span: sub_span,
+ // name: data.name.clone(),
+ // qualname: qualname.clone(),
+ // // FIXME where do macro docs come from?
+ // docs: String::new(),
+ // }.lower(self.tcx));
+ // }
+ // }
+ }
+
+ fn process_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>, trait_id: DefId) {
+ self.process_macro_use(trait_item.span);
+ match trait_item.kind {
+ hir::TraitItemKind::Const(ref ty, body) => {
+ let body = body.map(|b| &self.tcx.hir().body(b).value);
+ let attrs = self.tcx.hir().attrs(trait_item.hir_id());
+ self.process_assoc_const(
+ trait_item.def_id,
+ trait_item.ident,
+ &ty,
+ body,
+ trait_id,
+ attrs,
+ );
+ }
+ hir::TraitItemKind::Fn(ref sig, ref trait_fn) => {
+ let body =
+ if let hir::TraitFn::Provided(body) = trait_fn { Some(*body) } else { None };
+ self.process_method(
+ sig,
+ body,
+ trait_item.def_id,
+ trait_item.ident,
+ &trait_item.generics,
+ trait_item.span,
+ );
+ }
+ hir::TraitItemKind::Type(ref bounds, ref default_ty) => {
+ // FIXME: do something with the bounds (for type refs).
+ let name = trait_item.ident.name.to_string();
+ let qualname =
+ format!("::{}", self.tcx.def_path_str(trait_item.def_id.to_def_id()));
+
+ if !self.span.filter_generated(trait_item.ident.span) {
+ let span = self.span_from_span(trait_item.ident.span);
+ let id = id_from_def_id(trait_item.def_id.to_def_id());
+ let attrs = self.tcx.hir().attrs(trait_item.hir_id());
+
+ self.dumper.dump_def(
+ &Access { public: true, reachable: true },
+ Def {
+ kind: DefKind::Type,
+ id,
+ span,
+ name,
+ qualname,
+ value: self.span.snippet(trait_item.span),
+ parent: Some(id_from_def_id(trait_id)),
+ children: vec![],
+ decl_id: None,
+ docs: self.save_ctxt.docs_for_attrs(attrs),
+ sig: sig::assoc_type_signature(
+ trait_item.hir_id(),
+ trait_item.ident,
+ Some(bounds),
+ default_ty.as_ref().map(|ty| &**ty),
+ &self.save_ctxt,
+ ),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
+ },
+ );
+ }
+
+ if let Some(default_ty) = default_ty {
+ self.visit_ty(default_ty)
+ }
+ }
+ }
+ }
+
+ fn process_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>, impl_id: DefId) {
+ self.process_macro_use(impl_item.span);
+ match impl_item.kind {
+ hir::ImplItemKind::Const(ref ty, body) => {
+ let body = self.tcx.hir().body(body);
+ let attrs = self.tcx.hir().attrs(impl_item.hir_id());
+ self.process_assoc_const(
+ impl_item.def_id,
+ impl_item.ident,
+ &ty,
+ Some(&body.value),
+ impl_id,
+ attrs,
+ );
+ }
+ hir::ImplItemKind::Fn(ref sig, body) => {
+ self.process_method(
+ sig,
+ Some(body),
+ impl_item.def_id,
+ impl_item.ident,
+ &impl_item.generics,
+ impl_item.span,
+ );
+ }
+ hir::ImplItemKind::TyAlias(ref ty) => {
+ // FIXME: uses of the assoc type should ideally point to this
+ // 'def' and the name here should be a ref to the def in the
+ // trait.
+ self.visit_ty(ty)
+ }
+ }
+ }
+
+ pub(crate) fn process_crate(&mut self) {
+ let id = hir::CRATE_HIR_ID;
+ let qualname =
+ format!("::{}", self.tcx.def_path_str(self.tcx.hir().local_def_id(id).to_def_id()));
+
+ let sm = self.tcx.sess.source_map();
+ let krate_mod = self.tcx.hir().root_module();
+ let filename = sm.span_to_filename(krate_mod.spans.inner_span);
+ let data_id = id_from_hir_id(id, &self.save_ctxt);
+ let children =
+ krate_mod.item_ids.iter().map(|i| id_from_def_id(i.def_id.to_def_id())).collect();
+ let span = self.span_from_span(krate_mod.spans.inner_span);
+ let attrs = self.tcx.hir().attrs(id);
+
+ self.dumper.dump_def(
+ &Access { public: true, reachable: true },
+ Def {
+ kind: DefKind::Mod,
+ id: data_id,
+ name: String::new(),
+ qualname,
+ span,
+ value: filename.prefer_remapped().to_string(),
+ children,
+ parent: None,
+ decl_id: None,
+ docs: self.save_ctxt.docs_for_attrs(attrs),
+ sig: None,
+ attributes: lower_attributes(attrs.to_owned(), &self.save_ctxt),
+ },
+ );
+ self.tcx.hir().walk_toplevel_module(self);
+ }
+
+ fn process_bounds(&mut self, bounds: hir::GenericBounds<'tcx>) {
+ for bound in bounds {
+ if let hir::GenericBound::Trait(ref trait_ref, _) = *bound {
+ self.process_path(
+ trait_ref.trait_ref.hir_ref_id,
+ &hir::QPath::Resolved(None, &trait_ref.trait_ref.path),
+ )
+ }
+ }
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for DumpVisitor<'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ self.process_macro_use(item.span);
+ match item.kind {
+ hir::ItemKind::Use(path, hir::UseKind::Single) => {
+ let sub_span = path.segments.last().unwrap().ident.span;
+ if !self.span.filter_generated(sub_span) {
+ let access = access_from!(self.save_ctxt, item.def_id);
+ let ref_id = self.lookup_def_id(item.hir_id()).map(id_from_def_id);
+ let span = self.span_from_span(sub_span);
+ let parent = self.save_ctxt.tcx.local_parent(item.def_id);
+ self.dumper.import(
+ &access,
+ Import {
+ kind: ImportKind::Use,
+ ref_id,
+ span,
+ alias_span: None,
+ name: item.ident.to_string(),
+ value: String::new(),
+ parent: Some(id_from_def_id(parent.to_def_id())),
+ },
+ );
+ self.write_sub_paths_truncated(&path);
+ }
+ }
+ hir::ItemKind::Use(path, hir::UseKind::Glob) => {
+ // Make a comma-separated list of names of imported modules.
+ let names = self.tcx.names_imported_by_glob_use(item.def_id);
+ let names: Vec<_> = names.iter().map(|n| n.to_string()).collect();
+
+                // If there is no `*` sub-span, the span has wrong macro expansion
+                // info, which we don't want to track anyway: it's probably a macro-internal `use`.
+ if let Some(sub_span) = self.span.sub_span_of_star(item.span) {
+ if !self.span.filter_generated(item.span) {
+ let access = access_from!(self.save_ctxt, item.def_id);
+ let span = self.span_from_span(sub_span);
+ let parent = self.save_ctxt.tcx.local_parent(item.def_id);
+ self.dumper.import(
+ &access,
+ Import {
+ kind: ImportKind::GlobUse,
+ ref_id: None,
+ span,
+ alias_span: None,
+ name: "*".to_owned(),
+ value: names.join(", "),
+ parent: Some(id_from_def_id(parent.to_def_id())),
+ },
+ );
+ self.write_sub_paths(&path);
+ }
+ }
+ }
+ hir::ItemKind::ExternCrate(_) => {
+ let name_span = item.ident.span;
+ if !self.span.filter_generated(name_span) {
+ let span = self.span_from_span(name_span);
+ let parent = self.save_ctxt.tcx.local_parent(item.def_id);
+ self.dumper.import(
+ &Access { public: false, reachable: false },
+ Import {
+ kind: ImportKind::ExternCrate,
+ ref_id: None,
+ span,
+ alias_span: None,
+ name: item.ident.to_string(),
+ value: String::new(),
+ parent: Some(id_from_def_id(parent.to_def_id())),
+ },
+ );
+ }
+ }
+ hir::ItemKind::Fn(ref sig, ref ty_params, body) => {
+ self.process_fn(item, sig.decl, &sig.header, ty_params, body)
+ }
+ hir::ItemKind::Static(ref typ, _, body) => {
+ let body = self.tcx.hir().body(body);
+ self.process_static_or_const_item(item, typ, &body.value)
+ }
+ hir::ItemKind::Const(ref typ, body) => {
+ let body = self.tcx.hir().body(body);
+ self.process_static_or_const_item(item, typ, &body.value)
+ }
+ hir::ItemKind::Struct(ref def, ref ty_params)
+ | hir::ItemKind::Union(ref def, ref ty_params) => {
+ self.process_struct(item, def, ty_params)
+ }
+ hir::ItemKind::Enum(ref def, ref ty_params) => self.process_enum(item, def, ty_params),
+ hir::ItemKind::Impl(ref impl_) => self.process_impl(item, impl_),
+ hir::ItemKind::Trait(_, _, ref generics, ref trait_refs, methods) => {
+ self.process_trait(item, generics, trait_refs, methods)
+ }
+ hir::ItemKind::Mod(ref m) => {
+ self.process_mod(item);
+ intravisit::walk_mod(self, m, item.hir_id());
+ }
+ hir::ItemKind::TyAlias(ty, ref generics) => {
+ let qualname = format!("::{}", self.tcx.def_path_str(item.def_id.to_def_id()));
+ let value = ty_to_string(&ty);
+ if !self.span.filter_generated(item.ident.span) {
+ let span = self.span_from_span(item.ident.span);
+ let id = id_from_def_id(item.def_id.to_def_id());
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+
+ self.dumper.dump_def(
+ &access_from!(self.save_ctxt, item.def_id),
+ Def {
+ kind: DefKind::Type,
+ id,
+ span,
+ name: item.ident.to_string(),
+ qualname: qualname.clone(),
+ value,
+ parent: None,
+ children: vec![],
+ decl_id: None,
+ docs: self.save_ctxt.docs_for_attrs(attrs),
+ sig: sig::item_signature(item, &self.save_ctxt),
+ attributes: lower_attributes(attrs.to_vec(), &self.save_ctxt),
+ },
+ );
+ }
+
+ self.visit_ty(ty);
+ self.process_generic_params(generics, &qualname, item.hir_id());
+ }
+ _ => intravisit::walk_item(self, item),
+ }
+ }
+
+ fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
+ for param in generics.params {
+ match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => {}
+ hir::GenericParamKind::Type { ref default, .. } => {
+ if let Some(ref ty) = default {
+ self.visit_ty(ty);
+ }
+ }
+ hir::GenericParamKind::Const { ref ty, ref default } => {
+ self.visit_ty(ty);
+ if let Some(default) = default {
+ self.visit_anon_const(default);
+ }
+ }
+ }
+ }
+ for pred in generics.predicates {
+ if let hir::WherePredicate::BoundPredicate(ref wbp) = *pred {
+ self.process_bounds(wbp.bounds);
+ self.visit_ty(wbp.bounded_ty);
+ }
+ }
+ }
+
+ fn visit_ty(&mut self, t: &'tcx hir::Ty<'tcx>) {
+ self.process_macro_use(t.span);
+ match t.kind {
+ hir::TyKind::Path(ref path) => {
+ if generated_code(t.span) {
+ return;
+ }
+
+ if let Some(id) = self.lookup_def_id(t.hir_id) {
+ let sub_span = path.last_segment_span();
+ let span = self.span_from_span(sub_span);
+ self.dumper.dump_ref(Ref {
+ kind: RefKind::Type,
+ span,
+ ref_id: id_from_def_id(id),
+ });
+ }
+
+ if let hir::QPath::Resolved(_, path) = path {
+ self.write_sub_paths_truncated(path);
+ }
+ intravisit::walk_qpath(self, path, t.hir_id, t.span);
+ }
+ hir::TyKind::Array(ref ty, ref length) => {
+ self.visit_ty(ty);
+ let map = self.tcx.hir();
+ match length {
+ // FIXME(generic_arg_infer): We probably want to
+ // output the inferred type here? :shrug:
+ hir::ArrayLen::Infer(..) => {}
+ hir::ArrayLen::Body(anon_const) => self
+ .nest_typeck_results(self.tcx.hir().local_def_id(anon_const.hir_id), |v| {
+ v.visit_expr(&map.body(anon_const.body).value)
+ }),
+ }
+ }
+ hir::TyKind::OpaqueDef(item_id, _) => {
+ let item = self.tcx.hir().item(item_id);
+ self.nest_typeck_results(item_id.def_id, |v| v.visit_item(item));
+ }
+ _ => intravisit::walk_ty(self, t),
+ }
+ }
+
+ fn visit_expr(&mut self, ex: &'tcx hir::Expr<'tcx>) {
+ debug!("visit_expr {:?}", ex.kind);
+ self.process_macro_use(ex.span);
+ match ex.kind {
+ hir::ExprKind::Struct(ref path, ref fields, ref rest) => {
+ let hir_expr = self.save_ctxt.tcx.hir().expect_expr(ex.hir_id);
+ let adt = match self.save_ctxt.typeck_results().expr_ty_opt(&hir_expr) {
+ Some(ty) if ty.ty_adt_def().is_some() => ty.ty_adt_def().unwrap(),
+ _ => {
+ intravisit::walk_expr(self, ex);
+ return;
+ }
+ };
+ let res = self.save_ctxt.get_path_res(hir_expr.hir_id);
+ self.process_struct_lit(ex, path, fields, adt.variant_of_res(res), *rest)
+ }
+ hir::ExprKind::MethodCall(ref seg, args, _) => self.process_method_call(ex, seg, args),
+ hir::ExprKind::Field(ref sub_ex, _) => {
+ self.visit_expr(&sub_ex);
+
+ if let Some(field_data) = self.save_ctxt.get_expr_data(ex) {
+ down_cast_data!(field_data, RefData, ex.span);
+ if !generated_code(ex.span) {
+ self.dumper.dump_ref(field_data);
+ }
+ }
+ }
+ hir::ExprKind::Closure(&hir::Closure { ref fn_decl, body, .. }) => {
+ let id = format!("${}", ex.hir_id);
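+                // Closures have no name, so synthesize a `$<hir_id>` qualname for their formals.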
+
+ // walk arg and return types
+ for ty in fn_decl.inputs {
+ self.visit_ty(ty);
+ }
+
+ if let hir::FnRetTy::Return(ref ret_ty) = fn_decl.output {
+ self.visit_ty(ret_ty);
+ }
+
+ // walk the body
+ let map = self.tcx.hir();
+ self.nest_typeck_results(self.tcx.hir().local_def_id(ex.hir_id), |v| {
+ let body = map.body(body);
+ v.process_formals(body.params, &id);
+ v.visit_expr(&body.value)
+ });
+ }
+ hir::ExprKind::Repeat(ref expr, ref length) => {
+ self.visit_expr(expr);
+ let map = self.tcx.hir();
+ match length {
+ // FIXME(generic_arg_infer): We probably want to
+ // output the inferred type here? :shrug:
+ hir::ArrayLen::Infer(..) => {}
+ hir::ArrayLen::Body(anon_const) => self
+ .nest_typeck_results(self.tcx.hir().local_def_id(anon_const.hir_id), |v| {
+ v.visit_expr(&map.body(anon_const.body).value)
+ }),
+ }
+ }
+ // In particular, we take this branch for call and path expressions,
+ // where we'll index the idents involved just by continuing to walk.
+ _ => intravisit::walk_expr(self, ex),
+ }
+ }
+
+ fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) {
+ self.process_macro_use(p.span);
+ self.process_pat(p);
+ }
+
+ fn visit_arm(&mut self, arm: &'tcx hir::Arm<'tcx>) {
+ self.process_var_decl(&arm.pat);
+ if let Some(hir::Guard::If(expr)) = &arm.guard {
+ self.visit_expr(expr);
+ }
+ self.visit_expr(&arm.body);
+ }
+
+ fn visit_qpath(&mut self, path: &'tcx hir::QPath<'tcx>, id: hir::HirId, _: Span) {
+ self.process_path(id, path);
+ }
+
+ fn visit_stmt(&mut self, s: &'tcx hir::Stmt<'tcx>) {
+ self.process_macro_use(s.span);
+ intravisit::walk_stmt(self, s)
+ }
+
+ fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
+ self.process_macro_use(l.span);
+ self.process_var_decl(&l.pat);
+
+ // Just walk the initializer, the else branch and type (don't want to walk the pattern again).
+ walk_list!(self, visit_ty, &l.ty);
+ walk_list!(self, visit_expr, &l.init);
+ walk_list!(self, visit_block, l.els);
+ }
+
+ fn visit_foreign_item(&mut self, item: &'tcx hir::ForeignItem<'tcx>) {
+ let access = access_from!(self.save_ctxt, item.def_id);
+
+ match item.kind {
+ hir::ForeignItemKind::Fn(decl, _, ref generics) => {
+ if let Some(fn_data) = self.save_ctxt.get_extern_item_data(item) {
+ down_cast_data!(fn_data, DefData, item.span);
+
+ self.process_generic_params(generics, &fn_data.qualname, item.hir_id());
+ self.dumper.dump_def(&access, fn_data);
+ }
+
+ for ty in decl.inputs {
+ self.visit_ty(ty);
+ }
+
+ if let hir::FnRetTy::Return(ref ret_ty) = decl.output {
+ self.visit_ty(ret_ty);
+ }
+ }
+ hir::ForeignItemKind::Static(ref ty, _) => {
+ if let Some(var_data) = self.save_ctxt.get_extern_item_data(item) {
+ down_cast_data!(var_data, DefData, item.span);
+ self.dumper.dump_def(&access, var_data);
+ }
+
+ self.visit_ty(ty);
+ }
+ hir::ForeignItemKind::Type => {
+ if let Some(var_data) = self.save_ctxt.get_extern_item_data(item) {
+ down_cast_data!(var_data, DefData, item.span);
+ self.dumper.dump_def(&access, var_data);
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_save_analysis/src/dumper.rs b/compiler/rustc_save_analysis/src/dumper.rs
new file mode 100644
index 000000000..5a2628287
--- /dev/null
+++ b/compiler/rustc_save_analysis/src/dumper.rs
@@ -0,0 +1,93 @@
+use rls_data::config::Config;
+use rls_data::{
+ self, Analysis, CompilationOptions, CratePreludeData, Def, DefKind, Impl, Import, MacroRef,
+ Ref, RefKind, Relation,
+};
+use rls_span::{Column, Row};
+
+#[derive(Debug)]
+pub struct Access {
+ pub reachable: bool,
+ pub public: bool,
+}
+
+pub struct Dumper {
+ result: Analysis,
+ config: Config,
+}
+
+impl Dumper {
+ pub fn new(config: Config) -> Dumper {
+ Dumper { config: config.clone(), result: Analysis::new(config) }
+ }
+
+ pub fn analysis(&self) -> &Analysis {
+ &self.result
+ }
+}
+
+impl Dumper {
+ pub fn crate_prelude(&mut self, data: CratePreludeData) {
+ self.result.prelude = Some(data)
+ }
+
+ pub fn compilation_opts(&mut self, data: CompilationOptions) {
+ self.result.compilation = Some(data);
+ }
+
+ pub fn _macro_use(&mut self, data: MacroRef) {
+ if self.config.pub_only || self.config.reachable_only {
+ return;
+ }
+ self.result.macro_refs.push(data);
+ }
+
+ pub fn import(&mut self, access: &Access, import: Import) {
+ if !access.public && self.config.pub_only || !access.reachable && self.config.reachable_only
+ {
+ return;
+ }
+ self.result.imports.push(import);
+ }
+
+ pub fn dump_ref(&mut self, data: Ref) {
+ if self.config.pub_only || self.config.reachable_only {
+ return;
+ }
+ self.result.refs.push(data);
+ }
+
+ pub fn dump_def(&mut self, access: &Access, mut data: Def) {
+ if !access.public && self.config.pub_only || !access.reachable && self.config.reachable_only
+ {
+ return;
+ }
+ if data.kind == DefKind::Mod && data.span.file_name.to_str().unwrap() != data.value {
+ // If the module is an out-of-line definition, then we'll make the
+ // definition the first character in the module's file and turn
+ // the declaration into a reference to it.
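+            // Illustrative example: for `mod foo;` declared in lib.rs with its body
+            // in foo.rs, the def's span becomes foo.rs 1:1, while a Mod ref keeps
+            // pointing at the declaration site.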
+ let rf = Ref { kind: RefKind::Mod, span: data.span, ref_id: data.id };
+ self.result.refs.push(rf);
+ data.span = rls_data::SpanData {
+ file_name: data.value.clone().into(),
+ byte_start: 0,
+ byte_end: 0,
+ line_start: Row::new_one_indexed(1),
+ line_end: Row::new_one_indexed(1),
+ column_start: Column::new_one_indexed(1),
+ column_end: Column::new_one_indexed(1),
+ }
+ }
+ self.result.defs.push(data);
+ }
+
+ pub fn dump_relation(&mut self, data: Relation) {
+ self.result.relations.push(data);
+ }
+
+ pub fn dump_impl(&mut self, data: Impl) {
+ self.result.impls.push(data);
+ }
+}
diff --git a/compiler/rustc_save_analysis/src/lib.rs b/compiler/rustc_save_analysis/src/lib.rs
new file mode 100644
index 000000000..a1a2040bb
--- /dev/null
+++ b/compiler/rustc_save_analysis/src/lib.rs
@@ -0,0 +1,1084 @@
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(if_let_guard)]
+#![feature(let_else)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+mod dump_visitor;
+mod dumper;
+#[macro_use]
+mod span_utils;
+mod sig;
+
+use rustc_ast as ast;
+use rustc_ast::util::comments::beautify_doc_string;
+use rustc_ast_pretty::pprust::attribute_to_string;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind as HirDefKind, Res};
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::Node;
+use rustc_hir_pretty::{enum_def_to_string, fn_to_string, ty_to_string};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::privacy::AccessLevels;
+use rustc_middle::ty::{self, print::with_no_trimmed_paths, DefIdTree, TyCtxt};
+use rustc_middle::{bug, span_bug};
+use rustc_session::config::{CrateType, Input, OutputType};
+use rustc_session::cstore::ExternCrate;
+use rustc_session::output::{filename_for_metadata, out_filename};
+use rustc_span::symbol::Ident;
+use rustc_span::*;
+
+use std::cell::Cell;
+use std::default::Default;
+use std::env;
+use std::fs::File;
+use std::io::BufWriter;
+use std::path::{Path, PathBuf};
+
+use dump_visitor::DumpVisitor;
+use span_utils::SpanUtils;
+
+use rls_data::config::Config;
+use rls_data::{
+ Analysis, Def, DefKind, ExternalCrateData, GlobalCrateId, Impl, ImplKind, MacroRef, Ref,
+ RefKind, Relation, RelationKind, SpanData,
+};
+
+use tracing::{debug, error, info};
+
+pub struct SaveContext<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ maybe_typeck_results: Option<&'tcx ty::TypeckResults<'tcx>>,
+ access_levels: &'tcx AccessLevels,
+ span_utils: SpanUtils<'tcx>,
+ config: Config,
+ impl_counter: Cell<u32>,
+}
+
+#[derive(Debug)]
+pub enum Data {
+ RefData(Ref),
+ DefData(Def),
+ RelationData(Relation, Impl),
+}
+
+impl<'tcx> SaveContext<'tcx> {
+ /// Gets the type-checking results for the current body.
+ /// As this will ICE if called outside bodies, only call when working with
+ /// `Expr` or `Pat` nodes (they are guaranteed to be found only in bodies).
+ #[track_caller]
+ fn typeck_results(&self) -> &'tcx ty::TypeckResults<'tcx> {
+ self.maybe_typeck_results.expect("`SaveContext::typeck_results` called outside of body")
+ }
+
+ fn span_from_span(&self, span: Span) -> SpanData {
+ use rls_span::{Column, Row};
+
+ let sm = self.tcx.sess.source_map();
+ let start = sm.lookup_char_pos(span.lo());
+ let end = sm.lookup_char_pos(span.hi());
+
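+        // Note: `Loc` rows are already 1-indexed but columns are 0-indexed, while
+        // rls-data expects both to be 1-indexed; hence the `+ 1` on columns below.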
+ SpanData {
+ file_name: start.file.name.prefer_remapped().to_string().into(),
+ byte_start: span.lo().0,
+ byte_end: span.hi().0,
+ line_start: Row::new_one_indexed(start.line as u32),
+ line_end: Row::new_one_indexed(end.line as u32),
+ column_start: Column::new_one_indexed(start.col.0 as u32 + 1),
+ column_end: Column::new_one_indexed(end.col.0 as u32 + 1),
+ }
+ }
+
+ // Returns path to the compilation output (e.g., libfoo-12345678.rmeta)
+ pub fn compilation_output(&self, crate_name: &str) -> PathBuf {
+ let sess = &self.tcx.sess;
+ // Save-analysis is emitted per whole session, not per each crate type
+ let crate_type = sess.crate_types()[0];
+ let outputs = &*self.tcx.output_filenames(());
+
+ if outputs.outputs.contains_key(&OutputType::Metadata) {
+ filename_for_metadata(sess, crate_name, outputs)
+ } else if outputs.outputs.should_codegen() {
+ out_filename(sess, crate_type, outputs, crate_name)
+ } else {
+            // Otherwise it's only a DepInfo, in which case we return early and
+            // never reach the analysis stage.
+ unreachable!()
+ }
+ }
+
+ // List external crates used by the current crate.
+ pub fn get_external_crates(&self) -> Vec<ExternalCrateData> {
+ let mut result = Vec::with_capacity(self.tcx.crates(()).len());
+
+ for &n in self.tcx.crates(()).iter() {
+ let Some(&ExternCrate { span, .. }) = self.tcx.extern_crate(n.as_def_id()) else {
+ debug!("skipping crate {}, no data", n);
+ continue;
+ };
+ let lo_loc = self.span_utils.sess.source_map().lookup_char_pos(span.lo());
+ result.push(ExternalCrateData {
+ // FIXME: change file_name field to PathBuf in rls-data
+ // https://github.com/nrc/rls-data/issues/7
+ file_name: self.span_utils.make_filename_string(&lo_loc.file),
+ num: n.as_u32(),
+ id: GlobalCrateId {
+ name: self.tcx.crate_name(n).to_string(),
+ disambiguator: (
+ self.tcx.def_path_hash(n.as_def_id()).stable_crate_id().to_u64(),
+ 0,
+ ),
+ },
+ });
+ }
+
+ result
+ }
+
+ pub fn get_extern_item_data(&self, item: &hir::ForeignItem<'_>) -> Option<Data> {
+ let def_id = item.def_id.to_def_id();
+ let qualname = format!("::{}", self.tcx.def_path_str(def_id));
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+ match item.kind {
+ hir::ForeignItemKind::Fn(ref decl, arg_names, ref generics) => {
+ filter!(self.span_utils, item.ident.span);
+
+ Some(Data::DefData(Def {
+ kind: DefKind::ForeignFunction,
+ id: id_from_def_id(def_id),
+ span: self.span_from_span(item.ident.span),
+ name: item.ident.to_string(),
+ qualname,
+ value: fn_to_string(
+ decl,
+ hir::FnHeader {
+                            // functions in an extern block are implicitly unsafe
+                            unsafety: hir::Unsafety::Unsafe,
+                            // functions in an extern block cannot be const
+                            constness: hir::Constness::NotConst,
+                            abi: self.tcx.hir().get_foreign_abi(item.hir_id()),
+                            // functions in an extern block cannot be async
+                            asyncness: hir::IsAsync::NotAsync,
+ },
+ Some(item.ident.name),
+ generics,
+ arg_names,
+ None,
+ ),
+ parent: None,
+ children: vec![],
+ decl_id: None,
+ docs: self.docs_for_attrs(attrs),
+ sig: sig::foreign_item_signature(item, self),
+ attributes: lower_attributes(attrs.to_vec(), self),
+ }))
+ }
+ hir::ForeignItemKind::Static(ref ty, _) => {
+ filter!(self.span_utils, item.ident.span);
+
+ let id = id_from_def_id(def_id);
+ let span = self.span_from_span(item.ident.span);
+
+ Some(Data::DefData(Def {
+ kind: DefKind::ForeignStatic,
+ id,
+ span,
+ name: item.ident.to_string(),
+ qualname,
+ value: ty_to_string(ty),
+ parent: None,
+ children: vec![],
+ decl_id: None,
+ docs: self.docs_for_attrs(attrs),
+ sig: sig::foreign_item_signature(item, self),
+ attributes: lower_attributes(attrs.to_vec(), self),
+ }))
+ }
+ // FIXME(plietar): needs a new DefKind in rls-data
+ hir::ForeignItemKind::Type => None,
+ }
+ }
+
+ pub fn get_item_data(&self, item: &hir::Item<'_>) -> Option<Data> {
+ let def_id = item.def_id.to_def_id();
+ let attrs = self.tcx.hir().attrs(item.hir_id());
+ match item.kind {
+ hir::ItemKind::Fn(ref sig, ref generics, _) => {
+ let qualname = format!("::{}", self.tcx.def_path_str(def_id));
+ filter!(self.span_utils, item.ident.span);
+ Some(Data::DefData(Def {
+ kind: DefKind::Function,
+ id: id_from_def_id(def_id),
+ span: self.span_from_span(item.ident.span),
+ name: item.ident.to_string(),
+ qualname,
+ value: fn_to_string(
+ sig.decl,
+ sig.header,
+ Some(item.ident.name),
+ generics,
+ &[],
+ None,
+ ),
+ parent: None,
+ children: vec![],
+ decl_id: None,
+ docs: self.docs_for_attrs(attrs),
+ sig: sig::item_signature(item, self),
+ attributes: lower_attributes(attrs.to_vec(), self),
+ }))
+ }
+ hir::ItemKind::Static(ref typ, ..) => {
+ let qualname = format!("::{}", self.tcx.def_path_str(def_id));
+
+ filter!(self.span_utils, item.ident.span);
+
+ let id = id_from_def_id(def_id);
+ let span = self.span_from_span(item.ident.span);
+
+ Some(Data::DefData(Def {
+ kind: DefKind::Static,
+ id,
+ span,
+ name: item.ident.to_string(),
+ qualname,
+ value: ty_to_string(&typ),
+ parent: None,
+ children: vec![],
+ decl_id: None,
+ docs: self.docs_for_attrs(attrs),
+ sig: sig::item_signature(item, self),
+ attributes: lower_attributes(attrs.to_vec(), self),
+ }))
+ }
+ hir::ItemKind::Const(ref typ, _) => {
+ let qualname = format!("::{}", self.tcx.def_path_str(def_id));
+ filter!(self.span_utils, item.ident.span);
+
+ let id = id_from_def_id(def_id);
+ let span = self.span_from_span(item.ident.span);
+
+ Some(Data::DefData(Def {
+ kind: DefKind::Const,
+ id,
+ span,
+ name: item.ident.to_string(),
+ qualname,
+ value: ty_to_string(typ),
+ parent: None,
+ children: vec![],
+ decl_id: None,
+ docs: self.docs_for_attrs(attrs),
+ sig: sig::item_signature(item, self),
+ attributes: lower_attributes(attrs.to_vec(), self),
+ }))
+ }
+ hir::ItemKind::Mod(ref m) => {
+ let qualname = format!("::{}", self.tcx.def_path_str(def_id));
+
+ let sm = self.tcx.sess.source_map();
+ let filename = sm.span_to_filename(m.spans.inner_span);
+
+ filter!(self.span_utils, item.ident.span);
+
+ Some(Data::DefData(Def {
+ kind: DefKind::Mod,
+ id: id_from_def_id(def_id),
+ name: item.ident.to_string(),
+ qualname,
+ span: self.span_from_span(item.ident.span),
+ value: filename.prefer_remapped().to_string(),
+ parent: None,
+ children: m
+ .item_ids
+ .iter()
+ .map(|i| id_from_def_id(i.def_id.to_def_id()))
+ .collect(),
+ decl_id: None,
+ docs: self.docs_for_attrs(attrs),
+ sig: sig::item_signature(item, self),
+ attributes: lower_attributes(attrs.to_vec(), self),
+ }))
+ }
+ hir::ItemKind::Enum(ref def, ref generics) => {
+ let name = item.ident.to_string();
+ let qualname = format!("::{}", self.tcx.def_path_str(def_id));
+ filter!(self.span_utils, item.ident.span);
+ let value =
+ enum_def_to_string(def, generics, item.ident.name, item.span);
+ Some(Data::DefData(Def {
+ kind: DefKind::Enum,
+ id: id_from_def_id(def_id),
+ span: self.span_from_span(item.ident.span),
+ name,
+ qualname,
+ value,
+ parent: None,
+ children: def.variants.iter().map(|v| id_from_hir_id(v.id, self)).collect(),
+ decl_id: None,
+ docs: self.docs_for_attrs(attrs),
+ sig: sig::item_signature(item, self),
+ attributes: lower_attributes(attrs.to_vec(), self),
+ }))
+ }
+ hir::ItemKind::Impl(hir::Impl { ref of_trait, ref self_ty, ref items, .. })
+ if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = self_ty.kind =>
+ {
+ // Common case impl for a struct or something basic.
+ if generated_code(path.span) {
+ return None;
+ }
+ let sub_span = path.segments.last().unwrap().ident.span;
+ filter!(self.span_utils, sub_span);
+
+ let impl_id = self.next_impl_id();
+ let span = self.span_from_span(sub_span);
+
+ let type_data = self.lookup_def_id(self_ty.hir_id);
+ type_data.map(|type_data| {
+ Data::RelationData(
+ Relation {
+ kind: RelationKind::Impl { id: impl_id },
+ span: span.clone(),
+ from: id_from_def_id(type_data),
+ to: of_trait
+ .as_ref()
+ .and_then(|t| self.lookup_def_id(t.hir_ref_id))
+ .map(id_from_def_id)
+ .unwrap_or_else(null_id),
+ },
+ Impl {
+ id: impl_id,
+ kind: match *of_trait {
+ Some(_) => ImplKind::Direct,
+ None => ImplKind::Inherent,
+ },
+ span,
+ value: String::new(),
+ parent: None,
+ children: items
+ .iter()
+ .map(|i| id_from_def_id(i.id.def_id.to_def_id()))
+ .collect(),
+ docs: String::new(),
+ sig: None,
+ attributes: vec![],
+ },
+ )
+ })
+ }
+ hir::ItemKind::Impl(_) => None,
+ _ => {
+ // FIXME
+ bug!();
+ }
+ }
+ }
+
+ pub fn get_field_data(&self, field: &hir::FieldDef<'_>, scope: hir::HirId) -> Option<Def> {
+ let name = field.ident.to_string();
+ let scope_def_id = self.tcx.hir().local_def_id(scope).to_def_id();
+ let qualname = format!("::{}::{}", self.tcx.def_path_str(scope_def_id), field.ident);
+ filter!(self.span_utils, field.ident.span);
+ let field_def_id = self.tcx.hir().local_def_id(field.hir_id).to_def_id();
+ let typ = self.tcx.type_of(field_def_id).to_string();
+
+ let id = id_from_def_id(field_def_id);
+ let span = self.span_from_span(field.ident.span);
+ let attrs = self.tcx.hir().attrs(field.hir_id);
+
+ Some(Def {
+ kind: DefKind::Field,
+ id,
+ span,
+ name,
+ qualname,
+ value: typ,
+ parent: Some(id_from_def_id(scope_def_id)),
+ children: vec![],
+ decl_id: None,
+ docs: self.docs_for_attrs(attrs),
+ sig: sig::field_signature(field, self),
+ attributes: lower_attributes(attrs.to_vec(), self),
+ })
+ }
+
+ // FIXME would be nice to take a MethodItem here, but the ast provides both
+ // trait and impl flavours, so the caller must do the disassembly.
+ pub fn get_method_data(&self, hir_id: hir::HirId, ident: Ident, span: Span) -> Option<Def> {
+        // The qualname for a method is the trait name or the name of the struct in the
+        // impl in which the method is declared, followed by the method's name.
+ let def_id = self.tcx.hir().local_def_id(hir_id).to_def_id();
+ let (qualname, parent_scope, decl_id, docs, attributes) =
+ match self.tcx.impl_of_method(def_id) {
+ Some(impl_id) => match self.tcx.hir().get_if_local(impl_id) {
+ Some(Node::Item(item)) => match item.kind {
+ hir::ItemKind::Impl(hir::Impl { ref self_ty, .. }) => {
+ let hir = self.tcx.hir();
+
+ let mut qualname = String::from("<");
+ qualname
+ .push_str(&rustc_hir_pretty::id_to_string(&hir, self_ty.hir_id));
+
+ let trait_id = self.tcx.trait_id_of_impl(impl_id);
+ let mut docs = String::new();
+ let mut attrs = vec![];
+ if let Some(Node::ImplItem(_)) = hir.find(hir_id) {
+ attrs = self.tcx.hir().attrs(hir_id).to_vec();
+ docs = self.docs_for_attrs(&attrs);
+ }
+
+ let mut decl_id = None;
+ if let Some(def_id) = trait_id {
+ // A method in a trait impl.
+ qualname.push_str(" as ");
+ qualname.push_str(&self.tcx.def_path_str(def_id));
+
+ decl_id = self
+ .tcx
+ .associated_items(def_id)
+ .filter_by_name_unhygienic(ident.name)
+ .next()
+ .map(|item| item.def_id);
+ }
+ qualname.push('>');
+
+ (qualname, trait_id, decl_id, docs, attrs)
+ }
+ _ => {
+ span_bug!(
+ span,
+ "Container {:?} for method {} not an impl?",
+ impl_id,
+ hir_id
+ );
+ }
+ },
+ r => {
+ span_bug!(
+ span,
+ "Container {:?} for method {} is not a node item {:?}",
+ impl_id,
+ hir_id,
+ r
+ );
+ }
+ },
+ None => match self.tcx.trait_of_item(def_id) {
+ Some(def_id) => {
+ let mut docs = String::new();
+ let mut attrs = vec![];
+
+ if let Some(Node::TraitItem(_)) = self.tcx.hir().find(hir_id) {
+ attrs = self.tcx.hir().attrs(hir_id).to_vec();
+ docs = self.docs_for_attrs(&attrs);
+ }
+
+ (
+ format!("::{}", self.tcx.def_path_str(def_id)),
+ Some(def_id),
+ None,
+ docs,
+ attrs,
+ )
+ }
+ None => {
+ debug!("could not find container for method {} at {:?}", hir_id, span);
+                        // This is not necessarily a bug; if there was a compilation error,
+ // the typeck results we need might not exist.
+ return None;
+ }
+ },
+ };
+
+ let qualname = format!("{}::{}", qualname, ident.name);
+
+ filter!(self.span_utils, ident.span);
+
+ Some(Def {
+ kind: DefKind::Method,
+ id: id_from_def_id(def_id),
+ span: self.span_from_span(ident.span),
+ name: ident.name.to_string(),
+ qualname,
+ // FIXME you get better data here by using the visitor.
+ value: String::new(),
+ parent: parent_scope.map(id_from_def_id),
+ children: vec![],
+ decl_id: decl_id.map(id_from_def_id),
+ docs,
+ sig: None,
+ attributes: lower_attributes(attributes, self),
+ })
+ }
+
+ pub fn get_expr_data(&self, expr: &hir::Expr<'_>) -> Option<Data> {
+ let ty = self.typeck_results().expr_ty_adjusted_opt(expr)?;
+ if matches!(ty.kind(), ty::Error(_)) {
+ return None;
+ }
+ match expr.kind {
+ hir::ExprKind::Field(ref sub_ex, ident) => {
+ match self.typeck_results().expr_ty_adjusted(&sub_ex).kind() {
+ ty::Adt(def, _) if !def.is_enum() => {
+ let variant = &def.non_enum_variant();
+ filter!(self.span_utils, ident.span);
+ let span = self.span_from_span(ident.span);
+ Some(Data::RefData(Ref {
+ kind: RefKind::Variable,
+ span,
+ ref_id: self
+ .tcx
+ .find_field_index(ident, variant)
+ .map(|index| id_from_def_id(variant.fields[index].did))
+ .unwrap_or_else(null_id),
+ }))
+ }
+ ty::Tuple(..) => None,
+ _ => {
+ debug!("expected struct or union type, found {:?}", ty);
+ None
+ }
+ }
+ }
+ hir::ExprKind::Struct(qpath, ..) => match ty.kind() {
+ ty::Adt(def, _) => {
+ let sub_span = qpath.last_segment_span();
+ filter!(self.span_utils, sub_span);
+ let span = self.span_from_span(sub_span);
+ Some(Data::RefData(Ref {
+ kind: RefKind::Type,
+ span,
+ ref_id: id_from_def_id(def.did()),
+ }))
+ }
+ _ => {
+ debug!("expected adt, found {:?}", ty);
+ None
+ }
+ },
+ hir::ExprKind::MethodCall(ref seg, ..) => {
+ let Some(method_id) = self.typeck_results().type_dependent_def_id(expr.hir_id) else {
+ debug!("could not resolve method id for {:?}", expr);
+ return None;
+ };
+ let (def_id, decl_id) = match self.tcx.associated_item(method_id).container {
+ ty::ImplContainer => (Some(method_id), None),
+ ty::TraitContainer => (None, Some(method_id)),
+ };
+ let sub_span = seg.ident.span;
+ filter!(self.span_utils, sub_span);
+ let span = self.span_from_span(sub_span);
+ Some(Data::RefData(Ref {
+ kind: RefKind::Function,
+ span,
+ ref_id: def_id.or(decl_id).map(id_from_def_id).unwrap_or_else(null_id),
+ }))
+ }
+ hir::ExprKind::Path(ref path) => {
+ self.get_path_data(expr.hir_id, path).map(Data::RefData)
+ }
+ _ => {
+ // FIXME
+ bug!("invalid expression: {:?}", expr);
+ }
+ }
+ }
+
+ pub fn get_path_res(&self, hir_id: hir::HirId) -> Res {
+ match self.tcx.hir().get(hir_id) {
+ Node::TraitRef(tr) => tr.path.res,
+
+ Node::Item(&hir::Item { kind: hir::ItemKind::Use(path, _), .. }) => path.res,
+ Node::PathSegment(seg) => match seg.res {
+ Some(res) if res != Res::Err => res,
+ _ => {
+ let parent_node = self.tcx.hir().get_parent_node(hir_id);
+ self.get_path_res(parent_node)
+ }
+ },
+
+ Node::Expr(&hir::Expr { kind: hir::ExprKind::Struct(ref qpath, ..), .. }) => {
+ self.typeck_results().qpath_res(qpath, hir_id)
+ }
+
+ Node::Expr(&hir::Expr { kind: hir::ExprKind::Path(ref qpath), .. })
+ | Node::Pat(&hir::Pat {
+ kind:
+ hir::PatKind::Path(ref qpath)
+ | hir::PatKind::Struct(ref qpath, ..)
+ | hir::PatKind::TupleStruct(ref qpath, ..),
+ ..
+ })
+ | Node::Ty(&hir::Ty { kind: hir::TyKind::Path(ref qpath), .. }) => match qpath {
+ hir::QPath::Resolved(_, path) => path.res,
+ hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => {
+ // #75962: `self.typeck_results` may be different from the `hir_id`'s result.
+ if self.tcx.has_typeck_results(hir_id.owner.to_def_id()) {
+ self.tcx.typeck(hir_id.owner).qpath_res(qpath, hir_id)
+ } else {
+ Res::Err
+ }
+ }
+ },
+
+ Node::Pat(&hir::Pat { kind: hir::PatKind::Binding(_, canonical_id, ..), .. }) => {
+ Res::Local(canonical_id)
+ }
+
+ _ => Res::Err,
+ }
+ }
+
+ pub fn get_path_data(&self, id: hir::HirId, path: &hir::QPath<'_>) -> Option<Ref> {
+ let segment = match path {
+ hir::QPath::Resolved(_, path) => path.segments.last(),
+ hir::QPath::TypeRelative(_, segment) => Some(*segment),
+ hir::QPath::LangItem(..) => None,
+ };
+ segment.and_then(|seg| {
+ self.get_path_segment_data(seg).or_else(|| self.get_path_segment_data_with_id(seg, id))
+ })
+ }
+
+ pub fn get_path_segment_data(&self, path_seg: &hir::PathSegment<'_>) -> Option<Ref> {
+ self.get_path_segment_data_with_id(path_seg, path_seg.hir_id?)
+ }
+
+ pub fn get_path_segment_data_with_id(
+ &self,
+ path_seg: &hir::PathSegment<'_>,
+ id: hir::HirId,
+ ) -> Option<Ref> {
+ // Returns true if the path is function type sugar, e.g., `Fn(A) -> B`.
+ fn fn_type(seg: &hir::PathSegment<'_>) -> bool {
+ seg.args.map_or(false, |args| args.parenthesized)
+ }
+
+ let res = self.get_path_res(id);
+ let span = path_seg.ident.span;
+ filter!(self.span_utils, span);
+ let span = self.span_from_span(span);
+
+ match res {
+ Res::Local(id) => {
+ Some(Ref { kind: RefKind::Variable, span, ref_id: id_from_hir_id(id, self) })
+ }
+ Res::Def(HirDefKind::Trait, def_id) if fn_type(path_seg) => {
+ Some(Ref { kind: RefKind::Type, span, ref_id: id_from_def_id(def_id) })
+ }
+ Res::Def(
+ HirDefKind::Struct
+ | HirDefKind::Variant
+ | HirDefKind::Union
+ | HirDefKind::Enum
+ | HirDefKind::TyAlias
+ | HirDefKind::ForeignTy
+ | HirDefKind::TraitAlias
+ | HirDefKind::AssocTy
+ | HirDefKind::Trait
+ | HirDefKind::OpaqueTy
+ | HirDefKind::TyParam,
+ def_id,
+ ) => Some(Ref { kind: RefKind::Type, span, ref_id: id_from_def_id(def_id) }),
+ Res::Def(HirDefKind::ConstParam, def_id) => {
+ Some(Ref { kind: RefKind::Variable, span, ref_id: id_from_def_id(def_id) })
+ }
+ Res::Def(HirDefKind::Ctor(..), def_id) => {
+ // This is a reference to a tuple struct or an enum variant where the def_id points
+ // to an invisible constructor function. That is not a very useful
+ // def, so adjust to point to the tuple struct or enum variant itself.
+ let parent_def_id = self.tcx.parent(def_id);
+ Some(Ref { kind: RefKind::Type, span, ref_id: id_from_def_id(parent_def_id) })
+ }
+ Res::Def(HirDefKind::Static(_) | HirDefKind::Const | HirDefKind::AssocConst, _) => {
+ Some(Ref { kind: RefKind::Variable, span, ref_id: id_from_def_id(res.def_id()) })
+ }
+ Res::Def(HirDefKind::AssocFn, decl_id) => {
+ let def_id = if decl_id.is_local() {
+ if self.tcx.impl_defaultness(decl_id).has_value() {
+ Some(decl_id)
+ } else {
+ None
+ }
+ } else {
+ None
+ };
+ Some(Ref {
+ kind: RefKind::Function,
+ span,
+ ref_id: id_from_def_id(def_id.unwrap_or(decl_id)),
+ })
+ }
+ Res::Def(HirDefKind::Fn, def_id) => {
+ Some(Ref { kind: RefKind::Function, span, ref_id: id_from_def_id(def_id) })
+ }
+ Res::Def(HirDefKind::Mod, def_id) => {
+ Some(Ref { kind: RefKind::Mod, span, ref_id: id_from_def_id(def_id) })
+ }
+
+ Res::Def(
+ HirDefKind::Macro(..)
+ | HirDefKind::ExternCrate
+ | HirDefKind::ForeignMod
+ | HirDefKind::LifetimeParam
+ | HirDefKind::AnonConst
+ | HirDefKind::InlineConst
+ | HirDefKind::Use
+ | HirDefKind::Field
+ | HirDefKind::GlobalAsm
+ | HirDefKind::Impl
+ | HirDefKind::Closure
+ | HirDefKind::Generator,
+ _,
+ )
+ | Res::PrimTy(..)
+ | Res::SelfTy { .. }
+ | Res::ToolMod
+ | Res::NonMacroAttr(..)
+ | Res::SelfCtor(..)
+ | Res::Err => None,
+ }
+ }
+
+ pub fn get_field_ref_data(
+ &self,
+ field_ref: &hir::ExprField<'_>,
+ variant: &ty::VariantDef,
+ ) -> Option<Ref> {
+ filter!(self.span_utils, field_ref.ident.span);
+ self.tcx.find_field_index(field_ref.ident, variant).map(|index| {
+ let span = self.span_from_span(field_ref.ident.span);
+ Ref { kind: RefKind::Variable, span, ref_id: id_from_def_id(variant.fields[index].did) }
+ })
+ }
+
+ /// Attempt to return MacroRef for any AST node.
+ ///
+ /// For a given piece of AST defined by the supplied Span and NodeId,
+ /// returns `None` if the node is not macro-generated or the span is malformed,
+ /// else uses the expansion callsite and callee to return some MacroRef.
+ ///
+ /// FIXME: [`DumpVisitor::process_macro_use`] should actually dump this data
+ #[allow(dead_code)]
+ fn get_macro_use_data(&self, span: Span) -> Option<MacroRef> {
+ if !generated_code(span) {
+ return None;
+ }
+ // Note we take care to use the source callsite/callee, to handle
+ // nested expansions and ensure we only generate data for source-visible
+ // macro uses.
+ let callsite = span.source_callsite();
+ let callsite_span = self.span_from_span(callsite);
+ let callee = span.source_callee()?;
+
+ let mac_name = match callee.kind {
+ ExpnKind::Macro(kind, name) => match kind {
+ MacroKind::Bang => name,
+
+ // Ignore attribute macros, their spans are usually mangled
+ // FIXME(eddyb) is this really the case anymore?
+ MacroKind::Attr | MacroKind::Derive => return None,
+ },
+
+ // These are not macros.
+ // FIXME(eddyb) maybe there is a way to handle them usefully?
+ ExpnKind::Inlined | ExpnKind::Root | ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => {
+ return None;
+ }
+ };
+
+ let callee_span = self.span_from_span(callee.def_site);
+ Some(MacroRef {
+ span: callsite_span,
+ qualname: mac_name.to_string(), // FIXME: generate the real qualname
+ callee_span,
+ })
+ }
+
+ fn lookup_def_id(&self, ref_id: hir::HirId) -> Option<DefId> {
+ match self.get_path_res(ref_id) {
+ Res::PrimTy(_) | Res::SelfTy { .. } | Res::Err => None,
+ def => def.opt_def_id(),
+ }
+ }
+
+ fn docs_for_attrs(&self, attrs: &[ast::Attribute]) -> String {
+ let mut result = String::new();
+
+ for attr in attrs {
+ if let Some((val, kind)) = attr.doc_str_and_comment_kind() {
+ // FIXME: Should save-analysis beautify doc strings itself or leave it to users?
+ result.push_str(beautify_doc_string(val, kind).as_str());
+ result.push('\n');
+ }
+ }
+
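+        // Unless full docs were requested, keep only the first paragraph
+        // (i.e., truncate at the first blank line).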
+ if !self.config.full_docs {
+ if let Some(index) = result.find("\n\n") {
+ result.truncate(index);
+ }
+ }
+
+ result
+ }
+
+ fn next_impl_id(&self) -> u32 {
+ let next = self.impl_counter.get();
+ self.impl_counter.set(next + 1);
+ next
+ }
+}
+
+// A HIR visitor for collecting paths (e.g., the names of structs) and formal
+// variables (idents) from patterns.
+struct PathCollector<'l> {
+ tcx: TyCtxt<'l>,
+ collected_paths: Vec<(hir::HirId, &'l hir::QPath<'l>)>,
+ collected_idents: Vec<(hir::HirId, Ident, hir::Mutability)>,
+}
+
+impl<'l> PathCollector<'l> {
+ fn new(tcx: TyCtxt<'l>) -> PathCollector<'l> {
+ PathCollector { tcx, collected_paths: vec![], collected_idents: vec![] }
+ }
+}
+
+impl<'l> Visitor<'l> for PathCollector<'l> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_pat(&mut self, p: &'l hir::Pat<'l>) {
+ match p.kind {
+ hir::PatKind::Struct(ref path, ..) => {
+ self.collected_paths.push((p.hir_id, path));
+ }
+ hir::PatKind::TupleStruct(ref path, ..) | hir::PatKind::Path(ref path) => {
+ self.collected_paths.push((p.hir_id, path));
+ }
+ hir::PatKind::Binding(bm, _, ident, _) => {
+ debug!(
+ "PathCollector, visit ident in pat {}: {:?} {:?}",
+ ident, p.span, ident.span
+ );
+ let immut = match bm {
+ // Even if the ref is mut, you can't change the ref, only
+ // the data pointed at, so showing the initialising expression
+ // is still worthwhile.
+ hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Ref => {
+ hir::Mutability::Not
+ }
+ hir::BindingAnnotation::Mutable | hir::BindingAnnotation::RefMut => {
+ hir::Mutability::Mut
+ }
+ };
+ self.collected_idents.push((p.hir_id, ident, immut));
+ }
+ _ => {}
+ }
+ intravisit::walk_pat(self, p);
+ }
+}
+
+/// Defines what to do with the results of saving the analysis.
+pub trait SaveHandler {
+ fn save(&mut self, save_ctxt: &SaveContext<'_>, analysis: &Analysis);
+}
+
+/// Dump the save-analysis results to a file.
+pub struct DumpHandler<'a> {
+ odir: Option<&'a Path>,
+ cratename: String,
+}
+
+impl<'a> DumpHandler<'a> {
+ pub fn new(odir: Option<&'a Path>, cratename: &str) -> DumpHandler<'a> {
+ DumpHandler { odir, cratename: cratename.to_owned() }
+ }
+
+ fn output_file(&self, ctx: &SaveContext<'_>) -> (BufWriter<File>, PathBuf) {
+ let sess = &ctx.tcx.sess;
+ let file_name = match ctx.config.output_file {
+ Some(ref s) => PathBuf::from(s),
+ None => {
+ let mut root_path = match self.odir {
+ Some(val) => val.join("save-analysis"),
+ None => PathBuf::from("save-analysis-temp"),
+ };
+
+ if let Err(e) = std::fs::create_dir_all(&root_path) {
+ error!("Could not create directory {}: {}", root_path.display(), e);
+ }
+
+ let executable = sess.crate_types().iter().any(|ct| *ct == CrateType::Executable);
+ let mut out_name = if executable { String::new() } else { "lib".to_owned() };
+ out_name.push_str(&self.cratename);
+ out_name.push_str(&sess.opts.cg.extra_filename);
+ out_name.push_str(".json");
+ root_path.push(&out_name);
+
+ root_path
+ }
+ };
+
+ info!("Writing output to {}", file_name.display());
+
+ let output_file = BufWriter::new(File::create(&file_name).unwrap_or_else(|e| {
+ sess.fatal(&format!("Could not open {}: {}", file_name.display(), e))
+ }));
+
+ (output_file, file_name)
+ }
+}
+
+impl SaveHandler for DumpHandler<'_> {
+ fn save(&mut self, save_ctxt: &SaveContext<'_>, analysis: &Analysis) {
+ let sess = &save_ctxt.tcx.sess;
+ let (output, file_name) = self.output_file(&save_ctxt);
+ if let Err(e) = serde_json::to_writer(output, &analysis) {
+ error!("Can't serialize save-analysis: {:?}", e);
+ }
+
+ if sess.opts.json_artifact_notifications {
+ sess.parse_sess.span_diagnostic.emit_artifact_notification(&file_name, "save-analysis");
+ }
+ }
+}
+
+/// Call a callback with the results of save-analysis.
+pub struct CallbackHandler<'b> {
+ pub callback: &'b mut dyn FnMut(&rls_data::Analysis),
+}
+
+impl SaveHandler for CallbackHandler<'_> {
+ fn save(&mut self, _: &SaveContext<'_>, analysis: &Analysis) {
+ (self.callback)(analysis)
+ }
+}
+
+pub fn process_crate<'l, 'tcx, H: SaveHandler>(
+ tcx: TyCtxt<'tcx>,
+ cratename: &str,
+ input: &'l Input,
+ config: Option<Config>,
+ mut handler: H,
+) {
+ with_no_trimmed_paths!({
+ tcx.dep_graph.with_ignore(|| {
+ info!("Dumping crate {}", cratename);
+
+ // Privacy checking must be done outside of type inference; use a
+ // fallback in case the access levels couldn't have been correctly computed.
+ let access_levels = match tcx.sess.compile_status() {
+ Ok(..) => tcx.privacy_access_levels(()),
+ Err(..) => tcx.arena.alloc(AccessLevels::default()),
+ };
+
+ let save_ctxt = SaveContext {
+ tcx,
+ maybe_typeck_results: None,
+ access_levels: &access_levels,
+ span_utils: SpanUtils::new(&tcx.sess),
+ config: find_config(config),
+ impl_counter: Cell::new(0),
+ };
+
+ let mut visitor = DumpVisitor::new(save_ctxt);
+
+ visitor.dump_crate_info(cratename);
+ visitor.dump_compilation_options(input, cratename);
+ visitor.process_crate();
+
+ handler.save(&visitor.save_ctxt, &visitor.analysis())
+ })
+ })
+}
+
+fn find_config(supplied: Option<Config>) -> Config {
+ if let Some(config) = supplied {
+ return config;
+ }
+
+ match env::var_os("RUST_SAVE_ANALYSIS_CONFIG") {
+ None => Config::default(),
+ Some(config) => config
+ .to_str()
+ .ok_or(())
+ .map_err(|_| error!("`RUST_SAVE_ANALYSIS_CONFIG` isn't UTF-8"))
+ .and_then(|cfg| {
+ serde_json::from_str(cfg)
+ .map_err(|_| error!("Could not deserialize save-analysis config"))
+ })
+ .unwrap_or_default(),
+ }
+}
+
+// Utility functions for the module.
+
+// Helper function to escape quotes in a string by doubling them (e.g., `a"b` -> `a""b`)
+fn escape(s: String) -> String {
+ s.replace('\"', "\"\"")
+}
+
+// Helper function to determine if a span came from a
+// macro expansion or syntax extension.
+fn generated_code(span: Span) -> bool {
+ span.from_expansion() || span.is_dummy()
+}
+
+// DefId::index is a newtype and so the JSON serialisation is ugly. Therefore
+// we use our own Id which is the same, but without the newtype.
+fn id_from_def_id(id: DefId) -> rls_data::Id {
+ rls_data::Id { krate: id.krate.as_u32(), index: id.index.as_u32() }
+}
+
+fn id_from_hir_id(id: hir::HirId, scx: &SaveContext<'_>) -> rls_data::Id {
+ let def_id = scx.tcx.hir().opt_local_def_id(id);
+ def_id.map(|id| id_from_def_id(id.to_def_id())).unwrap_or_else(|| {
+ // Create a *fake* `DefId` out of a `HirId` by combining the owner
+ // `local_def_index` and the `local_id`.
+ // This will work unless you have *billions* of definitions in a single
+ // crate (very unlikely to actually happen).
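+        // Illustrative example: local_def_index 5 (0x5) and local_id 3 (whose
+        // 32-bit bit-reversal is 0xC000_0000) pack into the fake index 0xC000_0005.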
+ rls_data::Id {
+ krate: LOCAL_CRATE.as_u32(),
+ index: id.owner.local_def_index.as_u32() | id.local_id.as_u32().reverse_bits(),
+ }
+ })
+}
+
+fn null_id() -> rls_data::Id {
+ rls_data::Id { krate: u32::MAX, index: u32::MAX }
+}
+
+fn lower_attributes(attrs: Vec<ast::Attribute>, scx: &SaveContext<'_>) -> Vec<rls_data::Attribute> {
+ attrs
+ .into_iter()
+ // Only retain real attributes. Doc comments are lowered separately.
+ .filter(|attr| !attr.has_name(sym::doc))
+ .map(|mut attr| {
+ // Remove the surrounding '#[..]' or '#![..]' of the pretty printed
+            // attribute. First normalize all inner attributes (#![..]) to outer
+ // ones (#[..]), then remove the two leading and the one trailing character.
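+            // Illustrative example: `#![no_std]` is normalized to `#[no_std]` and
+            // lowered to the attribute value "no_std".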
+ attr.style = ast::AttrStyle::Outer;
+ let value = attribute_to_string(&attr);
+ // This str slicing works correctly, because the leading and trailing characters
+ // are in the ASCII range and thus exactly one byte each.
+ let value = value[2..value.len() - 1].to_string();
+
+ rls_data::Attribute { value, span: scx.span_from_span(attr.span) }
+ })
+ .collect()
+}
diff --git a/compiler/rustc_save_analysis/src/sig.rs b/compiler/rustc_save_analysis/src/sig.rs
new file mode 100644
index 000000000..d1286c9b8
--- /dev/null
+++ b/compiler/rustc_save_analysis/src/sig.rs
@@ -0,0 +1,929 @@
+// A signature is a string representation of an item's type signature, excluding
+// any body. It also includes ids for any defs or refs in the signature. For
+// example:
+//
+// ```
+// fn foo(x: String) {
+// println!("{}", x);
+// }
+// ```
+// The signature string is something like "fn foo(x: String) {}" and the signature
+// will have defs for `foo` and `x` and a ref for `String`.
+//
+// All signature text should parse in the correct context (i.e., in a module or
+// impl, etc.). Clients may want to trim trailing `{}` or `;`. The text of a
+// signature is not guaranteed to be stable (it may improve or change as the
+// syntax changes, or whitespace or punctuation may change). It is also likely
+// not to be pretty - no attempt is made to prettify the text. It is recommended
+// that clients run the text through Rustfmt.
+//
+// This module generates Signatures for items by walking the AST and looking up
+// references.
+//
+// Signatures do not include visibility info. I'm not sure if this is a feature
+// or an omission (FIXME).
+//
+// FIXME where clauses need implementing, defs/refs in generics are mostly missing.
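+//
+// As an illustrative sketch (not guaranteed to match the exact output), the
+// `foo` example above could yield something like:
+//
+//   Signature {
+//       text: "fn foo(x: String) {}".to_owned(),
+//       defs: vec![/* `foo` at 3..6, `x` at 7..8 */],
+//       refs: vec![/* `String` at 10..16 */],
+//   }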
+
+use crate::{id_from_def_id, id_from_hir_id, SaveContext};
+
+use rls_data::{SigElement, Signature};
+
+use rustc_ast::Mutability;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir_pretty::id_to_string;
+use rustc_hir_pretty::{bounds_to_string, path_segment_to_string, path_to_string, ty_to_string};
+use rustc_span::symbol::{Ident, Symbol};
+
+pub fn item_signature(item: &hir::Item<'_>, scx: &SaveContext<'_>) -> Option<Signature> {
+ if !scx.config.signatures {
+ return None;
+ }
+ item.make(0, None, scx).ok()
+}
+
+pub fn foreign_item_signature(
+ item: &hir::ForeignItem<'_>,
+ scx: &SaveContext<'_>,
+) -> Option<Signature> {
+ if !scx.config.signatures {
+ return None;
+ }
+ item.make(0, None, scx).ok()
+}
+
+/// Signature for a struct or tuple field declaration.
+/// Does not include a trailing comma.
+pub fn field_signature(field: &hir::FieldDef<'_>, scx: &SaveContext<'_>) -> Option<Signature> {
+ if !scx.config.signatures {
+ return None;
+ }
+ field.make(0, None, scx).ok()
+}
+
+/// Does not include a trailing comma.
+pub fn variant_signature(variant: &hir::Variant<'_>, scx: &SaveContext<'_>) -> Option<Signature> {
+ if !scx.config.signatures {
+ return None;
+ }
+ variant.make(0, None, scx).ok()
+}
+
+pub fn method_signature(
+ id: hir::HirId,
+ ident: Ident,
+ generics: &hir::Generics<'_>,
+ m: &hir::FnSig<'_>,
+ scx: &SaveContext<'_>,
+) -> Option<Signature> {
+ if !scx.config.signatures {
+ return None;
+ }
+ make_method_signature(id, ident, generics, m, scx).ok()
+}
+
+pub fn assoc_const_signature(
+ id: hir::HirId,
+ ident: Symbol,
+ ty: &hir::Ty<'_>,
+ default: Option<&hir::Expr<'_>>,
+ scx: &SaveContext<'_>,
+) -> Option<Signature> {
+ if !scx.config.signatures {
+ return None;
+ }
+ make_assoc_const_signature(id, ident, ty, default, scx).ok()
+}
+
+pub fn assoc_type_signature(
+ id: hir::HirId,
+ ident: Ident,
+ bounds: Option<hir::GenericBounds<'_>>,
+ default: Option<&hir::Ty<'_>>,
+ scx: &SaveContext<'_>,
+) -> Option<Signature> {
+ if !scx.config.signatures {
+ return None;
+ }
+ make_assoc_type_signature(id, ident, bounds, default, scx).ok()
+}
+
+type Result = std::result::Result<Signature, &'static str>;
+
+trait Sig {
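+    // Note: `offset` is where this item's text will begin within the enclosing
+    // signature's text; `SigElement` starts/ends are absolute positions in the
+    // final text (as can be seen from the `offset + text.len()` call sites below).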
+ fn make(&self, offset: usize, id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result;
+}
+
+fn extend_sig(
+ mut sig: Signature,
+ text: String,
+ defs: Vec<SigElement>,
+ refs: Vec<SigElement>,
+) -> Signature {
+ sig.text = text;
+ sig.defs.extend(defs.into_iter());
+ sig.refs.extend(refs.into_iter());
+ sig
+}
+
+fn replace_text(mut sig: Signature, text: String) -> Signature {
+ sig.text = text;
+ sig
+}
+
+fn merge_sigs(text: String, sigs: Vec<Signature>) -> Signature {
+ let mut result = Signature { text, defs: vec![], refs: vec![] };
+
+ let (defs, refs): (Vec<_>, Vec<_>) = sigs.into_iter().map(|s| (s.defs, s.refs)).unzip();
+
+ result.defs.extend(defs.into_iter().flat_map(|ds| ds.into_iter()));
+ result.refs.extend(refs.into_iter().flat_map(|rs| rs.into_iter()));
+
+ result
+}
+
+fn text_sig(text: String) -> Signature {
+ Signature { text, defs: vec![], refs: vec![] }
+}
+
+impl<'hir> Sig for hir::Ty<'hir> {
+ fn make(&self, offset: usize, _parent_id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
+ let id = Some(self.hir_id);
+ match self.kind {
+ hir::TyKind::Slice(ref ty) => {
+ let nested = ty.make(offset + 1, id, scx)?;
+ let text = format!("[{}]", nested.text);
+ Ok(replace_text(nested, text))
+ }
+ hir::TyKind::Ptr(ref mt) => {
+ let prefix = match mt.mutbl {
+ hir::Mutability::Mut => "*mut ",
+ hir::Mutability::Not => "*const ",
+ };
+ let nested = mt.ty.make(offset + prefix.len(), id, scx)?;
+ let text = format!("{}{}", prefix, nested.text);
+ Ok(replace_text(nested, text))
+ }
+ hir::TyKind::Rptr(ref lifetime, ref mt) => {
+ let mut prefix = "&".to_owned();
+ prefix.push_str(&lifetime.name.ident().to_string());
+ prefix.push(' ');
+ if let hir::Mutability::Mut = mt.mutbl {
+ prefix.push_str("mut ");
+ };
+
+ let nested = mt.ty.make(offset + prefix.len(), id, scx)?;
+ let text = format!("{}{}", prefix, nested.text);
+ Ok(replace_text(nested, text))
+ }
+ hir::TyKind::Never => Ok(text_sig("!".to_owned())),
+ hir::TyKind::Tup(ts) => {
+ let mut text = "(".to_owned();
+ let mut defs = vec![];
+ let mut refs = vec![];
+ for t in ts {
+ let nested = t.make(offset + text.len(), id, scx)?;
+ text.push_str(&nested.text);
+ text.push(',');
+ defs.extend(nested.defs.into_iter());
+ refs.extend(nested.refs.into_iter());
+ }
+ text.push(')');
+ Ok(Signature { text, defs, refs })
+ }
+ hir::TyKind::BareFn(ref f) => {
+ let mut text = String::new();
+ if !f.generic_params.is_empty() {
+ // FIXME defs, bounds on lifetimes
+ text.push_str("for<");
+ text.push_str(
+ &f.generic_params
+ .iter()
+ .filter_map(|param| match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => {
+ Some(param.name.ident().to_string())
+ }
+ _ => None,
+ })
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+ text.push('>');
+ }
+
+ if let hir::Unsafety::Unsafe = f.unsafety {
+ text.push_str("unsafe ");
+ }
+ text.push_str("fn(");
+
+ let mut defs = vec![];
+ let mut refs = vec![];
+ for i in f.decl.inputs {
+ let nested = i.make(offset + text.len(), Some(i.hir_id), scx)?;
+ text.push_str(&nested.text);
+ text.push(',');
+ defs.extend(nested.defs.into_iter());
+ refs.extend(nested.refs.into_iter());
+ }
+ text.push(')');
+ if let hir::FnRetTy::Return(ref t) = f.decl.output {
+ text.push_str(" -> ");
+ let nested = t.make(offset + text.len(), None, scx)?;
+ text.push_str(&nested.text);
+ text.push(',');
+ defs.extend(nested.defs.into_iter());
+ refs.extend(nested.refs.into_iter());
+ }
+
+ Ok(Signature { text, defs, refs })
+ }
+ hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) => path.make(offset, id, scx),
+ hir::TyKind::Path(hir::QPath::Resolved(Some(ref qself), ref path)) => {
+ let nested_ty = qself.make(offset + 1, id, scx)?;
+ let prefix = format!(
+ "<{} as {}>::",
+ nested_ty.text,
+ path_segment_to_string(&path.segments[0])
+ );
+
+ let name = path_segment_to_string(path.segments.last().ok_or("Bad path")?);
+ let res = scx.get_path_res(id.ok_or("Missing id for Path")?);
+ let id = id_from_def_id(res.def_id());
+ if path.segments.len() == 2 {
+ let start = offset + prefix.len();
+ let end = start + name.len();
+
+ Ok(Signature {
+ text: prefix + &name,
+ defs: vec![],
+ refs: vec![SigElement { id, start, end }],
+ })
+ } else {
+ let start = offset + prefix.len() + 5;
+ let end = start + name.len();
+ // FIXME should put the proper path in there, not ellipsis.
+ Ok(Signature {
+ text: prefix + "...::" + &name,
+ defs: vec![],
+ refs: vec![SigElement { id, start, end }],
+ })
+ }
+ }
+ hir::TyKind::Path(hir::QPath::TypeRelative(ty, segment)) => {
+ let nested_ty = ty.make(offset + 1, id, scx)?;
+ let prefix = format!("<{}>::", nested_ty.text);
+
+ let name = path_segment_to_string(segment);
+ let res = scx.get_path_res(id.ok_or("Missing id for Path")?);
+ let id = id_from_def_id(res.def_id());
+
+ let start = offset + prefix.len();
+ let end = start + name.len();
+ Ok(Signature {
+ text: prefix + &name,
+ defs: vec![],
+ refs: vec![SigElement { id, start, end }],
+ })
+ }
+ hir::TyKind::Path(hir::QPath::LangItem(lang_item, _, _)) => {
+ Ok(text_sig(format!("#[lang = \"{}\"]", lang_item.name())))
+ }
+ hir::TyKind::TraitObject(bounds, ..) => {
+ // FIXME recurse into bounds
+ let bounds: Vec<hir::GenericBound<'_>> = bounds
+ .iter()
+ .map(|hir::PolyTraitRef { bound_generic_params, trait_ref, span }| {
+ hir::GenericBound::Trait(
+ hir::PolyTraitRef {
+ bound_generic_params,
+ trait_ref: hir::TraitRef {
+ path: trait_ref.path,
+ hir_ref_id: trait_ref.hir_ref_id,
+ },
+ span: *span,
+ },
+ hir::TraitBoundModifier::None,
+ )
+ })
+ .collect();
+ let nested = bounds_to_string(&bounds);
+ Ok(text_sig(nested))
+ }
+ hir::TyKind::Array(ref ty, ref length) => {
+ let nested_ty = ty.make(offset + 1, id, scx)?;
+ let expr = id_to_string(&scx.tcx.hir(), length.hir_id()).replace('\n', " ");
+ let text = format!("[{}; {}]", nested_ty.text, expr);
+ Ok(replace_text(nested_ty, text))
+ }
+ hir::TyKind::OpaqueDef(item_id, _) => {
+ let item = scx.tcx.hir().item(item_id);
+ item.make(offset, Some(item_id.hir_id()), scx)
+ }
+ hir::TyKind::Typeof(_) | hir::TyKind::Infer | hir::TyKind::Err => Err("Ty"),
+ }
+ }
+}
+
+impl<'hir> Sig for hir::Item<'hir> {
+ fn make(&self, offset: usize, _parent_id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
+ let id = Some(self.hir_id());
+
+ match self.kind {
+ hir::ItemKind::Static(ref ty, m, ref body) => {
+ let mut text = "static ".to_owned();
+ if m == hir::Mutability::Mut {
+ text.push_str("mut ");
+ }
+ let name = self.ident.to_string();
+ let defs = vec![SigElement {
+ id: id_from_def_id(self.def_id.to_def_id()),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ }];
+ text.push_str(&name);
+ text.push_str(": ");
+
+ let ty = ty.make(offset + text.len(), id, scx)?;
+ text.push_str(&ty.text);
+
+ text.push_str(" = ");
+ let expr = id_to_string(&scx.tcx.hir(), body.hir_id).replace('\n', " ");
+ text.push_str(&expr);
+
+ text.push(';');
+
+ Ok(extend_sig(ty, text, defs, vec![]))
+ }
+ hir::ItemKind::Const(ref ty, ref body) => {
+ let mut text = "const ".to_owned();
+ let name = self.ident.to_string();
+ let defs = vec![SigElement {
+ id: id_from_def_id(self.def_id.to_def_id()),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ }];
+ text.push_str(&name);
+ text.push_str(": ");
+
+ let ty = ty.make(offset + text.len(), id, scx)?;
+ text.push_str(&ty.text);
+
+ text.push_str(" = ");
+ let expr = id_to_string(&scx.tcx.hir(), body.hir_id).replace('\n', " ");
+ text.push_str(&expr);
+
+ text.push(';');
+
+ Ok(extend_sig(ty, text, defs, vec![]))
+ }
+ hir::ItemKind::Fn(hir::FnSig { ref decl, header, span: _ }, ref generics, _) => {
+ let mut text = String::new();
+ if let hir::Constness::Const = header.constness {
+ text.push_str("const ");
+ }
+ if hir::IsAsync::Async == header.asyncness {
+ text.push_str("async ");
+ }
+ if let hir::Unsafety::Unsafe = header.unsafety {
+ text.push_str("unsafe ");
+ }
+ text.push_str("fn ");
+
+ let mut sig =
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
+
+ sig.text.push('(');
+ for i in decl.inputs {
+ // FIXME should descend into patterns to add defs.
+ sig.text.push_str(": ");
+ let nested = i.make(offset + sig.text.len(), Some(i.hir_id), scx)?;
+ sig.text.push_str(&nested.text);
+ sig.text.push(',');
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push(')');
+
+ if let hir::FnRetTy::Return(ref t) = decl.output {
+ sig.text.push_str(" -> ");
+ let nested = t.make(offset + sig.text.len(), None, scx)?;
+ sig.text.push_str(&nested.text);
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push_str(" {}");
+
+ Ok(sig)
+ }
+ hir::ItemKind::Macro(..) => {
+                let mut text = "macro ".to_owned();
+                let name = self.ident.to_string();
+                text.push_str(&name);
+                text.push_str("! {}");
+
+ Ok(text_sig(text))
+ }
+ hir::ItemKind::Mod(ref _mod) => {
+ let mut text = "mod ".to_owned();
+ let name = self.ident.to_string();
+ let defs = vec![SigElement {
+ id: id_from_def_id(self.def_id.to_def_id()),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ }];
+ text.push_str(&name);
+ // Could be either `mod foo;` or `mod foo { ... }`; we render the former.
+ text.push(';');
+
+ Ok(Signature { text, defs, refs: vec![] })
+ }
+ hir::ItemKind::TyAlias(ref ty, ref generics) => {
+ let text = "type ".to_owned();
+ let mut sig =
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
+
+ sig.text.push_str(" = ");
+ let ty = ty.make(offset + sig.text.len(), id, scx)?;
+ sig.text.push_str(&ty.text);
+ sig.text.push(';');
+
+ Ok(merge_sigs(sig.text.clone(), vec![sig, ty]))
+ }
+ hir::ItemKind::Enum(_, ref generics) => {
+ let text = "enum ".to_owned();
+ let mut sig =
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
+ sig.text.push_str(" {}");
+ Ok(sig)
+ }
+ hir::ItemKind::Struct(_, ref generics) => {
+ let text = "struct ".to_owned();
+ let mut sig =
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
+ sig.text.push_str(" {}");
+ Ok(sig)
+ }
+ hir::ItemKind::Union(_, ref generics) => {
+ let text = "union ".to_owned();
+ let mut sig =
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
+ sig.text.push_str(" {}");
+ Ok(sig)
+ }
+ hir::ItemKind::Trait(is_auto, unsafety, ref generics, bounds, _) => {
+ let mut text = String::new();
+
+ if is_auto == hir::IsAuto::Yes {
+ text.push_str("auto ");
+ }
+
+ if let hir::Unsafety::Unsafe = unsafety {
+ text.push_str("unsafe ");
+ }
+ text.push_str("trait ");
+ let mut sig =
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
+
+ if !bounds.is_empty() {
+ sig.text.push_str(": ");
+ sig.text.push_str(&bounds_to_string(bounds));
+ }
+ // FIXME where clause
+ sig.text.push_str(" {}");
+
+ Ok(sig)
+ }
+ hir::ItemKind::TraitAlias(ref generics, bounds) => {
+ let mut text = String::new();
+ text.push_str("trait ");
+ let mut sig =
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
+
+ if !bounds.is_empty() {
+ sig.text.push_str(" = ");
+ sig.text.push_str(&bounds_to_string(bounds));
+ }
+ // FIXME where clause
+ sig.text.push(';');
+
+ Ok(sig)
+ }
+ hir::ItemKind::Impl(hir::Impl {
+ unsafety,
+ polarity,
+ defaultness,
+ defaultness_span: _,
+ constness,
+ ref generics,
+ ref of_trait,
+ ref self_ty,
+ items: _,
+ }) => {
+ let mut text = String::new();
+ if let hir::Defaultness::Default { .. } = defaultness {
+ text.push_str("default ");
+ }
+ if let hir::Unsafety::Unsafe = unsafety {
+ text.push_str("unsafe ");
+ }
+ text.push_str("impl");
+ if let hir::Constness::Const = constness {
+ text.push_str(" const");
+ }
+
+ let generics_sig = generics.make(offset + text.len(), id, scx)?;
+ text.push_str(&generics_sig.text);
+
+ text.push(' ');
+
+ let trait_sig = if let Some(ref t) = *of_trait {
+ if let hir::ImplPolarity::Negative(_) = polarity {
+ text.push('!');
+ }
+ let trait_sig = t.path.make(offset + text.len(), id, scx)?;
+ text.push_str(&trait_sig.text);
+ text.push_str(" for ");
+ trait_sig
+ } else {
+ text_sig(String::new())
+ };
+
+ let ty_sig = self_ty.make(offset + text.len(), id, scx)?;
+ text.push_str(&ty_sig.text);
+
+ text.push_str(" {}");
+
+ Ok(merge_sigs(text, vec![generics_sig, trait_sig, ty_sig]))
+
+ // FIXME where clause
+ }
+ hir::ItemKind::ForeignMod { .. } => Err("extern mod"),
+ hir::ItemKind::GlobalAsm(_) => Err("global asm"),
+ hir::ItemKind::ExternCrate(_) => Err("extern crate"),
+ hir::ItemKind::OpaqueTy(..) => Err("opaque type"),
+ // FIXME should implement this (e.g., pub use).
+ hir::ItemKind::Use(..) => Err("import"),
+ }
+ }
+}
+
+impl<'hir> Sig for hir::Path<'hir> {
+ fn make(&self, offset: usize, id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
+ let res = scx.get_path_res(id.ok_or("Missing id for Path")?);
+
+ let (name, start, end) = match res {
+ Res::PrimTy(..) | Res::SelfTy { .. } | Res::Err => {
+ return Ok(Signature { text: path_to_string(self), defs: vec![], refs: vec![] });
+ }
+ Res::Def(DefKind::AssocConst | DefKind::Variant | DefKind::Ctor(..), _) => {
+ let len = self.segments.len();
+ if len < 2 {
+ return Err("Bad path");
+ }
+ // FIXME: really we should descend into the generics here and add SigElements for
+ // them.
+ // FIXME: would be nice to have a def for the first path segment.
+ let seg1 = path_segment_to_string(&self.segments[len - 2]);
+ let seg2 = path_segment_to_string(&self.segments[len - 1]);
+ let start = offset + seg1.len() + 2;
+ (format!("{}::{}", seg1, seg2), start, start + seg2.len())
+ }
+ _ => {
+ let name = path_segment_to_string(self.segments.last().ok_or("Bad path")?);
+ let end = offset + name.len();
+ (name, offset, end)
+ }
+ };
+
+ let id = id_from_def_id(res.def_id());
+ Ok(Signature { text: name, defs: vec![], refs: vec![SigElement { id, start, end }] })
+ }
+}
+
+// This does not cover the where clause, which must be processed separately.
+impl<'hir> Sig for hir::Generics<'hir> {
+ fn make(&self, offset: usize, _parent_id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
+ if self.params.is_empty() {
+ return Ok(text_sig(String::new()));
+ }
+
+ let mut text = "<".to_owned();
+
+ let mut defs = Vec::with_capacity(self.params.len());
+ for param in self.params {
+ let mut param_text = String::new();
+ if let hir::GenericParamKind::Const { .. } = param.kind {
+ param_text.push_str("const ");
+ }
+ param_text.push_str(param.name.ident().as_str());
+ defs.push(SigElement {
+ id: id_from_hir_id(param.hir_id, scx),
+ start: offset + text.len(),
+ end: offset + text.len() + param_text.as_str().len(),
+ });
+ if let hir::GenericParamKind::Const { ref ty, default } = param.kind {
+ param_text.push_str(": ");
+ param_text.push_str(&ty_to_string(&ty));
+ if let Some(default) = default {
+ param_text.push_str(" = ");
+ param_text.push_str(&id_to_string(&scx.tcx.hir(), default.hir_id));
+ }
+ }
+ text.push_str(&param_text);
+ text.push(',');
+ }
+
+ text.push('>');
+ Ok(Signature { text, defs, refs: vec![] })
+ }
+}
+
+impl<'hir> Sig for hir::FieldDef<'hir> {
+ fn make(&self, offset: usize, _parent_id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
+ let mut text = String::new();
+
+ text.push_str(&self.ident.to_string());
+ let defs = Some(SigElement {
+ id: id_from_hir_id(self.hir_id, scx),
+ start: offset,
+ end: offset + text.len(),
+ });
+ text.push_str(": ");
+
+ let mut ty_sig = self.ty.make(offset + text.len(), Some(self.hir_id), scx)?;
+ text.push_str(&ty_sig.text);
+ ty_sig.text = text;
+ ty_sig.defs.extend(defs.into_iter());
+ Ok(ty_sig)
+ }
+}
+
+impl<'hir> Sig for hir::Variant<'hir> {
+ fn make(&self, offset: usize, parent_id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
+ let mut text = self.ident.to_string();
+ match self.data {
+ hir::VariantData::Struct(fields, recovered) => {
+ let id = parent_id.ok_or("Missing id for Variant's parent")?;
+ let name_def = SigElement {
+ id: id_from_hir_id(id, scx),
+ start: offset,
+ end: offset + text.len(),
+ };
+ text.push_str(" { ");
+ let mut defs = vec![name_def];
+ let mut refs = vec![];
+ if recovered {
+ text.push_str("/* parse error */ ");
+ } else {
+ for f in fields {
+ let field_sig = f.make(offset + text.len(), Some(id), scx)?;
+ text.push_str(&field_sig.text);
+ text.push_str(", ");
+ defs.extend(field_sig.defs.into_iter());
+ refs.extend(field_sig.refs.into_iter());
+ }
+ }
+ text.push('}');
+ Ok(Signature { text, defs, refs })
+ }
+ hir::VariantData::Tuple(fields, id) => {
+ let name_def = SigElement {
+ id: id_from_hir_id(id, scx),
+ start: offset,
+ end: offset + text.len(),
+ };
+ text.push('(');
+ let mut defs = vec![name_def];
+ let mut refs = vec![];
+ for f in fields {
+ let field_sig = f.make(offset + text.len(), Some(id), scx)?;
+ text.push_str(&field_sig.text);
+ text.push_str(", ");
+ defs.extend(field_sig.defs.into_iter());
+ refs.extend(field_sig.refs.into_iter());
+ }
+ text.push(')');
+ Ok(Signature { text, defs, refs })
+ }
+ hir::VariantData::Unit(id) => {
+ let name_def = SigElement {
+ id: id_from_hir_id(id, scx),
+ start: offset,
+ end: offset + text.len(),
+ };
+ Ok(Signature { text, defs: vec![name_def], refs: vec![] })
+ }
+ }
+ }
+}
+
+impl<'hir> Sig for hir::ForeignItem<'hir> {
+ fn make(&self, offset: usize, _parent_id: Option<hir::HirId>, scx: &SaveContext<'_>) -> Result {
+ let id = Some(self.hir_id());
+ match self.kind {
+ hir::ForeignItemKind::Fn(decl, _, ref generics) => {
+ let mut text = String::new();
+ text.push_str("fn ");
+
+ let mut sig =
+ name_and_generics(text, offset, generics, self.hir_id(), self.ident, scx)?;
+
+ sig.text.push('(');
+ for i in decl.inputs {
+ sig.text.push_str(": ");
+ let nested = i.make(offset + sig.text.len(), Some(i.hir_id), scx)?;
+ sig.text.push_str(&nested.text);
+ sig.text.push(',');
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push(')');
+
+ if let hir::FnRetTy::Return(ref t) = decl.output {
+ sig.text.push_str(" -> ");
+ let nested = t.make(offset + sig.text.len(), None, scx)?;
+ sig.text.push_str(&nested.text);
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push(';');
+
+ Ok(sig)
+ }
+ hir::ForeignItemKind::Static(ref ty, m) => {
+ let mut text = "static ".to_owned();
+ if m == Mutability::Mut {
+ text.push_str("mut ");
+ }
+ let name = self.ident.to_string();
+ let defs = vec![SigElement {
+ id: id_from_def_id(self.def_id.to_def_id()),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ }];
+ text.push_str(&name);
+ text.push_str(": ");
+
+ let ty_sig = ty.make(offset + text.len(), id, scx)?;
+ text.push(';');
+
+ Ok(extend_sig(ty_sig, text, defs, vec![]))
+ }
+ hir::ForeignItemKind::Type => {
+ let mut text = "type ".to_owned();
+ let name = self.ident.to_string();
+ let defs = vec![SigElement {
+ id: id_from_def_id(self.def_id.to_def_id()),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ }];
+ text.push_str(&name);
+ text.push(';');
+
+ Ok(Signature { text, defs, refs: vec![] })
+ }
+ }
+ }
+}
+
+fn name_and_generics(
+ mut text: String,
+ offset: usize,
+ generics: &hir::Generics<'_>,
+ id: hir::HirId,
+ name: Ident,
+ scx: &SaveContext<'_>,
+) -> Result {
+ let name = name.to_string();
+ let def = SigElement {
+ id: id_from_hir_id(id, scx),
+ start: offset + text.len(),
+ end: offset + text.len() + name.len(),
+ };
+ text.push_str(&name);
+ let generics: Signature = generics.make(offset + text.len(), Some(id), scx)?;
+ // FIXME where clause
+ let text = format!("{}{}", text, generics.text);
+ Ok(extend_sig(generics, text, vec![def], vec![]))
+}
+
+fn make_assoc_type_signature(
+ id: hir::HirId,
+ ident: Ident,
+ bounds: Option<hir::GenericBounds<'_>>,
+ default: Option<&hir::Ty<'_>>,
+ scx: &SaveContext<'_>,
+) -> Result {
+ let mut text = "type ".to_owned();
+ let name = ident.to_string();
+ let mut defs = vec![SigElement {
+ id: id_from_hir_id(id, scx),
+ start: text.len(),
+ end: text.len() + name.len(),
+ }];
+ let mut refs = vec![];
+ text.push_str(&name);
+ if let Some(bounds) = bounds {
+ text.push_str(": ");
+ // FIXME should descend into bounds
+ text.push_str(&bounds_to_string(bounds));
+ }
+ if let Some(default) = default {
+ text.push_str(" = ");
+ let ty_sig = default.make(text.len(), Some(id), scx)?;
+ text.push_str(&ty_sig.text);
+ defs.extend(ty_sig.defs.into_iter());
+ refs.extend(ty_sig.refs.into_iter());
+ }
+ text.push(';');
+ Ok(Signature { text, defs, refs })
+}
+
+fn make_assoc_const_signature(
+ id: hir::HirId,
+ ident: Symbol,
+ ty: &hir::Ty<'_>,
+ default: Option<&hir::Expr<'_>>,
+ scx: &SaveContext<'_>,
+) -> Result {
+ let mut text = "const ".to_owned();
+ let name = ident.to_string();
+ let mut defs = vec![SigElement {
+ id: id_from_hir_id(id, scx),
+ start: text.len(),
+ end: text.len() + name.len(),
+ }];
+ let mut refs = vec![];
+ text.push_str(&name);
+ text.push_str(": ");
+
+ let ty_sig = ty.make(text.len(), Some(id), scx)?;
+ text.push_str(&ty_sig.text);
+ defs.extend(ty_sig.defs.into_iter());
+ refs.extend(ty_sig.refs.into_iter());
+
+ if let Some(default) = default {
+ text.push_str(" = ");
+ text.push_str(&id_to_string(&scx.tcx.hir(), default.hir_id));
+ }
+ text.push(';');
+ Ok(Signature { text, defs, refs })
+}
+
+fn make_method_signature(
+ id: hir::HirId,
+ ident: Ident,
+ generics: &hir::Generics<'_>,
+ m: &hir::FnSig<'_>,
+ scx: &SaveContext<'_>,
+) -> Result {
+ // FIXME code dup with function signature
+ let mut text = String::new();
+ if let hir::Constness::Const = m.header.constness {
+ text.push_str("const ");
+ }
+ if hir::IsAsync::Async == m.header.asyncness {
+ text.push_str("async ");
+ }
+ if let hir::Unsafety::Unsafe = m.header.unsafety {
+ text.push_str("unsafe ");
+ }
+ text.push_str("fn ");
+
+ let mut sig = name_and_generics(text, 0, generics, id, ident, scx)?;
+
+ sig.text.push('(');
+ for i in m.decl.inputs {
+ sig.text.push_str(": ");
+ let nested = i.make(sig.text.len(), Some(i.hir_id), scx)?;
+ sig.text.push_str(&nested.text);
+ sig.text.push(',');
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push(')');
+
+ if let hir::FnRetTy::Return(ref t) = m.decl.output {
+ sig.text.push_str(" -> ");
+ let nested = t.make(sig.text.len(), None, scx)?;
+ sig.text.push_str(&nested.text);
+ sig.defs.extend(nested.defs.into_iter());
+ sig.refs.extend(nested.refs.into_iter());
+ }
+ sig.text.push_str(" {}");
+
+ Ok(sig)
+}
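+
+// Illustration (hypothetical input, derived from the code above): for a
+// method `fn id<T>(x: T) -> T`, `make_method_signature` yields text along
+// the lines of "fn id<T,>(: T,) -> T {}". Parameter names are skipped (see
+// the FIXME about descending into patterns), `defs` covers the ranges of
+// `id` and `T`, and `refs` records the uses of `T` in the nested types.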
diff --git a/compiler/rustc_save_analysis/src/span_utils.rs b/compiler/rustc_save_analysis/src/span_utils.rs
new file mode 100644
index 000000000..8d6758f40
--- /dev/null
+++ b/compiler/rustc_save_analysis/src/span_utils.rs
@@ -0,0 +1,102 @@
+use crate::generated_code;
+use rustc_data_structures::sync::Lrc;
+use rustc_lexer::{tokenize, TokenKind};
+use rustc_session::Session;
+use rustc_span::*;
+
+#[derive(Clone)]
+pub struct SpanUtils<'a> {
+ pub sess: &'a Session,
+}
+
+impl<'a> SpanUtils<'a> {
+ pub fn new(sess: &'a Session) -> SpanUtils<'a> {
+ SpanUtils { sess }
+ }
+
+ pub fn make_filename_string(&self, file: &SourceFile) -> String {
+ match &file.name {
+ FileName::Real(RealFileName::LocalPath(path)) => {
+ if path.is_absolute() {
+ self.sess
+ .source_map()
+ .path_mapping()
+ .map_prefix(path.into())
+ .0
+ .display()
+ .to_string()
+ } else {
+ self.sess
+ .opts
+ .working_dir
+ .remapped_path_if_available()
+ .join(&path)
+ .display()
+ .to_string()
+ }
+ }
+ filename => filename.prefer_remapped().to_string(),
+ }
+ }
+
+ pub fn snippet(&self, span: Span) -> String {
+ match self.sess.source_map().span_to_snippet(span) {
+ Ok(s) => s,
+ Err(_) => String::new(),
+ }
+ }
+
+ /// Finds the span of the `*` token within the larger `span`.
+ pub fn sub_span_of_star(&self, mut span: Span) -> Option<Span> {
+ let begin = self.sess.source_map().lookup_byte_offset(span.lo());
+ let end = self.sess.source_map().lookup_byte_offset(span.hi());
+ // Make the range zero-length if the span is invalid.
+ if begin.sf.start_pos != end.sf.start_pos {
+ span = span.shrink_to_lo();
+ }
+
+ let sf = Lrc::clone(&begin.sf);
+
+ self.sess.source_map().ensure_source_file_source_present(Lrc::clone(&sf));
+ let src =
+ sf.src.clone().or_else(|| sf.external_src.borrow().get_source().map(Lrc::clone))?;
+ let to_index = |pos: BytePos| -> usize { (pos - sf.start_pos).0 as usize };
+ let text = &src[to_index(span.lo())..to_index(span.hi())];
+ let start_pos = {
+ let mut pos = 0;
+ tokenize(text)
+ .map(|token| {
+ let start = pos;
+ pos += token.len;
+ (start, token)
+ })
+ .find(|(_pos, token)| token.kind == TokenKind::Star)?
+ .0
+ };
+ let lo = span.lo() + BytePos(start_pos as u32);
+ let hi = lo + BytePos(1);
+ Some(span.with_lo(lo).with_hi(hi))
+ }
+
+ /// Returns true if the span is generated code, and
+ /// it is not a subspan of the root callsite.
+ ///
+ /// Used to filter out spans of minimal value,
+ /// such as references to macro internal variables.
+ pub fn filter_generated(&self, span: Span) -> bool {
+ if generated_code(span) {
+ return true;
+ }
+
+ // If the span comes from a fake source file, filter it.
+ !self.sess.source_map().lookup_char_pos(span.lo()).file.is_real_file()
+ }
+}
+
+macro_rules! filter {
+ ($util: expr, $parent: expr) => {
+ if $util.filter_generated($parent) {
+ return None;
+ }
+ };
+}
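+
+// Usage sketch (hypothetical caller, not part of the upstream file): inside
+// an `Option`-returning method, `filter!` early-returns `None` for spans
+// that `filter_generated` rejects, e.g.:
+//
+// fn span_for_name(&self, span: Span) -> Option<Span> {
+// filter!(self.span_utils, span);
+// Some(span)
+// }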
diff --git a/compiler/rustc_serialize/Cargo.toml b/compiler/rustc_serialize/Cargo.toml
new file mode 100644
index 000000000..dbc5c1519
--- /dev/null
+++ b/compiler/rustc_serialize/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "rustc_serialize"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+indexmap = "1.9.1"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+
+[dev-dependencies]
+rustc_macros = { path = "../rustc_macros" }
diff --git a/compiler/rustc_serialize/src/collection_impls.rs b/compiler/rustc_serialize/src/collection_impls.rs
new file mode 100644
index 000000000..5e53f0b10
--- /dev/null
+++ b/compiler/rustc_serialize/src/collection_impls.rs
@@ -0,0 +1,267 @@
+//! Implementations of serialization for structures found in `liballoc`.
+
+use std::hash::{BuildHasher, Hash};
+
+use crate::{Decodable, Decoder, Encodable, Encoder};
+use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, LinkedList, VecDeque};
+use std::rc::Rc;
+use std::sync::Arc;
+
+use smallvec::{Array, SmallVec};
+
+impl<S: Encoder, A: Array<Item: Encodable<S>>> Encodable<S> for SmallVec<A> {
+ fn encode(&self, s: &mut S) {
+ let slice: &[A::Item] = self;
+ slice.encode(s);
+ }
+}
+
+impl<D: Decoder, A: Array<Item: Decodable<D>>> Decodable<D> for SmallVec<A> {
+ fn decode(d: &mut D) -> SmallVec<A> {
+ let len = d.read_usize();
+ (0..len).map(|_| Decodable::decode(d)).collect()
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for LinkedList<T> {
+ fn encode(&self, s: &mut S) {
+ s.emit_usize(self.len());
+ for e in self.iter() {
+ e.encode(s);
+ }
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for LinkedList<T> {
+ fn decode(d: &mut D) -> LinkedList<T> {
+ let len = d.read_usize();
+ (0..len).map(|_| Decodable::decode(d)).collect()
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for VecDeque<T> {
+ fn encode(&self, s: &mut S) {
+ s.emit_usize(self.len());
+ for e in self.iter() {
+ e.encode(s);
+ }
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for VecDeque<T> {
+ fn decode(d: &mut D) -> VecDeque<T> {
+ let len = d.read_usize();
+ (0..len).map(|_| Decodable::decode(d)).collect()
+ }
+}
+
+impl<S: Encoder, K, V> Encodable<S> for BTreeMap<K, V>
+where
+ K: Encodable<S> + PartialEq + Ord,
+ V: Encodable<S>,
+{
+ fn encode(&self, e: &mut S) {
+ e.emit_usize(self.len());
+ for (key, val) in self.iter() {
+ key.encode(e);
+ val.encode(e);
+ }
+ }
+}
+
+impl<D: Decoder, K, V> Decodable<D> for BTreeMap<K, V>
+where
+ K: Decodable<D> + PartialEq + Ord,
+ V: Decodable<D>,
+{
+ fn decode(d: &mut D) -> BTreeMap<K, V> {
+ let len = d.read_usize();
+ let mut map = BTreeMap::new();
+ for _ in 0..len {
+ let key = Decodable::decode(d);
+ let val = Decodable::decode(d);
+ map.insert(key, val);
+ }
+ map
+ }
+}
+
+impl<S: Encoder, T> Encodable<S> for BTreeSet<T>
+where
+ T: Encodable<S> + PartialEq + Ord,
+{
+ fn encode(&self, s: &mut S) {
+ s.emit_usize(self.len());
+ for e in self.iter() {
+ e.encode(s);
+ }
+ }
+}
+
+impl<D: Decoder, T> Decodable<D> for BTreeSet<T>
+where
+ T: Decodable<D> + PartialEq + Ord,
+{
+ fn decode(d: &mut D) -> BTreeSet<T> {
+ let len = d.read_usize();
+ let mut set = BTreeSet::new();
+ for _ in 0..len {
+ set.insert(Decodable::decode(d));
+ }
+ set
+ }
+}
+
+impl<E: Encoder, K, V, S> Encodable<E> for HashMap<K, V, S>
+where
+ K: Encodable<E> + Eq,
+ V: Encodable<E>,
+ S: BuildHasher,
+{
+ fn encode(&self, e: &mut E) {
+ e.emit_usize(self.len());
+ for (key, val) in self.iter() {
+ key.encode(e);
+ val.encode(e);
+ }
+ }
+}
+
+impl<D: Decoder, K, V, S> Decodable<D> for HashMap<K, V, S>
+where
+ K: Decodable<D> + Hash + Eq,
+ V: Decodable<D>,
+ S: BuildHasher + Default,
+{
+ fn decode(d: &mut D) -> HashMap<K, V, S> {
+ let len = d.read_usize();
+ let state = Default::default();
+ let mut map = HashMap::with_capacity_and_hasher(len, state);
+ for _ in 0..len {
+ let key = Decodable::decode(d);
+ let val = Decodable::decode(d);
+ map.insert(key, val);
+ }
+ map
+ }
+}
+
+impl<E: Encoder, T, S> Encodable<E> for HashSet<T, S>
+where
+ T: Encodable<E> + Eq,
+ S: BuildHasher,
+{
+ fn encode(&self, s: &mut E) {
+ s.emit_usize(self.len());
+ for e in self.iter() {
+ e.encode(s);
+ }
+ }
+}
+
+impl<D: Decoder, T, S> Decodable<D> for HashSet<T, S>
+where
+ T: Decodable<D> + Hash + Eq,
+ S: BuildHasher + Default,
+{
+ fn decode(d: &mut D) -> HashSet<T, S> {
+ let len = d.read_usize();
+ let state = Default::default();
+ let mut set = HashSet::with_capacity_and_hasher(len, state);
+ for _ in 0..len {
+ set.insert(Decodable::decode(d));
+ }
+ set
+ }
+}
+
+impl<E: Encoder, K, V, S> Encodable<E> for indexmap::IndexMap<K, V, S>
+where
+ K: Encodable<E> + Hash + Eq,
+ V: Encodable<E>,
+ S: BuildHasher,
+{
+ fn encode(&self, e: &mut E) {
+ e.emit_usize(self.len());
+ for (key, val) in self.iter() {
+ key.encode(e);
+ val.encode(e);
+ }
+ }
+}
+
+impl<D: Decoder, K, V, S> Decodable<D> for indexmap::IndexMap<K, V, S>
+where
+ K: Decodable<D> + Hash + Eq,
+ V: Decodable<D>,
+ S: BuildHasher + Default,
+{
+ fn decode(d: &mut D) -> indexmap::IndexMap<K, V, S> {
+ let len = d.read_usize();
+ let state = Default::default();
+ let mut map = indexmap::IndexMap::with_capacity_and_hasher(len, state);
+ for _ in 0..len {
+ let key = Decodable::decode(d);
+ let val = Decodable::decode(d);
+ map.insert(key, val);
+ }
+ map
+ }
+}
+
+impl<E: Encoder, T, S> Encodable<E> for indexmap::IndexSet<T, S>
+where
+ T: Encodable<E> + Hash + Eq,
+ S: BuildHasher,
+{
+ fn encode(&self, s: &mut E) {
+ s.emit_usize(self.len());
+ for e in self.iter() {
+ e.encode(s);
+ }
+ }
+}
+
+impl<D: Decoder, T, S> Decodable<D> for indexmap::IndexSet<T, S>
+where
+ T: Decodable<D> + Hash + Eq,
+ S: BuildHasher + Default,
+{
+ fn decode(d: &mut D) -> indexmap::IndexSet<T, S> {
+ let len = d.read_usize();
+ let state = Default::default();
+ let mut set = indexmap::IndexSet::with_capacity_and_hasher(len, state);
+ for _ in 0..len {
+ set.insert(Decodable::decode(d));
+ }
+ set
+ }
+}
+
+impl<E: Encoder, T: Encodable<E>> Encodable<E> for Rc<[T]> {
+ fn encode(&self, s: &mut E) {
+ let slice: &[T] = self;
+ slice.encode(s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for Rc<[T]> {
+ fn decode(d: &mut D) -> Rc<[T]> {
+ let vec: Vec<T> = Decodable::decode(d);
+ vec.into()
+ }
+}
+
+impl<E: Encoder, T: Encodable<E>> Encodable<E> for Arc<[T]> {
+ fn encode(&self, s: &mut E) {
+ let slice: &[T] = self;
+ slice.encode(s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for Arc<[T]> {
+ fn decode(d: &mut D) -> Arc<[T]> {
+ let vec: Vec<T> = Decodable::decode(d);
+ vec.into()
+ }
+}
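+
+// Round-trip sketch (test-style, not part of the upstream file): every
+// collection above is encoded as a length followed by its elements in
+// iteration order, so encoding through `MemEncoder` and decoding through
+// `MemDecoder` reproduces the original value.
+#[cfg(test)]
+mod round_trip_sketch {
+ use super::*;
+ use crate::opaque::{MemDecoder, MemEncoder};
+
+ #[test]
+ fn btree_map_round_trip() {
+ let map: BTreeMap<u32, String> =
+ [(1, "a".to_owned()), (2, "b".to_owned())].into_iter().collect();
+ let mut enc = MemEncoder::new();
+ map.encode(&mut enc);
+ let data = enc.finish();
+ let decoded = BTreeMap::<u32, String>::decode(&mut MemDecoder::new(&data, 0));
+ assert_eq!(map, decoded);
+ }
+}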
diff --git a/compiler/rustc_serialize/src/leb128.rs b/compiler/rustc_serialize/src/leb128.rs
new file mode 100644
index 000000000..08b3c0542
--- /dev/null
+++ b/compiler/rustc_serialize/src/leb128.rs
@@ -0,0 +1,163 @@
+#![macro_use]
+
+macro_rules! max_leb128_len {
+ ($int_ty:ty) => {
+ // LEB128 stores 7 value bits per byte, so an n-bit integer needs at
+ // most ceil(n / 7) bytes, i.e. (n + 6) / 7 in integer arithmetic.
+ (std::mem::size_of::<$int_ty>() * 8 + 6) / 7
+ };
+}
+
+// Returns the maximum length, in bytes, of the LEB128 encoding among all
+// supported integer types.
+pub const fn max_leb128_len() -> usize {
+ max_leb128_len!(u128)
+}
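+
+// Worked example of the formula above (a compile-time check): u16 needs at
+// most (16 + 6) / 7 = 3 bytes, u64 at most 10, and u128 at most 19.
+const _: () = assert!(max_leb128_len!(u128) == 19);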
+
+macro_rules! impl_write_unsigned_leb128 {
+ ($fn_name:ident, $int_ty:ty) => {
+ #[inline]
+ pub fn $fn_name(
+ out: &mut [::std::mem::MaybeUninit<u8>; max_leb128_len!($int_ty)],
+ mut value: $int_ty,
+ ) -> &[u8] {
+ let mut i = 0;
+
+ loop {
+ if value < 0x80 {
+ unsafe {
+ *out.get_unchecked_mut(i).as_mut_ptr() = value as u8;
+ }
+
+ i += 1;
+ break;
+ } else {
+ unsafe {
+ *out.get_unchecked_mut(i).as_mut_ptr() = ((value & 0x7f) | 0x80) as u8;
+ }
+
+ value >>= 7;
+ i += 1;
+ }
+ }
+
+ unsafe { ::std::mem::MaybeUninit::slice_assume_init_ref(&out.get_unchecked(..i)) }
+ }
+ };
+}
+
+impl_write_unsigned_leb128!(write_u16_leb128, u16);
+impl_write_unsigned_leb128!(write_u32_leb128, u32);
+impl_write_unsigned_leb128!(write_u64_leb128, u64);
+impl_write_unsigned_leb128!(write_u128_leb128, u128);
+impl_write_unsigned_leb128!(write_usize_leb128, usize);
+
+macro_rules! impl_read_unsigned_leb128 {
+ ($fn_name:ident, $int_ty:ty) => {
+ #[inline]
+ pub fn $fn_name(slice: &[u8], position: &mut usize) -> $int_ty {
+ // The first iteration of the loop is peeled off and handled before
+ // the `loop` below. This is a performance win because this code is
+ // hot and integer values less than 128 are very common, typically
+ // occurring 50-80% or more of the time, even for u64 and u128.
+ let byte = slice[*position];
+ *position += 1;
+ if (byte & 0x80) == 0 {
+ return byte as $int_ty;
+ }
+ let mut result = (byte & 0x7F) as $int_ty;
+ let mut shift = 7;
+ loop {
+ let byte = slice[*position];
+ *position += 1;
+ if (byte & 0x80) == 0 {
+ result |= (byte as $int_ty) << shift;
+ return result;
+ } else {
+ result |= ((byte & 0x7F) as $int_ty) << shift;
+ }
+ shift += 7;
+ }
+ }
+ };
+}
+
+impl_read_unsigned_leb128!(read_u16_leb128, u16);
+impl_read_unsigned_leb128!(read_u32_leb128, u32);
+impl_read_unsigned_leb128!(read_u64_leb128, u64);
+impl_read_unsigned_leb128!(read_u128_leb128, u128);
+impl_read_unsigned_leb128!(read_usize_leb128, usize);
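+
+// Round-trip sketch (test-style, not part of the upstream file): 300 is
+// 0b1_0010_1100, so it encodes as [0xAC, 0x02]: the low seven bits (0x2C)
+// with the continuation bit (0x80) set, then the remaining bits (0x02).
+#[cfg(test)]
+mod unsigned_round_trip_sketch {
+ use super::*;
+
+ #[test]
+ fn u32_round_trip() {
+ let mut buf = [std::mem::MaybeUninit::<u8>::uninit(); max_leb128_len!(u32)];
+ let encoded = write_u32_leb128(&mut buf, 300);
+ assert_eq!(encoded, &[0xAC, 0x02]);
+ let mut position = 0;
+ assert_eq!(read_u32_leb128(encoded, &mut position), 300);
+ assert_eq!(position, 2);
+ }
+}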
+
+macro_rules! impl_write_signed_leb128 {
+ ($fn_name:ident, $int_ty:ty) => {
+ #[inline]
+ pub fn $fn_name(
+ out: &mut [::std::mem::MaybeUninit<u8>; max_leb128_len!($int_ty)],
+ mut value: $int_ty,
+ ) -> &[u8] {
+ let mut i = 0;
+
+ loop {
+ let mut byte = (value as u8) & 0x7f;
+ value >>= 7;
+ let more = !(((value == 0) && ((byte & 0x40) == 0))
+ || ((value == -1) && ((byte & 0x40) != 0)));
+
+ if more {
+ byte |= 0x80; // Mark this byte to show that more bytes will follow.
+ }
+
+ unsafe {
+ *out.get_unchecked_mut(i).as_mut_ptr() = byte;
+ }
+
+ i += 1;
+
+ if !more {
+ break;
+ }
+ }
+
+ unsafe { ::std::mem::MaybeUninit::slice_assume_init_ref(&out.get_unchecked(..i)) }
+ }
+ };
+}
+
+impl_write_signed_leb128!(write_i16_leb128, i16);
+impl_write_signed_leb128!(write_i32_leb128, i32);
+impl_write_signed_leb128!(write_i64_leb128, i64);
+impl_write_signed_leb128!(write_i128_leb128, i128);
+impl_write_signed_leb128!(write_isize_leb128, isize);
+
+macro_rules! impl_read_signed_leb128 {
+ ($fn_name:ident, $int_ty:ty) => {
+ #[inline]
+ pub fn $fn_name(slice: &[u8], position: &mut usize) -> $int_ty {
+ let mut result = 0;
+ let mut shift = 0;
+ let mut byte;
+
+ loop {
+ byte = slice[*position];
+ *position += 1;
+ result |= <$int_ty>::from(byte & 0x7F) << shift;
+ shift += 7;
+
+ if (byte & 0x80) == 0 {
+ break;
+ }
+ }
+
+ if (shift < <$int_ty>::BITS) && ((byte & 0x40) != 0) {
+ // sign extend
+ result |= (!0 << shift);
+ }
+
+ result
+ }
+ };
+}
+
+impl_read_signed_leb128!(read_i16_leb128, i16);
+impl_read_signed_leb128!(read_i32_leb128, i32);
+impl_read_signed_leb128!(read_i64_leb128, i64);
+impl_read_signed_leb128!(read_i128_leb128, i128);
+impl_read_signed_leb128!(read_isize_leb128, isize);
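+
+// Edge-case sketch for the signed codec (test-style, not part of the
+// upstream file): -1 fits in a single byte, 0x7F, because bit 6 (0x40) is
+// set and the reader sign-extends; +64 needs a second byte, [0xC0, 0x00],
+// since 0x40 alone would read back as negative.
+#[cfg(test)]
+mod signed_round_trip_sketch {
+ use super::*;
+
+ #[test]
+ fn i32_edge_values() {
+ let mut buf = [std::mem::MaybeUninit::<u8>::uninit(); max_leb128_len!(i32)];
+ assert_eq!(write_i32_leb128(&mut buf, -1), &[0x7F]);
+ assert_eq!(write_i32_leb128(&mut buf, 64), &[0xC0, 0x00]);
+ let mut position = 0;
+ assert_eq!(read_i32_leb128(&[0x7F], &mut position), -1);
+ }
+}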
diff --git a/compiler/rustc_serialize/src/lib.rs b/compiler/rustc_serialize/src/lib.rs
new file mode 100644
index 000000000..e606f4273
--- /dev/null
+++ b/compiler/rustc_serialize/src/lib.rs
@@ -0,0 +1,28 @@
+//! Support code for encoding and decoding types.
+//!
+//! Core encoding and decoding interfaces.
+
+#![doc(
+ html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
+ html_playground_url = "https://play.rust-lang.org/",
+ test(attr(allow(unused_variables), deny(warnings)))
+)]
+#![feature(never_type)]
+#![feature(associated_type_bounds)]
+#![feature(min_specialization)]
+#![feature(core_intrinsics)]
+#![feature(maybe_uninit_slice)]
+#![feature(let_else)]
+#![feature(new_uninit)]
+#![cfg_attr(test, feature(test))]
+#![allow(rustc::internal)]
+
+pub use self::serialize::{Decodable, Decoder, Encodable, Encoder};
+
+mod collection_impls;
+mod serialize;
+
+pub mod leb128;
+pub mod opaque;
diff --git a/compiler/rustc_serialize/src/opaque.rs b/compiler/rustc_serialize/src/opaque.rs
new file mode 100644
index 000000000..5c17ef6ac
--- /dev/null
+++ b/compiler/rustc_serialize/src/opaque.rs
@@ -0,0 +1,750 @@
+use crate::leb128::{self, max_leb128_len};
+use crate::serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::convert::TryInto;
+use std::fs::File;
+use std::io::{self, Write};
+use std::mem::MaybeUninit;
+use std::path::Path;
+use std::ptr;
+
+// -----------------------------------------------------------------------------
+// Encoder
+// -----------------------------------------------------------------------------
+
+pub struct MemEncoder {
+ pub data: Vec<u8>,
+}
+
+impl MemEncoder {
+ pub fn new() -> MemEncoder {
+ MemEncoder { data: vec![] }
+ }
+
+ #[inline]
+ pub fn position(&self) -> usize {
+ self.data.len()
+ }
+
+ pub fn finish(self) -> Vec<u8> {
+ self.data
+ }
+}
+
+macro_rules! write_leb128 {
+ ($enc:expr, $value:expr, $int_ty:ty, $fun:ident) => {{
+ const MAX_ENCODED_LEN: usize = max_leb128_len!($int_ty);
+ let old_len = $enc.data.len();
+
+ if MAX_ENCODED_LEN > $enc.data.capacity() - old_len {
+ $enc.data.reserve(MAX_ENCODED_LEN);
+ }
+
+ // SAFETY: The above check and `reserve` ensure that there is enough
+ // room to write the encoded value to the vector's internal buffer.
+ unsafe {
+ let buf = &mut *($enc.data.as_mut_ptr().add(old_len)
+ as *mut [MaybeUninit<u8>; MAX_ENCODED_LEN]);
+ let encoded = leb128::$fun(buf, $value);
+ $enc.data.set_len(old_len + encoded.len());
+ }
+ }};
+}
+
+/// A byte that [cannot occur in UTF-8 sequences][utf8]. Used to mark the end of a string.
+/// This way we can skip validation and still be relatively sure that deserialization
+/// did not desynchronize.
+///
+/// [utf8]: https://en.wikipedia.org/w/index.php?title=UTF-8&oldid=1058865525#Codepage_layout
+const STR_SENTINEL: u8 = 0xC1;
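+
+// Layout sketch: `emit_str("hi")` produces [0x02, b'h', b'i', 0xC1], i.e. the
+// LEB128 length, the raw bytes, then the sentinel. `read_str` asserts the
+// sentinel before borrowing the bytes without re-validating UTF-8.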
+
+impl Encoder for MemEncoder {
+ #[inline]
+ fn emit_usize(&mut self, v: usize) {
+ write_leb128!(self, v, usize, write_usize_leb128)
+ }
+
+ #[inline]
+ fn emit_u128(&mut self, v: u128) {
+ write_leb128!(self, v, u128, write_u128_leb128);
+ }
+
+ #[inline]
+ fn emit_u64(&mut self, v: u64) {
+ write_leb128!(self, v, u64, write_u64_leb128);
+ }
+
+ #[inline]
+ fn emit_u32(&mut self, v: u32) {
+ write_leb128!(self, v, u32, write_u32_leb128);
+ }
+
+ #[inline]
+ fn emit_u16(&mut self, v: u16) {
+ self.data.extend_from_slice(&v.to_le_bytes());
+ }
+
+ #[inline]
+ fn emit_u8(&mut self, v: u8) {
+ self.data.push(v);
+ }
+
+ #[inline]
+ fn emit_isize(&mut self, v: isize) {
+ write_leb128!(self, v, isize, write_isize_leb128)
+ }
+
+ #[inline]
+ fn emit_i128(&mut self, v: i128) {
+ write_leb128!(self, v, i128, write_i128_leb128)
+ }
+
+ #[inline]
+ fn emit_i64(&mut self, v: i64) {
+ write_leb128!(self, v, i64, write_i64_leb128)
+ }
+
+ #[inline]
+ fn emit_i32(&mut self, v: i32) {
+ write_leb128!(self, v, i32, write_i32_leb128)
+ }
+
+ #[inline]
+ fn emit_i16(&mut self, v: i16) {
+ self.data.extend_from_slice(&v.to_le_bytes());
+ }
+
+ #[inline]
+ fn emit_i8(&mut self, v: i8) {
+ self.emit_u8(v as u8);
+ }
+
+ #[inline]
+ fn emit_bool(&mut self, v: bool) {
+ self.emit_u8(if v { 1 } else { 0 });
+ }
+
+ #[inline]
+ fn emit_f64(&mut self, v: f64) {
+ let as_u64: u64 = v.to_bits();
+ self.emit_u64(as_u64);
+ }
+
+ #[inline]
+ fn emit_f32(&mut self, v: f32) {
+ let as_u32: u32 = v.to_bits();
+ self.emit_u32(as_u32);
+ }
+
+ #[inline]
+ fn emit_char(&mut self, v: char) {
+ self.emit_u32(v as u32);
+ }
+
+ #[inline]
+ fn emit_str(&mut self, v: &str) {
+ self.emit_usize(v.len());
+ self.emit_raw_bytes(v.as_bytes());
+ self.emit_u8(STR_SENTINEL);
+ }
+
+ #[inline]
+ fn emit_raw_bytes(&mut self, s: &[u8]) {
+ self.data.extend_from_slice(s);
+ }
+}
+
+pub type FileEncodeResult = Result<usize, io::Error>;
+
+// `FileEncoder` encodes data to a file via a fixed-size buffer.
+//
+// When encoding large amounts of data to a file, using `FileEncoder` may be
+// preferred over using `MemEncoder` to encode to a `Vec`, and then writing the
+// `Vec` to file, as the latter uses as much memory as there is encoded data,
+// while the former uses the fixed amount of memory allocated to the buffer.
+// `FileEncoder` also has the advantage of not needing to reallocate as data
+// is appended to it, but the disadvantage of requiring more error handling,
+// which has some runtime overhead.
+pub struct FileEncoder {
+ // The write buffer. For adequate performance, we need more control over
+ // buffering than `BufWriter` offers. If `BufWriter` ever offers a raw
+ // buffer access API, we can use it, and remove `buf` and `buffered`.
+ buf: Box<[MaybeUninit<u8>]>,
+ buffered: usize,
+ flushed: usize,
+ file: File,
+ // This is used to implement delayed error handling, as described in the
+ // comment on `trait Encoder`.
+ res: Result<(), io::Error>,
+}
+
+impl FileEncoder {
+ pub fn new<P: AsRef<Path>>(path: P) -> io::Result<Self> {
+ const DEFAULT_BUF_SIZE: usize = 8192;
+ FileEncoder::with_capacity(path, DEFAULT_BUF_SIZE)
+ }
+
+ pub fn with_capacity<P: AsRef<Path>>(path: P, capacity: usize) -> io::Result<Self> {
+ // Require capacity at least as large as the largest LEB128 encoding
+ // here, so that we don't have to check or handle this on every write.
+ assert!(capacity >= max_leb128_len());
+
+ // Require capacity small enough such that some capacity checks can be
+ // done using guaranteed non-overflowing add rather than sub, which
+ // shaves an instruction off those code paths (on x86 at least).
+ assert!(capacity <= usize::MAX - max_leb128_len());
+
+ let file = File::create(path)?;
+
+ Ok(FileEncoder {
+ buf: Box::new_uninit_slice(capacity),
+ buffered: 0,
+ flushed: 0,
+ file,
+ res: Ok(()),
+ })
+ }
+
+ #[inline]
+ pub fn position(&self) -> usize {
+ // Tracking position this way instead of having a `self.position` field
+ // means that we don't have to update the position on every write call.
+ self.flushed + self.buffered
+ }
+
+ pub fn flush(&mut self) {
+ // This is basically a copy of `BufWriter::flush`. If `BufWriter` ever
+ // offers a raw buffer access API, we can use it, and remove this.
+
+ /// Helper struct to ensure the buffer is updated after all the writes
+ /// are complete. It tracks the number of written bytes and drains them
+ /// all from the front of the buffer when dropped.
+ struct BufGuard<'a> {
+ buffer: &'a mut [u8],
+ encoder_buffered: &'a mut usize,
+ encoder_flushed: &'a mut usize,
+ flushed: usize,
+ }
+
+ impl<'a> BufGuard<'a> {
+ fn new(
+ buffer: &'a mut [u8],
+ encoder_buffered: &'a mut usize,
+ encoder_flushed: &'a mut usize,
+ ) -> Self {
+ assert_eq!(buffer.len(), *encoder_buffered);
+ Self { buffer, encoder_buffered, encoder_flushed, flushed: 0 }
+ }
+
+ /// The unwritten part of the buffer.
+ fn remaining(&self) -> &[u8] {
+ &self.buffer[self.flushed..]
+ }
+
+ /// Flags some bytes as removed from the front of the buffer.
+ fn consume(&mut self, amt: usize) {
+ self.flushed += amt;
+ }
+
+ /// Returns true if all of the bytes have been written.
+ fn done(&self) -> bool {
+ self.flushed >= *self.encoder_buffered
+ }
+ }
+
+ impl Drop for BufGuard<'_> {
+ fn drop(&mut self) {
+ if self.flushed > 0 {
+ if self.done() {
+ *self.encoder_flushed += *self.encoder_buffered;
+ *self.encoder_buffered = 0;
+ } else {
+ self.buffer.copy_within(self.flushed.., 0);
+ *self.encoder_flushed += self.flushed;
+ *self.encoder_buffered -= self.flushed;
+ }
+ }
+ }
+ }
+
+ // If we've already had an error, do nothing. It'll get reported after
+ // `finish` is called.
+ if self.res.is_err() {
+ return;
+ }
+
+ let mut guard = BufGuard::new(
+ unsafe { MaybeUninit::slice_assume_init_mut(&mut self.buf[..self.buffered]) },
+ &mut self.buffered,
+ &mut self.flushed,
+ );
+
+ while !guard.done() {
+ match self.file.write(guard.remaining()) {
+ Ok(0) => {
+ self.res = Err(io::Error::new(
+ io::ErrorKind::WriteZero,
+ "failed to write the buffered data",
+ ));
+ return;
+ }
+ Ok(n) => guard.consume(n),
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(e) => {
+ self.res = Err(e);
+ return;
+ }
+ }
+ }
+ }
+
+ pub fn file(&self) -> &File {
+ &self.file
+ }
+
+ #[inline]
+ fn capacity(&self) -> usize {
+ self.buf.len()
+ }
+
+ #[inline]
+ fn write_one(&mut self, value: u8) {
+ // We ensure this during `FileEncoder` construction.
+ debug_assert!(self.capacity() >= 1);
+
+ let mut buffered = self.buffered;
+
+ if std::intrinsics::unlikely(buffered >= self.capacity()) {
+ self.flush();
+ buffered = 0;
+ }
+
+ // SAFETY: The above check and `flush` ensure that there is enough
+ // room to write the input to the buffer.
+ unsafe {
+ *MaybeUninit::slice_as_mut_ptr(&mut self.buf).add(buffered) = value;
+ }
+
+ self.buffered = buffered + 1;
+ }
+
+ #[inline]
+ fn write_all(&mut self, buf: &[u8]) {
+ let capacity = self.capacity();
+ let buf_len = buf.len();
+
+ if std::intrinsics::likely(buf_len <= capacity) {
+ let mut buffered = self.buffered;
+
+ if std::intrinsics::unlikely(buf_len > capacity - buffered) {
+ self.flush();
+ buffered = 0;
+ }
+
+ // SAFETY: The above check and `flush` ensure that there is enough
+ // room to write the input to the buffer.
+ unsafe {
+ let src = buf.as_ptr();
+ let dst = MaybeUninit::slice_as_mut_ptr(&mut self.buf).add(buffered);
+ ptr::copy_nonoverlapping(src, dst, buf_len);
+ }
+
+ self.buffered = buffered + buf_len;
+ } else {
+ self.write_all_unbuffered(buf);
+ }
+ }
+
+ fn write_all_unbuffered(&mut self, mut buf: &[u8]) {
+ // If we've already had an error, do nothing. It'll get reported after
+ // `finish` is called.
+ if self.res.is_err() {
+ return;
+ }
+
+ if self.buffered > 0 {
+ self.flush();
+ }
+
+ // This is basically a copy of `Write::write_all` but also updates our
+ // `self.flushed`. It's necessary because `Write::write_all` does not
+ // return the number of bytes written when an error is encountered, and
+ // without that, we cannot accurately update `self.flushed` on error.
+ while !buf.is_empty() {
+ match self.file.write(buf) {
+ Ok(0) => {
+ self.res = Err(io::Error::new(
+ io::ErrorKind::WriteZero,
+ "failed to write whole buffer",
+ ));
+ return;
+ }
+ Ok(n) => {
+ buf = &buf[n..];
+ self.flushed += n;
+ }
+ Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
+ Err(e) => {
+ self.res = Err(e);
+ return;
+ }
+ }
+ }
+ }
+
+ pub fn finish(mut self) -> Result<usize, io::Error> {
+ self.flush();
+
+ let res = std::mem::replace(&mut self.res, Ok(()));
+ res.map(|()| self.position())
+ }
+}
+
+impl Drop for FileEncoder {
+ fn drop(&mut self) {
+ // Likely to be a no-op, because `finish` should have been called and
+ // it also flushes. But do it just in case.
+ self.flush();
+ }
+}
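+
+// Usage sketch (hypothetical path, not part of the upstream file) of the
+// delayed error handling described on `trait Encoder`: individual writes are
+// infallible at the call site, and the first recorded I/O error surfaces
+// once, from `finish`:
+//
+// fn write_example() -> io::Result<usize> {
+// let mut enc = FileEncoder::new("/tmp/example.bin")?;
+// enc.emit_usize(42);
+// enc.emit_str("payload");
+// enc.finish()
+// }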
+
+macro_rules! file_encoder_write_leb128 {
+ ($enc:expr, $value:expr, $int_ty:ty, $fun:ident) => {{
+ const MAX_ENCODED_LEN: usize = max_leb128_len!($int_ty);
+
+ // We ensure this during `FileEncoder` construction.
+ debug_assert!($enc.capacity() >= MAX_ENCODED_LEN);
+
+ let mut buffered = $enc.buffered;
+
+ // This can't overflow. See assertion in `FileEncoder::with_capacity`.
+ if std::intrinsics::unlikely(buffered + MAX_ENCODED_LEN > $enc.capacity()) {
+ $enc.flush();
+ buffered = 0;
+ }
+
+ // SAFETY: The above check and flush ensure that there is enough
+ // room to write the encoded value to the buffer.
+ let buf = unsafe {
+ &mut *($enc.buf.as_mut_ptr().add(buffered) as *mut [MaybeUninit<u8>; MAX_ENCODED_LEN])
+ };
+
+ let encoded = leb128::$fun(buf, $value);
+ $enc.buffered = buffered + encoded.len();
+ }};
+}
+
+impl Encoder for FileEncoder {
+ #[inline]
+ fn emit_usize(&mut self, v: usize) {
+ file_encoder_write_leb128!(self, v, usize, write_usize_leb128)
+ }
+
+ #[inline]
+ fn emit_u128(&mut self, v: u128) {
+ file_encoder_write_leb128!(self, v, u128, write_u128_leb128)
+ }
+
+ #[inline]
+ fn emit_u64(&mut self, v: u64) {
+ file_encoder_write_leb128!(self, v, u64, write_u64_leb128)
+ }
+
+ #[inline]
+ fn emit_u32(&mut self, v: u32) {
+ file_encoder_write_leb128!(self, v, u32, write_u32_leb128)
+ }
+
+ #[inline]
+ fn emit_u16(&mut self, v: u16) {
+ self.write_all(&v.to_le_bytes());
+ }
+
+ #[inline]
+ fn emit_u8(&mut self, v: u8) {
+ self.write_one(v);
+ }
+
+ #[inline]
+ fn emit_isize(&mut self, v: isize) {
+ file_encoder_write_leb128!(self, v, isize, write_isize_leb128)
+ }
+
+ #[inline]
+ fn emit_i128(&mut self, v: i128) {
+ file_encoder_write_leb128!(self, v, i128, write_i128_leb128)
+ }
+
+ #[inline]
+ fn emit_i64(&mut self, v: i64) {
+ file_encoder_write_leb128!(self, v, i64, write_i64_leb128)
+ }
+
+ #[inline]
+ fn emit_i32(&mut self, v: i32) {
+ file_encoder_write_leb128!(self, v, i32, write_i32_leb128)
+ }
+
+ #[inline]
+ fn emit_i16(&mut self, v: i16) {
+ self.write_all(&v.to_le_bytes());
+ }
+
+ #[inline]
+ fn emit_i8(&mut self, v: i8) {
+ self.emit_u8(v as u8);
+ }
+
+ #[inline]
+ fn emit_bool(&mut self, v: bool) {
+ self.emit_u8(if v { 1 } else { 0 });
+ }
+
+ #[inline]
+ fn emit_f64(&mut self, v: f64) {
+ let as_u64: u64 = v.to_bits();
+ self.emit_u64(as_u64);
+ }
+
+ #[inline]
+ fn emit_f32(&mut self, v: f32) {
+ let as_u32: u32 = v.to_bits();
+ self.emit_u32(as_u32);
+ }
+
+ #[inline]
+ fn emit_char(&mut self, v: char) {
+ self.emit_u32(v as u32);
+ }
+
+ #[inline]
+ fn emit_str(&mut self, v: &str) {
+ self.emit_usize(v.len());
+ self.emit_raw_bytes(v.as_bytes());
+ self.emit_u8(STR_SENTINEL);
+ }
+
+ #[inline]
+ fn emit_raw_bytes(&mut self, s: &[u8]) {
+ self.write_all(s);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Decoder
+// -----------------------------------------------------------------------------
+
+pub struct MemDecoder<'a> {
+ pub data: &'a [u8],
+ position: usize,
+}
+
+impl<'a> MemDecoder<'a> {
+ #[inline]
+ pub fn new(data: &'a [u8], position: usize) -> MemDecoder<'a> {
+ MemDecoder { data, position }
+ }
+
+ #[inline]
+ pub fn position(&self) -> usize {
+ self.position
+ }
+
+ #[inline]
+ pub fn set_position(&mut self, pos: usize) {
+ self.position = pos
+ }
+
+ #[inline]
+ pub fn advance(&mut self, bytes: usize) {
+ self.position += bytes;
+ }
+}
+
+macro_rules! read_leb128 {
+ ($dec:expr, $fun:ident) => {{ leb128::$fun($dec.data, &mut $dec.position) }};
+}
+
+impl<'a> Decoder for MemDecoder<'a> {
+ #[inline]
+ fn read_u128(&mut self) -> u128 {
+ read_leb128!(self, read_u128_leb128)
+ }
+
+ #[inline]
+ fn read_u64(&mut self) -> u64 {
+ read_leb128!(self, read_u64_leb128)
+ }
+
+ #[inline]
+ fn read_u32(&mut self) -> u32 {
+ read_leb128!(self, read_u32_leb128)
+ }
+
+ #[inline]
+ fn read_u16(&mut self) -> u16 {
+ let bytes = [self.data[self.position], self.data[self.position + 1]];
+ let value = u16::from_le_bytes(bytes);
+ self.position += 2;
+ value
+ }
+
+ #[inline]
+ fn read_u8(&mut self) -> u8 {
+ let value = self.data[self.position];
+ self.position += 1;
+ value
+ }
+
+ #[inline]
+ fn read_usize(&mut self) -> usize {
+ read_leb128!(self, read_usize_leb128)
+ }
+
+ #[inline]
+ fn read_i128(&mut self) -> i128 {
+ read_leb128!(self, read_i128_leb128)
+ }
+
+ #[inline]
+ fn read_i64(&mut self) -> i64 {
+ read_leb128!(self, read_i64_leb128)
+ }
+
+ #[inline]
+ fn read_i32(&mut self) -> i32 {
+ read_leb128!(self, read_i32_leb128)
+ }
+
+ #[inline]
+ fn read_i16(&mut self) -> i16 {
+ let bytes = [self.data[self.position], self.data[self.position + 1]];
+ let value = i16::from_le_bytes(bytes);
+ self.position += 2;
+ value
+ }
+
+ #[inline]
+ fn read_i8(&mut self) -> i8 {
+ let value = self.data[self.position];
+ self.position += 1;
+ value as i8
+ }
+
+ #[inline]
+ fn read_isize(&mut self) -> isize {
+ read_leb128!(self, read_isize_leb128)
+ }
+
+ #[inline]
+ fn read_bool(&mut self) -> bool {
+ let value = self.read_u8();
+ value != 0
+ }
+
+ #[inline]
+ fn read_f64(&mut self) -> f64 {
+ let bits = self.read_u64();
+ f64::from_bits(bits)
+ }
+
+ #[inline]
+ fn read_f32(&mut self) -> f32 {
+ let bits = self.read_u32();
+ f32::from_bits(bits)
+ }
+
+ #[inline]
+ fn read_char(&mut self) -> char {
+ let bits = self.read_u32();
+ std::char::from_u32(bits).unwrap()
+ }
+
+ #[inline]
+ fn read_str(&mut self) -> &'a str {
+ let len = self.read_usize();
+ let sentinel = self.data[self.position + len];
+ assert_eq!(sentinel, STR_SENTINEL);
+ let s = unsafe {
+ std::str::from_utf8_unchecked(&self.data[self.position..self.position + len])
+ };
+ self.position += len + 1;
+ s
+ }
+
+ #[inline]
+ fn read_raw_bytes(&mut self, bytes: usize) -> &'a [u8] {
+ let start = self.position;
+ self.position += bytes;
+ &self.data[start..self.position]
+ }
+}
+
+// Specializations for contiguous byte sequences follow. The default implementations for slices
+// encode and decode each element individually. This isn't necessary for `u8` slices when using
+// opaque encoders and decoders, because each `u8` is unchanged by encoding and decoding.
+// Therefore, we can use more efficient implementations that process the entire sequence at once.
+
+// Specialize encoding byte slices. This specialization also applies to encoding `Vec<u8>`s, etc.,
+// since the default implementations call `encode` on their slices internally.
+impl Encodable<MemEncoder> for [u8] {
+ fn encode(&self, e: &mut MemEncoder) {
+ Encoder::emit_usize(e, self.len());
+ e.emit_raw_bytes(self);
+ }
+}
+
+impl Encodable<FileEncoder> for [u8] {
+ fn encode(&self, e: &mut FileEncoder) {
+ Encoder::emit_usize(e, self.len());
+ e.emit_raw_bytes(self);
+ }
+}
+
+// Specialize decoding `Vec<u8>`. This specialization also applies to decoding `Box<[u8]>`s, etc.,
+// since the default implementations call `decode` to produce a `Vec<u8>` internally.
+impl<'a> Decodable<MemDecoder<'a>> for Vec<u8> {
+ fn decode(d: &mut MemDecoder<'a>) -> Self {
+ let len = Decoder::read_usize(d);
+ d.read_raw_bytes(len).to_owned()
+ }
+}
+
+/// An integer that will always encode to 8 bytes.
+pub struct IntEncodedWithFixedSize(pub u64);
+
+impl IntEncodedWithFixedSize {
+ pub const ENCODED_SIZE: usize = 8;
+}
+
+impl Encodable<MemEncoder> for IntEncodedWithFixedSize {
+ #[inline]
+ fn encode(&self, e: &mut MemEncoder) {
+ let _start_pos = e.position();
+ e.emit_raw_bytes(&self.0.to_le_bytes());
+ let _end_pos = e.position();
+ debug_assert_eq!((_end_pos - _start_pos), IntEncodedWithFixedSize::ENCODED_SIZE);
+ }
+}
+
+impl Encodable<FileEncoder> for IntEncodedWithFixedSize {
+ #[inline]
+ fn encode(&self, e: &mut FileEncoder) {
+ let _start_pos = e.position();
+ e.emit_raw_bytes(&self.0.to_le_bytes());
+ let _end_pos = e.position();
+ debug_assert_eq!((_end_pos - _start_pos), IntEncodedWithFixedSize::ENCODED_SIZE);
+ }
+}
+
+impl<'a> Decodable<MemDecoder<'a>> for IntEncodedWithFixedSize {
+ #[inline]
+ fn decode(decoder: &mut MemDecoder<'a>) -> IntEncodedWithFixedSize {
+ let _start_pos = decoder.position();
+ let bytes = decoder.read_raw_bytes(IntEncodedWithFixedSize::ENCODED_SIZE);
+ let value = u64::from_le_bytes(bytes.try_into().unwrap());
+ let _end_pos = decoder.position();
+ debug_assert_eq!((_end_pos - _start_pos), IntEncodedWithFixedSize::ENCODED_SIZE);
+
+ IntEncodedWithFixedSize(value)
+ }
+}
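+
+// End-to-end sketch (test-style, not part of the upstream file): values
+// written by `MemEncoder` read back in the same order through `MemDecoder`,
+// including the specialized `[u8]`/`Vec<u8>` fast path above.
+#[cfg(test)]
+mod opaque_round_trip_sketch {
+ use super::*;
+
+ #[test]
+ fn round_trip() {
+ let mut enc = MemEncoder::new();
+ enc.emit_u32(0xDEAD_BEEF);
+ enc.emit_str("ok");
+ vec![1u8, 2, 3].encode(&mut enc);
+ let data = enc.finish();
+
+ let mut dec = MemDecoder::new(&data, 0);
+ assert_eq!(dec.read_u32(), 0xDEAD_BEEF);
+ assert_eq!(dec.read_str(), "ok");
+ assert_eq!(Vec::<u8>::decode(&mut dec), vec![1, 2, 3]);
+ }
+}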
diff --git a/compiler/rustc_serialize/src/serialize.rs b/compiler/rustc_serialize/src/serialize.rs
new file mode 100644
index 000000000..36585b8d7
--- /dev/null
+++ b/compiler/rustc_serialize/src/serialize.rs
@@ -0,0 +1,469 @@
+//! Support code for encoding and decoding types.
+//!
+//! Core encoding and decoding interfaces.
+
+use std::borrow::Cow;
+use std::cell::{Cell, RefCell};
+use std::marker::PhantomData;
+use std::path;
+use std::rc::Rc;
+use std::sync::Arc;
+
+/// A note about error handling.
+///
+/// Encoders may be fallible, but in practice failure is rare and there are so
+/// many nested calls that typical Rust error handling (via `Result` and `?`)
+/// is pervasive and has non-trivial cost. Instead, impls of this trait must
+/// implement a delayed error handling strategy. If a failure occurs, they
+/// should record this internally, and all subsequent encoding operations can
+/// be processed or ignored, whichever is appropriate. Then they should provide
+/// a `finish` method that finishes up encoding. If the encoder is fallible,
+/// `finish` should return a `Result` that indicates success or failure.
+pub trait Encoder {
+ // Primitive types:
+ fn emit_usize(&mut self, v: usize);
+ fn emit_u128(&mut self, v: u128);
+ fn emit_u64(&mut self, v: u64);
+ fn emit_u32(&mut self, v: u32);
+ fn emit_u16(&mut self, v: u16);
+ fn emit_u8(&mut self, v: u8);
+ fn emit_isize(&mut self, v: isize);
+ fn emit_i128(&mut self, v: i128);
+ fn emit_i64(&mut self, v: i64);
+ fn emit_i32(&mut self, v: i32);
+ fn emit_i16(&mut self, v: i16);
+ fn emit_i8(&mut self, v: i8);
+ fn emit_bool(&mut self, v: bool);
+ fn emit_f64(&mut self, v: f64);
+ fn emit_f32(&mut self, v: f32);
+ fn emit_char(&mut self, v: char);
+ fn emit_str(&mut self, v: &str);
+ fn emit_raw_bytes(&mut self, s: &[u8]);
+
+ // Convenience for the derive macro:
+ fn emit_enum_variant<F>(&mut self, v_id: usize, f: F)
+ where
+ F: FnOnce(&mut Self),
+ {
+ self.emit_usize(v_id);
+ f(self);
+ }
+
+ // We put the field index in a const generic to allow the emit_usize to be
+ // compiled into a more efficient form. In practice, the variant index is
+ // known at compile-time, and that knowledge allows much more efficient
+ // codegen than we'd otherwise get. LLVM isn't always able to make the
+ // optimization that would otherwise be necessary here, likely due to the
+ // multiple levels of inlining and const-prop that are needed.
+ #[inline]
+ fn emit_fieldless_enum_variant<const ID: usize>(&mut self) {
+ self.emit_usize(ID)
+ }
+}
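+
+// Sketch of what a derived impl for a fieldless enum looks like (hypothetical
+// enum; the real impls come from the derive macros in `rustc_macros`), using
+// the const-generic variant index described above:
+//
+// enum Mode { Read, Write }
+//
+// impl<S: Encoder> Encodable<S> for Mode {
+// fn encode(&self, s: &mut S) {
+// match self {
+// Mode::Read => s.emit_fieldless_enum_variant::<0>(),
+// Mode::Write => s.emit_fieldless_enum_variant::<1>(),
+// }
+// }
+// }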
+
+// Note: all the methods in this trait are infallible, which may be surprising.
+// They used to be fallible (i.e. return a `Result`) but many of the impls just
+// panicked when something went wrong, and for the cases that didn't the
+// top-level invocation would also just panic on failure. Switching to
+// infallibility made things faster and lots of code a little simpler and more
+// concise.
+pub trait Decoder {
+ // Primitive types:
+ fn read_usize(&mut self) -> usize;
+ fn read_u128(&mut self) -> u128;
+ fn read_u64(&mut self) -> u64;
+ fn read_u32(&mut self) -> u32;
+ fn read_u16(&mut self) -> u16;
+ fn read_u8(&mut self) -> u8;
+ fn read_isize(&mut self) -> isize;
+ fn read_i128(&mut self) -> i128;
+ fn read_i64(&mut self) -> i64;
+ fn read_i32(&mut self) -> i32;
+ fn read_i16(&mut self) -> i16;
+ fn read_i8(&mut self) -> i8;
+ fn read_bool(&mut self) -> bool;
+ fn read_f64(&mut self) -> f64;
+ fn read_f32(&mut self) -> f32;
+ fn read_char(&mut self) -> char;
+ fn read_str(&mut self) -> &str;
+ fn read_raw_bytes(&mut self, len: usize) -> &[u8];
+}
+
+/// Trait for types that can be serialized
+///
+/// This can be implemented using the `Encodable`, `TyEncodable` and
+/// `MetadataEncodable` macros.
+///
+/// * `Encodable` should be used in crates that don't depend on
+/// `rustc_middle`.
+/// * `MetadataEncodable` is used in `rustc_metadata` for types that contain
+/// `rustc_metadata::rmeta::Lazy`.
+/// * `TyEncodable` should be used for types that are only serialized in crate
+/// metadata or the incremental cache. This is most types in `rustc_middle`.
+pub trait Encodable<S: Encoder> {
+ fn encode(&self, s: &mut S);
+}
+
+/// Trait for types that can be deserialized
+///
+/// This can be implemented using the `Decodable`, `TyDecodable` and
+/// `MetadataDecodable` macros.
+///
+/// * `Decodable` should be used in crates that don't depend on
+/// `rustc_middle`.
+/// * `MetadataDecodable` is used in `rustc_metadata` for types that contain
+/// `rustc_metadata::rmeta::Lazy`.
+/// * `TyDecodable` should be used for types that are only serialized in crate
+/// metadata or the incremental cache. This is most types in `rustc_middle`.
+pub trait Decodable<D: Decoder>: Sized {
+ fn decode(d: &mut D) -> Self;
+}
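+
+// Hand-written sketch (test-style, hypothetical type) of what the derive
+// macros expand to: fields are encoded in declaration order and decoded back
+// in the same order, with no tags or framing between them.
+#[cfg(test)]
+mod derive_expansion_sketch {
+ use super::*;
+
+ #[derive(PartialEq, Debug)]
+ struct Point {
+ x: u32,
+ y: u32,
+ }
+
+ impl<S: Encoder> Encodable<S> for Point {
+ fn encode(&self, s: &mut S) {
+ self.x.encode(s);
+ self.y.encode(s);
+ }
+ }
+
+ impl<D: Decoder> Decodable<D> for Point {
+ fn decode(d: &mut D) -> Point {
+ Point { x: Decodable::decode(d), y: Decodable::decode(d) }
+ }
+ }
+
+ #[test]
+ fn point_round_trip() {
+ let p = Point { x: 3, y: 4 };
+ let mut enc = crate::opaque::MemEncoder::new();
+ p.encode(&mut enc);
+ let data = enc.finish();
+ let mut dec = crate::opaque::MemDecoder::new(&data, 0);
+ assert_eq!(Point::decode(&mut dec), p);
+ }
+}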
+
+macro_rules! direct_serialize_impls {
+ ($($ty:ident $emit_method:ident $read_method:ident),*) => {
+ $(
+ impl<S: Encoder> Encodable<S> for $ty {
+ fn encode(&self, s: &mut S) {
+ s.$emit_method(*self);
+ }
+ }
+
+ impl<D: Decoder> Decodable<D> for $ty {
+ fn decode(d: &mut D) -> $ty {
+ d.$read_method()
+ }
+ }
+ )*
+ }
+}
+
+direct_serialize_impls! {
+ usize emit_usize read_usize,
+ u8 emit_u8 read_u8,
+ u16 emit_u16 read_u16,
+ u32 emit_u32 read_u32,
+ u64 emit_u64 read_u64,
+ u128 emit_u128 read_u128,
+
+ isize emit_isize read_isize,
+ i8 emit_i8 read_i8,
+ i16 emit_i16 read_i16,
+ i32 emit_i32 read_i32,
+ i64 emit_i64 read_i64,
+ i128 emit_i128 read_i128,
+
+ f32 emit_f32 read_f32,
+ f64 emit_f64 read_f64,
+ bool emit_bool read_bool,
+ char emit_char read_char
+}
+
+impl<S: Encoder, T: ?Sized> Encodable<S> for &T
+where
+ T: Encodable<S>,
+{
+ fn encode(&self, s: &mut S) {
+ (**self).encode(s)
+ }
+}
+
+impl<S: Encoder> Encodable<S> for ! {
+ fn encode(&self, _s: &mut S) {
+ unreachable!();
+ }
+}
+
+impl<D: Decoder> Decodable<D> for ! {
+ fn decode(_d: &mut D) -> ! {
+ unreachable!()
+ }
+}
+
+impl<S: Encoder> Encodable<S> for ::std::num::NonZeroU32 {
+ fn encode(&self, s: &mut S) {
+ s.emit_u32(self.get());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for ::std::num::NonZeroU32 {
+ fn decode(d: &mut D) -> Self {
+ ::std::num::NonZeroU32::new(d.read_u32()).unwrap()
+ }
+}
+
+impl<S: Encoder> Encodable<S> for str {
+ fn encode(&self, s: &mut S) {
+ s.emit_str(self);
+ }
+}
+
+impl<S: Encoder> Encodable<S> for String {
+ fn encode(&self, s: &mut S) {
+ s.emit_str(&self[..]);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for String {
+ fn decode(d: &mut D) -> String {
+ d.read_str().to_owned()
+ }
+}
+
+impl<S: Encoder> Encodable<S> for () {
+ fn encode(&self, _s: &mut S) {}
+}
+
+impl<D: Decoder> Decodable<D> for () {
+ fn decode(_: &mut D) -> () {}
+}
+
+impl<S: Encoder, T> Encodable<S> for PhantomData<T> {
+ fn encode(&self, _s: &mut S) {}
+}
+
+impl<D: Decoder, T> Decodable<D> for PhantomData<T> {
+ fn decode(_: &mut D) -> PhantomData<T> {
+ PhantomData
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for Box<[T]> {
+ fn decode(d: &mut D) -> Box<[T]> {
+ let v: Vec<T> = Decodable::decode(d);
+ v.into_boxed_slice()
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for Rc<T> {
+ fn encode(&self, s: &mut S) {
+ (**self).encode(s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for Rc<T> {
+ fn decode(d: &mut D) -> Rc<T> {
+ Rc::new(Decodable::decode(d))
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for [T] {
+ default fn encode(&self, s: &mut S) {
+ s.emit_usize(self.len());
+ for e in self.iter() {
+ e.encode(s);
+ }
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for Vec<T> {
+ fn encode(&self, s: &mut S) {
+ let slice: &[T] = self;
+ slice.encode(s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for Vec<T> {
+ default fn decode(d: &mut D) -> Vec<T> {
+ let len = d.read_usize();
+ // SAFETY: we reserve the capacity in advance, write only initialized
+ // elements, and set the length last, so a panic mid-decode leaves the
+ // length at zero and never exposes uninitialized memory.
+ let mut vec = Vec::with_capacity(len);
+ unsafe {
+ let ptr: *mut T = vec.as_mut_ptr();
+ for i in 0..len {
+ std::ptr::write(ptr.add(i), Decodable::decode(d));
+ }
+ vec.set_len(len);
+ }
+ vec
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>, const N: usize> Encodable<S> for [T; N] {
+ fn encode(&self, s: &mut S) {
+ let slice: &[T] = self;
+ slice.encode(s);
+ }
+}
+
+impl<D: Decoder, const N: usize> Decodable<D> for [u8; N] {
+ fn decode(d: &mut D) -> [u8; N] {
+ let len = d.read_usize();
+ assert_eq!(len, N);
+ let mut v = [0u8; N];
+ for i in 0..len {
+ v[i] = Decodable::decode(d);
+ }
+ v
+ }
+}
+
+impl<'a, S: Encoder, T: Encodable<S>> Encodable<S> for Cow<'a, [T]>
+where
+ [T]: ToOwned<Owned = Vec<T>>,
+{
+ fn encode(&self, s: &mut S) {
+ let slice: &[T] = self;
+ slice.encode(s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D> + ToOwned> Decodable<D> for Cow<'static, [T]>
+where
+ [T]: ToOwned<Owned = Vec<T>>,
+{
+ fn decode(d: &mut D) -> Cow<'static, [T]> {
+ let v: Vec<T> = Decodable::decode(d);
+ Cow::Owned(v)
+ }
+}
+
+impl<'a, S: Encoder> Encodable<S> for Cow<'a, str> {
+ fn encode(&self, s: &mut S) {
+ let val: &str = self;
+ val.encode(s)
+ }
+}
+
+impl<'a, D: Decoder> Decodable<D> for Cow<'a, str> {
+ fn decode(d: &mut D) -> Cow<'static, str> {
+ let v: String = Decodable::decode(d);
+ Cow::Owned(v)
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for Option<T> {
+ fn encode(&self, s: &mut S) {
+ match *self {
+ None => s.emit_enum_variant(0, |_| {}),
+ Some(ref v) => s.emit_enum_variant(1, |s| v.encode(s)),
+ }
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for Option<T> {
+ fn decode(d: &mut D) -> Option<T> {
+ match d.read_usize() {
+ 0 => None,
+ 1 => Some(Decodable::decode(d)),
+ _ => panic!("Encountered invalid discriminant while decoding `Option`."),
+ }
+ }
+}
+
+impl<S: Encoder, T1: Encodable<S>, T2: Encodable<S>> Encodable<S> for Result<T1, T2> {
+ fn encode(&self, s: &mut S) {
+ match *self {
+ Ok(ref v) => s.emit_enum_variant(0, |s| v.encode(s)),
+ Err(ref v) => s.emit_enum_variant(1, |s| v.encode(s)),
+ }
+ }
+}
+
+impl<D: Decoder, T1: Decodable<D>, T2: Decodable<D>> Decodable<D> for Result<T1, T2> {
+ fn decode(d: &mut D) -> Result<T1, T2> {
+ match d.read_usize() {
+ 0 => Ok(T1::decode(d)),
+ 1 => Err(T2::decode(d)),
+ _ => panic!("Encountered invalid discriminant while decoding `Result`."),
+ }
+ }
+}
+
+macro_rules! peel {
+ ($name:ident, $($other:ident,)*) => (tuple! { $($other,)* })
+}
+
+macro_rules! tuple {
+ () => ();
+ ( $($name:ident,)+ ) => (
+ impl<D: Decoder, $($name: Decodable<D>),+> Decodable<D> for ($($name,)+) {
+ fn decode(d: &mut D) -> ($($name,)+) {
+ ($({ let element: $name = Decodable::decode(d); element },)+)
+ }
+ }
+ impl<S: Encoder, $($name: Encodable<S>),+> Encodable<S> for ($($name,)+) {
+ #[allow(non_snake_case)]
+ fn encode(&self, s: &mut S) {
+ let ($(ref $name,)+) = *self;
+ $($name.encode(s);)+
+ }
+ }
+ peel! { $($name,)+ }
+ )
+}
+
+tuple! { T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, }
+
+impl<S: Encoder> Encodable<S> for path::Path {
+ fn encode(&self, e: &mut S) {
+ self.to_str().unwrap().encode(e);
+ }
+}
+
+impl<S: Encoder> Encodable<S> for path::PathBuf {
+ fn encode(&self, e: &mut S) {
+ path::Path::encode(self, e);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for path::PathBuf {
+ fn decode(d: &mut D) -> path::PathBuf {
+ let bytes: String = Decodable::decode(d);
+ path::PathBuf::from(bytes)
+ }
+}
+
+impl<S: Encoder, T: Encodable<S> + Copy> Encodable<S> for Cell<T> {
+ fn encode(&self, s: &mut S) {
+ self.get().encode(s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D> + Copy> Decodable<D> for Cell<T> {
+ fn decode(d: &mut D) -> Cell<T> {
+ Cell::new(Decodable::decode(d))
+ }
+}
+
+// FIXME: #15036
+// Should use `try_borrow`, returning an
+// `encoder.error("attempting to encode borrowed RefCell")`
+// from `encode` when `try_borrow` fails.
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for RefCell<T> {
+ fn encode(&self, s: &mut S) {
+ self.borrow().encode(s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for RefCell<T> {
+ fn decode(d: &mut D) -> RefCell<T> {
+ RefCell::new(Decodable::decode(d))
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for Arc<T> {
+ fn encode(&self, s: &mut S) {
+ (**self).encode(s);
+ }
+}
+
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for Arc<T> {
+ fn decode(d: &mut D) -> Arc<T> {
+ Arc::new(Decodable::decode(d))
+ }
+}
+
+impl<S: Encoder, T: ?Sized + Encodable<S>> Encodable<S> for Box<T> {
+ fn encode(&self, s: &mut S) {
+ (**self).encode(s);
+ }
+}
+impl<D: Decoder, T: Decodable<D>> Decodable<D> for Box<T> {
+ fn decode(d: &mut D) -> Box<T> {
+ Box::new(Decodable::decode(d))
+ }
+}
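
Taken together, these impls let any nesting of the standard containers round-trip through a concrete `Encoder`/`Decoder` pair. A minimal sketch using the crate's in-memory `MemEncoder`/`MemDecoder` (exercised by the `opaque.rs` tests below); the integer impls it relies on live elsewhere in this crate:

use rustc_serialize::opaque::{MemDecoder, MemEncoder};
use rustc_serialize::{Decodable, Encodable};

fn main() {
    // `Option<T>` is written as a variant index (0 = `None`, 1 = `Some`)
    // followed by the payload, per the impl above.
    let value: Option<u32> = Some(42);

    let mut encoder = MemEncoder::new();
    value.encode(&mut encoder);
    let data = encoder.finish();

    let mut decoder = MemDecoder::new(&data[..], 0);
    let decoded: Option<u32> = Decodable::decode(&mut decoder);
    assert_eq!(value, decoded);
}
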
diff --git a/compiler/rustc_serialize/tests/leb128.rs b/compiler/rustc_serialize/tests/leb128.rs
new file mode 100644
index 000000000..314c07db9
--- /dev/null
+++ b/compiler/rustc_serialize/tests/leb128.rs
@@ -0,0 +1,91 @@
+#![feature(maybe_uninit_slice)]
+#![feature(maybe_uninit_uninit_array)]
+
+use rustc_serialize::leb128::*;
+use std::mem::MaybeUninit;
+
+macro_rules! impl_test_unsigned_leb128 {
+ ($test_name:ident, $write_fn_name:ident, $read_fn_name:ident, $int_ty:ident) => {
+ #[test]
+ fn $test_name() {
+            // Test 256 evenly spaced values across the integer's range,
+            // the integer's maximum value, and some "random" numbers.
+ let mut values = Vec::new();
+
+ let increment = (1 as $int_ty) << ($int_ty::BITS - 8);
+ values.extend((0..256).map(|i| $int_ty::MIN + i * increment));
+
+ values.push($int_ty::MAX);
+
+ values.extend(
+ (-500..500).map(|i| (i as $int_ty).wrapping_mul(0x12345789ABCDEFu64 as $int_ty)),
+ );
+
+ let mut stream = Vec::new();
+
+ for &x in &values {
+ let mut buf = MaybeUninit::uninit_array();
+ stream.extend($write_fn_name(&mut buf, x));
+ }
+
+ let mut position = 0;
+ for &expected in &values {
+ let actual = $read_fn_name(&stream, &mut position);
+ assert_eq!(expected, actual);
+ }
+ assert_eq!(stream.len(), position);
+ }
+ };
+}
+
+impl_test_unsigned_leb128!(test_u16_leb128, write_u16_leb128, read_u16_leb128, u16);
+impl_test_unsigned_leb128!(test_u32_leb128, write_u32_leb128, read_u32_leb128, u32);
+impl_test_unsigned_leb128!(test_u64_leb128, write_u64_leb128, read_u64_leb128, u64);
+impl_test_unsigned_leb128!(test_u128_leb128, write_u128_leb128, read_u128_leb128, u128);
+impl_test_unsigned_leb128!(test_usize_leb128, write_usize_leb128, read_usize_leb128, usize);
+
+macro_rules! impl_test_signed_leb128 {
+ ($test_name:ident, $write_fn_name:ident, $read_fn_name:ident, $int_ty:ident) => {
+ #[test]
+ fn $test_name() {
+            // Test 256 evenly spaced values across the integer's range,
+            // the integer's maximum value, and some "random" numbers.
+ let mut values = Vec::new();
+
+ let mut value = $int_ty::MIN;
+ let increment = (1 as $int_ty) << ($int_ty::BITS - 8);
+
+ for _ in 0..256 {
+ values.push(value);
+ // The addition in the last loop iteration overflows.
+ value = value.wrapping_add(increment);
+ }
+
+ values.push($int_ty::MAX);
+
+ values.extend(
+ (-500..500).map(|i| (i as $int_ty).wrapping_mul(0x12345789ABCDEFi64 as $int_ty)),
+ );
+
+ let mut stream = Vec::new();
+
+ for &x in &values {
+ let mut buf = MaybeUninit::uninit_array();
+ stream.extend($write_fn_name(&mut buf, x));
+ }
+
+ let mut position = 0;
+ for &expected in &values {
+ let actual = $read_fn_name(&stream, &mut position);
+ assert_eq!(expected, actual);
+ }
+ assert_eq!(stream.len(), position);
+ }
+ };
+}
+
+impl_test_signed_leb128!(test_i16_leb128, write_i16_leb128, read_i16_leb128, i16);
+impl_test_signed_leb128!(test_i32_leb128, write_i32_leb128, read_i32_leb128, i32);
+impl_test_signed_leb128!(test_i64_leb128, write_i64_leb128, read_i64_leb128, i64);
+impl_test_signed_leb128!(test_i128_leb128, write_i128_leb128, read_i128_leb128, i128);
+impl_test_signed_leb128!(test_isize_leb128, write_isize_leb128, read_isize_leb128, isize);
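
For reference, the encoding these tests exercise is standard unsigned LEB128: each output byte carries seven payload bits, with the high bit set on every byte except the last. An illustrative codec (not the crate's implementation, which writes into `MaybeUninit` buffers):

fn write_uleb128(out: &mut Vec<u8>, mut value: u64) {
    loop {
        let byte = (value & 0x7f) as u8;
        value >>= 7;
        if value == 0 {
            out.push(byte); // final byte: high bit clear
            break;
        }
        out.push(byte | 0x80); // more bytes follow: high bit set
    }
}

fn read_uleb128(data: &[u8], position: &mut usize) -> u64 {
    let mut result = 0u64;
    let mut shift = 0;
    loop {
        let byte = data[*position];
        *position += 1;
        result |= u64::from(byte & 0x7f) << shift;
        if byte & 0x80 == 0 {
            return result;
        }
        shift += 7;
    }
}
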
diff --git a/compiler/rustc_serialize/tests/opaque.rs b/compiler/rustc_serialize/tests/opaque.rs
new file mode 100644
index 000000000..3a695d071
--- /dev/null
+++ b/compiler/rustc_serialize/tests/opaque.rs
@@ -0,0 +1,277 @@
+#![allow(rustc::internal)]
+
+use rustc_macros::{Decodable, Encodable};
+use rustc_serialize::opaque::{MemDecoder, MemEncoder};
+use rustc_serialize::{Decodable, Encodable};
+use std::fmt::Debug;
+
+#[derive(PartialEq, Clone, Debug, Encodable, Decodable)]
+struct Struct {
+ a: (),
+ b: u8,
+ c: u16,
+ d: u32,
+ e: u64,
+ f: usize,
+
+ g: i8,
+ h: i16,
+ i: i32,
+ j: i64,
+ k: isize,
+
+ l: char,
+ m: String,
+ n: f32,
+ o: f64,
+ p: bool,
+ q: Option<u32>,
+}
+
+fn check_round_trip<
+ T: Encodable<MemEncoder> + for<'a> Decodable<MemDecoder<'a>> + PartialEq + Debug,
+>(
+ values: Vec<T>,
+) {
+ let mut encoder = MemEncoder::new();
+ for value in &values {
+ Encodable::encode(value, &mut encoder);
+ }
+
+ let data = encoder.finish();
+ let mut decoder = MemDecoder::new(&data[..], 0);
+
+ for value in values {
+ let decoded = Decodable::decode(&mut decoder);
+ assert_eq!(value, decoded);
+ }
+}
+
+#[test]
+fn test_unit() {
+ check_round_trip(vec![(), (), (), ()]);
+}
+
+#[test]
+fn test_u8() {
+ let mut vec = vec![];
+ for i in u8::MIN..u8::MAX {
+ vec.push(i);
+ }
+ check_round_trip(vec);
+}
+
+#[test]
+fn test_u16() {
+ for i in u16::MIN..u16::MAX {
+ check_round_trip(vec![1, 2, 3, i, i, i]);
+ }
+}
+
+#[test]
+fn test_u32() {
+ check_round_trip(vec![1, 2, 3, u32::MIN, 0, 1, u32::MAX, 2, 1]);
+}
+
+#[test]
+fn test_u64() {
+ check_round_trip(vec![1, 2, 3, u64::MIN, 0, 1, u64::MAX, 2, 1]);
+}
+
+#[test]
+fn test_usize() {
+ check_round_trip(vec![1, 2, 3, usize::MIN, 0, 1, usize::MAX, 2, 1]);
+}
+
+#[test]
+fn test_i8() {
+ let mut vec = vec![];
+ for i in i8::MIN..i8::MAX {
+ vec.push(i);
+ }
+ check_round_trip(vec);
+}
+
+#[test]
+fn test_i16() {
+ for i in i16::MIN..i16::MAX {
+ check_round_trip(vec![-1, 2, -3, i, i, i, 2]);
+ }
+}
+
+#[test]
+fn test_i32() {
+ check_round_trip(vec![-1, 2, -3, i32::MIN, 0, 1, i32::MAX, 2, 1]);
+}
+
+#[test]
+fn test_i64() {
+ check_round_trip(vec![-1, 2, -3, i64::MIN, 0, 1, i64::MAX, 2, 1]);
+}
+
+#[test]
+fn test_isize() {
+ check_round_trip(vec![-1, 2, -3, isize::MIN, 0, 1, isize::MAX, 2, 1]);
+}
+
+#[test]
+fn test_bool() {
+ check_round_trip(vec![false, true, true, false, false]);
+}
+
+#[test]
+fn test_f32() {
+ let mut vec = vec![];
+ for i in -100..100 {
+ vec.push((i as f32) / 3.0);
+ }
+ check_round_trip(vec);
+}
+
+#[test]
+fn test_f64() {
+ let mut vec = vec![];
+ for i in -100..100 {
+ vec.push((i as f64) / 3.0);
+ }
+ check_round_trip(vec);
+}
+
+#[test]
+fn test_char() {
+ let vec = vec!['a', 'b', 'c', 'd', 'A', 'X', ' ', '#', 'Ö', 'Ä', 'µ', '€'];
+ check_round_trip(vec);
+}
+
+#[test]
+fn test_string() {
+ let vec = vec![
+ "abcbuÖeiovÄnameÜavmpßvmea€µsbpnvapeapmaebn".to_string(),
+ "abcbuÖganeiovÄnameÜavmpßvmea€µsbpnvapeapmaebn".to_string(),
+ "abcbuÖganeiovÄnameÜavmpßvmea€µsbpapmaebn".to_string(),
+ "abcbuÖganeiovÄnameÜavmpßvmeabpnvapeapmaebn".to_string(),
+ "abcbuÖganeiÄnameÜavmpßvmea€µsbpnvapeapmaebn".to_string(),
+ "abcbuÖganeiovÄnameÜavmpßvmea€µsbpmaebn".to_string(),
+ "abcbuÖganeiovÄnameÜavmpßvmea€µnvapeapmaebn".to_string(),
+ ];
+
+ check_round_trip(vec);
+}
+
+#[test]
+fn test_option() {
+ check_round_trip(vec![Some(-1i8)]);
+ check_round_trip(vec![Some(-2i16)]);
+ check_round_trip(vec![Some(-3i32)]);
+ check_round_trip(vec![Some(-4i64)]);
+ check_round_trip(vec![Some(-5isize)]);
+
+ let none_i8: Option<i8> = None;
+ check_round_trip(vec![none_i8]);
+
+ let none_i16: Option<i16> = None;
+ check_round_trip(vec![none_i16]);
+
+ let none_i32: Option<i32> = None;
+ check_round_trip(vec![none_i32]);
+
+ let none_i64: Option<i64> = None;
+ check_round_trip(vec![none_i64]);
+
+ let none_isize: Option<isize> = None;
+ check_round_trip(vec![none_isize]);
+}
+
+#[test]
+fn test_struct() {
+ check_round_trip(vec![Struct {
+ a: (),
+ b: 10,
+ c: 11,
+ d: 12,
+ e: 13,
+ f: 14,
+
+ g: 15,
+ h: 16,
+ i: 17,
+ j: 18,
+ k: 19,
+
+ l: 'x',
+ m: "abc".to_string(),
+ n: 20.5,
+ o: 21.5,
+ p: false,
+ q: None,
+ }]);
+
+ check_round_trip(vec![Struct {
+ a: (),
+ b: 101,
+ c: 111,
+ d: 121,
+ e: 131,
+ f: 141,
+
+ g: -15,
+ h: -16,
+ i: -17,
+ j: -18,
+ k: -19,
+
+ l: 'y',
+ m: "def".to_string(),
+ n: -20.5,
+ o: -21.5,
+ p: true,
+ q: Some(1234567),
+ }]);
+}
+
+#[derive(PartialEq, Clone, Debug, Encodable, Decodable)]
+enum Enum {
+ Variant1,
+ Variant2(usize, f32),
+ Variant3 { a: i32, b: char, c: bool },
+}
+
+#[test]
+fn test_enum() {
+ check_round_trip(vec![
+ Enum::Variant1,
+ Enum::Variant2(1, 2.5),
+ Enum::Variant3 { a: 3, b: 'b', c: false },
+ Enum::Variant3 { a: -4, b: 'f', c: true },
+ ]);
+}
+
+#[test]
+fn test_sequence() {
+ let mut vec = vec![];
+ for i in -100i64..100i64 {
+ vec.push(i * 100000);
+ }
+
+ check_round_trip(vec![vec]);
+}
+
+#[test]
+fn test_hash_map() {
+ use std::collections::HashMap;
+ let mut map = HashMap::new();
+ for i in -100i64..100i64 {
+ map.insert(i * 100000, i * 10000);
+ }
+
+ check_round_trip(vec![map]);
+}
+
+#[test]
+fn test_tuples() {
+ check_round_trip(vec![('x', (), false, 0.5f32)]);
+ check_round_trip(vec![(9i8, 10u16, 1.5f64)]);
+ check_round_trip(vec![(-12i16, 11u8, 12usize)]);
+ check_round_trip(vec![(1234567isize, 100000000000000u64, 99999999999999i64)]);
+ check_round_trip(vec![(String::new(), "some string".to_string())]);
+}
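
For contrast with the derive-based impls this test relies on, a hand-written `Encodable` impl for `Enum` above, following the same variant-index scheme as the manual `Option` and `Result` impls in `serialize.rs`, might look like the following sketch (hypothetical; the real derive expansion may differ in detail, and it additionally needs `use rustc_serialize::Encoder;`):

impl<S: Encoder> Encodable<S> for Enum {
    fn encode(&self, s: &mut S) {
        match self {
            // Variant index first, then the fields in declaration order.
            Enum::Variant1 => s.emit_enum_variant(0, |_| {}),
            Enum::Variant2(a, b) => s.emit_enum_variant(1, |s| {
                a.encode(s);
                b.encode(s);
            }),
            Enum::Variant3 { a, b, c } => s.emit_enum_variant(2, |s| {
                a.encode(s);
                b.encode(s);
                c.encode(s);
            }),
        }
    }
}
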
diff --git a/compiler/rustc_session/Cargo.toml b/compiler/rustc_session/Cargo.toml
new file mode 100644
index 000000000..37cfc4a0d
--- /dev/null
+++ b/compiler/rustc_session/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "rustc_session"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+getopts = "0.2"
+rustc_macros = { path = "../rustc_macros" }
+tracing = "0.1"
+rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_target = { path = "../rustc_target" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_span = { path = "../rustc_span" }
+rustc_fs_util = { path = "../rustc_fs_util" }
+num_cpus = "1.0"
+rustc_ast = { path = "../rustc_ast" }
+rustc_lint_defs = { path = "../rustc_lint_defs" }
diff --git a/compiler/rustc_session/src/cgu_reuse_tracker.rs b/compiler/rustc_session/src/cgu_reuse_tracker.rs
new file mode 100644
index 000000000..dd64e8ab7
--- /dev/null
+++ b/compiler/rustc_session/src/cgu_reuse_tracker.rs
@@ -0,0 +1,118 @@
+//! Some facilities for tracking how codegen-units are reused during incremental
+//! compilation. This is used for incremental compilation tests and debug
+//! output.
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_span::{Span, Symbol};
+use std::sync::{Arc, Mutex};
+use tracing::debug;
+
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
+pub enum CguReuse {
+ No,
+ PreLto,
+ PostLto,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum ComparisonKind {
+ Exact,
+ AtLeast,
+}
+
+struct TrackerData {
+ actual_reuse: FxHashMap<String, CguReuse>,
+ expected_reuse: FxHashMap<String, (String, SendSpan, CguReuse, ComparisonKind)>,
+}
+
+// Span does not implement `Send`, so we can't just store it in the shared
+// `TrackerData` object. Instead of splitting up `TrackerData` into shared and
+// non-shared parts (which would be complicated), we just mark the `Span` here
+// explicitly as `Send`. That's safe because the span data here is only ever
+// accessed from the main thread.
+struct SendSpan(Span);
+unsafe impl Send for SendSpan {}
+
+#[derive(Clone)]
+pub struct CguReuseTracker {
+ data: Option<Arc<Mutex<TrackerData>>>,
+}
+
+impl CguReuseTracker {
+ pub fn new() -> CguReuseTracker {
+ let data =
+ TrackerData { actual_reuse: Default::default(), expected_reuse: Default::default() };
+
+ CguReuseTracker { data: Some(Arc::new(Mutex::new(data))) }
+ }
+
+ pub fn new_disabled() -> CguReuseTracker {
+ CguReuseTracker { data: None }
+ }
+
+ pub fn set_actual_reuse(&self, cgu_name: &str, kind: CguReuse) {
+ if let Some(ref data) = self.data {
+ debug!("set_actual_reuse({cgu_name:?}, {kind:?})");
+
+ let prev_reuse = data.lock().unwrap().actual_reuse.insert(cgu_name.to_string(), kind);
+
+ if let Some(prev_reuse) = prev_reuse {
+ // The only time it is legal to overwrite reuse state is when
+ // we discover during ThinLTO that we can actually reuse the
+ // post-LTO version of a CGU.
+ assert_eq!(prev_reuse, CguReuse::PreLto);
+ }
+ }
+ }
+
+ pub fn set_expectation(
+ &self,
+ cgu_name: Symbol,
+ cgu_user_name: &str,
+ error_span: Span,
+ expected_reuse: CguReuse,
+ comparison_kind: ComparisonKind,
+ ) {
+ if let Some(ref data) = self.data {
+ debug!("set_expectation({cgu_name:?}, {expected_reuse:?}, {comparison_kind:?})");
+ let mut data = data.lock().unwrap();
+
+ data.expected_reuse.insert(
+ cgu_name.to_string(),
+ (cgu_user_name.to_string(), SendSpan(error_span), expected_reuse, comparison_kind),
+ );
+ }
+ }
+
+ pub fn check_expected_reuse(&self, diag: &rustc_errors::Handler) {
+ if let Some(ref data) = self.data {
+ let data = data.lock().unwrap();
+
+ for (cgu_name, &(ref cgu_user_name, ref error_span, expected_reuse, comparison_kind)) in
+ &data.expected_reuse
+ {
+ if let Some(&actual_reuse) = data.actual_reuse.get(cgu_name) {
+ let (error, at_least) = match comparison_kind {
+ ComparisonKind::Exact => (expected_reuse != actual_reuse, false),
+ ComparisonKind::AtLeast => (actual_reuse < expected_reuse, true),
+ };
+
+ if error {
+ let at_least = if at_least { "at least " } else { "" };
+ let msg = format!(
+ "CGU-reuse for `{cgu_user_name}` is `{actual_reuse:?}` but \
+ should be {at_least}`{expected_reuse:?}`"
+ );
+ diag.span_err(error_span.0, &msg);
+ }
+ } else {
+ let msg = format!(
+ "CGU-reuse for `{cgu_user_name}` (mangled: `{cgu_name}`) was \
+ not recorded"
+ );
+ diag.span_fatal(error_span.0, &msg)
+ }
+ }
+ }
+ }
+}
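
The tracker is driven from two sides: codegen records what actually happened, and the incremental-test harness records what the test expects. A hypothetical usage sketch, assuming `cgu_name: Symbol`, `error_span: Span`, and `handler: rustc_errors::Handler` bindings from the surrounding session:

let tracker = CguReuseTracker::new();

// Codegen side: record the observed reuse per CGU.
tracker.set_actual_reuse("mycrate.abc123-cgu.0", CguReuse::PostLto);

// Test-harness side: record the expectation. `AtLeast` relies on the
// `PartialOrd` derive above (`No < PreLto < PostLto`), so an actual
// `PostLto` satisfies an expected `PreLto`.
tracker.set_expectation(
    cgu_name,
    "mycrate-cgu.0",
    error_span,
    CguReuse::PreLto,
    ComparisonKind::AtLeast,
);

// At the end of compilation, mismatches become diagnostics.
tracker.check_expected_reuse(&handler);
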
diff --git a/compiler/rustc_session/src/code_stats.rs b/compiler/rustc_session/src/code_stats.rs
new file mode 100644
index 000000000..eede4d16e
--- /dev/null
+++ b/compiler/rustc_session/src/code_stats.rs
@@ -0,0 +1,182 @@
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sync::Lock;
+use rustc_span::Symbol;
+use rustc_target::abi::{Align, Size};
+use std::cmp::{self, Ordering};
+
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct VariantInfo {
+ pub name: Option<Symbol>,
+ pub kind: SizeKind,
+ pub size: u64,
+ pub align: u64,
+ pub fields: Vec<FieldInfo>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub enum SizeKind {
+ Exact,
+ Min,
+}
+
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct FieldInfo {
+ pub name: Symbol,
+ pub offset: u64,
+ pub size: u64,
+ pub align: u64,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub enum DataTypeKind {
+ Struct,
+ Union,
+ Enum,
+ Closure,
+}
+
+#[derive(PartialEq, Eq, Hash, Debug)]
+pub struct TypeSizeInfo {
+ pub kind: DataTypeKind,
+ pub type_description: String,
+ pub align: u64,
+ pub overall_size: u64,
+ pub packed: bool,
+ pub opt_discr_size: Option<u64>,
+ pub variants: Vec<VariantInfo>,
+}
+
+#[derive(Default)]
+pub struct CodeStats {
+ type_sizes: Lock<FxHashSet<TypeSizeInfo>>,
+}
+
+impl CodeStats {
+ pub fn record_type_size<S: ToString>(
+ &self,
+ kind: DataTypeKind,
+ type_desc: S,
+ align: Align,
+ overall_size: Size,
+ packed: bool,
+ opt_discr_size: Option<Size>,
+ mut variants: Vec<VariantInfo>,
+ ) {
+ // Sort variants so the largest ones are shown first. A stable sort is
+ // used here so that source code order is preserved for all variants
+ // that have the same size.
+ variants.sort_by(|info1, info2| info2.size.cmp(&info1.size));
+ let info = TypeSizeInfo {
+ kind,
+ type_description: type_desc.to_string(),
+ align: align.bytes(),
+ overall_size: overall_size.bytes(),
+ packed,
+ opt_discr_size: opt_discr_size.map(|s| s.bytes()),
+ variants,
+ };
+ self.type_sizes.borrow_mut().insert(info);
+ }
+
+ pub fn print_type_sizes(&self) {
+ let type_sizes = self.type_sizes.borrow();
+ let mut sorted: Vec<_> = type_sizes.iter().collect();
+
+ // Primary sort: large-to-small.
+ // Secondary sort: description (dictionary order)
+ sorted.sort_by(|info1, info2| {
+ // (reversing cmp order to get large-to-small ordering)
+ match info2.overall_size.cmp(&info1.overall_size) {
+ Ordering::Equal => info1.type_description.cmp(&info2.type_description),
+ other => other,
+ }
+ });
+
+ for info in sorted {
+ let TypeSizeInfo { type_description, overall_size, align, kind, variants, .. } = info;
+ println!(
+ "print-type-size type: `{type_description}`: {overall_size} bytes, alignment: {align} bytes"
+ );
+ let indent = " ";
+
+ let discr_size = if let Some(discr_size) = info.opt_discr_size {
+ println!("print-type-size {indent}discriminant: {discr_size} bytes");
+ discr_size
+ } else {
+ 0
+ };
+
+ // We start this at discr_size (rather than 0) because
+ // things like C-enums do not have variants but we still
+ // want the max_variant_size at the end of the loop below
+ // to reflect the presence of the discriminant.
+ let mut max_variant_size = discr_size;
+
+ let struct_like = match kind {
+ DataTypeKind::Struct | DataTypeKind::Closure => true,
+ DataTypeKind::Enum | DataTypeKind::Union => false,
+ };
+ for (i, variant_info) in variants.into_iter().enumerate() {
+ let VariantInfo { ref name, kind: _, align: _, size, ref fields } = *variant_info;
+ let indent = if !struct_like {
+ let name = match name.as_ref() {
+ Some(name) => name.to_string(),
+ None => i.to_string(),
+ };
+ println!(
+ "print-type-size {indent}variant `{name}`: {diff} bytes",
+ diff = size - discr_size
+ );
+ " "
+ } else {
+ assert!(i < 1);
+ " "
+ };
+ max_variant_size = cmp::max(max_variant_size, size);
+
+ let mut min_offset = discr_size;
+
+ // We want to print fields by increasing offset. We also want
+ // zero-sized fields before non-zero-sized fields, otherwise
+ // the loop below goes wrong; hence the `f.size` in the sort
+ // key.
+ let mut fields = fields.clone();
+ fields.sort_by_key(|f| (f.offset, f.size));
+
+ for field in fields {
+ let FieldInfo { ref name, offset, size, align } = field;
+
+ if offset > min_offset {
+ let pad = offset - min_offset;
+ println!("print-type-size {indent}padding: {pad} bytes");
+ }
+
+ if offset < min_offset {
+ // If this happens it's probably a union.
+ println!(
+ "print-type-size {indent}field `.{name}`: {size} bytes, \
+ offset: {offset} bytes, \
+ alignment: {align} bytes"
+ );
+ } else if info.packed || offset == min_offset {
+ println!("print-type-size {indent}field `.{name}`: {size} bytes");
+ } else {
+ // Include field alignment in output only if it caused padding injection
+ println!(
+ "print-type-size {indent}field `.{name}`: {size} bytes, \
+ alignment: {align} bytes"
+ );
+ }
+
+ min_offset = offset + size;
+ }
+ }
+
+ match overall_size.checked_sub(max_variant_size) {
+ None => panic!("max_variant_size {max_variant_size} > {overall_size} overall_size"),
+ Some(diff @ 1..) => println!("print-type-size {indent}end padding: {diff} bytes"),
+ Some(0) => {}
+ }
+ }
+ }
+}
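
The padding report above hinges on walking fields sorted by `(offset, size)` while tracking `min_offset`. A stripped-down, runnable sketch of that loop (with a hypothetical `Field` type for illustration):

struct Field {
    name: &'static str,
    offset: u64,
    size: u64,
}

fn report_padding(mut fields: Vec<Field>, discr_size: u64) {
    // Sort by increasing offset, zero-sized fields first at equal offsets.
    fields.sort_by_key(|f| (f.offset, f.size));
    let mut min_offset = discr_size;
    for f in fields {
        if f.offset > min_offset {
            // A gap before this field is padding.
            println!("padding: {} bytes", f.offset - min_offset);
        }
        println!("field `.{}`: {} bytes", f.name, f.size);
        min_offset = f.offset + f.size;
    }
}

fn main() {
    // `u8` at offset 0, `u32` at offset 4: 3 bytes of padding in between.
    report_padding(
        vec![
            Field { name: "a", offset: 0, size: 1 },
            Field { name: "b", offset: 4, size: 4 },
        ],
        0,
    );
}
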
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
new file mode 100644
index 000000000..6a8298605
--- /dev/null
+++ b/compiler/rustc_session/src/config.rs
@@ -0,0 +1,2970 @@
+//! Contains infrastructure for configuring the compiler, including parsing
+//! command-line options.
+
+pub use crate::options::*;
+
+use crate::search_paths::SearchPath;
+use crate::utils::{CanonicalizedPath, NativeLib, NativeLibKind};
+use crate::{early_error, early_warn, Session};
+use crate::{lint, HashStableContext};
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+
+use rustc_data_structures::stable_hasher::ToStableHashKey;
+use rustc_target::abi::{Align, TargetDataLayout};
+use rustc_target::spec::{LinkerFlavor, SplitDebuginfo, Target, TargetTriple, TargetWarnings};
+use rustc_target::spec::{PanicStrategy, SanitizerSet, TARGETS};
+
+use crate::parse::{CrateCheckConfig, CrateConfig};
+use rustc_feature::UnstableFeatures;
+use rustc_span::edition::{Edition, DEFAULT_EDITION, EDITION_NAME_LIST, LATEST_STABLE_EDITION};
+use rustc_span::source_map::{FileName, FilePathMapping};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::RealFileName;
+use rustc_span::SourceFileHashAlgorithm;
+
+use rustc_errors::emitter::HumanReadableErrorType;
+use rustc_errors::{ColorConfig, HandlerFlags};
+
+use std::collections::btree_map::{
+ Iter as BTreeMapIter, Keys as BTreeMapKeysIter, Values as BTreeMapValuesIter,
+};
+use std::collections::{BTreeMap, BTreeSet};
+use std::fmt;
+use std::hash::Hash;
+use std::iter::{self, FromIterator};
+use std::path::{Path, PathBuf};
+use std::str::{self, FromStr};
+
+/// The different settings that the `-C strip` flag can have.
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum Strip {
+ /// Do not strip at all.
+ None,
+
+ /// Strip debuginfo.
+ Debuginfo,
+
+ /// Strip all symbols.
+ Symbols,
+}
+
+/// The different settings that the `-C control-flow-guard` flag can have.
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum CFGuard {
+ /// Do not emit Control Flow Guard metadata or checks.
+ Disabled,
+
+ /// Emit Control Flow Guard metadata but no checks.
+ NoChecks,
+
+ /// Emit Control Flow Guard metadata and checks.
+ Checks,
+}
+
+/// The different settings that the `-Z cf-protection` flag can have.
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum CFProtection {
+ /// Do not enable control-flow protection
+ None,
+
+ /// Emit control-flow protection for branches (enables indirect branch tracking).
+ Branch,
+
+ /// Emit control-flow protection for returns.
+ Return,
+
+ /// Emit control-flow protection for both branches and returns.
+ Full,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Hash, HashStable_Generic)]
+pub enum OptLevel {
+ No, // -O0
+ Less, // -O1
+ Default, // -O2
+ Aggressive, // -O3
+ Size, // -Os
+ SizeMin, // -Oz
+}
+
+/// This is what the `LtoCli` values get mapped to after resolving defaults
+/// and taking other command line options into account.
+///
+/// Note that linker plugin-based LTO is a different mechanism entirely.
+#[derive(Clone, PartialEq)]
+pub enum Lto {
+ /// Don't do any LTO whatsoever.
+ No,
+
+ /// Do a full-crate-graph (inter-crate) LTO with ThinLTO.
+ Thin,
+
+ /// Do a local ThinLTO (intra-crate, over the CodeGen Units of the local crate only). This is
+ /// only relevant if multiple CGUs are used.
+ ThinLocal,
+
+ /// Do a full-crate-graph (inter-crate) LTO with "fat" LTO.
+ Fat,
+}
+
+/// The different settings that the `-C lto` flag can have.
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum LtoCli {
+ /// `-C lto=no`
+ No,
+ /// `-C lto=yes`
+ Yes,
+ /// `-C lto`
+ NoParam,
+ /// `-C lto=thin`
+ Thin,
+ /// `-C lto=fat`
+ Fat,
+ /// No `-C lto` flag passed
+ Unspecified,
+}
+
+/// The different settings that the `-Z dump_mir_spanview` flag can have. `Statement` generates a
+/// document highlighting each span of every statement (including terminators). `Terminator` and
+/// `Block` highlight a single span per `BasicBlock`: the span of the block's `Terminator`, or a
+/// computed span for the block, representing the entire range, covering the block's terminator and
+/// all of its statements.
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum MirSpanview {
+ /// Default `-Z dump_mir_spanview` or `-Z dump_mir_spanview=statement`
+ Statement,
+ /// `-Z dump_mir_spanview=terminator`
+ Terminator,
+ /// `-Z dump_mir_spanview=block`
+ Block,
+}
+
+/// The different settings that the `-C instrument-coverage` flag can have.
+///
+/// Coverage instrumentation now supports combining `-C instrument-coverage`
+/// with compiler and linker optimization (enabled with `-O` or `-C opt-level=1`
+/// and higher). Nevertheless, there are many variables, depending on options
+/// selected, code structure, and enabled attributes. If errors are encountered,
+/// either while compiling or when generating `llvm-cov show` reports, consider
+/// lowering the optimization level, including or excluding `-C link-dead-code`,
+/// or using `-Zunstable-options -C instrument-coverage=except-unused-functions`
+/// or `-Zunstable-options -C instrument-coverage=except-unused-generics`.
+///
+/// Note that `ExceptUnusedFunctions` means: When `mapgen.rs` generates the
+/// coverage map, it will not attempt to generate synthetic functions for unused
+/// (and not code-generated) functions (whether they are generic or not). As a
+/// result, non-codegenned functions will not be included in the coverage map,
+/// and will not appear, as covered or uncovered, in coverage reports.
+///
+/// `ExceptUnusedGenerics` will add synthetic functions to the coverage map,
+/// unless the function has type parameters.
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum InstrumentCoverage {
+ /// Default `-C instrument-coverage` or `-C instrument-coverage=statement`
+ All,
+ /// `-Zunstable-options -C instrument-coverage=except-unused-generics`
+ ExceptUnusedGenerics,
+ /// `-Zunstable-options -C instrument-coverage=except-unused-functions`
+ ExceptUnusedFunctions,
+ /// `-C instrument-coverage=off` (or `no`, etc.)
+ Off,
+}
+
+#[derive(Clone, PartialEq, Hash, Debug)]
+pub enum LinkerPluginLto {
+ LinkerPlugin(PathBuf),
+ LinkerPluginAuto,
+ Disabled,
+}
+
+/// Used with `-Z assert-incr-state`.
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum IncrementalStateAssertion {
+ /// Found and loaded an existing session directory.
+ ///
+ /// Note that this says nothing about whether any particular query
+ /// will be found to be red or green.
+ Loaded,
+ /// Did not load an existing session directory.
+ NotLoaded,
+}
+
+impl LinkerPluginLto {
+ pub fn enabled(&self) -> bool {
+ match *self {
+ LinkerPluginLto::LinkerPlugin(_) | LinkerPluginLto::LinkerPluginAuto => true,
+ LinkerPluginLto::Disabled => false,
+ }
+ }
+}
+
+/// The different settings that can be enabled via the `-Z location-detail` flag.
+#[derive(Clone, PartialEq, Hash, Debug)]
+pub struct LocationDetail {
+ pub file: bool,
+ pub line: bool,
+ pub column: bool,
+}
+
+impl LocationDetail {
+ pub fn all() -> Self {
+ Self { file: true, line: true, column: true }
+ }
+}
+
+#[derive(Clone, PartialEq, Hash, Debug)]
+pub enum SwitchWithOptPath {
+ Enabled(Option<PathBuf>),
+ Disabled,
+}
+
+impl SwitchWithOptPath {
+ pub fn enabled(&self) -> bool {
+ match *self {
+ SwitchWithOptPath::Enabled(_) => true,
+ SwitchWithOptPath::Disabled => false,
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable_Generic)]
+#[derive(Encodable, Decodable)]
+pub enum SymbolManglingVersion {
+ Legacy,
+ V0,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Hash)]
+pub enum DebugInfo {
+ None,
+ Limited,
+ Full,
+}
+
+/// Split debug-information is enabled by `-C split-debuginfo`. This enum is only used if split
+/// debug-information is enabled (in either `Packed` or `Unpacked` mode) and the platform
+/// uses DWARF for debug-information.
+///
+/// Some debug-information requires link-time relocation and some does not. LLVM can partition
+/// the debuginfo into sections depending on whether or not it requires link-time relocation. Split
+/// DWARF provides a mechanism which allows the linker to skip the sections which don't require
+/// link-time relocation - either by putting those sections in DWARF object files, or by keeping
+/// them in the object file in such a way that the linker will skip them.
+#[derive(Clone, Copy, Debug, PartialEq, Hash)]
+pub enum SplitDwarfKind {
+ /// Sections which do not require relocation are written into object file but ignored by the
+ /// linker.
+ Single,
+ /// Sections which do not require relocation are written into a DWARF object (`.dwo`) file
+ /// which is ignored by the linker.
+ Split,
+}
+
+impl FromStr for SplitDwarfKind {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<Self, ()> {
+ Ok(match s {
+ "single" => SplitDwarfKind::Single,
+ "split" => SplitDwarfKind::Split,
+ _ => return Err(()),
+ })
+ }
+}
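
// Usage sketch for the `FromStr` impl above (illustrative, assuming the enum
// is in scope): with the derives on `SplitDwarfKind`, the flag value parses
// via `str::parse`.
assert_eq!("split".parse::<SplitDwarfKind>(), Ok(SplitDwarfKind::Split));
assert_eq!("single".parse::<SplitDwarfKind>(), Ok(SplitDwarfKind::Single));
assert!("both".parse::<SplitDwarfKind>().is_err());
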
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord, HashStable_Generic)]
+#[derive(Encodable, Decodable)]
+pub enum OutputType {
+ Bitcode,
+ Assembly,
+ LlvmAssembly,
+ Mir,
+ Metadata,
+ Object,
+ Exe,
+ DepInfo,
+}
+
+impl<HCX: HashStableContext> ToStableHashKey<HCX> for OutputType {
+ type KeyType = Self;
+
+ fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType {
+ *self
+ }
+}
+
+impl OutputType {
+ fn is_compatible_with_codegen_units_and_single_output_file(&self) -> bool {
+ match *self {
+ OutputType::Exe | OutputType::DepInfo | OutputType::Metadata => true,
+ OutputType::Bitcode
+ | OutputType::Assembly
+ | OutputType::LlvmAssembly
+ | OutputType::Mir
+ | OutputType::Object => false,
+ }
+ }
+
+ fn shorthand(&self) -> &'static str {
+ match *self {
+ OutputType::Bitcode => "llvm-bc",
+ OutputType::Assembly => "asm",
+ OutputType::LlvmAssembly => "llvm-ir",
+ OutputType::Mir => "mir",
+ OutputType::Object => "obj",
+ OutputType::Metadata => "metadata",
+ OutputType::Exe => "link",
+ OutputType::DepInfo => "dep-info",
+ }
+ }
+
+ fn from_shorthand(shorthand: &str) -> Option<Self> {
+ Some(match shorthand {
+ "asm" => OutputType::Assembly,
+ "llvm-ir" => OutputType::LlvmAssembly,
+ "mir" => OutputType::Mir,
+ "llvm-bc" => OutputType::Bitcode,
+ "obj" => OutputType::Object,
+ "metadata" => OutputType::Metadata,
+ "link" => OutputType::Exe,
+ "dep-info" => OutputType::DepInfo,
+ _ => return None,
+ })
+ }
+
+ fn shorthands_display() -> String {
+ format!(
+ "`{}`, `{}`, `{}`, `{}`, `{}`, `{}`, `{}`, `{}`",
+ OutputType::Bitcode.shorthand(),
+ OutputType::Assembly.shorthand(),
+ OutputType::LlvmAssembly.shorthand(),
+ OutputType::Mir.shorthand(),
+ OutputType::Object.shorthand(),
+ OutputType::Metadata.shorthand(),
+ OutputType::Exe.shorthand(),
+ OutputType::DepInfo.shorthand(),
+ )
+ }
+
+ pub fn extension(&self) -> &'static str {
+ match *self {
+ OutputType::Bitcode => "bc",
+ OutputType::Assembly => "s",
+ OutputType::LlvmAssembly => "ll",
+ OutputType::Mir => "mir",
+ OutputType::Object => "o",
+ OutputType::Metadata => "rmeta",
+ OutputType::DepInfo => "d",
+ OutputType::Exe => "",
+ }
+ }
+}
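
// Sketch of the shorthand round-trip (illustrative; `from_shorthand` and
// `shorthand` are private to this module, so this only compiles here):
// `--emit llvm-ir` selects `OutputType::LlvmAssembly`, whose files get the
// `ll` extension.
let ty = OutputType::from_shorthand("llvm-ir").expect("known shorthand");
assert_eq!(ty, OutputType::LlvmAssembly);
assert_eq!(ty.extension(), "ll");
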
+
+/// The type of diagnostics output to generate.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum ErrorOutputType {
+ /// Output meant for the consumption of humans.
+ HumanReadable(HumanReadableErrorType),
+ /// Output that's consumed by other tools such as `rustfix` or the `RLS`.
+ Json {
+ /// Render the JSON in a human readable way (with indents and newlines).
+ pretty: bool,
+        /// The JSON output includes a `rendered` field containing the rendered
+        /// human-readable output.
+ json_rendered: HumanReadableErrorType,
+ },
+}
+
+impl Default for ErrorOutputType {
+ fn default() -> Self {
+ Self::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto))
+ }
+}
+
+/// Parameter to control path trimming.
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Hash)]
+pub enum TrimmedDefPaths {
+ /// `try_print_trimmed_def_path` never prints a trimmed path and never calls the expensive query
+ #[default]
+ Never,
+ /// `try_print_trimmed_def_path` calls the expensive query, the query doesn't call `delay_good_path_bug`
+ Always,
+ /// `try_print_trimmed_def_path` calls the expensive query, the query calls `delay_good_path_bug`
+ GoodPath,
+}
+
+/// Use tree-based collections to cheaply get a deterministic `Hash` implementation.
+/// *Do not* switch `BTreeMap` out for an unsorted container type! That would break
+/// dependency tracking for command-line arguments. Also only hash keys, since tracking
+/// should only depend on the output types, not the paths they're written to.
+#[derive(Clone, Debug, Hash, HashStable_Generic)]
+pub struct OutputTypes(BTreeMap<OutputType, Option<PathBuf>>);
+
+impl OutputTypes {
+ pub fn new(entries: &[(OutputType, Option<PathBuf>)]) -> OutputTypes {
+ OutputTypes(BTreeMap::from_iter(entries.iter().map(|&(k, ref v)| (k, v.clone()))))
+ }
+
+ pub fn get(&self, key: &OutputType) -> Option<&Option<PathBuf>> {
+ self.0.get(key)
+ }
+
+ pub fn contains_key(&self, key: &OutputType) -> bool {
+ self.0.contains_key(key)
+ }
+
+ pub fn keys(&self) -> BTreeMapKeysIter<'_, OutputType, Option<PathBuf>> {
+ self.0.keys()
+ }
+
+ pub fn values(&self) -> BTreeMapValuesIter<'_, OutputType, Option<PathBuf>> {
+ self.0.values()
+ }
+
+ pub fn len(&self) -> usize {
+ self.0.len()
+ }
+
+ /// Returns `true` if any of the output types require codegen or linking.
+ pub fn should_codegen(&self) -> bool {
+ self.0.keys().any(|k| match *k {
+ OutputType::Bitcode
+ | OutputType::Assembly
+ | OutputType::LlvmAssembly
+ | OutputType::Mir
+ | OutputType::Object
+ | OutputType::Exe => true,
+ OutputType::Metadata | OutputType::DepInfo => false,
+ })
+ }
+
+ /// Returns `true` if any of the output types require linking.
+ pub fn should_link(&self) -> bool {
+ self.0.keys().any(|k| match *k {
+ OutputType::Bitcode
+ | OutputType::Assembly
+ | OutputType::LlvmAssembly
+ | OutputType::Mir
+ | OutputType::Metadata
+ | OutputType::Object
+ | OutputType::DepInfo => false,
+ OutputType::Exe => true,
+ })
+ }
+}
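
// Illustrative check of the two predicates above: `--emit metadata` alone
// requires no codegen, while `--emit obj` requires codegen but no linking.
let meta_only = OutputTypes::new(&[(OutputType::Metadata, None)]);
assert!(!meta_only.should_codegen() && !meta_only.should_link());
let obj_only = OutputTypes::new(&[(OutputType::Object, None)]);
assert!(obj_only.should_codegen() && !obj_only.should_link());
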
+
+/// Use tree-based collections to cheaply get a deterministic `Hash` implementation.
+/// *Do not* switch `BTreeMap` or `BTreeSet` out for an unsorted container type! That
+/// would break dependency tracking for command-line arguments.
+#[derive(Clone)]
+pub struct Externs(BTreeMap<String, ExternEntry>);
+
+#[derive(Clone, Debug)]
+pub struct ExternEntry {
+ pub location: ExternLocation,
+ /// Indicates this is a "private" dependency for the
+ /// `exported_private_dependencies` lint.
+ ///
+ /// This can be set with the `priv` option like
+ /// `--extern priv:name=foo.rlib`.
+ pub is_private_dep: bool,
+ /// Add the extern entry to the extern prelude.
+ ///
+ /// This can be disabled with the `noprelude` option like
+ /// `--extern noprelude:name`.
+ pub add_prelude: bool,
+ /// The extern entry shouldn't be considered for unused dependency warnings.
+ ///
+ /// `--extern nounused:std=/path/to/lib/libstd.rlib`. This is used to
+ /// suppress `unused-crate-dependencies` warnings.
+ pub nounused_dep: bool,
+}
+
+#[derive(Clone, Debug)]
+pub enum ExternLocation {
+ /// Indicates to look for the library in the search paths.
+ ///
+ /// Added via `--extern name`.
+ FoundInLibrarySearchDirectories,
+ /// The locations where this extern entry must be found.
+ ///
+ /// The `CrateLoader` is responsible for loading these and figuring out
+ /// which one to use.
+ ///
+ /// Added via `--extern prelude_name=some_file.rlib`
+ ExactPaths(BTreeSet<CanonicalizedPath>),
+}
+
+impl Externs {
+ /// Used for testing.
+ pub fn new(data: BTreeMap<String, ExternEntry>) -> Externs {
+ Externs(data)
+ }
+
+ pub fn get(&self, key: &str) -> Option<&ExternEntry> {
+ self.0.get(key)
+ }
+
+ pub fn iter(&self) -> BTreeMapIter<'_, String, ExternEntry> {
+ self.0.iter()
+ }
+
+ pub fn len(&self) -> usize {
+ self.0.len()
+ }
+}
+
+impl ExternEntry {
+ fn new(location: ExternLocation) -> ExternEntry {
+ ExternEntry { location, is_private_dep: false, add_prelude: false, nounused_dep: false }
+ }
+
+ pub fn files(&self) -> Option<impl Iterator<Item = &CanonicalizedPath>> {
+ match &self.location {
+ ExternLocation::ExactPaths(set) => Some(set.iter()),
+ _ => None,
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum PrintRequest {
+ FileNames,
+ Sysroot,
+ TargetLibdir,
+ CrateName,
+ Cfg,
+ TargetList,
+ TargetCPUs,
+ TargetFeatures,
+ RelocationModels,
+ CodeModels,
+ TlsModels,
+ TargetSpec,
+ NativeStaticLibs,
+ StackProtectorStrategies,
+ LinkArgs,
+}
+
+pub enum Input {
+ /// Load source code from a file.
+ File(PathBuf),
+ /// Load source code from a string.
+ Str {
+ /// A string that is shown in place of a filename.
+ name: FileName,
+ /// An anonymous string containing the source code.
+ input: String,
+ },
+}
+
+impl Input {
+ pub fn filestem(&self) -> &str {
+ match *self {
+ Input::File(ref ifile) => ifile.file_stem().unwrap().to_str().unwrap(),
+ Input::Str { .. } => "rust_out",
+ }
+ }
+
+ pub fn source_name(&self) -> FileName {
+ match *self {
+ Input::File(ref ifile) => ifile.clone().into(),
+ Input::Str { ref name, .. } => name.clone(),
+ }
+ }
+}
+
+#[derive(Clone, Hash, Debug, HashStable_Generic)]
+pub struct OutputFilenames {
+ pub out_directory: PathBuf,
+ filestem: String,
+ pub single_output_file: Option<PathBuf>,
+ pub temps_directory: Option<PathBuf>,
+ pub outputs: OutputTypes,
+}
+
+pub const RLINK_EXT: &str = "rlink";
+pub const RUST_CGU_EXT: &str = "rcgu";
+pub const DWARF_OBJECT_EXT: &str = "dwo";
+
+impl OutputFilenames {
+ pub fn new(
+ out_directory: PathBuf,
+ out_filestem: String,
+ single_output_file: Option<PathBuf>,
+ temps_directory: Option<PathBuf>,
+ extra: String,
+ outputs: OutputTypes,
+ ) -> Self {
+ OutputFilenames {
+ out_directory,
+ single_output_file,
+ temps_directory,
+ outputs,
+ filestem: format!("{out_filestem}{extra}"),
+ }
+ }
+
+ pub fn path(&self, flavor: OutputType) -> PathBuf {
+ self.outputs
+ .get(&flavor)
+ .and_then(|p| p.to_owned())
+ .or_else(|| self.single_output_file.clone())
+ .unwrap_or_else(|| self.output_path(flavor))
+ }
+
+ /// Gets the output path where a compilation artifact of the given type
+ /// should be placed on disk.
+ pub fn output_path(&self, flavor: OutputType) -> PathBuf {
+ let extension = flavor.extension();
+ self.with_directory_and_extension(&self.out_directory, &extension)
+ }
+
+ /// Gets the path where a compilation artifact of the given type for the
+ /// given codegen unit should be placed on disk. If codegen_unit_name is
+ /// None, a path distinct from those of any codegen unit will be generated.
+ pub fn temp_path(&self, flavor: OutputType, codegen_unit_name: Option<&str>) -> PathBuf {
+ let extension = flavor.extension();
+ self.temp_path_ext(extension, codegen_unit_name)
+ }
+
+ /// Like `temp_path`, but specifically for dwarf objects.
+ pub fn temp_path_dwo(&self, codegen_unit_name: Option<&str>) -> PathBuf {
+ self.temp_path_ext(DWARF_OBJECT_EXT, codegen_unit_name)
+ }
+
+    /// Like `temp_path`, but for artifacts that have no corresponding
+    /// `OutputType`, such as `noopt-bitcode` or `lto-bitcode`.
+ pub fn temp_path_ext(&self, ext: &str, codegen_unit_name: Option<&str>) -> PathBuf {
+ let mut extension = String::new();
+
+ if let Some(codegen_unit_name) = codegen_unit_name {
+ extension.push_str(codegen_unit_name);
+ }
+
+ if !ext.is_empty() {
+ if !extension.is_empty() {
+ extension.push('.');
+ extension.push_str(RUST_CGU_EXT);
+ extension.push('.');
+ }
+
+ extension.push_str(ext);
+ }
+
+ let temps_directory = self.temps_directory.as_ref().unwrap_or(&self.out_directory);
+
+ self.with_directory_and_extension(&temps_directory, &extension)
+ }
+
+ pub fn with_extension(&self, extension: &str) -> PathBuf {
+ self.with_directory_and_extension(&self.out_directory, extension)
+ }
+
+ fn with_directory_and_extension(&self, directory: &PathBuf, extension: &str) -> PathBuf {
+ let mut path = directory.join(&self.filestem);
+ path.set_extension(extension);
+ path
+ }
+
+ /// Returns the path for the Split DWARF file - this can differ depending on which Split DWARF
+ /// mode is being used, which is the logic that this function is intended to encapsulate.
+ pub fn split_dwarf_path(
+ &self,
+ split_debuginfo_kind: SplitDebuginfo,
+ split_dwarf_kind: SplitDwarfKind,
+ cgu_name: Option<&str>,
+ ) -> Option<PathBuf> {
+ let obj_out = self.temp_path(OutputType::Object, cgu_name);
+ let dwo_out = self.temp_path_dwo(cgu_name);
+ match (split_debuginfo_kind, split_dwarf_kind) {
+ (SplitDebuginfo::Off, SplitDwarfKind::Single | SplitDwarfKind::Split) => None,
+ // Single mode doesn't change how DWARF is emitted, but does add Split DWARF attributes
+ // (pointing at the path which is being determined here). Use the path to the current
+ // object file.
+ (SplitDebuginfo::Packed | SplitDebuginfo::Unpacked, SplitDwarfKind::Single) => {
+ Some(obj_out)
+ }
+ // Split mode emits the DWARF into a different file, use that path.
+ (SplitDebuginfo::Packed | SplitDebuginfo::Unpacked, SplitDwarfKind::Split) => {
+ Some(dwo_out)
+ }
+ }
+ }
+}
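
// Worked example of how `temp_path_ext` composes names (values assumed for
// illustration): filestem `mycrate`, CGU name `cgu0`, and extension `ll`
// yield `<temps-or-out-dir>/mycrate.cgu0.rcgu.ll`.
let outputs = OutputFilenames::new(
    PathBuf::from("target/debug"), // out_directory
    "mycrate".to_string(),         // out_filestem
    None,                          // single_output_file
    None,                          // temps_directory (falls back to out_directory)
    String::new(),                 // extra
    OutputTypes::new(&[]),
);
assert_eq!(
    outputs.temp_path_ext("ll", Some("cgu0")),
    PathBuf::from("target/debug/mycrate.cgu0.rcgu.ll"),
);
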
+
+pub fn host_triple() -> &'static str {
+ // Get the host triple out of the build environment. This ensures that our
+ // idea of the host triple is the same as for the set of libraries we've
+ // actually built. We can't just take LLVM's host triple because they
+ // normalize all ix86 architectures to i386.
+ //
+ // Instead of grabbing the host triple (for the current host), we grab (at
+ // compile time) the target triple that this rustc is built with and
+ // calling that (at runtime) the host triple.
+ (option_env!("CFG_COMPILER_HOST_TRIPLE")).expect("CFG_COMPILER_HOST_TRIPLE")
+}
+
+impl Default for Options {
+ fn default() -> Options {
+ Options {
+ assert_incr_state: None,
+ crate_types: Vec::new(),
+ optimize: OptLevel::No,
+ debuginfo: DebugInfo::None,
+ lint_opts: Vec::new(),
+ lint_cap: None,
+ describe_lints: false,
+ output_types: OutputTypes(BTreeMap::new()),
+ search_paths: vec![],
+ maybe_sysroot: None,
+ target_triple: TargetTriple::from_triple(host_triple()),
+ test: false,
+ incremental: None,
+ unstable_opts: Default::default(),
+ prints: Vec::new(),
+ cg: Default::default(),
+ error_format: ErrorOutputType::default(),
+ diagnostic_width: None,
+ externs: Externs(BTreeMap::new()),
+ crate_name: None,
+ libs: Vec::new(),
+ unstable_features: UnstableFeatures::Disallow,
+ debug_assertions: true,
+ actually_rustdoc: false,
+ trimmed_def_paths: TrimmedDefPaths::default(),
+ cli_forced_codegen_units: None,
+ cli_forced_thinlto_off: false,
+ remap_path_prefix: Vec::new(),
+ real_rust_source_base_dir: None,
+ edition: DEFAULT_EDITION,
+ json_artifact_notifications: false,
+ json_unused_externs: JsonUnusedExterns::No,
+ json_future_incompat: false,
+ pretty: None,
+ working_dir: RealFileName::LocalPath(std::env::current_dir().unwrap()),
+ }
+ }
+}
+
+impl Options {
+ /// Returns `true` if there is a reason to build the dep graph.
+ pub fn build_dep_graph(&self) -> bool {
+ self.incremental.is_some()
+ || self.unstable_opts.dump_dep_graph
+ || self.unstable_opts.query_dep_graph
+ }
+
+ pub fn file_path_mapping(&self) -> FilePathMapping {
+ FilePathMapping::new(self.remap_path_prefix.clone())
+ }
+
+ /// Returns `true` if there will be an output file generated.
+ pub fn will_create_output_file(&self) -> bool {
+ !self.unstable_opts.parse_only && // The file is just being parsed
+ !self.unstable_opts.ls // The file is just being queried
+ }
+
+ #[inline]
+ pub fn share_generics(&self) -> bool {
+ match self.unstable_opts.share_generics {
+ Some(setting) => setting,
+ None => match self.optimize {
+ OptLevel::No | OptLevel::Less | OptLevel::Size | OptLevel::SizeMin => true,
+ OptLevel::Default | OptLevel::Aggressive => false,
+ },
+ }
+ }
+
+ pub fn get_symbol_mangling_version(&self) -> SymbolManglingVersion {
+ self.cg.symbol_mangling_version.unwrap_or(SymbolManglingVersion::Legacy)
+ }
+}
+
+impl UnstableOptions {
+ pub fn diagnostic_handler_flags(&self, can_emit_warnings: bool) -> HandlerFlags {
+ HandlerFlags {
+ can_emit_warnings,
+ treat_err_as_bug: self.treat_err_as_bug,
+ dont_buffer_diagnostics: self.dont_buffer_diagnostics,
+ report_delayed_bugs: self.report_delayed_bugs,
+ macro_backtrace: self.macro_backtrace,
+ deduplicate_diagnostics: self.deduplicate_diagnostics,
+ }
+ }
+}
+
+// The type of entry function, so users can have their own entry functions
+#[derive(Copy, Clone, PartialEq, Hash, Debug, HashStable_Generic)]
+pub enum EntryFnType {
+ Main,
+ Start,
+}
+
+#[derive(Copy, PartialEq, PartialOrd, Clone, Ord, Eq, Hash, Debug, Encodable, Decodable)]
+#[derive(HashStable_Generic)]
+pub enum CrateType {
+ Executable,
+ Dylib,
+ Rlib,
+ Staticlib,
+ Cdylib,
+ ProcMacro,
+}
+
+impl CrateType {
+ /// When generated, is this crate type an archive?
+ pub fn is_archive(&self) -> bool {
+ match *self {
+ CrateType::Rlib | CrateType::Staticlib => true,
+ CrateType::Executable | CrateType::Dylib | CrateType::Cdylib | CrateType::ProcMacro => {
+ false
+ }
+ }
+ }
+}
+
+#[derive(Clone, Hash, Debug, PartialEq, Eq)]
+pub enum Passes {
+ Some(Vec<String>),
+ All,
+}
+
+impl Passes {
+ pub fn is_empty(&self) -> bool {
+ match *self {
+ Passes::Some(ref v) => v.is_empty(),
+ Passes::All => false,
+ }
+ }
+
+ pub fn extend(&mut self, passes: impl IntoIterator<Item = String>) {
+ match *self {
+ Passes::Some(ref mut v) => v.extend(passes),
+ Passes::All => {}
+ }
+ }
+}
+
+#[derive(Clone, Copy, Hash, Debug, PartialEq)]
+pub enum PAuthKey {
+ A,
+ B,
+}
+
+#[derive(Clone, Copy, Hash, Debug, PartialEq)]
+pub struct PacRet {
+ pub leaf: bool,
+ pub key: PAuthKey,
+}
+
+#[derive(Clone, Copy, Hash, Debug, PartialEq)]
+pub struct BranchProtection {
+ pub bti: bool,
+ pub pac_ret: Option<PacRet>,
+}
+
+impl Default for BranchProtection {
+ fn default() -> Self {
+ BranchProtection { bti: false, pac_ret: None }
+ }
+}
+
+pub const fn default_lib_output() -> CrateType {
+ CrateType::Rlib
+}
+
+fn default_configuration(sess: &Session) -> CrateConfig {
+ // NOTE: This should be kept in sync with `CrateCheckConfig::fill_well_known` below.
+ let end = &sess.target.endian;
+ let arch = &sess.target.arch;
+ let wordsz = sess.target.pointer_width.to_string();
+ let os = &sess.target.os;
+ let env = &sess.target.env;
+ let abi = &sess.target.abi;
+ let vendor = &sess.target.vendor;
+ let min_atomic_width = sess.target.min_atomic_width();
+ let max_atomic_width = sess.target.max_atomic_width();
+ let atomic_cas = sess.target.atomic_cas;
+ let layout = TargetDataLayout::parse(&sess.target).unwrap_or_else(|err| {
+ sess.fatal(&err);
+ });
+
+ let mut ret = FxHashSet::default();
+ ret.reserve(7); // the minimum number of insertions
+ // Target bindings.
+ ret.insert((sym::target_os, Some(Symbol::intern(os))));
+ for fam in sess.target.families.as_ref() {
+ ret.insert((sym::target_family, Some(Symbol::intern(fam))));
+ if fam == "windows" {
+ ret.insert((sym::windows, None));
+ } else if fam == "unix" {
+ ret.insert((sym::unix, None));
+ }
+ }
+ ret.insert((sym::target_arch, Some(Symbol::intern(arch))));
+ ret.insert((sym::target_endian, Some(Symbol::intern(end.as_str()))));
+ ret.insert((sym::target_pointer_width, Some(Symbol::intern(&wordsz))));
+ ret.insert((sym::target_env, Some(Symbol::intern(env))));
+ ret.insert((sym::target_abi, Some(Symbol::intern(abi))));
+ ret.insert((sym::target_vendor, Some(Symbol::intern(vendor))));
+ if sess.target.has_thread_local {
+ ret.insert((sym::target_thread_local, None));
+ }
+ for (i, align) in [
+ (8, layout.i8_align.abi),
+ (16, layout.i16_align.abi),
+ (32, layout.i32_align.abi),
+ (64, layout.i64_align.abi),
+ (128, layout.i128_align.abi),
+ ] {
+ if i >= min_atomic_width && i <= max_atomic_width {
+ let mut insert_atomic = |s, align: Align| {
+ ret.insert((sym::target_has_atomic_load_store, Some(Symbol::intern(s))));
+ if atomic_cas {
+ ret.insert((sym::target_has_atomic, Some(Symbol::intern(s))));
+ }
+ if align.bits() == i {
+ ret.insert((sym::target_has_atomic_equal_alignment, Some(Symbol::intern(s))));
+ }
+ };
+ let s = i.to_string();
+ insert_atomic(&s, align);
+ if s == wordsz {
+ insert_atomic("ptr", layout.pointer_align.abi);
+ }
+ }
+ }
+
+ let panic_strategy = sess.panic_strategy();
+ ret.insert((sym::panic, Some(panic_strategy.desc_symbol())));
+
+ for s in sess.opts.unstable_opts.sanitizer {
+ let symbol = Symbol::intern(&s.to_string());
+ ret.insert((sym::sanitize, Some(symbol)));
+ }
+
+ if sess.opts.debug_assertions {
+ ret.insert((sym::debug_assertions, None));
+ }
+ // JUSTIFICATION: before wrapper fn is available
+ #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+ if sess.opts.crate_types.contains(&CrateType::ProcMacro) {
+ ret.insert((sym::proc_macro, None));
+ }
+ ret
+}
+
+/// Converts the crate `cfg!` configuration from `String` to `Symbol`.
+/// `rustc_interface::interface::Config` accepts this in the compiler configuration,
+/// but the symbol interner is not yet set up then, so we must convert it later.
+pub fn to_crate_config(cfg: FxHashSet<(String, Option<String>)>) -> CrateConfig {
+ cfg.into_iter().map(|(a, b)| (Symbol::intern(&a), b.map(|b| Symbol::intern(&b)))).collect()
+}
+
+/// The parsed `--check-cfg` options
+pub struct CheckCfg<T = String> {
+    /// The set of all `names()`; if `None`, no name checking is performed.
+ pub names_valid: Option<FxHashSet<T>>,
+    /// Whether checking of well-known values is activated.
+ pub well_known_values: bool,
+ /// The set of all `values()`
+ pub values_valid: FxHashMap<T, FxHashSet<T>>,
+}
+
+impl<T> Default for CheckCfg<T> {
+ fn default() -> Self {
+ CheckCfg {
+ names_valid: Default::default(),
+ values_valid: Default::default(),
+ well_known_values: false,
+ }
+ }
+}
+
+impl<T> CheckCfg<T> {
+ fn map_data<O: Eq + Hash>(&self, f: impl Fn(&T) -> O) -> CheckCfg<O> {
+ CheckCfg {
+ names_valid: self
+ .names_valid
+ .as_ref()
+ .map(|names_valid| names_valid.iter().map(|a| f(a)).collect()),
+ values_valid: self
+ .values_valid
+ .iter()
+ .map(|(a, b)| (f(a), b.iter().map(|b| f(b)).collect()))
+ .collect(),
+ well_known_values: self.well_known_values,
+ }
+ }
+}
+
+/// Converts the crate `--check-cfg` options from `String` to `Symbol`.
+/// `rustc_interface::interface::Config` accepts this in the compiler configuration,
+/// but the symbol interner is not yet set up then, so we must convert it later.
+pub fn to_crate_check_config(cfg: CheckCfg) -> CrateCheckConfig {
+ cfg.map_data(|s| Symbol::intern(s))
+}
+
+impl CrateCheckConfig {
+ /// Fills a `CrateCheckConfig` with well-known configuration names.
+ fn fill_well_known_names(&mut self) {
+ // NOTE: This should be kept in sync with `default_configuration` and
+ // `fill_well_known_values`
+ const WELL_KNOWN_NAMES: &[Symbol] = &[
+ // rustc
+ sym::unix,
+ sym::windows,
+ sym::target_os,
+ sym::target_family,
+ sym::target_arch,
+ sym::target_endian,
+ sym::target_pointer_width,
+ sym::target_env,
+ sym::target_abi,
+ sym::target_vendor,
+ sym::target_thread_local,
+ sym::target_has_atomic_load_store,
+ sym::target_has_atomic,
+ sym::target_has_atomic_equal_alignment,
+ sym::target_feature,
+ sym::panic,
+ sym::sanitize,
+ sym::debug_assertions,
+ sym::proc_macro,
+ sym::test,
+ sym::feature,
+ // rustdoc
+ sym::doc,
+ sym::doctest,
+ // miri
+ sym::miri,
+ ];
+
+ // We only insert well-known names if `names()` was activated
+ if let Some(names_valid) = &mut self.names_valid {
+ names_valid.extend(WELL_KNOWN_NAMES);
+ }
+ }
+
+ /// Fills a `CrateCheckConfig` with well-known configuration values.
+ fn fill_well_known_values(&mut self) {
+ if !self.well_known_values {
+ return;
+ }
+
+ // NOTE: This should be kept in sync with `default_configuration` and
+ // `fill_well_known_names`
+
+ let panic_values = &PanicStrategy::all();
+
+ let atomic_values = &[
+ sym::ptr,
+ sym::integer(8usize),
+ sym::integer(16usize),
+ sym::integer(32usize),
+ sym::integer(64usize),
+ sym::integer(128usize),
+ ];
+
+ let sanitize_values = SanitizerSet::all()
+ .into_iter()
+ .map(|sanitizer| Symbol::intern(sanitizer.as_str().unwrap()));
+
+ // Unknown possible values:
+ // - `feature`
+ // - `target_feature`
+
+ // No-values
+ for name in [
+ sym::doc,
+ sym::miri,
+ sym::unix,
+ sym::test,
+ sym::doctest,
+ sym::windows,
+ sym::proc_macro,
+ sym::debug_assertions,
+ sym::target_thread_local,
+ ] {
+ self.values_valid.entry(name).or_default();
+ }
+
+ // Pre-defined values
+ self.values_valid.entry(sym::panic).or_default().extend(panic_values);
+ self.values_valid.entry(sym::sanitize).or_default().extend(sanitize_values);
+ self.values_valid.entry(sym::target_has_atomic).or_default().extend(atomic_values);
+ self.values_valid
+ .entry(sym::target_has_atomic_load_store)
+ .or_default()
+ .extend(atomic_values);
+ self.values_valid
+ .entry(sym::target_has_atomic_equal_alignment)
+ .or_default()
+ .extend(atomic_values);
+
+ // Target specific values
+ {
+ const VALUES: [&Symbol; 8] = [
+ &sym::target_os,
+ &sym::target_family,
+ &sym::target_arch,
+ &sym::target_endian,
+ &sym::target_env,
+ &sym::target_abi,
+ &sym::target_vendor,
+ &sym::target_pointer_width,
+ ];
+
+ // Initialize (if not already initialized)
+ for &e in VALUES {
+ self.values_valid.entry(e).or_default();
+ }
+
+            // Get all the value sets at once; repeated per-key lookups in the
+            // loop below would otherwise be costly.
+            // (8 values * 220 targets ~= 1760 lookups, at the time of writing this comment).
+ let [
+ values_target_os,
+ values_target_family,
+ values_target_arch,
+ values_target_endian,
+ values_target_env,
+ values_target_abi,
+ values_target_vendor,
+ values_target_pointer_width,
+ ] = self
+ .values_valid
+ .get_many_mut(VALUES)
+ .expect("unable to get all the check-cfg values buckets");
+
+ for target in TARGETS
+ .iter()
+ .map(|target| Target::expect_builtin(&TargetTriple::from_triple(target)))
+ {
+ values_target_os.insert(Symbol::intern(&target.options.os));
+ values_target_family
+ .extend(target.options.families.iter().map(|family| Symbol::intern(family)));
+ values_target_arch.insert(Symbol::intern(&target.arch));
+ values_target_endian.insert(Symbol::intern(&target.options.endian.as_str()));
+ values_target_env.insert(Symbol::intern(&target.options.env));
+ values_target_abi.insert(Symbol::intern(&target.options.abi));
+ values_target_vendor.insert(Symbol::intern(&target.options.vendor));
+ values_target_pointer_width.insert(sym::integer(target.pointer_width));
+ }
+ }
+ }
+
+ pub fn fill_well_known(&mut self) {
+ self.fill_well_known_names();
+ self.fill_well_known_values();
+ }
+}
+
+pub fn build_configuration(sess: &Session, mut user_cfg: CrateConfig) -> CrateConfig {
+ // Combine the configuration requested by the session (command line) with
+ // some default and generated configuration items.
+ let default_cfg = default_configuration(sess);
+ // If the user wants a test runner, then add the test cfg.
+ if sess.opts.test {
+ user_cfg.insert((sym::test, None));
+ }
+ user_cfg.extend(default_cfg.iter().cloned());
+ user_cfg
+}
+
+pub(super) fn build_target_config(
+ opts: &Options,
+ target_override: Option<Target>,
+ sysroot: &Path,
+) -> Target {
+ let target_result = target_override.map_or_else(
+ || Target::search(&opts.target_triple, sysroot),
+ |t| Ok((t, TargetWarnings::empty())),
+ );
+ let (target, target_warnings) = target_result.unwrap_or_else(|e| {
+ early_error(
+ opts.error_format,
+ &format!(
+ "Error loading target specification: {}. \
+ Run `rustc --print target-list` for a list of built-in targets",
+ e
+ ),
+ )
+ });
+ for warning in target_warnings.warning_messages() {
+ early_warn(opts.error_format, &warning)
+ }
+
+ if !matches!(target.pointer_width, 16 | 32 | 64) {
+ early_error(
+ opts.error_format,
+ &format!(
+ "target specification was invalid: \
+ unrecognized target-pointer-width {}",
+ target.pointer_width
+ ),
+ )
+ }
+
+ target
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum OptionStability {
+ Stable,
+ Unstable,
+}
+
+pub struct RustcOptGroup {
+ pub apply: Box<dyn Fn(&mut getopts::Options) -> &mut getopts::Options>,
+ pub name: &'static str,
+ pub stability: OptionStability,
+}
+
+impl RustcOptGroup {
+ pub fn is_stable(&self) -> bool {
+ self.stability == OptionStability::Stable
+ }
+
+ pub fn stable<F>(name: &'static str, f: F) -> RustcOptGroup
+ where
+ F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static,
+ {
+ RustcOptGroup { name, apply: Box::new(f), stability: OptionStability::Stable }
+ }
+
+ pub fn unstable<F>(name: &'static str, f: F) -> RustcOptGroup
+ where
+ F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static,
+ {
+ RustcOptGroup { name, apply: Box::new(f), stability: OptionStability::Unstable }
+ }
+}
+
+// The `opt` local module holds wrappers around the `getopts` API that
+// add extra rustc-specific metadata to each option; that metadata is
+// exposed through the `RustcOptGroup` struct. The public functions
+// below whose names end in `_s` return *stable* options; the remaining
+// ones return *unstable* options, i.e., options that are only enabled
+// when the user also passes the `-Z unstable-options` debugging flag.
+mod opt {
+ // The `fn flag*` etc below are written so that we can use them
+ // in the future; do not warn about them not being used right now.
+ #![allow(dead_code)]
+
+ use super::RustcOptGroup;
+
+ pub type R = RustcOptGroup;
+ pub type S = &'static str;
+
+ fn stable<F>(name: S, f: F) -> R
+ where
+ F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static,
+ {
+ RustcOptGroup::stable(name, f)
+ }
+
+ fn unstable<F>(name: S, f: F) -> R
+ where
+ F: Fn(&mut getopts::Options) -> &mut getopts::Options + 'static,
+ {
+ RustcOptGroup::unstable(name, f)
+ }
+
+ fn longer(a: S, b: S) -> S {
+ if a.len() > b.len() { a } else { b }
+ }
+
+ pub fn opt_s(a: S, b: S, c: S, d: S) -> R {
+ stable(longer(a, b), move |opts| opts.optopt(a, b, c, d))
+ }
+ pub fn multi_s(a: S, b: S, c: S, d: S) -> R {
+ stable(longer(a, b), move |opts| opts.optmulti(a, b, c, d))
+ }
+ pub fn flag_s(a: S, b: S, c: S) -> R {
+ stable(longer(a, b), move |opts| opts.optflag(a, b, c))
+ }
+ pub fn flagmulti_s(a: S, b: S, c: S) -> R {
+ stable(longer(a, b), move |opts| opts.optflagmulti(a, b, c))
+ }
+
+ pub fn opt(a: S, b: S, c: S, d: S) -> R {
+ unstable(longer(a, b), move |opts| opts.optopt(a, b, c, d))
+ }
+ pub fn multi(a: S, b: S, c: S, d: S) -> R {
+ unstable(longer(a, b), move |opts| opts.optmulti(a, b, c, d))
+ }
+}
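+// Illustrative (a sketch, not exercised anywhere in this file): the wrappers
+// register each option under the longer of its short/long names, so
+//
+//     let group = opt::flag_s("h", "help", "Display this message");
+//
+// yields a group with `name == "help"` that reports `is_stable() == true`.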
+
+/// Returns the "short" subset of the rustc command line options,
+/// including metadata for each option, such as whether the option is
+/// part of the stable long-term interface for rustc.
+pub fn rustc_short_optgroups() -> Vec<RustcOptGroup> {
+ vec![
+ opt::flag_s("h", "help", "Display this message"),
+ opt::multi_s("", "cfg", "Configure the compilation environment", "SPEC"),
+ opt::multi("", "check-cfg", "Provide list of valid cfg options for checking", "SPEC"),
+ opt::multi_s(
+ "L",
+ "",
+ "Add a directory to the library search path. The
+ optional KIND can be one of dependency, crate, native,
+ framework, or all (the default).",
+ "[KIND=]PATH",
+ ),
+ opt::multi_s(
+ "l",
+ "",
+ "Link the generated crate(s) to the specified native
+ library NAME. The optional KIND can be one of
+ static, framework, or dylib (the default).
+ Optional comma separated MODIFIERS (bundle|verbatim|whole-archive|as-needed)
+ may be specified each with a prefix of either '+' to
+ enable or '-' to disable.",
+ "[KIND[:MODIFIERS]=]NAME[:RENAME]",
+ ),
+ make_crate_type_option(),
+ opt::opt_s("", "crate-name", "Specify the name of the crate being built", "NAME"),
+ opt::opt_s(
+ "",
+ "edition",
+ "Specify which edition of the compiler to use when compiling code.",
+ EDITION_NAME_LIST,
+ ),
+ opt::multi_s(
+ "",
+ "emit",
+ "Comma separated list of types of output for \
+ the compiler to emit",
+ "[asm|llvm-bc|llvm-ir|obj|metadata|link|dep-info|mir]",
+ ),
+ opt::multi_s(
+ "",
+ "print",
+ "Compiler information to print on stdout",
+ "[crate-name|file-names|sysroot|target-libdir|cfg|target-list|\
+ target-cpus|target-features|relocation-models|code-models|\
+ tls-models|target-spec-json|native-static-libs|stack-protector-strategies|\
+ link-args]",
+ ),
+ opt::flagmulti_s("g", "", "Equivalent to -C debuginfo=2"),
+ opt::flagmulti_s("O", "", "Equivalent to -C opt-level=2"),
+ opt::opt_s("o", "", "Write output to <filename>", "FILENAME"),
+ opt::opt_s(
+ "",
+ "out-dir",
+ "Write output to compiler-chosen filename \
+ in <dir>",
+ "DIR",
+ ),
+ opt::opt_s(
+ "",
+ "explain",
+ "Provide a detailed explanation of an error \
+ message",
+ "OPT",
+ ),
+ opt::flag_s("", "test", "Build a test harness"),
+ opt::opt_s("", "target", "Target triple for which the code is compiled", "TARGET"),
+ opt::multi_s("A", "allow", "Set lint allowed", "LINT"),
+ opt::multi_s("W", "warn", "Set lint warnings", "LINT"),
+ opt::multi_s("", "force-warn", "Set lint force-warn", "LINT"),
+ opt::multi_s("D", "deny", "Set lint denied", "LINT"),
+ opt::multi_s("F", "forbid", "Set lint forbidden", "LINT"),
+ opt::multi_s(
+ "",
+ "cap-lints",
+ "Set the most restrictive lint level. \
+ More restrictive lints are capped at this \
+ level",
+ "LEVEL",
+ ),
+ opt::multi_s("C", "codegen", "Set a codegen option", "OPT[=VALUE]"),
+ opt::flag_s("V", "version", "Print version info and exit"),
+ opt::flag_s("v", "verbose", "Use verbose output"),
+ ]
+}
+
+/// Returns all rustc command line options, including metadata for
+/// each option, such as whether the option is part of the stable
+/// long-term interface for rustc.
+pub fn rustc_optgroups() -> Vec<RustcOptGroup> {
+ let mut opts = rustc_short_optgroups();
+ // FIXME: none of these descriptions are actually used
+ opts.extend(vec![
+ opt::multi_s(
+ "",
+ "extern",
+ "Specify where an external rust library is located",
+ "NAME[=PATH]",
+ ),
+ opt::opt_s("", "sysroot", "Override the system root", "PATH"),
+ opt::multi("Z", "", "Set unstable / perma-unstable options", "FLAG"),
+ opt::opt_s(
+ "",
+ "error-format",
+ "How errors and other messages are produced",
+ "human|json|short",
+ ),
+ opt::multi_s("", "json", "Configure the JSON output of the compiler", "CONFIG"),
+ opt::opt_s(
+ "",
+ "color",
+ "Configure coloring of output:
+ auto = colorize, if output goes to a tty (default);
+ always = always colorize output;
+ never = never colorize output",
+ "auto|always|never",
+ ),
+ opt::opt_s(
+ "",
+ "diagnostic-width",
+ "Inform rustc of the width of the output so that diagnostics can be truncated to fit",
+ "WIDTH",
+ ),
+ opt::multi_s(
+ "",
+ "remap-path-prefix",
+ "Remap source names in all output (compiler messages and output files)",
+ "FROM=TO",
+ ),
+ ]);
+ opts
+}
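+// Illustrative driver-side usage (a sketch; the real wiring lives in
+// `rustc_driver`): the table above is turned into a `getopts` parser by
+// applying each group, after which `nightly_options::check_nightly_options`
+// polices the use of unstable flags:
+//
+//     let mut parser = getopts::Options::new();
+//     for group in rustc_optgroups() {
+//         (group.apply)(&mut parser);
+//     }
+//     let matches = parser.parse(std::env::args().skip(1)).unwrap();
+//     nightly_options::check_nightly_options(&matches, &rustc_optgroups());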
+
+pub fn get_cmd_lint_options(
+ matches: &getopts::Matches,
+ error_format: ErrorOutputType,
+) -> (Vec<(String, lint::Level)>, bool, Option<lint::Level>) {
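+    // Worked example: `rustc -D dead_code -A unused-variables` returns
+    // lint_opts == [("dead_code", Deny), ("unused_variables", Allow)], keeping
+    // command-line order and normalizing `-` to `_` in lint names.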
+ let mut lint_opts_with_position = vec![];
+ let mut describe_lints = false;
+
+ for level in [lint::Allow, lint::Warn, lint::ForceWarn(None), lint::Deny, lint::Forbid] {
+ for (arg_pos, lint_name) in matches.opt_strs_pos(level.as_str()) {
+ if lint_name == "help" {
+ describe_lints = true;
+ } else {
+ lint_opts_with_position.push((arg_pos, lint_name.replace('-', "_"), level));
+ }
+ }
+ }
+
+ lint_opts_with_position.sort_by_key(|x| x.0);
+ let lint_opts = lint_opts_with_position
+ .iter()
+ .cloned()
+ .map(|(_, lint_name, level)| (lint_name, level))
+ .collect();
+
+ let lint_cap = matches.opt_str("cap-lints").map(|cap| {
+ lint::Level::from_str(&cap)
+ .unwrap_or_else(|| early_error(error_format, &format!("unknown lint level: `{cap}`")))
+ });
+
+ (lint_opts, describe_lints, lint_cap)
+}
+
+/// Parses the `--color` flag.
+pub fn parse_color(matches: &getopts::Matches) -> ColorConfig {
+ match matches.opt_str("color").as_ref().map(|s| &s[..]) {
+ Some("auto") => ColorConfig::Auto,
+ Some("always") => ColorConfig::Always,
+ Some("never") => ColorConfig::Never,
+
+ None => ColorConfig::Auto,
+
+ Some(arg) => early_error(
+ ErrorOutputType::default(),
+ &format!(
+ "argument for `--color` must be auto, \
+ always or never (instead was `{arg}`)"
+ ),
+ ),
+ }
+}
+
+/// The parsed `--json` configuration options.
+pub struct JsonConfig {
+ pub json_rendered: HumanReadableErrorType,
+ pub json_artifact_notifications: bool,
+ pub json_unused_externs: JsonUnusedExterns,
+ pub json_future_incompat: bool,
+}
+
+/// Report unused externs in event stream
+#[derive(Copy, Clone)]
+pub enum JsonUnusedExterns {
+    /// Do not report unused externs.
+ No,
+ /// Report, but do not exit with failure status for deny/forbid
+ Silent,
+ /// Report, and also exit with failure status for deny/forbid
+ Loud,
+}
+
+impl JsonUnusedExterns {
+ pub fn is_enabled(&self) -> bool {
+ match self {
+ JsonUnusedExterns::No => false,
+ JsonUnusedExterns::Loud | JsonUnusedExterns::Silent => true,
+ }
+ }
+
+ pub fn is_loud(&self) -> bool {
+ match self {
+ JsonUnusedExterns::No | JsonUnusedExterns::Silent => false,
+ JsonUnusedExterns::Loud => true,
+ }
+ }
+}
+
+/// Parses the `--json` flag.
+///
+/// Returns a `JsonConfig` describing how JSON diagnostics should be rendered
+/// and whether artifact notifications, unused-extern reports, and
+/// future-incompatibility reports are enabled.
+pub fn parse_json(matches: &getopts::Matches) -> JsonConfig {
+ let mut json_rendered: fn(ColorConfig) -> HumanReadableErrorType =
+ HumanReadableErrorType::Default;
+ let mut json_color = ColorConfig::Never;
+ let mut json_artifact_notifications = false;
+ let mut json_unused_externs = JsonUnusedExterns::No;
+ let mut json_future_incompat = false;
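+    // e.g. `--json=artifacts,future-incompat` enables artifact notifications
+    // and future-incompatibility reports while keeping the default rendering.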
+ for option in matches.opt_strs("json") {
+ // For now conservatively forbid `--color` with `--json` since `--json`
+ // won't actually be emitting any colors and anything colorized is
+ // embedded in a diagnostic message anyway.
+ if matches.opt_str("color").is_some() {
+ early_error(
+ ErrorOutputType::default(),
+ "cannot specify the `--color` option with `--json`",
+ );
+ }
+
+ for sub_option in option.split(',') {
+ match sub_option {
+ "diagnostic-short" => json_rendered = HumanReadableErrorType::Short,
+ "diagnostic-rendered-ansi" => json_color = ColorConfig::Always,
+ "artifacts" => json_artifact_notifications = true,
+ "unused-externs" => json_unused_externs = JsonUnusedExterns::Loud,
+ "unused-externs-silent" => json_unused_externs = JsonUnusedExterns::Silent,
+ "future-incompat" => json_future_incompat = true,
+ s => early_error(
+ ErrorOutputType::default(),
+ &format!("unknown `--json` option `{s}`"),
+ ),
+ }
+ }
+ }
+
+ JsonConfig {
+ json_rendered: json_rendered(json_color),
+ json_artifact_notifications,
+ json_unused_externs,
+ json_future_incompat,
+ }
+}
+
+/// Parses the `--error-format` flag.
+pub fn parse_error_format(
+ matches: &getopts::Matches,
+ color: ColorConfig,
+ json_rendered: HumanReadableErrorType,
+) -> ErrorOutputType {
+ // We need the `opts_present` check because the driver will send us Matches
+ // with only stable options if no unstable options are used. Since error-format
+ // is unstable, it will not be present. We have to use `opts_present` not
+ // `opt_present` because the latter will panic.
+ let error_format = if matches.opts_present(&["error-format".to_owned()]) {
+ match matches.opt_str("error-format").as_ref().map(|s| &s[..]) {
+ None | Some("human") => {
+ ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(color))
+ }
+ Some("human-annotate-rs") => {
+ ErrorOutputType::HumanReadable(HumanReadableErrorType::AnnotateSnippet(color))
+ }
+ Some("json") => ErrorOutputType::Json { pretty: false, json_rendered },
+ Some("pretty-json") => ErrorOutputType::Json { pretty: true, json_rendered },
+ Some("short") => ErrorOutputType::HumanReadable(HumanReadableErrorType::Short(color)),
+
+ Some(arg) => early_error(
+ ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(color)),
+ &format!(
+ "argument for `--error-format` must be `human`, `json` or \
+ `short` (instead was `{arg}`)"
+ ),
+ ),
+ }
+ } else {
+ ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(color))
+ };
+
+ match error_format {
+ ErrorOutputType::Json { .. } => {}
+
+        // Conservatively require that the `--json` argument is coupled with
+        // `--error-format=json`. This means that when `--json` is specified we
+        // are actually emitting JSON blobs.
+ _ if !matches.opt_strs("json").is_empty() => {
+ early_error(
+ ErrorOutputType::default(),
+ "using `--json` requires also using `--error-format=json`",
+ );
+ }
+
+ _ => {}
+ }
+
+ error_format
+}
+
+pub fn parse_crate_edition(matches: &getopts::Matches) -> Edition {
+ let edition = match matches.opt_str("edition") {
+ Some(arg) => Edition::from_str(&arg).unwrap_or_else(|_| {
+ early_error(
+ ErrorOutputType::default(),
+ &format!(
+ "argument for `--edition` must be one of: \
+ {EDITION_NAME_LIST}. (instead was `{arg}`)"
+ ),
+ )
+ }),
+ None => DEFAULT_EDITION,
+ };
+
+ if !edition.is_stable() && !nightly_options::is_unstable_enabled(matches) {
+ let is_nightly = nightly_options::match_is_nightly_build(matches);
+ let msg = if !is_nightly {
+ format!(
+ "the crate requires edition {}, but the latest edition supported by this Rust version is {}",
+ edition, LATEST_STABLE_EDITION
+ )
+ } else {
+ format!("edition {edition} is unstable and only available with -Z unstable-options")
+ };
+ early_error(ErrorOutputType::default(), &msg)
+ }
+
+ edition
+}
+
+fn check_error_format_stability(
+ unstable_opts: &UnstableOptions,
+ error_format: ErrorOutputType,
+ json_rendered: HumanReadableErrorType,
+) {
+ if !unstable_opts.unstable_options {
+ if let ErrorOutputType::Json { pretty: true, json_rendered } = error_format {
+ early_error(
+ ErrorOutputType::Json { pretty: false, json_rendered },
+ "`--error-format=pretty-json` is unstable",
+ );
+ }
+ if let ErrorOutputType::HumanReadable(HumanReadableErrorType::AnnotateSnippet(_)) =
+ error_format
+ {
+ early_error(
+ ErrorOutputType::Json { pretty: false, json_rendered },
+ "`--error-format=human-annotate-rs` is unstable",
+ );
+ }
+ }
+}
+
+fn parse_output_types(
+ unstable_opts: &UnstableOptions,
+ matches: &getopts::Matches,
+ error_format: ErrorOutputType,
+) -> OutputTypes {
+ let mut output_types = BTreeMap::new();
+ if !unstable_opts.parse_only {
+ for list in matches.opt_strs("emit") {
+ for output_type in list.split(',') {
+ let (shorthand, path) = match output_type.split_once('=') {
+ None => (output_type, None),
+ Some((shorthand, path)) => (shorthand, Some(PathBuf::from(path))),
+ };
+ let output_type = OutputType::from_shorthand(shorthand).unwrap_or_else(|| {
+ early_error(
+ error_format,
+ &format!(
+ "unknown emission type: `{shorthand}` - expected one of: {display}",
+ display = OutputType::shorthands_display(),
+ ),
+ )
+ });
+ output_types.insert(output_type, path);
+ }
+ }
+ };
+ if output_types.is_empty() {
+ output_types.insert(OutputType::Exe, None);
+ }
+ OutputTypes(output_types)
+}
+
+fn should_override_cgus_and_disable_thinlto(
+ output_types: &OutputTypes,
+ matches: &getopts::Matches,
+ error_format: ErrorOutputType,
+ mut codegen_units: Option<usize>,
+) -> (bool, Option<usize>) {
+ let mut disable_thinlto = false;
+    // Issue #30063: if the user requests LLVM-related output to one
+ // particular path, disable codegen-units.
+ let incompatible: Vec<_> = output_types
+ .0
+ .iter()
+ .map(|ot_path| ot_path.0)
+ .filter(|ot| !ot.is_compatible_with_codegen_units_and_single_output_file())
+ .map(|ot| ot.shorthand())
+ .collect();
+ if !incompatible.is_empty() {
+ match codegen_units {
+ Some(n) if n > 1 => {
+ if matches.opt_present("o") {
+ for ot in &incompatible {
+ early_warn(
+ error_format,
+ &format!(
+ "`--emit={ot}` with `-o` incompatible with \
+ `-C codegen-units=N` for N > 1",
+ ),
+ );
+ }
+ early_warn(error_format, "resetting to default -C codegen-units=1");
+ codegen_units = Some(1);
+ disable_thinlto = true;
+ }
+ }
+ _ => {
+ codegen_units = Some(1);
+ disable_thinlto = true;
+ }
+ }
+ }
+
+ if codegen_units == Some(0) {
+ early_error(error_format, "value for codegen units must be a positive non-zero integer");
+ }
+
+ (disable_thinlto, codegen_units)
+}
+
+fn check_thread_count(unstable_opts: &UnstableOptions, error_format: ErrorOutputType) {
+ if unstable_opts.threads == 0 {
+ early_error(error_format, "value for threads must be a positive non-zero integer");
+ }
+
+ if unstable_opts.threads > 1 && unstable_opts.fuel.is_some() {
+ early_error(error_format, "optimization fuel is incompatible with multiple threads");
+ }
+}
+
+fn collect_print_requests(
+ cg: &mut CodegenOptions,
+ unstable_opts: &mut UnstableOptions,
+ matches: &getopts::Matches,
+ error_format: ErrorOutputType,
+) -> Vec<PrintRequest> {
+ let mut prints = Vec::<PrintRequest>::new();
+ if cg.target_cpu.as_ref().map_or(false, |s| s == "help") {
+ prints.push(PrintRequest::TargetCPUs);
+ cg.target_cpu = None;
+ };
+ if cg.target_feature == "help" {
+ prints.push(PrintRequest::TargetFeatures);
+ cg.target_feature = String::new();
+ }
+
+ prints.extend(matches.opt_strs("print").into_iter().map(|s| match &*s {
+ "crate-name" => PrintRequest::CrateName,
+ "file-names" => PrintRequest::FileNames,
+ "sysroot" => PrintRequest::Sysroot,
+ "target-libdir" => PrintRequest::TargetLibdir,
+ "cfg" => PrintRequest::Cfg,
+ "target-list" => PrintRequest::TargetList,
+ "target-cpus" => PrintRequest::TargetCPUs,
+ "target-features" => PrintRequest::TargetFeatures,
+ "relocation-models" => PrintRequest::RelocationModels,
+ "code-models" => PrintRequest::CodeModels,
+ "tls-models" => PrintRequest::TlsModels,
+ "native-static-libs" => PrintRequest::NativeStaticLibs,
+ "stack-protector-strategies" => PrintRequest::StackProtectorStrategies,
+ "target-spec-json" => {
+ if unstable_opts.unstable_options {
+ PrintRequest::TargetSpec
+ } else {
+ early_error(
+ error_format,
+ "the `-Z unstable-options` flag must also be passed to \
+ enable the target-spec-json print option",
+ );
+ }
+ }
+ "link-args" => PrintRequest::LinkArgs,
+ req => early_error(error_format, &format!("unknown print request `{req}`")),
+ }));
+
+ prints
+}
+
+pub fn parse_target_triple(
+ matches: &getopts::Matches,
+ error_format: ErrorOutputType,
+) -> TargetTriple {
+ match matches.opt_str("target") {
+ Some(target) if target.ends_with(".json") => {
+ let path = Path::new(&target);
+ TargetTriple::from_path(&path).unwrap_or_else(|_| {
+ early_error(error_format, &format!("target file {path:?} does not exist"))
+ })
+ }
+ Some(target) => TargetTriple::TargetTriple(target),
+ _ => TargetTriple::from_triple(host_triple()),
+ }
+}
+
+fn parse_opt_level(
+ matches: &getopts::Matches,
+ cg: &CodegenOptions,
+ error_format: ErrorOutputType,
+) -> OptLevel {
+ // The `-O` and `-C opt-level` flags specify the same setting, so we want to be able
+ // to use them interchangeably. However, because they're technically different flags,
+ // we need to work out manually which should take precedence if both are supplied (i.e.
+ // the rightmost flag). We do this by finding the (rightmost) position of both flags and
+ // comparing them. Note that if a flag is not found, its position will be `None`, which
+    // always compares less than `Some(_)`.
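+    // For example, `rustc -C opt-level=3 -O` resolves to `OptLevel::Default`
+    // (the rightmost flag `-O` wins and is equivalent to `-C opt-level=2`),
+    // while `rustc -O -C opt-level=3` resolves to `OptLevel::Aggressive`.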
+ let max_o = matches.opt_positions("O").into_iter().max();
+ let max_c = matches
+ .opt_strs_pos("C")
+ .into_iter()
+ .flat_map(|(i, s)| {
+ // NB: This can match a string without `=`.
+ if let Some("opt-level") = s.splitn(2, '=').next() { Some(i) } else { None }
+ })
+ .max();
+ if max_o > max_c {
+ OptLevel::Default
+ } else {
+ match cg.opt_level.as_ref() {
+ "0" => OptLevel::No,
+ "1" => OptLevel::Less,
+ "2" => OptLevel::Default,
+ "3" => OptLevel::Aggressive,
+ "s" => OptLevel::Size,
+ "z" => OptLevel::SizeMin,
+ arg => {
+ early_error(
+ error_format,
+ &format!(
+ "optimization level needs to be \
+ between 0-3, s or z (instead was `{arg}`)"
+ ),
+ );
+ }
+ }
+ }
+}
+
+fn select_debuginfo(
+ matches: &getopts::Matches,
+ cg: &CodegenOptions,
+ error_format: ErrorOutputType,
+) -> DebugInfo {
+ let max_g = matches.opt_positions("g").into_iter().max();
+ let max_c = matches
+ .opt_strs_pos("C")
+ .into_iter()
+ .flat_map(|(i, s)| {
+ // NB: This can match a string without `=`.
+ if let Some("debuginfo") = s.splitn(2, '=').next() { Some(i) } else { None }
+ })
+ .max();
+ if max_g > max_c {
+ DebugInfo::Full
+ } else {
+ match cg.debuginfo {
+ 0 => DebugInfo::None,
+ 1 => DebugInfo::Limited,
+ 2 => DebugInfo::Full,
+ arg => {
+ early_error(
+ error_format,
+ &format!(
+ "debug info level needs to be between \
+ 0-2 (instead was `{arg}`)"
+ ),
+ );
+ }
+ }
+ }
+}
+
+pub(crate) fn parse_assert_incr_state(
+ opt_assertion: &Option<String>,
+ error_format: ErrorOutputType,
+) -> Option<IncrementalStateAssertion> {
+ match opt_assertion {
+ Some(s) if s.as_str() == "loaded" => Some(IncrementalStateAssertion::Loaded),
+ Some(s) if s.as_str() == "not-loaded" => Some(IncrementalStateAssertion::NotLoaded),
+ Some(s) => {
+ early_error(error_format, &format!("unexpected incremental state assertion value: {s}"))
+ }
+ None => None,
+ }
+}
+
+fn parse_native_lib_kind(
+ matches: &getopts::Matches,
+ kind: &str,
+ error_format: ErrorOutputType,
+) -> (NativeLibKind, Option<bool>) {
+ let (kind, modifiers) = match kind.split_once(':') {
+ None => (kind, None),
+ Some((kind, modifiers)) => (kind, Some(modifiers)),
+ };
+
+ let kind = match kind {
+ "static" => NativeLibKind::Static { bundle: None, whole_archive: None },
+ "dylib" => NativeLibKind::Dylib { as_needed: None },
+ "framework" => NativeLibKind::Framework { as_needed: None },
+ "link-arg" => {
+ if !nightly_options::is_unstable_enabled(matches) {
+ let why = if nightly_options::match_is_nightly_build(matches) {
+ " and only accepted on the nightly compiler"
+ } else {
+ ", the `-Z unstable-options` flag must also be passed to use it"
+ };
+ early_error(error_format, &format!("library kind `link-arg` is unstable{why}"))
+ }
+ NativeLibKind::LinkArg
+ }
+ _ => early_error(
+ error_format,
+ &format!(
+ "unknown library kind `{kind}`, expected one of: static, dylib, framework, link-arg"
+ ),
+ ),
+ };
+ match modifiers {
+ None => (kind, None),
+ Some(modifiers) => parse_native_lib_modifiers(kind, modifiers, error_format, matches),
+ }
+}
+
+fn parse_native_lib_modifiers(
+ mut kind: NativeLibKind,
+ modifiers: &str,
+ error_format: ErrorOutputType,
+ matches: &getopts::Matches,
+) -> (NativeLibKind, Option<bool>) {
+ let mut verbatim = None;
+ for modifier in modifiers.split(',') {
+ let (modifier, value) = match modifier.strip_prefix(&['+', '-']) {
+ Some(m) => (m, modifier.starts_with('+')),
+ None => early_error(
+ error_format,
+ "invalid linking modifier syntax, expected '+' or '-' prefix \
+ before one of: bundle, verbatim, whole-archive, as-needed",
+ ),
+ };
+
+ let report_unstable_modifier = || {
+ if !nightly_options::is_unstable_enabled(matches) {
+ let why = if nightly_options::match_is_nightly_build(matches) {
+ " and only accepted on the nightly compiler"
+ } else {
+ ", the `-Z unstable-options` flag must also be passed to use it"
+ };
+ early_error(
+ error_format,
+ &format!("linking modifier `{modifier}` is unstable{why}"),
+ )
+ }
+ };
+ let assign_modifier = |dst: &mut Option<bool>| {
+ if dst.is_some() {
+ let msg = format!("multiple `{modifier}` modifiers in a single `-l` option");
+ early_error(error_format, &msg)
+ } else {
+ *dst = Some(value);
+ }
+ };
+ match (modifier, &mut kind) {
+ ("bundle", NativeLibKind::Static { bundle, .. }) => assign_modifier(bundle),
+ ("bundle", _) => early_error(
+ error_format,
+ "linking modifier `bundle` is only compatible with `static` linking kind",
+ ),
+
+ ("verbatim", _) => {
+ report_unstable_modifier();
+ assign_modifier(&mut verbatim)
+ }
+
+ ("whole-archive", NativeLibKind::Static { whole_archive, .. }) => {
+ assign_modifier(whole_archive)
+ }
+ ("whole-archive", _) => early_error(
+ error_format,
+ "linking modifier `whole-archive` is only compatible with `static` linking kind",
+ ),
+
+ ("as-needed", NativeLibKind::Dylib { as_needed })
+ | ("as-needed", NativeLibKind::Framework { as_needed }) => {
+ report_unstable_modifier();
+ assign_modifier(as_needed)
+ }
+ ("as-needed", _) => early_error(
+ error_format,
+ "linking modifier `as-needed` is only compatible with \
+ `dylib` and `framework` linking kinds",
+ ),
+
+ // Note: this error also excludes the case with empty modifier
+ // string, like `modifiers = ""`.
+ _ => early_error(
+ error_format,
+ &format!(
+ "unknown linking modifier `{modifier}`, expected one \
+ of: bundle, verbatim, whole-archive, as-needed"
+ ),
+ ),
+ }
+ }
+
+ (kind, verbatim)
+}
+
+fn parse_libs(matches: &getopts::Matches, error_format: ErrorOutputType) -> Vec<NativeLib> {
+ matches
+ .opt_strs("l")
+ .into_iter()
+ .map(|s| {
+ // Parse string of the form "[KIND[:MODIFIERS]=]lib[:new_name]",
+ // where KIND is one of "dylib", "framework", "static", "link-arg" and
+ // where MODIFIERS are a comma separated list of supported modifiers
+ // (bundle, verbatim, whole-archive, as-needed). Each modifier is prefixed
+ // with either + or - to indicate whether it is enabled or disabled.
+ // The last value specified for a given modifier wins.
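+            // e.g. `-l static:+whole-archive,-bundle=mylib:renamed` yields
+            // kind = Static { bundle: Some(false), whole_archive: Some(true) },
+            // name = "mylib", new_name = Some("renamed"), verbatim = None.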
+ let (name, kind, verbatim) = match s.split_once('=') {
+ None => (s, NativeLibKind::Unspecified, None),
+ Some((kind, name)) => {
+ let (kind, verbatim) = parse_native_lib_kind(matches, kind, error_format);
+ (name.to_string(), kind, verbatim)
+ }
+ };
+
+ let (name, new_name) = match name.split_once(':') {
+ None => (name, None),
+ Some((name, new_name)) => (name.to_string(), Some(new_name.to_owned())),
+ };
+ if name.is_empty() {
+ early_error(error_format, "library name must not be empty");
+ }
+ NativeLib { name, new_name, kind, verbatim }
+ })
+ .collect()
+}
+
+pub fn parse_externs(
+ matches: &getopts::Matches,
+ unstable_opts: &UnstableOptions,
+ error_format: ErrorOutputType,
+) -> Externs {
+ let is_unstable_enabled = unstable_opts.unstable_options;
+ let mut externs: BTreeMap<String, ExternEntry> = BTreeMap::new();
+ for arg in matches.opt_strs("extern") {
+ let (name, path) = match arg.split_once('=') {
+ None => (arg, None),
+ Some((name, path)) => (name.to_string(), Some(Path::new(path))),
+ };
+ let (options, name) = match name.split_once(':') {
+ None => (None, name),
+ Some((opts, name)) => (Some(opts), name.to_string()),
+ };
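+        // e.g. `--extern priv,noprelude:alloc=liballoc.rlib` parses to
+        // options = Some("priv,noprelude"), name = "alloc",
+        // path = Some("liballoc.rlib").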
+
+ let path = path.map(|p| CanonicalizedPath::new(p));
+
+ let entry = externs.entry(name.to_owned());
+
+ use std::collections::btree_map::Entry;
+
+ let entry = if let Some(path) = path {
+ // --extern prelude_name=some_file.rlib
+ match entry {
+ Entry::Vacant(vacant) => {
+ let files = BTreeSet::from_iter(iter::once(path));
+ vacant.insert(ExternEntry::new(ExternLocation::ExactPaths(files)))
+ }
+ Entry::Occupied(occupied) => {
+ let ext_ent = occupied.into_mut();
+ match ext_ent {
+ ExternEntry { location: ExternLocation::ExactPaths(files), .. } => {
+ files.insert(path);
+ }
+ ExternEntry {
+ location: location @ ExternLocation::FoundInLibrarySearchDirectories,
+ ..
+ } => {
+ // Exact paths take precedence over search directories.
+ let files = BTreeSet::from_iter(iter::once(path));
+ *location = ExternLocation::ExactPaths(files);
+ }
+ }
+ ext_ent
+ }
+ }
+ } else {
+ // --extern prelude_name
+ match entry {
+ Entry::Vacant(vacant) => {
+ vacant.insert(ExternEntry::new(ExternLocation::FoundInLibrarySearchDirectories))
+ }
+ Entry::Occupied(occupied) => {
+ // Ignore if already specified.
+ occupied.into_mut()
+ }
+ }
+ };
+
+ let mut is_private_dep = false;
+ let mut add_prelude = true;
+ let mut nounused_dep = false;
+ if let Some(opts) = options {
+ if !is_unstable_enabled {
+ early_error(
+ error_format,
+ "the `-Z unstable-options` flag must also be passed to \
+ enable `--extern options",
+ );
+ }
+ for opt in opts.split(',') {
+ match opt {
+ "priv" => is_private_dep = true,
+ "noprelude" => {
+ if let ExternLocation::ExactPaths(_) = &entry.location {
+ add_prelude = false;
+ } else {
+ early_error(
+ error_format,
+ "the `noprelude` --extern option requires a file path",
+ );
+ }
+ }
+ "nounused" => nounused_dep = true,
+ _ => early_error(error_format, &format!("unknown --extern option `{opt}`")),
+ }
+ }
+ }
+
+ // Crates start out being not private, and go to being private `priv`
+ // is specified.
+ entry.is_private_dep |= is_private_dep;
+ // likewise `nounused`
+ entry.nounused_dep |= nounused_dep;
+ // If any flag is missing `noprelude`, then add to the prelude.
+ entry.add_prelude |= add_prelude;
+ }
+ Externs(externs)
+}
+
+fn parse_remap_path_prefix(
+ matches: &getopts::Matches,
+ unstable_opts: &UnstableOptions,
+ error_format: ErrorOutputType,
+) -> Vec<(PathBuf, PathBuf)> {
+ let mut mapping: Vec<(PathBuf, PathBuf)> = matches
+ .opt_strs("remap-path-prefix")
+ .into_iter()
+ .map(|remap| match remap.rsplit_once('=') {
+ None => early_error(
+ error_format,
+ "--remap-path-prefix must contain '=' between FROM and TO",
+ ),
+ Some((from, to)) => (PathBuf::from(from), PathBuf::from(to)),
+ })
+ .collect();
+ match &unstable_opts.remap_cwd_prefix {
+ Some(to) => match std::env::current_dir() {
+ Ok(cwd) => mapping.push((cwd, to.clone())),
+ Err(_) => (),
+ },
+ None => (),
+ };
+ mapping
+}
+
+// JUSTIFICATION: before wrapper fn is available
+#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+pub fn build_session_options(matches: &getopts::Matches) -> Options {
+ let color = parse_color(matches);
+
+ let edition = parse_crate_edition(matches);
+
+ let JsonConfig {
+ json_rendered,
+ json_artifact_notifications,
+ json_unused_externs,
+ json_future_incompat,
+ } = parse_json(matches);
+
+ let error_format = parse_error_format(matches, color, json_rendered);
+
+ let diagnostic_width = matches.opt_get("diagnostic-width").unwrap_or_else(|_| {
+        early_error(error_format, "`--diagnostic-width` must be a positive integer");
+ });
+
+ let unparsed_crate_types = matches.opt_strs("crate-type");
+ let crate_types = parse_crate_types_from_list(unparsed_crate_types)
+ .unwrap_or_else(|e| early_error(error_format, &e));
+
+ let mut unstable_opts = UnstableOptions::build(matches, error_format);
+ let (lint_opts, describe_lints, lint_cap) = get_cmd_lint_options(matches, error_format);
+
+ check_error_format_stability(&unstable_opts, error_format, json_rendered);
+
+ if !unstable_opts.unstable_options && json_unused_externs.is_enabled() {
+ early_error(
+ error_format,
+ "the `-Z unstable-options` flag must also be passed to enable \
+ the flag `--json=unused-externs`",
+ );
+ }
+
+ let output_types = parse_output_types(&unstable_opts, matches, error_format);
+
+ let mut cg = CodegenOptions::build(matches, error_format);
+ let (disable_thinlto, mut codegen_units) = should_override_cgus_and_disable_thinlto(
+ &output_types,
+ matches,
+ error_format,
+ cg.codegen_units,
+ );
+
+ check_thread_count(&unstable_opts, error_format);
+
+ let incremental = cg.incremental.as_ref().map(PathBuf::from);
+
+ let assert_incr_state = parse_assert_incr_state(&unstable_opts.assert_incr_state, error_format);
+
+ if unstable_opts.profile && incremental.is_some() {
+ early_error(
+ error_format,
+ "can't instrument with gcov profiling when compiling incrementally",
+ );
+ }
+ if unstable_opts.profile {
+ match codegen_units {
+ Some(1) => {}
+ None => codegen_units = Some(1),
+ Some(_) => early_error(
+ error_format,
+ "can't instrument with gcov profiling with multiple codegen units",
+ ),
+ }
+ }
+
+ if cg.profile_generate.enabled() && cg.profile_use.is_some() {
+ early_error(
+ error_format,
+ "options `-C profile-generate` and `-C profile-use` are exclusive",
+ );
+ }
+
+ if unstable_opts.profile_sample_use.is_some()
+ && (cg.profile_generate.enabled() || cg.profile_use.is_some())
+ {
+ early_error(
+ error_format,
+ "option `-Z profile-sample-use` cannot be used with `-C profile-generate` or `-C profile-use`",
+ );
+ }
+
+ // Handle both `-Z symbol-mangling-version` and `-C symbol-mangling-version`; the latter takes
+ // precedence.
+ match (cg.symbol_mangling_version, unstable_opts.symbol_mangling_version) {
+ (Some(smv_c), Some(smv_z)) if smv_c != smv_z => {
+ early_error(
+ error_format,
+ "incompatible values passed for `-C symbol-mangling-version` \
+ and `-Z symbol-mangling-version`",
+ );
+ }
+ (Some(SymbolManglingVersion::V0), _) => {}
+ (Some(_), _) if !unstable_opts.unstable_options => {
+ early_error(
+ error_format,
+ "`-C symbol-mangling-version=legacy` requires `-Z unstable-options`",
+ );
+ }
+ (None, None) => {}
+ (None, smv) => {
+ early_warn(
+ error_format,
+ "`-Z symbol-mangling-version` is deprecated; use `-C symbol-mangling-version`",
+ );
+ cg.symbol_mangling_version = smv;
+ }
+ _ => {}
+ }
+
+ // Handle both `-Z instrument-coverage` and `-C instrument-coverage`; the latter takes
+ // precedence.
+ match (cg.instrument_coverage, unstable_opts.instrument_coverage) {
+ (Some(ic_c), Some(ic_z)) if ic_c != ic_z => {
+ early_error(
+ error_format,
+ "incompatible values passed for `-C instrument-coverage` \
+ and `-Z instrument-coverage`",
+ );
+ }
+ (Some(InstrumentCoverage::Off | InstrumentCoverage::All), _) => {}
+ (Some(_), _) if !unstable_opts.unstable_options => {
+ early_error(
+ error_format,
+ "`-C instrument-coverage=except-*` requires `-Z unstable-options`",
+ );
+ }
+ (None, None) => {}
+ (None, ic) => {
+ early_warn(
+ error_format,
+ "`-Z instrument-coverage` is deprecated; use `-C instrument-coverage`",
+ );
+ cg.instrument_coverage = ic;
+ }
+ _ => {}
+ }
+
+ if cg.instrument_coverage.is_some() && cg.instrument_coverage != Some(InstrumentCoverage::Off) {
+ if cg.profile_generate.enabled() || cg.profile_use.is_some() {
+ early_error(
+ error_format,
+ "option `-C instrument-coverage` is not compatible with either `-C profile-use` \
+ or `-C profile-generate`",
+ );
+ }
+
+ // `-C instrument-coverage` implies `-C symbol-mangling-version=v0` - to ensure consistent
+ // and reversible name mangling. Note, LLVM coverage tools can analyze coverage over
+ // multiple runs, including some changes to source code; so mangled names must be consistent
+ // across compilations.
+ match cg.symbol_mangling_version {
+ None => cg.symbol_mangling_version = Some(SymbolManglingVersion::V0),
+ Some(SymbolManglingVersion::Legacy) => {
+ early_warn(
+ error_format,
+ "-C instrument-coverage requires symbol mangling version `v0`, \
+ but `-C symbol-mangling-version=legacy` was specified",
+ );
+ }
+ Some(SymbolManglingVersion::V0) => {}
+ }
+ }
+
+ if let Ok(graphviz_font) = std::env::var("RUSTC_GRAPHVIZ_FONT") {
+ unstable_opts.graphviz_font = graphviz_font;
+ }
+
+ if !cg.embed_bitcode {
+ match cg.lto {
+ LtoCli::No | LtoCli::Unspecified => {}
+ LtoCli::Yes | LtoCli::NoParam | LtoCli::Thin | LtoCli::Fat => early_error(
+ error_format,
+ "options `-C embed-bitcode=no` and `-C lto` are incompatible",
+ ),
+ }
+ }
+
+ if cg.linker_flavor == Some(LinkerFlavor::L4Bender)
+ && !nightly_options::is_unstable_enabled(matches)
+ {
+ early_error(
+ error_format,
+ "`l4-bender` linker flavor is unstable, `-Z unstable-options` \
+ flag must also be passed to explicitly use it",
+ );
+ }
+
+ let prints = collect_print_requests(&mut cg, &mut unstable_opts, matches, error_format);
+
+ let cg = cg;
+
+ let sysroot_opt = matches.opt_str("sysroot").map(|m| PathBuf::from(&m));
+ let target_triple = parse_target_triple(matches, error_format);
+ let opt_level = parse_opt_level(matches, &cg, error_format);
+ // The `-g` and `-C debuginfo` flags specify the same setting, so we want to be able
+ // to use them interchangeably. See the note above (regarding `-O` and `-C opt-level`)
+ // for more details.
+ let debug_assertions = cg.debug_assertions.unwrap_or(opt_level == OptLevel::No);
+ let debuginfo = select_debuginfo(matches, &cg, error_format);
+
+ let mut search_paths = vec![];
+ for s in &matches.opt_strs("L") {
+ search_paths.push(SearchPath::from_cli_opt(&s, error_format));
+ }
+
+ let libs = parse_libs(matches, error_format);
+
+ let test = matches.opt_present("test");
+
+ if !cg.remark.is_empty() && debuginfo == DebugInfo::None {
+ early_warn(error_format, "-C remark requires \"-C debuginfo=n\" to show source locations");
+ }
+
+ let externs = parse_externs(matches, &unstable_opts, error_format);
+
+ let crate_name = matches.opt_str("crate-name");
+
+ let remap_path_prefix = parse_remap_path_prefix(matches, &unstable_opts, error_format);
+
+ let pretty = parse_pretty(&unstable_opts, error_format);
+
+ if !unstable_opts.unstable_options
+ && !target_triple.triple().contains("apple")
+ && cg.split_debuginfo.is_some()
+ {
+ early_error(error_format, "`-Csplit-debuginfo` is unstable on this platform");
+ }
+
+ // Try to find a directory containing the Rust `src`, for more details see
+ // the doc comment on the `real_rust_source_base_dir` field.
+ let tmp_buf;
+ let sysroot = match &sysroot_opt {
+ Some(s) => s,
+ None => {
+ tmp_buf = crate::filesearch::get_or_default_sysroot();
+ &tmp_buf
+ }
+ };
+ let real_rust_source_base_dir = {
+ // This is the location used by the `rust-src` `rustup` component.
+ let mut candidate = sysroot.join("lib/rustlib/src/rust");
+ if let Ok(metadata) = candidate.symlink_metadata() {
+ // Replace the symlink rustbuild creates, with its destination.
+ // We could try to use `fs::canonicalize` instead, but that might
+            // produce an unnecessarily verbose path.
+ if metadata.file_type().is_symlink() {
+ if let Ok(symlink_dest) = std::fs::read_link(&candidate) {
+ candidate = symlink_dest;
+ }
+ }
+ }
+
+ // Only use this directory if it has a file we can expect to always find.
+ if candidate.join("library/std/src/lib.rs").is_file() { Some(candidate) } else { None }
+ };
+
+ let working_dir = std::env::current_dir().unwrap_or_else(|e| {
+ early_error(error_format, &format!("Current directory is invalid: {e}"));
+ });
+
+ let (path, remapped) =
+ FilePathMapping::new(remap_path_prefix.clone()).map_prefix(working_dir.clone());
+ let working_dir = if remapped {
+ RealFileName::Remapped { local_path: Some(working_dir), virtual_name: path }
+ } else {
+ RealFileName::LocalPath(path)
+ };
+
+ Options {
+ assert_incr_state,
+ crate_types,
+ optimize: opt_level,
+ debuginfo,
+ lint_opts,
+ lint_cap,
+ describe_lints,
+ output_types,
+ search_paths,
+ maybe_sysroot: sysroot_opt,
+ target_triple,
+ test,
+ incremental,
+ unstable_opts,
+ prints,
+ cg,
+ error_format,
+ diagnostic_width,
+ externs,
+ unstable_features: UnstableFeatures::from_environment(crate_name.as_deref()),
+ crate_name,
+ libs,
+ debug_assertions,
+ actually_rustdoc: false,
+ trimmed_def_paths: TrimmedDefPaths::default(),
+ cli_forced_codegen_units: codegen_units,
+ cli_forced_thinlto_off: disable_thinlto,
+ remap_path_prefix,
+ real_rust_source_base_dir,
+ edition,
+ json_artifact_notifications,
+ json_unused_externs,
+ json_future_incompat,
+ pretty,
+ working_dir,
+ }
+}
+
+fn parse_pretty(unstable_opts: &UnstableOptions, efmt: ErrorOutputType) -> Option<PpMode> {
+ use PpMode::*;
+
+ let first = match unstable_opts.unpretty.as_deref()? {
+ "normal" => Source(PpSourceMode::Normal),
+ "identified" => Source(PpSourceMode::Identified),
+ "expanded" => Source(PpSourceMode::Expanded),
+ "expanded,identified" => Source(PpSourceMode::ExpandedIdentified),
+ "expanded,hygiene" => Source(PpSourceMode::ExpandedHygiene),
+ "ast-tree" => AstTree(PpAstTreeMode::Normal),
+ "ast-tree,expanded" => AstTree(PpAstTreeMode::Expanded),
+ "hir" => Hir(PpHirMode::Normal),
+ "hir,identified" => Hir(PpHirMode::Identified),
+ "hir,typed" => Hir(PpHirMode::Typed),
+ "hir-tree" => HirTree,
+ "thir-tree" => ThirTree,
+ "mir" => Mir,
+ "mir-cfg" => MirCFG,
+ name => early_error(
+ efmt,
+ &format!(
+ "argument to `unpretty` must be one of `normal`, `identified`, \
+ `expanded`, `expanded,identified`, `expanded,hygiene`, \
+ `ast-tree`, `ast-tree,expanded`, `hir`, `hir,identified`, \
+ `hir,typed`, `hir-tree`, `thir-tree`, `mir` or `mir-cfg`; got {name}"
+ ),
+ ),
+ };
+ tracing::debug!("got unpretty option: {first:?}");
+ Some(first)
+}
+
+pub fn make_crate_type_option() -> RustcOptGroup {
+ opt::multi_s(
+ "",
+ "crate-type",
+ "Comma separated list of types of crates
+ for the compiler to emit",
+ "[bin|lib|rlib|dylib|cdylib|staticlib|proc-macro]",
+ )
+}
+
+pub fn parse_crate_types_from_list(list_list: Vec<String>) -> Result<Vec<CrateType>, String> {
+ let mut crate_types: Vec<CrateType> = Vec::new();
+ for unparsed_crate_type in &list_list {
+ for part in unparsed_crate_type.split(',') {
+ let new_part = match part {
+ "lib" => default_lib_output(),
+ "rlib" => CrateType::Rlib,
+ "staticlib" => CrateType::Staticlib,
+ "dylib" => CrateType::Dylib,
+ "cdylib" => CrateType::Cdylib,
+ "bin" => CrateType::Executable,
+ "proc-macro" => CrateType::ProcMacro,
+ _ => return Err(format!("unknown crate type: `{part}`")),
+ };
+ if !crate_types.contains(&new_part) {
+ crate_types.push(new_part)
+ }
+ }
+ }
+
+ Ok(crate_types)
+}
+
+pub mod nightly_options {
+ use super::{ErrorOutputType, OptionStability, RustcOptGroup};
+ use crate::early_error;
+ use rustc_feature::UnstableFeatures;
+
+ pub fn is_unstable_enabled(matches: &getopts::Matches) -> bool {
+ match_is_nightly_build(matches)
+ && matches.opt_strs("Z").iter().any(|x| *x == "unstable-options")
+ }
+
+ pub fn match_is_nightly_build(matches: &getopts::Matches) -> bool {
+ is_nightly_build(matches.opt_str("crate-name").as_deref())
+ }
+
+ pub fn is_nightly_build(krate: Option<&str>) -> bool {
+ UnstableFeatures::from_environment(krate).is_nightly_build()
+ }
+
+ pub fn check_nightly_options(matches: &getopts::Matches, flags: &[RustcOptGroup]) {
+ let has_z_unstable_option = matches.opt_strs("Z").iter().any(|x| *x == "unstable-options");
+ let really_allows_unstable_options = match_is_nightly_build(matches);
+
+ for opt in flags.iter() {
+ if opt.stability == OptionStability::Stable {
+ continue;
+ }
+ if !matches.opt_present(opt.name) {
+ continue;
+ }
+ if opt.name != "Z" && !has_z_unstable_option {
+ early_error(
+ ErrorOutputType::default(),
+ &format!(
+ "the `-Z unstable-options` flag must also be passed to enable \
+ the flag `{}`",
+ opt.name
+ ),
+ );
+ }
+ if really_allows_unstable_options {
+ continue;
+ }
+ match opt.stability {
+ OptionStability::Unstable => {
+ let msg = format!(
+ "the option `{}` is only accepted on the \
+ nightly compiler",
+ opt.name
+ );
+ early_error(ErrorOutputType::default(), &msg);
+ }
+ OptionStability::Stable => {}
+ }
+ }
+ }
+}
+
+impl fmt::Display for CrateType {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ CrateType::Executable => "bin".fmt(f),
+ CrateType::Dylib => "dylib".fmt(f),
+ CrateType::Rlib => "rlib".fmt(f),
+ CrateType::Staticlib => "staticlib".fmt(f),
+ CrateType::Cdylib => "cdylib".fmt(f),
+ CrateType::ProcMacro => "proc-macro".fmt(f),
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum PpSourceMode {
+ /// `-Zunpretty=normal`
+ Normal,
+ /// `-Zunpretty=expanded`
+ Expanded,
+ /// `-Zunpretty=identified`
+ Identified,
+ /// `-Zunpretty=expanded,identified`
+ ExpandedIdentified,
+ /// `-Zunpretty=expanded,hygiene`
+ ExpandedHygiene,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum PpAstTreeMode {
+ /// `-Zunpretty=ast`
+ Normal,
+ /// `-Zunpretty=ast,expanded`
+ Expanded,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum PpHirMode {
+ /// `-Zunpretty=hir`
+ Normal,
+ /// `-Zunpretty=hir,identified`
+ Identified,
+ /// `-Zunpretty=hir,typed`
+ Typed,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum PpMode {
+ /// Options that print the source code, i.e.
+ /// `-Zunpretty=normal` and `-Zunpretty=expanded`
+ Source(PpSourceMode),
+ AstTree(PpAstTreeMode),
+ /// Options that print the HIR, i.e. `-Zunpretty=hir`
+ Hir(PpHirMode),
+ /// `-Zunpretty=hir-tree`
+ HirTree,
+ /// `-Zunpretty=thir-tree`
+ ThirTree,
+ /// `-Zunpretty=mir`
+ Mir,
+ /// `-Zunpretty=mir-cfg`
+ MirCFG,
+}
+
+impl PpMode {
+ pub fn needs_ast_map(&self) -> bool {
+ use PpMode::*;
+ use PpSourceMode::*;
+ match *self {
+ Source(Normal | Identified) | AstTree(PpAstTreeMode::Normal) => false,
+
+ Source(Expanded | ExpandedIdentified | ExpandedHygiene)
+ | AstTree(PpAstTreeMode::Expanded)
+ | Hir(_)
+ | HirTree
+ | ThirTree
+ | Mir
+ | MirCFG => true,
+ }
+ }
+ pub fn needs_hir(&self) -> bool {
+ use PpMode::*;
+ match *self {
+ Source(_) | AstTree(_) => false,
+
+ Hir(_) | HirTree | ThirTree | Mir | MirCFG => true,
+ }
+ }
+
+ pub fn needs_analysis(&self) -> bool {
+ use PpMode::*;
+ matches!(*self, Mir | MirCFG | ThirTree)
+ }
+}
+
+/// Command-line arguments passed to the compiler have to be incorporated into
+/// the dependency tracking system for incremental compilation. This module
+/// provides some utilities to make this more convenient.
+///
+/// The values of all command-line arguments that are relevant for dependency
+/// tracking are hashed into a single value that determines whether the
+/// incremental compilation cache can be re-used or not. This hashing is done
+/// via the `DepTrackingHash` trait defined below, since the standard `Hash`
+/// implementation might not be suitable (e.g., arguments are stored in a `Vec`,
+/// the hash of which is order dependent, but we might not want the order of
+/// arguments to make a difference for the hash).
+///
+/// However, since the value provided by `Hash::hash` often *is* suitable,
+/// especially for primitive types, there is the
+/// `impl_dep_tracking_hash_via_hash!()` macro that allows one to simply reuse the
+/// `Hash` implementation for `DepTrackingHash`. It's important though that
+/// we have an opt-in scheme here, so one is hopefully forced to think about
+/// how the hash should be calculated when adding a new command-line argument.
+pub(crate) mod dep_tracking {
+ use super::{
+ BranchProtection, CFGuard, CFProtection, CrateType, DebugInfo, ErrorOutputType,
+ InstrumentCoverage, LdImpl, LinkerPluginLto, LocationDetail, LtoCli, OomStrategy, OptLevel,
+ OutputType, OutputTypes, Passes, SourceFileHashAlgorithm, SplitDwarfKind,
+ SwitchWithOptPath, SymbolManglingVersion, TrimmedDefPaths,
+ };
+ use crate::lint;
+ use crate::options::WasiExecModel;
+ use crate::utils::{NativeLib, NativeLibKind};
+ use rustc_errors::LanguageIdentifier;
+ use rustc_feature::UnstableFeatures;
+ use rustc_span::edition::Edition;
+ use rustc_span::RealFileName;
+ use rustc_target::spec::{CodeModel, MergeFunctions, PanicStrategy, RelocModel};
+ use rustc_target::spec::{
+ RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TargetTriple, TlsModel,
+ };
+ use std::collections::hash_map::DefaultHasher;
+ use std::collections::BTreeMap;
+ use std::hash::Hash;
+ use std::num::NonZeroUsize;
+ use std::path::PathBuf;
+
+ pub trait DepTrackingHash {
+ fn hash(
+ &self,
+ hasher: &mut DefaultHasher,
+ error_format: ErrorOutputType,
+ for_crate_hash: bool,
+ );
+ }
+
+ macro_rules! impl_dep_tracking_hash_via_hash {
+ ($($t:ty),+ $(,)?) => {$(
+ impl DepTrackingHash for $t {
+ fn hash(&self, hasher: &mut DefaultHasher, _: ErrorOutputType, _for_crate_hash: bool) {
+ Hash::hash(self, hasher);
+ }
+ }
+ )+};
+ }
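+    // Illustrative: a hypothetical new option type `MyNewOptionKind` that
+    // already derives `Hash` would opt in with a single line (the real
+    // opt-ins follow below):
+    //
+    //     impl_dep_tracking_hash_via_hash!(MyNewOptionKind);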
+
+ impl<T: DepTrackingHash> DepTrackingHash for Option<T> {
+ fn hash(
+ &self,
+ hasher: &mut DefaultHasher,
+ error_format: ErrorOutputType,
+ for_crate_hash: bool,
+ ) {
+ match self {
+ Some(x) => {
+ Hash::hash(&1, hasher);
+ DepTrackingHash::hash(x, hasher, error_format, for_crate_hash);
+ }
+ None => Hash::hash(&0, hasher),
+ }
+ }
+ }
+
+ impl_dep_tracking_hash_via_hash!(
+ bool,
+ usize,
+ NonZeroUsize,
+ u64,
+ String,
+ PathBuf,
+ lint::Level,
+ WasiExecModel,
+ u32,
+ RelocModel,
+ CodeModel,
+ TlsModel,
+ InstrumentCoverage,
+ CrateType,
+ MergeFunctions,
+ PanicStrategy,
+ RelroLevel,
+ Passes,
+ OptLevel,
+ LtoCli,
+ DebugInfo,
+ UnstableFeatures,
+ NativeLib,
+ NativeLibKind,
+ SanitizerSet,
+ CFGuard,
+ CFProtection,
+ TargetTriple,
+ Edition,
+ LinkerPluginLto,
+ SplitDebuginfo,
+ SplitDwarfKind,
+ StackProtector,
+ SwitchWithOptPath,
+ SymbolManglingVersion,
+ SourceFileHashAlgorithm,
+ TrimmedDefPaths,
+ Option<LdImpl>,
+ OutputType,
+ RealFileName,
+ LocationDetail,
+ BranchProtection,
+ OomStrategy,
+ LanguageIdentifier,
+ );
+
+ impl<T1, T2> DepTrackingHash for (T1, T2)
+ where
+ T1: DepTrackingHash,
+ T2: DepTrackingHash,
+ {
+ fn hash(
+ &self,
+ hasher: &mut DefaultHasher,
+ error_format: ErrorOutputType,
+ for_crate_hash: bool,
+ ) {
+ Hash::hash(&0, hasher);
+ DepTrackingHash::hash(&self.0, hasher, error_format, for_crate_hash);
+ Hash::hash(&1, hasher);
+ DepTrackingHash::hash(&self.1, hasher, error_format, for_crate_hash);
+ }
+ }
+
+ impl<T1, T2, T3> DepTrackingHash for (T1, T2, T3)
+ where
+ T1: DepTrackingHash,
+ T2: DepTrackingHash,
+ T3: DepTrackingHash,
+ {
+ fn hash(
+ &self,
+ hasher: &mut DefaultHasher,
+ error_format: ErrorOutputType,
+ for_crate_hash: bool,
+ ) {
+ Hash::hash(&0, hasher);
+ DepTrackingHash::hash(&self.0, hasher, error_format, for_crate_hash);
+ Hash::hash(&1, hasher);
+ DepTrackingHash::hash(&self.1, hasher, error_format, for_crate_hash);
+ Hash::hash(&2, hasher);
+ DepTrackingHash::hash(&self.2, hasher, error_format, for_crate_hash);
+ }
+ }
+
+ impl<T: DepTrackingHash> DepTrackingHash for Vec<T> {
+ fn hash(
+ &self,
+ hasher: &mut DefaultHasher,
+ error_format: ErrorOutputType,
+ for_crate_hash: bool,
+ ) {
+ Hash::hash(&self.len(), hasher);
+ for (index, elem) in self.iter().enumerate() {
+ Hash::hash(&index, hasher);
+ DepTrackingHash::hash(elem, hasher, error_format, for_crate_hash);
+ }
+ }
+ }
+
+ impl DepTrackingHash for OutputTypes {
+ fn hash(
+ &self,
+ hasher: &mut DefaultHasher,
+ error_format: ErrorOutputType,
+ for_crate_hash: bool,
+ ) {
+ Hash::hash(&self.0.len(), hasher);
+ for (key, val) in &self.0 {
+ DepTrackingHash::hash(key, hasher, error_format, for_crate_hash);
+ if !for_crate_hash {
+ DepTrackingHash::hash(val, hasher, error_format, for_crate_hash);
+ }
+ }
+ }
+ }
+
+ // This is a stable hash because BTreeMap is a sorted container
+ pub(crate) fn stable_hash(
+ sub_hashes: BTreeMap<&'static str, &dyn DepTrackingHash>,
+ hasher: &mut DefaultHasher,
+ error_format: ErrorOutputType,
+ for_crate_hash: bool,
+ ) {
+ for (key, sub_hash) in sub_hashes {
+ // Using Hash::hash() instead of DepTrackingHash::hash() is fine for
+ // the keys, as they are just plain strings
+ Hash::hash(&key.len(), hasher);
+ Hash::hash(key, hasher);
+ sub_hash.hash(hasher, error_format, for_crate_hash);
+ }
+ }
+}
+
+/// Default behavior to use in out-of-memory situations.
+#[derive(Clone, Copy, PartialEq, Hash, Debug, Encodable, Decodable, HashStable_Generic)]
+pub enum OomStrategy {
+ /// Generate a panic that can be caught by `catch_unwind`.
+ Panic,
+
+ /// Abort the process immediately.
+ Abort,
+}
+
+impl OomStrategy {
+ pub const SYMBOL: &'static str = "__rust_alloc_error_handler_should_panic";
+
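+    /// The byte encoding of this strategy (`1` = panic, `0` = abort), as
+    /// stored under [`Self::SYMBOL`].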
+ pub fn should_panic(self) -> u8 {
+ match self {
+ OomStrategy::Panic => 1,
+ OomStrategy::Abort => 0,
+ }
+ }
+}
+
+/// How to run proc-macro code when building this crate
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum ProcMacroExecutionStrategy {
+ /// Run the proc-macro code on the same thread as the server.
+ SameThread,
+
+ /// Run the proc-macro code on a different thread.
+ CrossThread,
+}
diff --git a/compiler/rustc_session/src/cstore.rs b/compiler/rustc_session/src/cstore.rs
new file mode 100644
index 000000000..c1fd3c7c6
--- /dev/null
+++ b/compiler/rustc_session/src/cstore.rs
@@ -0,0 +1,218 @@
+//! The rustc crate store interface. This also includes types that
+//! are *mostly* used as a part of that interface, but these should
+//! probably get a better home if someone can find one.
+
+use crate::search_paths::PathKind;
+use crate::utils::NativeLibKind;
+use crate::Session;
+use rustc_ast as ast;
+use rustc_data_structures::sync::{self, MetadataRef};
+use rustc_hir::def_id::{CrateNum, DefId, StableCrateId, LOCAL_CRATE};
+use rustc_hir::definitions::{DefKey, DefPath, DefPathHash};
+use rustc_span::hygiene::{ExpnHash, ExpnId};
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+use rustc_target::spec::Target;
+
+use std::any::Any;
+use std::path::{Path, PathBuf};
+
+// lonely orphan structs and enums looking for a better home
+
+/// Where a crate came from on the local filesystem. One of these three options
+/// must be non-None.
+#[derive(PartialEq, Clone, Debug, HashStable_Generic, Encodable, Decodable)]
+pub struct CrateSource {
+ pub dylib: Option<(PathBuf, PathKind)>,
+ pub rlib: Option<(PathBuf, PathKind)>,
+ pub rmeta: Option<(PathBuf, PathKind)>,
+}
+
+impl CrateSource {
+ #[inline]
+ pub fn paths(&self) -> impl Iterator<Item = &PathBuf> {
+ self.dylib.iter().chain(self.rlib.iter()).chain(self.rmeta.iter()).map(|p| &p.0)
+ }
+}
+
+#[derive(Encodable, Decodable, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Debug)]
+#[derive(HashStable_Generic)]
+pub enum CrateDepKind {
+ /// A dependency that is only used for its macros.
+ MacrosOnly,
+ /// A dependency that is always injected into the dependency list and so
+ /// doesn't need to be linked to an rlib, e.g., the injected allocator.
+ Implicit,
+ /// A dependency that is required by an rlib version of this crate.
+ /// Ordinary `extern crate`s result in `Explicit` dependencies.
+ Explicit,
+}
+
+impl CrateDepKind {
+ #[inline]
+ pub fn macros_only(self) -> bool {
+ match self {
+ CrateDepKind::MacrosOnly => true,
+ CrateDepKind::Implicit | CrateDepKind::Explicit => false,
+ }
+ }
+}
+
+#[derive(Copy, Debug, PartialEq, Clone, Encodable, Decodable, HashStable_Generic)]
+pub enum LinkagePreference {
+ RequireDynamic,
+ RequireStatic,
+}
+
+#[derive(Debug, Encodable, Decodable, HashStable_Generic)]
+pub struct NativeLib {
+ pub kind: NativeLibKind,
+ pub name: Option<Symbol>,
+ pub cfg: Option<ast::MetaItem>,
+ pub foreign_module: Option<DefId>,
+ pub wasm_import_module: Option<Symbol>,
+ pub verbatim: Option<bool>,
+ pub dll_imports: Vec<DllImport>,
+}
+
+impl NativeLib {
+ pub fn has_modifiers(&self) -> bool {
+ self.verbatim.is_some() || self.kind.has_modifiers()
+ }
+}
+
+#[derive(Clone, Debug, Encodable, Decodable, HashStable_Generic)]
+pub struct DllImport {
+ pub name: Symbol,
+ pub ordinal: Option<u16>,
+ /// Calling convention for the function.
+ ///
+ /// On x86_64, this is always `DllCallingConvention::C`; on i686, it can be any
+ /// of the values, and we use `DllCallingConvention::C` to represent `"cdecl"`.
+ pub calling_convention: DllCallingConvention,
+    /// Span of the import's "extern" declaration; used for diagnostics.
+ pub span: Span,
+}
+
+/// Calling convention for a function defined in an external library.
+///
+/// The usize value, where present, indicates the size of the function's argument list
+/// in bytes.
+#[derive(Clone, PartialEq, Debug, Encodable, Decodable, HashStable_Generic)]
+pub enum DllCallingConvention {
+ C,
+ Stdcall(usize),
+ Fastcall(usize),
+ Vectorcall(usize),
+}
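+// For example, an i686 `extern "stdcall"` import taking a single `u64`
+// argument has an 8-byte argument list and would be represented as
+// `Stdcall(8)`.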
+
+#[derive(Clone, Encodable, Decodable, HashStable_Generic, Debug)]
+pub struct ForeignModule {
+ pub foreign_items: Vec<DefId>,
+ pub def_id: DefId,
+}
+
+#[derive(Copy, Clone, Debug, HashStable_Generic)]
+pub struct ExternCrate {
+ pub src: ExternCrateSource,
+
+    /// Span of the `extern crate` item that caused this crate to be loaded.
+ pub span: Span,
+
+ /// Number of links to reach the extern;
+ /// used to select the extern with the shortest path
+ pub path_len: usize,
+
+ /// Crate that depends on this crate
+ pub dependency_of: CrateNum,
+}
+
+impl ExternCrate {
+ /// If true, then this crate is the crate named by the extern
+ /// crate referenced above. If false, then this crate is a dep
+ /// of the crate.
+ #[inline]
+ pub fn is_direct(&self) -> bool {
+ self.dependency_of == LOCAL_CRATE
+ }
+
+ #[inline]
+ pub fn rank(&self) -> impl PartialOrd {
+ // Prefer:
+ // - direct extern crate to indirect
+ // - shorter paths to longer
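+        // (`!self.path_len` is a bitwise NOT, so smaller path lengths rank higher)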
+ (self.is_direct(), !self.path_len)
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable_Generic)]
+pub enum ExternCrateSource {
+ /// Crate is loaded by `extern crate`.
+ Extern(
+ /// def_id of the item in the current crate that caused
+ /// this crate to be loaded; note that there could be multiple
+ /// such ids
+ DefId,
+ ),
+    /// Crate is implicitly loaded by a path resolving through the extern prelude.
+ Path,
+}
+
+/// The backend's way to give the crate store access to the metadata in a library.
+/// Note that it returns the raw metadata bytes stored in the library file, whether
+/// it is compressed, uncompressed, some weird mix, etc.
+/// rmeta files are backend independent and not handled here.
+///
+/// At the time of this writing, there is only one backend and one way to store
+/// metadata in a library -- this trait just serves to decouple rustc_metadata from
+/// the archive reader, which depends on LLVM.
+pub trait MetadataLoader {
+ fn get_rlib_metadata(&self, target: &Target, filename: &Path) -> Result<MetadataRef, String>;
+ fn get_dylib_metadata(&self, target: &Target, filename: &Path) -> Result<MetadataRef, String>;
+}
+
+pub type MetadataLoaderDyn = dyn MetadataLoader + Sync;
+
+/// A store of Rust crates, through which their metadata can be accessed.
+///
+/// Note that this trait should probably not be expanded today. All new
+/// functionality should be driven through queries instead!
+///
+/// If you find a method on this trait named `{name}_untracked`, it signifies
+/// that it's *not* tracked for dependency information throughout compilation
+/// (it'd break incremental compilation) and should only be called pre-HIR (e.g.
+/// during resolve).
+pub trait CrateStore: std::fmt::Debug {
+ fn as_any(&self) -> &dyn Any;
+
+ // Foreign definitions.
+ // This information is safe to access, since it's hashed as part of the DefPathHash, which incr.
+ // comp. uses to identify a DefId.
+ fn def_key(&self, def: DefId) -> DefKey;
+ fn def_path(&self, def: DefId) -> DefPath;
+ fn def_path_hash(&self, def: DefId) -> DefPathHash;
+
+ // This information is safe to access, since it's hashed as part of the StableCrateId, which
+ // incr. comp. uses to identify a CrateNum.
+ fn crate_name(&self, cnum: CrateNum) -> Symbol;
+ fn stable_crate_id(&self, cnum: CrateNum) -> StableCrateId;
+ fn stable_crate_id_to_crate_num(&self, stable_crate_id: StableCrateId) -> CrateNum;
+
+ /// Fetch a DefId from a DefPathHash for a foreign crate.
+ fn def_path_hash_to_def_id(&self, cnum: CrateNum, hash: DefPathHash) -> DefId;
+ fn expn_hash_to_expn_id(
+ &self,
+ sess: &Session,
+ cnum: CrateNum,
+ index_guess: u32,
+ hash: ExpnHash,
+ ) -> ExpnId;
+
+ /// Imports all `SourceFile`s from the given crate into the current session.
+ /// This normally happens automatically when we decode a `Span` from
+ /// that crate's metadata; however, the incr. comp. cache needs
+ /// to trigger this manually when decoding a foreign `Span`.
+ fn import_source_files(&self, sess: &Session, cnum: CrateNum);
+}
+
+pub type CrateStoreDyn = dyn CrateStore + sync::Sync;
diff --git a/compiler/rustc_session/src/filesearch.rs b/compiler/rustc_session/src/filesearch.rs
new file mode 100644
index 000000000..c973e3140
--- /dev/null
+++ b/compiler/rustc_session/src/filesearch.rs
@@ -0,0 +1,125 @@
+//! A module for searching for libraries
+
+use std::env;
+use std::fs;
+use std::iter::FromIterator;
+use std::path::{Path, PathBuf};
+
+use crate::search_paths::{PathKind, SearchPath};
+use rustc_fs_util::fix_windows_verbatim_for_gcc;
+use tracing::debug;
+
+#[derive(Copy, Clone)]
+pub enum FileMatch {
+ FileMatches,
+ FileDoesntMatch,
+}
+
+#[derive(Clone)]
+pub struct FileSearch<'a> {
+ sysroot: &'a Path,
+ triple: &'a str,
+ search_paths: &'a [SearchPath],
+ tlib_path: &'a SearchPath,
+ kind: PathKind,
+}
+
+impl<'a> FileSearch<'a> {
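+ /// Returns the search paths whose kind matches `self.kind`, followed by the
+ /// target library path (`tlib_path`), which is always searched last.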
+ pub fn search_paths(&self) -> impl Iterator<Item = &'a SearchPath> {
+ let kind = self.kind;
+ self.search_paths
+ .iter()
+ .filter(move |sp| sp.kind.matches(kind))
+ .chain(std::iter::once(self.tlib_path))
+ }
+
+ pub fn get_lib_path(&self) -> PathBuf {
+ make_target_lib_path(self.sysroot, self.triple)
+ }
+
+ pub fn get_self_contained_lib_path(&self) -> PathBuf {
+ self.get_lib_path().join("self-contained")
+ }
+
+ pub fn new(
+ sysroot: &'a Path,
+ triple: &'a str,
+ search_paths: &'a [SearchPath],
+ tlib_path: &'a SearchPath,
+ kind: PathKind,
+ ) -> FileSearch<'a> {
+ debug!("using sysroot = {}, triple = {}", sysroot.display(), triple);
+ FileSearch { sysroot, triple, search_paths, tlib_path, kind }
+ }
+
+ /// Returns just the directories within the search paths.
+ pub fn search_path_dirs(&self) -> Vec<PathBuf> {
+ self.search_paths().map(|sp| sp.dir.to_path_buf()).collect()
+ }
+}
+
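+/// Returns the target-specific library path inside the sysroot; e.g., for the
+/// triple `x86_64-unknown-linux-gnu` this is
+/// `$sysroot/lib/rustlib/x86_64-unknown-linux-gnu/lib`.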
+pub fn make_target_lib_path(sysroot: &Path, target_triple: &str) -> PathBuf {
+ let rustlib_path = rustc_target::target_rustlib_path(sysroot, target_triple);
+ PathBuf::from_iter([sysroot, Path::new(&rustlib_path), Path::new("lib")])
+}
+
+/// Returns the sysroot inferred from `env::args().next()` when possible; if it
+/// cannot be found that way, falls back to `env::current_exe()` to infer it.
+pub fn get_or_default_sysroot() -> PathBuf {
+ // Follow symlinks. If the resolved path is relative, make it absolute.
+ fn canonicalize(path: PathBuf) -> PathBuf {
+ let path = fs::canonicalize(&path).unwrap_or(path);
+ // See comments on this target function, but the gist is that
+ // gcc chokes on verbatim paths which fs::canonicalize generates
+ // so we try to avoid those kinds of paths.
+ fix_windows_verbatim_for_gcc(&path)
+ }
+
+ // Use env::current_exe() to get the path of the executable following
+ // symlinks/canonicalizing components.
+ fn from_current_exe() -> PathBuf {
+ match env::current_exe() {
+ Ok(exe) => {
+ let mut p = canonicalize(exe);
+ p.pop();
+ p.pop();
+ p
+ }
+ Err(e) => panic!("failed to get current_exe: {e}"),
+ }
+ }
+
+ // Use env::args().next() to get the path of the executable without
+ // following symlinks/canonicalizing any component. This makes the rustc
+ // binary able to locate Rust libraries in systems using content-addressable
+ // storage (CAS).
+ fn from_env_args_next() -> Option<PathBuf> {
+ match env::args_os().next() {
+ Some(first_arg) => {
+ let mut p = PathBuf::from(first_arg);
+
+ // Check if sysroot is found using env::args().next() only if the rustc in argv[0]
+ // is a symlink (see #79253). We might want to change/remove it to conform with
+ // https://www.gnu.org/prep/standards/standards.html#Finding-Program-Files in the
+ // future.
+ if fs::read_link(&p).is_err() {
+ // Path is not a symbolic link or does not exist.
+ return None;
+ }
+
+ // Pop off `bin/rustc`, obtaining the suspected sysroot.
+ p.pop();
+ p.pop();
+ // Look for the target rustlib directory in the suspected sysroot.
+ let mut rustlib_path = rustc_target::target_rustlib_path(&p, "dummy");
+ rustlib_path.pop(); // pop off the dummy target.
+ if rustlib_path.exists() { Some(p) } else { None }
+ }
+ None => None,
+ }
+ }
+
+ // Check if sysroot is found using env::args().next(), and if it is not found,
+ // use env::current_exe() to imply sysroot.
+ from_env_args_next().unwrap_or_else(from_current_exe)
+}
diff --git a/compiler/rustc_session/src/lib.rs b/compiler/rustc_session/src/lib.rs
new file mode 100644
index 000000000..7353c1ca0
--- /dev/null
+++ b/compiler/rustc_session/src/lib.rs
@@ -0,0 +1,40 @@
+#![feature(if_let_guard)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(once_cell)]
+#![feature(option_get_or_insert_default)]
+#![feature(rustc_attrs)]
+#![feature(map_many_mut)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate rustc_macros;
+
+pub mod cgu_reuse_tracker;
+pub mod utils;
+pub use lint::{declare_lint, declare_lint_pass, declare_tool_lint, impl_lint_pass};
+pub use rustc_lint_defs as lint;
+pub mod parse;
+
+mod code_stats;
+#[macro_use]
+pub mod config;
+pub mod cstore;
+pub mod filesearch;
+mod options;
+pub mod search_paths;
+
+mod session;
+pub use session::*;
+
+pub mod output;
+
+pub use getopts;
+
+/// Requirements for a `StableHashingContext` to be used in this crate.
+/// This is a hack to allow using the `HashStable_Generic` derive macro
+/// instead of implementing everything in `rustc_middle`.
+pub trait HashStableContext: rustc_ast::HashStableContext + rustc_hir::HashStableContext {}
diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs
new file mode 100644
index 000000000..1827f1c20
--- /dev/null
+++ b/compiler/rustc_session/src/options.rs
@@ -0,0 +1,1673 @@
+use crate::config::*;
+
+use crate::early_error;
+use crate::lint;
+use crate::search_paths::SearchPath;
+use crate::utils::NativeLib;
+use rustc_errors::LanguageIdentifier;
+use rustc_target::spec::{CodeModel, LinkerFlavor, MergeFunctions, PanicStrategy, SanitizerSet};
+use rustc_target::spec::{
+ RelocModel, RelroLevel, SplitDebuginfo, StackProtector, TargetTriple, TlsModel,
+};
+
+use rustc_feature::UnstableFeatures;
+use rustc_span::edition::Edition;
+use rustc_span::RealFileName;
+use rustc_span::SourceFileHashAlgorithm;
+
+use std::collections::BTreeMap;
+
+use std::collections::hash_map::DefaultHasher;
+use std::hash::Hasher;
+use std::num::NonZeroUsize;
+use std::path::PathBuf;
+use std::str;
+
+macro_rules! insert {
+ ($opt_name:ident, $opt_expr:expr, $sub_hashes:expr) => {
+ if $sub_hashes
+ .insert(stringify!($opt_name), $opt_expr as &dyn dep_tracking::DepTrackingHash)
+ .is_some()
+ {
+ panic!("duplicate key in CLI DepTrackingHash: {}", stringify!($opt_name))
+ }
+ };
+}
+
+macro_rules! hash_opt {
+ ($opt_name:ident, $opt_expr:expr, $sub_hashes:expr, $_for_crate_hash: ident, [UNTRACKED]) => {{}};
+ ($opt_name:ident, $opt_expr:expr, $sub_hashes:expr, $_for_crate_hash: ident, [TRACKED]) => {{ insert!($opt_name, $opt_expr, $sub_hashes) }};
+ ($opt_name:ident, $opt_expr:expr, $sub_hashes:expr, $for_crate_hash: ident, [TRACKED_NO_CRATE_HASH]) => {{
+ if !$for_crate_hash {
+ insert!($opt_name, $opt_expr, $sub_hashes)
+ }
+ }};
+ ($opt_name:ident, $opt_expr:expr, $sub_hashes:expr, $_for_crate_hash: ident, [SUBSTRUCT]) => {{}};
+}
+
+macro_rules! hash_substruct {
+ ($opt_name:ident, $opt_expr:expr, $error_format:expr, $for_crate_hash:expr, $hasher:expr, [UNTRACKED]) => {{}};
+ ($opt_name:ident, $opt_expr:expr, $error_format:expr, $for_crate_hash:expr, $hasher:expr, [TRACKED]) => {{}};
+ ($opt_name:ident, $opt_expr:expr, $error_format:expr, $for_crate_hash:expr, $hasher:expr, [TRACKED_NO_CRATE_HASH]) => {{}};
+ ($opt_name:ident, $opt_expr:expr, $error_format:expr, $for_crate_hash:expr, $hasher:expr, [SUBSTRUCT]) => {
+ use crate::config::dep_tracking::DepTrackingHash;
+ $opt_expr.dep_tracking_hash($for_crate_hash, $error_format).hash(
+ $hasher,
+ $error_format,
+ $for_crate_hash,
+ );
+ };
+}
+
+macro_rules! top_level_options {
+ ( $( #[$top_level_attr:meta] )* pub struct Options { $(
+ $( #[$attr:meta] )*
+ $opt:ident : $t:ty [$dep_tracking_marker:ident],
+ )* } ) => (
+ #[derive(Clone)]
+ $( #[$top_level_attr] )*
+ pub struct Options {
+ $(
+ $( #[$attr] )*
+ pub $opt: $t
+ ),*
+ }
+
+ impl Options {
+ pub fn dep_tracking_hash(&self, for_crate_hash: bool) -> u64 {
+ let mut sub_hashes = BTreeMap::new();
+ $({
+ hash_opt!($opt,
+ &self.$opt,
+ &mut sub_hashes,
+ for_crate_hash,
+ [$dep_tracking_marker]);
+ })*
+ let mut hasher = DefaultHasher::new();
+ dep_tracking::stable_hash(sub_hashes,
+ &mut hasher,
+ self.error_format,
+ for_crate_hash);
+ $({
+ hash_substruct!($opt,
+ &self.$opt,
+ self.error_format,
+ for_crate_hash,
+ &mut hasher,
+ [$dep_tracking_marker]);
+ })*
+ hasher.finish()
+ }
+ }
+ );
+}
+
+top_level_options!(
+ /// The top-level command-line options struct.
+ ///
+ /// For each option, one has to specify how it behaves with regard to the
+ /// dependency tracking system of incremental compilation. This is done via the
+ /// square-bracketed directive after the field type. The options are:
+ ///
+ /// - `[TRACKED]`
+ /// A change in the given field will cause the compiler to completely clear the
+ /// incremental compilation cache before proceeding.
+ ///
+ /// - `[TRACKED_NO_CRATE_HASH]`
+ /// Same as `[TRACKED]`, but will not affect the crate hash. This is useful for options that only
+ /// affect the incremental cache.
+ ///
+ /// - `[UNTRACKED]`
+ /// Incremental compilation is not influenced by this option.
+ ///
+ /// - `[SUBSTRUCT]`
+ /// Second-level sub-structs containing more options.
+ ///
+ /// If you add a new option to this struct or one of the sub-structs like
+ /// `CodegenOptions`, think about how it influences incremental compilation. If in
+ /// doubt, specify `[TRACKED]`, which is always "correct" but might lead to
+ /// unnecessary re-compilation.
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_ty)]
+ pub struct Options {
+ /// The crate config requested for the session, which may be combined
+ /// with additional crate configurations during the compile process.
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::crate_types` instead of this field"))]
+ crate_types: Vec<CrateType> [TRACKED],
+ optimize: OptLevel [TRACKED],
+ /// Include the `debug_assertions` flag in dependency tracking, since it
+ /// can influence whether overflow checks are done or not.
+ debug_assertions: bool [TRACKED],
+ debuginfo: DebugInfo [TRACKED],
+ lint_opts: Vec<(String, lint::Level)> [TRACKED_NO_CRATE_HASH],
+ lint_cap: Option<lint::Level> [TRACKED_NO_CRATE_HASH],
+ describe_lints: bool [UNTRACKED],
+ output_types: OutputTypes [TRACKED],
+ search_paths: Vec<SearchPath> [UNTRACKED],
+ libs: Vec<NativeLib> [TRACKED],
+ maybe_sysroot: Option<PathBuf> [UNTRACKED],
+
+ target_triple: TargetTriple [TRACKED],
+
+ test: bool [TRACKED],
+ error_format: ErrorOutputType [UNTRACKED],
+ diagnostic_width: Option<usize> [UNTRACKED],
+
+ /// If `Some`, enable incremental compilation, using the given
+ /// directory to store intermediate results.
+ incremental: Option<PathBuf> [UNTRACKED],
+ assert_incr_state: Option<IncrementalStateAssertion> [UNTRACKED],
+
+ unstable_opts: UnstableOptions [SUBSTRUCT],
+ prints: Vec<PrintRequest> [UNTRACKED],
+ cg: CodegenOptions [SUBSTRUCT],
+ externs: Externs [UNTRACKED],
+ crate_name: Option<String> [TRACKED],
+ /// Indicates how the compiler should treat unstable features.
+ unstable_features: UnstableFeatures [TRACKED],
+
+ /// Indicates whether this run of the compiler is actually rustdoc. This
+ /// is currently just a hack and will be removed eventually, so please
+ /// try not to rely on it too much.
+ actually_rustdoc: bool [TRACKED],
+
+ /// Control path trimming.
+ trimmed_def_paths: TrimmedDefPaths [TRACKED],
+
+ /// Specifications of codegen units / ThinLTO which are forced as a
+ /// result of parsing command line options. These are not necessarily
+ /// what rustc was invoked with, but are massaged a bit to agree with
+ /// options like `--emit llvm-ir`, with which they are often incompatible
+ /// if we otherwise use the defaults of rustc.
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::codegen_units` instead of this field"))]
+ cli_forced_codegen_units: Option<usize> [UNTRACKED],
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::lto` instead of this field"))]
+ cli_forced_thinlto_off: bool [UNTRACKED],
+
+ /// Remap source path prefixes in all output (messages, object files, debug, etc.).
+ remap_path_prefix: Vec<(PathBuf, PathBuf)> [TRACKED_NO_CRATE_HASH],
+ /// Base directory containing the `src/` for the Rust standard library, and
+ /// potentially `rustc` as well, if we can find it. Right now it's always
+ /// `$sysroot/lib/rustlib/src/rust` (i.e. the `rustup` `rust-src` component).
+ ///
+ /// This directory is what the virtual `/rustc/$hash` is translated back to,
+ /// if Rust was built with path remapping to `/rustc/$hash` enabled
+ /// (the `rust.remap-debuginfo` option in `config.toml`).
+ real_rust_source_base_dir: Option<PathBuf> [TRACKED_NO_CRATE_HASH],
+
+ edition: Edition [TRACKED],
+
+ /// `true` if we're emitting JSON blobs about each artifact produced
+ /// by the compiler.
+ json_artifact_notifications: bool [TRACKED],
+
+ /// `true` if we're emitting a JSON blob containing the unused externs.
+ json_unused_externs: JsonUnusedExterns [UNTRACKED],
+
+ /// `true` if we're emitting a JSON blob containing a future-incompat report for lints.
+ json_future_incompat: bool [TRACKED],
+
+ pretty: Option<PpMode> [UNTRACKED],
+
+ /// The (potentially remapped) working directory
+ working_dir: RealFileName [TRACKED],
+ }
+);
+
+/// Defines all `CodegenOptions`/`UnstableOptions` fields and parsers at once. The goal
+/// of this macro is to define an interface that can be used programmatically by the option
+/// parser to initialize the struct without hardcoding field names all over the place.
+///
+/// The macro is invoked once per options struct with the correct fields, and it then
+/// generates all the necessary code. The main gotcha is the generated setters module
+/// (named by `$optmod`), which is a bunch of generated code that parses an option into
+/// its respective field in the struct. There are a few hand-written parsers in the
+/// `parse` module for specific types of values.
+macro_rules! options {
+ ($struct_name:ident, $stat:ident, $optmod:ident, $prefix:expr, $outputname:expr,
+ $($( #[$attr:meta] )* $opt:ident : $t:ty = (
+ $init:expr,
+ $parse:ident,
+ [$dep_tracking_marker:ident],
+ $desc:expr)
+ ),* ,) =>
+(
+ #[derive(Clone)]
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_ty)]
+ pub struct $struct_name { $( $( #[$attr] )* pub $opt: $t),* }
+
+ impl Default for $struct_name {
+ fn default() -> $struct_name {
+ $struct_name { $($opt: $init),* }
+ }
+ }
+
+ impl $struct_name {
+ pub fn build(
+ matches: &getopts::Matches,
+ error_format: ErrorOutputType,
+ ) -> $struct_name {
+ build_options(matches, $stat, $prefix, $outputname, error_format)
+ }
+
+ fn dep_tracking_hash(&self, for_crate_hash: bool, error_format: ErrorOutputType) -> u64 {
+ let mut sub_hashes = BTreeMap::new();
+ $({
+ hash_opt!($opt,
+ &self.$opt,
+ &mut sub_hashes,
+ for_crate_hash,
+ [$dep_tracking_marker]);
+ })*
+ let mut hasher = DefaultHasher::new();
+ dep_tracking::stable_hash(sub_hashes,
+ &mut hasher,
+ error_format,
+ for_crate_hash
+ );
+ hasher.finish()
+ }
+ }
+
+ pub const $stat: OptionDescrs<$struct_name> =
+ &[ $( (stringify!($opt), $optmod::$opt, desc::$parse, $desc) ),* ];
+
+ mod $optmod {
+ $(
+ pub(super) fn $opt(cg: &mut super::$struct_name, v: Option<&str>) -> bool {
+ super::parse::$parse(&mut redirect_field!(cg.$opt), v)
+ }
+ )*
+ }
+
+) }
+
+impl Options {
+ // JUSTIFICATION: defn of the suggested wrapper fn
+ #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+ pub fn time_passes(&self) -> bool {
+ self.unstable_opts.time_passes || self.unstable_opts.time
+ }
+}
+
+impl CodegenOptions {
+ // JUSTIFICATION: defn of the suggested wrapper fn
+ #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+ pub fn instrument_coverage(&self) -> InstrumentCoverage {
+ self.instrument_coverage.unwrap_or(InstrumentCoverage::Off)
+ }
+}
+
+// Sometimes different options need to build a common structure.
+// That structure is kept in one of the options' fields; the other options become
+// dummies that merely redirect to it (e.g. `link_arg` values are appended to `link_args`).
+macro_rules! redirect_field {
+ ($cg:ident.link_arg) => {
+ $cg.link_args
+ };
+ ($cg:ident.pre_link_arg) => {
+ $cg.pre_link_args
+ };
+ ($cg:ident.$field:ident) => {
+ $cg.$field
+ };
+}
+
+type OptionSetter<O> = fn(&mut O, v: Option<&str>) -> bool;
+type OptionDescrs<O> = &'static [(&'static str, OptionSetter<O>, &'static str, &'static str)];
+
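+/// Parses every option given for `prefix` on the command line into `O`. For example
+/// (assuming the `"C"` prefix): `-C opt-level=3` is normalized to the key `opt_level`,
+/// looked up in `descrs`, and handed to its registered setter (which for `opt_level`
+/// wraps `parse_string`) with the value `"3"`; unknown keys and ill-typed values are
+/// reported via `early_error`.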
+fn build_options<O: Default>(
+ matches: &getopts::Matches,
+ descrs: OptionDescrs<O>,
+ prefix: &str,
+ outputname: &str,
+ error_format: ErrorOutputType,
+) -> O {
+ let mut op = O::default();
+ for option in matches.opt_strs(prefix) {
+ let (key, value) = match option.split_once('=') {
+ None => (option, None),
+ Some((k, v)) => (k.to_string(), Some(v)),
+ };
+
+ let option_to_lookup = key.replace('-', "_");
+ match descrs.iter().find(|(name, ..)| *name == option_to_lookup) {
+ Some((_, setter, type_desc, _)) => {
+ if !setter(&mut op, value) {
+ match value {
+ None => early_error(
+ error_format,
+ &format!(
+ "{0} option `{1}` requires {2} ({3} {1}=<value>)",
+ outputname, key, type_desc, prefix
+ ),
+ ),
+ Some(value) => early_error(
+ error_format,
+ &format!(
+ "incorrect value `{value}` for {outputname} option `{key}` - {type_desc} was expected"
+ ),
+ ),
+ }
+ }
+ }
+ None => early_error(error_format, &format!("unknown {outputname} option: `{key}`")),
+ }
+ }
+ op
+}
+
+#[allow(non_upper_case_globals)]
+mod desc {
+ pub const parse_no_flag: &str = "no value";
+ pub const parse_bool: &str = "one of: `y`, `yes`, `on`, `n`, `no`, or `off`";
+ pub const parse_opt_bool: &str = parse_bool;
+ pub const parse_string: &str = "a string";
+ pub const parse_opt_string: &str = parse_string;
+ pub const parse_string_push: &str = parse_string;
+ pub const parse_opt_langid: &str = "a language identifier";
+ pub const parse_opt_pathbuf: &str = "a path";
+ pub const parse_list: &str = "a space-separated list of strings";
+ pub const parse_list_with_polarity: &str =
+ "a comma-separated list of strings, with elements beginning with + or -";
+ pub const parse_opt_comma_list: &str = "a comma-separated list of strings";
+ pub const parse_number: &str = "a number";
+ pub const parse_opt_number: &str = parse_number;
+ pub const parse_threads: &str = parse_number;
+ pub const parse_passes: &str = "a space-separated list of passes, or `all`";
+ pub const parse_panic_strategy: &str = "either `unwind` or `abort`";
+ pub const parse_opt_panic_strategy: &str = parse_panic_strategy;
+ pub const parse_oom_strategy: &str = "either `panic` or `abort`";
+ pub const parse_relro_level: &str = "one of: `full`, `partial`, or `off`";
+ pub const parse_sanitizers: &str = "comma separated list of sanitizers: `address`, `cfi`, `hwaddress`, `leak`, `memory`, `memtag`, `shadow-call-stack`, or `thread`";
+ pub const parse_sanitizer_memory_track_origins: &str = "0, 1, or 2";
+ pub const parse_cfguard: &str =
+ "either a boolean (`yes`, `no`, `on`, `off`, etc), `checks`, or `nochecks`";
+ pub const parse_cfprotection: &str = "`none`|`no`|`n` (default), `branch`, `return`, or `full`|`yes`|`y` (equivalent to `branch` and `return`)";
+ pub const parse_strip: &str = "either `none`, `debuginfo`, or `symbols`";
+ pub const parse_linker_flavor: &str = ::rustc_target::spec::LinkerFlavor::one_of();
+ pub const parse_optimization_fuel: &str = "crate=integer";
+ pub const parse_mir_spanview: &str = "`statement` (default), `terminator`, or `block`";
+ pub const parse_instrument_coverage: &str =
+ "`all` (default), `except-unused-generics`, `except-unused-functions`, or `off`";
+ pub const parse_unpretty: &str = "`string` or `string=string`";
+ pub const parse_treat_err_as_bug: &str = "either no value or a number bigger than 0";
+ pub const parse_lto: &str =
+ "either a boolean (`yes`, `no`, `on`, `off`, etc), `thin`, `fat`, or omitted";
+ pub const parse_linker_plugin_lto: &str =
+ "either a boolean (`yes`, `no`, `on`, `off`, etc), or the path to the linker plugin";
+ pub const parse_location_detail: &str = "either `none`, or a comma separated list of location details to track: `file`, `line`, or `column`";
+ pub const parse_switch_with_opt_path: &str =
+ "an optional path to the profiling data output directory";
+ pub const parse_merge_functions: &str = "one of: `disabled`, `trampolines`, or `aliases`";
+ pub const parse_symbol_mangling_version: &str = "either `legacy` or `v0` (RFC 2603)";
+ pub const parse_src_file_hash: &str = "either `md5` or `sha1`";
+ pub const parse_relocation_model: &str =
+ "one of supported relocation models (`rustc --print relocation-models`)";
+ pub const parse_code_model: &str = "one of supported code models (`rustc --print code-models`)";
+ pub const parse_tls_model: &str = "one of supported TLS models (`rustc --print tls-models`)";
+ pub const parse_target_feature: &str = parse_string;
+ pub const parse_wasi_exec_model: &str = "either `command` or `reactor`";
+ pub const parse_split_debuginfo: &str =
+ "one of supported split-debuginfo modes (`off`, `packed`, or `unpacked`)";
+ pub const parse_split_dwarf_kind: &str =
+ "one of supported split dwarf modes (`split` or `single`)";
+ pub const parse_gcc_ld: &str = "one of: no value, `lld`";
+ pub const parse_stack_protector: &str =
+ "one of (`none` (default), `basic`, `strong`, or `all`)";
+ pub const parse_branch_protection: &str =
+ "a `,` separated combination of `bti`, `b-key`, `pac-ret`, or `leaf`";
+ pub const parse_proc_macro_execution_strategy: &str =
+ "one of supported execution strategies (`same-thread`, or `cross-thread`)";
+}
+
+mod parse {
+ pub(crate) use super::*;
+ use std::str::FromStr;
+
+ /// This is for boolean options that don't take a value and start with
+ /// `no-`. This style of option is deprecated.
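+ /// For example, `-C no-prepopulate-passes` sets the flag; passing a value
+ /// (e.g. `=yes`) is rejected.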
+ pub(crate) fn parse_no_flag(slot: &mut bool, v: Option<&str>) -> bool {
+ match v {
+ None => {
+ *slot = true;
+ true
+ }
+ Some(_) => false,
+ }
+ }
+
+ /// Use this for any boolean option that has a static default.
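+ /// For example, `-C embed-bitcode=no` stores `false`, while a bare
+ /// `-C embed-bitcode` (no value) stores `true`.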
+ pub(crate) fn parse_bool(slot: &mut bool, v: Option<&str>) -> bool {
+ match v {
+ Some("y") | Some("yes") | Some("on") | None => {
+ *slot = true;
+ true
+ }
+ Some("n") | Some("no") | Some("off") => {
+ *slot = false;
+ true
+ }
+ _ => false,
+ }
+ }
+
+ /// Use this for any boolean option that lacks a static default. (The
+ /// actions taken when such an option is not specified will depend on
+ /// other factors, such as other options, or target options.)
+ pub(crate) fn parse_opt_bool(slot: &mut Option<bool>, v: Option<&str>) -> bool {
+ match v {
+ Some("y") | Some("yes") | Some("on") | None => {
+ *slot = Some(true);
+ true
+ }
+ Some("n") | Some("no") | Some("off") => {
+ *slot = Some(false);
+ true
+ }
+ _ => false,
+ }
+ }
+
+ /// Use this for any string option that has a static default.
+ pub(crate) fn parse_string(slot: &mut String, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => {
+ *slot = s.to_string();
+ true
+ }
+ None => false,
+ }
+ }
+
+ /// Use this for any string option that lacks a static default.
+ pub(crate) fn parse_opt_string(slot: &mut Option<String>, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => {
+ *slot = Some(s.to_string());
+ true
+ }
+ None => false,
+ }
+ }
+
+ /// Parse an optional language identifier, e.g. `en-US` or `zh-CN`.
+ pub(crate) fn parse_opt_langid(slot: &mut Option<LanguageIdentifier>, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => {
+ *slot = rustc_errors::LanguageIdentifier::from_str(s).ok();
+ true
+ }
+ None => false,
+ }
+ }
+
+ pub(crate) fn parse_opt_pathbuf(slot: &mut Option<PathBuf>, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => {
+ *slot = Some(PathBuf::from(s));
+ true
+ }
+ None => false,
+ }
+ }
+
+ pub(crate) fn parse_string_push(slot: &mut Vec<String>, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => {
+ slot.push(s.to_string());
+ true
+ }
+ None => false,
+ }
+ }
+
+ pub(crate) fn parse_list(slot: &mut Vec<String>, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => {
+ slot.extend(s.split_whitespace().map(|s| s.to_string()));
+ true
+ }
+ None => false,
+ }
+ }
+
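+ /// Parses a comma-separated list of `+`/`-`-prefixed names. For example,
+ /// `-Z mir-enable-passes=+DestProp,-InstCombine` yields
+ /// `[("DestProp", true), ("InstCombine", false)]`.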
+ pub(crate) fn parse_list_with_polarity(
+ slot: &mut Vec<(String, bool)>,
+ v: Option<&str>,
+ ) -> bool {
+ match v {
+ Some(s) => {
+ for s in s.split(',') {
+ let Some(pass_name) = s.strip_prefix(&['+', '-'][..]) else { return false };
+ slot.push((pass_name.to_string(), &s[..1] == "+"));
+ }
+ true
+ }
+ None => false,
+ }
+ }
+
+ pub(crate) fn parse_location_detail(ld: &mut LocationDetail, v: Option<&str>) -> bool {
+ if let Some(v) = v {
+ ld.line = false;
+ ld.file = false;
+ ld.column = false;
+ if v == "none" {
+ return true;
+ }
+ for s in v.split(',') {
+ match s {
+ "file" => ld.file = true,
+ "line" => ld.line = true,
+ "column" => ld.column = true,
+ _ => return false,
+ }
+ }
+ true
+ } else {
+ false
+ }
+ }
+
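+ /// The resulting list is sorted, so that e.g. the dep-tracking hash of a
+ /// `[TRACKED]` option does not depend on the order in which the values were
+ /// given on the command line.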
+ pub(crate) fn parse_opt_comma_list(slot: &mut Option<Vec<String>>, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => {
+ let mut v: Vec<_> = s.split(',').map(|s| s.to_string()).collect();
+ v.sort_unstable();
+ *slot = Some(v);
+ true
+ }
+ None => false,
+ }
+ }
+
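+ /// A value of `0` (e.g. `-Z threads=0`) selects the number of logical CPUs
+ /// reported by `num_cpus`.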
+ pub(crate) fn parse_threads(slot: &mut usize, v: Option<&str>) -> bool {
+ match v.and_then(|s| s.parse().ok()) {
+ Some(0) => {
+ *slot = ::num_cpus::get();
+ true
+ }
+ Some(i) => {
+ *slot = i;
+ true
+ }
+ None => false,
+ }
+ }
+
+ /// Use this for any numeric option that has a static default.
+ pub(crate) fn parse_number<T: Copy + FromStr>(slot: &mut T, v: Option<&str>) -> bool {
+ match v.and_then(|s| s.parse().ok()) {
+ Some(i) => {
+ *slot = i;
+ true
+ }
+ None => false,
+ }
+ }
+
+ /// Use this for any numeric option that lacks a static default.
+ pub(crate) fn parse_opt_number<T: Copy + FromStr>(
+ slot: &mut Option<T>,
+ v: Option<&str>,
+ ) -> bool {
+ match v {
+ Some(s) => {
+ *slot = s.parse().ok();
+ slot.is_some()
+ }
+ None => false,
+ }
+ }
+
+ pub(crate) fn parse_passes(slot: &mut Passes, v: Option<&str>) -> bool {
+ match v {
+ Some("all") => {
+ *slot = Passes::All;
+ true
+ }
+ v => {
+ let mut passes = vec![];
+ if parse_list(&mut passes, v) {
+ slot.extend(passes);
+ true
+ } else {
+ false
+ }
+ }
+ }
+ }
+
+ pub(crate) fn parse_opt_panic_strategy(
+ slot: &mut Option<PanicStrategy>,
+ v: Option<&str>,
+ ) -> bool {
+ match v {
+ Some("unwind") => *slot = Some(PanicStrategy::Unwind),
+ Some("abort") => *slot = Some(PanicStrategy::Abort),
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_panic_strategy(slot: &mut PanicStrategy, v: Option<&str>) -> bool {
+ match v {
+ Some("unwind") => *slot = PanicStrategy::Unwind,
+ Some("abort") => *slot = PanicStrategy::Abort,
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_oom_strategy(slot: &mut OomStrategy, v: Option<&str>) -> bool {
+ match v {
+ Some("panic") => *slot = OomStrategy::Panic,
+ Some("abort") => *slot = OomStrategy::Abort,
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_relro_level(slot: &mut Option<RelroLevel>, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => match s.parse::<RelroLevel>() {
+ Ok(level) => *slot = Some(level),
+ _ => return false,
+ },
+ _ => return false,
+ }
+ true
+ }
+
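+ /// Accumulates a comma-separated list into a set; e.g. `-Z sanitizer=address,leak`
+ /// yields `SanitizerSet::ADDRESS | SanitizerSet::LEAK`.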
+ pub(crate) fn parse_sanitizers(slot: &mut SanitizerSet, v: Option<&str>) -> bool {
+ if let Some(v) = v {
+ for s in v.split(',') {
+ *slot |= match s {
+ "address" => SanitizerSet::ADDRESS,
+ "cfi" => SanitizerSet::CFI,
+ "leak" => SanitizerSet::LEAK,
+ "memory" => SanitizerSet::MEMORY,
+ "memtag" => SanitizerSet::MEMTAG,
+ "shadow-call-stack" => SanitizerSet::SHADOWCALLSTACK,
+ "thread" => SanitizerSet::THREAD,
+ "hwaddress" => SanitizerSet::HWADDRESS,
+ _ => return false,
+ }
+ }
+ true
+ } else {
+ false
+ }
+ }
+
+ pub(crate) fn parse_sanitizer_memory_track_origins(slot: &mut usize, v: Option<&str>) -> bool {
+ match v {
+ Some("2") | None => {
+ *slot = 2;
+ true
+ }
+ Some("1") => {
+ *slot = 1;
+ true
+ }
+ Some("0") => {
+ *slot = 0;
+ true
+ }
+ Some(_) => false,
+ }
+ }
+
+ pub(crate) fn parse_strip(slot: &mut Strip, v: Option<&str>) -> bool {
+ match v {
+ Some("none") => *slot = Strip::None,
+ Some("debuginfo") => *slot = Strip::Debuginfo,
+ Some("symbols") => *slot = Strip::Symbols,
+ _ => return false,
+ }
+ true
+ }
+
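+ /// Boolean spellings are accepted as aliases, so `-C control-flow-guard=yes`
+ /// behaves like `=checks`, and `=no` disables the guard.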
+ pub(crate) fn parse_cfguard(slot: &mut CFGuard, v: Option<&str>) -> bool {
+ if v.is_some() {
+ let mut bool_arg = None;
+ if parse_opt_bool(&mut bool_arg, v) {
+ *slot = if bool_arg.unwrap() { CFGuard::Checks } else { CFGuard::Disabled };
+ return true;
+ }
+ }
+
+ *slot = match v {
+ None => CFGuard::Checks,
+ Some("checks") => CFGuard::Checks,
+ Some("nochecks") => CFGuard::NoChecks,
+ Some(_) => return false,
+ };
+ true
+ }
+
+ pub(crate) fn parse_cfprotection(slot: &mut CFProtection, v: Option<&str>) -> bool {
+ if v.is_some() {
+ let mut bool_arg = None;
+ if parse_opt_bool(&mut bool_arg, v) {
+ *slot = if bool_arg.unwrap() { CFProtection::Full } else { CFProtection::None };
+ return true;
+ }
+ }
+
+ *slot = match v {
+ None | Some("none") => CFProtection::None,
+ Some("branch") => CFProtection::Branch,
+ Some("return") => CFProtection::Return,
+ Some("full") => CFProtection::Full,
+ Some(_) => return false,
+ };
+ true
+ }
+
+ pub(crate) fn parse_linker_flavor(slot: &mut Option<LinkerFlavor>, v: Option<&str>) -> bool {
+ match v.and_then(LinkerFlavor::from_str) {
+ Some(lf) => *slot = Some(lf),
+ _ => return false,
+ }
+ true
+ }
+
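+ /// Parses `crate=integer`; e.g. `-Z fuel=foo=25` gives a hypothetical crate
+ /// `foo` a quota of 25 units of optimization fuel.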
+ pub(crate) fn parse_optimization_fuel(
+ slot: &mut Option<(String, u64)>,
+ v: Option<&str>,
+ ) -> bool {
+ match v {
+ None => false,
+ Some(s) => {
+ let parts = s.split('=').collect::<Vec<_>>();
+ if parts.len() != 2 {
+ return false;
+ }
+ let crate_name = parts[0].to_string();
+ let fuel = parts[1].parse::<u64>();
+ if fuel.is_err() {
+ return false;
+ }
+ *slot = Some((crate_name, fuel.unwrap()));
+ true
+ }
+ }
+ }
+
+ pub(crate) fn parse_unpretty(slot: &mut Option<String>, v: Option<&str>) -> bool {
+ match v {
+ None => false,
+ Some(s) if s.split('=').count() <= 2 => {
+ *slot = Some(s.to_string());
+ true
+ }
+ _ => false,
+ }
+ }
+
+ pub(crate) fn parse_mir_spanview(slot: &mut Option<MirSpanview>, v: Option<&str>) -> bool {
+ if v.is_some() {
+ let mut bool_arg = None;
+ if parse_opt_bool(&mut bool_arg, v) {
+ *slot = if bool_arg.unwrap() { Some(MirSpanview::Statement) } else { None };
+ return true;
+ }
+ }
+
+ let Some(v) = v else {
+ *slot = Some(MirSpanview::Statement);
+ return true;
+ };
+
+ *slot = Some(match v.trim_end_matches('s') {
+ "statement" | "stmt" => MirSpanview::Statement,
+ "terminator" | "term" => MirSpanview::Terminator,
+ "block" | "basicblock" => MirSpanview::Block,
+ _ => return false,
+ });
+ true
+ }
+
+ pub(crate) fn parse_instrument_coverage(
+ slot: &mut Option<InstrumentCoverage>,
+ v: Option<&str>,
+ ) -> bool {
+ if v.is_some() {
+ let mut bool_arg = None;
+ if parse_opt_bool(&mut bool_arg, v) {
+ *slot = if bool_arg.unwrap() { Some(InstrumentCoverage::All) } else { None };
+ return true;
+ }
+ }
+
+ let Some(v) = v else {
+ *slot = Some(InstrumentCoverage::All);
+ return true;
+ };
+
+ *slot = Some(match v {
+ "all" => InstrumentCoverage::All,
+ "except-unused-generics" | "except_unused_generics" => {
+ InstrumentCoverage::ExceptUnusedGenerics
+ }
+ "except-unused-functions" | "except_unused_functions" => {
+ InstrumentCoverage::ExceptUnusedFunctions
+ }
+ "off" | "no" | "n" | "false" | "0" => InstrumentCoverage::Off,
+ _ => return false,
+ });
+ true
+ }
+
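+ /// With no value (a bare `-Z treat-err-as-bug`), the threshold defaults to `1`,
+ /// i.e. the very first error reported is treated as a bug.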
+ pub(crate) fn parse_treat_err_as_bug(slot: &mut Option<NonZeroUsize>, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => {
+ *slot = s.parse().ok();
+ slot.is_some()
+ }
+ None => {
+ *slot = NonZeroUsize::new(1);
+ true
+ }
+ }
+ }
+
+ pub(crate) fn parse_lto(slot: &mut LtoCli, v: Option<&str>) -> bool {
+ if v.is_some() {
+ let mut bool_arg = None;
+ if parse_opt_bool(&mut bool_arg, v) {
+ *slot = if bool_arg.unwrap() { LtoCli::Yes } else { LtoCli::No };
+ return true;
+ }
+ }
+
+ *slot = match v {
+ None => LtoCli::NoParam,
+ Some("thin") => LtoCli::Thin,
+ Some("fat") => LtoCli::Fat,
+ Some(_) => return false,
+ };
+ true
+ }
+
+ pub(crate) fn parse_linker_plugin_lto(slot: &mut LinkerPluginLto, v: Option<&str>) -> bool {
+ if v.is_some() {
+ let mut bool_arg = None;
+ if parse_opt_bool(&mut bool_arg, v) {
+ *slot = if bool_arg.unwrap() {
+ LinkerPluginLto::LinkerPluginAuto
+ } else {
+ LinkerPluginLto::Disabled
+ };
+ return true;
+ }
+ }
+
+ *slot = match v {
+ None => LinkerPluginLto::LinkerPluginAuto,
+ Some(path) => LinkerPluginLto::LinkerPlugin(PathBuf::from(path)),
+ };
+ true
+ }
+
+ pub(crate) fn parse_switch_with_opt_path(
+ slot: &mut SwitchWithOptPath,
+ v: Option<&str>,
+ ) -> bool {
+ *slot = match v {
+ None => SwitchWithOptPath::Enabled(None),
+ Some(path) => SwitchWithOptPath::Enabled(Some(PathBuf::from(path))),
+ };
+ true
+ }
+
+ pub(crate) fn parse_merge_functions(
+ slot: &mut Option<MergeFunctions>,
+ v: Option<&str>,
+ ) -> bool {
+ match v.and_then(|s| MergeFunctions::from_str(s).ok()) {
+ Some(mergefunc) => *slot = Some(mergefunc),
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_relocation_model(slot: &mut Option<RelocModel>, v: Option<&str>) -> bool {
+ match v.and_then(|s| RelocModel::from_str(s).ok()) {
+ Some(relocation_model) => *slot = Some(relocation_model),
+ None if v == Some("default") => *slot = None,
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_code_model(slot: &mut Option<CodeModel>, v: Option<&str>) -> bool {
+ match v.and_then(|s| CodeModel::from_str(s).ok()) {
+ Some(code_model) => *slot = Some(code_model),
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_tls_model(slot: &mut Option<TlsModel>, v: Option<&str>) -> bool {
+ match v.and_then(|s| TlsModel::from_str(s).ok()) {
+ Some(tls_model) => *slot = Some(tls_model),
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_symbol_mangling_version(
+ slot: &mut Option<SymbolManglingVersion>,
+ v: Option<&str>,
+ ) -> bool {
+ *slot = match v {
+ Some("legacy") => Some(SymbolManglingVersion::Legacy),
+ Some("v0") => Some(SymbolManglingVersion::V0),
+ _ => return false,
+ };
+ true
+ }
+
+ pub(crate) fn parse_src_file_hash(
+ slot: &mut Option<SourceFileHashAlgorithm>,
+ v: Option<&str>,
+ ) -> bool {
+ match v.and_then(|s| SourceFileHashAlgorithm::from_str(s).ok()) {
+ Some(hash_kind) => *slot = Some(hash_kind),
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_target_feature(slot: &mut String, v: Option<&str>) -> bool {
+ match v {
+ Some(s) => {
+ if !slot.is_empty() {
+ slot.push(',');
+ }
+ slot.push_str(s);
+ true
+ }
+ None => false,
+ }
+ }
+
+ pub(crate) fn parse_wasi_exec_model(slot: &mut Option<WasiExecModel>, v: Option<&str>) -> bool {
+ match v {
+ Some("command") => *slot = Some(WasiExecModel::Command),
+ Some("reactor") => *slot = Some(WasiExecModel::Reactor),
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_split_debuginfo(
+ slot: &mut Option<SplitDebuginfo>,
+ v: Option<&str>,
+ ) -> bool {
+ match v.and_then(|s| SplitDebuginfo::from_str(s).ok()) {
+ Some(e) => *slot = Some(e),
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_split_dwarf_kind(slot: &mut SplitDwarfKind, v: Option<&str>) -> bool {
+ match v.and_then(|s| SplitDwarfKind::from_str(s).ok()) {
+ Some(e) => *slot = e,
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_gcc_ld(slot: &mut Option<LdImpl>, v: Option<&str>) -> bool {
+ match v {
+ None => *slot = None,
+ Some("lld") => *slot = Some(LdImpl::Lld),
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_stack_protector(slot: &mut StackProtector, v: Option<&str>) -> bool {
+ match v.and_then(|s| StackProtector::from_str(s).ok()) {
+ Some(ssp) => *slot = ssp,
+ _ => return false,
+ }
+ true
+ }
+
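+ /// Order matters: `leaf` and `b-key` modify an already-parsed `pac-ret`, so
+ /// `pac-ret` must come first, e.g. `-Z branch-protection=pac-ret,leaf,b-key`.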
+ pub(crate) fn parse_branch_protection(
+ slot: &mut Option<BranchProtection>,
+ v: Option<&str>,
+ ) -> bool {
+ match v {
+ Some(s) => {
+ let slot = slot.get_or_insert_default();
+ for opt in s.split(',') {
+ match opt {
+ "bti" => slot.bti = true,
+ "pac-ret" if slot.pac_ret.is_none() => {
+ slot.pac_ret = Some(PacRet { leaf: false, key: PAuthKey::A })
+ }
+ "leaf" => match slot.pac_ret.as_mut() {
+ Some(pac) => pac.leaf = true,
+ _ => return false,
+ },
+ "b-key" => match slot.pac_ret.as_mut() {
+ Some(pac) => pac.key = PAuthKey::B,
+ _ => return false,
+ },
+ _ => return false,
+ };
+ }
+ }
+ _ => return false,
+ }
+ true
+ }
+
+ pub(crate) fn parse_proc_macro_execution_strategy(
+ slot: &mut ProcMacroExecutionStrategy,
+ v: Option<&str>,
+ ) -> bool {
+ *slot = match v {
+ Some("same-thread") => ProcMacroExecutionStrategy::SameThread,
+ Some("cross-thread") => ProcMacroExecutionStrategy::CrossThread,
+ _ => return false,
+ };
+ true
+ }
+}
+
+options! {
+ CodegenOptions, CG_OPTIONS, cgopts, "C", "codegen",
+
+ // This list is in alphabetical order.
+ //
+ // If you add a new option, please update:
+ // - compiler/rustc_interface/src/tests.rs
+ // - src/doc/rustc/src/codegen-options/index.md
+
+ ar: String = (String::new(), parse_string, [UNTRACKED],
+ "this option is deprecated and does nothing"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::code_model` instead of this field"))]
+ code_model: Option<CodeModel> = (None, parse_code_model, [TRACKED],
+ "choose the code model to use (`rustc --print code-models` for details)"),
+ codegen_units: Option<usize> = (None, parse_opt_number, [UNTRACKED],
+ "divide crate into N units to optimize in parallel"),
+ control_flow_guard: CFGuard = (CFGuard::Disabled, parse_cfguard, [TRACKED],
+ "use Windows Control Flow Guard (default: no)"),
+ debug_assertions: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "explicitly enable the `cfg(debug_assertions)` directive"),
+ debuginfo: usize = (0, parse_number, [TRACKED],
+ "debug info emission level (0 = no debug info, 1 = line tables only, \
+ 2 = full debug info with variable and type information; default: 0)"),
+ default_linker_libraries: bool = (false, parse_bool, [UNTRACKED],
+ "allow the linker to link its default libraries (default: no)"),
+ embed_bitcode: bool = (true, parse_bool, [TRACKED],
+ "emit bitcode in rlibs (default: yes)"),
+ extra_filename: String = (String::new(), parse_string, [UNTRACKED],
+ "extra data to put in each output filename"),
+ force_frame_pointers: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "force use of the frame pointers"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::must_emit_unwind_tables` instead of this field"))]
+ force_unwind_tables: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "force use of unwind tables"),
+ incremental: Option<String> = (None, parse_opt_string, [UNTRACKED],
+ "enable incremental compilation"),
+ inline_threshold: Option<u32> = (None, parse_opt_number, [TRACKED],
+ "set the threshold for inlining a function"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::instrument_coverage` instead of this field"))]
+ instrument_coverage: Option<InstrumentCoverage> = (None, parse_instrument_coverage, [TRACKED],
+ "instrument the generated code to support LLVM source-based code coverage \
+ reports (note, the compiler build config must include `profiler = true`); \
+ implies `-C symbol-mangling-version=v0`. Optional values are:
+ `=all` (implicit value)
+ `=except-unused-generics`
+ `=except-unused-functions`
+ `=off` (default)"),
+ link_arg: (/* redirected to link_args */) = ((), parse_string_push, [UNTRACKED],
+ "a single extra argument to append to the linker invocation (can be used several times)"),
+ link_args: Vec<String> = (Vec::new(), parse_list, [UNTRACKED],
+ "extra arguments to append to the linker invocation (space separated)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::link_dead_code` instead of this field"))]
+ link_dead_code: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "keep dead code at link time (useful for code coverage) (default: no)"),
+ link_self_contained: Option<bool> = (None, parse_opt_bool, [UNTRACKED],
+ "control whether to link Rust provided C objects/libraries or rely
+ on C toolchain installed in the system"),
+ linker: Option<PathBuf> = (None, parse_opt_pathbuf, [UNTRACKED],
+ "system linker to link outputs with"),
+ linker_flavor: Option<LinkerFlavor> = (None, parse_linker_flavor, [UNTRACKED],
+ "linker flavor"),
+ linker_plugin_lto: LinkerPluginLto = (LinkerPluginLto::Disabled,
+ parse_linker_plugin_lto, [TRACKED],
+ "generate build artifacts that are compatible with linker-based LTO"),
+ llvm_args: Vec<String> = (Vec::new(), parse_list, [TRACKED],
+ "a list of arguments to pass to LLVM (space separated)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::lto` instead of this field"))]
+ lto: LtoCli = (LtoCli::Unspecified, parse_lto, [TRACKED],
+ "perform LLVM link-time optimizations"),
+ metadata: Vec<String> = (Vec::new(), parse_list, [TRACKED],
+ "metadata to mangle symbol names with"),
+ no_prepopulate_passes: bool = (false, parse_no_flag, [TRACKED],
+ "give an empty list of passes to the pass manager"),
+ no_redzone: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "disable the use of the redzone"),
+ no_stack_check: bool = (false, parse_no_flag, [UNTRACKED],
+ "this option is deprecated and does nothing"),
+ no_vectorize_loops: bool = (false, parse_no_flag, [TRACKED],
+ "disable loop vectorization optimization passes"),
+ no_vectorize_slp: bool = (false, parse_no_flag, [TRACKED],
+ "disable LLVM's SLP vectorization pass"),
+ opt_level: String = ("0".to_string(), parse_string, [TRACKED],
+ "optimization level (0-3, s, or z; default: 0)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::overflow_checks` instead of this field"))]
+ overflow_checks: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "use overflow checks for integer arithmetic"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::panic_strategy` instead of this field"))]
+ panic: Option<PanicStrategy> = (None, parse_opt_panic_strategy, [TRACKED],
+ "panic strategy to compile crate with"),
+ passes: Vec<String> = (Vec::new(), parse_list, [TRACKED],
+ "a list of extra LLVM passes to run (space separated)"),
+ prefer_dynamic: bool = (false, parse_bool, [TRACKED],
+ "prefer dynamic linking to static linking (default: no)"),
+ profile_generate: SwitchWithOptPath = (SwitchWithOptPath::Disabled,
+ parse_switch_with_opt_path, [TRACKED],
+ "compile the program with profiling instrumentation"),
+ profile_use: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
+ "use the given `.profdata` file for profile-guided optimization"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::relocation_model` instead of this field"))]
+ relocation_model: Option<RelocModel> = (None, parse_relocation_model, [TRACKED],
+ "control generation of position-independent code (PIC) \
+ (`rustc --print relocation-models` for details)"),
+ remark: Passes = (Passes::Some(Vec::new()), parse_passes, [UNTRACKED],
+ "print remarks for these optimization passes (space separated, or \"all\")"),
+ rpath: bool = (false, parse_bool, [UNTRACKED],
+ "set rpath values in libs/exes (default: no)"),
+ save_temps: bool = (false, parse_bool, [UNTRACKED],
+ "save all temporary output files during compilation (default: no)"),
+ soft_float: bool = (false, parse_bool, [TRACKED],
+ "use soft float ABI (*eabihf targets only) (default: no)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::split_debuginfo` instead of this field"))]
+ split_debuginfo: Option<SplitDebuginfo> = (None, parse_split_debuginfo, [TRACKED],
+ "how to handle split-debuginfo, a platform-specific option"),
+ strip: Strip = (Strip::None, parse_strip, [UNTRACKED],
+ "tell the linker which information to strip (`none` (default), `debuginfo` or `symbols`)"),
+ symbol_mangling_version: Option<SymbolManglingVersion> = (None,
+ parse_symbol_mangling_version, [TRACKED],
+ "which mangling version to use for symbol names ('legacy' (default) or 'v0')"),
+ target_cpu: Option<String> = (None, parse_opt_string, [TRACKED],
+ "select target processor (`rustc --print target-cpus` for details)"),
+ target_feature: String = (String::new(), parse_target_feature, [TRACKED],
+ "target specific attributes. (`rustc --print target-features` for details). \
+ This feature is unsafe."),
+
+ // This list is in alphabetical order.
+ //
+ // If you add a new option, please update:
+ // - compiler/rustc_interface/src/tests.rs
+ // - src/doc/rustc/src/codegen-options/index.md
+}
+
+options! {
+ UnstableOptions, Z_OPTIONS, dbopts, "Z", "unstable",
+
+ // This list is in alphabetical order.
+ //
+ // If you add a new option, please update:
+ // - compiler/rustc_interface/src/tests.rs
+ // - src/doc/unstable-book/src/compiler-flags
+
+ allow_features: Option<Vec<String>> = (None, parse_opt_comma_list, [TRACKED],
+ "only allow the listed language features to be enabled in code (space separated)"),
+ always_encode_mir: bool = (false, parse_bool, [TRACKED],
+ "encode MIR of all functions into the crate metadata (default: no)"),
+ assume_incomplete_release: bool = (false, parse_bool, [TRACKED],
+ "make cfg(version) treat the current version as incomplete (default: no)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::asm_comments` instead of this field"))]
+ asm_comments: bool = (false, parse_bool, [TRACKED],
+ "generate comments into the assembly (may change behavior) (default: no)"),
+ assert_incr_state: Option<String> = (None, parse_opt_string, [UNTRACKED],
+ "assert that the incremental cache is in given state: \
+ either `loaded` or `not-loaded`."),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::binary_dep_depinfo` instead of this field"))]
+ binary_dep_depinfo: bool = (false, parse_bool, [TRACKED],
+ "include artifacts (sysroot, crate dependencies) used during compilation in dep-info \
+ (default: no)"),
+ box_noalias: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "emit noalias metadata for box (default: yes)"),
+ branch_protection: Option<BranchProtection> = (None, parse_branch_protection, [TRACKED],
+ "set options for branch target identification and pointer authentication on AArch64"),
+ cf_protection: CFProtection = (CFProtection::None, parse_cfprotection, [TRACKED],
+ "instrument control-flow architecture protection"),
+ cgu_partitioning_strategy: Option<String> = (None, parse_opt_string, [TRACKED],
+ "the codegen unit partitioning strategy to use"),
+ chalk: bool = (false, parse_bool, [TRACKED],
+ "enable the experimental Chalk-based trait solving engine"),
+ codegen_backend: Option<String> = (None, parse_opt_string, [TRACKED],
+ "the backend to use"),
+ combine_cgu: bool = (false, parse_bool, [TRACKED],
+ "combine CGUs into a single one"),
+ crate_attr: Vec<String> = (Vec::new(), parse_string_push, [TRACKED],
+ "inject the given attribute in the crate"),
+ debug_info_for_profiling: bool = (false, parse_bool, [TRACKED],
+ "emit discriminators and other data necessary for AutoFDO"),
+ debug_macros: bool = (false, parse_bool, [TRACKED],
+ "emit line numbers debug info inside macros (default: no)"),
+ deduplicate_diagnostics: bool = (true, parse_bool, [UNTRACKED],
+ "deduplicate identical diagnostics (default: yes)"),
+ dep_info_omit_d_target: bool = (false, parse_bool, [TRACKED],
+ "in dep-info output, omit targets for tracking dependencies of the dep-info files \
+ themselves (default: no)"),
+ dep_tasks: bool = (false, parse_bool, [UNTRACKED],
+ "print tasks that execute and the color their dep node gets (requires debug build) \
+ (default: no)"),
+ dlltool: Option<PathBuf> = (None, parse_opt_pathbuf, [UNTRACKED],
+ "import library generation tool (windows-gnu only)"),
+ dont_buffer_diagnostics: bool = (false, parse_bool, [UNTRACKED],
+ "emit diagnostics rather than buffering (breaks NLL error downgrading, sorting) \
+ (default: no)"),
+ drop_tracking: bool = (false, parse_bool, [TRACKED],
+ "enables drop tracking in generators (default: no)"),
+ dual_proc_macros: bool = (false, parse_bool, [TRACKED],
+ "load proc macros for both target and host, but only link to the target (default: no)"),
+ dump_dep_graph: bool = (false, parse_bool, [UNTRACKED],
+ "dump the dependency graph to $RUST_DEP_GRAPH (default: /tmp/dep_graph.gv) \
+ (default: no)"),
+ dump_drop_tracking_cfg: Option<String> = (None, parse_opt_string, [UNTRACKED],
+ "dump drop-tracking control-flow graph as a `.dot` file (default: no)"),
+ dump_mir: Option<String> = (None, parse_opt_string, [UNTRACKED],
+ "dump MIR state to file.
+ `val` is used to select which passes and functions to dump. For example:
+ `all` matches all passes and functions,
+ `foo` matches all passes for functions whose name contains 'foo',
+ `foo & ConstProp` matches only the 'ConstProp' pass for functions whose name contains 'foo',
+ `foo | bar` all passes for function names containing 'foo' or 'bar'."),
+ dump_mir_dataflow: bool = (false, parse_bool, [UNTRACKED],
+ "in addition to `.mir` files, create graphviz `.dot` files with dataflow results \
+ (default: no)"),
+ dump_mir_dir: String = ("mir_dump".to_string(), parse_string, [UNTRACKED],
+ "the directory the MIR is dumped into (default: `mir_dump`)"),
+ dump_mir_exclude_pass_number: bool = (false, parse_bool, [UNTRACKED],
+ "exclude the pass number when dumping MIR (used in tests) (default: no)"),
+ dump_mir_graphviz: bool = (false, parse_bool, [UNTRACKED],
+ "in addition to `.mir` files, create graphviz `.dot` files (and with \
+ `-Z instrument-coverage`, also create a `.dot` file for the MIR-derived \
+ coverage graph) (default: no)"),
+ dump_mir_spanview: Option<MirSpanview> = (None, parse_mir_spanview, [UNTRACKED],
+ "in addition to `.mir` files, create `.html` files to view spans for \
+ all `statement`s (including terminators), only `terminator` spans, or \
+ computed `block` spans (one span encompassing a block's terminator and \
+ all statements). If `-Z instrument-coverage` is also enabled, create \
+ an additional `.html` file showing the computed coverage spans."),
+ dwarf_version: Option<u32> = (None, parse_opt_number, [TRACKED],
+ "version of DWARF debug information to emit (default: 2 or 4, depending on platform)"),
+ emit_stack_sizes: bool = (false, parse_bool, [UNTRACKED],
+ "emit a section containing stack size metadata (default: no)"),
+ emit_thin_lto: bool = (true, parse_bool, [TRACKED],
+ "emit the bc module with thin LTO info (default: yes)"),
+ export_executable_symbols: bool = (false, parse_bool, [TRACKED],
+ "export symbols from executables, as if they were dynamic libraries"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::fewer_names` instead of this field"))]
+ fewer_names: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "reduce memory use by retaining fewer names within compilation artifacts (LLVM-IR) \
+ (default: no)"),
+ force_unstable_if_unmarked: bool = (false, parse_bool, [TRACKED],
+ "force all crates to be `rustc_private` unstable (default: no)"),
+ fuel: Option<(String, u64)> = (None, parse_optimization_fuel, [TRACKED],
+ "set the optimization fuel quota for a crate"),
+ function_sections: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "whether each function should go in its own section"),
+ future_incompat_test: bool = (false, parse_bool, [UNTRACKED],
+ "forces all lints to be future incompatible, used for internal testing (default: no)"),
+ gcc_ld: Option<LdImpl> = (None, parse_gcc_ld, [TRACKED], "implementation of ld used by cc"),
+ graphviz_dark_mode: bool = (false, parse_bool, [UNTRACKED],
+ "use dark-themed colors in graphviz output (default: no)"),
+ graphviz_font: String = ("Courier, monospace".to_string(), parse_string, [UNTRACKED],
+ "use the given `fontname` in graphviz output; can be overridden by setting \
+ environment variable `RUSTC_GRAPHVIZ_FONT` (default: `Courier, monospace`)"),
+ hir_stats: bool = (false, parse_bool, [UNTRACKED],
+ "print some statistics about AST and HIR (default: no)"),
+ human_readable_cgu_names: bool = (false, parse_bool, [TRACKED],
+ "generate human-readable, predictable names for codegen units (default: no)"),
+ identify_regions: bool = (false, parse_bool, [UNTRACKED],
+ "display unnamed regions as `'<id>`, using a non-ident unique id (default: no)"),
+ incremental_ignore_spans: bool = (false, parse_bool, [UNTRACKED],
+ "ignore spans during ICH computation -- used for testing (default: no)"),
+ incremental_info: bool = (false, parse_bool, [UNTRACKED],
+ "print high-level information about incremental reuse (or the lack thereof) \
+ (default: no)"),
+ incremental_relative_spans: bool = (false, parse_bool, [TRACKED],
+ "hash spans relative to their parent item for incr. comp. (default: no)"),
+ incremental_verify_ich: bool = (false, parse_bool, [UNTRACKED],
+ "verify incr. comp. hashes of green query instances (default: no)"),
+ inline_mir: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "enable MIR inlining (default: no)"),
+ inline_mir_threshold: Option<usize> = (None, parse_opt_number, [TRACKED],
+ "a default MIR inlining threshold (default: 50)"),
+ inline_mir_hint_threshold: Option<usize> = (None, parse_opt_number, [TRACKED],
+ "inlining threshold for functions with inline hint (default: 100)"),
+ inline_in_all_cgus: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "control whether `#[inline]` functions are in all CGUs"),
+ input_stats: bool = (false, parse_bool, [UNTRACKED],
+ "gather statistics about the input (default: no)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::instrument_coverage` instead of this field"))]
+ instrument_coverage: Option<InstrumentCoverage> = (None, parse_instrument_coverage, [TRACKED],
+ "instrument the generated code to support LLVM source-based code coverage \
+ reports (note, the compiler build config must include `profiler = true`); \
+ implies `-C symbol-mangling-version=v0`. Optional values are:
+ `=all` (implicit value)
+ `=except-unused-generics`
+ `=except-unused-functions`
+ `=off` (default)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::instrument_mcount` instead of this field"))]
+ instrument_mcount: bool = (false, parse_bool, [TRACKED],
+ "insert function instrument code for mcount-based tracing (default: no)"),
+ keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED],
+ "keep hygiene data after analysis (default: no)"),
+ link_native_libraries: bool = (true, parse_bool, [UNTRACKED],
+ "link native libraries in the linker invocation (default: yes)"),
+ link_only: bool = (false, parse_bool, [TRACKED],
+ "link the `.rlink` file generated by `-Z no-link` (default: no)"),
+ llvm_plugins: Vec<String> = (Vec::new(), parse_list, [TRACKED],
+ "a list LLVM plugins to enable (space separated)"),
+ llvm_time_trace: bool = (false, parse_bool, [UNTRACKED],
+ "generate JSON tracing data file from LLVM data (default: no)"),
+ location_detail: LocationDetail = (LocationDetail::all(), parse_location_detail, [TRACKED],
+ "what location details should be tracked when using caller_location, either \
+ `none`, or a comma separated list of location details, for which \
+ valid options are `file`, `line`, and `column` (default: `file,line,column`)"),
+ ls: bool = (false, parse_bool, [UNTRACKED],
+ "list the symbols defined by a library crate (default: no)"),
+ macro_backtrace: bool = (false, parse_bool, [UNTRACKED],
+ "show macro backtraces (default: no)"),
+ merge_functions: Option<MergeFunctions> = (None, parse_merge_functions, [TRACKED],
+ "control the operation of the MergeFunctions LLVM pass, taking \
+ the same values as the target option of the same name"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::meta_stats` instead of this field"))]
+ meta_stats: bool = (false, parse_bool, [UNTRACKED],
+ "gather metadata statistics (default: no)"),
+ mir_emit_retag: bool = (false, parse_bool, [TRACKED],
+ "emit Retagging MIR statements, interpreted e.g., by miri; implies -Zmir-opt-level=0 \
+ (default: no)"),
+ mir_enable_passes: Vec<(String, bool)> = (Vec::new(), parse_list_with_polarity, [TRACKED],
+ "use like `-Zmir-enable-passes=+DestProp,-InstCombine`. Forces the specified passes to be \
+ enabled, overriding all other checks. Passes that are not specified are enabled or \
+ disabled by other flags as usual."),
+ mir_pretty_relative_line_numbers: bool = (false, parse_bool, [UNTRACKED],
+ "use line numbers relative to the function in mir pretty printing"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::mir_opt_level` instead of this field"))]
+ mir_opt_level: Option<usize> = (None, parse_opt_number, [TRACKED],
+ "MIR optimization level (0-4; default: 1 in non optimized builds and 2 in optimized builds)"),
+ move_size_limit: Option<usize> = (None, parse_opt_number, [TRACKED],
+ "the size at which the `large_assignments` lint starts to be emitted"),
+ mutable_noalias: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "emit noalias metadata for mutable references (default: yes)"),
+ new_llvm_pass_manager: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "use new LLVM pass manager (default: no)"),
+ nll_facts: bool = (false, parse_bool, [UNTRACKED],
+ "dump facts from NLL analysis into side files (default: no)"),
+ nll_facts_dir: String = ("nll-facts".to_string(), parse_string, [UNTRACKED],
+ "the directory the NLL facts are dumped into (default: `nll-facts`)"),
+ no_analysis: bool = (false, parse_no_flag, [UNTRACKED],
+ "parse and expand the source, but run no analysis"),
+ no_codegen: bool = (false, parse_no_flag, [TRACKED_NO_CRATE_HASH],
+ "run all passes except codegen; no output"),
+ no_generate_arange_section: bool = (false, parse_no_flag, [TRACKED],
+ "omit DWARF address ranges that give faster lookups"),
+ no_interleave_lints: bool = (false, parse_no_flag, [UNTRACKED],
+ "execute lints separately; allows benchmarking individual lints"),
+ no_leak_check: bool = (false, parse_no_flag, [UNTRACKED],
+ "disable the 'leak check' for subtyping; unsound, but useful for tests"),
+ no_link: bool = (false, parse_no_flag, [TRACKED],
+ "compile without linking"),
+ no_parallel_llvm: bool = (false, parse_no_flag, [UNTRACKED],
+ "run LLVM in non-parallel mode (while keeping codegen-units and ThinLTO)"),
+ no_unique_section_names: bool = (false, parse_bool, [TRACKED],
+ "do not use unique names for text and data sections when -Z function-sections is used"),
+ no_profiler_runtime: bool = (false, parse_no_flag, [TRACKED],
+ "prevent automatic injection of the profiler_builtins crate"),
+ normalize_docs: bool = (false, parse_bool, [TRACKED],
+ "normalize associated items in rustdoc when generating documentation"),
+ oom: OomStrategy = (OomStrategy::Abort, parse_oom_strategy, [TRACKED],
+ "panic strategy for out-of-memory handling"),
+ osx_rpath_install_name: bool = (false, parse_bool, [TRACKED],
+ "pass `-install_name @rpath/...` to the macOS linker (default: no)"),
+ diagnostic_width: Option<usize> = (None, parse_opt_number, [UNTRACKED],
+ "set the current output width for diagnostic truncation"),
+ panic_abort_tests: bool = (false, parse_bool, [TRACKED],
+ "support compiling tests with panic=abort (default: no)"),
+ panic_in_drop: PanicStrategy = (PanicStrategy::Unwind, parse_panic_strategy, [TRACKED],
+ "panic strategy for panics in drops"),
+ parse_only: bool = (false, parse_bool, [UNTRACKED],
+ "parse only; do not compile, assemble, or link (default: no)"),
+ perf_stats: bool = (false, parse_bool, [UNTRACKED],
+ "print some performance-related statistics (default: no)"),
+ pick_stable_methods_before_any_unstable: bool = (true, parse_bool, [TRACKED],
+ "try to pick stable methods first before picking any unstable methods (default: yes)"),
+ plt: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "whether to use the PLT when calling into shared libraries;
+ only has effect for PIC code on systems with ELF binaries
+ (default: PLT is disabled if full relro is enabled)"),
+ polonius: bool = (false, parse_bool, [TRACKED],
+ "enable polonius-based borrow-checker (default: no)"),
+ polymorphize: bool = (false, parse_bool, [TRACKED],
+ "perform polymorphization analysis"),
+ pre_link_arg: (/* redirected to pre_link_args */) = ((), parse_string_push, [UNTRACKED],
+ "a single extra argument to prepend the linker invocation (can be used several times)"),
+ pre_link_args: Vec<String> = (Vec::new(), parse_list, [UNTRACKED],
+ "extra arguments to prepend to the linker invocation (space separated)"),
+ precise_enum_drop_elaboration: bool = (true, parse_bool, [TRACKED],
+ "use a more precise version of drop elaboration for matches on enums (default: yes). \
+ This results in better codegen, but has caused miscompilations on some tier 2 platforms. \
+ See #77382 and #74551."),
+ print_fuel: Option<String> = (None, parse_opt_string, [TRACKED],
+ "make rustc print the total optimization fuel used by a crate"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::print_llvm_passes` instead of this field"))]
+ print_llvm_passes: bool = (false, parse_bool, [UNTRACKED],
+ "print the LLVM optimization passes being run (default: no)"),
+ print_mono_items: Option<String> = (None, parse_opt_string, [UNTRACKED],
+ "print the result of the monomorphization collection pass"),
+ print_type_sizes: bool = (false, parse_bool, [UNTRACKED],
+ "print layout information for each type encountered (default: no)"),
+ proc_macro_backtrace: bool = (false, parse_bool, [UNTRACKED],
+ "show backtraces for panics during proc-macro execution (default: no)"),
+ proc_macro_execution_strategy: ProcMacroExecutionStrategy = (ProcMacroExecutionStrategy::SameThread,
+ parse_proc_macro_execution_strategy, [UNTRACKED],
+ "how to run proc-macro code (default: same-thread)"),
+ profile: bool = (false, parse_bool, [TRACKED],
+ "insert profiling code (default: no)"),
+ profile_closures: bool = (false, parse_no_flag, [UNTRACKED],
+ "profile size of closures"),
+ profile_emit: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
+ "file path to emit profiling data at runtime when using 'profile' \
+ (default based on relative source path)"),
+ profiler_runtime: String = (String::from("profiler_builtins"), parse_string, [TRACKED],
+ "name of the profiler runtime crate to automatically inject (default: `profiler_builtins`)"),
+ profile_sample_use: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
+ "use the given `.prof` file for sampled profile-guided optimization (also known as AutoFDO)"),
+ query_dep_graph: bool = (false, parse_bool, [UNTRACKED],
+ "enable queries of the dependency graph for regression testing (default: no)"),
+ randomize_layout: bool = (false, parse_bool, [TRACKED],
+ "randomize the layout of types (default: no)"),
+ layout_seed: Option<u64> = (None, parse_opt_number, [TRACKED],
+ "seed layout randomization"),
+ relax_elf_relocations: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "whether ELF relocations can be relaxed"),
+ relro_level: Option<RelroLevel> = (None, parse_relro_level, [TRACKED],
+ "choose which RELRO level to use"),
+ remap_cwd_prefix: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
+ "remap paths under the current working directory to this path prefix"),
+ simulate_remapped_rust_src_base: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
+ "simulate the effect of remap-debuginfo = true at bootstrapping by remapping path \
+ to rust's source base directory; only meant for testing purposes"),
+ report_delayed_bugs: bool = (false, parse_bool, [TRACKED],
+ "immediately print bugs registered with `delay_span_bug` (default: no)"),
+ sanitizer: SanitizerSet = (SanitizerSet::empty(), parse_sanitizers, [TRACKED],
+ "use a sanitizer"),
+ sanitizer_memory_track_origins: usize = (0, parse_sanitizer_memory_track_origins, [TRACKED],
+ "enable origins tracking in MemorySanitizer"),
+ sanitizer_recover: SanitizerSet = (SanitizerSet::empty(), parse_sanitizers, [TRACKED],
+ "enable recovery for selected sanitizers"),
+ saturating_float_casts: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "make float->int casts UB-free: numbers outside the integer type's range are clipped to \
+ the max/min integer respectively, and NaN is mapped to 0 (default: yes)"),
+ save_analysis: bool = (false, parse_bool, [UNTRACKED],
+ "write syntax and type analysis (in JSON format) information, in \
+ addition to normal output (default: no)"),
+ self_profile: SwitchWithOptPath = (SwitchWithOptPath::Disabled,
+ parse_switch_with_opt_path, [UNTRACKED],
+ "run the self profiler and output the raw event data"),
+ /// keep this in sync with the event filter names in librustc_data_structures/profiling.rs
+ self_profile_events: Option<Vec<String>> = (None, parse_opt_comma_list, [UNTRACKED],
+ "specify the events recorded by the self profiler;
+ for example: `-Z self-profile-events=default,query-keys`
+ all options: none, all, default, generic-activity, query-provider, query-cache-hit,
+ query-blocked, incr-cache-load, incr-result-hashing, query-keys, function-args, args, llvm, artifact-sizes"),
+ self_profile_counter: String = ("wall-time".to_string(), parse_string, [UNTRACKED],
+ "counter used by the self profiler (default: `wall-time`), one of:
+ `wall-time` (monotonic clock, i.e. `std::time::Instant`)
+ `instructions:u` (retired instructions, userspace-only)
+ `instructions-minus-irqs:u` (subtracting hardware interrupt counts for extra accuracy)"
+ ),
+ share_generics: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "make the current crate share its generic instantiations"),
+ show_span: Option<String> = (None, parse_opt_string, [TRACKED],
+ "show spans for compiler debugging (expr|pat|ty)"),
+ span_debug: bool = (false, parse_bool, [UNTRACKED],
+ "forward proc_macro::Span's `Debug` impl to `Span`"),
+ /// o/w tests have closure@path
+ span_free_formats: bool = (false, parse_bool, [UNTRACKED],
+ "exclude spans when debug-printing compiler state (default: no)"),
+ src_hash_algorithm: Option<SourceFileHashAlgorithm> = (None, parse_src_file_hash, [TRACKED],
+ "hash algorithm of source files in debug info (`md5`, `sha1`, or `sha256`)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::stack_protector` instead of this field"))]
+ stack_protector: StackProtector = (StackProtector::None, parse_stack_protector, [TRACKED],
+ "control stack smash protection strategy (`rustc --print stack-protector-strategies` for details)"),
+ strict_init_checks: bool = (false, parse_bool, [TRACKED],
+ "control if mem::uninitialized and mem::zeroed panic on more UB"),
+ strip: Strip = (Strip::None, parse_strip, [UNTRACKED],
+ "tell the linker which information to strip (`none` (default), `debuginfo` or `symbols`)"),
+ split_dwarf_kind: SplitDwarfKind = (SplitDwarfKind::Split, parse_split_dwarf_kind, [TRACKED],
+ "split dwarf variant (only if -Csplit-debuginfo is enabled and on relevant platform)
+ (default: `split`)
+
+ `split`: sections which do not require relocation are written into a DWARF object (`.dwo`)
+ file which is ignored by the linker
+ `single`: sections which do not require relocation are written into object file but ignored
+ by the linker"),
+ split_dwarf_inlining: bool = (true, parse_bool, [TRACKED],
+ "provide minimal debug info in the object/executable to facilitate online \
+ symbolication/stack traces in the absence of .dwo/.dwp files when using Split DWARF"),
+ symbol_mangling_version: Option<SymbolManglingVersion> = (None,
+ parse_symbol_mangling_version, [TRACKED],
+ "which mangling version to use for symbol names ('legacy' (default) or 'v0')"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::teach` instead of this field"))]
+ teach: bool = (false, parse_bool, [TRACKED],
+ "show extended diagnostic help (default: no)"),
+ temps_dir: Option<String> = (None, parse_opt_string, [UNTRACKED],
+ "the directory the intermediate files are written to"),
+ // Diagnostics are considered side-effects of a query (see `QuerySideEffects`) and are saved
+ // alongside query results and changes to translation options can affect diagnostics - so
+ // translation options should be tracked.
+ translate_lang: Option<LanguageIdentifier> = (None, parse_opt_langid, [TRACKED],
+ "language identifier for diagnostic output"),
+ translate_additional_ftl: Option<PathBuf> = (None, parse_opt_pathbuf, [TRACKED],
+ "additional fluent translation to preferentially use (for testing translation)"),
+ translate_directionality_markers: bool = (false, parse_bool, [TRACKED],
+ "emit directionality isolation markers in translated diagnostics"),
+ tune_cpu: Option<String> = (None, parse_opt_string, [TRACKED],
+ "select processor to schedule for (`rustc --print target-cpus` for details)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::lto` instead of this field"))]
+ thinlto: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "enable ThinLTO when possible"),
+ thir_unsafeck: bool = (false, parse_bool, [TRACKED],
+ "use the THIR unsafety checker (default: no)"),
+ /// We default to 1 here since we want to behave like
+ /// a sequential compiler for now. This'll likely be adjusted
+ /// in the future. Note that -Zthreads=0 is the way to get
+ /// the num_cpus behavior.
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::threads` instead of this field"))]
+ threads: usize = (1, parse_threads, [UNTRACKED],
+ "use a thread pool with N threads"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::time_passes` instead of this field"))]
+ time: bool = (false, parse_bool, [UNTRACKED],
+ "measure time of rustc processes (default: no)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::time_llvm_passes` instead of this field"))]
+ time_llvm_passes: bool = (false, parse_bool, [UNTRACKED],
+ "measure time of each LLVM pass (default: no)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::time_passes` instead of this field"))]
+ time_passes: bool = (false, parse_bool, [UNTRACKED],
+ "measure time of each rustc pass (default: no)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::tls_model` instead of this field"))]
+ tls_model: Option<TlsModel> = (None, parse_tls_model, [TRACKED],
+ "choose the TLS model to use (`rustc --print tls-models` for details)"),
+ trace_macros: bool = (false, parse_bool, [UNTRACKED],
+ "for every macro invocation, print its name and arguments (default: no)"),
+ translate_remapped_path_to_local_path: bool = (true, parse_bool, [TRACKED],
+ "translate remapped paths into local paths when possible (default: yes)"),
+ trap_unreachable: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "generate trap instructions for unreachable intrinsics (default: use target setting, usually yes)"),
+ treat_err_as_bug: Option<NonZeroUsize> = (None, parse_treat_err_as_bug, [TRACKED],
+ "treat error number `val` that occurs as bug"),
+ trim_diagnostic_paths: bool = (true, parse_bool, [UNTRACKED],
+ "in diagnostics, use heuristics to shorten paths referring to items"),
+ ui_testing: bool = (false, parse_bool, [UNTRACKED],
+ "emit compiler diagnostics in a form suitable for UI testing (default: no)"),
+ uninit_const_chunk_threshold: usize = (16, parse_number, [TRACKED],
+ "allow generating const initializers with mixed init/uninit chunks, \
+ and set the maximum number of chunks for which this is allowed (default: 16)"),
+ unleash_the_miri_inside_of_you: bool = (false, parse_bool, [TRACKED],
+ "take the brakes off const evaluation. NOTE: this is unsound (default: no)"),
+ unpretty: Option<String> = (None, parse_unpretty, [UNTRACKED],
+ "present the input source, unstable (and less-pretty) variants;
+ `normal`, `identified`,
+ `expanded`, `expanded,identified`,
+ `expanded,hygiene` (with internal representations),
+ `ast-tree` (raw AST before expansion),
+ `ast-tree,expanded` (raw AST after expansion),
+ `hir` (the HIR), `hir,identified`,
+ `hir,typed` (HIR with types for each node),
+ `hir-tree` (dump the raw HIR),
+ `mir` (the MIR), or `mir-cfg` (graphviz formatted MIR)"),
+ unsound_mir_opts: bool = (false, parse_bool, [TRACKED],
+ "enable unsound and buggy MIR optimizations (default: no)"),
+ /// This name is kind of confusing: Most unstable options enable something themselves, while
+ /// this just allows "normal" options to be feature-gated.
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::unstable_options` instead of this field"))]
+ unstable_options: bool = (false, parse_bool, [UNTRACKED],
+ "adds unstable command line options to rustc interface (default: no)"),
+ use_ctors_section: Option<bool> = (None, parse_opt_bool, [TRACKED],
+ "use legacy .ctors section for initializers rather than .init_array"),
+ validate_mir: bool = (false, parse_bool, [UNTRACKED],
+ "validate MIR after each transformation"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::verbose` instead of this field"))]
+ verbose: bool = (false, parse_bool, [UNTRACKED],
+ "in general, enable more debug printouts (default: no)"),
+ #[cfg_attr(not(bootstrap), rustc_lint_opt_deny_field_access("use `Session::verify_llvm_ir` instead of this field"))]
+ verify_llvm_ir: bool = (false, parse_bool, [TRACKED],
+ "verify LLVM IR (default: no)"),
+ virtual_function_elimination: bool = (false, parse_bool, [TRACKED],
+ "enables dead virtual function elimination optimization. \
+ Requires `-Clto[=[fat,yes]]`"),
+ wasi_exec_model: Option<WasiExecModel> = (None, parse_wasi_exec_model, [TRACKED],
+ "whether to build a wasi command or reactor"),
+
+ // This list is in alphabetical order.
+ //
+ // If you add a new option, please update:
+ // - compiler/rustc_interface/src/tests.rs
+}
+
+#[derive(Clone, Hash, PartialEq, Eq, Debug)]
+pub enum WasiExecModel {
+ Command,
+ Reactor,
+}
+
+#[derive(Clone, Copy, Hash)]
+pub enum LdImpl {
+ Lld,
+}
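+
+// A few illustrative invocations of the unstable options defined above
+// (nightly-only, like all `-Z` options; values taken from the help strings):
+//
+// rustc -Z mir-enable-passes=+DestProp,-InstCombine main.rs
+// rustc -Z self-profile -Z self-profile-events=default,query-keys main.rs
+// rustc -Z instrument-coverage=except-unused-generics main.rs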
diff --git a/compiler/rustc_session/src/output.rs b/compiler/rustc_session/src/output.rs
new file mode 100644
index 000000000..e5e6579d7
--- /dev/null
+++ b/compiler/rustc_session/src/output.rs
@@ -0,0 +1,202 @@
+//! Related to the output filenames of compilation (e.g. save analysis, binaries).
+use crate::config::{CrateType, Input, OutputFilenames, OutputType};
+use crate::Session;
+use rustc_ast as ast;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use std::path::{Path, PathBuf};
+
+pub fn out_filename(
+ sess: &Session,
+ crate_type: CrateType,
+ outputs: &OutputFilenames,
+ crate_name: &str,
+) -> PathBuf {
+ let default_filename = filename_for_input(sess, crate_type, crate_name, outputs);
+ let out_filename = outputs
+ .outputs
+ .get(&OutputType::Exe)
+ .and_then(|s| s.to_owned())
+ .or_else(|| outputs.single_output_file.clone())
+ .unwrap_or(default_filename);
+
+ check_file_is_writeable(&out_filename, sess);
+
+ out_filename
+}
+
+/// Make sure files are writeable. Mac, FreeBSD, and Windows system linkers
+/// check this already -- however, the Linux linker will happily overwrite a
+/// read-only file. We should be consistent.
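+///
+/// Note that a file which does not exist yet counts as writeable: the
+/// subsequent write will simply create it.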
+pub fn check_file_is_writeable(file: &Path, sess: &Session) {
+ if !is_writeable(file) {
+ sess.fatal(&format!(
+ "output file {} is not writeable -- check its \
+ permissions",
+ file.display()
+ ));
+ }
+}
+
+fn is_writeable(p: &Path) -> bool {
+ match p.metadata() {
+ Err(..) => true,
+ Ok(m) => !m.permissions().readonly(),
+ }
+}
+
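+/// Resolves the crate name, in order of priority, from `--crate-name`, the
+/// `#![crate_name]` attribute, and the input file stem (with `-` mapped to
+/// `_`), falling back to `rust_out`.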
+pub fn find_crate_name(sess: &Session, attrs: &[ast::Attribute], input: &Input) -> String {
+ let validate = |s: String, span: Option<Span>| {
+ validate_crate_name(sess, &s, span);
+ s
+ };
+
+ // Look in attributes 100% of the time to make sure the attribute is marked
+ // as used. After doing this, however, we still prioritize a crate name from
+ // the command line over one found in the #[crate_name] attribute. If we
+ // find both, we ensure that they're the same later on as well.
+ let attr_crate_name =
+ sess.find_by_name(attrs, sym::crate_name).and_then(|at| at.value_str().map(|s| (at, s)));
+
+ if let Some(ref s) = sess.opts.crate_name {
+ if let Some((attr, name)) = attr_crate_name {
+ if name.as_str() != s {
+ let msg = format!(
+ "`--crate-name` and `#[crate_name]` are \
+ required to match, but `{s}` != `{name}`"
+ );
+ sess.span_err(attr.span, &msg);
+ }
+ }
+ return validate(s.clone(), None);
+ }
+
+ if let Some((attr, s)) = attr_crate_name {
+ return validate(s.to_string(), Some(attr.span));
+ }
+ if let Input::File(ref path) = *input {
+ if let Some(s) = path.file_stem().and_then(|s| s.to_str()) {
+ if s.starts_with('-') {
+ let msg = format!(
+ "crate names cannot start with a `-`, but \
+ `{s}` has a leading hyphen"
+ );
+ sess.err(&msg);
+ } else {
+ return validate(s.replace('-', "_"), None);
+ }
+ }
+ }
+
+ "rust_out".to_string()
+}
+
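+/// Checks that a crate name is non-empty and contains only alphanumeric
+/// characters and underscores; e.g. `my_crate` is accepted while `my-crate`
+/// is rejected.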
+pub fn validate_crate_name(sess: &Session, s: &str, sp: Option<Span>) {
+ let mut err_count = 0;
+ {
+ let mut say = |s: &str| {
+ match sp {
+ Some(sp) => sess.span_err(sp, s),
+ None => sess.err(s),
+ };
+ err_count += 1;
+ };
+ if s.is_empty() {
+ say("crate name must not be empty");
+ }
+ for c in s.chars() {
+ if c.is_alphanumeric() {
+ continue;
+ }
+ if c == '_' {
+ continue;
+ }
+ say(&format!("invalid character `{c}` in crate name: `{s}`"));
+ }
+ }
+
+ if err_count > 0 {
+ sess.abort_if_errors();
+ }
+}
+
+pub fn filename_for_metadata(
+ sess: &Session,
+ crate_name: &str,
+ outputs: &OutputFilenames,
+) -> PathBuf {
+ // If the command-line specified the path, use that directly.
+ if let Some(Some(out_filename)) = sess.opts.output_types.get(&OutputType::Metadata) {
+ return out_filename.clone();
+ }
+
+ let libname = format!("{}{}", crate_name, sess.opts.cg.extra_filename);
+
+ let out_filename = outputs
+ .single_output_file
+ .clone()
+ .unwrap_or_else(|| outputs.out_directory.join(&format!("lib{libname}.rmeta")));
+
+ check_file_is_writeable(&out_filename, sess);
+
+ out_filename
+}
+
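+/// Computes the default output filename for the given crate type; e.g. an
+/// rlib built from crate `foo` with `-C extra-filename=-abc` is written as
+/// `libfoo-abc.rlib` in the output directory.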
+pub fn filename_for_input(
+ sess: &Session,
+ crate_type: CrateType,
+ crate_name: &str,
+ outputs: &OutputFilenames,
+) -> PathBuf {
+ let libname = format!("{}{}", crate_name, sess.opts.cg.extra_filename);
+
+ match crate_type {
+ CrateType::Rlib => outputs.out_directory.join(&format!("lib{libname}.rlib")),
+ CrateType::Cdylib | CrateType::ProcMacro | CrateType::Dylib => {
+ let (prefix, suffix) = (&sess.target.dll_prefix, &sess.target.dll_suffix);
+ outputs.out_directory.join(&format!("{prefix}{libname}{suffix}"))
+ }
+ CrateType::Staticlib => {
+ let (prefix, suffix) = (&sess.target.staticlib_prefix, &sess.target.staticlib_suffix);
+ outputs.out_directory.join(&format!("{prefix}{libname}{suffix}"))
+ }
+ CrateType::Executable => {
+ let suffix = &sess.target.exe_suffix;
+ let out_filename = outputs.path(OutputType::Exe);
+ if suffix.is_empty() { out_filename } else { out_filename.with_extension(&suffix[1..]) }
+ }
+ }
+}
+
+/// Returns the default crate type for the target.
+///
+/// The default crate type is used when the crate type isn't provided either
+/// through command-line arguments or through crate attributes.
+///
+/// It is `CrateType::Executable` on all platforms but iOS, since there is no
+/// way to run iOS binaries without jailbreaking, and interacting with Rust
+/// code through a static library is the only option for now.
+pub fn default_output_for_target(sess: &Session) -> CrateType {
+ if !sess.target.executables { CrateType::Staticlib } else { CrateType::Executable }
+}
+
+/// Checks whether the target supports `crate_type` as an output.
+pub fn invalid_output_for_target(sess: &Session, crate_type: CrateType) -> bool {
+ if let CrateType::Cdylib | CrateType::Dylib | CrateType::ProcMacro = crate_type {
+ if !sess.target.dynamic_linking {
+ return true;
+ }
+ if sess.crt_static(Some(crate_type)) && !sess.target.crt_static_allows_dylibs {
+ return true;
+ }
+ }
+ if let CrateType::ProcMacro | CrateType::Dylib = crate_type && sess.target.only_cdylib {
+ return true;
+ }
+ if let CrateType::Executable = crate_type && !sess.target.executables {
+ return true;
+ }
+
+ false
+}
diff --git a/compiler/rustc_session/src/parse.rs b/compiler/rustc_session/src/parse.rs
new file mode 100644
index 000000000..f31d52147
--- /dev/null
+++ b/compiler/rustc_session/src/parse.rs
@@ -0,0 +1,326 @@
+//! Contains `ParseSess`, which holds state that outlives any single `Parser`.
+//! It also serves as an input to the parser itself.
+
+use crate::config::CheckCfg;
+use crate::lint::{BufferedEarlyLint, BuiltinLintDiagnostics, Lint, LintId};
+use crate::SessionDiagnostic;
+use rustc_ast::node_id::NodeId;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::sync::{Lock, Lrc};
+use rustc_errors::{emitter::SilentEmitter, ColorConfig, Handler};
+use rustc_errors::{
+ error_code, fallback_fluent_bundle, Applicability, Diagnostic, DiagnosticBuilder,
+ DiagnosticMessage, ErrorGuaranteed, MultiSpan,
+};
+use rustc_feature::{find_feature_issue, GateIssue, UnstableFeatures};
+use rustc_span::edition::Edition;
+use rustc_span::hygiene::ExpnId;
+use rustc_span::source_map::{FilePathMapping, SourceMap};
+use rustc_span::{Span, Symbol};
+
+use std::str;
+
+/// The set of keys (and, optionally, values) that define the compilation
+/// environment of the crate, used to drive conditional compilation.
+pub type CrateConfig = FxHashSet<(Symbol, Option<Symbol>)>;
+pub type CrateCheckConfig = CheckCfg<Symbol>;
+
+/// Spans collected during parsing at places where a certain feature was
+/// used; they are feature-gated accordingly in `check_crate`.
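+///
+/// A typical flow (an illustrative sketch; `sess` is assumed to be a
+/// `ParseSess`, `span` a parsed `Span`, and `sym::box_patterns` stands in for
+/// any feature symbol):
+///
+/// ```ignore (illustrative)
+/// sess.gated_spans.gate(sym::box_patterns, span);
+/// assert!(!sess.gated_spans.is_ungated(sym::box_patterns));
+/// ```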
+#[derive(Default)]
+pub struct GatedSpans {
+ pub spans: Lock<FxHashMap<Symbol, Vec<Span>>>,
+}
+
+impl GatedSpans {
+ /// Feature gate the given `span` under the given `feature`,
+ /// which is the same `Symbol` used in `active.rs`.
+ pub fn gate(&self, feature: Symbol, span: Span) {
+ self.spans.borrow_mut().entry(feature).or_default().push(span);
+ }
+
+ /// Ungate the last span under the given `feature`.
+ /// Panics if the given `span` wasn't the last one.
+ ///
+ /// Using this is discouraged unless you have a really good reason to.
+ pub fn ungate_last(&self, feature: Symbol, span: Span) {
+ let removed_span = self.spans.borrow_mut().entry(feature).or_default().pop().unwrap();
+ debug_assert_eq!(span, removed_span);
+ }
+
+ /// Is the provided `feature` gate ungated currently?
+ ///
+ /// Using this is discouraged unless you have a really good reason to.
+ pub fn is_ungated(&self, feature: Symbol) -> bool {
+ self.spans.borrow().get(&feature).map_or(true, |spans| spans.is_empty())
+ }
+
+ /// Prepend the given set of `spans` onto the set in `self`.
+ pub fn merge(&self, mut spans: FxHashMap<Symbol, Vec<Span>>) {
+ let mut inner = self.spans.borrow_mut();
+ for (gate, mut gate_spans) in inner.drain() {
+ spans.entry(gate).or_default().append(&mut gate_spans);
+ }
+ *inner = spans;
+ }
+}
+
+#[derive(Default)]
+pub struct SymbolGallery {
+ /// All symbols that have occurred, with the span of their first occurrence.
+ pub symbols: Lock<FxHashMap<Symbol, Span>>,
+}
+
+impl SymbolGallery {
+ /// Insert a symbol and its span into the symbol gallery.
+ /// If the symbol has occurred before, the new occurrence is ignored.
+ pub fn insert(&self, symbol: Symbol, span: Span) {
+ self.symbols.lock().entry(symbol).or_insert(span);
+ }
+}
+
+/// Construct a diagnostic for a language feature error at the given `span`.
+/// The `feature`'s `Symbol` is the one you used in `active.rs` and `rustc_span::symbols`.
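+///
+/// Typical usage (an illustrative sketch; `sess` and `span` come from the
+/// caller, and the feature symbol is just an example):
+///
+/// ```ignore (illustrative)
+/// feature_err(&sess, sym::box_patterns, span, "box pattern syntax is experimental").emit();
+/// ```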
+pub fn feature_err<'a>(
+ sess: &'a ParseSess,
+ feature: Symbol,
+ span: impl Into<MultiSpan>,
+ explain: &str,
+) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ feature_err_issue(sess, feature, span, GateIssue::Language, explain)
+}
+
+/// Construct a diagnostic for a feature gate error.
+///
+/// This variant allows you to control whether it is a library or language feature.
+/// Almost always, you want to use this for a language feature. If so, prefer `feature_err`.
+pub fn feature_err_issue<'a>(
+ sess: &'a ParseSess,
+ feature: Symbol,
+ span: impl Into<MultiSpan>,
+ issue: GateIssue,
+ explain: &str,
+) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut err = sess.span_diagnostic.struct_span_err_with_code(span, explain, error_code!(E0658));
+ add_feature_diagnostics_for_issue(&mut err, sess, feature, issue);
+ err
+}
+
+/// Adds the diagnostics for a feature to an existing error.
+pub fn add_feature_diagnostics<'a>(err: &mut Diagnostic, sess: &'a ParseSess, feature: Symbol) {
+ add_feature_diagnostics_for_issue(err, sess, feature, GateIssue::Language);
+}
+
+/// Adds the diagnostics for a feature to an existing error.
+///
+/// This variant allows you to control whether it is a library or language feature.
+/// Almost always, you want to use this for a language feature. If so, prefer
+/// `add_feature_diagnostics`.
+pub fn add_feature_diagnostics_for_issue<'a>(
+ err: &mut Diagnostic,
+ sess: &'a ParseSess,
+ feature: Symbol,
+ issue: GateIssue,
+) {
+ if let Some(n) = find_feature_issue(feature, issue) {
+ err.note(&format!(
+ "see issue #{n} <https://github.com/rust-lang/rust/issues/{n}> for more information"
+ ));
+ }
+
+ // #23973: do not suggest `#![feature(...)]` if we are in beta/stable
+ if sess.unstable_features.is_nightly_build() {
+ err.help(&format!("add `#![feature({feature})]` to the crate attributes to enable"));
+ }
+}
+
+/// Info about a parsing session.
+pub struct ParseSess {
+ pub span_diagnostic: Handler,
+ pub unstable_features: UnstableFeatures,
+ pub config: CrateConfig,
+ pub check_config: CrateCheckConfig,
+ pub edition: Edition,
+ /// Places where raw identifiers were used. This is used to avoid complaining about idents
+ /// clashing with keywords in new editions.
+ pub raw_identifier_spans: Lock<Vec<Span>>,
+ /// Places where identifiers were used that contain invalid Unicode codepoints but that
+ /// otherwise look like valid identifiers. Useful to avoid bad tokenization when encountering
+ /// emoji. We group them to provide a single error per unique incorrect identifier.
+ pub bad_unicode_identifiers: Lock<FxHashMap<Symbol, Vec<Span>>>,
+ source_map: Lrc<SourceMap>,
+ pub buffered_lints: Lock<Vec<BufferedEarlyLint>>,
+ /// Contains the spans of block expressions that could have been incomplete based on the
+ /// operation token that followed them, but that the parser cannot identify without further
+ /// analysis.
+ pub ambiguous_block_expr_parse: Lock<FxHashMap<Span, Span>>,
+ pub gated_spans: GatedSpans,
+ pub symbol_gallery: SymbolGallery,
+ /// Whether the parser has reached `Eof` due to an unclosed brace; used to silence unnecessary errors.
+ pub reached_eof: Lock<bool>,
+ /// Environment variables accessed during the build and their values when they exist.
+ pub env_depinfo: Lock<FxHashSet<(Symbol, Option<Symbol>)>>,
+ /// File paths accessed during the build.
+ pub file_depinfo: Lock<FxHashSet<Symbol>>,
+ /// All the type ascription expressions that have had a suggestion for a likely path typo.
+ pub type_ascription_path_suggestions: Lock<FxHashSet<Span>>,
+ /// Whether `cfg(version)` should treat the current release as incomplete.
+ pub assume_incomplete_release: bool,
+ /// Spans passed to `proc_macro::quote_span`. Each span has a numerical
+ /// identifier represented by its position in the vector.
+ pub proc_macro_quoted_spans: Lock<Vec<Span>>,
+}
+
+impl ParseSess {
+ /// Used for testing.
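+ ///
+ /// A minimal construction (illustrative sketch):
+ ///
+ /// ```ignore (illustrative)
+ /// let psess = ParseSess::new(FilePathMapping::empty());
+ /// let _sm = psess.source_map();
+ /// ```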
+ pub fn new(file_path_mapping: FilePathMapping) -> Self {
+ let fallback_bundle = fallback_fluent_bundle(rustc_errors::DEFAULT_LOCALE_RESOURCES, false);
+ let sm = Lrc::new(SourceMap::new(file_path_mapping));
+ let handler = Handler::with_tty_emitter(
+ ColorConfig::Auto,
+ true,
+ None,
+ Some(sm.clone()),
+ None,
+ fallback_bundle,
+ );
+ ParseSess::with_span_handler(handler, sm)
+ }
+
+ pub fn with_span_handler(handler: Handler, source_map: Lrc<SourceMap>) -> Self {
+ Self {
+ span_diagnostic: handler,
+ unstable_features: UnstableFeatures::from_environment(None),
+ config: FxHashSet::default(),
+ check_config: CrateCheckConfig::default(),
+ edition: ExpnId::root().expn_data().edition,
+ raw_identifier_spans: Lock::new(Vec::new()),
+ bad_unicode_identifiers: Lock::new(Default::default()),
+ source_map,
+ buffered_lints: Lock::new(vec![]),
+ ambiguous_block_expr_parse: Lock::new(FxHashMap::default()),
+ gated_spans: GatedSpans::default(),
+ symbol_gallery: SymbolGallery::default(),
+ reached_eof: Lock::new(false),
+ env_depinfo: Default::default(),
+ file_depinfo: Default::default(),
+ type_ascription_path_suggestions: Default::default(),
+ assume_incomplete_release: false,
+ proc_macro_quoted_spans: Default::default(),
+ }
+ }
+
+ pub fn with_silent_emitter(fatal_note: Option<String>) -> Self {
+ let fallback_bundle = fallback_fluent_bundle(rustc_errors::DEFAULT_LOCALE_RESOURCES, false);
+ let sm = Lrc::new(SourceMap::new(FilePathMapping::empty()));
+ let fatal_handler =
+ Handler::with_tty_emitter(ColorConfig::Auto, false, None, None, None, fallback_bundle);
+ let handler = Handler::with_emitter(
+ false,
+ None,
+ Box::new(SilentEmitter { fatal_handler, fatal_note }),
+ );
+ ParseSess::with_span_handler(handler, sm)
+ }
+
+ #[inline]
+ pub fn source_map(&self) -> &SourceMap {
+ &self.source_map
+ }
+
+ pub fn clone_source_map(&self) -> Lrc<SourceMap> {
+ self.source_map.clone()
+ }
+
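+ /// Records an early lint to be emitted later, once the lint machinery is
+ /// ready; lints raised during parsing are buffered this way.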
+ pub fn buffer_lint(
+ &self,
+ lint: &'static Lint,
+ span: impl Into<MultiSpan>,
+ node_id: NodeId,
+ msg: &str,
+ ) {
+ self.buffered_lints.with_lock(|buffered_lints| {
+ buffered_lints.push(BufferedEarlyLint {
+ span: span.into(),
+ node_id,
+ msg: msg.into(),
+ lint_id: LintId::of(lint),
+ diagnostic: BuiltinLintDiagnostics::Normal,
+ });
+ });
+ }
+
+ pub fn buffer_lint_with_diagnostic(
+ &self,
+ lint: &'static Lint,
+ span: impl Into<MultiSpan>,
+ node_id: NodeId,
+ msg: &str,
+ diagnostic: BuiltinLintDiagnostics,
+ ) {
+ self.buffered_lints.with_lock(|buffered_lints| {
+ buffered_lints.push(BufferedEarlyLint {
+ span: span.into(),
+ node_id,
+ msg: msg.into(),
+ lint_id: LintId::of(lint),
+ diagnostic,
+ });
+ });
+ }
+
+ /// Extend an error with a suggestion to wrap an expression with parentheses to allow the
+ /// parser to continue parsing the following operation as part of the same expression.
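+ ///
+ /// For example, wrapping turns `if x { 1 } else { 2 } + 1` into
+ /// `(if x { 1 } else { 2 }) + 1`, which parses as a single expression.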
+ pub fn expr_parentheses_needed(&self, err: &mut Diagnostic, span: Span) {
+ err.multipart_suggestion(
+ "parentheses are required to parse this as an expression",
+ vec![(span.shrink_to_lo(), "(".to_string()), (span.shrink_to_hi(), ")".to_string())],
+ Applicability::MachineApplicable,
+ );
+ }
+
+ pub fn save_proc_macro_span(&self, span: Span) -> usize {
+ let mut spans = self.proc_macro_quoted_spans.lock();
+ spans.push(span);
+ spans.len() - 1
+ }
+
+ pub fn proc_macro_quoted_spans(&self) -> Vec<Span> {
+ self.proc_macro_quoted_spans.lock().clone()
+ }
+
+ pub fn create_err<'a>(
+ &'a self,
+ err: impl SessionDiagnostic<'a>,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ err.into_diagnostic(self)
+ }
+
+ pub fn emit_err<'a>(&'a self, err: impl SessionDiagnostic<'a>) -> ErrorGuaranteed {
+ self.create_err(err).emit()
+ }
+
+ pub fn create_warning<'a>(
+ &'a self,
+ warning: impl SessionDiagnostic<'a, ()>,
+ ) -> DiagnosticBuilder<'a, ()> {
+ warning.into_diagnostic(self)
+ }
+
+ pub fn emit_warning<'a>(&'a self, warning: impl SessionDiagnostic<'a, ()>) {
+ self.create_warning(warning).emit()
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn struct_err(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ self.span_diagnostic.struct_err(msg)
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn struct_warn(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, ()> {
+ self.span_diagnostic.struct_warn(msg)
+ }
+}
diff --git a/compiler/rustc_session/src/search_paths.rs b/compiler/rustc_session/src/search_paths.rs
new file mode 100644
index 000000000..56a6b6f3b
--- /dev/null
+++ b/compiler/rustc_session/src/search_paths.rs
@@ -0,0 +1,93 @@
+use crate::filesearch::make_target_lib_path;
+use crate::{config, early_error};
+use std::path::{Path, PathBuf};
+
+#[derive(Clone, Debug)]
+pub struct SearchPath {
+ pub kind: PathKind,
+ pub dir: PathBuf,
+ pub files: Vec<SearchPathFile>,
+}
+
+/// The obvious implementation of `SearchPath::files` is a `Vec<PathBuf>`. But
+/// it is searched repeatedly by `find_library_crate`, and the searches involve
+/// checking the prefix and suffix of the filename of each `PathBuf`. This is
+/// doable, but very slow, because it involves calls to `file_name` and
+/// `extension` that are themselves slow.
+///
+/// This type augments the `PathBuf` with a `String` containing the
+/// `PathBuf`'s filename. The prefix and suffix checking is much faster on the
+/// `String` than the `PathBuf`. (The filename must be valid UTF-8. If it's
+/// not, the entry should be skipped, because all Rust output files are valid
+/// UTF-8, and so a non-UTF-8 filename couldn't be one we're looking for.)
+#[derive(Clone, Debug)]
+pub struct SearchPathFile {
+ pub path: PathBuf,
+ pub file_name_str: String,
+}
+
+#[derive(PartialEq, Clone, Copy, Debug, Hash, Eq, Encodable, Decodable, HashStable_Generic)]
+pub enum PathKind {
+ Native,
+ Crate,
+ Dependency,
+ Framework,
+ ExternFlag,
+ All,
+}
+
+impl PathKind {
+ pub fn matches(&self, kind: PathKind) -> bool {
+ match (self, kind) {
+ (PathKind::All, _) | (_, PathKind::All) => true,
+ _ => *self == kind,
+ }
+ }
+}
+
+impl SearchPath {
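+ /// Parses a `-L`-style argument of the form `[KIND=]PATH`, where `KIND` is
+ /// one of `native`, `crate`, `dependency`, `framework`, or `all`; a bare
+ /// path implies `all`. An empty path is reported as an early error.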
+ pub fn from_cli_opt(path: &str, output: config::ErrorOutputType) -> Self {
+ let (kind, path) = if let Some(stripped) = path.strip_prefix("native=") {
+ (PathKind::Native, stripped)
+ } else if let Some(stripped) = path.strip_prefix("crate=") {
+ (PathKind::Crate, stripped)
+ } else if let Some(stripped) = path.strip_prefix("dependency=") {
+ (PathKind::Dependency, stripped)
+ } else if let Some(stripped) = path.strip_prefix("framework=") {
+ (PathKind::Framework, stripped)
+ } else if let Some(stripped) = path.strip_prefix("all=") {
+ (PathKind::All, stripped)
+ } else {
+ (PathKind::All, path)
+ };
+ if path.is_empty() {
+ early_error(output, "empty search path given via `-L`");
+ }
+
+ let dir = PathBuf::from(path);
+ Self::new(kind, dir)
+ }
+
+ pub fn from_sysroot_and_triple(sysroot: &Path, triple: &str) -> Self {
+ Self::new(PathKind::All, make_target_lib_path(sysroot, triple))
+ }
+
+ fn new(kind: PathKind, dir: PathBuf) -> Self {
+ // Get the files within the directory.
+ let files = match std::fs::read_dir(&dir) {
+ Ok(files) => files
+ .filter_map(|e| {
+ e.ok().and_then(|e| {
+ e.file_name().to_str().map(|s| SearchPathFile {
+ path: e.path(),
+ file_name_str: s.to_string(),
+ })
+ })
+ })
+ .collect::<Vec<_>>(),
+ Err(..) => vec![],
+ };
+
+ SearchPath { kind, dir, files }
+ }
+}
diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs
new file mode 100644
index 000000000..9669287b3
--- /dev/null
+++ b/compiler/rustc_session/src/session.rs
@@ -0,0 +1,1599 @@
+use crate::cgu_reuse_tracker::CguReuseTracker;
+use crate::code_stats::CodeStats;
+pub use crate::code_stats::{DataTypeKind, FieldInfo, SizeKind, VariantInfo};
+use crate::config::{self, CrateType, InstrumentCoverage, OptLevel, OutputType, SwitchWithOptPath};
+use crate::parse::{add_feature_diagnostics, ParseSess};
+use crate::search_paths::{PathKind, SearchPath};
+use crate::{filesearch, lint};
+
+pub use rustc_ast::attr::MarkedAttrs;
+pub use rustc_ast::Attribute;
+use rustc_data_structures::flock;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::jobserver::{self, Client};
+use rustc_data_structures::profiling::{duration_to_secs_str, SelfProfiler, SelfProfilerRef};
+use rustc_data_structures::sync::{
+ self, AtomicU64, AtomicUsize, Lock, Lrc, OnceCell, OneThread, Ordering, Ordering::SeqCst,
+};
+use rustc_errors::annotate_snippet_emitter_writer::AnnotateSnippetEmitterWriter;
+use rustc_errors::emitter::{Emitter, EmitterWriter, HumanReadableErrorType};
+use rustc_errors::json::JsonEmitter;
+use rustc_errors::registry::Registry;
+use rustc_errors::{
+ fallback_fluent_bundle, DiagnosticBuilder, DiagnosticId, DiagnosticMessage, EmissionGuarantee,
+ ErrorGuaranteed, FluentBundle, LazyFallbackBundle, MultiSpan,
+};
+use rustc_macros::HashStable_Generic;
+pub use rustc_span::def_id::StableCrateId;
+use rustc_span::edition::Edition;
+use rustc_span::source_map::{FileLoader, RealFileLoader, SourceMap, Span};
+use rustc_span::{sym, SourceFileHashAlgorithm, Symbol};
+use rustc_target::asm::InlineAsmArch;
+use rustc_target::spec::{CodeModel, PanicStrategy, RelocModel, RelroLevel};
+use rustc_target::spec::{
+ SanitizerSet, SplitDebuginfo, StackProtector, Target, TargetTriple, TlsModel,
+};
+
+use std::cell::{self, RefCell};
+use std::env;
+use std::fmt;
+use std::io::Write;
+use std::ops::{Div, Mul};
+use std::path::{Path, PathBuf};
+use std::str::FromStr;
+use std::sync::Arc;
+use std::time::Duration;
+
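+/// Tracks the optimization fuel provided via `-Zfuel=crate=n`: each applied
+/// optimization consumes one unit of fuel, and once the fuel is exhausted all
+/// further optimizations are rejected, which is useful for bisecting the
+/// optimization that miscompiles a crate.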
+pub struct OptimizationFuel {
+ /// If `-zfuel=crate=n` is specified, initially set to `n`, otherwise `0`.
+ remaining: u64,
+ /// Whether we're rejecting all further optimizations.
+ out_of_fuel: bool,
+}
+
+/// The behavior of the CTFE engine, with regard to backtraces, when an error occurs.
+#[derive(Clone, Copy)]
+pub enum CtfeBacktrace {
+ /// Do nothing special, return the error as usual without a backtrace.
+ Disabled,
+ /// Capture a backtrace at the point the error is created and return it in the error
+ /// (to be printed later if/when the error ever actually gets shown to the user).
+ Capture,
+ /// Capture a backtrace at the point the error is created and immediately print it out.
+ Immediate,
+}
+
+/// New-type wrapper around `usize` for representing limits. Ensures that comparisons against
+/// limits are consistent throughout the compiler.
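+///
+/// An illustrative doctest-style sketch:
+///
+/// ```ignore (illustrative)
+/// let limit = Limit::new(128);
+/// assert!(limit.value_within_limit(128)); // the limit is inclusive
+/// assert!(!limit.value_within_limit(129));
+/// ```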
+#[derive(Clone, Copy, Debug, HashStable_Generic)]
+pub struct Limit(pub usize);
+
+impl Limit {
+ /// Create a new limit from a `usize`.
+ pub fn new(value: usize) -> Self {
+ Limit(value)
+ }
+
+ /// Check that `value` is within the limit. Ensures that the same comparisons are used
+ /// throughout the compiler, as mismatches can cause ICEs, see #72540.
+ #[inline]
+ pub fn value_within_limit(&self, value: usize) -> bool {
+ value <= self.0
+ }
+}
+
+impl From<usize> for Limit {
+ fn from(value: usize) -> Self {
+ Self::new(value)
+ }
+}
+
+impl fmt::Display for Limit {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl Div<usize> for Limit {
+ type Output = Limit;
+
+ fn div(self, rhs: usize) -> Self::Output {
+ Limit::new(self.0 / rhs)
+ }
+}
+
+impl Mul<usize> for Limit {
+ type Output = Limit;
+
+ fn mul(self, rhs: usize) -> Self::Output {
+ Limit::new(self.0 * rhs)
+ }
+}
+
+#[derive(Clone, Copy, Debug, HashStable_Generic)]
+pub struct Limits {
+ /// The maximum recursion limit for potentially infinitely recursive
+ /// operations such as auto-dereference and monomorphization.
+ pub recursion_limit: Limit,
+ /// The size at which the `large_assignments` lint starts
+ /// being emitted.
+ pub move_size_limit: Limit,
+ /// The maximum length of types during monomorphization.
+ pub type_length_limit: Limit,
+ /// The maximum number of blocks a const expression can evaluate.
+ pub const_eval_limit: Limit,
+}
+
+/// Represents the data associated with a compilation
+/// session for a single crate.
+pub struct Session {
+ pub target: Target,
+ pub host: Target,
+ pub opts: config::Options,
+ pub host_tlib_path: Lrc<SearchPath>,
+ pub target_tlib_path: Lrc<SearchPath>,
+ pub parse_sess: ParseSess,
+ pub sysroot: PathBuf,
+ /// The name of the root source file of the crate, in the local file system.
+ /// `None` means that there is no source file.
+ pub local_crate_source_file: Option<PathBuf>,
+
+ crate_types: OnceCell<Vec<CrateType>>,
+ /// The `stable_crate_id` is constructed out of the crate name and all the
+ /// `-C metadata` arguments passed to the compiler. Its value forms a unique
+ /// global identifier for the crate. It is used to allow multiple crates
+ /// with the same name to coexist. See the
+ /// `rustc_codegen_llvm::back::symbol_names` module for more information.
+ pub stable_crate_id: OnceCell<StableCrateId>,
+
+ features: OnceCell<rustc_feature::Features>,
+
+ incr_comp_session: OneThread<RefCell<IncrCompSession>>,
+ /// Used for incremental compilation tests. Will only be populated if
+ /// `-Zquery-dep-graph` is specified.
+ pub cgu_reuse_tracker: CguReuseTracker,
+
+ /// Used by `-Z self-profile`.
+ pub prof: SelfProfilerRef,
+
+ /// Some measurements that are being gathered during compilation.
+ pub perf_stats: PerfStats,
+
+ /// Data about code being compiled, gathered during compilation.
+ pub code_stats: CodeStats,
+
+ /// Tracks fuel info if `-zfuel=crate=n` is specified.
+ optimization_fuel: Lock<OptimizationFuel>,
+
+ /// Always set to zero and incremented so that we can print fuel expended by a crate.
+ pub print_fuel: AtomicU64,
+
+ /// Loaded early in the initialization of this `Session` to avoid
+ /// false positives about a job server in our environment.
+ pub jobserver: Client,
+
+ /// Caps on lint levels, specified explicitly by a driver.
+ pub driver_lint_caps: FxHashMap<lint::LintId, lint::Level>,
+
+ /// Tracks the current behavior of the CTFE engine when an error occurs.
+ /// Options range from returning the error without a backtrace to returning an error
+ /// and immediately printing the backtrace to stderr.
+ /// The `Lock` is only used by miri to allow setting `ctfe_backtrace` after analysis when
+ /// `MIRI_BACKTRACE` is set. This makes it only apply to miri's errors and not to all CTFE
+ /// errors.
+ pub ctfe_backtrace: Lock<CtfeBacktrace>,
+
+ /// This tracks where `-Zunleash-the-miri-inside-of-you` was used to get around a
+ /// const check, optionally with the relevant feature gate. We use this to
+ /// warn about unleashing, but with a single diagnostic instead of dozens that
+ /// drown everything else in noise.
+ miri_unleashed_features: Lock<Vec<(Span, Option<Symbol>)>>,
+
+ /// Architecture to use for interpreting `asm!`.
+ pub asm_arch: Option<InlineAsmArch>,
+
+ /// Set of enabled features for the current target.
+ pub target_features: FxHashSet<Symbol>,
+
+ /// Set of enabled features for the current target, including unstable ones.
+ pub unstable_target_features: FxHashSet<Symbol>,
+}
+
+pub struct PerfStats {
+ /// The accumulated time spent on computing symbol hashes.
+ pub symbol_hash_time: Lock<Duration>,
+ /// Total number of canonicalized queries constructed.
+ pub queries_canonicalized: AtomicUsize,
+ /// Number of times this query is invoked.
+ pub normalize_generic_arg_after_erasing_regions: AtomicUsize,
+ /// Number of times this query is invoked.
+ pub normalize_projection_ty: AtomicUsize,
+}
+
+/// Trait implemented by error types. This should not be implemented manually. Instead, use
+/// `#[derive(SessionDiagnostic)]` -- see [rustc_macros::SessionDiagnostic].
+#[rustc_diagnostic_item = "SessionDiagnostic"]
+pub trait SessionDiagnostic<'a, T: EmissionGuarantee = ErrorGuaranteed> {
+ /// Write `self` out as a diagnostic using `sess`.
+ #[must_use]
+ fn into_diagnostic(self, sess: &'a ParseSess) -> DiagnosticBuilder<'a, T>;
+}
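+
+// Illustrative: a value implementing `SessionDiagnostic` is typically emitted
+// via `Session::emit_err(err)`, or built first with `Session::create_err(err)`
+// and emitted later.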
+
+impl Session {
+ pub fn miri_unleashed_feature(&self, span: Span, feature_gate: Option<Symbol>) {
+ self.miri_unleashed_features.lock().push((span, feature_gate));
+ }
+
+ fn check_miri_unleashed_features(&self) {
+ let unleashed_features = self.miri_unleashed_features.lock();
+ if !unleashed_features.is_empty() {
+ let mut must_err = false;
+ // Create a diagnostic pointing at where things got unleashed.
+ let mut diag = self.struct_warn("skipping const checks");
+ for &(span, feature_gate) in unleashed_features.iter() {
+ // FIXME: `span_label` doesn't do anything, so we use "help" as a hack.
+ if let Some(gate) = feature_gate {
+ diag.span_help(span, &format!("skipping check for `{gate}` feature"));
+ // The unleash flag must *not* be used to just "hack around" feature gates.
+ must_err = true;
+ } else {
+ diag.span_help(span, "skipping check that does not even have a feature gate");
+ }
+ }
+ diag.emit();
+ // If we should err, make sure we did.
+ if must_err && self.has_errors().is_none() {
+ // We have skipped a feature gate, and not run into other errors... reject.
+ self.err(
+ "`-Zunleash-the-miri-inside-of-you` may not be used to circumvent feature \
+ gates, except when testing error paths in the CTFE engine",
+ );
+ }
+ }
+ }
+
+ /// Invoked all the way at the end to finish off diagnostics printing.
+ pub fn finish_diagnostics(&self, registry: &Registry) {
+ self.check_miri_unleashed_features();
+ self.diagnostic().print_error_count(registry);
+ self.emit_future_breakage();
+ }
+
+ fn emit_future_breakage(&self) {
+ if !self.opts.json_future_incompat {
+ return;
+ }
+
+ let diags = self.diagnostic().take_future_breakage_diagnostics();
+ if diags.is_empty() {
+ return;
+ }
+ self.parse_sess.span_diagnostic.emit_future_breakage_report(diags);
+ }
+
+ pub fn local_stable_crate_id(&self) -> StableCrateId {
+ self.stable_crate_id.get().copied().unwrap()
+ }
+
+ pub fn crate_types(&self) -> &[CrateType] {
+ self.crate_types.get().unwrap().as_slice()
+ }
+
+ pub fn init_crate_types(&self, crate_types: Vec<CrateType>) {
+ self.crate_types.set(crate_types).expect("`crate_types` was initialized twice")
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_warn<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_span_warn(sp, msg)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_warn_with_expectation<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ id: lint::LintExpectationId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_span_warn_with_expectation(sp, msg, id)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_warn_with_code<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_span_warn_with_code(sp, msg, code)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_warn(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_warn(msg)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_warn_with_expectation(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ id: lint::LintExpectationId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_warn_with_expectation(msg, id)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_allow<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_span_allow(sp, msg)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_allow(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_allow(msg)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_expect(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ id: lint::LintExpectationId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_expect(msg, id)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_err<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ self.diagnostic().struct_span_err(sp, msg)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_err_with_code<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ self.diagnostic().struct_span_err_with_code(sp, msg, code)
+ }
+ // FIXME: This method should be removed (every error should have an associated error code).
+ #[rustc_lint_diagnostics]
+ pub fn struct_err(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ self.parse_sess.struct_err(msg)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_err_with_code(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ self.diagnostic().struct_err_with_code(msg, code)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_warn_with_code(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_warn_with_code(msg, code)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_fatal<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, !> {
+ self.diagnostic().struct_span_fatal(sp, msg)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_span_fatal_with_code<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> DiagnosticBuilder<'_, !> {
+ self.diagnostic().struct_span_fatal_with_code(sp, msg, code)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn struct_fatal(&self, msg: impl Into<DiagnosticMessage>) -> DiagnosticBuilder<'_, !> {
+ self.diagnostic().struct_fatal(msg)
+ }
+
+ #[rustc_lint_diagnostics]
+ pub fn span_fatal<S: Into<MultiSpan>>(&self, sp: S, msg: impl Into<DiagnosticMessage>) -> ! {
+ self.diagnostic().span_fatal(sp, msg)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn span_fatal_with_code<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) -> ! {
+ self.diagnostic().span_fatal_with_code(sp, msg, code)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn fatal(&self, msg: impl Into<DiagnosticMessage>) -> ! {
+ self.diagnostic().fatal(msg).raise()
+ }
+ #[rustc_lint_diagnostics]
+ pub fn span_err_or_warn<S: Into<MultiSpan>>(
+ &self,
+ is_warning: bool,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ ) {
+ if is_warning {
+ self.span_warn(sp, msg);
+ } else {
+ self.span_err(sp, msg);
+ }
+ }
+ #[rustc_lint_diagnostics]
+ pub fn span_err<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> ErrorGuaranteed {
+ self.diagnostic().span_err(sp, msg)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn span_err_with_code<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) {
+ self.diagnostic().span_err_with_code(sp, msg, code)
+ }
+ #[rustc_lint_diagnostics]
+ pub fn err(&self, msg: impl Into<DiagnosticMessage>) -> ErrorGuaranteed {
+ self.diagnostic().err(msg)
+ }
+ pub fn create_err<'a>(
+ &'a self,
+ err: impl SessionDiagnostic<'a>,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ self.parse_sess.create_err(err)
+ }
+ pub fn create_feature_err<'a>(
+ &'a self,
+ err: impl SessionDiagnostic<'a>,
+ feature: Symbol,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut err = self.parse_sess.create_err(err);
+ add_feature_diagnostics(&mut err, &self.parse_sess, feature);
+ err
+ }
+ pub fn emit_err<'a>(&'a self, err: impl SessionDiagnostic<'a>) -> ErrorGuaranteed {
+ self.parse_sess.emit_err(err)
+ }
+ pub fn create_warning<'a>(
+ &'a self,
+ err: impl SessionDiagnostic<'a, ()>,
+ ) -> DiagnosticBuilder<'a, ()> {
+ self.parse_sess.create_warning(err)
+ }
+ pub fn emit_warning<'a>(&'a self, warning: impl SessionDiagnostic<'a, ()>) {
+ self.parse_sess.emit_warning(warning)
+ }
+ #[inline]
+ pub fn err_count(&self) -> usize {
+ self.diagnostic().err_count()
+ }
+ pub fn has_errors(&self) -> Option<ErrorGuaranteed> {
+ self.diagnostic().has_errors()
+ }
+ pub fn has_errors_or_delayed_span_bugs(&self) -> bool {
+ self.diagnostic().has_errors_or_delayed_span_bugs()
+ }
+ pub fn abort_if_errors(&self) {
+ self.diagnostic().abort_if_errors();
+ }
+ pub fn compile_status(&self) -> Result<(), ErrorGuaranteed> {
+ if let Some(reported) = self.diagnostic().has_errors_or_lint_errors() {
+ let _ = self.diagnostic().emit_stashed_diagnostics();
+ Err(reported)
+ } else {
+ Ok(())
+ }
+ }
+ // FIXME(matthewjasper) Remove this method, it should never be needed.
+ pub fn track_errors<F, T>(&self, f: F) -> Result<T, ErrorGuaranteed>
+ where
+ F: FnOnce() -> T,
+ {
+ let old_count = self.err_count();
+ let result = f();
+ if self.err_count() == old_count {
+ Ok(result)
+ } else {
+ Err(ErrorGuaranteed::unchecked_claim_error_was_emitted())
+ }
+ }
+ pub fn span_warn<S: Into<MultiSpan>>(&self, sp: S, msg: impl Into<DiagnosticMessage>) {
+ self.diagnostic().span_warn(sp, msg)
+ }
+ pub fn span_warn_with_code<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ code: DiagnosticId,
+ ) {
+ self.diagnostic().span_warn_with_code(sp, msg, code)
+ }
+ pub fn warn(&self, msg: impl Into<DiagnosticMessage>) {
+ self.diagnostic().warn(msg)
+ }
+ /// Delay a `span_bug()` call until `abort_if_errors()`.
+ #[track_caller]
+ pub fn delay_span_bug<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> ErrorGuaranteed {
+ self.diagnostic().delay_span_bug(sp, msg)
+ }
+
+ /// Used for code paths of expensive computations that should only take place when
+ /// warnings or errors are emitted. If no messages are emitted ("good path"), then
+ /// it's likely a bug.
+ pub fn delay_good_path_bug(&self, msg: impl Into<DiagnosticMessage>) {
+ if self.opts.unstable_opts.print_type_sizes
+ || self.opts.unstable_opts.query_dep_graph
+ || self.opts.unstable_opts.dump_mir.is_some()
+ || self.opts.unstable_opts.unpretty.is_some()
+ || self.opts.output_types.contains_key(&OutputType::Mir)
+ || std::env::var_os("RUSTC_LOG").is_some()
+ {
+ return;
+ }
+
+ self.diagnostic().delay_good_path_bug(msg)
+ }
+
+ pub fn note_without_error(&self, msg: impl Into<DiagnosticMessage>) {
+ self.diagnostic().note_without_error(msg)
+ }
+ pub fn span_note_without_error<S: Into<MultiSpan>>(
+ &self,
+ sp: S,
+ msg: impl Into<DiagnosticMessage>,
+ ) {
+ self.diagnostic().span_note_without_error(sp, msg)
+ }
+ pub fn struct_note_without_error(
+ &self,
+ msg: impl Into<DiagnosticMessage>,
+ ) -> DiagnosticBuilder<'_, ()> {
+ self.diagnostic().struct_note_without_error(msg)
+ }
+
+ #[inline]
+ pub fn diagnostic(&self) -> &rustc_errors::Handler {
+ &self.parse_sess.span_diagnostic
+ }
+
+ #[inline]
+ pub fn source_map(&self) -> &SourceMap {
+ self.parse_sess.source_map()
+ }
+
+ pub fn time_passes(&self) -> bool {
+ self.opts.time_passes()
+ }
+
+ /// Returns `true` if internal lints should be added to the lint store - i.e. if
+ /// `-Zunstable-options` is provided and this isn't rustdoc (internal lints can trigger errors
+ /// to be emitted under rustdoc).
+ pub fn enable_internal_lints(&self) -> bool {
+ self.unstable_options() && !self.opts.actually_rustdoc
+ }
+
+ pub fn instrument_coverage(&self) -> bool {
+ self.opts.cg.instrument_coverage() != InstrumentCoverage::Off
+ }
+
+ pub fn instrument_coverage_except_unused_generics(&self) -> bool {
+ self.opts.cg.instrument_coverage() == InstrumentCoverage::ExceptUnusedGenerics
+ }
+
+ pub fn instrument_coverage_except_unused_functions(&self) -> bool {
+ self.opts.cg.instrument_coverage() == InstrumentCoverage::ExceptUnusedFunctions
+ }
+
+ /// Gets the features enabled for the current compilation session.
+ /// DO NOT USE THIS METHOD if there is a TyCtxt available, as it circumvents
+ /// dependency tracking. Use tcx.features() instead.
+ #[inline]
+ pub fn features_untracked(&self) -> &rustc_feature::Features {
+ self.features.get().unwrap()
+ }
+
+ pub fn init_features(&self, features: rustc_feature::Features) {
+ match self.features.set(features) {
+ Ok(()) => {}
+ Err(_) => panic!("`features` was initialized twice"),
+ }
+ }
+
+ pub fn is_sanitizer_cfi_enabled(&self) -> bool {
+ self.opts.unstable_opts.sanitizer.contains(SanitizerSet::CFI)
+ }
+
+ /// Check whether this compile session and crate type use a static CRT.
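+ /// For example, `-C target-feature=+crt-static` requests a static CRT and
+ /// `-C target-feature=-crt-static` opts out; when neither is given, the
+ /// target's `crt_static_default` applies (a summary of the logic below).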
+ pub fn crt_static(&self, crate_type: Option<CrateType>) -> bool {
+ if !self.target.crt_static_respected {
+ // If the target does not opt in to crt-static support, use its default.
+ return self.target.crt_static_default;
+ }
+
+ let requested_features = self.opts.cg.target_feature.split(',');
+ let found_negative = requested_features.clone().any(|r| r == "-crt-static");
+ let found_positive = requested_features.clone().any(|r| r == "+crt-static");
+
+ // JUSTIFICATION: necessary use of crate_types directly (see FIXME below)
+ #[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+ if found_positive || found_negative {
+ found_positive
+ } else if crate_type == Some(CrateType::ProcMacro)
+ || crate_type == None && self.opts.crate_types.contains(&CrateType::ProcMacro)
+ {
+ // FIXME: When crate_type is not available,
+ // we use compiler options to determine the crate_type.
+ // We can't check `#![crate_type = "proc-macro"]` here.
+ false
+ } else {
+ self.target.crt_static_default
+ }
+ }
+
+ pub fn is_wasi_reactor(&self) -> bool {
+ self.target.options.os == "wasi"
+ && matches!(
+ self.opts.unstable_opts.wasi_exec_model,
+ Some(config::WasiExecModel::Reactor)
+ )
+ }
+
+ pub fn target_can_use_split_dwarf(&self) -> bool {
+ !self.target.is_like_windows && !self.target.is_like_osx
+ }
+
+ pub fn generate_proc_macro_decls_symbol(&self, stable_crate_id: StableCrateId) -> String {
+ format!("__rustc_proc_macro_decls_{:08x}__", stable_crate_id.to_u64())
+ }
+
+ pub fn target_filesearch(&self, kind: PathKind) -> filesearch::FileSearch<'_> {
+ filesearch::FileSearch::new(
+ &self.sysroot,
+ self.opts.target_triple.triple(),
+ &self.opts.search_paths,
+ &self.target_tlib_path,
+ kind,
+ )
+ }
+ pub fn host_filesearch(&self, kind: PathKind) -> filesearch::FileSearch<'_> {
+ filesearch::FileSearch::new(
+ &self.sysroot,
+ config::host_triple(),
+ &self.opts.search_paths,
+ &self.host_tlib_path,
+ kind,
+ )
+ }
+
+ /// Returns a list of directories where target-specific tool binaries are located.
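+ /// This is typically `$sysroot/lib/rustlib/<host-triple>/bin`, plus its
+ /// `self-contained` subdirectory when `self_contained` is true (per the
+ /// path construction below).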
+ pub fn get_tools_search_paths(&self, self_contained: bool) -> Vec<PathBuf> {
+ let rustlib_path = rustc_target::target_rustlib_path(&self.sysroot, &config::host_triple());
+ let p = PathBuf::from_iter([
+ Path::new(&self.sysroot),
+ Path::new(&rustlib_path),
+ Path::new("bin"),
+ ]);
+ if self_contained { vec![p.clone(), p.join("self-contained")] } else { vec![p] }
+ }
+
+ pub fn init_incr_comp_session(
+ &self,
+ session_dir: PathBuf,
+ lock_file: flock::Lock,
+ load_dep_graph: bool,
+ ) {
+ let mut incr_comp_session = self.incr_comp_session.borrow_mut();
+
+ if let IncrCompSession::NotInitialized = *incr_comp_session {
+ } else {
+ panic!("Trying to initialize IncrCompSession `{:?}`", *incr_comp_session)
+ }
+
+ *incr_comp_session =
+ IncrCompSession::Active { session_directory: session_dir, lock_file, load_dep_graph };
+ }
+
+ pub fn finalize_incr_comp_session(&self, new_directory_path: PathBuf) {
+ let mut incr_comp_session = self.incr_comp_session.borrow_mut();
+
+ if let IncrCompSession::Active { .. } = *incr_comp_session {
+ } else {
+ panic!("trying to finalize `IncrCompSession` `{:?}`", *incr_comp_session);
+ }
+
+ // Note: this will also drop the lock file, thus unlocking the directory.
+ *incr_comp_session = IncrCompSession::Finalized { session_directory: new_directory_path };
+ }
+
+ pub fn mark_incr_comp_session_as_invalid(&self) {
+ let mut incr_comp_session = self.incr_comp_session.borrow_mut();
+
+ let session_directory = match *incr_comp_session {
+ IncrCompSession::Active { ref session_directory, .. } => session_directory.clone(),
+ IncrCompSession::InvalidBecauseOfErrors { .. } => return,
+ _ => panic!("trying to invalidate `IncrCompSession` `{:?}`", *incr_comp_session),
+ };
+
+ // Note: this will also drop the lock file, thus unlocking the directory.
+ *incr_comp_session = IncrCompSession::InvalidBecauseOfErrors { session_directory };
+ }
+
+ pub fn incr_comp_session_dir(&self) -> cell::Ref<'_, PathBuf> {
+ let incr_comp_session = self.incr_comp_session.borrow();
+ cell::Ref::map(incr_comp_session, |incr_comp_session| match *incr_comp_session {
+ IncrCompSession::NotInitialized => panic!(
+ "trying to get session directory from `IncrCompSession`: {:?}",
+ *incr_comp_session,
+ ),
+ IncrCompSession::Active { ref session_directory, .. }
+ | IncrCompSession::Finalized { ref session_directory }
+ | IncrCompSession::InvalidBecauseOfErrors { ref session_directory } => {
+ session_directory
+ }
+ })
+ }
+
+ pub fn incr_comp_session_dir_opt(&self) -> Option<cell::Ref<'_, PathBuf>> {
+ self.opts.incremental.as_ref().map(|_| self.incr_comp_session_dir())
+ }
+
+ pub fn print_perf_stats(&self) {
+ eprintln!(
+ "Total time spent computing symbol hashes: {}",
+ duration_to_secs_str(*self.perf_stats.symbol_hash_time.lock())
+ );
+ eprintln!(
+ "Total queries canonicalized: {}",
+ self.perf_stats.queries_canonicalized.load(Ordering::Relaxed)
+ );
+ eprintln!(
+ "normalize_generic_arg_after_erasing_regions: {}",
+ self.perf_stats.normalize_generic_arg_after_erasing_regions.load(Ordering::Relaxed)
+ );
+ eprintln!(
+ "normalize_projection_ty: {}",
+ self.perf_stats.normalize_projection_ty.load(Ordering::Relaxed)
+ );
+ }
+
+ /// We want to know if we're allowed to do an optimization for crate `foo` from `-Z fuel=foo=n`.
+ /// This expends fuel if applicable, and records fuel usage if applicable.
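+ ///
+ /// For example, `-Z fuel=foo=10` lets the first 10 guarded optimizations in
+ /// crate `foo` proceed and rejects the rest; a pass would guard itself with
+ /// a call along the lines of
+ /// `sess.consider_optimizing("foo", || "merge basic blocks".to_string())`
+ /// (an illustrative sketch, not a specific call site).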
+ pub fn consider_optimizing<T: Fn() -> String>(&self, crate_name: &str, msg: T) -> bool {
+ let mut ret = true;
+ if let Some((ref c, _)) = self.opts.unstable_opts.fuel {
+ if c == crate_name {
+ assert_eq!(self.threads(), 1);
+ let mut fuel = self.optimization_fuel.lock();
+ ret = fuel.remaining != 0;
+ if fuel.remaining == 0 && !fuel.out_of_fuel {
+ if self.diagnostic().can_emit_warnings() {
+ // We only call `msg` in case we can actually emit warnings.
+ // Otherwise, this could cause a `delay_good_path_bug` to
+ // trigger (issue #79546).
+ self.warn(&format!("optimization-fuel-exhausted: {}", msg()));
+ }
+ fuel.out_of_fuel = true;
+ } else if fuel.remaining > 0 {
+ fuel.remaining -= 1;
+ }
+ }
+ }
+ if let Some(ref c) = self.opts.unstable_opts.print_fuel {
+ if c == crate_name {
+ assert_eq!(self.threads(), 1);
+ self.print_fuel.fetch_add(1, SeqCst);
+ }
+ }
+ ret
+ }
+
+ pub fn rust_2015(&self) -> bool {
+ self.edition() == Edition::Edition2015
+ }
+
+ /// Are we allowed to use features from the Rust 2018 edition?
+ pub fn rust_2018(&self) -> bool {
+ self.edition() >= Edition::Edition2018
+ }
+
+ /// Are we allowed to use features from the Rust 2021 edition?
+ pub fn rust_2021(&self) -> bool {
+ self.edition() >= Edition::Edition2021
+ }
+
+ /// Are we allowed to use features from the Rust 2024 edition?
+ pub fn rust_2024(&self) -> bool {
+ self.edition() >= Edition::Edition2024
+ }
+
+ /// Returns `true` if we cannot skip the PLT for shared library calls.
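+ /// The `-Z plt=<yes|no>` flag, if given, overrides the heuristic below.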
+ pub fn needs_plt(&self) -> bool {
+ // Check if the current target usually needs PLT to be enabled.
+ // The user can use the command line flag to override it.
+ let needs_plt = self.target.needs_plt;
+
+ let dbg_opts = &self.opts.unstable_opts;
+
+ let relro_level = dbg_opts.relro_level.unwrap_or(self.target.relro_level);
+
+ // Only enable this optimization by default if full relro is also enabled.
+ // In this case, lazy binding was already unavailable, so nothing is lost.
+ // This also ensures `-Wl,-z,now` is supported by the linker.
+ let full_relro = RelroLevel::Full == relro_level;
+
+ // If the user didn't explicitly force us to use or skip the PLT,
+ // then try to skip it where possible.
+ dbg_opts.plt.unwrap_or(needs_plt || !full_relro)
+ }
+
+ /// Checks if LLVM lifetime markers should be emitted.
+ pub fn emit_lifetime_markers(&self) -> bool {
+ self.opts.optimize != config::OptLevel::No
+ // AddressSanitizer uses lifetimes to detect use after scope bugs.
+ // MemorySanitizer uses lifetimes to detect use of uninitialized stack variables.
+ // HWAddressSanitizer will use lifetimes to detect use after scope bugs in the future.
+ || self.opts.unstable_opts.sanitizer.intersects(SanitizerSet::ADDRESS | SanitizerSet::MEMORY | SanitizerSet::HWADDRESS)
+ }
+
+ pub fn is_proc_macro_attr(&self, attr: &Attribute) -> bool {
+ [sym::proc_macro, sym::proc_macro_attribute, sym::proc_macro_derive]
+ .iter()
+ .any(|kind| attr.has_name(*kind))
+ }
+
+ pub fn contains_name(&self, attrs: &[Attribute], name: Symbol) -> bool {
+ attrs.iter().any(|item| item.has_name(name))
+ }
+
+ pub fn find_by_name<'a>(
+ &'a self,
+ attrs: &'a [Attribute],
+ name: Symbol,
+ ) -> Option<&'a Attribute> {
+ attrs.iter().find(|attr| attr.has_name(name))
+ }
+
+ pub fn filter_by_name<'a>(
+ &'a self,
+ attrs: &'a [Attribute],
+ name: Symbol,
+ ) -> impl Iterator<Item = &'a Attribute> {
+ attrs.iter().filter(move |attr| attr.has_name(name))
+ }
+
+ pub fn first_attr_value_str_by_name(
+ &self,
+ attrs: &[Attribute],
+ name: Symbol,
+ ) -> Option<Symbol> {
+ attrs.iter().find(|at| at.has_name(name)).and_then(|at| at.value_str())
+ }
+}
+
+// JUSTIFICATION: defn of the suggested wrapper fns
+#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+impl Session {
+ pub fn verbose(&self) -> bool {
+ self.opts.unstable_opts.verbose
+ }
+
+ pub fn instrument_mcount(&self) -> bool {
+ self.opts.unstable_opts.instrument_mcount
+ }
+
+ pub fn time_llvm_passes(&self) -> bool {
+ self.opts.unstable_opts.time_llvm_passes
+ }
+
+ pub fn meta_stats(&self) -> bool {
+ self.opts.unstable_opts.meta_stats
+ }
+
+ pub fn asm_comments(&self) -> bool {
+ self.opts.unstable_opts.asm_comments
+ }
+
+ pub fn verify_llvm_ir(&self) -> bool {
+ self.opts.unstable_opts.verify_llvm_ir || option_env!("RUSTC_VERIFY_LLVM_IR").is_some()
+ }
+
+ pub fn print_llvm_passes(&self) -> bool {
+ self.opts.unstable_opts.print_llvm_passes
+ }
+
+ pub fn binary_dep_depinfo(&self) -> bool {
+ self.opts.unstable_opts.binary_dep_depinfo
+ }
+
+ pub fn mir_opt_level(&self) -> usize {
+ self.opts
+ .unstable_opts
+ .mir_opt_level
+ .unwrap_or_else(|| if self.opts.optimize != OptLevel::No { 2 } else { 1 })
+ }
+
+ /// Calculates the flavor of LTO to use for this compilation.
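+ ///
+ /// For example, per the logic below: `-C lto` or `-C lto=fat` yields
+ /// `Lto::Fat`; `-C lto=thin` yields `Lto::Thin` (or `Lto::Fat` when ThinLTO
+ /// was forced off); and an optimized build with multiple codegen units and
+ /// no LTO flags defaults to `Lto::ThinLocal`.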
+ pub fn lto(&self) -> config::Lto {
+ // If our target has codegen requirements, ignore the command line
+ if self.target.requires_lto {
+ return config::Lto::Fat;
+ }
+
+ // If the user specified something, return that. If they only said `-C
+ // lto` and we've for whatever reason forced off ThinLTO via the CLI,
+ // then ensure we can't use ThinLTO.
+ match self.opts.cg.lto {
+ config::LtoCli::Unspecified => {
+ // The compiler was invoked without the `-Clto` flag. Fall
+ // through to the default handling
+ }
+ config::LtoCli::No => {
+ // The user explicitly opted out of any kind of LTO
+ return config::Lto::No;
+ }
+ config::LtoCli::Yes | config::LtoCli::Fat | config::LtoCli::NoParam => {
+ // All of these mean fat LTO
+ return config::Lto::Fat;
+ }
+ config::LtoCli::Thin => {
+ return if self.opts.cli_forced_thinlto_off {
+ config::Lto::Fat
+ } else {
+ config::Lto::Thin
+ };
+ }
+ }
+
+ // Ok at this point the target doesn't require anything and the user
+ // hasn't asked for anything. Our next decision is whether or not
+ // we enable "auto" ThinLTO where we use multiple codegen units and
+ // then do ThinLTO over those codegen units. The logic below will
+ // either return `No` or `ThinLocal`.
+
+ // If processing command line options determined that we're incompatible
+ // with ThinLTO (e.g., `-C lto --emit llvm-ir`), then respect that and return `No`.
+ if self.opts.cli_forced_thinlto_off {
+ return config::Lto::No;
+ }
+
+ // If `-Z thinlto` was specified, process that, but note that this is mostly
+ // a deprecated option now that `-C lto=thin` exists.
+ if let Some(enabled) = self.opts.unstable_opts.thinlto {
+ if enabled {
+ return config::Lto::ThinLocal;
+ } else {
+ return config::Lto::No;
+ }
+ }
+
+ // If there's only one codegen unit and LTO isn't enabled then there's
+ // no need for ThinLTO, so just return `No`.
+ if self.codegen_units() == 1 {
+ return config::Lto::No;
+ }
+
+ // Now we're in "defaults" territory. By default we enable ThinLTO for
+ // optimized compiles (anything greater than O0).
+ match self.opts.optimize {
+ config::OptLevel::No => config::Lto::No,
+ _ => config::Lto::ThinLocal,
+ }
+ }
+
+ /// Returns the panic strategy for this compile session. If the user explicitly selected one
+ /// using '-C panic', use that, otherwise use the panic strategy defined by the target.
+ pub fn panic_strategy(&self) -> PanicStrategy {
+ self.opts.cg.panic.unwrap_or(self.target.panic_strategy)
+ }
+
+ pub fn fewer_names(&self) -> bool {
+ if let Some(fewer_names) = self.opts.unstable_opts.fewer_names {
+ fewer_names
+ } else {
+ let more_names = self.opts.output_types.contains_key(&OutputType::LlvmAssembly)
+ || self.opts.output_types.contains_key(&OutputType::Bitcode)
+ // AddressSanitizer and MemorySanitizer use alloca name when reporting an issue.
+ || self.opts.unstable_opts.sanitizer.intersects(SanitizerSet::ADDRESS | SanitizerSet::MEMORY);
+ !more_names
+ }
+ }
+
+ pub fn unstable_options(&self) -> bool {
+ self.opts.unstable_opts.unstable_options
+ }
+
+ pub fn is_nightly_build(&self) -> bool {
+ self.opts.unstable_features.is_nightly_build()
+ }
+
+ pub fn overflow_checks(&self) -> bool {
+ self.opts.cg.overflow_checks.unwrap_or(self.opts.debug_assertions)
+ }
+
+ pub fn relocation_model(&self) -> RelocModel {
+ self.opts.cg.relocation_model.unwrap_or(self.target.relocation_model)
+ }
+
+ pub fn code_model(&self) -> Option<CodeModel> {
+ self.opts.cg.code_model.or(self.target.code_model)
+ }
+
+ pub fn tls_model(&self) -> TlsModel {
+ self.opts.unstable_opts.tls_model.unwrap_or(self.target.tls_model)
+ }
+
+ pub fn split_debuginfo(&self) -> SplitDebuginfo {
+ self.opts.cg.split_debuginfo.unwrap_or(self.target.split_debuginfo)
+ }
+
+ pub fn stack_protector(&self) -> StackProtector {
+ if self.target.options.supports_stack_protector {
+ self.opts.unstable_opts.stack_protector
+ } else {
+ StackProtector::None
+ }
+ }
+
+ pub fn must_emit_unwind_tables(&self) -> bool {
+ // This is used to control the emission of the `uwtable` attribute on
+ // LLVM functions.
+ //
+ // Unwind tables are needed when compiling with `-C panic=unwind`, but
+ // LLVM won't omit unwind tables unless the function is also marked as
+ // `nounwind`, so users are allowed to disable `uwtable` emission.
+ // Historically rustc has emitted `uwtable` attributes by default, so
+ // even though they can be disabled, they're still emitted by default.
+ //
+ // On some targets (including windows), however, exceptions include
+ // other events such as illegal instructions, segfaults, etc. This means
+ // that on Windows we end up still needing unwind tables even if the `-C
+ // panic=abort` flag is passed.
+ //
+ // You can also find more info on why Windows needs unwind tables in:
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
+ //
+ // If a target requires unwind tables, then they must be emitted.
+ // Otherwise, we can defer to the `-C force-unwind-tables=<yes/no>`
+ // value, if it is provided, or disable them, if not.
+ self.target.requires_uwtable
+ || self.opts.cg.force_unwind_tables.unwrap_or(
+ self.panic_strategy() == PanicStrategy::Unwind || self.target.default_uwtable,
+ )
+ }
+
+ /// Returns the number of query threads that should be used for this
+ /// compilation
+ pub fn threads(&self) -> usize {
+ self.opts.unstable_opts.threads
+ }
+
+ /// Returns the number of codegen units that should be used for this
+ /// compilation
+ pub fn codegen_units(&self) -> usize {
+ if let Some(n) = self.opts.cli_forced_codegen_units {
+ return n;
+ }
+ if let Some(n) = self.target.default_codegen_units {
+ return n as usize;
+ }
+
+ // If incremental compilation is turned on, we default to a high number
+ // of codegen units in order to reduce the "collateral damage" that small
+ // changes cause.
+ if self.opts.incremental.is_some() {
+ return 256;
+ }
+
+ // Why is 16 codegen units the default all the time?
+ //
+ // The main reason for enabling multiple codegen units by default is to
+ // leverage the ability for the codegen backend to do codegen and
+ // optimization in parallel. This allows us, especially for large crates, to
+ // make good use of all available resources on the machine once we've
+ // hit that stage of compilation. Large crates especially then often
+ // take a long time in codegen/optimization and this helps us amortize that
+ // cost.
+ //
+ // Note that a high number here doesn't mean that we'll be spawning a
+ // large number of threads in parallel. The backend of rustc contains
+ // global rate limiting through the `jobserver` crate so we'll never
+ // overload the system with too much work, but rather we'll only be
+ // optimizing when we're otherwise cooperating with other instances of
+ // rustc.
+ //
+ // Rather, a high number here means that we should be able to keep a lot
+ // of idle cpus busy. By ensuring that no codegen unit takes *too* long
+ // to build, we'll be guaranteed that all cpus will finish pretty closely
+ // to one another and we should make relatively optimal use of system
+ // resources.
+ //
+ // Note that the main cost of codegen units is that it prevents LLVM
+ // from inlining across codegen units. Users in general don't have a lot
+ // of control over how codegen units are split up so it's our job in the
+ // compiler to ensure that undue performance isn't lost when using
+ // codegen units (aka we can't require everyone to slap `#[inline]` on
+ // everything).
+ //
+ // If we're compiling at `-O0` then the number doesn't really matter too
+ // much because performance doesn't matter and inlining is ok to lose.
+ // In debug mode we just want to try to guarantee that no cpu is stuck
+ // doing work that could otherwise be farmed to others.
+ //
+ // In release mode, however (O1 and above) performance does indeed
+ // matter! To recover the loss in performance due to inlining we'll be
+ // enabling ThinLTO by default (the function for which is just below).
+ // This will ensure that we recover any inlining wins we otherwise lost
+ // through codegen unit partitioning.
+ //
+ // ---
+ //
+ // Ok, that's a lot of words, but the basic tl;dr is that we want a high
+ // number here -- but not too high. Additionally we're "safe" to have it
+ // always at the same number at all optimization levels.
+ //
+ // As a result 16 was chosen here! Mostly because it was a power of 2
+ // and most benchmarks agreed it was roughly a local optimum. Not very
+ // scientific.
+ 16
+ }
+
+ pub fn teach(&self, code: &DiagnosticId) -> bool {
+ self.opts.unstable_opts.teach && self.diagnostic().must_teach(code)
+ }
+
+ pub fn edition(&self) -> Edition {
+ self.opts.edition
+ }
+
+ pub fn link_dead_code(&self) -> bool {
+ self.opts.cg.link_dead_code.unwrap_or(false)
+ }
+}
+
+// JUSTIFICATION: part of session construction
+#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+fn default_emitter(
+ sopts: &config::Options,
+ registry: rustc_errors::registry::Registry,
+ source_map: Lrc<SourceMap>,
+ bundle: Option<Lrc<FluentBundle>>,
+ fallback_bundle: LazyFallbackBundle,
+ emitter_dest: Option<Box<dyn Write + Send>>,
+) -> Box<dyn Emitter + sync::Send> {
+ let macro_backtrace = sopts.unstable_opts.macro_backtrace;
+ match (sopts.error_format, emitter_dest) {
+ (config::ErrorOutputType::HumanReadable(kind), dst) => {
+ let (short, color_config) = kind.unzip();
+
+ if let HumanReadableErrorType::AnnotateSnippet(_) = kind {
+ let emitter = AnnotateSnippetEmitterWriter::new(
+ Some(source_map),
+ bundle,
+ fallback_bundle,
+ short,
+ macro_backtrace,
+ );
+ Box::new(emitter.ui_testing(sopts.unstable_opts.ui_testing))
+ } else {
+ let emitter = match dst {
+ None => EmitterWriter::stderr(
+ color_config,
+ Some(source_map),
+ bundle,
+ fallback_bundle,
+ short,
+ sopts.unstable_opts.teach,
+ sopts.diagnostic_width,
+ macro_backtrace,
+ ),
+ Some(dst) => EmitterWriter::new(
+ dst,
+ Some(source_map),
+ bundle,
+ fallback_bundle,
+ short,
+ false, // no teach messages when writing to a buffer
+ false, // no colors when writing to a buffer
+ None, // no diagnostic width
+ macro_backtrace,
+ ),
+ };
+ Box::new(emitter.ui_testing(sopts.unstable_opts.ui_testing))
+ }
+ }
+ (config::ErrorOutputType::Json { pretty, json_rendered }, None) => Box::new(
+ JsonEmitter::stderr(
+ Some(registry),
+ source_map,
+ bundle,
+ fallback_bundle,
+ pretty,
+ json_rendered,
+ sopts.diagnostic_width,
+ macro_backtrace,
+ )
+ .ui_testing(sopts.unstable_opts.ui_testing),
+ ),
+ (config::ErrorOutputType::Json { pretty, json_rendered }, Some(dst)) => Box::new(
+ JsonEmitter::new(
+ dst,
+ Some(registry),
+ source_map,
+ bundle,
+ fallback_bundle,
+ pretty,
+ json_rendered,
+ sopts.diagnostic_width,
+ macro_backtrace,
+ )
+ .ui_testing(sopts.unstable_opts.ui_testing),
+ ),
+ }
+}
+
+pub enum DiagnosticOutput {
+ Default,
+ Raw(Box<dyn Write + Send>),
+}
+
+// JUSTIFICATION: literally session construction
+#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+pub fn build_session(
+ sopts: config::Options,
+ local_crate_source_file: Option<PathBuf>,
+ bundle: Option<Lrc<rustc_errors::FluentBundle>>,
+ registry: rustc_errors::registry::Registry,
+ diagnostics_output: DiagnosticOutput,
+ driver_lint_caps: FxHashMap<lint::LintId, lint::Level>,
+ file_loader: Option<Box<dyn FileLoader + Send + Sync + 'static>>,
+ target_override: Option<Target>,
+) -> Session {
+ // FIXME: This is not general enough to make the warning lint completely override
+ // normal diagnostic warnings, since the warning lint can also be denied and changed
+ // later via the source code.
+ let warnings_allow = sopts
+ .lint_opts
+ .iter()
+ .filter(|&&(ref key, _)| *key == "warnings")
+ .map(|&(_, ref level)| *level == lint::Allow)
+ .last()
+ .unwrap_or(false);
+ let cap_lints_allow = sopts.lint_cap.map_or(false, |cap| cap == lint::Allow);
+ let can_emit_warnings = !(warnings_allow || cap_lints_allow);
+
+ let write_dest = match diagnostics_output {
+ DiagnosticOutput::Default => None,
+ DiagnosticOutput::Raw(write) => Some(write),
+ };
+
+ let sysroot = match &sopts.maybe_sysroot {
+ Some(sysroot) => sysroot.clone(),
+ None => filesearch::get_or_default_sysroot(),
+ };
+
+ let target_cfg = config::build_target_config(&sopts, target_override, &sysroot);
+ let host_triple = TargetTriple::from_triple(config::host_triple());
+ let (host, target_warnings) = Target::search(&host_triple, &sysroot).unwrap_or_else(|e| {
+ early_error(sopts.error_format, &format!("Error loading host specification: {e}"))
+ });
+ for warning in target_warnings.warning_messages() {
+ early_warn(sopts.error_format, &warning)
+ }
+
+ let loader = file_loader.unwrap_or_else(|| Box::new(RealFileLoader));
+ let hash_kind = sopts.unstable_opts.src_hash_algorithm.unwrap_or_else(|| {
+ if target_cfg.is_like_msvc {
+ SourceFileHashAlgorithm::Sha1
+ } else {
+ SourceFileHashAlgorithm::Md5
+ }
+ });
+ let source_map = Lrc::new(SourceMap::with_file_loader_and_hash_kind(
+ loader,
+ sopts.file_path_mapping(),
+ hash_kind,
+ ));
+
+ let fallback_bundle = fallback_fluent_bundle(
+ rustc_errors::DEFAULT_LOCALE_RESOURCES,
+ sopts.unstable_opts.translate_directionality_markers,
+ );
+ let emitter =
+ default_emitter(&sopts, registry, source_map.clone(), bundle, fallback_bundle, write_dest);
+
+ let span_diagnostic = rustc_errors::Handler::with_emitter_and_flags(
+ emitter,
+ sopts.unstable_opts.diagnostic_handler_flags(can_emit_warnings),
+ );
+
+ let self_profiler = if let SwitchWithOptPath::Enabled(ref d) = sopts.unstable_opts.self_profile
+ {
+ let directory =
+ if let Some(ref directory) = d { directory } else { std::path::Path::new(".") };
+
+ let profiler = SelfProfiler::new(
+ directory,
+ sopts.crate_name.as_deref(),
+ sopts.unstable_opts.self_profile_events.as_ref().map(|xs| &xs[..]),
+ &sopts.unstable_opts.self_profile_counter,
+ );
+ match profiler {
+ Ok(profiler) => Some(Arc::new(profiler)),
+ Err(e) => {
+ early_warn(sopts.error_format, &format!("failed to create profiler: {e}"));
+ None
+ }
+ }
+ } else {
+ None
+ };
+
+ let mut parse_sess = ParseSess::with_span_handler(span_diagnostic, source_map);
+ parse_sess.assume_incomplete_release = sopts.unstable_opts.assume_incomplete_release;
+
+ let host_triple = config::host_triple();
+ let target_triple = sopts.target_triple.triple();
+ let host_tlib_path = Lrc::new(SearchPath::from_sysroot_and_triple(&sysroot, host_triple));
+ let target_tlib_path = if host_triple == target_triple {
+ // Use the same `SearchPath` if host and target triple are identical to avoid unnecessary
+ // rescanning of the target lib path and an unnecessary allocation.
+ host_tlib_path.clone()
+ } else {
+ Lrc::new(SearchPath::from_sysroot_and_triple(&sysroot, target_triple))
+ };
+
+ let file_path_mapping = sopts.file_path_mapping();
+
+ let local_crate_source_file =
+ local_crate_source_file.map(|path| file_path_mapping.map_prefix(path).0);
+
+ let optimization_fuel = Lock::new(OptimizationFuel {
+ remaining: sopts.unstable_opts.fuel.as_ref().map_or(0, |i| i.1),
+ out_of_fuel: false,
+ });
+ let print_fuel = AtomicU64::new(0);
+
+ let cgu_reuse_tracker = if sopts.unstable_opts.query_dep_graph {
+ CguReuseTracker::new()
+ } else {
+ CguReuseTracker::new_disabled()
+ };
+
+ let prof =
+ SelfProfilerRef::new(self_profiler, sopts.time_passes(), sopts.unstable_opts.time_passes);
+
+ let ctfe_backtrace = Lock::new(match env::var("RUSTC_CTFE_BACKTRACE") {
+ Ok(ref val) if val == "immediate" => CtfeBacktrace::Immediate,
+ Ok(ref val) if val != "0" => CtfeBacktrace::Capture,
+ _ => CtfeBacktrace::Disabled,
+ });
+
+ let asm_arch =
+ if target_cfg.allow_asm { InlineAsmArch::from_str(&target_cfg.arch).ok() } else { None };
+
+ let sess = Session {
+ target: target_cfg,
+ host,
+ opts: sopts,
+ host_tlib_path,
+ target_tlib_path,
+ parse_sess,
+ sysroot,
+ local_crate_source_file,
+ crate_types: OnceCell::new(),
+ stable_crate_id: OnceCell::new(),
+ features: OnceCell::new(),
+ incr_comp_session: OneThread::new(RefCell::new(IncrCompSession::NotInitialized)),
+ cgu_reuse_tracker,
+ prof,
+ perf_stats: PerfStats {
+ symbol_hash_time: Lock::new(Duration::from_secs(0)),
+ queries_canonicalized: AtomicUsize::new(0),
+ normalize_generic_arg_after_erasing_regions: AtomicUsize::new(0),
+ normalize_projection_ty: AtomicUsize::new(0),
+ },
+ code_stats: Default::default(),
+ optimization_fuel,
+ print_fuel,
+ jobserver: jobserver::client(),
+ driver_lint_caps,
+ ctfe_backtrace,
+ miri_unleashed_features: Lock::new(Default::default()),
+ asm_arch,
+ target_features: FxHashSet::default(),
+ unstable_target_features: FxHashSet::default(),
+ };
+
+ validate_commandline_args_with_session_available(&sess);
+
+ sess
+}
+
+/// Validate command line arguments with a `Session`.
+///
+/// If it is useful to have a `Session` available already when validating a command-line argument,
+/// that validation can be done here.
+// JUSTIFICATION: needs to access args to validate them
+#[cfg_attr(not(bootstrap), allow(rustc::bad_opt_access))]
+fn validate_commandline_args_with_session_available(sess: &Session) {
+ // Since we don't know if code in an rlib will be linked to statically or
+ // dynamically downstream, rustc generates `__imp_` symbols that help linkers
+ // on Windows deal with this lack of knowledge (#27438). Unfortunately,
+ // these manually generated symbols confuse LLD when it tries to merge
+ // bitcode during ThinLTO. Therefore we disallow dynamic linking on Windows
+ // when compiling for LLD ThinLTO. This way we can validly just not generate
+ // the `dllimport` attributes and `__imp_` symbols in that case.
+ if sess.opts.cg.linker_plugin_lto.enabled()
+ && sess.opts.cg.prefer_dynamic
+ && sess.target.is_like_windows
+ {
+ sess.err(
+ "Linker plugin based LTO is not supported together with \
+ `-C prefer-dynamic` when targeting Windows-like targets",
+ );
+ }
+
+ // Make sure that any given profiling data actually exists so LLVM can't
+ // decide to silently skip PGO.
+ if let Some(ref path) = sess.opts.cg.profile_use {
+ if !path.exists() {
+ sess.err(&format!(
+ "File `{}` passed to `-C profile-use` does not exist.",
+ path.display()
+ ));
+ }
+ }
+
+ // Do the same for sample profile data.
+ if let Some(ref path) = sess.opts.unstable_opts.profile_sample_use {
+ if !path.exists() {
+ sess.err(&format!(
+ "File `{}` passed to `-C profile-sample-use` does not exist.",
+ path.display()
+ ));
+ }
+ }
+
+ // Unwind tables cannot be disabled if the target requires them.
+ if let Some(include_uwtables) = sess.opts.cg.force_unwind_tables {
+ if sess.target.requires_uwtable && !include_uwtables {
+ sess.err(
+ "target requires unwind tables, they cannot be disabled with \
+ `-C force-unwind-tables=no`.",
+ );
+ }
+ }
+
+ // Sanitizers can only be used on platforms that we know have working sanitizer codegen.
+ let supported_sanitizers = sess.target.options.supported_sanitizers;
+ let unsupported_sanitizers = sess.opts.unstable_opts.sanitizer - supported_sanitizers;
+ match unsupported_sanitizers.into_iter().count() {
+ 0 => {}
+ 1 => {
+ sess.err(&format!(
+ "{} sanitizer is not supported for this target",
+ unsupported_sanitizers
+ ));
+ }
+ _ => {
+ sess.err(&format!(
+ "{} sanitizers are not supported for this target",
+ unsupported_sanitizers
+ ));
+ }
+ }
+ // Cannot mix and match sanitizers.
+ let mut sanitizer_iter = sess.opts.unstable_opts.sanitizer.into_iter();
+ if let (Some(first), Some(second)) = (sanitizer_iter.next(), sanitizer_iter.next()) {
+ sess.err(&format!("`-Zsanitizer={first}` is incompatible with `-Zsanitizer={second}`"));
+ }
+
+ // Cannot enable crt-static with sanitizers on Linux
+ if sess.crt_static(None) && !sess.opts.unstable_opts.sanitizer.is_empty() {
+ sess.err(
+ "sanitizer is incompatible with statically linked libc, \
+ disable it using `-C target-feature=-crt-static`",
+ );
+ }
+
+ // LLVM CFI and VFE both require LTO.
+ if sess.lto() != config::Lto::Fat {
+ if sess.is_sanitizer_cfi_enabled() {
+ sess.err("`-Zsanitizer=cfi` requires `-Clto`");
+ }
+ if sess.opts.unstable_opts.virtual_function_elimination {
+ sess.err("`-Zvirtual-function-elimination` requires `-Clto`");
+ }
+ }
+
+ if sess.opts.unstable_opts.stack_protector != StackProtector::None {
+ if !sess.target.options.supports_stack_protector {
+ sess.warn(&format!(
+ "`-Z stack-protector={}` is not supported for target {} and will be ignored",
+ sess.opts.unstable_opts.stack_protector, sess.opts.target_triple
+ ))
+ }
+ }
+
+ if let Some(dwarf_version) = sess.opts.unstable_opts.dwarf_version {
+ if dwarf_version > 5 {
+ sess.err(&format!("requested DWARF version {} is greater than 5", dwarf_version));
+ }
+ }
+}
+
+/// Holds data on the current incremental compilation session, if there is one.
+#[derive(Debug)]
+pub enum IncrCompSession {
+ /// This is the state the session will be in until the incr. comp. dir is
+ /// needed.
+ NotInitialized,
+ /// This is the state during which the session directory is private and can
+ /// be modified.
+ Active { session_directory: PathBuf, lock_file: flock::Lock, load_dep_graph: bool },
+ /// This is the state after the session directory has been finalized. In this
+ /// state, the contents of the directory must not be modified any more.
+ Finalized { session_directory: PathBuf },
+ /// This is an error state that is reached when some compilation error has
+ /// occurred. It indicates that the contents of the session directory must
+ /// not be used, since they might be invalid.
+ InvalidBecauseOfErrors { session_directory: PathBuf },
+}
+
+fn early_error_handler(output: config::ErrorOutputType) -> rustc_errors::Handler {
+ let fallback_bundle = fallback_fluent_bundle(rustc_errors::DEFAULT_LOCALE_RESOURCES, false);
+ let emitter: Box<dyn Emitter + sync::Send> = match output {
+ config::ErrorOutputType::HumanReadable(kind) => {
+ let (short, color_config) = kind.unzip();
+ Box::new(EmitterWriter::stderr(
+ color_config,
+ None,
+ None,
+ fallback_bundle,
+ short,
+ false,
+ None,
+ false,
+ ))
+ }
+ config::ErrorOutputType::Json { pretty, json_rendered } => {
+ Box::new(JsonEmitter::basic(pretty, json_rendered, None, fallback_bundle, None, false))
+ }
+ };
+ rustc_errors::Handler::with_emitter(true, None, emitter)
+}
+
+pub fn early_error_no_abort(output: config::ErrorOutputType, msg: &str) -> ErrorGuaranteed {
+ early_error_handler(output).struct_err(msg).emit()
+}
+
+pub fn early_error(output: config::ErrorOutputType, msg: &str) -> ! {
+ early_error_handler(output).struct_fatal(msg).emit()
+}
+
+pub fn early_warn(output: config::ErrorOutputType, msg: &str) {
+ early_error_handler(output).struct_warn(msg).emit()
+}
diff --git a/compiler/rustc_session/src/utils.rs b/compiler/rustc_session/src/utils.rs
new file mode 100644
index 000000000..9a4f6f9f9
--- /dev/null
+++ b/compiler/rustc_session/src/utils.rs
@@ -0,0 +1,93 @@
+use crate::session::Session;
+use rustc_data_structures::profiling::VerboseTimingGuard;
+use std::path::{Path, PathBuf};
+
+impl Session {
+ pub fn timer<'a>(&'a self, what: &'static str) -> VerboseTimingGuard<'a> {
+ self.prof.verbose_generic_activity(what)
+ }
+ pub fn time<R>(&self, what: &'static str, f: impl FnOnce() -> R) -> R {
+ self.prof.verbose_generic_activity(what).run(f)
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+#[derive(HashStable_Generic)]
+pub enum NativeLibKind {
+ /// Static library (e.g. `libfoo.a` on Linux or `foo.lib` on Windows/MSVC)
+ Static {
+ /// Whether to bundle objects from static library into produced rlib
+ bundle: Option<bool>,
+ /// Whether to link the static library without throwing any object files away
+ whole_archive: Option<bool>,
+ },
+ /// Dynamic library (e.g. `libfoo.so` on Linux)
+ /// or an import library corresponding to a dynamic library (e.g. `foo.lib` on Windows/MSVC).
+ Dylib {
+ /// Whether the dynamic library will be linked only if it satisfies some undefined symbols
+ as_needed: Option<bool>,
+ },
+ /// Dynamic library (e.g. `foo.dll` on Windows) without a corresponding import library.
+ RawDylib,
+ /// A macOS-specific kind of dynamic library.
+ Framework {
+ /// Whether the framework will be linked only if it satisfies some undefined symbols
+ as_needed: Option<bool>,
+ },
+ /// Argument which is passed to the linker; its relative order with respect to
+ /// libraries and other arguments is preserved
+ LinkArg,
+ /// The library kind wasn't specified, `Dylib` is currently used as a default.
+ Unspecified,
+}
+
+impl NativeLibKind {
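+ /// Returns `true` when any linking modifier was set explicitly. For example,
+ /// `-l static:+whole-archive=foo` maps (roughly) to
+ /// `Static { bundle: None, whole_archive: Some(true) }`, which has modifiers.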
+ pub fn has_modifiers(&self) -> bool {
+ match self {
+ NativeLibKind::Static { bundle, whole_archive } => {
+ bundle.is_some() || whole_archive.is_some()
+ }
+ NativeLibKind::Dylib { as_needed } | NativeLibKind::Framework { as_needed } => {
+ as_needed.is_some()
+ }
+ NativeLibKind::RawDylib | NativeLibKind::Unspecified | NativeLibKind::LinkArg => false,
+ }
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+#[derive(HashStable_Generic)]
+pub struct NativeLib {
+ pub name: String,
+ pub new_name: Option<String>,
+ pub kind: NativeLibKind,
+ pub verbatim: Option<bool>,
+}
+
+impl NativeLib {
+ pub fn has_modifiers(&self) -> bool {
+ self.verbatim.is_some() || self.kind.has_modifiers()
+ }
+}
+
+/// A path that has been canonicalized along with its original, non-canonicalized form
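+/// (e.g. `CanonicalizedPath::new(Path::new("./foo"))` keeps `./foo` as the
+/// original and stores the resolved absolute form when `std::fs::canonicalize`
+/// succeeds).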
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct CanonicalizedPath {
+ // Optional since canonicalization can sometimes fail
+ canonicalized: Option<PathBuf>,
+ original: PathBuf,
+}
+
+impl CanonicalizedPath {
+ pub fn new(path: &Path) -> Self {
+ Self { original: path.to_owned(), canonicalized: std::fs::canonicalize(path).ok() }
+ }
+
+ pub fn canonicalized(&self) -> &PathBuf {
+ self.canonicalized.as_ref().unwrap_or(self.original())
+ }
+
+ pub fn original(&self) -> &PathBuf {
+ &self.original
+ }
+}
diff --git a/compiler/rustc_smir/Cargo.toml b/compiler/rustc_smir/Cargo.toml
new file mode 100644
index 000000000..5e0d1f369
--- /dev/null
+++ b/compiler/rustc_smir/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "rustc_smir"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+rustc_borrowck = { path = "../rustc_borrowck", optional = true }
+rustc_driver = { path = "../rustc_driver", optional = true }
+rustc_hir = { path = "../rustc_hir", optional = true }
+rustc_interface = { path = "../rustc_interface", optional = true }
+rustc_middle = { path = "../rustc_middle", optional = true }
+rustc_mir_dataflow = { path = "../rustc_mir_dataflow", optional = true }
+rustc_mir_transform = { path = "../rustc_mir_transform", optional = true }
+rustc_serialize = { path = "../rustc_serialize", optional = true }
+rustc_trait_selection = { path = "../rustc_trait_selection", optional = true }
+
+[features]
+default = [
+ "rustc_borrowck",
+ "rustc_driver",
+ "rustc_hir",
+ "rustc_interface",
+ "rustc_middle",
+ "rustc_mir_dataflow",
+ "rustc_mir_transform",
+ "rustc_serialize",
+ "rustc_trait_selection",
+]
diff --git a/compiler/rustc_smir/README.md b/compiler/rustc_smir/README.md
new file mode 100644
index 000000000..ae49098dd
--- /dev/null
+++ b/compiler/rustc_smir/README.md
@@ -0,0 +1,75 @@
+This crate is regularly synced with its mirror in the rustc repo at `compiler/rustc_smir`.
+
+We use `git subtree` for this to preserve commits and allow the rustc repo to
+edit these crates without having to touch this repo. This keeps the crates compiling
+while allowing us to independently work on them here. The effort of keeping them in
+sync is pushed entirely onto us, without affecting rustc workflows negatively.
+This may change in the future, but changes to policy should only be done via a
+compiler team MCP.
+
+## Instructions for working on this crate locally
+
+Since the crate is the same in the rustc repo and here, the dependencies on rustc_* crates
+will only work either here or there, never in both places at the same time. Thus we use
+optional dependencies on the rustc_* crates, requiring local development to use
+
+```
+cargo build --no-default-features -Zavoid-dev-deps
+```
+
+in order to compile successfully.
+
+## Instructions for syncing
+
+### Updating this repository
+
+In the rustc repo, execute
+
+```
+git subtree push --prefix=compiler/rustc_smir url_to_your_fork_of_project_stable_mir some_feature_branch
+```
+
+and then open a PR of your `some_feature_branch` against https://github.com/rust-lang/project-stable-mir
+
+### Updating the rustc library
+
+First we need to bump our stack limit, as the rustc repo otherwise quickly hits that:
+
+```
+ulimit -s 60000
+```
+
+#### Maximum function recursion depth (1000) reached
+
+Then we need to disable `dash` as the default shell for sh scripts, as otherwise we run into a
+hard limit of a recursion depth of 1000:
+
+```
+sudo dpkg-reconfigure dash
+```
+
+and then select `No` to disable dash.
+
+
+#### Patching your `git subtree`
+
+The regular git subtree does not scale to repos of the size of the rustc repo.
+So download the `git-subtree.sh` from https://github.com/gitgitgadget/git/pull/493/files and run
+
+```
+sudo cp --backup /path/to/patched/git-subtree.sh /usr/lib/git-core/git-subtree
+sudo chmod --reference=/usr/lib/git-core/git-subtree~ /usr/lib/git-core/git-subtree
+sudo chown --reference=/usr/lib/git-core/git-subtree~ /usr/lib/git-core/git-subtree
+```
+
+#### Actually doing a sync
+
+In the rustc repo, execute
+
+```
+git subtree pull --prefix=compiler/rustc_smir https://github.com/rust-lang/project-stable-mir smir
+```
+
+Note: only ever sync to rustc from project-stable-mir's `smir` branch. Do not sync from your own forks.
+
+Then open a PR against rustc just like a regular PR.
diff --git a/compiler/rustc_smir/rust-toolchain.toml b/compiler/rustc_smir/rust-toolchain.toml
new file mode 100644
index 000000000..7b696fc1f
--- /dev/null
+++ b/compiler/rustc_smir/rust-toolchain.toml
@@ -0,0 +1,3 @@
+[toolchain]
+channel = "nightly-2022-06-01"
+components = [ "rustfmt", "rustc-dev" ]
diff --git a/compiler/rustc_smir/src/lib.rs b/compiler/rustc_smir/src/lib.rs
new file mode 100644
index 000000000..5c7aaf35b
--- /dev/null
+++ b/compiler/rustc_smir/src/lib.rs
@@ -0,0 +1,17 @@
+//! The WIP stable interface to rustc internals.
+//!
+//! For more information see https://github.com/rust-lang/project-stable-mir
+//!
+//! # Note
+//!
+//! This API is still completely unstable and subject to change.
+
+#![doc(
+ html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
+ test(attr(allow(unused_variables), deny(warnings)))
+)]
+#![cfg_attr(not(feature = "default"), feature(rustc_private))]
+
+pub mod mir;
+
+pub mod very_unstable;
diff --git a/compiler/rustc_smir/src/mir.rs b/compiler/rustc_smir/src/mir.rs
new file mode 100644
index 000000000..855605b1a
--- /dev/null
+++ b/compiler/rustc_smir/src/mir.rs
@@ -0,0 +1,10 @@
+pub use crate::very_unstable::middle::mir::{
+ visit::MutVisitor, AggregateKind, AssertKind, BasicBlock, BasicBlockData, BinOp, BindingForm,
+ BlockTailInfo, Body, BorrowKind, CastKind, ClearCrossCrate, Constant, ConstantKind,
+ CopyNonOverlapping, Coverage, FakeReadCause, Field, GeneratorInfo, ImplicitSelfKind,
+ InlineAsmOperand, Local, LocalDecl, LocalInfo, LocalKind, Location, MirPhase, MirSource,
+ NullOp, Operand, Place, PlaceRef, ProjectionElem, ProjectionKind, Promoted, RetagKind, Rvalue,
+ Safety, SourceInfo, SourceScope, SourceScopeData, SourceScopeLocalData, Statement,
+ StatementKind, UnOp, UserTypeProjection, UserTypeProjections, VarBindingForm, VarDebugInfo,
+ VarDebugInfoContents,
+};
diff --git a/compiler/rustc_smir/src/very_unstable.rs b/compiler/rustc_smir/src/very_unstable.rs
new file mode 100644
index 000000000..12ba133db
--- /dev/null
+++ b/compiler/rustc_smir/src/very_unstable.rs
@@ -0,0 +1,27 @@
+//! This module reexports various crates and modules from unstable rustc APIs.
+//! Add anything you need here and it will get slowly transferred to a stable API.
+//! Only use rustc_smir in your dependencies and use the reexports here instead of
+//! directly referring to the unstable crates.
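+//!
+//! For example, a consumer would write `use rustc_smir::very_unstable::middle::mir;`
+//! rather than depending on `rustc_middle` directly (illustrative usage).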
+
+macro_rules! crates {
+ ($($rustc_name:ident -> $name:ident,)*) => {
+ $(
+ #[cfg(not(feature = "default"))]
+ pub extern crate $rustc_name as $name;
+ #[cfg(feature = "default")]
+ pub use $rustc_name as $name;
+ )*
+ }
+}
+
+crates! {
+ rustc_borrowck -> borrowck,
+ rustc_driver -> driver,
+ rustc_hir -> hir,
+ rustc_interface -> interface,
+ rustc_middle -> middle,
+ rustc_mir_dataflow -> dataflow,
+ rustc_mir_transform -> transform,
+ rustc_serialize -> serialize,
+ rustc_trait_selection -> trait_selection,
+}
diff --git a/compiler/rustc_span/Cargo.toml b/compiler/rustc_span/Cargo.toml
new file mode 100644
index 000000000..7227b193f
--- /dev/null
+++ b/compiler/rustc_span/Cargo.toml
@@ -0,0 +1,21 @@
+[package]
+name = "rustc_span"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_index = { path = "../rustc_index" }
+rustc_arena = { path = "../rustc_arena" }
+scoped-tls = "1.0"
+unicode-width = "0.1.4"
+cfg-if = "0.1.2"
+tracing = "0.1"
+sha1 = { package = "sha-1", version = "0.10.0" }
+sha2 = "0.10.1"
+md5 = { package = "md-5", version = "0.10.0" }
diff --git a/compiler/rustc_span/src/analyze_source_file.rs b/compiler/rustc_span/src/analyze_source_file.rs
new file mode 100644
index 000000000..5987fb2a1
--- /dev/null
+++ b/compiler/rustc_span/src/analyze_source_file.rs
@@ -0,0 +1,274 @@
+use super::*;
+use unicode_width::UnicodeWidthChar;
+
+#[cfg(test)]
+mod tests;
+
+/// Finds all newlines, multi-byte characters, and non-narrow characters in a
+/// SourceFile.
+///
+/// This function will use an SSE2 enhanced implementation if hardware support
+/// is detected at runtime.
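+///
+/// For example (mirroring the unit tests), analyzing `"a\nc"` starting at
+/// `BytePos(0)` yields the line-start positions `[0, 2]` and no multi-byte
+/// or non-narrow characters.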
+pub fn analyze_source_file(
+ src: &str,
+ source_file_start_pos: BytePos,
+) -> (Vec<BytePos>, Vec<MultiByteChar>, Vec<NonNarrowChar>) {
+ let mut lines = vec![source_file_start_pos];
+ let mut multi_byte_chars = vec![];
+ let mut non_narrow_chars = vec![];
+
+ // Calls the right implementation, depending on hardware support available.
+ analyze_source_file_dispatch(
+ src,
+ source_file_start_pos,
+ &mut lines,
+ &mut multi_byte_chars,
+ &mut non_narrow_chars,
+ );
+
+ // The code above optimistically registers a new line *after* each \n
+ // it encounters. If that point is already outside the source_file, remove
+ // it again.
+ if let Some(&last_line_start) = lines.last() {
+ let source_file_end = source_file_start_pos + BytePos::from_usize(src.len());
+ assert!(source_file_end >= last_line_start);
+ if last_line_start == source_file_end {
+ lines.pop();
+ }
+ }
+
+ (lines, multi_byte_chars, non_narrow_chars)
+}
+
+cfg_if::cfg_if! {
+ if #[cfg(all(any(target_arch = "x86", target_arch = "x86_64")))] {
+ fn analyze_source_file_dispatch(src: &str,
+ source_file_start_pos: BytePos,
+ lines: &mut Vec<BytePos>,
+ multi_byte_chars: &mut Vec<MultiByteChar>,
+ non_narrow_chars: &mut Vec<NonNarrowChar>) {
+ if is_x86_feature_detected!("sse2") {
+ unsafe {
+ analyze_source_file_sse2(src,
+ source_file_start_pos,
+ lines,
+ multi_byte_chars,
+ non_narrow_chars);
+ }
+ } else {
+ analyze_source_file_generic(src,
+ src.len(),
+ source_file_start_pos,
+ lines,
+ multi_byte_chars,
+ non_narrow_chars);
+
+ }
+ }
+
+ /// Checks 16-byte chunks of text at a time. If the chunk contains
+ /// something other than printable ASCII characters and newlines, the
+ /// function falls back to the generic implementation. Otherwise it uses
+ /// SSE2 intrinsics to quickly find all newlines.
+ #[target_feature(enable = "sse2")]
+ unsafe fn analyze_source_file_sse2(src: &str,
+ output_offset: BytePos,
+ lines: &mut Vec<BytePos>,
+ multi_byte_chars: &mut Vec<MultiByteChar>,
+ non_narrow_chars: &mut Vec<NonNarrowChar>) {
+ #[cfg(target_arch = "x86")]
+ use std::arch::x86::*;
+ #[cfg(target_arch = "x86_64")]
+ use std::arch::x86_64::*;
+
+ const CHUNK_SIZE: usize = 16;
+
+ let src_bytes = src.as_bytes();
+
+ let chunk_count = src.len() / CHUNK_SIZE;
+
+ // This variable keeps track of where we should start decoding a
+ // chunk. If a multi-byte character spans across chunk boundaries,
+ // we need to skip that part in the next chunk because we already
+ // handled it.
+ let mut intra_chunk_offset = 0;
+
+ for chunk_index in 0 .. chunk_count {
+ let ptr = src_bytes.as_ptr() as *const __m128i;
+ // We don't know if the pointer is aligned to 16 bytes, so we
+ // use `loadu`, which supports unaligned loading.
+ let chunk = _mm_loadu_si128(ptr.add(chunk_index));
+
+ // For each byte in the chunk, see if its value (as a signed integer)
+ // is < 0, which indicates that it's part of a multi-byte UTF-8 char.
+ let multibyte_test = _mm_cmplt_epi8(chunk, _mm_set1_epi8(0));
+ // Create a bit mask from the comparison results.
+ let multibyte_mask = _mm_movemask_epi8(multibyte_test);
+
+ // If the bit mask is all zero, we only have ASCII chars here:
+ if multibyte_mask == 0 {
+ assert!(intra_chunk_offset == 0);
+
+ // Check if there are any control characters in the chunk. All
+ // control characters that we can encounter at this point have a
+ // byte value less than 32 or ...
+ let control_char_test0 = _mm_cmplt_epi8(chunk, _mm_set1_epi8(32));
+ let control_char_mask0 = _mm_movemask_epi8(control_char_test0);
+
+ // ... it's the ASCII 'DEL' character with a value of 127.
+ let control_char_test1 = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(127));
+ let control_char_mask1 = _mm_movemask_epi8(control_char_test1);
+
+ let control_char_mask = control_char_mask0 | control_char_mask1;
+
+ if control_char_mask != 0 {
+ // Check for newlines in the chunk
+ let newlines_test = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(b'\n' as i8));
+ let newlines_mask = _mm_movemask_epi8(newlines_test);
+
+ if control_char_mask == newlines_mask {
+ // All control characters are newlines, record them
+ let mut newlines_mask = 0xFFFF0000 | newlines_mask as u32;
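+ // The upper 16 bits are set so that, once every real newline
+ // bit has been consumed, `trailing_zeros()` returns at least
+ // CHUNK_SIZE and the loop below terminates.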
+ let output_offset = output_offset +
+ BytePos::from_usize(chunk_index * CHUNK_SIZE + 1);
+
+ loop {
+ let index = newlines_mask.trailing_zeros();
+
+ if index >= CHUNK_SIZE as u32 {
+ // We have arrived at the end of the chunk.
+ break
+ }
+
+ lines.push(BytePos(index) + output_offset);
+
+ // Clear the bit, so we can find the next one.
+ newlines_mask &= (!1) << index;
+ }
+
+ // We are done for this chunk. All control characters were
+ // newlines and we took care of those.
+ continue
+ } else {
+ // Some of the control characters are not newlines,
+ // fall through to the slow path below.
+ }
+ } else {
+ // No control characters, nothing to record for this chunk
+ continue
+ }
+ }
+
+ // The slow path.
+ // There are control chars in here, fallback to generic decoding.
+ let scan_start = chunk_index * CHUNK_SIZE + intra_chunk_offset;
+ intra_chunk_offset = analyze_source_file_generic(
+ &src[scan_start .. ],
+ CHUNK_SIZE - intra_chunk_offset,
+ BytePos::from_usize(scan_start) + output_offset,
+ lines,
+ multi_byte_chars,
+ non_narrow_chars
+ );
+ }
+
+ // There might still be a tail left to analyze
+ let tail_start = chunk_count * CHUNK_SIZE + intra_chunk_offset;
+ if tail_start < src.len() {
+ analyze_source_file_generic(&src[tail_start as usize ..],
+ src.len() - tail_start,
+ output_offset + BytePos::from_usize(tail_start),
+ lines,
+ multi_byte_chars,
+ non_narrow_chars);
+ }
+ }
+ } else {
+
+ // The target (or compiler version) does not support SSE2 ...
+ fn analyze_source_file_dispatch(src: &str,
+ source_file_start_pos: BytePos,
+ lines: &mut Vec<BytePos>,
+ multi_byte_chars: &mut Vec<MultiByteChar>,
+ non_narrow_chars: &mut Vec<NonNarrowChar>) {
+ analyze_source_file_generic(src,
+ src.len(),
+ source_file_start_pos,
+ lines,
+ multi_byte_chars,
+ non_narrow_chars);
+ }
+ }
+}
+
+// `scan_len` determines the number of bytes in `src` to scan. Note that the
+// function can read past `scan_len` if a multi-byte character starts within
+// the range but extends past it. The overflow is returned by the function.
+fn analyze_source_file_generic(
+ src: &str,
+ scan_len: usize,
+ output_offset: BytePos,
+ lines: &mut Vec<BytePos>,
+ multi_byte_chars: &mut Vec<MultiByteChar>,
+ non_narrow_chars: &mut Vec<NonNarrowChar>,
+) -> usize {
+ assert!(src.len() >= scan_len);
+ let mut i = 0;
+ let src_bytes = src.as_bytes();
+
+ while i < scan_len {
+ let byte = unsafe {
+ // We verified that i < scan_len <= src.len()
+ *src_bytes.get_unchecked(i as usize)
+ };
+
+ // How much to advance in order to get to the next UTF-8 char in the
+ // string.
+ let mut char_len = 1;
+
+ if byte < 32 {
+ // This is an ASCII control character, it could be one of the cases
+ // that are interesting to us.
+
+ let pos = BytePos::from_usize(i) + output_offset;
+
+ match byte {
+ b'\n' => {
+ lines.push(pos + BytePos(1));
+ }
+ b'\t' => {
+ non_narrow_chars.push(NonNarrowChar::Tab(pos));
+ }
+ _ => {
+ non_narrow_chars.push(NonNarrowChar::ZeroWidth(pos));
+ }
+ }
+ } else if byte >= 127 {
+ // The slow path:
+ // This is either ASCII control character "DEL" or the beginning of
+ // a multibyte char. Just decode to `char`.
+ let c = (&src[i..]).chars().next().unwrap();
+ char_len = c.len_utf8();
+
+ let pos = BytePos::from_usize(i) + output_offset;
+
+ if char_len > 1 {
+ assert!((2..=4).contains(&char_len));
+ let mbc = MultiByteChar { pos, bytes: char_len as u8 };
+ multi_byte_chars.push(mbc);
+ }
+
+ // Assume control characters are zero width.
+ // FIXME: How can we decide between `width` and `width_cjk`?
+ let char_width = UnicodeWidthChar::width(c).unwrap_or(0);
+
+ if char_width != 1 {
+ non_narrow_chars.push(NonNarrowChar::new(pos, char_width));
+ }
+ }
+
+ i += char_len;
+ }
+
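+ // If the last decoded character started inside the scanned range but
+ // extended past it, `i` now exceeds `scan_len`; the difference tells the
+ // caller how many bytes of the following chunk were already handled.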
+ i - scan_len
+}
diff --git a/compiler/rustc_span/src/analyze_source_file/tests.rs b/compiler/rustc_span/src/analyze_source_file/tests.rs
new file mode 100644
index 000000000..66aefc9a7
--- /dev/null
+++ b/compiler/rustc_span/src/analyze_source_file/tests.rs
@@ -0,0 +1,142 @@
+use super::*;
+
+macro_rules! test {
+ (case: $test_name:ident,
+ text: $text:expr,
+ source_file_start_pos: $source_file_start_pos:expr,
+ lines: $lines:expr,
+ multi_byte_chars: $multi_byte_chars:expr,
+ non_narrow_chars: $non_narrow_chars:expr,) => {
+ #[test]
+ fn $test_name() {
+ let (lines, multi_byte_chars, non_narrow_chars) =
+ analyze_source_file($text, BytePos($source_file_start_pos));
+
+ let expected_lines: Vec<BytePos> = $lines.into_iter().map(BytePos).collect();
+
+ assert_eq!(lines, expected_lines);
+
+ let expected_mbcs: Vec<MultiByteChar> = $multi_byte_chars
+ .into_iter()
+ .map(|(pos, bytes)| MultiByteChar { pos: BytePos(pos), bytes })
+ .collect();
+
+ assert_eq!(multi_byte_chars, expected_mbcs);
+
+ let expected_nncs: Vec<NonNarrowChar> = $non_narrow_chars
+ .into_iter()
+ .map(|(pos, width)| NonNarrowChar::new(BytePos(pos), width))
+ .collect();
+
+ assert_eq!(non_narrow_chars, expected_nncs);
+ }
+ };
+}
+
+test!(
+ case: empty_text,
+ text: "",
+ source_file_start_pos: 0,
+ lines: vec![],
+ multi_byte_chars: vec![],
+ non_narrow_chars: vec![],
+);
+
+test!(
+ case: newlines_short,
+ text: "a\nc",
+ source_file_start_pos: 0,
+ lines: vec![0, 2],
+ multi_byte_chars: vec![],
+ non_narrow_chars: vec![],
+);
+
+test!(
+ case: newlines_long,
+ text: "012345678\nabcdef012345678\na",
+ source_file_start_pos: 0,
+ lines: vec![0, 10, 26],
+ multi_byte_chars: vec![],
+ non_narrow_chars: vec![],
+);
+
+test!(
+ case: newline_and_multi_byte_char_in_same_chunk,
+ text: "01234β789\nbcdef0123456789abcdef",
+ source_file_start_pos: 0,
+ lines: vec![0, 11],
+ multi_byte_chars: vec![(5, 2)],
+ non_narrow_chars: vec![],
+);
+
+test!(
+ case: newline_and_control_char_in_same_chunk,
+ text: "01234\u{07}6789\nbcdef0123456789abcdef",
+ source_file_start_pos: 0,
+ lines: vec![0, 11],
+ multi_byte_chars: vec![],
+ non_narrow_chars: vec![(5, 0)],
+);
+
+test!(
+ case: multi_byte_char_short,
+ text: "aβc",
+ source_file_start_pos: 0,
+ lines: vec![0],
+ multi_byte_chars: vec![(1, 2)],
+ non_narrow_chars: vec![],
+);
+
+test!(
+ case: multi_byte_char_long,
+ text: "0123456789abcΔf012345β",
+ source_file_start_pos: 0,
+ lines: vec![0],
+ multi_byte_chars: vec![(13, 2), (22, 2)],
+ non_narrow_chars: vec![],
+);
+
+test!(
+ case: multi_byte_char_across_chunk_boundary,
+ text: "0123456789abcdeΔ123456789abcdef01234",
+ source_file_start_pos: 0,
+ lines: vec![0],
+ multi_byte_chars: vec![(15, 2)],
+ non_narrow_chars: vec![],
+);
+
+test!(
+ case: multi_byte_char_across_chunk_boundary_tail,
+ text: "0123456789abcdeΔ....",
+ source_file_start_pos: 0,
+ lines: vec![0],
+ multi_byte_chars: vec![(15, 2)],
+ non_narrow_chars: vec![],
+);
+
+test!(
+ case: non_narrow_short,
+ text: "0\t2",
+ source_file_start_pos: 0,
+ lines: vec![0],
+ multi_byte_chars: vec![],
+ non_narrow_chars: vec![(1, 4)],
+);
+
+test!(
+ case: non_narrow_long,
+ text: "01\t3456789abcdef01234567\u{07}9",
+ source_file_start_pos: 0,
+ lines: vec![0],
+ multi_byte_chars: vec![],
+ non_narrow_chars: vec![(2, 4), (24, 0)],
+);
+
+test!(
+ case: output_offset_all,
+ text: "01\t345\n789abcΔf01234567\u{07}9\nbcΔf",
+ source_file_start_pos: 1000,
+ lines: vec![0 + 1000, 7 + 1000, 27 + 1000],
+ multi_byte_chars: vec![(13 + 1000, 2), (29 + 1000, 2)],
+ non_narrow_chars: vec![(2 + 1000, 4), (24 + 1000, 0)],
+);
diff --git a/compiler/rustc_span/src/caching_source_map_view.rs b/compiler/rustc_span/src/caching_source_map_view.rs
new file mode 100644
index 000000000..fdabf404a
--- /dev/null
+++ b/compiler/rustc_span/src/caching_source_map_view.rs
@@ -0,0 +1,293 @@
+use crate::source_map::SourceMap;
+use crate::{BytePos, SourceFile, SpanData};
+use rustc_data_structures::sync::Lrc;
+use std::ops::Range;
+
+#[derive(Clone)]
+struct CacheEntry {
+ time_stamp: usize,
+ line_number: usize,
+ // The line's byte position range in the `SourceMap`. This range will fail to contain a valid
+ // position in certain edge cases. Spans often start/end one past something, and when that
+ // something is the last character of a file (this can happen when a file doesn't end in a
+ // newline, for example), we'd still like for the position to be considered within the last
+ // line. However, it isn't according to the exclusive upper bound of this range. We cannot
+ // change the upper bound to be inclusive, because for most lines, the upper bound is the same
+ // as the lower bound of the next line, so there would be an ambiguity.
+ //
+ // Since the containment aspect of this range is only used to see whether or not the cache
+ // entry contains a position, the only ramification of the above is that we will get cache
+ // misses for these rare positions. A line lookup for the position via `SourceMap::lookup_line`
+ // after a cache miss will produce the last line number, as desired.
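+ //
+ // For example (illustrative): in a file containing just `"ab"` with no
+ // trailing newline, the last line's range is `0..2`; a span ending at
+ // `BytePos(2)` is one past the end, misses this range, and falls back to
+ // `SourceMap::lookup_line`.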
+ line: Range<BytePos>,
+ file: Lrc<SourceFile>,
+ file_index: usize,
+}
+
+impl CacheEntry {
+ #[inline]
+ fn update(
+ &mut self,
+ new_file_and_idx: Option<(Lrc<SourceFile>, usize)>,
+ pos: BytePos,
+ time_stamp: usize,
+ ) {
+ if let Some((file, file_idx)) = new_file_and_idx {
+ self.file = file;
+ self.file_index = file_idx;
+ }
+
+ let line_index = self.file.lookup_line(pos).unwrap();
+ let line_bounds = self.file.line_bounds(line_index);
+ self.line_number = line_index + 1;
+ self.line = line_bounds;
+ self.touch(time_stamp);
+ }
+
+ #[inline]
+ fn touch(&mut self, time_stamp: usize) {
+ self.time_stamp = time_stamp;
+ }
+}
+
+#[derive(Clone)]
+pub struct CachingSourceMapView<'sm> {
+ source_map: &'sm SourceMap,
+ line_cache: [CacheEntry; 3],
+ time_stamp: usize,
+}
+
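+// Illustrative usage (a sketch, not code from this module), given some
+// `source_map: &SourceMap` in scope: the view wraps a `SourceMap` and
+// memoizes the last three line lookups.
+//
+// ```ignore (illustrative)
+// let mut view = CachingSourceMapView::new(source_map);
+// if let Some((file, line, col)) = view.byte_pos_to_line_and_col(BytePos(42)) {
+//     // `line` is 1-based; `col` is the byte offset from the line start.
+// }
+// ```
+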
+impl<'sm> CachingSourceMapView<'sm> {
+ pub fn new(source_map: &'sm SourceMap) -> CachingSourceMapView<'sm> {
+ let files = source_map.files();
+ let first_file = files[0].clone();
+ let entry = CacheEntry {
+ time_stamp: 0,
+ line_number: 0,
+ line: BytePos(0)..BytePos(0),
+ file: first_file,
+ file_index: 0,
+ };
+
+ CachingSourceMapView {
+ source_map,
+ line_cache: [entry.clone(), entry.clone(), entry],
+ time_stamp: 0,
+ }
+ }
+
+ pub fn byte_pos_to_line_and_col(
+ &mut self,
+ pos: BytePos,
+ ) -> Option<(Lrc<SourceFile>, usize, BytePos)> {
+ self.time_stamp += 1;
+
+ // Check if the position is in one of the cached lines
+ let cache_idx = self.cache_entry_index(pos);
+ if cache_idx != -1 {
+ let cache_entry = &mut self.line_cache[cache_idx as usize];
+ cache_entry.touch(self.time_stamp);
+
+ return Some((
+ cache_entry.file.clone(),
+ cache_entry.line_number,
+ pos - cache_entry.line.start,
+ ));
+ }
+
+ // No cache hit ...
+ let oldest = self.oldest_cache_entry_index();
+
+ // If the entry doesn't point to the correct file, get the new file and index.
+ let new_file_and_idx = if !file_contains(&self.line_cache[oldest].file, pos) {
+ Some(self.file_for_position(pos)?)
+ } else {
+ None
+ };
+
+ let cache_entry = &mut self.line_cache[oldest];
+ cache_entry.update(new_file_and_idx, pos, self.time_stamp);
+
+ Some((cache_entry.file.clone(), cache_entry.line_number, pos - cache_entry.line.start))
+ }
+
+ pub fn span_data_to_lines_and_cols(
+ &mut self,
+ span_data: &SpanData,
+ ) -> Option<(Lrc<SourceFile>, usize, BytePos, usize, BytePos)> {
+ self.time_stamp += 1;
+
+ // Check if lo and hi are in the cached lines.
+ let lo_cache_idx = self.cache_entry_index(span_data.lo);
+ let hi_cache_idx = self.cache_entry_index(span_data.hi);
+
+ if lo_cache_idx != -1 && hi_cache_idx != -1 {
+ // Cache hit for span lo and hi. Check if they belong to the same file.
+ let result = {
+ let lo = &self.line_cache[lo_cache_idx as usize];
+ let hi = &self.line_cache[hi_cache_idx as usize];
+
+ if lo.file_index != hi.file_index {
+ return None;
+ }
+
+ (
+ lo.file.clone(),
+ lo.line_number,
+ span_data.lo - lo.line.start,
+ hi.line_number,
+ span_data.hi - hi.line.start,
+ )
+ };
+
+ self.line_cache[lo_cache_idx as usize].touch(self.time_stamp);
+ self.line_cache[hi_cache_idx as usize].touch(self.time_stamp);
+
+ return Some(result);
+ }
+
+ // No cache hit or cache hit for only one of span lo and hi.
+ let oldest = if lo_cache_idx != -1 || hi_cache_idx != -1 {
+ let avoid_idx = if lo_cache_idx != -1 { lo_cache_idx } else { hi_cache_idx };
+ self.oldest_cache_entry_index_avoid(avoid_idx as usize)
+ } else {
+ self.oldest_cache_entry_index()
+ };
+
+ // If the entry doesn't point to the correct file, get the new file and index.
+ // Return early if the file containing the beginning of the span doesn't
+ // contain the end of the span.
+ let new_file_and_idx = if !file_contains(&self.line_cache[oldest].file, span_data.lo) {
+ let new_file_and_idx = self.file_for_position(span_data.lo)?;
+ if !file_contains(&new_file_and_idx.0, span_data.hi) {
+ return None;
+ }
+
+ Some(new_file_and_idx)
+ } else {
+ let file = &self.line_cache[oldest].file;
+ if !file_contains(&file, span_data.hi) {
+ return None;
+ }
+
+ None
+ };
+
+ // Update the cache entries.
+ let (lo_idx, hi_idx) = match (lo_cache_idx, hi_cache_idx) {
+ // Neither lo nor hi is cached; update the oldest entry for the lo
+ // line, and a second entry for the hi line if it differs.
+ (-1, -1) => {
+ let lo = &mut self.line_cache[oldest];
+ lo.update(new_file_and_idx, span_data.lo, self.time_stamp);
+
+ if !lo.line.contains(&span_data.hi) {
+ let new_file_and_idx = Some((lo.file.clone(), lo.file_index));
+ let next_oldest = self.oldest_cache_entry_index_avoid(oldest);
+ let hi = &mut self.line_cache[next_oldest];
+ hi.update(new_file_and_idx, span_data.hi, self.time_stamp);
+ (oldest, next_oldest)
+ } else {
+ (oldest, oldest)
+ }
+ }
+ // Oldest cache entry is for span_data.lo line.
+ (-1, _) => {
+ let lo = &mut self.line_cache[oldest];
+ lo.update(new_file_and_idx, span_data.lo, self.time_stamp);
+ let hi = &mut self.line_cache[hi_cache_idx as usize];
+ hi.touch(self.time_stamp);
+ (oldest, hi_cache_idx as usize)
+ }
+ // Oldest cache entry is for span_data.hi line.
+ (_, -1) => {
+ let hi = &mut self.line_cache[oldest];
+ hi.update(new_file_and_idx, span_data.hi, self.time_stamp);
+ let lo = &mut self.line_cache[lo_cache_idx as usize];
+ lo.touch(self.time_stamp);
+ (lo_cache_idx as usize, oldest)
+ }
+ _ => {
+ panic!();
+ }
+ };
+
+ let lo = &self.line_cache[lo_idx];
+ let hi = &self.line_cache[hi_idx];
+
+ // Span lo and hi may equal the line end when the last line doesn't
+ // end in a newline; hence the inclusive upper bounds below.
+ assert!(span_data.lo >= lo.line.start);
+ assert!(span_data.lo <= lo.line.end);
+ assert!(span_data.hi >= hi.line.start);
+ assert!(span_data.hi <= hi.line.end);
+ assert!(lo.file.contains(span_data.lo));
+ assert!(lo.file.contains(span_data.hi));
+ assert_eq!(lo.file_index, hi.file_index);
+
+ Some((
+ lo.file.clone(),
+ lo.line_number,
+ span_data.lo - lo.line.start,
+ hi.line_number,
+ span_data.hi - hi.line.start,
+ ))
+ }
+
+ fn cache_entry_index(&self, pos: BytePos) -> isize {
+ for (idx, cache_entry) in self.line_cache.iter().enumerate() {
+ if cache_entry.line.contains(&pos) {
+ return idx as isize;
+ }
+ }
+
+ -1
+ }
+
+ fn oldest_cache_entry_index(&self) -> usize {
+ let mut oldest = 0;
+
+ for idx in 1..self.line_cache.len() {
+ if self.line_cache[idx].time_stamp < self.line_cache[oldest].time_stamp {
+ oldest = idx;
+ }
+ }
+
+ oldest
+ }
+
+ fn oldest_cache_entry_index_avoid(&self, avoid_idx: usize) -> usize {
+ let mut oldest = if avoid_idx != 0 { 0 } else { 1 };
+
+ for idx in 0..self.line_cache.len() {
+ if idx != avoid_idx
+ && self.line_cache[idx].time_stamp < self.line_cache[oldest].time_stamp
+ {
+ oldest = idx;
+ }
+ }
+
+ oldest
+ }
+
+ fn file_for_position(&self, pos: BytePos) -> Option<(Lrc<SourceFile>, usize)> {
+ if !self.source_map.files().is_empty() {
+ let file_idx = self.source_map.lookup_source_file_idx(pos);
+ let file = &self.source_map.files()[file_idx];
+
+ if file_contains(file, pos) {
+ return Some((file.clone(), file_idx));
+ }
+ }
+
+ None
+ }
+}
+
+#[inline]
+fn file_contains(file: &SourceFile, pos: BytePos) -> bool {
+ // `SourceMap::lookup_source_file_idx` and `SourceFile::contains` both consider the position
+ // one past the end of a file to belong to it. Normally, that's what we want. But for the
+ // purposes of converting a byte position to a line and column number, we can't come up with a
+ // line and column number if the file is empty, because an empty file doesn't contain any
+ // lines. So for our purposes, we don't consider empty files to contain any byte position.
+ file.contains(pos) && !file.is_empty()
+}
diff --git a/compiler/rustc_span/src/def_id.rs b/compiler/rustc_span/src/def_id.rs
new file mode 100644
index 000000000..a1533fe46
--- /dev/null
+++ b/compiler/rustc_span/src/def_id.rs
@@ -0,0 +1,444 @@
+use crate::HashStableContext;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+use rustc_data_structures::AtomicRef;
+use rustc_index::vec::Idx;
+use rustc_macros::HashStable_Generic;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::borrow::Borrow;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+
+rustc_index::newtype_index! {
+ pub struct CrateNum {
+ ENCODABLE = custom
+ DEBUG_FORMAT = "crate{}"
+ }
+}
+
+/// Item definitions in the currently-compiled crate would have the `CrateNum`
+/// `LOCAL_CRATE` in their `DefId`.
+pub const LOCAL_CRATE: CrateNum = CrateNum::from_u32(0);
+
+impl CrateNum {
+ #[inline]
+ pub fn new(x: usize) -> CrateNum {
+ CrateNum::from_usize(x)
+ }
+
+ #[inline]
+ pub fn as_def_id(self) -> DefId {
+ DefId { krate: self, index: CRATE_DEF_INDEX }
+ }
+}
+
+impl fmt::Display for CrateNum {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.private, f)
+ }
+}
+
+/// As a local identifier, a `CrateNum` is only meaningful within its context, e.g. within a tcx.
+/// Therefore, make sure to include the context when encoding a `CrateNum`.
+impl<E: Encoder> Encodable<E> for CrateNum {
+ default fn encode(&self, s: &mut E) {
+ s.emit_u32(self.as_u32());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for CrateNum {
+ default fn decode(d: &mut D) -> CrateNum {
+ CrateNum::from_u32(d.read_u32())
+ }
+}
+
+/// A `DefPathHash` is a fixed-size representation of a `DefPath` that is
+/// stable across crate and compilation session boundaries. It consists of two
+/// separate 64-bit hashes. The first uniquely identifies the crate this
+/// `DefPathHash` originates from (see [StableCrateId]), and the second
+/// uniquely identifies the corresponding `DefPath` within that crate. Together
+/// they form a unique identifier within an entire crate graph.
+///
+/// There is a very small chance of hash collisions, which would mean that two
+/// different `DefPath`s map to the same `DefPathHash`. Proceeding with
+/// compilation despite such a hash collision would very probably lead to an
+/// ICE and, in the worst case, to a silent mis-compilation. The compiler
+/// therefore actively and exhaustively checks for such hash collisions and
+/// aborts compilation if it finds one.
+///
+/// `DefPathHash` uses 64-bit hashes for both the crate-id part and the
+/// crate-internal part, even though it is likely that there are many more
+/// `LocalDefId`s in a single crate than there are individual crates in a crate
+/// graph. Since we use the same number of bits in both cases, the collision
+/// probability for the crate-local part will be quite a bit higher (though
+/// still very small).
+///
+/// This imbalance is not by accident: A hash collision in the
+/// crate-local part of a `DefPathHash` will be detected and reported while
+/// compiling the crate in question. Such a collision does not depend on
+/// outside factors and can be easily fixed by the crate maintainer (e.g. by
+/// renaming the item in question or by bumping the crate version in a harmless
+/// way).
+///
+/// A collision between crate-id hashes, on the other hand, is harder to fix
+/// because it depends on the set of crates in the entire crate graph of a
+/// compilation session. Again, using the same crate with a different version
+/// number would fix the issue with a high probability -- but that might be
+/// easier said than done if the crates in question are dependencies of
+/// third-party crates.
+///
+/// That being said, given a high quality hash function, the collision
+/// probabilities in question are very small. For example, for a big crate like
+/// `rustc_middle` (with ~50000 `LocalDefId`s as of the time of writing) there
+/// is a probability of roughly 1 in 14,750,000,000 of a crate-internal
+/// collision occurring. For a big crate graph with 1000 crates in it, there is
+/// a probability of 1 in 36,890,000,000,000 of a `StableCrateId` collision.
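+///
+/// (Illustrative, not from the source: both figures match the birthday
+/// approximation `p ≈ n^2 / 2^65` for `n` 64-bit hashes.)
+///
+/// ```ignore (illustrative)
+/// let n = 50_000f64; // approximate number of `LocalDefId`s in `rustc_middle`
+/// let p = n * n / 2f64.powi(65);
+/// assert!((1.0 / p / 1e9 - 14.75).abs() < 0.1); // ~1 in 14,750,000,000
+/// ```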
+#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Debug)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub struct DefPathHash(pub Fingerprint);
+
+impl DefPathHash {
+ /// Returns the [StableCrateId] identifying the crate this [DefPathHash]
+ /// originates from.
+ #[inline]
+ pub fn stable_crate_id(&self) -> StableCrateId {
+ StableCrateId(self.0.as_value().0)
+ }
+
+ /// Returns the crate-local part of the [DefPathHash].
+ ///
+ /// Used for tests.
+ #[inline]
+ pub fn local_hash(&self) -> u64 {
+ self.0.as_value().1
+ }
+
+ /// Builds a new [DefPathHash] with the given [StableCrateId] and
+ /// `local_hash`, where `local_hash` must be unique within its crate.
+ pub fn new(stable_crate_id: StableCrateId, local_hash: u64) -> DefPathHash {
+ DefPathHash(Fingerprint::new(stable_crate_id.0, local_hash))
+ }
+}
+
+impl Borrow<Fingerprint> for DefPathHash {
+ #[inline]
+ fn borrow(&self) -> &Fingerprint {
+ &self.0
+ }
+}
+
+/// A [`StableCrateId`] is a 64-bit hash of a crate name, together with all
+/// `-Cmetadata` arguments, and some other data. It is to [`CrateNum`] what [`DefPathHash`] is to
+/// [`DefId`]. It is stable across compilation sessions.
+///
+/// Since the ID is a hash value, there is a small chance that two crates
+/// end up with the same [`StableCrateId`]. The compiler will check for such
+/// collisions when loading crates and abort compilation in order to avoid
+/// further trouble.
+///
+/// For more information on the possibility of hash collisions in rustc,
+/// see the discussion in [`DefId`].
+#[derive(Copy, Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Debug)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub struct StableCrateId(pub(crate) u64);
+
+impl StableCrateId {
+ pub fn to_u64(self) -> u64 {
+ self.0
+ }
+
+ /// Computes the stable ID for a crate with the given name and
+ /// `-Cmetadata` arguments.
+ pub fn new(crate_name: &str, is_exe: bool, mut metadata: Vec<String>) -> StableCrateId {
+ let mut hasher = StableHasher::new();
+ crate_name.hash(&mut hasher);
+
+ // We don't want the stable crate ID to depend on the order of
+ // -C metadata arguments, so sort them:
+ metadata.sort();
+ // Every distinct -C metadata value is only incorporated once:
+ metadata.dedup();
+
+ hasher.write(b"metadata");
+ for s in &metadata {
+ // Also incorporate the length of a metadata string, so that we generate
+ // different values for `-Cmetadata=ab -Cmetadata=c` and
+ // `-Cmetadata=a -Cmetadata=bc`
+ hasher.write_usize(s.len());
+ hasher.write(s.as_bytes());
+ }
+
+ // Also incorporate crate type, so that we don't get symbol conflicts when
+ // linking against a library of the same name, if this is an executable.
+ hasher.write(if is_exe { b"exe" } else { b"lib" });
+
+ // Also incorporate the rustc version. Otherwise, with -Zsymbol-mangling-version=v0
+ // and no -Cmetadata, symbols from the same crate compiled with different versions of
+ // rustc are named the same.
+ //
+ // RUSTC_FORCE_RUSTC_VERSION is used to inject rustc version information
+ // during testing.
+ if let Some(val) = std::env::var_os("RUSTC_FORCE_RUSTC_VERSION") {
+ hasher.write(val.to_string_lossy().into_owned().as_bytes())
+ } else {
+ hasher.write(option_env!("CFG_VERSION").unwrap_or("unknown version").as_bytes());
+ }
+
+ StableCrateId(hasher.finish())
+ }
+}
+
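+// Illustrative (a sketch, not code from this module): two builds of the same
+// crate get different `StableCrateId`s if any hashed input differs, e.g. the
+// crate type:
+//
+// ```ignore (illustrative)
+// let lib = StableCrateId::new("mycrate", false, vec![]);
+// let exe = StableCrateId::new("mycrate", true, vec![]);
+// assert_ne!(lib, exe);
+// ```
+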
+rustc_index::newtype_index! {
+ /// A DefIndex is an index into the hir-map for a crate, identifying a
+ /// particular definition. It should really be considered an interned
+ /// shorthand for a particular DefPath.
+ pub struct DefIndex {
+ ENCODABLE = custom // (only encodable in metadata)
+
+ DEBUG_FORMAT = "DefIndex({})",
+ /// The crate root is always assigned index 0 by the AST Map code,
+ /// thanks to `NodeCollector::new`.
+ const CRATE_DEF_INDEX = 0,
+ }
+}
+
+impl<E: Encoder> Encodable<E> for DefIndex {
+ default fn encode(&self, _: &mut E) {
+ panic!("cannot encode `DefIndex` with `{}`", std::any::type_name::<E>());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for DefIndex {
+ default fn decode(_: &mut D) -> DefIndex {
+ panic!("cannot decode `DefIndex` with `{}`", std::any::type_name::<D>());
+ }
+}
+
+/// A `DefId` identifies a particular *definition*, by combining a crate
+/// index and a def index.
+///
+/// You can create a `DefId` from a `LocalDefId` using `local_def_id.to_def_id()`.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Copy)]
+// On below-64 bit systems we can simply use the derived `Hash` impl
+#[cfg_attr(not(target_pointer_width = "64"), derive(Hash))]
+#[repr(C)]
+#[rustc_pass_by_value]
+// We guarantee field order. Note that the order is essential here; see below for why.
+pub struct DefId {
+ // We cfg the order of the fields so that the `DefIndex`, which has high
+ // entropy, always ends up in the lower bits no matter the endianness. This
+ // allows the compiler to turn that `Hash` impl into a direct call to
+ // `u64::hash(_)`.
+ #[cfg(not(all(target_pointer_width = "64", target_endian = "big")))]
+ pub index: DefIndex,
+ pub krate: CrateNum,
+ #[cfg(all(target_pointer_width = "64", target_endian = "big"))]
+ pub index: DefIndex,
+}
+
+// On 64-bit systems, we can hash the whole `DefId` as one `u64` instead of two `u32`s. This
+// improves performance without impairing `FxHash` quality. So the below code gets compiled to a
+// noop on little endian systems because the memory layout of `DefId` is as follows:
+//
+// ```
+// +-0--------------31-+-32-------------63-+
+// ! index ! krate !
+// +-------------------+-------------------+
+// ```
+//
+// The order here has direct impact on `FxHash` quality because we have far more `DefIndex` per
+// crate than we have `Crate`s within one compilation. Or in other words, this arrangement puts
+// more entropy in the low bits than the high bits. The reason this matters is that `FxHash`, which
+// is used throughout rustc, has problems distributing the entropy from the high bits, so reversing
+// the order would lead to a large number of collisions and thus far worse performance.
+//
+// On 64-bit big-endian systems, this compiles to a 64-bit rotation by 32 bits, which is still
+// faster than another `FxHash` round.
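+//
+// Illustrative (not part of this module): the packed value hashed below is
+// `(krate << 32) | index`, so a `DefId` with `krate` 1 and `index` 7 is
+// hashed as the single `u64` `0x0000_0001_0000_0007`.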
+#[cfg(target_pointer_width = "64")]
+impl Hash for DefId {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ (((self.krate.as_u32() as u64) << 32) | (self.index.as_u32() as u64)).hash(h)
+ }
+}
+
+impl DefId {
+ /// Makes a local `DefId` from the given `DefIndex`.
+ #[inline]
+ pub fn local(index: DefIndex) -> DefId {
+ DefId { krate: LOCAL_CRATE, index }
+ }
+
+ /// Returns whether the item is defined in the crate currently being compiled.
+ #[inline]
+ pub fn is_local(self) -> bool {
+ self.krate == LOCAL_CRATE
+ }
+
+ #[inline]
+ pub fn as_local(self) -> Option<LocalDefId> {
+ if self.is_local() { Some(LocalDefId { local_def_index: self.index }) } else { None }
+ }
+
+ #[inline]
+ #[track_caller]
+ pub fn expect_local(self) -> LocalDefId {
+ // NOTE: `match` below is required to apply `#[track_caller]`,
+ // i.e. don't use closures.
+ match self.as_local() {
+ Some(local_def_id) => local_def_id,
+ None => panic!("DefId::expect_local: `{:?}` isn't local", self),
+ }
+ }
+
+ #[inline]
+ pub fn is_crate_root(self) -> bool {
+ self.index == CRATE_DEF_INDEX
+ }
+
+ #[inline]
+ pub fn as_crate_root(self) -> Option<CrateNum> {
+ if self.is_crate_root() { Some(self.krate) } else { None }
+ }
+
+ #[inline]
+ pub fn is_top_level_module(self) -> bool {
+ self.is_local() && self.is_crate_root()
+ }
+}
+
+impl<E: Encoder> Encodable<E> for DefId {
+ default fn encode(&self, s: &mut E) {
+ self.krate.encode(s);
+ self.index.encode(s);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for DefId {
+ default fn decode(d: &mut D) -> DefId {
+ DefId { krate: Decodable::decode(d), index: Decodable::decode(d) }
+ }
+}
+
+pub fn default_def_id_debug(def_id: DefId, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("DefId").field("krate", &def_id.krate).field("index", &def_id.index).finish()
+}
+
+pub static DEF_ID_DEBUG: AtomicRef<fn(DefId, &mut fmt::Formatter<'_>) -> fmt::Result> =
+ AtomicRef::new(&(default_def_id_debug as fn(_, &mut fmt::Formatter<'_>) -> _));
+
+impl fmt::Debug for DefId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (*DEF_ID_DEBUG)(*self, f)
+ }
+}
+
+rustc_data_structures::define_id_collections!(DefIdMap, DefIdSet, DefId);
+
+/// A `LocalDefId` is equivalent to a `DefId` with `krate == LOCAL_CRATE`. Since
+/// we encode this information in the type, we can ensure at compile time that
+/// no `DefId`s from upstream crates get thrown into the mix. There are quite a
+/// few cases where we know that only `DefId`s from the local crate are expected;
+/// a `DefId` from a different crate would signify a bug somewhere. This
+/// is when `LocalDefId` comes in handy.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct LocalDefId {
+ pub local_def_index: DefIndex,
+}
+
+// To ensure correctness of incremental compilation,
+// `LocalDefId` must not implement `Ord` or `PartialOrd`.
+// See https://github.com/rust-lang/rust/issues/90317.
+impl !Ord for LocalDefId {}
+impl !PartialOrd for LocalDefId {}
+
+pub const CRATE_DEF_ID: LocalDefId = LocalDefId { local_def_index: CRATE_DEF_INDEX };
+
+impl Idx for LocalDefId {
+ #[inline]
+ fn new(idx: usize) -> Self {
+ LocalDefId { local_def_index: Idx::new(idx) }
+ }
+ #[inline]
+ fn index(self) -> usize {
+ self.local_def_index.index()
+ }
+}
+
+impl LocalDefId {
+ #[inline]
+ pub fn to_def_id(self) -> DefId {
+ DefId { krate: LOCAL_CRATE, index: self.local_def_index }
+ }
+
+ #[inline]
+ pub fn is_top_level_module(self) -> bool {
+ self == CRATE_DEF_ID
+ }
+}
+
+impl fmt::Debug for LocalDefId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.to_def_id().fmt(f)
+ }
+}
+
+impl<E: Encoder> Encodable<E> for LocalDefId {
+ fn encode(&self, s: &mut E) {
+ self.to_def_id().encode(s);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for LocalDefId {
+ fn decode(d: &mut D) -> LocalDefId {
+ DefId::decode(d).expect_local()
+ }
+}
+
+rustc_data_structures::define_id_collections!(LocalDefIdMap, LocalDefIdSet, LocalDefId);
+
+impl<CTX: HashStableContext> HashStable<CTX> for DefId {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.to_stable_hash_key(hcx).hash_stable(hcx, hasher);
+ }
+}
+
+impl<CTX: HashStableContext> HashStable<CTX> for LocalDefId {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.to_stable_hash_key(hcx).hash_stable(hcx, hasher);
+ }
+}
+
+impl<CTX: HashStableContext> HashStable<CTX> for CrateNum {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.to_stable_hash_key(hcx).hash_stable(hcx, hasher);
+ }
+}
+
+impl<CTX: HashStableContext> ToStableHashKey<CTX> for DefId {
+ type KeyType = DefPathHash;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &CTX) -> DefPathHash {
+ hcx.def_path_hash(*self)
+ }
+}
+
+impl<CTX: HashStableContext> ToStableHashKey<CTX> for LocalDefId {
+ type KeyType = DefPathHash;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &CTX) -> DefPathHash {
+ hcx.def_path_hash(self.to_def_id())
+ }
+}
+
+impl<CTX: HashStableContext> ToStableHashKey<CTX> for CrateNum {
+ type KeyType = DefPathHash;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &CTX) -> DefPathHash {
+ self.as_def_id().to_stable_hash_key(hcx)
+ }
+}
diff --git a/compiler/rustc_span/src/edition.rs b/compiler/rustc_span/src/edition.rs
new file mode 100644
index 000000000..065d3660e
--- /dev/null
+++ b/compiler/rustc_span/src/edition.rs
@@ -0,0 +1,110 @@
+use crate::symbol::{sym, Symbol};
+use std::fmt;
+use std::str::FromStr;
+
+use rustc_macros::HashStable_Generic;
+
+/// The edition of the compiler. (See [RFC 2052](https://github.com/rust-lang/rfcs/blob/master/text/2052-epochs.md).)
+#[derive(Clone, Copy, Hash, PartialEq, PartialOrd, Debug, Encodable, Decodable, Eq)]
+#[derive(HashStable_Generic)]
+pub enum Edition {
+ // When adding new editions, be sure to do the following:
+ //
+ // - update the `ALL_EDITIONS` const
+ // - update the `EDITION_NAME_LIST` const
+ // - add a `rust_####()` function to the session
+ // - update the enum in Cargo's sources as well
+ //
+ // Editions *must* be kept in order, oldest to newest.
+ /// The 2015 edition
+ Edition2015,
+ /// The 2018 edition
+ Edition2018,
+ /// The 2021 edition
+ Edition2021,
+ /// The 2024 edition
+ Edition2024,
+}
+
+// Must be in order from oldest to newest.
+pub const ALL_EDITIONS: &[Edition] =
+ &[Edition::Edition2015, Edition::Edition2018, Edition::Edition2021, Edition::Edition2024];
+
+pub const EDITION_NAME_LIST: &str = "2015|2018|2021|2024";
+
+pub const DEFAULT_EDITION: Edition = Edition::Edition2015;
+
+pub const LATEST_STABLE_EDITION: Edition = Edition::Edition2021;
+
+impl fmt::Display for Edition {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let s = match *self {
+ Edition::Edition2015 => "2015",
+ Edition::Edition2018 => "2018",
+ Edition::Edition2021 => "2021",
+ Edition::Edition2024 => "2024",
+ };
+ write!(f, "{}", s)
+ }
+}
+
+impl Edition {
+ pub fn lint_name(&self) -> &'static str {
+ match *self {
+ Edition::Edition2015 => "rust_2015_compatibility",
+ Edition::Edition2018 => "rust_2018_compatibility",
+ Edition::Edition2021 => "rust_2021_compatibility",
+ Edition::Edition2024 => "rust_2024_compatibility",
+ }
+ }
+
+ pub fn feature_name(&self) -> Symbol {
+ match *self {
+ Edition::Edition2015 => sym::rust_2015_preview,
+ Edition::Edition2018 => sym::rust_2018_preview,
+ Edition::Edition2021 => sym::rust_2021_preview,
+ Edition::Edition2024 => sym::rust_2024_preview,
+ }
+ }
+
+ pub fn is_stable(&self) -> bool {
+ match *self {
+ Edition::Edition2015 => true,
+ Edition::Edition2018 => true,
+ Edition::Edition2021 => true,
+ Edition::Edition2024 => false,
+ }
+ }
+
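+ /// Is this edition exactly Rust 2015? (Unlike the checks below, this is an
+ /// equality test, not a "this edition or later" test.)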
+ pub fn rust_2015(&self) -> bool {
+ *self == Edition::Edition2015
+ }
+
+ /// Are we allowed to use features from the Rust 2018 edition?
+ pub fn rust_2018(&self) -> bool {
+ *self >= Edition::Edition2018
+ }
+
+ /// Are we allowed to use features from the Rust 2021 edition?
+ pub fn rust_2021(&self) -> bool {
+ *self >= Edition::Edition2021
+ }
+
+ /// Are we allowed to use features from the Rust 2024 edition?
+ pub fn rust_2024(&self) -> bool {
+ *self >= Edition::Edition2024
+ }
+}
+
+impl FromStr for Edition {
+ type Err = ();
+ fn from_str(s: &str) -> Result<Self, ()> {
+ match s {
+ "2015" => Ok(Edition::Edition2015),
+ "2018" => Ok(Edition::Edition2018),
+ "2021" => Ok(Edition::Edition2021),
+ "2024" => Ok(Edition::Edition2024),
+ _ => Err(()),
+ }
+ }
+}
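+
+// Illustrative (a sketch, not code from this module): editions parse from
+// their year and compare chronologically, so edition checks reduce to `>=`:
+//
+// ```ignore (illustrative)
+// let e: Edition = "2021".parse().unwrap();
+// assert!(e.rust_2018() && e.rust_2021() && !e.rust_2024());
+// assert!(e.is_stable());
+// ```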
diff --git a/compiler/rustc_span/src/fatal_error.rs b/compiler/rustc_span/src/fatal_error.rs
new file mode 100644
index 000000000..fa84c486d
--- /dev/null
+++ b/compiler/rustc_span/src/fatal_error.rs
@@ -0,0 +1,26 @@
+/// Used as a return value to signify that a fatal error occurred. (It is also
+/// used as the argument to panic at the moment, but that will eventually
+/// not be true.)
+#[derive(Copy, Clone, Debug)]
+#[must_use]
+pub struct FatalError;
+
+pub struct FatalErrorMarker;
+
+// Don't implement Send on FatalError. This makes it impossible to panic!(FatalError).
+// We don't want to invoke the panic handler and print a backtrace for fatal errors.
+impl !Send for FatalError {}
+
+impl FatalError {
+ pub fn raise(self) -> ! {
+ std::panic::resume_unwind(Box::new(FatalErrorMarker))
+ }
+}
+
+impl std::fmt::Display for FatalError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "fatal error")
+ }
+}
+
+impl std::error::Error for FatalError {}
diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs
new file mode 100644
index 000000000..e169d3c7c
--- /dev/null
+++ b/compiler/rustc_span/src/hygiene.rs
@@ -0,0 +1,1528 @@
+//! Machinery for hygienic macros.
+//!
+//! Inspired by Matthew Flatt et al., “Macros That Work Together: Compile-Time Bindings, Partial
+//! Expansion, and Definition Contexts,” *Journal of Functional Programming* 22, no. 2
+//! (March 1, 2012): 181–216, <https://doi.org/10.1017/S0956796812000093>.
+
+// Hygiene data is stored in a global variable and accessed via TLS, which
+// means that accesses are somewhat expensive. (`HygieneData::with`
+// encapsulates a single access.) Therefore, on hot code paths it is worth
+// ensuring that multiple HygieneData accesses are combined into a single
+// `HygieneData::with`.
+//
+// This explains why `HygieneData`, `SyntaxContext` and `ExpnId` have interfaces
+// with a certain amount of redundancy in them. For example,
+// `SyntaxContext::outer_expn_data` combines `SyntaxContext::outer` and
+// `ExpnId::expn_data` so that two `HygieneData` accesses can be performed within
+// a single `HygieneData::with` call.
+//
+// It also explains why many functions appear in `HygieneData` and again in
+// `SyntaxContext` or `ExpnId`. For example, `HygieneData::outer` and
+// `SyntaxContext::outer` do the same thing, but the former is for use within a
+// `HygieneData::with` call while the latter is for use outside such a call.
+// When modifying this file it is important to understand this distinction,
+// because getting it wrong can lead to nested `HygieneData::with` calls that
+// trigger runtime aborts. (Fortunately these are obvious and easy to fix.)
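+//
+// For example (an illustrative sketch, not code from this module), instead of
+// two separate accesses:
+//
+// ```ignore (illustrative)
+// let expn_id = ctxt.outer_expn();      // first `HygieneData::with` call
+// let expn_data = expn_id.expn_data();  // second `HygieneData::with` call
+// ```
+//
+// prefer the combined accessor, which locks `HygieneData` only once:
+//
+// ```ignore (illustrative)
+// let expn_data = ctxt.outer_expn_data();
+// ```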
+
+use crate::edition::Edition;
+use crate::symbol::{kw, sym, Symbol};
+use crate::with_session_globals;
+use crate::{HashStableContext, Span, DUMMY_SP};
+
+use crate::def_id::{CrateNum, DefId, StableCrateId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::stable_hasher::HashingControls;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{Lock, Lrc};
+use rustc_data_structures::unhash::UnhashMap;
+use rustc_index::vec::IndexVec;
+use rustc_macros::HashStable_Generic;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use std::fmt;
+use std::hash::Hash;
+use tracing::*;
+
+/// A `SyntaxContext` represents a chain of pairs `(ExpnId, Transparency)` named "marks".
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct SyntaxContext(u32);
+
+#[derive(Debug, Encodable, Decodable, Clone)]
+pub struct SyntaxContextData {
+ outer_expn: ExpnId,
+ outer_transparency: Transparency,
+ parent: SyntaxContext,
+ /// This context, but with all transparent and semi-transparent expansions filtered away.
+ opaque: SyntaxContext,
+ /// This context, but with all transparent expansions filtered away.
+ opaque_and_semitransparent: SyntaxContext,
+ /// Name of the crate to which `$crate` with this context would resolve.
+ dollar_crate_name: Symbol,
+}
+
+rustc_index::newtype_index! {
+ /// A unique ID associated with a macro invocation and expansion.
+ pub struct ExpnIndex {
+ ENCODABLE = custom
+ }
+}
+
+/// A unique ID associated with a macro invocation and expansion.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct ExpnId {
+ pub krate: CrateNum,
+ pub local_id: ExpnIndex,
+}
+
+impl fmt::Debug for ExpnId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Generates output like `crate0::{{expn2}}`.
+ write!(f, "{:?}::{{{{expn{}}}}}", self.krate, self.local_id.private)
+ }
+}
+
+rustc_index::newtype_index! {
+ /// A unique ID associated with a macro invocation and expansion.
+ pub struct LocalExpnId {
+ ENCODABLE = custom
+ ORD_IMPL = custom
+ DEBUG_FORMAT = "expn{}"
+ }
+}
+
+// To ensure correctness of incremental compilation,
+// `LocalExpnId` must not implement `Ord` or `PartialOrd`.
+// See https://github.com/rust-lang/rust/issues/90317.
+impl !Ord for LocalExpnId {}
+impl !PartialOrd for LocalExpnId {}
+
+/// Assert that the provided `HashStableContext` is configured with the 'default'
+/// `HashingControls`. We should always have bailed out before getting to here
+/// with a non-default mode. With this check in place, we can avoid the need
+/// to maintain separate versions of `ExpnData` hashes for each permutation
+/// of `HashingControls` settings.
+fn assert_default_hashing_controls<CTX: HashStableContext>(ctx: &CTX, msg: &str) {
+ match ctx.hashing_controls() {
+ // Note that we require that `hash_spans` be set according to the global
+ // `-Z incremental-ignore-spans` option. Normally, this option is disabled,
+ // which will cause us to require that this method always be called with `Span` hashing
+ // enabled.
+ HashingControls { hash_spans }
+ if hash_spans == !ctx.unstable_opts_incremental_ignore_spans() => {}
+ other => panic!("Attempted hashing of {msg} with non-default HashingControls: {:?}", other),
+ }
+}
+
+/// A unique hash value associated to an expansion.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encodable, Decodable, HashStable_Generic)]
+pub struct ExpnHash(Fingerprint);
+
+impl ExpnHash {
+ /// Returns the [StableCrateId] identifying the crate this [ExpnHash]
+ /// originates from.
+ #[inline]
+ pub fn stable_crate_id(self) -> StableCrateId {
+ StableCrateId(self.0.as_value().0)
+ }
+
+ /// Returns the crate-local part of the [ExpnHash].
+ ///
+ /// Used for tests.
+ #[inline]
+ pub fn local_hash(self) -> u64 {
+ self.0.as_value().1
+ }
+
+ #[inline]
+ pub fn is_root(self) -> bool {
+ self.0 == Fingerprint::ZERO
+ }
+
+ /// Builds a new [ExpnHash] with the given [StableCrateId] and
+ /// `local_hash`, where `local_hash` must be unique within its crate.
+ fn new(stable_crate_id: StableCrateId, local_hash: u64) -> ExpnHash {
+ ExpnHash(Fingerprint::new(stable_crate_id.0, local_hash))
+ }
+}
+
+/// A property of a macro expansion that determines how identifiers
+/// produced by that expansion are resolved.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Hash, Debug, Encodable, Decodable)]
+#[derive(HashStable_Generic)]
+pub enum Transparency {
+ /// An identifier produced by a transparent expansion is always resolved at the call site.
+ /// Call-site spans in procedural macros and hygiene opt-out in `macro` should use this.
+ Transparent,
+ /// An identifier produced by a semi-transparent expansion may be resolved
+ /// either at the call site or at the definition site.
+ /// If it's a local variable, a label, or `$crate`, then it's resolved at the definition site.
+ /// Otherwise it's resolved at the call site.
+ /// `macro_rules` macros behave like this; built-in macros currently behave like this too,
+ /// but that's an implementation detail.
+ SemiTransparent,
+ /// An identifier produced by an opaque expansion is always resolved at the definition site.
+ /// Def-site spans in procedural macros and identifiers from `macro` by default use this.
+ Opaque,
+}
+
+impl LocalExpnId {
+ /// The ID of the theoretical expansion that generates freshly parsed, unexpanded AST.
+ pub const ROOT: LocalExpnId = LocalExpnId::from_u32(0);
+
+ #[inline]
+ pub fn from_raw(idx: ExpnIndex) -> LocalExpnId {
+ LocalExpnId::from_u32(idx.as_u32())
+ }
+
+ #[inline]
+ pub fn as_raw(self) -> ExpnIndex {
+ ExpnIndex::from_u32(self.as_u32())
+ }
+
+ pub fn fresh_empty() -> LocalExpnId {
+ HygieneData::with(|data| {
+ let expn_id = data.local_expn_data.push(None);
+ let _eid = data.local_expn_hashes.push(ExpnHash(Fingerprint::ZERO));
+ debug_assert_eq!(expn_id, _eid);
+ expn_id
+ })
+ }
+
+ pub fn fresh(mut expn_data: ExpnData, ctx: impl HashStableContext) -> LocalExpnId {
+ debug_assert_eq!(expn_data.parent.krate, LOCAL_CRATE);
+ let expn_hash = update_disambiguator(&mut expn_data, ctx);
+ HygieneData::with(|data| {
+ let expn_id = data.local_expn_data.push(Some(expn_data));
+ let _eid = data.local_expn_hashes.push(expn_hash);
+ debug_assert_eq!(expn_id, _eid);
+ let _old_id = data.expn_hash_to_expn_id.insert(expn_hash, expn_id.to_expn_id());
+ debug_assert!(_old_id.is_none());
+ expn_id
+ })
+ }
+
+ #[inline]
+ pub fn expn_hash(self) -> ExpnHash {
+ HygieneData::with(|data| data.local_expn_hash(self))
+ }
+
+ #[inline]
+ pub fn expn_data(self) -> ExpnData {
+ HygieneData::with(|data| data.local_expn_data(self).clone())
+ }
+
+ #[inline]
+ pub fn to_expn_id(self) -> ExpnId {
+ ExpnId { krate: LOCAL_CRATE, local_id: self.as_raw() }
+ }
+
+ #[inline]
+ pub fn set_expn_data(self, mut expn_data: ExpnData, ctx: impl HashStableContext) {
+ debug_assert_eq!(expn_data.parent.krate, LOCAL_CRATE);
+ let expn_hash = update_disambiguator(&mut expn_data, ctx);
+ HygieneData::with(|data| {
+ let old_expn_data = &mut data.local_expn_data[self];
+ assert!(old_expn_data.is_none(), "expansion data is reset for an expansion ID");
+ *old_expn_data = Some(expn_data);
+ debug_assert_eq!(data.local_expn_hashes[self].0, Fingerprint::ZERO);
+ data.local_expn_hashes[self] = expn_hash;
+ let _old_id = data.expn_hash_to_expn_id.insert(expn_hash, self.to_expn_id());
+ debug_assert!(_old_id.is_none());
+ });
+ }
+
+ #[inline]
+ pub fn is_descendant_of(self, ancestor: LocalExpnId) -> bool {
+ self.to_expn_id().is_descendant_of(ancestor.to_expn_id())
+ }
+
+ /// `expn_id.outer_expn_is_descendant_of(ctxt)` is equivalent to but faster than
+ /// `expn_id.is_descendant_of(ctxt.outer_expn())`.
+ #[inline]
+ pub fn outer_expn_is_descendant_of(self, ctxt: SyntaxContext) -> bool {
+ self.to_expn_id().outer_expn_is_descendant_of(ctxt)
+ }
+
+ /// Returns the span of the macro that originally caused this expansion.
+ ///
+ /// Stops backtracing at the `include!` boundary.
+ #[inline]
+ pub fn expansion_cause(self) -> Option<Span> {
+ self.to_expn_id().expansion_cause()
+ }
+
+ #[inline]
+ #[track_caller]
+ pub fn parent(self) -> LocalExpnId {
+ self.expn_data().parent.as_local().unwrap()
+ }
+}
+
+impl ExpnId {
+ /// The ID of the theoretical expansion that generates freshly parsed, unexpanded AST.
+ /// Invariant: we do not create any ExpnId with local_id == 0 and krate != 0.
+ pub const fn root() -> ExpnId {
+ ExpnId { krate: LOCAL_CRATE, local_id: ExpnIndex::from_u32(0) }
+ }
+
+ #[inline]
+ pub fn expn_hash(self) -> ExpnHash {
+ HygieneData::with(|data| data.expn_hash(self))
+ }
+
+ #[inline]
+ pub fn from_hash(hash: ExpnHash) -> Option<ExpnId> {
+ HygieneData::with(|data| data.expn_hash_to_expn_id.get(&hash).copied())
+ }
+
+ #[inline]
+ pub fn as_local(self) -> Option<LocalExpnId> {
+ if self.krate == LOCAL_CRATE { Some(LocalExpnId::from_raw(self.local_id)) } else { None }
+ }
+
+ #[inline]
+ #[track_caller]
+ pub fn expect_local(self) -> LocalExpnId {
+ self.as_local().unwrap()
+ }
+
+ #[inline]
+ pub fn expn_data(self) -> ExpnData {
+ HygieneData::with(|data| data.expn_data(self).clone())
+ }
+
+ #[inline]
+ pub fn is_descendant_of(self, ancestor: ExpnId) -> bool {
+ // a few "fast path" cases to avoid locking HygieneData
+ if ancestor == ExpnId::root() || ancestor == self {
+ return true;
+ }
+ if ancestor.krate != self.krate {
+ return false;
+ }
+ HygieneData::with(|data| data.is_descendant_of(self, ancestor))
+ }
+
+ /// `expn_id.outer_expn_is_descendant_of(ctxt)` is equivalent to but faster than
+ /// `expn_id.is_descendant_of(ctxt.outer_expn())`.
+ pub fn outer_expn_is_descendant_of(self, ctxt: SyntaxContext) -> bool {
+ HygieneData::with(|data| data.is_descendant_of(self, data.outer_expn(ctxt)))
+ }
+
+ /// Returns the span of the macro that originally caused this expansion.
+ ///
+ /// Stops backtracing at the `include!` boundary.
+ pub fn expansion_cause(mut self) -> Option<Span> {
+ let mut last_macro = None;
+ loop {
+ let expn_data = self.expn_data();
+ // Stop going up the backtrace once include! is encountered
+ if expn_data.is_root()
+ || expn_data.kind == ExpnKind::Macro(MacroKind::Bang, sym::include)
+ {
+ break;
+ }
+ self = expn_data.call_site.ctxt().outer_expn();
+ last_macro = Some(expn_data.call_site);
+ }
+ last_macro
+ }
+}
+
+#[derive(Debug)]
+pub struct HygieneData {
+ /// Each expansion should have an associated expansion data, but sometimes there's a delay
+ /// between creation of an expansion ID and obtaining its data (e.g. macros are collected
+ /// first and then resolved later), so we use an `Option` here.
+ local_expn_data: IndexVec<LocalExpnId, Option<ExpnData>>,
+ local_expn_hashes: IndexVec<LocalExpnId, ExpnHash>,
+ /// Data and hash information from external crates. We may eventually want to remove these
+ /// maps, and fetch the information directly from the other crate's metadata like DefIds do.
+ foreign_expn_data: FxHashMap<ExpnId, ExpnData>,
+ foreign_expn_hashes: FxHashMap<ExpnId, ExpnHash>,
+ expn_hash_to_expn_id: UnhashMap<ExpnHash, ExpnId>,
+ syntax_context_data: Vec<SyntaxContextData>,
+ syntax_context_map: FxHashMap<(SyntaxContext, ExpnId, Transparency), SyntaxContext>,
+ /// Maps the `local_hash` of an `ExpnData` to the next disambiguator value.
+ /// This is used by `update_disambiguator` to keep track of which `ExpnData`s
+ /// would have collisions without a disambiguator.
+ /// The keys of this map are always computed with `ExpnData.disambiguator`
+ /// set to 0.
+ expn_data_disambiguators: FxHashMap<u64, u32>,
+}
+
+impl HygieneData {
+ pub(crate) fn new(edition: Edition) -> Self {
+ let root_data = ExpnData::default(
+ ExpnKind::Root,
+ DUMMY_SP,
+ edition,
+ Some(CRATE_DEF_ID.to_def_id()),
+ None,
+ );
+
+ HygieneData {
+ local_expn_data: IndexVec::from_elem_n(Some(root_data), 1),
+ local_expn_hashes: IndexVec::from_elem_n(ExpnHash(Fingerprint::ZERO), 1),
+ foreign_expn_data: FxHashMap::default(),
+ foreign_expn_hashes: FxHashMap::default(),
+ expn_hash_to_expn_id: std::iter::once((ExpnHash(Fingerprint::ZERO), ExpnId::root()))
+ .collect(),
+ syntax_context_data: vec![SyntaxContextData {
+ outer_expn: ExpnId::root(),
+ outer_transparency: Transparency::Opaque,
+ parent: SyntaxContext(0),
+ opaque: SyntaxContext(0),
+ opaque_and_semitransparent: SyntaxContext(0),
+ dollar_crate_name: kw::DollarCrate,
+ }],
+ syntax_context_map: FxHashMap::default(),
+ expn_data_disambiguators: FxHashMap::default(),
+ }
+ }
+
+ pub fn with<T, F: FnOnce(&mut HygieneData) -> T>(f: F) -> T {
+ with_session_globals(|session_globals| f(&mut *session_globals.hygiene_data.borrow_mut()))
+ }
+
+ #[inline]
+ fn local_expn_hash(&self, expn_id: LocalExpnId) -> ExpnHash {
+ self.local_expn_hashes[expn_id]
+ }
+
+ #[inline]
+ fn expn_hash(&self, expn_id: ExpnId) -> ExpnHash {
+ match expn_id.as_local() {
+ Some(expn_id) => self.local_expn_hashes[expn_id],
+ None => self.foreign_expn_hashes[&expn_id],
+ }
+ }
+
+ fn local_expn_data(&self, expn_id: LocalExpnId) -> &ExpnData {
+ self.local_expn_data[expn_id].as_ref().expect("no expansion data for an expansion ID")
+ }
+
+ fn expn_data(&self, expn_id: ExpnId) -> &ExpnData {
+ if let Some(expn_id) = expn_id.as_local() {
+ self.local_expn_data[expn_id].as_ref().expect("no expansion data for an expansion ID")
+ } else {
+ &self.foreign_expn_data[&expn_id]
+ }
+ }
+
+ fn is_descendant_of(&self, mut expn_id: ExpnId, ancestor: ExpnId) -> bool {
+ // a couple "fast path" cases to avoid traversing parents in the loop below
+ if ancestor == ExpnId::root() {
+ return true;
+ }
+ if expn_id.krate != ancestor.krate {
+ return false;
+ }
+ loop {
+ if expn_id == ancestor {
+ return true;
+ }
+ if expn_id == ExpnId::root() {
+ return false;
+ }
+ expn_id = self.expn_data(expn_id).parent;
+ }
+ }
+
+ fn normalize_to_macros_2_0(&self, ctxt: SyntaxContext) -> SyntaxContext {
+ self.syntax_context_data[ctxt.0 as usize].opaque
+ }
+
+ fn normalize_to_macro_rules(&self, ctxt: SyntaxContext) -> SyntaxContext {
+ self.syntax_context_data[ctxt.0 as usize].opaque_and_semitransparent
+ }
+
+ fn outer_expn(&self, ctxt: SyntaxContext) -> ExpnId {
+ self.syntax_context_data[ctxt.0 as usize].outer_expn
+ }
+
+ fn outer_mark(&self, ctxt: SyntaxContext) -> (ExpnId, Transparency) {
+ let data = &self.syntax_context_data[ctxt.0 as usize];
+ (data.outer_expn, data.outer_transparency)
+ }
+
+ fn parent_ctxt(&self, ctxt: SyntaxContext) -> SyntaxContext {
+ self.syntax_context_data[ctxt.0 as usize].parent
+ }
+
+ fn remove_mark(&self, ctxt: &mut SyntaxContext) -> (ExpnId, Transparency) {
+ let outer_mark = self.outer_mark(*ctxt);
+ *ctxt = self.parent_ctxt(*ctxt);
+ outer_mark
+ }
+
+ fn marks(&self, mut ctxt: SyntaxContext) -> Vec<(ExpnId, Transparency)> {
+ let mut marks = Vec::new();
+ while ctxt != SyntaxContext::root() {
+ debug!("marks: getting parent of {:?}", ctxt);
+ marks.push(self.outer_mark(ctxt));
+ ctxt = self.parent_ctxt(ctxt);
+ }
+ marks.reverse();
+ marks
+ }
+
+ fn walk_chain(&self, mut span: Span, to: SyntaxContext) -> Span {
+ debug!("walk_chain({:?}, {:?})", span, to);
+ debug!("walk_chain: span ctxt = {:?}", span.ctxt());
+ while span.from_expansion() && span.ctxt() != to {
+ let outer_expn = self.outer_expn(span.ctxt());
+ debug!("walk_chain({:?}): outer_expn={:?}", span, outer_expn);
+ let expn_data = self.expn_data(outer_expn);
+ debug!("walk_chain({:?}): expn_data={:?}", span, expn_data);
+ span = expn_data.call_site;
+ }
+ span
+ }
+
+ fn adjust(&self, ctxt: &mut SyntaxContext, expn_id: ExpnId) -> Option<ExpnId> {
+ let mut scope = None;
+ while !self.is_descendant_of(expn_id, self.outer_expn(*ctxt)) {
+ scope = Some(self.remove_mark(ctxt).0);
+ }
+ scope
+ }
+
+ fn apply_mark(
+ &mut self,
+ ctxt: SyntaxContext,
+ expn_id: ExpnId,
+ transparency: Transparency,
+ ) -> SyntaxContext {
+ assert_ne!(expn_id, ExpnId::root());
+ if transparency == Transparency::Opaque {
+ return self.apply_mark_internal(ctxt, expn_id, transparency);
+ }
+
+ let call_site_ctxt = self.expn_data(expn_id).call_site.ctxt();
+ let mut call_site_ctxt = if transparency == Transparency::SemiTransparent {
+ self.normalize_to_macros_2_0(call_site_ctxt)
+ } else {
+ self.normalize_to_macro_rules(call_site_ctxt)
+ };
+
+ if call_site_ctxt == SyntaxContext::root() {
+ return self.apply_mark_internal(ctxt, expn_id, transparency);
+ }
+
+ // Otherwise, `expn_id` is a macros 1.0 definition and the call site is in a
+ // macros 2.0 expansion, i.e., a macros 1.0 invocation is in a macros 2.0 definition.
+ //
+ // In this case, the tokens from the macros 1.0 definition inherit the hygiene
+ // at their invocation. That is, we pretend that the macros 1.0 definition
+ // was defined at its invocation (i.e., inside the macros 2.0 definition)
+ // so that the macros 2.0 definition remains hygienic.
+ //
+ // See the example at `test/ui/hygiene/legacy_interaction.rs`.
+ for (expn_id, transparency) in self.marks(ctxt) {
+ call_site_ctxt = self.apply_mark_internal(call_site_ctxt, expn_id, transparency);
+ }
+ self.apply_mark_internal(call_site_ctxt, expn_id, transparency)
+ }
+
+ fn apply_mark_internal(
+ &mut self,
+ ctxt: SyntaxContext,
+ expn_id: ExpnId,
+ transparency: Transparency,
+ ) -> SyntaxContext {
+ let syntax_context_data = &mut self.syntax_context_data;
+ let mut opaque = syntax_context_data[ctxt.0 as usize].opaque;
+ let mut opaque_and_semitransparent =
+ syntax_context_data[ctxt.0 as usize].opaque_and_semitransparent;
+
+ if transparency >= Transparency::Opaque {
+ let parent = opaque;
+ opaque = *self
+ .syntax_context_map
+ .entry((parent, expn_id, transparency))
+ .or_insert_with(|| {
+ let new_opaque = SyntaxContext(syntax_context_data.len() as u32);
+ syntax_context_data.push(SyntaxContextData {
+ outer_expn: expn_id,
+ outer_transparency: transparency,
+ parent,
+ opaque: new_opaque,
+ opaque_and_semitransparent: new_opaque,
+ dollar_crate_name: kw::DollarCrate,
+ });
+ new_opaque
+ });
+ }
+
+ if transparency >= Transparency::SemiTransparent {
+ let parent = opaque_and_semitransparent;
+ opaque_and_semitransparent = *self
+ .syntax_context_map
+ .entry((parent, expn_id, transparency))
+ .or_insert_with(|| {
+ let new_opaque_and_semitransparent =
+ SyntaxContext(syntax_context_data.len() as u32);
+ syntax_context_data.push(SyntaxContextData {
+ outer_expn: expn_id,
+ outer_transparency: transparency,
+ parent,
+ opaque,
+ opaque_and_semitransparent: new_opaque_and_semitransparent,
+ dollar_crate_name: kw::DollarCrate,
+ });
+ new_opaque_and_semitransparent
+ });
+ }
+
+ let parent = ctxt;
+ *self.syntax_context_map.entry((parent, expn_id, transparency)).or_insert_with(|| {
+ let new_opaque_and_semitransparent_and_transparent =
+ SyntaxContext(syntax_context_data.len() as u32);
+ syntax_context_data.push(SyntaxContextData {
+ outer_expn: expn_id,
+ outer_transparency: transparency,
+ parent,
+ opaque,
+ opaque_and_semitransparent,
+ dollar_crate_name: kw::DollarCrate,
+ });
+ new_opaque_and_semitransparent_and_transparent
+ })
+ }
+}
+
+pub fn clear_syntax_context_map() {
+ HygieneData::with(|data| data.syntax_context_map = FxHashMap::default());
+}
+
+pub fn walk_chain(span: Span, to: SyntaxContext) -> Span {
+ HygieneData::with(|data| data.walk_chain(span, to))
+}
+
+pub fn update_dollar_crate_names(mut get_name: impl FnMut(SyntaxContext) -> Symbol) {
+ // The new contexts that need updating are at the end of the list and have `$crate` as a name.
+ let (len, to_update) = HygieneData::with(|data| {
+ (
+ data.syntax_context_data.len(),
+ data.syntax_context_data
+ .iter()
+ .rev()
+ .take_while(|scdata| scdata.dollar_crate_name == kw::DollarCrate)
+ .count(),
+ )
+ });
+ // The callback must be called from outside of the `HygieneData` lock,
+ // since it will try to acquire it too.
+ let range_to_update = len - to_update..len;
+ let names: Vec<_> =
+ range_to_update.clone().map(|idx| get_name(SyntaxContext::from_u32(idx as u32))).collect();
+ HygieneData::with(|data| {
+ range_to_update.zip(names).for_each(|(idx, name)| {
+ data.syntax_context_data[idx].dollar_crate_name = name;
+ })
+ })
+}
+
+pub fn debug_hygiene_data(verbose: bool) -> String {
+ HygieneData::with(|data| {
+ if verbose {
+ format!("{:#?}", data)
+ } else {
+ let mut s = String::from("Expansions:");
+ let mut debug_expn_data = |(id, expn_data): (&ExpnId, &ExpnData)| {
+ s.push_str(&format!(
+ "\n{:?}: parent: {:?}, call_site_ctxt: {:?}, def_site_ctxt: {:?}, kind: {:?}",
+ id,
+ expn_data.parent,
+ expn_data.call_site.ctxt(),
+ expn_data.def_site.ctxt(),
+ expn_data.kind,
+ ))
+ };
+ data.local_expn_data.iter_enumerated().for_each(|(id, expn_data)| {
+ let expn_data = expn_data.as_ref().expect("no expansion data for an expansion ID");
+ debug_expn_data((&id.to_expn_id(), expn_data))
+ });
+
+ // Sort the hash map for more reproducible output.
+ // Because of this, it is fine to rely on the unstable iteration order of the map.
+ #[allow(rustc::potential_query_instability)]
+ let mut foreign_expn_data: Vec<_> = data.foreign_expn_data.iter().collect();
+ foreign_expn_data.sort_by_key(|(id, _)| (id.krate, id.local_id));
+ foreign_expn_data.into_iter().for_each(debug_expn_data);
+ s.push_str("\n\nSyntaxContexts:");
+ data.syntax_context_data.iter().enumerate().for_each(|(id, ctxt)| {
+ s.push_str(&format!(
+ "\n#{}: parent: {:?}, outer_mark: ({:?}, {:?})",
+ id, ctxt.parent, ctxt.outer_expn, ctxt.outer_transparency,
+ ));
+ });
+ s
+ }
+ })
+}
+
+impl SyntaxContext {
+ #[inline]
+ pub const fn root() -> Self {
+ SyntaxContext(0)
+ }
+
+ #[inline]
+ pub(crate) fn as_u32(self) -> u32 {
+ self.0
+ }
+
+ #[inline]
+ pub(crate) fn from_u32(raw: u32) -> SyntaxContext {
+ SyntaxContext(raw)
+ }
+
+ /// Extend a syntax context with a given expansion and transparency.
+ pub(crate) fn apply_mark(self, expn_id: ExpnId, transparency: Transparency) -> SyntaxContext {
+ HygieneData::with(|data| data.apply_mark(self, expn_id, transparency))
+ }
+
+ /// Pulls a single mark off of the syntax context. This effectively moves the
+ /// context up one macro definition level. That is, if we have a nested macro
+ /// definition as follows:
+ ///
+ /// ```ignore (illustrative)
+ /// macro_rules! f {
+ /// macro_rules! g {
+ /// ...
+ /// }
+ /// }
+ /// ```
+ ///
+ /// and we have a SyntaxContext that is referring to something declared by an invocation
+ /// of g (call it g1), calling remove_mark will result in the SyntaxContext for the
+ /// invocation of f that created g1.
+ /// Returns the mark that was removed.
+ pub fn remove_mark(&mut self) -> ExpnId {
+ HygieneData::with(|data| data.remove_mark(self).0)
+ }
+
+ pub fn marks(self) -> Vec<(ExpnId, Transparency)> {
+ HygieneData::with(|data| data.marks(self))
+ }
+
+ /// Adjust this context for resolution in a scope created by the given expansion.
+ /// For example, consider the following three resolutions of `f`:
+ ///
+ /// ```rust
+ /// #![feature(decl_macro)]
+ /// mod foo { pub fn f() {} } // `f`'s `SyntaxContext` is empty.
+ /// m!(f);
+ /// macro m($f:ident) {
+ /// mod bar {
+ /// pub fn f() {} // `f`'s `SyntaxContext` has a single `ExpnId` from `m`.
+ /// pub fn $f() {} // `$f`'s `SyntaxContext` is empty.
+ /// }
+ /// foo::f(); // `f`'s `SyntaxContext` has a single `ExpnId` from `m`
+ /// //^ Since `mod foo` is outside this expansion, `adjust` removes the mark from `f`,
+ /// //| and it resolves to `::foo::f`.
+ /// bar::f(); // `f`'s `SyntaxContext` has a single `ExpnId` from `m`
+ /// //^ Since `mod bar` is not outside this expansion, `adjust` does not change `f`,
+ /// //| and it resolves to `::bar::f`.
+ /// bar::$f(); // `f`'s `SyntaxContext` is empty.
+ /// //^ Since `mod bar` is not outside this expansion, `adjust` does not change `$f`,
+ /// //| and it resolves to `::bar::$f`.
+ /// }
+ /// ```
+ /// This returns the expansion whose definition scope we use to privacy check the resolution,
+ /// or `None` if we privacy check as usual (i.e., not w.r.t. a macro definition scope).
+ pub fn adjust(&mut self, expn_id: ExpnId) -> Option<ExpnId> {
+ HygieneData::with(|data| data.adjust(self, expn_id))
+ }
+
+ /// Like `SyntaxContext::adjust`, but also normalizes `self` to macros 2.0.
+ pub fn normalize_to_macros_2_0_and_adjust(&mut self, expn_id: ExpnId) -> Option<ExpnId> {
+ HygieneData::with(|data| {
+ *self = data.normalize_to_macros_2_0(*self);
+ data.adjust(self, expn_id)
+ })
+ }
+
+ /// Adjust this context for resolution in a scope created by the given expansion
+ /// via a glob import with the given `SyntaxContext`.
+ /// For example:
+ ///
+ /// ```compile_fail,E0425
+ /// #![feature(decl_macro)]
+ /// m!(f);
+ /// macro m($i:ident) {
+ /// mod foo {
+ /// pub fn f() {} // `f`'s `SyntaxContext` has a single `ExpnId` from `m`.
+ /// pub fn $i() {} // `$i`'s `SyntaxContext` is empty.
+ /// }
+ /// n!(f);
+ /// macro n($j:ident) {
+ /// use foo::*;
+ /// f(); // `f`'s `SyntaxContext` has a mark from `m` and a mark from `n`
+ /// //^ `glob_adjust` removes the mark from `n`, so this resolves to `foo::f`.
+ /// $i(); // `$i`'s `SyntaxContext` has a mark from `n`
+ /// //^ `glob_adjust` removes the mark from `n`, so this resolves to `foo::$i`.
+ /// $j(); // `$j`'s `SyntaxContext` has a mark from `m`
+ /// //^ This cannot be glob-adjusted, so this is a resolution error.
+ /// }
+ /// }
+ /// ```
+ /// This returns `None` if the context cannot be glob-adjusted.
+ /// Otherwise, it returns the scope to use when privacy checking (see `adjust` for details).
+ pub fn glob_adjust(&mut self, expn_id: ExpnId, glob_span: Span) -> Option<Option<ExpnId>> {
+ HygieneData::with(|data| {
+ let mut scope = None;
+ let mut glob_ctxt = data.normalize_to_macros_2_0(glob_span.ctxt());
+ while !data.is_descendant_of(expn_id, data.outer_expn(glob_ctxt)) {
+ scope = Some(data.remove_mark(&mut glob_ctxt).0);
+ if data.remove_mark(self).0 != scope.unwrap() {
+ return None;
+ }
+ }
+ if data.adjust(self, expn_id).is_some() {
+ return None;
+ }
+ Some(scope)
+ })
+ }
+
+ /// Undo `glob_adjust` if possible:
+ ///
+ /// ```ignore (illustrative)
+ /// if let Some(privacy_checking_scope) = self.reverse_glob_adjust(expansion, glob_ctxt) {
+ /// assert!(self.glob_adjust(expansion, glob_ctxt) == Some(privacy_checking_scope));
+ /// }
+ /// ```
+ pub fn reverse_glob_adjust(
+ &mut self,
+ expn_id: ExpnId,
+ glob_span: Span,
+ ) -> Option<Option<ExpnId>> {
+ HygieneData::with(|data| {
+ if data.adjust(self, expn_id).is_some() {
+ return None;
+ }
+
+ let mut glob_ctxt = data.normalize_to_macros_2_0(glob_span.ctxt());
+ let mut marks = Vec::new();
+ while !data.is_descendant_of(expn_id, data.outer_expn(glob_ctxt)) {
+ marks.push(data.remove_mark(&mut glob_ctxt));
+ }
+
+ let scope = marks.last().map(|mark| mark.0);
+ while let Some((expn_id, transparency)) = marks.pop() {
+ *self = data.apply_mark(*self, expn_id, transparency);
+ }
+ Some(scope)
+ })
+ }
+
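+    /// Returns whether `self` and `other` are hygienically equal with respect to `expn_id`:
+    /// `self` is normalized to macros 2.0 and adjusted to the scope of `expn_id` before
+    /// being compared with the normalized `other`.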
+ pub fn hygienic_eq(self, other: SyntaxContext, expn_id: ExpnId) -> bool {
+ HygieneData::with(|data| {
+ let mut self_normalized = data.normalize_to_macros_2_0(self);
+ data.adjust(&mut self_normalized, expn_id);
+ self_normalized == data.normalize_to_macros_2_0(other)
+ })
+ }
+
+ #[inline]
+ pub fn normalize_to_macros_2_0(self) -> SyntaxContext {
+ HygieneData::with(|data| data.normalize_to_macros_2_0(self))
+ }
+
+ #[inline]
+ pub fn normalize_to_macro_rules(self) -> SyntaxContext {
+ HygieneData::with(|data| data.normalize_to_macro_rules(self))
+ }
+
+ #[inline]
+ pub fn outer_expn(self) -> ExpnId {
+ HygieneData::with(|data| data.outer_expn(self))
+ }
+
+ /// `ctxt.outer_expn_data()` is equivalent to but faster than
+ /// `ctxt.outer_expn().expn_data()`.
+ #[inline]
+ pub fn outer_expn_data(self) -> ExpnData {
+ HygieneData::with(|data| data.expn_data(data.outer_expn(self)).clone())
+ }
+
+ #[inline]
+ pub fn outer_mark(self) -> (ExpnId, Transparency) {
+ HygieneData::with(|data| data.outer_mark(self))
+ }
+
+ pub fn dollar_crate_name(self) -> Symbol {
+ HygieneData::with(|data| data.syntax_context_data[self.0 as usize].dollar_crate_name)
+ }
+
+ pub fn edition(self) -> Edition {
+ HygieneData::with(|data| data.expn_data(data.outer_expn(self)).edition)
+ }
+}
+
+impl fmt::Debug for SyntaxContext {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "#{}", self.0)
+ }
+}
+
+impl Span {
+ /// Creates a fresh expansion with given properties.
+ /// Expansions are normally created by macros, but in some cases expansions are created for
+ /// other compiler-generated code to set per-span properties like allowed unstable features.
+ /// The returned span belongs to the created expansion and has the new properties,
+ /// but its location is inherited from the current span.
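+    ///
+    /// For example (illustrative, assuming an `expn_data` and a hashing context `ctx` in scope):
+    ///
+    /// ```ignore (illustrative)
+    /// let expn_id = LocalExpnId::fresh(expn_data, ctx);
+    /// let expn_span = span.fresh_expansion(expn_id);
+    /// // The returned span keeps `span`'s location but belongs to the fresh expansion.
+    /// assert_eq!(expn_span.ctxt().outer_expn(), expn_id.to_expn_id());
+    /// ```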
+ pub fn fresh_expansion(self, expn_id: LocalExpnId) -> Span {
+ HygieneData::with(|data| {
+ self.with_ctxt(data.apply_mark(
+ SyntaxContext::root(),
+ expn_id.to_expn_id(),
+ Transparency::Transparent,
+ ))
+ })
+ }
+
+ /// Reuses the span but adds information like the kind of the desugaring and features that are
+ /// allowed inside this span.
+ pub fn mark_with_reason(
+ self,
+ allow_internal_unstable: Option<Lrc<[Symbol]>>,
+ reason: DesugaringKind,
+ edition: Edition,
+ ctx: impl HashStableContext,
+ ) -> Span {
+ let expn_data = ExpnData {
+ allow_internal_unstable,
+ ..ExpnData::default(ExpnKind::Desugaring(reason), self, edition, None, None)
+ };
+ let expn_id = LocalExpnId::fresh(expn_data, ctx);
+ self.fresh_expansion(expn_id)
+ }
+}
+
+/// A subset of properties from both macro definition and macro call available through global data.
+/// Avoid using this if you have access to the original definition or call structures.
+#[derive(Clone, Debug, Encodable, Decodable, HashStable_Generic)]
+pub struct ExpnData {
+ // --- The part unique to each expansion.
+ /// The kind of this expansion - macro or compiler desugaring.
+ pub kind: ExpnKind,
+ /// The expansion that produced this expansion.
+ pub parent: ExpnId,
+    /// The location of the actual macro invocation or syntax sugar, e.g.
+    /// `let x = foo!();` or `if let Some(y) = x {}`.
+ ///
+ /// This may recursively refer to other macro invocations, e.g., if
+ /// `foo!()` invoked `bar!()` internally, and there was an
+ /// expression inside `bar!`; the call_site of the expression in
+ /// the expansion would point to the `bar!` invocation; that
+ /// call_site span would have its own ExpnData, with the call_site
+ /// pointing to the `foo!` invocation.
+ pub call_site: Span,
+ /// Used to force two `ExpnData`s to have different `Fingerprint`s.
+ /// Due to macro expansion, it's possible to end up with two `ExpnId`s
+ /// that have identical `ExpnData`s. This violates the contract of `HashStable`
+ /// - the two `ExpnId`s are not equal, but their `Fingerprint`s are equal
+ /// (since the numerical `ExpnId` value is not considered by the `HashStable`
+ /// implementation).
+ ///
+ /// The `disambiguator` field is set by `update_disambiguator` when two distinct
+ /// `ExpnId`s would end up with the same `Fingerprint`. Since `ExpnData` includes
+ /// a `krate` field, this value only needs to be unique within a single crate.
+ disambiguator: u32,
+
+ // --- The part specific to the macro/desugaring definition.
+ // --- It may be reasonable to share this part between expansions with the same definition,
+ // --- but such sharing is known to bring some minor inconveniences without also bringing
+ // --- noticeable perf improvements (PR #62898).
+ /// The span of the macro definition (possibly dummy).
+    /// This span serves only an informational purpose and is not used for resolution.
+ pub def_site: Span,
+ /// List of `#[unstable]`/feature-gated features that the macro is allowed to use
+    /// internally without forcing the whole crate to opt in to them.
+ pub allow_internal_unstable: Option<Lrc<[Symbol]>>,
+ /// Whether the macro is allowed to use `unsafe` internally
+ /// even if the user crate has `#![forbid(unsafe_code)]`.
+ pub allow_internal_unsafe: bool,
+ /// Enables the macro helper hack (`ident!(...)` -> `$crate::ident!(...)`)
+ /// for a given macro.
+ pub local_inner_macros: bool,
+ /// Edition of the crate in which the macro is defined.
+ pub edition: Edition,
+ /// The `DefId` of the macro being invoked,
+    /// if this `ExpnData` corresponds to a macro invocation.
+ pub macro_def_id: Option<DefId>,
+ /// The normal module (`mod`) in which the expanded macro was defined.
+ pub parent_module: Option<DefId>,
+}
+
+impl !PartialEq for ExpnData {}
+impl !Hash for ExpnData {}
+
+impl ExpnData {
+ pub fn new(
+ kind: ExpnKind,
+ parent: ExpnId,
+ call_site: Span,
+ def_site: Span,
+ allow_internal_unstable: Option<Lrc<[Symbol]>>,
+ allow_internal_unsafe: bool,
+ local_inner_macros: bool,
+ edition: Edition,
+ macro_def_id: Option<DefId>,
+ parent_module: Option<DefId>,
+ ) -> ExpnData {
+ ExpnData {
+ kind,
+ parent,
+ call_site,
+ def_site,
+ allow_internal_unstable,
+ allow_internal_unsafe,
+ local_inner_macros,
+ edition,
+ macro_def_id,
+ parent_module,
+ disambiguator: 0,
+ }
+ }
+
+ /// Constructs expansion data with default properties.
+ pub fn default(
+ kind: ExpnKind,
+ call_site: Span,
+ edition: Edition,
+ macro_def_id: Option<DefId>,
+ parent_module: Option<DefId>,
+ ) -> ExpnData {
+ ExpnData {
+ kind,
+ parent: ExpnId::root(),
+ call_site,
+ def_site: DUMMY_SP,
+ allow_internal_unstable: None,
+ allow_internal_unsafe: false,
+ local_inner_macros: false,
+ edition,
+ macro_def_id,
+ parent_module,
+ disambiguator: 0,
+ }
+ }
+
+ pub fn allow_unstable(
+ kind: ExpnKind,
+ call_site: Span,
+ edition: Edition,
+ allow_internal_unstable: Lrc<[Symbol]>,
+ macro_def_id: Option<DefId>,
+ parent_module: Option<DefId>,
+ ) -> ExpnData {
+ ExpnData {
+ allow_internal_unstable: Some(allow_internal_unstable),
+ ..ExpnData::default(kind, call_site, edition, macro_def_id, parent_module)
+ }
+ }
+
+ #[inline]
+ pub fn is_root(&self) -> bool {
+ matches!(self.kind, ExpnKind::Root)
+ }
+
+ #[inline]
+ fn hash_expn(&self, ctx: &mut impl HashStableContext) -> u64 {
+ let mut hasher = StableHasher::new();
+ self.hash_stable(ctx, &mut hasher);
+ hasher.finish()
+ }
+}
+
+/// Expansion kind.
+#[derive(Clone, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
+pub enum ExpnKind {
+ /// No expansion, aka root expansion. Only `ExpnId::root()` has this kind.
+ Root,
+ /// Expansion produced by a macro.
+ Macro(MacroKind, Symbol),
+ /// Transform done by the compiler on the AST.
+ AstPass(AstPass),
+ /// Desugaring done by the compiler during HIR lowering.
+ Desugaring(DesugaringKind),
+    /// MIR inlining.
+ Inlined,
+}
+
+impl ExpnKind {
+ pub fn descr(&self) -> String {
+ match *self {
+ ExpnKind::Root => kw::PathRoot.to_string(),
+ ExpnKind::Macro(macro_kind, name) => match macro_kind {
+ MacroKind::Bang => format!("{}!", name),
+ MacroKind::Attr => format!("#[{}]", name),
+ MacroKind::Derive => format!("#[derive({})]", name),
+ },
+ ExpnKind::AstPass(kind) => kind.descr().to_string(),
+ ExpnKind::Desugaring(kind) => format!("desugaring of {}", kind.descr()),
+ ExpnKind::Inlined => "inlined source".to_string(),
+ }
+ }
+}
+
+/// The kind of macro invocation or definition.
+#[derive(Clone, Copy, PartialEq, Eq, Encodable, Decodable, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum MacroKind {
+ /// A bang macro `foo!()`.
+ Bang,
+ /// An attribute macro `#[foo]`.
+ Attr,
+    /// A derive macro `#[derive(Foo)]`.
+ Derive,
+}
+
+impl MacroKind {
+ pub fn descr(self) -> &'static str {
+ match self {
+ MacroKind::Bang => "macro",
+ MacroKind::Attr => "attribute macro",
+ MacroKind::Derive => "derive macro",
+ }
+ }
+
+ pub fn descr_expected(self) -> &'static str {
+ match self {
+ MacroKind::Attr => "attribute",
+ _ => self.descr(),
+ }
+ }
+
+ pub fn article(self) -> &'static str {
+ match self {
+ MacroKind::Attr => "an",
+ _ => "a",
+ }
+ }
+}
+
+/// The kind of AST transform.
+#[derive(Clone, Copy, Debug, PartialEq, Encodable, Decodable, HashStable_Generic)]
+pub enum AstPass {
+ StdImports,
+ TestHarness,
+ ProcMacroHarness,
+}
+
+impl AstPass {
+ pub fn descr(self) -> &'static str {
+ match self {
+ AstPass::StdImports => "standard library imports",
+ AstPass::TestHarness => "test harness",
+ AstPass::ProcMacroHarness => "proc macro harness",
+ }
+ }
+}
+
+/// The kind of compiler desugaring.
+#[derive(Clone, Copy, PartialEq, Debug, Encodable, Decodable, HashStable_Generic)]
+pub enum DesugaringKind {
+ /// We desugar `if c { i } else { e }` to `match $ExprKind::Use(c) { true => i, _ => e }`.
+ /// However, we do not want to blame `c` for unreachability but rather say that `i`
+ /// is unreachable. This desugaring kind allows us to avoid blaming `c`.
+ /// This also applies to `while` loops.
+ CondTemporary,
+ QuestionMark,
+ TryBlock,
+ YeetExpr,
+ /// Desugaring of an `impl Trait` in return type position
+    /// to a `type Foo = impl Trait;` item and replacing the
+ /// `impl Trait` with `Foo`.
+ OpaqueTy,
+ Async,
+ Await,
+ ForLoop,
+ WhileLoop,
+}
+
+impl DesugaringKind {
+ /// The description wording should combine well with "desugaring of {}".
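+    ///
+    /// For example (illustrative):
+    ///
+    /// ```ignore (illustrative)
+    /// // Yields "desugaring of `try` block".
+    /// let msg = format!("desugaring of {}", DesugaringKind::TryBlock.descr());
+    /// ```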
+ pub fn descr(self) -> &'static str {
+ match self {
+ DesugaringKind::CondTemporary => "`if` or `while` condition",
+ DesugaringKind::Async => "`async` block or function",
+ DesugaringKind::Await => "`await` expression",
+ DesugaringKind::QuestionMark => "operator `?`",
+ DesugaringKind::TryBlock => "`try` block",
+ DesugaringKind::YeetExpr => "`do yeet` expression",
+ DesugaringKind::OpaqueTy => "`impl Trait`",
+ DesugaringKind::ForLoop => "`for` loop",
+ DesugaringKind::WhileLoop => "`while` loop",
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct HygieneEncodeContext {
+    /// All `SyntaxContext`s for which we have written `SyntaxContextData` into crate metadata.
+    /// Checked during serialization so that we don't accidentally encode the same
+    /// `SyntaxContext` more than once.
+ serialized_ctxts: Lock<FxHashSet<SyntaxContext>>,
+ /// The `SyntaxContexts` that we have serialized (e.g. as a result of encoding `Spans`)
+ /// in the most recent 'round' of serializing. Serializing `SyntaxContextData`
+ /// may cause us to serialize more `SyntaxContext`s, so serialize in a loop
+ /// until we reach a fixed point.
+ latest_ctxts: Lock<FxHashSet<SyntaxContext>>,
+
+ serialized_expns: Lock<FxHashSet<ExpnId>>,
+
+ latest_expns: Lock<FxHashSet<ExpnId>>,
+}
+
+impl HygieneEncodeContext {
+ /// Record the fact that we need to serialize the corresponding `ExpnData`.
+ pub fn schedule_expn_data_for_encoding(&self, expn: ExpnId) {
+ if !self.serialized_expns.lock().contains(&expn) {
+ self.latest_expns.lock().insert(expn);
+ }
+ }
+
+ pub fn encode<T>(
+ &self,
+ encoder: &mut T,
+ mut encode_ctxt: impl FnMut(&mut T, u32, &SyntaxContextData),
+ mut encode_expn: impl FnMut(&mut T, ExpnId, &ExpnData, ExpnHash),
+ ) {
+ // When we serialize a `SyntaxContextData`, we may end up serializing
+ // a `SyntaxContext` that we haven't seen before
+ while !self.latest_ctxts.lock().is_empty() || !self.latest_expns.lock().is_empty() {
+ debug!(
+ "encode_hygiene: Serializing a round of {:?} SyntaxContextDatas: {:?}",
+ self.latest_ctxts.lock().len(),
+ self.latest_ctxts
+ );
+
+ // Consume the current round of SyntaxContexts.
+ // Drop the lock() temporary early
+ let latest_ctxts = { std::mem::take(&mut *self.latest_ctxts.lock()) };
+
+            // It's fine to iterate over a hash set here, because the serialization
+            // of the table that we insert data into doesn't depend on insertion
+            // order.
+ #[allow(rustc::potential_query_instability)]
+ for_all_ctxts_in(latest_ctxts.into_iter(), |index, ctxt, data| {
+ if self.serialized_ctxts.lock().insert(ctxt) {
+ encode_ctxt(encoder, index, data);
+ }
+ });
+
+ let latest_expns = { std::mem::take(&mut *self.latest_expns.lock()) };
+
+            // Same as above: this is fine, as we are inserting into an order-independent hash set.
+ #[allow(rustc::potential_query_instability)]
+ for_all_expns_in(latest_expns.into_iter(), |expn, data, hash| {
+ if self.serialized_expns.lock().insert(expn) {
+ encode_expn(encoder, expn, data, hash);
+ }
+ });
+ }
+ debug!("encode_hygiene: Done serializing SyntaxContextData");
+ }
+}
+
+#[derive(Default)]
+/// Additional information used to assist in decoding hygiene data
+pub struct HygieneDecodeContext {
+ // Maps serialized `SyntaxContext` ids to a `SyntaxContext` in the current
+ // global `HygieneData`. When we deserialize a `SyntaxContext`, we need to create
+ // a new id in the global `HygieneData`. This map tracks the ID we end up picking,
+ // so that multiple occurrences of the same serialized id are decoded to the same
+ // `SyntaxContext`
+ remapped_ctxts: Lock<Vec<Option<SyntaxContext>>>,
+}
+
+/// Register an expansion which has been decoded from the on-disk-cache for the local crate.
+pub fn register_local_expn_id(data: ExpnData, hash: ExpnHash) -> ExpnId {
+ HygieneData::with(|hygiene_data| {
+ let expn_id = hygiene_data.local_expn_data.next_index();
+ hygiene_data.local_expn_data.push(Some(data));
+ let _eid = hygiene_data.local_expn_hashes.push(hash);
+ debug_assert_eq!(expn_id, _eid);
+
+ let expn_id = expn_id.to_expn_id();
+
+ let _old_id = hygiene_data.expn_hash_to_expn_id.insert(hash, expn_id);
+ debug_assert!(_old_id.is_none());
+ expn_id
+ })
+}
+
+/// Register an expansion which has been decoded from the metadata of a foreign crate.
+pub fn register_expn_id(
+ krate: CrateNum,
+ local_id: ExpnIndex,
+ data: ExpnData,
+ hash: ExpnHash,
+) -> ExpnId {
+ debug_assert!(data.parent == ExpnId::root() || krate == data.parent.krate);
+ let expn_id = ExpnId { krate, local_id };
+ HygieneData::with(|hygiene_data| {
+ let _old_data = hygiene_data.foreign_expn_data.insert(expn_id, data);
+ debug_assert!(_old_data.is_none());
+ let _old_hash = hygiene_data.foreign_expn_hashes.insert(expn_id, hash);
+ debug_assert!(_old_hash.is_none());
+ let _old_id = hygiene_data.expn_hash_to_expn_id.insert(hash, expn_id);
+ debug_assert!(_old_id.is_none());
+ });
+ expn_id
+}
+
+/// Decode an expansion from the metadata of a foreign crate.
+pub fn decode_expn_id(
+ krate: CrateNum,
+ index: u32,
+ decode_data: impl FnOnce(ExpnId) -> (ExpnData, ExpnHash),
+) -> ExpnId {
+ if index == 0 {
+ debug!("decode_expn_id: deserialized root");
+ return ExpnId::root();
+ }
+
+ let index = ExpnIndex::from_u32(index);
+
+ // This function is used to decode metadata, so it cannot decode information about LOCAL_CRATE.
+ debug_assert_ne!(krate, LOCAL_CRATE);
+ let expn_id = ExpnId { krate, local_id: index };
+
+ // Fast path if the expansion has already been decoded.
+ if HygieneData::with(|hygiene_data| hygiene_data.foreign_expn_data.contains_key(&expn_id)) {
+ return expn_id;
+ }
+
+ // Don't decode the data inside `HygieneData::with`, since we need to recursively decode
+ // other ExpnIds
+ let (expn_data, hash) = decode_data(expn_id);
+
+ register_expn_id(krate, index, expn_data, hash)
+}
+
+// Decodes `SyntaxContext`, using the provided `HygieneDecodeContext`
+// to track which `SyntaxContext`s we have already decoded.
+// The provided closure will be invoked to deserialize a `SyntaxContextData`
+// if we haven't already seen the id of the `SyntaxContext` we are deserializing.
+pub fn decode_syntax_context<D: Decoder, F: FnOnce(&mut D, u32) -> SyntaxContextData>(
+ d: &mut D,
+ context: &HygieneDecodeContext,
+ decode_data: F,
+) -> SyntaxContext {
+ let raw_id: u32 = Decodable::decode(d);
+ if raw_id == 0 {
+ debug!("decode_syntax_context: deserialized root");
+ // The root is special
+ return SyntaxContext::root();
+ }
+
+ let outer_ctxts = &context.remapped_ctxts;
+
+ // Ensure that the lock() temporary is dropped early
+ {
+ if let Some(ctxt) = outer_ctxts.lock().get(raw_id as usize).copied().flatten() {
+ return ctxt;
+ }
+ }
+
+ // Allocate and store SyntaxContext id *before* calling the decoder function,
+ // as the SyntaxContextData may reference itself.
+ let new_ctxt = HygieneData::with(|hygiene_data| {
+ let new_ctxt = SyntaxContext(hygiene_data.syntax_context_data.len() as u32);
+        // Push a dummy SyntaxContextData to ensure that nobody else can get the
+        // same ID as us. This will be overwritten after `decode_data` is called.
+ hygiene_data.syntax_context_data.push(SyntaxContextData {
+ outer_expn: ExpnId::root(),
+ outer_transparency: Transparency::Transparent,
+ parent: SyntaxContext::root(),
+ opaque: SyntaxContext::root(),
+ opaque_and_semitransparent: SyntaxContext::root(),
+ dollar_crate_name: kw::Empty,
+ });
+ let mut ctxts = outer_ctxts.lock();
+ let new_len = raw_id as usize + 1;
+ if ctxts.len() < new_len {
+ ctxts.resize(new_len, None);
+ }
+ ctxts[raw_id as usize] = Some(new_ctxt);
+ drop(ctxts);
+ new_ctxt
+ });
+
+ // Don't try to decode data while holding the lock, since we need to
+ // be able to recursively decode a SyntaxContext
+ let mut ctxt_data = decode_data(d, raw_id);
+    // Reset `dollar_crate_name` so that it will be updated by `update_dollar_crate_names`.
+    // We don't care what the encoding crate set this to; we want to resolve it
+    // from the perspective of the current compilation session.
+ ctxt_data.dollar_crate_name = kw::DollarCrate;
+
+ // Overwrite the dummy data with our decoded SyntaxContextData
+ HygieneData::with(|hygiene_data| {
+ let dummy = std::mem::replace(
+ &mut hygiene_data.syntax_context_data[new_ctxt.as_u32() as usize],
+ ctxt_data,
+ );
+        // Make sure nothing weird happened while `decode_data` was running.
+ assert_eq!(dummy.dollar_crate_name, kw::Empty);
+ });
+
+ new_ctxt
+}
+
+fn for_all_ctxts_in<F: FnMut(u32, SyntaxContext, &SyntaxContextData)>(
+ ctxts: impl Iterator<Item = SyntaxContext>,
+ mut f: F,
+) {
+ let all_data: Vec<_> = HygieneData::with(|data| {
+ ctxts.map(|ctxt| (ctxt, data.syntax_context_data[ctxt.0 as usize].clone())).collect()
+ });
+ for (ctxt, data) in all_data.into_iter() {
+ f(ctxt.0, ctxt, &data);
+ }
+}
+
+fn for_all_expns_in(
+ expns: impl Iterator<Item = ExpnId>,
+ mut f: impl FnMut(ExpnId, &ExpnData, ExpnHash),
+) {
+ let all_data: Vec<_> = HygieneData::with(|data| {
+ expns.map(|expn| (expn, data.expn_data(expn).clone(), data.expn_hash(expn))).collect()
+ });
+ for (expn, data, hash) in all_data.into_iter() {
+ f(expn, &data, hash);
+ }
+}
+
+impl<E: Encoder> Encodable<E> for LocalExpnId {
+ fn encode(&self, e: &mut E) {
+ self.to_expn_id().encode(e);
+ }
+}
+
+impl<E: Encoder> Encodable<E> for ExpnId {
+ default fn encode(&self, _: &mut E) {
+ panic!("cannot encode `ExpnId` with `{}`", std::any::type_name::<E>());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for LocalExpnId {
+ fn decode(d: &mut D) -> Self {
+ ExpnId::expect_local(ExpnId::decode(d))
+ }
+}
+
+impl<D: Decoder> Decodable<D> for ExpnId {
+ default fn decode(_: &mut D) -> Self {
+ panic!("cannot decode `ExpnId` with `{}`", std::any::type_name::<D>());
+ }
+}
+
+pub fn raw_encode_syntax_context<E: Encoder>(
+ ctxt: SyntaxContext,
+ context: &HygieneEncodeContext,
+ e: &mut E,
+) {
+ if !context.serialized_ctxts.lock().contains(&ctxt) {
+ context.latest_ctxts.lock().insert(ctxt);
+ }
+ ctxt.0.encode(e);
+}
+
+impl<E: Encoder> Encodable<E> for SyntaxContext {
+ default fn encode(&self, _: &mut E) {
+ panic!("cannot encode `SyntaxContext` with `{}`", std::any::type_name::<E>());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for SyntaxContext {
+ default fn decode(_: &mut D) -> Self {
+ panic!("cannot decode `SyntaxContext` with `{}`", std::any::type_name::<D>());
+ }
+}
+
+/// Updates the `disambiguator` field of the corresponding `ExpnData`
+/// such that the `Fingerprint` of the `ExpnData` does not collide with
+/// that of any other `ExpnId`.
+///
+/// This method is called only when an `ExpnData` is first associated
+/// with an `ExpnId` (when the `ExpnId` is initially constructed, or via
+/// `set_expn_data`). It is *not* called for foreign `ExpnId`s deserialized
+/// from another crate's metadata - since `ExpnHash` includes the stable crate id,
+/// collisions are only possible between `ExpnId`s within the same crate.
+fn update_disambiguator(expn_data: &mut ExpnData, mut ctx: impl HashStableContext) -> ExpnHash {
+ // This disambiguator should not have been set yet.
+ assert_eq!(
+ expn_data.disambiguator, 0,
+ "Already set disambiguator for ExpnData: {:?}",
+ expn_data
+ );
+ assert_default_hashing_controls(&ctx, "ExpnData (disambiguator)");
+ let mut expn_hash = expn_data.hash_expn(&mut ctx);
+
+ let disambiguator = HygieneData::with(|data| {
+ // If this is the first ExpnData with a given hash, then keep our
+ // disambiguator at 0 (the default u32 value)
+ let disambig = data.expn_data_disambiguators.entry(expn_hash).or_default();
+ let disambiguator = *disambig;
+ *disambig += 1;
+ disambiguator
+ });
+
+ if disambiguator != 0 {
+ debug!("Set disambiguator for expn_data={:?} expn_hash={:?}", expn_data, expn_hash);
+
+ expn_data.disambiguator = disambiguator;
+ expn_hash = expn_data.hash_expn(&mut ctx);
+
+ // Verify that the new disambiguator makes the hash unique
+ #[cfg(debug_assertions)]
+ HygieneData::with(|data| {
+ assert_eq!(
+ data.expn_data_disambiguators.get(&expn_hash),
+ None,
+ "Hash collision after disambiguator update!",
+ );
+ });
+ }
+
+ ExpnHash::new(ctx.def_path_hash(LOCAL_CRATE.as_def_id()).stable_crate_id(), expn_hash)
+}
+
+impl<CTX: HashStableContext> HashStable<CTX> for SyntaxContext {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ const TAG_EXPANSION: u8 = 0;
+ const TAG_NO_EXPANSION: u8 = 1;
+
+ if *self == SyntaxContext::root() {
+ TAG_NO_EXPANSION.hash_stable(ctx, hasher);
+ } else {
+ TAG_EXPANSION.hash_stable(ctx, hasher);
+ let (expn_id, transparency) = self.outer_mark();
+ expn_id.hash_stable(ctx, hasher);
+ transparency.hash_stable(ctx, hasher);
+ }
+ }
+}
+
+impl<CTX: HashStableContext> HashStable<CTX> for ExpnId {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ assert_default_hashing_controls(ctx, "ExpnId");
+ let hash = if *self == ExpnId::root() {
+ // Avoid fetching TLS storage for a trivial often-used value.
+ Fingerprint::ZERO
+ } else {
+ self.expn_hash().0
+ };
+
+ hash.hash_stable(ctx, hasher);
+ }
+}
diff --git a/compiler/rustc_span/src/lev_distance.rs b/compiler/rustc_span/src/lev_distance.rs
new file mode 100644
index 000000000..61e4b98a8
--- /dev/null
+++ b/compiler/rustc_span/src/lev_distance.rs
@@ -0,0 +1,177 @@
+//! Levenshtein distances.
+//!
+//! The [Levenshtein distance] is a metric for measuring the difference between two strings.
+//!
+//! [Levenshtein distance]: https://en.wikipedia.org/wiki/Levenshtein_distance
+
+use crate::symbol::Symbol;
+use std::cmp;
+
+#[cfg(test)]
+mod tests;
+
+/// Finds the Levenshtein distance between two strings.
+///
+/// Returns `None` if the distance exceeds the limit.
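+///
+/// For example (illustrative):
+///
+/// ```ignore (illustrative)
+/// assert_eq!(lev_distance("kitten", "sitting", 3), Some(3));
+/// assert_eq!(lev_distance("kitten", "sitting", 2), None);
+/// ```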
+pub fn lev_distance(a: &str, b: &str, limit: usize) -> Option<usize> {
+ let n = a.chars().count();
+ let m = b.chars().count();
+ let min_dist = if n < m { m - n } else { n - m };
+
+ if min_dist > limit {
+ return None;
+ }
+ if n == 0 || m == 0 {
+ return (min_dist <= limit).then_some(min_dist);
+ }
+
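+    // Classic single-row dynamic programming: after processing the `i`-th char of `a`,
+    // `dcol[j]` holds the edit distance between the first `i + 1` chars of `a` and the
+    // first `j` chars of `b`.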
+ let mut dcol: Vec<_> = (0..=m).collect();
+
+ for (i, sc) in a.chars().enumerate() {
+ let mut current = i;
+ dcol[0] = current + 1;
+
+ for (j, tc) in b.chars().enumerate() {
+ let next = dcol[j + 1];
+ if sc == tc {
+ dcol[j + 1] = current;
+ } else {
+ dcol[j + 1] = cmp::min(current, next);
+ dcol[j + 1] = cmp::min(dcol[j + 1], dcol[j]) + 1;
+ }
+ current = next;
+ }
+ }
+
+ (dcol[m] <= limit).then_some(dcol[m])
+}
+
+/// Provides a word similarity score between two words that accounts for substrings being more
+/// meaningful than a typical Levenshtein distance. The lower the score, the closer the match.
+/// 0 is an identical match.
+///
+/// Uses the Levenshtein distance between the two strings and removes the cost of the length
+/// difference. If this is 0, then it is either a substring match or a full word match; in the
+/// substring match case we detect this and return `1`. To prevent finding meaningless substrings,
+/// e.g. "in" in "shrink", we only perform this subtraction of the length difference if neither
+/// word is more than twice the length of the other. For cases where the words are close in size
+/// but not an exact substring, the cost of the length difference is discounted by half.
+///
+/// Returns `None` if the distance exceeds the limit.
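+///
+/// For example (illustrative; this case also appears in the unit tests):
+///
+/// ```ignore (illustrative)
+/// // "empty" is a substring of "is_empty", so the score is 1 rather than the raw distance 3.
+/// assert_eq!(lev_distance_with_substrings("empty", "is_empty", 1), Some(1));
+/// ```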
+pub fn lev_distance_with_substrings(a: &str, b: &str, limit: usize) -> Option<usize> {
+ let n = a.chars().count();
+ let m = b.chars().count();
+
+ // Check one isn't less than half the length of the other. If this is true then there is a
+ // big difference in length.
+ let big_len_diff = (n * 2) < m || (m * 2) < n;
+ let len_diff = if n < m { m - n } else { n - m };
+ let lev = lev_distance(a, b, limit + len_diff)?;
+
+ // This is the crux, subtracting length difference means exact substring matches will now be 0
+ let score = lev - len_diff;
+
+    // If the score is 0 but the words have different lengths, then it's a substring match,
+    // not a full word match.
+ let score = if score == 0 && len_diff > 0 && !big_len_diff {
+ 1 // Exact substring match, but not a total word match so return non-zero
+ } else if !big_len_diff {
+ // Not a big difference in length, discount cost of length difference
+ score + (len_diff + 1) / 2
+ } else {
+ // A big difference in length, add back the difference in length to the score
+ score + len_diff
+ };
+
+ (score <= limit).then_some(score)
+}
+
+/// Finds the best match for given word in the given iterator where substrings are meaningful.
+///
+/// A version of [`find_best_match_for_name`] that uses [`lev_distance_with_substrings`] as the score
+/// for word similarity. This takes an optional distance limit, which defaults to one-third of
+/// the length of the given word.
+///
+/// Besides the modified Levenshtein distance, we use a case-insensitive comparison to improve
+/// accuracy on the edge case where candidates differ from the lookup only in letter case.
+pub fn find_best_match_for_name_with_substrings(
+ candidates: &[Symbol],
+ lookup: Symbol,
+ dist: Option<usize>,
+) -> Option<Symbol> {
+ find_best_match_for_name_impl(true, candidates, lookup, dist)
+}
+
+/// Finds the best match for a given word in the given iterator.
+///
+/// As a loose rule to avoid the obviously incorrect suggestions, it takes
+/// an optional limit for the maximum allowable edit distance, which defaults
+/// to one-third of the length of the given word.
+///
+/// Besides the Levenshtein distance, we use a case-insensitive comparison to improve accuracy
+/// on the edge case where candidates differ from the lookup only in letter case.
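+///
+/// For example (illustrative; this case also appears in the unit tests):
+///
+/// ```ignore (illustrative)
+/// let candidates = [Symbol::intern("aaab"), Symbol::intern("aaabc")];
+/// assert_eq!(
+///     find_best_match_for_name(&candidates, Symbol::intern("aaaa"), None),
+///     Some(Symbol::intern("aaab"))
+/// );
+/// ```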
+pub fn find_best_match_for_name(
+ candidates: &[Symbol],
+ lookup: Symbol,
+ dist: Option<usize>,
+) -> Option<Symbol> {
+ find_best_match_for_name_impl(false, candidates, lookup, dist)
+}
+
+#[cold]
+fn find_best_match_for_name_impl(
+ use_substring_score: bool,
+ candidates: &[Symbol],
+ lookup: Symbol,
+ dist: Option<usize>,
+) -> Option<Symbol> {
+ let lookup = lookup.as_str();
+ let lookup_uppercase = lookup.to_uppercase();
+
+ // Priority of matches:
+    // 1. Exact case-insensitive match
+ // 2. Levenshtein distance match
+ // 3. Sorted word match
+ if let Some(c) = candidates.iter().find(|c| c.as_str().to_uppercase() == lookup_uppercase) {
+ return Some(*c);
+ }
+
+ let mut dist = dist.unwrap_or_else(|| cmp::max(lookup.len(), 3) / 3);
+ let mut best = None;
+ for c in candidates {
+ match if use_substring_score {
+ lev_distance_with_substrings(lookup, c.as_str(), dist)
+ } else {
+ lev_distance(lookup, c.as_str(), dist)
+ } {
+ Some(0) => return Some(*c),
+ Some(d) => {
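+                // Tighten the limit so that a later candidate is only kept if it is a
+                // strictly better match.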
+ dist = d - 1;
+ best = Some(*c);
+ }
+ None => {}
+ }
+ }
+ if best.is_some() {
+ return best;
+ }
+
+ find_match_by_sorted_words(candidates, lookup)
+}
+
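+/// Finds a candidate whose underscore-separated words match `lookup`'s words in any order,
+/// e.g. the candidate `a_longer_variable_name` matches the lookup `a_variable_longer_name`.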
+fn find_match_by_sorted_words(iter_names: &[Symbol], lookup: &str) -> Option<Symbol> {
+ iter_names.iter().fold(None, |result, candidate| {
+ if sort_by_words(candidate.as_str()) == sort_by_words(lookup) {
+ Some(*candidate)
+ } else {
+ result
+ }
+ })
+}
+
+fn sort_by_words(name: &str) -> String {
+ let mut split_words: Vec<&str> = name.split('_').collect();
+ // We are sorting primitive &strs and can use unstable sort here.
+ split_words.sort_unstable();
+ split_words.join("_")
+}
diff --git a/compiler/rustc_span/src/lev_distance/tests.rs b/compiler/rustc_span/src/lev_distance/tests.rs
new file mode 100644
index 000000000..b17d6588c
--- /dev/null
+++ b/compiler/rustc_span/src/lev_distance/tests.rs
@@ -0,0 +1,71 @@
+use super::*;
+
+#[test]
+fn test_lev_distance() {
+ use std::char::{from_u32, MAX};
+ // Test bytelength agnosticity
+ for c in (0..MAX as u32).filter_map(from_u32).map(|i| i.to_string()) {
+ assert_eq!(lev_distance(&c[..], &c[..], usize::MAX), Some(0));
+ }
+
+ let a = "\nMäry häd ä little lämb\n\nLittle lämb\n";
+ let b = "\nMary häd ä little lämb\n\nLittle lämb\n";
+ let c = "Mary häd ä little lämb\n\nLittle lämb\n";
+ assert_eq!(lev_distance(a, b, usize::MAX), Some(1));
+ assert_eq!(lev_distance(b, a, usize::MAX), Some(1));
+ assert_eq!(lev_distance(a, c, usize::MAX), Some(2));
+ assert_eq!(lev_distance(c, a, usize::MAX), Some(2));
+ assert_eq!(lev_distance(b, c, usize::MAX), Some(1));
+ assert_eq!(lev_distance(c, b, usize::MAX), Some(1));
+}
+
+#[test]
+fn test_lev_distance_limit() {
+ assert_eq!(lev_distance("abc", "abcd", 1), Some(1));
+ assert_eq!(lev_distance("abc", "abcd", 0), None);
+ assert_eq!(lev_distance("abc", "xyz", 3), Some(3));
+ assert_eq!(lev_distance("abc", "xyz", 2), None);
+}
+
+#[test]
+fn test_method_name_similarity_score() {
+ assert_eq!(lev_distance_with_substrings("empty", "is_empty", 1), Some(1));
+ assert_eq!(lev_distance_with_substrings("shrunk", "rchunks", 2), None);
+ assert_eq!(lev_distance_with_substrings("abc", "abcd", 1), Some(1));
+ assert_eq!(lev_distance_with_substrings("a", "abcd", 1), None);
+ assert_eq!(lev_distance_with_substrings("edf", "eq", 1), None);
+ assert_eq!(lev_distance_with_substrings("abc", "xyz", 3), Some(3));
+ assert_eq!(lev_distance_with_substrings("abcdef", "abcdef", 2), Some(0));
+}
+
+#[test]
+fn test_find_best_match_for_name() {
+ use crate::create_default_session_globals_then;
+ create_default_session_globals_then(|| {
+ let input = vec![Symbol::intern("aaab"), Symbol::intern("aaabc")];
+ assert_eq!(
+ find_best_match_for_name(&input, Symbol::intern("aaaa"), None),
+ Some(Symbol::intern("aaab"))
+ );
+
+ assert_eq!(find_best_match_for_name(&input, Symbol::intern("1111111111"), None), None);
+
+ let input = vec![Symbol::intern("AAAA")];
+ assert_eq!(
+ find_best_match_for_name(&input, Symbol::intern("aaaa"), None),
+ Some(Symbol::intern("AAAA"))
+ );
+
+ let input = vec![Symbol::intern("AAAA")];
+ assert_eq!(
+ find_best_match_for_name(&input, Symbol::intern("aaaa"), Some(4)),
+ Some(Symbol::intern("AAAA"))
+ );
+
+ let input = vec![Symbol::intern("a_longer_variable_name")];
+ assert_eq!(
+ find_best_match_for_name(&input, Symbol::intern("a_variable_longer_name"), None),
+ Some(Symbol::intern("a_longer_variable_name"))
+ );
+ })
+}
diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs
new file mode 100644
index 000000000..cf3069281
--- /dev/null
+++ b/compiler/rustc_span/src/lib.rs
@@ -0,0 +1,2116 @@
+//! Source positions and related helper functions.
+//!
+//! Important concepts in this module include:
+//!
+//! - the *span*, represented by [`SpanData`] and related types;
+//! - source code as represented by a [`SourceMap`]; and
+//! - interned strings, represented by [`Symbol`]s, with some common symbols available statically in the [`sym`] module.
+//!
+//! Unlike most compilers, the span contains not only the position in the source code, but also various other metadata,
+//! such as the edition and macro hygiene. This metadata is stored in [`SyntaxContext`] and [`ExpnData`].
+//!
+//! ## Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(array_windows)]
+#![feature(let_else)]
+#![feature(if_let_guard)]
+#![feature(negative_impls)]
+#![feature(min_specialization)]
+#![feature(rustc_attrs)]
+
+#[macro_use]
+extern crate rustc_macros;
+
+#[macro_use]
+extern crate tracing;
+
+use rustc_data_structures::AtomicRef;
+use rustc_macros::HashStable_Generic;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+mod caching_source_map_view;
+pub mod source_map;
+pub use self::caching_source_map_view::CachingSourceMapView;
+use source_map::SourceMap;
+
+pub mod edition;
+use edition::Edition;
+pub mod hygiene;
+use hygiene::Transparency;
+pub use hygiene::{DesugaringKind, ExpnKind, MacroKind};
+pub use hygiene::{ExpnData, ExpnHash, ExpnId, LocalExpnId, SyntaxContext};
+use rustc_data_structures::stable_hasher::HashingControls;
+pub mod def_id;
+use def_id::{CrateNum, DefId, DefPathHash, LocalDefId, LOCAL_CRATE};
+pub mod lev_distance;
+mod span_encoding;
+pub use span_encoding::{Span, DUMMY_SP};
+
+pub mod symbol;
+pub use symbol::{sym, Symbol};
+
+mod analyze_source_file;
+pub mod fatal_error;
+
+pub mod profiling;
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{Lock, Lrc};
+
+use std::borrow::Cow;
+use std::cmp::{self, Ordering};
+use std::fmt;
+use std::hash::Hash;
+use std::ops::{Add, Range, Sub};
+use std::path::{Path, PathBuf};
+use std::str::FromStr;
+use std::sync::Arc;
+
+use md5::Digest;
+use md5::Md5;
+use sha1::Sha1;
+use sha2::Sha256;
+
+use tracing::debug;
+
+#[cfg(test)]
+mod tests;
+
+// Per-session global variables: this struct is stored in thread-local storage
+// in such a way that it is accessible without any kind of handle to all
+// threads within the compilation session, but is not accessible outside the
+// session.
+pub struct SessionGlobals {
+ symbol_interner: symbol::Interner,
+ span_interner: Lock<span_encoding::SpanInterner>,
+ hygiene_data: Lock<hygiene::HygieneData>,
+ source_map: Lock<Option<Lrc<SourceMap>>>,
+}
+
+impl SessionGlobals {
+ pub fn new(edition: Edition) -> SessionGlobals {
+ SessionGlobals {
+ symbol_interner: symbol::Interner::fresh(),
+ span_interner: Lock::new(span_encoding::SpanInterner::default()),
+ hygiene_data: Lock::new(hygiene::HygieneData::new(edition)),
+ source_map: Lock::new(None),
+ }
+ }
+}
+
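+/// Sets up fresh `SessionGlobals` for the duration of the closure `f`.
+///
+/// For example (illustrative):
+///
+/// ```ignore (illustrative)
+/// create_session_globals_then(edition::DEFAULT_EDITION, || {
+///     // `Symbol`s and `Span`s can only be created while session globals are set.
+///     let sym = Symbol::intern("example");
+/// });
+/// ```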
+#[inline]
+pub fn create_session_globals_then<R>(edition: Edition, f: impl FnOnce() -> R) -> R {
+ assert!(
+ !SESSION_GLOBALS.is_set(),
+ "SESSION_GLOBALS should never be overwritten! \
+ Use another thread if you need another SessionGlobals"
+ );
+ let session_globals = SessionGlobals::new(edition);
+ SESSION_GLOBALS.set(&session_globals, f)
+}
+
+#[inline]
+pub fn set_session_globals_then<R>(session_globals: &SessionGlobals, f: impl FnOnce() -> R) -> R {
+ assert!(
+ !SESSION_GLOBALS.is_set(),
+ "SESSION_GLOBALS should never be overwritten! \
+ Use another thread if you need another SessionGlobals"
+ );
+ SESSION_GLOBALS.set(session_globals, f)
+}
+
+#[inline]
+pub fn create_default_session_if_not_set_then<R, F>(f: F) -> R
+where
+ F: FnOnce(&SessionGlobals) -> R,
+{
+ create_session_if_not_set_then(edition::DEFAULT_EDITION, f)
+}
+
+#[inline]
+pub fn create_session_if_not_set_then<R, F>(edition: Edition, f: F) -> R
+where
+ F: FnOnce(&SessionGlobals) -> R,
+{
+ if !SESSION_GLOBALS.is_set() {
+ let session_globals = SessionGlobals::new(edition);
+ SESSION_GLOBALS.set(&session_globals, || SESSION_GLOBALS.with(f))
+ } else {
+ SESSION_GLOBALS.with(f)
+ }
+}
+
+#[inline]
+pub fn with_session_globals<R, F>(f: F) -> R
+where
+ F: FnOnce(&SessionGlobals) -> R,
+{
+ SESSION_GLOBALS.with(f)
+}
+
+#[inline]
+pub fn create_default_session_globals_then<R>(f: impl FnOnce() -> R) -> R {
+ create_session_globals_then(edition::DEFAULT_EDITION, f)
+}
+
+// If this ever becomes non thread-local, `decode_syntax_context`
+// and `decode_expn_id` will need to be updated to handle concurrent
+// deserialization.
+scoped_tls::scoped_thread_local!(static SESSION_GLOBALS: SessionGlobals);
+
+// FIXME: We should use this enum or something like it to get rid of the
+// use of magic `/rust/1.x/...` paths across the board.
+#[derive(Debug, Eq, PartialEq, Clone, Ord, PartialOrd)]
+#[derive(Decodable)]
+pub enum RealFileName {
+ LocalPath(PathBuf),
+ /// For remapped paths (namely paths into libstd that have been mapped
+ /// to the appropriate spot on the local host's file system, and local file
+ /// system paths that have been remapped with `FilePathMapping`),
+ Remapped {
+ /// `local_path` is the (host-dependent) local path to the file. This is
+        /// `None` if the file was imported from another crate.
+ local_path: Option<PathBuf>,
+ /// `virtual_name` is the stable path rustc will store internally within
+ /// build artifacts.
+ virtual_name: PathBuf,
+ },
+}
+
+impl Hash for RealFileName {
+ fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
+        // To prevent #70924 from happening again, we should only hash the
+        // remapped (virtualized) path if that exists. This is because
+        // virtualized paths to sysroot crates (/rust/$hash or /rust/$version)
+        // remain stable even if the corresponding local_path changes.
+ self.remapped_path_if_available().hash(state)
+ }
+}
+
+// This is functionally identical to `#[derive(Encodable)]`, with the exception of
+// an added assert statement.
+impl<S: Encoder> Encodable<S> for RealFileName {
+ fn encode(&self, encoder: &mut S) {
+ match *self {
+ RealFileName::LocalPath(ref local_path) => encoder.emit_enum_variant(0, |encoder| {
+ local_path.encode(encoder);
+ }),
+
+ RealFileName::Remapped { ref local_path, ref virtual_name } => encoder
+ .emit_enum_variant(1, |encoder| {
+                    // For privacy and build reproducibility, we must not embed host-dependent paths
+                    // in artifacts if they have been remapped by `--remap-path-prefix`.
+ assert!(local_path.is_none());
+ local_path.encode(encoder);
+ virtual_name.encode(encoder);
+ }),
+ }
+ }
+}
+
+impl RealFileName {
+ /// Returns the path suitable for reading from the file system on the local host,
+ /// if this information exists.
+ /// Avoid embedding this in build artifacts; see `remapped_path_if_available()` for that.
+ pub fn local_path(&self) -> Option<&Path> {
+ match self {
+ RealFileName::LocalPath(p) => Some(p),
+ RealFileName::Remapped { local_path: p, virtual_name: _ } => {
+ p.as_ref().map(PathBuf::as_path)
+ }
+ }
+ }
+
+ /// Returns the path suitable for reading from the file system on the local host,
+ /// if this information exists.
+ /// Avoid embedding this in build artifacts; see `remapped_path_if_available()` for that.
+ pub fn into_local_path(self) -> Option<PathBuf> {
+ match self {
+ RealFileName::LocalPath(p) => Some(p),
+ RealFileName::Remapped { local_path: p, virtual_name: _ } => p,
+ }
+ }
+
+ /// Returns the path suitable for embedding into build artifacts. This would still
+ /// be a local path if it has not been remapped. A remapped path will not correspond
+ /// to a valid file system path: see `local_path_if_available()` for something that
+ /// is more likely to return paths into the local host file system.
+ pub fn remapped_path_if_available(&self) -> &Path {
+ match self {
+ RealFileName::LocalPath(p)
+ | RealFileName::Remapped { local_path: _, virtual_name: p } => &p,
+ }
+ }
+
+ /// Returns the path suitable for reading from the file system on the local host,
+ /// if this information exists. Otherwise returns the remapped name.
+ /// Avoid embedding this in build artifacts; see `remapped_path_if_available()` for that.
+ pub fn local_path_if_available(&self) -> &Path {
+ match self {
+ RealFileName::LocalPath(path)
+ | RealFileName::Remapped { local_path: None, virtual_name: path }
+ | RealFileName::Remapped { local_path: Some(path), virtual_name: _ } => path,
+ }
+ }
+
+ pub fn to_string_lossy(&self, display_pref: FileNameDisplayPreference) -> Cow<'_, str> {
+ match display_pref {
+ FileNameDisplayPreference::Local => self.local_path_if_available().to_string_lossy(),
+ FileNameDisplayPreference::Remapped => {
+ self.remapped_path_if_available().to_string_lossy()
+ }
+ }
+ }
+}
+
+/// Differentiates between real files and common virtual files.
+#[derive(Debug, Eq, PartialEq, Clone, Ord, PartialOrd, Hash)]
+#[derive(Decodable, Encodable)]
+pub enum FileName {
+ Real(RealFileName),
+ /// Call to `quote!`.
+ QuoteExpansion(u64),
+ /// Command line.
+ Anon(u64),
+ /// Hack in `src/librustc_ast/parse.rs`.
+ // FIXME(jseyfried)
+ MacroExpansion(u64),
+ ProcMacroSourceCode(u64),
+ /// Strings provided as `--cfg [cfgspec]` stored in a `crate_cfg`.
+ CfgSpec(u64),
+ /// Strings provided as crate attributes in the CLI.
+ CliCrateAttr(u64),
+ /// Custom sources for explicit parser calls from plugins and drivers.
+ Custom(String),
+ DocTest(PathBuf, isize),
+ /// Post-substitution inline assembly from LLVM.
+ InlineAsm(u64),
+}
+
+impl From<PathBuf> for FileName {
+ fn from(p: PathBuf) -> Self {
+ assert!(!p.to_string_lossy().ends_with('>'));
+ FileName::Real(RealFileName::LocalPath(p))
+ }
+}
+
+#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug)]
+pub enum FileNameDisplayPreference {
+ Remapped,
+ Local,
+}
+
+pub struct FileNameDisplay<'a> {
+ inner: &'a FileName,
+ display_pref: FileNameDisplayPreference,
+}
+
+impl fmt::Display for FileNameDisplay<'_> {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ use FileName::*;
+ match *self.inner {
+ Real(ref name) => {
+ write!(fmt, "{}", name.to_string_lossy(self.display_pref))
+ }
+ QuoteExpansion(_) => write!(fmt, "<quote expansion>"),
+ MacroExpansion(_) => write!(fmt, "<macro expansion>"),
+ Anon(_) => write!(fmt, "<anon>"),
+ ProcMacroSourceCode(_) => write!(fmt, "<proc-macro source code>"),
+ CfgSpec(_) => write!(fmt, "<cfgspec>"),
+ CliCrateAttr(_) => write!(fmt, "<crate attribute>"),
+ Custom(ref s) => write!(fmt, "<{}>", s),
+ DocTest(ref path, _) => write!(fmt, "{}", path.display()),
+ InlineAsm(_) => write!(fmt, "<inline asm>"),
+ }
+ }
+}
+
+impl<'a> FileNameDisplay<'a> {
+ pub fn to_string_lossy(&self) -> Cow<'a, str> {
+ match self.inner {
+ FileName::Real(ref inner) => inner.to_string_lossy(self.display_pref),
+ _ => Cow::from(self.to_string()),
+ }
+ }
+}
+
+impl FileName {
+ pub fn is_real(&self) -> bool {
+ use FileName::*;
+ match *self {
+ Real(_) => true,
+ Anon(_)
+ | MacroExpansion(_)
+ | ProcMacroSourceCode(_)
+ | CfgSpec(_)
+ | CliCrateAttr(_)
+ | Custom(_)
+ | QuoteExpansion(_)
+ | DocTest(_, _)
+ | InlineAsm(_) => false,
+ }
+ }
+
+ pub fn prefer_remapped(&self) -> FileNameDisplay<'_> {
+ FileNameDisplay { inner: self, display_pref: FileNameDisplayPreference::Remapped }
+ }
+
+ // This may include transient local filesystem information.
+ // Must not be embedded in build outputs.
+ pub fn prefer_local(&self) -> FileNameDisplay<'_> {
+ FileNameDisplay { inner: self, display_pref: FileNameDisplayPreference::Local }
+ }
+
+ pub fn display(&self, display_pref: FileNameDisplayPreference) -> FileNameDisplay<'_> {
+ FileNameDisplay { inner: self, display_pref }
+ }
+
+ pub fn macro_expansion_source_code(src: &str) -> FileName {
+ let mut hasher = StableHasher::new();
+ src.hash(&mut hasher);
+ FileName::MacroExpansion(hasher.finish())
+ }
+
+ pub fn anon_source_code(src: &str) -> FileName {
+ let mut hasher = StableHasher::new();
+ src.hash(&mut hasher);
+ FileName::Anon(hasher.finish())
+ }
+
+ pub fn proc_macro_source_code(src: &str) -> FileName {
+ let mut hasher = StableHasher::new();
+ src.hash(&mut hasher);
+ FileName::ProcMacroSourceCode(hasher.finish())
+ }
+
+ pub fn cfg_spec_source_code(src: &str) -> FileName {
+ let mut hasher = StableHasher::new();
+ src.hash(&mut hasher);
+ FileName::QuoteExpansion(hasher.finish())
+ }
+
+ pub fn cli_crate_attr_source_code(src: &str) -> FileName {
+ let mut hasher = StableHasher::new();
+ src.hash(&mut hasher);
+ FileName::CliCrateAttr(hasher.finish())
+ }
+
+ pub fn doc_test_source_code(path: PathBuf, line: isize) -> FileName {
+ FileName::DocTest(path, line)
+ }
+
+ pub fn inline_asm_source_code(src: &str) -> FileName {
+ let mut hasher = StableHasher::new();
+ src.hash(&mut hasher);
+ FileName::InlineAsm(hasher.finish())
+ }
+}
+
+/// Represents a span.
+///
+/// Spans represent a region of code, used for error reporting. Positions in spans
+/// are *absolute* positions from the beginning of the [`SourceMap`], not positions
+/// relative to [`SourceFile`]s. Methods on the `SourceMap` can be used to relate spans back
+/// to the original source.
+///
+/// You must be careful if the span crosses more than one file, since you will not be
+/// able to use many of the functions on spans in source_map and you cannot assume
+/// that the length of the span is equal to `span.hi - span.lo`; there may be space in the
+/// [`BytePos`] range between files.
+///
+/// `SpanData` is public because `Span` uses a thread-local interner and can't be
+/// sent to other threads, but some pieces of performance infra run in a separate thread.
+/// Using `Span` is generally preferred.
+#[derive(Clone, Copy, Hash, PartialEq, Eq)]
+pub struct SpanData {
+ pub lo: BytePos,
+ pub hi: BytePos,
+ /// Information about where the macro came from, if this piece of
+ /// code was created by a macro expansion.
+ pub ctxt: SyntaxContext,
+ pub parent: Option<LocalDefId>,
+}
+
+// Order spans by position in the file.
+impl Ord for SpanData {
+ fn cmp(&self, other: &Self) -> Ordering {
+ let SpanData {
+ lo: s_lo,
+ hi: s_hi,
+ ctxt: s_ctxt,
+ // `LocalDefId` does not implement `Ord`.
+ // The other fields are enough to determine in-file order.
+ parent: _,
+ } = self;
+ let SpanData {
+ lo: o_lo,
+ hi: o_hi,
+ ctxt: o_ctxt,
+ // `LocalDefId` does not implement `Ord`.
+ // The other fields are enough to determine in-file order.
+ parent: _,
+ } = other;
+
+ (s_lo, s_hi, s_ctxt).cmp(&(o_lo, o_hi, o_ctxt))
+ }
+}
+
+impl PartialOrd for SpanData {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl SpanData {
+ #[inline]
+ pub fn span(&self) -> Span {
+ Span::new(self.lo, self.hi, self.ctxt, self.parent)
+ }
+ #[inline]
+ pub fn with_lo(&self, lo: BytePos) -> Span {
+ Span::new(lo, self.hi, self.ctxt, self.parent)
+ }
+ #[inline]
+ pub fn with_hi(&self, hi: BytePos) -> Span {
+ Span::new(self.lo, hi, self.ctxt, self.parent)
+ }
+ #[inline]
+ pub fn with_ctxt(&self, ctxt: SyntaxContext) -> Span {
+ Span::new(self.lo, self.hi, ctxt, self.parent)
+ }
+ #[inline]
+ pub fn with_parent(&self, parent: Option<LocalDefId>) -> Span {
+ Span::new(self.lo, self.hi, self.ctxt, parent)
+ }
+ /// Returns `true` if this is a dummy span with any hygienic context.
+ #[inline]
+ pub fn is_dummy(self) -> bool {
+ self.lo.0 == 0 && self.hi.0 == 0
+ }
+ /// Returns `true` if `self` fully encloses `other`.
+ pub fn contains(self, other: Self) -> bool {
+ self.lo <= other.lo && other.hi <= self.hi
+ }
+}
+
+// The interner is pointed to by a thread-local value which is only set on the main thread
+// when parallelization is disabled. So we don't allow `Span` to transfer between threads
+// to avoid panics and other errors, even though it would be memory safe to do so.
+#[cfg(not(parallel_compiler))]
+impl !Send for Span {}
+#[cfg(not(parallel_compiler))]
+impl !Sync for Span {}
+
+impl PartialOrd for Span {
+ fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&self.data(), &rhs.data())
+ }
+}
+impl Ord for Span {
+ fn cmp(&self, rhs: &Self) -> Ordering {
+ Ord::cmp(&self.data(), &rhs.data())
+ }
+}
+
+impl Span {
+ #[inline]
+ pub fn lo(self) -> BytePos {
+ self.data().lo
+ }
+ #[inline]
+ pub fn with_lo(self, lo: BytePos) -> Span {
+ self.data().with_lo(lo)
+ }
+ #[inline]
+ pub fn hi(self) -> BytePos {
+ self.data().hi
+ }
+ #[inline]
+ pub fn with_hi(self, hi: BytePos) -> Span {
+ self.data().with_hi(hi)
+ }
+ #[inline]
+ pub fn ctxt(self) -> SyntaxContext {
+ self.data_untracked().ctxt
+ }
+ pub fn eq_ctxt(self, other: Span) -> bool {
+ self.data_untracked().ctxt == other.data_untracked().ctxt
+ }
+ #[inline]
+ pub fn with_ctxt(self, ctxt: SyntaxContext) -> Span {
+ self.data_untracked().with_ctxt(ctxt)
+ }
+ #[inline]
+ pub fn parent(self) -> Option<LocalDefId> {
+ self.data().parent
+ }
+ #[inline]
+ pub fn with_parent(self, ctxt: Option<LocalDefId>) -> Span {
+ self.data().with_parent(ctxt)
+ }
+
+ /// Returns `true` if this is a dummy span with any hygienic context.
+ #[inline]
+ pub fn is_dummy(self) -> bool {
+ self.data_untracked().is_dummy()
+ }
+
+ /// Returns `true` if this span comes from a macro or desugaring.
+ #[inline]
+ pub fn from_expansion(self) -> bool {
+ self.ctxt() != SyntaxContext::root()
+ }
+
+ /// Returns `true` if `span` originates in a derive-macro's expansion.
+ pub fn in_derive_expansion(self) -> bool {
+ matches!(self.ctxt().outer_expn_data().kind, ExpnKind::Macro(MacroKind::Derive, _))
+ }
+
+ /// Gate suggestions that would not be appropriate in a context the user didn't write.
+ pub fn can_be_used_for_suggestions(self) -> bool {
+ !self.from_expansion()
+ // FIXME: If this span comes from a `derive` macro but it points at code the user wrote,
+ // the callsite span and the span will be pointing at different places. It also means that
+ // we can safely provide suggestions on this span.
+ || (matches!(self.ctxt().outer_expn_data().kind, ExpnKind::Macro(MacroKind::Derive, _))
+ && self.parent_callsite().map(|p| (p.lo(), p.hi())) != Some((self.lo(), self.hi())))
+ }
+
+ #[inline]
+ pub fn with_root_ctxt(lo: BytePos, hi: BytePos) -> Span {
+ Span::new(lo, hi, SyntaxContext::root(), None)
+ }
+
+ /// Returns a new span representing an empty span at the beginning of this span.
+ #[inline]
+ pub fn shrink_to_lo(self) -> Span {
+ let span = self.data_untracked();
+ span.with_hi(span.lo)
+ }
+ /// Returns a new span representing an empty span at the end of this span.
+ #[inline]
+ pub fn shrink_to_hi(self) -> Span {
+ let span = self.data_untracked();
+ span.with_lo(span.hi)
+ }
+
+ #[inline]
+ /// Returns `true` if `hi == lo`.
+ pub fn is_empty(self) -> bool {
+ let span = self.data_untracked();
+ span.hi == span.lo
+ }
+
+ /// Returns `self` if `self` is not the dummy span, and `other` otherwise.
+ pub fn substitute_dummy(self, other: Span) -> Span {
+ if self.is_dummy() { other } else { self }
+ }
+
+ /// Returns `true` if `self` fully encloses `other`.
+ pub fn contains(self, other: Span) -> bool {
+ let span = self.data();
+ let other = other.data();
+ span.contains(other)
+ }
+
+ /// Returns `true` if `self` touches `other`.
+ pub fn overlaps(self, other: Span) -> bool {
+ let span = self.data();
+ let other = other.data();
+ span.lo < other.hi && other.lo < span.hi
+ }
+
+ /// Returns `true` if the spans are equal with regards to the source text.
+ ///
+ /// Use this instead of `==` when either span could be generated code,
+ /// and you only care that they point to the same bytes of source text.
+ pub fn source_equal(self, other: Span) -> bool {
+ let span = self.data();
+ let other = other.data();
+ span.lo == other.lo && span.hi == other.hi
+ }
+
+ /// Returns `Some(span)`, where the start is trimmed by the end of `other`.
+ pub fn trim_start(self, other: Span) -> Option<Span> {
+ let span = self.data();
+ let other = other.data();
+ if span.hi > other.hi { Some(span.with_lo(cmp::max(span.lo, other.hi))) } else { None }
+ }
+
+ /// Returns the source span -- this is either the supplied span, or the span for
+ /// the macro callsite that expanded to it.
+ pub fn source_callsite(self) -> Span {
+ let expn_data = self.ctxt().outer_expn_data();
+ if !expn_data.is_root() { expn_data.call_site.source_callsite() } else { self }
+ }
+
+ /// The `Span` for the tokens in the previous macro expansion from which `self` was generated,
+ /// if any.
+ pub fn parent_callsite(self) -> Option<Span> {
+ let expn_data = self.ctxt().outer_expn_data();
+ if !expn_data.is_root() { Some(expn_data.call_site) } else { None }
+ }
+
+ /// Walk down the expansion ancestors to find a span that's contained within `outer`.
+ pub fn find_ancestor_inside(mut self, outer: Span) -> Option<Span> {
+ while !outer.contains(self) {
+ self = self.parent_callsite()?;
+ }
+ Some(self)
+ }
+
+ /// Edition of the crate from which this span came.
+ pub fn edition(self) -> edition::Edition {
+ self.ctxt().edition()
+ }
+
+ #[inline]
+ pub fn rust_2015(self) -> bool {
+ self.edition() == edition::Edition::Edition2015
+ }
+
+ #[inline]
+ pub fn rust_2018(self) -> bool {
+ self.edition() >= edition::Edition::Edition2018
+ }
+
+ #[inline]
+ pub fn rust_2021(self) -> bool {
+ self.edition() >= edition::Edition::Edition2021
+ }
+
+ #[inline]
+ pub fn rust_2024(self) -> bool {
+ self.edition() >= edition::Edition::Edition2024
+ }
+
+ /// Returns the source callee.
+ ///
+ /// Returns `None` if the supplied span has no expansion trace,
+ /// else returns the `ExpnData` for the macro definition
+ /// corresponding to the source callsite.
+ pub fn source_callee(self) -> Option<ExpnData> {
+ fn source_callee(expn_data: ExpnData) -> ExpnData {
+ let next_expn_data = expn_data.call_site.ctxt().outer_expn_data();
+ if !next_expn_data.is_root() { source_callee(next_expn_data) } else { expn_data }
+ }
+ let expn_data = self.ctxt().outer_expn_data();
+ if !expn_data.is_root() { Some(source_callee(expn_data)) } else { None }
+ }
+
+ /// Checks if a span is "internal" to a macro in which `#[unstable]`
+ /// items can be used (that is, a macro marked with
+ /// `#[allow_internal_unstable]`).
+ pub fn allows_unstable(self, feature: Symbol) -> bool {
+ self.ctxt()
+ .outer_expn_data()
+ .allow_internal_unstable
+ .map_or(false, |features| features.iter().any(|&f| f == feature))
+ }
+
+ /// Checks if this span arises from a compiler desugaring of kind `kind`.
+ pub fn is_desugaring(self, kind: DesugaringKind) -> bool {
+ match self.ctxt().outer_expn_data().kind {
+ ExpnKind::Desugaring(k) => k == kind,
+ _ => false,
+ }
+ }
+
+ /// Returns the compiler desugaring that created this span, or `None`
+ /// if this span is not from a desugaring.
+ pub fn desugaring_kind(self) -> Option<DesugaringKind> {
+ match self.ctxt().outer_expn_data().kind {
+ ExpnKind::Desugaring(k) => Some(k),
+ _ => None,
+ }
+ }
+
+ /// Checks if a span is "internal" to a macro in which `unsafe`
+ /// can be used without triggering the `unsafe_code` lint.
+    /// (that is, a macro marked with `#[allow_internal_unsafe]`).
+ pub fn allows_unsafe(self) -> bool {
+ self.ctxt().outer_expn_data().allow_internal_unsafe
+ }
+
+ pub fn macro_backtrace(mut self) -> impl Iterator<Item = ExpnData> {
+ let mut prev_span = DUMMY_SP;
+ std::iter::from_fn(move || {
+ loop {
+ let expn_data = self.ctxt().outer_expn_data();
+ if expn_data.is_root() {
+ return None;
+ }
+
+ let is_recursive = expn_data.call_site.source_equal(prev_span);
+
+ prev_span = self;
+ self = expn_data.call_site;
+
+ // Don't print recursive invocations.
+ if !is_recursive {
+ return Some(expn_data);
+ }
+ }
+ })
+ }
+
+ /// Returns a `Span` that would enclose both `self` and `end`.
+ ///
+ /// ```text
+ /// ____ ___
+ /// self lorem ipsum end
+ /// ^^^^^^^^^^^^^^^^^^^^
+ /// ```
+ pub fn to(self, end: Span) -> Span {
+ let span_data = self.data();
+ let end_data = end.data();
+ // FIXME(jseyfried): `self.ctxt` should always equal `end.ctxt` here (cf. issue #23480).
+ // Return the macro span on its own to avoid weird diagnostic output. It is preferable to
+ // have an incomplete span than a completely nonsensical one.
+ if span_data.ctxt != end_data.ctxt {
+ if span_data.ctxt == SyntaxContext::root() {
+ return end;
+ } else if end_data.ctxt == SyntaxContext::root() {
+ return self;
+ }
+ // Both spans fall within a macro.
+ // FIXME(estebank): check if it is the *same* macro.
+ }
+ Span::new(
+ cmp::min(span_data.lo, end_data.lo),
+ cmp::max(span_data.hi, end_data.hi),
+ if span_data.ctxt == SyntaxContext::root() { end_data.ctxt } else { span_data.ctxt },
+ if span_data.parent == end_data.parent { span_data.parent } else { None },
+ )
+ }
+
+    /// Returns a `Span` from the end of `self` to the beginning of `end`.
+ ///
+ /// ```text
+ /// ____ ___
+ /// self lorem ipsum end
+ /// ^^^^^^^^^^^^^
+ /// ```
+ pub fn between(self, end: Span) -> Span {
+ let span = self.data();
+ let end = end.data();
+ Span::new(
+ span.hi,
+ end.lo,
+ if end.ctxt == SyntaxContext::root() { end.ctxt } else { span.ctxt },
+ if span.parent == end.parent { span.parent } else { None },
+ )
+ }
+
+ /// Returns a `Span` from the beginning of `self` until the beginning of `end`.
+ ///
+ /// ```text
+ /// ____ ___
+ /// self lorem ipsum end
+ /// ^^^^^^^^^^^^^^^^^
+ /// ```
+ pub fn until(self, end: Span) -> Span {
+        // Most of this function's body is copied from `to`.
+        // We can't just do `self.to(end.shrink_to_lo())`, because `to` also
+        // uses min/max internally so that it can handle overlapping spans,
+        // and some advanced misuse of `until` with differing ctxts makes
+        // that difference observable.
+ let span_data = self.data();
+ let end_data = end.data();
+ // FIXME(jseyfried): `self.ctxt` should always equal `end.ctxt` here (cf. issue #23480).
+ // Return the macro span on its own to avoid weird diagnostic output. It is preferable to
+ // have an incomplete span than a completely nonsensical one.
+ if span_data.ctxt != end_data.ctxt {
+ if span_data.ctxt == SyntaxContext::root() {
+ return end;
+ } else if end_data.ctxt == SyntaxContext::root() {
+ return self;
+ }
+ // Both spans fall within a macro.
+ // FIXME(estebank): check if it is the *same* macro.
+ }
+ Span::new(
+ span_data.lo,
+ end_data.lo,
+ if end_data.ctxt == SyntaxContext::root() { end_data.ctxt } else { span_data.ctxt },
+ if span_data.parent == end_data.parent { span_data.parent } else { None },
+ )
+ }
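+
+    // Illustrative worked example (byte offsets hypothetical): for same-context spans
+    // `a` = 4..8 and `b` = 12..15,
+    //
+    //     a.to(b)      covers 4..15  (encloses both),
+    //     a.between(b) covers 8..12  (the gap between them),
+    //     a.until(b)   covers 4..12  (from the start of `a` to the start of `b`).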
+
+ pub fn from_inner(self, inner: InnerSpan) -> Span {
+ let span = self.data();
+ Span::new(
+ span.lo + BytePos::from_usize(inner.start),
+ span.lo + BytePos::from_usize(inner.end),
+ span.ctxt,
+ span.parent,
+ )
+ }
+
+ /// Equivalent of `Span::def_site` from the proc macro API,
+ /// except that the location is taken from the `self` span.
+ pub fn with_def_site_ctxt(self, expn_id: ExpnId) -> Span {
+ self.with_ctxt_from_mark(expn_id, Transparency::Opaque)
+ }
+
+ /// Equivalent of `Span::call_site` from the proc macro API,
+ /// except that the location is taken from the `self` span.
+ pub fn with_call_site_ctxt(self, expn_id: ExpnId) -> Span {
+ self.with_ctxt_from_mark(expn_id, Transparency::Transparent)
+ }
+
+ /// Equivalent of `Span::mixed_site` from the proc macro API,
+ /// except that the location is taken from the `self` span.
+ pub fn with_mixed_site_ctxt(self, expn_id: ExpnId) -> Span {
+ self.with_ctxt_from_mark(expn_id, Transparency::SemiTransparent)
+ }
+
+ /// Produces a span with the same location as `self` and context produced by a macro with the
+ /// given ID and transparency, assuming that macro was defined directly and not produced by
+ /// some other macro (which is the case for built-in and procedural macros).
+ pub fn with_ctxt_from_mark(self, expn_id: ExpnId, transparency: Transparency) -> Span {
+ self.with_ctxt(SyntaxContext::root().apply_mark(expn_id, transparency))
+ }
+
+ #[inline]
+ pub fn apply_mark(self, expn_id: ExpnId, transparency: Transparency) -> Span {
+ let span = self.data();
+ span.with_ctxt(span.ctxt.apply_mark(expn_id, transparency))
+ }
+
+ #[inline]
+ pub fn remove_mark(&mut self) -> ExpnId {
+ let mut span = self.data();
+ let mark = span.ctxt.remove_mark();
+ *self = Span::new(span.lo, span.hi, span.ctxt, span.parent);
+ mark
+ }
+
+ #[inline]
+ pub fn adjust(&mut self, expn_id: ExpnId) -> Option<ExpnId> {
+ let mut span = self.data();
+ let mark = span.ctxt.adjust(expn_id);
+ *self = Span::new(span.lo, span.hi, span.ctxt, span.parent);
+ mark
+ }
+
+ #[inline]
+ pub fn normalize_to_macros_2_0_and_adjust(&mut self, expn_id: ExpnId) -> Option<ExpnId> {
+ let mut span = self.data();
+ let mark = span.ctxt.normalize_to_macros_2_0_and_adjust(expn_id);
+ *self = Span::new(span.lo, span.hi, span.ctxt, span.parent);
+ mark
+ }
+
+ #[inline]
+ pub fn glob_adjust(&mut self, expn_id: ExpnId, glob_span: Span) -> Option<Option<ExpnId>> {
+ let mut span = self.data();
+ let mark = span.ctxt.glob_adjust(expn_id, glob_span);
+ *self = Span::new(span.lo, span.hi, span.ctxt, span.parent);
+ mark
+ }
+
+ #[inline]
+ pub fn reverse_glob_adjust(
+ &mut self,
+ expn_id: ExpnId,
+ glob_span: Span,
+ ) -> Option<Option<ExpnId>> {
+ let mut span = self.data();
+ let mark = span.ctxt.reverse_glob_adjust(expn_id, glob_span);
+ *self = Span::new(span.lo, span.hi, span.ctxt, span.parent);
+ mark
+ }
+
+ #[inline]
+ pub fn normalize_to_macros_2_0(self) -> Span {
+ let span = self.data();
+ span.with_ctxt(span.ctxt.normalize_to_macros_2_0())
+ }
+
+ #[inline]
+ pub fn normalize_to_macro_rules(self) -> Span {
+ let span = self.data();
+ span.with_ctxt(span.ctxt.normalize_to_macro_rules())
+ }
+}
+
+impl Default for Span {
+ fn default() -> Self {
+ DUMMY_SP
+ }
+}
+
+impl<E: Encoder> Encodable<E> for Span {
+ default fn encode(&self, s: &mut E) {
+ let span = self.data();
+ span.lo.encode(s);
+ span.hi.encode(s);
+ }
+}
+impl<D: Decoder> Decodable<D> for Span {
+ default fn decode(s: &mut D) -> Span {
+ let lo = Decodable::decode(s);
+ let hi = Decodable::decode(s);
+
+ Span::new(lo, hi, SyntaxContext::root(), None)
+ }
+}
+
+/// Calls the provided closure, using the provided `SourceMap` to format
+/// any spans that are debug-printed during the closure's execution.
+///
+/// Normally, the global `TyCtxt` is used to retrieve the `SourceMap`
+/// (see `rustc_interface::callbacks::span_debug1`). However, some parts
+/// of the compiler (e.g. `rustc_parse`) may debug-print `Span`s before
+/// a `TyCtxt` is available. In this case, we fall back to
+/// the `SourceMap` provided to this function. If that is not available,
+/// we fall back to printing the raw `Span` field values.
+pub fn with_source_map<T, F: FnOnce() -> T>(source_map: Lrc<SourceMap>, f: F) -> T {
+ with_session_globals(|session_globals| {
+ *session_globals.source_map.borrow_mut() = Some(source_map);
+ });
+ struct ClearSourceMap;
+ impl Drop for ClearSourceMap {
+ fn drop(&mut self) {
+ with_session_globals(|session_globals| {
+ session_globals.source_map.borrow_mut().take();
+ });
+ }
+ }
+
+ let _guard = ClearSourceMap;
+ f()
+}
+
+impl fmt::Debug for Span {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ with_session_globals(|session_globals| {
+ if let Some(source_map) = &*session_globals.source_map.borrow() {
+ write!(f, "{} ({:?})", source_map.span_to_diagnostic_string(*self), self.ctxt())
+ } else {
+ f.debug_struct("Span")
+ .field("lo", &self.lo())
+ .field("hi", &self.hi())
+ .field("ctxt", &self.ctxt())
+ .finish()
+ }
+ })
+ }
+}
+
+impl fmt::Debug for SpanData {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&Span::new(self.lo, self.hi, self.ctxt, self.parent), f)
+ }
+}
+
+/// Identifies an offset of a multi-byte character in a `SourceFile`.
+#[derive(Copy, Clone, Encodable, Decodable, Eq, PartialEq, Debug)]
+pub struct MultiByteChar {
+ /// The absolute offset of the character in the `SourceMap`.
+ pub pos: BytePos,
+ /// The number of bytes, `>= 2`.
+ pub bytes: u8,
+}
+
+/// Identifies an offset of a non-narrow character in a `SourceFile`.
+#[derive(Copy, Clone, Encodable, Decodable, Eq, PartialEq, Debug)]
+pub enum NonNarrowChar {
+ /// Represents a zero-width character.
+ ZeroWidth(BytePos),
+ /// Represents a wide (full-width) character.
+ Wide(BytePos),
+ /// Represents a tab character, represented visually with a width of 4 characters.
+ Tab(BytePos),
+}
+
+impl NonNarrowChar {
+ fn new(pos: BytePos, width: usize) -> Self {
+ match width {
+ 0 => NonNarrowChar::ZeroWidth(pos),
+ 2 => NonNarrowChar::Wide(pos),
+ 4 => NonNarrowChar::Tab(pos),
+ _ => panic!("width {} given for non-narrow character", width),
+ }
+ }
+
+ /// Returns the absolute offset of the character in the `SourceMap`.
+ pub fn pos(&self) -> BytePos {
+ match *self {
+ NonNarrowChar::ZeroWidth(p) | NonNarrowChar::Wide(p) | NonNarrowChar::Tab(p) => p,
+ }
+ }
+
+    /// Returns the width of the character: 0 (zero-width), 2 (wide), or 4 (tab).
+ pub fn width(&self) -> usize {
+ match *self {
+ NonNarrowChar::ZeroWidth(_) => 0,
+ NonNarrowChar::Wide(_) => 2,
+ NonNarrowChar::Tab(_) => 4,
+ }
+ }
+}
+
+impl Add<BytePos> for NonNarrowChar {
+ type Output = Self;
+
+ fn add(self, rhs: BytePos) -> Self {
+ match self {
+ NonNarrowChar::ZeroWidth(pos) => NonNarrowChar::ZeroWidth(pos + rhs),
+ NonNarrowChar::Wide(pos) => NonNarrowChar::Wide(pos + rhs),
+ NonNarrowChar::Tab(pos) => NonNarrowChar::Tab(pos + rhs),
+ }
+ }
+}
+
+impl Sub<BytePos> for NonNarrowChar {
+ type Output = Self;
+
+ fn sub(self, rhs: BytePos) -> Self {
+ match self {
+ NonNarrowChar::ZeroWidth(pos) => NonNarrowChar::ZeroWidth(pos - rhs),
+ NonNarrowChar::Wide(pos) => NonNarrowChar::Wide(pos - rhs),
+ NonNarrowChar::Tab(pos) => NonNarrowChar::Tab(pos - rhs),
+ }
+ }
+}
+
+/// Identifies an offset of a character that was normalized away from `SourceFile`.
+#[derive(Copy, Clone, Encodable, Decodable, Eq, PartialEq, Debug)]
+pub struct NormalizedPos {
+ /// The absolute offset of the character in the `SourceMap`.
+ pub pos: BytePos,
+ /// The difference between original and normalized string at position.
+ pub diff: u32,
+}
+
+#[derive(PartialEq, Eq, Clone, Debug)]
+pub enum ExternalSource {
+ /// No external source has to be loaded, since the `SourceFile` represents a local crate.
+ Unneeded,
+ Foreign {
+ kind: ExternalSourceKind,
+ /// This SourceFile's byte-offset within the source_map of its original crate.
+ original_start_pos: BytePos,
+ /// The end of this SourceFile within the source_map of its original crate.
+ original_end_pos: BytePos,
+ },
+}
+
+/// The state of the lazy external source loading mechanism of a `SourceFile`.
+#[derive(PartialEq, Eq, Clone, Debug)]
+pub enum ExternalSourceKind {
+ /// The external source has been loaded already.
+ Present(Lrc<String>),
+ /// No attempt has been made to load the external source.
+ AbsentOk,
+ /// A failed attempt has been made to load the external source.
+ AbsentErr,
+ Unneeded,
+}
+
+impl ExternalSource {
+ pub fn get_source(&self) -> Option<&Lrc<String>> {
+ match self {
+ ExternalSource::Foreign { kind: ExternalSourceKind::Present(ref src), .. } => Some(src),
+ _ => None,
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct OffsetOverflowError;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+#[derive(HashStable_Generic)]
+pub enum SourceFileHashAlgorithm {
+ Md5,
+ Sha1,
+ Sha256,
+}
+
+impl FromStr for SourceFileHashAlgorithm {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<SourceFileHashAlgorithm, ()> {
+ match s {
+ "md5" => Ok(SourceFileHashAlgorithm::Md5),
+ "sha1" => Ok(SourceFileHashAlgorithm::Sha1),
+ "sha256" => Ok(SourceFileHashAlgorithm::Sha256),
+ _ => Err(()),
+ }
+ }
+}
+
+/// The hash of the on-disk source file used for debug info.
+#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub struct SourceFileHash {
+ pub kind: SourceFileHashAlgorithm,
+ value: [u8; 32],
+}
+
+impl SourceFileHash {
+ pub fn new(kind: SourceFileHashAlgorithm, src: &str) -> SourceFileHash {
+ let mut hash = SourceFileHash { kind, value: Default::default() };
+ let len = hash.hash_len();
+ let value = &mut hash.value[..len];
+ let data = src.as_bytes();
+ match kind {
+ SourceFileHashAlgorithm::Md5 => {
+ value.copy_from_slice(&Md5::digest(data));
+ }
+ SourceFileHashAlgorithm::Sha1 => {
+ value.copy_from_slice(&Sha1::digest(data));
+ }
+ SourceFileHashAlgorithm::Sha256 => {
+ value.copy_from_slice(&Sha256::digest(data));
+ }
+ }
+ hash
+ }
+
+ /// Check if the stored hash matches the hash of the string.
+ pub fn matches(&self, src: &str) -> bool {
+ Self::new(self.kind, src) == *self
+ }
+
+ /// The bytes of the hash.
+ pub fn hash_bytes(&self) -> &[u8] {
+ let len = self.hash_len();
+ &self.value[..len]
+ }
+
+ fn hash_len(&self) -> usize {
+ match self.kind {
+ SourceFileHashAlgorithm::Md5 => 16,
+ SourceFileHashAlgorithm::Sha1 => 20,
+ SourceFileHashAlgorithm::Sha256 => 32,
+ }
+ }
+}
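+
+// Illustrative sketch (not from the original sources): only the first `hash_len()`
+// bytes of the fixed 32-byte buffer are meaningful:
+//
+//     let h = SourceFileHash::new(SourceFileHashAlgorithm::Sha1, "fn main() {}");
+//     assert_eq!(h.hash_bytes().len(), 20);
+//     assert!(h.matches("fn main() {}"));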
+
+#[derive(HashStable_Generic)]
+#[derive(Copy, PartialEq, PartialOrd, Clone, Ord, Eq, Hash, Debug, Encodable, Decodable)]
+pub enum DebuggerVisualizerType {
+ Natvis,
+ GdbPrettyPrinter,
+}
+
+/// A single debugger visualizer file.
+#[derive(HashStable_Generic)]
+#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Encodable, Decodable)]
+pub struct DebuggerVisualizerFile {
+ /// The complete debugger visualizer source.
+ pub src: Arc<[u8]>,
+ /// Indicates which visualizer type this targets.
+ pub visualizer_type: DebuggerVisualizerType,
+}
+
+impl DebuggerVisualizerFile {
+ pub fn new(src: Arc<[u8]>, visualizer_type: DebuggerVisualizerType) -> Self {
+ DebuggerVisualizerFile { src, visualizer_type }
+ }
+}
+
+#[derive(Clone)]
+pub enum SourceFileLines {
+ /// The source file lines, in decoded (random-access) form.
+ Lines(Vec<BytePos>),
+
+ /// The source file lines, in undecoded difference list form.
+ Diffs(SourceFileDiffs),
+}
+
+impl SourceFileLines {
+ pub fn is_lines(&self) -> bool {
+ matches!(self, SourceFileLines::Lines(_))
+ }
+}
+
+/// The source file lines in difference list form. This matches the form
+/// used within metadata, which saves space by exploiting the fact that the
+/// lines list is sorted and individual lines are usually not that long.
+///
+/// We read it directly from metadata and only decode it into `Lines` form
+/// when necessary. This is a significant performance win, especially for
+/// small crates where very little of `std`'s metadata is used.
+#[derive(Clone)]
+pub struct SourceFileDiffs {
+ /// Position of the first line. Note that this is always encoded as a
+ /// `BytePos` because it is often much larger than any of the
+ /// differences.
+ line_start: BytePos,
+
+ /// Always 1, 2, or 4. Always as small as possible, while being big
+ /// enough to hold the length of the longest line in the source file.
+ /// The 1 case is by far the most common.
+ bytes_per_diff: usize,
+
+ /// The number of diffs encoded in `raw_diffs`. Always one less than
+ /// the number of lines in the source file.
+ num_diffs: usize,
+
+ /// The diffs in "raw" form. Each segment of `bytes_per_diff` length
+ /// encodes one little-endian diff. Note that they aren't LEB128
+ /// encoded. This makes for much faster decoding. Besides, the
+ /// bytes_per_diff==1 case is by far the most common, and LEB128
+ /// encoding has no effect on that case.
+ raw_diffs: Vec<u8>,
+}
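+
+// Illustrative worked example: line starts `[BytePos(0), BytePos(5), BytePos(30)]`
+// encode as `line_start = BytePos(0)`, `bytes_per_diff = 1`, `num_diffs = 2`, and
+// `raw_diffs = [5, 25]`, since both line lengths fit in a single byte.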
+
+/// A single source in the [`SourceMap`].
+#[derive(Clone)]
+pub struct SourceFile {
+ /// The name of the file that the source came from. Source that doesn't
+ /// originate from files has names between angle brackets by convention
+ /// (e.g., `<anon>`).
+ pub name: FileName,
+ /// The complete source code.
+ pub src: Option<Lrc<String>>,
+ /// The source code's hash.
+ pub src_hash: SourceFileHash,
+    /// The external source code (used for external crates, which will have a `None`
+    /// value as `self.src`).
+ pub external_src: Lock<ExternalSource>,
+ /// The start position of this source in the `SourceMap`.
+ pub start_pos: BytePos,
+ /// The end position of this source in the `SourceMap`.
+ pub end_pos: BytePos,
+    /// Locations of line beginnings in the source code.
+ pub lines: Lock<SourceFileLines>,
+ /// Locations of multi-byte characters in the source code.
+ pub multibyte_chars: Vec<MultiByteChar>,
+ /// Width of characters that are not narrow in the source code.
+ pub non_narrow_chars: Vec<NonNarrowChar>,
+ /// Locations of characters removed during normalization.
+ pub normalized_pos: Vec<NormalizedPos>,
+ /// A hash of the filename, used for speeding up hashing in incremental compilation.
+ pub name_hash: u128,
+ /// Indicates which crate this `SourceFile` was imported from.
+ pub cnum: CrateNum,
+}
+
+impl<S: Encoder> Encodable<S> for SourceFile {
+ fn encode(&self, s: &mut S) {
+ self.name.encode(s);
+ self.src_hash.encode(s);
+ self.start_pos.encode(s);
+ self.end_pos.encode(s);
+
+ // We are always in `Lines` form by the time we reach here.
+ assert!(self.lines.borrow().is_lines());
+ self.lines(|lines| {
+ // Store the length.
+ s.emit_u32(lines.len() as u32);
+
+ // Compute and store the difference list.
+ if lines.len() != 0 {
+ let max_line_length = if lines.len() == 1 {
+ 0
+ } else {
+ lines
+ .array_windows()
+ .map(|&[fst, snd]| snd - fst)
+ .map(|bp| bp.to_usize())
+ .max()
+ .unwrap()
+ };
+
+ let bytes_per_diff: usize = match max_line_length {
+ 0..=0xFF => 1,
+ 0x100..=0xFFFF => 2,
+ _ => 4,
+ };
+
+ // Encode the number of bytes used per diff.
+ s.emit_u8(bytes_per_diff as u8);
+
+ // Encode the first element.
+ lines[0].encode(s);
+
+ // Encode the difference list.
+ let diff_iter = lines.array_windows().map(|&[fst, snd]| snd - fst);
+ let num_diffs = lines.len() - 1;
+ let mut raw_diffs;
+ match bytes_per_diff {
+ 1 => {
+ raw_diffs = Vec::with_capacity(num_diffs);
+ for diff in diff_iter {
+ raw_diffs.push(diff.0 as u8);
+ }
+ }
+ 2 => {
+ raw_diffs = Vec::with_capacity(bytes_per_diff * num_diffs);
+ for diff in diff_iter {
+ raw_diffs.extend_from_slice(&(diff.0 as u16).to_le_bytes());
+ }
+ }
+ 4 => {
+ raw_diffs = Vec::with_capacity(bytes_per_diff * num_diffs);
+ for diff in diff_iter {
+ raw_diffs.extend_from_slice(&(diff.0 as u32).to_le_bytes());
+ }
+ }
+ _ => unreachable!(),
+ }
+ s.emit_raw_bytes(&raw_diffs);
+ }
+ });
+
+ self.multibyte_chars.encode(s);
+ self.non_narrow_chars.encode(s);
+ self.name_hash.encode(s);
+ self.normalized_pos.encode(s);
+ self.cnum.encode(s);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for SourceFile {
+ fn decode(d: &mut D) -> SourceFile {
+ let name: FileName = Decodable::decode(d);
+ let src_hash: SourceFileHash = Decodable::decode(d);
+ let start_pos: BytePos = Decodable::decode(d);
+ let end_pos: BytePos = Decodable::decode(d);
+ let lines = {
+ let num_lines: u32 = Decodable::decode(d);
+ if num_lines > 0 {
+ // Read the number of bytes used per diff.
+ let bytes_per_diff = d.read_u8() as usize;
+
+ // Read the first element.
+ let line_start: BytePos = Decodable::decode(d);
+
+ // Read the difference list.
+ let num_diffs = num_lines as usize - 1;
+ let raw_diffs = d.read_raw_bytes(bytes_per_diff * num_diffs).to_vec();
+ SourceFileLines::Diffs(SourceFileDiffs {
+ line_start,
+ bytes_per_diff,
+ num_diffs,
+ raw_diffs,
+ })
+ } else {
+ SourceFileLines::Lines(vec![])
+ }
+ };
+ let multibyte_chars: Vec<MultiByteChar> = Decodable::decode(d);
+ let non_narrow_chars: Vec<NonNarrowChar> = Decodable::decode(d);
+ let name_hash: u128 = Decodable::decode(d);
+ let normalized_pos: Vec<NormalizedPos> = Decodable::decode(d);
+ let cnum: CrateNum = Decodable::decode(d);
+ SourceFile {
+ name,
+ start_pos,
+ end_pos,
+ src: None,
+ src_hash,
+ // Unused - the metadata decoder will construct
+ // a new SourceFile, filling in `external_src` properly
+ external_src: Lock::new(ExternalSource::Unneeded),
+ lines: Lock::new(lines),
+ multibyte_chars,
+ non_narrow_chars,
+ normalized_pos,
+ name_hash,
+ cnum,
+ }
+ }
+}
+
+impl fmt::Debug for SourceFile {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "SourceFile({:?})", self.name)
+ }
+}
+
+impl SourceFile {
+ pub fn new(
+ name: FileName,
+ mut src: String,
+ start_pos: BytePos,
+ hash_kind: SourceFileHashAlgorithm,
+ ) -> Self {
+ // Compute the file hash before any normalization.
+ let src_hash = SourceFileHash::new(hash_kind, &src);
+ let normalized_pos = normalize_src(&mut src, start_pos);
+
+ let name_hash = {
+ let mut hasher: StableHasher = StableHasher::new();
+ name.hash(&mut hasher);
+ hasher.finish::<u128>()
+ };
+ let end_pos = start_pos.to_usize() + src.len();
+ assert!(end_pos <= u32::MAX as usize);
+
+ let (lines, multibyte_chars, non_narrow_chars) =
+ analyze_source_file::analyze_source_file(&src, start_pos);
+
+ SourceFile {
+ name,
+ src: Some(Lrc::new(src)),
+ src_hash,
+ external_src: Lock::new(ExternalSource::Unneeded),
+ start_pos,
+ end_pos: Pos::from_usize(end_pos),
+ lines: Lock::new(SourceFileLines::Lines(lines)),
+ multibyte_chars,
+ non_narrow_chars,
+ normalized_pos,
+ name_hash,
+ cnum: LOCAL_CRATE,
+ }
+ }
+
+ pub fn lines<F, R>(&self, f: F) -> R
+ where
+ F: FnOnce(&[BytePos]) -> R,
+ {
+ let mut guard = self.lines.borrow_mut();
+ match &*guard {
+ SourceFileLines::Lines(lines) => f(lines),
+ SourceFileLines::Diffs(SourceFileDiffs {
+ mut line_start,
+ bytes_per_diff,
+ num_diffs,
+ raw_diffs,
+ }) => {
+ // Convert from "diffs" form to "lines" form.
+ let num_lines = num_diffs + 1;
+ let mut lines = Vec::with_capacity(num_lines);
+ lines.push(line_start);
+
+ assert_eq!(*num_diffs, raw_diffs.len() / bytes_per_diff);
+ match bytes_per_diff {
+ 1 => {
+ lines.extend(raw_diffs.into_iter().map(|&diff| {
+ line_start = line_start + BytePos(diff as u32);
+ line_start
+ }));
+ }
+ 2 => {
+ lines.extend((0..*num_diffs).map(|i| {
+ let pos = bytes_per_diff * i;
+ let bytes = [raw_diffs[pos], raw_diffs[pos + 1]];
+ let diff = u16::from_le_bytes(bytes);
+ line_start = line_start + BytePos(diff as u32);
+ line_start
+ }));
+ }
+ 4 => {
+ lines.extend((0..*num_diffs).map(|i| {
+ let pos = bytes_per_diff * i;
+ let bytes = [
+ raw_diffs[pos],
+ raw_diffs[pos + 1],
+ raw_diffs[pos + 2],
+ raw_diffs[pos + 3],
+ ];
+ let diff = u32::from_le_bytes(bytes);
+ line_start = line_start + BytePos(diff);
+ line_start
+ }));
+ }
+ _ => unreachable!(),
+ }
+ let res = f(&lines);
+ *guard = SourceFileLines::Lines(lines);
+ res
+ }
+ }
+ }
+
+    /// Returns the `BytePos` of the beginning of the line containing `pos`.
+ pub fn line_begin_pos(&self, pos: BytePos) -> BytePos {
+ let line_index = self.lookup_line(pos).unwrap();
+ self.lines(|lines| lines[line_index])
+ }
+
+    /// Add externally loaded source.
+    /// If the hash of the input doesn't match, or no input is supplied (`get_src`
+    /// returns `None`), this is interpreted as an error and the corresponding enum
+    /// variant is set.
+    /// The return value signifies whether some kind of source is present.
+ pub fn add_external_src<F>(&self, get_src: F) -> bool
+ where
+ F: FnOnce() -> Option<String>,
+ {
+ if matches!(
+ *self.external_src.borrow(),
+ ExternalSource::Foreign { kind: ExternalSourceKind::AbsentOk, .. }
+ ) {
+ let src = get_src();
+ let mut external_src = self.external_src.borrow_mut();
+            // Check that no one else has provided the source while we were getting it.
+ if let ExternalSource::Foreign {
+ kind: src_kind @ ExternalSourceKind::AbsentOk, ..
+ } = &mut *external_src
+ {
+ if let Some(mut src) = src {
+ // The src_hash needs to be computed on the pre-normalized src.
+ if self.src_hash.matches(&src) {
+ normalize_src(&mut src, BytePos::from_usize(0));
+ *src_kind = ExternalSourceKind::Present(Lrc::new(src));
+ return true;
+ }
+ } else {
+ *src_kind = ExternalSourceKind::AbsentErr;
+ }
+
+ false
+ } else {
+ self.src.is_some() || external_src.get_source().is_some()
+ }
+ } else {
+ self.src.is_some() || self.external_src.borrow().get_source().is_some()
+ }
+ }
+
+ /// Gets a line from the list of pre-computed line-beginnings.
+ /// The line number here is 0-based.
+ pub fn get_line(&self, line_number: usize) -> Option<Cow<'_, str>> {
+ fn get_until_newline(src: &str, begin: usize) -> &str {
+ // We can't use `lines.get(line_number+1)` because we might
+ // be parsing when we call this function and thus the current
+ // line is the last one we have line info for.
+ let slice = &src[begin..];
+ match slice.find('\n') {
+ Some(e) => &slice[..e],
+ None => slice,
+ }
+ }
+
+ let begin = {
+ let line = self.lines(|lines| lines.get(line_number).copied())?;
+ let begin: BytePos = line - self.start_pos;
+ begin.to_usize()
+ };
+
+ if let Some(ref src) = self.src {
+ Some(Cow::from(get_until_newline(src, begin)))
+ } else if let Some(src) = self.external_src.borrow().get_source() {
+ Some(Cow::Owned(String::from(get_until_newline(src, begin))))
+ } else {
+ None
+ }
+ }
+
+ pub fn is_real_file(&self) -> bool {
+ self.name.is_real()
+ }
+
+ #[inline]
+ pub fn is_imported(&self) -> bool {
+ self.src.is_none()
+ }
+
+ pub fn count_lines(&self) -> usize {
+ self.lines(|lines| lines.len())
+ }
+
+ /// Finds the line containing the given position. The return value is the
+ /// index into the `lines` array of this `SourceFile`, not the 1-based line
+ /// number. If the source_file is empty or the position is located before the
+ /// first line, `None` is returned.
+ pub fn lookup_line(&self, pos: BytePos) -> Option<usize> {
+ self.lines(|lines| match lines.binary_search(&pos) {
+ Ok(idx) => Some(idx),
+ Err(0) => None,
+ Err(idx) => Some(idx - 1),
+ })
+ }
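+
+    // Illustrative example: with line starts [0, 10, 20], `lookup_line(BytePos(15))`
+    // binary-searches to `Err(2)` and returns `Some(1)`, the line beginning at 10,
+    // while `lookup_line(BytePos(10))` hits `Ok(1)` directly.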
+
+ pub fn line_bounds(&self, line_index: usize) -> Range<BytePos> {
+ if self.is_empty() {
+ return self.start_pos..self.end_pos;
+ }
+
+ self.lines(|lines| {
+ assert!(line_index < lines.len());
+ if line_index == (lines.len() - 1) {
+ lines[line_index]..self.end_pos
+ } else {
+ lines[line_index]..lines[line_index + 1]
+ }
+ })
+ }
+
+ /// Returns whether or not the file contains the given `SourceMap` byte
+ /// position. The position one past the end of the file is considered to be
+ /// contained by the file. This implies that files for which `is_empty`
+ /// returns true still contain one byte position according to this function.
+ #[inline]
+ pub fn contains(&self, byte_pos: BytePos) -> bool {
+ byte_pos >= self.start_pos && byte_pos <= self.end_pos
+ }
+
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.start_pos == self.end_pos
+ }
+
+ /// Calculates the original byte position relative to the start of the file
+ /// based on the given byte position.
+ pub fn original_relative_byte_pos(&self, pos: BytePos) -> BytePos {
+ // Diff before any records is 0. Otherwise use the previously recorded
+ // diff as that applies to the following characters until a new diff
+ // is recorded.
+ let diff = match self.normalized_pos.binary_search_by(|np| np.pos.cmp(&pos)) {
+ Ok(i) => self.normalized_pos[i].diff,
+ Err(i) if i == 0 => 0,
+ Err(i) => self.normalized_pos[i - 1].diff,
+ };
+
+ BytePos::from_u32(pos.0 - self.start_pos.0 + diff)
+ }
+
+ /// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
+ pub fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
+ // The number of extra bytes due to multibyte chars in the `SourceFile`.
+ let mut total_extra_bytes = 0;
+
+ for mbc in self.multibyte_chars.iter() {
+ debug!("{}-byte char at {:?}", mbc.bytes, mbc.pos);
+ if mbc.pos < bpos {
+ // Every character is at least one byte, so we only
+ // count the actual extra bytes.
+ total_extra_bytes += mbc.bytes as u32 - 1;
+ // We should never see a byte position in the middle of a
+ // character.
+ assert!(bpos.to_u32() >= mbc.pos.to_u32() + mbc.bytes as u32);
+ } else {
+ break;
+ }
+ }
+
+ assert!(self.start_pos.to_u32() + total_extra_bytes <= bpos.to_u32());
+ CharPos(bpos.to_usize() - self.start_pos.to_usize() - total_extra_bytes as usize)
+ }
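+
+    // Illustrative example: if this file starts at BytePos(100) and contains a single
+    // 3-byte character at BytePos(104), then `bytepos_to_file_charpos(BytePos(110))`
+    // yields CharPos(110 - 100 - 2) = CharPos(8), since that one multibyte character
+    // accounts for 2 extra bytes.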
+
+ /// Looks up the file's (1-based) line number and (0-based `CharPos`) column offset, for a
+ /// given `BytePos`.
+ pub fn lookup_file_pos(&self, pos: BytePos) -> (usize, CharPos) {
+ let chpos = self.bytepos_to_file_charpos(pos);
+ match self.lookup_line(pos) {
+ Some(a) => {
+ let line = a + 1; // Line numbers start at 1
+ let linebpos = self.lines(|lines| lines[a]);
+ let linechpos = self.bytepos_to_file_charpos(linebpos);
+ let col = chpos - linechpos;
+ debug!("byte pos {:?} is on the line at byte pos {:?}", pos, linebpos);
+ debug!("char pos {:?} is on the line at char pos {:?}", chpos, linechpos);
+ debug!("byte is on line: {}", line);
+ assert!(chpos >= linechpos);
+ (line, col)
+ }
+ None => (0, chpos),
+ }
+ }
+
+ /// Looks up the file's (1-based) line number, (0-based `CharPos`) column offset, and (0-based)
+ /// column offset when displayed, for a given `BytePos`.
+ pub fn lookup_file_pos_with_col_display(&self, pos: BytePos) -> (usize, CharPos, usize) {
+ let (line, col_or_chpos) = self.lookup_file_pos(pos);
+ if line > 0 {
+ let col = col_or_chpos;
+ let linebpos = self.lines(|lines| lines[line - 1]);
+ let col_display = {
+ let start_width_idx = self
+ .non_narrow_chars
+ .binary_search_by_key(&linebpos, |x| x.pos())
+ .unwrap_or_else(|x| x);
+ let end_width_idx = self
+ .non_narrow_chars
+ .binary_search_by_key(&pos, |x| x.pos())
+ .unwrap_or_else(|x| x);
+ let special_chars = end_width_idx - start_width_idx;
+ let non_narrow: usize = self.non_narrow_chars[start_width_idx..end_width_idx]
+ .iter()
+ .map(|x| x.width())
+ .sum();
+ col.0 - special_chars + non_narrow
+ };
+ (line, col, col_display)
+ } else {
+ let chpos = col_or_chpos;
+ let col_display = {
+ let end_width_idx = self
+ .non_narrow_chars
+ .binary_search_by_key(&pos, |x| x.pos())
+ .unwrap_or_else(|x| x);
+ let non_narrow: usize =
+ self.non_narrow_chars[0..end_width_idx].iter().map(|x| x.width()).sum();
+ chpos.0 - end_width_idx + non_narrow
+ };
+ (0, chpos, col_display)
+ }
+ }
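+
+    // Illustrative example: on a line starting with a single tab, a position just
+    // past the tab has col = CharPos(1) but col_display = 4, because the tab counts
+    // as 4 display columns.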
+}
+
+/// Normalizes the source code and records the normalizations.
+fn normalize_src(src: &mut String, start_pos: BytePos) -> Vec<NormalizedPos> {
+ let mut normalized_pos = vec![];
+ remove_bom(src, &mut normalized_pos);
+ normalize_newlines(src, &mut normalized_pos);
+
+ // Offset all the positions by start_pos to match the final file positions.
+ for np in &mut normalized_pos {
+ np.pos.0 += start_pos.0;
+ }
+
+ normalized_pos
+}
+
+/// Removes UTF-8 BOM, if any.
+fn remove_bom(src: &mut String, normalized_pos: &mut Vec<NormalizedPos>) {
+ if src.starts_with('\u{feff}') {
+ src.drain(..3);
+ normalized_pos.push(NormalizedPos { pos: BytePos(0), diff: 3 });
+ }
+}
+
+/// Replaces `\r\n` with `\n` in-place in `src`.
+///
+/// Lone `\r` characters are left unchanged.
+fn normalize_newlines(src: &mut String, normalized_pos: &mut Vec<NormalizedPos>) {
+ if !src.as_bytes().contains(&b'\r') {
+ return;
+ }
+
+ // We replace `\r\n` with `\n` in-place, which doesn't break utf-8 encoding.
+ // While we *can* call `as_mut_vec` and do surgery on the live string
+ // directly, let's rather steal the contents of `src`. This makes the code
+ // safe even if a panic occurs.
+
+ let mut buf = std::mem::replace(src, String::new()).into_bytes();
+ let mut gap_len = 0;
+ let mut tail = buf.as_mut_slice();
+ let mut cursor = 0;
+ let original_gap = normalized_pos.last().map_or(0, |l| l.diff);
+ loop {
+ let idx = match find_crlf(&tail[gap_len..]) {
+ None => tail.len(),
+ Some(idx) => idx + gap_len,
+ };
+ tail.copy_within(gap_len..idx, 0);
+ tail = &mut tail[idx - gap_len..];
+ if tail.len() == gap_len {
+ break;
+ }
+ cursor += idx - gap_len;
+ gap_len += 1;
+ normalized_pos.push(NormalizedPos {
+ pos: BytePos::from_usize(cursor + 1),
+ diff: original_gap + gap_len as u32,
+ });
+ }
+
+ // Account for removed `\r`.
+ // After `set_len`, `buf` is guaranteed to contain utf-8 again.
+ let new_len = buf.len() - gap_len;
+ unsafe {
+ buf.set_len(new_len);
+ *src = String::from_utf8_unchecked(buf);
+ }
+
+ fn find_crlf(src: &[u8]) -> Option<usize> {
+ let mut search_idx = 0;
+ while let Some(idx) = find_cr(&src[search_idx..]) {
+ if src[search_idx..].get(idx + 1) != Some(&b'\n') {
+ search_idx += idx + 1;
+ continue;
+ }
+ return Some(search_idx + idx);
+ }
+ None
+ }
+
+ fn find_cr(src: &[u8]) -> Option<usize> {
+ src.iter().position(|&b| b == b'\r')
+ }
+}
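+
+// Illustrative worked example: `"a\r\nb\r\nc"` normalizes to `"a\nb\nc"`, recording
+// `NormalizedPos { pos: 2, diff: 1 }` and `NormalizedPos { pos: 4, diff: 2 }`
+// (positions are in the normalized string, before the `start_pos` offset is applied).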
+
+// _____________________________________________________________________________
+// Pos, BytePos, CharPos
+//
+
+pub trait Pos {
+ fn from_usize(n: usize) -> Self;
+ fn to_usize(&self) -> usize;
+ fn from_u32(n: u32) -> Self;
+ fn to_u32(&self) -> u32;
+}
+
+macro_rules! impl_pos {
+ (
+ $(
+ $(#[$attr:meta])*
+ $vis:vis struct $ident:ident($inner_vis:vis $inner_ty:ty);
+ )*
+ ) => {
+ $(
+ $(#[$attr])*
+ $vis struct $ident($inner_vis $inner_ty);
+
+ impl Pos for $ident {
+ #[inline(always)]
+ fn from_usize(n: usize) -> $ident {
+ $ident(n as $inner_ty)
+ }
+
+ #[inline(always)]
+ fn to_usize(&self) -> usize {
+ self.0 as usize
+ }
+
+ #[inline(always)]
+ fn from_u32(n: u32) -> $ident {
+ $ident(n as $inner_ty)
+ }
+
+ #[inline(always)]
+ fn to_u32(&self) -> u32 {
+ self.0 as u32
+ }
+ }
+
+ impl Add for $ident {
+ type Output = $ident;
+
+ #[inline(always)]
+ fn add(self, rhs: $ident) -> $ident {
+ $ident(self.0 + rhs.0)
+ }
+ }
+
+ impl Sub for $ident {
+ type Output = $ident;
+
+ #[inline(always)]
+ fn sub(self, rhs: $ident) -> $ident {
+ $ident(self.0 - rhs.0)
+ }
+ }
+ )*
+ };
+}
+
+impl_pos! {
+ /// A byte offset.
+ ///
+ /// Keep this small (currently 32-bits), as AST contains a lot of them.
+ #[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Debug)]
+ pub struct BytePos(pub u32);
+
+ /// A character offset.
+ ///
+ /// Because of multibyte UTF-8 characters, a byte offset
+ /// is not equivalent to a character offset. The [`SourceMap`] will convert [`BytePos`]
+ /// values to `CharPos` values as necessary.
+ #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
+ pub struct CharPos(pub usize);
+}
+
+impl<S: Encoder> Encodable<S> for BytePos {
+ fn encode(&self, s: &mut S) {
+ s.emit_u32(self.0);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for BytePos {
+ fn decode(d: &mut D) -> BytePos {
+ BytePos(d.read_u32())
+ }
+}
+
+// _____________________________________________________________________________
+// Loc, SourceFileAndLine, SourceFileAndBytePos
+//
+
+/// A source code location used for error reporting.
+#[derive(Debug, Clone)]
+pub struct Loc {
+ /// Information about the original source.
+ pub file: Lrc<SourceFile>,
+ /// The (1-based) line number.
+ pub line: usize,
+ /// The (0-based) column offset.
+ pub col: CharPos,
+ /// The (0-based) column offset when displayed.
+ pub col_display: usize,
+}
+
+// Used to be structural records.
+#[derive(Debug)]
+pub struct SourceFileAndLine {
+ pub sf: Lrc<SourceFile>,
+ /// Index of line, starting from 0.
+ pub line: usize,
+}
+#[derive(Debug)]
+pub struct SourceFileAndBytePos {
+ pub sf: Lrc<SourceFile>,
+ pub pos: BytePos,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub struct LineInfo {
+ /// Index of line, starting from 0.
+ pub line_index: usize,
+
+ /// Column in line where span begins, starting from 0.
+ pub start_col: CharPos,
+
+ /// Column in line where span ends, starting from 0, exclusive.
+ pub end_col: CharPos,
+}
+
+pub struct FileLines {
+ pub file: Lrc<SourceFile>,
+ pub lines: Vec<LineInfo>,
+}
+
+pub static SPAN_TRACK: AtomicRef<fn(LocalDefId)> = AtomicRef::new(&((|_| {}) as fn(_)));
+
+// _____________________________________________________________________________
+// SpanLinesError, SpanSnippetError, DistinctSources, MalformedSourceMapPositions
+//
+
+pub type FileLinesResult = Result<FileLines, SpanLinesError>;
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum SpanLinesError {
+ DistinctSources(DistinctSources),
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum SpanSnippetError {
+ IllFormedSpan(Span),
+ DistinctSources(DistinctSources),
+ MalformedForSourcemap(MalformedSourceMapPositions),
+ SourceNotAvailable { filename: FileName },
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct DistinctSources {
+ pub begin: (FileName, BytePos),
+ pub end: (FileName, BytePos),
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct MalformedSourceMapPositions {
+ pub name: FileName,
+ pub source_len: usize,
+ pub begin_pos: BytePos,
+ pub end_pos: BytePos,
+}
+
+/// Range inside of a `Span` used for diagnostics when we only have access to relative positions.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct InnerSpan {
+ pub start: usize,
+ pub end: usize,
+}
+
+impl InnerSpan {
+ pub fn new(start: usize, end: usize) -> InnerSpan {
+ InnerSpan { start, end }
+ }
+}
+
+/// Requirements for a `StableHashingContext` to be used in this crate.
+///
+/// This is a hack to allow using the [`HashStable_Generic`] derive macro
+/// instead of implementing everything in rustc_middle.
+pub trait HashStableContext {
+ fn def_path_hash(&self, def_id: DefId) -> DefPathHash;
+ fn hash_spans(&self) -> bool;
+ /// Accesses `sess.opts.unstable_opts.incremental_ignore_spans` since
+    /// we don't have easy access to a `Session`.
+ fn unstable_opts_incremental_ignore_spans(&self) -> bool;
+ fn def_span(&self, def_id: LocalDefId) -> Span;
+ fn span_data_to_lines_and_cols(
+ &mut self,
+ span: &SpanData,
+ ) -> Option<(Lrc<SourceFile>, usize, BytePos, usize, BytePos)>;
+ fn hashing_controls(&self) -> HashingControls;
+}
+
+impl<CTX> HashStable<CTX> for Span
+where
+ CTX: HashStableContext,
+{
+ /// Hashes a span in a stable way. We can't directly hash the span's `BytePos`
+ /// fields (that would be similar to hashing pointers, since those are just
+ /// offsets into the `SourceMap`). Instead, we hash the (file name, line, column)
+ /// triple, which stays the same even if the containing `SourceFile` has moved
+ /// within the `SourceMap`.
+ ///
+ /// Also note that we are hashing byte offsets for the column, not unicode
+ /// codepoint offsets. For the purpose of the hash that's sufficient.
+ /// Also, hashing filenames is expensive so we avoid doing it twice when the
+ /// span starts and ends in the same file, which is almost always the case.
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ const TAG_VALID_SPAN: u8 = 0;
+ const TAG_INVALID_SPAN: u8 = 1;
+ const TAG_RELATIVE_SPAN: u8 = 2;
+
+ if !ctx.hash_spans() {
+ return;
+ }
+
+ let span = self.data_untracked();
+ span.ctxt.hash_stable(ctx, hasher);
+ span.parent.hash_stable(ctx, hasher);
+
+ if span.is_dummy() {
+ Hash::hash(&TAG_INVALID_SPAN, hasher);
+ return;
+ }
+
+ if let Some(parent) = span.parent {
+ let def_span = ctx.def_span(parent).data_untracked();
+ if def_span.contains(span) {
+ // This span is enclosed in a definition: only hash the relative position.
+ Hash::hash(&TAG_RELATIVE_SPAN, hasher);
+ (span.lo - def_span.lo).to_u32().hash_stable(ctx, hasher);
+ (span.hi - def_span.lo).to_u32().hash_stable(ctx, hasher);
+ return;
+ }
+ }
+
+ // If this is not an empty or invalid span, we want to hash the last
+ // position that belongs to it, as opposed to hashing the first
+ // position past it.
+ let Some((file, line_lo, col_lo, line_hi, col_hi)) = ctx.span_data_to_lines_and_cols(&span) else {
+ Hash::hash(&TAG_INVALID_SPAN, hasher);
+ return;
+ };
+
+ Hash::hash(&TAG_VALID_SPAN, hasher);
+ // We truncate the stable ID hash and line and column numbers. The chances
+ // of causing a collision this way should be minimal.
+ Hash::hash(&(file.name_hash as u64), hasher);
+
+ // Hash both the length and the end location (line/column) of a span. If we
+ // hash only the length, for example, then two otherwise equal spans with
+ // different end locations will have the same hash. This can cause a problem
+ // during incremental compilation wherein a previous result for a query that
+ // depends on the end location of a span will be incorrectly reused when the
+ // end location of the span it depends on has changed (see issue #74890). A
+ // similar analysis applies if some query depends specifically on the length
+ // of the span, but we only hash the end location. So hash both.
+
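+        // Bit layout of `col_line`, low to high: col_lo in bits 0..8, line_lo in
+        // bits 8..32, col_hi in bits 32..40, and line_hi in bits 40..64.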
+ let col_lo_trunc = (col_lo.0 as u64) & 0xFF;
+ let line_lo_trunc = ((line_lo as u64) & 0xFF_FF_FF) << 8;
+        let col_hi_trunc = ((col_hi.0 as u64) & 0xFF) << 32;
+ let line_hi_trunc = ((line_hi as u64) & 0xFF_FF_FF) << 40;
+ let col_line = col_lo_trunc | line_lo_trunc | col_hi_trunc | line_hi_trunc;
+ let len = (span.hi - span.lo).0;
+ Hash::hash(&col_line, hasher);
+ Hash::hash(&len, hasher);
+ }
+}
diff --git a/compiler/rustc_span/src/profiling.rs b/compiler/rustc_span/src/profiling.rs
new file mode 100644
index 000000000..f169007fa
--- /dev/null
+++ b/compiler/rustc_span/src/profiling.rs
@@ -0,0 +1,35 @@
+use std::borrow::Borrow;
+
+use rustc_data_structures::profiling::EventArgRecorder;
+
+/// Extension trait for self-profiling purposes: allows recording spans within a
+/// generic activity's event arguments.
+pub trait SpannedEventArgRecorder {
+ /// Records the following event arguments within the current generic activity being profiled:
+ /// - the provided `event_arg`
+ /// - a string representation of the provided `span`
+ ///
+ /// Note: when self-profiling with costly event arguments, at least one argument
+ /// needs to be recorded. A panic will be triggered if that doesn't happen.
+ fn record_arg_with_span<A>(&mut self, event_arg: A, span: crate::Span)
+ where
+ A: Borrow<str> + Into<String>;
+}
+
+impl SpannedEventArgRecorder for EventArgRecorder<'_> {
+ fn record_arg_with_span<A>(&mut self, event_arg: A, span: crate::Span)
+ where
+ A: Borrow<str> + Into<String>,
+ {
+ self.record_arg(event_arg);
+
+ let span_arg = crate::with_session_globals(|session_globals| {
+ if let Some(source_map) = &*session_globals.source_map.borrow() {
+ source_map.span_to_embeddable_string(span)
+ } else {
+ format!("{:?}", span)
+ }
+ });
+ self.record_arg(span_arg);
+ }
+}
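+
+// Illustrative sketch of intended use (the surrounding profiler setup and all names
+// here are hypothetical):
+//
+//     profiler.generic_activity_with_arg_recorder("typeck", |recorder| {
+//         recorder.record_arg_with_span("my_crate::main", item_span);
+//     });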
diff --git a/compiler/rustc_span/src/source_map.rs b/compiler/rustc_span/src/source_map.rs
new file mode 100644
index 000000000..28381157d
--- /dev/null
+++ b/compiler/rustc_span/src/source_map.rs
@@ -0,0 +1,1281 @@
+//! Types for tracking pieces of source code within a crate.
+//!
+//! The [`SourceMap`] tracks all the source code used within a single crate, mapping
+//! from integer byte positions to the original source code location. Each bit
+//! of source parsed during crate parsing (typically files, in-memory strings,
+//! or various bits of macro expansion) covers a contiguous range of bytes in the
+//! `SourceMap` and is represented by a [`SourceFile`]. Byte positions are stored in
+//! [`Span`] and used pervasively in the compiler. They are absolute positions
+//! within the `SourceMap`, which upon request can be converted to line and column
+//! information, source code snippets, etc.
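+//!
+//! Illustrative sketch (offsets hypothetical): if `a.rs` occupies bytes 0..100 of
+//! the `SourceMap` and `b.rs` bytes 101..250, then `BytePos(120)` resolves to
+//! `b.rs` at offset 19, from which the line and column are computed.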
+
+pub use crate::hygiene::{ExpnData, ExpnKind};
+pub use crate::*;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::StableHasher;
+use rustc_data_structures::sync::{AtomicU32, Lrc, MappedReadGuard, ReadGuard, RwLock};
+use std::hash::Hash;
+use std::path::{Path, PathBuf};
+use std::sync::atomic::Ordering;
+use std::{clone::Clone, cmp};
+use std::{convert::TryFrom, unreachable};
+
+use std::fs;
+use std::io;
+use tracing::debug;
+
+#[cfg(test)]
+mod tests;
+
+/// Returns the span itself if it doesn't come from a macro expansion;
+/// otherwise returns the call site span, following the `expn_data` chain
+/// up to `enclosing_sp`.
+pub fn original_sp(sp: Span, enclosing_sp: Span) -> Span {
+ let expn_data1 = sp.ctxt().outer_expn_data();
+ let expn_data2 = enclosing_sp.ctxt().outer_expn_data();
+ if expn_data1.is_root() || !expn_data2.is_root() && expn_data1.call_site == expn_data2.call_site
+ {
+ sp
+ } else {
+ original_sp(expn_data1.call_site, enclosing_sp)
+ }
+}
+
+pub mod monotonic {
+ use std::ops::{Deref, DerefMut};
+
+ /// A `MonotonicVec` is a `Vec` which can only be grown.
+ /// Once inserted, an element can never be removed or swapped,
+    /// guaranteeing that any indices into a `MonotonicVec` are stable.
+    // This is declared in its own module to ensure that the private
+    // field is inaccessible.
+ pub struct MonotonicVec<T>(Vec<T>);
+ impl<T> MonotonicVec<T> {
+ pub fn new(val: Vec<T>) -> MonotonicVec<T> {
+ MonotonicVec(val)
+ }
+
+ pub fn push(&mut self, val: T) {
+ self.0.push(val);
+ }
+ }
+
+ impl<T> Default for MonotonicVec<T> {
+ fn default() -> Self {
+ MonotonicVec::new(vec![])
+ }
+ }
+
+ impl<T> Deref for MonotonicVec<T> {
+ type Target = Vec<T>;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+ }
+
+ impl<T> !DerefMut for MonotonicVec<T> {}
+}
+
+#[derive(Clone, Encodable, Decodable, Debug, Copy, HashStable_Generic)]
+pub struct Spanned<T> {
+ pub node: T,
+ pub span: Span,
+}
+
+pub fn respan<T>(sp: Span, t: T) -> Spanned<T> {
+ Spanned { node: t, span: sp }
+}
+
+pub fn dummy_spanned<T>(t: T) -> Spanned<T> {
+ respan(DUMMY_SP, t)
+}
+
+// _____________________________________________________________________________
+// SourceFile, MultiByteChar, FileName, FileLines
+//
+
+/// An abstraction over the fs operations used by the Parser.
+pub trait FileLoader {
+ /// Query the existence of a file.
+ fn file_exists(&self, path: &Path) -> bool;
+
+ /// Read the contents of a UTF-8 file into memory.
+ fn read_file(&self, path: &Path) -> io::Result<String>;
+}
+
+/// A FileLoader that uses std::fs to load real files.
+pub struct RealFileLoader;
+
+impl FileLoader for RealFileLoader {
+ fn file_exists(&self, path: &Path) -> bool {
+ path.exists()
+ }
+
+ fn read_file(&self, path: &Path) -> io::Result<String> {
+ fs::read_to_string(path)
+ }
+}
+
+/// This is a [SourceFile] identifier that is used to correlate source files between
+/// subsequent compilation sessions (which is something we need to do during
+/// incremental compilation).
+///
+/// The [StableSourceFileId] also contains the CrateNum of the crate the source
+/// file was originally parsed for. This way we get two separate entries in
+/// the [SourceMap] if the same file is part of both the local and an upstream
+/// crate. Trying to only have one entry for both cases is problematic because
+/// at the point where we discover that there's a local use of the file in
+/// addition to the upstream one, we might already have made decisions based on
+/// the assumption that it's an upstream file. Treating the two files as
+/// different has no real downsides.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Encodable, Decodable, Debug)]
+pub struct StableSourceFileId {
+    // A hash of the source file's FileName. This is a hash so that its size
+    // is more predictable than if we included the actual FileName value.
+ pub file_name_hash: u64,
+
+ // The CrateNum of the crate this source file was originally parsed for.
+ // We cannot include this information in the hash because at the time
+ // of hashing we don't have the context to map from the CrateNum's numeric
+ // value to a StableCrateId.
+ pub cnum: CrateNum,
+}
+
+// FIXME: we need a more globally consistent approach to the problem solved by
+// StableSourceFileId, perhaps built atop source_file.name_hash.
+impl StableSourceFileId {
+ pub fn new(source_file: &SourceFile) -> StableSourceFileId {
+ StableSourceFileId::new_from_name(&source_file.name, source_file.cnum)
+ }
+
+ fn new_from_name(name: &FileName, cnum: CrateNum) -> StableSourceFileId {
+ let mut hasher = StableHasher::new();
+ name.hash(&mut hasher);
+ StableSourceFileId { file_name_hash: hasher.finish(), cnum }
+ }
+}
+
+// _____________________________________________________________________________
+// SourceMap
+//
+
+#[derive(Default)]
+pub(super) struct SourceMapFiles {
+ source_files: monotonic::MonotonicVec<Lrc<SourceFile>>,
+ stable_id_to_source_file: FxHashMap<StableSourceFileId, Lrc<SourceFile>>,
+}
+
+pub struct SourceMap {
+ /// The address space below this value is currently used by the files in the source map.
+ used_address_space: AtomicU32,
+
+ files: RwLock<SourceMapFiles>,
+ file_loader: Box<dyn FileLoader + Sync + Send>,
+ // This is used to apply the file path remapping as specified via
+ // `--remap-path-prefix` to all `SourceFile`s allocated within this `SourceMap`.
+ path_mapping: FilePathMapping,
+
+ /// The algorithm used for hashing the contents of each source file.
+ hash_kind: SourceFileHashAlgorithm,
+}
+
+impl SourceMap {
+ pub fn new(path_mapping: FilePathMapping) -> SourceMap {
+ Self::with_file_loader_and_hash_kind(
+ Box::new(RealFileLoader),
+ path_mapping,
+ SourceFileHashAlgorithm::Md5,
+ )
+ }
+
+ pub fn with_file_loader_and_hash_kind(
+ file_loader: Box<dyn FileLoader + Sync + Send>,
+ path_mapping: FilePathMapping,
+ hash_kind: SourceFileHashAlgorithm,
+ ) -> SourceMap {
+ SourceMap {
+ used_address_space: AtomicU32::new(0),
+ files: Default::default(),
+ file_loader,
+ path_mapping,
+ hash_kind,
+ }
+ }
+
+ pub fn path_mapping(&self) -> &FilePathMapping {
+ &self.path_mapping
+ }
+
+ pub fn file_exists(&self, path: &Path) -> bool {
+ self.file_loader.file_exists(path)
+ }
+
+ pub fn load_file(&self, path: &Path) -> io::Result<Lrc<SourceFile>> {
+ let src = self.file_loader.read_file(path)?;
+ let filename = path.to_owned().into();
+ Ok(self.new_source_file(filename, src))
+ }
+
+ /// Loads source file as a binary blob.
+ ///
+ /// Unlike `load_file`, guarantees that no normalization like BOM-removal
+ /// takes place.
+ pub fn load_binary_file(&self, path: &Path) -> io::Result<Vec<u8>> {
+ // Ideally, this should use `self.file_loader`, but it can't
+ // deal with binary files yet.
+ let bytes = fs::read(path)?;
+
+        // We need to add the file to the `SourceMap`, so that it is present
+        // in dep-info. There's also an edge case where the file might be both
+        // loaded as a binary via `include_bytes!` and as a proper `SourceFile`
+        // via `mod`, so we try to use the real file contents and not just an
+        // empty string.
+ let text = std::str::from_utf8(&bytes).unwrap_or("").to_string();
+ self.new_source_file(path.to_owned().into(), text);
+ Ok(bytes)
+ }
+
+ // By returning a `MonotonicVec`, we ensure that consumers cannot invalidate
+ // any existing indices pointing into `files`.
+ pub fn files(&self) -> MappedReadGuard<'_, monotonic::MonotonicVec<Lrc<SourceFile>>> {
+ ReadGuard::map(self.files.borrow(), |files| &files.source_files)
+ }
+
+ pub fn source_file_by_stable_id(
+ &self,
+ stable_id: StableSourceFileId,
+ ) -> Option<Lrc<SourceFile>> {
+ self.files.borrow().stable_id_to_source_file.get(&stable_id).cloned()
+ }
+
+ fn allocate_address_space(&self, size: usize) -> Result<usize, OffsetOverflowError> {
+ let size = u32::try_from(size).map_err(|_| OffsetOverflowError)?;
+
+ loop {
+ let current = self.used_address_space.load(Ordering::Relaxed);
+ let next = current
+ .checked_add(size)
+ // Add one so there is some space between files. This lets us distinguish
+ // positions in the `SourceMap`, even in the presence of zero-length files.
+ .and_then(|next| next.checked_add(1))
+ .ok_or(OffsetOverflowError)?;
+
+ if self
+ .used_address_space
+ .compare_exchange(current, next, Ordering::Relaxed, Ordering::Relaxed)
+ .is_ok()
+ {
+ return Ok(usize::try_from(current).unwrap());
+ }
+ }
+ }
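+
+    // Illustrative example: allocating 10 bytes and then 0 bytes yields start
+    // offsets 0 and 11; the extra `+1` keeps a zero-length file's positions
+    // distinct from its neighbor's.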
+
+ /// Creates a new `SourceFile`.
+ /// If a file already exists in the `SourceMap` with the same ID, that file is returned
+ /// unmodified.
+ pub fn new_source_file(&self, filename: FileName, src: String) -> Lrc<SourceFile> {
+ self.try_new_source_file(filename, src).unwrap_or_else(|OffsetOverflowError| {
+ eprintln!("fatal error: rustc does not support files larger than 4GB");
+ crate::fatal_error::FatalError.raise()
+ })
+ }
+
+ fn try_new_source_file(
+ &self,
+ filename: FileName,
+ src: String,
+ ) -> Result<Lrc<SourceFile>, OffsetOverflowError> {
+        // Note that filename may not be a valid path, e.g., it may be `<anon>`,
+ // but this is okay because the directory determined by `path.pop()` will
+ // be empty, so the working directory will be used.
+ let (filename, _) = self.path_mapping.map_filename_prefix(&filename);
+
+ let file_id = StableSourceFileId::new_from_name(&filename, LOCAL_CRATE);
+
+ let lrc_sf = match self.source_file_by_stable_id(file_id) {
+ Some(lrc_sf) => lrc_sf,
+ None => {
+ let start_pos = self.allocate_address_space(src.len())?;
+
+ let source_file = Lrc::new(SourceFile::new(
+ filename,
+ src,
+ Pos::from_usize(start_pos),
+ self.hash_kind,
+ ));
+
+ // Let's make sure the file_id we generated above actually matches
+ // the ID we generate for the SourceFile we just created.
+ debug_assert_eq!(StableSourceFileId::new(&source_file), file_id);
+
+ let mut files = self.files.borrow_mut();
+
+ files.source_files.push(source_file.clone());
+ files.stable_id_to_source_file.insert(file_id, source_file.clone());
+
+ source_file
+ }
+ };
+ Ok(lrc_sf)
+ }
+
+ /// Allocates a new `SourceFile` representing a source file from an external
+ /// crate. The source code of such an "imported `SourceFile`" is not available,
+ /// but we still know enough to generate accurate debuginfo location
+ /// information for things inlined from other crates.
+ pub fn new_imported_source_file(
+ &self,
+ filename: FileName,
+ src_hash: SourceFileHash,
+ name_hash: u128,
+ source_len: usize,
+ cnum: CrateNum,
+ file_local_lines: Lock<SourceFileLines>,
+ mut file_local_multibyte_chars: Vec<MultiByteChar>,
+ mut file_local_non_narrow_chars: Vec<NonNarrowChar>,
+ mut file_local_normalized_pos: Vec<NormalizedPos>,
+ original_start_pos: BytePos,
+ original_end_pos: BytePos,
+ ) -> Lrc<SourceFile> {
+ let start_pos = self
+ .allocate_address_space(source_len)
+ .expect("not enough address space for imported source file");
+
+ let end_pos = Pos::from_usize(start_pos + source_len);
+ let start_pos = Pos::from_usize(start_pos);
+
+ // Translate these positions into the new global frame of reference,
+ // now that the offset of the SourceFile is known.
+ //
+ // These are all unsigned values. `original_start_pos` may be larger or
+ // smaller than `start_pos`, but `pos` is always larger than both.
+ // Therefore, `(pos - original_start_pos) + start_pos` won't overflow
+ // but `start_pos - original_start_pos` might. So we use the former
+ // form rather than pre-computing the offset into a local variable. The
+ // compiler backend can optimize away the repeated computations in a
+ // way that won't trigger overflow checks.
+ match &mut *file_local_lines.borrow_mut() {
+ SourceFileLines::Lines(lines) => {
+ for pos in lines {
+ *pos = (*pos - original_start_pos) + start_pos;
+ }
+ }
+ SourceFileLines::Diffs(SourceFileDiffs { line_start, .. }) => {
+ *line_start = (*line_start - original_start_pos) + start_pos;
+ }
+ }
+ for mbc in &mut file_local_multibyte_chars {
+ mbc.pos = (mbc.pos - original_start_pos) + start_pos;
+ }
+ for swc in &mut file_local_non_narrow_chars {
+ *swc = (*swc - original_start_pos) + start_pos;
+ }
+ for nc in &mut file_local_normalized_pos {
+ nc.pos = (nc.pos - original_start_pos) + start_pos;
+ }
+
+ let source_file = Lrc::new(SourceFile {
+ name: filename,
+ src: None,
+ src_hash,
+ external_src: Lock::new(ExternalSource::Foreign {
+ kind: ExternalSourceKind::AbsentOk,
+ original_start_pos,
+ original_end_pos,
+ }),
+ start_pos,
+ end_pos,
+ lines: file_local_lines,
+ multibyte_chars: file_local_multibyte_chars,
+ non_narrow_chars: file_local_non_narrow_chars,
+ normalized_pos: file_local_normalized_pos,
+ name_hash,
+ cnum,
+ });
+
+ let mut files = self.files.borrow_mut();
+
+ files.source_files.push(source_file.clone());
+ files
+ .stable_id_to_source_file
+ .insert(StableSourceFileId::new(&source_file), source_file.clone());
+
+ source_file
+ }
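The rebasing comment above is worth a concrete illustration: with unsigned positions, hoisting `start_pos - original_start_pos` out of the loops could underflow, while subtracting first keeps every intermediate value non-negative (values below are arbitrary):

```rust
fn main() {
    let (original_start_pos, start_pos, pos) = (100u32, 40u32, 120u32);
    // `start_pos - original_start_pos` would be 40 - 100 and underflow in u32,
    // but `pos` is at least as large as both starting positions:
    assert_eq!((pos - original_start_pos) + start_pos, 60);
}
```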
+
+ // If there is a doctest offset, applies it to the line.
+ pub fn doctest_offset_line(&self, file: &FileName, orig: usize) -> usize {
+ match file {
+ FileName::DocTest(_, offset) => {
+ if *offset < 0 {
+ orig - (-(*offset)) as usize
+ } else {
+ orig + *offset as usize
+ }
+ }
+ _ => orig,
+ }
+ }
+
+ /// Returns the `SourceFile` that contains the given `BytePos`.
+ pub fn lookup_source_file(&self, pos: BytePos) -> Lrc<SourceFile> {
+ let idx = self.lookup_source_file_idx(pos);
+ (*self.files.borrow().source_files)[idx].clone()
+ }
+
+ /// Looks up source information about a `BytePos`.
+ pub fn lookup_char_pos(&self, pos: BytePos) -> Loc {
+ let sf = self.lookup_source_file(pos);
+ let (line, col, col_display) = sf.lookup_file_pos_with_col_display(pos);
+ Loc { file: sf, line, col, col_display }
+ }
+
+ // If the corresponding `SourceFile` is empty, does not return a line number.
+ pub fn lookup_line(&self, pos: BytePos) -> Result<SourceFileAndLine, Lrc<SourceFile>> {
+ let f = self.lookup_source_file(pos);
+
+ match f.lookup_line(pos) {
+ Some(line) => Ok(SourceFileAndLine { sf: f, line }),
+ None => Err(f),
+ }
+ }
+
+ fn span_to_string(&self, sp: Span, filename_display_pref: FileNameDisplayPreference) -> String {
+ if self.files.borrow().source_files.is_empty() || sp.is_dummy() {
+ return "no-location".to_string();
+ }
+
+ let lo = self.lookup_char_pos(sp.lo());
+ let hi = self.lookup_char_pos(sp.hi());
+ format!(
+ "{}:{}:{}: {}:{}",
+ lo.file.name.display(filename_display_pref),
+ lo.line,
+ lo.col.to_usize() + 1,
+ hi.line,
+ hi.col.to_usize() + 1,
+ )
+ }
+
+ /// Format the span location suitable for embedding in build artifacts
+ pub fn span_to_embeddable_string(&self, sp: Span) -> String {
+ self.span_to_string(sp, FileNameDisplayPreference::Remapped)
+ }
+
+ /// Format the span location suitable for pretty printing annotations with relative line numbers.
+ pub fn span_to_relative_line_string(&self, sp: Span, relative_to: Span) -> String {
+ if self.files.borrow().source_files.is_empty() || sp.is_dummy() || relative_to.is_dummy() {
+ return "no-location".to_string();
+ }
+
+ let lo = self.lookup_char_pos(sp.lo());
+ let hi = self.lookup_char_pos(sp.hi());
+ let offset = self.lookup_char_pos(relative_to.lo());
+
+ if lo.file.name != offset.file.name {
+ return self.span_to_embeddable_string(sp);
+ }
+
+ let lo_line = lo.line.saturating_sub(offset.line);
+ let hi_line = hi.line.saturating_sub(offset.line);
+
+ format!(
+ "{}:+{}:{}: +{}:{}",
+ lo.file.name.display(FileNameDisplayPreference::Remapped),
+ lo_line,
+ lo.col.to_usize() + 1,
+ hi_line,
+ hi.col.to_usize() + 1,
+ )
+ }
+
+ /// Format the span location to be printed in diagnostics. Must not be emitted
+ /// to build artifacts as this may leak local file paths. Use `span_to_embeddable_string`
+ /// for a string suitable for embedding.
+ pub fn span_to_diagnostic_string(&self, sp: Span) -> String {
+ self.span_to_string(sp, self.path_mapping.filename_display_for_diagnostics)
+ }
+
+ pub fn span_to_filename(&self, sp: Span) -> FileName {
+ self.lookup_char_pos(sp.lo()).file.name.clone()
+ }
+
+ pub fn filename_for_diagnostics<'a>(&self, filename: &'a FileName) -> FileNameDisplay<'a> {
+ filename.display(self.path_mapping.filename_display_for_diagnostics)
+ }
+
+ pub fn is_multiline(&self, sp: Span) -> bool {
+ let lo = self.lookup_source_file_idx(sp.lo());
+ let hi = self.lookup_source_file_idx(sp.hi());
+ if lo != hi {
+ return true;
+ }
+ let f = (*self.files.borrow().source_files)[lo].clone();
+ f.lookup_line(sp.lo()) != f.lookup_line(sp.hi())
+ }
+
+ #[instrument(skip(self), level = "trace")]
+ pub fn is_valid_span(&self, sp: Span) -> Result<(Loc, Loc), SpanLinesError> {
+ let lo = self.lookup_char_pos(sp.lo());
+ trace!(?lo);
+ let hi = self.lookup_char_pos(sp.hi());
+ trace!(?hi);
+ if lo.file.start_pos != hi.file.start_pos {
+ return Err(SpanLinesError::DistinctSources(DistinctSources {
+ begin: (lo.file.name.clone(), lo.file.start_pos),
+ end: (hi.file.name.clone(), hi.file.start_pos),
+ }));
+ }
+ Ok((lo, hi))
+ }
+
+ pub fn is_line_before_span_empty(&self, sp: Span) -> bool {
+ match self.span_to_prev_source(sp) {
+ Ok(s) => s.rsplit_once('\n').unwrap_or(("", &s)).1.trim_start().is_empty(),
+ Err(_) => false,
+ }
+ }
+
+ pub fn span_to_lines(&self, sp: Span) -> FileLinesResult {
+ debug!("span_to_lines(sp={:?})", sp);
+ let (lo, hi) = self.is_valid_span(sp)?;
+ assert!(hi.line >= lo.line);
+
+ if sp.is_dummy() {
+ return Ok(FileLines { file: lo.file, lines: Vec::new() });
+ }
+
+ let mut lines = Vec::with_capacity(hi.line - lo.line + 1);
+
+ // The span starts partway through the first line,
+ // but after that it starts from offset 0.
+ let mut start_col = lo.col;
+
+ // For every line but the last, it extends from `start_col`
+ // and to the end of the line. Be careful because the line
+ // numbers in Loc are 1-based, so we subtract 1 to get 0-based
+ // lines.
+ //
+ // FIXME: now that we handle DUMMY_SP up above, we should consider
+ // asserting that the line numbers here are all indeed 1-based.
+ let hi_line = hi.line.saturating_sub(1);
+ for line_index in lo.line.saturating_sub(1)..hi_line {
+ let line_len = lo.file.get_line(line_index).map_or(0, |s| s.chars().count());
+ lines.push(LineInfo { line_index, start_col, end_col: CharPos::from_usize(line_len) });
+ start_col = CharPos::from_usize(0);
+ }
+
+ // For the last line, it extends from `start_col` to `hi.col`:
+ lines.push(LineInfo { line_index: hi_line, start_col, end_col: hi.col });
+
+ Ok(FileLines { file: lo.file, lines })
+ }
+
+ /// Extracts the source surrounding the given `Span` using the `extract_source` function. The
+ /// extract function takes three arguments: a string slice containing the source, an index in
+ /// the slice for the beginning of the span and an index in the slice for the end of the span.
+ fn span_to_source<F, T>(&self, sp: Span, extract_source: F) -> Result<T, SpanSnippetError>
+ where
+ F: Fn(&str, usize, usize) -> Result<T, SpanSnippetError>,
+ {
+ let local_begin = self.lookup_byte_offset(sp.lo());
+ let local_end = self.lookup_byte_offset(sp.hi());
+
+ if local_begin.sf.start_pos != local_end.sf.start_pos {
+ Err(SpanSnippetError::DistinctSources(DistinctSources {
+ begin: (local_begin.sf.name.clone(), local_begin.sf.start_pos),
+ end: (local_end.sf.name.clone(), local_end.sf.start_pos),
+ }))
+ } else {
+ self.ensure_source_file_source_present(local_begin.sf.clone());
+
+ let start_index = local_begin.pos.to_usize();
+ let end_index = local_end.pos.to_usize();
+ let source_len = (local_begin.sf.end_pos - local_begin.sf.start_pos).to_usize();
+
+ if start_index > end_index || end_index > source_len {
+ return Err(SpanSnippetError::MalformedForSourcemap(MalformedSourceMapPositions {
+ name: local_begin.sf.name.clone(),
+ source_len,
+ begin_pos: local_begin.pos,
+ end_pos: local_end.pos,
+ }));
+ }
+
+ if let Some(ref src) = local_begin.sf.src {
+ extract_source(src, start_index, end_index)
+ } else if let Some(src) = local_begin.sf.external_src.borrow().get_source() {
+ extract_source(src, start_index, end_index)
+ } else {
+ Err(SpanSnippetError::SourceNotAvailable { filename: local_begin.sf.name.clone() })
+ }
+ }
+ }
+
+ pub fn is_span_accessible(&self, sp: Span) -> bool {
+ self.span_to_source(sp, |src, start_index, end_index| {
+ Ok(src.get(start_index..end_index).is_some())
+ })
+ .map_or(false, |is_accessible| is_accessible)
+ }
+
+ /// Returns the source snippet as `String` corresponding to the given `Span`.
+ pub fn span_to_snippet(&self, sp: Span) -> Result<String, SpanSnippetError> {
+ self.span_to_source(sp, |src, start_index, end_index| {
+ src.get(start_index..end_index)
+ .map(|s| s.to_string())
+ .ok_or(SpanSnippetError::IllFormedSpan(sp))
+ })
+ }
+
+ pub fn span_to_margin(&self, sp: Span) -> Option<usize> {
+ Some(self.indentation_before(sp)?.len())
+ }
+
+ pub fn indentation_before(&self, sp: Span) -> Option<String> {
+ self.span_to_source(sp, |src, start_index, _| {
+ let before = &src[..start_index];
+ let last_line = before.rsplit_once('\n').map_or(before, |(_, last)| last);
+ Ok(last_line
+ .split_once(|c: char| !c.is_whitespace())
+ .map_or(last_line, |(indent, _)| indent)
+ .to_string())
+ })
+ .ok()
+ }
+
+ /// Returns the source snippet as `String` before the given `Span`.
+ pub fn span_to_prev_source(&self, sp: Span) -> Result<String, SpanSnippetError> {
+ self.span_to_source(sp, |src, start_index, _| {
+ src.get(..start_index).map(|s| s.to_string()).ok_or(SpanSnippetError::IllFormedSpan(sp))
+ })
+ }
+
+ /// Extends the given `Span` to just after the previous occurrence of `c`. Returns the same span
+ /// if no character could be found or if an error occurred while retrieving the code snippet.
+ pub fn span_extend_to_prev_char(&self, sp: Span, c: char, accept_newlines: bool) -> Span {
+ if let Ok(prev_source) = self.span_to_prev_source(sp) {
+ let prev_source = prev_source.rsplit(c).next().unwrap_or("");
+ if !prev_source.is_empty() && (accept_newlines || !prev_source.contains('\n')) {
+ return sp.with_lo(BytePos(sp.lo().0 - prev_source.len() as u32));
+ }
+ }
+
+ sp
+ }
+
+ /// Extends the given `Span` to just after the previous occurrence of `pat` when surrounded by
+ /// whitespace. Returns None if the pattern could not be found or if an error occurred while
+ /// retrieving the code snippet.
+ pub fn span_extend_to_prev_str(
+ &self,
+ sp: Span,
+ pat: &str,
+ accept_newlines: bool,
+ include_whitespace: bool,
+ ) -> Option<Span> {
+ // Ensure that the pattern is delimited, to avoid the following:
+ // fn my_fn()
+ // ^^^^ returned span without the check
+ // ---------- correct span
+ let prev_source = self.span_to_prev_source(sp).ok()?;
+ for ws in &[" ", "\t", "\n"] {
+ let pat = pat.to_owned() + ws;
+ if let Some(pat_pos) = prev_source.rfind(&pat) {
+ let just_after_pat_pos = pat_pos + pat.len() - 1;
+ let just_after_pat_plus_ws = if include_whitespace {
+ just_after_pat_pos
+ + prev_source[just_after_pat_pos..]
+ .find(|c: char| !c.is_whitespace())
+ .unwrap_or(0)
+ } else {
+ just_after_pat_pos
+ };
+ let len = prev_source.len() - just_after_pat_plus_ws;
+ let prev_source = &prev_source[just_after_pat_plus_ws..];
+ if accept_newlines || !prev_source.trim_start().contains('\n') {
+ return Some(sp.with_lo(BytePos(sp.lo().0 - len as u32)));
+ }
+ }
+ }
+
+ None
+ }
+
+ /// Returns the source snippet as `String` after the given `Span`.
+ pub fn span_to_next_source(&self, sp: Span) -> Result<String, SpanSnippetError> {
+ self.span_to_source(sp, |src, _, end_index| {
+ src.get(end_index..).map(|s| s.to_string()).ok_or(SpanSnippetError::IllFormedSpan(sp))
+ })
+ }
+
+ /// Extends the given `Span` while the next character matches the predicate
+ pub fn span_extend_while(
+ &self,
+ span: Span,
+ f: impl Fn(char) -> bool,
+ ) -> Result<Span, SpanSnippetError> {
+ self.span_to_source(span, |s, _start, end| {
+ let n = s[end..].char_indices().find(|&(_, c)| !f(c)).map_or(s.len() - end, |(i, _)| i);
+ Ok(span.with_hi(span.hi() + BytePos(n as u32)))
+ })
+ }
+
+ /// Extends the given `Span` to just after the next occurrence of `c`.
+ pub fn span_extend_to_next_char(&self, sp: Span, c: char, accept_newlines: bool) -> Span {
+ if let Ok(next_source) = self.span_to_next_source(sp) {
+ let next_source = next_source.split(c).next().unwrap_or("");
+ if !next_source.is_empty() && (accept_newlines || !next_source.contains('\n')) {
+ return sp.with_hi(BytePos(sp.hi().0 + next_source.len() as u32));
+ }
+ }
+
+ sp
+ }
+
+ /// Extends the given `Span` to contain the entire line it is on.
+ pub fn span_extend_to_line(&self, sp: Span) -> Span {
+ self.span_extend_to_prev_char(self.span_extend_to_next_char(sp, '\n', true), '\n', true)
+ }
+
+ /// Given a `Span`, tries to get a shorter span ending before the first occurrence of `char`
+ /// `c`.
+ pub fn span_until_char(&self, sp: Span, c: char) -> Span {
+ match self.span_to_snippet(sp) {
+ Ok(snippet) => {
+ let snippet = snippet.split(c).next().unwrap_or("").trim_end();
+ if !snippet.is_empty() && !snippet.contains('\n') {
+ sp.with_hi(BytePos(sp.lo().0 + snippet.len() as u32))
+ } else {
+ sp
+ }
+ }
+ _ => sp,
+ }
+ }
+
+ /// Given a `Span`, tries to get a shorter span ending just after the first occurrence of `char`
+ /// `c`.
+ pub fn span_through_char(&self, sp: Span, c: char) -> Span {
+ if let Ok(snippet) = self.span_to_snippet(sp) {
+ if let Some(offset) = snippet.find(c) {
+ return sp.with_hi(BytePos(sp.lo().0 + (offset + c.len_utf8()) as u32));
+ }
+ }
+ sp
+ }
+
+ /// Given a `Span`, gets a new `Span` covering the first token and all its trailing whitespace
+ /// or the original `Span`.
+ ///
+ /// If `sp` points to `"let mut x"`, then a span pointing at `"let "` will be returned.
+ pub fn span_until_non_whitespace(&self, sp: Span) -> Span {
+ let mut whitespace_found = false;
+
+ self.span_take_while(sp, |c| {
+ if !whitespace_found && c.is_whitespace() {
+ whitespace_found = true;
+ }
+
+ !whitespace_found || c.is_whitespace()
+ })
+ }
+
+ /// Given a `Span`, gets a new `Span` covering the first token without its trailing whitespace
+ /// or the original `Span` in case of error.
+ ///
+ /// If `sp` points to `"let mut x"`, then a span pointing at `"let"` will be returned.
+ pub fn span_until_whitespace(&self, sp: Span) -> Span {
+ self.span_take_while(sp, |c| !c.is_whitespace())
+ }
+
+ /// Given a `Span`, gets a shorter one until `predicate` yields `false`.
+ pub fn span_take_while<P>(&self, sp: Span, predicate: P) -> Span
+ where
+ P: for<'r> FnMut(&'r char) -> bool,
+ {
+ if let Ok(snippet) = self.span_to_snippet(sp) {
+ let offset = snippet.chars().take_while(predicate).map(|c| c.len_utf8()).sum::<usize>();
+
+ sp.with_hi(BytePos(sp.lo().0 + (offset as u32)))
+ } else {
+ sp
+ }
+ }
+
+ /// Given a `Span`, return a span ending in the closest `{`. This is useful when you have a
+ /// `Span` enclosing a whole item but we need to point at only the head (usually the first
+ /// line) of that item.
+ ///
+ /// *Only suitable for diagnostics.*
+ pub fn guess_head_span(&self, sp: Span) -> Span {
+ // FIXME: extend the AST items to have a head span, or replace callers with pointing at
+ // the item's ident when appropriate.
+ self.span_until_char(sp, '{')
+ }
+
+ /// Returns a new span representing just the first character of the given span.
+ pub fn start_point(&self, sp: Span) -> Span {
+ let width = {
+ let sp = sp.data();
+ let local_begin = self.lookup_byte_offset(sp.lo);
+ let start_index = local_begin.pos.to_usize();
+ let src = local_begin.sf.external_src.borrow();
+
+ let snippet = if let Some(ref src) = local_begin.sf.src {
+ Some(&src[start_index..])
+ } else if let Some(src) = src.get_source() {
+ Some(&src[start_index..])
+ } else {
+ None
+ };
+
+ match snippet {
+ None => 1,
+ Some(snippet) => match snippet.chars().next() {
+ None => 1,
+ Some(c) => c.len_utf8(),
+ },
+ }
+ };
+
+ sp.with_hi(BytePos(sp.lo().0 + width as u32))
+ }
+
+ /// Returns a new span representing just the last character of this span.
+ pub fn end_point(&self, sp: Span) -> Span {
+ let pos = sp.hi().0;
+
+ let width = self.find_width_of_character_at_span(sp, false);
+ let corrected_end_position = pos.checked_sub(width).unwrap_or(pos);
+
+ let end_point = BytePos(cmp::max(corrected_end_position, sp.lo().0));
+ sp.with_lo(end_point)
+ }
+
+ /// Returns a new span representing the next character after the end-point of this span.
+ pub fn next_point(&self, sp: Span) -> Span {
+ if sp.is_dummy() {
+ return sp;
+ }
+ let start_of_next_point = sp.hi().0;
+
+ let width = self.find_width_of_character_at_span(sp.shrink_to_hi(), true);
+ // If the width is 1, then the next span should point to the same `lo` and `hi`. However,
+ // in the case of a multibyte character, where the width != 1, the next span should
+ // span multiple bytes to include the whole character.
+ let end_of_next_point =
+ start_of_next_point.checked_add(width - 1).unwrap_or(start_of_next_point);
+
+ let end_of_next_point = BytePos(cmp::max(sp.lo().0 + 1, end_of_next_point));
+ Span::new(BytePos(start_of_next_point), end_of_next_point, sp.ctxt(), None)
+ }
+
+ /// Finds the width of the character, either before or after the end of the provided span,
+ /// depending on the `forwards` parameter.
+ fn find_width_of_character_at_span(&self, sp: Span, forwards: bool) -> u32 {
+ let sp = sp.data();
+ if sp.lo == sp.hi {
+ debug!("find_width_of_character_at_span: early return empty span");
+ return 1;
+ }
+
+ let local_begin = self.lookup_byte_offset(sp.lo);
+ let local_end = self.lookup_byte_offset(sp.hi);
+ debug!(
+ "find_width_of_character_at_span: local_begin=`{:?}`, local_end=`{:?}`",
+ local_begin, local_end
+ );
+
+ if local_begin.sf.start_pos != local_end.sf.start_pos {
+ debug!("find_width_of_character_at_span: begin and end are in different files");
+ return 1;
+ }
+
+ let start_index = local_begin.pos.to_usize();
+ let end_index = local_end.pos.to_usize();
+ debug!(
+ "find_width_of_character_at_span: start_index=`{:?}`, end_index=`{:?}`",
+ start_index, end_index
+ );
+
+ // Disregard indexes that are at the start or end of their spans, they can't fit bigger
+ // characters.
+ if (!forwards && end_index == usize::MIN) || (forwards && start_index == usize::MAX) {
+ debug!("find_width_of_character_at_span: start or end of span, cannot be multibyte");
+ return 1;
+ }
+
+ let source_len = (local_begin.sf.end_pos - local_begin.sf.start_pos).to_usize();
+ debug!("find_width_of_character_at_span: source_len=`{:?}`", source_len);
+ // Ensure indexes are also not malformed.
+ if start_index > end_index || end_index > source_len {
+ debug!("find_width_of_character_at_span: source indexes are malformed");
+ return 1;
+ }
+
+ let src = local_begin.sf.external_src.borrow();
+
+ // We need to extend the snippet to the end of the src rather than to end_index so when
+ // searching forwards for boundaries we've got somewhere to search.
+ let snippet = if let Some(ref src) = local_begin.sf.src {
+ &src[start_index..]
+ } else if let Some(src) = src.get_source() {
+ &src[start_index..]
+ } else {
+ return 1;
+ };
+ debug!("find_width_of_character_at_span: snippet=`{:?}`", snippet);
+
+ let mut target = if forwards { end_index + 1 } else { end_index - 1 };
+ debug!("find_width_of_character_at_span: initial target=`{:?}`", target);
+
+ while !snippet.is_char_boundary(target - start_index) && target < source_len {
+ target = if forwards {
+ target + 1
+ } else {
+ match target.checked_sub(1) {
+ Some(target) => target,
+ None => {
+ break;
+ }
+ }
+ };
+ debug!("find_width_of_character_at_span: target=`{:?}`", target);
+ }
+ debug!("find_width_of_character_at_span: final target=`{:?}`", target);
+
+ if forwards { (target - end_index) as u32 } else { (end_index - target) as u32 }
+ }
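The boundary search above can be modelled in isolation. A hedged sketch of the backwards direction (hypothetical helper; assumes `end` is itself a char boundary):

```rust
// Width in bytes of the character ending at byte index `end` of `s`.
fn width_of_char_before(s: &str, end: usize) -> usize {
    let mut target = end - 1;
    // Walk left until we land on a UTF-8 char boundary, mirroring the
    // `checked_sub` loop above.
    while !s.is_char_boundary(target) {
        target -= 1;
    }
    end - target
}

fn main() {
    assert_eq!(width_of_char_before("a€b", 1), 1); // 'a' is one byte
    assert_eq!(width_of_char_before("a€b", 4), 3); // '€' is three bytes
}
```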
+
+ pub fn get_source_file(&self, filename: &FileName) -> Option<Lrc<SourceFile>> {
+ // Remap filename before lookup
+ let filename = self.path_mapping().map_filename_prefix(filename).0;
+ for sf in self.files.borrow().source_files.iter() {
+ if filename == sf.name {
+ return Some(sf.clone());
+ }
+ }
+ None
+ }
+
+ /// For a global `BytePos`, computes the local offset within the containing `SourceFile`.
+ pub fn lookup_byte_offset(&self, bpos: BytePos) -> SourceFileAndBytePos {
+ let idx = self.lookup_source_file_idx(bpos);
+ let sf = (*self.files.borrow().source_files)[idx].clone();
+ let offset = bpos - sf.start_pos;
+ SourceFileAndBytePos { sf, pos: offset }
+ }
+
+ // Returns the index of the `SourceFile` (in `self.files`) that contains `pos`.
+ // This index is guaranteed to be valid for the lifetime of this `SourceMap`,
+ // since `source_files` is a `MonotonicVec`.
+ pub fn lookup_source_file_idx(&self, pos: BytePos) -> usize {
+ self.files
+ .borrow()
+ .source_files
+ .binary_search_by_key(&pos, |key| key.start_pos)
+ .unwrap_or_else(|p| p - 1)
+ }
+
+ pub fn count_lines(&self) -> usize {
+ self.files().iter().fold(0, |a, f| a + f.count_lines())
+ }
+
+ pub fn generate_fn_name_span(&self, span: Span) -> Option<Span> {
+ let prev_span = self.span_extend_to_prev_str(span, "fn", true, true)?;
+ if let Ok(snippet) = self.span_to_snippet(prev_span) {
+ debug!(
+ "generate_fn_name_span: span={:?}, prev_span={:?}, snippet={:?}",
+ span, prev_span, snippet
+ );
+
+ if snippet.is_empty() {
+ return None;
+ };
+
+ let len = snippet
+ .find(|c: char| !c.is_alphanumeric() && c != '_')
+ .expect("no label after fn");
+ Some(prev_span.with_hi(BytePos(prev_span.lo().0 + len as u32)))
+ } else {
+ None
+ }
+ }
+
+ /// Takes the span of a type parameter in a function signature and tries to generate a span for
+ /// the function name (with generics) and a new snippet for this span with the pointed type
+ /// parameter as a new local type parameter.
+ ///
+ /// For instance:
+ /// ```rust,ignore (pseudo-Rust)
+ /// // Given span
+ /// fn my_function(param: T)
+ /// // ^ Original span
+ ///
+ /// // Result
+ /// fn my_function(param: T)
+ /// // ^^^^^^^^^^^ Generated span with snippet `my_function<T>`
+ /// ```
+ ///
+ /// Attention: The method used is very fragile since it essentially duplicates the work of the
+ /// parser. If you need to use this function or something similar, please consider updating the
+ /// `SourceMap` functions and this function to something more robust.
+ pub fn generate_local_type_param_snippet(&self, span: Span) -> Option<(Span, String)> {
+ // Try to extend the span to the previous "fn" keyword to retrieve the function
+ // signature.
+ if let Some(sugg_span) = self.span_extend_to_prev_str(span, "fn", false, true) {
+ if let Ok(snippet) = self.span_to_snippet(sugg_span) {
+ // Consume the function name.
+ let mut offset = snippet
+ .find(|c: char| !c.is_alphanumeric() && c != '_')
+ .expect("no label after fn");
+
+ // Consume the generics part of the function signature.
+ let mut bracket_counter = 0;
+ let mut last_char = None;
+ for c in snippet[offset..].chars() {
+ match c {
+ '<' => bracket_counter += 1,
+ '>' => bracket_counter -= 1,
+ '(' => {
+ if bracket_counter == 0 {
+ break;
+ }
+ }
+ _ => {}
+ }
+ offset += c.len_utf8();
+ last_char = Some(c);
+ }
+
+ // Adjust the suggestion span to encompass the function name with its generics.
+ let sugg_span = sugg_span.with_hi(BytePos(sugg_span.lo().0 + offset as u32));
+
+ // Prepare the new suggested snippet to append the type parameter that triggered
+ // the error in the generics of the function signature.
+ let mut new_snippet = if last_char == Some('>') {
+ format!("{}, ", &snippet[..(offset - '>'.len_utf8())])
+ } else {
+ format!("{}<", &snippet[..offset])
+ };
+ new_snippet
+ .push_str(&self.span_to_snippet(span).unwrap_or_else(|_| "T".to_string()));
+ new_snippet.push('>');
+
+ return Some((sugg_span, new_snippet));
+ }
+ }
+
+ None
+ }
+ pub fn ensure_source_file_source_present(&self, source_file: Lrc<SourceFile>) -> bool {
+ source_file.add_external_src(|| {
+ match source_file.name {
+ FileName::Real(ref name) if let Some(local_path) = name.local_path() => {
+ self.file_loader.read_file(local_path).ok()
+ }
+ _ => None,
+ }
+ })
+ }
+
+ pub fn is_imported(&self, sp: Span) -> bool {
+ let source_file_index = self.lookup_source_file_idx(sp.lo());
+ let source_file = &self.files()[source_file_index];
+ source_file.is_imported()
+ }
+
+ /// Gets the span of a statement. If the statement is a macro expansion, the
+ /// span in the context of the block span is found. The trailing semicolon is included
+ /// on a best-effort basis.
+ pub fn stmt_span(&self, stmt_span: Span, block_span: Span) -> Span {
+ if !stmt_span.from_expansion() {
+ return stmt_span;
+ }
+ let mac_call = original_sp(stmt_span, block_span);
+ self.mac_call_stmt_semi_span(mac_call).map_or(mac_call, |s| mac_call.with_hi(s.hi()))
+ }
+
+ /// Tries to find the span of the semicolon of a macro call statement.
+ /// The input must be the *call site* span of a statement from macro expansion.
+ /// ```ignore (illustrative)
+ /// // v output
+ /// mac!();
+ /// // ^^^^^^ input
+ /// ```
+ pub fn mac_call_stmt_semi_span(&self, mac_call: Span) -> Option<Span> {
+ let span = self.span_extend_while(mac_call, char::is_whitespace).ok()?;
+ let span = span.shrink_to_hi().with_hi(BytePos(span.hi().0.checked_add(1)?));
+ if self.span_to_snippet(span).as_deref() != Ok(";") {
+ return None;
+ }
+ Some(span)
+ }
+}
+
+#[derive(Clone)]
+pub struct FilePathMapping {
+ mapping: Vec<(PathBuf, PathBuf)>,
+ filename_display_for_diagnostics: FileNameDisplayPreference,
+}
+
+impl FilePathMapping {
+ pub fn empty() -> FilePathMapping {
+ FilePathMapping::new(Vec::new())
+ }
+
+ pub fn new(mapping: Vec<(PathBuf, PathBuf)>) -> FilePathMapping {
+ let filename_display_for_diagnostics = if mapping.is_empty() {
+ FileNameDisplayPreference::Local
+ } else {
+ FileNameDisplayPreference::Remapped
+ };
+
+ FilePathMapping { mapping, filename_display_for_diagnostics }
+ }
+
+ /// Applies any path prefix substitution as defined by the mapping.
+ /// The return value is the remapped path and a boolean indicating whether
+ /// the path was affected by the mapping.
+ pub fn map_prefix(&self, path: PathBuf) -> (PathBuf, bool) {
+ if path.as_os_str().is_empty() {
+ // Exit early if the path is empty and therefore there's nothing to remap.
+ // This is mostly to reduce spam for `RUSTC_LOG=[remap_path_prefix]`.
+ return (path, false);
+ }
+
+ return remap_path_prefix(&self.mapping, path);
+
+ #[instrument(level = "debug", skip(mapping))]
+ fn remap_path_prefix(mapping: &[(PathBuf, PathBuf)], path: PathBuf) -> (PathBuf, bool) {
+ // NOTE: We are iterating over the mapping entries from last to first
+ // because entries specified later on the command line should
+ // take precedence.
+ for &(ref from, ref to) in mapping.iter().rev() {
+ debug!("Trying to apply {:?} => {:?}", from, to);
+
+ if let Ok(rest) = path.strip_prefix(from) {
+ let remapped = if rest.as_os_str().is_empty() {
+ // This is subtle, joining an empty path onto e.g. `foo/bar` will
+ // result in `foo/bar/`, that is, there'll be an additional directory
+ // separator at the end. This can lead to duplicated directory separators
+ // in remapped paths down the line.
+ // So, if we have an exact match, we just return that without a call
+ // to `Path::join()`.
+ to.clone()
+ } else {
+ to.join(rest)
+ };
+ debug!("Match - remapped {:?} => {:?}", path, remapped);
+
+ return (remapped, true);
+ } else {
+ debug!("No match - prefix {:?} does not match {:?}", from, path);
+ }
+ }
+
+ debug!("Path {:?} was not remapped", path);
+ (path, false)
+ }
+ }
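Because `remap_path_prefix` walks the mapping in reverse, a later entry shadows an earlier one with the same prefix. A usage sketch against the API defined here (paths are illustrative and Unix-style; assumes `FilePathMapping` from this module is in scope):

```rust
use std::path::PathBuf;

fn main() {
    let mapping = FilePathMapping::new(vec![
        (PathBuf::from("/a"), PathBuf::from("X")),
        (PathBuf::from("/a"), PathBuf::from("Y")), // later entry wins
    ]);
    let (remapped, was_remapped) = mapping.map_prefix(PathBuf::from("/a/f.rs"));
    assert_eq!(remapped, PathBuf::from("Y/f.rs"));
    assert!(was_remapped);
}
```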
+
+ fn map_filename_prefix(&self, file: &FileName) -> (FileName, bool) {
+ match file {
+ FileName::Real(realfile) if let RealFileName::LocalPath(local_path) = realfile => {
+ let (mapped_path, mapped) = self.map_prefix(local_path.to_path_buf());
+ let realfile = if mapped {
+ RealFileName::Remapped {
+ local_path: Some(local_path.clone()),
+ virtual_name: mapped_path,
+ }
+ } else {
+ realfile.clone()
+ };
+ (FileName::Real(realfile), mapped)
+ }
+ FileName::Real(_) => unreachable!("attempted to remap an already remapped filename"),
+ other => (other.clone(), false),
+ }
+ }
+
+ /// Expand a relative path to an absolute path with remapping taken into account.
+ /// Use this when absolute paths are required (e.g. debuginfo or crate metadata).
+ ///
+ /// The resulting `RealFileName` will have its `local_path` portion erased if
+ /// possible (i.e. if there's also a remapped path).
+ pub fn to_embeddable_absolute_path(
+ &self,
+ file_path: RealFileName,
+ working_directory: &RealFileName,
+ ) -> RealFileName {
+ match file_path {
+ // Anything that's already remapped we don't modify, except for erasing
+ // the `local_path` portion.
+ RealFileName::Remapped { local_path: _, virtual_name } => {
+ RealFileName::Remapped {
+ // We do not want any local path to be exported into metadata
+ local_path: None,
+ // We use the remapped name verbatim, even if it looks like a relative
+ // path. The assumption is that the user doesn't want us to further
+ // process paths that have gone through remapping.
+ virtual_name,
+ }
+ }
+
+ RealFileName::LocalPath(unmapped_file_path) => {
+ // If no remapping has been applied yet, try to do so
+ let (new_path, was_remapped) = self.map_prefix(unmapped_file_path);
+ if was_remapped {
+ // It was remapped, so don't modify further
+ return RealFileName::Remapped { local_path: None, virtual_name: new_path };
+ }
+
+ if new_path.is_absolute() {
+ // No remapping has applied to this path and it is absolute,
+ // so the working directory cannot influence it either, so
+ // we are done.
+ return RealFileName::LocalPath(new_path);
+ }
+
+ debug_assert!(new_path.is_relative());
+ let unmapped_file_path_rel = new_path;
+
+ match working_directory {
+ RealFileName::LocalPath(unmapped_working_dir_abs) => {
+ let file_path_abs = unmapped_working_dir_abs.join(unmapped_file_path_rel);
+
+ // Although neither `working_directory` nor the file name were subject
+ // to path remapping, the concatenation between the two may be. Hence
+ // we need to do a remapping here.
+ let (file_path_abs, was_remapped) = self.map_prefix(file_path_abs);
+ if was_remapped {
+ RealFileName::Remapped {
+ // Erase the actual path
+ local_path: None,
+ virtual_name: file_path_abs,
+ }
+ } else {
+ // No kind of remapping applied to this path, so
+ // we leave it as it is.
+ RealFileName::LocalPath(file_path_abs)
+ }
+ }
+ RealFileName::Remapped {
+ local_path: _,
+ virtual_name: remapped_working_dir_abs,
+ } => {
+ // If working_directory has been remapped, then we emit
+ // Remapped variant as the expanded path won't be valid
+ RealFileName::Remapped {
+ local_path: None,
+ virtual_name: Path::new(remapped_working_dir_abs)
+ .join(unmapped_file_path_rel),
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_span/src/source_map/tests.rs b/compiler/rustc_span/src/source_map/tests.rs
new file mode 100644
index 000000000..be827cea8
--- /dev/null
+++ b/compiler/rustc_span/src/source_map/tests.rs
@@ -0,0 +1,481 @@
+use super::*;
+
+use rustc_data_structures::sync::Lrc;
+
+fn init_source_map() -> SourceMap {
+ let sm = SourceMap::new(FilePathMapping::empty());
+ sm.new_source_file(PathBuf::from("blork.rs").into(), "first line.\nsecond line".to_string());
+ sm.new_source_file(PathBuf::from("empty.rs").into(), String::new());
+ sm.new_source_file(PathBuf::from("blork2.rs").into(), "first line.\nsecond line".to_string());
+ sm
+}
+
+impl SourceMap {
+ /// Returns `Some(span)`, a union of the LHS and RHS span. The LHS must precede the RHS. If
+ /// there are gaps between LHS and RHS, the resulting union will cross these gaps.
+ /// For this to work,
+ ///
+ /// * the syntax contexts of both spans must match,
+ /// * the LHS span needs to end on the same line the RHS span begins,
+ /// * the LHS span must start at or before the RHS span.
+ fn merge_spans(&self, sp_lhs: Span, sp_rhs: Span) -> Option<Span> {
+ // Ensure we're at the same expansion ID.
+ if sp_lhs.ctxt() != sp_rhs.ctxt() {
+ return None;
+ }
+
+ let lhs_end = match self.lookup_line(sp_lhs.hi()) {
+ Ok(x) => x,
+ Err(_) => return None,
+ };
+ let rhs_begin = match self.lookup_line(sp_rhs.lo()) {
+ Ok(x) => x,
+ Err(_) => return None,
+ };
+
+ // If we must cross lines to merge, don't merge.
+ if lhs_end.line != rhs_begin.line {
+ return None;
+ }
+
+ // Ensure these follow the expected order and that we don't overlap.
+ if (sp_lhs.lo() <= sp_rhs.lo()) && (sp_lhs.hi() <= sp_rhs.lo()) {
+ Some(sp_lhs.to(sp_rhs))
+ } else {
+ None
+ }
+ }
+
+ /// Converts an absolute `BytePos` to a `CharPos` relative to the `SourceFile`.
+ fn bytepos_to_file_charpos(&self, bpos: BytePos) -> CharPos {
+ let idx = self.lookup_source_file_idx(bpos);
+ let sf = &(*self.files.borrow().source_files)[idx];
+ sf.bytepos_to_file_charpos(bpos)
+ }
+}
+
+/// Tests `lookup_byte_offset`.
+#[test]
+fn t3() {
+ let sm = init_source_map();
+
+ let srcfbp1 = sm.lookup_byte_offset(BytePos(23));
+ assert_eq!(srcfbp1.sf.name, PathBuf::from("blork.rs").into());
+ assert_eq!(srcfbp1.pos, BytePos(23));
+
+ let srcfbp1 = sm.lookup_byte_offset(BytePos(24));
+ assert_eq!(srcfbp1.sf.name, PathBuf::from("empty.rs").into());
+ assert_eq!(srcfbp1.pos, BytePos(0));
+
+ let srcfbp2 = sm.lookup_byte_offset(BytePos(25));
+ assert_eq!(srcfbp2.sf.name, PathBuf::from("blork2.rs").into());
+ assert_eq!(srcfbp2.pos, BytePos(0));
+}
+
+/// Tests `bytepos_to_file_charpos`.
+#[test]
+fn t4() {
+ let sm = init_source_map();
+
+ let cp1 = sm.bytepos_to_file_charpos(BytePos(22));
+ assert_eq!(cp1, CharPos(22));
+
+ let cp2 = sm.bytepos_to_file_charpos(BytePos(25));
+ assert_eq!(cp2, CharPos(0));
+}
+
+/// Tests zero-length `SourceFile`s.
+#[test]
+fn t5() {
+ let sm = init_source_map();
+
+ let loc1 = sm.lookup_char_pos(BytePos(22));
+ assert_eq!(loc1.file.name, PathBuf::from("blork.rs").into());
+ assert_eq!(loc1.line, 2);
+ assert_eq!(loc1.col, CharPos(10));
+
+ let loc2 = sm.lookup_char_pos(BytePos(25));
+ assert_eq!(loc2.file.name, PathBuf::from("blork2.rs").into());
+ assert_eq!(loc2.line, 1);
+ assert_eq!(loc2.col, CharPos(0));
+}
+
+fn init_source_map_mbc() -> SourceMap {
+ let sm = SourceMap::new(FilePathMapping::empty());
+ // "€" is a three-byte UTF8 char.
+ sm.new_source_file(
+ PathBuf::from("blork.rs").into(),
+ "fir€st €€€€ line.\nsecond line".to_string(),
+ );
+ sm.new_source_file(
+ PathBuf::from("blork2.rs").into(),
+ "first line€€.\n€ second line".to_string(),
+ );
+ sm
+}
+
+/// Tests `bytepos_to_file_charpos` in the presence of multi-byte chars.
+#[test]
+fn t6() {
+ let sm = init_source_map_mbc();
+
+ let cp1 = sm.bytepos_to_file_charpos(BytePos(3));
+ assert_eq!(cp1, CharPos(3));
+
+ let cp2 = sm.bytepos_to_file_charpos(BytePos(6));
+ assert_eq!(cp2, CharPos(4));
+
+ let cp3 = sm.bytepos_to_file_charpos(BytePos(56));
+ assert_eq!(cp3, CharPos(12));
+
+ let cp4 = sm.bytepos_to_file_charpos(BytePos(61));
+ assert_eq!(cp4, CharPos(15));
+}
+
+/// Test `span_to_lines` for a span ending at the end of a `SourceFile`.
+#[test]
+fn t7() {
+ let sm = init_source_map();
+ let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
+ let file_lines = sm.span_to_lines(span).unwrap();
+
+ assert_eq!(file_lines.file.name, PathBuf::from("blork.rs").into());
+ assert_eq!(file_lines.lines.len(), 1);
+ assert_eq!(file_lines.lines[0].line_index, 1);
+}
+
+/// Given a string like " ~~~~~~~~~~~~ ", produces a span
+/// covering that range. The idea is that the selection string has the
+/// same length as the input, and we recover the byte positions from the
+/// positions of the `~` markers. Note that this can span lines and so on.
+fn span_from_selection(input: &str, selection: &str) -> Span {
+ assert_eq!(input.len(), selection.len());
+ let left_index = selection.find('~').unwrap() as u32;
+ let right_index = selection.rfind('~').map_or(left_index, |x| x as u32);
+ Span::with_root_ctxt(BytePos(left_index), BytePos(right_index + 1))
+}
+
+/// Tests `span_to_snippet` and `span_to_lines` for a span covering 3
+/// lines in the middle of a file.
+#[test]
+fn span_to_snippet_and_lines_spanning_multiple_lines() {
+ let sm = SourceMap::new(FilePathMapping::empty());
+ let inputtext = "aaaaa\nbbbbBB\nCCC\nDDDDDddddd\neee\n";
+ let selection = " \n ~~\n~~~\n~~~~~ \n \n";
+ sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_string());
+ let span = span_from_selection(inputtext, selection);
+
+ // Check that we are extracting the text we thought we were extracting.
+ assert_eq!(&sm.span_to_snippet(span).unwrap(), "BB\nCCC\nDDDDD");
+
+ // Check that span_to_lines gives us the complete result with the lines/cols we expected.
+ let lines = sm.span_to_lines(span).unwrap();
+ let expected = vec![
+ LineInfo { line_index: 1, start_col: CharPos(4), end_col: CharPos(6) },
+ LineInfo { line_index: 2, start_col: CharPos(0), end_col: CharPos(3) },
+ LineInfo { line_index: 3, start_col: CharPos(0), end_col: CharPos(5) },
+ ];
+ assert_eq!(lines.lines, expected);
+}
+
+/// Test span_to_snippet for a span ending at the end of a `SourceFile`.
+#[test]
+fn t8() {
+ let sm = init_source_map();
+ let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
+ let snippet = sm.span_to_snippet(span);
+
+ assert_eq!(snippet, Ok("second line".to_string()));
+}
+
+/// Test `span_to_str` for a span ending at the end of a `SourceFile`.
+#[test]
+fn t9() {
+ let sm = init_source_map();
+ let span = Span::with_root_ctxt(BytePos(12), BytePos(23));
+ let sstr = sm.span_to_diagnostic_string(span);
+
+ assert_eq!(sstr, "blork.rs:2:1: 2:12");
+}
+
+/// Tests failing to merge two spans on different lines.
+#[test]
+fn span_merging_fail() {
+ let sm = SourceMap::new(FilePathMapping::empty());
+ let inputtext = "bbbb BB\ncc CCC\n";
+ let selection1 = " ~~\n \n";
+ let selection2 = " \n ~~~\n";
+ sm.new_source_file(Path::new("blork.rs").to_owned().into(), inputtext.to_owned());
+ let span1 = span_from_selection(inputtext, selection1);
+ let span2 = span_from_selection(inputtext, selection2);
+
+ assert!(sm.merge_spans(span1, span2).is_none());
+}
+
+/// Tests loading an external source file that requires normalization.
+#[test]
+fn t10() {
+ let sm = SourceMap::new(FilePathMapping::empty());
+ let unnormalized = "first line.\r\nsecond line";
+ let normalized = "first line.\nsecond line";
+
+ let src_file = sm.new_source_file(PathBuf::from("blork.rs").into(), unnormalized.to_string());
+
+ assert_eq!(src_file.src.as_ref().unwrap().as_ref(), normalized);
+ assert!(
+ src_file.src_hash.matches(unnormalized),
+ "src_hash should use the source before normalization"
+ );
+
+ let SourceFile {
+ name,
+ src_hash,
+ start_pos,
+ end_pos,
+ lines,
+ multibyte_chars,
+ non_narrow_chars,
+ normalized_pos,
+ name_hash,
+ ..
+ } = (*src_file).clone();
+
+ let imported_src_file = sm.new_imported_source_file(
+ name,
+ src_hash,
+ name_hash,
+ (end_pos - start_pos).to_usize(),
+ CrateNum::new(0),
+ lines,
+ multibyte_chars,
+ non_narrow_chars,
+ normalized_pos,
+ start_pos,
+ end_pos,
+ );
+
+ assert!(
+ imported_src_file.external_src.borrow().get_source().is_none(),
+ "imported source file should not have source yet"
+ );
+ imported_src_file.add_external_src(|| Some(unnormalized.to_string()));
+ assert_eq!(
+ imported_src_file.external_src.borrow().get_source().unwrap().as_ref(),
+ normalized,
+ "imported source file should be normalized"
+ );
+}
+
+/// Returns the span corresponding to the `n`th occurrence of `substring` in `source_text`.
+trait SourceMapExtension {
+ fn span_substr(
+ &self,
+ file: &Lrc<SourceFile>,
+ source_text: &str,
+ substring: &str,
+ n: usize,
+ ) -> Span;
+}
+
+impl SourceMapExtension for SourceMap {
+ fn span_substr(
+ &self,
+ file: &Lrc<SourceFile>,
+ source_text: &str,
+ substring: &str,
+ n: usize,
+ ) -> Span {
+ eprintln!(
+ "span_substr(file={:?}/{:?}, substring={:?}, n={})",
+ file.name, file.start_pos, substring, n
+ );
+ let mut i = 0;
+ let mut hi = 0;
+ loop {
+ let offset = source_text[hi..].find(substring).unwrap_or_else(|| {
+ panic!(
+ "source_text `{}` does not have {} occurrences of `{}`, only {}",
+ source_text, n, substring, i
+ );
+ });
+ let lo = hi + offset;
+ hi = lo + substring.len();
+ if i == n {
+ let span = Span::with_root_ctxt(
+ BytePos(lo as u32 + file.start_pos.0),
+ BytePos(hi as u32 + file.start_pos.0),
+ );
+ assert_eq!(&self.span_to_snippet(span).unwrap()[..], substring);
+ return span;
+ }
+ i += 1;
+ }
+ }
+}
+
+// Takes a unix-style path and returns a platform-specific path.
+fn path(p: &str) -> PathBuf {
+ path_str(p).into()
+}
+
+// Takes a unix-style path and returns a platform-specific path.
+fn path_str(p: &str) -> String {
+ #[cfg(not(windows))]
+ {
+ return p.into();
+ }
+
+ #[cfg(windows)]
+ {
+ let mut path = p.replace('/', "\\");
+ if let Some(rest) = path.strip_prefix('\\') {
+ path = ["X:\\", rest].concat();
+ }
+
+ path
+ }
+}
+
+fn map_path_prefix(mapping: &FilePathMapping, p: &str) -> String {
+ // It's important that we convert to a string here because that's what
+ // later stages do too (e.g. in the backend), and comparing `Path` values
+ // won't catch some differences at the string level, e.g. "abc" and "abc/"
+ // compare as equal.
+ mapping.map_prefix(path(p)).0.to_string_lossy().to_string()
+}
+
+#[test]
+fn path_prefix_remapping() {
+ // Relative to relative
+ {
+ let mapping = &FilePathMapping::new(vec![(path("abc/def"), path("foo"))]);
+
+ assert_eq!(map_path_prefix(mapping, "abc/def/src/main.rs"), path_str("foo/src/main.rs"));
+ assert_eq!(map_path_prefix(mapping, "abc/def"), path_str("foo"));
+ }
+
+ // Relative to absolute
+ {
+ let mapping = &FilePathMapping::new(vec![(path("abc/def"), path("/foo"))]);
+
+ assert_eq!(map_path_prefix(mapping, "abc/def/src/main.rs"), path_str("/foo/src/main.rs"));
+ assert_eq!(map_path_prefix(mapping, "abc/def"), path_str("/foo"));
+ }
+
+ // Absolute to relative
+ {
+ let mapping = &FilePathMapping::new(vec![(path("/abc/def"), path("foo"))]);
+
+ assert_eq!(map_path_prefix(mapping, "/abc/def/src/main.rs"), path_str("foo/src/main.rs"));
+ assert_eq!(map_path_prefix(mapping, "/abc/def"), path_str("foo"));
+ }
+
+ // Absolute to absolute
+ {
+ let mapping = &FilePathMapping::new(vec![(path("/abc/def"), path("/foo"))]);
+
+ assert_eq!(map_path_prefix(mapping, "/abc/def/src/main.rs"), path_str("/foo/src/main.rs"));
+ assert_eq!(map_path_prefix(mapping, "/abc/def"), path_str("/foo"));
+ }
+}
+
+#[test]
+fn path_prefix_remapping_expand_to_absolute() {
+ // "virtual" working directory is relative path
+ let mapping =
+ &FilePathMapping::new(vec![(path("/foo"), path("FOO")), (path("/bar"), path("BAR"))]);
+ let working_directory = path("/foo");
+ let working_directory = RealFileName::Remapped {
+ local_path: Some(working_directory.clone()),
+ virtual_name: mapping.map_prefix(working_directory).0,
+ };
+
+ assert_eq!(working_directory.remapped_path_if_available(), path("FOO"));
+
+ // Unmapped absolute path
+ assert_eq!(
+ mapping.to_embeddable_absolute_path(
+ RealFileName::LocalPath(path("/foo/src/main.rs")),
+ &working_directory
+ ),
+ RealFileName::Remapped { local_path: None, virtual_name: path("FOO/src/main.rs") }
+ );
+
+ // Unmapped absolute path with unrelated working directory
+ assert_eq!(
+ mapping.to_embeddable_absolute_path(
+ RealFileName::LocalPath(path("/bar/src/main.rs")),
+ &working_directory
+ ),
+ RealFileName::Remapped { local_path: None, virtual_name: path("BAR/src/main.rs") }
+ );
+
+ // Unmapped absolute path that does not match any prefix
+ assert_eq!(
+ mapping.to_embeddable_absolute_path(
+ RealFileName::LocalPath(path("/quux/src/main.rs")),
+ &working_directory
+ ),
+ RealFileName::LocalPath(path("/quux/src/main.rs")),
+ );
+
+ // Unmapped relative path
+ assert_eq!(
+ mapping.to_embeddable_absolute_path(
+ RealFileName::LocalPath(path("src/main.rs")),
+ &working_directory
+ ),
+ RealFileName::Remapped { local_path: None, virtual_name: path("FOO/src/main.rs") }
+ );
+
+ // Unmapped relative path with `./`
+ assert_eq!(
+ mapping.to_embeddable_absolute_path(
+ RealFileName::LocalPath(path("./src/main.rs")),
+ &working_directory
+ ),
+ RealFileName::Remapped { local_path: None, virtual_name: path("FOO/src/main.rs") }
+ );
+
+ // Unmapped relative path that does not match any prefix
+ assert_eq!(
+ mapping.to_embeddable_absolute_path(
+ RealFileName::LocalPath(path("quux/src/main.rs")),
+ &RealFileName::LocalPath(path("/abc")),
+ ),
+ RealFileName::LocalPath(path("/abc/quux/src/main.rs")),
+ );
+
+ // Already remapped absolute path
+ assert_eq!(
+ mapping.to_embeddable_absolute_path(
+ RealFileName::Remapped {
+ local_path: Some(path("/foo/src/main.rs")),
+ virtual_name: path("FOO/src/main.rs"),
+ },
+ &working_directory
+ ),
+ RealFileName::Remapped { local_path: None, virtual_name: path("FOO/src/main.rs") }
+ );
+
+ // Already remapped absolute path, with unrelated working directory
+ assert_eq!(
+ mapping.to_embeddable_absolute_path(
+ RealFileName::Remapped {
+ local_path: Some(path("/bar/src/main.rs")),
+ virtual_name: path("BAR/src/main.rs"),
+ },
+ &working_directory
+ ),
+ RealFileName::Remapped { local_path: None, virtual_name: path("BAR/src/main.rs") }
+ );
+
+ // Already remapped relative path
+ assert_eq!(
+ mapping.to_embeddable_absolute_path(
+ RealFileName::Remapped { local_path: None, virtual_name: path("XYZ/src/main.rs") },
+ &working_directory
+ ),
+ RealFileName::Remapped { local_path: None, virtual_name: path("XYZ/src/main.rs") }
+ );
+}
diff --git a/compiler/rustc_span/src/span_encoding.rs b/compiler/rustc_span/src/span_encoding.rs
new file mode 100644
index 000000000..3ee329e97
--- /dev/null
+++ b/compiler/rustc_span/src/span_encoding.rs
@@ -0,0 +1,150 @@
+// Spans are encoded using a 1-bit tag and 2 different encoding formats (one for each tag value).
+// One format is used for keeping span data inline,
+// another contains an index into an out-of-line span interner.
+// The encoding format for inline spans was obtained by optimizing over crates in rustc/libstd.
+// See https://internals.rust-lang.org/t/rfc-compiler-refactoring-spans/1357/28
+
+use crate::def_id::LocalDefId;
+use crate::hygiene::SyntaxContext;
+use crate::SPAN_TRACK;
+use crate::{BytePos, SpanData};
+
+use rustc_data_structures::fx::FxIndexSet;
+
+/// A compressed span.
+///
+/// Whereas [`SpanData`] is 12 bytes, which is a bit too big to stick everywhere, `Span`
+/// is a form that only takes up 8 bytes, with less space for the length and
+/// context. The vast majority (99.9%+) of `SpanData` instances will fit within
+/// those 8 bytes; any `SpanData` whose fields don't fit into a `Span` are
+/// stored in a separate interner table, and the `Span` will index into that
+/// table. Interning is rare enough that the cost is low, but common enough
+/// that the code is exercised regularly.
+///
+/// An earlier version of this code used only 4 bytes for `Span`, but that was
+/// slower because only 80--90% of spans could be stored inline (even less in
+/// very large crates) and so the interner was used a lot more.
+///
+/// Inline (compressed) format:
+/// - `span.base_or_index == span_data.lo`
+/// - `span.len_or_tag == len == span_data.hi - span_data.lo` (must be `<= MAX_LEN`)
+/// - `span.ctxt == span_data.ctxt` (must be `<= MAX_CTXT`)
+///
+/// Interned format:
+/// - `span.base_or_index == index` (indexes into the interner table)
+/// - `span.len_or_tag == LEN_TAG` (high bit set, all other bits are zero)
+/// - `span.ctxt == 0`
+///
+/// The inline form uses 0 for the tag value (rather than 1) so that we don't
+/// need to mask out the tag bit when getting the length, and so that the
+/// dummy span can be all zeroes.
+///
+/// Notes about the choice of field sizes:
+/// - `base` is 32 bits in both `Span` and `SpanData`, which means that `base`
+/// values never cause interning. The number of bits needed for `base`
+/// depends on the crate size. 32 bits allows up to 4 GiB of code in a crate.
+/// - `len` is 15 bits in `Span` (a u16, minus 1 bit for the tag) and 32 bits
+/// in `SpanData`, which means that large `len` values will cause interning.
+/// The number of bits needed for `len` does not depend on the crate size.
+/// The most common numbers of bits for `len` are from 0 to 7, with a peak usually
+/// at 3 or 4, and then it drops off quickly from 8 onwards. 15 bits is enough
+/// for 99.99%+ of cases, but larger values (sometimes 20+ bits) might occur
+/// dozens of times in a typical crate.
+/// - `ctxt` is 16 bits in `Span` and 32 bits in `SpanData`, which means that
+/// large `ctxt` values will cause interning. The number of bits needed for
+/// `ctxt` values depend partly on the crate size and partly on the form of
+/// the code. No crates in `rustc-perf` need more than 15 bits for `ctxt`,
+/// but larger crates might need more than 16 bits.
+///
+/// In order to reliably use parented spans in incremental compilation, accessing
+/// a span's data must register a dependency on the parent definition's span. This
+/// is performed using the callback `SPAN_TRACK` to access the query engine.
+///
+#[derive(Clone, Copy, Eq, PartialEq, Hash)]
+#[rustc_pass_by_value]
+pub struct Span {
+ base_or_index: u32,
+ len_or_tag: u16,
+ ctxt_or_zero: u16,
+}
+
+const LEN_TAG: u16 = 0b1000_0000_0000_0000;
+const MAX_LEN: u32 = 0b0111_1111_1111_1111;
+const MAX_CTXT: u32 = 0b1111_1111_1111_1111;
+
+/// Dummy span, both position and length are zero, syntax context is zero as well.
+pub const DUMMY_SP: Span = Span { base_or_index: 0, len_or_tag: 0, ctxt_or_zero: 0 };
+
+impl Span {
+ #[inline]
+ pub fn new(
+ mut lo: BytePos,
+ mut hi: BytePos,
+ ctxt: SyntaxContext,
+ parent: Option<LocalDefId>,
+ ) -> Self {
+ if lo > hi {
+ std::mem::swap(&mut lo, &mut hi);
+ }
+
+ let (base, len, ctxt2) = (lo.0, hi.0 - lo.0, ctxt.as_u32());
+
+ if len <= MAX_LEN && ctxt2 <= MAX_CTXT && parent.is_none() {
+ // Inline format.
+ Span { base_or_index: base, len_or_tag: len as u16, ctxt_or_zero: ctxt2 as u16 }
+ } else {
+ // Interned format.
+ let index =
+ with_span_interner(|interner| interner.intern(&SpanData { lo, hi, ctxt, parent }));
+ Span { base_or_index: index, len_or_tag: LEN_TAG, ctxt_or_zero: 0 }
+ }
+ }
+
+ #[inline]
+ pub fn data(self) -> SpanData {
+ let data = self.data_untracked();
+ if let Some(parent) = data.parent {
+ (*SPAN_TRACK)(parent);
+ }
+ data
+ }
+
+ /// Internal function to translate between an encoded span and the expanded representation.
+ /// This function must not be used outside the incremental engine.
+ #[inline]
+ pub fn data_untracked(self) -> SpanData {
+ if self.len_or_tag != LEN_TAG {
+ // Inline format.
+ debug_assert!(self.len_or_tag as u32 <= MAX_LEN);
+ SpanData {
+ lo: BytePos(self.base_or_index),
+ hi: BytePos(self.base_or_index + self.len_or_tag as u32),
+ ctxt: SyntaxContext::from_u32(self.ctxt_or_zero as u32),
+ parent: None,
+ }
+ } else {
+ // Interned format.
+ debug_assert!(self.ctxt_or_zero == 0);
+ let index = self.base_or_index;
+ with_span_interner(|interner| interner.spans[index as usize])
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct SpanInterner {
+ spans: FxIndexSet<SpanData>,
+}
+
+impl SpanInterner {
+ fn intern(&mut self, span_data: &SpanData) -> u32 {
+ let (index, _) = self.spans.insert_full(*span_data);
+ index as u32
+ }
+}
+
+// Runs the given closure with exclusive access to the session-global span interner.
+#[inline]
+fn with_span_interner<T, F: FnOnce(&mut SpanInterner) -> T>(f: F) -> T {
+ crate::with_session_globals(|session_globals| f(&mut *session_globals.span_interner.lock()))
+}
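To make the two encodings concrete, here is a standalone model of just the inline packing (a hypothetical `TinySpan`, not the compiler's type; the interned fallback is elided):

```rust
const LEN_TAG: u16 = 0b1000_0000_0000_0000;
const MAX_LEN: u32 = 0b0111_1111_1111_1111;
const MAX_CTXT: u32 = 0b1111_1111_1111_1111;

#[derive(Debug, PartialEq)]
struct TinySpan {
    base_or_index: u32,
    len_or_tag: u16,
    ctxt_or_zero: u16,
}

// Inline encoding succeeds only when the length fits in 15 bits and the
// context in 16; the real `Span` falls back to the interner otherwise.
fn encode_inline(lo: u32, hi: u32, ctxt: u32) -> Option<TinySpan> {
    let len = hi.checked_sub(lo)?;
    (len <= MAX_LEN && ctxt <= MAX_CTXT).then(|| TinySpan {
        base_or_index: lo,
        len_or_tag: len as u16,
        ctxt_or_zero: ctxt as u16,
    })
}

fn decode_inline(s: &TinySpan) -> (u32, u32, u32) {
    debug_assert!(s.len_or_tag != LEN_TAG, "interned form not modeled here");
    (s.base_or_index, s.base_or_index + s.len_or_tag as u32, s.ctxt_or_zero as u32)
}

fn main() {
    let sp = encode_inline(10, 25, 0).unwrap();
    assert_eq!(decode_inline(&sp), (10, 25, 0));
    // A 100_000-byte-long span exceeds MAX_LEN and would be interned instead.
    assert!(encode_inline(0, 100_000, 0).is_none());
}
```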
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
new file mode 100644
index 000000000..791160ff6
--- /dev/null
+++ b/compiler/rustc_span/src/symbol.rs
@@ -0,0 +1,2067 @@
+//! An "interner" is a data structure that associates values with usize tags and
+//! allows bidirectional lookup; i.e., given a value, one can easily find the
+//! type, and vice versa.
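A toy illustration of that contract (hypothetical `TinyInterner`, far simpler than the arena-backed interner in this file): interning the same value twice yields the same tag, and the tag maps back to the value.

```rust
use std::collections::HashMap;

#[derive(Default)]
struct TinyInterner {
    strings: Vec<String>,       // tag -> value
    tags: HashMap<String, u32>, // value -> tag
}

impl TinyInterner {
    fn intern(&mut self, s: &str) -> u32 {
        if let Some(&tag) = self.tags.get(s) {
            return tag; // already interned: same value, same tag
        }
        let tag = self.strings.len() as u32;
        self.strings.push(s.to_owned());
        self.tags.insert(s.to_owned(), tag);
        tag
    }

    fn get(&self, tag: u32) -> &str {
        &self.strings[tag as usize]
    }
}

fn main() {
    let mut interner = TinyInterner::default();
    let a = interner.intern("hello");
    assert_eq!(a, interner.intern("hello"));
    assert_eq!(interner.get(a), "hello");
}
```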
+
+use rustc_arena::DroplessArena;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+use rustc_data_structures::sync::Lock;
+use rustc_macros::HashStable_Generic;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+use std::cmp::{Ord, PartialEq, PartialOrd};
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::str;
+
+use crate::{with_session_globals, Edition, Span, DUMMY_SP};
+
+#[cfg(test)]
+mod tests;
+
+// The proc macro code for this is in `compiler/rustc_macros/src/symbols.rs`.
+symbols! {
+ // After modifying this list, adjust `is_special` and `is_used_keyword`/`is_unused_keyword`;
+ // this should rarely be necessary, though, if the keywords are kept in alphabetic order.
+ Keywords {
+ // Special reserved identifiers used internally for elided lifetimes,
+ // unnamed method parameters, crate root module, error recovery etc.
+ Empty: "",
+ PathRoot: "{{root}}",
+ DollarCrate: "$crate",
+ Underscore: "_",
+
+ // Keywords that are used in stable Rust.
+ As: "as",
+ Break: "break",
+ Const: "const",
+ Continue: "continue",
+ Crate: "crate",
+ Else: "else",
+ Enum: "enum",
+ Extern: "extern",
+ False: "false",
+ Fn: "fn",
+ For: "for",
+ If: "if",
+ Impl: "impl",
+ In: "in",
+ Let: "let",
+ Loop: "loop",
+ Match: "match",
+ Mod: "mod",
+ Move: "move",
+ Mut: "mut",
+ Pub: "pub",
+ Ref: "ref",
+ Return: "return",
+ SelfLower: "self",
+ SelfUpper: "Self",
+ Static: "static",
+ Struct: "struct",
+ Super: "super",
+ Trait: "trait",
+ True: "true",
+ Type: "type",
+ Unsafe: "unsafe",
+ Use: "use",
+ Where: "where",
+ While: "while",
+
+ // Keywords that are used in unstable Rust or reserved for future use.
+ Abstract: "abstract",
+ Become: "become",
+ Box: "box",
+ Do: "do",
+ Final: "final",
+ Macro: "macro",
+ Override: "override",
+ Priv: "priv",
+ Typeof: "typeof",
+ Unsized: "unsized",
+ Virtual: "virtual",
+ Yield: "yield",
+
+ // Edition-specific keywords that are used in stable Rust.
+ Async: "async", // >= 2018 Edition only
+ Await: "await", // >= 2018 Edition only
+ Dyn: "dyn", // >= 2018 Edition only
+
+ // Edition-specific keywords that are used in unstable Rust or reserved for future use.
+ Try: "try", // >= 2018 Edition only
+
+ // Special lifetime names
+ UnderscoreLifetime: "'_",
+ StaticLifetime: "'static",
+
+ // Weak keywords, have special meaning only in specific contexts.
+ Auto: "auto",
+ Catch: "catch",
+ Default: "default",
+ MacroRules: "macro_rules",
+ Raw: "raw",
+ Union: "union",
+ Yeet: "yeet",
+ }
+
+ // Pre-interned symbols that can be referred to with `rustc_span::sym::*`.
+ //
+ // The symbol is the stringified identifier unless otherwise specified, in
+ // which case the name should mention the non-identifier punctuation.
+ // E.g. `sym::proc_dash_macro` represents "proc-macro", and it shouldn't be
+ // called `sym::proc_macro` because then it's easy to mistakenly think it
+ // represents "proc_macro".
+ //
+ // As well as the symbols listed, there are symbols for the strings
+ // "0", "1", ..., "9", which are accessible via `sym::integer`.
+ //
+ // The proc macro will abort if symbols are not in alphabetical order (as
+ // defined by `impl Ord for str`) or if any symbols are duplicated. Vim
+ // users can sort the list by selecting it and executing the command
+ // `:'<,'>!LC_ALL=C sort`.
+ //
+ // There is currently no checking that all symbols are used; that would be
+ // nice to have.
+ Symbols {
+ AcqRel,
+ Acquire,
+ AddSubdiagnostic,
+ Alignment,
+ Any,
+ Arc,
+ Argument,
+ ArgumentV1,
+ ArgumentV1Methods,
+ Arguments,
+ AsMut,
+ AsRef,
+ AssertParamIsClone,
+ AssertParamIsCopy,
+ AssertParamIsEq,
+ AtomicBool,
+ AtomicI128,
+ AtomicI16,
+ AtomicI32,
+ AtomicI64,
+ AtomicI8,
+ AtomicIsize,
+ AtomicPtr,
+ AtomicU128,
+ AtomicU16,
+ AtomicU32,
+ AtomicU64,
+ AtomicU8,
+ AtomicUsize,
+ BTreeEntry,
+ BTreeMap,
+ BTreeSet,
+ BinaryHeap,
+ Borrow,
+ Break,
+ C,
+ CStr,
+ CString,
+ Capture,
+ Center,
+ Clone,
+ Continue,
+ Copy,
+ Count,
+ Cow,
+ Debug,
+ DebugStruct,
+ DebugTuple,
+ Decodable,
+ Decoder,
+ DecorateLint,
+ Default,
+ Deref,
+ DiagnosticMessage,
+ DirBuilder,
+ Display,
+ DoubleEndedIterator,
+ Duration,
+ Encodable,
+ Encoder,
+ Eq,
+ Equal,
+ Err,
+ Error,
+ File,
+ FileType,
+ Fn,
+ FnMut,
+ FnOnce,
+ FormatSpec,
+ Formatter,
+ From,
+ FromIterator,
+ FromResidual,
+ Future,
+ FxHashMap,
+ FxHashSet,
+ GlobalAlloc,
+ Hash,
+ HashMap,
+ HashMapEntry,
+ HashSet,
+ Hasher,
+ Implied,
+ Input,
+ Into,
+ IntoFuture,
+ IntoIterator,
+ IoRead,
+ IoWrite,
+ IrTyKind,
+ Is,
+ ItemContext,
+ Iterator,
+ Layout,
+ Left,
+ LinkedList,
+ LintPass,
+ Mutex,
+ N,
+ NonZeroI128,
+ NonZeroI16,
+ NonZeroI32,
+ NonZeroI64,
+ NonZeroI8,
+ NonZeroU128,
+ NonZeroU16,
+ NonZeroU32,
+ NonZeroU64,
+ NonZeroU8,
+ None,
+ Ok,
+ Option,
+ Ord,
+ Ordering,
+ OsStr,
+ OsString,
+ Output,
+ Param,
+ PartialEq,
+ PartialOrd,
+ Path,
+ PathBuf,
+ Pending,
+ Pin,
+ Pointer,
+ Poll,
+ ProcMacro,
+ ProcMacroHack,
+ ProceduralMasqueradeDummyType,
+ Range,
+ RangeFrom,
+ RangeFull,
+ RangeInclusive,
+ RangeTo,
+ RangeToInclusive,
+ Rc,
+ Ready,
+ Receiver,
+ Relaxed,
+ Release,
+ Result,
+ Return,
+ Right,
+ Rust,
+ RustcDecodable,
+ RustcEncodable,
+ Send,
+ SeqCst,
+ SessionDiagnostic,
+ SliceIndex,
+ Some,
+ String,
+ StructuralEq,
+ StructuralPartialEq,
+ SubdiagnosticMessage,
+ Sync,
+ Target,
+ ToOwned,
+ ToString,
+ Try,
+ TryCaptureGeneric,
+ TryCapturePrintable,
+ TryFrom,
+ TryInto,
+ Ty,
+ TyCtxt,
+ TyKind,
+ Unknown,
+ UnsafeArg,
+ Vec,
+ VecDeque,
+ Wrapper,
+ Yield,
+ _DECLS,
+ _Self,
+ __D,
+ __H,
+ __S,
+ __awaitee,
+ __try_var,
+ _d,
+ _e,
+ _task_context,
+ a32,
+ aarch64_target_feature,
+ aarch64_ver_target_feature,
+ abi,
+ abi_amdgpu_kernel,
+ abi_avr_interrupt,
+ abi_c_cmse_nonsecure_call,
+ abi_efiapi,
+ abi_msp430_interrupt,
+ abi_ptx,
+ abi_sysv64,
+ abi_thiscall,
+ abi_unadjusted,
+ abi_vectorcall,
+ abi_x86_interrupt,
+ abort,
+ aborts,
+ add,
+ add_assign,
+ add_with_overflow,
+ address,
+ adt_const_params,
+ advanced_slice_patterns,
+ adx_target_feature,
+ alias,
+ align,
+ align_offset,
+ alignstack,
+ all,
+ alloc,
+ alloc_error_handler,
+ alloc_layout,
+ alloc_zeroed,
+ allocator,
+ allocator_api,
+ allocator_internals,
+ allow,
+ allow_fail,
+ allow_internal_unsafe,
+ allow_internal_unstable,
+ allowed,
+ alu32,
+ always,
+ and,
+ and_then,
+ anonymous_lifetime_in_impl_trait,
+ any,
+ append_const_msg,
+ arbitrary_enum_discriminant,
+ arbitrary_self_types,
+ args,
+ arith_offset,
+ arm,
+ arm_target_feature,
+ array,
+ arrays,
+ as_ptr,
+ as_ref,
+ as_str,
+ asm,
+ asm_const,
+ asm_experimental_arch,
+ asm_sym,
+ asm_unwind,
+ assert,
+ assert_eq_macro,
+ assert_inhabited,
+ assert_macro,
+ assert_ne_macro,
+ assert_receiver_is_total_eq,
+ assert_uninit_valid,
+ assert_zero_valid,
+ asserting,
+ associated_const_equality,
+ associated_consts,
+ associated_type_bounds,
+ associated_type_defaults,
+ associated_types,
+ assume,
+ assume_init,
+ async_await,
+ async_closure,
+ atomic,
+ atomic_mod,
+ atomics,
+ att_syntax,
+ attr,
+ attr_literals,
+ attributes,
+ augmented_assignments,
+ auto_traits,
+ automatically_derived,
+ avx,
+ avx512_target_feature,
+ avx512bw,
+ avx512f,
+ await_macro,
+ bang,
+ begin_panic,
+ bench,
+ bin,
+ bind_by_move_pattern_guards,
+ bindings_after_at,
+ bitand,
+ bitand_assign,
+ bitor,
+ bitor_assign,
+ bitreverse,
+ bitxor,
+ bitxor_assign,
+ black_box,
+ block,
+ bool,
+ borrowck_graphviz_format,
+ borrowck_graphviz_postflow,
+ borrowck_graphviz_preflow,
+ box_free,
+ box_patterns,
+ box_syntax,
+ bpf_target_feature,
+ braced_empty_structs,
+ branch,
+ breakpoint,
+ bridge,
+ bswap,
+ c_str,
+ c_unwind,
+ c_variadic,
+ call,
+ call_mut,
+ call_once,
+ caller_location,
+ capture_disjoint_fields,
+ cdylib,
+ ceilf32,
+ ceilf64,
+ cfg,
+ cfg_accessible,
+ cfg_attr,
+ cfg_attr_multi,
+ cfg_doctest,
+ cfg_eval,
+ cfg_hide,
+ cfg_macro,
+ cfg_panic,
+ cfg_sanitize,
+ cfg_target_abi,
+ cfg_target_compact,
+ cfg_target_feature,
+ cfg_target_has_atomic,
+ cfg_target_has_atomic_equal_alignment,
+ cfg_target_has_atomic_load_store,
+ cfg_target_thread_local,
+ cfg_target_vendor,
+ cfg_version,
+ cfi,
+ char,
+ client,
+ clippy,
+ clobber_abi,
+ clone,
+ clone_closures,
+ clone_from,
+ closure,
+ closure_lifetime_binder,
+ closure_to_fn_coercion,
+ closure_track_caller,
+ cmp,
+ cmp_max,
+ cmp_min,
+ cmpxchg16b_target_feature,
+ cmse_nonsecure_entry,
+ coerce_unsized,
+ cold,
+ column,
+ column_macro,
+ compare_and_swap,
+ compare_exchange,
+ compare_exchange_weak,
+ compile_error,
+ compile_error_macro,
+ compiler,
+ compiler_builtins,
+ compiler_fence,
+ concat,
+ concat_bytes,
+ concat_idents,
+ concat_macro,
+ conservative_impl_trait,
+ console,
+ const_allocate,
+ const_async_blocks,
+ const_compare_raw_pointers,
+ const_constructor,
+ const_deallocate,
+ const_eval_limit,
+ const_eval_select,
+ const_eval_select_ct,
+ const_evaluatable_checked,
+ const_extern_fn,
+ const_fn,
+ const_fn_floating_point_arithmetic,
+ const_fn_fn_ptr_basics,
+ const_fn_trait_bound,
+ const_fn_transmute,
+ const_fn_union,
+ const_fn_unsize,
+ const_for,
+ const_format_args,
+ const_generic_defaults,
+ const_generics,
+ const_generics_defaults,
+ const_if_match,
+ const_impl_trait,
+ const_in_array_repeat_expressions,
+ const_indexing,
+ const_let,
+ const_loop,
+ const_mut_refs,
+ const_panic,
+ const_panic_fmt,
+ const_precise_live_drops,
+ const_raw_ptr_deref,
+ const_raw_ptr_to_usize_cast,
+ const_refs_to_cell,
+ const_trait,
+ const_trait_bound_opt_out,
+ const_trait_impl,
+ const_transmute,
+ const_try,
+ constant,
+ constructor,
+ contents,
+ context,
+ convert,
+ copy,
+ copy_closures,
+ copy_nonoverlapping,
+ copysignf32,
+ copysignf64,
+ core,
+ core_intrinsics,
+ core_panic,
+ core_panic_2015_macro,
+ core_panic_macro,
+ cosf32,
+ cosf64,
+ count,
+ cr,
+ crate_id,
+ crate_in_paths,
+ crate_local,
+ crate_name,
+ crate_type,
+ crate_visibility_modifier,
+ crt_dash_static: "crt-static",
+ cstring_type,
+ ctlz,
+ ctlz_nonzero,
+ ctpop,
+ cttz,
+ cttz_nonzero,
+ custom_attribute,
+ custom_derive,
+ custom_inner_attributes,
+ custom_test_frameworks,
+ d,
+ d32,
+ dbg_macro,
+ dead_code,
+ dealloc,
+ debug,
+ debug_assert_eq_macro,
+ debug_assert_macro,
+ debug_assert_ne_macro,
+ debug_assertions,
+ debug_struct,
+ debug_struct_fields_finish,
+ debug_trait_builder,
+ debug_tuple,
+ debug_tuple_fields_finish,
+ debugger_visualizer,
+ decl_macro,
+ declare_lint_pass,
+ decode,
+ default_alloc_error_handler,
+ default_lib_allocator,
+ default_method_body_is_const,
+ default_type_parameter_fallback,
+ default_type_params,
+ delay_span_bug_from_inside_query,
+ deny,
+ deprecated,
+ deprecated_safe,
+ deprecated_suggestion,
+ deref,
+ deref_method,
+ deref_mut,
+ deref_target,
+ derive,
+ derive_default_enum,
+ destruct,
+ destructuring_assignment,
+ diagnostic,
+ direct,
+ discriminant_kind,
+ discriminant_type,
+ discriminant_value,
+ dispatch_from_dyn,
+ display_trait,
+ div,
+ div_assign,
+ doc,
+ doc_alias,
+ doc_auto_cfg,
+ doc_cfg,
+ doc_cfg_hide,
+ doc_keyword,
+ doc_masked,
+ doc_notable_trait,
+ doc_primitive,
+ doc_spotlight,
+ doctest,
+ document_private_items,
+ dotdot: "..",
+ dotdot_in_tuple_patterns,
+ dotdoteq_in_patterns,
+ dreg,
+ dreg_low16,
+ dreg_low8,
+ drop,
+ drop_in_place,
+ drop_types_in_const,
+ dropck_eyepatch,
+ dropck_parametricity,
+ dylib,
+ dyn_metadata,
+ dyn_trait,
+ e,
+ edition_macro_pats,
+ edition_panic,
+ eh_catch_typeinfo,
+ eh_personality,
+ emit_enum,
+ emit_enum_variant,
+ emit_enum_variant_arg,
+ emit_struct,
+ emit_struct_field,
+ enable,
+ enclosing_scope,
+ encode,
+ end,
+ env,
+ env_macro,
+ eprint_macro,
+ eprintln_macro,
+ eq,
+ ermsb_target_feature,
+ exact_div,
+ except,
+ exchange_malloc,
+ exclusive_range_pattern,
+ exhaustive_integer_patterns,
+ exhaustive_patterns,
+ existential_type,
+ exp2f32,
+ exp2f64,
+ expect,
+ expected,
+ expf32,
+ expf64,
+ explicit_generic_args_with_impl_trait,
+ export_name,
+ expr,
+ extended_key_value_attributes,
+ extern_absolute_paths,
+ extern_crate_item_prelude,
+ extern_crate_self,
+ extern_in_paths,
+ extern_prelude,
+ extern_types,
+ external_doc,
+ f,
+ f16c_target_feature,
+ f32,
+ f64,
+ fabsf32,
+ fabsf64,
+ fadd_fast,
+ fake_variadic,
+ fdiv_fast,
+ feature,
+ fence,
+ ferris: "🦀",
+ fetch_update,
+ ffi,
+ ffi_const,
+ ffi_pure,
+ ffi_returns_twice,
+ field,
+ field_init_shorthand,
+ file,
+ file_macro,
+ fill,
+ finish,
+ flags,
+ float,
+ float_to_int_unchecked,
+ floorf32,
+ floorf64,
+ fmaf32,
+ fmaf64,
+ fmt,
+ fmt_as_str,
+ fmt_internals,
+ fmul_fast,
+ fn_align,
+ fn_must_use,
+ fn_mut,
+ fn_once,
+ fn_once_output,
+ forbid,
+ forget,
+ format,
+ format_args,
+ format_args_capture,
+ format_args_macro,
+ format_args_nl,
+ format_macro,
+ fp,
+ freeze,
+ freg,
+ frem_fast,
+ from,
+ from_desugaring,
+ from_generator,
+ from_iter,
+ from_method,
+ from_output,
+ from_residual,
+ from_size_align_unchecked,
+ from_usize,
+ from_yeet,
+ fsub_fast,
+ fundamental,
+ future,
+ future_trait,
+ gdb_script_file,
+ ge,
+ gen_future,
+ gen_kill,
+ generator,
+ generator_return,
+ generator_state,
+ generators,
+ generic_arg_infer,
+ generic_assert,
+ generic_associated_types,
+ generic_associated_types_extended,
+ generic_const_exprs,
+ generic_param_attrs,
+ get_context,
+ global_allocator,
+ global_asm,
+ globs,
+ gt,
+ half_open_range_patterns,
+ hash,
+ hexagon_target_feature,
+ hidden,
+ homogeneous_aggregate,
+ html_favicon_url,
+ html_logo_url,
+ html_no_source,
+ html_playground_url,
+ html_root_url,
+ hwaddress,
+ i,
+ i128,
+ i128_type,
+ i16,
+ i32,
+ i64,
+ i8,
+ ident,
+ if_let,
+ if_let_guard,
+ if_while_or_patterns,
+ ignore,
+ impl_header_lifetime_elision,
+ impl_lint_pass,
+ impl_macros,
+ impl_trait_in_bindings,
+ implied_by,
+ import,
+ import_shadowing,
+ imported_main,
+ in_band_lifetimes,
+ include,
+ include_bytes,
+ include_bytes_macro,
+ include_macro,
+ include_str,
+ include_str_macro,
+ inclusive_range_syntax,
+ index,
+ index_mut,
+ infer_outlives_requirements,
+ infer_static_outlives_requirements,
+ inherent_associated_types,
+ inlateout,
+ inline,
+ inline_const,
+ inline_const_pat,
+ inout,
+ instruction_set,
+ integer_: "integer",
+ integral,
+ intel,
+ into_future,
+ into_iter,
+ intra_doc_pointers,
+ intrinsics,
+ irrefutable_let_patterns,
+ isa_attribute,
+ isize,
+ issue,
+ issue_5723_bootstrap,
+ issue_tracker_base_url,
+ item,
+ item_like_imports,
+ iter,
+ iter_repeat,
+ keyword,
+ kind,
+ kreg,
+ kreg0,
+ label,
+ label_break_value,
+ lang,
+ lang_items,
+ large_assignments,
+ lateout,
+ lazy_normalization_consts,
+ le,
+ len,
+ let_chains,
+ let_else,
+ lhs,
+ lib,
+ libc,
+ lifetime,
+ likely,
+ line,
+ line_macro,
+ link,
+ link_args,
+ link_cfg,
+ link_llvm_intrinsics,
+ link_name,
+ link_ordinal,
+ link_section,
+ linkage,
+ linker,
+ lint_reasons,
+ literal,
+ load,
+ loaded_from_disk,
+ local,
+ local_inner_macros,
+ log10f32,
+ log10f64,
+ log2f32,
+ log2f64,
+ log_syntax,
+ logf32,
+ logf64,
+ loop_break_value,
+ lt,
+ macro_at_most_once_rep,
+ macro_attributes_in_derive_output,
+ macro_escape,
+ macro_export,
+ macro_lifetime_matcher,
+ macro_literal_matcher,
+ macro_metavar_expr,
+ macro_reexport,
+ macro_use,
+ macro_vis_matcher,
+ macros_in_extern,
+ main,
+ managed_boxes,
+ manually_drop,
+ map,
+ marker,
+ marker_trait_attr,
+ masked,
+ match_beginning_vert,
+ match_default_bindings,
+ matches_macro,
+ maxnumf32,
+ maxnumf64,
+ may_dangle,
+ may_unwind,
+ maybe_uninit,
+ maybe_uninit_uninit,
+ maybe_uninit_zeroed,
+ mem_discriminant,
+ mem_drop,
+ mem_forget,
+ mem_replace,
+ mem_size_of,
+ mem_size_of_val,
+ mem_uninitialized,
+ mem_variant_count,
+ mem_zeroed,
+ member_constraints,
+ memory,
+ memtag,
+ message,
+ meta,
+ metadata_type,
+ min_align_of,
+ min_align_of_val,
+ min_const_fn,
+ min_const_generics,
+ min_const_unsafe_fn,
+ min_specialization,
+ min_type_alias_impl_trait,
+ minnumf32,
+ minnumf64,
+ mips_target_feature,
+ miri,
+ misc,
+ mmx_reg,
+ modifiers,
+ module,
+ module_path,
+ module_path_macro,
+ more_qualified_paths,
+ more_struct_aliases,
+ movbe_target_feature,
+ move_ref_pattern,
+ move_size_limit,
+ mul,
+ mul_assign,
+ mul_with_overflow,
+ must_not_suspend,
+ must_use,
+ naked,
+ naked_functions,
+ name,
+ names,
+ native_link_modifiers,
+ native_link_modifiers_as_needed,
+ native_link_modifiers_bundle,
+ native_link_modifiers_verbatim,
+ native_link_modifiers_whole_archive,
+ natvis_file,
+ ne,
+ nearbyintf32,
+ nearbyintf64,
+ needs_allocator,
+ needs_drop,
+ needs_panic_runtime,
+ neg,
+ negate_unsigned,
+ negative_impls,
+ neon,
+ never,
+ never_type,
+ never_type_fallback,
+ new,
+ new_unchecked,
+ next,
+ nll,
+ no,
+ no_builtins,
+ no_core,
+ no_coverage,
+ no_crate_inject,
+ no_debug,
+ no_default_passes,
+ no_implicit_prelude,
+ no_inline,
+ no_link,
+ no_main,
+ no_mangle,
+ no_sanitize,
+ no_stack_check,
+ no_start,
+ no_std,
+ nomem,
+ non_ascii_idents,
+ non_exhaustive,
+ non_exhaustive_omitted_patterns_lint,
+ non_modrs_mods,
+ none_error,
+ nontemporal_store,
+ noop_method_borrow,
+ noop_method_clone,
+ noop_method_deref,
+ noreturn,
+ nostack,
+ not,
+ notable_trait,
+ note,
+ object_safe_for_dispatch,
+ of,
+ offset,
+ omit_gdb_pretty_printer_section,
+ on,
+ on_unimplemented,
+ oom,
+ opaque,
+ ops,
+ opt_out_copy,
+ optimize,
+ optimize_attribute,
+ optin_builtin_traits,
+ option,
+ option_env,
+ option_env_macro,
+ options,
+ or,
+ or_patterns,
+ other,
+ out,
+ overlapping_marker_traits,
+ owned_box,
+ packed,
+ panic,
+ panic_2015,
+ panic_2021,
+ panic_abort,
+ panic_bounds_check,
+ panic_display,
+ panic_fmt,
+ panic_handler,
+ panic_impl,
+ panic_implementation,
+ panic_info,
+ panic_location,
+ panic_no_unwind,
+ panic_runtime,
+ panic_str,
+ panic_unwind,
+ panicking,
+ param_attrs,
+ partial_cmp,
+ partial_ord,
+ passes,
+ pat,
+ pat_param,
+ path,
+ pattern_parentheses,
+ phantom_data,
+ pin,
+ platform_intrinsics,
+ plugin,
+ plugin_registrar,
+ plugins,
+ pointee_trait,
+ pointer,
+ pointer_trait_fmt,
+ poll,
+ position,
+ post_dash_lto: "post-lto",
+ powerpc_target_feature,
+ powf32,
+ powf64,
+ powif32,
+ powif64,
+ pre_dash_lto: "pre-lto",
+ precise_pointer_size_matching,
+ precision,
+ pref_align_of,
+ prefetch_read_data,
+ prefetch_read_instruction,
+ prefetch_write_data,
+ prefetch_write_instruction,
+ preg,
+ prelude,
+ prelude_import,
+ preserves_flags,
+ primitive,
+ print_macro,
+ println_macro,
+ proc_dash_macro: "proc-macro",
+ proc_macro,
+ proc_macro_attribute,
+ proc_macro_def_site,
+ proc_macro_derive,
+ proc_macro_expr,
+ proc_macro_gen,
+ proc_macro_hygiene,
+ proc_macro_internals,
+ proc_macro_mod,
+ proc_macro_non_items,
+ proc_macro_path_invoc,
+ profiler_builtins,
+ profiler_runtime,
+ ptr,
+ ptr_guaranteed_eq,
+ ptr_guaranteed_ne,
+ ptr_null,
+ ptr_null_mut,
+ ptr_offset_from,
+ ptr_offset_from_unsigned,
+ pub_macro_rules,
+ pub_restricted,
+ pure,
+ pushpop_unsafe,
+ qreg,
+ qreg_low4,
+ qreg_low8,
+ quad_precision_float,
+ question_mark,
+ quote,
+ range_inclusive_new,
+ raw_dylib,
+ raw_eq,
+ raw_identifiers,
+ raw_ref_op,
+ re_rebalance_coherence,
+ read_enum,
+ read_enum_variant,
+ read_enum_variant_arg,
+ read_struct,
+ read_struct_field,
+ readonly,
+ realloc,
+ reason,
+ receiver,
+ recursion_limit,
+ reexport_test_harness_main,
+ ref_unwind_safe_trait,
+ reference,
+ reflect,
+ reg,
+ reg16,
+ reg32,
+ reg64,
+ reg_abcd,
+ reg_byte,
+ reg_iw,
+ reg_nonzero,
+ reg_pair,
+ reg_ptr,
+ reg_upper,
+ register_attr,
+ register_tool,
+ relaxed_adts,
+ relaxed_struct_unsize,
+ rem,
+ rem_assign,
+ repr,
+ repr128,
+ repr_align,
+ repr_align_enum,
+ repr_packed,
+ repr_simd,
+ repr_transparent,
+ residual,
+ result,
+ rhs,
+ rintf32,
+ rintf64,
+ riscv_target_feature,
+ rlib,
+ rotate_left,
+ rotate_right,
+ roundf32,
+ roundf64,
+ rt,
+ rtm_target_feature,
+ rust,
+ rust_2015,
+ rust_2015_preview,
+ rust_2018,
+ rust_2018_preview,
+ rust_2021,
+ rust_2021_preview,
+ rust_2024,
+ rust_2024_preview,
+ rust_begin_unwind,
+ rust_cold_cc,
+ rust_eh_catch_typeinfo,
+ rust_eh_personality,
+ rust_eh_register_frames,
+ rust_eh_unregister_frames,
+ rust_oom,
+ rustc,
+ rustc_allocator,
+ rustc_allocator_nounwind,
+ rustc_allocator_zeroed,
+ rustc_allow_const_fn_unstable,
+ rustc_allow_incoherent_impl,
+ rustc_allowed_through_unstable_modules,
+ rustc_attrs,
+ rustc_box,
+ rustc_builtin_macro,
+ rustc_capture_analysis,
+ rustc_clean,
+ rustc_coherence_is_core,
+ rustc_const_stable,
+ rustc_const_unstable,
+ rustc_conversion_suggestion,
+ rustc_deallocator,
+ rustc_def_path,
+ rustc_diagnostic_item,
+ rustc_diagnostic_macros,
+ rustc_dirty,
+ rustc_do_not_const_check,
+ rustc_dummy,
+ rustc_dump_env_program_clauses,
+ rustc_dump_program_clauses,
+ rustc_dump_user_substs,
+ rustc_dump_vtable,
+ rustc_error,
+ rustc_evaluate_where_clauses,
+ rustc_expected_cgu_reuse,
+ rustc_has_incoherent_inherent_impls,
+ rustc_if_this_changed,
+ rustc_inherit_overflow_checks,
+ rustc_insignificant_dtor,
+ rustc_layout,
+ rustc_layout_scalar_valid_range_end,
+ rustc_layout_scalar_valid_range_start,
+ rustc_legacy_const_generics,
+ rustc_lint_diagnostics,
+ rustc_lint_opt_deny_field_access,
+ rustc_lint_opt_ty,
+ rustc_lint_query_instability,
+ rustc_macro_transparency,
+ rustc_main,
+ rustc_mir,
+ rustc_must_implement_one_of,
+ rustc_nonnull_optimization_guaranteed,
+ rustc_object_lifetime_default,
+ rustc_on_unimplemented,
+ rustc_outlives,
+ rustc_paren_sugar,
+ rustc_partition_codegened,
+ rustc_partition_reused,
+ rustc_pass_by_value,
+ rustc_peek,
+ rustc_peek_definite_init,
+ rustc_peek_liveness,
+ rustc_peek_maybe_init,
+ rustc_peek_maybe_uninit,
+ rustc_polymorphize_error,
+ rustc_private,
+ rustc_proc_macro_decls,
+ rustc_promotable,
+ rustc_reallocator,
+ rustc_regions,
+ rustc_reservation_impl,
+ rustc_serialize,
+ rustc_skip_array_during_method_dispatch,
+ rustc_specialization_trait,
+ rustc_stable,
+ rustc_std_internal_symbol,
+ rustc_strict_coherence,
+ rustc_symbol_name,
+ rustc_test_marker,
+ rustc_then_this_would_need,
+ rustc_trivial_field_reads,
+ rustc_unsafe_specialization_marker,
+ rustc_variance,
+ rustdoc,
+ rustdoc_internals,
+ rustfmt,
+ rvalue_static_promotion,
+ s,
+ sanitize,
+ sanitizer_runtime,
+ saturating_add,
+ saturating_sub,
+ self_in_typedefs,
+ self_struct_ctor,
+ semitransparent,
+ shadow_call_stack,
+ shl,
+ shl_assign,
+ should_panic,
+ shr,
+ shr_assign,
+ simd,
+ simd_add,
+ simd_and,
+ simd_arith_offset,
+ simd_as,
+ simd_bitmask,
+ simd_cast,
+ simd_ceil,
+ simd_div,
+ simd_eq,
+ simd_extract,
+ simd_fabs,
+ simd_fcos,
+ simd_fexp,
+ simd_fexp2,
+ simd_ffi,
+ simd_flog,
+ simd_flog10,
+ simd_flog2,
+ simd_floor,
+ simd_fma,
+ simd_fmax,
+ simd_fmin,
+ simd_fpow,
+ simd_fpowi,
+ simd_fsin,
+ simd_fsqrt,
+ simd_gather,
+ simd_ge,
+ simd_gt,
+ simd_insert,
+ simd_le,
+ simd_lt,
+ simd_mul,
+ simd_ne,
+ simd_neg,
+ simd_or,
+ simd_reduce_add_ordered,
+ simd_reduce_add_unordered,
+ simd_reduce_all,
+ simd_reduce_and,
+ simd_reduce_any,
+ simd_reduce_max,
+ simd_reduce_max_nanless,
+ simd_reduce_min,
+ simd_reduce_min_nanless,
+ simd_reduce_mul_ordered,
+ simd_reduce_mul_unordered,
+ simd_reduce_or,
+ simd_reduce_xor,
+ simd_rem,
+ simd_round,
+ simd_saturating_add,
+ simd_saturating_sub,
+ simd_scatter,
+ simd_select,
+ simd_select_bitmask,
+ simd_shl,
+ simd_shr,
+ simd_shuffle,
+ simd_sub,
+ simd_trunc,
+ simd_xor,
+ since,
+ sinf32,
+ sinf64,
+ size,
+ size_of,
+ size_of_val,
+ sized,
+ skip,
+ slice,
+ slice_len_fn,
+ slice_patterns,
+ slicing_syntax,
+ soft,
+ specialization,
+ speed,
+ spotlight,
+ sqrtf32,
+ sqrtf64,
+ sreg,
+ sreg_low16,
+ sse,
+ sse4a_target_feature,
+ stable,
+ staged_api,
+ start,
+ state,
+ static_in_const,
+ static_nobundle,
+ static_recursion,
+ staticlib,
+ std,
+ std_inject,
+ std_panic,
+ std_panic_2015_macro,
+ std_panic_macro,
+ stmt,
+ stmt_expr_attributes,
+ stop_after_dataflow,
+ store,
+ str,
+ str_split_whitespace,
+ str_trim,
+ str_trim_end,
+ str_trim_start,
+ strict_provenance,
+ stringify,
+ stringify_macro,
+ struct_field_attributes,
+ struct_inherit,
+ struct_variant,
+ structural_match,
+ structural_peq,
+ structural_teq,
+ sty,
+ sub,
+ sub_assign,
+ sub_with_overflow,
+ suggestion,
+ sym,
+ sync,
+ t32,
+ target,
+ target_abi,
+ target_arch,
+ target_endian,
+ target_env,
+ target_family,
+ target_feature,
+ target_feature_11,
+ target_has_atomic,
+ target_has_atomic_equal_alignment,
+ target_has_atomic_load_store,
+ target_os,
+ target_pointer_width,
+ target_target_vendor,
+ target_thread_local,
+ target_vendor,
+ task,
+ tbm_target_feature,
+ termination,
+ termination_trait,
+ termination_trait_test,
+ test,
+ test_2018_feature,
+ test_accepted_feature,
+ test_case,
+ test_removed_feature,
+ test_runner,
+ test_unstable_lint,
+ then_with,
+ thread,
+ thread_local,
+ thread_local_macro,
+ thumb2,
+ thumb_mode: "thumb-mode",
+ tmm_reg,
+ to_string,
+ to_vec,
+ todo_macro,
+ tool_attributes,
+ tool_lints,
+ trace_macros,
+ track_caller,
+ trait_alias,
+ trait_upcasting,
+ transmute,
+ transmute_trait,
+ transparent,
+ transparent_enums,
+ transparent_unions,
+ trivial_bounds,
+ truncf32,
+ truncf64,
+ try_blocks,
+ try_capture,
+ try_from,
+ try_into,
+ try_trait_v2,
+ tt,
+ tuple,
+ tuple_from_req,
+ tuple_indexing,
+ two_phase,
+ ty,
+ type_alias_enum_variants,
+ type_alias_impl_trait,
+ type_ascription,
+ type_changing_struct_update,
+ type_id,
+ type_length_limit,
+ type_macros,
+ type_name,
+ u128,
+ u16,
+ u32,
+ u64,
+ u8,
+ unaligned_volatile_load,
+ unaligned_volatile_store,
+ unboxed_closures,
+ unchecked_add,
+ unchecked_div,
+ unchecked_mul,
+ unchecked_rem,
+ unchecked_shl,
+ unchecked_shr,
+ unchecked_sub,
+ underscore_const_names,
+ underscore_imports,
+ underscore_lifetimes,
+ uniform_paths,
+ unimplemented_macro,
+ unit,
+ universal_impl_trait,
+ unix,
+ unlikely,
+ unmarked_api,
+ unpin,
+ unreachable,
+ unreachable_2015,
+ unreachable_2015_macro,
+ unreachable_2021,
+ unreachable_2021_macro,
+ unreachable_code,
+ unreachable_display,
+ unreachable_macro,
+ unrestricted_attribute_tokens,
+ unsafe_block_in_unsafe_fn,
+ unsafe_cell,
+ unsafe_no_drop_flag,
+ unsafe_pin_internals,
+ unsize,
+ unsized_fn_params,
+ unsized_locals,
+ unsized_tuple_coercion,
+ unstable,
+ unstable_location_reason_default: "this crate is being loaded from the sysroot, an \
+ unstable location; did you mean to load this crate \
+ from crates.io via `Cargo.toml` instead?",
+ untagged_unions,
+ unused_imports,
+ unused_qualifications,
+ unwind,
+ unwind_attributes,
+ unwind_safe_trait,
+ unwrap,
+ unwrap_or,
+ use_extern_macros,
+ use_nested_groups,
+ used,
+ used_with_arg,
+ using,
+ usize,
+ v1,
+ va_arg,
+ va_copy,
+ va_end,
+ va_list,
+ va_start,
+ val,
+ values,
+ var,
+ variant_count,
+ vec,
+ vec_macro,
+ version,
+ vfp2,
+ vis,
+ visible_private_types,
+ volatile,
+ volatile_copy_memory,
+ volatile_copy_nonoverlapping_memory,
+ volatile_load,
+ volatile_set_memory,
+ volatile_store,
+ vreg,
+ vreg_low16,
+ vtable_align,
+ vtable_size,
+ warn,
+ wasm_abi,
+ wasm_import_module,
+ wasm_target_feature,
+ while_let,
+ width,
+ windows,
+ windows_subsystem,
+ with_negative_coherence,
+ wrapping_add,
+ wrapping_mul,
+ wrapping_sub,
+ wreg,
+ write_bytes,
+ write_macro,
+ write_str,
+ writeln_macro,
+ x87_reg,
+ xer,
+ xmm_reg,
+ yeet_desugar_details,
+ yeet_expr,
+ ymm_reg,
+ zmm_reg,
+ }
+}
+
+#[derive(Copy, Clone, Eq, HashStable_Generic, Encodable, Decodable)]
+pub struct Ident {
+ pub name: Symbol,
+ pub span: Span,
+}
+
+impl Ident {
+ #[inline]
+ /// Constructs a new identifier from a symbol and a span.
+ pub const fn new(name: Symbol, span: Span) -> Ident {
+ Ident { name, span }
+ }
+
+ /// Constructs a new identifier with a dummy span.
+ #[inline]
+ pub const fn with_dummy_span(name: Symbol) -> Ident {
+ Ident::new(name, DUMMY_SP)
+ }
+
+ #[inline]
+ pub fn empty() -> Ident {
+ Ident::with_dummy_span(kw::Empty)
+ }
+
+ /// Maps a string to an identifier with a dummy span.
+ pub fn from_str(string: &str) -> Ident {
+ Ident::with_dummy_span(Symbol::intern(string))
+ }
+
+ /// Maps a string and a span to an identifier.
+ pub fn from_str_and_span(string: &str, span: Span) -> Ident {
+ Ident::new(Symbol::intern(string), span)
+ }
+
+ /// Replaces `lo` and `hi` with those from `span`, but keeps the hygiene context.
+ pub fn with_span_pos(self, span: Span) -> Ident {
+ Ident::new(self.name, span.with_ctxt(self.span.ctxt()))
+ }
+
+ pub fn without_first_quote(self) -> Ident {
+ Ident::new(Symbol::intern(self.as_str().trim_start_matches('\'')), self.span)
+ }
+
+ /// "Normalize" ident for use in comparisons using "item hygiene".
+ /// Identifiers with same string value become same if they came from the same macro 2.0 macro
+ /// (e.g., `macro` item, but not `macro_rules` item) and stay different if they came from
+ /// different macro 2.0 macros.
+ /// Technically, this operation strips all non-opaque marks from ident's syntactic context.
+ pub fn normalize_to_macros_2_0(self) -> Ident {
+ Ident::new(self.name, self.span.normalize_to_macros_2_0())
+ }
+
+ /// "Normalize" ident for use in comparisons using "local variable hygiene".
+ /// Identifiers with same string value become same if they came from the same non-transparent
+ /// macro (e.g., `macro` or `macro_rules!` items) and stay different if they came from different
+ /// non-transparent macros.
+ /// Technically, this operation strips all transparent marks from ident's syntactic context.
+ pub fn normalize_to_macro_rules(self) -> Ident {
+ Ident::new(self.name, self.span.normalize_to_macro_rules())
+ }
+
+ /// Access the underlying string. This is a slowish operation because it
+ /// requires locking the symbol interner.
+ ///
+ /// Note that the lifetime of the return value is a lie. See
+ /// `Symbol::as_str()` for details.
+ pub fn as_str(&self) -> &str {
+ self.name.as_str()
+ }
+}
+
+impl PartialEq for Ident {
+ fn eq(&self, rhs: &Self) -> bool {
+ self.name == rhs.name && self.span.eq_ctxt(rhs.span)
+ }
+}
+
+impl Hash for Ident {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.name.hash(state);
+ self.span.ctxt().hash(state);
+ }
+}
+
+impl fmt::Debug for Ident {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)?;
+ fmt::Debug::fmt(&self.span.ctxt(), f)
+ }
+}
+
+/// This implementation is supposed to be used in error messages, so it's expected to be identical
+/// to printing the original identifier token written in source code (`token_to_string`),
+/// except that AST identifiers don't keep the rawness flag, so we have to guess it.
+impl fmt::Display for Ident {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&IdentPrinter::new(self.name, self.is_raw_guess(), None), f)
+ }
+}
+
+/// This is the most general way to print identifiers.
+/// AST pretty-printer is used as a fallback for turning AST structures into token streams for
+/// proc macros. Additionally, proc macros may stringify their input and expect it to survive the
+/// stringification (especially true for proc macro derives written between Rust 1.15 and 1.30).
+/// So we need to pretty-print `$crate` in a way that preserves at least some of its
+/// hygiene data, most importantly the name of the crate it refers to.
+/// As a result we print `$crate` as `crate` if it refers to the local crate
+/// and as `::other_crate_name` if it refers to some other crate.
+/// Note that this is only done if the ident token is printed from inside AST pretty-printing,
+/// but not otherwise. Pretty-printing is the only way for proc macros to discover token contents,
+/// so we should not perform this lossy conversion if the top level call to the pretty-printer was
+/// done for a token stream or a single token.
+pub struct IdentPrinter {
+ symbol: Symbol,
+ is_raw: bool,
+ /// Span used for retrieving the crate name to which `$crate` refers to,
+ /// if this field is `None` then the `$crate` conversion doesn't happen.
+ convert_dollar_crate: Option<Span>,
+}
+
+impl IdentPrinter {
+ /// The most general `IdentPrinter` constructor. Do not use this.
+ pub fn new(symbol: Symbol, is_raw: bool, convert_dollar_crate: Option<Span>) -> IdentPrinter {
+ IdentPrinter { symbol, is_raw, convert_dollar_crate }
+ }
+
+ /// This implementation is supposed to be used when printing identifiers
+ /// as a part of pretty-printing for larger AST pieces.
+ /// Do not use this either.
+ pub fn for_ast_ident(ident: Ident, is_raw: bool) -> IdentPrinter {
+ IdentPrinter::new(ident.name, is_raw, Some(ident.span))
+ }
+}
+
+impl fmt::Display for IdentPrinter {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.is_raw {
+ f.write_str("r#")?;
+ } else if self.symbol == kw::DollarCrate {
+ if let Some(span) = self.convert_dollar_crate {
+ let converted = span.ctxt().dollar_crate_name();
+ if !converted.is_path_segment_keyword() {
+ f.write_str("::")?;
+ }
+ return fmt::Display::fmt(&converted, f);
+ }
+ }
+ fmt::Display::fmt(&self.symbol, f)
+ }
+}
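
To make the two special cases concrete, here is a minimal standalone sketch (a
hypothetical helper, not part of the patch; the real code obtains the converted
name from hygiene data via `span.ctxt().dollar_crate_name()`):

    // Hypothetical stand-in for IdentPrinter's Display logic. `converted` is
    // the crate name that hygiene recorded for `$crate` ("crate" when it
    // refers to the local crate). Raw identifiers simply gain an `r#` prefix.
    fn print_ident(symbol: &str, is_raw: bool, converted: Option<&str>) -> String {
        if is_raw {
            return format!("r#{symbol}");
        }
        if symbol == "$crate" {
            if let Some(name) = converted {
                // Path-segment keywords like `crate` need no leading `::`.
                return if name == "crate" { name.to_string() } else { format!("::{name}") };
            }
        }
        symbol.to_string()
    }

    fn main() {
        assert_eq!(print_ident("match", true, None), "r#match");
        assert_eq!(print_ident("$crate", false, Some("crate")), "crate");
        assert_eq!(print_ident("$crate", false, Some("serde")), "::serde");
    }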
+
+/// A newtype around `Ident` that calls [Ident::normalize_to_macro_rules] on
+/// construction.
+// FIXME(matthewj, petrochenkov) Use this more often, add a similar
+// `ModernIdent` struct and use that as well.
+#[derive(Copy, Clone, Eq, PartialEq, Hash)]
+pub struct MacroRulesNormalizedIdent(Ident);
+
+impl MacroRulesNormalizedIdent {
+ pub fn new(ident: Ident) -> Self {
+ Self(ident.normalize_to_macro_rules())
+ }
+}
+
+impl fmt::Debug for MacroRulesNormalizedIdent {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.0, f)
+ }
+}
+
+impl fmt::Display for MacroRulesNormalizedIdent {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+}
+
+/// An interned string.
+///
+/// Internally, a `Symbol` is implemented as an index, and all operations
+/// (including hashing, equality, and ordering) operate on that index. The use
+/// of `rustc_index::newtype_index!` means that `Option<Symbol>` only takes up 4 bytes,
+/// because `rustc_index::newtype_index!` reserves the last 256 values for tagging purposes.
+///
+/// Note that `Symbol` cannot directly be a `rustc_index::newtype_index!` because it
+/// implements `fmt::Debug`, `Encodable`, and `Decodable` in special ways.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Symbol(SymbolIndex);
+
+rustc_index::newtype_index! {
+ struct SymbolIndex { .. }
+}
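
The 4-byte claim is the standard niche optimization and can be checked against
the standard library's `NonZeroU32`, which reserves a value the same way
`newtype_index!` does (a small sketch, not part of the patch):

    use std::mem::size_of;
    use std::num::NonZeroU32;

    fn main() {
        // A plain u32 has no spare bit patterns, so Option<u32> needs a tag.
        assert_eq!(size_of::<Option<u32>>(), 8);
        // NonZeroU32 reserves the value 0, which Option reuses to encode
        // `None`; `newtype_index!` reserves its top 256 values the same way.
        assert_eq!(size_of::<Option<NonZeroU32>>(), 4);
    }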
+
+impl Symbol {
+ const fn new(n: u32) -> Self {
+ Symbol(SymbolIndex::from_u32(n))
+ }
+
+ /// Maps a string to its interned representation.
+ pub fn intern(string: &str) -> Self {
+ with_session_globals(|session_globals| session_globals.symbol_interner.intern(string))
+ }
+
+ /// Access the underlying string. This is a slowish operation because it
+ /// requires locking the symbol interner.
+ ///
+ /// Note that the lifetime of the return value is a lie. It's not the same
+ /// as `&self`, but actually tied to the lifetime of the underlying
+ /// interner. Interners are long-lived, and there are very few of them, and
+ /// this function is typically used for short-lived things, so in practice
+ /// it works out ok.
+ pub fn as_str(&self) -> &str {
+ with_session_globals(|session_globals| unsafe {
+ std::mem::transmute::<&str, &str>(session_globals.symbol_interner.get(*self))
+ })
+ }
+
+ pub fn as_u32(self) -> u32 {
+ self.0.as_u32()
+ }
+
+ pub fn is_empty(self) -> bool {
+ self == kw::Empty
+ }
+
+ /// This method is supposed to be used in error messages, so it's expected to be
+ /// identical to printing the original identifier token written in source code
+ /// (`token_to_string`, `Ident::to_string`), except that symbols don't keep the rawness flag
+ /// or edition, so we have to guess the rawness using the global edition.
+ pub fn to_ident_string(self) -> String {
+ Ident::with_dummy_span(self).to_string()
+ }
+}
+
+impl fmt::Debug for Symbol {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(self.as_str(), f)
+ }
+}
+
+impl fmt::Display for Symbol {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self.as_str(), f)
+ }
+}
+
+impl<S: Encoder> Encodable<S> for Symbol {
+ fn encode(&self, s: &mut S) {
+ s.emit_str(self.as_str());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for Symbol {
+ #[inline]
+ fn decode(d: &mut D) -> Symbol {
+ Symbol::intern(&d.read_str())
+ }
+}
+
+impl<CTX> HashStable<CTX> for Symbol {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.as_str().hash_stable(hcx, hasher);
+ }
+}
+
+impl<CTX> ToStableHashKey<CTX> for Symbol {
+ type KeyType = String;
+ #[inline]
+ fn to_stable_hash_key(&self, _: &CTX) -> String {
+ self.as_str().to_string()
+ }
+}
+
+#[derive(Default)]
+pub(crate) struct Interner(Lock<InternerInner>);
+
+// The `&'static str`s in this type actually point into the arena.
+//
+// The `FxHashMap`+`Vec` pair could be replaced by `FxIndexSet`, but #75278
+// found that to regress performance up to 2% in some cases. This might be
+// revisited after further improvements to `indexmap`.
+//
+// This type is private to prevent accidentally constructing more than one
+// `Interner` on the same thread, which makes it easy to mix up `Symbol`s
+// between `Interner`s.
+#[derive(Default)]
+struct InternerInner {
+ arena: DroplessArena,
+ names: FxHashMap<&'static str, Symbol>,
+ strings: Vec<&'static str>,
+}
+
+impl Interner {
+ fn prefill(init: &[&'static str]) -> Self {
+ Interner(Lock::new(InternerInner {
+ strings: init.into(),
+ names: init.iter().copied().zip((0..).map(Symbol::new)).collect(),
+ ..Default::default()
+ }))
+ }
+
+ #[inline]
+ fn intern(&self, string: &str) -> Symbol {
+ let mut inner = self.0.lock();
+ if let Some(&name) = inner.names.get(string) {
+ return name;
+ }
+
+ let name = Symbol::new(inner.strings.len() as u32);
+
+ // SAFETY: we convert from `&str` to `&[u8]`, clone it into the arena,
+ // and immediately convert the clone back to `&str`, all because there
+ // is no `inner.arena.alloc_str()` method. This is clearly safe.
+ let string: &str =
+ unsafe { str::from_utf8_unchecked(inner.arena.alloc_slice(string.as_bytes())) };
+
+ // SAFETY: we can extend the arena allocation to `'static` because we
+ // only access these while the arena is still alive.
+ let string: &'static str = unsafe { &*(string as *const str) };
+ inner.strings.push(string);
+
+ // This second hash table lookup can be avoided by using `RawEntryMut`,
+ // but this code path isn't hot enough for it to be worth it. See
+ // #91445 for details.
+ inner.names.insert(string, name);
+ name
+ }
+
+ // Get the symbol as a string. `Symbol::as_str()` should be used in
+ // preference to this function.
+ fn get(&self, symbol: Symbol) -> &str {
+ self.0.lock().strings[symbol.0.as_usize()]
+ }
+}
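
A safe, self-contained sketch of the same two-way mapping, with owned `String`s
standing in for the arena-backed `&'static str`s (the `unsafe` blocks disappear
at the cost of one extra allocation per interned string):

    use std::collections::HashMap;

    #[derive(Default)]
    struct ToyInterner {
        names: HashMap<String, u32>, // string -> index
        strings: Vec<String>,        // index -> string
    }

    impl ToyInterner {
        fn intern(&mut self, string: &str) -> u32 {
            if let Some(&idx) = self.names.get(string) {
                return idx; // already interned: same index as before
            }
            let idx = self.strings.len() as u32;
            self.strings.push(string.to_owned());
            self.names.insert(string.to_owned(), idx);
            idx
        }

        fn get(&self, idx: u32) -> &str {
            &self.strings[idx as usize]
        }
    }

    fn main() {
        let mut interner = ToyInterner::default();
        assert_eq!(interner.intern("dog"), 0);
        assert_eq!(interner.intern("dog"), 0); // same string, same index
        assert_eq!(interner.intern("cat"), 1);
        assert_eq!(interner.get(0), "dog");
    }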
+
+// This module has a very short name because it's used a lot.
+/// This module contains all the defined keyword `Symbol`s.
+///
+/// Given that `kw` is imported, use them like `kw::keyword_name`.
+/// For example `kw::Loop` or `kw::Break`.
+pub mod kw {
+ pub use super::kw_generated::*;
+}
+
+// This module has a very short name because it's used a lot.
+/// This module contains all the defined non-keyword `Symbol`s.
+///
+/// Given that `sym` is imported, use them like `sym::symbol_name`.
+/// For example `sym::rustfmt` or `sym::u8`.
+pub mod sym {
+ use super::Symbol;
+ use std::convert::TryInto;
+
+ #[doc(inline)]
+ pub use super::sym_generated::*;
+
+ // Used from a macro in `librustc_feature/accepted.rs`
+ pub use super::kw::MacroRules as macro_rules;
+
+ /// Get the symbol for an integer.
+ ///
+ /// The first few non-negative integers each have a static symbol and therefore
+ /// are fast.
+ pub fn integer<N: TryInto<usize> + Copy + ToString>(n: N) -> Symbol {
+ if let Result::Ok(idx) = n.try_into() {
+ if idx < 10 {
+ return Symbol::new(super::SYMBOL_DIGITS_BASE + idx as u32);
+ }
+ }
+ Symbol::intern(&n.to_string())
+ }
+}
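
The fast path relies only on "0" through "9" having been pre-interned
contiguously; a sketch with a made-up base index (the real constant is the
prefill index of the symbol "0"):

    const SYMBOL_DIGITS_BASE: u32 = 100; // hypothetical value

    fn integer_symbol_index(n: usize) -> Option<u32> {
        // Digits 0..=9 map directly to pre-interned symbols; anything larger
        // falls back to interning its decimal string.
        (n < 10).then(|| SYMBOL_DIGITS_BASE + n as u32)
    }

    fn main() {
        assert_eq!(integer_symbol_index(7), Some(107));
        assert_eq!(integer_symbol_index(42), None);
    }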
+
+impl Symbol {
+ fn is_special(self) -> bool {
+ self <= kw::Underscore
+ }
+
+ fn is_used_keyword_always(self) -> bool {
+ self >= kw::As && self <= kw::While
+ }
+
+ fn is_used_keyword_conditional(self, edition: impl FnOnce() -> Edition) -> bool {
+ (self >= kw::Async && self <= kw::Dyn) && edition() >= Edition::Edition2018
+ }
+
+ fn is_unused_keyword_always(self) -> bool {
+ self >= kw::Abstract && self <= kw::Yield
+ }
+
+ fn is_unused_keyword_conditional(self, edition: impl FnOnce() -> Edition) -> bool {
+ self == kw::Try && edition() >= Edition::Edition2018
+ }
+
+ pub fn is_reserved(self, edition: impl Copy + FnOnce() -> Edition) -> bool {
+ self.is_special()
+ || self.is_used_keyword_always()
+ || self.is_unused_keyword_always()
+ || self.is_used_keyword_conditional(edition)
+ || self.is_unused_keyword_conditional(edition)
+ }
+
+ /// A keyword or reserved identifier that can be used as a path segment.
+ pub fn is_path_segment_keyword(self) -> bool {
+ self == kw::Super
+ || self == kw::SelfLower
+ || self == kw::SelfUpper
+ || self == kw::Crate
+ || self == kw::PathRoot
+ || self == kw::DollarCrate
+ }
+
+ /// Returns `true` if the symbol is `true` or `false`.
+ pub fn is_bool_lit(self) -> bool {
+ self == kw::True || self == kw::False
+ }
+
+ /// Returns `true` if this symbol can be a raw identifier.
+ pub fn can_be_raw(self) -> bool {
+ self != kw::Empty && self != kw::Underscore && !self.is_path_segment_keyword()
+ }
+}
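
These range checks work only because the `symbols!` macro assigns indices in
declaration order, so every always-used keyword lands between `kw::As` and
`kw::While`. A standalone sketch with hypothetical indices:

    #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
    struct Symbol(u32);

    #[allow(non_upper_case_globals)]
    mod kw {
        use super::Symbol;
        // Made-up indices; the real ones come from the declaration order
        // inside the `symbols!` invocation.
        pub const As: Symbol = Symbol(8);
        pub const While: Symbol = Symbol(40);
    }

    fn is_used_keyword_always(s: Symbol) -> bool {
        // Two integer comparisons instead of a lookup table.
        s >= kw::As && s <= kw::While
    }

    fn main() {
        assert!(is_used_keyword_always(Symbol(20)));
        assert!(!is_used_keyword_always(Symbol(50)));
    }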
+
+impl Ident {
+ /// Returns `true` for reserved identifiers used internally for elided lifetimes,
+ /// unnamed method parameters, crate root module, error recovery, etc.
+ pub fn is_special(self) -> bool {
+ self.name.is_special()
+ }
+
+ /// Returns `true` if the token is a keyword used in the language.
+ pub fn is_used_keyword(self) -> bool {
+ // Note: `span.edition()` is relatively expensive, don't call it unless necessary.
+ self.name.is_used_keyword_always()
+ || self.name.is_used_keyword_conditional(|| self.span.edition())
+ }
+
+ /// Returns `true` if the token is a keyword reserved for possible future use.
+ pub fn is_unused_keyword(self) -> bool {
+ // Note: `span.edition()` is relatively expensive, don't call it unless necessary.
+ self.name.is_unused_keyword_always()
+ || self.name.is_unused_keyword_conditional(|| self.span.edition())
+ }
+
+ /// Returns `true` if the token is either a special identifier or a keyword.
+ pub fn is_reserved(self) -> bool {
+ // Note: `span.edition()` is relatively expensive, don't call it unless necessary.
+ self.name.is_reserved(|| self.span.edition())
+ }
+
+ /// A keyword or reserved identifier that can be used as a path segment.
+ pub fn is_path_segment_keyword(self) -> bool {
+ self.name.is_path_segment_keyword()
+ }
+
+ /// We see this identifier in a normal identifier position, like variable name or a type.
+ /// How was it written originally? Did it use the raw form? Let's try to guess.
+ pub fn is_raw_guess(self) -> bool {
+ self.name.can_be_raw() && self.is_reserved()
+ }
+}
diff --git a/compiler/rustc_span/src/symbol/tests.rs b/compiler/rustc_span/src/symbol/tests.rs
new file mode 100644
index 000000000..0958fce5f
--- /dev/null
+++ b/compiler/rustc_span/src/symbol/tests.rs
@@ -0,0 +1,25 @@
+use super::*;
+
+use crate::create_default_session_globals_then;
+
+#[test]
+fn interner_tests() {
+ let i = Interner::default();
+ // first one is zero:
+ assert_eq!(i.intern("dog"), Symbol::new(0));
+ // re-use gets the same entry:
+ assert_eq!(i.intern("dog"), Symbol::new(0));
+ // different string gets a different #:
+ assert_eq!(i.intern("cat"), Symbol::new(1));
+ assert_eq!(i.intern("cat"), Symbol::new(1));
+ // dog is still at zero
+ assert_eq!(i.intern("dog"), Symbol::new(0));
+}
+
+#[test]
+fn without_first_quote_test() {
+ create_default_session_globals_then(|| {
+ let i = Ident::from_str("'break");
+ assert_eq!(i.without_first_quote().name, kw::Break);
+ });
+}
diff --git a/compiler/rustc_span/src/tests.rs b/compiler/rustc_span/src/tests.rs
new file mode 100644
index 000000000..5b3915c33
--- /dev/null
+++ b/compiler/rustc_span/src/tests.rs
@@ -0,0 +1,43 @@
+use super::*;
+
+#[test]
+fn test_lookup_line() {
+ let source = "abcdefghijklm\nabcdefghij\n...".to_owned();
+ let sf =
+ SourceFile::new(FileName::Anon(0), source, BytePos(3), SourceFileHashAlgorithm::Sha256);
+ sf.lines(|lines| assert_eq!(lines, &[BytePos(3), BytePos(17), BytePos(28)]));
+
+ assert_eq!(sf.lookup_line(BytePos(0)), None);
+ assert_eq!(sf.lookup_line(BytePos(3)), Some(0));
+ assert_eq!(sf.lookup_line(BytePos(4)), Some(0));
+
+ assert_eq!(sf.lookup_line(BytePos(16)), Some(0));
+ assert_eq!(sf.lookup_line(BytePos(17)), Some(1));
+ assert_eq!(sf.lookup_line(BytePos(18)), Some(1));
+
+ assert_eq!(sf.lookup_line(BytePos(28)), Some(2));
+ assert_eq!(sf.lookup_line(BytePos(29)), Some(2));
+}
+
+#[test]
+fn test_normalize_newlines() {
+ fn check(before: &str, after: &str, expected_positions: &[u32]) {
+ let mut actual = before.to_string();
+ let mut actual_positions = vec![];
+ normalize_newlines(&mut actual, &mut actual_positions);
+ let actual_positions: Vec<_> = actual_positions.into_iter().map(|nc| nc.pos.0).collect();
+ assert_eq!(actual.as_str(), after);
+ assert_eq!(actual_positions, expected_positions);
+ }
+ check("", "", &[]);
+ check("\n", "\n", &[]);
+ check("\r", "\r", &[]);
+ check("\r\r", "\r\r", &[]);
+ check("\r\n", "\n", &[1]);
+ check("hello world", "hello world", &[]);
+ check("hello\nworld", "hello\nworld", &[]);
+ check("hello\r\nworld", "hello\nworld", &[6]);
+ check("\r\nhello\r\nworld\r\n", "\nhello\nworld\n", &[1, 7, 13]);
+ check("\r\r\n", "\r\n", &[2]);
+ check("hello\rworld", "hello\rworld", &[]);
+}
diff --git a/compiler/rustc_symbol_mangling/Cargo.toml b/compiler/rustc_symbol_mangling/Cargo.toml
new file mode 100644
index 000000000..b104a40c2
--- /dev/null
+++ b/compiler/rustc_symbol_mangling/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "rustc_symbol_mangling"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+bitflags = "1.2.1"
+tracing = "0.1"
+punycode = "0.4.0"
+rustc-demangle = "0.1.21"
+
+rustc_span = { path = "../rustc_span" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_target = { path = "../rustc_target" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_session = { path = "../rustc_session" }
diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs
new file mode 100644
index 000000000..9241fd82c
--- /dev/null
+++ b/compiler/rustc_symbol_mangling/src/legacy.rs
@@ -0,0 +1,464 @@
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::def_id::CrateNum;
+use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
+use rustc_middle::ty::print::{PrettyPrinter, Print, Printer};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
+use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeVisitable};
+use rustc_middle::util::common::record_time;
+
+use tracing::debug;
+
+use std::fmt::{self, Write};
+use std::mem::{self, discriminant};
+
+pub(super) fn mangle<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: Instance<'tcx>,
+ instantiating_crate: Option<CrateNum>,
+) -> String {
+ let def_id = instance.def_id();
+
+ // We want to compute the "type" of this item. Unfortunately, some
+ // kinds of items (e.g., closures) don't have an entry in the
+ // item-type array. So walk back up to find the closest parent
+ // that DOES have an entry.
+ let mut ty_def_id = def_id;
+ let instance_ty;
+ loop {
+ let key = tcx.def_key(ty_def_id);
+ match key.disambiguated_data.data {
+ DefPathData::TypeNs(_) | DefPathData::ValueNs(_) => {
+ instance_ty = tcx.type_of(ty_def_id);
+ debug!(?instance_ty);
+ break;
+ }
+ _ => {
+ // if we're making a symbol for something, there ought
+ // to be a value or type-def or something in there
+ // *somewhere*
+ ty_def_id.index = key.parent.unwrap_or_else(|| {
+ bug!(
+ "finding type for {:?}, encountered def-id {:?} with no \
+ parent",
+ def_id,
+ ty_def_id
+ );
+ });
+ }
+ }
+ }
+
+ // Erase regions because they may not be deterministic when hashed
+ // and should not matter anyhow.
+ let instance_ty = tcx.erase_regions(instance_ty);
+
+ let hash = get_symbol_hash(tcx, instance, instance_ty, instantiating_crate);
+
+ let mut printer = SymbolPrinter { tcx, path: SymbolPath::new(), keep_within_component: false };
+ printer
+ .print_def_path(
+ def_id,
+ if let ty::InstanceDef::DropGlue(_, _) = instance.def {
+ // Add the name of the dropped type to the symbol name
+ &*instance.substs
+ } else {
+ &[]
+ },
+ )
+ .unwrap();
+
+ if let ty::InstanceDef::VTableShim(..) = instance.def {
+ let _ = printer.write_str("{{vtable-shim}}");
+ }
+
+ if let ty::InstanceDef::ReifyShim(..) = instance.def {
+ let _ = printer.write_str("{{reify-shim}}");
+ }
+
+ printer.path.finish(hash)
+}
+
+fn get_symbol_hash<'tcx>(
+ tcx: TyCtxt<'tcx>,
+
+ // instance this name will be for
+ instance: Instance<'tcx>,
+
+ // type of the item, without any generic
+ // parameters substituted; this is
+ // included in the hash as a kind of
+ // safeguard.
+ item_type: Ty<'tcx>,
+
+ instantiating_crate: Option<CrateNum>,
+) -> u64 {
+ let def_id = instance.def_id();
+ let substs = instance.substs;
+ debug!("get_symbol_hash(def_id={:?}, parameters={:?})", def_id, substs);
+
+ tcx.with_stable_hashing_context(|mut hcx| {
+ let mut hasher = StableHasher::new();
+
+ record_time(&tcx.sess.perf_stats.symbol_hash_time, || {
+ // the main symbol name is not necessarily unique; hash in the
+ // compiler's internal def-path, guaranteeing each symbol has a
+ // truly unique path
+ tcx.def_path_hash(def_id).hash_stable(&mut hcx, &mut hasher);
+
+ // Include the main item-type. Note that, in this case, the
+ // assertions about `needs_subst` may not hold, but this item-type
+ // ought to be the same for every reference anyway.
+ assert!(!item_type.has_erasable_regions());
+ hcx.while_hashing_spans(false, |hcx| {
+ item_type.hash_stable(hcx, &mut hasher);
+
+ // If this is a function, we hash the signature as well.
+ // This is not *strictly* needed, but it may help in some
+ // situations, see the `run-make/a-b-a-linker-guard` test.
+ if let ty::FnDef(..) = item_type.kind() {
+ item_type.fn_sig(tcx).hash_stable(hcx, &mut hasher);
+ }
+
+ // also include any type parameters (for generic items)
+ substs.hash_stable(hcx, &mut hasher);
+
+ if let Some(instantiating_crate) = instantiating_crate {
+ tcx.def_path_hash(instantiating_crate.as_def_id())
+ .stable_crate_id()
+ .hash_stable(hcx, &mut hasher);
+ }
+
+ // We want to avoid accidental collision between different types of instances.
+ // Especially, `VTableShim`s and `ReifyShim`s may overlap with their original
+ // instances without this.
+ discriminant(&instance.def).hash_stable(hcx, &mut hasher);
+ });
+ });
+
+ // 64 bits should be enough to avoid collisions.
+ hasher.finish::<u64>()
+ })
+}
+
+// Follow C++ namespace-mangling style, see
+// https://en.wikipedia.org/wiki/Name_mangling for more info.
+//
+// It turns out that on macOS you can actually have arbitrary symbols in
+// function names (at least when given to LLVM), but this is not possible
+// when using unix's linker. Perhaps one day when we just use a linker from LLVM
+// we won't need to do this name mangling. The problem with name mangling is
+// that it seriously limits the available characters. For example we can't
+// have things like &T in symbol names when one would theoretically
+// want them for things like impls of traits on that type.
+//
+// To be able to work on all platforms and get *some* reasonable output, we
+// use C++ name-mangling.
+#[derive(Debug)]
+struct SymbolPath {
+ result: String,
+ temp_buf: String,
+}
+
+impl SymbolPath {
+ fn new() -> Self {
+ let mut result =
+ SymbolPath { result: String::with_capacity(64), temp_buf: String::with_capacity(16) };
+ result.result.push_str("_ZN"); // _Z == Begin name-sequence, N == nested
+ result
+ }
+
+ fn finalize_pending_component(&mut self) {
+ if !self.temp_buf.is_empty() {
+ let _ = write!(self.result, "{}{}", self.temp_buf.len(), self.temp_buf);
+ self.temp_buf.clear();
+ }
+ }
+
+ fn finish(mut self, hash: u64) -> String {
+ self.finalize_pending_component();
+ // E = end name-sequence
+ let _ = write!(self.result, "17h{:016x}E", hash);
+ self.result
+ }
+}
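
A standalone sketch of the resulting shape (made-up path components and hash):
each component is length-prefixed, and `17h` plus 16 hex digits is itself a
17-byte component carrying the hash, with `E` closing the name-sequence:

    use std::fmt::Write;

    fn toy_mangle(components: &[&str], hash: u64) -> String {
        let mut out = String::from("_ZN"); // begin nested name-sequence
        for c in components {
            let _ = write!(out, "{}{}", c.len(), c); // length-prefixed component
        }
        let _ = write!(out, "17h{:016x}E", hash); // hash component + end marker
        out
    }

    fn main() {
        assert_eq!(
            toy_mangle(&["mycrate", "module", "func"], 0x1234_5678_9abc_def0),
            "_ZN7mycrate6module4func17h123456789abcdef0E"
        );
    }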
+
+struct SymbolPrinter<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ path: SymbolPath,
+
+ // When `true`, `finalize_pending_component` isn't used.
+ // This is needed when recursing into `path_qualified`,
+ // or `path_generic_args`, as any nested paths are
+ // logically within one component.
+ keep_within_component: bool,
+}
+
+// HACK(eddyb) this relies on using the `fmt` interface to get
+// `PrettyPrinter`, aka pretty-printing of e.g. types in paths;
+// symbol names should have their own printing machinery.
+
+impl<'tcx> Printer<'tcx> for &mut SymbolPrinter<'tcx> {
+ type Error = fmt::Error;
+
+ type Path = Self;
+ type Region = Self;
+ type Type = Self;
+ type DynExistential = Self;
+ type Const = Self;
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
+ Ok(self)
+ }
+
+ fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ match *ty.kind() {
+ // Print all nominal types as paths (unlike `pretty_print_type`).
+ ty::FnDef(def_id, substs)
+ | ty::Opaque(def_id, substs)
+ | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
+ | ty::Closure(def_id, substs)
+ | ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
+
+ // The `pretty_print_type` formatting of array size depends on
+ // the `-Zverbose` flag, so we cannot reuse it here.
+ ty::Array(ty, size) => {
+ self.write_str("[")?;
+ self = self.print_type(ty)?;
+ self.write_str("; ")?;
+ if let Some(size) = size.kind().try_to_bits(self.tcx().data_layout.pointer_size) {
+ write!(self, "{}", size)?
+ } else if let ty::ConstKind::Param(param) = size.kind() {
+ self = param.print(self)?
+ } else {
+ self.write_str("_")?
+ }
+ self.write_str("]")?;
+ Ok(self)
+ }
+
+ _ => self.pretty_print_type(ty),
+ }
+ }
+
+ fn print_dyn_existential(
+ mut self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ let mut first = true;
+ for p in predicates {
+ if !first {
+ write!(self, "+")?;
+ }
+ first = false;
+ self = p.print(self)?;
+ }
+ Ok(self)
+ }
+
+ fn print_const(self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+ // only print integers
+ match (ct.kind(), ct.ty().kind()) {
+ (ty::ConstKind::Value(ty::ValTree::Leaf(scalar)), ty::Int(_) | ty::Uint(_)) => {
+ // The `pretty_print_const` formatting depends on the
+ // `-Zverbose` flag, so we cannot reuse it here.
+ let signed = matches!(ct.ty().kind(), ty::Int(_));
+ write!(
+ self,
+ "{:#?}",
+ ty::ConstInt::new(scalar, signed, ct.ty().is_ptr_sized_integral())
+ )?;
+ }
+ _ => self.write_str("_")?,
+ }
+ Ok(self)
+ }
+
+ fn path_crate(self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+ self.write_str(self.tcx.crate_name(cnum).as_str())?;
+ Ok(self)
+ }
+ fn path_qualified(
+ self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ // Similar to `pretty_path_qualified`, but for the other
+ // types that are printed as paths (see `print_type` above).
+ match self_ty.kind() {
+ ty::FnDef(..)
+ | ty::Opaque(..)
+ | ty::Projection(_)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ if trait_ref.is_none() =>
+ {
+ self.print_type(self_ty)
+ }
+
+ _ => self.pretty_path_qualified(self_ty, trait_ref),
+ }
+ }
+
+ fn path_append_impl(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _disambiguated_data: &DisambiguatedDefPathData,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self.pretty_path_append_impl(
+ |mut cx| {
+ cx = print_prefix(cx)?;
+
+ if cx.keep_within_component {
+ // HACK(eddyb) print the path similarly to how `FmtPrinter` prints it.
+ cx.write_str("::")?;
+ } else {
+ cx.path.finalize_pending_component();
+ }
+
+ Ok(cx)
+ },
+ self_ty,
+ trait_ref,
+ )
+ }
+ fn path_append(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ // Skip `::{{extern}}` blocks and `::{{constructor}}` on tuple/unit structs.
+ if let DefPathData::ForeignMod | DefPathData::Ctor = disambiguated_data.data {
+ return Ok(self);
+ }
+
+ if self.keep_within_component {
+ // HACK(eddyb) print the path similarly to how `FmtPrinter` prints it.
+ self.write_str("::")?;
+ } else {
+ self.path.finalize_pending_component();
+ }
+
+ write!(self, "{}", disambiguated_data.data)?;
+
+ Ok(self)
+ }
+ fn path_generic_args(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ let args =
+ args.iter().cloned().filter(|arg| !matches!(arg.unpack(), GenericArgKind::Lifetime(_)));
+
+ if args.clone().next().is_some() {
+ self.generic_delimiters(|cx| cx.comma_sep(args))
+ } else {
+ Ok(self)
+ }
+ }
+}
+
+impl<'tcx> PrettyPrinter<'tcx> for &mut SymbolPrinter<'tcx> {
+ fn should_print_region(&self, _region: ty::Region<'_>) -> bool {
+ false
+ }
+ fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
+ {
+ if let Some(first) = elems.next() {
+ self = first.print(self)?;
+ for elem in elems {
+ self.write_str(",")?;
+ self = elem.print(self)?;
+ }
+ }
+ Ok(self)
+ }
+
+ fn generic_delimiters(
+ mut self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ ) -> Result<Self, Self::Error> {
+ write!(self, "<")?;
+
+ let kept_within_component = mem::replace(&mut self.keep_within_component, true);
+ self = f(self)?;
+ self.keep_within_component = kept_within_component;
+
+ write!(self, ">")?;
+
+ Ok(self)
+ }
+}
+
+impl fmt::Write for SymbolPrinter<'_> {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ // Name sanitization. LLVM will happily accept identifiers with weird names, but
+ // gas doesn't!
+ // gas accepts the following characters in symbols: a-z, A-Z, 0-9, ., _, $
+ // NVPTX assembly has stricter naming rules than gas, so additionally, dots
+ // are replaced with '$' there.
+
+ for c in s.chars() {
+ if self.path.temp_buf.is_empty() {
+ match c {
+ 'a'..='z' | 'A'..='Z' | '_' => {}
+ _ => {
+ // Underscore-qualify anything that didn't start as an ident.
+ self.path.temp_buf.push('_');
+ }
+ }
+ }
+ match c {
+ // Escape these with $ sequences
+ '@' => self.path.temp_buf.push_str("$SP$"),
+ '*' => self.path.temp_buf.push_str("$BP$"),
+ '&' => self.path.temp_buf.push_str("$RF$"),
+ '<' => self.path.temp_buf.push_str("$LT$"),
+ '>' => self.path.temp_buf.push_str("$GT$"),
+ '(' => self.path.temp_buf.push_str("$LP$"),
+ ')' => self.path.temp_buf.push_str("$RP$"),
+ ',' => self.path.temp_buf.push_str("$C$"),
+
+ '-' | ':' | '.' if self.tcx.has_strict_asm_symbol_naming() => {
+ // NVPTX doesn't support these characters in symbol names.
+ self.path.temp_buf.push('$')
+ }
+
+ // '.' doesn't occur in types and functions, so reuse it
+ // for ':' and '-'
+ '-' | ':' => self.path.temp_buf.push('.'),
+
+ // Avoid crashing LLVM in certain (LTO-related) situations, see #60925.
+ 'm' if self.path.temp_buf.ends_with(".llv") => self.path.temp_buf.push_str("$u6d$"),
+
+ // These are legal symbols
+ 'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '.' | '$' => self.path.temp_buf.push(c),
+
+ _ => {
+ self.path.temp_buf.push('$');
+ for c in c.escape_unicode().skip(1) {
+ match c {
+ '{' => {}
+ '}' => self.path.temp_buf.push('$'),
+ c => self.path.temp_buf.push(c),
+ }
+ }
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
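
A self-contained sketch of the escaping step (only a few of the `$XX$`
sequences are reproduced; the real code above additionally handles leading
characters, the NVPTX rules, and the `.llvm` workaround):

    fn sanitize(s: &str) -> String {
        let mut out = String::new();
        for c in s.chars() {
            match c {
                '&' => out.push_str("$RF$"),
                '<' => out.push_str("$LT$"),
                '>' => out.push_str("$GT$"),
                'a'..='z' | 'A'..='Z' | '0'..='9' | '_' | '.' | '$' => out.push(c),
                _ => {
                    // Everything else becomes `$u..$`, reusing the digits
                    // of char::escape_unicode ("\u{20}" -> "$u20$").
                    out.push('$');
                    for e in c.escape_unicode().skip(1) {
                        match e {
                            '{' => {}
                            '}' => out.push('$'),
                            e => out.push(e),
                        }
                    }
                }
            }
        }
        out
    }

    fn main() {
        assert_eq!(sanitize("<&T>"), "$LT$$RF$T$GT$");
        assert_eq!(sanitize("a b"), "a$u20$b");
    }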
diff --git a/compiler/rustc_symbol_mangling/src/lib.rs b/compiler/rustc_symbol_mangling/src/lib.rs
new file mode 100644
index 000000000..5fc992023
--- /dev/null
+++ b/compiler/rustc_symbol_mangling/src/lib.rs
@@ -0,0 +1,277 @@
+//! The Rust Linkage Model and Symbol Names
+//! =======================================
+//!
+//! The semantic model of Rust linkage is, broadly, that "there's no global
+//! namespace" between crates. Our aim is to preserve the illusion of this
+//! model despite the fact that it's not *quite* possible to implement on
+//! modern linkers. We initially didn't use system linkers at all, but have
+//! been convinced of their utility.
+//!
+//! There are a few issues to handle:
+//!
+//! - Linkers operate on a flat namespace, so we have to flatten names.
+//! We do this using the C++ namespace-mangling technique. Foo::bar
+//! symbols and such.
+//!
+//! - Symbols for distinct items with the same *name* need to get different
+//! linkage-names. Examples of this are monomorphizations of functions or
+//! items within anonymous scopes that end up having the same path.
+//!
+//! - Symbols in different crates but with same names "within" the crate need
+//! to get different linkage-names.
+//!
+//! - Symbol names should be deterministic: Two consecutive runs of the
+//! compiler over the same code base should produce the same symbol names for
+//! the same items.
+//!
+//! - Symbol names should not depend on any global properties of the code base,
+//! so that small modifications to the code base do not result in all symbols
+//! changing. In previous versions of the compiler, symbol names incorporated
+//! the SVH (Stable Version Hash) of the crate. This scheme turned out to be
+//! infeasible when used in conjunction with incremental compilation because
+//! small code changes would invalidate all symbols generated previously.
+//!
+//! - Even symbols from different versions of the same crate should be able to
+//! live next to each other without conflict.
+//!
+//! In order to fulfill the above requirements the following scheme is used by
+//! the compiler:
+//!
+//! The main tool for avoiding naming conflicts is the incorporation of a 64-bit
+//! hash value into every exported symbol name. Anything that makes a difference
+//! to the symbol being named, but does not show up in the regular path, needs to
+//! be fed into this hash:
+//!
+//! - Different monomorphizations of the same item have the same path but differ
+//! in their concrete type parameters, so these parameters are part of the
+//! data being digested for the symbol hash.
+//!
+//! - Rust allows items to be defined in anonymous scopes, such as in
+//! `fn foo() { { fn bar() {} } { fn bar() {} } }`. Both `bar` functions have
+//! the path `foo::bar`, since the anonymous scopes do not contribute to the
+//! path of an item. The compiler already handles this case via so-called
+//! disambiguating `DefPaths` which use indices to distinguish items with the
+//! same name. The DefPaths of the functions above are thus `foo[0]::bar[0]`
+//! and `foo[0]::bar[1]`. In order to incorporate this disambiguation
+//! information into the symbol name too, these indices are fed into the
+//! symbol hash, so that the above two symbols would end up with different
+//! hash values.
+//!
+//! The two measures described above suffice to avoid intra-crate conflicts. In
+//! order to also avoid inter-crate conflicts two more measures are taken:
+//!
+//! - The name of the crate containing the symbol is prepended to the symbol
+//! name, i.e., symbols are "crate qualified". For example, a function `foo` in
+//! module `bar` in crate `baz` would get a symbol name like
+//! `baz::bar::foo::{hash}` instead of just `bar::foo::{hash}`. This avoids
+//! simple conflicts between functions from different crates.
+//!
+//! - In order to be able to also use symbols from two versions of the same
+//! crate (which naturally also have the same name), a stronger measure is
+//! required: The compiler accepts an arbitrary "disambiguator" value via the
+//! `-C metadata` command-line argument. This disambiguator is then fed into
+//! the symbol hash of every exported item. Consequently, the symbols in two
+//! identical crates but with different disambiguators are not in conflict
+//! with each other. This facility is mainly intended to be used by build
+//! tools like Cargo.
+//!
+//! A note on symbol name stability
+//! -------------------------------
+//! Previous versions of the compiler resorted to feeding NodeIds into the
+//! symbol hash in order to disambiguate between items with the same path. The
+//! current version of the name generation algorithm takes great care not to do
+//! that, since NodeIds are notoriously unstable: A small change to the
+//! code base will offset all NodeIds after the change and thus, much as using
+//! the SVH in the hash, invalidate an unbounded number of symbol names. This
+//! makes re-using previously compiled code for incremental compilation
+//! virtually impossible. Thus, symbol hash generation exclusively relies on
+//! DefPaths which are much more robust in the face of changes to the code base.
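+//!
+//! As a loose illustration only (the actual hash in `legacy.rs` digests far
+//! more data, via the compiler's stable hasher), the scheme amounts to:
+//!
+//! ```ignore (illustrative)
+//! use std::collections::hash_map::DefaultHasher;
+//! use std::hash::{Hash, Hasher};
+//!
+//! fn symbol_hash(disambiguated_def_path: &str, substs: &str, crate_metadata: &str) -> u64 {
+//!     let mut hasher = DefaultHasher::new();
+//!     disambiguated_def_path.hash(&mut hasher); // e.g. `foo[0]::bar[1]`
+//!     substs.hash(&mut hasher); // concrete type parameters
+//!     crate_metadata.hash(&mut hasher); // the `-C metadata` disambiguator
+//!     hasher.finish()
+//! }
+//! ```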
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(never_type)]
+#![recursion_limit = "256"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate rustc_middle;
+
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{CrateNum, LOCAL_CRATE};
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::mir::mono::{InstantiationMode, MonoItem};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, Instance, TyCtxt};
+use rustc_session::config::SymbolManglingVersion;
+
+use tracing::debug;
+
+mod legacy;
+mod v0;
+
+pub mod test;
+pub mod typeid;
+
+/// This function computes the symbol name for the given `instance` and the
+/// given instantiating crate. That is, if you know that instance X is
+/// instantiated in crate Y, this is the symbol name this instance would have.
+pub fn symbol_name_for_instance_in_crate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: Instance<'tcx>,
+ instantiating_crate: CrateNum,
+) -> String {
+ compute_symbol_name(tcx, instance, || instantiating_crate)
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { symbol_name: symbol_name_provider, ..*providers };
+}
+
+// The `symbol_name` query provides the symbol name for calling a given
+// instance from the local crate. In particular, it will also look up the
+// correct symbol name of instances from upstream crates.
+fn symbol_name_provider<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty::SymbolName<'tcx> {
+ let symbol_name = compute_symbol_name(tcx, instance, || {
+ // This closure determines the instantiating crate for instances that
+ // need an instantiating-crate-suffix for their symbol name, in order
+ // to differentiate between local copies.
+ if is_generic(instance.substs) {
+ // For generics we might find re-usable upstream instances. If there
+ // is one, we use its crate as the instantiating crate; otherwise we
+ // rely on the symbol being instantiated locally.
+ instance.upstream_monomorphization(tcx).unwrap_or(LOCAL_CRATE)
+ } else {
+ // For non-generic things that need to avoid naming conflicts, we
+ // always instantiate a copy in the local crate.
+ LOCAL_CRATE
+ }
+ });
+
+ ty::SymbolName::new(tcx, &symbol_name)
+}
+
+pub fn typeid_for_trait_ref<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyExistentialTraitRef<'tcx>,
+) -> String {
+ v0::mangle_typeid_for_trait_ref(tcx, trait_ref)
+}
+
+/// Computes the symbol name for the given instance. This function will call
+/// `compute_instantiating_crate` if it needs to factor the instantiating crate
+/// into the symbol name.
+fn compute_symbol_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: Instance<'tcx>,
+ compute_instantiating_crate: impl FnOnce() -> CrateNum,
+) -> String {
+ let def_id = instance.def_id();
+ let substs = instance.substs;
+
+ debug!("symbol_name(def_id={:?}, substs={:?})", def_id, substs);
+
+ if let Some(def_id) = def_id.as_local() {
+ if tcx.proc_macro_decls_static(()) == Some(def_id) {
+ let stable_crate_id = tcx.sess.local_stable_crate_id();
+ return tcx.sess.generate_proc_macro_decls_symbol(stable_crate_id);
+ }
+ }
+
+ // FIXME(eddyb) Precompute a custom symbol name based on attributes.
+ let attrs = if tcx.def_kind(def_id).has_codegen_attrs() {
+ tcx.codegen_fn_attrs(def_id)
+ } else {
+ CodegenFnAttrs::EMPTY
+ };
+
+ // Foreign items by default use no mangling for their symbol name. There's a
+ // few exceptions to this rule though:
+ //
+ // * This can be overridden with the `#[link_name]` attribute
+ //
+ // * On the wasm32 targets there is a bug (or feature) in LLD [1] where the
+ // same-named symbol, when imported from different wasm modules, will get
+ // hooked up incorrectly. As a result, foreign symbols on the wasm target
+ // that have a wasm import module get mangled. Additionally, our codegen will
+ // deduplicate symbols based purely on the symbol name, but for wasm this
+ // isn't quite right because the same-named symbol on wasm can come from
+ // different modules. For these reasons if `#[link(wasm_import_module)]`
+ // is present we mangle everything on wasm because the demangled form will
+ // show up in the `wasm-import-name` custom attribute in LLVM IR.
+ //
+ // [1]: https://bugs.llvm.org/show_bug.cgi?id=44316
+ if tcx.is_foreign_item(def_id)
+ && (!tcx.sess.target.is_like_wasm
+ || !tcx.wasm_import_module_map(def_id.krate).contains_key(&def_id))
+ {
+ if let Some(name) = attrs.link_name {
+ return name.to_string();
+ }
+ return tcx.item_name(def_id).to_string();
+ }
+
+ if let Some(name) = attrs.export_name {
+ // Use provided name
+ return name.to_string();
+ }
+
+ if attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE) {
+ // Don't mangle
+ return tcx.item_name(def_id).to_string();
+ }
+
+ // If we're dealing with an instance of a function that's inlined from
+ // another crate but we're marking it as globally shared to our
+ // compilation (aka we're not making an internal copy in each of our
+ // codegen units) then this symbol may become an exported (but hidden
+ // visibility) symbol. This means that multiple crates may do the same
+ // and we want to be sure to avoid any symbol conflicts here.
+ let is_globally_shared_function = matches!(
+ tcx.def_kind(instance.def_id()),
+ DefKind::Fn | DefKind::AssocFn | DefKind::Closure | DefKind::Generator | DefKind::Ctor(..)
+ ) && matches!(
+ MonoItem::Fn(instance).instantiation_mode(tcx),
+ InstantiationMode::GloballyShared { may_conflict: true }
+ );
+
+ // If this is an instance of a generic function, we also hash in
+ // the ID of the instantiating crate. This avoids symbol conflicts
+ // in case the same instance is emitted in two crates of the same
+ // project.
+ let avoid_cross_crate_conflicts = is_generic(substs) || is_globally_shared_function;
+
+ let instantiating_crate =
+ if avoid_cross_crate_conflicts { Some(compute_instantiating_crate()) } else { None };
+
+ // Pick the crate responsible for the symbol mangling version, which has to:
+ // 1. be stable for each instance, whether it's being defined or imported
+ // 2. obey each crate's own `-C symbol-mangling-version`, as much as possible
+ // We solve these as follows:
+ // 1. because symbol names depend on both `def_id` and `instantiating_crate`,
+ // both their `CrateNum`s are stable for any given instance, so we can pick
+ // either and have a stable choice of symbol mangling version
+ // 2. we favor `instantiating_crate` where possible (i.e. when `Some`)
+ let mangling_version_crate = instantiating_crate.unwrap_or(def_id.krate);
+ let mangling_version = if mangling_version_crate == LOCAL_CRATE {
+ tcx.sess.opts.get_symbol_mangling_version()
+ } else {
+ tcx.symbol_mangling_version(mangling_version_crate)
+ };
+
+ let symbol = match mangling_version {
+ SymbolManglingVersion::Legacy => legacy::mangle(tcx, instance, instantiating_crate),
+ SymbolManglingVersion::V0 => v0::mangle(tcx, instance, instantiating_crate),
+ };
+
+ debug_assert!(
+ rustc_demangle::try_demangle(&symbol).is_ok(),
+ "compute_symbol_name: `{}` cannot be demangled",
+ symbol
+ );
+
+ symbol
+}
+
+fn is_generic(substs: SubstsRef<'_>) -> bool {
+ substs.non_erasable_generics().next().is_some()
+}
diff --git a/compiler/rustc_symbol_mangling/src/test.rs b/compiler/rustc_symbol_mangling/src/test.rs
new file mode 100644
index 000000000..7249ce04c
--- /dev/null
+++ b/compiler/rustc_symbol_mangling/src/test.rs
@@ -0,0 +1,74 @@
+//! Walks the crate looking for items/impl-items/trait-items that have
+//! either a `rustc_symbol_name` or `rustc_def_path` attribute and
+//! generates an error giving, respectively, the symbol name or
+//! def-path. This is used for unit testing the code that generates
+//! paths etc. in all kinds of annoying scenarios.
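+//!
+//! For example (illustrative), under `#![feature(rustc_attrs)]`:
+//!
+//! ```ignore (illustrative)
+//! #[rustc_symbol_name]
+//! fn foo() {} //~ ERROR symbol-name(...)
+//! ```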
+
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{subst::InternalSubsts, Instance, TyCtxt};
+use rustc_span::symbol::{sym, Symbol};
+
+const SYMBOL_NAME: Symbol = sym::rustc_symbol_name;
+const DEF_PATH: Symbol = sym::rustc_def_path;
+
+pub fn report_symbol_names(tcx: TyCtxt<'_>) {
+ // if the `rustc_attrs` feature is not enabled, then the
+ // attributes we are interested in cannot be present anyway, so
+ // skip the walk.
+ if !tcx.features().rustc_attrs {
+ return;
+ }
+
+ tcx.dep_graph.with_ignore(|| {
+ let mut symbol_names = SymbolNamesTest { tcx };
+ let crate_items = tcx.hir_crate_items(());
+
+ for id in crate_items.items() {
+ symbol_names.process_attrs(id.def_id);
+ }
+
+ for id in crate_items.trait_items() {
+ symbol_names.process_attrs(id.def_id);
+ }
+
+ for id in crate_items.impl_items() {
+ symbol_names.process_attrs(id.def_id);
+ }
+
+ for id in crate_items.foreign_items() {
+ symbol_names.process_attrs(id.def_id);
+ }
+ })
+}
+
+struct SymbolNamesTest<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl SymbolNamesTest<'_> {
+ fn process_attrs(&mut self, def_id: LocalDefId) {
+ let tcx = self.tcx;
+ // The formatting of `tag({})` is chosen so that tests can elect
+ // to test the entirety of the string, if they choose, or else just
+ // some subset.
+ for attr in tcx.get_attrs(def_id.to_def_id(), SYMBOL_NAME) {
+ let def_id = def_id.to_def_id();
+ let instance = Instance::new(
+ def_id,
+ tcx.erase_regions(InternalSubsts::identity_for_item(tcx, def_id)),
+ );
+ let mangled = tcx.symbol_name(instance);
+ tcx.sess.span_err(attr.span, &format!("symbol-name({})", mangled));
+ if let Ok(demangling) = rustc_demangle::try_demangle(mangled.name) {
+ tcx.sess.span_err(attr.span, &format!("demangling({})", demangling));
+ tcx.sess.span_err(attr.span, &format!("demangling-alt({:#})", demangling));
+ }
+ }
+
+ for attr in tcx.get_attrs(def_id.to_def_id(), DEF_PATH) {
+ let path = with_no_trimmed_paths!(tcx.def_path_str(def_id.to_def_id()));
+ tcx.sess.span_err(attr.span, &format!("def-path({})", path));
+ }
+ }
+}
diff --git a/compiler/rustc_symbol_mangling/src/typeid.rs b/compiler/rustc_symbol_mangling/src/typeid.rs
new file mode 100644
index 000000000..9228bea43
--- /dev/null
+++ b/compiler/rustc_symbol_mangling/src/typeid.rs
@@ -0,0 +1,18 @@
+// For more information about type metadata and type metadata identifiers for cross-language LLVM
+// CFI support, see Type metadata in the design document in the tracking issue #89653.
+
+use rustc_middle::ty::{FnSig, Ty, TyCtxt};
+use rustc_target::abi::call::FnAbi;
+
+mod typeid_itanium_cxx_abi;
+use typeid_itanium_cxx_abi::TypeIdOptions;
+
+/// Returns a type metadata identifier for the specified FnAbi.
+pub fn typeid_for_fnabi<'tcx>(tcx: TyCtxt<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> String {
+ typeid_itanium_cxx_abi::typeid_for_fnabi(tcx, fn_abi, TypeIdOptions::NO_OPTIONS)
+}
+
+/// Returns a type metadata identifier for the specified FnSig.
+pub fn typeid_for_fnsig<'tcx>(tcx: TyCtxt<'tcx>, fn_sig: &FnSig<'tcx>) -> String {
+ typeid_itanium_cxx_abi::typeid_for_fnsig(tcx, fn_sig, TypeIdOptions::NO_OPTIONS)
+}
diff --git a/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
new file mode 100644
index 000000000..a09b52fbf
--- /dev/null
+++ b/compiler/rustc_symbol_mangling/src/typeid/typeid_itanium_cxx_abi.rs
@@ -0,0 +1,929 @@
+// For more information about type metadata and type metadata identifiers for cross-language LLVM
+// CFI support, see Type metadata in the design document in the tracking issue #89653.
+
+// FIXME(rcvalle): Identify C char and integer type uses and encode them with their respective
+// builtin type encodings as specified by the Itanium C++ ABI for extern function types with the "C"
+// calling convention to use this encoding for cross-language LLVM CFI.
+
+use bitflags::bitflags;
+use core::fmt::Display;
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
+use rustc_middle::ty::{
+ self, Binder, Const, ExistentialPredicate, FloatTy, FnSig, IntTy, List, Region, RegionKind,
+ Term, Ty, TyCtxt, UintTy,
+};
+use rustc_span::def_id::DefId;
+use rustc_span::symbol::sym;
+use rustc_target::abi::call::{Conv, FnAbi};
+use rustc_target::spec::abi::Abi;
+use std::fmt::Write as _;
+
+/// Type and extended type qualifiers.
+#[derive(Eq, Hash, PartialEq)]
+enum TyQ {
+ None,
+ Const,
+ Mut,
+}
+
+/// Substitution dictionary key.
+#[derive(Eq, Hash, PartialEq)]
+enum DictKey<'tcx> {
+ Ty(Ty<'tcx>, TyQ),
+ Region(Region<'tcx>),
+ Const(Const<'tcx>),
+ Predicate(ExistentialPredicate<'tcx>),
+}
+
+bitflags! {
+ /// Options for typeid_for_fnabi and typeid_for_fnsig.
+ pub struct TypeIdOptions: u32 {
+ const NO_OPTIONS = 0;
+ const GENERALIZE_POINTERS = 1;
+ const GENERALIZE_REPR_C = 2;
+ }
+}
+
+/// Options for encode_ty.
+type EncodeTyOptions = TypeIdOptions;
+
+/// Options for transform_ty.
+type TransformTyOptions = TypeIdOptions;
+
+/// Converts a number to a disambiguator (see
+/// <https://rust-lang.github.io/rfcs/2603-rust-symbol-name-mangling-v0.html>).
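+///
+/// For example, `0` encodes as `"s_"`, `1` as `"s0_"`, and `11` as `"sa_"`.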
+fn to_disambiguator(num: u64) -> String {
+ if let Some(num) = num.checked_sub(1) {
+ format!("s{}_", base_n::encode(num as u128, 62))
+ } else {
+ "s_".to_string()
+ }
+}
+
+/// Converts a number to a sequence number (see
+/// <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangle.seq-id>).
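+///
+/// For example, `0` encodes as `""`, `1` as `"0"`, and `11` as `"A"`.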
+fn to_seq_id(num: usize) -> String {
+ if let Some(num) = num.checked_sub(1) {
+ base_n::encode(num as u128, 36).to_uppercase()
+ } else {
+ "".to_string()
+ }
+}
+
+/// Substitutes a component if found in the substitution dictionary (see
+/// <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-compression>).
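+///
+/// The first time a component is seen it is recorded in `dict` and left as-is;
+/// subsequent sightings replace it with `"S_"`, `"S0_"`, `"S1_"`, ... in
+/// insertion order, matching the Itanium ABI's `<seq-id>` substitutions.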
+fn compress<'tcx>(
+ dict: &mut FxHashMap<DictKey<'tcx>, usize>,
+ key: DictKey<'tcx>,
+ comp: &mut String,
+) {
+ match dict.get(&key) {
+ Some(num) => {
+ comp.clear();
+ let _ = write!(comp, "S{}_", to_seq_id(*num));
+ }
+ None => {
+ dict.insert(key, dict.len());
+ }
+ }
+}
+
+// FIXME(rcvalle): Move to compiler/rustc_middle/src/ty/sty.rs after C types work is done, possibly
+// along with other is_c_type methods.
+/// Returns whether a `ty::Ty` is `c_void`.
+fn is_c_void_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+ match ty.kind() {
+ ty::Adt(adt_def, ..) => {
+ let def_id = adt_def.0.did;
+ let crate_name = tcx.crate_name(def_id.krate);
+ tcx.item_name(def_id).as_str() == "c_void"
+ && (crate_name == sym::core || crate_name == sym::std || crate_name == sym::libc)
+ }
+ _ => false,
+ }
+}
+
+/// Encodes a const using the Itanium C++ ABI as a literal argument (see
+/// <https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling.literal>).
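+///
+/// For example (illustrative), the `bool` const `true` encodes as `"Lb1E"`:
+/// `L` opens the literal, `b` encodes `bool`, `1` is the value, and `E` closes it.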
+fn encode_const<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ c: Const<'tcx>,
+ dict: &mut FxHashMap<DictKey<'tcx>, usize>,
+ options: EncodeTyOptions,
+) -> String {
+ // L<element-type>[n]<element-value>E as literal argument
+ let mut s = String::from('L');
+
+ // Element type
+ s.push_str(&encode_ty(tcx, c.ty(), dict, options));
+
+ // The only allowed types of const parameters are bool, u8, u16, u32, u64, u128, usize, i8, i16,
+ // i32, i64, i128, isize, and char. The bool value false is encoded as 0 and true as 1.
+ fn push_signed_value<T: Display + PartialOrd>(s: &mut String, value: T, zero: T) {
+ if value < zero {
+ s.push('n')
+ };
+ let _ = write!(s, "{}", value);
+ }
+
+ fn push_unsigned_value<T: Display>(s: &mut String, value: T) {
+ let _ = write!(s, "{}", value);
+ }
+
+ if let Some(scalar_int) = c.kind().try_to_scalar_int() {
+ let signed = c.ty().is_signed();
+ match scalar_int.size().bits() {
+ 8 if signed => push_signed_value(&mut s, scalar_int.try_to_i8().unwrap(), 0),
+ 16 if signed => push_signed_value(&mut s, scalar_int.try_to_i16().unwrap(), 0),
+ 32 if signed => push_signed_value(&mut s, scalar_int.try_to_i32().unwrap(), 0),
+ 64 if signed => push_signed_value(&mut s, scalar_int.try_to_i64().unwrap(), 0),
+ 128 if signed => push_signed_value(&mut s, scalar_int.try_to_i128().unwrap(), 0),
+ 8 => push_unsigned_value(&mut s, scalar_int.try_to_u8().unwrap()),
+ 16 => push_unsigned_value(&mut s, scalar_int.try_to_u16().unwrap()),
+ 32 => push_unsigned_value(&mut s, scalar_int.try_to_u32().unwrap()),
+ 64 => push_unsigned_value(&mut s, scalar_int.try_to_u64().unwrap()),
+ 128 => push_unsigned_value(&mut s, scalar_int.try_to_u128().unwrap()),
+ _ => {
+ bug!("encode_const: unexpected size `{:?}`", scalar_int.size().bits());
+ }
+ };
+ } else {
+ bug!("encode_const: unexpected type `{:?}`", c.ty());
+ }
+
+ // Close the "L..E" pair
+ s.push('E');
+
+ compress(dict, DictKey::Const(c), &mut s);
+
+ s
+}
+
+/// Encodes a FnSig using the Itanium C++ ABI with vendor extended type qualifiers and types for
+/// Rust types that are not used at the FFI boundary.
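+///
+/// For example (illustrative, with an empty substitution dictionary), a Rust-ABI
+/// `fn(i32)` signature encodes as `"Fvu3i32E"`: `F..E` delimits the function type,
+/// `v` is the unit return type, and `u3i32` the vendor extended `i32`.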
+fn encode_fnsig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ fn_sig: &FnSig<'tcx>,
+ dict: &mut FxHashMap<DictKey<'tcx>, usize>,
+ options: TypeIdOptions,
+) -> String {
+ // Function types are delimited by an "F..E" pair
+ let mut s = String::from("F");
+
+ let mut encode_ty_options = EncodeTyOptions::from_bits(options.bits())
+ .unwrap_or_else(|| bug!("encode_fnsig: invalid option(s) `{:?}`", options.bits()));
+ match fn_sig.abi {
+ Abi::C { .. } => {
+ encode_ty_options.insert(EncodeTyOptions::GENERALIZE_REPR_C);
+ }
+ _ => {
+ encode_ty_options.remove(EncodeTyOptions::GENERALIZE_REPR_C);
+ }
+ }
+
+ // Encode the return type
+ let transform_ty_options = TransformTyOptions::from_bits(options.bits())
+ .unwrap_or_else(|| bug!("encode_fnsig: invalid option(s) `{:?}`", options.bits()));
+ let ty = transform_ty(tcx, fn_sig.output(), transform_ty_options);
+ s.push_str(&encode_ty(tcx, ty, dict, encode_ty_options));
+
+ // Encode the parameter types
+ let tys = fn_sig.inputs();
+ if !tys.is_empty() {
+ for ty in tys {
+ let ty = transform_ty(tcx, *ty, transform_ty_options);
+ s.push_str(&encode_ty(tcx, ty, dict, encode_ty_options));
+ }
+
+ if fn_sig.c_variadic {
+ s.push('z');
+ }
+ } else {
+ if fn_sig.c_variadic {
+ s.push('z');
+ } else {
+ // Empty parameter lists, whether declared as () or conventionally as (void), are
+ // encoded with a void parameter specifier "v".
+ s.push('v')
+ }
+ }
+
+ // Close the "F..E" pair
+ s.push('E');
+
+ s
+}
+
+/// Encodes a predicate using the Itanium C++ ABI with vendor extended type qualifiers and types for
+/// Rust types that are not used at the FFI boundary.
+fn encode_predicate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicate: Binder<'tcx, ExistentialPredicate<'tcx>>,
+ dict: &mut FxHashMap<DictKey<'tcx>, usize>,
+ options: EncodeTyOptions,
+) -> String {
+ // u<length><name>[I<element-type1..element-typeN>E], where <element-type> is <subst>, as vendor
+ // extended type.
+ let mut s = String::new();
+ match predicate.as_ref().skip_binder() {
+ ty::ExistentialPredicate::Trait(trait_ref) => {
+ let name = encode_ty_name(tcx, trait_ref.def_id);
+ let _ = write!(s, "u{}{}", name.len(), &name);
+ s.push_str(&encode_substs(tcx, trait_ref.substs, dict, options));
+ }
+ ty::ExistentialPredicate::Projection(projection) => {
+ let name = encode_ty_name(tcx, projection.item_def_id);
+ let _ = write!(s, "u{}{}", name.len(), &name);
+ s.push_str(&encode_substs(tcx, projection.substs, dict, options));
+ match projection.term {
+ Term::Ty(ty) => {
+ s.push_str(&encode_ty(tcx, ty, dict, options));
+ }
+ Term::Const(c) => {
+ s.push_str(&encode_const(tcx, c, dict, options));
+ }
+ }
+ }
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ let name = encode_ty_name(tcx, *def_id);
+ let _ = write!(s, "u{}{}", name.len(), &name);
+ }
+ };
+ compress(dict, DictKey::Predicate(*predicate.as_ref().skip_binder()), &mut s);
+ s
+}
+
+/// Encodes predicates using the Itanium C++ ABI with vendor extended type qualifiers and types for
+/// Rust types that are not used at the FFI boundary.
+fn encode_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicates: &List<Binder<'tcx, ExistentialPredicate<'tcx>>>,
+ dict: &mut FxHashMap<DictKey<'tcx>, usize>,
+ options: EncodeTyOptions,
+) -> String {
+ // <predicate1[..predicateN]>E as part of vendor extended type
+ let mut s = String::new();
+ let predicates: Vec<Binder<'tcx, ExistentialPredicate<'tcx>>> =
+ predicates.iter().collect();
+ for predicate in predicates {
+ s.push_str(&encode_predicate(tcx, predicate, dict, options));
+ }
+ s
+}
+
+/// Encodes a region using the Itanium C++ ABI as a vendor extended type.
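+///
+/// For example, an erased region encodes as `"u6region"`, and the first late-bound
+/// region of the innermost binder as `"u6regionI0E"` (before any compression).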
+fn encode_region<'tcx>(
+ _tcx: TyCtxt<'tcx>,
+ region: Region<'tcx>,
+ dict: &mut FxHashMap<DictKey<'tcx>, usize>,
+ _options: EncodeTyOptions,
+) -> String {
+ // u6region[I[<region-disambiguator>][<region-index>]E] as vendor extended type
+ let mut s = String::new();
+ match region.kind() {
+ RegionKind::ReLateBound(debruijn, r) => {
+ s.push_str("u6regionI");
+ // Debruijn index, which identifies the binder, as region disambiguator
+ let num = debruijn.index() as u64;
+ if num > 0 {
+ s.push_str(&to_disambiguator(num));
+ }
+ // Index within the binder
+ let _ = write!(s, "{}", r.var.index() as u64);
+ s.push('E');
+ compress(dict, DictKey::Region(region), &mut s);
+ }
+ RegionKind::ReErased => {
+ s.push_str("u6region");
+ compress(dict, DictKey::Region(region), &mut s);
+ }
+ RegionKind::ReEarlyBound(..)
+ | RegionKind::ReFree(..)
+ | RegionKind::ReStatic
+ | RegionKind::ReVar(..)
+ | RegionKind::RePlaceholder(..)
+ | RegionKind::ReEmpty(..) => {
+ bug!("encode_region: unexpected `{:?}`", region.kind());
+ }
+ }
+ s
+}
+
+/// Encodes substs using the Itanium C++ ABI with vendor extended type qualifiers and types for Rust
+/// types that are not used at the FFI boundary.
+fn encode_substs<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ dict: &mut FxHashMap<DictKey<'tcx>, usize>,
+ options: EncodeTyOptions,
+) -> String {
+ // [I<subst1..substN>E] as part of vendor extended type
+ let mut s = String::new();
+ let substs: Vec<GenericArg<'_>> = substs.iter().collect();
+ if !substs.is_empty() {
+ s.push('I');
+ for subst in substs {
+ match subst.unpack() {
+ GenericArgKind::Lifetime(region) => {
+ s.push_str(&encode_region(tcx, region, dict, options));
+ }
+ GenericArgKind::Type(ty) => {
+ s.push_str(&encode_ty(tcx, ty, dict, options));
+ }
+ GenericArgKind::Const(c) => {
+ s.push_str(&encode_const(tcx, c, dict, options));
+ }
+ }
+ }
+ s.push('E');
+ }
+ s
+}
+
+/// Encodes a ty::Ty name, including its crate and path disambiguators and names.
+fn encode_ty_name<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> String {
+ // Encode <name> for use in u<length><name>[I<element-type1..element-typeN>E], where
+ // <element-type> is <subst>, using v0's <path> without v0's extended form of paths:
+ //
+ // N<namespace-tagN>..N<namespace-tag1>
+ // C<crate-disambiguator><crate-name>
+ // <path-disambiguator1><path-name1>..<path-disambiguatorN><path-nameN>
+ //
+ // With additional tags for DefPathData::Impl and DefPathData::ForeignMod. For instance:
+ //
+ // pub type Type1 = impl Send;
+ // let _: Type1 = <Struct1<i32>>::foo;
+ // fn foo1(_: Type1) { }
+ //
+ // pub type Type2 = impl Send;
+ // let _: Type2 = <Trait1<i32>>::foo;
+ // fn foo2(_: Type2) { }
+ //
+ // pub type Type3 = impl Send;
+ // let _: Type3 = <i32 as Trait1<i32>>::foo;
+ // fn foo3(_: Type3) { }
+ //
+ // pub type Type4 = impl Send;
+ // let _: Type4 = <Struct1<i32> as Trait1<i32>>::foo;
+ // fn foo4(_: Type4) { }
+ //
+ // Are encoded as:
+ //
+ // _ZTSFvu29NvNIC1234_5crate8{{impl}}3fooIu3i32EE
+ // _ZTSFvu27NvNtC1234_5crate6Trait13fooIu3dynIu21NtC1234_5crate6Trait1Iu3i32Eu6regionES_EE
+ // _ZTSFvu27NvNtC1234_5crate6Trait13fooIu3i32S_EE
+ // _ZTSFvu27NvNtC1234_5crate6Trait13fooIu22NtC1234_5crate7Struct1Iu3i32ES_EE
+ //
+ // The reason for not using v0's extended form of paths is to use a consistent and simpler
+ // encoding, as the reasoning for using it isn't relevant for type metadata identifiers (i.e.,
+ // keep symbol names close to how methods are represented in error messages). See
+ // https://rust-lang.github.io/rfcs/2603-rust-symbol-name-mangling-v0.html#methods.
+ let mut s = String::new();
+
+ // Start and namespace tags
+ let mut def_path = tcx.def_path(def_id);
+ def_path.data.reverse();
+ for disambiguated_data in &def_path.data {
+ s.push('N');
+ s.push_str(match disambiguated_data.data {
+ hir::definitions::DefPathData::Impl => "I", // Not specified in v0's <namespace>
+ hir::definitions::DefPathData::ForeignMod => "F", // Not specified in v0's <namespace>
+ hir::definitions::DefPathData::TypeNs(..) => "t",
+ hir::definitions::DefPathData::ValueNs(..) => "v",
+ hir::definitions::DefPathData::ClosureExpr => "C",
+ hir::definitions::DefPathData::Ctor => "c",
+ hir::definitions::DefPathData::AnonConst => "k",
+ hir::definitions::DefPathData::ImplTrait => "i",
+ hir::definitions::DefPathData::CrateRoot
+ | hir::definitions::DefPathData::Use
+ | hir::definitions::DefPathData::GlobalAsm
+ | hir::definitions::DefPathData::MacroNs(..)
+ | hir::definitions::DefPathData::LifetimeNs(..) => {
+ bug!("encode_ty_name: unexpected `{:?}`", disambiguated_data.data);
+ }
+ });
+ }
+
+ // Crate disambiguator and name
+ s.push('C');
+ s.push_str(&to_disambiguator(tcx.stable_crate_id(def_path.krate).to_u64()));
+ let crate_name = tcx.crate_name(def_path.krate).to_string();
+ let _ = write!(s, "{}{}", crate_name.len(), &crate_name);
+
+ // Disambiguators and names
+ def_path.data.reverse();
+ for disambiguated_data in &def_path.data {
+ let num = disambiguated_data.disambiguator as u64;
+ if num > 0 {
+ s.push_str(&to_disambiguator(num));
+ }
+
+ let name = disambiguated_data.data.to_string();
+ let _ = write!(s, "{}", name.len());
+
+ // Prepend a '_' if name starts with a digit or '_'
+ if let Some(first) = name.as_bytes().get(0) {
+ if first.is_ascii_digit() || *first == b'_' {
+ s.push('_');
+ }
+ } else {
+ bug!("encode_ty_name: invalid name `{:?}`", name);
+ }
+
+ s.push_str(&name);
+ }
+
+ s
+}
+
+/// Encodes a ty::Ty using the Itanium C++ ABI with vendor extended type qualifiers and types for
+/// Rust types that are not used at the FFI boundary.
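+///
+/// For example (illustrative, with an empty substitution dictionary), `&u8`
+/// encodes as `"u3refIu2u8E"` and `&mut u8` as `"U3mutu3refIu2u8E"`.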
+fn encode_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ dict: &mut FxHashMap<DictKey<'tcx>, usize>,
+ options: EncodeTyOptions,
+) -> String {
+ let mut typeid = String::new();
+
+ match ty.kind() {
+ // Primitive types
+ ty::Bool => {
+ typeid.push('b');
+ }
+
+ ty::Int(..) | ty::Uint(..) | ty::Float(..) => {
+ // u<length><type-name> as vendor extended type
+ let mut s = String::from(match ty.kind() {
+ ty::Int(IntTy::I8) => "u2i8",
+ ty::Int(IntTy::I16) => "u3i16",
+ ty::Int(IntTy::I32) => "u3i32",
+ ty::Int(IntTy::I64) => "u3i64",
+ ty::Int(IntTy::I128) => "u4i128",
+ ty::Int(IntTy::Isize) => "u5isize",
+ ty::Uint(UintTy::U8) => "u2u8",
+ ty::Uint(UintTy::U16) => "u3u16",
+ ty::Uint(UintTy::U32) => "u3u32",
+ ty::Uint(UintTy::U64) => "u3u64",
+ ty::Uint(UintTy::U128) => "u4u128",
+ ty::Uint(UintTy::Usize) => "u5usize",
+ ty::Float(FloatTy::F32) => "u3f32",
+ ty::Float(FloatTy::F64) => "u3f64",
+ _ => "",
+ });
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ ty::Char => {
+ // u4char as vendor extended type
+ let mut s = String::from("u4char");
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ ty::Str => {
+ // u3str as vendor extended type
+ let mut s = String::from("u3str");
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ ty::Never => {
+ // u5never as vendor extended type
+ let mut s = String::from("u5never");
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ // Compound types
+ // () in Rust is equivalent to void return type in C
+ _ if ty.is_unit() => {
+ typeid.push('v');
+ }
+
+ // Sequence types
+ ty::Tuple(tys) => {
+ // u5tupleI<element-type1..element-typeN>E as vendor extended type
+ let mut s = String::from("u5tupleI");
+ for ty in tys.iter() {
+ s.push_str(&encode_ty(tcx, ty, dict, options));
+ }
+ s.push('E');
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ ty::Array(ty0, len) => {
+ // A<array-length><element-type>
+ let mut s = String::from("A");
+ let _ = write!(s, "{}", &len.kind().try_to_scalar().unwrap().to_u64().unwrap());
+ s.push_str(&encode_ty(tcx, *ty0, dict, options));
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ ty::Slice(ty0) => {
+ // u5sliceI<element-type>E as vendor extended type
+ let mut s = String::from("u5sliceI");
+ s.push_str(&encode_ty(tcx, *ty0, dict, options));
+ s.push('E');
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ // User-defined types
+ ty::Adt(adt_def, substs) => {
+ let mut s = String::new();
+ let def_id = adt_def.0.did;
+ if options.contains(EncodeTyOptions::GENERALIZE_REPR_C) && adt_def.repr().c() {
+ // For cross-language CFI support, the encoding must be compatible at the FFI
+ // boundary. For instance:
+ //
+ // struct type1 {};
+ // void foo(struct type1* bar) {}
+ //
+ // Is encoded as:
+ //
+ // _ZTSFvP5type1E
+ //
+ // So, encode any repr(C) user-defined type for extern function types with the "C"
+ // calling convention (or extern types [i.e., ty::Foreign]) as <length><name>, where
+ // <name> is <unscoped-name>.
+ let name = tcx.item_name(def_id).to_string();
+ let _ = write!(s, "{}{}", name.len(), &name);
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ } else {
+ // u<length><name>[I<element-type1..element-typeN>E], where <element-type> is
+ // <subst>, as vendor extended type.
+ let name = encode_ty_name(tcx, def_id);
+ let _ = write!(s, "u{}{}", name.len(), &name);
+ s.push_str(&encode_substs(tcx, substs, dict, options));
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ }
+ typeid.push_str(&s);
+ }
+
+ ty::Foreign(def_id) => {
+ // <length><name>, where <name> is <unscoped-name>
+ let mut s = String::new();
+ let name = tcx.item_name(*def_id).to_string();
+ let _ = write!(s, "{}{}", name.len(), &name);
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ // Function types
+ ty::FnDef(def_id, substs)
+ | ty::Closure(def_id, substs)
+ | ty::Generator(def_id, substs, ..) => {
+ // u<length><name>[I<element-type1..element-typeN>E], where <element-type> is <subst>,
+ // as vendor extended type.
+ let mut s = String::new();
+ let name = encode_ty_name(tcx, *def_id);
+ let _ = write!(s, "u{}{}", name.len(), &name);
+ s.push_str(&encode_substs(tcx, substs, dict, options));
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ // Pointer types
+ ty::Ref(region, ty0, ..) => {
+ // [U3mut]u3refI<element-type>E as vendor extended type qualifier and type
+ let mut s = String::new();
+ s.push_str("u3refI");
+ s.push_str(&encode_ty(tcx, *ty0, dict, options));
+ s.push('E');
+ compress(dict, DictKey::Ty(tcx.mk_imm_ref(*region, *ty0), TyQ::None), &mut s);
+ if ty.is_mutable_ptr() {
+ s = format!("{}{}", "U3mut", &s);
+ compress(dict, DictKey::Ty(ty, TyQ::Mut), &mut s);
+ }
+ typeid.push_str(&s);
+ }
+
+ ty::RawPtr(tm) => {
+ // P[K]<element-type>
+ let mut s = String::new();
+ s.push_str(&encode_ty(tcx, tm.ty, dict, options));
+ if !ty.is_mutable_ptr() {
+ s = format!("{}{}", "K", &s);
+ compress(dict, DictKey::Ty(tm.ty, TyQ::Const), &mut s);
+ };
+ s = format!("{}{}", "P", &s);
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ ty::FnPtr(fn_sig) => {
+ // PF<return-type><parameter-type1..parameter-typeN>E
+ let mut s = String::from("P");
+ s.push_str(&encode_fnsig(tcx, &fn_sig.skip_binder(), dict, TypeIdOptions::NO_OPTIONS));
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ // Trait types
+ ty::Dynamic(predicates, region) => {
+ // u3dynI<element-type1[..element-typeN]>E, where <element-type> is <predicate>, as
+ // vendor extended type.
+ let mut s = String::from("u3dynI");
+ s.push_str(&encode_predicates(tcx, predicates, dict, options));
+ s.push_str(&encode_region(tcx, *region, dict, options));
+ s.push('E');
+ compress(dict, DictKey::Ty(ty, TyQ::None), &mut s);
+ typeid.push_str(&s);
+ }
+
+ // Unexpected types
+ ty::Bound(..)
+ | ty::Error(..)
+ | ty::GeneratorWitness(..)
+ | ty::Infer(..)
+ | ty::Opaque(..)
+ | ty::Param(..)
+ | ty::Placeholder(..)
+ | ty::Projection(..) => {
+ bug!("encode_ty: unexpected `{:?}`", ty.kind());
+ }
+ };
+
+ typeid
+}
+
+/// Transforms a ty::Ty for being encoded and used in the substitution dictionary. It transforms
+/// all c_void types into unit types unconditionally, and generalizes all pointers if the
+/// TransformTyOptions::GENERALIZE_POINTERS option is set.
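+///
+/// For example, `*mut core::ffi::c_void` is transformed into `*mut ()`, and with
+/// `GENERALIZE_POINTERS` set, any `&'a mut T` is transformed into `&'static mut ()`.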
+fn transform_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, options: TransformTyOptions) -> Ty<'tcx> {
+ let mut ty = ty;
+
+ match ty.kind() {
+ ty::Bool
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::Char
+ | ty::Str
+ | ty::Never
+ | ty::Foreign(..)
+ | ty::Dynamic(..) => {}
+
+ _ if ty.is_unit() => {}
+
+ ty::Tuple(tys) => {
+ ty = tcx.mk_tup(tys.iter().map(|ty| transform_ty(tcx, ty, options)));
+ }
+
+ ty::Array(ty0, len) => {
+ let len = len.kind().try_to_scalar().unwrap().to_u64().unwrap();
+ ty = tcx.mk_array(transform_ty(tcx, *ty0, options), len);
+ }
+
+ ty::Slice(ty0) => {
+ ty = tcx.mk_slice(transform_ty(tcx, *ty0, options));
+ }
+
+ ty::Adt(adt_def, substs) => {
+ if is_c_void_ty(tcx, ty) {
+ ty = tcx.mk_unit();
+ } else if options.contains(TransformTyOptions::GENERALIZE_REPR_C) && adt_def.repr().c()
+ {
+ ty = tcx.mk_adt(*adt_def, ty::List::empty());
+ } else if adt_def.repr().transparent() && adt_def.is_struct() {
+ let variant = adt_def.non_enum_variant();
+ let param_env = tcx.param_env(variant.def_id);
+ let field = variant.fields.iter().find(|field| {
+ let ty = tcx.type_of(field.did);
+ let is_zst =
+ tcx.layout_of(param_env.and(ty)).map_or(false, |layout| layout.is_zst());
+ !is_zst
+ });
+ if field.is_none() {
+ // Transform repr(transparent) types without non-ZST field into ()
+ ty = tcx.mk_unit();
+ } else {
+ let ty0 = tcx.type_of(field.unwrap().did);
+ // Generalize any repr(transparent) user-defined type that is either a pointer
+ // or reference, and either references itself or any other type that contains or
+ // references itself, to avoid a reference cycle.
+ if ty0.is_any_ptr() && ty0.contains(ty) {
+ ty = transform_ty(
+ tcx,
+ ty0,
+ options | TransformTyOptions::GENERALIZE_POINTERS,
+ );
+ } else {
+ ty = transform_ty(tcx, ty0, options);
+ }
+ }
+ } else {
+ ty = tcx.mk_adt(*adt_def, transform_substs(tcx, substs, options));
+ }
+ }
+
+ ty::FnDef(def_id, substs) => {
+ ty = tcx.mk_fn_def(*def_id, transform_substs(tcx, substs, options));
+ }
+
+ ty::Closure(def_id, substs) => {
+ ty = tcx.mk_closure(*def_id, transform_substs(tcx, substs, options));
+ }
+
+ ty::Generator(def_id, substs, movability) => {
+ ty = tcx.mk_generator(*def_id, transform_substs(tcx, substs, options), *movability);
+ }
+
+ ty::Ref(region, ty0, ..) => {
+ if options.contains(TransformTyOptions::GENERALIZE_POINTERS) {
+ if ty.is_mutable_ptr() {
+ ty = tcx.mk_mut_ref(tcx.lifetimes.re_static, tcx.mk_unit());
+ } else {
+ ty = tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_unit());
+ }
+ } else {
+ if ty.is_mutable_ptr() {
+ ty = tcx.mk_mut_ref(*region, transform_ty(tcx, *ty0, options));
+ } else {
+ ty = tcx.mk_imm_ref(*region, transform_ty(tcx, *ty0, options));
+ }
+ }
+ }
+
+ ty::RawPtr(tm) => {
+ if options.contains(TransformTyOptions::GENERALIZE_POINTERS) {
+ if ty.is_mutable_ptr() {
+ ty = tcx.mk_mut_ptr(tcx.mk_unit());
+ } else {
+ ty = tcx.mk_imm_ptr(tcx.mk_unit());
+ }
+ } else {
+ if ty.is_mutable_ptr() {
+ ty = tcx.mk_mut_ptr(transform_ty(tcx, tm.ty, options));
+ } else {
+ ty = tcx.mk_imm_ptr(transform_ty(tcx, tm.ty, options));
+ }
+ }
+ }
+
+ ty::FnPtr(fn_sig) => {
+ if options.contains(TransformTyOptions::GENERALIZE_POINTERS) {
+ ty = tcx.mk_imm_ptr(tcx.mk_unit());
+ } else {
+ let parameters: Vec<Ty<'tcx>> = fn_sig
+ .skip_binder()
+ .inputs()
+ .iter()
+ .map(|ty| transform_ty(tcx, *ty, options))
+ .collect();
+ let output = transform_ty(tcx, fn_sig.skip_binder().output(), options);
+ ty = tcx.mk_fn_ptr(ty::Binder::bind_with_vars(
+ tcx.mk_fn_sig(
+ parameters.iter(),
+ &output,
+ fn_sig.c_variadic(),
+ fn_sig.unsafety(),
+ fn_sig.abi(),
+ ),
+ fn_sig.bound_vars(),
+ ));
+ }
+ }
+
+ ty::Bound(..)
+ | ty::Error(..)
+ | ty::GeneratorWitness(..)
+ | ty::Infer(..)
+ | ty::Opaque(..)
+ | ty::Param(..)
+ | ty::Placeholder(..)
+ | ty::Projection(..) => {
+ bug!("transform_ty: unexpected `{:?}`", ty.kind());
+ }
+ }
+
+ ty
+}
+
+/// Transforms substs for being encoded and used in the substitution dictionary.
+fn transform_substs<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ options: TransformTyOptions,
+) -> SubstsRef<'tcx> {
+ let substs: Vec<GenericArg<'tcx>> = substs
+ .iter()
+ .map(|subst| {
+ if let GenericArgKind::Type(ty) = subst.unpack() {
+ if is_c_void_ty(tcx, ty) {
+ tcx.mk_unit().into()
+ } else {
+ transform_ty(tcx, ty, options).into()
+ }
+ } else {
+ subst
+ }
+ })
+ .collect();
+ tcx.mk_substs(substs.iter())
+}
+
+/// Returns a type metadata identifier for the specified FnAbi using the Itanium C++ ABI with vendor
+/// extended type qualifiers and types for Rust types that are not used at the FFI boundary.
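+///
+/// For example (illustrative), the type metadata identifier for `fn()` is
+/// `"_ZTSFvvE"`, matching the Itanium C++ typeinfo name for `void (void)`.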
+pub fn typeid_for_fnabi<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+ options: TypeIdOptions,
+) -> String {
+ // A name is mangled by prefixing "_Z" to an encoding of its name and, in the
+ // case of functions, its type.
+ let mut typeid = String::from("_Z");
+
+ // Clang uses the Itanium C++ ABI's virtual tables and RTTI typeinfo structure name as type
+ // metadata identifiers for function pointers. The typeinfo name encoding is a two-character
+ // code (i.e., 'TS') prefixed to the type encoding for the function.
+ typeid.push_str("TS");
+
+ // Function types are delimited by an "F..E" pair
+ typeid.push('F');
+
+ // A dictionary of substitution candidates used for compression (see
+ // https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-compression).
+ let mut dict: FxHashMap<DictKey<'tcx>, usize> = FxHashMap::default();
+
+ let mut encode_ty_options = EncodeTyOptions::from_bits(options.bits())
+ .unwrap_or_else(|| bug!("typeid_for_fnabi: invalid option(s) `{:?}`", options.bits()));
+ match fn_abi.conv {
+ Conv::C => {
+ encode_ty_options.insert(EncodeTyOptions::GENERALIZE_REPR_C);
+ }
+ _ => {
+ encode_ty_options.remove(EncodeTyOptions::GENERALIZE_REPR_C);
+ }
+ }
+
+ // Encode the return type
+ let transform_ty_options = TransformTyOptions::from_bits(options.bits())
+ .unwrap_or_else(|| bug!("typeid_for_fnabi: invalid option(s) `{:?}`", options.bits()));
+ let ty = transform_ty(tcx, fn_abi.ret.layout.ty, transform_ty_options);
+ typeid.push_str(&encode_ty(tcx, ty, &mut dict, encode_ty_options));
+
+ // Encode the parameter types
+ if !fn_abi.c_variadic {
+ if !fn_abi.args.is_empty() {
+ for arg in fn_abi.args.iter() {
+ let ty = transform_ty(tcx, arg.layout.ty, transform_ty_options);
+ typeid.push_str(&encode_ty(tcx, ty, &mut dict, encode_ty_options));
+ }
+ } else {
+ // Empty parameter lists, whether declared as () or conventionally as (void), are
+ // encoded with a void parameter specifier "v".
+ typeid.push('v');
+ }
+ } else {
+ for n in 0..fn_abi.fixed_count {
+ let ty = transform_ty(tcx, fn_abi.args[n].layout.ty, transform_ty_options);
+ typeid.push_str(&encode_ty(tcx, ty, &mut dict, encode_ty_options));
+ }
+
+ typeid.push('z');
+ }
+
+ // Close the "F..E" pair
+ typeid.push('E');
+
+ typeid
+}
+
+/// Returns a type metadata identifier for the specified FnSig using the Itanium C++ ABI with vendor
+/// extended type qualifiers and types for Rust types that are not used at the FFI boundary.
+pub fn typeid_for_fnsig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ fn_sig: &FnSig<'tcx>,
+ options: TypeIdOptions,
+) -> String {
+ // A name is mangled by prefixing "_Z" to an encoding of its name and, in the
+ // case of functions, its type.
+ let mut typeid = String::from("_Z");
+
+ // Clang uses the Itanium C++ ABI's virtual tables and RTTI typeinfo structure name as type
+ // metadata identifiers for function pointers. The typeinfo name encoding is a two-character
+ // code (i.e., 'TS') prefixed to the type encoding for the function.
+ typeid.push_str("TS");
+
+ // A dictionary of substitution candidates used for compression (see
+ // https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-compression).
+ let mut dict: FxHashMap<DictKey<'tcx>, usize> = FxHashMap::default();
+
+ // Encode the function signature
+ typeid.push_str(&encode_fnsig(tcx, fn_sig, &mut dict, options));
+
+ typeid
+}
diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs
new file mode 100644
index 000000000..71fa5a448
--- /dev/null
+++ b/compiler/rustc_symbol_mangling/src/v0.rs
@@ -0,0 +1,844 @@
+use rustc_data_structures::base_n;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::intern::Interned;
+use rustc_hir as hir;
+use rustc_hir::def::CtorKind;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
+use rustc_middle::ty::layout::IntegerExt;
+use rustc_middle::ty::print::{Print, Printer};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst};
+use rustc_middle::ty::{
+ self, EarlyBinder, FloatTy, Instance, IntTy, Ty, TyCtxt, TypeVisitable, UintTy,
+};
+use rustc_span::symbol::kw;
+use rustc_target::abi::Integer;
+use rustc_target::spec::abi::Abi;
+
+use std::fmt::Write;
+use std::iter;
+use std::ops::Range;
+
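+// For example (illustrative): a free function `foo` at the root of a crate
+// `mycrate` mangles to something like `_RNvCs1234_7mycrate3foo`, where `Nv`
+// opens a value-namespace path, `Cs1234_7mycrate` is the disambiguated crate
+// root, and `3foo` is the length-prefixed identifier.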
+pub(super) fn mangle<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: Instance<'tcx>,
+ instantiating_crate: Option<CrateNum>,
+) -> String {
+ let def_id = instance.def_id();
+ // FIXME(eddyb) this should ideally not be needed.
+ let substs = tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), instance.substs);
+
+ let prefix = "_R";
+ let mut cx = &mut SymbolMangler {
+ tcx,
+ start_offset: prefix.len(),
+ paths: FxHashMap::default(),
+ types: FxHashMap::default(),
+ consts: FxHashMap::default(),
+ binders: vec![],
+ out: String::from(prefix),
+ };
+
+ // Append `::{shim:...#0}` to shims that can coexist with a non-shim instance.
+ let shim_kind = match instance.def {
+ ty::InstanceDef::VTableShim(_) => Some("vtable"),
+ ty::InstanceDef::ReifyShim(_) => Some("reify"),
+
+ _ => None,
+ };
+
+ cx = if let Some(shim_kind) = shim_kind {
+ cx.path_append_ns(|cx| cx.print_def_path(def_id, substs), 'S', 0, shim_kind).unwrap()
+ } else {
+ cx.print_def_path(def_id, substs).unwrap()
+ };
+ if let Some(instantiating_crate) = instantiating_crate {
+ cx = cx.print_def_path(instantiating_crate.as_def_id(), &[]).unwrap();
+ }
+ std::mem::take(&mut cx.out)
+}
+
+pub(super) fn mangle_typeid_for_trait_ref<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyExistentialTraitRef<'tcx>,
+) -> String {
+ // FIXME(flip1995): See comment in `mangle_typeid_for_fnabi`.
+ let mut cx = &mut SymbolMangler {
+ tcx,
+ start_offset: 0,
+ paths: FxHashMap::default(),
+ types: FxHashMap::default(),
+ consts: FxHashMap::default(),
+ binders: vec![],
+ out: String::new(),
+ };
+ cx = cx.print_def_path(trait_ref.def_id(), &[]).unwrap();
+ std::mem::take(&mut cx.out)
+}
+
+struct BinderLevel {
+ /// The range of distances from the root of what's
+ /// being printed, to the lifetimes in a binder.
+ /// Specifically, a `BrAnon(i)` lifetime has depth
+ /// `lifetime_depths.start + i`, going away from the
+ /// root and towards its use site, as `i` increases.
+ /// This is used to flatten rustc's pairing of `BrAnon`
+ /// (intra-binder disambiguation) with a `DebruijnIndex`
+ /// (binder addressing), to "true" de Bruijn indices,
+ /// by subtracting the depth of a certain lifetime from
+ /// the innermost depth at its use site.
+ lifetime_depths: Range<u32>,
+}
+
+struct SymbolMangler<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ binders: Vec<BinderLevel>,
+ out: String,
+
+ /// The length of the prefix in `out` (e.g. 2 for `_R`).
+ start_offset: usize,
+ /// The values are start positions in `out`, in bytes.
+ paths: FxHashMap<(DefId, &'tcx [GenericArg<'tcx>]), usize>,
+ types: FxHashMap<Ty<'tcx>, usize>,
+ consts: FxHashMap<ty::Const<'tcx>, usize>,
+}
+
+impl<'tcx> SymbolMangler<'tcx> {
+ fn push(&mut self, s: &str) {
+ self.out.push_str(s);
+ }
+
+ /// Push a `_`-terminated base 62 integer, using the format
+ /// specified in the RFC as `<base-62-number>`, that is:
+ /// * `x = 0` is encoded as just the `"_"` terminator
+ /// * `x > 0` is encoded as `x - 1` in base 62, followed by `"_"`,
+ /// e.g. `1` becomes `"0_"`, `62` becomes `"Z_"`, etc.
+ fn push_integer_62(&mut self, x: u64) {
+ if let Some(x) = x.checked_sub(1) {
+ base_n::push_str(x as u128, 62, &mut self.out);
+ }
+ self.push("_");
+ }
+
+ /// Push a `tag`-prefixed base 62 integer, when larger than `0`, that is:
+ /// * `x = 0` is encoded as `""` (nothing)
+ /// * `x > 0` is encoded as the `tag` followed by `push_integer_62(x - 1)`
+ /// e.g. `1` becomes `tag + "_"`, `2` becomes `tag + "0_"`, etc.
+ fn push_opt_integer_62(&mut self, tag: &str, x: u64) {
+ if let Some(x) = x.checked_sub(1) {
+ self.push(tag);
+ self.push_integer_62(x);
+ }
+ }
+
+ fn push_disambiguator(&mut self, dis: u64) {
+ self.push_opt_integer_62("s", dis);
+ }
+
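+ /// Push an identifier as its decimal byte length followed by the bytes, e.g.
+ /// `"foo"` becomes `"3foo"`. A leading digit or `_` gets an extra `_` separator
+ /// after the length, and non-ASCII identifiers are Punycode-encoded and
+ /// prefixed with `u`.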
+ fn push_ident(&mut self, ident: &str) {
+ let mut use_punycode = false;
+ for b in ident.bytes() {
+ match b {
+ b'_' | b'a'..=b'z' | b'A'..=b'Z' | b'0'..=b'9' => {}
+ 0x80..=0xff => use_punycode = true,
+ _ => bug!("symbol_names: bad byte {} in ident {:?}", b, ident),
+ }
+ }
+
+ let punycode_string;
+ let ident = if use_punycode {
+ self.push("u");
+
+ // FIXME(eddyb) we should probably roll our own punycode implementation.
+ let mut punycode_bytes = match punycode::encode(ident) {
+ Ok(s) => s.into_bytes(),
+ Err(()) => bug!("symbol_names: punycode encoding failed for ident {:?}", ident),
+ };
+
+ // Replace `-` with `_`.
+ if let Some(c) = punycode_bytes.iter_mut().rfind(|&&mut c| c == b'-') {
+ *c = b'_';
+ }
+
+ // FIXME(eddyb) avoid rechecking UTF-8 validity.
+ punycode_string = String::from_utf8(punycode_bytes).unwrap();
+ &punycode_string
+ } else {
+ ident
+ };
+
+ let _ = write!(self.out, "{}", ident.len());
+
+ // Write a separating `_` if necessary (leading digit or `_`).
+ if let Some('_' | '0'..='9') = ident.chars().next() {
+ self.push("_");
+ }
+
+ self.push(ident);
+ }
+
+ fn path_append_ns<'a>(
+ mut self: &'a mut Self,
+ print_prefix: impl FnOnce(&'a mut Self) -> Result<&'a mut Self, !>,
+ ns: char,
+ disambiguator: u64,
+ name: &str,
+ ) -> Result<&'a mut Self, !> {
+ self.push("N");
+ self.out.push(ns);
+ self = print_prefix(self)?;
+ self.push_disambiguator(disambiguator as u64);
+ self.push_ident(name);
+ Ok(self)
+ }
+
+ fn print_backref(&mut self, i: usize) -> Result<&mut Self, !> {
+ self.push("B");
+ self.push_integer_62((i - self.start_offset) as u64);
+ Ok(self)
+ }
+
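+ /// Prints `value` inside a fresh binder level: counts the late-bound lifetimes
+ /// the binder actually references, emits that count via `push_opt_integer_62("G", ..)`,
+ /// and extends `lifetime_depths` so that `print_region` can resolve `BrAnon`
+ /// regions to de Bruijn-style indices relative to the innermost binder.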
+ fn in_binder<'a, T>(
+ mut self: &'a mut Self,
+ value: &ty::Binder<'tcx, T>,
+ print_value: impl FnOnce(&'a mut Self, &T) -> Result<&'a mut Self, !>,
+ ) -> Result<&'a mut Self, !>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ let regions = if value.has_late_bound_regions() {
+ self.tcx.collect_referenced_late_bound_regions(value)
+ } else {
+ FxHashSet::default()
+ };
+
+ let mut lifetime_depths =
+ self.binders.last().map(|b| b.lifetime_depths.end).map_or(0..0, |i| i..i);
+
+ let lifetimes = regions
+ .into_iter()
+ .map(|br| match br {
+ ty::BrAnon(i) => i,
+ _ => bug!("symbol_names: non-anonymized region `{:?}` in `{:?}`", br, value),
+ })
+ .max()
+ .map_or(0, |max| max + 1);
+
+ self.push_opt_integer_62("G", lifetimes as u64);
+ lifetime_depths.end += lifetimes;
+
+ self.binders.push(BinderLevel { lifetime_depths });
+ self = print_value(self, value.as_ref().skip_binder())?;
+ self.binders.pop();
+
+ Ok(self)
+ }
+}
+
+impl<'tcx> Printer<'tcx> for &mut SymbolMangler<'tcx> {
+ type Error = !;
+
+ type Path = Self;
+ type Region = Self;
+ type Type = Self;
+ type DynExistential = Self;
+ type Const = Self;
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn print_def_path(
+ mut self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ if let Some(&i) = self.paths.get(&(def_id, substs)) {
+ return self.print_backref(i);
+ }
+ let start = self.out.len();
+
+ self = self.default_print_def_path(def_id, substs)?;
+
+ // Only cache paths that do not refer to an enclosing
+ // binder (which would change depending on context).
+ if !substs.iter().any(|k| k.has_escaping_bound_vars()) {
+ self.paths.insert((def_id, substs), start);
+ }
+ Ok(self)
+ }
+
+ fn print_impl_path(
+ mut self,
+ impl_def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ mut self_ty: Ty<'tcx>,
+ mut impl_trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ let key = self.tcx.def_key(impl_def_id);
+ let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
+
+ let mut param_env = self.tcx.param_env_reveal_all_normalized(impl_def_id);
+ if !substs.is_empty() {
+ param_env = EarlyBinder(param_env).subst(self.tcx, substs);
+ }
+
+ match &mut impl_trait_ref {
+ Some(impl_trait_ref) => {
+ assert_eq!(impl_trait_ref.self_ty(), self_ty);
+ *impl_trait_ref = self.tcx.normalize_erasing_regions(param_env, *impl_trait_ref);
+ self_ty = impl_trait_ref.self_ty();
+ }
+ None => {
+ self_ty = self.tcx.normalize_erasing_regions(param_env, self_ty);
+ }
+ }
+
+ self.push(match impl_trait_ref {
+ Some(_) => "X",
+ None => "M",
+ });
+
+ // Encode impl generic params if the substitutions contain parameters (implying
+ // polymorphization is enabled) and this isn't an inherent impl.
+ if impl_trait_ref.is_some() && substs.iter().any(|a| a.has_param_types_or_consts()) {
+ self = self.path_generic_args(
+ |this| {
+ this.path_append_ns(
+ |cx| cx.print_def_path(parent_def_id, &[]),
+ 'I',
+ key.disambiguated_data.disambiguator as u64,
+ "",
+ )
+ },
+ substs,
+ )?;
+ } else {
+ self.push_disambiguator(key.disambiguated_data.disambiguator as u64);
+ self = self.print_def_path(parent_def_id, &[])?;
+ }
+
+ self = self_ty.print(self)?;
+
+ if let Some(trait_ref) = impl_trait_ref {
+ self = self.print_def_path(trait_ref.def_id, trait_ref.substs)?;
+ }
+
+ Ok(self)
+ }
+
+ fn print_region(self, region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
+ let i = match *region {
+ // Erased lifetimes use the index 0, for a
+ // shorter mangling of `L_`.
+ ty::ReErased => 0,
+
+ // Late-bound lifetimes use indices starting at 1,
+ // see `BinderLevel` for more details.
+ ty::ReLateBound(debruijn, ty::BoundRegion { kind: ty::BrAnon(i), .. }) => {
+ let binder = &self.binders[self.binders.len() - 1 - debruijn.index()];
+ let depth = binder.lifetime_depths.start + i;
+
+ 1 + (self.binders.last().unwrap().lifetime_depths.end - 1 - depth)
+ }
+
+ _ => bug!("symbol_names: non-erased region `{:?}`", region),
+ };
+ self.push("L");
+ self.push_integer_62(i as u64);
+ Ok(self)
+ }
+
+ fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ // Basic types, never cached (single-character).
+ let basic_type = match ty.kind() {
+ ty::Bool => "b",
+ ty::Char => "c",
+ ty::Str => "e",
+ ty::Tuple(_) if ty.is_unit() => "u",
+ ty::Int(IntTy::I8) => "a",
+ ty::Int(IntTy::I16) => "s",
+ ty::Int(IntTy::I32) => "l",
+ ty::Int(IntTy::I64) => "x",
+ ty::Int(IntTy::I128) => "n",
+ ty::Int(IntTy::Isize) => "i",
+ ty::Uint(UintTy::U8) => "h",
+ ty::Uint(UintTy::U16) => "t",
+ ty::Uint(UintTy::U32) => "m",
+ ty::Uint(UintTy::U64) => "y",
+ ty::Uint(UintTy::U128) => "o",
+ ty::Uint(UintTy::Usize) => "j",
+ ty::Float(FloatTy::F32) => "f",
+ ty::Float(FloatTy::F64) => "d",
+ ty::Never => "z",
+
+ // Placeholders (should be demangled as `_`).
+ ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => "p",
+
+ _ => "",
+ };
+ if !basic_type.is_empty() {
+ self.push(basic_type);
+ return Ok(self);
+ }
+
+ if let Some(&i) = self.types.get(&ty) {
+ return self.print_backref(i);
+ }
+ let start = self.out.len();
+
+ match *ty.kind() {
+ // Basic types, handled above.
+ ty::Bool | ty::Char | ty::Str | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Never => {
+ unreachable!()
+ }
+ ty::Tuple(_) if ty.is_unit() => unreachable!(),
+
+ // Placeholders, also handled as part of basic types.
+ ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
+ unreachable!()
+ }
+
+ ty::Ref(r, ty, mutbl) => {
+ self.push(match mutbl {
+ hir::Mutability::Not => "R",
+ hir::Mutability::Mut => "Q",
+ });
+ if !r.is_erased() {
+ self = r.print(self)?;
+ }
+ self = ty.print(self)?;
+ }
+
+ ty::RawPtr(mt) => {
+ self.push(match mt.mutbl {
+ hir::Mutability::Not => "P",
+ hir::Mutability::Mut => "O",
+ });
+ self = mt.ty.print(self)?;
+ }
+
+ ty::Array(ty, len) => {
+ self.push("A");
+ self = ty.print(self)?;
+ self = self.print_const(len)?;
+ }
+ ty::Slice(ty) => {
+ self.push("S");
+ self = ty.print(self)?;
+ }
+
+ ty::Tuple(tys) => {
+ self.push("T");
+ for ty in tys.iter() {
+ self = ty.print(self)?;
+ }
+ self.push("E");
+ }
+
+ // Mangle all nominal types as paths.
+ ty::Adt(ty::AdtDef(Interned(&ty::AdtDefData { did: def_id, .. }, _)), substs)
+ | ty::FnDef(def_id, substs)
+ | ty::Opaque(def_id, substs)
+ | ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
+ | ty::Closure(def_id, substs)
+ | ty::Generator(def_id, substs, _) => {
+ self = self.print_def_path(def_id, substs)?;
+ }
+ ty::Foreign(def_id) => {
+ self = self.print_def_path(def_id, &[])?;
+ }
+
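+ // e.g. (illustrative) `unsafe extern "C" fn(u8)` would mangle as
+ // `FUKChEu`: `F`, `U` for unsafe, `KC` for the ABI, the `u8`
+ // input, `E`, then `u` for the unit return type.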
+ ty::FnPtr(sig) => {
+ self.push("F");
+ self = self.in_binder(&sig, |mut cx, sig| {
+ if sig.unsafety == hir::Unsafety::Unsafe {
+ cx.push("U");
+ }
+ match sig.abi {
+ Abi::Rust => {}
+ Abi::C { unwind: false } => cx.push("KC"),
+ abi => {
+ cx.push("K");
+ let name = abi.name();
+ if name.contains('-') {
+ cx.push_ident(&name.replace('-', "_"));
+ } else {
+ cx.push_ident(name);
+ }
+ }
+ }
+ for &ty in sig.inputs() {
+ cx = ty.print(cx)?;
+ }
+ if sig.c_variadic {
+ cx.push("v");
+ }
+ cx.push("E");
+ sig.output().print(cx)
+ })?;
+ }
+
+ ty::Dynamic(predicates, r) => {
+ self.push("D");
+ self = self.print_dyn_existential(predicates)?;
+ self = r.print(self)?;
+ }
+
+ ty::GeneratorWitness(_) => bug!("symbol_names: unexpected `GeneratorWitness`"),
+ }
+
+ // Only cache types that do not refer to an enclosing
+ // binder (which would change depending on context).
+ if !ty.has_escaping_bound_vars() {
+ self.types.insert(ty, start);
+ }
+ Ok(self)
+ }
+
+ fn print_dyn_existential(
+ mut self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ // Okay, so this is a bit tricky. Imagine we have a trait object like
+ // `dyn for<'a> Foo<'a, Bar = &'a ()>`. When we mangle this, the
+ // output looks really close to the syntax, where the `Bar = &'a ()` bit
+ // is under the same binders (`['a]`) as the `Foo<'a>` bit. However, we
+ // actually desugar these into two separate `ExistentialPredicate`s. We
+ // can't enter/exit the "binder scope" twice though, because then we
+ // would mangle the binders twice. (Also, as a side note, merging these
+ // two is difficult, because of potential HRTBs in the Projection
+ // predicate.)
+ //
+ // Also worth mentioning: imagine that we instead had
+ // `dyn for<'a> Foo<'a, Bar = &'a ()> + Send`. In this case, `Send` is
+ // under the same binders as `Foo`. Currently, this doesn't matter,
+ // because, other than the principal trait, only *auto traits* are
+ // allowed, and auto traits don't have any generics. Two things could
+ // make this not an "okay" mangling:
+ // 1) Instead of mangling only *used*
+ // bound vars, we want to mangle *all* bound vars (`for<'b> Send` is a
+ // valid trait predicate);
+ // 2) We allow multiple "principal" traits in the future, or at least
+ // allow in any form another trait predicate that can take generics.
+ //
+ // Here we assume that predicates have the following structure:
+ // [<Trait> [{<Projection>}]] [{<Auto>}]
+ // Since any predicates after the first one shouldn't change the binders,
+ // just put them all in the binders of the first.
+ self = self.in_binder(&predicates[0], |mut cx, _| {
+ for predicate in predicates.iter() {
+ // It would be nice to be able to validate bound vars here, but
+ // projections can actually include bound vars from super traits
+ // because of HRTBs (only in the `Self` type). Also, auto traits
+ // could have different bound vars *anyways*.
+ match predicate.as_ref().skip_binder() {
+ ty::ExistentialPredicate::Trait(trait_ref) => {
+ // Use a type that can't appear in defaults of type parameters.
+ let dummy_self = cx.tcx.mk_ty_infer(ty::FreshTy(0));
+ let trait_ref = trait_ref.with_self_ty(cx.tcx, dummy_self);
+ cx = cx.print_def_path(trait_ref.def_id, trait_ref.substs)?;
+ }
+ ty::ExistentialPredicate::Projection(projection) => {
+ let name = cx.tcx.associated_item(projection.item_def_id).name;
+ cx.push("p");
+ cx.push_ident(name.as_str());
+ cx = match projection.term {
+ ty::Term::Ty(ty) => ty.print(cx),
+ ty::Term::Const(c) => c.print(cx),
+ }?;
+ }
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ cx = cx.print_def_path(*def_id, &[])?;
+ }
+ }
+ }
+ Ok(cx)
+ })?;
+
+ self.push("E");
+ Ok(self)
+ }
+
+ fn print_const(mut self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+ // We only mangle a typed value if the const can be evaluated.
+ let ct = ct.eval(self.tcx, ty::ParamEnv::reveal_all());
+ match ct.kind() {
+ ty::ConstKind::Value(_) => {}
+
+ // Placeholders (should be demangled as `_`).
+ // NOTE(eddyb) despite `Unevaluated` having a `DefId` (and therefore
+ // a path), even for it we still need to encode a placeholder, as
+ // the path could refer back to e.g. an `impl` using the constant.
+ ty::ConstKind::Unevaluated(_)
+ | ty::ConstKind::Param(_)
+ | ty::ConstKind::Infer(_)
+ | ty::ConstKind::Bound(..)
+ | ty::ConstKind::Placeholder(_)
+ | ty::ConstKind::Error(_) => {
+ // Never cached (single-character).
+ self.push("p");
+ return Ok(self);
+ }
+ }
+
+ if let Some(&i) = self.consts.get(&ct) {
+ return self.print_backref(i);
+ }
+
+ let start = self.out.len();
+ let ty = ct.ty();
+
+ match ty.kind() {
+ ty::Uint(_) | ty::Int(_) | ty::Bool | ty::Char => {
+ self = ty.print(self)?;
+
+ let mut bits = ct.eval_bits(self.tcx, ty::ParamEnv::reveal_all(), ty);
+
+ // Negative integer values are mangled using `n` as a "sign prefix".
+ if let ty::Int(ity) = ty.kind() {
+ let val =
+ Integer::from_int_ty(&self.tcx, *ity).size().sign_extend(bits) as i128;
+ if val < 0 {
+ self.push("n");
+ }
+ bits = val.unsigned_abs();
+ }
+
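+ // e.g. (illustrative) `1u32` mangles as `m1_` and `-1i32` as `ln1_`.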
+ let _ = write!(self.out, "{:x}_", bits);
+ }
+
+ // FIXME(valtrees): Remove the special case for `str`
+ // here and fully support unsized constants.
+ ty::Ref(_, inner_ty, mutbl) => {
+ self.push(match mutbl {
+ hir::Mutability::Not => "R",
+ hir::Mutability::Mut => "Q",
+ });
+
+ match inner_ty.kind() {
+ ty::Str if *mutbl == hir::Mutability::Not => {
+ match ct.kind() {
+ ty::ConstKind::Value(valtree) => {
+ let slice =
+ valtree.try_to_raw_bytes(self.tcx(), ty).unwrap_or_else(|| {
+ bug!(
+ "expected to get raw bytes from valtree {:?} for type {:}",
+ valtree, ty
+ )
+ });
+ let s = std::str::from_utf8(slice).expect("non utf8 str from miri");
+
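+ // e.g. (illustrative) `"rust"` is encoded as `e72757374_`.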
+ self.push("e");
+
+ // FIXME(eddyb) use a specialized hex-encoding loop.
+ for byte in s.bytes() {
+ let _ = write!(self.out, "{:02x}", byte);
+ }
+
+ self.push("_");
+ }
+
+ _ => {
+ bug!("symbol_names: unsupported `&str` constant: {:?}", ct);
+ }
+ }
+ }
+ _ => {
+ let pointee_ty = ct
+ .ty()
+ .builtin_deref(true)
+ .expect("tried to dereference on non-ptr type")
+ .ty;
+ let dereferenced_const =
+ self.tcx.mk_const(ty::ConstS { kind: ct.kind(), ty: pointee_ty });
+ self = dereferenced_const.print(self)?;
+ }
+ }
+ }
+
+ ty::Array(..) | ty::Tuple(..) | ty::Adt(..) | ty::Slice(_) => {
+ let contents = self.tcx.destructure_const(ct);
+ let fields = contents.fields.iter().copied();
+
+ let print_field_list = |mut this: Self| {
+ for field in fields.clone() {
+ this = field.print(this)?;
+ }
+ this.push("E");
+ Ok(this)
+ };
+
+ match *ct.ty().kind() {
+ ty::Array(..) | ty::Slice(_) => {
+ self.push("A");
+ self = print_field_list(self)?;
+ }
+ ty::Tuple(..) => {
+ self.push("T");
+ self = print_field_list(self)?;
+ }
+ ty::Adt(def, substs) => {
+ let variant_idx =
+ contents.variant.expect("destructured const of adt without variant idx");
+ let variant_def = &def.variant(variant_idx);
+
+ self.push("V");
+ self = self.print_def_path(variant_def.def_id, substs)?;
+
+ match variant_def.ctor_kind {
+ CtorKind::Const => {
+ self.push("U");
+ }
+ CtorKind::Fn => {
+ self.push("T");
+ self = print_field_list(self)?;
+ }
+ CtorKind::Fictive => {
+ self.push("S");
+ for (field_def, field) in iter::zip(&variant_def.fields, fields) {
+ // HACK(eddyb) this mimics `path_append`,
+ // instead of simply using `field_def.ident`,
+ // just to be able to handle disambiguators.
+ let disambiguated_field =
+ self.tcx.def_key(field_def.did).disambiguated_data;
+ let field_name = disambiguated_field.data.get_opt_name();
+ self.push_disambiguator(
+ disambiguated_field.disambiguator as u64,
+ );
+ self.push_ident(field_name.unwrap_or(kw::Empty).as_str());
+
+ self = field.print(self)?;
+ }
+ self.push("E");
+ }
+ }
+ }
+ _ => unreachable!(),
+ }
+ }
+ _ => {
+ bug!("symbol_names: unsupported constant of type `{}` ({:?})", ct.ty(), ct);
+ }
+ }
+
+ // Only cache consts that do not refer to an enclosing
+ // binder (which would change depending on context).
+ if !ct.has_escaping_bound_vars() {
+ self.consts.insert(ct, start);
+ }
+ Ok(self)
+ }
+
+ fn path_crate(self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
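+ // e.g. (illustrative) a crate `foo` with some stable crate id hash
+ // mangles as `Csabc123_3foo`: `C`, `s<hash>_`, then `<len><name>`.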
+ self.push("C");
+ let stable_crate_id = self.tcx.def_path_hash(cnum.as_def_id()).stable_crate_id();
+ self.push_disambiguator(stable_crate_id.to_u64());
+ let name = self.tcx.crate_name(cnum);
+ self.push_ident(name.as_str());
+ Ok(self)
+ }
+
+ fn path_qualified(
+ mut self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ assert!(trait_ref.is_some());
+ let trait_ref = trait_ref.unwrap();
+
+ self.push("Y");
+ self = self_ty.print(self)?;
+ self.print_def_path(trait_ref.def_id, trait_ref.substs)
+ }
+
+ fn path_append_impl(
+ self,
+ _: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _: &DisambiguatedDefPathData,
+ _: Ty<'tcx>,
+ _: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ // Inlined into `print_impl_path`
+ unreachable!()
+ }
+
+ fn path_append(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error> {
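+ // e.g. (illustrative) a free fn `bar` in crate `foo` yields
+ // `NvCsabc123_3foo3bar`, where `Nv` marks a value-namespace segment.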
+ let ns = match disambiguated_data.data {
+ // Extern block segments can be skipped, names from extern blocks
+ // are effectively living in their parent modules.
+ DefPathData::ForeignMod => return print_prefix(self),
+
+ // Uppercase categories are more stable than lowercase ones.
+ DefPathData::TypeNs(_) => 't',
+ DefPathData::ValueNs(_) => 'v',
+ DefPathData::ClosureExpr => 'C',
+ DefPathData::Ctor => 'c',
+ DefPathData::AnonConst => 'k',
+ DefPathData::ImplTrait => 'i',
+
+ // These should never show up as `path_append` arguments.
+ DefPathData::CrateRoot
+ | DefPathData::Use
+ | DefPathData::GlobalAsm
+ | DefPathData::Impl
+ | DefPathData::MacroNs(_)
+ | DefPathData::LifetimeNs(_) => {
+ bug!("symbol_names: unexpected DefPathData: {:?}", disambiguated_data.data)
+ }
+ };
+
+ let name = disambiguated_data.data.get_opt_name();
+
+ self.path_append_ns(
+ print_prefix,
+ ns,
+ disambiguated_data.disambiguator as u64,
+ name.unwrap_or(kw::Empty).as_str(),
+ )
+ }
+
+ fn path_generic_args(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
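+ // e.g. (illustrative) `Option<u32>` prints as `I`, the `Option`
+ // path, `m` (for `u32`), then `E`.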
+ // Don't print any regions if they're all erased.
+ let print_regions = args.iter().any(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(r) => !r.is_erased(),
+ _ => false,
+ });
+ let args = args.iter().cloned().filter(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(_) => print_regions,
+ _ => true,
+ });
+
+ if args.clone().next().is_none() {
+ return print_prefix(self);
+ }
+
+ self.push("I");
+ self = print_prefix(self)?;
+ for arg in args {
+ match arg.unpack() {
+ GenericArgKind::Lifetime(lt) => {
+ self = lt.print(self)?;
+ }
+ GenericArgKind::Type(ty) => {
+ self = ty.print(self)?;
+ }
+ GenericArgKind::Const(c) => {
+ self.push("K");
+ self = c.print(self)?;
+ }
+ }
+ }
+ self.push("E");
+
+ Ok(self)
+ }
+}
diff --git a/compiler/rustc_target/Cargo.toml b/compiler/rustc_target/Cargo.toml
new file mode 100644
index 000000000..162376af4
--- /dev/null
+++ b/compiler/rustc_target/Cargo.toml
@@ -0,0 +1,14 @@
+[package]
+name = "rustc_target"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+bitflags = "1.2.1"
+tracing = "0.1"
+serde_json = "1.0.59"
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_span = { path = "../rustc_span" }
+rustc_index = { path = "../rustc_index" }
diff --git a/compiler/rustc_target/README.md b/compiler/rustc_target/README.md
new file mode 100644
index 000000000..ca72a89da
--- /dev/null
+++ b/compiler/rustc_target/README.md
@@ -0,0 +1,6 @@
+`rustc_target` contains some very low-level details that are
+specific to different compilation targets, such as ABI, layout, and
+target specification details.
+
+For more information about how rustc works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/
diff --git a/compiler/rustc_target/src/abi/call/aarch64.rs b/compiler/rustc_target/src/abi/call/aarch64.rs
new file mode 100644
index 000000000..4613a459c
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/aarch64.rs
@@ -0,0 +1,86 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
+ let size = arg.layout.size;
+
+ // Ensure we have at most four uniquely addressable members.
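+ // (e.g. a struct of four `f64` fields qualifies; five do not).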
+ if size > unit.size.checked_mul(4, cx).unwrap() {
+ return None;
+ }
+
+ let valid_unit = match unit.kind {
+ RegKind::Integer => false,
+ RegKind::Float => true,
+ RegKind::Vector => size.bits() == 64 || size.bits() == 128,
+ };
+
+ valid_unit.then_some(Uniform { unit, total: size })
+ })
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ return;
+ }
+ if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
+ ret.cast_to(uniform);
+ return;
+ }
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ ret.cast_to(Uniform { unit: Reg::i64(), total: size });
+ return;
+ }
+ ret.make_indirect();
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(32);
+ return;
+ }
+ if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
+ arg.cast_to(uniform);
+ return;
+ }
+ let size = arg.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ arg.cast_to(Uniform { unit: Reg::i64(), total: size });
+ return;
+ }
+ arg.make_indirect();
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/amdgpu.rs b/compiler/rustc_target/src/abi/call/amdgpu.rs
new file mode 100644
index 000000000..9be97476c
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/amdgpu.rs
@@ -0,0 +1,35 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn classify_ret<'a, Ty, C>(_cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ ret.extend_integer_width_to(32);
+}
+
+fn classify_arg<'a, Ty, C>(_cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.extend_integer_width_to(32);
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/arm.rs b/compiler/rustc_target/src/abi/call/arm.rs
new file mode 100644
index 000000000..e66c2132b
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/arm.rs
@@ -0,0 +1,97 @@
+use crate::abi::call::{ArgAbi, Conv, FnAbi, Reg, RegKind, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
+fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) -> Option<Uniform>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
+ let size = arg.layout.size;
+
+ // Ensure we have at most four uniquely addressable members.
+ if size > unit.size.checked_mul(4, cx).unwrap() {
+ return None;
+ }
+
+ let valid_unit = match unit.kind {
+ RegKind::Integer => false,
+ RegKind::Float => true,
+ RegKind::Vector => size.bits() == 64 || size.bits() == 128,
+ };
+
+ valid_unit.then_some(Uniform { unit, total: size })
+ })
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, vfp: bool)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ return;
+ }
+
+ if vfp {
+ if let Some(uniform) = is_homogeneous_aggregate(cx, ret) {
+ ret.cast_to(uniform);
+ return;
+ }
+ }
+
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 32 {
+ ret.cast_to(Uniform { unit: Reg::i32(), total: size });
+ return;
+ }
+ ret.make_indirect();
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, vfp: bool)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(32);
+ return;
+ }
+
+ if vfp {
+ if let Some(uniform) = is_homogeneous_aggregate(cx, arg) {
+ arg.cast_to(uniform);
+ return;
+ }
+ }
+
+ let align = arg.layout.align.abi.bytes();
+ let total = arg.layout.size;
+ arg.cast_to(Uniform { unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, total });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ // If this is a target with a hard-float ABI, and the function is not
+ // explicitly `extern "aapcs"` or variadic, then we must use the VFP
+ // registers for homogeneous aggregates.
+ let vfp = cx.target_spec().llvm_target.ends_with("hf")
+ && fn_abi.conv != Conv::ArmAapcs
+ && !fn_abi.c_variadic;
+
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, vfp);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, vfp);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/avr.rs b/compiler/rustc_target/src/abi/call/avr.rs
new file mode 100644
index 000000000..c1f7a1e3a
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/avr.rs
@@ -0,0 +1,59 @@
+//! LLVM-frontend specific AVR calling convention implementation.
+//!
+//! # Current calling convention ABI
+//!
+//! Inherited from Clang's `clang::DefaultABIInfo` implementation, self-described
+//! as:
+//!
+//! > the default implementation for ABI specific details. This implementation
+//! > provides information which results in
+//! > self-consistent and sensible LLVM IR generation, but does not
+//! > conform to any particular ABI.
+//! >
+//! > - Doxygen documentation of `clang::DefaultABIInfo`
+//!
+//! This calling convention may not match AVR-GCC in all cases.
+//!
+//! In the future, an AVR-GCC compatible argument classification ABI should be
+//! adopted in both Rust and Clang.
+//!
+//! *NOTE*: This module currently implements the same calling convention
+//! that Clang uses for AVR - the default, simple, unspecialized
+//! ABI implementation available to all targets. This ABI is not
+//! binary-compatible with AVR-GCC. Once LLVM [PR46140](https://bugs.llvm.org/show_bug.cgi?id=46140)
+//! is completed, this module should be updated to match so that both Clang
+//! and Rust emit code to the same AVR-GCC compatible ABI.
+//!
+//! In particular, Clang and Rust may not match AVR-GCC's semantics when
+//! promoting arguments to indirect references. It is important
+//! to note that the core AVR ABI implementation within LLVM itself is ABI
+//! compatible with AVR-GCC - Rust and AVR-GCC only differ in the small amount
+//! of compiler frontend specific calling convention logic implemented here.
+
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret_ty<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() {
+ arg.make_indirect();
+ }
+}
+
+pub fn compute_abi_info<Ty>(fty: &mut FnAbi<'_, Ty>) {
+ if !fty.ret.is_ignore() {
+ classify_ret_ty(&mut fty.ret);
+ }
+
+ for arg in &mut fty.args {
+ if arg.is_ignore() {
+ continue;
+ }
+
+ classify_arg_ty(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/bpf.rs b/compiler/rustc_target/src/abi/call/bpf.rs
new file mode 100644
index 000000000..466c52553
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/bpf.rs
@@ -0,0 +1,31 @@
+// see https://github.com/llvm/llvm-project/blob/main/llvm/lib/Target/BPF/BPFCallingConv.td
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() || ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() || arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/hexagon.rs b/compiler/rustc_target/src/abi/call/hexagon.rs
new file mode 100644
index 000000000..8028443b8
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/hexagon.rs
@@ -0,0 +1,30 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/m68k.rs b/compiler/rustc_target/src/abi/call/m68k.rs
new file mode 100644
index 000000000..58fdc00b6
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/m68k.rs
@@ -0,0 +1,30 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() {
+ arg.make_indirect_byval();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/mips.rs b/compiler/rustc_target/src/abi/call/mips.rs
new file mode 100644
index 000000000..cc4431976
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/mips.rs
@@ -0,0 +1,51 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
+use crate::abi::{HasDataLayout, Size};
+
+fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ } else {
+ ret.make_indirect();
+ *offset += cx.data_layout().pointer_size;
+ }
+}
+
+fn classify_arg<Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ let dl = cx.data_layout();
+ let size = arg.layout.size;
+ let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+
+ if arg.layout.is_aggregate() {
+ arg.cast_to(Uniform { unit: Reg::i32(), total: size });
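+ // e.g. (illustrative) an 8-byte-aligned struct at offset 4 gets an
+ // `i32` pad so the data starts on a properly aligned boundary.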
+ if !offset.is_aligned(align) {
+ arg.pad_with(Reg::i32());
+ }
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+
+ *offset = offset.align_to(align) + size.align_to(align);
+}
+
+pub fn compute_abi_info<Ty, C>(cx: &C, fn_abi: &mut FnAbi<'_, Ty>)
+where
+ C: HasDataLayout,
+{
+ let mut offset = Size::ZERO;
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, &mut offset);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, &mut offset);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/mips64.rs b/compiler/rustc_target/src/abi/call/mips64.rs
new file mode 100644
index 000000000..cd54167aa
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/mips64.rs
@@ -0,0 +1,167 @@
+use crate::abi::call::{
+ ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode, Reg, Uniform,
+};
+use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
+
+fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
+ // Always sign extend u32 values on 64-bit mips
+ if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::Int(i, signed) = scalar.primitive() {
+ if !signed && i.size().bits() == 32 {
+ if let PassMode::Direct(ref mut attrs) = arg.mode {
+ attrs.ext(ArgExtension::Sext);
+ return;
+ }
+ }
+ }
+ }
+
+ arg.extend_integer_width_to(bits);
+}
+
+fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ match ret.layout.field(cx, i).abi {
+ abi::Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::F32 => Some(Reg::f32()),
+ abi::F64 => Some(Reg::f64()),
+ _ => None,
+ },
+ _ => None,
+ }
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ extend_integer_width_mips(ret, 64);
+ return;
+ }
+
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
+ // Unlike other architectures which return aggregates in registers, MIPS n64 limits the
+ // use of float registers to structures (not unions) containing exactly one or two
+ // float fields.
+
+ if let abi::FieldsShape::Arbitrary { .. } = ret.layout.fields {
+ if ret.layout.fields.count() == 1 {
+ if let Some(reg) = float_reg(cx, ret, 0) {
+ ret.cast_to(reg);
+ return;
+ }
+ } else if ret.layout.fields.count() == 2 {
+ if let Some(reg0) = float_reg(cx, ret, 0) {
+ if let Some(reg1) = float_reg(cx, ret, 1) {
+ ret.cast_to(CastTarget::pair(reg0, reg1));
+ return;
+ }
+ }
+ }
+ }
+
+ // Cast to a uniform int structure
+ ret.cast_to(Uniform { unit: Reg::i64(), total: size });
+ } else {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ extend_integer_width_mips(arg, 64);
+ return;
+ }
+
+ let dl = cx.data_layout();
+ let size = arg.layout.size;
+ let mut prefix = [None; 8];
+ let mut prefix_index = 0;
+
+ match arg.layout.fields {
+ abi::FieldsShape::Primitive => unreachable!(),
+ abi::FieldsShape::Array { .. } => {
+ // Arrays are passed indirectly
+ arg.make_indirect();
+ return;
+ }
+ abi::FieldsShape::Union(_) => {
+ // Unions are always treated as a series of 64-bit integer chunks
+ }
+ abi::FieldsShape::Arbitrary { .. } => {
+ // Structures are split up into a series of 64-bit integer chunks, but any aligned
+ // doubles not part of another aggregate are passed as floats.
+ let mut last_offset = Size::ZERO;
+
+ for i in 0..arg.layout.fields.count() {
+ let field = arg.layout.field(cx, i);
+ let offset = arg.layout.fields.offset(i);
+
+ // We only care about aligned doubles
+ if let abi::Abi::Scalar(scalar) = field.abi {
+ if let abi::F64 = scalar.primitive() {
+ if offset.is_aligned(dl.f64_align.abi) {
+ // Insert enough integers to cover [last_offset, offset)
+ assert!(last_offset.is_aligned(dl.f64_align.abi));
+ for _ in 0..((offset - last_offset).bits() / 64)
+ .min((prefix.len() - prefix_index) as u64)
+ {
+ prefix[prefix_index] = Some(Reg::i64());
+ prefix_index += 1;
+ }
+
+ if prefix_index == prefix.len() {
+ break;
+ }
+
+ prefix[prefix_index] = Some(Reg::f64());
+ prefix_index += 1;
+ last_offset = offset + Reg::f64().size;
+ }
+ }
+ }
+ }
+ }
+ };
+
+ // Extract first 8 chunks as the prefix
+ let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
+ arg.cast_to(CastTarget {
+ prefix,
+ rest: Uniform { unit: Reg::i64(), total: rest_size },
+ attrs: ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/mod.rs b/compiler/rustc_target/src/abi/call/mod.rs
new file mode 100644
index 000000000..577126a95
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/mod.rs
@@ -0,0 +1,734 @@
+use crate::abi::{self, Abi, Align, FieldsShape, Size};
+use crate::abi::{HasDataLayout, TyAbiInterface, TyAndLayout};
+use crate::spec::{self, HasTargetSpec};
+use rustc_span::Symbol;
+use std::fmt;
+
+mod aarch64;
+mod amdgpu;
+mod arm;
+mod avr;
+mod bpf;
+mod hexagon;
+mod m68k;
+mod mips;
+mod mips64;
+mod msp430;
+mod nvptx;
+mod nvptx64;
+mod powerpc;
+mod powerpc64;
+mod riscv;
+mod s390x;
+mod sparc;
+mod sparc64;
+mod wasm;
+mod x86;
+mod x86_64;
+mod x86_win64;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum PassMode {
+ /// Ignore the argument.
+ ///
+ /// The argument is either uninhabited or a ZST.
+ Ignore,
+ /// Pass the argument directly.
+ ///
+ /// The argument has a layout abi of `Scalar`, `Vector` or in rare cases `Aggregate`.
+ Direct(ArgAttributes),
+ /// Pass a pair's elements directly in two arguments.
+ ///
+ /// The argument has a layout abi of `ScalarPair`.
+ Pair(ArgAttributes, ArgAttributes),
+ /// Pass the argument after casting it, to either
+ /// a single uniform or a pair of registers.
+ Cast(CastTarget),
+ /// Pass the argument indirectly via a hidden pointer.
+ /// The `extra_attrs` value, if any, is for the extra data (vtable or length)
+ /// which indicates that it refers to an unsized rvalue.
+ /// `on_stack` defines whether the value should be passed at a fixed
+ /// stack offset in accordance with the ABI rather than passed using a
+ /// pointer. This corresponds to the `byval` LLVM argument attribute.
+ Indirect { attrs: ArgAttributes, extra_attrs: Option<ArgAttributes>, on_stack: bool },
+}
+
+// Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
+// of this module
+pub use attr_impl::ArgAttribute;
+
+#[allow(non_upper_case_globals)]
+#[allow(unused)]
+mod attr_impl {
+ // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
+ bitflags::bitflags! {
+ #[derive(Default, HashStable_Generic)]
+ pub struct ArgAttribute: u16 {
+ const NoAlias = 1 << 1;
+ const NoCapture = 1 << 2;
+ const NonNull = 1 << 3;
+ const ReadOnly = 1 << 4;
+ const InReg = 1 << 5;
+ // Due to past miscompiles in LLVM, we use a separate attribute for
+ // &mut arguments, so that the codegen backend can decide whether
+ // or not to actually emit the attribute. It can also be controlled
+ // with the `-Zmutable-noalias` debugging option.
+ const NoAliasMutRef = 1 << 6;
+ const NoUndef = 1 << 7;
+ }
+ }
+}
+
+/// Sometimes an ABI requires small integers to be extended to a full or partial register. This enum
+/// defines if this extension should be zero-extension or sign-extension when necessary. When it is
+/// not necessary to extend the argument, this enum is ignored.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum ArgExtension {
+ None,
+ Zext,
+ Sext,
+}
+
+/// A compact representation of LLVM attributes (at least those relevant for this module)
+/// that can be manipulated without interacting with LLVM's Attribute machinery.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct ArgAttributes {
+ pub regular: ArgAttribute,
+ pub arg_ext: ArgExtension,
+ /// The minimum size of the pointee, guaranteed to be valid for the duration of the whole call
+ /// (corresponding to LLVM's dereferenceable and dereferenceable_or_null attributes).
+ pub pointee_size: Size,
+ pub pointee_align: Option<Align>,
+}
+
+impl ArgAttributes {
+ pub fn new() -> Self {
+ ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ }
+ }
+
+ pub fn ext(&mut self, ext: ArgExtension) -> &mut Self {
+ assert!(
+ self.arg_ext == ArgExtension::None || self.arg_ext == ext,
+ "cannot set {:?} when {:?} is already set",
+ ext,
+ self.arg_ext
+ );
+ self.arg_ext = ext;
+ self
+ }
+
+ pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
+ self.regular |= attr;
+ self
+ }
+
+ pub fn contains(&self, attr: ArgAttribute) -> bool {
+ self.regular.contains(attr)
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum RegKind {
+ Integer,
+ Float,
+ Vector,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct Reg {
+ pub kind: RegKind,
+ pub size: Size,
+}
+
+macro_rules! reg_ctor {
+ ($name:ident, $kind:ident, $bits:expr) => {
+ pub fn $name() -> Reg {
+ Reg { kind: RegKind::$kind, size: Size::from_bits($bits) }
+ }
+ };
+}
+
+impl Reg {
+ reg_ctor!(i8, Integer, 8);
+ reg_ctor!(i16, Integer, 16);
+ reg_ctor!(i32, Integer, 32);
+ reg_ctor!(i64, Integer, 64);
+ reg_ctor!(i128, Integer, 128);
+
+ reg_ctor!(f32, Float, 32);
+ reg_ctor!(f64, Float, 64);
+}
+
+impl Reg {
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+ let dl = cx.data_layout();
+ match self.kind {
+ RegKind::Integer => match self.size.bits() {
+ 1 => dl.i1_align.abi,
+ 2..=8 => dl.i8_align.abi,
+ 9..=16 => dl.i16_align.abi,
+ 17..=32 => dl.i32_align.abi,
+ 33..=64 => dl.i64_align.abi,
+ 65..=128 => dl.i128_align.abi,
+ _ => panic!("unsupported integer: {:?}", self),
+ },
+ RegKind::Float => match self.size.bits() {
+ 32 => dl.f32_align.abi,
+ 64 => dl.f64_align.abi,
+ _ => panic!("unsupported float: {:?}", self),
+ },
+ RegKind::Vector => dl.vector_align(self.size).abi,
+ }
+ }
+}
+
+/// An argument passed entirely in registers of the
+/// same kind (e.g., HFA / HVA on PPC64 and AArch64).
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct Uniform {
+ pub unit: Reg,
+
+ /// The total size of the argument, which can be:
+ /// * equal to `unit.size` (one scalar/vector),
+ /// * a multiple of `unit.size` (an array of scalar/vectors),
+ /// * if `unit.kind` is `Integer`, the last element
+ /// can be shorter, i.e., `{ i64, i64, i32 }` for
+ /// 64-bit integers with a total size of 20 bytes.
+ pub total: Size,
+}
+
+impl From<Reg> for Uniform {
+ fn from(unit: Reg) -> Uniform {
+ Uniform { unit, total: unit.size }
+ }
+}
+
+impl Uniform {
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+ self.unit.align(cx)
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct CastTarget {
+ pub prefix: [Option<Reg>; 8],
+ pub rest: Uniform,
+ pub attrs: ArgAttributes,
+}
+
+impl From<Reg> for CastTarget {
+ fn from(unit: Reg) -> CastTarget {
+ CastTarget::from(Uniform::from(unit))
+ }
+}
+
+impl From<Uniform> for CastTarget {
+ fn from(uniform: Uniform) -> CastTarget {
+ CastTarget {
+ prefix: [None; 8],
+ rest: uniform,
+ attrs: ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ }
+ }
+}
+
+impl CastTarget {
+ pub fn pair(a: Reg, b: Reg) -> CastTarget {
+ CastTarget {
+ prefix: [Some(a), None, None, None, None, None, None, None],
+ rest: Uniform::from(b),
+ attrs: ArgAttributes {
+ regular: ArgAttribute::default(),
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ }
+ }
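+ // e.g. (illustrative) `CastTarget::pair(Reg::f32(), Reg::f64())`
+ // describes a `{ float, double }` pair.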
+
+ pub fn size<C: HasDataLayout>(&self, _cx: &C) -> Size {
+ let mut size = self.rest.total;
+ for reg in self.prefix.iter().flatten() {
+ size += reg.size;
+ }
+ size
+ }
+
+ pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
+ self.prefix
+ .iter()
+ .filter_map(|x| x.map(|reg| reg.align(cx)))
+ .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| {
+ acc.max(align)
+ })
+ }
+}
+
+/// Return value from the `homogeneous_aggregate` test function.
+#[derive(Copy, Clone, Debug)]
+pub enum HomogeneousAggregate {
+ /// Yes, all the "leaf fields" of this struct are passed in the
+ /// same way (specified in the `Reg` value).
+ Homogeneous(Reg),
+
+ /// There are no leaf fields at all.
+ NoData,
+}
+
+/// Error from the `homogeneous_aggregate` test function, indicating
+/// there are distinct leaf fields passed in different ways,
+/// or this is uninhabited.
+#[derive(Copy, Clone, Debug)]
+pub struct Heterogeneous;
+
+impl HomogeneousAggregate {
+ /// If this is a homogeneous aggregate, returns the homogeneous
+ /// unit, else `None`.
+ pub fn unit(self) -> Option<Reg> {
+ match self {
+ HomogeneousAggregate::Homogeneous(reg) => Some(reg),
+ HomogeneousAggregate::NoData => None,
+ }
+ }
+
+ /// Try to combine two `HomogeneousAggregate`s, e.g. from two fields in
+ /// the same `struct`. Only succeeds if only one of them has any data,
+ /// or both units are identical.
+ fn merge(self, other: HomogeneousAggregate) -> Result<HomogeneousAggregate, Heterogeneous> {
+ match (self, other) {
+ (x, HomogeneousAggregate::NoData) | (HomogeneousAggregate::NoData, x) => Ok(x),
+
+ (HomogeneousAggregate::Homogeneous(a), HomogeneousAggregate::Homogeneous(b)) => {
+ if a != b {
+ return Err(Heterogeneous);
+ }
+ Ok(self)
+ }
+ }
+ }
+}
+
+impl<'a, Ty> TyAndLayout<'a, Ty> {
+ fn is_aggregate(&self) -> bool {
+ match self.abi {
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
+ }
+ }
+
+ /// Returns `Homogeneous` if this layout is an aggregate containing fields of
+ /// only a single type (e.g., `(u32, u32)`). Such aggregates are often
+ /// special-cased in ABIs.
+ ///
+ /// Note: We generally ignore fields of zero-sized type when computing
+ /// this value (see #56877).
+ ///
+ /// This is public so that it can be used in unit tests, but
+ /// should generally only be relevant to the ABI details of
+ /// specific targets.
+ pub fn homogeneous_aggregate<C>(&self, cx: &C) -> Result<HomogeneousAggregate, Heterogeneous>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ {
+ match self.abi {
+ Abi::Uninhabited => Err(Heterogeneous),
+
+ // The primitive for this algorithm.
+ Abi::Scalar(scalar) => {
+ let kind = match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => RegKind::Integer,
+ abi::F32 | abi::F64 => RegKind::Float,
+ };
+ Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
+ }
+
+ Abi::Vector { .. } => {
+ assert!(!self.is_zst());
+ Ok(HomogeneousAggregate::Homogeneous(Reg {
+ kind: RegKind::Vector,
+ size: self.size,
+ }))
+ }
+
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+ // Helper for computing `homogeneous_aggregate`, allowing a custom
+ // starting offset (used below for handling variants).
+ let from_fields_at =
+ |layout: Self,
+ start: Size|
+ -> Result<(HomogeneousAggregate, Size), Heterogeneous> {
+ let is_union = match layout.fields {
+ FieldsShape::Primitive => {
+ unreachable!("aggregates can't have `FieldsShape::Primitive`")
+ }
+ FieldsShape::Array { count, .. } => {
+ assert_eq!(start, Size::ZERO);
+
+ let result = if count > 0 {
+ layout.field(cx, 0).homogeneous_aggregate(cx)?
+ } else {
+ HomogeneousAggregate::NoData
+ };
+ return Ok((result, layout.size));
+ }
+ FieldsShape::Union(_) => true,
+ FieldsShape::Arbitrary { .. } => false,
+ };
+
+ let mut result = HomogeneousAggregate::NoData;
+ let mut total = start;
+
+ for i in 0..layout.fields.count() {
+ if !is_union && total != layout.fields.offset(i) {
+ return Err(Heterogeneous);
+ }
+
+ let field = layout.field(cx, i);
+
+ result = result.merge(field.homogeneous_aggregate(cx)?)?;
+
+ // Keep track of the offset (without padding).
+ let size = field.size;
+ if is_union {
+ total = total.max(size);
+ } else {
+ total += size;
+ }
+ }
+
+ Ok((result, total))
+ };
+
+ let (mut result, mut total) = from_fields_at(*self, Size::ZERO)?;
+
+ match &self.variants {
+ abi::Variants::Single { .. } => {}
+ abi::Variants::Multiple { variants, .. } => {
+ // Treat enum variants like union members.
+ // HACK(eddyb) pretend the `enum` field (discriminant)
+ // is at the start of every variant (otherwise the gap
+ // at the start of all variants would disqualify them).
+ //
+ // NB: for all tagged `enum`s (which include all non-C-like
+ // `enum`s with defined FFI representation), this will
+ // match the homogeneous computation on the equivalent
+ // `struct { tag; union { variant1; ... } }` and/or
+ // `union { struct { tag; variant1; } ... }`
+ // (the offsets of variant fields should be identical
+ // between the two for either to be a homogeneous aggregate).
+ let variant_start = total;
+ for variant_idx in variants.indices() {
+ let (variant_result, variant_total) =
+ from_fields_at(self.for_variant(cx, variant_idx), variant_start)?;
+
+ result = result.merge(variant_result)?;
+ total = total.max(variant_total);
+ }
+ }
+ }
+
+ // There needs to be no padding.
+ if total != self.size {
+ Err(Heterogeneous)
+ } else {
+ match result {
+ HomogeneousAggregate::Homogeneous(_) => {
+ assert_ne!(total, Size::ZERO);
+ }
+ HomogeneousAggregate::NoData => {
+ assert_eq!(total, Size::ZERO);
+ }
+ }
+ Ok(result)
+ }
+ }
+ }
+ }
+}
+
+/// Information about how to pass an argument to,
+/// or return a value from, a function, under some ABI.
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct ArgAbi<'a, Ty> {
+ pub layout: TyAndLayout<'a, Ty>,
+
+ /// Dummy argument, which is emitted before the real argument.
+ pub pad: Option<Reg>,
+
+ pub mode: PassMode,
+}
+
+impl<'a, Ty> ArgAbi<'a, Ty> {
+ pub fn new(
+ cx: &impl HasDataLayout,
+ layout: TyAndLayout<'a, Ty>,
+ scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
+ ) -> Self {
+ let mode = match layout.abi {
+ Abi::Uninhabited => PassMode::Ignore,
+ Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
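+ // The second element starts at the first element's size rounded
+ // up to the second element's alignment.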
+ Abi::ScalarPair(a, b) => PassMode::Pair(
+ scalar_attrs(&layout, a, Size::ZERO),
+ scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
+ ),
+ Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+ Abi::Aggregate { .. } => PassMode::Direct(ArgAttributes::new()),
+ };
+ ArgAbi { layout, pad: None, mode }
+ }
+
+ fn indirect_pass_mode(layout: &TyAndLayout<'a, Ty>) -> PassMode {
+ let mut attrs = ArgAttributes::new();
+
+ // For non-immediate arguments the callee gets its own copy of
+ // the value on the stack, so there are no aliases. It's also
+ // program-invisible, so it can't possibly be captured.
+ attrs
+ .set(ArgAttribute::NoAlias)
+ .set(ArgAttribute::NoCapture)
+ .set(ArgAttribute::NonNull)
+ .set(ArgAttribute::NoUndef);
+ attrs.pointee_size = layout.size;
+ // FIXME(eddyb) We should be doing this, but at least on
+ // i686-pc-windows-msvc, it results in wrong stack offsets.
+ // attrs.pointee_align = Some(layout.align.abi);
+
+ let extra_attrs = layout.is_unsized().then_some(ArgAttributes::new());
+
+ PassMode::Indirect { attrs, extra_attrs, on_stack: false }
+ }
+
+ pub fn make_indirect(&mut self) {
+ match self.mode {
+ PassMode::Direct(_) | PassMode::Pair(_, _) => {}
+ PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: false } => return,
+ _ => panic!("Tried to make {:?} indirect", self.mode),
+ }
+
+ self.mode = Self::indirect_pass_mode(&self.layout);
+ }
+
+ pub fn make_indirect_byval(&mut self) {
+ self.make_indirect();
+ match self.mode {
+ PassMode::Indirect { attrs: _, extra_attrs: _, ref mut on_stack } => {
+ *on_stack = true;
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ pub fn extend_integer_width_to(&mut self, bits: u64) {
+ // Only integers have signedness
+ if let Abi::Scalar(scalar) = self.layout.abi {
+ if let abi::Int(i, signed) = scalar.primitive() {
+ if i.size().bits() < bits {
+ if let PassMode::Direct(ref mut attrs) = self.mode {
+ if signed {
+ attrs.ext(ArgExtension::Sext)
+ } else {
+ attrs.ext(ArgExtension::Zext)
+ };
+ }
+ }
+ }
+ }
+ }
+
+ pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
+ self.mode = PassMode::Cast(target.into());
+ }
+
+ pub fn pad_with(&mut self, reg: Reg) {
+ self.pad = Some(reg);
+ }
+
+ pub fn is_indirect(&self) -> bool {
+ matches!(self.mode, PassMode::Indirect { .. })
+ }
+
+ pub fn is_sized_indirect(&self) -> bool {
+ matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ })
+ }
+
+ pub fn is_unsized_indirect(&self) -> bool {
+ matches!(self.mode, PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ })
+ }
+
+ pub fn is_ignore(&self) -> bool {
+ matches!(self.mode, PassMode::Ignore)
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum Conv {
+ // General language calling conventions, for which every target
+ // should have its own backend (e.g. LLVM) support.
+ C,
+ Rust,
+
+ /// For things unlikely to be called, where smaller caller codegen is
+ /// preferred over raw speed.
+ /// Stronger than just `#[cold]` because `fn` pointers might be incompatible.
+ RustCold,
+
+ // Target-specific calling conventions.
+ ArmAapcs,
+ CCmseNonSecureCall,
+
+ Msp430Intr,
+
+ PtxKernel,
+
+ X86Fastcall,
+ X86Intr,
+ X86Stdcall,
+ X86ThisCall,
+ X86VectorCall,
+
+ X86_64SysV,
+ X86_64Win64,
+
+ AmdGpuKernel,
+ AvrInterrupt,
+ AvrNonBlockingInterrupt,
+}
+
+/// Metadata describing how the arguments to a native function
+/// should be passed in order to respect the native ABI.
+///
+/// I will do my best to describe this structure, but these
+/// comments are reverse-engineered and may be inaccurate. -NDM
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct FnAbi<'a, Ty> {
+ /// The LLVM types of each argument.
+ pub args: Vec<ArgAbi<'a, Ty>>,
+
+ /// LLVM return type.
+ pub ret: ArgAbi<'a, Ty>,
+
+ pub c_variadic: bool,
+
+ /// The count of non-variadic arguments.
+ ///
+ /// Should only be different from args.len() when c_variadic is true.
+ /// This can be used to know whether an argument is variadic or not.
+ pub fixed_count: usize,
+
+ pub conv: Conv,
+
+ pub can_unwind: bool,
+}
+
+/// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
+#[derive(Copy, Clone, Debug, HashStable_Generic)]
+pub enum AdjustForForeignAbiError {
+ /// Target architecture doesn't support "foreign" (i.e. non-Rust) ABIs.
+ Unsupported { arch: Symbol, abi: spec::abi::Abi },
+}
+
+impl fmt::Display for AdjustForForeignAbiError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Unsupported { arch, abi } => {
+ write!(f, "target architecture {:?} does not support `extern {}` ABI", arch, abi)
+ }
+ }
+ }
+}
+
+impl<'a, Ty> FnAbi<'a, Ty> {
+ pub fn adjust_for_foreign_abi<C>(
+ &mut self,
+ cx: &C,
+ abi: spec::abi::Abi,
+ ) -> Result<(), AdjustForForeignAbiError>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+ {
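+ // The `x86-interrupt` calling convention requires its first
+ // argument (the interrupt frame) to be passed by value on the stack.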
+ if abi == spec::abi::Abi::X86Interrupt {
+ if let Some(arg) = self.args.first_mut() {
+ arg.make_indirect_byval();
+ }
+ return Ok(());
+ }
+
+ match &cx.target_spec().arch[..] {
+ "x86" => {
+ let flavor = if let spec::abi::Abi::Fastcall { .. }
+ | spec::abi::Abi::Vectorcall { .. } = abi
+ {
+ x86::Flavor::FastcallOrVectorcall
+ } else {
+ x86::Flavor::General
+ };
+ x86::compute_abi_info(cx, self, flavor);
+ }
+ "x86_64" => match abi {
+ spec::abi::Abi::SysV64 { .. } => x86_64::compute_abi_info(cx, self),
+ spec::abi::Abi::Win64 { .. } => x86_win64::compute_abi_info(self),
+ _ => {
+ if cx.target_spec().is_like_windows {
+ x86_win64::compute_abi_info(self)
+ } else {
+ x86_64::compute_abi_info(cx, self)
+ }
+ }
+ },
+ "aarch64" => aarch64::compute_abi_info(cx, self),
+ "amdgpu" => amdgpu::compute_abi_info(cx, self),
+ "arm" => arm::compute_abi_info(cx, self),
+ "avr" => avr::compute_abi_info(self),
+ "m68k" => m68k::compute_abi_info(self),
+ "mips" => mips::compute_abi_info(cx, self),
+ "mips64" => mips64::compute_abi_info(cx, self),
+ "powerpc" => powerpc::compute_abi_info(self),
+ "powerpc64" => powerpc64::compute_abi_info(cx, self),
+ "s390x" => s390x::compute_abi_info(cx, self),
+ "msp430" => msp430::compute_abi_info(self),
+ "sparc" => sparc::compute_abi_info(cx, self),
+ "sparc64" => sparc64::compute_abi_info(cx, self),
+ "nvptx" => nvptx::compute_abi_info(self),
+ "nvptx64" => {
+ if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::PtxKernel {
+ nvptx64::compute_ptx_kernel_abi_info(cx, self)
+ } else {
+ nvptx64::compute_abi_info(self)
+ }
+ }
+ "hexagon" => hexagon::compute_abi_info(self),
+ "riscv32" | "riscv64" => riscv::compute_abi_info(cx, self),
+ "wasm32" | "wasm64" => {
+ if cx.target_spec().adjust_abi(abi) == spec::abi::Abi::Wasm {
+ wasm::compute_wasm_abi_info(self)
+ } else {
+ wasm::compute_c_abi_info(cx, self)
+ }
+ }
+ "asmjs" => wasm::compute_c_abi_info(cx, self),
+ "bpf" => bpf::compute_abi_info(self),
+ arch => {
+ return Err(AdjustForForeignAbiError::Unsupported {
+ arch: Symbol::intern(arch),
+ abi,
+ });
+ }
+ }
+
+ Ok(())
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/msp430.rs b/compiler/rustc_target/src/abi/call/msp430.rs
new file mode 100644
index 000000000..0ba73657b
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/msp430.rs
@@ -0,0 +1,39 @@
+// Reference: MSP430 Embedded Application Binary Interface
+// https://www.ti.com/lit/an/slaa534a/slaa534a.pdf
+
+use crate::abi::call::{ArgAbi, FnAbi};
+
+// 3.5 Structures or Unions Passed and Returned by Reference
+//
+// "Structures (including classes) and unions larger than 32 bits are passed and
+// returned by reference. To pass a structure or union by reference, the caller
+// places its address in the appropriate location: either in a register or on
+// the stack, according to its position in the argument list. (..)"
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(16);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(16);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/nvptx.rs b/compiler/rustc_target/src/abi/call/nvptx.rs
new file mode 100644
index 000000000..428dd95bb
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/nvptx.rs
@@ -0,0 +1,33 @@
+// Reference: PTX Writer's Guide to Interoperability
+// https://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
+
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/nvptx64.rs b/compiler/rustc_target/src/abi/call/nvptx64.rs
new file mode 100644
index 000000000..fc16f1c97
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/nvptx64.rs
@@ -0,0 +1,64 @@
+use crate::abi::call::{ArgAbi, FnAbi, PassMode, Reg, Size, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
+ arg.make_indirect();
+ }
+}
+
+fn classify_arg_kernel<'a, Ty, C>(_cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if matches!(arg.mode, PassMode::Pair(..)) && (arg.layout.is_adt() || arg.layout.is_tuple()) {
+ let align_bytes = arg.layout.align.abi.bytes();
+
+ let unit = match align_bytes {
+ 1 => Reg::i8(),
+ 2 => Reg::i16(),
+ 4 => Reg::i32(),
+ 8 => Reg::i64(),
+ 16 => Reg::i128(),
+ _ => unreachable!("align is a power of 2 no larger than 16 bytes"),
+ };
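+ // e.g. an 8-byte-aligned ADT pair is cast to two `i64` units (16 bytes).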
+ arg.cast_to(Uniform { unit, total: Size::from_bytes(2 * align_bytes) });
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
+
+pub fn compute_ptx_kernel_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.layout.is_unit() && !fn_abi.ret.layout.is_never() {
+ panic!("Kernels should not return anything other than () or !");
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg_kernel(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/powerpc.rs b/compiler/rustc_target/src/abi/call/powerpc.rs
new file mode 100644
index 000000000..27a5c6d2f
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/powerpc.rs
@@ -0,0 +1,30 @@
+use crate::abi::call::{ArgAbi, FnAbi};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if ret.layout.is_aggregate() {
+ ret.make_indirect();
+ } else {
+ ret.extend_integer_width_to(32);
+ }
+}
+
+fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ if arg.layout.is_aggregate() {
+ arg.make_indirect();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+}
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/powerpc64.rs b/compiler/rustc_target/src/abi/call/powerpc64.rs
new file mode 100644
index 000000000..c22ef9c8f
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/powerpc64.rs
@@ -0,0 +1,141 @@
+// FIXME:
+// Alignment of 128 bit types is not currently handled, this will
+// need to be fixed when PowerPC vector support is added.
+
+use crate::abi::call::{ArgAbi, FnAbi, Reg, RegKind, Uniform};
+use crate::abi::{Endian, HasDataLayout, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum ABI {
+ ELFv1, // original ABI used for powerpc64 (big-endian)
+ ELFv2, // newer ABI used for powerpc64le and musl (both endians)
+}
+use ABI::*;
+
+fn is_homogeneous_aggregate<'a, Ty, C>(
+ cx: &C,
+ arg: &mut ArgAbi<'a, Ty>,
+ abi: ABI,
+) -> Option<Uniform>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()).and_then(|unit| {
+ // ELFv1 only passes one-member aggregates transparently.
+ // ELFv2 passes up to eight uniquely addressable members.
+ if (abi == ELFv1 && arg.layout.size > unit.size)
+ || arg.layout.size > unit.size.checked_mul(8, cx).unwrap()
+ {
+ return None;
+ }
+
+ let valid_unit = match unit.kind {
+ RegKind::Integer => false,
+ RegKind::Float => true,
+ RegKind::Vector => arg.layout.size.bits() == 128,
+ };
+
+ valid_unit.then_some(Uniform { unit, total: arg.layout.size })
+ })
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, abi: ABI)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(64);
+ return;
+ }
+
+ // The ELFv1 ABI doesn't return aggregates in registers
+ if abi == ELFv1 {
+ ret.make_indirect();
+ return;
+ }
+
+ if let Some(uniform) = is_homogeneous_aggregate(cx, ret, abi) {
+ ret.cast_to(uniform);
+ return;
+ }
+
+ let size = ret.layout.size;
+ let bits = size.bits();
+ if bits <= 128 {
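+        // On big-endian targets, return the value in a single integer of its
+        // exact size: the bytes occupy the most-significant end of the
+        // register, so a padded unit would put them in the wrong place.
+        // Little-endian targets can use the smallest register that fits.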
+ let unit = if cx.data_layout().endian == Endian::Big {
+ Reg { kind: RegKind::Integer, size }
+ } else if bits <= 8 {
+ Reg::i8()
+ } else if bits <= 16 {
+ Reg::i16()
+ } else if bits <= 32 {
+ Reg::i32()
+ } else {
+ Reg::i64()
+ };
+
+ ret.cast_to(Uniform { unit, total: size });
+ return;
+ }
+
+ ret.make_indirect();
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, abi: ABI)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(64);
+ return;
+ }
+
+ if let Some(uniform) = is_homogeneous_aggregate(cx, arg, abi) {
+ arg.cast_to(uniform);
+ return;
+ }
+
+ let size = arg.layout.size;
+ let (unit, total) = if size.bits() <= 64 {
+ // Aggregates smaller than a doubleword should appear in
+ // the least-significant bits of the parameter doubleword.
+ (Reg { kind: RegKind::Integer, size }, size)
+ } else {
+ // Aggregates larger than a doubleword should be padded
+ // at the tail to fill out a whole number of doublewords.
+ let reg_i64 = Reg::i64();
+ (reg_i64, size.align_to(reg_i64.align(cx)))
+ };
+
+ arg.cast_to(Uniform { unit, total });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ let abi = if cx.target_spec().env == "musl" {
+ ELFv2
+ } else {
+ match cx.data_layout().endian {
+ Endian::Big => ELFv1,
+ Endian::Little => ELFv2,
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, abi);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, abi);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/riscv.rs b/compiler/rustc_target/src/abi/call/riscv.rs
new file mode 100644
index 000000000..752b44f64
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/riscv.rs
@@ -0,0 +1,348 @@
+// Reference: RISC-V ELF psABI specification
+// https://github.com/riscv/riscv-elf-psabi-doc
+//
+// Reference: Clang RISC-V ELF psABI lowering code
+// https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
+
+use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
+use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::spec::HasTargetSpec;
+
+#[derive(Copy, Clone)]
+enum RegPassKind {
+ Float(Reg),
+ Integer(Reg),
+ Unknown,
+}
+
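+// How a value is lowered when the hardware floating-point calling convention
+// applies. For example, under lp64d (flen = 64), `struct { f64, f64 }` becomes
+// `FloatPair` (two FPRs) and `struct { f64, i32 }` becomes `MixedPair` (one
+// FPR plus one GPR).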
+#[derive(Copy, Clone)]
+enum FloatConv {
+ FloatPair(Reg, Reg),
+ Float(Reg),
+ MixedPair(Reg, Reg),
+}
+
+#[derive(Copy, Clone)]
+struct CannotUseFpConv;
+
+fn is_riscv_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
+ match arg.layout.abi {
+ Abi::Vector { .. } => true,
+ _ => arg.layout.is_aggregate(),
+ }
+}
+
+fn should_use_fp_conv_helper<'a, Ty, C>(
+ cx: &C,
+ arg_layout: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ field1_kind: &mut RegPassKind,
+ field2_kind: &mut RegPassKind,
+) -> Result<(), CannotUseFpConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ match arg_layout.abi {
+ Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => {
+ if arg_layout.size.bits() > xlen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ (RegPassKind::Float(_), RegPassKind::Unknown) => {
+ *field2_kind = RegPassKind::Integer(Reg {
+ kind: RegKind::Integer,
+ size: arg_layout.size,
+ });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ abi::F32 | abi::F64 => {
+ if arg_layout.size.bits() > flen {
+ return Err(CannotUseFpConv);
+ }
+ match (*field1_kind, *field2_kind) {
+ (RegPassKind::Unknown, _) => {
+ *field1_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ (_, RegPassKind::Unknown) => {
+ *field2_kind =
+ RegPassKind::Float(Reg { kind: RegKind::Float, size: arg_layout.size });
+ }
+ _ => return Err(CannotUseFpConv),
+ }
+ }
+ },
+ Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+ FieldsShape::Primitive => {
+ unreachable!("aggregates can't have `FieldsShape::Primitive`")
+ }
+ FieldsShape::Union(_) => {
+ if !arg_layout.is_zst() {
+ return Err(CannotUseFpConv);
+ }
+ }
+ FieldsShape::Array { count, .. } => {
+ for _ in 0..count {
+ let elem_layout = arg_layout.field(cx, 0);
+ should_use_fp_conv_helper(
+ cx,
+ &elem_layout,
+ xlen,
+ flen,
+ field1_kind,
+ field2_kind,
+ )?;
+ }
+ }
+ FieldsShape::Arbitrary { .. } => {
+ match arg_layout.variants {
+ abi::Variants::Multiple { .. } => return Err(CannotUseFpConv),
+ abi::Variants::Single { .. } => (),
+ }
+ for i in arg_layout.fields.index_by_increasing_offset() {
+ let field = arg_layout.field(cx, i);
+ should_use_fp_conv_helper(cx, &field, xlen, flen, field1_kind, field2_kind)?;
+ }
+ }
+ },
+ }
+ Ok(())
+}
+
+fn should_use_fp_conv<'a, Ty, C>(
+ cx: &C,
+ arg: &TyAndLayout<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+) -> Option<FloatConv>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ let mut field1_kind = RegPassKind::Unknown;
+ let mut field2_kind = RegPassKind::Unknown;
+ if should_use_fp_conv_helper(cx, arg, xlen, flen, &mut field1_kind, &mut field2_kind).is_err() {
+ return None;
+ }
+ match (field1_kind, field2_kind) {
+ (RegPassKind::Integer(l), RegPassKind::Float(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Integer(r)) => Some(FloatConv::MixedPair(l, r)),
+ (RegPassKind::Float(l), RegPassKind::Float(r)) => Some(FloatConv::FloatPair(l, r)),
+ (RegPassKind::Float(f), RegPassKind::Unknown) => Some(FloatConv::Float(f)),
+ _ => None,
+ }
+}
+
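+// Returns `true` if the value is returned indirectly, i.e. a hidden return
+// pointer will occupy one GPR at the call site.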
+fn classify_ret<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, xlen: u64, flen: u64) -> bool
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if let Some(conv) = should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ match conv {
+ FloatConv::Float(f) => {
+ arg.cast_to(f);
+ }
+ FloatConv::FloatPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ FloatConv::MixedPair(l, r) => {
+ arg.cast_to(CastTarget::pair(l, r));
+ }
+ }
+ return false;
+ }
+
+ let total = arg.layout.size;
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+        // We rely on the LLVM backend's lowering code to handle scalars larger than 2*XLEN.
+ if is_riscv_aggregate(arg) {
+ arg.make_indirect();
+ }
+ return true;
+ }
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+ if is_riscv_aggregate(arg) {
+ if total.bits() <= xlen {
+ arg.cast_to(xlen_reg);
+ } else {
+ arg.cast_to(Uniform { unit: xlen_reg, total: Size::from_bits(xlen * 2) });
+ }
+ return false;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ extend_integer_width(arg, xlen);
+ false
+}
+
+fn classify_arg<'a, Ty, C>(
+ cx: &C,
+ arg: &mut ArgAbi<'a, Ty>,
+ xlen: u64,
+ flen: u64,
+ is_vararg: bool,
+ avail_gprs: &mut u64,
+ avail_fprs: &mut u64,
+) where
+ Ty: TyAbiInterface<'a, C> + Copy,
+{
+ if !is_vararg {
+ match should_use_fp_conv(cx, &arg.layout, xlen, flen) {
+ Some(FloatConv::Float(f)) if *avail_fprs >= 1 => {
+ *avail_fprs -= 1;
+ arg.cast_to(f);
+ return;
+ }
+ Some(FloatConv::FloatPair(l, r)) if *avail_fprs >= 2 => {
+ *avail_fprs -= 2;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ Some(FloatConv::MixedPair(l, r)) if *avail_fprs >= 1 && *avail_gprs >= 1 => {
+ *avail_gprs -= 1;
+ *avail_fprs -= 1;
+ arg.cast_to(CastTarget::pair(l, r));
+ return;
+ }
+ _ => (),
+ }
+ }
+
+ let total = arg.layout.size;
+ let align = arg.layout.align.abi.bits();
+
+ // "Scalars wider than 2✕XLEN are passed by reference and are replaced in
+ // the argument list with the address."
+ // "Aggregates larger than 2✕XLEN bits are passed by reference and are
+ // replaced in the argument list with the address, as are C++ aggregates
+ // with nontrivial copy constructors, destructors, or vtables."
+ if total.bits() > 2 * xlen {
+        // We rely on the LLVM backend's lowering code to handle scalars larger than 2*XLEN.
+ if is_riscv_aggregate(arg) {
+ arg.make_indirect();
+ }
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ let double_xlen_reg = match xlen {
+ 32 => Reg::i64(),
+ 64 => Reg::i128(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ let xlen_reg = match xlen {
+ 32 => Reg::i32(),
+ 64 => Reg::i64(),
+ _ => unreachable!("Unsupported XLEN: {}", xlen),
+ };
+
+ if total.bits() > xlen {
+ let align_regs = align > xlen;
+ if is_riscv_aggregate(arg) {
+ arg.cast_to(Uniform {
+ unit: if align_regs { double_xlen_reg } else { xlen_reg },
+ total: Size::from_bits(xlen * 2),
+ });
+ }
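+        // Per the psABI, variadic arguments with 2*XLEN-bit alignment are
+        // passed in an aligned (even, odd) register pair, so skip a leftover
+        // odd register first.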
+ if align_regs && is_vararg {
+ *avail_gprs -= *avail_gprs % 2;
+ }
+ if *avail_gprs >= 2 {
+ *avail_gprs -= 2;
+ } else {
+ *avail_gprs = 0;
+ }
+ return;
+ } else if is_riscv_aggregate(arg) {
+ arg.cast_to(xlen_reg);
+ if *avail_gprs >= 1 {
+ *avail_gprs -= 1;
+ }
+ return;
+ }
+
+ // "When passed in registers, scalars narrower than XLEN bits are widened
+ // according to the sign of their type up to 32 bits, then sign-extended to
+ // XLEN bits."
+ if *avail_gprs >= 1 {
+ extend_integer_width(arg, xlen);
+ *avail_gprs -= 1;
+ }
+}
+
+fn extend_integer_width<'a, Ty>(arg: &mut ArgAbi<'a, Ty>, xlen: u64) {
+ if let Abi::Scalar(scalar) = arg.layout.abi {
+ if let abi::Int(i, _) = scalar.primitive() {
+ // 32-bit integers are always sign-extended
+ if i.size().bits() == 32 && xlen > 32 {
+ if let PassMode::Direct(ref mut attrs) = arg.mode {
+ attrs.ext(ArgExtension::Sext);
+ return;
+ }
+ }
+ }
+ }
+
+ arg.extend_integer_width_to(xlen);
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ let flen = match &cx.target_spec().llvm_abiname[..] {
+ "ilp32f" | "lp64f" => 32,
+ "ilp32d" | "lp64d" => 64,
+ _ => 0,
+ };
+ let xlen = cx.data_layout().pointer_size.bits();
+
+ let mut avail_gprs = 8;
+ let mut avail_fprs = 8;
+
+ if !fn_abi.ret.is_ignore() && classify_ret(cx, &mut fn_abi.ret, xlen, flen) {
+ avail_gprs -= 1;
+ }
+
+ for (i, arg) in fn_abi.args.iter_mut().enumerate() {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(
+ cx,
+ arg,
+ xlen,
+ flen,
+ i >= fn_abi.fixed_count,
+ &mut avail_gprs,
+ &mut avail_fprs,
+ );
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/s390x.rs b/compiler/rustc_target/src/abi/call/s390x.rs
new file mode 100644
index 000000000..13706e8c2
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/s390x.rs
@@ -0,0 +1,57 @@
+// FIXME: This assumes we're using the non-vector ABI, i.e., compiling
+// for a pre-z13 machine or using -mno-vx.
+
+use crate::abi::call::{ArgAbi, FnAbi, Reg};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
+fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
+ ret.extend_integer_width_to(64);
+ } else {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
+ arg.extend_integer_width_to(64);
+ return;
+ }
+
+ if arg.layout.is_single_fp_element(cx) {
+ match arg.layout.size.bytes() {
+ 4 => arg.cast_to(Reg::f32()),
+ 8 => arg.cast_to(Reg::f64()),
+ _ => arg.make_indirect(),
+ }
+ } else {
+ match arg.layout.size.bytes() {
+ 1 => arg.cast_to(Reg::i8()),
+ 2 => arg.cast_to(Reg::i16()),
+ 4 => arg.cast_to(Reg::i32()),
+ 8 => arg.cast_to(Reg::i64()),
+ _ => arg.make_indirect(),
+ }
+ }
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/sparc.rs b/compiler/rustc_target/src/abi/call/sparc.rs
new file mode 100644
index 000000000..cc4431976
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/sparc.rs
@@ -0,0 +1,51 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
+use crate::abi::{HasDataLayout, Size};
+
+fn classify_ret<Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ if !ret.layout.is_aggregate() {
+ ret.extend_integer_width_to(32);
+ } else {
+ ret.make_indirect();
+ *offset += cx.data_layout().pointer_size;
+ }
+}
+
+fn classify_arg<Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
+where
+ C: HasDataLayout,
+{
+ let dl = cx.data_layout();
+ let size = arg.layout.size;
+ let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
+
+ if arg.layout.is_aggregate() {
+ arg.cast_to(Uniform { unit: Reg::i32(), total: size });
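+        // If the running argument offset is misaligned for this argument,
+        // insert one i32 of padding to restore alignment.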
+ if !offset.is_aligned(align) {
+ arg.pad_with(Reg::i32());
+ }
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+
+ *offset = offset.align_to(align) + size.align_to(align);
+}
+
+pub fn compute_abi_info<Ty, C>(cx: &C, fn_abi: &mut FnAbi<'_, Ty>)
+where
+ C: HasDataLayout,
+{
+ let mut offset = Size::ZERO;
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret, &mut offset);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, &mut offset);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/sparc64.rs b/compiler/rustc_target/src/abi/call/sparc64.rs
new file mode 100644
index 000000000..cc3a0a699
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/sparc64.rs
@@ -0,0 +1,226 @@
+// FIXME: This needs an audit for correctness and completeness.
+
+use crate::abi::call::{
+ ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, Reg, Uniform,
+};
+use crate::abi::{self, HasDataLayout, Scalar, Size, TyAbiInterface, TyAndLayout};
+
+#[derive(Clone, Debug)]
+pub struct Sdata {
+ pub prefix: [Option<Reg>; 8],
+ pub prefix_index: usize,
+ pub last_offset: Size,
+ pub has_float: bool,
+ pub arg_attribute: ArgAttribute,
+}
+
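+// Fold one scalar field at `offset` into the running `Sdata`. Only floats are
+// recorded: each claims an f32/f64 slot in `prefix`, and any gap between the
+// previous slot and `offset` is first filled with i32/i64 slots.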
+fn arg_scalar<C>(cx: &C, scalar: &Scalar, offset: Size, mut data: Sdata) -> Sdata
+where
+ C: HasDataLayout,
+{
+ let dl = cx.data_layout();
+
+ if !scalar.primitive().is_float() {
+ return data;
+ }
+
+ data.has_float = true;
+
+ if !data.last_offset.is_aligned(dl.f64_align.abi) && data.last_offset < offset {
+ if data.prefix_index == data.prefix.len() {
+ return data;
+ }
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ data.prefix_index += 1;
+ data.last_offset = data.last_offset + Reg::i32().size;
+ }
+
+ for _ in 0..((offset - data.last_offset).bits() / 64)
+ .min((data.prefix.len() - data.prefix_index) as u64)
+ {
+ data.prefix[data.prefix_index] = Some(Reg::i64());
+ data.prefix_index += 1;
+ data.last_offset = data.last_offset + Reg::i64().size;
+ }
+
+ if data.last_offset < offset {
+ if data.prefix_index == data.prefix.len() {
+ return data;
+ }
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ data.prefix_index += 1;
+ data.last_offset = data.last_offset + Reg::i32().size;
+ }
+
+ if data.prefix_index == data.prefix.len() {
+ return data;
+ }
+
+ if scalar.primitive() == abi::F32 {
+ data.arg_attribute = ArgAttribute::InReg;
+ data.prefix[data.prefix_index] = Some(Reg::f32());
+ data.last_offset = offset + Reg::f32().size;
+ } else {
+ data.prefix[data.prefix_index] = Some(Reg::f64());
+ data.last_offset = offset + Reg::f64().size;
+ }
+ data.prefix_index += 1;
+ return data;
+}
+
+fn arg_scalar_pair<C>(
+ cx: &C,
+ scalar1: &Scalar,
+ scalar2: &Scalar,
+ mut offset: Size,
+ mut data: Sdata,
+) -> Sdata
+where
+ C: HasDataLayout,
+{
+ data = arg_scalar(cx, &scalar1, offset, data);
+ match (scalar1.primitive(), scalar2.primitive()) {
+ (abi::F32, _) => offset += Reg::f32().size,
+ (_, abi::F64) => offset += Reg::f64().size,
+ (abi::Int(i, _signed), _) => offset += i.size(),
+ (abi::Pointer, _) => offset += Reg::i64().size,
+ _ => {}
+ }
+
+ if (offset.raw % 4) != 0 && scalar2.primitive().is_float() {
+ offset.raw += 4 - (offset.raw % 4);
+ }
+ data = arg_scalar(cx, &scalar2, offset, data);
+ return data;
+}
+
+fn parse_structure<'a, Ty, C>(
+ cx: &C,
+ layout: TyAndLayout<'a, Ty>,
+ mut data: Sdata,
+ mut offset: Size,
+) -> Sdata
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if let abi::FieldsShape::Union(_) = layout.fields {
+ return data;
+ }
+
+ match layout.abi {
+ abi::Abi::Scalar(scalar) => {
+ data = arg_scalar(cx, &scalar, offset, data);
+ }
+ abi::Abi::Aggregate { .. } => {
+ for i in 0..layout.fields.count() {
+ if offset < layout.fields.offset(i) {
+ offset = layout.fields.offset(i);
+ }
+ data = parse_structure(cx, layout.field(cx, i), data.clone(), offset);
+ }
+ }
+ _ => {
+ if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi {
+ data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
+ }
+ }
+ }
+
+ return data;
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, in_registers_max: Size)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !arg.layout.is_aggregate() {
+ arg.extend_integer_width_to(64);
+ return;
+ }
+
+ let total = arg.layout.size;
+ if total > in_registers_max {
+ arg.make_indirect();
+ return;
+ }
+
+ match arg.layout.fields {
+ abi::FieldsShape::Primitive => unreachable!(),
+ abi::FieldsShape::Array { .. } => {
+ // Arrays are passed indirectly
+ arg.make_indirect();
+ return;
+ }
+ abi::FieldsShape::Union(_) => {
+            // Unions are always treated as a series of 64-bit integer chunks.
+ }
+ abi::FieldsShape::Arbitrary { .. } => {
+ // Structures with floating point numbers need special care.
+
+ let mut data = parse_structure(
+ cx,
+ arg.layout,
+ Sdata {
+ prefix: [None; 8],
+ prefix_index: 0,
+ last_offset: Size::ZERO,
+ has_float: false,
+ arg_attribute: ArgAttribute::default(),
+ },
+ Size { raw: 0 },
+ );
+
+ if data.has_float {
+                // A structure { float, int, int } must not be handled like
+                // { float, long int }; the other way around is harmless.
+ if data.last_offset < arg.layout.size
+ && (data.last_offset.raw % 8) != 0
+ && data.prefix_index < data.prefix.len()
+ {
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ data.prefix_index += 1;
+ data.last_offset += Reg::i32().size;
+ }
+
+ let mut rest_size = arg.layout.size - data.last_offset;
+ if (rest_size.raw % 8) != 0 && data.prefix_index < data.prefix.len() {
+ data.prefix[data.prefix_index] = Some(Reg::i32());
+ rest_size = rest_size - Reg::i32().size;
+ }
+
+ arg.cast_to(CastTarget {
+ prefix: data.prefix,
+ rest: Uniform { unit: Reg::i64(), total: rest_size },
+ attrs: ArgAttributes {
+ regular: data.arg_attribute,
+ arg_ext: ArgExtension::None,
+ pointee_size: Size::ZERO,
+ pointee_align: None,
+ },
+ });
+ return;
+ }
+ }
+ }
+
+ arg.cast_to(Uniform { unit: Reg::i64(), total });
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_arg(cx, &mut fn_abi.ret, Size { raw: 32 });
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg, Size { raw: 16 });
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/wasm.rs b/compiler/rustc_target/src/abi/call/wasm.rs
new file mode 100644
index 000000000..3237cde10
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/wasm.rs
@@ -0,0 +1,83 @@
+use crate::abi::call::{ArgAbi, FnAbi, Uniform};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+
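+// A "trivial" aggregate is one whose homogeneous unit covers the whole
+// layout, e.g. a newtype like `struct Wrapper(f32)`; it can be passed
+// directly as that unit instead of indirectly.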
+fn unwrap_trivial_aggregate<'a, Ty, C>(cx: &C, val: &mut ArgAbi<'a, Ty>) -> bool
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if val.layout.is_aggregate() {
+ if let Some(unit) = val.layout.homogeneous_aggregate(cx).ok().and_then(|ha| ha.unit()) {
+ let size = val.layout.size;
+ if unit.size == size {
+ val.cast_to(Uniform { unit, total: size });
+ return true;
+ }
+ }
+ }
+ false
+}
+
+fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ ret.extend_integer_width_to(32);
+ if ret.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, ret) {
+ ret.make_indirect();
+ }
+}
+
+fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ arg.extend_integer_width_to(32);
+ if arg.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, arg) {
+ arg.make_indirect_byval();
+ }
+}
+
+/// The purpose of this ABI is to match the C ABI (as implemented by clang) exactly.
+pub fn compute_c_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(cx, &mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(cx, arg);
+ }
+}
+
+/// The purpose of this ABI is to match the WebAssembly standard. It
+/// intentionally diverges from the C ABI and is specifically crafted to take
+/// advantage of LLVM's support for multiple return values in WebAssembly.
+pub fn compute_wasm_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ if !fn_abi.ret.is_ignore() {
+ classify_ret(&mut fn_abi.ret);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ classify_arg(arg);
+ }
+
+ fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
+ ret.extend_integer_width_to(32);
+ }
+
+ fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
+ arg.extend_integer_width_to(32);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/x86.rs b/compiler/rustc_target/src/abi/call/x86.rs
new file mode 100644
index 000000000..c7d59baf9
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/x86.rs
@@ -0,0 +1,117 @@
+use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
+use crate::abi::{HasDataLayout, TyAbiInterface};
+use crate::spec::HasTargetSpec;
+
+#[derive(PartialEq)]
+pub enum Flavor {
+ General,
+ FastcallOrVectorcall,
+}
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, flavor: Flavor)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout + HasTargetSpec,
+{
+ if !fn_abi.ret.is_ignore() {
+ if fn_abi.ret.layout.is_aggregate() {
+ // Returning a structure. Most often, this will use
+ // a hidden first argument. On some platforms, though,
+ // small structs are returned as integers.
+ //
+ // Some links:
+ // https://www.angelcode.com/dev/callconv/callconv.html
+ // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
+ let t = cx.target_spec();
+ if t.abi_return_struct_as_int {
+ // According to Clang, everyone but MSVC returns single-element
+ // float aggregates directly in a floating-point register.
+ if !t.is_like_msvc && fn_abi.ret.layout.is_single_fp_element(cx) {
+ match fn_abi.ret.layout.size.bytes() {
+ 4 => fn_abi.ret.cast_to(Reg::f32()),
+ 8 => fn_abi.ret.cast_to(Reg::f64()),
+ _ => fn_abi.ret.make_indirect(),
+ }
+ } else {
+ match fn_abi.ret.layout.size.bytes() {
+ 1 => fn_abi.ret.cast_to(Reg::i8()),
+ 2 => fn_abi.ret.cast_to(Reg::i16()),
+ 4 => fn_abi.ret.cast_to(Reg::i32()),
+ 8 => fn_abi.ret.cast_to(Reg::i64()),
+ _ => fn_abi.ret.make_indirect(),
+ }
+ }
+ } else {
+ fn_abi.ret.make_indirect();
+ }
+ } else {
+ fn_abi.ret.extend_integer_width_to(32);
+ }
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ if arg.layout.is_aggregate() {
+ arg.make_indirect_byval();
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+ }
+
+ if flavor == Flavor::FastcallOrVectorcall {
+ // Mark arguments as InReg like clang does it,
+ // so our fastcall/vectorcall is compatible with C/C++ fastcall/vectorcall.
+
+ // Clang reference: lib/CodeGen/TargetInfo.cpp
+ // See X86_32ABIInfo::shouldPrimitiveUseInReg(), X86_32ABIInfo::updateFreeRegs()
+
+        // Clang's IsSoftFloatABI is only set to true on ARM platforms,
+        // which can never be x86, so that check is not replicated here.
+
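+        // fastcall/vectorcall pass the first two eligible 32-bit integer
+        // arguments in ECX and EDX, hence two free registers to hand out.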
+ let mut free_regs = 2;
+
+ for arg in &mut fn_abi.args {
+ let attrs = match arg.mode {
+ PassMode::Ignore
+ | PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
+ continue;
+ }
+ PassMode::Direct(ref mut attrs) => attrs,
+ PassMode::Pair(..)
+ | PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ }
+ | PassMode::Cast(_) => {
+ unreachable!("x86 shouldn't be passing arguments by {:?}", arg.mode)
+ }
+ };
+
+ // At this point we know this must be a primitive of sorts.
+ let unit = arg.layout.homogeneous_aggregate(cx).unwrap().unit().unwrap();
+ assert_eq!(unit.size, arg.layout.size);
+ if unit.kind == RegKind::Float {
+ continue;
+ }
+
+ let size_in_regs = (arg.layout.size.bits() + 31) / 32;
+
+ if size_in_regs == 0 {
+ continue;
+ }
+
+ if size_in_regs > free_regs {
+ break;
+ }
+
+ free_regs -= size_in_regs;
+
+ if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer {
+ attrs.set(ArgAttribute::InReg);
+ }
+
+ if free_regs == 0 {
+ break;
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/x86_64.rs b/compiler/rustc_target/src/abi/call/x86_64.rs
new file mode 100644
index 000000000..a52e01a49
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/x86_64.rs
@@ -0,0 +1,248 @@
+// The classification code for the x86_64 ABI is taken from the clay language
+// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
+
+use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
+use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+/// Classification of "eightbyte" components.
+// N.B., the order of the variants is from general to specific,
+// such that `unify(a, b)` is the "smaller" of `a` and `b`.
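+// For example, an eightbyte containing both integer and SSE data unifies to
+// `Int`, because integer classification takes priority (`Int < Sse`).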
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
+enum Class {
+ Int,
+ Sse,
+ SseUp,
+}
+
+#[derive(Clone, Copy, Debug)]
+struct Memory;
+
+// Currently supported vector size (AVX-512).
+const LARGEST_VECTOR_SIZE: usize = 512;
+const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
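+// For example, a full 512-bit vector spans eight eightbytes: one `Sse`
+// followed by seven `SseUp`.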
+
+fn classify_arg<'a, Ty, C>(
+ cx: &C,
+ arg: &ArgAbi<'a, Ty>,
+) -> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ fn classify<'a, Ty, C>(
+ cx: &C,
+ layout: TyAndLayout<'a, Ty>,
+ cls: &mut [Option<Class>],
+ off: Size,
+ ) -> Result<(), Memory>
+ where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+ {
+ if !off.is_aligned(layout.align.abi) {
+ if !layout.is_zst() {
+ return Err(Memory);
+ }
+ return Ok(());
+ }
+
+ let mut c = match layout.abi {
+ Abi::Uninhabited => return Ok(()),
+
+ Abi::Scalar(scalar) => match scalar.primitive() {
+ abi::Int(..) | abi::Pointer => Class::Int,
+ abi::F32 | abi::F64 => Class::Sse,
+ },
+
+ Abi::Vector { .. } => Class::Sse,
+
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+ for i in 0..layout.fields.count() {
+ let field_off = off + layout.fields.offset(i);
+ classify(cx, layout.field(cx, i), cls, field_off)?;
+ }
+
+ match &layout.variants {
+ abi::Variants::Single { .. } => {}
+ abi::Variants::Multiple { variants, .. } => {
+ // Treat enum variants like union members.
+ for variant_idx in variants.indices() {
+ classify(cx, layout.for_variant(cx, variant_idx), cls, off)?;
+ }
+ }
+ }
+
+ return Ok(());
+ }
+ };
+
+ // Fill in `cls` for scalars (Int/Sse) and vectors (Sse).
+ let first = (off.bytes() / 8) as usize;
+ let last = ((off.bytes() + layout.size.bytes() - 1) / 8) as usize;
+ for cls in &mut cls[first..=last] {
+ *cls = Some(cls.map_or(c, |old| old.min(c)));
+
+ // Everything after the first Sse "eightbyte"
+ // component is the upper half of a register.
+ if c == Class::Sse {
+ c = Class::SseUp;
+ }
+ }
+
+ Ok(())
+ }
+
+ let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
+ if n > MAX_EIGHTBYTES {
+ return Err(Memory);
+ }
+
+ let mut cls = [None; MAX_EIGHTBYTES];
+ classify(cx, arg.layout, &mut cls, Size::ZERO)?;
+ if n > 2 {
+ if cls[0] != Some(Class::Sse) {
+ return Err(Memory);
+ }
+ if cls[1..n].iter().any(|&c| c != Some(Class::SseUp)) {
+ return Err(Memory);
+ }
+ } else {
+ let mut i = 0;
+ while i < n {
+ if cls[i] == Some(Class::SseUp) {
+ cls[i] = Some(Class::Sse);
+ } else if cls[i] == Some(Class::Sse) {
+ i += 1;
+ while i != n && cls[i] == Some(Class::SseUp) {
+ i += 1;
+ }
+ } else {
+ i += 1;
+ }
+ }
+ }
+
+ Ok(cls)
+}
+
+fn reg_component(cls: &[Option<Class>], i: &mut usize, size: Size) -> Option<Reg> {
+ if *i >= cls.len() {
+ return None;
+ }
+
+ match cls[*i] {
+ None => None,
+ Some(Class::Int) => {
+ *i += 1;
+ Some(if size.bytes() < 8 { Reg { kind: RegKind::Integer, size } } else { Reg::i64() })
+ }
+ Some(Class::Sse) => {
+ let vec_len =
+ 1 + cls[*i + 1..].iter().take_while(|&&c| c == Some(Class::SseUp)).count();
+ *i += vec_len;
+ Some(if vec_len == 1 {
+ match size.bytes() {
+ 4 => Reg::f32(),
+ _ => Reg::f64(),
+ }
+ } else {
+ Reg { kind: RegKind::Vector, size: Size::from_bytes(8) * (vec_len as u64) }
+ })
+ }
+ Some(c) => unreachable!("reg_component: unhandled class {:?}", c),
+ }
+}
+
+fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
+ let mut i = 0;
+ let lo = reg_component(cls, &mut i, size).unwrap();
+ let offset = Size::from_bytes(8) * (i as u64);
+ let mut target = CastTarget::from(lo);
+ if size > offset {
+ if let Some(hi) = reg_component(cls, &mut i, size - offset) {
+ target = CastTarget::pair(lo, hi);
+ }
+ }
+ assert_eq!(reg_component(cls, &mut i, Size::ZERO), None);
+ target
+}
+
+const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
+const MAX_SSE_REGS: usize = 8; // XMM0-7
+
+pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
+where
+ Ty: TyAbiInterface<'a, C> + Copy,
+ C: HasDataLayout,
+{
+ let mut int_regs = MAX_INT_REGS;
+ let mut sse_regs = MAX_SSE_REGS;
+
+ let mut x86_64_arg_or_ret = |arg: &mut ArgAbi<'a, Ty>, is_arg: bool| {
+ let mut cls_or_mem = classify_arg(cx, arg);
+
+ if is_arg {
+ if let Ok(cls) = cls_or_mem {
+ let mut needed_int = 0;
+ let mut needed_sse = 0;
+ for c in cls {
+ match c {
+ Some(Class::Int) => needed_int += 1,
+ Some(Class::Sse) => needed_sse += 1,
+ _ => {}
+ }
+ }
+ match (int_regs.checked_sub(needed_int), sse_regs.checked_sub(needed_sse)) {
+ (Some(left_int), Some(left_sse)) => {
+ int_regs = left_int;
+ sse_regs = left_sse;
+ }
+ _ => {
+ // Not enough registers for this argument, so it will be
+ // passed on the stack, but we only mark aggregates
+ // explicitly as indirect `byval` arguments, as LLVM will
+ // automatically put immediates on the stack itself.
+ if arg.layout.is_aggregate() {
+ cls_or_mem = Err(Memory);
+ }
+ }
+ }
+ }
+ }
+
+ match cls_or_mem {
+ Err(Memory) => {
+ if is_arg {
+ arg.make_indirect_byval();
+ } else {
+                    // Returning via an `sret` pointer parameter consumes one integer register.
+ arg.make_indirect();
+ // NOTE(eddyb) return is handled first, so no registers
+ // should've been used yet.
+ assert_eq!(int_regs, MAX_INT_REGS);
+ int_regs -= 1;
+ }
+ }
+ Ok(ref cls) => {
+ // split into sized chunks passed individually
+ if arg.layout.is_aggregate() {
+ let size = arg.layout.size;
+ arg.cast_to(cast_target(cls, size))
+ } else {
+ arg.extend_integer_width_to(32);
+ }
+ }
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ x86_64_arg_or_ret(&mut fn_abi.ret, false);
+ }
+
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ x86_64_arg_or_ret(arg, true);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/call/x86_win64.rs b/compiler/rustc_target/src/abi/call/x86_win64.rs
new file mode 100644
index 000000000..2aad641b1
--- /dev/null
+++ b/compiler/rustc_target/src/abi/call/x86_win64.rs
@@ -0,0 +1,40 @@
+use crate::abi::call::{ArgAbi, FnAbi, Reg};
+use crate::abi::Abi;
+
+// Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing
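+// Aggregates of exactly 1, 2, 4 or 8 bytes are passed in a register; all
+// other aggregates are passed by reference.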
+
+pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
+ let fixup = |a: &mut ArgAbi<'_, Ty>| {
+ match a.layout.abi {
+ Abi::Uninhabited => {}
+ Abi::ScalarPair(..) | Abi::Aggregate { .. } => match a.layout.size.bits() {
+ 8 => a.cast_to(Reg::i8()),
+ 16 => a.cast_to(Reg::i16()),
+ 32 => a.cast_to(Reg::i32()),
+ 64 => a.cast_to(Reg::i64()),
+ _ => a.make_indirect(),
+ },
+ Abi::Vector { .. } => {
+ // FIXME(eddyb) there should be a size cap here
+ // (probably what clang calls "illegal vectors").
+ }
+ Abi::Scalar(_) => {
+ if a.layout.size.bytes() > 8 {
+ a.make_indirect();
+ } else {
+ a.extend_integer_width_to(32);
+ }
+ }
+ }
+ };
+
+ if !fn_abi.ret.is_ignore() {
+ fixup(&mut fn_abi.ret);
+ }
+ for arg in &mut fn_abi.args {
+ if arg.is_ignore() {
+ continue;
+ }
+ fixup(arg);
+ }
+}
diff --git a/compiler/rustc_target/src/abi/mod.rs b/compiler/rustc_target/src/abi/mod.rs
new file mode 100644
index 000000000..92ce4d91d
--- /dev/null
+++ b/compiler/rustc_target/src/abi/mod.rs
@@ -0,0 +1,1558 @@
+pub use Integer::*;
+pub use Primitive::*;
+
+use crate::json::{Json, ToJson};
+use crate::spec::Target;
+
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+use std::iter::Step;
+use std::num::NonZeroUsize;
+use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
+use std::str::FromStr;
+
+use rustc_data_structures::intern::Interned;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_macros::HashStable_Generic;
+
+pub mod call;
+
+/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
+/// for a target, which contains everything needed to compute layouts.
+pub struct TargetDataLayout {
+ pub endian: Endian,
+ pub i1_align: AbiAndPrefAlign,
+ pub i8_align: AbiAndPrefAlign,
+ pub i16_align: AbiAndPrefAlign,
+ pub i32_align: AbiAndPrefAlign,
+ pub i64_align: AbiAndPrefAlign,
+ pub i128_align: AbiAndPrefAlign,
+ pub f32_align: AbiAndPrefAlign,
+ pub f64_align: AbiAndPrefAlign,
+ pub pointer_size: Size,
+ pub pointer_align: AbiAndPrefAlign,
+ pub aggregate_align: AbiAndPrefAlign,
+
+ /// Alignments for vector types.
+ pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
+
+ pub instruction_address_space: AddressSpace,
+
+    /// Minimum size of `#[repr(C)]` enums (default: `I32`, i.e. 32 bits).
+ pub c_enum_min_size: Integer,
+}
+
+impl Default for TargetDataLayout {
+ /// Creates an instance of `TargetDataLayout`.
+ fn default() -> TargetDataLayout {
+ let align = |bits| Align::from_bits(bits).unwrap();
+ TargetDataLayout {
+ endian: Endian::Big,
+ i1_align: AbiAndPrefAlign::new(align(8)),
+ i8_align: AbiAndPrefAlign::new(align(8)),
+ i16_align: AbiAndPrefAlign::new(align(16)),
+ i32_align: AbiAndPrefAlign::new(align(32)),
+ i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
+ i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
+ f32_align: AbiAndPrefAlign::new(align(32)),
+ f64_align: AbiAndPrefAlign::new(align(64)),
+ pointer_size: Size::from_bits(64),
+ pointer_align: AbiAndPrefAlign::new(align(64)),
+ aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
+ vector_align: vec![
+ (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
+ (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
+ ],
+ instruction_address_space: AddressSpace::DATA,
+ c_enum_min_size: Integer::I32,
+ }
+ }
+}
+
+impl TargetDataLayout {
+ pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
+ // Parse an address space index from a string.
+ let parse_address_space = |s: &str, cause: &str| {
+ s.parse::<u32>().map(AddressSpace).map_err(|err| {
+ format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
+ })
+ };
+
+ // Parse a bit count from a string.
+ let parse_bits = |s: &str, kind: &str, cause: &str| {
+ s.parse::<u64>().map_err(|err| {
+ format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
+ })
+ };
+
+ // Parse a size string.
+ let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);
+
+ // Parse an alignment string.
+ let align = |s: &[&str], cause: &str| {
+ if s.is_empty() {
+ return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
+ }
+ let align_from_bits = |bits| {
+ Align::from_bits(bits).map_err(|err| {
+ format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
+ })
+ };
+ let abi = parse_bits(s[0], "alignment", cause)?;
+ let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
+ Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
+ };
+
+ let mut dl = TargetDataLayout::default();
+ let mut i128_align_src = 64;
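+        // A data-layout string is a '-'-separated list of specs; an
+        // illustrative fragment: "e-i64:64-f80:128-n8:16:32:64-S128".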
+ for spec in target.data_layout.split('-') {
+ let spec_parts = spec.split(':').collect::<Vec<_>>();
+
+ match &*spec_parts {
+ ["e"] => dl.endian = Endian::Little,
+ ["E"] => dl.endian = Endian::Big,
+ [p] if p.starts_with('P') => {
+ dl.instruction_address_space = parse_address_space(&p[1..], "P")?
+ }
+ ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
+ ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
+ ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
+ [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
+ dl.pointer_size = size(s, p)?;
+ dl.pointer_align = align(a, p)?;
+ }
+ [s, ref a @ ..] if s.starts_with('i') => {
+ let Ok(bits) = s[1..].parse::<u64>() else {
+ size(&s[1..], "i")?; // For the user error.
+ continue;
+ };
+ let a = align(a, s)?;
+ match bits {
+ 1 => dl.i1_align = a,
+ 8 => dl.i8_align = a,
+ 16 => dl.i16_align = a,
+ 32 => dl.i32_align = a,
+ 64 => dl.i64_align = a,
+ _ => {}
+ }
+ if bits >= i128_align_src && bits <= 128 {
+ // Default alignment for i128 is decided by taking the alignment of
+ // largest-sized i{64..=128}.
+ i128_align_src = bits;
+ dl.i128_align = a;
+ }
+ }
+ [s, ref a @ ..] if s.starts_with('v') => {
+ let v_size = size(&s[1..], "v")?;
+ let a = align(a, s)?;
+ if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
+ v.1 = a;
+ continue;
+ }
+ // No existing entry, add a new one.
+ dl.vector_align.push((v_size, a));
+ }
+ _ => {} // Ignore everything else.
+ }
+ }
+
+ // Perform consistency checks against the Target information.
+ if dl.endian != target.endian {
+ return Err(format!(
+ "inconsistent target specification: \"data-layout\" claims \
+ architecture is {}-endian, while \"target-endian\" is `{}`",
+ dl.endian.as_str(),
+ target.endian.as_str(),
+ ));
+ }
+
+ let target_pointer_width: u64 = target.pointer_width.into();
+ if dl.pointer_size.bits() != target_pointer_width {
+ return Err(format!(
+ "inconsistent target specification: \"data-layout\" claims \
+ pointers are {}-bit, while \"target-pointer-width\" is `{}`",
+ dl.pointer_size.bits(),
+ target.pointer_width
+ ));
+ }
+
+ dl.c_enum_min_size = Integer::from_size(Size::from_bits(target.c_enum_min_bits))?;
+
+ Ok(dl)
+ }
+
+ /// Returns exclusive upper bound on object size.
+ ///
+ /// The theoretical maximum object size is defined as the maximum positive `isize` value.
+ /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
+ /// index every address within an object along with one byte past the end, along with allowing
+ /// `isize` to store the difference between any two pointers into an object.
+ ///
+ /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
+ /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
+ /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
+ /// address space on 64-bit ARMv8 and x86_64.
+ #[inline]
+ pub fn obj_size_bound(&self) -> u64 {
+ match self.pointer_size.bits() {
+ 16 => 1 << 15,
+ 32 => 1 << 31,
+ 64 => 1 << 47,
+ bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
+ }
+ }
+
+ #[inline]
+ pub fn ptr_sized_integer(&self) -> Integer {
+ match self.pointer_size.bits() {
+ 16 => I16,
+ 32 => I32,
+ 64 => I64,
+ bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
+ }
+ }
+
+ #[inline]
+ pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
+ for &(size, align) in &self.vector_align {
+ if size == vec_size {
+ return align;
+ }
+ }
+ // Default to natural alignment, which is what LLVM does.
+ // That is, use the size, rounded up to a power of 2.
+ AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
+ }
+}
+
+pub trait HasDataLayout {
+ fn data_layout(&self) -> &TargetDataLayout;
+}
+
+impl HasDataLayout for TargetDataLayout {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ self
+ }
+}
+
+/// Endianness of the target, which must match cfg(target-endian).
+#[derive(Copy, Clone, PartialEq)]
+pub enum Endian {
+ Little,
+ Big,
+}
+
+impl Endian {
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ Self::Little => "little",
+ Self::Big => "big",
+ }
+ }
+}
+
+impl fmt::Debug for Endian {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(self.as_str())
+ }
+}
+
+impl FromStr for Endian {
+ type Err = String;
+
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ match s {
+ "little" => Ok(Self::Little),
+ "big" => Ok(Self::Big),
+ _ => Err(format!(r#"unknown endian: "{}""#, s)),
+ }
+ }
+}
+
+impl ToJson for Endian {
+ fn to_json(&self) -> Json {
+ self.as_str().to_json()
+ }
+}
+
+/// Size of a type in bytes.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+#[derive(HashStable_Generic)]
+pub struct Size {
+ raw: u64,
+}
+
+// This is debug-printed a lot in larger structs; don't waste too much space there.
+impl fmt::Debug for Size {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Size({} bytes)", self.bytes())
+ }
+}
+
+impl Size {
+ pub const ZERO: Size = Size { raw: 0 };
+
+ /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
+ /// not a multiple of 8.
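+    ///
+    /// For example, 12 bits round up to 2 bytes.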
+ pub fn from_bits(bits: impl TryInto<u64>) -> Size {
+ let bits = bits.try_into().ok().unwrap();
+ // Avoid potential overflow from `bits + 7`.
+ Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
+ }
+
+ #[inline]
+ pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
+ let bytes: u64 = bytes.try_into().ok().unwrap();
+ Size { raw: bytes }
+ }
+
+ #[inline]
+ pub fn bytes(self) -> u64 {
+ self.raw
+ }
+
+ #[inline]
+ pub fn bytes_usize(self) -> usize {
+ self.bytes().try_into().unwrap()
+ }
+
+ #[inline]
+ pub fn bits(self) -> u64 {
+ #[cold]
+ fn overflow(bytes: u64) -> ! {
+ panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
+ }
+
+ self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
+ }
+
+ #[inline]
+ pub fn bits_usize(self) -> usize {
+ self.bits().try_into().unwrap()
+ }
+
+ #[inline]
+ pub fn align_to(self, align: Align) -> Size {
+ let mask = align.bytes() - 1;
+ Size::from_bytes((self.bytes() + mask) & !mask)
+ }
+
+ #[inline]
+ pub fn is_aligned(self, align: Align) -> bool {
+ let mask = align.bytes() - 1;
+ self.bytes() & mask == 0
+ }
+
+ #[inline]
+ pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
+ let dl = cx.data_layout();
+
+ let bytes = self.bytes().checked_add(offset.bytes())?;
+
+ if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
+ }
+
+ #[inline]
+ pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
+ let dl = cx.data_layout();
+
+ let bytes = self.bytes().checked_mul(count)?;
+ if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
+ }
+
+ /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
+ /// (i.e., if it is negative, fill with 1's on the left).
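+    ///
+    /// For example, `Size::from_bits(8).sign_extend(0xff)` yields `u128::MAX`,
+    /// the 128-bit two's complement representation of -1.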
+ #[inline]
+ pub fn sign_extend(self, value: u128) -> u128 {
+ let size = self.bits();
+ if size == 0 {
+ // Truncated until nothing is left.
+ return 0;
+ }
+ // Sign-extend it.
+ let shift = 128 - size;
+ // Shift the unsigned value to the left, then shift back to the right as signed
+ // (essentially fills with sign bit on the left).
+ (((value << shift) as i128) >> shift) as u128
+ }
+
+ /// Truncates `value` to `self` bits.
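+    ///
+    /// For example, `Size::from_bits(8).truncate(0x1ff)` yields `0xff`.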
+ #[inline]
+ pub fn truncate(self, value: u128) -> u128 {
+ let size = self.bits();
+ if size == 0 {
+ // Truncated until nothing is left.
+ return 0;
+ }
+ let shift = 128 - size;
+ // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
+ (value << shift) >> shift
+ }
+
+ #[inline]
+ pub fn signed_int_min(&self) -> i128 {
+ self.sign_extend(1_u128 << (self.bits() - 1)) as i128
+ }
+
+ #[inline]
+ pub fn signed_int_max(&self) -> i128 {
+ i128::MAX >> (128 - self.bits())
+ }
+
+ #[inline]
+ pub fn unsigned_int_max(&self) -> u128 {
+ u128::MAX >> (128 - self.bits())
+ }
+}
+
+// Panicking addition, subtraction and multiplication for convenience.
+// Avoid during layout computation, return `LayoutError` instead.
+
+impl Add for Size {
+ type Output = Size;
+ #[inline]
+ fn add(self, other: Size) -> Size {
+ Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
+ panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
+ }))
+ }
+}
+
+impl Sub for Size {
+ type Output = Size;
+ #[inline]
+ fn sub(self, other: Size) -> Size {
+ Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
+ panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
+ }))
+ }
+}
+
+impl Mul<Size> for u64 {
+ type Output = Size;
+ #[inline]
+ fn mul(self, size: Size) -> Size {
+ size * self
+ }
+}
+
+impl Mul<u64> for Size {
+ type Output = Size;
+ #[inline]
+ fn mul(self, count: u64) -> Size {
+ match self.bytes().checked_mul(count) {
+ Some(bytes) => Size::from_bytes(bytes),
+ None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
+ }
+ }
+}
+
+impl AddAssign for Size {
+ #[inline]
+ fn add_assign(&mut self, other: Size) {
+ *self = *self + other;
+ }
+}
+
+impl Step for Size {
+ #[inline]
+ fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+ u64::steps_between(&start.bytes(), &end.bytes())
+ }
+
+ #[inline]
+ fn forward_checked(start: Self, count: usize) -> Option<Self> {
+ u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
+ }
+
+ #[inline]
+ fn forward(start: Self, count: usize) -> Self {
+ Self::from_bytes(u64::forward(start.bytes(), count))
+ }
+
+ #[inline]
+ unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
+ Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
+ }
+
+ #[inline]
+ fn backward_checked(start: Self, count: usize) -> Option<Self> {
+ u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
+ }
+
+ #[inline]
+ fn backward(start: Self, count: usize) -> Self {
+ Self::from_bytes(u64::backward(start.bytes(), count))
+ }
+
+ #[inline]
+ unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
+ Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
+ }
+}
+
+/// Alignment of a type in bytes (always a power of two).
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+#[derive(HashStable_Generic)]
+pub struct Align {
+ pow2: u8,
+}
+
+// This is debug-printed a lot in larger structs; don't waste too much space there.
+impl fmt::Debug for Align {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Align({} bytes)", self.bytes())
+ }
+}
+
+impl Align {
+ pub const ONE: Align = Align { pow2: 0 };
+ pub const MAX: Align = Align { pow2: 29 };
+
+ #[inline]
+ pub fn from_bits(bits: u64) -> Result<Align, String> {
+ Align::from_bytes(Size::from_bits(bits).bytes())
+ }
+
+ #[inline]
+ pub fn from_bytes(align: u64) -> Result<Align, String> {
+ // Treat an alignment of 0 bytes like 1-byte alignment.
+ if align == 0 {
+ return Ok(Align::ONE);
+ }
+
+ #[cold]
+ fn not_power_of_2(align: u64) -> String {
+ format!("`{}` is not a power of 2", align)
+ }
+
+ #[cold]
+ fn too_large(align: u64) -> String {
+ format!("`{}` is too large", align)
+ }
+
+ let mut bytes = align;
+ let mut pow2: u8 = 0;
+ while (bytes & 1) == 0 {
+ pow2 += 1;
+ bytes >>= 1;
+ }
+ if bytes != 1 {
+ return Err(not_power_of_2(align));
+ }
+ if pow2 > Self::MAX.pow2 {
+ return Err(too_large(align));
+ }
+
+ Ok(Align { pow2 })
+ }
+
+ #[inline]
+ pub fn bytes(self) -> u64 {
+ 1 << self.pow2
+ }
+
+ #[inline]
+ pub fn bits(self) -> u64 {
+ self.bytes() * 8
+ }
+
+ /// Computes the best alignment possible for the given offset
+ /// (the largest power of two that the offset is a multiple of).
+ ///
+ /// N.B., for an offset of `0`, this happens to return `2^64`.
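+    ///
+    /// For example, an offset of 24 bytes has three trailing zero bits, so
+    /// the best possible alignment is 8 bytes.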
+ #[inline]
+ pub fn max_for_offset(offset: Size) -> Align {
+ Align { pow2: offset.bytes().trailing_zeros() as u8 }
+ }
+
+ /// Lower the alignment, if necessary, such that the given offset
+ /// is aligned to it (the offset is a multiple of the alignment).
+ #[inline]
+ pub fn restrict_for_offset(self, offset: Size) -> Align {
+ self.min(Align::max_for_offset(offset))
+ }
+}
+
+/// A pair of alignments, ABI-mandated and preferred.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub struct AbiAndPrefAlign {
+ pub abi: Align,
+ pub pref: Align,
+}
+
+impl AbiAndPrefAlign {
+ #[inline]
+ pub fn new(align: Align) -> AbiAndPrefAlign {
+ AbiAndPrefAlign { abi: align, pref: align }
+ }
+
+ #[inline]
+ pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+ AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
+ }
+
+ #[inline]
+ pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+ AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
+ }
+}
+
+/// Integers, also used for enum discriminants.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
+pub enum Integer {
+ I8,
+ I16,
+ I32,
+ I64,
+ I128,
+}
+
+impl Integer {
+ #[inline]
+ pub fn size(self) -> Size {
+ match self {
+ I8 => Size::from_bytes(1),
+ I16 => Size::from_bytes(2),
+ I32 => Size::from_bytes(4),
+ I64 => Size::from_bytes(8),
+ I128 => Size::from_bytes(16),
+ }
+ }
+
+ pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+ let dl = cx.data_layout();
+
+ match self {
+ I8 => dl.i8_align,
+ I16 => dl.i16_align,
+ I32 => dl.i32_align,
+ I64 => dl.i64_align,
+ I128 => dl.i128_align,
+ }
+ }
+
+ /// Finds the smallest Integer type which can represent the signed value.
+ #[inline]
+ pub fn fit_signed(x: i128) -> Integer {
+ match x {
+ -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
+ -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
+ -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
+ -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
+ _ => I128,
+ }
+ }
+
+ /// Finds the smallest Integer type which can represent the unsigned value.
+ #[inline]
+ pub fn fit_unsigned(x: u128) -> Integer {
+ match x {
+ 0..=0x0000_0000_0000_00ff => I8,
+ 0..=0x0000_0000_0000_ffff => I16,
+ 0..=0x0000_0000_ffff_ffff => I32,
+ 0..=0xffff_ffff_ffff_ffff => I64,
+ _ => I128,
+ }
+ }
+
+ /// Finds the smallest integer with the given alignment.
+ pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
+ let dl = cx.data_layout();
+
+ for candidate in [I8, I16, I32, I64, I128] {
+ if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
+ return Some(candidate);
+ }
+ }
+ None
+ }
+
+ /// Find the largest integer with the given alignment or less.
+ pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
+ let dl = cx.data_layout();
+
+ // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
+ for candidate in [I64, I32, I16] {
+ if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
+ return candidate;
+ }
+ }
+ I8
+ }
+
+ // FIXME(eddyb) consolidate this and other methods that find the appropriate
+ // `Integer` given some requirements.
+ #[inline]
+ fn from_size(size: Size) -> Result<Self, String> {
+ match size.bits() {
+ 8 => Ok(Integer::I8),
+ 16 => Ok(Integer::I16),
+ 32 => Ok(Integer::I32),
+ 64 => Ok(Integer::I64),
+ 128 => Ok(Integer::I128),
+ _ => Err(format!("rust does not support integers with {} bits", size.bits())),
+ }
+ }
+}
+
+/// Fundamental unit of memory access and layout.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum Primitive {
+ /// The `bool` is the signedness of the `Integer` type.
+ ///
+ /// One would think we would not care about such details this low down,
+ /// but some ABIs are described in terms of C types and ISAs where the
+ /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
+ /// a negative integer passed by zero-extension will appear positive in
+ /// the callee, and most operations on it will produce the wrong values.
+ Int(Integer, bool),
+ F32,
+ F64,
+ Pointer,
+}
+
+impl Primitive {
+ pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
+ let dl = cx.data_layout();
+
+ match self {
+ Int(i, _) => i.size(),
+ F32 => Size::from_bits(32),
+ F64 => Size::from_bits(64),
+ Pointer => dl.pointer_size,
+ }
+ }
+
+ pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
+ let dl = cx.data_layout();
+
+ match self {
+ Int(i, _) => i.align(dl),
+ F32 => dl.f32_align,
+ F64 => dl.f64_align,
+ Pointer => dl.pointer_align,
+ }
+ }
+
+ // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
+ #[inline]
+ pub fn is_float(self) -> bool {
+ matches!(self, F32 | F64)
+ }
+
+ // FIXME(eddyb) remove, it's completely unused.
+ #[inline]
+ pub fn is_int(self) -> bool {
+ matches!(self, Int(..))
+ }
+
+ #[inline]
+ pub fn is_ptr(self) -> bool {
+ matches!(self, Pointer)
+ }
+}
+
+/// Inclusive wrap-around range of valid values, that is, if
+/// start > end, it represents `start..=MAX`,
+/// followed by `0..=end`.
+///
+/// That is, for an i8 primitive, a range of `254..=2` means the following
+/// sequence:
+///
+/// 254 (-2), 255 (-1), 0, 1, 2
+///
+/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(HashStable_Generic)]
+pub struct WrappingRange {
+ pub start: u128,
+ pub end: u128,
+}
+
+impl WrappingRange {
+ pub fn full(size: Size) -> Self {
+ Self { start: 0, end: size.unsigned_int_max() }
+ }
+
+ /// Returns `true` if `v` is contained in the range.
+ #[inline(always)]
+ pub fn contains(&self, v: u128) -> bool {
+ if self.start <= self.end {
+ self.start <= v && v <= self.end
+ } else {
+ self.start <= v || v <= self.end
+ }
+ }
+
+ /// Returns `self` with replaced `start`
+ #[inline(always)]
+ pub fn with_start(mut self, start: u128) -> Self {
+ self.start = start;
+ self
+ }
+
+ /// Returns `self` with replaced `end`
+ #[inline(always)]
+ pub fn with_end(mut self, end: u128) -> Self {
+ self.end = end;
+ self
+ }
+
+ /// Returns `true` if `size` completely fills the range.
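+    ///
+    /// For an 8-bit size this holds, e.g., for `0..=255` and for the
+    /// wrap-around range `1..=0`.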
+ #[inline]
+ pub fn is_full_for(&self, size: Size) -> bool {
+ let max_value = size.unsigned_int_max();
+ debug_assert!(self.start <= max_value && self.end <= max_value);
+ self.start == (self.end.wrapping_add(1) & max_value)
+ }
+}
+
+impl fmt::Debug for WrappingRange {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.start > self.end {
+ write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
+ } else {
+ write!(fmt, "{}..={}", self.start, self.end)?;
+ }
+ Ok(())
+ }
+}
+
+/// Information about one scalar component of a Rust type.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+#[derive(HashStable_Generic)]
+pub enum Scalar {
+ Initialized {
+ value: Primitive,
+
+ // FIXME(eddyb) always use the shortest range, e.g., by finding
+ // the largest space between two consecutive valid values and
+ // taking everything else as the (shortest) valid range.
+ valid_range: WrappingRange,
+ },
+ Union {
+ /// Even for unions, we need to use the correct registers for the kind of
+ /// values inside the union, so we keep the `Primitive` type around. We
+ /// also use it to compute the size of the scalar.
+ /// However, unions never have niches and even allow undef,
+ /// so there is no `valid_range`.
+ value: Primitive,
+ },
+}
+
+impl Scalar {
+ #[inline]
+ pub fn is_bool(&self) -> bool {
+ matches!(
+ self,
+ Scalar::Initialized {
+ value: Int(I8, false),
+ valid_range: WrappingRange { start: 0, end: 1 }
+ }
+ )
+ }
+
+ /// Get the primitive representation of this type, ignoring the valid range and whether the
+ /// value is allowed to be undefined (due to being a union).
+ pub fn primitive(&self) -> Primitive {
+ match *self {
+ Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
+ }
+ }
+
+ pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
+ self.primitive().align(cx)
+ }
+
+ pub fn size(self, cx: &impl HasDataLayout) -> Size {
+ self.primitive().size(cx)
+ }
+
+ #[inline]
+ pub fn to_union(&self) -> Self {
+ Self::Union { value: self.primitive() }
+ }
+
+ #[inline]
+ pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
+ match *self {
+ Scalar::Initialized { valid_range, .. } => valid_range,
+ Scalar::Union { value } => WrappingRange::full(value.size(cx)),
+ }
+ }
+
+    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
+    #[inline]
+ pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
+ match self {
+ Scalar::Initialized { valid_range, .. } => valid_range,
+ Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
+ }
+ }
+
+    /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole layout.
+ #[inline]
+ pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
+ match *self {
+ Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
+ Scalar::Union { .. } => true,
+ }
+ }
+
+ /// Returns `true` if this type can be left uninit.
+ #[inline]
+ pub fn is_uninit_valid(&self) -> bool {
+ match *self {
+ Scalar::Initialized { .. } => false,
+ Scalar::Union { .. } => true,
+ }
+ }
+}
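+
+// Illustrative sketch, not part of the original source: `bool` is laid out as
+// the initialized scalar below, so `is_bool` holds and uninit memory is not a
+// valid value for it.
+//
+//     let b = Scalar::Initialized {
+//         value: Int(I8, false),
+//         valid_range: WrappingRange { start: 0, end: 1 },
+//     };
+//     assert!(b.is_bool());
+//     assert!(!b.is_uninit_valid());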
+
+/// Describes how the fields of a type are located in memory.
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum FieldsShape {
+ /// Scalar primitives and `!`, which never have fields.
+ Primitive,
+
+ /// All fields start at no offset. The `usize` is the field count.
+ Union(NonZeroUsize),
+
+ /// Array/vector-like placement, with all fields of identical types.
+ Array { stride: Size, count: u64 },
+
+ /// Struct-like placement, with precomputed offsets.
+ ///
+ /// Fields are guaranteed to not overlap, but note that gaps
+ /// before, between and after all the fields are NOT always
+ /// padding, and as such their contents may not be discarded.
+ /// For example, enum variants leave a gap at the start,
+ /// where the discriminant field in the enum layout goes.
+ Arbitrary {
+ /// Offsets for the first byte of each field,
+ /// ordered to match the source definition order.
+        /// The offsets themselves are therefore not necessarily in increasing order.
+ // FIXME(eddyb) use small vector optimization for the common case.
+ offsets: Vec<Size>,
+
+ /// Maps source order field indices to memory order indices,
+ /// depending on how the fields were reordered (if at all).
+ /// This is a permutation, with both the source order and the
+ /// memory order using the same (0..n) index ranges.
+ ///
+ /// Note that during computation of `memory_index`, sometimes
+ /// it is easier to operate on the inverse mapping (that is,
+ /// from memory order to source order), and that is usually
+ /// named `inverse_memory_index`.
+ ///
+ // FIXME(eddyb) build a better abstraction for permutations, if possible.
+ // FIXME(camlorn) also consider small vector optimization here.
+ memory_index: Vec<u32>,
+ },
+}
+
+impl FieldsShape {
+ #[inline]
+ pub fn count(&self) -> usize {
+ match *self {
+ FieldsShape::Primitive => 0,
+ FieldsShape::Union(count) => count.get(),
+ FieldsShape::Array { count, .. } => count.try_into().unwrap(),
+ FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
+ }
+ }
+
+ #[inline]
+ pub fn offset(&self, i: usize) -> Size {
+ match *self {
+ FieldsShape::Primitive => {
+ unreachable!("FieldsShape::offset: `Primitive`s have no fields")
+ }
+ FieldsShape::Union(count) => {
+ assert!(
+ i < count.get(),
+ "tried to access field {} of union with {} fields",
+ i,
+ count
+ );
+ Size::ZERO
+ }
+ FieldsShape::Array { stride, count } => {
+ let i = u64::try_from(i).unwrap();
+ assert!(i < count);
+ stride * i
+ }
+ FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
+ }
+ }
+
+ #[inline]
+ pub fn memory_index(&self, i: usize) -> usize {
+ match *self {
+ FieldsShape::Primitive => {
+ unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
+ }
+ FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+ FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
+ }
+ }
+
+ /// Gets source indices of the fields by increasing offsets.
+ #[inline]
+ pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
+ let mut inverse_small = [0u8; 64];
+ let mut inverse_big = vec![];
+ let use_small = self.count() <= inverse_small.len();
+
+ // We have to write this logic twice in order to keep the array small.
+ if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
+ if use_small {
+ for i in 0..self.count() {
+ inverse_small[memory_index[i] as usize] = i as u8;
+ }
+ } else {
+ inverse_big = vec![0; self.count()];
+ for i in 0..self.count() {
+ inverse_big[memory_index[i] as usize] = i as u32;
+ }
+ }
+ }
+
+ (0..self.count()).map(move |i| match *self {
+ FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+ FieldsShape::Arbitrary { .. } => {
+ if use_small {
+ inverse_small[i] as usize
+ } else {
+ inverse_big[i] as usize
+ }
+ }
+ })
+ }
+}
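+
+// Worked sketch, not part of the original source, with assumed offsets for a
+// three-field struct whose fields were reordered in memory:
+//
+//     let fields = FieldsShape::Arbitrary {
+//         // source field 0 at offset 4, field 1 at offset 8, field 2 at offset 0
+//         offsets: vec![Size::from_bytes(4), Size::from_bytes(8), Size::from_bytes(0)],
+//         // source order -> memory order: field 0 is 2nd, field 1 is 3rd, field 2 is 1st
+//         memory_index: vec![1, 2, 0],
+//     };
+//     let by_offset: Vec<usize> = fields.index_by_increasing_offset().collect();
+//     assert_eq!(by_offset, vec![2, 0, 1]); // field 2 first, since it sits at offset 0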
+
+/// An identifier that specifies the address space that some operation
+/// should operate on. Special address spaces have an effect on code generation,
+/// depending on the target and the address spaces it implements.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AddressSpace(pub u32);
+
+impl AddressSpace {
+ /// The default address space, corresponding to data space.
+ pub const DATA: Self = AddressSpace(0);
+}
+
+/// Describes how values of the type are passed by target ABIs,
+/// in terms of categories of C types there are ABI rules for.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum Abi {
+ Uninhabited,
+ Scalar(Scalar),
+ ScalarPair(Scalar, Scalar),
+ Vector {
+ element: Scalar,
+ count: u64,
+ },
+ Aggregate {
+ /// If true, the size is exact, otherwise it's only a lower bound.
+ sized: bool,
+ },
+}
+
+impl Abi {
+ /// Returns `true` if the layout corresponds to an unsized type.
+ #[inline]
+ pub fn is_unsized(&self) -> bool {
+ match *self {
+ Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+ Abi::Aggregate { sized } => !sized,
+ }
+ }
+
+    /// Returns `true` if this is a single signed integer scalar.
+    /// Panics on anything other than `Abi::Scalar`.
+ #[inline]
+ pub fn is_signed(&self) -> bool {
+ match self {
+ Abi::Scalar(scal) => match scal.primitive() {
+ Primitive::Int(_, signed) => signed,
+ _ => false,
+ },
+ _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
+ }
+ }
+
+ /// Returns `true` if this is an uninhabited type
+ #[inline]
+ pub fn is_uninhabited(&self) -> bool {
+ matches!(*self, Abi::Uninhabited)
+ }
+
+    /// Returns `true` if this is a scalar type
+ #[inline]
+ pub fn is_scalar(&self) -> bool {
+ matches!(*self, Abi::Scalar(_))
+ }
+}
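+
+// Illustrative sketch, not part of the original source: a full-range `i16`
+// scalar ABI is sized, scalar, and signed.
+//
+//     let abi = Abi::Scalar(Scalar::Initialized {
+//         value: Int(I16, true),
+//         valid_range: WrappingRange { start: 0, end: 0xffff },
+//     });
+//     assert!(abi.is_scalar());
+//     assert!(abi.is_signed());
+//     assert!(!abi.is_unsized());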
+
+rustc_index::newtype_index! {
+ pub struct VariantIdx {
+ derive [HashStable_Generic]
+ }
+}
+
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum Variants<'a> {
+ /// Single enum variants, structs/tuples, unions, and all non-ADTs.
+ Single { index: VariantIdx },
+
+ /// Enum-likes with more than one inhabited variant: each variant comes with
+ /// a *discriminant* (usually the same as the variant index but the user can
+ /// assign explicit discriminant values). That discriminant is encoded
+ /// as a *tag* on the machine. The layout of each variant is
+ /// a struct, and they all have space reserved for the tag.
+ /// For enums, the tag is the sole field of the layout.
+ Multiple {
+ tag: Scalar,
+ tag_encoding: TagEncoding,
+ tag_field: usize,
+ variants: IndexVec<VariantIdx, Layout<'a>>,
+ },
+}
+
+#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub enum TagEncoding {
+ /// The tag directly stores the discriminant, but possibly with a smaller layout
+ /// (so converting the tag to the discriminant can require sign extension).
+ Direct,
+
+ /// Niche (values invalid for a type) encoding the discriminant:
+ /// Discriminant and variant index coincide.
+ /// The variant `dataful_variant` contains a niche at an arbitrary
+ /// offset (field `tag_field` of the enum), which for a variant with
+ /// discriminant `d` is set to
+ /// `(d - niche_variants.start).wrapping_add(niche_start)`.
+ ///
+ /// For example, `Option<(usize, &T)>` is represented such that
+ /// `None` has a null pointer for the second tuple field, and
+ /// `Some` is the identity function (with a non-null reference).
+ Niche {
+ dataful_variant: VariantIdx,
+ niche_variants: RangeInclusive<VariantIdx>,
+ niche_start: u128,
+ },
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
+pub struct Niche {
+ pub offset: Size,
+ pub value: Primitive,
+ pub valid_range: WrappingRange,
+}
+
+impl Niche {
+ pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
+ let Scalar::Initialized { value, valid_range } = scalar else { return None };
+ let niche = Niche { offset, value, valid_range };
+ if niche.available(cx) > 0 { Some(niche) } else { None }
+ }
+
+ pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
+ let Self { value, valid_range: v, .. } = *self;
+ let size = value.size(cx);
+ assert!(size.bits() <= 128);
+ let max_value = size.unsigned_int_max();
+
+ // Find out how many values are outside the valid range.
+ let niche = v.end.wrapping_add(1)..v.start;
+ niche.end.wrapping_sub(niche.start) & max_value
+ }
+
+ pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
+ assert!(count > 0);
+
+ let Self { value, valid_range: v, .. } = *self;
+ let size = value.size(cx);
+ assert!(size.bits() <= 128);
+ let max_value = size.unsigned_int_max();
+
+ let niche = v.end.wrapping_add(1)..v.start;
+ let available = niche.end.wrapping_sub(niche.start) & max_value;
+ if count > available {
+ return None;
+ }
+
+ // Extend the range of valid values being reserved by moving either `v.start` or `v.end` bound.
+ // Given an eventual `Option<T>`, we try to maximize the chance for `None` to occupy the niche of zero.
+        // This is accomplished by preferring enums with 2 variants (`count == 1`) and always taking the shortest path to niche zero.
+ // Having `None` in niche zero can enable some special optimizations.
+ //
+ // Bound selection criteria:
+ // 1. Select closest to zero given wrapping semantics.
+ // 2. Avoid moving past zero if possible.
+ //
+ // In practice this means that enums with `count > 1` are unlikely to claim niche zero, since they have to fit perfectly.
+        // If niche zero is already reserved, the selection of bounds is of little interest.
+ let move_start = |v: WrappingRange| {
+ let start = v.start.wrapping_sub(count) & max_value;
+ Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
+ };
+ let move_end = |v: WrappingRange| {
+ let start = v.end.wrapping_add(1) & max_value;
+ let end = v.end.wrapping_add(count) & max_value;
+ Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
+ };
+ let distance_end_zero = max_value - v.end;
+ if v.start > v.end {
+ // zero is unavailable because wrapping occurs
+ move_end(v)
+ } else if v.start <= distance_end_zero {
+ if count <= v.start {
+ move_start(v)
+ } else {
+ // moved past zero, use other bound
+ move_end(v)
+ }
+ } else {
+ let end = v.end.wrapping_add(count) & max_value;
+ let overshot_zero = (1..=v.end).contains(&end);
+ if overshot_zero {
+ // moved past zero, use other bound
+ move_start(v)
+ } else {
+ move_end(v)
+ }
+ }
+ }
+}
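+
+// Illustrative sketch, not part of the original source: the niche of `bool`
+// (an `I8` restricted to `0..=1`) has 254 spare values, and reserving one of
+// them (as `Option<bool>` does) extends the valid range to `0..=2`, encoding
+// the new variant as `2`. Here `cx` stands for any `HasDataLayout` context.
+//
+//     let niche = Niche {
+//         offset: Size::ZERO,
+//         value: Int(I8, false),
+//         valid_range: WrappingRange { start: 0, end: 1 },
+//     };
+//     assert_eq!(niche.available(&cx), 254);
+//     let (niche_start, scalar) = niche.reserve(&cx, 1).unwrap();
+//     assert_eq!(niche_start, 2);
+//     assert_eq!(scalar, Scalar::Initialized {
+//         value: Int(I8, false),
+//         valid_range: WrappingRange { start: 0, end: 2 },
+//     });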
+
+#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
+pub struct LayoutS<'a> {
+ /// Says where the fields are located within the layout.
+ pub fields: FieldsShape,
+
+ /// Encodes information about multi-variant layouts.
+ /// Even with `Multiple` variants, a layout still has its own fields! Those are then
+ /// shared between all variants. One of them will be the discriminant,
+ /// but e.g. generators can have more.
+ ///
+ /// To access all fields of this layout, both `fields` and the fields of the active variant
+ /// must be taken into account.
+ pub variants: Variants<'a>,
+
+ /// The `abi` defines how this data is passed between functions, and it defines
+ /// value restrictions via `valid_range`.
+ ///
+ /// Note that this is entirely orthogonal to the recursive structure defined by
+ /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
+ /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
+ /// have to be taken into account to find all fields of this layout.
+ pub abi: Abi,
+
+ /// The leaf scalar with the largest number of invalid values
+ /// (i.e. outside of its `valid_range`), if it exists.
+ pub largest_niche: Option<Niche>,
+
+ pub align: AbiAndPrefAlign,
+ pub size: Size,
+}
+
+impl<'a> LayoutS<'a> {
+ pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
+ let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
+ let size = scalar.size(cx);
+ let align = scalar.align(cx);
+ LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Primitive,
+ abi: Abi::Scalar(scalar),
+ largest_niche,
+ size,
+ align,
+ }
+ }
+}
+
+impl<'a> fmt::Debug for LayoutS<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // This is how `Layout` used to print before it became
+ // `Interned<LayoutS>`. We print it like this to avoid having to update
+ // expected output in a lot of tests.
+ let LayoutS { size, align, abi, fields, largest_niche, variants } = self;
+ f.debug_struct("Layout")
+ .field("size", size)
+ .field("align", align)
+ .field("abi", abi)
+ .field("fields", fields)
+ .field("largest_niche", largest_niche)
+ .field("variants", variants)
+ .finish()
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
+#[rustc_pass_by_value]
+pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
+
+impl<'a> fmt::Debug for Layout<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // See comment on `<LayoutS as Debug>::fmt` above.
+ self.0.0.fmt(f)
+ }
+}
+
+impl<'a> Layout<'a> {
+ pub fn fields(self) -> &'a FieldsShape {
+ &self.0.0.fields
+ }
+
+ pub fn variants(self) -> &'a Variants<'a> {
+ &self.0.0.variants
+ }
+
+ pub fn abi(self) -> Abi {
+ self.0.0.abi
+ }
+
+ pub fn largest_niche(self) -> Option<Niche> {
+ self.0.0.largest_niche
+ }
+
+ pub fn align(self) -> AbiAndPrefAlign {
+ self.0.0.align
+ }
+
+ pub fn size(self) -> Size {
+ self.0.0.size
+ }
+}
+
+/// The layout of a type, alongside the type itself.
+/// Provides various type traversal APIs (e.g., recursing into fields).
+///
+/// Note that the layout is NOT guaranteed to always be identical
+/// to that obtained from `layout_of(ty)`, as we need to produce
+/// layouts for which Rust types do not exist, such as enum variants
+/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
+pub struct TyAndLayout<'a, Ty> {
+ pub ty: Ty,
+ pub layout: Layout<'a>,
+}
+
+impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
+ type Target = &'a LayoutS<'a>;
+ fn deref(&self) -> &&'a LayoutS<'a> {
+ &self.layout.0.0
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum PointerKind {
+    /// Most general case: we have no restrictions to tell LLVM.
+ SharedMutable,
+
+ /// `&T` where `T` contains no `UnsafeCell`, is `dereferenceable`, `noalias` and `readonly`.
+ Frozen,
+
+ /// `&mut T` which is `dereferenceable` and `noalias` but not `readonly`.
+ UniqueBorrowed,
+
+ /// `&mut !Unpin`, which is `dereferenceable` but neither `noalias` nor `readonly`.
+ UniqueBorrowedPinned,
+
+ /// `Box<T>`, which is `noalias` (even on return types, unlike the above) but neither `readonly`
+ /// nor `dereferenceable`.
+ UniqueOwned,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct PointeeInfo {
+ pub size: Size,
+ pub align: Align,
+ pub safe: Option<PointerKind>,
+ pub address_space: AddressSpace,
+}
+
+/// Used in `might_permit_raw_init` to indicate the kind of initialization
+/// that is checked to be valid.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum InitKind {
+ Zero,
+ Uninit,
+}
+
+/// Trait that needs to be implemented by the higher-level type representation
+/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
+pub trait TyAbiInterface<'a, C>: Sized {
+ fn ty_and_layout_for_variant(
+ this: TyAndLayout<'a, Self>,
+ cx: &C,
+ variant_index: VariantIdx,
+ ) -> TyAndLayout<'a, Self>;
+ fn ty_and_layout_field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> TyAndLayout<'a, Self>;
+ fn ty_and_layout_pointee_info_at(
+ this: TyAndLayout<'a, Self>,
+ cx: &C,
+ offset: Size,
+ ) -> Option<PointeeInfo>;
+ fn is_adt(this: TyAndLayout<'a, Self>) -> bool;
+ fn is_never(this: TyAndLayout<'a, Self>) -> bool;
+ fn is_tuple(this: TyAndLayout<'a, Self>) -> bool;
+ fn is_unit(this: TyAndLayout<'a, Self>) -> bool;
+}
+
+impl<'a, Ty> TyAndLayout<'a, Ty> {
+ pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::ty_and_layout_for_variant(self, cx, variant_index)
+ }
+
+ pub fn field<C>(self, cx: &C, i: usize) -> Self
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::ty_and_layout_field(self, cx, i)
+ }
+
+ pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::ty_and_layout_pointee_info_at(self, cx, offset)
+ }
+
+ pub fn is_single_fp_element<C>(self, cx: &C) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ C: HasDataLayout,
+ {
+ match self.abi {
+ Abi::Scalar(scalar) => scalar.primitive().is_float(),
+ Abi::Aggregate { .. } => {
+ if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
+ self.field(cx, 0).is_single_fp_element(cx)
+ } else {
+ false
+ }
+ }
+ _ => false,
+ }
+ }
+
+ pub fn is_adt<C>(self) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::is_adt(self)
+ }
+
+ pub fn is_never<C>(self) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::is_never(self)
+ }
+
+ pub fn is_tuple<C>(self) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::is_tuple(self)
+ }
+
+ pub fn is_unit<C>(self) -> bool
+ where
+ Ty: TyAbiInterface<'a, C>,
+ {
+ Ty::is_unit(self)
+ }
+}
+
+impl<'a, Ty> TyAndLayout<'a, Ty> {
+ /// Returns `true` if the layout corresponds to an unsized type.
+ pub fn is_unsized(&self) -> bool {
+ self.abi.is_unsized()
+ }
+
+ /// Returns `true` if the type is a ZST and not unsized.
+ pub fn is_zst(&self) -> bool {
+ match self.abi {
+ Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+ Abi::Uninhabited => self.size.bytes() == 0,
+ Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
+ }
+ }
+
+ /// Determines if this type permits "raw" initialization by just transmuting some
+ /// memory into an instance of `T`.
+ ///
+ /// `init_kind` indicates if the memory is zero-initialized or left uninitialized.
+ ///
+ /// This code is intentionally conservative, and will not detect
+ /// * zero init of an enum whose 0 variant does not allow zero initialization
+    /// * making uninitialized types that have a full valid range (ints, floats, raw pointers)
+    /// * any form of invalid value being made inside an array (unless the value is uninhabited)
+ ///
+ /// A strict form of these checks that uses const evaluation exists in
+ /// `rustc_const_eval::might_permit_raw_init`, and a tracking issue for making these checks
+ /// stricter is <https://github.com/rust-lang/rust/issues/66151>.
+ ///
+    /// FIXME: Once all the conservatism is removed from here, and the checks are run by default,
+    /// we can always use the const evaluation checks instead.
+ pub fn might_permit_raw_init<C>(self, cx: &C, init_kind: InitKind) -> bool
+ where
+ Self: Copy,
+ Ty: TyAbiInterface<'a, C>,
+ C: HasDataLayout,
+ {
+ let scalar_allows_raw_init = move |s: Scalar| -> bool {
+ match init_kind {
+ InitKind::Zero => {
+ // The range must contain 0.
+ s.valid_range(cx).contains(0)
+ }
+ InitKind::Uninit => {
+ // The range must include all values.
+ s.is_always_valid(cx)
+ }
+ }
+ };
+
+ // Check the ABI.
+ let valid = match self.abi {
+ Abi::Uninhabited => false, // definitely UB
+ Abi::Scalar(s) => scalar_allows_raw_init(s),
+ Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
+ Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+ Abi::Aggregate { .. } => true, // Fields are checked below.
+ };
+ if !valid {
+ // This is definitely not okay.
+ return false;
+ }
+
+ // If we have not found an error yet, we need to recursively descend into fields.
+ match &self.fields {
+ FieldsShape::Primitive | FieldsShape::Union { .. } => {}
+ FieldsShape::Array { .. } => {
+ // FIXME(#66151): For now, we are conservative and do not check arrays by default.
+ }
+ FieldsShape::Arbitrary { offsets, .. } => {
+ for idx in 0..offsets.len() {
+ if !self.field(cx, idx).might_permit_raw_init(cx, init_kind) {
+ // We found a field that is unhappy with this kind of initialization.
+ return false;
+ }
+ }
+ }
+ }
+
+ // FIXME(#66151): For now, we are conservative and do not check `self.variants`.
+ true
+ }
+}
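+
+// Illustrative sketch, not part of the original source, assuming `bool_layout`
+// and `u32_layout` are the `TyAndLayout`s of `bool` and `u32`:
+//
+//     // 0 is a valid `bool`, but uninit memory is not (only 0 and 1 are valid).
+//     assert!(bool_layout.might_permit_raw_init(&cx, InitKind::Zero));
+//     assert!(!bool_layout.might_permit_raw_init(&cx, InitKind::Uninit));
+//     // `u32` has a full valid range, so this conservative check permits both.
+//     assert!(u32_layout.might_permit_raw_init(&cx, InitKind::Uninit));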
diff --git a/compiler/rustc_target/src/asm/aarch64.rs b/compiler/rustc_target/src/asm/aarch64.rs
new file mode 100644
index 000000000..62a0f9fb0
--- /dev/null
+++ b/compiler/rustc_target/src/asm/aarch64.rs
@@ -0,0 +1,200 @@
+use super::{InlineAsmArch, InlineAsmType};
+use crate::spec::{RelocModel, Target};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ AArch64 AArch64InlineAsmRegClass {
+ reg,
+ vreg,
+ vreg_low16,
+ preg,
+ }
+}
+
+impl AArch64InlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ match self {
+ Self::reg => &['w', 'x'],
+ Self::vreg | Self::vreg_low16 => &['b', 'h', 's', 'd', 'q', 'v'],
+ Self::preg => &[],
+ }
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ match self {
+ Self::reg => match ty.size().bits() {
+ 64 => None,
+ _ => Some(('w', "w0")),
+ },
+ Self::vreg | Self::vreg_low16 => match ty.size().bits() {
+ 8 => Some(('b', "b0")),
+ 16 => Some(('h', "h0")),
+ 32 => Some(('s', "s0")),
+ 64 => Some(('d', "d0")),
+ 128 => Some(('q', "q0")),
+ _ => None,
+ },
+ Self::preg => None,
+ }
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ match self {
+ Self::reg => Some(('x', "x0")),
+ Self::vreg | Self::vreg_low16 => Some(('v', "v0")),
+ Self::preg => None,
+ }
+ }
+
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg => types! { _: I8, I16, I32, I64, F32, F64; },
+ Self::vreg | Self::vreg_low16 => types! {
+ neon: I8, I16, I32, I64, F32, F64,
+ VecI8(8), VecI16(4), VecI32(2), VecI64(1), VecF32(2), VecF64(1),
+ VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2);
+ },
+ Self::preg => &[],
+ }
+ }
+}
+
+pub fn target_reserves_x18(target: &Target) -> bool {
+ target.os == "android" || target.os == "fuchsia" || target.is_like_osx || target.is_like_windows
+}
+
+fn reserved_x18(
+ _arch: InlineAsmArch,
+ _reloc_model: RelocModel,
+ _target_features: &FxHashSet<Symbol>,
+ target: &Target,
+ _is_clobber: bool,
+) -> Result<(), &'static str> {
+ if target_reserves_x18(target) {
+ Err("x18 is a reserved register on this target")
+ } else {
+ Ok(())
+ }
+}
+
+def_regs! {
+ AArch64 AArch64InlineAsmReg AArch64InlineAsmRegClass {
+ x0: reg = ["x0", "w0"],
+ x1: reg = ["x1", "w1"],
+ x2: reg = ["x2", "w2"],
+ x3: reg = ["x3", "w3"],
+ x4: reg = ["x4", "w4"],
+ x5: reg = ["x5", "w5"],
+ x6: reg = ["x6", "w6"],
+ x7: reg = ["x7", "w7"],
+ x8: reg = ["x8", "w8"],
+ x9: reg = ["x9", "w9"],
+ x10: reg = ["x10", "w10"],
+ x11: reg = ["x11", "w11"],
+ x12: reg = ["x12", "w12"],
+ x13: reg = ["x13", "w13"],
+ x14: reg = ["x14", "w14"],
+ x15: reg = ["x15", "w15"],
+ x16: reg = ["x16", "w16"],
+ x17: reg = ["x17", "w17"],
+ x18: reg = ["x18", "w18"] % reserved_x18,
+ x20: reg = ["x20", "w20"],
+ x21: reg = ["x21", "w21"],
+ x22: reg = ["x22", "w22"],
+ x23: reg = ["x23", "w23"],
+ x24: reg = ["x24", "w24"],
+ x25: reg = ["x25", "w25"],
+ x26: reg = ["x26", "w26"],
+ x27: reg = ["x27", "w27"],
+ x28: reg = ["x28", "w28"],
+ x30: reg = ["x30", "w30", "lr", "wlr"],
+ v0: vreg, vreg_low16 = ["v0", "b0", "h0", "s0", "d0", "q0", "z0"],
+ v1: vreg, vreg_low16 = ["v1", "b1", "h1", "s1", "d1", "q1", "z1"],
+ v2: vreg, vreg_low16 = ["v2", "b2", "h2", "s2", "d2", "q2", "z2"],
+ v3: vreg, vreg_low16 = ["v3", "b3", "h3", "s3", "d3", "q3", "z3"],
+ v4: vreg, vreg_low16 = ["v4", "b4", "h4", "s4", "d4", "q4", "z4"],
+ v5: vreg, vreg_low16 = ["v5", "b5", "h5", "s5", "d5", "q5", "z5"],
+ v6: vreg, vreg_low16 = ["v6", "b6", "h6", "s6", "d6", "q6", "z6"],
+ v7: vreg, vreg_low16 = ["v7", "b7", "h7", "s7", "d7", "q7", "z7"],
+ v8: vreg, vreg_low16 = ["v8", "b8", "h8", "s8", "d8", "q8", "z8"],
+ v9: vreg, vreg_low16 = ["v9", "b9", "h9", "s9", "d9", "q9", "z9"],
+ v10: vreg, vreg_low16 = ["v10", "b10", "h10", "s10", "d10", "q10", "z10"],
+ v11: vreg, vreg_low16 = ["v11", "b11", "h11", "s11", "d11", "q11", "z11"],
+ v12: vreg, vreg_low16 = ["v12", "b12", "h12", "s12", "d12", "q12", "z12"],
+ v13: vreg, vreg_low16 = ["v13", "b13", "h13", "s13", "d13", "q13", "z13"],
+ v14: vreg, vreg_low16 = ["v14", "b14", "h14", "s14", "d14", "q14", "z14"],
+ v15: vreg, vreg_low16 = ["v15", "b15", "h15", "s15", "d15", "q15", "z15"],
+ v16: vreg = ["v16", "b16", "h16", "s16", "d16", "q16", "z16"],
+ v17: vreg = ["v17", "b17", "h17", "s17", "d17", "q17", "z17"],
+ v18: vreg = ["v18", "b18", "h18", "s18", "d18", "q18", "z18"],
+ v19: vreg = ["v19", "b19", "h19", "s19", "d19", "q19", "z19"],
+ v20: vreg = ["v20", "b20", "h20", "s20", "d20", "q20", "z20"],
+ v21: vreg = ["v21", "b21", "h21", "s21", "d21", "q21", "z21"],
+ v22: vreg = ["v22", "b22", "h22", "s22", "d22", "q22", "z22"],
+ v23: vreg = ["v23", "b23", "h23", "s23", "d23", "q23", "z23"],
+ v24: vreg = ["v24", "b24", "h24", "s24", "d24", "q24", "z24"],
+ v25: vreg = ["v25", "b25", "h25", "s25", "d25", "q25", "z25"],
+ v26: vreg = ["v26", "b26", "h26", "s26", "d26", "q26", "z26"],
+ v27: vreg = ["v27", "b27", "h27", "s27", "d27", "q27", "z27"],
+ v28: vreg = ["v28", "b28", "h28", "s28", "d28", "q28", "z28"],
+ v29: vreg = ["v29", "b29", "h29", "s29", "d29", "q29", "z29"],
+ v30: vreg = ["v30", "b30", "h30", "s30", "d30", "q30", "z30"],
+ v31: vreg = ["v31", "b31", "h31", "s31", "d31", "q31", "z31"],
+ p0: preg = ["p0"],
+ p1: preg = ["p1"],
+ p2: preg = ["p2"],
+ p3: preg = ["p3"],
+ p4: preg = ["p4"],
+ p5: preg = ["p5"],
+ p6: preg = ["p6"],
+ p7: preg = ["p7"],
+ p8: preg = ["p8"],
+ p9: preg = ["p9"],
+ p10: preg = ["p10"],
+ p11: preg = ["p11"],
+ p12: preg = ["p12"],
+ p13: preg = ["p13"],
+ p14: preg = ["p14"],
+ p15: preg = ["p15"],
+ ffr: preg = ["ffr"],
+ #error = ["x19", "w19"] =>
+ "x19 is used internally by LLVM and cannot be used as an operand for inline asm",
+ #error = ["x29", "w29", "fp", "wfp"] =>
+ "the frame pointer cannot be used as an operand for inline asm",
+ #error = ["sp", "wsp"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["xzr", "wzr"] =>
+ "the zero register cannot be used as an operand for inline asm",
+ }
+}
+
+impl AArch64InlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ modifier: Option<char>,
+ ) -> fmt::Result {
+ let (prefix, index) = if (self as u32) < Self::v0 as u32 {
+ (modifier.unwrap_or('x'), self as u32 - Self::x0 as u32)
+ } else {
+ (modifier.unwrap_or('v'), self as u32 - Self::v0 as u32)
+ };
+ assert!(index < 32);
+ write!(out, "{}{}", prefix, index)
+ }
+}
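+
+// Illustrative sketch, not part of the original source: the modifier selects
+// the register view, so `x0` prints as `x0` by default and as `w0` with the
+// `w` modifier, while vector registers default to the `v` view.
+//
+//     let mut s = String::new();
+//     AArch64InlineAsmReg::x0.emit(&mut s, InlineAsmArch::AArch64, Some('w')).unwrap();
+//     assert_eq!(s, "w0");
+//     s.clear();
+//     AArch64InlineAsmReg::v0.emit(&mut s, InlineAsmArch::AArch64, None).unwrap();
+//     assert_eq!(s, "v0");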
diff --git a/compiler/rustc_target/src/asm/arm.rs b/compiler/rustc_target/src/asm/arm.rs
new file mode 100644
index 000000000..0db3eb6fc
--- /dev/null
+++ b/compiler/rustc_target/src/asm/arm.rs
@@ -0,0 +1,340 @@
+use super::{InlineAsmArch, InlineAsmType};
+use crate::spec::{RelocModel, Target};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_macros::HashStable_Generic;
+use rustc_span::{sym, Symbol};
+use std::fmt;
+
+def_reg_class! {
+ Arm ArmInlineAsmRegClass {
+ reg,
+ sreg,
+ sreg_low16,
+ dreg,
+ dreg_low16,
+ dreg_low8,
+ qreg,
+ qreg_low8,
+ qreg_low4,
+ }
+}
+
+impl ArmInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ match self {
+ Self::qreg | Self::qreg_low8 | Self::qreg_low4 => &['e', 'f'],
+ _ => &[],
+ }
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg => types! { _: I8, I16, I32, F32; },
+ Self::sreg | Self::sreg_low16 => types! { vfp2: I32, F32; },
+ Self::dreg_low16 | Self::dreg_low8 => types! {
+ vfp2: I64, F64, VecI8(8), VecI16(4), VecI32(2), VecI64(1), VecF32(2);
+ },
+ Self::dreg => types! {
+ d32: I64, F64, VecI8(8), VecI16(4), VecI32(2), VecI64(1), VecF32(2);
+ },
+ Self::qreg | Self::qreg_low8 | Self::qreg_low4 => types! {
+ neon: VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4);
+ },
+ }
+ }
+}
+
+// This uses the same logic as useR7AsFramePointer in LLVM
+fn frame_pointer_is_r7(target_features: &FxHashSet<Symbol>, target: &Target) -> bool {
+ target.is_like_osx || (!target.is_like_windows && target_features.contains(&sym::thumb_mode))
+}
+
+fn frame_pointer_r11(
+ arch: InlineAsmArch,
+ reloc_model: RelocModel,
+ target_features: &FxHashSet<Symbol>,
+ target: &Target,
+ is_clobber: bool,
+) -> Result<(), &'static str> {
+ not_thumb1(arch, reloc_model, target_features, target, is_clobber)?;
+
+ if !frame_pointer_is_r7(target_features, target) {
+ Err("the frame pointer (r11) cannot be used as an operand for inline asm")
+ } else {
+ Ok(())
+ }
+}
+
+fn frame_pointer_r7(
+ _arch: InlineAsmArch,
+ _reloc_model: RelocModel,
+ target_features: &FxHashSet<Symbol>,
+ target: &Target,
+ _is_clobber: bool,
+) -> Result<(), &'static str> {
+ if frame_pointer_is_r7(target_features, target) {
+ Err("the frame pointer (r7) cannot be used as an operand for inline asm")
+ } else {
+ Ok(())
+ }
+}
+
+fn not_thumb1(
+ _arch: InlineAsmArch,
+ _reloc_model: RelocModel,
+ target_features: &FxHashSet<Symbol>,
+ _target: &Target,
+ is_clobber: bool,
+) -> Result<(), &'static str> {
+ if !is_clobber
+ && target_features.contains(&sym::thumb_mode)
+ && !target_features.contains(&sym::thumb2)
+ {
+ Err("high registers (r8+) can only be used as clobbers in Thumb-1 code")
+ } else {
+ Ok(())
+ }
+}
+
+fn reserved_r9(
+ arch: InlineAsmArch,
+ reloc_model: RelocModel,
+ target_features: &FxHashSet<Symbol>,
+ target: &Target,
+ is_clobber: bool,
+) -> Result<(), &'static str> {
+ not_thumb1(arch, reloc_model, target_features, target, is_clobber)?;
+
+ match reloc_model {
+ RelocModel::Rwpi | RelocModel::RopiRwpi => {
+ Err("the RWPI static base register (r9) cannot be used as an operand for inline asm")
+ }
+ _ => Ok(()),
+ }
+}
+
+def_regs! {
+ Arm ArmInlineAsmReg ArmInlineAsmRegClass {
+ r0: reg = ["r0", "a1"],
+ r1: reg = ["r1", "a2"],
+ r2: reg = ["r2", "a3"],
+ r3: reg = ["r3", "a4"],
+ r4: reg = ["r4", "v1"],
+ r5: reg = ["r5", "v2"],
+ r7: reg = ["r7", "v4"] % frame_pointer_r7,
+ r8: reg = ["r8", "v5"] % not_thumb1,
+ r9: reg = ["r9", "v6", "rfp"] % reserved_r9,
+ r10: reg = ["r10", "sl"] % not_thumb1,
+ r11: reg = ["r11", "fp"] % frame_pointer_r11,
+ r12: reg = ["r12", "ip"] % not_thumb1,
+ r14: reg = ["r14", "lr"] % not_thumb1,
+ s0: sreg, sreg_low16 = ["s0"],
+ s1: sreg, sreg_low16 = ["s1"],
+ s2: sreg, sreg_low16 = ["s2"],
+ s3: sreg, sreg_low16 = ["s3"],
+ s4: sreg, sreg_low16 = ["s4"],
+ s5: sreg, sreg_low16 = ["s5"],
+ s6: sreg, sreg_low16 = ["s6"],
+ s7: sreg, sreg_low16 = ["s7"],
+ s8: sreg, sreg_low16 = ["s8"],
+ s9: sreg, sreg_low16 = ["s9"],
+ s10: sreg, sreg_low16 = ["s10"],
+ s11: sreg, sreg_low16 = ["s11"],
+ s12: sreg, sreg_low16 = ["s12"],
+ s13: sreg, sreg_low16 = ["s13"],
+ s14: sreg, sreg_low16 = ["s14"],
+ s15: sreg, sreg_low16 = ["s15"],
+ s16: sreg = ["s16"],
+ s17: sreg = ["s17"],
+ s18: sreg = ["s18"],
+ s19: sreg = ["s19"],
+ s20: sreg = ["s20"],
+ s21: sreg = ["s21"],
+ s22: sreg = ["s22"],
+ s23: sreg = ["s23"],
+ s24: sreg = ["s24"],
+ s25: sreg = ["s25"],
+ s26: sreg = ["s26"],
+ s27: sreg = ["s27"],
+ s28: sreg = ["s28"],
+ s29: sreg = ["s29"],
+ s30: sreg = ["s30"],
+ s31: sreg = ["s31"],
+ d0: dreg, dreg_low16, dreg_low8 = ["d0"],
+ d1: dreg, dreg_low16, dreg_low8 = ["d1"],
+ d2: dreg, dreg_low16, dreg_low8 = ["d2"],
+ d3: dreg, dreg_low16, dreg_low8 = ["d3"],
+ d4: dreg, dreg_low16, dreg_low8 = ["d4"],
+ d5: dreg, dreg_low16, dreg_low8 = ["d5"],
+ d6: dreg, dreg_low16, dreg_low8 = ["d6"],
+ d7: dreg, dreg_low16, dreg_low8 = ["d7"],
+ d8: dreg, dreg_low16 = ["d8"],
+ d9: dreg, dreg_low16 = ["d9"],
+ d10: dreg, dreg_low16 = ["d10"],
+ d11: dreg, dreg_low16 = ["d11"],
+ d12: dreg, dreg_low16 = ["d12"],
+ d13: dreg, dreg_low16 = ["d13"],
+ d14: dreg, dreg_low16 = ["d14"],
+ d15: dreg, dreg_low16 = ["d15"],
+ d16: dreg = ["d16"],
+ d17: dreg = ["d17"],
+ d18: dreg = ["d18"],
+ d19: dreg = ["d19"],
+ d20: dreg = ["d20"],
+ d21: dreg = ["d21"],
+ d22: dreg = ["d22"],
+ d23: dreg = ["d23"],
+ d24: dreg = ["d24"],
+ d25: dreg = ["d25"],
+ d26: dreg = ["d26"],
+ d27: dreg = ["d27"],
+ d28: dreg = ["d28"],
+ d29: dreg = ["d29"],
+ d30: dreg = ["d30"],
+ d31: dreg = ["d31"],
+ q0: qreg, qreg_low8, qreg_low4 = ["q0"],
+ q1: qreg, qreg_low8, qreg_low4 = ["q1"],
+ q2: qreg, qreg_low8, qreg_low4 = ["q2"],
+ q3: qreg, qreg_low8, qreg_low4 = ["q3"],
+ q4: qreg, qreg_low8 = ["q4"],
+ q5: qreg, qreg_low8 = ["q5"],
+ q6: qreg, qreg_low8 = ["q6"],
+ q7: qreg, qreg_low8 = ["q7"],
+ q8: qreg = ["q8"],
+ q9: qreg = ["q9"],
+ q10: qreg = ["q10"],
+ q11: qreg = ["q11"],
+ q12: qreg = ["q12"],
+ q13: qreg = ["q13"],
+ q14: qreg = ["q14"],
+ q15: qreg = ["q15"],
+ #error = ["r6", "v3"] =>
+ "r6 is used internally by LLVM and cannot be used as an operand for inline asm",
+ #error = ["r13", "sp"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["r15", "pc"] =>
+ "the program pointer cannot be used as an operand for inline asm",
+ }
+}
+
+impl ArmInlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ modifier: Option<char>,
+ ) -> fmt::Result {
+ // Only qreg is allowed to have modifiers. This should have been
+ // validated already by now.
+ if let Some(modifier) = modifier {
+ let index = self as u32 - Self::q0 as u32;
+ assert!(index < 16);
+ let index = index * 2 + (modifier == 'f') as u32;
+ write!(out, "d{}", index)
+ } else {
+ out.write_str(self.name())
+ }
+ }
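+
+    // Illustrative sketch, not part of the original source: the `e`/`f`
+    // modifiers pick the low/high D-register half of a Q register.
+    //
+    //     let mut s = String::new();
+    //     ArmInlineAsmReg::q1.emit(&mut s, InlineAsmArch::Arm, Some('f')).unwrap();
+    //     assert_eq!(s, "d3"); // q1 = d2:d3, and 'f' selects the high half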
+
+ pub fn overlapping_regs(self, mut cb: impl FnMut(ArmInlineAsmReg)) {
+ cb(self);
+
+ macro_rules! reg_conflicts {
+ (
+ $(
+ $q:ident : $d0:ident $d1:ident : $s0:ident $s1:ident $s2:ident $s3:ident
+ ),*;
+ $(
+ $q_high:ident : $d0_high:ident $d1_high:ident
+ ),*;
+ ) => {
+ match self {
+ $(
+ Self::$q => {
+ cb(Self::$d0);
+ cb(Self::$d1);
+ cb(Self::$s0);
+ cb(Self::$s1);
+ cb(Self::$s2);
+ cb(Self::$s3);
+ }
+ Self::$d0 => {
+ cb(Self::$q);
+ cb(Self::$s0);
+ cb(Self::$s1);
+ }
+ Self::$d1 => {
+ cb(Self::$q);
+ cb(Self::$s2);
+ cb(Self::$s3);
+ }
+ Self::$s0 | Self::$s1 => {
+ cb(Self::$q);
+ cb(Self::$d0);
+ }
+ Self::$s2 | Self::$s3 => {
+ cb(Self::$q);
+ cb(Self::$d1);
+ }
+ )*
+ $(
+ Self::$q_high => {
+ cb(Self::$d0_high);
+ cb(Self::$d1_high);
+ }
+ Self::$d0_high | Self::$d1_high => {
+ cb(Self::$q_high);
+ }
+ )*
+ _ => {},
+ }
+ };
+ }
+
+ // ARM's floating-point register file is interesting in that it can be
+ // viewed as 16 128-bit registers, 32 64-bit registers or 32 32-bit
+ // registers. Because these views overlap, the registers of different
+ // widths will conflict (e.g. d0 overlaps with s0 and s1, and q1
+ // overlaps with d2 and d3).
+ //
+ // See section E1.3.1 of the ARM Architecture Reference Manual for
+ // ARMv8-A for more details.
+ reg_conflicts! {
+ q0 : d0 d1 : s0 s1 s2 s3,
+ q1 : d2 d3 : s4 s5 s6 s7,
+ q2 : d4 d5 : s8 s9 s10 s11,
+ q3 : d6 d7 : s12 s13 s14 s15,
+ q4 : d8 d9 : s16 s17 s18 s19,
+ q5 : d10 d11 : s20 s21 s22 s23,
+ q6 : d12 d13 : s24 s25 s26 s27,
+ q7 : d14 d15 : s28 s29 s30 s31;
+ q8 : d16 d17,
+ q9 : d18 d19,
+ q10 : d20 d21,
+ q11 : d22 d23,
+ q12 : d24 d25,
+ q13 : d26 d27,
+ q14 : d28 d29,
+ q15 : d30 d31;
+ }
+ }
+}
diff --git a/compiler/rustc_target/src/asm/avr.rs b/compiler/rustc_target/src/asm/avr.rs
new file mode 100644
index 000000000..9a96a61f5
--- /dev/null
+++ b/compiler/rustc_target/src/asm/avr.rs
@@ -0,0 +1,197 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ Avr AvrInlineAsmRegClass {
+ reg,
+ reg_upper,
+ reg_pair,
+ reg_iw,
+ reg_ptr,
+ }
+}
+
+impl AvrInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: InlineAsmArch) -> &'static [char] {
+ match self {
+ Self::reg_pair | Self::reg_iw | Self::reg_ptr => &['h', 'l'],
+ _ => &[],
+ }
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg => types! { _: I8; },
+ Self::reg_upper => types! { _: I8; },
+ Self::reg_pair => types! { _: I16; },
+ Self::reg_iw => types! { _: I16; },
+ Self::reg_ptr => types! { _: I16; },
+ }
+ }
+}
+
+def_regs! {
+ Avr AvrInlineAsmReg AvrInlineAsmRegClass {
+ r2: reg = ["r2"],
+ r3: reg = ["r3"],
+ r4: reg = ["r4"],
+ r5: reg = ["r5"],
+ r6: reg = ["r6"],
+ r7: reg = ["r7"],
+ r8: reg = ["r8"],
+ r9: reg = ["r9"],
+ r10: reg = ["r10"],
+ r11: reg = ["r11"],
+ r12: reg = ["r12"],
+ r13: reg = ["r13"],
+ r14: reg = ["r14"],
+ r15: reg = ["r15"],
+ r16: reg, reg_upper = ["r16"],
+ r17: reg, reg_upper = ["r17"],
+ r18: reg, reg_upper = ["r18"],
+ r19: reg, reg_upper = ["r19"],
+ r20: reg, reg_upper = ["r20"],
+ r21: reg, reg_upper = ["r21"],
+ r22: reg, reg_upper = ["r22"],
+ r23: reg, reg_upper = ["r23"],
+ r24: reg, reg_upper = ["r24"],
+ r25: reg, reg_upper = ["r25"],
+ r26: reg, reg_upper = ["r26", "XL"],
+ r27: reg, reg_upper = ["r27", "XH"],
+ r30: reg, reg_upper = ["r30", "ZL"],
+ r31: reg, reg_upper = ["r31", "ZH"],
+
+ r3r2: reg_pair = ["r3r2"],
+ r5r4: reg_pair = ["r5r4"],
+ r7r6: reg_pair = ["r7r6"],
+ r9r8: reg_pair = ["r9r8"],
+ r11r10: reg_pair = ["r11r10"],
+ r13r12: reg_pair = ["r13r12"],
+ r15r14: reg_pair = ["r15r14"],
+ r17r16: reg_pair = ["r17r16"],
+ r19r18: reg_pair = ["r19r18"],
+ r21r20: reg_pair = ["r21r20"],
+ r23r22: reg_pair = ["r23r22"],
+
+ r25r24: reg_iw, reg_pair = ["r25r24"],
+
+ X: reg_ptr, reg_iw, reg_pair = ["r27r26", "X"],
+ Z: reg_ptr, reg_iw, reg_pair = ["r31r30", "Z"],
+
+ #error = ["Y", "YL", "YH"] =>
+ "the frame pointer cannot be used as an operand for inline asm",
+ #error = ["SP", "SPL", "SPH"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["r0", "r1", "r1r0"] =>
+ "r0 and r1 are not available due to an issue in LLVM",
+ }
+}
+
+macro_rules! emit_pairs {
+ (
+ $self:ident $modifier:ident,
+ $($pair:ident $name:literal $hi:literal $lo:literal,)*
+ ) => {
+ match ($self, $modifier) {
+ $(
+ (AvrInlineAsmReg::$pair, Some('h')) => $hi,
+ (AvrInlineAsmReg::$pair, Some('l')) => $lo,
+ (AvrInlineAsmReg::$pair, _) => $name,
+ )*
+ _ => $self.name(),
+ }
+ };
+}
+
+impl AvrInlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ modifier: Option<char>,
+ ) -> fmt::Result {
+ let name = emit_pairs! {
+ self modifier,
+ Z "Z" "ZH" "ZL",
+ X "X" "XH" "XL",
+ r25r24 "r25:r24" "r25" "r24",
+ r23r22 "r23:r22" "r23" "r22",
+ r21r20 "r21:r20" "r21" "r20",
+ r19r18 "r19:r18" "r19" "r18",
+ r17r16 "r17:r16" "r17" "r16",
+ r15r14 "r15:r14" "r15" "r14",
+ r13r12 "r13:r12" "r13" "r12",
+ r11r10 "r11:r10" "r11" "r10",
+ r9r8 "r9:r8" "r9" "r8",
+ r7r6 "r7:r6" "r7" "r6",
+ r5r4 "r5:r4" "r5" "r4",
+ r3r2 "r3:r2" "r3" "r2",
+ };
+ out.write_str(name)
+ }
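+
+    // Illustrative sketch, not part of the original source: register pairs
+    // split under the `h`/`l` modifiers defined by `emit_pairs!` above.
+    //
+    //     let mut s = String::new();
+    //     AvrInlineAsmReg::X.emit(&mut s, InlineAsmArch::Avr, Some('l')).unwrap();
+    //     assert_eq!(s, "XL"); // `r25r24` would emit "r24" here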
+
+ pub fn overlapping_regs(self, mut cb: impl FnMut(AvrInlineAsmReg)) {
+ cb(self);
+
+ macro_rules! reg_conflicts {
+ (
+ $(
+ $pair:ident : $hi:ident $lo:ident,
+ )*
+ ) => {
+ match self {
+ $(
+ Self::$pair => {
+ cb(Self::$hi);
+ cb(Self::$lo);
+ }
+ Self::$hi => {
+ cb(Self::$pair);
+ }
+ Self::$lo => {
+ cb(Self::$pair);
+ }
+ )*
+ }
+ };
+ }
+
+ reg_conflicts! {
+ Z : r31 r30,
+ X : r27 r26,
+ r25r24 : r25 r24,
+ r23r22 : r23 r22,
+ r21r20 : r21 r20,
+ r19r18 : r19 r18,
+ r17r16 : r17 r16,
+ r15r14 : r15 r14,
+ r13r12 : r13 r12,
+ r11r10 : r11 r10,
+ r9r8 : r9 r8,
+ r7r6 : r7 r6,
+ r5r4 : r5 r4,
+ r3r2 : r3 r2,
+ }
+ }
+}
diff --git a/compiler/rustc_target/src/asm/bpf.rs b/compiler/rustc_target/src/asm/bpf.rs
new file mode 100644
index 000000000..3b03766a0
--- /dev/null
+++ b/compiler/rustc_target/src/asm/bpf.rs
@@ -0,0 +1,118 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ Bpf BpfInlineAsmRegClass {
+ reg,
+ wreg,
+ }
+}
+
+impl BpfInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg => types! { _: I8, I16, I32, I64; },
+ Self::wreg => types! { alu32: I8, I16, I32; },
+ }
+ }
+}
+
+def_regs! {
+ Bpf BpfInlineAsmReg BpfInlineAsmRegClass {
+ r0: reg = ["r0"],
+ r1: reg = ["r1"],
+ r2: reg = ["r2"],
+ r3: reg = ["r3"],
+ r4: reg = ["r4"],
+ r5: reg = ["r5"],
+ r6: reg = ["r6"],
+ r7: reg = ["r7"],
+ r8: reg = ["r8"],
+ r9: reg = ["r9"],
+ w0: wreg = ["w0"],
+ w1: wreg = ["w1"],
+ w2: wreg = ["w2"],
+ w3: wreg = ["w3"],
+ w4: wreg = ["w4"],
+ w5: wreg = ["w5"],
+ w6: wreg = ["w6"],
+ w7: wreg = ["w7"],
+ w8: wreg = ["w8"],
+ w9: wreg = ["w9"],
+
+ #error = ["r10", "w10"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ }
+}
+
+impl BpfInlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ _modifier: Option<char>,
+ ) -> fmt::Result {
+ out.write_str(self.name())
+ }
+
+ pub fn overlapping_regs(self, mut cb: impl FnMut(BpfInlineAsmReg)) {
+ cb(self);
+
+ macro_rules! reg_conflicts {
+ (
+ $(
+ $r:ident : $w:ident
+ ),*
+ ) => {
+ match self {
+ $(
+ Self::$r => {
+ cb(Self::$w);
+ }
+ Self::$w => {
+ cb(Self::$r);
+ }
+ )*
+ }
+ };
+ }
+
+ reg_conflicts! {
+ r0 : w0,
+ r1 : w1,
+ r2 : w2,
+ r3 : w3,
+ r4 : w4,
+ r5 : w5,
+ r6 : w6,
+ r7 : w7,
+ r8 : w8,
+ r9 : w9
+ }
+ }
+}
diff --git a/compiler/rustc_target/src/asm/hexagon.rs b/compiler/rustc_target/src/asm/hexagon.rs
new file mode 100644
index 000000000..d20270ac9
--- /dev/null
+++ b/compiler/rustc_target/src/asm/hexagon.rs
@@ -0,0 +1,95 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ Hexagon HexagonInlineAsmRegClass {
+ reg,
+ }
+}
+
+impl HexagonInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg => types! { _: I8, I16, I32, F32; },
+ }
+ }
+}
+
+def_regs! {
+ Hexagon HexagonInlineAsmReg HexagonInlineAsmRegClass {
+ r0: reg = ["r0"],
+ r1: reg = ["r1"],
+ r2: reg = ["r2"],
+ r3: reg = ["r3"],
+ r4: reg = ["r4"],
+ r5: reg = ["r5"],
+ r6: reg = ["r6"],
+ r7: reg = ["r7"],
+ r8: reg = ["r8"],
+ r9: reg = ["r9"],
+ r10: reg = ["r10"],
+ r11: reg = ["r11"],
+ r12: reg = ["r12"],
+ r13: reg = ["r13"],
+ r14: reg = ["r14"],
+ r15: reg = ["r15"],
+ r16: reg = ["r16"],
+ r17: reg = ["r17"],
+ r18: reg = ["r18"],
+ r20: reg = ["r20"],
+ r21: reg = ["r21"],
+ r22: reg = ["r22"],
+ r23: reg = ["r23"],
+ r24: reg = ["r24"],
+ r25: reg = ["r25"],
+ r26: reg = ["r26"],
+ r27: reg = ["r27"],
+ r28: reg = ["r28"],
+ #error = ["r19"] =>
+ "r19 is used internally by LLVM and cannot be used as an operand for inline asm",
+ #error = ["r29", "sp"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["r30", "fr"] =>
+ "the frame register cannot be used as an operand for inline asm",
+ #error = ["r31", "lr"] =>
+ "the link register cannot be used as an operand for inline asm",
+ }
+}
+
+impl HexagonInlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ _modifier: Option<char>,
+ ) -> fmt::Result {
+ out.write_str(self.name())
+ }
+
+ pub fn overlapping_regs(self, mut _cb: impl FnMut(HexagonInlineAsmReg)) {}
+}
diff --git a/compiler/rustc_target/src/asm/mips.rs b/compiler/rustc_target/src/asm/mips.rs
new file mode 100644
index 000000000..4e7c2eb1b
--- /dev/null
+++ b/compiler/rustc_target/src/asm/mips.rs
@@ -0,0 +1,135 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ Mips MipsInlineAsmRegClass {
+ reg,
+ freg,
+ }
+}
+
+impl MipsInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match (self, arch) {
+ (Self::reg, InlineAsmArch::Mips64) => types! { _: I8, I16, I32, I64, F32, F64; },
+ (Self::reg, _) => types! { _: I8, I16, I32, F32; },
+ (Self::freg, _) => types! { _: F32, F64; },
+ }
+ }
+}
+
+// The reserved registers are somewhat taken from
+// <https://github.com/llvm/llvm-project/blob/deb8f8bcf31540c657716ea5242183b0792702a1/llvm/lib/Target/Mips/MipsRegisterInfo.cpp#L150>.
+def_regs! {
+ Mips MipsInlineAsmReg MipsInlineAsmRegClass {
+ r2: reg = ["$2"],
+ r3: reg = ["$3"],
+ r4: reg = ["$4"],
+ r5: reg = ["$5"],
+ r6: reg = ["$6"],
+ r7: reg = ["$7"],
+ // FIXME: Reserve $t0, $t1 if in mips16 mode.
+ r8: reg = ["$8"],
+ r9: reg = ["$9"],
+ r10: reg = ["$10"],
+ r11: reg = ["$11"],
+ r12: reg = ["$12"],
+ r13: reg = ["$13"],
+ r14: reg = ["$14"],
+ r15: reg = ["$15"],
+ r16: reg = ["$16"],
+ r17: reg = ["$17"],
+ r18: reg = ["$18"],
+ r19: reg = ["$19"],
+ r20: reg = ["$20"],
+ r21: reg = ["$21"],
+ r22: reg = ["$22"],
+ r23: reg = ["$23"],
+ r24: reg = ["$24"],
+ r25: reg = ["$25"],
+ f0: freg = ["$f0"],
+ f1: freg = ["$f1"],
+ f2: freg = ["$f2"],
+ f3: freg = ["$f3"],
+ f4: freg = ["$f4"],
+ f5: freg = ["$f5"],
+ f6: freg = ["$f6"],
+ f7: freg = ["$f7"],
+ f8: freg = ["$f8"],
+ f9: freg = ["$f9"],
+ f10: freg = ["$f10"],
+ f11: freg = ["$f11"],
+ f12: freg = ["$f12"],
+ f13: freg = ["$f13"],
+ f14: freg = ["$f14"],
+ f15: freg = ["$f15"],
+ f16: freg = ["$f16"],
+ f17: freg = ["$f17"],
+ f18: freg = ["$f18"],
+ f19: freg = ["$f19"],
+ f20: freg = ["$f20"],
+ f21: freg = ["$f21"],
+ f22: freg = ["$f22"],
+ f23: freg = ["$f23"],
+ f24: freg = ["$f24"],
+ f25: freg = ["$f25"],
+ f26: freg = ["$f26"],
+ f27: freg = ["$f27"],
+ f28: freg = ["$f28"],
+ f29: freg = ["$f29"],
+ f30: freg = ["$f30"],
+ f31: freg = ["$f31"],
+ #error = ["$0"] =>
+ "constant zero cannot be used as an operand for inline asm",
+ #error = ["$1"] =>
+ "reserved for assembler (Assembler Temp)",
+ #error = ["$26"] =>
+ "OS-reserved register cannot be used as an operand for inline asm",
+ #error = ["$27"] =>
+ "OS-reserved register cannot be used as an operand for inline asm",
+ #error = ["$28"] =>
+ "the global pointer cannot be used as an operand for inline asm",
+ #error = ["$29"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["$30"] =>
+ "the frame pointer cannot be used as an operand for inline asm",
+ #error = ["$31"] =>
+ "the return address register cannot be used as an operand for inline asm",
+ }
+}
+
+impl MipsInlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ _modifier: Option<char>,
+ ) -> fmt::Result {
+ out.write_str(self.name())
+ }
+}
diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs
new file mode 100644
index 000000000..65d2cd64b
--- /dev/null
+++ b/compiler/rustc_target/src/asm/mod.rs
@@ -0,0 +1,976 @@
+use crate::spec::Target;
+use crate::{abi::Size, spec::RelocModel};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+use std::str::FromStr;
+
+macro_rules! def_reg_class {
+ ($arch:ident $arch_regclass:ident {
+ $(
+ $class:ident,
+ )*
+ }) => {
+ #[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, PartialOrd, Hash, HashStable_Generic)]
+ #[allow(non_camel_case_types)]
+ pub enum $arch_regclass {
+ $($class,)*
+ }
+
+ impl $arch_regclass {
+ pub fn name(self) -> rustc_span::Symbol {
+ match self {
+ $(Self::$class => rustc_span::symbol::sym::$class,)*
+ }
+ }
+
+ pub fn parse(name: rustc_span::Symbol) -> Result<Self, &'static str> {
+ match name {
+ $(
+ rustc_span::sym::$class => Ok(Self::$class),
+ )*
+ _ => Err("unknown register class"),
+ }
+ }
+ }
+
+ pub(super) fn regclass_map() -> rustc_data_structures::fx::FxHashMap<
+ super::InlineAsmRegClass,
+ rustc_data_structures::fx::FxHashSet<super::InlineAsmReg>,
+ > {
+ use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+ use super::InlineAsmRegClass;
+ let mut map = FxHashMap::default();
+ $(
+ map.insert(InlineAsmRegClass::$arch($arch_regclass::$class), FxHashSet::default());
+ )*
+ map
+ }
+ }
+}
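+
+// Illustrative sketch, not part of the original source: an invocation such as
+//
+//     def_reg_class! {
+//         Avr AvrInlineAsmRegClass {
+//             reg,
+//             reg_pair,
+//         }
+//     }
+//
+// generates the `AvrInlineAsmRegClass` enum with `name`/`parse` methods plus a
+// `regclass_map()` helper that seeds one empty register set per class.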
+
+macro_rules! def_regs {
+ ($arch:ident $arch_reg:ident $arch_regclass:ident {
+ $(
+ $reg:ident: $class:ident $(, $extra_class:ident)* = [$reg_name:literal $(, $alias:literal)*] $(% $filter:ident)?,
+ )*
+ $(
+ #error = [$($bad_reg:literal),+] => $error:literal,
+ )*
+ }) => {
+ #[allow(unreachable_code)]
+ #[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, PartialOrd, Hash, HashStable_Generic)]
+ #[allow(non_camel_case_types)]
+ pub enum $arch_reg {
+ $($reg,)*
+ }
+
+ impl $arch_reg {
+ pub fn name(self) -> &'static str {
+ match self {
+ $(Self::$reg => $reg_name,)*
+ }
+ }
+
+ pub fn reg_class(self) -> $arch_regclass {
+ match self {
+ $(Self::$reg => $arch_regclass::$class,)*
+ }
+ }
+
+ pub fn parse(name: &str) -> Result<Self, &'static str> {
+ match name {
+ $(
+ $($alias)|* | $reg_name => Ok(Self::$reg),
+ )*
+ $(
+ $($bad_reg)|* => Err($error),
+ )*
+ _ => Err("unknown register"),
+ }
+ }
+
+ pub fn validate(self,
+ _arch: super::InlineAsmArch,
+ _reloc_model: crate::spec::RelocModel,
+ _target_features: &rustc_data_structures::fx::FxHashSet<Symbol>,
+ _target: &crate::spec::Target,
+ _is_clobber: bool,
+ ) -> Result<(), &'static str> {
+ match self {
+ $(
+ Self::$reg => {
+ $($filter(
+ _arch,
+ _reloc_model,
+ _target_features,
+ _target,
+ _is_clobber
+ )?;)?
+ Ok(())
+ }
+ )*
+ }
+ }
+ }
+
+ pub(super) fn fill_reg_map(
+ _arch: super::InlineAsmArch,
+ _reloc_model: crate::spec::RelocModel,
+ _target_features: &rustc_data_structures::fx::FxHashSet<Symbol>,
+ _target: &crate::spec::Target,
+ _map: &mut rustc_data_structures::fx::FxHashMap<
+ super::InlineAsmRegClass,
+ rustc_data_structures::fx::FxHashSet<super::InlineAsmReg>,
+ >,
+ ) {
+ #[allow(unused_imports)]
+ use super::{InlineAsmReg, InlineAsmRegClass};
+ $(
+ if $($filter(_arch, _reloc_model, _target_features, _target, false).is_ok() &&)? true {
+ if let Some(set) = _map.get_mut(&InlineAsmRegClass::$arch($arch_regclass::$class)) {
+ set.insert(InlineAsmReg::$arch($arch_reg::$reg));
+ }
+ $(
+ if let Some(set) = _map.get_mut(&InlineAsmRegClass::$arch($arch_regclass::$extra_class)) {
+ set.insert(InlineAsmReg::$arch($arch_reg::$reg));
+ }
+ )*
+ }
+ )*
+ }
+ }
+}
+
+macro_rules! types {
+ (
+ $(_ : $($ty:expr),+;)?
+ $($feature:ident: $($ty2:expr),+;)*
+ ) => {
+ {
+ use super::InlineAsmType::*;
+ &[
+ $($(
+ ($ty, None),
+ )*)?
+ $($(
+ ($ty2, Some(rustc_span::sym::$feature)),
+ )*)*
+ ]
+ }
+ };
+}
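+
+// Illustrative sketch, not part of the original source: a use such as
+// `types! { _: I8, I16; alu32: I32; }` expands to
+// `&[(I8, None), (I16, None), (I32, Some(rustc_span::sym::alu32))]`.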
+
+mod aarch64;
+mod arm;
+mod avr;
+mod bpf;
+mod hexagon;
+mod mips;
+mod msp430;
+mod nvptx;
+mod powerpc;
+mod riscv;
+mod s390x;
+mod spirv;
+mod wasm;
+mod x86;
+
+pub use aarch64::{AArch64InlineAsmReg, AArch64InlineAsmRegClass};
+pub use arm::{ArmInlineAsmReg, ArmInlineAsmRegClass};
+pub use avr::{AvrInlineAsmReg, AvrInlineAsmRegClass};
+pub use bpf::{BpfInlineAsmReg, BpfInlineAsmRegClass};
+pub use hexagon::{HexagonInlineAsmReg, HexagonInlineAsmRegClass};
+pub use mips::{MipsInlineAsmReg, MipsInlineAsmRegClass};
+pub use msp430::{Msp430InlineAsmReg, Msp430InlineAsmRegClass};
+pub use nvptx::{NvptxInlineAsmReg, NvptxInlineAsmRegClass};
+pub use powerpc::{PowerPCInlineAsmReg, PowerPCInlineAsmRegClass};
+pub use riscv::{RiscVInlineAsmReg, RiscVInlineAsmRegClass};
+pub use s390x::{S390xInlineAsmReg, S390xInlineAsmRegClass};
+pub use spirv::{SpirVInlineAsmReg, SpirVInlineAsmRegClass};
+pub use wasm::{WasmInlineAsmReg, WasmInlineAsmRegClass};
+pub use x86::{X86InlineAsmReg, X86InlineAsmRegClass};
+
+#[derive(Copy, Clone, Encodable, Decodable, Debug, Eq, PartialEq, Hash)]
+pub enum InlineAsmArch {
+ X86,
+ X86_64,
+ Arm,
+ AArch64,
+ RiscV32,
+ RiscV64,
+ Nvptx64,
+ Hexagon,
+ Mips,
+ Mips64,
+ PowerPC,
+ PowerPC64,
+ S390x,
+ SpirV,
+ Wasm32,
+ Wasm64,
+ Bpf,
+ Avr,
+ Msp430,
+}
+
+impl FromStr for InlineAsmArch {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<InlineAsmArch, ()> {
+ match s {
+ "x86" => Ok(Self::X86),
+ "x86_64" => Ok(Self::X86_64),
+ "arm" => Ok(Self::Arm),
+ "aarch64" => Ok(Self::AArch64),
+ "riscv32" => Ok(Self::RiscV32),
+ "riscv64" => Ok(Self::RiscV64),
+ "nvptx64" => Ok(Self::Nvptx64),
+ "powerpc" => Ok(Self::PowerPC),
+ "powerpc64" => Ok(Self::PowerPC64),
+ "hexagon" => Ok(Self::Hexagon),
+ "mips" => Ok(Self::Mips),
+ "mips64" => Ok(Self::Mips64),
+ "s390x" => Ok(Self::S390x),
+ "spirv" => Ok(Self::SpirV),
+ "wasm32" => Ok(Self::Wasm32),
+ "wasm64" => Ok(Self::Wasm64),
+ "bpf" => Ok(Self::Bpf),
+ "avr" => Ok(Self::Avr),
+ "msp430" => Ok(Self::Msp430),
+ _ => Err(()),
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Hash)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub enum InlineAsmReg {
+ X86(X86InlineAsmReg),
+ Arm(ArmInlineAsmReg),
+ AArch64(AArch64InlineAsmReg),
+ RiscV(RiscVInlineAsmReg),
+ Nvptx(NvptxInlineAsmReg),
+ PowerPC(PowerPCInlineAsmReg),
+ Hexagon(HexagonInlineAsmReg),
+ Mips(MipsInlineAsmReg),
+ S390x(S390xInlineAsmReg),
+ SpirV(SpirVInlineAsmReg),
+ Wasm(WasmInlineAsmReg),
+ Bpf(BpfInlineAsmReg),
+ Avr(AvrInlineAsmReg),
+ Msp430(Msp430InlineAsmReg),
+ // Placeholder for invalid register constraints for the current target
+ Err,
+}
+
+impl InlineAsmReg {
+ pub fn name(self) -> &'static str {
+ match self {
+ Self::X86(r) => r.name(),
+ Self::Arm(r) => r.name(),
+ Self::AArch64(r) => r.name(),
+ Self::RiscV(r) => r.name(),
+ Self::PowerPC(r) => r.name(),
+ Self::Hexagon(r) => r.name(),
+ Self::Mips(r) => r.name(),
+ Self::S390x(r) => r.name(),
+ Self::Bpf(r) => r.name(),
+ Self::Avr(r) => r.name(),
+ Self::Msp430(r) => r.name(),
+ Self::Err => "<reg>",
+ }
+ }
+
+ pub fn reg_class(self) -> InlineAsmRegClass {
+ match self {
+ Self::X86(r) => InlineAsmRegClass::X86(r.reg_class()),
+ Self::Arm(r) => InlineAsmRegClass::Arm(r.reg_class()),
+ Self::AArch64(r) => InlineAsmRegClass::AArch64(r.reg_class()),
+ Self::RiscV(r) => InlineAsmRegClass::RiscV(r.reg_class()),
+ Self::PowerPC(r) => InlineAsmRegClass::PowerPC(r.reg_class()),
+ Self::Hexagon(r) => InlineAsmRegClass::Hexagon(r.reg_class()),
+ Self::Mips(r) => InlineAsmRegClass::Mips(r.reg_class()),
+ Self::S390x(r) => InlineAsmRegClass::S390x(r.reg_class()),
+ Self::Bpf(r) => InlineAsmRegClass::Bpf(r.reg_class()),
+ Self::Avr(r) => InlineAsmRegClass::Avr(r.reg_class()),
+ Self::Msp430(r) => InlineAsmRegClass::Msp430(r.reg_class()),
+ Self::Err => InlineAsmRegClass::Err,
+ }
+ }
+
+ pub fn parse(arch: InlineAsmArch, name: Symbol) -> Result<Self, &'static str> {
+ // FIXME: use direct symbol comparison for register names
+ // Use `Symbol::as_str` instead of `Symbol::with` here because `has_feature` may access `Symbol`.
+ let name = name.as_str();
+ Ok(match arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => Self::X86(X86InlineAsmReg::parse(name)?),
+ InlineAsmArch::Arm => Self::Arm(ArmInlineAsmReg::parse(name)?),
+ InlineAsmArch::AArch64 => Self::AArch64(AArch64InlineAsmReg::parse(name)?),
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ Self::RiscV(RiscVInlineAsmReg::parse(name)?)
+ }
+ InlineAsmArch::Nvptx64 => Self::Nvptx(NvptxInlineAsmReg::parse(name)?),
+ InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {
+ Self::PowerPC(PowerPCInlineAsmReg::parse(name)?)
+ }
+ InlineAsmArch::Hexagon => Self::Hexagon(HexagonInlineAsmReg::parse(name)?),
+ InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
+ Self::Mips(MipsInlineAsmReg::parse(name)?)
+ }
+ InlineAsmArch::S390x => Self::S390x(S390xInlineAsmReg::parse(name)?),
+ InlineAsmArch::SpirV => Self::SpirV(SpirVInlineAsmReg::parse(name)?),
+ InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {
+ Self::Wasm(WasmInlineAsmReg::parse(name)?)
+ }
+ InlineAsmArch::Bpf => Self::Bpf(BpfInlineAsmReg::parse(name)?),
+ InlineAsmArch::Avr => Self::Avr(AvrInlineAsmReg::parse(name)?),
+ InlineAsmArch::Msp430 => Self::Msp430(Msp430InlineAsmReg::parse(name)?),
+ })
+ }
+
+ pub fn validate(
+ self,
+ arch: InlineAsmArch,
+ reloc_model: RelocModel,
+ target_features: &FxHashSet<Symbol>,
+ target: &Target,
+ is_clobber: bool,
+ ) -> Result<(), &'static str> {
+ match self {
+ Self::X86(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::Arm(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::AArch64(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::RiscV(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::PowerPC(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::Hexagon(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::Mips(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::S390x(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::Bpf(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::Avr(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::Msp430(r) => r.validate(arch, reloc_model, target_features, target, is_clobber),
+ Self::Err => unreachable!(),
+ }
+ }
+
+ // NOTE: This function isn't used at the moment, but is needed to support
+ // falling back to an external assembler.
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ arch: InlineAsmArch,
+ modifier: Option<char>,
+ ) -> fmt::Result {
+ match self {
+ Self::X86(r) => r.emit(out, arch, modifier),
+ Self::Arm(r) => r.emit(out, arch, modifier),
+ Self::AArch64(r) => r.emit(out, arch, modifier),
+ Self::RiscV(r) => r.emit(out, arch, modifier),
+ Self::PowerPC(r) => r.emit(out, arch, modifier),
+ Self::Hexagon(r) => r.emit(out, arch, modifier),
+ Self::Mips(r) => r.emit(out, arch, modifier),
+ Self::S390x(r) => r.emit(out, arch, modifier),
+ Self::Bpf(r) => r.emit(out, arch, modifier),
+ Self::Avr(r) => r.emit(out, arch, modifier),
+ Self::Msp430(r) => r.emit(out, arch, modifier),
+ Self::Err => unreachable!("Use of InlineAsmReg::Err"),
+ }
+ }
+
+ pub fn overlapping_regs(self, mut cb: impl FnMut(InlineAsmReg)) {
+ match self {
+ Self::X86(r) => r.overlapping_regs(|r| cb(Self::X86(r))),
+ Self::Arm(r) => r.overlapping_regs(|r| cb(Self::Arm(r))),
+ Self::AArch64(_) => cb(self),
+ Self::RiscV(_) => cb(self),
+ Self::PowerPC(r) => r.overlapping_regs(|r| cb(Self::PowerPC(r))),
+ Self::Hexagon(r) => r.overlapping_regs(|r| cb(Self::Hexagon(r))),
+ Self::Mips(_) => cb(self),
+ Self::S390x(_) => cb(self),
+ Self::Bpf(r) => r.overlapping_regs(|r| cb(Self::Bpf(r))),
+ Self::Avr(r) => r.overlapping_regs(|r| cb(Self::Avr(r))),
+ Self::Msp430(_) => cb(self),
+ Self::Err => unreachable!("Use of InlineAsmReg::Err"),
+ }
+ }
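+
+    // Illustrative example (not part of the upstream patch): for x86 `ax`,
+    // the callback fires for `ax`, `al`, and `ah`, since writes to any of
+    // them modify the same underlying register.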
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Hash)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub enum InlineAsmRegClass {
+ X86(X86InlineAsmRegClass),
+ Arm(ArmInlineAsmRegClass),
+ AArch64(AArch64InlineAsmRegClass),
+ RiscV(RiscVInlineAsmRegClass),
+ Nvptx(NvptxInlineAsmRegClass),
+ PowerPC(PowerPCInlineAsmRegClass),
+ Hexagon(HexagonInlineAsmRegClass),
+ Mips(MipsInlineAsmRegClass),
+ S390x(S390xInlineAsmRegClass),
+ SpirV(SpirVInlineAsmRegClass),
+ Wasm(WasmInlineAsmRegClass),
+ Bpf(BpfInlineAsmRegClass),
+ Avr(AvrInlineAsmRegClass),
+ Msp430(Msp430InlineAsmRegClass),
+ // Placeholder for invalid register constraints for the current target
+ Err,
+}
+
+impl InlineAsmRegClass {
+ pub fn name(self) -> Symbol {
+ match self {
+ Self::X86(r) => r.name(),
+ Self::Arm(r) => r.name(),
+ Self::AArch64(r) => r.name(),
+ Self::RiscV(r) => r.name(),
+ Self::Nvptx(r) => r.name(),
+ Self::PowerPC(r) => r.name(),
+ Self::Hexagon(r) => r.name(),
+ Self::Mips(r) => r.name(),
+ Self::S390x(r) => r.name(),
+ Self::SpirV(r) => r.name(),
+ Self::Wasm(r) => r.name(),
+ Self::Bpf(r) => r.name(),
+ Self::Avr(r) => r.name(),
+ Self::Msp430(r) => r.name(),
+ Self::Err => rustc_span::symbol::sym::reg,
+ }
+ }
+
+    /// Returns a suggested register class to use for this type. This is called
+    /// when the `supported_types` check fails, so that a more helpful error
+    /// message can be given to the user.
+ pub fn suggest_class(self, arch: InlineAsmArch, ty: InlineAsmType) -> Option<Self> {
+ match self {
+ Self::X86(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::X86),
+ Self::Arm(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Arm),
+ Self::AArch64(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::AArch64),
+ Self::RiscV(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::RiscV),
+ Self::Nvptx(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Nvptx),
+ Self::PowerPC(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::PowerPC),
+ Self::Hexagon(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Hexagon),
+ Self::Mips(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Mips),
+ Self::S390x(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::S390x),
+ Self::SpirV(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::SpirV),
+ Self::Wasm(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Wasm),
+ Self::Bpf(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Bpf),
+ Self::Avr(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Avr),
+ Self::Msp430(r) => r.suggest_class(arch, ty).map(InlineAsmRegClass::Msp430),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
+ }
+ }
+
+ /// Returns a suggested template modifier to use for this type and an
+    /// example of a register name formatted with it.
+ ///
+ /// Such suggestions are useful if a type smaller than the full register
+ /// size is used and a modifier can be used to point to the subregister of
+ /// the correct size.
+ pub fn suggest_modifier(
+ self,
+ arch: InlineAsmArch,
+ ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ match self {
+ Self::X86(r) => r.suggest_modifier(arch, ty),
+ Self::Arm(r) => r.suggest_modifier(arch, ty),
+ Self::AArch64(r) => r.suggest_modifier(arch, ty),
+ Self::RiscV(r) => r.suggest_modifier(arch, ty),
+ Self::Nvptx(r) => r.suggest_modifier(arch, ty),
+ Self::PowerPC(r) => r.suggest_modifier(arch, ty),
+ Self::Hexagon(r) => r.suggest_modifier(arch, ty),
+ Self::Mips(r) => r.suggest_modifier(arch, ty),
+ Self::S390x(r) => r.suggest_modifier(arch, ty),
+ Self::SpirV(r) => r.suggest_modifier(arch, ty),
+ Self::Wasm(r) => r.suggest_modifier(arch, ty),
+ Self::Bpf(r) => r.suggest_modifier(arch, ty),
+ Self::Avr(r) => r.suggest_modifier(arch, ty),
+ Self::Msp430(r) => r.suggest_modifier(arch, ty),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
+ }
+ }
+
+    /// Returns the default modifier for this register class and an example
+    /// of a register name formatted with it.
+ ///
+ /// This is only needed when the register class can suggest a modifier, so
+ /// that the user can be shown how to get the default behavior without a
+ /// warning.
+ pub fn default_modifier(self, arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ match self {
+ Self::X86(r) => r.default_modifier(arch),
+ Self::Arm(r) => r.default_modifier(arch),
+ Self::AArch64(r) => r.default_modifier(arch),
+ Self::RiscV(r) => r.default_modifier(arch),
+ Self::Nvptx(r) => r.default_modifier(arch),
+ Self::PowerPC(r) => r.default_modifier(arch),
+ Self::Hexagon(r) => r.default_modifier(arch),
+ Self::Mips(r) => r.default_modifier(arch),
+ Self::S390x(r) => r.default_modifier(arch),
+ Self::SpirV(r) => r.default_modifier(arch),
+ Self::Wasm(r) => r.default_modifier(arch),
+ Self::Bpf(r) => r.default_modifier(arch),
+ Self::Avr(r) => r.default_modifier(arch),
+ Self::Msp430(r) => r.default_modifier(arch),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
+ }
+ }
+
+    /// Returns a list of supported types for this register class, each with
+    /// an optional target feature required to use the type.
+ pub fn supported_types(
+ self,
+ arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::X86(r) => r.supported_types(arch),
+ Self::Arm(r) => r.supported_types(arch),
+ Self::AArch64(r) => r.supported_types(arch),
+ Self::RiscV(r) => r.supported_types(arch),
+ Self::Nvptx(r) => r.supported_types(arch),
+ Self::PowerPC(r) => r.supported_types(arch),
+ Self::Hexagon(r) => r.supported_types(arch),
+ Self::Mips(r) => r.supported_types(arch),
+ Self::S390x(r) => r.supported_types(arch),
+ Self::SpirV(r) => r.supported_types(arch),
+ Self::Wasm(r) => r.supported_types(arch),
+ Self::Bpf(r) => r.supported_types(arch),
+ Self::Avr(r) => r.supported_types(arch),
+ Self::Msp430(r) => r.supported_types(arch),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
+ }
+ }
+
+ pub fn parse(arch: InlineAsmArch, name: Symbol) -> Result<Self, &'static str> {
+ Ok(match arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ Self::X86(X86InlineAsmRegClass::parse(name)?)
+ }
+ InlineAsmArch::Arm => Self::Arm(ArmInlineAsmRegClass::parse(name)?),
+ InlineAsmArch::AArch64 => Self::AArch64(AArch64InlineAsmRegClass::parse(name)?),
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ Self::RiscV(RiscVInlineAsmRegClass::parse(name)?)
+ }
+ InlineAsmArch::Nvptx64 => Self::Nvptx(NvptxInlineAsmRegClass::parse(name)?),
+ InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {
+ Self::PowerPC(PowerPCInlineAsmRegClass::parse(name)?)
+ }
+ InlineAsmArch::Hexagon => Self::Hexagon(HexagonInlineAsmRegClass::parse(name)?),
+ InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
+ Self::Mips(MipsInlineAsmRegClass::parse(name)?)
+ }
+ InlineAsmArch::S390x => Self::S390x(S390xInlineAsmRegClass::parse(name)?),
+ InlineAsmArch::SpirV => Self::SpirV(SpirVInlineAsmRegClass::parse(name)?),
+ InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {
+ Self::Wasm(WasmInlineAsmRegClass::parse(name)?)
+ }
+ InlineAsmArch::Bpf => Self::Bpf(BpfInlineAsmRegClass::parse(name)?),
+ InlineAsmArch::Avr => Self::Avr(AvrInlineAsmRegClass::parse(name)?),
+ InlineAsmArch::Msp430 => Self::Msp430(Msp430InlineAsmRegClass::parse(name)?),
+ })
+ }
+
+ /// Returns the list of template modifiers that can be used with this
+ /// register class.
+ pub fn valid_modifiers(self, arch: InlineAsmArch) -> &'static [char] {
+ match self {
+ Self::X86(r) => r.valid_modifiers(arch),
+ Self::Arm(r) => r.valid_modifiers(arch),
+ Self::AArch64(r) => r.valid_modifiers(arch),
+ Self::RiscV(r) => r.valid_modifiers(arch),
+ Self::Nvptx(r) => r.valid_modifiers(arch),
+ Self::PowerPC(r) => r.valid_modifiers(arch),
+ Self::Hexagon(r) => r.valid_modifiers(arch),
+ Self::Mips(r) => r.valid_modifiers(arch),
+ Self::S390x(r) => r.valid_modifiers(arch),
+ Self::SpirV(r) => r.valid_modifiers(arch),
+ Self::Wasm(r) => r.valid_modifiers(arch),
+ Self::Bpf(r) => r.valid_modifiers(arch),
+ Self::Avr(r) => r.valid_modifiers(arch),
+ Self::Msp430(r) => r.valid_modifiers(arch),
+ Self::Err => unreachable!("Use of InlineAsmRegClass::Err"),
+ }
+ }
+
+ /// Returns whether registers in this class can only be used as clobbers
+ /// and not as inputs/outputs.
+ pub fn is_clobber_only(self, arch: InlineAsmArch) -> bool {
+ self.supported_types(arch).is_empty()
+ }
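+
+    // For example (illustrative): x86's `mmx_reg` and `x87_reg` classes
+    // support no types at all, so registers in them can only appear as
+    // clobbers, never as inputs or outputs.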
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Hash)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub enum InlineAsmRegOrRegClass {
+ Reg(InlineAsmReg),
+ RegClass(InlineAsmRegClass),
+}
+
+impl InlineAsmRegOrRegClass {
+ pub fn reg_class(self) -> InlineAsmRegClass {
+ match self {
+ Self::Reg(r) => r.reg_class(),
+ Self::RegClass(r) => r,
+ }
+ }
+}
+
+impl fmt::Display for InlineAsmRegOrRegClass {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Reg(r) => write!(f, "\"{}\"", r.name()),
+ Self::RegClass(r) => write!(f, "{}", r.name()),
+ }
+ }
+}
+
+/// Set of types which can be used with a particular register class.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub enum InlineAsmType {
+ I8,
+ I16,
+ I32,
+ I64,
+ I128,
+ F32,
+ F64,
+ VecI8(u64),
+ VecI16(u64),
+ VecI32(u64),
+ VecI64(u64),
+ VecI128(u64),
+ VecF32(u64),
+ VecF64(u64),
+}
+
+impl InlineAsmType {
+ pub fn is_integer(self) -> bool {
+ matches!(self, Self::I8 | Self::I16 | Self::I32 | Self::I64 | Self::I128)
+ }
+
+ pub fn size(self) -> Size {
+ Size::from_bytes(match self {
+ Self::I8 => 1,
+ Self::I16 => 2,
+ Self::I32 => 4,
+ Self::I64 => 8,
+ Self::I128 => 16,
+ Self::F32 => 4,
+ Self::F64 => 8,
+ Self::VecI8(n) => n * 1,
+ Self::VecI16(n) => n * 2,
+ Self::VecI32(n) => n * 4,
+ Self::VecI64(n) => n * 8,
+ Self::VecI128(n) => n * 16,
+ Self::VecF32(n) => n * 4,
+ Self::VecF64(n) => n * 8,
+ })
+ }
+}
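+
+// Illustrative check (an assumed test, not part of the upstream patch):
+// `size` is the operand width in bytes, so a 4-lane f32 vector is 16 bytes.
+#[cfg(test)]
+mod size_example {
+    use super::InlineAsmType;
+
+    #[test]
+    fn operand_sizes() {
+        assert_eq!(InlineAsmType::I128.size().bytes(), 16);
+        assert_eq!(InlineAsmType::VecF32(4).size().bytes(), 16);
+    }
+}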
+
+impl fmt::Display for InlineAsmType {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Self::I8 => f.write_str("i8"),
+ Self::I16 => f.write_str("i16"),
+ Self::I32 => f.write_str("i32"),
+ Self::I64 => f.write_str("i64"),
+ Self::I128 => f.write_str("i128"),
+ Self::F32 => f.write_str("f32"),
+ Self::F64 => f.write_str("f64"),
+ Self::VecI8(n) => write!(f, "i8x{}", n),
+ Self::VecI16(n) => write!(f, "i16x{}", n),
+ Self::VecI32(n) => write!(f, "i32x{}", n),
+ Self::VecI64(n) => write!(f, "i64x{}", n),
+ Self::VecI128(n) => write!(f, "i128x{}", n),
+ Self::VecF32(n) => write!(f, "f32x{}", n),
+ Self::VecF64(n) => write!(f, "f64x{}", n),
+ }
+ }
+}
+
+/// Returns the full set of allocatable registers for a given architecture.
+///
+/// The registers are structured as a map containing the set of allocatable
+/// registers in each register class. A particular register may be allocatable
+/// from multiple register classes, in which case it will appear multiple times
+/// in the map.
+// NOTE: This function isn't used at the moment, but is needed to support
+// falling back to an external assembler.
+pub fn allocatable_registers(
+ arch: InlineAsmArch,
+ reloc_model: RelocModel,
+ target_features: &FxHashSet<Symbol>,
+ target: &crate::spec::Target,
+) -> FxHashMap<InlineAsmRegClass, FxHashSet<InlineAsmReg>> {
+ match arch {
+ InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
+ let mut map = x86::regclass_map();
+ x86::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::Arm => {
+ let mut map = arm::regclass_map();
+ arm::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::AArch64 => {
+ let mut map = aarch64::regclass_map();
+ aarch64::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
+ let mut map = riscv::regclass_map();
+ riscv::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::Nvptx64 => {
+ let mut map = nvptx::regclass_map();
+ nvptx::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {
+ let mut map = powerpc::regclass_map();
+ powerpc::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::Hexagon => {
+ let mut map = hexagon::regclass_map();
+ hexagon::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::Mips | InlineAsmArch::Mips64 => {
+ let mut map = mips::regclass_map();
+ mips::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::S390x => {
+ let mut map = s390x::regclass_map();
+ s390x::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::SpirV => {
+ let mut map = spirv::regclass_map();
+ spirv::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::Wasm32 | InlineAsmArch::Wasm64 => {
+ let mut map = wasm::regclass_map();
+ wasm::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::Bpf => {
+ let mut map = bpf::regclass_map();
+ bpf::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::Avr => {
+ let mut map = avr::regclass_map();
+ avr::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ InlineAsmArch::Msp430 => {
+ let mut map = msp430::regclass_map();
+ msp430::fill_reg_map(arch, reloc_model, target_features, target, &mut map);
+ map
+ }
+ }
+}
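+
+// Illustrative helper (assumed, not part of the upstream patch): a register
+// can appear under several classes (e.g. x86 `ax` is in both `reg` and
+// `reg_abcd`), so flattening the per-class map into one set deduplicates it.
+#[allow(dead_code)]
+fn all_allocatable_registers(
+    arch: InlineAsmArch,
+    reloc_model: RelocModel,
+    target_features: &FxHashSet<Symbol>,
+    target: &crate::spec::Target,
+) -> FxHashSet<InlineAsmReg> {
+    allocatable_registers(arch, reloc_model, target_features, target)
+        .into_values()
+        .flatten()
+        .collect()
+}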
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Hash)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub enum InlineAsmClobberAbi {
+ X86,
+ X86_64Win,
+ X86_64SysV,
+ Arm,
+ AArch64,
+ AArch64NoX18,
+ RiscV,
+}
+
+impl InlineAsmClobberAbi {
+    /// Parses a clobber ABI for the given target; on failure, the error
+    /// carries the list of clobber ABIs supported by that target.
+ pub fn parse(
+ arch: InlineAsmArch,
+ target: &Target,
+ name: Symbol,
+ ) -> Result<Self, &'static [&'static str]> {
+ let name = name.as_str();
+ match arch {
+ InlineAsmArch::X86 => match name {
+ "C" | "system" | "efiapi" | "cdecl" | "stdcall" | "fastcall" => {
+ Ok(InlineAsmClobberAbi::X86)
+ }
+ _ => Err(&["C", "system", "efiapi", "cdecl", "stdcall", "fastcall"]),
+ },
+ InlineAsmArch::X86_64 => match name {
+ "C" | "system" if !target.is_like_windows => Ok(InlineAsmClobberAbi::X86_64SysV),
+ "C" | "system" if target.is_like_windows => Ok(InlineAsmClobberAbi::X86_64Win),
+ "win64" | "efiapi" => Ok(InlineAsmClobberAbi::X86_64Win),
+ "sysv64" => Ok(InlineAsmClobberAbi::X86_64SysV),
+ _ => Err(&["C", "system", "efiapi", "win64", "sysv64"]),
+ },
+ InlineAsmArch::Arm => match name {
+ "C" | "system" | "efiapi" | "aapcs" => Ok(InlineAsmClobberAbi::Arm),
+ _ => Err(&["C", "system", "efiapi", "aapcs"]),
+ },
+ InlineAsmArch::AArch64 => match name {
+ "C" | "system" | "efiapi" => Ok(if aarch64::target_reserves_x18(target) {
+ InlineAsmClobberAbi::AArch64NoX18
+ } else {
+ InlineAsmClobberAbi::AArch64
+ }),
+ _ => Err(&["C", "system", "efiapi"]),
+ },
+ InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => match name {
+ "C" | "system" | "efiapi" => Ok(InlineAsmClobberAbi::RiscV),
+ _ => Err(&["C", "system", "efiapi"]),
+ },
+ _ => Err(&[]),
+ }
+ }
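+
+    // Illustrative example (not part of the upstream patch): on an x86_64
+    // target, `clobber_abi("C")` resolves to `X86_64Win` when
+    // `target.is_like_windows` is set and to `X86_64SysV` otherwise.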
+
+ /// Returns the set of registers which are clobbered by this ABI.
+ pub fn clobbered_regs(self) -> &'static [InlineAsmReg] {
+ macro_rules! clobbered_regs {
+ ($arch:ident $arch_reg:ident {
+ $(
+ $reg:ident,
+ )*
+ }) => {
+ &[
+ $(InlineAsmReg::$arch($arch_reg::$reg),)*
+ ]
+ };
+ }
+ match self {
+ InlineAsmClobberAbi::X86 => clobbered_regs! {
+ X86 X86InlineAsmReg {
+ ax, cx, dx,
+
+ xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
+
+ k0, k1, k2, k3, k4, k5, k6, k7,
+
+ mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7,
+ st0, st1, st2, st3, st4, st5, st6, st7,
+ }
+ },
+ InlineAsmClobberAbi::X86_64SysV => clobbered_regs! {
+ X86 X86InlineAsmReg {
+ ax, cx, dx, si, di, r8, r9, r10, r11,
+
+ xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
+ xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
+ zmm16, zmm17, zmm18, zmm19, zmm20, zmm21, zmm22, zmm23,
+ zmm24, zmm25, zmm26, zmm27, zmm28, zmm29, zmm30, zmm31,
+
+ k0, k1, k2, k3, k4, k5, k6, k7,
+
+ mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7,
+ st0, st1, st2, st3, st4, st5, st6, st7,
+ tmm0, tmm1, tmm2, tmm3, tmm4, tmm5, tmm6, tmm7,
+ }
+ },
+ InlineAsmClobberAbi::X86_64Win => clobbered_regs! {
+ X86 X86InlineAsmReg {
+                // rdi and rsi are callee-saved on Windows
+ ax, cx, dx, r8, r9, r10, r11,
+
+                // xmm6-xmm15 are callee-saved on Windows, but we need to
+                // mark them as clobbered anyway because the upper portions
+ // of ymm6-ymm15 are volatile.
+ xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7,
+ xmm8, xmm9, xmm10, xmm11, xmm12, xmm13, xmm14, xmm15,
+ zmm16, zmm17, zmm18, zmm19, zmm20, zmm21, zmm22, zmm23,
+ zmm24, zmm25, zmm26, zmm27, zmm28, zmm29, zmm30, zmm31,
+
+ k0, k1, k2, k3, k4, k5, k6, k7,
+
+ mm0, mm1, mm2, mm3, mm4, mm5, mm6, mm7,
+ st0, st1, st2, st3, st4, st5, st6, st7,
+ tmm0, tmm1, tmm2, tmm3, tmm4, tmm5, tmm6, tmm7,
+ }
+ },
+ InlineAsmClobberAbi::AArch64 => clobbered_regs! {
+ AArch64 AArch64InlineAsmReg {
+ x0, x1, x2, x3, x4, x5, x6, x7,
+ x8, x9, x10, x11, x12, x13, x14, x15,
+ x16, x17, x18, x30,
+
+ // Technically the low 64 bits of v8-v15 are preserved, but
+ // we have no way of expressing this using clobbers.
+ v0, v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31,
+
+ p0, p1, p2, p3, p4, p5, p6, p7,
+ p8, p9, p10, p11, p12, p13, p14, p15,
+ ffr,
+ }
+ },
+ InlineAsmClobberAbi::AArch64NoX18 => clobbered_regs! {
+ AArch64 AArch64InlineAsmReg {
+ x0, x1, x2, x3, x4, x5, x6, x7,
+ x8, x9, x10, x11, x12, x13, x14, x15,
+ x16, x17, x30,
+
+ // Technically the low 64 bits of v8-v15 are preserved, but
+ // we have no way of expressing this using clobbers.
+ v0, v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31,
+
+ p0, p1, p2, p3, p4, p5, p6, p7,
+ p8, p9, p10, p11, p12, p13, p14, p15,
+ ffr,
+ }
+ },
+ InlineAsmClobberAbi::Arm => clobbered_regs! {
+ Arm ArmInlineAsmReg {
+ // r9 is either platform-reserved or callee-saved. Either
+ // way we don't need to clobber it.
+ r0, r1, r2, r3, r12, r14,
+
+ // The finest-grained register variant is used here so that
+ // partial uses of larger registers are properly handled.
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ s8, s9, s10, s11, s12, s13, s14, s15,
+ // s16-s31 are callee-saved
+ d16, d17, d18, d19, d20, d21, d22, d23,
+ d24, d25, d26, d27, d28, d29, d30, d31,
+ }
+ },
+ InlineAsmClobberAbi::RiscV => clobbered_regs! {
+ RiscV RiscVInlineAsmReg {
+ // ra
+ x1,
+ // t0-t2
+ x5, x6, x7,
+ // a0-a7
+ x10, x11, x12, x13, x14, x15, x16, x17,
+ // t3-t6
+ x28, x29, x30, x31,
+ // ft0-ft7
+ f0, f1, f2, f3, f4, f5, f6, f7,
+ // fa0-fa7
+ f10, f11, f12, f13, f14, f15, f16, f17,
+ // ft8-ft11
+ f28, f29, f30, f31,
+
+ v0, v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31,
+ }
+ },
+ }
+ }
+}
diff --git a/compiler/rustc_target/src/asm/msp430.rs b/compiler/rustc_target/src/asm/msp430.rs
new file mode 100644
index 000000000..a27d6390a
--- /dev/null
+++ b/compiler/rustc_target/src/asm/msp430.rs
@@ -0,0 +1,81 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ Msp430 Msp430InlineAsmRegClass {
+ reg,
+ }
+}
+
+impl Msp430InlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match (self, arch) {
+ (Self::reg, _) => types! { _: I8, I16; },
+ }
+ }
+}
+
+// The reserved registers are taken from:
+// https://github.com/llvm/llvm-project/blob/36cb29cbbe1b22dcd298ad65e1fabe899b7d7249/llvm/lib/Target/MSP430/MSP430RegisterInfo.cpp#L73.
+def_regs! {
+ Msp430 Msp430InlineAsmReg Msp430InlineAsmRegClass {
+ r5: reg = ["r5"],
+ r6: reg = ["r6"],
+ r7: reg = ["r7"],
+ r8: reg = ["r8"],
+ r9: reg = ["r9"],
+ r10: reg = ["r10"],
+ r11: reg = ["r11"],
+ r12: reg = ["r12"],
+ r13: reg = ["r13"],
+ r14: reg = ["r14"],
+ r15: reg = ["r15"],
+
+ #error = ["r0", "pc"] =>
+ "the program counter cannot be used as an operand for inline asm",
+ #error = ["r1", "sp"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["r2", "sr"] =>
+ "the status register cannot be used as an operand for inline asm",
+ #error = ["r3", "cg"] =>
+ "the constant generator cannot be used as an operand for inline asm",
+ #error = ["r4", "fp"] =>
+ "the frame pointer cannot be used as an operand for inline asm",
+ }
+}
+
+impl Msp430InlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ _modifier: Option<char>,
+ ) -> fmt::Result {
+ out.write_str(self.name())
+ }
+}
diff --git a/compiler/rustc_target/src/asm/nvptx.rs b/compiler/rustc_target/src/asm/nvptx.rs
new file mode 100644
index 000000000..8e1e91e7c
--- /dev/null
+++ b/compiler/rustc_target/src/asm/nvptx.rs
@@ -0,0 +1,50 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+
+def_reg_class! {
+ Nvptx NvptxInlineAsmRegClass {
+ reg16,
+ reg32,
+ reg64,
+ }
+}
+
+impl NvptxInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg16 => types! { _: I8, I16; },
+ Self::reg32 => types! { _: I8, I16, I32, F32; },
+ Self::reg64 => types! { _: I8, I16, I32, F32, I64, F64; },
+ }
+ }
+}
+
+def_regs! {
+ // Registers in PTX are declared in the assembly.
+ // There are no predefined registers that one can use.
+ Nvptx NvptxInlineAsmReg NvptxInlineAsmRegClass {}
+}
diff --git a/compiler/rustc_target/src/asm/powerpc.rs b/compiler/rustc_target/src/asm/powerpc.rs
new file mode 100644
index 000000000..d3ccb3035
--- /dev/null
+++ b/compiler/rustc_target/src/asm/powerpc.rs
@@ -0,0 +1,204 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ PowerPC PowerPCInlineAsmRegClass {
+ reg,
+ reg_nonzero,
+ freg,
+ cr,
+ xer,
+ }
+}
+
+impl PowerPCInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg | Self::reg_nonzero => {
+ if arch == InlineAsmArch::PowerPC {
+ types! { _: I8, I16, I32; }
+ } else {
+ types! { _: I8, I16, I32, I64; }
+ }
+ }
+ Self::freg => types! { _: F32, F64; },
+ Self::cr | Self::xer => &[],
+ }
+ }
+}
+
+def_regs! {
+ PowerPC PowerPCInlineAsmReg PowerPCInlineAsmRegClass {
+ r0: reg = ["r0", "0"],
+ r3: reg, reg_nonzero = ["r3", "3"],
+ r4: reg, reg_nonzero = ["r4", "4"],
+ r5: reg, reg_nonzero = ["r5", "5"],
+ r6: reg, reg_nonzero = ["r6", "6"],
+ r7: reg, reg_nonzero = ["r7", "7"],
+ r8: reg, reg_nonzero = ["r8", "8"],
+ r9: reg, reg_nonzero = ["r9", "9"],
+ r10: reg, reg_nonzero = ["r10", "10"],
+ r11: reg, reg_nonzero = ["r11", "11"],
+ r12: reg, reg_nonzero = ["r12", "12"],
+ r14: reg, reg_nonzero = ["r14", "14"],
+ r15: reg, reg_nonzero = ["r15", "15"],
+ r16: reg, reg_nonzero = ["r16", "16"],
+ r17: reg, reg_nonzero = ["r17", "17"],
+ r18: reg, reg_nonzero = ["r18", "18"],
+ r19: reg, reg_nonzero = ["r19", "19"],
+ r20: reg, reg_nonzero = ["r20", "20"],
+ r21: reg, reg_nonzero = ["r21", "21"],
+ r22: reg, reg_nonzero = ["r22", "22"],
+ r23: reg, reg_nonzero = ["r23", "23"],
+ r24: reg, reg_nonzero = ["r24", "24"],
+ r25: reg, reg_nonzero = ["r25", "25"],
+ r26: reg, reg_nonzero = ["r26", "26"],
+ r27: reg, reg_nonzero = ["r27", "27"],
+ r28: reg, reg_nonzero = ["r28", "28"],
+ f0: freg = ["f0", "fr0"],
+ f1: freg = ["f1", "fr1"],
+ f2: freg = ["f2", "fr2"],
+ f3: freg = ["f3", "fr3"],
+ f4: freg = ["f4", "fr4"],
+ f5: freg = ["f5", "fr5"],
+ f6: freg = ["f6", "fr6"],
+ f7: freg = ["f7", "fr7"],
+ f8: freg = ["f8", "fr8"],
+ f9: freg = ["f9", "fr9"],
+ f10: freg = ["f10", "fr10"],
+ f11: freg = ["f11", "fr11"],
+ f12: freg = ["f12", "fr12"],
+ f13: freg = ["f13", "fr13"],
+ f14: freg = ["f14", "fr14"],
+ f15: freg = ["f15", "fr15"],
+ f16: freg = ["f16", "fr16"],
+ f17: freg = ["f17", "fr17"],
+ f18: freg = ["f18", "fr18"],
+ f19: freg = ["f19", "fr19"],
+ f20: freg = ["f20", "fr20"],
+ f21: freg = ["f21", "fr21"],
+ f22: freg = ["f22", "fr22"],
+ f23: freg = ["f23", "fr23"],
+ f24: freg = ["f24", "fr24"],
+ f25: freg = ["f25", "fr25"],
+ f26: freg = ["f26", "fr26"],
+ f27: freg = ["f27", "fr27"],
+ f28: freg = ["f28", "fr28"],
+ f29: freg = ["f29", "fr29"],
+ f30: freg = ["f30", "fr30"],
+ f31: freg = ["f31", "fr31"],
+ cr: cr = ["cr"],
+ cr0: cr = ["cr0"],
+ cr1: cr = ["cr1"],
+ cr2: cr = ["cr2"],
+ cr3: cr = ["cr3"],
+ cr4: cr = ["cr4"],
+ cr5: cr = ["cr5"],
+ cr6: cr = ["cr6"],
+ cr7: cr = ["cr7"],
+ xer: xer = ["xer"],
+ #error = ["r1", "1", "sp"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["r2", "2"] =>
+ "r2 is a system reserved register and cannot be used as an operand for inline asm",
+ #error = ["r13", "13"] =>
+ "r13 is a system reserved register and cannot be used as an operand for inline asm",
+ #error = ["r29", "29"] =>
+ "r29 is used internally by LLVM and cannot be used as an operand for inline asm",
+ #error = ["r30", "30"] =>
+ "r30 is used internally by LLVM and cannot be used as an operand for inline asm",
+ #error = ["r31", "31", "fp"] =>
+ "the frame pointer cannot be used as an operand for inline asm",
+ #error = ["lr"] =>
+ "the link register cannot be used as an operand for inline asm",
+ #error = ["ctr"] =>
+ "the counter register cannot be used as an operand for inline asm",
+ #error = ["vrsave"] =>
+ "the vrsave register cannot be used as an operand for inline asm",
+ }
+}
+
+impl PowerPCInlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ _modifier: Option<char>,
+ ) -> fmt::Result {
+ macro_rules! do_emit {
+ (
+ $($(($reg:ident, $value:literal)),*;)*
+ ) => {
+ out.write_str(match self {
+ $($(Self::$reg => $value,)*)*
+ })
+ };
+ }
+ // Strip off the leading prefix.
+ do_emit! {
+ (r0, "0"), (r3, "3"), (r4, "4"), (r5, "5"), (r6, "6"), (r7, "7");
+ (r8, "8"), (r9, "9"), (r10, "10"), (r11, "11"), (r12, "12"), (r14, "14"), (r15, "15");
+ (r16, "16"), (r17, "17"), (r18, "18"), (r19, "19"), (r20, "20"), (r21, "21"), (r22, "22"), (r23, "23");
+ (r24, "24"), (r25, "25"), (r26, "26"), (r27, "27"), (r28, "28");
+ (f0, "0"), (f1, "1"), (f2, "2"), (f3, "3"), (f4, "4"), (f5, "5"), (f6, "6"), (f7, "7");
+ (f8, "8"), (f9, "9"), (f10, "10"), (f11, "11"), (f12, "12"), (f13, "13"), (f14, "14"), (f15, "15");
+ (f16, "16"), (f17, "17"), (f18, "18"), (f19, "19"), (f20, "20"), (f21, "21"), (f22, "22"), (f23, "23");
+ (f24, "24"), (f25, "25"), (f26, "26"), (f27, "27"), (f28, "28"), (f29, "29"), (f30, "30"), (f31, "31");
+ (cr, "cr");
+ (cr0, "0"), (cr1, "1"), (cr2, "2"), (cr3, "3"), (cr4, "4"), (cr5, "5"), (cr6, "6"), (cr7, "7");
+ (xer, "xer");
+ }
+ }
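+
+    // For example (illustrative): `r5` emits as `5`, `f10` as `10`, and
+    // `cr3` as `3`, the bare numeric operand syntax PowerPC assembly expects.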
+
+ pub fn overlapping_regs(self, mut cb: impl FnMut(PowerPCInlineAsmReg)) {
+ macro_rules! reg_conflicts {
+ (
+ $(
+ $full:ident : $($field:ident)*
+ ),*;
+ ) => {
+ match self {
+ $(
+ Self::$full => {
+ cb(Self::$full);
+ $(cb(Self::$field);)*
+ }
+ $(Self::$field)|* => {
+ cb(Self::$full);
+ cb(self);
+ }
+ )*
+ r => cb(r),
+ }
+ };
+ }
+ reg_conflicts! {
+ cr : cr0 cr1 cr2 cr3 cr4 cr5 cr6 cr7;
+ }
+ }
+}
diff --git a/compiler/rustc_target/src/asm/riscv.rs b/compiler/rustc_target/src/asm/riscv.rs
new file mode 100644
index 000000000..e41bdc9a5
--- /dev/null
+++ b/compiler/rustc_target/src/asm/riscv.rs
@@ -0,0 +1,185 @@
+use super::{InlineAsmArch, InlineAsmType};
+use crate::spec::{RelocModel, Target};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_macros::HashStable_Generic;
+use rustc_span::{sym, Symbol};
+use std::fmt;
+
+def_reg_class! {
+ RiscV RiscVInlineAsmRegClass {
+ reg,
+ freg,
+ vreg,
+ }
+}
+
+impl RiscVInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg => {
+ if arch == InlineAsmArch::RiscV64 {
+ types! { _: I8, I16, I32, I64, F32, F64; }
+ } else {
+ types! { _: I8, I16, I32, F32; }
+ }
+ }
+ Self::freg => types! { f: F32; d: F64; },
+ Self::vreg => &[],
+ }
+ }
+}
+
+fn not_e(
+ _arch: InlineAsmArch,
+ _reloc_model: RelocModel,
+ target_features: &FxHashSet<Symbol>,
+ _target: &Target,
+ _is_clobber: bool,
+) -> Result<(), &'static str> {
+ if target_features.contains(&sym::e) {
+ Err("register can't be used with the `e` target feature")
+ } else {
+ Ok(())
+ }
+}
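+
+// Illustrative effect (not part of the upstream patch): the RV32E base ISA
+// (the `e` target feature) only provides registers x0-x15, so the `% not_e`
+// filters below reject x16-x31 when that feature is enabled.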
+
+def_regs! {
+ RiscV RiscVInlineAsmReg RiscVInlineAsmRegClass {
+ x1: reg = ["x1", "ra"],
+ x5: reg = ["x5", "t0"],
+ x6: reg = ["x6", "t1"],
+ x7: reg = ["x7", "t2"],
+ x10: reg = ["x10", "a0"],
+ x11: reg = ["x11", "a1"],
+ x12: reg = ["x12", "a2"],
+ x13: reg = ["x13", "a3"],
+ x14: reg = ["x14", "a4"],
+ x15: reg = ["x15", "a5"],
+ x16: reg = ["x16", "a6"] % not_e,
+ x17: reg = ["x17", "a7"] % not_e,
+ x18: reg = ["x18", "s2"] % not_e,
+ x19: reg = ["x19", "s3"] % not_e,
+ x20: reg = ["x20", "s4"] % not_e,
+ x21: reg = ["x21", "s5"] % not_e,
+ x22: reg = ["x22", "s6"] % not_e,
+ x23: reg = ["x23", "s7"] % not_e,
+ x24: reg = ["x24", "s8"] % not_e,
+ x25: reg = ["x25", "s9"] % not_e,
+ x26: reg = ["x26", "s10"] % not_e,
+ x27: reg = ["x27", "s11"] % not_e,
+ x28: reg = ["x28", "t3"] % not_e,
+ x29: reg = ["x29", "t4"] % not_e,
+ x30: reg = ["x30", "t5"] % not_e,
+ x31: reg = ["x31", "t6"] % not_e,
+ f0: freg = ["f0", "ft0"],
+ f1: freg = ["f1", "ft1"],
+ f2: freg = ["f2", "ft2"],
+ f3: freg = ["f3", "ft3"],
+ f4: freg = ["f4", "ft4"],
+ f5: freg = ["f5", "ft5"],
+ f6: freg = ["f6", "ft6"],
+ f7: freg = ["f7", "ft7"],
+ f8: freg = ["f8", "fs0"],
+ f9: freg = ["f9", "fs1"],
+ f10: freg = ["f10", "fa0"],
+ f11: freg = ["f11", "fa1"],
+ f12: freg = ["f12", "fa2"],
+ f13: freg = ["f13", "fa3"],
+ f14: freg = ["f14", "fa4"],
+ f15: freg = ["f15", "fa5"],
+ f16: freg = ["f16", "fa6"],
+ f17: freg = ["f17", "fa7"],
+ f18: freg = ["f18", "fs2"],
+ f19: freg = ["f19", "fs3"],
+ f20: freg = ["f20", "fs4"],
+ f21: freg = ["f21", "fs5"],
+ f22: freg = ["f22", "fs6"],
+ f23: freg = ["f23", "fs7"],
+ f24: freg = ["f24", "fs8"],
+ f25: freg = ["f25", "fs9"],
+ f26: freg = ["f26", "fs10"],
+ f27: freg = ["f27", "fs11"],
+ f28: freg = ["f28", "ft8"],
+ f29: freg = ["f29", "ft9"],
+ f30: freg = ["f30", "ft10"],
+ f31: freg = ["f31", "ft11"],
+ v0: vreg = ["v0"],
+ v1: vreg = ["v1"],
+ v2: vreg = ["v2"],
+ v3: vreg = ["v3"],
+ v4: vreg = ["v4"],
+ v5: vreg = ["v5"],
+ v6: vreg = ["v6"],
+ v7: vreg = ["v7"],
+ v8: vreg = ["v8"],
+ v9: vreg = ["v9"],
+ v10: vreg = ["v10"],
+ v11: vreg = ["v11"],
+ v12: vreg = ["v12"],
+ v13: vreg = ["v13"],
+ v14: vreg = ["v14"],
+ v15: vreg = ["v15"],
+ v16: vreg = ["v16"],
+ v17: vreg = ["v17"],
+ v18: vreg = ["v18"],
+ v19: vreg = ["v19"],
+ v20: vreg = ["v20"],
+ v21: vreg = ["v21"],
+ v22: vreg = ["v22"],
+ v23: vreg = ["v23"],
+ v24: vreg = ["v24"],
+ v25: vreg = ["v25"],
+ v26: vreg = ["v26"],
+ v27: vreg = ["v27"],
+ v28: vreg = ["v28"],
+ v29: vreg = ["v29"],
+ v30: vreg = ["v30"],
+ v31: vreg = ["v31"],
+ #error = ["x9", "s1"] =>
+ "s1 is used internally by LLVM and cannot be used as an operand for inline asm",
+ #error = ["x8", "s0", "fp"] =>
+ "the frame pointer cannot be used as an operand for inline asm",
+ #error = ["x2", "sp"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["x3", "gp"] =>
+ "the global pointer cannot be used as an operand for inline asm",
+ #error = ["x4", "tp"] =>
+ "the thread pointer cannot be used as an operand for inline asm" ,
+ #error = ["x0", "zero"] =>
+ "the zero register cannot be used as an operand for inline asm",
+ }
+}
+
+impl RiscVInlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ _modifier: Option<char>,
+ ) -> fmt::Result {
+ out.write_str(self.name())
+ }
+}
diff --git a/compiler/rustc_target/src/asm/s390x.rs b/compiler/rustc_target/src/asm/s390x.rs
new file mode 100644
index 000000000..0a50064f5
--- /dev/null
+++ b/compiler/rustc_target/src/asm/s390x.rs
@@ -0,0 +1,107 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ S390x S390xInlineAsmRegClass {
+ reg,
+ freg,
+ }
+}
+
+impl S390xInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match (self, arch) {
+ (Self::reg, _) => types! { _: I8, I16, I32, I64; },
+ (Self::freg, _) => types! { _: F32, F64; },
+ }
+ }
+}
+
+def_regs! {
+ S390x S390xInlineAsmReg S390xInlineAsmRegClass {
+ r0: reg = ["r0"],
+ r1: reg = ["r1"],
+ r2: reg = ["r2"],
+ r3: reg = ["r3"],
+ r4: reg = ["r4"],
+ r5: reg = ["r5"],
+ r6: reg = ["r6"],
+ r7: reg = ["r7"],
+ r8: reg = ["r8"],
+ r9: reg = ["r9"],
+ r10: reg = ["r10"],
+ r12: reg = ["r12"],
+ r13: reg = ["r13"],
+ r14: reg = ["r14"],
+ f0: freg = ["f0"],
+ f1: freg = ["f1"],
+ f2: freg = ["f2"],
+ f3: freg = ["f3"],
+ f4: freg = ["f4"],
+ f5: freg = ["f5"],
+ f6: freg = ["f6"],
+ f7: freg = ["f7"],
+ f8: freg = ["f8"],
+ f9: freg = ["f9"],
+ f10: freg = ["f10"],
+ f11: freg = ["f11"],
+ f12: freg = ["f12"],
+ f13: freg = ["f13"],
+ f14: freg = ["f14"],
+ f15: freg = ["f15"],
+ #error = ["r11"] =>
+ "The frame pointer cannot be used as an operand for inline asm",
+ #error = ["r15"] =>
+ "The stack pointer cannot be used as an operand for inline asm",
+ #error = [
+ "c0", "c1", "c2", "c3",
+ "c4", "c5", "c6", "c7",
+ "c8", "c9", "c10", "c11",
+ "c12", "c13", "c14", "c15"
+ ] =>
+ "control registers are reserved by the kernel and cannot be used as operands for inline asm",
+ #error = [
+ "a0", "a1", "a2", "a3",
+ "a4", "a5", "a6", "a7",
+ "a8", "a9", "a10", "a11",
+ "a12", "a13", "a14", "a15"
+ ] =>
+ "access registers are not supported and cannot be used as operands for inline asm",
+ }
+}
+
+impl S390xInlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ _arch: InlineAsmArch,
+ _modifier: Option<char>,
+ ) -> fmt::Result {
+ write!(out, "%{}", self.name())
+ }
+}
diff --git a/compiler/rustc_target/src/asm/spirv.rs b/compiler/rustc_target/src/asm/spirv.rs
new file mode 100644
index 000000000..31073da10
--- /dev/null
+++ b/compiler/rustc_target/src/asm/spirv.rs
@@ -0,0 +1,47 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+
+def_reg_class! {
+ SpirV SpirVInlineAsmRegClass {
+ reg,
+ }
+}
+
+impl SpirVInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg => {
+ types! { _: I8, I16, I32, I64, F32, F64; }
+ }
+ }
+ }
+}
+
+def_regs! {
+    // SPIR-V is SSA-based; it does not have registers.
+ SpirV SpirVInlineAsmReg SpirVInlineAsmRegClass {}
+}
diff --git a/compiler/rustc_target/src/asm/wasm.rs b/compiler/rustc_target/src/asm/wasm.rs
new file mode 100644
index 000000000..f095b7c6e
--- /dev/null
+++ b/compiler/rustc_target/src/asm/wasm.rs
@@ -0,0 +1,47 @@
+use super::{InlineAsmArch, InlineAsmType};
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+
+def_reg_class! {
+ Wasm WasmInlineAsmRegClass {
+ local,
+ }
+}
+
+impl WasmInlineAsmRegClass {
+ pub fn valid_modifiers(self, _arch: super::InlineAsmArch) -> &'static [char] {
+ &[]
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, _ty: InlineAsmType) -> Option<Self> {
+ None
+ }
+
+ pub fn suggest_modifier(
+ self,
+ _arch: InlineAsmArch,
+ _ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn default_modifier(self, _arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ None
+ }
+
+ pub fn supported_types(
+ self,
+ _arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::local => {
+ types! { _: I8, I16, I32, I64, F32, F64; }
+ }
+ }
+ }
+}
+
+def_regs! {
+ // WebAssembly doesn't have registers.
+ Wasm WasmInlineAsmReg WasmInlineAsmRegClass {}
+}
diff --git a/compiler/rustc_target/src/asm/x86.rs b/compiler/rustc_target/src/asm/x86.rs
new file mode 100644
index 000000000..238c36509
--- /dev/null
+++ b/compiler/rustc_target/src/asm/x86.rs
@@ -0,0 +1,492 @@
+use super::{InlineAsmArch, InlineAsmType};
+use crate::spec::{RelocModel, Target};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_macros::HashStable_Generic;
+use rustc_span::Symbol;
+use std::fmt;
+
+def_reg_class! {
+ X86 X86InlineAsmRegClass {
+ reg,
+ reg_abcd,
+ reg_byte,
+ xmm_reg,
+ ymm_reg,
+ zmm_reg,
+ kreg,
+ kreg0,
+ mmx_reg,
+ x87_reg,
+ tmm_reg,
+ }
+}
+
+impl X86InlineAsmRegClass {
+ pub fn valid_modifiers(self, arch: super::InlineAsmArch) -> &'static [char] {
+ match self {
+ Self::reg => {
+ if arch == InlineAsmArch::X86_64 {
+ &['l', 'x', 'e', 'r']
+ } else {
+ &['x', 'e']
+ }
+ }
+ Self::reg_abcd => {
+ if arch == InlineAsmArch::X86_64 {
+ &['l', 'h', 'x', 'e', 'r']
+ } else {
+ &['l', 'h', 'x', 'e']
+ }
+ }
+ Self::reg_byte => &[],
+ Self::xmm_reg | Self::ymm_reg | Self::zmm_reg => &['x', 'y', 'z'],
+ Self::kreg | Self::kreg0 => &[],
+ Self::mmx_reg | Self::x87_reg => &[],
+ Self::tmm_reg => &[],
+ }
+ }
+
+ pub fn suggest_class(self, _arch: InlineAsmArch, ty: InlineAsmType) -> Option<Self> {
+ match self {
+ Self::reg | Self::reg_abcd if ty.size().bits() == 8 => Some(Self::reg_byte),
+ _ => None,
+ }
+ }
+
+ pub fn suggest_modifier(
+ self,
+ arch: InlineAsmArch,
+ ty: InlineAsmType,
+ ) -> Option<(char, &'static str)> {
+ match self {
+ Self::reg => match ty.size().bits() {
+ 16 => Some(('x', "ax")),
+ 32 if arch == InlineAsmArch::X86_64 => Some(('e', "eax")),
+ _ => None,
+ },
+ Self::reg_abcd => match ty.size().bits() {
+ 16 => Some(('x', "ax")),
+ 32 if arch == InlineAsmArch::X86_64 => Some(('e', "eax")),
+ _ => None,
+ },
+ Self::reg_byte => None,
+ Self::xmm_reg => None,
+ Self::ymm_reg => match ty.size().bits() {
+ 256 => None,
+ _ => Some(('x', "xmm0")),
+ },
+ Self::zmm_reg => match ty.size().bits() {
+ 512 => None,
+ 256 => Some(('y', "ymm0")),
+ _ => Some(('x', "xmm0")),
+ },
+ Self::kreg | Self::kreg0 => None,
+ Self::mmx_reg | Self::x87_reg => None,
+ Self::tmm_reg => None,
+ }
+ }
+
+ pub fn default_modifier(self, arch: InlineAsmArch) -> Option<(char, &'static str)> {
+ match self {
+ Self::reg | Self::reg_abcd => {
+ if arch == InlineAsmArch::X86_64 {
+ Some(('r', "rax"))
+ } else {
+ Some(('e', "eax"))
+ }
+ }
+ Self::reg_byte => None,
+ Self::xmm_reg => Some(('x', "xmm0")),
+ Self::ymm_reg => Some(('y', "ymm0")),
+ Self::zmm_reg => Some(('z', "zmm0")),
+ Self::kreg | Self::kreg0 => None,
+ Self::mmx_reg | Self::x87_reg => None,
+ Self::tmm_reg => None,
+ }
+ }
+
+ pub fn supported_types(
+ self,
+ arch: InlineAsmArch,
+ ) -> &'static [(InlineAsmType, Option<Symbol>)] {
+ match self {
+ Self::reg | Self::reg_abcd => {
+ if arch == InlineAsmArch::X86_64 {
+ types! { _: I16, I32, I64, F32, F64; }
+ } else {
+ types! { _: I16, I32, F32; }
+ }
+ }
+ Self::reg_byte => types! { _: I8; },
+ Self::xmm_reg => types! {
+ sse: I32, I64, F32, F64,
+ VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2);
+ },
+ Self::ymm_reg => types! {
+ avx: I32, I64, F32, F64,
+ VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2),
+ VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4);
+ },
+ Self::zmm_reg => types! {
+ avx512f: I32, I64, F32, F64,
+ VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2),
+ VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4),
+ VecI8(64), VecI16(32), VecI32(16), VecI64(8), VecF32(16), VecF64(8);
+ },
+ Self::kreg => types! {
+ avx512f: I8, I16;
+ avx512bw: I32, I64;
+ },
+ Self::kreg0 => &[],
+ Self::mmx_reg | Self::x87_reg => &[],
+ Self::tmm_reg => &[],
+ }
+ }
+}
+
+fn x86_64_only(
+ arch: InlineAsmArch,
+ _reloc_model: RelocModel,
+ _target_features: &FxHashSet<Symbol>,
+ _target: &Target,
+ _is_clobber: bool,
+) -> Result<(), &'static str> {
+ match arch {
+ InlineAsmArch::X86 => Err("register is only available on x86_64"),
+ InlineAsmArch::X86_64 => Ok(()),
+ _ => unreachable!(),
+ }
+}
+
+fn high_byte(
+ arch: InlineAsmArch,
+ _reloc_model: RelocModel,
+ _target_features: &FxHashSet<Symbol>,
+ _target: &Target,
+ _is_clobber: bool,
+) -> Result<(), &'static str> {
+ match arch {
+ InlineAsmArch::X86_64 => Err("high byte registers cannot be used as an operand on x86_64"),
+ _ => Ok(()),
+ }
+}
+
+fn rbx_reserved(
+ arch: InlineAsmArch,
+ _reloc_model: RelocModel,
+ _target_features: &FxHashSet<Symbol>,
+ _target: &Target,
+ _is_clobber: bool,
+) -> Result<(), &'static str> {
+ match arch {
+ InlineAsmArch::X86 => Ok(()),
+ InlineAsmArch::X86_64 => {
+ Err("rbx is used internally by LLVM and cannot be used as an operand for inline asm")
+ }
+ _ => unreachable!(),
+ }
+}
+
+fn esi_reserved(
+ arch: InlineAsmArch,
+ _reloc_model: RelocModel,
+ _target_features: &FxHashSet<Symbol>,
+ _target: &Target,
+ _is_clobber: bool,
+) -> Result<(), &'static str> {
+ match arch {
+ InlineAsmArch::X86 => {
+ Err("esi is used internally by LLVM and cannot be used as an operand for inline asm")
+ }
+ InlineAsmArch::X86_64 => Ok(()),
+ _ => unreachable!(),
+ }
+}
+
+def_regs! {
+ X86 X86InlineAsmReg X86InlineAsmRegClass {
+ ax: reg, reg_abcd = ["ax", "eax", "rax"],
+ bx: reg, reg_abcd = ["bx", "ebx", "rbx"] % rbx_reserved,
+ cx: reg, reg_abcd = ["cx", "ecx", "rcx"],
+ dx: reg, reg_abcd = ["dx", "edx", "rdx"],
+ si: reg = ["si", "esi", "rsi"] % esi_reserved,
+ di: reg = ["di", "edi", "rdi"],
+ r8: reg = ["r8", "r8w", "r8d"] % x86_64_only,
+ r9: reg = ["r9", "r9w", "r9d"] % x86_64_only,
+ r10: reg = ["r10", "r10w", "r10d"] % x86_64_only,
+ r11: reg = ["r11", "r11w", "r11d"] % x86_64_only,
+ r12: reg = ["r12", "r12w", "r12d"] % x86_64_only,
+ r13: reg = ["r13", "r13w", "r13d"] % x86_64_only,
+ r14: reg = ["r14", "r14w", "r14d"] % x86_64_only,
+ r15: reg = ["r15", "r15w", "r15d"] % x86_64_only,
+ al: reg_byte = ["al"],
+ ah: reg_byte = ["ah"] % high_byte,
+ bl: reg_byte = ["bl"],
+ bh: reg_byte = ["bh"] % high_byte,
+ cl: reg_byte = ["cl"],
+ ch: reg_byte = ["ch"] % high_byte,
+ dl: reg_byte = ["dl"],
+ dh: reg_byte = ["dh"] % high_byte,
+ sil: reg_byte = ["sil"] % x86_64_only,
+ dil: reg_byte = ["dil"] % x86_64_only,
+ r8b: reg_byte = ["r8b"] % x86_64_only,
+ r9b: reg_byte = ["r9b"] % x86_64_only,
+ r10b: reg_byte = ["r10b"] % x86_64_only,
+ r11b: reg_byte = ["r11b"] % x86_64_only,
+ r12b: reg_byte = ["r12b"] % x86_64_only,
+ r13b: reg_byte = ["r13b"] % x86_64_only,
+ r14b: reg_byte = ["r14b"] % x86_64_only,
+ r15b: reg_byte = ["r15b"] % x86_64_only,
+ xmm0: xmm_reg = ["xmm0"],
+ xmm1: xmm_reg = ["xmm1"],
+ xmm2: xmm_reg = ["xmm2"],
+ xmm3: xmm_reg = ["xmm3"],
+ xmm4: xmm_reg = ["xmm4"],
+ xmm5: xmm_reg = ["xmm5"],
+ xmm6: xmm_reg = ["xmm6"],
+ xmm7: xmm_reg = ["xmm7"],
+ xmm8: xmm_reg = ["xmm8"] % x86_64_only,
+ xmm9: xmm_reg = ["xmm9"] % x86_64_only,
+ xmm10: xmm_reg = ["xmm10"] % x86_64_only,
+ xmm11: xmm_reg = ["xmm11"] % x86_64_only,
+ xmm12: xmm_reg = ["xmm12"] % x86_64_only,
+ xmm13: xmm_reg = ["xmm13"] % x86_64_only,
+ xmm14: xmm_reg = ["xmm14"] % x86_64_only,
+ xmm15: xmm_reg = ["xmm15"] % x86_64_only,
+ ymm0: ymm_reg = ["ymm0"],
+ ymm1: ymm_reg = ["ymm1"],
+ ymm2: ymm_reg = ["ymm2"],
+ ymm3: ymm_reg = ["ymm3"],
+ ymm4: ymm_reg = ["ymm4"],
+ ymm5: ymm_reg = ["ymm5"],
+ ymm6: ymm_reg = ["ymm6"],
+ ymm7: ymm_reg = ["ymm7"],
+ ymm8: ymm_reg = ["ymm8"] % x86_64_only,
+ ymm9: ymm_reg = ["ymm9"] % x86_64_only,
+ ymm10: ymm_reg = ["ymm10"] % x86_64_only,
+ ymm11: ymm_reg = ["ymm11"] % x86_64_only,
+ ymm12: ymm_reg = ["ymm12"] % x86_64_only,
+ ymm13: ymm_reg = ["ymm13"] % x86_64_only,
+ ymm14: ymm_reg = ["ymm14"] % x86_64_only,
+ ymm15: ymm_reg = ["ymm15"] % x86_64_only,
+ zmm0: zmm_reg = ["zmm0"],
+ zmm1: zmm_reg = ["zmm1"],
+ zmm2: zmm_reg = ["zmm2"],
+ zmm3: zmm_reg = ["zmm3"],
+ zmm4: zmm_reg = ["zmm4"],
+ zmm5: zmm_reg = ["zmm5"],
+ zmm6: zmm_reg = ["zmm6"],
+ zmm7: zmm_reg = ["zmm7"],
+ zmm8: zmm_reg = ["zmm8"] % x86_64_only,
+ zmm9: zmm_reg = ["zmm9"] % x86_64_only,
+ zmm10: zmm_reg = ["zmm10"] % x86_64_only,
+ zmm11: zmm_reg = ["zmm11"] % x86_64_only,
+ zmm12: zmm_reg = ["zmm12"] % x86_64_only,
+ zmm13: zmm_reg = ["zmm13"] % x86_64_only,
+ zmm14: zmm_reg = ["zmm14"] % x86_64_only,
+ zmm15: zmm_reg = ["zmm15"] % x86_64_only,
+ zmm16: zmm_reg = ["zmm16", "xmm16", "ymm16"] % x86_64_only,
+ zmm17: zmm_reg = ["zmm17", "xmm17", "ymm17"] % x86_64_only,
+ zmm18: zmm_reg = ["zmm18", "xmm18", "ymm18"] % x86_64_only,
+ zmm19: zmm_reg = ["zmm19", "xmm19", "ymm19"] % x86_64_only,
+ zmm20: zmm_reg = ["zmm20", "xmm20", "ymm20"] % x86_64_only,
+ zmm21: zmm_reg = ["zmm21", "xmm21", "ymm21"] % x86_64_only,
+ zmm22: zmm_reg = ["zmm22", "xmm22", "ymm22"] % x86_64_only,
+ zmm23: zmm_reg = ["zmm23", "xmm23", "ymm23"] % x86_64_only,
+ zmm24: zmm_reg = ["zmm24", "xmm24", "ymm24"] % x86_64_only,
+ zmm25: zmm_reg = ["zmm25", "xmm25", "ymm25"] % x86_64_only,
+ zmm26: zmm_reg = ["zmm26", "xmm26", "ymm26"] % x86_64_only,
+ zmm27: zmm_reg = ["zmm27", "xmm27", "ymm27"] % x86_64_only,
+ zmm28: zmm_reg = ["zmm28", "xmm28", "ymm28"] % x86_64_only,
+ zmm29: zmm_reg = ["zmm29", "xmm29", "ymm29"] % x86_64_only,
+ zmm30: zmm_reg = ["zmm30", "xmm30", "ymm30"] % x86_64_only,
+ zmm31: zmm_reg = ["zmm31", "xmm31", "ymm31"] % x86_64_only,
+ k0: kreg0 = ["k0"],
+ k1: kreg = ["k1"],
+ k2: kreg = ["k2"],
+ k3: kreg = ["k3"],
+ k4: kreg = ["k4"],
+ k5: kreg = ["k5"],
+ k6: kreg = ["k6"],
+ k7: kreg = ["k7"],
+ mm0: mmx_reg = ["mm0"],
+ mm1: mmx_reg = ["mm1"],
+ mm2: mmx_reg = ["mm2"],
+ mm3: mmx_reg = ["mm3"],
+ mm4: mmx_reg = ["mm4"],
+ mm5: mmx_reg = ["mm5"],
+ mm6: mmx_reg = ["mm6"],
+ mm7: mmx_reg = ["mm7"],
+ st0: x87_reg = ["st(0)", "st"],
+ st1: x87_reg = ["st(1)"],
+ st2: x87_reg = ["st(2)"],
+ st3: x87_reg = ["st(3)"],
+ st4: x87_reg = ["st(4)"],
+ st5: x87_reg = ["st(5)"],
+ st6: x87_reg = ["st(6)"],
+ st7: x87_reg = ["st(7)"],
+ tmm0: tmm_reg = ["tmm0"] % x86_64_only,
+ tmm1: tmm_reg = ["tmm1"] % x86_64_only,
+ tmm2: tmm_reg = ["tmm2"] % x86_64_only,
+ tmm3: tmm_reg = ["tmm3"] % x86_64_only,
+ tmm4: tmm_reg = ["tmm4"] % x86_64_only,
+ tmm5: tmm_reg = ["tmm5"] % x86_64_only,
+ tmm6: tmm_reg = ["tmm6"] % x86_64_only,
+ tmm7: tmm_reg = ["tmm7"] % x86_64_only,
+ #error = ["bp", "bpl", "ebp", "rbp"] =>
+ "the frame pointer cannot be used as an operand for inline asm",
+ #error = ["sp", "spl", "esp", "rsp"] =>
+ "the stack pointer cannot be used as an operand for inline asm",
+ #error = ["ip", "eip", "rip"] =>
+ "the instruction pointer cannot be used as an operand for inline asm",
+ }
+}
+
+impl X86InlineAsmReg {
+ pub fn emit(
+ self,
+ out: &mut dyn fmt::Write,
+ arch: InlineAsmArch,
+ modifier: Option<char>,
+ ) -> fmt::Result {
+ let reg_default_modifier = match arch {
+ InlineAsmArch::X86 => 'e',
+ InlineAsmArch::X86_64 => 'r',
+ _ => unreachable!(),
+ };
+ if self as u32 <= Self::dx as u32 {
+ let root = ['a', 'b', 'c', 'd'][self as usize - Self::ax as usize];
+ match modifier.unwrap_or(reg_default_modifier) {
+ 'l' => write!(out, "{}l", root),
+ 'h' => write!(out, "{}h", root),
+ 'x' => write!(out, "{}x", root),
+ 'e' => write!(out, "e{}x", root),
+ 'r' => write!(out, "r{}x", root),
+ _ => unreachable!(),
+ }
+ } else if self as u32 <= Self::di as u32 {
+ let root = self.name();
+ match modifier.unwrap_or(reg_default_modifier) {
+ 'l' => write!(out, "{}l", root),
+ 'x' => write!(out, "{}", root),
+ 'e' => write!(out, "e{}", root),
+ 'r' => write!(out, "r{}", root),
+ _ => unreachable!(),
+ }
+ } else if self as u32 <= Self::r15 as u32 {
+ let root = self.name();
+ match modifier.unwrap_or(reg_default_modifier) {
+ 'l' => write!(out, "{}b", root),
+ 'x' => write!(out, "{}w", root),
+ 'e' => write!(out, "{}d", root),
+ 'r' => out.write_str(root),
+ _ => unreachable!(),
+ }
+ } else if self as u32 <= Self::r15b as u32 {
+ out.write_str(self.name())
+ } else if self as u32 <= Self::xmm15 as u32 {
+ let prefix = modifier.unwrap_or('x');
+ let index = self as u32 - Self::xmm0 as u32;
+ write!(out, "{}{}", prefix, index)
+ } else if self as u32 <= Self::ymm15 as u32 {
+ let prefix = modifier.unwrap_or('y');
+ let index = self as u32 - Self::ymm0 as u32;
+ write!(out, "{}{}", prefix, index)
+ } else if self as u32 <= Self::zmm31 as u32 {
+ let prefix = modifier.unwrap_or('z');
+ let index = self as u32 - Self::zmm0 as u32;
+ write!(out, "{}{}", prefix, index)
+ } else {
+ out.write_str(self.name())
+ }
+ }
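+
+    // Illustrative examples (not part of the upstream patch): on x86_64
+    // with no modifier, `ax` emits as `rax`; modifier 'e' yields `eax` and
+    // 'x' yields `ax`, while `xmm3` with modifier 'y' emits as `ymm3`.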
+
+ pub fn overlapping_regs(self, mut cb: impl FnMut(X86InlineAsmReg)) {
+ macro_rules! reg_conflicts {
+ (
+ $(
+ $w:ident : $l:ident $h:ident
+ ),*;
+ $(
+ $w2:ident : $l2:ident
+ ),*;
+ $(
+ $x:ident : $y:ident : $z:ident
+ ),*;
+ ) => {
+ match self {
+ $(
+ Self::$w => {
+ cb(Self::$w);
+ cb(Self::$l);
+ cb(Self::$h);
+ }
+ Self::$l => {
+ cb(Self::$w);
+ cb(Self::$l);
+ }
+ Self::$h => {
+ cb(Self::$w);
+ cb(Self::$h);
+ }
+ )*
+ $(
+ Self::$w2 | Self::$l2 => {
+ cb(Self::$w2);
+ cb(Self::$l2);
+ }
+ )*
+ $(
+ Self::$x | Self::$y | Self::$z => {
+ cb(Self::$x);
+ cb(Self::$y);
+ cb(Self::$z);
+ }
+ )*
+ r => cb(r),
+ }
+ };
+ }
+
+ // XMM*, YMM* and ZMM* are all different views of the same register.
+ //
+ // See section 15.5 of the combined Intel® 64 and IA-32 Architectures
+ // Software Developer’s Manual for more details.
+ //
+ // We don't need to specify conflicts for [x,y,z]mm[16-31] since these
+ // registers are only available with AVX-512, so we just specify them
+ // as aliases directly.
+ reg_conflicts! {
+ ax : al ah,
+ bx : bl bh,
+ cx : cl ch,
+ dx : dl dh;
+ si : sil,
+ di : dil,
+ r8 : r8b,
+ r9 : r9b,
+ r10 : r10b,
+ r11 : r11b,
+ r12 : r12b,
+ r13 : r13b,
+ r14 : r14b,
+ r15 : r15b;
+ xmm0 : ymm0 : zmm0,
+ xmm1 : ymm1 : zmm1,
+ xmm2 : ymm2 : zmm2,
+ xmm3 : ymm3 : zmm3,
+ xmm4 : ymm4 : zmm4,
+ xmm5 : ymm5 : zmm5,
+ xmm6 : ymm6 : zmm6,
+ xmm7 : ymm7 : zmm7,
+ xmm8 : ymm8 : zmm8,
+ xmm9 : ymm9 : zmm9,
+ xmm10 : ymm10 : zmm10,
+ xmm11 : ymm11 : zmm11,
+ xmm12 : ymm12 : zmm12,
+ xmm13 : ymm13 : zmm13,
+ xmm14 : ymm14 : zmm14,
+ xmm15 : ymm15 : zmm15;
+ }
+ }
+}
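
The conflict sets computed here encode partial overlap: writing AX clobbers both AL and AH, yet AL and AH do not conflict with each other, while the vector registers are full aliases of one another. A standalone miniature of the idea (a hypothetical Reg enum, not the real X86InlineAsmReg type):

#[derive(Clone, Copy, Debug, PartialEq)]
enum Reg { Ax, Al, Ah, Xmm0, Ymm0, Zmm0 }

// Miniature analogue of `overlapping_regs`: report every register that
// shares storage with `r`, including `r` itself.
fn overlapping_regs(r: Reg, mut cb: impl FnMut(Reg)) {
    use Reg::*;
    match r {
        Ax => { cb(Ax); cb(Al); cb(Ah); }
        Al => { cb(Ax); cb(Al); }
        Ah => { cb(Ax); cb(Ah); }
        // XMM0, YMM0 and ZMM0 are all views of the same vector register.
        Xmm0 | Ymm0 | Zmm0 => { cb(Xmm0); cb(Ymm0); cb(Zmm0); }
    }
}

fn main() {
    let mut conflicts = Vec::new();
    overlapping_regs(Reg::Al, |r| conflicts.push(r));
    // AL conflicts with AX but not with AH.
    assert_eq!(conflicts, vec![Reg::Ax, Reg::Al]);
}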
diff --git a/compiler/rustc_target/src/json.rs b/compiler/rustc_target/src/json.rs
new file mode 100644
index 000000000..b5d926352
--- /dev/null
+++ b/compiler/rustc_target/src/json.rs
@@ -0,0 +1,91 @@
+use std::borrow::Cow;
+use std::collections::BTreeMap;
+
+pub use serde_json::Value as Json;
+use serde_json::{Map, Number};
+
+pub trait ToJson {
+ fn to_json(&self) -> Json;
+}
+
+impl ToJson for Json {
+ fn to_json(&self) -> Json {
+ self.clone()
+ }
+}
+
+macro_rules! to_json_impl_num {
+ ($($t:ty), +) => (
+ $(impl ToJson for $t {
+ fn to_json(&self) -> Json {
+ Json::Number(Number::from(*self))
+ }
+ })+
+ )
+}
+
+to_json_impl_num! { isize, i8, i16, i32, i64, usize, u8, u16, u32, u64 }
+
+impl ToJson for bool {
+ fn to_json(&self) -> Json {
+ Json::Bool(*self)
+ }
+}
+
+impl ToJson for str {
+ fn to_json(&self) -> Json {
+ Json::String(self.to_owned())
+ }
+}
+
+impl ToJson for String {
+ fn to_json(&self) -> Json {
+ Json::String(self.to_owned())
+ }
+}
+
+impl<'a> ToJson for Cow<'a, str> {
+ fn to_json(&self) -> Json {
+ Json::String(self.to_string())
+ }
+}
+
+impl<A: ToJson> ToJson for [A] {
+ fn to_json(&self) -> Json {
+ Json::Array(self.iter().map(|elt| elt.to_json()).collect())
+ }
+}
+
+impl<A: ToJson> ToJson for Vec<A> {
+ fn to_json(&self) -> Json {
+ Json::Array(self.iter().map(|elt| elt.to_json()).collect())
+ }
+}
+
+impl<'a, A: ToJson> ToJson for Cow<'a, [A]>
+where
+ [A]: ToOwned,
+{
+ fn to_json(&self) -> Json {
+ Json::Array(self.iter().map(|elt| elt.to_json()).collect())
+ }
+}
+
+impl<T: ToString, A: ToJson> ToJson for BTreeMap<T, A> {
+ fn to_json(&self) -> Json {
+ let mut d = Map::new();
+ for (key, value) in self {
+ d.insert(key.to_string(), value.to_json());
+ }
+ Json::Object(d)
+ }
+}
+
+impl<A: ToJson> ToJson for Option<A> {
+ fn to_json(&self) -> Json {
+ match *self {
+ None => Json::Null,
+ Some(ref value) => value.to_json(),
+ }
+ }
+}
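
These blanket impls compose structurally: a Vec<Vec<u32>> serializes by recursing through the element impls, with no intermediate representation. A minimal self-contained sketch of the same pattern (assuming serde_json as a dependency, as in the file above):

use serde_json::{Number, Value as Json};

trait ToJson {
    fn to_json(&self) -> Json;
}

impl ToJson for u32 {
    fn to_json(&self) -> Json {
        Json::Number(Number::from(*self))
    }
}

// Containers delegate element-by-element to the inner impl.
impl<A: ToJson> ToJson for Vec<A> {
    fn to_json(&self) -> Json {
        Json::Array(self.iter().map(|elt| elt.to_json()).collect())
    }
}

fn main() {
    let v: Vec<Vec<u32>> = vec![vec![1, 2], vec![3]];
    assert_eq!(v.to_json().to_string(), "[[1,2],[3]]");
}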
diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs
new file mode 100644
index 000000000..59dbea705
--- /dev/null
+++ b/compiler/rustc_target/src/lib.rs
@@ -0,0 +1,88 @@
+//! Some stuff used by rustc that doesn't have many dependencies.
+//!
+//! Originally extracted from rustc::back, which was nominally the
+//! compiler 'backend'; since LLVM is rustc's actual backend, rustc_target
+//! is really just odds and ends relating to code generation and linking.
+//! This crate mostly exists to make rustc smaller, so we might put
+//! more 'stuff' here in the future. It does not have a dependency on
+//! LLVM.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(assert_matches)]
+#![feature(associated_type_bounds)]
+#![feature(exhaustive_patterns)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(rustc_attrs)]
+#![feature(step_trait)]
+
+use std::iter::FromIterator;
+use std::path::{Path, PathBuf};
+
+#[macro_use]
+extern crate rustc_macros;
+
+#[macro_use]
+extern crate tracing;
+
+pub mod abi;
+pub mod asm;
+pub mod json;
+pub mod spec;
+
+#[cfg(test)]
+mod tests;
+
+/// Requirements for a `StableHashingContext` to be used in this crate.
+/// This is a hack to allow using the `HashStable_Generic` derive macro
+/// instead of implementing everything in `rustc_middle`.
+pub trait HashStableContext {}
+
+/// The name of rustc's own place to organize libraries.
+///
+/// Used to be `rustc`, now the default is `rustlib`.
+const RUST_LIB_DIR: &str = "rustlib";
+
+/// Returns a `rustlib` path for this particular target, relative to the provided sysroot.
+///
+/// For example: `target_rustlib_path("/usr", "x86_64-unknown-linux-gnu")` =>
+/// `"lib*/rustlib/x86_64-unknown-linux-gnu"`.
+pub fn target_rustlib_path(sysroot: &Path, target_triple: &str) -> PathBuf {
+ let libdir = find_libdir(sysroot);
+ PathBuf::from_iter([
+ Path::new(libdir.as_ref()),
+ Path::new(RUST_LIB_DIR),
+ Path::new(target_triple),
+ ])
+}
+
+/// The name of the directory in which rustc expects libraries to be located.
+fn find_libdir(sysroot: &Path) -> std::borrow::Cow<'static, str> {
+ // FIXME: This is a quick hack to make the rustc binary able to locate
+ // Rust libraries in Linux environments where libraries might be installed
+ // to lib64/lib32. It would be more foolproof to base the sysroot off
+ // of the directory where `librustc_driver` is located, rather than
+ // where the rustc binary is.
+ // If --libdir is set during configuration to a value other than
+ // "lib" (i.e., non-default), that value is used (see issue #16552).
+
+ #[cfg(target_pointer_width = "64")]
+ const PRIMARY_LIB_DIR: &str = "lib64";
+
+ #[cfg(target_pointer_width = "32")]
+ const PRIMARY_LIB_DIR: &str = "lib32";
+
+ const SECONDARY_LIB_DIR: &str = "lib";
+
+ match option_env!("CFG_LIBDIR_RELATIVE") {
+ None | Some("lib") => {
+ if sysroot.join(PRIMARY_LIB_DIR).join(RUST_LIB_DIR).exists() {
+ PRIMARY_LIB_DIR.into()
+ } else {
+ SECONDARY_LIB_DIR.into()
+ }
+ }
+ Some(libdir) => libdir.into(),
+ }
+}
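
Note that target_rustlib_path returns a path relative to the sysroot; the sysroot argument is only consulted by find_libdir to probe for lib64/lib32. A simplified standalone sketch with "lib" hard-coded instead of probed:

use std::path::{Path, PathBuf};

// Simplified analogue of `target_rustlib_path`: the real function probes
// the sysroot for lib64/lib32 via `find_libdir`; here "lib" is hard-coded.
fn target_rustlib_path(_sysroot: &Path, target_triple: &str) -> PathBuf {
    [Path::new("lib"), Path::new("rustlib"), Path::new(target_triple)]
        .into_iter()
        .collect()
}

fn main() {
    let p = target_rustlib_path(Path::new("/usr"), "x86_64-unknown-linux-gnu");
    assert_eq!(p, PathBuf::from("lib/rustlib/x86_64-unknown-linux-gnu"));
}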
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
new file mode 100644
index 000000000..9d36e37d7
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_apple_darwin.rs
@@ -0,0 +1,30 @@
+use crate::spec::{FramePointer, LinkerFlavor, SanitizerSet, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::apple_base::opts("macos");
+ base.cpu = "apple-a14".into();
+ base.max_atomic_width = Some(128);
+
+ // FIXME: The leak sanitizer currently fails the tests, see #88132.
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::THREAD;
+
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-arch", "arm64"]);
+ base.link_env_remove.to_mut().extend(super::apple_base::macos_link_env_remove());
+
+ // Clang automatically chooses a more specific target based on
+ // MACOSX_DEPLOYMENT_TARGET. For cross-language LTO to work
+ // correctly, we do the same.
+ let llvm_target = super::apple_base::macos_llvm_target("arm64");
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ mcount: "\u{1}mcount".into(),
+ frame_pointer: FramePointer::NonLeaf,
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios.rs
new file mode 100644
index 000000000..beb904239
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios.rs
@@ -0,0 +1,36 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{FramePointer, Target, TargetOptions};
+
+pub fn target() -> Target {
+ // Clang automatically chooses a more specific target based on
+ // IPHONEOS_DEPLOYMENT_TARGET. This is required for the target to
+ // pick the right Mach-O load commands, so we do the same.
+ let arch = "arm64";
+ let llvm_target = super::apple_base::ios_llvm_target(arch);
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ features: "+neon,+fp-armv8,+apple-a7".into(),
+ max_atomic_width: Some(128),
+ forces_embed_bitcode: true,
+ frame_pointer: FramePointer::NonLeaf,
+ // Taken from a clang build on Xcode 11.4.1.
+ // These arguments are not actually invoked - they just have
+ // to look right to pass App Store validation.
+ bitcode_llvm_cmdline: "-triple\0\
+ arm64-apple-ios11.0.0\0\
+ -emit-obj\0\
+ -disable-llvm-passes\0\
+ -target-abi\0\
+ darwinpcs\0\
+ -Os\0"
+ .into(),
+ ..opts("ios", Arch::Arm64)
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
new file mode 100644
index 000000000..1dad07a9a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios_macabi.rs
@@ -0,0 +1,32 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{FramePointer, LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let llvm_target = "arm64-apple-ios14.0-macabi";
+
+ let mut base = opts("ios", Arch::Arm64_macabi);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-target", llvm_target]);
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ features: "+neon,+fp-armv8,+apple-a12".into(),
+ max_atomic_width: Some(128),
+ forces_embed_bitcode: true,
+ frame_pointer: FramePointer::NonLeaf,
+ // Taken from a clang build on Xcode 11.4.1.
+ // These arguments are not actually invoked - they just have
+ // to look right to pass App Store validation.
+ bitcode_llvm_cmdline: "-triple\0\
+ arm64-apple-ios14.0-macabi\0\
+ -emit-obj\0\
+ -disable-llvm-passes\0\
+ -Os\0"
+ .into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs b/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs
new file mode 100644
index 000000000..b4e135f66
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_apple_ios_sim.rs
@@ -0,0 +1,38 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{FramePointer, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = opts("ios", Arch::Arm64_sim);
+
+ // Clang automatically chooses a more specific target based on
+ // IPHONEOS_DEPLOYMENT_TARGET. This is required for the simulator
+ // target to pick the right Mach-O load commands, so we do the same.
+ let arch = "arm64";
+ let llvm_target = super::apple_base::ios_sim_llvm_target(arch);
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ features: "+neon,+fp-armv8,+apple-a7".into(),
+ max_atomic_width: Some(128),
+ forces_embed_bitcode: true,
+ frame_pointer: FramePointer::NonLeaf,
+ // Taken from a clang build on Xcode 11.4.1.
+ // These arguments are not actually invoked - they just have
+ // to look right to pass App Store validation.
+ bitcode_llvm_cmdline: "-triple\0\
+ arm64-apple-ios14.0-simulator\0\
+ -emit-obj\0\
+ -disable-llvm-passes\0\
+ -target-abi\0\
+ darwinpcs\0\
+ -Os\0"
+ .into(),
+ ..base
+ },
+ }
+}
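
The bitcode_llvm_cmdline strings in these Apple targets are NUL-separated argument lists; as the comments note, they only have to look right to App Store validation, which reads them back out of the embedded-bitcode command-line section. A quick illustration of the encoding:

fn main() {
    let cmdline = "-triple\0arm64-apple-ios14.0-simulator\0-emit-obj\0\
                   -disable-llvm-passes\0-target-abi\0darwinpcs\0-Os\0";
    // Each argument is terminated by a NUL byte, so splitting recovers the
    // argv-style list (the final NUL leaves one empty entry to discard).
    let args: Vec<&str> = cmdline.split('\0').filter(|s| !s.is_empty()).collect();
    assert_eq!(args[0], "-triple");
    assert_eq!(args.len(), 7);
}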
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs b/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs
new file mode 100644
index 000000000..2e31d16dc
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_apple_tvos.rs
@@ -0,0 +1,18 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{FramePointer, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "arm64-apple-tvos".into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ features: "+neon,+fp-armv8,+apple-a7".into(),
+ max_atomic_width: Some(128),
+ forces_embed_bitcode: true,
+ frame_pointer: FramePointer::NonLeaf,
+ ..opts("tvos", Arch::Arm64)
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_apple_watchos_sim.rs b/compiler/rustc_target/src/spec/aarch64_apple_watchos_sim.rs
new file mode 100644
index 000000000..3059f4214
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_apple_watchos_sim.rs
@@ -0,0 +1,38 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{FramePointer, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = opts("watchos", Arch::Arm64_sim);
+
+ // Clang automatically chooses a more specific target based on
+ // WATCHOS_DEPLOYMENT_TARGET. This is required for the simulator
+ // target to pick the right Mach-O load commands, so we do the same.
+ let arch = "arm64";
+ let llvm_target = super::apple_base::watchos_sim_llvm_target(arch);
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ features: "+neon,+fp-armv8,+apple-a7".into(),
+ max_atomic_width: Some(128),
+ forces_embed_bitcode: true,
+ frame_pointer: FramePointer::NonLeaf,
+ // Taken from a clang build on Xcode 11.4.1.
+ // These arguments are not actually invoked - they just have
+ // to look right to pass App Store validation.
+ bitcode_llvm_cmdline: "-triple\0\
+ arm64-apple-watchos5.0-simulator\0\
+ -emit-obj\0\
+ -disable-llvm-passes\0\
+ -target-abi\0\
+ darwinpcs\0\
+ -Os\0"
+ .into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_be_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/aarch64_be_unknown_linux_gnu.rs
new file mode 100644
index 000000000..9bce82a19
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_be_unknown_linux_gnu.rs
@@ -0,0 +1,18 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64_be-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ features: "+outline-atomics".into(),
+ max_atomic_width: Some(128),
+ mcount: "\u{1}_mcount".into(),
+ endian: Endian::Big,
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
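
Note the paired changes relative to the little-endian target: endian: Endian::Big and the leading "E" in the data-layout string. In LLVM's data-layout grammar, the "e"/"E" component selects little- or big-endian, so the two settings must agree. A tiny illustrative check:

#[derive(Debug, PartialEq)]
enum Endian { Little, Big }

// The "e"/"E" field of an LLVM data-layout string encodes endianness;
// in these specs it is the leading component.
fn endian_of(data_layout: &str) -> Option<Endian> {
    match data_layout.split('-').next()? {
        "e" => Some(Endian::Little),
        "E" => Some(Endian::Big),
        _ => None,
    }
}

fn main() {
    assert_eq!(endian_of("E-m:e-i8:8:32-i64:64-n32:64-S128"), Some(Endian::Big));
    assert_eq!(endian_of("e-m:e-i8:8:32-i64:64-n32:64-S128"), Some(Endian::Little));
}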
diff --git a/compiler/rustc_target/src/spec/aarch64_be_unknown_linux_gnu_ilp32.rs b/compiler/rustc_target/src/spec/aarch64_be_unknown_linux_gnu_ilp32.rs
new file mode 100644
index 000000000..c9ceb55dd
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_be_unknown_linux_gnu_ilp32.rs
@@ -0,0 +1,21 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.max_atomic_width = Some(128);
+
+ Target {
+ llvm_target: "aarch64_be-unknown-linux-gnu_ilp32".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ abi: "ilp32".into(),
+ features: "+outline-atomics".into(),
+ mcount: "\u{1}_mcount".into(),
+ endian: Endian::Big,
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_fuchsia.rs b/compiler/rustc_target/src/spec/aarch64_fuchsia.rs
new file mode 100644
index 000000000..4634433c4
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_fuchsia.rs
@@ -0,0 +1,15 @@
+use crate::spec::{SanitizerSet, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-fuchsia".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ max_atomic_width: Some(128),
+ supported_sanitizers: SanitizerSet::ADDRESS | SanitizerSet::CFI,
+ ..super::fuchsia_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_kmc_solid_asp3.rs b/compiler/rustc_target/src/spec/aarch64_kmc_solid_asp3.rs
new file mode 100644
index 000000000..6ea9ae266
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_kmc_solid_asp3.rs
@@ -0,0 +1,19 @@
+use super::{RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = super::solid_base::opts("asp3");
+ Target {
+ llvm_target: "aarch64-unknown-none".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ linker: Some("aarch64-kmc-elf-gcc".into()),
+ features: "+neon,+fp-armv8".into(),
+ relocation_model: RelocModel::Static,
+ disable_redzone: true,
+ max_atomic_width: Some(128),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_linux_android.rs b/compiler/rustc_target/src/spec/aarch64_linux_android.rs
new file mode 100644
index 000000000..c85f7f62a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_linux_android.rs
@@ -0,0 +1,25 @@
+use crate::spec::{SanitizerSet, Target, TargetOptions};
+
+// See https://developer.android.com/ndk/guides/abis.html#arm64-v8a
+// for target ABI requirements.
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-linux-android".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ max_atomic_width: Some(128),
+ // As documented in https://developer.android.com/ndk/guides/cpu-features.html
+ // NEON (ASIMD) and FP must be present on all Android aarch64 targets.
+ features: "+neon,+fp-armv8".into(),
+ supported_sanitizers: SanitizerSet::CFI
+ | SanitizerSet::HWADDRESS
+ | SanitizerSet::MEMTAG
+ | SanitizerSet::SHADOWCALLSTACK
+ | SanitizerSet::ADDRESS,
+ ..super::android_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding.rs b/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding.rs
new file mode 100644
index 000000000..1b7161fbb
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding.rs
@@ -0,0 +1,26 @@
+use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelroLevel, Target, TargetOptions};
+
+const LINKER_SCRIPT: &str = include_str!("./aarch64_nintendo_switch_freestanding_linker_script.ld");
+
+/// A base target for Nintendo Switch devices using a pure LLVM toolchain.
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-unknown-none".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ link_script: Some(LINKER_SCRIPT.into()),
+ os: "horizon".into(),
+ max_atomic_width: Some(128),
+ panic_strategy: PanicStrategy::Abort,
+ position_independent_executables: true,
+ dynamic_linking: true,
+ executables: true,
+ relro_level: RelroLevel::Off,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding_linker_script.ld b/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding_linker_script.ld
new file mode 100644
index 000000000..f3441e659
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_nintendo_switch_freestanding_linker_script.ld
@@ -0,0 +1,78 @@
+OUTPUT_FORMAT(elf64-littleaarch64)
+OUTPUT_ARCH(aarch64)
+ENTRY(_start)
+
+PHDRS
+{
+ text PT_LOAD FLAGS(5);
+ rodata PT_LOAD FLAGS(4);
+ data PT_LOAD FLAGS(6);
+ bss PT_LOAD FLAGS(6);
+ dynamic PT_DYNAMIC;
+}
+
+SECTIONS
+{
+ . = 0;
+
+ .text : ALIGN(0x1000) {
+ HIDDEN(__text_start = .);
+ KEEP(*(.text.jmp))
+
+ . = 0x80;
+
+ *(.text .text.*)
+ *(.plt .plt.*)
+ }
+
+ /* Read-only sections */
+
+ . = ALIGN(0x1000);
+
+ .module_name : { *(.module_name) } :rodata
+
+ .rodata : { *(.rodata .rodata.*) } :rodata
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym .dynsym.*) }
+ .dynstr : { *(.dynstr .dynstr.*) }
+ .rela.dyn : { *(.rela.dyn) }
+
+ .eh_frame : {
+ HIDDEN(__eh_frame_start = .);
+ *(.eh_frame .eh_frame.*)
+ HIDDEN(__eh_frame_end = .);
+ }
+
+ .eh_frame_hdr : {
+ HIDDEN(__eh_frame_hdr_start = .);
+ *(.eh_frame_hdr .eh_frame_hdr.*)
+ HIDDEN(__eh_frame_hdr_end = .);
+ }
+
+ /* Read-write sections */
+
+ . = ALIGN(0x1000);
+
+ .data : {
+ *(.data .data.*)
+ *(.got .got.*)
+ *(.got.plt .got.plt.*)
+ } :data
+
+ .dynamic : {
+ HIDDEN(__dynamic_start = .);
+ *(.dynamic)
+ }
+
+ /* BSS section */
+
+ . = ALIGN(0x1000);
+
+ .bss : {
+ HIDDEN(__bss_start = .);
+ *(.bss .bss.*)
+ *(COMMON)
+ . = ALIGN(8);
+ HIDDEN(__bss_end = .);
+ } :bss
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_pc_windows_gnullvm.rs b/compiler/rustc_target/src/spec/aarch64_pc_windows_gnullvm.rs
new file mode 100644
index 000000000..59c6a95c2
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_pc_windows_gnullvm.rs
@@ -0,0 +1,16 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::windows_gnullvm_base::opts();
+ base.max_atomic_width = Some(64);
+ base.features = "+neon,+fp-armv8".into();
+ base.linker = Some("aarch64-w64-mingw32-clang".into());
+
+ Target {
+ llvm_target: "aarch64-pc-windows-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs
new file mode 100644
index 000000000..856ec4fb0
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_pc_windows_msvc.rs
@@ -0,0 +1,15 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::windows_msvc_base::opts();
+ base.max_atomic_width = Some(64);
+ base.features = "+neon,+fp-armv8".into();
+
+ Target {
+ llvm_target: "aarch64-pc-windows-msvc".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/aarch64_unknown_freebsd.rs
new file mode 100644
index 000000000..2f39c4862
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_freebsd.rs
@@ -0,0 +1,18 @@
+use crate::spec::{SanitizerSet, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-unknown-freebsd".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ max_atomic_width: Some(128),
+ supported_sanitizers: SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
+ | SanitizerSet::MEMORY
+ | SanitizerSet::THREAD,
+ ..super::freebsd_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs b/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs
new file mode 100644
index 000000000..1d7269c8d
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_hermit.rs
@@ -0,0 +1,15 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::hermit_base::opts();
+ base.max_atomic_width = Some(128);
+ base.features = "+strict-align,+neon,+fp-armv8".into();
+
+ Target {
+ llvm_target: "aarch64-unknown-hermit".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs
new file mode 100644
index 000000000..3006044d5
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu.rs
@@ -0,0 +1,23 @@
+use crate::spec::{SanitizerSet, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ features: "+outline-atomics".into(),
+ mcount: "\u{1}_mcount".into(),
+ max_atomic_width: Some(128),
+ supported_sanitizers: SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
+ | SanitizerSet::LEAK
+ | SanitizerSet::MEMORY
+ | SanitizerSet::MEMTAG
+ | SanitizerSet::THREAD
+ | SanitizerSet::HWADDRESS,
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu_ilp32.rs b/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu_ilp32.rs
new file mode 100644
index 000000000..63023df1d
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_linux_gnu_ilp32.rs
@@ -0,0 +1,17 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-unknown-linux-gnu_ilp32".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ abi: "ilp32".into(),
+ features: "+outline-atomics".into(),
+ max_atomic_width: Some(128),
+ mcount: "\u{1}_mcount".into(),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs
new file mode 100644
index 000000000..002d0dac2
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_linux_musl.rs
@@ -0,0 +1,14 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.max_atomic_width = Some(128);
+
+ Target {
+ llvm_target: "aarch64-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions { mcount: "\u{1}_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs
new file mode 100644
index 000000000..703f75022
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_netbsd.rs
@@ -0,0 +1,15 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-unknown-netbsd".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ mcount: "__mcount".into(),
+ max_atomic_width: Some(128),
+ ..super::netbsd_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_none.rs b/compiler/rustc_target/src/spec/aarch64_unknown_none.rs
new file mode 100644
index 000000000..d3fd7051a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_none.rs
@@ -0,0 +1,29 @@
+// Generic AArch64 target for bare-metal code - Floating point enabled
+//
+// Can be used in conjunction with the `target-feature` and
+// `target-cpu` compiler flags to opt in to more hardware-specific
+// features.
+//
+// For example, `-C target-cpu=cortex-a53`.
+
+use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let opts = TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ features: "+strict-align,+neon,+fp-armv8".into(),
+ relocation_model: RelocModel::Static,
+ disable_redzone: true,
+ max_atomic_width: Some(128),
+ panic_strategy: PanicStrategy::Abort,
+ ..Default::default()
+ };
+ Target {
+ llvm_target: "aarch64-unknown-none".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: opts,
+ }
+}
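
Since this spec leaves os unset (..Default::default() yields "none") and sets panic_strategy to Abort, crates built for it are #![no_std] and must supply their own panic handler and entry point. A hedged minimal sketch of such a crate, built with --target aarch64-unknown-none (the _start name and empty bodies are illustrative; real firmware provides its own startup code and linker script):

#![no_std]
#![no_main]

use core::panic::PanicInfo;

// panic_strategy is Abort and there is no OS runtime, so the crate
// must define what a panic does.
#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
    loop {}
}

// Likewise there is no runtime to call main; the entry point is whatever
// the linker script designates.
#[no_mangle]
pub extern "C" fn _start() -> ! {
    loop {}
}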
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs b/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs
new file mode 100644
index 000000000..6316abe1b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_none_softfloat.rs
@@ -0,0 +1,30 @@
+// Generic AArch64 target for bare-metal code - Floating point disabled
+//
+// Can be used in conjunction with the `target-feature` and
+// `target-cpu` compiler flags to opt in to more hardware-specific
+// features.
+//
+// For example, `-C target-cpu=cortex-a53`.
+
+use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let opts = TargetOptions {
+ abi: "softfloat".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ features: "+strict-align,-neon,-fp-armv8".into(),
+ relocation_model: RelocModel::Static,
+ disable_redzone: true,
+ max_atomic_width: Some(128),
+ panic_strategy: PanicStrategy::Abort,
+ ..Default::default()
+ };
+ Target {
+ llvm_target: "aarch64-unknown-none".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: opts,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/aarch64_unknown_openbsd.rs
new file mode 100644
index 000000000..3d99040f0
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_openbsd.rs
@@ -0,0 +1,11 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-unknown-openbsd".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions { max_atomic_width: Some(128), ..super::openbsd_base::opts() },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_redox.rs b/compiler/rustc_target/src/spec/aarch64_unknown_redox.rs
new file mode 100644
index 000000000..6c9be4c8e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_redox.rs
@@ -0,0 +1,14 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::redox_base::opts();
+ base.max_atomic_width = Some(128);
+
+ Target {
+ llvm_target: "aarch64-unknown-redox".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_unknown_uefi.rs b/compiler/rustc_target/src/spec/aarch64_unknown_uefi.rs
new file mode 100644
index 000000000..162b091b2
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_unknown_uefi.rs
@@ -0,0 +1,20 @@
+// This defines the aarch64 target for UEFI systems as described in the UEFI specification. See the
+// uefi-base module for generic UEFI options.
+
+use super::uefi_msvc_base;
+use crate::spec::{LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = uefi_msvc_base::opts();
+
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Msvc, &["/machine:arm64"]);
+
+ Target {
+ llvm_target: "aarch64-unknown-windows".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs b/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs
new file mode 100644
index 000000000..54247fd93
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_uwp_windows_msvc.rs
@@ -0,0 +1,14 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::windows_uwp_msvc_base::opts();
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "aarch64-pc-windows-msvc".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/aarch64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/aarch64_wrs_vxworks.rs
new file mode 100644
index 000000000..e118553df
--- /dev/null
+++ b/compiler/rustc_target/src/spec/aarch64_wrs_vxworks.rs
@@ -0,0 +1,11 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "aarch64-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions { max_atomic_width: Some(128), ..super::vxworks_base::opts() },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/abi.rs b/compiler/rustc_target/src/spec/abi.rs
new file mode 100644
index 000000000..337554dc9
--- /dev/null
+++ b/compiler/rustc_target/src/spec/abi.rs
@@ -0,0 +1,175 @@
+use std::fmt;
+
+use rustc_macros::HashStable_Generic;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)]
+#[derive(HashStable_Generic, Encodable, Decodable)]
+pub enum Abi {
+ // Some of the ABIs come first because every time we add a new ABI, we have to re-bless all the
+ // hashing tests. These are used in many places, so giving them stable values reduces test
+ // churn. The specific values are meaningless.
+ Rust,
+ C { unwind: bool },
+ Cdecl { unwind: bool },
+ Stdcall { unwind: bool },
+ Fastcall { unwind: bool },
+ Vectorcall { unwind: bool },
+ Thiscall { unwind: bool },
+ Aapcs { unwind: bool },
+ Win64 { unwind: bool },
+ SysV64 { unwind: bool },
+ PtxKernel,
+ Msp430Interrupt,
+ X86Interrupt,
+ AmdGpuKernel,
+ EfiApi,
+ AvrInterrupt,
+ AvrNonBlockingInterrupt,
+ CCmseNonSecureCall,
+ Wasm,
+ System { unwind: bool },
+ RustIntrinsic,
+ RustCall,
+ PlatformIntrinsic,
+ Unadjusted,
+ RustCold,
+}
+
+#[derive(Copy, Clone)]
+pub struct AbiData {
+ abi: Abi,
+
+ /// The name of this ABI as we like it to be called.
+ name: &'static str,
+}
+
+#[allow(non_upper_case_globals)]
+const AbiDatas: &[AbiData] = &[
+ AbiData { abi: Abi::Rust, name: "Rust" },
+ AbiData { abi: Abi::C { unwind: false }, name: "C" },
+ AbiData { abi: Abi::C { unwind: true }, name: "C-unwind" },
+ AbiData { abi: Abi::Cdecl { unwind: false }, name: "cdecl" },
+ AbiData { abi: Abi::Cdecl { unwind: true }, name: "cdecl-unwind" },
+ AbiData { abi: Abi::Stdcall { unwind: false }, name: "stdcall" },
+ AbiData { abi: Abi::Stdcall { unwind: true }, name: "stdcall-unwind" },
+ AbiData { abi: Abi::Fastcall { unwind: false }, name: "fastcall" },
+ AbiData { abi: Abi::Fastcall { unwind: true }, name: "fastcall-unwind" },
+ AbiData { abi: Abi::Vectorcall { unwind: false }, name: "vectorcall" },
+ AbiData { abi: Abi::Vectorcall { unwind: true }, name: "vectorcall-unwind" },
+ AbiData { abi: Abi::Thiscall { unwind: false }, name: "thiscall" },
+ AbiData { abi: Abi::Thiscall { unwind: true }, name: "thiscall-unwind" },
+ AbiData { abi: Abi::Aapcs { unwind: false }, name: "aapcs" },
+ AbiData { abi: Abi::Aapcs { unwind: true }, name: "aapcs-unwind" },
+ AbiData { abi: Abi::Win64 { unwind: false }, name: "win64" },
+ AbiData { abi: Abi::Win64 { unwind: true }, name: "win64-unwind" },
+ AbiData { abi: Abi::SysV64 { unwind: false }, name: "sysv64" },
+ AbiData { abi: Abi::SysV64 { unwind: true }, name: "sysv64-unwind" },
+ AbiData { abi: Abi::PtxKernel, name: "ptx-kernel" },
+ AbiData { abi: Abi::Msp430Interrupt, name: "msp430-interrupt" },
+ AbiData { abi: Abi::X86Interrupt, name: "x86-interrupt" },
+ AbiData { abi: Abi::AmdGpuKernel, name: "amdgpu-kernel" },
+ AbiData { abi: Abi::EfiApi, name: "efiapi" },
+ AbiData { abi: Abi::AvrInterrupt, name: "avr-interrupt" },
+ AbiData { abi: Abi::AvrNonBlockingInterrupt, name: "avr-non-blocking-interrupt" },
+ AbiData { abi: Abi::CCmseNonSecureCall, name: "C-cmse-nonsecure-call" },
+ AbiData { abi: Abi::Wasm, name: "wasm" },
+ AbiData { abi: Abi::System { unwind: false }, name: "system" },
+ AbiData { abi: Abi::System { unwind: true }, name: "system-unwind" },
+ AbiData { abi: Abi::RustIntrinsic, name: "rust-intrinsic" },
+ AbiData { abi: Abi::RustCall, name: "rust-call" },
+ AbiData { abi: Abi::PlatformIntrinsic, name: "platform-intrinsic" },
+ AbiData { abi: Abi::Unadjusted, name: "unadjusted" },
+ AbiData { abi: Abi::RustCold, name: "rust-cold" },
+];
+
+/// Returns the ABI with the given name (if any).
+pub fn lookup(name: &str) -> Option<Abi> {
+ AbiDatas.iter().find(|abi_data| name == abi_data.name).map(|&x| x.abi)
+}
+
+pub fn all_names() -> Vec<&'static str> {
+ AbiDatas.iter().map(|d| d.name).collect()
+}
+
+impl Abi {
+ /// Default ABI chosen for `extern fn` declarations without an explicit ABI.
+ pub const FALLBACK: Abi = Abi::C { unwind: false };
+
+ #[inline]
+ pub fn index(self) -> usize {
+ // N.B., this ordering MUST match the AbiDatas array above.
+ // (This is ensured by the test indices_are_correct().)
+ use Abi::*;
+ let i = match self {
+ // Cross-platform ABIs
+ Rust => 0,
+ C { unwind: false } => 1,
+ C { unwind: true } => 2,
+ // Platform-specific ABIs
+ Cdecl { unwind: false } => 3,
+ Cdecl { unwind: true } => 4,
+ Stdcall { unwind: false } => 5,
+ Stdcall { unwind: true } => 6,
+ Fastcall { unwind: false } => 7,
+ Fastcall { unwind: true } => 8,
+ Vectorcall { unwind: false } => 9,
+ Vectorcall { unwind: true } => 10,
+ Thiscall { unwind: false } => 11,
+ Thiscall { unwind: true } => 12,
+ Aapcs { unwind: false } => 13,
+ Aapcs { unwind: true } => 14,
+ Win64 { unwind: false } => 15,
+ Win64 { unwind: true } => 16,
+ SysV64 { unwind: false } => 17,
+ SysV64 { unwind: true } => 18,
+ PtxKernel => 19,
+ Msp430Interrupt => 20,
+ X86Interrupt => 21,
+ AmdGpuKernel => 22,
+ EfiApi => 23,
+ AvrInterrupt => 24,
+ AvrNonBlockingInterrupt => 25,
+ CCmseNonSecureCall => 26,
+ Wasm => 27,
+ // Cross-platform ABIs
+ System { unwind: false } => 28,
+ System { unwind: true } => 29,
+ RustIntrinsic => 30,
+ RustCall => 31,
+ PlatformIntrinsic => 32,
+ Unadjusted => 33,
+ RustCold => 34,
+ };
+ debug_assert!(
+ AbiDatas
+ .iter()
+ .enumerate()
+ .find(|(_, AbiData { abi, .. })| *abi == self)
+ .map(|(index, _)| index)
+ .expect("abi variant has associated data")
+ == i,
+ "Abi index did not match `AbiDatas` ordering"
+ );
+ i
+ }
+
+ #[inline]
+ pub fn data(self) -> &'static AbiData {
+ &AbiDatas[self.index()]
+ }
+
+ pub fn name(self) -> &'static str {
+ self.data().name
+ }
+}
+
+impl fmt::Display for Abi {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "\"{}\"", self.name())
+ }
+}
diff --git a/compiler/rustc_target/src/spec/abi/tests.rs b/compiler/rustc_target/src/spec/abi/tests.rs
new file mode 100644
index 000000000..8bea5e5ef
--- /dev/null
+++ b/compiler/rustc_target/src/spec/abi/tests.rs
@@ -0,0 +1,27 @@
+use super::*;
+
+#[allow(non_snake_case)]
+#[test]
+fn lookup_Rust() {
+ let abi = lookup("Rust");
+ assert!(abi.is_some() && abi.unwrap().data().name == "Rust");
+}
+
+#[test]
+fn lookup_cdecl() {
+ let abi = lookup("cdecl");
+ assert!(abi.is_some() && abi.unwrap().data().name == "cdecl");
+}
+
+#[test]
+fn lookup_baz() {
+ let abi = lookup("baz");
+ assert!(abi.is_none());
+}
+
+#[test]
+fn indices_are_correct() {
+ for (i, abi_data) in AbiDatas.iter().enumerate() {
+ assert_eq!(i, abi_data.abi.index());
+ }
+}
diff --git a/compiler/rustc_target/src/spec/android_base.rs b/compiler/rustc_target/src/spec/android_base.rs
new file mode 100644
index 000000000..dc06597db
--- /dev/null
+++ b/compiler/rustc_target/src/spec/android_base.rs
@@ -0,0 +1,15 @@
+use crate::spec::TargetOptions;
+
+pub fn opts() -> TargetOptions {
+ let mut base = super::linux_base::opts();
+ base.os = "android".into();
+ base.default_dwarf_version = 2;
+ base.position_independent_executables = true;
+ base.has_thread_local = false;
+ // This is for backward compatibility; see https://github.com/rust-lang/rust/issues/49867
+ // for context. (At that time, there was no `-C force-unwind-tables`, so the only solution
+ // was to always emit `uwtable`).
+ base.default_uwtable = true;
+ base.crt_static_respected = false;
+ base
+}
diff --git a/compiler/rustc_target/src/spec/apple_base.rs b/compiler/rustc_target/src/spec/apple_base.rs
new file mode 100644
index 000000000..15e4fb9be
--- /dev/null
+++ b/compiler/rustc_target/src/spec/apple_base.rs
@@ -0,0 +1,143 @@
+use std::{borrow::Cow, env};
+
+use crate::spec::{cvs, FramePointer, LldFlavor, SplitDebuginfo, TargetOptions};
+
+pub fn opts(os: &'static str) -> TargetOptions {
+ // ELF TLS is only available in macOS 10.7+. If you try to compile for 10.6
+ // either the linker will complain if it is used or the binary will end up
+ // segfaulting at runtime when run on 10.6. Rust by default supports macOS
+ // 10.7+, but there is a standard environment variable,
+ // MACOSX_DEPLOYMENT_TARGET, which is used to signal targeting older
+ // versions of macOS. For example, compiling on 10.10 with
+ // MACOSX_DEPLOYMENT_TARGET set to 10.6 will cause the linker to generate
+ // warnings about the usage of ELF TLS.
+ //
+ // Here we detect what version is being requested, defaulting to 10.7. ELF
+ // TLS is flagged as enabled if it looks to be supported. The architecture
+ // only matters for the default deployment target, which is 11.0 for ARM64
+ // and 10.7 for everything else.
+ let has_thread_local = macos_deployment_target("x86_64") >= (10, 7);
+
+ TargetOptions {
+ os: os.into(),
+ vendor: "apple".into(),
+ // macOS has -dead_strip, which doesn't rely on function_sections
+ function_sections: false,
+ dynamic_linking: true,
+ linker_is_gnu: false,
+ families: cvs!["unix"],
+ is_like_osx: true,
+ default_dwarf_version: 2,
+ frame_pointer: FramePointer::Always,
+ has_rpath: true,
+ dll_suffix: ".dylib".into(),
+ archive_format: "darwin".into(),
+ has_thread_local,
+ abi_return_struct_as_int: true,
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ lld_flavor: LldFlavor::Ld64,
+
+ // The historical default for macOS targets is to run `dsymutil` which
+ // generates a packed version of debuginfo split from the main file.
+ split_debuginfo: SplitDebuginfo::Packed,
+
+ // This environment variable is pretty magical but is intended for
+ // producing deterministic builds. This was first discovered to be used
+ // by the `ar` tool as a way to control whether or not mtime entries in
+ // the archive headers were set to zero or not. It appears that
+ // eventually the linker got updated to do the same thing and now reads
+ // this environment variable too in recent versions.
+ //
+ // For some more info see the commentary on #47086
+ link_env: Cow::Borrowed(&[(Cow::Borrowed("ZERO_AR_DATE"), Cow::Borrowed("1"))]),
+
+ ..Default::default()
+ }
+}
+
+fn deployment_target(var_name: &str) -> Option<(u32, u32)> {
+ let deployment_target = env::var(var_name).ok();
+ deployment_target
+ .as_ref()
+ .and_then(|s| s.split_once('.'))
+ .and_then(|(a, b)| a.parse::<u32>().and_then(|a| b.parse::<u32>().map(|b| (a, b))).ok())
+}
+
+fn macos_default_deployment_target(arch: &str) -> (u32, u32) {
+ if arch == "arm64" { (11, 0) } else { (10, 7) }
+}
+
+fn macos_deployment_target(arch: &str) -> (u32, u32) {
+ deployment_target("MACOSX_DEPLOYMENT_TARGET")
+ .unwrap_or_else(|| macos_default_deployment_target(arch))
+}
+
+pub fn macos_llvm_target(arch: &str) -> String {
+ let (major, minor) = macos_deployment_target(arch);
+ format!("{}-apple-macosx{}.{}.0", arch, major, minor)
+}
+
+pub fn macos_link_env_remove() -> Vec<Cow<'static, str>> {
+ let mut env_remove = Vec::with_capacity(2);
+ // Remove the `SDKROOT` environment variable if it's clearly set for the wrong platform, which
+ // may occur when we're linking a custom build script while targeting iOS for example.
+ if let Ok(sdkroot) = env::var("SDKROOT") {
+ if sdkroot.contains("iPhoneOS.platform") || sdkroot.contains("iPhoneSimulator.platform") {
+ env_remove.push("SDKROOT".into())
+ }
+ }
+ // Additionally, `IPHONEOS_DEPLOYMENT_TARGET` must not be set when using the Xcode linker at
+ // "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/ld",
+ // although this is apparently ignored when using the linker at "/usr/bin/ld".
+ env_remove.push("IPHONEOS_DEPLOYMENT_TARGET".into());
+ env_remove
+}
+
+fn ios_deployment_target() -> (u32, u32) {
+ deployment_target("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or((7, 0))
+}
+
+pub fn ios_llvm_target(arch: &str) -> String {
+ // Modern iOS tooling extracts information about deployment target
+ // from LC_BUILD_VERSION. This load command will only be emitted when
+ // we build with a version specific `llvm_target`, with the version
+ // set high enough. Luckily, one LC_BUILD_VERSION is enough for Xcode
+ // to pick it up (since std and core are still built with the fallback
+ // of version 7.0 and hence emit the old LC_IPHONE_MIN_VERSION).
+ let (major, minor) = ios_deployment_target();
+ format!("{}-apple-ios{}.{}.0", arch, major, minor)
+}
+
+pub fn ios_lld_platform_version() -> String {
+ let (major, minor) = ios_deployment_target();
+ format!("{}.{}", major, minor)
+}
+
+pub fn ios_sim_llvm_target(arch: &str) -> String {
+ let (major, minor) = ios_deployment_target();
+ format!("{}-apple-ios{}.{}.0-simulator", arch, major, minor)
+}
+
+fn tvos_deployment_target() -> (u32, u32) {
+ deployment_target("TVOS_DEPLOYMENT_TARGET").unwrap_or((7, 0))
+}
+
+pub fn tvos_lld_platform_version() -> String {
+ let (major, minor) = tvos_deployment_target();
+ format!("{}.{}", major, minor)
+}
+
+fn watchos_deployment_target() -> (u32, u32) {
+ deployment_target("WATCHOS_DEPLOYMENT_TARGET").unwrap_or((5, 0))
+}
+
+pub fn watchos_lld_platform_version() -> String {
+ let (major, minor) = watchos_deployment_target();
+ format!("{}.{}", major, minor)
+}
+
+pub fn watchos_sim_llvm_target(arch: &str) -> String {
+ let (major, minor) = watchos_deployment_target();
+ format!("{}-apple-watchos{}.{}.0-simulator", arch, major, minor)
+}
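
deployment_target above parses a MAJOR.MINOR pair out of an environment variable, and callers compare the result as a tuple. A standalone sketch of the same parsing, minus the env lookup:

// Parse "MAJOR.MINOR" the way `deployment_target` does.
fn parse_deployment_target(s: &str) -> Option<(u32, u32)> {
    let (major, minor) = s.split_once('.')?;
    major.parse::<u32>().and_then(|a| minor.parse::<u32>().map(|b| (a, b))).ok()
}

fn main() {
    assert_eq!(parse_deployment_target("10.7"), Some((10, 7)));
    assert_eq!(parse_deployment_target("ten"), None);
    // Tuples compare lexicographically, which is how `has_thread_local`
    // is decided above: (11, 0) >= (10, 7).
    assert!(parse_deployment_target("11.0").unwrap() >= (10, 7));
}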
diff --git a/compiler/rustc_target/src/spec/apple_sdk_base.rs b/compiler/rustc_target/src/spec/apple_sdk_base.rs
new file mode 100644
index 000000000..d77558f0f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/apple_sdk_base.rs
@@ -0,0 +1,113 @@
+use crate::spec::{cvs, LinkArgs, LinkerFlavor, LldFlavor, TargetOptions};
+use std::borrow::Cow;
+
+use Arch::*;
+#[allow(non_camel_case_types)]
+#[derive(Copy, Clone)]
+pub enum Arch {
+ Armv7,
+ Armv7k,
+ Armv7s,
+ Arm64,
+ Arm64_32,
+ I386,
+ X86_64,
+ X86_64_macabi,
+ Arm64_macabi,
+ Arm64_sim,
+}
+
+fn target_arch_name(arch: Arch) -> &'static str {
+ match arch {
+ Armv7 => "armv7",
+ Armv7k => "armv7k",
+ Armv7s => "armv7s",
+ Arm64 | Arm64_macabi | Arm64_sim => "arm64",
+ Arm64_32 => "arm64_32",
+ I386 => "i386",
+ X86_64 | X86_64_macabi => "x86_64",
+ }
+}
+
+fn target_abi(arch: Arch) -> &'static str {
+ match arch {
+ Armv7 | Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | X86_64 => "",
+ X86_64_macabi | Arm64_macabi => "macabi",
+ Arm64_sim => "sim",
+ }
+}
+
+fn target_cpu(arch: Arch) -> &'static str {
+ match arch {
+ Armv7 => "cortex-a8", // iOS7 is supported on iPhone 4 and higher
+ Armv7k => "cortex-a8",
+ Armv7s => "cortex-a9",
+ Arm64 => "apple-a7",
+ Arm64_32 => "apple-s4",
+ I386 => "yonah",
+ X86_64 => "core2",
+ X86_64_macabi => "core2",
+ Arm64_macabi => "apple-a12",
+ Arm64_sim => "apple-a12",
+ }
+}
+
+fn link_env_remove(arch: Arch) -> Cow<'static, [Cow<'static, str>]> {
+ match arch {
+ Armv7 | Armv7k | Armv7s | Arm64 | Arm64_32 | I386 | X86_64 | Arm64_sim => {
+ cvs!["MACOSX_DEPLOYMENT_TARGET"]
+ }
+ X86_64_macabi | Arm64_macabi => cvs!["IPHONEOS_DEPLOYMENT_TARGET"],
+ }
+}
+
+fn pre_link_args(os: &'static str, arch: Arch) -> LinkArgs {
+ let mut args = LinkArgs::new();
+
+ let target_abi = target_abi(arch);
+
+ let platform_name = match target_abi {
+ "sim" => format!("{}-simulator", os),
+ "macabi" => "mac-catalyst".to_string(),
+ _ => os.to_string(),
+ };
+
+ let platform_version = match os.as_ref() {
+ "ios" => super::apple_base::ios_lld_platform_version(),
+ "tvos" => super::apple_base::tvos_lld_platform_version(),
+ "watchos" => super::apple_base::watchos_lld_platform_version(),
+ _ => unreachable!(),
+ };
+
+ let arch_str = target_arch_name(arch);
+
+ if target_abi != "macabi" {
+ args.insert(LinkerFlavor::Gcc, vec!["-arch".into(), arch_str.into()]);
+ }
+
+ args.insert(
+ LinkerFlavor::Lld(LldFlavor::Ld64),
+ vec![
+ "-arch".into(),
+ arch_str.into(),
+ "-platform_version".into(),
+ platform_name.into(),
+ platform_version.clone().into(),
+ platform_version.into(),
+ ],
+ );
+
+ args
+}
+
+pub fn opts(os: &'static str, arch: Arch) -> TargetOptions {
+ TargetOptions {
+ abi: target_abi(arch).into(),
+ cpu: target_cpu(arch).into(),
+ dynamic_linking: false,
+ pre_link_args: pre_link_args(os, arch),
+ link_env_remove: link_env_remove(arch),
+ has_thread_local: false,
+ ..super::apple_base::opts(os)
+ }
+}
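
pre_link_args assembles ld64's -platform_version flag, which takes a platform name followed by two versions (minimum and SDK; this code passes the same value for both). A rough standalone sketch with the version passed in rather than read from the *_DEPLOYMENT_TARGET variables:

// Hypothetical miniature of `pre_link_args` for the ld64 flavor.
fn ld64_args(os: &str, abi: &str, arch: &str, version: &str) -> Vec<String> {
    let platform_name = match abi {
        "sim" => format!("{}-simulator", os),
        "macabi" => "mac-catalyst".to_string(),
        _ => os.to_string(),
    };
    // ld64 expects `-platform_version <name> <min_version> <sdk_version>`;
    // the same value serves for both versions here, as above.
    vec![
        "-arch".into(),
        arch.into(),
        "-platform_version".into(),
        platform_name,
        version.into(),
        version.into(),
    ]
}

fn main() {
    let args = ld64_args("ios", "sim", "arm64", "7.0");
    assert_eq!(args[3], "ios-simulator");
}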
diff --git a/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs b/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs
new file mode 100644
index 000000000..7b23fe1c4
--- /dev/null
+++ b/compiler/rustc_target/src/spec/arm64_32_apple_watchos.rs
@@ -0,0 +1,28 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = opts("watchos", Arch::Arm64_32);
+ Target {
+ llvm_target: "arm64_32-apple-watchos".into(),
+ pointer_width: 32,
+ data_layout: "e-m:o-p:32:32-i64:64-i128:128-n32:64-S128".into(),
+ arch: "aarch64".into(),
+ options: TargetOptions {
+ features: "+neon,+fp-armv8,+apple-a7".into(),
+ max_atomic_width: Some(64),
+ forces_embed_bitcode: true,
+ // These arguments are not actually invoked - they just have
+ // to look right to pass App Store validation.
+ bitcode_llvm_cmdline: "-triple\0\
+ arm64_32-apple-watchos5.0.0\0\
+ -emit-obj\0\
+ -disable-llvm-passes\0\
+ -target-abi\0\
+ darwinpcs\0\
+ -Os\0"
+ .into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/arm_linux_androideabi.rs b/compiler/rustc_target/src/spec/arm_linux_androideabi.rs
new file mode 100644
index 000000000..bbf1fa315
--- /dev/null
+++ b/compiler/rustc_target/src/spec/arm_linux_androideabi.rs
@@ -0,0 +1,18 @@
+use crate::spec::{SanitizerSet, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "arm-linux-androideabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ // https://developer.android.com/ndk/guides/abis.html#armeabi
+ features: "+strict-align,+v5te".into(),
+ supported_sanitizers: SanitizerSet::ADDRESS,
+ max_atomic_width: Some(32),
+ ..super::android_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs
new file mode 100644
index 000000000..c0f1827ad
--- /dev/null
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabi.rs
@@ -0,0 +1,17 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "arm-unknown-linux-gnueabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+strict-align,+v6".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs
new file mode 100644
index 000000000..79b8958c2
--- /dev/null
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_gnueabihf.rs
@@ -0,0 +1,17 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "arm-unknown-linux-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ features: "+strict-align,+v6,+vfp2,-d32".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs
new file mode 100644
index 000000000..3ef441d6a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabi.rs
@@ -0,0 +1,22 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ // It's important we use "gnueabi" and not "musleabi" here. LLVM uses it
+ // to determine the calling convention and float ABI, and it doesn't
+ // support the "musleabi" value.
+ llvm_target: "arm-unknown-linux-gnueabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ // Most of these settings are copied from the arm_unknown_linux_gnueabi
+ // target.
+ features: "+strict-align,+v6".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}mcount".into(),
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs
new file mode 100644
index 000000000..eb6660d4c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/arm_unknown_linux_musleabihf.rs
@@ -0,0 +1,22 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ // It's important we use "gnueabihf" and not "musleabihf" here. LLVM
+ // uses it to determine the calling convention and float ABI, and it
+ // doesn't support the "musleabihf" value.
+ llvm_target: "arm-unknown-linux-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ // Most of these settings are copied from the arm_unknown_linux_gnueabihf
+ // target.
+ features: "+strict-align,+v6,+vfp2,-d32".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}mcount".into(),
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
new file mode 100644
index 000000000..511693abe
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armebv7r_none_eabi.rs
@@ -0,0 +1,27 @@
+// Targets the Big endian Cortex-R4/R5 processor (ARMv7-R)
+
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armebv7r-unknown-none-eabi".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ endian: Endian::Big,
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ relocation_model: RelocModel::Static,
+ panic_strategy: PanicStrategy::Abort,
+ max_atomic_width: Some(32),
+ emit_debug_gdb_scripts: false,
+ // GCC and Clang default to 8 for arm-none here
+ c_enum_min_bits: 8,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
new file mode 100644
index 000000000..5df4a0a15
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armebv7r_none_eabihf.rs
@@ -0,0 +1,28 @@
+// Targets the Cortex-R4F/R5F processor (ARMv7-R)
+
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armebv7r-unknown-none-eabihf".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ endian: Endian::Big,
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ relocation_model: RelocModel::Static,
+ panic_strategy: PanicStrategy::Abort,
+ features: "+vfp3,-d32,-fp16".into(),
+ max_atomic_width: Some(32),
+ emit_debug_gdb_scripts: false,
+ // GCC and Clang default to 8 for arm-none here
+ c_enum_min_bits: 8,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs
new file mode 100644
index 000000000..1de63a920
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv4t_unknown_linux_gnueabi.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv4t-unknown-linux-gnueabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+soft-float,+strict-align".into(),
+ // Atomic operations provided by compiler-builtins
+ max_atomic_width: Some(32),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ has_thumb_interworking: true,
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs
new file mode 100644
index 000000000..b94056310
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv5te_unknown_linux_gnueabi.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv5te-unknown-linux-gnueabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+soft-float,+strict-align".into(),
+ // Atomic operations provided by compiler-builtins
+ max_atomic_width: Some(32),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ has_thumb_interworking: true,
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs b/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs
new file mode 100644
index 000000000..2530971b5
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv5te_unknown_linux_musleabi.rs
@@ -0,0 +1,23 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+        // It's important we use "gnueabi" and not "musleabi" here. LLVM uses
+        // the triple to determine the calling convention and float ABI, and
+        // it doesn't support a "musleabi" value.
+        llvm_target: "armv5te-unknown-linux-gnueabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+soft-float,+strict-align".into(),
+ // Atomic operations provided by compiler-builtins
+ max_atomic_width: Some(32),
+ mcount: "\u{1}mcount".into(),
+ has_thumb_interworking: true,
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv5te_unknown_linux_uclibceabi.rs b/compiler/rustc_target/src/spec/armv5te_unknown_linux_uclibceabi.rs
new file mode 100644
index 000000000..a51be10a3
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv5te_unknown_linux_uclibceabi.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv5te-unknown-linux-uclibcgnueabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+soft-float,+strict-align".into(),
+ // Atomic operations provided by compiler-builtins
+ max_atomic_width: Some(32),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ has_thumb_interworking: true,
+ ..super::linux_uclibc_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs b/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs
new file mode 100644
index 000000000..b7cfccc8b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv6_unknown_freebsd.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv6-unknown-freebsd-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ // FIXME: change env to "gnu" when cfg_target_abi becomes stable
+ env: "gnueabihf".into(),
+ features: "+v6,+vfp2,-d32".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ ..super::freebsd_base::opts()
+ },
+ }
+}
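
The `cfg_target_abi` FIXME above (it recurs in several specs below) exists because, until `target_abi` is stable, user code can only observe the float ABI through `target_env`, so the ABI is smuggled into the env value. A sketch of the before/after, with `target_abi` nightly-gated under `cfg_target_abi` as of this snapshot (function names are illustrative):

    // Today: the ABI has to be inferred from the combined env value.
    #[cfg(all(target_os = "freebsd", target_env = "gnueabihf"))]
    fn hard_float_path_today() {}

    // Once cfg_target_abi is stable: select on the ABI directly.
    #[cfg(all(target_os = "freebsd", target_abi = "eabihf"))]
    fn hard_float_path_later() {}
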
diff --git a/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs b/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs
new file mode 100644
index 000000000..6e26f686f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv6_unknown_netbsd_eabihf.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv6-unknown-netbsdelf-eabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ // FIXME: remove env when cfg_target_abi becomes stable
+ env: "eabihf".into(),
+ features: "+v6,+vfp2,-d32".into(),
+ max_atomic_width: Some(64),
+ mcount: "__mcount".into(),
+ ..super::netbsd_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv6k_nintendo_3ds.rs b/compiler/rustc_target/src/spec/armv6k_nintendo_3ds.rs
new file mode 100644
index 000000000..1bba39393
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv6k_nintendo_3ds.rs
@@ -0,0 +1,38 @@
+use crate::spec::{cvs, LinkerFlavor, RelocModel, Target, TargetOptions};
+
+/// A target for Nintendo 3DS devices, built with the devkitARM toolchain.
+///
+/// Requires the devkitARM toolchain for 3DS targets on the host system.
+
+pub fn target() -> Target {
+ let pre_link_args = TargetOptions::link_args(
+ LinkerFlavor::Gcc,
+ &["-specs=3dsx.specs", "-mtune=mpcore", "-mfloat-abi=hard", "-mtp=soft"],
+ );
+
+ Target {
+ llvm_target: "armv6k-none-eabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ os: "horizon".into(),
+ env: "newlib".into(),
+ vendor: "nintendo".into(),
+ abi: "eabihf".into(),
+ linker_flavor: LinkerFlavor::Gcc,
+ cpu: "mpcore".into(),
+ families: cvs!["unix"],
+ linker: Some("arm-none-eabi-gcc".into()),
+ relocation_model: RelocModel::Static,
+ features: "+vfp2".into(),
+ pre_link_args,
+ exe_suffix: ".elf".into(),
+ no_default_libraries: false,
+ // There are some issues in debug builds with this enabled in certain programs.
+ has_thread_local: false,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_apple_ios.rs b/compiler/rustc_target/src/spec/armv7_apple_ios.rs
new file mode 100644
index 000000000..57fd74a36
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_apple_ios.rs
@@ -0,0 +1,18 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let llvm_target = super::apple_base::ios_llvm_target("armv7");
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 32,
+ data_layout: "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ features: "+v7,+vfp3,+neon".into(),
+ max_atomic_width: Some(64),
+ ..opts("ios", Arch::Armv7)
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs b/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs
new file mode 100644
index 000000000..38c117a49
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_linux_androideabi.rs
@@ -0,0 +1,27 @@
+use crate::spec::{LinkerFlavor, SanitizerSet, Target, TargetOptions};
+
+// This target is for the baseline of the Android v7a ABI
+// in thumb mode. It's named armv7-* instead of thumbv7-*
+// for historical reasons. See the thumbv7neon variant for
+// enabling NEON.
+
+// See https://developer.android.com/ndk/guides/abis.html#v7a
+// for target ABI requirements.
+
+pub fn target() -> Target {
+ let mut base = super::android_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-march=armv7-a"]);
+ Target {
+ llvm_target: "armv7-none-linux-android".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+v7,+thumb-mode,+thumb2,+vfp3,-d32,-neon".into(),
+ supported_sanitizers: SanitizerSet::ADDRESS,
+ max_atomic_width: Some(64),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs b/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs
new file mode 100644
index 000000000..bc37b62de
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_unknown_freebsd.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv7-unknown-freebsd-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ // FIXME: change env to "gnu" when cfg_target_abi becomes stable
+ env: "gnueabihf".into(),
+ features: "+v7,+vfp3,-d32,+thumb2,-neon".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ ..super::freebsd_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs
new file mode 100644
index 000000000..903042d7e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabi.rs
@@ -0,0 +1,20 @@
+use crate::spec::{Target, TargetOptions};
+
+// This target is for glibc Linux on ARMv7 without thumb-mode, NEON or
+// hardfloat.
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv7-unknown-linux-gnueabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+v7,+thumb2,+soft-float,-neon".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs
new file mode 100644
index 000000000..e39ea49a0
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_gnueabihf.rs
@@ -0,0 +1,21 @@
+use crate::spec::{Target, TargetOptions};
+
+// This target is for glibc Linux on ARMv7 without NEON or
+// thumb-mode. See the thumbv7neon variant for enabling both.
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv7-unknown-linux-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ // Info about features at https://wiki.debian.org/ArmHardFloatPort
+ features: "+v7,+vfp3,-d32,+thumb2,-neon".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}__gnu_mcount_nc".into(),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs
new file mode 100644
index 000000000..7dae85773
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabi.rs
@@ -0,0 +1,26 @@
+use crate::spec::{Target, TargetOptions};
+
+// This target is for musl Linux on ARMv7 without thumb-mode, NEON or
+// hardfloat.
+
+pub fn target() -> Target {
+ // Most of these settings are copied from the armv7_unknown_linux_gnueabi
+ // target.
+ Target {
+ // It's important we use "gnueabi" and not "musleabi" here. LLVM uses it
+ // to determine the calling convention and float ABI, and it doesn't
+ // support the "musleabi" value.
+ llvm_target: "armv7-unknown-linux-gnueabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+v7,+thumb2,+soft-float,-neon".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}mcount".into(),
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs
new file mode 100644
index 000000000..ba83964bf
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_musleabihf.rs
@@ -0,0 +1,25 @@
+use crate::spec::{Target, TargetOptions};
+
+// This target is for musl Linux on ARMv7 without thumb-mode or NEON.
+
+pub fn target() -> Target {
+ Target {
+ // It's important we use "gnueabihf" and not "musleabihf" here. LLVM
+ // uses it to determine the calling convention and float ABI, and LLVM
+ // doesn't support the "musleabihf" value.
+ llvm_target: "armv7-unknown-linux-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ // Most of these settings are copied from the armv7_unknown_linux_gnueabihf
+ // target.
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ features: "+v7,+vfp3,-d32,+thumb2,-neon".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}mcount".into(),
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_uclibceabi.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_uclibceabi.rs
new file mode 100644
index 000000000..171f67070
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_uclibceabi.rs
@@ -0,0 +1,23 @@
+use crate::spec::{Target, TargetOptions};
+
+// This target is for uclibc Linux on ARMv7 without NEON,
+// thumb-mode or hardfloat.
+
+pub fn target() -> Target {
+ let base = super::linux_uclibc_base::opts();
+ Target {
+ llvm_target: "armv7-unknown-linux-gnueabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ features: "+v7,+thumb2,+soft-float,-neon".into(),
+ cpu: "generic".into(),
+ max_atomic_width: Some(64),
+ mcount: "_mcount".into(),
+ abi: "eabi".into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_linux_uclibceabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_linux_uclibceabihf.rs
new file mode 100644
index 000000000..d3e95a657
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_unknown_linux_uclibceabihf.rs
@@ -0,0 +1,24 @@
+use crate::spec::{Target, TargetOptions};
+
+// This target is for uclibc Linux on ARMv7 without NEON or
+// thumb-mode. See the thumbv7neon variant for enabling both.
+
+pub fn target() -> Target {
+ let base = super::linux_uclibc_base::opts();
+ Target {
+ llvm_target: "armv7-unknown-linux-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ // Info about features at https://wiki.debian.org/ArmHardFloatPort
+ features: "+v7,+vfp3,-d32,+thumb2,-neon".into(),
+ cpu: "generic".into(),
+ max_atomic_width: Some(64),
+ mcount: "_mcount".into(),
+ abi: "eabihf".into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs b/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs
new file mode 100644
index 000000000..c89ae2483
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_unknown_netbsd_eabihf.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv7-unknown-netbsdelf-eabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ // FIXME: remove env when cfg_target_abi becomes stable
+ env: "eabihf".into(),
+ features: "+v7,+vfp3,-d32,+thumb2,-neon".into(),
+ max_atomic_width: Some(64),
+ mcount: "__mcount".into(),
+ ..super::netbsd_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7_wrs_vxworks_eabihf.rs b/compiler/rustc_target/src/spec/armv7_wrs_vxworks_eabihf.rs
new file mode 100644
index 000000000..c1ab90172
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7_wrs_vxworks_eabihf.rs
@@ -0,0 +1,17 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv7-unknown-linux-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ // Info about features at https://wiki.debian.org/ArmHardFloatPort
+ features: "+v7,+vfp3,-d32,+thumb2,-neon".into(),
+ max_atomic_width: Some(64),
+ ..super::vxworks_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7a_kmc_solid_asp3_eabi.rs b/compiler/rustc_target/src/spec/armv7a_kmc_solid_asp3_eabi.rs
new file mode 100644
index 000000000..b49dc650b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7a_kmc_solid_asp3_eabi.rs
@@ -0,0 +1,19 @@
+use super::{RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = super::solid_base::opts("asp3");
+ Target {
+ llvm_target: "armv7a-none-eabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ linker: Some("arm-kmc-eabi-gcc".into()),
+ features: "+v7,+soft-float,+thumb2,-neon".into(),
+ relocation_model: RelocModel::Static,
+ disable_redzone: true,
+ max_atomic_width: Some(64),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7a_kmc_solid_asp3_eabihf.rs b/compiler/rustc_target/src/spec/armv7a_kmc_solid_asp3_eabihf.rs
new file mode 100644
index 000000000..7d30238e8
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7a_kmc_solid_asp3_eabihf.rs
@@ -0,0 +1,19 @@
+use super::{RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = super::solid_base::opts("asp3");
+ Target {
+ llvm_target: "armv7a-none-eabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ linker: Some("arm-kmc-eabi-gcc".into()),
+ features: "+v7,+vfp3,-d32,+thumb2,-neon".into(),
+ relocation_model: RelocModel::Static,
+ disable_redzone: true,
+ max_atomic_width: Some(64),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7a_none_eabi.rs b/compiler/rustc_target/src/spec/armv7a_none_eabi.rs
new file mode 100644
index 000000000..cb5cbe158
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7a_none_eabi.rs
@@ -0,0 +1,40 @@
+// Generic ARMv7-A target for bare-metal code - floating point disabled
+//
+// This is basically the `armv7-unknown-linux-gnueabi` target with some changes
+// (listed below) to bring it closer to the bare-metal `thumb` & `aarch64`
+// targets:
+//
+// - `TargetOptions.features`: added `+strict-align`. rationale: unaligned
+// memory access is disabled on boot on these cores
+// - linker changed to LLD. rationale: a C toolchain is not strictly needed to
+// build bare-metal binaries (the `gcc` linker has the advantage that it knows
+// where C libraries and crt*.o are, but that's not much of an advantage here);
+// LLD is also faster
+// - `panic_strategy` set to `abort`. rationale: matches `thumb` targets
+// - `relocation-model` set to `static`; also no PIE, no relro and no dynamic
+// linking. rationale: matches `thumb` targets
+
+use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let opts = TargetOptions {
+ abi: "eabi".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ features: "+v7,+thumb2,+soft-float,-neon,+strict-align".into(),
+ relocation_model: RelocModel::Static,
+ disable_redzone: true,
+ max_atomic_width: Some(64),
+ panic_strategy: PanicStrategy::Abort,
+ emit_debug_gdb_scripts: false,
+ c_enum_min_bits: 8,
+ ..Default::default()
+ };
+ Target {
+ llvm_target: "armv7a-none-eabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: opts,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
new file mode 100644
index 000000000..fb5dd2e75
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7a_none_eabihf.rs
@@ -0,0 +1,32 @@
+// Generic ARMv7-A target for bare-metal code - floating point enabled (assumes
+// FPU is present and emits FPU instructions)
+//
+// This is basically the `armv7-unknown-linux-gnueabihf` target with some
+// changes (listed in `armv7a_none_eabi.rs`) to bring it closer to the
+// bare-metal `thumb` & `aarch64` targets.
+
+use super::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let opts = TargetOptions {
+ abi: "eabihf".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ features: "+v7,+vfp3,-d32,+thumb2,-neon,+strict-align".into(),
+ relocation_model: RelocModel::Static,
+ disable_redzone: true,
+ max_atomic_width: Some(64),
+ panic_strategy: PanicStrategy::Abort,
+ emit_debug_gdb_scripts: false,
+ // GCC and Clang default to 8 for arm-none here
+ c_enum_min_bits: 8,
+ ..Default::default()
+ };
+ Target {
+ llvm_target: "armv7a-none-eabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: opts,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7k_apple_watchos.rs b/compiler/rustc_target/src/spec/armv7k_apple_watchos.rs
new file mode 100644
index 000000000..af5d1c2ff
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7k_apple_watchos.rs
@@ -0,0 +1,28 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = opts("watchos", Arch::Armv7k);
+ Target {
+ llvm_target: "armv7k-apple-watchos".into(),
+ pointer_width: 32,
+ data_layout: "e-m:o-p:32:32-Fi8-i64:64-a:0:32-n32-S128".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ features: "+v7,+vfp4,+neon".into(),
+ max_atomic_width: Some(64),
+ forces_embed_bitcode: true,
+ // These arguments are not actually invoked - they just have
+ // to look right to pass App Store validation.
+ bitcode_llvm_cmdline: "-triple\0\
+ armv7k-apple-watchos3.0.0\0\
+ -emit-obj\0\
+ -disable-llvm-passes\0\
+ -target-abi\0\
+ darwinpcs\0\
+ -Os\0"
+ .into(),
+ ..base
+ },
+ }
+}
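
The `\0` bytes above are argument separators: `bitcode_llvm_cmdline` encodes a whole argv list in one string, not a single flag. A small decoding sketch (illustrative only):

    // Splitting on NUL recovers the arguments embedded alongside the bitcode:
    // -triple armv7k-apple-watchos3.0.0 -emit-obj -disable-llvm-passes
    // -target-abi darwinpcs -Os
    let cmdline = "-triple\0armv7k-apple-watchos3.0.0\0-emit-obj\0\
                   -disable-llvm-passes\0-target-abi\0darwinpcs\0-Os\0";
    let args: Vec<&str> = cmdline.split('\0').filter(|a| !a.is_empty()).collect();
    assert_eq!(args.len(), 7);
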
diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabi.rs b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
new file mode 100644
index 000000000..5f1da09b3
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7r_none_eabi.rs
@@ -0,0 +1,26 @@
+// Targets the Little-endian Cortex-R4/R5 processor (ARMv7-R)
+
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv7r-unknown-none-eabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabi".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ relocation_model: RelocModel::Static,
+ panic_strategy: PanicStrategy::Abort,
+ max_atomic_width: Some(32),
+ emit_debug_gdb_scripts: false,
+ // GCC and Clang default to 8 for arm-none here
+ c_enum_min_bits: 8,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
new file mode 100644
index 000000000..0038ed0df
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7r_none_eabihf.rs
@@ -0,0 +1,27 @@
+// Targets the Little-endian Cortex-R4F/R5F processor (ARMv7-R)
+
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv7r-unknown-none-eabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ relocation_model: RelocModel::Static,
+ panic_strategy: PanicStrategy::Abort,
+ features: "+vfp3,-d32,-fp16".into(),
+ max_atomic_width: Some(32),
+ emit_debug_gdb_scripts: false,
+ // GCC and Clang default to 8 for arm-none here
+ c_enum_min_bits: 8,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/armv7s_apple_ios.rs b/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
new file mode 100644
index 000000000..cc17265b2
--- /dev/null
+++ b/compiler/rustc_target/src/spec/armv7s_apple_ios.rs
@@ -0,0 +1,16 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv7s-apple-ios".into(),
+ pointer_width: 32,
+ data_layout: "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ features: "+v7,+vfp4,+neon".into(),
+ max_atomic_width: Some(64),
+ ..opts("ios", Arch::Armv7s)
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs b/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs
new file mode 100644
index 000000000..b4cf2c5ee
--- /dev/null
+++ b/compiler/rustc_target/src/spec/asmjs_unknown_emscripten.rs
@@ -0,0 +1,7 @@
+use super::{wasm32_unknown_emscripten, LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut target = wasm32_unknown_emscripten::target();
+ target.add_post_link_args(LinkerFlavor::Em, &["-sWASM=0", "--memory-init-file", "0"]);
+ target
+}
diff --git a/compiler/rustc_target/src/spec/avr_gnu_base.rs b/compiler/rustc_target/src/spec/avr_gnu_base.rs
new file mode 100644
index 000000000..1d441e558
--- /dev/null
+++ b/compiler/rustc_target/src/spec/avr_gnu_base.rs
@@ -0,0 +1,27 @@
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+/// A base target for AVR devices using the GNU toolchain.
+///
+/// Requires GNU avr-gcc and avr-binutils on the host system.
+/// FIXME: Remove the second parameter when const string concatenation is possible.
+pub fn target(target_cpu: &'static str, mmcu: &'static str) -> Target {
+ Target {
+ arch: "avr".into(),
+ data_layout: "e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8".into(),
+ llvm_target: "avr-unknown-unknown".into(),
+ pointer_width: 16,
+ options: TargetOptions {
+ c_int_width: "16".into(),
+ cpu: target_cpu.into(),
+ exe_suffix: ".elf".into(),
+
+ linker: Some("avr-gcc".into()),
+ eh_frame_header: false,
+ pre_link_args: TargetOptions::link_args(LinkerFlavor::Gcc, &[mmcu]),
+ late_link_args: TargetOptions::link_args(LinkerFlavor::Gcc, &["-lgcc"]),
+ max_atomic_width: Some(0),
+ atomic_cas: false,
+ ..TargetOptions::default()
+ },
+ }
+}
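
The FIXME above is why every concrete AVR spec has to spell the CPU name twice. A sketch of the current call pattern next to the hypothetical single-parameter form it would collapse into (the `const_concat!` name is made up; no such macro exists yet):

    // Current pattern (see avr_unknown_gnu_atmega328 below):
    let _t = super::avr_gnu_base::target("atmega328", "-mmcu=atmega328");

    // Hypothetical once const string concatenation is possible:
    // pub fn target(cpu: &'static str) -> Target {
    //     super::avr_gnu_base::target(cpu, const_concat!("-mmcu=", cpu))
    // }
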
diff --git a/compiler/rustc_target/src/spec/avr_unknown_gnu_atmega328.rs b/compiler/rustc_target/src/spec/avr_unknown_gnu_atmega328.rs
new file mode 100644
index 000000000..6c16b03cc
--- /dev/null
+++ b/compiler/rustc_target/src/spec/avr_unknown_gnu_atmega328.rs
@@ -0,0 +1,5 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ super::avr_gnu_base::target("atmega328", "-mmcu=atmega328")
+}
diff --git a/compiler/rustc_target/src/spec/bpf_base.rs b/compiler/rustc_target/src/spec/bpf_base.rs
new file mode 100644
index 000000000..3c4da6f88
--- /dev/null
+++ b/compiler/rustc_target/src/spec/bpf_base.rs
@@ -0,0 +1,25 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, TargetOptions};
+
+pub fn opts(endian: Endian) -> TargetOptions {
+ TargetOptions {
+ allow_asm: true,
+ endian,
+ linker_flavor: LinkerFlavor::BpfLinker,
+ atomic_cas: false,
+ dynamic_linking: true,
+ no_builtins: true,
+ panic_strategy: PanicStrategy::Abort,
+ position_independent_executables: true,
+ // Disable MergeFunctions since:
+ // - older kernels don't support bpf-to-bpf calls
+ // - on newer kernels, userspace still needs to relocate before calling
+ // BPF_PROG_LOAD and not all BPF libraries do that yet
+ merge_functions: MergeFunctions::Disabled,
+ obj_is_bitcode: true,
+ requires_lto: false,
+ singlethread: true,
+ max_atomic_width: Some(64),
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/bpfeb_unknown_none.rs b/compiler/rustc_target/src/spec/bpfeb_unknown_none.rs
new file mode 100644
index 000000000..174ddfa50
--- /dev/null
+++ b/compiler/rustc_target/src/spec/bpfeb_unknown_none.rs
@@ -0,0 +1,12 @@
+use crate::spec::Target;
+use crate::{abi::Endian, spec::bpf_base};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "bpfeb".into(),
+ data_layout: "E-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
+ pointer_width: 64,
+ arch: "bpf".into(),
+ options: bpf_base::opts(Endian::Big),
+ }
+}
diff --git a/compiler/rustc_target/src/spec/bpfel_unknown_none.rs b/compiler/rustc_target/src/spec/bpfel_unknown_none.rs
new file mode 100644
index 000000000..7625e7b0e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/bpfel_unknown_none.rs
@@ -0,0 +1,12 @@
+use crate::spec::Target;
+use crate::{abi::Endian, spec::bpf_base};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "bpfel".into(),
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128".into(),
+ pointer_width: 64,
+ arch: "bpf".into(),
+ options: bpf_base::opts(Endian::Little),
+ }
+}
diff --git a/compiler/rustc_target/src/spec/crt_objects.rs b/compiler/rustc_target/src/spec/crt_objects.rs
new file mode 100644
index 000000000..52ac3622e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/crt_objects.rs
@@ -0,0 +1,157 @@
+//! Object files providing support for basic runtime facilities and added to the produced binaries
+//! at the start and at the end of linking.
+//!
+//! Table of CRT objects for popular toolchains.
+//! The `crtx` ones are generally distributed with libc and the `begin/end` ones with gcc.
+//! See <https://dev.gentoo.org/~vapier/crt.txt> for some more details.
+//!
+//! | Pre-link CRT objects | glibc | musl | bionic | mingw | wasi |
+//! |----------------------|------------------------|------------------------|------------------|-------------------|--------------|
+//! | dynamic-nopic-exe | crt1, crti, crtbegin | crt1, crti, crtbegin | crtbegin_dynamic | crt2, crtbegin | crt1 |
+//! | dynamic-pic-exe | Scrt1, crti, crtbeginS | Scrt1, crti, crtbeginS | crtbegin_dynamic | crt2, crtbegin | crt1 |
+//! | static-nopic-exe | crt1, crti, crtbeginT | crt1, crti, crtbegin | crtbegin_static | crt2, crtbegin | crt1 |
+//! | static-pic-exe | rcrt1, crti, crtbeginS | rcrt1, crti, crtbeginS | crtbegin_dynamic | crt2, crtbegin | crt1 |
+//! | dynamic-dylib | crti, crtbeginS | crti, crtbeginS | crtbegin_so | dllcrt2, crtbegin | - |
+//! | static-dylib (gcc) | crti, crtbeginT | crti, crtbeginS | crtbegin_so | dllcrt2, crtbegin | - |
+//! | static-dylib (clang) | crti, crtbeginT | N/A | crtbegin_static | dllcrt2, crtbegin | - |
+//! | wasi-reactor-exe | N/A | N/A | N/A | N/A | crt1-reactor |
+//!
+//! | Post-link CRT objects | glibc | musl | bionic | mingw | wasi |
+//! |-----------------------|---------------|---------------|----------------|--------|------|
+//! | dynamic-nopic-exe | crtend, crtn | crtend, crtn | crtend_android | crtend | - |
+//! | dynamic-pic-exe | crtendS, crtn | crtendS, crtn | crtend_android | crtend | - |
+//! | static-nopic-exe | crtend, crtn | crtend, crtn | crtend_android | crtend | - |
+//! | static-pic-exe | crtendS, crtn | crtendS, crtn | crtend_android | crtend | - |
+//! | dynamic-dylib | crtendS, crtn | crtendS, crtn | crtend_so | crtend | - |
+//! | static-dylib (gcc) | crtend, crtn | crtendS, crtn | crtend_so | crtend | - |
+//! | static-dylib (clang) | crtendS, crtn | N/A | crtend_so | crtend | - |
+//!
+//! Use cases for rustc linking the CRT objects explicitly:
+//! - rustc needs to add its own Rust-specific objects (mingw is the motivating example),
+//! - the gcc wrapper cannot be used for some reason and a linker like ld or lld is used directly,
+//! - the gcc wrapper pulls in the wrong CRT objects (e.g. from glibc when we are targeting musl).
+//!
+//! In general it is preferable to rely on the target's native toolchain to pull the objects.
+//! However, for some targets (musl, mingw) rustc historically provides a more self-contained
+//! installation not requiring users to install the native target's toolchain.
+//! In that case rustc distributes the objects as a part of the target's Rust toolchain
+//! and falls back to linking with them manually.
+//! Unlike native toolchains, rustc currently only adds libc's objects during linking,
+//! but not gcc's. As a result rustc cannot link with C++ static libraries (#36710)
+//! when linking in self-contained mode.
+
+use crate::json::{Json, ToJson};
+use crate::spec::LinkOutputKind;
+use std::borrow::Cow;
+use std::collections::BTreeMap;
+use std::str::FromStr;
+
+pub type CrtObjects = BTreeMap<LinkOutputKind, Vec<Cow<'static, str>>>;
+
+pub(super) fn new(obj_table: &[(LinkOutputKind, &[&'static str])]) -> CrtObjects {
+    obj_table
+        .iter()
+        .map(|(kind, objs)| (*kind, objs.iter().map(|&obj| obj.into()).collect()))
+        .collect()
+}
+
+pub(super) fn all(obj: &'static str) -> CrtObjects {
+ new(&[
+ (LinkOutputKind::DynamicNoPicExe, &[obj]),
+ (LinkOutputKind::DynamicPicExe, &[obj]),
+ (LinkOutputKind::StaticNoPicExe, &[obj]),
+ (LinkOutputKind::StaticPicExe, &[obj]),
+ (LinkOutputKind::DynamicDylib, &[obj]),
+ (LinkOutputKind::StaticDylib, &[obj]),
+ ])
+}
+
+pub(super) fn pre_musl_fallback() -> CrtObjects {
+ new(&[
+ (LinkOutputKind::DynamicNoPicExe, &["crt1.o", "crti.o", "crtbegin.o"]),
+ (LinkOutputKind::DynamicPicExe, &["Scrt1.o", "crti.o", "crtbeginS.o"]),
+ (LinkOutputKind::StaticNoPicExe, &["crt1.o", "crti.o", "crtbegin.o"]),
+ (LinkOutputKind::StaticPicExe, &["rcrt1.o", "crti.o", "crtbeginS.o"]),
+ (LinkOutputKind::DynamicDylib, &["crti.o", "crtbeginS.o"]),
+ (LinkOutputKind::StaticDylib, &["crti.o", "crtbeginS.o"]),
+ ])
+}
+
+pub(super) fn post_musl_fallback() -> CrtObjects {
+ new(&[
+ (LinkOutputKind::DynamicNoPicExe, &["crtend.o", "crtn.o"]),
+ (LinkOutputKind::DynamicPicExe, &["crtendS.o", "crtn.o"]),
+ (LinkOutputKind::StaticNoPicExe, &["crtend.o", "crtn.o"]),
+ (LinkOutputKind::StaticPicExe, &["crtendS.o", "crtn.o"]),
+ (LinkOutputKind::DynamicDylib, &["crtendS.o", "crtn.o"]),
+ (LinkOutputKind::StaticDylib, &["crtendS.o", "crtn.o"]),
+ ])
+}
+
+pub(super) fn pre_mingw_fallback() -> CrtObjects {
+ new(&[
+ (LinkOutputKind::DynamicNoPicExe, &["crt2.o", "rsbegin.o"]),
+ (LinkOutputKind::DynamicPicExe, &["crt2.o", "rsbegin.o"]),
+ (LinkOutputKind::StaticNoPicExe, &["crt2.o", "rsbegin.o"]),
+ (LinkOutputKind::StaticPicExe, &["crt2.o", "rsbegin.o"]),
+ (LinkOutputKind::DynamicDylib, &["dllcrt2.o", "rsbegin.o"]),
+ (LinkOutputKind::StaticDylib, &["dllcrt2.o", "rsbegin.o"]),
+ ])
+}
+
+pub(super) fn post_mingw_fallback() -> CrtObjects {
+ all("rsend.o")
+}
+
+pub(super) fn pre_mingw() -> CrtObjects {
+ all("rsbegin.o")
+}
+
+pub(super) fn post_mingw() -> CrtObjects {
+ all("rsend.o")
+}
+
+pub(super) fn pre_wasi_fallback() -> CrtObjects {
+ // Use crt1-command.o instead of crt1.o to enable support for new-style
+ // commands. See https://reviews.llvm.org/D81689 for more info.
+ new(&[
+ (LinkOutputKind::DynamicNoPicExe, &["crt1-command.o"]),
+ (LinkOutputKind::DynamicPicExe, &["crt1-command.o"]),
+ (LinkOutputKind::StaticNoPicExe, &["crt1-command.o"]),
+ (LinkOutputKind::StaticPicExe, &["crt1-command.o"]),
+ (LinkOutputKind::WasiReactorExe, &["crt1-reactor.o"]),
+ ])
+}
+
+pub(super) fn post_wasi_fallback() -> CrtObjects {
+ new(&[])
+}
+
+/// Which logic to use when determining whether to fall back to the "self-contained" linking mode.
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum CrtObjectsFallback {
+ Musl,
+ Mingw,
+ Wasm,
+}
+
+impl FromStr for CrtObjectsFallback {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<CrtObjectsFallback, ()> {
+ Ok(match s {
+ "musl" => CrtObjectsFallback::Musl,
+ "mingw" => CrtObjectsFallback::Mingw,
+ "wasm" => CrtObjectsFallback::Wasm,
+ _ => return Err(()),
+ })
+ }
+}
+
+impl ToJson for CrtObjectsFallback {
+ fn to_json(&self) -> Json {
+ match *self {
+ CrtObjectsFallback::Musl => "musl",
+ CrtObjectsFallback::Mingw => "mingw",
+ CrtObjectsFallback::Wasm => "wasm",
+ }
+ .to_json()
+ }
+}
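
For context, a sketch of how a libc base is expected to consume these helpers, wiring the fallback object tables together with the fallback-detection marker (field names follow this snapshot's `TargetOptions` and should be treated as an assumption, not a stable API):

    // Roughly what linux_musl_base does to opt into self-contained linking:
    let mut base = TargetOptions::default();
    base.pre_link_objects_fallback = crt_objects::pre_musl_fallback();
    base.post_link_objects_fallback = crt_objects::post_musl_fallback();
    base.crt_objects_fallback = Some(CrtObjectsFallback::Musl);
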
diff --git a/compiler/rustc_target/src/spec/dragonfly_base.rs b/compiler/rustc_target/src/spec/dragonfly_base.rs
new file mode 100644
index 000000000..de2be7817
--- /dev/null
+++ b/compiler/rustc_target/src/spec/dragonfly_base.rs
@@ -0,0 +1,14 @@
+use crate::spec::{cvs, RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "dragonfly".into(),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ has_rpath: true,
+ position_independent_executables: true,
+ relro_level: RelroLevel::Full,
+ default_dwarf_version: 2,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/freebsd_base.rs b/compiler/rustc_target/src/spec/freebsd_base.rs
new file mode 100644
index 000000000..8c141aaae
--- /dev/null
+++ b/compiler/rustc_target/src/spec/freebsd_base.rs
@@ -0,0 +1,15 @@
+use crate::spec::{cvs, RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "freebsd".into(),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ has_rpath: true,
+ position_independent_executables: true,
+ relro_level: RelroLevel::Full,
+ abi_return_struct_as_int: true,
+ default_dwarf_version: 2,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/fuchsia_base.rs b/compiler/rustc_target/src/spec/fuchsia_base.rs
new file mode 100644
index 000000000..df1e3275f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/fuchsia_base.rs
@@ -0,0 +1,38 @@
+use crate::spec::{crt_objects, cvs, LinkOutputKind, LinkerFlavor, LldFlavor, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ let pre_link_args = TargetOptions::link_args(
+ LinkerFlavor::Ld,
+ &[
+ "--build-id",
+ "--hash-style=gnu",
+ "-z",
+ "max-page-size=4096",
+ "-z",
+ "now",
+ "-z",
+ "rodynamic",
+ "-z",
+ "separate-loadable-segments",
+ "--pack-dyn-relocs=relr",
+ ],
+ );
+
+ TargetOptions {
+ os: "fuchsia".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ pre_link_args,
+ pre_link_objects: crt_objects::new(&[
+ (LinkOutputKind::DynamicNoPicExe, &["Scrt1.o"]),
+ (LinkOutputKind::DynamicPicExe, &["Scrt1.o"]),
+ (LinkOutputKind::StaticNoPicExe, &["Scrt1.o"]),
+ (LinkOutputKind::StaticPicExe, &["Scrt1.o"]),
+ ]),
+ position_independent_executables: true,
+ has_thread_local: true,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/haiku_base.rs b/compiler/rustc_target/src/spec/haiku_base.rs
new file mode 100644
index 000000000..8ab874410
--- /dev/null
+++ b/compiler/rustc_target/src/spec/haiku_base.rs
@@ -0,0 +1,11 @@
+use crate::spec::{cvs, RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "haiku".into(),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ relro_level: RelroLevel::Full,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/hermit_base.rs b/compiler/rustc_target/src/spec/hermit_base.rs
new file mode 100644
index 000000000..562ccef7e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/hermit_base.rs
@@ -0,0 +1,21 @@
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, TargetOptions, TlsModel};
+
+pub fn opts() -> TargetOptions {
+ let pre_link_args = TargetOptions::link_args(
+ LinkerFlavor::Ld,
+ &["--build-id", "--hash-style=gnu", "--Bstatic"],
+ );
+
+ TargetOptions {
+ os: "hermit".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ has_thread_local: true,
+ pre_link_args,
+ panic_strategy: PanicStrategy::Abort,
+ position_independent_executables: true,
+ static_position_independent_executables: true,
+ tls_model: TlsModel::InitialExec,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs
new file mode 100644
index 000000000..cc2c78c69
--- /dev/null
+++ b/compiler/rustc_target/src/spec/hexagon_unknown_linux_musl.rs
@@ -0,0 +1,30 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "hexagonv60".into();
+ base.max_atomic_width = Some(32);
+ // FIXME: HVX length defaults are per-CPU
+ base.features = "-small-data,+hvx-length128b".into();
+
+ base.crt_static_default = false;
+ base.has_rpath = true;
+ base.linker_is_gnu = false;
+ base.dynamic_linking = true;
+
+ base.c_enum_min_bits = 8;
+
+ Target {
+ llvm_target: "hexagon-unknown-linux-musl".into(),
+ pointer_width: 32,
+ data_layout: concat!(
+ "e-m:e-p:32:32:32-a:0-n16:32-i64:64:64-i32:32",
+ ":32-i16:16:16-i1:8:8-f32:32:32-f64:64:64-v32",
+ ":32:32-v64:64:64-v512:512:512-v1024:1024:1024-v2048",
+ ":2048:2048"
+ )
+ .into(),
+ arch: "hexagon".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i386_apple_ios.rs b/compiler/rustc_target/src/spec/i386_apple_ios.rs
new file mode 100644
index 000000000..8b6266c58
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i386_apple_ios.rs
@@ -0,0 +1,22 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = opts("ios", Arch::I386);
+ let llvm_target = super::apple_base::ios_sim_llvm_target("i386");
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 32,
+ data_layout: "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:128-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: TargetOptions {
+ max_atomic_width: Some(64),
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ stack_probes: StackProbeType::Call,
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i386_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i386_unknown_linux_gnu.rs
new file mode 100644
index 000000000..801a88933
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i386_unknown_linux_gnu.rs
@@ -0,0 +1,8 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::i686_unknown_linux_gnu::target();
+ base.cpu = "i386".into();
+ base.llvm_target = "i386-unknown-linux-gnu".into();
+ base
+}
diff --git a/compiler/rustc_target/src/spec/i486_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i486_unknown_linux_gnu.rs
new file mode 100644
index 000000000..a11fbecc3
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i486_unknown_linux_gnu.rs
@@ -0,0 +1,8 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::i686_unknown_linux_gnu::target();
+ base.cpu = "i486".into();
+ base.llvm_target = "i486-unknown-linux-gnu".into();
+ base
+}
diff --git a/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs
new file mode 100644
index 000000000..befb0f89f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i586_pc_windows_msvc.rs
@@ -0,0 +1,8 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::i686_pc_windows_msvc::target();
+ base.cpu = "pentium".into();
+ base.llvm_target = "i586-pc-windows-msvc".into();
+ base
+}
diff --git a/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs
new file mode 100644
index 000000000..b699a7af1
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i586_unknown_linux_gnu.rs
@@ -0,0 +1,8 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::i686_unknown_linux_gnu::target();
+ base.cpu = "pentium".into();
+ base.llvm_target = "i586-unknown-linux-gnu".into();
+ base
+}
diff --git a/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs
new file mode 100644
index 000000000..55a26eb00
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i586_unknown_linux_musl.rs
@@ -0,0 +1,8 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::i686_unknown_linux_musl::target();
+ base.cpu = "pentium".into();
+ base.llvm_target = "i586-unknown-linux-musl".into();
+ base
+}
diff --git a/compiler/rustc_target/src/spec/i686_apple_darwin.rs b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
new file mode 100644
index 000000000..1718bd77b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_apple_darwin.rs
@@ -0,0 +1,28 @@
+use crate::spec::{FramePointer, LinkerFlavor, StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::apple_base::opts("macos");
+ base.cpu = "yonah".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ base.link_env_remove.to_mut().extend(super::apple_base::macos_link_env_remove());
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.frame_pointer = FramePointer::Always;
+
+    // Clang automatically chooses a more specific target based on
+    // MACOSX_DEPLOYMENT_TARGET. To allow cross-language LTO to work
+    // correctly, we do too.
+ let arch = "i686";
+ let llvm_target = super::apple_base::macos_llvm_target(&arch);
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 32,
+ data_layout: "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:128-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: TargetOptions { mcount: "\u{1}mcount".into(), ..base },
+ }
+}
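
A short illustration of the versioned-triple logic referenced above (helper behavior is inferred from the call site; the exact output string is an assumption):

    // The deployment target is folded into the LLVM triple so that rustc's
    // modules carry the same triple Clang would use, keeping cross-language
    // LTO consistent.
    std::env::set_var("MACOSX_DEPLOYMENT_TARGET", "10.14");
    let llvm_target = super::apple_base::macos_llvm_target("i686");
    // llvm_target == "i686-apple-macosx10.14.0" (assumed)
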
diff --git a/compiler/rustc_target/src/spec/i686_linux_android.rs b/compiler/rustc_target/src/spec/i686_linux_android.rs
new file mode 100644
index 000000000..bdaf5c990
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_linux_android.rs
@@ -0,0 +1,26 @@
+use crate::spec::{SanitizerSet, StackProbeType, Target, TargetOptions};
+
+// See https://developer.android.com/ndk/guides/abis.html#x86
+// for target ABI requirements.
+
+pub fn target() -> Target {
+ let mut base = super::android_base::opts();
+
+ base.max_atomic_width = Some(64);
+
+ // https://developer.android.com/ndk/guides/abis.html#x86
+ base.cpu = "pentiumpro".into();
+ base.features = "+mmx,+sse,+sse2,+sse3,+ssse3".into();
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "i686-linux-android".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:32-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: TargetOptions { supported_sanitizers: SanitizerSet::ADDRESS, ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs b/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs
new file mode 100644
index 000000000..631865439
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_pc_windows_gnu.rs
@@ -0,0 +1,24 @@
+use crate::spec::{FramePointer, LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::windows_gnu_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.frame_pointer = FramePointer::Always; // Required for backtraces
+ base.linker = Some("i686-w64-mingw32-gcc".into());
+
+ // Mark all dynamic libraries and executables as compatible with the larger 4GiB address
+ // space available to x86 Windows binaries on x86_64.
+ base.add_pre_link_args(LinkerFlavor::Ld, &["-m", "i386pe", "--large-address-aware"]);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-Wl,--large-address-aware"]);
+
+ Target {
+ llvm_target: "i686-pc-windows-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ i64:64-f80:32-n8:16:32-a:0:32-S32"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs
new file mode 100644
index 000000000..f4ceaa1ca
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_pc_windows_msvc.rs
@@ -0,0 +1,32 @@
+use crate::spec::{LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::windows_msvc_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+
+ base.add_pre_link_args(
+ LinkerFlavor::Msvc,
+ &[
+ // Mark all dynamic libraries and executables as compatible with the larger 4GiB address
+ // space available to x86 Windows binaries on x86_64.
+ "/LARGEADDRESSAWARE",
+ // Ensure the linker will only produce an image if it can also produce a table of
+ // the image's safe exception handlers.
+ // https://docs.microsoft.com/en-us/cpp/build/reference/safeseh-image-has-safe-exception-handlers
+ "/SAFESEH",
+ ],
+ );
+ // Workaround for #95429
+ base.has_thread_local = false;
+
+ Target {
+ llvm_target: "i686-pc-windows-msvc".into(),
+ pointer_width: 32,
+ data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ i64:64-f80:128-n8:16:32-a:0:32-S32"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs b/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
new file mode 100644
index 000000000..aff284bf2
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_unknown_freebsd.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::freebsd_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "-Wl,-znotext"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "i686-unknown-freebsd".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:32-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_unknown_haiku.rs b/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
new file mode 100644
index 000000000..87aa74e40
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_unknown_haiku.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::haiku_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "i686-unknown-haiku".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:32-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
new file mode 100644
index 000000000..765803d16
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_unknown_linux_gnu.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "i686-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:32-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
new file mode 100644
index 000000000..d94928043
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_unknown_linux_musl.rs
@@ -0,0 +1,34 @@
+use crate::spec::{FramePointer, LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "-Wl,-melf_i386"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ // The unwinder used by i686-unknown-linux-musl, the LLVM libunwind
+ // implementation, apparently relies on frame pointers existing... somehow.
+    // It's not clear to me why or where this dependency is introduced, but the
+ // test suite does not pass with frame pointers eliminated and it passes
+ // with frame pointers present.
+ //
+ // If you think that this is no longer necessary, then please feel free to
+ // ignore! If it still passes the test suite and the bots then sounds good
+ // to me.
+ //
+ // This may or may not be related to this bug:
+ // https://llvm.org/bugs/show_bug.cgi?id=30879
+ base.frame_pointer = FramePointer::Always;
+
+ Target {
+ llvm_target: "i686-unknown-linux-musl".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:32-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
new file mode 100644
index 000000000..8de698b51
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_unknown_netbsd.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::netbsd_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "i686-unknown-netbsdelf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:32-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: TargetOptions { mcount: "__mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs b/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
new file mode 100644
index 000000000..7f25a1a16
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_unknown_openbsd.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::openbsd_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "-fuse-ld=lld"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "i686-unknown-openbsd".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:32-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_unknown_uefi.rs b/compiler/rustc_target/src/spec/i686_unknown_uefi.rs
new file mode 100644
index 000000000..a2e42c5e6
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_unknown_uefi.rs
@@ -0,0 +1,89 @@
+// This defines the ia32 target for UEFI systems as described in the UEFI specification. See the
+// uefi_msvc_base module for generic UEFI options. On ia32, UEFI systems always run in protected
+// mode, have the interrupt controller pre-configured, and force single-CPU execution.
+// The cdecl ABI is used; it differs from the stdcall and fastcall ABIs.
+// "i686-unknown-windows" is used to get the minimal subset of windows-specific features.
+
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::uefi_msvc_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+
+    // We disable MMX and SSE for now, even though UEFI allows using them. The problem is that
+    // these CPU features must be enabled explicitly before their first use, otherwise their
+    // instructions will trigger an exception. Rust does not inject any code that enables
+    // AVX/MMX/SSE instruction sets, so this must be done by the firmware. However, existing
+    // firmware is known to leave these uninitialized, thus triggering exceptions if we make use
+    // of them. That is why we avoid them and instead use soft-floats. This is also what GRUB and
+    // friends have done so far.
+    // If you initialize the FP units yourself, you can override these target features with
+    // custom compiler flags (e.g. `-Ctarget-feature`), giving you access to full MMX/SSE
+    // acceleration.
+ base.features = "-mmx,-sse,+soft-float".into();
+
+    // Use the -gnu triple here, for the reasons below:
+ // Background and Problem:
+ // If we use i686-unknown-windows, the LLVM IA32 MSVC generates compiler intrinsic
+ // _alldiv, _aulldiv, _allrem, _aullrem, _allmul, which will cause undefined symbol.
+ // A real issue is __aulldiv() is referred by __udivdi3() - udivmod_inner!(), from
+ // https://github.com/rust-lang-nursery/compiler-builtins.
+ // As result, rust-lld generates link error finally.
+ // Root-cause:
+ // In rust\src\llvm-project\llvm\lib\Target\X86\X86ISelLowering.cpp,
+ // we have below code to use MSVC intrinsics. It assumes MSVC target
+ // will link MSVC library. But that is NOT true in UEFI environment.
+ // UEFI does not link any MSVC or GCC standard library.
+ // if (Subtarget.isTargetKnownWindowsMSVC() ||
+ // Subtarget.isTargetWindowsItanium()) {
+ // // Setup Windows compiler runtime calls.
+ // setLibcallName(RTLIB::SDIV_I64, "_alldiv");
+ // setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
+ // setLibcallName(RTLIB::SREM_I64, "_allrem");
+ // setLibcallName(RTLIB::UREM_I64, "_aullrem");
+ // setLibcallName(RTLIB::MUL_I64, "_allmul");
+ // setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
+ // setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
+ // setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
+ // setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
+ // setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
+ // }
+ // These compiler intrinsics should be provided by compiler-builtins. Unfortunately,
+ // compiler-builtins does not provide them yet, e.g.:
+ // i386/divdi3.S
+ // i386/lshrdi3.S
+ // i386/moddi3.S
+ // i386/muldi3.S
+ // i386/udivdi3.S
+ // i386/umoddi3.S
+ // Possible solutions:
+ // 1. Avoid generating the intrinsics in the first place.
+ // 1.1 Choose a different target to bypass isTargetKnownWindowsMSVC().
+ // 1.2 Remove the "Setup Windows compiler runtime calls" code from LLVM.
+ // 2. Implement the intrinsics.
+ // We evaluated all of these options. #2 is hard because we would need to implement the
+ // intrinsics (_aulldiv) that are generated from the implementation of other intrinsics
+ // (__udivdi3) with the same functionality (udivmod_inner). If we let _aulldiv() call
+ // udivmod_inner!(), we end up in a loop, so we would have to find another way to
+ // implement udivmod_inner!(). #1.2 may break existing usage, while #1.1 seems the
+ // simplest solution today.
+ // The IA32 -gnu calling convention is the same as the one defined in the UEFI
+ // specification: it uses cdecl, EAX/ECX/EDX as volatile registers, and EAX/EDX as the
+ // return value. We also checked LLVM's X86TargetLowering; the only differences between
+ // -gnu and -msvc are fmodf(f32), longjmp() and TLS, none of which affects UEFI code.
+ // As a result, we choose -gnu for the i686 target until those intrinsics are implemented
+ // in compiler-builtins. Once compiler-builtins provides all required intrinsics, we may
+ // remove -gnu and use the default environment.
+ Target {
+ llvm_target: "i686-unknown-windows-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ i64:64-f80:32-n8:16:32-a:0:32-S32"
+ .into(),
+ arch: "x86".into(),
+
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs b/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs
new file mode 100644
index 000000000..d52810d2f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_uwp_windows_gnu.rs
@@ -0,0 +1,23 @@
+use crate::spec::{FramePointer, LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::windows_uwp_gnu_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.frame_pointer = FramePointer::Always; // Required for backtraces
+
+ // Mark all dynamic libraries and executables as compatible with the larger 4GiB address
+ // space available to x86 Windows binaries on x86_64.
+ base.add_pre_link_args(LinkerFlavor::Ld, &["-m", "i386pe", "--large-address-aware"]);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-Wl,--large-address-aware"]);
+
+ Target {
+ llvm_target: "i686-pc-windows-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ i64:64-f80:32-n8:16:32-a:0:32-S32"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_uwp_windows_msvc.rs b/compiler/rustc_target/src/spec/i686_uwp_windows_msvc.rs
new file mode 100644
index 000000000..4c657fe90
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_uwp_windows_msvc.rs
@@ -0,0 +1,17 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::windows_uwp_msvc_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "i686-pc-windows-msvc".into(),
+ pointer_width: 32,
+ data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ i64:64-f80:128-n8:16:32-a:0:32-S32"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs b/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
new file mode 100644
index 000000000..f62404e82
--- /dev/null
+++ b/compiler/rustc_target/src/spec/i686_wrs_vxworks.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::vxworks_base::opts();
+ base.cpu = "pentium4".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "i686-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ f64:32:64-f80:32-n8:16:32-S128"
+ .into(),
+ arch: "x86".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/illumos_base.rs b/compiler/rustc_target/src/spec/illumos_base.rs
new file mode 100644
index 000000000..77e000474
--- /dev/null
+++ b/compiler/rustc_target/src/spec/illumos_base.rs
@@ -0,0 +1,59 @@
+use crate::spec::{cvs, FramePointer, LinkerFlavor, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ let late_link_args = TargetOptions::link_args(
+ LinkerFlavor::Gcc,
+ &[
+ // The illumos libc contains a stack unwinding implementation, as
+ // does libgcc_s. The latter implementation includes several
+ // additional symbols that are not always in base libc. To force
+ // the consistent use of just one unwinder, we ensure libc appears
+ // after libgcc_s in the NEEDED list for the resultant binary by
+ // ignoring any attempts to add it as a dynamic dependency until the
+ // very end.
+ // FIXME: This should be replaced by a more complete and generic
+ // mechanism for controlling the order of library arguments passed
+ // to the linker.
+ "-lc",
+ // LLVM will insert calls to the stack protector functions
+ // "__stack_chk_fail" and "__stack_chk_guard" into code in native
+ // object files. Some platforms include these symbols directly in
+ // libc, but at least historically these have been provided in
+ // libssp.so on illumos and Solaris systems.
+ "-lssp",
+ ],
+ );
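+
+ // Illustrative (assumed) effect: because -lc is deferred to the late link args above, the
+ // resulting binary's NEEDED list records libgcc_s before libc, so the libgcc_s unwinder is
+ // the one consistently used.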
+
+ TargetOptions {
+ os: "illumos".into(),
+ dynamic_linking: true,
+ has_rpath: true,
+ families: cvs!["unix"],
+ is_like_solaris: true,
+ linker_is_gnu: false,
+ limit_rdylib_exports: false, // Linker doesn't support this
+ frame_pointer: FramePointer::Always,
+ eh_frame_header: false,
+ late_link_args,
+
+ // While we support ELF TLS, rust requires a way to register cleanup
+ // handlers - in C, something along the lines of
+ // void register_callback(void (*fn)(void *), void *arg);
+ // (see src/libstd/sys/unix/fast_thread_local.rs) - that is currently
+ // missing in illumos. For now at least, we must fall back to using
+ // pthread_{get,set}specific.
+ //has_thread_local: true,
+
+ // FIXME: Currently, rust is invoking cc to link, which ends up
+ // causing these to get included twice. We should eventually transition
+ // to having rustc invoke ld directly, in which case these will need to
+ // be uncommented.
+ //
+ // We want XPG6 behavior from libc and libm. See standards(5)
+ //pre_link_objects_exe: vec![
+ // "/usr/lib/amd64/values-Xc.o".into(),
+ // "/usr/lib/amd64/values-xpg6.o".into(),
+ //],
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/l4re_base.rs b/compiler/rustc_target/src/spec/l4re_base.rs
new file mode 100644
index 000000000..a08756861
--- /dev/null
+++ b/compiler/rustc_target/src/spec/l4re_base.rs
@@ -0,0 +1,14 @@
+use crate::spec::{cvs, LinkerFlavor, PanicStrategy, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "l4re".into(),
+ env: "uclibc".into(),
+ linker_flavor: LinkerFlavor::L4Bender,
+ panic_strategy: PanicStrategy::Abort,
+ linker: Some("l4-bender".into()),
+ linker_is_gnu: false,
+ families: cvs!["unix"],
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/linux_base.rs b/compiler/rustc_target/src/spec/linux_base.rs
new file mode 100644
index 000000000..f4fce3b40
--- /dev/null
+++ b/compiler/rustc_target/src/spec/linux_base.rs
@@ -0,0 +1,15 @@
+use crate::spec::{cvs, RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "linux".into(),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ has_rpath: true,
+ position_independent_executables: true,
+ relro_level: RelroLevel::Full,
+ has_thread_local: true,
+ crt_static_respected: true,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/linux_gnu_base.rs b/compiler/rustc_target/src/spec/linux_gnu_base.rs
new file mode 100644
index 000000000..8d6b3f185
--- /dev/null
+++ b/compiler/rustc_target/src/spec/linux_gnu_base.rs
@@ -0,0 +1,5 @@
+use crate::spec::TargetOptions;
+
+pub fn opts() -> TargetOptions {
+ TargetOptions { env: "gnu".into(), ..super::linux_base::opts() }
+}
diff --git a/compiler/rustc_target/src/spec/linux_kernel_base.rs b/compiler/rustc_target/src/spec/linux_kernel_base.rs
new file mode 100644
index 000000000..0f5d85205
--- /dev/null
+++ b/compiler/rustc_target/src/spec/linux_kernel_base.rs
@@ -0,0 +1,19 @@
+use crate::spec::TargetOptions;
+use crate::spec::{FramePointer, PanicStrategy, RelocModel, RelroLevel, StackProbeType};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ env: "gnu".into(),
+ disable_redzone: true,
+ panic_strategy: PanicStrategy::Abort,
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ stack_probes: StackProbeType::Call,
+ frame_pointer: FramePointer::Always,
+ position_independent_executables: true,
+ needs_plt: true,
+ relro_level: RelroLevel::Full,
+ relocation_model: RelocModel::Static,
+
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/linux_musl_base.rs b/compiler/rustc_target/src/spec/linux_musl_base.rs
new file mode 100644
index 000000000..207a87ab0
--- /dev/null
+++ b/compiler/rustc_target/src/spec/linux_musl_base.rs
@@ -0,0 +1,16 @@
+use crate::spec::crt_objects::{self, CrtObjectsFallback};
+use crate::spec::TargetOptions;
+
+pub fn opts() -> TargetOptions {
+ let mut base = super::linux_base::opts();
+
+ base.env = "musl".into();
+ base.pre_link_objects_fallback = crt_objects::pre_musl_fallback();
+ base.post_link_objects_fallback = crt_objects::post_musl_fallback();
+ base.crt_objects_fallback = Some(CrtObjectsFallback::Musl);
+
+ // These targets statically link libc by default
+ base.crt_static_default = true;
+
+ base
+}
diff --git a/compiler/rustc_target/src/spec/linux_uclibc_base.rs b/compiler/rustc_target/src/spec/linux_uclibc_base.rs
new file mode 100644
index 000000000..4ba480ffe
--- /dev/null
+++ b/compiler/rustc_target/src/spec/linux_uclibc_base.rs
@@ -0,0 +1,5 @@
+use crate::spec::TargetOptions;
+
+pub fn opts() -> TargetOptions {
+ TargetOptions { env: "uclibc".into(), ..super::linux_base::opts() }
+}
diff --git a/compiler/rustc_target/src/spec/m68k_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/m68k_unknown_linux_gnu.rs
new file mode 100644
index 000000000..ebd74012d
--- /dev/null
+++ b/compiler/rustc_target/src/spec/m68k_unknown_linux_gnu.rs
@@ -0,0 +1,15 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "m68k-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:16:32-i8:8:8-i16:16:16-i32:16:32-n8:16:32-a:0:16-S16".into(),
+ arch: "m68k".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mips64_openwrt_linux_musl.rs b/compiler/rustc_target/src/spec/mips64_openwrt_linux_musl.rs
new file mode 100644
index 000000000..3c6ef52c6
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mips64_openwrt_linux_musl.rs
@@ -0,0 +1,26 @@
+//! A target tuple for OpenWrt MIPS64 targets.
+
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "mips64r2".into();
+ base.features = "+mips64r2,+soft-float".into();
+ base.max_atomic_width = Some(64);
+ base.crt_static_default = false;
+
+ Target {
+ // LLVM doesn't recognize "muslabi64" yet.
+ llvm_target: "mips64-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".into(),
+ arch: "mips64".into(),
+ options: TargetOptions {
+ abi: "abi64".into(),
+ endian: Endian::Big,
+ mcount: "_mcount".into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs
new file mode 100644
index 000000000..fc5dbd114
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mips64_unknown_linux_gnuabi64.rs
@@ -0,0 +1,22 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mips64-unknown-linux-gnuabi64".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".into(),
+ arch: "mips64".into(),
+ options: TargetOptions {
+ abi: "abi64".into(),
+ endian: Endian::Big,
+ // NOTE(mips64r2) matches C toolchain
+ cpu: "mips64r2".into(),
+ features: "+mips64r2".into(),
+ max_atomic_width: Some(64),
+ mcount: "_mcount".into(),
+
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs b/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs
new file mode 100644
index 000000000..465e97a02
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs
@@ -0,0 +1,22 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "mips64r2".into();
+ base.features = "+mips64r2".into();
+ base.max_atomic_width = Some(64);
+ Target {
+ // LLVM doesn't recognize "muslabi64" yet.
+ llvm_target: "mips64-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".into(),
+ arch: "mips64".into(),
+ options: TargetOptions {
+ abi: "abi64".into(),
+ endian: Endian::Big,
+ mcount: "_mcount".into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs
new file mode 100644
index 000000000..e0d5f6f57
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mips64el_unknown_linux_gnuabi64.rs
@@ -0,0 +1,20 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mips64el-unknown-linux-gnuabi64".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".into(),
+ arch: "mips64".into(),
+ options: TargetOptions {
+ abi: "abi64".into(),
+ // NOTE(mips64r2) matches C toolchain
+ cpu: "mips64r2".into(),
+ features: "+mips64r2".into(),
+ max_atomic_width: Some(64),
+ mcount: "_mcount".into(),
+
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs b/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs
new file mode 100644
index 000000000..75575eb7e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs
@@ -0,0 +1,16 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "mips64r2".into();
+ base.features = "+mips64r2".into();
+ base.max_atomic_width = Some(64);
+ Target {
+ // LLVM doesn't recognize "muslabi64" yet.
+ llvm_target: "mips64el-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".into(),
+ arch: "mips64".into(),
+ options: TargetOptions { abi: "abi64".into(), mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs
new file mode 100644
index 000000000..8df8b0b4c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mips_unknown_linux_gnu.rs
@@ -0,0 +1,20 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mips-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+ options: TargetOptions {
+ endian: Endian::Big,
+ cpu: "mips32r2".into(),
+ features: "+mips32r2,+fpxx,+nooddspreg".into(),
+ max_atomic_width: Some(32),
+ mcount: "_mcount".into(),
+
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs
new file mode 100644
index 000000000..c2846313a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs
@@ -0,0 +1,17 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "mips32r2".into();
+ base.features = "+mips32r2,+soft-float".into();
+ base.max_atomic_width = Some(32);
+ base.crt_static_default = false;
+ Target {
+ llvm_target: "mips-unknown-linux-musl".into(),
+ pointer_width: 32,
+ data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs b/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs
new file mode 100644
index 000000000..c59bb5fdd
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mips_unknown_linux_uclibc.rs
@@ -0,0 +1,20 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mips-unknown-linux-uclibc".into(),
+ pointer_width: 32,
+ data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+ options: TargetOptions {
+ endian: Endian::Big,
+ cpu: "mips32r2".into(),
+ features: "+mips32r2,+soft-float".into(),
+ max_atomic_width: Some(32),
+ mcount: "_mcount".into(),
+
+ ..super::linux_uclibc_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mipsel_sony_psp.rs b/compiler/rustc_target/src/spec/mipsel_sony_psp.rs
new file mode 100644
index 000000000..cfc8ec21c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsel_sony_psp.rs
@@ -0,0 +1,34 @@
+use crate::spec::{cvs, Target, TargetOptions};
+use crate::spec::{LinkerFlavor, LldFlavor, RelocModel};
+
+// The PSP has custom linker requirements.
+const LINKER_SCRIPT: &str = include_str!("./mipsel_sony_psp_linker_script.ld");
+
+pub fn target() -> Target {
+ let pre_link_args = TargetOptions::link_args(LinkerFlavor::Ld, &["--emit-relocs", "--nmagic"]);
+
+ Target {
+ llvm_target: "mipsel-sony-psp".into(),
+ pointer_width: 32,
+ data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+
+ options: TargetOptions {
+ os: "psp".into(),
+ vendor: "sony".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ cpu: "mips2".into(),
+ linker: Some("rust-lld".into()),
+ relocation_model: RelocModel::Static,
+
+ // PSP FPU only supports single precision floats.
+ features: "+single-float".into(),
+
+ // PSP does not support trap-on-condition instructions.
+ llvm_args: cvs!["-mno-check-zero-division"],
+ pre_link_args,
+ link_script: Some(LINKER_SCRIPT.into()),
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mipsel_sony_psp_linker_script.ld b/compiler/rustc_target/src/spec/mipsel_sony_psp_linker_script.ld
new file mode 100644
index 000000000..9eb35ad9f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsel_sony_psp_linker_script.ld
@@ -0,0 +1,45 @@
+ENTRY(module_start)
+SECTIONS
+{
+ /* PRX format requires text to begin at 0 */
+ .text 0 : { *(.text .text.*) }
+
+ /* Sort stubs for convenient ordering */
+ .sceStub.text : { *(.sceStub.text) *(SORT(.sceStub.text.*)) }
+
+ /* PSP import library stub sections. Bundles together `.lib.stub.entry.*`
+ * sections for better `--gc-sections` support. */
+ .lib.stub.top : { *(.lib.stub.top) }
+ .lib.stub : { *(.lib.stub) *(.lib.stub.entry.*) }
+ .lib.stub.btm : { *(.lib.stub.btm) }
+
+ /* Keep these sections around, even though they may appear unused to the linker */
+ .lib.ent.top : { KEEP(*(.lib.ent.top)) }
+ .lib.ent : { KEEP(*(.lib.ent)) }
+ .lib.ent.btm : { KEEP(*(.lib.ent.btm)) }
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) }
+
+ /* Add symbols for LLVM's libunwind */
+ __eh_frame_hdr_start = SIZEOF(.eh_frame_hdr) > 0 ? ADDR(.eh_frame_hdr) : 0;
+ __eh_frame_hdr_end = SIZEOF(.eh_frame_hdr) > 0 ? . : 0;
+ .eh_frame :
+ {
+ __eh_frame_start = .;
+ KEEP(*(.eh_frame))
+ __eh_frame_end = .;
+ }
+
+ /* These are explicitly listed to avoid being merged into .rodata */
+ .rodata.sceResident : { *(.rodata.sceResident) *(.rodata.sceResident.*) }
+ .rodata.sceModuleInfo : { *(.rodata.sceModuleInfo) }
+ /* Sort NIDs for convenient ordering */
+ .rodata.sceNid : { *(.rodata.sceNid) *(SORT(.rodata.sceNid.*)) }
+
+ .rodata : { *(.rodata .rodata.*) }
+ .data : { *(.data .data.*) }
+ .gcc_except_table : { *(.gcc_except_table .gcc_except_table.*) }
+ .bss : { *(.bss .bss.*) }
+
+ /DISCARD/ : { *(.rel.sceStub.text .MIPS.abiflags .reginfo) }
+}
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs
new file mode 100644
index 000000000..01346e71a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_linux_gnu.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mipsel-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+
+ options: TargetOptions {
+ cpu: "mips32r2".into(),
+ features: "+mips32r2,+fpxx,+nooddspreg".into(),
+ max_atomic_width: Some(32),
+ mcount: "_mcount".into(),
+
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs
new file mode 100644
index 000000000..0e8f1a2c8
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs
@@ -0,0 +1,16 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "mips32r2".into();
+ base.features = "+mips32r2,+soft-float".into();
+ base.max_atomic_width = Some(32);
+ base.crt_static_default = false;
+ Target {
+ llvm_target: "mipsel-unknown-linux-musl".into(),
+ pointer_width: 32,
+ data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+ options: TargetOptions { mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs b/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs
new file mode 100644
index 000000000..834207458
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_linux_uclibc.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mipsel-unknown-linux-uclibc".into(),
+ pointer_width: 32,
+ data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+
+ options: TargetOptions {
+ cpu: "mips32r2".into(),
+ features: "+mips32r2,+soft-float".into(),
+ max_atomic_width: Some(32),
+ mcount: "_mcount".into(),
+
+ ..super::linux_uclibc_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mipsel_unknown_none.rs b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
new file mode 100644
index 000000000..fe2aa2de8
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsel_unknown_none.rs
@@ -0,0 +1,27 @@
+//! Bare MIPS32r2, little endian, softfloat, O32 calling convention
+//!
+//! Can be used for MIPS M4K core (e.g. on PIC32MX devices)
+
+use crate::spec::{LinkerFlavor, LldFlavor, RelocModel};
+use crate::spec::{PanicStrategy, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mipsel-unknown-none".into(),
+ pointer_width: 32,
+ data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+
+ options: TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ cpu: "mips32r2".into(),
+ features: "+mips32r2,+soft-float,+noabicalls".into(),
+ max_atomic_width: Some(32),
+ linker: Some("rust-lld".into()),
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ emit_debug_gdb_scripts: false,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs
new file mode 100644
index 000000000..1e066b271
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsisa32r6_unknown_linux_gnu.rs
@@ -0,0 +1,20 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mipsisa32r6-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "E-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+ options: TargetOptions {
+ endian: Endian::Big,
+ cpu: "mips32r6".into(),
+ features: "+mips32r6".into(),
+ max_atomic_width: Some(32),
+ mcount: "_mcount".into(),
+
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs
new file mode 100644
index 000000000..4785929c1
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsisa32r6el_unknown_linux_gnu.rs
@@ -0,0 +1,19 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mipsisa32r6el-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:m-p:32:32-i8:8:32-i16:16:32-i64:64-n32-S64".into(),
+ arch: "mips".into(),
+
+ options: TargetOptions {
+ cpu: "mips32r6".into(),
+ features: "+mips32r6".into(),
+ max_atomic_width: Some(32),
+ mcount: "_mcount".into(),
+
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs
new file mode 100644
index 000000000..766ac7680
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsisa64r6_unknown_linux_gnuabi64.rs
@@ -0,0 +1,22 @@
+use crate::abi::Endian;
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mipsisa64r6-unknown-linux-gnuabi64".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".into(),
+ arch: "mips64".into(),
+ options: TargetOptions {
+ abi: "abi64".into(),
+ endian: Endian::Big,
+ // NOTE(mips64r6) matches C toolchain
+ cpu: "mips64r6".into(),
+ features: "+mips64r6".into(),
+ max_atomic_width: Some(64),
+ mcount: "_mcount".into(),
+
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs b/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs
new file mode 100644
index 000000000..d2b07c654
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mipsisa64r6el_unknown_linux_gnuabi64.rs
@@ -0,0 +1,20 @@
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "mipsisa64r6el-unknown-linux-gnuabi64".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i8:8:32-i16:16:32-i64:64-n32:64-S128".into(),
+ arch: "mips64".into(),
+ options: TargetOptions {
+ abi: "abi64".into(),
+ // NOTE(mips64r6) matches C toolchain
+ cpu: "mips64r6".into(),
+ features: "+mips64r6".into(),
+ max_atomic_width: Some(64),
+ mcount: "_mcount".into(),
+
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs
new file mode 100644
index 000000000..f7abeafd3
--- /dev/null
+++ b/compiler/rustc_target/src/spec/mod.rs
@@ -0,0 +1,2585 @@
+//! [Flexible target specification.](https://github.com/rust-lang/rfcs/pull/131)
+//!
+//! Rust targets a wide variety of use cases, and in the interest of flexibility,
+//! allows new target triples to be defined in configuration files. Most users
+//! will not need to care about these, but this is invaluable when porting Rust
+//! to a new platform, and allows for an unprecedented level of control over how
+//! the compiler works.
+//!
+//! # Using custom targets
+//!
+//! A target triple, as passed via `rustc --target=TRIPLE`, will first be
+//! compared against the list of built-in targets. This is to ease distributing
+//! rustc (no need for configuration files) and also to hold these built-in
+//! targets as immutable and sacred. If `TRIPLE` is not one of the built-in
+//! targets, rustc will check if a file named `TRIPLE` exists. If it does, it
+//! will be loaded as the target configuration. If the file does not exist,
+//! rustc will search each directory in the environment variable
+//! `RUST_TARGET_PATH` for a file named `TRIPLE.json`. The first one found will
+//! be loaded. If no file is found in any of those directories, a fatal error
+//! will be given.
+//!
+//! Projects defining their own targets should use
+//! `--target=path/to/my-awesome-platform.json` instead of adding to
+//! `RUST_TARGET_PATH`.
+//!
+//! # Defining a new target
+//!
+//! Targets are defined using [JSON](https://json.org/). The `Target` struct in
+//! this module defines the format the JSON file should take, though each
+//! underscore in the field names should be replaced with a hyphen (`-`) in the
+//! JSON file. Some fields are required in every target specification, such as
+//! `llvm-target`, `target-endian`, `target-pointer-width`, `data-layout`,
+//! `arch`, and `os`. In general, options passed to rustc with `-C` override
+//! the target's settings, though `target-feature` and `link-args` will *add*
+//! to the list specified by the target, rather than replace.
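+//!
+//! For example, a minimal custom target specification might look like the following
+//! (illustrative values only; a real specification must use a `data-layout` that matches
+//! its `llvm-target`):
+//!
+//! ```json
+//! {
+//!     "llvm-target": "x86_64-unknown-none",
+//!     "data-layout": "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128",
+//!     "arch": "x86_64",
+//!     "target-endian": "little",
+//!     "target-pointer-width": "64",
+//!     "os": "none",
+//!     "linker-flavor": "ld.lld",
+//!     "panic-strategy": "abort"
+//! }
+//! ```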
+
+use crate::abi::Endian;
+use crate::json::{Json, ToJson};
+use crate::spec::abi::{lookup as lookup_abi, Abi};
+use crate::spec::crt_objects::{CrtObjects, CrtObjectsFallback};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_span::symbol::{sym, Symbol};
+use serde_json::Value;
+use std::borrow::Cow;
+use std::collections::BTreeMap;
+use std::convert::TryFrom;
+use std::hash::{Hash, Hasher};
+use std::iter::FromIterator;
+use std::ops::{Deref, DerefMut};
+use std::path::{Path, PathBuf};
+use std::str::FromStr;
+use std::{fmt, io};
+
+use rustc_macros::HashStable_Generic;
+
+pub mod abi;
+pub mod crt_objects;
+
+mod android_base;
+mod apple_base;
+mod apple_sdk_base;
+mod avr_gnu_base;
+mod bpf_base;
+mod dragonfly_base;
+mod freebsd_base;
+mod fuchsia_base;
+mod haiku_base;
+mod hermit_base;
+mod illumos_base;
+mod l4re_base;
+mod linux_base;
+mod linux_gnu_base;
+mod linux_kernel_base;
+mod linux_musl_base;
+mod linux_uclibc_base;
+mod msvc_base;
+mod netbsd_base;
+mod openbsd_base;
+mod redox_base;
+mod solaris_base;
+mod solid_base;
+mod thumb_base;
+mod uefi_msvc_base;
+mod vxworks_base;
+mod wasm_base;
+mod windows_gnu_base;
+mod windows_gnullvm_base;
+mod windows_msvc_base;
+mod windows_uwp_gnu_base;
+mod windows_uwp_msvc_base;
+
+#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum LinkerFlavor {
+ Em,
+ Gcc,
+ L4Bender,
+ Ld,
+ Msvc,
+ Lld(LldFlavor),
+ PtxLinker,
+ BpfLinker,
+}
+
+#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum LldFlavor {
+ Wasm,
+ Ld64,
+ Ld,
+ Link,
+}
+
+impl LldFlavor {
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ LldFlavor::Wasm => "wasm",
+ LldFlavor::Ld64 => "darwin",
+ LldFlavor::Ld => "gnu",
+ LldFlavor::Link => "link",
+ }
+ }
+
+ fn from_str(s: &str) -> Option<Self> {
+ Some(match s {
+ "darwin" => LldFlavor::Ld64,
+ "gnu" => LldFlavor::Ld,
+ "link" => LldFlavor::Link,
+ "wasm" => LldFlavor::Wasm,
+ _ => return None,
+ })
+ }
+}
+
+impl ToJson for LldFlavor {
+ fn to_json(&self) -> Json {
+ self.as_str().to_json()
+ }
+}
+
+impl ToJson for LinkerFlavor {
+ fn to_json(&self) -> Json {
+ self.desc().to_json()
+ }
+}
+macro_rules! flavor_mappings {
+ ($((($($flavor:tt)*), $string:expr),)*) => (
+ impl LinkerFlavor {
+ pub const fn one_of() -> &'static str {
+ concat!("one of: ", $($string, " ",)*)
+ }
+
+ pub fn from_str(s: &str) -> Option<Self> {
+ Some(match s {
+ $($string => $($flavor)*,)*
+ _ => return None,
+ })
+ }
+
+ pub fn desc(&self) -> &str {
+ match *self {
+ $($($flavor)* => $string,)*
+ }
+ }
+ }
+ )
+}
+
+flavor_mappings! {
+ ((LinkerFlavor::Em), "em"),
+ ((LinkerFlavor::Gcc), "gcc"),
+ ((LinkerFlavor::L4Bender), "l4-bender"),
+ ((LinkerFlavor::Ld), "ld"),
+ ((LinkerFlavor::Msvc), "msvc"),
+ ((LinkerFlavor::PtxLinker), "ptx-linker"),
+ ((LinkerFlavor::BpfLinker), "bpf-linker"),
+ ((LinkerFlavor::Lld(LldFlavor::Wasm)), "wasm-ld"),
+ ((LinkerFlavor::Lld(LldFlavor::Ld64)), "ld64.lld"),
+ ((LinkerFlavor::Lld(LldFlavor::Ld)), "ld.lld"),
+ ((LinkerFlavor::Lld(LldFlavor::Link)), "lld-link"),
+}
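+
+// For example, given the mappings above, `LinkerFlavor::from_str("ld.lld")` returns
+// `Some(LinkerFlavor::Lld(LldFlavor::Ld))`, and `desc()` maps that flavor back to "ld.lld".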
+
+#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable, HashStable_Generic)]
+pub enum PanicStrategy {
+ Unwind,
+ Abort,
+}
+
+impl PanicStrategy {
+ pub fn desc(&self) -> &str {
+ match *self {
+ PanicStrategy::Unwind => "unwind",
+ PanicStrategy::Abort => "abort",
+ }
+ }
+
+ pub const fn desc_symbol(&self) -> Symbol {
+ match *self {
+ PanicStrategy::Unwind => sym::unwind,
+ PanicStrategy::Abort => sym::abort,
+ }
+ }
+
+ pub const fn all() -> [Symbol; 2] {
+ [Self::Abort.desc_symbol(), Self::Unwind.desc_symbol()]
+ }
+}
+
+impl ToJson for PanicStrategy {
+ fn to_json(&self) -> Json {
+ match *self {
+ PanicStrategy::Abort => "abort".to_json(),
+ PanicStrategy::Unwind => "unwind".to_json(),
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Hash)]
+pub enum RelroLevel {
+ Full,
+ Partial,
+ Off,
+ None,
+}
+
+impl RelroLevel {
+ pub fn desc(&self) -> &str {
+ match *self {
+ RelroLevel::Full => "full",
+ RelroLevel::Partial => "partial",
+ RelroLevel::Off => "off",
+ RelroLevel::None => "none",
+ }
+ }
+}
+
+impl FromStr for RelroLevel {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<RelroLevel, ()> {
+ match s {
+ "full" => Ok(RelroLevel::Full),
+ "partial" => Ok(RelroLevel::Partial),
+ "off" => Ok(RelroLevel::Off),
+ "none" => Ok(RelroLevel::None),
+ _ => Err(()),
+ }
+ }
+}
+
+impl ToJson for RelroLevel {
+ fn to_json(&self) -> Json {
+ match *self {
+ RelroLevel::Full => "full".to_json(),
+ RelroLevel::Partial => "partial".to_json(),
+ RelroLevel::Off => "off".to_json(),
+ RelroLevel::None => "None".to_json(),
+ }
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Hash)]
+pub enum MergeFunctions {
+ Disabled,
+ Trampolines,
+ Aliases,
+}
+
+impl MergeFunctions {
+ pub fn desc(&self) -> &str {
+ match *self {
+ MergeFunctions::Disabled => "disabled",
+ MergeFunctions::Trampolines => "trampolines",
+ MergeFunctions::Aliases => "aliases",
+ }
+ }
+}
+
+impl FromStr for MergeFunctions {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<MergeFunctions, ()> {
+ match s {
+ "disabled" => Ok(MergeFunctions::Disabled),
+ "trampolines" => Ok(MergeFunctions::Trampolines),
+ "aliases" => Ok(MergeFunctions::Aliases),
+ _ => Err(()),
+ }
+ }
+}
+
+impl ToJson for MergeFunctions {
+ fn to_json(&self) -> Json {
+ match *self {
+ MergeFunctions::Disabled => "disabled".to_json(),
+ MergeFunctions::Trampolines => "trampolines".to_json(),
+ MergeFunctions::Aliases => "aliases".to_json(),
+ }
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum RelocModel {
+ Static,
+ Pic,
+ Pie,
+ DynamicNoPic,
+ Ropi,
+ Rwpi,
+ RopiRwpi,
+}
+
+impl FromStr for RelocModel {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<RelocModel, ()> {
+ Ok(match s {
+ "static" => RelocModel::Static,
+ "pic" => RelocModel::Pic,
+ "pie" => RelocModel::Pie,
+ "dynamic-no-pic" => RelocModel::DynamicNoPic,
+ "ropi" => RelocModel::Ropi,
+ "rwpi" => RelocModel::Rwpi,
+ "ropi-rwpi" => RelocModel::RopiRwpi,
+ _ => return Err(()),
+ })
+ }
+}
+
+impl ToJson for RelocModel {
+ fn to_json(&self) -> Json {
+ match *self {
+ RelocModel::Static => "static",
+ RelocModel::Pic => "pic",
+ RelocModel::Pie => "pie",
+ RelocModel::DynamicNoPic => "dynamic-no-pic",
+ RelocModel::Ropi => "ropi",
+ RelocModel::Rwpi => "rwpi",
+ RelocModel::RopiRwpi => "ropi-rwpi",
+ }
+ .to_json()
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum CodeModel {
+ Tiny,
+ Small,
+ Kernel,
+ Medium,
+ Large,
+}
+
+impl FromStr for CodeModel {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<CodeModel, ()> {
+ Ok(match s {
+ "tiny" => CodeModel::Tiny,
+ "small" => CodeModel::Small,
+ "kernel" => CodeModel::Kernel,
+ "medium" => CodeModel::Medium,
+ "large" => CodeModel::Large,
+ _ => return Err(()),
+ })
+ }
+}
+
+impl ToJson for CodeModel {
+ fn to_json(&self) -> Json {
+ match *self {
+ CodeModel::Tiny => "tiny",
+ CodeModel::Small => "small",
+ CodeModel::Kernel => "kernel",
+ CodeModel::Medium => "medium",
+ CodeModel::Large => "large",
+ }
+ .to_json()
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum TlsModel {
+ GeneralDynamic,
+ LocalDynamic,
+ InitialExec,
+ LocalExec,
+}
+
+impl FromStr for TlsModel {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<TlsModel, ()> {
+ Ok(match s {
+ // Note the "general" vs "global" difference: the model name is "general",
+ // but the user-facing option name is "global" for consistency with other compilers.
+ "global-dynamic" => TlsModel::GeneralDynamic,
+ "local-dynamic" => TlsModel::LocalDynamic,
+ "initial-exec" => TlsModel::InitialExec,
+ "local-exec" => TlsModel::LocalExec,
+ _ => return Err(()),
+ })
+ }
+}
+
+impl ToJson for TlsModel {
+ fn to_json(&self) -> Json {
+ match *self {
+ TlsModel::GeneralDynamic => "global-dynamic",
+ TlsModel::LocalDynamic => "local-dynamic",
+ TlsModel::InitialExec => "initial-exec",
+ TlsModel::LocalExec => "local-exec",
+ }
+ .to_json()
+ }
+}
+
+/// Everything is flattened to a single enum to make the json encoding/decoding less annoying.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
+pub enum LinkOutputKind {
+ /// Dynamically linked non position-independent executable.
+ DynamicNoPicExe,
+ /// Dynamically linked position-independent executable.
+ DynamicPicExe,
+ /// Statically linked non position-independent executable.
+ StaticNoPicExe,
+ /// Statically linked position-independent executable.
+ StaticPicExe,
+ /// Regular dynamic library ("dynamically linked").
+ DynamicDylib,
+ /// Dynamic library with bundled libc ("statically linked").
+ StaticDylib,
+ /// WASI module with a lifetime past the _initialize entry point
+ WasiReactorExe,
+}
+
+impl LinkOutputKind {
+ fn as_str(&self) -> &'static str {
+ match self {
+ LinkOutputKind::DynamicNoPicExe => "dynamic-nopic-exe",
+ LinkOutputKind::DynamicPicExe => "dynamic-pic-exe",
+ LinkOutputKind::StaticNoPicExe => "static-nopic-exe",
+ LinkOutputKind::StaticPicExe => "static-pic-exe",
+ LinkOutputKind::DynamicDylib => "dynamic-dylib",
+ LinkOutputKind::StaticDylib => "static-dylib",
+ LinkOutputKind::WasiReactorExe => "wasi-reactor-exe",
+ }
+ }
+
+ pub(super) fn from_str(s: &str) -> Option<LinkOutputKind> {
+ Some(match s {
+ "dynamic-nopic-exe" => LinkOutputKind::DynamicNoPicExe,
+ "dynamic-pic-exe" => LinkOutputKind::DynamicPicExe,
+ "static-nopic-exe" => LinkOutputKind::StaticNoPicExe,
+ "static-pic-exe" => LinkOutputKind::StaticPicExe,
+ "dynamic-dylib" => LinkOutputKind::DynamicDylib,
+ "static-dylib" => LinkOutputKind::StaticDylib,
+ "wasi-reactor-exe" => LinkOutputKind::WasiReactorExe,
+ _ => return None,
+ })
+ }
+}
+
+impl fmt::Display for LinkOutputKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(self.as_str())
+ }
+}
+
+pub type LinkArgs = BTreeMap<LinkerFlavor, Vec<StaticCow<str>>>;
+
+#[derive(Clone, Copy, Hash, Debug, PartialEq, Eq)]
+pub enum SplitDebuginfo {
+ /// Split debug-information is disabled, meaning that on supported platforms
+ /// you can find all debug information in the executable itself. In practice,
+ /// this is only supported on ELF platforms.
+ ///
+ /// * Windows - not supported
+ /// * macOS - don't run `dsymutil`
+ /// * ELF - `.dwarf_*` sections
+ Off,
+
+ /// Split debug-information can be found in a "packed" location separate
+ /// from the final artifact. This is supported on all platforms.
+ ///
+ /// * Windows - `*.pdb`
+ /// * macOS - `*.dSYM` (run `dsymutil`)
+ /// * ELF - `*.dwp` (run `rust-llvm-dwp`)
+ Packed,
+
+ /// Split debug-information can be found in individual object files on the
+ /// filesystem. The main executable may point to the object files.
+ ///
+ /// * Windows - not supported
+ /// * macOS - supported, scattered object files
+ /// * ELF - supported, scattered `*.dwo` or `*.o` files (see `SplitDwarfKind`)
+ Unpacked,
+}
+
+impl SplitDebuginfo {
+ fn as_str(&self) -> &'static str {
+ match self {
+ SplitDebuginfo::Off => "off",
+ SplitDebuginfo::Packed => "packed",
+ SplitDebuginfo::Unpacked => "unpacked",
+ }
+ }
+}
+
+impl FromStr for SplitDebuginfo {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<SplitDebuginfo, ()> {
+ Ok(match s {
+ "off" => SplitDebuginfo::Off,
+ "unpacked" => SplitDebuginfo::Unpacked,
+ "packed" => SplitDebuginfo::Packed,
+ _ => return Err(()),
+ })
+ }
+}
+
+impl ToJson for SplitDebuginfo {
+ fn to_json(&self) -> Json {
+ self.as_str().to_json()
+ }
+}
+
+impl fmt::Display for SplitDebuginfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(self.as_str())
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum StackProbeType {
+ /// Don't emit any stack probes.
+ None,
+ /// Insert stack probes inline. It is harmless to use this option even on targets that do
+ /// not have backend support for stack probes, as the failure mode is the same as if no
+ /// stack-probe option was specified in the first place.
+ Inline,
+ /// Call `__rust_probestack` whenever stack needs to be probed.
+ Call,
+ /// Use inline option for LLVM versions later than specified in `min_llvm_version_for_inline`
+ /// and call `__rust_probestack` otherwise.
+ InlineOrCall { min_llvm_version_for_inline: (u32, u32, u32) },
+}
+
+impl StackProbeType {
+ fn from_json(json: &Json) -> Result<Self, String> {
+ let object = json.as_object().ok_or_else(|| "expected a JSON object")?;
+ let kind = object
+ .get("kind")
+ .and_then(|o| o.as_str())
+ .ok_or_else(|| "expected `kind` to be a string")?;
+ match kind {
+ "none" => Ok(StackProbeType::None),
+ "inline" => Ok(StackProbeType::Inline),
+ "call" => Ok(StackProbeType::Call),
+ "inline-or-call" => {
+ let min_version = object
+ .get("min-llvm-version-for-inline")
+ .and_then(|o| o.as_array())
+ .ok_or_else(|| "expected `min-llvm-version-for-inline` to be an array")?;
+ let mut iter = min_version.into_iter().map(|v| {
+ let int = v.as_u64().ok_or_else(
+ || "expected `min-llvm-version-for-inline` values to be integers",
+ )?;
+ u32::try_from(int)
+ .map_err(|_| "`min-llvm-version-for-inline` values don't convert to u32")
+ });
+ let min_llvm_version_for_inline = (
+ iter.next().unwrap_or(Ok(11))?,
+ iter.next().unwrap_or(Ok(0))?,
+ iter.next().unwrap_or(Ok(0))?,
+ );
+ Ok(StackProbeType::InlineOrCall { min_llvm_version_for_inline })
+ }
+ _ => Err(String::from(
+ "`kind` expected to be one of `none`, `inline`, `call` or `inline-or-call`",
+ )),
+ }
+ }
+}
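+
+// For illustration, `StackProbeType::from_json` accepts JSON objects such as
+// `{ "kind": "call" }` or
+// `{ "kind": "inline-or-call", "min-llvm-version-for-inline": [11, 0, 0] }`;
+// unspecified version components default to 11.0.0.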
+
+impl ToJson for StackProbeType {
+ fn to_json(&self) -> Json {
+ Json::Object(match self {
+ StackProbeType::None => {
+ [(String::from("kind"), "none".to_json())].into_iter().collect()
+ }
+ StackProbeType::Inline => {
+ [(String::from("kind"), "inline".to_json())].into_iter().collect()
+ }
+ StackProbeType::Call => {
+ [(String::from("kind"), "call".to_json())].into_iter().collect()
+ }
+ StackProbeType::InlineOrCall { min_llvm_version_for_inline: (maj, min, patch) } => [
+ (String::from("kind"), "inline-or-call".to_json()),
+ (
+ String::from("min-llvm-version-for-inline"),
+ Json::Array(vec![maj.to_json(), min.to_json(), patch.to_json()]),
+ ),
+ ]
+ .into_iter()
+ .collect(),
+ })
+ }
+}
+
+bitflags::bitflags! {
+ #[derive(Default, Encodable, Decodable)]
+ pub struct SanitizerSet: u8 {
+ const ADDRESS = 1 << 0;
+ const LEAK = 1 << 1;
+ const MEMORY = 1 << 2;
+ const THREAD = 1 << 3;
+ const HWADDRESS = 1 << 4;
+ const CFI = 1 << 5;
+ const MEMTAG = 1 << 6;
+ const SHADOWCALLSTACK = 1 << 7;
+ }
+}
+
+impl SanitizerSet {
+ /// Returns the sanitizer's name.
+ ///
+ /// Returns `None` unless the set contains exactly one sanitizer.
+ pub fn as_str(self) -> Option<&'static str> {
+ Some(match self {
+ SanitizerSet::ADDRESS => "address",
+ SanitizerSet::CFI => "cfi",
+ SanitizerSet::LEAK => "leak",
+ SanitizerSet::MEMORY => "memory",
+ SanitizerSet::MEMTAG => "memtag",
+ SanitizerSet::SHADOWCALLSTACK => "shadow-call-stack",
+ SanitizerSet::THREAD => "thread",
+ SanitizerSet::HWADDRESS => "hwaddress",
+ _ => return None,
+ })
+ }
+}
+
+/// Formats a sanitizer set as a comma separated list of sanitizers' names.
+impl fmt::Display for SanitizerSet {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut first = true;
+ for s in *self {
+ let name = s.as_str().unwrap_or_else(|| panic!("unrecognized sanitizer {:?}", s));
+ if !first {
+ f.write_str(", ")?;
+ }
+ f.write_str(name)?;
+ first = false;
+ }
+ Ok(())
+ }
+}
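+
+// For example, `SanitizerSet::ADDRESS | SanitizerSet::LEAK` displays as "address, leak",
+// following the fixed ordering used by `into_iter` below.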
+
+impl IntoIterator for SanitizerSet {
+ type Item = SanitizerSet;
+ type IntoIter = std::vec::IntoIter<SanitizerSet>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ [
+ SanitizerSet::ADDRESS,
+ SanitizerSet::CFI,
+ SanitizerSet::LEAK,
+ SanitizerSet::MEMORY,
+ SanitizerSet::MEMTAG,
+ SanitizerSet::SHADOWCALLSTACK,
+ SanitizerSet::THREAD,
+ SanitizerSet::HWADDRESS,
+ ]
+ .iter()
+ .copied()
+ .filter(|&s| self.contains(s))
+ .collect::<Vec<_>>()
+ .into_iter()
+ }
+}
+
+impl<CTX> HashStable<CTX> for SanitizerSet {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.bits().hash_stable(ctx, hasher);
+ }
+}
+
+impl ToJson for SanitizerSet {
+ fn to_json(&self) -> Json {
+ self.into_iter()
+ .map(|v| Some(v.as_str()?.to_json()))
+ .collect::<Option<Vec<_>>>()
+ .unwrap_or_default()
+ .to_json()
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Hash, Debug)]
+pub enum FramePointer {
+ /// Forces the machine code generator to always preserve the frame pointers.
+ Always,
+ /// Forces the machine code generator to preserve the frame pointers except for the leaf
+ /// functions (i.e. those that don't call other functions).
+ NonLeaf,
+ /// Allows the machine code generator to omit the frame pointers.
+ ///
+ /// This option does not guarantee that the frame pointers will be omitted.
+ MayOmit,
+}
+
+impl FromStr for FramePointer {
+ type Err = ();
+ fn from_str(s: &str) -> Result<Self, ()> {
+ Ok(match s {
+ "always" => Self::Always,
+ "non-leaf" => Self::NonLeaf,
+ "may-omit" => Self::MayOmit,
+ _ => return Err(()),
+ })
+ }
+}
+
+impl ToJson for FramePointer {
+ fn to_json(&self) -> Json {
+ match *self {
+ Self::Always => "always",
+ Self::NonLeaf => "non-leaf",
+ Self::MayOmit => "may-omit",
+ }
+ .to_json()
+ }
+}
+
+/// Controls use of stack canaries.
+#[derive(Clone, Copy, Debug, PartialEq, Hash, Eq)]
+pub enum StackProtector {
+ /// Disable stack canary generation.
+ None,
+
+ /// On LLVM, mark all generated LLVM functions with the `ssp` attribute (see
+ /// llvm/docs/LangRef.rst). This triggers stack canary generation in
+ /// functions which contain an array of a byte-sized type with more than
+ /// eight elements.
+ Basic,
+
+ /// On LLVM, mark all generated LLVM functions with the `sspstrong`
+ /// attribute (see llvm/docs/LangRef.rst). This triggers stack canary
+ /// generation in functions which either contain an array, or which take
+ /// the address of a local variable.
+ Strong,
+
+ /// Generate stack canaries in all functions.
+ All,
+}
+
+impl StackProtector {
+ fn as_str(&self) -> &'static str {
+ match self {
+ StackProtector::None => "none",
+ StackProtector::Basic => "basic",
+ StackProtector::Strong => "strong",
+ StackProtector::All => "all",
+ }
+ }
+}
+
+impl FromStr for StackProtector {
+ type Err = ();
+
+ fn from_str(s: &str) -> Result<StackProtector, ()> {
+ Ok(match s {
+ "none" => StackProtector::None,
+ "basic" => StackProtector::Basic,
+ "strong" => StackProtector::Strong,
+ "all" => StackProtector::All,
+ _ => return Err(()),
+ })
+ }
+}
+
+impl fmt::Display for StackProtector {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(self.as_str())
+ }
+}
+
+macro_rules! supported_targets {
+ ( $(($( $triple:literal, )+ $module:ident ),)+ ) => {
+ $(mod $module;)+
+
+ /// List of supported targets
+ pub const TARGETS: &[&str] = &[$($($triple),+),+];
+
+ fn load_builtin(target: &str) -> Option<Target> {
+ let mut t = match target {
+ $( $($triple)|+ => $module::target(), )+
+ _ => return None,
+ };
+ t.is_builtin = true;
+ debug!("got builtin target: {:?}", t);
+ Some(t)
+ }
+
+ #[cfg(test)]
+ mod tests {
+ mod tests_impl;
+
+ // Cannot put this into a separate file without duplication, so make an exception.
+ $(
+ #[test] // `#[test]`
+ fn $module() {
+ tests_impl::test_target(super::$module::target());
+ }
+ )+
+ }
+ };
+}
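+
+// For example, the `supported_targets!` invocation below expands to one `mod` declaration
+// per target module, the `TARGETS` list of triples, the `load_builtin` lookup, and one
+// `#[test]` per module.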
+
+supported_targets! {
+ ("x86_64-unknown-linux-gnu", x86_64_unknown_linux_gnu),
+ ("x86_64-unknown-linux-gnux32", x86_64_unknown_linux_gnux32),
+ ("i686-unknown-linux-gnu", i686_unknown_linux_gnu),
+ ("i586-unknown-linux-gnu", i586_unknown_linux_gnu),
+ ("m68k-unknown-linux-gnu", m68k_unknown_linux_gnu),
+ ("mips-unknown-linux-gnu", mips_unknown_linux_gnu),
+ ("mips64-unknown-linux-gnuabi64", mips64_unknown_linux_gnuabi64),
+ ("mips64el-unknown-linux-gnuabi64", mips64el_unknown_linux_gnuabi64),
+ ("mipsisa32r6-unknown-linux-gnu", mipsisa32r6_unknown_linux_gnu),
+ ("mipsisa32r6el-unknown-linux-gnu", mipsisa32r6el_unknown_linux_gnu),
+ ("mipsisa64r6-unknown-linux-gnuabi64", mipsisa64r6_unknown_linux_gnuabi64),
+ ("mipsisa64r6el-unknown-linux-gnuabi64", mipsisa64r6el_unknown_linux_gnuabi64),
+ ("mipsel-unknown-linux-gnu", mipsel_unknown_linux_gnu),
+ ("powerpc-unknown-linux-gnu", powerpc_unknown_linux_gnu),
+ ("powerpc-unknown-linux-gnuspe", powerpc_unknown_linux_gnuspe),
+ ("powerpc-unknown-linux-musl", powerpc_unknown_linux_musl),
+ ("powerpc64-unknown-linux-gnu", powerpc64_unknown_linux_gnu),
+ ("powerpc64-unknown-linux-musl", powerpc64_unknown_linux_musl),
+ ("powerpc64le-unknown-linux-gnu", powerpc64le_unknown_linux_gnu),
+ ("powerpc64le-unknown-linux-musl", powerpc64le_unknown_linux_musl),
+ ("s390x-unknown-linux-gnu", s390x_unknown_linux_gnu),
+ ("s390x-unknown-linux-musl", s390x_unknown_linux_musl),
+ ("sparc-unknown-linux-gnu", sparc_unknown_linux_gnu),
+ ("sparc64-unknown-linux-gnu", sparc64_unknown_linux_gnu),
+ ("arm-unknown-linux-gnueabi", arm_unknown_linux_gnueabi),
+ ("arm-unknown-linux-gnueabihf", arm_unknown_linux_gnueabihf),
+ ("arm-unknown-linux-musleabi", arm_unknown_linux_musleabi),
+ ("arm-unknown-linux-musleabihf", arm_unknown_linux_musleabihf),
+ ("armv4t-unknown-linux-gnueabi", armv4t_unknown_linux_gnueabi),
+ ("armv5te-unknown-linux-gnueabi", armv5te_unknown_linux_gnueabi),
+ ("armv5te-unknown-linux-musleabi", armv5te_unknown_linux_musleabi),
+ ("armv5te-unknown-linux-uclibceabi", armv5te_unknown_linux_uclibceabi),
+ ("armv7-unknown-linux-gnueabi", armv7_unknown_linux_gnueabi),
+ ("armv7-unknown-linux-gnueabihf", armv7_unknown_linux_gnueabihf),
+ ("thumbv7neon-unknown-linux-gnueabihf", thumbv7neon_unknown_linux_gnueabihf),
+ ("thumbv7neon-unknown-linux-musleabihf", thumbv7neon_unknown_linux_musleabihf),
+ ("armv7-unknown-linux-musleabi", armv7_unknown_linux_musleabi),
+ ("armv7-unknown-linux-musleabihf", armv7_unknown_linux_musleabihf),
+ ("aarch64-unknown-linux-gnu", aarch64_unknown_linux_gnu),
+ ("aarch64-unknown-linux-musl", aarch64_unknown_linux_musl),
+ ("x86_64-unknown-linux-musl", x86_64_unknown_linux_musl),
+ ("i686-unknown-linux-musl", i686_unknown_linux_musl),
+ ("i586-unknown-linux-musl", i586_unknown_linux_musl),
+ ("mips-unknown-linux-musl", mips_unknown_linux_musl),
+ ("mipsel-unknown-linux-musl", mipsel_unknown_linux_musl),
+ ("mips64-unknown-linux-muslabi64", mips64_unknown_linux_muslabi64),
+ ("mips64el-unknown-linux-muslabi64", mips64el_unknown_linux_muslabi64),
+ ("hexagon-unknown-linux-musl", hexagon_unknown_linux_musl),
+
+ ("mips-unknown-linux-uclibc", mips_unknown_linux_uclibc),
+ ("mipsel-unknown-linux-uclibc", mipsel_unknown_linux_uclibc),
+
+ ("i686-linux-android", i686_linux_android),
+ ("x86_64-linux-android", x86_64_linux_android),
+ ("arm-linux-androideabi", arm_linux_androideabi),
+ ("armv7-linux-androideabi", armv7_linux_androideabi),
+ ("thumbv7neon-linux-androideabi", thumbv7neon_linux_androideabi),
+ ("aarch64-linux-android", aarch64_linux_android),
+
+ ("x86_64-unknown-none-linuxkernel", x86_64_unknown_none_linuxkernel),
+
+ ("aarch64-unknown-freebsd", aarch64_unknown_freebsd),
+ ("armv6-unknown-freebsd", armv6_unknown_freebsd),
+ ("armv7-unknown-freebsd", armv7_unknown_freebsd),
+ ("i686-unknown-freebsd", i686_unknown_freebsd),
+ ("powerpc-unknown-freebsd", powerpc_unknown_freebsd),
+ ("powerpc64-unknown-freebsd", powerpc64_unknown_freebsd),
+ ("powerpc64le-unknown-freebsd", powerpc64le_unknown_freebsd),
+ ("riscv64gc-unknown-freebsd", riscv64gc_unknown_freebsd),
+ ("x86_64-unknown-freebsd", x86_64_unknown_freebsd),
+
+ ("x86_64-unknown-dragonfly", x86_64_unknown_dragonfly),
+
+ ("aarch64-unknown-openbsd", aarch64_unknown_openbsd),
+ ("i686-unknown-openbsd", i686_unknown_openbsd),
+ ("sparc64-unknown-openbsd", sparc64_unknown_openbsd),
+ ("x86_64-unknown-openbsd", x86_64_unknown_openbsd),
+ ("powerpc-unknown-openbsd", powerpc_unknown_openbsd),
+
+ ("aarch64-unknown-netbsd", aarch64_unknown_netbsd),
+ ("armv6-unknown-netbsd-eabihf", armv6_unknown_netbsd_eabihf),
+ ("armv7-unknown-netbsd-eabihf", armv7_unknown_netbsd_eabihf),
+ ("i686-unknown-netbsd", i686_unknown_netbsd),
+ ("powerpc-unknown-netbsd", powerpc_unknown_netbsd),
+ ("sparc64-unknown-netbsd", sparc64_unknown_netbsd),
+ ("x86_64-unknown-netbsd", x86_64_unknown_netbsd),
+
+ ("i686-unknown-haiku", i686_unknown_haiku),
+ ("x86_64-unknown-haiku", x86_64_unknown_haiku),
+
+ ("aarch64-apple-darwin", aarch64_apple_darwin),
+ ("x86_64-apple-darwin", x86_64_apple_darwin),
+ ("i686-apple-darwin", i686_apple_darwin),
+
+ ("aarch64-fuchsia", aarch64_fuchsia),
+ ("x86_64-fuchsia", x86_64_fuchsia),
+
+ ("avr-unknown-gnu-atmega328", avr_unknown_gnu_atmega328),
+
+ ("x86_64-unknown-l4re-uclibc", x86_64_unknown_l4re_uclibc),
+
+ ("aarch64-unknown-redox", aarch64_unknown_redox),
+ ("x86_64-unknown-redox", x86_64_unknown_redox),
+
+ ("i386-apple-ios", i386_apple_ios),
+ ("x86_64-apple-ios", x86_64_apple_ios),
+ ("aarch64-apple-ios", aarch64_apple_ios),
+ ("armv7-apple-ios", armv7_apple_ios),
+ ("armv7s-apple-ios", armv7s_apple_ios),
+ ("x86_64-apple-ios-macabi", x86_64_apple_ios_macabi),
+ ("aarch64-apple-ios-macabi", aarch64_apple_ios_macabi),
+ ("aarch64-apple-ios-sim", aarch64_apple_ios_sim),
+ ("aarch64-apple-tvos", aarch64_apple_tvos),
+ ("x86_64-apple-tvos", x86_64_apple_tvos),
+
+ ("armv7k-apple-watchos", armv7k_apple_watchos),
+ ("arm64_32-apple-watchos", arm64_32_apple_watchos),
+ ("x86_64-apple-watchos-sim", x86_64_apple_watchos_sim),
+ ("aarch64-apple-watchos-sim", aarch64_apple_watchos_sim),
+
+ ("armebv7r-none-eabi", armebv7r_none_eabi),
+ ("armebv7r-none-eabihf", armebv7r_none_eabihf),
+ ("armv7r-none-eabi", armv7r_none_eabi),
+ ("armv7r-none-eabihf", armv7r_none_eabihf),
+
+ ("x86_64-pc-solaris", x86_64_pc_solaris),
+ ("x86_64-sun-solaris", x86_64_sun_solaris),
+ ("sparcv9-sun-solaris", sparcv9_sun_solaris),
+
+ ("x86_64-unknown-illumos", x86_64_unknown_illumos),
+
+ ("x86_64-pc-windows-gnu", x86_64_pc_windows_gnu),
+ ("i686-pc-windows-gnu", i686_pc_windows_gnu),
+ ("i686-uwp-windows-gnu", i686_uwp_windows_gnu),
+ ("x86_64-uwp-windows-gnu", x86_64_uwp_windows_gnu),
+
+ ("aarch64-pc-windows-gnullvm", aarch64_pc_windows_gnullvm),
+ ("x86_64-pc-windows-gnullvm", x86_64_pc_windows_gnullvm),
+
+ ("aarch64-pc-windows-msvc", aarch64_pc_windows_msvc),
+ ("aarch64-uwp-windows-msvc", aarch64_uwp_windows_msvc),
+ ("x86_64-pc-windows-msvc", x86_64_pc_windows_msvc),
+ ("x86_64-uwp-windows-msvc", x86_64_uwp_windows_msvc),
+ ("i686-pc-windows-msvc", i686_pc_windows_msvc),
+ ("i686-uwp-windows-msvc", i686_uwp_windows_msvc),
+ ("i586-pc-windows-msvc", i586_pc_windows_msvc),
+ ("thumbv7a-pc-windows-msvc", thumbv7a_pc_windows_msvc),
+ ("thumbv7a-uwp-windows-msvc", thumbv7a_uwp_windows_msvc),
+
+ ("asmjs-unknown-emscripten", asmjs_unknown_emscripten),
+ ("wasm32-unknown-emscripten", wasm32_unknown_emscripten),
+ ("wasm32-unknown-unknown", wasm32_unknown_unknown),
+ ("wasm32-wasi", wasm32_wasi),
+ ("wasm64-unknown-unknown", wasm64_unknown_unknown),
+
+ ("thumbv6m-none-eabi", thumbv6m_none_eabi),
+ ("thumbv7m-none-eabi", thumbv7m_none_eabi),
+ ("thumbv7em-none-eabi", thumbv7em_none_eabi),
+ ("thumbv7em-none-eabihf", thumbv7em_none_eabihf),
+ ("thumbv8m.base-none-eabi", thumbv8m_base_none_eabi),
+ ("thumbv8m.main-none-eabi", thumbv8m_main_none_eabi),
+ ("thumbv8m.main-none-eabihf", thumbv8m_main_none_eabihf),
+
+ ("armv7a-none-eabi", armv7a_none_eabi),
+ ("armv7a-none-eabihf", armv7a_none_eabihf),
+
+ ("msp430-none-elf", msp430_none_elf),
+
+ ("aarch64-unknown-hermit", aarch64_unknown_hermit),
+ ("x86_64-unknown-hermit", x86_64_unknown_hermit),
+
+ ("riscv32i-unknown-none-elf", riscv32i_unknown_none_elf),
+ ("riscv32im-unknown-none-elf", riscv32im_unknown_none_elf),
+ ("riscv32imc-unknown-none-elf", riscv32imc_unknown_none_elf),
+ ("riscv32imc-esp-espidf", riscv32imc_esp_espidf),
+ ("riscv32imac-unknown-none-elf", riscv32imac_unknown_none_elf),
+ ("riscv32imac-unknown-xous-elf", riscv32imac_unknown_xous_elf),
+ ("riscv32gc-unknown-linux-gnu", riscv32gc_unknown_linux_gnu),
+ ("riscv32gc-unknown-linux-musl", riscv32gc_unknown_linux_musl),
+ ("riscv64imac-unknown-none-elf", riscv64imac_unknown_none_elf),
+ ("riscv64gc-unknown-none-elf", riscv64gc_unknown_none_elf),
+ ("riscv64gc-unknown-linux-gnu", riscv64gc_unknown_linux_gnu),
+ ("riscv64gc-unknown-linux-musl", riscv64gc_unknown_linux_musl),
+
+ ("aarch64-unknown-none", aarch64_unknown_none),
+ ("aarch64-unknown-none-softfloat", aarch64_unknown_none_softfloat),
+
+ ("x86_64-fortanix-unknown-sgx", x86_64_fortanix_unknown_sgx),
+
+ ("x86_64-unknown-uefi", x86_64_unknown_uefi),
+ ("i686-unknown-uefi", i686_unknown_uefi),
+ ("aarch64-unknown-uefi", aarch64_unknown_uefi),
+
+ ("nvptx64-nvidia-cuda", nvptx64_nvidia_cuda),
+
+ ("i686-wrs-vxworks", i686_wrs_vxworks),
+ ("x86_64-wrs-vxworks", x86_64_wrs_vxworks),
+ ("armv7-wrs-vxworks-eabihf", armv7_wrs_vxworks_eabihf),
+ ("aarch64-wrs-vxworks", aarch64_wrs_vxworks),
+ ("powerpc-wrs-vxworks", powerpc_wrs_vxworks),
+ ("powerpc-wrs-vxworks-spe", powerpc_wrs_vxworks_spe),
+ ("powerpc64-wrs-vxworks", powerpc64_wrs_vxworks),
+
+ ("aarch64-kmc-solid_asp3", aarch64_kmc_solid_asp3),
+ ("armv7a-kmc-solid_asp3-eabi", armv7a_kmc_solid_asp3_eabi),
+ ("armv7a-kmc-solid_asp3-eabihf", armv7a_kmc_solid_asp3_eabihf),
+
+ ("mipsel-sony-psp", mipsel_sony_psp),
+ ("mipsel-unknown-none", mipsel_unknown_none),
+ ("thumbv4t-none-eabi", thumbv4t_none_eabi),
+
+ ("aarch64_be-unknown-linux-gnu", aarch64_be_unknown_linux_gnu),
+ ("aarch64-unknown-linux-gnu_ilp32", aarch64_unknown_linux_gnu_ilp32),
+ ("aarch64_be-unknown-linux-gnu_ilp32", aarch64_be_unknown_linux_gnu_ilp32),
+
+ ("bpfeb-unknown-none", bpfeb_unknown_none),
+ ("bpfel-unknown-none", bpfel_unknown_none),
+
+ ("armv6k-nintendo-3ds", armv6k_nintendo_3ds),
+
+ ("aarch64-nintendo-switch-freestanding", aarch64_nintendo_switch_freestanding),
+
+ ("armv7-unknown-linux-uclibceabi", armv7_unknown_linux_uclibceabi),
+ ("armv7-unknown-linux-uclibceabihf", armv7_unknown_linux_uclibceabihf),
+
+ ("x86_64-unknown-none", x86_64_unknown_none),
+
+ ("mips64-openwrt-linux-musl", mips64_openwrt_linux_musl),
+}
+
+/// Builds a "Cow-Vec-Str", i.e. a `Cow<'static, [Cow<'static, str>]>`, from string literals.
+macro_rules! cvs {
+ () => {
+ ::std::borrow::Cow::Borrowed(&[])
+ };
+ ($($x:expr),+ $(,)?) => {
+ ::std::borrow::Cow::Borrowed(&[
+ $(
+ ::std::borrow::Cow::Borrowed($x),
+ )*
+ ])
+ };
+}
+
+pub(crate) use cvs;
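+
+// Illustrative sketch (not part of the upstream source; the values are just
+// examples): `cvs!` builds a `Cow<'static, [Cow<'static, str>]>` from string
+// literals without allocating.
+//
+//     let families: Cow<'static, [Cow<'static, str>]> = cvs!["unix", "wasm"];
+//     assert_eq!(&*families[0], "unix");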
+
+/// Warnings encountered when parsing the target `json`.
+///
+/// Includes fields that weren't recognized and fields that don't have the expected type.
+#[derive(Debug, PartialEq)]
+pub struct TargetWarnings {
+ unused_fields: Vec<String>,
+ incorrect_type: Vec<String>,
+}
+
+impl TargetWarnings {
+ pub fn empty() -> Self {
+ Self { unused_fields: Vec::new(), incorrect_type: Vec::new() }
+ }
+
+ pub fn warning_messages(&self) -> Vec<String> {
+ let mut warnings = vec![];
+ if !self.unused_fields.is_empty() {
+ warnings.push(format!(
+ "target json file contains unused fields: {}",
+ self.unused_fields.join(", ")
+ ));
+ }
+ if !self.incorrect_type.is_empty() {
+ warnings.push(format!(
+ "target json file contains fields whose value doesn't have the correct json type: {}",
+ self.incorrect_type.join(", ")
+ ));
+ }
+ warnings
+ }
+}
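+
+// Illustrative sketch (an assumption about caller behavior, not from the
+// upstream source): a consumer can surface the collected warnings like so.
+//
+//     let (_target, warnings) = Target::from_json(json)?;
+//     for msg in warnings.warning_messages() {
+//         eprintln!("warning: {}", msg);
+//     }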
+
+/// Everything `rustc` knows about how to compile for a specific target.
+///
+/// Every field here must be specified, and has no default value.
+#[derive(PartialEq, Clone, Debug)]
+pub struct Target {
+ /// Target triple to pass to LLVM.
+ pub llvm_target: StaticCow<str>,
+ /// Number of bits in a pointer. Influences the `target_pointer_width` `cfg` variable.
+ pub pointer_width: u32,
+ /// Architecture to use for ABI considerations. Valid options include: "x86",
+ /// "x86_64", "arm", "aarch64", "mips", "powerpc", "powerpc64", and others.
+ pub arch: StaticCow<str>,
+ /// [Data layout](https://llvm.org/docs/LangRef.html#data-layout) to pass to LLVM.
+ pub data_layout: StaticCow<str>,
+ /// Optional settings with defaults.
+ pub options: TargetOptions,
+}
+
+pub trait HasTargetSpec {
+ fn target_spec(&self) -> &Target;
+}
+
+impl HasTargetSpec for Target {
+ #[inline]
+ fn target_spec(&self) -> &Target {
+ self
+ }
+}
+
+type StaticCow<T> = Cow<'static, T>;
+
+/// Optional aspects of a target specification.
+///
+/// This has an implementation of `Default`, see each field for what the default is. In general,
+/// these try to take "minimal defaults" that don't assume anything about the runtime they run in.
+///
+/// `TargetOptions` as a separate structure is mostly an implementation detail of `Target`
+/// construction; all its fields logically belong to `Target` and are available from `Target`
+/// through `Deref` impls.
+#[derive(PartialEq, Clone, Debug)]
+pub struct TargetOptions {
+ /// Whether the target is built-in or loaded from a custom target specification.
+ pub is_builtin: bool,
+
+ /// Used as the `target_endian` `cfg` variable. Defaults to little endian.
+ pub endian: Endian,
+    /// Width of the `c_int` type. Defaults to "32".
+    pub c_int_width: StaticCow<str>,
+    /// OS name to use for conditional compilation (`target_os`). Defaults to "none".
+    /// "none" implies a bare-metal target without the `std` library.
+    /// A few targets that have `std` also use "unknown" as an `os` value,
+    /// but they are exceptions.
+ pub os: StaticCow<str>,
+ /// Environment name to use for conditional compilation (`target_env`). Defaults to "".
+ pub env: StaticCow<str>,
+ /// ABI name to distinguish multiple ABIs on the same OS and architecture. For instance, `"eabi"`
+ /// or `"eabihf"`. Defaults to "".
+ pub abi: StaticCow<str>,
+ /// Vendor name to use for conditional compilation (`target_vendor`). Defaults to "unknown".
+ pub vendor: StaticCow<str>,
+ /// Default linker flavor used if `-C linker-flavor` or `-C linker` are not passed
+ /// on the command line. Defaults to `LinkerFlavor::Gcc`.
+ pub linker_flavor: LinkerFlavor,
+
+ /// Linker to invoke
+ pub linker: Option<StaticCow<str>>,
+
+ /// LLD flavor used if `lld` (or `rust-lld`) is specified as a linker
+ /// without clarifying its flavor in any way.
+ pub lld_flavor: LldFlavor,
+
+ /// Linker arguments that are passed *before* any user-defined libraries.
+ pub pre_link_args: LinkArgs,
+ /// Objects to link before and after all other object code.
+ pub pre_link_objects: CrtObjects,
+ pub post_link_objects: CrtObjects,
+    /// Same as `(pre|post)_link_objects`, but used when we fail to pull the objects with the help
+    /// of the target's native gcc, fall back to the "self-contained" mode, and pull them manually.
+    /// See `crt_objects.rs` for more detailed documentation.
+ pub pre_link_objects_fallback: CrtObjects,
+ pub post_link_objects_fallback: CrtObjects,
+ /// Which logic to use to determine whether to fall back to the "self-contained" mode or not.
+ pub crt_objects_fallback: Option<CrtObjectsFallback>,
+
+    /// Linker arguments that are unconditionally passed after any
+    /// user-defined libraries but before post-link objects. Standard platform
+    /// libraries that should always be linked to usually go here.
+ pub late_link_args: LinkArgs,
+ /// Linker arguments used in addition to `late_link_args` if at least one
+ /// Rust dependency is dynamically linked.
+ pub late_link_args_dynamic: LinkArgs,
+ /// Linker arguments used in addition to `late_link_args` if all Rust
+ /// dependencies are statically linked.
+ pub late_link_args_static: LinkArgs,
+ /// Linker arguments that are unconditionally passed *after* any
+ /// user-defined libraries.
+ pub post_link_args: LinkArgs,
+ /// Optional link script applied to `dylib` and `executable` crate types.
+ /// This is a string containing the script, not a path. Can only be applied
+ /// to linkers where `linker_is_gnu` is true.
+ pub link_script: Option<StaticCow<str>>,
+
+ /// Environment variables to be set for the linker invocation.
+ pub link_env: StaticCow<[(StaticCow<str>, StaticCow<str>)]>,
+ /// Environment variables to be removed for the linker invocation.
+ pub link_env_remove: StaticCow<[StaticCow<str>]>,
+
+ /// Extra arguments to pass to the external assembler (when used)
+ pub asm_args: StaticCow<[StaticCow<str>]>,
+
+ /// Default CPU to pass to LLVM. Corresponds to `llc -mcpu=$cpu`. Defaults
+ /// to "generic".
+ pub cpu: StaticCow<str>,
+ /// Default target features to pass to LLVM. These features will *always* be
+ /// passed, and cannot be disabled even via `-C`. Corresponds to `llc
+ /// -mattr=$features`.
+ pub features: StaticCow<str>,
+ /// Whether dynamic linking is available on this target. Defaults to false.
+ pub dynamic_linking: bool,
+ /// If dynamic linking is available, whether only cdylibs are supported.
+ pub only_cdylib: bool,
+ /// Whether executables are available on this target. Defaults to true.
+ pub executables: bool,
+ /// Relocation model to use in object file. Corresponds to `llc
+ /// -relocation-model=$relocation_model`. Defaults to `Pic`.
+ pub relocation_model: RelocModel,
+ /// Code model to use. Corresponds to `llc -code-model=$code_model`.
+ /// Defaults to `None` which means "inherited from the base LLVM target".
+ pub code_model: Option<CodeModel>,
+ /// TLS model to use. Options are "global-dynamic" (default), "local-dynamic", "initial-exec"
+ /// and "local-exec". This is similar to the -ftls-model option in GCC/Clang.
+ pub tls_model: TlsModel,
+ /// Do not emit code that uses the "red zone", if the ABI has one. Defaults to false.
+ pub disable_redzone: bool,
+ /// Frame pointer mode for this target. Defaults to `MayOmit`.
+ pub frame_pointer: FramePointer,
+ /// Emit each function in its own section. Defaults to true.
+ pub function_sections: bool,
+ /// String to prepend to the name of every dynamic library. Defaults to "lib".
+ pub dll_prefix: StaticCow<str>,
+ /// String to append to the name of every dynamic library. Defaults to ".so".
+ pub dll_suffix: StaticCow<str>,
+ /// String to append to the name of every executable.
+ pub exe_suffix: StaticCow<str>,
+ /// String to prepend to the name of every static library. Defaults to "lib".
+ pub staticlib_prefix: StaticCow<str>,
+ /// String to append to the name of every static library. Defaults to ".a".
+ pub staticlib_suffix: StaticCow<str>,
+ /// Values of the `target_family` cfg set for this target.
+ ///
+ /// Common options are: "unix", "windows". Defaults to no families.
+ ///
+ /// See <https://doc.rust-lang.org/reference/conditional-compilation.html#target_family>.
+ pub families: StaticCow<[StaticCow<str>]>,
+ /// Whether the target toolchain's ABI supports returning small structs as an integer.
+ pub abi_return_struct_as_int: bool,
+ /// Whether the target toolchain is like macOS's. Only useful for compiling against iOS/macOS,
+ /// in particular running dsymutil and some other stuff like `-dead_strip`. Defaults to false.
+ pub is_like_osx: bool,
+ /// Whether the target toolchain is like Solaris's.
+ /// Only useful for compiling against Illumos/Solaris,
+ /// as they have a different set of linker flags. Defaults to false.
+ pub is_like_solaris: bool,
+ /// Whether the target is like Windows.
+ /// This is a combination of several more specific properties represented as a single flag:
+ /// - The target uses a Windows ABI,
+ /// - uses PE/COFF as a format for object code,
+ /// - uses Windows-style dllexport/dllimport for shared libraries,
+ /// - uses import libraries and .def files for symbol exports,
+ /// - executables support setting a subsystem.
+ pub is_like_windows: bool,
+ /// Whether the target is like MSVC.
+ /// This is a combination of several more specific properties represented as a single flag:
+ /// - The target has all the properties from `is_like_windows`
+ /// (for in-tree targets "is_like_msvc ⇒ is_like_windows" is ensured by a unit test),
+ /// - has some MSVC-specific Windows ABI properties,
+ /// - uses a link.exe-like linker,
+ /// - uses CodeView/PDB for debuginfo and natvis for its visualization,
+ /// - uses SEH-based unwinding,
+ /// - supports control flow guard mechanism.
+ pub is_like_msvc: bool,
+    /// Whether the target toolchain is like WASM.
+ pub is_like_wasm: bool,
+ /// Default supported version of DWARF on this platform.
+ /// Useful because some platforms (osx, bsd) only want up to DWARF2.
+ pub default_dwarf_version: u32,
+    /// Whether the linker supports GNU-like arguments such as -O. Defaults to true.
+    pub linker_is_gnu: bool,
+    /// The MinGW toolchain has a known issue that prevents it from correctly
+    /// handling COFF object files with more than 2<sup>15</sup> sections. Since each weak
+    /// symbol needs its own COMDAT section, weak linkage implies a large
+    /// number of sections that easily exceeds the given limit for larger
+    /// codebases. Consequently we want a way to disallow weak linkage on some
+    /// platforms.
+    pub allows_weak_linkage: bool,
+    /// Whether the linker supports rpaths or not. Defaults to false.
+    pub has_rpath: bool,
+ /// Whether to disable linking to the default libraries, typically corresponds
+ /// to `-nodefaultlibs`. Defaults to true.
+ pub no_default_libraries: bool,
+ /// Dynamically linked executables can be compiled as position independent
+ /// if the default relocation model of position independent code is not
+ /// changed. This is a requirement to take advantage of ASLR, as otherwise
+ /// the functions in the executable are not randomized and can be used
+ /// during an exploit of a vulnerability in any code.
+ pub position_independent_executables: bool,
+ /// Executables that are both statically linked and position-independent are supported.
+ pub static_position_independent_executables: bool,
+ /// Determines if the target always requires using the PLT for indirect
+ /// library calls or not. This controls the default value of the `-Z plt` flag.
+ pub needs_plt: bool,
+ /// Either partial, full, or off. Full RELRO makes the dynamic linker
+ /// resolve all symbols at startup and marks the GOT read-only before
+ /// starting the program, preventing overwriting the GOT.
+ pub relro_level: RelroLevel,
+ /// Format that archives should be emitted in. This affects whether we use
+ /// LLVM to assemble an archive or fall back to the system linker, and
+ /// currently only "gnu" is used to fall into LLVM. Unknown strings cause
+ /// the system linker to be used.
+ pub archive_format: StaticCow<str>,
+ /// Is asm!() allowed? Defaults to true.
+ pub allow_asm: bool,
+ /// Whether the runtime startup code requires the `main` function be passed
+ /// `argc` and `argv` values.
+ pub main_needs_argc_argv: bool,
+
+ /// Flag indicating whether #[thread_local] is available for this target.
+ pub has_thread_local: bool,
+ // This is mainly for easy compatibility with emscripten.
+ // If we give emcc .o files that are actually .bc files it
+ // will 'just work'.
+ pub obj_is_bitcode: bool,
+ /// Whether the target requires that emitted object code includes bitcode.
+ pub forces_embed_bitcode: bool,
+ /// Content of the LLVM cmdline section associated with embedded bitcode.
+ pub bitcode_llvm_cmdline: StaticCow<str>,
+
+ /// Don't use this field; instead use the `.min_atomic_width()` method.
+ pub min_atomic_width: Option<u64>,
+
+ /// Don't use this field; instead use the `.max_atomic_width()` method.
+ pub max_atomic_width: Option<u64>,
+
+ /// Whether the target supports atomic CAS operations natively
+ pub atomic_cas: bool,
+
+ /// Panic strategy: "unwind" or "abort"
+ pub panic_strategy: PanicStrategy,
+
+ /// Whether or not linking dylibs to a static CRT is allowed.
+ pub crt_static_allows_dylibs: bool,
+ /// Whether or not the CRT is statically linked by default.
+ pub crt_static_default: bool,
+ /// Whether or not crt-static is respected by the compiler (or is a no-op).
+ pub crt_static_respected: bool,
+
+ /// The implementation of stack probes to use.
+ pub stack_probes: StackProbeType,
+
+ /// The minimum alignment for global symbols.
+ pub min_global_align: Option<u64>,
+
+ /// Default number of codegen units to use in debug mode
+ pub default_codegen_units: Option<u64>,
+
+ /// Whether to generate trap instructions in places where optimization would
+ /// otherwise produce control flow that falls through into unrelated memory.
+ pub trap_unreachable: bool,
+
+ /// This target requires everything to be compiled with LTO to emit a final
+ /// executable, aka there is no native linker for this target.
+ pub requires_lto: bool,
+
+ /// This target has no support for threads.
+ pub singlethread: bool,
+
+    /// Whether lowering and optimization of library function calls is unconditionally
+    /// disabled in LLVM for this target.
+ pub no_builtins: bool,
+
+ /// The default visibility for symbols in this target should be "hidden"
+ /// rather than "default"
+ pub default_hidden_visibility: bool,
+
+ /// Whether a .debug_gdb_scripts section will be added to the output object file
+ pub emit_debug_gdb_scripts: bool,
+
+    /// Whether or not to unconditionally emit `uwtable` attributes on functions,
+ /// typically because the platform needs to unwind for things like stack
+ /// unwinders.
+ pub requires_uwtable: bool,
+
+ /// Whether or not to emit `uwtable` attributes on functions if `-C force-unwind-tables`
+ /// is not specified and `uwtable` is not required on this target.
+ pub default_uwtable: bool,
+
+ /// Whether or not SIMD types are passed by reference in the Rust ABI,
+ /// typically required if a target can be compiled with a mixed set of
+ /// target features. This is `true` by default, and `false` for targets like
+ /// wasm32 where the whole program either has simd or not.
+ pub simd_types_indirect: bool,
+
+    /// Pass a list of symbols which should be exported in the dylib to the linker.
+ pub limit_rdylib_exports: bool,
+
+ /// If set, have the linker export exactly these symbols, instead of using
+ /// the usual logic to figure this out from the crate itself.
+ pub override_export_symbols: Option<StaticCow<[StaticCow<str>]>>,
+
+ /// Determines how or whether the MergeFunctions LLVM pass should run for
+ /// this target. Either "disabled", "trampolines", or "aliases".
+ /// The MergeFunctions pass is generally useful, but some targets may need
+ /// to opt out. The default is "aliases".
+ ///
+ /// Workaround for: <https://github.com/rust-lang/rust/issues/57356>
+ pub merge_functions: MergeFunctions,
+
+    /// Use the platform-dependent mcount function.
+    pub mcount: StaticCow<str>,
+
+    /// LLVM ABI name, corresponds to the '-mabi' parameter available in multilib C compilers.
+    pub llvm_abiname: StaticCow<str>,
+
+    /// Whether or not the RelaxElfRelocations flag will be passed to the linker.
+    pub relax_elf_relocations: bool,
+
+ /// Additional arguments to pass to LLVM, similar to the `-C llvm-args` codegen option.
+ pub llvm_args: StaticCow<[StaticCow<str>]>,
+
+ /// Whether to use legacy .ctors initialization hooks rather than .init_array. Defaults
+ /// to false (uses .init_array).
+ pub use_ctors_section: bool,
+
+    /// Whether the linker is instructed to add a `GNU_EH_FRAME` ELF program header
+    /// used to locate unwinding information
+    /// (only has effect if the linker is `ld`-like).
+ pub eh_frame_header: bool,
+
+    /// True if the target is an ARM architecture using Thumb v1, which allows for
+    /// Thumb and ARM interworking.
+ pub has_thumb_interworking: bool,
+
+ /// How to handle split debug information, if at all. Specifying `None` has
+ /// target-specific meaning.
+ pub split_debuginfo: SplitDebuginfo,
+
+ /// The sanitizers supported by this target
+ ///
+    /// Note that the support here is at a codegen level. If the machine code with a sanitizer
+    /// enabled can be generated on this target, but the necessary supporting libraries are not
+    /// distributed with the target, the sanitizer should still appear in this list for the target.
+ pub supported_sanitizers: SanitizerSet,
+
+    /// If present, it's the default value to use for adjusting the C ABI.
+ pub default_adjusted_cabi: Option<Abi>,
+
+    /// Minimum number of bits in a `#[repr(C)]` enum. Defaults to 32.
+ pub c_enum_min_bits: u64,
+
+ /// Whether or not the DWARF `.debug_aranges` section should be generated.
+ pub generate_arange_section: bool,
+
+ /// Whether the target supports stack canary checks. `true` by default,
+ /// since this is most common among tier 1 and tier 2 targets.
+ pub supports_stack_protector: bool,
+}
+
+/// Add arguments for the given flavor and also for its "twin" flavors
+/// that have a compatible command line interface.
+fn add_link_args(link_args: &mut LinkArgs, flavor: LinkerFlavor, args: &[&'static str]) {
+ let mut insert = |flavor| {
+ link_args.entry(flavor).or_default().extend(args.iter().copied().map(Cow::Borrowed))
+ };
+ insert(flavor);
+ match flavor {
+ LinkerFlavor::Ld => insert(LinkerFlavor::Lld(LldFlavor::Ld)),
+ LinkerFlavor::Msvc => insert(LinkerFlavor::Lld(LldFlavor::Link)),
+ LinkerFlavor::Lld(LldFlavor::Wasm) => {}
+ LinkerFlavor::Lld(lld_flavor) => {
+ panic!("add_link_args: use non-LLD flavor for {:?}", lld_flavor)
+ }
+ LinkerFlavor::Gcc
+ | LinkerFlavor::Em
+ | LinkerFlavor::L4Bender
+ | LinkerFlavor::BpfLinker
+ | LinkerFlavor::PtxLinker => {}
+ }
+}
+
+impl TargetOptions {
+ fn link_args(flavor: LinkerFlavor, args: &[&'static str]) -> LinkArgs {
+ let mut link_args = LinkArgs::new();
+ add_link_args(&mut link_args, flavor, args);
+ link_args
+ }
+
+ fn add_pre_link_args(&mut self, flavor: LinkerFlavor, args: &[&'static str]) {
+ add_link_args(&mut self.pre_link_args, flavor, args);
+ }
+
+ fn add_post_link_args(&mut self, flavor: LinkerFlavor, args: &[&'static str]) {
+ add_link_args(&mut self.post_link_args, flavor, args);
+ }
+}
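+
+// Illustrative sketch (hypothetical target file; the base module and flags are
+// assumptions for the example): built-in target definitions typically start
+// from a shared base `TargetOptions` and layer flavor-specific args on top.
+//
+//     let mut base = super::linux_gnu_base::opts();
+//     base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);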
+
+impl Default for TargetOptions {
+ /// Creates a set of "sane defaults" for any target. This is still
+ /// incomplete, and if used for compilation, will certainly not work.
+ fn default() -> TargetOptions {
+ TargetOptions {
+ is_builtin: false,
+ endian: Endian::Little,
+ c_int_width: "32".into(),
+ os: "none".into(),
+ env: "".into(),
+ abi: "".into(),
+ vendor: "unknown".into(),
+ linker_flavor: LinkerFlavor::Gcc,
+ linker: option_env!("CFG_DEFAULT_LINKER").map(|s| s.into()),
+ lld_flavor: LldFlavor::Ld,
+ pre_link_args: LinkArgs::new(),
+ post_link_args: LinkArgs::new(),
+ link_script: None,
+ asm_args: cvs![],
+ cpu: "generic".into(),
+ features: "".into(),
+ dynamic_linking: false,
+ only_cdylib: false,
+ executables: true,
+ relocation_model: RelocModel::Pic,
+ code_model: None,
+ tls_model: TlsModel::GeneralDynamic,
+ disable_redzone: false,
+ frame_pointer: FramePointer::MayOmit,
+ function_sections: true,
+ dll_prefix: "lib".into(),
+ dll_suffix: ".so".into(),
+ exe_suffix: "".into(),
+ staticlib_prefix: "lib".into(),
+ staticlib_suffix: ".a".into(),
+ families: cvs![],
+ abi_return_struct_as_int: false,
+ is_like_osx: false,
+ is_like_solaris: false,
+ is_like_windows: false,
+ is_like_msvc: false,
+ is_like_wasm: false,
+ default_dwarf_version: 4,
+ linker_is_gnu: true,
+ allows_weak_linkage: true,
+ has_rpath: false,
+ no_default_libraries: true,
+ position_independent_executables: false,
+ static_position_independent_executables: false,
+ needs_plt: false,
+ relro_level: RelroLevel::None,
+ pre_link_objects: Default::default(),
+ post_link_objects: Default::default(),
+ pre_link_objects_fallback: Default::default(),
+ post_link_objects_fallback: Default::default(),
+ crt_objects_fallback: None,
+ late_link_args: LinkArgs::new(),
+ late_link_args_dynamic: LinkArgs::new(),
+ late_link_args_static: LinkArgs::new(),
+ link_env: cvs![],
+ link_env_remove: cvs![],
+ archive_format: "gnu".into(),
+ main_needs_argc_argv: true,
+ allow_asm: true,
+ has_thread_local: false,
+ obj_is_bitcode: false,
+ forces_embed_bitcode: false,
+ bitcode_llvm_cmdline: "".into(),
+ min_atomic_width: None,
+ max_atomic_width: None,
+ atomic_cas: true,
+ panic_strategy: PanicStrategy::Unwind,
+ crt_static_allows_dylibs: false,
+ crt_static_default: false,
+ crt_static_respected: false,
+ stack_probes: StackProbeType::None,
+ min_global_align: None,
+ default_codegen_units: None,
+ trap_unreachable: true,
+ requires_lto: false,
+ singlethread: false,
+ no_builtins: false,
+ default_hidden_visibility: false,
+ emit_debug_gdb_scripts: true,
+ requires_uwtable: false,
+ default_uwtable: false,
+ simd_types_indirect: true,
+ limit_rdylib_exports: true,
+ override_export_symbols: None,
+ merge_functions: MergeFunctions::Aliases,
+ mcount: "mcount".into(),
+ llvm_abiname: "".into(),
+ relax_elf_relocations: false,
+ llvm_args: cvs![],
+ use_ctors_section: false,
+ eh_frame_header: true,
+ has_thumb_interworking: false,
+ split_debuginfo: SplitDebuginfo::Off,
+ supported_sanitizers: SanitizerSet::empty(),
+ default_adjusted_cabi: None,
+ c_enum_min_bits: 32,
+ generate_arange_section: true,
+ supports_stack_protector: true,
+ }
+ }
+}
+
+/// `TargetOptions` being a separate type is basically an implementation detail of `Target` that is
+/// used for providing defaults. Perhaps there's a way to merge `TargetOptions` into `Target` so
+/// this `Deref` implementation is no longer necessary.
+impl Deref for Target {
+ type Target = TargetOptions;
+
+ #[inline]
+ fn deref(&self) -> &Self::Target {
+ &self.options
+ }
+}
+impl DerefMut for Target {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.options
+ }
+}
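+
+// Illustrative sketch (assuming some `t: Target` in scope): the `Deref` impls
+// let `TargetOptions` fields be accessed as if declared directly on `Target`.
+//
+//     fn is_gnu_linker(t: &Target) -> bool {
+//         t.linker_is_gnu // resolves to t.options.linker_is_gnu via Deref
+//     }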
+
+impl Target {
+ /// Given a function ABI, turn it into the correct ABI for this target.
+ pub fn adjust_abi(&self, abi: Abi) -> Abi {
+ match abi {
+ Abi::C { .. } => self.default_adjusted_cabi.unwrap_or(abi),
+ Abi::System { unwind } if self.is_like_windows && self.arch == "x86" => {
+ Abi::Stdcall { unwind }
+ }
+ Abi::System { unwind } => Abi::C { unwind },
+ Abi::EfiApi if self.arch == "x86_64" => Abi::Win64 { unwind: false },
+ Abi::EfiApi => Abi::C { unwind: false },
+
+ // See commentary in `is_abi_supported`.
+ Abi::Stdcall { .. } | Abi::Thiscall { .. } if self.arch == "x86" => abi,
+ Abi::Stdcall { unwind } | Abi::Thiscall { unwind } => Abi::C { unwind },
+ Abi::Fastcall { .. } if self.arch == "x86" => abi,
+ Abi::Vectorcall { .. } if ["x86", "x86_64"].contains(&&self.arch[..]) => abi,
+ Abi::Fastcall { unwind } | Abi::Vectorcall { unwind } => Abi::C { unwind },
+
+ abi => abi,
+ }
+ }
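+
+    // Illustrative sketch (follows from the match arms above; `win` is a
+    // hypothetical x86 Windows-like target): `Abi::System` is adjusted to
+    // `Abi::Stdcall` there, and lowers to `Abi::C` elsewhere.
+    //
+    //     assert_eq!(win.adjust_abi(Abi::System { unwind: false }),
+    //                Abi::Stdcall { unwind: false });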
+
+    /// Returns `None` if the `UNSUPPORTED_CALLING_CONVENTIONS` lint should be emitted.
+ pub fn is_abi_supported(&self, abi: Abi) -> Option<bool> {
+ use Abi::*;
+ Some(match abi {
+ Rust
+ | C { .. }
+ | System { .. }
+ | RustIntrinsic
+ | RustCall
+ | PlatformIntrinsic
+ | Unadjusted
+ | Cdecl { .. }
+ | EfiApi
+ | RustCold => true,
+ X86Interrupt => ["x86", "x86_64"].contains(&&self.arch[..]),
+ Aapcs { .. } => "arm" == self.arch,
+ CCmseNonSecureCall => ["arm", "aarch64"].contains(&&self.arch[..]),
+ Win64 { .. } | SysV64 { .. } => self.arch == "x86_64",
+ PtxKernel => self.arch == "nvptx64",
+ Msp430Interrupt => self.arch == "msp430",
+ AmdGpuKernel => self.arch == "amdgcn",
+ AvrInterrupt | AvrNonBlockingInterrupt => self.arch == "avr",
+ Wasm => ["wasm32", "wasm64"].contains(&&self.arch[..]),
+ Thiscall { .. } => self.arch == "x86",
+            // On Windows these fall back to the platform-native calling convention (C) when the
+            // architecture is not supported.
+            //
+            // This is, I believe, a historical accident that occurred as part of Microsoft
+            // striving to allow most of the code to "just" compile when support for 64-bit x86
+            // was added, and then again later when support for ARM architectures was added.
+            //
+            // This is well documented across MSDN. Support for this in Rust was added in
+            // #54576. It makes much more sense in the context of Microsoft's C++ than it does in
+            // Rust, but there isn't much leeway remaining here to change it back at the time this
+            // comment is being written.
+ //
+ // Following are the relevant excerpts from the MSDN documentation.
+ //
+ // > The __vectorcall calling convention is only supported in native code on x86 and
+ // x64 processors that include Streaming SIMD Extensions 2 (SSE2) and above.
+ // > ...
+ // > On ARM machines, __vectorcall is accepted and ignored by the compiler.
+ //
+ // -- https://docs.microsoft.com/en-us/cpp/cpp/vectorcall?view=msvc-160
+ //
+ // > On ARM and x64 processors, __stdcall is accepted and ignored by the compiler;
+ //
+ // -- https://docs.microsoft.com/en-us/cpp/cpp/stdcall?view=msvc-160
+ //
+ // > In most cases, keywords or compiler switches that specify an unsupported
+ // > convention on a particular platform are ignored, and the platform default
+ // > convention is used.
+ //
+ // -- https://docs.microsoft.com/en-us/cpp/cpp/argument-passing-and-naming-conventions
+ Stdcall { .. } | Fastcall { .. } | Vectorcall { .. } if self.is_like_windows => true,
+ // Outside of Windows we want to only support these calling conventions for the
+ // architectures for which these calling conventions are actually well defined.
+ Stdcall { .. } | Fastcall { .. } if self.arch == "x86" => true,
+ Vectorcall { .. } if ["x86", "x86_64"].contains(&&self.arch[..]) => true,
+ // Return a `None` for other cases so that we know to emit a future compat lint.
+ Stdcall { .. } | Fastcall { .. } | Vectorcall { .. } => return None,
+ })
+ }
+
+ /// Minimum integer size in bits that this target can perform atomic
+ /// operations on.
+ pub fn min_atomic_width(&self) -> u64 {
+ self.min_atomic_width.unwrap_or(8)
+ }
+
+ /// Maximum integer size in bits that this target can perform atomic
+ /// operations on.
+ pub fn max_atomic_width(&self) -> u64 {
+ self.max_atomic_width.unwrap_or_else(|| self.pointer_width.into())
+ }
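+
+    // Illustrative sketch (follows directly from the two methods above; `t` is
+    // a hypothetical target with pointer_width == 32 and both widths unset):
+    //
+    //     assert_eq!(t.min_atomic_width(), 8);
+    //     assert_eq!(t.max_atomic_width(), 32); // falls back to pointer_width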
+
+ /// Loads a target descriptor from a JSON object.
+ pub fn from_json(obj: Json) -> Result<(Target, TargetWarnings), String> {
+ // While ugly, this code must remain this way to retain
+ // compatibility with existing JSON fields and the internal
+ // expected naming of the Target and TargetOptions structs.
+ // To ensure compatibility is retained, the built-in targets
+ // are round-tripped through this code to catch cases where
+ // the JSON parser is not updated to match the structs.
+
+ let mut obj = match obj {
+ Value::Object(obj) => obj,
+ _ => return Err("Expected JSON object for target")?,
+ };
+
+ let mut get_req_field = |name: &str| {
+ obj.remove(name)
+ .and_then(|j| j.as_str().map(str::to_string))
+ .ok_or_else(|| format!("Field {} in target specification is required", name))
+ };
+
+ let mut base = Target {
+ llvm_target: get_req_field("llvm-target")?.into(),
+ pointer_width: get_req_field("target-pointer-width")?
+ .parse::<u32>()
+ .map_err(|_| "target-pointer-width must be an integer".to_string())?,
+ data_layout: get_req_field("data-layout")?.into(),
+ arch: get_req_field("arch")?.into(),
+ options: Default::default(),
+ };
+
+ let mut incorrect_type = vec![];
+
+ macro_rules! key {
+ ($key_name:ident) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(s) = obj.remove(&name).and_then(|s| s.as_str().map(str::to_string).map(Cow::from)) {
+ base.$key_name = s;
+ }
+ } );
+ ($key_name:ident = $json_name:expr) => ( {
+ let name = $json_name;
+ if let Some(s) = obj.remove(name).and_then(|s| s.as_str().map(str::to_string).map(Cow::from)) {
+ base.$key_name = s;
+ }
+ } );
+ ($key_name:ident, bool) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(s) = obj.remove(&name).and_then(|b| b.as_bool()) {
+ base.$key_name = s;
+ }
+ } );
+ ($key_name:ident, u64) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(s) = obj.remove(&name).and_then(|j| Json::as_u64(&j)) {
+ base.$key_name = s;
+ }
+ } );
+ ($key_name:ident, u32) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(s) = obj.remove(&name).and_then(|b| b.as_u64()) {
+ if s < 1 || s > 5 {
+ return Err("Not a valid DWARF version number".into());
+ }
+ base.$key_name = s as u32;
+ }
+ } );
+ ($key_name:ident, Option<u64>) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(s) = obj.remove(&name).and_then(|b| b.as_u64()) {
+ base.$key_name = Some(s);
+ }
+ } );
+ ($key_name:ident, MergeFunctions) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match s.parse::<MergeFunctions>() {
+ Ok(mergefunc) => base.$key_name = mergefunc,
+ _ => return Some(Err(format!("'{}' is not a valid value for \
+ merge-functions. Use 'disabled', \
+ 'trampolines', or 'aliases'.",
+ s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, RelocModel) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match s.parse::<RelocModel>() {
+ Ok(relocation_model) => base.$key_name = relocation_model,
+ _ => return Some(Err(format!("'{}' is not a valid relocation model. \
+ Run `rustc --print relocation-models` to \
+ see the list of supported values.", s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, CodeModel) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match s.parse::<CodeModel>() {
+ Ok(code_model) => base.$key_name = Some(code_model),
+ _ => return Some(Err(format!("'{}' is not a valid code model. \
+ Run `rustc --print code-models` to \
+ see the list of supported values.", s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, TlsModel) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match s.parse::<TlsModel>() {
+ Ok(tls_model) => base.$key_name = tls_model,
+ _ => return Some(Err(format!("'{}' is not a valid TLS model. \
+ Run `rustc --print tls-models` to \
+ see the list of supported values.", s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, PanicStrategy) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match s {
+ "unwind" => base.$key_name = PanicStrategy::Unwind,
+ "abort" => base.$key_name = PanicStrategy::Abort,
+ _ => return Some(Err(format!("'{}' is not a valid value for \
+ panic-strategy. Use 'unwind' or 'abort'.",
+ s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, RelroLevel) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match s.parse::<RelroLevel>() {
+ Ok(level) => base.$key_name = level,
+ _ => return Some(Err(format!("'{}' is not a valid value for \
+                                                      relro-level. Use 'full', 'partial', or 'off'.",
+ s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, SplitDebuginfo) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match s.parse::<SplitDebuginfo>() {
+ Ok(level) => base.$key_name = level,
+ _ => return Some(Err(format!("'{}' is not a valid value for \
+                                                      split-debuginfo. Use 'off', 'packed' or 'unpacked'.",
+ s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, list) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(j) = obj.remove(&name) {
+ if let Some(v) = j.as_array() {
+ base.$key_name = v.iter()
+ .map(|a| a.as_str().unwrap().to_string().into())
+ .collect();
+ } else {
+ incorrect_type.push(name)
+ }
+ }
+ } );
+ ($key_name:ident, opt_list) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(j) = obj.remove(&name) {
+ if let Some(v) = j.as_array() {
+ base.$key_name = Some(v.iter()
+ .map(|a| a.as_str().unwrap().to_string().into())
+ .collect());
+ } else {
+ incorrect_type.push(name)
+ }
+ }
+ } );
+ ($key_name:ident, optional) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(o) = obj.remove(&name) {
+ base.$key_name = o
+ .as_str()
+ .map(|s| s.to_string().into());
+ }
+ } );
+ ($key_name:ident, LldFlavor) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ if let Some(flavor) = LldFlavor::from_str(&s) {
+ base.$key_name = flavor;
+ } else {
+ return Some(Err(format!(
+ "'{}' is not a valid value for lld-flavor. \
+                             Use 'darwin', 'gnu', 'link' or 'wasm'.",
+ s)))
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, LinkerFlavor) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match LinkerFlavor::from_str(s) {
+ Some(linker_flavor) => base.$key_name = linker_flavor,
+ _ => return Some(Err(format!("'{}' is not a valid value for linker-flavor. \
+ Use {}", s, LinkerFlavor::one_of()))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, StackProbeType) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| match StackProbeType::from_json(&o) {
+ Ok(v) => {
+ base.$key_name = v;
+ Some(Ok(()))
+ },
+ Err(s) => Some(Err(
+ format!("`{:?}` is not a valid value for `{}`: {}", o, name, s)
+ )),
+ }).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, SanitizerSet) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(o) = obj.remove(&name) {
+ if let Some(a) = o.as_array() {
+ for s in a {
+ base.$key_name |= match s.as_str() {
+ Some("address") => SanitizerSet::ADDRESS,
+ Some("cfi") => SanitizerSet::CFI,
+ Some("leak") => SanitizerSet::LEAK,
+ Some("memory") => SanitizerSet::MEMORY,
+ Some("memtag") => SanitizerSet::MEMTAG,
+ Some("shadow-call-stack") => SanitizerSet::SHADOWCALLSTACK,
+ Some("thread") => SanitizerSet::THREAD,
+ Some("hwaddress") => SanitizerSet::HWADDRESS,
+ Some(s) => return Err(format!("unknown sanitizer {}", s)),
+ _ => return Err(format!("not a string: {:?}", s)),
+ };
+ }
+ } else {
+ incorrect_type.push(name)
+ }
+ }
+ Ok::<(), String>(())
+ } );
+
+ ($key_name:ident, crt_objects_fallback) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match s.parse::<CrtObjectsFallback>() {
+ Ok(fallback) => base.$key_name = Some(fallback),
+ _ => return Some(Err(format!("'{}' is not a valid CRT objects fallback. \
+ Use 'musl', 'mingw' or 'wasm'", s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, link_objects) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(val) = obj.remove(&name) {
+ let obj = val.as_object().ok_or_else(|| format!("{}: expected a \
+ JSON object with fields per CRT object kind.", name))?;
+ let mut args = CrtObjects::new();
+ for (k, v) in obj {
+ let kind = LinkOutputKind::from_str(&k).ok_or_else(|| {
+ format!("{}: '{}' is not a valid value for CRT object kind. \
+ Use '(dynamic,static)-(nopic,pic)-exe' or \
+ '(dynamic,static)-dylib' or 'wasi-reactor-exe'", name, k)
+ })?;
+
+ let v = v.as_array().ok_or_else(||
+ format!("{}.{}: expected a JSON array", name, k)
+ )?.iter().enumerate()
+ .map(|(i,s)| {
+ let s = s.as_str().ok_or_else(||
+ format!("{}.{}[{}]: expected a JSON string", name, k, i))?;
+ Ok(s.to_string().into())
+ })
+ .collect::<Result<Vec<_>, String>>()?;
+
+ args.insert(kind, v);
+ }
+ base.$key_name = args;
+ }
+ } );
+ ($key_name:ident, link_args) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(val) = obj.remove(&name) {
+ let obj = val.as_object().ok_or_else(|| format!("{}: expected a \
+ JSON object with fields per linker-flavor.", name))?;
+ let mut args = LinkArgs::new();
+ for (k, v) in obj {
+ let flavor = LinkerFlavor::from_str(&k).ok_or_else(|| {
+ format!("{}: '{}' is not a valid value for linker-flavor. \
+ Use 'em', 'gcc', 'ld' or 'msvc'", name, k)
+ })?;
+
+ let v = v.as_array().ok_or_else(||
+ format!("{}.{}: expected a JSON array", name, k)
+ )?.iter().enumerate()
+ .map(|(i,s)| {
+ let s = s.as_str().ok_or_else(||
+ format!("{}.{}[{}]: expected a JSON string", name, k, i))?;
+ Ok(s.to_string().into())
+ })
+ .collect::<Result<Vec<_>, String>>()?;
+
+ args.insert(flavor, v);
+ }
+ base.$key_name = args;
+ }
+ } );
+ ($key_name:ident, env) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ if let Some(o) = obj.remove(&name) {
+ if let Some(a) = o.as_array() {
+ for o in a {
+ if let Some(s) = o.as_str() {
+ let p = s.split('=').collect::<Vec<_>>();
+ if p.len() == 2 {
+ let k = p[0].to_string();
+ let v = p[1].to_string();
+ base.$key_name.to_mut().push((k.into(), v.into()));
+ }
+ }
+ }
+ } else {
+ incorrect_type.push(name)
+ }
+ }
+ } );
+ ($key_name:ident, Option<Abi>) => ( {
+ let name = (stringify!($key_name)).replace("_", "-");
+ obj.remove(&name).and_then(|o| o.as_str().and_then(|s| {
+ match lookup_abi(s) {
+ Some(abi) => base.$key_name = Some(abi),
+ _ => return Some(Err(format!("'{}' is not a valid value for abi", s))),
+ }
+ Some(Ok(()))
+ })).unwrap_or(Ok(()))
+ } );
+ ($key_name:ident, TargetFamilies) => ( {
+ if let Some(value) = obj.remove("target-family") {
+ if let Some(v) = value.as_array() {
+ base.$key_name = v.iter()
+ .map(|a| a.as_str().unwrap().to_string().into())
+ .collect();
+ } else if let Some(v) = value.as_str() {
+ base.$key_name = vec![v.to_string().into()].into();
+ }
+ }
+ } );
+ }
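+
+        // Illustrative note (not part of the upstream source): each `key!`
+        // invocation below maps a snake_case field name to its kebab-case JSON
+        // key, so e.g. `key!(is_builtin, bool)` consumes an "is-builtin" key
+        // from the spec object and stores it in `base.is_builtin`.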
+
+ if let Some(j) = obj.remove("target-endian") {
+ if let Some(s) = j.as_str() {
+ base.endian = s.parse()?;
+ } else {
+ incorrect_type.push("target-endian".into())
+ }
+ }
+
+ if let Some(fp) = obj.remove("frame-pointer") {
+ if let Some(s) = fp.as_str() {
+ base.frame_pointer = s
+ .parse()
+ .map_err(|()| format!("'{}' is not a valid value for frame-pointer", s))?;
+ } else {
+ incorrect_type.push("frame-pointer".into())
+ }
+ }
+
+ key!(is_builtin, bool);
+ key!(c_int_width = "target-c-int-width");
+ key!(os);
+ key!(env);
+ key!(abi);
+ key!(vendor);
+ key!(linker_flavor, LinkerFlavor)?;
+ key!(linker, optional);
+ key!(lld_flavor, LldFlavor)?;
+ key!(pre_link_objects, link_objects);
+ key!(post_link_objects, link_objects);
+ key!(pre_link_objects_fallback, link_objects);
+ key!(post_link_objects_fallback, link_objects);
+ key!(crt_objects_fallback, crt_objects_fallback)?;
+ key!(pre_link_args, link_args);
+ key!(late_link_args, link_args);
+ key!(late_link_args_dynamic, link_args);
+ key!(late_link_args_static, link_args);
+ key!(post_link_args, link_args);
+ key!(link_script, optional);
+ key!(link_env, env);
+ key!(link_env_remove, list);
+ key!(asm_args, list);
+ key!(cpu);
+ key!(features);
+ key!(dynamic_linking, bool);
+ key!(only_cdylib, bool);
+ key!(executables, bool);
+ key!(relocation_model, RelocModel)?;
+ key!(code_model, CodeModel)?;
+ key!(tls_model, TlsModel)?;
+ key!(disable_redzone, bool);
+ key!(function_sections, bool);
+ key!(dll_prefix);
+ key!(dll_suffix);
+ key!(exe_suffix);
+ key!(staticlib_prefix);
+ key!(staticlib_suffix);
+ key!(families, TargetFamilies);
+ key!(abi_return_struct_as_int, bool);
+ key!(is_like_osx, bool);
+ key!(is_like_solaris, bool);
+ key!(is_like_windows, bool);
+ key!(is_like_msvc, bool);
+ key!(is_like_wasm, bool);
+ key!(default_dwarf_version, u32);
+ key!(linker_is_gnu, bool);
+ key!(allows_weak_linkage, bool);
+ key!(has_rpath, bool);
+ key!(no_default_libraries, bool);
+ key!(position_independent_executables, bool);
+ key!(static_position_independent_executables, bool);
+ key!(needs_plt, bool);
+ key!(relro_level, RelroLevel)?;
+ key!(archive_format);
+ key!(allow_asm, bool);
+ key!(main_needs_argc_argv, bool);
+ key!(has_thread_local, bool);
+ key!(obj_is_bitcode, bool);
+ key!(forces_embed_bitcode, bool);
+ key!(bitcode_llvm_cmdline);
+ key!(max_atomic_width, Option<u64>);
+ key!(min_atomic_width, Option<u64>);
+ key!(atomic_cas, bool);
+ key!(panic_strategy, PanicStrategy)?;
+ key!(crt_static_allows_dylibs, bool);
+ key!(crt_static_default, bool);
+ key!(crt_static_respected, bool);
+ key!(stack_probes, StackProbeType)?;
+ key!(min_global_align, Option<u64>);
+ key!(default_codegen_units, Option<u64>);
+ key!(trap_unreachable, bool);
+ key!(requires_lto, bool);
+ key!(singlethread, bool);
+ key!(no_builtins, bool);
+ key!(default_hidden_visibility, bool);
+ key!(emit_debug_gdb_scripts, bool);
+ key!(requires_uwtable, bool);
+ key!(default_uwtable, bool);
+ key!(simd_types_indirect, bool);
+ key!(limit_rdylib_exports, bool);
+ key!(override_export_symbols, opt_list);
+ key!(merge_functions, MergeFunctions)?;
+ key!(mcount = "target-mcount");
+ key!(llvm_abiname);
+ key!(relax_elf_relocations, bool);
+ key!(llvm_args, list);
+ key!(use_ctors_section, bool);
+ key!(eh_frame_header, bool);
+ key!(has_thumb_interworking, bool);
+ key!(split_debuginfo, SplitDebuginfo)?;
+ key!(supported_sanitizers, SanitizerSet)?;
+ key!(default_adjusted_cabi, Option<Abi>)?;
+ key!(c_enum_min_bits, u64);
+ key!(generate_arange_section, bool);
+ key!(supports_stack_protector, bool);
+
+ if base.is_builtin {
+ // This can cause unfortunate ICEs later down the line.
+ return Err("may not set is_builtin for targets not built-in".into());
+ }
+ // Each field should have been read using `Json::remove` so any keys remaining are unused.
+ let remaining_keys = obj.keys();
+ Ok((
+ base,
+ TargetWarnings { unused_fields: remaining_keys.cloned().collect(), incorrect_type },
+ ))
+ }
+
+    /// Loads a built-in target.
+ pub fn expect_builtin(target_triple: &TargetTriple) -> Target {
+ match *target_triple {
+ TargetTriple::TargetTriple(ref target_triple) => {
+ load_builtin(target_triple).expect("built-in target")
+ }
+ TargetTriple::TargetJson { .. } => {
+ panic!("built-in targets doens't support target-paths")
+ }
+ }
+ }
+
+    /// Searches for a JSON file specifying the given target triple.
+    ///
+    /// If none is found in `$RUST_TARGET_PATH`, looks for a file called `target.json` inside the
+    /// sysroot under the target-triple's `rustlib` directory. Note that it could also just be a
+    /// bare filename already, so also checks for that. If the triple is one of the hardcoded
+    /// targets we know about, just returns it directly.
+ ///
+ /// The error string could come from any of the APIs called, including filesystem access and
+ /// JSON decoding.
+ pub fn search(
+ target_triple: &TargetTriple,
+ sysroot: &Path,
+ ) -> Result<(Target, TargetWarnings), String> {
+ use std::env;
+ use std::fs;
+
+ fn load_file(path: &Path) -> Result<(Target, TargetWarnings), String> {
+ let contents = fs::read_to_string(path).map_err(|e| e.to_string())?;
+ let obj = serde_json::from_str(&contents).map_err(|e| e.to_string())?;
+ Target::from_json(obj)
+ }
+
+ match *target_triple {
+ TargetTriple::TargetTriple(ref target_triple) => {
+ // check if triple is in list of built-in targets
+ if let Some(t) = load_builtin(target_triple) {
+ return Ok((t, TargetWarnings::empty()));
+ }
+
+ // search for a file named `target_triple`.json in RUST_TARGET_PATH
+ let path = {
+ let mut target = target_triple.to_string();
+ target.push_str(".json");
+ PathBuf::from(target)
+ };
+
+ let target_path = env::var_os("RUST_TARGET_PATH").unwrap_or_default();
+
+ for dir in env::split_paths(&target_path) {
+ let p = dir.join(&path);
+ if p.is_file() {
+ return load_file(&p);
+ }
+ }
+
+ // Additionally look in the sysroot under `lib/rustlib/<triple>/target.json`
+ // as a fallback.
+ let rustlib_path = crate::target_rustlib_path(&sysroot, &target_triple);
+ let p = PathBuf::from_iter([
+ Path::new(sysroot),
+ Path::new(&rustlib_path),
+ Path::new("target.json"),
+ ]);
+ if p.is_file() {
+ return load_file(&p);
+ }
+
+ Err(format!("Could not find specification for target {:?}", target_triple))
+ }
+ TargetTriple::TargetJson { ref contents, .. } => {
+ let obj = serde_json::from_str(contents).map_err(|e| e.to_string())?;
+ Target::from_json(obj)
+ }
+ }
+ }
+}
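+
+// Illustrative sketch (hypothetical paths and target name): custom target
+// specs are resolved by `Target::search` through `$RUST_TARGET_PATH` or the
+// sysroot fallback described above, e.g.
+//
+//     RUST_TARGET_PATH=/opt/specs rustc --target my-custom-target main.rs
+//     (this would load /opt/specs/my-custom-target.json)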
+
+impl ToJson for Target {
+ fn to_json(&self) -> Json {
+ let mut d = serde_json::Map::new();
+ let default: TargetOptions = Default::default();
+
+ macro_rules! target_val {
+ ($attr:ident) => {{
+ let name = (stringify!($attr)).replace("_", "-");
+ d.insert(name, self.$attr.to_json());
+ }};
+ }
+
+ macro_rules! target_option_val {
+ ($attr:ident) => {{
+ let name = (stringify!($attr)).replace("_", "-");
+ if default.$attr != self.$attr {
+ d.insert(name, self.$attr.to_json());
+ }
+ }};
+ ($attr:ident, $key_name:expr) => {{
+ let name = $key_name;
+ if default.$attr != self.$attr {
+ d.insert(name.into(), self.$attr.to_json());
+ }
+ }};
+ (link_args - $attr:ident) => {{
+ let name = (stringify!($attr)).replace("_", "-");
+ if default.$attr != self.$attr {
+ let obj = self
+ .$attr
+ .iter()
+ .map(|(k, v)| (k.desc().to_string(), v.clone()))
+ .collect::<BTreeMap<_, _>>();
+ d.insert(name, obj.to_json());
+ }
+ }};
+ (env - $attr:ident) => {{
+ let name = (stringify!($attr)).replace("_", "-");
+ if default.$attr != self.$attr {
+ let obj = self
+ .$attr
+ .iter()
+ .map(|&(ref k, ref v)| format!("{k}={v}"))
+ .collect::<Vec<_>>();
+ d.insert(name, obj.to_json());
+ }
+ }};
+ }
+
+ target_val!(llvm_target);
+ d.insert("target-pointer-width".to_string(), self.pointer_width.to_string().to_json());
+ target_val!(arch);
+ target_val!(data_layout);
+
+ target_option_val!(is_builtin);
+ target_option_val!(endian, "target-endian");
+ target_option_val!(c_int_width, "target-c-int-width");
+ target_option_val!(os);
+ target_option_val!(env);
+ target_option_val!(abi);
+ target_option_val!(vendor);
+ target_option_val!(linker_flavor);
+ target_option_val!(linker);
+ target_option_val!(lld_flavor);
+ target_option_val!(pre_link_objects);
+ target_option_val!(post_link_objects);
+ target_option_val!(pre_link_objects_fallback);
+ target_option_val!(post_link_objects_fallback);
+ target_option_val!(crt_objects_fallback);
+ target_option_val!(link_args - pre_link_args);
+ target_option_val!(link_args - late_link_args);
+ target_option_val!(link_args - late_link_args_dynamic);
+ target_option_val!(link_args - late_link_args_static);
+ target_option_val!(link_args - post_link_args);
+ target_option_val!(link_script);
+ target_option_val!(env - link_env);
+ target_option_val!(link_env_remove);
+ target_option_val!(asm_args);
+ target_option_val!(cpu);
+ target_option_val!(features);
+ target_option_val!(dynamic_linking);
+ target_option_val!(only_cdylib);
+ target_option_val!(executables);
+ target_option_val!(relocation_model);
+ target_option_val!(code_model);
+ target_option_val!(tls_model);
+ target_option_val!(disable_redzone);
+ target_option_val!(frame_pointer);
+ target_option_val!(function_sections);
+ target_option_val!(dll_prefix);
+ target_option_val!(dll_suffix);
+ target_option_val!(exe_suffix);
+ target_option_val!(staticlib_prefix);
+ target_option_val!(staticlib_suffix);
+ target_option_val!(families, "target-family");
+ target_option_val!(abi_return_struct_as_int);
+ target_option_val!(is_like_osx);
+ target_option_val!(is_like_solaris);
+ target_option_val!(is_like_windows);
+ target_option_val!(is_like_msvc);
+ target_option_val!(is_like_wasm);
+ target_option_val!(default_dwarf_version);
+ target_option_val!(linker_is_gnu);
+ target_option_val!(allows_weak_linkage);
+ target_option_val!(has_rpath);
+ target_option_val!(no_default_libraries);
+ target_option_val!(position_independent_executables);
+ target_option_val!(static_position_independent_executables);
+ target_option_val!(needs_plt);
+ target_option_val!(relro_level);
+ target_option_val!(archive_format);
+ target_option_val!(allow_asm);
+ target_option_val!(main_needs_argc_argv);
+ target_option_val!(has_thread_local);
+ target_option_val!(obj_is_bitcode);
+ target_option_val!(forces_embed_bitcode);
+ target_option_val!(bitcode_llvm_cmdline);
+ target_option_val!(min_atomic_width);
+ target_option_val!(max_atomic_width);
+ target_option_val!(atomic_cas);
+ target_option_val!(panic_strategy);
+ target_option_val!(crt_static_allows_dylibs);
+ target_option_val!(crt_static_default);
+ target_option_val!(crt_static_respected);
+ target_option_val!(stack_probes);
+ target_option_val!(min_global_align);
+ target_option_val!(default_codegen_units);
+ target_option_val!(trap_unreachable);
+ target_option_val!(requires_lto);
+ target_option_val!(singlethread);
+ target_option_val!(no_builtins);
+ target_option_val!(default_hidden_visibility);
+ target_option_val!(emit_debug_gdb_scripts);
+ target_option_val!(requires_uwtable);
+ target_option_val!(default_uwtable);
+ target_option_val!(simd_types_indirect);
+ target_option_val!(limit_rdylib_exports);
+ target_option_val!(override_export_symbols);
+ target_option_val!(merge_functions);
+ target_option_val!(mcount, "target-mcount");
+ target_option_val!(llvm_abiname);
+ target_option_val!(relax_elf_relocations);
+ target_option_val!(llvm_args);
+ target_option_val!(use_ctors_section);
+ target_option_val!(eh_frame_header);
+ target_option_val!(has_thumb_interworking);
+ target_option_val!(split_debuginfo);
+ target_option_val!(supported_sanitizers);
+ target_option_val!(c_enum_min_bits);
+ target_option_val!(generate_arange_section);
+ target_option_val!(supports_stack_protector);
+
+ if let Some(abi) = self.default_adjusted_cabi {
+ d.insert("default-adjusted-cabi".into(), Abi::name(abi).to_json());
+ }
+
+ Json::Object(d)
+ }
+}
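+
+// Illustrative round-trip sketch (an assumption consistent with the note in
+// `from_json` about built-in targets being round-tripped through that code):
+//
+//     let (reparsed, _warnings) = Target::from_json(target.to_json())?;
+//     assert_eq!(reparsed, target);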
+
+/// Either a target triple string or a path to a JSON file.
+#[derive(Clone, Debug)]
+pub enum TargetTriple {
+ TargetTriple(String),
+ TargetJson {
+ /// Warning: This field may only be used by rustdoc. Using it anywhere else will lead to
+ /// inconsistencies as it is discarded during serialization.
+ path_for_rustdoc: PathBuf,
+ triple: String,
+ contents: String,
+ },
+}
+
+// Use a manual implementation to ignore the path field
+impl PartialEq for TargetTriple {
+ fn eq(&self, other: &Self) -> bool {
+ match (self, other) {
+ (Self::TargetTriple(l0), Self::TargetTriple(r0)) => l0 == r0,
+ (
+ Self::TargetJson { path_for_rustdoc: _, triple: l_triple, contents: l_contents },
+ Self::TargetJson { path_for_rustdoc: _, triple: r_triple, contents: r_contents },
+ ) => l_triple == r_triple && l_contents == r_contents,
+ _ => false,
+ }
+ }
+}
+
+// Use a manual implementation to ignore the path field
+impl Hash for TargetTriple {
+ fn hash<H: Hasher>(&self, state: &mut H) -> () {
+ match self {
+ TargetTriple::TargetTriple(triple) => {
+ 0u8.hash(state);
+ triple.hash(state)
+ }
+ TargetTriple::TargetJson { path_for_rustdoc: _, triple, contents } => {
+ 1u8.hash(state);
+ triple.hash(state);
+ contents.hash(state)
+ }
+ }
+ }
+}
+
+// Use a manual implementation to prevent encoding the target json file path in the crate metadata
+impl<S: Encoder> Encodable<S> for TargetTriple {
+ fn encode(&self, s: &mut S) {
+ match self {
+ TargetTriple::TargetTriple(triple) => s.emit_enum_variant(0, |s| s.emit_str(triple)),
+ TargetTriple::TargetJson { path_for_rustdoc: _, triple, contents } => s
+ .emit_enum_variant(1, |s| {
+ s.emit_str(triple);
+ s.emit_str(contents)
+ }),
+ }
+ }
+}
+
+impl<D: Decoder> Decodable<D> for TargetTriple {
+ fn decode(d: &mut D) -> Self {
+ match d.read_usize() {
+ 0 => TargetTriple::TargetTriple(d.read_str().to_owned()),
+ 1 => TargetTriple::TargetJson {
+ path_for_rustdoc: PathBuf::new(),
+ triple: d.read_str().to_owned(),
+ contents: d.read_str().to_owned(),
+ },
+ _ => {
+ panic!("invalid enum variant tag while decoding `TargetTriple`, expected 0..2");
+ }
+ }
+ }
+}
+
+impl TargetTriple {
+ /// Creates a target triple from the passed target triple string.
+ pub fn from_triple(triple: &str) -> Self {
+ TargetTriple::TargetTriple(triple.into())
+ }
+
+ /// Creates a target triple from the passed target path.
+ pub fn from_path(path: &Path) -> Result<Self, io::Error> {
+ let canonicalized_path = path.canonicalize()?;
+ let contents = std::fs::read_to_string(&canonicalized_path).map_err(|err| {
+ io::Error::new(
+ io::ErrorKind::InvalidInput,
+ format!("target path {:?} is not a valid file: {}", canonicalized_path, err),
+ )
+ })?;
+ let triple = canonicalized_path
+ .file_stem()
+ .expect("target path must not be empty")
+ .to_str()
+ .expect("target path must be valid unicode")
+ .to_owned();
+ Ok(TargetTriple::TargetJson { path_for_rustdoc: canonicalized_path, triple, contents })
+ }
+
+ /// Returns a string triple for this target.
+ ///
+ /// If this target is a path, the file name (without extension) is returned.
+ pub fn triple(&self) -> &str {
+ match *self {
+ TargetTriple::TargetTriple(ref triple)
+ | TargetTriple::TargetJson { ref triple, .. } => triple,
+ }
+ }
+
+ /// Returns an extended string triple for this target.
+ ///
+ /// If this target is a path, a hash of the path is appended to the triple returned
+ /// by `triple()`.
+ pub fn debug_triple(&self) -> String {
+ use std::collections::hash_map::DefaultHasher;
+
+ match self {
+ TargetTriple::TargetTriple(triple) => triple.to_owned(),
+ TargetTriple::TargetJson { path_for_rustdoc: _, triple, contents: content } => {
+ let mut hasher = DefaultHasher::new();
+ content.hash(&mut hasher);
+ let hash = hasher.finish();
+ format!("{}-{}", triple, hash)
+ }
+ }
+ }
+}
+
+impl fmt::Display for TargetTriple {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.debug_triple())
+ }
+}
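
A short sketch, not part of the patch itself, of what the manual `PartialEq`/`Hash` impls above buy: two `TargetJson` values loaded from different paths compare equal as long as triple and contents agree, because the path field is deliberately ignored (both paths below are illustrative).

use std::path::PathBuf;

fn main() {
    let a = TargetTriple::TargetJson {
        path_for_rustdoc: PathBuf::from("/checkout/custom.json"), // illustrative path
        triple: "custom".into(),
        contents: "{}".into(),
    };
    let b = TargetTriple::TargetJson {
        path_for_rustdoc: PathBuf::from("/elsewhere/custom.json"), // different path
        triple: "custom".into(),
        contents: "{}".into(),
    };
    assert_eq!(a, b); // equal despite the differing paths
}
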
diff --git a/compiler/rustc_target/src/spec/msp430_none_elf.rs b/compiler/rustc_target/src/spec/msp430_none_elf.rs
new file mode 100644
index 000000000..6b09386ae
--- /dev/null
+++ b/compiler/rustc_target/src/spec/msp430_none_elf.rs
@@ -0,0 +1,59 @@
+use crate::spec::{cvs, PanicStrategy, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "msp430-none-elf".into(),
+ pointer_width: 16,
+ data_layout: "e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16".into(),
+ arch: "msp430".into(),
+
+ options: TargetOptions {
+ c_int_width: "16".into(),
+
+            // The LLVM backend currently can't generate object files. To work
+            // around this, LLVM generates assembly files, which we then feed
+            // to gcc to get object files. For this reason we have a hard
+            // dependency on this specific gcc.
+ asm_args: cvs!["-mcpu=msp430"],
+ linker: Some("msp430-elf-gcc".into()),
+ linker_is_gnu: false,
+
+            // There are no atomic CAS instructions available in the MSP430
+            // instruction set, and the LLVM backend doesn't currently support
+            // compiler fences, so the Atomic* API is missing on this target.
+            // When the LLVM backend gains support for compiler fences,
+            // uncomment the `singlethread: true` line and set
+            // `max_atomic_width` to `Some(16)`.
+ max_atomic_width: Some(0),
+ atomic_cas: false,
+ // singlethread: true,
+
+            // Because these devices have so few resources, having an
+            // unwinder is too onerous, so we default to "abort"; the
+            // "unwind" strategy is very rarely wanted here.
+ panic_strategy: PanicStrategy::Abort,
+
+            // Similarly, one almost never wants to use relocatable
+            // code because of the extra costs it involves.
+ relocation_model: RelocModel::Static,
+
+            // Right now we invoke an external assembler, and this isn't
+            // compatible with multiple codegen units; besides, we probably
+            // don't want to invoke that many gcc instances.
+ default_codegen_units: Some(1),
+
+            // Since MSP430 doesn't meaningfully support faulting on illegal
+            // instructions, LLVM generates a call to the abort() function
+            // instead of a trap instruction. Such calls are 4 bytes long, and
+            // that is too much overhead for such a small target.
+ trap_unreachable: false,
+
+ // See the thumb_base.rs file for an explanation of this value
+ emit_debug_gdb_scripts: false,
+
+ eh_frame_header: false,
+
+ ..Default::default()
+ },
+ }
+}
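
For downstream code, the practical effect of `max_atomic_width: Some(0)` and `atomic_cas: false` above is that no `target_has_atomic` cfg is ever satisfied on this target. A hedged sketch of how portable crate code typically gates atomics in that situation (ordinary user code, not patch code):

#[cfg(target_has_atomic = "16")]
fn counter_bump() {
    use core::sync::atomic::{AtomicU16, Ordering};
    static COUNTER: AtomicU16 = AtomicU16::new(0);
    COUNTER.fetch_add(1, Ordering::SeqCst);
}

#[cfg(not(target_has_atomic = "16"))]
fn counter_bump() {
    // Fallback path on msp430-none-elf: e.g. a critical section
    // with interrupts disabled instead of a hardware atomic.
}

fn main() {
    counter_bump();
}
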
diff --git a/compiler/rustc_target/src/spec/msvc_base.rs b/compiler/rustc_target/src/spec/msvc_base.rs
new file mode 100644
index 000000000..edb30b72b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/msvc_base.rs
@@ -0,0 +1,24 @@
+use crate::spec::{LinkerFlavor, LldFlavor, SplitDebuginfo, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ // Suppress the verbose logo and authorship debugging output, which would needlessly
+ // clog any log files.
+ let pre_link_args = TargetOptions::link_args(LinkerFlavor::Msvc, &["/NOLOGO"]);
+
+ TargetOptions {
+ linker_flavor: LinkerFlavor::Msvc,
+ is_like_windows: true,
+ is_like_msvc: true,
+ lld_flavor: LldFlavor::Link,
+ linker_is_gnu: false,
+ pre_link_args,
+ abi_return_struct_as_int: true,
+ emit_debug_gdb_scripts: false,
+
+ // Currently this is the only supported method of debuginfo on MSVC
+ // where `*.pdb` files show up next to the final artifact.
+ split_debuginfo: SplitDebuginfo::Packed,
+
+ ..Default::default()
+ }
+}
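
These `*_base` modules are meant to be layered: a concrete target spec calls `opts()` and then tweaks the result. A minimal sketch of that pattern, assuming a hypothetical extra flag (`/DYNAMICBASE` here is only an example, not something this patch adds):

use crate::spec::{LinkerFlavor, TargetOptions};

pub fn opts_with_extra_flag() -> TargetOptions {
    let mut base = super::msvc_base::opts();
    // Hypothetical extra pre-link flag, purely to show the layering pattern.
    base.add_pre_link_args(LinkerFlavor::Msvc, &["/DYNAMICBASE"]);
    base
}
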
diff --git a/compiler/rustc_target/src/spec/netbsd_base.rs b/compiler/rustc_target/src/spec/netbsd_base.rs
new file mode 100644
index 000000000..be94ea234
--- /dev/null
+++ b/compiler/rustc_target/src/spec/netbsd_base.rs
@@ -0,0 +1,16 @@
+use crate::spec::{cvs, RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "netbsd".into(),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ no_default_libraries: false,
+ has_rpath: true,
+ position_independent_executables: true,
+ relro_level: RelroLevel::Full,
+ use_ctors_section: true,
+ default_dwarf_version: 2,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
new file mode 100644
index 000000000..1c5b68001
--- /dev/null
+++ b/compiler/rustc_target/src/spec/nvptx64_nvidia_cuda.rs
@@ -0,0 +1,53 @@
+use crate::spec::{LinkerFlavor, MergeFunctions, PanicStrategy, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ arch: "nvptx64".into(),
+ data_layout: "e-i64:64-i128:128-v16:16-v32:32-n16:32:64".into(),
+ llvm_target: "nvptx64-nvidia-cuda".into(),
+ pointer_width: 64,
+
+ options: TargetOptions {
+ os: "cuda".into(),
+ vendor: "nvidia".into(),
+ linker_flavor: LinkerFlavor::PtxLinker,
+ // The linker can be installed from `crates.io`.
+ linker: Some("rust-ptx-linker".into()),
+ linker_is_gnu: false,
+
+            // With the `ptx-linker` approach, this can later be overridden via link flags.
+ cpu: "sm_30".into(),
+
+ // FIXME: create tests for the atomics.
+ max_atomic_width: Some(64),
+
+ // Unwinding on CUDA is neither feasible nor useful.
+ panic_strategy: PanicStrategy::Abort,
+
+ // Needed to use `dylib` and `bin` crate types and the linker.
+ dynamic_linking: true,
+
+            // Avoid using dylib because it contains metadata not supported
+            // by the LLVM NVPTX backend.
+ only_cdylib: true,
+
+            // Let `ptx-linker` handle LLVM lowering into MC / assembly.
+ obj_is_bitcode: true,
+
+            // Convenient and predictable naming scheme.
+ dll_prefix: "".into(),
+ dll_suffix: ".ptx".into(),
+ exe_suffix: ".ptx".into(),
+
+            // Disable the MergeFunctions LLVM optimization pass because it
+            // can produce kernel functions that call other kernel functions.
+            // This behavior is not supported by the PTX ISA.
+ merge_functions: MergeFunctions::Disabled,
+
+ // The LLVM backend does not support stack canaries for this target
+ supports_stack_protector: false,
+
+ ..Default::default()
+ },
+ }
+}
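
The `dll_prefix`/`dll_suffix`/`exe_suffix` fields above directly determine artifact names. A standalone sketch of the resulting scheme (`artifact_name` is a helper invented here for illustration):

fn artifact_name(prefix: &str, crate_name: &str, suffix: &str) -> String {
    format!("{}{}{}", prefix, crate_name, suffix)
}

fn main() {
    // With dll_prefix = "" and dll_suffix = ".ptx", a cdylib named
    // `kernels` comes out as `kernels.ptx` (vs. `libkernels.so` on Linux).
    assert_eq!(artifact_name("", "kernels", ".ptx"), "kernels.ptx");
    assert_eq!(artifact_name("lib", "kernels", ".so"), "libkernels.so");
}
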
diff --git a/compiler/rustc_target/src/spec/openbsd_base.rs b/compiler/rustc_target/src/spec/openbsd_base.rs
new file mode 100644
index 000000000..e7db14e05
--- /dev/null
+++ b/compiler/rustc_target/src/spec/openbsd_base.rs
@@ -0,0 +1,16 @@
+use crate::spec::{cvs, FramePointer, RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "openbsd".into(),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ has_rpath: true,
+ abi_return_struct_as_int: true,
+ position_independent_executables: true,
+ frame_pointer: FramePointer::Always, // FIXME 43575: should be MayOmit...
+ relro_level: RelroLevel::Full,
+ default_dwarf_version: 2,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
new file mode 100644
index 000000000..803453c4a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_freebsd.rs
@@ -0,0 +1,17 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::freebsd_base::opts();
+ base.cpu = "ppc64".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "powerpc64-unknown-freebsd".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i64:64-n32:64".into(),
+ arch: "powerpc64".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
new file mode 100644
index 000000000..5413c4f33
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_gnu.rs
@@ -0,0 +1,21 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, RelroLevel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.cpu = "ppc64".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+
+ // ld.so in at least RHEL6 on ppc64 has a bug related to BIND_NOW, so only enable partial RELRO
+ // for now. https://github.com/rust-lang/rust/pull/43170#issuecomment-315411474
+ base.relro_level = RelroLevel::Partial;
+
+ Target {
+ llvm_target: "powerpc64-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ arch: "powerpc64".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
new file mode 100644
index 000000000..159335eb6
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc64_unknown_linux_musl.rs
@@ -0,0 +1,17 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "ppc64".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "powerpc64-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ arch: "powerpc64".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
new file mode 100644
index 000000000..b7420d232
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc64_wrs_vxworks.rs
@@ -0,0 +1,17 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::vxworks_base::opts();
+ base.cpu = "ppc64".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "powerpc64-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ arch: "powerpc64".into(),
+ options: TargetOptions { endian: Endian::Big, ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs
new file mode 100644
index 000000000..a3d180043
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_freebsd.rs
@@ -0,0 +1,16 @@
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::freebsd_base::opts();
+ base.cpu = "ppc64le".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "powerpc64le-unknown-freebsd".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i64:64-n32:64".into(),
+ arch: "powerpc64".into(),
+ options: TargetOptions { mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
new file mode 100644
index 000000000..e18ff3be4
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_gnu.rs
@@ -0,0 +1,16 @@
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.cpu = "ppc64le".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "powerpc64le-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ arch: "powerpc64".into(),
+ options: TargetOptions { mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
new file mode 100644
index 000000000..b84943d23
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc64le_unknown_linux_musl.rs
@@ -0,0 +1,16 @@
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "ppc64le".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "powerpc64le-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-i64:64-n32:64-S128-v256:256:256-v512:512:512".into(),
+ arch: "powerpc64".into(),
+ options: TargetOptions { mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs
new file mode 100644
index 000000000..516b2de37
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_freebsd.rs
@@ -0,0 +1,23 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::freebsd_base::opts();
+    // Extra hint to the linker that we are generating secure-PLT code.
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "--target=powerpc-unknown-freebsd13.0"]);
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "powerpc-unknown-freebsd13.0".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ arch: "powerpc".into(),
+ options: TargetOptions {
+ endian: Endian::Big,
+ features: "+secure-plt".into(),
+ relocation_model: RelocModel::Pic,
+ mcount: "_mcount".into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
new file mode 100644
index 000000000..6686a0bbf
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnu.rs
@@ -0,0 +1,16 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "powerpc-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ arch: "powerpc".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
new file mode 100644
index 000000000..6a250f4b5
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_gnuspe.rs
@@ -0,0 +1,21 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-mspe"]);
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "powerpc-unknown-linux-gnuspe".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ arch: "powerpc".into(),
+ options: TargetOptions {
+ abi: "spe".into(),
+ endian: Endian::Big,
+ mcount: "_mcount".into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
new file mode 100644
index 000000000..34200c679
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_linux_musl.rs
@@ -0,0 +1,16 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "powerpc-unknown-linux-musl".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ arch: "powerpc".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "_mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
new file mode 100644
index 000000000..60661ef9b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_netbsd.rs
@@ -0,0 +1,16 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::netbsd_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32"]);
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "powerpc-unknown-netbsd".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ arch: "powerpc".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "__mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs b/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
new file mode 100644
index 000000000..ad2c3d40f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc_unknown_openbsd.rs
@@ -0,0 +1,16 @@
+use crate::abi::Endian;
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::openbsd_base::opts();
+ base.endian = Endian::Big;
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "powerpc-unknown-openbsd".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ arch: "powerpc".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
new file mode 100644
index 000000000..3f24966e0
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks.rs
@@ -0,0 +1,16 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::vxworks_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m32", "--secure-plt"]);
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "powerpc-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ arch: "powerpc".into(),
+ options: TargetOptions { endian: Endian::Big, features: "+secure-plt".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
new file mode 100644
index 000000000..0f04f41f9
--- /dev/null
+++ b/compiler/rustc_target/src/spec/powerpc_wrs_vxworks_spe.rs
@@ -0,0 +1,22 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::vxworks_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-mspe", "--secure-plt"]);
+ base.max_atomic_width = Some(32);
+
+ Target {
+ llvm_target: "powerpc-unknown-linux-gnuspe".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-n32".into(),
+ arch: "powerpc".into(),
+ options: TargetOptions {
+ abi: "spe".into(),
+ endian: Endian::Big,
+            // The msync feature disables the 'fsync' instruction, which is not supported by fsl_p1p2.
+ features: "+secure-plt,+msync".into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/redox_base.rs b/compiler/rustc_target/src/spec/redox_base.rs
new file mode 100644
index 000000000..468fe4785
--- /dev/null
+++ b/compiler/rustc_target/src/spec/redox_base.rs
@@ -0,0 +1,17 @@
+use crate::spec::{cvs, RelroLevel, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "redox".into(),
+ env: "relibc".into(),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ has_rpath: true,
+ position_independent_executables: true,
+ relro_level: RelroLevel::Full,
+ has_thread_local: true,
+ crt_static_default: true,
+ crt_static_respected: true,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs
new file mode 100644
index 000000000..bffd377bc
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_gnu.rs
@@ -0,0 +1,18 @@
+use crate::spec::{CodeModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv32-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
+ arch: "riscv32".into(),
+ options: TargetOptions {
+ code_model: Some(CodeModel::Medium),
+ cpu: "generic-rv32".into(),
+ features: "+m,+a,+f,+d,+c".into(),
+ llvm_abiname: "ilp32d".into(),
+ max_atomic_width: Some(32),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
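
The feature string `+m,+a,+f,+d,+c` and the `ilp32d`/`lp64d` ABI names recur across the RISC-V specs below; the letters are standard ISA extension names. A small editorial decoder sketch (the helper is invented here, and "gc" is shorthand for IMAFD plus further standard extensions, plus C):

fn riscv_ext(letter: char) -> Option<&'static str> {
    Some(match letter {
        'm' => "integer multiplication/division",
        'a' => "atomic instructions",
        'f' => "single-precision floating point",
        'd' => "double-precision floating point",
        'c' => "compressed (16-bit) instructions",
        _ => return None,
    })
}

fn main() {
    // `ilp32d` then selects the hard-double-float calling convention.
    for l in ['m', 'a', 'f', 'd', 'c'] {
        println!("{}: {}", l, riscv_ext(l).unwrap());
    }
}
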
diff --git a/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_musl.rs
new file mode 100644
index 000000000..c9f3acffb
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv32gc_unknown_linux_musl.rs
@@ -0,0 +1,18 @@
+use crate::spec::{CodeModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv32-unknown-linux-musl".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
+ arch: "riscv32".into(),
+ options: TargetOptions {
+ code_model: Some(CodeModel::Medium),
+ cpu: "generic-rv32".into(),
+ features: "+m,+a,+f,+d,+c".into(),
+ llvm_abiname: "ilp32d".into(),
+ max_atomic_width: Some(32),
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs
new file mode 100644
index 000000000..232139db6
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv32i_unknown_none_elf.rs
@@ -0,0 +1,24 @@
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
+ llvm_target: "riscv32".into(),
+ pointer_width: 32,
+ arch: "riscv32".into(),
+
+ options: TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ cpu: "generic-rv32".into(),
+ max_atomic_width: Some(0),
+ atomic_cas: false,
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv32im_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32im_unknown_none_elf.rs
new file mode 100644
index 000000000..3e5d2887f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv32im_unknown_none_elf.rs
@@ -0,0 +1,25 @@
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
+ llvm_target: "riscv32".into(),
+ pointer_width: 32,
+ arch: "riscv32".into(),
+
+ options: TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ cpu: "generic-rv32".into(),
+ max_atomic_width: Some(0),
+ atomic_cas: false,
+ features: "+m".into(),
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs
new file mode 100644
index 000000000..99317b9f1
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv32imac_unknown_none_elf.rs
@@ -0,0 +1,24 @@
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
+ llvm_target: "riscv32".into(),
+ pointer_width: 32,
+ arch: "riscv32".into(),
+
+ options: TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ cpu: "generic-rv32".into(),
+ max_atomic_width: Some(32),
+ features: "+m,+a,+c".into(),
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv32imac_unknown_xous_elf.rs b/compiler/rustc_target/src/spec/riscv32imac_unknown_xous_elf.rs
new file mode 100644
index 000000000..a5de645c9
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv32imac_unknown_xous_elf.rs
@@ -0,0 +1,23 @@
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
+ llvm_target: "riscv32".into(),
+ pointer_width: 32,
+ arch: "riscv32".into(),
+
+ options: TargetOptions {
+ os: "xous".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ cpu: "generic-rv32".into(),
+ max_atomic_width: Some(32),
+ features: "+m,+a,+c".into(),
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv32imc_esp_espidf.rs b/compiler/rustc_target/src/spec/riscv32imc_esp_espidf.rs
new file mode 100644
index 000000000..03baef65c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv32imc_esp_espidf.rs
@@ -0,0 +1,36 @@
+use crate::spec::{cvs, Target, TargetOptions};
+use crate::spec::{LinkerFlavor, PanicStrategy, RelocModel};
+
+pub fn target() -> Target {
+ Target {
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
+ llvm_target: "riscv32".into(),
+ pointer_width: 32,
+ arch: "riscv32".into(),
+
+ options: TargetOptions {
+ families: cvs!["unix"],
+ os: "espidf".into(),
+ env: "newlib".into(),
+ vendor: "espressif".into(),
+ linker_flavor: LinkerFlavor::Gcc,
+ linker: Some("riscv32-esp-elf-gcc".into()),
+ cpu: "generic-rv32".into(),
+
+ // While the RiscV32IMC architecture does not natively support atomics, ESP-IDF does support
+ // the __atomic* and __sync* GCC builtins, so setting `max_atomic_width` to `Some(64)`
+ // and `atomic_cas` to `true` will cause the compiler to emit libcalls to these builtins.
+ //
+            // Support for atomics is necessary for the Rust standard library, which the ESP-IDF framework supports.
+ max_atomic_width: Some(64),
+ atomic_cas: true,
+
+ features: "+m,+c".into(),
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ ..Default::default()
+ },
+ }
+}
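
Given the libcall arrangement described in the comment above, user code on this target can use the ordinary `Atomic*` API even though the hardware lacks native atomics. A hedged sketch, assuming the ESP-IDF builtins behave as described (plain crate code, not patch code):

use std::sync::atomic::{AtomicU64, Ordering};

static EVENTS: AtomicU64 = AtomicU64::new(0);

fn record_event() -> u64 {
    // On riscv32imc-esp-espidf this lowers to an __atomic_* libcall
    // provided by ESP-IDF rather than a native RMW instruction.
    EVENTS.fetch_add(1, Ordering::SeqCst)
}

fn main() {
    record_event();
    assert_eq!(EVENTS.load(Ordering::SeqCst), 1);
}
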
diff --git a/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs
new file mode 100644
index 000000000..bf510d204
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv32imc_unknown_none_elf.rs
@@ -0,0 +1,25 @@
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ data_layout: "e-m:e-p:32:32-i64:64-n32-S128".into(),
+ llvm_target: "riscv32".into(),
+ pointer_width: 32,
+ arch: "riscv32".into(),
+
+ options: TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ cpu: "generic-rv32".into(),
+ max_atomic_width: Some(0),
+ atomic_cas: false,
+ features: "+m,+c".into(),
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_freebsd.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_freebsd.rs
new file mode 100644
index 000000000..0539eca6c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_freebsd.rs
@@ -0,0 +1,18 @@
+use crate::spec::{CodeModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv64-unknown-freebsd".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ arch: "riscv64".into(),
+ options: TargetOptions {
+ code_model: Some(CodeModel::Medium),
+ cpu: "generic-rv64".into(),
+ features: "+m,+a,+f,+d,+c".into(),
+ llvm_abiname: "lp64d".into(),
+ max_atomic_width: Some(64),
+ ..super::freebsd_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs
new file mode 100644
index 000000000..7d1bf228c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_gnu.rs
@@ -0,0 +1,18 @@
+use crate::spec::{CodeModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv64-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ arch: "riscv64".into(),
+ options: TargetOptions {
+ code_model: Some(CodeModel::Medium),
+ cpu: "generic-rv64".into(),
+ features: "+m,+a,+f,+d,+c".into(),
+ llvm_abiname: "lp64d".into(),
+ max_atomic_width: Some(64),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs
new file mode 100644
index 000000000..f04f8a48b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_linux_musl.rs
@@ -0,0 +1,18 @@
+use crate::spec::{CodeModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "riscv64-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ arch: "riscv64".into(),
+ options: TargetOptions {
+ code_model: Some(CodeModel::Medium),
+ cpu: "generic-rv64".into(),
+ features: "+m,+a,+f,+d,+c".into(),
+ llvm_abiname: "lp64d".into(),
+ max_atomic_width: Some(64),
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
new file mode 100644
index 000000000..03b3cfd1e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv64gc_unknown_none_elf.rs
@@ -0,0 +1,26 @@
+use crate::spec::{CodeModel, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ llvm_target: "riscv64".into(),
+ pointer_width: 64,
+ arch: "riscv64".into(),
+
+ options: TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ llvm_abiname: "lp64d".into(),
+ cpu: "generic-rv64".into(),
+ max_atomic_width: Some(64),
+ features: "+m,+a,+f,+d,+c".into(),
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ code_model: Some(CodeModel::Medium),
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs b/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
new file mode 100644
index 000000000..2a94c9dd2
--- /dev/null
+++ b/compiler/rustc_target/src/spec/riscv64imac_unknown_none_elf.rs
@@ -0,0 +1,25 @@
+use crate::spec::{CodeModel, Target, TargetOptions};
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+
+pub fn target() -> Target {
+ Target {
+ data_layout: "e-m:e-p:64:64-i64:64-i128:128-n64-S128".into(),
+ llvm_target: "riscv64".into(),
+ pointer_width: 64,
+ arch: "riscv64".into(),
+
+ options: TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ cpu: "generic-rv64".into(),
+ max_atomic_width: Some(64),
+ features: "+m,+a,+c".into(),
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ code_model: Some(CodeModel::Medium),
+ emit_debug_gdb_scripts: false,
+ eh_frame_header: false,
+ ..Default::default()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs
new file mode 100644
index 000000000..8757bbed8
--- /dev/null
+++ b/compiler/rustc_target/src/spec/s390x_unknown_linux_gnu.rs
@@ -0,0 +1,23 @@
+use crate::abi::Endian;
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.endian = Endian::Big;
+ // z10 is the oldest CPU supported by LLVM
+ base.cpu = "z10".into();
+ // FIXME: The data_layout string below and the ABI implementation in
+ // cabi_s390x.rs are for now hard-coded to assume the no-vector ABI.
+ // Pass the -vector feature string to LLVM to respect this assumption.
+ base.features = "-vector".into();
+ base.max_atomic_width = Some(64);
+ base.min_global_align = Some(16);
+
+ Target {
+ llvm_target: "s390x-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".into(),
+ arch: "s390x".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs
new file mode 100644
index 000000000..4c855271a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/s390x_unknown_linux_musl.rs
@@ -0,0 +1,24 @@
+use crate::abi::Endian;
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.endian = Endian::Big;
+ // z10 is the oldest CPU supported by LLVM
+ base.cpu = "z10".into();
+ // FIXME: The data_layout string below and the ABI implementation in
+ // cabi_s390x.rs are for now hard-coded to assume the no-vector ABI.
+ // Pass the -vector feature string to LLVM to respect this assumption.
+ base.features = "-vector".into();
+ base.max_atomic_width = Some(64);
+ base.min_global_align = Some(16);
+ base.static_position_independent_executables = true;
+
+ Target {
+ llvm_target: "s390x-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-a:8:16-n32:64".into(),
+ arch: "s390x".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/solaris_base.rs b/compiler/rustc_target/src/spec/solaris_base.rs
new file mode 100644
index 000000000..b7e8e8cf7
--- /dev/null
+++ b/compiler/rustc_target/src/spec/solaris_base.rs
@@ -0,0 +1,16 @@
+use crate::spec::{cvs, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "solaris".into(),
+ dynamic_linking: true,
+ has_rpath: true,
+ families: cvs!["unix"],
+ is_like_solaris: true,
+ linker_is_gnu: false,
+ limit_rdylib_exports: false, // Linker doesn't support this
+ eh_frame_header: false,
+
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/solid_base.rs b/compiler/rustc_target/src/spec/solid_base.rs
new file mode 100644
index 000000000..c585a6cd5
--- /dev/null
+++ b/compiler/rustc_target/src/spec/solid_base.rs
@@ -0,0 +1,13 @@
+use super::FramePointer;
+use crate::spec::TargetOptions;
+
+pub fn opts(kernel: &str) -> TargetOptions {
+ TargetOptions {
+ os: format!("solid_{}", kernel).into(),
+ vendor: "kmc".into(),
+ executables: false,
+ frame_pointer: FramePointer::NonLeaf,
+ has_thread_local: true,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs
new file mode 100644
index 000000000..39efd8f30
--- /dev/null
+++ b/compiler/rustc_target/src/spec/sparc64_unknown_linux_gnu.rs
@@ -0,0 +1,17 @@
+use crate::abi::Endian;
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.endian = Endian::Big;
+ base.cpu = "v9".into();
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "sparc64-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+ arch: "sparc64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs
new file mode 100644
index 000000000..836ab0e37
--- /dev/null
+++ b/compiler/rustc_target/src/spec/sparc64_unknown_netbsd.rs
@@ -0,0 +1,17 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::netbsd_base::opts();
+ base.cpu = "v9".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "sparc64-unknown-netbsd".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+ arch: "sparc64".into(),
+ options: TargetOptions { endian: Endian::Big, mcount: "__mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs
new file mode 100644
index 000000000..4a192df39
--- /dev/null
+++ b/compiler/rustc_target/src/spec/sparc64_unknown_openbsd.rs
@@ -0,0 +1,18 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::openbsd_base::opts();
+ base.endian = Endian::Big;
+ base.cpu = "v9".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "sparc64-unknown-openbsd".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+ arch: "sparc64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
new file mode 100644
index 000000000..ea4fafa4b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/sparc_unknown_linux_gnu.rs
@@ -0,0 +1,18 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.endian = Endian::Big;
+ base.cpu = "v9".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-mv8plus"]);
+
+ Target {
+ llvm_target: "sparc-unknown-linux-gnu".into(),
+ pointer_width: 32,
+ data_layout: "E-m:e-p:32:32-i64:64-f128:64-n32-S64".into(),
+ arch: "sparc".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
new file mode 100644
index 000000000..aac09181a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/sparcv9_sun_solaris.rs
@@ -0,0 +1,24 @@
+use crate::abi::Endian;
+use crate::spec::{LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::solaris_base::opts();
+ base.endian = Endian::Big;
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // llvm calls this "v9"
+ base.cpu = "v9".into();
+ base.vendor = "sun".into();
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "sparcv9-sun-solaris".into(),
+ pointer_width: 64,
+ data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+ // Use "sparc64" instead of "sparcv9" here, since the former is already
+ // used widely in the source base. If we ever needed ABI
+ // differentiation from the sparc64, we could, but that would probably
+ // just be confusing.
+ arch: "sparc64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/tests/tests_impl.rs b/compiler/rustc_target/src/spec/tests/tests_impl.rs
new file mode 100644
index 000000000..1db6db78b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/tests/tests_impl.rs
@@ -0,0 +1,135 @@
+use super::super::*;
+use std::assert_matches::assert_matches;
+
+// Test target self-consistency and JSON encoding/decoding roundtrip.
+pub(super) fn test_target(target: Target) {
+ target.check_consistency();
+ assert_eq!(Target::from_json(target.to_json()).map(|(j, _)| j), Ok(target));
+}
+
+impl Target {
+ fn check_consistency(&self) {
+ assert_eq!(self.is_like_osx, self.vendor == "apple");
+ assert_eq!(self.is_like_solaris, self.os == "solaris" || self.os == "illumos");
+ assert_eq!(self.is_like_windows, self.os == "windows" || self.os == "uefi");
+ assert_eq!(self.is_like_wasm, self.arch == "wasm32" || self.arch == "wasm64");
+ assert!(self.is_like_windows || !self.is_like_msvc);
+
+ // Check that default linker flavor and lld flavor are compatible
+ // with some other key properties.
+ assert_eq!(self.is_like_osx, matches!(self.lld_flavor, LldFlavor::Ld64));
+ assert_eq!(self.is_like_msvc, matches!(self.lld_flavor, LldFlavor::Link));
+ assert_eq!(self.is_like_wasm, matches!(self.lld_flavor, LldFlavor::Wasm));
+ assert_eq!(self.os == "l4re", matches!(self.linker_flavor, LinkerFlavor::L4Bender));
+ assert_eq!(self.os == "emscripten", matches!(self.linker_flavor, LinkerFlavor::Em));
+ assert_eq!(self.arch == "bpf", matches!(self.linker_flavor, LinkerFlavor::BpfLinker));
+ assert_eq!(self.arch == "nvptx64", matches!(self.linker_flavor, LinkerFlavor::PtxLinker));
+
+ for args in [
+ &self.pre_link_args,
+ &self.late_link_args,
+ &self.late_link_args_dynamic,
+ &self.late_link_args_static,
+ &self.post_link_args,
+ ] {
+ for (&flavor, flavor_args) in args {
+ assert!(!flavor_args.is_empty());
+ // Check that flavors mentioned in link args are compatible with the default flavor.
+ match (self.linker_flavor, self.lld_flavor) {
+ (
+ LinkerFlavor::Ld | LinkerFlavor::Lld(LldFlavor::Ld) | LinkerFlavor::Gcc,
+ LldFlavor::Ld,
+ ) => {
+ assert_matches!(
+ flavor,
+ LinkerFlavor::Ld | LinkerFlavor::Lld(LldFlavor::Ld) | LinkerFlavor::Gcc
+ )
+ }
+ (LinkerFlavor::Gcc, LldFlavor::Ld64) => {
+ assert_matches!(
+ flavor,
+ LinkerFlavor::Lld(LldFlavor::Ld64) | LinkerFlavor::Gcc
+ )
+ }
+ (LinkerFlavor::Msvc | LinkerFlavor::Lld(LldFlavor::Link), LldFlavor::Link) => {
+ assert_matches!(
+ flavor,
+ LinkerFlavor::Msvc | LinkerFlavor::Lld(LldFlavor::Link)
+ )
+ }
+ (LinkerFlavor::Lld(LldFlavor::Wasm) | LinkerFlavor::Gcc, LldFlavor::Wasm) => {
+ assert_matches!(
+ flavor,
+ LinkerFlavor::Lld(LldFlavor::Wasm) | LinkerFlavor::Gcc
+ )
+ }
+ (LinkerFlavor::L4Bender, LldFlavor::Ld) => {
+ assert_matches!(flavor, LinkerFlavor::L4Bender)
+ }
+ (LinkerFlavor::Em, LldFlavor::Wasm) => {
+ assert_matches!(flavor, LinkerFlavor::Em)
+ }
+ (LinkerFlavor::BpfLinker, LldFlavor::Ld) => {
+ assert_matches!(flavor, LinkerFlavor::BpfLinker)
+ }
+ (LinkerFlavor::PtxLinker, LldFlavor::Ld) => {
+ assert_matches!(flavor, LinkerFlavor::PtxLinker)
+ }
+ flavors => unreachable!("unexpected flavor combination: {:?}", flavors),
+ }
+
+ // Check that link args for cc and non-cc versions of flavors are consistent.
+ let check_noncc = |noncc_flavor| {
+ if let Some(noncc_args) = args.get(&noncc_flavor) {
+ for arg in flavor_args {
+ if let Some(suffix) = arg.strip_prefix("-Wl,") {
+ assert!(noncc_args.iter().any(|a| a == suffix));
+ }
+ }
+ }
+ };
+ match self.linker_flavor {
+ LinkerFlavor::Gcc => match self.lld_flavor {
+ LldFlavor::Ld => {
+ check_noncc(LinkerFlavor::Ld);
+ check_noncc(LinkerFlavor::Lld(LldFlavor::Ld));
+ }
+ LldFlavor::Wasm => check_noncc(LinkerFlavor::Lld(LldFlavor::Wasm)),
+ LldFlavor::Ld64 | LldFlavor::Link => {}
+ },
+ _ => {}
+ }
+ }
+
+ // Check that link args for lld and non-lld versions of flavors are consistent.
+ assert_eq!(args.get(&LinkerFlavor::Ld), args.get(&LinkerFlavor::Lld(LldFlavor::Ld)));
+ assert_eq!(
+ args.get(&LinkerFlavor::Msvc),
+ args.get(&LinkerFlavor::Lld(LldFlavor::Link)),
+ );
+ }
+
+ assert!(
+ (self.pre_link_objects_fallback.is_empty()
+ && self.post_link_objects_fallback.is_empty())
+ || self.crt_objects_fallback.is_some()
+ );
+
+        // If your target really needs to deviate from the rules below,
+        // add an exception for it here and document the reasons.
+        // Otherwise, keep the default "unknown" vendor rather than an empty string.
+ assert_ne!(self.vendor, "");
+ if !self.can_use_os_unknown() {
+ // Keep the default "none" for bare metal targets instead.
+ assert_ne!(self.os, "unknown");
+ }
+ }
+
+    // Add your target to the whitelist if it has a `std` library
+    // and you are certain you want "unknown" for the OS name.
+ fn can_use_os_unknown(&self) -> bool {
+ self.llvm_target == "wasm32-unknown-unknown"
+ || self.llvm_target == "wasm64-unknown-unknown"
+ || (self.env == "sgx" && self.vendor == "fortanix")
+ }
+}
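
The `check_noncc` closure above encodes a simple convention: a gcc-flavored link arg that wraps the linker via `-Wl,` must have a matching bare arg in the corresponding ld-flavored list. A standalone sketch of just that transformation (editorial helper, not patch code):

fn cc_arg_to_ld_arg(arg: &str) -> Option<&str> {
    // "-Wl,--as-needed" (passed through the cc driver) corresponds to
    // "--as-needed" (passed to ld directly); other args have no ld twin.
    arg.strip_prefix("-Wl,")
}

fn main() {
    assert_eq!(cc_arg_to_ld_arg("-Wl,--as-needed"), Some("--as-needed"));
    assert_eq!(cc_arg_to_ld_arg("-m64"), None);
}
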
diff --git a/compiler/rustc_target/src/spec/thumb_base.rs b/compiler/rustc_target/src/spec/thumb_base.rs
new file mode 100644
index 000000000..049142b89
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumb_base.rs
@@ -0,0 +1,60 @@
+// These `thumbv*` targets cover the ARM Cortex-M family of processors which are widely used in
+// microcontrollers. Namely, all these processors:
+//
+// - Cortex-M0
+// - Cortex-M0+
+// - Cortex-M1
+// - Cortex-M3
+// - Cortex-M4(F)
+// - Cortex-M7(F)
+// - Cortex-M23
+// - Cortex-M33
+//
+// We have opted for these instead of one target per processor (e.g., `cortex-m0`, `cortex-m3`,
+// etc) because the differences between some processors like the cortex-m0 and cortex-m1 are almost
+// non-existent from the POV of codegen so it doesn't make sense to have separate targets for them.
+// And if differences exist between two processors under the same target, rustc flags can be used to
+// optimize for one processor or the other.
+//
+// Also, we have not chosen a single target (`arm-none-eabi`) like GCC does because this makes it
+// difficult to integrate Rust code and C code. Targeting the Cortex-M4 requires different gcc flags
+// than the ones you would use for the Cortex-M0 and with a single target it'd be impossible to
+// differentiate one processor from the other.
+//
+// About arm vs thumb in the name. The Cortex-M devices only support the Thumb instruction set,
+// which is more compact (higher code density), and not the ARM instruction set. That's why LLVM
+// triples use thumb instead of arm. We follow suit because having thumb in the name lets us
+// differentiate these targets from our other `arm(v7)-*-*-gnueabi(hf)` targets in the context of
+// build scripts / gcc flags.
+
+use crate::spec::TargetOptions;
+use crate::spec::{FramePointer, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel};
+
+pub fn opts() -> TargetOptions {
+ // See rust-lang/rfcs#1645 for a discussion about these defaults
+ TargetOptions {
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ // In most cases, LLD is good enough
+ linker: Some("rust-lld".into()),
+ // Because these devices have very little resources having an unwinder is too onerous so we
+ // default to "abort" because the "unwind" strategy is very rare.
+ panic_strategy: PanicStrategy::Abort,
+ // Similarly, one almost always never wants to use relocatable code because of the extra
+ // costs it involves.
+ relocation_model: RelocModel::Static,
+        // When this section is added, a volatile load of its start address is also generated. This
+        // volatile load is a footgun as it can end up loading an invalid memory address, depending
+        // on how the user set up their linker scripts. This section adds pretty printers for stuff
+        // like std::Vec, which is not that useful in a no-std context, so it's best to leave it out
+        // until we figure out a way to add the pretty printers without requiring a volatile load;
+        // cf. rust-lang/rust#44993.
+ emit_debug_gdb_scripts: false,
+ // LLVM is eager to trash the link register when calling `noreturn` functions, which
+ // breaks debugging. Preserve LR by default to prevent that from happening.
+ frame_pointer: FramePointer::Always,
+        // ARM supports multiple ABIs for enums; the Linux one matches the default of 32 here,
+        // but any arm-none or thumb-none target defaults to 8 on GCC and clang.
+ c_enum_min_bits: 8,
+ ..Default::default()
+ }
+}
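
To make the `c_enum_min_bits: 8` default above concrete: it changes the layout of C-like (`repr(C)`) enums, which is what matters for FFI with arm-none GCC/Clang code. A hedged sketch of the observable difference (ordinary user code):

use std::mem::size_of;

#[allow(dead_code)]
#[repr(C)]
enum Mode {
    Off,
    On,
}

fn main() {
    // On Linux-style ARM targets (c_enum_min_bits = 32) this prints 4;
    // with c_enum_min_bits = 8, as on these thumb targets, the enum can
    // shrink to a single byte, matching arm-none GCC and Clang.
    println!("size_of::<Mode>() = {}", size_of::<Mode>());
}
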
diff --git a/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
new file mode 100644
index 000000000..7125d141a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv4t_none_eabi.rs
@@ -0,0 +1,69 @@
+//! Targets the ARMv4T, with code as `t32` code by default.
+//!
+//! Primarily of use for the GBA, but usable with other devices too.
+//!
+//! Please ping @Lokathor if changes are needed.
+//!
+//! This target profile assumes that you have the ARM binutils in your path
+//! (specifically the linker, `arm-none-eabi-ld`). They can be obtained for free
+//! for all major OSes from the ARM developer's website, and they may also be
+//! available in your system's package manager. Unfortunately, the standard
+//! linker that Rust uses (`lld`) only supports as far back as `ARMv5TE`, so we
+//! must use the GNU `ld` linker.
+//!
+//! **Important:** This target profile **does not** specify a linker script. You
+//! just get the default link script when you build a binary for this target.
+//! The default link script is very likely wrong, so you should use
+//! `-Clink-arg=-Tmy_script.ld` to override that with a correct linker script.
+
+use crate::spec::{
+ cvs, FramePointer, LinkerFlavor, PanicStrategy, RelocModel, Target, TargetOptions,
+};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv4t-none-eabi".into(),
+ pointer_width: 32,
+ arch: "arm".into(),
+ /* Data layout args are '-' separated:
+ * little endian
+ * stack is 64-bit aligned (EABI)
+ * pointers are 32-bit
+ * i64 must be 64-bit aligned (EABI)
+ * mangle names with ELF style
+ * native integers are 32-bit
+ * All other elements are default
+ */
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ linker_flavor: LinkerFlavor::Ld,
+ linker: Some("arm-none-eabi-ld".into()),
+
+ // extra args passed to the external assembler (assuming `arm-none-eabi-as`):
+ // * activate t32/a32 interworking
+ // * use arch ARMv4T
+ // * use little-endian
+            asm_args: cvs!["-mthumb-interwork", "-march=armv4t", "-mlittle-endian"],
+
+ // minimum extra features, these cannot be disabled via -C
+ features: "+soft-float,+strict-align".into(),
+
+ panic_strategy: PanicStrategy::Abort,
+ relocation_model: RelocModel::Static,
+ // suggested from thumb_base, rust-lang/rust#44993.
+ emit_debug_gdb_scripts: false,
+            // suggested from thumb_base: no-OS gcc/clang default to 8-bit enums
+ c_enum_min_bits: 8,
+ frame_pointer: FramePointer::MayOmit,
+
+ main_needs_argc_argv: false,
+
+ // don't have atomic compare-and-swap
+ atomic_cas: false,
+ has_thumb_interworking: true,
+
+ ..super::thumb_base::opts()
+ },
+ }
+}
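
The module docs above insist on supplying your own linker script; one ergonomic way to pass it is from a build script, so the flag travels with the crate. A hedged sketch (`my_script.ld` is the placeholder name from the docs, not a file this patch provides):

// build.rs
fn main() {
    // Hand the GNU linker the project's script, per the module docs.
    println!("cargo:rustc-link-arg=-Tmy_script.ld");
    // Rebuild whenever the script changes.
    println!("cargo:rerun-if-changed=my_script.ld");
}
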
diff --git a/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs
new file mode 100644
index 000000000..2546ab9b7
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv6m_none_eabi.rs
@@ -0,0 +1,23 @@
+// Targets the Cortex-M0, Cortex-M0+ and Cortex-M1 processors (ARMv6-M architecture)
+
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv6m-none-eabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabi".into(),
+ // The ARMv6-M architecture doesn't support unaligned loads/stores so we disable them
+ // with +strict-align.
+ features: "+strict-align".into(),
+ // There are no atomic CAS instructions available in the instruction set of the ARMv6-M
+ // architecture
+ atomic_cas: false,
+ ..super::thumb_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs
new file mode 100644
index 000000000..4d09d3a4d
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv7a_pc_windows_msvc.rs
@@ -0,0 +1,28 @@
+use crate::spec::{LinkerFlavor, PanicStrategy, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::windows_msvc_base::opts();
+ // Prevent error LNK2013: BRANCH24(T) fixup overflow
+ // The LBR optimization tries to eliminate branch islands,
+ // but if the displacement is larger than can fit
+ // in the instruction, this error will occur. The linker
+ // should be smart enough to insert branch islands only
+ // where necessary, but this is not the observed behavior.
+ // Disabling the LBR optimization works around the issue.
+ base.add_pre_link_args(LinkerFlavor::Msvc, &["/OPT:NOLBR"]);
+
+ Target {
+ llvm_target: "thumbv7a-pc-windows-msvc".into(),
+ pointer_width: 32,
+ data_layout: "e-m:w-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ features: "+vfp3,+neon".into(),
+ max_atomic_width: Some(64),
+ // FIXME(jordanrh): use PanicStrategy::Unwind when SEH is
+ // implemented for windows/arm in LLVM
+ panic_strategy: PanicStrategy::Abort,
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv7a_uwp_windows_msvc.rs b/compiler/rustc_target/src/spec/thumbv7a_uwp_windows_msvc.rs
new file mode 100644
index 000000000..65c2f5a70
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv7a_uwp_windows_msvc.rs
@@ -0,0 +1,18 @@
+use crate::spec::{PanicStrategy, Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv7a-pc-windows-msvc".into(),
+ pointer_width: 32,
+ data_layout: "e-m:w-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ features: "+vfp3,+neon".into(),
+ max_atomic_width: Some(64),
+ // FIXME(jordanrh): use PanicStrategy::Unwind when SEH is
+ // implemented for windows/arm in LLVM
+ panic_strategy: PanicStrategy::Abort,
+ ..super::windows_uwp_msvc_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv7em_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv7em_none_eabi.rs
new file mode 100644
index 000000000..000e5f2d3
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv7em_none_eabi.rs
@@ -0,0 +1,27 @@
+// Targets the Cortex-M4 and Cortex-M7 processors (ARMv7E-M)
+//
+// This target assumes that the device doesn't have an FPU (Floating Point Unit) and lowers all the
+// floating point operations to software routines (intrinsics).
+//
+// As such, this target uses the "soft" calling convention (ABI) where floating point values are
+// passed to/from subroutines via general purpose registers (R0, R1, etc.).
+//
+// To opt-in to hardware accelerated floating point operations, you can use, for example,
+// `-C target-feature=+vfp4` or `-C target-cpu=cortex-m4`.
+
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv7em-none-eabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabi".into(),
+ max_atomic_width: Some(32),
+ ..super::thumb_base::opts()
+ },
+ }
+}
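
To see the effect of the opt-in flags mentioned above, one can ask rustc to print the cfg it derives for the target. A hedged sketch, assuming a toolchain with the target installed (`rustup target add thumbv7em-none-eabi`):

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    // With `+vfp4` enabled, the printed cfg values should include a
    // `target_feature="vfp4"` line alongside the target's defaults.
    let out = Command::new("rustc")
        .args([
            "--print",
            "cfg",
            "--target",
            "thumbv7em-none-eabi",
            "-C",
            "target-feature=+vfp4",
        ])
        .output()?;
    print!("{}", String::from_utf8_lossy(&out.stdout));
    Ok(())
}
```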
diff --git a/compiler/rustc_target/src/spec/thumbv7em_none_eabihf.rs b/compiler/rustc_target/src/spec/thumbv7em_none_eabihf.rs
new file mode 100644
index 000000000..39a72564e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv7em_none_eabihf.rs
@@ -0,0 +1,36 @@
+// Targets the Cortex-M4F and Cortex-M7F processors (ARMv7E-M)
+//
+// This target assumes that the device does have an FPU (Floating Point Unit) and lowers all
+// (single-precision) floating-point operations to hardware instructions.
+//
+// Additionally, this target uses the "hard" float calling convention (ABI), where floating-point
+// values are passed to/from subroutines via FPU registers (S0, S1, D0, D1, etc.).
+//
+// To opt into double precision hardware support, use the `-C target-feature=+fp64` flag.
+
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv7em-none-eabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabihf".into(),
+            // `+vfp4` is the lowest common denominator between the Cortex-M4 (vfp4-16) and the
+            // Cortex-M7 (vfp5)
+            // `-d32`: both the Cortex-M4 and the Cortex-M7 only have 16 double-precision
+            // registers available
+            // `-fp64`: the Cortex-M4 only supports single-precision floating-point operations,
+            // whereas on the Cortex-M7 double precision is optional
+ //
+ // Reference:
+ // ARMv7-M Architecture Reference Manual - A2.5 The optional floating-point extension
+ features: "+vfp4,-d32,-fp64".into(),
+ max_atomic_width: Some(32),
+ ..super::thumb_base::opts()
+ },
+ }
+}
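
The `features` string above follows LLVM's convention: a comma-separated list in which `+` enables and `-` disables a feature. A small illustrative parser (not rustc's actual implementation) makes the format concrete:

```rust
// Splits a "+feat,-feat" list into enabled and disabled feature names.
fn parse_features(spec: &str) -> (Vec<&str>, Vec<&str>) {
    let (mut enabled, mut disabled) = (Vec::new(), Vec::new());
    for item in spec.split(',') {
        // Assumes well-formed items with a leading sign character.
        match item.split_at(1) {
            ("+", name) => enabled.push(name),
            ("-", name) => disabled.push(name),
            _ => {} // ignore malformed entries in this sketch
        }
    }
    (enabled, disabled)
}

fn main() {
    let (on, off) = parse_features("+vfp4,-d32,-fp64");
    assert_eq!(on, ["vfp4"]);
    assert_eq!(off, ["d32", "fp64"]);
}
```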
diff --git a/compiler/rustc_target/src/spec/thumbv7m_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv7m_none_eabi.rs
new file mode 100644
index 000000000..ab25cde66
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv7m_none_eabi.rs
@@ -0,0 +1,18 @@
+// Targets the Cortex-M3 processor (ARMv7-M)
+
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv7m-none-eabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabi".into(),
+ max_atomic_width: Some(32),
+ ..super::thumb_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs b/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs
new file mode 100644
index 000000000..4cad9e183
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv7neon_linux_androideabi.rs
@@ -0,0 +1,26 @@
+use crate::spec::{LinkerFlavor, Target, TargetOptions};
+
+// This target is for the Android v7a ABI in thumb mode with
+// NEON unconditionally enabled and, therefore, with 32 FPU registers
+// enabled as well. See section A2.6.2 on page A2-56 in
+// https://static.docs.arm.com/ddi0406/cd/DDI0406C_d_armv7ar_arm.pdf
+
+// See https://developer.android.com/ndk/guides/abis.html#v7a
+// for target ABI requirements.
+
+pub fn target() -> Target {
+ let mut base = super::android_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-march=armv7-a"]);
+ Target {
+ llvm_target: "armv7-none-linux-android".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabi".into(),
+ features: "+v7,+thumb-mode,+thumb2,+vfp3,+neon".into(),
+ max_atomic_width: Some(64),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs
new file mode 100644
index 000000000..28c81340a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_gnueabihf.rs
@@ -0,0 +1,23 @@
+use crate::spec::{Target, TargetOptions};
+
+// This target is for glibc Linux on ARMv7 with thumb mode enabled
+// (for consistency with Android and Debian-based distributions)
+// and with NEON unconditionally enabled and, therefore, with 32 FPU
+// registers enabled as well. See section A2.6.2 on page A2-56 in
+// https://static.docs.arm.com/ddi0406/cd/DDI0406C_d_armv7ar_arm.pdf
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "armv7-unknown-linux-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ // Info about features at https://wiki.debian.org/ArmHardFloatPort
+ features: "+v7,+thumb-mode,+thumb2,+vfp3,+neon".into(),
+ max_atomic_width: Some(64),
+ ..super::linux_gnu_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs
new file mode 100644
index 000000000..2c375ab22
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv7neon_unknown_linux_musleabihf.rs
@@ -0,0 +1,29 @@
+use crate::spec::{Target, TargetOptions};
+
+// This target is for musl Linux on ARMv7 with thumb mode enabled
+// (for consistency with Android and Debian-based distributions)
+// and with NEON unconditionally enabled and, therefore, with 32 FPU
+// registers enabled as well. See section A2.6.2 on page A2-56 in
+// https://static.docs.arm.com/ddi0406/cd/DDI0406C_d_armv7ar_arm.pdf
+
+pub fn target() -> Target {
+ Target {
+ // It's important we use "gnueabihf" and not "musleabihf" here. LLVM
+ // uses it to determine the calling convention and float ABI, and LLVM
+ // doesn't support the "musleabihf" value.
+ llvm_target: "armv7-unknown-linux-gnueabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ // Most of these settings are copied from the thumbv7neon_unknown_linux_gnueabihf
+ // target.
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ features: "+v7,+thumb-mode,+thumb2,+vfp3,+neon".into(),
+ max_atomic_width: Some(64),
+ mcount: "\u{1}mcount".into(),
+ ..super::linux_musl_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv8m_base_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv8m_base_none_eabi.rs
new file mode 100644
index 000000000..756b1834c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv8m_base_none_eabi.rs
@@ -0,0 +1,21 @@
+// Targets the Cortex-M23 processor (Baseline ARMv8-M)
+
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv8m.base-none-eabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabi".into(),
+ // ARMv8-M baseline doesn't support unaligned loads/stores so we disable them
+ // with +strict-align.
+ features: "+strict-align".into(),
+ max_atomic_width: Some(32),
+ ..super::thumb_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv8m_main_none_eabi.rs b/compiler/rustc_target/src/spec/thumbv8m_main_none_eabi.rs
new file mode 100644
index 000000000..4b6268546
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv8m_main_none_eabi.rs
@@ -0,0 +1,19 @@
+// Targets the Cortex-M33 processor (Armv8-M Mainline architecture profile),
+// without the Floating Point extension.
+
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv8m.main-none-eabi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabi".into(),
+ max_atomic_width: Some(32),
+ ..super::thumb_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/thumbv8m_main_none_eabihf.rs b/compiler/rustc_target/src/spec/thumbv8m_main_none_eabihf.rs
new file mode 100644
index 000000000..86c25f9e4
--- /dev/null
+++ b/compiler/rustc_target/src/spec/thumbv8m_main_none_eabihf.rs
@@ -0,0 +1,25 @@
+// Targets the Cortex-M33 processor (Armv8-M Mainline architecture profile),
+// with the Floating Point extension.
+
+use crate::spec::{Target, TargetOptions};
+
+pub fn target() -> Target {
+ Target {
+ llvm_target: "thumbv8m.main-none-eabihf".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64".into(),
+ arch: "arm".into(),
+
+ options: TargetOptions {
+ abi: "eabihf".into(),
+ // If the Floating Point extension is implemented in the Cortex-M33
+ // processor, the Cortex-M33 Technical Reference Manual states that
+ // the FPU uses the FPv5 architecture, single-precision instructions
+ // and 16 D registers.
+ // These parameters map to the following LLVM features.
+ features: "+fp-armv8,-fp64,-d32".into(),
+ max_atomic_width: Some(32),
+ ..super::thumb_base::opts()
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/uefi_msvc_base.rs b/compiler/rustc_target/src/spec/uefi_msvc_base.rs
new file mode 100644
index 000000000..aee8eb2e3
--- /dev/null
+++ b/compiler/rustc_target/src/spec/uefi_msvc_base.rs
@@ -0,0 +1,51 @@
+// This defines a base target-configuration for native UEFI systems. The UEFI specification has
+// quite detailed sections on the ABI of all the supported target architectures. In almost all
+// cases it simply follows what Microsoft Windows does. Hence, whenever in doubt, see the MSDN
+// documentation.
+// UEFI uses COFF/PE32+ format for binaries. All binaries must be statically linked. No dynamic
+// linker is supported. As is native to COFF, binaries are position-dependent, but will be relocated
+// by the loader if the pre-chosen memory location is already in use.
+// UEFI forbids running code on anything but the boot-CPU. No interrupts are allowed other than
+// the timer-interrupt. Device-drivers are required to use polling-based models. Furthermore, all
+// code runs in the same environment, no process separation is supported.
+
+use crate::spec::{LinkerFlavor, LldFlavor, PanicStrategy, StackProbeType, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ let mut base = super::msvc_base::opts();
+
+ base.add_pre_link_args(
+ LinkerFlavor::Msvc,
+ &[
+ // Non-standard subsystems have no default entry-point in PE+ files. We have to define
+ // one. "efi_main" seems to be a common choice amongst other implementations and the
+ // spec.
+ "/entry:efi_main",
+ // COFF images have a "Subsystem" field in their header, which defines what kind of
+ // program it is. UEFI has 3 fields reserved, which are EFI_APPLICATION,
+ // EFI_BOOT_SERVICE_DRIVER, and EFI_RUNTIME_DRIVER. We default to EFI_APPLICATION,
+ // which is very likely the most common option. Individual projects can override this
+ // with custom linker flags.
+ // The subsystem-type only has minor effects on the application. It defines the memory
+ // regions the application is loaded into (runtime-drivers need to be put into
+ // reserved areas), as well as whether a return from the entry-point is treated as
+ // exit (default for applications).
+ "/subsystem:efi_application",
+ ],
+ );
+
+ TargetOptions {
+ os: "uefi".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Link),
+ disable_redzone: true,
+ exe_suffix: ".efi".into(),
+ allows_weak_linkage: false,
+ panic_strategy: PanicStrategy::Abort,
+        // LLVM does not emit inline-assembly stack probes because the LLVM target
+        // is not considered "Windows", so fall back to call-based stack probes.
+ stack_probes: StackProbeType::Call,
+ singlethread: true,
+ linker: Some("rust-lld".into()),
+ ..base
+ }
+}
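
For context on `/entry:efi_main` above: a freestanding UEFI application built against this base has to supply that symbol itself. A minimal, hedged sketch using raw pointer stand-ins rather than the `r-efi` or `uefi` crates; note that `extern "efiapi"` was unstable (feature `abi_efiapi`) before Rust 1.68:

```rust
#![no_std]
#![no_main]

use core::ffi::c_void;
use core::panic::PanicInfo;

// Raw stand-ins for the UEFI image handle and system table pointer.
type Handle = *mut c_void;
type Status = usize;

const EFI_SUCCESS: Status = 0;

// The entry point named by `/entry:efi_main`; `extern "efiapi"` selects the
// calling convention the UEFI firmware expects.
#[no_mangle]
pub extern "efiapi" fn efi_main(_image: Handle, _system_table: *mut c_void) -> Status {
    EFI_SUCCESS
}

#[panic_handler]
fn panic(_info: &PanicInfo) -> ! {
    loop {}
}
```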
diff --git a/compiler/rustc_target/src/spec/vxworks_base.rs b/compiler/rustc_target/src/spec/vxworks_base.rs
new file mode 100644
index 000000000..aa4784b63
--- /dev/null
+++ b/compiler/rustc_target/src/spec/vxworks_base.rs
@@ -0,0 +1,21 @@
+use crate::spec::{cvs, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ TargetOptions {
+ os: "vxworks".into(),
+ env: "gnu".into(),
+ vendor: "wrs".into(),
+ linker: Some("wr-c++".into()),
+ exe_suffix: ".vxe".into(),
+ dynamic_linking: true,
+ families: cvs!["unix"],
+ has_rpath: true,
+ has_thread_local: true,
+ crt_static_default: true,
+ crt_static_respected: true,
+ crt_static_allows_dylibs: true,
+ // VxWorks needs to implement this to support profiling
+ mcount: "_mcount".into(),
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs b/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs
new file mode 100644
index 000000000..c7e7d2210
--- /dev/null
+++ b/compiler/rustc_target/src/spec/wasm32_unknown_emscripten.rs
@@ -0,0 +1,34 @@
+use super::{cvs, wasm_base};
+use super::{LinkArgs, LinkerFlavor, PanicStrategy, RelocModel, Target, TargetOptions};
+
+pub fn target() -> Target {
+ // Reset flags for non-Em flavors back to empty to satisfy sanity checking tests.
+ let pre_link_args = LinkArgs::new();
+ let post_link_args = TargetOptions::link_args(
+ LinkerFlavor::Em,
+ &["-sABORTING_MALLOC=0", "-Wl,--fatal-warnings"],
+ );
+
+ let opts = TargetOptions {
+ os: "emscripten".into(),
+ linker_flavor: LinkerFlavor::Em,
+ // emcc emits two files - a .js file to instantiate the wasm and supply platform
+ // functionality, and a .wasm file.
+ exe_suffix: ".js".into(),
+ linker: None,
+ pre_link_args,
+ post_link_args,
+ relocation_model: RelocModel::Pic,
+ panic_strategy: PanicStrategy::Unwind,
+ no_default_libraries: false,
+ families: cvs!["unix", "wasm"],
+ ..wasm_base::options()
+ };
+ Target {
+ llvm_target: "wasm32-unknown-emscripten".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-f128:64-n32:64-S128-ni:1:10:20".into(),
+ arch: "wasm32".into(),
+ options: opts,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
new file mode 100644
index 000000000..4e2927dd9
--- /dev/null
+++ b/compiler/rustc_target/src/spec/wasm32_unknown_unknown.rs
@@ -0,0 +1,64 @@
+//! A "bare wasm" target representing a WebAssembly output that makes zero
+//! assumptions about its environment.
+//!
+//! The `wasm32-unknown-unknown` target is intended to encapsulate use cases
+//! that do not rely on any imported functionality. The binaries generated are
+//! entirely self-contained by default when using the standard library. Although
+//! the standard library is available, most of it returns an error immediately
+//! (e.g. trying to create a TCP stream).
+//!
+//! This target is more or less managed by the Rust and WebAssembly Working
+//! Group nowadays at <https://github.com/rustwasm>.
+
+use super::wasm_base;
+use super::{LinkerFlavor, LldFlavor, Target};
+use crate::spec::abi::Abi;
+
+pub fn target() -> Target {
+ let mut options = wasm_base::options();
+ options.os = "unknown".into();
+ options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm);
+
+ // This is a default for backwards-compatibility with the original
+ // definition of this target oh-so-long-ago. Once the "wasm" ABI is
+    // stable and the wasm-bindgen project has switched to using it, there will be
+    // no need for this and it can be removed.
+ //
+ // Currently this is the reason that this target's ABI is mismatched with
+ // clang's ABI. This means that, in the limit, you can't merge C and Rust
+ // code on this target due to this ABI mismatch.
+ options.default_adjusted_cabi = Some(Abi::Wasm);
+
+ options.add_pre_link_args(
+ LinkerFlavor::Lld(LldFlavor::Wasm),
+ &[
+ // For now this target just never has an entry symbol no matter the output
+ // type, so unconditionally pass this.
+ "--no-entry",
+ // Rust really needs a way for users to specify exports and imports in
+ // the source code. --export-dynamic isn't the right tool for this job,
+ // however it does have the side effect of automatically exporting a lot
+            // of symbols, which approximates what people compiling for
+            // wasm32-unknown-unknown expect, so use it for now.
+ "--export-dynamic",
+ ],
+ );
+ options.add_pre_link_args(
+ LinkerFlavor::Gcc,
+ &[
+ // Make sure clang uses LLD as its linker and is configured appropriately
+ // otherwise
+ "--target=wasm32-unknown-unknown",
+ "-Wl,--no-entry",
+ "-Wl,--export-dynamic",
+ ],
+ );
+
+ Target {
+ llvm_target: "wasm32-unknown-unknown".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20".into(),
+ arch: "wasm32".into(),
+ options,
+ }
+}
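
Concretely, with `--export-dynamic` on the default link line, `#[no_mangle]` functions in a `cdylib` crate land in the wasm module's export section. A hypothetical crate built with `cargo build --target wasm32-unknown-unknown` (with `crate-type = ["cdylib"]` in its manifest):

```rust
// Exported as `add` in the resulting .wasm file thanks to --export-dynamic.
#[no_mangle]
pub extern "C" fn add(a: i32, b: i32) -> i32 {
    a + b
}
```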
diff --git a/compiler/rustc_target/src/spec/wasm32_wasi.rs b/compiler/rustc_target/src/spec/wasm32_wasi.rs
new file mode 100644
index 000000000..280457d68
--- /dev/null
+++ b/compiler/rustc_target/src/spec/wasm32_wasi.rs
@@ -0,0 +1,112 @@
+//! The `wasm32-wasi` target is a new and still (as of April 2019)
+//! experimental target. The definition in this file is likely to be tweaked
+//! over time and shouldn't be relied on too much.
+//!
+//! The `wasi` target is a proposal to define a standardized set of syscalls
+//! that WebAssembly files can interoperate with. This set of syscalls is
+//! intended to empower WebAssembly binaries with native capabilities such as
+//! filesystem access, network access, etc.
+//!
+//! You can see more about the proposal at <https://wasi.dev>.
+//!
+//! The Rust target definition here is interesting in a few ways. We want to
+//! serve two use cases here with this target:
+//!
+//! * First, we want Rust usage of the target to be as hassle-free as possible,
+//! ideally avoiding the need to configure and install a local wasm32-wasi
+//! toolchain.
+//!
+//! * Second, one of the primary use cases of LLVM's new wasm backend and the
+//! wasm support in LLD is that any compiled language can interoperate with
+//!   any other. To that end, the `wasm32-wasi` target is the first with a viable C
+//!   standard library and common sysroot definition, so we want Rust and C/C++
+//!   code to interoperate when compiled to `wasm32-wasi`.
+//!
+//! You'll note, however, that the two goals above are somewhat at odds with one
+//! another. To attempt to solve both use cases in one go we define a target
+//! that (ab)uses the `crt-static` target feature to indicate which one you're
+//! in.
+//!
+//! ## No interop with C required
+//!
+//! By default the `crt-static` target feature is enabled, and when enabled
+//! this means that the bundled version of `libc.a` found in `liblibc.rlib`
+//! is used. This isn't really intended for interoperation with C, because
+//! Rust's bundled C library may be incompatible with a
+//! foreign-compiled C library. In this use case, though, we use `rust-lld` and
+//! some copied crt startup object files to ensure that you can download the
+//! wasi target for Rust and you're off to the races, no further configuration
+//! necessary.
+//!
+//! All in all, by default, no external dependencies are required. You can
+//! compile `wasm32-wasi` binaries straight out of the box. You can't, however,
+//! reliably interoperate with C code in this mode (yet).
+//!
+//! ## Interop with C required
+//!
+//! For the second goal we repurpose the `target-feature` flag, meaning that
+//! you'll need to do a few things to have C/Rust code interoperate.
+//!
+//! 1. All Rust code needs to be compiled with `-C target-feature=-crt-static`,
+//! indicating that the bundled C standard library in the Rust sysroot will
+//! not be used.
+//!
+//! 2. If you're using rustc to build a linked artifact then you'll need to
+//! specify `-C linker` to a `clang` binary that supports
+//! `wasm32-wasi` and is configured with the `wasm32-wasi` sysroot. This
+//! will cause Rust code to be linked against the libc.a that the specified
+//! `clang` provides.
+//!
+//! 3. If you're building a staticlib and integrating Rust code elsewhere, then
+//! compiling with `-C target-feature=-crt-static` is all you need to do.
+//!
+//! You can configure the linker via Cargo using the
+//! `CARGO_TARGET_WASM32_WASI_LINKER` env var. Be sure to also set
+//! `CC_wasm32-wasi` if any crates in the dependency graph are using the `cc`
+//! crate.
+//!
+//! ## Remember, this is all in flux
+//!
+//! The wasi target is **very** new in its specification. It's likely going to
+//! be a long effort to get it standardized and stable. We'll be following it as
+//! best we can with this target. Don't start relying on too much here unless
+//! you know what you're getting into!
+
+use super::wasm_base;
+use super::{crt_objects, LinkerFlavor, LldFlavor, Target};
+
+pub fn target() -> Target {
+ let mut options = wasm_base::options();
+
+ options.os = "wasi".into();
+ options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm);
+ options.add_pre_link_args(LinkerFlavor::Gcc, &["--target=wasm32-wasi"]);
+
+ options.pre_link_objects_fallback = crt_objects::pre_wasi_fallback();
+ options.post_link_objects_fallback = crt_objects::post_wasi_fallback();
+
+    // Right now this is a bit of a workaround: we're currently saying that
+    // the target has a static crt by default, which we take as a signal
+    // for "use the bundled crt". If that's turned off then the system's crt
+    // will be used instead. This means that default usage of this target
+    // doesn't need an external compiler, but it's still interoperable with
+    // an external compiler if configured correctly.
+ options.crt_static_default = true;
+ options.crt_static_respected = true;
+
+ // Allow `+crt-static` to create a "cdylib" output which is just a wasm file
+ // without a main function.
+ options.crt_static_allows_dylibs = true;
+
+ // WASI's `sys::args::init` function ignores its arguments; instead,
+ // `args::args()` makes the WASI API calls itself.
+ options.main_needs_argc_argv = false;
+
+ Target {
+ llvm_target: "wasm32-wasi".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20".into(),
+ arch: "wasm32".into(),
+ options,
+ }
+}
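
Because the two interop modes are signaled through the `crt-static` target feature, a crate can observe at compile time which configuration it is being built under. A sketch using the `target_feature = "crt-static"` cfg; the descriptive strings are illustrative only:

```rust
// Compiled when building with the default `+crt-static` (bundled libc).
#[cfg(target_feature = "crt-static")]
fn libc_flavor() -> &'static str {
    "bundled libc from liblibc.rlib"
}

// Compiled when building with `-C target-feature=-crt-static`.
#[cfg(not(target_feature = "crt-static"))]
fn libc_flavor() -> &'static str {
    "external wasi-sysroot libc via the configured clang"
}

fn main() {
    println!("linking against: {}", libc_flavor());
}
```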
diff --git a/compiler/rustc_target/src/spec/wasm64_unknown_unknown.rs b/compiler/rustc_target/src/spec/wasm64_unknown_unknown.rs
new file mode 100644
index 000000000..5211f7707
--- /dev/null
+++ b/compiler/rustc_target/src/spec/wasm64_unknown_unknown.rs
@@ -0,0 +1,49 @@
+//! A "bare wasm" target representing a WebAssembly output that makes zero
+//! assumptions about its environment.
+//!
+//! The `wasm64-unknown-unknown` target is intended to encapsulate use cases
+//! that do not rely on any imported functionality. The binaries generated are
+//! entirely self-contained by default when using the standard library. Although
+//! the standard library is available, most of it returns an error immediately
+//! (e.g. trying to create a TCP stream).
+
+use super::wasm_base;
+use super::{LinkerFlavor, LldFlavor, Target};
+
+pub fn target() -> Target {
+ let mut options = wasm_base::options();
+ options.os = "unknown".into();
+ options.linker_flavor = LinkerFlavor::Lld(LldFlavor::Wasm);
+
+ options.add_pre_link_args(
+ LinkerFlavor::Lld(LldFlavor::Wasm),
+ &[
+ // For now this target just never has an entry symbol no matter the output
+ // type, so unconditionally pass this.
+ "--no-entry",
+ "-mwasm64",
+ ],
+ );
+ options.add_pre_link_args(
+ LinkerFlavor::Gcc,
+ &[
+ // Make sure clang uses LLD as its linker and is configured appropriately
+ // otherwise
+ "--target=wasm64-unknown-unknown",
+ "-Wl,--no-entry",
+ ],
+ );
+
+ // Any engine that implements wasm64 will surely implement the rest of these
+ // features since they were all merged into the official spec by the time
+ // wasm64 was designed.
+ options.features = "+bulk-memory,+mutable-globals,+sign-ext,+nontrapping-fptoint".into();
+
+ Target {
+ llvm_target: "wasm64-unknown-unknown".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p:64:64-p10:8:8-p20:8:8-i64:64-n32:64-S128-ni:1:10:20".into(),
+ arch: "wasm64".into(),
+ options,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/wasm_base.rs b/compiler/rustc_target/src/spec/wasm_base.rs
new file mode 100644
index 000000000..9216d3e7b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/wasm_base.rs
@@ -0,0 +1,129 @@
+use super::crt_objects::CrtObjectsFallback;
+use super::{cvs, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, TargetOptions, TlsModel};
+
+pub fn options() -> TargetOptions {
+ macro_rules! args {
+ ($prefix:literal) => {
+ &[
+ // By default LLD only gives us one page of stack (64k) which is a
+ // little small. Default to a larger stack closer to other PC platforms
+ // (1MB) and users can always inject their own link-args to override this.
+ concat!($prefix, "-z"),
+ concat!($prefix, "stack-size=1048576"),
+ // By default LLD's memory layout is:
+ //
+ // 1. First, a blank page
+ // 2. Next, all static data
+ // 3. Finally, the main stack (which grows down)
+ //
+ // This has the unfortunate consequence that on stack overflows you
+ // corrupt static data and can cause some exceedingly weird bugs. To
+ // help detect this a little sooner we instead request that the stack is
+ // placed before static data.
+ //
+ // This means that we'll generate slightly larger binaries as references
+ // to static data will take more bytes in the ULEB128 encoding, but
+ // stack overflow will be guaranteed to trap as it underflows instead of
+ // corrupting static data.
+ concat!($prefix, "--stack-first"),
+ // FIXME we probably shouldn't pass this but instead pass an explicit list
+ // of symbols we'll allow to be undefined. We don't currently have a
+ // mechanism of knowing, however, which symbols are intended to be imported
+ // from the environment and which are intended to be imported from other
+ // objects linked elsewhere. This is a coarse approximation but is sure to
+ // hide some bugs and frustrate someone at some point, so we should ideally
+ // work towards a world where we can explicitly list symbols that are
+ // supposed to be imported and have all other symbols generate errors if
+ // they remain undefined.
+ concat!($prefix, "--allow-undefined"),
+ // Rust code should never have warnings, and warnings are often
+ // indicative of bugs, let's prevent them.
+ concat!($prefix, "--fatal-warnings"),
+ // LLD only implements C++-like demangling, which doesn't match our own
+ // mangling scheme. Tell LLD to not demangle anything and leave it up to
+ // us to demangle these symbols later. Currently rustc does not perform
+ // further demangling, but tools like twiggy and wasm-bindgen are intended
+ // to do so.
+ concat!($prefix, "--no-demangle"),
+ ]
+ };
+ }
+
+ let mut pre_link_args = TargetOptions::link_args(LinkerFlavor::Lld(LldFlavor::Wasm), args!(""));
+ super::add_link_args(&mut pre_link_args, LinkerFlavor::Gcc, args!("-Wl,"));
+
+ TargetOptions {
+ is_like_wasm: true,
+ families: cvs!["wasm"],
+
+ // we allow dynamic linking, but only cdylibs. Basically we allow a
+ // final library artifact that exports some symbols (a wasm module) but
+ // we don't allow intermediate `dylib` crate types
+ dynamic_linking: true,
+ only_cdylib: true,
+
+ // relatively self-explanatory!
+ exe_suffix: ".wasm".into(),
+ dll_prefix: "".into(),
+ dll_suffix: ".wasm".into(),
+ eh_frame_header: false,
+
+ max_atomic_width: Some(64),
+
+ // Unwinding doesn't work right now, so the whole target unconditionally
+ // defaults to panic=abort. Note that this is guaranteed to change in
+ // the future once unwinding is implemented. Don't rely on this as we're
+ // basically guaranteed to change it once WebAssembly supports
+ // exceptions.
+ panic_strategy: PanicStrategy::Abort,
+
+        // Wasm doesn't have atomics yet, so tell LLVM that we're in a
+        // single-threaded model, which will legalize atomics to normal operations.
+ singlethread: true,
+
+ // no dynamic linking, no need for default visibility!
+ default_hidden_visibility: true,
+
+        // Symbol visibility takes care of this for WebAssembly.
+        // Additionally, the only known linker, LLD, doesn't support the script
+        // arguments just yet.
+ limit_rdylib_exports: false,
+
+ // we use the LLD shipped with the Rust toolchain by default
+ linker: Some("rust-lld".into()),
+ lld_flavor: LldFlavor::Wasm,
+ linker_is_gnu: false,
+
+ pre_link_args,
+
+ crt_objects_fallback: Some(CrtObjectsFallback::Wasm),
+
+ // This has no effect in LLVM 8 or prior, but in LLVM 9 and later when
+ // PIC code is implemented this has quite a drastic effect if it stays
+ // at the default, `pic`. In an effort to keep wasm binaries as minimal
+ // as possible we're defaulting to `static` for now, but the hope is
+ // that eventually we can ship a `pic`-compatible standard library which
+ // works with `static` as well (or works with some method of generating
+ // non-relative calls and such later on).
+ relocation_model: RelocModel::Static,
+
+ // When the atomics feature is activated then these two keys matter,
+ // otherwise they're basically ignored by the standard library. In this
+ // mode, however, the `#[thread_local]` attribute works (i.e.
+ // `has_thread_local`) and we need to get it to work by specifying
+ // `local-exec` as that's all that's implemented in LLVM today for wasm.
+ has_thread_local: true,
+ tls_model: TlsModel::LocalExec,
+
+ // gdb scripts don't work on wasm blobs
+ emit_debug_gdb_scripts: false,
+
+ // There's more discussion of this at
+ // https://bugs.llvm.org/show_bug.cgi?id=52442 but the general result is
+ // that this isn't useful for wasm and has tricky issues with
+ // representation, so this is disabled.
+ generate_arange_section: false,
+
+ ..Default::default()
+ }
+}
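
The `args!` macro at the top of this file exists so the same argument list can be spelled twice: raw for LLD, and `-Wl,`-prefixed for a GCC-style driver that forwards such arguments to the underlying linker. A standalone sketch of the `concat!` prefixing trick:

```rust
// One argument list, two spellings, selected by a literal prefix.
macro_rules! args {
    ($prefix:literal) => {
        [concat!($prefix, "-z"), concat!($prefix, "stack-size=1048576")]
    };
}

fn main() {
    // Passed straight to LLD:
    assert_eq!(args!(""), ["-z", "stack-size=1048576"]);
    // Routed through a GCC-style driver:
    assert_eq!(args!("-Wl,"), ["-Wl,-z", "-Wl,stack-size=1048576"]);
}
```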
diff --git a/compiler/rustc_target/src/spec/windows_gnu_base.rs b/compiler/rustc_target/src/spec/windows_gnu_base.rs
new file mode 100644
index 000000000..90e0af3e3
--- /dev/null
+++ b/compiler/rustc_target/src/spec/windows_gnu_base.rs
@@ -0,0 +1,91 @@
+use crate::spec::crt_objects::{self, CrtObjectsFallback};
+use crate::spec::{cvs, LinkerFlavor, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ let mut pre_link_args = TargetOptions::link_args(
+ LinkerFlavor::Ld,
+ &[
+ // Enable ASLR
+ "--dynamicbase",
+ // ASLR will rebase it anyway so leaving that option enabled only leads to confusion
+ "--disable-auto-image-base",
+ ],
+ );
+ super::add_link_args(
+ &mut pre_link_args,
+ LinkerFlavor::Gcc,
+ &[
+ // Tell GCC to avoid linker plugins, because we are not bundling
+ // them with Windows installer, and Rust does its own LTO anyways.
+ "-fno-use-linker-plugin",
+ "-Wl,--dynamicbase",
+ "-Wl,--disable-auto-image-base",
+ ],
+ );
+
+ // Order of `late_link_args*` was found through trial and error to work with various
+ // mingw-w64 versions (not tested on the CI). It's expected to change from time to time.
+ let mingw_libs = &[
+ "-lmsvcrt",
+ "-lmingwex",
+ "-lmingw32",
+ "-lgcc", // alas, mingw* libraries above depend on libgcc
+ // mingw's msvcrt is a weird hybrid import library and static library.
+ // And it seems that the linker fails to use import symbols from msvcrt
+        // that are required by functions in msvcrt in certain cases. For example,
+        // `_fmode`, which is used by an implementation of `__p__fmode` on x86_64.
+ // The library is purposely listed twice to fix that.
+ //
+ // See https://github.com/rust-lang/rust/pull/47483 for some more details.
+ "-lmsvcrt",
+ "-luser32",
+ "-lkernel32",
+ ];
+ let mut late_link_args = TargetOptions::link_args(LinkerFlavor::Ld, mingw_libs);
+ super::add_link_args(&mut late_link_args, LinkerFlavor::Gcc, mingw_libs);
+ // If any of our crates are dynamically linked then we need to use
+ // the shared libgcc_s-dw2-1.dll. This is required to support
+ // unwinding across DLL boundaries.
+ let dynamic_unwind_libs = &["-lgcc_s"];
+ let mut late_link_args_dynamic =
+ TargetOptions::link_args(LinkerFlavor::Ld, dynamic_unwind_libs);
+ super::add_link_args(&mut late_link_args_dynamic, LinkerFlavor::Gcc, dynamic_unwind_libs);
+ // If all of our crates are statically linked then we can get away
+ // with statically linking the libgcc unwinding code. This allows
+ // binaries to be redistributed without the libgcc_s-dw2-1.dll
+    // dependency, but unfortunately breaks unwinding across DLL
+    // boundaries when unwinding crosses FFI boundaries.
+ let static_unwind_libs = &["-lgcc_eh", "-l:libpthread.a"];
+ let mut late_link_args_static = TargetOptions::link_args(LinkerFlavor::Ld, static_unwind_libs);
+ super::add_link_args(&mut late_link_args_static, LinkerFlavor::Gcc, static_unwind_libs);
+
+ TargetOptions {
+ os: "windows".into(),
+ env: "gnu".into(),
+ vendor: "pc".into(),
+ // FIXME(#13846) this should be enabled for windows
+ function_sections: false,
+ linker: Some("gcc".into()),
+ dynamic_linking: true,
+ dll_prefix: "".into(),
+ dll_suffix: ".dll".into(),
+ exe_suffix: ".exe".into(),
+ families: cvs!["windows"],
+ is_like_windows: true,
+ allows_weak_linkage: false,
+ pre_link_args,
+ pre_link_objects: crt_objects::pre_mingw(),
+ post_link_objects: crt_objects::post_mingw(),
+ pre_link_objects_fallback: crt_objects::pre_mingw_fallback(),
+ post_link_objects_fallback: crt_objects::post_mingw_fallback(),
+ crt_objects_fallback: Some(CrtObjectsFallback::Mingw),
+ late_link_args,
+ late_link_args_dynamic,
+ late_link_args_static,
+ abi_return_struct_as_int: true,
+ emit_debug_gdb_scripts: false,
+ requires_uwtable: true,
+ eh_frame_header: false,
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/windows_gnullvm_base.rs b/compiler/rustc_target/src/spec/windows_gnullvm_base.rs
new file mode 100644
index 000000000..bae007dc9
--- /dev/null
+++ b/compiler/rustc_target/src/spec/windows_gnullvm_base.rs
@@ -0,0 +1,40 @@
+use crate::spec::{cvs, LinkerFlavor, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ // We cannot use `-nodefaultlibs` because compiler-rt has to be passed
+    // as a path, since it's not added to the linker search path by default.
+    // There were attempts to make it behave like libgcc (so one could just use -l<name>)
+ // but LLVM maintainers rejected it: https://reviews.llvm.org/D51440
+ let pre_link_args =
+ TargetOptions::link_args(LinkerFlavor::Gcc, &["-nolibc", "--unwindlib=none"]);
+ // Order of `late_link_args*` does not matter with LLD.
+ let late_link_args = TargetOptions::link_args(
+ LinkerFlavor::Gcc,
+ &["-lmingw32", "-lmingwex", "-lmsvcrt", "-lkernel32", "-luser32"],
+ );
+
+ TargetOptions {
+ os: "windows".into(),
+ env: "gnu".into(),
+ vendor: "pc".into(),
+ abi: "llvm".into(),
+ linker: Some("clang".into()),
+ dynamic_linking: true,
+ dll_prefix: "".into(),
+ dll_suffix: ".dll".into(),
+ exe_suffix: ".exe".into(),
+ families: cvs!["windows"],
+ is_like_windows: true,
+ allows_weak_linkage: false,
+ pre_link_args,
+ late_link_args,
+ abi_return_struct_as_int: true,
+ emit_debug_gdb_scripts: false,
+ requires_uwtable: true,
+ eh_frame_header: false,
+ no_default_libraries: false,
+ has_thread_local: true,
+
+ ..Default::default()
+ }
+}
diff --git a/compiler/rustc_target/src/spec/windows_msvc_base.rs b/compiler/rustc_target/src/spec/windows_msvc_base.rs
new file mode 100644
index 000000000..21062c337
--- /dev/null
+++ b/compiler/rustc_target/src/spec/windows_msvc_base.rs
@@ -0,0 +1,34 @@
+use crate::spec::{cvs, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ let base = super::msvc_base::opts();
+
+ TargetOptions {
+ os: "windows".into(),
+ env: "msvc".into(),
+ vendor: "pc".into(),
+ dynamic_linking: true,
+ dll_prefix: "".into(),
+ dll_suffix: ".dll".into(),
+ exe_suffix: ".exe".into(),
+ staticlib_prefix: "".into(),
+ staticlib_suffix: ".lib".into(),
+ families: cvs!["windows"],
+ crt_static_allows_dylibs: true,
+ crt_static_respected: true,
+ requires_uwtable: true,
+ // Currently we don't pass the /NODEFAULTLIB flag to the linker on MSVC
+ // as there's been trouble in the past of linking the C++ standard
+ // library required by LLVM. This likely needs to happen one day, but
+ // in general Windows is also a more controlled environment than
+ // Unix, so it's not necessarily as critical that this be implemented.
+ //
+ // Note that there are also some licensing worries about statically
+ // linking some libraries which require a specific agreement, so it may
+ // not ever be possible for us to pass this flag.
+ no_default_libraries: false,
+ has_thread_local: true,
+
+ ..base
+ }
+}
diff --git a/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs b/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs
new file mode 100644
index 000000000..fa69b919c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/windows_uwp_gnu_base.rs
@@ -0,0 +1,34 @@
+use crate::spec::{LinkArgs, LinkerFlavor, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ let base = super::windows_gnu_base::opts();
+
+ // FIXME: This should be updated for the exception machinery changes from #67502
+ // and inherit from `windows_gnu_base`, at least partially.
+ let mingw_libs = &[
+ "-lwinstorecompat",
+ "-lruntimeobject",
+ "-lsynchronization",
+ "-lvcruntime140_app",
+ "-lucrt",
+ "-lwindowsapp",
+ "-lmingwex",
+ "-lmingw32",
+ ];
+ let mut late_link_args = TargetOptions::link_args(LinkerFlavor::Ld, mingw_libs);
+ super::add_link_args(&mut late_link_args, LinkerFlavor::Gcc, mingw_libs);
+ // Reset the flags back to empty until the FIXME above is addressed.
+ let late_link_args_dynamic = LinkArgs::new();
+ let late_link_args_static = LinkArgs::new();
+
+ TargetOptions {
+ abi: "uwp".into(),
+ vendor: "uwp".into(),
+ limit_rdylib_exports: false,
+ late_link_args,
+ late_link_args_dynamic,
+ late_link_args_static,
+
+ ..base
+ }
+}
diff --git a/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs b/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs
new file mode 100644
index 000000000..f2573fc2d
--- /dev/null
+++ b/compiler/rustc_target/src/spec/windows_uwp_msvc_base.rs
@@ -0,0 +1,11 @@
+use crate::spec::{LinkerFlavor, TargetOptions};
+
+pub fn opts() -> TargetOptions {
+ let mut opts = super::windows_msvc_base::opts();
+
+ opts.abi = "uwp".into();
+ opts.vendor = "uwp".into();
+ opts.add_pre_link_args(LinkerFlavor::Msvc, &["/APPCONTAINER", "mincore.lib"]);
+
+ opts
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
new file mode 100644
index 000000000..dbd26899c
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_apple_darwin.rs
@@ -0,0 +1,30 @@
+use crate::spec::TargetOptions;
+use crate::spec::{FramePointer, LinkerFlavor, SanitizerSet, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::apple_base::opts("macos");
+ base.cpu = "core2".into();
+    base.max_atomic_width = Some(128); // core2 supports cmpxchg16b
+ base.frame_pointer = FramePointer::Always;
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64", "-arch", "x86_64"]);
+ base.link_env_remove.to_mut().extend(super::apple_base::macos_link_env_remove());
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.supported_sanitizers =
+ SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::LEAK | SanitizerSet::THREAD;
+
+ // Clang automatically chooses a more specific target based on
+ // MACOSX_DEPLOYMENT_TARGET. To enable cross-language LTO to work
+ // correctly, we do too.
+ let arch = "x86_64";
+ let llvm_target = super::apple_base::macos_llvm_target(&arch);
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: arch.into(),
+ options: TargetOptions { mcount: "\u{1}mcount".into(), ..base },
+ }
+}
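
A simplified sketch of the deployment-target logic referenced above: fold an environment variable into a versioned triple. The fallback version and exact formatting are assumptions for illustration; the real helper is `apple_base::macos_llvm_target`, which also knows per-arch defaults:

```rust
use std::env;

// Derives a versioned LLVM triple from MACOSX_DEPLOYMENT_TARGET, defaulting
// to an assumed minimum when the variable is unset.
fn macos_llvm_target(arch: &str) -> String {
    let version = env::var("MACOSX_DEPLOYMENT_TARGET").unwrap_or_else(|_| "10.7".to_string());
    format!("{arch}-apple-macosx{version}")
}

fn main() {
    // With MACOSX_DEPLOYMENT_TARGET=11.0 this prints "x86_64-apple-macosx11.0".
    println!("{}", macos_llvm_target("x86_64"));
}
```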
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
new file mode 100644
index 000000000..5e64ed0cf
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios.rs
@@ -0,0 +1,21 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = opts("ios", Arch::X86_64);
+ let llvm_target = super::apple_base::ios_sim_llvm_target("x86_64");
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: TargetOptions {
+ max_atomic_width: Some(64),
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ stack_probes: StackProbeType::Call,
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
new file mode 100644
index 000000000..2122bcd37
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_apple_ios_macabi.rs
@@ -0,0 +1,23 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{LinkerFlavor, StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let llvm_target = "x86_64-apple-ios13.0-macabi";
+
+ let mut base = opts("ios", Arch::X86_64_macabi);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-target", llvm_target]);
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: TargetOptions {
+ max_atomic_width: Some(64),
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ stack_probes: StackProbeType::Call,
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
new file mode 100644
index 000000000..a848c5a0a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_apple_tvos.rs
@@ -0,0 +1,18 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = opts("tvos", Arch::X86_64);
+ Target {
+ llvm_target: "x86_64-apple-tvos".into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-i64:64-f80:128-n8:16:32:64-S128".into(),
+ arch: "x86_64".into(),
+ options: TargetOptions {
+ max_atomic_width: Some(64),
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ stack_probes: StackProbeType::Call,
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs b/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs
new file mode 100644
index 000000000..4dff3c2f2
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_apple_watchos_sim.rs
@@ -0,0 +1,35 @@
+use super::apple_sdk_base::{opts, Arch};
+use crate::spec::{StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let base = opts("watchos", Arch::X86_64);
+
+ let arch = "x86_64";
+ let llvm_target = super::apple_base::watchos_sim_llvm_target(arch);
+
+ Target {
+ llvm_target: llvm_target.into(),
+ pointer_width: 64,
+ data_layout: "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: TargetOptions {
+ max_atomic_width: Some(64),
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ stack_probes: StackProbeType::Call,
+ forces_embed_bitcode: true,
+ // Taken from a clang build on Xcode 11.4.1.
+ // These arguments are not actually invoked - they just have
+ // to look right to pass App Store validation.
+ bitcode_llvm_cmdline: "-triple\0\
+ x86_64-apple-watchos5.0-simulator\0\
+ -emit-obj\0\
+ -disable-llvm-passes\0\
+ -target-abi\0\
+ darwinpcs\0\
+ -Os\0"
+ .into(),
+ ..base
+ },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs
new file mode 100644
index 000000000..9d597ea2e
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_fortanix_unknown_sgx.rs
@@ -0,0 +1,84 @@
+use std::borrow::Cow;
+
+use crate::spec::cvs;
+
+use super::{LinkerFlavor, LldFlavor, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let pre_link_args = TargetOptions::link_args(
+ LinkerFlavor::Ld,
+ &[
+ "-e",
+ "elf_entry",
+ "-Bstatic",
+ "--gc-sections",
+ "-z",
+ "text",
+ "-z",
+ "norelro",
+ "--no-undefined",
+ "--error-unresolved-symbols",
+ "--no-undefined-version",
+ "-Bsymbolic",
+ "--export-dynamic",
+ // The following symbols are needed by libunwind, which is linked after
+ // libstd. Make sure they're included in the link.
+ "-u",
+ "__rust_abort",
+ "-u",
+ "__rust_c_alloc",
+ "-u",
+ "__rust_c_dealloc",
+ "-u",
+ "__rust_print_err",
+ "-u",
+ "__rust_rwlock_rdlock",
+ "-u",
+ "__rust_rwlock_unlock",
+ "-u",
+ "__rust_rwlock_wrlock",
+ ],
+ );
+
+ const EXPORT_SYMBOLS: &[&str] = &[
+ "sgx_entry",
+ "HEAP_BASE",
+ "HEAP_SIZE",
+ "RELA",
+ "RELACOUNT",
+ "ENCLAVE_SIZE",
+ "CFGDATA_BASE",
+ "DEBUG",
+ "EH_FRM_HDR_OFFSET",
+ "EH_FRM_HDR_LEN",
+ "EH_FRM_OFFSET",
+ "EH_FRM_LEN",
+ "TEXT_BASE",
+ "TEXT_SIZE",
+ ];
+ let opts = TargetOptions {
+ os: "unknown".into(),
+ env: "sgx".into(),
+ vendor: "fortanix".into(),
+ abi: "fortanix".into(),
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ max_atomic_width: Some(64),
+ cpu: "x86-64".into(),
+ features: "+rdrnd,+rdseed,+lvi-cfi,+lvi-load-hardening".into(),
+ llvm_args: cvs!["--x86-experimental-lvi-inline-asm-hardening"],
+ position_independent_executables: true,
+ pre_link_args,
+ override_export_symbols: Some(EXPORT_SYMBOLS.iter().cloned().map(Cow::from).collect()),
+ relax_elf_relocations: true,
+ ..Default::default()
+ };
+ Target {
+ llvm_target: "x86_64-elf".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: opts,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_fuchsia.rs b/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
new file mode 100644
index 000000000..4f88fc350
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_fuchsia.rs
@@ -0,0 +1,19 @@
+use crate::spec::{SanitizerSet, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::fuchsia_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI;
+
+ Target {
+ llvm_target: "x86_64-fuchsia".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_linux_android.rs b/compiler/rustc_target/src/spec/x86_64_linux_android.rs
new file mode 100644
index 000000000..6d19cf265
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_linux_android.rs
@@ -0,0 +1,21 @@
+use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::android_base::opts();
+ base.cpu = "x86-64".into();
+ // https://developer.android.com/ndk/guides/abis.html#86-64
+ base.features = "+mmx,+sse,+sse2,+sse3,+ssse3,+sse4.1,+sse4.2,+popcnt".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "x86_64-linux-android".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: TargetOptions { supported_sanitizers: SanitizerSet::ADDRESS, ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs b/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs
new file mode 100644
index 000000000..0550b221f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_pc_solaris.rs
@@ -0,0 +1,21 @@
+use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::solaris_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.cpu = "x86-64".into();
+ base.vendor = "pc".into();
+ base.max_atomic_width = Some(64);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI;
+
+ Target {
+ llvm_target: "x86_64-pc-solaris".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs
new file mode 100644
index 000000000..59a8cffca
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnu.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::windows_gnu_base::opts();
+ base.cpu = "x86-64".into();
+ // Use high-entropy 64 bit address space for ASLR
+ base.add_pre_link_args(LinkerFlavor::Ld, &["-m", "i386pep", "--high-entropy-va"]);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64", "-Wl,--high-entropy-va"]);
+ base.max_atomic_width = Some(64);
+ base.linker = Some("x86_64-w64-mingw32-gcc".into());
+
+ Target {
+ llvm_target: "x86_64-pc-windows-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_windows_gnullvm.rs b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnullvm.rs
new file mode 100644
index 000000000..d3909b389
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_pc_windows_gnullvm.rs
@@ -0,0 +1,18 @@
+use crate::spec::{LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::windows_gnullvm_base::opts();
+ base.cpu = "x86-64".into();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.max_atomic_width = Some(64);
+ base.linker = Some("x86_64-w64-mingw32-clang".into());
+
+ Target {
+ llvm_target: "x86_64-pc-windows-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/x86_64_pc_windows_msvc.rs
new file mode 100644
index 000000000..081806aa6
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_pc_windows_msvc.rs
@@ -0,0 +1,16 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::windows_msvc_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "x86_64-pc-windows-msvc".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs b/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
new file mode 100644
index 000000000..cbe87589a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_sun_solaris.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::solaris_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ base.cpu = "x86-64".into();
+ base.vendor = "sun".into();
+ base.max_atomic_width = Some(64);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "x86_64-pc-solaris".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs b/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
new file mode 100644
index 000000000..746f64781
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_dragonfly.rs
@@ -0,0 +1,19 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::dragonfly_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "x86_64-unknown-dragonfly".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
new file mode 100644
index 000000000..b30784ed6
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_freebsd.rs
@@ -0,0 +1,21 @@
+use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::freebsd_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.supported_sanitizers =
+ SanitizerSet::ADDRESS | SanitizerSet::CFI | SanitizerSet::MEMORY | SanitizerSet::THREAD;
+
+ Target {
+ llvm_target: "x86_64-unknown-freebsd".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs b/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
new file mode 100644
index 000000000..d6d033629
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_haiku.rs
@@ -0,0 +1,21 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::haiku_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ // This option is required to build executables on Haiku x86_64
+ base.position_independent_executables = true;
+
+ Target {
+ llvm_target: "x86_64-unknown-haiku".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
new file mode 100644
index 000000000..d31530161
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_hermit.rs
@@ -0,0 +1,19 @@
+use crate::spec::{StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::hermit_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.features = "+rdrnd,+rdseed".into();
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "x86_64-unknown-hermit".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs b/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs
new file mode 100644
index 000000000..9f19c3a2b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_illumos.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, SanitizerSet, Target};
+
+pub fn target() -> Target {
+ let mut base = super::illumos_base::opts();
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64", "-std=c99"]);
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.supported_sanitizers = SanitizerSet::ADDRESS | SanitizerSet::CFI;
+
+ Target {
+ // LLVM does not currently have a separate illumos target,
+ // so we still pass Solaris to it
+ llvm_target: "x86_64-pc-solaris".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs b/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs
new file mode 100644
index 000000000..78189a0c0
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_l4re_uclibc.rs
@@ -0,0 +1,19 @@
+use crate::spec::{PanicStrategy, Target};
+
+pub fn target() -> Target {
+ let mut base = super::l4re_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.crt_static_allows_dylibs = false;
+ base.dynamic_linking = false;
+ base.panic_strategy = PanicStrategy::Abort;
+
+ Target {
+ llvm_target: "x86_64-unknown-l4re-uclibc".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
new file mode 100644
index 000000000..956be0353
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnu.rs
@@ -0,0 +1,25 @@
+use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.static_position_independent_executables = true;
+ base.supported_sanitizers = SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
+ | SanitizerSet::LEAK
+ | SanitizerSet::MEMORY
+ | SanitizerSet::THREAD;
+
+ Target {
+ llvm_target: "x86_64-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
new file mode 100644
index 000000000..140882747
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_gnux32.rs
@@ -0,0 +1,25 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::linux_gnu_base::opts();
+ base.cpu = "x86-64".into();
+ base.abi = "x32".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-mx32"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.has_thread_local = false;
+ // BUG(GabrielMajeri): disabling the PLT on x86_64 Linux with x32 ABI
+ // breaks code gen. See LLVM bug 36743
+ base.needs_plt = true;
+
+ Target {
+ llvm_target: "x86_64-unknown-linux-gnux32".into(),
+ pointer_width: 32,
+ data_layout: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-\
+ i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs b/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
new file mode 100644
index 000000000..87e7784d1
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_linux_musl.rs
@@ -0,0 +1,25 @@
+use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::linux_musl_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.static_position_independent_executables = true;
+ base.supported_sanitizers = SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
+ | SanitizerSet::LEAK
+ | SanitizerSet::MEMORY
+ | SanitizerSet::THREAD;
+
+ Target {
+ llvm_target: "x86_64-unknown-linux-musl".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
new file mode 100644
index 000000000..d3a67619a
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_netbsd.rs
@@ -0,0 +1,24 @@
+use crate::spec::{LinkerFlavor, SanitizerSet, StackProbeType, Target, TargetOptions};
+
+pub fn target() -> Target {
+ let mut base = super::netbsd_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.supported_sanitizers = SanitizerSet::ADDRESS
+ | SanitizerSet::CFI
+ | SanitizerSet::LEAK
+ | SanitizerSet::MEMORY
+ | SanitizerSet::THREAD;
+
+ Target {
+ llvm_target: "x86_64-unknown-netbsd".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: TargetOptions { mcount: "__mcount".into(), ..base },
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_none.rs b/compiler/rustc_target/src/spec/x86_64_unknown_none.rs
new file mode 100644
index 000000000..809fd642d
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_none.rs
@@ -0,0 +1,40 @@
+// Generic x86-64 target for bare-metal code - Floating point disabled
+//
+// Can be used in conjunction with the `target-feature` and
+// `target-cpu` compiler flags to opt in to more hardware-specific
+// features.
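+//
+// For example (a hypothetical invocation, not part of this file; `-Z build-std`
+// is a nightly cargo flag and the CPU name is only a placeholder):
+//
+// RUSTFLAGS="-C target-cpu=x86-64-v2" \
+// cargo build -Z build-std=core,alloc --target x86_64-unknown-none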
+
+use super::{
+ CodeModel, LinkerFlavor, LldFlavor, PanicStrategy, RelocModel, RelroLevel, StackProbeType,
+ Target, TargetOptions,
+};
+
+pub fn target() -> Target {
+ let opts = TargetOptions {
+ cpu: "x86-64".into(),
+ max_atomic_width: Some(64),
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ stack_probes: StackProbeType::Call,
+ position_independent_executables: true,
+ static_position_independent_executables: true,
+ relro_level: RelroLevel::Full,
+ relocation_model: RelocModel::Pic,
+ linker_flavor: LinkerFlavor::Lld(LldFlavor::Ld),
+ linker: Some("rust-lld".into()),
+ features:
+ "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float"
+ .into(),
+ disable_redzone: true,
+ panic_strategy: PanicStrategy::Abort,
+ code_model: Some(CodeModel::Kernel),
+ ..Default::default()
+ };
+ Target {
+ llvm_target: "x86_64-unknown-none-elf".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: opts,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs b/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
new file mode 100644
index 000000000..593345a5f
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_none_linuxkernel.rs
@@ -0,0 +1,28 @@
+// This defines the amd64 target for the Linux Kernel. See the linux_kernel_base module for
+// generic Linux kernel options.
+
+use crate::spec::{CodeModel, LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::linux_kernel_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.features =
+ "-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2,-3dnow,-3dnowa,-avx,-avx2,+soft-float".into();
+ base.code_model = Some(CodeModel::Kernel);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+
+ Target {
+ // FIXME: Some dispute: the linux-on-clang folks think this should use
+ // "Linux". We disagree because running *on* Linux is nothing like
+ // running *as* Linux, and historically the "os" component has always
+ // been used to mean the "on" part.
+ llvm_target: "x86_64-unknown-none-elf".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
new file mode 100644
index 000000000..f50c6bcee
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_openbsd.rs
@@ -0,0 +1,19 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::openbsd_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "x86_64-unknown-openbsd".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs b/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
new file mode 100644
index 000000000..668ae9054
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_redox.rs
@@ -0,0 +1,19 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::redox_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+
+ Target {
+ llvm_target: "x86_64-unknown-redox".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs b/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs
new file mode 100644
index 000000000..a7ae17839
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_unknown_uefi.rs
@@ -0,0 +1,36 @@
+// This defines the amd64 target for UEFI systems as described in the UEFI specification. See the
+// uefi_msvc_base module for generic UEFI options. On x86_64 (mostly called "x64" in the spec),
+// UEFI systems always run in long mode, have the interrupt controller pre-configured, and force
+// single-CPU execution.
+// The win64 ABI is used. It differs from the sysv64 ABI, so we must use a windows target with
+// LLVM. "x86_64-unknown-windows" is used to get the minimal subset of windows-specific features.
+
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::uefi_msvc_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+
+ // We disable MMX and SSE for now, even though UEFI allows using them. The problem is that you
+ // have to enable these CPU features explicitly before their first use, otherwise their
+ // instructions will trigger an exception. Rust does not inject any code that enables
+ // AVX/MMX/SSE instruction sets, so this must be done by the firmware. However, existing
+ // firmware is known to leave these uninitialized, thus triggering exceptions if we make use
+ // of them, which is why we avoid them and instead use soft-floats. This is also what GRUB
+ // and friends have done so far.
+ //
+ // If you initialize the FP units yourself, you can override these defaults with custom
+ // `-C target-feature` flags, thus giving you access to full MMX/SSE acceleration.
+ base.features = "-mmx,-sse,+soft-float".into();
+
+ Target {
+ llvm_target: "x86_64-unknown-windows".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs b/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs
new file mode 100644
index 000000000..76d2013cf
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_uwp_windows_gnu.rs
@@ -0,0 +1,19 @@
+use crate::spec::{LinkerFlavor, Target};
+
+pub fn target() -> Target {
+ let mut base = super::windows_uwp_gnu_base::opts();
+ base.cpu = "x86-64".into();
+ // Use high-entropy 64 bit address space for ASLR
+ base.add_pre_link_args(LinkerFlavor::Ld, &["-m", "i386pep", "--high-entropy-va"]);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64", "-Wl,--high-entropy-va"]);
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "x86_64-pc-windows-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_uwp_windows_msvc.rs b/compiler/rustc_target/src/spec/x86_64_uwp_windows_msvc.rs
new file mode 100644
index 000000000..b2769350b
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_uwp_windows_msvc.rs
@@ -0,0 +1,16 @@
+use crate::spec::Target;
+
+pub fn target() -> Target {
+ let mut base = super::windows_uwp_msvc_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+
+ Target {
+ llvm_target: "x86_64-pc-windows-msvc".into(),
+ pointer_width: 64,
+ data_layout: "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
new file mode 100644
index 000000000..129897495
--- /dev/null
+++ b/compiler/rustc_target/src/spec/x86_64_wrs_vxworks.rs
@@ -0,0 +1,20 @@
+use crate::spec::{LinkerFlavor, StackProbeType, Target};
+
+pub fn target() -> Target {
+ let mut base = super::vxworks_base::opts();
+ base.cpu = "x86-64".into();
+ base.max_atomic_width = Some(64);
+ base.add_pre_link_args(LinkerFlavor::Gcc, &["-m64"]);
+ // don't use probe-stack=inline-asm until rust#83139 and rust#84667 are resolved
+ base.stack_probes = StackProbeType::Call;
+ base.disable_redzone = true;
+
+ Target {
+ llvm_target: "x86_64-unknown-linux-gnu".into(),
+ pointer_width: 64,
+ data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+ .into(),
+ arch: "x86_64".into(),
+ options: base,
+ }
+}
diff --git a/compiler/rustc_target/src/tests.rs b/compiler/rustc_target/src/tests.rs
new file mode 100644
index 000000000..76375170d
--- /dev/null
+++ b/compiler/rustc_target/src/tests.rs
@@ -0,0 +1,57 @@
+use crate::spec::Target;
+
+#[test]
+fn report_unused_fields() {
+ let json = serde_json::from_str(
+ r#"
+ {
+ "arch": "powerpc64",
+ "data-layout": "e-m:e-i64:64-n32:64",
+ "llvm-target": "powerpc64le-elf",
+ "target-pointer-width": "64",
+ "code-mode": "foo"
+ }
+ "#,
+ )
+ .unwrap();
+ let warnings = Target::from_json(json).unwrap().1;
+ assert_eq!(warnings.warning_messages().len(), 1);
+ assert!(warnings.warning_messages().join("\n").contains("code-mode"));
+}
+
+#[test]
+fn report_incorrect_json_type() {
+ let json = serde_json::from_str(
+ r#"
+ {
+ "arch": "powerpc64",
+ "data-layout": "e-m:e-i64:64-n32:64",
+ "llvm-target": "powerpc64le-elf",
+ "target-pointer-width": "64",
+ "link-env-remove": "foo"
+ }
+ "#,
+ )
+ .unwrap();
+ let warnings = Target::from_json(json).unwrap().1;
+ assert_eq!(warnings.warning_messages().len(), 1);
+ assert!(warnings.warning_messages().join("\n").contains("link-env-remove"));
+}
+
+#[test]
+fn no_warnings_for_valid_target() {
+ let json = serde_json::from_str(
+ r#"
+ {
+ "arch": "powerpc64",
+ "data-layout": "e-m:e-i64:64-n32:64",
+ "llvm-target": "powerpc64le-elf",
+ "target-pointer-width": "64",
+ "link-env-remove": ["foo"]
+ }
+ "#,
+ )
+ .unwrap();
+ let warnings = Target::from_json(json).unwrap().1;
+ assert_eq!(warnings.warning_messages().len(), 0);
+}
diff --git a/compiler/rustc_trait_selection/Cargo.toml b/compiler/rustc_trait_selection/Cargo.toml
new file mode 100644
index 000000000..566f236f2
--- /dev/null
+++ b/compiler/rustc_trait_selection/Cargo.toml
@@ -0,0 +1,27 @@
+[package]
+name = "rustc_trait_selection"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_parse_format = { path = "../rustc_parse_format" }
+tracing = "0.1"
+rustc_attr = { path = "../rustc_attr" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_lint_defs = { path = "../rustc_lint_defs" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
+rustc_transmute = { path = "../rustc_transmute", features = ["rustc"] }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_trait_selection/src/autoderef.rs b/compiler/rustc_trait_selection/src/autoderef.rs
new file mode 100644
index 000000000..8b7e8984a
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/autoderef.rs
@@ -0,0 +1,240 @@
+use crate::traits::query::evaluate_obligation::InferCtxtExt;
+use crate::traits::{self, TraitEngine};
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::ty::{self, TraitRef, Ty, TyCtxt};
+use rustc_middle::ty::{ToPredicate, TypeVisitable};
+use rustc_session::Limit;
+use rustc_span::def_id::LOCAL_CRATE;
+use rustc_span::Span;
+
+#[derive(Copy, Clone, Debug)]
+pub enum AutoderefKind {
+ Builtin,
+ Overloaded,
+}
+
+struct AutoderefSnapshot<'tcx> {
+ at_start: bool,
+ reached_recursion_limit: bool,
+ steps: Vec<(Ty<'tcx>, AutoderefKind)>,
+ cur_ty: Ty<'tcx>,
+ obligations: Vec<traits::PredicateObligation<'tcx>>,
+}
+
+pub struct Autoderef<'a, 'tcx> {
+ // Meta infos:
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ span: Span,
+ overloaded_span: Span,
+ body_id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+
+ // Current state:
+ state: AutoderefSnapshot<'tcx>,
+
+ // Configurations:
+ include_raw_pointers: bool,
+ silence_errors: bool,
+}
+
+impl<'a, 'tcx> Iterator for Autoderef<'a, 'tcx> {
+ type Item = (Ty<'tcx>, usize);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ let tcx = self.infcx.tcx;
+
+ debug!("autoderef: steps={:?}, cur_ty={:?}", self.state.steps, self.state.cur_ty);
+ if self.state.at_start {
+ self.state.at_start = false;
+ debug!("autoderef stage #0 is {:?}", self.state.cur_ty);
+ return Some((self.state.cur_ty, 0));
+ }
+
+ // If we have reached the recursion limit, error gracefully.
+ if !tcx.recursion_limit().value_within_limit(self.state.steps.len()) {
+ if !self.silence_errors {
+ report_autoderef_recursion_limit_error(tcx, self.span, self.state.cur_ty);
+ }
+ self.state.reached_recursion_limit = true;
+ return None;
+ }
+
+ if self.state.cur_ty.is_ty_var() {
+ return None;
+ }
+
+ // Otherwise, deref if type is derefable:
+ let (kind, new_ty) =
+ if let Some(mt) = self.state.cur_ty.builtin_deref(self.include_raw_pointers) {
+ (AutoderefKind::Builtin, mt.ty)
+ } else if let Some(ty) = self.overloaded_deref_ty(self.state.cur_ty) {
+ (AutoderefKind::Overloaded, ty)
+ } else {
+ return None;
+ };
+
+ if new_ty.references_error() {
+ return None;
+ }
+
+ self.state.steps.push((self.state.cur_ty, kind));
+ debug!(
+ "autoderef stage #{:?} is {:?} from {:?}",
+ self.step_count(),
+ new_ty,
+ (self.state.cur_ty, kind)
+ );
+ self.state.cur_ty = new_ty;
+
+ Some((self.state.cur_ty, self.step_count()))
+ }
+}
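+
+// A hypothetical driver loop (names assumed, not defined here; it sketches how
+// a caller might consume the iterator and then collect the accumulated
+// obligations):
+//
+// let mut autoderef = Autoderef::new(infcx, param_env, body_id, span, base_ty, span);
+// while let Some((ty, steps)) = autoderef.next() {
+// // inspect `ty`, reached after `steps` derefs
+// }
+// let obligations = autoderef.into_obligations();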
+
+impl<'a, 'tcx> Autoderef<'a, 'tcx> {
+ pub fn new(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ overloaded_span: Span,
+ ) -> Autoderef<'a, 'tcx> {
+ Autoderef {
+ infcx,
+ span,
+ overloaded_span,
+ body_id,
+ param_env,
+ state: AutoderefSnapshot {
+ steps: vec![],
+ cur_ty: infcx.resolve_vars_if_possible(base_ty),
+ obligations: vec![],
+ at_start: true,
+ reached_recursion_limit: false,
+ },
+ include_raw_pointers: false,
+ silence_errors: false,
+ }
+ }
+
+ fn overloaded_deref_ty(&mut self, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
+ debug!("overloaded_deref_ty({:?})", ty);
+
+ let tcx = self.infcx.tcx;
+
+ // <ty as Deref>
+ let trait_ref = TraitRef {
+ def_id: tcx.lang_items().deref_trait()?,
+ substs: tcx.mk_substs_trait(ty, &[]),
+ };
+
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
+
+ let obligation = traits::Obligation::new(
+ cause.clone(),
+ self.param_env,
+ ty::Binder::dummy(trait_ref).without_const().to_predicate(tcx),
+ );
+ if !self.infcx.predicate_may_hold(&obligation) {
+ debug!("overloaded_deref_ty: cannot match obligation");
+ return None;
+ }
+
+ let mut fulfillcx = traits::FulfillmentContext::new_in_snapshot();
+ let normalized_ty = fulfillcx.normalize_projection_type(
+ &self.infcx,
+ self.param_env,
+ ty::ProjectionTy {
+ item_def_id: tcx.lang_items().deref_target()?,
+ substs: trait_ref.substs,
+ },
+ cause,
+ );
+ let errors = fulfillcx.select_where_possible(&self.infcx);
+ if !errors.is_empty() {
+ // This shouldn't happen, except for evaluate/fulfill mismatches,
+ // but that's not a reason for an ICE (`predicate_may_hold` is conservative
+ // by design).
+ debug!("overloaded_deref_ty: encountered errors {:?} while fulfilling", errors);
+ return None;
+ }
+ let obligations = fulfillcx.pending_obligations();
+ debug!("overloaded_deref_ty({:?}) = ({:?}, {:?})", ty, normalized_ty, obligations);
+ self.state.obligations.extend(obligations);
+
+ Some(self.infcx.resolve_vars_if_possible(normalized_ty))
+ }
+
+ /// Returns the final type we ended up with, which may be an inference
+ /// variable (resolved first if `resolve` is true).
+ pub fn final_ty(&self, resolve: bool) -> Ty<'tcx> {
+ if resolve {
+ self.infcx.resolve_vars_if_possible(self.state.cur_ty)
+ } else {
+ self.state.cur_ty
+ }
+ }
+
+ pub fn step_count(&self) -> usize {
+ self.state.steps.len()
+ }
+
+ pub fn into_obligations(self) -> Vec<traits::PredicateObligation<'tcx>> {
+ self.state.obligations
+ }
+
+ pub fn steps(&self) -> &[(Ty<'tcx>, AutoderefKind)] {
+ &self.state.steps
+ }
+
+ pub fn span(&self) -> Span {
+ self.span
+ }
+
+ pub fn overloaded_span(&self) -> Span {
+ self.overloaded_span
+ }
+
+ pub fn reached_recursion_limit(&self) -> bool {
+ self.state.reached_recursion_limit
+ }
+
+ /// Also dereference through raw pointer types,
+ /// e.g., assuming `ptr_to_Foo` is the type `*const Foo`:
+ /// `fcx.autoderef(span, ptr_to_Foo)` => `[*const Foo]`
+ /// `fcx.autoderef(span, ptr_to_Foo).include_raw_pointers()` => `[*const Foo, Foo]`
+ pub fn include_raw_pointers(mut self) -> Self {
+ self.include_raw_pointers = true;
+ self
+ }
+
+ pub fn silence_errors(mut self) -> Self {
+ self.silence_errors = true;
+ self
+ }
+}
+
+pub fn report_autoderef_recursion_limit_error<'tcx>(tcx: TyCtxt<'tcx>, span: Span, ty: Ty<'tcx>) {
+ // We've reached the recursion limit, error gracefully.
+ let suggested_limit = match tcx.recursion_limit() {
+ Limit(0) => Limit(2),
+ limit => limit * 2,
+ };
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0055,
+ "reached the recursion limit while auto-dereferencing `{:?}`",
+ ty
+ )
+ .span_label(span, "deref recursion limit reached")
+ .help(&format!(
+ "consider increasing the recursion limit by adding a \
+ `#![recursion_limit = \"{}\"]` attribute to your crate (`{}`)",
+ suggested_limit,
+ tcx.crate_name(LOCAL_CRATE),
+ ))
+ .emit();
+}
diff --git a/compiler/rustc_trait_selection/src/infer.rs b/compiler/rustc_trait_selection/src/infer.rs
new file mode 100644
index 000000000..9d30374f8
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/infer.rs
@@ -0,0 +1,177 @@
+use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
+use crate::traits::{self, TraitEngine, TraitEngineExt};
+
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_infer::traits::ObligationCause;
+use rustc_middle::arena::ArenaAllocatable;
+use rustc_middle::infer::canonical::{Canonical, CanonicalizedQueryResponse, QueryResponse};
+use rustc_middle::traits::query::Fallible;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::ToPredicate;
+use rustc_middle::ty::{self, Ty, TypeFoldable, TypeVisitable};
+use rustc_span::{Span, DUMMY_SP};
+
+use std::fmt::Debug;
+
+pub use rustc_infer::infer::*;
+
+pub trait InferCtxtExt<'tcx> {
+ fn type_is_copy_modulo_regions(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ span: Span,
+ ) -> bool;
+
+ fn partially_normalize_associated_types_in<T>(
+ &self,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> InferOk<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>;
+
+ /// Checks whether `ty` implements the given trait (`trait_def_id`).
+ /// The inputs are:
+ ///
+ /// - the def-id of the trait
+ /// - the self type
+ /// - the *other* type parameters of the trait, excluding the self-type
+ /// - the parameter environment
+ ///
+ /// Invokes `evaluate_obligation`, so in the event that evaluating
+ /// `Ty: Trait` causes overflow, `EvaluatedToRecur` (or `EvaluatedToUnknown`)
+ /// will be returned.
+ fn type_implements_trait(
+ &self,
+ trait_def_id: DefId,
+ ty: Ty<'tcx>,
+ params: SubstsRef<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> traits::EvaluationResult;
+}
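+
+// A minimal usage sketch (hypothetical; assumes `infcx`, `param_env`, `ty`,
+// and the trait's `DefId` are already in scope, and passes no extra type
+// parameters):
+//
+// let implements = infcx
+// .type_implements_trait(trait_def_id, ty, ty::List::empty(), param_env)
+// .may_apply();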
+impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
+ fn type_is_copy_modulo_regions(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ span: Span,
+ ) -> bool {
+ let ty = self.resolve_vars_if_possible(ty);
+
+ if !(param_env, ty).needs_infer() {
+ return ty.is_copy_modulo_regions(self.tcx.at(span), param_env);
+ }
+
+ let copy_def_id = self.tcx.require_lang_item(LangItem::Copy, None);
+
+ // This can get called from typeck (by euv), and `moves_by_default`
+ // rightly refuses to work with inference variables, but
+ // moves_by_default has a cache, which we want to use in other
+ // cases.
+ traits::type_known_to_meet_bound_modulo_regions(self, param_env, ty, copy_def_id, span)
+ }
+
+ /// Normalizes associated types in `value`, potentially returning
+ /// new obligations that must further be processed.
+ fn partially_normalize_associated_types_in<T>(
+ &self,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> InferOk<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!("partially_normalize_associated_types_in(value={:?})", value);
+ let mut selcx = traits::SelectionContext::new(self);
+ let traits::Normalized { value, obligations } =
+ traits::normalize(&mut selcx, param_env, cause, value);
+ debug!(
+ "partially_normalize_associated_types_in: result={:?} predicates={:?}",
+ value, obligations
+ );
+ InferOk { value, obligations }
+ }
+
+ fn type_implements_trait(
+ &self,
+ trait_def_id: DefId,
+ ty: Ty<'tcx>,
+ params: SubstsRef<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> traits::EvaluationResult {
+ debug!(
+ "type_implements_trait: trait_def_id={:?}, type={:?}, params={:?}, param_env={:?}",
+ trait_def_id, ty, params, param_env
+ );
+
+ let trait_ref =
+ ty::TraitRef { def_id: trait_def_id, substs: self.tcx.mk_substs_trait(ty, params) };
+
+ let obligation = traits::Obligation {
+ cause: traits::ObligationCause::dummy(),
+ param_env,
+ recursion_depth: 0,
+ predicate: ty::Binder::dummy(trait_ref).without_const().to_predicate(self.tcx),
+ };
+ self.evaluate_obligation(&obligation).unwrap_or(traits::EvaluationResult::EvaluatedToErr)
+ }
+}
+
+pub trait InferCtxtBuilderExt<'tcx> {
+ fn enter_canonical_trait_query<K, R>(
+ &mut self,
+ canonical_key: &Canonical<'tcx, K>,
+ operation: impl FnOnce(&InferCtxt<'_, 'tcx>, &mut dyn TraitEngine<'tcx>, K) -> Fallible<R>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, R>>
+ where
+ K: TypeFoldable<'tcx>,
+ R: Debug + TypeFoldable<'tcx>,
+ Canonical<'tcx, QueryResponse<'tcx, R>>: ArenaAllocatable<'tcx>;
+}
+
+impl<'tcx> InferCtxtBuilderExt<'tcx> for InferCtxtBuilder<'tcx> {
+ /// The "main method" for a canonicalized trait query. Given the
+ /// canonical key `canonical_key`, this method will create a new
+ /// inference context, instantiate the key, and run your operation
+ /// `op`. The operation should yield up a result (of type `R`) as
+ /// well as a set of trait obligations that must be fully
+ /// satisfied. These obligations will be processed and the
+ /// canonical result created.
+ ///
+ /// Returns `NoSolution` in the event of any error.
+ ///
+ /// (It might be mildly nicer to implement this on `TyCtxt`, and
+ /// not `InferCtxtBuilder`, but that is a bit tricky right now.
+ /// In part because we would need a `for<'tcx>` sort of
+ /// bound for the closure and in part because it is convenient to
+ /// have `'tcx` be free on this function so that we can talk about
+ /// `K: TypeFoldable<'tcx>`.)
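+ ///
+ /// A hypothetical use from a query provider (names assumed, not defined here):
+ ///
+ /// ```ignore (illustrative)
+ /// tcx.infer_ctxt().enter_canonical_trait_query(&canonical_goal, |infcx, fulfill_cx, goal| {
+ ///     // prove `goal` here, registering any obligations in `fulfill_cx`
+ ///     Ok(())
+ /// })
+ /// ```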
+ fn enter_canonical_trait_query<K, R>(
+ &mut self,
+ canonical_key: &Canonical<'tcx, K>,
+ operation: impl FnOnce(&InferCtxt<'_, 'tcx>, &mut dyn TraitEngine<'tcx>, K) -> Fallible<R>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, R>>
+ where
+ K: TypeFoldable<'tcx>,
+ R: Debug + TypeFoldable<'tcx>,
+ Canonical<'tcx, QueryResponse<'tcx, R>>: ArenaAllocatable<'tcx>,
+ {
+ self.enter_with_canonical(
+ DUMMY_SP,
+ canonical_key,
+ |ref infcx, key, canonical_inference_vars| {
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+ let value = operation(infcx, &mut *fulfill_cx, key)?;
+ infcx.make_canonicalized_query_response(
+ canonical_inference_vars,
+ value,
+ &mut *fulfill_cx,
+ )
+ },
+ )
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/lib.rs b/compiler/rustc_trait_selection/src/lib.rs
new file mode 100644
index 000000000..282ee632c
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/lib.rs
@@ -0,0 +1,40 @@
+//! This crate defines the trait resolution method.
+//!
+//! - **Traits.** Trait resolution is implemented in the `traits` module.
+//!
+//! For more information about how rustc works, see the [rustc-dev-guide].
+//!
+//! [rustc-dev-guide]: https://rustc-dev-guide.rust-lang.org/
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![allow(rustc::potential_query_instability)]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(box_patterns)]
+#![feature(control_flow_enum)]
+#![feature(drain_filter)]
+#![feature(hash_drain_filter)]
+#![feature(label_break_value)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(if_let_guard)]
+#![feature(never_type)]
+#![recursion_limit = "512"] // For rustdoc
+
+#[macro_use]
+extern crate rustc_macros;
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+#[macro_use]
+extern crate rustc_data_structures;
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+#[macro_use]
+extern crate smallvec;
+
+pub mod autoderef;
+pub mod infer;
+pub mod traits;
diff --git a/compiler/rustc_trait_selection/src/traits/auto_trait.rs b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
new file mode 100644
index 000000000..294c81d0b
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/auto_trait.rs
@@ -0,0 +1,903 @@
+//! Support code for rustdoc and external tools.
+//! You really don't want to be using this unless you need to.
+
+use super::*;
+
+use crate::infer::region_constraints::{Constraint, RegionConstraintData};
+use crate::infer::InferCtxt;
+use crate::traits::project::ProjectAndUnifyResult;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::ty::fold::{TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{Region, RegionVid, Term};
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+
+use std::collections::hash_map::Entry;
+use std::collections::VecDeque;
+use std::iter;
+
+// FIXME(twk): this is obviously not nice to duplicate like that
+#[derive(Eq, PartialEq, Hash, Copy, Clone, Debug)]
+pub enum RegionTarget<'tcx> {
+ Region(Region<'tcx>),
+ RegionVid(RegionVid),
+}
+
+#[derive(Default, Debug, Clone)]
+pub struct RegionDeps<'tcx> {
+ larger: FxHashSet<RegionTarget<'tcx>>,
+ smaller: FxHashSet<RegionTarget<'tcx>>,
+}
+
+pub enum AutoTraitResult<A> {
+ ExplicitImpl,
+ PositiveImpl(A),
+ NegativeImpl,
+}
+
+#[allow(dead_code)]
+impl<A> AutoTraitResult<A> {
+ fn is_auto(&self) -> bool {
+ matches!(self, AutoTraitResult::PositiveImpl(_) | AutoTraitResult::NegativeImpl)
+ }
+}
+
+pub struct AutoTraitInfo<'cx> {
+ pub full_user_env: ty::ParamEnv<'cx>,
+ pub region_data: RegionConstraintData<'cx>,
+ pub vid_to_region: FxHashMap<ty::RegionVid, ty::Region<'cx>>,
+}
+
+pub struct AutoTraitFinder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> AutoTraitFinder<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>) -> Self {
+ AutoTraitFinder { tcx }
+ }
+
+ /// Makes a best effort to determine whether and under which conditions an auto trait is
+ /// implemented for a type. For example, if you have
+ ///
+ /// ```
+ /// struct Foo<T> { data: Box<T> }
+ /// ```
+ ///
+ /// then this might return that Foo<T>: Send if T: Send (encoded in the AutoTraitResult type).
+ /// The analysis attempts to account for custom impls as well as other complex cases. This
+ /// result is intended for use by rustdoc and other such consumers.
+ ///
+ /// (Note that due to the coinductive nature of Send, the full and correct result is actually
+ /// quite simple to generate. That is, when a type has no custom impl, it is Send iff its field
+ /// types are all Send. So, in our example, we might have that Foo<T>: Send if Box<T>: Send.
+ /// But this is often not the best way to present this to the user.)
+ ///
+ /// Warning: The API should be considered highly unstable, and it may be refactored or removed
+ /// in the future.
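+ ///
+ /// A hypothetical invocation (names assumed, not defined in this crate):
+ ///
+ /// ```ignore (illustrative)
+ /// let finder = AutoTraitFinder::new(tcx);
+ /// let res = finder.find_auto_trait_generics(ty, param_env, send_did, |info| info.full_user_env);
+ /// ```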
+ pub fn find_auto_trait_generics<A>(
+ &self,
+ ty: Ty<'tcx>,
+ orig_env: ty::ParamEnv<'tcx>,
+ trait_did: DefId,
+ mut auto_trait_callback: impl FnMut(AutoTraitInfo<'tcx>) -> A,
+ ) -> AutoTraitResult<A> {
+ let tcx = self.tcx;
+
+ let trait_ref = ty::TraitRef { def_id: trait_did, substs: tcx.mk_substs_trait(ty, &[]) };
+
+ let trait_pred = ty::Binder::dummy(trait_ref);
+
+ let bail_out = tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ let result = selcx.select(&Obligation::new(
+ ObligationCause::dummy(),
+ orig_env,
+ trait_pred.to_poly_trait_predicate(),
+ ));
+
+ match result {
+ Ok(Some(ImplSource::UserDefined(_))) => {
+ debug!(
+ "find_auto_trait_generics({:?}): \
+ manual impl found, bailing out",
+ trait_ref
+ );
+ return true;
+ }
+ _ => {}
+ }
+
+ let result = selcx.select(&Obligation::new(
+ ObligationCause::dummy(),
+ orig_env,
+ trait_pred.to_poly_trait_predicate_negative_polarity(),
+ ));
+
+ match result {
+ Ok(Some(ImplSource::UserDefined(_))) => {
+ debug!(
+ "find_auto_trait_generics({:?}): \
+ manual impl found, bailing out",
+ trait_ref
+ );
+ true
+ }
+ _ => false,
+ }
+ });
+
+ // If an explicit impl exists, it always takes priority over an auto impl
+ if bail_out {
+ return AutoTraitResult::ExplicitImpl;
+ }
+
+ tcx.infer_ctxt().enter(|infcx| {
+ let mut fresh_preds = FxHashSet::default();
+
+ // Due to the way projections are handled by SelectionContext, we need to run
+ // evaluate_predicates twice: once on the original param env, and once on the result of
+ // the first evaluate_predicates call.
+ //
+ // The problem is this: most of rustc, including SelectionContext and traits::project,
+ // is designed to work with a concrete usage of a type (e.g., Vec<u8>, or the
+ // Vec<T> inside fn<T>() { ... }). This information will generally never change - given
+ // the 'T' in fn<T>() { ... }, we'll never know anything else about 'T'.
+ // If we're unable to prove that 'T' implements a particular trait, we're done -
+ // there's nothing left to do but error out.
+ //
+ // However, synthesizing an auto trait impl works differently. Here, we start out with
+ // a set of initial conditions - the ParamEnv of the struct/enum/union we're dealing
+ // with - and progressively discover the conditions we need to fulfill for it to
+ // implement a certain auto trait. This ends up breaking two assumptions made by trait
+ // selection and projection:
+ //
+ // * We can always cache the result of a particular trait selection for the lifetime of
+ // an InferCtxt
+ // * Given a projection bound such as '<T as SomeTrait>::SomeItem = K', if 'T:
+ // SomeTrait' doesn't hold, then we don't need to care about the 'SomeItem = K'
+ //
+ // We fix the first assumption by manually clearing out all of the InferCtxt's caches
+ // in between calls to SelectionContext.select. This allows us to keep all of the
+ // intermediate types we create bound to the 'tcx lifetime, rather than needing to lift
+ // them between calls.
+ //
+ // We fix the second assumption by reprocessing the result of our first call to
+ // evaluate_predicates. Using the example of '<T as SomeTrait>::SomeItem = K', our first
+ // pass will pick up 'T: SomeTrait', but not 'SomeItem = K'. On our second pass,
+ // traits::project will see that 'T: SomeTrait' is in our ParamEnv, allowing
+ // SelectionContext to return it back to us.
+
+ let Some((new_env, user_env)) = self.evaluate_predicates(
+ &infcx,
+ trait_did,
+ ty,
+ orig_env,
+ orig_env,
+ &mut fresh_preds,
+ false,
+ ) else {
+ return AutoTraitResult::NegativeImpl;
+ };
+
+ let (full_env, full_user_env) = self
+ .evaluate_predicates(
+ &infcx,
+ trait_did,
+ ty,
+ new_env,
+ user_env,
+ &mut fresh_preds,
+ true,
+ )
+ .unwrap_or_else(|| {
+ panic!("Failed to fully process: {:?} {:?} {:?}", ty, trait_did, orig_env)
+ });
+
+ debug!(
+ "find_auto_trait_generics({:?}): fulfilling \
+ with {:?}",
+ trait_ref, full_env
+ );
+ infcx.clear_caches();
+
+ // At this point, we already have all of the bounds we need. FulfillmentContext is used
+ // to store all of the necessary region/lifetime bounds in the InferCtxt, as well as
+ // an additional sanity check.
+ let mut fulfill = <dyn TraitEngine<'tcx>>::new(tcx);
+ fulfill.register_bound(&infcx, full_env, ty, trait_did, ObligationCause::dummy());
+ let errors = fulfill.select_all_or_error(&infcx);
+
+ if !errors.is_empty() {
+ panic!("Unable to fulfill trait {:?} for '{:?}': {:?}", trait_did, ty, errors);
+ }
+
+ infcx.process_registered_region_obligations(&Default::default(), full_env);
+
+ let region_data = infcx
+ .inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .region_constraint_data()
+ .clone();
+
+ let vid_to_region = self.map_vid_to_region(&region_data);
+
+ let info = AutoTraitInfo { full_user_env, region_data, vid_to_region };
+
+ AutoTraitResult::PositiveImpl(auto_trait_callback(info))
+ })
+ }
+}
+
+impl<'tcx> AutoTraitFinder<'tcx> {
+ /// The core logic responsible for computing the bounds for our synthesized impl.
+ ///
+ /// To calculate the bounds, we call `SelectionContext.select` in a loop. Like
+ /// `FulfillmentContext`, we recursively select the nested obligations of predicates we
+ /// encounter. However, whenever we encounter an `UnimplementedError` involving a type
+ /// parameter, we add it to our `ParamEnv`. Since our goal is to determine when a particular
+ /// type implements an auto trait, Unimplemented errors tell us what conditions need to be met.
+ ///
+ /// This method ends up working somewhat similarly to `FulfillmentContext`, but with a few key
+ /// differences. `FulfillmentContext` works under the assumption that it's dealing with concrete
+ /// user code. Accordingly, it considers all possible ways that a `Predicate` could be met, which
+ /// isn't always what we want for a synthesized impl. For example, given the predicate `T:
+ /// Iterator`, `FulfillmentContext` can end up reporting an Unimplemented error for `T:
+ /// IntoIterator` -- since there's an implementation of `Iterator` where `T: IntoIterator`,
+ /// `FulfillmentContext` will drive `SelectionContext` to consider that impl before giving up.
+ /// If we were to rely on `FulfillmentContext`'s decision, we might end up synthesizing an impl
+ /// like this:
+ /// ```ignore (illustrative)
+ /// impl<T> Send for Foo<T> where T: IntoIterator
+ /// ```
+ /// While it might be technically true that Foo implements Send where `T: IntoIterator`,
+ /// the bound is overly restrictive - it's really only necessary that `T: Iterator`.
+ ///
+ /// For this reason, `evaluate_predicates` handles predicates with type variables specially.
+ /// When we encounter an `Unimplemented` error for a bound such as `T: Iterator`, we immediately
+ /// add it to our `ParamEnv`, and add it to our stack for recursive evaluation. When we later
+ /// select it, we'll pick up any nested bounds, without ever inferring that `T: IntoIterator`
+ /// needs to hold.
+ ///
+ /// One additional consideration is supertrait bounds. Normally, a `ParamEnv` is only ever
+ /// constructed once for a given type. As part of the construction process, the `ParamEnv` will
+ /// have any supertrait bounds normalized -- e.g., if we have a type `struct Foo<T: Copy>`, the
+ /// `ParamEnv` will contain `T: Copy` and `T: Clone`, since `Copy: Clone`. When we construct our
+ /// own `ParamEnv`, we need to do this ourselves, through `traits::elaborate_predicates`, or
+ /// else `SelectionContext` will choke on the missing predicates. However, this should never
+ /// show up in the final synthesized generics: we don't want our generated docs page to contain
+ /// something like `T: Copy + Clone`, as that's redundant. Therefore, we keep track of a
+ /// separate `user_env`, which only holds the predicates that will actually be displayed to the
+ /// user.
+ fn evaluate_predicates(
+ &self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ trait_did: DefId,
+ ty: Ty<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ user_env: ty::ParamEnv<'tcx>,
+ fresh_preds: &mut FxHashSet<ty::Predicate<'tcx>>,
+ only_projections: bool,
+ ) -> Option<(ty::ParamEnv<'tcx>, ty::ParamEnv<'tcx>)> {
+ let tcx = infcx.tcx;
+
+ // Don't try to process any nested obligations involving predicates
+ // that are already in the `ParamEnv` (modulo regions): we already
+ // know that they must hold.
+ for predicate in param_env.caller_bounds() {
+ fresh_preds.insert(self.clean_pred(infcx, predicate));
+ }
+
+ let mut select = SelectionContext::new(&infcx);
+
+ let mut already_visited = FxHashSet::default();
+ let mut predicates = VecDeque::new();
+ predicates.push_back(ty::Binder::dummy(ty::TraitPredicate {
+ trait_ref: ty::TraitRef {
+ def_id: trait_did,
+ substs: infcx.tcx.mk_substs_trait(ty, &[]),
+ },
+ constness: ty::BoundConstness::NotConst,
+ // Auto traits are positive
+ polarity: ty::ImplPolarity::Positive,
+ }));
+
+ let computed_preds = param_env.caller_bounds().iter();
+ let mut user_computed_preds: FxHashSet<_> = user_env.caller_bounds().iter().collect();
+
+ let mut new_env = param_env;
+ let dummy_cause = ObligationCause::dummy();
+
+ while let Some(pred) = predicates.pop_front() {
+ infcx.clear_caches();
+
+ if !already_visited.insert(pred) {
+ continue;
+ }
+
+ // Call `infcx.resolve_vars_if_possible` to see if we can
+ // get rid of any inference variables.
+ let obligation =
+ infcx.resolve_vars_if_possible(Obligation::new(dummy_cause.clone(), new_env, pred));
+ let result = select.select(&obligation);
+
+ match result {
+ Ok(Some(ref impl_source)) => {
+ // If we see an explicit negative impl (e.g., `impl !Send for MyStruct`),
+ // we immediately bail out, since it's impossible for us to continue.
+
+ if let ImplSource::UserDefined(ImplSourceUserDefinedData {
+ impl_def_id, ..
+ }) = impl_source
+ {
+ // Blame 'tidy' for the weird bracket placement.
+ if infcx.tcx.impl_polarity(*impl_def_id) == ty::ImplPolarity::Negative {
+ debug!(
+ "evaluate_nested_obligations: found explicit negative impl\
+ {:?}, bailing out",
+ impl_def_id
+ );
+ return None;
+ }
+ }
+
+ let obligations = impl_source.clone().nested_obligations().into_iter();
+
+ if !self.evaluate_nested_obligations(
+ ty,
+ obligations,
+ &mut user_computed_preds,
+ fresh_preds,
+ &mut predicates,
+ &mut select,
+ only_projections,
+ ) {
+ return None;
+ }
+ }
+ Ok(None) => {}
+ Err(SelectionError::Unimplemented) => {
+ if self.is_param_no_infer(pred.skip_binder().trait_ref.substs) {
+ already_visited.remove(&pred);
+ self.add_user_pred(&mut user_computed_preds, pred.to_predicate(self.tcx));
+ predicates.push_back(pred);
+ } else {
+ debug!(
+ "evaluate_nested_obligations: `Unimplemented` found, bailing: \
+ {:?} {:?} {:?}",
+ ty,
+ pred,
+ pred.skip_binder().trait_ref.substs
+ );
+ return None;
+ }
+ }
+ _ => panic!("Unexpected error for '{:?}': {:?}", ty, result),
+ };
+
+ let normalized_preds = elaborate_predicates(
+ tcx,
+ computed_preds.clone().chain(user_computed_preds.iter().cloned()),
+ )
+ .map(|o| o.predicate);
+ new_env = ty::ParamEnv::new(
+ tcx.mk_predicates(normalized_preds),
+ param_env.reveal(),
+ param_env.constness(),
+ );
+ }
+
+ let final_user_env = ty::ParamEnv::new(
+ tcx.mk_predicates(user_computed_preds.into_iter()),
+ user_env.reveal(),
+ user_env.constness(),
+ );
+ debug!(
+ "evaluate_nested_obligations(ty={:?}, trait_did={:?}): succeeded with '{:?}' \
+ '{:?}'",
+ ty, trait_did, new_env, final_user_env
+ );
+
+ Some((new_env, final_user_env))
+ }
+
+ /// This method is designed to work around the following issue:
+ /// When we compute auto trait bounds, we repeatedly call `SelectionContext.select`,
+ /// progressively building a `ParamEnv` based on the results we get.
+ /// However, our usage of `SelectionContext` differs from its normal use within the compiler,
+ /// in that we capture and re-process predicates from `Unimplemented` errors.
+ ///
+ /// This can lead to a corner case when dealing with region parameters.
+ /// During our selection loop in `evaluate_predicates`, we might end up with
+ /// two trait predicates that differ only in their region parameters:
+ /// one containing a HRTB lifetime parameter, and one containing a 'normal'
+ /// lifetime parameter. For example:
+ /// ```ignore (illustrative)
+ /// T as MyTrait<'a>
+ /// T as MyTrait<'static>
+ /// ```
+ /// If we put both of these predicates in our computed `ParamEnv`, we'll
+ /// confuse `SelectionContext`, since it will (correctly) view both as being applicable.
+ ///
+ /// To solve this, we pick the 'more strict' lifetime bound -- i.e., the HRTB.
+ /// Our end goal is to generate a user-visible description of the conditions
+ /// under which a type implements an auto trait. A trait predicate involving
+ /// a HRTB means that the type needs to work with any choice of lifetime,
+ /// not just one specific lifetime (e.g., `'static`).
+ fn add_user_pred(
+ &self,
+ user_computed_preds: &mut FxHashSet<ty::Predicate<'tcx>>,
+ new_pred: ty::Predicate<'tcx>,
+ ) {
+ let mut should_add_new = true;
+ user_computed_preds.retain(|&old_pred| {
+ if let (ty::PredicateKind::Trait(new_trait), ty::PredicateKind::Trait(old_trait)) =
+ (new_pred.kind().skip_binder(), old_pred.kind().skip_binder())
+ {
+ if new_trait.def_id() == old_trait.def_id() {
+ let new_substs = new_trait.trait_ref.substs;
+ let old_substs = old_trait.trait_ref.substs;
+
+ if !new_substs.types().eq(old_substs.types()) {
+ // We can't compare lifetimes if the types are different,
+ // so skip checking `old_pred`.
+ return true;
+ }
+
+ for (new_region, old_region) in
+ iter::zip(new_substs.regions(), old_substs.regions())
+ {
+ match (*new_region, *old_region) {
+ // If both predicates have an `ReLateBound` (a HRTB) in the
+ // same spot, we do nothing.
+ (ty::ReLateBound(_, _), ty::ReLateBound(_, _)) => {}
+
+ (ty::ReLateBound(_, _), _) | (_, ty::ReVar(_)) => {
+ // One of these is true:
+ // The new predicate has a HRTB in a spot where the old
+ // predicate does not (if they both had a HRTB, the previous
+ // match arm would have executed). A HRTB is a 'stricter'
+ // bound than anything else, so we want to keep the newer
+ // predicate (with the HRTB) in place of the old predicate.
+ //
+ // OR
+ //
+ // The old predicate has a region variable where the new
+ // predicate has some other kind of region. A region
+ // variable isn't something we can actually display to a user,
+ // so we choose the new predicate (which doesn't have a region
+ // variable).
+ //
+ // In both cases, we want to remove the old predicate,
+ // from `user_computed_preds`, and replace it with the new
+ // one. Having both the old and the new
+ // predicate in a `ParamEnv` would confuse `SelectionContext`.
+ //
+ // We're currently in the predicate passed to 'retain',
+ // so we return `false` to remove the old predicate from
+ // `user_computed_preds`.
+ return false;
+ }
+ (_, ty::ReLateBound(_, _)) | (ty::ReVar(_), _) => {
+ // This is the opposite of the previous arm.
+ // One of these is true:
+ //
+ // The old predicate has a HRTB lifetime in a place where the
+ // new predicate does not.
+ //
+ // OR
+ //
+ // The new predicate has a region variable where the old
+ // predicate has some other type of region.
+ //
+ // We want to leave the old
+ // predicate in `user_computed_preds`, and skip adding
+ // `new_pred` to `user_computed_preds`.
+ should_add_new = false
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ true
+ });
+
+ if should_add_new {
+ user_computed_preds.insert(new_pred);
+ }
+ }
+
+ /// This is very similar to `handle_lifetimes`. However, instead of matching `ty::Region`s
+ /// to each other, we match `ty::RegionVid`s to `ty::Region`s.
+ fn map_vid_to_region<'cx>(
+ &self,
+ regions: &RegionConstraintData<'cx>,
+ ) -> FxHashMap<ty::RegionVid, ty::Region<'cx>> {
+ let mut vid_map: FxHashMap<RegionTarget<'cx>, RegionDeps<'cx>> = FxHashMap::default();
+ let mut finished_map = FxHashMap::default();
+
+ for constraint in regions.constraints.keys() {
+ match constraint {
+ &Constraint::VarSubVar(r1, r2) => {
+ {
+ let deps1 = vid_map.entry(RegionTarget::RegionVid(r1)).or_default();
+ deps1.larger.insert(RegionTarget::RegionVid(r2));
+ }
+
+ let deps2 = vid_map.entry(RegionTarget::RegionVid(r2)).or_default();
+ deps2.smaller.insert(RegionTarget::RegionVid(r1));
+ }
+ &Constraint::RegSubVar(region, vid) => {
+ {
+ let deps1 = vid_map.entry(RegionTarget::Region(region)).or_default();
+ deps1.larger.insert(RegionTarget::RegionVid(vid));
+ }
+
+ let deps2 = vid_map.entry(RegionTarget::RegionVid(vid)).or_default();
+ deps2.smaller.insert(RegionTarget::Region(region));
+ }
+ &Constraint::VarSubReg(vid, region) => {
+ finished_map.insert(vid, region);
+ }
+ &Constraint::RegSubReg(r1, r2) => {
+ {
+ let deps1 = vid_map.entry(RegionTarget::Region(r1)).or_default();
+ deps1.larger.insert(RegionTarget::Region(r2));
+ }
+
+ let deps2 = vid_map.entry(RegionTarget::Region(r2)).or_default();
+ deps2.smaller.insert(RegionTarget::Region(r1));
+ }
+ }
+ }
+
+ while !vid_map.is_empty() {
+ let target = *vid_map.keys().next().expect("Keys somehow empty");
+ let deps = vid_map.remove(&target).expect("Entry somehow missing");
+
+ for smaller in deps.smaller.iter() {
+ for larger in deps.larger.iter() {
+ match (smaller, larger) {
+ (&RegionTarget::Region(_), &RegionTarget::Region(_)) => {
+ if let Entry::Occupied(v) = vid_map.entry(*smaller) {
+ let smaller_deps = v.into_mut();
+ smaller_deps.larger.insert(*larger);
+ smaller_deps.larger.remove(&target);
+ }
+
+ if let Entry::Occupied(v) = vid_map.entry(*larger) {
+ let larger_deps = v.into_mut();
+ larger_deps.smaller.insert(*smaller);
+ larger_deps.smaller.remove(&target);
+ }
+ }
+ (&RegionTarget::RegionVid(v1), &RegionTarget::Region(r1)) => {
+ finished_map.insert(v1, r1);
+ }
+ (&RegionTarget::Region(_), &RegionTarget::RegionVid(_)) => {
+ // Do nothing; we don't care about regions that are smaller than vids.
+ }
+ (&RegionTarget::RegionVid(_), &RegionTarget::RegionVid(_)) => {
+ if let Entry::Occupied(v) = vid_map.entry(*smaller) {
+ let smaller_deps = v.into_mut();
+ smaller_deps.larger.insert(*larger);
+ smaller_deps.larger.remove(&target);
+ }
+
+ if let Entry::Occupied(v) = vid_map.entry(*larger) {
+ let larger_deps = v.into_mut();
+ larger_deps.smaller.insert(*smaller);
+ larger_deps.smaller.remove(&target);
+ }
+ }
+ }
+ }
+ }
+ }
+ finished_map
+ }
+
+ fn is_param_no_infer(&self, substs: SubstsRef<'_>) -> bool {
+ self.is_of_param(substs.type_at(0)) && !substs.types().any(|t| t.has_infer_types())
+ }
+
+ pub fn is_of_param(&self, ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Param(_) => true,
+ ty::Projection(p) => self.is_of_param(p.self_ty()),
+ _ => false,
+ }
+ }
+
+ fn is_self_referential_projection(&self, p: ty::PolyProjectionPredicate<'_>) -> bool {
+ if let Term::Ty(ty) = p.term().skip_binder() {
+ matches!(ty.kind(), ty::Projection(proj) if proj == &p.skip_binder().projection_ty)
+ } else {
+ false
+ }
+ }
+
+ fn evaluate_nested_obligations(
+ &self,
+ ty: Ty<'_>,
+ nested: impl Iterator<Item = Obligation<'tcx, ty::Predicate<'tcx>>>,
+ computed_preds: &mut FxHashSet<ty::Predicate<'tcx>>,
+ fresh_preds: &mut FxHashSet<ty::Predicate<'tcx>>,
+ predicates: &mut VecDeque<ty::PolyTraitPredicate<'tcx>>,
+ select: &mut SelectionContext<'_, 'tcx>,
+ only_projections: bool,
+ ) -> bool {
+ let dummy_cause = ObligationCause::dummy();
+
+ for obligation in nested {
+ let is_new_pred =
+ fresh_preds.insert(self.clean_pred(select.infcx(), obligation.predicate));
+
+ // Resolve any inference variables that we can, to help selection succeed
+ let predicate = select.infcx().resolve_vars_if_possible(obligation.predicate);
+
+ // We only add a predicate as a user-displayable bound if
+ // it involves a generic parameter, and doesn't contain
+ // any inference variables.
+ //
+ // Displaying a bound involving a concrete type (instead of a generic
+ // parameter) would be pointless, since it's always true
+ // (e.g. u8: Copy)
+ // Displaying an inference variable is impossible, since they're
+ // an internal compiler detail without a defined visual representation
+ //
+ // We check this by calling is_of_param on the relevant types
+ // from the various possible predicates
+
+ let bound_predicate = predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(p) => {
+ // Add this to `predicates` so that we end up calling `select`
+ // with it. If this predicate ends up being unimplemented,
+ // then `evaluate_predicates` will handle adding it to the `ParamEnv`
+ // if possible.
+ predicates.push_back(bound_predicate.rebind(p));
+ }
+ ty::PredicateKind::Projection(p) => {
+ let p = bound_predicate.rebind(p);
+ debug!(
+ "evaluate_nested_obligations: examining projection predicate {:?}",
+ predicate
+ );
+
+ // As described above, we only want to display
+ // bounds which include a generic parameter but don't include
+ // an inference variable.
+ // Additionally, we check if we've seen this predicate before,
+ // to avoid rendering duplicate bounds to the user.
+ if self.is_param_no_infer(p.skip_binder().projection_ty.substs)
+ && !p.term().skip_binder().has_infer_types()
+ && is_new_pred
+ {
+ debug!(
+ "evaluate_nested_obligations: adding projection predicate \
+ to computed_preds: {:?}",
+ predicate
+ );
+
+ // Under unusual circumstances, we can end up with a self-referential
+ // projection predicate. For example:
+ // <T as MyType>::Value == <T as MyType>::Value
+ // Not only is displaying this to the user pointless,
+ // having it in the ParamEnv will cause an issue if we try to call
+ // poly_project_and_unify_type on the predicate, since this kind of
+ // predicate will normally never end up in a ParamEnv.
+ //
+ // For these reasons, we ignore these weird predicates,
+ // ensuring that we're able to properly synthesize an auto trait impl
+ if self.is_self_referential_projection(p) {
+ debug!(
+ "evaluate_nested_obligations: encountered a projection
+ predicate equating a type with itself! Skipping"
+ );
+ } else {
+ self.add_user_pred(computed_preds, predicate);
+ }
+ }
+
+ // There are three possible cases when we project a predicate:
+ //
+ // 1. We encounter an error. This means that it's impossible for
+ // our current type to implement the auto trait - there's no bound
+ // that we could add to our ParamEnv that would 'fix' this kind
+ // of error, as it's not caused by an unimplemented type.
+ //
+ // 2. We successfully project the predicate (Ok(Some(_))), generating
+ // some subobligations. We then process these subobligations
+ // like any other generated sub-obligations.
+ //
+ // 3. We receive an 'ambiguous' result (Ok(None))
+ // If we were actually trying to compile a crate,
+ // we would need to re-process this obligation later.
+ // However, all we care about is finding out what bounds
+ // are needed for our type to implement a particular auto trait.
+ // We've already added this obligation to our computed ParamEnv
+ // above (if it was necessary). Therefore, we don't need
+ // to do any further processing of the obligation.
+ //
+ // Note that we *must* try to project *all* projection predicates
+ // we encounter, even ones without inference variables.
+ // This ensures that we detect any projection errors,
+ // which indicate that our type can *never* implement the given
+ // auto trait. In that case, we will generate an explicit negative
+ // impl (e.g. 'impl !Send for MyType'). However, we don't
+ // try to process any of the generated subobligations -
+ // they contain no new information, since we already know
+ // that our type implements the projected-through trait,
+ // and can lead to weird region issues.
+ //
+ // Normally, we'll generate a negative impl as a result of encountering
+ // a type with an explicit negative impl of an auto trait
+ // (for example, raw pointers have !Send and !Sync impls)
+ // However, through some **interesting** manipulations of the type
+ // system, it's actually possible to write a type that never
+ // implements an auto trait due to a projection error, not a normal
+ // negative impl error. To properly handle this case, we need
+ // to ensure that we catch any potential projection errors,
+ // and turn them into an explicit negative impl for our type.
+ debug!("Projecting and unifying projection predicate {:?}", predicate);
+
+ match project::poly_project_and_unify_type(select, &obligation.with(p)) {
+ ProjectAndUnifyResult::MismatchedProjectionTypes(e) => {
+ debug!(
+ "evaluate_nested_obligations: Unable to unify predicate \
+ '{:?}' '{:?}', bailing out",
+ ty, e
+ );
+ return false;
+ }
+ ProjectAndUnifyResult::Recursive => {
+ debug!("evaluate_nested_obligations: recursive projection predicate");
+ return false;
+ }
+ ProjectAndUnifyResult::Holds(v) => {
+ // We only care about sub-obligations
+ // when we started out trying to unify
+ // some inference variables. See the comment above
+ // for more information
+ if p.term().skip_binder().has_infer_types() {
+ if !self.evaluate_nested_obligations(
+ ty,
+ v.into_iter(),
+ computed_preds,
+ fresh_preds,
+ predicates,
+ select,
+ only_projections,
+ ) {
+ return false;
+ }
+ }
+ }
+ ProjectAndUnifyResult::FailedNormalization => {
+                            // It's ok not to make progress when we have no inference variables -
+ // in that case, we were only performing unification to check if an
+ // error occurred (which would indicate that it's impossible for our
+ // type to implement the auto trait).
+ // However, we should always make progress (either by generating
+ // subobligations or getting an error) when we started off with
+ // inference variables
+ if p.term().skip_binder().has_infer_types() {
+ panic!("Unexpected result when selecting {:?} {:?}", ty, obligation)
+ }
+ }
+ }
+ }
+ ty::PredicateKind::RegionOutlives(binder) => {
+ let binder = bound_predicate.rebind(binder);
+ select.infcx().region_outlives_predicate(&dummy_cause, binder)
+ }
+ ty::PredicateKind::TypeOutlives(binder) => {
+ let binder = bound_predicate.rebind(binder);
+ match (
+ binder.no_bound_vars(),
+ binder.map_bound_ref(|pred| pred.0).no_bound_vars(),
+ ) {
+ (None, Some(t_a)) => {
+ select.infcx().register_region_obligation_with_cause(
+ t_a,
+ select.infcx().tcx.lifetimes.re_static,
+ &dummy_cause,
+ );
+ }
+ (Some(ty::OutlivesPredicate(t_a, r_b)), _) => {
+ select.infcx().register_region_obligation_with_cause(
+ t_a,
+ r_b,
+ &dummy_cause,
+ );
+ }
+ _ => {}
+ };
+ }
+ ty::PredicateKind::ConstEquate(c1, c2) => {
+ let evaluate = |c: ty::Const<'tcx>| {
+ if let ty::ConstKind::Unevaluated(unevaluated) = c.kind() {
+ match select.infcx().const_eval_resolve(
+ obligation.param_env,
+ unevaluated,
+ Some(obligation.cause.span),
+ ) {
+ Ok(Some(valtree)) => {
+ Ok(ty::Const::from_value(select.tcx(), valtree, c.ty()))
+ }
+ Ok(None) => {
+ let tcx = self.tcx;
+ let def_id = unevaluated.def.did;
+                                    let reported = tcx
+                                        .sess
+                                        .struct_span_err(
+                                            tcx.def_span(def_id),
+                                            &format!(
+                                                "unable to construct a constant value for \
+                                                 the unevaluated constant {:?}",
+                                                unevaluated
+                                            ),
+                                        )
+                                        .emit();
+
+ Err(ErrorHandled::Reported(reported))
+ }
+ Err(err) => Err(err),
+ }
+ } else {
+ Ok(c)
+ }
+ };
+
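+                    // If both constants evaluate, require the results to unify; any
+                    // failure means this `ConstEquate` predicate cannot be satisfied,
+                    // so we bail out of the analysis for this type.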
+ match (evaluate(c1), evaluate(c2)) {
+ (Ok(c1), Ok(c2)) => {
+ match select
+ .infcx()
+ .at(&obligation.cause, obligation.param_env)
+ .eq(c1, c2)
+ {
+ Ok(_) => (),
+ Err(_) => return false,
+ }
+ }
+ _ => return false,
+ }
+ }
+ // There's not really much we can do with these predicates -
+ // we start out with a `ParamEnv` with no inference variables,
+ // and these don't correspond to adding any new bounds to
+ // the `ParamEnv`.
+ ty::PredicateKind::WellFormed(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => {}
+ };
+ }
+ true
+ }
+
+ pub fn clean_pred(
+ &self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ p: ty::Predicate<'tcx>,
+ ) -> ty::Predicate<'tcx> {
+ infcx.freshen(p)
+ }
+}
+
+// Replaces all `ReVar`s in a type with concrete `ty::Region`s, using the provided map.
+pub struct RegionReplacer<'a, 'tcx> {
+ vid_to_region: &'a FxHashMap<ty::RegionVid, ty::Region<'tcx>>,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for RegionReplacer<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ (match *r {
+ ty::ReVar(vid) => self.vid_to_region.get(&vid).cloned(),
+ _ => None,
+ })
+ .unwrap_or_else(|| r.super_fold_with(self))
+ }
+}
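+
+// A hedged usage sketch (illustrative, not part of the original file): after
+// collecting a `vid_to_region` map during region resolution, a value can be
+// folded with the replacer, which rewrites each `ReVar` it encounters:
+//
+//     let mut replacer = RegionReplacer { vid_to_region: &map, tcx };
+//     let fixed_ty = ty.fold_with(&mut replacer);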
diff --git a/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs b/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs
new file mode 100644
index 000000000..9ef7ac9a8
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/chalk_fulfill.rs
@@ -0,0 +1,163 @@
+//! Defines a Chalk-based `TraitEngine`
+
+use crate::infer::canonical::OriginalQueryValues;
+use crate::infer::InferCtxt;
+use crate::traits::query::NoSolution;
+use crate::traits::{
+ ChalkEnvironmentAndGoal, FulfillmentError, FulfillmentErrorCode, ObligationCause,
+ PredicateObligation, SelectionError, TraitEngine,
+};
+use rustc_data_structures::fx::{FxHashMap, FxIndexSet};
+use rustc_middle::ty::{self, Ty, TypeVisitable};
+
+pub struct FulfillmentContext<'tcx> {
+ obligations: FxIndexSet<PredicateObligation<'tcx>>,
+
+ relationships: FxHashMap<ty::TyVid, ty::FoundRelationships>,
+}
+
+impl FulfillmentContext<'_> {
+ pub(crate) fn new() -> Self {
+ FulfillmentContext {
+ obligations: FxIndexSet::default(),
+ relationships: FxHashMap::default(),
+ }
+ }
+}
+
+impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
+ fn normalize_projection_type(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ _param_env: ty::ParamEnv<'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ _cause: ObligationCause<'tcx>,
+ ) -> Ty<'tcx> {
+ infcx.tcx.mk_ty(ty::Projection(projection_ty))
+ }
+
+ fn register_predicate_obligation(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ obligation: PredicateObligation<'tcx>,
+ ) {
+ assert!(!infcx.is_in_snapshot());
+ let obligation = infcx.resolve_vars_if_possible(obligation);
+
+ super::relationships::update(self, infcx, &obligation);
+
+ self.obligations.insert(obligation);
+ }
+
+ fn select_all_or_error(&mut self, infcx: &InferCtxt<'_, 'tcx>) -> Vec<FulfillmentError<'tcx>> {
+ {
+ let errors = self.select_where_possible(infcx);
+
+ if !errors.is_empty() {
+ return errors;
+ }
+ }
+
+ // any remaining obligations are errors
+ self.obligations
+ .iter()
+ .map(|obligation| FulfillmentError {
+ obligation: obligation.clone(),
+ code: FulfillmentErrorCode::CodeAmbiguity,
+                // FIXME - does Chalk have a notion of 'root obligation'?
+ // This is just for diagnostics, so it's okay if this is wrong
+ root_obligation: obligation.clone(),
+ })
+ .collect()
+ }
+
+ fn select_where_possible(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ ) -> Vec<FulfillmentError<'tcx>> {
+ assert!(!infcx.is_in_snapshot());
+
+ let mut errors = Vec::new();
+ let mut next_round = FxIndexSet::default();
+ let mut making_progress;
+
+ loop {
+ making_progress = false;
+
+ // We iterate over all obligations, and record if we are able
+ // to unambiguously prove at least one obligation.
+ for obligation in self.obligations.drain(..) {
+ let obligation = infcx.resolve_vars_if_possible(obligation);
+ let environment = obligation.param_env.caller_bounds();
+ let goal = ChalkEnvironmentAndGoal { environment, goal: obligation.predicate };
+ let mut orig_values = OriginalQueryValues::default();
+ if goal.references_error() {
+ continue;
+ }
+
+ let canonical_goal =
+ infcx.canonicalize_query_preserving_universes(goal, &mut orig_values);
+
+ match infcx.tcx.evaluate_goal(canonical_goal) {
+ Ok(response) => {
+ if response.is_proven() {
+ making_progress = true;
+
+ match infcx.instantiate_query_response_and_region_obligations(
+ &obligation.cause,
+ obligation.param_env,
+ &orig_values,
+ &response,
+ ) {
+ Ok(infer_ok) => next_round.extend(
+ infer_ok.obligations.into_iter().map(|obligation| {
+ assert!(!infcx.is_in_snapshot());
+ infcx.resolve_vars_if_possible(obligation)
+ }),
+ ),
+
+ Err(_err) => errors.push(FulfillmentError {
+ obligation: obligation.clone(),
+ code: FulfillmentErrorCode::CodeSelectionError(
+ SelectionError::Unimplemented,
+ ),
+                                    // FIXME - does Chalk have a notion of 'root obligation'?
+ // This is just for diagnostics, so it's okay if this is wrong
+ root_obligation: obligation,
+ }),
+ }
+ } else {
+ // Ambiguous: retry at next round.
+ next_round.insert(obligation);
+ }
+ }
+
+ Err(NoSolution) => errors.push(FulfillmentError {
+ obligation: obligation.clone(),
+ code: FulfillmentErrorCode::CodeSelectionError(
+ SelectionError::Unimplemented,
+ ),
+                        // FIXME - does Chalk have a notion of 'root obligation'?
+ // This is just for diagnostics, so it's okay if this is wrong
+ root_obligation: obligation,
+ }),
+ }
+ }
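+            // Swap the buffers: obligations deferred into `next_round` become
+            // the work list for the next iteration, and `next_round` gets the
+            // (drained, now empty) old set back for reuse.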
+ next_round = std::mem::replace(&mut self.obligations, next_round);
+
+ if !making_progress {
+ break;
+ }
+ }
+
+ errors
+ }
+
+ fn pending_obligations(&self) -> Vec<PredicateObligation<'tcx>> {
+ self.obligations.iter().cloned().collect()
+ }
+
+ fn relationships(&mut self) -> &mut FxHashMap<ty::TyVid, ty::FoundRelationships> {
+ &mut self.relationships
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/codegen.rs b/compiler/rustc_trait_selection/src/traits/codegen.rs
new file mode 100644
index 000000000..c0700748c
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/codegen.rs
@@ -0,0 +1,80 @@
+// This file contains various trait resolution methods used by codegen.
+// They all assume that regions can be erased and that types are monomorphic. It
+// seems likely that they should eventually be merged into more
+// general routines.
+
+use crate::infer::{DefiningAnchor, TyCtxtInferExt};
+use crate::traits::{
+ ImplSource, Obligation, ObligationCause, SelectionContext, TraitEngine, TraitEngineExt,
+ Unimplemented,
+};
+use rustc_middle::traits::CodegenObligationError;
+use rustc_middle::ty::{self, TyCtxt};
+
+/// Attempts to resolve an obligation to an `ImplSource`. The result is
+/// a shallow `ImplSource` resolution, meaning that we do not
+/// (necessarily) resolve all nested obligations on the impl. Note
+/// that type check should guarantee to us that all nested
+/// obligations *could be* resolved if we wanted to.
+///
+/// This also expects that `trait_ref` is fully normalized.
+#[instrument(level = "debug", skip(tcx))]
+pub fn codegen_fulfill_obligation<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (param_env, trait_ref): (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>),
+) -> Result<&'tcx ImplSource<'tcx, ()>, CodegenObligationError> {
+ // We expect the input to be fully normalized.
+ debug_assert_eq!(trait_ref, tcx.normalize_erasing_regions(param_env, trait_ref));
+
+ // Do the initial selection for the obligation. This yields the
+ // shallow result we are looking for -- that is, what specific impl.
+ let mut infcx_builder =
+ tcx.infer_ctxt().ignoring_regions().with_opaque_type_inference(DefiningAnchor::Bubble);
+ infcx_builder.enter(|infcx| {
+ //~^ HACK `Bubble` is required for
+ // this test to pass: type-alias-impl-trait/assoc-projection-ice.rs
+ let mut selcx = SelectionContext::new(&infcx);
+
+ let obligation_cause = ObligationCause::dummy();
+ let obligation =
+ Obligation::new(obligation_cause, param_env, trait_ref.to_poly_trait_predicate());
+
+ let selection = match selcx.select(&obligation) {
+ Ok(Some(selection)) => selection,
+ Ok(None) => return Err(CodegenObligationError::Ambiguity),
+ Err(Unimplemented) => return Err(CodegenObligationError::Unimplemented),
+ Err(e) => {
+ bug!("Encountered error `{:?}` selecting `{:?}` during codegen", e, trait_ref)
+ }
+ };
+
+ debug!(?selection);
+
+ // Currently, we use a fulfillment context to completely resolve
+ // all nested obligations. This is because they can inform the
+ // inference of the impl's type parameters.
+ let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(tcx);
+ let impl_source = selection.map(|predicate| {
+ fulfill_cx.register_predicate_obligation(&infcx, predicate);
+ });
+
+ // In principle, we only need to do this so long as `impl_source`
+ // contains unbound type parameters. It could be a slight
+ // optimization to stop iterating early.
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+ if !errors.is_empty() {
+ return Err(CodegenObligationError::FulfillmentError);
+ }
+
+ let impl_source = infcx.resolve_vars_if_possible(impl_source);
+ let impl_source = infcx.tcx.erase_regions(impl_source);
+
+ // Opaque types may have gotten their hidden types constrained, but we can ignore them safely
+ // as they will get constrained elsewhere, too.
+ // (ouz-a) This is required for `type-alias-impl-trait/assoc-projection-ice.rs` to pass
+ let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+
+ debug!("Cache miss: {trait_ref:?} => {impl_source:?}");
+ Ok(&*tcx.arena.alloc(impl_source))
+ })
+}
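+
+// Hedged illustration (assumed caller shape, not part of this file): codegen
+// typically reaches this function through a query, roughly
+//
+//     let impl_source = tcx.codegen_fulfill_obligation((param_env, trait_ref))?;
+//
+// where `trait_ref` must already be monomorphic and fully normalized, as the
+// `debug_assert_eq!` above enforces.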
diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs
new file mode 100644
index 000000000..1c8cdf4ca
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/coherence.rs
@@ -0,0 +1,747 @@
+//! See Rustc Dev Guide chapters on [trait-resolution] and [trait-specialization] for more info on
+//! how this works.
+//!
+//! [trait-resolution]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html
+//! [trait-specialization]: https://rustc-dev-guide.rust-lang.org/traits/specialization.html
+
+use crate::infer::outlives::env::OutlivesEnvironment;
+use crate::infer::{CombinedSnapshot, InferOk};
+use crate::traits::select::IntercrateAmbiguityCause;
+use crate::traits::util::impl_subject_and_oblig;
+use crate::traits::SkipLeakCheck;
+use crate::traits::{
+ self, FulfillmentContext, Normalized, Obligation, ObligationCause, PredicateObligation,
+ PredicateObligations, SelectionContext, TraitEngineExt,
+};
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_errors::Diagnostic;
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_infer::traits::{util, TraitEngine};
+use rustc_middle::traits::specialization_graph::OverlapMode;
+use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, ImplSubject, Ty, TyCtxt, TypeVisitor};
+use rustc_span::symbol::sym;
+use rustc_span::DUMMY_SP;
+use std::fmt::Debug;
+use std::iter;
+use std::ops::ControlFlow;
+
+/// Whether we do the orphan check relative to this crate or
+/// to some remote crate.
+#[derive(Copy, Clone, Debug)]
+enum InCrate {
+ Local,
+ Remote,
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum Conflict {
+ Upstream,
+ Downstream,
+}
+
+pub struct OverlapResult<'tcx> {
+ pub impl_header: ty::ImplHeader<'tcx>,
+ pub intercrate_ambiguity_causes: FxIndexSet<IntercrateAmbiguityCause>,
+
+ /// `true` if the overlap might've been permitted before the shift
+ /// to universes.
+ pub involves_placeholder: bool,
+}
+
+pub fn add_placeholder_note(err: &mut Diagnostic) {
+ err.note(
+ "this behavior recently changed as a result of a bug fix; \
+ see rust-lang/rust#56105 for details",
+ );
+}
+
+/// If there are types that satisfy both impls, invokes `on_overlap`
+/// with a suitably-freshened `ImplHeader` with those types
+/// substituted. Otherwise, invokes `no_overlap`.
+#[instrument(skip(tcx, skip_leak_check, on_overlap, no_overlap), level = "debug")]
+pub fn overlapping_impls<F1, F2, R>(
+ tcx: TyCtxt<'_>,
+ impl1_def_id: DefId,
+ impl2_def_id: DefId,
+ skip_leak_check: SkipLeakCheck,
+ overlap_mode: OverlapMode,
+ on_overlap: F1,
+ no_overlap: F2,
+) -> R
+where
+ F1: FnOnce(OverlapResult<'_>) -> R,
+ F2: FnOnce() -> R,
+{
+ // Before doing expensive operations like entering an inference context, do
+ // a quick check via fast_reject to tell if the impl headers could possibly
+ // unify.
+ let drcx = DeepRejectCtxt { treat_obligation_params: TreatParams::AsInfer };
+ let impl1_ref = tcx.impl_trait_ref(impl1_def_id);
+ let impl2_ref = tcx.impl_trait_ref(impl2_def_id);
+ let may_overlap = match (impl1_ref, impl2_ref) {
+ (Some(a), Some(b)) => iter::zip(a.substs, b.substs)
+ .all(|(arg1, arg2)| drcx.generic_args_may_unify(arg1, arg2)),
+ (None, None) => {
+ let self_ty1 = tcx.type_of(impl1_def_id);
+ let self_ty2 = tcx.type_of(impl2_def_id);
+ drcx.types_may_unify(self_ty1, self_ty2)
+ }
+ _ => bug!("unexpected impls: {impl1_def_id:?} {impl2_def_id:?}"),
+ };
+
+ if !may_overlap {
+ // Some types involved are definitely different, so the impls couldn't possibly overlap.
+ debug!("overlapping_impls: fast_reject early-exit");
+ return no_overlap();
+ }
+
+ let overlaps = tcx.infer_ctxt().enter(|infcx| {
+ let selcx = &mut SelectionContext::intercrate(&infcx);
+ overlap(selcx, skip_leak_check, impl1_def_id, impl2_def_id, overlap_mode).is_some()
+ });
+
+ if !overlaps {
+ return no_overlap();
+ }
+
+ // In the case where we detect an error, run the check again, but
+ // this time tracking intercrate ambiguity causes for better
+ // diagnostics. (These take time and can lead to false errors.)
+ tcx.infer_ctxt().enter(|infcx| {
+ let selcx = &mut SelectionContext::intercrate(&infcx);
+ selcx.enable_tracking_intercrate_ambiguity_causes();
+ on_overlap(
+ overlap(selcx, skip_leak_check, impl1_def_id, impl2_def_id, overlap_mode).unwrap(),
+ )
+ })
+}
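+
+// Hedged example of the situation this detects (illustrative only):
+//
+//     impl SomeTrait for Vec<u32> {}     // impl1
+//     impl<T> SomeTrait for Vec<T> {}    // impl2
+//
+// The two headers unify (with `T := u32`), so absent specialization this pair
+// reaches `on_overlap`.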
+
+fn with_fresh_ty_vars<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ impl_def_id: DefId,
+) -> ty::ImplHeader<'tcx> {
+ let tcx = selcx.tcx();
+ let impl_substs = selcx.infcx().fresh_substs_for_item(DUMMY_SP, impl_def_id);
+
+ let header = ty::ImplHeader {
+ impl_def_id,
+ self_ty: tcx.bound_type_of(impl_def_id).subst(tcx, impl_substs),
+ trait_ref: tcx.bound_impl_trait_ref(impl_def_id).map(|i| i.subst(tcx, impl_substs)),
+ predicates: tcx.predicates_of(impl_def_id).instantiate(tcx, impl_substs).predicates,
+ };
+
+ let Normalized { value: mut header, obligations } =
+ traits::normalize(selcx, param_env, ObligationCause::dummy(), header);
+
+ header.predicates.extend(obligations.into_iter().map(|o| o.predicate));
+ header
+}
+
+/// Can both impl `a` and impl `b` be satisfied by a common type (including
+/// where-clauses)? If so, returns an `ImplHeader` that unifies the two impls.
+fn overlap<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ skip_leak_check: SkipLeakCheck,
+ impl1_def_id: DefId,
+ impl2_def_id: DefId,
+ overlap_mode: OverlapMode,
+) -> Option<OverlapResult<'tcx>> {
+ debug!(
+ "overlap(impl1_def_id={:?}, impl2_def_id={:?}, overlap_mode={:?})",
+ impl1_def_id, impl2_def_id, overlap_mode
+ );
+
+ selcx.infcx().probe_maybe_skip_leak_check(skip_leak_check.is_yes(), |snapshot| {
+ overlap_within_probe(selcx, impl1_def_id, impl2_def_id, overlap_mode, snapshot)
+ })
+}
+
+fn overlap_within_probe<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ impl1_def_id: DefId,
+ impl2_def_id: DefId,
+ overlap_mode: OverlapMode,
+ snapshot: &CombinedSnapshot<'_, 'tcx>,
+) -> Option<OverlapResult<'tcx>> {
+ let infcx = selcx.infcx();
+
+ if overlap_mode.use_negative_impl() {
+ if negative_impl(selcx, impl1_def_id, impl2_def_id)
+ || negative_impl(selcx, impl2_def_id, impl1_def_id)
+ {
+ return None;
+ }
+ }
+
+ // For the purposes of this check, we don't bring any placeholder
+ // types into scope; instead, we replace the generic types with
+ // fresh type variables, and hence we do our evaluations in an
+ // empty environment.
+ let param_env = ty::ParamEnv::empty();
+
+ let impl1_header = with_fresh_ty_vars(selcx, param_env, impl1_def_id);
+ let impl2_header = with_fresh_ty_vars(selcx, param_env, impl2_def_id);
+
+ let obligations = equate_impl_headers(selcx, &impl1_header, &impl2_header)?;
+ debug!("overlap: unification check succeeded");
+
+ if overlap_mode.use_implicit_negative() {
+ if implicit_negative(selcx, param_env, &impl1_header, impl2_header, obligations) {
+ return None;
+ }
+ }
+
+    // We disable the leak check when creating the `snapshot` by using
+    // `infcx.probe_maybe_skip_leak_check`.
+ if infcx.leak_check(true, snapshot).is_err() {
+ debug!("overlap: leak check failed");
+ return None;
+ }
+
+ let intercrate_ambiguity_causes = selcx.take_intercrate_ambiguity_causes();
+ debug!("overlap: intercrate_ambiguity_causes={:#?}", intercrate_ambiguity_causes);
+
+ let involves_placeholder =
+ matches!(selcx.infcx().region_constraints_added_in_snapshot(snapshot), Some(true));
+
+ let impl_header = selcx.infcx().resolve_vars_if_possible(impl1_header);
+ Some(OverlapResult { impl_header, intercrate_ambiguity_causes, involves_placeholder })
+}
+
+fn equate_impl_headers<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ impl1_header: &ty::ImplHeader<'tcx>,
+ impl2_header: &ty::ImplHeader<'tcx>,
+) -> Option<PredicateObligations<'tcx>> {
+ // Do `a` and `b` unify? If not, no overlap.
+    debug!("equate_impl_headers(impl1_header={:?}, impl2_header={:?})", impl1_header, impl2_header);
+ selcx
+ .infcx()
+ .at(&ObligationCause::dummy(), ty::ParamEnv::empty())
+ .eq_impl_headers(impl1_header, impl2_header)
+ .map(|infer_ok| infer_ok.obligations)
+ .ok()
+}
+
+/// Given impl1 and impl2, check if both impls can be satisfied by a common type (including
+/// where-clauses). If so, return false; otherwise return true, as the two impls are disjoint.
+fn implicit_negative<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ impl1_header: &ty::ImplHeader<'tcx>,
+ impl2_header: ty::ImplHeader<'tcx>,
+ obligations: PredicateObligations<'tcx>,
+) -> bool {
+ // There's no overlap if obligations are unsatisfiable or if the obligation negated is
+ // satisfied.
+ //
+ // For example, given these two impl headers:
+ //
+ // `impl<'a> From<&'a str> for Box<dyn Error>`
+ // `impl<E> From<E> for Box<dyn Error> where E: Error`
+ //
+ // So we have:
+ //
+ // `Box<dyn Error>: From<&'?a str>`
+ // `Box<dyn Error>: From<?E>`
+ //
+ // After equating the two headers:
+ //
+ // `Box<dyn Error> = Box<dyn Error>`
+    // So `?E = &'?a str`, and the where clause gives us the obligation `&'?a str: Error`.
+ //
+    // If the obligation `&'?a str: Error` holds, it means that there's overlap. If it
+    // doesn't hold, we check whether `&'?a str: !Error` holds; if that doesn't hold either,
+    // there's still overlap, because an impl for `&'?a str: Error` could be added at some point.
+ debug!(
+ "implicit_negative(impl1_header={:?}, impl2_header={:?}, obligations={:?})",
+ impl1_header, impl2_header, obligations
+ );
+ let infcx = selcx.infcx();
+ let opt_failing_obligation = impl1_header
+ .predicates
+ .iter()
+ .copied()
+ .chain(impl2_header.predicates)
+ .map(|p| infcx.resolve_vars_if_possible(p))
+ .map(|p| Obligation {
+ cause: ObligationCause::dummy(),
+ param_env,
+ recursion_depth: 0,
+ predicate: p,
+ })
+ .chain(obligations)
+ .find(|o| !selcx.predicate_may_hold_fatal(o));
+
+ if let Some(failing_obligation) = opt_failing_obligation {
+ debug!("overlap: obligation unsatisfiable {:?}", failing_obligation);
+ true
+ } else {
+ false
+ }
+}
+
+/// Given impl1 and impl2, check if the two impls can never be satisfied by a common type
+/// (including where-clauses). If so, return true (they are disjoint); otherwise return false.
+fn negative_impl<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ impl1_def_id: DefId,
+ impl2_def_id: DefId,
+) -> bool {
+ debug!("negative_impl(impl1_def_id={:?}, impl2_def_id={:?})", impl1_def_id, impl2_def_id);
+ let tcx = selcx.infcx().tcx;
+
+ // Create an infcx, taking the predicates of impl1 as assumptions:
+ tcx.infer_ctxt().enter(|infcx| {
+ // create a parameter environment corresponding to a (placeholder) instantiation of impl1
+ let impl_env = tcx.param_env(impl1_def_id);
+ let subject1 = match traits::fully_normalize(
+ &infcx,
+ FulfillmentContext::new(),
+ ObligationCause::dummy(),
+ impl_env,
+ tcx.impl_subject(impl1_def_id),
+ ) {
+ Ok(s) => s,
+ Err(err) => bug!("failed to fully normalize {:?}: {:?}", impl1_def_id, err),
+ };
+
+ // Attempt to prove that impl2 applies, given all of the above.
+ let selcx = &mut SelectionContext::new(&infcx);
+ let impl2_substs = infcx.fresh_substs_for_item(DUMMY_SP, impl2_def_id);
+ let (subject2, obligations) =
+ impl_subject_and_oblig(selcx, impl_env, impl2_def_id, impl2_substs);
+
+ !equate(&infcx, impl_env, subject1, subject2, obligations)
+ })
+}
+
+fn equate<'cx, 'tcx>(
+ infcx: &InferCtxt<'cx, 'tcx>,
+ impl_env: ty::ParamEnv<'tcx>,
+ subject1: ImplSubject<'tcx>,
+ subject2: ImplSubject<'tcx>,
+ obligations: impl Iterator<Item = PredicateObligation<'tcx>>,
+) -> bool {
+ // do the impls unify? If not, not disjoint.
+ let Ok(InferOk { obligations: more_obligations, .. }) =
+ infcx.at(&ObligationCause::dummy(), impl_env).eq(subject1, subject2)
+ else {
+ debug!("explicit_disjoint: {:?} does not unify with {:?}", subject1, subject2);
+ return true;
+ };
+
+ let selcx = &mut SelectionContext::new(&infcx);
+ let opt_failing_obligation = obligations
+ .into_iter()
+ .chain(more_obligations)
+ .find(|o| negative_impl_exists(selcx, impl_env, o));
+
+ if let Some(failing_obligation) = opt_failing_obligation {
+ debug!("overlap: obligation unsatisfiable {:?}", failing_obligation);
+ false
+ } else {
+ true
+ }
+}
+
+/// Try to prove that a negative impl exists for the given obligation and its super predicates.
+#[instrument(level = "debug", skip(selcx))]
+fn negative_impl_exists<'cx, 'tcx>(
+ selcx: &SelectionContext<'cx, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ o: &PredicateObligation<'tcx>,
+) -> bool {
+ let infcx = &selcx.infcx().fork();
+
+ if resolve_negative_obligation(infcx, param_env, o) {
+ return true;
+ }
+
+ // Try to prove a negative obligation exists for super predicates
+ for o in util::elaborate_predicates(infcx.tcx, iter::once(o.predicate)) {
+ if resolve_negative_obligation(infcx, param_env, &o) {
+ return true;
+ }
+ }
+
+ false
+}
+
+#[instrument(level = "debug", skip(infcx))]
+fn resolve_negative_obligation<'cx, 'tcx>(
+ infcx: &InferCtxt<'cx, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ o: &PredicateObligation<'tcx>,
+) -> bool {
+ let tcx = infcx.tcx;
+
+ let Some(o) = o.flip_polarity(tcx) else {
+ return false;
+ };
+
+ let mut fulfillment_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+ fulfillment_cx.register_predicate_obligation(infcx, o);
+
+ let errors = fulfillment_cx.select_all_or_error(infcx);
+
+ if !errors.is_empty() {
+ return false;
+ }
+
+ // FIXME -- also add "assumed to be well formed" types into the `outlives_env`
+ let outlives_env = OutlivesEnvironment::new(param_env);
+ infcx.process_registered_region_obligations(outlives_env.region_bound_pairs(), param_env);
+
+ infcx.resolve_regions(&outlives_env).is_empty()
+}
+
+pub fn trait_ref_is_knowable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+) -> Option<Conflict> {
+ debug!("trait_ref_is_knowable(trait_ref={:?})", trait_ref);
+ if orphan_check_trait_ref(tcx, trait_ref, InCrate::Remote).is_ok() {
+ // A downstream or cousin crate is allowed to implement some
+ // substitution of this trait-ref.
+ return Some(Conflict::Downstream);
+ }
+
+ if trait_ref_is_local_or_fundamental(tcx, trait_ref) {
+ // This is a local or fundamental trait, so future-compatibility
+ // is no concern. We know that downstream/cousin crates are not
+ // allowed to implement a substitution of this trait ref, which
+ // means impls could only come from dependencies of this crate,
+ // which we already know about.
+ return None;
+ }
+
+ // This is a remote non-fundamental trait, so if another crate
+ // can be the "final owner" of a substitution of this trait-ref,
+ // they are allowed to implement it future-compatibly.
+ //
+ // However, if we are a final owner, then nobody else can be,
+ // and if we are an intermediate owner, then we don't care
+ // about future-compatibility, which means that we're OK if
+ // we are an owner.
+ if orphan_check_trait_ref(tcx, trait_ref, InCrate::Local).is_ok() {
+ debug!("trait_ref_is_knowable: orphan check passed");
+ None
+ } else {
+ debug!("trait_ref_is_knowable: nonlocal, nonfundamental, unowned");
+ Some(Conflict::Upstream)
+ }
+}
+
+pub fn trait_ref_is_local_or_fundamental<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+) -> bool {
+ trait_ref.def_id.krate == LOCAL_CRATE || tcx.has_attr(trait_ref.def_id, sym::fundamental)
+}
+
+pub enum OrphanCheckErr<'tcx> {
+ NonLocalInputType(Vec<(Ty<'tcx>, bool /* Is this the first input type? */)>),
+ UncoveredTy(Ty<'tcx>, Option<Ty<'tcx>>),
+}
+
+/// Checks the coherence orphan rules. `impl_def_id` should be the
+/// `DefId` of a trait impl. To pass, either the trait must be local, or else
+/// two conditions must be satisfied:
+///
+/// 1. All type parameters in `Self` must be "covered" by some local type constructor.
+/// 2. Some local type must appear in `Self`.
+pub fn orphan_check(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Result<(), OrphanCheckErr<'_>> {
+ debug!("orphan_check({:?})", impl_def_id);
+
+    // We only expect this routine to be invoked on implementations
+ // of a trait, not inherent implementations.
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+ debug!("orphan_check: trait_ref={:?}", trait_ref);
+
+ // If the *trait* is local to the crate, ok.
+ if trait_ref.def_id.is_local() {
+ debug!("trait {:?} is local to current crate", trait_ref.def_id);
+ return Ok(());
+ }
+
+ orphan_check_trait_ref(tcx, trait_ref, InCrate::Local)
+}
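+
+// Hedged examples (illustrative; `LocalType` stands for a type defined in the
+// current crate, while `From` and `Vec` are remote):
+//
+//     impl From<LocalType> for Vec<i32> { /* ... */ }  // ok: a local type appears
+//     impl From<u32> for Vec<i32> { /* ... */ }        // error: no local input type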
+
+/// Checks whether a trait-ref is potentially implementable by a crate.
+///
+/// The current rule is that a trait-ref passes the orphan check in a crate C if:
+///
+/// 1. Order the parameters in the trait-ref in subst order - Self first,
+/// others linearly (e.g., `<U as Foo<V, W>>` is U < V < W).
+/// 2. Of these type parameters, there is at least one type parameter
+/// in which, walking the type as a tree, you can reach a type local
+/// to C where all types in-between are fundamental types. Call the
+/// first such parameter the "local key parameter".
+/// - e.g., `Box<LocalType>` is OK, because you can visit LocalType
+/// going through `Box`, which is fundamental.
+/// - similarly, `FundamentalPair<Vec<()>, Box<LocalType>>` is OK for
+/// the same reason.
+/// - but (knowing that `Vec<T>` is non-fundamental, and assuming it's
+/// not local), `Vec<LocalType>` is bad, because `Vec<->` is between
+/// the local type and the type parameter.
+/// 3. Before this local type, no generic type parameter of the impl must
+/// be reachable through fundamental types.
+/// - e.g. `impl<T> Trait<LocalType> for Vec<T>` is fine, as `Vec` is not fundamental.
+/// - while `impl<T> Trait<LocalType> for Box<T>` results in an error, as `T` is
+/// reachable through the fundamental type `Box`.
+/// 4. Every type in the local key parameter not known in C, going
+/// through the parameter's type tree, must appear only as a subtree of
+/// a type local to C, with only fundamental types between the type
+/// local to C and the local key parameter.
+/// - e.g., `Vec<LocalType<T>>>` (or equivalently `Box<Vec<LocalType<T>>>`)
+/// is bad, because the only local type with `T` as a subtree is
+/// `LocalType<T>`, and `Vec<->` is between it and the type parameter.
+/// - similarly, `FundamentalPair<LocalType<T>, T>` is bad, because
+/// the second occurrence of `T` is not a subtree of *any* local type.
+/// - however, `LocalType<Vec<T>>` is OK, because `T` is a subtree of
+/// `LocalType<Vec<T>>`, which is local and has no types between it and
+/// the type parameter.
+///
+/// The orphan rules actually serve several different purposes:
+///
+/// 1. They enable link-safety - i.e., 2 mutually-unknowing crates (where
+/// every type local to one crate is unknown in the other) can't implement
+/// the same trait-ref. This follows because it can be seen that no such
+/// type can orphan-check in 2 such crates.
+///
+/// To check that a local impl follows the orphan rules, we check it in
+/// InCrate::Local mode, using type parameters for the "generic" types.
+///
+/// 2. They ground negative reasoning for coherence. If a user wants to
+/// write both a conditional blanket impl and a specific impl, we need to
+/// make sure they do not overlap. For example, if we write
+/// ```ignore (illustrative)
+/// impl<T> IntoIterator for Vec<T>
+/// impl<T: Iterator> IntoIterator for T
+/// ```
+/// We need to be able to prove that `Vec<$0>: !Iterator` for every type $0.
+/// We can observe that this holds in the current crate, but we need to make
+/// sure this will also hold in all unknown crates (both "independent" crates,
+/// which we need for link-safety, and also child crates, because we don't want
+/// child crates to get errors for impl conflicts in a *dependency*).
+///
+/// For that, we only allow negative reasoning if, for every assignment to the
+/// inference variables, every unknown crate would get an orphan error if they
+/// try to implement this trait-ref. To check for this, we use InCrate::Remote
+/// mode. That is sound because we already know all the impls from known crates.
+///
+/// 3. For non-`#[fundamental]` traits, they guarantee that parent crates can
+/// add "non-blanket" impls without breaking negative reasoning in dependent
+/// crates. This is the "rebalancing coherence" (RFC 1023) restriction.
+///
+/// For that, we allow a crate to perform negative reasoning on
+/// non-local, non-`#[fundamental]` traits only if there's a local key parameter as per (2).
+///
+/// Because we never perform negative reasoning generically (coherence does
+/// not involve type parameters), this can be interpreted as doing the full
+/// orphan check (using InCrate::Local mode), substituting non-local known
+/// types for all inference variables.
+///
+/// This allows for crates to future-compatibly add impls as long as they
+/// can't apply to types with a key parameter in a child crate - applying
+/// the rules, this basically means that every type parameter in the impl
+/// must appear behind a non-fundamental type (because this is not a
+/// type-system requirement, crate owners might also go for "semantic
+/// future-compatibility" involving things such as sealed traits, but
+/// the above requirement is sufficient, and is necessary in "open world"
+/// cases).
+///
+/// Note that this function is never called for types that have both type
+/// parameters and inference variables.
+fn orphan_check_trait_ref<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+ in_crate: InCrate,
+) -> Result<(), OrphanCheckErr<'tcx>> {
+ debug!("orphan_check_trait_ref(trait_ref={:?}, in_crate={:?})", trait_ref, in_crate);
+
+ if trait_ref.needs_infer() && trait_ref.needs_subst() {
+ bug!(
+ "can't orphan check a trait ref with both params and inference variables {:?}",
+ trait_ref
+ );
+ }
+
+ let mut checker = OrphanChecker::new(tcx, in_crate);
+ match trait_ref.visit_with(&mut checker) {
+ ControlFlow::Continue(()) => Err(OrphanCheckErr::NonLocalInputType(checker.non_local_tys)),
+ ControlFlow::Break(OrphanCheckEarlyExit::ParamTy(ty)) => {
+            // Does there exist some local type after the `ParamTy`?
+ checker.search_first_local_ty = true;
+ if let Some(OrphanCheckEarlyExit::LocalTy(local_ty)) =
+ trait_ref.visit_with(&mut checker).break_value()
+ {
+ Err(OrphanCheckErr::UncoveredTy(ty, Some(local_ty)))
+ } else {
+ Err(OrphanCheckErr::UncoveredTy(ty, None))
+ }
+ }
+ ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(_)) => Ok(()),
+ }
+}
+
+struct OrphanChecker<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ in_crate: InCrate,
+ in_self_ty: bool,
+ /// Ignore orphan check failures and exclusively search for the first
+ /// local type.
+ search_first_local_ty: bool,
+ non_local_tys: Vec<(Ty<'tcx>, bool)>,
+}
+
+impl<'tcx> OrphanChecker<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, in_crate: InCrate) -> Self {
+ OrphanChecker {
+ tcx,
+ in_crate,
+ in_self_ty: true,
+ search_first_local_ty: false,
+ non_local_tys: Vec::new(),
+ }
+ }
+
+ fn found_non_local_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<OrphanCheckEarlyExit<'tcx>> {
+ self.non_local_tys.push((t, self.in_self_ty));
+ ControlFlow::CONTINUE
+ }
+
+ fn found_param_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<OrphanCheckEarlyExit<'tcx>> {
+ if self.search_first_local_ty {
+ ControlFlow::CONTINUE
+ } else {
+ ControlFlow::Break(OrphanCheckEarlyExit::ParamTy(t))
+ }
+ }
+
+ fn def_id_is_local(&mut self, def_id: DefId) -> bool {
+ match self.in_crate {
+ InCrate::Local => def_id.is_local(),
+ InCrate::Remote => false,
+ }
+ }
+}
+
+enum OrphanCheckEarlyExit<'tcx> {
+ ParamTy(Ty<'tcx>),
+ LocalTy(Ty<'tcx>),
+}
+
+impl<'tcx> TypeVisitor<'tcx> for OrphanChecker<'tcx> {
+ type BreakTy = OrphanCheckEarlyExit<'tcx>;
+ fn visit_region(&mut self, _r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let result = match *ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::Str
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::RawPtr(..)
+ | ty::Never
+ | ty::Tuple(..)
+ | ty::Projection(..) => self.found_non_local_ty(ty),
+
+ ty::Param(..) => self.found_param_ty(ty),
+
+ ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) => match self.in_crate {
+ InCrate::Local => self.found_non_local_ty(ty),
+ // The inference variable might be unified with a local
+ // type in that remote crate.
+ InCrate::Remote => ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty)),
+ },
+
+ // For fundamental types, we just look inside of them.
+ ty::Ref(_, ty, _) => ty.visit_with(self),
+ ty::Adt(def, substs) => {
+ if self.def_id_is_local(def.did()) {
+ ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty))
+ } else if def.is_fundamental() {
+ substs.visit_with(self)
+ } else {
+ self.found_non_local_ty(ty)
+ }
+ }
+ ty::Foreign(def_id) => {
+ if self.def_id_is_local(def_id) {
+ ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty))
+ } else {
+ self.found_non_local_ty(ty)
+ }
+ }
+ ty::Dynamic(tt, ..) => {
+ let principal = tt.principal().map(|p| p.def_id());
+ if principal.map_or(false, |p| self.def_id_is_local(p)) {
+ ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty))
+ } else {
+ self.found_non_local_ty(ty)
+ }
+ }
+ ty::Error(_) => ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty)),
+ ty::Closure(..) | ty::Generator(..) | ty::GeneratorWitness(..) => {
+ self.tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ format!("ty_is_local invoked on closure or generator: {:?}", ty),
+ );
+ ControlFlow::Break(OrphanCheckEarlyExit::LocalTy(ty))
+ }
+ ty::Opaque(..) => {
+ // This merits some explanation.
+ // Normally, opaque types are not involved when performing
+ // coherence checking, since it is illegal to directly
+ // implement a trait on an opaque type. However, we might
+ // end up looking at an opaque type during coherence checking
+ // if an opaque type gets used within another type (e.g. as
+ // the type of a field) when checking for auto trait or `Sized`
+ // impls. This requires us to decide whether or not an opaque
+ // type should be considered 'local' or not.
+ //
+ // We choose to treat all opaque types as non-local, even
+ // those that appear within the same crate. This seems
+ // somewhat surprising at first, but makes sense when
+ // you consider that opaque types are supposed to hide
+ // the underlying type *within the same crate*. When an
+ // opaque type is used from outside the module
+ // where it is declared, it should be impossible to observe
+ // anything about it other than the traits that it implements.
+ //
+ // The alternative would be to look at the underlying type
+ // to determine whether or not the opaque type itself should
+ // be considered local. However, this could make it a breaking change
+ // to switch the underlying ('defining') type from a local type
+ // to a remote type. This would violate the rule that opaque
+ // types should be completely opaque apart from the traits
+ // that they implement, so we don't use this behavior.
+ self.found_non_local_ty(ty)
+ }
+ };
+ // A bit of a hack, the `OrphanChecker` is only used to visit a `TraitRef`, so
+ // the first type we visit is always the self type.
+ self.in_self_ty = false;
+ result
+ }
+
+ // FIXME: Constants should participate in orphan checking.
+ fn visit_const(&mut self, _c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
new file mode 100644
index 000000000..254bc4ab6
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs
@@ -0,0 +1,308 @@
+//! Checking that constant values used in types can be successfully evaluated.
+//!
+//! For concrete constants, this is fairly simple as we can just try and evaluate it.
+//!
+//! When dealing with polymorphic constants, for example `std::mem::size_of::<T>() - 1`,
+//! this is not as easy.
+//!
+//! In this case we try to build an abstract representation of this constant using
+//! `thir_abstract_const` which can then be checked for structural equality with other
+//! generic constants mentioned in the `caller_bounds` of the current environment.
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def::DefKind;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::ty::abstract_const::{
+ walk_abstract_const, AbstractConst, FailureKind, Node, NotConstEvaluatable,
+};
+use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+use rustc_session::lint;
+use rustc_span::Span;
+
+use std::iter;
+use std::ops::ControlFlow;
+
+pub struct ConstUnifyCtxt<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> ConstUnifyCtxt<'tcx> {
+    // Substitutes generics repeatedly to allow AbstractConsts to unify where a
+    // ConstKind::Unevaluated could be turned into an AbstractConst that would unify, e.g.
+    // Param(N) should unify with Param(T), substs: [Unevaluated("T2", [Unevaluated("T3", [Param(N)])])]
+ #[inline]
+ #[instrument(skip(self), level = "debug")]
+ fn try_replace_substs_in_root(
+ &self,
+ mut abstr_const: AbstractConst<'tcx>,
+ ) -> Option<AbstractConst<'tcx>> {
+ while let Node::Leaf(ct) = abstr_const.root(self.tcx) {
+ match AbstractConst::from_const(self.tcx, ct) {
+ Ok(Some(act)) => abstr_const = act,
+ Ok(None) => break,
+ Err(_) => return None,
+ }
+ }
+
+ Some(abstr_const)
+ }
+
+ /// Tries to unify two abstract constants using structural equality.
+ #[instrument(skip(self), level = "debug")]
+ pub fn try_unify(&self, a: AbstractConst<'tcx>, b: AbstractConst<'tcx>) -> bool {
+ let a = if let Some(a) = self.try_replace_substs_in_root(a) {
+ a
+ } else {
+ return true;
+ };
+
+ let b = if let Some(b) = self.try_replace_substs_in_root(b) {
+ b
+ } else {
+ return true;
+ };
+
+ let a_root = a.root(self.tcx);
+ let b_root = b.root(self.tcx);
+ debug!(?a_root, ?b_root);
+
+ match (a_root, b_root) {
+ (Node::Leaf(a_ct), Node::Leaf(b_ct)) => {
+ let a_ct = a_ct.eval(self.tcx, self.param_env);
+ debug!("a_ct evaluated: {:?}", a_ct);
+ let b_ct = b_ct.eval(self.tcx, self.param_env);
+ debug!("b_ct evaluated: {:?}", b_ct);
+
+ if a_ct.ty() != b_ct.ty() {
+ return false;
+ }
+
+ match (a_ct.kind(), b_ct.kind()) {
+ // We can just unify errors with everything to reduce the amount of
+ // emitted errors here.
+ (ty::ConstKind::Error(_), _) | (_, ty::ConstKind::Error(_)) => true,
+ (ty::ConstKind::Param(a_param), ty::ConstKind::Param(b_param)) => {
+ a_param == b_param
+ }
+ (ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => a_val == b_val,
+ // If we have `fn a<const N: usize>() -> [u8; N + 1]` and `fn b<const M: usize>() -> [u8; 1 + M]`
+ // we do not want to use `assert_eq!(a(), b())` to infer that `N` and `M` have to be `1`. This
+ // means that we only allow inference variables if they are equal.
+ (ty::ConstKind::Infer(a_val), ty::ConstKind::Infer(b_val)) => a_val == b_val,
+ // We expand generic anonymous constants at the start of this function, so this
+                    // branch should only be taken when dealing with associated constants, at
+ // which point directly comparing them seems like the desired behavior.
+ //
+ // FIXME(generic_const_exprs): This isn't actually the case.
+ // We also take this branch for concrete anonymous constants and
+ // expand generic anonymous constants with concrete substs.
+ (ty::ConstKind::Unevaluated(a_uv), ty::ConstKind::Unevaluated(b_uv)) => {
+ a_uv == b_uv
+ }
+                    // FIXME(generic_const_exprs): We may want to either actually try
+                    // to evaluate `a_ct` and `b_ct` if they are fully concrete or something
+                    // like this; for now we just return false here.
+ _ => false,
+ }
+ }
+ (Node::Binop(a_op, al, ar), Node::Binop(b_op, bl, br)) if a_op == b_op => {
+ self.try_unify(a.subtree(al), b.subtree(bl))
+ && self.try_unify(a.subtree(ar), b.subtree(br))
+ }
+ (Node::UnaryOp(a_op, av), Node::UnaryOp(b_op, bv)) if a_op == b_op => {
+ self.try_unify(a.subtree(av), b.subtree(bv))
+ }
+ (Node::FunctionCall(a_f, a_args), Node::FunctionCall(b_f, b_args))
+ if a_args.len() == b_args.len() =>
+ {
+ self.try_unify(a.subtree(a_f), b.subtree(b_f))
+ && iter::zip(a_args, b_args)
+ .all(|(&an, &bn)| self.try_unify(a.subtree(an), b.subtree(bn)))
+ }
+ (Node::Cast(a_kind, a_operand, a_ty), Node::Cast(b_kind, b_operand, b_ty))
+ if (a_ty == b_ty) && (a_kind == b_kind) =>
+ {
+ self.try_unify(a.subtree(a_operand), b.subtree(b_operand))
+ }
+ // use this over `_ => false` to make adding variants to `Node` less error prone
+ (Node::Cast(..), _)
+ | (Node::FunctionCall(..), _)
+ | (Node::UnaryOp(..), _)
+ | (Node::Binop(..), _)
+ | (Node::Leaf(..), _) => false,
+ }
+ }
+}
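+
+// Hedged illustration: under `feature(generic_const_exprs)`, two mentions of
+// `N + 1` each build the abstract tree `Binop(Add, Leaf(Param(N)), Leaf(1))`,
+// which `try_unify` accepts node by node:
+//
+//     fn f<const N: usize>(x: [u8; N + 1]) -> [u8; N + 1] { x }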
+
+#[instrument(skip(tcx), level = "debug")]
+pub fn try_unify_abstract_consts<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (a, b): (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>),
+ param_env: ty::ParamEnv<'tcx>,
+) -> bool {
+ (|| {
+ if let Some(a) = AbstractConst::new(tcx, a)? {
+ if let Some(b) = AbstractConst::new(tcx, b)? {
+ let const_unify_ctxt = ConstUnifyCtxt { tcx, param_env };
+ return Ok(const_unify_ctxt.try_unify(a, b));
+ }
+ }
+
+ Ok(false)
+ })()
+ .unwrap_or_else(|_: ErrorGuaranteed| true)
+ // FIXME(generic_const_exprs): We should instead have this
+ // method return the resulting `ty::Const` and return `ConstKind::Error`
+ // on `ErrorGuaranteed`.
+}
+
+/// Check if a given constant can be evaluated.
+#[instrument(skip(infcx), level = "debug")]
+pub fn is_const_evaluatable<'cx, 'tcx>(
+ infcx: &InferCtxt<'cx, 'tcx>,
+ uv: ty::Unevaluated<'tcx, ()>,
+ param_env: ty::ParamEnv<'tcx>,
+ span: Span,
+) -> Result<(), NotConstEvaluatable> {
+ let tcx = infcx.tcx;
+
+ if tcx.features().generic_const_exprs {
+ if let Some(ct) = AbstractConst::new(tcx, uv)? {
+ if satisfied_from_param_env(tcx, ct, param_env)? {
+ return Ok(());
+ }
+ match ct.unify_failure_kind(tcx) {
+ FailureKind::MentionsInfer => {
+ return Err(NotConstEvaluatable::MentionsInfer);
+ }
+ FailureKind::MentionsParam => {
+ return Err(NotConstEvaluatable::MentionsParam);
+ }
+ // returned below
+ FailureKind::Concrete => {}
+ }
+ }
+ let concrete = infcx.const_eval_resolve(param_env, uv.expand(), Some(span));
+ match concrete {
+ Err(ErrorHandled::TooGeneric) => {
+ Err(NotConstEvaluatable::Error(infcx.tcx.sess.delay_span_bug(
+ span,
+ format!("Missing value for constant, but no error reported?"),
+ )))
+ }
+ Err(ErrorHandled::Linted) => {
+ let reported = infcx
+ .tcx
+ .sess
+ .delay_span_bug(span, "constant in type had error reported as lint");
+ Err(NotConstEvaluatable::Error(reported))
+ }
+ Err(ErrorHandled::Reported(e)) => Err(NotConstEvaluatable::Error(e)),
+ Ok(_) => Ok(()),
+ }
+ } else {
+ // FIXME: We should only try to evaluate a given constant here if it is fully concrete
+ // as we don't want to allow things like `[u8; std::mem::size_of::<*mut T>()]`.
+ //
+        // We previously did not check this, so for now we only emit a future-compat warning
+        // if const evaluation succeeds and the given constant is still polymorphic; hopefully
+        // we can soon change this to an error.
+ //
+ // See #74595 for more details about this.
+ let concrete = infcx.const_eval_resolve(param_env, uv.expand(), Some(span));
+
+ match concrete {
+ // If we're evaluating a foreign constant, under a nightly compiler without generic
+ // const exprs, AND it would've passed if that expression had been evaluated with
+ // generic const exprs, then suggest using generic const exprs.
+ Err(_) if tcx.sess.is_nightly_build()
+ && let Ok(Some(ct)) = AbstractConst::new(tcx, uv)
+ && satisfied_from_param_env(tcx, ct, param_env) == Ok(true) => {
+ tcx.sess
+ .struct_span_fatal(
+ // Slightly better span than just using `span` alone
+ if span == rustc_span::DUMMY_SP { tcx.def_span(uv.def.did) } else { span },
+ "failed to evaluate generic const expression",
+ )
+ .note("the crate this constant originates from uses `#![feature(generic_const_exprs)]`")
+ .span_suggestion_verbose(
+ rustc_span::DUMMY_SP,
+ "consider enabling this feature",
+ "#![feature(generic_const_exprs)]\n",
+ rustc_errors::Applicability::MaybeIncorrect,
+ )
+ .emit()
+ }
+
+ Err(ErrorHandled::TooGeneric) => Err(if uv.has_infer_types_or_consts() {
+ NotConstEvaluatable::MentionsInfer
+ } else if uv.has_param_types_or_consts() {
+ NotConstEvaluatable::MentionsParam
+ } else {
+ let guar = infcx.tcx.sess.delay_span_bug(span, format!("Missing value for constant, but no error reported?"));
+ NotConstEvaluatable::Error(guar)
+ }),
+ Err(ErrorHandled::Linted) => {
+ let reported =
+ infcx.tcx.sess.delay_span_bug(span, "constant in type had error reported as lint");
+ Err(NotConstEvaluatable::Error(reported))
+ }
+ Err(ErrorHandled::Reported(e)) => Err(NotConstEvaluatable::Error(e)),
+ Ok(_) => {
+ if uv.substs.has_param_types_or_consts() {
+ assert!(matches!(infcx.tcx.def_kind(uv.def.did), DefKind::AnonConst));
+ let mir_body = infcx.tcx.mir_for_ctfe_opt_const_arg(uv.def);
+
+ if mir_body.is_polymorphic {
+ let Some(local_def_id) = uv.def.did.as_local() else { return Ok(()) };
+ tcx.struct_span_lint_hir(
+ lint::builtin::CONST_EVALUATABLE_UNCHECKED,
+ tcx.hir().local_def_id_to_hir_id(local_def_id),
+ span,
+ |err| {
+ err.build("cannot use constants which depend on generic parameters in types").emit();
+ })
+ }
+ }
+
+ Ok(())
+ },
+ }
+ }
+}
+
+#[instrument(skip(tcx), level = "debug")]
+fn satisfied_from_param_env<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ct: AbstractConst<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+) -> Result<bool, NotConstEvaluatable> {
+ for pred in param_env.caller_bounds() {
+ match pred.kind().skip_binder() {
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ if let Some(b_ct) = AbstractConst::new(tcx, uv)? {
+ let const_unify_ctxt = ConstUnifyCtxt { tcx, param_env };
+
+ // Try to unify with each subtree in the AbstractConst to allow for
+                    // `N + 1` being const evaluatable even if there's only a `ConstEvaluatable`
+ // predicate for `(N + 1) * 2`
+ let result = walk_abstract_const(tcx, b_ct, |b_ct| {
+ match const_unify_ctxt.try_unify(ct, b_ct) {
+ true => ControlFlow::BREAK,
+ false => ControlFlow::CONTINUE,
+ }
+ });
+
+ if let ControlFlow::Break(()) = result {
+ debug!("is_const_evaluatable: abstract_const ~~> ok");
+ return Ok(true);
+ }
+ }
+ }
+ _ => {} // don't care
+ }
+ }
+
+ Ok(false)
+}
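+
+// Hedged sketch of the subtree walk above: given a caller bound
+// `where [(); (N + 1) * 2]:`, the abstract const for `(N + 1) * 2` contains
+// `N + 1` as a subtree, so a separate use of `[(); N + 1]` is accepted without
+// needing its own `ConstEvaluatable` predicate.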
diff --git a/compiler/rustc_trait_selection/src/traits/engine.rs b/compiler/rustc_trait_selection/src/traits/engine.rs
new file mode 100644
index 000000000..6c177f638
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/engine.rs
@@ -0,0 +1,112 @@
+use std::cell::RefCell;
+
+use super::TraitEngine;
+use super::{ChalkFulfillmentContext, FulfillmentContext};
+use crate::infer::InferCtxtExt;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::{InferCtxt, InferOk};
+use rustc_infer::traits::{
+ FulfillmentError, Obligation, ObligationCause, PredicateObligation, TraitEngineExt as _,
+};
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::ToPredicate;
+use rustc_middle::ty::TypeFoldable;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+pub trait TraitEngineExt<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>) -> Box<Self>;
+}
+
+impl<'tcx> TraitEngineExt<'tcx> for dyn TraitEngine<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>) -> Box<Self> {
+ if tcx.sess.opts.unstable_opts.chalk {
+ Box::new(ChalkFulfillmentContext::new())
+ } else {
+ Box::new(FulfillmentContext::new())
+ }
+ }
+}
+
+/// Used if you want to have a pleasant experience when dealing
+/// with obligations outside of hir or mir typeck.
+pub struct ObligationCtxt<'a, 'tcx> {
+ pub infcx: &'a InferCtxt<'a, 'tcx>,
+ engine: RefCell<Box<dyn TraitEngine<'tcx>>>,
+}
+
+impl<'a, 'tcx> ObligationCtxt<'a, 'tcx> {
+ pub fn new(infcx: &'a InferCtxt<'a, 'tcx>) -> Self {
+ Self { infcx, engine: RefCell::new(<dyn TraitEngine<'_>>::new(infcx.tcx)) }
+ }
+
+ pub fn register_obligation(&self, obligation: PredicateObligation<'tcx>) {
+ self.engine.borrow_mut().register_predicate_obligation(self.infcx, obligation);
+ }
+
+ pub fn register_obligations(
+ &self,
+ obligations: impl IntoIterator<Item = PredicateObligation<'tcx>>,
+ ) {
+ // Can't use `register_predicate_obligations` because the iterator
+ // may also use this `ObligationCtxt`.
+ for obligation in obligations {
+ self.engine.borrow_mut().register_predicate_obligation(self.infcx, obligation)
+ }
+ }
+
+ pub fn register_infer_ok_obligations<T>(&self, infer_ok: InferOk<'tcx, T>) -> T {
+ let InferOk { value, obligations } = infer_ok;
+ self.engine.borrow_mut().register_predicate_obligations(self.infcx, obligations);
+ value
+ }
+
+ /// Requires that `ty` must implement the trait with `def_id` in
+ /// the given environment. This trait must not have any type
+ /// parameters (except for `Self`).
+ pub fn register_bound(
+ &self,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ def_id: DefId,
+ ) {
+ let tcx = self.infcx.tcx;
+ let trait_ref = ty::TraitRef { def_id, substs: tcx.mk_substs_trait(ty, &[]) };
+ self.register_obligation(Obligation {
+ cause,
+ recursion_depth: 0,
+ param_env,
+ predicate: ty::Binder::dummy(trait_ref).without_const().to_predicate(tcx),
+ });
+ }
+
+ pub fn normalize<T: TypeFoldable<'tcx>>(
+ &self,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> T {
+ let infer_ok = self.infcx.partially_normalize_associated_types_in(cause, param_env, value);
+ self.register_infer_ok_obligations(infer_ok)
+ }
+
+ pub fn equate_types(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ) -> Result<(), TypeError<'tcx>> {
+ match self.infcx.at(cause, param_env).eq(expected, actual) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_obligations(obligations);
+ Ok(())
+ }
+ Err(e) => Err(e),
+ }
+ }
+
+ pub fn select_all_or_error(&self) -> Vec<FulfillmentError<'tcx>> {
+ self.engine.borrow_mut().select_all_or_error(self.infcx)
+ }
+}
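+
+// Hedged usage sketch (assuming an ambient `infcx`, `param_env`, `ty`, and a
+// trait `DefId` such as that of `Sized`):
+//
+//     let ocx = ObligationCtxt::new(&infcx);
+//     ocx.register_bound(ObligationCause::dummy(), param_env, ty, sized_def_id);
+//     let errors = ocx.select_all_or_error();
+//     // an empty `errors` means the bound holds in `param_env`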
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
new file mode 100644
index 000000000..e442c5c91
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
@@ -0,0 +1,2765 @@
+pub mod on_unimplemented;
+pub mod suggestions;
+
+use super::{
+ EvaluationResult, FulfillmentContext, FulfillmentError, FulfillmentErrorCode,
+ MismatchedProjectionTypes, Obligation, ObligationCause, ObligationCauseCode,
+ OnUnimplementedDirective, OnUnimplementedNote, OutputTypeParameterMismatch, Overflow,
+ PredicateObligation, SelectionContext, SelectionError, TraitNotObjectSafe,
+};
+
+use crate::infer::error_reporting::{TyCategory, TypeAnnotationNeeded as ErrorCode};
+use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use crate::infer::{self, InferCtxt, TyCtxtInferExt};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
+ MultiSpan, Style,
+};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::GenericParam;
+use rustc_hir::Item;
+use rustc_hir::Node;
+use rustc_infer::traits::TraitEngine;
+use rustc_middle::traits::select::OverflowError;
+use rustc_middle::ty::abstract_const::NotConstEvaluatable;
+use rustc_middle::ty::error::ExpectedFound;
+use rustc_middle::ty::fold::{TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::{
+ self, SubtypePredicate, ToPolyTraitRef, ToPredicate, TraitRef, Ty, TyCtxt, TypeFoldable,
+ TypeVisitable,
+};
+use rustc_span::symbol::{kw, sym};
+use rustc_span::{ExpnKind, Span, DUMMY_SP};
+use std::fmt;
+use std::iter;
+use std::ops::ControlFlow;
+
+use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
+use crate::traits::query::normalize::AtExt as _;
+use crate::traits::specialize::to_pretty_impl_header;
+use on_unimplemented::InferCtxtExt as _;
+use suggestions::InferCtxtExt as _;
+
+pub use rustc_infer::traits::error_reporting::*;
+
+// When outputting impl candidates, prefer showing those that are more similar.
+//
+// We also compare candidates after skipping lifetimes, which has a lower
+// priority than exact matches.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub enum CandidateSimilarity {
+ Exact { ignoring_lifetimes: bool },
+ Fuzzy { ignoring_lifetimes: bool },
+}
+
+#[derive(Debug, Clone, Copy)]
+pub struct ImplCandidate<'tcx> {
+ pub trait_ref: ty::TraitRef<'tcx>,
+ pub similarity: CandidateSimilarity,
+}
+
+pub trait InferCtxtExt<'tcx> {
+ fn report_fulfillment_errors(
+ &self,
+ errors: &[FulfillmentError<'tcx>],
+ body_id: Option<hir::BodyId>,
+ fallback_has_occurred: bool,
+ ) -> ErrorGuaranteed;
+
+ fn report_overflow_error<T>(
+ &self,
+ obligation: &Obligation<'tcx, T>,
+ suggest_increasing_limit: bool,
+ ) -> !
+ where
+ T: fmt::Display + TypeFoldable<'tcx>;
+
+ fn report_overflow_error_cycle(&self, cycle: &[PredicateObligation<'tcx>]) -> !;
+
+ /// The `root_obligation` parameter should be the `root_obligation` field
+ /// from a `FulfillmentError`. If no `FulfillmentError` is available,
+ /// then it should be the same as `obligation`.
+ fn report_selection_error(
+ &self,
+ obligation: PredicateObligation<'tcx>,
+ root_obligation: &PredicateObligation<'tcx>,
+ error: &SelectionError<'tcx>,
+ fallback_has_occurred: bool,
+ );
+
+ /// Given some node representing a fn-like thing in the HIR map,
+ /// returns a span and `ArgKind` information that describes the
+ /// arguments it expects. This can be supplied to
+ /// `report_arg_count_mismatch`.
+ fn get_fn_like_arguments(&self, node: Node<'_>) -> Option<(Span, Vec<ArgKind>)>;
+
+ /// Reports an error when the number of arguments needed by a
+ /// trait match doesn't match the number that the expression
+ /// provides.
+ fn report_arg_count_mismatch(
+ &self,
+ span: Span,
+ found_span: Option<Span>,
+ expected_args: Vec<ArgKind>,
+ found_args: Vec<ArgKind>,
+ is_closure: bool,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+
+ /// Checks if the type implements one of `Fn`, `FnMut`, or `FnOnce`
+ /// in that order, and returns the generic type corresponding to the
+ /// argument of that trait (corresponding to the closure arguments).
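+ ///
+ /// Illustrative result (not from this file): for a value of type
+ /// `impl Fn(u32) -> u32`, this yields `ClosureKind::Fn` together with a
+ /// binder over the argument tuple type `(u32,)`.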
+ fn type_implements_fn_trait(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: ty::Binder<'tcx, Ty<'tcx>>,
+ constness: ty::BoundConstness,
+ polarity: ty::ImplPolarity,
+ ) -> Result<(ty::ClosureKind, ty::Binder<'tcx, Ty<'tcx>>), ()>;
+}
+
+impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
+ fn report_fulfillment_errors(
+ &self,
+ errors: &[FulfillmentError<'tcx>],
+ body_id: Option<hir::BodyId>,
+ fallback_has_occurred: bool,
+ ) -> ErrorGuaranteed {
+ #[derive(Debug)]
+ struct ErrorDescriptor<'tcx> {
+ predicate: ty::Predicate<'tcx>,
+ index: Option<usize>, // None if this is an old error
+ }
+
+ let mut error_map: FxHashMap<_, Vec<_>> = self
+ .reported_trait_errors
+ .borrow()
+ .iter()
+ .map(|(&span, predicates)| {
+ (
+ span,
+ predicates
+ .iter()
+ .map(|&predicate| ErrorDescriptor { predicate, index: None })
+ .collect(),
+ )
+ })
+ .collect();
+
+ for (index, error) in errors.iter().enumerate() {
+ // We want to ignore desugarings here: spans are equivalent even
+ // if one is the result of a desugaring and the other is not.
+ let mut span = error.obligation.cause.span;
+ let expn_data = span.ctxt().outer_expn_data();
+ if let ExpnKind::Desugaring(_) = expn_data.kind {
+ span = expn_data.call_site;
+ }
+
+ error_map.entry(span).or_default().push(ErrorDescriptor {
+ predicate: error.obligation.predicate,
+ index: Some(index),
+ });
+
+ self.reported_trait_errors
+ .borrow_mut()
+ .entry(span)
+ .or_default()
+ .push(error.obligation.predicate);
+ }
+
+ // We do this in 2 passes because we want to display errors in order, though
+ // maybe it *is* better to sort errors by span or something.
+ let mut is_suppressed = vec![false; errors.len()];
+ for (_, error_set) in error_map.iter() {
+ // We want to suppress "duplicate" errors with the same span.
+ for error in error_set {
+ if let Some(index) = error.index {
+ // Suppress errors that are either:
+ // 1) strictly implied by another error.
+ // 2) implied by an error with a smaller index.
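+ //
+ // For example (illustrative): if `T: Ord` and `T: PartialOrd`
+ // both fail at the same span, the `T: PartialOrd` failure is
+ // implied by the `T: Ord` one via supertrait elaboration, so
+ // it gets suppressed.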
+ for error2 in error_set {
+ if error2.index.map_or(false, |index2| is_suppressed[index2]) {
+ // Avoid errors being suppressed by already-suppressed
+ // errors, to prevent all errors from being suppressed
+ // at once.
+ continue;
+ }
+
+ if self.error_implies(error2.predicate, error.predicate)
+ && !(error2.index >= error.index
+ && self.error_implies(error.predicate, error2.predicate))
+ {
+ info!("skipping {:?} (implied by {:?})", error, error2);
+ is_suppressed[index] = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ for (error, suppressed) in iter::zip(errors, is_suppressed) {
+ if !suppressed {
+ self.report_fulfillment_error(error, body_id, fallback_has_occurred);
+ }
+ }
+
+ self.tcx.sess.delay_span_bug(DUMMY_SP, "expected fulfillment errors")
+ }
+
+ /// Reports that an overflow has occurred and halts compilation. We
+ /// halt compilation unconditionally because it is important that
+ /// overflows never be masked -- they basically represent computations
+ /// whose result could not be truly determined and thus we can't say
+ /// if the program type checks or not -- and they are unusual
+ /// occurrences in any case.
+ fn report_overflow_error<T>(
+ &self,
+ obligation: &Obligation<'tcx, T>,
+ suggest_increasing_limit: bool,
+ ) -> !
+ where
+ T: fmt::Display + TypeFoldable<'tcx>,
+ {
+ let predicate = self.resolve_vars_if_possible(obligation.predicate.clone());
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ obligation.cause.span,
+ E0275,
+ "overflow evaluating the requirement `{}`",
+ predicate
+ );
+
+ if suggest_increasing_limit {
+ self.suggest_new_overflow_limit(&mut err);
+ }
+
+ self.note_obligation_cause_code(
+ &mut err,
+ &obligation.predicate,
+ obligation.param_env,
+ obligation.cause.code(),
+ &mut vec![],
+ &mut Default::default(),
+ );
+
+ err.emit();
+ self.tcx.sess.abort_if_errors();
+ bug!();
+ }
+
+ /// Reports that a cycle was detected which led to overflow and halts
+ /// compilation. This is equivalent to `report_overflow_error` except
+ /// that we can give a more helpful error message (and, in particular,
+ /// we do not suggest increasing the overflow limit, which is not
+ /// going to help).
+ fn report_overflow_error_cycle(&self, cycle: &[PredicateObligation<'tcx>]) -> ! {
+ let cycle = self.resolve_vars_if_possible(cycle.to_owned());
+ assert!(!cycle.is_empty());
+
+ debug!(?cycle, "report_overflow_error_cycle");
+
+ // The 'deepest' obligation is most likely to have a useful
+ // cause 'backtrace'.
+ self.report_overflow_error(cycle.iter().max_by_key(|p| p.recursion_depth).unwrap(), false);
+ }
+
+ fn report_selection_error(
+ &self,
+ mut obligation: PredicateObligation<'tcx>,
+ root_obligation: &PredicateObligation<'tcx>,
+ error: &SelectionError<'tcx>,
+ fallback_has_occurred: bool,
+ ) {
+ self.set_tainted_by_errors();
+ let tcx = self.tcx;
+ let mut span = obligation.cause.span;
+
+ let mut err = match *error {
+ SelectionError::Ambiguous(ref impls) => {
+ let mut err = self.tcx.sess.struct_span_err(
+ obligation.cause.span,
+ &format!("multiple applicable `impl`s for `{}`", obligation.predicate),
+ );
+ self.annotate_source_of_ambiguity(&mut err, impls, obligation.predicate);
+ err.emit();
+ return;
+ }
+ SelectionError::Unimplemented => {
+ // If this obligation was generated as a result of well-formedness checking, see if we
+ // can get a better error message by performing HIR-based well-formedness checking.
+ if let ObligationCauseCode::WellFormed(Some(wf_loc)) =
+ root_obligation.cause.code().peel_derives()
+ {
+ if let Some(cause) = self
+ .tcx
+ .diagnostic_hir_wf_check((tcx.erase_regions(obligation.predicate), *wf_loc))
+ {
+ obligation.cause = cause.clone();
+ span = obligation.cause.span;
+ }
+ }
+ if let ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id,
+ trait_item_def_id,
+ kind: _,
+ } = *obligation.cause.code()
+ {
+ self.report_extra_impl_obligation(
+ span,
+ impl_item_def_id,
+ trait_item_def_id,
+ &format!("`{}`", obligation.predicate),
+ )
+ .emit();
+ return;
+ }
+
+ let bound_predicate = obligation.predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(trait_predicate) => {
+ let trait_predicate = bound_predicate.rebind(trait_predicate);
+ let mut trait_predicate = self.resolve_vars_if_possible(trait_predicate);
+
+ trait_predicate.remap_constness_diag(obligation.param_env);
+ let predicate_is_const = ty::BoundConstness::ConstIfConst
+ == trait_predicate.skip_binder().constness;
+
+ if self.tcx.sess.has_errors().is_some()
+ && trait_predicate.references_error()
+ {
+ return;
+ }
+ let trait_ref = trait_predicate.to_poly_trait_ref();
+ let (post_message, pre_message, type_def) = self
+ .get_parent_trait_ref(obligation.cause.code())
+ .map(|(t, s)| {
+ (
+ format!(" in `{}`", t),
+ format!("within `{}`, ", t),
+ s.map(|s| (format!("within this `{}`", t), s)),
+ )
+ })
+ .unwrap_or_default();
+
+ let OnUnimplementedNote {
+ message,
+ label,
+ note,
+ enclosing_scope,
+ append_const_msg,
+ } = self.on_unimplemented_note(trait_ref, &obligation);
+ let have_alt_message = message.is_some() || label.is_some();
+ let is_try_conversion = self.is_try_conversion(span, trait_ref.def_id());
+ let is_unsize =
+ Some(trait_ref.def_id()) == self.tcx.lang_items().unsize_trait();
+ let (message, note, append_const_msg) = if is_try_conversion {
+ (
+ Some(format!(
+ "`?` couldn't convert the error to `{}`",
+ trait_ref.skip_binder().self_ty(),
+ )),
+ Some(
+ "the question mark operation (`?`) implicitly performs a \
+ conversion on the error value using the `From` trait"
+ .to_owned(),
+ ),
+ Some(None),
+ )
+ } else {
+ (message, note, append_const_msg)
+ };
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0277,
+ "{}",
+ message
+ .and_then(|cannot_do_this| {
+ match (predicate_is_const, append_const_msg) {
+ // do nothing if predicate is not const
+ (false, _) => Some(cannot_do_this),
+ // suggested using default post message
+ (true, Some(None)) => {
+ Some(format!("{cannot_do_this} in const contexts"))
+ }
+ // overridden post message
+ (true, Some(Some(post_message))) => {
+ Some(format!("{cannot_do_this}{post_message}"))
+ }
+ // fallback to generic message
+ (true, None) => None,
+ }
+ })
+ .unwrap_or_else(|| format!(
+ "the trait bound `{}` is not satisfied{}",
+ trait_predicate, post_message,
+ ))
+ );
+
+ if is_try_conversion {
+ let none_error = self
+ .tcx
+ .get_diagnostic_item(sym::none_error)
+ .map(|def_id| tcx.type_of(def_id));
+ let should_convert_option_to_result =
+ Some(trait_ref.skip_binder().substs.type_at(1)) == none_error;
+ let should_convert_result_to_option =
+ Some(trait_ref.self_ty().skip_binder()) == none_error;
+ if should_convert_option_to_result {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ "consider converting the `Option<T>` into a `Result<T, _>` \
+ using `Option::ok_or` or `Option::ok_or_else`",
+ ".ok_or_else(|| /* error value */)",
+ Applicability::HasPlaceholders,
+ );
+ } else if should_convert_result_to_option {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ "consider converting the `Result<T, _>` into an `Option<T>` \
+ using `Result::ok`",
+ ".ok()",
+ Applicability::MachineApplicable,
+ );
+ }
+ if let Some(ret_span) = self.return_type_span(&obligation) {
+ err.span_label(
+ ret_span,
+ &format!(
+ "expected `{}` because of this",
+ trait_ref.skip_binder().self_ty()
+ ),
+ );
+ }
+ }
+
+ if Some(trait_ref.def_id()) == tcx.lang_items().drop_trait()
+ && predicate_is_const
+ {
+ err.note("`~const Drop` was renamed to `~const Destruct`");
+ err.note("See <https://github.com/rust-lang/rust/pull/94901> for more details");
+ }
+
+ let explanation = if let ObligationCauseCode::MainFunctionType =
+ obligation.cause.code()
+ {
+ "consider using `()`, or a `Result`".to_owned()
+ } else {
+ format!(
+ "{}the trait `{}` is not implemented for `{}`",
+ pre_message,
+ trait_predicate.print_modifiers_and_trait_path(),
+ trait_ref.skip_binder().self_ty(),
+ )
+ };
+
+ if self.suggest_add_reference_to_arg(
+ &obligation,
+ &mut err,
+ trait_predicate,
+ have_alt_message,
+ ) {
+ self.note_obligation_cause(&mut err, &obligation);
+ err.emit();
+ return;
+ }
+ if let Some(ref s) = label {
+ // If it has a custom `#[rustc_on_unimplemented]`
+ // error message, let's display it as the label!
+ err.span_label(span, s);
+ if !matches!(trait_ref.skip_binder().self_ty().kind(), ty::Param(_)) {
+ // When the self type is a type param, we don't need to say "the trait
+ // `std::marker::Sized` is not implemented for `T`", as we will point
+ // at the type param with a label to suggest constraining it.
+ err.help(&explanation);
+ }
+ } else {
+ err.span_label(span, explanation);
+ }
+
+ if let ObligationCauseCode::ObjectCastObligation(concrete_ty, obj_ty) =
+ obligation.cause.code().peel_derives()
+ && Some(trait_ref.def_id()) == self.tcx.lang_items().sized_trait()
+ {
+ self.suggest_borrowing_for_object_cast(
+ &mut err,
+ &root_obligation,
+ *concrete_ty,
+ *obj_ty,
+ );
+ }
+
+ if trait_predicate.is_const_if_const() && obligation.param_env.is_const() {
+ let non_const_predicate = trait_ref.without_const();
+ let non_const_obligation = Obligation {
+ cause: obligation.cause.clone(),
+ param_env: obligation.param_env.without_const(),
+ predicate: non_const_predicate.to_predicate(tcx),
+ recursion_depth: obligation.recursion_depth,
+ };
+ if self.predicate_may_hold(&non_const_obligation) {
+ err.span_note(
+ span,
+ &format!(
+ "the trait `{}` is implemented for `{}`, \
+ but that implementation is not `const`",
+ non_const_predicate.print_modifiers_and_trait_path(),
+ trait_ref.skip_binder().self_ty(),
+ ),
+ );
+ }
+ }
+
+ if let Some((msg, span)) = type_def {
+ err.span_label(span, &msg);
+ }
+ if let Some(ref s) = note {
+ // If it has a custom `#[rustc_on_unimplemented]` note, let's display it
+ err.note(s.as_str());
+ }
+ if let Some(ref s) = enclosing_scope {
+ let body = tcx
+ .hir()
+ .opt_local_def_id(obligation.cause.body_id)
+ .unwrap_or_else(|| {
+ tcx.hir().body_owner_def_id(hir::BodyId {
+ hir_id: obligation.cause.body_id,
+ })
+ });
+
+ let enclosing_scope_span =
+ tcx.hir().span_with_body(tcx.hir().local_def_id_to_hir_id(body));
+
+ err.span_label(enclosing_scope_span, s);
+ }
+
+ self.suggest_floating_point_literal(&obligation, &mut err, &trait_ref);
+ self.suggest_dereferencing_index(&obligation, &mut err, trait_predicate);
+ let mut suggested =
+ self.suggest_dereferences(&obligation, &mut err, trait_predicate);
+ suggested |= self.suggest_fn_call(&obligation, &mut err, trait_predicate);
+ suggested |=
+ self.suggest_remove_reference(&obligation, &mut err, trait_predicate);
+ suggested |= self.suggest_semicolon_removal(
+ &obligation,
+ &mut err,
+ span,
+ trait_predicate,
+ );
+ self.note_version_mismatch(&mut err, &trait_ref);
+ self.suggest_remove_await(&obligation, &mut err);
+ self.suggest_derive(&obligation, &mut err, trait_predicate);
+
+ if Some(trait_ref.def_id()) == tcx.lang_items().try_trait() {
+ self.suggest_await_before_try(
+ &mut err,
+ &obligation,
+ trait_predicate,
+ span,
+ );
+ }
+
+ if self.suggest_impl_trait(&mut err, span, &obligation, trait_predicate) {
+ err.emit();
+ return;
+ }
+
+ if is_unsize {
+ // If the obligation failed due to a missing implementation of the
+ // `Unsize` trait, give a pointer to why that might be the case
+ err.note(
+ "all implementations of `Unsize` are provided \
+ automatically by the compiler, see \
+ <https://doc.rust-lang.org/stable/std/marker/trait.Unsize.html> \
+ for more information",
+ );
+ }
+
+ let is_fn_trait = [
+ self.tcx.lang_items().fn_trait(),
+ self.tcx.lang_items().fn_mut_trait(),
+ self.tcx.lang_items().fn_once_trait(),
+ ]
+ .contains(&Some(trait_ref.def_id()));
+ let is_target_feature_fn = if let ty::FnDef(def_id, _) =
+ *trait_ref.skip_binder().self_ty().kind()
+ {
+ !self.tcx.codegen_fn_attrs(def_id).target_features.is_empty()
+ } else {
+ false
+ };
+ if is_fn_trait && is_target_feature_fn {
+ err.note(
+ "`#[target_feature]` functions do not implement the `Fn` traits",
+ );
+ }
+
+ // Try to report a help message
+ if is_fn_trait
+ && let Ok((implemented_kind, params)) = self.type_implements_fn_trait(
+ obligation.param_env,
+ trait_ref.self_ty(),
+ trait_predicate.skip_binder().constness,
+ trait_predicate.skip_binder().polarity,
+ )
+ {
+ // If the type implements `Fn`, `FnMut`, or `FnOnce`, suppress the following
+ // suggestion to add trait bounds for the type, since we typically implement
+ // these traits only once.
+
+ // Note if the `FnMut` or `FnOnce` is less general than the trait we're trying
+ // to implement.
+ let selected_kind =
+ ty::ClosureKind::from_def_id(self.tcx, trait_ref.def_id())
+ .expect("expected to map DefId to ClosureKind");
+ if !implemented_kind.extends(selected_kind) {
+ err.note(
+ &format!(
+ "`{}` implements `{}`, but it must implement `{}`, which is more general",
+ trait_ref.skip_binder().self_ty(),
+ implemented_kind,
+ selected_kind
+ )
+ );
+ }
+
+ // Note any argument mismatches
+ let given_ty = params.skip_binder();
+ let expected_ty = trait_ref.skip_binder().substs.type_at(1);
+ if let ty::Tuple(given) = given_ty.kind()
+ && let ty::Tuple(expected) = expected_ty.kind()
+ {
+ if expected.len() != given.len() {
+ // Note number of types that were expected and given
+ err.note(
+ &format!(
+ "expected a closure taking {} argument{}, but one taking {} argument{} was given",
+ given.len(),
+ pluralize!(given.len()),
+ expected.len(),
+ pluralize!(expected.len()),
+ )
+ );
+ } else if !self.same_type_modulo_infer(given_ty, expected_ty) {
+ // Print type mismatch
+ let (expected_args, given_args) =
+ self.cmp(given_ty, expected_ty);
+ err.note_expected_found(
+ &"a closure with arguments",
+ expected_args,
+ &"a closure with arguments",
+ given_args,
+ );
+ }
+ }
+ } else if !trait_ref.has_infer_types_or_consts()
+ && self.predicate_can_apply(obligation.param_env, trait_ref)
+ {
+ // If a where-clause may be useful, remind the
+ // user that they can add it.
+ //
+ // don't display an on-unimplemented note, as
+ // these notes will often be of the form
+ // "the type `T` can't be frobnicated"
+ // which is somewhat confusing.
+ self.suggest_restricting_param_bound(
+ &mut err,
+ trait_predicate,
+ None,
+ obligation.cause.body_id,
+ );
+ } else if !suggested {
+ // Can't show anything else useful, try to find similar impls.
+ let impl_candidates = self.find_similar_impl_candidates(trait_predicate);
+ if !self.report_similar_impl_candidates(
+ impl_candidates,
+ trait_ref,
+ obligation.cause.body_id,
+ &mut err,
+ ) {
+ // This is *almost* equivalent to
+ // `obligation.cause.code().peel_derives()`, but it gives us the
+ // trait predicate for that corresponding root obligation. This
+ // lets us get a derived obligation from a type parameter, like
+ // when calling `string.strip_suffix(p)` where `p` is *not* an
+ // implementer of `Pattern<'_>`.
+ let mut code = obligation.cause.code();
+ let mut trait_pred = trait_predicate;
+ let mut peeled = false;
+ while let Some((parent_code, parent_trait_pred)) = code.parent() {
+ code = parent_code;
+ if let Some(parent_trait_pred) = parent_trait_pred {
+ trait_pred = parent_trait_pred;
+ peeled = true;
+ }
+ }
+ let def_id = trait_pred.def_id();
+ // Mention *all* the `impl`s for the *top most* obligation, the
+ // user might have meant to use one of them, if any found. We skip
+ // auto-traits or fundamental traits that might not be exactly what
+ // the user might expect to be presented with. Instead this is
+ // useful for less general traits.
+ if peeled
+ && !self.tcx.trait_is_auto(def_id)
+ && !self.tcx.lang_items().items().contains(&Some(def_id))
+ {
+ let trait_ref = trait_pred.to_poly_trait_ref();
+ let impl_candidates =
+ self.find_similar_impl_candidates(trait_pred);
+ self.report_similar_impl_candidates(
+ impl_candidates,
+ trait_ref,
+ obligation.cause.body_id,
+ &mut err,
+ );
+ }
+ }
+ }
+
+ // Changing mutability doesn't make a difference to whether we have
+ // an `Unsize` impl (Fixes ICE in #71036)
+ if !is_unsize {
+ self.suggest_change_mut(&obligation, &mut err, trait_predicate);
+ }
+
+ // If this error is due to `!: Trait` not implemented but `(): Trait` is
+ // implemented, and fallback has occurred, then it could be due to a
+ // variable that used to fallback to `()` now falling back to `!`. Issue a
+ // note informing about the change in behaviour.
+ if trait_predicate.skip_binder().self_ty().is_never()
+ && fallback_has_occurred
+ {
+ let predicate = trait_predicate.map_bound(|mut trait_pred| {
+ trait_pred.trait_ref.substs = self.tcx.mk_substs_trait(
+ self.tcx.mk_unit(),
+ &trait_pred.trait_ref.substs[1..],
+ );
+ trait_pred
+ });
+ let unit_obligation = obligation.with(predicate.to_predicate(tcx));
+ if self.predicate_may_hold(&unit_obligation) {
+ err.note(
+ "this error might have been caused by changes to \
+ Rust's type-inference algorithm (see issue #48950 \
+ <https://github.com/rust-lang/rust/issues/48950> \
+ for more information)",
+ );
+ err.help("did you intend to use the type `()` here instead?");
+ }
+ }
+
+ // Return early if the trait is Debug or Display and the invocation
+ // originates within a standard library macro, because the output
+ // is otherwise overwhelming and unhelpful (see #85844 for an
+ // example).
+
+ let in_std_macro =
+ match obligation.cause.span.ctxt().outer_expn_data().macro_def_id {
+ Some(macro_def_id) => {
+ let crate_name = tcx.crate_name(macro_def_id.krate);
+ crate_name == sym::std || crate_name == sym::core
+ }
+ None => false,
+ };
+
+ if in_std_macro
+ && matches!(
+ self.tcx.get_diagnostic_name(trait_ref.def_id()),
+ Some(sym::Debug | sym::Display)
+ )
+ {
+ err.emit();
+ return;
+ }
+
+ err
+ }
+
+ ty::PredicateKind::Subtype(predicate) => {
+ // Errors for Subtype predicates show up as
+ // `FulfillmentErrorCode::CodeSubtypeError`,
+ // not selection error.
+ span_bug!(span, "subtype requirement gave wrong error: `{:?}`", predicate)
+ }
+
+ ty::PredicateKind::Coerce(predicate) => {
+ // Errors for Coerce predicates show up as
+ // `FulfillmentErrorCode::CodeSubtypeError`,
+ // not selection error.
+ span_bug!(span, "coerce requirement gave wrong error: `{:?}`", predicate)
+ }
+
+ ty::PredicateKind::RegionOutlives(..)
+ | ty::PredicateKind::Projection(..)
+ | ty::PredicateKind::TypeOutlives(..) => {
+ let predicate = self.resolve_vars_if_possible(obligation.predicate);
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0280,
+ "the requirement `{}` is not satisfied",
+ predicate
+ )
+ }
+
+ ty::PredicateKind::ObjectSafe(trait_def_id) => {
+ let violations = self.tcx.object_safety_violations(trait_def_id);
+ report_object_safety_error(self.tcx, span, trait_def_id, violations)
+ }
+
+ ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
+ let found_kind = self.closure_kind(closure_substs).unwrap();
+ let closure_span = self.tcx.def_span(closure_def_id);
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ closure_span,
+ E0525,
+ "expected a closure that implements the `{}` trait, \
+ but this closure only implements `{}`",
+ kind,
+ found_kind
+ );
+
+ err.span_label(
+ closure_span,
+ format!("this closure implements `{}`, not `{}`", found_kind, kind),
+ );
+ err.span_label(
+ obligation.cause.span,
+ format!("the requirement to implement `{}` derives from here", kind),
+ );
+
+ // Additional context information explaining why the closure only implements
+ // a particular trait.
+ if let Some(typeck_results) = self.in_progress_typeck_results {
+ let hir_id = self
+ .tcx
+ .hir()
+ .local_def_id_to_hir_id(closure_def_id.expect_local());
+ let typeck_results = typeck_results.borrow();
+ match (found_kind, typeck_results.closure_kind_origins().get(hir_id)) {
+ (ty::ClosureKind::FnOnce, Some((span, place))) => {
+ err.span_label(
+ *span,
+ format!(
+ "closure is `FnOnce` because it moves the \
+ variable `{}` out of its environment",
+ ty::place_to_string_for_capture(tcx, place)
+ ),
+ );
+ }
+ (ty::ClosureKind::FnMut, Some((span, place))) => {
+ err.span_label(
+ *span,
+ format!(
+ "closure is `FnMut` because it mutates the \
+ variable `{}` here",
+ ty::place_to_string_for_capture(tcx, place)
+ ),
+ );
+ }
+ _ => {}
+ }
+ }
+
+ err.emit();
+ return;
+ }
+
+ ty::PredicateKind::WellFormed(ty) => {
+ if !self.tcx.sess.opts.unstable_opts.chalk {
+ // WF predicates cannot themselves make
+ // errors. They can only block due to
+ // ambiguity; otherwise, they always
+ // degenerate into other obligations
+ // (which may fail).
+ span_bug!(span, "WF predicate not satisfied for {:?}", ty);
+ } else {
+ // FIXME: we'll need a better message which takes into account
+ // which bounds actually failed to hold.
+ self.tcx.sess.struct_span_err(
+ span,
+ &format!("the type `{}` is not well-formed (chalk)", ty),
+ )
+ }
+ }
+
+ ty::PredicateKind::ConstEvaluatable(..) => {
+ // Errors for `ConstEvaluatable` predicates show up as
+ // `SelectionError::ConstEvalFailure`,
+ // not `Unimplemented`.
+ span_bug!(
+ span,
+ "const-evaluatable requirement gave wrong error: `{:?}`",
+ obligation
+ )
+ }
+
+ ty::PredicateKind::ConstEquate(..) => {
+ // Errors for `ConstEquate` predicates show up as
+ // `SelectionError::ConstEvalFailure`,
+ // not `Unimplemented`.
+ span_bug!(
+ span,
+ "const-equate requirement gave wrong error: `{:?}`",
+ obligation
+ )
+ }
+
+ ty::PredicateKind::TypeWellFormedFromEnv(..) => span_bug!(
+ span,
+ "TypeWellFormedFromEnv predicate should only exist in the environment"
+ ),
+ }
+ }
+
+ OutputTypeParameterMismatch(found_trait_ref, expected_trait_ref, _) => {
+ let found_trait_ref = self.resolve_vars_if_possible(found_trait_ref);
+ let expected_trait_ref = self.resolve_vars_if_possible(expected_trait_ref);
+
+ if expected_trait_ref.self_ty().references_error() {
+ return;
+ }
+
+ let Some(found_trait_ty) = found_trait_ref.self_ty().no_bound_vars() else {
+ return;
+ };
+
+ let found_did = match *found_trait_ty.kind() {
+ ty::Closure(did, _)
+ | ty::Foreign(did)
+ | ty::FnDef(did, _)
+ | ty::Generator(did, ..) => Some(did),
+ ty::Adt(def, _) => Some(def.did()),
+ _ => None,
+ };
+
+ let found_span = found_did.and_then(|did| self.tcx.hir().span_if_local(did));
+
+ if self.reported_closure_mismatch.borrow().contains(&(span, found_span)) {
+ // We check closures twice, with obligations flowing in different directions,
+ // but we want to complain about them only once.
+ return;
+ }
+
+ self.reported_closure_mismatch.borrow_mut().insert((span, found_span));
+
+ let found = match found_trait_ref.skip_binder().substs.type_at(1).kind() {
+ ty::Tuple(ref tys) => vec![ArgKind::empty(); tys.len()],
+ _ => vec![ArgKind::empty()],
+ };
+
+ let expected_ty = expected_trait_ref.skip_binder().substs.type_at(1);
+ let expected = match expected_ty.kind() {
+ ty::Tuple(ref tys) => {
+ tys.iter().map(|t| ArgKind::from_expected_ty(t, Some(span))).collect()
+ }
+ _ => vec![ArgKind::Arg("_".to_owned(), expected_ty.to_string())],
+ };
+
+ if found.len() == expected.len() {
+ self.report_closure_arg_mismatch(
+ span,
+ found_span,
+ found_trait_ref,
+ expected_trait_ref,
+ )
+ } else {
+ let (closure_span, found) = found_did
+ .and_then(|did| {
+ let node = self.tcx.hir().get_if_local(did)?;
+ let (found_span, found) = self.get_fn_like_arguments(node)?;
+ Some((Some(found_span), found))
+ })
+ .unwrap_or((found_span, found));
+
+ self.report_arg_count_mismatch(
+ span,
+ closure_span,
+ expected,
+ found,
+ found_trait_ty.is_closure(),
+ )
+ }
+ }
+
+ TraitNotObjectSafe(did) => {
+ let violations = self.tcx.object_safety_violations(did);
+ report_object_safety_error(self.tcx, span, did, violations)
+ }
+
+ SelectionError::NotConstEvaluatable(NotConstEvaluatable::MentionsInfer) => {
+ bug!(
+ "MentionsInfer should have been handled in `traits/fulfill.rs` or `traits/select/mod.rs`"
+ )
+ }
+ SelectionError::NotConstEvaluatable(NotConstEvaluatable::MentionsParam) => {
+ if !self.tcx.features().generic_const_exprs {
+ let mut err = self.tcx.sess.struct_span_err(
+ span,
+ "constant expression depends on a generic parameter",
+ );
+ // FIXME(const_generics): we should suggest to the user how they can resolve this
+ // issue. However, this is currently not actually possible
+ // (see https://github.com/rust-lang/rust/issues/66962#issuecomment-575907083).
+ //
+ // Note that with `feature(generic_const_exprs)` this case should not
+ // be reachable.
+ err.note("this may fail depending on what value the parameter takes");
+ err.emit();
+ return;
+ }
+
+ match obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ let mut err =
+ self.tcx.sess.struct_span_err(span, "unconstrained generic constant");
+ let const_span = self.tcx.def_span(uv.def.did);
+ match self.tcx.sess.source_map().span_to_snippet(const_span) {
+ Ok(snippet) => err.help(&format!(
+ "try adding a `where` bound using this expression: `where [(); {}]:`",
+ snippet
+ )),
+ _ => err.help("consider adding a `where` bound using this expression"),
+ };
+ err
+ }
+ _ => {
+ span_bug!(
+ span,
+ "unexpected non-ConstEvaluatable predicate, this should not be reachable"
+ )
+ }
+ }
+ }
+
+ // Already reported in the query.
+ SelectionError::NotConstEvaluatable(NotConstEvaluatable::Error(_)) => {
+ // FIXME(eddyb) remove this once `ErrorGuaranteed` becomes a proof token.
+ self.tcx.sess.delay_span_bug(span, "`ErrorGuaranteed` without an error");
+ return;
+ }
+ // Already reported.
+ Overflow(OverflowError::Error(_)) => {
+ self.tcx.sess.delay_span_bug(span, "`OverflowError` has been reported");
+ return;
+ }
+ Overflow(_) => {
+ bug!("overflow should be handled before the `report_selection_error` path");
+ }
+ SelectionError::ErrorReporting => {
+ bug!("ErrorReporting Overflow should not reach `report_selection_err` call")
+ }
+ };
+
+ self.note_obligation_cause(&mut err, &obligation);
+ self.point_at_returns_when_relevant(&mut err, &obligation);
+
+ err.emit();
+ }
+
+ /// Given some node representing a fn-like thing in the HIR map,
+ /// returns a span and `ArgKind` information that describes the
+ /// arguments it expects. This can be supplied to
+ /// `report_arg_count_mismatch`.
+ fn get_fn_like_arguments(&self, node: Node<'_>) -> Option<(Span, Vec<ArgKind>)> {
+ let sm = self.tcx.sess.source_map();
+ let hir = self.tcx.hir();
+ Some(match node {
+ Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { body, fn_decl_span, .. }),
+ ..
+ }) => (
+ fn_decl_span,
+ hir.body(body)
+ .params
+ .iter()
+ .map(|arg| {
+ if let hir::Pat { kind: hir::PatKind::Tuple(ref args, _), span, .. } =
+ *arg.pat
+ {
+ Some(ArgKind::Tuple(
+ Some(span),
+ args.iter()
+ .map(|pat| {
+ sm.span_to_snippet(pat.span)
+ .ok()
+ .map(|snippet| (snippet, "_".to_owned()))
+ })
+ .collect::<Option<Vec<_>>>()?,
+ ))
+ } else {
+ let name = sm.span_to_snippet(arg.pat.span).ok()?;
+ Some(ArgKind::Arg(name, "_".to_owned()))
+ }
+ })
+ .collect::<Option<Vec<ArgKind>>>()?,
+ ),
+ Node::Item(&hir::Item { kind: hir::ItemKind::Fn(ref sig, ..), .. })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(ref sig, _), .. })
+ | Node::TraitItem(&hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(ref sig, _), ..
+ }) => (
+ sig.span,
+ sig.decl
+ .inputs
+ .iter()
+ .map(|arg| match arg.kind {
+ hir::TyKind::Tup(ref tys) => ArgKind::Tuple(
+ Some(arg.span),
+ vec![("_".to_owned(), "_".to_owned()); tys.len()],
+ ),
+ _ => ArgKind::empty(),
+ })
+ .collect::<Vec<ArgKind>>(),
+ ),
+ Node::Ctor(ref variant_data) => {
+ let span = variant_data.ctor_hir_id().map_or(DUMMY_SP, |id| hir.span(id));
+ (span, vec![ArgKind::empty(); variant_data.fields().len()])
+ }
+ _ => panic!("non-FnLike node found: {:?}", node),
+ })
+ }
+
+ /// Reports an error when the number of arguments needed by a
+ /// trait match doesn't match the number that the expression
+ /// provides.
+ fn report_arg_count_mismatch(
+ &self,
+ span: Span,
+ found_span: Option<Span>,
+ expected_args: Vec<ArgKind>,
+ found_args: Vec<ArgKind>,
+ is_closure: bool,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let kind = if is_closure { "closure" } else { "function" };
+
+ let args_str = |arguments: &[ArgKind], other: &[ArgKind]| {
+ let arg_length = arguments.len();
+ let distinct = matches!(other, &[ArgKind::Tuple(..)]);
+ match (arg_length, arguments.get(0)) {
+ (1, Some(&ArgKind::Tuple(_, ref fields))) => {
+ format!("a single {}-tuple as argument", fields.len())
+ }
+ _ => format!(
+ "{} {}argument{}",
+ arg_length,
+ if distinct && arg_length > 1 { "distinct " } else { "" },
+ pluralize!(arg_length)
+ ),
+ }
+ };
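+ // Illustrative renderings: two plain arguments, when the other side is
+ // a single tuple, render as "2 distinct arguments", while a single
+ // two-field tuple renders as "a single 2-tuple as argument".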
+
+ let expected_str = args_str(&expected_args, &found_args);
+ let found_str = args_str(&found_args, &expected_args);
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0593,
+ "{} is expected to take {}, but it takes {}",
+ kind,
+ expected_str,
+ found_str,
+ );
+
+ err.span_label(span, format!("expected {} that takes {}", kind, expected_str));
+
+ if let Some(found_span) = found_span {
+ err.span_label(found_span, format!("takes {}", found_str));
+
+ // move |_| { ... }
+ // ^^^^^^^^-- def_span
+ //
+ // move |_| { ... }
+ // ^^^^^-- prefix
+ let prefix_span = self.tcx.sess.source_map().span_until_non_whitespace(found_span);
+ // move |_| { ... }
+ // ^^^-- pipe_span
+ let pipe_span =
+ if let Some(span) = found_span.trim_start(prefix_span) { span } else { found_span };
+
+ // Suggest taking and ignoring the arguments with `expected_args.len()` `_`s if
+ // no arguments were found (assume the user just wants to ignore args in this case).
+ // For example, if two arguments are expected, suggest `|_, _|`.
+ if found_args.is_empty() && is_closure {
+ let underscores = vec!["_"; expected_args.len()].join(", ");
+ err.span_suggestion_verbose(
+ pipe_span,
+ &format!(
+ "consider changing the closure to take and ignore the expected argument{}",
+ pluralize!(expected_args.len())
+ ),
+ format!("|{}|", underscores),
+ Applicability::MachineApplicable,
+ );
+ }
+
+ if let &[ArgKind::Tuple(_, ref fields)] = &found_args[..] {
+ if fields.len() == expected_args.len() {
+ let sugg = fields
+ .iter()
+ .map(|(name, _)| name.to_owned())
+ .collect::<Vec<String>>()
+ .join(", ");
+ err.span_suggestion_verbose(
+ found_span,
+ "change the closure to take multiple arguments instead of a single tuple",
+ format!("|{}|", sugg),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ if let &[ArgKind::Tuple(_, ref fields)] = &expected_args[..]
+ && fields.len() == found_args.len()
+ && is_closure
+ {
+ let sugg = format!(
+ "|({}){}|",
+ found_args
+ .iter()
+ .map(|arg| match arg {
+ ArgKind::Arg(name, _) => name.to_owned(),
+ _ => "_".to_owned(),
+ })
+ .collect::<Vec<String>>()
+ .join(", "),
+ // add type annotations if available
+ if found_args.iter().any(|arg| match arg {
+ ArgKind::Arg(_, ty) => ty != "_",
+ _ => false,
+ }) {
+ format!(
+ ": ({})",
+ fields
+ .iter()
+ .map(|(_, ty)| ty.to_owned())
+ .collect::<Vec<String>>()
+ .join(", ")
+ )
+ } else {
+ String::new()
+ },
+ );
+ err.span_suggestion_verbose(
+ found_span,
+ "change the closure to accept a tuple instead of individual arguments",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+
+ err
+ }
+
+ fn type_implements_fn_trait(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: ty::Binder<'tcx, Ty<'tcx>>,
+ constness: ty::BoundConstness,
+ polarity: ty::ImplPolarity,
+ ) -> Result<(ty::ClosureKind, ty::Binder<'tcx, Ty<'tcx>>), ()> {
+ self.commit_if_ok(|_| {
+ for trait_def_id in [
+ self.tcx.lang_items().fn_trait(),
+ self.tcx.lang_items().fn_mut_trait(),
+ self.tcx.lang_items().fn_once_trait(),
+ ] {
+ let Some(trait_def_id) = trait_def_id else { continue };
+ // Make a fresh inference variable so we can determine what the substitutions
+ // of the trait are.
+ let var = self.next_ty_var(TypeVariableOrigin {
+ span: DUMMY_SP,
+ kind: TypeVariableOriginKind::MiscVariable,
+ });
+ let substs = self.tcx.mk_substs_trait(ty.skip_binder(), &[var.into()]);
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ param_env,
+ ty.rebind(ty::TraitPredicate {
+ trait_ref: ty::TraitRef::new(trait_def_id, substs),
+ constness,
+ polarity,
+ })
+ .to_predicate(self.tcx),
+ );
+ let mut fulfill_cx = FulfillmentContext::new_in_snapshot();
+ fulfill_cx.register_predicate_obligation(self, obligation);
+ if fulfill_cx.select_all_or_error(self).is_empty() {
+ return Ok((
+ ty::ClosureKind::from_def_id(self.tcx, trait_def_id)
+ .expect("expected to map DefId to ClosureKind"),
+ ty.rebind(self.resolve_vars_if_possible(var)),
+ ));
+ }
+ }
+
+ Err(())
+ })
+ }
+}
+
+trait InferCtxtPrivExt<'hir, 'tcx> {
+ // Returns whether `cond` not occurring implies that `error` does not occur - i.e., that
+ // `error` occurring implies that `cond` occurs.
+ fn error_implies(&self, cond: ty::Predicate<'tcx>, error: ty::Predicate<'tcx>) -> bool;
+
+ fn report_fulfillment_error(
+ &self,
+ error: &FulfillmentError<'tcx>,
+ body_id: Option<hir::BodyId>,
+ fallback_has_occurred: bool,
+ );
+
+ fn report_projection_error(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ error: &MismatchedProjectionTypes<'tcx>,
+ );
+
+ fn fuzzy_match_tys(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ ignoring_lifetimes: bool,
+ ) -> Option<CandidateSimilarity>;
+
+ fn describe_generator(&self, body_id: hir::BodyId) -> Option<&'static str>;
+
+ fn find_similar_impl_candidates(
+ &self,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> Vec<ImplCandidate<'tcx>>;
+
+ fn report_similar_impl_candidates(
+ &self,
+ impl_candidates: Vec<ImplCandidate<'tcx>>,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ body_id: hir::HirId,
+ err: &mut Diagnostic,
+ ) -> bool;
+
+ /// Gets the start of the parent trait chain.
+ fn get_parent_trait_ref(
+ &self,
+ code: &ObligationCauseCode<'tcx>,
+ ) -> Option<(String, Option<Span>)>;
+
+ /// If the `Self` type of the unsatisfied trait `trait_ref` implements a trait
+ /// with the same path as `trait_ref`, a help message about
+ /// a probable version mismatch is added to `err`
+ fn note_version_mismatch(
+ &self,
+ err: &mut Diagnostic,
+ trait_ref: &ty::PolyTraitRef<'tcx>,
+ ) -> bool;
+
+ /// Creates a `PredicateObligation` with `new_self_ty` replacing the existing type in the
+ /// `trait_ref`.
+ ///
+ /// For this to work, `new_self_ty` must have no escaping bound variables.
+ fn mk_trait_obligation_with_new_self_ty(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ trait_ref_and_ty: ty::Binder<'tcx, (ty::TraitPredicate<'tcx>, Ty<'tcx>)>,
+ ) -> PredicateObligation<'tcx>;
+
+ fn maybe_report_ambiguity(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ body_id: Option<hir::BodyId>,
+ );
+
+ fn predicate_can_apply(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ pred: ty::PolyTraitRef<'tcx>,
+ ) -> bool;
+
+ fn note_obligation_cause(&self, err: &mut Diagnostic, obligation: &PredicateObligation<'tcx>);
+
+ fn suggest_unsized_bound_if_applicable(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ );
+
+ fn annotate_source_of_ambiguity(
+ &self,
+ err: &mut Diagnostic,
+ impls: &[DefId],
+ predicate: ty::Predicate<'tcx>,
+ );
+
+ fn maybe_suggest_unsized_generics(&self, err: &mut Diagnostic, span: Span, node: Node<'hir>);
+
+ fn maybe_indirection_for_unsized(
+ &self,
+ err: &mut Diagnostic,
+ item: &'hir Item<'hir>,
+ param: &'hir GenericParam<'hir>,
+ ) -> bool;
+
+ fn is_recursive_obligation(
+ &self,
+ obligated_types: &mut Vec<Ty<'tcx>>,
+ cause_code: &ObligationCauseCode<'tcx>,
+ ) -> bool;
+}
+
+impl<'a, 'tcx> InferCtxtPrivExt<'a, 'tcx> for InferCtxt<'a, 'tcx> {
+ // Returns whether `cond` not occurring implies that `error` does not occur - i.e., that
+ // `error` occurring implies that `cond` occurs.
+ fn error_implies(&self, cond: ty::Predicate<'tcx>, error: ty::Predicate<'tcx>) -> bool {
+ if cond == error {
+ return true;
+ }
+
+ // FIXME: It should be possible to deal with `ForAll` in a cleaner way.
+ let bound_error = error.kind();
+ let (cond, error) = match (cond.kind().skip_binder(), bound_error.skip_binder()) {
+ (ty::PredicateKind::Trait(..), ty::PredicateKind::Trait(error)) => {
+ (cond, bound_error.rebind(error))
+ }
+ _ => {
+ // FIXME: make this work in other cases too.
+ return false;
+ }
+ };
+
+ for obligation in super::elaborate_predicates(self.tcx, std::iter::once(cond)) {
+ let bound_predicate = obligation.predicate.kind();
+ if let ty::PredicateKind::Trait(implication) = bound_predicate.skip_binder() {
+ let error = error.to_poly_trait_ref();
+ let implication = bound_predicate.rebind(implication.trait_ref);
+ // FIXME: I'm just not taking associated types into account at all here.
+ // Eventually I'll need to implement param-env-aware
+ // `Γ₁ ⊦ φ₁ => Γ₂ ⊦ φ₂` logic.
+ let param_env = ty::ParamEnv::empty();
+ if self.can_sub(param_env, error, implication).is_ok() {
+ debug!("error_implies: {:?} -> {:?} -> {:?}", cond, error, implication);
+ return true;
+ }
+ }
+ }
+
+ false
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn report_fulfillment_error(
+ &self,
+ error: &FulfillmentError<'tcx>,
+ body_id: Option<hir::BodyId>,
+ fallback_has_occurred: bool,
+ ) {
+ match error.code {
+ FulfillmentErrorCode::CodeSelectionError(ref selection_error) => {
+ self.report_selection_error(
+ error.obligation.clone(),
+ &error.root_obligation,
+ selection_error,
+ fallback_has_occurred,
+ );
+ }
+ FulfillmentErrorCode::CodeProjectionError(ref e) => {
+ self.report_projection_error(&error.obligation, e);
+ }
+ FulfillmentErrorCode::CodeAmbiguity => {
+ self.maybe_report_ambiguity(&error.obligation, body_id);
+ }
+ FulfillmentErrorCode::CodeSubtypeError(ref expected_found, ref err) => {
+ self.report_mismatched_types(
+ &error.obligation.cause,
+ expected_found.expected,
+ expected_found.found,
+ err.clone(),
+ )
+ .emit();
+ }
+ FulfillmentErrorCode::CodeConstEquateError(ref expected_found, ref err) => {
+ self.report_mismatched_consts(
+ &error.obligation.cause,
+ expected_found.expected,
+ expected_found.found,
+ err.clone(),
+ )
+ .emit();
+ }
+ }
+ }
+
+ #[instrument(level = "debug", skip_all)]
+ fn report_projection_error(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ error: &MismatchedProjectionTypes<'tcx>,
+ ) {
+ let predicate = self.resolve_vars_if_possible(obligation.predicate);
+
+ if predicate.references_error() {
+ return;
+ }
+
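+ // Run inside a probe (an inference snapshot that is always rolled
+ // back) so the equating and normalizing below cannot leak inference
+ // constraints out of error reporting.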
+ self.probe(|_| {
+ let err_buf;
+ let mut err = &error.err;
+ let mut values = None;
+
+ // Try to find the mismatched types to report the error with.
+ //
+ // This can fail if the problem was higher-ranked, in which
+ // case I have no idea for a good error message.
+ let bound_predicate = predicate.kind();
+ if let ty::PredicateKind::Projection(data) = bound_predicate.skip_binder() {
+ let mut selcx = SelectionContext::new(self);
+ let data = self.replace_bound_vars_with_fresh_vars(
+ obligation.cause.span,
+ infer::LateBoundRegionConversionTime::HigherRankedType,
+ bound_predicate.rebind(data),
+ );
+ let mut obligations = vec![];
+ let normalized_ty = super::normalize_projection_type(
+ &mut selcx,
+ obligation.param_env,
+ data.projection_ty,
+ obligation.cause.clone(),
+ 0,
+ &mut obligations,
+ );
+
+ debug!(?obligation.cause, ?obligation.param_env);
+
+ debug!(?normalized_ty, data.ty = ?data.term);
+
+ let is_normalized_ty_expected = !matches!(
+ obligation.cause.code().peel_derives(),
+ ObligationCauseCode::ItemObligation(_)
+ | ObligationCauseCode::BindingObligation(_, _)
+ | ObligationCauseCode::ObjectCastObligation(..)
+ | ObligationCauseCode::OpaqueType
+ );
+ if let Err(error) = self.at(&obligation.cause, obligation.param_env).eq_exp(
+ is_normalized_ty_expected,
+ normalized_ty,
+ data.term,
+ ) {
+ values = Some(infer::ValuePairs::Terms(ExpectedFound::new(
+ is_normalized_ty_expected,
+ normalized_ty,
+ data.term,
+ )));
+ err_buf = error;
+ err = &err_buf;
+ }
+ }
+
+ let mut diag = struct_span_err!(
+ self.tcx.sess,
+ obligation.cause.span,
+ E0271,
+ "type mismatch resolving `{}`",
+ predicate
+ );
+ let secondary_span = match predicate.kind().skip_binder() {
+ ty::PredicateKind::Projection(proj) => self
+ .tcx
+ .opt_associated_item(proj.projection_ty.item_def_id)
+ .and_then(|trait_assoc_item| {
+ self.tcx
+ .trait_of_item(proj.projection_ty.item_def_id)
+ .map(|id| (trait_assoc_item, id))
+ })
+ .and_then(|(trait_assoc_item, id)| {
+ let trait_assoc_ident = trait_assoc_item.ident(self.tcx);
+ self.tcx.find_map_relevant_impl(id, proj.projection_ty.self_ty(), |did| {
+ self.tcx
+ .associated_items(did)
+ .in_definition_order()
+ .find(|assoc| assoc.ident(self.tcx) == trait_assoc_ident)
+ })
+ })
+ .and_then(|item| match self.tcx.hir().get_if_local(item.def_id) {
+ Some(
+ hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Type(_, Some(ty)),
+ ..
+ })
+ | hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::TyAlias(ty),
+ ..
+ }),
+ ) => Some((ty.span, format!("type mismatch resolving `{}`", predicate))),
+ _ => None,
+ }),
+ _ => None,
+ };
+ self.note_type_err(
+ &mut diag,
+ &obligation.cause,
+ secondary_span,
+ values,
+ err,
+ true,
+ false,
+ );
+ self.note_obligation_cause(&mut diag, obligation);
+ diag.emit();
+ });
+ }
+
+ fn fuzzy_match_tys(
+ &self,
+ mut a: Ty<'tcx>,
+ mut b: Ty<'tcx>,
+ ignoring_lifetimes: bool,
+ ) -> Option<CandidateSimilarity> {
+ /// Returns the fuzzy category of a given type, or `None`
+ /// if the type can be equated to any type.
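+ /// (Illustratively, `str` and `String` both land in category 2 and so
+ /// fuzzy-match each other, while inference variables return `None` and
+ /// can match any type.)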
+ fn type_category(tcx: TyCtxt<'_>, t: Ty<'_>) -> Option<u32> {
+ match t.kind() {
+ ty::Bool => Some(0),
+ ty::Char => Some(1),
+ ty::Str => Some(2),
+ ty::Adt(def, _) if tcx.is_diagnostic_item(sym::String, def.did()) => Some(2),
+ ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::Infer(ty::IntVar(..) | ty::FloatVar(..)) => Some(4),
+ ty::Ref(..) | ty::RawPtr(..) => Some(5),
+ ty::Array(..) | ty::Slice(..) => Some(6),
+ ty::FnDef(..) | ty::FnPtr(..) => Some(7),
+ ty::Dynamic(..) => Some(8),
+ ty::Closure(..) => Some(9),
+ ty::Tuple(..) => Some(10),
+ ty::Param(..) => Some(11),
+ ty::Projection(..) => Some(12),
+ ty::Opaque(..) => Some(13),
+ ty::Never => Some(14),
+ ty::Adt(..) => Some(15),
+ ty::Generator(..) => Some(16),
+ ty::Foreign(..) => Some(17),
+ ty::GeneratorWitness(..) => Some(18),
+ ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error(_) => None,
+ }
+ }
+
+ let strip_references = |mut t: Ty<'tcx>| -> Ty<'tcx> {
+ loop {
+ match t.kind() {
+ ty::Ref(_, inner, _) | ty::RawPtr(ty::TypeAndMut { ty: inner, .. }) => {
+ t = *inner
+ }
+ _ => break t,
+ }
+ }
+ };
+
+ if !ignoring_lifetimes {
+ a = strip_references(a);
+ b = strip_references(b);
+ }
+
+ let cat_a = type_category(self.tcx, a)?;
+ let cat_b = type_category(self.tcx, b)?;
+ if a == b {
+ Some(CandidateSimilarity::Exact { ignoring_lifetimes })
+ } else if cat_a == cat_b {
+ match (a.kind(), b.kind()) {
+ (ty::Adt(def_a, _), ty::Adt(def_b, _)) => def_a == def_b,
+ (ty::Foreign(def_a), ty::Foreign(def_b)) => def_a == def_b,
+ // Matching on references results in a lot of unhelpful
+ // suggestions, so let's just not do that for now.
+ //
+ // We still upgrade successful matches to `ignoring_lifetimes: true`
+ // to prioritize that impl.
+ (ty::Ref(..) | ty::RawPtr(..), ty::Ref(..) | ty::RawPtr(..)) => {
+ self.fuzzy_match_tys(a, b, true).is_some()
+ }
+ _ => true,
+ }
+ .then_some(CandidateSimilarity::Fuzzy { ignoring_lifetimes })
+ } else if ignoring_lifetimes {
+ None
+ } else {
+ self.fuzzy_match_tys(a, b, true)
+ }
+ }
+
+ fn describe_generator(&self, body_id: hir::BodyId) -> Option<&'static str> {
+ self.tcx.hir().body(body_id).generator_kind.map(|gen_kind| match gen_kind {
+ hir::GeneratorKind::Gen => "a generator",
+ hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) => "an async block",
+ hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Fn) => "an async function",
+ hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Closure) => "an async closure",
+ })
+ }
+
+ fn find_similar_impl_candidates(
+ &self,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> Vec<ImplCandidate<'tcx>> {
+ self.tcx
+ .all_impls(trait_pred.def_id())
+ .filter_map(|def_id| {
+ if self.tcx.impl_polarity(def_id) == ty::ImplPolarity::Negative
+ || !trait_pred
+ .skip_binder()
+ .is_constness_satisfied_by(self.tcx.constness(def_id))
+ {
+ return None;
+ }
+
+ let imp = self.tcx.impl_trait_ref(def_id).unwrap();
+
+ self.fuzzy_match_tys(trait_pred.skip_binder().self_ty(), imp.self_ty(), false)
+ .map(|similarity| ImplCandidate { trait_ref: imp, similarity })
+ })
+ .collect()
+ }
+
+ fn report_similar_impl_candidates(
+ &self,
+ impl_candidates: Vec<ImplCandidate<'tcx>>,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ body_id: hir::HirId,
+ err: &mut Diagnostic,
+ ) -> bool {
+ let report = |mut candidates: Vec<TraitRef<'tcx>>, err: &mut Diagnostic| {
+ candidates.sort();
+ candidates.dedup();
+ let len = candidates.len();
+ if candidates.is_empty() {
+ return false;
+ }
+ if candidates.len() == 1 {
+ err.highlighted_help(vec![
+ (
+ format!("the trait `{}` ", candidates[0].print_only_trait_path()),
+ Style::NoStyle,
+ ),
+ ("is".to_string(), Style::Highlight),
+ (" implemented for `".to_string(), Style::NoStyle),
+ (candidates[0].self_ty().to_string(), Style::Highlight),
+ ("`".to_string(), Style::NoStyle),
+ ]);
+ return true;
+ }
+ let trait_ref = TraitRef::identity(self.tcx, candidates[0].def_id);
+ // Check if the trait is the same in all cases. If so, we'll only show the type.
+ let mut traits: Vec<_> =
+ candidates.iter().map(|c| c.print_only_trait_path().to_string()).collect();
+ traits.sort();
+ traits.dedup();
+
+ let mut candidates: Vec<String> = candidates
+ .into_iter()
+ .map(|c| {
+ if traits.len() == 1 {
+ format!("\n {}", c.self_ty())
+ } else {
+ format!("\n {}", c)
+ }
+ })
+ .collect();
+
+ candidates.sort();
+ candidates.dedup();
+ let end = if candidates.len() <= 9 { candidates.len() } else { 8 };
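+ // Illustrative: with 12 distinct candidates this prints the first 8
+ // followed by "and 4 others"; with 9 or fewer, all are printed.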
+ err.help(&format!(
+ "the following other types implement trait `{}`:{}{}",
+ trait_ref.print_only_trait_path(),
+ candidates[..end].join(""),
+ if len > 9 { format!("\nand {} others", len - 8) } else { String::new() }
+ ));
+ true
+ };
+
+ let def_id = trait_ref.def_id();
+ if impl_candidates.is_empty() {
+ if self.tcx.trait_is_auto(def_id)
+ || self.tcx.lang_items().items().contains(&Some(def_id))
+ || self.tcx.get_diagnostic_name(def_id).is_some()
+ {
+ // Mentioning implementers of `Copy`, `Debug` and friends is not useful.
+ return false;
+ }
+ let normalized_impl_candidates: Vec<_> = self
+ .tcx
+ .all_impls(def_id)
+ // Ignore automatically derived impls and `!Trait` impls.
+ .filter(|&def_id| {
+ self.tcx.impl_polarity(def_id) != ty::ImplPolarity::Negative
+ || self.tcx.is_builtin_derive(def_id)
+ })
+ .filter_map(|def_id| self.tcx.impl_trait_ref(def_id))
+ .filter(|trait_ref| {
+ let self_ty = trait_ref.self_ty();
+ // Avoid mentioning type parameters.
+ if let ty::Param(_) = self_ty.kind() {
+ false
+ }
+ // Avoid mentioning types that are private to another crate
+ else if let ty::Adt(def, _) = self_ty.peel_refs().kind() {
+ // FIXME(compiler-errors): This could be generalized, both to
+ // be more granular, and probably look past other `#[fundamental]`
+ // types, too.
+ self.tcx
+ .visibility(def.did())
+ .is_accessible_from(body_id.owner.to_def_id(), self.tcx)
+ } else {
+ true
+ }
+ })
+ .collect();
+ return report(normalized_impl_candidates, err);
+ }
+
+ let normalize = |candidate| {
+ self.tcx.infer_ctxt().enter(|ref infcx| {
+ let normalized = infcx
+ .at(&ObligationCause::dummy(), ty::ParamEnv::empty())
+ .normalize(candidate)
+ .ok();
+ match normalized {
+ Some(normalized) => normalized.value,
+ None => candidate,
+ }
+ })
+ };
+
+ // Sort impl candidates so that ordering is consistent for UI tests.
+ // because the ordering of `impl_candidates` may not be deterministic:
+ // https://github.com/rust-lang/rust/pull/57475#issuecomment-455519507
+ //
+ // Prefer more similar candidates first, then sort lexicographically
+ // by their normalized string representation.
+ let mut normalized_impl_candidates_and_similarities = impl_candidates
+ .into_iter()
+ .map(|ImplCandidate { trait_ref, similarity }| {
+ let normalized = normalize(trait_ref);
+ (similarity, normalized)
+ })
+ .collect::<Vec<_>>();
+ normalized_impl_candidates_and_similarities.sort();
+ normalized_impl_candidates_and_similarities.dedup();
+
+ let normalized_impl_candidates = normalized_impl_candidates_and_similarities
+ .into_iter()
+ .map(|(_, normalized)| normalized)
+ .collect::<Vec<_>>();
+
+ report(normalized_impl_candidates, err)
+ }
+
+ /// Gets the start of the parent trait chain.
+ fn get_parent_trait_ref(
+ &self,
+ code: &ObligationCauseCode<'tcx>,
+ ) -> Option<(String, Option<Span>)> {
+ match code {
+ ObligationCauseCode::BuiltinDerivedObligation(data) => {
+ let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_pred);
+ match self.get_parent_trait_ref(&data.parent_code) {
+ Some(t) => Some(t),
+ None => {
+ let ty = parent_trait_ref.skip_binder().self_ty();
+ let span = TyCategory::from_ty(self.tcx, ty)
+ .map(|(_, def_id)| self.tcx.def_span(def_id));
+ Some((ty.to_string(), span))
+ }
+ }
+ }
+ ObligationCauseCode::FunctionArgumentObligation { parent_code, .. } => {
+ self.get_parent_trait_ref(&parent_code)
+ }
+ _ => None,
+ }
+ }
+
+ /// If the `Self` type of the unsatisfied trait `trait_ref` implements a trait
+ /// with the same path as `trait_ref`, a help message about
+ /// a probable version mismatch is added to `err`
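+ /// (This typically surfaces when two different versions of the same
+ /// crate are in the dependency graph, each defining a trait with the
+ /// same path.)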
+ fn note_version_mismatch(
+ &self,
+ err: &mut Diagnostic,
+ trait_ref: &ty::PolyTraitRef<'tcx>,
+ ) -> bool {
+ let get_trait_impl = |trait_def_id| {
+ self.tcx.find_map_relevant_impl(trait_def_id, trait_ref.skip_binder().self_ty(), Some)
+ };
+ let required_trait_path = self.tcx.def_path_str(trait_ref.def_id());
+ let traits_with_same_path: std::collections::BTreeSet<_> = self
+ .tcx
+ .all_traits()
+ .filter(|trait_def_id| *trait_def_id != trait_ref.def_id())
+ .filter(|trait_def_id| self.tcx.def_path_str(*trait_def_id) == required_trait_path)
+ .collect();
+ let mut suggested = false;
+ for trait_with_same_path in traits_with_same_path {
+ if let Some(impl_def_id) = get_trait_impl(trait_with_same_path) {
+ let impl_span = self.tcx.def_span(impl_def_id);
+ err.span_help(impl_span, "trait impl with same name found");
+ let trait_crate = self.tcx.crate_name(trait_with_same_path.krate);
+ let crate_msg = format!(
+ "perhaps two different versions of crate `{}` are being used?",
+ trait_crate
+ );
+ err.note(&crate_msg);
+ suggested = true;
+ }
+ }
+ suggested
+ }
+
+ fn mk_trait_obligation_with_new_self_ty(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ trait_ref_and_ty: ty::Binder<'tcx, (ty::TraitPredicate<'tcx>, Ty<'tcx>)>,
+ ) -> PredicateObligation<'tcx> {
+ let trait_pred = trait_ref_and_ty.map_bound_ref(|(tr, new_self_ty)| ty::TraitPredicate {
+ trait_ref: ty::TraitRef {
+ substs: self.tcx.mk_substs_trait(*new_self_ty, &tr.trait_ref.substs[1..]),
+ ..tr.trait_ref
+ },
+ ..*tr
+ });
+
+ Obligation::new(ObligationCause::dummy(), param_env, trait_pred.to_predicate(self.tcx))
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn maybe_report_ambiguity(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ body_id: Option<hir::BodyId>,
+ ) {
+ // We were unable to determine whether the obligation holds. This
+ // probably means insufficient type information, but it could also
+ // mean ambiguous impls. The latter *ought* to be a coherence
+ // violation, so we don't report it here.
+
+ let predicate = self.resolve_vars_if_possible(obligation.predicate);
+ let span = obligation.cause.span;
+
+ debug!(?predicate, obligation.cause.code = tracing::field::debug(&obligation.cause.code()));
+
+ // Ambiguity errors are often caused as fallout from earlier errors.
+ // In several of the cases below we therefore bail out if this `infcx`
+ // is tainted by an earlier error.
+
+ let bound_predicate = predicate.kind();
+ let mut err = match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(data) => {
+ let trait_ref = bound_predicate.rebind(data.trait_ref);
+ debug!(?trait_ref);
+
+ if predicate.references_error() {
+ return;
+ }
+
+ // This is kind of a hack: it frequently happens that some earlier
+ // error prevents types from being fully inferred, and then we get
+ // a bunch of uninteresting errors saying something like "<generic
+ // #0> doesn't implement Sized". It may even be true that we
+ // could just skip over all checks where the self-ty is an
+ // inference variable, but I was afraid that there might be an
+ // inference variable created, registered as an obligation, and
+ // then never forced by writeback, and hence by skipping here we'd
+ // be ignoring the fact that we don't KNOW the type works
+ // out. Though even that would probably be harmless, given that
+ // we're only talking about builtin traits, which are known to be
+ // inhabited. We used to check for `self.tcx.sess.has_errors()` to
+ // avoid inundating the user with unnecessary errors, but we now
+ // check upstream for type errors and don't add the obligations to
+ // begin with in those cases.
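+ // (For reference, a classic E0282 trigger is `let v = Vec::new();` with
+ // nothing else constraining the element type.)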
+ if self.tcx.lang_items().sized_trait() == Some(trait_ref.def_id()) {
+ if !self.is_tainted_by_errors() {
+ self.emit_inference_failure_err(
+ body_id,
+ span,
+ trait_ref.self_ty().skip_binder().into(),
+ ErrorCode::E0282,
+ false,
+ )
+ .emit();
+ }
+ return;
+ }
+
+ // Typically, this ambiguity should only happen if
+ // there are unresolved type inference variables
+ // (otherwise it would suggest a coherence
+ // failure). But given #21974 that is not necessarily
+ // the case -- we can have multiple where clauses that
+ // are only distinguished by a region, which results
+ // in an ambiguity even when all types are fully
+ // known, since we don't dispatch based on region
+ // relationships.
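+ //
+ // A sketch of the kind of code involved (adapted, not verbatim from the
+ // issue):
+ //
+ //     trait Foo { fn foo(&self); }
+ //     fn f<'a, 'b, T>(x: &'a T, y: &'b T)
+ //     where
+ //         &'a T: Foo,
+ //         &'b T: Foo,
+ //     {
+ //         x.foo(); // ambiguous: both where-clauses apply, differing only by region
+ //     }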
+
+ // Pick the first substitution that still contains inference variables as the one
+ // we're going to emit an error for. If there are none (see above), fall back to
+ // a more general error.
+ let subst = data.trait_ref.substs.iter().find(|s| s.has_infer_types_or_consts());
+
+ let mut err = if let Some(subst) = subst {
+ self.emit_inference_failure_err(body_id, span, subst, ErrorCode::E0283, true)
+ } else {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0283,
+ "type annotations needed: cannot satisfy `{}`",
+ predicate,
+ )
+ };
+
+ let obligation = Obligation::new(
+ obligation.cause.clone(),
+ obligation.param_env,
+ trait_ref.to_poly_trait_predicate(),
+ );
+ let mut selcx = SelectionContext::with_query_mode(
+ &self,
+ crate::traits::TraitQueryMode::Standard,
+ );
+ match selcx.select_from_obligation(&obligation) {
+ Err(SelectionError::Ambiguous(impls)) if impls.len() > 1 => {
+ self.annotate_source_of_ambiguity(&mut err, &impls, predicate);
+ }
+ _ => {
+ if self.is_tainted_by_errors() {
+ err.cancel();
+ return;
+ }
+ err.note(&format!("cannot satisfy `{}`", predicate));
+ }
+ }
+
+ if let ObligationCauseCode::ItemObligation(def_id) = *obligation.cause.code() {
+ self.suggest_fully_qualified_path(&mut err, def_id, span, trait_ref.def_id());
+ } else if let (
+ Ok(ref snippet),
+ &ObligationCauseCode::BindingObligation(def_id, _),
+ ) =
+ (self.tcx.sess.source_map().span_to_snippet(span), obligation.cause.code())
+ {
+ let generics = self.tcx.generics_of(def_id);
+ if generics.params.iter().any(|p| p.name != kw::SelfUpper)
+ && !snippet.ends_with('>')
+ && !generics.has_impl_trait()
+ && self.tcx.fn_trait_kind_from_lang_item(def_id).is_none()
+ {
+ // FIXME: To avoid spurious suggestions in functions where type arguments
+ // were already supplied, we check the snippet to make sure it doesn't
+ // end with a turbofish. Ideally we would have access to a `PathSegment`
+ // instead. Otherwise we would produce the following output:
+ //
+ // error[E0283]: type annotations needed
+ // --> $DIR/issue-54954.rs:3:24
+ // |
+ // LL | const ARR_LEN: usize = Tt::const_val::<[i8; 123]>();
+ // | ^^^^^^^^^^^^^^^^^^^^^^^^^^
+ // | |
+ // | cannot infer type
+ // | help: consider specifying the type argument
+ // | in the function call:
+ // | `Tt::const_val::<[i8; 123]>::<T>`
+ // ...
+ // LL | const fn const_val<T: Sized>() -> usize {
+ // | - required by this bound in `Tt::const_val`
+ // |
+ // = note: cannot satisfy `_: Tt`
+
+ // Clear any more general suggestions in favor of our specific one
+ err.clear_suggestions();
+
+ err.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ &format!(
+ "consider specifying the type argument{} in the function call",
+ pluralize!(generics.params.len()),
+ ),
+ format!(
+ "::<{}>",
+ generics
+ .params
+ .iter()
+ .map(|p| p.name.to_string())
+ .collect::<Vec<String>>()
+ .join(", ")
+ ),
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+
+ if let (Some(body_id), Some(ty::subst::GenericArgKind::Type(_))) =
+ (body_id, subst.map(|subst| subst.unpack()))
+ {
+ struct FindExprBySpan<'hir> {
+ span: Span,
+ result: Option<&'hir hir::Expr<'hir>>,
+ }
+
+ impl<'v> hir::intravisit::Visitor<'v> for FindExprBySpan<'v> {
+ fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) {
+ if self.span == ex.span {
+ self.result = Some(ex);
+ } else {
+ hir::intravisit::walk_expr(self, ex);
+ }
+ }
+ }
+
+ let mut expr_finder = FindExprBySpan { span, result: None };
+
+ expr_finder.visit_expr(&self.tcx.hir().body(body_id).value);
+
+ if let Some(hir::Expr {
+ kind: hir::ExprKind::Path(hir::QPath::Resolved(None, path)), ..
+ }) = expr_finder.result
+ && let [
+ ..,
+ trait_path_segment @ hir::PathSegment {
+ res: Some(rustc_hir::def::Res::Def(rustc_hir::def::DefKind::Trait, trait_id)),
+ ..
+ },
+ hir::PathSegment {
+ ident: assoc_item_name,
+ res: Some(rustc_hir::def::Res::Def(_, item_id)),
+ ..
+ }
+ ] = path.segments
+ && data.trait_ref.def_id == *trait_id
+ && self.tcx.trait_of_item(*item_id) == Some(*trait_id)
+ && !self.is_tainted_by_errors()
+ {
+ let (verb, noun) = match self.tcx.associated_item(item_id).kind {
+ ty::AssocKind::Const => ("refer to the", "constant"),
+ ty::AssocKind::Fn => ("call", "function"),
+ ty::AssocKind::Type => ("refer to the", "type"), // this is already covered by E0223, but this single match arm doesn't hurt here
+ };
+
+ // Replace the more general E0283 with a more specific error
+ err.cancel();
+ err = self.tcx.sess.struct_span_err_with_code(
+ span,
+ &format!(
+ "cannot {verb} associated {noun} on trait without specifying the corresponding `impl` type",
+ ),
+ rustc_errors::error_code!(E0790),
+ );
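+
+ // Illustrative E0790 trigger (shape follows the error-index example;
+ // the names are hypothetical):
+ //
+ //     trait Generator { fn create() -> u32; }
+ //     struct Impl;
+ //     impl Generator for Impl { fn create() -> u32 { 1 } }
+ //
+ //     let _: u32 = Generator::create();
+ //     // must name an impl: `<Impl as Generator>::create()`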
+
+ if let Some(local_def_id) = data.trait_ref.def_id.as_local()
+ && let Some(hir::Node::Item(hir::Item { ident: trait_name, kind: hir::ItemKind::Trait(_, _, _, _, trait_item_refs), .. })) = self.tcx.hir().find_by_def_id(local_def_id)
+ && let Some(method_ref) = trait_item_refs.iter().find(|item_ref| item_ref.ident == *assoc_item_name) {
+ err.span_label(method_ref.span, format!("`{}::{}` defined here", trait_name, assoc_item_name));
+ }
+
+ err.span_label(span, format!("cannot {verb} associated {noun} of trait"));
+
+ let trait_impls = self.tcx.trait_impls_of(data.trait_ref.def_id);
+
+ if trait_impls.blanket_impls().is_empty()
+ && let Some((impl_ty, _)) = trait_impls.non_blanket_impls().iter().next()
+ && let Some(impl_def_id) = impl_ty.def() {
+ let message = if trait_impls.non_blanket_impls().len() == 1 {
+ "use the fully-qualified path to the only available implementation".to_string()
+ } else {
+ format!(
+ "use a fully-qualified path to a specific available implementation ({} found)",
+ trait_impls.non_blanket_impls().len()
+ )
+ };
+
+ err.multipart_suggestion(
+ message,
+ vec![
+ (trait_path_segment.ident.span.shrink_to_lo(), format!("<{} as ", self.tcx.def_path(impl_def_id).to_string_no_crate_verbose())),
+ (trait_path_segment.ident.span.shrink_to_hi(), ">".to_string())
+ ],
+ Applicability::MaybeIncorrect
+ );
+ }
+ }
+ };
+
+ err
+ }
+
+ ty::PredicateKind::WellFormed(arg) => {
+ // Same hacky approach as above, to avoid deluging the
+ // user with error messages.
+ if arg.references_error()
+ || self.tcx.sess.has_errors().is_some()
+ || self.is_tainted_by_errors()
+ {
+ return;
+ }
+
+ self.emit_inference_failure_err(body_id, span, arg, ErrorCode::E0282, false)
+ }
+
+ ty::PredicateKind::Subtype(data) => {
+ if data.references_error()
+ || self.tcx.sess.has_errors().is_some()
+ || self.is_tainted_by_errors()
+ {
+ // No need to overwhelm the user with errors in such cases.
+ return;
+ }
+ let SubtypePredicate { a_is_expected: _, a, b } = data;
+ // Both must be type variables, or the other one would have been instantiated.
+ assert!(a.is_ty_var() && b.is_ty_var());
+ self.emit_inference_failure_err(body_id, span, a.into(), ErrorCode::E0282, true)
+ }
+ ty::PredicateKind::Projection(data) => {
+ if predicate.references_error() || self.is_tainted_by_errors() {
+ return;
+ }
+ let subst = data
+ .projection_ty
+ .substs
+ .iter()
+ .chain(Some(data.term.into_arg()))
+ .find(|g| g.has_infer_types_or_consts());
+ if let Some(subst) = subst {
+ let mut err = self.emit_inference_failure_err(
+ body_id,
+ span,
+ subst,
+ ErrorCode::E0284,
+ true,
+ );
+ err.note(&format!("cannot satisfy `{}`", predicate));
+ err
+ } else {
+ // If we can't find a substitution, just print a generic error
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0284,
+ "type annotations needed: cannot satisfy `{}`",
+ predicate,
+ );
+ err.span_label(span, &format!("cannot satisfy `{}`", predicate));
+ err
+ }
+ }
+
+ ty::PredicateKind::ConstEvaluatable(data) => {
+ if predicate.references_error() || self.is_tainted_by_errors() {
+ return;
+ }
+ let subst = data.substs.iter().find(|g| g.has_infer_types_or_consts());
+ if let Some(subst) = subst {
+ let err = self.emit_inference_failure_err(
+ body_id,
+ span,
+ subst,
+ ErrorCode::E0284,
+ true,
+ );
+ err
+ } else {
+ // If we can't find a substitution, just print a generic error
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0284,
+ "type annotations needed: cannot satisfy `{}`",
+ predicate,
+ );
+ err.span_label(span, &format!("cannot satisfy `{}`", predicate));
+ err
+ }
+ }
+ _ => {
+ if self.tcx.sess.has_errors().is_some() || self.is_tainted_by_errors() {
+ return;
+ }
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0284,
+ "type annotations needed: cannot satisfy `{}`",
+ predicate,
+ );
+ err.span_label(span, &format!("cannot satisfy `{}`", predicate));
+ err
+ }
+ };
+ self.note_obligation_cause(&mut err, obligation);
+ err.emit();
+ }
+
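+ /// Points at the `impl`s that make `predicate` ambiguous: local impls by
+ /// span, foreign impls by crate name (plus a pretty-printed impl header
+ /// when one is available).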
+ fn annotate_source_of_ambiguity(
+ &self,
+ err: &mut Diagnostic,
+ impls: &[DefId],
+ predicate: ty::Predicate<'tcx>,
+ ) {
+ let mut spans = vec![];
+ let mut crates = vec![];
+ let mut post = vec![];
+ for def_id in impls {
+ match self.tcx.span_of_impl(*def_id) {
+ Ok(span) => spans.push(span),
+ Err(name) => {
+ crates.push(name);
+ if let Some(header) = to_pretty_impl_header(self.tcx, *def_id) {
+ post.push(header);
+ }
+ }
+ }
+ }
+ let msg = format!("multiple `impl`s satisfying `{}` found", predicate);
+ let mut crate_names: Vec<_> = crates.iter().map(|n| format!("`{}`", n)).collect();
+ crate_names.sort();
+ crate_names.dedup();
+ post.sort();
+ post.dedup();
+
+ if self.is_tainted_by_errors()
+ && (crate_names.len() == 1
+ && spans.is_empty()
+ && ["`core`", "`alloc`", "`std`"].contains(&crate_names[0].as_str())
+ || predicate.visit_with(&mut HasNumericInferVisitor).is_break())
+ {
+ // Avoid complaining about other inference issues for expressions like
+ // `42 >> 1`, where the types are still `{integer}`: defer to the earlier
+ // error instead of emitting a spurious ambiguity note here.
+ // FIXME: do we need `trait_ref.skip_binder().self_ty().is_numeric() &&` too?
+ // NOTE(eddyb) this was `.cancel()`, but `err`
+ // is borrowed, so we can't fully defuse it.
+ err.downgrade_to_delayed_bug();
+ return;
+ }
+ let post = if post.len() > 4 {
+ format!(
+ ":\n{}\nand {} more",
+ post.iter().map(|p| format!("- {}", p)).take(4).collect::<Vec<_>>().join("\n"),
+ post.len() - 4,
+ )
+ } else if post.len() > 1 || (post.len() == 1 && post[0].contains('\n')) {
+ format!(":\n{}", post.iter().map(|p| format!("- {}", p)).collect::<Vec<_>>().join("\n"),)
+ } else if post.len() == 1 {
+ format!(": `{}`", post[0])
+ } else {
+ String::new()
+ };
+
+ match (spans.len(), crates.len(), crate_names.len()) {
+ (0, 0, 0) => {
+ err.note(&format!("cannot satisfy `{}`", predicate));
+ }
+ (0, _, 1) => {
+ err.note(&format!("{} in the `{}` crate{}", msg, crates[0], post,));
+ }
+ (0, _, _) => {
+ err.note(&format!(
+ "{} in the following crates: {}{}",
+ msg,
+ crate_names.join(", "),
+ post,
+ ));
+ }
+ (_, 0, 0) => {
+ let span: MultiSpan = spans.into();
+ err.span_note(span, &msg);
+ }
+ (_, 1, 1) => {
+ let span: MultiSpan = spans.into();
+ err.span_note(span, &msg);
+ err.note(
+ &format!("and another `impl` found in the `{}` crate{}", crates[0], post,),
+ );
+ }
+ _ => {
+ let span: MultiSpan = spans.into();
+ err.span_note(span, &msg);
+ err.note(&format!(
+ "and more `impl`s found in the following crates: {}{}",
+ crate_names.join(", "),
+ post,
+ ));
+ }
+ }
+ }
+
+ /// Returns `true` if the trait predicate may apply for *some* assignment
+ /// to the type parameters.
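+ ///
+ /// For example (illustrative): `Vec<T>: Clone` may apply, since it holds
+ /// whenever `T: Clone`, whereas `Vec<T>: Copy` can never apply for any `T`.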
+ fn predicate_can_apply(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ pred: ty::PolyTraitRef<'tcx>,
+ ) -> bool {
+ struct ParamToVarFolder<'a, 'tcx> {
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ var_map: FxHashMap<Ty<'tcx>, Ty<'tcx>>,
+ }
+
+ impl<'a, 'tcx> TypeFolder<'tcx> for ParamToVarFolder<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if let ty::Param(ty::ParamTy { name, .. }) = *ty.kind() {
+ let infcx = self.infcx;
+ *self.var_map.entry(ty).or_insert_with(|| {
+ infcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeParameterDefinition(name, None),
+ span: DUMMY_SP,
+ })
+ })
+ } else {
+ ty.super_fold_with(self)
+ }
+ }
+ }
+
+ self.probe(|_| {
+ let mut selcx = SelectionContext::new(self);
+
+ let cleaned_pred =
+ pred.fold_with(&mut ParamToVarFolder { infcx: self, var_map: Default::default() });
+
+ let cleaned_pred = super::project::normalize(
+ &mut selcx,
+ param_env,
+ ObligationCause::dummy(),
+ cleaned_pred,
+ )
+ .value;
+
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ param_env,
+ cleaned_pred.without_const().to_predicate(selcx.tcx()),
+ );
+
+ self.predicate_may_hold(&obligation)
+ })
+ }
+
+ fn note_obligation_cause(&self, err: &mut Diagnostic, obligation: &PredicateObligation<'tcx>) {
+ // First, attempt to add a note to this error with an async-await-specific
+ // message, and fall back to a regular note otherwise.
+ if !self.maybe_note_obligation_cause_for_async_await(err, obligation) {
+ self.note_obligation_cause_code(
+ err,
+ &obligation.predicate,
+ obligation.param_env,
+ obligation.cause.code(),
+ &mut vec![],
+ &mut Default::default(),
+ );
+ self.suggest_unsized_bound_if_applicable(err, obligation);
+ }
+ }
+
+ #[instrument(level = "debug", skip_all)]
+ fn suggest_unsized_bound_if_applicable(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ ) {
+ let (
+ ty::PredicateKind::Trait(pred),
+ &ObligationCauseCode::BindingObligation(item_def_id, span),
+ ) = (
+ obligation.predicate.kind().skip_binder(),
+ obligation.cause.code().peel_derives(),
+ ) else {
+ return;
+ };
+ debug!(?pred, ?item_def_id, ?span);
+
+ let (Some(node), true) = (
+ self.tcx.hir().get_if_local(item_def_id),
+ Some(pred.def_id()) == self.tcx.lang_items().sized_trait(),
+ ) else {
+ return;
+ };
+ self.maybe_suggest_unsized_generics(err, span, node);
+ }
+
+ #[instrument(level = "debug", skip_all)]
+ fn maybe_suggest_unsized_generics<'hir>(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ node: Node<'hir>,
+ ) {
+ let Some(generics) = node.generics() else {
+ return;
+ };
+ let sized_trait = self.tcx.lang_items().sized_trait();
+ debug!(?generics.params);
+ debug!(?generics.predicates);
+ let Some(param) = generics.params.iter().find(|param| param.span == span) else {
+ return;
+ };
+ let param_def_id = self.tcx.hir().local_def_id(param.hir_id);
+ // Check that none of the explicit trait bounds is `Sized`. Assume that an explicit
+ // `Sized` bound is there intentionally and we don't need to suggest relaxing it.
+ let explicitly_sized = generics
+ .bounds_for_param(param_def_id)
+ .flat_map(|bp| bp.bounds)
+ .any(|bound| bound.trait_ref().and_then(|tr| tr.trait_def_id()) == sized_trait);
+ if explicitly_sized {
+ return;
+ }
+ debug!(?param);
+ match node {
+ hir::Node::Item(
+ item @ hir::Item {
+ // Only suggest indirection for uses of type parameters in ADTs.
+ kind:
+ hir::ItemKind::Enum(..) | hir::ItemKind::Struct(..) | hir::ItemKind::Union(..),
+ ..
+ },
+ ) => {
+ if self.maybe_indirection_for_unsized(err, item, param) {
+ return;
+ }
+ }
+ _ => {}
+ };
+ // Didn't add an indirection suggestion, so add a general suggestion to relax `Sized`.
+ let (span, separator) = if let Some(s) = generics.bounds_span_for_suggestions(param_def_id)
+ {
+ (s, " +")
+ } else {
+ (span.shrink_to_hi(), ":")
+ };
+ err.span_suggestion_verbose(
+ span,
+ "consider relaxing the implicit `Sized` restriction",
+ format!("{} ?Sized", separator),
+ Applicability::MachineApplicable,
+ );
+ }
+
+ fn maybe_indirection_for_unsized<'hir>(
+ &self,
+ err: &mut Diagnostic,
+ item: &'hir Item<'hir>,
+ param: &'hir GenericParam<'hir>,
+ ) -> bool {
+ // Suggesting `T: ?Sized` is only valid in an ADT if `T` is only used in a
+ // borrow. `struct S<'a, T: ?Sized>(&'a T);` is valid, `struct S<T: ?Sized>(T);`
+ // is not. Look for invalid "bare" parameter uses, and suggest using indirection.
+ let mut visitor =
+ FindTypeParam { param: param.name.ident().name, invalid_spans: vec![], nested: false };
+ visitor.visit_item(item);
+ if visitor.invalid_spans.is_empty() {
+ return false;
+ }
+ let mut multispan: MultiSpan = param.span.into();
+ multispan.push_span_label(
+ param.span,
+ format!("this could be changed to `{}: ?Sized`...", param.name.ident()),
+ );
+ for sp in visitor.invalid_spans {
+ multispan.push_span_label(
+ sp,
+ format!("...if indirection were used here: `Box<{}>`", param.name.ident()),
+ );
+ }
+ err.span_help(
+ multispan,
+ &format!(
+ "you could relax the implicit `Sized` bound on `{T}` if it were \
+ used through indirection like `&{T}` or `Box<{T}>`",
+ T = param.name.ident(),
+ ),
+ );
+ true
+ }
+
+ fn is_recursive_obligation(
+ &self,
+ obligated_types: &mut Vec<Ty<'tcx>>,
+ cause_code: &ObligationCauseCode<'tcx>,
+ ) -> bool {
+ if let ObligationCauseCode::BuiltinDerivedObligation(ref data) = cause_code {
+ let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_pred);
+ let self_ty = parent_trait_ref.skip_binder().self_ty();
+ if obligated_types.iter().any(|ot| ot == &self_ty) {
+ return true;
+ }
+ if let ty::Adt(def, substs) = self_ty.kind()
+ && let [arg] = &substs[..]
+ && let ty::subst::GenericArgKind::Type(ty) = arg.unpack()
+ && let ty::Adt(inner_def, _) = ty.kind()
+ && inner_def == def
+ {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+/// Look for type `param` in an ADT being used only through a reference to confirm that suggesting
+/// `param: ?Sized` would be a valid constraint.
+struct FindTypeParam {
+ param: rustc_span::Symbol,
+ invalid_spans: Vec<Span>,
+ nested: bool,
+}
+
+impl<'v> Visitor<'v> for FindTypeParam {
+ fn visit_where_predicate(&mut self, _: &'v hir::WherePredicate<'v>) {
+ // Skip where-clauses, to avoid suggesting indirection for type parameters found there.
+ }
+
+ fn visit_ty(&mut self, ty: &hir::Ty<'_>) {
+ // We collect the spans of all uses of the "bare" type param, like in `field: T` or
+ // `field: (T, T)` where we could make `T: ?Sized` while skipping cases that are known to be
+ // valid like `field: &'a T` or `field: *mut T` and cases that *might* have further `Sized`
+ // obligations like `Box<T>` and `Vec<T>`, but we perform no extra analysis for those cases
+ // and suggest `T: ?Sized` regardless of their obligations. This is fine because the errors
+ // in that case should make what happened clear enough.
+ match ty.kind {
+ hir::TyKind::Ptr(_) | hir::TyKind::Rptr(..) | hir::TyKind::TraitObject(..) => {}
+ hir::TyKind::Path(hir::QPath::Resolved(None, path))
+ if path.segments.len() == 1 && path.segments[0].ident.name == self.param =>
+ {
+ if !self.nested {
+ debug!(?ty, "FindTypeParam::visit_ty");
+ self.invalid_spans.push(ty.span);
+ }
+ }
+ hir::TyKind::Path(_) => {
+ let prev = self.nested;
+ self.nested = true;
+ hir::intravisit::walk_ty(self, ty);
+ self.nested = prev;
+ }
+ _ => {
+ hir::intravisit::walk_ty(self, ty);
+ }
+ }
+ }
+}
+
+pub fn recursive_type_with_infinite_size_error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ type_def_id: DefId,
+ spans: Vec<(Span, Option<hir::HirId>)>,
+) {
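+ // Illustrative E0072 trigger (the classic textbook example):
+ //
+ //     enum List { Cons(u32, List), Nil }       // recursive without indirection
+ //     enum List { Cons(u32, Box<List>), Nil }  // OK with the suggested `Box`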
+ assert!(type_def_id.is_local());
+ let span = tcx.def_span(type_def_id);
+ let path = tcx.def_path_str(type_def_id);
+ let mut err =
+ struct_span_err!(tcx.sess, span, E0072, "recursive type `{}` has infinite size", path);
+ err.span_label(span, "recursive type has infinite size");
+ for &(span, _) in &spans {
+ err.span_label(span, "recursive without indirection");
+ }
+ let msg = format!(
+ "insert some indirection (e.g., a `Box`, `Rc`, or `&`) to make `{}` representable",
+ path,
+ );
+ if spans.len() <= 4 {
+ // FIXME(compiler-errors): This suggestion might be erroneous if Box is shadowed
+ err.multipart_suggestion(
+ &msg,
+ spans
+ .into_iter()
+ .flat_map(|(span, field_id)| {
+ if let Some(generic_span) = get_option_generic_from_field_id(tcx, field_id) {
+ // If we match an `Option` and can grab the span of the Option's generic, then
+ // suggest boxing the generic arg for a non-null niche optimization.
+ vec![
+ (generic_span.shrink_to_lo(), "Box<".to_string()),
+ (generic_span.shrink_to_hi(), ">".to_string()),
+ ]
+ } else {
+ vec![
+ (span.shrink_to_lo(), "Box<".to_string()),
+ (span.shrink_to_hi(), ">".to_string()),
+ ]
+ }
+ })
+ .collect(),
+ Applicability::HasPlaceholders,
+ );
+ } else {
+ err.help(&msg);
+ }
+ err.emit();
+}
+
+/// Extract the span for the generic type `T` of `Option<T>` in a field definition
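+///
+/// For example (illustrative), given a field `next: Option<List>`, this returns
+/// the span of `List`, so the caller can suggest `Option<Box<List>>`.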
+fn get_option_generic_from_field_id(tcx: TyCtxt<'_>, field_id: Option<hir::HirId>) -> Option<Span> {
+ let node = tcx.hir().find(field_id?);
+
+ // Expect a field from our field_id
+ let Some(hir::Node::Field(field_def)) = node
+ else { bug!("Expected HirId corresponding to FieldDef, found: {:?}", node) };
+
+ // Match a type that is a simple QPath with no Self
+ let hir::TyKind::Path(hir::QPath::Resolved(None, path)) = &field_def.ty.kind
+ else { return None };
+
+ // Check if the path we're checking resolves to Option
+ let hir::def::Res::Def(_, did) = path.res
+ else { return None };
+
+ // Bail if this path doesn't describe `::core::option::Option`
+ if !tcx.is_diagnostic_item(sym::Option, did) {
+ return None;
+ }
+
+ // Match a single generic arg in the 0th path segment
+ let generic_arg = path.segments.last()?.args?.args.get(0)?;
+
+ // Take the span out of the type, if it's a type
+ if let hir::GenericArg::Type(generic_ty) = generic_arg { Some(generic_ty.span) } else { None }
+}
+
+/// Summarizes information about an argument, used when reporting mismatches
+/// between the expected and found signatures of a closure or function.
+#[derive(Clone)]
+pub enum ArgKind {
+ /// An argument of non-tuple type. Parameters are (name, ty)
+ Arg(String, String),
+
+ /// An argument of tuple type. For a "found" argument, the span is
+ /// the location in the source of the pattern. For an "expected"
+ /// argument, it will be None. The vector is a list of (name, ty)
+ /// strings for the components of the tuple.
+ Tuple(Option<Span>, Vec<(String, String)>),
+}
+
+impl ArgKind {
+ fn empty() -> ArgKind {
+ ArgKind::Arg("_".to_owned(), "_".to_owned())
+ }
+
+ /// Creates an `ArgKind` from the expected type of an
+ /// argument. It has no name (`_`) and an optional source span.
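+ ///
+ /// For example (illustrative): `u32` becomes `ArgKind::Arg("_", "u32")`, and
+ /// `(i32, String)` becomes `ArgKind::Tuple(span, [("_", "i32"), ("_", "String")])`.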
+ pub fn from_expected_ty(t: Ty<'_>, span: Option<Span>) -> ArgKind {
+ match t.kind() {
+ ty::Tuple(tys) => ArgKind::Tuple(
+ span,
+ tys.iter().map(|ty| ("_".to_owned(), ty.to_string())).collect::<Vec<_>>(),
+ ),
+ _ => ArgKind::Arg("_".to_owned(), t.to_string()),
+ }
+ }
+}
+
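+/// Type visitor that breaks as soon as it encounters an integral or
+/// floating-point inference variable, e.g. the `{integer}` in `42 >> 1`.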
+struct HasNumericInferVisitor;
+
+impl<'tcx> ty::TypeVisitor<'tcx> for HasNumericInferVisitor {
+ type BreakTy = ();
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if matches!(ty.kind(), ty::Infer(ty::FloatVar(_) | ty::IntVar(_))) {
+ ControlFlow::Break(())
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
new file mode 100644
index 000000000..e6907637c
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/on_unimplemented.rs
@@ -0,0 +1,272 @@
+use super::{
+ ObligationCauseCode, OnUnimplementedDirective, OnUnimplementedNote, PredicateObligation,
+};
+use crate::infer::InferCtxt;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::subst::{Subst, SubstsRef};
+use rustc_middle::ty::{self, GenericParamDefKind};
+use rustc_span::symbol::sym;
+use std::iter;
+
+use super::InferCtxtPrivExt;
+
+pub trait InferCtxtExt<'tcx> {
+ /*private*/
+ fn impl_similar_to(
+ &self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> Option<(DefId, SubstsRef<'tcx>)>;
+
+ /*private*/
+ fn describe_enclosure(&self, hir_id: hir::HirId) -> Option<&'static str>;
+
+ fn on_unimplemented_note(
+ &self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> OnUnimplementedNote;
+}
+
+impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
+ fn impl_similar_to(
+ &self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> Option<(DefId, SubstsRef<'tcx>)> {
+ let tcx = self.tcx;
+ let param_env = obligation.param_env;
+ let trait_ref = tcx.erase_late_bound_regions(trait_ref);
+ let trait_self_ty = trait_ref.self_ty();
+
+ let mut self_match_impls = vec![];
+ let mut fuzzy_match_impls = vec![];
+
+ self.tcx.for_each_relevant_impl(trait_ref.def_id, trait_self_ty, |def_id| {
+ let impl_substs = self.fresh_substs_for_item(obligation.cause.span, def_id);
+ let impl_trait_ref = tcx.bound_impl_trait_ref(def_id).unwrap().subst(tcx, impl_substs);
+
+ let impl_self_ty = impl_trait_ref.self_ty();
+
+ if let Ok(..) = self.can_eq(param_env, trait_self_ty, impl_self_ty) {
+ self_match_impls.push((def_id, impl_substs));
+
+ if iter::zip(
+ trait_ref.substs.types().skip(1),
+ impl_trait_ref.substs.types().skip(1),
+ )
+ .all(|(u, v)| self.fuzzy_match_tys(u, v, false).is_some())
+ {
+ fuzzy_match_impls.push((def_id, impl_substs));
+ }
+ }
+ });
+
+ let impl_def_id_and_substs = if self_match_impls.len() == 1 {
+ self_match_impls[0]
+ } else if fuzzy_match_impls.len() == 1 {
+ fuzzy_match_impls[0]
+ } else {
+ return None;
+ };
+
+ tcx.has_attr(impl_def_id_and_substs.0, sym::rustc_on_unimplemented)
+ .then_some(impl_def_id_and_substs)
+ }
+
+ /// Used to set on_unimplemented's `ItemContext`
+ /// to be the enclosing (async) block/function/closure
+ fn describe_enclosure(&self, hir_id: hir::HirId) -> Option<&'static str> {
+ let hir = self.tcx.hir();
+ let node = hir.find(hir_id)?;
+ match &node {
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(sig, _, body_id), .. }) => {
+ self.describe_generator(*body_id).or_else(|| {
+ Some(match sig.header {
+ hir::FnHeader { asyncness: hir::IsAsync::Async, .. } => "an async function",
+ _ => "a function",
+ })
+ })
+ }
+ hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(body_id)),
+ ..
+ }) => self.describe_generator(*body_id).or_else(|| Some("a trait method")),
+ hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(sig, body_id),
+ ..
+ }) => self.describe_generator(*body_id).or_else(|| {
+ Some(match sig.header {
+ hir::FnHeader { asyncness: hir::IsAsync::Async, .. } => "an async method",
+ _ => "a method",
+ })
+ }),
+ hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(hir::Closure { body, movability, .. }),
+ ..
+ }) => self.describe_generator(*body).or_else(|| {
+ Some(if movability.is_some() { "an async closure" } else { "a closure" })
+ }),
+ hir::Node::Expr(hir::Expr { .. }) => {
+ let parent_hid = hir.get_parent_node(hir_id);
+ if parent_hid != hir_id { self.describe_enclosure(parent_hid) } else { None }
+ }
+ _ => None,
+ }
+ }
+
+ fn on_unimplemented_note(
+ &self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> OnUnimplementedNote {
+ let (def_id, substs) = self
+ .impl_similar_to(trait_ref, obligation)
+ .unwrap_or_else(|| (trait_ref.def_id(), trait_ref.skip_binder().substs));
+ let trait_ref = trait_ref.skip_binder();
+
+ let mut flags = vec![(
+ sym::ItemContext,
+ self.describe_enclosure(obligation.cause.body_id).map(|s| s.to_owned()),
+ )];
+
+ match obligation.cause.code() {
+ ObligationCauseCode::BuiltinDerivedObligation(..)
+ | ObligationCauseCode::ImplDerivedObligation(..)
+ | ObligationCauseCode::DerivedObligation(..) => {}
+ _ => {
+ // this is a "direct", user-specified, rather than derived,
+ // obligation.
+ flags.push((sym::direct, None));
+ }
+ }
+
+ if let ObligationCauseCode::ItemObligation(item)
+ | ObligationCauseCode::BindingObligation(item, _) = *obligation.cause.code()
+ {
+ // FIXME: maybe also have some way of handling methods
+ // from other traits? That would require name resolution,
+ // which we might want to do in some sort of hygienic way.
+ //
+ // Currently this only covers what's needed for `try`.
+ if self.tcx.trait_of_item(item) == Some(trait_ref.def_id) {
+ let method = self.tcx.item_name(item);
+ flags.push((sym::from_method, None));
+ flags.push((sym::from_method, Some(method.to_string())));
+ }
+ }
+
+ if let Some(k) = obligation.cause.span.desugaring_kind() {
+ flags.push((sym::from_desugaring, None));
+ flags.push((sym::from_desugaring, Some(format!("{:?}", k))));
+ }
+
+ // Add all types without trimmed paths.
+ ty::print::with_no_trimmed_paths!({
+ let generics = self.tcx.generics_of(def_id);
+ let self_ty = trait_ref.self_ty();
+ // This is also included through the generics list as `Self`,
+ // but the parser won't allow you to use it
+ flags.push((sym::_Self, Some(self_ty.to_string())));
+ if let Some(def) = self_ty.ty_adt_def() {
+ // We also want to be able to select self's original
+ // signature with no type arguments resolved
+ flags.push((sym::_Self, Some(self.tcx.type_of(def.did()).to_string())));
+ }
+
+ for param in generics.params.iter() {
+ let value = match param.kind {
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
+ substs[param.index as usize].to_string()
+ }
+ GenericParamDefKind::Lifetime => continue,
+ };
+ let name = param.name;
+ flags.push((name, Some(value)));
+
+ if let GenericParamDefKind::Type { .. } = param.kind {
+ let param_ty = substs[param.index as usize].expect_ty();
+ if let Some(def) = param_ty.ty_adt_def() {
+ // We also want to be able to select the parameter's
+ // original signature with no type arguments resolved
+ flags.push((name, Some(self.tcx.type_of(def.did()).to_string())));
+ }
+ }
+ }
+
+ if let Some(true) = self_ty.ty_adt_def().map(|def| def.did().is_local()) {
+ flags.push((sym::crate_local, None));
+ }
+
+ // Allow targeting all integers using `{integral}`, even if the exact type was resolved
+ if self_ty.is_integral() {
+ flags.push((sym::_Self, Some("{integral}".to_owned())));
+ }
+
+ if self_ty.is_array_slice() {
+ flags.push((sym::_Self, Some("&[]".to_owned())));
+ }
+
+ if self_ty.is_fn() {
+ let fn_sig = self_ty.fn_sig(self.tcx);
+ let shortname = match fn_sig.unsafety() {
+ hir::Unsafety::Normal => "fn",
+ hir::Unsafety::Unsafe => "unsafe fn",
+ };
+ flags.push((sym::_Self, Some(shortname.to_owned())));
+ }
+
+ // Slices give us `[]`, `[{ty}]`
+ if let ty::Slice(aty) = self_ty.kind() {
+ flags.push((sym::_Self, Some("[]".to_string())));
+ if let Some(def) = aty.ty_adt_def() {
+ // We also want to be able to select the slice's type's original
+ // signature with no type arguments resolved
+ flags.push((sym::_Self, Some(format!("[{}]", self.tcx.type_of(def.did())))));
+ }
+ if aty.is_integral() {
+ flags.push((sym::_Self, Some("[{integral}]".to_string())));
+ }
+ }
+
+ // Arrays give us `[]`, `[{ty}; _]` and `[{ty}; N]`
+ if let ty::Array(aty, len) = self_ty.kind() {
+ flags.push((sym::_Self, Some("[]".to_string())));
+ let len = len.kind().try_to_value().and_then(|v| v.try_to_machine_usize(self.tcx));
+ flags.push((sym::_Self, Some(format!("[{}; _]", aty))));
+ if let Some(n) = len {
+ flags.push((sym::_Self, Some(format!("[{}; {}]", aty, n))));
+ }
+ if let Some(def) = aty.ty_adt_def() {
+ // We also want to be able to select the array's type's original
+ // signature with no type arguments resolved
+ let def_ty = self.tcx.type_of(def.did());
+ flags.push((sym::_Self, Some(format!("[{def_ty}; _]"))));
+ if let Some(n) = len {
+ flags.push((sym::_Self, Some(format!("[{def_ty}; {n}]"))));
+ }
+ }
+ if aty.is_integral() {
+ flags.push((sym::_Self, Some("[{integral}; _]".to_string())));
+ if let Some(n) = len {
+ flags.push((sym::_Self, Some(format!("[{{integral}}; {n}]"))));
+ }
+ }
+ }
+ if let ty::Dynamic(traits, _) = self_ty.kind() {
+ for t in traits.iter() {
+ if let ty::ExistentialPredicate::Trait(trait_ref) = t.skip_binder() {
+ flags.push((sym::_Self, Some(self.tcx.def_path_str(trait_ref.def_id))))
+ }
+ }
+ }
+ });
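+
+ // These flags feed the `#[rustc_on_unimplemented]` filters. A hedged sketch
+ // of the attribute shape they match against (hypothetical trait, not taken
+ // from the standard library):
+ //
+ //     #[rustc_on_unimplemented(
+ //         on(_Self = "[{integral}]", label = "try iterating over the slice"),
+ //         message = "`{Self}` is not an iterator",
+ //     )]
+ //     trait MyIterator { /* ... */ }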
+
+ if let Ok(Some(command)) = OnUnimplementedDirective::of_item(self.tcx, def_id) {
+ command.evaluate(self.tcx, trait_ref, &flags)
+ } else {
+ OnUnimplementedNote::default()
+ }
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
new file mode 100644
index 000000000..219413121
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs
@@ -0,0 +1,3119 @@
+use super::{
+ EvaluationResult, Obligation, ObligationCause, ObligationCauseCode, PredicateObligation,
+ SelectionContext,
+};
+
+use crate::autoderef::Autoderef;
+use crate::infer::InferCtxt;
+use crate::traits::normalize_to;
+
+use hir::HirId;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_errors::{
+ error_code, pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder,
+ ErrorGuaranteed, MultiSpan, Style,
+};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{AsyncGeneratorKind, GeneratorKind, Node};
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::hir::map;
+use rustc_middle::ty::{
+ self, suggest_arbitrary_trait_bound, suggest_constraining_type_param, AdtKind, DefIdTree,
+ GeneratorDiagnosticData, GeneratorInteriorTypeCause, Infer, InferTy, IsSuggestable,
+ ProjectionPredicate, ToPredicate, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
+ TypeVisitable,
+};
+use rustc_middle::ty::{TypeAndMut, TypeckResults};
+use rustc_session::Limit;
+use rustc_span::def_id::LOCAL_CRATE;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{BytePos, DesugaringKind, ExpnKind, Span, DUMMY_SP};
+use rustc_target::spec::abi;
+use std::fmt;
+
+use super::InferCtxtPrivExt;
+use crate::infer::InferCtxtExt as _;
+use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+
+#[derive(Debug)]
+pub enum GeneratorInteriorOrUpvar {
+ // span of interior type
+ Interior(Span),
+ // span of upvar
+ Upvar(Span),
+}
+
+// This type provides a uniform interface to retrieve data on generators, whether it originated from
+// the local crate being compiled or from a foreign crate.
+#[derive(Debug)]
+pub enum GeneratorData<'tcx, 'a> {
+ Local(&'a TypeckResults<'tcx>),
+ Foreign(&'tcx GeneratorDiagnosticData<'tcx>),
+}
+
+impl<'tcx, 'a> GeneratorData<'tcx, 'a> {
+ // Try to get information about variables captured by the generator that match a type we
+ // are looking for with the `ty_matches` function. We use it to find the upvar that causes
+ // a failure to meet an obligation.
+ fn try_get_upvar_span<F>(
+ &self,
+ infer_context: &InferCtxt<'a, 'tcx>,
+ generator_did: DefId,
+ ty_matches: F,
+ ) -> Option<GeneratorInteriorOrUpvar>
+ where
+ F: Fn(ty::Binder<'tcx, Ty<'tcx>>) -> bool,
+ {
+ match self {
+ GeneratorData::Local(typeck_results) => {
+ infer_context.tcx.upvars_mentioned(generator_did).and_then(|upvars| {
+ upvars.iter().find_map(|(upvar_id, upvar)| {
+ let upvar_ty = typeck_results.node_type(*upvar_id);
+ let upvar_ty = infer_context.resolve_vars_if_possible(upvar_ty);
+ if ty_matches(ty::Binder::dummy(upvar_ty)) {
+ Some(GeneratorInteriorOrUpvar::Upvar(upvar.span))
+ } else {
+ None
+ }
+ })
+ })
+ }
+ GeneratorData::Foreign(_) => None,
+ }
+ }
+
+ // Try to get the span of a type being awaited on that matches the type we are looking for
+ // with the `ty_matches` function. We use it to find the awaited type that causes a failure
+ // to meet an obligation.
+ fn get_from_await_ty<F>(
+ &self,
+ visitor: AwaitsVisitor,
+ hir: map::Map<'tcx>,
+ ty_matches: F,
+ ) -> Option<Span>
+ where
+ F: Fn(ty::Binder<'tcx, Ty<'tcx>>) -> bool,
+ {
+ match self {
+ GeneratorData::Local(typeck_results) => visitor
+ .awaits
+ .into_iter()
+ .map(|id| hir.expect_expr(id))
+ .find(|await_expr| {
+ ty_matches(ty::Binder::dummy(typeck_results.expr_ty_adjusted(&await_expr)))
+ })
+ .map(|expr| expr.span),
+ GeneratorData::Foreign(generator_diagnostic_data) => visitor
+ .awaits
+ .into_iter()
+ .map(|id| hir.expect_expr(id))
+ .find(|await_expr| {
+ ty_matches(ty::Binder::dummy(
+ generator_diagnostic_data
+ .adjustments
+ .get(&await_expr.hir_id.local_id)
+ .map_or::<&[ty::adjustment::Adjustment<'tcx>], _>(&[], |a| &a[..])
+ .last()
+ .map_or_else::<Ty<'tcx>, _, _>(
+ || {
+ generator_diagnostic_data
+ .nodes_types
+ .get(&await_expr.hir_id.local_id)
+ .cloned()
+ .unwrap_or_else(|| {
+ bug!(
+ "node_type: no type for node `{}`",
+ ty::tls::with(|tcx| tcx
+ .hir()
+ .node_to_string(await_expr.hir_id))
+ )
+ })
+ },
+ |adj| adj.target,
+ ),
+ ))
+ })
+ .map(|expr| expr.span),
+ }
+ }
+
+ /// Get the type, expression, span and optional scope span of all types
+ /// that are live across the yield of this generator
+ fn get_generator_interior_types(
+ &self,
+ ) -> ty::Binder<'tcx, &[GeneratorInteriorTypeCause<'tcx>]> {
+ match self {
+ GeneratorData::Local(typeck_result) => {
+ typeck_result.generator_interior_types.as_deref()
+ }
+ GeneratorData::Foreign(generator_diagnostic_data) => {
+ generator_diagnostic_data.generator_interior_types.as_deref()
+ }
+ }
+ }
+
+ // Used to get the source of the data; note that we don't have as much information for
+ // generators originating from foreign crates.
+ fn is_foreign(&self) -> bool {
+ match self {
+ GeneratorData::Local(_) => false,
+ GeneratorData::Foreign(_) => true,
+ }
+ }
+}
+
+// This trait is public to expose the diagnostics methods to clippy.
+pub trait InferCtxtExt<'tcx> {
+ fn suggest_restricting_param_bound(
+ &self,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ proj_pred: Option<ty::PolyProjectionPredicate<'tcx>>,
+ body_id: hir::HirId,
+ );
+
+ fn suggest_dereferences(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool;
+
+ fn get_closure_name(&self, def_id: DefId, err: &mut Diagnostic, msg: &str) -> Option<Symbol>;
+
+ fn suggest_fn_call(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool;
+
+ fn suggest_add_reference_to_arg(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ has_custom_message: bool,
+ ) -> bool;
+
+ fn suggest_borrowing_for_object_cast(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ self_ty: Ty<'tcx>,
+ object_ty: Ty<'tcx>,
+ );
+
+ fn suggest_remove_reference(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool;
+
+ fn suggest_remove_await(&self, obligation: &PredicateObligation<'tcx>, err: &mut Diagnostic);
+
+ fn suggest_change_mut(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ );
+
+ fn suggest_semicolon_removal(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ span: Span,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool;
+
+ fn return_type_span(&self, obligation: &PredicateObligation<'tcx>) -> Option<Span>;
+
+ fn suggest_impl_trait(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ obligation: &PredicateObligation<'tcx>,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool;
+
+ fn point_at_returns_when_relevant(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ );
+
+ fn report_closure_arg_mismatch(
+ &self,
+ span: Span,
+ found_span: Option<Span>,
+ found: ty::PolyTraitRef<'tcx>,
+ expected: ty::PolyTraitRef<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+
+ fn suggest_fully_qualified_path(
+ &self,
+ err: &mut Diagnostic,
+ item_def_id: DefId,
+ span: Span,
+ trait_ref: DefId,
+ );
+
+ fn maybe_note_obligation_cause_for_async_await(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> bool;
+
+ fn note_obligation_cause_for_async_await(
+ &self,
+ err: &mut Diagnostic,
+ interior_or_upvar_span: GeneratorInteriorOrUpvar,
+ interior_extra_info: Option<(Option<Span>, Span, Option<hir::HirId>, Option<Span>)>,
+ is_async: bool,
+ outer_generator: Option<DefId>,
+ trait_pred: ty::TraitPredicate<'tcx>,
+ target_ty: Ty<'tcx>,
+ typeck_results: Option<&ty::TypeckResults<'tcx>>,
+ obligation: &PredicateObligation<'tcx>,
+ next_code: Option<&ObligationCauseCode<'tcx>>,
+ );
+
+ fn note_obligation_cause_code<T>(
+ &self,
+ err: &mut Diagnostic,
+ predicate: &T,
+ param_env: ty::ParamEnv<'tcx>,
+ cause_code: &ObligationCauseCode<'tcx>,
+ obligated_types: &mut Vec<Ty<'tcx>>,
+ seen_requirements: &mut FxHashSet<DefId>,
+ ) where
+ T: fmt::Display;
+
+ fn suggest_new_overflow_limit(&self, err: &mut Diagnostic);
+
+ /// Suggest awaiting the future before applying `?`: `future?` => `future.await?`
+ fn suggest_await_before_try(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ span: Span,
+ );
+
+ fn suggest_floating_point_literal(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_ref: &ty::PolyTraitRef<'tcx>,
+ );
+
+ fn suggest_derive(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ );
+
+ fn suggest_dereferencing_index(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ );
+}
+
+fn predicate_constraint(generics: &hir::Generics<'_>, pred: String) -> (Span, String) {
+ (
+ generics.tail_span_for_predicate_suggestion(),
+ format!("{} {}", generics.add_where_or_trailing_comma(), pred),
+ )
+}
+
+/// A type parameter needs more bounds. The trivial case is adding `where T: Bound`
+/// for a type parameter `T`, but it can also be an `impl Trait` parameter that
+/// needs to be decomposed into a named type param for cleaner code.
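+///
+/// For example (an illustrative sketch of the suggestion built below):
+/// `fn foo(t: impl Trait)` with an unmet projection bound becomes
+/// `fn foo<T: Trait>(t: T) where <T as Trait>::A: Bound`.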
+fn suggest_restriction<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ hir_id: HirId,
+ hir_generics: &hir::Generics<'tcx>,
+ msg: &str,
+ err: &mut Diagnostic,
+ fn_sig: Option<&hir::FnSig<'_>>,
+ projection: Option<&ty::ProjectionTy<'_>>,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ // When we are dealing with a trait, `super_traits` will be `Some`:
+ // Given `trait T: A + B + C {}`
+ // - ^^^^^^^^^ GenericBounds
+ // |
+ // &Ident
+ super_traits: Option<(&Ident, &hir::GenericBounds<'_>)>,
+) {
+ if hir_generics.where_clause_span.from_expansion()
+ || hir_generics.where_clause_span.desugaring_kind().is_some()
+ {
+ return;
+ }
+ let Some(item_id) = hir_id.as_owner() else { return; };
+ let generics = tcx.generics_of(item_id);
+ // Given `fn foo(t: impl Trait)` where `Trait` requires assoc type `A`...
+ if let Some((param, bound_str, fn_sig)) =
+ fn_sig.zip(projection).and_then(|(sig, p)| match p.self_ty().kind() {
+ // Shenanigans to get the `Trait` from the `impl Trait`.
+ ty::Param(param) => {
+ let param_def = generics.type_param(param, tcx);
+ if param_def.kind.is_synthetic() {
+ let bound_str =
+ param_def.name.as_str().strip_prefix("impl ")?.trim_start().to_string();
+ return Some((param_def, bound_str, sig));
+ }
+ None
+ }
+ _ => None,
+ })
+ {
+ let type_param_name = hir_generics.params.next_type_param_name(Some(&bound_str));
+ let trait_pred = trait_pred.fold_with(&mut ReplaceImplTraitFolder {
+ tcx,
+ param,
+ replace_ty: ty::ParamTy::new(generics.count() as u32, Symbol::intern(&type_param_name))
+ .to_ty(tcx),
+ });
+ if !trait_pred.is_suggestable(tcx, false) {
+ return;
+ }
+ // We know we have an `impl Trait` that doesn't satisfy a required projection.
+
+ // Find all of the occurrences of `impl Trait` for `Trait` in the function arguments'
+ // types. There should be at least one, but there might be *more* than one. In that
+ // case we could just ignore it and try to identify which one needs the restriction,
+ // but instead we choose to suggest replacing all instances of `impl Trait` with `T`
+ // where `T: Trait`.
+ let mut ty_spans = vec![];
+ for input in fn_sig.decl.inputs {
+ ReplaceImplTraitVisitor { ty_spans: &mut ty_spans, param_did: param.def_id }
+ .visit_ty(input);
+ }
+ // The type param `T: Trait` we will suggest to introduce.
+ let type_param = format!("{}: {}", type_param_name, bound_str);
+
+ let mut sugg = vec![
+ if let Some(span) = hir_generics.span_for_param_suggestion() {
+ (span, format!(", {}", type_param))
+ } else {
+ (hir_generics.span, format!("<{}>", type_param))
+ },
+ // `fn foo(t: impl Trait)`
+ // ^ suggest `where <T as Trait>::A: Bound`
+ predicate_constraint(hir_generics, trait_pred.to_predicate(tcx).to_string()),
+ ];
+ sugg.extend(ty_spans.into_iter().map(|s| (s, type_param_name.to_string())));
+
+ // Suggest `fn foo<T: Trait>(t: T) where <T as Trait>::A: Bound`.
+ // FIXME: once `#![feature(associated_type_bounds)]` is stabilized, we should suggest
+ // `fn foo(t: impl Trait<A: Bound>)` instead.
+ err.multipart_suggestion(
+ "introduce a type parameter with a trait bound instead of using `impl Trait`",
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ if !trait_pred.is_suggestable(tcx, false) {
+ return;
+ }
+ // Trivial case: `T` needs an extra bound: `T: Bound`.
+ let (sp, suggestion) = match (
+ hir_generics
+ .params
+ .iter()
+ .find(|p| !matches!(p.kind, hir::GenericParamKind::Type { synthetic: true, .. })),
+ super_traits,
+ ) {
+ (_, None) => {
+ predicate_constraint(hir_generics, trait_pred.to_predicate(tcx).to_string())
+ }
+ (None, Some((ident, []))) => (
+ ident.span.shrink_to_hi(),
+ format!(": {}", trait_pred.print_modifiers_and_trait_path()),
+ ),
+ (_, Some((_, [.., bounds]))) => (
+ bounds.span().shrink_to_hi(),
+ format!(" + {}", trait_pred.print_modifiers_and_trait_path()),
+ ),
+ (Some(_), Some((_, []))) => (
+ hir_generics.span.shrink_to_hi(),
+ format!(": {}", trait_pred.print_modifiers_and_trait_path()),
+ ),
+ };
+
+ err.span_suggestion_verbose(
+ sp,
+ &format!("consider further restricting {}", msg),
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ }
+}
+
+impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> {
+ fn suggest_restricting_param_bound(
+ &self,
+ mut err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ proj_pred: Option<ty::PolyProjectionPredicate<'tcx>>,
+ body_id: hir::HirId,
+ ) {
+ let trait_pred = self.resolve_numeric_literals_with_default(trait_pred);
+
+ let self_ty = trait_pred.skip_binder().self_ty();
+ let (param_ty, projection) = match self_ty.kind() {
+ ty::Param(_) => (true, None),
+ ty::Projection(projection) => (false, Some(projection)),
+ _ => (false, None),
+ };
+
+ // FIXME: Add check for trait bound that is already present, particularly `?Sized` so we
+ // don't suggest `T: Sized + ?Sized`.
+ let mut hir_id = body_id;
+ while let Some(node) = self.tcx.hir().find(hir_id) {
+ match node {
+ hir::Node::Item(hir::Item {
+ ident,
+ kind: hir::ItemKind::Trait(_, _, generics, bounds, _),
+ ..
+ }) if self_ty == self.tcx.types.self_param => {
+ assert!(param_ty);
+ // Restricting `Self` for a single method.
+ suggest_restriction(
+ self.tcx,
+ hir_id,
+ &generics,
+ "`Self`",
+ err,
+ None,
+ projection,
+ trait_pred,
+ Some((ident, bounds)),
+ );
+ return;
+ }
+
+ hir::Node::TraitItem(hir::TraitItem {
+ generics,
+ kind: hir::TraitItemKind::Fn(..),
+ ..
+ }) if self_ty == self.tcx.types.self_param => {
+ assert!(param_ty);
+ // Restricting `Self` for a single method.
+ suggest_restriction(
+ self.tcx, hir_id, &generics, "`Self`", err, None, projection, trait_pred,
+ None,
+ );
+ return;
+ }
+
+ hir::Node::TraitItem(hir::TraitItem {
+ generics,
+ kind: hir::TraitItemKind::Fn(fn_sig, ..),
+ ..
+ })
+ | hir::Node::ImplItem(hir::ImplItem {
+ generics,
+ kind: hir::ImplItemKind::Fn(fn_sig, ..),
+ ..
+ })
+ | hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Fn(fn_sig, generics, _), ..
+ }) if projection.is_some() => {
+ // Missing restriction on associated type of type parameter (unmet projection).
+ suggest_restriction(
+ self.tcx,
+ hir_id,
+ &generics,
+ "the associated type",
+ err,
+ Some(fn_sig),
+ projection,
+ trait_pred,
+ None,
+ );
+ return;
+ }
+ hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Trait(_, _, generics, ..)
+ | hir::ItemKind::Impl(hir::Impl { generics, .. }),
+ ..
+ }) if projection.is_some() => {
+ // Missing restriction on associated type of type parameter (unmet projection).
+ suggest_restriction(
+ self.tcx,
+ hir_id,
+ &generics,
+ "the associated type",
+ err,
+ None,
+ projection,
+ trait_pred,
+ None,
+ );
+ return;
+ }
+
+ hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Struct(_, generics)
+ | hir::ItemKind::Enum(_, generics)
+ | hir::ItemKind::Union(_, generics)
+ | hir::ItemKind::Trait(_, _, generics, ..)
+ | hir::ItemKind::Impl(hir::Impl { generics, .. })
+ | hir::ItemKind::Fn(_, generics, _)
+ | hir::ItemKind::TyAlias(_, generics)
+ | hir::ItemKind::TraitAlias(generics, _)
+ | hir::ItemKind::OpaqueTy(hir::OpaqueTy { generics, .. }),
+ ..
+ })
+ | hir::Node::TraitItem(hir::TraitItem { generics, .. })
+ | hir::Node::ImplItem(hir::ImplItem { generics, .. })
+ if param_ty =>
+ {
+ // We skip the 0'th subst (self) because we do not want
+ // to consider the predicate as not suggestible if the
+ // self type is an arg position `impl Trait` -- instead,
+ // we handle that by adding ` + Bound` below.
+ // FIXME(compiler-errors): It would be nice to do the same
+ // thing that we do in `suggest_restriction` and pull the
+ // `impl Trait` into a new generic if it shows up somewhere
+ // else in the predicate.
+ if !trait_pred.skip_binder().trait_ref.substs[1..]
+ .iter()
+ .all(|g| g.is_suggestable(self.tcx, false))
+ {
+ return;
+ }
+ // Missing generic type parameter bound.
+ let param_name = self_ty.to_string();
+ let mut constraint = with_no_trimmed_paths!(
+ trait_pred.print_modifiers_and_trait_path().to_string()
+ );
+
+ if let Some(proj_pred) = proj_pred {
+ let ProjectionPredicate { projection_ty, term } = proj_pred.skip_binder();
+ let item = self.tcx.associated_item(projection_ty.item_def_id);
+
+ // FIXME: this case overlaps with code in TyCtxt::note_and_explain_type_err.
+ // That should be extracted into a helper function.
+ if constraint.ends_with('>') {
+ constraint = format!(
+ "{}, {}={}>",
+ &constraint[..constraint.len() - 1],
+ item.name,
+ term
+ );
+ } else {
+ constraint.push_str(&format!("<{}={}>", item.name, term));
+ }
+ }
+
+ if suggest_constraining_type_param(
+ self.tcx,
+ generics,
+ &mut err,
+ &param_name,
+ &constraint,
+ Some(trait_pred.def_id()),
+ ) {
+ return;
+ }
+ }
+
+ hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Struct(_, generics)
+ | hir::ItemKind::Enum(_, generics)
+ | hir::ItemKind::Union(_, generics)
+ | hir::ItemKind::Trait(_, _, generics, ..)
+ | hir::ItemKind::Impl(hir::Impl { generics, .. })
+ | hir::ItemKind::Fn(_, generics, _)
+ | hir::ItemKind::TyAlias(_, generics)
+ | hir::ItemKind::TraitAlias(generics, _)
+ | hir::ItemKind::OpaqueTy(hir::OpaqueTy { generics, .. }),
+ ..
+ }) if !param_ty => {
+ // Missing generic type parameter bound.
+ if suggest_arbitrary_trait_bound(self.tcx, generics, &mut err, trait_pred) {
+ return;
+ }
+ }
+ hir::Node::Crate(..) => return,
+
+ _ => {}
+ }
+
+ hir_id = self.tcx.hir().local_def_id_to_hir_id(self.tcx.hir().get_parent_item(hir_id));
+ }
+ }
+
+ /// When the referenced value satisfies the trait bound only after one or
+ /// more dereferences, this function suggests inserting the needed
+ /// dereferences at the argument site.
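+ ///
+ /// For example (an illustrative sketch): if the argument is `&b` with
+ /// `b: Box<T>`, and the bound holds for `&T` but not for `&Box<T>`, we
+ /// suggest rewriting the argument as `&*b`.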
+ fn suggest_dereferences(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool {
+ // It only makes sense to suggest dereferences for arguments.
+ let ObligationCauseCode::FunctionArgumentObligation { .. } = obligation.cause.code() else {
+ return false;
+ };
+ let param_env = obligation.param_env;
+ let body_id = obligation.cause.body_id;
+ let span = obligation.cause.span;
+ let mut real_trait_pred = trait_pred;
+ let mut code = obligation.cause.code();
+ while let Some((parent_code, parent_trait_pred)) = code.parent() {
+ code = parent_code;
+ if let Some(parent_trait_pred) = parent_trait_pred {
+ real_trait_pred = parent_trait_pred;
+ }
+
+ // Skipping binder here, remapping below
+ let real_ty = real_trait_pred.self_ty().skip_binder();
+
+ if let ty::Ref(region, base_ty, mutbl) = *real_ty.kind() {
+ let mut autoderef = Autoderef::new(self, param_env, body_id, span, base_ty, span);
+ if let Some(steps) = autoderef.find_map(|(ty, steps)| {
+ // Re-add the `&`
+ let ty = self.tcx.mk_ref(region, TypeAndMut { ty, mutbl });
+
+ // Remapping bound vars here
+ let real_trait_pred_and_ty =
+ real_trait_pred.map_bound(|inner_trait_pred| (inner_trait_pred, ty));
+ let obligation = self
+ .mk_trait_obligation_with_new_self_ty(param_env, real_trait_pred_and_ty);
+ Some(steps).filter(|_| self.predicate_may_hold(&obligation))
+ }) {
+ if steps > 0 {
+ if let Ok(src) = self.tcx.sess.source_map().span_to_snippet(span) {
+ // Don't bother with `&mut`, because `DerefMut` is used less
+ // often and the user would not expect autoderef to happen there.
+ if src.starts_with('&') && !src.starts_with("&mut ") {
+ let derefs = "*".repeat(steps);
+ err.span_suggestion(
+ span,
+ "consider dereferencing here",
+ format!("&{}{}", derefs, &src[1..]),
+ Applicability::MachineApplicable,
+ );
+ return true;
+ }
+ }
+ }
+ } else if real_trait_pred != trait_pred {
+ // This branch addresses #87437.
+
+ // Remapping bound vars here
+ let real_trait_pred_and_base_ty =
+ real_trait_pred.map_bound(|inner_trait_pred| (inner_trait_pred, base_ty));
+ let obligation = self.mk_trait_obligation_with_new_self_ty(
+ param_env,
+ real_trait_pred_and_base_ty,
+ );
+ if self.predicate_may_hold(&obligation) {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ "consider dereferencing here",
+ "*",
+ Applicability::MachineApplicable,
+ );
+ return true;
+ }
+ }
+ }
+ }
+ false
+ }
+
+ /// Given a closure's `DefId`, return the given name of the closure.
+ ///
+ /// This doesn't account for reassignments, but it's only used for suggestions.
+ fn get_closure_name(&self, def_id: DefId, err: &mut Diagnostic, msg: &str) -> Option<Symbol> {
+ let get_name = |err: &mut Diagnostic, kind: &hir::PatKind<'_>| -> Option<Symbol> {
+ // Get the local name of this closure. This can be inaccurate because
+ // of the possibility of reassignment, but this should be good enough.
+ match &kind {
+ hir::PatKind::Binding(hir::BindingAnnotation::Unannotated, _, ident, None) => {
+ Some(ident.name)
+ }
+ _ => {
+ err.note(msg);
+ None
+ }
+ }
+ };
+
+ let hir = self.tcx.hir();
+ let hir_id = hir.local_def_id_to_hir_id(def_id.as_local()?);
+ let parent_node = hir.get_parent_node(hir_id);
+ match hir.find(parent_node) {
+ Some(hir::Node::Stmt(hir::Stmt { kind: hir::StmtKind::Local(local), .. })) => {
+ get_name(err, &local.pat.kind)
+ }
+ // Different from the previous arm because one is `&hir::Local` and the other
+ // is `P<hir::Local>`.
+ Some(hir::Node::Local(local)) => get_name(err, &local.pat.kind),
+ _ => None,
+ }
+ }
+
+ /// We tried to apply the bound to an `fn` or closure. Check whether calling it would
+ /// evaluate to a type that *would* satisfy the trait binding. If it would, suggest calling
+ /// it: `bar(foo)` → `bar(foo())`. This case is *very* likely to be hit if `foo` is `async`.
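+ ///
+ /// An illustrative sketch (hypothetical names, not from the compiler itself):
+ /// ```ignore (illustrative)
+ /// fn needs_future(_: impl std::future::Future) {}
+ /// async fn foo() {}
+ /// needs_future(foo); // error: `foo` is a fn item, not a future
+ /// needs_future(foo()); // suggested: calling `foo` yields an `impl Future`
+ /// ```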
+ fn suggest_fn_call(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool {
+ // Skipping binder here, remapping below
+ let self_ty = trait_pred.self_ty().skip_binder();
+
+ let (def_id, output_ty, callable) = match *self_ty.kind() {
+ ty::Closure(def_id, substs) => (def_id, substs.as_closure().sig().output(), "closure"),
+ ty::FnDef(def_id, _) => (def_id, self_ty.fn_sig(self.tcx).output(), "function"),
+ _ => return false,
+ };
+ let msg = format!("use parentheses to call the {}", callable);
+
+ // "We should really create a single list of bound vars from the combined vars
+ // from the predicate and function, but instead we just liberate the function bound vars"
+ let output_ty = self.tcx.liberate_late_bound_regions(def_id, output_ty);
+
+ // Remapping bound vars here
+ let trait_pred_and_self = trait_pred.map_bound(|trait_pred| (trait_pred, output_ty));
+
+ let new_obligation =
+ self.mk_trait_obligation_with_new_self_ty(obligation.param_env, trait_pred_and_self);
+
+ match self.evaluate_obligation(&new_obligation) {
+ Ok(
+ EvaluationResult::EvaluatedToOk
+ | EvaluationResult::EvaluatedToOkModuloRegions
+ | EvaluationResult::EvaluatedToOkModuloOpaqueTypes
+ | EvaluationResult::EvaluatedToAmbig,
+ ) => {}
+ _ => return false,
+ }
+ let hir = self.tcx.hir();
+ // Get the name of the callable and the arguments to be used in the suggestion.
+ let (snippet, sugg) = match hir.get_if_local(def_id) {
+ Some(hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(hir::Closure { fn_decl, fn_decl_span, .. }),
+ ..
+ })) => {
+ err.span_label(*fn_decl_span, "consider calling this closure");
+ let Some(name) = self.get_closure_name(def_id, err, &msg) else {
+ return false;
+ };
+ let args = fn_decl.inputs.iter().map(|_| "_").collect::<Vec<_>>().join(", ");
+ let sugg = format!("({})", args);
+ (format!("{}{}", name, sugg), sugg)
+ }
+ Some(hir::Node::Item(hir::Item {
+ ident,
+ kind: hir::ItemKind::Fn(.., body_id),
+ ..
+ })) => {
+ err.span_label(ident.span, "consider calling this function");
+ let body = hir.body(*body_id);
+ let args = body
+ .params
+ .iter()
+ .map(|arg| match &arg.pat.kind {
+ hir::PatKind::Binding(_, _, ident, None)
+ // FIXME: provide a better suggestion when encountering `SelfLower`, it
+ // should suggest a method call.
+ if ident.name != kw::SelfLower => ident.to_string(),
+ _ => "_".to_string(),
+ })
+ .collect::<Vec<_>>()
+ .join(", ");
+ let sugg = format!("({})", args);
+ (format!("{}{}", ident, sugg), sugg)
+ }
+ _ => return false,
+ };
+ if matches!(obligation.cause.code(), ObligationCauseCode::FunctionArgumentObligation { .. })
+ {
+ // When we know the obligation error was caused by an argument,
+ // `obligation.cause.span` points at the argument expression, so we
+ // can provide a precise suggestion. Otherwise, we give a more
+ // general note.
+ err.span_suggestion_verbose(
+ obligation.cause.span.shrink_to_hi(),
+ &msg,
+ sugg,
+ Applicability::HasPlaceholders,
+ );
+ } else {
+ err.help(&format!("{}: `{}`", msg, snippet));
+ }
+ true
+ }
+
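+ /// Suggest borrowing the argument when the original trait bound would be satisfied by
+ /// `&T` or `&mut T` instead of `T`. An illustrative sketch (hypothetical names):
+ /// ```ignore (illustrative)
+ /// trait Trait {}
+ /// impl Trait for &u32 {}
+ /// fn foo(_: impl Trait) {}
+ /// foo(0u32); // error; suggested: `foo(&0u32)`
+ /// ```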
+ fn suggest_add_reference_to_arg(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ poly_trait_pred: ty::PolyTraitPredicate<'tcx>,
+ has_custom_message: bool,
+ ) -> bool {
+ let span = obligation.cause.span;
+
+ let code = if let ObligationCauseCode::FunctionArgumentObligation { parent_code, .. } =
+ obligation.cause.code()
+ {
+ &parent_code
+ } else if let ExpnKind::Desugaring(DesugaringKind::ForLoop) =
+ span.ctxt().outer_expn_data().kind
+ {
+ obligation.cause.code()
+ } else {
+ return false;
+ };
+
+ // List of traits for which it would be nonsensical to suggest borrowing.
+ // For instance, immutable references are always Copy, so suggesting to
+ // borrow would always succeed, but it's probably not what the user wanted.
+ let mut never_suggest_borrow: Vec<_> =
+ [LangItem::Copy, LangItem::Clone, LangItem::Unpin, LangItem::Sized]
+ .iter()
+ .filter_map(|lang_item| self.tcx.lang_items().require(*lang_item).ok())
+ .collect();
+
+ if let Some(def_id) = self.tcx.get_diagnostic_item(sym::Send) {
+ never_suggest_borrow.push(def_id);
+ }
+
+ let param_env = obligation.param_env;
+
+ // Try to apply the original trait binding obligation by borrowing.
+ let mut try_borrowing =
+ |old_pred: ty::PolyTraitPredicate<'tcx>, blacklist: &[DefId]| -> bool {
+ if blacklist.contains(&old_pred.def_id()) {
+ return false;
+ }
+ // We map bounds to `&T` and `&mut T`
+ let trait_pred_and_imm_ref = old_pred.map_bound(|trait_pred| {
+ (
+ trait_pred,
+ self.tcx.mk_imm_ref(self.tcx.lifetimes.re_static, trait_pred.self_ty()),
+ )
+ });
+ let trait_pred_and_mut_ref = old_pred.map_bound(|trait_pred| {
+ (
+ trait_pred,
+ self.tcx.mk_mut_ref(self.tcx.lifetimes.re_static, trait_pred.self_ty()),
+ )
+ });
+
+ let mk_result = |trait_pred_and_new_ty| {
+ let obligation =
+ self.mk_trait_obligation_with_new_self_ty(param_env, trait_pred_and_new_ty);
+ self.predicate_must_hold_modulo_regions(&obligation)
+ };
+ let imm_result = mk_result(trait_pred_and_imm_ref);
+ let mut_result = mk_result(trait_pred_and_mut_ref);
+
+ if imm_result || mut_result {
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ // We have a very specific type of error, where just borrowing this argument
+ // might solve the problem. In cases like this, the important part is the
+ // original type obligation, not the last one that failed, which is arbitrary.
+ // Because of this, we modify the error to refer to the original obligation and
+ // return early in the caller.
+
+ let msg = format!("the trait bound `{}` is not satisfied", old_pred);
+ if has_custom_message {
+ err.note(&msg);
+ } else {
+ err.message =
+ vec![(rustc_errors::DiagnosticMessage::Str(msg), Style::NoStyle)];
+ }
+ if snippet.starts_with('&') {
+ // This is already a literal borrow and the obligation is failing
+ // somewhere else in the obligation chain. Do not suggest nonsense.
+ return false;
+ }
+ err.span_label(
+ span,
+ &format!(
+ "expected an implementor of trait `{}`",
+ old_pred.print_modifiers_and_trait_path(),
+ ),
+ );
+
+ // This `if` is to prevent a special edge case.
+ if matches!(
+ span.ctxt().outer_expn_data().kind,
+ ExpnKind::Root | ExpnKind::Desugaring(DesugaringKind::ForLoop)
+ ) {
+ // We don't want a borrowing suggestion on struct fields, e.g.
+ // ```
+ // struct Foo {
+ // the_foos: Vec<Foo>
+ // }
+ // ```
+
+ if imm_result && mut_result {
+ err.span_suggestions(
+ span.shrink_to_lo(),
+ "consider borrowing here",
+ ["&".to_string(), "&mut ".to_string()].into_iter(),
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ &format!(
+ "consider{} borrowing here",
+ if mut_result { " mutably" } else { "" }
+ ),
+ format!("&{}", if mut_result { "mut " } else { "" }),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ return true;
+ }
+ }
+ return false;
+ };
+
+ if let ObligationCauseCode::ImplDerivedObligation(cause) = &*code {
+ try_borrowing(cause.derived.parent_trait_pred, &[])
+ } else if let ObligationCauseCode::BindingObligation(_, _)
+ | ObligationCauseCode::ItemObligation(_) = code
+ {
+ try_borrowing(poly_trait_pred, &never_suggest_borrow)
+ } else {
+ false
+ }
+ }
+
+ // Suggest borrowing the value when `&self_ty` can be coerced into the expected `object_ty`.
+ fn suggest_borrowing_for_object_cast(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ self_ty: Ty<'tcx>,
+ object_ty: Ty<'tcx>,
+ ) {
+ let ty::Dynamic(predicates, _) = object_ty.kind() else { return; };
+ let self_ref_ty = self.tcx.mk_imm_ref(self.tcx.lifetimes.re_erased, self_ty);
+
+ for predicate in predicates.iter() {
+ if !self.predicate_must_hold_modulo_regions(
+ &obligation.with(predicate.with_self_ty(self.tcx, self_ref_ty)),
+ ) {
+ return;
+ }
+ }
+
+ err.span_suggestion(
+ obligation.cause.span.shrink_to_lo(),
+ &format!(
+ "consider borrowing the value, since `&{self_ty}` can be coerced into `{object_ty}`"
+ ),
+ "&",
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ /// Whenever references are used by mistake, like `for (i, e) in &vec.iter().enumerate()`,
+ /// suggest removing these references until we reach a type that implements the trait.
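+ ///
+ /// An illustrative sketch (hypothetical code):
+ /// ```ignore (illustrative)
+ /// let v = vec![1, 2, 3];
+ /// for x in &v.iter() {} // error: `&std::slice::Iter<'_, i32>` is not an iterator
+ ///                       // suggested: remove the leading `&`
+ /// ```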
+ fn suggest_remove_reference(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool {
+ let span = obligation.cause.span;
+
+ let mut suggested = false;
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ let refs_number =
+ snippet.chars().filter(|c| !c.is_whitespace()).take_while(|c| *c == '&').count();
+ if let Some('\'') = snippet.chars().filter(|c| !c.is_whitespace()).nth(refs_number) {
+ // Do not suggest removal of borrow from type arguments.
+ return false;
+ }
+
+ // Skipping binder here, remapping below
+ let mut suggested_ty = trait_pred.self_ty().skip_binder();
+
+ for refs_remaining in 0..refs_number {
+ let ty::Ref(_, inner_ty, _) = suggested_ty.kind() else {
+ break;
+ };
+ suggested_ty = *inner_ty;
+
+ // Remapping bound vars here
+ let trait_pred_and_suggested_ty =
+ trait_pred.map_bound(|trait_pred| (trait_pred, suggested_ty));
+
+ let new_obligation = self.mk_trait_obligation_with_new_self_ty(
+ obligation.param_env,
+ trait_pred_and_suggested_ty,
+ );
+
+ if self.predicate_may_hold(&new_obligation) {
+ let sp = self
+ .tcx
+ .sess
+ .source_map()
+ .span_take_while(span, |c| c.is_whitespace() || *c == '&');
+
+ let remove_refs = refs_remaining + 1;
+
+ let msg = if remove_refs == 1 {
+ "consider removing the leading `&`-reference".to_string()
+ } else {
+ format!("consider removing {} leading `&`-references", remove_refs)
+ };
+
+ err.span_suggestion_short(sp, &msg, "", Applicability::MachineApplicable);
+ suggested = true;
+ break;
+ }
+ }
+ }
+ suggested
+ }
+
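+ /// Suggest removing a stray `.await` when the awaited expression is not a future.
+ /// Illustrative case (hypothetical names): in `async fn f() { g().await; }` where `g` is
+ /// a plain `fn`, the suggestion removes the `.await` and, when `g` is defined in the
+ /// local crate, additionally offers to make `g` asynchronous.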
+ fn suggest_remove_await(&self, obligation: &PredicateObligation<'tcx>, err: &mut Diagnostic) {
+ let span = obligation.cause.span;
+
+ if let ObligationCauseCode::AwaitableExpr(hir_id) = obligation.cause.code().peel_derives() {
+ let hir = self.tcx.hir();
+ if let Some(node) = hir_id.and_then(|hir_id| hir.find(hir_id)) {
+ if let hir::Node::Expr(expr) = node {
+ // FIXME: use `obligation.predicate.kind()...trait_ref.self_ty()` to see if we have `()`
+ // and if not maybe suggest doing something else? If we kept the expression around we
+ // could also check if it is an fn call (very likely) and suggest changing *that*, if
+ // it is from the local crate.
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi().with_hi(span.hi()),
+ "remove the `.await`",
+ "",
+ Applicability::MachineApplicable,
+ );
+ // FIXME: account for associated `async fn`s.
+ if let hir::Expr { span, kind: hir::ExprKind::Call(base, _), .. } = expr {
+ if let ty::PredicateKind::Trait(pred) =
+ obligation.predicate.kind().skip_binder()
+ {
+ err.span_label(
+ *span,
+ &format!("this call returns `{}`", pred.self_ty()),
+ );
+ }
+ if let Some(typeck_results) =
+ self.in_progress_typeck_results.map(|t| t.borrow())
+ && let ty = typeck_results.expr_ty_adjusted(base)
+ && let ty::FnDef(def_id, _substs) = ty.kind()
+ && let Some(hir::Node::Item(hir::Item { ident, span, vis_span, .. })) =
+ hir.get_if_local(*def_id)
+ {
+ let msg = format!(
+ "alternatively, consider making `fn {}` asynchronous",
+ ident
+ );
+ if vis_span.is_empty() {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ &msg,
+ "async ",
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_suggestion_verbose(
+ vis_span.shrink_to_hi(),
+ &msg,
+ " async",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /// Check if the trait bound is implemented for a different mutability and note it in the
+ /// final error.
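+ ///
+ /// An illustrative sketch (hypothetical names):
+ /// ```ignore (illustrative)
+ /// fn takes_iter<I: Iterator>(_: I) {}
+ /// let v = vec![1];
+ /// takes_iter(&v.iter()); // error: `&Iter<'_, i32>` is not an iterator
+ ///                        // suggested: change the borrow to `&mut v.iter()`
+ /// ```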
+ fn suggest_change_mut(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) {
+ let points_at_arg = matches!(
+ obligation.cause.code(),
+ ObligationCauseCode::FunctionArgumentObligation { .. },
+ );
+
+ let span = obligation.cause.span;
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ let refs_number =
+ snippet.chars().filter(|c| !c.is_whitespace()).take_while(|c| *c == '&').count();
+ if let Some('\'') = snippet.chars().filter(|c| !c.is_whitespace()).nth(refs_number) {
+ // Do not suggest removal of borrow from type arguments.
+ return;
+ }
+ let trait_pred = self.resolve_vars_if_possible(trait_pred);
+ if trait_pred.has_infer_types_or_consts() {
+ // Do not ICE while trying to find if a reborrow would succeed on a trait with
+ // unresolved bindings.
+ return;
+ }
+
+ // Skipping binder here, remapping below
+ if let ty::Ref(region, t_type, mutability) = *trait_pred.skip_binder().self_ty().kind()
+ {
+ let suggested_ty = match mutability {
+ hir::Mutability::Mut => self.tcx.mk_imm_ref(region, t_type),
+ hir::Mutability::Not => self.tcx.mk_mut_ref(region, t_type),
+ };
+
+ // Remapping bound vars here
+ let trait_pred_and_suggested_ty =
+ trait_pred.map_bound(|trait_pred| (trait_pred, suggested_ty));
+
+ let new_obligation = self.mk_trait_obligation_with_new_self_ty(
+ obligation.param_env,
+ trait_pred_and_suggested_ty,
+ );
+ let suggested_ty_would_satisfy_obligation = self
+ .evaluate_obligation_no_overflow(&new_obligation)
+ .must_apply_modulo_regions();
+ if suggested_ty_would_satisfy_obligation {
+ let sp = self
+ .tcx
+ .sess
+ .source_map()
+ .span_take_while(span, |c| c.is_whitespace() || *c == '&');
+ if points_at_arg && mutability == hir::Mutability::Not && refs_number > 0 {
+ err.span_suggestion_verbose(
+ sp,
+ "consider changing this borrow's mutability",
+ "&mut ",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.note(&format!(
+ "`{}` is implemented for `{:?}`, but not for `{:?}`",
+ trait_pred.print_modifiers_and_trait_path(),
+ suggested_ty,
+ trait_pred.skip_binder().self_ty(),
+ ));
+ }
+ }
+ }
+ }
+ }
+
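+ /// Suggest removing a trailing semicolon when the function body evaluates to `()` only
+ /// because its last statement is semicolon-terminated and the discarded expression would
+ /// satisfy the failed bound. An illustrative sketch (hypothetical code):
+ /// ```ignore (illustrative)
+ /// fn foo() -> impl std::fmt::Display {
+ ///     42; // suggested: remove this semicolon
+ /// }
+ /// ```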
+ fn suggest_semicolon_removal(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ span: Span,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool {
+ let hir = self.tcx.hir();
+ let parent_node = hir.get_parent_node(obligation.cause.body_id);
+ let node = hir.find(parent_node);
+ if let Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(sig, _, body_id), .. })) = node
+ && let hir::ExprKind::Block(blk, _) = &hir.body(*body_id).value.kind
+ && sig.decl.output.span().overlaps(span)
+ && blk.expr.is_none()
+ && trait_pred.self_ty().skip_binder().is_unit()
+ && let Some(stmt) = blk.stmts.last()
+ && let hir::StmtKind::Semi(expr) = stmt.kind
+ // Only suggest this if the expression behind the semicolon implements the predicate
+ && let Some(typeck_results) = self.in_progress_typeck_results
+ && let Some(ty) = typeck_results.borrow().expr_ty_opt(expr)
+ && self.predicate_may_hold(&self.mk_trait_obligation_with_new_self_ty(
+ obligation.param_env, trait_pred.map_bound(|trait_pred| (trait_pred, ty))
+ ))
+ {
+ err.span_label(
+ expr.span,
+ &format!(
+ "this expression has type `{}`, which implements `{}`",
+ ty,
+ trait_pred.print_modifiers_and_trait_path()
+ )
+ );
+ err.span_suggestion(
+ self.tcx.sess.source_map().end_point(stmt.span),
+ "remove this semicolon",
+ "",
+ Applicability::MachineApplicable
+ );
+ return true;
+ }
+ false
+ }
+
+ fn return_type_span(&self, obligation: &PredicateObligation<'tcx>) -> Option<Span> {
+ let hir = self.tcx.hir();
+ let parent_node = hir.get_parent_node(obligation.cause.body_id);
+ let Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(sig, ..), .. })) = hir.find(parent_node) else {
+ return None;
+ };
+
+ if let hir::FnRetTy::Return(ret_ty) = sig.decl.output { Some(ret_ty.span) } else { None }
+ }
+
+ /// If all conditions are met to identify a returned `dyn Trait`, suggest using `impl Trait` if
+ /// applicable and signal that the error has been expanded appropriately and needs to be
+ /// emitted.
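+ ///
+ /// An illustrative sketch (hypothetical code):
+ /// ```ignore (illustrative)
+ /// fn foo() -> dyn std::fmt::Display { 42 }
+ /// // error[E0746]: return type cannot have an unboxed trait object
+ /// // suggested: `fn foo() -> impl std::fmt::Display { 42 }`
+ /// ```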
+ fn suggest_impl_trait(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ obligation: &PredicateObligation<'tcx>,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> bool {
+ match obligation.cause.code().peel_derives() {
+ // Only suggest `impl Trait` if the return type is unsized because it is `dyn Trait`.
+ ObligationCauseCode::SizedReturnType => {}
+ _ => return false,
+ }
+
+ let hir = self.tcx.hir();
+ let fn_hir_id = hir.get_parent_node(obligation.cause.body_id);
+ let node = hir.find(fn_hir_id);
+ let Some(hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Fn(sig, _, body_id),
+ ..
+ })) = node
+ else {
+ return false;
+ };
+ let body = hir.body(*body_id);
+ let trait_pred = self.resolve_vars_if_possible(trait_pred);
+ let ty = trait_pred.skip_binder().self_ty();
+ let is_object_safe = match ty.kind() {
+ ty::Dynamic(predicates, _) => {
+ // If the `dyn Trait` is not object safe, do not suggest `Box<dyn Trait>`.
+ predicates
+ .principal_def_id()
+ .map_or(true, |def_id| self.tcx.object_safety_violations(def_id).is_empty())
+ }
+ // We only want to suggest `impl Trait` to `dyn Trait`s.
+ // For example, `fn foo() -> str` needs to be filtered out.
+ _ => return false,
+ };
+
+ let hir::FnRetTy::Return(ret_ty) = sig.decl.output else {
+ return false;
+ };
+
+ // Use `TypeVisitor` instead of the output type directly to find the span of `ty` for
+ // cases like `fn foo() -> (dyn Trait, i32) {}`.
+ // Recursively look for `TraitObject` types and if there's only one, use that span to
+ // suggest `impl Trait`.
+
+ // Visit to make sure there's a single `return` type to suggest `impl Trait`,
+ // otherwise suggest using `Box<dyn Trait>` or an enum.
+ let mut visitor = ReturnsVisitor::default();
+ visitor.visit_body(&body);
+
+ let typeck_results = self.in_progress_typeck_results.map(|t| t.borrow()).unwrap();
+ let Some(liberated_sig) = typeck_results.liberated_fn_sigs().get(fn_hir_id).copied() else { return false; };
+
+ let ret_types = visitor
+ .returns
+ .iter()
+ .filter_map(|expr| Some((expr.span, typeck_results.node_type_opt(expr.hir_id)?)))
+ .map(|(expr_span, ty)| (expr_span, self.resolve_vars_if_possible(ty)));
+ let (last_ty, all_returns_have_same_type, only_never_return) = ret_types.clone().fold(
+ (None, true, true),
+ |(last_ty, mut same, only_never_return): (Option<Ty<'_>>, bool, bool),
+ (_, ty)| {
+ let ty = self.resolve_vars_if_possible(ty);
+ same &=
+ !matches!(ty.kind(), ty::Error(_))
+ && last_ty.map_or(true, |last_ty| {
+ // FIXME: ideally we would use `can_coerce` here instead, but `typeck` comes
+ // *after* in the dependency graph.
+ match (ty.kind(), last_ty.kind()) {
+ (Infer(InferTy::IntVar(_)), Infer(InferTy::IntVar(_)))
+ | (Infer(InferTy::FloatVar(_)), Infer(InferTy::FloatVar(_)))
+ | (Infer(InferTy::FreshIntTy(_)), Infer(InferTy::FreshIntTy(_)))
+ | (
+ Infer(InferTy::FreshFloatTy(_)),
+ Infer(InferTy::FreshFloatTy(_)),
+ ) => true,
+ _ => ty == last_ty,
+ }
+ });
+ (Some(ty), same, only_never_return && matches!(ty.kind(), ty::Never))
+ },
+ );
+ let mut spans_and_needs_box = vec![];
+
+ match liberated_sig.output().kind() {
+ ty::Dynamic(predicates, _) => {
+ let cause = ObligationCause::misc(ret_ty.span, fn_hir_id);
+ let param_env = ty::ParamEnv::empty();
+
+ if !only_never_return {
+ for (expr_span, return_ty) in ret_types {
+ let self_ty_satisfies_dyn_predicates = |self_ty| {
+ predicates.iter().all(|predicate| {
+ let pred = predicate.with_self_ty(self.tcx, self_ty);
+ let obl = Obligation::new(cause.clone(), param_env, pred);
+ self.predicate_may_hold(&obl)
+ })
+ };
+
+ if let ty::Adt(def, substs) = return_ty.kind()
+ && def.is_box()
+ && self_ty_satisfies_dyn_predicates(substs.type_at(0))
+ {
+ spans_and_needs_box.push((expr_span, false));
+ } else if self_ty_satisfies_dyn_predicates(return_ty) {
+ spans_and_needs_box.push((expr_span, true));
+ } else {
+ return false;
+ }
+ }
+ }
+ }
+ _ => return false,
+ };
+
+ let sm = self.tcx.sess.source_map();
+ if !ret_ty.span.overlaps(span) {
+ return false;
+ }
+ let snippet = if let hir::TyKind::TraitObject(..) = ret_ty.kind {
+ if let Ok(snippet) = sm.span_to_snippet(ret_ty.span) {
+ snippet
+ } else {
+ return false;
+ }
+ } else {
+ // Substitute the type, so we can print a fixup given `type Alias = dyn Trait`
+ let name = liberated_sig.output().to_string();
+ let name =
+ name.strip_prefix('(').and_then(|name| name.strip_suffix(')')).unwrap_or(&name);
+ if !name.starts_with("dyn ") {
+ return false;
+ }
+ name.to_owned()
+ };
+
+ err.code(error_code!(E0746));
+ err.set_primary_message("return type cannot have an unboxed trait object");
+ err.children.clear();
+ let impl_trait_msg = "for information on `impl Trait`, see \
+ <https://doc.rust-lang.org/book/ch10-02-traits.html\
+ #returning-types-that-implement-traits>";
+ let trait_obj_msg = "for information on trait objects, see \
+ <https://doc.rust-lang.org/book/ch17-02-trait-objects.html\
+ #using-trait-objects-that-allow-for-values-of-different-types>";
+
+ let has_dyn = snippet.split_whitespace().next().map_or(false, |s| s == "dyn");
+ let trait_obj = if has_dyn { &snippet[4..] } else { &snippet };
+ if only_never_return {
+ // No return paths, probably using `panic!()` or similar.
+ // Suggest `-> T`, `-> impl Trait`, and if `Trait` is object safe, `-> Box<dyn Trait>`.
+ suggest_trait_object_return_type_alternatives(
+ err,
+ ret_ty.span,
+ trait_obj,
+ is_object_safe,
+ );
+ } else if let (Some(last_ty), true) = (last_ty, all_returns_have_same_type) {
+ // Suggest `-> impl Trait`.
+ err.span_suggestion(
+ ret_ty.span,
+ &format!(
+ "use `impl {1}` as the return type, as all return paths are of type `{}`, \
+ which implements `{1}`",
+ last_ty, trait_obj,
+ ),
+ format!("impl {}", trait_obj),
+ Applicability::MachineApplicable,
+ );
+ err.note(impl_trait_msg);
+ } else {
+ if is_object_safe {
+ // Suggest `-> Box<dyn Trait>` and `Box::new(returned_value)`.
+ err.multipart_suggestion(
+ "return a boxed trait object instead",
+ vec![
+ (ret_ty.span.shrink_to_lo(), "Box<".to_string()),
+ (span.shrink_to_hi(), ">".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ for (span, needs_box) in spans_and_needs_box {
+ if needs_box {
+ err.multipart_suggestion(
+ "... and box this value",
+ vec![
+ (span.shrink_to_lo(), "Box::new(".to_string()),
+ (span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ } else {
+ // This is currently not possible to trigger because E0038 takes precedence, but
+ // leave it in for completeness in case anything changes in an earlier stage.
+ err.note(&format!(
+ "if trait `{}` were object-safe, you could return a trait object",
+ trait_obj,
+ ));
+ }
+ err.note(trait_obj_msg);
+ err.note(&format!(
+ "if all the returned values were of the same type you could use `impl {}` as the \
+ return type",
+ trait_obj,
+ ));
+ err.note(impl_trait_msg);
+ err.note("you can create a new `enum` with a variant for each returned type");
+ }
+ true
+ }
+
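+ /// When the failed obligation is a `SizedReturnType`, point at every `return`
+ /// expression in the function and label the type each one produces.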
+ fn point_at_returns_when_relevant(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ ) {
+ match obligation.cause.code().peel_derives() {
+ ObligationCauseCode::SizedReturnType => {}
+ _ => return,
+ }
+
+ let hir = self.tcx.hir();
+ let parent_node = hir.get_parent_node(obligation.cause.body_id);
+ let node = hir.find(parent_node);
+ if let Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, _, body_id), .. })) =
+ node
+ {
+ let body = hir.body(*body_id);
+ // Point at all the `return`s in the function as they have failed trait bounds.
+ let mut visitor = ReturnsVisitor::default();
+ visitor.visit_body(&body);
+ let typeck_results = self.in_progress_typeck_results.map(|t| t.borrow()).unwrap();
+ for expr in &visitor.returns {
+ if let Some(returned_ty) = typeck_results.node_type_opt(expr.hir_id) {
+ let ty = self.resolve_vars_if_possible(returned_ty);
+ err.span_label(expr.span, &format!("this returned value is of type `{}`", ty));
+ }
+ }
+ }
+ }
+
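+ /// Report E0631 when a closure, generator, or function is passed somewhere its
+ /// signature does not match the expected `Fn*` bound. An illustrative sketch
+ /// (hypothetical names):
+ /// ```ignore (illustrative)
+ /// fn call(f: impl Fn(u32)) { f(0) }
+ /// call(|x: String| ()); // error[E0631]: type mismatch in closure arguments
+ /// ```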
+ fn report_closure_arg_mismatch(
+ &self,
+ span: Span,
+ found_span: Option<Span>,
+ found: ty::PolyTraitRef<'tcx>,
+ expected: ty::PolyTraitRef<'tcx>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ pub(crate) fn build_fn_sig_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Ty<'tcx> {
+ let inputs = trait_ref.skip_binder().substs.type_at(1);
+ let sig = match inputs.kind() {
+ ty::Tuple(inputs)
+ if tcx.fn_trait_kind_from_lang_item(trait_ref.def_id()).is_some() =>
+ {
+ tcx.mk_fn_sig(
+ inputs.iter(),
+ tcx.mk_ty_infer(ty::TyVar(ty::TyVid::from_u32(0))),
+ false,
+ hir::Unsafety::Normal,
+ abi::Abi::Rust,
+ )
+ }
+ _ => tcx.mk_fn_sig(
+ std::iter::once(inputs),
+ tcx.mk_ty_infer(ty::TyVar(ty::TyVid::from_u32(0))),
+ false,
+ hir::Unsafety::Normal,
+ abi::Abi::Rust,
+ ),
+ };
+
+ tcx.mk_fn_ptr(trait_ref.rebind(sig))
+ }
+
+ let argument_kind = match expected.skip_binder().self_ty().kind() {
+ ty::Closure(..) => "closure",
+ ty::Generator(..) => "generator",
+ _ => "function",
+ };
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0631,
+ "type mismatch in {argument_kind} arguments",
+ );
+
+ err.span_label(span, "expected due to this");
+
+ let found_span = found_span.unwrap_or(span);
+ err.span_label(found_span, "found signature defined here");
+
+ let expected = build_fn_sig_ty(self.tcx, expected);
+ let found = build_fn_sig_ty(self.tcx, found);
+
+ let (expected_str, found_str) =
+ self.tcx.infer_ctxt().enter(|infcx| infcx.cmp(expected, found));
+
+ let signature_kind = format!("{argument_kind} signature");
+ err.note_expected_found(&signature_kind, expected_str, &signature_kind, found_str);
+
+ err
+ }
+
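+ /// Suggest the fully qualified path when an associated const or type is accessed
+ /// directly on the trait, e.g. (illustrative) rewriting `Trait::ITEM` as
+ /// `<Type as Trait>::ITEM`.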
+ fn suggest_fully_qualified_path(
+ &self,
+ err: &mut Diagnostic,
+ item_def_id: DefId,
+ span: Span,
+ trait_ref: DefId,
+ ) {
+ if let Some(assoc_item) = self.tcx.opt_associated_item(item_def_id) {
+ if let ty::AssocKind::Const | ty::AssocKind::Type = assoc_item.kind {
+ err.note(&format!(
+ "{}s cannot be accessed directly on a `trait`, they can only be \
+ accessed through a specific `impl`",
+ assoc_item.kind.as_def_kind().descr(item_def_id)
+ ));
+ err.span_suggestion(
+ span,
+ "use the fully qualified path to an implementation",
+ format!("<Type as {}>::{}", self.tcx.def_path_str(trait_ref), assoc_item.name),
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ }
+
+ /// Adds an async-await specific note to the diagnostic when the future does not implement
+ /// an auto trait because of a captured type.
+ ///
+ /// ```text
+ /// note: future does not implement `Qux` as this value is used across an await
+ /// --> $DIR/issue-64130-3-other.rs:17:5
+ /// |
+ /// LL | let x = Foo;
+ /// | - has type `Foo`
+ /// LL | baz().await;
+ /// | ^^^^^^^^^^^ await occurs here, with `x` maybe used later
+ /// LL | }
+ /// | - `x` is later dropped here
+ /// ```
+ ///
+ /// When the future specifically does not implement `Send` or `Sync`, the diagnostic
+ /// is "replaced" with a different message and a more specific error.
+ ///
+ /// ```text
+ /// error: future cannot be sent between threads safely
+ /// --> $DIR/issue-64130-2-send.rs:21:5
+ /// |
+ /// LL | fn is_send<T: Send>(t: T) { }
+ /// | ---- required by this bound in `is_send`
+ /// ...
+ /// LL | is_send(bar());
+ /// | ^^^^^^^ future returned by `bar` is not send
+ /// |
+ /// = help: within `impl std::future::Future`, the trait `std::marker::Send` is not
+ /// implemented for `Foo`
+ /// note: future is not send as this value is used across an await
+ /// --> $DIR/issue-64130-2-send.rs:15:5
+ /// |
+ /// LL | let x = Foo;
+ /// | - has type `Foo`
+ /// LL | baz().await;
+ /// | ^^^^^^^^^^^ await occurs here, with `x` maybe used later
+ /// LL | }
+ /// | - `x` is later dropped here
+ /// ```
+ ///
+ /// Returns `true` if an async-await specific note was added to the diagnostic.
+ #[instrument(level = "debug", skip_all, fields(?obligation.predicate, ?obligation.cause.span))]
+ fn maybe_note_obligation_cause_for_async_await(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> bool {
+ let hir = self.tcx.hir();
+
+ // Attempt to detect an async-await error by walking the obligation causes and
+ // looking for a generator to be present.
+ //
+ // When a future does not implement a trait because of a captured type in one of the
+ // generators somewhere in the call stack, then the result is a chain of obligations.
+ //
+ // Given an `async fn` A that calls an `async fn` B which captures a non-`Send`
+ // type, where B's future is passed as an argument to a function C which requires
+ // a `Send` type, the chain looks something like this:
+ //
+ // - `BuiltinDerivedObligation` with a generator witness (B)
+ // - `BuiltinDerivedObligation` with a generator (B)
+ // - `BuiltinDerivedObligation` with `std::future::GenFuture` (B)
+ // - `BuiltinDerivedObligation` with `impl std::future::Future` (B)
+ // - `BuiltinDerivedObligation` with `impl std::future::Future` (B)
+ // - `BuiltinDerivedObligation` with a generator witness (A)
+ // - `BuiltinDerivedObligation` with a generator (A)
+ // - `BuiltinDerivedObligation` with `std::future::GenFuture` (A)
+ // - `BuiltinDerivedObligation` with `impl std::future::Future` (A)
+ // - `BuiltinDerivedObligation` with `impl std::future::Future` (A)
+ // - `BindingObligation` with `impl_send` (`Send` requirement)
+ //
+ // The first obligation in the chain is the most useful and has the generator that captured
+ // the type. The last generator (`outer_generator` below) has information about where the
+ // bound was introduced. At least one generator should be present for this diagnostic to be
+ // modified.
+ let (mut trait_ref, mut target_ty) = match obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(p) => (Some(p), Some(p.self_ty())),
+ _ => (None, None),
+ };
+ let mut generator = None;
+ let mut outer_generator = None;
+ let mut next_code = Some(obligation.cause.code());
+
+ let mut seen_upvar_tys_infer_tuple = false;
+
+ while let Some(code) = next_code {
+ debug!(?code);
+ match code {
+ ObligationCauseCode::FunctionArgumentObligation { parent_code, .. } => {
+ next_code = Some(parent_code);
+ }
+ ObligationCauseCode::ImplDerivedObligation(cause) => {
+ let ty = cause.derived.parent_trait_pred.skip_binder().self_ty();
+ debug!(
+ parent_trait_ref = ?cause.derived.parent_trait_pred,
+ self_ty.kind = ?ty.kind(),
+ "ImplDerived",
+ );
+
+ match *ty.kind() {
+ ty::Generator(did, ..) => {
+ generator = generator.or(Some(did));
+ outer_generator = Some(did);
+ }
+ ty::GeneratorWitness(..) => {}
+ ty::Tuple(_) if !seen_upvar_tys_infer_tuple => {
+ // By introducing a tuple of upvar types into the chain of obligations
+ // of a generator, the first non-generator item is now the tuple itself,
+ // so we shall ignore it.
+
+ seen_upvar_tys_infer_tuple = true;
+ }
+ _ if generator.is_none() => {
+ trait_ref = Some(cause.derived.parent_trait_pred.skip_binder());
+ target_ty = Some(ty);
+ }
+ _ => {}
+ }
+
+ next_code = Some(&cause.derived.parent_code);
+ }
+ ObligationCauseCode::DerivedObligation(derived_obligation)
+ | ObligationCauseCode::BuiltinDerivedObligation(derived_obligation) => {
+ let ty = derived_obligation.parent_trait_pred.skip_binder().self_ty();
+ debug!(
+ parent_trait_ref = ?derived_obligation.parent_trait_pred,
+ self_ty.kind = ?ty.kind(),
+ );
+
+ match *ty.kind() {
+ ty::Generator(did, ..) => {
+ generator = generator.or(Some(did));
+ outer_generator = Some(did);
+ }
+ ty::GeneratorWitness(..) => {}
+ ty::Tuple(_) if !seen_upvar_tys_infer_tuple => {
+ // By introducing a tuple of upvar types into the chain of obligations
+ // of a generator, the first non-generator item is now the tuple itself,
+ // so we shall ignore it.
+
+ seen_upvar_tys_infer_tuple = true;
+ }
+ _ if generator.is_none() => {
+ trait_ref = Some(derived_obligation.parent_trait_pred.skip_binder());
+ target_ty = Some(ty);
+ }
+ _ => {}
+ }
+
+ next_code = Some(&derived_obligation.parent_code);
+ }
+ _ => break,
+ }
+ }
+
+ // Only continue if a generator was found.
+ debug!(?generator, ?trait_ref, ?target_ty);
+ let (Some(generator_did), Some(trait_ref), Some(target_ty)) = (generator, trait_ref, target_ty) else {
+ return false;
+ };
+
+ let span = self.tcx.def_span(generator_did);
+
+ let in_progress_typeck_results = self.in_progress_typeck_results.map(|t| t.borrow());
+ let generator_did_root = self.tcx.typeck_root_def_id(generator_did);
+ debug!(
+ ?generator_did,
+ ?generator_did_root,
+ in_progress_typeck_results.hir_owner = ?in_progress_typeck_results.as_ref().map(|t| t.hir_owner),
+ ?span,
+ );
+
+ let generator_body = generator_did
+ .as_local()
+ .and_then(|def_id| hir.maybe_body_owned_by(def_id))
+ .map(|body_id| hir.body(body_id));
+ let is_async = match generator_did.as_local() {
+ Some(_) => generator_body
+ .and_then(|body| body.generator_kind())
+ .map(|generator_kind| matches!(generator_kind, hir::GeneratorKind::Async(..)))
+ .unwrap_or(false),
+ None => self
+ .tcx
+ .generator_kind(generator_did)
+ .map(|generator_kind| matches!(generator_kind, hir::GeneratorKind::Async(..)))
+ .unwrap_or(false),
+ };
+ let mut visitor = AwaitsVisitor::default();
+ if let Some(body) = generator_body {
+ visitor.visit_body(body);
+ }
+ debug!(awaits = ?visitor.awaits);
+
+ // Look for a type inside the generator interior that matches the target type to get
+ // a span.
+ let target_ty_erased = self.tcx.erase_regions(target_ty);
+ let ty_matches = |ty| -> bool {
+ // Careful: the regions for types that appear in the
+ // generator interior are not generally known, so we
+ // want to erase them when comparing (and anyway,
+ // `Send` and other bounds are generally unaffected by
+ // the choice of region). When erasing regions, we
+ // also have to erase late-bound regions. This is
+ // because the types that appear in the generator
+ // interior generally contain "bound regions" to
+ // represent regions that are part of the suspended
+ // generator frame. Bound regions are preserved by
+ // `erase_regions` and so we must also call
+ // `erase_late_bound_regions`.
+ let ty_erased = self.tcx.erase_late_bound_regions(ty);
+ let ty_erased = self.tcx.erase_regions(ty_erased);
+ let eq = ty_erased == target_ty_erased;
+ debug!(?ty_erased, ?target_ty_erased, ?eq);
+ eq
+ };
+
+ let mut interior_or_upvar_span = None;
+ let mut interior_extra_info = None;
+
+ // Get the typeck results from the infcx if the generator is the function we are currently
+ // type-checking; otherwise, get them by performing a query. This is needed to avoid
+ // cycles. If we can't use resolved types because the generator comes from another crate,
+ // we still provide a targeted error but without all the relevant spans.
+ let generator_data: Option<GeneratorData<'tcx, '_>> = match &in_progress_typeck_results {
+ Some(t) if t.hir_owner.to_def_id() == generator_did_root => {
+ Some(GeneratorData::Local(&t))
+ }
+ _ if generator_did.is_local() => {
+ Some(GeneratorData::Local(self.tcx.typeck(generator_did.expect_local())))
+ }
+ _ => self
+ .tcx
+ .generator_diagnostic_data(generator_did)
+ .as_ref()
+ .map(|generator_diag_data| GeneratorData::Foreign(generator_diag_data)),
+ };
+
+ if let Some(generator_data) = generator_data.as_ref() {
+ interior_or_upvar_span =
+ generator_data.try_get_upvar_span(&self, generator_did, ty_matches);
+
+ // The generator interior types share the same binders
+ if let Some(cause) =
+ generator_data.get_generator_interior_types().skip_binder().iter().find(
+ |ty::GeneratorInteriorTypeCause { ty, .. }| {
+ ty_matches(generator_data.get_generator_interior_types().rebind(*ty))
+ },
+ )
+ {
+ let from_awaited_ty = generator_data.get_from_await_ty(visitor, hir, ty_matches);
+ let ty::GeneratorInteriorTypeCause { span, scope_span, yield_span, expr, .. } =
+ cause;
+
+ interior_or_upvar_span = Some(GeneratorInteriorOrUpvar::Interior(*span));
+ interior_extra_info = Some((*scope_span, *yield_span, *expr, from_awaited_ty));
+ }
+
+ if interior_or_upvar_span.is_none() && generator_data.is_foreign() {
+ interior_or_upvar_span = Some(GeneratorInteriorOrUpvar::Interior(span));
+ }
+ }
+
+ if let Some(interior_or_upvar_span) = interior_or_upvar_span {
+ let typeck_results = generator_data.and_then(|generator_data| match generator_data {
+ GeneratorData::Local(typeck_results) => Some(typeck_results),
+ GeneratorData::Foreign(_) => None,
+ });
+ self.note_obligation_cause_for_async_await(
+ err,
+ interior_or_upvar_span,
+ interior_extra_info,
+ is_async,
+ outer_generator,
+ trait_ref,
+ target_ty,
+ typeck_results,
+ obligation,
+ next_code,
+ );
+ true
+ } else {
+ false
+ }
+ }
+
+ /// Unconditionally adds the diagnostic note described in
+ /// `maybe_note_obligation_cause_for_async_await`'s documentation comment.
+ #[instrument(level = "debug", skip_all)]
+ fn note_obligation_cause_for_async_await(
+ &self,
+ err: &mut Diagnostic,
+ interior_or_upvar_span: GeneratorInteriorOrUpvar,
+ interior_extra_info: Option<(Option<Span>, Span, Option<hir::HirId>, Option<Span>)>,
+ is_async: bool,
+ outer_generator: Option<DefId>,
+ trait_pred: ty::TraitPredicate<'tcx>,
+ target_ty: Ty<'tcx>,
+ typeck_results: Option<&ty::TypeckResults<'tcx>>,
+ obligation: &PredicateObligation<'tcx>,
+ next_code: Option<&ObligationCauseCode<'tcx>>,
+ ) {
+ let source_map = self.tcx.sess.source_map();
+
+ let (await_or_yield, an_await_or_yield) =
+ if is_async { ("await", "an await") } else { ("yield", "a yield") };
+ let future_or_generator = if is_async { "future" } else { "generator" };
+
+ // Special case the primary error message when `Send` or `Sync` is the trait that
+ // was not implemented.
+ let hir = self.tcx.hir();
+ let trait_explanation = if let Some(name @ (sym::Send | sym::Sync)) =
+ self.tcx.get_diagnostic_name(trait_pred.def_id())
+ {
+ let (trait_name, trait_verb) =
+ if name == sym::Send { ("`Send`", "sent") } else { ("`Sync`", "shared") };
+
+ err.clear_code();
+ err.set_primary_message(format!(
+ "{} cannot be {} between threads safely",
+ future_or_generator, trait_verb
+ ));
+
+ let original_span = err.span.primary_span().unwrap();
+ let mut span = MultiSpan::from_span(original_span);
+
+ let message = outer_generator
+ .and_then(|generator_did| {
+ Some(match self.tcx.generator_kind(generator_did).unwrap() {
+ GeneratorKind::Gen => format!("generator is not {}", trait_name),
+ GeneratorKind::Async(AsyncGeneratorKind::Fn) => self
+ .tcx
+ .parent(generator_did)
+ .as_local()
+ .map(|parent_did| hir.local_def_id_to_hir_id(parent_did))
+ .and_then(|parent_hir_id| hir.opt_name(parent_hir_id))
+ .map(|name| {
+ format!("future returned by `{}` is not {}", name, trait_name)
+ })?,
+ GeneratorKind::Async(AsyncGeneratorKind::Block) => {
+ format!("future created by async block is not {}", trait_name)
+ }
+ GeneratorKind::Async(AsyncGeneratorKind::Closure) => {
+ format!("future created by async closure is not {}", trait_name)
+ }
+ })
+ })
+ .unwrap_or_else(|| format!("{} is not {}", future_or_generator, trait_name));
+
+ span.push_span_label(original_span, message);
+ err.set_span(span);
+
+ format!("is not {}", trait_name)
+ } else {
+ format!("does not implement `{}`", trait_pred.print_modifiers_and_trait_path())
+ };
+
+ let mut explain_yield = |interior_span: Span,
+ yield_span: Span,
+ scope_span: Option<Span>| {
+ let mut span = MultiSpan::from_span(yield_span);
+ if let Ok(snippet) = source_map.span_to_snippet(interior_span) {
+ // #70935: If snippet contains newlines, display "the value" instead
+ // so that we do not emit complex diagnostics.
+ let snippet = &format!("`{}`", snippet);
+ let snippet = if snippet.contains('\n') { "the value" } else { snippet };
+ // note: future is not `Send` as this value is used across an await
+ // --> $DIR/issue-70935-complex-spans.rs:13:9
+ // |
+ // LL | baz(|| async {
+ // | ______________-
+ // | |
+ // | |
+ // LL | | foo(tx.clone());
+ // LL | | }).await;
+ // | | - ^^^^^^ await occurs here, with value maybe used later
+ // | |__________|
+ // | has type `closure` which is not `Send`
+ // note: value is later dropped here
+ // LL | | }).await;
+ // | | ^
+ //
+ span.push_span_label(
+ yield_span,
+ format!("{} occurs here, with {} maybe used later", await_or_yield, snippet),
+ );
+ span.push_span_label(
+ interior_span,
+ format!("has type `{}` which {}", target_ty, trait_explanation),
+ );
+ // If available, use the scope span to annotate the drop location.
+ let mut scope_note = None;
+ if let Some(scope_span) = scope_span {
+ let scope_span = source_map.end_point(scope_span);
+
+ let msg = format!("{} is later dropped here", snippet);
+ if source_map.is_multiline(yield_span.between(scope_span)) {
+ span.push_span_label(scope_span, msg);
+ } else {
+ scope_note = Some((scope_span, msg));
+ }
+ }
+ err.span_note(
+ span,
+ &format!(
+ "{} {} as this value is used across {}",
+ future_or_generator, trait_explanation, an_await_or_yield
+ ),
+ );
+ if let Some((span, msg)) = scope_note {
+ err.span_note(span, &msg);
+ }
+ }
+ };
+ match interior_or_upvar_span {
+ GeneratorInteriorOrUpvar::Interior(interior_span) => {
+ if let Some((scope_span, yield_span, expr, from_awaited_ty)) = interior_extra_info {
+ if let Some(await_span) = from_awaited_ty {
+ // The type causing this obligation is one being awaited at await_span.
+ let mut span = MultiSpan::from_span(await_span);
+ span.push_span_label(
+ await_span,
+ format!(
+ "await occurs here on type `{}`, which {}",
+ target_ty, trait_explanation
+ ),
+ );
+ err.span_note(
+ span,
+ &format!(
+ "future {not_trait} as it awaits another future which {not_trait}",
+ not_trait = trait_explanation
+ ),
+ );
+ } else {
+ // Look at the last interior type to get a span for the `.await`.
+ debug!(
+ generator_interior_types = ?format_args!(
+ "{:#?}", typeck_results.as_ref().map(|t| &t.generator_interior_types)
+ ),
+ );
+ explain_yield(interior_span, yield_span, scope_span);
+ }
+
+ if let Some(expr_id) = expr {
+ let expr = hir.expect_expr(expr_id);
+ debug!("target_ty evaluated from {:?}", expr);
+
+ let parent = hir.get_parent_node(expr_id);
+ if let Some(hir::Node::Expr(e)) = hir.find(parent) {
+ let parent_span = hir.span(parent);
+ let parent_did = parent.owner.to_def_id();
+ // ```rust
+ // impl T {
+ // fn foo(&self) -> i32 {}
+ // }
+ // T.foo();
+ // ^^^^^^^ a temporary `&T` created inside this method call due to `&self`
+ // ```
+ //
+ let is_region_borrow = if let Some(typeck_results) = typeck_results {
+ typeck_results
+ .expr_adjustments(expr)
+ .iter()
+ .any(|adj| adj.is_region_borrow())
+ } else {
+ false
+ };
+
+ // ```rust
+ // struct Foo(*const u8);
+ // bar(Foo(std::ptr::null())).await;
+ // ^^^^^^^^^^^^^^^^^^^^^ raw-ptr `*T` created inside this struct ctor.
+ // ```
+ debug!(parent_def_kind = ?self.tcx.def_kind(parent_did));
+ let is_raw_borrow_inside_fn_like_call =
+ match self.tcx.def_kind(parent_did) {
+ DefKind::Fn | DefKind::Ctor(..) => target_ty.is_unsafe_ptr(),
+ _ => false,
+ };
+ if let Some(typeck_results) = typeck_results {
+ if (typeck_results.is_method_call(e) && is_region_borrow)
+ || is_raw_borrow_inside_fn_like_call
+ {
+ err.span_help(
+ parent_span,
+ "consider moving this into a `let` \
+ binding to create a shorter lived borrow",
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+ GeneratorInteriorOrUpvar::Upvar(upvar_span) => {
+ // `Some(ref_ty)` if `target_ty` is `&T` and `T` fails to impl `Sync`
+ let refers_to_non_sync = match target_ty.kind() {
+ ty::Ref(_, ref_ty, _) => match self.evaluate_obligation(&obligation) {
+ Ok(eval) if !eval.may_apply() => Some(ref_ty),
+ _ => None,
+ },
+ _ => None,
+ };
+
+ let (span_label, span_note) = match refers_to_non_sync {
+ // if `target_ty` is `&T` and `T` fails to impl `Sync`,
+ // include suggestions to make `T: Sync` so that `&T: Send`
+ Some(ref_ty) => (
+ format!(
+ "has type `{}` which {}, because `{}` is not `Sync`",
+ target_ty, trait_explanation, ref_ty
+ ),
+ format!(
+ "captured value {} because `&` references cannot be sent unless their referent is `Sync`",
+ trait_explanation
+ ),
+ ),
+ None => (
+ format!("has type `{}` which {}", target_ty, trait_explanation),
+ format!("captured value {}", trait_explanation),
+ ),
+ };
+
+ let mut span = MultiSpan::from_span(upvar_span);
+ span.push_span_label(upvar_span, span_label);
+ err.span_note(span, &span_note);
+ }
+ }
+
+ // Add a note for the item obligation that remains - normally a note pointing to the
+ // bound that introduced the obligation (e.g. `T: Send`).
+ debug!(?next_code);
+ self.note_obligation_cause_code(
+ err,
+ &obligation.predicate,
+ obligation.param_env,
+ next_code.unwrap(),
+ &mut Vec::new(),
+ &mut Default::default(),
+ );
+ }
+
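+ /// Walk the chain of `ObligationCauseCode`s and append a note for each step that
+ /// explains why the predicate was required, e.g. "required because it appears within
+ /// the type `X`" or "required by a bound in `f`".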
+ fn note_obligation_cause_code<T>(
+ &self,
+ err: &mut Diagnostic,
+ predicate: &T,
+ param_env: ty::ParamEnv<'tcx>,
+ cause_code: &ObligationCauseCode<'tcx>,
+ obligated_types: &mut Vec<Ty<'tcx>>,
+ seen_requirements: &mut FxHashSet<DefId>,
+ ) where
+ T: fmt::Display,
+ {
+ let tcx = self.tcx;
+ match *cause_code {
+ ObligationCauseCode::ExprAssignable
+ | ObligationCauseCode::MatchExpressionArm { .. }
+ | ObligationCauseCode::Pattern { .. }
+ | ObligationCauseCode::IfExpression { .. }
+ | ObligationCauseCode::IfExpressionWithNoElse
+ | ObligationCauseCode::MainFunctionType
+ | ObligationCauseCode::StartFunctionType
+ | ObligationCauseCode::IntrinsicType
+ | ObligationCauseCode::MethodReceiver
+ | ObligationCauseCode::ReturnNoExpression
+ | ObligationCauseCode::UnifyReceiver(..)
+ | ObligationCauseCode::OpaqueType
+ | ObligationCauseCode::MiscObligation
+ | ObligationCauseCode::WellFormed(..)
+ | ObligationCauseCode::MatchImpl(..)
+ | ObligationCauseCode::ReturnType
+ | ObligationCauseCode::ReturnValue(_)
+ | ObligationCauseCode::BlockTailExpression(_)
+ | ObligationCauseCode::AwaitableExpr(_)
+ | ObligationCauseCode::ForLoopIterator
+ | ObligationCauseCode::QuestionMark
+ | ObligationCauseCode::CheckAssociatedTypeBounds { .. }
+ | ObligationCauseCode::LetElse
+ | ObligationCauseCode::BinOp { .. } => {}
+ ObligationCauseCode::SliceOrArrayElem => {
+ err.note("slice and array elements must have `Sized` type");
+ }
+ ObligationCauseCode::TupleElem => {
+ err.note("only the last element of a tuple may have a dynamically sized type");
+ }
+ ObligationCauseCode::ProjectionWf(data) => {
+ err.note(&format!("required so that the projection `{}` is well-formed", data,));
+ }
+ ObligationCauseCode::ReferenceOutlivesReferent(ref_ty) => {
+ err.note(&format!(
+ "required so that reference `{}` does not outlive its referent",
+ ref_ty,
+ ));
+ }
+ ObligationCauseCode::ObjectTypeBound(object_ty, region) => {
+ err.note(&format!(
+ "required so that the lifetime bound of `{}` for `{}` is satisfied",
+ region, object_ty,
+ ));
+ }
+ ObligationCauseCode::ItemObligation(_item_def_id) => {
+ // We hold the `DefId` of the item introducing the obligation, but displaying it
+ // doesn't add user-usable information: it always points at an associated item.
+ }
+ ObligationCauseCode::BindingObligation(item_def_id, span) => {
+ let item_name = tcx.def_path_str(item_def_id);
+ let mut multispan = MultiSpan::from(span);
+ if let Some(ident) = tcx.opt_item_ident(item_def_id) {
+ let sm = tcx.sess.source_map();
+ let same_line =
+ match (sm.lookup_line(ident.span.hi()), sm.lookup_line(span.lo())) {
+ (Ok(l), Ok(r)) => l.line == r.line,
+ _ => true,
+ };
+ if !ident.span.overlaps(span) && !same_line {
+ multispan.push_span_label(ident.span, "required by a bound in this");
+ }
+ }
+ let descr = format!("required by a bound in `{}`", item_name);
+ if span != DUMMY_SP {
+ let msg = format!("required by this bound in `{}`", item_name);
+ multispan.push_span_label(span, msg);
+ err.span_note(multispan, &descr);
+ } else {
+ err.span_note(tcx.def_span(item_def_id), &descr);
+ }
+ }
+ ObligationCauseCode::ObjectCastObligation(concrete_ty, object_ty) => {
+ err.note(&format!(
+ "required for the cast from `{}` to the object type `{}`",
+ self.ty_to_string(concrete_ty),
+ self.ty_to_string(object_ty)
+ ));
+ }
+ ObligationCauseCode::Coercion { source: _, target } => {
+ err.note(&format!("required by cast to type `{}`", self.ty_to_string(target)));
+ }
+ ObligationCauseCode::RepeatElementCopy { is_const_fn } => {
+ err.note(
+ "the `Copy` trait is required because this value will be copied for each element of the array",
+ );
+
+ if is_const_fn {
+ err.help(
+ "consider creating a new `const` item and initializing it with the result \
+ of the function call to be used in the repeat position, like \
+ `const VAL: Type = const_fn();` and `let x = [VAL; 42];`",
+ );
+ }
+
+ if self.tcx.sess.is_nightly_build() && is_const_fn {
+ err.help(
+ "create an inline `const` block, see RFC #2920 \
+ <https://github.com/rust-lang/rfcs/pull/2920> for more information",
+ );
+ }
+ }
+ ObligationCauseCode::VariableType(hir_id) => {
+ let parent_node = self.tcx.hir().get_parent_node(hir_id);
+ match self.tcx.hir().find(parent_node) {
+ Some(Node::Local(hir::Local {
+ init: Some(hir::Expr { kind: hir::ExprKind::Index(_, _), span, .. }),
+ ..
+ })) => {
+ // When encountering an assignment of an unsized value, like
+ // `let x = ""[..];`, provide a suggestion to borrow the initializer in
+ // order to have a slice instead.
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ "consider borrowing here",
+ "&",
+ Applicability::MachineApplicable,
+ );
+ err.note("all local variables must have a statically known size");
+ }
+ Some(Node::Param(param)) => {
+ err.span_suggestion_verbose(
+ param.ty_span.shrink_to_lo(),
+ "function arguments must have a statically known size, borrowed types \
+ always have a known size",
+ "&",
+ Applicability::MachineApplicable,
+ );
+ }
+ _ => {
+ err.note("all local variables must have a statically known size");
+ }
+ }
+ if !self.tcx.features().unsized_locals {
+ err.help("unsized locals are gated as an unstable feature");
+ }
+ }
+ ObligationCauseCode::SizedArgumentType(sp) => {
+ if let Some(span) = sp {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ "function arguments must have a statically known size, borrowed types \
+ always have a known size",
+ "&",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.note("all function arguments must have a statically known size");
+ }
+ if tcx.sess.opts.unstable_features.is_nightly_build()
+ && !self.tcx.features().unsized_fn_params
+ {
+ err.help("unsized fn params are gated as an unstable feature");
+ }
+ }
+ ObligationCauseCode::SizedReturnType => {
+ err.note("the return type of a function must have a statically known size");
+ }
+ ObligationCauseCode::SizedYieldType => {
+ err.note("the yield type of a generator must have a statically known size");
+ }
+ ObligationCauseCode::SizedBoxType => {
+ err.note("the type of a box expression must have a statically known size");
+ }
+ ObligationCauseCode::AssignmentLhsSized => {
+ err.note("the left-hand-side of an assignment must have a statically known size");
+ }
+ ObligationCauseCode::TupleInitializerSized => {
+ err.note("tuples must have a statically known size to be initialized");
+ }
+ ObligationCauseCode::StructInitializerSized => {
+ err.note("structs must have a statically known size to be initialized");
+ }
+ ObligationCauseCode::FieldSized { adt_kind: ref item, last, span } => {
+ match *item {
+ AdtKind::Struct => {
+ if last {
+ err.note(
+ "the last field of a packed struct may only have a \
+ dynamically sized type if it does not need drop to be run",
+ );
+ } else {
+ err.note(
+ "only the last field of a struct may have a dynamically sized type",
+ );
+ }
+ }
+ AdtKind::Union => {
+ err.note("no field of a union may have a dynamically sized type");
+ }
+ AdtKind::Enum => {
+ err.note("no field of an enum variant may have a dynamically sized type");
+ }
+ }
+ err.help("change the field's type to have a statically known size");
+ err.span_suggestion(
+ span.shrink_to_lo(),
+ "borrowed types always have a statically known size",
+ "&",
+ Applicability::MachineApplicable,
+ );
+ err.multipart_suggestion(
+ "the `Box` type always has a statically known size and allocates its contents \
+ in the heap",
+ vec![
+ (span.shrink_to_lo(), "Box<".to_string()),
+ (span.shrink_to_hi(), ">".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ ObligationCauseCode::ConstSized => {
+ err.note("constant expressions must have a statically known size");
+ }
+ ObligationCauseCode::InlineAsmSized => {
+ err.note("all inline asm arguments must have a statically known size");
+ }
+ ObligationCauseCode::ConstPatternStructural => {
+ err.note("constants used for pattern-matching must derive `PartialEq` and `Eq`");
+ }
+ ObligationCauseCode::SharedStatic => {
+ err.note("shared static variables must have a type that implements `Sync`");
+ }
+ ObligationCauseCode::BuiltinDerivedObligation(ref data) => {
+ let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_pred);
+ let ty = parent_trait_ref.skip_binder().self_ty();
+ if parent_trait_ref.references_error() {
+ // NOTE(eddyb) this was `.cancel()`, but `err`
+ // is borrowed, so we can't fully defuse it.
+ err.downgrade_to_delayed_bug();
+ return;
+ }
+
+ // If the obligation for a tuple is set directly by a Generator or Closure,
+ // then the tuple must be the one containing capture types.
+ let is_upvar_tys_infer_tuple = if !matches!(ty.kind(), ty::Tuple(..)) {
+ false
+ } else {
+ if let ObligationCauseCode::BuiltinDerivedObligation(data) = &*data.parent_code
+ {
+ let parent_trait_ref =
+ self.resolve_vars_if_possible(data.parent_trait_pred);
+ let nested_ty = parent_trait_ref.skip_binder().self_ty();
+ matches!(nested_ty.kind(), ty::Generator(..))
+ || matches!(nested_ty.kind(), ty::Closure(..))
+ } else {
+ false
+ }
+ };
+
+ let from_generator = tcx.lang_items().from_generator_fn().unwrap();
+
+ // Don't print the tuple of capture types
+ 'print: {
+ if !is_upvar_tys_infer_tuple {
+ let msg = format!("required because it appears within the type `{}`", ty);
+ match ty.kind() {
+ ty::Adt(def, _) => {
+ // `gen_future` is used in all async functions; it doesn't add any additional info.
+ if self.tcx.is_diagnostic_item(sym::gen_future, def.did()) {
+ break 'print;
+ }
+ match self.tcx.opt_item_ident(def.did()) {
+ Some(ident) => err.span_note(ident.span, &msg),
+ None => err.note(&msg),
+ }
+ }
+ ty::Opaque(def_id, _) => {
+ // Avoid printing the future from `core::future::from_generator`; it's not helpful.
+ if tcx.parent(*def_id) == from_generator {
+ break 'print;
+ }
+
+ // If the previous type is `from_generator`, this is the future generated by the body of an async function.
+ // Avoid printing it twice (it was already printed in the `ty::Generator` arm below).
+ let is_future = tcx.ty_is_opaque_future(ty);
+ debug!(
+ ?obligated_types,
+ ?is_future,
+ "note_obligation_cause_code: check for async fn"
+ );
+ if is_future
+ && obligated_types.last().map_or(false, |ty| match ty.kind() {
+ ty::Opaque(last_def_id, _) => {
+ tcx.parent(*last_def_id) == from_generator
+ }
+ _ => false,
+ })
+ {
+ break 'print;
+ }
+ err.span_note(self.tcx.def_span(def_id), &msg)
+ }
+ ty::GeneratorWitness(bound_tys) => {
+ use std::fmt::Write;
+
+ // FIXME: this is kind of an unusual format for rustc, can we make it more clear?
+ // Maybe we should just remove this note altogether?
+ // FIXME: only print types which don't meet the trait requirement
+ let mut msg =
+ "required because it captures the following types: ".to_owned();
+ for ty in bound_tys.skip_binder() {
+ write!(msg, "`{}`, ", ty).unwrap();
+ }
+ err.note(msg.trim_end_matches(", "))
+ }
+ ty::Generator(def_id, _, _) => {
+ let sp = self.tcx.def_span(def_id);
+
+ // Special-case this to say "async block" instead of `[static generator]`.
+ let kind = tcx.generator_kind(def_id).unwrap();
+ err.span_note(
+ sp,
+ &format!("required because it's used within this {}", kind),
+ )
+ }
+ ty::Closure(def_id, _) => err.span_note(
+ self.tcx.def_span(def_id),
+ &format!("required because it's used within this closure"),
+ ),
+ _ => err.note(&msg),
+ };
+ }
+ }
+
+ obligated_types.push(ty);
+
+ let parent_predicate = parent_trait_ref.to_predicate(tcx);
+ if !self.is_recursive_obligation(obligated_types, &data.parent_code) {
+ // #74711: avoid a stack overflow
+ ensure_sufficient_stack(|| {
+ self.note_obligation_cause_code(
+ err,
+ &parent_predicate,
+ param_env,
+ &data.parent_code,
+ obligated_types,
+ seen_requirements,
+ )
+ });
+ } else {
+ ensure_sufficient_stack(|| {
+ self.note_obligation_cause_code(
+ err,
+ &parent_predicate,
+ param_env,
+ cause_code.peel_derives(),
+ obligated_types,
+ seen_requirements,
+ )
+ });
+ }
+ }
+ ObligationCauseCode::ImplDerivedObligation(ref data) => {
+ let mut parent_trait_pred =
+ self.resolve_vars_if_possible(data.derived.parent_trait_pred);
+ parent_trait_pred.remap_constness_diag(param_env);
+ let parent_def_id = parent_trait_pred.def_id();
+ let msg = format!(
+ "required because of the requirements on the impl of `{}` for `{}`",
+ parent_trait_pred.print_modifiers_and_trait_path(),
+ parent_trait_pred.skip_binder().self_ty()
+ );
+ let mut is_auto_trait = false;
+ match self.tcx.hir().get_if_local(data.impl_def_id) {
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(is_auto, ..),
+ ident,
+ ..
+ })) => {
+ // FIXME: we should do something else so that it works even on crate foreign
+ // auto traits.
+ is_auto_trait = matches!(is_auto, hir::IsAuto::Yes);
+ err.span_note(ident.span, &msg)
+ }
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }),
+ ..
+ })) => {
+ let mut spans = Vec::with_capacity(2);
+ if let Some(trait_ref) = of_trait {
+ spans.push(trait_ref.path.span);
+ }
+ spans.push(self_ty.span);
+ err.span_note(spans, &msg)
+ }
+ _ => err.note(&msg),
+ };
+
+ let mut parent_predicate = parent_trait_pred.to_predicate(tcx);
+ let mut data = &data.derived;
+ let mut count = 0;
+ seen_requirements.insert(parent_def_id);
+ if is_auto_trait {
+ // We don't want to point at the ADT saying "required because it appears within
+ // the type `X`", like we would otherwise do in test `supertrait-auto-trait.rs`.
+ while let ObligationCauseCode::BuiltinDerivedObligation(derived) =
+ &*data.parent_code
+ {
+ let child_trait_ref =
+ self.resolve_vars_if_possible(derived.parent_trait_pred);
+ let child_def_id = child_trait_ref.def_id();
+ if seen_requirements.insert(child_def_id) {
+ break;
+ }
+ data = derived;
+ parent_predicate = child_trait_ref.to_predicate(tcx);
+ parent_trait_pred = child_trait_ref;
+ }
+ }
+ while let ObligationCauseCode::ImplDerivedObligation(child) = &*data.parent_code {
+ // Skip redundant recursive obligation notes. See `ui/issue-20413.rs`.
+ let child_trait_pred =
+ self.resolve_vars_if_possible(child.derived.parent_trait_pred);
+ let child_def_id = child_trait_pred.def_id();
+ if seen_requirements.insert(child_def_id) {
+ break;
+ }
+ count += 1;
+ data = &child.derived;
+ parent_predicate = child_trait_pred.to_predicate(tcx);
+ parent_trait_pred = child_trait_pred;
+ }
+ if count > 0 {
+ err.note(&format!(
+ "{} redundant requirement{} hidden",
+ count,
+ pluralize!(count)
+ ));
+ err.note(&format!(
+ "required because of the requirements on the impl of `{}` for `{}`",
+ parent_trait_pred.print_modifiers_and_trait_path(),
+ parent_trait_pred.skip_binder().self_ty()
+ ));
+ }
+ // #74711: avoid a stack overflow
+ ensure_sufficient_stack(|| {
+ self.note_obligation_cause_code(
+ err,
+ &parent_predicate,
+ param_env,
+ &data.parent_code,
+ obligated_types,
+ seen_requirements,
+ )
+ });
+ }
+ ObligationCauseCode::DerivedObligation(ref data) => {
+ let parent_trait_ref = self.resolve_vars_if_possible(data.parent_trait_pred);
+ let parent_predicate = parent_trait_ref.to_predicate(tcx);
+ // #74711: avoid a stack overflow
+ ensure_sufficient_stack(|| {
+ self.note_obligation_cause_code(
+ err,
+ &parent_predicate,
+ param_env,
+ &data.parent_code,
+ obligated_types,
+ seen_requirements,
+ )
+ });
+ }
+ ObligationCauseCode::FunctionArgumentObligation {
+ arg_hir_id,
+ call_hir_id,
+ ref parent_code,
+ } => {
+ let hir = self.tcx.hir();
+ if let Some(Node::Expr(expr @ hir::Expr { kind: hir::ExprKind::Block(..), .. })) =
+ hir.find(arg_hir_id)
+ {
+ let in_progress_typeck_results =
+ self.in_progress_typeck_results.map(|t| t.borrow());
+ let parent_id = hir.get_parent_item(arg_hir_id);
+ let typeck_results: &TypeckResults<'tcx> = match &in_progress_typeck_results {
+ Some(t) if t.hir_owner == parent_id => t,
+ _ => self.tcx.typeck(parent_id),
+ };
+ let ty = typeck_results.expr_ty_adjusted(expr);
+ let span = expr.peel_blocks().span;
+ if Some(span) != err.span.primary_span() {
+ err.span_label(
+ span,
+ &if ty.references_error() {
+ String::new()
+ } else {
+ format!("this tail expression is of type `{:?}`", ty)
+ },
+ );
+ }
+ }
+ if let Some(Node::Expr(hir::Expr {
+ kind:
+ hir::ExprKind::Call(hir::Expr { span, .. }, _)
+ | hir::ExprKind::MethodCall(
+ hir::PathSegment { ident: Ident { span, .. }, .. },
+ ..,
+ ),
+ ..
+ })) = hir.find(call_hir_id)
+ {
+ if Some(*span) != err.span.primary_span() {
+ err.span_label(*span, "required by a bound introduced by this call");
+ }
+ }
+ ensure_sufficient_stack(|| {
+ self.note_obligation_cause_code(
+ err,
+ predicate,
+ param_env,
+ &parent_code,
+ obligated_types,
+ seen_requirements,
+ )
+ });
+ }
+ ObligationCauseCode::CompareImplItemObligation { trait_item_def_id, kind, .. } => {
+ let item_name = self.tcx.item_name(trait_item_def_id);
+ let msg = format!(
+ "the requirement `{}` appears on the `impl`'s {kind} `{}` but not on the \
+ corresponding trait's {kind}",
+ predicate, item_name,
+ );
+ let sp = self
+ .tcx
+ .opt_item_ident(trait_item_def_id)
+ .map(|i| i.span)
+ .unwrap_or_else(|| self.tcx.def_span(trait_item_def_id));
+ let mut assoc_span: MultiSpan = sp.into();
+ assoc_span.push_span_label(
+ sp,
+ format!("this trait's {kind} doesn't have the requirement `{}`", predicate),
+ );
+ if let Some(ident) = self
+ .tcx
+ .opt_associated_item(trait_item_def_id)
+ .and_then(|i| self.tcx.opt_item_ident(i.container_id(self.tcx)))
+ {
+ assoc_span.push_span_label(ident.span, "in this trait");
+ }
+ err.span_note(assoc_span, &msg);
+ }
+ ObligationCauseCode::TrivialBound => {
+ err.help("see issue #48214");
+ if tcx.sess.opts.unstable_features.is_nightly_build() {
+ err.help("add `#![feature(trivial_bounds)]` to the crate attributes to enable");
+ }
+ }
+ ObligationCauseCode::OpaqueReturnType(expr_info) => {
+ if let Some((expr_ty, expr_span)) = expr_info {
+ let expr_ty = self.resolve_vars_if_possible(expr_ty);
+ err.span_label(
+ expr_span,
+ format!("return type was inferred to be `{expr_ty}` here"),
+ );
+ }
+ }
+ }
+ }
+
+ fn suggest_new_overflow_limit(&self, err: &mut Diagnostic) {
+ let suggested_limit = match self.tcx.recursion_limit() {
+ Limit(0) => Limit(2),
+ limit => limit * 2,
+ };
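+        // E.g. the default recursion limit of 128 yields a suggestion of 256.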
+ err.help(&format!(
+ "consider increasing the recursion limit by adding a \
+ `#![recursion_limit = \"{}\"]` attribute to your crate (`{}`)",
+ suggested_limit,
+ self.tcx.crate_name(LOCAL_CRATE),
+ ));
+ }
+
+ #[instrument(
+ level = "debug", skip(self, err), fields(trait_pred.self_ty = ?trait_pred.self_ty())
+ )]
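+    /// Suggests inserting `.await` before the `?` when the `?`-ed expression
+    /// is a `Future` whose `Output` would satisfy the failing bound, e.g.
+    /// (illustrative) rewriting `f()?` as `f().await?` inside an `async fn`.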
+ fn suggest_await_before_try(
+ &self,
+ err: &mut Diagnostic,
+ obligation: &PredicateObligation<'tcx>,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ span: Span,
+ ) {
+ let body_hir_id = obligation.cause.body_id;
+ let item_id = self.tcx.hir().get_parent_node(body_hir_id);
+
+ if let Some(body_id) =
+ self.tcx.hir().maybe_body_owned_by(self.tcx.hir().local_def_id(item_id))
+ {
+ let body = self.tcx.hir().body(body_id);
+ if let Some(hir::GeneratorKind::Async(_)) = body.generator_kind {
+ let future_trait = self.tcx.require_lang_item(LangItem::Future, None);
+
+ let self_ty = self.resolve_vars_if_possible(trait_pred.self_ty());
+ let impls_future = self.type_implements_trait(
+ future_trait,
+ self.tcx.erase_late_bound_regions(self_ty),
+ ty::List::empty(),
+ obligation.param_env,
+ );
+ if !impls_future.must_apply_modulo_regions() {
+ return;
+ }
+
+ let item_def_id = self.tcx.associated_item_def_ids(future_trait)[0];
+ // `<T as Future>::Output`
+ let projection_ty = trait_pred.map_bound(|trait_pred| {
+ self.tcx.mk_projection(
+ item_def_id,
+ // Future::Output has no substs
+ self.tcx.mk_substs_trait(trait_pred.self_ty(), &[]),
+ )
+ });
+ let projection_ty = normalize_to(
+ &mut SelectionContext::new(self),
+ obligation.param_env,
+ obligation.cause.clone(),
+ projection_ty,
+ &mut vec![],
+ );
+
+ debug!(
+ normalized_projection_type = ?self.resolve_vars_if_possible(projection_ty)
+ );
+ let try_obligation = self.mk_trait_obligation_with_new_self_ty(
+ obligation.param_env,
+ trait_pred.map_bound(|trait_pred| (trait_pred, projection_ty.skip_binder())),
+ );
+ debug!(try_trait_obligation = ?try_obligation);
+ if self.predicate_may_hold(&try_obligation)
+ && let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span)
+ && snippet.ends_with('?')
+ {
+ err.span_suggestion_verbose(
+ span.with_hi(span.hi() - BytePos(1)).shrink_to_hi(),
+ "consider `await`ing on the `Future`",
+ ".await",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+
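+    /// For a binary operation mixing a float with an integer literal,
+    /// suggests spelling the literal as a float, e.g. (illustrative)
+    /// rewriting the `1` in `2.0 + 1` as `1.0`.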
+ fn suggest_floating_point_literal(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_ref: &ty::PolyTraitRef<'tcx>,
+ ) {
+ let rhs_span = match obligation.cause.code() {
+ ObligationCauseCode::BinOp { rhs_span: Some(span), is_lit, .. } if *is_lit => span,
+ _ => return,
+ };
+ match (
+ trait_ref.skip_binder().self_ty().kind(),
+ trait_ref.skip_binder().substs.type_at(1).kind(),
+ ) {
+ (ty::Float(_), ty::Infer(InferTy::IntVar(_))) => {
+ err.span_suggestion_verbose(
+ rhs_span.shrink_to_hi(),
+ "consider using a floating-point literal by writing it with `.0`",
+ ".0",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ }
+ }
+
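+    /// Suggests adding `#[derive(..)]` when the failing trait is a standard
+    /// derivable trait and every field of the local ADT already satisfies it.
+    ///
+    /// For example (illustrative):
+    ///
+    /// ```ignore (illustrative)
+    /// struct Point { x: i32, y: i32 }
+    /// fn dup<T: Clone>(t: T) -> (T, T) { (t.clone(), t) }
+    /// dup(Point { x: 0, y: 0 }); // error: `Point: Clone` is not satisfied
+    /// ```
+    ///
+    /// Every field here is `Clone`, so `#[derive(Clone)]` is suggested on
+    /// `Point`.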
+ fn suggest_derive(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) {
+ let Some(diagnostic_name) = self.tcx.get_diagnostic_name(trait_pred.def_id()) else {
+ return;
+ };
+ let (adt, substs) = match trait_pred.skip_binder().self_ty().kind() {
+ ty::Adt(adt, substs) if adt.did().is_local() => (adt, substs),
+ _ => return,
+ };
+ let can_derive = {
+ let is_derivable_trait = match diagnostic_name {
+ sym::Default => !adt.is_enum(),
+ sym::PartialEq | sym::PartialOrd => {
+ let rhs_ty = trait_pred.skip_binder().trait_ref.substs.type_at(1);
+ trait_pred.skip_binder().self_ty() == rhs_ty
+ }
+ sym::Eq | sym::Ord | sym::Clone | sym::Copy | sym::Hash | sym::Debug => true,
+ _ => false,
+ };
+ is_derivable_trait &&
+ // Ensure all fields impl the trait.
+ adt.all_fields().all(|field| {
+ let field_ty = field.ty(self.tcx, substs);
+ let trait_substs = match diagnostic_name {
+ sym::PartialEq | sym::PartialOrd => {
+ self.tcx.mk_substs_trait(field_ty, &[field_ty.into()])
+ }
+ _ => self.tcx.mk_substs_trait(field_ty, &[]),
+ };
+ let trait_pred = trait_pred.map_bound_ref(|tr| ty::TraitPredicate {
+ trait_ref: ty::TraitRef {
+ substs: trait_substs,
+ ..trait_pred.skip_binder().trait_ref
+ },
+ ..*tr
+ });
+ let field_obl = Obligation::new(
+ obligation.cause.clone(),
+ obligation.param_env,
+ trait_pred.to_predicate(self.tcx),
+ );
+ self.predicate_must_hold_modulo_regions(&field_obl)
+ })
+ };
+ if can_derive {
+ err.span_suggestion_verbose(
+ self.tcx.def_span(adt.did()).shrink_to_lo(),
+ &format!(
+ "consider annotating `{}` with `#[derive({})]`",
+ trait_pred.skip_binder().self_ty(),
+ diagnostic_name,
+ ),
+ format!("#[derive({})]\n", diagnostic_name),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ fn suggest_dereferencing_index(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ err: &mut Diagnostic,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) {
+ if let ObligationCauseCode::ImplDerivedObligation(_) = obligation.cause.code()
+ && self.tcx.is_diagnostic_item(sym::SliceIndex, trait_pred.skip_binder().trait_ref.def_id)
+ && let ty::Slice(_) = trait_pred.skip_binder().trait_ref.substs.type_at(1).kind()
+ && let ty::Ref(_, inner_ty, _) = trait_pred.skip_binder().self_ty().kind()
+ && let ty::Uint(ty::UintTy::Usize) = inner_ty.kind()
+ {
+ err.span_suggestion_verbose(
+ obligation.cause.span.shrink_to_lo(),
+ "dereference this index",
+ '*',
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+}
+
+/// Collect all the returned expressions within the input expression.
+/// Used to point at the return spans when we want to suggest some change to them.
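+///
+/// For example (illustrative), in
+///
+/// ```ignore (illustrative)
+/// fn f(x: bool) -> i32 {
+///     if x { return 1; }
+///     0
+/// }
+/// ```
+///
+/// the visitor collects both the `1` from the explicit `return` and the
+/// tail expression `0`.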
+#[derive(Default)]
+pub struct ReturnsVisitor<'v> {
+ pub returns: Vec<&'v hir::Expr<'v>>,
+ in_block_tail: bool,
+}
+
+impl<'v> Visitor<'v> for ReturnsVisitor<'v> {
+ fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) {
+ // Visit every expression to detect `return` paths, either through the function's tail
+ // expression or `return` statements. We walk all nodes to find `return` statements, but
+ // we only care about tail expressions when `in_block_tail` is `true`, which means that
+ // they're in the return path of the function body.
+ match ex.kind {
+ hir::ExprKind::Ret(Some(ex)) => {
+ self.returns.push(ex);
+ }
+ hir::ExprKind::Block(block, _) if self.in_block_tail => {
+ self.in_block_tail = false;
+ for stmt in block.stmts {
+ hir::intravisit::walk_stmt(self, stmt);
+ }
+ self.in_block_tail = true;
+ if let Some(expr) = block.expr {
+ self.visit_expr(expr);
+ }
+ }
+ hir::ExprKind::If(_, then, else_opt) if self.in_block_tail => {
+ self.visit_expr(then);
+ if let Some(el) = else_opt {
+ self.visit_expr(el);
+ }
+ }
+ hir::ExprKind::Match(_, arms, _) if self.in_block_tail => {
+ for arm in arms {
+ self.visit_expr(arm.body);
+ }
+ }
+ // We need to walk to find `return`s in the entire body.
+ _ if !self.in_block_tail => hir::intravisit::walk_expr(self, ex),
+ _ => self.returns.push(ex),
+ }
+ }
+
+ fn visit_body(&mut self, body: &'v hir::Body<'v>) {
+ assert!(!self.in_block_tail);
+ if body.generator_kind().is_none() {
+ if let hir::ExprKind::Block(block, None) = body.value.kind {
+ if block.expr.is_some() {
+ self.in_block_tail = true;
+ }
+ }
+ }
+ hir::intravisit::walk_body(self, body);
+ }
+}
+
+/// Collect all the awaited expressions within the input expression.
+#[derive(Default)]
+struct AwaitsVisitor {
+ awaits: Vec<hir::HirId>,
+}
+
+impl<'v> Visitor<'v> for AwaitsVisitor {
+ fn visit_expr(&mut self, ex: &'v hir::Expr<'v>) {
+ if let hir::ExprKind::Yield(_, hir::YieldSource::Await { expr: Some(id) }) = ex.kind {
+ self.awaits.push(id)
+ }
+ hir::intravisit::walk_expr(self, ex)
+ }
+}
+
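+/// Picks a name for a suggested new type parameter: the uppercased first
+/// letter of `name` if given, otherwise the first unused entry from a fixed
+/// candidate list (`T`, `U`, `V`, ...).
+///
+/// For example (illustrative): with existing params `[T, U]`,
+/// `next_type_param_name(Some("iter"))` yields `"I"`, while
+/// `next_type_param_name(None)` yields `"V"`.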
+pub trait NextTypeParamName {
+ fn next_type_param_name(&self, name: Option<&str>) -> String;
+}
+
+impl NextTypeParamName for &[hir::GenericParam<'_>] {
+ fn next_type_param_name(&self, name: Option<&str>) -> String {
+ // This is the list of possible parameter names that we might suggest.
+ let name = name.and_then(|n| n.chars().next()).map(|c| c.to_string().to_uppercase());
+ let name = name.as_deref();
+ let possible_names = [name.unwrap_or("T"), "T", "U", "V", "X", "Y", "Z", "A", "B", "C"];
+ let used_names = self
+ .iter()
+ .filter_map(|p| match p.name {
+ hir::ParamName::Plain(ident) => Some(ident.name),
+ _ => None,
+ })
+ .collect::<Vec<_>>();
+
+ possible_names
+ .iter()
+ .find(|n| !used_names.contains(&Symbol::intern(n)))
+ .unwrap_or(&"ParamName")
+ .to_string()
+ }
+}
+
+fn suggest_trait_object_return_type_alternatives(
+ err: &mut Diagnostic,
+ ret_ty: Span,
+ trait_obj: &str,
+ is_object_safe: bool,
+) {
+ err.span_suggestion(
+ ret_ty,
+ "use some type `T` that is `T: Sized` as the return type if all return paths have the \
+ same type",
+ "T",
+ Applicability::MaybeIncorrect,
+ );
+ err.span_suggestion(
+ ret_ty,
+ &format!(
+ "use `impl {}` as the return type if all return paths have the same type but you \
+ want to expose only the trait in the signature",
+ trait_obj,
+ ),
+ format!("impl {}", trait_obj),
+ Applicability::MaybeIncorrect,
+ );
+ if is_object_safe {
+ err.multipart_suggestion(
+ &format!(
+ "use a boxed trait object if all return paths implement trait `{}`",
+ trait_obj,
+ ),
+ vec![
+ (ret_ty.shrink_to_lo(), "Box<".to_string()),
+ (ret_ty.shrink_to_hi(), ">".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+}
+
+/// Collect the spans where we see the generic param `param_did`.
+struct ReplaceImplTraitVisitor<'a> {
+ ty_spans: &'a mut Vec<Span>,
+ param_did: DefId,
+}
+
+impl<'a, 'hir> hir::intravisit::Visitor<'hir> for ReplaceImplTraitVisitor<'a> {
+ fn visit_ty(&mut self, t: &'hir hir::Ty<'hir>) {
+ if let hir::TyKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path { res: hir::def::Res::Def(_, segment_did), .. },
+ )) = t.kind
+ {
+ if self.param_did == *segment_did {
+ // `fn foo(t: impl Trait)`
+ // ^^^^^^^^^^ get this to suggest `T` instead
+
+ // There might be more than one `impl Trait`.
+ self.ty_spans.push(t.span);
+ return;
+ }
+ }
+
+ hir::intravisit::walk_ty(self, t);
+ }
+}
+
+// Replace `param` with `replace_ty`
+struct ReplaceImplTraitFolder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param: &'tcx ty::GenericParamDef,
+ replace_ty: Ty<'tcx>,
+}
+
+impl<'tcx> TypeFolder<'tcx> for ReplaceImplTraitFolder<'tcx> {
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ if let ty::Param(ty::ParamTy { index, .. }) = t.kind() {
+ if self.param.index == *index {
+ return self.replace_ty;
+ }
+ }
+ t.super_fold_with(self)
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/fulfill.rs b/compiler/rustc_trait_selection/src/traits/fulfill.rs
new file mode 100644
index 000000000..556ef466c
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/fulfill.rs
@@ -0,0 +1,757 @@
+use crate::infer::{InferCtxt, TyOrConstInferVar};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::obligation_forest::ProcessResult;
+use rustc_data_structures::obligation_forest::{Error, ForestObligation, Outcome};
+use rustc_data_structures::obligation_forest::{ObligationForest, ObligationProcessor};
+use rustc_infer::traits::ProjectionCacheKey;
+use rustc_infer::traits::{SelectionError, TraitEngine, TraitEngineExt as _, TraitObligation};
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::ty::abstract_const::NotConstEvaluatable;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::ToPredicate;
+use rustc_middle::ty::{self, Binder, Const, Ty, TypeVisitable};
+use std::marker::PhantomData;
+
+use super::const_evaluatable;
+use super::project::{self, ProjectAndUnifyResult};
+use super::select::SelectionContext;
+use super::wf;
+use super::CodeAmbiguity;
+use super::CodeProjectionError;
+use super::CodeSelectionError;
+use super::EvaluationResult;
+use super::Unimplemented;
+use super::{FulfillmentError, FulfillmentErrorCode};
+use super::{ObligationCause, PredicateObligation};
+
+use crate::traits::error_reporting::InferCtxtExt as _;
+use crate::traits::project::PolyProjectionObligation;
+use crate::traits::project::ProjectionCacheKeyExt as _;
+use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
+
+impl<'tcx> ForestObligation for PendingPredicateObligation<'tcx> {
+ /// Note that we include both the `ParamEnv` and the `Predicate`,
+ /// as the `ParamEnv` can influence whether fulfillment succeeds
+ /// or fails.
+ type CacheKey = ty::ParamEnvAnd<'tcx, ty::Predicate<'tcx>>;
+
+ fn as_cache_key(&self) -> Self::CacheKey {
+ self.obligation.param_env.and(self.obligation.predicate)
+ }
+}
+
+/// The fulfillment context is used to drive trait resolution. It
+/// consists of a list of obligations that must be (eventually)
+/// satisfied. The job is to track which are satisfied, which yielded
+/// errors, and which are still pending. At any point, users can call
+/// `select_where_possible`, and the fulfillment context will try to do
+/// selection, retaining only those obligations that remain
+/// ambiguous. This may be helpful in pushing type inference
+/// along. Once all type inference constraints have been generated, the
+/// method `select_all_or_error` can be used to report any remaining
+/// ambiguous cases as errors.
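+///
+/// # Example
+///
+/// A minimal sketch of the usual driver loop (illustrative only; `infcx`
+/// and `obligation` are assumed to come from the surrounding context):
+///
+/// ```ignore (illustrative)
+/// let mut fulfill_cx = FulfillmentContext::new();
+/// fulfill_cx.register_predicate_obligation(infcx, obligation);
+/// // Try to make progress; still-ambiguous obligations are retained.
+/// let _errors = fulfill_cx.select_where_possible(infcx);
+/// // Once inference is done, remaining ambiguities become errors.
+/// let errors = fulfill_cx.select_all_or_error(infcx);
+/// ```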
+pub struct FulfillmentContext<'tcx> {
+ // A list of all obligations that have been registered with this
+ // fulfillment context.
+ predicates: ObligationForest<PendingPredicateObligation<'tcx>>,
+
+ relationships: FxHashMap<ty::TyVid, ty::FoundRelationships>,
+
+ // Is it OK to register obligations into this infcx inside
+ // an infcx snapshot?
+ //
+ // The "primary fulfillment" in many cases in typeck lives
+ // outside of any snapshot, so any use of it inside a snapshot
+ // will lead to trouble and therefore is checked against, but
+ // other fulfillment contexts sometimes do live inside of
+ // a snapshot (they don't *straddle* a snapshot, so there
+ // is no trouble there).
+ usable_in_snapshot: bool,
+}
+
+#[derive(Clone, Debug)]
+pub struct PendingPredicateObligation<'tcx> {
+ pub obligation: PredicateObligation<'tcx>,
+ // This is far more often read than modified, meaning that we
+ // should mostly optimize for reading speed, while modifying is not as relevant.
+ //
+ // For whatever reason using a boxed slice is slower than using a `Vec` here.
+ pub stalled_on: Vec<TyOrConstInferVar<'tcx>>,
+}
+
+// `PendingPredicateObligation` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(PendingPredicateObligation<'_>, 72);
+
+impl<'a, 'tcx> FulfillmentContext<'tcx> {
+ /// Creates a new fulfillment context.
+ pub fn new() -> FulfillmentContext<'tcx> {
+ FulfillmentContext {
+ predicates: ObligationForest::new(),
+ relationships: FxHashMap::default(),
+ usable_in_snapshot: false,
+ }
+ }
+
+ pub fn new_in_snapshot() -> FulfillmentContext<'tcx> {
+ FulfillmentContext {
+ predicates: ObligationForest::new(),
+ relationships: FxHashMap::default(),
+ usable_in_snapshot: true,
+ }
+ }
+
+ /// Attempts to select obligations using `selcx`.
+ fn select(&mut self, selcx: &mut SelectionContext<'a, 'tcx>) -> Vec<FulfillmentError<'tcx>> {
+ let span = debug_span!("select", obligation_forest_size = ?self.predicates.len());
+ let _enter = span.enter();
+
+ // Process pending obligations.
+ let outcome: Outcome<_, _> =
+ self.predicates.process_obligations(&mut FulfillProcessor { selcx });
+
+ // FIXME: if we kept the original cache key, we could mark projection
+ // obligations as complete for the projection cache here.
+
+ let errors: Vec<FulfillmentError<'tcx>> =
+ outcome.errors.into_iter().map(to_fulfillment_error).collect();
+
+ debug!(
+ "select({} predicates remaining, {} errors) done",
+ self.predicates.len(),
+ errors.len()
+ );
+
+ errors
+ }
+}
+
+impl<'tcx> TraitEngine<'tcx> for FulfillmentContext<'tcx> {
+ /// "Normalize" a projection type `<SomeType as SomeTrait>::X` by
+ /// creating a fresh type variable `$0` as well as a projection
+ /// predicate `<SomeType as SomeTrait>::X == $0`. When the
+ /// inference engine runs, it will attempt to find an impl of
+ /// `SomeTrait` or a where-clause that lets us unify `$0` with
+ /// something concrete. If this fails, we'll unify `$0` with
+ /// `projection_ty` again.
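+    ///
+    /// As an illustrative sketch (`infcx`, `param_env`, and `cause` are
+    /// assumed), normalizing `<Vec<u8> as IntoIterator>::Item` returns
+    /// either `u8` directly or a fresh `$0` constrained by a registered
+    /// projection predicate:
+    ///
+    /// ```ignore (illustrative)
+    /// let ty = fulfill_cx.normalize_projection_type(infcx, param_env, projection_ty, cause);
+    /// ```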
+ #[tracing::instrument(level = "debug", skip(self, infcx, param_env, cause))]
+ fn normalize_projection_type(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ cause: ObligationCause<'tcx>,
+ ) -> Ty<'tcx> {
+ debug_assert!(!projection_ty.has_escaping_bound_vars());
+
+ // FIXME(#20304) -- cache
+
+ let mut selcx = SelectionContext::new(infcx);
+ let mut obligations = vec![];
+ let normalized_ty = project::normalize_projection_type(
+ &mut selcx,
+ param_env,
+ projection_ty,
+ cause,
+ 0,
+ &mut obligations,
+ );
+ self.register_predicate_obligations(infcx, obligations);
+
+ debug!(?normalized_ty);
+
+ normalized_ty.ty().unwrap()
+ }
+
+ fn register_predicate_obligation(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ obligation: PredicateObligation<'tcx>,
+ ) {
+        // This helps to reduce duplicate errors, as well as making
+        // debug output nicer to read.
+ let obligation = infcx.resolve_vars_if_possible(obligation);
+
+ debug!(?obligation, "register_predicate_obligation");
+
+ assert!(!infcx.is_in_snapshot() || self.usable_in_snapshot);
+
+ super::relationships::update(self, infcx, &obligation);
+
+ self.predicates
+ .register_obligation(PendingPredicateObligation { obligation, stalled_on: vec![] });
+ }
+
+ fn select_all_or_error(&mut self, infcx: &InferCtxt<'_, 'tcx>) -> Vec<FulfillmentError<'tcx>> {
+ {
+ let errors = self.select_where_possible(infcx);
+ if !errors.is_empty() {
+ return errors;
+ }
+ }
+
+ self.predicates.to_errors(CodeAmbiguity).into_iter().map(to_fulfillment_error).collect()
+ }
+
+ fn select_where_possible(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ ) -> Vec<FulfillmentError<'tcx>> {
+ let mut selcx = SelectionContext::new(infcx);
+ self.select(&mut selcx)
+ }
+
+ fn pending_obligations(&self) -> Vec<PredicateObligation<'tcx>> {
+ self.predicates.map_pending_obligations(|o| o.obligation.clone())
+ }
+
+ fn relationships(&mut self) -> &mut FxHashMap<ty::TyVid, ty::FoundRelationships> {
+ &mut self.relationships
+ }
+}
+
+struct FulfillProcessor<'a, 'b, 'tcx> {
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+}
+
+fn mk_pending(os: Vec<PredicateObligation<'_>>) -> Vec<PendingPredicateObligation<'_>> {
+ os.into_iter()
+ .map(|o| PendingPredicateObligation { obligation: o, stalled_on: vec![] })
+ .collect()
+}
+
+impl<'a, 'b, 'tcx> ObligationProcessor for FulfillProcessor<'a, 'b, 'tcx> {
+ type Obligation = PendingPredicateObligation<'tcx>;
+ type Error = FulfillmentErrorCode<'tcx>;
+
+ /// Identifies whether a predicate obligation needs processing.
+ ///
+ /// This is always inlined, despite its size, because it has a single
+ /// callsite and it is called *very* frequently.
+ #[inline(always)]
+ fn needs_process_obligation(&self, pending_obligation: &Self::Obligation) -> bool {
+ // If we were stalled on some unresolved variables, first check whether
+ // any of them have been resolved; if not, don't bother doing more work
+ // yet.
+ match pending_obligation.stalled_on.len() {
+ // Match arms are in order of frequency, which matters because this
+ // code is so hot. 1 and 0 dominate; 2+ is fairly rare.
+ 1 => {
+ let infer_var = pending_obligation.stalled_on[0];
+ self.selcx.infcx().ty_or_const_infer_var_changed(infer_var)
+ }
+ 0 => {
+                // Not stalled on anything, so this obligation always needs processing.
+ true
+ }
+ _ => {
+ // This `for` loop was once a call to `all()`, but this lower-level
+ // form was a perf win. See #64545 for details.
+ (|| {
+ for &infer_var in &pending_obligation.stalled_on {
+ if self.selcx.infcx().ty_or_const_infer_var_changed(infer_var) {
+ return true;
+ }
+ }
+ false
+ })()
+ }
+ }
+ }
+
+ /// Processes a predicate obligation and returns either:
+ /// - `Changed(v)` if the predicate is true, presuming that `v` are also true
+ /// - `Unchanged` if we don't have enough info to be sure
+ /// - `Error(e)` if the predicate does not hold
+ ///
+ /// This is called much less often than `needs_process_obligation`, so we
+ /// never inline it.
+ #[inline(never)]
+ #[instrument(level = "debug", skip(self, pending_obligation))]
+ fn process_obligation(
+ &mut self,
+ pending_obligation: &mut PendingPredicateObligation<'tcx>,
+ ) -> ProcessResult<PendingPredicateObligation<'tcx>, FulfillmentErrorCode<'tcx>> {
+ pending_obligation.stalled_on.truncate(0);
+
+ let obligation = &mut pending_obligation.obligation;
+
+ debug!(?obligation, "pre-resolve");
+
+ if obligation.predicate.has_infer_types_or_consts() {
+ obligation.predicate =
+ self.selcx.infcx().resolve_vars_if_possible(obligation.predicate);
+ }
+
+ let obligation = &pending_obligation.obligation;
+
+ let infcx = self.selcx.infcx();
+
+ if obligation.predicate.has_projections() {
+ let mut obligations = Vec::new();
+ let predicate = crate::traits::project::try_normalize_with_depth_to(
+ self.selcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ obligation.predicate,
+ &mut obligations,
+ );
+ if predicate != obligation.predicate {
+ obligations.push(obligation.with(predicate));
+ return ProcessResult::Changed(mk_pending(obligations));
+ }
+ }
+ let binder = obligation.predicate.kind();
+ match binder.no_bound_vars() {
+ None => match binder.skip_binder() {
+ // Evaluation will discard candidates using the leak check.
+ // This means we need to pass it the bound version of our
+ // predicate.
+ ty::PredicateKind::Trait(trait_ref) => {
+ let trait_obligation = obligation.with(binder.rebind(trait_ref));
+
+ self.process_trait_obligation(
+ obligation,
+ trait_obligation,
+ &mut pending_obligation.stalled_on,
+ )
+ }
+ ty::PredicateKind::Projection(data) => {
+ let project_obligation = obligation.with(binder.rebind(data));
+
+ self.process_projection_obligation(
+ obligation,
+ project_obligation,
+ &mut pending_obligation.stalled_on,
+ )
+ }
+ ty::PredicateKind::RegionOutlives(_)
+ | ty::PredicateKind::TypeOutlives(_)
+ | ty::PredicateKind::WellFormed(_)
+ | ty::PredicateKind::ObjectSafe(_)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::Subtype(_)
+ | ty::PredicateKind::Coerce(_)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..) => {
+ let pred =
+ ty::Binder::dummy(infcx.replace_bound_vars_with_placeholders(binder));
+ ProcessResult::Changed(mk_pending(vec![
+ obligation.with(pred.to_predicate(self.selcx.tcx())),
+ ]))
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(..) => {
+ bug!("TypeWellFormedFromEnv is only used for Chalk")
+ }
+ },
+ Some(pred) => match pred {
+ ty::PredicateKind::Trait(data) => {
+ let trait_obligation = obligation.with(Binder::dummy(data));
+
+ self.process_trait_obligation(
+ obligation,
+ trait_obligation,
+ &mut pending_obligation.stalled_on,
+ )
+ }
+
+ ty::PredicateKind::RegionOutlives(data) => {
+ if infcx.considering_regions || data.has_placeholders() {
+ infcx.region_outlives_predicate(&obligation.cause, Binder::dummy(data));
+ }
+
+ ProcessResult::Changed(vec![])
+ }
+
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(t_a, r_b)) => {
+ if infcx.considering_regions {
+ infcx.register_region_obligation_with_cause(t_a, r_b, &obligation.cause);
+ }
+ ProcessResult::Changed(vec![])
+ }
+
+ ty::PredicateKind::Projection(ref data) => {
+ let project_obligation = obligation.with(Binder::dummy(*data));
+
+ self.process_projection_obligation(
+ obligation,
+ project_obligation,
+ &mut pending_obligation.stalled_on,
+ )
+ }
+
+ ty::PredicateKind::ObjectSafe(trait_def_id) => {
+ if !self.selcx.tcx().is_object_safe(trait_def_id) {
+ ProcessResult::Error(CodeSelectionError(Unimplemented))
+ } else {
+ ProcessResult::Changed(vec![])
+ }
+ }
+
+ ty::PredicateKind::ClosureKind(_, closure_substs, kind) => {
+ match self.selcx.infcx().closure_kind(closure_substs) {
+ Some(closure_kind) => {
+ if closure_kind.extends(kind) {
+ ProcessResult::Changed(vec![])
+ } else {
+ ProcessResult::Error(CodeSelectionError(Unimplemented))
+ }
+ }
+ None => ProcessResult::Unchanged,
+ }
+ }
+
+ ty::PredicateKind::WellFormed(arg) => {
+ match wf::obligations(
+ self.selcx.infcx(),
+ obligation.param_env,
+ obligation.cause.body_id,
+ obligation.recursion_depth + 1,
+ arg,
+ obligation.cause.span,
+ ) {
+ None => {
+ pending_obligation.stalled_on =
+ vec![TyOrConstInferVar::maybe_from_generic_arg(arg).unwrap()];
+ ProcessResult::Unchanged
+ }
+ Some(os) => ProcessResult::Changed(mk_pending(os)),
+ }
+ }
+
+ ty::PredicateKind::Subtype(subtype) => {
+ match self.selcx.infcx().subtype_predicate(
+ &obligation.cause,
+ obligation.param_env,
+ Binder::dummy(subtype),
+ ) {
+ None => {
+ // None means that both are unresolved.
+ pending_obligation.stalled_on = vec![
+ TyOrConstInferVar::maybe_from_ty(subtype.a).unwrap(),
+ TyOrConstInferVar::maybe_from_ty(subtype.b).unwrap(),
+ ];
+ ProcessResult::Unchanged
+ }
+ Some(Ok(ok)) => ProcessResult::Changed(mk_pending(ok.obligations)),
+ Some(Err(err)) => {
+ let expected_found =
+ ExpectedFound::new(subtype.a_is_expected, subtype.a, subtype.b);
+ ProcessResult::Error(FulfillmentErrorCode::CodeSubtypeError(
+ expected_found,
+ err,
+ ))
+ }
+ }
+ }
+
+ ty::PredicateKind::Coerce(coerce) => {
+ match self.selcx.infcx().coerce_predicate(
+ &obligation.cause,
+ obligation.param_env,
+ Binder::dummy(coerce),
+ ) {
+ None => {
+ // None means that both are unresolved.
+ pending_obligation.stalled_on = vec![
+ TyOrConstInferVar::maybe_from_ty(coerce.a).unwrap(),
+ TyOrConstInferVar::maybe_from_ty(coerce.b).unwrap(),
+ ];
+ ProcessResult::Unchanged
+ }
+ Some(Ok(ok)) => ProcessResult::Changed(mk_pending(ok.obligations)),
+ Some(Err(err)) => {
+ let expected_found = ExpectedFound::new(false, coerce.a, coerce.b);
+ ProcessResult::Error(FulfillmentErrorCode::CodeSubtypeError(
+ expected_found,
+ err,
+ ))
+ }
+ }
+ }
+
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ match const_evaluatable::is_const_evaluatable(
+ self.selcx.infcx(),
+ uv,
+ obligation.param_env,
+ obligation.cause.span,
+ ) {
+ Ok(()) => ProcessResult::Changed(vec![]),
+ Err(NotConstEvaluatable::MentionsInfer) => {
+ pending_obligation.stalled_on.clear();
+ pending_obligation.stalled_on.extend(
+ uv.substs
+ .iter()
+ .filter_map(TyOrConstInferVar::maybe_from_generic_arg),
+ );
+ ProcessResult::Unchanged
+ }
+ Err(
+ e @ NotConstEvaluatable::MentionsParam
+ | e @ NotConstEvaluatable::Error(_),
+ ) => ProcessResult::Error(CodeSelectionError(
+ SelectionError::NotConstEvaluatable(e),
+ )),
+ }
+ }
+
+ ty::PredicateKind::ConstEquate(c1, c2) => {
+ debug!(?c1, ?c2, "equating consts");
+ let tcx = self.selcx.tcx();
+ if tcx.features().generic_const_exprs {
+ // FIXME: we probably should only try to unify abstract constants
+ // if the constants depend on generic parameters.
+ //
+ // Let's just see where this breaks :shrug:
+ if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
+ (c1.kind(), c2.kind())
+ {
+ if infcx.try_unify_abstract_consts(
+ a.shrink(),
+ b.shrink(),
+ obligation.param_env,
+ ) {
+ return ProcessResult::Changed(vec![]);
+ }
+ }
+ }
+
+ let stalled_on = &mut pending_obligation.stalled_on;
+
+ let mut evaluate = |c: Const<'tcx>| {
+ if let ty::ConstKind::Unevaluated(unevaluated) = c.kind() {
+ match self.selcx.infcx().try_const_eval_resolve(
+ obligation.param_env,
+ unevaluated,
+ c.ty(),
+ Some(obligation.cause.span),
+ ) {
+ Ok(val) => Ok(val),
+ Err(e) => match e {
+ ErrorHandled::TooGeneric => {
+ stalled_on.extend(
+ unevaluated.substs.iter().filter_map(
+ TyOrConstInferVar::maybe_from_generic_arg,
+ ),
+ );
+ Err(ErrorHandled::TooGeneric)
+ }
+ _ => Err(e),
+ },
+ }
+ } else {
+ Ok(c)
+ }
+ };
+
+ match (evaluate(c1), evaluate(c2)) {
+ (Ok(c1), Ok(c2)) => {
+ match self
+ .selcx
+ .infcx()
+ .at(&obligation.cause, obligation.param_env)
+ .eq(c1, c2)
+ {
+ Ok(_) => ProcessResult::Changed(vec![]),
+ Err(err) => ProcessResult::Error(
+ FulfillmentErrorCode::CodeConstEquateError(
+ ExpectedFound::new(true, c1, c2),
+ err,
+ ),
+ ),
+ }
+ }
+ (Err(ErrorHandled::Reported(reported)), _)
+ | (_, Err(ErrorHandled::Reported(reported))) => ProcessResult::Error(
+ CodeSelectionError(SelectionError::NotConstEvaluatable(
+ NotConstEvaluatable::Error(reported),
+ )),
+ ),
+ (Err(ErrorHandled::Linted), _) | (_, Err(ErrorHandled::Linted)) => {
+ span_bug!(
+ obligation.cause.span(),
+ "ConstEquate: const_eval_resolve returned an unexpected error"
+ )
+ }
+ (Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
+ if c1.has_infer_types_or_consts() || c2.has_infer_types_or_consts() {
+ ProcessResult::Unchanged
+ } else {
+ // Two different constants using generic parameters ~> error.
+ let expected_found = ExpectedFound::new(true, c1, c2);
+ ProcessResult::Error(FulfillmentErrorCode::CodeConstEquateError(
+ expected_found,
+ TypeError::ConstMismatch(expected_found),
+ ))
+ }
+ }
+ }
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(..) => {
+ bug!("TypeWellFormedFromEnv is only used for Chalk")
+ }
+ },
+ }
+ }
+
+ fn process_backedge<'c, I>(
+ &mut self,
+ cycle: I,
+ _marker: PhantomData<&'c PendingPredicateObligation<'tcx>>,
+ ) where
+ I: Clone + Iterator<Item = &'c PendingPredicateObligation<'tcx>>,
+ {
+ if self.selcx.coinductive_match(cycle.clone().map(|s| s.obligation.predicate)) {
+ debug!("process_child_obligations: coinductive match");
+ } else {
+ let cycle: Vec<_> = cycle.map(|c| c.obligation.clone()).collect();
+ self.selcx.infcx().report_overflow_error_cycle(&cycle);
+ }
+ }
+}
+
+impl<'a, 'b, 'tcx> FulfillProcessor<'a, 'b, 'tcx> {
+ #[instrument(level = "debug", skip(self, obligation, stalled_on))]
+ fn process_trait_obligation(
+ &mut self,
+ obligation: &PredicateObligation<'tcx>,
+ trait_obligation: TraitObligation<'tcx>,
+ stalled_on: &mut Vec<TyOrConstInferVar<'tcx>>,
+ ) -> ProcessResult<PendingPredicateObligation<'tcx>, FulfillmentErrorCode<'tcx>> {
+ let infcx = self.selcx.infcx();
+ if obligation.predicate.is_global() {
+ // no type variables present, can use evaluation for better caching.
+ // FIXME: consider caching errors too.
+ if infcx.predicate_must_hold_considering_regions(obligation) {
+ debug!(
+ "selecting trait at depth {} evaluated to holds",
+ obligation.recursion_depth
+ );
+ return ProcessResult::Changed(vec![]);
+ }
+ }
+
+ match self.selcx.select(&trait_obligation) {
+ Ok(Some(impl_source)) => {
+ debug!("selecting trait at depth {} yielded Ok(Some)", obligation.recursion_depth);
+ ProcessResult::Changed(mk_pending(impl_source.nested_obligations()))
+ }
+ Ok(None) => {
+ debug!("selecting trait at depth {} yielded Ok(None)", obligation.recursion_depth);
+
+ // This is a bit subtle: for the most part, the
+ // only reason we can fail to make progress on
+ // trait selection is because we don't have enough
+ // information about the types in the trait.
+ stalled_on.clear();
+ stalled_on.extend(substs_infer_vars(
+ self.selcx,
+ trait_obligation.predicate.map_bound(|pred| pred.trait_ref.substs),
+ ));
+
+ debug!(
+ "process_predicate: pending obligation {:?} now stalled on {:?}",
+ infcx.resolve_vars_if_possible(obligation.clone()),
+ stalled_on
+ );
+
+ ProcessResult::Unchanged
+ }
+ Err(selection_err) => {
+ debug!("selecting trait at depth {} yielded Err", obligation.recursion_depth);
+
+ ProcessResult::Error(CodeSelectionError(selection_err))
+ }
+ }
+ }
+
+ fn process_projection_obligation(
+ &mut self,
+ obligation: &PredicateObligation<'tcx>,
+ project_obligation: PolyProjectionObligation<'tcx>,
+ stalled_on: &mut Vec<TyOrConstInferVar<'tcx>>,
+ ) -> ProcessResult<PendingPredicateObligation<'tcx>, FulfillmentErrorCode<'tcx>> {
+ let tcx = self.selcx.tcx();
+
+ if obligation.predicate.is_global() {
+ // no type variables present, can use evaluation for better caching.
+ // FIXME: consider caching errors too.
+ if self.selcx.infcx().predicate_must_hold_considering_regions(obligation) {
+ if let Some(key) = ProjectionCacheKey::from_poly_projection_predicate(
+ &mut self.selcx,
+ project_obligation.predicate,
+ ) {
+ // If `predicate_must_hold_considering_regions` succeeds, then we've
+ // evaluated all sub-obligations. We can therefore mark the 'root'
+ // obligation as complete, and skip evaluating sub-obligations.
+ self.selcx
+ .infcx()
+ .inner
+ .borrow_mut()
+ .projection_cache()
+ .complete(key, EvaluationResult::EvaluatedToOk);
+ }
+ return ProcessResult::Changed(vec![]);
+ } else {
+ debug!("Does NOT hold: {:?}", obligation);
+ }
+ }
+
+ match project::poly_project_and_unify_type(self.selcx, &project_obligation) {
+ ProjectAndUnifyResult::Holds(os) => ProcessResult::Changed(mk_pending(os)),
+ ProjectAndUnifyResult::FailedNormalization => {
+ stalled_on.clear();
+ stalled_on.extend(substs_infer_vars(
+ self.selcx,
+ project_obligation.predicate.map_bound(|pred| pred.projection_ty.substs),
+ ));
+ ProcessResult::Unchanged
+ }
+ // Let the caller handle the recursion
+ ProjectAndUnifyResult::Recursive => ProcessResult::Changed(mk_pending(vec![
+ project_obligation.with(project_obligation.predicate.to_predicate(tcx)),
+ ])),
+ ProjectAndUnifyResult::MismatchedProjectionTypes(e) => {
+ ProcessResult::Error(CodeProjectionError(e))
+ }
+ }
+ }
+}
+
+/// Returns the set of inference variables contained in `substs`.
+fn substs_infer_vars<'a, 'tcx>(
+ selcx: &mut SelectionContext<'a, 'tcx>,
+ substs: ty::Binder<'tcx, SubstsRef<'tcx>>,
+) -> impl Iterator<Item = TyOrConstInferVar<'tcx>> {
+ selcx
+ .infcx()
+ .resolve_vars_if_possible(substs)
+ .skip_binder() // ok because this check doesn't care about regions
+ .iter()
+ .filter(|arg| arg.has_infer_types_or_consts())
+ .flat_map(|arg| {
+ let mut walker = arg.walk();
+ while let Some(c) = walker.next() {
+ if !c.has_infer_types_or_consts() {
+ walker.visited.remove(&c);
+ walker.skip_current_subtree();
+ }
+ }
+ walker.visited.into_iter()
+ })
+ .filter_map(TyOrConstInferVar::maybe_from_generic_arg)
+}
+
+fn to_fulfillment_error<'tcx>(
+ error: Error<PendingPredicateObligation<'tcx>, FulfillmentErrorCode<'tcx>>,
+) -> FulfillmentError<'tcx> {
+ let mut iter = error.backtrace.into_iter();
+ let obligation = iter.next().unwrap().obligation;
+    // The root obligation is the last item in the backtrace; if there's only
+    // one item, then it's the same as the main obligation.
+ let root_obligation = iter.next_back().map_or_else(|| obligation.clone(), |e| e.obligation);
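+    // E.g. (illustrative) a backtrace `[leaf, mid, root]` yields
+    // `obligation = leaf` and `root_obligation = root`; a one-element
+    // backtrace yields `obligation == root_obligation`.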
+ FulfillmentError::new(obligation, error.error, root_obligation)
+}
diff --git a/compiler/rustc_trait_selection/src/traits/misc.rs b/compiler/rustc_trait_selection/src/traits/misc.rs
new file mode 100644
index 000000000..dd2769c71
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/misc.rs
@@ -0,0 +1,88 @@
+//! Miscellaneous type-system utilities that are too small to deserve their own modules.
+
+use crate::infer::InferCtxtExt as _;
+use crate::traits::{self, ObligationCause};
+
+use rustc_hir as hir;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable};
+
+use crate::traits::error_reporting::InferCtxtExt;
+
+#[derive(Clone)]
+pub enum CopyImplementationError<'tcx> {
+ InfrigingFields(Vec<(&'tcx ty::FieldDef, Ty<'tcx>)>),
+ NotAnAdt,
+ HasDestructor,
+}
+
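+/// Checks whether `self_type` may implement `Copy`: builtin scalar-like
+/// types always can; otherwise the type must be an ADT with no destructor
+/// whose fields are all `Copy`.
+///
+/// For example (illustrative), `struct S(String)` fails with
+/// `InfrigingFields` listing the `String` field, and a type with a `Drop`
+/// impl fails with `HasDestructor`.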
+pub fn can_type_implement_copy<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ self_type: Ty<'tcx>,
+ parent_cause: ObligationCause<'tcx>,
+) -> Result<(), CopyImplementationError<'tcx>> {
+ // FIXME: (@jroesch) float this code up
+ tcx.infer_ctxt().enter(|infcx| {
+ let (adt, substs) = match self_type.kind() {
+ // These types used to have a builtin impl.
+ // Now libcore provides that impl.
+ ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::Char
+ | ty::RawPtr(..)
+ | ty::Never
+ | ty::Ref(_, _, hir::Mutability::Not)
+ | ty::Array(..) => return Ok(()),
+
+ ty::Adt(adt, substs) => (adt, substs),
+
+ _ => return Err(CopyImplementationError::NotAnAdt),
+ };
+
+ let mut infringing = Vec::new();
+ for variant in adt.variants() {
+ for field in &variant.fields {
+ let ty = field.ty(tcx, substs);
+ if ty.references_error() {
+ continue;
+ }
+ let span = tcx.def_span(field.did);
+ // FIXME(compiler-errors): This gives us better spans for bad
+ // projection types like in issue-50480.
+ // If the ADT has substs, point to the cause we are given.
+                // If it does not, then this field probably doesn't normalize
+                // to begin with, so point to the bad field's span instead.
+ let cause = if field
+ .ty(tcx, traits::InternalSubsts::identity_for_item(tcx, adt.did()))
+ .has_param_types_or_consts()
+ {
+ parent_cause.clone()
+ } else {
+ ObligationCause::dummy_with_span(span)
+ };
+ let ctx = traits::FulfillmentContext::new();
+ match traits::fully_normalize(&infcx, ctx, cause, param_env, ty) {
+ Ok(ty) => {
+ if !infcx.type_is_copy_modulo_regions(param_env, ty, span) {
+ infringing.push((field, ty));
+ }
+ }
+ Err(errors) => {
+ infcx.report_fulfillment_errors(&errors, None, false);
+ }
+ };
+ }
+ }
+ if !infringing.is_empty() {
+ return Err(CopyImplementationError::InfrigingFields(infringing));
+ }
+ if adt.has_dtor(tcx) {
+ return Err(CopyImplementationError::HasDestructor);
+ }
+
+ Ok(())
+ })
+}
diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs
new file mode 100644
index 000000000..9c6bb0731
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/mod.rs
@@ -0,0 +1,863 @@
+//! Trait Resolution. See the [rustc dev guide] for more information on how this works.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html
+
+pub mod auto_trait;
+mod chalk_fulfill;
+pub mod codegen;
+mod coherence;
+pub mod const_evaluatable;
+mod engine;
+pub mod error_reporting;
+mod fulfill;
+pub mod misc;
+mod object_safety;
+mod on_unimplemented;
+mod project;
+pub mod query;
+pub(crate) mod relationships;
+mod select;
+mod specialize;
+mod structural_match;
+mod util;
+pub mod wf;
+
+use crate::infer::outlives::env::OutlivesEnvironment;
+use crate::infer::{InferCtxt, TyCtxtInferExt};
+use crate::traits::error_reporting::InferCtxtExt as _;
+use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, GenericParamDefKind, ToPredicate, Ty, TyCtxt, VtblEntry};
+use rustc_span::{sym, Span};
+use smallvec::SmallVec;
+
+use std::fmt::Debug;
+use std::ops::ControlFlow;
+
+pub use self::FulfillmentErrorCode::*;
+pub use self::ImplSource::*;
+pub use self::ObligationCauseCode::*;
+pub use self::SelectionError::*;
+
+pub use self::coherence::{add_placeholder_note, orphan_check, overlapping_impls};
+pub use self::coherence::{OrphanCheckErr, OverlapResult};
+pub use self::engine::{ObligationCtxt, TraitEngineExt};
+pub use self::fulfill::{FulfillmentContext, PendingPredicateObligation};
+pub use self::object_safety::astconv_object_safety_violations;
+pub use self::object_safety::is_vtable_safe_method;
+pub use self::object_safety::MethodViolationCode;
+pub use self::object_safety::ObjectSafetyViolation;
+pub use self::on_unimplemented::{OnUnimplementedDirective, OnUnimplementedNote};
+pub use self::project::{normalize, normalize_projection_type, normalize_to};
+pub use self::select::{EvaluationCache, SelectionCache, SelectionContext};
+pub use self::select::{EvaluationResult, IntercrateAmbiguityCause, OverflowError};
+pub use self::specialize::specialization_graph::FutureCompatOverlapError;
+pub use self::specialize::specialization_graph::FutureCompatOverlapErrorKind;
+pub use self::specialize::{specialization_graph, translate_substs, OverlapError};
+pub use self::structural_match::{
+ search_for_adt_const_param_violation, search_for_structural_match_violation,
+};
+pub use self::util::{
+ elaborate_obligations, elaborate_predicates, elaborate_predicates_with_span,
+ elaborate_trait_ref, elaborate_trait_refs,
+};
+pub use self::util::{expand_trait_aliases, TraitAliasExpander};
+pub use self::util::{
+ get_vtable_index_of_object_method, impl_item_is_final, predicate_for_trait_def, upcast_choices,
+};
+pub use self::util::{
+ supertrait_def_ids, supertraits, transitive_bounds, transitive_bounds_that_define_assoc_type,
+ SupertraitDefIds, Supertraits,
+};
+
+pub use self::chalk_fulfill::FulfillmentContext as ChalkFulfillmentContext;
+
+pub use rustc_infer::traits::*;
+
+/// Whether to skip the leak check, as part of a future compatibility warning step.
+///
+/// The "default" for skip-leak-check corresponds to the current
+/// behavior (do not skip the leak check) -- not the behavior we are
+/// transitioning into.
+#[derive(Copy, Clone, PartialEq, Eq, Debug, Default)]
+pub enum SkipLeakCheck {
+ Yes,
+ #[default]
+ No,
+}
+
+impl SkipLeakCheck {
+ fn is_yes(self) -> bool {
+ self == SkipLeakCheck::Yes
+ }
+}
+
+/// The mode that trait queries run in.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub enum TraitQueryMode {
+ /// Standard/un-canonicalized queries get accurate
+ /// spans etc. passed in and hence can do reasonable
+ /// error reporting on their own.
+ Standard,
+ /// Canonicalized queries get dummy spans and hence
+ /// must generally propagate errors to
+ /// pre-canonicalization callsites.
+ Canonical,
+}
+
+/// Creates predicate obligations from the generic bounds.
+pub fn predicates_for_generics<'tcx>(
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ generic_bounds: ty::InstantiatedPredicates<'tcx>,
+) -> impl Iterator<Item = PredicateObligation<'tcx>> {
+ util::predicates_for_generics(cause, 0, param_env, generic_bounds)
+}
+
+/// Determines whether the type `ty` is known to meet `bound` and
+/// returns true if so. Returns false if `ty` either does not meet
+/// `bound` or is not known to meet `bound` (note that this is
+/// conservative towards *no impl*, which is the opposite of the
+/// `evaluate` methods).
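+///
+/// ```ignore (illustrative; `infcx`, `param_env`, `copy_def_id`, `span` assumed)
+/// let known = type_known_to_meet_bound_modulo_regions(
+///     &infcx, param_env, infcx.tcx.types.u32, copy_def_id, span,
+/// );
+/// assert!(known); // `u32: Copy` is known to hold
+/// ```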
+pub fn type_known_to_meet_bound_modulo_regions<'a, 'tcx>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ def_id: DefId,
+ span: Span,
+) -> bool {
+ debug!(
+ "type_known_to_meet_bound_modulo_regions(ty={:?}, bound={:?})",
+ ty,
+ infcx.tcx.def_path_str(def_id)
+ );
+
+ let trait_ref =
+ ty::Binder::dummy(ty::TraitRef { def_id, substs: infcx.tcx.mk_substs_trait(ty, &[]) });
+ let obligation = Obligation {
+ param_env,
+ cause: ObligationCause::misc(span, hir::CRATE_HIR_ID),
+ recursion_depth: 0,
+ predicate: trait_ref.without_const().to_predicate(infcx.tcx),
+ };
+
+ let result = infcx.predicate_must_hold_modulo_regions(&obligation);
+ debug!(
+        "type_known_to_meet_bound_modulo_regions: ty={:?} bound={} => {:?}",
+ ty,
+ infcx.tcx.def_path_str(def_id),
+ result
+ );
+
+ if result && ty.has_infer_types_or_consts() {
+ // Because of inference "guessing", selection can sometimes claim
+ // to succeed while the success requires a guess. To ensure
+ // this function's result remains infallible, we must confirm
+ // that guess. While imperfect, I believe this is sound.
+
+ // The handling of regions in this area of the code is terrible,
+ // see issue #29149. We should be able to improve on this with
+ // NLL.
+ let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+
+ // We can use a dummy node-id here because we won't pay any mind
+ // to region obligations that arise (there shouldn't really be any
+ // anyhow).
+ let cause = ObligationCause::misc(span, hir::CRATE_HIR_ID);
+
+ fulfill_cx.register_bound(infcx, param_env, ty, def_id, cause);
+
+ // Note: we only assume something is `Copy` if we can
+ // *definitively* show that it implements `Copy`. Otherwise,
+ // assume it is move; linear is always ok.
+ match fulfill_cx.select_all_or_error(infcx).as_slice() {
+ [] => {
+ debug!(
+ "type_known_to_meet_bound_modulo_regions: ty={:?} bound={} success",
+ ty,
+ infcx.tcx.def_path_str(def_id)
+ );
+ true
+ }
+ errors => {
+ debug!(
+ ?ty,
+ bound = %infcx.tcx.def_path_str(def_id),
+ ?errors,
+ "type_known_to_meet_bound_modulo_regions"
+ );
+ false
+ }
+ }
+ } else {
+ result
+ }
+}
+
+#[instrument(level = "debug", skip(tcx, elaborated_env))]
+fn do_normalize_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cause: ObligationCause<'tcx>,
+ elaborated_env: ty::ParamEnv<'tcx>,
+ predicates: Vec<ty::Predicate<'tcx>>,
+) -> Result<Vec<ty::Predicate<'tcx>>, ErrorGuaranteed> {
+ let span = cause.span;
+ // FIXME. We should really... do something with these region
+ // obligations. But this call just continues the older
+ // behavior (i.e., doesn't cause any new bugs), and it would
+ // take some further refactoring to actually solve them. In
+ // particular, we would have to handle implied bounds
+ // properly, and that code is currently largely confined to
+ // regionck (though I made some efforts to extract it
+ // out). -nmatsakis
+ //
+ // @arielby: In any case, these obligations are checked
+ // by wfcheck anyway, so I'm not sure we have to check
+ // them here too, and we will remove this function when
+ // we move over to lazy normalization *anyway*.
+ tcx.infer_ctxt().ignoring_regions().enter(|infcx| {
+ let fulfill_cx = FulfillmentContext::new();
+ let predicates =
+ match fully_normalize(&infcx, fulfill_cx, cause, elaborated_env, predicates) {
+ Ok(predicates) => predicates,
+ Err(errors) => {
+ let reported = infcx.report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+ };
+
+        debug!("do_normalize_predicates: normalized predicates = {:?}", predicates);
+
+ // We can use the `elaborated_env` here; the region code only
+ // cares about declarations like `'a: 'b`.
+ let outlives_env = OutlivesEnvironment::new(elaborated_env);
+
+ // FIXME: It's very weird that we ignore region obligations but apparently
+ // still need to use `resolve_regions` as we need the resolved regions in
+ // the normalized predicates.
+ let errors = infcx.resolve_regions(&outlives_env);
+ if !errors.is_empty() {
+ tcx.sess.delay_span_bug(
+ span,
+ format!(
+ "failed region resolution while normalizing {elaborated_env:?}: {errors:?}"
+ ),
+ );
+ }
+
+ match infcx.fully_resolve(predicates) {
+ Ok(predicates) => Ok(predicates),
+ Err(fixup_err) => {
+ // If we encounter a fixup error, it means that some type
+ // variable wound up unconstrained. I actually don't know
+ // if this can happen, and I certainly don't expect it to
+ // happen often, but if it did happen it probably
+ // represents a legitimate failure due to some kind of
+ // unconstrained variable.
+ //
+ // @lcnr: Let's still ICE here for now. I want a test case
+ // for that.
+ span_bug!(
+ span,
+ "inference variables in normalized parameter environment: {}",
+ fixup_err
+ );
+ }
+ }
+ })
+}
+
+// FIXME: this is gonna need to be removed ...
+/// Normalizes the parameter environment, reporting errors if they occur.
+#[instrument(level = "debug", skip(tcx))]
+pub fn normalize_param_env_or_error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ unnormalized_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+) -> ty::ParamEnv<'tcx> {
+ // I'm not wild about reporting errors here; I'd prefer to
+ // have the errors get reported at a defined place (e.g.,
+ // during typeck). Instead I have all parameter
+ // environments, in effect, going through this function
+ // and hence potentially reporting errors. This ensures of
+ // course that we never forget to normalize (the
+ // alternative seemed like it would involve a lot of
+ // manual invocations of this fn -- and then we'd have to
+ // deal with the errors at each of those sites).
+ //
+ // In any case, in practice, typeck constructs all the
+ // parameter environments once for every fn as it goes,
+ // and errors will get reported then; so outside of type inference we
+ // can be sure that no errors should occur.
+ let mut predicates: Vec<_> =
+ util::elaborate_predicates(tcx, unnormalized_env.caller_bounds().into_iter())
+ .map(|obligation| obligation.predicate)
+ .collect();
+
+ debug!("normalize_param_env_or_error: elaborated-predicates={:?}", predicates);
+
+ let elaborated_env = ty::ParamEnv::new(
+ tcx.intern_predicates(&predicates),
+ unnormalized_env.reveal(),
+ unnormalized_env.constness(),
+ );
+
+ // HACK: we are trying to normalize the param-env inside *itself*. The problem is that
+ // normalization expects its param-env to be already normalized, which means we have
+ // a circularity.
+ //
+ // The way we handle this is by normalizing the param-env inside an unnormalized version
+ // of the param-env, which means that if the param-env contains unnormalized projections,
+ // we'll have some normalization failures. This is unfortunate.
+ //
+ // Lazy normalization would basically handle this by treating just the
+ // normalizing-a-trait-ref-requires-itself cycles as evaluation failures.
+ //
+ // Inferred outlives bounds can create a lot of `TypeOutlives` predicates for associated
+ // types, so to make the situation less bad, we normalize all the predicates *but*
+ // the `TypeOutlives` predicates first inside the unnormalized parameter environment, and
+ // then we normalize the `TypeOutlives` bounds inside the normalized parameter environment.
+ //
+ // This works fairly well because trait matching does not actually care about param-env
+ // TypeOutlives predicates - these are normally used by regionck.
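+    //
+    // E.g. (illustrative) for a param-env like
+    //   `[T: Iterator<Item = U>, U: Clone, T::Item: 'static]`
+    // the `TypeOutlives` predicate `T::Item: 'static` is split out below,
+    // the remaining predicates are normalized first, and the outlives
+    // predicate is then normalized against that partially-normalized
+    // environment (yielding `U: 'static`).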
+ let outlives_predicates: Vec<_> = predicates
+ .drain_filter(|predicate| {
+ matches!(predicate.kind().skip_binder(), ty::PredicateKind::TypeOutlives(..))
+ })
+ .collect();
+
+ debug!(
+ "normalize_param_env_or_error: predicates=(non-outlives={:?}, outlives={:?})",
+ predicates, outlives_predicates
+ );
+ let Ok(non_outlives_predicates) = do_normalize_predicates(
+ tcx,
+ cause.clone(),
+ elaborated_env,
+ predicates,
+ ) else {
+ // An unnormalized env is better than nothing.
+ debug!("normalize_param_env_or_error: errored resolving non-outlives predicates");
+ return elaborated_env;
+ };
+
+ debug!("normalize_param_env_or_error: non-outlives predicates={:?}", non_outlives_predicates);
+
+ // Not sure whether it is better to include the unnormalized TypeOutlives predicates
+ // here. I believe they should not matter, because we are ignoring TypeOutlives param-env
+    // predicates here anyway. Keeping them regardless because it seems safer.
+ let outlives_env: Vec<_> =
+ non_outlives_predicates.iter().chain(&outlives_predicates).cloned().collect();
+ let outlives_env = ty::ParamEnv::new(
+ tcx.intern_predicates(&outlives_env),
+ unnormalized_env.reveal(),
+ unnormalized_env.constness(),
+ );
+ let Ok(outlives_predicates) = do_normalize_predicates(
+ tcx,
+ cause,
+ outlives_env,
+ outlives_predicates,
+ ) else {
+ // An unnormalized env is better than nothing.
+ debug!("normalize_param_env_or_error: errored resolving outlives predicates");
+ return elaborated_env;
+ };
+ debug!("normalize_param_env_or_error: outlives predicates={:?}", outlives_predicates);
+
+ let mut predicates = non_outlives_predicates;
+ predicates.extend(outlives_predicates);
+ debug!("normalize_param_env_or_error: final predicates={:?}", predicates);
+ ty::ParamEnv::new(
+ tcx.intern_predicates(&predicates),
+ unnormalized_env.reveal(),
+ unnormalized_env.constness(),
+ )
+}
+
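+/// Fully normalizes `value` in `param_env`: projections are normalized, the
+/// resulting obligations are registered in the given `fulfill_cx` and selected
+/// eagerly, and the value is returned with inference variables resolved where
+/// possible; otherwise the accumulated fulfillment errors are returned.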
+pub fn fully_normalize<'a, 'tcx, T>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ mut fulfill_cx: FulfillmentContext<'tcx>,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+) -> Result<T, Vec<FulfillmentError<'tcx>>>
+where
+ T: TypeFoldable<'tcx>,
+{
+ debug!("fully_normalize_with_fulfillcx(value={:?})", value);
+ let selcx = &mut SelectionContext::new(infcx);
+ let Normalized { value: normalized_value, obligations } =
+ project::normalize(selcx, param_env, cause, value);
+ debug!(
+ "fully_normalize: normalized_value={:?} obligations={:?}",
+ normalized_value, obligations
+ );
+ for obligation in obligations {
+ fulfill_cx.register_predicate_obligation(selcx.infcx(), obligation);
+ }
+
+ debug!("fully_normalize: select_all_or_error start");
+ let errors = fulfill_cx.select_all_or_error(infcx);
+ if !errors.is_empty() {
+ return Err(errors);
+ }
+ debug!("fully_normalize: select_all_or_error complete");
+ let resolved_value = infcx.resolve_vars_if_possible(normalized_value);
+ debug!("fully_normalize: resolved_value={:?}", resolved_value);
+ Ok(resolved_value)
+}
+
+/// Normalizes the predicates and checks whether they hold in an empty environment. If this
+/// returns true, then either normalize encountered an error or one of the predicates did not
+/// hold. Used when creating vtables to check for unsatisfiable methods.
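+///
+/// For example (illustrative), a method whose where-clause requires
+/// `String: Copy` can never be called, so `vtable_entries` below uses this
+/// check to leave the corresponding vtable slot vacant rather than resolving
+/// an instance for it.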
+pub fn impossible_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicates: Vec<ty::Predicate<'tcx>>,
+) -> bool {
+ debug!("impossible_predicates(predicates={:?})", predicates);
+
+ let result = tcx.infer_ctxt().enter(|infcx| {
+ // HACK: Set tainted by errors to gracefully exit in case of overflow.
+ infcx.set_tainted_by_errors();
+
+ let param_env = ty::ParamEnv::reveal_all();
+ let mut selcx = SelectionContext::new(&infcx);
+ let mut fulfill_cx = FulfillmentContext::new();
+ let cause = ObligationCause::dummy();
+ let Normalized { value: predicates, obligations } =
+ normalize(&mut selcx, param_env, cause.clone(), predicates);
+ for obligation in obligations {
+ fulfill_cx.register_predicate_obligation(&infcx, obligation);
+ }
+ for predicate in predicates {
+ let obligation = Obligation::new(cause.clone(), param_env, predicate);
+ fulfill_cx.register_predicate_obligation(&infcx, obligation);
+ }
+
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+
+ // Clean up after ourselves
+ let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+
+ !errors.is_empty()
+ });
+ debug!("impossible_predicates = {:?}", result);
+ result
+}
+
+fn subst_and_check_impossible_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: (DefId, SubstsRef<'tcx>),
+) -> bool {
+ debug!("subst_and_check_impossible_predicates(key={:?})", key);
+
+ let mut predicates = tcx.predicates_of(key.0).instantiate(tcx, key.1).predicates;
+
+ // Specifically check trait fulfillment to avoid an error when trying to resolve
+ // associated items.
+ if let Some(trait_def_id) = tcx.trait_of_item(key.0) {
+ let trait_ref = ty::TraitRef::from_method(tcx, trait_def_id, key.1);
+ predicates.push(ty::Binder::dummy(trait_ref).to_poly_trait_predicate().to_predicate(tcx));
+ }
+
+ predicates.retain(|predicate| !predicate.needs_subst());
+ let result = impossible_predicates(tcx, predicates);
+
+ debug!("subst_and_check_impossible_predicates(key={:?}) = {:?}", key, result);
+ result
+}
+
+#[derive(Clone, Debug)]
+enum VtblSegment<'tcx> {
+ MetadataDSA,
+ TraitOwnEntries { trait_ref: ty::PolyTraitRef<'tcx>, emit_vptr: bool },
+}
+
+/// Prepare the segments for a vtable
+fn prepare_vtable_segments<'tcx, T>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ mut segment_visitor: impl FnMut(VtblSegment<'tcx>) -> ControlFlow<T>,
+) -> Option<T> {
+ // The following constraints hold for the final arrangement:
+ // 1. The whole virtual table of the first direct super trait is included as
+ // the prefix. If this trait doesn't have any super traits, then this step
+ // consists of the dsa metadata.
+ // 2. Then come the proper pointer metadata (vptr) and all own methods for all
+ // other super traits except those already included as part of the first
+ // direct super trait's virtual table.
+ // 3. Finally, the own methods of this trait.
+
+ // This has the advantage that trait upcasting to the first direct super trait on each level
+ // is zero cost, and upcasting to any other trait requires only replacing the vtable pointer
+ // through one level of indirection, while not using too much extra memory.
+
+ // For a single inheritance relationship like this,
+ // D --> C --> B --> A
+ // The resulting vtable will consist of these segments:
+ // DSA, A, B, C, D
+
+ // For a multiple inheritance relationship like this,
+ // D --> C --> A
+ // \-> B
+ // The resulting vtable will consist of these segments:
+ // DSA, A, B, B-vptr, C, D
+
+ // For a diamond inheritance relationship like this,
+ // D --> B --> A
+ // \-> C -/
+ // The resulting vtable will consist of these segments:
+ // DSA, A, B, C, C-vptr, D
+
+ // For a more complex inheritance relationship like this:
+ // O --> G --> C --> A
+ // \ \ \-> B
+ // | |-> F --> D
+ // | \-> E
+ // |-> N --> J --> H
+ // \ \-> I
+ // |-> M --> K
+ // \-> L
+ // The resulting vtable will consist of these segments:
+ // DSA, A, B, B-vptr, C, D, D-vptr, E, E-vptr, F, F-vptr, G,
+ // H, H-vptr, I, I-vptr, J, J-vptr, K, K-vptr, L, L-vptr, M, M-vptr,
+ // N, N-vptr, O
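+
+ // As a concrete sketch of the single-inheritance case above (hypothetical
+ // trait names, illustrative only):
+ //
+ // ```ignore (illustrative)
+ // trait A { fn a(&self); }
+ // trait B: A { fn b(&self); }
+ // trait C: B { fn c(&self); }
+ // trait D: C { fn d(&self); }
+ // ```
+ //
+ // The vtable for `dyn D` is laid out as [DSA, a, b, c, d]; upcasting
+ // `&dyn D` to `&dyn C`, `&dyn B`, or `&dyn A` can keep the same vtable
+ // pointer, because each supertrait's vtable is a prefix of it.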
+
+ // emit dsa segment first.
+ if let ControlFlow::Break(v) = (segment_visitor)(VtblSegment::MetadataDSA) {
+ return Some(v);
+ }
+
+ let mut emit_vptr_on_new_entry = false;
+ let mut visited = util::PredicateSet::new(tcx);
+ let predicate = trait_ref.without_const().to_predicate(tcx);
+ let mut stack: SmallVec<[(ty::PolyTraitRef<'tcx>, _, _); 5]> =
+ smallvec![(trait_ref, emit_vptr_on_new_entry, None)];
+ visited.insert(predicate);
+
+ // The main traversal loop:
+ // basically we want to cut the inheritance directed graph into a few non-overlapping slices of nodes
+ // such that each node is emitted after all its descendants have been emitted.
+ // So we convert the directed graph into a tree on the fly, by skipping all previously
+ // visited nodes using a visited set.
+ // Each loop run emits a slice - it starts by finding a "childless" unvisited node, backtracking upwards, and it
+ // stops after it finds a node that has a next-sibling node.
+ // This next-sibling node will be used as the starting point of the next slice.
+
+ // Example:
+ // For a diamond inheritance relationship like this,
+ // D#1 --> B#0 --> A#0
+ // \-> C#1 -/
+
+ // Starting point 0 stack [D]
+ // Loop run #0: Stack after diving in is [D B A], A is "childless"
+ // after this point, all newly visited nodes won't have a vtable that equals to a prefix of this one.
+ // Loop run #0: Emitting the slice [B A] (in reverse order), B has a next-sibling node, so this slice stops here.
+ // Loop run #0: Stack after exiting out is [D C], C is the next starting point.
+ // Loop run #1: Stack after diving in is [D C], C is "childless", since its child A is skipped (already emitted).
+ // Loop run #1: Emitting the slice [D C] (in reverse order). No one has a next-sibling node.
+ // Loop run #1: Stack after exiting out is []. Now the function exits.
+
+ loop {
+ // dive deeper into the stack, recording the path
+ 'diving_in: loop {
+ if let Some((inner_most_trait_ref, _, _)) = stack.last() {
+ let inner_most_trait_ref = *inner_most_trait_ref;
+ let mut direct_super_traits_iter = tcx
+ .super_predicates_of(inner_most_trait_ref.def_id())
+ .predicates
+ .into_iter()
+ .filter_map(move |(pred, _)| {
+ pred.subst_supertrait(tcx, &inner_most_trait_ref).to_opt_poly_trait_pred()
+ });
+
+ 'diving_in_skip_visited_traits: loop {
+ if let Some(next_super_trait) = direct_super_traits_iter.next() {
+ if visited.insert(next_super_trait.to_predicate(tcx)) {
+ // We're throwing away potential constness of super traits here.
+ // FIXME: handle ~const super traits
+ let next_super_trait = next_super_trait.map_bound(|t| t.trait_ref);
+ stack.push((
+ next_super_trait,
+ emit_vptr_on_new_entry,
+ Some(direct_super_traits_iter),
+ ));
+ break 'diving_in_skip_visited_traits;
+ } else {
+ continue 'diving_in_skip_visited_traits;
+ }
+ } else {
+ break 'diving_in;
+ }
+ }
+ }
+ }
+
+ // Other than for the left-most path, a vptr should be emitted for each trait.
+ emit_vptr_on_new_entry = true;
+
+ // emit innermost item, move to next sibling and stop there if possible, otherwise jump to outer level.
+ 'exiting_out: loop {
+ if let Some((inner_most_trait_ref, emit_vptr, siblings_opt)) = stack.last_mut() {
+ if let ControlFlow::Break(v) = (segment_visitor)(VtblSegment::TraitOwnEntries {
+ trait_ref: *inner_most_trait_ref,
+ emit_vptr: *emit_vptr,
+ }) {
+ return Some(v);
+ }
+
+ 'exiting_out_skip_visited_traits: loop {
+ if let Some(siblings) = siblings_opt {
+ if let Some(next_inner_most_trait_ref) = siblings.next() {
+ if visited.insert(next_inner_most_trait_ref.to_predicate(tcx)) {
+ // We're throwing away potential constness of super traits here.
+ // FIXME: handle ~const super traits
+ let next_inner_most_trait_ref =
+ next_inner_most_trait_ref.map_bound(|t| t.trait_ref);
+ *inner_most_trait_ref = next_inner_most_trait_ref;
+ *emit_vptr = emit_vptr_on_new_entry;
+ break 'exiting_out;
+ } else {
+ continue 'exiting_out_skip_visited_traits;
+ }
+ }
+ }
+ stack.pop();
+ continue 'exiting_out;
+ }
+ }
+ // all done
+ return None;
+ }
+ }
+}
+
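+/// Reports the computed vtable layout as a compiler diagnostic. This backs the
+/// internal, test-only `#[rustc_dump_vtable]` attribute; for example
+/// (illustrative):
+///
+/// ```ignore (internal, nightly-only attribute)
+/// #![feature(rustc_attrs)]
+/// #[rustc_dump_vtable]
+/// trait Widget { fn draw(&self); }
+/// ```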
+fn dump_vtable_entries<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sp: Span,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ entries: &[VtblEntry<'tcx>],
+) {
+ let msg = format!("vtable entries for `{}`: {:#?}", trait_ref, entries);
+ tcx.sess.struct_span_err(sp, &msg).emit();
+}
+
+fn own_existential_vtable_entries<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyExistentialTraitRef<'tcx>,
+) -> &'tcx [DefId] {
+ let trait_methods = tcx
+ .associated_items(trait_ref.def_id())
+ .in_definition_order()
+ .filter(|item| item.kind == ty::AssocKind::Fn);
+ // Now list each method's `DefId` (within its trait).
+ let own_entries = trait_methods.filter_map(move |trait_method| {
+ debug!("own_existential_vtable_entry: trait_method={:?}", trait_method);
+ let def_id = trait_method.def_id;
+
+ // Some methods cannot be called on an object; skip those.
+ if !is_vtable_safe_method(tcx, trait_ref.def_id(), &trait_method) {
+ debug!("own_existential_vtable_entry: not vtable safe");
+ return None;
+ }
+
+ Some(def_id)
+ });
+
+ tcx.arena.alloc_from_iter(own_entries.into_iter())
+}
+
+/// Given a trait `trait_ref`, iterates the vtable entries
+/// that come from `trait_ref`, including its supertraits.
+fn vtable_entries<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+) -> &'tcx [VtblEntry<'tcx>] {
+ debug!("vtable_entries({:?})", trait_ref);
+
+ let mut entries = vec![];
+
+ let vtable_segment_callback = |segment| -> ControlFlow<()> {
+ match segment {
+ VtblSegment::MetadataDSA => {
+ entries.extend(TyCtxt::COMMON_VTABLE_ENTRIES);
+ }
+ VtblSegment::TraitOwnEntries { trait_ref, emit_vptr } => {
+ let existential_trait_ref = trait_ref
+ .map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
+
+ // Look up the shape of the vtable for the trait.
+ let own_existential_entries =
+ tcx.own_existential_vtable_entries(existential_trait_ref);
+
+ let own_entries = own_existential_entries.iter().copied().map(|def_id| {
+ debug!("vtable_entries: trait_method={:?}", def_id);
+
+ // The method may have some early-bound lifetimes; add regions for those.
+ let substs = trait_ref.map_bound(|trait_ref| {
+ InternalSubsts::for_item(tcx, def_id, |param, _| match param.kind {
+ GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
+ GenericParamDefKind::Type { .. }
+ | GenericParamDefKind::Const { .. } => {
+ trait_ref.substs[param.index as usize]
+ }
+ })
+ });
+
+ // The trait type may have higher-ranked lifetimes in it;
+ // erase them if they appear, so that we get the type
+ // at some particular call site.
+ let substs = tcx
+ .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), substs);
+
+ // It's possible that the method relies on where-clauses that
+ // do not hold for this particular set of type parameters.
+ // Note that this method could then never be called, so we
+ // do not want to try and codegen it, in that case (see #23435).
+ let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs);
+ if impossible_predicates(tcx, predicates.predicates) {
+ debug!("vtable_entries: predicates do not hold");
+ return VtblEntry::Vacant;
+ }
+
+ let instance = ty::Instance::resolve_for_vtable(
+ tcx,
+ ty::ParamEnv::reveal_all(),
+ def_id,
+ substs,
+ )
+ .expect("resolution failed during building vtable representation");
+ VtblEntry::Method(instance)
+ });
+
+ entries.extend(own_entries);
+
+ if emit_vptr {
+ entries.push(VtblEntry::TraitVPtr(trait_ref));
+ }
+ }
+ }
+
+ ControlFlow::Continue(())
+ };
+
+ let _ = prepare_vtable_segments(tcx, trait_ref, vtable_segment_callback);
+
+ if tcx.has_attr(trait_ref.def_id(), sym::rustc_dump_vtable) {
+ let sp = tcx.def_span(trait_ref.def_id());
+ dump_vtable_entries(tcx, sp, trait_ref, &entries);
+ }
+
+ tcx.arena.alloc_from_iter(entries.into_iter())
+}
+
+/// Find slot base for trait methods within vtable entries of another trait
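+///
+/// For example (illustrative): with `trait B: A`, the entries of `B` within the
+/// vtable of `dyn B` start at `TyCtxt::COMMON_VTABLE_ENTRIES.len()` plus the
+/// number of `A`'s own entries, which is what the running `vtable_base` below
+/// computes segment by segment.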
+fn vtable_trait_first_method_offset<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: (
+ ty::PolyTraitRef<'tcx>, // trait_to_be_found
+ ty::PolyTraitRef<'tcx>, // trait_owning_vtable
+ ),
+) -> usize {
+ let (trait_to_be_found, trait_owning_vtable) = key;
+
+ // #90177
+ let trait_to_be_found_erased = tcx.erase_regions(trait_to_be_found);
+
+ let vtable_segment_callback = {
+ let mut vtable_base = 0;
+
+ move |segment| {
+ match segment {
+ VtblSegment::MetadataDSA => {
+ vtable_base += TyCtxt::COMMON_VTABLE_ENTRIES.len();
+ }
+ VtblSegment::TraitOwnEntries { trait_ref, emit_vptr } => {
+ if tcx.erase_regions(trait_ref) == trait_to_be_found_erased {
+ return ControlFlow::Break(vtable_base);
+ }
+ vtable_base += util::count_own_vtable_entries(tcx, trait_ref);
+ if emit_vptr {
+ vtable_base += 1;
+ }
+ }
+ }
+ ControlFlow::Continue(())
+ }
+ };
+
+ if let Some(vtable_base) =
+ prepare_vtable_segments(tcx, trait_owning_vtable, vtable_segment_callback)
+ {
+ vtable_base
+ } else {
+ bug!("Failed to find info for expected trait in vtable");
+ }
+}
+
+/// Find slot offset for trait vptr within vtable entries of another trait
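+///
+/// For example (illustrative): given the multiple-inheritance layout
+/// `DSA, A, B, B-vptr, C, D` described in `prepare_vtable_segments`, upcasting
+/// `&dyn D` to `&dyn A` needs no new vtable pointer since the prefix is reused,
+/// while upcasting to `&dyn B` loads it from the `B-vptr` slot whose offset is
+/// computed here.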
+pub fn vtable_trait_upcasting_coercion_new_vptr_slot<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: (
+ Ty<'tcx>, // trait object type whose trait owns the vtable
+ Ty<'tcx>, // trait object type for the supertrait
+ ),
+) -> Option<usize> {
+ let (source, target) = key;
+ assert!(matches!(&source.kind(), &ty::Dynamic(..)) && !source.needs_infer());
+ assert!(matches!(&target.kind(), &ty::Dynamic(..)) && !target.needs_infer());
+
+ // This has been type-checked before, so diagnostics are not really needed here.
+ let unsize_trait_did = tcx.require_lang_item(LangItem::Unsize, None);
+
+ let trait_ref = ty::TraitRef {
+ def_id: unsize_trait_did,
+ substs: tcx.mk_substs_trait(source, &[target.into()]),
+ };
+ let obligation = Obligation::new(
+ ObligationCause::dummy(),
+ ty::ParamEnv::reveal_all(),
+ ty::Binder::dummy(ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Positive,
+ }),
+ );
+
+ let implsrc = tcx.infer_ctxt().enter(|infcx| {
+ let mut selcx = SelectionContext::new(&infcx);
+ selcx.select(&obligation).unwrap()
+ });
+
+ let Some(ImplSource::TraitUpcasting(implsrc_traitcasting)) = implsrc else {
+ bug!();
+ };
+
+ implsrc_traitcasting.vtable_vptr_slot
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ object_safety::provide(providers);
+ structural_match::provide(providers);
+ *providers = ty::query::Providers {
+ specialization_graph_of: specialize::specialization_graph_provider,
+ specializes: specialize::specializes,
+ codegen_fulfill_obligation: codegen::codegen_fulfill_obligation,
+ own_existential_vtable_entries,
+ vtable_entries,
+ vtable_trait_upcasting_coercion_new_vptr_slot,
+ subst_and_check_impossible_predicates,
+ try_unify_abstract_consts: |tcx, param_env_and| {
+ let (param_env, (a, b)) = param_env_and.into_parts();
+ const_evaluatable::try_unify_abstract_consts(tcx, (a, b), param_env)
+ },
+ ..*providers
+ };
+}
diff --git a/compiler/rustc_trait_selection/src/traits/object_safety.rs b/compiler/rustc_trait_selection/src/traits/object_safety.rs
new file mode 100644
index 000000000..612f51309
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/object_safety.rs
@@ -0,0 +1,866 @@
+//! "Object safety" refers to the ability for a trait to be converted
+//! to an object. In general, traits may only be converted to an
+//! object if all of their methods meet certain criteria. In particular,
+//! they must:
+//!
+//! - have a suitable receiver from which we can extract a vtable and coerce to a "thin" version
+//! that doesn't contain the vtable;
+//! - not reference the erased type `Self` except in this receiver;
+//! - not have generic type parameters.
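+//!
+//! For example (illustrative), this trait breaks the last two rules and so
+//! cannot be made into an object:
+//!
+//! ```ignore (illustrative)
+//! trait NotObjectSafe {
+//! fn clone_box(&self) -> Self; // references the erased type `Self`
+//! fn apply<F: Fn()>(&self, f: F); // has a generic type parameter
+//! }
+//! ```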
+
+use super::elaborate_predicates;
+
+use crate::infer::TyCtxtInferExt;
+use crate::traits::query::evaluate_obligation::InferCtxtExt;
+use crate::traits::{self, Obligation, ObligationCause};
+use rustc_errors::{FatalError, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::abstract_const::{walk_abstract_const, AbstractConst};
+use rustc_middle::ty::subst::{GenericArg, InternalSubsts, Subst};
+use rustc_middle::ty::{
+ self, EarlyBinder, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
+};
+use rustc_middle::ty::{Predicate, ToPredicate};
+use rustc_session::lint::builtin::WHERE_CLAUSES_OBJECT_SAFETY;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+use smallvec::SmallVec;
+
+use std::iter;
+use std::ops::ControlFlow;
+
+pub use crate::traits::{MethodViolationCode, ObjectSafetyViolation};
+
+/// Returns the object safety violations that affect
+/// astconv -- currently, `Self` in supertraits. This is needed
+/// because `object_safety_violations` can't be used during
+/// type collection.
+pub fn astconv_object_safety_violations(
+ tcx: TyCtxt<'_>,
+ trait_def_id: DefId,
+) -> Vec<ObjectSafetyViolation> {
+ debug_assert!(tcx.generics_of(trait_def_id).has_self);
+ let violations = traits::supertrait_def_ids(tcx, trait_def_id)
+ .map(|def_id| predicates_reference_self(tcx, def_id, true))
+ .filter(|spans| !spans.is_empty())
+ .map(ObjectSafetyViolation::SupertraitSelf)
+ .collect();
+
+ debug!("astconv_object_safety_violations(trait_def_id={:?}) = {:?}", trait_def_id, violations);
+
+ violations
+}
+
+fn object_safety_violations(tcx: TyCtxt<'_>, trait_def_id: DefId) -> &'_ [ObjectSafetyViolation] {
+ debug_assert!(tcx.generics_of(trait_def_id).has_self);
+ debug!("object_safety_violations: {:?}", trait_def_id);
+
+ tcx.arena.alloc_from_iter(
+ traits::supertrait_def_ids(tcx, trait_def_id)
+ .flat_map(|def_id| object_safety_violations_for_trait(tcx, def_id)),
+ )
+}
+
+/// We say a method is *vtable safe* if it can be invoked on a trait
+/// object. Note that object-safe traits can have some
+/// non-vtable-safe methods, so long as they require `Self: Sized` or
+/// otherwise ensure that they cannot be used when `Self = Trait`.
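+///
+/// For example (illustrative):
+///
+/// ```ignore (illustrative)
+/// trait Stream {
+/// fn next_byte(&mut self) -> Option<u8>; // vtable safe
+/// fn into_vec(self) -> Vec<u8> where Self: Sized; // not vtable safe, but allowed
+/// }
+/// ```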
+pub fn is_vtable_safe_method(tcx: TyCtxt<'_>, trait_def_id: DefId, method: &ty::AssocItem) -> bool {
+ debug_assert!(tcx.generics_of(trait_def_id).has_self);
+ debug!("is_vtable_safe_method({:?}, {:?})", trait_def_id, method);
+ // Any method that has a `Self: Sized` bound cannot be called.
+ if generics_require_sized_self(tcx, method.def_id) {
+ return false;
+ }
+
+ match virtual_call_violation_for_method(tcx, trait_def_id, method) {
+ None | Some(MethodViolationCode::WhereClauseReferencesSelf) => true,
+ Some(_) => false,
+ }
+}
+
+fn object_safety_violations_for_trait(
+ tcx: TyCtxt<'_>,
+ trait_def_id: DefId,
+) -> Vec<ObjectSafetyViolation> {
+ // Check methods for violations.
+ let mut violations: Vec<_> = tcx
+ .associated_items(trait_def_id)
+ .in_definition_order()
+ .filter(|item| item.kind == ty::AssocKind::Fn)
+ .filter_map(|item| {
+ object_safety_violation_for_method(tcx, trait_def_id, &item)
+ .map(|(code, span)| ObjectSafetyViolation::Method(item.name, code, span))
+ })
+ .filter(|violation| {
+ if let ObjectSafetyViolation::Method(
+ _,
+ MethodViolationCode::WhereClauseReferencesSelf,
+ span,
+ ) = violation
+ {
+ lint_object_unsafe_trait(tcx, *span, trait_def_id, &violation);
+ false
+ } else {
+ true
+ }
+ })
+ .collect();
+
+ // Check the trait itself.
+ if trait_has_sized_self(tcx, trait_def_id) {
+ // We don't want to include the requirement from `Sized` itself to be `Sized` in the list.
+ let spans = get_sized_bounds(tcx, trait_def_id);
+ violations.push(ObjectSafetyViolation::SizedSelf(spans));
+ }
+ let spans = predicates_reference_self(tcx, trait_def_id, false);
+ if !spans.is_empty() {
+ violations.push(ObjectSafetyViolation::SupertraitSelf(spans));
+ }
+ let spans = bounds_reference_self(tcx, trait_def_id);
+ if !spans.is_empty() {
+ violations.push(ObjectSafetyViolation::SupertraitSelf(spans));
+ }
+
+ violations.extend(
+ tcx.associated_items(trait_def_id)
+ .in_definition_order()
+ .filter(|item| item.kind == ty::AssocKind::Const)
+ .map(|item| {
+ let ident = item.ident(tcx);
+ ObjectSafetyViolation::AssocConst(ident.name, ident.span)
+ }),
+ );
+
+ if !tcx.features().generic_associated_types_extended {
+ violations.extend(
+ tcx.associated_items(trait_def_id)
+ .in_definition_order()
+ .filter(|item| item.kind == ty::AssocKind::Type)
+ .filter(|item| !tcx.generics_of(item.def_id).params.is_empty())
+ .map(|item| {
+ let ident = item.ident(tcx);
+ ObjectSafetyViolation::GAT(ident.name, ident.span)
+ }),
+ );
+ }
+
+ debug!(
+ "object_safety_violations_for_trait(trait_def_id={:?}) = {:?}",
+ trait_def_id, violations
+ );
+
+ violations
+}
+
+/// Lint object-unsafe trait.
+fn lint_object_unsafe_trait(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ trait_def_id: DefId,
+ violation: &ObjectSafetyViolation,
+) {
+ // Using `CRATE_HIR_ID` is wrong, but it's hard to get a more precise id.
+ // It's also hard to get a use site span, so we use the method definition span.
+ tcx.struct_span_lint_hir(WHERE_CLAUSES_OBJECT_SAFETY, hir::CRATE_HIR_ID, span, |lint| {
+ let mut err = lint.build(&format!(
+ "the trait `{}` cannot be made into an object",
+ tcx.def_path_str(trait_def_id)
+ ));
+ let node = tcx.hir().get_if_local(trait_def_id);
+ let mut spans = MultiSpan::from_span(span);
+ if let Some(hir::Node::Item(item)) = node {
+ spans.push_span_label(item.ident.span, "this trait cannot be made into an object...");
+ spans.push_span_label(span, format!("...because {}", violation.error_msg()));
+ } else {
+ spans.push_span_label(
+ span,
+ format!(
+ "the trait cannot be made into an object because {}",
+ violation.error_msg()
+ ),
+ );
+ };
+ err.span_note(
+ spans,
+ "for a trait to be \"object safe\" it needs to allow building a vtable to allow the \
+ call to be resolvable dynamically; for more information visit \
+ <https://doc.rust-lang.org/reference/items/traits.html#object-safety>",
+ );
+ if node.is_some() {
+ // Only provide the help if it's a local trait; otherwise it's not actionable.
+ violation.solution(&mut err);
+ }
+ err.emit();
+ });
+}
+
+fn sized_trait_bound_spans<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ bounds: hir::GenericBounds<'tcx>,
+) -> impl 'tcx + Iterator<Item = Span> {
+ bounds.iter().filter_map(move |b| match b {
+ hir::GenericBound::Trait(trait_ref, hir::TraitBoundModifier::None)
+ if trait_has_sized_self(
+ tcx,
+ trait_ref.trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise()),
+ ) =>
+ {
+ // Fetch spans for supertraits that are `Sized`: `trait T: Super`
+ Some(trait_ref.span)
+ }
+ _ => None,
+ })
+}
+
+fn get_sized_bounds(tcx: TyCtxt<'_>, trait_def_id: DefId) -> SmallVec<[Span; 1]> {
+ tcx.hir()
+ .get_if_local(trait_def_id)
+ .and_then(|node| match node {
+ hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(.., generics, bounds, _),
+ ..
+ }) => Some(
+ generics
+ .predicates
+ .iter()
+ .filter_map(|pred| {
+ match pred {
+ hir::WherePredicate::BoundPredicate(pred)
+ if pred.bounded_ty.hir_id.owner.to_def_id() == trait_def_id =>
+ {
+ // Fetch spans for trait bounds that are Sized:
+ // `trait T where Self: Pred`
+ Some(sized_trait_bound_spans(tcx, pred.bounds))
+ }
+ _ => None,
+ }
+ })
+ .flatten()
+ // Fetch spans for supertraits that are `Sized`: `trait T: Super`.
+ .chain(sized_trait_bound_spans(tcx, bounds))
+ .collect::<SmallVec<[Span; 1]>>(),
+ ),
+ _ => None,
+ })
+ .unwrap_or_else(SmallVec::new)
+}
+
+fn predicates_reference_self(
+ tcx: TyCtxt<'_>,
+ trait_def_id: DefId,
+ supertraits_only: bool,
+) -> SmallVec<[Span; 1]> {
+ let trait_ref = ty::TraitRef::identity(tcx, trait_def_id);
+ let predicates = if supertraits_only {
+ tcx.super_predicates_of(trait_def_id)
+ } else {
+ tcx.predicates_of(trait_def_id)
+ };
+ predicates
+ .predicates
+ .iter()
+ .map(|&(predicate, sp)| (predicate.subst_supertrait(tcx, &trait_ref), sp))
+ .filter_map(|predicate| predicate_references_self(tcx, predicate))
+ .collect()
+}
+
+fn bounds_reference_self(tcx: TyCtxt<'_>, trait_def_id: DefId) -> SmallVec<[Span; 1]> {
+ tcx.associated_items(trait_def_id)
+ .in_definition_order()
+ .filter(|item| item.kind == ty::AssocKind::Type)
+ .flat_map(|item| tcx.explicit_item_bounds(item.def_id))
+ .filter_map(|pred_span| predicate_references_self(tcx, *pred_span))
+ .collect()
+}
+
+fn predicate_references_self<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (predicate, sp): (ty::Predicate<'tcx>, Span),
+) -> Option<Span> {
+ let self_ty = tcx.types.self_param;
+ let has_self_ty = |arg: &GenericArg<'tcx>| arg.walk().any(|arg| arg == self_ty.into());
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(ref data) => {
+ // In the case of a trait predicate, we can skip the "self" type.
+ if data.trait_ref.substs[1..].iter().any(has_self_ty) { Some(sp) } else { None }
+ }
+ ty::PredicateKind::Projection(ref data) => {
+ // And similarly for projections. This should be redundant with
+ // the previous check because any projection should have a
+ // matching `Trait` predicate with the same inputs, but we do
+ // the check to be safe.
+ //
+ // It also won't be redundant if we allow type-generic associated
+ // types for trait objects.
+ //
+ // Note that we *do* allow projection *outputs* to contain
+ // `self` (i.e., `trait Foo: Bar<Output=Self::Result> { type Result; }`),
+ // we just require the user to specify *both* outputs
+ // in the object type (i.e., `dyn Foo<Output=(), Result=()>`).
+ //
+ // This is ALT2 in issue #56288, see that for discussion of the
+ // possible alternatives.
+ if data.projection_ty.substs[1..].iter().any(has_self_ty) { Some(sp) } else { None }
+ }
+ ty::PredicateKind::WellFormed(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::TypeOutlives(..)
+ | ty::PredicateKind::RegionOutlives(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+}
+
+fn trait_has_sized_self(tcx: TyCtxt<'_>, trait_def_id: DefId) -> bool {
+ generics_require_sized_self(tcx, trait_def_id)
+}
+
+fn generics_require_sized_self(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ let Some(sized_def_id) = tcx.lang_items().sized_trait() else {
+ return false; /* No Sized trait, can't require it! */
+ };
+
+ // Search for a predicate like `Self : Sized` amongst the trait bounds.
+ let predicates = tcx.predicates_of(def_id);
+ let predicates = predicates.instantiate_identity(tcx).predicates;
+ elaborate_predicates(tcx, predicates.into_iter()).any(|obligation| {
+ match obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(ref trait_pred) => {
+ trait_pred.def_id() == sized_def_id && trait_pred.self_ty().is_param(0)
+ }
+ ty::PredicateKind::Projection(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::RegionOutlives(..)
+ | ty::PredicateKind::WellFormed(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::TypeOutlives(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => false,
+ }
+ })
+}
+
+/// Returns `Some(_)` if this method makes the containing trait not object safe.
+fn object_safety_violation_for_method(
+ tcx: TyCtxt<'_>,
+ trait_def_id: DefId,
+ method: &ty::AssocItem,
+) -> Option<(MethodViolationCode, Span)> {
+ debug!("object_safety_violation_for_method({:?}, {:?})", trait_def_id, method);
+ // Any method that has a `Self: Sized` requirement is otherwise
+ // exempt from these checks.
+ if generics_require_sized_self(tcx, method.def_id) {
+ return None;
+ }
+
+ let violation = virtual_call_violation_for_method(tcx, trait_def_id, method);
+ // Get an accurate span depending on the violation.
+ violation.map(|v| {
+ let node = tcx.hir().get_if_local(method.def_id);
+ let span = match (&v, node) {
+ (MethodViolationCode::ReferencesSelfInput(Some(span)), _) => *span,
+ (MethodViolationCode::UndispatchableReceiver(Some(span)), _) => *span,
+ (MethodViolationCode::ReferencesSelfOutput, Some(node)) => {
+ node.fn_decl().map_or(method.ident(tcx).span, |decl| decl.output.span())
+ }
+ _ => method.ident(tcx).span,
+ };
+ (v, span)
+ })
+}
+
+/// Returns `Some(_)` if this method cannot be called on a trait
+/// object; this does not necessarily imply that the enclosing trait
+/// is not object safe, because the method might have a where clause
+/// `Self:Sized`.
+fn virtual_call_violation_for_method<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ method: &ty::AssocItem,
+) -> Option<MethodViolationCode> {
+ let sig = tcx.fn_sig(method.def_id);
+
+ // The method's first parameter must be named `self`
+ if !method.fn_has_self_parameter {
+ let sugg = if let Some(hir::Node::TraitItem(hir::TraitItem {
+ generics,
+ kind: hir::TraitItemKind::Fn(sig, _),
+ ..
+ })) = tcx.hir().get_if_local(method.def_id).as_ref()
+ {
+ let sm = tcx.sess.source_map();
+ Some((
+ (
+ format!("&self{}", if sig.decl.inputs.is_empty() { "" } else { ", " }),
+ sm.span_through_char(sig.span, '(').shrink_to_hi(),
+ ),
+ (
+ format!("{} Self: Sized", generics.add_where_or_trailing_comma()),
+ generics.tail_span_for_predicate_suggestion(),
+ ),
+ ))
+ } else {
+ None
+ };
+ return Some(MethodViolationCode::StaticMethod(sugg));
+ }
+
+ for (i, &input_ty) in sig.skip_binder().inputs().iter().enumerate().skip(1) {
+ if contains_illegal_self_type_reference(tcx, trait_def_id, sig.rebind(input_ty)) {
+ let span = if let Some(hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(sig, _),
+ ..
+ })) = tcx.hir().get_if_local(method.def_id).as_ref()
+ {
+ Some(sig.decl.inputs[i].span)
+ } else {
+ None
+ };
+ return Some(MethodViolationCode::ReferencesSelfInput(span));
+ }
+ }
+ if contains_illegal_self_type_reference(tcx, trait_def_id, sig.output()) {
+ return Some(MethodViolationCode::ReferencesSelfOutput);
+ }
+
+ // We can't monomorphize things like `fn foo<A>(...)`.
+ let own_counts = tcx.generics_of(method.def_id).own_counts();
+ if own_counts.types + own_counts.consts != 0 {
+ return Some(MethodViolationCode::Generic);
+ }
+
+ if tcx
+ .predicates_of(method.def_id)
+ .predicates
+ .iter()
+ // A trait object can't claim to live more than the concrete type,
+ // so outlives predicates will always hold.
+ .cloned()
+ .filter(|(p, _)| p.to_opt_type_outlives().is_none())
+ .any(|pred| contains_illegal_self_type_reference(tcx, trait_def_id, pred))
+ {
+ return Some(MethodViolationCode::WhereClauseReferencesSelf);
+ }
+
+ let receiver_ty = tcx.liberate_late_bound_regions(method.def_id, sig.input(0));
+
+ // Until `unsized_locals` is fully implemented, `self: Self` can't be dispatched on.
+ // However, this is already considered object-safe. We allow it as a special case here.
+ // FIXME(mikeyhew) get rid of this `if` statement once `receiver_is_dispatchable` allows
+ // `Receiver: Unsize<Receiver[Self => dyn Trait]>`.
+ if receiver_ty != tcx.types.self_param {
+ if !receiver_is_dispatchable(tcx, method, receiver_ty) {
+ let span = if let Some(hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(sig, _),
+ ..
+ })) = tcx.hir().get_if_local(method.def_id).as_ref()
+ {
+ Some(sig.decl.inputs[0].span)
+ } else {
+ None
+ };
+ return Some(MethodViolationCode::UndispatchableReceiver(span));
+ } else {
+ // Do a sanity check to make sure the receiver actually has the layout of a pointer.
+
+ use rustc_target::abi::Abi;
+
+ let param_env = tcx.param_env(method.def_id);
+
+ let abi_of_ty = |ty: Ty<'tcx>| -> Option<Abi> {
+ match tcx.layout_of(param_env.and(ty)) {
+ Ok(layout) => Some(layout.abi),
+ Err(err) => {
+ // #78372
+ tcx.sess.delay_span_bug(
+ tcx.def_span(method.def_id),
+ &format!("error: {}\n while computing layout for type {:?}", err, ty),
+ );
+ None
+ }
+ }
+ };
+
+ // e.g., `Rc<()>`
+ let unit_receiver_ty =
+ receiver_for_self_ty(tcx, receiver_ty, tcx.mk_unit(), method.def_id);
+
+ match abi_of_ty(unit_receiver_ty) {
+ Some(Abi::Scalar(..)) => (),
+ abi => {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(method.def_id),
+ &format!(
+ "receiver when `Self = ()` should have a Scalar ABI; found {:?}",
+ abi
+ ),
+ );
+ }
+ }
+
+ let trait_object_ty =
+ object_ty_for_trait(tcx, trait_def_id, tcx.mk_region(ty::ReStatic));
+
+ // e.g., `Rc<dyn Trait>`
+ let trait_object_receiver =
+ receiver_for_self_ty(tcx, receiver_ty, trait_object_ty, method.def_id);
+
+ match abi_of_ty(trait_object_receiver) {
+ Some(Abi::ScalarPair(..)) => (),
+ abi => {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(method.def_id),
+ &format!(
+ "receiver when `Self = {}` should have a ScalarPair ABI; found {:?}",
+ trait_object_ty, abi
+ ),
+ );
+ }
+ }
+ }
+ }
+
+ None
+}
+
+/// Performs a type substitution to produce the version of `receiver_ty` when `Self = self_ty`.
+/// For example, for `receiver_ty = Rc<Self>` and `self_ty = Foo`, returns `Rc<Foo>`.
+fn receiver_for_self_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ receiver_ty: Ty<'tcx>,
+ self_ty: Ty<'tcx>,
+ method_def_id: DefId,
+) -> Ty<'tcx> {
+ debug!("receiver_for_self_ty({:?}, {:?}, {:?})", receiver_ty, self_ty, method_def_id);
+ let substs = InternalSubsts::for_item(tcx, method_def_id, |param, _| {
+ if param.index == 0 { self_ty.into() } else { tcx.mk_param_from_def(param) }
+ });
+
+ let result = EarlyBinder(receiver_ty).subst(tcx, substs);
+ debug!(
+ "receiver_for_self_ty({:?}, {:?}, {:?}) = {:?}",
+ receiver_ty, self_ty, method_def_id, result
+ );
+ result
+}
+
+/// Creates the object type for the current trait. For example,
+/// if the current trait is `Deref`, then this will be
+/// `dyn Deref<Target = Self::Target> + 'static`.
+fn object_ty_for_trait<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ lifetime: ty::Region<'tcx>,
+) -> Ty<'tcx> {
+ debug!("object_ty_for_trait: trait_def_id={:?}", trait_def_id);
+
+ let trait_ref = ty::TraitRef::identity(tcx, trait_def_id);
+
+ let trait_predicate = trait_ref.map_bound(|trait_ref| {
+ ty::ExistentialPredicate::Trait(ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref))
+ });
+
+ let mut associated_types = traits::supertraits(tcx, trait_ref)
+ .flat_map(|super_trait_ref| {
+ tcx.associated_items(super_trait_ref.def_id())
+ .in_definition_order()
+ .map(move |item| (super_trait_ref, item))
+ })
+ .filter(|(_, item)| item.kind == ty::AssocKind::Type)
+ .collect::<Vec<_>>();
+
+ // existential predicates need to be in a specific order
+ associated_types.sort_by_cached_key(|(_, item)| tcx.def_path_hash(item.def_id));
+
+ let projection_predicates = associated_types.into_iter().map(|(super_trait_ref, item)| {
+ // We *can* get bound lifetimes here in cases like
+ // `trait MyTrait: for<'s> OtherTrait<&'s T, Output=bool>`.
+ super_trait_ref.map_bound(|super_trait_ref| {
+ ty::ExistentialPredicate::Projection(ty::ExistentialProjection {
+ term: tcx.mk_projection(item.def_id, super_trait_ref.substs).into(),
+ item_def_id: item.def_id,
+ substs: super_trait_ref.substs,
+ })
+ })
+ });
+
+ let existential_predicates = tcx
+ .mk_poly_existential_predicates(iter::once(trait_predicate).chain(projection_predicates));
+
+ let object_ty = tcx.mk_dynamic(existential_predicates, lifetime);
+
+ debug!("object_ty_for_trait: object_ty=`{}`", object_ty);
+
+ object_ty
+}
+
+/// Checks that the method's receiver (the `self` argument) can be dispatched on when `Self` is
+/// a trait object. We require that `DispatchFromDyn` be implemented for the receiver type
+/// in the following way:
+/// - let `Receiver` be the type of the `self` argument, i.e., `Self`, `&Self`, `Rc<Self>`;
+/// - require the following bound:
+///
+/// ```ignore (not-rust)
+/// Receiver[Self => T]: DispatchFromDyn<Receiver[Self => dyn Trait]>
+/// ```
+///
+/// where `Foo[X => Y]` means "the same type as `Foo`, but with `X` replaced with `Y`"
+/// (substitution notation).
+///
+/// Some examples of receiver types and their required obligation:
+/// - `&'a mut self` requires `&'a mut Self: DispatchFromDyn<&'a mut dyn Trait>`,
+/// - `self: Rc<Self>` requires `Rc<Self>: DispatchFromDyn<Rc<dyn Trait>>`,
+/// - `self: Pin<Box<Self>>` requires `Pin<Box<Self>>: DispatchFromDyn<Pin<Box<dyn Trait>>>`.
+///
+/// The only case where the receiver is not dispatchable, but is still a valid receiver
+/// type (just not object-safe), is when there is more than one level of pointer indirection.
+/// E.g., `self: &&Self`, `self: &Rc<Self>`, `self: Box<Box<Self>>`. In these cases, there
+/// is no way, or at least no inexpensive way, to coerce the receiver from the version where
+/// `Self = dyn Trait` to the version where `Self = T`, where `T` is the unknown erased type
+/// contained by the trait object, because the object that needs to be coerced is behind
+/// a pointer.
+///
+/// In practice, we cannot use `dyn Trait` explicitly in the obligation because it would result
+/// in a new check that `Trait` is object safe, creating a cycle (until object_safe_for_dispatch
+/// is stabilized, see tracking issue <https://github.com/rust-lang/rust/issues/43561>).
+/// Instead, we fudge a little by introducing a new type parameter `U` such that
+/// `Self: Unsize<U>` and `U: Trait + ?Sized`, and use `U` in place of `dyn Trait`.
+/// Written as a chalk-style query:
+/// ```ignore (not-rust)
+/// forall (U: Trait + ?Sized) {
+/// if (Self: Unsize<U>) {
+/// Receiver: DispatchFromDyn<Receiver[Self => U]>
+/// }
+/// }
+/// ```
+/// for `self: &'a mut Self`, this means `&'a mut Self: DispatchFromDyn<&'a mut U>`
+/// for `self: Rc<Self>`, this means `Rc<Self>: DispatchFromDyn<Rc<U>>`
+/// for `self: Pin<Box<Self>>`, this means `Pin<Box<Self>>: DispatchFromDyn<Pin<Box<U>>>`
+//
+// FIXME(mikeyhew) when unsized receivers are implemented as part of unsized rvalues, add this
+// fallback query: `Receiver: Unsize<Receiver[Self => U]>` to support receivers like
+// `self: Wrapper<Self>`.
+#[allow(dead_code)]
+fn receiver_is_dispatchable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ method: &ty::AssocItem,
+ receiver_ty: Ty<'tcx>,
+) -> bool {
+ debug!("receiver_is_dispatchable: method = {:?}, receiver_ty = {:?}", method, receiver_ty);
+
+ let traits = (tcx.lang_items().unsize_trait(), tcx.lang_items().dispatch_from_dyn_trait());
+ let (Some(unsize_did), Some(dispatch_from_dyn_did)) = traits else {
+ debug!("receiver_is_dispatchable: Missing Unsize or DispatchFromDyn traits");
+ return false;
+ };
+
+ // The type `U` in the query:
+ // use a bogus type parameter with index `u32::MAX` to mimic a `forall(U)` query for now.
+ // FIXME(mikeyhew) this is a total hack. Once object_safe_for_dispatch is stabilized, we can
+ // replace this with `dyn Trait`
+ let unsized_self_ty: Ty<'tcx> =
+ tcx.mk_ty_param(u32::MAX, Symbol::intern("RustaceansAreAwesome"));
+
+ // `Receiver[Self => U]`
+ let unsized_receiver_ty =
+ receiver_for_self_ty(tcx, receiver_ty, unsized_self_ty, method.def_id);
+
+ // create a modified param env, with `Self: Unsize<U>` and `U: Trait` added to caller bounds
+ // `U: ?Sized` is already implied here
+ let param_env = {
+ let param_env = tcx.param_env(method.def_id);
+
+ // Self: Unsize<U>
+ let unsize_predicate = ty::Binder::dummy(ty::TraitRef {
+ def_id: unsize_did,
+ substs: tcx.mk_substs_trait(tcx.types.self_param, &[unsized_self_ty.into()]),
+ })
+ .without_const()
+ .to_predicate(tcx);
+
+ // U: Trait<Arg1, ..., ArgN>
+ let trait_predicate = {
+ let trait_def_id = method.trait_container(tcx).unwrap();
+ let substs = InternalSubsts::for_item(tcx, trait_def_id, |param, _| {
+ if param.index == 0 {
+ unsized_self_ty.into()
+ } else {
+ tcx.mk_param_from_def(param)
+ }
+ });
+
+ // Use the trait's own `DefId` here, not `unsize_did`; otherwise the
+ // constructed predicate would read `U: Unsize<..>` rather than `U: Trait<..>`.
+ ty::Binder::dummy(ty::TraitRef { def_id: trait_def_id, substs })
+ .without_const()
+ .to_predicate(tcx)
+ };
+
+ let caller_bounds: Vec<Predicate<'tcx>> =
+ param_env.caller_bounds().iter().chain([unsize_predicate, trait_predicate]).collect();
+
+ ty::ParamEnv::new(
+ tcx.intern_predicates(&caller_bounds),
+ param_env.reveal(),
+ param_env.constness(),
+ )
+ };
+
+ // Receiver: DispatchFromDyn<Receiver[Self => U]>
+ let obligation = {
+ let predicate = ty::Binder::dummy(ty::TraitRef {
+ def_id: dispatch_from_dyn_did,
+ substs: tcx.mk_substs_trait(receiver_ty, &[unsized_receiver_ty.into()]),
+ })
+ .without_const()
+ .to_predicate(tcx);
+
+ Obligation::new(ObligationCause::dummy(), param_env, predicate)
+ };
+
+ tcx.infer_ctxt().enter(|ref infcx| {
+ // the receiver is dispatchable iff the obligation holds
+ infcx.predicate_must_hold_modulo_regions(&obligation)
+ })
+}
+
+fn contains_illegal_self_type_reference<'tcx, T: TypeVisitable<'tcx>>(
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ value: T,
+) -> bool {
+ // This is somewhat subtle. In general, we want to forbid
+ // references to `Self` in the argument and return types,
+ // since the value of `Self` is erased. However, there is one
+ // exception: it is ok to reference `Self` in order to access
+ // an associated type of the current trait, since we retain
+ // the value of those associated types in the object type
+ // itself.
+ //
+ // ```rust
+ // trait SuperTrait {
+ // type X;
+ // }
+ //
+ // trait Trait : SuperTrait {
+ // type Y;
+ // fn foo(&self, x: Self) // bad
+ // fn foo(&self) -> Self // bad
+ // fn foo(&self) -> Option<Self> // bad
+ // fn foo(&self) -> Self::Y // OK, desugars to next example
+ // fn foo(&self) -> <Self as Trait>::Y // OK
+ // fn foo(&self) -> Self::X // OK, desugars to next example
+ // fn foo(&self) -> <Self as SuperTrait>::X // OK
+ // }
+ // ```
+ //
+ // However, it is not as simple as allowing `Self` in a projected
+ // type, because there are illegal ways to use `Self` as well:
+ //
+ // ```rust
+ // trait Trait : SuperTrait {
+ // ...
+ // fn foo(&self) -> <Self as SomeOtherTrait>::X;
+ // }
+ // ```
+ //
+ // Here we will not have the type of `X` recorded in the
+ // object type, and we cannot resolve `Self as SomeOtherTrait`
+ // without knowing what `Self` is.
+
+ struct IllegalSelfTypeVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ supertraits: Option<Vec<DefId>>,
+ }
+
+ impl<'tcx> TypeVisitor<'tcx> for IllegalSelfTypeVisitor<'tcx> {
+ type BreakTy = ();
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match t.kind() {
+ ty::Param(_) => {
+ if t == self.tcx.types.self_param {
+ ControlFlow::BREAK
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+ ty::Projection(ref data) => {
+ // This is a projected type `<Foo as SomeTrait>::X`.
+
+ // Compute supertraits of current trait lazily.
+ if self.supertraits.is_none() {
+ let trait_ref = ty::TraitRef::identity(self.tcx, self.trait_def_id);
+ self.supertraits = Some(
+ traits::supertraits(self.tcx, trait_ref).map(|t| t.def_id()).collect(),
+ );
+ }
+
+ // Determine whether the trait reference `Foo as
+ // SomeTrait` is in fact a supertrait of the
+ // current trait. In that case, this type is
+ // legal, because the type `X` will be specified
+ // in the object type. Note that we can just use
+ // direct equality here because all of these types
+ // are part of the formal parameter listing, and
+ // hence there should be no inference variables.
+ let is_supertrait_of_current_trait = self
+ .supertraits
+ .as_ref()
+ .unwrap()
+ .contains(&data.trait_ref(self.tcx).def_id);
+
+ if is_supertrait_of_current_trait {
+ ControlFlow::CONTINUE // do not walk contained types, do not report error, do collect $200
+ } else {
+ t.super_visit_with(self) // DO walk contained types, POSSIBLY reporting an error
+ }
+ }
+ _ => t.super_visit_with(self), // walk contained types, if any
+ }
+ }
+
+ fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // Constants can only influence object safety if they reference `Self`.
+ // This is only possible for unevaluated constants, so we walk these here.
+ //
+ // If `AbstractConst::new` returned an error we already failed compilation
+ // so we don't have to emit an additional error here.
+ //
+ // We currently recurse into abstract consts here but do not recurse in
+ // `is_const_evaluatable`. This means that the object safety check is more
+ // liberal than the const eval check.
+ //
+ // This shouldn't really matter though as we can't really use any
+ // constants which are not considered const evaluatable.
+ use rustc_middle::ty::abstract_const::Node;
+ if let Ok(Some(ct)) = AbstractConst::new(self.tcx, uv.shrink()) {
+ walk_abstract_const(self.tcx, ct, |node| match node.root(self.tcx) {
+ Node::Leaf(leaf) => self.visit_const(leaf),
+ Node::Cast(_, _, ty) => self.visit_ty(ty),
+ Node::Binop(..) | Node::UnaryOp(..) | Node::FunctionCall(_, _) => {
+ ControlFlow::CONTINUE
+ }
+ })
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+ }
+
+ value
+ .visit_with(&mut IllegalSelfTypeVisitor { tcx, trait_def_id, supertraits: None })
+ .is_break()
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { object_safety_violations, ..*providers };
+}
diff --git a/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs b/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs
new file mode 100644
index 000000000..9227bbf01
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/on_unimplemented.rs
@@ -0,0 +1,425 @@
+use rustc_ast::{MetaItem, NestedMetaItem};
+use rustc_attr as attr;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{struct_span_err, ErrorGuaranteed};
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::{self, GenericParamDefKind, TyCtxt};
+use rustc_parse_format::{ParseMode, Parser, Piece, Position};
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+
+#[derive(Clone, Debug)]
+pub struct OnUnimplementedFormatString(Symbol);
+
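+/// A parsed `#[rustc_on_unimplemented]` directive. For example (illustrative;
+/// this internal attribute is only usable with `rustc_attrs`):
+///
+/// ```ignore (internal attribute)
+/// #[rustc_on_unimplemented(
+/// on(_Self = "&str", note = "call `.to_owned()` first"),
+/// message = "`{Self}` does not implement `{MyTrait}`",
+/// label = "required by this bound"
+/// )]
+/// trait MyTrait {}
+/// ```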
+#[derive(Debug)]
+pub struct OnUnimplementedDirective {
+ pub condition: Option<MetaItem>,
+ pub subcommands: Vec<OnUnimplementedDirective>,
+ pub message: Option<OnUnimplementedFormatString>,
+ pub label: Option<OnUnimplementedFormatString>,
+ pub note: Option<OnUnimplementedFormatString>,
+ pub enclosing_scope: Option<OnUnimplementedFormatString>,
+ pub append_const_msg: Option<Option<Symbol>>,
+}
+
+#[derive(Default)]
+pub struct OnUnimplementedNote {
+ pub message: Option<String>,
+ pub label: Option<String>,
+ pub note: Option<String>,
+ pub enclosing_scope: Option<String>,
+ /// Append a message for `~const Trait` errors. `None` means not requested and
+ /// should fall back to a generic message; `Some(None)` means use the default
+ /// appended message; `Some(Some(s))` means use the message `s` instead of the
+ /// default one.
+ pub append_const_msg: Option<Option<Symbol>>,
+}
+
+fn parse_error(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ message: &str,
+ label: &str,
+ note: Option<&str>,
+) -> ErrorGuaranteed {
+ let mut diag = struct_span_err!(tcx.sess, span, E0232, "{}", message);
+ diag.span_label(span, label);
+ if let Some(note) = note {
+ diag.note(note);
+ }
+ diag.emit()
+}
+
+impl<'tcx> OnUnimplementedDirective {
+ fn parse(
+ tcx: TyCtxt<'tcx>,
+ item_def_id: DefId,
+ items: &[NestedMetaItem],
+ span: Span,
+ is_root: bool,
+ ) -> Result<Self, ErrorGuaranteed> {
+ let mut errored = None;
+ let mut item_iter = items.iter();
+
+ let parse_value = |value_str| {
+ OnUnimplementedFormatString::try_parse(tcx, item_def_id, value_str, span).map(Some)
+ };
+
+ let condition = if is_root {
+ None
+ } else {
+ let cond = item_iter
+ .next()
+ .ok_or_else(|| {
+ parse_error(
+ tcx,
+ span,
+ "empty `on`-clause in `#[rustc_on_unimplemented]`",
+ "empty on-clause here",
+ None,
+ )
+ })?
+ .meta_item()
+ .ok_or_else(|| {
+ parse_error(
+ tcx,
+ span,
+ "invalid `on`-clause in `#[rustc_on_unimplemented]`",
+ "invalid on-clause here",
+ None,
+ )
+ })?;
+ attr::eval_condition(cond, &tcx.sess.parse_sess, Some(tcx.features()), &mut |cfg| {
+ if let Some(value) = cfg.value && let Err(guar) = parse_value(value) {
+ errored = Some(guar);
+ }
+ true
+ });
+ Some(cond.clone())
+ };
+
+ let mut message = None;
+ let mut label = None;
+ let mut note = None;
+ let mut enclosing_scope = None;
+ let mut subcommands = vec![];
+ let mut append_const_msg = None;
+
+ for item in item_iter {
+ if item.has_name(sym::message) && message.is_none() {
+ if let Some(message_) = item.value_str() {
+ message = parse_value(message_)?;
+ continue;
+ }
+ } else if item.has_name(sym::label) && label.is_none() {
+ if let Some(label_) = item.value_str() {
+ label = parse_value(label_)?;
+ continue;
+ }
+ } else if item.has_name(sym::note) && note.is_none() {
+ if let Some(note_) = item.value_str() {
+ note = parse_value(note_)?;
+ continue;
+ }
+ } else if item.has_name(sym::enclosing_scope) && enclosing_scope.is_none() {
+ if let Some(enclosing_scope_) = item.value_str() {
+ enclosing_scope = parse_value(enclosing_scope_)?;
+ continue;
+ }
+ } else if item.has_name(sym::on)
+ && is_root
+ && message.is_none()
+ && label.is_none()
+ && note.is_none()
+ {
+ if let Some(items) = item.meta_item_list() {
+ match Self::parse(tcx, item_def_id, &items, item.span(), false) {
+ Ok(subcommand) => subcommands.push(subcommand),
+ Err(reported) => errored = Some(reported),
+ };
+ continue;
+ }
+ } else if item.has_name(sym::append_const_msg) && append_const_msg.is_none() {
+ if let Some(msg) = item.value_str() {
+ append_const_msg = Some(Some(msg));
+ continue;
+ } else if item.is_word() {
+ append_const_msg = Some(None);
+ continue;
+ }
+ }
+
+ // nothing found
+ parse_error(
+ tcx,
+ item.span(),
+ "this attribute must have a valid value",
+ "expected value here",
+ Some(r#"eg `#[rustc_on_unimplemented(message="foo")]`"#),
+ );
+ }
+
+ if let Some(reported) = errored {
+ Err(reported)
+ } else {
+ Ok(OnUnimplementedDirective {
+ condition,
+ subcommands,
+ message,
+ label,
+ note,
+ enclosing_scope,
+ append_const_msg,
+ })
+ }
+ }
+
+ pub fn of_item(tcx: TyCtxt<'tcx>, item_def_id: DefId) -> Result<Option<Self>, ErrorGuaranteed> {
+ let Some(attr) = tcx.get_attr(item_def_id, sym::rustc_on_unimplemented) else {
+ return Ok(None);
+ };
+
+ let result = if let Some(items) = attr.meta_item_list() {
+ Self::parse(tcx, item_def_id, &items, attr.span, true).map(Some)
+ } else if let Some(value) = attr.value_str() {
+ Ok(Some(OnUnimplementedDirective {
+ condition: None,
+ message: None,
+ subcommands: vec![],
+ label: Some(OnUnimplementedFormatString::try_parse(
+ tcx,
+ item_def_id,
+ value,
+ attr.span,
+ )?),
+ note: None,
+ enclosing_scope: None,
+ append_const_msg: None,
+ }))
+ } else {
+ let reported =
+ tcx.sess.delay_span_bug(DUMMY_SP, "of_item: neither meta_item_list nor value_str");
+ return Err(reported);
+ };
+ debug!("of_item({:?}) = {:?}", item_def_id, result);
+ result
+ }
+
+ pub fn evaluate(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+ options: &[(Symbol, Option<String>)],
+ ) -> OnUnimplementedNote {
+ let mut message = None;
+ let mut label = None;
+ let mut note = None;
+ let mut enclosing_scope = None;
+ let mut append_const_msg = None;
+ info!("evaluate({:?}, trait_ref={:?}, options={:?})", self, trait_ref, options);
+
+ let options_map: FxHashMap<Symbol, String> =
+ options.iter().filter_map(|(k, v)| v.as_ref().map(|v| (*k, v.to_owned()))).collect();
+
+ for command in self.subcommands.iter().chain(Some(self)).rev() {
+ if let Some(ref condition) = command.condition && !attr::eval_condition(
+ condition,
+ &tcx.sess.parse_sess,
+ Some(tcx.features()),
+ &mut |cfg| {
+ let value = cfg.value.map(|v| {
+ OnUnimplementedFormatString(v).format(tcx, trait_ref, &options_map)
+ });
+
+ options.contains(&(cfg.name, value))
+ },
+ ) {
+ debug!("evaluate: skipping {:?} due to condition", command);
+ continue;
+ }
+ debug!("evaluate: {:?} succeeded", command);
+ if let Some(ref message_) = command.message {
+ message = Some(message_.clone());
+ }
+
+ if let Some(ref label_) = command.label {
+ label = Some(label_.clone());
+ }
+
+ if let Some(ref note_) = command.note {
+ note = Some(note_.clone());
+ }
+
+ if let Some(ref enclosing_scope_) = command.enclosing_scope {
+ enclosing_scope = Some(enclosing_scope_.clone());
+ }
+
+ append_const_msg = command.append_const_msg;
+ }
+
+ OnUnimplementedNote {
+ label: label.map(|l| l.format(tcx, trait_ref, &options_map)),
+ message: message.map(|m| m.format(tcx, trait_ref, &options_map)),
+ note: note.map(|n| n.format(tcx, trait_ref, &options_map)),
+ enclosing_scope: enclosing_scope.map(|e_s| e_s.format(tcx, trait_ref, &options_map)),
+ append_const_msg,
+ }
+ }
+}
+
+impl<'tcx> OnUnimplementedFormatString {
+ fn try_parse(
+ tcx: TyCtxt<'tcx>,
+ item_def_id: DefId,
+ from: Symbol,
+ err_sp: Span,
+ ) -> Result<Self, ErrorGuaranteed> {
+ let result = OnUnimplementedFormatString(from);
+ result.verify(tcx, item_def_id, err_sp)?;
+ Ok(result)
+ }
+
+ fn verify(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ item_def_id: DefId,
+ span: Span,
+ ) -> Result<(), ErrorGuaranteed> {
+ let trait_def_id = if tcx.is_trait(item_def_id) {
+ item_def_id
+ } else {
+ tcx.trait_id_of_impl(item_def_id)
+ .expect("expected `on_unimplemented` to correspond to a trait")
+ };
+ let trait_name = tcx.item_name(trait_def_id);
+ let generics = tcx.generics_of(item_def_id);
+ let s = self.0.as_str();
+ let parser = Parser::new(s, None, None, false, ParseMode::Format);
+ let mut result = Ok(());
+ for token in parser {
+ match token {
+ Piece::String(_) => (), // Normal string, no need to check it
+ Piece::NextArgument(a) => match a.position {
+ Position::ArgumentNamed(s) => {
+ match Symbol::intern(s) {
+ // `{Self}` is allowed
+ kw::SelfUpper => (),
+ // `{ThisTraitsName}` is allowed
+ s if s == trait_name => (),
+ // `{from_method}` is allowed
+ sym::from_method => (),
+ // `{from_desugaring}` is allowed
+ sym::from_desugaring => (),
+ // `{ItemContext}` is allowed
+ sym::ItemContext => (),
+ // `{integral}` and `{integer}` and `{float}` are allowed
+ sym::integral | sym::integer_ | sym::float => (),
+ // So is `{A}` if A is a type parameter
+ s => match generics.params.iter().find(|param| param.name == s) {
+ Some(_) => (),
+ None => {
+ let reported = struct_span_err!(
+ tcx.sess,
+ span,
+ E0230,
+ "there is no parameter `{}` on {}",
+ s,
+ if trait_def_id == item_def_id {
+ format!("trait `{}`", trait_name)
+ } else {
+ "impl".to_string()
+ }
+ )
+ .emit();
+ result = Err(reported);
+ }
+ },
+ }
+ }
+ // `{:1}` and `{}` are not to be used
+ Position::ArgumentIs(..) | Position::ArgumentImplicitlyIs(_) => {
+ let reported = struct_span_err!(
+ tcx.sess,
+ span,
+ E0231,
+ "only named substitution parameters are allowed"
+ )
+ .emit();
+ result = Err(reported);
+ }
+ },
+ }
+ }
+
+ result
+ }
+
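+ /// Expands the named placeholders against a concrete trait reference. For
+ /// example (illustrative), `{Self}` in a message is replaced with the actual
+ /// self type, so with `Self = Vec<u8>` it renders as `Vec<u8>`.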
+ pub fn format(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+ options: &FxHashMap<Symbol, String>,
+ ) -> String {
+ let name = tcx.item_name(trait_ref.def_id);
+ let trait_str = tcx.def_path_str(trait_ref.def_id);
+ let generics = tcx.generics_of(trait_ref.def_id);
+ let generic_map = generics
+ .params
+ .iter()
+ .filter_map(|param| {
+ let value = match param.kind {
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
+ trait_ref.substs[param.index as usize].to_string()
+ }
+ GenericParamDefKind::Lifetime => return None,
+ };
+ let name = param.name;
+ Some((name, value))
+ })
+ .collect::<FxHashMap<Symbol, String>>();
+ let empty_string = String::new();
+
+ let s = self.0.as_str();
+ let parser = Parser::new(s, None, None, false, ParseMode::Format);
+ let item_context = (options.get(&sym::ItemContext)).unwrap_or(&empty_string);
+ parser
+ .map(|p| match p {
+ Piece::String(s) => s,
+ Piece::NextArgument(a) => match a.position {
+ Position::ArgumentNamed(s) => {
+ let s = Symbol::intern(s);
+ match generic_map.get(&s) {
+ Some(val) => val,
+ None if s == name => &trait_str,
+ None => {
+ if let Some(val) = options.get(&s) {
+ val
+ } else if s == sym::from_desugaring || s == sym::from_method {
+ // don't break messages using these two arguments incorrectly
+ &empty_string
+ } else if s == sym::ItemContext {
+ &item_context
+ } else if s == sym::integral {
+ "{integral}"
+ } else if s == sym::integer_ {
+ "{integer}"
+ } else if s == sym::float {
+ "{float}"
+ } else {
+ bug!(
+ "broken on_unimplemented {:?} for {:?}: \
+ no argument matching {:?}",
+ self.0,
+ trait_ref,
+ s
+ )
+ }
+ }
+ }
+ }
+ _ => bug!("broken on_unimplemented {:?} - bad format arg", self.0),
+ },
+ })
+ .collect()
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs
new file mode 100644
index 000000000..c4e80e1ba
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/project.rs
@@ -0,0 +1,2150 @@
+//! Code for projecting associated types out of trait references.
+
+use super::specialization_graph;
+use super::translate_substs;
+use super::util;
+use super::MismatchedProjectionTypes;
+use super::Obligation;
+use super::ObligationCause;
+use super::PredicateObligation;
+use super::Selection;
+use super::SelectionContext;
+use super::SelectionError;
+use super::{
+ ImplSourceClosureData, ImplSourceDiscriminantKindData, ImplSourceFnPointerData,
+ ImplSourceGeneratorData, ImplSourcePointeeData, ImplSourceUserDefinedData,
+};
+use super::{Normalized, NormalizedTy, ProjectionCacheEntry, ProjectionCacheKey};
+
+use crate::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use crate::infer::{InferCtxt, InferOk, LateBoundRegionConversionTime};
+use crate::traits::error_reporting::InferCtxtExt as _;
+use crate::traits::query::evaluate_obligation::InferCtxtExt as _;
+use crate::traits::select::ProjectionMatchesProjection;
+use rustc_data_structures::sso::SsoHashSet;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_infer::infer::resolve::OpportunisticRegionResolver;
+use rustc_middle::traits::select::OverflowError;
+use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::visit::{MaxUniverse, TypeVisitable};
+use rustc_middle::ty::{self, Term, ToPredicate, Ty, TyCtxt};
+use rustc_span::symbol::sym;
+
+use std::collections::BTreeMap;
+
+pub use rustc_middle::traits::Reveal;
+
+pub type PolyProjectionObligation<'tcx> = Obligation<'tcx, ty::PolyProjectionPredicate<'tcx>>;
+
+pub type ProjectionObligation<'tcx> = Obligation<'tcx, ty::ProjectionPredicate<'tcx>>;
+
+pub type ProjectionTyObligation<'tcx> = Obligation<'tcx, ty::ProjectionTy<'tcx>>;
+
+pub(super) struct InProgress;
+
+/// When attempting to resolve `<T as TraitRef>::Name` ...
+#[derive(Debug)]
+pub enum ProjectionError<'tcx> {
+ /// ...we found multiple sources of information and couldn't resolve the ambiguity.
+ TooManyCandidates,
+
+ /// ...an error occurred matching `T : TraitRef`
+ TraitSelectionError(SelectionError<'tcx>),
+}
+
+#[derive(PartialEq, Eq, Debug)]
+enum ProjectionCandidate<'tcx> {
+ /// From a where-clause in the env or object type
+ ParamEnv(ty::PolyProjectionPredicate<'tcx>),
+
+ /// From the definition of `Trait` when you have something like <<A as Trait>::B as Trait2>::C
+ TraitDef(ty::PolyProjectionPredicate<'tcx>),
+
+ /// Bounds specified on an object type
+ Object(ty::PolyProjectionPredicate<'tcx>),
+
+ /// From an "impl" (or a "pseudo-impl" returned by select)
+ Select(Selection<'tcx>),
+}
+
+enum ProjectionCandidateSet<'tcx> {
+ None,
+ Single(ProjectionCandidate<'tcx>),
+ Ambiguous,
+ Error(SelectionError<'tcx>),
+}
+
+impl<'tcx> ProjectionCandidateSet<'tcx> {
+ fn mark_ambiguous(&mut self) {
+ *self = ProjectionCandidateSet::Ambiguous;
+ }
+
+ fn mark_error(&mut self, err: SelectionError<'tcx>) {
+ *self = ProjectionCandidateSet::Error(err);
+ }
+
+ // Returns true if the push was successful, or false if the candidate
+ // was discarded -- this could be because of ambiguity, or because
+ // a higher-priority candidate is already there.
+ fn push_candidate(&mut self, candidate: ProjectionCandidate<'tcx>) -> bool {
+ use self::ProjectionCandidate::*;
+ use self::ProjectionCandidateSet::*;
+
+ // This wacky variable is just used to try and
+ // make code readable and avoid confusing paths.
+ // It is assigned a "value" of `()` only on those
+ // paths in which we wish to convert `*self` to
+ // ambiguous (and return false, because the candidate
+ // was not used). On other paths, it is not assigned,
+ // and hence if those paths *could* reach the code that
+ // comes after the match, this fn would not compile.
+ let convert_to_ambiguous;
+
+ match self {
+ None => {
+ *self = Single(candidate);
+ return true;
+ }
+
+ Single(current) => {
+                // Duplicates can happen inside ParamEnv. In that case, we
+                // perform a lazy deduplication.
+ if current == &candidate {
+ return false;
+ }
+
+ // Prefer where-clauses. As in select, if there are multiple
+ // candidates, we prefer where-clause candidates over impls. This
+ // may seem a bit surprising, since impls are the source of
+ // "truth" in some sense, but in fact some of the impls that SEEM
+ // applicable are not, because of nested obligations. Where
+ // clauses are the safer choice. See the comment on
+ // `select::SelectionCandidate` and #21974 for more details.
+ match (current, candidate) {
+ (ParamEnv(..), ParamEnv(..)) => convert_to_ambiguous = (),
+ (ParamEnv(..), _) => return false,
+ (_, ParamEnv(..)) => unreachable!(),
+ (_, _) => convert_to_ambiguous = (),
+ }
+ }
+
+ Ambiguous | Error(..) => {
+ return false;
+ }
+ }
+
+ // We only ever get here when we moved from a single candidate
+ // to ambiguous.
+ let () = convert_to_ambiguous;
+ *self = Ambiguous;
+ false
+ }
+}
+
+/// States returned from `poly_project_and_unify_type`. Takes the place
+/// of the old return type, which was:
+/// ```ignore (not-rust)
+/// Result<
+/// Result<Option<Vec<PredicateObligation<'tcx>>>, InProgress>,
+/// MismatchedProjectionTypes<'tcx>,
+/// >
+/// ```
+pub(super) enum ProjectAndUnifyResult<'tcx> {
+    /// The projection bound holds subject to the given obligations. If the
+    /// projection cannot be normalized because the required trait bound does
+    /// not hold, this is still returned, with `obligations` containing a
+    /// predicate that cannot be proven.
+ Holds(Vec<PredicateObligation<'tcx>>),
+ /// The projection cannot be normalized due to ambiguity. Resolving some
+ /// inference variables in the projection may fix this.
+ FailedNormalization,
+    /// The projection cannot be normalized because `poly_project_and_unify_type`
+    /// is called recursively while normalizing the same projection.
+    Recursive,
+    /// The projection can be normalized, but is not equal to the expected type.
+    /// Returns the type error that arose from the mismatch.
+ MismatchedProjectionTypes(MismatchedProjectionTypes<'tcx>),
+}
+
+/// Evaluates constraints of the form:
+/// ```ignore (not-rust)
+/// for<...> <T as Trait>::U == V
+/// ```
+/// If successful, this may result in additional obligations. Also returns
+/// the projection cache key used to track these additional obligations.
+#[instrument(level = "debug", skip(selcx))]
+pub(super) fn poly_project_and_unify_type<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &PolyProjectionObligation<'tcx>,
+) -> ProjectAndUnifyResult<'tcx> {
+ let infcx = selcx.infcx();
+ let r = infcx.commit_if_ok(|_snapshot| {
+ let old_universe = infcx.universe();
+ let placeholder_predicate =
+ infcx.replace_bound_vars_with_placeholders(obligation.predicate);
+ let new_universe = infcx.universe();
+
+ let placeholder_obligation = obligation.with(placeholder_predicate);
+ match project_and_unify_type(selcx, &placeholder_obligation) {
+ ProjectAndUnifyResult::MismatchedProjectionTypes(e) => Err(e),
+ ProjectAndUnifyResult::Holds(obligations)
+ if old_universe != new_universe
+ && selcx.tcx().features().generic_associated_types_extended =>
+ {
+                // If the `generic_associated_types_extended` feature is active, then we ignore any
+                // obligations that reference lifetimes from any universe greater than or equal to
+                // the universe just created. Otherwise, we can end up with something like
+                // `for<'a> I: 'a`, which isn't quite what we want. Ideally, we want either an
+                // implied `for<'a where I: 'a> I: 'a` or we want to "lazily" check that these hold
+                // when we substitute concrete regions. There is design work to be done here; until
+                // then, however, this allows experimenting with potential GAT features without
+                // running into well-formedness issues.
+ let new_obligations = obligations
+ .into_iter()
+ .filter(|obligation| {
+ let mut visitor = MaxUniverse::new();
+ obligation.predicate.visit_with(&mut visitor);
+ visitor.max_universe() < new_universe
+ })
+ .collect();
+ Ok(ProjectAndUnifyResult::Holds(new_obligations))
+ }
+ other => Ok(other),
+ }
+ });
+
+ match r {
+ Ok(inner) => inner,
+ Err(err) => ProjectAndUnifyResult::MismatchedProjectionTypes(err),
+ }
+}
+
+/// Evaluates constraints of the form:
+/// ```ignore (not-rust)
+/// <T as Trait>::U == V
+/// ```
+/// If successful, this may result in additional obligations.
+///
+/// See [poly_project_and_unify_type] for an explanation of the return value.
+#[tracing::instrument(level = "debug", skip(selcx))]
+fn project_and_unify_type<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionObligation<'tcx>,
+) -> ProjectAndUnifyResult<'tcx> {
+ let mut obligations = vec![];
+
+ let infcx = selcx.infcx();
+ let normalized = match opt_normalize_projection_type(
+ selcx,
+ obligation.param_env,
+ obligation.predicate.projection_ty,
+ obligation.cause.clone(),
+ obligation.recursion_depth,
+ &mut obligations,
+ ) {
+ Ok(Some(n)) => n,
+ Ok(None) => return ProjectAndUnifyResult::FailedNormalization,
+ Err(InProgress) => return ProjectAndUnifyResult::Recursive,
+ };
+ debug!(?normalized, ?obligations, "project_and_unify_type result");
+ let actual = obligation.predicate.term;
+    // For an example where this is necessary, see src/test/ui/impl-trait/nested-return-type2.rs.
+ // This allows users to omit re-mentioning all bounds on an associated type and just use an
+ // `impl Trait` for the assoc type to add more bounds.
+ let InferOk { value: actual, obligations: new } =
+ selcx.infcx().replace_opaque_types_with_inference_vars(
+ actual,
+ obligation.cause.body_id,
+ obligation.cause.span,
+ obligation.param_env,
+ );
+ obligations.extend(new);
+
+ match infcx.at(&obligation.cause, obligation.param_env).eq(normalized, actual) {
+ Ok(InferOk { obligations: inferred_obligations, value: () }) => {
+ obligations.extend(inferred_obligations);
+ ProjectAndUnifyResult::Holds(obligations)
+ }
+ Err(err) => {
+ debug!("equating types encountered error {:?}", err);
+ ProjectAndUnifyResult::MismatchedProjectionTypes(MismatchedProjectionTypes { err })
+ }
+ }
+}
+
+/// Normalizes any associated type projections in `value`, replacing
+/// them with a fully resolved type where possible. The return value
+/// combines the normalized result and any additional obligations that
+/// were incurred as a result.
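+///
+/// A sketch of the effect (illustrative):
+///
+/// ```ignore (not-rust)
+/// <Vec<u32> as IntoIterator>::Item  ~~normalize~~>  u32
+/// // plus any obligations incurred while selecting the `IntoIterator` impl
+/// ```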
+pub fn normalize<'a, 'b, 'tcx, T>(
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ value: T,
+) -> Normalized<'tcx, T>
+where
+ T: TypeFoldable<'tcx>,
+{
+ let mut obligations = Vec::new();
+ let value = normalize_to(selcx, param_env, cause, value, &mut obligations);
+ Normalized { value, obligations }
+}
+
+pub fn normalize_to<'a, 'b, 'tcx, T>(
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ value: T,
+ obligations: &mut Vec<PredicateObligation<'tcx>>,
+) -> T
+where
+ T: TypeFoldable<'tcx>,
+{
+ normalize_with_depth_to(selcx, param_env, cause, 0, value, obligations)
+}
+
+/// As `normalize`, but with a custom depth.
+pub fn normalize_with_depth<'a, 'b, 'tcx, T>(
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize,
+ value: T,
+) -> Normalized<'tcx, T>
+where
+ T: TypeFoldable<'tcx>,
+{
+ let mut obligations = Vec::new();
+ let value = normalize_with_depth_to(selcx, param_env, cause, depth, value, &mut obligations);
+ Normalized { value, obligations }
+}
+
+#[instrument(level = "info", skip(selcx, param_env, cause, obligations))]
+pub fn normalize_with_depth_to<'a, 'b, 'tcx, T>(
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize,
+ value: T,
+ obligations: &mut Vec<PredicateObligation<'tcx>>,
+) -> T
+where
+ T: TypeFoldable<'tcx>,
+{
+ debug!(obligations.len = obligations.len());
+ let mut normalizer = AssocTypeNormalizer::new(selcx, param_env, cause, depth, obligations);
+ let result = ensure_sufficient_stack(|| normalizer.fold(value));
+ debug!(?result, obligations.len = normalizer.obligations.len());
+ debug!(?normalizer.obligations,);
+ result
+}
+
+#[instrument(level = "info", skip(selcx, param_env, cause, obligations))]
+pub fn try_normalize_with_depth_to<'a, 'b, 'tcx, T>(
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize,
+ value: T,
+ obligations: &mut Vec<PredicateObligation<'tcx>>,
+) -> T
+where
+ T: TypeFoldable<'tcx>,
+{
+ debug!(obligations.len = obligations.len());
+ let mut normalizer = AssocTypeNormalizer::new_without_eager_inference_replacement(
+ selcx,
+ param_env,
+ cause,
+ depth,
+ obligations,
+ );
+ let result = ensure_sufficient_stack(|| normalizer.fold(value));
+ debug!(?result, obligations.len = normalizer.obligations.len());
+ debug!(?normalizer.obligations,);
+ result
+}
+
+pub(crate) fn needs_normalization<'tcx, T: TypeVisitable<'tcx>>(value: &T, reveal: Reveal) -> bool {
+ match reveal {
+ Reveal::UserFacing => value
+ .has_type_flags(ty::TypeFlags::HAS_TY_PROJECTION | ty::TypeFlags::HAS_CT_PROJECTION),
+ Reveal::All => value.has_type_flags(
+ ty::TypeFlags::HAS_TY_PROJECTION
+ | ty::TypeFlags::HAS_TY_OPAQUE
+ | ty::TypeFlags::HAS_CT_PROJECTION,
+ ),
+ }
+}
+
+struct AssocTypeNormalizer<'a, 'b, 'tcx> {
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ obligations: &'a mut Vec<PredicateObligation<'tcx>>,
+ depth: usize,
+ universes: Vec<Option<ty::UniverseIndex>>,
+ /// If true, when a projection is unable to be completed, an inference
+ /// variable will be created and an obligation registered to project to that
+ /// inference variable. Also, constants will be eagerly evaluated.
+ eager_inference_replacement: bool,
+}
+
+impl<'a, 'b, 'tcx> AssocTypeNormalizer<'a, 'b, 'tcx> {
+ fn new(
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize,
+ obligations: &'a mut Vec<PredicateObligation<'tcx>>,
+ ) -> AssocTypeNormalizer<'a, 'b, 'tcx> {
+ AssocTypeNormalizer {
+ selcx,
+ param_env,
+ cause,
+ obligations,
+ depth,
+ universes: vec![],
+ eager_inference_replacement: true,
+ }
+ }
+
+ fn new_without_eager_inference_replacement(
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize,
+ obligations: &'a mut Vec<PredicateObligation<'tcx>>,
+ ) -> AssocTypeNormalizer<'a, 'b, 'tcx> {
+ AssocTypeNormalizer {
+ selcx,
+ param_env,
+ cause,
+ obligations,
+ depth,
+ universes: vec![],
+ eager_inference_replacement: false,
+ }
+ }
+
+ fn fold<T: TypeFoldable<'tcx>>(&mut self, value: T) -> T {
+ let value = self.selcx.infcx().resolve_vars_if_possible(value);
+ debug!(?value);
+
+ assert!(
+ !value.has_escaping_bound_vars(),
+ "Normalizing {:?} without wrapping in a `Binder`",
+ value
+ );
+
+ if !needs_normalization(&value, self.param_env.reveal()) {
+ value
+ } else {
+ value.fold_with(self)
+ }
+ }
+}
+
+impl<'a, 'b, 'tcx> TypeFolder<'tcx> for AssocTypeNormalizer<'a, 'b, 'tcx> {
+ fn tcx<'c>(&'c self) -> TyCtxt<'tcx> {
+ self.selcx.tcx()
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.universes.push(None);
+ let t = t.super_fold_with(self);
+ self.universes.pop();
+ t
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if !needs_normalization(&ty, self.param_env.reveal()) {
+ return ty;
+ }
+
+ // We try to be a little clever here as a performance optimization in
+ // cases where there are nested projections under binders.
+ // For example:
+ // ```
+ // for<'a> fn(<T as Foo>::One<'a, Box<dyn Bar<'a, Item=<T as Foo>::Two<'a>>>>)
+ // ```
+ // We normalize the substs on the projection before the projecting, but
+ // if we're naive, we'll
+ // replace bound vars on inner, project inner, replace placeholders on inner,
+ // replace bound vars on outer, project outer, replace placeholders on outer
+ //
+ // However, if we're a bit more clever, we can replace the bound vars
+ // on the entire type before normalizing nested projections, meaning we
+ // replace bound vars on outer, project inner,
+ // project outer, replace placeholders on outer
+ //
+ // This is possible because the inner `'a` will already be a placeholder
+ // when we need to normalize the inner projection
+ //
+ // On the other hand, this does add a bit of complexity, since we only
+ // replace bound vars if the current type is a `Projection` and we need
+ // to make sure we don't forget to fold the substs regardless.
+
+ match *ty.kind() {
+ // This is really important. While we *can* handle this, this has
+ // severe performance implications for large opaque types with
+ // late-bound regions. See `issue-88862` benchmark.
+ ty::Opaque(def_id, substs) if !substs.has_escaping_bound_vars() => {
+ // Only normalize `impl Trait` outside of type inference, usually in codegen.
+ match self.param_env.reveal() {
+ Reveal::UserFacing => ty.super_fold_with(self),
+
+ Reveal::All => {
+ let recursion_limit = self.tcx().recursion_limit();
+ if !recursion_limit.value_within_limit(self.depth) {
+ let obligation = Obligation::with_depth(
+ self.cause.clone(),
+ recursion_limit.0,
+ self.param_env,
+ ty,
+ );
+ self.selcx.infcx().report_overflow_error(&obligation, true);
+ }
+
+ let substs = substs.fold_with(self);
+ let generic_ty = self.tcx().bound_type_of(def_id);
+ let concrete_ty = generic_ty.subst(self.tcx(), substs);
+ self.depth += 1;
+ let folded_ty = self.fold_ty(concrete_ty);
+ self.depth -= 1;
+ folded_ty
+ }
+ }
+ }
+
+ ty::Projection(data) if !data.has_escaping_bound_vars() => {
+ // This branch is *mostly* just an optimization: when we don't
+ // have escaping bound vars, we don't need to replace them with
+ // placeholders (see branch below). *Also*, we know that we can
+ // register an obligation to *later* project, since we know
+ // there won't be bound vars there.
+ let data = data.fold_with(self);
+ let normalized_ty = if self.eager_inference_replacement {
+ normalize_projection_type(
+ self.selcx,
+ self.param_env,
+ data,
+ self.cause.clone(),
+ self.depth,
+ &mut self.obligations,
+ )
+ } else {
+ opt_normalize_projection_type(
+ self.selcx,
+ self.param_env,
+ data,
+ self.cause.clone(),
+ self.depth,
+ &mut self.obligations,
+ )
+ .ok()
+ .flatten()
+ .unwrap_or_else(|| ty::Term::Ty(ty.super_fold_with(self)))
+ };
+ debug!(
+ ?self.depth,
+ ?ty,
+ ?normalized_ty,
+ obligations.len = ?self.obligations.len(),
+ "AssocTypeNormalizer: normalized type"
+ );
+ normalized_ty.ty().unwrap()
+ }
+
+ ty::Projection(data) => {
+ // If there are escaping bound vars, we temporarily replace the
+ // bound vars with placeholders. Note though, that in the case
+ // that we still can't project for whatever reason (e.g. self
+ // type isn't known enough), we *can't* register an obligation
+ // and return an inference variable (since then that obligation
+ // would have bound vars and that's a can of worms). Instead,
+ // we just give up and fall back to pretending like we never tried!
+ //
+ // Note: this isn't necessarily the final approach here; we may
+ // want to figure out how to register obligations with escaping vars
+ // or handle this some other way.
+
+ let infcx = self.selcx.infcx();
+ let (data, mapped_regions, mapped_types, mapped_consts) =
+ BoundVarReplacer::replace_bound_vars(infcx, &mut self.universes, data);
+ let data = data.fold_with(self);
+ let normalized_ty = opt_normalize_projection_type(
+ self.selcx,
+ self.param_env,
+ data,
+ self.cause.clone(),
+ self.depth,
+ &mut self.obligations,
+ )
+ .ok()
+ .flatten()
+ .map(|term| term.ty().unwrap())
+ .map(|normalized_ty| {
+ PlaceholderReplacer::replace_placeholders(
+ infcx,
+ mapped_regions,
+ mapped_types,
+ mapped_consts,
+ &self.universes,
+ normalized_ty,
+ )
+ })
+ .unwrap_or_else(|| ty.super_fold_with(self));
+
+ debug!(
+ ?self.depth,
+ ?ty,
+ ?normalized_ty,
+ obligations.len = ?self.obligations.len(),
+ "AssocTypeNormalizer: normalized type"
+ );
+ normalized_ty
+ }
+
+ _ => ty.super_fold_with(self),
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn fold_const(&mut self, constant: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ if self.selcx.tcx().lazy_normalization() || !self.eager_inference_replacement {
+ constant
+ } else {
+ let constant = constant.super_fold_with(self);
+ debug!(?constant);
+ debug!("self.param_env: {:?}", self.param_env);
+ constant.eval(self.selcx.tcx(), self.param_env)
+ }
+ }
+}
+
+pub struct BoundVarReplacer<'me, 'tcx> {
+ infcx: &'me InferCtxt<'me, 'tcx>,
+    // These three maps track the bound variables that were replaced by placeholders. It might be
+ // nice to remove these since we already have the `kind` in the placeholder; we really just need
+ // the `var` (but we *could* bring that into scope if we were to track them as we pass them).
+ mapped_regions: BTreeMap<ty::PlaceholderRegion, ty::BoundRegion>,
+ mapped_types: BTreeMap<ty::PlaceholderType, ty::BoundTy>,
+ mapped_consts: BTreeMap<ty::PlaceholderConst<'tcx>, ty::BoundVar>,
+ // The current depth relative to *this* folding, *not* the entire normalization. In other words,
+ // the depth of binders we've passed here.
+ current_index: ty::DebruijnIndex,
+ // The `UniverseIndex` of the binding levels above us. These are optional, since we are lazy:
+ // we don't actually create a universe until we see a bound var we have to replace.
+ universe_indices: &'me mut Vec<Option<ty::UniverseIndex>>,
+}
+
+impl<'me, 'tcx> BoundVarReplacer<'me, 'tcx> {
+    /// Replaces escaping bound vars with placeholders, returning the rewritten
+    /// value together with the maps from each placeholder back to the bound var
+    /// it replaced. If there are any bound vars that use a binding level above
+    /// `universe_indices.len()`, we ICE.
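+    ///
+    /// A sketch of the rewrite for a single escaping region (illustrative):
+    ///
+    /// ```ignore (not-rust)
+    /// for<'a> fn(&'a u32)  ~~>  fn(&'p u32)
+    /// // where 'p is a fresh placeholder, recorded in `mapped_regions`
+    /// ```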
+ pub fn replace_bound_vars<T: TypeFoldable<'tcx>>(
+ infcx: &'me InferCtxt<'me, 'tcx>,
+ universe_indices: &'me mut Vec<Option<ty::UniverseIndex>>,
+ value: T,
+ ) -> (
+ T,
+ BTreeMap<ty::PlaceholderRegion, ty::BoundRegion>,
+ BTreeMap<ty::PlaceholderType, ty::BoundTy>,
+ BTreeMap<ty::PlaceholderConst<'tcx>, ty::BoundVar>,
+ ) {
+ let mapped_regions: BTreeMap<ty::PlaceholderRegion, ty::BoundRegion> = BTreeMap::new();
+ let mapped_types: BTreeMap<ty::PlaceholderType, ty::BoundTy> = BTreeMap::new();
+ let mapped_consts: BTreeMap<ty::PlaceholderConst<'tcx>, ty::BoundVar> = BTreeMap::new();
+
+ let mut replacer = BoundVarReplacer {
+ infcx,
+ mapped_regions,
+ mapped_types,
+ mapped_consts,
+ current_index: ty::INNERMOST,
+ universe_indices,
+ };
+
+ let value = value.fold_with(&mut replacer);
+
+ (value, replacer.mapped_regions, replacer.mapped_types, replacer.mapped_consts)
+ }
+
+ fn universe_for(&mut self, debruijn: ty::DebruijnIndex) -> ty::UniverseIndex {
+ let infcx = self.infcx;
+        // `universe_indices` stores one entry per binder, outermost first; a
+        // var bound at the innermost of those binders has
+        // `debruijn == current_index`, so convert the de Bruijn index into an
+        // index counted from the end of the vector.
+        let index =
+            self.universe_indices.len() + self.current_index.as_usize() - debruijn.as_usize() - 1;
+ let universe = self.universe_indices[index].unwrap_or_else(|| {
+ for i in self.universe_indices.iter_mut().take(index + 1) {
+ *i = i.or_else(|| Some(infcx.create_next_universe()))
+ }
+ self.universe_indices[index].unwrap()
+ });
+ universe
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for BoundVarReplacer<'_, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(debruijn, _)
+ if debruijn.as_usize() + 1
+ > self.current_index.as_usize() + self.universe_indices.len() =>
+ {
+ bug!("Bound vars outside of `self.universe_indices`");
+ }
+ ty::ReLateBound(debruijn, br) if debruijn >= self.current_index => {
+ let universe = self.universe_for(debruijn);
+ let p = ty::PlaceholderRegion { universe, name: br.kind };
+ self.mapped_regions.insert(p, br);
+ self.infcx.tcx.mk_region(ty::RePlaceholder(p))
+ }
+ _ => r,
+ }
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match *t.kind() {
+ ty::Bound(debruijn, _)
+ if debruijn.as_usize() + 1
+ > self.current_index.as_usize() + self.universe_indices.len() =>
+ {
+ bug!("Bound vars outside of `self.universe_indices`");
+ }
+ ty::Bound(debruijn, bound_ty) if debruijn >= self.current_index => {
+ let universe = self.universe_for(debruijn);
+ let p = ty::PlaceholderType { universe, name: bound_ty.var };
+ self.mapped_types.insert(p, bound_ty);
+ self.infcx.tcx.mk_ty(ty::Placeholder(p))
+ }
+ _ if t.has_vars_bound_at_or_above(self.current_index) => t.super_fold_with(self),
+ _ => t,
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ match ct.kind() {
+ ty::ConstKind::Bound(debruijn, _)
+ if debruijn.as_usize() + 1
+ > self.current_index.as_usize() + self.universe_indices.len() =>
+ {
+ bug!("Bound vars outside of `self.universe_indices`");
+ }
+ ty::ConstKind::Bound(debruijn, bound_const) if debruijn >= self.current_index => {
+ let universe = self.universe_for(debruijn);
+ let p = ty::PlaceholderConst { universe, name: bound_const };
+ self.mapped_consts.insert(p, bound_const);
+ self.infcx
+ .tcx
+ .mk_const(ty::ConstS { kind: ty::ConstKind::Placeholder(p), ty: ct.ty() })
+ }
+ _ => ct.super_fold_with(self),
+ }
+ }
+
+ fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
+ if p.has_vars_bound_at_or_above(self.current_index) { p.super_fold_with(self) } else { p }
+ }
+}
+
+// The inverse of `BoundVarReplacer`: replaces placeholders with the bound vars from which they came.
+pub struct PlaceholderReplacer<'me, 'tcx> {
+ infcx: &'me InferCtxt<'me, 'tcx>,
+ mapped_regions: BTreeMap<ty::PlaceholderRegion, ty::BoundRegion>,
+ mapped_types: BTreeMap<ty::PlaceholderType, ty::BoundTy>,
+ mapped_consts: BTreeMap<ty::PlaceholderConst<'tcx>, ty::BoundVar>,
+ universe_indices: &'me [Option<ty::UniverseIndex>],
+ current_index: ty::DebruijnIndex,
+}
+
+impl<'me, 'tcx> PlaceholderReplacer<'me, 'tcx> {
+ pub fn replace_placeholders<T: TypeFoldable<'tcx>>(
+ infcx: &'me InferCtxt<'me, 'tcx>,
+ mapped_regions: BTreeMap<ty::PlaceholderRegion, ty::BoundRegion>,
+ mapped_types: BTreeMap<ty::PlaceholderType, ty::BoundTy>,
+ mapped_consts: BTreeMap<ty::PlaceholderConst<'tcx>, ty::BoundVar>,
+ universe_indices: &'me [Option<ty::UniverseIndex>],
+ value: T,
+ ) -> T {
+ let mut replacer = PlaceholderReplacer {
+ infcx,
+ mapped_regions,
+ mapped_types,
+ mapped_consts,
+ universe_indices,
+ current_index: ty::INNERMOST,
+ };
+ value.fold_with(&mut replacer)
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for PlaceholderReplacer<'_, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ if !t.has_placeholders() && !t.has_infer_regions() {
+ return t;
+ }
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ fn fold_region(&mut self, r0: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ let r1 = match *r0 {
+ ty::ReVar(_) => self
+ .infcx
+ .inner
+ .borrow_mut()
+ .unwrap_region_constraints()
+ .opportunistic_resolve_region(self.infcx.tcx, r0),
+ _ => r0,
+ };
+
+ let r2 = match *r1 {
+ ty::RePlaceholder(p) => {
+ let replace_var = self.mapped_regions.get(&p);
+ match replace_var {
+ Some(replace_var) => {
+ let index = self
+ .universe_indices
+ .iter()
+ .position(|u| matches!(u, Some(pu) if *pu == p.universe))
+ .unwrap_or_else(|| bug!("Unexpected placeholder universe."));
+ let db = ty::DebruijnIndex::from_usize(
+ self.universe_indices.len() - index + self.current_index.as_usize() - 1,
+ );
+ self.tcx().mk_region(ty::ReLateBound(db, *replace_var))
+ }
+ None => r1,
+ }
+ }
+ _ => r1,
+ };
+
+ debug!(?r0, ?r1, ?r2, "fold_region");
+
+ r2
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match *ty.kind() {
+ ty::Placeholder(p) => {
+ let replace_var = self.mapped_types.get(&p);
+ match replace_var {
+ Some(replace_var) => {
+ let index = self
+ .universe_indices
+ .iter()
+ .position(|u| matches!(u, Some(pu) if *pu == p.universe))
+ .unwrap_or_else(|| bug!("Unexpected placeholder universe."));
+ let db = ty::DebruijnIndex::from_usize(
+ self.universe_indices.len() - index + self.current_index.as_usize() - 1,
+ );
+ self.tcx().mk_ty(ty::Bound(db, *replace_var))
+ }
+ None => ty,
+ }
+ }
+
+ _ if ty.has_placeholders() || ty.has_infer_regions() => ty.super_fold_with(self),
+ _ => ty,
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ if let ty::ConstKind::Placeholder(p) = ct.kind() {
+ let replace_var = self.mapped_consts.get(&p);
+ match replace_var {
+ Some(replace_var) => {
+ let index = self
+ .universe_indices
+ .iter()
+ .position(|u| matches!(u, Some(pu) if *pu == p.universe))
+ .unwrap_or_else(|| bug!("Unexpected placeholder universe."));
+ let db = ty::DebruijnIndex::from_usize(
+ self.universe_indices.len() - index + self.current_index.as_usize() - 1,
+ );
+ self.tcx().mk_const(ty::ConstS {
+ kind: ty::ConstKind::Bound(db, *replace_var),
+ ty: ct.ty(),
+ })
+ }
+ None => ct,
+ }
+ } else {
+ ct.super_fold_with(self)
+ }
+ }
+}
+
+/// The guts of `normalize`: normalize a specific projection like `<T
+/// as Trait>::Item`. The result is always a term (and possibly
+/// additional obligations). If ambiguity arises, which implies that
+/// there are unresolved type variables in the projection, we will
+/// substitute a fresh type variable `$X` and generate a new
+/// obligation `<T as Trait>::Item == $X` for later.
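+///
+/// A sketch of that ambiguous case, using inference-variable notation
+/// (illustrative):
+///
+/// ```ignore (not-rust)
+/// <_#0t as Trait>::Item  ~~>  $X,  deferring `<_#0t as Trait>::Item == $X`
+/// ```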
+pub fn normalize_projection_type<'a, 'b, 'tcx>(
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize,
+ obligations: &mut Vec<PredicateObligation<'tcx>>,
+) -> Term<'tcx> {
+ opt_normalize_projection_type(
+ selcx,
+ param_env,
+ projection_ty,
+ cause.clone(),
+ depth,
+ obligations,
+ )
+ .ok()
+ .flatten()
+ .unwrap_or_else(move || {
+ // if we bottom out in ambiguity, create a type variable
+ // and a deferred predicate to resolve this when more type
+ // information is available.
+
+ selcx
+ .infcx()
+ .infer_projection(param_env, projection_ty, cause, depth + 1, obligations)
+ .into()
+ })
+}
+
+/// The guts of `normalize`: normalize a specific projection like `<T
+/// as Trait>::Item`. The result is always a term (and possibly
+/// additional obligations). Returns `None` in the case of ambiguity,
+/// which indicates that there are unbound type variables.
+///
+/// This function used to return `Option<NormalizedTy<'tcx>>`, which contains a
+/// `Ty<'tcx>` and an obligations vector. But that obligation vector was very
+/// often immediately appended to another obligations vector. So now this
+/// function takes an obligations vector and appends to it directly, which is
+/// slightly uglier but avoids the need for an extra short-lived allocation.
+#[instrument(level = "debug", skip(selcx, param_env, cause, obligations))]
+fn opt_normalize_projection_type<'a, 'b, 'tcx>(
+ selcx: &'a mut SelectionContext<'b, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize,
+ obligations: &mut Vec<PredicateObligation<'tcx>>,
+) -> Result<Option<Term<'tcx>>, InProgress> {
+ let infcx = selcx.infcx();
+ // Don't use the projection cache in intercrate mode -
+    // the `infcx` may be re-used between intercrate and non-intercrate
+ // mode, which could lead to using incorrect cache results.
+ let use_cache = !selcx.is_intercrate();
+
+ let projection_ty = infcx.resolve_vars_if_possible(projection_ty);
+ let cache_key = ProjectionCacheKey::new(projection_ty);
+
+ // FIXME(#20304) For now, I am caching here, which is good, but it
+ // means we don't capture the type variables that are created in
+ // the case of ambiguity. Which means we may create a large stream
+ // of such variables. OTOH, if we move the caching up a level, we
+ // would not benefit from caching when proving `T: Trait<U=Foo>`
+ // bounds. It might be the case that we want two distinct caches,
+ // or else another kind of cache entry.
+
+ let cache_result = if use_cache {
+ infcx.inner.borrow_mut().projection_cache().try_start(cache_key)
+ } else {
+ Ok(())
+ };
+ match cache_result {
+ Ok(()) => debug!("no cache"),
+ Err(ProjectionCacheEntry::Ambiguous) => {
+ // If we found ambiguity the last time, that means we will continue
+ // to do so until some type in the key changes (and we know it
+ // hasn't, because we just fully resolved it).
+ debug!("found cache entry: ambiguous");
+ return Ok(None);
+ }
+ Err(ProjectionCacheEntry::InProgress) => {
+ // Under lazy normalization, this can arise when
+ // bootstrapping. That is, imagine an environment with a
+ // where-clause like `A::B == u32`. Now, if we are asked
+ // to normalize `A::B`, we will want to check the
+ // where-clauses in scope. So we will try to unify `A::B`
+ // with `A::B`, which can trigger a recursive
+ // normalization.
+
+ debug!("found cache entry: in-progress");
+
+ // Cache that normalizing this projection resulted in a cycle. This
+ // should ensure that, unless this happens within a snapshot that's
+ // rolled back, fulfillment or evaluation will notice the cycle.
+
+ if use_cache {
+ infcx.inner.borrow_mut().projection_cache().recur(cache_key);
+ }
+ return Err(InProgress);
+ }
+ Err(ProjectionCacheEntry::Recur) => {
+ debug!("recur cache");
+ return Err(InProgress);
+ }
+ Err(ProjectionCacheEntry::NormalizedTy { ty, complete: _ }) => {
+ // This is the hottest path in this function.
+ //
+ // If we find the value in the cache, then return it along
+ // with the obligations that went along with it. Note
+ // that, when using a fulfillment context, these
+ // obligations could in principle be ignored: they have
+ // already been registered when the cache entry was
+ // created (and hence the new ones will quickly be
+ // discarded as duplicated). But when doing trait
+ // evaluation this is not the case, and dropping the trait
+        // evaluations can cause ICEs (e.g., #43132).
+ debug!(?ty, "found normalized ty");
+ obligations.extend(ty.obligations);
+ return Ok(Some(ty.value));
+ }
+ Err(ProjectionCacheEntry::Error) => {
+ debug!("opt_normalize_projection_type: found error");
+ let result = normalize_to_error(selcx, param_env, projection_ty, cause, depth);
+ obligations.extend(result.obligations);
+ return Ok(Some(result.value.into()));
+ }
+ }
+
+ let obligation = Obligation::with_depth(cause.clone(), depth, param_env, projection_ty);
+
+ match project(selcx, &obligation) {
+ Ok(Projected::Progress(Progress {
+ term: projected_term,
+ obligations: mut projected_obligations,
+ })) => {
+ // if projection succeeded, then what we get out of this
+ // is also non-normalized (consider: it was derived from
+            // an impl, a where-clause, etc.) and hence we must
+            // re-normalize it.
+
+ let projected_term = selcx.infcx().resolve_vars_if_possible(projected_term);
+
+ let mut result = if projected_term.has_projections() {
+ let mut normalizer = AssocTypeNormalizer::new(
+ selcx,
+ param_env,
+ cause,
+ depth + 1,
+ &mut projected_obligations,
+ );
+ let normalized_ty = normalizer.fold(projected_term);
+
+ Normalized { value: normalized_ty, obligations: projected_obligations }
+ } else {
+ Normalized { value: projected_term, obligations: projected_obligations }
+ };
+
+ let mut deduped: SsoHashSet<_> = Default::default();
+ result.obligations.drain_filter(|projected_obligation| {
+ if !deduped.insert(projected_obligation.clone()) {
+ return true;
+ }
+ false
+ });
+
+ if use_cache {
+ infcx.inner.borrow_mut().projection_cache().insert_term(cache_key, result.clone());
+ }
+ obligations.extend(result.obligations);
+ Ok(Some(result.value))
+ }
+ Ok(Projected::NoProgress(projected_ty)) => {
+ let result = Normalized { value: projected_ty, obligations: vec![] };
+ if use_cache {
+ infcx.inner.borrow_mut().projection_cache().insert_term(cache_key, result.clone());
+ }
+ // No need to extend `obligations`.
+ Ok(Some(result.value))
+ }
+ Err(ProjectionError::TooManyCandidates) => {
+ debug!("opt_normalize_projection_type: too many candidates");
+ if use_cache {
+ infcx.inner.borrow_mut().projection_cache().ambiguous(cache_key);
+ }
+ Ok(None)
+ }
+ Err(ProjectionError::TraitSelectionError(_)) => {
+ debug!("opt_normalize_projection_type: ERROR");
+ // if we got an error processing the `T as Trait` part,
+ // just return `ty::err` but add the obligation `T :
+ // Trait`, which when processed will cause the error to be
+ // reported later
+
+ if use_cache {
+ infcx.inner.borrow_mut().projection_cache().error(cache_key);
+ }
+ let result = normalize_to_error(selcx, param_env, projection_ty, cause, depth);
+ obligations.extend(result.obligations);
+ Ok(Some(result.value.into()))
+ }
+ }
+}
+
+/// If we are projecting `<T as Trait>::Item` but `T: Trait` does not
+/// hold, then in various error cases we cannot generate a valid
+/// normalized projection. Therefore, we create an inference variable and
+/// return an associated obligation that, when fulfilled, will lead to
+/// an error.
+///
+/// Note that we used to return `Error` here, but that was quite
+/// dubious -- the premise was that an error would *eventually* be
+/// reported, when the obligation was processed. But in general once
+/// you see an `Error` you are supposed to be able to assume that an
+/// error *has been* reported, so that you can take whatever heuristic
+/// paths you want to take. To make things worse, it was possible for
+/// cycles to arise, where you basically had a setup like `<MyType<$0>
+/// as Trait>::Foo == $0`. Here, normalizing `<MyType<$0> as
+/// Trait>::Foo` to `[type error]` would lead to an obligation of
+/// `<MyType<[type error]> as Trait>::Foo`. We are supposed to report
+/// an error for this obligation, but we legitimately should not,
+/// because it contains `[type error]`. Yuck! (See issue #29857 for
+/// one case where this arose.)
+fn normalize_to_error<'a, 'tcx>(
+ selcx: &mut SelectionContext<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ projection_ty: ty::ProjectionTy<'tcx>,
+ cause: ObligationCause<'tcx>,
+ depth: usize,
+) -> NormalizedTy<'tcx> {
+ let trait_ref = ty::Binder::dummy(projection_ty.trait_ref(selcx.tcx()));
+ let trait_obligation = Obligation {
+ cause,
+ recursion_depth: depth,
+ param_env,
+ predicate: trait_ref.without_const().to_predicate(selcx.tcx()),
+ };
+ let tcx = selcx.infcx().tcx;
+ let def_id = projection_ty.item_def_id;
+ let new_value = selcx.infcx().next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::NormalizeProjectionType,
+ span: tcx.def_span(def_id),
+ });
+ Normalized { value: new_value, obligations: vec![trait_obligation] }
+}
+
+enum Projected<'tcx> {
+ Progress(Progress<'tcx>),
+ NoProgress(ty::Term<'tcx>),
+}
+
+struct Progress<'tcx> {
+ term: ty::Term<'tcx>,
+ obligations: Vec<PredicateObligation<'tcx>>,
+}
+
+impl<'tcx> Progress<'tcx> {
+ fn error(tcx: TyCtxt<'tcx>) -> Self {
+ Progress { term: tcx.ty_error().into(), obligations: vec![] }
+ }
+
+ fn with_addl_obligations(mut self, mut obligations: Vec<PredicateObligation<'tcx>>) -> Self {
+ self.obligations.append(&mut obligations);
+ self
+ }
+}
+
+/// Computes the result of a projection type (if we can).
+///
+/// IMPORTANT:
+/// - `obligation` must be fully normalized
+#[tracing::instrument(level = "info", skip(selcx))]
+fn project<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+) -> Result<Projected<'tcx>, ProjectionError<'tcx>> {
+ if !selcx.tcx().recursion_limit().value_within_limit(obligation.recursion_depth) {
+ // This should really be an immediate error, but some existing code
+ // relies on being able to recover from this.
+ return Err(ProjectionError::TraitSelectionError(SelectionError::Overflow(
+ OverflowError::Canonical,
+ )));
+ }
+
+ if obligation.predicate.references_error() {
+ return Ok(Projected::Progress(Progress::error(selcx.tcx())));
+ }
+
+ let mut candidates = ProjectionCandidateSet::None;
+
+ // Make sure that the following procedures are kept in order. ParamEnv
+ // needs to be first because it has highest priority, and Select checks
+    // the return value of push_candidate, which assumes it is run last.
+ assemble_candidates_from_param_env(selcx, obligation, &mut candidates);
+
+ assemble_candidates_from_trait_def(selcx, obligation, &mut candidates);
+
+ assemble_candidates_from_object_ty(selcx, obligation, &mut candidates);
+
+ if let ProjectionCandidateSet::Single(ProjectionCandidate::Object(_)) = candidates {
+ // Avoid normalization cycle from selection (see
+ // `assemble_candidates_from_object_ty`).
+ // FIXME(lazy_normalization): Lazy normalization should save us from
+ // having to special case this.
+ } else {
+ assemble_candidates_from_impls(selcx, obligation, &mut candidates);
+ };
+
+ match candidates {
+ ProjectionCandidateSet::Single(candidate) => {
+ Ok(Projected::Progress(confirm_candidate(selcx, obligation, candidate)))
+ }
+ ProjectionCandidateSet::None => Ok(Projected::NoProgress(
+ // FIXME(associated_const_generics): this may need to change in the future?
+ // need to investigate whether or not this is fine.
+ selcx
+ .tcx()
+ .mk_projection(obligation.predicate.item_def_id, obligation.predicate.substs)
+ .into(),
+ )),
+        // An error occurred while trying to process impls.
+ ProjectionCandidateSet::Error(e) => Err(ProjectionError::TraitSelectionError(e)),
+ // Inherent ambiguity that prevents us from even enumerating the
+ // candidates.
+ ProjectionCandidateSet::Ambiguous => Err(ProjectionError::TooManyCandidates),
+ }
+}
+
+/// The first thing we have to do is scan through the parameter
+/// environment to see whether there are any projection predicates
+/// there that can answer this question.
+fn assemble_candidates_from_param_env<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ candidate_set: &mut ProjectionCandidateSet<'tcx>,
+) {
+ assemble_candidates_from_predicates(
+ selcx,
+ obligation,
+ candidate_set,
+ ProjectionCandidate::ParamEnv,
+ obligation.param_env.caller_bounds().iter(),
+ false,
+ );
+}
+
+/// In the case of a nested projection like `<<A as Foo>::FooT as Bar>::BarT`, we may find
+/// that the definition of `Foo` has some clues:
+///
+/// ```ignore (illustrative)
+/// trait Foo {
+///     type FooT: Bar<BarT = i32>;
+/// }
+/// ```
+///
+/// Here, for example, we could conclude that the result is `i32`.
+fn assemble_candidates_from_trait_def<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ candidate_set: &mut ProjectionCandidateSet<'tcx>,
+) {
+ debug!("assemble_candidates_from_trait_def(..)");
+
+ let tcx = selcx.tcx();
+ // Check whether the self-type is itself a projection.
+ // If so, extract what we know from the trait and try to come up with a good answer.
+ let bounds = match *obligation.predicate.self_ty().kind() {
+ ty::Projection(ref data) => tcx.bound_item_bounds(data.item_def_id).subst(tcx, data.substs),
+ ty::Opaque(def_id, substs) => tcx.bound_item_bounds(def_id).subst(tcx, substs),
+ ty::Infer(ty::TyVar(_)) => {
+ // If the self-type is an inference variable, then it MAY wind up
+ // being a projected type, so induce an ambiguity.
+ candidate_set.mark_ambiguous();
+ return;
+ }
+ _ => return,
+ };
+
+ assemble_candidates_from_predicates(
+ selcx,
+ obligation,
+ candidate_set,
+ ProjectionCandidate::TraitDef,
+ bounds.iter(),
+ true,
+ );
+}
+
+/// In the case of a trait object like
+/// `<dyn Iterator<Item = ()> as Iterator>::Item` we can use the existential
+/// predicate in the trait object.
+///
+/// We don't go through the select candidate for these bounds to avoid cycles:
+/// In the above case, `dyn Iterator<Item = ()>: Iterator` would create a
+/// nested obligation of `<dyn Iterator<Item = ()> as Iterator>::Item: Sized`,
+/// this then has to be normalized without having to prove
+/// `dyn Iterator<Item = ()>: Iterator` again.
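+///
+/// A sketch of the shortcut (illustrative):
+///
+/// ```ignore (not-rust)
+/// <dyn Iterator<Item = ()> as Iterator>::Item  ~~>  ()
+/// // read directly off the object type's projection bound
+/// ```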
+fn assemble_candidates_from_object_ty<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ candidate_set: &mut ProjectionCandidateSet<'tcx>,
+) {
+ debug!("assemble_candidates_from_object_ty(..)");
+
+ let tcx = selcx.tcx();
+
+ let self_ty = obligation.predicate.self_ty();
+ let object_ty = selcx.infcx().shallow_resolve(self_ty);
+ let data = match object_ty.kind() {
+ ty::Dynamic(data, ..) => data,
+ ty::Infer(ty::TyVar(_)) => {
+ // If the self-type is an inference variable, then it MAY wind up
+ // being an object type, so induce an ambiguity.
+ candidate_set.mark_ambiguous();
+ return;
+ }
+ _ => return,
+ };
+ let env_predicates = data
+ .projection_bounds()
+ .filter(|bound| bound.item_def_id() == obligation.predicate.item_def_id)
+ .map(|p| p.with_self_ty(tcx, object_ty).to_predicate(tcx));
+
+ assemble_candidates_from_predicates(
+ selcx,
+ obligation,
+ candidate_set,
+ ProjectionCandidate::Object,
+ env_predicates,
+ false,
+ );
+}
+
+#[tracing::instrument(
+ level = "debug",
+ skip(selcx, candidate_set, ctor, env_predicates, potentially_unnormalized_candidates)
+)]
+fn assemble_candidates_from_predicates<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ candidate_set: &mut ProjectionCandidateSet<'tcx>,
+ ctor: fn(ty::PolyProjectionPredicate<'tcx>) -> ProjectionCandidate<'tcx>,
+ env_predicates: impl Iterator<Item = ty::Predicate<'tcx>>,
+ potentially_unnormalized_candidates: bool,
+) {
+ let infcx = selcx.infcx();
+ for predicate in env_predicates {
+ let bound_predicate = predicate.kind();
+ if let ty::PredicateKind::Projection(data) = predicate.kind().skip_binder() {
+ let data = bound_predicate.rebind(data);
+ if data.projection_def_id() != obligation.predicate.item_def_id {
+ continue;
+ }
+
+ let is_match = infcx.probe(|_| {
+ selcx.match_projection_projections(
+ obligation,
+ data,
+ potentially_unnormalized_candidates,
+ )
+ });
+
+ match is_match {
+ ProjectionMatchesProjection::Yes => {
+ candidate_set.push_candidate(ctor(data));
+
+ if potentially_unnormalized_candidates
+ && !obligation.predicate.has_infer_types_or_consts()
+ {
+ // HACK: Pick the first trait def candidate for a fully
+ // inferred predicate. This is to allow duplicates that
+ // differ only in normalization.
+ return;
+ }
+ }
+ ProjectionMatchesProjection::Ambiguous => {
+ candidate_set.mark_ambiguous();
+ }
+ ProjectionMatchesProjection::No => {}
+ }
+ }
+ }
+}
+
+#[tracing::instrument(level = "debug", skip(selcx, obligation, candidate_set))]
+fn assemble_candidates_from_impls<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ candidate_set: &mut ProjectionCandidateSet<'tcx>,
+) {
+ // If we are resolving `<T as TraitRef<...>>::Item == Type`,
+ // start out by selecting the predicate `T as TraitRef<...>`:
+ let poly_trait_ref = ty::Binder::dummy(obligation.predicate.trait_ref(selcx.tcx()));
+ let trait_obligation = obligation.with(poly_trait_ref.to_poly_trait_predicate());
+ let _ = selcx.infcx().commit_if_ok(|_| {
+ let impl_source = match selcx.select(&trait_obligation) {
+ Ok(Some(impl_source)) => impl_source,
+ Ok(None) => {
+ candidate_set.mark_ambiguous();
+ return Err(());
+ }
+ Err(e) => {
+ debug!(error = ?e, "selection error");
+ candidate_set.mark_error(e);
+ return Err(());
+ }
+ };
+
+ let eligible = match &impl_source {
+ super::ImplSource::Closure(_)
+ | super::ImplSource::Generator(_)
+ | super::ImplSource::FnPointer(_)
+ | super::ImplSource::TraitAlias(_) => true,
+ super::ImplSource::UserDefined(impl_data) => {
+ // We have to be careful when projecting out of an
+ // impl because of specialization. If we are not in
+ // codegen (i.e., projection mode is not "any"), and the
+ // impl's type is declared as default, then we disable
+ // projection (even if the trait ref is fully
+ // monomorphic). In the case where trait ref is not
+ // fully monomorphic (i.e., includes type parameters),
+ // this is because those type parameters may
+ // ultimately be bound to types from other crates that
+ // may have specialized impls we can't see. In the
+ // case where the trait ref IS fully monomorphic, this
+ // is a policy decision that we made in the RFC in
+ // order to preserve flexibility for the crate that
+ // defined the specializable impl to specialize later
+ // for existing types.
+ //
+ // In either case, we handle this by not adding a
+ // candidate for an impl if it contains a `default`
+ // type.
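+                //
+                // A sketch of the shape being skipped (illustrative):
+                //
+                // ```ignore (illustrative)
+                // impl<T> Trait for T {
+                //     // Only projectable once we're under `Reveal::All` and
+                //     // the trait ref is fully monomorphic:
+                //     default type Assoc = u8;
+                // }
+                // ```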
+ //
+ // NOTE: This should be kept in sync with the similar code in
+ // `rustc_ty_utils::instance::resolve_associated_item()`.
+ let node_item =
+ assoc_def(selcx, impl_data.impl_def_id, obligation.predicate.item_def_id)
+ .map_err(|ErrorGuaranteed { .. }| ())?;
+
+ if node_item.is_final() {
+ // Non-specializable items are always projectable.
+ true
+ } else {
+ // Only reveal a specializable default if we're past type-checking
+ // and the obligation is monomorphic, otherwise passes such as
+ // transmute checking and polymorphic MIR optimizations could
+ // get a result which isn't correct for all monomorphizations.
+ if obligation.param_env.reveal() == Reveal::All {
+ // NOTE(eddyb) inference variables can resolve to parameters, so
+ // assume `poly_trait_ref` isn't monomorphic, if it contains any.
+ let poly_trait_ref = selcx.infcx().resolve_vars_if_possible(poly_trait_ref);
+ !poly_trait_ref.still_further_specializable()
+ } else {
+ debug!(
+ assoc_ty = ?selcx.tcx().def_path_str(node_item.item.def_id),
+ ?obligation.predicate,
+ "assemble_candidates_from_impls: not eligible due to default",
+ );
+ false
+ }
+ }
+ }
+ super::ImplSource::DiscriminantKind(..) => {
+ // While `DiscriminantKind` is automatically implemented for every type,
+ // the concrete discriminant may not be known yet.
+ //
+ // Any type with multiple potential discriminant types is therefore not eligible.
+ let self_ty = selcx.infcx().shallow_resolve(obligation.predicate.self_ty());
+
+ match self_ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(..)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(..)
+ | ty::Dynamic(..)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ | ty::Tuple(..)
+ // Integers and floats always have `u8` as their discriminant.
+ | ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(..)) => true,
+
+ ty::Projection(..)
+ | ty::Opaque(..)
+ | ty::Param(..)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Infer(..)
+ | ty::Error(_) => false,
+ }
+ }
+ super::ImplSource::Pointee(..) => {
+ // While `Pointee` is automatically implemented for every type,
+ // the concrete metadata type may not be known yet.
+ //
+ // Any type with multiple potential metadata types is therefore not eligible.
+ let self_ty = selcx.infcx().shallow_resolve(obligation.predicate.self_ty());
+
+ let tail = selcx.tcx().struct_tail_with_normalize(
+ self_ty,
+ |ty| {
+ // We throw away any obligations we get from this, since we normalize
+ // and confirm these obligations once again during confirmation
+ normalize_with_depth(
+ selcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ ty,
+ )
+ .value
+ },
+ || {},
+ );
+
+ match tail.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(..)
+ | ty::Dynamic(..)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ // Extern types have unit metadata, according to RFC 2850
+ | ty::Foreign(_)
+ // If returned by `struct_tail_without_normalization` this is a unit struct
+ // without any fields, or not a struct, and therefore is Sized.
+ | ty::Adt(..)
+ // If returned by `struct_tail_without_normalization` this is the empty tuple.
+ | ty::Tuple(..)
+ // Integers and floats are always Sized, and so have unit type metadata.
+ | ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(..)) => true,
+
+ // type parameters, opaques, and unnormalized projections have pointer
+ // metadata if they're known (e.g. by the param_env) to be sized
+ ty::Param(_) | ty::Projection(..) | ty::Opaque(..)
+ if selcx.infcx().predicate_must_hold_modulo_regions(
+ &obligation.with(
+ ty::Binder::dummy(ty::TraitRef::new(
+ selcx.tcx().require_lang_item(LangItem::Sized, None),
+ selcx.tcx().mk_substs_trait(self_ty, &[]),
+ ))
+ .without_const()
+ .to_predicate(selcx.tcx()),
+ ),
+ ) =>
+ {
+ true
+ }
+
+ // FIXME(compiler-errors): are Bound and Placeholder types ever known sized?
+ ty::Param(_)
+ | ty::Projection(..)
+ | ty::Opaque(..)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Infer(..)
+ | ty::Error(_) => {
+ if tail.has_infer_types() {
+ candidate_set.mark_ambiguous();
+ }
+ false
+ }
+ }
+ }
+ super::ImplSource::Param(..) => {
+                // This case tells us nothing about the value of an
+ // associated type. Consider:
+ //
+ // ```
+ // trait SomeTrait { type Foo; }
+ // fn foo<T:SomeTrait>(...) { }
+ // ```
+ //
+ // If the user writes `<T as SomeTrait>::Foo`, then the `T
+ // : SomeTrait` binding does not help us decide what the
+ // type `Foo` is (at least, not more specifically than
+ // what we already knew).
+ //
+ // But wait, you say! What about an example like this:
+ //
+ // ```
+ // fn bar<T:SomeTrait<Foo=usize>>(...) { ... }
+ // ```
+ //
+ // Doesn't the `T : SomeTrait<Foo=usize>` predicate help
+ // resolve `T::Foo`? And of course it does, but in fact
+ // that single predicate is desugared into two predicates
+ // in the compiler: a trait predicate (`T : SomeTrait`) and a
+ // projection. And the projection where clause is handled
+ // in `assemble_candidates_from_param_env`.
+ false
+ }
+ super::ImplSource::Object(_) => {
+ // Handled by the `Object` projection candidate. See
+ // `assemble_candidates_from_object_ty` for an explanation of
+ // why we special case object types.
+ false
+ }
+ super::ImplSource::AutoImpl(..)
+ | super::ImplSource::Builtin(..)
+ | super::ImplSource::TraitUpcasting(_)
+ | super::ImplSource::ConstDestruct(_) => {
+ // These traits have no associated types.
+ selcx.tcx().sess.delay_span_bug(
+ obligation.cause.span,
+ &format!("Cannot project an associated type from `{:?}`", impl_source),
+ );
+ return Err(());
+ }
+ };
+
+ if eligible {
+ if candidate_set.push_candidate(ProjectionCandidate::Select(impl_source)) {
+ Ok(())
+ } else {
+ Err(())
+ }
+ } else {
+ Err(())
+ }
+ });
+}
+
+fn confirm_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ candidate: ProjectionCandidate<'tcx>,
+) -> Progress<'tcx> {
+ debug!(?obligation, ?candidate, "confirm_candidate");
+ let mut progress = match candidate {
+ ProjectionCandidate::ParamEnv(poly_projection)
+ | ProjectionCandidate::Object(poly_projection) => {
+ confirm_param_env_candidate(selcx, obligation, poly_projection, false)
+ }
+
+ ProjectionCandidate::TraitDef(poly_projection) => {
+ confirm_param_env_candidate(selcx, obligation, poly_projection, true)
+ }
+
+ ProjectionCandidate::Select(impl_source) => {
+ confirm_select_candidate(selcx, obligation, impl_source)
+ }
+ };
+
+    // When checking for cycles during evaluation, we compare predicates with
+ // "syntactic" equality. Since normalization generally introduces a type
+ // with new region variables, we need to resolve them to existing variables
+ // when possible for this to work. See `auto-trait-projection-recursion.rs`
+ // for a case where this matters.
+ if progress.term.has_infer_regions() {
+ progress.term =
+ progress.term.fold_with(&mut OpportunisticRegionResolver::new(selcx.infcx()));
+ }
+ progress
+}
+
+fn confirm_select_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ impl_source: Selection<'tcx>,
+) -> Progress<'tcx> {
+ match impl_source {
+ super::ImplSource::UserDefined(data) => confirm_impl_candidate(selcx, obligation, data),
+ super::ImplSource::Generator(data) => confirm_generator_candidate(selcx, obligation, data),
+ super::ImplSource::Closure(data) => confirm_closure_candidate(selcx, obligation, data),
+ super::ImplSource::FnPointer(data) => confirm_fn_pointer_candidate(selcx, obligation, data),
+ super::ImplSource::DiscriminantKind(data) => {
+ confirm_discriminant_kind_candidate(selcx, obligation, data)
+ }
+ super::ImplSource::Pointee(data) => confirm_pointee_candidate(selcx, obligation, data),
+ super::ImplSource::Object(_)
+ | super::ImplSource::AutoImpl(..)
+ | super::ImplSource::Param(..)
+ | super::ImplSource::Builtin(..)
+ | super::ImplSource::TraitUpcasting(_)
+ | super::ImplSource::TraitAlias(..)
+ | super::ImplSource::ConstDestruct(_) => {
+ // we don't create Select candidates with this kind of resolution
+ span_bug!(
+ obligation.cause.span,
+ "Cannot project an associated type from `{:?}`",
+ impl_source
+ )
+ }
+ }
+}
+
+fn confirm_generator_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ impl_source: ImplSourceGeneratorData<'tcx, PredicateObligation<'tcx>>,
+) -> Progress<'tcx> {
+ let gen_sig = impl_source.substs.as_generator().poly_sig();
+ let Normalized { value: gen_sig, obligations } = normalize_with_depth(
+ selcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ gen_sig,
+ );
+
+ debug!(?obligation, ?gen_sig, ?obligations, "confirm_generator_candidate");
+
+ let tcx = selcx.tcx();
+
+ let gen_def_id = tcx.require_lang_item(LangItem::Generator, None);
+
+ let predicate = super::util::generator_trait_ref_and_outputs(
+ tcx,
+ gen_def_id,
+ obligation.predicate.self_ty(),
+ gen_sig,
+ )
+ .map_bound(|(trait_ref, yield_ty, return_ty)| {
+ let name = tcx.associated_item(obligation.predicate.item_def_id).name;
+ let ty = if name == sym::Return {
+ return_ty
+ } else if name == sym::Yield {
+ yield_ty
+ } else {
+ bug!()
+ };
+
+ ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy {
+ substs: trait_ref.substs,
+ item_def_id: obligation.predicate.item_def_id,
+ },
+ term: ty.into(),
+ }
+ });
+
+ confirm_param_env_candidate(selcx, obligation, predicate, false)
+ .with_addl_obligations(impl_source.nested)
+ .with_addl_obligations(obligations)
+}
+
+fn confirm_discriminant_kind_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ _: ImplSourceDiscriminantKindData,
+) -> Progress<'tcx> {
+ let tcx = selcx.tcx();
+
+ let self_ty = selcx.infcx().shallow_resolve(obligation.predicate.self_ty());
+ // We get here from `poly_project_and_unify_type` which replaces bound vars
+ // with placeholders
+ debug_assert!(!self_ty.has_escaping_bound_vars());
+ let substs = tcx.mk_substs([self_ty.into()].iter());
+
+ let discriminant_def_id = tcx.require_lang_item(LangItem::Discriminant, None);
+
+ let predicate = ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy { substs, item_def_id: discriminant_def_id },
+ term: self_ty.discriminant_ty(tcx).into(),
+ };
+
+ // We get here from `poly_project_and_unify_type` which replaces bound vars
+ // with placeholders, so dummy is okay here.
+ confirm_param_env_candidate(selcx, obligation, ty::Binder::dummy(predicate), false)
+}
+
+fn confirm_pointee_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ _: ImplSourcePointeeData,
+) -> Progress<'tcx> {
+ let tcx = selcx.tcx();
+ let self_ty = selcx.infcx().shallow_resolve(obligation.predicate.self_ty());
+
+ let mut obligations = vec![];
+ let (metadata_ty, check_is_sized) = self_ty.ptr_metadata_ty(tcx, |ty| {
+ normalize_with_depth_to(
+ selcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ ty,
+ &mut obligations,
+ )
+ });
+ if check_is_sized {
+ let sized_predicate = ty::Binder::dummy(ty::TraitRef::new(
+ tcx.require_lang_item(LangItem::Sized, None),
+ tcx.mk_substs_trait(self_ty, &[]),
+ ))
+ .without_const()
+ .to_predicate(tcx);
+ obligations.push(Obligation::new(
+ obligation.cause.clone(),
+ obligation.param_env,
+ sized_predicate,
+ ));
+ }
+
+ let substs = tcx.mk_substs([self_ty.into()].iter());
+ let metadata_def_id = tcx.require_lang_item(LangItem::Metadata, None);
+
+ let predicate = ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy { substs, item_def_id: metadata_def_id },
+ term: metadata_ty.into(),
+ };
+
+ confirm_param_env_candidate(selcx, obligation, ty::Binder::dummy(predicate), false)
+ .with_addl_obligations(obligations)
+}
+
+fn confirm_fn_pointer_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ fn_pointer_impl_source: ImplSourceFnPointerData<'tcx, PredicateObligation<'tcx>>,
+) -> Progress<'tcx> {
+ let fn_type = selcx.infcx().shallow_resolve(fn_pointer_impl_source.fn_ty);
+ let sig = fn_type.fn_sig(selcx.tcx());
+ let Normalized { value: sig, obligations } = normalize_with_depth(
+ selcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ sig,
+ );
+
+ confirm_callable_candidate(selcx, obligation, sig, util::TupleArgumentsFlag::Yes)
+ .with_addl_obligations(fn_pointer_impl_source.nested)
+ .with_addl_obligations(obligations)
+}
+
+fn confirm_closure_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ impl_source: ImplSourceClosureData<'tcx, PredicateObligation<'tcx>>,
+) -> Progress<'tcx> {
+ let closure_sig = impl_source.substs.as_closure().sig();
+ let Normalized { value: closure_sig, obligations } = normalize_with_depth(
+ selcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ closure_sig,
+ );
+
+ debug!(?obligation, ?closure_sig, ?obligations, "confirm_closure_candidate");
+
+ confirm_callable_candidate(selcx, obligation, closure_sig, util::TupleArgumentsFlag::No)
+ .with_addl_obligations(impl_source.nested)
+ .with_addl_obligations(obligations)
+}
+
+fn confirm_callable_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ fn_sig: ty::PolyFnSig<'tcx>,
+ flag: util::TupleArgumentsFlag,
+) -> Progress<'tcx> {
+ let tcx = selcx.tcx();
+
+ debug!(?obligation, ?fn_sig, "confirm_callable_candidate");
+
+ let fn_once_def_id = tcx.require_lang_item(LangItem::FnOnce, None);
+ let fn_once_output_def_id = tcx.require_lang_item(LangItem::FnOnceOutput, None);
+
+ let predicate = super::util::closure_trait_ref_and_return_type(
+ tcx,
+ fn_once_def_id,
+ obligation.predicate.self_ty(),
+ fn_sig,
+ flag,
+ )
+ .map_bound(|(trait_ref, ret_type)| ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy {
+ substs: trait_ref.substs,
+ item_def_id: fn_once_output_def_id,
+ },
+ term: ret_type.into(),
+ });
+
+ confirm_param_env_candidate(selcx, obligation, predicate, true)
+}
+
+fn confirm_param_env_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ poly_cache_entry: ty::PolyProjectionPredicate<'tcx>,
+ potentially_unnormalized_candidate: bool,
+) -> Progress<'tcx> {
+ let infcx = selcx.infcx();
+ let cause = &obligation.cause;
+ let param_env = obligation.param_env;
+
+ let cache_entry = infcx.replace_bound_vars_with_fresh_vars(
+ cause.span,
+ LateBoundRegionConversionTime::HigherRankedType,
+ poly_cache_entry,
+ );
+
+ let cache_projection = cache_entry.projection_ty;
+ let mut nested_obligations = Vec::new();
+ let obligation_projection = obligation.predicate;
+ let obligation_projection = ensure_sufficient_stack(|| {
+ normalize_with_depth_to(
+ selcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ obligation_projection,
+ &mut nested_obligations,
+ )
+ });
+ let cache_projection = if potentially_unnormalized_candidate {
+ ensure_sufficient_stack(|| {
+ normalize_with_depth_to(
+ selcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ cache_projection,
+ &mut nested_obligations,
+ )
+ })
+ } else {
+ cache_projection
+ };
+
+ debug!(?cache_projection, ?obligation_projection);
+
+ match infcx.at(cause, param_env).eq(cache_projection, obligation_projection) {
+ Ok(InferOk { value: _, obligations }) => {
+ nested_obligations.extend(obligations);
+ assoc_ty_own_obligations(selcx, obligation, &mut nested_obligations);
+ // FIXME(associated_const_equality): Handle consts here as well? Maybe this progress type should just take
+ // a term instead.
+ Progress { term: cache_entry.term, obligations: nested_obligations }
+ }
+ Err(e) => {
+ let msg = format!(
+ "Failed to unify obligation `{:?}` with poly_projection `{:?}`: {:?}",
+ obligation, poly_cache_entry, e,
+ );
+ debug!("confirm_param_env_candidate: {}", msg);
+ let err = infcx.tcx.ty_error_with_message(obligation.cause.span, &msg);
+ Progress { term: err.into(), obligations: vec![] }
+ }
+ }
+}
+
+fn confirm_impl_candidate<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ impl_impl_source: ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>>,
+) -> Progress<'tcx> {
+ let tcx = selcx.tcx();
+
+ let ImplSourceUserDefinedData { impl_def_id, substs, mut nested } = impl_impl_source;
+ let assoc_item_id = obligation.predicate.item_def_id;
+ let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
+
+ let param_env = obligation.param_env;
+ let Ok(assoc_ty) = assoc_def(selcx, impl_def_id, assoc_item_id) else {
+ return Progress { term: tcx.ty_error().into(), obligations: nested };
+ };
+
+ if !assoc_ty.item.defaultness(tcx).has_value() {
+ // This means that the impl is missing a definition for the
+ // associated type. This error will be reported by the type
+ // checker method `check_impl_items_against_trait`, so here we
+ // just return Error.
+ debug!(
+ "confirm_impl_candidate: no associated type {:?} for {:?}",
+ assoc_ty.item.name, obligation.predicate
+ );
+ return Progress { term: tcx.ty_error().into(), obligations: nested };
+ }
+ // If we're trying to normalize `<Vec<u32> as X>::A<S>` using
+    // `impl<T> X for Vec<T> { type A<Y> = Box<Y>; }`, then:
+ //
+ // * `obligation.predicate.substs` is `[Vec<u32>, S]`
+ // * `substs` is `[u32]`
+ // * `substs` ends up as `[u32, S]`
+ let substs = obligation.predicate.substs.rebase_onto(tcx, trait_def_id, substs);
+ let substs =
+ translate_substs(selcx.infcx(), param_env, impl_def_id, substs, assoc_ty.defining_node);
+ let ty = tcx.bound_type_of(assoc_ty.item.def_id);
+ let is_const = matches!(tcx.def_kind(assoc_ty.item.def_id), DefKind::AssocConst);
+ let term: ty::EarlyBinder<ty::Term<'tcx>> = if is_const {
+ let identity_substs =
+ crate::traits::InternalSubsts::identity_for_item(tcx, assoc_ty.item.def_id);
+ let did = ty::WithOptConstParam::unknown(assoc_ty.item.def_id);
+ let kind = ty::ConstKind::Unevaluated(ty::Unevaluated::new(did, identity_substs));
+ ty.map_bound(|ty| tcx.mk_const(ty::ConstS { ty, kind }).into())
+ } else {
+ ty.map_bound(|ty| ty.into())
+ };
+ if substs.len() != tcx.generics_of(assoc_ty.item.def_id).count() {
+ let err = tcx.ty_error_with_message(
+ obligation.cause.span,
+ "impl item and trait item have different parameter counts",
+ );
+ Progress { term: err.into(), obligations: nested }
+ } else {
+ assoc_ty_own_obligations(selcx, obligation, &mut nested);
+ Progress { term: term.subst(tcx, substs), obligations: nested }
+ }
+}
+
+// Get obligations corresponding to the predicates from the where-clause of the
+// associated type itself.
+// Note: `feature(generic_associated_types)` is required to write such
+// predicates, even for non-generic associated types.
+fn assoc_ty_own_obligations<'cx, 'tcx>(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ obligation: &ProjectionTyObligation<'tcx>,
+ nested: &mut Vec<PredicateObligation<'tcx>>,
+) {
+ let tcx = selcx.tcx();
+ for predicate in tcx
+ .predicates_of(obligation.predicate.item_def_id)
+ .instantiate_own(tcx, obligation.predicate.substs)
+ .predicates
+ {
+ let normalized = normalize_with_depth_to(
+ selcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ predicate,
+ nested,
+ );
+ nested.push(Obligation::with_depth(
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ normalized,
+ ));
+ }
+}
+
+/// Locate the definition of an associated type in the specialization hierarchy,
+/// starting from the given impl.
+///
+/// Based on the "projection mode", this lookup may in fact only examine the
+/// topmost impl. See the comments for `Reveal` for more details.
+fn assoc_def(
+ selcx: &SelectionContext<'_, '_>,
+ impl_def_id: DefId,
+ assoc_def_id: DefId,
+) -> Result<specialization_graph::LeafDef, ErrorGuaranteed> {
+ let tcx = selcx.tcx();
+ let trait_def_id = tcx.impl_trait_ref(impl_def_id).unwrap().def_id;
+ let trait_def = tcx.trait_def(trait_def_id);
+
+ // This function may be called while we are still building the
+ // specialization graph that is queried below (via TraitDef::ancestors()),
+ // so, in order to avoid unnecessary infinite recursion, we manually look
+ // for the associated item at the given impl.
+ // If there is no such item in that impl, this function will fail with a
+ // cycle error if the specialization graph is currently being built.
+ if let Some(&impl_item_id) = tcx.impl_item_implementor_ids(impl_def_id).get(&assoc_def_id) {
+ let item = tcx.associated_item(impl_item_id);
+ let impl_node = specialization_graph::Node::Impl(impl_def_id);
+ return Ok(specialization_graph::LeafDef {
+ item: *item,
+ defining_node: impl_node,
+ finalizing_node: if item.defaultness(tcx).is_default() {
+ None
+ } else {
+ Some(impl_node)
+ },
+ });
+ }
+
+ let ancestors = trait_def.ancestors(tcx, impl_def_id)?;
+ if let Some(assoc_item) = ancestors.leaf_def(tcx, assoc_def_id) {
+ Ok(assoc_item)
+ } else {
+        // This is saying that neither the trait nor
+        // the impl contains a definition for this
+ // associated type. Normally this situation
+ // could only arise through a compiler bug --
+ // if the user wrote a bad item name, it
+ // should have failed in astconv.
+ bug!(
+ "No associated type `{}` for {}",
+ tcx.item_name(assoc_def_id),
+ tcx.def_path_str(impl_def_id)
+ )
+ }
+}
+
+pub(crate) trait ProjectionCacheKeyExt<'cx, 'tcx>: Sized {
+ fn from_poly_projection_predicate(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ predicate: ty::PolyProjectionPredicate<'tcx>,
+ ) -> Option<Self>;
+}
+
+impl<'cx, 'tcx> ProjectionCacheKeyExt<'cx, 'tcx> for ProjectionCacheKey<'tcx> {
+ fn from_poly_projection_predicate(
+ selcx: &mut SelectionContext<'cx, 'tcx>,
+ predicate: ty::PolyProjectionPredicate<'tcx>,
+ ) -> Option<Self> {
+ let infcx = selcx.infcx();
+ // We don't do cross-snapshot caching of obligations with escaping regions,
+ // so there's no cache key to use
+ predicate.no_bound_vars().map(|predicate| {
+ ProjectionCacheKey::new(
+ // We don't attempt to match up with a specific type-variable state
+ // from a specific call to `opt_normalize_projection_type` - if
+ // there's no precise match, the original cache entry is "stranded"
+ // anyway.
+ infcx.resolve_vars_if_possible(predicate.projection_ty),
+ )
+ })
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
new file mode 100644
index 000000000..aad3c37f8
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/dropck_outlives.rs
@@ -0,0 +1,73 @@
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+pub use rustc_middle::traits::query::{DropckConstraint, DropckOutlivesResult};
+
+/// This returns true if the type `ty` is "trivial" for
+/// dropck-outlives -- that is, if it doesn't require any types to
+/// outlive the destructor's execution. This is similar to, but not
+/// *quite* the same as, the existing `needs_drop` test in the
+/// compiler -- that is, for every type T for which this function
+/// returns true, `needs_drop` would return `false`. But the reverse
+/// does not hold: in particular,
+/// `needs_drop` returns false for `PhantomData`, but it is not
+/// trivial for dropck-outlives.
+///
+/// Note also that `needs_drop` requires a "global" type (i.e., one
+/// with erased regions), but this function does not.
+///
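+/// For intuition: `PhantomData<T>` is non-trivial here because dropck
+/// treats it as owning a `T`, so the constraints of dropping a `T` apply
+/// wherever it appears. A sketch in plain Rust (hypothetical type, for
+/// illustration only):
+///
+/// ```ignore (illustrative)
+/// use std::marker::PhantomData;
+/// // needs_drop::<Owns<T>>() is false, but for dropck-outlives purposes
+/// // Owns<T> is analyzed as if it owned (and hence dropped) a T.
+/// struct Owns<T>(PhantomData<T>);
+/// ```
+///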
+// FIXME(@lcnr): remove this module and move this function somewhere else.
+pub fn trivial_dropck_outlives<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
+ match ty.kind() {
+ // None of these types have a destructor and hence they do not
+ // require anything in particular to outlive the dtor's
+ // execution.
+ ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Bool
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Never
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Char
+ | ty::GeneratorWitness(..)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::Str
+ | ty::Foreign(..)
+ | ty::Error(_) => true,
+
+        // [T; N] and [T] have the same properties as T.
+ ty::Array(ty, _) | ty::Slice(ty) => trivial_dropck_outlives(tcx, *ty),
+
+        // (T1..Tn) and closures have the same properties as T1..Tn --
+ // check if *all* of them are trivial.
+ ty::Tuple(tys) => tys.iter().all(|t| trivial_dropck_outlives(tcx, t)),
+ ty::Closure(_, ref substs) => {
+ trivial_dropck_outlives(tcx, substs.as_closure().tupled_upvars_ty())
+ }
+
+ ty::Adt(def, _) => {
+ if Some(def.did()) == tcx.lang_items().manually_drop() {
+ // `ManuallyDrop` never has a dtor.
+ true
+ } else {
+ // Other types might. Moreover, PhantomData doesn't
+ // have a dtor, but it is considered to own its
+ // content, so it is non-trivial. Unions can have `impl Drop`,
+ // and hence are non-trivial as well.
+ false
+ }
+ }
+
+ // The following *might* require a destructor: needs deeper inspection.
+ ty::Dynamic(..)
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Opaque(..)
+ | ty::Placeholder(..)
+ | ty::Infer(_)
+ | ty::Bound(..)
+ | ty::Generator(..) => false,
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs
new file mode 100644
index 000000000..32669e23d
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/evaluate_obligation.rs
@@ -0,0 +1,118 @@
+use rustc_middle::ty;
+
+use crate::infer::canonical::OriginalQueryValues;
+use crate::infer::InferCtxt;
+use crate::traits::{
+ EvaluationResult, OverflowError, PredicateObligation, SelectionContext, TraitQueryMode,
+};
+
+pub trait InferCtxtExt<'tcx> {
+ fn predicate_may_hold(&self, obligation: &PredicateObligation<'tcx>) -> bool;
+
+ fn predicate_must_hold_considering_regions(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> bool;
+
+ fn predicate_must_hold_modulo_regions(&self, obligation: &PredicateObligation<'tcx>) -> bool;
+
+ fn evaluate_obligation(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> Result<EvaluationResult, OverflowError>;
+
+ // Helper function that canonicalizes and runs the query. If an
+ // overflow results, we re-run it in the local context so we can
+ // report a nice error.
+ /*crate*/
+ fn evaluate_obligation_no_overflow(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> EvaluationResult;
+}
+
+impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
+ /// Evaluates whether the predicate can be satisfied (by any means)
+ /// in the given `ParamEnv`.
+ fn predicate_may_hold(&self, obligation: &PredicateObligation<'tcx>) -> bool {
+ self.evaluate_obligation_no_overflow(obligation).may_apply()
+ }
+
+ /// Evaluates whether the predicate can be satisfied in the given
+ /// `ParamEnv`, and returns `false` if not certain. However, this is
+ /// not entirely accurate if inference variables are involved.
+ ///
+ /// This version may conservatively fail when outlives obligations
+ /// are required.
+ fn predicate_must_hold_considering_regions(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> bool {
+ self.evaluate_obligation_no_overflow(obligation).must_apply_considering_regions()
+ }
+
+ /// Evaluates whether the predicate can be satisfied in the given
+ /// `ParamEnv`, and returns `false` if not certain. However, this is
+ /// not entirely accurate if inference variables are involved.
+ ///
+ /// This version ignores all outlives constraints.
+ fn predicate_must_hold_modulo_regions(&self, obligation: &PredicateObligation<'tcx>) -> bool {
+ self.evaluate_obligation_no_overflow(obligation).must_apply_modulo_regions()
+ }
+
+ /// Evaluate a given predicate, capturing overflow and propagating it back.
+ fn evaluate_obligation(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> Result<EvaluationResult, OverflowError> {
+ let mut _orig_values = OriginalQueryValues::default();
+
+ let param_env = match obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => {
+                // we ignore the value assigned to it.
+ let mut _constness = pred.constness;
+ obligation
+ .param_env
+ .with_constness(_constness.and(obligation.param_env.constness()))
+ }
+ // constness has no effect on the given predicate.
+ _ => obligation.param_env.without_const(),
+ };
+
+ let c_pred = self
+ .canonicalize_query_keep_static(param_env.and(obligation.predicate), &mut _orig_values);
+ // Run canonical query. If overflow occurs, rerun from scratch but this time
+ // in standard trait query mode so that overflow is handled appropriately
+ // within `SelectionContext`.
+ self.tcx.at(obligation.cause.span()).evaluate_obligation(c_pred)
+ }
+
+ // Helper function that canonicalizes and runs the query. If an
+ // overflow results, we re-run it in the local context so we can
+ // report a nice error.
+ fn evaluate_obligation_no_overflow(
+ &self,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> EvaluationResult {
+ match self.evaluate_obligation(obligation) {
+ Ok(result) => result,
+ Err(OverflowError::Canonical) => {
+ let mut selcx = SelectionContext::with_query_mode(&self, TraitQueryMode::Standard);
+ selcx.evaluate_root_obligation(obligation).unwrap_or_else(|r| match r {
+ OverflowError::Canonical => {
+ span_bug!(
+ obligation.cause.span,
+ "Overflow should be caught earlier in standard query mode: {:?}, {:?}",
+ obligation,
+ r,
+ )
+ }
+ OverflowError::ErrorReporting => EvaluationResult::EvaluatedToErr,
+ OverflowError::Error(_) => EvaluationResult::EvaluatedToErr,
+ })
+ }
+ Err(OverflowError::ErrorReporting) => EvaluationResult::EvaluatedToErr,
+ Err(OverflowError::Error(_)) => EvaluationResult::EvaluatedToErr,
+ }
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/method_autoderef.rs b/compiler/rustc_trait_selection/src/traits/query/method_autoderef.rs
new file mode 100644
index 000000000..3c0ebec93
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/method_autoderef.rs
@@ -0,0 +1,3 @@
+pub use rustc_middle::traits::query::{
+ CandidateStep, MethodAutoderefBadTy, MethodAutoderefStepsResult,
+};
diff --git a/compiler/rustc_trait_selection/src/traits/query/mod.rs b/compiler/rustc_trait_selection/src/traits/query/mod.rs
new file mode 100644
index 000000000..ef3493678
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/mod.rs
@@ -0,0 +1,14 @@
+//! Experimental types for the trait query interface. The methods
+//! defined in this module are all based on **canonicalization**,
+//! which makes a canonical query by replacing unbound inference
+//! variables and regions, so that results can be reused more broadly.
+//! The providers for the queries defined here can be found in
+//! `rustc_traits`.
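+//!
+//! For intuition: canonicalizing a goal such as `?T: Clone` (where `?T`
+//! is an unbound inference variable) yields a canonical form along the
+//! lines of `for<T0> T0: Clone`, so a cached result can be reused by any
+//! later query that differs only in the identity of its inference
+//! variables. (Informal notation for illustration; not actual compiler
+//! syntax.)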
+
+pub mod dropck_outlives;
+pub mod evaluate_obligation;
+pub mod method_autoderef;
+pub mod normalize;
+pub mod type_op;
+
+pub use rustc_middle::traits::query::*;
diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
new file mode 100644
index 000000000..449d7a7b4
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs
@@ -0,0 +1,354 @@
+//! Code for the 'normalization' query. This consists of a wrapper
+//! which folds deeply, invoking the underlying
+//! `normalize_projection_ty` query when it encounters projections.
+
+use crate::infer::at::At;
+use crate::infer::canonical::OriginalQueryValues;
+use crate::infer::{InferCtxt, InferOk};
+use crate::traits::error_reporting::InferCtxtExt;
+use crate::traits::project::needs_normalization;
+use crate::traits::{Obligation, ObligationCause, PredicateObligation, Reveal};
+use rustc_data_structures::sso::SsoHashMap;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_infer::traits::Normalized;
+use rustc_middle::mir;
+use rustc_middle::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable};
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitor};
+
+use std::ops::ControlFlow;
+
+use super::NoSolution;
+
+pub use rustc_middle::traits::query::NormalizationResult;
+
+pub trait AtExt<'tcx> {
+ fn normalize<T>(&self, value: T) -> Result<Normalized<'tcx, T>, NoSolution>
+ where
+ T: TypeFoldable<'tcx>;
+}
+
+impl<'cx, 'tcx> AtExt<'tcx> for At<'cx, 'tcx> {
+ /// Normalize `value` in the context of the inference context,
+ /// yielding a resulting type, or an error if `value` cannot be
+ /// normalized. If you don't care about regions, you should prefer
+ /// `normalize_erasing_regions`, which is more efficient.
+ ///
+ /// If the normalization succeeds and is unambiguous, returns back
+ /// the normalized value along with various outlives relations (in
+ /// the form of obligations that must be discharged).
+ ///
+ /// N.B., this will *eventually* be the main means of
+ /// normalizing, but for now should be used only when we actually
+ /// know that normalization will succeed, since error reporting
+ /// and other details are still "under development".
+ fn normalize<T>(&self, value: T) -> Result<Normalized<'tcx, T>, NoSolution>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!(
+ "normalize::<{}>(value={:?}, param_env={:?})",
+ std::any::type_name::<T>(),
+ value,
+ self.param_env,
+ );
+ if !needs_normalization(&value, self.param_env.reveal()) {
+ return Ok(Normalized { value, obligations: vec![] });
+ }
+
+ let mut normalizer = QueryNormalizer {
+ infcx: self.infcx,
+ cause: self.cause,
+ param_env: self.param_env,
+ obligations: vec![],
+ cache: SsoHashMap::new(),
+ anon_depth: 0,
+ universes: vec![],
+ };
+
+        // This is actually a consequence of the way `normalize_erasing_regions` currently works.
+        // Because it needs to call `normalize_generic_arg_after_erasing_regions`, it folds
+ // through tys and consts in a `TypeFoldable`. Importantly, it skips binders, leaving us
+ // with trying to normalize with escaping bound vars.
+ //
+ // Here, we just add the universes that we *would* have created had we passed through the binders.
+ //
+        // We *could* replace escaping bound vars eagerly here, but it doesn't really seem necessary.
+ // The rest of the code is already set up to be lazy about replacing bound vars,
+ // and only when we actually have to normalize.
+ if value.has_escaping_bound_vars() {
+ let mut max_visitor =
+ MaxEscapingBoundVarVisitor { outer_index: ty::INNERMOST, escaping: 0 };
+ value.visit_with(&mut max_visitor);
+ if max_visitor.escaping > 0 {
+ normalizer.universes.extend((0..max_visitor.escaping).map(|_| None));
+ }
+ }
+ let result = value.try_fold_with(&mut normalizer);
+ info!(
+ "normalize::<{}>: result={:?} with {} obligations",
+ std::any::type_name::<T>(),
+ result,
+ normalizer.obligations.len(),
+ );
+ debug!(
+ "normalize::<{}>: obligations={:?}",
+ std::any::type_name::<T>(),
+ normalizer.obligations,
+ );
+ result.map(|value| Normalized { value, obligations: normalizer.obligations })
+ }
+}
+
+// Visitor to find the maximum escaping bound var
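+//
+// For example, if the deepest escaping bound variable found lives two
+// binders above the value being visited, this records `escaping = 2`,
+// and the caller pushes two `None` universe entries to compensate.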
+struct MaxEscapingBoundVarVisitor {
+ // The index which would count as escaping
+ outer_index: ty::DebruijnIndex,
+ escaping: usize,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for MaxEscapingBoundVarVisitor {
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &ty::Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.outer_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.outer_index.shift_out(1);
+ result
+ }
+
+ #[inline]
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if t.outer_exclusive_binder() > self.outer_index {
+ self.escaping = self
+ .escaping
+ .max(t.outer_exclusive_binder().as_usize() - self.outer_index.as_usize());
+ }
+ ControlFlow::CONTINUE
+ }
+
+ #[inline]
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ ty::ReLateBound(debruijn, _) if debruijn > self.outer_index => {
+ self.escaping =
+ self.escaping.max(debruijn.as_usize() - self.outer_index.as_usize());
+ }
+ _ => {}
+ }
+ ControlFlow::CONTINUE
+ }
+
+ fn visit_const(&mut self, ct: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match ct.kind() {
+ ty::ConstKind::Bound(debruijn, _) if debruijn >= self.outer_index => {
+ self.escaping =
+ self.escaping.max(debruijn.as_usize() - self.outer_index.as_usize());
+ ControlFlow::CONTINUE
+ }
+ _ => ct.super_visit_with(self),
+ }
+ }
+}
+
+struct QueryNormalizer<'cx, 'tcx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ cause: &'cx ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ obligations: Vec<PredicateObligation<'tcx>>,
+ cache: SsoHashMap<Ty<'tcx>, Ty<'tcx>>,
+ anon_depth: usize,
+ universes: Vec<Option<ty::UniverseIndex>>,
+}
+
+impl<'cx, 'tcx> FallibleTypeFolder<'tcx> for QueryNormalizer<'cx, 'tcx> {
+ type Error = NoSolution;
+
+ fn tcx<'c>(&'c self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn try_fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> Result<ty::Binder<'tcx, T>, Self::Error> {
+ self.universes.push(None);
+ let t = t.try_super_fold_with(self);
+ self.universes.pop();
+ t
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn try_fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
+ if !needs_normalization(&ty, self.param_env.reveal()) {
+ return Ok(ty);
+ }
+
+ if let Some(ty) = self.cache.get(&ty) {
+ return Ok(*ty);
+ }
+
+ // See note in `rustc_trait_selection::traits::project` about why we
+ // wait to fold the substs.
+
+ // Wrap this in a closure so we don't accidentally return from the outer function
+ let res = (|| match *ty.kind() {
+ // This is really important. While we *can* handle this, this has
+ // severe performance implications for large opaque types with
+ // late-bound regions. See `issue-88862` benchmark.
+ ty::Opaque(def_id, substs) if !substs.has_escaping_bound_vars() => {
+ // Only normalize `impl Trait` outside of type inference, usually in codegen.
+ match self.param_env.reveal() {
+ Reveal::UserFacing => ty.try_super_fold_with(self),
+
+ Reveal::All => {
+ let substs = substs.try_fold_with(self)?;
+ let recursion_limit = self.tcx().recursion_limit();
+ if !recursion_limit.value_within_limit(self.anon_depth) {
+ let obligation = Obligation::with_depth(
+ self.cause.clone(),
+ recursion_limit.0,
+ self.param_env,
+ ty,
+ );
+ self.infcx.report_overflow_error(&obligation, true);
+ }
+
+ let generic_ty = self.tcx().bound_type_of(def_id);
+ let concrete_ty = generic_ty.subst(self.tcx(), substs);
+ self.anon_depth += 1;
+ if concrete_ty == ty {
+ bug!(
+ "infinite recursion generic_ty: {:#?}, substs: {:#?}, \
+ concrete_ty: {:#?}, ty: {:#?}",
+ generic_ty,
+ substs,
+ concrete_ty,
+ ty
+ );
+ }
+ let folded_ty = ensure_sufficient_stack(|| self.try_fold_ty(concrete_ty));
+ self.anon_depth -= 1;
+ folded_ty
+ }
+ }
+ }
+
+ ty::Projection(data) if !data.has_escaping_bound_vars() => {
+ // This branch is just an optimization: when we don't have escaping bound vars,
+ // we don't need to replace them with placeholders (see branch below).
+
+ let tcx = self.infcx.tcx;
+ let data = data.try_fold_with(self)?;
+
+ let mut orig_values = OriginalQueryValues::default();
+ // HACK(matthewjasper) `'static` is special-cased in selection,
+ // so we cannot canonicalize it.
+ let c_data = self
+ .infcx
+ .canonicalize_query_keep_static(self.param_env.and(data), &mut orig_values);
+ debug!("QueryNormalizer: c_data = {:#?}", c_data);
+ debug!("QueryNormalizer: orig_values = {:#?}", orig_values);
+ let result = tcx.normalize_projection_ty(c_data)?;
+ // We don't expect ambiguity.
+ if result.is_ambiguous() {
+ bug!("unexpected ambiguity: {:?} {:?}", c_data, result);
+ }
+ let InferOk { value: result, obligations } =
+ self.infcx.instantiate_query_response_and_region_obligations(
+ self.cause,
+ self.param_env,
+ &orig_values,
+ result,
+ )?;
+ debug!("QueryNormalizer: result = {:#?}", result);
+ debug!("QueryNormalizer: obligations = {:#?}", obligations);
+ self.obligations.extend(obligations);
+ Ok(result.normalized_ty)
+ }
+
+ ty::Projection(data) => {
+ // See note in `rustc_trait_selection::traits::project`
+
+ let tcx = self.infcx.tcx;
+ let infcx = self.infcx;
+ let (data, mapped_regions, mapped_types, mapped_consts) =
+ crate::traits::project::BoundVarReplacer::replace_bound_vars(
+ infcx,
+ &mut self.universes,
+ data,
+ );
+ let data = data.try_fold_with(self)?;
+
+ let mut orig_values = OriginalQueryValues::default();
+ // HACK(matthewjasper) `'static` is special-cased in selection,
+ // so we cannot canonicalize it.
+ let c_data = self
+ .infcx
+ .canonicalize_query_keep_static(self.param_env.and(data), &mut orig_values);
+ debug!("QueryNormalizer: c_data = {:#?}", c_data);
+ debug!("QueryNormalizer: orig_values = {:#?}", orig_values);
+ let result = tcx.normalize_projection_ty(c_data)?;
+ // We don't expect ambiguity.
+ if result.is_ambiguous() {
+ bug!("unexpected ambiguity: {:?} {:?}", c_data, result);
+ }
+ let InferOk { value: result, obligations } =
+ self.infcx.instantiate_query_response_and_region_obligations(
+ self.cause,
+ self.param_env,
+ &orig_values,
+ result,
+ )?;
+ debug!("QueryNormalizer: result = {:#?}", result);
+ debug!("QueryNormalizer: obligations = {:#?}", obligations);
+ self.obligations.extend(obligations);
+ Ok(crate::traits::project::PlaceholderReplacer::replace_placeholders(
+ infcx,
+ mapped_regions,
+ mapped_types,
+ mapped_consts,
+ &self.universes,
+ result.normalized_ty,
+ ))
+ }
+
+ _ => ty.try_super_fold_with(self),
+ })()?;
+ self.cache.insert(ty, res);
+ Ok(res)
+ }
+
+ fn try_fold_const(
+ &mut self,
+ constant: ty::Const<'tcx>,
+ ) -> Result<ty::Const<'tcx>, Self::Error> {
+ let constant = constant.try_super_fold_with(self)?;
+ Ok(constant.eval(self.infcx.tcx, self.param_env))
+ }
+
+ fn try_fold_mir_const(
+ &mut self,
+ constant: mir::ConstantKind<'tcx>,
+ ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
+ Ok(match constant {
+ mir::ConstantKind::Ty(c) => {
+ let const_folded = c.try_super_fold_with(self)?;
+ match const_folded.kind() {
+ ty::ConstKind::Value(valtree) => {
+ let tcx = self.infcx.tcx;
+ let ty = const_folded.ty();
+ let const_val = tcx.valtree_to_const_val((ty, valtree));
+ debug!(?ty, ?valtree, ?const_val);
+
+ mir::ConstantKind::Val(const_val, ty)
+ }
+ _ => mir::ConstantKind::Ty(const_folded),
+ }
+ }
+ mir::ConstantKind::Val(_, _) => constant.try_super_fold_with(self)?,
+ })
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs
new file mode 100644
index 000000000..86b015767
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/ascribe_user_type.rs
@@ -0,0 +1,23 @@
+use crate::infer::canonical::{Canonicalized, CanonicalizedQueryResponse};
+use crate::traits::query::Fallible;
+use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
+
+pub use rustc_middle::traits::query::type_op::AscribeUserType;
+
+impl<'tcx> super::QueryTypeOp<'tcx> for AscribeUserType<'tcx> {
+ type QueryResponse = ();
+
+ fn try_fast_path(
+ _tcx: TyCtxt<'tcx>,
+ _key: &ParamEnvAnd<'tcx, Self>,
+ ) -> Option<Self::QueryResponse> {
+ None
+ }
+
+ fn perform_query(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Self>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, ()>> {
+ tcx.type_op_ascribe_user_type(canonicalized)
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
new file mode 100644
index 000000000..c99564936
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/custom.rs
@@ -0,0 +1,117 @@
+use crate::infer::canonical::query_response;
+use crate::infer::{InferCtxt, InferOk};
+use crate::traits::engine::TraitEngineExt as _;
+use crate::traits::query::type_op::TypeOpOutput;
+use crate::traits::query::Fallible;
+use crate::traits::TraitEngine;
+use rustc_infer::infer::region_constraints::RegionConstraintData;
+use rustc_infer::traits::TraitEngineExt as _;
+use rustc_span::source_map::DUMMY_SP;
+
+use std::fmt;
+
+pub struct CustomTypeOp<F, G> {
+ closure: F,
+ description: G,
+}
+
+impl<F, G> CustomTypeOp<F, G> {
+ pub fn new<'tcx, R>(closure: F, description: G) -> Self
+ where
+ F: FnOnce(&InferCtxt<'_, 'tcx>) -> Fallible<InferOk<'tcx, R>>,
+ G: Fn() -> String,
+ {
+ CustomTypeOp { closure, description }
+ }
+}
+
+impl<'tcx, F, R, G> super::TypeOp<'tcx> for CustomTypeOp<F, G>
+where
+ F: for<'a, 'cx> FnOnce(&'a InferCtxt<'cx, 'tcx>) -> Fallible<InferOk<'tcx, R>>,
+ G: Fn() -> String,
+{
+ type Output = R;
+ /// We can't do any custom error reporting for `CustomTypeOp`, so
+ /// we can use `!` to enforce that the implementation never provides it.
+ type ErrorInfo = !;
+
+ /// Processes the operation and all resulting obligations,
+ /// returning the final result along with any region constraints
+ /// (they will be given over to the NLL region solver).
+ fn fully_perform(self, infcx: &InferCtxt<'_, 'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>> {
+ if cfg!(debug_assertions) {
+ info!("fully_perform({:?})", self);
+ }
+
+ Ok(scrape_region_constraints(infcx, || (self.closure)(infcx))?.0)
+ }
+}
+
+impl<F, G> fmt::Debug for CustomTypeOp<F, G>
+where
+ G: Fn() -> String,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", (self.description)())
+ }
+}
+
+/// Executes `op` and then scrapes out all the "old style" region
+/// constraints that result, creating query-region-constraints.
+pub fn scrape_region_constraints<'tcx, Op: super::TypeOp<'tcx, Output = R>, R>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ op: impl FnOnce() -> Fallible<InferOk<'tcx, R>>,
+) -> Fallible<(TypeOpOutput<'tcx, Op>, RegionConstraintData<'tcx>)> {
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+
+ // During NLL, we expect that nobody will register region
+ // obligations **except** as part of a custom type op (and, at the
+ // end of each custom type op, we scrape out the region
+ // obligations that resulted). So this vector should be empty on
+ // entry.
+ let pre_obligations = infcx.take_registered_region_obligations();
+ assert!(
+ pre_obligations.is_empty(),
+ "scrape_region_constraints: incoming region obligations = {:#?}",
+ pre_obligations,
+ );
+
+ let InferOk { value, obligations } = infcx.commit_if_ok(|_| op())?;
+ fulfill_cx.register_predicate_obligations(infcx, obligations);
+ let errors = fulfill_cx.select_all_or_error(infcx);
+ if !errors.is_empty() {
+ infcx.tcx.sess.diagnostic().delay_span_bug(
+ DUMMY_SP,
+ &format!("errors selecting obligation during MIR typeck: {:?}", errors),
+ );
+ }
+
+ let region_obligations = infcx.take_registered_region_obligations();
+
+ let region_constraint_data = infcx.take_and_reset_region_constraints();
+
+ let region_constraints = query_response::make_query_region_constraints(
+ infcx.tcx,
+ region_obligations
+ .iter()
+ .map(|r_o| (r_o.sup_type, r_o.sub_region))
+ .map(|(ty, r)| (infcx.resolve_vars_if_possible(ty), r)),
+ &region_constraint_data,
+ );
+
+ if region_constraints.is_empty() {
+ Ok((
+ TypeOpOutput { output: value, constraints: None, error_info: None },
+ region_constraint_data,
+ ))
+ } else {
+ Ok((
+ TypeOpOutput {
+ output: value,
+ constraints: Some(infcx.tcx.arena.alloc(region_constraints)),
+ error_info: None,
+ },
+ region_constraint_data,
+ ))
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/eq.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/eq.rs
new file mode 100644
index 000000000..490114aac
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/eq.rs
@@ -0,0 +1,23 @@
+use crate::infer::canonical::{Canonicalized, CanonicalizedQueryResponse};
+use crate::traits::query::Fallible;
+use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
+
+pub use rustc_middle::traits::query::type_op::Eq;
+
+impl<'tcx> super::QueryTypeOp<'tcx> for Eq<'tcx> {
+ type QueryResponse = ();
+
+ fn try_fast_path(
+ _tcx: TyCtxt<'tcx>,
+ key: &ParamEnvAnd<'tcx, Eq<'tcx>>,
+ ) -> Option<Self::QueryResponse> {
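+        // Fast path: syntactically identical types are trivially equal,
+        // so the canonical query can be skipped entirely.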
+ if key.value.a == key.value.b { Some(()) } else { None }
+ }
+
+ fn perform_query(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Self>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, ()>> {
+ tcx.type_op_eq(canonicalized)
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs
new file mode 100644
index 000000000..2a3319f0f
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs
@@ -0,0 +1,42 @@
+use crate::infer::canonical::{Canonicalized, CanonicalizedQueryResponse};
+use crate::traits::query::Fallible;
+use rustc_infer::traits::query::OutlivesBound;
+use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt};
+
+#[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct ImpliedOutlivesBounds<'tcx> {
+ pub ty: Ty<'tcx>,
+}
+
+impl<'tcx> super::QueryTypeOp<'tcx> for ImpliedOutlivesBounds<'tcx> {
+ type QueryResponse = Vec<OutlivesBound<'tcx>>;
+
+ fn try_fast_path(
+ _tcx: TyCtxt<'tcx>,
+ key: &ParamEnvAnd<'tcx, Self>,
+ ) -> Option<Self::QueryResponse> {
+ // Don't go into the query for things that can't possibly have lifetimes.
+ match key.value.ty.kind() {
+ ty::Tuple(elems) if elems.is_empty() => Some(vec![]),
+ ty::Never | ty::Str | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => {
+ Some(vec![])
+ }
+ _ => None,
+ }
+ }
+
+ fn perform_query(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Self>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self::QueryResponse>> {
+ // FIXME this `unchecked_map` is only necessary because the
+ // query is defined as taking a `ParamEnvAnd<Ty>`; it should
+ // take an `ImpliedOutlivesBounds` instead
+ let canonicalized = canonicalized.unchecked_map(|ParamEnvAnd { param_env, value }| {
+ let ImpliedOutlivesBounds { ty } = value;
+ param_env.and(ty)
+ });
+
+ tcx.implied_outlives_bounds(canonicalized)
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs
new file mode 100644
index 000000000..578e1d00c
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/mod.rs
@@ -0,0 +1,168 @@
+use crate::infer::canonical::{
+ Canonicalized, CanonicalizedQueryResponse, OriginalQueryValues, QueryRegionConstraints,
+};
+use crate::infer::{InferCtxt, InferOk};
+use crate::traits::query::Fallible;
+use crate::traits::ObligationCause;
+use rustc_infer::infer::canonical::{Canonical, Certainty};
+use rustc_infer::traits::query::NoSolution;
+use rustc_infer::traits::PredicateObligations;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
+use std::fmt;
+
+pub mod ascribe_user_type;
+pub mod custom;
+pub mod eq;
+pub mod implied_outlives_bounds;
+pub mod normalize;
+pub mod outlives;
+pub mod prove_predicate;
+pub mod subtype;
+
+pub use rustc_middle::traits::query::type_op::*;
+
+/// "Type ops" are used in NLL to perform some particular action and
+/// extract out the resulting region constraints (or an error if it
+/// cannot be completed).
+pub trait TypeOp<'tcx>: Sized + fmt::Debug {
+ type Output;
+ type ErrorInfo;
+
+ /// Processes the operation and all resulting obligations,
+ /// returning the final result along with any region constraints
+ /// (they will be given over to the NLL region solver).
+ fn fully_perform(self, infcx: &InferCtxt<'_, 'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>>;
+}
+
+/// The output from performing a type op
+pub struct TypeOpOutput<'tcx, Op: TypeOp<'tcx>> {
+ /// The output from the type op.
+ pub output: Op::Output,
+ /// Any region constraints from performing the type op.
+ pub constraints: Option<&'tcx QueryRegionConstraints<'tcx>>,
+ /// Used for error reporting to be able to rerun the query
+ pub error_info: Option<Op::ErrorInfo>,
+}
+
+/// "Query type ops" are type ops that are implemented using a
+/// [canonical query][c]. The `Self` type here contains the kernel of
+/// information needed to do the operation -- `TypeOp` is actually
+/// implemented for `ParamEnvAnd<Self>`, since we always need to bring
+/// along a parameter environment as well. For query type-ops, we will
+/// first canonicalize the key and then invoke the query on the tcx,
+/// which produces the resulting query region constraints.
+///
+/// [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
+pub trait QueryTypeOp<'tcx>: fmt::Debug + Copy + TypeFoldable<'tcx> + 'tcx {
+ type QueryResponse: TypeFoldable<'tcx>;
+
+    /// Gives the query the option of a simple fast path that never
+    /// actually hits the tcx cache lookup, etc. Returns `Some(r)` with
+    /// a final result, or `None` to take the full path.
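+    /// (For example, `Eq::try_fast_path` returns `Some(())` when both
+    /// sides are already the same type, skipping canonicalization
+    /// entirely.)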
+ fn try_fast_path(
+ tcx: TyCtxt<'tcx>,
+ key: &ParamEnvAnd<'tcx, Self>,
+ ) -> Option<Self::QueryResponse>;
+
+ /// Performs the actual query with the canonicalized key -- the
+ /// real work happens here. This method is not given an `infcx`
+ /// because it shouldn't need one -- and if it had access to one,
+ /// it might do things like invoke `sub_regions`, which would be
+ /// bad, because it would create subregion relationships that are
+ /// not captured in the return value.
+ fn perform_query(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Self>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self::QueryResponse>>;
+
+ fn fully_perform_into(
+ query_key: ParamEnvAnd<'tcx, Self>,
+ infcx: &InferCtxt<'_, 'tcx>,
+ output_query_region_constraints: &mut QueryRegionConstraints<'tcx>,
+ ) -> Fallible<(
+ Self::QueryResponse,
+ Option<Canonical<'tcx, ParamEnvAnd<'tcx, Self>>>,
+ PredicateObligations<'tcx>,
+ Certainty,
+ )> {
+ if let Some(result) = QueryTypeOp::try_fast_path(infcx.tcx, &query_key) {
+ return Ok((result, None, vec![], Certainty::Proven));
+ }
+
+ // FIXME(#33684) -- We need to use
+ // `canonicalize_query_keep_static` here because of things
+ // like the subtype query, which go awry around
+ // `'static` otherwise.
+ let mut canonical_var_values = OriginalQueryValues::default();
+ let old_param_env = query_key.param_env;
+ let canonical_self =
+ infcx.canonicalize_query_keep_static(query_key, &mut canonical_var_values);
+ let canonical_result = Self::perform_query(infcx.tcx, canonical_self)?;
+
+ let InferOk { value, obligations } = infcx
+ .instantiate_nll_query_response_and_region_obligations(
+ &ObligationCause::dummy(),
+ old_param_env,
+ &canonical_var_values,
+ canonical_result,
+ output_query_region_constraints,
+ )?;
+
+ Ok((value, Some(canonical_self), obligations, canonical_result.value.certainty))
+ }
+}
+
+impl<'tcx, Q> TypeOp<'tcx> for ParamEnvAnd<'tcx, Q>
+where
+ Q: QueryTypeOp<'tcx>,
+{
+ type Output = Q::QueryResponse;
+ type ErrorInfo = Canonical<'tcx, ParamEnvAnd<'tcx, Q>>;
+
+ fn fully_perform(self, infcx: &InferCtxt<'_, 'tcx>) -> Fallible<TypeOpOutput<'tcx, Self>> {
+ let mut region_constraints = QueryRegionConstraints::default();
+ let (output, error_info, mut obligations, _) =
+ Q::fully_perform_into(self, infcx, &mut region_constraints)?;
+
+ // Typically, instantiating NLL query results does not
+ // create obligations. However, in some cases there
+        // are unresolved type variables, and unifying them *can*
+        // create obligations. In that case, we have to go
+        // fulfill them. We do this via a (recursive) query.
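+        // Each pass must successfully process at least one obligation;
+        // otherwise we would loop forever, so we give up with
+        // `NoSolution` below.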
+ while !obligations.is_empty() {
+ trace!("{:#?}", obligations);
+ let mut progress = false;
+ for obligation in std::mem::take(&mut obligations) {
+ let obligation = infcx.resolve_vars_if_possible(obligation);
+ match ProvePredicate::fully_perform_into(
+ obligation.param_env.and(ProvePredicate::new(obligation.predicate)),
+ infcx,
+ &mut region_constraints,
+ ) {
+ Ok(((), _, new, certainty)) => {
+ obligations.extend(new);
+ progress = true;
+ if let Certainty::Ambiguous = certainty {
+ obligations.push(obligation);
+ }
+ }
+ Err(_) => obligations.push(obligation),
+ }
+ }
+ if !progress {
+ return Err(NoSolution);
+ }
+ }
+
+ Ok(TypeOpOutput {
+ output,
+ constraints: if region_constraints.is_empty() {
+ None
+ } else {
+ Some(infcx.tcx.arena.alloc(region_constraints))
+ },
+ error_info,
+ })
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/normalize.rs
new file mode 100644
index 000000000..e92ca7325
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/normalize.rs
@@ -0,0 +1,68 @@
+use crate::infer::canonical::{Canonicalized, CanonicalizedQueryResponse};
+use crate::traits::query::Fallible;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::{self, Lift, ParamEnvAnd, Ty, TyCtxt};
+use std::fmt;
+
+pub use rustc_middle::traits::query::type_op::Normalize;
+
+impl<'tcx, T> super::QueryTypeOp<'tcx> for Normalize<T>
+where
+ T: Normalizable<'tcx> + 'tcx,
+{
+ type QueryResponse = T;
+
+ fn try_fast_path(_tcx: TyCtxt<'tcx>, key: &ParamEnvAnd<'tcx, Self>) -> Option<T> {
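+        // A value containing no projections is already in normal form,
+        // so it can be returned unchanged without running the query.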
+ if !key.value.value.has_projections() { Some(key.value.value) } else { None }
+ }
+
+ fn perform_query(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Self>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self::QueryResponse>> {
+ T::type_op_method(tcx, canonicalized)
+ }
+}
+
+pub trait Normalizable<'tcx>: fmt::Debug + TypeFoldable<'tcx> + Lift<'tcx> + Copy {
+ fn type_op_method(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Normalize<Self>>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self>>;
+}
+
+impl<'tcx> Normalizable<'tcx> for Ty<'tcx> {
+ fn type_op_method(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Normalize<Self>>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self>> {
+ tcx.type_op_normalize_ty(canonicalized)
+ }
+}
+
+impl<'tcx> Normalizable<'tcx> for ty::Predicate<'tcx> {
+ fn type_op_method(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Normalize<Self>>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self>> {
+ tcx.type_op_normalize_predicate(canonicalized)
+ }
+}
+
+impl<'tcx> Normalizable<'tcx> for ty::PolyFnSig<'tcx> {
+ fn type_op_method(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Normalize<Self>>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self>> {
+ tcx.type_op_normalize_poly_fn_sig(canonicalized)
+ }
+}
+
+impl<'tcx> Normalizable<'tcx> for ty::FnSig<'tcx> {
+ fn type_op_method(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Normalize<Self>>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self>> {
+ tcx.type_op_normalize_fn_sig(canonicalized)
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs
new file mode 100644
index 000000000..b63382429
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/outlives.rs
@@ -0,0 +1,55 @@
+use crate::infer::canonical::{Canonicalized, CanonicalizedQueryResponse};
+use crate::traits::query::dropck_outlives::{trivial_dropck_outlives, DropckOutlivesResult};
+use crate::traits::query::Fallible;
+use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
+
+#[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct DropckOutlives<'tcx> {
+ dropped_ty: Ty<'tcx>,
+}
+
+impl<'tcx> DropckOutlives<'tcx> {
+ pub fn new(dropped_ty: Ty<'tcx>) -> Self {
+ DropckOutlives { dropped_ty }
+ }
+}
+
+impl<'tcx> super::QueryTypeOp<'tcx> for DropckOutlives<'tcx> {
+ type QueryResponse = DropckOutlivesResult<'tcx>;
+
+ fn try_fast_path(
+ tcx: TyCtxt<'tcx>,
+ key: &ParamEnvAnd<'tcx, Self>,
+ ) -> Option<Self::QueryResponse> {
+ if trivial_dropck_outlives(tcx, key.value.dropped_ty) {
+ Some(DropckOutlivesResult::default())
+ } else {
+ None
+ }
+ }
+
+ fn perform_query(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Self>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, Self::QueryResponse>> {
+ // Subtle: note that we are not invoking
+ // `infcx.at(...).dropck_outlives(...)` here, but rather the
+ // underlying `dropck_outlives` query. This same underlying
+ // query is also used by the
+ // `infcx.at(...).dropck_outlives(...)` fn. Avoiding the
+ // wrapper means we don't need an infcx in this code, which is
+ // good because the interface doesn't give us one (so that we
+ // know we are not registering any subregion relations or
+ // other things).
+
+ // FIXME convert to the type expected by the `dropck_outlives`
+ // query. This should eventually be fixed by changing the
+ // *underlying query*.
+ let canonicalized = canonicalized.unchecked_map(|ParamEnvAnd { param_env, value }| {
+ let DropckOutlives { dropped_ty } = value;
+ param_env.and(dropped_ty)
+ });
+
+ tcx.dropck_outlives(canonicalized)
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/prove_predicate.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/prove_predicate.rs
new file mode 100644
index 000000000..081308ac7
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/prove_predicate.rs
@@ -0,0 +1,43 @@
+use crate::infer::canonical::{Canonicalized, CanonicalizedQueryResponse};
+use crate::traits::query::Fallible;
+use rustc_middle::ty::{self, ParamEnvAnd, TyCtxt};
+
+pub use rustc_middle::traits::query::type_op::ProvePredicate;
+
+impl<'tcx> super::QueryTypeOp<'tcx> for ProvePredicate<'tcx> {
+ type QueryResponse = ();
+
+ fn try_fast_path(
+ tcx: TyCtxt<'tcx>,
+ key: &ParamEnvAnd<'tcx, Self>,
+ ) -> Option<Self::QueryResponse> {
+ // Proving Sized, very often on "obviously sized" types like
+        // `&T`, accounts for about 60% of the predicates
+ // we have to prove. No need to canonicalize and all that for
+ // such cases.
+ if let ty::PredicateKind::Trait(trait_ref) = key.value.predicate.kind().skip_binder() {
+ if let Some(sized_def_id) = tcx.lang_items().sized_trait() {
+ if trait_ref.def_id() == sized_def_id {
+ if trait_ref.self_ty().is_trivially_sized(tcx) {
+ return Some(());
+ }
+ }
+ }
+ }
+
+ None
+ }
+
+ fn perform_query(
+ tcx: TyCtxt<'tcx>,
+ mut canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Self>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, ()>> {
+ match canonicalized.value.value.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => {
+ canonicalized.value.param_env.remap_constness_with(pred.constness);
+ }
+ _ => canonicalized.value.param_env = canonicalized.value.param_env.without_const(),
+ }
+ tcx.type_op_prove_predicate(canonicalized)
+ }
+}
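
In surface terms, the fast path fires for types whose `Sized`-ness needs no trait search at all. A minimal sketch of the kinds of types this covers (illustrative only; the exact set is decided by `is_trivially_sized` in `rustc_middle`):

    fn assert_sized<T: Sized>() {}

    fn main() {
        assert_sized::<u64>();       // scalar
        assert_sized::<&str>();      // reference, even to an unsized pointee
        assert_sized::<Box<[u8]>>(); // `Box` is `Sized` regardless of pointee
        // A type parameter `T` without a `Sized` bound is not trivially
        // sized and would go through the full canonicalized query.
    }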
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/subtype.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/subtype.rs
new file mode 100644
index 000000000..57290b669
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/query/type_op/subtype.rs
@@ -0,0 +1,20 @@
+use crate::infer::canonical::{Canonicalized, CanonicalizedQueryResponse};
+use crate::traits::query::Fallible;
+use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
+
+pub use rustc_middle::traits::query::type_op::Subtype;
+
+impl<'tcx> super::QueryTypeOp<'tcx> for Subtype<'tcx> {
+ type QueryResponse = ();
+
+ fn try_fast_path(_tcx: TyCtxt<'tcx>, key: &ParamEnvAnd<'tcx, Self>) -> Option<()> {
+ if key.value.sub == key.value.sup { Some(()) } else { None }
+ }
+
+ fn perform_query(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonicalized<'tcx, ParamEnvAnd<'tcx, Self>>,
+ ) -> Fallible<CanonicalizedQueryResponse<'tcx, ()>> {
+ tcx.type_op_subtype(canonicalized)
+ }
+}
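
The fast path above is plain reflexivity: if the two sides are already the same type, nothing needs to be proven. A trivial surface-level analogue (ordinary Rust, for illustration):

    // Returning the argument unchanged yields exactly the obligation
    // `&'a str <: &'a str`, which holds by reflexivity.
    fn same<'a>(x: &'a str) -> &'a str {
        x
    }

    fn main() {
        println!("{}", same("hi"));
    }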
diff --git a/compiler/rustc_trait_selection/src/traits/relationships.rs b/compiler/rustc_trait_selection/src/traits/relationships.rs
new file mode 100644
index 000000000..8148e2b78
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/relationships.rs
@@ -0,0 +1,58 @@
+use crate::infer::InferCtxt;
+use crate::traits::query::evaluate_obligation::InferCtxtExt;
+use crate::traits::{ObligationCause, PredicateObligation};
+use rustc_infer::traits::TraitEngine;
+use rustc_middle::ty::{self, ToPredicate};
+
+pub(crate) fn update<'tcx, T>(
+ engine: &mut T,
+ infcx: &InferCtxt<'_, 'tcx>,
+ obligation: &PredicateObligation<'tcx>,
+) where
+ T: TraitEngine<'tcx>,
+{
+ // (*) binder skipped
+ if let ty::PredicateKind::Trait(tpred) = obligation.predicate.kind().skip_binder()
+ && let Some(ty) = infcx.shallow_resolve(tpred.self_ty()).ty_vid().map(|t| infcx.root_var(t))
+ && infcx.tcx.lang_items().sized_trait().map_or(false, |st| st != tpred.trait_ref.def_id)
+ {
+ let new_self_ty = infcx.tcx.types.unit;
+
+ let trait_ref = ty::TraitRef {
+ substs: infcx.tcx.mk_substs_trait(new_self_ty, &tpred.trait_ref.substs[1..]),
+ ..tpred.trait_ref
+ };
+
+ // Then construct a new obligation with `Self = ()`
+ // substituted in, and see if it holds.
+ let o = rustc_infer::traits::Obligation::new(
+ ObligationCause::dummy(),
+ obligation.param_env,
+ obligation
+ .predicate
+ .kind()
+ .rebind(
+ // (*) binder moved here
+ ty::PredicateKind::Trait(ty::TraitPredicate {
+ trait_ref,
+ constness: tpred.constness,
+ polarity: tpred.polarity,
+ })
+ )
+ .to_predicate(infcx.tcx),
+ );
+ // Don't report overflow errors. Otherwise equivalent to may_hold.
+ if let Ok(result) = infcx.probe(|_| infcx.evaluate_obligation(&o)) && result.may_apply() {
+ engine.relationships().entry(ty).or_default().self_in_trait = true;
+ }
+ }
+
+ if let ty::PredicateKind::Projection(predicate) = obligation.predicate.kind().skip_binder() {
+ // If the projection predicate (Foo::Bar == X) has X as a non-TyVid,
+ // we need to make it into one.
+ if let Some(vid) = predicate.term.ty().and_then(|ty| ty.ty_vid()) {
+ debug!("relationship: {:?}.output = true", vid);
+ engine.relationships().entry(vid).or_default().output = true;
+ }
+ }
+}
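
The situation `update` probes for arises whenever a trait predicate still has an inference variable as `Self`. A surface-level analogue (ordinary Rust, not the compiler's internals):

    fn main() {
        // Here the compiler briefly holds the obligation `?T: Default`
        // with `?T` unresolved; passes like the one above record facts
        // about `?T` to guide fallback and diagnostics.
        let x = Default::default();
        // Pinning `?T = u32` resolves the variable and the obligation.
        let y: u32 = x;
        println!("{}", y);
    }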
diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
new file mode 100644
index 000000000..a60ce0f34
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs
@@ -0,0 +1,1009 @@
+//! Candidate assembly.
+//!
+//! The selection process begins by examining all in-scope impls,
+//! caller obligations, and so forth and assembling a list of
+//! candidates. See the [rustc dev guide] for more details.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#candidate-assembly
+use hir::LangItem;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::traits::TraitEngine;
+use rustc_infer::traits::{Obligation, SelectionError, TraitObligation};
+use rustc_lint_defs::builtin::DEREF_INTO_DYN_SUPERTRAIT;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, ToPredicate, Ty, TypeVisitable};
+use rustc_target::spec::abi::Abi;
+
+use crate::traits;
+use crate::traits::coherence::Conflict;
+use crate::traits::query::evaluate_obligation::InferCtxtExt;
+use crate::traits::{util, SelectionResult};
+use crate::traits::{Ambiguous, ErrorReporting, Overflow, Unimplemented};
+
+use super::BuiltinImplConditions;
+use super::IntercrateAmbiguityCause;
+use super::OverflowError;
+use super::SelectionCandidate::{self, *};
+use super::{EvaluatedCandidate, SelectionCandidateSet, SelectionContext, TraitObligationStack};
+
+impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
+ #[instrument(level = "debug", skip(self))]
+ pub(super) fn candidate_from_obligation<'o>(
+ &mut self,
+ stack: &TraitObligationStack<'o, 'tcx>,
+ ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
+ // Watch out for overflow. This intentionally bypasses (and does
+ // not update) the cache.
+ self.check_recursion_limit(&stack.obligation, &stack.obligation)?;
+
+ // Check the cache. Note that we freshen the trait-ref
+ // separately rather than using `stack.fresh_trait_ref` --
+ // this is because we want the unbound variables to be
+ // replaced with fresh types starting from index 0.
+ let cache_fresh_trait_pred = self.infcx.freshen(stack.obligation.predicate);
+ debug!(?cache_fresh_trait_pred);
+ debug_assert!(!stack.obligation.predicate.has_escaping_bound_vars());
+
+ if let Some(c) =
+ self.check_candidate_cache(stack.obligation.param_env, cache_fresh_trait_pred)
+ {
+ debug!(candidate = ?c, "CACHE HIT");
+ return c;
+ }
+
+ // If no match, compute result and insert into cache.
+ //
+ // FIXME(nikomatsakis) -- this cache is not taking into
+ // account cycles that may have occurred in forming the
+ // candidate. I don't know of any specific problems that
+ // result but it seems awfully suspicious.
+ let (candidate, dep_node) =
+ self.in_task(|this| this.candidate_from_obligation_no_cache(stack));
+
+ debug!(?candidate, "CACHE MISS");
+ self.insert_candidate_cache(
+ stack.obligation.param_env,
+ cache_fresh_trait_pred,
+ dep_node,
+ candidate.clone(),
+ );
+ candidate
+ }
+
+ fn candidate_from_obligation_no_cache<'o>(
+ &mut self,
+ stack: &TraitObligationStack<'o, 'tcx>,
+ ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
+ if let Some(conflict) = self.is_knowable(stack) {
+ debug!("coherence stage: not knowable");
+ if self.intercrate_ambiguity_causes.is_some() {
+ debug!("evaluate_stack: intercrate_ambiguity_causes is some");
+ // Heuristics: show the diagnostics when there are no candidates in the crate.
+ if let Ok(candidate_set) = self.assemble_candidates(stack) {
+ let mut no_candidates_apply = true;
+
+ for c in candidate_set.vec.iter() {
+ if self.evaluate_candidate(stack, &c)?.may_apply() {
+ no_candidates_apply = false;
+ break;
+ }
+ }
+
+ if !candidate_set.ambiguous && no_candidates_apply {
+ let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
+ let self_ty = trait_ref.self_ty();
+ let (trait_desc, self_desc) = with_no_trimmed_paths!({
+ let trait_desc = trait_ref.print_only_trait_path().to_string();
+ let self_desc = if self_ty.has_concrete_skeleton() {
+ Some(self_ty.to_string())
+ } else {
+ None
+ };
+ (trait_desc, self_desc)
+ });
+ let cause = if let Conflict::Upstream = conflict {
+ IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc }
+ } else {
+ IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc }
+ };
+ debug!(?cause, "evaluate_stack: pushing cause");
+ self.intercrate_ambiguity_causes.as_mut().unwrap().insert(cause);
+ }
+ }
+ }
+ return Ok(None);
+ }
+
+ let candidate_set = self.assemble_candidates(stack)?;
+
+ if candidate_set.ambiguous {
+ debug!("candidate set contains ambig");
+ return Ok(None);
+ }
+
+ let candidates = candidate_set.vec;
+
+ debug!(?stack, ?candidates, "assembled {} candidates", candidates.len());
+
+ // At this point, we know that each of the entries in the
+ // candidate set is *individually* applicable. Now we have to
+ // figure out if they contain mutual incompatibilities. This
+ // frequently arises if we have an unconstrained input type --
+ // for example, we are looking for `$0: Eq` where `$0` is some
+ // unconstrained type variable. In that case, we'll get a
+ // candidate which assumes `$0 == int`, one that assumes
+ // `$0 == usize`, etc. This spells an ambiguity.
+
+ let mut candidates = self.filter_impls(candidates, stack.obligation);
+
+ // If there is more than one candidate, first winnow them down
+ // by considering extra conditions (nested obligations and so
+ // forth). We don't winnow if there is exactly one
+ // candidate. This is a relatively minor distinction but it
+ // can lead to better inference and error-reporting. An
+ // example would be if there was an impl:
+ //
+ // impl<T:Clone> Vec<T> { fn push_clone(...) { ... } }
+ //
+ // and we were to see some code `foo.push_clone()` where `foo`
+ // is a `Vec<Bar>` and `Bar` does not implement `Clone`. If
+ // we were to winnow, we'd wind up with zero candidates.
+ // Instead, we select the right impl now but report "`Bar` does
+ // not implement `Clone`".
+ if candidates.len() == 1 {
+ return self.filter_reservation_impls(candidates.pop().unwrap(), stack.obligation);
+ }
+
+ // Winnow, but record the exact outcome of evaluation, which
+ // is needed for specialization. Propagate overflow if it occurs.
+ let mut candidates = candidates
+ .into_iter()
+ .map(|c| match self.evaluate_candidate(stack, &c) {
+ Ok(eval) if eval.may_apply() => {
+ Ok(Some(EvaluatedCandidate { candidate: c, evaluation: eval }))
+ }
+ Ok(_) => Ok(None),
+ Err(OverflowError::Canonical) => Err(Overflow(OverflowError::Canonical)),
+ Err(OverflowError::ErrorReporting) => Err(ErrorReporting),
+ Err(OverflowError::Error(e)) => Err(Overflow(OverflowError::Error(e))),
+ })
+ .flat_map(Result::transpose)
+ .collect::<Result<Vec<_>, _>>()?;
+
+ debug!(?stack, ?candidates, "winnowed to {} candidates", candidates.len());
+
+ let needs_infer = stack.obligation.predicate.has_infer_types_or_consts();
+
+ // If there are STILL multiple candidates, we can further
+ // reduce the list by dropping duplicates -- including
+ // resolving specializations.
+ if candidates.len() > 1 {
+ let mut i = 0;
+ while i < candidates.len() {
+ let is_dup = (0..candidates.len()).filter(|&j| i != j).any(|j| {
+ self.candidate_should_be_dropped_in_favor_of(
+ &candidates[i],
+ &candidates[j],
+ needs_infer,
+ )
+ });
+ if is_dup {
+ debug!(candidate = ?candidates[i], "Dropping candidate #{}/{}", i, candidates.len());
+ candidates.swap_remove(i);
+ } else {
+ debug!(candidate = ?candidates[i], "Retaining candidate #{}/{}", i, candidates.len());
+ i += 1;
+
+ // If there are *STILL* multiple candidates, give up
+ // and report ambiguity.
+ if i > 1 {
+ debug!("multiple matches, ambig");
+ return Err(Ambiguous(
+ candidates
+ .into_iter()
+ .filter_map(|c| match c.candidate {
+ SelectionCandidate::ImplCandidate(def_id) => Some(def_id),
+ _ => None,
+ })
+ .collect(),
+ ));
+ }
+ }
+ }
+ }
+
+ // If there are *NO* candidates, then there are no impls --
+ // that we know of, anyway. Note that in the case where there
+ // are unbound type variables within the obligation, it might
+ // be the case that you could still satisfy the obligation
+ // from another crate by instantiating the type variables with
+ // a type from another crate that does have an impl. This case
+ // is checked for in `evaluate_stack` (and hence users
+ // who might care about this case, like coherence, should use
+ // that function).
+ if candidates.is_empty() {
+ // If there's an error type, 'downgrade' our result from
+ // `Err(Unimplemented)` to `Ok(None)`. This helps us avoid
+ // emitting additional spurious errors, since we're guaranteed
+ // to have emitted at least one.
+ if stack.obligation.predicate.references_error() {
+ debug!(?stack.obligation.predicate, "found error type in predicate, treating as ambiguous");
+ return Ok(None);
+ }
+ return Err(Unimplemented);
+ }
+
+ // Just one candidate left.
+ self.filter_reservation_impls(candidates.pop().unwrap().candidate, stack.obligation)
+ }
+
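
Winnowing ends in one surviving candidate, `Err(Unimplemented)`, or ambiguity. The ambiguous outcome has a familiar surface form (ordinary Rust, shown only to illustrate what "multiple matches, ambig" corresponds to):

    use std::collections::HashSet;

    fn main() {
        // Without the annotation, `?C: FromIterator<i32>` has several
        // applicable impl candidates (`Vec<i32>`, `HashSet<i32>`, ...),
        // so selection cannot pick one and rustc asks for a type
        // annotation. The annotation pins the candidate.
        let set: HashSet<i32> = [1, 2, 3].iter().copied().collect();
        println!("{}", set.len());
    }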
+ #[instrument(skip(self, stack), level = "debug")]
+ pub(super) fn assemble_candidates<'o>(
+ &mut self,
+ stack: &TraitObligationStack<'o, 'tcx>,
+ ) -> Result<SelectionCandidateSet<'tcx>, SelectionError<'tcx>> {
+ let TraitObligationStack { obligation, .. } = *stack;
+ let obligation = &Obligation {
+ param_env: obligation.param_env,
+ cause: obligation.cause.clone(),
+ recursion_depth: obligation.recursion_depth,
+ predicate: self.infcx().resolve_vars_if_possible(obligation.predicate),
+ };
+
+ if obligation.predicate.skip_binder().self_ty().is_ty_var() {
+ debug!(ty = ?obligation.predicate.skip_binder().self_ty(), "ambiguous inference var or opaque type");
+ // Self is a type variable (e.g., `_: AsRef<str>`).
+ //
+ // This is somewhat problematic, as the current scheme can't really
+ // handle it turning out to be a projection. This does end up as truly
+ // ambiguous in most cases anyway.
+ //
+ // Take the fast path out - this also improves
+ // performance by preventing assemble_candidates_from_impls from
+ // matching every impl for this trait.
+ return Ok(SelectionCandidateSet { vec: vec![], ambiguous: true });
+ }
+
+ let mut candidates = SelectionCandidateSet { vec: Vec::new(), ambiguous: false };
+
+ // The only way to prove a NotImplemented(T: Foo) predicate is via a negative impl.
+ // There are no compiler built-in rules for this.
+ if obligation.polarity() == ty::ImplPolarity::Negative {
+ self.assemble_candidates_for_trait_alias(obligation, &mut candidates);
+ self.assemble_candidates_from_impls(obligation, &mut candidates);
+ } else {
+ self.assemble_candidates_for_trait_alias(obligation, &mut candidates);
+
+ // Other bounds. Consider both in-scope bounds from fn decl
+ // and applicable impls. There is a certain set of precedence rules here.
+ let def_id = obligation.predicate.def_id();
+ let lang_items = self.tcx().lang_items();
+
+ if lang_items.copy_trait() == Some(def_id) {
+ debug!(obligation_self_ty = ?obligation.predicate.skip_binder().self_ty());
+
+ // User-defined copy impls are permitted, but only for
+ // structs and enums.
+ self.assemble_candidates_from_impls(obligation, &mut candidates);
+
+ // For other types, we'll use the builtin rules.
+ let copy_conditions = self.copy_clone_conditions(obligation);
+ self.assemble_builtin_bound_candidates(copy_conditions, &mut candidates);
+ } else if lang_items.discriminant_kind_trait() == Some(def_id) {
+ // `DiscriminantKind` is automatically implemented for every type.
+ candidates.vec.push(DiscriminantKindCandidate);
+ } else if lang_items.pointee_trait() == Some(def_id) {
+ // `Pointee` is automatically implemented for every type.
+ candidates.vec.push(PointeeCandidate);
+ } else if lang_items.sized_trait() == Some(def_id) {
+ // Sized is never implementable by end-users, it is
+ // always automatically computed.
+ let sized_conditions = self.sized_conditions(obligation);
+ self.assemble_builtin_bound_candidates(sized_conditions, &mut candidates);
+ } else if lang_items.unsize_trait() == Some(def_id) {
+ self.assemble_candidates_for_unsizing(obligation, &mut candidates);
+ } else if lang_items.destruct_trait() == Some(def_id) {
+ self.assemble_const_destruct_candidates(obligation, &mut candidates);
+ } else if lang_items.transmute_trait() == Some(def_id) {
+ // User-defined transmutability impls are permitted.
+ self.assemble_candidates_from_impls(obligation, &mut candidates);
+ self.assemble_candidates_for_transmutability(obligation, &mut candidates);
+ } else {
+ if lang_items.clone_trait() == Some(def_id) {
+ // Same builtin conditions as `Copy`, i.e., every type which has builtin support
+ // for `Copy` also has builtin support for `Clone`, and tuples/arrays of `Clone`
+ // types have builtin support for `Clone`.
+ let clone_conditions = self.copy_clone_conditions(obligation);
+ self.assemble_builtin_bound_candidates(clone_conditions, &mut candidates);
+ }
+
+ self.assemble_generator_candidates(obligation, &mut candidates);
+ self.assemble_closure_candidates(obligation, &mut candidates);
+ self.assemble_fn_pointer_candidates(obligation, &mut candidates);
+ self.assemble_candidates_from_impls(obligation, &mut candidates);
+ self.assemble_candidates_from_object_ty(obligation, &mut candidates);
+ }
+
+ self.assemble_candidates_from_projected_tys(obligation, &mut candidates);
+ self.assemble_candidates_from_caller_bounds(stack, &mut candidates)?;
+ // Auto implementations have lower priority, so we only
+ // consider triggering a default if there is no other impl that can apply.
+ if candidates.vec.is_empty() {
+ self.assemble_candidates_from_auto_impls(obligation, &mut candidates);
+ }
+ }
+ debug!("candidate list size: {}", candidates.vec.len());
+ Ok(candidates)
+ }
+
+ #[tracing::instrument(level = "debug", skip(self, candidates))]
+ fn assemble_candidates_from_projected_tys(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ // Before we go into the whole placeholder thing, just
+ // quickly check if the self-type is a projection at all.
+ match obligation.predicate.skip_binder().trait_ref.self_ty().kind() {
+ ty::Projection(_) | ty::Opaque(..) => {}
+ ty::Infer(ty::TyVar(_)) => {
+ span_bug!(
+ obligation.cause.span,
+ "Self=_ should have been handled by assemble_candidates"
+ );
+ }
+ _ => return,
+ }
+
+ let result = self
+ .infcx
+ .probe(|_| self.match_projection_obligation_against_definition_bounds(obligation));
+
+ candidates.vec.extend(result.into_iter().map(ProjectionCandidate));
+ }
+
+ /// Given an obligation like `<SomeTrait for T>`, searches the obligations that the caller
+ /// supplied to find out whether it is listed among them.
+ ///
+ /// Never affects the inference environment.
+ #[tracing::instrument(level = "debug", skip(self, stack, candidates))]
+ fn assemble_candidates_from_caller_bounds<'o>(
+ &mut self,
+ stack: &TraitObligationStack<'o, 'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) -> Result<(), SelectionError<'tcx>> {
+ debug!(?stack.obligation);
+
+ let all_bounds = stack
+ .obligation
+ .param_env
+ .caller_bounds()
+ .iter()
+ .filter_map(|o| o.to_opt_poly_trait_pred());
+
+ // Micro-optimization: filter out predicates relating to different traits.
+ let matching_bounds =
+ all_bounds.filter(|p| p.def_id() == stack.obligation.predicate.def_id());
+
+ // Keep only those bounds which may apply, and propagate overflow if it occurs.
+ for bound in matching_bounds {
+ // FIXME(oli-obk): it is suspicious that we are dropping the constness and
+ // polarity here.
+ let wc = self.where_clause_may_apply(stack, bound.map_bound(|t| t.trait_ref))?;
+ if wc.may_apply() {
+ candidates.vec.push(ParamCandidate(bound));
+ }
+ }
+
+ Ok(())
+ }
+
+ fn assemble_generator_candidates(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ if self.tcx().lang_items().gen_trait() != Some(obligation.predicate.def_id()) {
+ return;
+ }
+
+ // Okay to skip binder because the substs on generator types never
+ // touch bound regions, they just capture the in-scope
+ // type/region parameters.
+ let self_ty = obligation.self_ty().skip_binder();
+ match self_ty.kind() {
+ ty::Generator(..) => {
+ debug!(?self_ty, ?obligation, "assemble_generator_candidates",);
+
+ candidates.vec.push(GeneratorCandidate);
+ }
+ ty::Infer(ty::TyVar(_)) => {
+ debug!("assemble_generator_candidates: ambiguous self-type");
+ candidates.ambiguous = true;
+ }
+ _ => {}
+ }
+ }
+
+ /// Checks for the artificial impl that the compiler will create for an obligation like `X :
+ /// FnMut<..>` where `X` is a closure type.
+ ///
+ /// Note: the type parameters on a closure candidate are modeled as *output* type
+ /// parameters and hence do not affect whether this trait is a match or not. They will be
+ /// unified during the confirmation step.
+ fn assemble_closure_candidates(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ let Some(kind) = self.tcx().fn_trait_kind_from_lang_item(obligation.predicate.def_id()) else {
+ return;
+ };
+
+ // Okay to skip binder because the substs on closure types never
+ // touch bound regions, they just capture the in-scope
+ // type/region parameters
+ match *obligation.self_ty().skip_binder().kind() {
+ ty::Closure(_, closure_substs) => {
+ debug!(?kind, ?obligation, "assemble_unboxed_candidates");
+ match self.infcx.closure_kind(closure_substs) {
+ Some(closure_kind) => {
+ debug!(?closure_kind, "assemble_unboxed_candidates");
+ if closure_kind.extends(kind) {
+ candidates.vec.push(ClosureCandidate);
+ }
+ }
+ None => {
+ debug!("assemble_unboxed_candidates: closure_kind not yet known");
+ candidates.vec.push(ClosureCandidate);
+ }
+ }
+ }
+ ty::Infer(ty::TyVar(_)) => {
+ debug!("assemble_unboxed_closure_candidates: ambiguous self-type");
+ candidates.ambiguous = true;
+ }
+ _ => {}
+ }
+ }
+
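
The `closure_kind.extends(kind)` check mirrors the user-visible hierarchy `Fn: FnMut: FnOnce`: a closure satisfies the bound for its own kind and for every kind that kind extends. For illustration:

    fn call_mut<F: FnMut()>(mut f: F) { f(); }
    fn call_once<F: FnOnce()>(f: F) { f(); }

    fn main() {
        let mut n = 0;
        call_mut(|| n += 1);  // the closure's kind is `FnMut`
        call_once(|| n += 1); // ...and `FnMut` extends `FnOnce`
        println!("{n}");      // prints 2
    }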
+ /// Implements one of the `Fn()` family for a fn pointer.
+ fn assemble_fn_pointer_candidates(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ // We provide impl of all fn traits for fn pointers.
+ if self.tcx().fn_trait_kind_from_lang_item(obligation.predicate.def_id()).is_none() {
+ return;
+ }
+
+ // Okay to skip binder because what we are inspecting doesn't involve bound regions.
+ let self_ty = obligation.self_ty().skip_binder();
+ match *self_ty.kind() {
+ ty::Infer(ty::TyVar(_)) => {
+ debug!("assemble_fn_pointer_candidates: ambiguous self-type");
+ candidates.ambiguous = true; // Could wind up being a fn() type.
+ }
+ // Provide an impl, but only for suitable `fn` pointers.
+ ty::FnPtr(_) => {
+ if let ty::FnSig {
+ unsafety: hir::Unsafety::Normal,
+ abi: Abi::Rust,
+ c_variadic: false,
+ ..
+ } = self_ty.fn_sig(self.tcx()).skip_binder()
+ {
+ candidates.vec.push(FnPointerCandidate { is_const: false });
+ }
+ }
+ // Provide an impl for suitable functions, rejecting `#[target_feature]` functions (RFC 2396).
+ ty::FnDef(def_id, _) => {
+ if let ty::FnSig {
+ unsafety: hir::Unsafety::Normal,
+ abi: Abi::Rust,
+ c_variadic: false,
+ ..
+ } = self_ty.fn_sig(self.tcx()).skip_binder()
+ {
+ if self.tcx().codegen_fn_attrs(def_id).target_features.is_empty() {
+ candidates
+ .vec
+ .push(FnPointerCandidate { is_const: self.tcx().is_const_fn(def_id) });
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
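
In user-facing terms, the conditions above mean that safe, non-variadic, Rust-ABI function items and pointers implement the `Fn` traits, while e.g. `unsafe fn` pointers do not. A small illustration:

    fn double(x: i32) -> i32 { x * 2 }

    fn apply(f: impl Fn(i32) -> i32, x: i32) -> i32 { f(x) }

    fn main() {
        let p: fn(i32) -> i32 = double;
        // `fn(i32) -> i32` satisfies `Fn(i32) -> i32` per the rules above.
        println!("{}", apply(p, 21)); // prints 42
        // An `unsafe fn(i32) -> i32` would not satisfy the bound.
    }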
+ /// Searches for impls that might apply to `obligation`.
+ fn assemble_candidates_from_impls(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ debug!(?obligation, "assemble_candidates_from_impls");
+
+ // Essentially any user-written impl will match with an error type,
+ // so creating `ImplCandidates` isn't useful. However, we might
+ // end up finding a candidate elsewhere (e.g., a `BuiltinCandidate` for `Sized`).
+ // This helps us avoid overflow: see issue #72839
+ // Since compilation is already guaranteed to fail, this is just
+ // to try to show the 'nicest' possible errors to the user.
+ // We don't check for errors in the `ParamEnv` - in practice,
+ // it seems to cause us to be overly aggressive in deciding
+ // to give up searching for candidates, leading to spurious errors.
+ if obligation.predicate.references_error() {
+ return;
+ }
+
+ self.tcx().for_each_relevant_impl(
+ obligation.predicate.def_id(),
+ obligation.predicate.skip_binder().trait_ref.self_ty(),
+ |impl_def_id| {
+ // Before we create the substitutions and everything, first
+ // consider a "quick reject". This avoids creating more types
+ // and so forth that we need to.
+ let impl_trait_ref = self.tcx().bound_impl_trait_ref(impl_def_id).unwrap();
+ if self.fast_reject_trait_refs(obligation, &impl_trait_ref.0) {
+ return;
+ }
+
+ self.infcx.probe(|_| {
+ if let Ok(_substs) = self.match_impl(impl_def_id, impl_trait_ref, obligation) {
+ candidates.vec.push(ImplCandidate(impl_def_id));
+ }
+ });
+ },
+ );
+ }
+
+ fn assemble_candidates_from_auto_impls(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ // Okay to skip binder here because the tests we do below do not involve bound regions.
+ let self_ty = obligation.self_ty().skip_binder();
+ debug!(?self_ty, "assemble_candidates_from_auto_impls");
+
+ let def_id = obligation.predicate.def_id();
+
+ if self.tcx().trait_is_auto(def_id) {
+ match self_ty.kind() {
+ ty::Dynamic(..) => {
+ // For object types, we don't know what the closed
+ // over types are. This means we conservatively
+ // say nothing; a candidate may be added by
+ // `assemble_candidates_from_object_ty`.
+ }
+ ty::Foreign(..) => {
+ // Since the contents of foreign types are unknown,
+ // we don't add any `..` impl. Default traits could
+ // still be provided by a manual implementation for
+ // this trait and type.
+ }
+ ty::Param(..) | ty::Projection(..) => {
+ // In these cases, we don't know what the actual
+ // type is. Therefore, we cannot break it down
+ // into its constituent types. So we don't
+ // consider the `..` impl but instead just add no
+ // candidates: this means that typeck will only
+ // succeed if there is another reason to believe
+ // that this obligation holds. That could be a
+ // where-clause or, in the case of an object type,
+ // it could be that the object type lists the
+ // trait (e.g., `Foo+Send : Send`). See
+ // `ui/typeck/typeck-default-trait-impl-send-param.rs`
+ // for an example of a test case that exercises
+ // this path.
+ }
+ ty::Infer(ty::TyVar(_)) => {
+ // The auto impl might apply; we don't know.
+ candidates.ambiguous = true;
+ }
+ ty::Generator(_, _, movability)
+ if self.tcx().lang_items().unpin_trait() == Some(def_id) =>
+ {
+ match movability {
+ hir::Movability::Static => {
+ // Immovable generators are never `Unpin`, so
+ // suppress the normal auto-impl candidate for it.
+ }
+ hir::Movability::Movable => {
+ // Movable generators are always `Unpin`, so add an
+ // unconditional builtin candidate.
+ candidates.vec.push(BuiltinCandidate { has_nested: false });
+ }
+ }
+ }
+
+ _ => candidates.vec.push(AutoImplCandidate(def_id)),
+ }
+ }
+ }
+
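
The match above encodes the auto-trait rule: the `..` impl applies when every constituent type satisfies the trait, and is withheld where constituents are unknowable. The user-visible effect, shown for `Send` (illustrative):

    fn assert_send<T: Send>() {}

    struct Plain(u32, String); // all constituents `Send`, auto impl applies
    struct Raw(*const u8);     // a raw-pointer constituent is `!Send`

    // Without this (unsafe!) manual impl, `assert_send::<Raw>()` fails.
    unsafe impl Send for Raw {}

    fn main() {
        assert_send::<Plain>();
        assert_send::<Raw>();
    }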
+ /// Searches for impls that might apply to `obligation`.
+ fn assemble_candidates_from_object_ty(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ debug!(
+ self_ty = ?obligation.self_ty().skip_binder(),
+ "assemble_candidates_from_object_ty",
+ );
+
+ self.infcx.probe(|_snapshot| {
+ // The code below doesn't care about regions, and the
+ // self-ty here doesn't escape this probe, so just erase
+ // any LBR.
+ let self_ty = self.tcx().erase_late_bound_regions(obligation.self_ty());
+ let poly_trait_ref = match self_ty.kind() {
+ ty::Dynamic(ref data, ..) => {
+ if data.auto_traits().any(|did| did == obligation.predicate.def_id()) {
+ debug!(
+ "assemble_candidates_from_object_ty: matched builtin bound, \
+ pushing candidate"
+ );
+ candidates.vec.push(BuiltinObjectCandidate);
+ return;
+ }
+
+ if let Some(principal) = data.principal() {
+ if !self.infcx.tcx.features().object_safe_for_dispatch {
+ principal.with_self_ty(self.tcx(), self_ty)
+ } else if self.tcx().is_object_safe(principal.def_id()) {
+ principal.with_self_ty(self.tcx(), self_ty)
+ } else {
+ return;
+ }
+ } else {
+ // Only auto trait bounds exist.
+ return;
+ }
+ }
+ ty::Infer(ty::TyVar(_)) => {
+ debug!("assemble_candidates_from_object_ty: ambiguous");
+ candidates.ambiguous = true; // could wind up being an object type
+ return;
+ }
+ _ => return,
+ };
+
+ debug!(?poly_trait_ref, "assemble_candidates_from_object_ty");
+
+ let poly_trait_predicate = self.infcx().resolve_vars_if_possible(obligation.predicate);
+ let placeholder_trait_predicate =
+ self.infcx().replace_bound_vars_with_placeholders(poly_trait_predicate);
+
+ // Count only those upcast versions that match the trait-ref
+ // we are looking for. Specifically, check not only for the
+ // correct trait, but also for the correct type parameters.
+ // For example, we may be trying to upcast `Foo` to `Bar<i32>`,
+ // but `Foo` is declared as `trait Foo: Bar<u32>`.
+ let candidate_supertraits = util::supertraits(self.tcx(), poly_trait_ref)
+ .enumerate()
+ .filter(|&(_, upcast_trait_ref)| {
+ self.infcx.probe(|_| {
+ self.match_normalize_trait_ref(
+ obligation,
+ upcast_trait_ref,
+ placeholder_trait_predicate.trait_ref,
+ )
+ .is_ok()
+ })
+ })
+ .map(|(idx, _)| ObjectCandidate(idx));
+
+ candidates.vec.extend(candidate_supertraits);
+ })
+ }
+
+ /// Temporary migration for #89190
+ fn need_migrate_deref_output_trait_object(
+ &mut self,
+ ty: Ty<'tcx>,
+ cause: &traits::ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Option<(Ty<'tcx>, DefId)> {
+ let tcx = self.tcx();
+ if tcx.features().trait_upcasting {
+ return None;
+ }
+
+ // <ty as Deref>
+ let trait_ref = ty::TraitRef {
+ def_id: tcx.lang_items().deref_trait()?,
+ substs: tcx.mk_substs_trait(ty, &[]),
+ };
+
+ let obligation = traits::Obligation::new(
+ cause.clone(),
+ param_env,
+ ty::Binder::dummy(trait_ref).without_const().to_predicate(tcx),
+ );
+ if !self.infcx.predicate_may_hold(&obligation) {
+ return None;
+ }
+
+ let mut fulfillcx = traits::FulfillmentContext::new_in_snapshot();
+ let normalized_ty = fulfillcx.normalize_projection_type(
+ &self.infcx,
+ param_env,
+ ty::ProjectionTy {
+ item_def_id: tcx.lang_items().deref_target()?,
+ substs: trait_ref.substs,
+ },
+ cause.clone(),
+ );
+
+ let ty::Dynamic(data, ..) = normalized_ty.kind() else {
+ return None;
+ };
+
+ let def_id = data.principal_def_id()?;
+
+ return Some((normalized_ty, def_id));
+ }
+
+ /// Searches for unsizing that might apply to `obligation`.
+ fn assemble_candidates_for_unsizing(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ // We currently never consider higher-ranked obligations e.g.
+ // `for<'a> &'a T: Unsize<Trait+'a>` to be implemented. This is not
+ // because they are a priori invalid; we could potentially add support
+ // for them later, but there isn't really a strong need for it.
+ // A `T: Unsize<U>` obligation is always used as part of a `T: CoerceUnsize<U>`
+ // impl, and those are generally applied to concrete types.
+ //
+ // That said, one might try to write a fn with a where clause like
+ // for<'a> Foo<'a, T>: Unsize<Foo<'a, Trait>>
+ // where the `'a` is kind of orthogonal to the relevant part of the `Unsize`.
+ // Still, you'd be more likely to write that where clause as
+ // T: Trait
+ // so it seems ok if we (conservatively) fail to accept that `Unsize`
+ // obligation above. It should be possible to extend this in the future.
+ let Some(source) = obligation.self_ty().no_bound_vars() else {
+ // Don't add any candidates if there are bound regions.
+ return;
+ };
+ let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
+
+ debug!(?source, ?target, "assemble_candidates_for_unsizing");
+
+ match (source.kind(), target.kind()) {
+ // Trait+Kx+'a -> Trait+Ky+'b (upcasts).
+ (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
+ // Upcast coercions permit several things:
+ //
+ // 1. Dropping auto traits, e.g., `Foo + Send` to `Foo`
+ // 2. Tightening the region bound, e.g., `Foo + 'a` to `Foo + 'b` if `'a: 'b`
+ // 3. Tightening the trait to one of its supertraits, e.g., `Foo` to `Bar` if `Foo: Bar`
+ //
+ // Note that neither of the first two changes requires any change
+ // at runtime. The third needs to change pointer metadata at runtime.
+ //
+ // We always perform upcasting coercions when we can because of reason
+ // #2 (region bounds).
+ let auto_traits_compatible = data_b
+ .auto_traits()
+ // All of a's auto traits need to be in b's auto traits.
+ .all(|b| data_a.auto_traits().any(|a| a == b));
+ if auto_traits_compatible {
+ let principal_def_id_a = data_a.principal_def_id();
+ let principal_def_id_b = data_b.principal_def_id();
+ if principal_def_id_a == principal_def_id_b {
+ // Same principal trait (or none on either side): plain unsizing, no upcast.
+ candidates.vec.push(BuiltinUnsizeCandidate);
+ } else if principal_def_id_a.is_some() && principal_def_id_b.is_some() {
+ // Not ordinary unsizing; check whether this is a trait upcasting coercion.
+ let principal_a = data_a.principal().unwrap();
+ let target_trait_did = principal_def_id_b.unwrap();
+ let source_trait_ref = principal_a.with_self_ty(self.tcx(), source);
+ if let Some((deref_output_ty, deref_output_trait_did)) = self
+ .need_migrate_deref_output_trait_object(
+ source,
+ &obligation.cause,
+ obligation.param_env,
+ )
+ {
+ if deref_output_trait_did == target_trait_did {
+ self.tcx().struct_span_lint_hir(
+ DEREF_INTO_DYN_SUPERTRAIT,
+ obligation.cause.body_id,
+ obligation.cause.span,
+ |lint| {
+ lint.build(&format!(
+ "`{}` implements `Deref` with supertrait `{}` as output",
+ source,
+ deref_output_ty
+ )).emit();
+ },
+ );
+ return;
+ }
+ }
+
+ for (idx, upcast_trait_ref) in
+ util::supertraits(self.tcx(), source_trait_ref).enumerate()
+ {
+ if upcast_trait_ref.def_id() == target_trait_did {
+ candidates.vec.push(TraitUpcastingUnsizeCandidate(idx));
+ }
+ }
+ }
+ }
+ }
+
+ // `T` -> `Trait`
+ (_, &ty::Dynamic(..)) => {
+ candidates.vec.push(BuiltinUnsizeCandidate);
+ }
+
+ // Ambiguous handling is below `T` -> `Trait`, because inference
+ // variables can still implement `Unsize<Trait>` and nested
+ // obligations will have the final say (likely deferred).
+ (&ty::Infer(ty::TyVar(_)), _) | (_, &ty::Infer(ty::TyVar(_))) => {
+ debug!("assemble_candidates_for_unsizing: ambiguous");
+ candidates.ambiguous = true;
+ }
+
+ // `[T; n]` -> `[T]`
+ (&ty::Array(..), &ty::Slice(_)) => {
+ candidates.vec.push(BuiltinUnsizeCandidate);
+ }
+
+ // `Struct<T>` -> `Struct<U>`
+ (&ty::Adt(def_id_a, _), &ty::Adt(def_id_b, _)) if def_id_a.is_struct() => {
+ if def_id_a == def_id_b {
+ candidates.vec.push(BuiltinUnsizeCandidate);
+ }
+ }
+
+ // `(.., T)` -> `(.., U)`
+ (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
+ if tys_a.len() == tys_b.len() {
+ candidates.vec.push(BuiltinUnsizeCandidate);
+ }
+ }
+
+ _ => {}
+ };
+ }
+
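
Several of the candidate kinds assembled above are visible in ordinary code: the builtin `[T; N] -> [T]` unsizing, the `T -> dyn Trait` coercion, and the auto-trait-dropping upcast described in the comment on upcast coercions. A compact illustration:

    use std::fmt::Debug;

    fn drop_auto<'a>(x: &'a (dyn Debug + Send)) -> &'a dyn Debug {
        x // upcast #1: dropping the auto trait `Send`, no runtime work
    }

    fn main() {
        let arr: [i32; 3] = [1, 2, 3];
        let slice: &[i32] = &arr;   // `[i32; 3]: Unsize<[i32]>` (builtin)
        let obj: &dyn Debug = &arr; // `T -> dyn Trait` (builtin)
        println!("{:?} / {:?} / {:?}", slice, obj, drop_auto(&arr));
    }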
+ #[tracing::instrument(level = "debug", skip(self, obligation, candidates))]
+ fn assemble_candidates_for_transmutability(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ if obligation.has_param_types_or_consts() {
+ return;
+ }
+
+ if obligation.has_infer_types_or_consts() {
+ candidates.ambiguous = true;
+ return;
+ }
+
+ candidates.vec.push(TransmutabilityCandidate);
+ }
+
+ #[tracing::instrument(level = "debug", skip(self, obligation, candidates))]
+ fn assemble_candidates_for_trait_alias(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ // Okay to skip binder here because the tests we do below do not involve bound regions.
+ let self_ty = obligation.self_ty().skip_binder();
+ debug!(?self_ty);
+
+ let def_id = obligation.predicate.def_id();
+
+ if self.tcx().is_trait_alias(def_id) {
+ candidates.vec.push(TraitAliasCandidate(def_id));
+ }
+ }
+
+ /// Assembles candidates for the traits that are built into the
+ /// language itself: `Copy`, `Clone`, and `Sized`.
+ #[tracing::instrument(level = "debug", skip(self, candidates))]
+ fn assemble_builtin_bound_candidates(
+ &mut self,
+ conditions: BuiltinImplConditions<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ match conditions {
+ BuiltinImplConditions::Where(nested) => {
+ candidates
+ .vec
+ .push(BuiltinCandidate { has_nested: !nested.skip_binder().is_empty() });
+ }
+ BuiltinImplConditions::None => {}
+ BuiltinImplConditions::Ambiguous => {
+ candidates.ambiguous = true;
+ }
+ }
+ }
+
+ fn assemble_const_destruct_candidates(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidates: &mut SelectionCandidateSet<'tcx>,
+ ) {
+ // If the predicate is `~const Destruct` in a non-const environment, we don't actually need
+ // to check anything. We'll short-circuit checking any obligations in confirmation, too.
+ if !obligation.is_const() {
+ candidates.vec.push(ConstDestructCandidate(None));
+ return;
+ }
+
+ let self_ty = self.infcx().shallow_resolve(obligation.self_ty());
+ match self_ty.skip_binder().kind() {
+ ty::Opaque(..)
+ | ty::Dynamic(..)
+ | ty::Error(_)
+ | ty::Bound(..)
+ | ty::Param(_)
+ | ty::Placeholder(_)
+ | ty::Projection(_) => {
+ // We don't know if these are `~const Destruct`, at least
+ // not structurally... so don't push a candidate.
+ }
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Infer(ty::IntVar(_))
+ | ty::Infer(ty::FloatVar(_))
+ | ty::Str
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Never
+ | ty::Foreign(_)
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::Tuple(_)
+ | ty::GeneratorWitness(_) => {
+ // These are built-in, and cannot have a custom `impl const Destruct`.
+ candidates.vec.push(ConstDestructCandidate(None));
+ }
+
+ ty::Adt(..) => {
+ // Find a custom `impl Drop` impl, if it exists
+ let relevant_impl = self.tcx().find_map_relevant_impl(
+ self.tcx().require_lang_item(LangItem::Drop, None),
+ obligation.predicate.skip_binder().trait_ref.self_ty(),
+ Some,
+ );
+
+ if let Some(impl_def_id) = relevant_impl {
+ // Check that `impl Drop` is actually const, if there is a custom impl
+ if self.tcx().constness(impl_def_id) == hir::Constness::Const {
+ candidates.vec.push(ConstDestructCandidate(Some(impl_def_id)));
+ }
+ } else {
+ // Otherwise check the ADT like a built-in type (structurally)
+ candidates.vec.push(ConstDestructCandidate(None));
+ }
+ }
+
+ ty::Infer(_) => {
+ candidates.ambiguous = true;
+ }
+ }
+ }
+}
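
One consequence of `assemble_builtin_bound_candidates` worth spelling out: the compiler itself, not a library impl, provides `Copy`/`Clone` support for tuples and arrays of suitable element types (as the comment on the `Clone` branch above notes), with the `Where(nested)` conditions becoming nested obligations on those elements. Illustration:

    fn assert_clone<T: Clone>() {}

    #[derive(Clone)]
    struct Wrapped(u8);

    fn main() {
        // Builtin candidate with nested obligations `u8: Clone`
        // and `Wrapped: Clone`.
        assert_clone::<(u8, Wrapped)>();
        // Arrays work the same way: `[Wrapped; 4]: Clone` holds
        // because `Wrapped: Clone` does.
        assert_clone::<[Wrapped; 4]>();
    }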
diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
new file mode 100644
index 000000000..2a1099fc8
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs
@@ -0,0 +1,1266 @@
+//! Confirmation.
+//!
+//! Confirmation unifies the output type parameters of the trait
+//! with the values found in the obligation, possibly yielding a
+//! type error. See the [rustc dev guide] for more details.
+//!
+//! [rustc dev guide]:
+//! https://rustc-dev-guide.rust-lang.org/traits/resolution.html#confirmation
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::bit_set::GrowableBitSet;
+use rustc_infer::infer::InferOk;
+use rustc_infer::infer::LateBoundRegionConversionTime::HigherRankedType;
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef};
+use rustc_middle::ty::{self, GenericParamDefKind, Ty, TyCtxt};
+use rustc_middle::ty::{ToPolyTraitRef, ToPredicate};
+use rustc_span::def_id::DefId;
+
+use crate::traits::project::{normalize_with_depth, normalize_with_depth_to};
+use crate::traits::util::{self, closure_trait_ref_and_return_type, predicate_for_trait_def};
+use crate::traits::{
+ BuiltinDerivedObligation, ImplDerivedObligation, ImplDerivedObligationCause, ImplSource,
+ ImplSourceAutoImplData, ImplSourceBuiltinData, ImplSourceClosureData,
+ ImplSourceConstDestructData, ImplSourceDiscriminantKindData, ImplSourceFnPointerData,
+ ImplSourceGeneratorData, ImplSourceObjectData, ImplSourcePointeeData, ImplSourceTraitAliasData,
+ ImplSourceTraitUpcastingData, ImplSourceUserDefinedData, Normalized, ObjectCastObligation,
+ Obligation, ObligationCause, OutputTypeParameterMismatch, PredicateObligation, Selection,
+ SelectionError, TraitNotObjectSafe, TraitObligation, Unimplemented, VtblSegment,
+};
+
+use super::BuiltinImplConditions;
+use super::SelectionCandidate::{self, *};
+use super::SelectionContext;
+
+use std::iter;
+use std::ops::ControlFlow;
+
+impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
+ #[instrument(level = "debug", skip(self))]
+ pub(super) fn confirm_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ candidate: SelectionCandidate<'tcx>,
+ ) -> Result<Selection<'tcx>, SelectionError<'tcx>> {
+ let mut impl_src = match candidate {
+ BuiltinCandidate { has_nested } => {
+ let data = self.confirm_builtin_candidate(obligation, has_nested);
+ ImplSource::Builtin(data)
+ }
+
+ TransmutabilityCandidate => {
+ let data = self.confirm_transmutability_candidate(obligation)?;
+ ImplSource::Builtin(data)
+ }
+
+ ParamCandidate(param) => {
+ let obligations =
+ self.confirm_param_candidate(obligation, param.map_bound(|t| t.trait_ref));
+ ImplSource::Param(obligations, param.skip_binder().constness)
+ }
+
+ ImplCandidate(impl_def_id) => {
+ ImplSource::UserDefined(self.confirm_impl_candidate(obligation, impl_def_id))
+ }
+
+ AutoImplCandidate(trait_def_id) => {
+ let data = self.confirm_auto_impl_candidate(obligation, trait_def_id);
+ ImplSource::AutoImpl(data)
+ }
+
+ ProjectionCandidate(idx) => {
+ let obligations = self.confirm_projection_candidate(obligation, idx)?;
+ // FIXME(jschievink): constness
+ ImplSource::Param(obligations, ty::BoundConstness::NotConst)
+ }
+
+ ObjectCandidate(idx) => {
+ let data = self.confirm_object_candidate(obligation, idx)?;
+ ImplSource::Object(data)
+ }
+
+ ClosureCandidate => {
+ let vtable_closure = self.confirm_closure_candidate(obligation)?;
+ ImplSource::Closure(vtable_closure)
+ }
+
+ GeneratorCandidate => {
+ let vtable_generator = self.confirm_generator_candidate(obligation)?;
+ ImplSource::Generator(vtable_generator)
+ }
+
+ FnPointerCandidate { .. } => {
+ let data = self.confirm_fn_pointer_candidate(obligation)?;
+ ImplSource::FnPointer(data)
+ }
+
+ DiscriminantKindCandidate => {
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
+ }
+
+ PointeeCandidate => ImplSource::Pointee(ImplSourcePointeeData),
+
+ TraitAliasCandidate(alias_def_id) => {
+ let data = self.confirm_trait_alias_candidate(obligation, alias_def_id);
+ ImplSource::TraitAlias(data)
+ }
+
+ BuiltinObjectCandidate => {
+ // This indicates something like `Trait + Send: Send`. In this case, we know that
+ // this holds because that's what the object type is telling us, and there are really
+ // no additional obligations to prove and no types in particular to unify, etc.
+ ImplSource::Param(Vec::new(), ty::BoundConstness::NotConst)
+ }
+
+ BuiltinUnsizeCandidate => {
+ let data = self.confirm_builtin_unsize_candidate(obligation)?;
+ ImplSource::Builtin(data)
+ }
+
+ TraitUpcastingUnsizeCandidate(idx) => {
+ let data = self.confirm_trait_upcasting_unsize_candidate(obligation, idx)?;
+ ImplSource::TraitUpcasting(data)
+ }
+
+ ConstDestructCandidate(def_id) => {
+ let data = self.confirm_const_destruct_candidate(obligation, def_id)?;
+ ImplSource::ConstDestruct(data)
+ }
+ };
+
+ if !obligation.predicate.is_const_if_const() {
+ // normalize nested predicates according to parent predicate's constness.
+ impl_src = impl_src.map(|mut o| {
+ o.predicate = o.predicate.without_const(self.tcx());
+ o
+ });
+ }
+
+ Ok(impl_src)
+ }
+
+ fn confirm_projection_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ idx: usize,
+ ) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ let tcx = self.tcx();
+
+ let trait_predicate = self.infcx.shallow_resolve(obligation.predicate);
+ let placeholder_trait_predicate =
+ self.infcx().replace_bound_vars_with_placeholders(trait_predicate).trait_ref;
+ let placeholder_self_ty = placeholder_trait_predicate.self_ty();
+ let placeholder_trait_predicate = ty::Binder::dummy(placeholder_trait_predicate);
+ let (def_id, substs) = match *placeholder_self_ty.kind() {
+ ty::Projection(proj) => (proj.item_def_id, proj.substs),
+ ty::Opaque(def_id, substs) => (def_id, substs),
+ _ => bug!("projection candidate for unexpected type: {:?}", placeholder_self_ty),
+ };
+
+ let candidate_predicate =
+ tcx.bound_item_bounds(def_id).map_bound(|i| i[idx]).subst(tcx, substs);
+ let candidate = candidate_predicate
+ .to_opt_poly_trait_pred()
+ .expect("projection candidate is not a trait predicate")
+ .map_bound(|t| t.trait_ref);
+ let mut obligations = Vec::new();
+ let candidate = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ candidate,
+ &mut obligations,
+ );
+
+ obligations.extend(self.infcx.commit_if_ok(|_| {
+ self.infcx
+ .at(&obligation.cause, obligation.param_env)
+ .sup(placeholder_trait_predicate, candidate)
+ .map(|InferOk { obligations, .. }| obligations)
+ .map_err(|_| Unimplemented)
+ })?);
+
+ if let ty::Projection(..) = placeholder_self_ty.kind() {
+ let predicates = tcx.predicates_of(def_id).instantiate_own(tcx, substs).predicates;
+ debug!(?predicates, "projection predicates");
+ for predicate in predicates {
+ let normalized = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ predicate,
+ &mut obligations,
+ );
+ obligations.push(Obligation::with_depth(
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ normalized,
+ ));
+ }
+ }
+
+ Ok(obligations)
+ }
+
+ fn confirm_param_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ param: ty::PolyTraitRef<'tcx>,
+ ) -> Vec<PredicateObligation<'tcx>> {
+ debug!(?obligation, ?param, "confirm_param_candidate");
+
+ // During evaluation, we already checked that this
+ // where-clause trait-ref could be unified with the obligation
+ // trait-ref. Repeat that unification now without any
+ // transactional boundary; it should not fail.
+ match self.match_where_clause_trait_ref(obligation, param) {
+ Ok(obligations) => obligations,
+ Err(()) => {
+ bug!(
+ "Where clause `{:?}` was applicable to `{:?}` but now is not",
+ param,
+ obligation
+ );
+ }
+ }
+ }
+
+ fn confirm_builtin_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ has_nested: bool,
+ ) -> ImplSourceBuiltinData<PredicateObligation<'tcx>> {
+ debug!(?obligation, ?has_nested, "confirm_builtin_candidate");
+
+ let lang_items = self.tcx().lang_items();
+ let obligations = if has_nested {
+ let trait_def = obligation.predicate.def_id();
+ let conditions = if Some(trait_def) == lang_items.sized_trait() {
+ self.sized_conditions(obligation)
+ } else if Some(trait_def) == lang_items.copy_trait() {
+ self.copy_clone_conditions(obligation)
+ } else if Some(trait_def) == lang_items.clone_trait() {
+ self.copy_clone_conditions(obligation)
+ } else {
+ bug!("unexpected builtin trait {:?}", trait_def)
+ };
+ let BuiltinImplConditions::Where(nested) = conditions else {
+ bug!("obligation {:?} had matched a builtin impl but now doesn't", obligation);
+ };
+
+ let cause = obligation.derived_cause(BuiltinDerivedObligation);
+ ensure_sufficient_stack(|| {
+ self.collect_predicates_for_types(
+ obligation.param_env,
+ cause,
+ obligation.recursion_depth + 1,
+ trait_def,
+ nested,
+ )
+ })
+ } else {
+ vec![]
+ };
+
+ debug!(?obligations);
+
+ ImplSourceBuiltinData { nested: obligations }
+ }
+
+ fn confirm_transmutability_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> Result<ImplSourceBuiltinData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ debug!(?obligation, "confirm_transmutability_candidate");
+
+ let predicate = obligation.predicate;
+
+ let type_at = |i| predicate.map_bound(|p| p.trait_ref.substs.type_at(i));
+ let bool_at = |i| {
+ predicate
+ .skip_binder()
+ .trait_ref
+ .substs
+ .const_at(i)
+ .try_eval_bool(self.tcx(), obligation.param_env)
+ .unwrap_or(true)
+ };
+
+ let src_and_dst = predicate.map_bound(|p| rustc_transmute::Types {
+ src: p.trait_ref.substs.type_at(1),
+ dst: p.trait_ref.substs.type_at(0),
+ });
+
+ let scope = type_at(2).skip_binder();
+
+ let assume = rustc_transmute::Assume {
+ alignment: bool_at(3),
+ lifetimes: bool_at(4),
+ validity: bool_at(5),
+ visibility: bool_at(6),
+ };
+
+ let cause = obligation.cause.clone();
+
+ let mut transmute_env = rustc_transmute::TransmuteTypeEnv::new(self.infcx);
+
+ let maybe_transmutable = transmute_env.is_transmutable(cause, src_and_dst, scope, assume);
+
+ use rustc_transmute::Answer;
+
+ match maybe_transmutable {
+ Answer::Yes => Ok(ImplSourceBuiltinData { nested: vec![] }),
+ _ => Err(Unimplemented),
+ }
+ }
+
+ /// This handles the case where an `auto trait Foo` impl is being used.
+ /// The idea is that the impl applies to `X : Foo` if the following conditions are met:
+ ///
+ /// 1. For each constituent type `Y` in `X`, `Y : Foo` holds
+ /// 2. For each where-clause `C` declared on `Foo`, `[Self => X] C` holds.
+ fn confirm_auto_impl_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ trait_def_id: DefId,
+ ) -> ImplSourceAutoImplData<PredicateObligation<'tcx>> {
+ debug!(?obligation, ?trait_def_id, "confirm_auto_impl_candidate");
+
+ let self_ty = self.infcx.shallow_resolve(obligation.predicate.self_ty());
+ let types = self.constituent_types_for_ty(self_ty);
+ self.vtable_auto_impl(obligation, trait_def_id, types)
+ }
+
+ /// See `confirm_auto_impl_candidate`.
+ fn vtable_auto_impl(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ trait_def_id: DefId,
+ nested: ty::Binder<'tcx, Vec<Ty<'tcx>>>,
+ ) -> ImplSourceAutoImplData<PredicateObligation<'tcx>> {
+ debug!(?nested, "vtable_auto_impl");
+ ensure_sufficient_stack(|| {
+ let cause = obligation.derived_cause(BuiltinDerivedObligation);
+
+ let poly_trait_ref = obligation.predicate.to_poly_trait_ref();
+ let trait_ref = self.infcx.replace_bound_vars_with_placeholders(poly_trait_ref);
+ let trait_obligations: Vec<PredicateObligation<'_>> = self.impl_or_trait_obligations(
+ &cause,
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ trait_def_id,
+ &trait_ref.substs,
+ obligation.predicate,
+ );
+
+ let mut obligations = self.collect_predicates_for_types(
+ obligation.param_env,
+ cause,
+ obligation.recursion_depth + 1,
+ trait_def_id,
+ nested,
+ );
+
+ // Adds the predicates from the trait. Note that this contains a `Self: Trait`
+ // predicate as usual. It won't have any effect since auto traits are coinductive.
+ obligations.extend(trait_obligations);
+
+ debug!(?obligations, "vtable_auto_impl");
+
+ ImplSourceAutoImplData { trait_def_id, nested: obligations }
+ })
+ }
+
+ fn confirm_impl_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ impl_def_id: DefId,
+ ) -> ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>> {
+ debug!(?obligation, ?impl_def_id, "confirm_impl_candidate");
+
+ // First, create the substitutions by matching the impl again,
+ // this time not in a probe.
+ let substs = self.rematch_impl(impl_def_id, obligation);
+ debug!(?substs, "impl substs");
+ ensure_sufficient_stack(|| {
+ self.vtable_impl(
+ impl_def_id,
+ substs,
+ &obligation.cause,
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ obligation.predicate,
+ )
+ })
+ }
+
+ fn vtable_impl(
+ &mut self,
+ impl_def_id: DefId,
+ substs: Normalized<'tcx, SubstsRef<'tcx>>,
+ cause: &ObligationCause<'tcx>,
+ recursion_depth: usize,
+ param_env: ty::ParamEnv<'tcx>,
+ parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
+ ) -> ImplSourceUserDefinedData<'tcx, PredicateObligation<'tcx>> {
+ debug!(?impl_def_id, ?substs, ?recursion_depth, "vtable_impl");
+
+ let mut impl_obligations = self.impl_or_trait_obligations(
+ cause,
+ recursion_depth,
+ param_env,
+ impl_def_id,
+ &substs.value,
+ parent_trait_pred,
+ );
+
+ debug!(?impl_obligations, "vtable_impl");
+
+ // Because of RFC447, the impl-trait-ref and obligations
+ // are sufficient to determine the impl substs, without
+ // relying on projections in the impl-trait-ref.
+ //
+ // e.g., `impl<U: Tr, V: Iterator<Item=U>> Foo<<U as Tr>::T> for V`
+ impl_obligations.extend(substs.obligations);
+
+ ImplSourceUserDefinedData { impl_def_id, substs: substs.value, nested: impl_obligations }
+ }
+
+ fn confirm_object_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ index: usize,
+ ) -> Result<ImplSourceObjectData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ let tcx = self.tcx();
+ debug!(?obligation, ?index, "confirm_object_candidate");
+
+ let trait_predicate = self.infcx.replace_bound_vars_with_placeholders(obligation.predicate);
+ let self_ty = self.infcx.shallow_resolve(trait_predicate.self_ty());
+ let obligation_trait_ref = ty::Binder::dummy(trait_predicate.trait_ref);
+ let ty::Dynamic(data, ..) = *self_ty.kind() else {
+ span_bug!(obligation.cause.span, "object candidate with non-object");
+ };
+
+ let object_trait_ref = data.principal().unwrap_or_else(|| {
+ span_bug!(obligation.cause.span, "object candidate with no principal")
+ });
+ let object_trait_ref = self.infcx.replace_bound_vars_with_fresh_vars(
+ obligation.cause.span,
+ HigherRankedType,
+ object_trait_ref,
+ );
+ let object_trait_ref = object_trait_ref.with_self_ty(self.tcx(), self_ty);
+
+ let mut nested = vec![];
+
+ let mut supertraits = util::supertraits(tcx, ty::Binder::dummy(object_trait_ref));
+ let unnormalized_upcast_trait_ref =
+ supertraits.nth(index).expect("supertraits iterator no longer has as many elements");
+
+ let upcast_trait_ref = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ unnormalized_upcast_trait_ref,
+ &mut nested,
+ );
+
+ nested.extend(self.infcx.commit_if_ok(|_| {
+ self.infcx
+ .at(&obligation.cause, obligation.param_env)
+ .sup(obligation_trait_ref, upcast_trait_ref)
+ .map(|InferOk { obligations, .. }| obligations)
+ .map_err(|_| Unimplemented)
+ })?);
+
+ // Check supertraits hold. This is so that their associated type bounds
+ // will be checked in the code below.
+ for super_trait in tcx
+ .super_predicates_of(trait_predicate.def_id())
+ .instantiate(tcx, trait_predicate.trait_ref.substs)
+ .predicates
+ .into_iter()
+ {
+ let normalized_super_trait = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ super_trait,
+ &mut nested,
+ );
+ nested.push(Obligation::new(
+ obligation.cause.clone(),
+ obligation.param_env,
+ normalized_super_trait,
+ ));
+ }
+
+ let assoc_types: Vec<_> = tcx
+ .associated_items(trait_predicate.def_id())
+ .in_definition_order()
+ .filter_map(
+ |item| if item.kind == ty::AssocKind::Type { Some(item.def_id) } else { None },
+ )
+ .collect();
+
+ for assoc_type in assoc_types {
+ let defs: &ty::Generics = tcx.generics_of(assoc_type);
+
+ if !defs.params.is_empty() && !tcx.features().generic_associated_types_extended {
+ tcx.sess.delay_span_bug(
+ obligation.cause.span,
+ "GATs in trait object shouldn't have been considered",
+ );
+ return Err(SelectionError::Unimplemented);
+ }
+
+ // This maybe belongs in wf, but wf can't (and doesn't) handle
+ // higher-ranked things.
+ // Prevent, e.g., `dyn Iterator<Item = str>`.
+ for bound in self.tcx().bound_item_bounds(assoc_type).transpose_iter() {
+ let subst_bound =
+ if defs.count() == 0 {
+ bound.subst(tcx, trait_predicate.trait_ref.substs)
+ } else {
+ let mut substs = smallvec::SmallVec::with_capacity(defs.count());
+ substs.extend(trait_predicate.trait_ref.substs.iter());
+ let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> =
+ smallvec::SmallVec::with_capacity(
+ bound.0.kind().bound_vars().len() + defs.count(),
+ );
+ bound_vars.extend(bound.0.kind().bound_vars().into_iter());
+ InternalSubsts::fill_single(&mut substs, defs, &mut |param, _| match param
+ .kind
+ {
+ GenericParamDefKind::Type { .. } => {
+ let kind = ty::BoundTyKind::Param(param.name);
+ let bound_var = ty::BoundVariableKind::Ty(kind);
+ bound_vars.push(bound_var);
+ tcx.mk_ty(ty::Bound(
+ ty::INNERMOST,
+ ty::BoundTy {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind,
+ },
+ ))
+ .into()
+ }
+ GenericParamDefKind::Lifetime => {
+ let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name);
+ let bound_var = ty::BoundVariableKind::Region(kind);
+ bound_vars.push(bound_var);
+ tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind,
+ },
+ ))
+ .into()
+ }
+ GenericParamDefKind::Const { .. } => {
+ let bound_var = ty::BoundVariableKind::Const;
+ bound_vars.push(bound_var);
+ tcx.mk_const(ty::ConstS {
+ ty: tcx.type_of(param.def_id),
+ kind: ty::ConstKind::Bound(
+ ty::INNERMOST,
+ ty::BoundVar::from_usize(bound_vars.len() - 1),
+ ),
+ })
+ .into()
+ }
+ });
+ let bound_vars = tcx.mk_bound_variable_kinds(bound_vars.into_iter());
+ let assoc_ty_substs = tcx.intern_substs(&substs);
+
+ let bound =
+ bound.map_bound(|b| b.kind().skip_binder()).subst(tcx, assoc_ty_substs);
+ tcx.mk_predicate(ty::Binder::bind_with_vars(bound, bound_vars))
+ };
+ let normalized_bound = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ subst_bound,
+ &mut nested,
+ );
+ nested.push(Obligation::new(
+ obligation.cause.clone(),
+ obligation.param_env,
+ normalized_bound,
+ ));
+ }
+ }
+
+ debug!(?nested, "object nested obligations");
+
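+        // Find where this trait's own methods start in the object's vtable,
+        // i.e. after the common metadata entries and any preceding
+        // supertrait segments.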
+ let vtable_base = super::super::vtable_trait_first_method_offset(
+ tcx,
+ (unnormalized_upcast_trait_ref, ty::Binder::dummy(object_trait_ref)),
+ );
+
+ Ok(ImplSourceObjectData { upcast_trait_ref, vtable_base, nested })
+ }
+
+ fn confirm_fn_pointer_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> Result<ImplSourceFnPointerData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>>
+ {
+ debug!(?obligation, "confirm_fn_pointer_candidate");
+
+ // Okay to skip binder; it is reintroduced below.
+ let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
+ let sig = self_ty.fn_sig(self.tcx());
+ let trait_ref = closure_trait_ref_and_return_type(
+ self.tcx(),
+ obligation.predicate.def_id(),
+ self_ty,
+ sig,
+ util::TupleArgumentsFlag::Yes,
+ )
+ .map_bound(|(trait_ref, _)| trait_ref);
+
+ let nested = self.confirm_poly_trait_refs(obligation, trait_ref)?;
+ Ok(ImplSourceFnPointerData { fn_ty: self_ty, nested })
+ }
+
+ fn confirm_trait_alias_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ alias_def_id: DefId,
+ ) -> ImplSourceTraitAliasData<'tcx, PredicateObligation<'tcx>> {
+ debug!(?obligation, ?alias_def_id, "confirm_trait_alias_candidate");
+
+ let predicate = self.infcx().replace_bound_vars_with_placeholders(obligation.predicate);
+ let trait_ref = predicate.trait_ref;
+ let trait_def_id = trait_ref.def_id;
+ let substs = trait_ref.substs;
+
+ let trait_obligations = self.impl_or_trait_obligations(
+ &obligation.cause,
+ obligation.recursion_depth,
+ obligation.param_env,
+ trait_def_id,
+ &substs,
+ obligation.predicate,
+ );
+
+ debug!(?trait_def_id, ?trait_obligations, "trait alias obligations");
+
+ ImplSourceTraitAliasData { alias_def_id, substs, nested: trait_obligations }
+ }
+
+ fn confirm_generator_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> Result<ImplSourceGeneratorData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>>
+ {
+        // Okay to skip binder because the substs on generator types never
+        // touch bound regions; they just capture the in-scope
+        // type/region parameters.
+ let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
+        let ty::Generator(generator_def_id, substs, _) = *self_ty.kind() else {
+            bug!("generator candidate for non-generator {:?}", obligation);
+        };
+
+ debug!(?obligation, ?generator_def_id, ?substs, "confirm_generator_candidate");
+
+ let trait_ref = self.generator_trait_ref_unnormalized(obligation, substs);
+
+ let nested = self.confirm_poly_trait_refs(obligation, trait_ref)?;
+ debug!(?trait_ref, ?nested, "generator candidate obligations");
+
+ Ok(ImplSourceGeneratorData { generator_def_id, substs, nested })
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn confirm_closure_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> Result<ImplSourceClosureData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ let kind = self
+ .tcx()
+ .fn_trait_kind_from_lang_item(obligation.predicate.def_id())
+ .unwrap_or_else(|| bug!("closure candidate for non-fn trait {:?}", obligation));
+
+        // Okay to skip binder because the substs on closure types never
+        // touch bound regions; they just capture the in-scope
+        // type/region parameters.
+ let self_ty = self.infcx.shallow_resolve(obligation.self_ty().skip_binder());
+ let ty::Closure(closure_def_id, substs) = *self_ty.kind() else {
+ bug!("closure candidate for non-closure {:?}", obligation);
+ };
+
+ let trait_ref = self.closure_trait_ref_unnormalized(obligation, substs);
+ let mut nested = self.confirm_poly_trait_refs(obligation, trait_ref)?;
+
+ debug!(?closure_def_id, ?trait_ref, ?nested, "confirm closure candidate obligations");
+
+ // FIXME: Chalk
+
+ if !self.tcx().sess.opts.unstable_opts.chalk {
+ nested.push(Obligation::new(
+ obligation.cause.clone(),
+ obligation.param_env,
+ ty::Binder::dummy(ty::PredicateKind::ClosureKind(closure_def_id, substs, kind))
+ .to_predicate(self.tcx()),
+ ));
+ }
+
+ Ok(ImplSourceClosureData { closure_def_id, substs, nested })
+ }
+
+ /// In the case of closure types and fn pointers,
+ /// we currently treat the input type parameters on the trait as
+ /// outputs. This means that when we have a match we have only
+ /// considered the self type, so we have to go back and make sure
+ /// to relate the argument types too. This is kind of wrong, but
+ /// since we control the full set of impls, also not that wrong,
+ /// and it DOES yield better error messages (since we don't report
+ /// errors as if there is no applicable impl, but rather report
+    /// errors about mismatched argument types).
+ ///
+ /// Here is an example. Imagine we have a closure expression
+ /// and we desugared it so that the type of the expression is
+ /// `Closure`, and `Closure` expects `i32` as argument. Then it
+ /// is "as if" the compiler generated this impl:
+ /// ```ignore (illustrative)
+ /// impl Fn(i32) for Closure { ... }
+ /// ```
+ /// Now imagine our obligation is `Closure: Fn(usize)`. So far
+ /// we have matched the self type `Closure`. At this point we'll
+ /// compare the `i32` to `usize` and generate an error.
+ ///
+ /// Note that this checking occurs *after* the impl has selected,
+ /// because these output type parameters should not affect the
+ /// selection of the impl. Therefore, if there is a mismatch, we
+ /// report an error to the user.
+ #[instrument(skip(self), level = "trace")]
+ fn confirm_poly_trait_refs(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ expected_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Result<Vec<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ let obligation_trait_ref = obligation.predicate.to_poly_trait_ref();
+ // Normalize the obligation and expected trait refs together, because why not
+ let Normalized { obligations: nested, value: (obligation_trait_ref, expected_trait_ref) } =
+ ensure_sufficient_stack(|| {
+ normalize_with_depth(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ (obligation_trait_ref, expected_trait_ref),
+ )
+ });
+
+ self.infcx
+ .at(&obligation.cause, obligation.param_env)
+ .sup(obligation_trait_ref, expected_trait_ref)
+ .map(|InferOk { mut obligations, .. }| {
+ obligations.extend(nested);
+ obligations
+ })
+ .map_err(|e| OutputTypeParameterMismatch(expected_trait_ref, obligation_trait_ref, e))
+ }
+
+ fn confirm_trait_upcasting_unsize_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ idx: usize,
+ ) -> Result<ImplSourceTraitUpcastingData<'tcx, PredicateObligation<'tcx>>, SelectionError<'tcx>>
+ {
+ let tcx = self.tcx();
+
+ // `assemble_candidates_for_unsizing` should ensure there are no late-bound
+ // regions here. See the comment there for more details.
+ let source = self.infcx.shallow_resolve(obligation.self_ty().no_bound_vars().unwrap());
+ let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
+ let target = self.infcx.shallow_resolve(target);
+
+ debug!(?source, ?target, "confirm_trait_upcasting_unsize_candidate");
+
+ let mut nested = vec![];
+ let source_trait_ref;
+ let upcast_trait_ref;
+ match (source.kind(), target.kind()) {
+ // TraitA+Kx+'a -> TraitB+Ky+'b (trait upcasting coercion).
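+            // e.g. (illustrative): with `trait Sub: Super {}`, coercing
+            // `&dyn Sub` to `&dyn Super`; `idx` picks which supertrait of
+            // the source principal is the upcast target.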
+ (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
+ // See `assemble_candidates_for_unsizing` for more info.
+ // We already checked the compatibility of auto traits within `assemble_candidates_for_unsizing`.
+ let principal_a = data_a.principal().unwrap();
+ source_trait_ref = principal_a.with_self_ty(tcx, source);
+ upcast_trait_ref = util::supertraits(tcx, source_trait_ref).nth(idx).unwrap();
+ assert_eq!(data_b.principal_def_id(), Some(upcast_trait_ref.def_id()));
+ let existential_predicate = upcast_trait_ref.map_bound(|trait_ref| {
+ ty::ExistentialPredicate::Trait(ty::ExistentialTraitRef::erase_self_ty(
+ tcx, trait_ref,
+ ))
+ });
+ let iter = Some(existential_predicate)
+ .into_iter()
+ .chain(
+ data_a
+ .projection_bounds()
+ .map(|b| b.map_bound(ty::ExistentialPredicate::Projection)),
+ )
+ .chain(
+ data_b
+ .auto_traits()
+ .map(ty::ExistentialPredicate::AutoTrait)
+ .map(ty::Binder::dummy),
+ );
+ let existential_predicates = tcx.mk_poly_existential_predicates(iter);
+ let source_trait = tcx.mk_dynamic(existential_predicates, r_b);
+
+ // Require that the traits involved in this upcast are **equal**;
+ // only the **lifetime bound** is changed.
+ let InferOk { obligations, .. } = self
+ .infcx
+ .at(&obligation.cause, obligation.param_env)
+ .sup(target, source_trait)
+ .map_err(|_| Unimplemented)?;
+ nested.extend(obligations);
+
+ // Register one obligation for 'a: 'b.
+ let cause = ObligationCause::new(
+ obligation.cause.span,
+ obligation.cause.body_id,
+ ObjectCastObligation(source, target),
+ );
+ let outlives = ty::OutlivesPredicate(r_a, r_b);
+ nested.push(Obligation::with_depth(
+ cause,
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ obligation.predicate.rebind(outlives).to_predicate(tcx),
+ ));
+ }
+ _ => bug!(),
+ };
+
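+        // Walk the source trait's vtable layout to locate the slot that
+        // holds the pointer to the upcast target's vtable; `None` means the
+        // target's vtable is a prefix of the source's, so no extra slot is
+        // needed.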
+ let vtable_segment_callback = {
+ let mut vptr_offset = 0;
+ move |segment| {
+ match segment {
+ VtblSegment::MetadataDSA => {
+ vptr_offset += TyCtxt::COMMON_VTABLE_ENTRIES.len();
+ }
+ VtblSegment::TraitOwnEntries { trait_ref, emit_vptr } => {
+ vptr_offset += util::count_own_vtable_entries(tcx, trait_ref);
+ if trait_ref == upcast_trait_ref {
+ if emit_vptr {
+ return ControlFlow::Break(Some(vptr_offset));
+ } else {
+ return ControlFlow::Break(None);
+ }
+ }
+
+ if emit_vptr {
+ vptr_offset += 1;
+ }
+ }
+ }
+ ControlFlow::Continue(())
+ }
+ };
+
+ let vtable_vptr_slot =
+ super::super::prepare_vtable_segments(tcx, source_trait_ref, vtable_segment_callback)
+ .unwrap();
+
+ Ok(ImplSourceTraitUpcastingData { upcast_trait_ref, vtable_vptr_slot, nested })
+ }
+
+ fn confirm_builtin_unsize_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> Result<ImplSourceBuiltinData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+ let tcx = self.tcx();
+
+ // `assemble_candidates_for_unsizing` should ensure there are no late-bound
+ // regions here. See the comment there for more details.
+ let source = self.infcx.shallow_resolve(obligation.self_ty().no_bound_vars().unwrap());
+ let target = obligation.predicate.skip_binder().trait_ref.substs.type_at(1);
+ let target = self.infcx.shallow_resolve(target);
+
+ debug!(?source, ?target, "confirm_builtin_unsize_candidate");
+
+ let mut nested = vec![];
+ match (source.kind(), target.kind()) {
+ // Trait+Kx+'a -> Trait+Ky+'b (auto traits and lifetime subtyping).
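+            // e.g. (illustrative): `dyn Trait + Send + 'static` ->
+            // `dyn Trait + 'a`: same principal, possibly dropping auto
+            // traits and shortening the lifetime.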
+ (&ty::Dynamic(ref data_a, r_a), &ty::Dynamic(ref data_b, r_b)) => {
+ // See `assemble_candidates_for_unsizing` for more info.
+ // We already checked the compatibility of auto traits within `assemble_candidates_for_unsizing`.
+ let iter = data_a
+ .principal()
+ .map(|b| b.map_bound(ty::ExistentialPredicate::Trait))
+ .into_iter()
+ .chain(
+ data_a
+ .projection_bounds()
+ .map(|b| b.map_bound(ty::ExistentialPredicate::Projection)),
+ )
+ .chain(
+ data_b
+ .auto_traits()
+ .map(ty::ExistentialPredicate::AutoTrait)
+ .map(ty::Binder::dummy),
+ );
+ let existential_predicates = tcx.mk_poly_existential_predicates(iter);
+ let source_trait = tcx.mk_dynamic(existential_predicates, r_b);
+
+ // Require that the traits involved in this upcast are **equal**;
+ // only the **lifetime bound** is changed.
+ let InferOk { obligations, .. } = self
+ .infcx
+ .at(&obligation.cause, obligation.param_env)
+ .sup(target, source_trait)
+ .map_err(|_| Unimplemented)?;
+ nested.extend(obligations);
+
+ // Register one obligation for 'a: 'b.
+ let cause = ObligationCause::new(
+ obligation.cause.span,
+ obligation.cause.body_id,
+ ObjectCastObligation(source, target),
+ );
+ let outlives = ty::OutlivesPredicate(r_a, r_b);
+ nested.push(Obligation::with_depth(
+ cause,
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ obligation.predicate.rebind(outlives).to_predicate(tcx),
+ ));
+ }
+
+ // `T` -> `Trait`
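+            // e.g. (illustrative): `u32` -> `dyn Display + 'static`; this
+            // registers `u32: Display`, `u32: Sized`, and `u32: 'static`
+            // below.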
+ (_, &ty::Dynamic(ref data, r)) => {
+ let mut object_dids = data.auto_traits().chain(data.principal_def_id());
+ if let Some(did) = object_dids.find(|did| !tcx.is_object_safe(*did)) {
+ return Err(TraitNotObjectSafe(did));
+ }
+
+ let cause = ObligationCause::new(
+ obligation.cause.span,
+ obligation.cause.body_id,
+ ObjectCastObligation(source, target),
+ );
+
+ let predicate_to_obligation = |predicate| {
+ Obligation::with_depth(
+ cause.clone(),
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ predicate,
+ )
+ };
+
+ // Create obligations:
+ // - Casting `T` to `Trait`
+ // - For all the various builtin bounds attached to the object cast. (In other
+ // words, if the object type is `Foo + Send`, this would create an obligation for
+ // the `Send` check.)
+ // - Projection predicates
+ nested.extend(
+ data.iter().map(|predicate| {
+ predicate_to_obligation(predicate.with_self_ty(tcx, source))
+ }),
+ );
+
+ // We can only make objects from sized types.
+ let tr = ty::Binder::dummy(ty::TraitRef::new(
+ tcx.require_lang_item(LangItem::Sized, None),
+ tcx.mk_substs_trait(source, &[]),
+ ));
+ nested.push(predicate_to_obligation(tr.without_const().to_predicate(tcx)));
+
+ // If the type is `Foo + 'a`, ensure that the type
+ // being cast to `Foo + 'a` outlives `'a`:
+ let outlives = ty::OutlivesPredicate(source, r);
+ nested.push(predicate_to_obligation(ty::Binder::dummy(outlives).to_predicate(tcx)));
+ }
+
+ // `[T; n]` -> `[T]`
+ (&ty::Array(a, _), &ty::Slice(b)) => {
+ let InferOk { obligations, .. } = self
+ .infcx
+ .at(&obligation.cause, obligation.param_env)
+ .eq(b, a)
+ .map_err(|_| Unimplemented)?;
+ nested.extend(obligations);
+ }
+
+ // `Struct<T>` -> `Struct<U>`
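+            // e.g. (illustrative): for a hypothetical
+            // `struct S<T: ?Sized> { info: u8, data: T }`, this handles
+            // `S<[u8; 4]>` -> `S<[u8]>`; only the tail field may mention
+            // the parameters being unsized.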
+ (&ty::Adt(def, substs_a), &ty::Adt(_, substs_b)) => {
+ let maybe_unsizing_param_idx = |arg: GenericArg<'tcx>| match arg.unpack() {
+ GenericArgKind::Type(ty) => match ty.kind() {
+ ty::Param(p) => Some(p.index),
+ _ => None,
+ },
+
+ // Lifetimes aren't allowed to change during unsizing.
+ GenericArgKind::Lifetime(_) => None,
+
+ GenericArgKind::Const(ct) => match ct.kind() {
+ ty::ConstKind::Param(p) => Some(p.index),
+ _ => None,
+ },
+ };
+
+ // FIXME(eddyb) cache this (including computing `unsizing_params`)
+ // by putting it in a query; it would only need the `DefId` as it
+ // looks at declared field types, not anything substituted.
+
+ // The last field of the structure has to exist and contain type/const parameters.
+ let (tail_field, prefix_fields) =
+ def.non_enum_variant().fields.split_last().ok_or(Unimplemented)?;
+ let tail_field_ty = tcx.bound_type_of(tail_field.did);
+
+ let mut unsizing_params = GrowableBitSet::new_empty();
+ for arg in tail_field_ty.0.walk() {
+ if let Some(i) = maybe_unsizing_param_idx(arg) {
+ unsizing_params.insert(i);
+ }
+ }
+
+ // Ensure none of the other fields mention the parameters used
+ // in unsizing.
+ for field in prefix_fields {
+ for arg in tcx.type_of(field.did).walk() {
+ if let Some(i) = maybe_unsizing_param_idx(arg) {
+ unsizing_params.remove(i);
+ }
+ }
+ }
+
+ if unsizing_params.is_empty() {
+ return Err(Unimplemented);
+ }
+
+ // Extract `TailField<T>` and `TailField<U>` from `Struct<T>` and `Struct<U>`.
+ let source_tail = tail_field_ty.subst(tcx, substs_a);
+ let target_tail = tail_field_ty.subst(tcx, substs_b);
+
+ // Check that the source struct with the target's
+ // unsizing parameters is equal to the target.
+ let substs = tcx.mk_substs(substs_a.iter().enumerate().map(|(i, k)| {
+ if unsizing_params.contains(i as u32) { substs_b[i] } else { k }
+ }));
+ let new_struct = tcx.mk_adt(def, substs);
+ let InferOk { obligations, .. } = self
+ .infcx
+ .at(&obligation.cause, obligation.param_env)
+ .eq(target, new_struct)
+ .map_err(|_| Unimplemented)?;
+ nested.extend(obligations);
+
+ // Construct the nested `TailField<T>: Unsize<TailField<U>>` predicate.
+ nested.push(predicate_for_trait_def(
+ tcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.predicate.def_id(),
+ obligation.recursion_depth + 1,
+ source_tail,
+ &[target_tail.into()],
+ ));
+ }
+
+ // `(.., T)` -> `(.., U)`
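+            // e.g. (illustrative): `(u8, [u16; 4])` -> `(u8, [u16])`.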
+ (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => {
+ assert_eq!(tys_a.len(), tys_b.len());
+
+ // The last field of the tuple has to exist.
+ let (&a_last, a_mid) = tys_a.split_last().ok_or(Unimplemented)?;
+ let &b_last = tys_b.last().unwrap();
+
+ // Check that the source tuple with the target's
+ // last element is equal to the target.
+ let new_tuple = tcx.mk_tup(a_mid.iter().copied().chain(iter::once(b_last)));
+ let InferOk { obligations, .. } = self
+ .infcx
+ .at(&obligation.cause, obligation.param_env)
+ .eq(target, new_tuple)
+ .map_err(|_| Unimplemented)?;
+ nested.extend(obligations);
+
+ // Construct the nested `T: Unsize<U>` predicate.
+ nested.push(ensure_sufficient_stack(|| {
+ predicate_for_trait_def(
+ tcx,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.predicate.def_id(),
+ obligation.recursion_depth + 1,
+ a_last,
+ &[b_last.into()],
+ )
+ }));
+ }
+
+ _ => bug!(),
+ };
+
+ Ok(ImplSourceBuiltinData { nested })
+ }
+
+ fn confirm_const_destruct_candidate(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ impl_def_id: Option<DefId>,
+ ) -> Result<ImplSourceConstDestructData<PredicateObligation<'tcx>>, SelectionError<'tcx>> {
+        // `~const Destruct` in a non-const environment is always trivially
+        // true, since every type can be dropped outside of const contexts.
+ if !obligation.is_const() {
+ return Ok(ImplSourceConstDestructData { nested: vec![] });
+ }
+
+ let drop_trait = self.tcx().require_lang_item(LangItem::Drop, None);
+
+ let tcx = self.tcx();
+ let self_ty = self.infcx.shallow_resolve(obligation.self_ty());
+
+ let mut nested = vec![];
+ let cause = obligation.derived_cause(BuiltinDerivedObligation);
+
+ // If we have a custom `impl const Drop`, then
+ // first check it like a regular impl candidate.
+ // This is copied from confirm_impl_candidate but remaps the predicate to `~const Drop` beforehand.
+ if let Some(impl_def_id) = impl_def_id {
+ let mut new_obligation = obligation.clone();
+ new_obligation.predicate = new_obligation.predicate.map_bound(|mut trait_pred| {
+ trait_pred.trait_ref.def_id = drop_trait;
+ trait_pred
+ });
+ let substs = self.rematch_impl(impl_def_id, &new_obligation);
+ debug!(?substs, "impl substs");
+
+ let cause = obligation.derived_cause(|derived| {
+ ImplDerivedObligation(Box::new(ImplDerivedObligationCause {
+ derived,
+ impl_def_id,
+ span: obligation.cause.span,
+ }))
+ });
+ let obligations = ensure_sufficient_stack(|| {
+ self.vtable_impl(
+ impl_def_id,
+ substs,
+ &cause,
+ new_obligation.recursion_depth + 1,
+ new_obligation.param_env,
+ obligation.predicate,
+ )
+ });
+ nested.extend(obligations.nested);
+ }
+
+ // We want to confirm the ADT's fields if we have an ADT
+ let mut stack = match *self_ty.skip_binder().kind() {
+ ty::Adt(def, substs) => def.all_fields().map(|f| f.ty(tcx, substs)).collect(),
+ _ => vec![self_ty.skip_binder()],
+ };
+
+ while let Some(nested_ty) = stack.pop() {
+ match *nested_ty.kind() {
+                // We know these types are trivially droppable (no drop glue)
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Infer(ty::IntVar(_))
+ | ty::Infer(ty::FloatVar(_))
+ | ty::Str
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Never
+ | ty::Foreign(_) => {}
+
+ // These types are built-in, so we can fast-track by registering
+ // nested predicates for their constituent type(s)
+ ty::Array(ty, _) | ty::Slice(ty) => {
+ stack.push(ty);
+ }
+ ty::Tuple(tys) => {
+ stack.extend(tys.iter());
+ }
+ ty::Closure(_, substs) => {
+ stack.push(substs.as_closure().tupled_upvars_ty());
+ }
+ ty::Generator(_, substs, _) => {
+ let generator = substs.as_generator();
+ stack.extend([generator.tupled_upvars_ty(), generator.witness()]);
+ }
+ ty::GeneratorWitness(tys) => {
+ stack.extend(tcx.erase_late_bound_regions(tys).to_vec());
+ }
+
+ // If we have a projection type, make sure to normalize it so we replace it
+ // with a fresh infer variable
+ ty::Projection(..) => {
+ let predicate = normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ cause.clone(),
+ obligation.recursion_depth + 1,
+ self_ty
+ .rebind(ty::TraitPredicate {
+ trait_ref: ty::TraitRef {
+ def_id: self.tcx().require_lang_item(LangItem::Destruct, None),
+ substs: self.tcx().mk_substs_trait(nested_ty, &[]),
+ },
+ constness: ty::BoundConstness::ConstIfConst,
+ polarity: ty::ImplPolarity::Positive,
+ })
+ .to_predicate(tcx),
+ &mut nested,
+ );
+
+ nested.push(Obligation::with_depth(
+ cause.clone(),
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ predicate,
+ ));
+ }
+
+ // If we have any other type (e.g. an ADT), just register a nested obligation
+ // since it's either not `const Drop` (and we raise an error during selection),
+ // or it's an ADT (and we need to check for a custom impl during selection)
+ _ => {
+ let predicate = self_ty
+ .rebind(ty::TraitPredicate {
+ trait_ref: ty::TraitRef {
+ def_id: self.tcx().require_lang_item(LangItem::Destruct, None),
+ substs: self.tcx().mk_substs_trait(nested_ty, &[]),
+ },
+ constness: ty::BoundConstness::ConstIfConst,
+ polarity: ty::ImplPolarity::Positive,
+ })
+ .to_predicate(tcx);
+
+ nested.push(Obligation::with_depth(
+ cause.clone(),
+ obligation.recursion_depth + 1,
+ obligation.param_env,
+ predicate,
+ ));
+ }
+ }
+ }
+
+ Ok(ImplSourceConstDestructData { nested })
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs
new file mode 100644
index 000000000..c01ac1979
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs
@@ -0,0 +1,2698 @@
+//! Candidate selection. See the [rustc dev guide] for more information on how this works.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
+
+use self::EvaluationResult::*;
+use self::SelectionCandidate::*;
+
+use super::coherence::{self, Conflict};
+use super::const_evaluatable;
+use super::project;
+use super::project::normalize_with_depth_to;
+use super::project::ProjectionTyObligation;
+use super::util;
+use super::util::{closure_trait_ref_and_return_type, predicate_for_trait_def};
+use super::wf;
+use super::{
+ ErrorReporting, ImplDerivedObligation, ImplDerivedObligationCause, Normalized, Obligation,
+ ObligationCause, ObligationCauseCode, Overflow, PredicateObligation, Selection, SelectionError,
+ SelectionResult, TraitObligation, TraitQueryMode,
+};
+
+use crate::infer::{InferCtxt, InferOk, TypeFreshener};
+use crate::traits::error_reporting::InferCtxtExt;
+use crate::traits::project::ProjectAndUnifyResult;
+use crate::traits::project::ProjectionCacheKeyExt;
+use crate::traits::ProjectionCacheKey;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_errors::{Diagnostic, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::LateBoundRegionConversionTime;
+use rustc_middle::dep_graph::{DepKind, DepNodeIndex};
+use rustc_middle::mir::interpret::ErrorHandled;
+use rustc_middle::ty::abstract_const::NotConstEvaluatable;
+use rustc_middle::ty::fast_reject::{DeepRejectCtxt, TreatParams};
+use rustc_middle::ty::fold::BottomUpFolder;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::relate::TypeRelation;
+use rustc_middle::ty::subst::{Subst, SubstsRef};
+use rustc_middle::ty::{self, EarlyBinder, PolyProjectionPredicate, ToPolyTraitRef, ToPredicate};
+use rustc_middle::ty::{Ty, TyCtxt, TypeFoldable, TypeVisitable};
+use rustc_span::symbol::sym;
+
+use std::cell::{Cell, RefCell};
+use std::cmp;
+use std::fmt::{self, Display};
+use std::iter;
+
+pub use rustc_middle::traits::select::*;
+
+mod candidate_assembly;
+mod confirmation;
+
+#[derive(Clone, Debug, Eq, PartialEq, Hash)]
+pub enum IntercrateAmbiguityCause {
+ DownstreamCrate { trait_desc: String, self_desc: Option<String> },
+ UpstreamCrateUpdate { trait_desc: String, self_desc: Option<String> },
+ ReservationImpl { message: String },
+}
+
+impl IntercrateAmbiguityCause {
+ /// Emits notes when the overlap is caused by complex intercrate ambiguities.
+ /// See #23980 for details.
+ pub fn add_intercrate_ambiguity_hint(&self, err: &mut Diagnostic) {
+ err.note(&self.intercrate_ambiguity_hint());
+ }
+
+ pub fn intercrate_ambiguity_hint(&self) -> String {
+ match self {
+ IntercrateAmbiguityCause::DownstreamCrate { trait_desc, self_desc } => {
+ let self_desc = if let Some(ty) = self_desc {
+ format!(" for type `{}`", ty)
+ } else {
+ String::new()
+ };
+ format!("downstream crates may implement trait `{}`{}", trait_desc, self_desc)
+ }
+ IntercrateAmbiguityCause::UpstreamCrateUpdate { trait_desc, self_desc } => {
+ let self_desc = if let Some(ty) = self_desc {
+ format!(" for type `{}`", ty)
+ } else {
+ String::new()
+ };
+ format!(
+ "upstream crates may add a new impl of trait `{}`{} \
+ in future versions",
+ trait_desc, self_desc
+ )
+ }
+ IntercrateAmbiguityCause::ReservationImpl { message } => message.clone(),
+ }
+ }
+}
+
+pub struct SelectionContext<'cx, 'tcx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+
+ /// Freshener used specifically for entries on the obligation
+ /// stack. This ensures that all entries on the stack at one time
+ /// will have the same set of placeholder entries, which is
+ /// important for checking for trait bounds that recursively
+ /// require themselves.
+ freshener: TypeFreshener<'cx, 'tcx>,
+
+ /// During coherence we have to assume that other crates may add
+ /// additional impls which we currently don't know about.
+ ///
+    /// To deal with this, evaluation should be conservative
+ /// and consider the possibility of impls from outside this crate.
+ /// This comes up primarily when resolving ambiguity. Imagine
+ /// there is some trait reference `$0: Bar` where `$0` is an
+ /// inference variable. If `intercrate` is true, then we can never
+ /// say for sure that this reference is not implemented, even if
+ /// there are *no impls at all for `Bar`*, because `$0` could be
+    /// bound to some type in a downstream crate that implements
+ /// `Bar`.
+ ///
+ /// Outside of coherence we set this to false because we are only
+ /// interested in types that the user could actually have written.
+ /// In other words, we consider `$0: Bar` to be unimplemented if
+ /// there is no type that the user could *actually name* that
+ /// would satisfy it. This avoids crippling inference, basically.
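+    ///
+    /// For example (a sketch, not compiler output): given only
+    /// `trait Bar {}` with no impls anywhere, `$0: Bar` must still be
+    /// treated as possibly holding during coherence, because a
+    /// downstream crate could add `impl Bar for TheirType`.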
+ intercrate: bool,
+ /// If `intercrate` is set, we remember predicates which were
+ /// considered ambiguous because of impls potentially added in other crates.
+ /// This is used in coherence to give improved diagnostics.
+    /// We don't do this until we detect a coherence error because it can
+ /// lead to false overflow results (#47139) and because always
+ /// computing it may negatively impact performance.
+ intercrate_ambiguity_causes: Option<FxIndexSet<IntercrateAmbiguityCause>>,
+
+ /// The mode that trait queries run in, which informs our error handling
+ /// policy. In essence, canonicalized queries need their errors propagated
+ /// rather than immediately reported because we do not have accurate spans.
+ query_mode: TraitQueryMode,
+}
+
+// One frame of the obligation stack; each frame links back up to its parents.
+struct TraitObligationStack<'prev, 'tcx> {
+ obligation: &'prev TraitObligation<'tcx>,
+
+ /// The trait predicate from `obligation` but "freshened" with the
+ /// selection-context's freshener. Used to check for recursion.
+ fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
+
+ /// Starts out equal to `depth` -- if, during evaluation, we
+ /// encounter a cycle, then we will set this flag to the minimum
+ /// depth of that cycle for all participants in the cycle. These
+ /// participants will then forego caching their results. This is
+ /// not the most efficient solution, but it addresses #60010. The
+ /// problem we are trying to prevent:
+ ///
+ /// - If you have `A: AutoTrait` requires `B: AutoTrait` and `C: NonAutoTrait`
+ /// - `B: AutoTrait` requires `A: AutoTrait` (coinductive cycle, ok)
+ /// - `C: NonAutoTrait` requires `A: AutoTrait` (non-coinductive cycle, not ok)
+ ///
+ /// you don't want to cache that `B: AutoTrait` or `A: AutoTrait`
+ /// is `EvaluatedToOk`; this is because they were only considered
+    /// ok on the premise that `A: AutoTrait` held, but we later
+    /// encountered a problem with `A: AutoTrait`. So we
+ /// currently set a flag on the stack node for `B: AutoTrait` (as
+ /// well as the second instance of `A: AutoTrait`) to suppress
+ /// caching.
+ ///
+ /// This is a simple, targeted fix. A more-performant fix requires
+ /// deeper changes, but would permit more caching: we could
+ /// basically defer caching until we have fully evaluated the
+ /// tree, and then cache the entire tree at once. In any case, the
+ /// performance impact here shouldn't be so horrible: every time
+ /// this is hit, we do cache at least one trait, so we only
+ /// evaluate each member of a cycle up to N times, where N is the
+ /// length of the cycle. This means the performance impact is
+ /// bounded and we shouldn't have any terrible worst-cases.
+ reached_depth: Cell<usize>,
+
+ previous: TraitObligationStackList<'prev, 'tcx>,
+
+ /// The number of parent frames plus one (thus, the topmost frame has depth 1).
+ depth: usize,
+
+ /// The depth-first number of this node in the search graph -- a
+ /// pre-order index. Basically, a freshly incremented counter.
+ dfn: usize,
+}
+
+struct SelectionCandidateSet<'tcx> {
+ // A list of candidates that definitely apply to the current
+ // obligation (meaning: types unify).
+ vec: Vec<SelectionCandidate<'tcx>>,
+
+ // If `true`, then there were candidates that might or might
+ // not have applied, but we couldn't tell. This occurs when some
+ // of the input types are type variables, in which case there are
+ // various "builtin" rules that might or might not trigger.
+ ambiguous: bool,
+}
+
+#[derive(PartialEq, Eq, Debug, Clone)]
+struct EvaluatedCandidate<'tcx> {
+ candidate: SelectionCandidate<'tcx>,
+ evaluation: EvaluationResult,
+}
+
+/// When does the builtin impl for `T: Trait` apply?
+#[derive(Debug)]
+enum BuiltinImplConditions<'tcx> {
+ /// The impl is conditional on `T1, T2, ...: Trait`.
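+    /// For instance (illustrative): for `(A, B): Copy`, the builtin
+    /// impl applies under `Where([A, B])`, i.e. if both `A: Copy` and
+    /// `B: Copy` hold.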
+ Where(ty::Binder<'tcx, Vec<Ty<'tcx>>>),
+ /// There is no built-in impl. There may be some other
+ /// candidate (a where-clause or user-defined impl).
+ None,
+ /// It is unknown whether there is an impl.
+ Ambiguous,
+}
+
+impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> {
+ pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
+ SelectionContext {
+ infcx,
+ freshener: infcx.freshener_keep_static(),
+ intercrate: false,
+ intercrate_ambiguity_causes: None,
+ query_mode: TraitQueryMode::Standard,
+ }
+ }
+
+ pub fn intercrate(infcx: &'cx InferCtxt<'cx, 'tcx>) -> SelectionContext<'cx, 'tcx> {
+ SelectionContext {
+ infcx,
+ freshener: infcx.freshener_keep_static(),
+ intercrate: true,
+ intercrate_ambiguity_causes: None,
+ query_mode: TraitQueryMode::Standard,
+ }
+ }
+
+ pub fn with_query_mode(
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ query_mode: TraitQueryMode,
+ ) -> SelectionContext<'cx, 'tcx> {
+ debug!(?query_mode, "with_query_mode");
+ SelectionContext {
+ infcx,
+ freshener: infcx.freshener_keep_static(),
+ intercrate: false,
+ intercrate_ambiguity_causes: None,
+ query_mode,
+ }
+ }
+
+ /// Enables tracking of intercrate ambiguity causes. See
+ /// the documentation of [`Self::intercrate_ambiguity_causes`] for more.
+ pub fn enable_tracking_intercrate_ambiguity_causes(&mut self) {
+ assert!(self.intercrate);
+ assert!(self.intercrate_ambiguity_causes.is_none());
+ self.intercrate_ambiguity_causes = Some(FxIndexSet::default());
+ debug!("selcx: enable_tracking_intercrate_ambiguity_causes");
+ }
+
+ /// Gets the intercrate ambiguity causes collected since tracking
+ /// was enabled and disables tracking at the same time. If
+    /// tracking is not enabled, just returns an empty set.
+ pub fn take_intercrate_ambiguity_causes(&mut self) -> FxIndexSet<IntercrateAmbiguityCause> {
+ assert!(self.intercrate);
+ self.intercrate_ambiguity_causes.take().unwrap_or_default()
+ }
+
+ pub fn infcx(&self) -> &'cx InferCtxt<'cx, 'tcx> {
+ self.infcx
+ }
+
+ pub fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ pub fn is_intercrate(&self) -> bool {
+ self.intercrate
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Selection
+ //
+ // The selection phase tries to identify *how* an obligation will
+ // be resolved. For example, it will identify which impl or
+ // parameter bound is to be used. The process can be inconclusive
+ // if the self type in the obligation is not fully inferred. Selection
+ // can result in an error in one of two ways:
+ //
+ // 1. If no applicable impl or parameter bound can be found.
+ // 2. If the output type parameters in the obligation do not match
+ // those specified by the impl/bound. For example, if the obligation
+ // is `Vec<Foo>: Iterable<Bar>`, but the impl specifies
+    //    `impl<T> Iterable<T> for Vec<T>`, then an error would result.
+
+ /// Attempts to satisfy the obligation. If successful, this will affect the surrounding
+ /// type environment by performing unification.
+ #[instrument(level = "debug", skip(self))]
+ pub fn select(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> SelectionResult<'tcx, Selection<'tcx>> {
+ let candidate = match self.select_from_obligation(obligation) {
+ Err(SelectionError::Overflow(OverflowError::Canonical)) => {
+ // In standard mode, overflow must have been caught and reported
+ // earlier.
+ assert!(self.query_mode == TraitQueryMode::Canonical);
+ return Err(SelectionError::Overflow(OverflowError::Canonical));
+ }
+ Err(SelectionError::Ambiguous(_)) => {
+ return Ok(None);
+ }
+ Err(e) => {
+ return Err(e);
+ }
+ Ok(None) => {
+ return Ok(None);
+ }
+ Ok(Some(candidate)) => candidate,
+ };
+
+ match self.confirm_candidate(obligation, candidate) {
+ Err(SelectionError::Overflow(OverflowError::Canonical)) => {
+ assert!(self.query_mode == TraitQueryMode::Canonical);
+ Err(SelectionError::Overflow(OverflowError::Canonical))
+ }
+ Err(e) => Err(e),
+ Ok(candidate) => {
+ debug!(?candidate, "confirmed");
+ Ok(Some(candidate))
+ }
+ }
+ }
+
+ pub(crate) fn select_from_obligation(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
+ debug_assert!(!obligation.predicate.has_escaping_bound_vars());
+
+ let pec = &ProvisionalEvaluationCache::default();
+ let stack = self.push_stack(TraitObligationStackList::empty(pec), obligation);
+
+ self.candidate_from_obligation(&stack)
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // EVALUATION
+ //
+ // Tests whether an obligation can be selected or whether an impl
+ // can be applied to particular types. It skips the "confirmation"
+ // step and hence completely ignores output type parameters.
+ //
+ // The result is "true" if the obligation *may* hold and "false" if
+ // we can be sure it does not.
+
+ /// Evaluates whether the obligation `obligation` can be satisfied (by any means).
+ pub fn predicate_may_hold_fatal(&mut self, obligation: &PredicateObligation<'tcx>) -> bool {
+ debug!(?obligation, "predicate_may_hold_fatal");
+
+ // This fatal query is a stopgap that should only be used in standard mode,
+ // where we do not expect overflow to be propagated.
+ assert!(self.query_mode == TraitQueryMode::Standard);
+
+ self.evaluate_root_obligation(obligation)
+ .expect("Overflow should be caught earlier in standard query mode")
+ .may_apply()
+ }
+
+ /// Evaluates whether the obligation `obligation` can be satisfied
+ /// and returns an `EvaluationResult`. This is meant for the
+ /// *initial* call.
+ pub fn evaluate_root_obligation(
+ &mut self,
+ obligation: &PredicateObligation<'tcx>,
+ ) -> Result<EvaluationResult, OverflowError> {
+ self.evaluation_probe(|this| {
+ this.evaluate_predicate_recursively(
+ TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
+ obligation.clone(),
+ )
+ })
+ }
+
+ fn evaluation_probe(
+ &mut self,
+ op: impl FnOnce(&mut Self) -> Result<EvaluationResult, OverflowError>,
+ ) -> Result<EvaluationResult, OverflowError> {
+ self.infcx.probe(|snapshot| -> Result<EvaluationResult, OverflowError> {
+ let result = op(self)?;
+
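+            // A failed leak check indicates a higher-ranked region error;
+            // treat it as evaluation failure rather than a hard error.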
+ match self.infcx.leak_check(true, snapshot) {
+ Ok(()) => {}
+ Err(_) => return Ok(EvaluatedToErr),
+ }
+
+ if self.infcx.opaque_types_added_in_snapshot(snapshot) {
+ return Ok(result.max(EvaluatedToOkModuloOpaqueTypes));
+ }
+
+ match self.infcx.region_constraints_added_in_snapshot(snapshot) {
+ None => Ok(result),
+ Some(_) => Ok(result.max(EvaluatedToOkModuloRegions)),
+ }
+ })
+ }
+
+ /// Evaluates the predicates in `predicates` recursively. Note that
+ /// this applies projections in the predicates, and therefore
+ /// is run within an inference probe.
+ #[instrument(skip(self, stack), level = "debug")]
+ fn evaluate_predicates_recursively<'o, I>(
+ &mut self,
+ stack: TraitObligationStackList<'o, 'tcx>,
+ predicates: I,
+ ) -> Result<EvaluationResult, OverflowError>
+ where
+ I: IntoIterator<Item = PredicateObligation<'tcx>> + std::fmt::Debug,
+ {
+ let mut result = EvaluatedToOk;
+ for obligation in predicates {
+ let eval = self.evaluate_predicate_recursively(stack, obligation.clone())?;
+ if let EvaluatedToErr = eval {
+ // fast-path - EvaluatedToErr is the top of the lattice,
+ // so we don't need to look on the other predicates.
+ return Ok(EvaluatedToErr);
+ } else {
+ result = cmp::max(result, eval);
+ }
+ }
+ Ok(result)
+ }
+
+ #[instrument(
+ level = "debug",
+ skip(self, previous_stack),
+ fields(previous_stack = ?previous_stack.head())
+ )]
+ fn evaluate_predicate_recursively<'o>(
+ &mut self,
+ previous_stack: TraitObligationStackList<'o, 'tcx>,
+ obligation: PredicateObligation<'tcx>,
+ ) -> Result<EvaluationResult, OverflowError> {
+ // `previous_stack` stores a `TraitObligation`, while `obligation` is
+ // a `PredicateObligation`. These are distinct types, so we can't
+ // use any `Option` combinator method that would force them to be
+ // the same.
+ match previous_stack.head() {
+ Some(h) => self.check_recursion_limit(&obligation, h.obligation)?,
+ None => self.check_recursion_limit(&obligation, &obligation)?,
+ }
+
+ let result = ensure_sufficient_stack(|| {
+ let bound_predicate = obligation.predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(t) => {
+ let t = bound_predicate.rebind(t);
+ debug_assert!(!t.has_escaping_bound_vars());
+ let obligation = obligation.with(t);
+ self.evaluate_trait_predicate_recursively(previous_stack, obligation)
+ }
+
+ ty::PredicateKind::Subtype(p) => {
+ let p = bound_predicate.rebind(p);
+ // Does this code ever run?
+ match self.infcx.subtype_predicate(&obligation.cause, obligation.param_env, p) {
+ Some(Ok(InferOk { mut obligations, .. })) => {
+ self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
+ self.evaluate_predicates_recursively(
+ previous_stack,
+ obligations.into_iter(),
+ )
+ }
+ Some(Err(_)) => Ok(EvaluatedToErr),
+ None => Ok(EvaluatedToAmbig),
+ }
+ }
+
+ ty::PredicateKind::Coerce(p) => {
+ let p = bound_predicate.rebind(p);
+ // Does this code ever run?
+ match self.infcx.coerce_predicate(&obligation.cause, obligation.param_env, p) {
+ Some(Ok(InferOk { mut obligations, .. })) => {
+ self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
+ self.evaluate_predicates_recursively(
+ previous_stack,
+ obligations.into_iter(),
+ )
+ }
+ Some(Err(_)) => Ok(EvaluatedToErr),
+ None => Ok(EvaluatedToAmbig),
+ }
+ }
+
+ ty::PredicateKind::WellFormed(arg) => {
+ // So, there is a bit going on here. First, `WellFormed` predicates
+ // are coinductive, like trait predicates with auto traits.
+ // This means that we need to detect if we have recursively
+ // evaluated `WellFormed(X)`. Otherwise, we would run into
+ // a "natural" overflow error.
+ //
+ // Now, the next question is whether we need to do anything
+ // special with caching. Considering the following tree:
+ // - `WF(Foo<T>)`
+ // - `Bar<T>: Send`
+ // - `WF(Foo<T>)`
+ // - `Foo<T>: Trait`
+ // In this case, the innermost `WF(Foo<T>)` should return
+ // `EvaluatedToOk`, since it's coinductive. Then if
+ // `Bar<T>: Send` is resolved to `EvaluatedToOk`, it can be
+ // inserted into a cache (because without thinking about `WF`
+ // goals, it isn't in a cycle). If `Foo<T>: Trait` later doesn't
+ // hold, then `Bar<T>: Send` shouldn't hold. Therefore, we
+ // *do* need to keep track of coinductive cycles.
+
+ let cache = previous_stack.cache;
+ let dfn = cache.next_dfn();
+
+ for stack_arg in previous_stack.cache.wf_args.borrow().iter().rev() {
+ if stack_arg.0 != arg {
+ continue;
+ }
+ debug!("WellFormed({:?}) on stack", arg);
+ if let Some(stack) = previous_stack.head {
+ // Okay, let's imagine we have two different stacks:
+ // `T: NonAutoTrait -> WF(T) -> T: NonAutoTrait`
+ // `WF(T) -> T: NonAutoTrait -> WF(T)`
+ // Because of this, we need to check that all
+ // predicates between the WF goals are coinductive.
+ // Otherwise, we can say that `T: NonAutoTrait` is
+ // true.
+ // Let's imagine we have a predicate stack like
+                            // `Foo: Bar -> WF(T) -> T: NonAutoTrait -> T: Auto`
+ // depth ^1 ^2 ^3
+ // and the current predicate is `WF(T)`. `wf_args`
+ // would contain `(T, 1)`. We want to check all
+ // trait predicates greater than `1`. The previous
+ // stack would be `T: Auto`.
+ let cycle = stack.iter().take_while(|s| s.depth > stack_arg.1);
+ let tcx = self.tcx();
+ let cycle =
+ cycle.map(|stack| stack.obligation.predicate.to_predicate(tcx));
+ if self.coinductive_match(cycle) {
+ stack.update_reached_depth(stack_arg.1);
+ return Ok(EvaluatedToOk);
+ } else {
+ return Ok(EvaluatedToRecur);
+ }
+ }
+ return Ok(EvaluatedToOk);
+ }
+
+ match wf::obligations(
+ self.infcx,
+ obligation.param_env,
+ obligation.cause.body_id,
+ obligation.recursion_depth + 1,
+ arg,
+ obligation.cause.span,
+ ) {
+ Some(mut obligations) => {
+ self.add_depth(obligations.iter_mut(), obligation.recursion_depth);
+
+ cache.wf_args.borrow_mut().push((arg, previous_stack.depth()));
+ let result =
+ self.evaluate_predicates_recursively(previous_stack, obligations);
+ cache.wf_args.borrow_mut().pop();
+
+ let result = result?;
+
+ if !result.must_apply_modulo_regions() {
+ cache.on_failure(dfn);
+ }
+
+ cache.on_completion(dfn);
+
+ Ok(result)
+ }
+ None => Ok(EvaluatedToAmbig),
+ }
+ }
+
+ ty::PredicateKind::TypeOutlives(pred) => {
+ // A global type with no late-bound regions can only
+ // contain the "'static" lifetime (any other lifetime
+ // would either be late-bound or local), so it is guaranteed
+ // to outlive any other lifetime
+ if pred.0.is_global() && !pred.0.has_late_bound_regions() {
+ Ok(EvaluatedToOk)
+ } else {
+ Ok(EvaluatedToOkModuloRegions)
+ }
+ }
+
+ ty::PredicateKind::RegionOutlives(..) => {
+ // We do not consider region relationships when evaluating trait matches.
+ Ok(EvaluatedToOkModuloRegions)
+ }
+
+ ty::PredicateKind::ObjectSafe(trait_def_id) => {
+ if self.tcx().is_object_safe(trait_def_id) {
+ Ok(EvaluatedToOk)
+ } else {
+ Ok(EvaluatedToErr)
+ }
+ }
+
+ ty::PredicateKind::Projection(data) => {
+ let data = bound_predicate.rebind(data);
+ let project_obligation = obligation.with(data);
+ match project::poly_project_and_unify_type(self, &project_obligation) {
+ ProjectAndUnifyResult::Holds(mut subobligations) => {
+ 'compute_res: {
+ // If we've previously marked this projection as 'complete', then
+ // use the final cached result (either `EvaluatedToOk` or
+ // `EvaluatedToOkModuloRegions`), and skip re-evaluating the
+ // sub-obligations.
+ if let Some(key) =
+ ProjectionCacheKey::from_poly_projection_predicate(self, data)
+ {
+ if let Some(cached_res) = self
+ .infcx
+ .inner
+ .borrow_mut()
+ .projection_cache()
+ .is_complete(key)
+ {
+ break 'compute_res Ok(cached_res);
+ }
+ }
+
+ self.add_depth(
+ subobligations.iter_mut(),
+ obligation.recursion_depth,
+ );
+ let res = self.evaluate_predicates_recursively(
+ previous_stack,
+ subobligations,
+ );
+ if let Ok(eval_rslt) = res
+ && (eval_rslt == EvaluatedToOk || eval_rslt == EvaluatedToOkModuloRegions)
+ && let Some(key) =
+ ProjectionCacheKey::from_poly_projection_predicate(
+ self, data,
+ )
+ {
+ // If the result is something that we can cache, then mark this
+ // entry as 'complete'. This will allow us to skip evaluating the
+ // subobligations at all the next time we evaluate the projection
+ // predicate.
+ self.infcx
+ .inner
+ .borrow_mut()
+ .projection_cache()
+ .complete(key, eval_rslt);
+ }
+ res
+ }
+ }
+ ProjectAndUnifyResult::FailedNormalization => Ok(EvaluatedToAmbig),
+ ProjectAndUnifyResult::Recursive => Ok(EvaluatedToRecur),
+ ProjectAndUnifyResult::MismatchedProjectionTypes(_) => Ok(EvaluatedToErr),
+ }
+ }
+
+ ty::PredicateKind::ClosureKind(_, closure_substs, kind) => {
+ match self.infcx.closure_kind(closure_substs) {
+ Some(closure_kind) => {
+ if closure_kind.extends(kind) {
+ Ok(EvaluatedToOk)
+ } else {
+ Ok(EvaluatedToErr)
+ }
+ }
+ None => Ok(EvaluatedToAmbig),
+ }
+ }
+
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ match const_evaluatable::is_const_evaluatable(
+ self.infcx,
+ uv,
+ obligation.param_env,
+ obligation.cause.span,
+ ) {
+ Ok(()) => Ok(EvaluatedToOk),
+ Err(NotConstEvaluatable::MentionsInfer) => Ok(EvaluatedToAmbig),
+ Err(NotConstEvaluatable::MentionsParam) => Ok(EvaluatedToErr),
+ Err(_) => Ok(EvaluatedToErr),
+ }
+ }
+
+ ty::PredicateKind::ConstEquate(c1, c2) => {
+ debug!(?c1, ?c2, "evaluate_predicate_recursively: equating consts");
+
+ if self.tcx().features().generic_const_exprs {
+ // FIXME: we probably should only try to unify abstract constants
+ // if the constants depend on generic parameters.
+ //
+ // Let's just see where this breaks :shrug:
+ if let (ty::ConstKind::Unevaluated(a), ty::ConstKind::Unevaluated(b)) =
+ (c1.kind(), c2.kind())
+ {
+ if self.infcx.try_unify_abstract_consts(
+ a.shrink(),
+ b.shrink(),
+ obligation.param_env,
+ ) {
+ return Ok(EvaluatedToOk);
+ }
+ }
+ }
+
+ let evaluate = |c: ty::Const<'tcx>| {
+ if let ty::ConstKind::Unevaluated(unevaluated) = c.kind() {
+ match self.infcx.try_const_eval_resolve(
+ obligation.param_env,
+ unevaluated,
+ c.ty(),
+ Some(obligation.cause.span),
+ ) {
+ Ok(val) => Ok(val),
+ Err(e) => Err(e),
+ }
+ } else {
+ Ok(c)
+ }
+ };
+
+ match (evaluate(c1), evaluate(c2)) {
+ (Ok(c1), Ok(c2)) => {
+ match self
+ .infcx()
+ .at(&obligation.cause, obligation.param_env)
+ .eq(c1, c2)
+ {
+ Ok(_) => Ok(EvaluatedToOk),
+ Err(_) => Ok(EvaluatedToErr),
+ }
+ }
+ (Err(ErrorHandled::Reported(_)), _)
+ | (_, Err(ErrorHandled::Reported(_))) => Ok(EvaluatedToErr),
+ (Err(ErrorHandled::Linted), _) | (_, Err(ErrorHandled::Linted)) => {
+ span_bug!(
+ obligation.cause.span(),
+ "ConstEquate: const_eval_resolve returned an unexpected error"
+ )
+ }
+ (Err(ErrorHandled::TooGeneric), _) | (_, Err(ErrorHandled::TooGeneric)) => {
+ if c1.has_infer_types_or_consts() || c2.has_infer_types_or_consts() {
+ Ok(EvaluatedToAmbig)
+ } else {
+ // Two different constants using generic parameters ~> error.
+ Ok(EvaluatedToErr)
+ }
+ }
+ }
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(..) => {
+ bug!("TypeWellFormedFromEnv is only used for chalk")
+ }
+ }
+ });
+
+ debug!("finished: {:?} from {:?}", result, obligation);
+
+ result
+ }
+
+ #[instrument(skip(self, previous_stack), level = "debug")]
+ fn evaluate_trait_predicate_recursively<'o>(
+ &mut self,
+ previous_stack: TraitObligationStackList<'o, 'tcx>,
+ mut obligation: TraitObligation<'tcx>,
+ ) -> Result<EvaluationResult, OverflowError> {
+ if !self.intercrate
+ && obligation.is_global()
+ && obligation.param_env.caller_bounds().iter().all(|bound| bound.needs_subst())
+ {
+ // If a param env has no global bounds, global obligations do not
+ // depend on its particular value in order to work, so we can clear
+ // out the param env and get better caching.
+ debug!("in global");
+ obligation.param_env = obligation.param_env.without_caller_bounds();
+ }
+
+ let stack = self.push_stack(previous_stack, &obligation);
+ let mut fresh_trait_pred = stack.fresh_trait_pred;
+ let mut param_env = obligation.param_env;
+
+ fresh_trait_pred = fresh_trait_pred.map_bound(|mut pred| {
+ pred.remap_constness(&mut param_env);
+ pred
+ });
+
+ debug!(?fresh_trait_pred);
+
+ // If a trait predicate is in the (local or global) evaluation cache,
+ // then we know it holds without cycles.
+ if let Some(result) = self.check_evaluation_cache(param_env, fresh_trait_pred) {
+ debug!(?result, "CACHE HIT");
+ return Ok(result);
+ }
+
+ if let Some(result) = stack.cache().get_provisional(fresh_trait_pred) {
+ debug!(?result, "PROVISIONAL CACHE HIT");
+ stack.update_reached_depth(result.reached_depth);
+ return Ok(result.result);
+ }
+
+ // Check if this is a match for something already on the
+ // stack. If so, we don't want to insert the result into the
+ // main cache (it is cycle dependent) nor the provisional
+ // cache (which is meant for things that have completed but
+ // for a "backedge" -- this result *is* the backedge).
+ if let Some(cycle_result) = self.check_evaluation_cycle(&stack) {
+ return Ok(cycle_result);
+ }
+
+ let (result, dep_node) = self.in_task(|this| this.evaluate_stack(&stack));
+ let result = result?;
+
+ if !result.must_apply_modulo_regions() {
+ stack.cache().on_failure(stack.dfn);
+ }
+
+ let reached_depth = stack.reached_depth.get();
+ if reached_depth >= stack.depth {
+ debug!(?result, "CACHE MISS");
+ self.insert_evaluation_cache(param_env, fresh_trait_pred, dep_node, result);
+ stack.cache().on_completion(stack.dfn);
+ } else {
+ debug!(?result, "PROVISIONAL");
+ debug!(
+ "caching provisionally because {:?} \
+ is a cycle participant (at depth {}, reached depth {})",
+ fresh_trait_pred, stack.depth, reached_depth,
+ );
+
+ stack.cache().insert_provisional(stack.dfn, reached_depth, fresh_trait_pred, result);
+ }
+
+ Ok(result)
+ }
+
+ /// If there is any previous entry on the stack that precisely
+ /// matches this obligation, then we can assume that the
+ /// obligation is satisfied for now (still all other conditions
+ /// must be met of course). One obvious case this comes up is
+ /// marker traits like `Send`. Think of a linked list:
+ ///
+ /// struct List<T> { data: T, next: Option<Box<List<T>>> }
+ ///
+ /// `Box<List<T>>` will be `Send` if `T` is `Send` and
+ /// `Option<Box<List<T>>>` is `Send`, and in turn
+ /// `Option<Box<List<T>>>` is `Send` if `Box<List<T>>` is
+ /// `Send`.
+ ///
+    /// Note that we do this comparison using the `fresh_trait_pred`
+ /// fields. Because these have all been freshened using
+ /// `self.freshener`, we can be sure that (a) this will not
+ /// affect the inferencer state and (b) that if we see two
+ /// fresh regions with the same index, they refer to the same
+ /// unbound type variable.
+ fn check_evaluation_cycle(
+ &mut self,
+ stack: &TraitObligationStack<'_, 'tcx>,
+ ) -> Option<EvaluationResult> {
+ if let Some(cycle_depth) = stack
+ .iter()
+ .skip(1) // Skip top-most frame.
+ .find(|prev| {
+ stack.obligation.param_env == prev.obligation.param_env
+ && stack.fresh_trait_pred == prev.fresh_trait_pred
+ })
+ .map(|stack| stack.depth)
+ {
+ debug!("evaluate_stack --> recursive at depth {}", cycle_depth);
+
+ // If we have a stack like `A B C D E A`, where the top of
+ // the stack is the final `A`, then this will iterate over
+ // `A, E, D, C, B` -- i.e., all the participants apart
+ // from the cycle head. We mark them as participating in a
+ // cycle. This suppresses caching for those nodes. See
+            // `reached_depth` field for more details.
+ stack.update_reached_depth(cycle_depth);
+
+ // Subtle: when checking for a coinductive cycle, we do
+ // not compare using the "freshened trait refs" (which
+ // have erased regions) but rather the fully explicit
+ // trait refs. This is important because it's only a cycle
+ // if the regions match exactly.
+ let cycle = stack.iter().skip(1).take_while(|s| s.depth >= cycle_depth);
+ let tcx = self.tcx();
+ let cycle = cycle.map(|stack| stack.obligation.predicate.to_predicate(tcx));
+ if self.coinductive_match(cycle) {
+ debug!("evaluate_stack --> recursive, coinductive");
+ Some(EvaluatedToOk)
+ } else {
+ debug!("evaluate_stack --> recursive, inductive");
+ Some(EvaluatedToRecur)
+ }
+ } else {
+ None
+ }
+ }
+
+ fn evaluate_stack<'o>(
+ &mut self,
+ stack: &TraitObligationStack<'o, 'tcx>,
+ ) -> Result<EvaluationResult, OverflowError> {
+ // In intercrate mode, whenever any of the generics are unbound,
+ // there can always be an impl. Even if there are no impls in
+ // this crate, perhaps the type would be unified with
+ // something from another crate that does provide an impl.
+ //
+ // In intra mode, we must still be conservative. The reason is
+ // that we want to avoid cycles. Imagine an impl like:
+ //
+ // impl<T:Eq> Eq for Vec<T>
+ //
+ // and a trait reference like `$0 : Eq` where `$0` is an
+ // unbound variable. When we evaluate this trait-reference, we
+ // will unify `$0` with `Vec<$1>` (for some fresh variable
+ // `$1`), on the condition that `$1 : Eq`. We will then wind
+        // up with many candidates (since there are other `Eq` impls
+        // that apply) and try to winnow things down. This results in
+        // a recursive evaluation of `$1 : Eq` -- as you can
+ // imagine, this is just where we started. To avoid that, we
+ // check for unbound variables and return an ambiguous (hence possible)
+ // match if we've seen this trait before.
+ //
+ // This suffices to allow chains like `FnMut` implemented in
+ // terms of `Fn` etc, but we could probably make this more
+ // precise still.
+ let unbound_input_types =
+ stack.fresh_trait_pred.skip_binder().trait_ref.substs.types().any(|ty| ty.is_fresh());
+
+ if stack.obligation.polarity() != ty::ImplPolarity::Negative {
+ // This check was an imperfect workaround for a bug in the old
+ // intercrate mode; it should be removed when that goes away.
+ if unbound_input_types && self.intercrate {
+ debug!("evaluate_stack --> unbound argument, intercrate --> ambiguous",);
+                // Heuristics: show the diagnostics when there are no candidates in the crate.
+ if self.intercrate_ambiguity_causes.is_some() {
+ debug!("evaluate_stack: intercrate_ambiguity_causes is some");
+ if let Ok(candidate_set) = self.assemble_candidates(stack) {
+ if !candidate_set.ambiguous && candidate_set.vec.is_empty() {
+ let trait_ref = stack.obligation.predicate.skip_binder().trait_ref;
+ let self_ty = trait_ref.self_ty();
+ let cause = with_no_trimmed_paths!({
+ IntercrateAmbiguityCause::DownstreamCrate {
+ trait_desc: trait_ref.print_only_trait_path().to_string(),
+ self_desc: if self_ty.has_concrete_skeleton() {
+ Some(self_ty.to_string())
+ } else {
+ None
+ },
+ }
+ });
+
+ debug!(?cause, "evaluate_stack: pushing cause");
+ self.intercrate_ambiguity_causes.as_mut().unwrap().insert(cause);
+ }
+ }
+ }
+ return Ok(EvaluatedToAmbig);
+ }
+ }
+
+ if unbound_input_types
+ && stack.iter().skip(1).any(|prev| {
+ stack.obligation.param_env == prev.obligation.param_env
+ && self.match_fresh_trait_refs(
+ stack.fresh_trait_pred,
+ prev.fresh_trait_pred,
+ prev.obligation.param_env,
+ )
+ })
+ {
+ debug!("evaluate_stack --> unbound argument, recursive --> giving up",);
+ return Ok(EvaluatedToUnknown);
+ }
+
+ match self.candidate_from_obligation(stack) {
+ Ok(Some(c)) => self.evaluate_candidate(stack, &c),
+ Err(SelectionError::Ambiguous(_)) => Ok(EvaluatedToAmbig),
+ Ok(None) => Ok(EvaluatedToAmbig),
+ Err(Overflow(OverflowError::Canonical)) => Err(OverflowError::Canonical),
+ Err(ErrorReporting) => Err(OverflowError::ErrorReporting),
+ Err(..) => Ok(EvaluatedToErr),
+ }
+ }
+
+ /// For defaulted traits, we use a co-inductive strategy to solve, so
+ /// that recursion is ok. This routine returns `true` if the top of the
+ /// stack (`cycle[0]`):
+ ///
+ /// - is a defaulted trait,
+    /// - also appears in the backtrace at some position `X`,
+ /// - all the predicates at positions `X..` between `X` and the top are
+ /// also defaulted traits.
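+    ///
+    /// A minimal sketch (illustrative only):
+    /// ```ignore (illustrative)
+    /// struct Node { next: Option<Box<Node>> }
+    /// // `Node: Send` requires `Option<Box<Node>>: Send`, which requires
+    /// // `Node: Send` again. Since `Send` is an auto trait, every
+    /// // predicate in the cycle is coinductive, so the cycle is accepted.
+    /// ```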
+ pub(crate) fn coinductive_match<I>(&mut self, mut cycle: I) -> bool
+ where
+ I: Iterator<Item = ty::Predicate<'tcx>>,
+ {
+ cycle.all(|predicate| self.coinductive_predicate(predicate))
+ }
+
+ fn coinductive_predicate(&self, predicate: ty::Predicate<'tcx>) -> bool {
+ let result = match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(ref data) => self.tcx().trait_is_auto(data.def_id()),
+ ty::PredicateKind::WellFormed(_) => true,
+ _ => false,
+ };
+ debug!(?predicate, ?result, "coinductive_predicate");
+ result
+ }
+
+ /// Further evaluates `candidate` to decide whether all type parameters match and whether nested
+ /// obligations are met. Returns whether `candidate` remains viable after this further
+ /// scrutiny.
+ #[instrument(
+ level = "debug",
+ skip(self, stack),
+ fields(depth = stack.obligation.recursion_depth)
+ )]
+ fn evaluate_candidate<'o>(
+ &mut self,
+ stack: &TraitObligationStack<'o, 'tcx>,
+ candidate: &SelectionCandidate<'tcx>,
+ ) -> Result<EvaluationResult, OverflowError> {
+ let mut result = self.evaluation_probe(|this| {
+ let candidate = (*candidate).clone();
+ match this.confirm_candidate(stack.obligation, candidate) {
+ Ok(selection) => {
+ debug!(?selection);
+ this.evaluate_predicates_recursively(
+ stack.list(),
+ selection.nested_obligations().into_iter(),
+ )
+ }
+ Err(..) => Ok(EvaluatedToErr),
+ }
+ })?;
+
+ // If we erased any lifetimes, then we want to use
+ // `EvaluatedToOkModuloRegions` instead of `EvaluatedToOk`
+ // as our final result. The result will be cached using
+ // the freshened trait predicate as a key, so we need
+ // our result to be correct by *any* choice of original lifetimes,
+ // not just the lifetime choice for this particular (non-erased)
+ // predicate.
+ // See issue #80691
+ if stack.fresh_trait_pred.has_erased_regions() {
+ result = result.max(EvaluatedToOkModuloRegions);
+ }
+
+ debug!(?result);
+ Ok(result)
+ }
+
+ fn check_evaluation_cache(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> Option<EvaluationResult> {
+ // Neither the global nor local cache is aware of intercrate
+ // mode, so don't do any caching. In particular, we might
+ // re-use the same `InferCtxt` with both an intercrate
+ // and non-intercrate `SelectionContext`
+ if self.intercrate {
+ return None;
+ }
+
+ let tcx = self.tcx();
+ if self.can_use_global_caches(param_env) {
+ if let Some(res) = tcx.evaluation_cache.get(&(param_env, trait_pred), tcx) {
+ return Some(res);
+ }
+ }
+ self.infcx.evaluation_cache.get(&(param_env, trait_pred), tcx)
+ }
+
+ fn insert_evaluation_cache(
+ &mut self,
+ param_env: ty::ParamEnv<'tcx>,
+ trait_pred: ty::PolyTraitPredicate<'tcx>,
+ dep_node: DepNodeIndex,
+ result: EvaluationResult,
+ ) {
+ // Avoid caching results that depend on more than just the trait-ref
+ // - the stack can create recursion.
+ if result.is_stack_dependent() {
+ return;
+ }
+
+ // Neither the global nor local cache is aware of intercrate
+ // mode, so don't do any caching. In particular, we might
+ // re-use the same `InferCtxt` with both an intercrate
+ // and non-intercrate `SelectionContext`
+ if self.intercrate {
+ return;
+ }
+
+ if self.can_use_global_caches(param_env) {
+ if !trait_pred.needs_infer() {
+ debug!(?trait_pred, ?result, "insert_evaluation_cache global");
+ // This may overwrite the cache with the same value
+ // FIXME: Due to #50507 this can overwrite a different value;
+ // this should be changed to use HashMapExt::insert_same
+ // when that is fixed.
+ self.tcx().evaluation_cache.insert((param_env, trait_pred), dep_node, result);
+ return;
+ }
+ }
+
+ debug!(?trait_pred, ?result, "insert_evaluation_cache");
+ self.infcx.evaluation_cache.insert((param_env, trait_pred), dep_node, result);
+ }
+
+ /// For various reasons, it's possible for a subobligation
+ /// to have a *lower* recursion_depth than the obligation used to create it.
+ /// Projection sub-obligations may be returned from the projection cache,
+ /// which results in obligations with an 'old' `recursion_depth`.
+ /// Additionally, methods like `InferCtxt.subtype_predicate` produce
+ /// subobligations without taking in a 'parent' depth, causing the
+ /// generated subobligations to have a `recursion_depth` of `0`.
+ ///
+ /// To ensure that `recursion_depth` never decreases, we force all subobligations
+ /// to have at least the depth of the original obligation.
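+ ///
+ /// For example (illustrative), a cached subobligation with `recursion_depth`
+ /// `0` added under a parent obligation at depth `5` ends up at
+ /// `max(5, 0) + 1 = 6`.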
+ fn add_depth<T: 'cx, I: Iterator<Item = &'cx mut Obligation<'tcx, T>>>(
+ &self,
+ it: I,
+ min_depth: usize,
+ ) {
+ it.for_each(|o| o.recursion_depth = cmp::max(min_depth, o.recursion_depth) + 1);
+ }
+
+ fn check_recursion_depth<T: Display + TypeFoldable<'tcx>>(
+ &self,
+ depth: usize,
+ error_obligation: &Obligation<'tcx, T>,
+ ) -> Result<(), OverflowError> {
+ if !self.infcx.tcx.recursion_limit().value_within_limit(depth) {
+ match self.query_mode {
+ TraitQueryMode::Standard => {
+ if self.infcx.is_tainted_by_errors() {
+ return Err(OverflowError::Error(
+ ErrorGuaranteed::unchecked_claim_error_was_emitted(),
+ ));
+ }
+ self.infcx.report_overflow_error(error_obligation, true);
+ }
+ TraitQueryMode::Canonical => {
+ return Err(OverflowError::Canonical);
+ }
+ }
+ }
+ Ok(())
+ }
+
+ /// Checks that the recursion limit has not been exceeded.
+ ///
+ /// The weird return type of this function allows it to be used with the `try` (`?`)
+ /// operator within certain functions.
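+ ///
+ /// ```ignore (illustrative)
+ /// // Hypothetical call site: propagate overflow to the caller with `?`.
+ /// self.check_recursion_limit(&obligation, &error_obligation)?;
+ /// ```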
+ #[inline(always)]
+ fn check_recursion_limit<T: Display + TypeFoldable<'tcx>, V: Display + TypeFoldable<'tcx>>(
+ &self,
+ obligation: &Obligation<'tcx, T>,
+ error_obligation: &Obligation<'tcx, V>,
+ ) -> Result<(), OverflowError> {
+ self.check_recursion_depth(obligation.recursion_depth, error_obligation)
+ }
+
+ fn in_task<OP, R>(&mut self, op: OP) -> (R, DepNodeIndex)
+ where
+ OP: FnOnce(&mut Self) -> R,
+ {
+ let (result, dep_node) =
+ self.tcx().dep_graph.with_anon_task(self.tcx(), DepKind::TraitSelect, || op(self));
+ self.tcx().dep_graph.read_index(dep_node);
+ (result, dep_node)
+ }
+
+ /// `filter_impls` filters out candidates that do not satisfy a `const` trait
+ /// obligation, as well as candidates whose impl polarity does not match the
+ /// goal's: a positive impl for a negative goal, or a negative impl for a
+ /// positive goal.
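+ ///
+ /// For example (an illustrative sketch), a `const` trait obligation
+ /// `T: ~const Trait` can only be satisfied by const-compatible candidates
+ /// (a `const` impl, a `~const` where-clause bound, etc.); all other
+ /// candidate kinds are rejected by the loop below.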
+ #[instrument(level = "debug", skip(self))]
+ fn filter_impls(
+ &mut self,
+ candidates: Vec<SelectionCandidate<'tcx>>,
+ obligation: &TraitObligation<'tcx>,
+ ) -> Vec<SelectionCandidate<'tcx>> {
+ let tcx = self.tcx();
+ let mut result = Vec::with_capacity(candidates.len());
+
+ for candidate in candidates {
+ // Respect const trait obligations
+ if obligation.is_const() {
+ match candidate {
+ // const impl
+ ImplCandidate(def_id) if tcx.constness(def_id) == hir::Constness::Const => {}
+ // const param
+ ParamCandidate(trait_pred) if trait_pred.is_const_if_const() => {}
+ // auto trait impl
+ AutoImplCandidate(..) => {}
+ // generator, this will raise error in other places
+ // or ignore error with const_async_blocks feature
+ GeneratorCandidate => {}
+ // FnDef where the function is const
+ FnPointerCandidate { is_const: true } => {}
+ ConstDestructCandidate(_) => {}
+ _ => {
+ // reject all other types of candidates
+ continue;
+ }
+ }
+ }
+
+ if let ImplCandidate(def_id) = candidate {
+ if ty::ImplPolarity::Reservation == tcx.impl_polarity(def_id)
+ || obligation.polarity() == tcx.impl_polarity(def_id)
+ {
+ result.push(candidate);
+ }
+ } else {
+ result.push(candidate);
+ }
+ }
+
+ result
+ }
+
+ /// `filter_reservation_impls` filters out reservation impls, treating them
+ /// as ambiguous for any goal.
+ #[instrument(level = "debug", skip(self))]
+ fn filter_reservation_impls(
+ &mut self,
+ candidate: SelectionCandidate<'tcx>,
+ obligation: &TraitObligation<'tcx>,
+ ) -> SelectionResult<'tcx, SelectionCandidate<'tcx>> {
+ let tcx = self.tcx();
+ // Treat reservation impls as ambiguity.
+ if let ImplCandidate(def_id) = candidate {
+ if let ty::ImplPolarity::Reservation = tcx.impl_polarity(def_id) {
+ if let Some(intercrate_ambiguity_clauses) = &mut self.intercrate_ambiguity_causes {
+ let value = tcx
+ .get_attr(def_id, sym::rustc_reservation_impl)
+ .and_then(|a| a.value_str());
+ if let Some(value) = value {
+ debug!(
+ "filter_reservation_impls: \
+ reservation impl ambiguity on {:?}",
+ def_id
+ );
+ intercrate_ambiguity_clauses.insert(
+ IntercrateAmbiguityCause::ReservationImpl {
+ message: value.to_string(),
+ },
+ );
+ }
+ }
+ return Ok(None);
+ }
+ }
+ Ok(Some(candidate))
+ }
+
+ fn is_knowable<'o>(&mut self, stack: &TraitObligationStack<'o, 'tcx>) -> Option<Conflict> {
+ debug!("is_knowable(intercrate={:?})", self.intercrate);
+
+ if !self.intercrate || stack.obligation.polarity() == ty::ImplPolarity::Negative {
+ return None;
+ }
+
+ let obligation = &stack.obligation;
+ let predicate = self.infcx().resolve_vars_if_possible(obligation.predicate);
+
+ // Okay to skip binder because of the nature of the
+ // trait-ref-is-knowable check, which does not care about
+ // bound regions.
+ let trait_ref = predicate.skip_binder().trait_ref;
+
+ coherence::trait_ref_is_knowable(self.tcx(), trait_ref)
+ }
+
+ /// Returns `true` if the global caches can be used.
+ fn can_use_global_caches(&self, param_env: ty::ParamEnv<'tcx>) -> bool {
+ // If there are any inference variables in the `ParamEnv`, then we
+ // always use a cache local to this particular scope. Otherwise, we
+ // switch to a global cache.
+ if param_env.needs_infer() {
+ return false;
+ }
+
+ // Avoid using the master cache during coherence and just rely
+ // on the local cache. This effectively disables caching
+ // during coherence. It is really just a simplification to
+ // avoid us having to fear that coherence results "pollute"
+ // the master cache. Since coherence executes pretty quickly,
+ // it's not worth going to more trouble to increase the
+ // hit-rate, I don't think.
+ if self.intercrate {
+ return false;
+ }
+
+ // Otherwise, we can use the global cache.
+ true
+ }
+
+ fn check_candidate_cache(
+ &mut self,
+ mut param_env: ty::ParamEnv<'tcx>,
+ cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> Option<SelectionResult<'tcx, SelectionCandidate<'tcx>>> {
+ // Neither the global nor local cache is aware of intercrate
+ // mode, so don't do any caching. In particular, we might
+ // re-use the same `InferCtxt` with both an intercrate
+ // and non-intercrate `SelectionContext`
+ if self.intercrate {
+ return None;
+ }
+ let tcx = self.tcx();
+ let mut pred = cache_fresh_trait_pred.skip_binder();
+ pred.remap_constness(&mut param_env);
+
+ if self.can_use_global_caches(param_env) {
+ if let Some(res) = tcx.selection_cache.get(&(param_env, pred), tcx) {
+ return Some(res);
+ }
+ }
+ self.infcx.selection_cache.get(&(param_env, pred), tcx)
+ }
+
+ /// Determines whether we can safely cache the result
+ /// of selecting an obligation. This is almost always `true`,
+ /// except when dealing with certain `ParamCandidate`s.
+ ///
+ /// Ordinarily, a `ParamCandidate` will contain no inference variables,
+ /// since it was usually produced directly from a `DefId`. However,
+ /// in certain cases (currently only librustdoc's blanket impl finder),
+ /// a `ParamEnv` may be explicitly constructed with inference types.
+ /// When this is the case, we do *not* want to cache the resulting selection
+ /// candidate. This is due to the fact that it might not always be possible
+ /// to equate the obligation's trait ref and the candidate's trait ref,
+ /// if more constraints end up getting added to an inference variable.
+ ///
+ /// Because of this, we always want to re-run the full selection
+ /// process for our obligation the next time we see it, since
+ /// we might end up picking a different `SelectionCandidate` (or none at all).
+ fn can_cache_candidate(
+ &self,
+ result: &SelectionResult<'tcx, SelectionCandidate<'tcx>>,
+ ) -> bool {
+ // Neither the global nor local cache is aware of intercrate
+ // mode, so don't do any caching. In particular, we might
+ // re-use the same `InferCtxt` with both an intercrate
+ // and non-intercrate `SelectionContext`
+ if self.intercrate {
+ return false;
+ }
+ match result {
+ Ok(Some(SelectionCandidate::ParamCandidate(trait_ref))) => !trait_ref.needs_infer(),
+ _ => true,
+ }
+ }
+
+ #[instrument(skip(self, param_env, cache_fresh_trait_pred, dep_node), level = "debug")]
+ fn insert_candidate_cache(
+ &mut self,
+ mut param_env: ty::ParamEnv<'tcx>,
+ cache_fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
+ dep_node: DepNodeIndex,
+ candidate: SelectionResult<'tcx, SelectionCandidate<'tcx>>,
+ ) {
+ let tcx = self.tcx();
+ let mut pred = cache_fresh_trait_pred.skip_binder();
+
+ pred.remap_constness(&mut param_env);
+
+ if !self.can_cache_candidate(&candidate) {
+ debug!(?pred, ?candidate, "insert_candidate_cache - candidate is not cacheable");
+ return;
+ }
+
+ if self.can_use_global_caches(param_env) {
+ if let Err(Overflow(OverflowError::Canonical)) = candidate {
+ // Don't cache overflow globally; we only produce this in certain modes.
+ } else if !pred.needs_infer() {
+ if !candidate.needs_infer() {
+ debug!(?pred, ?candidate, "insert_candidate_cache global");
+ // This may overwrite the cache with the same value.
+ tcx.selection_cache.insert((param_env, pred), dep_node, candidate);
+ return;
+ }
+ }
+ }
+
+ debug!(?pred, ?candidate, "insert_candidate_cache local");
+ self.infcx.selection_cache.insert((param_env, pred), dep_node, candidate);
+ }
+
+ /// Matches a predicate against the bounds of its self type.
+ ///
+ /// Given an obligation like `<T as Foo>::Bar: Baz` where the self type is
+ /// a projection, look at the bounds of `T::Bar`, see if we can find a
+ /// `Baz` bound. We return indexes into the list returned by
+ /// `tcx.item_bounds` for any applicable bounds.
+ #[instrument(level = "debug", skip(self))]
+ fn match_projection_obligation_against_definition_bounds(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> smallvec::SmallVec<[usize; 2]> {
+ let poly_trait_predicate = self.infcx().resolve_vars_if_possible(obligation.predicate);
+ let placeholder_trait_predicate =
+ self.infcx().replace_bound_vars_with_placeholders(poly_trait_predicate);
+ debug!(?placeholder_trait_predicate);
+
+ let tcx = self.infcx.tcx;
+ let (def_id, substs) = match *placeholder_trait_predicate.trait_ref.self_ty().kind() {
+ ty::Projection(ref data) => (data.item_def_id, data.substs),
+ ty::Opaque(def_id, substs) => (def_id, substs),
+ _ => {
+ span_bug!(
+ obligation.cause.span,
+ "match_projection_obligation_against_definition_bounds() called \
+ but self-ty is not a projection: {:?}",
+ placeholder_trait_predicate.trait_ref.self_ty()
+ );
+ }
+ };
+ let bounds = tcx.bound_item_bounds(def_id).subst(tcx, substs);
+
+ // The bounds returned by `item_bounds` may contain duplicates after
+ // normalization, so try to deduplicate when possible to avoid
+ // unnecessary ambiguity.
+ let mut distinct_normalized_bounds = FxHashSet::default();
+
+ let matching_bounds = bounds
+ .iter()
+ .enumerate()
+ .filter_map(|(idx, bound)| {
+ let bound_predicate = bound.kind();
+ if let ty::PredicateKind::Trait(pred) = bound_predicate.skip_binder() {
+ let bound = bound_predicate.rebind(pred.trait_ref);
+ if self.infcx.probe(|_| {
+ match self.match_normalize_trait_ref(
+ obligation,
+ bound,
+ placeholder_trait_predicate.trait_ref,
+ ) {
+ Ok(None) => true,
+ Ok(Some(normalized_trait))
+ if distinct_normalized_bounds.insert(normalized_trait) =>
+ {
+ true
+ }
+ _ => false,
+ }
+ }) {
+ return Some(idx);
+ }
+ }
+ None
+ })
+ .collect();
+
+ debug!(?matching_bounds);
+ matching_bounds
+ }
+
+ /// Equates the trait in `obligation` with trait bound. If the two traits
+ /// can be equated and the normalized trait bound doesn't contain inference
+ /// variables or placeholders, the normalized bound is returned.
+ fn match_normalize_trait_ref(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ trait_bound: ty::PolyTraitRef<'tcx>,
+ placeholder_trait_ref: ty::TraitRef<'tcx>,
+ ) -> Result<Option<ty::PolyTraitRef<'tcx>>, ()> {
+ debug_assert!(!placeholder_trait_ref.has_escaping_bound_vars());
+ if placeholder_trait_ref.def_id != trait_bound.def_id() {
+ // Avoid unnecessary normalization
+ return Err(());
+ }
+
+ let Normalized { value: trait_bound, obligations: _ } = ensure_sufficient_stack(|| {
+ project::normalize_with_depth(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ trait_bound,
+ )
+ });
+ self.infcx
+ .at(&obligation.cause, obligation.param_env)
+ .define_opaque_types(false)
+ .sup(ty::Binder::dummy(placeholder_trait_ref), trait_bound)
+ .map(|InferOk { obligations: _, value: () }| {
+ // This method is called within a probe, so we can't have
+ // inference variables and placeholders escape.
+ if !trait_bound.needs_infer() && !trait_bound.has_placeholders() {
+ Some(trait_bound)
+ } else {
+ None
+ }
+ })
+ .map_err(|_| ())
+ }
+
+ fn where_clause_may_apply<'o>(
+ &mut self,
+ stack: &TraitObligationStack<'o, 'tcx>,
+ where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Result<EvaluationResult, OverflowError> {
+ self.evaluation_probe(|this| {
+ match this.match_where_clause_trait_ref(stack.obligation, where_clause_trait_ref) {
+ Ok(obligations) => this.evaluate_predicates_recursively(stack.list(), obligations),
+ Err(()) => Ok(EvaluatedToErr),
+ }
+ })
+ }
+
+ /// Return `Yes` if the obligation's predicate type applies to the env_predicate, and
+ /// `No` if it does not. Return `Ambiguous` in the case that the projection type is a GAT,
+ /// and applying this env_predicate constrains any of the obligation's GAT substitutions.
+ ///
+ /// This behavior is somewhat of a hack to prevent over-constraining inference variables
+ /// in cases like #91762.
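+ ///
+ /// For example (an illustrative sketch): given `trait Tr { type Assoc<T>; }`,
+ /// if the obligation is `<S as Tr>::Assoc<_#0t> == u32` and matching the
+ /// env predicate would force the inference variable `_#0t` to a concrete
+ /// type, we return `Ambiguous` rather than over-constrain `_#0t`.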
+ pub(super) fn match_projection_projections(
+ &mut self,
+ obligation: &ProjectionTyObligation<'tcx>,
+ env_predicate: PolyProjectionPredicate<'tcx>,
+ potentially_unnormalized_candidates: bool,
+ ) -> ProjectionMatchesProjection {
+ let mut nested_obligations = Vec::new();
+ let infer_predicate = self.infcx.replace_bound_vars_with_fresh_vars(
+ obligation.cause.span,
+ LateBoundRegionConversionTime::HigherRankedType,
+ env_predicate,
+ );
+ let infer_projection = if potentially_unnormalized_candidates {
+ ensure_sufficient_stack(|| {
+ project::normalize_with_depth_to(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ infer_predicate.projection_ty,
+ &mut nested_obligations,
+ )
+ })
+ } else {
+ infer_predicate.projection_ty
+ };
+
+ let is_match = self
+ .infcx
+ .at(&obligation.cause, obligation.param_env)
+ .define_opaque_types(false)
+ .sup(obligation.predicate, infer_projection)
+ .map_or(false, |InferOk { obligations, value: () }| {
+ self.evaluate_predicates_recursively(
+ TraitObligationStackList::empty(&ProvisionalEvaluationCache::default()),
+ nested_obligations.into_iter().chain(obligations),
+ )
+ .map_or(false, |res| res.may_apply())
+ });
+
+ if is_match {
+ let generics = self.tcx().generics_of(obligation.predicate.item_def_id);
+ // FIXME(generic-associated-types): Addresses aggressive inference in #92917.
+ // If this type is a GAT, and any of the GAT substs resolve to something new,
+ // that means that we must have newly inferred something about the GAT.
+ // We should give up in that case.
+ if !generics.params.is_empty()
+ && obligation.predicate.substs[generics.parent_count..]
+ .iter()
+ .any(|&p| p.has_infer_types_or_consts() && self.infcx.shallow_resolve(p) != p)
+ {
+ ProjectionMatchesProjection::Ambiguous
+ } else {
+ ProjectionMatchesProjection::Yes
+ }
+ } else {
+ ProjectionMatchesProjection::No
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // WINNOW
+ //
+ // Winnowing is the process of attempting to resolve ambiguity by
+ // probing further. During the winnowing process, we unify all
+ // type variables and then we also attempt to evaluate recursive
+ // bounds to see if they are satisfied.
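+ //
+ // An illustrative sketch of the preference order applied below: when both
+ // a where-clause bound (`ParamCandidate`) and an impl (`ImplCandidate`)
+ // could satisfy the same obligation, the non-global where-clause candidate
+ // wins and the impl candidate is dropped.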
+
+ /// Returns `true` if `victim` should be dropped in favor of
+ /// `other`. Generally speaking we will drop duplicate
+ /// candidates and prefer where-clause candidates.
+ ///
+ /// See the comment for "SelectionCandidate" for more details.
+ fn candidate_should_be_dropped_in_favor_of(
+ &mut self,
+ victim: &EvaluatedCandidate<'tcx>,
+ other: &EvaluatedCandidate<'tcx>,
+ needs_infer: bool,
+ ) -> bool {
+ if victim.candidate == other.candidate {
+ return true;
+ }
+
+ // Check if a bound would previously have been removed when normalizing
+ // the param_env so that it can be given the lowest priority. See
+ // #50825 for the motivation for this.
+ let is_global = |cand: &ty::PolyTraitPredicate<'tcx>| {
+ cand.is_global() && !cand.has_late_bound_regions()
+ };
+
+ // (*) Prefer `BuiltinCandidate { has_nested: false }`, `PointeeCandidate`,
+ // `DiscriminantKindCandidate`, and `ConstDestructCandidate` to anything else.
+ //
+ // This is a fix for #53123 and prevents winnowing from accidentally extending the
+ // lifetime of a variable.
+ match (&other.candidate, &victim.candidate) {
+ (_, AutoImplCandidate(..)) | (AutoImplCandidate(..), _) => {
+ bug!(
+ "default implementations shouldn't be recorded \
+ when there are other valid candidates"
+ );
+ }
+
+ // FIXME(@jswrenn): this should probably be more sophisticated
+ (TransmutabilityCandidate, _) | (_, TransmutabilityCandidate) => false,
+
+ // (*)
+ (
+ BuiltinCandidate { has_nested: false }
+ | DiscriminantKindCandidate
+ | PointeeCandidate
+ | ConstDestructCandidate(_),
+ _,
+ ) => true,
+ (
+ _,
+ BuiltinCandidate { has_nested: false }
+ | DiscriminantKindCandidate
+ | PointeeCandidate
+ | ConstDestructCandidate(_),
+ ) => false,
+
+ (ParamCandidate(other), ParamCandidate(victim)) => {
+ let same_except_bound_vars = other.skip_binder().trait_ref
+ == victim.skip_binder().trait_ref
+ && other.skip_binder().constness == victim.skip_binder().constness
+ && other.skip_binder().polarity == victim.skip_binder().polarity
+ && !other.skip_binder().trait_ref.has_escaping_bound_vars();
+ if same_except_bound_vars {
+ // See issue #84398. In short, we can generate multiple ParamCandidates which are
+ // the same except for unused bound vars. Just pick the one with the fewest bound vars
+ // or the current one if tied (they should both evaluate to the same answer). This is
+ // probably best characterized as a "hack", since we might prefer to just do our
+ // best to *not* create essentially duplicate candidates in the first place.
+ other.bound_vars().len() <= victim.bound_vars().len()
+ } else if other.skip_binder().trait_ref == victim.skip_binder().trait_ref
+ && victim.skip_binder().constness == ty::BoundConstness::NotConst
+ && other.skip_binder().polarity == victim.skip_binder().polarity
+ {
+ // Drop otherwise equivalent non-const candidates in favor of const candidates.
+ true
+ } else {
+ false
+ }
+ }
+
+ // Drop otherwise equivalent non-const fn pointer candidates
+ (FnPointerCandidate { .. }, FnPointerCandidate { is_const: false }) => true,
+
+ // Global bounds from the where clause should be ignored
+ // here (see issue #50825). Otherwise, we have a where
+ // clause so don't go around looking for impls.
+ // Arbitrarily give param candidates priority
+ // over projection and object candidates.
+ (
+ ParamCandidate(ref cand),
+ ImplCandidate(..)
+ | ClosureCandidate
+ | GeneratorCandidate
+ | FnPointerCandidate { .. }
+ | BuiltinObjectCandidate
+ | BuiltinUnsizeCandidate
+ | TraitUpcastingUnsizeCandidate(_)
+ | BuiltinCandidate { .. }
+ | TraitAliasCandidate(..)
+ | ObjectCandidate(_)
+ | ProjectionCandidate(_),
+ ) => !is_global(cand),
+ (ObjectCandidate(_) | ProjectionCandidate(_), ParamCandidate(ref cand)) => {
+ // Prefer these to a global where-clause bound
+ // (see issue #50825).
+ is_global(cand)
+ }
+ (
+ ImplCandidate(_)
+ | ClosureCandidate
+ | GeneratorCandidate
+ | FnPointerCandidate { .. }
+ | BuiltinObjectCandidate
+ | BuiltinUnsizeCandidate
+ | TraitUpcastingUnsizeCandidate(_)
+ | BuiltinCandidate { has_nested: true }
+ | TraitAliasCandidate(..),
+ ParamCandidate(ref cand),
+ ) => {
+ // Prefer these to a global where-clause bound
+ // (see issue #50825).
+ is_global(cand) && other.evaluation.must_apply_modulo_regions()
+ }
+
+ (ProjectionCandidate(i), ProjectionCandidate(j))
+ | (ObjectCandidate(i), ObjectCandidate(j)) => {
+ // Arbitrarily pick the lower numbered candidate for backwards
+ // compatibility reasons. Don't let this affect inference.
+ i < j && !needs_infer
+ }
+ (ObjectCandidate(_), ProjectionCandidate(_))
+ | (ProjectionCandidate(_), ObjectCandidate(_)) => {
+ bug!("Have both object and projection candidate")
+ }
+
+ // Arbitrarily give projection and object candidates priority.
+ (
+ ObjectCandidate(_) | ProjectionCandidate(_),
+ ImplCandidate(..)
+ | ClosureCandidate
+ | GeneratorCandidate
+ | FnPointerCandidate { .. }
+ | BuiltinObjectCandidate
+ | BuiltinUnsizeCandidate
+ | TraitUpcastingUnsizeCandidate(_)
+ | BuiltinCandidate { .. }
+ | TraitAliasCandidate(..),
+ ) => true,
+
+ (
+ ImplCandidate(..)
+ | ClosureCandidate
+ | GeneratorCandidate
+ | FnPointerCandidate { .. }
+ | BuiltinObjectCandidate
+ | BuiltinUnsizeCandidate
+ | TraitUpcastingUnsizeCandidate(_)
+ | BuiltinCandidate { .. }
+ | TraitAliasCandidate(..),
+ ObjectCandidate(_) | ProjectionCandidate(_),
+ ) => false,
+
+ (&ImplCandidate(other_def), &ImplCandidate(victim_def)) => {
+ // See if we can toss out `victim` based on specialization.
+ // This requires us to know *for sure* that the `other` impl applies
+ // i.e., `EvaluatedToOk`.
+ //
+ // FIXME(@lcnr): Using `modulo_regions` here seems kind of scary
+ // to me but is required for `std` to compile, so I didn't change it
+ // for now.
+ let tcx = self.tcx();
+ if other.evaluation.must_apply_modulo_regions() {
+ if tcx.specializes((other_def, victim_def)) {
+ return true;
+ }
+ }
+
+ if other.evaluation.must_apply_considering_regions() {
+ match tcx.impls_are_allowed_to_overlap(other_def, victim_def) {
+ Some(ty::ImplOverlapKind::Permitted { marker: true }) => {
+ // Subtle: If the predicate we are evaluating has inference
+ // variables, do *not* allow discarding candidates due to
+ // marker trait impls.
+ //
+ // Without this restriction, we could end up accidentally
+ // constraining inference variables based on an arbitrarily
+ // chosen trait impl.
+ //
+ // Imagine we have the following code:
+ //
+ // ```rust
+ // #[marker] trait MyTrait {}
+ // impl MyTrait for u8 {}
+ // impl MyTrait for bool {}
+ // ```
+ //
+ // And we are evaluating the predicate `<_#0t as MyTrait>`.
+ //
+ // During selection, we will end up with one candidate for each
+ // impl of `MyTrait`. If we were to discard one impl in favor
+ // of the other, we would be left with one candidate, causing
+ // us to "successfully" select the predicate, unifying
+ // _#0t with (for example) `u8`.
+ //
+ // However, we have no reason to believe that this unification
+ // is correct - we've essentially just picked an arbitrary
+ // *possibility* for _#0t, and required that this be the *only*
+ // possibility.
+ //
+ // Eventually, we will either:
+ // 1) Unify all inference variables in the predicate through
+ // some other means (e.g. type-checking of a function). We will
+ // then be in a position to drop marker trait candidates
+ // without constraining inference variables (since there are
+ // none left to constrain)
+ // 2) Be left with some unconstrained inference variables. We
+ // will then correctly report an inference error, since the
+ // existence of multiple marker trait impls tells us nothing
+ // about which one should actually apply.
+ !needs_infer
+ }
+ Some(_) => true,
+ None => false,
+ }
+ } else {
+ false
+ }
+ }
+
+ // Everything else is ambiguous
+ (
+ ImplCandidate(_)
+ | ClosureCandidate
+ | GeneratorCandidate
+ | FnPointerCandidate { .. }
+ | BuiltinObjectCandidate
+ | BuiltinUnsizeCandidate
+ | TraitUpcastingUnsizeCandidate(_)
+ | BuiltinCandidate { has_nested: true }
+ | TraitAliasCandidate(..),
+ ImplCandidate(_)
+ | ClosureCandidate
+ | GeneratorCandidate
+ | FnPointerCandidate { .. }
+ | BuiltinObjectCandidate
+ | BuiltinUnsizeCandidate
+ | TraitUpcastingUnsizeCandidate(_)
+ | BuiltinCandidate { has_nested: true }
+ | TraitAliasCandidate(..),
+ ) => false,
+ }
+ }
+
+ fn sized_conditions(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> BuiltinImplConditions<'tcx> {
+ use self::BuiltinImplConditions::{Ambiguous, None, Where};
+
+ // NOTE: binder moved to (*)
+ let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
+
+ match self_ty.kind() {
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(..)
+ | ty::Char
+ | ty::Ref(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Array(..)
+ | ty::Closure(..)
+ | ty::Never
+ | ty::Error(_) => {
+ // safe for everything
+ Where(ty::Binder::dummy(Vec::new()))
+ }
+
+ ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => None,
+
+ ty::Tuple(tys) => Where(
+ obligation.predicate.rebind(tys.last().map_or_else(Vec::new, |&last| vec![last])),
+ ),
+
+ ty::Adt(def, substs) => {
+ let sized_crit = def.sized_constraint(self.tcx());
+ // (*) binder moved here
+ Where(obligation.predicate.rebind({
+ sized_crit
+ .0
+ .iter()
+ .map(|ty| sized_crit.rebind(*ty).subst(self.tcx(), substs))
+ .collect()
+ }))
+ }
+
+ ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => None,
+ ty::Infer(ty::TyVar(_)) => Ambiguous,
+
+ ty::Placeholder(..)
+ | ty::Bound(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
+ }
+ }
+ }
+
+ fn copy_clone_conditions(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ ) -> BuiltinImplConditions<'tcx> {
+ // NOTE: binder moved to (*)
+ let self_ty = self.infcx.shallow_resolve(obligation.predicate.skip_binder().self_ty());
+
+ use self::BuiltinImplConditions::{Ambiguous, None, Where};
+
+ match *self_ty.kind() {
+ ty::Infer(ty::IntVar(_))
+ | ty::Infer(ty::FloatVar(_))
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Error(_) => Where(ty::Binder::dummy(Vec::new())),
+
+ ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::Char
+ | ty::RawPtr(..)
+ | ty::Never
+ | ty::Ref(_, _, hir::Mutability::Not)
+ | ty::Array(..) => {
+ // Implementations provided in libcore
+ None
+ }
+
+ ty::Dynamic(..)
+ | ty::Str
+ | ty::Slice(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Foreign(..)
+ | ty::Ref(_, _, hir::Mutability::Mut) => None,
+
+ ty::Tuple(tys) => {
+ // (*) binder moved here
+ Where(obligation.predicate.rebind(tys.iter().collect()))
+ }
+
+ ty::Closure(_, substs) => {
+ // (*) binder moved here
+ let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
+ if let ty::Infer(ty::TyVar(_)) = ty.kind() {
+ // Not yet resolved.
+ Ambiguous
+ } else {
+ Where(obligation.predicate.rebind(substs.as_closure().upvar_tys().collect()))
+ }
+ }
+
+ ty::Adt(..) | ty::Projection(..) | ty::Param(..) | ty::Opaque(..) => {
+ // Fallback to whatever user-defined impls exist in this case.
+ None
+ }
+
+ ty::Infer(ty::TyVar(_)) => {
+ // Unbound type variable. Might or might not have
+ // applicable impls and so forth, depending on what
+ // those type variables wind up being bound to.
+ Ambiguous
+ }
+
+ ty::Placeholder(..)
+ | ty::Bound(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("asked to assemble builtin bounds of unexpected type: {:?}", self_ty);
+ }
+ }
+ }
+
+ /// For default impls, we need to break apart a type into its
+ /// "constituent types" -- meaning, the types that it contains.
+ ///
+ /// Here are some (simple) examples:
+ ///
+ /// ```ignore (illustrative)
+ /// (i32, u32) -> [i32, u32]
+ /// Foo where struct Foo { x: i32, y: u32 } -> [i32, u32]
+ /// Bar<i32> where struct Bar<T> { x: T, y: u32 } -> [i32, u32]
+ /// Zed<i32> where enum Zed<T> { A(T), B(u32) } -> [i32, u32]
+ /// ```
+ fn constituent_types_for_ty(
+ &self,
+ t: ty::Binder<'tcx, Ty<'tcx>>,
+ ) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> {
+ match *t.skip_binder().kind() {
+ ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Str
+ | ty::Error(_)
+ | ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Never
+ | ty::Char => ty::Binder::dummy(Vec::new()),
+
+ ty::Placeholder(..)
+ | ty::Dynamic(..)
+ | ty::Param(..)
+ | ty::Foreign(..)
+ | ty::Projection(..)
+ | ty::Bound(..)
+ | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("asked to assemble constituent types of unexpected type: {:?}", t);
+ }
+
+ ty::RawPtr(ty::TypeAndMut { ty: element_ty, .. }) | ty::Ref(_, element_ty, _) => {
+ t.rebind(vec![element_ty])
+ }
+
+ ty::Array(element_ty, _) | ty::Slice(element_ty) => t.rebind(vec![element_ty]),
+
+ ty::Tuple(ref tys) => {
+ // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet
+ t.rebind(tys.iter().collect())
+ }
+
+ ty::Closure(_, ref substs) => {
+ let ty = self.infcx.shallow_resolve(substs.as_closure().tupled_upvars_ty());
+ t.rebind(vec![ty])
+ }
+
+ ty::Generator(_, ref substs, _) => {
+ let ty = self.infcx.shallow_resolve(substs.as_generator().tupled_upvars_ty());
+ let witness = substs.as_generator().witness();
+ t.rebind([ty].into_iter().chain(iter::once(witness)).collect())
+ }
+
+ ty::GeneratorWitness(types) => {
+ debug_assert!(!types.has_escaping_bound_vars());
+ types.map_bound(|types| types.to_vec())
+ }
+
+ // For `PhantomData<T>`, we pass `T`.
+ ty::Adt(def, substs) if def.is_phantom_data() => t.rebind(substs.types().collect()),
+
+ ty::Adt(def, substs) => {
+ t.rebind(def.all_fields().map(|f| f.ty(self.tcx(), substs)).collect())
+ }
+
+ ty::Opaque(def_id, substs) => {
+ // We can resolve the `impl Trait` to its concrete type,
+ // which enforces a DAG between the functions requiring
+ // the auto trait bounds in question.
+ t.rebind(vec![self.tcx().bound_type_of(def_id).subst(self.tcx(), substs)])
+ }
+ }
+ }
+
+ fn collect_predicates_for_types(
+ &mut self,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ recursion_depth: usize,
+ trait_def_id: DefId,
+ types: ty::Binder<'tcx, Vec<Ty<'tcx>>>,
+ ) -> Vec<PredicateObligation<'tcx>> {
+ // Because the types were potentially derived from
+ // higher-ranked obligations, they may reference late-bound
+ // regions. For example, `for<'a> Foo<&'a i32> : Copy` would
+ // yield a type like `for<'a> &'a i32`. In general, we
+ // maintain the invariant that we never manipulate bound
+ // regions, so we have to process these bound regions somehow.
+ //
+ // The strategy is to:
+ //
+ // 1. Instantiate those regions to placeholder regions (e.g.,
+ // `for<'a> &'a i32` becomes `&'0 i32`).
+ // 2. Produce something like `&'0 i32 : Copy`
+ // 3. Re-bind the regions back to `for<'a> &'a i32 : Copy`
+
+ types
+ .as_ref()
+ .skip_binder() // binder moved -\
+ .iter()
+ .flat_map(|ty| {
+ let ty: ty::Binder<'tcx, Ty<'tcx>> = types.rebind(*ty); // <----/
+
+ let placeholder_ty = self.infcx.replace_bound_vars_with_placeholders(ty);
+ let Normalized { value: normalized_ty, mut obligations } =
+ ensure_sufficient_stack(|| {
+ project::normalize_with_depth(
+ self,
+ param_env,
+ cause.clone(),
+ recursion_depth,
+ placeholder_ty,
+ )
+ });
+ let placeholder_obligation = predicate_for_trait_def(
+ self.tcx(),
+ param_env,
+ cause.clone(),
+ trait_def_id,
+ recursion_depth,
+ normalized_ty,
+ &[],
+ );
+ obligations.push(placeholder_obligation);
+ obligations
+ })
+ .collect()
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Matching
+ //
+ // Matching is a common path used for both evaluation and
+ // confirmation. It basically unifies types that appear in impls
+ // and traits. This does affect the surrounding environment;
+ // therefore, when used during evaluation, match routines must be
+ // run inside of a `probe()` so that their side-effects are
+ // contained.
+
+ fn rematch_impl(
+ &mut self,
+ impl_def_id: DefId,
+ obligation: &TraitObligation<'tcx>,
+ ) -> Normalized<'tcx, SubstsRef<'tcx>> {
+ let impl_trait_ref = self.tcx().bound_impl_trait_ref(impl_def_id).unwrap();
+ match self.match_impl(impl_def_id, impl_trait_ref, obligation) {
+ Ok(substs) => substs,
+ Err(()) => {
+ self.infcx.tcx.sess.delay_span_bug(
+ obligation.cause.span,
+ &format!(
+ "Impl {:?} was matchable against {:?} but now is not",
+ impl_def_id, obligation
+ ),
+ );
+ let value = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id);
+ let err = self.tcx().ty_error();
+ let value = value.fold_with(&mut BottomUpFolder {
+ tcx: self.tcx(),
+ ty_op: |_| err,
+ lt_op: |l| l,
+ ct_op: |c| c,
+ });
+ Normalized { value, obligations: vec![] }
+ }
+ }
+ }
+
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn match_impl(
+ &mut self,
+ impl_def_id: DefId,
+ impl_trait_ref: EarlyBinder<ty::TraitRef<'tcx>>,
+ obligation: &TraitObligation<'tcx>,
+ ) -> Result<Normalized<'tcx, SubstsRef<'tcx>>, ()> {
+ let placeholder_obligation =
+ self.infcx().replace_bound_vars_with_placeholders(obligation.predicate);
+ let placeholder_obligation_trait_ref = placeholder_obligation.trait_ref;
+
+ let impl_substs = self.infcx.fresh_substs_for_item(obligation.cause.span, impl_def_id);
+
+ let impl_trait_ref = impl_trait_ref.subst(self.tcx(), impl_substs);
+
+ debug!(?impl_trait_ref);
+
+ let Normalized { value: impl_trait_ref, obligations: mut nested_obligations } =
+ ensure_sufficient_stack(|| {
+ project::normalize_with_depth(
+ self,
+ obligation.param_env,
+ obligation.cause.clone(),
+ obligation.recursion_depth + 1,
+ impl_trait_ref,
+ )
+ });
+
+ debug!(?impl_trait_ref, ?placeholder_obligation_trait_ref);
+
+ let cause = ObligationCause::new(
+ obligation.cause.span,
+ obligation.cause.body_id,
+ ObligationCauseCode::MatchImpl(obligation.cause.clone(), impl_def_id),
+ );
+
+ let InferOk { obligations, .. } = self
+ .infcx
+ .at(&cause, obligation.param_env)
+ .define_opaque_types(false)
+ .eq(placeholder_obligation_trait_ref, impl_trait_ref)
+ .map_err(|e| debug!("match_impl: failed eq_trait_refs due to `{}`", e))?;
+ nested_obligations.extend(obligations);
+
+ if !self.intercrate
+ && self.tcx().impl_polarity(impl_def_id) == ty::ImplPolarity::Reservation
+ {
+ debug!("match_impl: reservation impls only apply in intercrate mode");
+ return Err(());
+ }
+
+ debug!(?impl_substs, ?nested_obligations, "match_impl: success");
+ Ok(Normalized { value: impl_substs, obligations: nested_obligations })
+ }
+
+ fn fast_reject_trait_refs(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ impl_trait_ref: &ty::TraitRef<'tcx>,
+ ) -> bool {
+ // We can avoid creating type variables and doing the full
+ // substitution if we find that any of the input types, when
+ // simplified, do not match.
+ let drcx = DeepRejectCtxt { treat_obligation_params: TreatParams::AsPlaceholder };
+ iter::zip(obligation.predicate.skip_binder().trait_ref.substs, impl_trait_ref.substs)
+ .any(|(obl, imp)| !drcx.generic_args_may_unify(obl, imp))
+ }
+
+ /// Normalize `where_clause_trait_ref` and try to match it against
+ /// `obligation`. If successful, return any predicates that
+ /// result from the normalization.
+ fn match_where_clause_trait_ref(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ where_clause_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
+ self.match_poly_trait_ref(obligation, where_clause_trait_ref)
+ }
+
+ /// Returns `Ok` if `poly_trait_ref` being true implies that the
+ /// obligation is satisfied.
+ #[instrument(skip(self), level = "debug")]
+ fn match_poly_trait_ref(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Result<Vec<PredicateObligation<'tcx>>, ()> {
+ self.infcx
+ .at(&obligation.cause, obligation.param_env)
+ // We don't want predicates for opaque types to just match all other types,
+ // if there is an obligation on the opaque type, then that obligation must be met
+ // opaquely. Otherwise we'd match any obligation to the opaque type and then error
+ // out later.
+ .define_opaque_types(false)
+ .sup(obligation.predicate.to_poly_trait_ref(), poly_trait_ref)
+ .map(|InferOk { obligations, .. }| obligations)
+ .map_err(|_| ())
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Miscellany
+
+ fn match_fresh_trait_refs(
+ &self,
+ previous: ty::PolyTraitPredicate<'tcx>,
+ current: ty::PolyTraitPredicate<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> bool {
+ let mut matcher = ty::_match::Match::new(self.tcx(), param_env);
+ matcher.relate(previous, current).is_ok()
+ }
+
+ fn push_stack<'o>(
+ &mut self,
+ previous_stack: TraitObligationStackList<'o, 'tcx>,
+ obligation: &'o TraitObligation<'tcx>,
+ ) -> TraitObligationStack<'o, 'tcx> {
+ let fresh_trait_pred = obligation.predicate.fold_with(&mut self.freshener);
+
+ let dfn = previous_stack.cache.next_dfn();
+ let depth = previous_stack.depth() + 1;
+ TraitObligationStack {
+ obligation,
+ fresh_trait_pred,
+ reached_depth: Cell::new(depth),
+ previous: previous_stack,
+ dfn,
+ depth,
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn closure_trait_ref_unnormalized(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> ty::PolyTraitRef<'tcx> {
+ let closure_sig = substs.as_closure().sig();
+
+ debug!(?closure_sig);
+
+ // (1) Feels icky to skip the binder here, but OTOH we know
+ // that the self-type is an unboxed closure type and hence is
+ // in fact unparameterized (or at least does not reference any
+ // regions bound in the obligation). Still probably some
+ // refactoring could make this nicer.
+ closure_trait_ref_and_return_type(
+ self.tcx(),
+ obligation.predicate.def_id(),
+ obligation.predicate.skip_binder().self_ty(), // (1)
+ closure_sig,
+ util::TupleArgumentsFlag::No,
+ )
+ .map_bound(|(trait_ref, _)| trait_ref)
+ }
+
+ fn generator_trait_ref_unnormalized(
+ &mut self,
+ obligation: &TraitObligation<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> ty::PolyTraitRef<'tcx> {
+ let gen_sig = substs.as_generator().poly_sig();
+
+ // (1) Feels icky to skip the binder here, but OTOH we know
+ // that the self-type is a generator type and hence is
+ // in fact unparameterized (or at least does not reference any
+ // regions bound in the obligation). Still probably some
+ // refactoring could make this nicer.
+
+ super::util::generator_trait_ref_and_outputs(
+ self.tcx(),
+ obligation.predicate.def_id(),
+ obligation.predicate.skip_binder().self_ty(), // (1)
+ gen_sig,
+ )
+ .map_bound(|(trait_ref, ..)| trait_ref)
+ }
+
+ /// Returns the obligations that are implied by instantiating an
+ /// impl or trait. The obligations are substituted and fully
+ /// normalized. This is used when confirming an impl or default
+ /// impl.
+ #[tracing::instrument(level = "debug", skip(self, cause, param_env))]
+ fn impl_or_trait_obligations(
+ &mut self,
+ cause: &ObligationCause<'tcx>,
+ recursion_depth: usize,
+ param_env: ty::ParamEnv<'tcx>,
+ def_id: DefId, // of impl or trait
+ substs: SubstsRef<'tcx>, // for impl or trait
+ parent_trait_pred: ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
+ ) -> Vec<PredicateObligation<'tcx>> {
+ let tcx = self.tcx();
+
+ // To allow for one-pass evaluation of the nested obligation,
+ // each predicate must be preceded by the obligations required
+ // to normalize it.
+ // For example, if we have:
+ // impl<U: Iterator<Item: Copy>, V: Iterator<Item = U>> Foo for V
+ // the impl will have the following predicates:
+ // <V as Iterator>::Item = U,
+ // U: Iterator, U: Sized,
+ // V: Iterator, V: Sized,
+ // <U as Iterator>::Item: Copy
+ // When we substitute, say, `V => IntoIter<u32>, U => $0`, the last
+ // obligation will normalize to `<$0 as Iterator>::Item = $1` and
+ // `$1: Copy`, so we must ensure the obligations are emitted in
+ // that order.
+ let predicates = tcx.bound_predicates_of(def_id);
+ debug!(?predicates);
+ assert_eq!(predicates.0.parent, None);
+ let mut obligations = Vec::with_capacity(predicates.0.predicates.len());
+ for (predicate, span) in predicates.0.predicates {
+ let span = *span;
+ let cause = cause.clone().derived_cause(parent_trait_pred, |derived| {
+ ImplDerivedObligation(Box::new(ImplDerivedObligationCause {
+ derived,
+ impl_def_id: def_id,
+ span,
+ }))
+ });
+ let predicate = normalize_with_depth_to(
+ self,
+ param_env,
+ cause.clone(),
+ recursion_depth,
+ predicates.rebind(*predicate).subst(tcx, substs),
+ &mut obligations,
+ );
+ obligations.push(Obligation { cause, recursion_depth, param_env, predicate });
+ }
+
+ obligations
+ }
+}
+
+impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> {
+ fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> {
+ TraitObligationStackList::with(self)
+ }
+
+ fn cache(&self) -> &'o ProvisionalEvaluationCache<'tcx> {
+ self.previous.cache
+ }
+
+ fn iter(&'o self) -> TraitObligationStackList<'o, 'tcx> {
+ self.list()
+ }
+
+ /// Indicates that attempting to evaluate this stack entry
+ /// required accessing something from the stack at depth `reached_depth`.
+ fn update_reached_depth(&self, reached_depth: usize) {
+ assert!(
+ self.depth >= reached_depth,
+ "invoked `update_reached_depth` with something under this stack: \
+ self.depth={} reached_depth={}",
+ self.depth,
+ reached_depth,
+ );
+ debug!(reached_depth, "update_reached_depth");
+ let mut p = self;
+ while reached_depth < p.depth {
+ debug!(?p.fresh_trait_pred, "update_reached_depth: marking as cycle participant");
+ p.reached_depth.set(p.reached_depth.get().min(reached_depth));
+ p = p.previous.head.unwrap();
+ }
+ }
+}
+
+/// The "provisional evaluation cache" is used to store intermediate cache results
+/// when solving auto traits. Auto traits are unusual in that they can support
+/// cycles. So, for example, a "proof tree" like this would be ok:
+///
+/// - `Foo<T>: Send` :-
+/// - `Bar<T>: Send` :-
+/// - `Foo<T>: Send` -- cycle, but ok
+/// - `Baz<T>: Send`
+///
+/// Here, to prove `Foo<T>: Send`, we have to prove `Bar<T>: Send` and
+/// `Baz<T>: Send`. Proving `Bar<T>: Send` in turn required `Foo<T>: Send`.
+/// For non-auto traits, this cycle would be an error, but for auto traits (because
+/// they are coinductive) it is considered ok.
+///
+/// However, there is a complication: at the point where we have
+/// "proven" `Bar<T>: Send`, we have in fact only proven it
+/// *provisionally*. In particular, we proved that `Bar<T>: Send`
+/// *under the assumption* that `Foo<T>: Send`. But what if we later
+/// find out this assumption is wrong? Specifically, we could
+/// encounter some kind of error proving `Baz<T>: Send`. In that case,
+/// `Bar<T>: Send` didn't turn out to be true.
+///
+/// In Issue #60010, we found a bug in rustc where it would cache
+/// these intermediate results. This was fixed in #60444 by disabling
+/// *all* caching for things involved in a cycle -- in our example,
+/// that would mean we don't cache that `Bar<T>: Send`. But this led
+/// to large slowdowns.
+///
+/// Specifically, imagine this scenario, where proving `Baz<T>: Send`
+ /// first requires proving `Bar<T>: Send` (which is true):
+///
+/// - `Foo<T>: Send` :-
+/// - `Bar<T>: Send` :-
+/// - `Foo<T>: Send` -- cycle, but ok
+/// - `Baz<T>: Send`
+/// - `Bar<T>: Send` -- would be nice for this to be a cache hit!
+/// - `*const T: Send` -- but what if we later encounter an error?
+///
+/// The *provisional evaluation cache* resolves this issue. It stores
+/// cache results that we've proven but which were involved in a cycle
+/// in some way. We track the minimal stack depth (i.e., the
+/// farthest from the top of the stack) that we are dependent on.
+/// The idea is that the cache results within are all valid -- so long as
+/// none of the nodes in between the current node and the node at that minimum
+/// depth result in an error (in which case the cached results are just thrown away).
+///
+/// During evaluation, we consult this provisional cache and rely on
+/// it. Accessing a cached value is considered equivalent to accessing
+/// a result at `reached_depth`, so it marks the *current* solution as
+/// provisional as well. If an error is encountered, we toss out any
+/// provisional results added from the subtree that encountered the
+/// error. When we pop the node at `reached_depth` from the stack, we
+/// can commit all the things that remain in the provisional cache.
+struct ProvisionalEvaluationCache<'tcx> {
+ /// next "depth first number" to issue -- just a counter
+ dfn: Cell<usize>,
+
+ /// Map from cache key to the provisionally evaluated thing.
+ /// The cache entries contain the result but also the DFN in which they
+ /// were added. The DFN is used to clear out values on failure.
+ ///
+ /// Imagine we have a stack like:
+ ///
+ /// - `A B C` and we add a cache for the result of C (DFN 2)
+ /// - Then we have a stack `A B D` where `D` has DFN 3
+ /// - We try to solve D by evaluating E: `A B D E` (DFN 4)
+ /// - `E` generates various cache entries which have cyclic dependencies on `B`
+ /// - `A B D E F` and so forth
+ /// - the DFN of `F` for example would be 5
+ /// - then we determine that `E` is in error -- we will then clear
+ /// all cache values whose DFN is >= 4 -- in this case, that
+ /// means the cached value for `F`.
+ map: RefCell<FxHashMap<ty::PolyTraitPredicate<'tcx>, ProvisionalEvaluation>>,
+
+ /// The stack of args that we assume to be true because a `WF(arg)` predicate
+ /// is on the stack above (and because wellformedness is coinductive).
+ /// In an "ideal" world, this would share a stack with trait predicates in
+ /// `TraitObligationStack`. However, trait predicates are *much* hotter than
+ /// `WellFormed` predicates, and it's very likely that the additional matches
+ /// will have a perf effect. The value here is the well-formed `GenericArg`
+ /// and the depth of the trait predicate *above* that well-formed predicate.
+ wf_args: RefCell<Vec<(ty::GenericArg<'tcx>, usize)>>,
+}
+
+/// A cache value for the provisional cache: contains the depth-first
+/// number (DFN) and result.
+#[derive(Copy, Clone, Debug)]
+struct ProvisionalEvaluation {
+ from_dfn: usize,
+ reached_depth: usize,
+ result: EvaluationResult,
+}
+
+impl<'tcx> Default for ProvisionalEvaluationCache<'tcx> {
+ fn default() -> Self {
+ Self { dfn: Cell::new(0), map: Default::default(), wf_args: Default::default() }
+ }
+}
+
+impl<'tcx> ProvisionalEvaluationCache<'tcx> {
+ /// Get the next DFN in sequence (basically a counter).
+ fn next_dfn(&self) -> usize {
+ let result = self.dfn.get();
+ self.dfn.set(result + 1);
+ result
+ }
+
+ /// Check the provisional cache for any result for
+ /// `fresh_trait_pred`. If there is a hit, then you must consider
+ /// it an access to the stack slots at depth
+ /// `reached_depth` (from the returned value).
+ fn get_provisional(
+ &self,
+ fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
+ ) -> Option<ProvisionalEvaluation> {
+ debug!(
+ ?fresh_trait_pred,
+ "get_provisional = {:#?}",
+ self.map.borrow().get(&fresh_trait_pred),
+ );
+ Some(*self.map.borrow().get(&fresh_trait_pred)?)
+ }
+
+ /// Insert a provisional result into the cache. The result came
+ /// from the node with the given DFN. It accessed a minimum depth
+ /// of `reached_depth` to compute. It evaluated `fresh_trait_pred`
+ /// and resulted in `result`.
+ fn insert_provisional(
+ &self,
+ from_dfn: usize,
+ reached_depth: usize,
+ fresh_trait_pred: ty::PolyTraitPredicate<'tcx>,
+ result: EvaluationResult,
+ ) {
+ debug!(?from_dfn, ?fresh_trait_pred, ?result, "insert_provisional");
+
+ let mut map = self.map.borrow_mut();
+
+ // Subtle: when we complete working on the DFN `from_dfn`, anything
+ // that remains in the provisional cache must be dependent on some older
+ // stack entry than `from_dfn`. We have to update their depth with our transitive
+ // depth in that case or else it would be referring to some popped node.
+ //
+ // Example:
+ // A (reached depth 0)
+ // ...
+ // B // depth 1 -- reached depth = 0
+ // C // depth 2 -- reached depth = 1 (should be 0)
+ // B
+ // A // depth 0
+ // D (reached depth 1)
+ // C (cache -- reached depth = 2)
+ for (_k, v) in &mut *map {
+ if v.from_dfn >= from_dfn {
+ v.reached_depth = reached_depth.min(v.reached_depth);
+ }
+ }
+
+ map.insert(fresh_trait_pred, ProvisionalEvaluation { from_dfn, reached_depth, result });
+ }
+
+ /// Invoked when the node with dfn `dfn` does not get a successful
+ /// result. This will clear out any provisional cache entries
+ /// that were added since `dfn` was created. This is because the
+ /// provisional entries are things which must assume that the
+ /// things on the stack at the time of their creation succeeded --
+ /// since the failing node is presently at the top of the stack,
+ /// these provisional entries must either depend on it or some
+ /// ancestor of it.
+ fn on_failure(&self, dfn: usize) {
+ debug!(?dfn, "on_failure");
+ self.map.borrow_mut().retain(|key, eval| {
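+ // Keep only entries created before `dfn`; anything newer assumed
+ // that the now-failing node had succeeded.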
+ if eval.from_dfn >= dfn {
+ debug!("on_failure: removing {:?}", key);
+ false
+ } else {
+ true
+ }
+ });
+ }
+
+ /// Invoked when the node at depth `depth` completed without
+ /// depending on anything higher in the stack (if that completion
+ /// was a failure, then `on_failure` should have been invoked
+ /// already).
+ ///
+ /// Note that we may still have provisional cache items remaining
+ /// in the cache when this is done. For example, if there is a
+ /// cycle:
+ ///
+ /// * A depends on...
+ /// * B depends on A
+ /// * C depends on...
+ /// * D depends on C
+ /// * ...
+ ///
+ /// Then as we complete the C node we will have a provisional cache
+ /// with results for A, B, C, and D. This method would clear out
+ /// the C and D results, but leave A and B provisional.
+ ///
+ /// This is determined based on the DFN: we remove any provisional
+ /// results created since `dfn` started (e.g., in our example, dfn
+ /// would be 2, representing the C node, and hence we would
+ /// remove the result for D, which has DFN 3, but not the results for
+ /// A and B, which have DFNs 0 and 1 respectively).
+ ///
+ /// Note that we *do not* attempt to cache these cycle participants
+ /// in the evaluation cache. Doing so would require carefully computing
+ /// the correct `DepNode` to store in the cache entry:
+ /// cycle participants may implicitly depend on query results
+ /// related to other participants in the cycle, due to our logic
+ /// which examines the evaluation stack.
+ ///
+ /// We used to try to perform this caching,
+ /// but it led to multiple incremental compilation ICEs
+ /// (see #92987 and #96319), and was very hard to understand.
+ /// Fortunately, removing the caching didn't seem to
+ /// have a performance impact in practice.
+ fn on_completion(&self, dfn: usize) {
+ debug!(?dfn, "on_completion");
+
+ for (fresh_trait_pred, eval) in
+ self.map.borrow_mut().drain_filter(|_k, eval| eval.from_dfn >= dfn)
+ {
+ debug!(?fresh_trait_pred, ?eval, "on_completion");
+ }
+ }
+}
+
+#[derive(Copy, Clone)]
+struct TraitObligationStackList<'o, 'tcx> {
+ cache: &'o ProvisionalEvaluationCache<'tcx>,
+ head: Option<&'o TraitObligationStack<'o, 'tcx>>,
+}
+
+impl<'o, 'tcx> TraitObligationStackList<'o, 'tcx> {
+ fn empty(cache: &'o ProvisionalEvaluationCache<'tcx>) -> TraitObligationStackList<'o, 'tcx> {
+ TraitObligationStackList { cache, head: None }
+ }
+
+ fn with(r: &'o TraitObligationStack<'o, 'tcx>) -> TraitObligationStackList<'o, 'tcx> {
+ TraitObligationStackList { cache: r.cache(), head: Some(r) }
+ }
+
+ fn head(&self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
+ self.head
+ }
+
+ fn depth(&self) -> usize {
+ if let Some(head) = self.head { head.depth } else { 0 }
+ }
+}
+
+impl<'o, 'tcx> Iterator for TraitObligationStackList<'o, 'tcx> {
+ type Item = &'o TraitObligationStack<'o, 'tcx>;
+
+ fn next(&mut self) -> Option<&'o TraitObligationStack<'o, 'tcx>> {
+ let o = self.head?;
+ *self = o.previous;
+ Some(o)
+ }
+}
+
+impl<'o, 'tcx> fmt::Debug for TraitObligationStack<'o, 'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "TraitObligationStack({:?})", self.obligation)
+ }
+}
+
+pub enum ProjectionMatchesProjection {
+ Yes,
+ Ambiguous,
+ No,
+}
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
new file mode 100644
index 000000000..6223c5ea3
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs
@@ -0,0 +1,531 @@
+//! Logic and data structures related to impl specialization, explained in
+//! greater detail below.
+//!
+ //! At the moment, this implementation supports only the simple "chain" rule:
+//! If any two impls overlap, one must be a strict subset of the other.
+//!
+//! See the [rustc dev guide] for a bit more detail on how specialization
+//! fits together with the rest of the trait machinery.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/specialization.html
+
+pub mod specialization_graph;
+use specialization_graph::GraphExt;
+
+use crate::infer::{InferCtxt, InferOk, TyCtxtInferExt};
+use crate::traits::select::IntercrateAmbiguityCause;
+use crate::traits::{
+ self, coherence, FutureCompatOverlapErrorKind, ObligationCause, TraitEngine, TraitEngineExt,
+};
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
+use rustc_errors::{struct_span_err, EmissionGuarantee, LintDiagnosticBuilder};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
+use rustc_middle::ty::{self, ImplSubject, TyCtxt};
+use rustc_session::lint::builtin::COHERENCE_LEAK_CHECK;
+use rustc_session::lint::builtin::ORDER_DEPENDENT_TRAIT_OBJECTS;
+use rustc_span::{Span, DUMMY_SP};
+
+use super::SelectionContext;
+use super::{util, FulfillmentContext};
+
+/// Information pertinent to an overlapping impl error.
+#[derive(Debug)]
+pub struct OverlapError {
+ pub with_impl: DefId,
+ pub trait_desc: String,
+ pub self_desc: Option<String>,
+ pub intercrate_ambiguity_causes: FxIndexSet<IntercrateAmbiguityCause>,
+ pub involves_placeholder: bool,
+}
+
+/// Given a subst for the requested impl, translate it to a subst
+/// appropriate for the actual item definition (whether it be in that impl,
+/// a parent impl, or the trait).
+///
+/// When we have selected one impl, but are actually using item definitions from
+/// a parent impl providing a default, we need a way to translate between the
+/// type parameters of the two impls. Here the `source_impl` is the one we've
+/// selected, and `source_substs` is a substitution of its generics.
+/// And `target_node` is the impl/trait we're actually going to get the
+/// definition from. The resulting substitution will map from `target_node`'s
+/// generics to `source_impl`'s generics as instantiated by `source_substs`.
+///
+/// For example, consider the following scenario:
+///
+/// ```ignore (illustrative)
+/// trait Foo { ... }
+/// impl<T, U> Foo for (T, U) { ... } // target impl
+/// impl<V> Foo for (V, V) { ... } // source impl
+/// ```
+///
+/// Suppose we have selected "source impl" with `V` instantiated with `u32`.
+/// This function will produce a substitution with `T` and `U` both mapping to `u32`.
+///
+/// Where-clauses add some trickiness here, because they can be used to "define"
+/// an argument indirectly:
+///
+/// ```ignore (illustrative)
+/// impl<'a, I, T: 'a> Iterator for Cloned<I>
+/// where I: Iterator<Item = &'a T>, T: Clone
+/// ```
+///
+/// In a case like this, the substitution for `T` is determined indirectly,
+/// through associated type projection. We deal with such cases by using
+/// *fulfillment* to relate the two impls, requiring that all projections are
+/// resolved.
+pub fn translate_substs<'a, 'tcx>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ source_impl: DefId,
+ source_substs: SubstsRef<'tcx>,
+ target_node: specialization_graph::Node,
+) -> SubstsRef<'tcx> {
+ debug!(
+ "translate_substs({:?}, {:?}, {:?}, {:?})",
+ param_env, source_impl, source_substs, target_node
+ );
+ let source_trait_ref =
+ infcx.tcx.bound_impl_trait_ref(source_impl).unwrap().subst(infcx.tcx, &source_substs);
+
+ // translate the Self and Param parts of the substitution, since those
+ // vary across impls
+ let target_substs = match target_node {
+ specialization_graph::Node::Impl(target_impl) => {
+ // no need to translate if we're targeting the impl we started with
+ if source_impl == target_impl {
+ return source_substs;
+ }
+
+ fulfill_implication(infcx, param_env, source_trait_ref, target_impl).unwrap_or_else(
+ |_| {
+ bug!(
+ "When translating substitutions for specialization, the expected \
+ specialization failed to hold"
+ )
+ },
+ )
+ }
+ specialization_graph::Node::Trait(..) => source_trait_ref.substs,
+ };
+
+ // directly inherit the method generics, since those do not vary across impls
+ source_substs.rebase_onto(infcx.tcx, source_impl, target_substs)
+}
+
+/// Is `impl1` a specialization of `impl2`?
+///
+/// Specialization is determined by the sets of types to which the impls apply;
+/// `impl1` specializes `impl2` if it applies to a subset of the types `impl2` applies
+/// to.
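+///
+/// For example (illustrative; these impls are hypothetical):
+///
+/// ```ignore (illustrative)
+/// impl<T> MyTrait for Vec<T> {}  // impl2: applies to every `Vec<T>`
+/// impl MyTrait for Vec<u8> {}    // impl1: applies only to `Vec<u8>`, a
+///                                // subset, so impl1 specializes impl2
+/// ```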
+#[instrument(skip(tcx), level = "debug")]
+pub(super) fn specializes(tcx: TyCtxt<'_>, (impl1_def_id, impl2_def_id): (DefId, DefId)) -> bool {
+ // The feature gate should prevent introducing new specializations, but not
+ // taking advantage of upstream ones.
+ let features = tcx.features();
+ let specialization_enabled = features.specialization || features.min_specialization;
+ if !specialization_enabled && (impl1_def_id.is_local() || impl2_def_id.is_local()) {
+ return false;
+ }
+
+ // We determine whether there's a subset relationship by:
+ //
+ // - replacing bound vars with placeholders in impl1,
+ // - assuming the where clauses for impl1,
+ // - instantiating impl2 with fresh inference variables,
+ // - unifying,
+ // - attempting to prove the where clauses for impl2
+ //
+ // The last three steps are encapsulated in `fulfill_implication`.
+ //
+ // See RFC 1210 for more details and justification.
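+
+ // For intuition (illustrative): to check `impl Foo for Vec<u8>` (impl1)
+ // against `impl<T: Clone> Foo for Vec<T>` (impl2), we assume impl1's
+ // where-clauses, instantiate impl2 as applying to `Vec<?T>`, unify
+ // `Vec<u8>` with `Vec<?T>` (forcing `?T = u8`), and finally try to
+ // prove `u8: Clone`.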
+
+ // Currently we do not allow, e.g., a negative impl to specialize a positive one.
+ if tcx.impl_polarity(impl1_def_id) != tcx.impl_polarity(impl2_def_id) {
+ return false;
+ }
+
+ // create a parameter environment corresponding to a (placeholder) instantiation of impl1
+ let penv = tcx.param_env(impl1_def_id);
+ let impl1_trait_ref = tcx.impl_trait_ref(impl1_def_id).unwrap();
+
+ // Create an infcx, taking the predicates of impl1 as assumptions:
+ tcx.infer_ctxt().enter(|infcx| {
+ let impl1_trait_ref = match traits::fully_normalize(
+ &infcx,
+ FulfillmentContext::new(),
+ ObligationCause::dummy(),
+ penv,
+ impl1_trait_ref,
+ ) {
+ Ok(impl1_trait_ref) => impl1_trait_ref,
+ Err(_errors) => {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(impl1_def_id),
+ format!("failed to fully normalize {impl1_trait_ref}"),
+ );
+ impl1_trait_ref
+ }
+ };
+
+ // Attempt to prove that impl2 applies, given all of the above.
+ fulfill_implication(&infcx, penv, impl1_trait_ref, impl2_def_id).is_ok()
+ })
+}
+
+/// Attempt to fulfill all obligations of `target_impl` after unification with
+/// `source_trait_ref`. If successful, returns a substitution for *all* the
+/// generics of `target_impl`, including both those needed to unify with
+/// `source_trait_ref` and those whose identity is determined via a where
+/// clause in the impl.
+fn fulfill_implication<'a, 'tcx>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ source_trait_ref: ty::TraitRef<'tcx>,
+ target_impl: DefId,
+) -> Result<SubstsRef<'tcx>, ()> {
+ debug!(
+ "fulfill_implication({:?}, trait_ref={:?} |- {:?} applies)",
+ param_env, source_trait_ref, target_impl
+ );
+
+ let source_trait = ImplSubject::Trait(source_trait_ref);
+
+ let selcx = &mut SelectionContext::new(&infcx);
+ let target_substs = infcx.fresh_substs_for_item(DUMMY_SP, target_impl);
+ let (target_trait, obligations) =
+ util::impl_subject_and_oblig(selcx, param_env, target_impl, target_substs);
+
+ // do the impls unify? If not, no specialization.
+ let Ok(InferOk { obligations: more_obligations, .. }) =
+ infcx.at(&ObligationCause::dummy(), param_env).eq(source_trait, target_trait)
+ else {
+ debug!(
+ "fulfill_implication: {:?} does not unify with {:?}",
+ source_trait, target_trait
+ );
+ return Err(());
+ };
+
+ // attempt to prove all of the predicates for impl2 given those for impl1
+ // (which are packed up in penv)
+
+ infcx.save_and_restore_in_snapshot_flag(|infcx| {
+ let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+ for oblig in obligations.chain(more_obligations) {
+ fulfill_cx.register_predicate_obligation(&infcx, oblig);
+ }
+ match fulfill_cx.select_all_or_error(infcx).as_slice() {
+ [] => {
+ debug!(
+ "fulfill_implication: an impl for {:?} specializes {:?}",
+ source_trait, target_trait
+ );
+
+ // Now resolve the *substitution* we built for the target earlier, replacing
+ // the inference variables inside with whatever we got from fulfillment.
+ Ok(infcx.resolve_vars_if_possible(target_substs))
+ }
+ errors => {
+ // no dice!
+ debug!(
+ "fulfill_implication: for impls on {:?} and {:?}, \
+ could not fulfill: {:?} given {:?}",
+ source_trait,
+ target_trait,
+ errors,
+ param_env.caller_bounds()
+ );
+ Err(())
+ }
+ }
+ })
+}
+
+// Query provider for `specialization_graph_of`.
+pub(super) fn specialization_graph_provider(
+ tcx: TyCtxt<'_>,
+ trait_id: DefId,
+) -> specialization_graph::Graph {
+ let mut sg = specialization_graph::Graph::new();
+ let overlap_mode = specialization_graph::OverlapMode::get(tcx, trait_id);
+
+ let mut trait_impls: Vec<_> = tcx.all_impls(trait_id).collect();
+
+ // The coherence checking implementation seems to rely on impls being
+ // iterated over (roughly) in definition order, so we are sorting by
+ // negated `CrateNum` (so remote definitions are visited first) and then
+ // by a flattened version of the `DefIndex`.
+ trait_impls
+ .sort_unstable_by_key(|def_id| (-(def_id.krate.as_u32() as i64), def_id.index.index()));
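+ // For example (illustrative): since `LOCAL_CRATE` is crate number 0, an
+ // impl from crate 3 gets sort key `-3` and a local impl gets `0`, so
+ // remote impls come first.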
+
+ for impl_def_id in trait_impls {
+ if let Some(impl_def_id) = impl_def_id.as_local() {
+ // This is where impl overlap checking happens:
+ let insert_result = sg.insert(tcx, impl_def_id.to_def_id(), overlap_mode);
+ // Report error if there was one.
+ let (overlap, used_to_be_allowed) = match insert_result {
+ Err(overlap) => (Some(overlap), None),
+ Ok(Some(overlap)) => (Some(overlap.error), Some(overlap.kind)),
+ Ok(None) => (None, None),
+ };
+
+ if let Some(overlap) = overlap {
+ report_overlap_conflict(tcx, overlap, impl_def_id, used_to_be_allowed, &mut sg);
+ }
+ } else {
+ let parent = tcx.impl_parent(impl_def_id).unwrap_or(trait_id);
+ sg.record_impl_from_cstore(tcx, parent, impl_def_id)
+ }
+ }
+
+ sg
+}
+
+// This function is only used when encountering errors,
+// and inlining it negatively impacts performance.
+#[cold]
+#[inline(never)]
+fn report_overlap_conflict(
+ tcx: TyCtxt<'_>,
+ overlap: OverlapError,
+ impl_def_id: LocalDefId,
+ used_to_be_allowed: Option<FutureCompatOverlapErrorKind>,
+ sg: &mut specialization_graph::Graph,
+) {
+ let impl_polarity = tcx.impl_polarity(impl_def_id.to_def_id());
+ let other_polarity = tcx.impl_polarity(overlap.with_impl);
+ match (impl_polarity, other_polarity) {
+ (ty::ImplPolarity::Negative, ty::ImplPolarity::Positive) => {
+ report_negative_positive_conflict(
+ tcx,
+ &overlap,
+ impl_def_id,
+ impl_def_id.to_def_id(),
+ overlap.with_impl,
+ sg,
+ );
+ }
+
+ (ty::ImplPolarity::Positive, ty::ImplPolarity::Negative) => {
+ report_negative_positive_conflict(
+ tcx,
+ &overlap,
+ impl_def_id,
+ overlap.with_impl,
+ impl_def_id.to_def_id(),
+ sg,
+ );
+ }
+
+ _ => {
+ report_conflicting_impls(tcx, overlap, impl_def_id, used_to_be_allowed, sg);
+ }
+ }
+}
+
+fn report_negative_positive_conflict(
+ tcx: TyCtxt<'_>,
+ overlap: &OverlapError,
+ local_impl_def_id: LocalDefId,
+ negative_impl_def_id: DefId,
+ positive_impl_def_id: DefId,
+ sg: &mut specialization_graph::Graph,
+) {
+ let impl_span = tcx.def_span(local_impl_def_id);
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0751,
+ "found both positive and negative implementation of trait `{}`{}:",
+ overlap.trait_desc,
+ overlap.self_desc.clone().map_or_else(String::new, |ty| format!(" for type `{}`", ty))
+ );
+
+ match tcx.span_of_impl(negative_impl_def_id) {
+ Ok(span) => {
+ err.span_label(span, "negative implementation here");
+ }
+ Err(cname) => {
+ err.note(&format!("negative implementation in crate `{}`", cname));
+ }
+ }
+
+ match tcx.span_of_impl(positive_impl_def_id) {
+ Ok(span) => {
+ err.span_label(span, "positive implementation here");
+ }
+ Err(cname) => {
+ err.note(&format!("positive implementation in crate `{}`", cname));
+ }
+ }
+
+ sg.has_errored = Some(err.emit());
+}
+
+fn report_conflicting_impls(
+ tcx: TyCtxt<'_>,
+ overlap: OverlapError,
+ impl_def_id: LocalDefId,
+ used_to_be_allowed: Option<FutureCompatOverlapErrorKind>,
+ sg: &mut specialization_graph::Graph,
+) {
+ let impl_span = tcx.def_span(impl_def_id);
+
+ // Work to be done after we've built the DiagnosticBuilder. We have to define it
+ // now because the struct_lint methods don't return the DiagnosticBuilder
+ // that's passed in.
+ fn decorate<G: EmissionGuarantee>(
+ tcx: TyCtxt<'_>,
+ overlap: OverlapError,
+ used_to_be_allowed: Option<FutureCompatOverlapErrorKind>,
+ impl_span: Span,
+ err: LintDiagnosticBuilder<'_, G>,
+ ) -> G {
+ let msg = format!(
+ "conflicting implementations of trait `{}`{}{}",
+ overlap.trait_desc,
+ overlap
+ .self_desc
+ .clone()
+ .map_or_else(String::new, |ty| { format!(" for type `{}`", ty) }),
+ match used_to_be_allowed {
+ Some(FutureCompatOverlapErrorKind::Issue33140) => ": (E0119)",
+ _ => "",
+ }
+ );
+ let mut err = err.build(&msg);
+ match tcx.span_of_impl(overlap.with_impl) {
+ Ok(span) => {
+ err.span_label(span, "first implementation here");
+
+ err.span_label(
+ impl_span,
+ format!(
+ "conflicting implementation{}",
+ overlap.self_desc.map_or_else(String::new, |ty| format!(" for `{}`", ty))
+ ),
+ );
+ }
+ Err(cname) => {
+ let msg = match to_pretty_impl_header(tcx, overlap.with_impl) {
+ Some(s) => format!("conflicting implementation in crate `{}`:\n- {}", cname, s),
+ None => format!("conflicting implementation in crate `{}`", cname),
+ };
+ err.note(&msg);
+ }
+ }
+
+ for cause in &overlap.intercrate_ambiguity_causes {
+ cause.add_intercrate_ambiguity_hint(&mut err);
+ }
+
+ if overlap.involves_placeholder {
+ coherence::add_placeholder_note(&mut err);
+ }
+ err.emit()
+ }
+
+ match used_to_be_allowed {
+ None => {
+ let reported = if overlap.with_impl.is_local()
+ || tcx.orphan_check_impl(impl_def_id).is_ok()
+ {
+ let err = struct_span_err!(tcx.sess, impl_span, E0119, "");
+ Some(decorate(
+ tcx,
+ overlap,
+ used_to_be_allowed,
+ impl_span,
+ LintDiagnosticBuilder::new(err),
+ ))
+ } else {
+ Some(tcx.sess.delay_span_bug(impl_span, "impl should have failed the orphan check"))
+ };
+ sg.has_errored = reported;
+ }
+ Some(kind) => {
+ let lint = match kind {
+ FutureCompatOverlapErrorKind::Issue33140 => ORDER_DEPENDENT_TRAIT_OBJECTS,
+ FutureCompatOverlapErrorKind::LeakCheck => COHERENCE_LEAK_CHECK,
+ };
+ tcx.struct_span_lint_hir(
+ lint,
+ tcx.hir().local_def_id_to_hir_id(impl_def_id),
+ impl_span,
+ |ldb| {
+ decorate(tcx, overlap, used_to_be_allowed, impl_span, ldb);
+ },
+ );
+ }
+ };
+}
+
+/// Recovers the "impl X for Y" signature from `impl_def_id` and returns it as a
+/// string.
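+///
+/// For example (illustrative), `impl<T: Clone> Foo for Vec<T>` would render
+/// roughly as:
+///
+/// ```text
+/// impl<T> Foo for Vec<T>
+///  where T: Clone;
+/// ```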
+pub(crate) fn to_pretty_impl_header(tcx: TyCtxt<'_>, impl_def_id: DefId) -> Option<String> {
+ use std::fmt::Write;
+
+ let trait_ref = tcx.impl_trait_ref(impl_def_id)?;
+ let mut w = "impl".to_owned();
+
+ let substs = InternalSubsts::identity_for_item(tcx, impl_def_id);
+
+ // FIXME: Currently only handles ?Sized.
+ // Needs to support ?Move and ?DynSized when they are implemented.
+ let mut types_without_default_bounds = FxHashSet::default();
+ let sized_trait = tcx.lang_items().sized_trait();
+
+ if !substs.is_empty() {
+ types_without_default_bounds.extend(substs.types());
+ w.push('<');
+ w.push_str(
+ &substs
+ .iter()
+ .map(|k| k.to_string())
+ .filter(|k| k != "'_")
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+ w.push('>');
+ }
+
+ write!(w, " {} for {}", trait_ref.print_only_trait_path(), tcx.type_of(impl_def_id)).unwrap();
+
+ // The predicates will contain default bounds like `T: Sized`. We need to
+ // remove these bounds, and add `T: ?Sized` to any untouched type parameters.
+ let predicates = tcx.predicates_of(impl_def_id).predicates;
+ let mut pretty_predicates =
+ Vec::with_capacity(predicates.len() + types_without_default_bounds.len());
+
+ for (mut p, _) in predicates {
+ if let Some(poly_trait_ref) = p.to_opt_poly_trait_pred() {
+ if Some(poly_trait_ref.def_id()) == sized_trait {
+ types_without_default_bounds.remove(&poly_trait_ref.self_ty().skip_binder());
+ continue;
+ }
+
+ if ty::BoundConstness::ConstIfConst == poly_trait_ref.skip_binder().constness {
+ let new_trait_pred = poly_trait_ref.map_bound(|mut trait_pred| {
+ trait_pred.constness = ty::BoundConstness::NotConst;
+ trait_pred
+ });
+
+ p = tcx.mk_predicate(new_trait_pred.map_bound(ty::PredicateKind::Trait))
+ }
+ }
+ pretty_predicates.push(p.to_string());
+ }
+
+ pretty_predicates
+ .extend(types_without_default_bounds.iter().map(|ty| format!("{}: ?Sized", ty)));
+
+ if !pretty_predicates.is_empty() {
+ write!(w, "\n where {}", pretty_predicates.join(", ")).unwrap();
+ }
+
+ w.push(';');
+ Some(w)
+}
diff --git a/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs
new file mode 100644
index 000000000..fcb73b43f
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs
@@ -0,0 +1,395 @@
+use super::OverlapError;
+
+use crate::traits;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::fast_reject::{self, SimplifiedType, TreatParams};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+
+pub use rustc_middle::traits::specialization_graph::*;
+
+#[derive(Copy, Clone, Debug)]
+pub enum FutureCompatOverlapErrorKind {
+ Issue33140,
+ LeakCheck,
+}
+
+#[derive(Debug)]
+pub struct FutureCompatOverlapError {
+ pub error: OverlapError,
+ pub kind: FutureCompatOverlapErrorKind,
+}
+
+/// The result of attempting to insert an impl into a group of children.
+enum Inserted {
+ /// The impl was inserted as a new child in this group of children.
+ BecameNewSibling(Option<FutureCompatOverlapError>),
+
+ /// The impl should replace existing impls [X1, ..], because the impl specializes X1, X2, etc.
+ ReplaceChildren(Vec<DefId>),
+
+ /// The impl is a specialization of an existing child.
+ ShouldRecurseOn(DefId),
+}
+
+trait ChildrenExt<'tcx> {
+ fn insert_blindly(&mut self, tcx: TyCtxt<'tcx>, impl_def_id: DefId);
+ fn remove_existing(&mut self, tcx: TyCtxt<'tcx>, impl_def_id: DefId);
+
+ fn insert(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ impl_def_id: DefId,
+ simplified_self: Option<SimplifiedType>,
+ overlap_mode: OverlapMode,
+ ) -> Result<Inserted, OverlapError>;
+}
+
+impl ChildrenExt<'_> for Children {
+ /// Insert an impl into this set of children without comparing to any existing impls.
+ fn insert_blindly(&mut self, tcx: TyCtxt<'_>, impl_def_id: DefId) {
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+ if let Some(st) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), TreatParams::AsInfer)
+ {
+ debug!("insert_blindly: impl_def_id={:?} st={:?}", impl_def_id, st);
+ self.non_blanket_impls.entry(st).or_default().push(impl_def_id)
+ } else {
+ debug!("insert_blindly: impl_def_id={:?} st=None", impl_def_id);
+ self.blanket_impls.push(impl_def_id)
+ }
+ }
+
+ /// Removes an impl from this set of children. Used when replacing
+ /// an impl with a parent. The impl must be present in the list of
+ /// children already.
+ fn remove_existing(&mut self, tcx: TyCtxt<'_>, impl_def_id: DefId) {
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+ let vec: &mut Vec<DefId>;
+ if let Some(st) = fast_reject::simplify_type(tcx, trait_ref.self_ty(), TreatParams::AsInfer)
+ {
+ debug!("remove_existing: impl_def_id={:?} st={:?}", impl_def_id, st);
+ vec = self.non_blanket_impls.get_mut(&st).unwrap();
+ } else {
+ debug!("remove_existing: impl_def_id={:?} st=None", impl_def_id);
+ vec = &mut self.blanket_impls;
+ }
+
+ let index = vec.iter().position(|d| *d == impl_def_id).unwrap();
+ vec.remove(index);
+ }
+
+ /// Attempt to insert an impl into this set of children, while comparing for
+ /// specialization relationships.
+ fn insert(
+ &mut self,
+ tcx: TyCtxt<'_>,
+ impl_def_id: DefId,
+ simplified_self: Option<SimplifiedType>,
+ overlap_mode: OverlapMode,
+ ) -> Result<Inserted, OverlapError> {
+ let mut last_lint = None;
+ let mut replace_children = Vec::new();
+
+ debug!("insert(impl_def_id={:?}, simplified_self={:?})", impl_def_id, simplified_self,);
+
+ let possible_siblings = match simplified_self {
+ Some(st) => PotentialSiblings::Filtered(filtered_children(self, st)),
+ None => PotentialSiblings::Unfiltered(iter_children(self)),
+ };
+
+ for possible_sibling in possible_siblings {
+ debug!(
+ "insert: impl_def_id={:?}, simplified_self={:?}, possible_sibling={:?}",
+ impl_def_id, simplified_self, possible_sibling,
+ );
+
+ let create_overlap_error = |overlap: traits::coherence::OverlapResult<'_>| {
+ let trait_ref = overlap.impl_header.trait_ref.unwrap();
+ let self_ty = trait_ref.self_ty();
+
+ // FIXME: should postpone string formatting until we decide to actually emit.
+ with_no_trimmed_paths!({
+ OverlapError {
+ with_impl: possible_sibling,
+ trait_desc: trait_ref.print_only_trait_path().to_string(),
+ // Only report the `Self` type if it has at least
+ // some outer concrete shell; otherwise, it's
+ // not adding much information.
+ self_desc: if self_ty.has_concrete_skeleton() {
+ Some(self_ty.to_string())
+ } else {
+ None
+ },
+ intercrate_ambiguity_causes: overlap.intercrate_ambiguity_causes,
+ involves_placeholder: overlap.involves_placeholder,
+ }
+ })
+ };
+
+ let report_overlap_error = |overlap: traits::coherence::OverlapResult<'_>,
+ last_lint: &mut _| {
+ // Found overlap, but no specialization; error out or report future-compat warning.
+
+ // Do we *still* get overlap if we disable the future-incompatible modes?
+ let should_err = traits::overlapping_impls(
+ tcx,
+ possible_sibling,
+ impl_def_id,
+ traits::SkipLeakCheck::default(),
+ overlap_mode,
+ |_| true,
+ || false,
+ );
+
+ let error = create_overlap_error(overlap);
+
+ if should_err {
+ Err(error)
+ } else {
+ *last_lint = Some(FutureCompatOverlapError {
+ error,
+ kind: FutureCompatOverlapErrorKind::LeakCheck,
+ });
+
+ Ok((false, false))
+ }
+ };
+
+ let last_lint_mut = &mut last_lint;
+ let (le, ge) = traits::overlapping_impls(
+ tcx,
+ possible_sibling,
+ impl_def_id,
+ traits::SkipLeakCheck::Yes,
+ overlap_mode,
+ |overlap| {
+ if let Some(overlap_kind) =
+ tcx.impls_are_allowed_to_overlap(impl_def_id, possible_sibling)
+ {
+ match overlap_kind {
+ ty::ImplOverlapKind::Permitted { marker: _ } => {}
+ ty::ImplOverlapKind::Issue33140 => {
+ *last_lint_mut = Some(FutureCompatOverlapError {
+ error: create_overlap_error(overlap),
+ kind: FutureCompatOverlapErrorKind::Issue33140,
+ });
+ }
+ }
+
+ return Ok((false, false));
+ }
+
+ let le = tcx.specializes((impl_def_id, possible_sibling));
+ let ge = tcx.specializes((possible_sibling, impl_def_id));
+
+ if le == ge {
+ report_overlap_error(overlap, last_lint_mut)
+ } else {
+ Ok((le, ge))
+ }
+ },
+ || Ok((false, false)),
+ )?;
+
+ if le && !ge {
+ debug!(
+ "descending as child of TraitRef {:?}",
+ tcx.impl_trait_ref(possible_sibling).unwrap()
+ );
+
+ // The impl specializes `possible_sibling`.
+ return Ok(Inserted::ShouldRecurseOn(possible_sibling));
+ } else if ge && !le {
+ debug!(
+ "placing as parent of TraitRef {:?}",
+ tcx.impl_trait_ref(possible_sibling).unwrap()
+ );
+
+ replace_children.push(possible_sibling);
+ } else {
+ // Either there's no overlap, or the overlap was already reported by
+ // `report_overlap_error`.
+ }
+ }
+
+ if !replace_children.is_empty() {
+ return Ok(Inserted::ReplaceChildren(replace_children));
+ }
+
+ // No overlap with any potential siblings, so add as a new sibling.
+ debug!("placing as new sibling");
+ self.insert_blindly(tcx, impl_def_id);
+ Ok(Inserted::BecameNewSibling(last_lint))
+ }
+}
+
+fn iter_children(children: &mut Children) -> impl Iterator<Item = DefId> + '_ {
+ let nonblanket = children.non_blanket_impls.iter().flat_map(|(_, v)| v.iter());
+ children.blanket_impls.iter().chain(nonblanket).cloned()
+}
+
+fn filtered_children(
+ children: &mut Children,
+ st: SimplifiedType,
+) -> impl Iterator<Item = DefId> + '_ {
+ let nonblanket = children.non_blanket_impls.entry(st).or_default().iter();
+ children.blanket_impls.iter().chain(nonblanket).cloned()
+}
+
+// A custom iterator used by Children::insert
+enum PotentialSiblings<I, J>
+where
+ I: Iterator<Item = DefId>,
+ J: Iterator<Item = DefId>,
+{
+ Unfiltered(I),
+ Filtered(J),
+}
+
+impl<I, J> Iterator for PotentialSiblings<I, J>
+where
+ I: Iterator<Item = DefId>,
+ J: Iterator<Item = DefId>,
+{
+ type Item = DefId;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ match *self {
+ PotentialSiblings::Unfiltered(ref mut iter) => iter.next(),
+ PotentialSiblings::Filtered(ref mut iter) => iter.next(),
+ }
+ }
+}
+
+pub trait GraphExt {
+ /// Insert a local impl into the specialization graph. If an existing impl
+ /// conflicts with it (has overlap, but neither specializes the other),
+ /// information about the area of overlap is returned in the `Err`.
+ fn insert(
+ &mut self,
+ tcx: TyCtxt<'_>,
+ impl_def_id: DefId,
+ overlap_mode: OverlapMode,
+ ) -> Result<Option<FutureCompatOverlapError>, OverlapError>;
+
+ /// Insert cached metadata mapping from a child impl back to its parent.
+ fn record_impl_from_cstore(&mut self, tcx: TyCtxt<'_>, parent: DefId, child: DefId);
+}
+
+impl GraphExt for Graph {
+ /// Insert a local impl into the specialization graph. If an existing impl
+ /// conflicts with it (has overlap, but neither specializes the other),
+ /// information about the area of overlap is returned in the `Err`.
+ fn insert(
+ &mut self,
+ tcx: TyCtxt<'_>,
+ impl_def_id: DefId,
+ overlap_mode: OverlapMode,
+ ) -> Result<Option<FutureCompatOverlapError>, OverlapError> {
+ assert!(impl_def_id.is_local());
+
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+ let trait_def_id = trait_ref.def_id;
+
+ debug!(
+ "insert({:?}): inserting TraitRef {:?} into specialization graph",
+ impl_def_id, trait_ref
+ );
+
+ // If the reference itself contains an earlier error (e.g., due to a
+ // resolution failure), then we just insert the impl at the top level of
+ // the graph and claim that there's no overlap (in order to suppress
+ // bogus errors).
+ if trait_ref.references_error() {
+ debug!(
+ "insert: inserting dummy node for erroneous TraitRef {:?}, \
+ impl_def_id={:?}, trait_def_id={:?}",
+ trait_ref, impl_def_id, trait_def_id
+ );
+
+ self.parent.insert(impl_def_id, trait_def_id);
+ self.children.entry(trait_def_id).or_default().insert_blindly(tcx, impl_def_id);
+ return Ok(None);
+ }
+
+ let mut parent = trait_def_id;
+ let mut last_lint = None;
+ let simplified = fast_reject::simplify_type(tcx, trait_ref.self_ty(), TreatParams::AsInfer);
+
+ // Descend the specialization tree, where `parent` is the current parent node.
+ loop {
+ use self::Inserted::*;
+
+ let insert_result = self.children.entry(parent).or_default().insert(
+ tcx,
+ impl_def_id,
+ simplified,
+ overlap_mode,
+ )?;
+
+ match insert_result {
+ BecameNewSibling(opt_lint) => {
+ last_lint = opt_lint;
+ break;
+ }
+ ReplaceChildren(grand_children_to_be) => {
+ // We currently have
+ //
+ // P
+ // |
+ // G
+ //
+ // and we are inserting the impl N. We want to make it:
+ //
+ // P
+ // |
+ // N
+ // |
+ // G
+
+ // Adjust P's list of children: remove G and then add N.
+ {
+ let siblings = self.children.get_mut(&parent).unwrap();
+ for &grand_child_to_be in &grand_children_to_be {
+ siblings.remove_existing(tcx, grand_child_to_be);
+ }
+ siblings.insert_blindly(tcx, impl_def_id);
+ }
+
+ // Set G's parent to N and N's parent to P.
+ for &grand_child_to_be in &grand_children_to_be {
+ self.parent.insert(grand_child_to_be, impl_def_id);
+ }
+ self.parent.insert(impl_def_id, parent);
+
+ // Add G as N's child.
+ for &grand_child_to_be in &grand_children_to_be {
+ self.children
+ .entry(impl_def_id)
+ .or_default()
+ .insert_blindly(tcx, grand_child_to_be);
+ }
+ break;
+ }
+ ShouldRecurseOn(new_parent) => {
+ parent = new_parent;
+ }
+ }
+ }
+
+ self.parent.insert(impl_def_id, parent);
+ Ok(last_lint)
+ }
+
+ /// Insert cached metadata mapping from a child impl back to its parent.
+ fn record_impl_from_cstore(&mut self, tcx: TyCtxt<'_>, parent: DefId, child: DefId) {
+ if self.parent.insert(child, parent).is_some() {
+ bug!(
+ "When recording an impl from the crate store, information about its parent \
+ was already present."
+ );
+ }
+
+ self.children.entry(parent).or_default().insert_blindly(tcx, child);
+ }
+}
diff --git a/compiler/rustc_trait_selection/src/traits/structural_match.rs b/compiler/rustc_trait_selection/src/traits/structural_match.rs
new file mode 100644
index 000000000..5829a0f92
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/structural_match.rs
@@ -0,0 +1,273 @@
+use crate::infer::{InferCtxt, TyCtxtInferExt};
+use crate::traits::ObligationCause;
+use crate::traits::{TraitEngine, TraitEngineExt};
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use rustc_span::Span;
+use std::ops::ControlFlow;
+
+/// This method traverses the structure of `ty`, trying to find an
+/// instance of an ADT (i.e. struct or enum) that doesn't implement
+/// the structural-match traits, or a generic type parameter
+/// (which cannot be determined to be structural-match).
+///
+/// The "structure of a type" includes all components that would be
+/// considered when doing a pattern match on a constant of that
+/// type.
+///
+/// * This means this method descends into fields of structs/enums,
+/// and also descends into the inner type `T` of `&T` and `&mut T`
+///
+/// * The traversal doesn't dereference unsafe pointers (`*const T`,
+/// `*mut T`), and it does not visit the type arguments of an
+/// instantiated generic like `PhantomData<T>`.
+///
+/// The reason we do this search is that Rust currently requires all ADTs
+/// reachable from a constant's type to implement the
+/// structural-match traits, which essentially say that
+/// the implementation of `PartialEq::eq` behaves *equivalently* to a
+/// comparison against the unfolded structure.
+///
+/// For more background on why Rust has this requirement, and issues
+/// that arose when the requirement was not enforced completely, see
+/// Rust RFC 1445, rust-lang/rust#61188, and rust-lang/rust#62307.
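+///
+/// A sketch of the kind of type this search flags (hypothetical names):
+///
+/// ```ignore (illustrative)
+/// struct NoDerives;           // implements neither structural-match trait
+/// #[derive(PartialEq, Eq)]
+/// struct Wrap(NoDerives);     // searching `Wrap` would yield `NoDerives`
+/// ```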
+pub fn search_for_structural_match_violation<'tcx>(
+ span: Span,
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+) -> Option<Ty<'tcx>> {
+ ty.visit_with(&mut Search { tcx, span, seen: FxHashSet::default(), adt_const_param: false })
+ .break_value()
+}
+
+/// This method traverses the structure of `ty`, trying to find any
+/// types that are not allowed to be used in a const generic.
+///
+/// This is either because the type does not implement `StructuralEq`
+/// and `StructuralPartialEq`, or because the type is intentionally
+/// not supported in const generics (such as floats and raw pointers,
+/// which are allowed in match blocks).
+pub fn search_for_adt_const_param_violation<'tcx>(
+ span: Span,
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+) -> Option<Ty<'tcx>> {
+ ty.visit_with(&mut Search { tcx, span, seen: FxHashSet::default(), adt_const_param: true })
+ .break_value()
+}
+
+/// This method returns true if and only if `adt_ty` itself has been marked as
+/// eligible for structural-match: namely, if it implements both
+/// `StructuralPartialEq` and `StructuralEq` (which are respectively injected by
+/// `#[derive(PartialEq)]` and `#[derive(Eq)]`).
+///
+/// Note that this does *not* recursively check if the substructure of `adt_ty`
+/// implements the traits.
+fn type_marked_structural<'tcx>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ adt_ty: Ty<'tcx>,
+ cause: ObligationCause<'tcx>,
+) -> bool {
+ let mut fulfillment_cx = <dyn TraitEngine<'tcx>>::new(infcx.tcx);
+ // require `#[derive(PartialEq)]`
+ let structural_peq_def_id =
+ infcx.tcx.require_lang_item(LangItem::StructuralPeq, Some(cause.span));
+ fulfillment_cx.register_bound(
+ infcx,
+ ty::ParamEnv::empty(),
+ adt_ty,
+ structural_peq_def_id,
+ cause.clone(),
+ );
+ // for now, require `#[derive(Eq)]`. (Doing so is a hack to work around
+ // the type `for<'a> fn(&'a ())` failing to implement `Eq` itself.)
+ let structural_teq_def_id =
+ infcx.tcx.require_lang_item(LangItem::StructuralTeq, Some(cause.span));
+ fulfillment_cx.register_bound(
+ infcx,
+ ty::ParamEnv::empty(),
+ adt_ty,
+ structural_teq_def_id,
+ cause,
+ );
+
+ // We deliberately skip *reporting* fulfillment errors (via
+ // `report_fulfillment_errors`), for two reasons:
+ //
+ // 1. The error messages would mention `std::marker::StructuralPartialEq`
+ // (a trait which is solely meant as an implementation detail
+ // for now), and
+ //
+ // 2. We are sometimes doing future-incompatibility lints for
+ // now, so we do not want unconditional errors here.
+ fulfillment_cx.select_all_or_error(infcx).is_empty()
+}
+
+/// This implements the traversal over the structure of a given type to try to
+/// find instances of ADTs (specifically structs or enums) that do not implement
+/// the structural-match traits (`StructuralPartialEq` and `StructuralEq`).
+struct Search<'tcx> {
+ span: Span,
+
+ tcx: TyCtxt<'tcx>,
+
+ /// Tracks ADTs previously encountered during search, so that
+ /// we will not recur on them again.
+ seen: FxHashSet<hir::def_id::DefId>,
+
+ // Additionally deny things that have been allowed in patterns,
+ // but are not allowed in adt const params, such as floats and
+ // fn ptrs.
+ adt_const_param: bool,
+}
+
+impl<'tcx> Search<'tcx> {
+ fn type_marked_structural(&self, adt_ty: Ty<'tcx>) -> bool {
+ adt_ty.is_structural_eq_shallow(self.tcx)
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for Search<'tcx> {
+ type BreakTy = Ty<'tcx>;
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ debug!("Search visiting ty: {:?}", ty);
+
+ let (adt_def, substs) = match *ty.kind() {
+ ty::Adt(adt_def, substs) => (adt_def, substs),
+ ty::Param(_) => {
+ return ControlFlow::Break(ty);
+ }
+ ty::Dynamic(..) => {
+ return ControlFlow::Break(ty);
+ }
+ ty::Foreign(_) => {
+ return ControlFlow::Break(ty);
+ }
+ ty::Opaque(..) => {
+ return ControlFlow::Break(ty);
+ }
+ ty::Projection(..) => {
+ return ControlFlow::Break(ty);
+ }
+ ty::Closure(..) => {
+ return ControlFlow::Break(ty);
+ }
+ ty::Generator(..) | ty::GeneratorWitness(..) => {
+ return ControlFlow::Break(ty);
+ }
+ ty::FnDef(..) => {
+ // Types of the formal parameters and the return value in `fn(_) -> _` are
+ // also irrelevant, so we do not recur into them via `super_visit_with`.
+ return ControlFlow::CONTINUE;
+ }
+ ty::Array(_, n)
+ if { n.try_eval_usize(self.tcx, ty::ParamEnv::reveal_all()) == Some(0) } =>
+ {
+ // rust-lang/rust#62336: ignore type of contents
+ // for empty array.
+ return ControlFlow::CONTINUE;
+ }
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Str | ty::Never => {
+ // These primitive types are always structural match.
+ //
+ // `Never` is kind of special here, but as it is not inhabitable, this should be fine.
+ return ControlFlow::CONTINUE;
+ }
+
+ ty::FnPtr(..) => {
+ if !self.adt_const_param {
+ return ControlFlow::CONTINUE;
+ } else {
+ return ControlFlow::Break(ty);
+ }
+ }
+
+ ty::RawPtr(..) => {
+ if !self.adt_const_param {
+ // structural-match ignores substructure of
+ // `*const _`/`*mut _`, so skip `super_visit_with`.
+ //
+ // For example, if you have:
+ // ```
+ // struct NonStructural;
+ // #[derive(PartialEq, Eq)]
+ // struct T(*const NonStructural);
+ // const C: T = T(std::ptr::null());
+ // ```
+ //
+ // Even though `NonStructural` does not implement `PartialEq`,
+ // structural equality on `T` does not recur into the raw
+ // pointer. Therefore, one can still use `C` in a pattern.
+ return ControlFlow::CONTINUE;
+ } else {
+ return ControlFlow::Break(ty);
+ }
+ }
+
+ ty::Float(_) => {
+ if !self.adt_const_param {
+ return ControlFlow::CONTINUE;
+ } else {
+ return ControlFlow::Break(ty);
+ }
+ }
+
+ ty::Array(..) | ty::Slice(_) | ty::Ref(..) | ty::Tuple(..) => {
+ // First check all contained types and then tell the caller to continue searching.
+ return ty.super_visit_with(self);
+ }
+ ty::Infer(_) | ty::Placeholder(_) | ty::Bound(..) => {
+ bug!("unexpected type during structural-match checking: {:?}", ty);
+ }
+ ty::Error(_) => {
+ self.tcx.sess.delay_span_bug(self.span, "ty::Error in structural-match check");
+ // We still want to check other types after encountering an error,
+ // as this may still emit relevant errors.
+ return ControlFlow::CONTINUE;
+ }
+ };
+
+ if !self.seen.insert(adt_def.did()) {
+ debug!("Search already seen adt_def: {:?}", adt_def);
+ return ControlFlow::CONTINUE;
+ }
+
+ if !self.type_marked_structural(ty) {
+ debug!("Search found ty: {:?}", ty);
+ return ControlFlow::Break(ty);
+ }
+
+ // structural-match does not care about the
+ // instantiation of the generics in an ADT (it
+ // instead looks directly at its fields outside
+ // this match), so we skip super_visit_with.
+ //
+ // (We must not recur on substs for `PhantomData<T>`, cf.
+ // rust-lang/rust#55028 and rust-lang/rust#55837; but we also
+ // want to skip substs when the only uses of the generic are
+ // behind unsafe pointers `*const T`/`*mut T`.)
+
+ // Even though we skip super_visit_with, we must recur on
+ // the fields of the ADT.
+ let tcx = self.tcx;
+ adt_def.all_fields().map(|field| field.ty(tcx, substs)).try_for_each(|field_ty| {
+ let ty = self.tcx.normalize_erasing_regions(ty::ParamEnv::empty(), field_ty);
+ debug!("structural-match ADT: field_ty={:?}, ty={:?}", field_ty, ty);
+ ty.visit_with(self)
+ })
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.has_structural_eq_impls = |tcx, ty| {
+ tcx.infer_ctxt().enter(|infcx| {
+ let cause = ObligationCause::dummy();
+ type_marked_structural(&infcx, ty, cause)
+ })
+ };
+}
diff --git a/compiler/rustc_trait_selection/src/traits/util.rs b/compiler/rustc_trait_selection/src/traits/util.rs
new file mode 100644
index 000000000..d25006016
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/util.rs
@@ -0,0 +1,368 @@
+use rustc_errors::Diagnostic;
+use rustc_span::Span;
+use smallvec::smallvec;
+use smallvec::SmallVec;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::subst::{GenericArg, Subst, SubstsRef};
+use rustc_middle::ty::{self, ImplSubject, ToPredicate, Ty, TyCtxt, TypeVisitable};
+
+use super::{Normalized, Obligation, ObligationCause, PredicateObligation, SelectionContext};
+pub use rustc_infer::traits::{self, util::*};
+
+use std::iter;
+
+///////////////////////////////////////////////////////////////////////////
+// `TraitAliasExpander` iterator
+///////////////////////////////////////////////////////////////////////////
+
+/// "Trait alias expansion" is the process of expanding a sequence of trait
+/// references into another sequence by transitively following all trait
+/// aliases. For example, if you have bounds like `Foo + Send`, a trait alias
+/// `trait Foo = Bar + Sync;`, and another trait alias
+/// `trait Bar = Read + Write`, then the bounds would expand to
+/// `Read + Write + Sync + Send`.
+/// Expansion is done via a DFS (depth-first search), and the `visited` field
+/// is used to avoid cycles.
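+///
+/// A sketch of that example (illustrative; trait aliases are unstable and
+/// require `#![feature(trait_alias)]`):
+///
+/// ```ignore (illustrative)
+/// use std::io::{Read, Write};
+///
+/// trait Foo = Bar + Sync;
+/// trait Bar = Read + Write;
+///
+/// // A bound `Foo + Send` expands transitively to `Read + Write + Sync + Send`.
+/// ```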
+pub struct TraitAliasExpander<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ stack: Vec<TraitAliasExpansionInfo<'tcx>>,
+}
+
+/// Stores information about the expansion of a trait via a path of zero or more trait aliases.
+#[derive(Debug, Clone)]
+pub struct TraitAliasExpansionInfo<'tcx> {
+ pub path: SmallVec<[(ty::PolyTraitRef<'tcx>, Span); 4]>,
+}
+
+impl<'tcx> TraitAliasExpansionInfo<'tcx> {
+ fn new(trait_ref: ty::PolyTraitRef<'tcx>, span: Span) -> Self {
+ Self { path: smallvec![(trait_ref, span)] }
+ }
+
+ /// Adds diagnostic labels to `diag` for the expansion path of a trait through all intermediate
+ /// trait aliases.
+ pub fn label_with_exp_info(&self, diag: &mut Diagnostic, top_label: &str, use_desc: &str) {
+ diag.span_label(self.top().1, top_label);
+ if self.path.len() > 1 {
+ for (_, sp) in self.path.iter().rev().skip(1).take(self.path.len() - 2) {
+ diag.span_label(*sp, format!("referenced here ({})", use_desc));
+ }
+ }
+ if self.top().1 != self.bottom().1 {
+ // When the trait object is in a return type these two spans match;
+ // we don't want redundant labels.
+ diag.span_label(
+ self.bottom().1,
+ format!("trait alias used in trait object type ({})", use_desc),
+ );
+ }
+ }
+
+ pub fn trait_ref(&self) -> ty::PolyTraitRef<'tcx> {
+ self.top().0
+ }
+
+ pub fn top(&self) -> &(ty::PolyTraitRef<'tcx>, Span) {
+ self.path.last().unwrap()
+ }
+
+ pub fn bottom(&self) -> &(ty::PolyTraitRef<'tcx>, Span) {
+ self.path.first().unwrap()
+ }
+
+ fn clone_and_push(&self, trait_ref: ty::PolyTraitRef<'tcx>, span: Span) -> Self {
+ let mut path = self.path.clone();
+ path.push((trait_ref, span));
+
+ Self { path }
+ }
+}
+
+pub fn expand_trait_aliases<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_refs: impl Iterator<Item = (ty::PolyTraitRef<'tcx>, Span)>,
+) -> TraitAliasExpander<'tcx> {
+ let items: Vec<_> =
+ trait_refs.map(|(trait_ref, span)| TraitAliasExpansionInfo::new(trait_ref, span)).collect();
+ TraitAliasExpander { tcx, stack: items }
+}
+
+impl<'tcx> TraitAliasExpander<'tcx> {
+ /// If `item` is a trait alias and its predicate has not yet been visited, then expands `item`
+ /// to the definition, pushes the resulting expansion onto `self.stack`, and returns `false`.
+ /// Otherwise, immediately returns `true` if `item` is a regular trait, or `false` if it is a
+ /// trait alias.
+ /// The return value indicates whether `item` should be yielded to the user.
+ fn expand(&mut self, item: &TraitAliasExpansionInfo<'tcx>) -> bool {
+ let tcx = self.tcx;
+ let trait_ref = item.trait_ref();
+ let pred = trait_ref.without_const().to_predicate(tcx);
+
+ debug!("expand_trait_aliases: trait_ref={:?}", trait_ref);
+
+ // Don't recurse if this bound is not a trait alias.
+ let is_alias = tcx.is_trait_alias(trait_ref.def_id());
+ if !is_alias {
+ return true;
+ }
+
+ // Don't recurse if this trait alias is already on the stack for the DFS search.
+ let anon_pred = anonymize_predicate(tcx, pred);
+ if item.path.iter().rev().skip(1).any(|&(tr, _)| {
+ anonymize_predicate(tcx, tr.without_const().to_predicate(tcx)) == anon_pred
+ }) {
+ return false;
+ }
+
+ // Get components of trait alias.
+ let predicates = tcx.super_predicates_of(trait_ref.def_id());
+ debug!(?predicates);
+
+ let items = predicates.predicates.iter().rev().filter_map(|(pred, span)| {
+ pred.subst_supertrait(tcx, &trait_ref)
+ .to_opt_poly_trait_pred()
+ .map(|trait_ref| item.clone_and_push(trait_ref.map_bound(|t| t.trait_ref), *span))
+ });
+ debug!("expand_trait_aliases: items={:?}", items.clone().collect::<Vec<_>>());
+
+ self.stack.extend(items);
+
+ false
+ }
+}
+
+impl<'tcx> Iterator for TraitAliasExpander<'tcx> {
+ type Item = TraitAliasExpansionInfo<'tcx>;
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.stack.len(), None)
+ }
+
+ fn next(&mut self) -> Option<TraitAliasExpansionInfo<'tcx>> {
+ while let Some(item) = self.stack.pop() {
+ if self.expand(&item) {
+ return Some(item);
+ }
+ }
+ None
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Iterator over def-IDs of supertraits
+///////////////////////////////////////////////////////////////////////////
+
+pub struct SupertraitDefIds<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ stack: Vec<DefId>,
+ visited: FxHashSet<DefId>,
+}
+
+pub fn supertrait_def_ids(tcx: TyCtxt<'_>, trait_def_id: DefId) -> SupertraitDefIds<'_> {
+ SupertraitDefIds {
+ tcx,
+ stack: vec![trait_def_id],
+ visited: Some(trait_def_id).into_iter().collect(),
+ }
+}
+
+impl Iterator for SupertraitDefIds<'_> {
+ type Item = DefId;
+
+ fn next(&mut self) -> Option<DefId> {
+ let def_id = self.stack.pop()?;
+ let predicates = self.tcx.super_predicates_of(def_id);
+ let visited = &mut self.visited;
+ self.stack.extend(
+ predicates
+ .predicates
+ .iter()
+ .filter_map(|(pred, _)| pred.to_opt_poly_trait_pred())
+ .map(|trait_ref| trait_ref.def_id())
+ .filter(|&super_def_id| visited.insert(super_def_id)),
+ );
+ Some(def_id)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Other
+///////////////////////////////////////////////////////////////////////////
+
+/// Instantiate all bound parameters of the impl subject with the given substs,
+/// returning the resulting subject and all obligations that arise.
+/// The obligations are closed under normalization.
+pub fn impl_subject_and_oblig<'a, 'tcx>(
+ selcx: &mut SelectionContext<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ impl_def_id: DefId,
+ impl_substs: SubstsRef<'tcx>,
+) -> (ImplSubject<'tcx>, impl Iterator<Item = PredicateObligation<'tcx>>) {
+ let subject = selcx.tcx().bound_impl_subject(impl_def_id);
+ let subject = subject.subst(selcx.tcx(), impl_substs);
+ let Normalized { value: subject, obligations: normalization_obligations1 } =
+ super::normalize(selcx, param_env, ObligationCause::dummy(), subject);
+
+ let predicates = selcx.tcx().predicates_of(impl_def_id);
+ let predicates = predicates.instantiate(selcx.tcx(), impl_substs);
+ let Normalized { value: predicates, obligations: normalization_obligations2 } =
+ super::normalize(selcx, param_env, ObligationCause::dummy(), predicates);
+ let impl_obligations =
+ predicates_for_generics(ObligationCause::dummy(), 0, param_env, predicates);
+
+ let impl_obligations = impl_obligations
+ .chain(normalization_obligations1.into_iter())
+ .chain(normalization_obligations2.into_iter());
+
+ (subject, impl_obligations)
+}
+
+pub fn predicates_for_generics<'tcx>(
+ cause: ObligationCause<'tcx>,
+ recursion_depth: usize,
+ param_env: ty::ParamEnv<'tcx>,
+ generic_bounds: ty::InstantiatedPredicates<'tcx>,
+) -> impl Iterator<Item = PredicateObligation<'tcx>> {
+ debug!("predicates_for_generics(generic_bounds={:?})", generic_bounds);
+
+ iter::zip(generic_bounds.predicates, generic_bounds.spans).map(move |(predicate, span)| {
+ let cause = match *cause.code() {
+ traits::ItemObligation(def_id) if !span.is_dummy() => traits::ObligationCause::new(
+ cause.span,
+ cause.body_id,
+ traits::BindingObligation(def_id, span),
+ ),
+ _ => cause.clone(),
+ };
+ Obligation { cause, recursion_depth, param_env, predicate }
+ })
+}
+
+pub fn predicate_for_trait_ref<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+ recursion_depth: usize,
+) -> PredicateObligation<'tcx> {
+ Obligation {
+ cause,
+ param_env,
+ recursion_depth,
+ predicate: ty::Binder::dummy(trait_ref).without_const().to_predicate(tcx),
+ }
+}
+
+pub fn predicate_for_trait_def<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ cause: ObligationCause<'tcx>,
+ trait_def_id: DefId,
+ recursion_depth: usize,
+ self_ty: Ty<'tcx>,
+ params: &[GenericArg<'tcx>],
+) -> PredicateObligation<'tcx> {
+ let trait_ref =
+ ty::TraitRef { def_id: trait_def_id, substs: tcx.mk_substs_trait(self_ty, params) };
+ predicate_for_trait_ref(tcx, cause, param_env, trait_ref, recursion_depth)
+}
+
+/// Casts a trait reference into a reference to one of its super
+/// traits; returns `None` if `target_trait_def_id` is not a
+/// supertrait.
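+///
+/// For example (illustrative): if `trait Sub: Super` and `source_trait_ref`
+/// refers to `Sub`, asking for `Super` yields the corresponding supertrait
+/// reference; several can be returned when the target is reachable through
+/// more than one path.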
+pub fn upcast_choices<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ source_trait_ref: ty::PolyTraitRef<'tcx>,
+ target_trait_def_id: DefId,
+) -> Vec<ty::PolyTraitRef<'tcx>> {
+ if source_trait_ref.def_id() == target_trait_def_id {
+ return vec![source_trait_ref]; // Shortcut the most common case.
+ }
+
+ supertraits(tcx, source_trait_ref).filter(|r| r.def_id() == target_trait_def_id).collect()
+}
+
+/// Given a trait `trait_ref`, returns the number of vtable entries
+/// that come from `trait_ref`, excluding its supertraits. Used in
+/// computing the vtable base for an upcast trait of a trait object.
+pub fn count_own_vtable_entries<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+) -> usize {
+ let existential_trait_ref =
+ trait_ref.map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
+ let existential_trait_ref = tcx.erase_regions(existential_trait_ref);
+ tcx.own_existential_vtable_entries(existential_trait_ref).len()
+}
+
+/// Given an upcast trait object described by `object`, returns the
+/// index of the method `method_def_id` (which should be part of
+/// `object.upcast_trait_ref`) within the vtable for `object`.
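+///
+/// For intuition (illustrative):
+///
+/// ```ignore (illustrative)
+/// trait Super { fn a(&self); }
+/// trait Sub: Super { fn b(&self); fn c(&self); }
+/// // In a `dyn Sub` vtable the `Super` methods come first; `vtable_base`
+/// // skips past them, so `b` and `c` land at `vtable_base + 0` and
+/// // `vtable_base + 1`.
+/// ```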
+pub fn get_vtable_index_of_object_method<'tcx, N>(
+ tcx: TyCtxt<'tcx>,
+ object: &super::ImplSourceObjectData<'tcx, N>,
+ method_def_id: DefId,
+) -> Option<usize> {
+ let existential_trait_ref = object
+ .upcast_trait_ref
+ .map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
+ let existential_trait_ref = tcx.erase_regions(existential_trait_ref);
+
+ // Count number of methods preceding the one we are selecting and
+ // add them to the total offset.
+ if let Some(index) = tcx
+ .own_existential_vtable_entries(existential_trait_ref)
+ .iter()
+ .copied()
+ .position(|def_id| def_id == method_def_id)
+ {
+ Some(object.vtable_base + index)
+ } else {
+ None
+ }
+}
+
+pub fn closure_trait_ref_and_return_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ fn_trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ sig: ty::PolyFnSig<'tcx>,
+ tuple_arguments: TupleArgumentsFlag,
+) -> ty::Binder<'tcx, (ty::TraitRef<'tcx>, Ty<'tcx>)> {
+ let arguments_tuple = match tuple_arguments {
+ TupleArgumentsFlag::No => sig.skip_binder().inputs()[0],
+ TupleArgumentsFlag::Yes => tcx.intern_tup(sig.skip_binder().inputs()),
+ };
+ debug_assert!(!self_ty.has_escaping_bound_vars());
+ let trait_ref = ty::TraitRef {
+ def_id: fn_trait_def_id,
+ substs: tcx.mk_substs_trait(self_ty, &[arguments_tuple.into()]),
+ };
+ sig.map_bound(|sig| (trait_ref, sig.output()))
+}
+
+pub fn generator_trait_ref_and_outputs<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ fn_trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ sig: ty::PolyGenSig<'tcx>,
+) -> ty::Binder<'tcx, (ty::TraitRef<'tcx>, Ty<'tcx>, Ty<'tcx>)> {
+ debug_assert!(!self_ty.has_escaping_bound_vars());
+ let trait_ref = ty::TraitRef {
+ def_id: fn_trait_def_id,
+ substs: tcx.mk_substs_trait(self_ty, &[sig.skip_binder().resume_ty.into()]),
+ };
+ sig.map_bound(|sig| (trait_ref, sig.yield_ty, sig.return_ty))
+}
+
+pub fn impl_item_is_final(tcx: TyCtxt<'_>, assoc_item: &ty::AssocItem) -> bool {
+ assoc_item.defaultness(tcx).is_final()
+ && tcx.impl_defaultness(assoc_item.container_id(tcx)).is_final()
+}
+
+pub enum TupleArgumentsFlag {
+ Yes,
+ No,
+}
diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs
new file mode 100644
index 000000000..414857f0a
--- /dev/null
+++ b/compiler/rustc_trait_selection/src/traits/wf.rs
@@ -0,0 +1,888 @@
+use crate::infer::InferCtxt;
+use crate::traits;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
+use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt, TypeVisitable};
+use rustc_span::Span;
+
+use std::iter;
+
+/// Returns the set of obligations needed to make `arg` well-formed.
+/// If `arg` contains unresolved inference variables, this may include
+/// further WF obligations. However, if `arg` IS an unresolved
+/// inference variable, returns `None`, because we are not able to
+/// make any progress at all. This is to prevent "livelock" where we
+/// say "$0 is WF if $0 is WF".
+pub fn obligations<'a, 'tcx>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ recursion_depth: usize,
+ arg: GenericArg<'tcx>,
+ span: Span,
+) -> Option<Vec<traits::PredicateObligation<'tcx>>> {
+ // Handle the "livelock" case (see comment above) by bailing out if necessary.
+ let arg = match arg.unpack() {
+ GenericArgKind::Type(ty) => {
+ match ty.kind() {
+ ty::Infer(ty::TyVar(_)) => {
+ let resolved_ty = infcx.shallow_resolve(ty);
+ if resolved_ty == ty {
+ // No progress, bail out to prevent "livelock".
+ return None;
+ }
+
+ resolved_ty
+ }
+ _ => ty,
+ }
+ .into()
+ }
+ GenericArgKind::Const(ct) => {
+ match ct.kind() {
+ ty::ConstKind::Infer(infer) => {
+ let resolved = infcx.shallow_resolve(infer);
+ if resolved == infer {
+ // No progress.
+ return None;
+ }
+
+ infcx
+ .tcx
+ .mk_const(ty::ConstS { kind: ty::ConstKind::Infer(resolved), ty: ct.ty() })
+ }
+ _ => ct,
+ }
+ .into()
+ }
+ // There is nothing we have to do for lifetimes.
+ GenericArgKind::Lifetime(..) => return Some(Vec::new()),
+ };
+
+ let mut wf = WfPredicates {
+ tcx: infcx.tcx,
+ param_env,
+ body_id,
+ span,
+ out: vec![],
+ recursion_depth,
+ item: None,
+ };
+ wf.compute(arg);
+ debug!("wf::obligations({:?}, body_id={:?}) = {:?}", arg, body_id, wf.out);
+
+ let result = wf.normalize(infcx);
+ debug!("wf::obligations({:?}, body_id={:?}) ~~> {:?}", arg, body_id, result);
+ Some(result)
+}
+
+/// Returns the obligations that make this trait reference
+/// well-formed. For example, if there is a trait `Set` defined like
+/// `trait Set<K:Eq>`, then the trait reference `Foo: Set<Bar>` is WF
+/// if `Bar: Eq`.
+pub fn trait_obligations<'a, 'tcx>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ trait_pred: &ty::TraitPredicate<'tcx>,
+ span: Span,
+ item: &'tcx hir::Item<'tcx>,
+) -> Vec<traits::PredicateObligation<'tcx>> {
+ let mut wf = WfPredicates {
+ tcx: infcx.tcx,
+ param_env,
+ body_id,
+ span,
+ out: vec![],
+ recursion_depth: 0,
+ item: Some(item),
+ };
+ wf.compute_trait_pred(trait_pred, Elaborate::All);
+ debug!(obligations = ?wf.out);
+ wf.normalize(infcx)
+}
+
+pub fn predicate_obligations<'a, 'tcx>(
+ infcx: &InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ predicate: ty::Predicate<'tcx>,
+ span: Span,
+) -> Vec<traits::PredicateObligation<'tcx>> {
+ let mut wf = WfPredicates {
+ tcx: infcx.tcx,
+ param_env,
+ body_id,
+ span,
+ out: vec![],
+ recursion_depth: 0,
+ item: None,
+ };
+
+ // It's ok to skip the binder here because wf code is prepared for it
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(t) => {
+ wf.compute_trait_pred(&t, Elaborate::None);
+ }
+ ty::PredicateKind::RegionOutlives(..) => {}
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty, _reg)) => {
+ wf.compute(ty.into());
+ }
+ ty::PredicateKind::Projection(t) => {
+ wf.compute_projection(t.projection_ty);
+ wf.compute(match t.term {
+ ty::Term::Ty(ty) => ty.into(),
+ ty::Term::Const(c) => c.into(),
+ })
+ }
+ ty::PredicateKind::WellFormed(arg) => {
+ wf.compute(arg);
+ }
+ ty::PredicateKind::ObjectSafe(_) => {}
+ ty::PredicateKind::ClosureKind(..) => {}
+ ty::PredicateKind::Subtype(ty::SubtypePredicate { a, b, a_is_expected: _ }) => {
+ wf.compute(a.into());
+ wf.compute(b.into());
+ }
+ ty::PredicateKind::Coerce(ty::CoercePredicate { a, b }) => {
+ wf.compute(a.into());
+ wf.compute(b.into());
+ }
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ let obligations = wf.nominal_obligations(uv.def.did, uv.substs);
+ wf.out.extend(obligations);
+
+ for arg in uv.substs.iter() {
+ wf.compute(arg);
+ }
+ }
+ ty::PredicateKind::ConstEquate(c1, c2) => {
+ wf.compute(c1.into());
+ wf.compute(c2.into());
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(..) => {
+ bug!("TypeWellFormedFromEnv is only used for Chalk")
+ }
+ }
+
+ wf.normalize(infcx)
+}
+
+struct WfPredicates<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ span: Span,
+ out: Vec<traits::PredicateObligation<'tcx>>,
+ recursion_depth: usize,
+ item: Option<&'tcx hir::Item<'tcx>>,
+}
+
+/// Controls whether we "elaborate" supertraits and so forth on the WF
+/// predicates. This is a kind of hack to address #43784. The
+/// underlying problem in that issue was a trait structure like:
+///
+/// ```ignore (illustrative)
+/// trait Foo: Copy { }
+/// trait Bar: Foo { }
+/// impl<T: Bar> Foo for T { }
+/// impl<T> Bar for T { }
+/// ```
+///
+/// Here, in the `Foo` impl, we will check that `T: Copy` holds -- but
+/// we decide that this is true because `T: Bar` is in the
+/// where-clauses (and we can elaborate that to include `T:
+/// Copy`). This wouldn't be a problem, except that when we check the
+/// `Bar` impl, we decide that `T: Foo` must hold because of the `Foo`
+/// impl. And so nowhere did we check that `T: Copy` holds!
+///
+/// To resolve this, we elaborate the WF requirements that must be
+/// proven when checking impls. This means that (e.g.) the `impl Bar
+/// for T` will be forced to prove not only that `T: Foo` but also `T:
+/// Copy` (which it won't be able to do, because there is no `Copy`
+/// impl for `T`).
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+enum Elaborate {
+ All,
+ None,
+}
+
+fn extend_cause_with_original_assoc_item_obligation<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: &ty::TraitRef<'tcx>,
+ item: Option<&hir::Item<'tcx>>,
+ cause: &mut traits::ObligationCause<'tcx>,
+ pred: ty::Predicate<'tcx>,
+) {
+ debug!(
+ "extended_cause_with_original_assoc_item_obligation {:?} {:?} {:?} {:?}",
+ trait_ref, item, cause, pred
+ );
+ let (items, impl_def_id) = match item {
+ Some(hir::Item { kind: hir::ItemKind::Impl(impl_), def_id, .. }) => (impl_.items, *def_id),
+ _ => return,
+ };
+ let fix_span =
+ |impl_item_ref: &hir::ImplItemRef| match tcx.hir().impl_item(impl_item_ref.id).kind {
+ hir::ImplItemKind::Const(ty, _) | hir::ImplItemKind::TyAlias(ty) => ty.span,
+ _ => impl_item_ref.span,
+ };
+
+ // It is fine to skip the binder as we don't care about regions here.
+ match pred.kind().skip_binder() {
+ ty::PredicateKind::Projection(proj) => {
+ // The obligation comes not from the current `impl` nor the `trait` being implemented,
+ // but rather from a "second order" obligation, where an associated type has a
+ // projection coming from another associated type. See
+ // `src/test/ui/associated-types/point-at-type-on-obligation-failure.rs` and
+ // `traits-assoc-type-in-supertrait-bad.rs`.
+ if let Some(ty::Projection(projection_ty)) = proj.term.ty().map(|ty| ty.kind())
+ && let Some(&impl_item_id) =
+ tcx.impl_item_implementor_ids(impl_def_id).get(&projection_ty.item_def_id)
+ && let Some(impl_item_span) = items
+ .iter()
+ .find(|item| item.id.def_id.to_def_id() == impl_item_id)
+ .map(fix_span)
+ {
+ cause.span = impl_item_span;
+ }
+ }
+ ty::PredicateKind::Trait(pred) => {
+ // An associated item obligation born out of the `trait` failed to be met. An example
+ // can be seen in `ui/associated-types/point-at-type-on-obligation-failure-2.rs`.
+ debug!("extended_cause_with_original_assoc_item_obligation trait proj {:?}", pred);
+ if let ty::Projection(ty::ProjectionTy { item_def_id, .. }) = *pred.self_ty().kind()
+ && let Some(&impl_item_id) =
+ tcx.impl_item_implementor_ids(impl_def_id).get(&item_def_id)
+ && let Some(impl_item_span) = items
+ .iter()
+ .find(|item| item.id.def_id.to_def_id() == impl_item_id)
+ .map(fix_span)
+ {
+ cause.span = impl_item_span;
+ }
+ }
+ _ => {}
+ }
+}
+
+impl<'tcx> WfPredicates<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn cause(&self, code: traits::ObligationCauseCode<'tcx>) -> traits::ObligationCause<'tcx> {
+ traits::ObligationCause::new(self.span, self.body_id, code)
+ }
+
+ fn normalize(self, infcx: &InferCtxt<'_, 'tcx>) -> Vec<traits::PredicateObligation<'tcx>> {
+ let cause = self.cause(traits::WellFormed(None));
+ let param_env = self.param_env;
+ let mut obligations = Vec::with_capacity(self.out.len());
+ for mut obligation in self.out {
+ assert!(!obligation.has_escaping_bound_vars());
+ let mut selcx = traits::SelectionContext::new(infcx);
+ // Don't normalize the whole obligation: the param env is either
+ // already normalized, or we're currently normalizing the
+ // param_env. Either way we should only normalize the predicate.
+ let normalized_predicate = traits::project::normalize_with_depth_to(
+ &mut selcx,
+ param_env,
+ cause.clone(),
+ self.recursion_depth,
+ obligation.predicate,
+ &mut obligations,
+ );
+ obligation.predicate = normalized_predicate;
+ obligations.push(obligation);
+ }
+ obligations
+ }
+
+ /// Pushes the obligations required for `trait_ref` to be WF into `self.out`.
+ fn compute_trait_pred(&mut self, trait_pred: &ty::TraitPredicate<'tcx>, elaborate: Elaborate) {
+ let tcx = self.tcx;
+ let trait_ref = &trait_pred.trait_ref;
+
+ // If the trait predicate is not const, the WF obligations should not be const either.
+ let obligations = if trait_pred.constness == ty::BoundConstness::NotConst {
+ self.nominal_obligations_without_const(trait_ref.def_id, trait_ref.substs)
+ } else {
+ self.nominal_obligations(trait_ref.def_id, trait_ref.substs)
+ };
+
+ debug!("compute_trait_pred obligations {:?}", obligations);
+ let param_env = self.param_env;
+ let depth = self.recursion_depth;
+
+ let item = self.item;
+
+ let extend = |traits::PredicateObligation { predicate, mut cause, .. }| {
+ if let Some(parent_trait_pred) = predicate.to_opt_poly_trait_pred() {
+ cause = cause.derived_cause(
+ parent_trait_pred,
+ traits::ObligationCauseCode::DerivedObligation,
+ );
+ }
+ extend_cause_with_original_assoc_item_obligation(
+ tcx, trait_ref, item, &mut cause, predicate,
+ );
+ traits::Obligation::with_depth(cause, depth, param_env, predicate)
+ };
+
+ if let Elaborate::All = elaborate {
+ let implied_obligations = traits::util::elaborate_obligations(tcx, obligations);
+ let implied_obligations = implied_obligations.map(extend);
+ self.out.extend(implied_obligations);
+ } else {
+ self.out.extend(obligations);
+ }
+
+ let tcx = self.tcx();
+ self.out.extend(
+ trait_ref
+ .substs
+ .iter()
+ .enumerate()
+ .filter(|(_, arg)| {
+ matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..))
+ })
+ .filter(|(_, arg)| !arg.has_escaping_bound_vars())
+ .map(|(i, arg)| {
+ let mut cause = traits::ObligationCause::misc(self.span, self.body_id);
+ // The first subst is the self ty - use the correct span for it.
+ if i == 0 {
+ if let Some(hir::ItemKind::Impl(hir::Impl { self_ty, .. })) =
+ item.map(|i| &i.kind)
+ {
+ cause.span = self_ty.span;
+ }
+ }
+ traits::Obligation::with_depth(
+ cause,
+ depth,
+ param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(arg)).to_predicate(tcx),
+ )
+ }),
+ );
+ }
+
+ /// Pushes the obligations required for `trait_ref::Item` to be WF
+ /// into `self.out`.
+ fn compute_projection(&mut self, data: ty::ProjectionTy<'tcx>) {
+ // A projection is well-formed if
+ //
+ // (a) its predicates hold (*)
+ // (b) its substs are wf
+ //
+ // (*) The predicates of an associated type include the predicates of
+ // the trait that it's contained in. For example, given
+ //
+ // trait A<T>: Clone {
+ // type X where T: Copy;
+ // }
+ //
+ // The predicates of `<() as A<i32>>::X` are:
+ // [
+ // `(): Sized`
+ // `(): Clone`
+ // `(): A<i32>`
+ // `i32: Sized`
+ // `i32: Clone`
+ // `i32: Copy`
+ // ]
+ let obligations = self.nominal_obligations(data.item_def_id, data.substs);
+ self.out.extend(obligations);
+
+ let tcx = self.tcx();
+ let cause = self.cause(traits::WellFormed(None));
+ let param_env = self.param_env;
+ let depth = self.recursion_depth;
+
+ self.out.extend(
+ data.substs
+ .iter()
+ .filter(|arg| {
+ matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..))
+ })
+ .filter(|arg| !arg.has_escaping_bound_vars())
+ .map(|arg| {
+ traits::Obligation::with_depth(
+ cause.clone(),
+ depth,
+ param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(arg)).to_predicate(tcx),
+ )
+ }),
+ );
+ }
+
+ fn require_sized(&mut self, subty: Ty<'tcx>, cause: traits::ObligationCauseCode<'tcx>) {
+ if !subty.has_escaping_bound_vars() {
+ let cause = self.cause(cause);
+ let trait_ref = ty::TraitRef {
+ def_id: self.tcx.require_lang_item(LangItem::Sized, None),
+ substs: self.tcx.mk_substs_trait(subty, &[]),
+ };
+ self.out.push(traits::Obligation::with_depth(
+ cause,
+ self.recursion_depth,
+ self.param_env,
+ ty::Binder::dummy(trait_ref).without_const().to_predicate(self.tcx),
+ ));
+ }
+ }
+
+ /// Pushes all the predicates needed to validate that `ty` is WF into `out`.
+ fn compute(&mut self, arg: GenericArg<'tcx>) {
+ let mut walker = arg.walk();
+ let param_env = self.param_env;
+ let depth = self.recursion_depth;
+ while let Some(arg) = walker.next() {
+ let ty = match arg.unpack() {
+ GenericArgKind::Type(ty) => ty,
+
+ // No WF constraints for lifetimes being present; any outlives
+ // obligations are handled by the parent (e.g. `ty::Ref`).
+ GenericArgKind::Lifetime(_) => continue,
+
+ GenericArgKind::Const(constant) => {
+ match constant.kind() {
+ ty::ConstKind::Unevaluated(uv) => {
+ let obligations = self.nominal_obligations(uv.def.did, uv.substs);
+ self.out.extend(obligations);
+
+ let predicate =
+ ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(uv.shrink()))
+ .to_predicate(self.tcx());
+ let cause = self.cause(traits::WellFormed(None));
+ self.out.push(traits::Obligation::with_depth(
+ cause,
+ self.recursion_depth,
+ self.param_env,
+ predicate,
+ ));
+ }
+ ty::ConstKind::Infer(_) => {
+ let cause = self.cause(traits::WellFormed(None));
+
+ self.out.push(traits::Obligation::with_depth(
+ cause,
+ self.recursion_depth,
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(constant.into()))
+ .to_predicate(self.tcx()),
+ ));
+ }
+ ty::ConstKind::Error(_)
+ | ty::ConstKind::Param(_)
+ | ty::ConstKind::Bound(..)
+ | ty::ConstKind::Placeholder(..) => {
+ // These variants are trivially WF, so nothing to do here.
+ }
+ ty::ConstKind::Value(..) => {
+ // FIXME: Enforce that values are structurally-matchable.
+ }
+ }
+ continue;
+ }
+ };
+
+ match *ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..)
+ | ty::Error(_)
+ | ty::Str
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Foreign(..) => {
+ // WfScalar, WfParameter, etc
+ }
+
+ // Can only infer to `ty::Int(_) | ty::Uint(_)`.
+ ty::Infer(ty::IntVar(_)) => {}
+
+ // Can only infer to `ty::Float(_)`.
+ ty::Infer(ty::FloatVar(_)) => {}
+
+ ty::Slice(subty) => {
+ self.require_sized(subty, traits::SliceOrArrayElem);
+ }
+
+ ty::Array(subty, _) => {
+ self.require_sized(subty, traits::SliceOrArrayElem);
+ // Note that the length being WF is implicitly checked while walking `arg`.
+ }
+
+ ty::Tuple(ref tys) => {
+ if let Some((_last, rest)) = tys.split_last() {
+ for &elem in rest {
+ self.require_sized(elem, traits::TupleElem);
+ }
+ }
+ }
+
+ ty::RawPtr(_) => {
+ // Simple cases that are WF if their type args are WF.
+ }
+
+ ty::Projection(data) => {
+ walker.skip_current_subtree(); // Subtree handled by compute_projection.
+ self.compute_projection(data);
+ }
+
+ ty::Adt(def, substs) => {
+ // WfNominalType
+ let obligations = self.nominal_obligations(def.did(), substs);
+ self.out.extend(obligations);
+ }
+
+ ty::FnDef(did, substs) => {
+ let obligations = self.nominal_obligations(did, substs);
+ self.out.extend(obligations);
+ }
+
+ ty::Ref(r, rty, _) => {
+ // WfReference
+ if !r.has_escaping_bound_vars() && !rty.has_escaping_bound_vars() {
+ let cause = self.cause(traits::ReferenceOutlivesReferent(ty));
+ self.out.push(traits::Obligation::with_depth(
+ cause,
+ depth,
+ param_env,
+ ty::Binder::dummy(ty::PredicateKind::TypeOutlives(
+ ty::OutlivesPredicate(rty, r),
+ ))
+ .to_predicate(self.tcx()),
+ ));
+ }
+ }
+
+ ty::Generator(did, substs, ..) => {
+ // Walk ALL the types in the generator: this will
+ // include the upvar types as well as the yield
+ // type. Note that this is mildly distinct from
+ // the closure case, where we have to be careful
+ // about the signature of the closure. We don't
+ // have the problem of implied bounds here since
+ // generators don't take arguments.
+ let obligations = self.nominal_obligations(did, substs);
+ self.out.extend(obligations);
+ }
+
+ ty::Closure(did, substs) => {
+ // Only check the upvar types for WF, not the rest
+ // of the types within. This is needed because we
+ // capture the signature and it may not be WF
+ // without the implied bounds. Consider a closure
+ // like `|x: &'a T|` -- it may be that `T: 'a` is
+ // not known to hold in the creator's context (and
+ // indeed the closure may not be invoked by its
+ // creator, but rather turned to someone who *can*
+ // verify that).
+ //
+ // The special treatment of closures here really
+ // ought not to be necessary either; the problem
+ // is related to #25860 -- there is no way for us
+ // to express a fn type complete with the implied
+ // bounds that it is assuming. I think in reality
+ // the WF rules around fn are a bit messed up, and
+ // that is the root problem: `fn(&'a T)` should
+ // probably always be WF, because it should be
+ // shorthand for something like `where(T: 'a) {
+ // fn(&'a T) }`, as discussed in #25860.
+ walker.skip_current_subtree(); // subtree handled below
+ // FIXME(eddyb) add the type to `walker` instead of recursing.
+ self.compute(substs.as_closure().tupled_upvars_ty().into());
+ // Note that we cannot skip the generic
+ // types. Normally, within the fn
+ // body where they are created, the generics will
+ // always be WF, and outside of that fn body we
+ // are not directly inspecting closure types
+ // anyway, except via auto trait matching (which
+ // only inspects the upvar types).
+ // But when a closure is part of a type-alias-impl-trait
+ // then the function that created the defining site may
+ // have had more bounds available than the type alias
+ // specifies. This may cause us to have a closure in the
+ // hidden type that is not actually well formed and
+ // can cause compiler crashes when the user abuses unsafe
+ // code to procure such a closure.
+ // See src/test/ui/type-alias-impl-trait/wf_check_closures.rs
+ let obligations = self.nominal_obligations(did, substs);
+ self.out.extend(obligations);
+ }
+
+ ty::FnPtr(_) => {
+ // let the loop iterate into the argument/return
+ // types appearing in the fn signature
+ }
+
+ ty::Opaque(did, substs) => {
+ // All of the requirements on type parameters
+ // have already been checked for `impl Trait` in
+ // return position. We do need to check type-alias-impl-trait though.
+ if ty::is_impl_trait_defn(self.tcx, did).is_none() {
+ let obligations = self.nominal_obligations(did, substs);
+ self.out.extend(obligations);
+ }
+ }
+
+ ty::Dynamic(data, r) => {
+ // WfObject
+ //
+ // Here, we defer WF checking due to higher-ranked
+ // regions. This is perhaps not ideal.
+ self.from_object_ty(ty, data, r);
+
+ // FIXME(#27579) RFC also considers adding trait
+ // obligations that don't refer to Self and
+ // checking those
+
+ let defer_to_coercion = self.tcx().features().object_safe_for_dispatch;
+
+ if !defer_to_coercion {
+ let cause = self.cause(traits::WellFormed(None));
+ let component_traits = data.auto_traits().chain(data.principal_def_id());
+ let tcx = self.tcx();
+ self.out.extend(component_traits.map(|did| {
+ traits::Obligation::with_depth(
+ cause.clone(),
+ depth,
+ param_env,
+ ty::Binder::dummy(ty::PredicateKind::ObjectSafe(did))
+ .to_predicate(tcx),
+ )
+ }));
+ }
+ }
+
+ // Inference variables are the complicated case, since we don't
+ // know what type they are. We do two things:
+ //
+ // 1. Check if they have been resolved, and if so proceed with
+ // THAT type.
+ // 2. If not, we've at least simplified things (e.g., we went
+ // from `Vec<$0>: WF` to `$0: WF`), so we can
+ // register a pending obligation and keep
+ // moving. (Goal is that an "inductive hypothesis"
+ // is satisfied to ensure termination.)
+ // See also the comment on `fn obligations`, describing "livelock"
+ // prevention, which happens before this can be reached.
+ ty::Infer(_) => {
+ let cause = self.cause(traits::WellFormed(None));
+ self.out.push(traits::Obligation::with_depth(
+ cause,
+ self.recursion_depth,
+ param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(ty.into()))
+ .to_predicate(self.tcx()),
+ ));
+ }
+ }
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn nominal_obligations_inner(
+ &mut self,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ remap_constness: bool,
+ ) -> Vec<traits::PredicateObligation<'tcx>> {
+ let predicates = self.tcx.predicates_of(def_id);
+ let mut origins = vec![def_id; predicates.predicates.len()];
+ let mut head = predicates;
+ while let Some(parent) = head.parent {
+ head = self.tcx.predicates_of(parent);
+ origins.extend(iter::repeat(parent).take(head.predicates.len()));
+ }
+
+ let predicates = predicates.instantiate(self.tcx, substs);
+ trace!("{:#?}", predicates);
+ debug_assert_eq!(predicates.predicates.len(), origins.len());
+
+ iter::zip(iter::zip(predicates.predicates, predicates.spans), origins.into_iter().rev())
+ .map(|((mut pred, span), origin_def_id)| {
+ let code = if span.is_dummy() {
+ traits::MiscObligation
+ } else {
+ traits::BindingObligation(origin_def_id, span)
+ };
+ let cause = self.cause(code);
+ if remap_constness {
+ pred = pred.without_const(self.tcx);
+ }
+ traits::Obligation::with_depth(cause, self.recursion_depth, self.param_env, pred)
+ })
+ .filter(|pred| !pred.has_escaping_bound_vars())
+ .collect()
+ }
+
+ fn nominal_obligations(
+ &mut self,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Vec<traits::PredicateObligation<'tcx>> {
+ self.nominal_obligations_inner(def_id, substs, false)
+ }
+
+ fn nominal_obligations_without_const(
+ &mut self,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Vec<traits::PredicateObligation<'tcx>> {
+ self.nominal_obligations_inner(def_id, substs, true)
+ }
+
+ fn from_object_ty(
+ &mut self,
+ ty: Ty<'tcx>,
+ data: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ region: ty::Region<'tcx>,
+ ) {
+ // Imagine a type like this:
+ //
+ // trait Foo { }
+ // trait Bar<'c> : 'c { }
+ //
+ // &'b (Foo+'c+Bar<'d>)
+ // ^
+ //
+ // In this case, the following relationships must hold:
+ //
+ // 'b <= 'c
+ // 'd <= 'c
+ //
+ // The first condition is due to the normal region pointer
+ // rules, which say that a reference cannot outlive its
+ // referent.
+ //
+ // The final condition may be a bit surprising. In particular,
+ // you may expect that it would have been `'c <= 'd`, since
+ // usually lifetimes of outer things are conservative
+ // approximations for inner things. However, it works somewhat
+ // differently with trait objects: here the idea is that if the
+ // user specifies a region bound (`'c`, in this case) it is the
+ // "master bound" that *implies* that bounds from other traits are
+ // all met. (Remember that *all bounds* in a type like
+ // `Foo+Bar+Zed` must be met, not just one, hence if we write
+ // `Foo<'x>+Bar<'y>`, we know that the type outlives *both* 'x and
+ // 'y.)
+ //
+ // Note: in fact we only permit builtin traits, not `Bar<'d>`; I
+ // am looking forward to the future here.
+ if !data.has_escaping_bound_vars() && !region.has_escaping_bound_vars() {
+ let implicit_bounds = object_region_bounds(self.tcx, data);
+
+ let explicit_bound = region;
+
+ self.out.reserve(implicit_bounds.len());
+ for implicit_bound in implicit_bounds {
+ let cause = self.cause(traits::ObjectTypeBound(ty, explicit_bound));
+ let outlives =
+ ty::Binder::dummy(ty::OutlivesPredicate(explicit_bound, implicit_bound));
+ self.out.push(traits::Obligation::with_depth(
+ cause,
+ self.recursion_depth,
+ self.param_env,
+ outlives.to_predicate(self.tcx),
+ ));
+ }
+ }
+ }
+}
+
+/// Given an object type like `SomeTrait + Send`, computes the lifetime
+/// bounds that must hold on the elided self type. These are derived
+/// from the declarations of `SomeTrait`, `Send`, and friends -- if
+/// they declare `trait SomeTrait : 'static`, for example, then
+/// `'static` would appear in the list. The hard work is done by
+/// `infer::required_region_bounds`, see that for more information.
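+///
+/// For example (an illustrative sketch, not exercised by this code):
+///
+/// ```ignore (illustrative)
+/// trait SomeTrait: 'static {}
+/// // For an object type like `dyn SomeTrait + Send`, the elided self type
+/// // must outlive `'static`, so `'static` appears in the returned list.
+/// ```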
+pub fn object_region_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ existential_predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+) -> Vec<ty::Region<'tcx>> {
+ // Since we don't actually *know* the self type for an object,
+ // this fresh type variable serves as a kind of dummy stand-in --
+ // basically a placeholder type.
+ let open_ty = tcx.mk_ty_infer(ty::FreshTy(0));
+
+ let predicates = existential_predicates.iter().filter_map(|predicate| {
+ if let ty::ExistentialPredicate::Projection(_) = predicate.skip_binder() {
+ None
+ } else {
+ Some(predicate.with_self_ty(tcx, open_ty))
+ }
+ });
+
+ required_region_bounds(tcx, open_ty, predicates)
+}
+
+/// Given a set of predicates that apply to an object type, returns
+/// the region bounds that the (erased) `Self` type must
+/// outlive. Precisely *because* the `Self` type is erased, the
+/// parameter `erased_self_ty` must be supplied to indicate what type
+/// has been used to represent `Self` in the predicates
+/// themselves. This should really be a unique type; `FreshTy(0)` is a
+/// popular choice.
+///
+/// N.B., in some cases, particularly around higher-ranked bounds,
+/// this function returns a kind of conservative approximation.
+/// That is, all regions returned by this function are definitely
+/// required, but there may be other region bounds that are not
+/// returned, as well as requirements like `for<'a> T: 'a`.
+///
+/// Requires that trait definitions have been processed so that we can
+/// elaborate predicates and walk supertraits.
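+///
+/// For instance (an illustrative note): given `erased_self_ty: 'a` together
+/// with a higher-ranked `for<'b> erased_self_ty: 'b`, only `'a` is returned;
+/// the higher-ranked bound is dropped, per the conservative approximation
+/// described above.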
+#[instrument(skip(tcx, predicates), level = "debug")]
+pub(crate) fn required_region_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ erased_self_ty: Ty<'tcx>,
+ predicates: impl Iterator<Item = ty::Predicate<'tcx>>,
+) -> Vec<ty::Region<'tcx>> {
+ assert!(!erased_self_ty.has_escaping_bound_vars());
+
+ traits::elaborate_predicates(tcx, predicates)
+ .filter_map(|obligation| {
+ debug!(?obligation);
+ match obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Projection(..)
+ | ty::PredicateKind::Trait(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::WellFormed(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::RegionOutlives(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ref t, ref r)) => {
+ // Search for a bound of the form `erased_self_ty
+ // : 'a`, but be wary of something like `for<'a>
+ // erased_self_ty : 'a` (we interpret a
+ // higher-ranked bound like that as 'static,
+ // though at present the code in `fulfill.rs`
+ // considers such bounds to be unsatisfiable, so
+ // it's kind of a moot point since you could never
+ // construct such an object, but this seems
+ // correct even if that code changes).
+ if t == &erased_self_ty && !r.has_escaping_bound_vars() {
+ Some(*r)
+ } else {
+ None
+ }
+ }
+ }
+ })
+ .collect()
+}
diff --git a/compiler/rustc_traits/Cargo.toml b/compiler/rustc_traits/Cargo.toml
new file mode 100644
index 000000000..951554c77
--- /dev/null
+++ b/compiler/rustc_traits/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "rustc_traits"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+tracing = "0.1"
+rustc_attr = { path = "../rustc_attr" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_index = { path = "../rustc_index" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+chalk-ir = "0.80.0"
+chalk-engine = "0.80.0"
+chalk-solve = "0.80.0"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_infer = { path = "../rustc_infer" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
diff --git a/compiler/rustc_traits/src/chalk/db.rs b/compiler/rustc_traits/src/chalk/db.rs
new file mode 100644
index 000000000..ff5ca0cbc
--- /dev/null
+++ b/compiler/rustc_traits/src/chalk/db.rs
@@ -0,0 +1,796 @@
+//! Provides the `RustIrDatabase` implementation for `chalk-solve`
+//!
+//! The purpose of the `chalk_solve::RustIrDatabase` is to get data about
+//! specific types, such as bounds, where clauses, or fields. This file contains
+//! the minimal logic to assemble the types for `chalk-solve` by calling out to
+//! either the `TyCtxt` (for information about types) or
+//! `crate::chalk::lowering` (to lower rustc types into Chalk types).
+
+use rustc_middle::traits::ChalkRustInterner as RustInterner;
+use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
+use rustc_middle::ty::{self, AssocKind, EarlyBinder, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable};
+
+use rustc_ast::ast;
+use rustc_attr as attr;
+
+use rustc_hir::def_id::DefId;
+
+use rustc_span::symbol::sym;
+
+use std::fmt;
+use std::sync::Arc;
+
+use crate::chalk::lowering::LowerInto;
+
+pub struct RustIrDatabase<'tcx> {
+ pub(crate) interner: RustInterner<'tcx>,
+}
+
+impl fmt::Debug for RustIrDatabase<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "RustIrDatabase")
+ }
+}
+
+impl<'tcx> RustIrDatabase<'tcx> {
+ fn where_clauses_for(
+ &self,
+ def_id: DefId,
+ bound_vars: SubstsRef<'tcx>,
+ ) -> Vec<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>> {
+ let predicates = self.interner.tcx.predicates_defined_on(def_id).predicates;
+ predicates
+ .iter()
+ .map(|(wc, _)| EarlyBinder(*wc).subst(self.interner.tcx, bound_vars))
+ .filter_map(|wc| LowerInto::<
+ Option<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>>
+ >::lower_into(wc, self.interner)).collect()
+ }
+
+ fn bounds_for<T>(&self, def_id: DefId, bound_vars: SubstsRef<'tcx>) -> Vec<T>
+ where
+ ty::Predicate<'tcx>: LowerInto<'tcx, std::option::Option<T>>,
+ {
+ let bounds = self.interner.tcx.bound_explicit_item_bounds(def_id);
+ bounds
+ .0
+ .iter()
+ .map(|(bound, _)| bounds.rebind(*bound).subst(self.interner.tcx, &bound_vars))
+ .filter_map(|bound| LowerInto::<Option<_>>::lower_into(bound, self.interner))
+ .collect()
+ }
+}
+
+impl<'tcx> chalk_solve::RustIrDatabase<RustInterner<'tcx>> for RustIrDatabase<'tcx> {
+ fn interner(&self) -> RustInterner<'tcx> {
+ self.interner
+ }
+
+ fn associated_ty_data(
+ &self,
+ assoc_type_id: chalk_ir::AssocTypeId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::AssociatedTyDatum<RustInterner<'tcx>>> {
+ let def_id = assoc_type_id.0;
+ let assoc_item = self.interner.tcx.associated_item(def_id);
+ let Some(trait_def_id) = assoc_item.trait_container(self.interner.tcx) else {
+ unimplemented!("Not possible??");
+ };
+ match assoc_item.kind {
+ AssocKind::Type => {}
+ _ => unimplemented!("Not possible??"),
+ }
+ let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
+ let binders = binders_for(self.interner, bound_vars);
+
+ let where_clauses = self.where_clauses_for(def_id, bound_vars);
+ let bounds = self.bounds_for(def_id, bound_vars);
+
+ Arc::new(chalk_solve::rust_ir::AssociatedTyDatum {
+ trait_id: chalk_ir::TraitId(trait_def_id),
+ id: assoc_type_id,
+ name: (),
+ binders: chalk_ir::Binders::new(
+ binders,
+ chalk_solve::rust_ir::AssociatedTyDatumBound { bounds, where_clauses },
+ ),
+ })
+ }
+
+ fn trait_datum(
+ &self,
+ trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::TraitDatum<RustInterner<'tcx>>> {
+ use chalk_solve::rust_ir::WellKnownTrait::*;
+
+ let def_id = trait_id.0;
+ let trait_def = self.interner.tcx.trait_def(def_id);
+
+ let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
+ let binders = binders_for(self.interner, bound_vars);
+
+ let where_clauses = self.where_clauses_for(def_id, bound_vars);
+
+ let associated_ty_ids: Vec<_> = self
+ .interner
+ .tcx
+ .associated_items(def_id)
+ .in_definition_order()
+ .filter(|i| i.kind == AssocKind::Type)
+ .map(|i| chalk_ir::AssocTypeId(i.def_id))
+ .collect();
+
+ let lang_items = self.interner.tcx.lang_items();
+ let well_known = if lang_items.sized_trait() == Some(def_id) {
+ Some(Sized)
+ } else if lang_items.copy_trait() == Some(def_id) {
+ Some(Copy)
+ } else if lang_items.clone_trait() == Some(def_id) {
+ Some(Clone)
+ } else if lang_items.drop_trait() == Some(def_id) {
+ Some(Drop)
+ } else if lang_items.fn_trait() == Some(def_id) {
+ Some(Fn)
+ } else if lang_items.fn_once_trait() == Some(def_id) {
+ Some(FnOnce)
+ } else if lang_items.fn_mut_trait() == Some(def_id) {
+ Some(FnMut)
+ } else if lang_items.unsize_trait() == Some(def_id) {
+ Some(Unsize)
+ } else if lang_items.unpin_trait() == Some(def_id) {
+ Some(Unpin)
+ } else if lang_items.coerce_unsized_trait() == Some(def_id) {
+ Some(CoerceUnsized)
+ } else if lang_items.dispatch_from_dyn_trait() == Some(def_id) {
+ Some(DispatchFromDyn)
+ } else {
+ None
+ };
+ Arc::new(chalk_solve::rust_ir::TraitDatum {
+ id: trait_id,
+ binders: chalk_ir::Binders::new(
+ binders,
+ chalk_solve::rust_ir::TraitDatumBound { where_clauses },
+ ),
+ flags: chalk_solve::rust_ir::TraitFlags {
+ auto: trait_def.has_auto_impl,
+ marker: trait_def.is_marker,
+ upstream: !def_id.is_local(),
+ fundamental: self.interner.tcx.has_attr(def_id, sym::fundamental),
+ non_enumerable: true,
+ coinductive: false,
+ },
+ associated_ty_ids,
+ well_known,
+ })
+ }
+
+ fn adt_datum(
+ &self,
+ adt_id: chalk_ir::AdtId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::AdtDatum<RustInterner<'tcx>>> {
+ let adt_def = adt_id.0;
+
+ let bound_vars = bound_vars_for_item(self.interner.tcx, adt_def.did());
+ let binders = binders_for(self.interner, bound_vars);
+
+ let where_clauses = self.where_clauses_for(adt_def.did(), bound_vars);
+
+ let variants: Vec<_> = adt_def
+ .variants()
+ .iter()
+ .map(|variant| chalk_solve::rust_ir::AdtVariantDatum {
+ fields: variant
+ .fields
+ .iter()
+ .map(|field| field.ty(self.interner.tcx, bound_vars).lower_into(self.interner))
+ .collect(),
+ })
+ .collect();
+ Arc::new(chalk_solve::rust_ir::AdtDatum {
+ id: adt_id,
+ binders: chalk_ir::Binders::new(
+ binders,
+ chalk_solve::rust_ir::AdtDatumBound { variants, where_clauses },
+ ),
+ flags: chalk_solve::rust_ir::AdtFlags {
+ upstream: !adt_def.did().is_local(),
+ fundamental: adt_def.is_fundamental(),
+ phantom_data: adt_def.is_phantom_data(),
+ },
+ kind: match adt_def.adt_kind() {
+ ty::AdtKind::Struct => chalk_solve::rust_ir::AdtKind::Struct,
+ ty::AdtKind::Union => chalk_solve::rust_ir::AdtKind::Union,
+ ty::AdtKind::Enum => chalk_solve::rust_ir::AdtKind::Enum,
+ },
+ })
+ }
+
+ fn adt_repr(
+ &self,
+ adt_id: chalk_ir::AdtId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::AdtRepr<RustInterner<'tcx>>> {
+ let adt_def = adt_id.0;
+ let int = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(i)).intern(self.interner);
+ let uint = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Uint(i)).intern(self.interner);
+ Arc::new(chalk_solve::rust_ir::AdtRepr {
+ c: adt_def.repr().c(),
+ packed: adt_def.repr().packed(),
+ int: adt_def.repr().int.map(|i| match i {
+ attr::IntType::SignedInt(ty) => match ty {
+ ast::IntTy::Isize => int(chalk_ir::IntTy::Isize),
+ ast::IntTy::I8 => int(chalk_ir::IntTy::I8),
+ ast::IntTy::I16 => int(chalk_ir::IntTy::I16),
+ ast::IntTy::I32 => int(chalk_ir::IntTy::I32),
+ ast::IntTy::I64 => int(chalk_ir::IntTy::I64),
+ ast::IntTy::I128 => int(chalk_ir::IntTy::I128),
+ },
+ attr::IntType::UnsignedInt(ty) => match ty {
+ ast::UintTy::Usize => uint(chalk_ir::UintTy::Usize),
+ ast::UintTy::U8 => uint(chalk_ir::UintTy::U8),
+ ast::UintTy::U16 => uint(chalk_ir::UintTy::U16),
+ ast::UintTy::U32 => uint(chalk_ir::UintTy::U32),
+ ast::UintTy::U64 => uint(chalk_ir::UintTy::U64),
+ ast::UintTy::U128 => uint(chalk_ir::UintTy::U128),
+ },
+ }),
+ })
+ }
+
+ fn adt_size_align(
+ &self,
+ adt_id: chalk_ir::AdtId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::AdtSizeAlign> {
+ let tcx = self.interner.tcx;
+ let did = adt_id.0.did();
+
+ // Grab the ADT and the param env we might need to calculate its layout
+ let param_env = tcx.param_env(did);
+ let adt_ty = tcx.type_of(did);
+
+ // The ADT is a 1-zst if it's a ZST and its alignment is 1.
+ // Mark the ADT as _not_ a 1-zst if there was a layout error.
+ let one_zst = if let Ok(layout) = tcx.layout_of(param_env.and(adt_ty)) {
+ layout.is_zst() && layout.align.abi.bytes() == 1
+ } else {
+ false
+ };
+
+ Arc::new(chalk_solve::rust_ir::AdtSizeAlign::from_one_zst(one_zst))
+ }
+
+ fn fn_def_datum(
+ &self,
+ fn_def_id: chalk_ir::FnDefId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::FnDefDatum<RustInterner<'tcx>>> {
+ let def_id = fn_def_id.0;
+ let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
+ let binders = binders_for(self.interner, bound_vars);
+
+ let where_clauses = self.where_clauses_for(def_id, bound_vars);
+
+ let sig = self.interner.tcx.bound_fn_sig(def_id);
+ let (inputs_and_output, iobinders, _) = crate::chalk::lowering::collect_bound_vars(
+ self.interner,
+ self.interner.tcx,
+ sig.map_bound(|s| s.inputs_and_output()).subst(self.interner.tcx, bound_vars),
+ );
+
+ let argument_types = inputs_and_output[..inputs_and_output.len() - 1]
+ .iter()
+ .map(|t| sig.rebind(*t).subst(self.interner.tcx, &bound_vars).lower_into(self.interner))
+ .collect();
+
+ let return_type = sig
+ .rebind(inputs_and_output[inputs_and_output.len() - 1])
+ .subst(self.interner.tcx, &bound_vars)
+ .lower_into(self.interner);
+
+ let bound = chalk_solve::rust_ir::FnDefDatumBound {
+ inputs_and_output: chalk_ir::Binders::new(
+ iobinders,
+ chalk_solve::rust_ir::FnDefInputsAndOutputDatum { argument_types, return_type },
+ ),
+ where_clauses,
+ };
+ Arc::new(chalk_solve::rust_ir::FnDefDatum {
+ id: fn_def_id,
+ sig: sig.0.lower_into(self.interner),
+ binders: chalk_ir::Binders::new(binders, bound),
+ })
+ }
+
+ fn impl_datum(
+ &self,
+ impl_id: chalk_ir::ImplId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::ImplDatum<RustInterner<'tcx>>> {
+ let def_id = impl_id.0;
+ let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
+ let binders = binders_for(self.interner, bound_vars);
+
+ let trait_ref = self.interner.tcx.bound_impl_trait_ref(def_id).expect("not an impl");
+ let trait_ref = trait_ref.subst(self.interner.tcx, bound_vars);
+
+ let where_clauses = self.where_clauses_for(def_id, bound_vars);
+
+ let value = chalk_solve::rust_ir::ImplDatumBound {
+ trait_ref: trait_ref.lower_into(self.interner),
+ where_clauses,
+ };
+
+ let associated_ty_value_ids: Vec<_> = self
+ .interner
+ .tcx
+ .associated_items(def_id)
+ .in_definition_order()
+ .filter(|i| i.kind == AssocKind::Type)
+ .map(|i| chalk_solve::rust_ir::AssociatedTyValueId(i.def_id))
+ .collect();
+
+ Arc::new(chalk_solve::rust_ir::ImplDatum {
+ polarity: self.interner.tcx.impl_polarity(def_id).lower_into(self.interner),
+ binders: chalk_ir::Binders::new(binders, value),
+ impl_type: chalk_solve::rust_ir::ImplType::Local,
+ associated_ty_value_ids,
+ })
+ }
+
+ fn impls_for_trait(
+ &self,
+ trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
+ parameters: &[chalk_ir::GenericArg<RustInterner<'tcx>>],
+ _binders: &chalk_ir::CanonicalVarKinds<RustInterner<'tcx>>,
+ ) -> Vec<chalk_ir::ImplId<RustInterner<'tcx>>> {
+ let def_id = trait_id.0;
+
+ // FIXME(chalk): use TraitDef::for_each_relevant_impl, but that will
+ // require us to be able to interconvert `Ty<'tcx>`, and we're
+ // not there yet.
+
+ let all_impls = self.interner.tcx.all_impls(def_id);
+ let matched_impls = all_impls.filter(|impl_def_id| {
+ use chalk_ir::could_match::CouldMatch;
+ let trait_ref = self.interner.tcx.bound_impl_trait_ref(*impl_def_id).unwrap();
+ let bound_vars = bound_vars_for_item(self.interner.tcx, *impl_def_id);
+
+ let self_ty = trait_ref.map_bound(|t| t.self_ty());
+ let self_ty = self_ty.subst(self.interner.tcx, bound_vars);
+ let lowered_ty = self_ty.lower_into(self.interner);
+
+ parameters[0].assert_ty_ref(self.interner).could_match(
+ self.interner,
+ self.unification_database(),
+ &lowered_ty,
+ )
+ });
+
+ let impls = matched_impls.map(chalk_ir::ImplId).collect();
+ impls
+ }
+
+ fn impl_provided_for(
+ &self,
+ auto_trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
+ chalk_ty: &chalk_ir::TyKind<RustInterner<'tcx>>,
+ ) -> bool {
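+ // Shallowly and conservatively check whether any impl of this auto
+ // trait has a self type whose head matches `chalk_ty`.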
+ use chalk_ir::Scalar::*;
+ use chalk_ir::TyKind::*;
+
+ let trait_def_id = auto_trait_id.0;
+ let all_impls = self.interner.tcx.all_impls(trait_def_id);
+ for impl_def_id in all_impls {
+ let trait_ref = self.interner.tcx.impl_trait_ref(impl_def_id).unwrap();
+ let self_ty = trait_ref.self_ty();
+ let provides = match (self_ty.kind(), chalk_ty) {
+ (&ty::Adt(impl_adt_def, ..), Adt(id, ..)) => impl_adt_def.did() == id.0.did(),
+ (_, AssociatedType(_ty_id, ..)) => {
+ // FIXME(chalk): See https://github.com/rust-lang/rust/pull/77152#discussion_r494484774
+ false
+ }
+ (ty::Bool, Scalar(Bool)) => true,
+ (ty::Char, Scalar(Char)) => true,
+ (ty::Int(ty1), Scalar(Int(ty2))) => matches!(
+ (ty1, ty2),
+ (ty::IntTy::Isize, chalk_ir::IntTy::Isize)
+ | (ty::IntTy::I8, chalk_ir::IntTy::I8)
+ | (ty::IntTy::I16, chalk_ir::IntTy::I16)
+ | (ty::IntTy::I32, chalk_ir::IntTy::I32)
+ | (ty::IntTy::I64, chalk_ir::IntTy::I64)
+ | (ty::IntTy::I128, chalk_ir::IntTy::I128)
+ ),
+ (ty::Uint(ty1), Scalar(Uint(ty2))) => matches!(
+ (ty1, ty2),
+ (ty::UintTy::Usize, chalk_ir::UintTy::Usize)
+ | (ty::UintTy::U8, chalk_ir::UintTy::U8)
+ | (ty::UintTy::U16, chalk_ir::UintTy::U16)
+ | (ty::UintTy::U32, chalk_ir::UintTy::U32)
+ | (ty::UintTy::U64, chalk_ir::UintTy::U64)
+ | (ty::UintTy::U128, chalk_ir::UintTy::U128)
+ ),
+ (ty::Float(ty1), Scalar(Float(ty2))) => matches!(
+ (ty1, ty2),
+ (ty::FloatTy::F32, chalk_ir::FloatTy::F32)
+ | (ty::FloatTy::F64, chalk_ir::FloatTy::F64)
+ ),
+ (&ty::Tuple(substs), Tuple(len, _)) => substs.len() == *len,
+ (&ty::Array(..), Array(..)) => true,
+ (&ty::Slice(..), Slice(..)) => true,
+ (&ty::RawPtr(type_and_mut), Raw(mutability, _)) => {
+ match (type_and_mut.mutbl, mutability) {
+ (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true,
+ (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false,
+ (ast::Mutability::Not, chalk_ir::Mutability::Mut) => false,
+ (ast::Mutability::Not, chalk_ir::Mutability::Not) => true,
+ }
+ }
+ (&ty::Ref(.., mutability1), Ref(mutability2, ..)) => {
+ match (mutability1, mutability2) {
+ (ast::Mutability::Mut, chalk_ir::Mutability::Mut) => true,
+ (ast::Mutability::Mut, chalk_ir::Mutability::Not) => false,
+ (ast::Mutability::Not, chalk_ir::Mutability::Mut) => false,
+ (ast::Mutability::Not, chalk_ir::Mutability::Not) => true,
+ }
+ }
+ (&ty::Opaque(def_id, ..), OpaqueType(opaque_ty_id, ..)) => def_id == opaque_ty_id.0,
+ (&ty::FnDef(def_id, ..), FnDef(fn_def_id, ..)) => def_id == fn_def_id.0,
+ (&ty::Str, Str) => true,
+ (&ty::Never, Never) => true,
+ (&ty::Closure(def_id, ..), Closure(closure_id, _)) => def_id == closure_id.0,
+ (&ty::Foreign(def_id), Foreign(foreign_def_id)) => def_id == foreign_def_id.0,
+ (&ty::Error(..), Error) => false,
+ _ => false,
+ };
+ if provides {
+ return true;
+ }
+ }
+ false
+ }
+
+ fn associated_ty_value(
+ &self,
+ associated_ty_id: chalk_solve::rust_ir::AssociatedTyValueId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::AssociatedTyValue<RustInterner<'tcx>>> {
+ let def_id = associated_ty_id.0;
+ let assoc_item = self.interner.tcx.associated_item(def_id);
+ let impl_id = assoc_item.container_id(self.interner.tcx);
+ match assoc_item.kind {
+ AssocKind::Type => {}
+ _ => unimplemented!("Not possible??"),
+ }
+
+ let trait_item_id = assoc_item.trait_item_def_id.expect("assoc_ty with no trait version");
+ let bound_vars = bound_vars_for_item(self.interner.tcx, def_id);
+ let binders = binders_for(self.interner, bound_vars);
+ let ty = self
+ .interner
+ .tcx
+ .bound_type_of(def_id)
+ .subst(self.interner.tcx, bound_vars)
+ .lower_into(self.interner);
+
+ Arc::new(chalk_solve::rust_ir::AssociatedTyValue {
+ impl_id: chalk_ir::ImplId(impl_id),
+ associated_ty_id: chalk_ir::AssocTypeId(trait_item_id),
+ value: chalk_ir::Binders::new(
+ binders,
+ chalk_solve::rust_ir::AssociatedTyValueBound { ty },
+ ),
+ })
+ }
+
+ fn custom_clauses(&self) -> Vec<chalk_ir::ProgramClause<RustInterner<'tcx>>> {
+ vec![]
+ }
+
+ fn local_impls_to_coherence_check(
+ &self,
+ _trait_id: chalk_ir::TraitId<RustInterner<'tcx>>,
+ ) -> Vec<chalk_ir::ImplId<RustInterner<'tcx>>> {
+ unimplemented!()
+ }
+
+ fn opaque_ty_data(
+ &self,
+ opaque_ty_id: chalk_ir::OpaqueTyId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::OpaqueTyDatum<RustInterner<'tcx>>> {
+ let bound_vars = ty::fold::shift_vars(
+ self.interner.tcx,
+ bound_vars_for_item(self.interner.tcx, opaque_ty_id.0),
+ 1,
+ );
+ let where_clauses = self.where_clauses_for(opaque_ty_id.0, bound_vars);
+
+ let identity_substs = InternalSubsts::identity_for_item(self.interner.tcx, opaque_ty_id.0);
+
+ let explicit_item_bounds = self.interner.tcx.bound_explicit_item_bounds(opaque_ty_id.0);
+ let bounds =
+ explicit_item_bounds
+ .0
+ .iter()
+ .map(|(bound, _)| {
+ explicit_item_bounds.rebind(*bound).subst(self.interner.tcx, &bound_vars)
+ })
+ .map(|bound| {
+ bound.fold_with(&mut ReplaceOpaqueTyFolder {
+ tcx: self.interner.tcx,
+ opaque_ty_id,
+ identity_substs,
+ binder_index: ty::INNERMOST,
+ })
+ })
+ .filter_map(|bound| {
+ LowerInto::<
+ Option<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>>
+ >::lower_into(bound, self.interner)
+ })
+ .collect();
+
+ // Binder for the bound variable representing the concrete impl Trait type.
+ let existential_binder = chalk_ir::VariableKinds::from1(
+ self.interner,
+ chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General),
+ );
+
+ let value = chalk_solve::rust_ir::OpaqueTyDatumBound {
+ bounds: chalk_ir::Binders::new(existential_binder.clone(), bounds),
+ where_clauses: chalk_ir::Binders::new(existential_binder, where_clauses),
+ };
+
+ let binders = binders_for(self.interner, bound_vars);
+ Arc::new(chalk_solve::rust_ir::OpaqueTyDatum {
+ opaque_ty_id,
+ bound: chalk_ir::Binders::new(binders, value),
+ })
+ }
+
+ fn program_clauses_for_env(
+ &self,
+ environment: &chalk_ir::Environment<RustInterner<'tcx>>,
+ ) -> chalk_ir::ProgramClauses<RustInterner<'tcx>> {
+ chalk_solve::program_clauses_for_env(self, environment)
+ }
+
+ fn well_known_trait_id(
+ &self,
+ well_known_trait: chalk_solve::rust_ir::WellKnownTrait,
+ ) -> Option<chalk_ir::TraitId<RustInterner<'tcx>>> {
+ use chalk_solve::rust_ir::WellKnownTrait::*;
+ let lang_items = self.interner.tcx.lang_items();
+ let def_id = match well_known_trait {
+ Sized => lang_items.sized_trait(),
+ Copy => lang_items.copy_trait(),
+ Clone => lang_items.clone_trait(),
+ Drop => lang_items.drop_trait(),
+ Fn => lang_items.fn_trait(),
+ FnMut => lang_items.fn_mut_trait(),
+ FnOnce => lang_items.fn_once_trait(),
+ Generator => lang_items.gen_trait(),
+ Unsize => lang_items.unsize_trait(),
+ Unpin => lang_items.unpin_trait(),
+ CoerceUnsized => lang_items.coerce_unsized_trait(),
+ DiscriminantKind => lang_items.discriminant_kind_trait(),
+ DispatchFromDyn => lang_items.dispatch_from_dyn_trait(),
+ };
+ def_id.map(chalk_ir::TraitId)
+ }
+
+ fn is_object_safe(&self, trait_id: chalk_ir::TraitId<RustInterner<'tcx>>) -> bool {
+ self.interner.tcx.is_object_safe(trait_id.0)
+ }
+
+ fn hidden_opaque_type(
+ &self,
+ _id: chalk_ir::OpaqueTyId<RustInterner<'tcx>>,
+ ) -> chalk_ir::Ty<RustInterner<'tcx>> {
+ // FIXME(chalk): actually get hidden ty
+ self.interner
+ .tcx
+ .mk_ty(ty::Tuple(self.interner.tcx.intern_type_list(&[])))
+ .lower_into(self.interner)
+ }
+
+ fn closure_kind(
+ &self,
+ _closure_id: chalk_ir::ClosureId<RustInterner<'tcx>>,
+ substs: &chalk_ir::Substitution<RustInterner<'tcx>>,
+ ) -> chalk_solve::rust_ir::ClosureKind {
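+ // The closure kind is encoded in the substitution as a synthetic scalar
+ // type: `i8` stands for `Fn`, `i16` for `FnMut`, and `i32` for `FnOnce`,
+ // which the match below decodes.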
+ let kind = &substs.as_slice(self.interner)[substs.len(self.interner) - 3];
+ match kind.assert_ty_ref(self.interner).kind(self.interner) {
+ chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(int_ty)) => match int_ty {
+ chalk_ir::IntTy::I8 => chalk_solve::rust_ir::ClosureKind::Fn,
+ chalk_ir::IntTy::I16 => chalk_solve::rust_ir::ClosureKind::FnMut,
+ chalk_ir::IntTy::I32 => chalk_solve::rust_ir::ClosureKind::FnOnce,
+ _ => bug!("bad closure kind"),
+ },
+ _ => bug!("bad closure kind"),
+ }
+ }
+
+ fn closure_inputs_and_output(
+ &self,
+ _closure_id: chalk_ir::ClosureId<RustInterner<'tcx>>,
+ substs: &chalk_ir::Substitution<RustInterner<'tcx>>,
+ ) -> chalk_ir::Binders<chalk_solve::rust_ir::FnDefInputsAndOutputDatum<RustInterner<'tcx>>>
+ {
+ let sig = &substs.as_slice(self.interner)[substs.len(self.interner) - 2];
+ match sig.assert_ty_ref(self.interner).kind(self.interner) {
+ chalk_ir::TyKind::Function(f) => {
+ let substitution = f.substitution.0.as_slice(self.interner);
+ let return_type = substitution.last().unwrap().assert_ty_ref(self.interner).clone();
+ // Closure arguments are tupled
+ let argument_tuple = substitution[0].assert_ty_ref(self.interner);
+ let argument_types = match argument_tuple.kind(self.interner) {
+ chalk_ir::TyKind::Tuple(_len, substitution) => substitution
+ .iter(self.interner)
+ .map(|arg| arg.assert_ty_ref(self.interner))
+ .cloned()
+ .collect(),
+ _ => bug!("Expecting closure FnSig args to be tupled."),
+ };
+
+ chalk_ir::Binders::new(
+ chalk_ir::VariableKinds::from_iter(
+ self.interner,
+ (0..f.num_binders).map(|_| chalk_ir::VariableKind::Lifetime),
+ ),
+ chalk_solve::rust_ir::FnDefInputsAndOutputDatum { argument_types, return_type },
+ )
+ }
+ _ => panic!("Invalid sig."),
+ }
+ }
+
+ fn closure_upvars(
+ &self,
+ _closure_id: chalk_ir::ClosureId<RustInterner<'tcx>>,
+ substs: &chalk_ir::Substitution<RustInterner<'tcx>>,
+ ) -> chalk_ir::Binders<chalk_ir::Ty<RustInterner<'tcx>>> {
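+ // The tupled upvar types are the last component of the closure
+ // substitution; reuse the binders computed for the signature.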
+ let inputs_and_output = self.closure_inputs_and_output(_closure_id, substs);
+ let tuple = substs.as_slice(self.interner).last().unwrap().assert_ty_ref(self.interner);
+ inputs_and_output.map_ref(|_| tuple.clone())
+ }
+
+ fn closure_fn_substitution(
+ &self,
+ _closure_id: chalk_ir::ClosureId<RustInterner<'tcx>>,
+ substs: &chalk_ir::Substitution<RustInterner<'tcx>>,
+ ) -> chalk_ir::Substitution<RustInterner<'tcx>> {
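+ // A closure's substitution ends with three synthetic components (the
+ // closure kind, the signature, and the tupled upvars); stripping them
+ // recovers the substitution of the enclosing scope.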
+ let substitution = &substs.as_slice(self.interner)[0..substs.len(self.interner) - 3];
+ chalk_ir::Substitution::from_iter(self.interner, substitution)
+ }
+
+ fn generator_datum(
+ &self,
+ _generator_id: chalk_ir::GeneratorId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::GeneratorDatum<RustInterner<'tcx>>> {
+ unimplemented!()
+ }
+
+ fn generator_witness_datum(
+ &self,
+ _generator_id: chalk_ir::GeneratorId<RustInterner<'tcx>>,
+ ) -> Arc<chalk_solve::rust_ir::GeneratorWitnessDatum<RustInterner<'tcx>>> {
+ unimplemented!()
+ }
+
+ fn unification_database(&self) -> &dyn chalk_ir::UnificationDatabase<RustInterner<'tcx>> {
+ self
+ }
+
+ fn discriminant_type(
+ &self,
+ _: chalk_ir::Ty<RustInterner<'tcx>>,
+ ) -> chalk_ir::Ty<RustInterner<'tcx>> {
+ unimplemented!()
+ }
+}
+
+impl<'tcx> chalk_ir::UnificationDatabase<RustInterner<'tcx>> for RustIrDatabase<'tcx> {
+ fn fn_def_variance(
+ &self,
+ def_id: chalk_ir::FnDefId<RustInterner<'tcx>>,
+ ) -> chalk_ir::Variances<RustInterner<'tcx>> {
+ let variances = self.interner.tcx.variances_of(def_id.0);
+ chalk_ir::Variances::from_iter(
+ self.interner,
+ variances.iter().map(|v| v.lower_into(self.interner)),
+ )
+ }
+
+ fn adt_variance(
+ &self,
+ adt_id: chalk_ir::AdtId<RustInterner<'tcx>>,
+ ) -> chalk_ir::Variances<RustInterner<'tcx>> {
+ let variances = self.interner.tcx.variances_of(adt_id.0.did());
+ chalk_ir::Variances::from_iter(
+ self.interner,
+ variances.iter().map(|v| v.lower_into(self.interner)),
+ )
+ }
+}
+
+/// Creates an `InternalSubsts` that maps each generic parameter to a higher-ranked
+/// var bound at index `0`. For types, we use a `BoundVar` index equal to
+/// the type parameter index. For regions, we use the anonymous
+/// `BrAnon` variant, with an index based on the parameter's position.
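+///
+/// For example (illustrative), for `struct Foo<'a, T>` this yields a
+/// substitution of two bound vars: an anonymous region `BrAnon(0)` for `'a`
+/// and a bound type with `var == 1` for `T`.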
+fn bound_vars_for_item<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> SubstsRef<'tcx> {
+ InternalSubsts::for_item(tcx, def_id, |param, substs| match param.kind {
+ ty::GenericParamDefKind::Type { .. } => tcx
+ .mk_ty(ty::Bound(
+ ty::INNERMOST,
+ ty::BoundTy {
+ var: ty::BoundVar::from(param.index),
+ kind: ty::BoundTyKind::Param(param.name),
+ },
+ ))
+ .into(),
+
+ ty::GenericParamDefKind::Lifetime => {
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_usize(substs.len()),
+ kind: ty::BrAnon(substs.len() as u32),
+ };
+ tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)).into()
+ }
+
+ ty::GenericParamDefKind::Const { .. } => tcx
+ .mk_const(ty::ConstS {
+ kind: ty::ConstKind::Bound(ty::INNERMOST, ty::BoundVar::from(param.index)),
+ ty: tcx.type_of(param.def_id),
+ })
+ .into(),
+ })
+}
+
+fn binders_for<'tcx>(
+ interner: RustInterner<'tcx>,
+ bound_vars: SubstsRef<'tcx>,
+) -> chalk_ir::VariableKinds<RustInterner<'tcx>> {
+ chalk_ir::VariableKinds::from_iter(
+ interner,
+ bound_vars.iter().map(|arg| match arg.unpack() {
+ ty::subst::GenericArgKind::Lifetime(_re) => chalk_ir::VariableKind::Lifetime,
+ ty::subst::GenericArgKind::Type(_ty) => {
+ chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General)
+ }
+ ty::subst::GenericArgKind::Const(c) => {
+ chalk_ir::VariableKind::Const(c.ty().lower_into(interner))
+ }
+ }),
+ )
+}
+
+struct ReplaceOpaqueTyFolder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ opaque_ty_id: chalk_ir::OpaqueTyId<RustInterner<'tcx>>,
+ identity_substs: SubstsRef<'tcx>,
+ binder_index: ty::DebruijnIndex,
+}
+
+impl<'tcx> ty::TypeFolder<'tcx> for ReplaceOpaqueTyFolder<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.binder_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.binder_index.shift_out(1);
+ t
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if let ty::Opaque(def_id, substs) = *ty.kind() {
+ if def_id == self.opaque_ty_id.0 && substs == self.identity_substs {
+ return self.tcx.mk_ty(ty::Bound(
+ self.binder_index,
+ ty::BoundTy::from(ty::BoundVar::from_u32(0)),
+ ));
+ }
+ }
+ ty
+ }
+}
diff --git a/compiler/rustc_traits/src/chalk/lowering.rs b/compiler/rustc_traits/src/chalk/lowering.rs
new file mode 100644
index 000000000..c7c604e14
--- /dev/null
+++ b/compiler/rustc_traits/src/chalk/lowering.rs
@@ -0,0 +1,1172 @@
+//! Contains the logic to lower rustc types into Chalk types
+//!
+//! In many cases there is a 1:1 relationship between a rustc type and a Chalk type.
+//! For example, a `SubstsRef` maps almost directly to a `Substitution`. In some
+//! other cases, such as `Param`s, there is no Chalk type, so we have to
+//! handle them accordingly.
+//!
+//! ## `Ty` lowering
+//! Much of the `Ty` lowering is 1:1 with Chalk. (Or will be eventually). A
+//! helpful table for what types lower to what can be found in the
+//! [Chalk book](https://rust-lang.github.io/chalk/book/types/rust_types.html).
+//! The most notable difference lies with `Param`s. To convert from rustc to
+//! Chalk, we eagerly and deeply convert `Param`s to placeholders (in goals) or
+//! bound variables (for clause generation through functions in `db`).
+//!
+//! ## `Region` lowering
+//! Regions are handled quite differently in rustc and Chalk. In rustc, there
+//! is a difference between "early bound" and "late bound" regions, where only
+//! the late bound regions have a `DebruijnIndex`. Moreover, in Chalk all
+//! regions (Lifetimes) have an associated index. In rustc, only `BrAnon`s have
+//! an index, whereas `BrNamed`s don't. In order to lower regions to Chalk, we
+//! convert all regions into `BrAnon` late-bound regions.
+//!
+//! ## `Const` lowering
+//! Chalk doesn't yet handle consts, so they are currently lowered to
+//! an empty tuple.
+//!
+//! ## Bound variable collection
+//! Another difference between rustc and Chalk lies in the handling of binders.
+//! Chalk requires that we store the bound parameter kinds, whereas rustc does
+//! not. To lower anything wrapped in a `Binder`, we first deeply collect any bound
+//! variables from the current `Binder`.
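+//!
+//! As an illustrative sketch (not part of the lowering itself), consider
+//! `for<'a> fn(&'a u32)`:
+//!
+//! ```ignore (illustrative)
+//! // 1. collect the variables bound by the `Binder`: one lifetime, `'a`
+//! // 2. convert `'a` into an anonymous late-bound (`BrAnon`) region
+//! // 3. wrap the lowered type in a `chalk_ir::Binders` carrying a single
+//! //    lifetime variable kind
+//! ```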
+
+use rustc_ast::ast;
+use rustc_middle::traits::{ChalkEnvironmentAndGoal, ChalkRustInterner as RustInterner};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, SubstsRef};
+use rustc_middle::ty::{
+ self, Binder, Region, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
+ TypeSuperVisitable, TypeVisitable, TypeVisitor,
+};
+use rustc_span::def_id::DefId;
+
+use chalk_ir::{FnSig, ForeignDefId};
+use rustc_hir::Unsafety;
+use std::collections::btree_map::{BTreeMap, Entry};
+use std::ops::ControlFlow;
+
+/// Essentially an `Into` with a `RustInterner` parameter
+pub(crate) trait LowerInto<'tcx, T> {
+ /// Lower a rustc construct (e.g., `ty::TraitPredicate`) to a chalk type, consuming `self`.
+ fn lower_into(self, interner: RustInterner<'tcx>) -> T;
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::Substitution<RustInterner<'tcx>>> for SubstsRef<'tcx> {
+ fn lower_into(
+ self,
+ interner: RustInterner<'tcx>,
+ ) -> chalk_ir::Substitution<RustInterner<'tcx>> {
+ chalk_ir::Substitution::from_iter(interner, self.iter().map(|s| s.lower_into(interner)))
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, SubstsRef<'tcx>> for &chalk_ir::Substitution<RustInterner<'tcx>> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> SubstsRef<'tcx> {
+ interner.tcx.mk_substs(self.iter(interner).map(|subst| subst.lower_into(interner)))
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::AliasTy<RustInterner<'tcx>>> for ty::ProjectionTy<'tcx> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> chalk_ir::AliasTy<RustInterner<'tcx>> {
+ chalk_ir::AliasTy::Projection(chalk_ir::ProjectionTy {
+ associated_ty_id: chalk_ir::AssocTypeId(self.item_def_id),
+ substitution: self.substs.lower_into(interner),
+ })
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::InEnvironment<chalk_ir::Goal<RustInterner<'tcx>>>>
+ for ChalkEnvironmentAndGoal<'tcx>
+{
+ fn lower_into(
+ self,
+ interner: RustInterner<'tcx>,
+ ) -> chalk_ir::InEnvironment<chalk_ir::Goal<RustInterner<'tcx>>> {
+ let clauses = self.environment.into_iter().map(|predicate| {
+ let (predicate, binders, _named_regions) =
+ collect_bound_vars(interner, interner.tcx, predicate.kind());
+ let consequence = match predicate {
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
+ chalk_ir::DomainGoal::FromEnv(chalk_ir::FromEnv::Ty(ty.lower_into(interner)))
+ }
+ ty::PredicateKind::Trait(predicate) => chalk_ir::DomainGoal::FromEnv(
+ chalk_ir::FromEnv::Trait(predicate.trait_ref.lower_into(interner)),
+ ),
+ ty::PredicateKind::RegionOutlives(predicate) => chalk_ir::DomainGoal::Holds(
+ chalk_ir::WhereClause::LifetimeOutlives(chalk_ir::LifetimeOutlives {
+ a: predicate.0.lower_into(interner),
+ b: predicate.1.lower_into(interner),
+ }),
+ ),
+ ty::PredicateKind::TypeOutlives(predicate) => chalk_ir::DomainGoal::Holds(
+ chalk_ir::WhereClause::TypeOutlives(chalk_ir::TypeOutlives {
+ ty: predicate.0.lower_into(interner),
+ lifetime: predicate.1.lower_into(interner),
+ }),
+ ),
+ ty::PredicateKind::Projection(predicate) => chalk_ir::DomainGoal::Holds(
+ chalk_ir::WhereClause::AliasEq(predicate.lower_into(interner)),
+ ),
+ ty::PredicateKind::WellFormed(arg) => match arg.unpack() {
+ ty::GenericArgKind::Type(ty) => chalk_ir::DomainGoal::WellFormed(
+ chalk_ir::WellFormed::Ty(ty.lower_into(interner)),
+ ),
+ // FIXME(chalk): we need to change `WellFormed` in Chalk to take a `GenericArg`
+ _ => chalk_ir::DomainGoal::WellFormed(chalk_ir::WellFormed::Ty(
+ interner.tcx.types.unit.lower_into(interner),
+ )),
+ },
+ ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..) => bug!("unexpected predicate {}", predicate),
+ };
+ let value = chalk_ir::ProgramClauseImplication {
+ consequence,
+ conditions: chalk_ir::Goals::empty(interner),
+ priority: chalk_ir::ClausePriority::High,
+ constraints: chalk_ir::Constraints::empty(interner),
+ };
+ chalk_ir::ProgramClauseData(chalk_ir::Binders::new(binders, value)).intern(interner)
+ });
+
+ let goal: chalk_ir::GoalData<RustInterner<'tcx>> = self.goal.lower_into(interner);
+ chalk_ir::InEnvironment {
+ environment: chalk_ir::Environment {
+ clauses: chalk_ir::ProgramClauses::from_iter(interner, clauses),
+ },
+ goal: goal.intern(interner),
+ }
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::GoalData<RustInterner<'tcx>>> for ty::Predicate<'tcx> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> chalk_ir::GoalData<RustInterner<'tcx>> {
+ let (predicate, binders, _named_regions) =
+ collect_bound_vars(interner, interner.tcx, self.kind());
+
+ let value = match predicate {
+ ty::PredicateKind::Trait(predicate) => {
+ chalk_ir::GoalData::DomainGoal(chalk_ir::DomainGoal::Holds(
+ chalk_ir::WhereClause::Implemented(predicate.trait_ref.lower_into(interner)),
+ ))
+ }
+ ty::PredicateKind::RegionOutlives(predicate) => {
+ chalk_ir::GoalData::DomainGoal(chalk_ir::DomainGoal::Holds(
+ chalk_ir::WhereClause::LifetimeOutlives(chalk_ir::LifetimeOutlives {
+ a: predicate.0.lower_into(interner),
+ b: predicate.1.lower_into(interner),
+ }),
+ ))
+ }
+ ty::PredicateKind::TypeOutlives(predicate) => {
+ chalk_ir::GoalData::DomainGoal(chalk_ir::DomainGoal::Holds(
+ chalk_ir::WhereClause::TypeOutlives(chalk_ir::TypeOutlives {
+ ty: predicate.0.lower_into(interner),
+ lifetime: predicate.1.lower_into(interner),
+ }),
+ ))
+ }
+ ty::PredicateKind::Projection(predicate) => {
+ chalk_ir::GoalData::DomainGoal(chalk_ir::DomainGoal::Holds(
+ chalk_ir::WhereClause::AliasEq(predicate.lower_into(interner)),
+ ))
+ }
+ ty::PredicateKind::WellFormed(arg) => match arg.unpack() {
+ GenericArgKind::Type(ty) => match ty.kind() {
+ // FIXME(chalk): In Chalk, a placeholder is WellFormed if it is
+ // `FromEnv`. However, when we "lower" Params, we don't update
+ // the environment.
+ ty::Placeholder(..) => {
+ chalk_ir::GoalData::All(chalk_ir::Goals::empty(interner))
+ }
+
+ _ => chalk_ir::GoalData::DomainGoal(chalk_ir::DomainGoal::WellFormed(
+ chalk_ir::WellFormed::Ty(ty.lower_into(interner)),
+ )),
+ },
+ // FIXME(chalk): handle well formed consts
+ GenericArgKind::Const(..) => {
+ chalk_ir::GoalData::All(chalk_ir::Goals::empty(interner))
+ }
+                GenericArgKind::Lifetime(lt) => bug!("unexpected well formed predicate: {:?}", lt),
+ },
+
+ ty::PredicateKind::ObjectSafe(t) => chalk_ir::GoalData::DomainGoal(
+ chalk_ir::DomainGoal::ObjectSafe(chalk_ir::TraitId(t)),
+ ),
+
+ ty::PredicateKind::Subtype(ty::SubtypePredicate { a, b, a_is_expected: _ }) => {
+ chalk_ir::GoalData::SubtypeGoal(chalk_ir::SubtypeGoal {
+ a: a.lower_into(interner),
+ b: b.lower_into(interner),
+ })
+ }
+
+ // FIXME(chalk): other predicates
+ //
+ // We can defer this, but ultimately we'll want to express
+ // some of these in terms of chalk operations.
+ ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..) => {
+ chalk_ir::GoalData::All(chalk_ir::Goals::empty(interner))
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => chalk_ir::GoalData::DomainGoal(
+ chalk_ir::DomainGoal::FromEnv(chalk_ir::FromEnv::Ty(ty.lower_into(interner))),
+ ),
+ };
+
+ chalk_ir::GoalData::Quantified(
+ chalk_ir::QuantifierKind::ForAll,
+ chalk_ir::Binders::new(binders, value.intern(interner)),
+ )
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::TraitRef<RustInterner<'tcx>>>
+ for rustc_middle::ty::TraitRef<'tcx>
+{
+ fn lower_into(self, interner: RustInterner<'tcx>) -> chalk_ir::TraitRef<RustInterner<'tcx>> {
+ chalk_ir::TraitRef {
+ trait_id: chalk_ir::TraitId(self.def_id),
+ substitution: self.substs.lower_into(interner),
+ }
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::AliasEq<RustInterner<'tcx>>>
+ for rustc_middle::ty::ProjectionPredicate<'tcx>
+{
+ fn lower_into(self, interner: RustInterner<'tcx>) -> chalk_ir::AliasEq<RustInterner<'tcx>> {
+ // FIXME(associated_const_equality): teach chalk about terms for alias eq.
+ chalk_ir::AliasEq {
+ ty: self.term.ty().unwrap().lower_into(interner),
+ alias: self.projection_ty.lower_into(interner),
+ }
+ }
+}
+
+/*
+// FIXME(...): Where do I add this to Chalk? I can't find it in the rustc repo anywhere.
+impl<'tcx> LowerInto<'tcx, chalk_ir::Term<RustInterner<'tcx>>> for rustc_middle::ty::Term<'tcx> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> chalk_ir::Term<RustInterner<'tcx>> {
+ match self {
+ ty::Term::Ty(ty) => ty.lower_into(interner).into(),
+ ty::Term::Const(c) => c.lower_into(interner).into(),
+ }
+ }
+}
+*/
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::Ty<RustInterner<'tcx>>> for Ty<'tcx> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> chalk_ir::Ty<RustInterner<'tcx>> {
+ let int = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Int(i));
+ let uint = |i| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Uint(i));
+ let float = |f| chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Float(f));
+
+ match *self.kind() {
+ ty::Bool => chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Bool),
+ ty::Char => chalk_ir::TyKind::Scalar(chalk_ir::Scalar::Char),
+ ty::Int(ty) => match ty {
+ ty::IntTy::Isize => int(chalk_ir::IntTy::Isize),
+ ty::IntTy::I8 => int(chalk_ir::IntTy::I8),
+ ty::IntTy::I16 => int(chalk_ir::IntTy::I16),
+ ty::IntTy::I32 => int(chalk_ir::IntTy::I32),
+ ty::IntTy::I64 => int(chalk_ir::IntTy::I64),
+ ty::IntTy::I128 => int(chalk_ir::IntTy::I128),
+ },
+ ty::Uint(ty) => match ty {
+ ty::UintTy::Usize => uint(chalk_ir::UintTy::Usize),
+ ty::UintTy::U8 => uint(chalk_ir::UintTy::U8),
+ ty::UintTy::U16 => uint(chalk_ir::UintTy::U16),
+ ty::UintTy::U32 => uint(chalk_ir::UintTy::U32),
+ ty::UintTy::U64 => uint(chalk_ir::UintTy::U64),
+ ty::UintTy::U128 => uint(chalk_ir::UintTy::U128),
+ },
+ ty::Float(ty) => match ty {
+ ty::FloatTy::F32 => float(chalk_ir::FloatTy::F32),
+ ty::FloatTy::F64 => float(chalk_ir::FloatTy::F64),
+ },
+ ty::Adt(def, substs) => {
+ chalk_ir::TyKind::Adt(chalk_ir::AdtId(def), substs.lower_into(interner))
+ }
+ ty::Foreign(def_id) => chalk_ir::TyKind::Foreign(ForeignDefId(def_id)),
+ ty::Str => chalk_ir::TyKind::Str,
+ ty::Array(ty, len) => {
+ chalk_ir::TyKind::Array(ty.lower_into(interner), len.lower_into(interner))
+ }
+ ty::Slice(ty) => chalk_ir::TyKind::Slice(ty.lower_into(interner)),
+
+ ty::RawPtr(ptr) => {
+ chalk_ir::TyKind::Raw(ptr.mutbl.lower_into(interner), ptr.ty.lower_into(interner))
+ }
+ ty::Ref(region, ty, mutability) => chalk_ir::TyKind::Ref(
+ mutability.lower_into(interner),
+ region.lower_into(interner),
+ ty.lower_into(interner),
+ ),
+ ty::FnDef(def_id, substs) => {
+ chalk_ir::TyKind::FnDef(chalk_ir::FnDefId(def_id), substs.lower_into(interner))
+ }
+ ty::FnPtr(sig) => {
+ let (inputs_and_outputs, binders, _named_regions) =
+ collect_bound_vars(interner, interner.tcx, sig.inputs_and_output());
+ chalk_ir::TyKind::Function(chalk_ir::FnPointer {
+ num_binders: binders.len(interner),
+ sig: sig.lower_into(interner),
+ substitution: chalk_ir::FnSubst(chalk_ir::Substitution::from_iter(
+ interner,
+ inputs_and_outputs.iter().map(|ty| {
+ chalk_ir::GenericArgData::Ty(ty.lower_into(interner)).intern(interner)
+ }),
+ )),
+ })
+ }
+ ty::Dynamic(predicates, region) => chalk_ir::TyKind::Dyn(chalk_ir::DynTy {
+ bounds: predicates.lower_into(interner),
+ lifetime: region.lower_into(interner),
+ }),
+ ty::Closure(def_id, substs) => {
+ chalk_ir::TyKind::Closure(chalk_ir::ClosureId(def_id), substs.lower_into(interner))
+ }
+ ty::Generator(def_id, substs, _) => chalk_ir::TyKind::Generator(
+ chalk_ir::GeneratorId(def_id),
+ substs.lower_into(interner),
+ ),
+ ty::GeneratorWitness(_) => unimplemented!(),
+ ty::Never => chalk_ir::TyKind::Never,
+ ty::Tuple(types) => {
+ chalk_ir::TyKind::Tuple(types.len(), types.as_substs().lower_into(interner))
+ }
+ ty::Projection(proj) => chalk_ir::TyKind::Alias(proj.lower_into(interner)),
+ ty::Opaque(def_id, substs) => {
+ chalk_ir::TyKind::Alias(chalk_ir::AliasTy::Opaque(chalk_ir::OpaqueTy {
+ opaque_ty_id: chalk_ir::OpaqueTyId(def_id),
+ substitution: substs.lower_into(interner),
+ }))
+ }
+            // This should have been done eagerly prior to this, and all `Param`s
+            // should have been substituted with placeholders.
+ ty::Param(_) => panic!("Lowering Param when not expected."),
+ ty::Bound(db, bound) => chalk_ir::TyKind::BoundVar(chalk_ir::BoundVar::new(
+ chalk_ir::DebruijnIndex::new(db.as_u32()),
+ bound.var.index(),
+ )),
+ ty::Placeholder(_placeholder) => {
+ chalk_ir::TyKind::Placeholder(chalk_ir::PlaceholderIndex {
+ ui: chalk_ir::UniverseIndex { counter: _placeholder.universe.as_usize() },
+ idx: _placeholder.name.as_usize(),
+ })
+ }
+ ty::Infer(_infer) => unimplemented!(),
+ ty::Error(_) => chalk_ir::TyKind::Error,
+ }
+ .intern(interner)
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, Ty<'tcx>> for &chalk_ir::Ty<RustInterner<'tcx>> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> Ty<'tcx> {
+ use chalk_ir::TyKind;
+
+ let kind = match self.kind(interner) {
+ TyKind::Adt(struct_id, substitution) => {
+ ty::Adt(struct_id.0, substitution.lower_into(interner))
+ }
+ TyKind::Scalar(scalar) => match scalar {
+ chalk_ir::Scalar::Bool => ty::Bool,
+ chalk_ir::Scalar::Char => ty::Char,
+ chalk_ir::Scalar::Int(int_ty) => match int_ty {
+ chalk_ir::IntTy::Isize => ty::Int(ty::IntTy::Isize),
+ chalk_ir::IntTy::I8 => ty::Int(ty::IntTy::I8),
+ chalk_ir::IntTy::I16 => ty::Int(ty::IntTy::I16),
+ chalk_ir::IntTy::I32 => ty::Int(ty::IntTy::I32),
+ chalk_ir::IntTy::I64 => ty::Int(ty::IntTy::I64),
+ chalk_ir::IntTy::I128 => ty::Int(ty::IntTy::I128),
+ },
+ chalk_ir::Scalar::Uint(int_ty) => match int_ty {
+ chalk_ir::UintTy::Usize => ty::Uint(ty::UintTy::Usize),
+ chalk_ir::UintTy::U8 => ty::Uint(ty::UintTy::U8),
+ chalk_ir::UintTy::U16 => ty::Uint(ty::UintTy::U16),
+ chalk_ir::UintTy::U32 => ty::Uint(ty::UintTy::U32),
+ chalk_ir::UintTy::U64 => ty::Uint(ty::UintTy::U64),
+ chalk_ir::UintTy::U128 => ty::Uint(ty::UintTy::U128),
+ },
+ chalk_ir::Scalar::Float(float_ty) => match float_ty {
+ chalk_ir::FloatTy::F32 => ty::Float(ty::FloatTy::F32),
+ chalk_ir::FloatTy::F64 => ty::Float(ty::FloatTy::F64),
+ },
+ },
+ TyKind::Array(ty, c) => {
+ let ty = ty.lower_into(interner);
+ let c = c.lower_into(interner);
+ ty::Array(ty, c)
+ }
+ TyKind::FnDef(id, substitution) => ty::FnDef(id.0, substitution.lower_into(interner)),
+ TyKind::Closure(closure, substitution) => {
+ ty::Closure(closure.0, substitution.lower_into(interner))
+ }
+ TyKind::Generator(..) => unimplemented!(),
+ TyKind::GeneratorWitness(..) => unimplemented!(),
+ TyKind::Never => ty::Never,
+ TyKind::Tuple(_len, substitution) => {
+ ty::Tuple(substitution.lower_into(interner).try_as_type_list().unwrap())
+ }
+ TyKind::Slice(ty) => ty::Slice(ty.lower_into(interner)),
+ TyKind::Raw(mutbl, ty) => ty::RawPtr(ty::TypeAndMut {
+ ty: ty.lower_into(interner),
+ mutbl: mutbl.lower_into(interner),
+ }),
+ TyKind::Ref(mutbl, lifetime, ty) => ty::Ref(
+ lifetime.lower_into(interner),
+ ty.lower_into(interner),
+ mutbl.lower_into(interner),
+ ),
+ TyKind::Str => ty::Str,
+ TyKind::OpaqueType(opaque_ty, substitution) => {
+ ty::Opaque(opaque_ty.0, substitution.lower_into(interner))
+ }
+ TyKind::AssociatedType(assoc_ty, substitution) => ty::Projection(ty::ProjectionTy {
+ substs: substitution.lower_into(interner),
+ item_def_id: assoc_ty.0,
+ }),
+ TyKind::Foreign(def_id) => ty::Foreign(def_id.0),
+ TyKind::Error => return interner.tcx.ty_error(),
+ TyKind::Placeholder(placeholder) => ty::Placeholder(ty::Placeholder {
+ universe: ty::UniverseIndex::from_usize(placeholder.ui.counter),
+ name: ty::BoundVar::from_usize(placeholder.idx),
+ }),
+ TyKind::Alias(alias_ty) => match alias_ty {
+ chalk_ir::AliasTy::Projection(projection) => ty::Projection(ty::ProjectionTy {
+ item_def_id: projection.associated_ty_id.0,
+ substs: projection.substitution.lower_into(interner),
+ }),
+ chalk_ir::AliasTy::Opaque(opaque) => {
+ ty::Opaque(opaque.opaque_ty_id.0, opaque.substitution.lower_into(interner))
+ }
+ },
+ TyKind::Function(_quantified_ty) => unimplemented!(),
+ TyKind::BoundVar(_bound) => ty::Bound(
+ ty::DebruijnIndex::from_usize(_bound.debruijn.depth() as usize),
+ ty::BoundTy {
+ var: ty::BoundVar::from_usize(_bound.index),
+ kind: ty::BoundTyKind::Anon,
+ },
+ ),
+ TyKind::InferenceVar(_, _) => unimplemented!(),
+ TyKind::Dyn(_) => unimplemented!(),
+ };
+ interner.tcx.mk_ty(kind)
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::Lifetime<RustInterner<'tcx>>> for Region<'tcx> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> chalk_ir::Lifetime<RustInterner<'tcx>> {
+ match *self {
+ ty::ReEarlyBound(_) => {
+ panic!("Should have already been substituted.");
+ }
+ ty::ReLateBound(db, br) => chalk_ir::LifetimeData::BoundVar(chalk_ir::BoundVar::new(
+ chalk_ir::DebruijnIndex::new(db.as_u32()),
+ br.var.as_usize(),
+ ))
+ .intern(interner),
+ ty::ReFree(_) => unimplemented!(),
+ ty::ReStatic => chalk_ir::LifetimeData::Static.intern(interner),
+ ty::ReVar(_) => unimplemented!(),
+ ty::RePlaceholder(placeholder_region) => {
+ chalk_ir::LifetimeData::Placeholder(chalk_ir::PlaceholderIndex {
+ ui: chalk_ir::UniverseIndex { counter: placeholder_region.universe.index() },
+ idx: 0,
+ })
+ .intern(interner)
+ }
+ ty::ReEmpty(ui) => {
+ chalk_ir::LifetimeData::Empty(chalk_ir::UniverseIndex { counter: ui.index() })
+ .intern(interner)
+ }
+ ty::ReErased => chalk_ir::LifetimeData::Erased.intern(interner),
+ }
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, Region<'tcx>> for &chalk_ir::Lifetime<RustInterner<'tcx>> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> Region<'tcx> {
+ let kind = match self.data(interner) {
+ chalk_ir::LifetimeData::BoundVar(var) => ty::ReLateBound(
+ ty::DebruijnIndex::from_u32(var.debruijn.depth()),
+ ty::BoundRegion {
+ var: ty::BoundVar::from_usize(var.index),
+ kind: ty::BrAnon(var.index as u32),
+ },
+ ),
+ chalk_ir::LifetimeData::InferenceVar(_var) => unimplemented!(),
+ chalk_ir::LifetimeData::Placeholder(p) => ty::RePlaceholder(ty::Placeholder {
+ universe: ty::UniverseIndex::from_usize(p.ui.counter),
+ name: ty::BoundRegionKind::BrAnon(p.idx as u32),
+ }),
+ chalk_ir::LifetimeData::Static => return interner.tcx.lifetimes.re_static,
+ chalk_ir::LifetimeData::Empty(ui) => {
+ ty::ReEmpty(ty::UniverseIndex::from_usize(ui.counter))
+ }
+ chalk_ir::LifetimeData::Erased => return interner.tcx.lifetimes.re_erased,
+ chalk_ir::LifetimeData::Phantom(void, _) => match *void {},
+ };
+ interner.tcx.mk_region(kind)
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::Const<RustInterner<'tcx>>> for ty::Const<'tcx> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> chalk_ir::Const<RustInterner<'tcx>> {
+ let ty = self.ty().lower_into(interner);
+ let value = match self.kind() {
+ ty::ConstKind::Value(val) => {
+ chalk_ir::ConstValue::Concrete(chalk_ir::ConcreteConst { interned: val })
+ }
+ ty::ConstKind::Bound(db, bound) => chalk_ir::ConstValue::BoundVar(
+ chalk_ir::BoundVar::new(chalk_ir::DebruijnIndex::new(db.as_u32()), bound.index()),
+ ),
+ _ => unimplemented!("Const not implemented. {:?}", self),
+ };
+ chalk_ir::ConstData { ty, value }.intern(interner)
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, ty::Const<'tcx>> for &chalk_ir::Const<RustInterner<'tcx>> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> ty::Const<'tcx> {
+ let data = self.data(interner);
+ let ty = data.ty.lower_into(interner);
+ let kind = match data.value {
+ chalk_ir::ConstValue::BoundVar(var) => ty::ConstKind::Bound(
+ ty::DebruijnIndex::from_u32(var.debruijn.depth()),
+ ty::BoundVar::from_u32(var.index as u32),
+ ),
+ chalk_ir::ConstValue::InferenceVar(_var) => unimplemented!(),
+ chalk_ir::ConstValue::Placeholder(_p) => unimplemented!(),
+ chalk_ir::ConstValue::Concrete(c) => ty::ConstKind::Value(c.interned),
+ };
+ interner.tcx.mk_const(ty::ConstS { ty, kind })
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::GenericArg<RustInterner<'tcx>>> for GenericArg<'tcx> {
+ fn lower_into(self, interner: RustInterner<'tcx>) -> chalk_ir::GenericArg<RustInterner<'tcx>> {
+ match self.unpack() {
+ ty::subst::GenericArgKind::Type(ty) => {
+ chalk_ir::GenericArgData::Ty(ty.lower_into(interner))
+ }
+ ty::subst::GenericArgKind::Lifetime(lifetime) => {
+ chalk_ir::GenericArgData::Lifetime(lifetime.lower_into(interner))
+ }
+ ty::subst::GenericArgKind::Const(c) => {
+ chalk_ir::GenericArgData::Const(c.lower_into(interner))
+ }
+ }
+ .intern(interner)
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, ty::subst::GenericArg<'tcx>>
+ for &chalk_ir::GenericArg<RustInterner<'tcx>>
+{
+ fn lower_into(self, interner: RustInterner<'tcx>) -> ty::subst::GenericArg<'tcx> {
+ match self.data(interner) {
+ chalk_ir::GenericArgData::Ty(ty) => {
+ let t: Ty<'tcx> = ty.lower_into(interner);
+ t.into()
+ }
+ chalk_ir::GenericArgData::Lifetime(lifetime) => {
+ let r: Region<'tcx> = lifetime.lower_into(interner);
+ r.into()
+ }
+ chalk_ir::GenericArgData::Const(c) => {
+ let c: ty::Const<'tcx> = c.lower_into(interner);
+ c.into()
+ }
+ }
+ }
+}
+
+// We lower into an `Option` here since there are some predicates which Chalk
+// doesn't have a representation for yet (as a `WhereClause`), but which are so
+// common that we are just accepting the unsoundness for now. The `Option` will
+// eventually be removed.
+impl<'tcx> LowerInto<'tcx, Option<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>>>
+ for ty::Predicate<'tcx>
+{
+ fn lower_into(
+ self,
+ interner: RustInterner<'tcx>,
+ ) -> Option<chalk_ir::QuantifiedWhereClause<RustInterner<'tcx>>> {
+ let (predicate, binders, _named_regions) =
+ collect_bound_vars(interner, interner.tcx, self.kind());
+ let value = match predicate {
+ ty::PredicateKind::Trait(predicate) => {
+ Some(chalk_ir::WhereClause::Implemented(predicate.trait_ref.lower_into(interner)))
+ }
+ ty::PredicateKind::RegionOutlives(predicate) => {
+ Some(chalk_ir::WhereClause::LifetimeOutlives(chalk_ir::LifetimeOutlives {
+ a: predicate.0.lower_into(interner),
+ b: predicate.1.lower_into(interner),
+ }))
+ }
+ ty::PredicateKind::TypeOutlives(predicate) => {
+ Some(chalk_ir::WhereClause::TypeOutlives(chalk_ir::TypeOutlives {
+ ty: predicate.0.lower_into(interner),
+ lifetime: predicate.1.lower_into(interner),
+ }))
+ }
+ ty::PredicateKind::Projection(predicate) => {
+ Some(chalk_ir::WhereClause::AliasEq(predicate.lower_into(interner)))
+ }
+ ty::PredicateKind::WellFormed(_ty) => None,
+
+ ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => {
+ bug!("unexpected predicate {}", &self)
+ }
+ };
+ value.map(|value| chalk_ir::Binders::new(binders, value))
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::Binders<chalk_ir::QuantifiedWhereClauses<RustInterner<'tcx>>>>
+ for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>
+{
+ fn lower_into(
+ self,
+ interner: RustInterner<'tcx>,
+ ) -> chalk_ir::Binders<chalk_ir::QuantifiedWhereClauses<RustInterner<'tcx>>> {
+ // `Self` has one binder:
+ // Binder<&'tcx ty::List<ty::ExistentialPredicate<'tcx>>>
+ // The return type has two:
+ // Binders<&[Binders<WhereClause<I>>]>
+ // This means that any variables that are escaping `self` need to be
+ // shifted in by one so that they are still escaping.
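+        // (For example, a var that is free in `self` at index 0 must become
+        // index 1, so that it is still free once the extra binder is added.)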
+ let predicates = ty::fold::shift_vars(interner.tcx, self, 1);
+
+ let self_ty = interner.tcx.mk_ty(ty::Bound(
+ // This is going to be wrapped in a binder
+ ty::DebruijnIndex::from_usize(1),
+ ty::BoundTy { var: ty::BoundVar::from_usize(0), kind: ty::BoundTyKind::Anon },
+ ));
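+        // `self_ty` stands in for the erased "Self" type of the trait object;
+        // through one extra binder it refers to the bound var introduced by
+        // the `existential_binder` below.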
+ let where_clauses = predicates.into_iter().map(|predicate| {
+ let (predicate, binders, _named_regions) =
+ collect_bound_vars(interner, interner.tcx, predicate);
+ match predicate {
+ ty::ExistentialPredicate::Trait(ty::ExistentialTraitRef { def_id, substs }) => {
+ chalk_ir::Binders::new(
+ binders.clone(),
+ chalk_ir::WhereClause::Implemented(chalk_ir::TraitRef {
+ trait_id: chalk_ir::TraitId(def_id),
+ substitution: interner
+ .tcx
+ .mk_substs_trait(self_ty, substs)
+ .lower_into(interner),
+ }),
+ )
+ }
+ ty::ExistentialPredicate::Projection(predicate) => chalk_ir::Binders::new(
+ binders.clone(),
+ chalk_ir::WhereClause::AliasEq(chalk_ir::AliasEq {
+ alias: chalk_ir::AliasTy::Projection(chalk_ir::ProjectionTy {
+ associated_ty_id: chalk_ir::AssocTypeId(predicate.item_def_id),
+ substitution: interner
+ .tcx
+ .mk_substs_trait(self_ty, predicate.substs)
+ .lower_into(interner),
+ }),
+ // FIXME(associated_const_equality): teach chalk about terms for alias eq.
+ ty: predicate.term.ty().unwrap().lower_into(interner),
+ }),
+ ),
+ ty::ExistentialPredicate::AutoTrait(def_id) => chalk_ir::Binders::new(
+ binders.clone(),
+ chalk_ir::WhereClause::Implemented(chalk_ir::TraitRef {
+ trait_id: chalk_ir::TraitId(def_id),
+ substitution: interner
+ .tcx
+ .mk_substs_trait(self_ty, &[])
+ .lower_into(interner),
+ }),
+ ),
+ }
+ });
+
+ // Binder for the bound variable representing the concrete underlying type.
+ let existential_binder = chalk_ir::VariableKinds::from1(
+ interner,
+ chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General),
+ );
+ let value = chalk_ir::QuantifiedWhereClauses::from_iter(interner, where_clauses);
+ chalk_ir::Binders::new(existential_binder, value)
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::FnSig<RustInterner<'tcx>>>
+ for ty::Binder<'tcx, ty::FnSig<'tcx>>
+{
+ fn lower_into(self, _interner: RustInterner<'_>) -> FnSig<RustInterner<'tcx>> {
+ chalk_ir::FnSig {
+ abi: self.abi(),
+ safety: match self.unsafety() {
+ Unsafety::Normal => chalk_ir::Safety::Safe,
+ Unsafety::Unsafe => chalk_ir::Safety::Unsafe,
+ },
+ variadic: self.c_variadic(),
+ }
+ }
+}
+
+// We lower into an Option here since there are some predicates which Chalk
+// doesn't have a representation for yet (as an `InlineBound`). The `Option` will
+// eventually be removed.
+impl<'tcx> LowerInto<'tcx, Option<chalk_solve::rust_ir::QuantifiedInlineBound<RustInterner<'tcx>>>>
+ for ty::Predicate<'tcx>
+{
+ fn lower_into(
+ self,
+ interner: RustInterner<'tcx>,
+ ) -> Option<chalk_solve::rust_ir::QuantifiedInlineBound<RustInterner<'tcx>>> {
+ let (predicate, binders, _named_regions) =
+ collect_bound_vars(interner, interner.tcx, self.kind());
+ match predicate {
+ ty::PredicateKind::Trait(predicate) => Some(chalk_ir::Binders::new(
+ binders,
+ chalk_solve::rust_ir::InlineBound::TraitBound(
+ predicate.trait_ref.lower_into(interner),
+ ),
+ )),
+ ty::PredicateKind::Projection(predicate) => Some(chalk_ir::Binders::new(
+ binders,
+ chalk_solve::rust_ir::InlineBound::AliasEqBound(predicate.lower_into(interner)),
+ )),
+ ty::PredicateKind::TypeOutlives(_predicate) => None,
+ ty::PredicateKind::WellFormed(_ty) => None,
+
+ ty::PredicateKind::RegionOutlives(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => {
+ bug!("unexpected predicate {}", &self)
+ }
+ }
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::TraitBound<RustInterner<'tcx>>>
+ for ty::TraitRef<'tcx>
+{
+ fn lower_into(
+ self,
+ interner: RustInterner<'tcx>,
+ ) -> chalk_solve::rust_ir::TraitBound<RustInterner<'tcx>> {
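+        // `substs[0]` is the `Self` type; chalk's `TraitBound` keeps the self
+        // type separate, so only the remaining generic args are lowered here.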
+ chalk_solve::rust_ir::TraitBound {
+ trait_id: chalk_ir::TraitId(self.def_id),
+ args_no_self: self.substs[1..].iter().map(|arg| arg.lower_into(interner)).collect(),
+ }
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_ir::Mutability> for ast::Mutability {
+ fn lower_into(self, _interner: RustInterner<'tcx>) -> chalk_ir::Mutability {
+ match self {
+ rustc_ast::Mutability::Mut => chalk_ir::Mutability::Mut,
+ rustc_ast::Mutability::Not => chalk_ir::Mutability::Not,
+ }
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, ast::Mutability> for chalk_ir::Mutability {
+ fn lower_into(self, _interner: RustInterner<'tcx>) -> ast::Mutability {
+ match self {
+ chalk_ir::Mutability::Mut => ast::Mutability::Mut,
+ chalk_ir::Mutability::Not => ast::Mutability::Not,
+ }
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::Polarity> for ty::ImplPolarity {
+ fn lower_into(self, _interner: RustInterner<'tcx>) -> chalk_solve::rust_ir::Polarity {
+ match self {
+ ty::ImplPolarity::Positive => chalk_solve::rust_ir::Polarity::Positive,
+ ty::ImplPolarity::Negative => chalk_solve::rust_ir::Polarity::Negative,
+ // FIXME(chalk) reservation impls
+ ty::ImplPolarity::Reservation => chalk_solve::rust_ir::Polarity::Negative,
+ }
+ }
+}
+impl<'tcx> LowerInto<'tcx, chalk_ir::Variance> for ty::Variance {
+ fn lower_into(self, _interner: RustInterner<'tcx>) -> chalk_ir::Variance {
+ match self {
+ ty::Variance::Covariant => chalk_ir::Variance::Covariant,
+ ty::Variance::Invariant => chalk_ir::Variance::Invariant,
+ ty::Variance::Contravariant => chalk_ir::Variance::Contravariant,
+ ty::Variance::Bivariant => unimplemented!(),
+ }
+ }
+}
+
+impl<'tcx> LowerInto<'tcx, chalk_solve::rust_ir::AliasEqBound<RustInterner<'tcx>>>
+ for ty::ProjectionPredicate<'tcx>
+{
+ fn lower_into(
+ self,
+ interner: RustInterner<'tcx>,
+ ) -> chalk_solve::rust_ir::AliasEqBound<RustInterner<'tcx>> {
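+        // Split the projection's substs into those of the trait itself and
+        // those belonging to the associated item (e.g. GAT parameters).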
+ let (trait_ref, own_substs) = self.projection_ty.trait_ref_and_own_substs(interner.tcx);
+ chalk_solve::rust_ir::AliasEqBound {
+ trait_bound: trait_ref.lower_into(interner),
+ associated_ty_id: chalk_ir::AssocTypeId(self.projection_ty.item_def_id),
+ parameters: own_substs.iter().map(|arg| arg.lower_into(interner)).collect(),
+ value: self.term.ty().unwrap().lower_into(interner),
+ }
+ }
+}
+
+/// To collect bound vars, we have to do two passes. In the first pass, we
+/// collect all `BoundRegionKind`s and `ty::Bound`s. In the second pass, we then
+/// replace `BrNamed` with `BrAnon`. The two separate passes are important,
+/// since we can only assign `BrNamed` regions `BrAnon` indices that come
+/// *after* all the "real" `BrAnon`s.
+///
+/// It's important to note that because of prior substitution, we may have
+/// late-bound regions, even outside of fn contexts, since this is the best way
+/// to prep types for chalk lowering.
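+///
+/// As a hypothetical illustration: lowering a binder over `fn(&'a u8, T)`,
+/// where `T` is the bound type var with index 0 and `'a` is a `BrNamed`
+/// region, first records `T`, then renames `'a` to `BrAnon(1)` -- the first
+/// index past all of the collected "real" bound vars.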
+pub(crate) fn collect_bound_vars<'tcx, T: TypeFoldable<'tcx>>(
+ interner: RustInterner<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ ty: Binder<'tcx, T>,
+) -> (T, chalk_ir::VariableKinds<RustInterner<'tcx>>, BTreeMap<DefId, u32>) {
+ let mut bound_vars_collector = BoundVarsCollector::new();
+ ty.as_ref().skip_binder().visit_with(&mut bound_vars_collector);
+ let mut parameters = bound_vars_collector.parameters;
+ let named_parameters: BTreeMap<DefId, u32> = bound_vars_collector
+ .named_parameters
+ .into_iter()
+ .enumerate()
+ .map(|(i, def_id)| (def_id, (i + parameters.len()) as u32))
+ .collect();
+
+ let mut bound_var_substitutor = NamedBoundVarSubstitutor::new(tcx, &named_parameters);
+ let new_ty = ty.skip_binder().fold_with(&mut bound_var_substitutor);
+
+ for var in named_parameters.values() {
+ parameters.insert(*var, chalk_ir::VariableKind::Lifetime);
+ }
+
+ (0..parameters.len()).for_each(|i| {
+ parameters
+ .get(&(i as u32))
+ .or_else(|| bug!("Skipped bound var index: parameters={:?}", parameters));
+ });
+
+ let binders =
+ chalk_ir::VariableKinds::from_iter(interner, parameters.into_iter().map(|(_, v)| v));
+
+ (new_ty, binders, named_parameters)
+}
+
+pub(crate) struct BoundVarsCollector<'tcx> {
+ binder_index: ty::DebruijnIndex,
+ pub(crate) parameters: BTreeMap<u32, chalk_ir::VariableKind<RustInterner<'tcx>>>,
+ pub(crate) named_parameters: Vec<DefId>,
+}
+
+impl<'tcx> BoundVarsCollector<'tcx> {
+ pub(crate) fn new() -> Self {
+ BoundVarsCollector {
+ binder_index: ty::INNERMOST,
+ parameters: BTreeMap::new(),
+ named_parameters: vec![],
+ }
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for BoundVarsCollector<'tcx> {
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.binder_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.binder_index.shift_out(1);
+ result
+ }
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *t.kind() {
+ ty::Bound(debruijn, bound_ty) if debruijn == self.binder_index => {
+ match self.parameters.entry(bound_ty.var.as_u32()) {
+ Entry::Vacant(entry) => {
+ entry.insert(chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General));
+ }
+ Entry::Occupied(entry) => match entry.get() {
+ chalk_ir::VariableKind::Ty(_) => {}
+ _ => panic!(),
+ },
+ }
+ }
+
+ _ => (),
+ };
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ ty::ReLateBound(index, br) if index == self.binder_index => match br.kind {
+ ty::BoundRegionKind::BrNamed(def_id, _name) => {
+ if !self.named_parameters.iter().any(|d| *d == def_id) {
+ self.named_parameters.push(def_id);
+ }
+ }
+
+ ty::BoundRegionKind::BrAnon(var) => match self.parameters.entry(var) {
+ Entry::Vacant(entry) => {
+ entry.insert(chalk_ir::VariableKind::Lifetime);
+ }
+ Entry::Occupied(entry) => match entry.get() {
+ chalk_ir::VariableKind::Lifetime => {}
+ _ => panic!(),
+ },
+ },
+
+ ty::BoundRegionKind::BrEnv => unimplemented!(),
+ },
+
+ ty::ReEarlyBound(_re) => {
+ // FIXME(chalk): jackh726 - I think we should always have already
+ // substituted away `ReEarlyBound`s for `ReLateBound`s, but need to confirm.
+ unimplemented!();
+ }
+
+ _ => (),
+ };
+
+ r.super_visit_with(self)
+ }
+}
+
+/// This is used to replace `BoundRegionKind::BrNamed` with `BoundRegionKind::BrAnon`.
+/// Note: we assume that we will always have room for more bound vars (i.e., we
+/// won't ever hit the `u32` limit in `BrAnon`s).
+struct NamedBoundVarSubstitutor<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ binder_index: ty::DebruijnIndex,
+ named_parameters: &'a BTreeMap<DefId, u32>,
+}
+
+impl<'a, 'tcx> NamedBoundVarSubstitutor<'a, 'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, named_parameters: &'a BTreeMap<DefId, u32>) -> Self {
+ NamedBoundVarSubstitutor { tcx, binder_index: ty::INNERMOST, named_parameters }
+ }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for NamedBoundVarSubstitutor<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: Binder<'tcx, T>) -> Binder<'tcx, T> {
+ self.binder_index.shift_in(1);
+ let result = t.super_fold_with(self);
+ self.binder_index.shift_out(1);
+ result
+ }
+
+ fn fold_region(&mut self, r: Region<'tcx>) -> Region<'tcx> {
+ match *r {
+ ty::ReLateBound(index, br) if index == self.binder_index => match br.kind {
+ ty::BrNamed(def_id, _name) => match self.named_parameters.get(&def_id) {
+ Some(idx) => {
+ let new_br = ty::BoundRegion { var: br.var, kind: ty::BrAnon(*idx) };
+ return self.tcx.mk_region(ty::ReLateBound(index, new_br));
+ }
+ None => panic!("Missing `BrNamed`."),
+ },
+ ty::BrEnv => unimplemented!(),
+ ty::BrAnon(_) => {}
+ },
+ _ => (),
+ };
+
+ r.super_fold_with(self)
+ }
+}
+
+/// Used to substitute `Param`s with placeholders. We do this since Chalk
+/// doesn't have a notion of `Param`s.
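+///
+/// For example (hypothetical numbers): with `next_ty_placeholder == 2`, the
+/// first `Param` encountered becomes the placeholder with index 2, the next
+/// one index 3, and so on; `params` records the mapping so that
+/// `ReverseParamsSubstitutor` can undo it.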
+pub(crate) struct ParamsSubstitutor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ binder_index: ty::DebruijnIndex,
+ list: Vec<rustc_middle::ty::ParamTy>,
+ next_ty_placeholder: usize,
+ pub(crate) params: rustc_data_structures::fx::FxHashMap<usize, rustc_middle::ty::ParamTy>,
+ pub(crate) named_regions: BTreeMap<DefId, u32>,
+}
+
+impl<'tcx> ParamsSubstitutor<'tcx> {
+ pub(crate) fn new(tcx: TyCtxt<'tcx>, next_ty_placeholder: usize) -> Self {
+ ParamsSubstitutor {
+ tcx,
+ binder_index: ty::INNERMOST,
+ list: vec![],
+ next_ty_placeholder,
+ params: rustc_data_structures::fx::FxHashMap::default(),
+ named_regions: BTreeMap::default(),
+ }
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for ParamsSubstitutor<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(&mut self, t: Binder<'tcx, T>) -> Binder<'tcx, T> {
+ self.binder_index.shift_in(1);
+ let result = t.super_fold_with(self);
+ self.binder_index.shift_out(1);
+ result
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match *t.kind() {
+ ty::Param(param) => match self.list.iter().position(|r| r == &param) {
+                // Use the same placeholder index (offset by
+                // `next_ty_placeholder`, as in the `None` arm below) for a
+                // `Param` we have already seen.
+                Some(idx) => self.tcx.mk_ty(ty::Placeholder(ty::PlaceholderType {
+                    universe: ty::UniverseIndex::from_usize(0),
+                    name: ty::BoundVar::from_usize(idx + self.next_ty_placeholder),
+                })),
+ None => {
+ self.list.push(param);
+ let idx = self.list.len() - 1 + self.next_ty_placeholder;
+ self.params.insert(idx, param);
+ self.tcx.mk_ty(ty::Placeholder(ty::PlaceholderType {
+ universe: ty::UniverseIndex::from_usize(0),
+ name: ty::BoundVar::from_usize(idx),
+ }))
+ }
+ },
+ _ => t.super_fold_with(self),
+ }
+ }
+
+ fn fold_region(&mut self, r: Region<'tcx>) -> Region<'tcx> {
+ match *r {
+ // FIXME(chalk) - jackh726 - this currently isn't hit in any tests,
+ // since canonicalization will already change these to canonical
+ // variables (ty::ReLateBound).
+ ty::ReEarlyBound(_re) => match self.named_regions.get(&_re.def_id) {
+ Some(idx) => {
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_u32(*idx),
+ kind: ty::BrAnon(*idx),
+ };
+ self.tcx.mk_region(ty::ReLateBound(self.binder_index, br))
+ }
+ None => {
+ let idx = self.named_regions.len() as u32;
+ let br =
+ ty::BoundRegion { var: ty::BoundVar::from_u32(idx), kind: ty::BrAnon(idx) };
+ self.named_regions.insert(_re.def_id, idx);
+ self.tcx.mk_region(ty::ReLateBound(self.binder_index, br))
+ }
+ },
+
+ _ => r.super_fold_with(self),
+ }
+ }
+}
+
+pub(crate) struct ReverseParamsSubstitutor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ params: rustc_data_structures::fx::FxHashMap<usize, rustc_middle::ty::ParamTy>,
+}
+
+impl<'tcx> ReverseParamsSubstitutor<'tcx> {
+ pub(crate) fn new(
+ tcx: TyCtxt<'tcx>,
+ params: rustc_data_structures::fx::FxHashMap<usize, rustc_middle::ty::ParamTy>,
+ ) -> Self {
+ Self { tcx, params }
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for ReverseParamsSubstitutor<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match *t.kind() {
+ ty::Placeholder(ty::PlaceholderType { universe: ty::UniverseIndex::ROOT, name }) => {
+ match self.params.get(&name.as_usize()) {
+ Some(param) => self.tcx.mk_ty(ty::Param(*param)),
+ None => t,
+ }
+ }
+
+ _ => t.super_fold_with(self),
+ }
+ }
+}
+
+/// Used to collect `Placeholder`s.
+pub(crate) struct PlaceholdersCollector {
+ universe_index: ty::UniverseIndex,
+ pub(crate) next_ty_placeholder: usize,
+ pub(crate) next_anon_region_placeholder: u32,
+}
+
+impl PlaceholdersCollector {
+ pub(crate) fn new() -> Self {
+ PlaceholdersCollector {
+ universe_index: ty::UniverseIndex::ROOT,
+ next_ty_placeholder: 0,
+ next_anon_region_placeholder: 0,
+ }
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for PlaceholdersCollector {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match t.kind() {
+ ty::Placeholder(p) if p.universe == self.universe_index => {
+ self.next_ty_placeholder = self.next_ty_placeholder.max(p.name.as_usize() + 1);
+ }
+
+ _ => (),
+ };
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ ty::RePlaceholder(p) if p.universe == self.universe_index => {
+ if let ty::BoundRegionKind::BrAnon(anon) = p.name {
+ self.next_anon_region_placeholder = self.next_anon_region_placeholder.max(anon);
+ }
+ }
+
+ _ => (),
+ };
+
+ r.super_visit_with(self)
+ }
+}
diff --git a/compiler/rustc_traits/src/chalk/mod.rs b/compiler/rustc_traits/src/chalk/mod.rs
new file mode 100644
index 000000000..f76386fa7
--- /dev/null
+++ b/compiler/rustc_traits/src/chalk/mod.rs
@@ -0,0 +1,176 @@
+//! Calls `chalk-solve` to solve a `ty::Predicate`.
+//!
+//! In order to call `chalk-solve`, this file must convert a `CanonicalChalkEnvironmentAndGoal` into
+//! a Chalk uncanonical goal. It then calls Chalk, and converts the answer back into a rustc solution.
+
+pub(crate) mod db;
+pub(crate) mod lowering;
+
+use rustc_data_structures::fx::FxHashMap;
+
+use rustc_index::vec::IndexVec;
+
+use rustc_middle::infer::canonical::{CanonicalTyVarKind, CanonicalVarKind};
+use rustc_middle::traits::ChalkRustInterner;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::GenericArg;
+use rustc_middle::ty::{self, BoundVar, ParamTy, TyCtxt, TypeFoldable, TypeVisitable};
+
+use rustc_infer::infer::canonical::{
+ Canonical, CanonicalVarValues, Certainty, QueryRegionConstraints, QueryResponse,
+};
+use rustc_infer::traits::{self, CanonicalChalkEnvironmentAndGoal};
+
+use crate::chalk::db::RustIrDatabase as ChalkRustIrDatabase;
+use crate::chalk::lowering::LowerInto;
+use crate::chalk::lowering::{ParamsSubstitutor, PlaceholdersCollector, ReverseParamsSubstitutor};
+
+use chalk_solve::Solution;
+
+pub(crate) fn provide(p: &mut Providers) {
+ *p = Providers { evaluate_goal, ..*p };
+}
+
+pub(crate) fn evaluate_goal<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ obligation: CanonicalChalkEnvironmentAndGoal<'tcx>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, ()>>, traits::query::NoSolution> {
+ let interner = ChalkRustInterner { tcx };
+
+ // Chalk doesn't have a notion of `Params`, so instead we use placeholders.
+ let mut placeholders_collector = PlaceholdersCollector::new();
+ obligation.visit_with(&mut placeholders_collector);
+
+ let mut params_substitutor =
+ ParamsSubstitutor::new(tcx, placeholders_collector.next_ty_placeholder);
+ let obligation = obligation.fold_with(&mut params_substitutor);
+ let params: FxHashMap<usize, ParamTy> = params_substitutor.params;
+
+ let max_universe = obligation.max_universe.index();
+
+ let lowered_goal: chalk_ir::UCanonical<
+ chalk_ir::InEnvironment<chalk_ir::Goal<ChalkRustInterner<'tcx>>>,
+ > = chalk_ir::UCanonical {
+ canonical: chalk_ir::Canonical {
+ binders: chalk_ir::CanonicalVarKinds::from_iter(
+ interner,
+ obligation.variables.iter().map(|v| match v.kind {
+ CanonicalVarKind::PlaceholderTy(_ty) => unimplemented!(),
+ CanonicalVarKind::PlaceholderRegion(_ui) => unimplemented!(),
+ CanonicalVarKind::Ty(ty) => match ty {
+ CanonicalTyVarKind::General(ui) => chalk_ir::WithKind::new(
+ chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::General),
+ chalk_ir::UniverseIndex { counter: ui.index() },
+ ),
+ CanonicalTyVarKind::Int => chalk_ir::WithKind::new(
+ chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::Integer),
+ chalk_ir::UniverseIndex::root(),
+ ),
+ CanonicalTyVarKind::Float => chalk_ir::WithKind::new(
+ chalk_ir::VariableKind::Ty(chalk_ir::TyVariableKind::Float),
+ chalk_ir::UniverseIndex::root(),
+ ),
+ },
+ CanonicalVarKind::Region(ui) => chalk_ir::WithKind::new(
+ chalk_ir::VariableKind::Lifetime,
+ chalk_ir::UniverseIndex { counter: ui.index() },
+ ),
+ CanonicalVarKind::Const(_ui, _ty) => unimplemented!(),
+ CanonicalVarKind::PlaceholderConst(_pc, _ty) => unimplemented!(),
+ }),
+ ),
+ value: obligation.value.lower_into(interner),
+ },
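+        // Chalk's `universes` count includes the root universe, hence
+        // `max_universe + 1`.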
+ universes: max_universe + 1,
+ };
+
+ use chalk_solve::Solver;
+ let mut solver = chalk_engine::solve::SLGSolver::new(32, None);
+ let db = ChalkRustIrDatabase { interner };
+ debug!(?lowered_goal);
+ let solution = solver.solve(&db, &lowered_goal);
+ debug!(?obligation, ?solution, "evaluate goal");
+
+    // Ideally, the code to convert *back* to rustc types would live close to
+    // the code to convert *from* rustc types. Right now, though, we don't
+    // really need this, so it's minimal. We also currently treat a `Unique`
+    // solution the same as `Ambig(Definite)`, which really isn't right.
+ let make_solution = |subst: chalk_ir::Substitution<_>,
+ binders: chalk_ir::CanonicalVarKinds<_>| {
+ use rustc_middle::infer::canonical::CanonicalVarInfo;
+
+ let mut var_values: IndexVec<BoundVar, GenericArg<'tcx>> = IndexVec::new();
+ let mut reverse_param_substitutor = ReverseParamsSubstitutor::new(tcx, params);
+ subst.as_slice(interner).iter().for_each(|p| {
+ var_values.push(p.lower_into(interner).fold_with(&mut reverse_param_substitutor));
+ });
+ let variables: Vec<_> = binders
+ .iter(interner)
+ .map(|var| {
+ let kind = match var.kind {
+ chalk_ir::VariableKind::Ty(ty_kind) => CanonicalVarKind::Ty(match ty_kind {
+ chalk_ir::TyVariableKind::General => CanonicalTyVarKind::General(
+ ty::UniverseIndex::from_usize(var.skip_kind().counter),
+ ),
+ chalk_ir::TyVariableKind::Integer => CanonicalTyVarKind::Int,
+ chalk_ir::TyVariableKind::Float => CanonicalTyVarKind::Float,
+ }),
+ chalk_ir::VariableKind::Lifetime => CanonicalVarKind::Region(
+ ty::UniverseIndex::from_usize(var.skip_kind().counter),
+ ),
+ // FIXME(compiler-errors): We don't currently have a way of turning
+ // a Chalk ty back into a rustc ty, right?
+ chalk_ir::VariableKind::Const(_) => todo!(),
+ };
+ CanonicalVarInfo { kind }
+ })
+ .collect();
+ let max_universe = binders.iter(interner).map(|v| v.skip_kind().counter).max().unwrap_or(0);
+ let sol = Canonical {
+ max_universe: ty::UniverseIndex::from_usize(max_universe),
+ variables: tcx.intern_canonical_var_infos(&variables),
+ value: QueryResponse {
+ var_values: CanonicalVarValues { var_values },
+ region_constraints: QueryRegionConstraints::default(),
+ certainty: Certainty::Proven,
+ opaque_types: vec![],
+ value: (),
+ },
+ };
+ tcx.arena.alloc(sol)
+ };
+ solution
+ .map(|s| match s {
+ Solution::Unique(subst) => {
+ // FIXME(chalk): handle constraints
+ make_solution(subst.value.subst, subst.binders)
+ }
+ Solution::Ambig(guidance) => {
+ match guidance {
+ chalk_solve::Guidance::Definite(subst) => {
+ make_solution(subst.value, subst.binders)
+ }
+ chalk_solve::Guidance::Suggested(_) => unimplemented!(),
+ chalk_solve::Guidance::Unknown => {
+ // chalk_fulfill doesn't use the var_values here, so
+ // let's just ignore that
+ let sol = Canonical {
+ max_universe: ty::UniverseIndex::from_usize(0),
+ variables: obligation.variables,
+ value: QueryResponse {
+ var_values: CanonicalVarValues { var_values: IndexVec::new() }
+ .make_identity(tcx),
+ region_constraints: QueryRegionConstraints::default(),
+ certainty: Certainty::Ambiguous,
+ opaque_types: vec![],
+ value: (),
+ },
+ };
+ &*tcx.arena.alloc(sol)
+ }
+ }
+ }
+ })
+ .ok_or(traits::query::NoSolution)
+}
diff --git a/compiler/rustc_traits/src/dropck_outlives.rs b/compiler/rustc_traits/src/dropck_outlives.rs
new file mode 100644
index 000000000..a20de08b4
--- /dev/null
+++ b/compiler/rustc_traits/src/dropck_outlives.rs
@@ -0,0 +1,348 @@
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::canonical::{Canonical, QueryResponse};
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::TraitEngineExt as _;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::{self, EarlyBinder, ParamEnvAnd, Ty, TyCtxt};
+use rustc_span::source_map::{Span, DUMMY_SP};
+use rustc_trait_selection::traits::query::dropck_outlives::trivial_dropck_outlives;
+use rustc_trait_selection::traits::query::dropck_outlives::{
+ DropckConstraint, DropckOutlivesResult,
+};
+use rustc_trait_selection::traits::query::normalize::AtExt;
+use rustc_trait_selection::traits::query::{CanonicalTyGoal, NoSolution};
+use rustc_trait_selection::traits::{
+ Normalized, ObligationCause, TraitEngine, TraitEngineExt as _,
+};
+
+pub(crate) fn provide(p: &mut Providers) {
+ *p = Providers { dropck_outlives, adt_dtorck_constraint, ..*p };
+}
+
+fn dropck_outlives<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonical_goal: CanonicalTyGoal<'tcx>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, DropckOutlivesResult<'tcx>>>, NoSolution> {
+ debug!("dropck_outlives(goal={:#?})", canonical_goal);
+
+ tcx.infer_ctxt().enter_with_canonical(
+ DUMMY_SP,
+ &canonical_goal,
+ |ref infcx, goal, canonical_inference_vars| {
+ let tcx = infcx.tcx;
+ let ParamEnvAnd { param_env, value: for_ty } = goal;
+
+ let mut result = DropckOutlivesResult { kinds: vec![], overflows: vec![] };
+
+ // A stack of types left to process. Each round, we pop
+ // something from the stack and invoke
+ // `dtorck_constraint_for_ty`. This may produce new types that
+ // have to be pushed on the stack. This continues until we have explored
+ // all the reachable types from the type `for_ty`.
+ //
+ // Example: Imagine that we have the following code:
+ //
+ // ```rust
+ // struct A {
+ // value: B,
+ // children: Vec<A>,
+ // }
+ //
+ // struct B {
+ // value: u32
+ // }
+ //
+ // fn f() {
+ // let a: A = ...;
+ // ..
+ // } // here, `a` is dropped
+ // ```
+ //
+ // at the point where `a` is dropped, we need to figure out
+ // which types inside of `a` contain region data that may be
+ // accessed by any destructors in `a`. We begin by pushing `A`
+ // onto the stack, as that is the type of `a`. We will then
+ // invoke `dtorck_constraint_for_ty` which will expand `A`
+ // into the types of its fields `(B, Vec<A>)`. These will get
+ // pushed onto the stack. Eventually, expanding `Vec<A>` will
+ // lead to us trying to push `A` a second time -- to prevent
+ // infinite recursion, we notice that `A` was already pushed
+ // once and stop.
+ let mut ty_stack = vec![(for_ty, 0)];
+
+ // Set used to detect infinite recursion.
+ let mut ty_set = FxHashSet::default();
+
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+
+ let cause = ObligationCause::dummy();
+ let mut constraints = DropckConstraint::empty();
+ while let Some((ty, depth)) = ty_stack.pop() {
+ debug!(
+ "{} kinds, {} overflows, {} ty_stack",
+ result.kinds.len(),
+ result.overflows.len(),
+ ty_stack.len()
+ );
+ dtorck_constraint_for_ty(tcx, DUMMY_SP, for_ty, depth, ty, &mut constraints)?;
+
+ // "outlives" represent types/regions that may be touched
+ // by a destructor.
+ result.kinds.append(&mut constraints.outlives);
+ result.overflows.append(&mut constraints.overflows);
+
+ // If we have even one overflow, we should stop trying to evaluate further --
+ // chances are, the subsequent overflows for this evaluation won't provide useful
+ // information and will just decrease the speed at which we can emit these errors
+ // (since we'll be printing for just that much longer for the often enormous types
+ // that result here).
+ if !result.overflows.is_empty() {
+ break;
+ }
+
+ // dtorck types are "types that will get dropped but which
+ // do not themselves define a destructor", more or less. We have
+ // to push them onto the stack to be expanded.
+ for ty in constraints.dtorck_types.drain(..) {
+ match infcx.at(&cause, param_env).normalize(ty) {
+ Ok(Normalized { value: ty, obligations }) => {
+ fulfill_cx.register_predicate_obligations(infcx, obligations);
+
+ debug!("dropck_outlives: ty from dtorck_types = {:?}", ty);
+
+ match ty.kind() {
+ // All parameters live for the duration of the
+ // function.
+ ty::Param(..) => {}
+
+ // A projection that we couldn't resolve - it
+ // might have a destructor.
+ ty::Projection(..) | ty::Opaque(..) => {
+ result.kinds.push(ty.into());
+ }
+
+ _ => {
+ if ty_set.insert(ty) {
+ ty_stack.push((ty, depth + 1));
+ }
+ }
+ }
+ }
+
+ // We don't actually expect to fail to normalize.
+ // That implies a WF error somewhere else.
+ Err(NoSolution) => {
+ return Err(NoSolution);
+ }
+ }
+ }
+ }
+
+ debug!("dropck_outlives: result = {:#?}", result);
+
+ infcx.make_canonicalized_query_response(
+ canonical_inference_vars,
+ result,
+ &mut *fulfill_cx,
+ )
+ },
+ )
+}
+
+/// Returns a set of constraints that needs to be satisfied in
+/// order for `ty` to be valid for destruction.
+fn dtorck_constraint_for_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ for_ty: Ty<'tcx>,
+ depth: usize,
+ ty: Ty<'tcx>,
+ constraints: &mut DropckConstraint<'tcx>,
+) -> Result<(), NoSolution> {
+ debug!("dtorck_constraint_for_ty({:?}, {:?}, {:?}, {:?})", span, for_ty, depth, ty);
+
+ if !tcx.recursion_limit().value_within_limit(depth) {
+ constraints.overflows.push(ty);
+ return Ok(());
+ }
+
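+    // Fast path: `trivial_dropck_outlives` recognizes types (scalars,
+    // references, and so on) whose destruction imposes no constraints.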
+ if trivial_dropck_outlives(tcx, ty) {
+ return Ok(());
+ }
+
+ match ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Never
+ | ty::Foreign(..)
+ | ty::RawPtr(..)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::GeneratorWitness(..) => {
+ // these types never have a destructor
+ }
+
+ ty::Array(ety, _) | ty::Slice(ety) => {
+ // single-element containers, behave like their element
+ rustc_data_structures::stack::ensure_sufficient_stack(|| {
+ dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, *ety, constraints)
+ })?;
+ }
+
+ ty::Tuple(tys) => rustc_data_structures::stack::ensure_sufficient_stack(|| {
+ for ty in tys.iter() {
+ dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty, constraints)?;
+ }
+ Ok::<_, NoSolution>(())
+ })?,
+
+ ty::Closure(_, substs) => {
+ if !substs.as_closure().is_valid() {
+ // By the time this code runs, all type variables ought to
+ // be fully resolved.
+
+ tcx.sess.delay_span_bug(
+ span,
+ &format!("upvar_tys for closure not found. Expected capture information for closure {}", ty,),
+ );
+ return Err(NoSolution);
+ }
+
+ rustc_data_structures::stack::ensure_sufficient_stack(|| {
+ for ty in substs.as_closure().upvar_tys() {
+ dtorck_constraint_for_ty(tcx, span, for_ty, depth + 1, ty, constraints)?;
+ }
+ Ok::<_, NoSolution>(())
+ })?
+ }
+
+ ty::Generator(_, substs, _movability) => {
+ // rust-lang/rust#49918: types can be constructed, stored
+ // in the interior, and sit idle when generator yields
+ // (and is subsequently dropped).
+ //
+            // It would be nice to descend into the interior of a
+ // generator to determine what effects dropping it might
+ // have (by looking at any drop effects associated with
+ // its interior).
+ //
+ // However, the interior's representation uses things like
+ // GeneratorWitness that explicitly assume they are not
+ // traversed in such a manner. So instead, we will
+ // simplify things for now by treating all generators as
+            // if they were like trait objects, whose upvars must
+ // all be alive for the generator's (potential)
+ // destructor.
+ //
+ // In particular, skipping over `_interior` is safe
+ // because any side-effects from dropping `_interior` can
+ // only take place through references with lifetimes
+ // derived from lifetimes attached to the upvars and resume
+ // argument, and we *do* incorporate those here.
+
+ if !substs.as_generator().is_valid() {
+ // By the time this code runs, all type variables ought to
+ // be fully resolved.
+ tcx.sess.delay_span_bug(
+ span,
+ &format!("upvar_tys for generator not found. Expected capture information for generator {}", ty,),
+ );
+ return Err(NoSolution);
+ }
+
+ constraints.outlives.extend(
+ substs
+ .as_generator()
+ .upvar_tys()
+ .map(|t| -> ty::subst::GenericArg<'tcx> { t.into() }),
+ );
+ constraints.outlives.push(substs.as_generator().resume_ty().into());
+ }
+
+ ty::Adt(def, substs) => {
+ let DropckConstraint { dtorck_types, outlives, overflows } =
+ tcx.at(span).adt_dtorck_constraint(def.did())?;
+            // FIXME: we can try to recursively `dtorck_constraint_for_ty`
+ // there, but that needs some way to handle cycles.
+ constraints
+ .dtorck_types
+ .extend(dtorck_types.iter().map(|t| EarlyBinder(*t).subst(tcx, substs)));
+ constraints
+ .outlives
+ .extend(outlives.iter().map(|t| EarlyBinder(*t).subst(tcx, substs)));
+ constraints
+ .overflows
+ .extend(overflows.iter().map(|t| EarlyBinder(*t).subst(tcx, substs)));
+ }
+
+ // Objects must be alive in order for their destructor
+ // to be called.
+ ty::Dynamic(..) => {
+ constraints.outlives.push(ty.into());
+ }
+
+ // Types that can't be resolved. Pass them forward.
+ ty::Projection(..) | ty::Opaque(..) | ty::Param(..) => {
+ constraints.dtorck_types.push(ty);
+ }
+
+ ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error(_) => {
+ // By the time this code runs, all type variables ought to
+ // be fully resolved.
+ return Err(NoSolution);
+ }
+ }
+
+ Ok(())
+}
+
+/// Calculates the dtorck constraint for a type.
+pub(crate) fn adt_dtorck_constraint(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+) -> Result<&DropckConstraint<'_>, NoSolution> {
+ let def = tcx.adt_def(def_id);
+ let span = tcx.def_span(def_id);
+ debug!("dtorck_constraint: {:?}", def);
+
+ if def.is_phantom_data() {
+ // The first generic parameter here is guaranteed to be a type because it's
+ // `PhantomData`.
+ let substs = InternalSubsts::identity_for_item(tcx, def_id);
+ assert_eq!(substs.len(), 1);
+ let result = DropckConstraint {
+ outlives: vec![],
+ dtorck_types: vec![substs.type_at(0)],
+ overflows: vec![],
+ };
+ debug!("dtorck_constraint: {:?} => {:?}", def, result);
+ return Ok(tcx.arena.alloc(result));
+ }
+
+ let mut result = DropckConstraint::empty();
+ for field in def.all_fields() {
+ let fty = tcx.type_of(field.did);
+ dtorck_constraint_for_ty(tcx, span, fty, 0, fty, &mut result)?;
+ }
+ result.outlives.extend(tcx.destructor_constraints(def));
+ dedup_dtorck_constraint(&mut result);
+
+ debug!("dtorck_constraint: {:?} => {:?}", def, result);
+
+ Ok(tcx.arena.alloc(result))
+}
+
+fn dedup_dtorck_constraint(c: &mut DropckConstraint<'_>) {
+ let mut outlives = FxHashSet::default();
+ let mut dtorck_types = FxHashSet::default();
+
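+    // `HashSet::replace` returns the previously stored equal value, if any,
+    // so `is_none()` keeps only the first occurrence of each entry.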
+ c.outlives.retain(|&val| outlives.replace(val).is_none());
+ c.dtorck_types.retain(|&val| dtorck_types.replace(val).is_none());
+}
diff --git a/compiler/rustc_traits/src/evaluate_obligation.rs b/compiler/rustc_traits/src/evaluate_obligation.rs
new file mode 100644
index 000000000..49c9ba459
--- /dev/null
+++ b/compiler/rustc_traits/src/evaluate_obligation.rs
@@ -0,0 +1,34 @@
+use rustc_infer::infer::{DefiningAnchor, TyCtxtInferExt};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
+use rustc_span::source_map::DUMMY_SP;
+use rustc_trait_selection::traits::query::CanonicalPredicateGoal;
+use rustc_trait_selection::traits::{
+ EvaluationResult, Obligation, ObligationCause, OverflowError, SelectionContext, TraitQueryMode,
+};
+
+pub(crate) fn provide(p: &mut Providers) {
+ *p = Providers { evaluate_obligation, ..*p };
+}
+
+fn evaluate_obligation<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonical_goal: CanonicalPredicateGoal<'tcx>,
+) -> Result<EvaluationResult, OverflowError> {
+ debug!("evaluate_obligation(canonical_goal={:#?})", canonical_goal);
+    // HACK: This bubble is required for this test to pass:
+    // impl-trait/issue99642.rs
+ tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bubble).enter_with_canonical(
+ DUMMY_SP,
+ &canonical_goal,
+ |ref infcx, goal, _canonical_inference_vars| {
+ debug!("evaluate_obligation: goal={:#?}", goal);
+ let ParamEnvAnd { param_env, value: predicate } = goal;
+
+ let mut selcx = SelectionContext::with_query_mode(&infcx, TraitQueryMode::Canonical);
+ let obligation = Obligation::new(ObligationCause::dummy(), param_env, predicate);
+
+ selcx.evaluate_root_obligation(&obligation)
+ },
+ )
+}
diff --git a/compiler/rustc_traits/src/implied_outlives_bounds.rs b/compiler/rustc_traits/src/implied_outlives_bounds.rs
new file mode 100644
index 000000000..e3e78f70b
--- /dev/null
+++ b/compiler/rustc_traits/src/implied_outlives_bounds.rs
@@ -0,0 +1,172 @@
+//! Provider for the `implied_outlives_bounds` query.
+//! Do not call this query directly. See
+//! [`rustc_trait_selection::traits::query::type_op::implied_outlives_bounds`].
+
+use rustc_hir as hir;
+use rustc_infer::infer::canonical::{self, Canonical};
+use rustc_infer::infer::outlives::components::{push_outlives_components, Component};
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_infer::traits::query::OutlivesBound;
+use rustc_infer::traits::TraitEngineExt as _;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable};
+use rustc_span::source_map::DUMMY_SP;
+use rustc_trait_selection::infer::InferCtxtBuilderExt;
+use rustc_trait_selection::traits::query::{CanonicalTyGoal, Fallible, NoSolution};
+use rustc_trait_selection::traits::wf;
+use rustc_trait_selection::traits::{TraitEngine, TraitEngineExt};
+use smallvec::{smallvec, SmallVec};
+
+pub(crate) fn provide(p: &mut Providers) {
+ *p = Providers { implied_outlives_bounds, ..*p };
+}
+
+fn implied_outlives_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ goal: CanonicalTyGoal<'tcx>,
+) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>,
+ NoSolution,
+> {
+ tcx.infer_ctxt().enter_canonical_trait_query(&goal, |infcx, _fulfill_cx, key| {
+ let (param_env, ty) = key.into_parts();
+ compute_implied_outlives_bounds(&infcx, param_env, ty)
+ })
+}
+
+fn compute_implied_outlives_bounds<'tcx>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+) -> Fallible<Vec<OutlivesBound<'tcx>>> {
+ let tcx = infcx.tcx;
+
+ // Sometimes when we ask what it takes for T: WF, we get back that
+ // U: WF is required; in that case, we push U onto this stack and
+ // process it next. Because the resulting predicates aren't always
+    // guaranteed to be a subset of the original type, we need to store the
+ // WF args we've computed in a set.
+ let mut checked_wf_args = rustc_data_structures::fx::FxHashSet::default();
+ let mut wf_args = vec![ty.into()];
+
+ let mut implied_bounds = vec![];
+
+ let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(tcx);
+
+ while let Some(arg) = wf_args.pop() {
+ if !checked_wf_args.insert(arg) {
+ continue;
+ }
+
+ // Compute the obligations for `arg` to be well-formed. If `arg` is
+        // an unresolved inference variable, just substitute an empty set
+ // -- because the return type here is going to be things we *add*
+ // to the environment, it's always ok for this set to be smaller
+ // than the ultimate set. (Note: normally there won't be
+ // unresolved inference variables here anyway, but there might be
+ // during typeck under some circumstances.)
+ let obligations = wf::obligations(infcx, param_env, hir::CRATE_HIR_ID, 0, arg, DUMMY_SP)
+ .unwrap_or_default();
+
+ // N.B., all of these predicates *ought* to be easily proven
+ // true. In fact, their correctness is (mostly) implied by
+ // other parts of the program. However, in #42552, we had
+ // an annoying scenario where:
+ //
+ // - Some `T::Foo` gets normalized, resulting in a
+ // variable `_1` and a `T: Trait<Foo=_1>` constraint
+ // (not sure why it couldn't immediately get
+ // solved). This result of `_1` got cached.
+ // - These obligations were dropped on the floor here,
+ // rather than being registered.
+ // - Then later we would get a request to normalize
+ // `T::Foo` which would result in `_1` being used from
+ // the cache, but hence without the `T: Trait<Foo=_1>`
+ // constraint. As a result, `_1` never gets resolved,
+ // and we get an ICE (in dropck).
+ //
+ // Therefore, we register any predicates involving
+ // inference variables. We restrict ourselves to those
+ // involving inference variables both for efficiency and
+        // to avoid duplicate errors that otherwise show up.
+ fulfill_cx.register_predicate_obligations(
+ infcx,
+ obligations.iter().filter(|o| o.predicate.has_infer_types_or_consts()).cloned(),
+ );
+
+ // From the full set of obligations, just filter down to the
+ // region relationships.
+ implied_bounds.extend(obligations.into_iter().flat_map(|obligation| {
+ assert!(!obligation.has_escaping_bound_vars());
+ match obligation.predicate.kind().no_bound_vars() {
+ None => vec![],
+ Some(pred) => match pred {
+ ty::PredicateKind::Trait(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::Projection(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => vec![],
+ ty::PredicateKind::WellFormed(arg) => {
+ wf_args.push(arg);
+ vec![]
+ }
+
+ ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(r_a, r_b)) => {
+ vec![OutlivesBound::RegionSubRegion(r_b, r_a)]
+ }
+
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_a, r_b)) => {
+ let ty_a = infcx.resolve_vars_if_possible(ty_a);
+ let mut components = smallvec![];
+ push_outlives_components(tcx, ty_a, &mut components);
+ implied_bounds_from_components(r_b, components)
+ }
+ },
+ }
+ }));
+ }
+
+ // Ensure that those obligations that we had to solve
+ // get solved *here*.
+ match fulfill_cx.select_all_or_error(infcx).as_slice() {
+ [] => Ok(implied_bounds),
+ _ => Err(NoSolution),
+ }
+}
+
+/// When we have an implied bound that `T: 'a`, we can further break
+/// this down to determine what relationships would have to hold for
+/// `T: 'a` to hold. We get to assume that the caller has validated
+/// those relationships.
+fn implied_bounds_from_components<'tcx>(
+ sub_region: ty::Region<'tcx>,
+ sup_components: SmallVec<[Component<'tcx>; 4]>,
+) -> Vec<OutlivesBound<'tcx>> {
+ sup_components
+ .into_iter()
+ .filter_map(|component| {
+ match component {
+ Component::Region(r) => Some(OutlivesBound::RegionSubRegion(sub_region, r)),
+ Component::Param(p) => Some(OutlivesBound::RegionSubParam(sub_region, p)),
+ Component::Projection(p) => Some(OutlivesBound::RegionSubProjection(sub_region, p)),
+ Component::EscapingProjection(_) =>
+ // If the projection has escaping regions, don't
+ // try to infer any implied bounds even for its
+ // free components. This is conservative, because
+ // the caller will still have to prove that those
+ // free components outlive `sub_region`. But the
+ // idea is that the WAY that the caller proves
+ // that may change in the future and we want to
+ // give ourselves room to get smarter here.
+ {
+ None
+ }
+ Component::UnresolvedInferenceVariable(..) => None,
+ }
+ })
+ .collect()
+}
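
A minimal standalone sketch, in surface Rust, of the behavior this query implements (the function names `requires_outlives` and `implied` are illustrative, not part of this patch): well-formedness of an argument type such as `&'a T` hands the callee the outlives bound `T: 'a` without it being written.

// An explicit outlives bound, for contrast.
fn requires_outlives<'a, T: 'a>(x: &'a T) -> &'a T {
    x
}

// No `T: 'a` bound is written here: the well-formedness of `&'a T`
// implies `T: 'a`, which is the kind of `OutlivesBound` computed above.
fn implied<'a, T>(x: &'a T) -> &'a T {
    requires_outlives(x)
}

fn main() {
    let s = String::from("hi");
    assert_eq!(implied(&s), &s);
}
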
diff --git a/compiler/rustc_traits/src/lib.rs b/compiler/rustc_traits/src/lib.rs
new file mode 100644
index 000000000..2bea164c0
--- /dev/null
+++ b/compiler/rustc_traits/src/lib.rs
@@ -0,0 +1,32 @@
+//! New recursive solver modeled on Chalk's recursive solver. Most of
+//! the guts are broken up into modules; see the comments in those modules.
+
+#![feature(let_else)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate rustc_middle;
+
+mod chalk;
+mod dropck_outlives;
+mod evaluate_obligation;
+mod implied_outlives_bounds;
+mod normalize_erasing_regions;
+mod normalize_projection_ty;
+mod type_op;
+
+pub use type_op::{type_op_ascribe_user_type_with_span, type_op_prove_predicate_with_cause};
+
+use rustc_middle::ty::query::Providers;
+
+pub fn provide(p: &mut Providers) {
+ dropck_outlives::provide(p);
+ evaluate_obligation::provide(p);
+ implied_outlives_bounds::provide(p);
+ chalk::provide(p);
+ normalize_projection_ty::provide(p);
+ normalize_erasing_regions::provide(p);
+ type_op::provide(p);
+}
diff --git a/compiler/rustc_traits/src/normalize_erasing_regions.rs b/compiler/rustc_traits/src/normalize_erasing_regions.rs
new file mode 100644
index 000000000..5d394ed22
--- /dev/null
+++ b/compiler/rustc_traits/src/normalize_erasing_regions.rs
@@ -0,0 +1,73 @@
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::traits::query::NoSolution;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, ParamEnvAnd, TyCtxt, TypeFoldable};
+use rustc_trait_selection::traits::query::normalize::AtExt;
+use rustc_trait_selection::traits::{Normalized, ObligationCause};
+use std::sync::atomic::Ordering;
+
+pub(crate) fn provide(p: &mut Providers) {
+ *p = Providers {
+ try_normalize_generic_arg_after_erasing_regions: |tcx, goal| {
+ debug!("try_normalize_generic_arg_after_erasing_regions(goal={:#?}", goal);
+
+ tcx.sess
+ .perf_stats
+ .normalize_generic_arg_after_erasing_regions
+ .fetch_add(1, Ordering::Relaxed);
+
+ try_normalize_after_erasing_regions(tcx, goal)
+ },
+ try_normalize_mir_const_after_erasing_regions: |tcx, goal| {
+ try_normalize_after_erasing_regions(tcx, goal)
+ },
+ ..*p
+ };
+}
+
+fn try_normalize_after_erasing_regions<'tcx, T: TypeFoldable<'tcx> + PartialEq + Copy>(
+ tcx: TyCtxt<'tcx>,
+ goal: ParamEnvAnd<'tcx, T>,
+) -> Result<T, NoSolution> {
+ let ParamEnvAnd { param_env, value } = goal;
+ tcx.infer_ctxt().enter(|infcx| {
+ let cause = ObligationCause::dummy();
+ match infcx.at(&cause, param_env).normalize(value) {
+ Ok(Normalized { value: normalized_value, obligations: normalized_obligations }) => {
+ // We don't care about the `obligations`; they are
+ // always only region relations, and we are about to
+ // erase those anyway:
+ debug_assert_eq!(
+ normalized_obligations.iter().find(|p| not_outlives_predicate(p.predicate)),
+ None,
+ );
+
+ let resolved_value = infcx.resolve_vars_if_possible(normalized_value);
+ // It's unclear when `resolve_vars` would have an effect in a
+ // fresh `InferCtxt`. If this assert does trigger, it will give
+ // us a test case.
+ debug_assert_eq!(normalized_value, resolved_value);
+ let erased = infcx.tcx.erase_regions(resolved_value);
+ debug_assert!(!erased.needs_infer(), "{:?}", erased);
+ Ok(erased)
+ }
+ Err(NoSolution) => Err(NoSolution),
+ }
+ })
+}
+
+fn not_outlives_predicate<'tcx>(p: ty::Predicate<'tcx>) -> bool {
+ match p.kind().skip_binder() {
+ ty::PredicateKind::RegionOutlives(..) | ty::PredicateKind::TypeOutlives(..) => false,
+ ty::PredicateKind::Trait(..)
+ | ty::PredicateKind::Projection(..)
+ | ty::PredicateKind::WellFormed(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => true,
+ }
+}
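
As a hedged illustration in surface Rust (the function name `same_erased` is made up for this sketch), region erasure is sound here because two types that differ only in lifetimes are identical after erasure; normalization can only leave outlives obligations behind, and those are exactly what gets erased.

fn same_erased<'a, 'b>(x: &'a u32, y: &'b u32) -> bool {
    // After erasure, both parameters have the identical type `&u32`;
    // the `'a`/`'b` distinction exists only during type checking. This
    // is why the debug check above may assert that all remaining
    // obligations are outlives predicates.
    std::mem::size_of_val(&x) == std::mem::size_of_val(&y)
}

fn main() {
    assert!(same_erased(&1, &2));
}
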
diff --git a/compiler/rustc_traits/src/normalize_projection_ty.rs b/compiler/rustc_traits/src/normalize_projection_ty.rs
new file mode 100644
index 000000000..98bb42c9a
--- /dev/null
+++ b/compiler/rustc_traits/src/normalize_projection_ty.rs
@@ -0,0 +1,45 @@
+use rustc_infer::infer::canonical::{Canonical, QueryResponse};
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::TraitEngineExt as _;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{ParamEnvAnd, TyCtxt};
+use rustc_trait_selection::infer::InferCtxtBuilderExt;
+use rustc_trait_selection::traits::query::{
+ normalize::NormalizationResult, CanonicalProjectionGoal, NoSolution,
+};
+use rustc_trait_selection::traits::{self, ObligationCause, SelectionContext};
+use std::sync::atomic::Ordering;
+
+pub(crate) fn provide(p: &mut Providers) {
+ *p = Providers { normalize_projection_ty, ..*p };
+}
+
+fn normalize_projection_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ goal: CanonicalProjectionGoal<'tcx>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, NormalizationResult<'tcx>>>, NoSolution> {
+ debug!("normalize_provider(goal={:#?})", goal);
+
+ tcx.sess.perf_stats.normalize_projection_ty.fetch_add(1, Ordering::Relaxed);
+ tcx.infer_ctxt().enter_canonical_trait_query(
+ &goal,
+ |infcx, fulfill_cx, ParamEnvAnd { param_env, value: goal }| {
+ let selcx = &mut SelectionContext::new(infcx);
+ let cause = ObligationCause::dummy();
+ let mut obligations = vec![];
+ let answer = traits::normalize_projection_type(
+ selcx,
+ param_env,
+ goal,
+ cause,
+ 0,
+ &mut obligations,
+ );
+ fulfill_cx.register_predicate_obligations(infcx, obligations);
+ // FIXME(associated_const_equality): All users of normalize_projection_ty expect
+ // a type, but the result could now also be a const. Maybe change it to a Term
+ // later?
+ Ok(NormalizationResult { normalized_ty: answer.ty().unwrap() })
+ },
+ )
+}
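
A hedged surface-level example of the projection normalization this query performs (the helper `first` is illustrative, not rustc API): inside a generic body `I::Item` stays abstract, but at a concrete instantiation the projection resolves to a concrete type, corresponding to the `normalized_ty` returned above.

fn first<I: Iterator>(mut it: I) -> Option<I::Item> {
    // `I::Item` is an unresolved projection here.
    it.next()
}

fn main() {
    // Here `<std::vec::IntoIter<u8> as Iterator>::Item` normalizes to `u8`.
    let x: Option<u8> = first(vec![1u8, 2].into_iter());
    assert_eq!(x, Some(1));
}
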
diff --git a/compiler/rustc_traits/src/type_op.rs b/compiler/rustc_traits/src/type_op.rs
new file mode 100644
index 000000000..d895b647d
--- /dev/null
+++ b/compiler/rustc_traits/src/type_op.rs
@@ -0,0 +1,283 @@
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::at::ToTrace;
+use rustc_infer::infer::canonical::{Canonical, QueryResponse};
+use rustc_infer::infer::{DefiningAnchor, InferCtxt, TyCtxtInferExt};
+use rustc_infer::traits::TraitEngineExt as _;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::{GenericArg, Subst, UserSelfTy, UserSubsts};
+use rustc_middle::ty::{
+ self, EarlyBinder, FnSig, Lift, PolyFnSig, Ty, TyCtxt, TypeFoldable, Variance,
+};
+use rustc_middle::ty::{ParamEnv, ParamEnvAnd, Predicate, ToPredicate};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_trait_selection::infer::InferCtxtBuilderExt;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::query::normalize::AtExt;
+use rustc_trait_selection::traits::query::type_op::ascribe_user_type::AscribeUserType;
+use rustc_trait_selection::traits::query::type_op::eq::Eq;
+use rustc_trait_selection::traits::query::type_op::normalize::Normalize;
+use rustc_trait_selection::traits::query::type_op::prove_predicate::ProvePredicate;
+use rustc_trait_selection::traits::query::type_op::subtype::Subtype;
+use rustc_trait_selection::traits::query::{Fallible, NoSolution};
+use rustc_trait_selection::traits::{Normalized, Obligation, ObligationCause, TraitEngine};
+use std::fmt;
+
+pub(crate) fn provide(p: &mut Providers) {
+ *p = Providers {
+ type_op_ascribe_user_type,
+ type_op_eq,
+ type_op_prove_predicate,
+ type_op_subtype,
+ type_op_normalize_ty,
+ type_op_normalize_predicate,
+ type_op_normalize_fn_sig,
+ type_op_normalize_poly_fn_sig,
+ ..*p
+ };
+}
+
+fn type_op_ascribe_user_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, AscribeUserType<'tcx>>>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, ()>>, NoSolution> {
+ tcx.infer_ctxt().enter_canonical_trait_query(&canonicalized, |infcx, fulfill_cx, key| {
+ type_op_ascribe_user_type_with_span(infcx, fulfill_cx, key, None)
+ })
+}
+
+/// The core of the `type_op_ascribe_user_type` query: for diagnostic purposes in NLL HRTB errors,
+/// this query can be re-run to better track the span of the obligation cause, and improve the error
+/// message. Do not call directly unless you're in that very specific context.
+pub fn type_op_ascribe_user_type_with_span<'a, 'tcx: 'a>(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ fulfill_cx: &'a mut dyn TraitEngine<'tcx>,
+ key: ParamEnvAnd<'tcx, AscribeUserType<'tcx>>,
+ span: Option<Span>,
+) -> Result<(), NoSolution> {
+ let (param_env, AscribeUserType { mir_ty, def_id, user_substs }) = key.into_parts();
+ debug!(
+ "type_op_ascribe_user_type: mir_ty={:?} def_id={:?} user_substs={:?}",
+ mir_ty, def_id, user_substs
+ );
+
+ let mut cx = AscribeUserTypeCx { infcx, param_env, fulfill_cx };
+ cx.relate_mir_and_user_ty(mir_ty, def_id, user_substs, span)?;
+ Ok(())
+}
+
+struct AscribeUserTypeCx<'me, 'tcx> {
+ infcx: &'me InferCtxt<'me, 'tcx>,
+ param_env: ParamEnv<'tcx>,
+ fulfill_cx: &'me mut dyn TraitEngine<'tcx>,
+}
+
+impl<'me, 'tcx> AscribeUserTypeCx<'me, 'tcx> {
+ fn normalize<T>(&mut self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.infcx
+ .partially_normalize_associated_types_in(
+ ObligationCause::misc(DUMMY_SP, hir::CRATE_HIR_ID),
+ self.param_env,
+ value,
+ )
+ .into_value_registering_obligations(self.infcx, self.fulfill_cx)
+ }
+
+ fn relate<T>(&mut self, a: T, variance: Variance, b: T) -> Result<(), NoSolution>
+ where
+ T: ToTrace<'tcx>,
+ {
+ self.infcx
+ .at(&ObligationCause::dummy(), self.param_env)
+ .relate(a, variance, b)?
+ .into_value_registering_obligations(self.infcx, self.fulfill_cx);
+ Ok(())
+ }
+
+ fn prove_predicate(&mut self, predicate: Predicate<'tcx>, span: Option<Span>) {
+ let cause = if let Some(span) = span {
+ ObligationCause::dummy_with_span(span)
+ } else {
+ ObligationCause::dummy()
+ };
+ self.fulfill_cx.register_predicate_obligation(
+ self.infcx,
+ Obligation::new(cause, self.param_env, predicate),
+ );
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ fn subst<T>(&self, value: T, substs: &[GenericArg<'tcx>]) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ EarlyBinder(value).subst(self.tcx(), substs)
+ }
+
+ fn relate_mir_and_user_ty(
+ &mut self,
+ mir_ty: Ty<'tcx>,
+ def_id: DefId,
+ user_substs: UserSubsts<'tcx>,
+ span: Option<Span>,
+ ) -> Result<(), NoSolution> {
+ let UserSubsts { user_self_ty, substs } = user_substs;
+ let tcx = self.tcx();
+
+ let ty = tcx.type_of(def_id);
+ let ty = self.subst(ty, substs);
+ debug!("relate_type_and_user_type: ty of def-id is {:?}", ty);
+ let ty = self.normalize(ty);
+
+ self.relate(mir_ty, Variance::Invariant, ty)?;
+
+ // Prove the predicates coming along with `def_id`.
+ //
+ // Also, normalize the `instantiated_predicates`
+ // because otherwise we wind up with duplicate "type
+ // outlives" error messages.
+ let instantiated_predicates =
+ self.tcx().predicates_of(def_id).instantiate(self.tcx(), substs);
+ debug!(?instantiated_predicates.predicates);
+ for instantiated_predicate in instantiated_predicates.predicates {
+ let instantiated_predicate = self.normalize(instantiated_predicate);
+ self.prove_predicate(instantiated_predicate, span);
+ }
+
+ if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
+ let impl_self_ty = self.tcx().type_of(impl_def_id);
+ let impl_self_ty = self.subst(impl_self_ty, &substs);
+ let impl_self_ty = self.normalize(impl_self_ty);
+
+ self.relate(self_ty, Variance::Invariant, impl_self_ty)?;
+
+ self.prove_predicate(
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(impl_self_ty.into()))
+ .to_predicate(self.tcx()),
+ span,
+ );
+ }
+
+ // In addition to proving the predicates, we have to
+ // prove that `ty` is well-formed -- this is because
+ // the WF of `ty` is predicated on the substs being
+ // well-formed, and we haven't proven *that*. We don't
+ // want to prove the WF of types from `substs` directly because they
+ // haven't been normalized.
+ //
+ // FIXME(nmatsakis): Well, perhaps we should normalize
+ // them? This would only be relevant if some input
+ // type were ill-formed but did not appear in `ty`,
+ // which...could happen with normalization...
+ self.prove_predicate(
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(ty.into())).to_predicate(self.tcx()),
+ span,
+ );
+ Ok(())
+ }
+}
+
+fn type_op_eq<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, Eq<'tcx>>>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, ()>>, NoSolution> {
+ tcx.infer_ctxt().enter_canonical_trait_query(&canonicalized, |infcx, fulfill_cx, key| {
+ let (param_env, Eq { a, b }) = key.into_parts();
+ infcx
+ .at(&ObligationCause::dummy(), param_env)
+ .eq(a, b)?
+ .into_value_registering_obligations(infcx, fulfill_cx);
+ Ok(())
+ })
+}
+
+fn type_op_normalize<'tcx, T>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ fulfill_cx: &mut dyn TraitEngine<'tcx>,
+ key: ParamEnvAnd<'tcx, Normalize<T>>,
+) -> Fallible<T>
+where
+ T: fmt::Debug + TypeFoldable<'tcx> + Lift<'tcx>,
+{
+ let (param_env, Normalize { value }) = key.into_parts();
+ let Normalized { value, obligations } =
+ infcx.at(&ObligationCause::dummy(), param_env).normalize(value)?;
+ fulfill_cx.register_predicate_obligations(infcx, obligations);
+ Ok(value)
+}
+
+fn type_op_normalize_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, Normalize<Ty<'tcx>>>>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>, NoSolution> {
+ tcx.infer_ctxt().enter_canonical_trait_query(&canonicalized, type_op_normalize)
+}
+
+fn type_op_normalize_predicate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, Normalize<Predicate<'tcx>>>>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, Predicate<'tcx>>>, NoSolution> {
+ tcx.infer_ctxt().enter_canonical_trait_query(&canonicalized, type_op_normalize)
+}
+
+fn type_op_normalize_fn_sig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, Normalize<FnSig<'tcx>>>>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, FnSig<'tcx>>>, NoSolution> {
+ tcx.infer_ctxt().enter_canonical_trait_query(&canonicalized, type_op_normalize)
+}
+
+fn type_op_normalize_poly_fn_sig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, Normalize<PolyFnSig<'tcx>>>>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, PolyFnSig<'tcx>>>, NoSolution> {
+ tcx.infer_ctxt().enter_canonical_trait_query(&canonicalized, type_op_normalize)
+}
+
+fn type_op_subtype<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, Subtype<'tcx>>>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, ()>>, NoSolution> {
+ tcx.infer_ctxt().enter_canonical_trait_query(&canonicalized, |infcx, fulfill_cx, key| {
+ let (param_env, Subtype { sub, sup }) = key.into_parts();
+ infcx
+ .at(&ObligationCause::dummy(), param_env)
+ .sup(sup, sub)?
+ .into_value_registering_obligations(infcx, fulfill_cx);
+ Ok(())
+ })
+}
+
+fn type_op_prove_predicate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ canonicalized: Canonical<'tcx, ParamEnvAnd<'tcx, ProvePredicate<'tcx>>>,
+) -> Result<&'tcx Canonical<'tcx, QueryResponse<'tcx, ()>>, NoSolution> {
+ // HACK This bubble is required for this test to pass:
+ // impl-trait/issue-99642.rs
+ tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bubble).enter_canonical_trait_query(
+ &canonicalized,
+ |infcx, fulfill_cx, key| {
+ type_op_prove_predicate_with_cause(infcx, fulfill_cx, key, ObligationCause::dummy());
+ Ok(())
+ },
+ )
+}
+
+/// The core of the `type_op_prove_predicate` query: for diagnostic purposes in NLL HRTB errors,
+/// this query can be re-run to better track the span of the obligation cause, and improve the error
+/// message. Do not call directly unless you're in that very specific context.
+pub fn type_op_prove_predicate_with_cause<'a, 'tcx: 'a>(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ fulfill_cx: &'a mut dyn TraitEngine<'tcx>,
+ key: ParamEnvAnd<'tcx, ProvePredicate<'tcx>>,
+ cause: ObligationCause<'tcx>,
+) {
+ let (param_env, ProvePredicate { predicate }) = key.into_parts();
+ fulfill_cx.register_predicate_obligation(infcx, Obligation::new(cause, param_env, predicate));
+}
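
A hedged sketch in surface Rust of the relation `type_op_subtype` decides (the names below are illustrative): `&'static str` is a subtype of `&'a str` for any `'a`, so a longer-lived reference may flow wherever a shorter-lived one is expected; borrow checking discharges such facts through queries like the one above.

fn take_static(x: &'static str) -> &'static str {
    x
}

fn main() {
    let s: &'static str = "hi";
    // Subtyping: `&'static str <: &'_ str`, so this assignment is a
    // direct use of the subtype relation checked by the query.
    let shorter: &str = s;
    assert_eq!(shorter, take_static(s));
}
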
diff --git a/compiler/rustc_transmute/Cargo.toml b/compiler/rustc_transmute/Cargo.toml
new file mode 100644
index 000000000..9dc96e08a
--- /dev/null
+++ b/compiler/rustc_transmute/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+name = "rustc_transmute"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+tracing = "0.1"
+rustc_data_structures = { path = "../rustc_data_structures", optional = true}
+rustc_infer = { path = "../rustc_infer", optional = true}
+rustc_macros = { path = "../rustc_macros", optional = true}
+rustc_middle = { path = "../rustc_middle", optional = true}
+rustc_span = { path = "../rustc_span", optional = true}
+rustc_target = { path = "../rustc_target", optional = true}
+
+[features]
+rustc = [
+ "rustc_middle",
+ "rustc_data_structures",
+ "rustc_infer",
+ "rustc_macros",
+ "rustc_span",
+ "rustc_target",
+]
+
+[dev-dependencies]
+itertools = "0.10.1"
\ No newline at end of file
diff --git a/compiler/rustc_transmute/src/layout/dfa.rs b/compiler/rustc_transmute/src/layout/dfa.rs
new file mode 100644
index 000000000..b60ea6e7a
--- /dev/null
+++ b/compiler/rustc_transmute/src/layout/dfa.rs
@@ -0,0 +1,184 @@
+use super::{nfa, Byte, Nfa, Ref};
+use crate::Map;
+use std::fmt;
+use std::sync::atomic::{AtomicU32, Ordering};
+
+#[derive(PartialEq, Clone, Debug)]
+pub(crate) struct Dfa<R>
+where
+ R: Ref,
+{
+ pub(crate) transitions: Map<State, Transitions<R>>,
+ pub(crate) start: State,
+ pub(crate) accepting: State,
+}
+
+#[derive(PartialEq, Clone, Debug)]
+pub(crate) struct Transitions<R>
+where
+ R: Ref,
+{
+ byte_transitions: Map<Byte, State>,
+ ref_transitions: Map<R, State>,
+}
+
+impl<R> Default for Transitions<R>
+where
+ R: Ref,
+{
+ fn default() -> Self {
+ Self { byte_transitions: Map::default(), ref_transitions: Map::default() }
+ }
+}
+
+impl<R> Transitions<R>
+where
+ R: Ref,
+{
+ fn insert(&mut self, transition: Transition<R>, state: State) {
+ match transition {
+ Transition::Byte(b) => {
+ self.byte_transitions.insert(b, state);
+ }
+ Transition::Ref(r) => {
+ self.ref_transitions.insert(r, state);
+ }
+ }
+ }
+}
+
+/// The states in a `Dfa` represent byte offsets.
+#[derive(Hash, Eq, PartialEq, PartialOrd, Ord, Copy, Clone)]
+pub(crate) struct State(u32);
+
+#[derive(Hash, Eq, PartialEq, Clone, Copy)]
+pub(crate) enum Transition<R>
+where
+ R: Ref,
+{
+ Byte(Byte),
+ Ref(R),
+}
+
+impl fmt::Debug for State {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "S_{}", self.0)
+ }
+}
+
+impl<R> fmt::Debug for Transition<R>
+where
+ R: Ref,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self {
+ Self::Byte(b) => b.fmt(f),
+ Self::Ref(r) => r.fmt(f),
+ }
+ }
+}
+
+impl<R> Dfa<R>
+where
+ R: Ref,
+{
+ pub(crate) fn unit() -> Self {
+ let transitions: Map<State, Transitions<R>> = Map::default();
+ let start = State::new();
+ let accepting = start;
+
+ Self { transitions, start, accepting }
+ }
+
+ #[cfg(test)]
+ pub(crate) fn bool() -> Self {
+ let mut transitions: Map<State, Transitions<R>> = Map::default();
+ let start = State::new();
+ let accepting = State::new();
+
+ transitions.entry(start).or_default().insert(Transition::Byte(Byte::Init(0x00)), accepting);
+
+ transitions.entry(start).or_default().insert(Transition::Byte(Byte::Init(0x01)), accepting);
+
+ Self { transitions, start, accepting }
+ }
+
+ #[instrument(level = "debug")]
+ #[cfg_attr(feature = "rustc", allow(rustc::potential_query_instability))]
+ pub(crate) fn from_nfa(nfa: Nfa<R>) -> Self {
+ let Nfa { transitions: nfa_transitions, start: nfa_start, accepting: nfa_accepting } = nfa;
+
+ let mut dfa_transitions: Map<State, Transitions<R>> = Map::default();
+ let mut nfa_to_dfa: Map<nfa::State, State> = Map::default();
+ let dfa_start = State::new();
+ nfa_to_dfa.insert(nfa_start, dfa_start);
+
+ let mut queue = vec![(nfa_start, dfa_start)];
+
+ while let Some((nfa_state, dfa_state)) = queue.pop() {
+ if nfa_state == nfa_accepting {
+ continue;
+ }
+
+ for (nfa_transition, next_nfa_states) in nfa_transitions[&nfa_state].iter() {
+ let dfa_transitions =
+ dfa_transitions.entry(dfa_state).or_insert_with(Default::default);
+
+ let mapped_state = next_nfa_states.iter().find_map(|x| nfa_to_dfa.get(x).copied());
+
+ let next_dfa_state = match nfa_transition {
+ &nfa::Transition::Byte(b) => *dfa_transitions
+ .byte_transitions
+ .entry(b)
+ .or_insert_with(|| mapped_state.unwrap_or_else(State::new)),
+ &nfa::Transition::Ref(r) => *dfa_transitions
+ .ref_transitions
+ .entry(r)
+ .or_insert_with(|| mapped_state.unwrap_or_else(State::new)),
+ };
+
+ for &next_nfa_state in next_nfa_states {
+ nfa_to_dfa.entry(next_nfa_state).or_insert_with(|| {
+ queue.push((next_nfa_state, next_dfa_state));
+ next_dfa_state
+ });
+ }
+ }
+ }
+
+ let dfa_accepting = nfa_to_dfa[&nfa_accepting];
+
+ Self { transitions: dfa_transitions, start: dfa_start, accepting: dfa_accepting }
+ }
+
+ pub(crate) fn bytes_from(&self, start: State) -> Option<&Map<Byte, State>> {
+ Some(&self.transitions.get(&start)?.byte_transitions)
+ }
+
+ pub(crate) fn byte_from(&self, start: State, byte: Byte) -> Option<State> {
+ self.transitions.get(&start)?.byte_transitions.get(&byte).copied()
+ }
+
+ pub(crate) fn refs_from(&self, start: State) -> Option<&Map<R, State>> {
+ Some(&self.transitions.get(&start)?.ref_transitions)
+ }
+}
+
+impl State {
+ pub(crate) fn new() -> Self {
+ static COUNTER: AtomicU32 = AtomicU32::new(0);
+ Self(COUNTER.fetch_add(1, Ordering::SeqCst))
+ }
+}
+
+impl<R> From<nfa::Transition<R>> for Transition<R>
+where
+ R: Ref,
+{
+ fn from(nfa_transition: nfa::Transition<R>) -> Self {
+ match nfa_transition {
+ nfa::Transition::Byte(byte) => Transition::Byte(byte),
+ nfa::Transition::Ref(r) => Transition::Ref(r),
+ }
+ }
+}
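
For comparison, a self-contained sketch of textbook powerset determinization over a byte-labelled NFA (the map types and the `toy_determinize` name are invented for this example). `Dfa::from_nfa` above takes a shortcut suited to these layout automata, mapping each NFA state to a single fresh DFA state rather than tracking full state sets.

use std::collections::{BTreeMap, BTreeSet};

type NfaT = BTreeMap<u32, BTreeMap<u8, BTreeSet<u32>>>;
type DfaT = BTreeMap<BTreeSet<u32>, BTreeMap<u8, BTreeSet<u32>>>;

fn toy_determinize(nfa: &NfaT, start: u32) -> DfaT {
    let mut dfa = DfaT::new();
    let mut work = vec![BTreeSet::from([start])];
    while let Some(states) = work.pop() {
        if dfa.contains_key(&states) {
            continue;
        }
        // Merge the outgoing edges of every NFA state in this set.
        let mut edges: BTreeMap<u8, BTreeSet<u32>> = BTreeMap::new();
        for s in &states {
            if let Some(ts) = nfa.get(s) {
                for (&byte, dests) in ts {
                    edges.entry(byte).or_default().extend(dests);
                }
            }
        }
        // Each successor set becomes a DFA state to process.
        work.extend(edges.values().cloned());
        dfa.insert(states, edges);
    }
    dfa
}

fn main() {
    let mut nfa = NfaT::new();
    nfa.entry(0).or_default().entry(0x00).or_default().extend([1u32, 2]);
    let dfa = toy_determinize(&nfa, 0);
    let start_set = BTreeSet::from([0u32]);
    assert_eq!(dfa[&start_set][&0x00], BTreeSet::from([1, 2]));
}
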
diff --git a/compiler/rustc_transmute/src/layout/mod.rs b/compiler/rustc_transmute/src/layout/mod.rs
new file mode 100644
index 000000000..07035ebdf
--- /dev/null
+++ b/compiler/rustc_transmute/src/layout/mod.rs
@@ -0,0 +1,71 @@
+use std::fmt::{self, Debug};
+use std::hash::Hash;
+
+pub(crate) mod tree;
+pub(crate) use tree::Tree;
+
+pub(crate) mod nfa;
+pub(crate) use nfa::Nfa;
+
+pub(crate) mod dfa;
+pub(crate) use dfa::Dfa;
+
+#[derive(Debug)]
+pub(crate) struct Uninhabited;
+
+/// An instance of a byte is either initialized to a particular value, or uninitialized.
+#[derive(Hash, Eq, PartialEq, Clone, Copy)]
+pub(crate) enum Byte {
+ Uninit,
+ Init(u8),
+}
+
+impl fmt::Debug for Byte {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self {
+ Self::Uninit => f.write_str("??u8"),
+ Self::Init(b) => write!(f, "{:#04x}u8", b),
+ }
+ }
+}
+
+pub(crate) trait Def: Debug + Hash + Eq + PartialEq + Copy + Clone {}
+pub trait Ref: Debug + Hash + Eq + PartialEq + Copy + Clone {}
+
+impl Def for ! {}
+impl Ref for ! {}
+
+#[cfg(feature = "rustc")]
+pub(crate) mod rustc {
+ use rustc_middle::mir::Mutability;
+ use rustc_middle::ty;
+ use rustc_middle::ty::Region;
+ use rustc_middle::ty::Ty;
+
+ /// A reference in the layout.
+ #[derive(Debug, Hash, Eq, PartialEq, PartialOrd, Ord, Clone, Copy)]
+ pub struct Ref<'tcx> {
+ lifetime: Region<'tcx>,
+ ty: Ty<'tcx>,
+ mutability: Mutability,
+ }
+
+ impl<'tcx> super::Ref for Ref<'tcx> {}
+
+ impl<'tcx> Ref<'tcx> {
+ pub fn min_align(&self) -> usize {
+ todo!()
+ }
+ }
+
+ /// A visibility node in the layout.
+ #[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)]
+ pub enum Def<'tcx> {
+ Adt(ty::AdtDef<'tcx>),
+ Variant(&'tcx ty::VariantDef),
+ Field(&'tcx ty::FieldDef),
+ Primitive,
+ }
+
+ impl<'tcx> super::Def for Def<'tcx> {}
+}
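
A hedged user-level illustration of the `Init`/`Uninit` split in `Byte` (plain Rust, not part of this patch): an uninitialized byte carries no value at all, so a layout that accepts `??u8` accepts strictly more than one that enumerates all 256 initialized patterns.

use std::mem::MaybeUninit;

fn main() {
    // Valid storage at type `MaybeUninit<u8>` even though no `u8`
    // value has been written: this is what `Byte::Uninit` models.
    let uninit: MaybeUninit<u8> = MaybeUninit::uninit();
    let init: MaybeUninit<u8> = MaybeUninit::new(0xAB);
    // Reading `init` is fine; reading `uninit` as `u8` would be
    // undefined behavior, which is why `Uninit` is not just another
    // initialized value.
    assert_eq!(unsafe { init.assume_init() }, 0xAB);
    let _ = uninit;
}
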
diff --git a/compiler/rustc_transmute/src/layout/nfa.rs b/compiler/rustc_transmute/src/layout/nfa.rs
new file mode 100644
index 000000000..f25e3c1fd
--- /dev/null
+++ b/compiler/rustc_transmute/src/layout/nfa.rs
@@ -0,0 +1,179 @@
+use super::{Byte, Ref, Tree, Uninhabited};
+use crate::{Map, Set};
+use std::fmt;
+use std::sync::atomic::{AtomicU32, Ordering};
+
+/// A non-deterministic finite automaton (NFA) that represents the layout of a type.
+/// The transmutability of two given types is computed by comparing their `Nfa`s.
+#[derive(PartialEq, Debug)]
+pub(crate) struct Nfa<R>
+where
+ R: Ref,
+{
+ pub(crate) transitions: Map<State, Map<Transition<R>, Set<State>>>,
+ pub(crate) start: State,
+ pub(crate) accepting: State,
+}
+
+/// The states in a `Nfa` represent byte offsets.
+#[derive(Hash, Eq, PartialEq, PartialOrd, Ord, Copy, Clone)]
+pub(crate) struct State(u32);
+
+/// The transitions between states in a `Nfa` reflect bit validity.
+#[derive(Hash, Eq, PartialEq, Clone, Copy)]
+pub(crate) enum Transition<R>
+where
+ R: Ref,
+{
+ Byte(Byte),
+ Ref(R),
+}
+
+impl fmt::Debug for State {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "S_{}", self.0)
+ }
+}
+
+impl<R> fmt::Debug for Transition<R>
+where
+ R: Ref,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match &self {
+ Self::Byte(b) => b.fmt(f),
+ Self::Ref(r) => r.fmt(f),
+ }
+ }
+}
+
+impl<R> Nfa<R>
+where
+ R: Ref,
+{
+ pub(crate) fn unit() -> Self {
+ let transitions: Map<State, Map<Transition<R>, Set<State>>> = Map::default();
+ let start = State::new();
+ let accepting = start;
+
+ Nfa { transitions, start, accepting }
+ }
+
+ pub(crate) fn from_byte(byte: Byte) -> Self {
+ let mut transitions: Map<State, Map<Transition<R>, Set<State>>> = Map::default();
+ let start = State::new();
+ let accepting = State::new();
+
+ let source = transitions.entry(start).or_default();
+ let edge = source.entry(Transition::Byte(byte)).or_default();
+ edge.insert(accepting);
+
+ Nfa { transitions, start, accepting }
+ }
+
+ pub(crate) fn from_ref(r: R) -> Self {
+ let mut transitions: Map<State, Map<Transition<R>, Set<State>>> = Map::default();
+ let start = State::new();
+ let accepting = State::new();
+
+ let source = transitions.entry(start).or_default();
+ let edge = source.entry(Transition::Ref(r)).or_default();
+ edge.insert(accepting);
+
+ Nfa { transitions, start, accepting }
+ }
+
+ pub(crate) fn from_tree(tree: Tree<!, R>) -> Result<Self, Uninhabited> {
+ Ok(match tree {
+ Tree::Byte(b) => Self::from_byte(b),
+ Tree::Def(..) => unreachable!(),
+ Tree::Ref(r) => Self::from_ref(r),
+ Tree::Alt(alts) => {
+ let mut alts = alts.into_iter().map(Self::from_tree);
+ let mut nfa = alts.next().ok_or(Uninhabited)??;
+ for alt in alts {
+ nfa = nfa.union(alt?);
+ }
+ nfa
+ }
+ Tree::Seq(elts) => {
+ let mut nfa = Self::unit();
+ for elt in elts.into_iter().map(Self::from_tree) {
+ nfa = nfa.concat(elt?);
+ }
+ nfa
+ }
+ })
+ }
+
+ /// Concatenate two `Nfa`s.
+ pub(crate) fn concat(self, other: Self) -> Self {
+ if self.start == self.accepting {
+ return other;
+ } else if other.start == other.accepting {
+ return self;
+ }
+
+ let start = self.start;
+ let accepting = other.accepting;
+
+ let mut transitions: Map<State, Map<Transition<R>, Set<State>>> = self.transitions;
+
+ // the iteration order doesn't matter
+ #[cfg_attr(feature = "rustc", allow(rustc::potential_query_instability))]
+ for (source, transition) in other.transitions {
+ let fix_state = |state| if state == other.start { self.accepting } else { state };
+ let entry = transitions.entry(fix_state(source)).or_default();
+ for (edge, destinations) in transition {
+ let entry = entry.entry(edge.clone()).or_default();
+ for destination in destinations {
+ entry.insert(fix_state(destination));
+ }
+ }
+ }
+
+ Self { transitions, start, accepting }
+ }
+
+ /// Compute the union of two `Nfa`s.
+ pub(crate) fn union(self, other: Self) -> Self {
+ let start = self.start;
+ let accepting = self.accepting;
+
+ let mut transitions: Map<State, Map<Transition<R>, Set<State>>> = self.transitions.clone();
+
+ // the iteration order doesn't matter
+ #[cfg_attr(feature = "rustc", allow(rustc::potential_query_instability))]
+ for (&(mut source), transition) in other.transitions.iter() {
+ // if source is starting state of `other`, replace with starting state of `self`
+ if source == other.start {
+ source = self.start;
+ }
+ let entry = transitions.entry(source).or_default();
+ for (edge, destinations) in transition {
+ let entry = entry.entry(edge.clone()).or_default();
+ // the iteration order doesn't matter
+ #[cfg_attr(feature = "rustc", allow(rustc::potential_query_instability))]
+ for &(mut destination) in destinations {
+ // if dest is accepting state of `other`, replace with accepting state of `self`
+ if destination == other.accepting {
+ destination = self.accepting;
+ }
+ entry.insert(destination);
+ }
+ }
+ }
+ Self { transitions, start, accepting }
+ }
+
+ pub(crate) fn edges_from(&self, start: State) -> Option<&Map<Transition<R>, Set<State>>> {
+ self.transitions.get(&start)
+ }
+}
+
+impl State {
+ pub(crate) fn new() -> Self {
+ static COUNTER: AtomicU32 = AtomicU32::new(0);
+ Self(COUNTER.fetch_add(1, Ordering::SeqCst))
+ }
+}
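
A hedged toy model of the `union` strategy above (the map types and the `toy_union` name are invented for this sketch): keep `self`'s start and accepting states, and while copying the other automaton's edges, redirect its start onto `self.start` and its accepting state onto `self.accepting`.

use std::collections::{BTreeMap, BTreeSet};

fn toy_union(
    a: &BTreeMap<u32, BTreeMap<u8, BTreeSet<u32>>>,
    b: &BTreeMap<u32, BTreeMap<u8, BTreeSet<u32>>>,
    (a_start, a_accept): (u32, u32),
    (b_start, b_accept): (u32, u32),
) -> BTreeMap<u32, BTreeMap<u8, BTreeSet<u32>>> {
    let mut out = a.clone();
    for (&src, edges) in b {
        // Fuse `b`'s start state onto `a`'s start state.
        let src = if src == b_start { a_start } else { src };
        for (&byte, dests) in edges {
            let entry = out.entry(src).or_default().entry(byte).or_default();
            for &d in dests {
                // Fuse `b`'s accepting state onto `a`'s accepting state.
                entry.insert(if d == b_accept { a_accept } else { d });
            }
        }
    }
    out
}

fn main() {
    // bool's layout: union of "one 0x00 byte" and "one 0x01 byte".
    let zero = BTreeMap::from([(0, BTreeMap::from([(0x00u8, BTreeSet::from([1u32]))]))]);
    let one = BTreeMap::from([(2, BTreeMap::from([(0x01u8, BTreeSet::from([3u32]))]))]);
    let both = toy_union(&zero, &one, (0, 1), (2, 3));
    assert_eq!(both[&0].len(), 2); // two outgoing byte edges from start
}
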
diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs
new file mode 100644
index 000000000..70b3ba02b
--- /dev/null
+++ b/compiler/rustc_transmute/src/layout/tree.rs
@@ -0,0 +1,471 @@
+use super::{Byte, Def, Ref};
+
+#[cfg(test)]
+mod tests;
+
+/// A tree-based representation of a type layout.
+///
+/// Invariants:
+/// 1. All paths through the layout have the same length (in bytes).
+///
+/// Nice-to-haves:
+/// 1. An `Alt` is never directly nested beneath another `Alt`.
+/// 2. A `Seq` is never directly nested beneath another `Seq`.
+/// 3. `Seq`s and `Alt`s with a single member do not exist.
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub(crate) enum Tree<D, R>
+where
+ D: Def,
+ R: Ref,
+{
+ /// A sequence of successive layouts.
+ Seq(Vec<Self>),
+ /// A choice between alternative layouts.
+ Alt(Vec<Self>),
+ /// A definition node.
+ Def(D),
+ /// A reference node.
+ Ref(R),
+ /// A byte node.
+ Byte(Byte),
+}
+
+impl<D, R> Tree<D, R>
+where
+ D: Def,
+ R: Ref,
+{
+ /// A `Tree` consisting only of a definition node.
+ pub(crate) fn def(def: D) -> Self {
+ Self::Def(def)
+ }
+
+ /// A `Tree` representing an uninhabited type.
+ pub(crate) fn uninhabited() -> Self {
+ Self::Alt(vec![])
+ }
+
+ /// A `Tree` representing a zero-sized type.
+ pub(crate) fn unit() -> Self {
+ Self::Seq(Vec::new())
+ }
+
+ /// A `Tree` containing a single, uninitialized byte.
+ pub(crate) fn uninit() -> Self {
+ Self::Byte(Byte::Uninit)
+ }
+
+ /// A `Tree` representing the layout of `bool`.
+ pub(crate) fn bool() -> Self {
+ Self::from_bits(0x00).or(Self::from_bits(0x01))
+ }
+
+ /// A `Tree` whose layout matches that of a `u8`.
+ pub(crate) fn u8() -> Self {
+ Self::Alt((0u8..=255).map(Self::from_bits).collect())
+ }
+
+ /// A `Tree` whose layout accepts exactly the given bit pattern.
+ pub(crate) fn from_bits(bits: u8) -> Self {
+ Self::Byte(Byte::Init(bits))
+ }
+
+ /// A `Tree` whose layout is a number of the given width.
+ pub(crate) fn number(width_in_bytes: usize) -> Self {
+ Self::Seq(vec![Self::u8(); width_in_bytes])
+ }
+
+ /// A `Tree` whose layout is entirely padding of the given width.
+ pub(crate) fn padding(width_in_bytes: usize) -> Self {
+ Self::Seq(vec![Self::uninit(); width_in_bytes])
+ }
+
+ /// Remove all `Def` nodes, and all branches of the layout for which `f` produces false.
+ pub(crate) fn prune<F>(self, f: &F) -> Tree<!, R>
+ where
+ F: Fn(D) -> bool,
+ {
+ match self {
+ Self::Seq(elts) => elts
+ .into_iter()
+ .map(|elt| elt.prune(f))
+ .try_fold(Tree::unit(), |elts, elt| {
+ if elt == Tree::uninhabited() {
+ Err(Tree::uninhabited())
+ } else {
+ Ok(elts.then(elt))
+ }
+ })
+ .into_ok_or_err(),
+ Self::Alt(alts) => alts
+ .into_iter()
+ .map(|alt| alt.prune(f))
+ .fold(Tree::uninhabited(), |alts, alt| alts.or(alt)),
+ Self::Byte(b) => Tree::Byte(b),
+ Self::Ref(r) => Tree::Ref(r),
+ Self::Def(d) => {
+ if !f(d) {
+ Tree::uninhabited()
+ } else {
+ Tree::unit()
+ }
+ }
+ }
+ }
+
+ /// Returns `true` if this `Tree` represents an inhabited type; otherwise `false`.
+ pub(crate) fn is_inhabited(&self) -> bool {
+ match self {
+ Self::Seq(elts) => elts.into_iter().all(|elt| elt.is_inhabited()),
+ Self::Alt(alts) => alts.into_iter().any(|alt| alt.is_inhabited()),
+ Self::Byte(..) | Self::Ref(..) | Self::Def(..) => true,
+ }
+ }
+}
+
+impl<D, R> Tree<D, R>
+where
+ D: Def,
+ R: Ref,
+{
+ /// Produces a new `Tree` where `other` is sequenced after `self`.
+ pub(crate) fn then(self, other: Self) -> Self {
+ match (self, other) {
+ (Self::Seq(elts), other) | (other, Self::Seq(elts)) if elts.len() == 0 => other,
+ (Self::Seq(mut lhs), Self::Seq(mut rhs)) => {
+ lhs.append(&mut rhs);
+ Self::Seq(lhs)
+ }
+ (Self::Seq(mut lhs), rhs) => {
+ lhs.push(rhs);
+ Self::Seq(lhs)
+ }
+ (lhs, Self::Seq(mut rhs)) => {
+ rhs.insert(0, lhs);
+ Self::Seq(rhs)
+ }
+ (lhs, rhs) => Self::Seq(vec![lhs, rhs]),
+ }
+ }
+
+ /// Produces a new `Tree` accepting either `self` or `other` as alternative layouts.
+ pub(crate) fn or(self, other: Self) -> Self {
+ match (self, other) {
+ (Self::Alt(alts), other) | (other, Self::Alt(alts)) if alts.len() == 0 => other,
+ (Self::Alt(mut lhs), Self::Alt(rhs)) => {
+ lhs.extend(rhs);
+ Self::Alt(lhs)
+ }
+ (Self::Alt(mut alts), alt) | (alt, Self::Alt(mut alts)) => {
+ alts.push(alt);
+ Self::Alt(alts)
+ }
+ (lhs, rhs) => Self::Alt(vec![lhs, rhs]),
+ }
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub(crate) enum Err {
+ /// The layout of the type is unspecified.
+ Unspecified,
+ /// This error will be surfaced elsewhere by rustc, so don't surface it.
+ Unknown,
+}
+
+#[cfg(feature = "rustc")]
+pub(crate) mod rustc {
+ use super::{Err, Tree};
+ use crate::layout::rustc::{Def, Ref};
+
+ use rustc_middle::ty;
+ use rustc_middle::ty::layout::LayoutError;
+ use rustc_middle::ty::util::Discr;
+ use rustc_middle::ty::AdtDef;
+ use rustc_middle::ty::ParamEnv;
+ use rustc_middle::ty::SubstsRef;
+ use rustc_middle::ty::Ty;
+ use rustc_middle::ty::TyCtxt;
+ use rustc_middle::ty::VariantDef;
+ use rustc_target::abi::Align;
+ use std::alloc;
+
+ impl<'tcx> From<LayoutError<'tcx>> for Err {
+ fn from(err: LayoutError<'tcx>) -> Self {
+ match err {
+ LayoutError::Unknown(..) => Self::Unknown,
+ err => unimplemented!("{:?}", err),
+ }
+ }
+ }
+
+ trait LayoutExt {
+ fn clamp_align(&self, min_align: Align, max_align: Align) -> Self;
+ }
+
+ impl LayoutExt for alloc::Layout {
+ fn clamp_align(&self, min_align: Align, max_align: Align) -> Self {
+ let min_align = min_align.bytes().try_into().unwrap();
+ let max_align = max_align.bytes().try_into().unwrap();
+ Self::from_size_align(self.size(), self.align().clamp(min_align, max_align)).unwrap()
+ }
+ }
+
+ struct LayoutSummary {
+ total_align: Align,
+ total_size: usize,
+ discriminant_size: usize,
+ discriminant_align: Align,
+ }
+
+ impl LayoutSummary {
+ fn from_ty<'tcx>(ty: Ty<'tcx>, ctx: TyCtxt<'tcx>) -> Result<Self, LayoutError<'tcx>> {
+ use rustc_middle::ty::ParamEnvAnd;
+ use rustc_target::abi::{TyAndLayout, Variants};
+
+ let param_env = ParamEnv::reveal_all();
+ let param_env_and_type = ParamEnvAnd { param_env, value: ty };
+ let TyAndLayout { layout, .. } = ctx.layout_of(param_env_and_type)?;
+
+ let total_size: usize = layout.size().bytes_usize();
+ let total_align: Align = layout.align().abi;
+ let discriminant_align: Align;
+ let discriminant_size: usize;
+
+ if let Variants::Multiple { tag, .. } = layout.variants() {
+ discriminant_align = tag.align(&ctx).abi;
+ discriminant_size = tag.size(&ctx).bytes_usize();
+ } else {
+ discriminant_align = Align::ONE;
+ discriminant_size = 0;
+ };
+
+ Ok(Self { total_align, total_size, discriminant_align, discriminant_size })
+ }
+
+ fn into(&self) -> alloc::Layout {
+ alloc::Layout::from_size_align(
+ self.total_size,
+ self.total_align.bytes().try_into().unwrap(),
+ )
+ .unwrap()
+ }
+ }
+
+ impl<'tcx> Tree<Def<'tcx>, Ref<'tcx>> {
+ pub fn from_ty(ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Result<Self, Err> {
+ use rustc_middle::ty::FloatTy::*;
+ use rustc_middle::ty::IntTy::*;
+ use rustc_middle::ty::UintTy::*;
+ use rustc_target::abi::HasDataLayout;
+
+ let target = tcx.data_layout();
+
+ match ty.kind() {
+ ty::Bool => Ok(Self::bool()),
+
+ ty::Int(I8) | ty::Uint(U8) => Ok(Self::u8()),
+ ty::Int(I16) | ty::Uint(U16) => Ok(Self::number(2)),
+ ty::Int(I32) | ty::Uint(U32) | ty::Float(F32) => Ok(Self::number(4)),
+ ty::Int(I64) | ty::Uint(U64) | ty::Float(F64) => Ok(Self::number(8)),
+ ty::Int(I128) | ty::Uint(U128) => Ok(Self::number(16)),
+ ty::Int(Isize) | ty::Uint(Usize) => {
+ Ok(Self::number(target.pointer_size.bytes_usize()))
+ }
+
+ ty::Tuple(members) => {
+ if members.len() == 0 {
+ Ok(Tree::unit())
+ } else {
+ Err(Err::Unspecified)
+ }
+ }
+
+ ty::Array(ty, len) => {
+ let len = len.try_eval_usize(tcx, ParamEnv::reveal_all()).unwrap();
+ let elt = Tree::from_ty(*ty, tcx)?;
+ Ok(std::iter::repeat(elt)
+ .take(len as usize)
+ .fold(Tree::unit(), |tree, elt| tree.then(elt)))
+ }
+
+ ty::Adt(adt_def, substs_ref) => {
+ use rustc_middle::ty::AdtKind;
+
+ // If the layout is ill-specified, halt.
+ if !(adt_def.repr().c() || adt_def.repr().int.is_some()) {
+ return Err(Err::Unspecified);
+ }
+
+ // Compute a summary of the type's layout.
+ let layout_summary = LayoutSummary::from_ty(ty, tcx)?;
+
+ // The layout begins with this adt's visibility.
+ let vis = Self::def(Def::Adt(*adt_def));
+
+ // And is followed by the layout(s) of its variants.
+ Ok(vis.then(match adt_def.adt_kind() {
+ AdtKind::Struct => Self::from_repr_c_variant(
+ ty,
+ *adt_def,
+ substs_ref,
+ &layout_summary,
+ None,
+ adt_def.non_enum_variant(),
+ tcx,
+ )?,
+ AdtKind::Enum => {
+ tracing::trace!(?adt_def, "treeifying enum");
+ let mut tree = Tree::uninhabited();
+
+ for (idx, discr) in adt_def.discriminants(tcx) {
+ tree = tree.or(Self::from_repr_c_variant(
+ ty,
+ *adt_def,
+ substs_ref,
+ &layout_summary,
+ Some(discr),
+ adt_def.variant(idx),
+ tcx,
+ )?);
+ }
+
+ tree
+ }
+ AdtKind::Union => {
+ // is the layout well-defined?
+ if !adt_def.repr().c() {
+ return Err(Err::Unspecified);
+ }
+
+ let ty_layout = layout_of(tcx, ty)?;
+
+ let mut tree = Tree::uninhabited();
+
+ for field in adt_def.all_fields() {
+ let variant_ty = field.ty(tcx, substs_ref);
+ let variant_layout = layout_of(tcx, variant_ty)?;
+ let padding_needed = ty_layout.size() - variant_layout.size();
+ let variant = Self::def(Def::Field(field))
+ .then(Self::from_ty(variant_ty, tcx)?)
+ .then(Self::padding(padding_needed));
+
+ tree = tree.or(variant);
+ }
+
+ tree
+ }
+ }))
+ }
+ _ => Err(Err::Unspecified),
+ }
+ }
+
+ fn from_repr_c_variant(
+ ty: Ty<'tcx>,
+ adt_def: AdtDef<'tcx>,
+ substs_ref: SubstsRef<'tcx>,
+ layout_summary: &LayoutSummary,
+ discr: Option<Discr<'tcx>>,
+ variant_def: &'tcx VariantDef,
+ tcx: TyCtxt<'tcx>,
+ ) -> Result<Self, Err> {
+ let mut tree = Tree::unit();
+
+ let repr = adt_def.repr();
+ let min_align = repr.align.unwrap_or(Align::ONE);
+ let max_align = repr.pack.unwrap_or(Align::MAX);
+
+ let clamp =
+ |align: Align| align.clamp(min_align, max_align).bytes().try_into().unwrap();
+
+ let variant_span = tracing::trace_span!(
+ "treeifying variant",
+ min_align = ?min_align,
+ max_align = ?max_align,
+ )
+ .entered();
+
+ let mut variant_layout = alloc::Layout::from_size_align(
+ 0,
+ layout_summary.total_align.bytes().try_into().unwrap(),
+ )
+ .unwrap();
+
+ // The layout of the variant is prefixed by the discriminant, if any.
+ if let Some(discr) = discr {
+ tracing::trace!(?discr, "treeifying discriminant");
+ let discr_layout = alloc::Layout::from_size_align(
+ layout_summary.discriminant_size,
+ clamp(layout_summary.discriminant_align),
+ )
+ .unwrap();
+ tracing::trace!(?discr_layout, "computed discriminant layout");
+ variant_layout = variant_layout.extend(discr_layout).unwrap().0;
+ tree = tree.then(Self::from_disr(discr, tcx, layout_summary.discriminant_size));
+ }
+
+ // Next come fields.
+ let fields_span = tracing::trace_span!("treeifying fields").entered();
+ for field_def in variant_def.fields.iter() {
+ let field_ty = field_def.ty(tcx, substs_ref);
+ let _span = tracing::trace_span!("treeifying field", field = ?field_ty).entered();
+
+ // begin with the field's visibility
+ tree = tree.then(Self::def(Def::Field(field_def)));
+
+ // compute the field's layout characteristics
+ let field_layout = layout_of(tcx, field_ty)?.clamp_align(min_align, max_align);
+
+ // next comes the field's padding
+ let padding_needed = variant_layout.padding_needed_for(field_layout.align());
+ if padding_needed > 0 {
+ tree = tree.then(Self::padding(padding_needed));
+ }
+
+ // finally, the field's layout
+ tree = tree.then(Self::from_ty(field_ty, tcx)?);
+
+ // extend the variant layout with the field layout
+ variant_layout = variant_layout.extend(field_layout).unwrap().0;
+ }
+ drop(fields_span);
+
+ // finally: padding
+ let padding_span = tracing::trace_span!("adding trailing padding").entered();
+ let padding_needed = layout_summary.total_size - variant_layout.size();
+ if padding_needed > 0 {
+ tree = tree.then(Self::padding(padding_needed));
+ };
+ drop(padding_span);
+ drop(variant_span);
+ Ok(tree)
+ }
+
+ pub fn from_disr(discr: Discr<'tcx>, tcx: TyCtxt<'tcx>, size: usize) -> Self {
+ // FIXME(@jswrenn): I'm certain this is missing needed endian nuance.
+ let bytes = discr.val.to_ne_bytes();
+ let bytes = &bytes[..size];
+ Self::Seq(bytes.into_iter().copied().map(|b| Self::from_bits(b)).collect())
+ }
+ }
+
+ fn layout_of<'tcx>(
+ ctx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Result<alloc::Layout, LayoutError<'tcx>> {
+ use rustc_middle::ty::ParamEnvAnd;
+ use rustc_target::abi::TyAndLayout;
+
+ let param_env = ParamEnv::reveal_all();
+ let param_env_and_type = ParamEnvAnd { param_env, value: ty };
+ let TyAndLayout { layout, .. } = ctx.layout_of(param_env_and_type)?;
+ let layout = alloc::Layout::from_size_align(
+ layout.size().bytes_usize(),
+ layout.align().abi.bytes().try_into().unwrap(),
+ )
+ .unwrap();
+ tracing::trace!(?ty, ?layout, "computed layout for type");
+ Ok(layout)
+ }
+}
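
A hedged illustration, in plain Rust, of the layout these trees model for a simple `#[repr(C)]` struct (assuming a target where `u16` has alignment 2): the tree is roughly `Seq[Def(Adt), Def(Field a), u8, ??u8, Def(Field b), u8, u8]`, with the single `??u8` padding byte inserted by the `padding_needed_for` step above.

#[repr(C)]
struct Example {
    a: u8,  // offset 0: one `u8` byte node
    b: u16, // offset 2: preceded by one uninit padding byte
}

fn main() {
    assert_eq!(std::mem::size_of::<Example>(), 4);
    assert_eq!(std::mem::align_of::<Example>(), 2);
}
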
diff --git a/compiler/rustc_transmute/src/layout/tree/tests.rs b/compiler/rustc_transmute/src/layout/tree/tests.rs
new file mode 100644
index 000000000..90515e92f
--- /dev/null
+++ b/compiler/rustc_transmute/src/layout/tree/tests.rs
@@ -0,0 +1,80 @@
+use super::Tree;
+
+#[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)]
+pub enum Def {
+ Visible,
+ Invisible,
+}
+
+impl super::Def for Def {}
+
+mod prune {
+ use super::*;
+
+ mod should_simplify {
+ use super::*;
+
+ #[test]
+ fn seq_1() {
+ let layout: Tree<Def, !> = Tree::def(Def::Visible).then(Tree::from_bits(0x00));
+ assert_eq!(layout.prune(&|d| matches!(d, Def::Visible)), Tree::from_bits(0x00));
+ }
+
+ #[test]
+ fn seq_2() {
+ let layout: Tree<Def, !> =
+ Tree::from_bits(0x00).then(Tree::def(Def::Visible)).then(Tree::from_bits(0x01));
+
+ assert_eq!(
+ layout.prune(&|d| matches!(d, Def::Visible)),
+ Tree::from_bits(0x00).then(Tree::from_bits(0x01))
+ );
+ }
+ }
+
+ mod should_reject {
+ use super::*;
+
+ #[test]
+ fn invisible_def() {
+ let layout: Tree<Def, !> = Tree::def(Def::Invisible);
+ assert_eq!(layout.prune(&|d| matches!(d, Def::Visible)), Tree::uninhabited());
+ }
+
+ #[test]
+ fn invisible_def_in_seq_len_2() {
+ let layout: Tree<Def, !> = Tree::def(Def::Visible).then(Tree::def(Def::Invisible));
+ assert_eq!(layout.prune(&|d| matches!(d, Def::Visible)), Tree::uninhabited());
+ }
+
+ #[test]
+ fn invisible_def_in_seq_len_3() {
+ let layout: Tree<Def, !> =
+ Tree::def(Def::Visible).then(Tree::from_bits(0x00)).then(Tree::def(Def::Invisible));
+ assert_eq!(layout.prune(&|d| matches!(d, Def::Visible)), Tree::uninhabited());
+ }
+ }
+
+ mod should_accept {
+ use super::*;
+
+ #[test]
+ fn visible_def() {
+ let layout: Tree<Def, !> = Tree::def(Def::Visible);
+ assert_eq!(layout.prune(&|d| matches!(d, Def::Visible)), Tree::unit());
+ }
+
+ #[test]
+ fn visible_def_in_seq_len_2() {
+ let layout: Tree<Def, !> = Tree::def(Def::Visible).then(Tree::def(Def::Visible));
+ assert_eq!(layout.prune(&|d| matches!(d, Def::Visible)), Tree::unit());
+ }
+
+ #[test]
+ fn visible_def_in_seq_len_3() {
+ let layout: Tree<Def, !> =
+ Tree::def(Def::Visible).then(Tree::from_bits(0x00)).then(Tree::def(Def::Visible));
+ assert_eq!(layout.prune(&|d| matches!(d, Def::Visible)), Tree::from_bits(0x00));
+ }
+ }
+}
diff --git a/compiler/rustc_transmute/src/lib.rs b/compiler/rustc_transmute/src/lib.rs
new file mode 100644
index 000000000..cfc7c752a
--- /dev/null
+++ b/compiler/rustc_transmute/src/lib.rs
@@ -0,0 +1,117 @@
+#![feature(
+ alloc_layout_extra,
+ control_flow_enum,
+ decl_macro,
+ iterator_try_reduce,
+ never_type,
+ result_into_ok_or_err
+)]
+#![allow(dead_code, unused_variables)]
+
+#[macro_use]
+extern crate tracing;
+
+#[cfg(feature = "rustc")]
+pub(crate) use rustc_data_structures::fx::{FxHashMap as Map, FxHashSet as Set};
+
+#[cfg(not(feature = "rustc"))]
+pub(crate) use std::collections::{HashMap as Map, HashSet as Set};
+
+pub(crate) mod layout;
+pub(crate) mod maybe_transmutable;
+
+#[derive(Default)]
+pub struct Assume {
+ pub alignment: bool,
+ pub lifetimes: bool,
+ pub validity: bool,
+ pub visibility: bool,
+}
+
+/// This type encodes answers to the question: "Are these types transmutable?"
+#[derive(Debug, Hash, Eq, PartialEq, PartialOrd, Ord, Clone)]
+pub enum Answer<R>
+where
+ R: layout::Ref,
+{
+ /// `Src` is transmutable into `Dst`.
+ Yes,
+
+ /// `Src` is NOT transmutable into `Dst`.
+ No(Reason),
+
+ /// `Src` is transmutable into `Dst`, if `src` is transmutable into `dst`.
+ IfTransmutable { src: R, dst: R },
+
+ /// `Src` is transmutable into `Dst`, if all of the enclosed requirements are met.
+ IfAll(Vec<Answer<R>>),
+
+ /// `Src` is transmutable into `Dst` if any of the enclosed requirements are met.
+ IfAny(Vec<Answer<R>>),
+}
+
+/// Answers: Why wasn't the source type transmutable into the destination type?
+#[derive(Debug, Hash, Eq, PartialEq, PartialOrd, Ord, Clone)]
+pub enum Reason {
+ /// The layout of the source type is unspecified.
+ SrcIsUnspecified,
+ /// The layout of the destination type is unspecified.
+ DstIsUnspecified,
+ /// The layout of the destination type is bit-incompatible with the source type.
+ DstIsBitIncompatible,
+ /// There aren't any public constructors for `Dst`.
+ DstIsPrivate,
+ /// `Dst` is larger than `Src`, and the excess bytes were not exclusively uninitialized.
+ DstIsTooBig,
+}
+
+#[cfg(feature = "rustc")]
+mod rustc {
+ use rustc_infer::infer::InferCtxt;
+ use rustc_macros::{TypeFoldable, TypeVisitable};
+ use rustc_middle::traits::ObligationCause;
+ use rustc_middle::ty::Binder;
+ use rustc_middle::ty::Ty;
+
+ /// The source and destination types of a transmutation.
+ #[derive(TypeFoldable, TypeVisitable, Debug, Clone, Copy)]
+ pub struct Types<'tcx> {
+ /// The source type.
+ pub src: Ty<'tcx>,
+ /// The destination type.
+ pub dst: Ty<'tcx>,
+ }
+
+ pub struct TransmuteTypeEnv<'cx, 'tcx> {
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ }
+
+ impl<'cx, 'tcx> TransmuteTypeEnv<'cx, 'tcx> {
+ pub fn new(infcx: &'cx InferCtxt<'cx, 'tcx>) -> Self {
+ Self { infcx }
+ }
+
+ #[allow(unused)]
+ pub fn is_transmutable(
+ &mut self,
+ cause: ObligationCause<'tcx>,
+ src_and_dst: Binder<'tcx, Types<'tcx>>,
+ scope: Ty<'tcx>,
+ assume: crate::Assume,
+ ) -> crate::Answer<crate::layout::rustc::Ref<'tcx>> {
+ let src = src_and_dst.map_bound(|types| types.src).skip_binder();
+ let dst = src_and_dst.map_bound(|types| types.dst).skip_binder();
+ crate::maybe_transmutable::MaybeTransmutableQuery::new(
+ src,
+ dst,
+ scope,
+ assume,
+ self.infcx.tcx,
+ )
+ .answer()
+ }
+ }
+}
+
+#[cfg(feature = "rustc")]
+pub use rustc::*;
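
A hedged user-level example of what `Assume { validity, .. }` models (plain Rust, not this crate's API): `u8 -> bool` is bit-incompatible in general because only 0x00 and 0x01 are valid `bool` patterns, so the analysis answers `No(Reason::DstIsBitIncompatible)` unless the programmer vouches for validity.

fn main() {
    // Sound only because 0x01 happens to be a valid `bool` pattern;
    // for an arbitrary `u8` this transmute would be undefined behavior,
    // which is what assuming validity signs the programmer up to rule out.
    let b: bool = unsafe { std::mem::transmute(1u8) };
    assert!(b);
}
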
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
new file mode 100644
index 000000000..076d922d1
--- /dev/null
+++ b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs
@@ -0,0 +1,320 @@
+use crate::Map;
+use crate::{Answer, Reason};
+
+#[cfg(test)]
+mod tests;
+
+mod query_context;
+use query_context::QueryContext;
+
+use crate::layout::{self, dfa, Byte, Dfa, Nfa, Tree, Uninhabited};
+pub(crate) struct MaybeTransmutableQuery<L, C>
+where
+ C: QueryContext,
+{
+ src: L,
+ dst: L,
+ scope: <C as QueryContext>::Scope,
+ assume: crate::Assume,
+ context: C,
+}
+
+impl<L, C> MaybeTransmutableQuery<L, C>
+where
+ C: QueryContext,
+{
+ pub(crate) fn new(
+ src: L,
+ dst: L,
+ scope: <C as QueryContext>::Scope,
+ assume: crate::Assume,
+ context: C,
+ ) -> Self {
+ Self { src, dst, scope, assume, context }
+ }
+
+ pub(crate) fn map_layouts<F, M>(
+ self,
+ f: F,
+ ) -> Result<MaybeTransmutableQuery<M, C>, Answer<<C as QueryContext>::Ref>>
+ where
+ F: FnOnce(
+ L,
+ L,
+ <C as QueryContext>::Scope,
+ &C,
+ ) -> Result<(M, M), Answer<<C as QueryContext>::Ref>>,
+ {
+ let Self { src, dst, scope, assume, context } = self;
+
+ let (src, dst) = f(src, dst, scope, &context)?;
+
+ Ok(MaybeTransmutableQuery { src, dst, scope, assume, context })
+ }
+}
+
+#[cfg(feature = "rustc")]
+mod rustc {
+ use super::*;
+ use crate::layout::tree::Err;
+
+ use rustc_middle::ty::Ty;
+ use rustc_middle::ty::TyCtxt;
+
+ impl<'tcx> MaybeTransmutableQuery<Ty<'tcx>, TyCtxt<'tcx>> {
+ /// This method begins by converting `src` and `dst` from `Ty`s to `Tree`s,
+ /// then computes an answer using those trees.
+ #[instrument(level = "debug", skip(self), fields(src = ?self.src, dst = ?self.dst))]
+ pub fn answer(self) -> Answer<<TyCtxt<'tcx> as QueryContext>::Ref> {
+ let query_or_answer = self.map_layouts(|src, dst, scope, &context| {
+ // Convert `src` and `dst` from their rustc representations, to `Tree`-based
+ // representations. If these conversions fail, conclude that the transmutation is
+ // unacceptable; the layouts of both the source and destination types must be
+ // well-defined.
+ let src = Tree::from_ty(src, context).map_err(|err| match err {
+ // Answer `Yes` here, because "Unknown Type" will already be reported by
+ // rustc. No need to spam the user with more errors.
+ Err::Unknown => Answer::Yes,
+ Err::Unspecified => Answer::No(Reason::SrcIsUnspecified),
+ })?;
+
+ let dst = Tree::from_ty(dst, context).map_err(|err| match err {
+ Err::Unknown => Answer::Yes,
+ Err::Unspecified => Answer::No(Reason::DstIsUnspecified),
+ })?;
+
+ Ok((src, dst))
+ });
+
+ match query_or_answer {
+ Ok(query) => query.answer(),
+ Err(answer) => answer,
+ }
+ }
+ }
+}
+
+impl<C> MaybeTransmutableQuery<Tree<<C as QueryContext>::Def, <C as QueryContext>::Ref>, C>
+where
+ C: QueryContext,
+{
+ /// Answers whether a `Tree` is transmutable into another `Tree`.
+ ///
+ /// This method begins by stripping `Def` nodes from `src` and pruning private
+ /// paths from `dst`, then converts both to `Nfa`s and computes an answer using
+ /// those NFAs.
+ #[inline(always)]
+ #[instrument(level = "debug", skip(self), fields(src = ?self.src, dst = ?self.dst))]
+ pub(crate) fn answer(self) -> Answer<<C as QueryContext>::Ref> {
+ let assume_visibility = self.assume.visibility;
+ let query_or_answer = self.map_layouts(|src, dst, scope, context| {
+ // Remove all `Def` nodes from `src`, without checking their visibility.
+ let src = src.prune(&|def| true);
+
+ tracing::trace!(?src, "pruned src");
+
+ // Remove all `Def` nodes from `dst`, additionally...
+ let dst = if assume_visibility {
+ // ...if visibility is assumed, don't check their visibility.
+ dst.prune(&|def| true)
+ } else {
+ // ...otherwise, prune away all unreachable paths through the `Dst` layout.
+ dst.prune(&|def| context.is_accessible_from(def, scope))
+ };
+
+ tracing::trace!(?dst, "pruned dst");
+
+ // Convert `src` from a tree-based representation to an NFA-based representation.
+ // If the conversion fails because `src` is uninhabited, conclude that the transmutation
+ // is acceptable, because instances of the `src` type do not exist.
+ let src = Nfa::from_tree(src).map_err(|Uninhabited| Answer::Yes)?;
+
+ // Convert `dst` from a tree-based representation to an NFA-based representation.
+ // If the conversion fails because `src` is uninhabited, conclude that the transmutation
+ // is unacceptable, because instances of the `dst` type do not exist.
+ let dst =
+ Nfa::from_tree(dst).map_err(|Uninhabited| Answer::No(Reason::DstIsPrivate))?;
+
+ Ok((src, dst))
+ });
+
+ match query_or_answer {
+ Ok(query) => query.answer(),
+ Err(answer) => answer,
+ }
+ }
+}
+
+impl<C> MaybeTransmutableQuery<Nfa<<C as QueryContext>::Ref>, C>
+where
+ C: QueryContext,
+{
+ /// Answers whether a `Nfa` is transmutable into another `Nfa`.
+ ///
+ /// This method converts `src` and `dst` to DFAs, then computes an answer using those DFAs.
+ #[inline(always)]
+ #[instrument(level = "debug", skip(self), fields(src = ?self.src, dst = ?self.dst))]
+ pub(crate) fn answer(self) -> Answer<<C as QueryContext>::Ref> {
+ let query_or_answer = self
+ .map_layouts(|src, dst, scope, context| Ok((Dfa::from_nfa(src), Dfa::from_nfa(dst))));
+
+ match query_or_answer {
+ Ok(query) => query.answer(),
+ Err(answer) => answer,
+ }
+ }
+}
+
+impl<C> MaybeTransmutableQuery<Dfa<<C as QueryContext>::Ref>, C>
+where
+ C: QueryContext,
+{
+ /// Answers whether a `Dfa` is transmutable into another `Dfa`.
+ ///
+ /// This method borrows `src` and `dst` and delegates to the by-reference
+ /// implementation below.
+ pub(crate) fn answer(self) -> Answer<<C as QueryContext>::Ref> {
+ MaybeTransmutableQuery {
+ src: &self.src,
+ dst: &self.dst,
+ scope: self.scope,
+ assume: self.assume,
+ context: self.context,
+ }
+ .answer()
+ }
+}
+
+impl<'l, C> MaybeTransmutableQuery<&'l Dfa<<C as QueryContext>::Ref>, C>
+where
+ C: QueryContext,
+{
+ pub(crate) fn answer(&mut self) -> Answer<<C as QueryContext>::Ref> {
+ self.answer_memo(&mut Map::default(), self.src.start, self.dst.start)
+ }
+
+ #[inline(always)]
+ #[instrument(level = "debug", skip(self))]
+ fn answer_memo(
+ &self,
+ cache: &mut Map<(dfa::State, dfa::State), Answer<<C as QueryContext>::Ref>>,
+ src_state: dfa::State,
+ dst_state: dfa::State,
+ ) -> Answer<<C as QueryContext>::Ref> {
+ if let Some(answer) = cache.get(&(src_state, dst_state)) {
+ answer.clone()
+ } else {
+ let answer = if dst_state == self.dst.accepting {
+ // truncation: `size_of(Src) >= size_of(Dst)`
+ Answer::Yes
+ } else if src_state == self.src.accepting {
+                // extension: `size_of(Src) <= size_of(Dst)`
+ if let Some(dst_state_prime) = self.dst.byte_from(dst_state, Byte::Uninit) {
+ self.answer_memo(cache, src_state, dst_state_prime)
+ } else {
+ Answer::No(Reason::DstIsTooBig)
+ }
+ } else {
+ let src_quantification = if self.assume.validity {
+ // if the compiler may assume that the programmer is doing additional validity checks,
+ // (e.g.: that `src != 3u8` when the destination type is `bool`)
+ // then there must exist at least one transition out of `src_state` such that the transmute is viable...
+ there_exists
+ } else {
+ // if the compiler cannot assume that the programmer is doing additional validity checks,
+                    // then for every transition out of `src_state`, the transmute must be viable...
+ for_all
+ };
+
+ src_quantification(
+ self.src.bytes_from(src_state).unwrap_or(&Map::default()),
+ |(&src_validity, &src_state_prime)| {
+ if let Some(dst_state_prime) = self.dst.byte_from(dst_state, src_validity) {
+ self.answer_memo(cache, src_state_prime, dst_state_prime)
+ } else if let Some(dst_state_prime) =
+ self.dst.byte_from(dst_state, Byte::Uninit)
+ {
+ self.answer_memo(cache, src_state_prime, dst_state_prime)
+ } else {
+ Answer::No(Reason::DstIsBitIncompatible)
+ }
+ },
+ )
+ };
+ cache.insert((src_state, dst_state), answer.clone());
+ answer
+ }
+ }
+}
+
+impl<R> Answer<R>
+where
+ R: layout::Ref,
+{
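+    /// Conjunction: the combined answer is satisfiable only if both operands
+    /// are. Any `No(reason)` operand is absorbing; conditional answers are
+    /// merged into a single `IfAll`.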
+ pub(crate) fn and(self, rhs: Self) -> Self {
+ match (self, rhs) {
+ (Self::No(reason), _) | (_, Self::No(reason)) => Self::No(reason),
+ (Self::Yes, Self::Yes) => Self::Yes,
+ (Self::IfAll(mut lhs), Self::IfAll(ref mut rhs)) => {
+ lhs.append(rhs);
+ Self::IfAll(lhs)
+ }
+ (constraint, Self::IfAll(mut constraints))
+ | (Self::IfAll(mut constraints), constraint) => {
+ constraints.push(constraint);
+ Self::IfAll(constraints)
+ }
+ (lhs, rhs) => Self::IfAll(vec![lhs, rhs]),
+ }
+ }
+
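+    /// Disjunction: the combined answer is satisfiable if either operand is.
+    /// `Yes` is absorbing; two `No`s keep the left-hand reason; conditional
+    /// answers are merged into a single `IfAny`.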
+ pub(crate) fn or(self, rhs: Self) -> Self {
+ match (self, rhs) {
+ (Self::Yes, _) | (_, Self::Yes) => Self::Yes,
+            (Self::No(reason), Self::No(_)) => Self::No(reason),
+ (Self::IfAny(mut lhs), Self::IfAny(ref mut rhs)) => {
+ lhs.append(rhs);
+ Self::IfAny(lhs)
+ }
+ (constraint, Self::IfAny(mut constraints))
+ | (Self::IfAny(mut constraints), constraint) => {
+ constraints.push(constraint);
+ Self::IfAny(constraints)
+ }
+ (lhs, rhs) => Self::IfAny(vec![lhs, rhs]),
+ }
+ }
+}
+
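+/// Universally quantifies `f` over `iter`: every item must yield a satisfiable
+/// `Answer` for the overall answer to be satisfiable; the fold short-circuits
+/// on the first `Answer::No`. Note that an empty `iter` is vacuously
+/// `Answer::Yes`.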
+pub fn for_all<R, I, F>(iter: I, f: F) -> Answer<R>
+where
+ R: layout::Ref,
+ I: IntoIterator,
+ F: FnMut(<I as IntoIterator>::Item) -> Answer<R>,
+{
+ use std::ops::ControlFlow::{Break, Continue};
+ let (Continue(result) | Break(result)) =
+ iter.into_iter().map(f).try_fold(Answer::Yes, |constraints, constraint| {
+ match constraint.and(constraints) {
+ Answer::No(reason) => Break(Answer::No(reason)),
+ maybe => Continue(maybe),
+ }
+ });
+ result
+}
+
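+/// Existentially quantifies `f` over `iter`: at least one item must yield a
+/// satisfiable `Answer`; the fold short-circuits on the first `Answer::Yes`.
+/// Dually, an empty `iter` yields `Answer::No(Reason::DstIsBitIncompatible)`.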
+pub fn there_exists<R, I, F>(iter: I, f: F) -> Answer<R>
+where
+ R: layout::Ref,
+ I: IntoIterator,
+ F: FnMut(<I as IntoIterator>::Item) -> Answer<R>,
+{
+ use std::ops::ControlFlow::{Break, Continue};
+ let (Continue(result) | Break(result)) = iter.into_iter().map(f).try_fold(
+ Answer::No(Reason::DstIsBitIncompatible),
+ |constraints, constraint| match constraint.or(constraints) {
+ Answer::Yes => Break(Answer::Yes),
+ maybe => Continue(maybe),
+ },
+ );
+ result
+}
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/query_context.rs b/compiler/rustc_transmute/src/maybe_transmutable/query_context.rs
new file mode 100644
index 000000000..9c2cf4c9a
--- /dev/null
+++ b/compiler/rustc_transmute/src/maybe_transmutable/query_context.rs
@@ -0,0 +1,93 @@
+use crate::layout;
+
+/// Context necessary to answer the question "Are these types transmutable?".
+pub(crate) trait QueryContext {
+ type Def: layout::Def;
+ type Ref: layout::Ref;
+ type Scope: Copy;
+
+ /// Is `def` accessible from the defining module of `scope`?
+ fn is_accessible_from(&self, def: Self::Def, scope: Self::Scope) -> bool;
+
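+    /// The minimum alignment required by the type behind `reference`.
+    /// (Currently left `unimplemented!` by both impls below.)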
+ fn min_align(&self, reference: Self::Ref) -> usize;
+}
+
+#[cfg(test)]
+pub(crate) mod test {
+ use super::QueryContext;
+
+ pub(crate) struct UltraMinimal;
+
+ #[derive(Debug, Hash, Eq, PartialEq, Clone, Copy)]
+ pub(crate) enum Def {
+ Visible,
+ Invisible,
+ }
+
+ impl crate::layout::Def for Def {}
+
+ impl QueryContext for UltraMinimal {
+ type Def = Def;
+ type Ref = !;
+ type Scope = ();
+
+ fn is_accessible_from(&self, def: Def, scope: ()) -> bool {
+            matches!(def, Def::Visible)
+ }
+
+ fn min_align(&self, reference: !) -> usize {
+ unimplemented!()
+ }
+ }
+}
+
+#[cfg(feature = "rustc")]
+mod rustc {
+ use super::*;
+ use rustc_middle::ty::{Ty, TyCtxt};
+
+ impl<'tcx> super::QueryContext for TyCtxt<'tcx> {
+ type Def = layout::rustc::Def<'tcx>;
+ type Ref = layout::rustc::Ref<'tcx>;
+
+ type Scope = Ty<'tcx>;
+
+ #[instrument(level = "debug", skip(self))]
+ fn is_accessible_from(&self, def: Self::Def, scope: Self::Scope) -> bool {
+ use layout::rustc::Def;
+ use rustc_middle::ty;
+
+ let parent = if let ty::Adt(adt_def, ..) = scope.kind() {
+ use rustc_middle::ty::DefIdTree;
+                self.parent(adt_def.did())
+ } else {
+ // Is this always how we want to handle a non-ADT scope?
+ return false;
+ };
+
+ let def_id = match def {
+ Def::Adt(adt_def) => adt_def.did(),
+ Def::Variant(variant_def) => variant_def.def_id,
+ Def::Field(field_def) => field_def.did,
+ Def::Primitive => {
+ // primitives do not have a def_id, but they're always accessible
+ return true;
+ }
+ };
+
+            let ret = self.visibility(def_id).is_accessible_from(parent, *self);
+
+ tracing::trace!(?ret, "ret");
+ ret
+ }
+
+ fn min_align(&self, reference: Self::Ref) -> usize {
+ unimplemented!()
+ }
+ }
+}
diff --git a/compiler/rustc_transmute/src/maybe_transmutable/tests.rs b/compiler/rustc_transmute/src/maybe_transmutable/tests.rs
new file mode 100644
index 000000000..d9d125687
--- /dev/null
+++ b/compiler/rustc_transmute/src/maybe_transmutable/tests.rs
@@ -0,0 +1,115 @@
+use super::query_context::test::{Def, UltraMinimal};
+use crate::maybe_transmutable::MaybeTransmutableQuery;
+use crate::{layout, Answer, Reason, Set};
+use itertools::Itertools;
+
+mod bool {
+ use super::*;
+
+ #[test]
+ fn should_permit_identity_transmutation_tree() {
+ println!("{:?}", layout::Tree::<!, !>::bool());
+ let answer = crate::maybe_transmutable::MaybeTransmutableQuery::new(
+ layout::Tree::<Def, !>::bool(),
+ layout::Tree::<Def, !>::bool(),
+ (),
+ crate::Assume { alignment: false, lifetimes: false, validity: true, visibility: false },
+ UltraMinimal,
+ )
+ .answer();
+ assert_eq!(answer, Answer::Yes);
+ }
+
+ #[test]
+ fn should_permit_identity_transmutation_dfa() {
+ let answer = crate::maybe_transmutable::MaybeTransmutableQuery::new(
+ layout::Dfa::<!>::bool(),
+ layout::Dfa::<!>::bool(),
+ (),
+ crate::Assume { alignment: false, lifetimes: false, validity: true, visibility: false },
+ UltraMinimal,
+ )
+ .answer();
+ assert_eq!(answer, Answer::Yes);
+ }
+
+ #[test]
+ fn should_permit_validity_expansion_and_reject_contraction() {
+ let un = layout::Tree::<Def, !>::uninhabited();
+ let b0 = layout::Tree::<Def, !>::from_bits(0);
+ let b1 = layout::Tree::<Def, !>::from_bits(1);
+ let b2 = layout::Tree::<Def, !>::from_bits(2);
+
+ let alts = [b0, b1, b2];
+
+ let into_layout = |alts: Vec<_>| {
+ alts.into_iter().fold(layout::Tree::<Def, !>::uninhabited(), layout::Tree::<Def, !>::or)
+ };
+
+ let into_set = |alts: Vec<_>| {
+ #[cfg(feature = "rustc")]
+ let mut set = Set::default();
+ #[cfg(not(feature = "rustc"))]
+ let mut set = Set::new();
+ set.extend(alts);
+ set
+ };
+
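+        // For each pair of validity sets drawn from `alts`, the expected verdict is:
+        // - `src_set` a subset of `dst_set`: always transmutable;
+        // - the sets merely overlap: transmutable only when validity is assumed;
+        // - the sets are disjoint: rejected as bit-incompatible.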
+ for src_alts in alts.clone().into_iter().powerset() {
+ let src_layout = into_layout(src_alts.clone());
+ let src_set = into_set(src_alts.clone());
+
+ for dst_alts in alts.clone().into_iter().powerset().filter(|alts| !alts.is_empty()) {
+ let dst_layout = into_layout(dst_alts.clone());
+ let dst_set = into_set(dst_alts.clone());
+
+ if src_set.is_subset(&dst_set) {
+ assert_eq!(
+ Answer::Yes,
+ MaybeTransmutableQuery::new(
+ src_layout.clone(),
+ dst_layout.clone(),
+ (),
+ crate::Assume { validity: false, ..crate::Assume::default() },
+ UltraMinimal,
+ )
+ .answer(),
+ "{:?} SHOULD be transmutable into {:?}",
+ src_layout,
+ dst_layout
+ );
+ } else if !src_set.is_disjoint(&dst_set) {
+ assert_eq!(
+ Answer::Yes,
+ MaybeTransmutableQuery::new(
+ src_layout.clone(),
+ dst_layout.clone(),
+ (),
+ crate::Assume { validity: true, ..crate::Assume::default() },
+ UltraMinimal,
+ )
+ .answer(),
+ "{:?} SHOULD be transmutable (assuming validity) into {:?}",
+ src_layout,
+ dst_layout
+ );
+ } else {
+ assert_eq!(
+ Answer::No(Reason::DstIsBitIncompatible),
+ MaybeTransmutableQuery::new(
+ src_layout.clone(),
+ dst_layout.clone(),
+ (),
+ crate::Assume { validity: false, ..crate::Assume::default() },
+ UltraMinimal,
+ )
+ .answer(),
+ "{:?} should NOT be transmutable into {:?}",
+ src_layout,
+ dst_layout
+ );
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_ty_utils/Cargo.toml b/compiler/rustc_ty_utils/Cargo.toml
new file mode 100644
index 000000000..caad2ed42
--- /dev/null
+++ b/compiler/rustc_ty_utils/Cargo.toml
@@ -0,0 +1,18 @@
+[package]
+name = "rustc_ty_utils"
+version = "0.0.0"
+edition = "2021"
+
+[dependencies]
+tracing = "0.1"
+rustc_middle = { path = "../rustc_middle" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_span = { path = "../rustc_span" }
+rustc_session = { path = "../rustc_session" }
+rustc_target = { path = "../rustc_target" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_type_ir = { path = "../rustc_type_ir" }
+rustc_index = { path = "../rustc_index" }
diff --git a/compiler/rustc_ty_utils/src/assoc.rs b/compiler/rustc_ty_utils/src/assoc.rs
new file mode 100644
index 000000000..515a73ead
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/assoc.rs
@@ -0,0 +1,111 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::{self, TyCtxt};
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers {
+ associated_item,
+ associated_item_def_ids,
+ associated_items,
+ impl_item_implementor_ids,
+ ..*providers
+ };
+}
+
+fn associated_item_def_ids(tcx: TyCtxt<'_>, def_id: DefId) -> &[DefId] {
+ let item = tcx.hir().expect_item(def_id.expect_local());
+ match item.kind {
+ hir::ItemKind::Trait(.., ref trait_item_refs) => tcx.arena.alloc_from_iter(
+ trait_item_refs.iter().map(|trait_item_ref| trait_item_ref.id.def_id.to_def_id()),
+ ),
+ hir::ItemKind::Impl(ref impl_) => tcx.arena.alloc_from_iter(
+ impl_.items.iter().map(|impl_item_ref| impl_item_ref.id.def_id.to_def_id()),
+ ),
+ hir::ItemKind::TraitAlias(..) => &[],
+ _ => span_bug!(item.span, "associated_item_def_ids: not impl or trait"),
+ }
+}
+
+fn associated_items(tcx: TyCtxt<'_>, def_id: DefId) -> ty::AssocItems<'_> {
+ let items = tcx.associated_item_def_ids(def_id).iter().map(|did| tcx.associated_item(*did));
+ ty::AssocItems::new(items)
+}
+
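+/// Maps each trait item of the trait implemented by `impl_id` to the impl
+/// item that implements it (trait-item `DefId` -> impl-item `DefId`).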
+fn impl_item_implementor_ids(tcx: TyCtxt<'_>, impl_id: DefId) -> FxHashMap<DefId, DefId> {
+ tcx.associated_items(impl_id)
+ .in_definition_order()
+ .filter_map(|item| item.trait_item_def_id.map(|trait_item| (trait_item, item.def_id)))
+ .collect()
+}
+
+fn associated_item(tcx: TyCtxt<'_>, def_id: DefId) -> ty::AssocItem {
+ let id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let parent_def_id = tcx.hir().get_parent_item(id);
+ let parent_item = tcx.hir().expect_item(parent_def_id);
+ match parent_item.kind {
+ hir::ItemKind::Impl(ref impl_) => {
+ if let Some(impl_item_ref) =
+ impl_.items.iter().find(|i| i.id.def_id.to_def_id() == def_id)
+ {
+ let assoc_item = associated_item_from_impl_item_ref(impl_item_ref);
+ debug_assert_eq!(assoc_item.def_id, def_id);
+ return assoc_item;
+ }
+ }
+
+ hir::ItemKind::Trait(.., ref trait_item_refs) => {
+ if let Some(trait_item_ref) =
+ trait_item_refs.iter().find(|i| i.id.def_id.to_def_id() == def_id)
+ {
+ let assoc_item = associated_item_from_trait_item_ref(trait_item_ref);
+ debug_assert_eq!(assoc_item.def_id, def_id);
+ return assoc_item;
+ }
+ }
+
+ _ => {}
+ }
+
+ span_bug!(
+ parent_item.span,
+ "unexpected parent of trait or impl item or item not found: {:?}",
+ parent_item.kind
+ )
+}
+
+fn associated_item_from_trait_item_ref(trait_item_ref: &hir::TraitItemRef) -> ty::AssocItem {
+ let def_id = trait_item_ref.id.def_id;
+ let (kind, has_self) = match trait_item_ref.kind {
+ hir::AssocItemKind::Const => (ty::AssocKind::Const, false),
+ hir::AssocItemKind::Fn { has_self } => (ty::AssocKind::Fn, has_self),
+ hir::AssocItemKind::Type => (ty::AssocKind::Type, false),
+ };
+
+ ty::AssocItem {
+ name: trait_item_ref.ident.name,
+ kind,
+ def_id: def_id.to_def_id(),
+ trait_item_def_id: Some(def_id.to_def_id()),
+ container: ty::TraitContainer,
+ fn_has_self_parameter: has_self,
+ }
+}
+
+fn associated_item_from_impl_item_ref(impl_item_ref: &hir::ImplItemRef) -> ty::AssocItem {
+ let def_id = impl_item_ref.id.def_id;
+ let (kind, has_self) = match impl_item_ref.kind {
+ hir::AssocItemKind::Const => (ty::AssocKind::Const, false),
+ hir::AssocItemKind::Fn { has_self } => (ty::AssocKind::Fn, has_self),
+ hir::AssocItemKind::Type => (ty::AssocKind::Type, false),
+ };
+
+ ty::AssocItem {
+ name: impl_item_ref.ident.name,
+ kind,
+ def_id: def_id.to_def_id(),
+ trait_item_def_id: impl_item_ref.trait_item_def_id,
+ container: ty::ImplContainer,
+ fn_has_self_parameter: has_self,
+ }
+}
diff --git a/compiler/rustc_ty_utils/src/common_traits.rs b/compiler/rustc_ty_utils/src/common_traits.rs
new file mode 100644
index 000000000..cedc84d97
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/common_traits.rs
@@ -0,0 +1,51 @@
+//! Queries for checking whether a type implements one of a few common traits.
+
+use rustc_hir::lang_items::LangItem;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::DUMMY_SP;
+use rustc_trait_selection::traits;
+
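+// These `*_raw` functions back the corresponding `tcx` queries; callers go
+// through the cached query wrappers (e.g. `Ty::is_copy_modulo_regions`)
+// rather than invoking them directly.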
+fn is_copy_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ is_item_raw(tcx, query, LangItem::Copy)
+}
+
+fn is_sized_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ is_item_raw(tcx, query, LangItem::Sized)
+}
+
+fn is_freeze_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ is_item_raw(tcx, query, LangItem::Freeze)
+}
+
+fn is_unpin_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ is_item_raw(tcx, query, LangItem::Unpin)
+}
+
+fn is_item_raw<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+ item: LangItem,
+) -> bool {
+ let (param_env, ty) = query.into_parts();
+ let trait_def_id = tcx.require_lang_item(item, None);
+ tcx.infer_ctxt().enter(|infcx| {
+ traits::type_known_to_meet_bound_modulo_regions(
+ &infcx,
+ param_env,
+ ty,
+ trait_def_id,
+ DUMMY_SP,
+ )
+ })
+}
+
+pub(crate) fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers {
+ is_copy_raw,
+ is_sized_raw,
+ is_freeze_raw,
+ is_unpin_raw,
+ ..*providers
+ };
+}
diff --git a/compiler/rustc_ty_utils/src/consts.rs b/compiler/rustc_ty_utils/src/consts.rs
new file mode 100644
index 000000000..7c2f4db94
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/consts.rs
@@ -0,0 +1,469 @@
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::LocalDefId;
+use rustc_index::vec::IndexVec;
+use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput};
+use rustc_middle::ty::abstract_const::{CastKind, Node, NodeId};
+use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+use rustc_middle::{mir, thir};
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+
+use std::iter;
+
+/// Destructures array, ADT or tuple constants into the constants
+/// of their fields.
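+///
+/// For example (illustrative), destructuring the constant `(1u8, false)` yields
+/// the field constants `1u8` and `false` with `variant: None`, while an enum
+/// constant's valtree additionally carries the variant index as its first leaf.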
+pub(crate) fn destructure_const<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ const_: ty::Const<'tcx>,
+) -> ty::DestructuredConst<'tcx> {
+ let ty::ConstKind::Value(valtree) = const_.kind() else {
+ bug!("cannot destructure constant {:?}", const_)
+ };
+
+ let branches = match valtree {
+ ty::ValTree::Branch(b) => b,
+ _ => bug!("cannot destructure constant {:?}", const_),
+ };
+
+ let (fields, variant) = match const_.ty().kind() {
+ ty::Array(inner_ty, _) | ty::Slice(inner_ty) => {
+ // construct the consts for the elements of the array/slice
+ let field_consts = branches
+ .iter()
+ .map(|b| tcx.mk_const(ty::ConstS { kind: ty::ConstKind::Value(*b), ty: *inner_ty }))
+ .collect::<Vec<_>>();
+ debug!(?field_consts);
+
+ (field_consts, None)
+ }
+ ty::Adt(def, _) if def.variants().is_empty() => bug!("unreachable"),
+ ty::Adt(def, substs) => {
+ let (variant_idx, branches) = if def.is_enum() {
+ let (head, rest) = branches.split_first().unwrap();
+ (VariantIdx::from_u32(head.unwrap_leaf().try_to_u32().unwrap()), rest)
+ } else {
+ (VariantIdx::from_u32(0), branches)
+ };
+ let fields = &def.variant(variant_idx).fields;
+ let mut field_consts = Vec::with_capacity(fields.len());
+
+ for (field, field_valtree) in iter::zip(fields, branches) {
+ let field_ty = field.ty(tcx, substs);
+ let field_const = tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Value(*field_valtree),
+ ty: field_ty,
+ });
+ field_consts.push(field_const);
+ }
+ debug!(?field_consts);
+
+ (field_consts, Some(variant_idx))
+ }
+ ty::Tuple(elem_tys) => {
+ let fields = iter::zip(*elem_tys, branches)
+ .map(|(elem_ty, elem_valtree)| {
+ tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Value(*elem_valtree),
+ ty: elem_ty,
+ })
+ })
+ .collect::<Vec<_>>();
+
+ (fields, None)
+ }
+ _ => bug!("cannot destructure constant {:?}", const_),
+ };
+
+ let fields = tcx.arena.alloc_from_iter(fields.into_iter());
+
+ ty::DestructuredConst { variant, fields }
+}
+
+pub struct AbstractConstBuilder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ body_id: thir::ExprId,
+ body: &'a thir::Thir<'tcx>,
+ /// The current WIP node tree.
+ nodes: IndexVec<NodeId, Node<'tcx>>,
+}
+
+impl<'a, 'tcx> AbstractConstBuilder<'a, 'tcx> {
+ fn root_span(&self) -> Span {
+ self.body.exprs[self.body_id].span
+ }
+
+ fn error(&mut self, span: Span, msg: &str) -> Result<!, ErrorGuaranteed> {
+ let reported = self
+ .tcx
+ .sess
+ .struct_span_err(self.root_span(), "overly complex generic constant")
+ .span_label(span, msg)
+ .help("consider moving this anonymous constant into a `const` function")
+ .emit();
+
+ Err(reported)
+ }
+ fn maybe_supported_error(&mut self, span: Span, msg: &str) -> Result<!, ErrorGuaranteed> {
+ let reported = self
+ .tcx
+ .sess
+ .struct_span_err(self.root_span(), "overly complex generic constant")
+ .span_label(span, msg)
+ .help("consider moving this anonymous constant into a `const` function")
+ .note("this operation may be supported in the future")
+ .emit();
+
+ Err(reported)
+ }
+
+ #[instrument(skip(tcx, body, body_id), level = "debug")]
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ (body, body_id): (&'a thir::Thir<'tcx>, thir::ExprId),
+ ) -> Result<Option<AbstractConstBuilder<'a, 'tcx>>, ErrorGuaranteed> {
+ let builder = AbstractConstBuilder { tcx, body_id, body, nodes: IndexVec::new() };
+
+ struct IsThirPolymorphic<'a, 'tcx> {
+ is_poly: bool,
+ thir: &'a thir::Thir<'tcx>,
+ }
+
+ use crate::rustc_middle::thir::visit::Visitor;
+ use thir::visit;
+
+ impl<'a, 'tcx> IsThirPolymorphic<'a, 'tcx> {
+ fn expr_is_poly(&mut self, expr: &thir::Expr<'tcx>) -> bool {
+ if expr.ty.has_param_types_or_consts() {
+ return true;
+ }
+
+ match expr.kind {
+ thir::ExprKind::NamedConst { substs, .. } => substs.has_param_types_or_consts(),
+ thir::ExprKind::ConstParam { .. } => true,
+ thir::ExprKind::Repeat { value, count } => {
+ self.visit_expr(&self.thir()[value]);
+ count.has_param_types_or_consts()
+ }
+ _ => false,
+ }
+ }
+
+ fn pat_is_poly(&mut self, pat: &thir::Pat<'tcx>) -> bool {
+ if pat.ty.has_param_types_or_consts() {
+ return true;
+ }
+
+ match pat.kind.as_ref() {
+ thir::PatKind::Constant { value } => value.has_param_types_or_consts(),
+ thir::PatKind::Range(thir::PatRange { lo, hi, .. }) => {
+ lo.has_param_types_or_consts() || hi.has_param_types_or_consts()
+ }
+ _ => false,
+ }
+ }
+ }
+
+ impl<'a, 'tcx> visit::Visitor<'a, 'tcx> for IsThirPolymorphic<'a, 'tcx> {
+ fn thir(&self) -> &'a thir::Thir<'tcx> {
+ &self.thir
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn visit_expr(&mut self, expr: &thir::Expr<'tcx>) {
+ self.is_poly |= self.expr_is_poly(expr);
+ if !self.is_poly {
+ visit::walk_expr(self, expr)
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn visit_pat(&mut self, pat: &thir::Pat<'tcx>) {
+ self.is_poly |= self.pat_is_poly(pat);
+ if !self.is_poly {
+ visit::walk_pat(self, pat);
+ }
+ }
+ }
+
+ let mut is_poly_vis = IsThirPolymorphic { is_poly: false, thir: body };
+ visit::walk_expr(&mut is_poly_vis, &body[body_id]);
+ debug!("AbstractConstBuilder: is_poly={}", is_poly_vis.is_poly);
+ if !is_poly_vis.is_poly {
+ return Ok(None);
+ }
+
+ Ok(Some(builder))
+ }
+
+ /// We do not allow all binary operations in abstract consts, so filter disallowed ones.
+ fn check_binop(op: mir::BinOp) -> bool {
+ use mir::BinOp::*;
+ match op {
+ Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Shl | Shr | Eq | Lt | Le
+ | Ne | Ge | Gt => true,
+ Offset => false,
+ }
+ }
+
+ /// While we currently allow all unary operations, we still want to explicitly guard against
+ /// future changes here.
+ fn check_unop(op: mir::UnOp) -> bool {
+ use mir::UnOp::*;
+ match op {
+ Not | Neg => true,
+ }
+ }
+
+ /// Builds the abstract const by walking the thir and bailing out when
+ /// encountering an unsupported operation.
+ pub fn build(mut self) -> Result<&'tcx [Node<'tcx>], ErrorGuaranteed> {
+ debug!("AbstractConstBuilder::build: body={:?}", &*self.body);
+ self.recurse_build(self.body_id)?;
+
+ for n in self.nodes.iter() {
+ if let Node::Leaf(ct) = n {
+ if let ty::ConstKind::Unevaluated(ct) = ct.kind() {
+ // `AbstractConst`s should not contain any promoteds as they require references which
+ // are not allowed.
+ assert_eq!(ct.promoted, None);
+ assert_eq!(ct, self.tcx.erase_regions(ct));
+ }
+ }
+ }
+
+ Ok(self.tcx.arena.alloc_from_iter(self.nodes.into_iter()))
+ }
+
+ fn recurse_build(&mut self, node: thir::ExprId) -> Result<NodeId, ErrorGuaranteed> {
+ use thir::ExprKind;
+ let node = &self.body.exprs[node];
+ Ok(match &node.kind {
+            // I don't know if handling of these 3 is correct
+ &ExprKind::Scope { value, .. } => self.recurse_build(value)?,
+ &ExprKind::PlaceTypeAscription { source, .. }
+ | &ExprKind::ValueTypeAscription { source, .. } => self.recurse_build(source)?,
+            &ExprKind::Literal { lit, neg } => {
+ let sp = node.span;
+ let constant =
+ match self.tcx.at(sp).lit_to_const(LitToConstInput { lit: &lit.node, ty: node.ty, neg }) {
+ Ok(c) => c,
+ Err(LitToConstError::Reported) => {
+ self.tcx.const_error(node.ty)
+ }
+ Err(LitToConstError::TypeError) => {
+ bug!("encountered type error in lit_to_const")
+ }
+ };
+
+ self.nodes.push(Node::Leaf(constant))
+ }
+            &ExprKind::NonHirLiteral { lit, user_ty: _ } => {
+ let val = ty::ValTree::from_scalar_int(lit);
+ self.nodes.push(Node::Leaf(ty::Const::from_value(self.tcx, val, node.ty)))
+ }
+ &ExprKind::ZstLiteral { user_ty: _ } => {
+ let val = ty::ValTree::zst();
+ self.nodes.push(Node::Leaf(ty::Const::from_value(self.tcx, val, node.ty)))
+ }
+ &ExprKind::NamedConst { def_id, substs, user_ty: _ } => {
+ let uneval = ty::Unevaluated::new(ty::WithOptConstParam::unknown(def_id), substs);
+
+ let constant = self.tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(uneval),
+ ty: node.ty,
+ });
+
+ self.nodes.push(Node::Leaf(constant))
+ }
+
+            ExprKind::ConstParam { param, .. } => {
+ let const_param = self.tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Param(*param),
+ ty: node.ty,
+ });
+ self.nodes.push(Node::Leaf(const_param))
+ }
+
+ ExprKind::Call { fun, args, .. } => {
+ let fun = self.recurse_build(*fun)?;
+
+ let mut new_args = Vec::<NodeId>::with_capacity(args.len());
+ for &id in args.iter() {
+ new_args.push(self.recurse_build(id)?);
+ }
+ let new_args = self.tcx.arena.alloc_slice(&new_args);
+ self.nodes.push(Node::FunctionCall(fun, new_args))
+ }
+ &ExprKind::Binary { op, lhs, rhs } if Self::check_binop(op) => {
+ let lhs = self.recurse_build(lhs)?;
+ let rhs = self.recurse_build(rhs)?;
+ self.nodes.push(Node::Binop(op, lhs, rhs))
+ }
+ &ExprKind::Unary { op, arg } if Self::check_unop(op) => {
+ let arg = self.recurse_build(arg)?;
+ self.nodes.push(Node::UnaryOp(op, arg))
+ }
+ // This is necessary so that the following compiles:
+ //
+ // ```
+ // fn foo<const N: usize>(a: [(); N + 1]) {
+ // bar::<{ N + 1 }>();
+ // }
+ // ```
+ ExprKind::Block { body: thir::Block { stmts: box [], expr: Some(e), .. } } => {
+ self.recurse_build(*e)?
+ }
+            // `ExprKind::Use` happens when a `hir::ExprKind::Cast` is a
+            // "coercion cast", i.e. it uses a coercion or is a no-op.
+            // This is important so that `N as usize as usize` doesn't unify with `N as usize`. (untested)
+ &ExprKind::Use { source } => {
+ let arg = self.recurse_build(source)?;
+ self.nodes.push(Node::Cast(CastKind::Use, arg, node.ty))
+ }
+ &ExprKind::Cast { source } => {
+ let arg = self.recurse_build(source)?;
+ self.nodes.push(Node::Cast(CastKind::As, arg, node.ty))
+ }
+            ExprKind::Borrow { arg, .. } => {
+ let arg_node = &self.body.exprs[*arg];
+
+ // Skip reborrows for now until we allow Deref/Borrow/AddressOf
+ // expressions.
+ // FIXME(generic_const_exprs): Verify/explain why this is sound
+ if let ExprKind::Deref { arg } = arg_node.kind {
+ self.recurse_build(arg)?
+ } else {
+ self.maybe_supported_error(
+ node.span,
+ "borrowing is not supported in generic constants",
+ )?
+ }
+ }
+ // FIXME(generic_const_exprs): We may want to support these.
+            ExprKind::AddressOf { .. } | ExprKind::Deref { .. } => self.maybe_supported_error(
+ node.span,
+ "dereferencing or taking the address is not supported in generic constants",
+ )?,
+ ExprKind::Repeat { .. } | ExprKind::Array { .. } => self.maybe_supported_error(
+ node.span,
+ "array construction is not supported in generic constants",
+ )?,
+ ExprKind::Block { .. } => self.maybe_supported_error(
+ node.span,
+                "blocks are not supported in generic constants",
+ )?,
+ ExprKind::NeverToAny { .. } => self.maybe_supported_error(
+ node.span,
+                "converting nevers to any is not supported in generic constants",
+ )?,
+ ExprKind::Tuple { .. } => self.maybe_supported_error(
+ node.span,
+ "tuple construction is not supported in generic constants",
+ )?,
+ ExprKind::Index { .. } => self.maybe_supported_error(
+ node.span,
+                "indexing is not supported in generic constants",
+ )?,
+ ExprKind::Field { .. } => self.maybe_supported_error(
+ node.span,
+                "field access is not supported in generic constants",
+ )?,
+ ExprKind::ConstBlock { .. } => self.maybe_supported_error(
+ node.span,
+                "const blocks are not supported in generic constants",
+ )?,
+ ExprKind::Adt(_) => self.maybe_supported_error(
+ node.span,
+ "struct/enum construction is not supported in generic constants",
+ )?,
+            // don't know if this is correct
+ ExprKind::Pointer { .. } =>
+ self.error(node.span, "pointer casts are not allowed in generic constants")?,
+ ExprKind::Yield { .. } =>
+ self.error(node.span, "generator control flow is not allowed in generic constants")?,
+ ExprKind::Continue { .. } | ExprKind::Break { .. } | ExprKind::Loop { .. } => self
+ .error(
+ node.span,
+ "loops and loop control flow are not supported in generic constants",
+ )?,
+ ExprKind::Box { .. } =>
+ self.error(node.span, "allocations are not allowed in generic constants")?,
+
+ ExprKind::Unary { .. } => unreachable!(),
+ // we handle valid unary/binary ops above
+ ExprKind::Binary { .. } =>
+ self.error(node.span, "unsupported binary operation in generic constants")?,
+ ExprKind::LogicalOp { .. } =>
+                self.error(node.span, "unsupported operation in generic constants; short-circuiting operations would imply control flow")?,
+ ExprKind::Assign { .. } | ExprKind::AssignOp { .. } => {
+ self.error(node.span, "assignment is not supported in generic constants")?
+ }
+ ExprKind::Closure { .. } | ExprKind::Return { .. } => self.error(
+ node.span,
+ "closures and function keywords are not supported in generic constants",
+ )?,
+ // let expressions imply control flow
+ ExprKind::Match { .. } | ExprKind::If { .. } | ExprKind::Let { .. } =>
+ self.error(node.span, "control flow is not supported in generic constants")?,
+ ExprKind::InlineAsm { .. } => {
+ self.error(node.span, "assembly is not supported in generic constants")?
+ }
+
+            // we don't permit `let` stmts, so `VarRef` and `UpvarRef` can't happen
+ ExprKind::VarRef { .. }
+ | ExprKind::UpvarRef { .. }
+ | ExprKind::StaticRef { .. }
+ | ExprKind::ThreadLocalRef(_) => {
+ self.error(node.span, "unsupported operation in generic constant")?
+ }
+ })
+ }
+}
+
+/// Builds an abstract const; do not use this directly, but use `AbstractConst::new` instead.
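+///
+/// As a rough sketch (node order is an implementation detail), building the
+/// abstract const for `N + 1` pushes `Leaf(N)`, then `Leaf(1)`, then a
+/// `Binop(Add, ..)` node referencing the two leaves.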
+pub fn thir_abstract_const<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+) -> Result<Option<&'tcx [Node<'tcx>]>, ErrorGuaranteed> {
+ if tcx.features().generic_const_exprs {
+ match tcx.def_kind(def.did) {
+ // FIXME(generic_const_exprs): We currently only do this for anonymous constants,
+        // meaning that we do not look into associated constants. I (@lcnr) am not yet sure whether
+ // we want to look into them or treat them as opaque projections.
+ //
+ // Right now we do neither of that and simply always fail to unify them.
+ DefKind::AnonConst | DefKind::InlineConst => (),
+ _ => return Ok(None),
+ }
+
+ let body = tcx.thir_body(def)?;
+
+ AbstractConstBuilder::new(tcx, (&*body.0.borrow(), body.1))?
+ .map(AbstractConstBuilder::build)
+ .transpose()
+ } else {
+ Ok(None)
+ }
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers {
+ destructure_const,
+ thir_abstract_const: |tcx, def_id| {
+ let def_id = def_id.expect_local();
+ if let Some(def) = ty::WithOptConstParam::try_lookup(def_id, tcx) {
+ tcx.thir_abstract_const_of_const_arg(def)
+ } else {
+ thir_abstract_const(tcx, ty::WithOptConstParam::unknown(def_id))
+ }
+ },
+ thir_abstract_const_of_const_arg: |tcx, (did, param_did)| {
+ thir_abstract_const(
+ tcx,
+ ty::WithOptConstParam { did, const_param_did: Some(param_did) },
+ )
+ },
+ ..*providers
+ };
+}
diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs
new file mode 100644
index 000000000..bd1d568cd
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/instance.rs
@@ -0,0 +1,407 @@
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::traits::CodegenObligationError;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{
+ self, Binder, Instance, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
+};
+use rustc_span::{sym, DUMMY_SP};
+use rustc_trait_selection::traits;
+use traits::{translate_substs, Reveal};
+
+use rustc_data_structures::sso::SsoHashSet;
+use std::collections::btree_map::Entry;
+use std::collections::BTreeMap;
+use std::ops::ControlFlow;
+
+use tracing::debug;
+
+// FIXME(#86795): `BoundVarsCollector` here should **NOT** be used
+// outside of `resolve_associated_item`. It's just to address #64494,
+// #83765, and #85848 which are creating bound types/regions that lose
+// their `Binder` *unintentionally*.
+// Ideally we would remove `BoundVarsCollector` and just use
+// `ty::Binder::*` methods, but we use this stopgap until we figure out
+// the "real" fix.
+struct BoundVarsCollector<'tcx> {
+ binder_index: ty::DebruijnIndex,
+ vars: BTreeMap<u32, ty::BoundVariableKind>,
+ // We may encounter the same variable at different levels of binding, so
+ // this can't just be `Ty`
+ visited: SsoHashSet<(ty::DebruijnIndex, Ty<'tcx>)>,
+}
+
+impl<'tcx> BoundVarsCollector<'tcx> {
+ fn new() -> Self {
+ BoundVarsCollector {
+ binder_index: ty::INNERMOST,
+ vars: BTreeMap::new(),
+ visited: SsoHashSet::default(),
+ }
+ }
+
+ fn into_vars(self, tcx: TyCtxt<'tcx>) -> &'tcx ty::List<ty::BoundVariableKind> {
+ let max = self.vars.iter().map(|(k, _)| *k).max().unwrap_or(0);
+ for i in 0..max {
+            if self.vars.get(&i).is_none() {
+ panic!("Unknown variable: {:?}", i);
+ }
+ }
+
+ tcx.mk_bound_variable_kinds(self.vars.into_iter().map(|(_, v)| v))
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for BoundVarsCollector<'tcx> {
+ type BreakTy = ();
+
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.binder_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.binder_index.shift_out(1);
+ result
+ }
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if t.outer_exclusive_binder() < self.binder_index
+ || !self.visited.insert((self.binder_index, t))
+ {
+ return ControlFlow::CONTINUE;
+ }
+ match *t.kind() {
+ ty::Bound(debruijn, bound_ty) if debruijn == self.binder_index => {
+ match self.vars.entry(bound_ty.var.as_u32()) {
+ Entry::Vacant(entry) => {
+ entry.insert(ty::BoundVariableKind::Ty(bound_ty.kind));
+ }
+ Entry::Occupied(entry) => match entry.get() {
+ ty::BoundVariableKind::Ty(_) => {}
+ _ => bug!("Conflicting bound vars"),
+ },
+ }
+ }
+
+ _ => (),
+ };
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ ty::ReLateBound(index, br) if index == self.binder_index => {
+ match self.vars.entry(br.var.as_u32()) {
+ Entry::Vacant(entry) => {
+ entry.insert(ty::BoundVariableKind::Region(br.kind));
+ }
+ Entry::Occupied(entry) => match entry.get() {
+ ty::BoundVariableKind::Region(_) => {}
+ _ => bug!("Conflicting bound vars"),
+ },
+ }
+ }
+
+ _ => (),
+ };
+
+ r.super_visit_with(self)
+ }
+}
+
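+/// Resolves `(DefId, SubstsRef)` to a callable `Instance` in `param_env`, if
+/// possible: e.g. picking the implementing method for a trait item, an
+/// intrinsic, or the appropriate drop-glue shim for `drop_in_place`.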
+fn resolve_instance<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, (DefId, SubstsRef<'tcx>)>,
+) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
+ let (param_env, (did, substs)) = key.into_parts();
+ if let Some(did) = did.as_local() {
+ if let Some(param_did) = tcx.opt_const_param_of(did) {
+ return tcx.resolve_instance_of_const_arg(param_env.and((did, param_did, substs)));
+ }
+ }
+
+ inner_resolve_instance(tcx, param_env.and((ty::WithOptConstParam::unknown(did), substs)))
+}
+
+fn resolve_instance_of_const_arg<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, (LocalDefId, DefId, SubstsRef<'tcx>)>,
+) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
+ let (param_env, (did, const_param_did, substs)) = key.into_parts();
+ inner_resolve_instance(
+ tcx,
+ param_env.and((
+ ty::WithOptConstParam { did: did.to_def_id(), const_param_did: Some(const_param_did) },
+ substs,
+ )),
+ )
+}
+
+fn inner_resolve_instance<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, (ty::WithOptConstParam<DefId>, SubstsRef<'tcx>)>,
+) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
+ let (param_env, (def, substs)) = key.into_parts();
+
+ let result = if let Some(trait_def_id) = tcx.trait_of_item(def.did) {
+ debug!(" => associated item, attempting to find impl in param_env {:#?}", param_env);
+ resolve_associated_item(tcx, def.did, param_env, trait_def_id, substs)
+ } else {
+ let ty = tcx.type_of(def.def_id_for_type_of());
+ let item_type = tcx.subst_and_normalize_erasing_regions(substs, param_env, ty);
+
+ let def = match *item_type.kind() {
+ ty::FnDef(def_id, ..) if tcx.is_intrinsic(def_id) => {
+ debug!(" => intrinsic");
+ ty::InstanceDef::Intrinsic(def.did)
+ }
+ ty::FnDef(def_id, substs) if Some(def_id) == tcx.lang_items().drop_in_place_fn() => {
+ let ty = substs.type_at(0);
+
+ if ty.needs_drop(tcx, param_env) {
+ debug!(" => nontrivial drop glue");
+ match *ty.kind() {
+ ty::Closure(..)
+ | ty::Generator(..)
+ | ty::Tuple(..)
+ | ty::Adt(..)
+ | ty::Dynamic(..)
+ | ty::Array(..)
+ | ty::Slice(..) => {}
+ // Drop shims can only be built from ADTs.
+ _ => return Ok(None),
+ }
+
+ ty::InstanceDef::DropGlue(def_id, Some(ty))
+ } else {
+ debug!(" => trivial drop glue");
+ ty::InstanceDef::DropGlue(def_id, None)
+ }
+ }
+ _ => {
+ debug!(" => free item");
+ ty::InstanceDef::Item(def)
+ }
+ };
+ Ok(Some(Instance { def, substs }))
+ };
+ debug!("inner_resolve_instance: result={:?}", result);
+ result
+}
+
+fn resolve_associated_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_item_id: DefId,
+ param_env: ty::ParamEnv<'tcx>,
+ trait_id: DefId,
+ rcvr_substs: SubstsRef<'tcx>,
+) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
+ debug!(?trait_item_id, ?param_env, ?trait_id, ?rcvr_substs, "resolve_associated_item");
+
+ let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
+
+ // See FIXME on `BoundVarsCollector`.
+ let mut bound_vars_collector = BoundVarsCollector::new();
+ trait_ref.visit_with(&mut bound_vars_collector);
+ let trait_binder = ty::Binder::bind_with_vars(trait_ref, bound_vars_collector.into_vars(tcx));
+ let vtbl = match tcx.codegen_fulfill_obligation((param_env, trait_binder)) {
+ Ok(vtbl) => vtbl,
+ Err(CodegenObligationError::Ambiguity) => {
+ let reported = tcx.sess.delay_span_bug(
+ tcx.def_span(trait_item_id),
+ &format!(
+ "encountered ambiguity selecting `{:?}` during codegen, presuming due to \
+ overflow or prior type error",
+ trait_binder
+ ),
+ );
+ return Err(reported);
+ }
+ Err(CodegenObligationError::Unimplemented) => return Ok(None),
+ Err(CodegenObligationError::FulfillmentError) => return Ok(None),
+ };
+
+ // Now that we know which impl is being used, we can dispatch to
+ // the actual function:
+ Ok(match vtbl {
+ traits::ImplSource::UserDefined(impl_data) => {
+ debug!(
+ "resolving ImplSource::UserDefined: {:?}, {:?}, {:?}, {:?}",
+ param_env, trait_item_id, rcvr_substs, impl_data
+ );
+ assert!(!rcvr_substs.needs_infer());
+ assert!(!trait_ref.needs_infer());
+
+ let trait_def_id = tcx.trait_id_of_impl(impl_data.impl_def_id).unwrap();
+ let trait_def = tcx.trait_def(trait_def_id);
+ let leaf_def = trait_def
+ .ancestors(tcx, impl_data.impl_def_id)?
+ .leaf_def(tcx, trait_item_id)
+ .unwrap_or_else(|| {
+ bug!("{:?} not found in {:?}", trait_item_id, impl_data.impl_def_id);
+ });
+
+ let substs = tcx.infer_ctxt().enter(|infcx| {
+ let param_env = param_env.with_reveal_all_normalized(tcx);
+ let substs = rcvr_substs.rebase_onto(tcx, trait_def_id, impl_data.substs);
+ let substs = translate_substs(
+ &infcx,
+ param_env,
+ impl_data.impl_def_id,
+ substs,
+ leaf_def.defining_node,
+ );
+ infcx.tcx.erase_regions(substs)
+ });
+
+ // Since this is a trait item, we need to see if the item is either a trait default item
+ // or a specialization because we can't resolve those unless we can `Reveal::All`.
+ // NOTE: This should be kept in sync with the similar code in
+ // `rustc_trait_selection::traits::project::assemble_candidates_from_impls()`.
+ let eligible = if leaf_def.is_final() {
+ // Non-specializable items are always projectable.
+ true
+ } else {
+ // Only reveal a specializable default if we're past type-checking
+ // and the obligation is monomorphic, otherwise passes such as
+ // transmute checking and polymorphic MIR optimizations could
+ // get a result which isn't correct for all monomorphizations.
+ if param_env.reveal() == Reveal::All {
+ !trait_ref.still_further_specializable()
+ } else {
+ false
+ }
+ };
+
+ if !eligible {
+ return Ok(None);
+ }
+
+ // If the item does not have a value, then we cannot return an instance.
+ if !leaf_def.item.defaultness(tcx).has_value() {
+ return Ok(None);
+ }
+
+ let substs = tcx.erase_regions(substs);
+
+ // Check if we just resolved an associated `const` declaration from
+ // a `trait` to an associated `const` definition in an `impl`, where
+ // the definition in the `impl` has the wrong type (for which an
+ // error has already been/will be emitted elsewhere).
+ //
+ // NB: this may be expensive, we try to skip it in all the cases where
+ // we know the error would've been caught (e.g. in an upstream crate).
+ //
+ // A better approach might be to just introduce a query (returning
+ // `Result<(), ErrorGuaranteed>`) for the check that `rustc_typeck`
+ // performs (i.e. that the definition's type in the `impl` matches
+ // the declaration in the `trait`), so that we can cheaply check
+ // here if it failed, instead of approximating it.
+ if leaf_def.item.kind == ty::AssocKind::Const
+ && trait_item_id != leaf_def.item.def_id
+ && leaf_def.item.def_id.is_local()
+ {
+ let normalized_type_of = |def_id, substs| {
+ tcx.subst_and_normalize_erasing_regions(substs, param_env, tcx.type_of(def_id))
+ };
+
+ let original_ty = normalized_type_of(trait_item_id, rcvr_substs);
+ let resolved_ty = normalized_type_of(leaf_def.item.def_id, substs);
+
+ if original_ty != resolved_ty {
+ let msg = format!(
+ "Instance::resolve: inconsistent associated `const` type: \
+ was `{}: {}` but resolved to `{}: {}`",
+ tcx.def_path_str_with_substs(trait_item_id, rcvr_substs),
+ original_ty,
+ tcx.def_path_str_with_substs(leaf_def.item.def_id, substs),
+ resolved_ty,
+ );
+ let span = tcx.def_span(leaf_def.item.def_id);
+ let reported = tcx.sess.delay_span_bug(span, &msg);
+
+ return Err(reported);
+ }
+ }
+
+ Some(ty::Instance::new(leaf_def.item.def_id, substs))
+ }
+ traits::ImplSource::Generator(generator_data) => Some(Instance {
+ def: ty::InstanceDef::Item(ty::WithOptConstParam::unknown(
+ generator_data.generator_def_id,
+ )),
+ substs: generator_data.substs,
+ }),
+ traits::ImplSource::Closure(closure_data) => {
+ let trait_closure_kind = tcx.fn_trait_kind_from_lang_item(trait_id).unwrap();
+ Instance::resolve_closure(
+ tcx,
+ closure_data.closure_def_id,
+ closure_data.substs,
+ trait_closure_kind,
+ )
+ }
+ traits::ImplSource::FnPointer(ref data) => match data.fn_ty.kind() {
+ ty::FnDef(..) | ty::FnPtr(..) => Some(Instance {
+ def: ty::InstanceDef::FnPtrShim(trait_item_id, data.fn_ty),
+ substs: rcvr_substs,
+ }),
+ _ => None,
+ },
+ traits::ImplSource::Object(ref data) => {
+ if let Some(index) = traits::get_vtable_index_of_object_method(tcx, data, trait_item_id)
+ {
+ Some(Instance {
+ def: ty::InstanceDef::Virtual(trait_item_id, index),
+ substs: rcvr_substs,
+ })
+ } else {
+ None
+ }
+ }
+ traits::ImplSource::Builtin(..) => {
+ if Some(trait_ref.def_id) == tcx.lang_items().clone_trait() {
+ // FIXME(eddyb) use lang items for methods instead of names.
+ let name = tcx.item_name(trait_item_id);
+ if name == sym::clone {
+ let self_ty = trait_ref.self_ty();
+
+ let is_copy = self_ty.is_copy_modulo_regions(tcx.at(DUMMY_SP), param_env);
+ match self_ty.kind() {
+ _ if is_copy => (),
+ ty::Closure(..) | ty::Tuple(..) => {}
+ _ => return Ok(None),
+ };
+
+ Some(Instance {
+ def: ty::InstanceDef::CloneShim(trait_item_id, self_ty),
+ substs: rcvr_substs,
+ })
+ } else {
+ assert_eq!(name, sym::clone_from);
+
+ // Use the default `fn clone_from` from `trait Clone`.
+ let substs = tcx.erase_regions(rcvr_substs);
+ Some(ty::Instance::new(trait_item_id, substs))
+ }
+ } else {
+ None
+ }
+ }
+ traits::ImplSource::AutoImpl(..)
+ | traits::ImplSource::Param(..)
+ | traits::ImplSource::TraitAlias(..)
+ | traits::ImplSource::DiscriminantKind(..)
+ | traits::ImplSource::Pointee(..)
+ | traits::ImplSource::TraitUpcasting(_)
+ | traits::ImplSource::ConstDestruct(_) => None,
+ })
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers =
+ ty::query::Providers { resolve_instance, resolve_instance_of_const_arg, ..*providers };
+}
diff --git a/compiler/rustc_ty_utils/src/lib.rs b/compiler/rustc_ty_utils/src/lib.rs
new file mode 100644
index 000000000..09f5c2a11
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/lib.rs
@@ -0,0 +1,36 @@
+//! Various checks
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(control_flow_enum)]
+#![feature(let_else)]
+#![feature(never_type)]
+#![feature(box_patterns)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate rustc_middle;
+#[macro_use]
+extern crate tracing;
+
+use rustc_middle::ty::query::Providers;
+
+mod assoc;
+mod common_traits;
+mod consts;
+pub mod instance;
+mod needs_drop;
+pub mod representability;
+mod ty;
+
+pub fn provide(providers: &mut Providers) {
+ assoc::provide(providers);
+ common_traits::provide(providers);
+ consts::provide(providers);
+ needs_drop::provide(providers);
+ ty::provide(providers);
+ instance::provide(providers);
+}
diff --git a/compiler/rustc_ty_utils/src/needs_drop.rs b/compiler/rustc_ty_utils/src/needs_drop.rs
new file mode 100644
index 000000000..9ad44d14d
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/needs_drop.rs
@@ -0,0 +1,323 @@
+//! Check whether a type has (potentially) non-trivial drop glue.
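+//!
+//! For illustration (not exhaustive): a struct with a `String` field needs
+//! drop because `String` does, while `union`s and `ManuallyDrop` suppress the
+//! drop glue of their contents (see `drop_tys_helper` below).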
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::util::{needs_drop_components, AlwaysRequiresDrop};
+use rustc_middle::ty::{self, EarlyBinder, Ty, TyCtxt};
+use rustc_session::Limit;
+use rustc_span::{sym, DUMMY_SP};
+
+type NeedsDropResult<T> = Result<T, AlwaysRequiresDrop>;
+
+fn needs_drop_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ // If we don't know a type doesn't need drop, for example if it's a type
+ // parameter without a `Copy` bound, then we conservatively return that it
+ // needs drop.
+ let adt_has_dtor =
+ |adt_def: ty::AdtDef<'tcx>| adt_def.destructor(tcx).map(|_| DtorType::Significant);
+ let res =
+ drop_tys_helper(tcx, query.value, query.param_env, adt_has_dtor, false).next().is_some();
+
+ debug!("needs_drop_raw({:?}) = {:?}", query, res);
+ res
+}
+
+fn has_significant_drop_raw<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> bool {
+ let res = drop_tys_helper(
+ tcx,
+ query.value,
+ query.param_env,
+ adt_consider_insignificant_dtor(tcx),
+ true,
+ )
+ .next()
+ .is_some();
+ debug!("has_significant_drop_raw({:?}) = {:?}", query, res);
+ res
+}
+
+struct NeedsDropTypes<'tcx, F> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ query_ty: Ty<'tcx>,
+ seen_tys: FxHashSet<Ty<'tcx>>,
+ /// A stack of types left to process, and the recursion depth when we
+ /// pushed that type. Each round, we pop something from the stack and check
+ /// if it needs drop. If the result depends on whether some other types
+ /// need drop we push them onto the stack.
+ unchecked_tys: Vec<(Ty<'tcx>, usize)>,
+ recursion_limit: Limit,
+ adt_components: F,
+}
+
+impl<'tcx, F> NeedsDropTypes<'tcx, F> {
+ fn new(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ adt_components: F,
+ ) -> Self {
+ let mut seen_tys = FxHashSet::default();
+ seen_tys.insert(ty);
+ Self {
+ tcx,
+ param_env,
+ seen_tys,
+ query_ty: ty,
+ unchecked_tys: vec![(ty, 0)],
+ recursion_limit: tcx.recursion_limit(),
+ adt_components,
+ }
+ }
+}
+
+impl<'tcx, F, I> Iterator for NeedsDropTypes<'tcx, F>
+where
+ F: Fn(ty::AdtDef<'tcx>, SubstsRef<'tcx>) -> NeedsDropResult<I>,
+ I: Iterator<Item = Ty<'tcx>>,
+{
+ type Item = NeedsDropResult<Ty<'tcx>>;
+
+ fn next(&mut self) -> Option<NeedsDropResult<Ty<'tcx>>> {
+ let tcx = self.tcx;
+
+ while let Some((ty, level)) = self.unchecked_tys.pop() {
+ if !self.recursion_limit.value_within_limit(level) {
+ // Not having a `Span` isn't great. But there's hopefully some other
+ // recursion limit error as well.
+ tcx.sess.span_err(
+ DUMMY_SP,
+ &format!("overflow while checking whether `{}` requires drop", self.query_ty),
+ );
+ return Some(Err(AlwaysRequiresDrop));
+ }
+
+ let components = match needs_drop_components(ty, &tcx.data_layout) {
+ Err(e) => return Some(Err(e)),
+ Ok(components) => components,
+ };
+ debug!("needs_drop_components({:?}) = {:?}", ty, components);
+
+ let queue_type = move |this: &mut Self, component: Ty<'tcx>| {
+ if this.seen_tys.insert(component) {
+ this.unchecked_tys.push((component, level + 1));
+ }
+ };
+
+ for component in components {
+ match *component.kind() {
+ _ if component.is_copy_modulo_regions(tcx.at(DUMMY_SP), self.param_env) => (),
+
+ ty::Closure(_, substs) => {
+ queue_type(self, substs.as_closure().tupled_upvars_ty());
+ }
+
+ ty::Generator(def_id, substs, _) => {
+ let substs = substs.as_generator();
+ queue_type(self, substs.tupled_upvars_ty());
+
+ let witness = substs.witness();
+ let interior_tys = match witness.kind() {
+ &ty::GeneratorWitness(tys) => tcx.erase_late_bound_regions(tys),
+ _ => {
+ tcx.sess.delay_span_bug(
+ tcx.hir().span_if_local(def_id).unwrap_or(DUMMY_SP),
+ &format!("unexpected generator witness type {:?}", witness),
+ );
+ return Some(Err(AlwaysRequiresDrop));
+ }
+ };
+
+ for interior_ty in interior_tys {
+ queue_type(self, interior_ty);
+ }
+ }
+
+ // Check for a `Drop` impl and whether this is a union or
+ // `ManuallyDrop`. If it's a struct or enum without a `Drop`
+ // impl then check whether the field types need `Drop`.
+ ty::Adt(adt_def, substs) => {
+ let tys = match (self.adt_components)(adt_def, substs) {
+ Err(e) => return Some(Err(e)),
+ Ok(tys) => tys,
+ };
+ for required_ty in tys {
+ let required = tcx
+ .try_normalize_erasing_regions(self.param_env, required_ty)
+ .unwrap_or(required_ty);
+
+ queue_type(self, required);
+ }
+ }
+ ty::Array(..) | ty::Opaque(..) | ty::Projection(..) | ty::Param(_) => {
+ if ty == component {
+ // Return the type to the caller: they may be able
+ // to normalize further than we can.
+ return Some(Ok(component));
+ } else {
+ // Store the type for later. We can't return here
+ // because we would then lose any other components
+ // of the type.
+ queue_type(self, component);
+ }
+ }
+ _ => return Some(Err(AlwaysRequiresDrop)),
+ }
+ }
+ }
+
+ None
+ }
+}
+
+enum DtorType {
+ /// Type has a `Drop` but it is considered insignificant.
+ /// Check the query `adt_significant_drop_tys` for understanding
+ /// "significant" / "insignificant".
+ Insignificant,
+
+    /// Type has a `Drop` implementation.
+ Significant,
+}
+
+// This is a helper function for `adt_drop_tys` and `adt_significant_drop_tys`.
+// Depending on the implementation of `adt_has_dtor`, it is used to check if the
+// ADT has a destructor or if the ADT only has a significant destructor. For an
+// explanation of "significant" destructors, see `adt_significant_drop_tys`.
+fn drop_tys_helper<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ param_env: rustc_middle::ty::ParamEnv<'tcx>,
+ adt_has_dtor: impl Fn(ty::AdtDef<'tcx>) -> Option<DtorType>,
+ only_significant: bool,
+) -> impl Iterator<Item = NeedsDropResult<Ty<'tcx>>> {
+ fn with_query_cache<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ iter: impl IntoIterator<Item = Ty<'tcx>>,
+ ) -> NeedsDropResult<Vec<Ty<'tcx>>> {
+ iter.into_iter().try_fold(Vec::new(), |mut vec, subty| {
+ match subty.kind() {
+ ty::Adt(adt_id, subst) => {
+ for subty in tcx.adt_drop_tys(adt_id.did())? {
+ vec.push(EarlyBinder(subty).subst(tcx, subst));
+ }
+ }
+ _ => vec.push(subty),
+ };
+ Ok(vec)
+ })
+ }
+
+ let adt_components = move |adt_def: ty::AdtDef<'tcx>, substs: SubstsRef<'tcx>| {
+ if adt_def.is_manually_drop() {
+ debug!("drop_tys_helper: `{:?}` is manually drop", adt_def);
+ Ok(Vec::new())
+ } else if let Some(dtor_info) = adt_has_dtor(adt_def) {
+ match dtor_info {
+ DtorType::Significant => {
+ debug!("drop_tys_helper: `{:?}` implements `Drop`", adt_def);
+ Err(AlwaysRequiresDrop)
+ }
+ DtorType::Insignificant => {
+ debug!("drop_tys_helper: `{:?}` drop is insignificant", adt_def);
+
+ // Since the destructor is insignificant, we just want to make sure all of
+ // the passed in type parameters are also insignificant.
+                    // E.g., `Vec<T>`'s dtor is insignificant when `T = i32` but significant when `T = Mutex`.
+ Ok(substs.types().collect())
+ }
+ }
+ } else if adt_def.is_union() {
+ debug!("drop_tys_helper: `{:?}` is a union", adt_def);
+ Ok(Vec::new())
+ } else {
+ let field_tys = adt_def.all_fields().map(|field| {
+ let r = tcx.bound_type_of(field.did).subst(tcx, substs);
+            debug!("drop_tys_helper: Subst into {:?} with {:?} getting {:?}", field, substs, r);
+ r
+ });
+ if only_significant {
+ // We can't recurse through the query system here because we might induce a cycle
+ Ok(field_tys.collect())
+ } else {
+ // We can use the query system if we consider all drops significant. In that case,
+ // ADTs are `needs_drop` exactly if they `impl Drop` or if any of their "transitive"
+ // fields do. There can be no cycles here, because ADTs cannot contain themselves as
+ // fields.
+ with_query_cache(tcx, field_tys)
+ }
+ }
+ .map(|v| v.into_iter())
+ };
+
+ NeedsDropTypes::new(tcx, param_env, ty, adt_components)
+}
+
+fn adt_consider_insignificant_dtor<'tcx>(
+ tcx: TyCtxt<'tcx>,
+) -> impl Fn(ty::AdtDef<'tcx>) -> Option<DtorType> + 'tcx {
+ move |adt_def: ty::AdtDef<'tcx>| {
+ let is_marked_insig = tcx.has_attr(adt_def.did(), sym::rustc_insignificant_dtor);
+ if is_marked_insig {
+            // In some cases, like `std::collections::HashMap`, the struct is a wrapper around
+            // a type that is a Drop type, and the wrapped type (e.g. `hashbrown::HashMap`) lies
+            // outside the stdlib. We might then choose to still annotate the wrapper (std `HashMap`)
+            // with `rustc_insignificant_dtor`, even if the wrapper itself doesn't have a `Drop` impl.
+ Some(DtorType::Insignificant)
+ } else if adt_def.destructor(tcx).is_some() {
+ // There is a Drop impl and the type isn't marked insignificant, therefore Drop must be
+ // significant.
+ Some(DtorType::Significant)
+ } else {
+            // Neither was a destructor found nor is the type annotated with
+            // `rustc_insignificant_dtor`; treat this as the simple case of a type
+            // with no `Drop` impl.
+ None
+ }
+ }
+}
+
+fn adt_drop_tys<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> Result<&ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
+    // This is for the "adt_drop_tys" query, which considers all `Drop` impls, therefore all dtors are
+ // significant.
+ let adt_has_dtor =
+ |adt_def: ty::AdtDef<'tcx>| adt_def.destructor(tcx).map(|_| DtorType::Significant);
+ // `tcx.type_of(def_id)` identical to `tcx.make_adt(def, identity_substs)`
+ drop_tys_helper(tcx, tcx.type_of(def_id), tcx.param_env(def_id), adt_has_dtor, false)
+ .collect::<Result<Vec<_>, _>>()
+ .map(|components| tcx.intern_type_list(&components))
+}
+// If `def_id` refers to a generic ADT, the queries above and below act as if they had been handed
+// a `tcx.make_ty(def, identity_substs)`, and as such it is legal to substitute the generic
+// parameters of the ADT into the returned `ty`s.
+fn adt_significant_drop_tys(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+) -> Result<&ty::List<Ty<'_>>, AlwaysRequiresDrop> {
+ drop_tys_helper(
+ tcx,
+ tcx.type_of(def_id), // identical to `tcx.make_adt(def, identity_substs)`
+ tcx.param_env(def_id),
+ adt_consider_insignificant_dtor(tcx),
+ true,
+ )
+ .collect::<Result<Vec<_>, _>>()
+ .map(|components| tcx.intern_type_list(&components))
+}
+
+pub(crate) fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers {
+ needs_drop_raw,
+ has_significant_drop_raw,
+ adt_drop_tys,
+ adt_significant_drop_tys,
+ ..*providers
+ };
+}
diff --git a/compiler/rustc_ty_utils/src/representability.rs b/compiler/rustc_ty_utils/src/representability.rs
new file mode 100644
index 000000000..eded78916
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/representability.rs
@@ -0,0 +1,386 @@
+//! Check whether a type is representable.
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::Span;
+use std::cmp;
+
+/// Describes whether a type is representable. For types that are not
+/// representable, 'SelfRecursive' and 'ContainsRecursive' are used to
+/// distinguish between types that are recursive with themselves and types that
+/// contain a different recursive type. These cases can therefore be treated
+/// differently when reporting errors.
+///
+/// The ordering of the cases is significant. They are sorted so that cmp::max
+/// will keep the "more erroneous" of two values.
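+///
+/// For example, `struct Foo { x: Foo }` is 'SelfRecursive', a type embedding
+/// such a `Foo` is 'ContainsRecursive', and boxing the recursive field makes
+/// the type 'Representable' again.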
+#[derive(Clone, PartialOrd, Ord, Eq, PartialEq, Debug)]
+pub enum Representability {
+ Representable,
+ ContainsRecursive,
+ /// Return a list of types that are included in themselves:
+ /// the spans where they are self-included, and (if found)
+ /// the HirId of the FieldDef that defines the self-inclusion.
+ SelfRecursive(Vec<(Span, Option<hir::HirId>)>),
+}
+
+/// Check whether a type is representable. This means it cannot contain unboxed
+/// structural recursion. This check is needed for structs and enums.
+pub fn ty_is_representable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ sp: Span,
+ field_id: Option<hir::HirId>,
+) -> Representability {
+ debug!("is_type_representable: {:?}", ty);
+ // To avoid a stack overflow when checking an enum variant or struct that
+ // contains a different, structurally recursive type, maintain a stack of
+ // seen types and check recursion for each of them (issues #3008, #3779,
+ // #74224, #84611). `shadow_seen` contains the full stack and `seen` only
+ // the one for the current type (e.g. if we have structs A and B, B contains
+ // a field of type A, and we're currently looking at B, then `seen` will be
+ // cleared when recursing to check A, but `shadow_seen` won't, so that we
+ // can catch cases of mutual recursion where A also contains B).
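+ //
+ // Rough illustrative trace for that example: checking B starts with
+ // seen = [B] and shadow_seen = [B]; recursing into the field of type A
+ // clears `seen` to [A] but grows `shadow_seen` to [B, A], so running into
+ // B again inside A is still caught via `shadow_seen`.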
+ let mut seen: Vec<Ty<'_>> = Vec::new();
+ let mut shadow_seen: Vec<ty::AdtDef<'tcx>> = Vec::new();
+ let mut representable_cache = FxHashMap::default();
+ let mut force_result = false;
+ let r = is_type_structurally_recursive(
+ tcx,
+ &mut seen,
+ &mut shadow_seen,
+ &mut representable_cache,
+ ty,
+ sp,
+ field_id,
+ &mut force_result,
+ );
+ debug!("is_type_representable: {:?} is {:?}", ty, r);
+ r
+}
+
+// Iterate until something non-representable is found
+fn fold_repr<It: Iterator<Item = Representability>>(iter: It) -> Representability {
+ iter.fold(Representability::Representable, |r1, r2| match (r1, r2) {
+ (Representability::SelfRecursive(v1), Representability::SelfRecursive(v2)) => {
+ Representability::SelfRecursive(v1.into_iter().chain(v2).collect())
+ }
+ (r1, r2) => cmp::max(r1, r2),
+ })
+}
+
+fn are_inner_types_recursive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ seen: &mut Vec<Ty<'tcx>>,
+ shadow_seen: &mut Vec<ty::AdtDef<'tcx>>,
+ representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
+ ty: Ty<'tcx>,
+ sp: Span,
+ field_id: Option<hir::HirId>,
+ force_result: &mut bool,
+) -> Representability {
+ debug!("are_inner_types_recursive({:?}, {:?}, {:?})", ty, seen, shadow_seen);
+ match ty.kind() {
+ ty::Tuple(fields) => {
+ // Find non-representable types
+ fold_repr(fields.iter().map(|ty| {
+ is_type_structurally_recursive(
+ tcx,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ sp,
+ field_id,
+ force_result,
+ )
+ }))
+ }
+ // Fixed-length vectors.
+ // FIXME(#11924) Behavior undecided for zero-length vectors.
+ ty::Array(ty, _) => is_type_structurally_recursive(
+ tcx,
+ seen,
+ shadow_seen,
+ representable_cache,
+ *ty,
+ sp,
+ field_id,
+ force_result,
+ ),
+ ty::Adt(def, substs) => {
+ // Find non-representable fields with their spans
+ fold_repr(def.all_fields().map(|field| {
+ let ty = field.ty(tcx, substs);
+ let (sp, field_id) = match field
+ .did
+ .as_local()
+ .map(|id| tcx.hir().local_def_id_to_hir_id(id))
+ .and_then(|id| tcx.hir().find(id))
+ {
+ Some(hir::Node::Field(field)) => (field.ty.span, Some(field.hir_id)),
+ _ => (sp, field_id),
+ };
+
+ let mut result = None;
+
+ // First, we check whether the field type per se is representable.
+ // This catches cases as in #74224 and #84611. There is a special
+ // case related to mutual recursion, though; consider this example:
+ //
+ // struct A<T> {
+ // z: T,
+ // x: B<T>,
+ // }
+ //
+ // struct B<T> {
+ // y: A<T>
+ // }
+ //
+ // Here, without the following special case, both A and B are
+ // ContainsRecursive, which is a problem because we only report
+ // errors for SelfRecursive. We fix this by detecting this special
+ // case (shadow_seen.first() is the type we are originally
+ // interested in, and if we ever encounter the same AdtDef again,
+ // we know that it must be SelfRecursive) and "forcibly" returning
+ // SelfRecursive (by setting force_result, which tells the calling
+ // invocations of `are_inner_types_recursive` to forward the
+ // result without adjusting).
+ if shadow_seen.len() > seen.len() && shadow_seen.first() == Some(def) {
+ *force_result = true;
+ result = Some(Representability::SelfRecursive(vec![(sp, field_id)]));
+ }
+
+ if result == None {
+ result = Some(Representability::Representable);
+
+ // Now, we check whether the field types per se are representable, e.g.
+ // for struct Foo { x: Option<Foo> }, we first check whether Option<_>
+ // by itself is representable (which it is), and the nesting of Foo
+ // will be detected later. This is necessary for #74224 and #84611.
+
+ // If we have encountered an ADT definition that we have not seen
+ // before (no need to check them twice), recurse to see whether that
+ // definition is SelfRecursive. If so, we must be ContainsRecursive.
+ if shadow_seen.len() > 1
+ && !shadow_seen
+ .iter()
+ .take(shadow_seen.len() - 1)
+ .any(|seen_def| seen_def == def)
+ {
+ let adt_def_id = def.did();
+ let raw_adt_ty = tcx.type_of(adt_def_id);
+ debug!("are_inner_types_recursive: checking nested type: {:?}", raw_adt_ty);
+
+ // Check independently whether the ADT is SelfRecursive. If so,
+ // we must be ContainsRecursive (except for the special case
+ // mentioned above).
+ let mut nested_seen: Vec<Ty<'_>> = vec![];
+ result = Some(
+ match is_type_structurally_recursive(
+ tcx,
+ &mut nested_seen,
+ shadow_seen,
+ representable_cache,
+ raw_adt_ty,
+ sp,
+ field_id,
+ force_result,
+ ) {
+ Representability::SelfRecursive(_) => {
+ if *force_result {
+ Representability::SelfRecursive(vec![(sp, field_id)])
+ } else {
+ Representability::ContainsRecursive
+ }
+ }
+ x => x,
+ },
+ );
+ }
+
+ // We only enter the following block if the type looks representable
+ // so far. This is necessary for cases such as this one (#74224):
+ //
+ // struct A<T> {
+ // x: T,
+ // y: A<A<T>>,
+ // }
+ //
+ // struct B {
+ // z: A<usize>
+ // }
+ //
+ // When checking B, we recurse into A and check field y of type
+ // A<A<usize>>. We haven't seen this exact type before, so we recurse
+ // into A<A<usize>>, which contains A<A<A<usize>>>, and so forth,
+ // ad infinitum. We can prevent this from happening by first checking
+ // A separately (the code above) and only checking for nested Bs if
+ // A actually looks representable (which it wouldn't in this example).
+ if result == Some(Representability::Representable) {
+ // Now, even if the type is representable (e.g. Option<_>),
+ // it might still contribute to a recursive type, e.g.:
+ // struct Foo { x: Option<Option<Foo>> }
+ // These cases are handled by passing the full `seen`
+ // stack to is_type_structurally_recursive (instead of the
+ // empty `nested_seen` above):
+ result = Some(
+ match is_type_structurally_recursive(
+ tcx,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ sp,
+ field_id,
+ force_result,
+ ) {
+ Representability::SelfRecursive(_) => {
+ Representability::SelfRecursive(vec![(sp, field_id)])
+ }
+ x => x,
+ },
+ );
+ }
+ }
+
+ result.unwrap()
+ }))
+ }
+ ty::Closure(..) => {
+ // this check is run on type definitions, so we don't expect
+ // to see closure types
+ bug!("requires check invoked on inapplicable type: {:?}", ty)
+ }
+ _ => Representability::Representable,
+ }
+}
+
+fn same_adt<'tcx>(ty: Ty<'tcx>, def: ty::AdtDef<'tcx>) -> bool {
+ match *ty.kind() {
+ ty::Adt(ty_def, _) => ty_def == def,
+ _ => false,
+ }
+}
+
+// Does the type `ty` directly (without indirection through a pointer)
+// contain any types on stack `seen`?
+fn is_type_structurally_recursive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ seen: &mut Vec<Ty<'tcx>>,
+ shadow_seen: &mut Vec<ty::AdtDef<'tcx>>,
+ representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
+ ty: Ty<'tcx>,
+ sp: Span,
+ field_id: Option<hir::HirId>,
+ force_result: &mut bool,
+) -> Representability {
+ debug!("is_type_structurally_recursive: {:?} {:?} {:?}", ty, sp, field_id);
+ if let Some(representability) = representable_cache.get(&ty) {
+ debug!(
+ "is_type_structurally_recursive: {:?} {:?} {:?} - (cached) {:?}",
+ ty, sp, field_id, representability
+ );
+ return representability.clone();
+ }
+
+ let representability = is_type_structurally_recursive_inner(
+ tcx,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ sp,
+ field_id,
+ force_result,
+ );
+
+ representable_cache.insert(ty, representability.clone());
+ representability
+}
+
+fn is_type_structurally_recursive_inner<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ seen: &mut Vec<Ty<'tcx>>,
+ shadow_seen: &mut Vec<ty::AdtDef<'tcx>>,
+ representable_cache: &mut FxHashMap<Ty<'tcx>, Representability>,
+ ty: Ty<'tcx>,
+ sp: Span,
+ field_id: Option<hir::HirId>,
+ force_result: &mut bool,
+) -> Representability {
+ match ty.kind() {
+ ty::Adt(def, _) => {
+ {
+ debug!("is_type_structurally_recursive_inner: adt: {:?}, seen: {:?}", ty, seen);
+
+ // Iterate through stack of previously seen types.
+ let mut iter = seen.iter();
+
+ // The first item in `seen` is the type we are actually curious about.
+ // We want to return SelfRecursive if this type contains itself.
+ // It is important that we DON'T take generic parameters into account
+ // for this check, so that Bar<T> in this example counts as SelfRecursive:
+ //
+ // struct Foo;
+ // struct Bar<T> { x: Bar<Foo> }
+
+ if let Some(&seen_adt) = iter.next() {
+ if same_adt(seen_adt, *def) {
+ debug!("SelfRecursive: {:?} contains {:?}", seen_adt, ty);
+ return Representability::SelfRecursive(vec![(sp, field_id)]);
+ }
+ }
+
+ // We also need to know whether the first item contains other types
+ // that are structurally recursive. If we don't catch this case, we
+ // will recurse infinitely for some inputs.
+ //
+ // It is important that we DO take generic parameters into account
+ // here, because nesting e.g. Options is allowed (as long as the
+ // definition of Option doesn't itself include an Option field, which
+ // would be a case of SelfRecursive above). The following, too, counts
+ // as SelfRecursive:
+ //
+ // struct Foo { x: Option<Option<Foo>> }
+
+ for &seen_adt in iter {
+ if ty == seen_adt {
+ debug!("ContainsRecursive: {:?} contains {:?}", seen_adt, ty);
+ return Representability::ContainsRecursive;
+ }
+ }
+ }
+
+ // For structs and enums, track all previously seen types by pushing them
+ // onto the 'seen' stack.
+ seen.push(ty);
+ shadow_seen.push(*def);
+ let out = are_inner_types_recursive(
+ tcx,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ sp,
+ field_id,
+ force_result,
+ );
+ shadow_seen.pop();
+ seen.pop();
+ out
+ }
+ _ => {
+ // No need to push in other cases.
+ are_inner_types_recursive(
+ tcx,
+ seen,
+ shadow_seen,
+ representable_cache,
+ ty,
+ sp,
+ field_id,
+ force_result,
+ )
+ }
+ }
+}
diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs
new file mode 100644
index 000000000..db0d45b86
--- /dev/null
+++ b/compiler/rustc_ty_utils/src/ty.rs
@@ -0,0 +1,481 @@
+use rustc_data_structures::fx::FxIndexSet;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::{self, Binder, Predicate, PredicateKind, ToPredicate, Ty, TyCtxt};
+use rustc_trait_selection::traits;
+
+fn sized_constraint_for_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ adtdef: ty::AdtDef<'tcx>,
+ ty: Ty<'tcx>,
+) -> Vec<Ty<'tcx>> {
+ use rustc_type_ir::sty::TyKind::*;
+
+ let result = match ty.kind() {
+ Bool | Char | Int(..) | Uint(..) | Float(..) | RawPtr(..) | Ref(..) | FnDef(..)
+ | FnPtr(_) | Array(..) | Closure(..) | Generator(..) | Never => vec![],
+
+ Str | Dynamic(..) | Slice(_) | Foreign(..) | Error(_) | GeneratorWitness(..) => {
+ // these are never sized - return the target type
+ vec![ty]
+ }
+
+ Tuple(ref tys) => match tys.last() {
+ None => vec![],
+ Some(&ty) => sized_constraint_for_ty(tcx, adtdef, ty),
+ },
+
+ Adt(adt, substs) => {
+ // recursive case
+ let adt_tys = adt.sized_constraint(tcx);
+ debug!("sized_constraint_for_ty({:?}) intermediate = {:?}", ty, adt_tys);
+ adt_tys
+ .0
+ .iter()
+ .map(|ty| adt_tys.rebind(*ty).subst(tcx, substs))
+ .flat_map(|ty| sized_constraint_for_ty(tcx, adtdef, ty))
+ .collect()
+ }
+
+ Projection(..) | Opaque(..) => {
+ // must calculate explicitly.
+ // FIXME: consider special-casing always-Sized projections
+ vec![ty]
+ }
+
+ Param(..) => {
+ // perf hack: if there is a `T: Sized` bound, then
+ // we know that `T` is Sized and do not need to check
+ // it on the impl.
+
+ let Some(sized_trait) = tcx.lang_items().sized_trait() else { return vec![ty] };
+ let sized_predicate = ty::Binder::dummy(ty::TraitRef {
+ def_id: sized_trait,
+ substs: tcx.mk_substs_trait(ty, &[]),
+ })
+ .without_const()
+ .to_predicate(tcx);
+ let predicates = tcx.predicates_of(adtdef.did()).predicates;
+ if predicates.iter().any(|(p, _)| *p == sized_predicate) { vec![] } else { vec![ty] }
+ }
+
+ Placeholder(..) | Bound(..) | Infer(..) => {
+ bug!("unexpected type `{:?}` in sized_constraint_for_ty", ty)
+ }
+ };
+ debug!("sized_constraint_for_ty({:?}) = {:?}", ty, result);
+ result
+}
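+
+// Illustrative: for a hypothetical `struct S<T: ?Sized>(u8, T)`, only the last
+// field can be unsized, so the constraint computed for `S` is `[T]` (via the
+// `Param` arm above); adding a `T: Sized` bound makes that arm return `[]`.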
+
+fn impl_defaultness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::Defaultness {
+ match tcx.hir().get_by_def_id(def_id.expect_local()) {
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(impl_), .. }) => impl_.defaultness,
+ hir::Node::ImplItem(hir::ImplItem { defaultness, .. })
+ | hir::Node::TraitItem(hir::TraitItem { defaultness, .. }) => *defaultness,
+ node => {
+ bug!("`impl_defaultness` called on {:?}", node);
+ }
+ }
+}
+
+/// Calculates the `Sized` constraint.
+///
+/// In fact, there are only a few options for the types in the constraint:
+/// - an obviously-unsized type
+/// - a type parameter or projection whose Sizedness can't be known
+/// - a tuple of type parameters or projections, if there are multiple
+/// such.
+/// - an Error, if a type contained itself. The representability
+/// check should catch this case.
+fn adt_sized_constraint(tcx: TyCtxt<'_>, def_id: DefId) -> ty::AdtSizedConstraint<'_> {
+ let def = tcx.adt_def(def_id);
+
+ let result = tcx.mk_type_list(
+ def.variants()
+ .iter()
+ .flat_map(|v| v.fields.last())
+ .flat_map(|f| sized_constraint_for_ty(tcx, def, tcx.type_of(f.did))),
+ );
+
+ debug!("adt_sized_constraint: {:?} => {:?}", def, result);
+
+ ty::AdtSizedConstraint(result)
+}
+
+/// See `ParamEnv` struct definition for details.
+#[instrument(level = "debug", skip(tcx))]
+fn param_env(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
+ // The param_env of an impl Trait type is its defining function's param_env
+ if let Some(parent) = ty::is_impl_trait_defn(tcx, def_id) {
+ return param_env(tcx, parent.to_def_id());
+ }
+ // Compute the bounds on Self and the type parameters.
+
+ let ty::InstantiatedPredicates { mut predicates, .. } =
+ tcx.predicates_of(def_id).instantiate_identity(tcx);
+
+ // Finally, we have to normalize the bounds in the environment, in
+ // case they contain any associated type projections. This process
+ // can yield errors if they put in illegal associated types, like
+ // `<i32 as Foo>::Bar` where `i32` does not implement `Foo`. We
+ // report these errors right here; this doesn't actually feel
+ // right to me, because constructing the environment feels like a
+ // kind of an "idempotent" action, but I'm not sure where would be
+ // a better place. In practice, we construct environments for
+ // every fn once during type checking, and we'll abort if there
+ // are any errors at that point, so outside of type inference you can be
+ // sure that this will succeed without errors anyway.
+
+ if tcx.sess.opts.unstable_opts.chalk {
+ let environment = well_formed_types_in_env(tcx, def_id);
+ predicates.extend(environment);
+ }
+
+ let local_did = def_id.as_local();
+ let hir_id = local_did.map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id));
+
+ let constness = match hir_id {
+ Some(hir_id) => match tcx.hir().get(hir_id) {
+ hir::Node::TraitItem(hir::TraitItem { kind: hir::TraitItemKind::Fn(..), .. })
+ if tcx.is_const_default_method(def_id) =>
+ {
+ hir::Constness::Const
+ }
+
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(..), .. })
+ | hir::Node::Item(hir::Item { kind: hir::ItemKind::Static(..), .. })
+ | hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Const(..), ..
+ })
+ | hir::Node::AnonConst(_)
+ | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. })
+ | hir::Node::ImplItem(hir::ImplItem {
+ kind:
+ hir::ImplItemKind::Fn(
+ hir::FnSig {
+ header: hir::FnHeader { constness: hir::Constness::Const, .. },
+ ..
+ },
+ ..,
+ ),
+ ..
+ }) => hir::Constness::Const,
+
+ hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::TyAlias(..) | hir::ImplItemKind::Fn(..),
+ ..
+ }) => {
+ let parent_hir_id = tcx.hir().get_parent_node(hir_id);
+ match tcx.hir().get(parent_hir_id) {
+ hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { constness, .. }),
+ ..
+ }) => *constness,
+ _ => span_bug!(
+ tcx.def_span(parent_hir_id.owner),
+ "impl item's parent node is not an impl",
+ ),
+ }
+ }
+
+ hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Fn(hir::FnSig { header: hir::FnHeader { constness, .. }, .. }, ..),
+ ..
+ })
+ | hir::Node::TraitItem(hir::TraitItem {
+ kind:
+ hir::TraitItemKind::Fn(
+ hir::FnSig { header: hir::FnHeader { constness, .. }, .. },
+ ..,
+ ),
+ ..
+ })
+ | hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { constness, .. }),
+ ..
+ }) => *constness,
+
+ _ => hir::Constness::NotConst,
+ },
+ None => hir::Constness::NotConst,
+ };
+
+ let unnormalized_env = ty::ParamEnv::new(
+ tcx.intern_predicates(&predicates),
+ traits::Reveal::UserFacing,
+ constness,
+ );
+
+ let body_id =
+ local_did.and_then(|id| tcx.hir().maybe_body_owned_by(id).map(|body| body.hir_id));
+ let body_id = match body_id {
+ Some(id) => id,
+ None if hir_id.is_some() => hir_id.unwrap(),
+ _ => hir::CRATE_HIR_ID,
+ };
+
+ let cause = traits::ObligationCause::misc(tcx.def_span(def_id), body_id);
+ traits::normalize_param_env_or_error(tcx, unnormalized_env, cause)
+}
+
+/// Elaborate the environment.
+///
+/// Collect a list of `Predicate`s used for building the `ParamEnv`. Adds `TypeWellFormedFromEnv`s
+/// that are assumed to be well-formed (because they come from the environment).
+///
+/// Used only in chalk mode.
+fn well_formed_types_in_env<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+) -> &'tcx ty::List<Predicate<'tcx>> {
+ use rustc_hir::{ForeignItemKind, ImplItemKind, ItemKind, Node, TraitItemKind};
+ use rustc_middle::ty::subst::GenericArgKind;
+
+ debug!("environment(def_id = {:?})", def_id);
+
+ // The environment of an impl Trait type is its defining function's environment.
+ if let Some(parent) = ty::is_impl_trait_defn(tcx, def_id) {
+ return well_formed_types_in_env(tcx, parent.to_def_id());
+ }
+
+ // Compute the bounds on `Self` and the type parameters.
+ let ty::InstantiatedPredicates { predicates, .. } =
+ tcx.predicates_of(def_id).instantiate_identity(tcx);
+
+ let clauses = predicates.into_iter();
+
+ if !def_id.is_local() {
+ return ty::List::empty();
+ }
+ let node = tcx.hir().get_by_def_id(def_id.expect_local());
+
+ enum NodeKind {
+ TraitImpl,
+ InherentImpl,
+ Fn,
+ Other,
+ }
+
+ let node_kind = match node {
+ Node::TraitItem(item) => match item.kind {
+ TraitItemKind::Fn(..) => NodeKind::Fn,
+ _ => NodeKind::Other,
+ },
+
+ Node::ImplItem(item) => match item.kind {
+ ImplItemKind::Fn(..) => NodeKind::Fn,
+ _ => NodeKind::Other,
+ },
+
+ Node::Item(item) => match item.kind {
+ ItemKind::Impl(hir::Impl { of_trait: Some(_), .. }) => NodeKind::TraitImpl,
+ ItemKind::Impl(hir::Impl { of_trait: None, .. }) => NodeKind::InherentImpl,
+ ItemKind::Fn(..) => NodeKind::Fn,
+ _ => NodeKind::Other,
+ },
+
+ Node::ForeignItem(item) => match item.kind {
+ ForeignItemKind::Fn(..) => NodeKind::Fn,
+ _ => NodeKind::Other,
+ },
+
+ // FIXME: closures?
+ _ => NodeKind::Other,
+ };
+
+ // FIXME(eddyb) isn't the unordered nature of this a hazard?
+ let mut inputs = FxIndexSet::default();
+
+ match node_kind {
+ // In a trait impl, we assume that the header trait ref and all its
+ // constituents are well-formed.
+ NodeKind::TraitImpl => {
+ let trait_ref = tcx.impl_trait_ref(def_id).expect("not an impl");
+
+ // FIXME(chalk): this has problems because of late-bound regions
+ //inputs.extend(trait_ref.substs.iter().flat_map(|arg| arg.walk()));
+ inputs.extend(trait_ref.substs.iter());
+ }
+
+ // In an inherent impl, we assume that the receiver type and all its
+ // constituents are well-formed.
+ NodeKind::InherentImpl => {
+ let self_ty = tcx.type_of(def_id);
+ inputs.extend(self_ty.walk());
+ }
+
+ // In a fn, we assume that the arguments and all their constituents are
+ // well-formed.
+ NodeKind::Fn => {
+ let fn_sig = tcx.fn_sig(def_id);
+ let fn_sig = tcx.liberate_late_bound_regions(def_id, fn_sig);
+
+ inputs.extend(fn_sig.inputs().iter().flat_map(|ty| ty.walk()));
+ }
+
+ NodeKind::Other => (),
+ }
+ let input_clauses = inputs.into_iter().filter_map(|arg| {
+ match arg.unpack() {
+ GenericArgKind::Type(ty) => {
+ let binder = Binder::dummy(PredicateKind::TypeWellFormedFromEnv(ty));
+ Some(tcx.mk_predicate(binder))
+ }
+
+ // FIXME(eddyb) no WF conditions from lifetimes?
+ GenericArgKind::Lifetime(_) => None,
+
+ // FIXME(eddyb) support const generics in Chalk
+ GenericArgKind::Const(_) => None,
+ }
+ });
+
+ tcx.mk_predicates(clauses.chain(input_clauses))
+}
+
+fn param_env_reveal_all_normalized(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ParamEnv<'_> {
+ tcx.param_env(def_id).with_reveal_all_normalized(tcx)
+}
+
+fn instance_def_size_estimate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance_def: ty::InstanceDef<'tcx>,
+) -> usize {
+ use ty::InstanceDef;
+
+ match instance_def {
+ InstanceDef::Item(..) | InstanceDef::DropGlue(..) => {
+ let mir = tcx.instance_mir(instance_def);
+ mir.basic_blocks().iter().map(|bb| bb.statements.len() + 1).sum()
+ }
+ // Estimate the size of other compiler-generated shims to be 1.
+ _ => 1,
+ }
+}
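+
+// Illustrative: a MIR body with two basic blocks of 3 and 5 statements is
+// estimated as (3 + 1) + (5 + 1) = 10, the `+ 1` accounting for each block's
+// terminator.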
+
+/// If `def_id` is an issue 33140 hack impl, returns its self type; otherwise, returns `None`.
+///
+/// See [`ty::ImplOverlapKind::Issue33140`] for more details.
+fn issue33140_self_ty(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Ty<'_>> {
+ debug!("issue33140_self_ty({:?})", def_id);
+
+ let trait_ref = tcx
+ .impl_trait_ref(def_id)
+ .unwrap_or_else(|| bug!("issue33140_self_ty called on inherent impl {:?}", def_id));
+
+ debug!("issue33140_self_ty({:?}), trait-ref={:?}", def_id, trait_ref);
+
+ let is_marker_like = tcx.impl_polarity(def_id) == ty::ImplPolarity::Positive
+ && tcx.associated_item_def_ids(trait_ref.def_id).is_empty();
+
+ // Check whether these impls would be ok for a marker trait.
+ if !is_marker_like {
+ debug!("issue33140_self_ty - not marker-like!");
+ return None;
+ }
+
+ // impl must be `impl Trait for dyn Marker1 + Marker2 + ...`
+ if trait_ref.substs.len() != 1 {
+ debug!("issue33140_self_ty - impl has substs!");
+ return None;
+ }
+
+ let predicates = tcx.predicates_of(def_id);
+ if predicates.parent.is_some() || !predicates.predicates.is_empty() {
+ debug!("issue33140_self_ty - impl has predicates {:?}!", predicates);
+ return None;
+ }
+
+ let self_ty = trait_ref.self_ty();
+ let self_ty_matches = match self_ty.kind() {
+ ty::Dynamic(ref data, re) if re.is_static() => data.principal().is_none(),
+ _ => false,
+ };
+
+ if self_ty_matches {
+ debug!("issue33140_self_ty - MATCHES!");
+ Some(self_ty)
+ } else {
+ debug!("issue33140_self_ty - non-matching self type");
+ None
+ }
+}
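+
+// Illustrative shape accepted by the checks above (hypothetical trait):
+//
+//     trait Marker {}                        // positive impl, no associated items
+//     impl Marker for dyn Send + 'static {}  // no principal trait, static region
+//
+// i.e. an auto-trait-only `dyn` self type with no generics or predicates on
+// the impl.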
+
+/// Check if a function is async.
+fn asyncness(tcx: TyCtxt<'_>, def_id: DefId) -> hir::IsAsync {
+ let node = tcx.hir().get_by_def_id(def_id.expect_local());
+ if let Some(fn_kind) = node.fn_kind() { fn_kind.asyncness() } else { hir::IsAsync::NotAsync }
+}
+
+/// Don't call this directly: use `tcx.conservative_is_privately_uninhabited` instead.
+#[instrument(level = "debug", skip(tcx))]
+pub fn conservative_is_privately_uninhabited_raw<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env_and: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> bool {
+ let (param_env, ty) = param_env_and.into_parts();
+ match ty.kind() {
+ ty::Never => {
+ debug!("ty::Never =>");
+ true
+ }
+ ty::Adt(def, _) if def.is_union() => {
+ debug!("ty::Adt(def, _) if def.is_union() =>");
+ // For now, `union`s are never considered uninhabited.
+ false
+ }
+ ty::Adt(def, substs) => {
+ debug!("ty::Adt(def, _) if def.is_not_union() =>");
+ // Any ADT is uninhabited if either:
+ // (a) It has no variants (i.e. an empty `enum`);
+ // (b) Each of its variants (a single one in the case of a `struct`) has at least
+ // one uninhabited field.
+ def.variants().iter().all(|var| {
+ var.fields.iter().any(|field| {
+ let ty = tcx.bound_type_of(field.did).subst(tcx, substs);
+ tcx.conservative_is_privately_uninhabited(param_env.and(ty))
+ })
+ })
+ }
+ ty::Tuple(fields) => {
+ debug!("ty::Tuple(..) =>");
+ fields.iter().any(|ty| tcx.conservative_is_privately_uninhabited(param_env.and(ty)))
+ }
+ ty::Array(ty, len) => {
+ debug!("ty::Array(ty, len) =>");
+ match len.try_eval_usize(tcx, param_env) {
+ Some(0) | None => false,
+ // If the array is definitely non-empty, it's uninhabited if
+ // the type of its elements is uninhabited.
+ Some(1..) => tcx.conservative_is_privately_uninhabited(param_env.and(*ty)),
+ }
+ }
+ ty::Ref(..) => {
+ debug!("ty::Ref(..) =>");
+ // References to uninitialised memory are valid for any type, including
+ // uninhabited types, in unsafe code, so we treat all references as
+ // inhabited.
+ false
+ }
+ _ => {
+ debug!("_ =>");
+ false
+ }
+ }
+}
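+
+// Illustrative verdicts from the match above: `(u32, !)` counts as privately
+// uninhabited (one tuple field is), `[!; 0]` does not (its length falls in the
+// `Some(0) | None` arm), and `&!` does not (references are always treated as
+// inhabited here).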
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers {
+ asyncness,
+ adt_sized_constraint,
+ param_env,
+ param_env_reveal_all_normalized,
+ instance_def_size_estimate,
+ issue33140_self_ty,
+ impl_defaultness,
+ conservative_is_privately_uninhabited: conservative_is_privately_uninhabited_raw,
+ ..*providers
+ };
+}
diff --git a/compiler/rustc_type_ir/Cargo.toml b/compiler/rustc_type_ir/Cargo.toml
new file mode 100644
index 000000000..5aa3cf017
--- /dev/null
+++ b/compiler/rustc_type_ir/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "rustc_type_ir"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+bitflags = "1.2.1"
+rustc_index = { path = "../rustc_index" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_macros = { path = "../rustc_macros" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
diff --git a/compiler/rustc_type_ir/src/codec.rs b/compiler/rustc_type_ir/src/codec.rs
new file mode 100644
index 000000000..ee249050c
--- /dev/null
+++ b/compiler/rustc_type_ir/src/codec.rs
@@ -0,0 +1,64 @@
+use crate::Interner;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_serialize::{Decoder, Encoder};
+
+/// The shorthand encoding uses an enum's variant index `usize`
+/// and is offset by this value so it never matches a real variant.
+/// This offset is also chosen so that the first byte is never < 0x80.
+pub const SHORTHAND_OFFSET: usize = 0x80;
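+
+// Illustrative: a shorthand pointing at byte position `pos` is written as
+// `pos + SHORTHAND_OFFSET`, so its leading byte always has the high bit set;
+// `TyDecoder::positioned_at_shorthand` below merely tests that bit.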
+
+/// Trait for decoding to a reference.
+///
+/// This is a separate trait from `Decodable` so that we can implement it for
+/// upstream types, such as `FxHashSet`.
+///
+/// The `TyDecodable` derive macro will use this trait for fields that are
+/// references (and don't use a type alias to hide that).
+///
+/// `Decodable` can still be implemented in cases where `Decodable` is required
+/// by a trait bound.
+pub trait RefDecodable<'tcx, D: TyDecoder> {
+ fn decode(d: &mut D) -> &'tcx Self;
+}
+
+pub trait TyEncoder: Encoder {
+ type I: Interner;
+ const CLEAR_CROSS_CRATE: bool;
+
+ fn position(&self) -> usize;
+ fn type_shorthands(&mut self) -> &mut FxHashMap<<Self::I as Interner>::Ty, usize>;
+ fn predicate_shorthands(
+ &mut self,
+ ) -> &mut FxHashMap<<Self::I as Interner>::PredicateKind, usize>;
+ fn encode_alloc_id(&mut self, alloc_id: &<Self::I as Interner>::AllocId);
+}
+
+pub trait TyDecoder: Decoder {
+ type I: Interner;
+ const CLEAR_CROSS_CRATE: bool;
+
+ fn interner(&self) -> Self::I;
+
+ fn peek_byte(&self) -> u8;
+
+ fn position(&self) -> usize;
+
+ fn cached_ty_for_shorthand<F>(
+ &mut self,
+ shorthand: usize,
+ or_insert_with: F,
+ ) -> <Self::I as Interner>::Ty
+ where
+ F: FnOnce(&mut Self) -> <Self::I as Interner>::Ty;
+
+ fn with_position<F, R>(&mut self, pos: usize, f: F) -> R
+ where
+ F: FnOnce(&mut Self) -> R;
+
+ fn positioned_at_shorthand(&self) -> bool {
+ (self.peek_byte() & (SHORTHAND_OFFSET as u8)) != 0
+ }
+
+ fn decode_alloc_id(&mut self) -> <Self::I as Interner>::AllocId;
+}
diff --git a/compiler/rustc_type_ir/src/lib.rs b/compiler/rustc_type_ir/src/lib.rs
new file mode 100644
index 000000000..791e9e0f5
--- /dev/null
+++ b/compiler/rustc_type_ir/src/lib.rs
@@ -0,0 +1,856 @@
+#![feature(fmt_helpers_for_derive)]
+#![feature(min_specialization)]
+#![feature(rustc_attrs)]
+
+#[macro_use]
+extern crate bitflags;
+#[macro_use]
+extern crate rustc_macros;
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::unify::{EqUnifyValue, UnifyKey};
+use smallvec::SmallVec;
+use std::fmt;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::mem::discriminant;
+
+pub mod codec;
+pub mod sty;
+
+pub use codec::*;
+pub use sty::*;
+
+pub trait Interner {
+ type AdtDef: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type SubstsRef: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type DefId: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type Ty: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type Const: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type Region: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type TypeAndMut: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type Mutability: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type Movability: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type PolyFnSig: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type ListBinderExistentialPredicate: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type BinderListTy: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type ListTy: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type ProjectionTy: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type ParamTy: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type BoundTy: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type PlaceholderType: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type InferTy: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type DelaySpanBugEmitted: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type PredicateKind: Clone + Debug + Hash + PartialEq + Eq;
+ type AllocId: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+
+ type EarlyBoundRegion: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type BoundRegion: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type FreeRegion: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type RegionVid: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+ type PlaceholderRegion: Clone + Debug + Hash + PartialEq + Eq + PartialOrd + Ord;
+}
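+
+// Illustrative note: an implementor instantiates every associated type above.
+// A toy interner could use plain owned types (e.g. `type Ty = Box<TyKind<Self>>`),
+// whereas rustc's real interner (`TyCtxt`) hands out arena-allocated,
+// deduplicated values so that equality checks are cheap comparisons.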
+
+pub trait InternAs<T: ?Sized, R> {
+ type Output;
+ fn intern_with<F>(self, f: F) -> Self::Output
+ where
+ F: FnOnce(&T) -> R;
+}
+
+impl<I, T, R, E> InternAs<[T], R> for I
+where
+ E: InternIteratorElement<T, R>,
+ I: Iterator<Item = E>,
+{
+ type Output = E::Output;
+ fn intern_with<F>(self, f: F) -> Self::Output
+ where
+ F: FnOnce(&[T]) -> R,
+ {
+ E::intern_with(self, f)
+ }
+}
+
+pub trait InternIteratorElement<T, R>: Sized {
+ type Output;
+ fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output;
+}
+
+impl<T, R> InternIteratorElement<T, R> for T {
+ type Output = R;
+ fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(
+ mut iter: I,
+ f: F,
+ ) -> Self::Output {
+ // This code is hot enough that it's worth specializing for the most
+ // common length lists, to avoid the overhead of `SmallVec` creation.
+ // Lengths 0, 1, and 2 typically account for ~95% of cases. If
+ // `size_hint` is incorrect a panic will occur via an `unwrap` or an
+ // `assert`.
+ match iter.size_hint() {
+ (0, Some(0)) => {
+ assert!(iter.next().is_none());
+ f(&[])
+ }
+ (1, Some(1)) => {
+ let t0 = iter.next().unwrap();
+ assert!(iter.next().is_none());
+ f(&[t0])
+ }
+ (2, Some(2)) => {
+ let t0 = iter.next().unwrap();
+ let t1 = iter.next().unwrap();
+ assert!(iter.next().is_none());
+ f(&[t0, t1])
+ }
+ _ => f(&iter.collect::<SmallVec<[_; 8]>>()),
+ }
+ }
+}
+
+impl<'a, T, R> InternIteratorElement<T, R> for &'a T
+where
+ T: Clone + 'a,
+{
+ type Output = R;
+ fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(iter: I, f: F) -> Self::Output {
+ // This code isn't hot.
+ f(&iter.cloned().collect::<SmallVec<[_; 8]>>())
+ }
+}
+
+impl<T, R, E> InternIteratorElement<T, R> for Result<T, E> {
+ type Output = Result<R, E>;
+ fn intern_with<I: Iterator<Item = Self>, F: FnOnce(&[T]) -> R>(
+ mut iter: I,
+ f: F,
+ ) -> Self::Output {
+ // This code is hot enough that it's worth specializing for the most
+ // common length lists, to avoid the overhead of `SmallVec` creation.
+ // Lengths 0, 1, and 2 typically account for ~95% of cases. If
+ // `size_hint` is incorrect a panic will occur via an `unwrap` or an
+ // `assert`, unless a failure happens first, in which case the result
+ // will be an error anyway.
+ Ok(match iter.size_hint() {
+ (0, Some(0)) => {
+ assert!(iter.next().is_none());
+ f(&[])
+ }
+ (1, Some(1)) => {
+ let t0 = iter.next().unwrap()?;
+ assert!(iter.next().is_none());
+ f(&[t0])
+ }
+ (2, Some(2)) => {
+ let t0 = iter.next().unwrap()?;
+ let t1 = iter.next().unwrap()?;
+ assert!(iter.next().is_none());
+ f(&[t0, t1])
+ }
+ _ => f(&iter.collect::<Result<SmallVec<[_; 8]>, _>>()?),
+ })
+ }
+}
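+
+// Illustrative usage (`intern_slice` is a hypothetical stand-in for a real
+// interner entry point such as `tcx.intern_type_list`): any iterator,
+// including one over `Result`s, can be funneled through a slice-consuming
+// function:
+//
+//     let interned = [t0, t1].into_iter().intern_with(|slice| intern_slice(slice));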
+
+bitflags! {
+ /// Flags that we track on types. These flags are propagated upwards
+ /// through the type during type construction, so that we can quickly check
+ /// whether the type has various kinds of types in it without recursing
+ /// over the type itself.
+ pub struct TypeFlags: u32 {
+ // Does this have parameters? Used to determine whether substitution is
+ // required.
+ /// Does this have `Param`?
+ const HAS_TY_PARAM = 1 << 0;
+ /// Does this have `ReEarlyBound`?
+ const HAS_RE_PARAM = 1 << 1;
+ /// Does this have `ConstKind::Param`?
+ const HAS_CT_PARAM = 1 << 2;
+
+ const NEEDS_SUBST = TypeFlags::HAS_TY_PARAM.bits
+ | TypeFlags::HAS_RE_PARAM.bits
+ | TypeFlags::HAS_CT_PARAM.bits;
+
+ /// Does this have `Infer`?
+ const HAS_TY_INFER = 1 << 3;
+ /// Does this have `ReVar`?
+ const HAS_RE_INFER = 1 << 4;
+ /// Does this have `ConstKind::Infer`?
+ const HAS_CT_INFER = 1 << 5;
+
+ /// Does this have inference variables? Used to determine whether
+ /// inference is required.
+ const NEEDS_INFER = TypeFlags::HAS_TY_INFER.bits
+ | TypeFlags::HAS_RE_INFER.bits
+ | TypeFlags::HAS_CT_INFER.bits;
+
+ /// Does this have `Placeholder`?
+ const HAS_TY_PLACEHOLDER = 1 << 6;
+ /// Does this have `RePlaceholder`?
+ const HAS_RE_PLACEHOLDER = 1 << 7;
+ /// Does this have `ConstKind::Placeholder`?
+ const HAS_CT_PLACEHOLDER = 1 << 8;
+
+ /// `true` if there are "names" of regions and so forth
+ /// that are local to a particular fn/inferctxt
+ const HAS_FREE_LOCAL_REGIONS = 1 << 9;
+
+ /// `true` if there are "names" of types and regions and so forth
+ /// that are local to a particular fn
+ const HAS_FREE_LOCAL_NAMES = TypeFlags::HAS_TY_PARAM.bits
+ | TypeFlags::HAS_CT_PARAM.bits
+ | TypeFlags::HAS_TY_INFER.bits
+ | TypeFlags::HAS_CT_INFER.bits
+ | TypeFlags::HAS_TY_PLACEHOLDER.bits
+ | TypeFlags::HAS_CT_PLACEHOLDER.bits
+ // We consider 'freshened' types and constants
+ // to depend on a particular fn.
+ // The freshening process throws away information,
+ // which can make things unsuitable for use in a global
+ // cache. Note that there is no 'fresh lifetime' flag -
+ // freshening replaces all lifetimes with `ReErased`,
+ // which is different from how types/const are freshened.
+ | TypeFlags::HAS_TY_FRESH.bits
+ | TypeFlags::HAS_CT_FRESH.bits
+ | TypeFlags::HAS_FREE_LOCAL_REGIONS.bits;
+
+ /// Does this have `Projection`?
+ const HAS_TY_PROJECTION = 1 << 10;
+ /// Does this have `Opaque`?
+ const HAS_TY_OPAQUE = 1 << 11;
+ /// Does this have `ConstKind::Unevaluated`?
+ const HAS_CT_PROJECTION = 1 << 12;
+
+ /// Could this type be normalized further?
+ const HAS_PROJECTION = TypeFlags::HAS_TY_PROJECTION.bits
+ | TypeFlags::HAS_TY_OPAQUE.bits
+ | TypeFlags::HAS_CT_PROJECTION.bits;
+
+ /// Is an error type/const reachable?
+ const HAS_ERROR = 1 << 13;
+
+ /// Does this have any region that "appears free" in the type?
+ /// Basically anything but `ReLateBound` and `ReErased`.
+ const HAS_FREE_REGIONS = 1 << 14;
+
+ /// Does this have any `ReLateBound` regions? Used to check
+ /// if a global bound is safe to evaluate.
+ const HAS_RE_LATE_BOUND = 1 << 15;
+
+ /// Does this have any `ReErased` regions?
+ const HAS_RE_ERASED = 1 << 16;
+
+ /// Does this value have parameters/placeholders/inference variables which could be
+ /// replaced later, in a way that would change the results of `impl` specialization?
+ const STILL_FURTHER_SPECIALIZABLE = 1 << 17;
+
+ /// Does this value have `InferTy::FreshTy/FreshIntTy/FreshFloatTy`?
+ const HAS_TY_FRESH = 1 << 18;
+
+ /// Does this value have `InferConst::Fresh`?
+ const HAS_CT_FRESH = 1 << 19;
+ }
+}
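+
+// Illustrative: the flags are unioned as a type is constructed, so a type
+// mentioning a type parameter anywhere inside it has `HAS_TY_PARAM` (and hence
+// `NEEDS_SUBST`) set, letting "does this need substitution?" be answered in
+// O(1) without walking the type.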
+
+rustc_index::newtype_index! {
+ /// A [De Bruijn index][dbi] is a standard means of representing
+ /// regions (and perhaps later types) in a higher-ranked setting. In
+ /// particular, imagine a type like this:
+ /// ```ignore (illustrative)
+ /// for<'a> fn(for<'b> fn(&'b isize, &'a isize), &'a char)
+ /// // ^ ^ | | |
+ /// // | | | | |
+ /// // | +------------+ 0 | |
+ /// // | | |
+ /// // +----------------------------------+ 1 |
+ /// // | |
+ /// // +----------------------------------------------+ 0
+ /// ```
+ /// In this type, there are two binders (the outer fn and the inner
+ /// fn). We need to be able to determine, for any given region, which
+ /// fn type it is bound by, the inner or the outer one. There are
+ /// various ways you can do this, but a De Bruijn index is one of the
+ /// more convenient and has some nice properties. The basic idea is to
+ /// count the number of binders, inside out. Some examples should help
+ /// clarify what I mean.
+ ///
+ /// Let's start with the reference type `&'b isize` that is the first
+ /// argument to the inner function. This region `'b` is assigned a De
+ /// Bruijn index of 0, meaning "the innermost binder" (in this case, a
+ /// fn). The region `'a` that appears in the second argument type (`&'a
+ /// isize`) would then be assigned a De Bruijn index of 1, meaning "the
+ /// second-innermost binder". (These indices are written on the arrows
+ /// in the diagram).
+ ///
+ /// What is interesting is that De Bruijn index attached to a particular
+ /// variable will vary depending on where it appears. For example,
+ /// the final type `&'a char` also refers to the region `'a` declared on
+ /// the outermost fn. But this time, this reference is not nested within
+ /// any other binders (i.e., it is not an argument to the inner fn, but
+ /// rather the outer one). Therefore, in this case, it is assigned a
+ /// De Bruijn index of 0, because the innermost binder in that location
+ /// is the outer fn.
+ ///
+ /// [dbi]: https://en.wikipedia.org/wiki/De_Bruijn_index
+ pub struct DebruijnIndex {
+ DEBUG_FORMAT = "DebruijnIndex({})",
+ const INNERMOST = 0,
+ }
+}
+
+impl DebruijnIndex {
+ /// Returns the resulting index when this value is moved into
+ /// `amount` number of new binders. So, e.g., if you had
+ ///
+ /// for<'a> fn(&'a x)
+ ///
+ /// and you wanted to change it to
+ ///
+ /// for<'a> fn(for<'b> fn(&'a x))
+ ///
+ /// you would need to shift the index for `'a` into a new binder.
+ #[inline]
+ #[must_use]
+ pub fn shifted_in(self, amount: u32) -> DebruijnIndex {
+ DebruijnIndex::from_u32(self.as_u32() + amount)
+ }
+
+ /// Update this index in place by shifting it "in" through
+ /// `amount` number of binders.
+ #[inline]
+ pub fn shift_in(&mut self, amount: u32) {
+ *self = self.shifted_in(amount);
+ }
+
+ /// Returns the resulting index when this value is moved out from
+ /// `amount` number of new binders.
+ #[inline]
+ #[must_use]
+ pub fn shifted_out(self, amount: u32) -> DebruijnIndex {
+ DebruijnIndex::from_u32(self.as_u32() - amount)
+ }
+
+ /// Update in place by shifting out from `amount` binders.
+ #[inline]
+ pub fn shift_out(&mut self, amount: u32) {
+ *self = self.shifted_out(amount);
+ }
+
+ /// Adjusts any De Bruijn indices so as to make `to_binder` the
+ /// innermost binder. That is, if we have something bound at `to_binder`,
+ /// it will now be bound at INNERMOST. This is an appropriate thing to do
+ /// when moving a region out from inside binders:
+ ///
+ /// ```ignore (illustrative)
+ /// for<'a> fn(for<'b> for<'c> fn(&'a u32), _)
+ /// // Binder: D3 D2 D1 ^^
+ /// ```
+ ///
+ /// Here, the region `'a` would have the De Bruijn index D3,
+ /// because it is the bound 3 binders out. However, if we wanted
+ /// to refer to that region `'a` in the second argument (the `_`),
+ /// those two binders would not be in scope. In that case, we
+ /// might invoke `shift_out_to_binder(D3)`. This would adjust the
+ /// De Bruijn index of `'a` to D1 (the innermost binder).
+ ///
+ /// If we invoke `shift_out_to_binder` and the region is in fact
+ /// bound by one of the binders we are shifting out of, that is an
+ /// error (and should trigger an assertion failure).
+ #[inline]
+ pub fn shifted_out_to_binder(self, to_binder: DebruijnIndex) -> Self {
+ self.shifted_out(to_binder.as_u32() - INNERMOST.as_u32())
+ }
+}
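+
+// Illustrative check of the arithmetic above, using the accessors generated by
+// `newtype_index!`:
+//
+//     let d = DebruijnIndex::from_u32(0); // INNERMOST
+//     assert_eq!(d.shifted_in(2).as_u32(), 2);
+//     assert_eq!(d.shifted_in(2).shifted_out(2), d);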
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable)]
+pub enum IntTy {
+ Isize,
+ I8,
+ I16,
+ I32,
+ I64,
+ I128,
+}
+
+impl IntTy {
+ pub fn name_str(&self) -> &'static str {
+ match *self {
+ IntTy::Isize => "isize",
+ IntTy::I8 => "i8",
+ IntTy::I16 => "i16",
+ IntTy::I32 => "i32",
+ IntTy::I64 => "i64",
+ IntTy::I128 => "i128",
+ }
+ }
+
+ pub fn bit_width(&self) -> Option<u64> {
+ Some(match *self {
+ IntTy::Isize => return None,
+ IntTy::I8 => 8,
+ IntTy::I16 => 16,
+ IntTy::I32 => 32,
+ IntTy::I64 => 64,
+ IntTy::I128 => 128,
+ })
+ }
+
+ pub fn normalize(&self, target_width: u32) -> Self {
+ match self {
+ IntTy::Isize => match target_width {
+ 16 => IntTy::I16,
+ 32 => IntTy::I32,
+ 64 => IntTy::I64,
+ _ => unreachable!(),
+ },
+ _ => *self,
+ }
+ }
+}
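+
+// Illustrative: on a 64-bit target, `IntTy::Isize.normalize(64)` yields
+// `IntTy::I64`; fixed-width types pass through unchanged. `UintTy::normalize`
+// below mirrors this for `usize`.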
+
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Copy, Debug)]
+#[derive(Encodable, Decodable)]
+pub enum UintTy {
+ Usize,
+ U8,
+ U16,
+ U32,
+ U64,
+ U128,
+}
+
+impl UintTy {
+ pub fn name_str(&self) -> &'static str {
+ match *self {
+ UintTy::Usize => "usize",
+ UintTy::U8 => "u8",
+ UintTy::U16 => "u16",
+ UintTy::U32 => "u32",
+ UintTy::U64 => "u64",
+ UintTy::U128 => "u128",
+ }
+ }
+
+ pub fn bit_width(&self) -> Option<u64> {
+ Some(match *self {
+ UintTy::Usize => return None,
+ UintTy::U8 => 8,
+ UintTy::U16 => 16,
+ UintTy::U32 => 32,
+ UintTy::U64 => 64,
+ UintTy::U128 => 128,
+ })
+ }
+
+ pub fn normalize(&self, target_width: u32) -> Self {
+ match self {
+ UintTy::Usize => match target_width {
+ 16 => UintTy::U16,
+ 32 => UintTy::U32,
+ 64 => UintTy::U64,
+ _ => unreachable!(),
+ },
+ _ => *self,
+ }
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable)]
+pub enum FloatTy {
+ F32,
+ F64,
+}
+
+impl FloatTy {
+ pub fn name_str(self) -> &'static str {
+ match self {
+ FloatTy::F32 => "f32",
+ FloatTy::F64 => "f64",
+ }
+ }
+
+ pub fn bit_width(self) -> u64 {
+ match self {
+ FloatTy::F32 => 32,
+ FloatTy::F64 => 64,
+ }
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub enum IntVarValue {
+ IntType(IntTy),
+ UintType(UintTy),
+}
+
+#[derive(Clone, Copy, PartialEq, Eq)]
+pub struct FloatVarValue(pub FloatTy);
+
+rustc_index::newtype_index! {
+ /// A **ty**pe **v**ariable **ID**.
+ pub struct TyVid {
+ DEBUG_FORMAT = "_#{}t"
+ }
+}
+
+/// An **int**egral (`u32`, `i32`, `usize`, etc.) type **v**ariable **ID**.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+pub struct IntVid {
+ pub index: u32,
+}
+
+/// A **float**ing-point (`f32` or `f64`) type **v**ariable **ID**.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+pub struct FloatVid {
+ pub index: u32,
+}
+
+/// A placeholder for a type that hasn't been inferred yet.
+///
+/// E.g., if we have an empty array (`[]`), then we create a fresh
+/// type variable for the element type since we won't know until it's
+/// used what the element type is supposed to be.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Encodable, Decodable)]
+pub enum InferTy {
+ /// A type variable.
+ TyVar(TyVid),
+ /// An integral type variable (`{integer}`).
+ ///
+ /// These are created when the compiler sees an integer literal like
+ /// `1` that could be several different types (`u8`, `i32`, `u32`, etc.).
+ /// We don't know until it's used what type it's supposed to be, so
+ /// we create a fresh type variable.
+ IntVar(IntVid),
+ /// A floating-point type variable (`{float}`).
+ ///
+ /// These are created when the compiler sees a float literal like
+ /// `1.0` that could be either an `f32` or an `f64`.
+ /// We don't know until it's used what type it's supposed to be, so
+ /// we create a fresh type variable.
+ FloatVar(FloatVid),
+
+ /// A [`FreshTy`][Self::FreshTy] is one that is generated as a replacement
+ /// for an unbound type variable. This is convenient for caching etc. See
+ /// `rustc_infer::infer::freshen` for more details.
+ ///
+ /// Compare with [`TyVar`][Self::TyVar].
+ FreshTy(u32),
+ /// Like [`FreshTy`][Self::FreshTy], but as a replacement for [`IntVar`][Self::IntVar].
+ FreshIntTy(u32),
+ /// Like [`FreshTy`][Self::FreshTy], but as a replacement for [`FloatVar`][Self::FloatVar].
+ FreshFloatTy(u32),
+}
+
+/// Raw `TyVid`s are used as the unification key for `sub_relations`;
+/// they carry no values.
+impl UnifyKey for TyVid {
+ type Value = ();
+ #[inline]
+ fn index(&self) -> u32 {
+ self.as_u32()
+ }
+ #[inline]
+ fn from_index(i: u32) -> TyVid {
+ TyVid::from_u32(i)
+ }
+ fn tag() -> &'static str {
+ "TyVid"
+ }
+}
+
+impl EqUnifyValue for IntVarValue {}
+
+impl UnifyKey for IntVid {
+ type Value = Option<IntVarValue>;
+ #[inline] // make this function eligible for inlining - it is quite hot.
+ fn index(&self) -> u32 {
+ self.index
+ }
+ #[inline]
+ fn from_index(i: u32) -> IntVid {
+ IntVid { index: i }
+ }
+ fn tag() -> &'static str {
+ "IntVid"
+ }
+}
+
+impl EqUnifyValue for FloatVarValue {}
+
+impl UnifyKey for FloatVid {
+ type Value = Option<FloatVarValue>;
+ #[inline]
+ fn index(&self) -> u32 {
+ self.index
+ }
+ #[inline]
+ fn from_index(i: u32) -> FloatVid {
+ FloatVid { index: i }
+ }
+ fn tag() -> &'static str {
+ "FloatVid"
+ }
+}
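+
+// Illustrative: these `UnifyKey` impls let the vids drive the unification
+// tables in `rustc_data_structures::unify` (the `ena` crate); e.g. two
+// `IntVid`s created with `new_key(None)` can later be unified, after which
+// both resolve to the same `Option<IntVarValue>`.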
+
+#[derive(Copy, Clone, PartialEq, Decodable, Encodable, Hash)]
+#[rustc_pass_by_value]
+pub enum Variance {
+ Covariant, // T<A> <: T<B> iff A <: B -- e.g., function return type
+ Invariant, // T<A> <: T<B> iff B == A -- e.g., type of mutable cell
+ Contravariant, // T<A> <: T<B> iff B <: A -- e.g., function param type
+ Bivariant, // T<A> <: T<B> -- e.g., unused type parameter
+}
+
+impl Variance {
+ /// `a.xform(b)` combines the variance of a context with the
+ /// variance of a type with the following meaning. If we are in a
+ /// context with variance `a`, and we encounter a type argument in
+ /// a position with variance `b`, then `a.xform(b)` is the new
+ /// variance with which the argument appears.
+ ///
+ /// Example 1:
+ /// ```ignore (illustrative)
+ /// *mut Vec<i32>
+ /// ```
+ /// Here, the "ambient" variance starts as covariant. `*mut T` is
+ /// invariant with respect to `T`, so the variance in which the
+ /// `Vec<i32>` appears is `Covariant.xform(Invariant)`, which
+ /// yields `Invariant`. Now, the type `Vec<T>` is covariant with
+ /// respect to its type argument `T`, and hence the variance of
+ /// the `i32` here is `Invariant.xform(Covariant)`, which results
+ /// (again) in `Invariant`.
+ ///
+ /// Example 2:
+ /// ```ignore (illustrative)
+ /// fn(*const Vec<i32>, *mut Vec<i32>)
+ /// ```
+ /// The ambient variance is covariant. A `fn` type is
+ /// contravariant with respect to its parameters, so the variance
+ /// within which both pointer types appear is
+ /// `Covariant.xform(Contravariant)`, or `Contravariant`. `*const
+ /// T` is covariant with respect to `T`, so the variance within
+ /// which the first `Vec<i32>` appears is
+ /// `Contravariant.xform(Covariant)` or `Contravariant`. The same
+ /// is true for its `i32` argument. In the `*mut T` case, the
+ /// variance of `Vec<i32>` is `Contravariant.xform(Invariant)`,
+ /// and hence the outermost type is `Invariant` with respect to
+ /// `Vec<i32>` (and its `i32` argument).
+ ///
+ /// Source: Figure 1 of "Taming the Wildcards:
+ /// Combining Definition- and Use-Site Variance" published in PLDI'11.
+ pub fn xform(self, v: Variance) -> Variance {
+ match (self, v) {
+ // Figure 1, column 1.
+ (Variance::Covariant, Variance::Covariant) => Variance::Covariant,
+ (Variance::Covariant, Variance::Contravariant) => Variance::Contravariant,
+ (Variance::Covariant, Variance::Invariant) => Variance::Invariant,
+ (Variance::Covariant, Variance::Bivariant) => Variance::Bivariant,
+
+ // Figure 1, column 2.
+ (Variance::Contravariant, Variance::Covariant) => Variance::Contravariant,
+ (Variance::Contravariant, Variance::Contravariant) => Variance::Covariant,
+ (Variance::Contravariant, Variance::Invariant) => Variance::Invariant,
+ (Variance::Contravariant, Variance::Bivariant) => Variance::Bivariant,
+
+ // Figure 1, column 3.
+ (Variance::Invariant, _) => Variance::Invariant,
+
+ // Figure 1, column 4.
+ (Variance::Bivariant, _) => Variance::Bivariant,
+ }
+ }
+}
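+
+// Illustrative, matching Example 1 above:
+// `Variance::Covariant.xform(Variance::Invariant)` is `Invariant`, and by
+// Figure 1, column 3, any further `xform` starting from `Invariant` stays
+// `Invariant`.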
+
+impl<CTX> HashStable<CTX> for DebruijnIndex {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.as_u32().hash_stable(ctx, hasher);
+ }
+}
+
+impl<CTX> HashStable<CTX> for IntTy {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ discriminant(self).hash_stable(ctx, hasher);
+ }
+}
+
+impl<CTX> HashStable<CTX> for UintTy {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ discriminant(self).hash_stable(ctx, hasher);
+ }
+}
+
+impl<CTX> HashStable<CTX> for FloatTy {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ discriminant(self).hash_stable(ctx, hasher);
+ }
+}
+
+impl<CTX> HashStable<CTX> for InferTy {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ use InferTy::*;
+ discriminant(self).hash_stable(ctx, hasher);
+ match self {
+ TyVar(v) => v.as_u32().hash_stable(ctx, hasher),
+ IntVar(v) => v.index.hash_stable(ctx, hasher),
+ FloatVar(v) => v.index.hash_stable(ctx, hasher),
+ FreshTy(v) | FreshIntTy(v) | FreshFloatTy(v) => v.hash_stable(ctx, hasher),
+ }
+ }
+}
+
+impl<CTX> HashStable<CTX> for Variance {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ discriminant(self).hash_stable(ctx, hasher);
+ }
+}
+
+impl fmt::Debug for IntVarValue {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ IntVarValue::IntType(ref v) => v.fmt(f),
+ IntVarValue::UintType(ref v) => v.fmt(f),
+ }
+ }
+}
+
+impl fmt::Debug for FloatVarValue {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.0.fmt(f)
+ }
+}
+
+impl fmt::Debug for IntVid {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "_#{}i", self.index)
+ }
+}
+
+impl fmt::Debug for FloatVid {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "_#{}f", self.index)
+ }
+}
+
+impl fmt::Debug for InferTy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use InferTy::*;
+ match *self {
+ TyVar(ref v) => v.fmt(f),
+ IntVar(ref v) => v.fmt(f),
+ FloatVar(ref v) => v.fmt(f),
+ FreshTy(v) => write!(f, "FreshTy({:?})", v),
+ FreshIntTy(v) => write!(f, "FreshIntTy({:?})", v),
+ FreshFloatTy(v) => write!(f, "FreshFloatTy({:?})", v),
+ }
+ }
+}
+
+impl fmt::Debug for Variance {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str(match *self {
+ Variance::Covariant => "+",
+ Variance::Contravariant => "-",
+ Variance::Invariant => "o",
+ Variance::Bivariant => "*",
+ })
+ }
+}
+
+impl fmt::Display for InferTy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use InferTy::*;
+ match *self {
+ TyVar(_) => write!(f, "_"),
+ IntVar(_) => write!(f, "{}", "{integer}"),
+ FloatVar(_) => write!(f, "{}", "{float}"),
+ FreshTy(v) => write!(f, "FreshTy({})", v),
+ FreshIntTy(v) => write!(f, "FreshIntTy({})", v),
+ FreshFloatTy(v) => write!(f, "FreshFloatTy({})", v),
+ }
+ }
+}
+
+rustc_index::newtype_index! {
+ /// "Universes" are used during type- and trait-checking in the
+ /// presence of `for<..>` binders to control what sets of names are
+ /// visible. Universes are arranged into a tree: the root universe
+ /// contains names that are always visible. Each child then adds a new
+ /// set of names that are visible, in addition to those of its parent.
+ /// We say that the child universe "extends" the parent universe with
+ /// new names.
+ ///
+ /// To make this more concrete, consider this program:
+ ///
+ /// ```ignore (illustrative)
+ /// struct Foo { }
+ /// fn bar<T>(x: T) {
+ /// let y: for<'a> fn(&'a u8, Foo) = ...;
+ /// }
+ /// ```
+ ///
+ /// The struct name `Foo` is in the root universe U0. But the type
+ /// parameter `T`, introduced on `bar`, is in an extended universe U1
+ /// -- i.e., within `bar`, we can name both `T` and `Foo`, but outside
+ /// of `bar`, we cannot name `T`. Then, within the type of `y`, the
+ /// region `'a` is in a universe U2 that extends U1, because we can
+ /// name it inside the fn type but not outside.
+ ///
+ /// Universes are used to do type- and trait-checking around these
+ /// "forall" binders (also called **universal quantification**). The
+ /// idea is that when, in the body of `bar`, we refer to `T` as a
+ /// type, we aren't referring to any type in particular, but rather a
+ /// kind of "fresh" type that is distinct from all other types we have
+ /// actually declared. This is called a **placeholder** type, and we
+ /// use universes to talk about this. In other words, a type name in
+ /// universe 0 always corresponds to some "ground" type that the user
+ /// declared, but a type name in a non-zero universe is a placeholder
+ /// type -- an idealized representative of "types in general" that we
+ /// use for checking generic functions.
+ pub struct UniverseIndex {
+ DEBUG_FORMAT = "U{}",
+ }
+}
+
+impl UniverseIndex {
+ pub const ROOT: UniverseIndex = UniverseIndex::from_u32(0);
+
+ /// Returns the "next" universe index in order -- this new index
+ /// is considered to extend all previous universes. This
+ /// corresponds to entering a `forall` quantifier. So, for
+ /// example, suppose we have this type in universe `U`:
+ ///
+ /// ```ignore (illustrative)
+ /// for<'a> fn(&'a u32)
+ /// ```
+ ///
+ /// Once we "enter" into this `for<'a>` quantifier, we are in a
+ /// new universe that extends `U` -- in this new universe, we can
+ /// name the region `'a`, but that region was not nameable from
+ /// `U` because it was not in scope there.
+ pub fn next_universe(self) -> UniverseIndex {
+ UniverseIndex::from_u32(self.private.checked_add(1).unwrap())
+ }
+
+ /// Returns `true` if `self` can name a name from `other` -- in other words,
+ /// if the set of names in `self` is a superset of those in
+ /// `other` (`self >= other`).
+ pub fn can_name(self, other: UniverseIndex) -> bool {
+ self.private >= other.private
+ }
+
+ /// Returns `true` if `self` cannot name some names from `other` -- in other
+ /// words, if the set of names in `self` is a strict subset of
+ /// those in `other` (`self < other`).
+ pub fn cannot_name(self, other: UniverseIndex) -> bool {
+ self.private < other.private
+ }
+}
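+
+// Illustrative: with `let u1 = UniverseIndex::ROOT.next_universe();`,
+// `u1.can_name(UniverseIndex::ROOT)` holds while the reverse
+// `UniverseIndex::ROOT.can_name(u1)` does not, matching the picture above of
+// child universes extending their parents' name sets.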
+
+impl<CTX> HashStable<CTX> for UniverseIndex {
+ fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
+ self.private.hash_stable(ctx, hasher);
+ }
+}
diff --git a/compiler/rustc_type_ir/src/sty.rs b/compiler/rustc_type_ir/src/sty.rs
new file mode 100644
index 000000000..74737e30b
--- /dev/null
+++ b/compiler/rustc_type_ir/src/sty.rs
@@ -0,0 +1,1329 @@
+#![allow(rustc::usage_of_ty_tykind)]
+
+use std::cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd};
+use std::{fmt, hash};
+
+use crate::DebruijnIndex;
+use crate::FloatTy;
+use crate::IntTy;
+use crate::Interner;
+use crate::TyDecoder;
+use crate::TyEncoder;
+use crate::UintTy;
+use crate::UniverseIndex;
+
+use self::RegionKind::*;
+use self::TyKind::*;
+
+use rustc_data_structures::stable_hasher::HashStable;
+use rustc_serialize::{Decodable, Decoder, Encodable};
+
+/// Defines the kinds of types used by the type system.
+///
+/// Types written by the user start out as `hir::TyKind` and get
+/// converted to this representation using `AstConv::ast_ty_to_ty`.
+#[rustc_diagnostic_item = "IrTyKind"]
+pub enum TyKind<I: Interner> {
+ /// The primitive boolean type. Written as `bool`.
+ Bool,
+
+ /// The primitive character type; holds a Unicode scalar value
+ /// (a non-surrogate code point). Written as `char`.
+ Char,
+
+ /// A primitive signed integer type. For example, `i32`.
+ Int(IntTy),
+
+ /// A primitive unsigned integer type. For example, `u32`.
+ Uint(UintTy),
+
+ /// A primitive floating-point type. For example, `f64`.
+ Float(FloatTy),
+
+ /// Algebraic data types (ADT). For example: structures, enumerations and unions.
+ ///
+ /// For example, the type `List<i32>` would be represented using the `AdtDef`
+ /// for `struct List<T>` and the substs `[i32]`.
+ ///
+ /// Note that generic parameters in fields only get lazily substituted
+ /// by using something like `adt_def.all_fields().map(|field| field.ty(tcx, substs))`.
+ Adt(I::AdtDef, I::SubstsRef),
+
+ /// An unsized FFI type that is opaque to Rust. Written as `extern type T`.
+ Foreign(I::DefId),
+
+ /// The pointee of a string slice. Written as `str`.
+ Str,
+
+ /// An array with the given length. Written as `[T; N]`.
+ Array(I::Ty, I::Const),
+
+ /// The pointee of an array slice. Written as `[T]`.
+ Slice(I::Ty),
+
+ /// A raw pointer. Written as `*mut T` or `*const T`
+ RawPtr(I::TypeAndMut),
+
+ /// A reference; a pointer with an associated lifetime. Written as
+ /// `&'a mut T` or `&'a T`.
+ Ref(I::Region, I::Ty, I::Mutability),
+
+ /// The anonymous type of a function declaration/definition. Each
+ /// function has a unique type.
+ ///
+ /// For the function `fn foo() -> i32 { 3 }` this type would be
+ /// shown to the user as `fn() -> i32 {foo}`.
+ ///
+ /// For example the type of `bar` here:
+ /// ```rust
+ /// fn foo() -> i32 { 1 }
+ /// let bar = foo; // bar: fn() -> i32 {foo}
+ /// ```
+ FnDef(I::DefId, I::SubstsRef),
+
+ /// A pointer to a function. Written as `fn() -> i32`.
+ ///
+ /// Note that both functions and closures start out as either
+ /// [FnDef] or [Closure] which can then be coerced to this variant.
+ ///
+ /// For example the type of `bar` here:
+ ///
+ /// ```rust
+ /// fn foo() -> i32 { 1 }
+ /// let bar: fn() -> i32 = foo;
+ /// ```
+ FnPtr(I::PolyFnSig),
+
+ /// A trait object. Written as `dyn for<'b> Trait<'b, Assoc = u32> + Send + 'a`.
+ Dynamic(I::ListBinderExistentialPredicate, I::Region),
+
+ /// The anonymous type of a closure. Used to represent the type of `|a| a`.
+ ///
+ /// Closure substs contain both the (potentially substituted) generic parameters
+ /// of its parent and some synthetic parameters. See the documentation for
+ /// `ClosureSubsts` for more details.
+ Closure(I::DefId, I::SubstsRef),
+
+ /// The anonymous type of a generator. Used to represent the type of
+ /// `|a| yield a`.
+ ///
+ /// For more info about generator substs, visit the documentation for
+ /// `GeneratorSubsts`.
+ Generator(I::DefId, I::SubstsRef, I::Movability),
+
+ /// A type representing the types stored inside a generator.
+ /// This should only appear as part of the `GeneratorSubsts`.
+ ///
+ /// Note that the captured variables for generators are stored separately
+ /// using a tuple in the same way as for closures.
+ ///
+ /// Unlike upvars, the witness can reference lifetimes from
+ /// inside of the generator itself. To deal with them in
+ /// the type of the generator, we convert them to higher ranked
+ /// lifetimes bound by the witness itself.
+ ///
+ /// Looking at the following example, the witness for this generator
+ /// may end up as something like `for<'a> [Vec<i32>, &'a Vec<i32>]`:
+ ///
+ /// ```ignore UNSOLVED (ask @compiler-errors, should this error? can we just swap the yields?)
+ /// #![feature(generators)]
+ /// |a| {
+ /// let x = &vec![3];
+ /// yield a;
+ /// yield x[0];
+ /// }
+ /// # ;
+ /// ```
+ GeneratorWitness(I::BinderListTy),
+
+ /// The never type `!`.
+ Never,
+
+ /// A tuple type. For example, `(i32, bool)`.
+ Tuple(I::ListTy),
+
+ /// The projection of an associated type. For example,
+ /// `<T as Trait<..>>::N`.
+ Projection(I::ProjectionTy),
+
+ /// Opaque (`impl Trait`) type found in a return type.
+ ///
+ /// The `DefId` comes either from
+ /// * the `impl Trait` ast::Ty node,
+ /// * or the `type Foo = impl Trait` declaration.
+ ///
+ /// For RPIT the substitutions are for the generics of the function,
+ /// while for TAIT it is used for the generic parameters of the alias.
+ ///
+ /// During codegen, `tcx.type_of(def_id)` can be used to get the underlying type.
+ Opaque(I::DefId, I::SubstsRef),
+
+ /// A type parameter; for example, `T` in `fn f<T>(x: T) {}`.
+ Param(I::ParamTy),
+
+ /// Bound type variable, used to represent the `'a` in `for<'a> fn(&'a ())`.
+ ///
+ /// For canonical queries, we replace inference variables with bound variables,
+ /// so e.g. when checking whether `&'_ (): Trait<_>` holds, we canonicalize that to
+ /// `for<'a, T> &'a (): Trait<T>` and then convert the introduced bound variables
+ /// back to inference variables in a new inference context when inside of the query.
+ ///
+ /// See the `rustc-dev-guide` for more details about
+ /// [higher-ranked trait bounds][1] and [canonical queries][2].
+ ///
+ /// [1]: https://rustc-dev-guide.rust-lang.org/traits/hrtb.html
+ /// [2]: https://rustc-dev-guide.rust-lang.org/traits/canonical-queries.html
+ Bound(DebruijnIndex, I::BoundTy),
+
+ /// A placeholder type, used during higher ranked subtyping to instantiate
+ /// bound variables.
+ Placeholder(I::PlaceholderType),
+
+ /// A type variable used during type checking.
+ ///
+ /// Similar to placeholders, inference variables also live in a universe to
+ /// correctly deal with higher ranked types. Though unlike placeholders,
+ /// that universe is stored in the `InferCtxt` instead of directly
+ /// inside of the type.
+ Infer(I::InferTy),
+
+ /// A placeholder for a type which could not be computed; this is
+ /// propagated to avoid useless error messages.
+ Error(I::DelaySpanBugEmitted),
+}
+
+impl<I: Interner> TyKind<I> {
+ #[inline]
+ pub fn is_primitive(&self) -> bool {
+ matches!(self, Bool | Char | Int(_) | Uint(_) | Float(_))
+ }
+}
+
+// This is manually implemented for `TyKind` because `std::mem::discriminant`
+// returns an opaque value that is `PartialEq` but not `PartialOrd`
+#[inline]
+const fn tykind_discriminant<I: Interner>(value: &TyKind<I>) -> usize {
+ match value {
+ Bool => 0,
+ Char => 1,
+ Int(_) => 2,
+ Uint(_) => 3,
+ Float(_) => 4,
+ Adt(_, _) => 5,
+ Foreign(_) => 6,
+ Str => 7,
+ Array(_, _) => 8,
+ Slice(_) => 9,
+ RawPtr(_) => 10,
+ Ref(_, _, _) => 11,
+ FnDef(_, _) => 12,
+ FnPtr(_) => 13,
+ Dynamic(_, _) => 14,
+ Closure(_, _) => 15,
+ Generator(_, _, _) => 16,
+ GeneratorWitness(_) => 17,
+ Never => 18,
+ Tuple(_) => 19,
+ Projection(_) => 20,
+ Opaque(_, _) => 21,
+ Param(_) => 22,
+ Bound(_, _) => 23,
+ Placeholder(_) => 24,
+ Infer(_) => 25,
+ Error(_) => 26,
+ }
+}
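
To see why the manual `tykind_discriminant` above is needed: `std::mem::discriminant` yields an opaque `Discriminant<T>` that supports `==` but not `<`. A small self-contained sketch of the same workaround (`K` and `tag` are made-up names):

```rust
use std::mem::discriminant;

enum K {
    A(u8),
    B,
}

// The manual mapping: each variant gets a comparable `usize` tag.
const fn tag(k: &K) -> usize {
    match k {
        K::A(_) => 0,
        K::B => 1,
    }
}

fn main() {
    // `discriminant` values of the same variant compare equal...
    assert_eq!(discriminant(&K::A(1)), discriminant(&K::A(2)));
    // ...but `Discriminant<K>` has no `Ord`, so ordering needs the tag.
    assert!(tag(&K::A(0)) < tag(&K::B));
}
```
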
+
+// This is manually implemented because a derive would require `I: Clone`
+impl<I: Interner> Clone for TyKind<I> {
+ fn clone(&self) -> Self {
+ match self {
+ Bool => Bool,
+ Char => Char,
+ Int(i) => Int(i.clone()),
+ Uint(u) => Uint(u.clone()),
+ Float(f) => Float(f.clone()),
+ Adt(d, s) => Adt(d.clone(), s.clone()),
+ Foreign(d) => Foreign(d.clone()),
+ Str => Str,
+ Array(t, c) => Array(t.clone(), c.clone()),
+ Slice(t) => Slice(t.clone()),
+ RawPtr(t) => RawPtr(t.clone()),
+ Ref(r, t, m) => Ref(r.clone(), t.clone(), m.clone()),
+ FnDef(d, s) => FnDef(d.clone(), s.clone()),
+ FnPtr(s) => FnPtr(s.clone()),
+ Dynamic(p, r) => Dynamic(p.clone(), r.clone()),
+ Closure(d, s) => Closure(d.clone(), s.clone()),
+ Generator(d, s, m) => Generator(d.clone(), s.clone(), m.clone()),
+ GeneratorWitness(g) => GeneratorWitness(g.clone()),
+ Never => Never,
+ Tuple(t) => Tuple(t.clone()),
+ Projection(p) => Projection(p.clone()),
+ Opaque(d, s) => Opaque(d.clone(), s.clone()),
+ Param(p) => Param(p.clone()),
+ Bound(d, b) => Bound(d.clone(), b.clone()),
+ Placeholder(p) => Placeholder(p.clone()),
+ Infer(t) => Infer(t.clone()),
+ Error(e) => Error(e.clone()),
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: PartialEq`
+impl<I: Interner> PartialEq for TyKind<I> {
+ #[inline]
+ fn eq(&self, other: &TyKind<I>) -> bool {
+ let __self_vi = tykind_discriminant(self);
+ let __arg_1_vi = tykind_discriminant(other);
+ if __self_vi == __arg_1_vi {
+ match (&*self, &*other) {
+ (&Int(ref __self_0), &Int(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Uint(ref __self_0), &Uint(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Float(ref __self_0), &Float(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Adt(ref __self_0, ref __self_1), &Adt(ref __arg_1_0, ref __arg_1_1)) => {
+ __self_0 == __arg_1_0 && __self_1 == __arg_1_1
+ }
+ (&Foreign(ref __self_0), &Foreign(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Array(ref __self_0, ref __self_1), &Array(ref __arg_1_0, ref __arg_1_1)) => {
+ __self_0 == __arg_1_0 && __self_1 == __arg_1_1
+ }
+ (&Slice(ref __self_0), &Slice(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&RawPtr(ref __self_0), &RawPtr(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (
+ &Ref(ref __self_0, ref __self_1, ref __self_2),
+ &Ref(ref __arg_1_0, ref __arg_1_1, ref __arg_1_2),
+ ) => __self_0 == __arg_1_0 && __self_1 == __arg_1_1 && __self_2 == __arg_1_2,
+ (&FnDef(ref __self_0, ref __self_1), &FnDef(ref __arg_1_0, ref __arg_1_1)) => {
+ __self_0 == __arg_1_0 && __self_1 == __arg_1_1
+ }
+ (&FnPtr(ref __self_0), &FnPtr(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Dynamic(ref __self_0, ref __self_1), &Dynamic(ref __arg_1_0, ref __arg_1_1)) => {
+ __self_0 == __arg_1_0 && __self_1 == __arg_1_1
+ }
+ (&Closure(ref __self_0, ref __self_1), &Closure(ref __arg_1_0, ref __arg_1_1)) => {
+ __self_0 == __arg_1_0 && __self_1 == __arg_1_1
+ }
+ (
+ &Generator(ref __self_0, ref __self_1, ref __self_2),
+ &Generator(ref __arg_1_0, ref __arg_1_1, ref __arg_1_2),
+ ) => __self_0 == __arg_1_0 && __self_1 == __arg_1_1 && __self_2 == __arg_1_2,
+ (&GeneratorWitness(ref __self_0), &GeneratorWitness(ref __arg_1_0)) => {
+ __self_0 == __arg_1_0
+ }
+ (&Tuple(ref __self_0), &Tuple(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Projection(ref __self_0), &Projection(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Opaque(ref __self_0, ref __self_1), &Opaque(ref __arg_1_0, ref __arg_1_1)) => {
+ __self_0 == __arg_1_0 && __self_1 == __arg_1_1
+ }
+ (&Param(ref __self_0), &Param(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Bound(ref __self_0, ref __self_1), &Bound(ref __arg_1_0, ref __arg_1_1)) => {
+ __self_0 == __arg_1_0 && __self_1 == __arg_1_1
+ }
+ (&Placeholder(ref __self_0), &Placeholder(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Infer(ref __self_0), &Infer(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&Error(ref __self_0), &Error(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ _ => true,
+ }
+ } else {
+ false
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Eq`
+impl<I: Interner> Eq for TyKind<I> {}
+
+// This is manually implemented because a derive would require `I: PartialOrd`
+impl<I: Interner> PartialOrd for TyKind<I> {
+ #[inline]
+ fn partial_cmp(&self, other: &TyKind<I>) -> Option<Ordering> {
+ Some(Ord::cmp(self, other))
+ }
+}
+
+// This is manually implemented because a derive would require `I: Ord`
+impl<I: Interner> Ord for TyKind<I> {
+ #[inline]
+ fn cmp(&self, other: &TyKind<I>) -> Ordering {
+ let __self_vi = tykind_discriminant(self);
+ let __arg_1_vi = tykind_discriminant(other);
+ if __self_vi == __arg_1_vi {
+ match (&*self, &*other) {
+ (&Int(ref __self_0), &Int(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&Uint(ref __self_0), &Uint(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&Float(ref __self_0), &Float(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&Adt(ref __self_0, ref __self_1), &Adt(ref __arg_1_0, ref __arg_1_1)) => {
+ match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
+ cmp => cmp,
+ }
+ }
+ (&Foreign(ref __self_0), &Foreign(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&Array(ref __self_0, ref __self_1), &Array(ref __arg_1_0, ref __arg_1_1)) => {
+ match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
+ cmp => cmp,
+ }
+ }
+ (&Slice(ref __self_0), &Slice(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&RawPtr(ref __self_0), &RawPtr(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (
+ &Ref(ref __self_0, ref __self_1, ref __self_2),
+ &Ref(ref __arg_1_0, ref __arg_1_1, ref __arg_1_2),
+ ) => match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => match Ord::cmp(__self_1, __arg_1_1) {
+ Ordering::Equal => Ord::cmp(__self_2, __arg_1_2),
+ cmp => cmp,
+ },
+ cmp => cmp,
+ },
+ (&FnDef(ref __self_0, ref __self_1), &FnDef(ref __arg_1_0, ref __arg_1_1)) => {
+ match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
+ cmp => cmp,
+ }
+ }
+ (&FnPtr(ref __self_0), &FnPtr(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&Dynamic(ref __self_0, ref __self_1), &Dynamic(ref __arg_1_0, ref __arg_1_1)) => {
+ match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
+ cmp => cmp,
+ }
+ }
+ (&Closure(ref __self_0, ref __self_1), &Closure(ref __arg_1_0, ref __arg_1_1)) => {
+ match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
+ cmp => cmp,
+ }
+ }
+ (
+ &Generator(ref __self_0, ref __self_1, ref __self_2),
+ &Generator(ref __arg_1_0, ref __arg_1_1, ref __arg_1_2),
+ ) => match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => match Ord::cmp(__self_1, __arg_1_1) {
+ Ordering::Equal => Ord::cmp(__self_2, __arg_1_2),
+ cmp => cmp,
+ },
+ cmp => cmp,
+ },
+ (&GeneratorWitness(ref __self_0), &GeneratorWitness(ref __arg_1_0)) => {
+ Ord::cmp(__self_0, __arg_1_0)
+ }
+ (&Tuple(ref __self_0), &Tuple(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&Projection(ref __self_0), &Projection(ref __arg_1_0)) => {
+ Ord::cmp(__self_0, __arg_1_0)
+ }
+ (&Opaque(ref __self_0, ref __self_1), &Opaque(ref __arg_1_0, ref __arg_1_1)) => {
+ match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
+ cmp => cmp,
+ }
+ }
+ (&Param(ref __self_0), &Param(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&Bound(ref __self_0, ref __self_1), &Bound(ref __arg_1_0, ref __arg_1_1)) => {
+ match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
+ cmp => cmp,
+ }
+ }
+ (&Placeholder(ref __self_0), &Placeholder(ref __arg_1_0)) => {
+ Ord::cmp(__self_0, __arg_1_0)
+ }
+ (&Infer(ref __self_0), &Infer(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&Error(ref __self_0), &Error(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ _ => Ordering::Equal,
+ }
+ } else {
+ Ord::cmp(&__self_vi, &__arg_1_vi)
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Hash`
+impl<I: Interner> hash::Hash for TyKind<I> {
+ fn hash<__H: hash::Hasher>(&self, state: &mut __H) -> () {
+ match (&*self,) {
+ (&Int(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Uint(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Float(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Adt(ref __self_0, ref __self_1),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state)
+ }
+ (&Foreign(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Array(ref __self_0, ref __self_1),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state)
+ }
+ (&Slice(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&RawPtr(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Ref(ref __self_0, ref __self_1, ref __self_2),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state);
+ hash::Hash::hash(__self_2, state)
+ }
+ (&FnDef(ref __self_0, ref __self_1),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state)
+ }
+ (&FnPtr(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Dynamic(ref __self_0, ref __self_1),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state)
+ }
+ (&Closure(ref __self_0, ref __self_1),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state)
+ }
+ (&Generator(ref __self_0, ref __self_1, ref __self_2),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state);
+ hash::Hash::hash(__self_2, state)
+ }
+ (&GeneratorWitness(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Tuple(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Projection(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Opaque(ref __self_0, ref __self_1),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state)
+ }
+ (&Param(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Bound(ref __self_0, ref __self_1),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state)
+ }
+ (&Placeholder(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Infer(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&Error(ref __self_0),) => {
+ hash::Hash::hash(&tykind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ _ => hash::Hash::hash(&tykind_discriminant(self), state),
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Debug`
+impl<I: Interner> fmt::Debug for TyKind<I> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use std::fmt::*;
+ match self {
+ Bool => Formatter::write_str(f, "Bool"),
+ Char => Formatter::write_str(f, "Char"),
+ Int(f0) => Formatter::debug_tuple_field1_finish(f, "Int", f0),
+ Uint(f0) => Formatter::debug_tuple_field1_finish(f, "Uint", f0),
+ Float(f0) => Formatter::debug_tuple_field1_finish(f, "Float", f0),
+ Adt(f0, f1) => Formatter::debug_tuple_field2_finish(f, "Adt", f0, f1),
+ Foreign(f0) => Formatter::debug_tuple_field1_finish(f, "Foreign", f0),
+ Str => Formatter::write_str(f, "Str"),
+ Array(f0, f1) => Formatter::debug_tuple_field2_finish(f, "Array", f0, f1),
+ Slice(f0) => Formatter::debug_tuple_field1_finish(f, "Slice", f0),
+ RawPtr(f0) => Formatter::debug_tuple_field1_finish(f, "RawPtr", f0),
+ Ref(f0, f1, f2) => Formatter::debug_tuple_field3_finish(f, "Ref", f0, f1, f2),
+ FnDef(f0, f1) => Formatter::debug_tuple_field2_finish(f, "FnDef", f0, f1),
+ FnPtr(f0) => Formatter::debug_tuple_field1_finish(f, "FnPtr", f0),
+ Dynamic(f0, f1) => Formatter::debug_tuple_field2_finish(f, "Dynamic", f0, f1),
+ Closure(f0, f1) => Formatter::debug_tuple_field2_finish(f, "Closure", f0, f1),
+ Generator(f0, f1, f2) => {
+ Formatter::debug_tuple_field3_finish(f, "Generator", f0, f1, f2)
+ }
+ GeneratorWitness(f0) => Formatter::debug_tuple_field1_finish(f, "GeneratorWitness", f0),
+ Never => Formatter::write_str(f, "Never"),
+ Tuple(f0) => Formatter::debug_tuple_field1_finish(f, "Tuple", f0),
+ Projection(f0) => Formatter::debug_tuple_field1_finish(f, "Projection", f0),
+ Opaque(f0, f1) => Formatter::debug_tuple_field2_finish(f, "Opaque", f0, f1),
+ Param(f0) => Formatter::debug_tuple_field1_finish(f, "Param", f0),
+ Bound(f0, f1) => Formatter::debug_tuple_field2_finish(f, "Bound", f0, f1),
+ Placeholder(f0) => Formatter::debug_tuple_field1_finish(f, "Placeholder", f0),
+ Infer(f0) => Formatter::debug_tuple_field1_finish(f, "Infer", f0),
+ TyKind::Error(f0) => Formatter::debug_tuple_field1_finish(f, "Error", f0),
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Encodable`
+impl<I: Interner, E: TyEncoder> Encodable<E> for TyKind<I>
+where
+ I::DelaySpanBugEmitted: Encodable<E>,
+ I::AdtDef: Encodable<E>,
+ I::SubstsRef: Encodable<E>,
+ I::DefId: Encodable<E>,
+ I::Ty: Encodable<E>,
+ I::Const: Encodable<E>,
+ I::Region: Encodable<E>,
+ I::TypeAndMut: Encodable<E>,
+ I::Mutability: Encodable<E>,
+ I::Movability: Encodable<E>,
+ I::PolyFnSig: Encodable<E>,
+ I::ListBinderExistentialPredicate: Encodable<E>,
+ I::BinderListTy: Encodable<E>,
+ I::ListTy: Encodable<E>,
+ I::ProjectionTy: Encodable<E>,
+ I::ParamTy: Encodable<E>,
+ I::BoundTy: Encodable<E>,
+ I::PlaceholderType: Encodable<E>,
+ I::InferTy: Encodable<E>,
+ I::PredicateKind: Encodable<E>,
+ I::AllocId: Encodable<E>,
+{
+ fn encode(&self, e: &mut E) {
+ let disc = tykind_discriminant(self);
+ match self {
+ Bool => e.emit_enum_variant(disc, |_| {}),
+ Char => e.emit_enum_variant(disc, |_| {}),
+ Int(i) => e.emit_enum_variant(disc, |e| {
+ i.encode(e);
+ }),
+ Uint(u) => e.emit_enum_variant(disc, |e| {
+ u.encode(e);
+ }),
+ Float(f) => e.emit_enum_variant(disc, |e| {
+ f.encode(e);
+ }),
+ Adt(adt, substs) => e.emit_enum_variant(disc, |e| {
+ adt.encode(e);
+ substs.encode(e);
+ }),
+ Foreign(def_id) => e.emit_enum_variant(disc, |e| {
+ def_id.encode(e);
+ }),
+ Str => e.emit_enum_variant(disc, |_| {}),
+ Array(t, c) => e.emit_enum_variant(disc, |e| {
+ t.encode(e);
+ c.encode(e);
+ }),
+ Slice(t) => e.emit_enum_variant(disc, |e| {
+ t.encode(e);
+ }),
+ RawPtr(tam) => e.emit_enum_variant(disc, |e| {
+ tam.encode(e);
+ }),
+ Ref(r, t, m) => e.emit_enum_variant(disc, |e| {
+ r.encode(e);
+ t.encode(e);
+ m.encode(e);
+ }),
+ FnDef(def_id, substs) => e.emit_enum_variant(disc, |e| {
+ def_id.encode(e);
+ substs.encode(e);
+ }),
+ FnPtr(polyfnsig) => e.emit_enum_variant(disc, |e| {
+ polyfnsig.encode(e);
+ }),
+ Dynamic(l, r) => e.emit_enum_variant(disc, |e| {
+ l.encode(e);
+ r.encode(e);
+ }),
+ Closure(def_id, substs) => e.emit_enum_variant(disc, |e| {
+ def_id.encode(e);
+ substs.encode(e);
+ }),
+ Generator(def_id, substs, m) => e.emit_enum_variant(disc, |e| {
+ def_id.encode(e);
+ substs.encode(e);
+ m.encode(e);
+ }),
+ GeneratorWitness(b) => e.emit_enum_variant(disc, |e| {
+ b.encode(e);
+ }),
+ Never => e.emit_enum_variant(disc, |_| {}),
+ Tuple(substs) => e.emit_enum_variant(disc, |e| {
+ substs.encode(e);
+ }),
+ Projection(p) => e.emit_enum_variant(disc, |e| {
+ p.encode(e);
+ }),
+ Opaque(def_id, substs) => e.emit_enum_variant(disc, |e| {
+ def_id.encode(e);
+ substs.encode(e);
+ }),
+ Param(p) => e.emit_enum_variant(disc, |e| {
+ p.encode(e);
+ }),
+ Bound(d, b) => e.emit_enum_variant(disc, |e| {
+ d.encode(e);
+ b.encode(e);
+ }),
+ Placeholder(p) => e.emit_enum_variant(disc, |e| {
+ p.encode(e);
+ }),
+ Infer(i) => e.emit_enum_variant(disc, |e| {
+ i.encode(e);
+ }),
+ Error(d) => e.emit_enum_variant(disc, |e| {
+ d.encode(e);
+ }),
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Decodable`
+impl<I: Interner, D: TyDecoder<I = I>> Decodable<D> for TyKind<I>
+where
+ I::DelaySpanBugEmitted: Decodable<D>,
+ I::AdtDef: Decodable<D>,
+ I::SubstsRef: Decodable<D>,
+ I::DefId: Decodable<D>,
+ I::Ty: Decodable<D>,
+ I::Const: Decodable<D>,
+ I::Region: Decodable<D>,
+ I::TypeAndMut: Decodable<D>,
+ I::Mutability: Decodable<D>,
+ I::Movability: Decodable<D>,
+ I::PolyFnSig: Decodable<D>,
+ I::ListBinderExistentialPredicate: Decodable<D>,
+ I::BinderListTy: Decodable<D>,
+ I::ListTy: Decodable<D>,
+ I::ProjectionTy: Decodable<D>,
+ I::ParamTy: Decodable<D>,
+ I::BoundTy: Decodable<D>,
+ I::PlaceholderType: Decodable<D>,
+ I::InferTy: Decodable<D>,
+ I::PredicateKind: Decodable<D>,
+ I::AllocId: Decodable<D>,
+{
+ fn decode(d: &mut D) -> Self {
+ match Decoder::read_usize(d) {
+ 0 => Bool,
+ 1 => Char,
+ 2 => Int(Decodable::decode(d)),
+ 3 => Uint(Decodable::decode(d)),
+ 4 => Float(Decodable::decode(d)),
+ 5 => Adt(Decodable::decode(d), Decodable::decode(d)),
+ 6 => Foreign(Decodable::decode(d)),
+ 7 => Str,
+ 8 => Array(Decodable::decode(d), Decodable::decode(d)),
+ 9 => Slice(Decodable::decode(d)),
+ 10 => RawPtr(Decodable::decode(d)),
+ 11 => Ref(Decodable::decode(d), Decodable::decode(d), Decodable::decode(d)),
+ 12 => FnDef(Decodable::decode(d), Decodable::decode(d)),
+ 13 => FnPtr(Decodable::decode(d)),
+ 14 => Dynamic(Decodable::decode(d), Decodable::decode(d)),
+ 15 => Closure(Decodable::decode(d), Decodable::decode(d)),
+ 16 => Generator(Decodable::decode(d), Decodable::decode(d), Decodable::decode(d)),
+ 17 => GeneratorWitness(Decodable::decode(d)),
+ 18 => Never,
+ 19 => Tuple(Decodable::decode(d)),
+ 20 => Projection(Decodable::decode(d)),
+ 21 => Opaque(Decodable::decode(d), Decodable::decode(d)),
+ 22 => Param(Decodable::decode(d)),
+ 23 => Bound(Decodable::decode(d), Decodable::decode(d)),
+ 24 => Placeholder(Decodable::decode(d)),
+ 25 => Infer(Decodable::decode(d)),
+ 26 => Error(Decodable::decode(d)),
+ _ => panic!(
+ "{}",
+ format!(
+ "invalid enum variant tag while decoding `{}`, expected 0..{}",
+ "TyKind", 27,
+ )
+ ),
+ }
+ }
+}
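
The `Encodable`/`Decodable` pair above follows the usual tag-prefixed layout: write the variant tag, then each field in order; decoding reads the tag back and panics on an out-of-range value. A minimal sketch of the same pattern against a plain byte buffer rather than rustc's `TyEncoder`/`TyDecoder` (`Shape`, `encode`, and `decode` are hypothetical names, not rustc APIs):

```rust
use std::convert::TryInto;

enum Shape {
    Point,
    Circle(u32),
}

fn encode(s: &Shape, out: &mut Vec<u8>) {
    match s {
        // Tag first, then the fields in declaration order.
        Shape::Point => out.push(0),
        Shape::Circle(r) => {
            out.push(1);
            out.extend_from_slice(&r.to_le_bytes());
        }
    }
}

fn decode(bytes: &mut &[u8]) -> Shape {
    let tag = bytes[0];
    *bytes = &bytes[1..];
    match tag {
        0 => Shape::Point,
        1 => {
            let (head, rest) = bytes.split_at(4);
            *bytes = rest;
            Shape::Circle(u32::from_le_bytes(head.try_into().unwrap()))
        }
        // Same failure mode as the `Decodable` impl above.
        _ => panic!("invalid enum variant tag while decoding `Shape`, expected 0..2"),
    }
}

fn main() {
    let mut buf = Vec::new();
    encode(&Shape::Circle(7), &mut buf);
    match decode(&mut &buf[..]) {
        Shape::Circle(r) => assert_eq!(r, 7),
        Shape::Point => unreachable!(),
    }
}
```
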
+
+// This is not a derived impl because a derive would require `I: HashStable`
+#[allow(rustc::usage_of_ty_tykind)]
+impl<CTX, I: Interner> HashStable<CTX> for TyKind<I>
+where
+ I::AdtDef: HashStable<CTX>,
+ I::DefId: HashStable<CTX>,
+ I::SubstsRef: HashStable<CTX>,
+ I::Ty: HashStable<CTX>,
+ I::Const: HashStable<CTX>,
+ I::TypeAndMut: HashStable<CTX>,
+ I::PolyFnSig: HashStable<CTX>,
+ I::ListBinderExistentialPredicate: HashStable<CTX>,
+ I::Region: HashStable<CTX>,
+ I::Movability: HashStable<CTX>,
+ I::Mutability: HashStable<CTX>,
+ I::BinderListTy: HashStable<CTX>,
+ I::ListTy: HashStable<CTX>,
+ I::ProjectionTy: HashStable<CTX>,
+ I::BoundTy: HashStable<CTX>,
+ I::ParamTy: HashStable<CTX>,
+ I::PlaceholderType: HashStable<CTX>,
+ I::InferTy: HashStable<CTX>,
+ I::DelaySpanBugEmitted: HashStable<CTX>,
+{
+ #[inline]
+ fn hash_stable(
+ &self,
+ __hcx: &mut CTX,
+ __hasher: &mut rustc_data_structures::stable_hasher::StableHasher,
+ ) {
+ std::mem::discriminant(self).hash_stable(__hcx, __hasher);
+ match self {
+ Bool => {}
+ Char => {}
+ Int(i) => {
+ i.hash_stable(__hcx, __hasher);
+ }
+ Uint(u) => {
+ u.hash_stable(__hcx, __hasher);
+ }
+ Float(f) => {
+ f.hash_stable(__hcx, __hasher);
+ }
+ Adt(adt, substs) => {
+ adt.hash_stable(__hcx, __hasher);
+ substs.hash_stable(__hcx, __hasher);
+ }
+ Foreign(def_id) => {
+ def_id.hash_stable(__hcx, __hasher);
+ }
+ Str => {}
+ Array(t, c) => {
+ t.hash_stable(__hcx, __hasher);
+ c.hash_stable(__hcx, __hasher);
+ }
+ Slice(t) => {
+ t.hash_stable(__hcx, __hasher);
+ }
+ RawPtr(tam) => {
+ tam.hash_stable(__hcx, __hasher);
+ }
+ Ref(r, t, m) => {
+ r.hash_stable(__hcx, __hasher);
+ t.hash_stable(__hcx, __hasher);
+ m.hash_stable(__hcx, __hasher);
+ }
+ FnDef(def_id, substs) => {
+ def_id.hash_stable(__hcx, __hasher);
+ substs.hash_stable(__hcx, __hasher);
+ }
+ FnPtr(polyfnsig) => {
+ polyfnsig.hash_stable(__hcx, __hasher);
+ }
+ Dynamic(l, r) => {
+ l.hash_stable(__hcx, __hasher);
+ r.hash_stable(__hcx, __hasher);
+ }
+ Closure(def_id, substs) => {
+ def_id.hash_stable(__hcx, __hasher);
+ substs.hash_stable(__hcx, __hasher);
+ }
+ Generator(def_id, substs, m) => {
+ def_id.hash_stable(__hcx, __hasher);
+ substs.hash_stable(__hcx, __hasher);
+ m.hash_stable(__hcx, __hasher);
+ }
+ GeneratorWitness(b) => {
+ b.hash_stable(__hcx, __hasher);
+ }
+ Never => {}
+ Tuple(substs) => {
+ substs.hash_stable(__hcx, __hasher);
+ }
+ Projection(p) => {
+ p.hash_stable(__hcx, __hasher);
+ }
+ Opaque(def_id, substs) => {
+ def_id.hash_stable(__hcx, __hasher);
+ substs.hash_stable(__hcx, __hasher);
+ }
+ Param(p) => {
+ p.hash_stable(__hcx, __hasher);
+ }
+ Bound(d, b) => {
+ d.hash_stable(__hcx, __hasher);
+ b.hash_stable(__hcx, __hasher);
+ }
+ Placeholder(p) => {
+ p.hash_stable(__hcx, __hasher);
+ }
+ Infer(i) => {
+ i.hash_stable(__hcx, __hasher);
+ }
+ Error(d) => {
+ d.hash_stable(__hcx, __hasher);
+ }
+ }
+ }
+}
+
+/// Representation of regions. Note that the NLL checker uses a distinct
+/// representation of regions. For this reason, it internally replaces all the
+/// regions with inference variables -- the index of the variable is then used
+/// to index into internal NLL data structures. See `rustc_const_eval::borrow_check`
+/// module for more information.
+///
+/// Note: operations are on the wrapper `Region` type, which is interned,
+/// rather than this type.
+///
+/// ## The Region lattice within a given function
+///
+/// In general, the region lattice looks like
+///
+/// ```text
+/// static ----------+-----...------+ (greatest)
+/// | | |
+/// early-bound and | |
+/// free regions | |
+/// | | |
+/// | | |
+/// empty(root) placeholder(U1) |
+/// | / |
+/// | / placeholder(Un)
+/// empty(U1) -- /
+/// | /
+/// ... /
+/// | /
+/// empty(Un) -------- (smallest)
+/// ```
+///
+/// Early-bound/free regions are the named lifetimes in scope from the
+/// function declaration. They have relationships to one another
+/// determined based on the declared relationships from the
+/// function.
+///
+/// Note that inference variables and bound regions are not included
+/// in this diagram. In the case of inference variables, they should
+/// be inferred to some other region from the diagram. In the case of
+/// bound regions, they are excluded because they don't make sense to
+/// include -- the diagram indicates the relationship between free
+/// regions.
+///
+/// ## Inference variables
+///
+/// During region inference, we sometimes create inference variables,
+/// represented as `ReVar`. These will be inferred by the code in
+/// `infer::lexical_region_resolve` to some free region from the
+/// lattice above (the minimal region that meets the
+/// constraints).
+///
+/// During NLL checking, where regions are defined differently, we
+/// also use `ReVar` -- in that case, the index is used to index into
+/// the NLL region checker's data structures. The variable may in fact
+/// represent either a free region or an inference variable, in that
+/// case.
+///
+/// ## Bound Regions
+///
+/// These are regions that are stored behind a binder and must be substituted
+ /// with some concrete region before being used. There are two kinds of
+/// bound regions: early-bound, which are bound in an item's `Generics`,
+/// and are substituted by an `InternalSubsts`, and late-bound, which are part of
+/// higher-ranked types (e.g., `for<'a> fn(&'a ())`), and are substituted by
+/// the likes of `liberate_late_bound_regions`. The distinction exists
+/// because higher-ranked lifetimes aren't supported in all places. See [1][2].
+///
+/// Unlike `Param`s, bound regions are not supposed to exist "in the wild"
+/// outside their binder, e.g., in types passed to type inference, and
+/// should first be substituted (by placeholder regions, free regions,
+/// or region variables).
+///
+/// ## Placeholder and Free Regions
+///
+/// One often wants to work with bound regions without knowing their precise
+/// identity. For example, when checking a function, the lifetime of a borrow
+/// can end up being assigned to some region parameter. In these cases,
+/// it must be ensured that bounds on the region can't be accidentally
+/// assumed without being checked.
+///
+/// To do this, we replace the bound regions with placeholder markers,
+/// which don't satisfy any relation not explicitly provided.
+///
+/// There are two kinds of placeholder regions in rustc: `ReFree` and
+/// `RePlaceholder`. When checking an item's body, `ReFree` is supposed
+/// to be used. These also support explicit bounds: both the internally-stored
+ /// *scope*, which the region is assumed to outlive, and other
+/// relations stored in the `FreeRegionMap`. Note that these relations
+/// aren't checked when you `make_subregion` (or `eq_types`), only by
+/// `resolve_regions_and_report_errors`.
+///
+/// When working with higher-ranked types, some region relations aren't
+/// yet known, so you can't just call `resolve_regions_and_report_errors`.
+/// `RePlaceholder` is designed for this purpose. In these contexts,
+ /// there's also the risk that some inference variable lying around will
+/// get unified with your placeholder region: if you want to check whether
+/// `for<'a> Foo<'_>: 'a`, and you substitute your bound region `'a`
+/// with a placeholder region `'%a`, the variable `'_` would just be
+/// instantiated to the placeholder region `'%a`, which is wrong because
+/// the inference variable is supposed to satisfy the relation
+/// *for every value of the placeholder region*. To ensure that doesn't
+/// happen, you can use `leak_check`. This is more clearly explained
+/// by the [rustc dev guide].
+///
+/// [1]: https://smallcultfollowing.com/babysteps/blog/2013/10/29/intermingled-parameter-lists/
+/// [2]: https://smallcultfollowing.com/babysteps/blog/2013/11/04/intermingled-parameter-lists/
+/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/hrtb.html
+pub enum RegionKind<I: Interner> {
+ /// Region bound in a type or fn declaration which will be
+ /// substituted 'early' -- that is, at the same time when type
+ /// parameters are substituted.
+ ReEarlyBound(I::EarlyBoundRegion),
+
+ /// Region bound in a function scope, which will be substituted when the
+ /// function is called.
+ ReLateBound(DebruijnIndex, I::BoundRegion),
+
+ /// When checking a function body, the types of all arguments and so forth
+ /// that refer to bound region parameters are modified to refer to free
+ /// region parameters.
+ ReFree(I::FreeRegion),
+
+ /// Static data that has an "infinite" lifetime. Top in the region lattice.
+ ReStatic,
+
+ /// A region variable. Should not exist outside of type inference.
+ ReVar(I::RegionVid),
+
+ /// A placeholder region -- basically, the higher-ranked version of `ReFree`.
+ /// Should not exist outside of type inference.
+ RePlaceholder(I::PlaceholderRegion),
+
+ /// The empty lifetime is for data that is never accessed. We tag the
+ /// empty lifetime with a universe -- the idea is that we don't
+ /// want `exists<'a> { forall<'b> { 'b: 'a } }` to be satisfiable.
+ /// Therefore, the `'empty` in a universe `U` is less than all
+ /// regions visible from `U`, but not less than regions not visible
+ /// from `U`.
+ ReEmpty(UniverseIndex),
+
+ /// Erased region, used by trait selection, in MIR and during codegen.
+ ReErased,
+}
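
In surface Rust, the early-bound/late-bound split described above looks roughly like this (a sketch only; the compiler-internal representations are `ReEarlyBound` and `ReLateBound`):

```rust
// Early-bound: `'a` is part of `S`'s generics and is fixed when `S`
// is instantiated, substituted together with any type parameters.
struct S<'a>(&'a u32);

// Late-bound: the `for<'a>` in the function pointer type means `'a`
// is chosen fresh at each call site.
fn take(f: for<'a> fn(&'a u32) -> &'a u32) -> u32 {
    let x = 7;
    *f(&x)
}

fn id_ref<'a>(r: &'a u32) -> &'a u32 {
    r
}

fn main() {
    let n = 1;
    let _s = S(&n);
    assert_eq!(take(id_ref), 7);
}
```
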
+
+// This is manually implemented for `RegionKind` because `std::mem::discriminant`
+// returns an opaque value that is `PartialEq` but not `PartialOrd`
+#[inline]
+const fn regionkind_discriminant<I: Interner>(value: &RegionKind<I>) -> usize {
+ match value {
+ ReEarlyBound(_) => 0,
+ ReLateBound(_, _) => 1,
+ ReFree(_) => 2,
+ ReStatic => 3,
+ ReVar(_) => 4,
+ RePlaceholder(_) => 5,
+ ReEmpty(_) => 6,
+ ReErased => 7,
+ }
+}
+
+// This is manually implemented because a derive would require `I: Copy`
+impl<I: Interner> Copy for RegionKind<I>
+where
+ I::EarlyBoundRegion: Copy,
+ I::BoundRegion: Copy,
+ I::FreeRegion: Copy,
+ I::RegionVid: Copy,
+ I::PlaceholderRegion: Copy,
+{
+}
+
+// This is manually implemented because a derive would require `I: Clone`
+impl<I: Interner> Clone for RegionKind<I> {
+ fn clone(&self) -> Self {
+ match self {
+ ReEarlyBound(a) => ReEarlyBound(a.clone()),
+ ReLateBound(a, b) => ReLateBound(a.clone(), b.clone()),
+ ReFree(a) => ReFree(a.clone()),
+ ReStatic => ReStatic,
+ ReVar(a) => ReVar(a.clone()),
+ RePlaceholder(a) => RePlaceholder(a.clone()),
+ ReEmpty(a) => ReEmpty(a.clone()),
+ ReErased => ReErased,
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: PartialEq`
+impl<I: Interner> PartialEq for RegionKind<I> {
+ #[inline]
+ fn eq(&self, other: &RegionKind<I>) -> bool {
+ let __self_vi = regionkind_discriminant(self);
+ let __arg_1_vi = regionkind_discriminant(other);
+ if __self_vi == __arg_1_vi {
+ match (&*self, &*other) {
+ (&ReEarlyBound(ref __self_0), &ReEarlyBound(ref __arg_1_0)) => {
+ __self_0 == __arg_1_0
+ }
+ (
+ &ReLateBound(ref __self_0, ref __self_1),
+ &ReLateBound(ref __arg_1_0, ref __arg_1_1),
+ ) => __self_0 == __arg_1_0 && __self_1 == __arg_1_1,
+ (&ReFree(ref __self_0), &ReFree(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&ReStatic, &ReStatic) => true,
+ (&ReVar(ref __self_0), &ReVar(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&RePlaceholder(ref __self_0), &RePlaceholder(ref __arg_1_0)) => {
+ __self_0 == __arg_1_0
+ }
+ (&ReEmpty(ref __self_0), &ReEmpty(ref __arg_1_0)) => __self_0 == __arg_1_0,
+ (&ReErased, &ReErased) => true,
+ _ => true,
+ }
+ } else {
+ false
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Eq`
+impl<I: Interner> Eq for RegionKind<I> {}
+
+// This is manually implemented because a derive would require `I: PartialOrd`
+impl<I: Interner> PartialOrd for RegionKind<I> {
+ #[inline]
+ fn partial_cmp(&self, other: &RegionKind<I>) -> Option<Ordering> {
+ Some(Ord::cmp(self, other))
+ }
+}
+
+// This is manually implemented because a derive would require `I: Ord`
+impl<I: Interner> Ord for RegionKind<I> {
+ #[inline]
+ fn cmp(&self, other: &RegionKind<I>) -> Ordering {
+ let __self_vi = regionkind_discriminant(self);
+ let __arg_1_vi = regionkind_discriminant(other);
+ if __self_vi == __arg_1_vi {
+ match (&*self, &*other) {
+ (&ReEarlyBound(ref __self_0), &ReEarlyBound(ref __arg_1_0)) => {
+ Ord::cmp(__self_0, __arg_1_0)
+ }
+ (
+ &ReLateBound(ref __self_0, ref __self_1),
+ &ReLateBound(ref __arg_1_0, ref __arg_1_1),
+ ) => match Ord::cmp(__self_0, __arg_1_0) {
+ Ordering::Equal => Ord::cmp(__self_1, __arg_1_1),
+ cmp => cmp,
+ },
+ (&ReFree(ref __self_0), &ReFree(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&ReStatic, &ReStatic) => Ordering::Equal,
+ (&ReVar(ref __self_0), &ReVar(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&RePlaceholder(ref __self_0), &RePlaceholder(ref __arg_1_0)) => {
+ Ord::cmp(__self_0, __arg_1_0)
+ }
+ (&ReEmpty(ref __self_0), &ReEmpty(ref __arg_1_0)) => Ord::cmp(__self_0, __arg_1_0),
+ (&ReErased, &ReErased) => Ordering::Equal,
+ _ => Ordering::Equal,
+ }
+ } else {
+ Ord::cmp(&__self_vi, &__arg_1_vi)
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Hash`
+impl<I: Interner> hash::Hash for RegionKind<I> {
+ fn hash<__H: hash::Hasher>(&self, state: &mut __H) -> () {
+ match (&*self,) {
+ (&ReEarlyBound(ref __self_0),) => {
+ hash::Hash::hash(&regionkind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&ReLateBound(ref __self_0, ref __self_1),) => {
+ hash::Hash::hash(&regionkind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state);
+ hash::Hash::hash(__self_1, state)
+ }
+ (&ReFree(ref __self_0),) => {
+ hash::Hash::hash(&regionkind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&ReStatic,) => {
+ hash::Hash::hash(&regionkind_discriminant(self), state);
+ }
+ (&ReVar(ref __self_0),) => {
+ hash::Hash::hash(&regionkind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&RePlaceholder(ref __self_0),) => {
+ hash::Hash::hash(&regionkind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&ReEmpty(ref __self_0),) => {
+ hash::Hash::hash(&regionkind_discriminant(self), state);
+ hash::Hash::hash(__self_0, state)
+ }
+ (&ReErased,) => {
+ hash::Hash::hash(&regionkind_discriminant(self), state);
+ }
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Debug`
+impl<I: Interner> fmt::Debug for RegionKind<I> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ ReEarlyBound(ref data) => write!(f, "ReEarlyBound({:?})", data),
+
+ ReLateBound(binder_id, ref bound_region) => {
+ write!(f, "ReLateBound({:?}, {:?})", binder_id, bound_region)
+ }
+
+ ReFree(ref fr) => fr.fmt(f),
+
+ ReStatic => write!(f, "ReStatic"),
+
+ ReVar(ref vid) => vid.fmt(f),
+
+ RePlaceholder(placeholder) => write!(f, "RePlaceholder({:?})", placeholder),
+
+ ReEmpty(ui) => write!(f, "ReEmpty({:?})", ui),
+
+ ReErased => write!(f, "ReErased"),
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Encodable`
+impl<I: Interner, E: TyEncoder> Encodable<E> for RegionKind<I>
+where
+ I::EarlyBoundRegion: Encodable<E>,
+ I::BoundRegion: Encodable<E>,
+ I::FreeRegion: Encodable<E>,
+ I::RegionVid: Encodable<E>,
+ I::PlaceholderRegion: Encodable<E>,
+{
+ fn encode(&self, e: &mut E) {
+ let disc = regionkind_discriminant(self);
+ match self {
+ ReEarlyBound(a) => e.emit_enum_variant(disc, |e| {
+ a.encode(e);
+ }),
+ ReLateBound(a, b) => e.emit_enum_variant(disc, |e| {
+ a.encode(e);
+ b.encode(e);
+ }),
+ ReFree(a) => e.emit_enum_variant(disc, |e| {
+ a.encode(e);
+ }),
+ ReStatic => e.emit_enum_variant(disc, |_| {}),
+ ReVar(a) => e.emit_enum_variant(disc, |e| {
+ a.encode(e);
+ }),
+ RePlaceholder(a) => e.emit_enum_variant(disc, |e| {
+ a.encode(e);
+ }),
+ ReEmpty(a) => e.emit_enum_variant(disc, |e| {
+ a.encode(e);
+ }),
+ ReErased => e.emit_enum_variant(disc, |_| {}),
+ }
+ }
+}
+
+// This is manually implemented because a derive would require `I: Decodable`
+impl<I: Interner, D: TyDecoder<I = I>> Decodable<D> for RegionKind<I>
+where
+ I::EarlyBoundRegion: Decodable<D>,
+ I::BoundRegion: Decodable<D>,
+ I::FreeRegion: Decodable<D>,
+ I::RegionVid: Decodable<D>,
+ I::PlaceholderRegion: Decodable<D>,
+{
+ fn decode(d: &mut D) -> Self {
+ match Decoder::read_usize(d) {
+ 0 => ReEarlyBound(Decodable::decode(d)),
+ 1 => ReLateBound(Decodable::decode(d), Decodable::decode(d)),
+ 2 => ReFree(Decodable::decode(d)),
+ 3 => ReStatic,
+ 4 => ReVar(Decodable::decode(d)),
+ 5 => RePlaceholder(Decodable::decode(d)),
+ 6 => ReEmpty(Decodable::decode(d)),
+ 7 => ReErased,
+ _ => panic!(
+ "{}",
+ format!(
+ "invalid enum variant tag while decoding `{}`, expected 0..{}",
+ "RegionKind", 8,
+ )
+ ),
+ }
+ }
+}
+
+// This is not a derived impl because a derive would require `I: HashStable`
+impl<CTX, I: Interner> HashStable<CTX> for RegionKind<I>
+where
+ I::EarlyBoundRegion: HashStable<CTX>,
+ I::BoundRegion: HashStable<CTX>,
+ I::FreeRegion: HashStable<CTX>,
+ I::RegionVid: HashStable<CTX>,
+ I::PlaceholderRegion: HashStable<CTX>,
+{
+ #[inline]
+ fn hash_stable(
+ &self,
+ hcx: &mut CTX,
+ hasher: &mut rustc_data_structures::stable_hasher::StableHasher,
+ ) {
+ std::mem::discriminant(self).hash_stable(hcx, hasher);
+ match self {
+ ReErased | ReStatic => {
+ // No variant fields to hash for these ...
+ }
+ ReEmpty(universe) => {
+ universe.hash_stable(hcx, hasher);
+ }
+ ReLateBound(db, br) => {
+ db.hash_stable(hcx, hasher);
+ br.hash_stable(hcx, hasher);
+ }
+ ReEarlyBound(eb) => {
+ eb.hash_stable(hcx, hasher);
+ }
+ ReFree(ref free_region) => {
+ free_region.hash_stable(hcx, hasher);
+ }
+ RePlaceholder(p) => {
+ p.hash_stable(hcx, hasher);
+ }
+ ReVar(reg) => {
+ reg.hash_stable(hcx, hasher);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/Cargo.toml b/compiler/rustc_typeck/Cargo.toml
new file mode 100644
index 000000000..faf52e269
--- /dev/null
+++ b/compiler/rustc_typeck/Cargo.toml
@@ -0,0 +1,32 @@
+[package]
+name = "rustc_typeck"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+test = false
+doctest = false
+
+[dependencies]
+rustc_arena = { path = "../rustc_arena" }
+tracing = "0.1"
+rustc_macros = { path = "../rustc_macros" }
+rustc_middle = { path = "../rustc_middle" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_hir_pretty = { path = "../rustc_hir_pretty" }
+rustc_target = { path = "../rustc_target" }
+rustc_session = { path = "../rustc_session" }
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+rustc_index = { path = "../rustc_index" }
+rustc_infer = { path = "../rustc_infer" }
+rustc_trait_selection = { path = "../rustc_trait_selection" }
+rustc_ty_utils = { path = "../rustc_ty_utils" }
+rustc_lint = { path = "../rustc_lint" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_type_ir = { path = "../rustc_type_ir" }
diff --git a/compiler/rustc_typeck/README.md b/compiler/rustc_typeck/README.md
new file mode 100644
index 000000000..b61dbd8c9
--- /dev/null
+++ b/compiler/rustc_typeck/README.md
@@ -0,0 +1,5 @@
+For high-level intro to how type checking works in rustc, see the
+[type checking] chapter of the [rustc dev guide].
+
+[type checking]: https://rustc-dev-guide.rust-lang.org/type-checking.html
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/
diff --git a/compiler/rustc_typeck/src/astconv/errors.rs b/compiler/rustc_typeck/src/astconv/errors.rs
new file mode 100644
index 000000000..ff39bf361
--- /dev/null
+++ b/compiler/rustc_typeck/src/astconv/errors.rs
@@ -0,0 +1,410 @@
+use crate::astconv::AstConv;
+use crate::errors::{ManualImplementation, MissingTypeParams};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{pluralize, struct_span_err, Applicability, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty;
+use rustc_session::parse::feature_err;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::{Span, Symbol, DUMMY_SP};
+
+use std::collections::BTreeSet;
+
+impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
+ /// On missing type parameters, emit an E0393 error and provide a structured suggestion using
+ /// the type parameter's name as a placeholder.
+ pub(crate) fn complain_about_missing_type_params(
+ &self,
+ missing_type_params: Vec<Symbol>,
+ def_id: DefId,
+ span: Span,
+ empty_generic_args: bool,
+ ) {
+ if missing_type_params.is_empty() {
+ return;
+ }
+
+ self.tcx().sess.emit_err(MissingTypeParams {
+ span,
+ def_span: self.tcx().def_span(def_id),
+ missing_type_params,
+ empty_generic_args,
+ });
+ }
+
+ /// When the code is using the `Fn` traits directly, instead of the `Fn(A) -> B` syntax, emit
+ /// an error and attempt to build a reasonable structured suggestion.
+ pub(crate) fn complain_about_internal_fn_trait(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ trait_segment: &'_ hir::PathSegment<'_>,
+ is_impl: bool,
+ ) {
+ if self.tcx().features().unboxed_closures {
+ return;
+ }
+
+ let trait_def = self.tcx().trait_def(trait_def_id);
+ if !trait_def.paren_sugar {
+ if trait_segment.args().parenthesized {
+ // For now, require that parenthetical notation be used only with `Fn()` etc.
+ let mut err = feature_err(
+ &self.tcx().sess.parse_sess,
+ sym::unboxed_closures,
+ span,
+ "parenthetical notation is only stable when used with `Fn`-family traits",
+ );
+ err.emit();
+ }
+
+ return;
+ }
+
+ let sess = self.tcx().sess;
+
+ if !trait_segment.args().parenthesized {
+ // For now, require that parenthetical notation be used only with `Fn()` etc.
+ let mut err = feature_err(
+ &sess.parse_sess,
+ sym::unboxed_closures,
+ span,
+ "the precise format of `Fn`-family traits' type parameters is subject to change",
+ );
+ // Do not suggest the other syntax if we are in trait impl:
+ // the desugaring would contain an associated type constraint.
+ if !is_impl {
+ let args = trait_segment
+ .args
+ .as_ref()
+ .and_then(|args| args.args.get(0))
+ .and_then(|arg| match arg {
+ hir::GenericArg::Type(ty) => match ty.kind {
+ hir::TyKind::Tup(t) => t
+ .iter()
+ .map(|e| sess.source_map().span_to_snippet(e.span))
+ .collect::<Result<Vec<_>, _>>()
+ .map(|a| a.join(", ")),
+ _ => sess.source_map().span_to_snippet(ty.span),
+ }
+ .map(|s| format!("({})", s))
+ .ok(),
+ _ => None,
+ })
+ .unwrap_or_else(|| "()".to_string());
+ let ret = trait_segment
+ .args()
+ .bindings
+ .iter()
+ .find_map(|b| match (b.ident.name == sym::Output, &b.kind) {
+ (true, hir::TypeBindingKind::Equality { term }) => {
+ let span = match term {
+ hir::Term::Ty(ty) => ty.span,
+ hir::Term::Const(c) => self.tcx().hir().span(c.hir_id),
+ };
+ sess.source_map().span_to_snippet(span).ok()
+ }
+ _ => None,
+ })
+ .unwrap_or_else(|| "()".to_string());
+ err.span_suggestion(
+ span,
+ "use parenthetical notation instead",
+ format!("{}{} -> {}", trait_segment.ident, args, ret),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+
+ if is_impl {
+ let trait_name = self.tcx().def_path_str(trait_def_id);
+ self.tcx().sess.emit_err(ManualImplementation { span, trait_name });
+ }
+ }
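
For context, the parenthetical sugar this diagnostic steers users toward is the stable way to write `Fn`-family bounds; the desugared angle-bracket form `Fn<(u8,), Output = u8>` requires `#![feature(unboxed_closures)]` and is what the feature error above gates. A small usage sketch of the accepted spelling:

```rust
// Stable parenthetical sugar: `Fn(u8) -> u8`.
fn call_it<F: Fn(u8) -> u8>(f: F) -> u8 {
    f(1)
}

fn main() {
    assert_eq!(call_it(|x| x + 1), 2);
}
```
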
+
+ pub(crate) fn complain_about_assoc_type_not_found<I>(
+ &self,
+ all_candidates: impl Fn() -> I,
+ ty_param_name: &str,
+ assoc_name: Ident,
+ span: Span,
+ ) -> ErrorGuaranteed
+ where
+ I: Iterator<Item = ty::PolyTraitRef<'tcx>>,
+ {
+ // The fallback span is needed because `assoc_name` might be an `Fn()`'s `Output` without a
+ // valid span, so we point at the whole path segment instead.
+ let span = if assoc_name.span != DUMMY_SP { assoc_name.span } else { span };
+ let mut err = struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0220,
+ "associated type `{}` not found for `{}`",
+ assoc_name,
+ ty_param_name
+ );
+
+ let all_candidate_names: Vec<_> = all_candidates()
+ .flat_map(|r| self.tcx().associated_items(r.def_id()).in_definition_order())
+ .filter_map(
+ |item| if item.kind == ty::AssocKind::Type { Some(item.name) } else { None },
+ )
+ .collect();
+
+ if let (Some(suggested_name), true) = (
+ find_best_match_for_name(&all_candidate_names, assoc_name.name, None),
+ assoc_name.span != DUMMY_SP,
+ ) {
+ err.span_suggestion(
+ assoc_name.span,
+ "there is an associated type with a similar name",
+ suggested_name,
+ Applicability::MaybeIncorrect,
+ );
+ return err.emit();
+ }
+
+ // If we didn't find a good item in the supertraits (or couldn't get
+ // the supertraits), like in ItemCtxt, then look more generally from
+ // all visible traits. If there's one clear winner, just suggest that.
+
+ let visible_traits: Vec<_> = self
+ .tcx()
+ .all_traits()
+ .filter(|trait_def_id| {
+ let viz = self.tcx().visibility(*trait_def_id);
+ if let Some(def_id) = self.item_def_id() {
+ viz.is_accessible_from(def_id, self.tcx())
+ } else {
+ viz.is_visible_locally()
+ }
+ })
+ .collect();
+
+ let wider_candidate_names: Vec<_> = visible_traits
+ .iter()
+ .flat_map(|trait_def_id| {
+ self.tcx().associated_items(*trait_def_id).in_definition_order()
+ })
+ .filter_map(
+ |item| if item.kind == ty::AssocKind::Type { Some(item.name) } else { None },
+ )
+ .collect();
+
+ if let (Some(suggested_name), true) = (
+ find_best_match_for_name(&wider_candidate_names, assoc_name.name, None),
+ assoc_name.span != DUMMY_SP,
+ ) {
+ if let [best_trait] = visible_traits
+ .iter()
+ .filter(|trait_def_id| {
+ self.tcx()
+ .associated_items(*trait_def_id)
+ .filter_by_name_unhygienic(suggested_name)
+ .any(|item| item.kind == ty::AssocKind::Type)
+ })
+ .collect::<Vec<_>>()[..]
+ {
+ err.span_label(
+ assoc_name.span,
+ format!(
+ "there is a similarly named associated type `{suggested_name}` in the trait `{}`",
+ self.tcx().def_path_str(*best_trait)
+ ),
+ );
+ return err.emit();
+ }
+ }
+
+ err.span_label(span, format!("associated type `{}` not found", assoc_name));
+ err.emit()
+ }
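
As an illustration of what this method fires on: misspelling an associated type name in a bound yields E0220 plus the "similar name" suggestion computed above. A sketch, with the broken spelling kept in a comment so the snippet still compiles:

```rust
// The misspelled bound below (kept as a comment) is the shape of input
// that triggers E0220 with a "similar name" suggestion for `Item`:
//
//     fn first<I: Iterator<Itm = u32>>(it: I) -> Option<u32> { .. }
//
// The accepted spelling:
fn first<I: Iterator<Item = u32>>(mut it: I) -> Option<u32> {
    it.next()
}

fn main() {
    assert_eq!(first(vec![1u32, 2].into_iter()), Some(1));
}
```
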
+
+ /// When there are any missing associated types, emit an E0191 error and attempt to supply a
+ /// reasonable suggestion on how to write it. When multiple associated types in the same
+ /// trait bound have the same name (because they come from different supertraits), we
+ /// instead emit a generic note suggesting the use of a `where` clause to constrain them.
+ pub(crate) fn complain_about_missing_associated_types(
+ &self,
+ associated_types: FxHashMap<Span, BTreeSet<DefId>>,
+ potential_assoc_types: Vec<Span>,
+ trait_bounds: &[hir::PolyTraitRef<'_>],
+ ) {
+ if associated_types.values().all(|v| v.is_empty()) {
+ return;
+ }
+ let tcx = self.tcx();
+ // FIXME: Marked `mut` so that we can replace the spans further below with a more
+ // appropriate one, but this should be handled earlier in the span assignment.
+ let mut associated_types: FxHashMap<Span, Vec<_>> = associated_types
+ .into_iter()
+ .map(|(span, def_ids)| {
+ (span, def_ids.into_iter().map(|did| tcx.associated_item(did)).collect())
+ })
+ .collect();
+ let mut names = vec![];
+
+ // Account for things like `dyn Foo + 'a`, as in tests `issue-22434.rs` and
+ // `issue-22560.rs`.
+ let mut trait_bound_spans: Vec<Span> = vec![];
+ for (span, items) in &associated_types {
+ if !items.is_empty() {
+ trait_bound_spans.push(*span);
+ }
+ for assoc_item in items {
+ let trait_def_id = assoc_item.container_id(tcx);
+ names.push(format!(
+ "`{}` (from trait `{}`)",
+ assoc_item.name,
+ tcx.def_path_str(trait_def_id),
+ ));
+ }
+ }
+ if let ([], [bound]) = (&potential_assoc_types[..], &trait_bounds) {
+ match bound.trait_ref.path.segments {
+ // FIXME: `trait_ref.path.span` can point to a full path with multiple
+ // segments, even though `trait_ref.path.segments` is of length `1`. Work
+ // around that bug here, even though it should be fixed elsewhere.
+ // This would otherwise cause an invalid suggestion. For an example, look at
+ // `src/test/ui/issues/issue-28344.rs` where instead of the following:
+ //
+ // error[E0191]: the value of the associated type `Output`
+ // (from trait `std::ops::BitXor`) must be specified
+ // --> $DIR/issue-28344.rs:4:17
+ // |
+ // LL | let x: u8 = BitXor::bitor(0 as u8, 0 as u8);
+ // | ^^^^^^ help: specify the associated type:
+ // | `BitXor<Output = Type>`
+ //
+ // we would output:
+ //
+ // error[E0191]: the value of the associated type `Output`
+ // (from trait `std::ops::BitXor`) must be specified
+ // --> $DIR/issue-28344.rs:4:17
+ // |
+ // LL | let x: u8 = BitXor::bitor(0 as u8, 0 as u8);
+ // | ^^^^^^^^^^^^^ help: specify the associated type:
+ // | `BitXor::bitor<Output = Type>`
+ [segment] if segment.args.is_none() => {
+ trait_bound_spans = vec![segment.ident.span];
+ associated_types = associated_types
+ .into_iter()
+ .map(|(_, items)| (segment.ident.span, items))
+ .collect();
+ }
+ _ => {}
+ }
+ }
+ names.sort();
+ trait_bound_spans.sort();
+ let mut err = struct_span_err!(
+ tcx.sess,
+ trait_bound_spans,
+ E0191,
+ "the value of the associated type{} {} must be specified",
+ pluralize!(names.len()),
+ names.join(", "),
+ );
+ let mut suggestions = vec![];
+ let mut types_count = 0;
+ let mut where_constraints = vec![];
+ let mut already_has_generics_args_suggestion = false;
+ for (span, assoc_items) in &associated_types {
+ let mut names: FxHashMap<_, usize> = FxHashMap::default();
+ for item in assoc_items {
+ types_count += 1;
+ *names.entry(item.name).or_insert(0) += 1;
+ }
+ let mut dupes = false;
+ for item in assoc_items {
+ let prefix = if names[&item.name] > 1 {
+ let trait_def_id = item.container_id(tcx);
+ dupes = true;
+ format!("{}::", tcx.def_path_str(trait_def_id))
+ } else {
+ String::new()
+ };
+ if let Some(sp) = tcx.hir().span_if_local(item.def_id) {
+ err.span_label(sp, format!("`{}{}` defined here", prefix, item.name));
+ }
+ }
+ if potential_assoc_types.len() == assoc_items.len() {
+ // When the number of missing associated types equals the number of extra
+ // type arguments present, a suggestion to replace the generic args with
+ // associated types has already been emitted.
+ already_has_generics_args_suggestion = true;
+ } else if let (Ok(snippet), false) =
+ (tcx.sess.source_map().span_to_snippet(*span), dupes)
+ {
+ let types: Vec<_> =
+ assoc_items.iter().map(|item| format!("{} = Type", item.name)).collect();
+ let code = if snippet.ends_with('>') {
+ // The user wrote `Trait<'a>` or similar and we don't have a type we can
+ // suggest, but at least we can clue them to the correct syntax
+ // `Trait<'a, Item = Type>` while accounting for the `<'a>` in the
+ // suggestion.
+ format!("{}, {}>", &snippet[..snippet.len() - 1], types.join(", "))
+ } else {
+ // The user wrote `Iterator`, so we don't have a type we can suggest, but at
+ // least we can clue them to the correct syntax `Iterator<Item = Type>`.
+ format!("{}<{}>", snippet, types.join(", "))
+ };
+ suggestions.push((*span, code));
+ } else if dupes {
+ where_constraints.push(*span);
+ }
+ }
+ let where_msg = "consider introducing a new type parameter, adding `where` constraints \
+ using the fully-qualified path to the associated types";
+ if !where_constraints.is_empty() && suggestions.is_empty() {
+            // If there are duplicated associated type names and a single trait bound, do
+            // not use a structured suggestion: it means that there are multiple supertraits
+            // with the same associated type name.
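+            // For example (illustrative): `dyn Foo` where `Foo: Bar` and both `Foo`
+            // and `Bar` define an associated type named `Assoc`.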
+ err.help(where_msg);
+ }
+ if suggestions.len() != 1 || already_has_generics_args_suggestion {
+            // We don't need this label if there's an inline suggestion; show it otherwise.
+ for (span, assoc_items) in &associated_types {
+ let mut names: FxHashMap<_, usize> = FxHashMap::default();
+ for item in assoc_items {
+ *names.entry(item.name).or_insert(0) += 1;
+ }
+ let mut label = vec![];
+ for item in assoc_items {
+ let postfix = if names[&item.name] > 1 {
+ let trait_def_id = item.container_id(tcx);
+ format!(" (from trait `{}`)", tcx.def_path_str(trait_def_id))
+ } else {
+ String::new()
+ };
+ label.push(format!("`{}`{}", item.name, postfix));
+ }
+ if !label.is_empty() {
+ err.span_label(
+ *span,
+ format!(
+ "associated type{} {} must be specified",
+ pluralize!(label.len()),
+ label.join(", "),
+ ),
+ );
+ }
+ }
+ }
+ if !suggestions.is_empty() {
+ err.multipart_suggestion(
+ &format!("specify the associated type{}", pluralize!(types_count)),
+ suggestions,
+ Applicability::HasPlaceholders,
+ );
+ if !where_constraints.is_empty() {
+ err.span_help(where_constraints, where_msg);
+ }
+ }
+ err.emit();
+ }
+}
diff --git a/compiler/rustc_typeck/src/astconv/generics.rs b/compiler/rustc_typeck/src/astconv/generics.rs
new file mode 100644
index 000000000..40aa27a29
--- /dev/null
+++ b/compiler/rustc_typeck/src/astconv/generics.rs
@@ -0,0 +1,664 @@
+use super::IsMethodCall;
+use crate::astconv::{
+ AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch,
+ GenericArgCountResult, GenericArgPosition,
+};
+use crate::errors::AssocTypeBindingNotAllowed;
+use crate::structured_errors::{GenericArgsInfo, StructuredDiagnostic, WrongNumberOfGenericArgs};
+use rustc_ast::ast::ParamKindOrd;
+use rustc_errors::{struct_span_err, Applicability, Diagnostic, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::GenericArg;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::ty::{
+ self, subst, subst::SubstsRef, GenericParamDef, GenericParamDefKind, IsSuggestable, Ty, TyCtxt,
+};
+use rustc_session::lint::builtin::LATE_BOUND_LIFETIME_ARGUMENTS;
+use rustc_span::{symbol::kw, Span};
+use smallvec::SmallVec;
+
+impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
+ /// Report an error that a generic argument did not match the generic parameter that was
+ /// expected.
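+    ///
+    /// As a rough illustration (hypothetical items, not from this crate), a call
+    /// like the following ends up here, since a type is supplied where a const
+    /// generic is expected:
+    ///
+    /// ```ignore (illustrative)
+    /// fn f<const N: usize>() {}
+    /// f::<u8>(); // error[E0747]: type provided when a constant was expected
+    /// ```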
+ fn generic_arg_mismatch_err(
+ tcx: TyCtxt<'_>,
+ arg: &GenericArg<'_>,
+ param: &GenericParamDef,
+ possible_ordering_error: bool,
+ help: Option<&str>,
+ ) {
+ let sess = tcx.sess;
+ let mut err = struct_span_err!(
+ sess,
+ arg.span(),
+ E0747,
+ "{} provided when a {} was expected",
+ arg.descr(),
+ param.kind.descr(),
+ );
+
+ if let GenericParamDefKind::Const { .. } = param.kind {
+ if matches!(arg, GenericArg::Type(hir::Ty { kind: hir::TyKind::Infer, .. })) {
+ err.help("const arguments cannot yet be inferred with `_`");
+ if sess.is_nightly_build() {
+ err.help(
+ "add `#![feature(generic_arg_infer)]` to the crate attributes to enable",
+ );
+ }
+ }
+ }
+
+ let add_braces_suggestion = |arg: &GenericArg<'_>, err: &mut Diagnostic| {
+ let suggestions = vec![
+ (arg.span().shrink_to_lo(), String::from("{ ")),
+ (arg.span().shrink_to_hi(), String::from(" }")),
+ ];
+ err.multipart_suggestion(
+ "if this generic argument was intended as a const parameter, \
+ surround it with braces",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ };
+
+ // Specific suggestion set for diagnostics
+ match (arg, &param.kind) {
+ (
+ GenericArg::Type(hir::Ty {
+ kind: hir::TyKind::Path(rustc_hir::QPath::Resolved(_, path)),
+ ..
+ }),
+ GenericParamDefKind::Const { .. },
+ ) => match path.res {
+ Res::Err => {
+ add_braces_suggestion(arg, &mut err);
+ err.set_primary_message(
+ "unresolved item provided when a constant was expected",
+ )
+ .emit();
+ return;
+ }
+ Res::Def(DefKind::TyParam, src_def_id) => {
+ if let Some(param_local_id) = param.def_id.as_local() {
+ let param_name = tcx.hir().ty_param_name(param_local_id);
+ let param_type = tcx.infer_ctxt().enter(|infcx| {
+ infcx.resolve_numeric_literals_with_default(tcx.type_of(param.def_id))
+ });
+ if param_type.is_suggestable(tcx, false) {
+ err.span_suggestion(
+ tcx.def_span(src_def_id),
+ "consider changing this type parameter to be a `const` generic",
+ format!("const {}: {}", param_name, param_type),
+ Applicability::MaybeIncorrect,
+ );
+ };
+ }
+ }
+ _ => add_braces_suggestion(arg, &mut err),
+ },
+ (
+ GenericArg::Type(hir::Ty { kind: hir::TyKind::Path(_), .. }),
+ GenericParamDefKind::Const { .. },
+ ) => add_braces_suggestion(arg, &mut err),
+ (
+ GenericArg::Type(hir::Ty { kind: hir::TyKind::Array(_, len), .. }),
+ GenericParamDefKind::Const { .. },
+ ) if tcx.type_of(param.def_id) == tcx.types.usize => {
+ let snippet = sess.source_map().span_to_snippet(tcx.hir().span(len.hir_id()));
+ if let Ok(snippet) = snippet {
+ err.span_suggestion(
+ arg.span(),
+ "array type provided where a `usize` was expected, try",
+ format!("{{ {} }}", snippet),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ (GenericArg::Const(cnst), GenericParamDefKind::Type { .. }) => {
+ let body = tcx.hir().body(cnst.value.body);
+ if let rustc_hir::ExprKind::Path(rustc_hir::QPath::Resolved(_, path)) =
+ body.value.kind
+ {
+ if let Res::Def(DefKind::Fn { .. }, id) = path.res {
+ err.help(&format!(
+ "`{}` is a function item, not a type",
+ tcx.item_name(id)
+ ));
+ err.help("function item types cannot be named directly");
+ }
+ }
+ }
+ _ => {}
+ }
+
+ let kind_ord = param.kind.to_ord();
+ let arg_ord = arg.to_ord();
+
+ // This note is only true when generic parameters are strictly ordered by their kind.
+ if possible_ordering_error && kind_ord.cmp(&arg_ord) != core::cmp::Ordering::Equal {
+ let (first, last) = if kind_ord < arg_ord {
+ (param.kind.descr(), arg.descr())
+ } else {
+ (arg.descr(), param.kind.descr())
+ };
+ err.note(&format!("{} arguments must be provided before {} arguments", first, last));
+ if let Some(help) = help {
+ err.help(help);
+ }
+ }
+
+ err.emit();
+ }
+
+ /// Creates the relevant generic argument substitutions
+ /// corresponding to a set of generic parameters. This is a
+ /// rather complex function. Let us try to explain the role
+ /// of each of its parameters:
+ ///
+ /// To start, we are given the `def_id` of the thing we are
+ /// creating the substitutions for, and a partial set of
+ /// substitutions `parent_substs`. In general, the substitutions
+ /// for an item begin with substitutions for all the "parents" of
+ /// that item -- e.g., for a method it might include the
+ /// parameters from the impl.
+ ///
+ /// Therefore, the method begins by walking down these parents,
+    /// starting with the outermost parent and proceeding inwards until
+ /// it reaches `def_id`. For each parent `P`, it will check `parent_substs`
+ /// first to see if the parent's substitutions are listed in there. If so,
+ /// we can append those and move on. Otherwise, it invokes the
+ /// three callback functions:
+ ///
+ /// - `args_for_def_id`: given the `DefId` `P`, supplies back the
+ /// generic arguments that were given to that parent from within
+ /// the path; so e.g., if you have `<T as Foo>::Bar`, the `DefId`
+ /// might refer to the trait `Foo`, and the arguments might be
+ /// `[T]`. The boolean value indicates whether to infer values
+ /// for arguments whose values were not explicitly provided.
+ /// - `provided_kind`: given the generic parameter and the value from `args_for_def_id`,
+ /// instantiate a `GenericArg`.
+ /// - `inferred_kind`: if no parameter was provided, and inference is enabled, then
+ /// creates a suitable inference variable.
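+    ///
+    /// As a minimal sketch (hypothetical items, not from this crate) of how parent
+    /// substitutions compose with an item's own:
+    ///
+    /// ```ignore (illustrative)
+    /// struct Foo<T>(T);
+    /// impl<T> Foo<T> {
+    ///     fn bar<U>(&self, _: U) {}
+    /// }
+    /// // For `Foo::<u8>::bar::<u16>`, the substitutions are built in lock-step as
+    /// // `[u8, u16]`: first the impl's `T`, then `bar`'s own `U`.
+    /// ```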
+ pub fn create_substs_for_generic_args<'a>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ parent_substs: &[subst::GenericArg<'tcx>],
+ has_self: bool,
+ self_ty: Option<Ty<'tcx>>,
+ arg_count: &GenericArgCountResult,
+ ctx: &mut impl CreateSubstsForGenericArgsCtxt<'a, 'tcx>,
+ ) -> SubstsRef<'tcx> {
+ // Collect the segments of the path; we need to substitute arguments
+ // for parameters throughout the entire path (wherever there are
+ // generic parameters).
+ let mut parent_defs = tcx.generics_of(def_id);
+ let count = parent_defs.count();
+ let mut stack = vec![(def_id, parent_defs)];
+ while let Some(def_id) = parent_defs.parent {
+ parent_defs = tcx.generics_of(def_id);
+ stack.push((def_id, parent_defs));
+ }
+
+ // We manually build up the substitution, rather than using convenience
+ // methods in `subst.rs`, so that we can iterate over the arguments and
+ // parameters in lock-step linearly, instead of trying to match each pair.
+ let mut substs: SmallVec<[subst::GenericArg<'tcx>; 8]> = SmallVec::with_capacity(count);
+ // Iterate over each segment of the path.
+ while let Some((def_id, defs)) = stack.pop() {
+ let mut params = defs.params.iter().peekable();
+
+ // If we have already computed substitutions for parents, we can use those directly.
+ while let Some(&param) = params.peek() {
+ if let Some(&kind) = parent_substs.get(param.index as usize) {
+ substs.push(kind);
+ params.next();
+ } else {
+ break;
+ }
+ }
+
+ // `Self` is handled first, unless it's been handled in `parent_substs`.
+ if has_self {
+ if let Some(&param) = params.peek() {
+ if param.index == 0 {
+ if let GenericParamDefKind::Type { .. } = param.kind {
+ substs.push(
+ self_ty
+ .map(|ty| ty.into())
+ .unwrap_or_else(|| ctx.inferred_kind(None, param, true)),
+ );
+ params.next();
+ }
+ }
+ }
+ }
+
+ // Check whether this segment takes generic arguments and the user has provided any.
+ let (generic_args, infer_args) = ctx.args_for_def_id(def_id);
+
+ let args_iter = generic_args.iter().flat_map(|generic_args| generic_args.args.iter());
+ let mut args = args_iter.clone().peekable();
+
+ // If we encounter a type or const when we expect a lifetime, we infer the lifetimes.
+ // If we later encounter a lifetime, we know that the arguments were provided in the
+ // wrong order. `force_infer_lt` records the type or const that forced lifetimes to be
+ // inferred, so we can use it for diagnostics later.
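+            // For example (illustrative), in `Foo<u8, 'a>` the `u8` forces the
+            // lifetime to be inferred, and the trailing `'a` is then reported as
+            // out of order.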
+ let mut force_infer_lt = None;
+
+ loop {
+ // We're going to iterate through the generic arguments that the user
+ // provided, matching them with the generic parameters we expect.
+ // Mismatches can occur as a result of elided lifetimes, or for malformed
+ // input. We try to handle both sensibly.
+ match (args.peek(), params.peek()) {
+ (Some(&arg), Some(&param)) => {
+ match (arg, &param.kind, arg_count.explicit_late_bound) {
+ (GenericArg::Lifetime(_), GenericParamDefKind::Lifetime, _)
+ | (
+ GenericArg::Type(_) | GenericArg::Infer(_),
+ GenericParamDefKind::Type { .. },
+ _,
+ )
+ | (
+ GenericArg::Const(_) | GenericArg::Infer(_),
+ GenericParamDefKind::Const { .. },
+ _,
+ ) => {
+ substs.push(ctx.provided_kind(param, arg));
+ args.next();
+ params.next();
+ }
+ (
+ GenericArg::Infer(_) | GenericArg::Type(_) | GenericArg::Const(_),
+ GenericParamDefKind::Lifetime,
+ _,
+ ) => {
+ // We expected a lifetime argument, but got a type or const
+ // argument. That means we're inferring the lifetimes.
+ substs.push(ctx.inferred_kind(None, param, infer_args));
+ force_infer_lt = Some((arg, param));
+ params.next();
+ }
+ (GenericArg::Lifetime(_), _, ExplicitLateBound::Yes) => {
+ // We've come across a lifetime when we expected something else in
+ // the presence of explicit late bounds. This is most likely
+ // due to the presence of the explicit bound so we're just going to
+ // ignore it.
+ args.next();
+ }
+ (_, _, _) => {
+ // We expected one kind of parameter, but the user provided
+ // another. This is an error. However, if we already know that
+ // the arguments don't match up with the parameters, we won't issue
+ // an additional error, as the user already knows what's wrong.
+ if arg_count.correct.is_ok() {
+ // We're going to iterate over the parameters to sort them out, and
+                                // show that order to the user as a possible order for the parameters.
+ let mut param_types_present = defs
+ .params
+ .clone()
+ .into_iter()
+ .map(|param| (param.kind.to_ord(), param))
+ .collect::<Vec<(ParamKindOrd, GenericParamDef)>>();
+ param_types_present.sort_by_key(|(ord, _)| *ord);
+ let (mut param_types_present, ordered_params): (
+ Vec<ParamKindOrd>,
+ Vec<GenericParamDef>,
+ ) = param_types_present.into_iter().unzip();
+ param_types_present.dedup();
+
+ Self::generic_arg_mismatch_err(
+ tcx,
+ arg,
+ param,
+ !args_iter.clone().is_sorted_by_key(|arg| arg.to_ord()),
+ Some(&format!(
+ "reorder the arguments: {}: `<{}>`",
+ param_types_present
+ .into_iter()
+ .map(|ord| format!("{}s", ord))
+ .collect::<Vec<String>>()
+ .join(", then "),
+ ordered_params
+ .into_iter()
+ .filter_map(|param| {
+ if param.name == kw::SelfUpper {
+ None
+ } else {
+ Some(param.name.to_string())
+ }
+ })
+ .collect::<Vec<String>>()
+ .join(", ")
+ )),
+ );
+ }
+
+ // We've reported the error, but we want to make sure that this
+ // problem doesn't bubble down and create additional, irrelevant
+ // errors. In this case, we're simply going to ignore the argument
+ // and any following arguments. The rest of the parameters will be
+ // inferred.
+ while args.next().is_some() {}
+ }
+ }
+ }
+
+ (Some(&arg), None) => {
+ // We should never be able to reach this point with well-formed input.
+ // There are three situations in which we can encounter this issue.
+ //
+ // 1. The number of arguments is incorrect. In this case, an error
+ // will already have been emitted, and we can ignore it.
+ // 2. There are late-bound lifetime parameters present, yet the
+ // lifetime arguments have also been explicitly specified by the
+ // user.
+ // 3. We've inferred some lifetimes, which have been provided later (i.e.
+ // after a type or const). We want to throw an error in this case.
+
+ if arg_count.correct.is_ok()
+ && arg_count.explicit_late_bound == ExplicitLateBound::No
+ {
+ let kind = arg.descr();
+ assert_eq!(kind, "lifetime");
+ let (provided_arg, param) =
+ force_infer_lt.expect("lifetimes ought to have been inferred");
+ Self::generic_arg_mismatch_err(tcx, provided_arg, param, false, None);
+ }
+
+ break;
+ }
+
+ (None, Some(&param)) => {
+ // If there are fewer arguments than parameters, it means
+ // we're inferring the remaining arguments.
+ substs.push(ctx.inferred_kind(Some(&substs), param, infer_args));
+ params.next();
+ }
+
+ (None, None) => break,
+ }
+ }
+ }
+
+ tcx.intern_substs(&substs)
+ }
+
+ /// Checks that the correct number of generic arguments have been provided.
+ /// Used specifically for function calls.
+ pub fn check_generic_arg_count_for_call(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ def_id: DefId,
+ generics: &ty::Generics,
+ seg: &hir::PathSegment<'_>,
+ is_method_call: IsMethodCall,
+ ) -> GenericArgCountResult {
+ let empty_args = hir::GenericArgs::none();
+ let gen_args = seg.args.unwrap_or(&empty_args);
+ let gen_pos = if is_method_call == IsMethodCall::Yes {
+ GenericArgPosition::MethodCall
+ } else {
+ GenericArgPosition::Value
+ };
+ let has_self = generics.parent.is_none() && generics.has_self;
+
+ Self::check_generic_arg_count(
+ tcx,
+ span,
+ def_id,
+ seg,
+ generics,
+ gen_args,
+ gen_pos,
+ has_self,
+ seg.infer_args,
+ )
+ }
+
+ /// Checks that the correct number of generic arguments have been provided.
+ /// This is used both for datatypes and function calls.
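+    ///
+    /// E.g. (illustrative): given `struct S<T>(T);`, writing `S<u8, u16>` is
+    /// reported here as one excess type argument.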
+ #[instrument(skip(tcx, gen_pos), level = "debug")]
+ pub(crate) fn check_generic_arg_count(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ def_id: DefId,
+ seg: &hir::PathSegment<'_>,
+ gen_params: &ty::Generics,
+ gen_args: &hir::GenericArgs<'_>,
+ gen_pos: GenericArgPosition,
+ has_self: bool,
+ infer_args: bool,
+ ) -> GenericArgCountResult {
+ let default_counts = gen_params.own_defaults();
+ let param_counts = gen_params.own_counts();
+
+ // Subtracting from param count to ensure type params synthesized from `impl Trait`
+ // cannot be explicitly specified.
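+        // E.g. (illustrative): in `fn f(x: impl Clone)`, the `impl Clone` becomes a
+        // synthetic type parameter that callers cannot specify via turbofish.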
+ let synth_type_param_count = gen_params
+ .params
+ .iter()
+ .filter(|param| {
+ matches!(param.kind, ty::GenericParamDefKind::Type { synthetic: true, .. })
+ })
+ .count();
+ let named_type_param_count =
+ param_counts.types - has_self as usize - synth_type_param_count;
+ let infer_lifetimes =
+ (gen_pos != GenericArgPosition::Type || infer_args) && !gen_args.has_lifetime_params();
+
+ if gen_pos != GenericArgPosition::Type && !gen_args.bindings.is_empty() {
+ Self::prohibit_assoc_ty_binding(tcx, gen_args.bindings[0].span);
+ }
+
+ let explicit_late_bound =
+ Self::prohibit_explicit_late_bound_lifetimes(tcx, gen_params, gen_args, gen_pos);
+
+ let mut invalid_args = vec![];
+
+ let mut check_lifetime_args =
+ |min_expected_args: usize,
+ max_expected_args: usize,
+ provided_args: usize,
+ late_bounds_ignore: bool| {
+ if (min_expected_args..=max_expected_args).contains(&provided_args) {
+ return Ok(());
+ }
+
+ if late_bounds_ignore {
+ return Ok(());
+ }
+
+ if provided_args > max_expected_args {
+ invalid_args.extend(
+ gen_args.args[max_expected_args..provided_args]
+ .iter()
+ .map(|arg| arg.span()),
+ );
+ };
+
+ let gen_args_info = if provided_args > min_expected_args {
+ invalid_args.extend(
+ gen_args.args[min_expected_args..provided_args]
+ .iter()
+ .map(|arg| arg.span()),
+ );
+ let num_redundant_args = provided_args - min_expected_args;
+ GenericArgsInfo::ExcessLifetimes { num_redundant_args }
+ } else {
+ let num_missing_args = min_expected_args - provided_args;
+ GenericArgsInfo::MissingLifetimes { num_missing_args }
+ };
+
+ let reported = WrongNumberOfGenericArgs::new(
+ tcx,
+ gen_args_info,
+ seg,
+ gen_params,
+ has_self as usize,
+ gen_args,
+ def_id,
+ )
+ .diagnostic()
+ .emit();
+
+ Err(reported)
+ };
+
+ let min_expected_lifetime_args = if infer_lifetimes { 0 } else { param_counts.lifetimes };
+ let max_expected_lifetime_args = param_counts.lifetimes;
+ let num_provided_lifetime_args = gen_args.num_lifetime_params();
+
+ let lifetimes_correct = check_lifetime_args(
+ min_expected_lifetime_args,
+ max_expected_lifetime_args,
+ num_provided_lifetime_args,
+ explicit_late_bound == ExplicitLateBound::Yes,
+ );
+
+ let mut check_types_and_consts = |expected_min,
+ expected_max,
+ expected_max_with_synth,
+ provided,
+ params_offset,
+ args_offset| {
+ debug!(
+ ?expected_min,
+ ?expected_max,
+ ?provided,
+ ?params_offset,
+ ?args_offset,
+ "check_types_and_consts"
+ );
+ if (expected_min..=expected_max).contains(&provided) {
+ return Ok(());
+ }
+
+ let num_default_params = expected_max - expected_min;
+
+ let gen_args_info = if provided > expected_max {
+ invalid_args.extend(
+ gen_args.args[args_offset + expected_max..args_offset + provided]
+ .iter()
+ .map(|arg| arg.span()),
+ );
+ let num_redundant_args = provided - expected_max;
+
+ // Provide extra note if synthetic arguments like `impl Trait` are specified.
+ let synth_provided = provided <= expected_max_with_synth;
+
+ GenericArgsInfo::ExcessTypesOrConsts {
+ num_redundant_args,
+ num_default_params,
+ args_offset,
+ synth_provided,
+ }
+ } else {
+ let num_missing_args = expected_max - provided;
+
+ GenericArgsInfo::MissingTypesOrConsts {
+ num_missing_args,
+ num_default_params,
+ args_offset,
+ }
+ };
+
+ debug!(?gen_args_info);
+
+ let reported = WrongNumberOfGenericArgs::new(
+ tcx,
+ gen_args_info,
+ seg,
+ gen_params,
+ params_offset,
+ gen_args,
+ def_id,
+ )
+ .diagnostic()
+ .emit_unless(gen_args.has_err());
+
+ Err(reported)
+ };
+
+ let args_correct = {
+ let expected_min = if infer_args {
+ 0
+ } else {
+ param_counts.consts + named_type_param_count
+ - default_counts.types
+ - default_counts.consts
+ };
+ debug!(?expected_min);
+ debug!(arg_counts.lifetimes=?gen_args.num_lifetime_params());
+
+ check_types_and_consts(
+ expected_min,
+ param_counts.consts + named_type_param_count,
+ param_counts.consts + named_type_param_count + synth_type_param_count,
+ gen_args.num_generic_params(),
+ param_counts.lifetimes + has_self as usize,
+ gen_args.num_lifetime_params(),
+ )
+ };
+
+ GenericArgCountResult {
+ explicit_late_bound,
+ correct: lifetimes_correct.and(args_correct).map_err(|reported| {
+ GenericArgCountMismatch { reported: Some(reported), invalid_args }
+ }),
+ }
+ }
+
+ /// Emits an error regarding forbidden type binding associations
+ pub fn prohibit_assoc_ty_binding(tcx: TyCtxt<'_>, span: Span) {
+ tcx.sess.emit_err(AssocTypeBindingNotAllowed { span });
+ }
+
+ /// Prohibits explicit lifetime arguments if late-bound lifetime parameters
+ /// are present. This is used both for datatypes and function calls.
+ pub(crate) fn prohibit_explicit_late_bound_lifetimes(
+ tcx: TyCtxt<'_>,
+ def: &ty::Generics,
+ args: &hir::GenericArgs<'_>,
+ position: GenericArgPosition,
+ ) -> ExplicitLateBound {
+ let param_counts = def.own_counts();
+ let infer_lifetimes = position != GenericArgPosition::Type && !args.has_lifetime_params();
+
+ if infer_lifetimes {
+ return ExplicitLateBound::No;
+ }
+
+ if let Some(span_late) = def.has_late_bound_regions {
+ let msg = "cannot specify lifetime arguments explicitly \
+ if late bound lifetime parameters are present";
+ let note = "the late bound lifetime parameter is introduced here";
+ let span = args.args[0].span();
+
+ if position == GenericArgPosition::Value
+ && args.num_lifetime_params() != param_counts.lifetimes
+ {
+ let mut err = tcx.sess.struct_span_err(span, msg);
+ err.span_note(span_late, note);
+ err.emit();
+ } else {
+ let mut multispan = MultiSpan::from_span(span);
+ multispan.push_span_label(span_late, note);
+ tcx.struct_span_lint_hir(
+ LATE_BOUND_LIFETIME_ARGUMENTS,
+ args.args[0].id(),
+ multispan,
+ |lint| {
+ lint.build(msg).emit();
+ },
+ );
+ }
+
+ ExplicitLateBound::Yes
+ } else {
+ ExplicitLateBound::No
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/astconv/mod.rs b/compiler/rustc_typeck/src/astconv/mod.rs
new file mode 100644
index 000000000..8a5c7fee6
--- /dev/null
+++ b/compiler/rustc_typeck/src/astconv/mod.rs
@@ -0,0 +1,3091 @@
+//! Conversion from AST representation of types to the `ty.rs` representation.
+//! The main routine here is `ast_ty_to_ty()`; each use is parameterized by an
+//! instance of `AstConv`.
+
+mod errors;
+mod generics;
+
+use crate::bounds::Bounds;
+use crate::collect::HirPlaceholderCollector;
+use crate::errors::{
+ AmbiguousLifetimeBound, MultipleRelaxedDefaultBounds, TraitObjectDeclaredWithNoTraits,
+ TypeofReservedKeywordUsed, ValueOfAssociatedStructAlreadySpecified,
+};
+use crate::middle::resolve_lifetime as rl;
+use crate::require_c_abi_if_c_variadic;
+use rustc_ast::TraitObjectSyntax;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{
+ struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed, FatalError, MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Namespace, Res};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::{walk_generics, Visitor as _};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{GenericArg, GenericArgs, OpaqueTyOrigin};
+use rustc_middle::middle::stability::AllowUnstable;
+use rustc_middle::ty::subst::{self, GenericArgKind, InternalSubsts, Subst, SubstsRef};
+use rustc_middle::ty::GenericParamDefKind;
+use rustc_middle::ty::{
+ self, Const, DefIdTree, EarlyBinder, IsSuggestable, Ty, TyCtxt, TypeVisitable,
+};
+use rustc_session::lint::builtin::{AMBIGUOUS_ASSOCIATED_ITEMS, BARE_TRAIT_OBJECTS};
+use rustc_span::edition::Edition;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::spec::abi;
+use rustc_trait_selection::traits;
+use rustc_trait_selection::traits::astconv_object_safety_violations;
+use rustc_trait_selection::traits::error_reporting::{
+ report_object_safety_error, suggestions::NextTypeParamName,
+};
+use rustc_trait_selection::traits::wf::object_region_bounds;
+
+use smallvec::SmallVec;
+use std::collections::BTreeSet;
+use std::slice;
+
+#[derive(Debug)]
+pub struct PathSeg(pub DefId, pub usize);
+
+pub trait AstConv<'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn item_def_id(&self) -> Option<DefId>;
+
+ /// Returns predicates in scope of the form `X: Foo<T>`, where `X`
+ /// is a type parameter `X` with the given id `def_id` and T
+ /// matches `assoc_name`. This is a subset of the full set of
+ /// predicates.
+ ///
+ /// This is used for one specific purpose: resolving "short-hand"
+ /// associated type references like `T::Item`. In principle, we
+ /// would do that by first getting the full set of predicates in
+ /// scope and then filtering down to find those that apply to `T`,
+ /// but this can lead to cycle errors. The problem is that we have
+ /// to do this resolution *in order to create the predicates in
+ /// the first place*. Hence, we have this "special pass".
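+    ///
+    /// For instance (illustrative only):
+    ///
+    /// ```ignore (illustrative)
+    /// fn foo<T: Iterator>(x: T::Item) {}
+    /// ```
+    ///
+    /// Resolving `T::Item` only needs the `T: Iterator` predicate, not the full
+    /// set of predicates in scope.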
+ fn get_type_parameter_bounds(
+ &self,
+ span: Span,
+ def_id: DefId,
+ assoc_name: Ident,
+ ) -> ty::GenericPredicates<'tcx>;
+
+ /// Returns the lifetime to use when a lifetime is omitted (and not elided).
+ fn re_infer(&self, param: Option<&ty::GenericParamDef>, span: Span)
+ -> Option<ty::Region<'tcx>>;
+
+ /// Returns the type to use when a type is omitted.
+ fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx>;
+
+ /// Returns `true` if `_` is allowed in type signatures in the current context.
+ fn allow_ty_infer(&self) -> bool;
+
+ /// Returns the const to use when a const is omitted.
+ fn ct_infer(
+ &self,
+ ty: Ty<'tcx>,
+ param: Option<&ty::GenericParamDef>,
+ span: Span,
+ ) -> Const<'tcx>;
+
+ /// Projecting an associated type from a (potentially)
+ /// higher-ranked trait reference is more complicated, because of
+ /// the possibility of late-bound regions appearing in the
+ /// associated type binding. This is not legal in function
+ /// signatures for that reason. In a function body, we can always
+ /// handle it because we can use inference variables to remove the
+ /// late-bound regions.
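+    ///
+    /// A sketch of the problematic case (illustrative; rejected in signatures):
+    ///
+    /// ```ignore (illustrative)
+    /// fn f<T>(x: T) where for<'a> T: Iterator<Item = &'a u32> {}
+    /// // the binding `Item = &'a u32` mentions the late-bound `'a`
+    /// ```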
+ fn projected_ty_from_poly_trait_ref(
+ &self,
+ span: Span,
+ item_def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Ty<'tcx>;
+
+ /// Normalize an associated type coming from the user.
+ fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx>;
+
+ /// Invoked when we encounter an error from some prior pass
+ /// (e.g., resolve) that is translated into a ty-error. This is
+ /// used to help suppress derived errors typeck might otherwise
+ /// report.
+ fn set_tainted_by_errors(&self);
+
+ fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, span: Span);
+}
+
+#[derive(Debug)]
+struct ConvertedBinding<'a, 'tcx> {
+ hir_id: hir::HirId,
+ item_name: Ident,
+ kind: ConvertedBindingKind<'a, 'tcx>,
+ gen_args: &'a GenericArgs<'a>,
+ span: Span,
+}
+
+#[derive(Debug)]
+enum ConvertedBindingKind<'a, 'tcx> {
+ Equality(ty::Term<'tcx>),
+ Constraint(&'a [hir::GenericBound<'a>]),
+}
+
+/// New-typed boolean indicating whether explicit late-bound lifetimes
+/// are present in a set of generic arguments.
+///
+/// For example if we have some method `fn f<'a>(&'a self)` implemented
+/// for some type `T`, although `f` is generic in the lifetime `'a`, `'a`
+/// is late-bound so should not be provided explicitly. Thus, if `f` is
+/// instantiated with some generic arguments providing `'a` explicitly,
+/// we taint those arguments with `ExplicitLateBound::Yes` so that we
+/// can provide an appropriate diagnostic later.
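+///
+/// A minimal sketch (illustrative) of arguments that get flagged this way:
+///
+/// ```ignore (illustrative)
+/// fn f<'a>(x: &'a u32) {}
+/// f::<'static>(&0); // `'a` is late-bound; the explicit `'static` is reported
+/// ```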
+#[derive(Copy, Clone, PartialEq)]
+pub enum ExplicitLateBound {
+ Yes,
+ No,
+}
+
+#[derive(Copy, Clone, PartialEq)]
+pub enum IsMethodCall {
+ Yes,
+ No,
+}
+
+/// Denotes the "position" of a generic argument, indicating if it is a generic type,
+/// generic function or generic method call.
+#[derive(Copy, Clone, PartialEq)]
+pub(crate) enum GenericArgPosition {
+ Type,
+ Value, // e.g., functions
+ MethodCall,
+}
+
+/// A marker denoting that the generic arguments that were
+/// provided did not match the respective generic parameters.
+#[derive(Clone, Default)]
+pub struct GenericArgCountMismatch {
+ /// Indicates whether a fatal error was reported (`Some`), or just a lint (`None`).
+ pub reported: Option<ErrorGuaranteed>,
+ /// A list of spans of arguments provided that were not valid.
+ pub invalid_args: Vec<Span>,
+}
+
+/// Decorates the result of a generic argument count mismatch
+/// check with whether explicit late bounds were provided.
+#[derive(Clone)]
+pub struct GenericArgCountResult {
+ pub explicit_late_bound: ExplicitLateBound,
+ pub correct: Result<(), GenericArgCountMismatch>,
+}
+
+pub trait CreateSubstsForGenericArgsCtxt<'a, 'tcx> {
+ fn args_for_def_id(&mut self, def_id: DefId) -> (Option<&'a GenericArgs<'a>>, bool);
+
+ fn provided_kind(
+ &mut self,
+ param: &ty::GenericParamDef,
+ arg: &GenericArg<'_>,
+ ) -> subst::GenericArg<'tcx>;
+
+ fn inferred_kind(
+ &mut self,
+ substs: Option<&[subst::GenericArg<'tcx>]>,
+ param: &ty::GenericParamDef,
+ infer_args: bool,
+ ) -> subst::GenericArg<'tcx>;
+}
+
+impl<'o, 'tcx> dyn AstConv<'tcx> + 'o {
+ #[tracing::instrument(level = "debug", skip(self))]
+ pub fn ast_region_to_region(
+ &self,
+ lifetime: &hir::Lifetime,
+ def: Option<&ty::GenericParamDef>,
+ ) -> ty::Region<'tcx> {
+ let tcx = self.tcx();
+ let lifetime_name = |def_id| tcx.hir().name(tcx.hir().local_def_id_to_hir_id(def_id));
+
+ let r = match tcx.named_region(lifetime.hir_id) {
+ Some(rl::Region::Static) => tcx.lifetimes.re_static,
+
+ Some(rl::Region::LateBound(debruijn, index, def_id)) => {
+ let name = lifetime_name(def_id.expect_local());
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_u32(index),
+ kind: ty::BrNamed(def_id, name),
+ };
+ tcx.mk_region(ty::ReLateBound(debruijn, br))
+ }
+
+ Some(rl::Region::EarlyBound(index, id)) => {
+ let name = lifetime_name(id.expect_local());
+ tcx.mk_region(ty::ReEarlyBound(ty::EarlyBoundRegion { def_id: id, index, name }))
+ }
+
+ Some(rl::Region::Free(scope, id)) => {
+ let name = lifetime_name(id.expect_local());
+ tcx.mk_region(ty::ReFree(ty::FreeRegion {
+ scope,
+ bound_region: ty::BrNamed(id, name),
+ }))
+
+ // (*) -- not late-bound, won't change
+ }
+
+ None => {
+ self.re_infer(def, lifetime.span).unwrap_or_else(|| {
+ debug!(?lifetime, "unelided lifetime in signature");
+
+ // This indicates an illegal lifetime
+ // elision. `resolve_lifetime` should have
+ // reported an error in this case -- but if
+ // not, let's error out.
+ tcx.sess.delay_span_bug(lifetime.span, "unelided lifetime in signature");
+
+ // Supply some dummy value. We don't have an
+ // `re_error`, annoyingly, so use `'static`.
+ tcx.lifetimes.re_static
+ })
+ }
+ };
+
+ debug!("ast_region_to_region(lifetime={:?}) yields {:?}", lifetime, r);
+
+ r
+ }
+
+ /// Given a path `path` that refers to an item `I` with the declared generics `decl_generics`,
+ /// returns an appropriate set of substitutions for this particular reference to `I`.
+ pub fn ast_path_substs_for_ty(
+ &self,
+ span: Span,
+ def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ ) -> SubstsRef<'tcx> {
+ let (substs, _) = self.create_substs_for_ast_path(
+ span,
+ def_id,
+ &[],
+ item_segment,
+ item_segment.args(),
+ item_segment.infer_args,
+ None,
+ );
+ let assoc_bindings = self.create_assoc_bindings_for_generic_args(item_segment.args());
+
+ if let Some(b) = assoc_bindings.first() {
+ Self::prohibit_assoc_ty_binding(self.tcx(), b.span);
+ }
+
+ substs
+ }
+
+ /// Given the type/lifetime/const arguments provided to some path (along with
+ /// an implicit `Self`, if this is a trait reference), returns the complete
+ /// set of substitutions. This may involve applying defaulted type parameters.
+ /// Constraints on associated types are created from `create_assoc_bindings_for_generic_args`.
+ ///
+ /// Example:
+ ///
+ /// ```ignore (illustrative)
+ /// T: std::ops::Index<usize, Output = u32>
+ /// // ^1 ^^^^^^^^^^^^^^2 ^^^^3 ^^^^^^^^^^^4
+ /// ```
+ ///
+ /// 1. The `self_ty` here would refer to the type `T`.
+ /// 2. The path in question is the path to the trait `std::ops::Index`,
+ /// which will have been resolved to a `def_id`
+ /// 3. The `generic_args` contains info on the `<...>` contents. The `usize` type
+ /// parameters are returned in the `SubstsRef`, the associated type bindings like
+ /// `Output = u32` are returned from `create_assoc_bindings_for_generic_args`.
+ ///
+ /// Note that the type listing given here is *exactly* what the user provided.
+ ///
+ /// For (generic) associated types
+ ///
+ /// ```ignore (illustrative)
+ /// <Vec<u8> as Iterable<u8>>::Iter::<'a>
+ /// ```
+ ///
+    /// Here, the parent substs are the substs for the parent trait:
+ /// `[Vec<u8>, u8]` and `generic_args` are the arguments for the associated
+ /// type itself: `['a]`. The returned `SubstsRef` concatenates these two
+ /// lists: `[Vec<u8>, u8, 'a]`.
+ #[tracing::instrument(level = "debug", skip(self, span))]
+ fn create_substs_for_ast_path<'a>(
+ &self,
+ span: Span,
+ def_id: DefId,
+ parent_substs: &[subst::GenericArg<'tcx>],
+ seg: &hir::PathSegment<'_>,
+ generic_args: &'a hir::GenericArgs<'_>,
+ infer_args: bool,
+ self_ty: Option<Ty<'tcx>>,
+ ) -> (SubstsRef<'tcx>, GenericArgCountResult) {
+ // If the type is parameterized by this region, then replace this
+ // region with the current anon region binding (in other words,
+ // whatever & would get replaced with).
+
+ let tcx = self.tcx();
+ let generics = tcx.generics_of(def_id);
+ debug!("generics: {:?}", generics);
+
+ if generics.has_self {
+ if generics.parent.is_some() {
+ // The parent is a trait so it should have at least one subst
+ // for the `Self` type.
+ assert!(!parent_substs.is_empty())
+ } else {
+ // This item (presumably a trait) needs a self-type.
+ assert!(self_ty.is_some());
+ }
+ } else {
+ assert!(self_ty.is_none() && parent_substs.is_empty());
+ }
+
+ let arg_count = Self::check_generic_arg_count(
+ tcx,
+ span,
+ def_id,
+ seg,
+ generics,
+ generic_args,
+ GenericArgPosition::Type,
+ self_ty.is_some(),
+ infer_args,
+ );
+
+ // Skip processing if type has no generic parameters.
+ // Traits always have `Self` as a generic parameter, which means they will not return early
+ // here and so associated type bindings will be handled regardless of whether there are any
+ // non-`Self` generic parameters.
+ if generics.params.is_empty() {
+ return (tcx.intern_substs(&[]), arg_count);
+ }
+
+ let is_object = self_ty.map_or(false, |ty| ty == self.tcx().types.trait_object_dummy_self);
+
+ struct SubstsForAstPathCtxt<'a, 'tcx> {
+ astconv: &'a (dyn AstConv<'tcx> + 'a),
+ def_id: DefId,
+ generic_args: &'a GenericArgs<'a>,
+ span: Span,
+ missing_type_params: Vec<Symbol>,
+ inferred_params: Vec<Span>,
+ infer_args: bool,
+ is_object: bool,
+ }
+
+ impl<'tcx, 'a> SubstsForAstPathCtxt<'tcx, 'a> {
+ fn default_needs_object_self(&mut self, param: &ty::GenericParamDef) -> bool {
+ let tcx = self.astconv.tcx();
+ if let GenericParamDefKind::Type { has_default, .. } = param.kind {
+ if self.is_object && has_default {
+ let default_ty = tcx.at(self.span).type_of(param.def_id);
+ let self_param = tcx.types.self_param;
+ if default_ty.walk().any(|arg| arg == self_param.into()) {
+ // There is no suitable inference default for a type parameter
+ // that references self, in an object type.
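+                            // E.g. (illustrative): given `trait Tr<A = Box<Self>> {}`,
+                            // in `dyn Tr` the default for `A` would reference the
+                            // erased `Self` type.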
+ return true;
+ }
+ }
+ }
+
+ false
+ }
+ }
+
+ impl<'a, 'tcx> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for SubstsForAstPathCtxt<'a, 'tcx> {
+ fn args_for_def_id(&mut self, did: DefId) -> (Option<&'a GenericArgs<'a>>, bool) {
+ if did == self.def_id {
+ (Some(self.generic_args), self.infer_args)
+ } else {
+ // The last component of this tuple is unimportant.
+ (None, false)
+ }
+ }
+
+ fn provided_kind(
+ &mut self,
+ param: &ty::GenericParamDef,
+ arg: &GenericArg<'_>,
+ ) -> subst::GenericArg<'tcx> {
+ let tcx = self.astconv.tcx();
+
+ let mut handle_ty_args = |has_default, ty: &hir::Ty<'_>| {
+ if has_default {
+ tcx.check_optional_stability(
+ param.def_id,
+ Some(arg.id()),
+ arg.span(),
+ None,
+ AllowUnstable::No,
+ |_, _| {
+ // Default generic parameters may not be marked
+                            // with stability attributes, e.g. when the
+ // default parameter was defined at the same time
+ // as the rest of the type. As such, we ignore missing
+ // stability attributes.
+ },
+ );
+ }
+ if let (hir::TyKind::Infer, false) = (&ty.kind, self.astconv.allow_ty_infer()) {
+ self.inferred_params.push(ty.span);
+ tcx.ty_error().into()
+ } else {
+ self.astconv.ast_ty_to_ty(ty).into()
+ }
+ };
+
+ match (&param.kind, arg) {
+ (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+ self.astconv.ast_region_to_region(lt, Some(param)).into()
+ }
+ (&GenericParamDefKind::Type { has_default, .. }, GenericArg::Type(ty)) => {
+ handle_ty_args(has_default, ty)
+ }
+ (&GenericParamDefKind::Type { has_default, .. }, GenericArg::Infer(inf)) => {
+ handle_ty_args(has_default, &inf.to_ty())
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => {
+ ty::Const::from_opt_const_arg_anon_const(
+ tcx,
+ ty::WithOptConstParam {
+ did: tcx.hir().local_def_id(ct.value.hir_id),
+ const_param_did: Some(param.def_id),
+ },
+ )
+ .into()
+ }
+ (&GenericParamDefKind::Const { .. }, hir::GenericArg::Infer(inf)) => {
+ let ty = tcx.at(self.span).type_of(param.def_id);
+ if self.astconv.allow_ty_infer() {
+ self.astconv.ct_infer(ty, Some(param), inf.span).into()
+ } else {
+ self.inferred_params.push(inf.span);
+ tcx.const_error(ty).into()
+ }
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn inferred_kind(
+ &mut self,
+ substs: Option<&[subst::GenericArg<'tcx>]>,
+ param: &ty::GenericParamDef,
+ infer_args: bool,
+ ) -> subst::GenericArg<'tcx> {
+ let tcx = self.astconv.tcx();
+ match param.kind {
+ GenericParamDefKind::Lifetime => self
+ .astconv
+ .re_infer(Some(param), self.span)
+ .unwrap_or_else(|| {
+ debug!(?param, "unelided lifetime in signature");
+
+ // This indicates an illegal lifetime in a non-assoc-trait position
+ tcx.sess.delay_span_bug(self.span, "unelided lifetime in signature");
+
+ // Supply some dummy value. We don't have an
+ // `re_error`, annoyingly, so use `'static`.
+ tcx.lifetimes.re_static
+ })
+ .into(),
+ GenericParamDefKind::Type { has_default, .. } => {
+ if !infer_args && has_default {
+ // No type parameter provided, but a default exists.
+
+ // If we are converting an object type, then the
+ // `Self` parameter is unknown. However, some of the
+ // other type parameters may reference `Self` in their
+ // defaults. This will lead to an ICE if we are not
+ // careful!
+ if self.default_needs_object_self(param) {
+ self.missing_type_params.push(param.name);
+ tcx.ty_error().into()
+ } else {
+ // This is a default type parameter.
+ let substs = substs.unwrap();
+ if substs.iter().any(|arg| match arg.unpack() {
+ GenericArgKind::Type(ty) => ty.references_error(),
+ _ => false,
+ }) {
+ // Avoid ICE #86756 when type error recovery goes awry.
+ return tcx.ty_error().into();
+ }
+ self.astconv
+ .normalize_ty(
+ self.span,
+ EarlyBinder(tcx.at(self.span).type_of(param.def_id))
+ .subst(tcx, substs),
+ )
+ .into()
+ }
+ } else if infer_args {
+ // No type parameters were provided, we can infer all.
+ let param = if !self.default_needs_object_self(param) {
+ Some(param)
+ } else {
+ None
+ };
+ self.astconv.ty_infer(param, self.span).into()
+ } else {
+ // We've already errored above about the mismatch.
+ tcx.ty_error().into()
+ }
+ }
+ GenericParamDefKind::Const { has_default } => {
+ let ty = tcx.at(self.span).type_of(param.def_id);
+ if !infer_args && has_default {
+ tcx.bound_const_param_default(param.def_id)
+ .subst(tcx, substs.unwrap())
+ .into()
+ } else {
+ if infer_args {
+ self.astconv.ct_infer(ty, Some(param), self.span).into()
+ } else {
+ // We've already errored above about the mismatch.
+ tcx.const_error(ty).into()
+ }
+ }
+ }
+ }
+ }
+ }
+
+ let mut substs_ctx = SubstsForAstPathCtxt {
+ astconv: self,
+ def_id,
+ span,
+ generic_args,
+ missing_type_params: vec![],
+ inferred_params: vec![],
+ infer_args,
+ is_object,
+ };
+ let substs = Self::create_substs_for_generic_args(
+ tcx,
+ def_id,
+ parent_substs,
+ self_ty.is_some(),
+ self_ty,
+ &arg_count,
+ &mut substs_ctx,
+ );
+
+ self.complain_about_missing_type_params(
+ substs_ctx.missing_type_params,
+ def_id,
+ span,
+ generic_args.args.is_empty(),
+ );
+
+ debug!(
+ "create_substs_for_ast_path(generic_params={:?}, self_ty={:?}) -> {:?}",
+ generics, self_ty, substs
+ );
+
+ (substs, arg_count)
+ }
+
+ fn create_assoc_bindings_for_generic_args<'a>(
+ &self,
+ generic_args: &'a hir::GenericArgs<'_>,
+ ) -> Vec<ConvertedBinding<'a, 'tcx>> {
+ // Convert associated-type bindings or constraints into a separate vector.
+ // Example: Given this:
+ //
+ // T: Iterator<Item = u32>
+ //
+ // The `T` is passed in as a self-type; the `Item = u32` is
+ // not a "type parameter" of the `Iterator` trait, but rather
+ // a restriction on `<T as Iterator>::Item`, so it is passed
+ // back separately.
+ let assoc_bindings = generic_args
+ .bindings
+ .iter()
+ .map(|binding| {
+ let kind = match binding.kind {
+ hir::TypeBindingKind::Equality { ref term } => match term {
+ hir::Term::Ty(ref ty) => {
+ ConvertedBindingKind::Equality(self.ast_ty_to_ty(ty).into())
+ }
+ hir::Term::Const(ref c) => {
+ let local_did = self.tcx().hir().local_def_id(c.hir_id);
+ let c = Const::from_anon_const(self.tcx(), local_did);
+ ConvertedBindingKind::Equality(c.into())
+ }
+ },
+ hir::TypeBindingKind::Constraint { ref bounds } => {
+ ConvertedBindingKind::Constraint(bounds)
+ }
+ };
+ ConvertedBinding {
+ hir_id: binding.hir_id,
+ item_name: binding.ident,
+ kind,
+ gen_args: binding.gen_args,
+ span: binding.span,
+ }
+ })
+ .collect();
+
+ assoc_bindings
+ }
+
+ pub(crate) fn create_substs_for_associated_item(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ item_def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ parent_substs: SubstsRef<'tcx>,
+ ) -> SubstsRef<'tcx> {
+ debug!(
+ "create_substs_for_associated_item(span: {:?}, item_def_id: {:?}, item_segment: {:?}",
+ span, item_def_id, item_segment
+ );
+ if tcx.generics_of(item_def_id).params.is_empty() {
+ self.prohibit_generics(slice::from_ref(item_segment).iter(), |_| {});
+
+ parent_substs
+ } else {
+ self.create_substs_for_ast_path(
+ span,
+ item_def_id,
+ parent_substs,
+ item_segment,
+ item_segment.args(),
+ item_segment.infer_args,
+ None,
+ )
+ .0
+ }
+ }
+
+    /// Instantiates the path for the given trait reference, assuming that it's
+    /// bound to a valid trait type. Returns the instantiated trait reference;
+    /// the path _cannot_ refer to anything other than a trait.
+    ///
+    /// Associated type bindings like `Foo<T = X>` are not allowed here; any that
+    /// are present are rejected via `prohibit_assoc_ty_binding`.
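+    ///
+    /// E.g. (illustrative): for an `impl PartialEq for MyType`, this instantiates
+    /// the `PartialEq` trait reference with `self_ty = MyType`.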
+ pub fn instantiate_mono_trait_ref(
+ &self,
+ trait_ref: &hir::TraitRef<'_>,
+ self_ty: Ty<'tcx>,
+ ) -> ty::TraitRef<'tcx> {
+ self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1.iter(), |_| {});
+
+ self.ast_path_to_mono_trait_ref(
+ trait_ref.path.span,
+ trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise()),
+ self_ty,
+ trait_ref.path.segments.last().unwrap(),
+ true,
+ )
+ }
+
+ fn instantiate_poly_trait_ref_inner(
+ &self,
+ hir_id: hir::HirId,
+ span: Span,
+ binding_span: Option<Span>,
+ constness: ty::BoundConstness,
+ bounds: &mut Bounds<'tcx>,
+ speculative: bool,
+ trait_ref_span: Span,
+ trait_def_id: DefId,
+ trait_segment: &hir::PathSegment<'_>,
+ args: &GenericArgs<'_>,
+ infer_args: bool,
+ self_ty: Ty<'tcx>,
+ ) -> GenericArgCountResult {
+ let (substs, arg_count) = self.create_substs_for_ast_path(
+ trait_ref_span,
+ trait_def_id,
+ &[],
+ trait_segment,
+ args,
+ infer_args,
+ Some(self_ty),
+ );
+
+ let tcx = self.tcx();
+ let bound_vars = tcx.late_bound_vars(hir_id);
+ debug!(?bound_vars);
+
+ let assoc_bindings = self.create_assoc_bindings_for_generic_args(args);
+
+ let poly_trait_ref =
+ ty::Binder::bind_with_vars(ty::TraitRef::new(trait_def_id, substs), bound_vars);
+
+ debug!(?poly_trait_ref, ?assoc_bindings);
+ bounds.trait_bounds.push((poly_trait_ref, span, constness));
+
+ let mut dup_bindings = FxHashMap::default();
+ for binding in &assoc_bindings {
+ // Specify type to assert that error was already reported in `Err` case.
+ let _: Result<_, ErrorGuaranteed> = self.add_predicates_for_ast_type_binding(
+ hir_id,
+ poly_trait_ref,
+ binding,
+ bounds,
+ speculative,
+ &mut dup_bindings,
+ binding_span.unwrap_or(binding.span),
+ );
+ // Okay to ignore `Err` because of `ErrorGuaranteed` (see above).
+ }
+
+ arg_count
+ }
+
+    /// Given a trait bound like `Debug`, applies that trait bound to the given self-type to construct
+ /// a full trait reference. The resulting trait reference is returned. This may also generate
+ /// auxiliary bounds, which are added to `bounds`.
+ ///
+ /// Example:
+ ///
+ /// ```ignore (illustrative)
+ /// poly_trait_ref = Iterator<Item = u32>
+ /// self_ty = Foo
+ /// ```
+ ///
+ /// this would return `Foo: Iterator` and add `<Foo as Iterator>::Item = u32` into `bounds`.
+ ///
+    /// **A note on binders:** against our usual convention, there is an implied binder around
+ /// the `self_ty` and `poly_trait_ref` parameters here. So they may reference bound regions.
+ /// If for example you had `for<'a> Foo<'a>: Bar<'a>`, then the `self_ty` would be `Foo<'a>`
+ /// where `'a` is a bound region at depth 0. Similarly, the `poly_trait_ref` would be
+ /// `Bar<'a>`. The returned poly-trait-ref will have this binder instantiated explicitly,
+ /// however.
+ #[tracing::instrument(level = "debug", skip(self, span, constness, bounds, speculative))]
+ pub(crate) fn instantiate_poly_trait_ref(
+ &self,
+ trait_ref: &hir::TraitRef<'_>,
+ span: Span,
+ constness: ty::BoundConstness,
+ self_ty: Ty<'tcx>,
+ bounds: &mut Bounds<'tcx>,
+ speculative: bool,
+ ) -> GenericArgCountResult {
+ let hir_id = trait_ref.hir_ref_id;
+ let binding_span = None;
+ let trait_ref_span = trait_ref.path.span;
+ let trait_def_id = trait_ref.trait_def_id().unwrap_or_else(|| FatalError.raise());
+ let trait_segment = trait_ref.path.segments.last().unwrap();
+ let args = trait_segment.args();
+ let infer_args = trait_segment.infer_args;
+
+ self.prohibit_generics(trait_ref.path.segments.split_last().unwrap().1.iter(), |_| {});
+ self.complain_about_internal_fn_trait(span, trait_def_id, trait_segment, false);
+
+ self.instantiate_poly_trait_ref_inner(
+ hir_id,
+ span,
+ binding_span,
+ constness,
+ bounds,
+ speculative,
+ trait_ref_span,
+ trait_def_id,
+ trait_segment,
+ args,
+ infer_args,
+ self_ty,
+ )
+ }
+
+ pub(crate) fn instantiate_lang_item_trait_ref(
+ &self,
+ lang_item: hir::LangItem,
+ span: Span,
+ hir_id: hir::HirId,
+ args: &GenericArgs<'_>,
+ self_ty: Ty<'tcx>,
+ bounds: &mut Bounds<'tcx>,
+ ) {
+ let binding_span = Some(span);
+ let constness = ty::BoundConstness::NotConst;
+ let speculative = false;
+ let trait_ref_span = span;
+ let trait_def_id = self.tcx().require_lang_item(lang_item, Some(span));
+ let trait_segment = &hir::PathSegment::invalid();
+ let infer_args = false;
+
+ self.instantiate_poly_trait_ref_inner(
+ hir_id,
+ span,
+ binding_span,
+ constness,
+ bounds,
+ speculative,
+ trait_ref_span,
+ trait_def_id,
+ trait_segment,
+ args,
+ infer_args,
+ self_ty,
+ );
+ }
+
+ fn ast_path_to_mono_trait_ref(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ trait_segment: &hir::PathSegment<'_>,
+ is_impl: bool,
+ ) -> ty::TraitRef<'tcx> {
+ let (substs, _) = self.create_substs_for_ast_trait_ref(
+ span,
+ trait_def_id,
+ self_ty,
+ trait_segment,
+ is_impl,
+ );
+ let assoc_bindings = self.create_assoc_bindings_for_generic_args(trait_segment.args());
+ if let Some(b) = assoc_bindings.first() {
+ Self::prohibit_assoc_ty_binding(self.tcx(), b.span);
+ }
+ ty::TraitRef::new(trait_def_id, substs)
+ }
+
+ #[tracing::instrument(level = "debug", skip(self, span))]
+ fn create_substs_for_ast_trait_ref<'a>(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ trait_segment: &'a hir::PathSegment<'a>,
+ is_impl: bool,
+ ) -> (SubstsRef<'tcx>, GenericArgCountResult) {
+ self.complain_about_internal_fn_trait(span, trait_def_id, trait_segment, is_impl);
+
+ self.create_substs_for_ast_path(
+ span,
+ trait_def_id,
+ &[],
+ trait_segment,
+ trait_segment.args(),
+ trait_segment.infer_args,
+ Some(self_ty),
+ )
+ }
+
+ fn trait_defines_associated_type_named(&self, trait_def_id: DefId, assoc_name: Ident) -> bool {
+ self.tcx()
+ .associated_items(trait_def_id)
+ .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Type, trait_def_id)
+ .is_some()
+ }
+ fn trait_defines_associated_const_named(&self, trait_def_id: DefId, assoc_name: Ident) -> bool {
+ self.tcx()
+ .associated_items(trait_def_id)
+ .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Const, trait_def_id)
+ .is_some()
+ }
+
+    // Sets `implicitly_sized` on `Bounds` to `Some(span)` if necessary.
+ pub(crate) fn add_implicitly_sized<'hir>(
+ &self,
+ bounds: &mut Bounds<'hir>,
+ ast_bounds: &'hir [hir::GenericBound<'hir>],
+ self_ty_where_predicates: Option<(hir::HirId, &'hir [hir::WherePredicate<'hir>])>,
+ span: Span,
+ ) {
+ let tcx = self.tcx();
+
+ // Try to find an unbound in bounds.
+ let mut unbound = None;
+ let mut search_bounds = |ast_bounds: &'hir [hir::GenericBound<'hir>]| {
+ for ab in ast_bounds {
+ if let hir::GenericBound::Trait(ptr, hir::TraitBoundModifier::Maybe) = ab {
+ if unbound.is_none() {
+ unbound = Some(&ptr.trait_ref);
+ } else {
+ tcx.sess.emit_err(MultipleRelaxedDefaultBounds { span });
+ }
+ }
+ }
+ };
+ search_bounds(ast_bounds);
+ if let Some((self_ty, where_clause)) = self_ty_where_predicates {
+ let self_ty_def_id = tcx.hir().local_def_id(self_ty).to_def_id();
+ for clause in where_clause {
+ if let hir::WherePredicate::BoundPredicate(pred) = clause {
+ if pred.is_param_bound(self_ty_def_id) {
+ search_bounds(pred.bounds);
+ }
+ }
+ }
+ }
+
+ let sized_def_id = tcx.lang_items().require(LangItem::Sized);
+ match (&sized_def_id, unbound) {
+ (Ok(sized_def_id), Some(tpb))
+ if tpb.path.res == Res::Def(DefKind::Trait, *sized_def_id) =>
+ {
+ // There was in fact a `?Sized` bound, return without doing anything
+ return;
+ }
+ (_, Some(_)) => {
+ // There was a `?Trait` bound, but it was not `?Sized`; warn.
+ tcx.sess.span_warn(
+ span,
+ "default bound relaxed for a type parameter, but \
+ this does nothing because the given bound is not \
+ a default; only `?Sized` is supported",
+ );
+ // Otherwise, add implicitly sized if `Sized` is available.
+ }
+ _ => {
+ // There was no `?Sized` bound; add implicitly sized if `Sized` is available.
+ }
+ }
+ if sized_def_id.is_err() {
+ // No lang item for `Sized`, so we can't add it as a bound.
+ return;
+ }
+ bounds.implicitly_sized = Some(span);
+ }
+
+ /// This helper takes a *converted* parameter type (`param_ty`)
+ /// and an *unconverted* list of bounds:
+ ///
+ /// ```text
+ /// fn foo<T: Debug>
+ /// ^ ^^^^^ `ast_bounds` parameter, in HIR form
+ /// |
+ /// `param_ty`, in ty form
+ /// ```
+ ///
+ /// It adds these `ast_bounds` into the `bounds` structure.
+ ///
+ /// **A note on binders:** there is an implied binder around
+ /// `param_ty` and `ast_bounds`. See `instantiate_poly_trait_ref`
+ /// for more details.
+ #[tracing::instrument(level = "debug", skip(self, ast_bounds, bounds))]
+ pub(crate) fn add_bounds<'hir, I: Iterator<Item = &'hir hir::GenericBound<'hir>>>(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: I,
+ bounds: &mut Bounds<'tcx>,
+ bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
+ ) {
+ for ast_bound in ast_bounds {
+ match ast_bound {
+ hir::GenericBound::Trait(poly_trait_ref, modifier) => {
+ let constness = match modifier {
+ hir::TraitBoundModifier::MaybeConst => ty::BoundConstness::ConstIfConst,
+ hir::TraitBoundModifier::None => ty::BoundConstness::NotConst,
+ hir::TraitBoundModifier::Maybe => continue,
+ };
+
+ let _ = self.instantiate_poly_trait_ref(
+ &poly_trait_ref.trait_ref,
+ poly_trait_ref.span,
+ constness,
+ param_ty,
+ bounds,
+ false,
+ );
+ }
+ &hir::GenericBound::LangItemTrait(lang_item, span, hir_id, args) => {
+ self.instantiate_lang_item_trait_ref(
+ lang_item, span, hir_id, args, param_ty, bounds,
+ );
+ }
+ hir::GenericBound::Outlives(lifetime) => {
+ let region = self.ast_region_to_region(lifetime, None);
+ bounds
+ .region_bounds
+ .push((ty::Binder::bind_with_vars(region, bound_vars), lifetime.span));
+ }
+ }
+ }
+ }
+
+ /// Translates a list of bounds from the HIR into the `Bounds` data structure.
+ /// The self-type for the bounds is given by `param_ty`.
+ ///
+ /// Example:
+ ///
+ /// ```ignore (illustrative)
+ /// fn foo<T: Bar + Baz>() { }
+ /// // ^ ^^^^^^^^^ ast_bounds
+ /// // param_ty
+ /// ```
+ ///
+    /// Whether, in this context, `param_ty` should be considered `Sized` unless there
+    /// is an explicit `?Sized` bound (true in the example above, but not in supertrait
+    /// listings like `trait Foo: Bar + Baz`) is handled separately, by
+    /// `add_implicitly_sized`.
+ pub(crate) fn compute_bounds(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: &[hir::GenericBound<'_>],
+ ) -> Bounds<'tcx> {
+ self.compute_bounds_inner(param_ty, ast_bounds)
+ }
+
+ /// Convert the bounds in `ast_bounds` that refer to traits which define an associated type
+ /// named `assoc_name` into ty::Bounds. Ignore the rest.
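+    ///
+    /// E.g. (illustrative): with bounds `Iterator + Clone` and an `assoc_name` of
+    /// `Item`, only the `Iterator` bound is converted; `Clone` does not define an
+    /// associated type named `Item`.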
+ pub(crate) fn compute_bounds_that_match_assoc_type(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: &[hir::GenericBound<'_>],
+ assoc_name: Ident,
+ ) -> Bounds<'tcx> {
+ let mut result = Vec::new();
+
+ for ast_bound in ast_bounds {
+ if let Some(trait_ref) = ast_bound.trait_ref()
+ && let Some(trait_did) = trait_ref.trait_def_id()
+ && self.tcx().trait_may_define_assoc_type(trait_did, assoc_name)
+ {
+ result.push(ast_bound.clone());
+ }
+ }
+
+ self.compute_bounds_inner(param_ty, &result)
+ }
+
+ fn compute_bounds_inner(
+ &self,
+ param_ty: Ty<'tcx>,
+ ast_bounds: &[hir::GenericBound<'_>],
+ ) -> Bounds<'tcx> {
+ let mut bounds = Bounds::default();
+
+ self.add_bounds(param_ty, ast_bounds.iter(), &mut bounds, ty::List::empty());
+ debug!(?bounds);
+
+ bounds
+ }
+
+ /// Given an HIR binding like `Item = Foo` or `Item: Foo`, pushes the corresponding predicates
+ /// onto `bounds`.
+ ///
+ /// **A note on binders:** given something like `T: for<'a> Iterator<Item = &'a u32>`, the
+ /// `trait_ref` here will be `for<'a> T: Iterator`. The `binding` data however is from *inside*
+ /// the binder (e.g., `&'a u32`) and hence may reference bound regions.
+ #[tracing::instrument(
+ level = "debug",
+ skip(self, bounds, speculative, dup_bindings, path_span)
+ )]
+ fn add_predicates_for_ast_type_binding(
+ &self,
+ hir_ref_id: hir::HirId,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ binding: &ConvertedBinding<'_, 'tcx>,
+ bounds: &mut Bounds<'tcx>,
+ speculative: bool,
+ dup_bindings: &mut FxHashMap<DefId, Span>,
+ path_span: Span,
+ ) -> Result<(), ErrorGuaranteed> {
+ // Given something like `U: SomeTrait<T = X>`, we want to produce a
+ // predicate like `<U as SomeTrait>::T = X`. This is somewhat
+ // subtle in the event that `T` is defined in a supertrait of
+ // `SomeTrait`, because in that case we need to upcast.
+ //
+ // That is, consider this case:
+ //
+ // ```
+ // trait SubTrait: SuperTrait<i32> { }
+ // trait SuperTrait<A> { type T; }
+ //
+ // ... B: SubTrait<T = foo> ...
+ // ```
+ //
+ // We want to produce `<B as SuperTrait<i32>>::T == foo`.
+
+ let tcx = self.tcx();
+
+ let candidate =
+ if self.trait_defines_associated_type_named(trait_ref.def_id(), binding.item_name) {
+ // Simple case: X is defined in the current trait.
+ trait_ref
+ } else {
+ // Otherwise, we have to walk through the supertraits to find
+ // those that do.
+ self.one_bound_for_assoc_type(
+ || traits::supertraits(tcx, trait_ref),
+ || trait_ref.print_only_trait_path().to_string(),
+ binding.item_name,
+ path_span,
+ || match binding.kind {
+ ConvertedBindingKind::Equality(ty) => Some(ty.to_string()),
+ _ => None,
+ },
+ )?
+ };
+
+ let (assoc_ident, def_scope) =
+ tcx.adjust_ident_and_get_scope(binding.item_name, candidate.def_id(), hir_ref_id);
+
+ // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead
+ // of calling `filter_by_name_and_kind`.
+ let find_item_of_kind = |kind| {
+ tcx.associated_items(candidate.def_id())
+ .filter_by_name_unhygienic(assoc_ident.name)
+ .find(|i| i.kind == kind && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident)
+ };
+ let assoc_item = find_item_of_kind(ty::AssocKind::Type)
+ .or_else(|| find_item_of_kind(ty::AssocKind::Const))
+ .expect("missing associated type");
+
+ if !assoc_item.visibility(tcx).is_accessible_from(def_scope, tcx) {
+ tcx.sess
+ .struct_span_err(
+ binding.span,
+ &format!("{} `{}` is private", assoc_item.kind, binding.item_name),
+ )
+ .span_label(binding.span, &format!("private {}", assoc_item.kind))
+ .emit();
+ }
+ tcx.check_stability(assoc_item.def_id, Some(hir_ref_id), binding.span, None);
+
+ if !speculative {
+ dup_bindings
+ .entry(assoc_item.def_id)
+ .and_modify(|prev_span| {
+ self.tcx().sess.emit_err(ValueOfAssociatedStructAlreadySpecified {
+ span: binding.span,
+ prev_span: *prev_span,
+ item_name: binding.item_name,
+ def_path: tcx.def_path_str(assoc_item.container_id(tcx)),
+ });
+ })
+ .or_insert(binding.span);
+ }
+
+ // Include substitutions for generic parameters of associated types
+ let projection_ty = candidate.map_bound(|trait_ref| {
+ let ident = Ident::new(assoc_item.name, binding.item_name.span);
+ let item_segment = hir::PathSegment {
+ ident,
+ hir_id: Some(binding.hir_id),
+ res: None,
+ args: Some(binding.gen_args),
+ infer_args: false,
+ };
+
+ let substs_trait_ref_and_assoc_item = self.create_substs_for_associated_item(
+ tcx,
+ path_span,
+ assoc_item.def_id,
+ &item_segment,
+ trait_ref.substs,
+ );
+
+ debug!(
+ "add_predicates_for_ast_type_binding: substs for trait-ref and assoc_item: {:?}",
+ substs_trait_ref_and_assoc_item
+ );
+
+ ty::ProjectionTy {
+ item_def_id: assoc_item.def_id,
+ substs: substs_trait_ref_and_assoc_item,
+ }
+ });
+
+ if !speculative {
+ // Find any late-bound regions declared in `ty` that are not
+ // declared in the trait-ref or assoc_item. These are not well-formed.
+ //
+ // Example:
+ //
+ // for<'a> <T as Iterator>::Item = &'a str // <-- 'a is bad
+ // for<'a> <T as FnMut<(&'a u32,)>>::Output = &'a str // <-- 'a is ok
+ if let ConvertedBindingKind::Equality(ty) = binding.kind {
+ let late_bound_in_trait_ref =
+ tcx.collect_constrained_late_bound_regions(&projection_ty);
+ let late_bound_in_ty =
+ tcx.collect_referenced_late_bound_regions(&trait_ref.rebind(ty));
+ debug!("late_bound_in_trait_ref = {:?}", late_bound_in_trait_ref);
+ debug!("late_bound_in_ty = {:?}", late_bound_in_ty);
+
+ // FIXME: point at the type params that don't have appropriate lifetimes:
+ // struct S1<F: for<'a> Fn(&i32, &i32) -> &'a i32>(F);
+ // ---- ---- ^^^^^^^
+ self.validate_late_bound_regions(
+ late_bound_in_trait_ref,
+ late_bound_in_ty,
+ |br_name| {
+ struct_span_err!(
+ tcx.sess,
+ binding.span,
+ E0582,
+ "binding for associated type `{}` references {}, \
+ which does not appear in the trait input types",
+ binding.item_name,
+ br_name
+ )
+ },
+ );
+ }
+ }
+
+ match binding.kind {
+ ConvertedBindingKind::Equality(mut term) => {
+ // "Desugar" a constraint like `T: Iterator<Item = u32>` this to
+ // the "projection predicate" for:
+ //
+ // `<T as Iterator>::Item = u32`
+ let assoc_item_def_id = projection_ty.skip_binder().item_def_id;
+ let def_kind = tcx.def_kind(assoc_item_def_id);
+ match (def_kind, term) {
+ (hir::def::DefKind::AssocTy, ty::Term::Ty(_))
+ | (hir::def::DefKind::AssocConst, ty::Term::Const(_)) => (),
+ (_, _) => {
+ let got = if let ty::Term::Ty(_) = term { "type" } else { "constant" };
+ let expected = def_kind.descr(assoc_item_def_id);
+ tcx.sess
+ .struct_span_err(
+ binding.span,
+ &format!("expected {expected} bound, found {got}"),
+ )
+ .span_note(
+ tcx.def_span(assoc_item_def_id),
+ &format!("{expected} defined here"),
+ )
+ .emit();
+ term = match def_kind {
+ hir::def::DefKind::AssocTy => tcx.ty_error().into(),
+ hir::def::DefKind::AssocConst => tcx
+ .const_error(
+ tcx.bound_type_of(assoc_item_def_id)
+ .subst(tcx, projection_ty.skip_binder().substs),
+ )
+ .into(),
+ _ => unreachable!(),
+ };
+ }
+ }
+ bounds.projection_bounds.push((
+ projection_ty.map_bound(|projection_ty| ty::ProjectionPredicate {
+ projection_ty,
+ term,
+ }),
+ binding.span,
+ ));
+ }
+ ConvertedBindingKind::Constraint(ast_bounds) => {
+ // "Desugar" a constraint like `T: Iterator<Item: Debug>` to
+ //
+ // `<T as Iterator>::Item: Debug`
+ //
+ // Calling `skip_binder` is okay, because `add_bounds` expects the `param_ty`
+ // parameter to have a skipped binder.
+ let param_ty = tcx.mk_ty(ty::Projection(projection_ty.skip_binder()));
+ self.add_bounds(param_ty, ast_bounds.iter(), bounds, candidate.bound_vars());
+ }
+ }
+ Ok(())
+ }
+
+ fn ast_path_to_ty(
+ &self,
+ span: Span,
+ did: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ ) -> Ty<'tcx> {
+ let substs = self.ast_path_substs_for_ty(span, did, item_segment);
+ self.normalize_ty(
+ span,
+ EarlyBinder(self.tcx().at(span).type_of(did)).subst(self.tcx(), substs),
+ )
+ }
+
+ fn conv_object_ty_poly_trait_ref(
+ &self,
+ span: Span,
+ trait_bounds: &[hir::PolyTraitRef<'_>],
+ lifetime: &hir::Lifetime,
+ borrowed: bool,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx();
+
+ let mut bounds = Bounds::default();
+ let mut potential_assoc_types = Vec::new();
+ let dummy_self = self.tcx().types.trait_object_dummy_self;
+ for trait_bound in trait_bounds.iter().rev() {
+ if let GenericArgCountResult {
+ correct:
+ Err(GenericArgCountMismatch { invalid_args: cur_potential_assoc_types, .. }),
+ ..
+ } = self.instantiate_poly_trait_ref(
+ &trait_bound.trait_ref,
+ trait_bound.span,
+ ty::BoundConstness::NotConst,
+ dummy_self,
+ &mut bounds,
+ false,
+ ) {
+ potential_assoc_types.extend(cur_potential_assoc_types);
+ }
+ }
+
+ // Expand trait aliases recursively and check that only one regular (non-auto) trait
+ // is used and no 'maybe' bounds are used.
+ let expanded_traits =
+ traits::expand_trait_aliases(tcx, bounds.trait_bounds.iter().map(|&(a, b, _)| (a, b)));
+ let (mut auto_traits, regular_traits): (Vec<_>, Vec<_>) = expanded_traits
+ .filter(|i| i.trait_ref().self_ty().skip_binder() == dummy_self)
+ .partition(|i| tcx.trait_is_auto(i.trait_ref().def_id()));
+ if regular_traits.len() > 1 {
+ let first_trait = &regular_traits[0];
+ let additional_trait = &regular_traits[1];
+ let mut err = struct_span_err!(
+ tcx.sess,
+ additional_trait.bottom().1,
+ E0225,
+ "only auto traits can be used as additional traits in a trait object"
+ );
+ additional_trait.label_with_exp_info(
+ &mut err,
+ "additional non-auto trait",
+ "additional use",
+ );
+ first_trait.label_with_exp_info(&mut err, "first non-auto trait", "first use");
+ err.help(&format!(
+ "consider creating a new trait with all of these as supertraits and using that \
+ trait here instead: `trait NewTrait: {} {{}}`",
+ regular_traits
+ .iter()
+ .map(|t| t.trait_ref().print_only_trait_path().to_string())
+ .collect::<Vec<_>>()
+ .join(" + "),
+ ));
+ err.note(
+ "auto-traits like `Send` and `Sync` are traits that have special properties; \
+ for more information on them, visit \
+ <https://doc.rust-lang.org/reference/special-types-and-traits.html#auto-traits>",
+ );
+ err.emit();
+ }
+
+ if regular_traits.is_empty() && auto_traits.is_empty() {
+ let trait_alias_span = bounds
+ .trait_bounds
+ .iter()
+ .map(|&(trait_ref, _, _)| trait_ref.def_id())
+ .find(|&trait_ref| tcx.is_trait_alias(trait_ref))
+ .map(|trait_ref| tcx.def_span(trait_ref));
+ tcx.sess.emit_err(TraitObjectDeclaredWithNoTraits { span, trait_alias_span });
+ return tcx.ty_error();
+ }
+
+ // Check that there are no gross object safety violations;
+ // most importantly, that the supertraits don't contain `Self`,
+ // to avoid ICEs.
+ for item in &regular_traits {
+ let object_safety_violations =
+ astconv_object_safety_violations(tcx, item.trait_ref().def_id());
+ if !object_safety_violations.is_empty() {
+ report_object_safety_error(
+ tcx,
+ span,
+ item.trait_ref().def_id(),
+ &object_safety_violations,
+ )
+ .emit();
+ return tcx.ty_error();
+ }
+ }
+
+ // Use a `BTreeSet` to keep output in a more consistent order.
+ let mut associated_types: FxHashMap<Span, BTreeSet<DefId>> = FxHashMap::default();
+
+ let regular_traits_refs_spans = bounds
+ .trait_bounds
+ .into_iter()
+ .filter(|(trait_ref, _, _)| !tcx.trait_is_auto(trait_ref.def_id()));
+
+ for (base_trait_ref, span, constness) in regular_traits_refs_spans {
+ assert_eq!(constness, ty::BoundConstness::NotConst);
+
+ for obligation in traits::elaborate_trait_ref(tcx, base_trait_ref) {
+ debug!(
+ "conv_object_ty_poly_trait_ref: observing object predicate `{:?}`",
+ obligation.predicate
+ );
+
+ let bound_predicate = obligation.predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(pred) => {
+ let pred = bound_predicate.rebind(pred);
+ associated_types.entry(span).or_default().extend(
+ tcx.associated_items(pred.def_id())
+ .in_definition_order()
+ .filter(|item| item.kind == ty::AssocKind::Type)
+ .map(|item| item.def_id),
+ );
+ }
+ ty::PredicateKind::Projection(pred) => {
+ let pred = bound_predicate.rebind(pred);
+ // A `Self` within the original bound will be substituted with a
+ // `trait_object_dummy_self`, so check for that.
+ let references_self = match pred.skip_binder().term {
+ ty::Term::Ty(ty) => ty.walk().any(|arg| arg == dummy_self.into()),
+ ty::Term::Const(c) => c.ty().walk().any(|arg| arg == dummy_self.into()),
+ };
+
+ // If the projection output contains `Self`, force the user to
+ // elaborate it explicitly to avoid a lot of complexity.
+ //
+ // The "classically useful" case is the following:
+ // ```
+ // trait MyTrait: FnMut() -> <Self as MyTrait>::MyOutput {
+ // type MyOutput;
+ // }
+ // ```
+ //
+ // Here, the user could theoretically write `dyn MyTrait<Output = X>`,
+ // but actually supporting that would "expand" to an infinitely-long type
+ // `fix $ τ → dyn MyTrait<MyOutput = X, Output = <τ as MyTrait>::MyOutput>`.
+ //
+ // Instead, we force the user to write
+ // `dyn MyTrait<MyOutput = X, Output = X>`, which is uglier but works. See
+ // the discussion in #56288 for alternatives.
+ if !references_self {
+ // Include projections defined on supertraits.
+ bounds.projection_bounds.push((pred, span));
+ }
+ }
+ _ => (),
+ }
+ }
+ }
+
+ for (projection_bound, _) in &bounds.projection_bounds {
+ for def_ids in associated_types.values_mut() {
+ def_ids.remove(&projection_bound.projection_def_id());
+ }
+ }
+
+ self.complain_about_missing_associated_types(
+ associated_types,
+ potential_assoc_types,
+ trait_bounds,
+ );
+
+ // De-duplicate auto traits so that, e.g., `dyn Trait + Send + Send` is the same as
+ // `dyn Trait + Send`.
+ // We remove duplicates by inserting into a `FxHashSet` to avoid re-ordering
+ // the bounds.
+ let mut duplicates = FxHashSet::default();
+ auto_traits.retain(|i| duplicates.insert(i.trait_ref().def_id()));
+ debug!("regular_traits: {:?}", regular_traits);
+ debug!("auto_traits: {:?}", auto_traits);
+
+ // Erase the `dummy_self` (`trait_object_dummy_self`) used above.
+ let existential_trait_refs = regular_traits.iter().map(|i| {
+ i.trait_ref().map_bound(|trait_ref: ty::TraitRef<'tcx>| {
+ if trait_ref.self_ty() != dummy_self {
+ // FIXME: There appears to be a missing filter on top of `expand_trait_aliases`,
+ // which picks up non-supertrait where-clauses - but also, the object-safety
+ // check completely ignores trait aliases, which could be object-safety hazards. We
+ // `delay_span_bug` here to avoid an ICE on stable even when the feature is
+ // disabled. (#66420)
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ &format!(
+ "trait_ref_to_existential called on {:?} with non-dummy Self",
+ trait_ref,
+ ),
+ );
+ }
+ ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
+ })
+ });
+ let existential_projections = bounds.projection_bounds.iter().map(|(bound, _)| {
+ bound.map_bound(|b| {
+ if b.projection_ty.self_ty() != dummy_self {
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ &format!("trait_ref_to_existential called on {:?} with non-dummy Self", b),
+ );
+ }
+ ty::ExistentialProjection::erase_self_ty(tcx, b)
+ })
+ });
+
+ let regular_trait_predicates = existential_trait_refs
+ .map(|trait_ref| trait_ref.map_bound(ty::ExistentialPredicate::Trait));
+ let auto_trait_predicates = auto_traits.into_iter().map(|trait_ref| {
+ ty::Binder::dummy(ty::ExistentialPredicate::AutoTrait(trait_ref.trait_ref().def_id()))
+ });
+ // N.b. principal, projections, auto traits
+ // FIXME: This is actually wrong with multiple principals with regard to symbol mangling
+ let mut v = regular_trait_predicates
+ .chain(
+ existential_projections.map(|x| x.map_bound(ty::ExistentialPredicate::Projection)),
+ )
+ .chain(auto_trait_predicates)
+ .collect::<SmallVec<[_; 8]>>();
+ v.sort_by(|a, b| a.skip_binder().stable_cmp(tcx, &b.skip_binder()));
+ v.dedup();
+ let existential_predicates = tcx.mk_poly_existential_predicates(v.into_iter());
+
+ // Use explicitly-specified region bound.
+ let region_bound = if !lifetime.is_elided() {
+ self.ast_region_to_region(lifetime, None)
+ } else {
+ self.compute_object_lifetime_bound(span, existential_predicates).unwrap_or_else(|| {
+ if tcx.named_region(lifetime.hir_id).is_some() {
+ self.ast_region_to_region(lifetime, None)
+ } else {
+ self.re_infer(None, span).unwrap_or_else(|| {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0228,
+ "the lifetime bound for this object type cannot be deduced \
+ from context; please supply an explicit bound"
+ );
+ if borrowed {
+ // We will have already emitted an error E0106 complaining about a
+ // missing named lifetime in `&dyn Trait`, so we elide this one.
+ err.delay_as_bug();
+ } else {
+ err.emit();
+ }
+ tcx.lifetimes.re_static
+ })
+ }
+ })
+ };
+ debug!("region_bound: {:?}", region_bound);
+
+ let ty = tcx.mk_dynamic(existential_predicates, region_bound);
+ debug!("trait_object_type: {:?}", ty);
+ ty
+ }
+
+ fn report_ambiguous_associated_type(
+ &self,
+ span: Span,
+ type_str: &str,
+ trait_str: &str,
+ name: Symbol,
+ ) -> ErrorGuaranteed {
+ let mut err = struct_span_err!(self.tcx().sess, span, E0223, "ambiguous associated type");
+ if self
+ .tcx()
+ .resolutions(())
+ .confused_type_with_std_module
+ .keys()
+ .any(|full_span| full_span.contains(span))
+ {
+ err.span_suggestion(
+ span.shrink_to_lo(),
+ "you are looking for the module in `std`, not the primitive type",
+ "std::",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion(
+ span,
+ "use fully-qualified syntax",
+ format!("<{} as {}>::{}", type_str, trait_str, name),
+ Applicability::HasPlaceholders,
+ );
+ }
+ err.emit()
+ }
+
+ // Search for a bound on a type parameter which includes the associated item
+ // given by `assoc_name`. `ty_param_def_id` is the `DefId` of the type parameter.
+ // This function will fail if there are no suitable bounds or there is
+ // any ambiguity.
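+ //
+ // For example (illustrative), resolving `T::Item` below searches the
+ // bounds on `T` and finds the associated type on the `Iterator` bound:
+ //
+ // ```
+ // fn f<T: Iterator>(x: T::Item) {}
+ // ```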
+ fn find_bound_for_assoc_item(
+ &self,
+ ty_param_def_id: LocalDefId,
+ assoc_name: Ident,
+ span: Span,
+ ) -> Result<ty::PolyTraitRef<'tcx>, ErrorGuaranteed> {
+ let tcx = self.tcx();
+
+ debug!(
+ "find_bound_for_assoc_item(ty_param_def_id={:?}, assoc_name={:?}, span={:?})",
+ ty_param_def_id, assoc_name, span,
+ );
+
+ let predicates = &self
+ .get_type_parameter_bounds(span, ty_param_def_id.to_def_id(), assoc_name)
+ .predicates;
+
+ debug!("find_bound_for_assoc_item: predicates={:#?}", predicates);
+
+ let param_name = tcx.hir().ty_param_name(ty_param_def_id);
+ self.one_bound_for_assoc_type(
+ || {
+ traits::transitive_bounds_that_define_assoc_type(
+ tcx,
+ predicates.iter().filter_map(|(p, _)| {
+ Some(p.to_opt_poly_trait_pred()?.map_bound(|t| t.trait_ref))
+ }),
+ assoc_name,
+ )
+ },
+ || param_name.to_string(),
+ assoc_name,
+ span,
+ || None,
+ )
+ }
+
+ // Checks that `bounds` contains exactly one element and reports appropriate
+ // errors otherwise.
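+ //
+ // For example (illustrative), `T::A` is ambiguous here, because both
+ // bounds define an associated type `A` (E0221):
+ //
+ // ```
+ // trait Foo { type A; }
+ // trait Bar { type A; }
+ // fn f<T: Foo + Bar>(x: T::A) {}
+ // ```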
+ fn one_bound_for_assoc_type<I>(
+ &self,
+ all_candidates: impl Fn() -> I,
+ ty_param_name: impl Fn() -> String,
+ assoc_name: Ident,
+ span: Span,
+ is_equality: impl Fn() -> Option<String>,
+ ) -> Result<ty::PolyTraitRef<'tcx>, ErrorGuaranteed>
+ where
+ I: Iterator<Item = ty::PolyTraitRef<'tcx>>,
+ {
+ let mut matching_candidates = all_candidates()
+ .filter(|r| self.trait_defines_associated_type_named(r.def_id(), assoc_name));
+ let mut const_candidates = all_candidates()
+ .filter(|r| self.trait_defines_associated_const_named(r.def_id(), assoc_name));
+
+ let (bound, next_cand) = match (matching_candidates.next(), const_candidates.next()) {
+ (Some(bound), _) => (bound, matching_candidates.next()),
+ (None, Some(bound)) => (bound, const_candidates.next()),
+ (None, None) => {
+ let reported = self.complain_about_assoc_type_not_found(
+ all_candidates,
+ &ty_param_name(),
+ assoc_name,
+ span,
+ );
+ return Err(reported);
+ }
+ };
+ debug!("one_bound_for_assoc_type: bound = {:?}", bound);
+
+ if let Some(bound2) = next_cand {
+ debug!("one_bound_for_assoc_type: bound2 = {:?}", bound2);
+
+ let is_equality = is_equality();
+ let bounds = IntoIterator::into_iter([bound, bound2]).chain(matching_candidates);
+ let mut err = if is_equality.is_some() {
+ // More specific Error Index entry.
+ struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0222,
+ "ambiguous associated type `{}` in bounds of `{}`",
+ assoc_name,
+ ty_param_name()
+ )
+ } else {
+ struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0221,
+ "ambiguous associated type `{}` in bounds of `{}`",
+ assoc_name,
+ ty_param_name()
+ )
+ };
+ err.span_label(span, format!("ambiguous associated type `{}`", assoc_name));
+
+ let mut where_bounds = vec![];
+ for bound in bounds {
+ let bound_id = bound.def_id();
+ let bound_span = self
+ .tcx()
+ .associated_items(bound_id)
+ .find_by_name_and_kind(self.tcx(), assoc_name, ty::AssocKind::Type, bound_id)
+ .and_then(|item| self.tcx().hir().span_if_local(item.def_id));
+
+ if let Some(bound_span) = bound_span {
+ err.span_label(
+ bound_span,
+ format!(
+ "ambiguous `{}` from `{}`",
+ assoc_name,
+ bound.print_only_trait_path(),
+ ),
+ );
+ if let Some(constraint) = &is_equality {
+ where_bounds.push(format!(
+ " T: {trait}::{assoc} = {constraint}",
+ trait=bound.print_only_trait_path(),
+ assoc=assoc_name,
+ constraint=constraint,
+ ));
+ } else {
+ err.span_suggestion_verbose(
+ span.with_hi(assoc_name.span.lo()),
+ "use fully qualified syntax to disambiguate",
+ format!(
+ "<{} as {}>::",
+ ty_param_name(),
+ bound.print_only_trait_path(),
+ ),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ } else {
+ err.note(&format!(
+ "associated type `{}` could derive from `{}`",
+ ty_param_name(),
+ bound.print_only_trait_path(),
+ ));
+ }
+ }
+ if !where_bounds.is_empty() {
+ err.help(&format!(
+ "consider introducing a new type parameter `T` and adding `where` constraints:\
+ \n where\n T: {},\n{}",
+ ty_param_name(),
+ where_bounds.join(",\n"),
+ ));
+ }
+ let reported = err.emit();
+ if !where_bounds.is_empty() {
+ return Err(reported);
+ }
+ }
+
+ Ok(bound)
+ }
+
+ // Create a type from a path to an associated type.
+ // For a path `A::B::C::D`, `qself_ty` and `qself_def` are the type and def for `A::B::C`,
+ // and `item_segment` is the path segment for `D`. We return a type and a def for
+ // the whole path.
+ // Will fail except for `T::A` and `Self::A`; i.e., if `qself_ty`/`qself_def` are not a type
+ // parameter or `Self`.
+ // NOTE: When this function starts resolving `Trait::AssocTy` successfully,
+ // it should also start reporting the `BARE_TRAIT_OBJECTS` lint.
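+ //
+ // For example (illustrative), `Self::Out` below resolves through the
+ // `Tr` supertrait bound on `Self`:
+ //
+ // ```
+ // trait Tr { type Out; }
+ // trait Sub: Tr { fn f(x: Self::Out); }
+ // ```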
+ pub fn associated_path_to_ty(
+ &self,
+ hir_ref_id: hir::HirId,
+ span: Span,
+ qself_ty: Ty<'tcx>,
+ qself: &hir::Ty<'_>,
+ assoc_segment: &hir::PathSegment<'_>,
+ permit_variants: bool,
+ ) -> Result<(Ty<'tcx>, DefKind, DefId), ErrorGuaranteed> {
+ let tcx = self.tcx();
+ let assoc_ident = assoc_segment.ident;
+ let qself_res = if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = qself.kind {
+ path.res
+ } else {
+ Res::Err
+ };
+
+ debug!("associated_path_to_ty: {:?}::{}", qself_ty, assoc_ident);
+
+ // Check if we have an enum variant.
+ let mut variant_resolution = None;
+ if let ty::Adt(adt_def, _) = qself_ty.kind() {
+ if adt_def.is_enum() {
+ let variant_def = adt_def
+ .variants()
+ .iter()
+ .find(|vd| tcx.hygienic_eq(assoc_ident, vd.ident(tcx), adt_def.did()));
+ if let Some(variant_def) = variant_def {
+ if permit_variants {
+ tcx.check_stability(variant_def.def_id, Some(hir_ref_id), span, None);
+ self.prohibit_generics(slice::from_ref(assoc_segment).iter(), |err| {
+ err.note("enum variants can't have type parameters");
+ let type_name = tcx.item_name(adt_def.did());
+ let msg = format!(
+ "you might have meant to specity type parameters on enum \
+ `{type_name}`"
+ );
+ let Some(args) = assoc_segment.args else { return; };
+ // Get the span of the generics args *including* the leading `::`.
+ let args_span = assoc_segment.ident.span.shrink_to_hi().to(args.span_ext);
+ if tcx.generics_of(adt_def.did()).count() == 0 {
+ // FIXME(estebank): we could also verify that the arguments being passed
+ // work for the `enum`, instead of just checking whether it takes *any*.
+ err.span_suggestion_verbose(
+ args_span,
+ &format!("{type_name} doesn't have generic parameters"),
+ "",
+ Applicability::MachineApplicable,
+ );
+ return;
+ }
+ let Ok(snippet) = tcx.sess.source_map().span_to_snippet(args_span) else {
+ err.note(&msg);
+ return;
+ };
+ let (qself_sugg_span, is_self) = if let hir::TyKind::Path(
+ hir::QPath::Resolved(_, ref path)
+ ) = qself.kind {
+ // If the path segment already has type params, we want to overwrite
+ // them.
+ match &path.segments[..] {
+ // `segment` is the previous to last element on the path,
+ // which would normally be the `enum` itself, while the last
+ // `_` `PathSegment` corresponds to the variant.
+ [.., hir::PathSegment {
+ ident,
+ args,
+ res: Some(Res::Def(DefKind::Enum, _)),
+ ..
+ }, _] => (
+ // We need to include the `::` in `Type::Variant::<Args>`
+ // to point the span to `::<Args>`, not just `<Args>`.
+ ident.span.shrink_to_hi().to(args.map_or(
+ ident.span.shrink_to_hi(),
+ |a| a.span_ext)),
+ false,
+ ),
+ [segment] => (
+ // We need to include the `::` in `Type::Variant::<Args>`
+ // to point the span to `::<Args>`, not just `<Args>`.
+ segment.ident.span.shrink_to_hi().to(segment.args.map_or(
+ segment.ident.span.shrink_to_hi(),
+ |a| a.span_ext)),
+ kw::SelfUpper == segment.ident.name,
+ ),
+ _ => {
+ err.note(&msg);
+ return;
+ }
+ }
+ } else {
+ err.note(&msg);
+ return;
+ };
+ let suggestion = vec![
+ if is_self {
+ // Account for people writing `Self::Variant::<Args>`, where
+ // `Self` is the enum, and suggest replacing `Self` with the
+ // appropriate type: `Type::<Args>::Variant`.
+ (qself.span, format!("{type_name}{snippet}"))
+ } else {
+ (qself_sugg_span, snippet)
+ },
+ (args_span, String::new()),
+ ];
+ err.multipart_suggestion_verbose(
+ &msg,
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ });
+ return Ok((qself_ty, DefKind::Variant, variant_def.def_id));
+ } else {
+ variant_resolution = Some(variant_def.def_id);
+ }
+ }
+ }
+ }
+
+ // Find the type of the associated item, and the trait where the associated
+ // item is declared.
+ let bound = match (&qself_ty.kind(), qself_res) {
+ (_, Res::SelfTy { trait_: Some(_), alias_to: Some((impl_def_id, _)) }) => {
+ // `Self` in an impl of a trait -- we have a concrete self type and a
+ // trait reference.
+ let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) else {
+ // A cycle error occurred, most likely.
+ let guar = tcx.sess.delay_span_bug(span, "expected cycle error");
+ return Err(guar);
+ };
+
+ self.one_bound_for_assoc_type(
+ || traits::supertraits(tcx, ty::Binder::dummy(trait_ref)),
+ || "Self".to_string(),
+ assoc_ident,
+ span,
+ || None,
+ )?
+ }
+ (
+ &ty::Param(_),
+ Res::SelfTy { trait_: Some(param_did), alias_to: None }
+ | Res::Def(DefKind::TyParam, param_did),
+ ) => self.find_bound_for_assoc_item(param_did.expect_local(), assoc_ident, span)?,
+ _ => {
+ let reported = if variant_resolution.is_some() {
+ // Variant in type position
+ let msg = format!("expected type, found variant `{}`", assoc_ident);
+ tcx.sess.span_err(span, &msg)
+ } else if qself_ty.is_enum() {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ assoc_ident.span,
+ E0599,
+ "no variant named `{}` found for enum `{}`",
+ assoc_ident,
+ qself_ty,
+ );
+
+ let adt_def = qself_ty.ty_adt_def().expect("enum is not an ADT");
+ if let Some(suggested_name) = find_best_match_for_name(
+ &adt_def
+ .variants()
+ .iter()
+ .map(|variant| variant.name)
+ .collect::<Vec<Symbol>>(),
+ assoc_ident.name,
+ None,
+ ) {
+ err.span_suggestion(
+ assoc_ident.span,
+ "there is a variant with a similar name",
+ suggested_name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_label(
+ assoc_ident.span,
+ format!("variant not found in `{}`", qself_ty),
+ );
+ }
+
+ if let Some(sp) = tcx.hir().span_if_local(adt_def.did()) {
+ err.span_label(sp, format!("variant `{}` not found here", assoc_ident));
+ }
+
+ err.emit()
+ } else if let Some(reported) = qself_ty.error_reported() {
+ reported
+ } else {
+ // Don't print `TyErr` to the user.
+ self.report_ambiguous_associated_type(
+ span,
+ &qself_ty.to_string(),
+ "Trait",
+ assoc_ident.name,
+ )
+ };
+ return Err(reported);
+ }
+ };
+
+ let trait_did = bound.def_id();
+ let (assoc_ident, def_scope) =
+ tcx.adjust_ident_and_get_scope(assoc_ident, trait_did, hir_ref_id);
+
+ // We have already adjusted the item name above, so compare with `ident.normalize_to_macros_2_0()` instead
+ // of calling `filter_by_name_and_kind`.
+ let item = tcx.associated_items(trait_did).in_definition_order().find(|i| {
+ i.kind.namespace() == Namespace::TypeNS
+ && i.ident(tcx).normalize_to_macros_2_0() == assoc_ident
+ });
+ // Assume that if it's not matched, there must be a const defined with the same name
+ // but it was used in a type position.
+ let Some(item) = item else {
+ let msg = format!("found associated const `{assoc_ident}` when type was expected");
+ let guar = tcx.sess.struct_span_err(span, &msg).emit();
+ return Err(guar);
+ };
+
+ let ty = self.projected_ty_from_poly_trait_ref(span, item.def_id, assoc_segment, bound);
+ let ty = self.normalize_ty(span, ty);
+
+ let kind = DefKind::AssocTy;
+ if !item.visibility(tcx).is_accessible_from(def_scope, tcx) {
+ let kind = kind.descr(item.def_id);
+ let msg = format!("{} `{}` is private", kind, assoc_ident);
+ tcx.sess
+ .struct_span_err(span, &msg)
+ .span_label(span, &format!("private {}", kind))
+ .emit();
+ }
+ tcx.check_stability(item.def_id, Some(hir_ref_id), span, None);
+
+ if let Some(variant_def_id) = variant_resolution {
+ tcx.struct_span_lint_hir(AMBIGUOUS_ASSOCIATED_ITEMS, hir_ref_id, span, |lint| {
+ let mut err = lint.build("ambiguous associated item");
+ let mut could_refer_to = |kind: DefKind, def_id, also| {
+ let note_msg = format!(
+ "`{}` could{} refer to the {} defined here",
+ assoc_ident,
+ also,
+ kind.descr(def_id)
+ );
+ err.span_note(tcx.def_span(def_id), &note_msg);
+ };
+
+ could_refer_to(DefKind::Variant, variant_def_id, "");
+ could_refer_to(kind, item.def_id, " also");
+
+ err.span_suggestion(
+ span,
+ "use fully-qualified syntax",
+ format!("<{} as {}>::{}", qself_ty, tcx.item_name(trait_did), assoc_ident),
+ Applicability::MachineApplicable,
+ );
+
+ err.emit();
+ });
+ }
+ Ok((ty, kind, item.def_id))
+ }
+
+ fn qpath_to_ty(
+ &self,
+ span: Span,
+ opt_self_ty: Option<Ty<'tcx>>,
+ item_def_id: DefId,
+ trait_segment: &hir::PathSegment<'_>,
+ item_segment: &hir::PathSegment<'_>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx();
+
+ let trait_def_id = tcx.parent(item_def_id);
+
+ debug!("qpath_to_ty: trait_def_id={:?}", trait_def_id);
+
+ let Some(self_ty) = opt_self_ty else {
+ let path_str = tcx.def_path_str(trait_def_id);
+
+ let def_id = self.item_def_id();
+
+ debug!("qpath_to_ty: self.item_def_id()={:?}", def_id);
+
+ let parent_def_id = def_id
+ .and_then(|def_id| {
+ def_id.as_local().map(|def_id| tcx.hir().local_def_id_to_hir_id(def_id))
+ })
+ .map(|hir_id| tcx.hir().get_parent_item(hir_id).to_def_id());
+
+ debug!("qpath_to_ty: parent_def_id={:?}", parent_def_id);
+
+ // If the trait in the segment is the same as the trait defining the item,
+ // use the `<Self as ..>` syntax in the error.
+ let is_part_of_self_trait_constraints = def_id == Some(trait_def_id);
+ let is_part_of_fn_in_self_trait = parent_def_id == Some(trait_def_id);
+
+ let type_name = if is_part_of_self_trait_constraints || is_part_of_fn_in_self_trait {
+ "Self"
+ } else {
+ "Type"
+ };
+
+ self.report_ambiguous_associated_type(
+ span,
+ type_name,
+ &path_str,
+ item_segment.ident.name,
+ );
+ return tcx.ty_error();
+ };
+
+ debug!("qpath_to_ty: self_type={:?}", self_ty);
+
+ let trait_ref =
+ self.ast_path_to_mono_trait_ref(span, trait_def_id, self_ty, trait_segment, false);
+
+ let item_substs = self.create_substs_for_associated_item(
+ tcx,
+ span,
+ item_def_id,
+ item_segment,
+ trait_ref.substs,
+ );
+
+ debug!("qpath_to_ty: trait_ref={:?}", trait_ref);
+
+ self.normalize_ty(span, tcx.mk_projection(item_def_id, item_substs))
+ }
+
+ pub fn prohibit_generics<'a>(
+ &self,
+ segments: impl Iterator<Item = &'a hir::PathSegment<'a>> + Clone,
+ extend: impl Fn(&mut DiagnosticBuilder<'tcx, ErrorGuaranteed>),
+ ) -> bool {
+ let args = segments.clone().flat_map(|segment| segment.args().args);
+
+ let (lt, ty, ct, inf) =
+ args.clone().fold((false, false, false, false), |(lt, ty, ct, inf), arg| match arg {
+ hir::GenericArg::Lifetime(_) => (true, ty, ct, inf),
+ hir::GenericArg::Type(_) => (lt, true, ct, inf),
+ hir::GenericArg::Const(_) => (lt, ty, true, inf),
+ hir::GenericArg::Infer(_) => (lt, ty, ct, true),
+ });
+ let mut emitted = false;
+ if lt || ty || ct || inf {
+ let types_and_spans: Vec<_> = segments
+ .clone()
+ .flat_map(|segment| {
+ segment.res.and_then(|res| {
+ if segment.args().args.is_empty() {
+ None
+ } else {
+ Some((
+ match res {
+ Res::PrimTy(ty) => format!("{} `{}`", res.descr(), ty.name()),
+ Res::Def(_, def_id)
+ if let Some(name) = self.tcx().opt_item_name(def_id) => {
+ format!("{} `{name}`", res.descr())
+ }
+ Res::Err => "this type".to_string(),
+ _ => res.descr().to_string(),
+ },
+ segment.ident.span,
+ ))
+ }
+ })
+ })
+ .collect();
+ let this_type = match &types_and_spans[..] {
+ [.., _, (last, _)] => format!(
+ "{} and {last}",
+ types_and_spans[..types_and_spans.len() - 1]
+ .iter()
+ .map(|(x, _)| x.as_str())
+ .intersperse(&", ")
+ .collect::<String>()
+ ),
+ [(only, _)] => only.to_string(),
+ [] => "this type".to_string(),
+ };
+
+ let arg_spans: Vec<Span> = args.map(|arg| arg.span()).collect();
+
+ let mut kinds = Vec::with_capacity(4);
+ if lt {
+ kinds.push("lifetime");
+ }
+ if ty {
+ kinds.push("type");
+ }
+ if ct {
+ kinds.push("const");
+ }
+ if inf {
+ kinds.push("generic");
+ }
+ let (kind, s) = match kinds[..] {
+ [.., _, last] => (
+ format!(
+ "{} and {last}",
+ kinds[..kinds.len() - 1]
+ .iter()
+ .copied()
+ .intersperse(", ")
+ .collect::<String>()
+ ),
+ "s",
+ ),
+ [only] => (format!("{only}"), ""),
+ [] => unreachable!(),
+ };
+ let last_span = *arg_spans.last().unwrap();
+ let span: MultiSpan = arg_spans.into();
+ let mut err = struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0109,
+ "{kind} arguments are not allowed on {this_type}",
+ );
+ err.span_label(last_span, format!("{kind} argument{s} not allowed"));
+ for (what, span) in types_and_spans {
+ err.span_label(span, format!("not allowed on {what}"));
+ }
+ extend(&mut err);
+ err.emit();
+ emitted = true;
+ }
+
+ for segment in segments {
+ // Only emit the first error to avoid overloading the user with error messages.
+ if let [binding, ..] = segment.args().bindings {
+ Self::prohibit_assoc_ty_binding(self.tcx(), binding.span);
+ return true;
+ }
+ }
+ emitted
+ }
+
+ // FIXME(eddyb, varkor) handle type paths here too, not just value ones.
+ pub fn def_ids_for_value_path_segments(
+ &self,
+ segments: &[hir::PathSegment<'_>],
+ self_ty: Option<Ty<'tcx>>,
+ kind: DefKind,
+ def_id: DefId,
+ ) -> Vec<PathSeg> {
+ // We need to extract the type parameters supplied by the user in
+ // the path `path`. Due to the current setup, this is a bit of a
+ // tricky process; the problem is that resolve only tells us the
+ // end-point of the path resolution, and not the intermediate steps.
+ // Luckily, we can (at least for now) deduce the intermediate steps
+ // just from the end-point.
+ //
+ // There are basically four cases to consider:
+ //
+ // 1. Reference to a constructor of a struct:
+ //
+ // struct Foo<T>(...)
+ //
+ // In this case, the parameters are declared in the type space.
+ //
+ // 2. Reference to a constructor of an enum variant:
+ //
+ // enum E<T> { Foo(...) }
+ //
+ // In this case, the parameters are defined in the type space,
+ // but may be specified either on the type or the variant.
+ //
+ // 3. Reference to a fn item or a free constant:
+ //
+ // fn foo<T>() { }
+ //
+ // In this case, the path will again always have the form
+ // `a::b::foo::<T>` where only the final segment should have
+ // type parameters. However, in this case, those parameters are
+ // declared on a value, and hence are in the `FnSpace`.
+ //
+ // 4. Reference to a method or an associated constant:
+ //
+ // impl<A> SomeStruct<A> {
+ // fn foo<B>(...)
+ // }
+ //
+ // Here we can have a path like
+ // `a::b::SomeStruct::<A>::foo::<B>`, in which case parameters
+ // may appear in two places. The penultimate segment,
+ // `SomeStruct::<A>`, contains parameters in `TypeSpace`, and the
+ // final segment, `foo::<B>`, contains parameters in `FnSpace`.
+ //
+ // The first step then is to categorize the segments appropriately.
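+ //
+ // For example (illustrative), in case 4 the path
+ // `a::b::SomeStruct::<A>::foo::<B>` has segments indexed 0..=3, so
+ // `path_segs` ends up as `[PathSeg(<foo's parent>, 2), PathSeg(foo, 3)]`:
+ // `A` is taken from segment 2 and `B` from segment 3.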
+
+ let tcx = self.tcx();
+
+ assert!(!segments.is_empty());
+ let last = segments.len() - 1;
+
+ let mut path_segs = vec![];
+
+ match kind {
+ // Case 1. Reference to a struct constructor.
+ DefKind::Ctor(CtorOf::Struct, ..) => {
+ // Everything but the final segment should have no
+ // parameters at all.
+ let generics = tcx.generics_of(def_id);
+ // Variant and struct constructors use the
+ // generics of their parent type definition.
+ let generics_def_id = generics.parent.unwrap_or(def_id);
+ path_segs.push(PathSeg(generics_def_id, last));
+ }
+
+ // Case 2. Reference to a variant constructor.
+ DefKind::Ctor(CtorOf::Variant, ..) | DefKind::Variant => {
+ let adt_def = self_ty.map(|t| t.ty_adt_def().unwrap());
+ let (generics_def_id, index) = if let Some(adt_def) = adt_def {
+ debug_assert!(adt_def.is_enum());
+ (adt_def.did(), last)
+ } else if last >= 1 && segments[last - 1].args.is_some() {
+ // Everything but the penultimate segment should have no
+ // parameters at all.
+ let mut def_id = def_id;
+
+ // `DefKind::Ctor` -> `DefKind::Variant`
+ if let DefKind::Ctor(..) = kind {
+ def_id = tcx.parent(def_id);
+ }
+
+ // `DefKind::Variant` -> `DefKind::Enum`
+ let enum_def_id = tcx.parent(def_id);
+ (enum_def_id, last - 1)
+ } else {
+ // FIXME: lint here recommending `Enum::<...>::Variant` form
+ // instead of `Enum::Variant::<...>` form.
+
+ // Everything but the final segment should have no
+ // parameters at all.
+ let generics = tcx.generics_of(def_id);
+ // Variant and struct constructors use the
+ // generics of their parent type definition.
+ (generics.parent.unwrap_or(def_id), last)
+ };
+ path_segs.push(PathSeg(generics_def_id, index));
+ }
+
+ // Case 3. Reference to a top-level value.
+ DefKind::Fn | DefKind::Const | DefKind::ConstParam | DefKind::Static(_) => {
+ path_segs.push(PathSeg(def_id, last));
+ }
+
+ // Case 4. Reference to a method or associated const.
+ DefKind::AssocFn | DefKind::AssocConst => {
+ if segments.len() >= 2 {
+ let generics = tcx.generics_of(def_id);
+ path_segs.push(PathSeg(generics.parent.unwrap(), last - 1));
+ }
+ path_segs.push(PathSeg(def_id, last));
+ }
+
+ kind => bug!("unexpected definition kind {:?} for {:?}", kind, def_id),
+ }
+
+ debug!("path_segs = {:?}", path_segs);
+
+ path_segs
+ }
+
+ // Check a type `Path` and convert it to a `Ty`.
+ pub fn res_to_ty(
+ &self,
+ opt_self_ty: Option<Ty<'tcx>>,
+ path: &hir::Path<'_>,
+ permit_variants: bool,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx();
+
+ debug!(
+ "res_to_ty(res={:?}, opt_self_ty={:?}, path_segments={:?})",
+ path.res, opt_self_ty, path.segments
+ );
+
+ let span = path.span;
+ match path.res {
+ Res::Def(DefKind::OpaqueTy, did) => {
+ // Check for desugared `impl Trait`.
+ assert!(ty::is_impl_trait_defn(tcx, did).is_none());
+ let item_segment = path.segments.split_last().unwrap();
+ self.prohibit_generics(item_segment.1.iter(), |err| {
+ err.note("`impl Trait` types can't have type parameters");
+ });
+ let substs = self.ast_path_substs_for_ty(span, did, item_segment.0);
+ self.normalize_ty(span, tcx.mk_opaque(did, substs))
+ }
+ Res::Def(
+ DefKind::Enum
+ | DefKind::TyAlias
+ | DefKind::Struct
+ | DefKind::Union
+ | DefKind::ForeignTy,
+ did,
+ ) => {
+ assert_eq!(opt_self_ty, None);
+ self.prohibit_generics(path.segments.split_last().unwrap().1.iter(), |_| {});
+ self.ast_path_to_ty(span, did, path.segments.last().unwrap())
+ }
+ Res::Def(kind @ DefKind::Variant, def_id) if permit_variants => {
+ // Convert "variant type" as if it were a real type.
+ // The resulting `Ty` is type of the variant's enum for now.
+ assert_eq!(opt_self_ty, None);
+
+ let path_segs =
+ self.def_ids_for_value_path_segments(path.segments, None, kind, def_id);
+ let generic_segs: FxHashSet<_> =
+ path_segs.iter().map(|PathSeg(_, index)| index).collect();
+ self.prohibit_generics(
+ path.segments.iter().enumerate().filter_map(|(index, seg)| {
+ if !generic_segs.contains(&index) { Some(seg) } else { None }
+ }),
+ |err| {
+ err.note("enum variants can't have type parameters");
+ },
+ );
+
+ let PathSeg(def_id, index) = path_segs.last().unwrap();
+ self.ast_path_to_ty(span, *def_id, &path.segments[*index])
+ }
+ Res::Def(DefKind::TyParam, def_id) => {
+ assert_eq!(opt_self_ty, None);
+ self.prohibit_generics(path.segments.iter(), |err| {
+ if let Some(span) = tcx.def_ident_span(def_id) {
+ let name = tcx.item_name(def_id);
+ err.span_note(span, &format!("type parameter `{name}` defined here"));
+ }
+ });
+
+ let def_id = def_id.expect_local();
+ let item_def_id = tcx.hir().ty_param_owner(def_id);
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id.to_def_id()];
+ tcx.mk_ty_param(index, tcx.hir().ty_param_name(def_id))
+ }
+ Res::SelfTy { trait_: Some(_), alias_to: None } => {
+ // `Self` in trait or type alias.
+ assert_eq!(opt_self_ty, None);
+ self.prohibit_generics(path.segments.iter(), |err| {
+ if let [hir::PathSegment { args: Some(args), ident, .. }] = &path.segments[..] {
+ err.span_suggestion_verbose(
+ ident.span.shrink_to_hi().to(args.span_ext),
+ "the `Self` type doesn't accept type parameters",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ });
+ tcx.types.self_param
+ }
+ Res::SelfTy { trait_: _, alias_to: Some((def_id, forbid_generic)) } => {
+ // `Self` in impl (we know the concrete type).
+ assert_eq!(opt_self_ty, None);
+ // Try to evaluate any array length constants.
+ let ty = tcx.at(span).type_of(def_id);
+ let span_of_impl = tcx.span_of_impl(def_id);
+ self.prohibit_generics(path.segments.iter(), |err| {
+ let def_id = match *ty.kind() {
+ ty::Adt(self_def, _) => self_def.did(),
+ _ => return,
+ };
+
+ let type_name = tcx.item_name(def_id);
+ let span_of_ty = tcx.def_ident_span(def_id);
+ let generics = tcx.generics_of(def_id).count();
+
+ let msg = format!("`Self` is of type `{ty}`");
+ if let (Ok(i_sp), Some(t_sp)) = (span_of_impl, span_of_ty) {
+ let mut span: MultiSpan = vec![t_sp].into();
+ span.push_span_label(
+ i_sp,
+ &format!("`Self` is on type `{type_name}` in this `impl`"),
+ );
+ let mut postfix = "";
+ if generics == 0 {
+ postfix = ", which doesn't have generic parameters";
+ }
+ span.push_span_label(
+ t_sp,
+ &format!("`Self` corresponds to this type{postfix}"),
+ );
+ err.span_note(span, &msg);
+ } else {
+ err.note(&msg);
+ }
+ for segment in path.segments {
+ if let Some(args) = segment.args && segment.ident.name == kw::SelfUpper {
+ if generics == 0 {
+ // FIXME(estebank): we could also verify that the arguments being passed
+ // work for the `enum`, instead of just checking whether it takes *any*.
+ err.span_suggestion_verbose(
+ segment.ident.span.shrink_to_hi().to(args.span_ext),
+ "the `Self` type doesn't accept type parameters",
+ "",
+ Applicability::MachineApplicable,
+ );
+ return;
+ } else {
+ err.span_suggestion_verbose(
+ segment.ident.span,
+ format!(
+ "the `Self` type doesn't accept type parameters, use the \
+ concrete type's name `{type_name}` instead if you want to \
+ specify its type parameters"
+ ),
+ type_name,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ });
+ // HACK(min_const_generics): Forbid generic `Self` types
+ // here as we can't easily do that during nameres.
+ //
+ // We do this before normalization as we otherwise allow
+ // ```rust
+ // trait AlwaysApplicable { type Assoc; }
+ // impl<T: ?Sized> AlwaysApplicable for T { type Assoc = usize; }
+ //
+ // trait BindsParam<T> {
+ // type ArrayTy;
+ // }
+ // impl<T> BindsParam<T> for <T as AlwaysApplicable>::Assoc {
+ // type ArrayTy = [u8; Self::MAX];
+ // }
+ // ```
+ // Note that the normalization happens in the param env of
+ // the anon const, which is empty. This is why the
+ // `AlwaysApplicable` impl needs a `T: ?Sized` bound for
+ // this to compile if we were to normalize here.
+ if forbid_generic && ty.needs_subst() {
+ let mut err = tcx.sess.struct_span_err(
+ path.span,
+ "generic `Self` types are currently not permitted in anonymous constants",
+ );
+ if let Some(hir::Node::Item(&hir::Item {
+ kind: hir::ItemKind::Impl(ref impl_),
+ ..
+ })) = tcx.hir().get_if_local(def_id)
+ {
+ err.span_note(impl_.self_ty.span, "not a concrete type");
+ }
+ err.emit();
+ tcx.ty_error()
+ } else {
+ self.normalize_ty(span, ty)
+ }
+ }
+ Res::Def(DefKind::AssocTy, def_id) => {
+ debug_assert!(path.segments.len() >= 2);
+ self.prohibit_generics(path.segments[..path.segments.len() - 2].iter(), |_| {});
+ self.qpath_to_ty(
+ span,
+ opt_self_ty,
+ def_id,
+ &path.segments[path.segments.len() - 2],
+ path.segments.last().unwrap(),
+ )
+ }
+ Res::PrimTy(prim_ty) => {
+ assert_eq!(opt_self_ty, None);
+ self.prohibit_generics(path.segments.iter(), |err| {
+ let name = prim_ty.name_str();
+ for segment in path.segments {
+ if let Some(args) = segment.args {
+ err.span_suggestion_verbose(
+ segment.ident.span.shrink_to_hi().to(args.span_ext),
+ &format!("primitive type `{name}` doesn't have generic parameters"),
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ });
+ match prim_ty {
+ hir::PrimTy::Bool => tcx.types.bool,
+ hir::PrimTy::Char => tcx.types.char,
+ hir::PrimTy::Int(it) => tcx.mk_mach_int(ty::int_ty(it)),
+ hir::PrimTy::Uint(uit) => tcx.mk_mach_uint(ty::uint_ty(uit)),
+ hir::PrimTy::Float(ft) => tcx.mk_mach_float(ty::float_ty(ft)),
+ hir::PrimTy::Str => tcx.types.str_,
+ }
+ }
+ Res::Err => {
+ self.set_tainted_by_errors();
+ self.tcx().ty_error()
+ }
+ _ => span_bug!(span, "unexpected resolution: {:?}", path.res),
+ }
+ }
+
+ /// Parses the programmer's textual representation of a type into our
+ /// internal notion of a type.
+ pub fn ast_ty_to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ self.ast_ty_to_ty_inner(ast_ty, false, false)
+ }
+
+ /// Parses the programmer's textual representation of a type into our
+ /// internal notion of a type. This is meant to be used within a path.
+ pub fn ast_ty_to_ty_in_path(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ self.ast_ty_to_ty_inner(ast_ty, false, true)
+ }
+
+ /// Turns a `hir::Ty` into a `Ty`. For diagnostics' purposes we keep track of whether trait
+ /// objects are borrowed like `&dyn Trait` to avoid emitting redundant errors.
+ #[tracing::instrument(level = "debug", skip(self))]
+ fn ast_ty_to_ty_inner(&self, ast_ty: &hir::Ty<'_>, borrowed: bool, in_path: bool) -> Ty<'tcx> {
+ let tcx = self.tcx();
+
+ let result_ty = match ast_ty.kind {
+ hir::TyKind::Slice(ref ty) => tcx.mk_slice(self.ast_ty_to_ty(ty)),
+ hir::TyKind::Ptr(ref mt) => {
+ tcx.mk_ptr(ty::TypeAndMut { ty: self.ast_ty_to_ty(mt.ty), mutbl: mt.mutbl })
+ }
+ hir::TyKind::Rptr(ref region, ref mt) => {
+ let r = self.ast_region_to_region(region, None);
+ debug!(?r);
+ let t = self.ast_ty_to_ty_inner(mt.ty, true, false);
+ tcx.mk_ref(r, ty::TypeAndMut { ty: t, mutbl: mt.mutbl })
+ }
+ hir::TyKind::Never => tcx.types.never,
+ hir::TyKind::Tup(fields) => tcx.mk_tup(fields.iter().map(|t| self.ast_ty_to_ty(t))),
+ hir::TyKind::BareFn(bf) => {
+ require_c_abi_if_c_variadic(tcx, bf.decl, bf.abi, ast_ty.span);
+
+ tcx.mk_fn_ptr(self.ty_of_fn(
+ ast_ty.hir_id,
+ bf.unsafety,
+ bf.abi,
+ bf.decl,
+ None,
+ Some(ast_ty),
+ ))
+ }
+ hir::TyKind::TraitObject(bounds, ref lifetime, _) => {
+ self.maybe_lint_bare_trait(ast_ty, in_path);
+ self.conv_object_ty_poly_trait_ref(ast_ty.span, bounds, lifetime, borrowed)
+ }
+ hir::TyKind::Path(hir::QPath::Resolved(ref maybe_qself, ref path)) => {
+ debug!(?maybe_qself, ?path);
+ let opt_self_ty = maybe_qself.as_ref().map(|qself| self.ast_ty_to_ty(qself));
+ self.res_to_ty(opt_self_ty, path, false)
+ }
+ hir::TyKind::OpaqueDef(item_id, lifetimes) => {
+ let opaque_ty = tcx.hir().item(item_id);
+ let def_id = item_id.def_id.to_def_id();
+
+ match opaque_ty.kind {
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
+ self.impl_trait_ty_to_ty(def_id, lifetimes, origin)
+ }
+ ref i => bug!("`impl Trait` pointed to non-opaque type?? {:#?}", i),
+ }
+ }
+ hir::TyKind::Path(hir::QPath::TypeRelative(ref qself, ref segment)) => {
+ debug!(?qself, ?segment);
+ let ty = self.ast_ty_to_ty_inner(qself, false, true);
+ self.associated_path_to_ty(ast_ty.hir_id, ast_ty.span, ty, qself, segment, false)
+ .map(|(ty, _, _)| ty)
+ .unwrap_or_else(|_| tcx.ty_error())
+ }
+ hir::TyKind::Path(hir::QPath::LangItem(lang_item, span, _)) => {
+ let def_id = tcx.require_lang_item(lang_item, Some(span));
+ let (substs, _) = self.create_substs_for_ast_path(
+ span,
+ def_id,
+ &[],
+ &hir::PathSegment::invalid(),
+ &GenericArgs::none(),
+ true,
+ None,
+ );
+ EarlyBinder(self.normalize_ty(span, tcx.at(span).type_of(def_id)))
+ .subst(tcx, substs)
+ }
+ hir::TyKind::Array(ref ty, ref length) => {
+ let length = match length {
+ &hir::ArrayLen::Infer(_, span) => self.ct_infer(tcx.types.usize, None, span),
+ hir::ArrayLen::Body(constant) => {
+ let length_def_id = tcx.hir().local_def_id(constant.hir_id);
+ ty::Const::from_anon_const(tcx, length_def_id)
+ }
+ };
+
+ let array_ty = tcx.mk_ty(ty::Array(self.ast_ty_to_ty(ty), length));
+ self.normalize_ty(ast_ty.span, array_ty)
+ }
+ hir::TyKind::Typeof(ref e) => {
+ let ty = tcx.type_of(tcx.hir().local_def_id(e.hir_id));
+ let span = ast_ty.span;
+ tcx.sess.emit_err(TypeofReservedKeywordUsed {
+ span,
+ ty,
+ opt_sugg: Some((span, Applicability::MachineApplicable))
+ .filter(|_| ty.is_suggestable(tcx, false)),
+ });
+
+ ty
+ }
+ hir::TyKind::Infer => {
+ // Infer also appears as the type of arguments or return
+ // values in an `ExprKind::Closure`, or as
+ // the type of local variables. Both of these cases are
+ // handled specially and will not descend into this routine.
+ self.ty_infer(None, ast_ty.span)
+ }
+ hir::TyKind::Err => tcx.ty_error(),
+ };
+
+ debug!(?result_ty);
+
+ self.record_ty(ast_ty.hir_id, result_ty, ast_ty.span);
+ result_ty
+ }
+
+ fn impl_trait_ty_to_ty(
+ &self,
+ def_id: DefId,
+ lifetimes: &[hir::GenericArg<'_>],
+ origin: OpaqueTyOrigin,
+ ) -> Ty<'tcx> {
+ debug!("impl_trait_ty_to_ty(def_id={:?}, lifetimes={:?})", def_id, lifetimes);
+ let tcx = self.tcx();
+
+ let generics = tcx.generics_of(def_id);
+
+ debug!("impl_trait_ty_to_ty: generics={:?}", generics);
+ let substs = InternalSubsts::for_item(tcx, def_id, |param, _| {
+ if let Some(i) = (param.index as usize).checked_sub(generics.parent_count) {
+ // Our own parameters are the resolved lifetimes.
+ if let GenericParamDefKind::Lifetime = param.kind {
+ if let hir::GenericArg::Lifetime(lifetime) = &lifetimes[i] {
+ self.ast_region_to_region(lifetime, None).into()
+ } else {
+ bug!()
+ }
+ } else {
+ bug!()
+ }
+ } else {
+ match param.kind {
+ // For RPIT (return position impl trait), only lifetimes
+ // mentioned in the impl Trait predicate are captured by
+ // the opaque type, so the lifetime parameters from the
+ // parent item need to be replaced with `'static`.
+ //
+ // `impl Trait` in the types of statics, constants,
+ // locals, and type aliases captures all parent
+ // lifetimes, so they can use their identity subst.
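+ //
+ // An illustrative sketch:
+ //
+ // ```
+ // fn f<'a>(x: &'a u32) -> impl Sized { *x }
+ // ```
+ //
+ // The opaque type's bounds do not mention `'a`, so the parent
+ // lifetime `'a` in its substs is replaced with `'static` here.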
+ GenericParamDefKind::Lifetime
+ if matches!(
+ origin,
+ hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..)
+ ) =>
+ {
+ tcx.lifetimes.re_static.into()
+ }
+ _ => tcx.mk_param_from_def(param),
+ }
+ }
+ });
+ debug!("impl_trait_ty_to_ty: substs={:?}", substs);
+
+ let ty = tcx.mk_opaque(def_id, substs);
+ debug!("impl_trait_ty_to_ty: {}", ty);
+ ty
+ }
+
+ pub fn ty_of_arg(&self, ty: &hir::Ty<'_>, expected_ty: Option<Ty<'tcx>>) -> Ty<'tcx> {
+ match ty.kind {
+ hir::TyKind::Infer if expected_ty.is_some() => {
+ self.record_ty(ty.hir_id, expected_ty.unwrap(), ty.span);
+ expected_ty.unwrap()
+ }
+ _ => self.ast_ty_to_ty(ty),
+ }
+ }
+
+ pub fn ty_of_fn(
+ &self,
+ hir_id: hir::HirId,
+ unsafety: hir::Unsafety,
+ abi: abi::Abi,
+ decl: &hir::FnDecl<'_>,
+ generics: Option<&hir::Generics<'_>>,
+ hir_ty: Option<&hir::Ty<'_>>,
+ ) -> ty::PolyFnSig<'tcx> {
+ debug!("ty_of_fn");
+
+ let tcx = self.tcx();
+ let bound_vars = tcx.late_bound_vars(hir_id);
+ debug!(?bound_vars);
+
+ // We proactively collect all the inferred type params to emit a single error per fn def.
+ let mut visitor = HirPlaceholderCollector::default();
+ let mut infer_replacements = vec![];
+
+ if let Some(generics) = generics {
+ walk_generics(&mut visitor, generics);
+ }
+
+ let input_tys: Vec<_> = decl
+ .inputs
+ .iter()
+ .enumerate()
+ .map(|(i, a)| {
+ if let hir::TyKind::Infer = a.kind && !self.allow_ty_infer() {
+ if let Some(suggested_ty) =
+ self.suggest_trait_fn_ty_for_impl_fn_infer(hir_id, Some(i))
+ {
+ infer_replacements.push((a.span, suggested_ty.to_string()));
+ return suggested_ty;
+ }
+ }
+
+ // Only visit the type looking for `_` if we didn't fix the type above
+ visitor.visit_ty(a);
+ self.ty_of_arg(a, None)
+ })
+ .collect();
+
+ let output_ty = match decl.output {
+ hir::FnRetTy::Return(output) => {
+ if let hir::TyKind::Infer = output.kind
+ && !self.allow_ty_infer()
+ && let Some(suggested_ty) =
+ self.suggest_trait_fn_ty_for_impl_fn_infer(hir_id, None)
+ {
+ infer_replacements.push((output.span, suggested_ty.to_string()));
+ suggested_ty
+ } else {
+ visitor.visit_ty(output);
+ self.ast_ty_to_ty(output)
+ }
+ }
+ hir::FnRetTy::DefaultReturn(..) => tcx.mk_unit(),
+ };
+
+ debug!("ty_of_fn: output_ty={:?}", output_ty);
+
+ let fn_ty = tcx.mk_fn_sig(input_tys.into_iter(), output_ty, decl.c_variadic, unsafety, abi);
+ let bare_fn_ty = ty::Binder::bind_with_vars(fn_ty, bound_vars);
+
+ if !self.allow_ty_infer() && !(visitor.0.is_empty() && infer_replacements.is_empty()) {
+ // We always collect the spans for placeholder types when evaluating `fn`s, but we
+ // only want to emit an error complaining about them if infer types (`_`) are not
+ // allowed. `allow_ty_infer` gates this behavior. We check for the presence of
+ // `ident_span` to not emit an error twice when we have `fn foo(_: fn() -> _)`.
+
+ let mut diag = crate::collect::placeholder_type_error_diag(
+ tcx,
+ generics,
+ visitor.0,
+ infer_replacements.iter().map(|(s, _)| *s).collect(),
+ true,
+ hir_ty,
+ "function",
+ );
+
+ if !infer_replacements.is_empty() {
+ diag.multipart_suggestion(&format!(
+ "try replacing `_` with the type{} in the corresponding trait method signature",
+ rustc_errors::pluralize!(infer_replacements.len()),
+ ), infer_replacements, Applicability::MachineApplicable);
+ }
+
+ diag.emit();
+ }
+
+ // Find any late-bound regions declared in return type that do
+ // not appear in the arguments. These are not well-formed.
+ //
+ // Example:
+ // for<'a> fn() -> &'a str <-- 'a is bad
+ // for<'a> fn(&'a String) -> &'a str <-- 'a is ok
+ let inputs = bare_fn_ty.inputs();
+ let late_bound_in_args =
+ tcx.collect_constrained_late_bound_regions(&inputs.map_bound(|i| i.to_owned()));
+ let output = bare_fn_ty.output();
+ let late_bound_in_ret = tcx.collect_referenced_late_bound_regions(&output);
+
+ self.validate_late_bound_regions(late_bound_in_args, late_bound_in_ret, |br_name| {
+ struct_span_err!(
+ tcx.sess,
+ decl.output.span(),
+ E0581,
+ "return type references {}, which is not constrained by the fn input types",
+ br_name
+ )
+ });
+
+ bare_fn_ty
+ }
+
+ /// Given a `fn_hir_id` for an impl function, suggest the type that is found on the
+ /// corresponding function in the trait that the impl implements, if it exists.
+ /// If `arg_idx` is `Some`, then it corresponds to an input type index; otherwise it
+ /// corresponds to the return type.
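+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// trait Tr { fn m(&self, x: u32) -> u32; }
+ /// impl Tr for () {
+ ///     fn m(&self, x: _) -> _ { x } // suggests `u32` for both `_`s
+ /// }
+ /// ```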
+ fn suggest_trait_fn_ty_for_impl_fn_infer(
+ &self,
+ fn_hir_id: hir::HirId,
+ arg_idx: Option<usize>,
+ ) -> Option<Ty<'tcx>> {
+ let tcx = self.tcx();
+ let hir = tcx.hir();
+
+ let hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), ident, .. }) =
+ hir.get(fn_hir_id) else { return None };
+ let hir::Node::Item(hir::Item { kind: hir::ItemKind::Impl(i), .. }) =
+ hir.get(hir.get_parent_node(fn_hir_id)) else { bug!("ImplItem should have Impl parent") };
+
+ let trait_ref =
+ self.instantiate_mono_trait_ref(i.of_trait.as_ref()?, self.ast_ty_to_ty(i.self_ty));
+
+ let assoc = tcx.associated_items(trait_ref.def_id).find_by_name_and_kind(
+ tcx,
+ *ident,
+ ty::AssocKind::Fn,
+ trait_ref.def_id,
+ )?;
+
+ let fn_sig = tcx.bound_fn_sig(assoc.def_id).subst(
+ tcx,
+ trait_ref.substs.extend_to(tcx, assoc.def_id, |param, _| tcx.mk_param_from_def(param)),
+ );
+
+ let ty = if let Some(arg_idx) = arg_idx { fn_sig.input(arg_idx) } else { fn_sig.output() };
+
+ Some(tcx.liberate_late_bound_regions(fn_hir_id.expect_owner().to_def_id(), ty))
+ }
+
+ fn validate_late_bound_regions(
+ &self,
+ constrained_regions: FxHashSet<ty::BoundRegionKind>,
+ referenced_regions: FxHashSet<ty::BoundRegionKind>,
+ generate_err: impl Fn(&str) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ) {
+ for br in referenced_regions.difference(&constrained_regions) {
+ let br_name = match *br {
+ ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(_) | ty::BrEnv => {
+ "an anonymous lifetime".to_string()
+ }
+ ty::BrNamed(_, name) => format!("lifetime `{}`", name),
+ };
+
+ let mut err = generate_err(&br_name);
+
+ if let ty::BrNamed(_, kw::UnderscoreLifetime) | ty::BrAnon(_) = *br {
+ // The only way for an anonymous lifetime to wind up
+ // in the return type but **also** be unconstrained is
+ // if it only appears in "associated types" in the
+ // input. See #47511 and #62200 for examples. In this case,
+ // though, we can easily give a hint that ought to be
+ // relevant.
+ err.note(
+ "lifetimes appearing in an associated type are not considered constrained",
+ );
+ }
+
+ err.emit();
+ }
+ }
+
+ /// Given the bounds on an object, determines what single region bound (if any) we can
+ /// use to summarize this type. The basic idea is that we will use the bound the user
+ /// provided, if they provided one, and otherwise search the supertypes of trait bounds
+ /// for region bounds. It may be that we can derive no bound at all, in which case
+ /// we return `None`.
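+ ///
+ /// For example (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// trait Static: 'static {}
+ /// type A = Box<dyn Static>; // derived bound: `'static`
+ /// trait Plain {}
+ /// type B = Box<dyn Plain>; // nothing derivable: `None`, the caller uses the default
+ /// ```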
+ fn compute_object_lifetime_bound(
+ &self,
+ span: Span,
+ existential_predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Option<ty::Region<'tcx>> // if None, use the default
+ {
+ let tcx = self.tcx();
+
+ debug!("compute_opt_region_bound(existential_predicates={:?})", existential_predicates);
+
+ // No explicit region bound specified. Therefore, examine trait
+ // bounds and see if we can derive region bounds from those.
+ let derived_region_bounds = object_region_bounds(tcx, existential_predicates);
+
+ // If there are no derived region bounds, then report back that we
+ // can find no region bound. The caller will use the default.
+ if derived_region_bounds.is_empty() {
+ return None;
+ }
+
+ // If any of the derived region bounds are 'static, that is always
+ // the best choice.
+ if derived_region_bounds.iter().any(|r| r.is_static()) {
+ return Some(tcx.lifetimes.re_static);
+ }
+
+ // Determine whether there is exactly one unique region in the set
+ // of derived region bounds. If so, use that. Otherwise, report an
+ // error.
+ let r = derived_region_bounds[0];
+ if derived_region_bounds[1..].iter().any(|r1| r != *r1) {
+ tcx.sess.emit_err(AmbiguousLifetimeBound { span });
+ }
+ Some(r)
+ }
+
+ /// Checks that we are in a position to suggest the blanket-implementation alternative.
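+ ///
+ /// Roughly, for code like
+ ///
+ /// ```text
+ /// impl LocalTrait for ObjectTrait { .. }  // bare trait object as self type
+ /// ```
+ ///
+ /// we can suggest the blanket form (`LocalTrait`/`ObjectTrait` are
+ /// illustrative names):
+ ///
+ /// ```text
+ /// impl<T: ObjectTrait> LocalTrait for T { .. }
+ /// ```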
+ fn maybe_lint_blanket_trait_impl<T: rustc_errors::EmissionGuarantee>(
+ &self,
+ self_ty: &hir::Ty<'_>,
+ diag: &mut DiagnosticBuilder<'_, T>,
+ ) {
+ let tcx = self.tcx();
+ let parent_id = tcx.hir().get_parent_item(self_ty.hir_id);
+ if let hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Impl(hir::Impl {
+ self_ty: impl_self_ty, of_trait: Some(of_trait_ref), generics, ..
+ }),
+ ..
+ }) = tcx.hir().get_by_def_id(parent_id) && self_ty.hir_id == impl_self_ty.hir_id
+ {
+ if !of_trait_ref.trait_def_id().map_or(false, |def_id| def_id.is_local()) {
+ return;
+ }
+ let of_trait_span = of_trait_ref.path.span;
+ // avoid `unwrap()` here, which would abort compilation if the snippet is unavailable
+ let Ok(impl_trait_name) = tcx.sess.source_map().span_to_snippet(self_ty.span) else { return; };
+ let Ok(of_trait_name) = tcx.sess.source_map().span_to_snippet(of_trait_span) else { return; };
+ // pick a type parameter name that does not collide with the impl's existing generics
+ let param_name = generics.params.next_type_param_name(None);
+
+ let add_generic_sugg = if let Some(span) = generics.span_for_param_suggestion() {
+ (span, format!(", {}: {}", param_name, impl_trait_name))
+ } else {
+ (generics.span, format!("<{}: {}>", param_name, impl_trait_name))
+ };
+ diag.multipart_suggestion(
+ format!("alternatively use a blanket \
+ implementation to implement `{of_trait_name}` for \
+ all types that also implement `{impl_trait_name}`"),
+ vec![
+ (self_ty.span, param_name),
+ add_generic_sugg,
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
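+ /// Lints bare trait objects, i.e. trait object types written without
+ /// `dyn`. As a sketch:
+ ///
+ /// ```text
+ /// fn f(x: &Trait) {}  // editions < 2021: deprecation lint, suggest `&dyn Trait`
+ ///                     // edition 2021: hard error E0782
+ /// ```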
+ fn maybe_lint_bare_trait(&self, self_ty: &hir::Ty<'_>, in_path: bool) {
+ let tcx = self.tcx();
+ if let hir::TyKind::TraitObject([poly_trait_ref, ..], _, TraitObjectSyntax::None) =
+ self_ty.kind
+ {
+ let needs_bracket = in_path
+ && !tcx
+ .sess
+ .source_map()
+ .span_to_prev_source(self_ty.span)
+ .ok()
+ .map_or(false, |s| s.trim_end().ends_with('<'));
+
+ let is_global = poly_trait_ref.trait_ref.path.is_global();
+ let sugg = Vec::from_iter([
+ (
+ self_ty.span.shrink_to_lo(),
+ format!(
+ "{}dyn {}",
+ if needs_bracket { "<" } else { "" },
+ if is_global { "(" } else { "" },
+ ),
+ ),
+ (
+ self_ty.span.shrink_to_hi(),
+ format!(
+ "{}{}",
+ if is_global { ")" } else { "" },
+ if needs_bracket { ">" } else { "" },
+ ),
+ ),
+ ]);
+ if self_ty.span.edition() >= Edition::Edition2021 {
+ let msg = "trait objects must include the `dyn` keyword";
+ let label = "add `dyn` keyword before this trait";
+ let mut diag =
+ rustc_errors::struct_span_err!(tcx.sess, self_ty.span, E0782, "{}", msg);
+ diag.multipart_suggestion_verbose(label, sugg, Applicability::MachineApplicable);
+ // check if the impl trait that we are considering is an impl of a local trait
+ self.maybe_lint_blanket_trait_impl(&self_ty, &mut diag);
+ diag.emit();
+ } else {
+ let msg = "trait objects without an explicit `dyn` are deprecated";
+ tcx.struct_span_lint_hir(
+ BARE_TRAIT_OBJECTS,
+ self_ty.hir_id,
+ self_ty.span,
+ |lint| {
+ let mut diag = lint.build(msg);
+ diag.multipart_suggestion_verbose(
+ "use `dyn`",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ self.maybe_lint_blanket_trait_impl::<()>(&self_ty, &mut diag);
+ diag.emit();
+ },
+ );
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/bounds.rs b/compiler/rustc_typeck/src/bounds.rs
new file mode 100644
index 000000000..6a28bb16a
--- /dev/null
+++ b/compiler/rustc_typeck/src/bounds.rs
@@ -0,0 +1,90 @@
+//! Bounds are restrictions applied to some types after they've been converted into the
+//! `ty` form from the HIR.
+
+use rustc_middle::ty::{self, ToPredicate, Ty, TyCtxt};
+use rustc_span::Span;
+
+/// Collects together a list of type bounds. These lists of bounds occur in many places
+/// in Rust's syntax:
+///
+/// ```text
+/// trait Foo: Bar + Baz { }
+/// ^^^^^^^^^ supertrait list bounding the `Self` type parameter
+///
+/// fn foo<T: Bar + Baz>() { }
+/// ^^^^^^^^^ bounding the type parameter `T`
+///
+/// impl dyn Bar + Baz
+/// ^^^^^^^^^ bounding the forgotten dynamic type
+/// ```
+///
+/// Our representation is a bit mixed here -- in some cases, we
+ /// include the self type (e.g., `trait_bounds`) but in others we do not.
+#[derive(Default, PartialEq, Eq, Clone, Debug)]
+pub struct Bounds<'tcx> {
+ /// A list of region bounds on the (implicit) self type. So if you
+ /// had `T: 'a + 'b` this would be a list `['a, 'b]` (but
+ /// the `T` is not explicitly included).
+ pub region_bounds: Vec<(ty::Binder<'tcx, ty::Region<'tcx>>, Span)>,
+
+ /// A list of trait bounds. So if you had `T: Debug` this would be
+ /// `T: Debug`. Note that the self-type is explicit here.
+ pub trait_bounds: Vec<(ty::PolyTraitRef<'tcx>, Span, ty::BoundConstness)>,
+
+ /// A list of projection equality bounds. So if you had `T:
+ /// Iterator<Item = u32>` this would include `<T as
+ /// Iterator>::Item => u32`. Note that the self-type is explicit
+ /// here.
+ pub projection_bounds: Vec<(ty::PolyProjectionPredicate<'tcx>, Span)>,
+
+ /// `Some` if there is *no* `?Sized` predicate. The `span`
+ /// is the location in the source of the `T` declaration which can
+ /// be cited as the source of the `T: Sized` requirement.
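+ ///
+ /// E.g., for `fn f<T>()` this would be `Some(span of T)`, while for
+ /// `fn f<T: ?Sized>()` it would be `None`.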
+ pub implicitly_sized: Option<Span>,
+}
+
+impl<'tcx> Bounds<'tcx> {
+ /// Converts a bounds list into a flat set of predicates (like
+ /// where-clauses). Because some of our bounds listings (e.g.,
+ /// regions) don't include the self-type, you must supply the
+ /// self-type here (the `param_ty` parameter).
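+ ///
+ /// As a sketch, for `param_ty = T` and the bounds `T: 'a + Debug`, the
+ /// iterator yields pairs roughly like:
+ ///
+ /// ```text
+ /// (TypeOutlives(T: 'a), span of `'a`)
+ /// (Trait(T: Debug),     span of `Debug`)
+ /// ```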
+ pub fn predicates<'out, 's>(
+ &'s self,
+ tcx: TyCtxt<'tcx>,
+ param_ty: Ty<'tcx>,
+ // the output must not outlive the borrow of `self` or `'tcx`.
+ ) -> impl Iterator<Item = (ty::Predicate<'tcx>, Span)> + 'out
+ where
+ 'tcx: 'out,
+ 's: 'out,
+ {
+ // If it could be sized, and is, add the `Sized` predicate.
+ let sized_predicate = self.implicitly_sized.and_then(|span| {
+ tcx.lang_items().sized_trait().map(move |sized| {
+ let trait_ref = ty::Binder::dummy(ty::TraitRef {
+ def_id: sized,
+ substs: tcx.mk_substs_trait(param_ty, &[]),
+ });
+ (trait_ref.without_const().to_predicate(tcx), span)
+ })
+ });
+
+ let region_preds = self.region_bounds.iter().map(move |&(region_bound, span)| {
+ let pred = region_bound
+ .map_bound(|region_bound| ty::OutlivesPredicate(param_ty, region_bound))
+ .to_predicate(tcx);
+ (pred, span)
+ });
+ let trait_bounds =
+ self.trait_bounds.iter().map(move |&(bound_trait_ref, span, constness)| {
+ let predicate = bound_trait_ref.with_constness(constness).to_predicate(tcx);
+ (predicate, span)
+ });
+ let projection_bounds = self
+ .projection_bounds
+ .iter()
+ .map(move |&(projection, span)| (projection.to_predicate(tcx), span));
+
+ sized_predicate.into_iter().chain(region_preds).chain(trait_bounds).chain(projection_bounds)
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/_match.rs b/compiler/rustc_typeck/src/check/_match.rs
new file mode 100644
index 000000000..1b13c98e4
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/_match.rs
@@ -0,0 +1,529 @@
+use crate::check::coercion::{AsCoercionSite, CoerceMany};
+use crate::check::{Diverges, Expectation, FnCtxt, Needs};
+use rustc_errors::{Applicability, MultiSpan};
+use rustc_hir::{self as hir, ExprKind};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::traits::Obligation;
+use rustc_middle::ty::{self, ToPredicate, Ty, TypeVisitable};
+use rustc_span::Span;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+use rustc_trait_selection::traits::{
+ IfExpressionCause, MatchExpressionArmCause, ObligationCause, ObligationCauseCode,
+};
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ #[instrument(skip(self), level = "debug")]
+ pub fn check_match(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ scrut: &'tcx hir::Expr<'tcx>,
+ arms: &'tcx [hir::Arm<'tcx>],
+ orig_expected: Expectation<'tcx>,
+ match_src: hir::MatchSource,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ let acrb = arms_contain_ref_bindings(arms);
+ let scrutinee_ty = self.demand_scrutinee_type(scrut, acrb, arms.is_empty());
+ debug!(?scrutinee_ty);
+
+ // If there are no arms, that is a diverging match; a special case.
+ if arms.is_empty() {
+ self.diverges.set(self.diverges.get() | Diverges::always(expr.span));
+ return tcx.types.never;
+ }
+
+ self.warn_arms_when_scrutinee_diverges(arms);
+
+ // Otherwise, we have to union together the types that the arms produce and so forth.
+ let scrut_diverges = self.diverges.replace(Diverges::Maybe);
+
+ // #55810: Type check patterns first so we get types for all bindings.
+ let scrut_span = scrut.span.find_ancestor_inside(expr.span).unwrap_or(scrut.span);
+ for arm in arms {
+ self.check_pat_top(&arm.pat, scrutinee_ty, Some(scrut_span), true);
+ }
+
+ // Now typecheck the blocks.
+ //
+ // The result of the match is the common supertype of all the
+ // arms. Start out the value as bottom, since it's the, well,
+ // bottom of the type lattice, and we'll be moving up the lattice as
+ // we process each arm. (Note that any match with 0 arms is matching
+ // on an empty type and is therefore unreachable; should the flow
+ // of execution reach it, we will panic, so bottom is an appropriate
+ // type in that case)
+ let mut all_arms_diverge = Diverges::WarnedAlways;
+
+ let expected = orig_expected.adjust_for_branches(self);
+ debug!(?expected);
+
+ let mut coercion = {
+ let coerce_first = match expected {
+ // We don't coerce to `()` so that if the match expression is a
+ // statement its branches can have any consistent type. That allows
+ // us to give better error messages (pointing to a usually better
+ // arm for inconsistent arms or to the whole match when a `()` type
+ // is required).
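+ //
+ // For instance (sketch), for the statement
+ //
+ //     match n { 0 => 0i32, _ => "s" };
+ //
+ // we can point at `"s"` as the inconsistent arm instead of
+ // demanding `()` for the whole `match`.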
+ Expectation::ExpectHasType(ety) if ety != self.tcx.mk_unit() => ety,
+ _ => self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: expr.span,
+ }),
+ };
+ CoerceMany::with_coercion_sites(coerce_first, arms)
+ };
+
+ let mut other_arms = vec![]; // Used only for diagnostics.
+ let mut prior_arm = None;
+ for arm in arms {
+ if let Some(g) = &arm.guard {
+ self.diverges.set(Diverges::Maybe);
+ match g {
+ hir::Guard::If(e) => {
+ self.check_expr_has_type_or_error(e, tcx.types.bool, |_| {});
+ }
+ hir::Guard::IfLet(l) => {
+ self.check_expr_let(l);
+ }
+ };
+ }
+
+ self.diverges.set(Diverges::Maybe);
+
+ let arm_ty = self.check_expr_with_expectation(&arm.body, expected);
+ all_arms_diverge &= self.diverges.get();
+
+ let opt_suggest_box_span = self.opt_suggest_box_span(arm_ty, orig_expected);
+
+ let (arm_block_id, arm_span) = if let hir::ExprKind::Block(blk, _) = arm.body.kind {
+ (Some(blk.hir_id), self.find_block_span(blk))
+ } else {
+ (None, arm.body.span)
+ };
+
+ let (span, code) = match prior_arm {
+ // The reason for the first arm to fail is not that the match arms diverge,
+ // but rather that there's a prior obligation that doesn't hold.
+ None => (arm_span, ObligationCauseCode::BlockTailExpression(arm.body.hir_id)),
+ Some((prior_arm_block_id, prior_arm_ty, prior_arm_span)) => (
+ expr.span,
+ ObligationCauseCode::MatchExpressionArm(Box::new(MatchExpressionArmCause {
+ arm_block_id,
+ arm_span,
+ arm_ty,
+ prior_arm_block_id,
+ prior_arm_ty,
+ prior_arm_span,
+ scrut_span: scrut.span,
+ source: match_src,
+ prior_arms: other_arms.clone(),
+ scrut_hir_id: scrut.hir_id,
+ opt_suggest_box_span,
+ })),
+ ),
+ };
+ let cause = self.cause(span, code);
+
+ // This is the moral equivalent of `coercion.coerce(self, cause, arm.body, arm_ty)`.
+ // We use it this way to be able to expand on the potential error and detect when a
+ // `match` tail statement could be a tail expression instead. If so, we suggest
+ // removing the stray semicolon.
+ coercion.coerce_inner(
+ self,
+ &cause,
+ Some(&arm.body),
+ arm_ty,
+ Some(&mut |err| {
+ let Some(ret) = self.ret_type_span else {
+ return;
+ };
+ let Expectation::IsLast(stmt) = orig_expected else {
+ return
+ };
+ let can_coerce_to_return_ty = match self.ret_coercion.as_ref() {
+ Some(ret_coercion) if self.in_tail_expr => {
+ let ret_ty = ret_coercion.borrow().expected_ty();
+ let ret_ty = self.inh.infcx.shallow_resolve(ret_ty);
+ self.can_coerce(arm_ty, ret_ty)
+ && prior_arm.map_or(true, |(_, t, _)| self.can_coerce(t, ret_ty))
+ // The match arms need to unify for the case of `impl Trait`.
+ && !matches!(ret_ty.kind(), ty::Opaque(..))
+ }
+ _ => false,
+ };
+ if !can_coerce_to_return_ty {
+ return;
+ }
+
+ let semi_span = expr.span.shrink_to_hi().with_hi(stmt.hi());
+ let mut ret_span: MultiSpan = semi_span.into();
+ ret_span.push_span_label(
+ expr.span,
+ "this could be implicitly returned but it is a statement, not a \
+ tail expression",
+ );
+ ret_span
+ .push_span_label(ret, "the `match` arms can conform to this return type");
+ ret_span.push_span_label(
+ semi_span,
+ "the `match` is a statement because of this semicolon, consider \
+ removing it",
+ );
+ err.span_note(
+ ret_span,
+ "you might have meant to return the `match` expression",
+ );
+ err.tool_only_span_suggestion(
+ semi_span,
+ "remove this semicolon",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }),
+ false,
+ );
+
+ other_arms.push(arm_span);
+ if other_arms.len() > 5 {
+ other_arms.remove(0);
+ }
+
+ prior_arm = Some((arm_block_id, arm_ty, arm_span));
+ }
+
+ // If all of the arms in the `match` diverge,
+ // and we're dealing with an actual `match` block
+ // (as opposed to a `match` desugared from something else),
+ // we can emit a better note. Rather than pointing
+ // at a diverging expression in an arbitrary arm,
+ // we can point at the entire `match` expression
+ if let (Diverges::Always { .. }, hir::MatchSource::Normal) = (all_arms_diverge, match_src) {
+ all_arms_diverge = Diverges::Always {
+ span: expr.span,
+ custom_note: Some(
+ "any code following this `match` expression is unreachable, as all arms diverge",
+ ),
+ };
+ }
+
+ // We won't diverge unless the scrutinee or all arms diverge.
+ self.diverges.set(scrut_diverges | all_arms_diverge);
+
+ let match_ty = coercion.complete(self);
+ debug!(?match_ty);
+ match_ty
+ }
+
+ /// When the previously checked expression (the scrutinee) diverges,
+ /// warn the user about the match arms being unreachable.
+ fn warn_arms_when_scrutinee_diverges(&self, arms: &'tcx [hir::Arm<'tcx>]) {
+ for arm in arms {
+ self.warn_if_unreachable(arm.body.hir_id, arm.body.span, "arm");
+ }
+ }
+
+ /// Handle the fallback arm of a desugared if(-let) like a missing else.
+ ///
+ /// Returns `true` if there was an error forcing the coercion to the `()` type.
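+ ///
+ /// For example, this fires on code like
+ ///
+ /// ```text
+ /// let x: i32 = if cond { 1 };
+ /// //           ------------- error: `if` without `else` evaluates to `()`
+ /// ```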
+ pub(super) fn if_fallback_coercion<T>(
+ &self,
+ span: Span,
+ then_expr: &'tcx hir::Expr<'tcx>,
+ coercion: &mut CoerceMany<'tcx, '_, T>,
+ ) -> bool
+ where
+ T: AsCoercionSite,
+ {
+ // If this `if` expr is the parent's function return expr,
+ // the cause of the type coercion is the return type, point at it. (#25228)
+ let ret_reason = self.maybe_get_coercion_reason(then_expr.hir_id, span);
+ let cause = self.cause(span, ObligationCauseCode::IfExpressionWithNoElse);
+ let mut error = false;
+ coercion.coerce_forced_unit(
+ self,
+ &cause,
+ &mut |err| {
+ if let Some((span, msg)) = &ret_reason {
+ err.span_label(*span, msg);
+ } else if let ExprKind::Block(block, _) = &then_expr.kind
+ && let Some(expr) = &block.expr
+ {
+ err.span_label(expr.span, "found here");
+ }
+ err.note("`if` expressions without `else` evaluate to `()`");
+ err.help("consider adding an `else` block that evaluates to the expected type");
+ error = true;
+ },
+ ret_reason.is_none(),
+ );
+ error
+ }
+
+ fn maybe_get_coercion_reason(&self, hir_id: hir::HirId, sp: Span) -> Option<(Span, String)> {
+ let node = {
+ let rslt = self.tcx.hir().get_parent_node(self.tcx.hir().get_parent_node(hir_id));
+ self.tcx.hir().get(rslt)
+ };
+ if let hir::Node::Block(block) = node {
+ // check that the body's parent is an fn
+ let parent = self
+ .tcx
+ .hir()
+ .get(self.tcx.hir().get_parent_node(self.tcx.hir().get_parent_node(block.hir_id)));
+ if let (Some(expr), hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(..), .. })) =
+ (&block.expr, parent)
+ {
+ // check that the `if` expr without `else` is the fn body's expr
+ if expr.span == sp {
+ return self.get_fn_decl(hir_id).and_then(|(fn_decl, _)| {
+ let span = fn_decl.output.span();
+ let snippet = self.tcx.sess.source_map().span_to_snippet(span).ok()?;
+ Some((span, format!("expected `{snippet}` because of this return type")))
+ });
+ }
+ }
+ }
+ if let hir::Node::Local(hir::Local { ty: Some(_), pat, .. }) = node {
+ return Some((pat.span, "expected because of this assignment".to_string()));
+ }
+ None
+ }
+
+ pub(crate) fn if_cause(
+ &self,
+ span: Span,
+ cond_span: Span,
+ then_expr: &'tcx hir::Expr<'tcx>,
+ else_expr: &'tcx hir::Expr<'tcx>,
+ then_ty: Ty<'tcx>,
+ else_ty: Ty<'tcx>,
+ opt_suggest_box_span: Option<Span>,
+ ) -> ObligationCause<'tcx> {
+ let mut outer_span = if self.tcx.sess.source_map().is_multiline(span) {
+ // The `if`/`else` isn't in one line in the output, include some context to make it
+ // clear it is an if/else expression:
+ // ```
+ // LL | let x = if true {
+ // | _____________-
+ // LL || 10i32
+ // || ----- expected because of this
+ // LL || } else {
+ // LL || 10u32
+ // || ^^^^^ expected `i32`, found `u32`
+ // LL || };
+ // ||_____- `if` and `else` have incompatible types
+ // ```
+ Some(span)
+ } else {
+ // The entire expression is in one line, only point at the arms
+ // ```
+ // LL | let x = if true { 10i32 } else { 10u32 };
+ // | ----- ^^^^^ expected `i32`, found `u32`
+ // | |
+ // | expected because of this
+ // ```
+ None
+ };
+
+ let (error_sp, else_id) = if let ExprKind::Block(block, _) = &else_expr.kind {
+ let block = block.innermost_block();
+
+ // Avoid overlapping spans that aren't as readable:
+ // ```
+ // 2 | let x = if true {
+ // | _____________-
+ // 3 | | 3
+ // | | - expected because of this
+ // 4 | | } else {
+ // | |____________^
+ // 5 | ||
+ // 6 | || };
+ // | || ^
+ // | ||_____|
+ // | |______if and else have incompatible types
+ // | expected integer, found `()`
+ // ```
+ // by not pointing at the entire expression:
+ // ```
+ // 2 | let x = if true {
+ // | ------- `if` and `else` have incompatible types
+ // 3 | 3
+ // | - expected because of this
+ // 4 | } else {
+ // | ____________^
+ // 5 | |
+ // 6 | | };
+ // | |_____^ expected integer, found `()`
+ // ```
+ if block.expr.is_none() && block.stmts.is_empty()
+ && let Some(outer_span) = &mut outer_span
+ && let Some(cond_span) = cond_span.find_ancestor_inside(*outer_span)
+ {
+ *outer_span = outer_span.with_hi(cond_span.hi())
+ }
+
+ (self.find_block_span(block), block.hir_id)
+ } else {
+ (else_expr.span, else_expr.hir_id)
+ };
+
+ let then_id = if let ExprKind::Block(block, _) = &then_expr.kind {
+ let block = block.innermost_block();
+ // Exclude overlapping spans
+ if block.expr.is_none() && block.stmts.is_empty() {
+ outer_span = None;
+ }
+ block.hir_id
+ } else {
+ then_expr.hir_id
+ };
+
+ // Finally construct the cause:
+ self.cause(
+ error_sp,
+ ObligationCauseCode::IfExpression(Box::new(IfExpressionCause {
+ else_id,
+ then_id,
+ then_ty,
+ else_ty,
+ outer_span,
+ opt_suggest_box_span,
+ })),
+ )
+ }
+
+ pub(super) fn demand_scrutinee_type(
+ &self,
+ scrut: &'tcx hir::Expr<'tcx>,
+ contains_ref_bindings: Option<hir::Mutability>,
+ no_arms: bool,
+ ) -> Ty<'tcx> {
+ // Not entirely obvious: if matches may create ref bindings, we want to
+ // use the *precise* type of the scrutinee, *not* some supertype, as
+ // the "scrutinee type" (issue #23116).
+ //
+ // arielb1 [writes here in this comment thread][c] that there
+ // is certainly *some* potential danger, e.g., for an example
+ // like:
+ //
+ // [c]: https://github.com/rust-lang/rust/pull/43399#discussion_r130223956
+ //
+ // ```
+ // let Foo(x) = f()[0];
+ // ```
+ //
+ // Then if the pattern matches by reference, we want to match
+ // `f()[0]` as a lexpr, so we can't allow it to be
+ // coerced. But if the pattern matches by value, `f()[0]` is
+ // still syntactically a lexpr, but we *do* want to allow
+ // coercions.
+ //
+ // However, *likely* we are ok with allowing coercions to
+ // happen if there are no explicit ref mut patterns - all
+ // implicit ref mut patterns must occur behind a reference, so
+ // they will have the "correct" variance and lifetime.
+ //
+ // This does mean that the following pattern would be legal:
+ //
+ // ```
+ // struct Foo(Bar);
+ // struct Bar(u32);
+ // impl Deref for Foo {
+ // type Target = Bar;
+ // fn deref(&self) -> &Bar { &self.0 }
+ // }
+ // impl DerefMut for Foo {
+ // fn deref_mut(&mut self) -> &mut Bar { &mut self.0 }
+ // }
+ // fn foo(x: &mut Foo) {
+ // {
+ // let Bar(z): &mut Bar = x;
+ // *z = 42;
+ // }
+ // assert_eq!(x.0.0, 42);
+ // }
+ // ```
+ //
+ // FIXME(tschottdorf): don't call contains_explicit_ref_binding, which
+ // is problematic as the HIR is being scraped, but ref bindings may be
+ // implicit after #42640. We need to make sure that pat_adjustments
+ // (once introduced) is populated by the time we get here.
+ //
+ // See #44848.
+ if let Some(m) = contains_ref_bindings {
+ self.check_expr_with_needs(scrut, Needs::maybe_mut_place(m))
+ } else if no_arms {
+ self.check_expr(scrut)
+ } else {
+ // ...but otherwise we want to use any supertype of the
+ // scrutinee. This is sort of a workaround, see note (*) in
+ // `check_pat` for some details.
+ let scrut_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: scrut.span,
+ });
+ self.check_expr_has_type_or_error(scrut, scrut_ty, |_| {});
+ scrut_ty
+ }
+ }
+
+ // When we have a `match` as a tail expression in a `fn` that returns
+ // `impl Trait`, we check whether the different arms would work with
+ // boxed trait objects instead, and provide a structured suggestion
+ // in that case.
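+ //
+ // For example (sketch), for
+ //
+ //     fn f(x: bool) -> impl Debug {
+ //         match x {
+ //             true => 0u8,
+ //             false => "a", // arms don't unify under `impl Debug`
+ //         }
+ //     }
+ //
+ // we may point at the return type and suggest `Box<dyn Debug>`.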
+ pub(crate) fn opt_suggest_box_span(
+ &self,
+ outer_ty: Ty<'tcx>,
+ orig_expected: Expectation<'tcx>,
+ ) -> Option<Span> {
+ match orig_expected {
+ Expectation::ExpectHasType(expected)
+ if self.in_tail_expr
+ && self.ret_coercion.as_ref()?.borrow().merged_ty().has_opaque_types()
+ && self.can_coerce(outer_ty, expected) =>
+ {
+ let obligations = self.fulfillment_cx.borrow().pending_obligations();
+ let mut suggest_box = !obligations.is_empty();
+ for o in obligations {
+ match o.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(t) => {
+ let pred =
+ ty::Binder::dummy(ty::PredicateKind::Trait(ty::TraitPredicate {
+ trait_ref: ty::TraitRef {
+ def_id: t.def_id(),
+ substs: self.tcx.mk_substs_trait(outer_ty, &[]),
+ },
+ constness: t.constness,
+ polarity: t.polarity,
+ }));
+ let obl = Obligation::new(
+ o.cause.clone(),
+ self.param_env,
+ pred.to_predicate(self.tcx),
+ );
+ suggest_box &= self.predicate_must_hold_modulo_regions(&obl);
+ if !suggest_box {
+ // We've encountered some obligation that didn't hold, so the
+ // return expression can't just be boxed. We don't need to
+ // evaluate the rest of the obligations.
+ break;
+ }
+ }
+ _ => {}
+ }
+ }
+ // If all the obligations hold (or there are no obligations), we can
+ // suggest returning a boxed trait object for the tail expression
+ // instead of an opaque type.
+ if suggest_box { self.ret_type_span } else { None }
+ }
+ _ => None,
+ }
+ }
+}
+
+fn arms_contain_ref_bindings<'tcx>(arms: &'tcx [hir::Arm<'tcx>]) -> Option<hir::Mutability> {
+ arms.iter().filter_map(|a| a.pat.contains_explicit_ref_binding()).max_by_key(|m| match *m {
+ hir::Mutability::Mut => 1,
+ hir::Mutability::Not => 0,
+ })
+}
diff --git a/compiler/rustc_typeck/src/check/autoderef.rs b/compiler/rustc_typeck/src/check/autoderef.rs
new file mode 100644
index 000000000..59c366ad7
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/autoderef.rs
@@ -0,0 +1,78 @@
+//! Some helper functions for `AutoDeref`.
+
+use super::method::MethodCallee;
+use super::{FnCtxt, PlaceOp};
+
+use rustc_infer::infer::InferOk;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, OverloadedDeref};
+use rustc_middle::ty::{self, Ty};
+use rustc_span::Span;
+use rustc_trait_selection::autoderef::{Autoderef, AutoderefKind};
+
+use std::iter;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn autoderef(&'a self, span: Span, base_ty: Ty<'tcx>) -> Autoderef<'a, 'tcx> {
+ Autoderef::new(self, self.param_env, self.body_id, span, base_ty, span)
+ }
+
+ /// Like `autoderef`, but provides a custom `Span` to use for calls to
+ /// an overloaded `Deref` operator
+ pub fn autoderef_overloaded_span(
+ &'a self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ overloaded_span: Span,
+ ) -> Autoderef<'a, 'tcx> {
+ Autoderef::new(self, self.param_env, self.body_id, span, base_ty, overloaded_span)
+ }
+
+ pub fn try_overloaded_deref(
+ &self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ self.try_overloaded_place_op(span, base_ty, &[], PlaceOp::Deref)
+ }
+
+ /// Returns the adjustment steps.
+ pub fn adjust_steps(&self, autoderef: &Autoderef<'a, 'tcx>) -> Vec<Adjustment<'tcx>> {
+ self.register_infer_ok_obligations(self.adjust_steps_as_infer_ok(autoderef))
+ }
+
+ pub fn adjust_steps_as_infer_ok(
+ &self,
+ autoderef: &Autoderef<'a, 'tcx>,
+ ) -> InferOk<'tcx, Vec<Adjustment<'tcx>>> {
+ let mut obligations = vec![];
+ let steps = autoderef.steps();
+ let targets =
+ steps.iter().skip(1).map(|&(ty, _)| ty).chain(iter::once(autoderef.final_ty(false)));
+ let steps: Vec<_> = steps
+ .iter()
+ .map(|&(source, kind)| {
+ if let AutoderefKind::Overloaded = kind {
+ self.try_overloaded_deref(autoderef.span(), source).and_then(
+ |InferOk { value: method, obligations: o }| {
+ obligations.extend(o);
+ if let ty::Ref(region, _, mutbl) = *method.sig.output().kind() {
+ Some(OverloadedDeref {
+ region,
+ mutbl,
+ span: autoderef.overloaded_span(),
+ })
+ } else {
+ None
+ }
+ },
+ )
+ } else {
+ None
+ }
+ })
+ .zip(targets)
+ .map(|(autoderef, target)| Adjustment { kind: Adjust::Deref(autoderef), target })
+ .collect();
+
+ InferOk { obligations, value: steps }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/callee.rs b/compiler/rustc_typeck/src/check/callee.rs
new file mode 100644
index 000000000..75f5aced8
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/callee.rs
@@ -0,0 +1,675 @@
+use super::method::MethodCallee;
+use super::{Expectation, FnCtxt, TupleArgumentsFlag};
+use crate::type_error_struct;
+
+use rustc_errors::{struct_span_err, Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_hir::def::{self, Namespace, Res};
+use rustc_hir::def_id::DefId;
+use rustc_infer::{
+ infer,
+ traits::{self, Obligation},
+};
+use rustc_infer::{
+ infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind},
+ traits::ObligationCause,
+};
+use rustc_middle::ty::adjustment::{
+ Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
+};
+use rustc_middle::ty::subst::{Subst, SubstsRef};
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitable};
+use rustc_span::def_id::LocalDefId;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_target::spec::abi;
+use rustc_trait_selection::autoderef::Autoderef;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+
+use std::iter;
+
+/// Checks that it is legal to call methods of the trait corresponding
+/// to `trait_id` (this only cares about the trait, not the specific
+/// method that is called).
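+///
+/// E.g., an explicit destructor call (sketch):
+///
+/// ```text
+/// x.drop(); // E0040: explicit use of destructor method
+///           // suggestion: `drop(x)`
+/// ```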
+pub fn check_legal_trait_for_method_call(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ receiver: Option<Span>,
+ expr_span: Span,
+ trait_id: DefId,
+) {
+ if tcx.lang_items().drop_trait() == Some(trait_id) {
+ let mut err = struct_span_err!(tcx.sess, span, E0040, "explicit use of destructor method");
+ err.span_label(span, "explicit destructor calls not allowed");
+
+ let (sp, suggestion) = receiver
+ .and_then(|s| tcx.sess.source_map().span_to_snippet(s).ok())
+ .filter(|snippet| !snippet.is_empty())
+ .map(|snippet| (expr_span, format!("drop({snippet})")))
+ .unwrap_or_else(|| (span, "drop".to_string()));
+
+ err.span_suggestion(
+ sp,
+ "consider using `drop` function",
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+
+ err.emit();
+ }
+}
+
+enum CallStep<'tcx> {
+ Builtin(Ty<'tcx>),
+ DeferredClosure(LocalDefId, ty::FnSig<'tcx>),
+ /// E.g., enum variant constructors.
+ Overloaded(MethodCallee<'tcx>),
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn check_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let original_callee_ty = match &callee_expr.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(..) | hir::QPath::TypeRelative(..)) => self
+ .check_expr_with_expectation_and_args(
+ callee_expr,
+ Expectation::NoExpectation,
+ arg_exprs,
+ ),
+ _ => self.check_expr(callee_expr),
+ };
+
+ let expr_ty = self.structurally_resolved_type(call_expr.span, original_callee_ty);
+
+ let mut autoderef = self.autoderef(callee_expr.span, expr_ty);
+ let mut result = None;
+ while result.is_none() && autoderef.next().is_some() {
+ result = self.try_overloaded_call_step(call_expr, callee_expr, arg_exprs, &autoderef);
+ }
+ self.register_predicates(autoderef.into_obligations());
+
+ let output = match result {
+ None => {
+ // this will report an error since original_callee_ty is not a fn
+ self.confirm_builtin_call(
+ call_expr,
+ callee_expr,
+ original_callee_ty,
+ arg_exprs,
+ expected,
+ )
+ }
+
+ Some(CallStep::Builtin(callee_ty)) => {
+ self.confirm_builtin_call(call_expr, callee_expr, callee_ty, arg_exprs, expected)
+ }
+
+ Some(CallStep::DeferredClosure(def_id, fn_sig)) => {
+ self.confirm_deferred_closure_call(call_expr, arg_exprs, expected, def_id, fn_sig)
+ }
+
+ Some(CallStep::Overloaded(method_callee)) => {
+ self.confirm_overloaded_call(call_expr, arg_exprs, expected, method_callee)
+ }
+ };
+
+ // we must check that the return type of the called function is WF:
+ self.register_wf_obligation(output.into(), call_expr.span, traits::WellFormed(None));
+
+ output
+ }
+
+ fn try_overloaded_call_step(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ autoderef: &Autoderef<'a, 'tcx>,
+ ) -> Option<CallStep<'tcx>> {
+ let adjusted_ty =
+ self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
+ debug!(
+ "try_overloaded_call_step(call_expr={:?}, adjusted_ty={:?})",
+ call_expr, adjusted_ty
+ );
+
+ // If the callee is a bare function or a closure, then we're all set.
+ match *adjusted_ty.kind() {
+ ty::FnDef(..) | ty::FnPtr(_) => {
+ let adjustments = self.adjust_steps(autoderef);
+ self.apply_adjustments(callee_expr, adjustments);
+ return Some(CallStep::Builtin(adjusted_ty));
+ }
+
+ ty::Closure(def_id, substs) => {
+ let def_id = def_id.expect_local();
+
+ // Check whether this is a call to a closure where we
+ // haven't yet decided on whether the closure is fn vs
+ // fnmut vs fnonce. If so, we have to defer further processing.
+ if self.closure_kind(substs).is_none() {
+ let closure_sig = substs.as_closure().sig();
+ let closure_sig = self.replace_bound_vars_with_fresh_vars(
+ call_expr.span,
+ infer::FnCall,
+ closure_sig,
+ );
+ let adjustments = self.adjust_steps(autoderef);
+ self.record_deferred_call_resolution(
+ def_id,
+ DeferredCallResolution {
+ call_expr,
+ callee_expr,
+ adjusted_ty,
+ adjustments,
+ fn_sig: closure_sig,
+ closure_substs: substs,
+ },
+ );
+ return Some(CallStep::DeferredClosure(def_id, closure_sig));
+ }
+ }
+
+ // Hack: we know that there are traits implementing Fn for &F
+ // where F:Fn and so forth. In the particular case of types
+ // like `x: &mut FnMut()`, if there is a call `x()`, we would
+ // normally translate to `FnMut::call_mut(&mut x, ())`, but
+ // that winds up requiring `mut x: &mut FnMut()`. A little
+ // over the top. The simplest fix by far is to just ignore
+ // this case and deref again, so we wind up with
+ // `FnMut::call_mut(&mut *x, ())`.
+ ty::Ref(..) if autoderef.step_count() == 0 => {
+ return None;
+ }
+
+ _ => {}
+ }
+
+ // Now, we look for the implementation of a Fn trait on the object's type.
+ // We first do it with the explicit instruction to look for an impl of
+ // `Fn<Tuple>`, with the tuple `Tuple` having an arity corresponding
+ // to the number of call parameters.
+ // If that fails (or_else branch), we try again without specifying the
+ // shape of the tuple (hence the `None`). This lets us detect that an
+ // `Fn` trait is implemented, and use that information for diagnostics.
+ self.try_overloaded_call_traits(call_expr, adjusted_ty, Some(arg_exprs))
+ .or_else(|| self.try_overloaded_call_traits(call_expr, adjusted_ty, None))
+ .map(|(autoref, method)| {
+ let mut adjustments = self.adjust_steps(autoderef);
+ adjustments.extend(autoref);
+ self.apply_adjustments(callee_expr, adjustments);
+ CallStep::Overloaded(method)
+ })
+ }
+
+ fn try_overloaded_call_traits(
+ &self,
+ call_expr: &hir::Expr<'_>,
+ adjusted_ty: Ty<'tcx>,
+ opt_arg_exprs: Option<&'tcx [hir::Expr<'tcx>]>,
+ ) -> Option<(Option<Adjustment<'tcx>>, MethodCallee<'tcx>)> {
+ // Try the options that are least restrictive on the caller first.
+ for (opt_trait_def_id, method_name, borrow) in [
+ (self.tcx.lang_items().fn_trait(), Ident::with_dummy_span(sym::call), true),
+ (self.tcx.lang_items().fn_mut_trait(), Ident::with_dummy_span(sym::call_mut), true),
+ (self.tcx.lang_items().fn_once_trait(), Ident::with_dummy_span(sym::call_once), false),
+ ] {
+ let Some(trait_def_id) = opt_trait_def_id else { continue };
+
+ let opt_input_types = opt_arg_exprs.map(|arg_exprs| {
+ [self.tcx.mk_tup(arg_exprs.iter().map(|e| {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: e.span,
+ })
+ }))]
+ });
+ let opt_input_types = opt_input_types.as_ref().map(AsRef::as_ref);
+
+ if let Some(ok) = self.lookup_method_in_trait(
+ call_expr.span,
+ method_name,
+ trait_def_id,
+ adjusted_ty,
+ opt_input_types,
+ ) {
+ let method = self.register_infer_ok_obligations(ok);
+ let mut autoref = None;
+ if borrow {
+ // Check for &self vs &mut self in the method signature. Since this is either
+ // the Fn or FnMut trait, it should be one of those.
+ let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() else {
+ // The `fn`/`fn_mut` lang item is ill-formed, which should have
+ // caused an error elsewhere.
+ self.tcx
+ .sess
+ .delay_span_bug(call_expr.span, "input to call/call_mut is not a ref?");
+ return None;
+ };
+
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // For initial two-phase borrow
+ // deployment, conservatively omit
+ // overloaded function call ops.
+ allow_two_phase_borrow: AllowTwoPhase::No,
+ },
+ };
+ autoref = Some(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ target: method.sig.inputs()[0],
+ });
+ }
+ return Some((autoref, method));
+ }
+ }
+
+ None
+ }
+
+ /// Give an appropriate suggestion when encountering `||{/* not callable */}()`, where the
+ /// likely intention is to create and call the closure: suggest `(||{})()`. (#55851)
+ fn identify_bad_closure_def_and_call(
+ &self,
+ err: &mut Diagnostic,
+ hir_id: hir::HirId,
+ callee_node: &hir::ExprKind<'_>,
+ callee_span: Span,
+ ) {
+ let hir = self.tcx.hir();
+ let parent_hir_id = hir.get_parent_node(hir_id);
+ let parent_node = hir.get(parent_hir_id);
+ if let (
+ hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, body, .. }),
+ ..
+ }),
+ hir::ExprKind::Block(..),
+ ) = (parent_node, callee_node)
+ {
+ let fn_decl_span = if hir.body(body).generator_kind
+ == Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Closure))
+ {
+ // Actually need to unwrap a few more layers of HIR to get to
+ // the _real_ closure...
+ let async_closure = hir.get_parent_node(hir.get_parent_node(parent_hir_id));
+ if let hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. }),
+ ..
+ }) = hir.get(async_closure)
+ {
+ fn_decl_span
+ } else {
+ return;
+ }
+ } else {
+ fn_decl_span
+ };
+
+ let start = fn_decl_span.shrink_to_lo();
+ let end = callee_span.shrink_to_hi();
+ err.multipart_suggestion(
+ "if you meant to create this closure and immediately call it, surround the \
+ closure with parentheses",
+ vec![(start, "(".to_string()), (end, ")".to_string())],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ /// Give appropriate suggestion when encountering `[("a", 0) ("b", 1)]`, where the
+ /// likely intention is to create an array containing tuples.
+ fn maybe_suggest_bad_array_definition(
+ &self,
+ err: &mut Diagnostic,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ ) -> bool {
+ let hir_id = self.tcx.hir().get_parent_node(call_expr.hir_id);
+ let parent_node = self.tcx.hir().get(hir_id);
+ if let (
+ hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Array(_), .. }),
+ hir::ExprKind::Tup(exp),
+ hir::ExprKind::Call(_, args),
+ ) = (parent_node, &callee_expr.kind, &call_expr.kind)
+ && args.len() == exp.len()
+ {
+ let start = callee_expr.span.shrink_to_hi();
+ err.span_suggestion(
+ start,
+ "consider separating array elements with a comma",
+ ",",
+ Applicability::MaybeIncorrect,
+ );
+ return true;
+ }
+ false
+ }
+
+ fn confirm_builtin_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ callee_ty: Ty<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let (fn_sig, def_id) = match *callee_ty.kind() {
+ ty::FnDef(def_id, subst) => {
+ let fn_sig = self.tcx.bound_fn_sig(def_id).subst(self.tcx, subst);
+
+ // Unit testing: function items annotated with
+ // `#[rustc_evaluate_where_clauses]` trigger special output
+ // to let us test the trait evaluation system.
+ if self.tcx.has_attr(def_id, sym::rustc_evaluate_where_clauses) {
+ let predicates = self.tcx.predicates_of(def_id);
+ let predicates = predicates.instantiate(self.tcx, subst);
+ for (predicate, predicate_span) in
+ predicates.predicates.iter().zip(&predicates.spans)
+ {
+ let obligation = Obligation::new(
+ ObligationCause::dummy_with_span(callee_expr.span),
+ self.param_env,
+ *predicate,
+ );
+ let result = self.evaluate_obligation(&obligation);
+ self.tcx
+ .sess
+ .struct_span_err(
+ callee_expr.span,
+ &format!("evaluate({:?}) = {:?}", predicate, result),
+ )
+ .span_label(*predicate_span, "predicate")
+ .emit();
+ }
+ }
+ (fn_sig, Some(def_id))
+ }
+ ty::FnPtr(sig) => (sig, None),
+ _ => {
+ let mut unit_variant = None;
+ if let hir::ExprKind::Path(qpath) = &callee_expr.kind
+ && let Res::Def(def::DefKind::Ctor(kind, def::CtorKind::Const), _)
+ = self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
+ // Only suggest removing parens if there are no arguments
+ && arg_exprs.is_empty()
+ {
+ let descr = match kind {
+ def::CtorOf::Struct => "struct",
+ def::CtorOf::Variant => "enum variant",
+ };
+ let removal_span =
+ callee_expr.span.shrink_to_hi().to(call_expr.span.shrink_to_hi());
+ unit_variant =
+ Some((removal_span, descr, rustc_hir_pretty::qpath_to_string(qpath)));
+ }
+
+ let callee_ty = self.resolve_vars_if_possible(callee_ty);
+ let mut err = type_error_struct!(
+ self.tcx.sess,
+ callee_expr.span,
+ callee_ty,
+ E0618,
+ "expected function, found {}",
+ match &unit_variant {
+ Some((_, kind, path)) => format!("{kind} `{path}`"),
+ None => format!("`{callee_ty}`"),
+ }
+ );
+
+ self.identify_bad_closure_def_and_call(
+ &mut err,
+ call_expr.hir_id,
+ &callee_expr.kind,
+ callee_expr.span,
+ );
+
+ if let Some((removal_span, kind, path)) = &unit_variant {
+ err.span_suggestion_verbose(
+ *removal_span,
+ &format!(
+ "`{path}` is a unit {kind}, and does not take parentheses to be constructed",
+ ),
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+
+ let mut inner_callee_path = None;
+ let def = match callee_expr.kind {
+ hir::ExprKind::Path(ref qpath) => {
+ self.typeck_results.borrow().qpath_res(qpath, callee_expr.hir_id)
+ }
+ hir::ExprKind::Call(ref inner_callee, _) => {
+ // If the call spans more than one line and the callee kind is
+ // itself another `ExprCall`, that's a clue that we might just be
+ // missing a semicolon (Issue #51055)
+ let call_is_multiline =
+ self.tcx.sess.source_map().is_multiline(call_expr.span);
+ if call_is_multiline {
+ err.span_suggestion(
+ callee_expr.span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ if let hir::ExprKind::Path(ref inner_qpath) = inner_callee.kind {
+ inner_callee_path = Some(inner_qpath);
+ self.typeck_results.borrow().qpath_res(inner_qpath, inner_callee.hir_id)
+ } else {
+ Res::Err
+ }
+ }
+ _ => Res::Err,
+ };
+
+ if !self.maybe_suggest_bad_array_definition(&mut err, call_expr, callee_expr) {
+ err.span_label(call_expr.span, "call expression requires function");
+ }
+
+ if let Some(span) = self.tcx.hir().res_span(def) {
+ let callee_ty = callee_ty.to_string();
+ let label = match (unit_variant, inner_callee_path) {
+ (Some((_, kind, path)), _) => Some(format!("{kind} `{path}` defined here")),
+ (_, Some(hir::QPath::Resolved(_, path))) => self
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(path.span)
+ .ok()
+ .map(|p| format!("`{p}` defined here returns `{callee_ty}`")),
+ _ => {
+ match def {
+ // Emit a different diagnostic for local variables, as they are not
+ // type definitions themselves, but rather variables *of* that type.
+ Res::Local(hir_id) => Some(format!(
+ "`{}` has type `{}`",
+ self.tcx.hir().name(hir_id),
+ callee_ty
+ )),
+ Res::Def(kind, def_id) if kind.ns() == Some(Namespace::ValueNS) => {
+ Some(format!(
+ "`{}` defined here",
+ self.tcx.def_path_str(def_id),
+ ))
+ }
+ _ => Some(format!("`{callee_ty}` defined here")),
+ }
+ }
+ };
+ if let Some(label) = label {
+ err.span_label(span, label);
+ }
+ }
+ err.emit();
+
+ // This is the "default" function signature, used in case of error.
+ // In that case, we check each argument against "error" in order to
+ // set up all the node type bindings.
+ (
+ ty::Binder::dummy(self.tcx.mk_fn_sig(
+ self.err_args(arg_exprs.len()).into_iter(),
+ self.tcx.ty_error(),
+ false,
+ hir::Unsafety::Normal,
+ abi::Abi::Rust,
+ )),
+ None,
+ )
+ }
+ };
+
+ // Replace any late-bound regions that appear in the function
+ // signature with region variables. We also have to
+ // renormalize the associated types at this point, since they
+ // previously appeared within a `Binder<>` and hence would not
+ // have been normalized before.
+ let fn_sig = self.replace_bound_vars_with_fresh_vars(call_expr.span, infer::FnCall, fn_sig);
+ let fn_sig = self.normalize_associated_types_in(call_expr.span, fn_sig);
+
+ // Call the generic checker.
+ let expected_arg_tys = self.expected_inputs_for_expected_output(
+ call_expr.span,
+ expected,
+ fn_sig.output(),
+ fn_sig.inputs(),
+ );
+ self.check_argument_types(
+ call_expr.span,
+ call_expr,
+ fn_sig.inputs(),
+ expected_arg_tys,
+ arg_exprs,
+ fn_sig.c_variadic,
+ TupleArgumentsFlag::DontTupleArguments,
+ def_id,
+ );
+
+ fn_sig.output()
+ }
+
+ fn confirm_deferred_closure_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ closure_def_id: LocalDefId,
+ fn_sig: ty::FnSig<'tcx>,
+ ) -> Ty<'tcx> {
+ // `fn_sig` is the *signature* of the closure being called. We
+ // don't know the full details yet (`Fn` vs `FnMut` etc), but we
+ // do know the types expected for each argument and the return
+ // type.
+
+ let expected_arg_tys = self.expected_inputs_for_expected_output(
+ call_expr.span,
+ expected,
+ fn_sig.output(),
+ fn_sig.inputs(),
+ );
+
+ self.check_argument_types(
+ call_expr.span,
+ call_expr,
+ fn_sig.inputs(),
+ expected_arg_tys,
+ arg_exprs,
+ fn_sig.c_variadic,
+ TupleArgumentsFlag::TupleArguments,
+ Some(closure_def_id.to_def_id()),
+ );
+
+ fn_sig.output()
+ }
+
+ fn confirm_overloaded_call(
+ &self,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ arg_exprs: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ method_callee: MethodCallee<'tcx>,
+ ) -> Ty<'tcx> {
+ let output_type = self.check_method_argument_types(
+ call_expr.span,
+ call_expr,
+ Ok(method_callee),
+ arg_exprs,
+ TupleArgumentsFlag::TupleArguments,
+ expected,
+ );
+
+ self.write_method_call(call_expr.hir_id, method_callee);
+ output_type
+ }
+}
+
+#[derive(Debug)]
+pub struct DeferredCallResolution<'tcx> {
+ call_expr: &'tcx hir::Expr<'tcx>,
+ callee_expr: &'tcx hir::Expr<'tcx>,
+ adjusted_ty: Ty<'tcx>,
+ adjustments: Vec<Adjustment<'tcx>>,
+ fn_sig: ty::FnSig<'tcx>,
+ closure_substs: SubstsRef<'tcx>,
+}
+
+impl<'a, 'tcx> DeferredCallResolution<'tcx> {
+ pub fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) {
+ debug!("DeferredCallResolution::resolve() {:?}", self);
+
+ // we should not be invoked until the closure kind has been
+ // determined by upvar inference
+ assert!(fcx.closure_kind(self.closure_substs).is_some());
+
+ // We may now know enough to figure out fn vs fnmut etc.
+ match fcx.try_overloaded_call_traits(self.call_expr, self.adjusted_ty, None) {
+ Some((autoref, method_callee)) => {
+ // One problem is that when we get here, we are going
+ // to have a newly instantiated function signature
+ // from the call trait. This has to be reconciled with
+ // the older function signature we had before. In
+ // principle we *should* be able to just unify the two
+ // fn sigs, but we can't because of the annoying need
+ // for a TypeTrace.
+ // (This always bites me, should find a way to
+ // refactor it.)
+ let method_sig = method_callee.sig;
+
+ debug!("attempt_resolution: method_callee={:?}", method_callee);
+
+ for (method_arg_ty, self_arg_ty) in
+ iter::zip(method_sig.inputs().iter().skip(1), self.fn_sig.inputs())
+ {
+ fcx.demand_eqtype(self.call_expr.span, *self_arg_ty, *method_arg_ty);
+ }
+
+ fcx.demand_eqtype(self.call_expr.span, method_sig.output(), self.fn_sig.output());
+
+ let mut adjustments = self.adjustments;
+ adjustments.extend(autoref);
+ fcx.apply_adjustments(self.callee_expr, adjustments);
+
+ fcx.write_method_call(self.call_expr.hir_id, method_callee);
+ }
+ None => {
+ // This can happen if `#![no_core]` is used and the `fn/fn_mut/fn_once`
+ // lang items are not defined (issue #86238).
+ let mut err = fcx.inh.tcx.sess.struct_span_err(
+ self.call_expr.span,
+ "failed to find an overloaded call trait for closure call",
+ );
+ err.help(
+ "make sure the `fn`/`fn_mut`/`fn_once` lang items are defined \
+ and have associated `call`/`call_mut`/`call_once` functions",
+ );
+ err.emit();
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/cast.rs b/compiler/rustc_typeck/src/check/cast.rs
new file mode 100644
index 000000000..7aaddc2bd
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/cast.rs
@@ -0,0 +1,1072 @@
+//! Code for type-checking cast expressions.
+//!
+//! A cast `e as U` is valid if one of the following holds:
+//! * `e` has type `T` and `T` coerces to `U`; *coercion-cast*
+//! * `e` has type `*T`, `U` is `*U_0`, and either `U_0: Sized` or
+//! pointer_kind(`T`) = pointer_kind(`U_0`); *ptr-ptr-cast*
+//! * `e` has type `*T` and `U` is a numeric type, while `T: Sized`; *ptr-addr-cast*
+//! * `e` is an integer and `U` is `*U_0`, while `U_0: Sized`; *addr-ptr-cast*
+//! * `e` has type `T` and `T` and `U` are any numeric types; *numeric-cast*
+//! * `e` is a C-like enum and `U` is an integer type; *enum-cast*
+//! * `e` has type `bool` or `char` and `U` is an integer; *prim-int-cast*
+//! * `e` has type `u8` and `U` is `char`; *u8-char-cast*
+//! * `e` has type `&[T; n]` and `U` is `*const T`; *array-ptr-cast*
+//! * `e` is a function pointer type and `U` has type `*T`,
+//! while `T: Sized`; *fptr-ptr-cast*
+//! * `e` is a function pointer type and `U` is an integer; *fptr-addr-cast*
+//!
+//! where `&T` and `*T` are references of either mutability,
+//! and where pointer_kind(`T`) is the kind of the unsize info
+//! in `T` - the vtable for a trait definition (e.g., `fmt::Display` or
+//! `Iterator`, not `Iterator<Item=u8>`) or a length (or `()` if `T: Sized`).
+//!
+//! Note that lengths are not adjusted when casting raw slices -
+//! `*const [u16] as *const [u8]` creates a slice that only includes
+//! half of the original memory.
+//!
+//! Casting is not transitive, that is, even if `e as U1 as U2` is a valid
+//! expression, `e as U2` is not necessarily so (in fact it will only be valid if
+//! `U1` coerces to `U2`).
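+//!
+//! A few concrete instances of these rules (illustrative only):
+//!
+//! ```text
+//! 1u8 as char              // u8-char-cast
+//! 'a' as u32               // prim-int-cast
+//! &[0u8; 4] as *const u8   // array-ptr-cast
+//! 0usize as *const u8      // addr-ptr-cast (`u8: Sized`)
+//! ```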
+
+use super::FnCtxt;
+
+use crate::hir::def_id::DefId;
+use crate::type_error_struct;
+use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_middle::mir::Mutability;
+use rustc_middle::ty::adjustment::AllowTwoPhase;
+use rustc_middle::ty::cast::{CastKind, CastTy};
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, Ty, TypeAndMut, TypeVisitable};
+use rustc_session::lint;
+use rustc_session::Session;
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits;
+use rustc_trait_selection::traits::error_reporting::report_object_safety_error;
+
+/// Reifies a cast check to be checked once we have full type information for
+/// a function context.
+#[derive(Debug)]
+pub struct CastCheck<'tcx> {
+ expr: &'tcx hir::Expr<'tcx>,
+ expr_ty: Ty<'tcx>,
+ expr_span: Span,
+ cast_ty: Ty<'tcx>,
+ cast_span: Span,
+ span: Span,
+}
+
+/// The kind of pointer and associated metadata (thin, length or vtable) - we
+/// only allow casts between fat pointers if their metadata have the same
+/// kind.
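+///
+/// For example (sketch): `*const [u8] as *const dyn Debug` is rejected,
+/// since the metadata kinds differ (a length vs. a vtable).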
+#[derive(Copy, Clone, PartialEq, Eq)]
+enum PointerKind<'tcx> {
+ /// No metadata attached, i.e., pointer to sized type or foreign type
+ Thin,
+ /// A trait object
+ VTable(Option<DefId>),
+ /// Slice
+ Length,
+ /// The unsize info of this projection
+ OfProjection(&'tcx ty::ProjectionTy<'tcx>),
+ /// The unsize info of this opaque ty
+ OfOpaque(DefId, SubstsRef<'tcx>),
+ /// The unsize info of this parameter
+ OfParam(&'tcx ty::ParamTy),
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Returns the kind of unsize information of t, or None
+ /// if t is unknown.
+ fn pointer_kind(
+ &self,
+ t: Ty<'tcx>,
+ span: Span,
+ ) -> Result<Option<PointerKind<'tcx>>, ErrorGuaranteed> {
+ debug!("pointer_kind({:?}, {:?})", t, span);
+
+ let t = self.resolve_vars_if_possible(t);
+
+ if let Some(reported) = t.error_reported() {
+ return Err(reported);
+ }
+
+ if self.type_is_known_to_be_sized_modulo_regions(t, span) {
+ return Ok(Some(PointerKind::Thin));
+ }
+
+ Ok(match *t.kind() {
+ ty::Slice(_) | ty::Str => Some(PointerKind::Length),
+ ty::Dynamic(ref tty, ..) => Some(PointerKind::VTable(tty.principal_def_id())),
+ ty::Adt(def, substs) if def.is_struct() => match def.non_enum_variant().fields.last() {
+ None => Some(PointerKind::Thin),
+ Some(f) => {
+ let field_ty = self.field_ty(span, f, substs);
+ self.pointer_kind(field_ty, span)?
+ }
+ },
+ ty::Tuple(fields) => match fields.last() {
+ None => Some(PointerKind::Thin),
+ Some(&f) => self.pointer_kind(f, span)?,
+ },
+
+ // Pointers to foreign types are thin, despite being unsized
+ ty::Foreign(..) => Some(PointerKind::Thin),
+ // We should really try to normalize here.
+ ty::Projection(ref pi) => Some(PointerKind::OfProjection(pi)),
+ ty::Opaque(def_id, substs) => Some(PointerKind::OfOpaque(def_id, substs)),
+ ty::Param(ref p) => Some(PointerKind::OfParam(p)),
+ // Insufficient type information.
+ ty::Placeholder(..) | ty::Bound(..) | ty::Infer(_) => None,
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(_)
+ | ty::Array(..)
+ | ty::GeneratorWitness(..)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(..)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::Adt(..)
+ | ty::Never
+ | ty::Error(_) => {
+ let reported = self
+ .tcx
+ .sess
+ .delay_span_bug(span, &format!("`{:?}` should be sized but is not?", t));
+ return Err(reported);
+ }
+ })
+ }
+}
+
+#[derive(Copy, Clone)]
+pub enum CastError {
+ ErrorGuaranteed,
+
+ CastToBool,
+ CastToChar,
+ DifferingKinds,
+ /// Cast of thin to fat raw ptr (e.g., `*const () as *const [u8]`).
+ SizedUnsizedCast,
+ IllegalCast,
+ NeedDeref,
+ NeedViaPtr,
+ NeedViaThinPtr,
+ NeedViaInt,
+ NonScalar,
+ UnknownExprPtrKind,
+ UnknownCastPtrKind,
+ /// Cast of int to (possibly) fat raw pointer.
+ ///
+ /// Argument is the specific name of the metadata in plain words, such as "a vtable"
+ /// or "a length". If this argument is None, then the metadata is unknown, for example,
+ /// when we're typechecking a type parameter with a ?Sized bound.
+ IntToFatCast(Option<&'static str>),
+}
+
+impl From<ErrorGuaranteed> for CastError {
+ fn from(_: ErrorGuaranteed) -> Self {
+ CastError::ErrorGuaranteed
+ }
+}
+
+fn make_invalid_casting_error<'a, 'tcx>(
+ sess: &'a Session,
+ span: Span,
+ expr_ty: Ty<'tcx>,
+ cast_ty: Ty<'tcx>,
+ fcx: &FnCtxt<'a, 'tcx>,
+) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ type_error_struct!(
+ sess,
+ span,
+ expr_ty,
+ E0606,
+ "casting `{}` as `{}` is invalid",
+ fcx.ty_to_string(expr_ty),
+ fcx.ty_to_string(cast_ty)
+ )
+}
+
+impl<'a, 'tcx> CastCheck<'tcx> {
+ pub fn new(
+ fcx: &FnCtxt<'a, 'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ expr_ty: Ty<'tcx>,
+ cast_ty: Ty<'tcx>,
+ cast_span: Span,
+ span: Span,
+ ) -> Result<CastCheck<'tcx>, ErrorGuaranteed> {
+ let expr_span = expr.span.find_ancestor_inside(span).unwrap_or(expr.span);
+ let check = CastCheck { expr, expr_ty, expr_span, cast_ty, cast_span, span };
+
+ // For better error messages, check for some obviously unsized
+ // cases now. We do a more thorough check at the end, once
+ // inference is more completely known.
+ match cast_ty.kind() {
+ ty::Dynamic(..) | ty::Slice(..) => {
+ let reported = check.report_cast_to_unsized_type(fcx);
+ Err(reported)
+ }
+ _ => Ok(check),
+ }
+ }
+
+ fn report_cast_error(&self, fcx: &FnCtxt<'a, 'tcx>, e: CastError) {
+ match e {
+ CastError::ErrorGuaranteed => {
+ // an error has already been reported
+ }
+ CastError::NeedDeref => {
+ let error_span = self.span;
+ let mut err = make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ );
+ let cast_ty = fcx.ty_to_string(self.cast_ty);
+ err.span_label(
+ error_span,
+ format!("cannot cast `{}` as `{}`", fcx.ty_to_string(self.expr_ty), cast_ty),
+ );
+ if let Ok(snippet) = fcx.sess().source_map().span_to_snippet(self.expr_span) {
+ err.span_suggestion(
+ self.expr_span,
+ "dereference the expression",
+ format!("*{}", snippet),
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_help(self.expr_span, "dereference the expression with `*`");
+ }
+ err.emit();
+ }
+ CastError::NeedViaThinPtr | CastError::NeedViaPtr => {
+ let mut err = make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ );
+ if self.cast_ty.is_integral() {
+ err.help(&format!(
+ "cast through {} first",
+ match e {
+ CastError::NeedViaPtr => "a raw pointer",
+ CastError::NeedViaThinPtr => "a thin pointer",
+ _ => bug!(),
+ }
+ ));
+ }
+ err.emit();
+ }
+ CastError::NeedViaInt => {
+ make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ )
+ .help(&format!(
+ "cast through {} first",
+ match e {
+ CastError::NeedViaInt => "an integer",
+ _ => bug!(),
+ }
+ ))
+ .emit();
+ }
+ CastError::IllegalCast => {
+ make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ )
+ .emit();
+ }
+ CastError::DifferingKinds => {
+ make_invalid_casting_error(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ self.cast_ty,
+ fcx,
+ )
+ .note("vtable kinds may not match")
+ .emit();
+ }
+ CastError::CastToBool => {
+ let mut err =
+ struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`");
+
+ if self.expr_ty.is_numeric() {
+ match fcx.tcx.sess.source_map().span_to_snippet(self.expr_span) {
+ Ok(snippet) => {
+ err.span_suggestion(
+ self.span,
+ "compare with zero instead",
+ format!("{snippet} != 0"),
+ Applicability::MachineApplicable,
+ );
+ }
+ Err(_) => {
+ err.span_help(self.span, "compare with zero instead");
+ }
+ }
+ } else {
+ err.span_label(self.span, "unsupported cast");
+ }
+
+ err.emit();
+ }
+ CastError::CastToChar => {
+ let mut err = type_error_struct!(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ E0604,
+ "only `u8` can be cast as `char`, not `{}`",
+ self.expr_ty
+ );
+ err.span_label(self.span, "invalid cast");
+ if self.expr_ty.is_numeric() {
+ if self.expr_ty == fcx.tcx.types.u32 {
+ match fcx.tcx.sess.source_map().span_to_snippet(self.expr.span) {
+ Ok(snippet) => err.span_suggestion(
+ self.span,
+ "try `char::from_u32` instead",
+ format!("char::from_u32({snippet})"),
+ Applicability::MachineApplicable,
+ ),
+
+ Err(_) => err.span_help(self.span, "try `char::from_u32` instead"),
+ };
+ } else if self.expr_ty == fcx.tcx.types.i8 {
+ err.span_help(self.span, "try casting from `u8` instead");
+ } else {
+ err.span_help(self.span, "try `char::from_u32` instead (via a `u32`)");
+ };
+ }
+ err.emit();
+ }
+ CastError::NonScalar => {
+ let mut err = type_error_struct!(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ E0605,
+ "non-primitive cast: `{}` as `{}`",
+ self.expr_ty,
+ fcx.ty_to_string(self.cast_ty)
+ );
+ let mut sugg = None;
+ let mut sugg_mutref = false;
+ if let ty::Ref(reg, cast_ty, mutbl) = *self.cast_ty.kind() {
+ if let ty::RawPtr(TypeAndMut { ty: expr_ty, .. }) = *self.expr_ty.kind()
+ && fcx
+ .try_coerce(
+ self.expr,
+ fcx.tcx.mk_ref(
+ fcx.tcx.lifetimes.re_erased,
+ TypeAndMut { ty: expr_ty, mutbl },
+ ),
+ self.cast_ty,
+ AllowTwoPhase::No,
+ None,
+ )
+ .is_ok()
+ {
+ sugg = Some((format!("&{}*", mutbl.prefix_str()), cast_ty == expr_ty));
+ } else if let ty::Ref(expr_reg, expr_ty, expr_mutbl) = *self.expr_ty.kind()
+ && expr_mutbl == Mutability::Not
+ && mutbl == Mutability::Mut
+ && fcx
+ .try_coerce(
+ self.expr,
+ fcx.tcx.mk_ref(
+ expr_reg,
+ TypeAndMut { ty: expr_ty, mutbl: Mutability::Mut },
+ ),
+ self.cast_ty,
+ AllowTwoPhase::No,
+ None,
+ )
+ .is_ok()
+ {
+ sugg_mutref = true;
+ }
+
+ if !sugg_mutref
+ && sugg == None
+ && fcx
+ .try_coerce(
+ self.expr,
+ fcx.tcx.mk_ref(reg, TypeAndMut { ty: self.expr_ty, mutbl }),
+ self.cast_ty,
+ AllowTwoPhase::No,
+ None,
+ )
+ .is_ok()
+ {
+ sugg = Some((format!("&{}", mutbl.prefix_str()), false));
+ }
+ } else if let ty::RawPtr(TypeAndMut { mutbl, .. }) = *self.cast_ty.kind()
+ && fcx
+ .try_coerce(
+ self.expr,
+ fcx.tcx.mk_ref(
+ fcx.tcx.lifetimes.re_erased,
+ TypeAndMut { ty: self.expr_ty, mutbl },
+ ),
+ self.cast_ty,
+ AllowTwoPhase::No,
+ None,
+ )
+ .is_ok()
+ {
+ sugg = Some((format!("&{}", mutbl.prefix_str()), false));
+ }
+ if sugg_mutref {
+ err.span_label(self.span, "invalid cast");
+ err.span_note(self.expr_span, "this reference is immutable");
+ err.span_note(self.cast_span, "trying to cast to a mutable reference type");
+ } else if let Some((sugg, remove_cast)) = sugg {
+ err.span_label(self.span, "invalid cast");
+
+ let has_parens = fcx
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(self.expr_span)
+ .map_or(false, |snip| snip.starts_with('('));
+
+ // Very crude check to see whether the expression must be wrapped
+ // in parentheses for the suggestion to work (issue #89497).
+ // Can/should be extended in the future.
+ let needs_parens =
+ !has_parens && matches!(self.expr.kind, hir::ExprKind::Cast(..));
+
+ let mut suggestion = vec![(self.expr_span.shrink_to_lo(), sugg)];
+ if needs_parens {
+ suggestion[0].1 += "(";
+ suggestion.push((self.expr_span.shrink_to_hi(), ")".to_string()));
+ }
+ if remove_cast {
+ suggestion.push((
+ self.expr_span.shrink_to_hi().to(self.cast_span),
+ String::new(),
+ ));
+ }
+
+ err.multipart_suggestion_verbose(
+ "consider borrowing the value",
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if !matches!(
+ self.cast_ty.kind(),
+ ty::FnDef(..) | ty::FnPtr(..) | ty::Closure(..)
+ ) {
+ let mut label = true;
+ // Check `impl From<self.expr_ty> for self.cast_ty {}` for accurate suggestion:
+ if let Ok(snippet) = fcx.tcx.sess.source_map().span_to_snippet(self.expr_span)
+ && let Some(from_trait) = fcx.tcx.get_diagnostic_item(sym::From)
+ {
+ let ty = fcx.resolve_vars_if_possible(self.cast_ty);
+ // Erase regions to avoid panic in `prove_value` when calling
+ // `type_implements_trait`.
+ let ty = fcx.tcx.erase_regions(ty);
+ let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty);
+ let expr_ty = fcx.tcx.erase_regions(expr_ty);
+ let ty_params = fcx.tcx.mk_substs_trait(expr_ty, &[]);
+ if fcx
+ .infcx
+ .type_implements_trait(from_trait, ty, ty_params, fcx.param_env)
+ .must_apply_modulo_regions()
+ {
+ label = false;
+ err.span_suggestion(
+ self.span,
+ "consider using the `From` trait instead",
+ format!("{}::from({})", self.cast_ty, snippet),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ let msg = "an `as` expression can only be used to convert between primitive \
+ types or to coerce to a specific trait object";
+ if label {
+ err.span_label(self.span, msg);
+ } else {
+ err.note(msg);
+ }
+ } else {
+ err.span_label(self.span, "invalid cast");
+ }
+ err.emit();
+ }
+ CastError::SizedUnsizedCast => {
+ use crate::structured_errors::{SizedUnsizedCast, StructuredDiagnostic};
+
+ SizedUnsizedCast {
+ sess: &fcx.tcx.sess,
+ span: self.span,
+ expr_ty: self.expr_ty,
+ cast_ty: fcx.ty_to_string(self.cast_ty),
+ }
+ .diagnostic()
+ .emit();
+ }
+ CastError::IntToFatCast(known_metadata) => {
+ let mut err = struct_span_err!(
+ fcx.tcx.sess,
+ self.cast_span,
+ E0606,
+ "cannot cast `{}` to a pointer that {} wide",
+ fcx.ty_to_string(self.expr_ty),
+ if known_metadata.is_some() { "is" } else { "may be" }
+ );
+
+ err.span_label(
+ self.cast_span,
+ format!(
+ "creating a `{}` requires both an address and {}",
+ self.cast_ty,
+ known_metadata.unwrap_or("type-specific metadata"),
+ ),
+ );
+
+ if fcx.tcx.sess.is_nightly_build() {
+ err.span_label(
+ self.expr_span,
+ "consider casting this expression to `*const ()`, \
+ then using `core::ptr::from_raw_parts`",
+ );
+ }
+
+ err.emit();
+ }
+ CastError::UnknownCastPtrKind | CastError::UnknownExprPtrKind => {
+ let unknown_cast_to = match e {
+ CastError::UnknownCastPtrKind => true,
+ CastError::UnknownExprPtrKind => false,
+ _ => bug!(),
+ };
+ let mut err = struct_span_err!(
+ fcx.tcx.sess,
+ if unknown_cast_to { self.cast_span } else { self.span },
+ E0641,
+ "cannot cast {} a pointer of an unknown kind",
+ if unknown_cast_to { "to" } else { "from" }
+ );
+ if unknown_cast_to {
+ err.span_label(self.cast_span, "needs more type information");
+ err.note(
+ "the type information given here is insufficient to check whether \
+ the pointer cast is valid",
+ );
+ } else {
+ err.span_label(
+ self.span,
+ "the type information given here is insufficient to check whether \
+ the pointer cast is valid",
+ );
+ }
+ err.emit();
+ }
+ }
+ }
+
+ fn report_cast_to_unsized_type(&self, fcx: &FnCtxt<'a, 'tcx>) -> ErrorGuaranteed {
+ if let Some(reported) =
+ self.cast_ty.error_reported().or_else(|| self.expr_ty.error_reported())
+ {
+ return reported;
+ }
+
+ let tstr = fcx.ty_to_string(self.cast_ty);
+ let mut err = type_error_struct!(
+ fcx.tcx.sess,
+ self.span,
+ self.expr_ty,
+ E0620,
+ "cast to unsized type: `{}` as `{}`",
+ fcx.resolve_vars_if_possible(self.expr_ty),
+ tstr
+ );
+ match self.expr_ty.kind() {
+ ty::Ref(_, _, mt) => {
+ let mtstr = mt.prefix_str();
+ if self.cast_ty.is_trait() {
+ match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) {
+ Ok(s) => {
+ err.span_suggestion(
+ self.cast_span,
+ "try casting to a reference instead",
+ format!("&{}{}", mtstr, s),
+ Applicability::MachineApplicable,
+ );
+ }
+ Err(_) => {
+ let msg = &format!("did you mean `&{}{}`?", mtstr, tstr);
+ err.span_help(self.cast_span, msg);
+ }
+ }
+ } else {
+ let msg =
+ &format!("consider using an implicit coercion to `&{mtstr}{tstr}` instead");
+ err.span_help(self.span, msg);
+ }
+ }
+ ty::Adt(def, ..) if def.is_box() => {
+ match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) {
+ Ok(s) => {
+ err.span_suggestion(
+ self.cast_span,
+ "you can cast to a `Box` instead",
+ format!("Box<{s}>"),
+ Applicability::MachineApplicable,
+ );
+ }
+ Err(_) => {
+ err.span_help(
+ self.cast_span,
+ &format!("you might have meant `Box<{tstr}>`"),
+ );
+ }
+ }
+ }
+ _ => {
+ err.span_help(self.expr_span, "consider using a box or reference as appropriate");
+ }
+ }
+ err.emit()
+ }
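+
+    // e.g. (a sketch, not exercised here) `&[1u8, 2] as [u8]` is a cast to an
+    // unsized type (E0620); the arms above suggest `&[u8]` for references and
+    // `Box<...>` for boxes.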
+
+ fn trivial_cast_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
+ let t_cast = self.cast_ty;
+ let t_expr = self.expr_ty;
+ let type_asc_or =
+ if fcx.tcx.features().type_ascription { "type ascription or " } else { "" };
+ let (adjective, lint) = if t_cast.is_numeric() && t_expr.is_numeric() {
+ ("numeric ", lint::builtin::TRIVIAL_NUMERIC_CASTS)
+ } else {
+ ("", lint::builtin::TRIVIAL_CASTS)
+ };
+ fcx.tcx.struct_span_lint_hir(lint, self.expr.hir_id, self.span, |err| {
+ err.build(&format!(
+ "trivial {}cast: `{}` as `{}`",
+ adjective,
+ fcx.ty_to_string(t_expr),
+ fcx.ty_to_string(t_cast)
+ ))
+ .help(&format!(
+ "cast can be replaced by coercion; this might \
+ require {type_asc_or}a temporary variable"
+ ))
+ .emit();
+ });
+ }
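+
+    // A small illustration (a sketch, not exercised by this code): with the
+    // `trivial_numeric_casts` lint enabled, `let x = 1i32; let y = x as i32;`
+    // fires it, since the cast can be replaced by coercion.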
+
+ #[instrument(skip(fcx), level = "debug")]
+ pub fn check(mut self, fcx: &FnCtxt<'a, 'tcx>) {
+ self.expr_ty = fcx.structurally_resolved_type(self.expr_span, self.expr_ty);
+ self.cast_ty = fcx.structurally_resolved_type(self.cast_span, self.cast_ty);
+
+ debug!("check_cast({}, {:?} as {:?})", self.expr.hir_id, self.expr_ty, self.cast_ty);
+
+ if !fcx.type_is_known_to_be_sized_modulo_regions(self.cast_ty, self.span)
+ && !self.cast_ty.has_infer_types()
+ {
+ self.report_cast_to_unsized_type(fcx);
+ } else if self.expr_ty.references_error() || self.cast_ty.references_error() {
+ // No sense in giving duplicate error messages
+ } else {
+ match self.try_coercion_cast(fcx) {
+ Ok(()) => {
+ self.trivial_cast_lint(fcx);
+ debug!(" -> CoercionCast");
+ fcx.typeck_results.borrow_mut().set_coercion_cast(self.expr.hir_id.local_id);
+ }
+ Err(ty::error::TypeError::ObjectUnsafeCoercion(did)) => {
+ self.report_object_unsafe_cast(&fcx, did);
+ }
+ Err(_) => {
+ match self.do_check(fcx) {
+ Ok(k) => {
+ debug!(" -> {:?}", k);
+ }
+ Err(e) => self.report_cast_error(fcx, e),
+ };
+ }
+ };
+ }
+ }
+
+ fn report_object_unsafe_cast(&self, fcx: &FnCtxt<'a, 'tcx>, did: DefId) {
+ let violations = fcx.tcx.object_safety_violations(did);
+ let mut err = report_object_safety_error(fcx.tcx, self.cast_span, did, violations);
+ err.note(&format!("required by cast to type '{}'", fcx.ty_to_string(self.cast_ty)));
+ err.emit();
+ }
+
+    /// Checks a cast, and reports an error if one exists. In some cases this
+    /// can return `Ok` and create type errors in the `fcx` rather than returning
+    /// directly. The coercion-cast case is handled in `check` instead of here.
+ pub fn do_check(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<CastKind, CastError> {
+ use rustc_middle::ty::cast::CastTy::*;
+ use rustc_middle::ty::cast::IntTy::*;
+
+ let (t_from, t_cast) = match (CastTy::from_ty(self.expr_ty), CastTy::from_ty(self.cast_ty))
+ {
+ (Some(t_from), Some(t_cast)) => (t_from, t_cast),
+ // Function item types may need to be reified before casts.
+ (None, Some(t_cast)) => {
+ match *self.expr_ty.kind() {
+ ty::FnDef(..) => {
+ // Attempt a coercion to a fn pointer type.
+ let f = fcx.normalize_associated_types_in(
+ self.expr_span,
+ self.expr_ty.fn_sig(fcx.tcx),
+ );
+ let res = fcx.try_coerce(
+ self.expr,
+ self.expr_ty,
+ fcx.tcx.mk_fn_ptr(f),
+ AllowTwoPhase::No,
+ None,
+ );
+ if let Err(TypeError::IntrinsicCast) = res {
+ return Err(CastError::IllegalCast);
+ }
+ if res.is_err() {
+ return Err(CastError::NonScalar);
+ }
+ (FnPtr, t_cast)
+ }
+ // Special case some errors for references, and check for
+ // array-ptr-casts. `Ref` is not a CastTy because the cast
+ // is split into a coercion to a pointer type, followed by
+ // a cast.
+ ty::Ref(_, inner_ty, mutbl) => {
+ return match t_cast {
+ Int(_) | Float => match *inner_ty.kind() {
+ ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) => {
+ Err(CastError::NeedDeref)
+ }
+ _ => Err(CastError::NeedViaPtr),
+ },
+ // array-ptr-cast
+ Ptr(mt) => {
+ self.check_ref_cast(fcx, TypeAndMut { mutbl, ty: inner_ty }, mt)
+ }
+ _ => Err(CastError::NonScalar),
+ };
+ }
+ _ => return Err(CastError::NonScalar),
+ }
+ }
+ _ => return Err(CastError::NonScalar),
+ };
+
+ match (t_from, t_cast) {
+            // These types have invariants! We can't cast into them.
+ (_, Int(CEnum) | FnPtr) => Err(CastError::NonScalar),
+
+ // * -> Bool
+ (_, Int(Bool)) => Err(CastError::CastToBool),
+
+ // * -> Char
+ (Int(U(ty::UintTy::U8)), Int(Char)) => Ok(CastKind::U8CharCast), // u8-char-cast
+ (_, Int(Char)) => Err(CastError::CastToChar),
+
+ // prim -> float,ptr
+ (Int(Bool) | Int(CEnum) | Int(Char), Float) => Err(CastError::NeedViaInt),
+
+ (Int(Bool) | Int(CEnum) | Int(Char) | Float, Ptr(_)) | (Ptr(_) | FnPtr, Float) => {
+ Err(CastError::IllegalCast)
+ }
+
+ // ptr -> *
+ (Ptr(m_e), Ptr(m_c)) => self.check_ptr_ptr_cast(fcx, m_e, m_c), // ptr-ptr-cast
+
+ // ptr-addr-cast
+ (Ptr(m_expr), Int(t_c)) => {
+ self.lossy_provenance_ptr2int_lint(fcx, t_c);
+ self.check_ptr_addr_cast(fcx, m_expr)
+ }
+ (FnPtr, Int(_)) => {
+ // FIXME(#95489): there should eventually be a lint for these casts
+ Ok(CastKind::FnPtrAddrCast)
+ }
+ // addr-ptr-cast
+ (Int(_), Ptr(mt)) => {
+ self.fuzzy_provenance_int2ptr_lint(fcx);
+ self.check_addr_ptr_cast(fcx, mt)
+ }
+ // fn-ptr-cast
+ (FnPtr, Ptr(mt)) => self.check_fptr_ptr_cast(fcx, mt),
+
+ // prim -> prim
+ (Int(CEnum), Int(_)) => {
+ self.cenum_impl_drop_lint(fcx);
+ Ok(CastKind::EnumCast)
+ }
+ (Int(Char) | Int(Bool), Int(_)) => Ok(CastKind::PrimIntCast),
+
+ (Int(_) | Float, Int(_) | Float) => Ok(CastKind::NumericCast),
+ }
+ }
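+
+    // For orientation, a few casts and where the table above sends them
+    // (an informal sketch, not used by the checker):
+    //
+    //     0u8 as char      // Ok(CastKind::U8CharCast)
+    //     'a' as u32       // Ok(CastKind::PrimIntCast)
+    //     3.2f64 as usize  // Ok(CastKind::NumericCast)
+    //     1i32 as bool     // Err(CastError::CastToBool)
+    //     1u32 as char     // Err(CastError::CastToChar)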
+
+ fn check_ptr_ptr_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_expr: ty::TypeAndMut<'tcx>,
+ m_cast: ty::TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+ debug!("check_ptr_ptr_cast m_expr={:?} m_cast={:?}", m_expr, m_cast);
+ // ptr-ptr cast. vtables must match.
+
+ let expr_kind = fcx.pointer_kind(m_expr.ty, self.span)?;
+ let cast_kind = fcx.pointer_kind(m_cast.ty, self.span)?;
+
+ let Some(cast_kind) = cast_kind else {
+            // We can't cast if the target pointer kind is unknown
+ return Err(CastError::UnknownCastPtrKind);
+ };
+
+ // Cast to thin pointer is OK
+ if cast_kind == PointerKind::Thin {
+ return Ok(CastKind::PtrPtrCast);
+ }
+
+ let Some(expr_kind) = expr_kind else {
+            // We can't cast to a fat pointer if the source pointer kind is unknown
+ return Err(CastError::UnknownExprPtrKind);
+ };
+
+ // thin -> fat? report invalid cast (don't complain about vtable kinds)
+ if expr_kind == PointerKind::Thin {
+ return Err(CastError::SizedUnsizedCast);
+ }
+
+ // vtable kinds must match
+ if cast_kind == expr_kind {
+ Ok(CastKind::PtrPtrCast)
+ } else {
+ Err(CastError::DifferingKinds)
+ }
+ }
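+
+    // Informally (a sketch): `*const u8 as *const i8` is a plain `PtrPtrCast`;
+    // `*const u8 as *const [u8]` goes thin -> fat and is a `SizedUnsizedCast`;
+    // `*const dyn std::fmt::Debug as *const [u8]` mixes vtable and length
+    // metadata and is reported as `DifferingKinds`.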
+
+ fn check_fptr_ptr_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_cast: ty::TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+        // fptr-ptr cast. The target must be a thin pointer.
+
+ match fcx.pointer_kind(m_cast.ty, self.span)? {
+ None => Err(CastError::UnknownCastPtrKind),
+ Some(PointerKind::Thin) => Ok(CastKind::FnPtrPtrCast),
+ _ => Err(CastError::IllegalCast),
+ }
+ }
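+
+    // e.g. (sketch) for some `fn f()`, `f as *const ()` has a thin target and
+    // is an `FnPtrPtrCast`, while `f as *const dyn Fn()` targets a fat pointer
+    // and is an `IllegalCast`.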
+
+ fn check_ptr_addr_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_expr: ty::TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+        // ptr-addr cast. The source must be a thin pointer.
+
+ match fcx.pointer_kind(m_expr.ty, self.span)? {
+ None => Err(CastError::UnknownExprPtrKind),
+ Some(PointerKind::Thin) => Ok(CastKind::PtrAddrCast),
+ _ => Err(CastError::NeedViaThinPtr),
+ }
+ }
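+
+    // e.g. (sketch) `p as usize` is a `PtrAddrCast` for `p: *const u8`, but for
+    // a fat `p: *const [u8]` it must go through a thin pointer first
+    // (`NeedViaThinPtr`).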
+
+ fn check_ref_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_expr: ty::TypeAndMut<'tcx>,
+ m_cast: ty::TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+ // array-ptr-cast: allow mut-to-mut, mut-to-const, const-to-const
+ if m_expr.mutbl == hir::Mutability::Mut || m_cast.mutbl == hir::Mutability::Not {
+ if let ty::Array(ety, _) = m_expr.ty.kind() {
+ // Due to the limitations of LLVM global constants,
+ // region pointers end up pointing at copies of
+ // vector elements instead of the original values.
+ // To allow raw pointers to work correctly, we
+ // need to special-case obtaining a raw pointer
+ // from a region pointer to a vector.
+
+ // Coerce to a raw pointer so that we generate AddressOf in MIR.
+ let array_ptr_type = fcx.tcx.mk_ptr(m_expr);
+ fcx.try_coerce(self.expr, self.expr_ty, array_ptr_type, AllowTwoPhase::No, None)
+ .unwrap_or_else(|_| {
+ bug!(
+ "could not cast from reference to array to pointer to array ({:?} to {:?})",
+ self.expr_ty,
+ array_ptr_type,
+ )
+ });
+
+ // this will report a type mismatch if needed
+ fcx.demand_eqtype(self.span, *ety, m_cast.ty);
+ return Ok(CastKind::ArrayPtrCast);
+ }
+ }
+
+ Err(CastError::IllegalCast)
+ }
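+
+    // A sketch of the accepted shape: with `let a = [0u8; 4];`, the cast
+    // `&a as *const u8` coerces `&[u8; 4]` to `*const [u8; 4]`, checks the
+    // element type, and yields `ArrayPtrCast`; `&a as *mut u8` is rejected,
+    // since a shared reference must not become a mutable pointer.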
+
+ fn check_addr_ptr_cast(
+ &self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ m_cast: TypeAndMut<'tcx>,
+ ) -> Result<CastKind, CastError> {
+        // addr-ptr cast. The target pointer must be thin.
+ match fcx.pointer_kind(m_cast.ty, self.span)? {
+ None => Err(CastError::UnknownCastPtrKind),
+ Some(PointerKind::Thin) => Ok(CastKind::AddrPtrCast),
+ Some(PointerKind::VTable(_)) => Err(CastError::IntToFatCast(Some("a vtable"))),
+ Some(PointerKind::Length) => Err(CastError::IntToFatCast(Some("a length"))),
+ Some(
+ PointerKind::OfProjection(_)
+ | PointerKind::OfOpaque(_, _)
+ | PointerKind::OfParam(_),
+ ) => Err(CastError::IntToFatCast(None)),
+ }
+ }
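+
+    // e.g. (sketch) `0usize as *const u8` is an `AddrPtrCast`, while
+    // `0usize as *const [u8]` would need length metadata out of thin air and is
+    // reported as `IntToFatCast(Some("a length"))`.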
+
+ fn try_coercion_cast(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<(), ty::error::TypeError<'tcx>> {
+ match fcx.try_coerce(self.expr, self.expr_ty, self.cast_ty, AllowTwoPhase::No, None) {
+ Ok(_) => Ok(()),
+ Err(err) => Err(err),
+ }
+ }
+
+ fn cenum_impl_drop_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
+ if let ty::Adt(d, _) = self.expr_ty.kind()
+ && d.has_dtor(fcx.tcx)
+ {
+ fcx.tcx.struct_span_lint_hir(
+ lint::builtin::CENUM_IMPL_DROP_CAST,
+ self.expr.hir_id,
+ self.span,
+ |err| {
+ err.build(&format!(
+ "cannot cast enum `{}` into integer `{}` because it implements `Drop`",
+ self.expr_ty, self.cast_ty
+ ))
+ .emit();
+ },
+ );
+ }
+ }
+
+ fn lossy_provenance_ptr2int_lint(&self, fcx: &FnCtxt<'a, 'tcx>, t_c: ty::cast::IntTy) {
+ fcx.tcx.struct_span_lint_hir(
+ lint::builtin::LOSSY_PROVENANCE_CASTS,
+ self.expr.hir_id,
+ self.span,
+ |err| {
+ let mut err = err.build(&format!(
+ "under strict provenance it is considered bad style to cast pointer `{}` to integer `{}`",
+ self.expr_ty, self.cast_ty
+ ));
+
+ let msg = "use `.addr()` to obtain the address of a pointer";
+
+ let expr_prec = self.expr.precedence().order();
+ let needs_parens = expr_prec < rustc_ast::util::parser::PREC_POSTFIX;
+
+ let scalar_cast = match t_c {
+ ty::cast::IntTy::U(ty::UintTy::Usize) => String::new(),
+ _ => format!(" as {}", self.cast_ty),
+ };
+
+ let cast_span = self.expr_span.shrink_to_hi().to(self.cast_span);
+
+ if needs_parens {
+ let suggestions = vec![
+ (self.expr_span.shrink_to_lo(), String::from("(")),
+ (cast_span, format!(").addr(){scalar_cast}")),
+ ];
+
+ err.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
+ } else {
+ err.span_suggestion(
+ cast_span,
+ msg,
+ format!(".addr(){scalar_cast}"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ err.help(
+ "if you can't comply with strict provenance and need to expose the pointer \
+ provenance you can use `.expose_addr()` instead"
+ );
+
+ err.emit();
+ },
+ );
+ }
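+
+    // The suggestion above rewrites, for example (a sketch; the lint is
+    // allow-by-default):
+    //
+    //     ptr as usize               =>  ptr.addr()
+    //     ptr as u64                 =>  ptr.addr() as u64
+    //     &x as *const i32 as usize  =>  (&x as *const i32).addr()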
+
+ fn fuzzy_provenance_int2ptr_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
+ fcx.tcx.struct_span_lint_hir(
+ lint::builtin::FUZZY_PROVENANCE_CASTS,
+ self.expr.hir_id,
+ self.span,
+ |err| {
+ let mut err = err.build(&format!(
+ "strict provenance disallows casting integer `{}` to pointer `{}`",
+ self.expr_ty, self.cast_ty
+ ));
+ let msg = "use `.with_addr()` to adjust a valid pointer in the same allocation, to this address";
+ let suggestions = vec![
+ (self.expr_span.shrink_to_lo(), String::from("(...).with_addr(")),
+ (self.expr_span.shrink_to_hi().to(self.cast_span), String::from(")")),
+ ];
+
+ err.multipart_suggestion(msg, suggestions, Applicability::MaybeIncorrect);
+ err.help(
+ "if you can't comply with strict provenance and don't have a pointer with \
+ the correct provenance you can use `std::ptr::from_exposed_addr()` instead"
+ );
+
+ err.emit();
+ },
+ );
+ }
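+
+    // e.g. (sketch) `addr as *const u8` gets the suggestion
+    // `(...).with_addr(addr)`; the `(...)` placeholder stands for a pointer
+    // whose provenance should be reused, which the lint cannot pick itself.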
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ fn type_is_known_to_be_sized_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool {
+ let lang_item = self.tcx.require_lang_item(LangItem::Sized, None);
+ traits::type_known_to_meet_bound_modulo_regions(self, self.param_env, ty, lang_item, span)
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/check.rs b/compiler/rustc_typeck/src/check/check.rs
new file mode 100644
index 000000000..9c1fd9b30
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/check.rs
@@ -0,0 +1,1712 @@
+use crate::check::intrinsicck::InlineAsmCtxt;
+
+use super::coercion::CoerceMany;
+use super::compare_method::check_type_bounds;
+use super::compare_method::{compare_const_impl, compare_impl_method, compare_ty_impl};
+use super::*;
+use rustc_attr as attr;
+use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{ItemKind, Node, PathSegment};
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::{DefiningAnchor, RegionVariableOrigin, TyCtxtInferExt};
+use rustc_infer::traits::Obligation;
+use rustc_lint::builtin::REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS;
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::layout::{LayoutError, MAX_SIMD_LANES};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::util::{Discr, IntTypeExt};
+use rustc_middle::ty::{
+ self, ParamEnv, ToPredicate, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable,
+};
+use rustc_session::lint::builtin::{UNINHABITED_STATIC, UNSUPPORTED_CALLING_CONVENTIONS};
+use rustc_span::symbol::sym;
+use rustc_span::{self, Span};
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::{self, ObligationCtxt};
+use rustc_ty_utils::representability::{self, Representability};
+
+use std::iter;
+use std::ops::ControlFlow;
+
+pub(super) fn check_abi(tcx: TyCtxt<'_>, hir_id: hir::HirId, span: Span, abi: Abi) {
+ match tcx.sess.target.is_abi_supported(abi) {
+ Some(true) => (),
+ Some(false) => {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0570,
+ "`{abi}` is not a supported ABI for the current target",
+ )
+ .emit();
+ }
+ None => {
+ tcx.struct_span_lint_hir(UNSUPPORTED_CALLING_CONVENTIONS, hir_id, span, |lint| {
+ lint.build("use of calling convention not supported on this target").emit();
+ });
+ }
+ }
+
+ // This ABI is only allowed on function pointers
+ if abi == Abi::CCmseNonSecureCall {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0781,
+ "the `\"C-cmse-nonsecure-call\"` ABI is only allowed on function pointers"
+ )
+ .emit();
+ }
+}
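+
+// e.g. (a sketch) `extern "C-cmse-nonsecure-call" fn f() {}` as an item is
+// rejected with E0781; the ABI is only accepted in function-pointer types such
+// as `extern "C-cmse-nonsecure-call" fn()`.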
+
+/// Helper used for fns and closures. Does the grungy work of checking a function
+/// body and returns the function context used for that purpose, since in the case of a fn item
+/// there is still a bit more to do.
+///
+/// * ...
+/// * inherited: other fields inherited from the enclosing fn (if any)
+#[instrument(skip(inherited, body), level = "debug")]
+pub(super) fn check_fn<'a, 'tcx>(
+ inherited: &'a Inherited<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ fn_sig: ty::FnSig<'tcx>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ fn_id: hir::HirId,
+ body: &'tcx hir::Body<'tcx>,
+ can_be_generator: Option<hir::Movability>,
+ return_type_pre_known: bool,
+) -> (FnCtxt<'a, 'tcx>, Option<GeneratorTypes<'tcx>>) {
+ // Create the function context. This is either derived from scratch or,
+ // in the case of closures, based on the outer context.
+ let mut fcx = FnCtxt::new(inherited, param_env, body.value.hir_id);
+ fcx.ps.set(UnsafetyState::function(fn_sig.unsafety, fn_id));
+ fcx.return_type_pre_known = return_type_pre_known;
+
+ let tcx = fcx.tcx;
+ let hir = tcx.hir();
+
+ let declared_ret_ty = fn_sig.output();
+
+ let ret_ty =
+ fcx.register_infer_ok_obligations(fcx.infcx.replace_opaque_types_with_inference_vars(
+ declared_ret_ty,
+ body.value.hir_id,
+ decl.output.span(),
+ param_env,
+ ));
+    // If we replaced `declared_ret_ty` with infer vars, then we must be inferring
+    // an opaque type, so set a flag so we can improve diagnostics.
+ fcx.return_type_has_opaque = ret_ty != declared_ret_ty;
+
+ fcx.ret_coercion = Some(RefCell::new(CoerceMany::new(ret_ty)));
+ fcx.ret_type_span = Some(decl.output.span());
+
+ let span = body.value.span;
+
+ fn_maybe_err(tcx, span, fn_sig.abi);
+
+ if fn_sig.abi == Abi::RustCall {
+ let expected_args = if let ImplicitSelfKind::None = decl.implicit_self { 1 } else { 2 };
+
+ let err = || {
+ let item = match tcx.hir().get(fn_id) {
+ Node::Item(hir::Item { kind: ItemKind::Fn(header, ..), .. }) => Some(header),
+ Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(header, ..), ..
+ }) => Some(header),
+ Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(header, ..),
+ ..
+ }) => Some(header),
+ // Closures are RustCall, but they tuple their arguments, so shouldn't be checked
+ Node::Expr(hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => None,
+ node => bug!("Item being checked wasn't a function/closure: {:?}", node),
+ };
+
+ if let Some(header) = item {
+ tcx.sess.span_err(header.span, "functions with the \"rust-call\" ABI must take a single non-self argument that is a tuple");
+ }
+ };
+
+ if fn_sig.inputs().len() != expected_args {
+ err()
+ } else {
+ // FIXME(CraftSpider) Add a check on parameter expansion, so we don't just make the ICE happen later on
+ // This will probably require wide-scale changes to support a TupleKind obligation
+ // We can't resolve this without knowing the type of the param
+ if !matches!(fn_sig.inputs()[expected_args - 1].kind(), ty::Tuple(_) | ty::Param(_)) {
+ err()
+ }
+ }
+ }
+
+ if body.generator_kind.is_some() && can_be_generator.is_some() {
+ let yield_ty = fcx
+ .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span });
+ fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType);
+
+ // Resume type defaults to `()` if the generator has no argument.
+ let resume_ty = fn_sig.inputs().get(0).copied().unwrap_or_else(|| tcx.mk_unit());
+
+ fcx.resume_yield_tys = Some((resume_ty, yield_ty));
+ }
+
+ GatherLocalsVisitor::new(&fcx).visit_body(body);
+
+ // C-variadic fns also have a `VaList` input that's not listed in `fn_sig`
+ // (as it's created inside the body itself, not passed in from outside).
+ let maybe_va_list = if fn_sig.c_variadic {
+ let span = body.params.last().unwrap().span;
+ let va_list_did = tcx.require_lang_item(LangItem::VaList, Some(span));
+ let region = fcx.next_region_var(RegionVariableOrigin::MiscVariable(span));
+
+ Some(tcx.bound_type_of(va_list_did).subst(tcx, &[region.into()]))
+ } else {
+ None
+ };
+
+ // Add formal parameters.
+ let inputs_hir = hir.fn_decl_by_hir_id(fn_id).map(|decl| &decl.inputs);
+ let inputs_fn = fn_sig.inputs().iter().copied();
+ for (idx, (param_ty, param)) in inputs_fn.chain(maybe_va_list).zip(body.params).enumerate() {
+ // Check the pattern.
+ let ty_span = try { inputs_hir?.get(idx)?.span };
+ fcx.check_pat_top(&param.pat, param_ty, ty_span, false);
+
+ // Check that argument is Sized.
+ // The check for a non-trivial pattern is a hack to avoid duplicate warnings
+ // for simple cases like `fn foo(x: Trait)`,
+ // where we would error once on the parameter as a whole, and once on the binding `x`.
+ if param.pat.simple_ident().is_none() && !tcx.features().unsized_fn_params {
+ fcx.require_type_is_sized(param_ty, param.pat.span, traits::SizedArgumentType(ty_span));
+ }
+
+ fcx.write_ty(param.hir_id, param_ty);
+ }
+
+ inherited.typeck_results.borrow_mut().liberated_fn_sigs_mut().insert(fn_id, fn_sig);
+
+ fcx.in_tail_expr = true;
+ if let ty::Dynamic(..) = declared_ret_ty.kind() {
+ // FIXME: We need to verify that the return type is `Sized` after the return expression has
+ // been evaluated so that we have types available for all the nodes being returned, but that
+ // requires the coerced evaluated type to be stored. Moving `check_return_expr` before this
+ // causes unsized errors caused by the `declared_ret_ty` to point at the return expression,
+ // while keeping the current ordering we will ignore the tail expression's type because we
+ // don't know it yet. We can't do `check_expr_kind` while keeping `check_return_expr`
+ // because we will trigger "unreachable expression" lints unconditionally.
+    // Because of all of this, we perform a crude check for the simplest `!Sized`
+    // case that a newcomer might write (returning a bare trait), and in that case
+    // we populate the tail expression's type so that the suggestion will be
+    // correct, but ignore all other possible cases.
+ fcx.check_expr(&body.value);
+ fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
+ } else {
+ fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
+ fcx.check_return_expr(&body.value, false);
+ }
+ fcx.in_tail_expr = false;
+
+ // We insert the deferred_generator_interiors entry after visiting the body.
+ // This ensures that all nested generators appear before the entry of this generator.
+ // resolve_generator_interiors relies on this property.
+ let gen_ty = if let (Some(_), Some(gen_kind)) = (can_be_generator, body.generator_kind) {
+ let interior = fcx
+ .next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span });
+ fcx.deferred_generator_interiors.borrow_mut().push((body.id(), interior, gen_kind));
+
+ let (resume_ty, yield_ty) = fcx.resume_yield_tys.unwrap();
+ Some(GeneratorTypes {
+ resume_ty,
+ yield_ty,
+ interior,
+ movability: can_be_generator.unwrap(),
+ })
+ } else {
+ None
+ };
+
+ // Finalize the return check by taking the LUB of the return types
+ // we saw and assigning it to the expected return type. This isn't
+ // really expected to fail, since the coercions would have failed
+ // earlier when trying to find a LUB.
+ let coercion = fcx.ret_coercion.take().unwrap().into_inner();
+ let mut actual_return_ty = coercion.complete(&fcx);
+ debug!("actual_return_ty = {:?}", actual_return_ty);
+ if let ty::Dynamic(..) = declared_ret_ty.kind() {
+ // We have special-cased the case where the function is declared
+ // `-> dyn Foo` and we don't actually relate it to the
+ // `fcx.ret_coercion`, so just substitute a type variable.
+ actual_return_ty =
+ fcx.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::DynReturnFn, span });
+ debug!("actual_return_ty replaced with {:?}", actual_return_ty);
+ }
+
+ // HACK(oli-obk, compiler-errors): We should be comparing this against
+ // `declared_ret_ty`, but then anything uninferred would be inferred to
+ // the opaque type itself. That again would cause writeback to assume
+ // we have a recursive call site and do the sadly stabilized fallback to `()`.
+ fcx.demand_suptype(span, ret_ty, actual_return_ty);
+
+ // Check that a function marked as `#[panic_handler]` has signature `fn(&PanicInfo) -> !`
+ if let Some(panic_impl_did) = tcx.lang_items().panic_impl()
+ && panic_impl_did == hir.local_def_id(fn_id).to_def_id()
+ {
+ check_panic_info_fn(tcx, panic_impl_did.expect_local(), fn_sig, decl, declared_ret_ty);
+ }
+
+ // Check that a function marked as `#[alloc_error_handler]` has signature `fn(Layout) -> !`
+ if let Some(alloc_error_handler_did) = tcx.lang_items().oom()
+ && alloc_error_handler_did == hir.local_def_id(fn_id).to_def_id()
+ {
+ check_alloc_error_fn(tcx, alloc_error_handler_did.expect_local(), fn_sig, decl, declared_ret_ty);
+ }
+
+ (fcx, gen_ty)
+}
+
+fn check_panic_info_fn(
+ tcx: TyCtxt<'_>,
+ fn_id: LocalDefId,
+ fn_sig: ty::FnSig<'_>,
+ decl: &hir::FnDecl<'_>,
+ declared_ret_ty: Ty<'_>,
+) {
+ let Some(panic_info_did) = tcx.lang_items().panic_info() else {
+ tcx.sess.err("language item required, but not found: `panic_info`");
+ return;
+ };
+
+ if *declared_ret_ty.kind() != ty::Never {
+ tcx.sess.span_err(decl.output.span(), "return type should be `!`");
+ }
+
+ let inputs = fn_sig.inputs();
+ if inputs.len() != 1 {
+ tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument");
+ return;
+ }
+
+ let arg_is_panic_info = match *inputs[0].kind() {
+ ty::Ref(region, ty, mutbl) => match *ty.kind() {
+ ty::Adt(ref adt, _) => {
+ adt.did() == panic_info_did && mutbl == hir::Mutability::Not && !region.is_static()
+ }
+ _ => false,
+ },
+ _ => false,
+ };
+
+ if !arg_is_panic_info {
+ tcx.sess.span_err(decl.inputs[0].span, "argument should be `&PanicInfo`");
+ }
+
+ let DefKind::Fn = tcx.def_kind(fn_id) else {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "should be a function");
+ return;
+ };
+
+ let generic_counts = tcx.generics_of(fn_id).own_counts();
+ if generic_counts.types != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "should have no type parameters");
+ }
+ if generic_counts.consts != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "should have no const parameters");
+ }
+}
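+
+// The shape these checks accept, as a sketch (in a `#![no_std]` crate):
+//
+//     #[panic_handler]
+//     fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
+//         loop {}
+//     }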
+
+fn check_alloc_error_fn(
+ tcx: TyCtxt<'_>,
+ fn_id: LocalDefId,
+ fn_sig: ty::FnSig<'_>,
+ decl: &hir::FnDecl<'_>,
+ declared_ret_ty: Ty<'_>,
+) {
+ let Some(alloc_layout_did) = tcx.lang_items().alloc_layout() else {
+ tcx.sess.err("language item required, but not found: `alloc_layout`");
+ return;
+ };
+
+ if *declared_ret_ty.kind() != ty::Never {
+ tcx.sess.span_err(decl.output.span(), "return type should be `!`");
+ }
+
+ let inputs = fn_sig.inputs();
+ if inputs.len() != 1 {
+ tcx.sess.span_err(tcx.def_span(fn_id), "function should have one argument");
+ return;
+ }
+
+ let arg_is_alloc_layout = match inputs[0].kind() {
+ ty::Adt(ref adt, _) => adt.did() == alloc_layout_did,
+ _ => false,
+ };
+
+ if !arg_is_alloc_layout {
+ tcx.sess.span_err(decl.inputs[0].span, "argument should be `Layout`");
+ }
+
+ let DefKind::Fn = tcx.def_kind(fn_id) else {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "`#[alloc_error_handler]` should be a function");
+ return;
+ };
+
+ let generic_counts = tcx.generics_of(fn_id).own_counts();
+ if generic_counts.types != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess.span_err(span, "`#[alloc_error_handler]` function should have no type parameters");
+ }
+ if generic_counts.consts != 0 {
+ let span = tcx.def_span(fn_id);
+ tcx.sess
+ .span_err(span, "`#[alloc_error_handler]` function should have no const parameters");
+ }
+}
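+
+// The shape these checks accept, as a sketch (behind the unstable
+// `alloc_error_handler` feature):
+//
+//     #[alloc_error_handler]
+//     fn oom(layout: core::alloc::Layout) -> ! {
+//         panic!("allocation of {} bytes failed", layout.size())
+//     }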
+
+fn check_struct(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ let def = tcx.adt_def(def_id);
+ let span = tcx.def_span(def_id);
+ def.destructor(tcx); // force the destructor to be evaluated
+ check_representable(tcx, span, def_id);
+
+ if def.repr().simd() {
+ check_simd(tcx, span, def_id);
+ }
+
+ check_transparent(tcx, span, def);
+ check_packed(tcx, span, def);
+}
+
+fn check_union(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ let def = tcx.adt_def(def_id);
+ let span = tcx.def_span(def_id);
+ def.destructor(tcx); // force the destructor to be evaluated
+ check_representable(tcx, span, def_id);
+ check_transparent(tcx, span, def);
+ check_union_fields(tcx, span, def_id);
+ check_packed(tcx, span, def);
+}
+
+/// Check that the fields of the `union` do not need dropping.
+fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> bool {
+ let item_type = tcx.type_of(item_def_id);
+ if let ty::Adt(def, substs) = item_type.kind() {
+ assert!(def.is_union());
+
+ fn allowed_union_field<'tcx>(
+ ty: Ty<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ span: Span,
+ ) -> bool {
+ // We don't just accept all !needs_drop fields, due to semver concerns.
+ match ty.kind() {
+ ty::Ref(..) => true, // references never drop (even mutable refs, which are non-Copy and hence fail the later check)
+ ty::Tuple(tys) => {
+ // allow tuples of allowed types
+ tys.iter().all(|ty| allowed_union_field(ty, tcx, param_env, span))
+ }
+ ty::Array(elem, _len) => {
+ // Like `Copy`, we do *not* special-case length 0.
+ allowed_union_field(*elem, tcx, param_env, span)
+ }
+ _ => {
+ // Fallback case: allow `ManuallyDrop` and things that are `Copy`.
+ ty.ty_adt_def().is_some_and(|adt_def| adt_def.is_manually_drop())
+ || ty.is_copy_modulo_regions(tcx.at(span), param_env)
+ }
+ }
+ }
+
+ let param_env = tcx.param_env(item_def_id);
+ for field in &def.non_enum_variant().fields {
+ let field_ty = field.ty(tcx, substs);
+
+ if !allowed_union_field(field_ty, tcx, param_env, span) {
+ let (field_span, ty_span) = match tcx.hir().get_if_local(field.did) {
+ // We are currently checking the type this field came from, so it must be local.
+ Some(Node::Field(field)) => (field.span, field.ty.span),
+ _ => unreachable!("mir field has to correspond to hir field"),
+ };
+ struct_span_err!(
+ tcx.sess,
+ field_span,
+ E0740,
+ "unions cannot contain fields that may need dropping"
+ )
+ .note(
+ "a type is guaranteed not to need dropping \
+ when it implements `Copy`, or when it is the special `ManuallyDrop<_>` type",
+ )
+ .multipart_suggestion_verbose(
+ "when the type does not implement `Copy`, \
+ wrap it inside a `ManuallyDrop<_>` and ensure it is manually dropped",
+ vec![
+ (ty_span.shrink_to_lo(), "std::mem::ManuallyDrop<".into()),
+ (ty_span.shrink_to_hi(), ">".into()),
+ ],
+ Applicability::MaybeIncorrect,
+ )
+ .emit();
+ return false;
+ } else if field_ty.needs_drop(tcx, param_env) {
+ // This should never happen. But we can get here e.g. in case of name resolution errors.
+ tcx.sess.delay_span_bug(span, "we should never accept maybe-dropping union fields");
+ }
+ }
+ } else {
+ span_bug!(span, "unions must be ty::Adt, but got {:?}", item_type.kind());
+ }
+ true
+}
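+
+// e.g. (sketch) `union U { s: String }` is rejected with E0740, while
+// `union V { s: std::mem::ManuallyDrop<String> }`, `union W { x: u32 }`, and
+// tuples or arrays of such field types are accepted.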
+
+/// Check that a `static` is inhabited.
+fn check_static_inhabited<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) {
+ // Make sure statics are inhabited.
+ // Other parts of the compiler assume that there are no uninhabited places. In principle it
+ // would be enough to check this for `extern` statics, as statics with an initializer will
+ // have UB during initialization if they are uninhabited, but there also seems to be no good
+ // reason to allow any statics to be uninhabited.
+ let ty = tcx.type_of(def_id);
+ let span = tcx.def_span(def_id);
+ let layout = match tcx.layout_of(ParamEnv::reveal_all().and(ty)) {
+ Ok(l) => l,
+ // Foreign statics that overflow their allowed size should emit an error
+ Err(LayoutError::SizeOverflow(_))
+ if {
+ let node = tcx.hir().get_by_def_id(def_id);
+ matches!(
+ node,
+ hir::Node::ForeignItem(hir::ForeignItem {
+ kind: hir::ForeignItemKind::Static(..),
+ ..
+ })
+ )
+ } =>
+ {
+ tcx.sess
+ .struct_span_err(span, "extern static is too large for the current architecture")
+ .emit();
+ return;
+ }
+ // Generic statics are rejected, but we still reach this case.
+ Err(e) => {
+ tcx.sess.delay_span_bug(span, &e.to_string());
+ return;
+ }
+ };
+ if layout.abi.is_uninhabited() {
+ tcx.struct_span_lint_hir(
+ UNINHABITED_STATIC,
+ tcx.hir().local_def_id_to_hir_id(def_id),
+ span,
+ |lint| {
+ lint.build("static of uninhabited type")
+ .note("uninhabited statics cannot be initialized, and any access would be an immediate error")
+ .emit();
+ },
+ );
+ }
+}
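+
+// e.g. (sketch) `extern "C" { static VOID: std::convert::Infallible; }` trips
+// the `uninhabited_static` lint above, since no valid value could ever
+// initialize it.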
+
+/// Checks that an opaque type does not contain cycles and does not use `Self` or `T::Foo`
+/// projections that would result in "inheriting lifetimes".
+pub(super) fn check_opaque<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ substs: SubstsRef<'tcx>,
+ origin: &hir::OpaqueTyOrigin,
+) {
+ let span = tcx.def_span(def_id);
+ check_opaque_for_inheriting_lifetimes(tcx, def_id, span);
+ if tcx.type_of(def_id).references_error() {
+ return;
+ }
+ if check_opaque_for_cycles(tcx, def_id, substs, span, origin).is_err() {
+ return;
+ }
+ check_opaque_meets_bounds(tcx, def_id, substs, span, origin);
+}
+
+/// Checks that an opaque type does not use `Self` or `T::Foo` projections that would result
+/// in "inheriting lifetimes".
+#[instrument(level = "debug", skip(tcx, span))]
+pub(super) fn check_opaque_for_inheriting_lifetimes<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ span: Span,
+) {
+ let item = tcx.hir().expect_item(def_id);
+ debug!(?item, ?span);
+
+ struct FoundParentLifetime;
+ struct FindParentLifetimeVisitor<'tcx>(&'tcx ty::Generics);
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for FindParentLifetimeVisitor<'tcx> {
+ type BreakTy = FoundParentLifetime;
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ debug!("FindParentLifetimeVisitor: r={:?}", r);
+ if let ty::ReEarlyBound(ty::EarlyBoundRegion { index, .. }) = *r {
+ if index < self.0.parent_count as u32 {
+ return ControlFlow::Break(FoundParentLifetime);
+ } else {
+ return ControlFlow::CONTINUE;
+ }
+ }
+
+ r.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ConstKind::Unevaluated(..) = c.kind() {
+ // FIXME(#72219) We currently don't detect lifetimes within substs
+ // which would violate this check. Even though the particular substitution is not used
+ // within the const, this should still be fixed.
+ return ControlFlow::CONTINUE;
+ }
+ c.super_visit_with(self)
+ }
+ }
+
+ struct ProhibitOpaqueVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ opaque_identity_ty: Ty<'tcx>,
+ generics: &'tcx ty::Generics,
+ selftys: Vec<(Span, Option<String>)>,
+ }
+
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
+ type BreakTy = Ty<'tcx>;
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ debug!("check_opaque_for_inheriting_lifetimes: (visit_ty) t={:?}", t);
+ if t == self.opaque_identity_ty {
+ ControlFlow::CONTINUE
+ } else {
+ t.super_visit_with(&mut FindParentLifetimeVisitor(self.generics))
+ .map_break(|FoundParentLifetime| t)
+ }
+ }
+ }
+
+ impl<'tcx> Visitor<'tcx> for ProhibitOpaqueVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) {
+ match arg.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments {
+ [
+ PathSegment {
+ res: Some(Res::SelfTy { trait_: _, alias_to: impl_ref }),
+ ..
+ },
+ ] => {
+ let impl_ty_name =
+ impl_ref.map(|(def_id, _)| self.tcx.def_path_str(def_id));
+ self.selftys.push((path.span, impl_ty_name));
+ }
+ _ => {}
+ },
+ _ => {}
+ }
+ hir::intravisit::walk_ty(self, arg);
+ }
+ }
+
+ if let ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..),
+ ..
+ }) = item.kind
+ {
+ let mut visitor = ProhibitOpaqueVisitor {
+ opaque_identity_ty: tcx.mk_opaque(
+ def_id.to_def_id(),
+ InternalSubsts::identity_for_item(tcx, def_id.to_def_id()),
+ ),
+ generics: tcx.generics_of(def_id),
+ tcx,
+ selftys: vec![],
+ };
+ let prohibit_opaque = tcx
+ .explicit_item_bounds(def_id)
+ .iter()
+ .try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor));
+ debug!(
+ "check_opaque_for_inheriting_lifetimes: prohibit_opaque={:?}, visitor.opaque_identity_ty={:?}, visitor.generics={:?}",
+ prohibit_opaque, visitor.opaque_identity_ty, visitor.generics
+ );
+
+ if let Some(ty) = prohibit_opaque.break_value() {
+ visitor.visit_item(&item);
+ let is_async = match item.kind {
+ ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => {
+ matches!(origin, hir::OpaqueTyOrigin::AsyncFn(..))
+ }
+ _ => unreachable!(),
+ };
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0760,
+ "`{}` return type cannot contain a projection or `Self` that references lifetimes from \
+ a parent scope",
+ if is_async { "async fn" } else { "impl Trait" },
+ );
+
+ for (span, name) in visitor.selftys {
+ err.span_suggestion(
+ span,
+ "consider spelling out the type instead",
+ name.unwrap_or_else(|| format!("{:?}", ty)),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+ }
+}
+
+/// Checks that an opaque type does not contain cycles.
+pub(super) fn check_opaque_for_cycles<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ substs: SubstsRef<'tcx>,
+ span: Span,
+ origin: &hir::OpaqueTyOrigin,
+) -> Result<(), ErrorGuaranteed> {
+ if tcx.try_expand_impl_trait_type(def_id.to_def_id(), substs).is_err() {
+ let reported = match origin {
+ hir::OpaqueTyOrigin::AsyncFn(..) => async_opaque_type_cycle_error(tcx, span),
+ _ => opaque_type_cycle_error(tcx, def_id, span),
+ };
+ Err(reported)
+ } else {
+ Ok(())
+ }
+}
+
+/// Check that the concrete type behind `impl Trait` actually implements `Trait`.
+///
+/// This is mostly checked at the places that specify the opaque type, but we
+/// check those cases in the `param_env` of that function, which may have
+/// bounds not on this opaque type:
+///
+/// type X<T> = impl Clone;
+/// fn f<T: Clone>(t: T) -> X<T> {
+/// t
+/// }
+///
+/// Without this check the above code is incorrectly accepted: we would ICE if
+/// someone tried, for example, to clone an `Option<X<&mut ()>>`.
+#[instrument(level = "debug", skip(tcx))]
+fn check_opaque_meets_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ substs: SubstsRef<'tcx>,
+ span: Span,
+ origin: &hir::OpaqueTyOrigin,
+) {
+ let hidden_type = tcx.bound_type_of(def_id.to_def_id()).subst(tcx, substs);
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let defining_use_anchor = match *origin {
+ hir::OpaqueTyOrigin::FnReturn(did) | hir::OpaqueTyOrigin::AsyncFn(did) => did,
+ hir::OpaqueTyOrigin::TyAlias => def_id,
+ };
+ let param_env = tcx.param_env(defining_use_anchor);
+
+ tcx.infer_ctxt().with_opaque_type_inference(DefiningAnchor::Bind(defining_use_anchor)).enter(
+ move |infcx| {
+ let ocx = ObligationCtxt::new(&infcx);
+ let opaque_ty = tcx.mk_opaque(def_id.to_def_id(), substs);
+
+ let misc_cause = traits::ObligationCause::misc(span, hir_id);
+
+ match infcx.at(&misc_cause, param_env).eq(opaque_ty, hidden_type) {
+ Ok(infer_ok) => ocx.register_infer_ok_obligations(infer_ok),
+ Err(ty_err) => {
+ tcx.sess.delay_span_bug(
+ span,
+ &format!("could not unify `{hidden_type}` with revealed type:\n{ty_err}"),
+ );
+ }
+ }
+
+ // Additionally require the hidden type to be well-formed with only the generics of the opaque type.
+ // Defining use functions may have more bounds than the opaque type, which is ok, as long as the
+ // hidden type is well formed even without those bounds.
+ let predicate = ty::Binder::dummy(ty::PredicateKind::WellFormed(hidden_type.into()))
+ .to_predicate(tcx);
+ ocx.register_obligation(Obligation::new(misc_cause, param_env, predicate));
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.report_fulfillment_errors(&errors, None, false);
+ }
+ match origin {
+ // Checked when type checking the function containing them.
+ hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..) => {}
+            // Can have different predicates from their defining use
+ hir::OpaqueTyOrigin::TyAlias => {
+ let outlives_environment = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(
+ defining_use_anchor,
+ &outlives_environment,
+ );
+ }
+ }
+ // Clean up after ourselves
+ let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+ },
+ );
+}
+
+fn check_item_type<'tcx>(tcx: TyCtxt<'tcx>, id: hir::ItemId) {
+ debug!(
+ "check_item_type(it.def_id={:?}, it.name={})",
+ id.def_id,
+ tcx.def_path_str(id.def_id.to_def_id())
+ );
+ let _indenter = indenter();
+ match tcx.def_kind(id.def_id) {
+ DefKind::Static(..) => {
+ tcx.ensure().typeck(id.def_id);
+ maybe_check_static_with_link_section(tcx, id.def_id);
+ check_static_inhabited(tcx, id.def_id);
+ }
+ DefKind::Const => {
+ tcx.ensure().typeck(id.def_id);
+ }
+ DefKind::Enum => {
+ let item = tcx.hir().item(id);
+ let hir::ItemKind::Enum(ref enum_definition, _) = item.kind else {
+ return;
+ };
+ check_enum(tcx, &enum_definition.variants, item.def_id);
+ }
+ DefKind::Fn => {} // entirely within check_item_body
+ DefKind::Impl => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::Impl(ref impl_) = it.kind else {
+ return;
+ };
+ debug!("ItemKind::Impl {} with id {:?}", it.ident, it.def_id);
+ if let Some(impl_trait_ref) = tcx.impl_trait_ref(it.def_id) {
+ check_impl_items_against_trait(
+ tcx,
+ it.span,
+ it.def_id,
+ impl_trait_ref,
+ &impl_.items,
+ );
+ check_on_unimplemented(tcx, it);
+ }
+ }
+ DefKind::Trait => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::Trait(_, _, _, _, ref items) = it.kind else {
+ return;
+ };
+ check_on_unimplemented(tcx, it);
+
+ for item in items.iter() {
+ let item = tcx.hir().trait_item(item.id);
+ match item.kind {
+ hir::TraitItemKind::Fn(ref sig, _) => {
+ let abi = sig.header.abi;
+ fn_maybe_err(tcx, item.ident.span, abi);
+ }
+ hir::TraitItemKind::Type(.., Some(default)) => {
+ let assoc_item = tcx.associated_item(item.def_id);
+ let trait_substs =
+ InternalSubsts::identity_for_item(tcx, it.def_id.to_def_id());
+ let _: Result<_, rustc_errors::ErrorGuaranteed> = check_type_bounds(
+ tcx,
+ assoc_item,
+ assoc_item,
+ default.span,
+ ty::TraitRef { def_id: it.def_id.to_def_id(), substs: trait_substs },
+ );
+ }
+ _ => {}
+ }
+ }
+ }
+ DefKind::Struct => {
+ check_struct(tcx, id.def_id);
+ }
+ DefKind::Union => {
+ check_union(tcx, id.def_id);
+ }
+ DefKind::OpaqueTy => {
+ let item = tcx.hir().item(id);
+ let hir::ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) = item.kind else {
+ return;
+ };
+ // HACK(jynelson): trying to infer the type of `impl trait` breaks documenting
+ // `async-std` (and `pub async fn` in general).
+ // Since rustdoc doesn't care about the concrete type behind `impl Trait`, just don't look at it!
+ // See https://github.com/rust-lang/rust/issues/75100
+ if !tcx.sess.opts.actually_rustdoc {
+ let substs = InternalSubsts::identity_for_item(tcx, item.def_id.to_def_id());
+ check_opaque(tcx, item.def_id, substs, &origin);
+ }
+ }
+ DefKind::TyAlias => {
+ let pty_ty = tcx.type_of(id.def_id);
+ let generics = tcx.generics_of(id.def_id);
+ check_type_params_are_used(tcx, &generics, pty_ty);
+ }
+ DefKind::ForeignMod => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::ForeignMod { abi, items } = it.kind else {
+ return;
+ };
+ check_abi(tcx, it.hir_id(), it.span, abi);
+
+ if abi == Abi::RustIntrinsic {
+ for item in items {
+ let item = tcx.hir().foreign_item(item.id);
+ intrinsic::check_intrinsic_type(tcx, item);
+ }
+ } else if abi == Abi::PlatformIntrinsic {
+ for item in items {
+ let item = tcx.hir().foreign_item(item.id);
+ intrinsic::check_platform_intrinsic_type(tcx, item);
+ }
+ } else {
+ for item in items {
+ let def_id = item.id.def_id;
+ let generics = tcx.generics_of(def_id);
+ let own_counts = generics.own_counts();
+ if generics.params.len() - own_counts.lifetimes != 0 {
+ let (kinds, kinds_pl, egs) = match (own_counts.types, own_counts.consts) {
+ (_, 0) => ("type", "types", Some("u32")),
+ // We don't specify an example value, because we can't generate
+ // a valid value for any type.
+ (0, _) => ("const", "consts", None),
+ _ => ("type or const", "types or consts", None),
+ };
+ struct_span_err!(
+ tcx.sess,
+ item.span,
+ E0044,
+ "foreign items may not have {kinds} parameters",
+ )
+ .span_label(item.span, &format!("can't have {kinds} parameters"))
+ .help(
+ // FIXME: once we start storing spans for type arguments, turn this
+ // into a suggestion.
+ &format!(
+ "replace the {} parameters with concrete {}{}",
+ kinds,
+ kinds_pl,
+ egs.map(|egs| format!(" like `{}`", egs)).unwrap_or_default(),
+ ),
+ )
+ .emit();
+ }
+
+ let item = tcx.hir().foreign_item(item.id);
+ match item.kind {
+ hir::ForeignItemKind::Fn(ref fn_decl, _, _) => {
+ require_c_abi_if_c_variadic(tcx, fn_decl, abi, item.span);
+ }
+ hir::ForeignItemKind::Static(..) => {
+ check_static_inhabited(tcx, def_id);
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+ DefKind::GlobalAsm => {
+ let it = tcx.hir().item(id);
+ let hir::ItemKind::GlobalAsm(asm) = it.kind else { span_bug!(it.span, "DefKind::GlobalAsm but got {:#?}", it) };
+ InlineAsmCtxt::new_global_asm(tcx).check_asm(asm, id.hir_id());
+ }
+ _ => {}
+ }
+}
+
+pub(super) fn check_on_unimplemented(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
+ // an error would be reported if this fails.
+ let _ = traits::OnUnimplementedDirective::of_item(tcx, item.def_id.to_def_id());
+}
+
+pub(super) fn check_specialization_validity<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def: &ty::TraitDef,
+ trait_item: &ty::AssocItem,
+ impl_id: DefId,
+ impl_item: &hir::ImplItemRef,
+) {
+ let Ok(ancestors) = trait_def.ancestors(tcx, impl_id) else { return };
+ let mut ancestor_impls = ancestors.skip(1).filter_map(|parent| {
+ if parent.is_from_trait() {
+ None
+ } else {
+ Some((parent, parent.item(tcx, trait_item.def_id)))
+ }
+ });
+
+ let opt_result = ancestor_impls.find_map(|(parent_impl, parent_item)| {
+ match parent_item {
+ // Parent impl exists, and contains the parent item we're trying to specialize, but
+ // doesn't mark it `default`.
+ Some(parent_item) if traits::impl_item_is_final(tcx, &parent_item) => {
+ Some(Err(parent_impl.def_id()))
+ }
+
+ // Parent impl contains item and makes it specializable.
+ Some(_) => Some(Ok(())),
+
+ // Parent impl doesn't mention the item. This means it's inherited from the
+ // grandparent. In that case, if parent is a `default impl`, inherited items use the
+ // "defaultness" from the grandparent, else they are final.
+ None => {
+ if tcx.impl_defaultness(parent_impl.def_id()).is_default() {
+ None
+ } else {
+ Some(Err(parent_impl.def_id()))
+ }
+ }
+ }
+ });
+
+ // If `opt_result` is `None`, we have only encountered `default impl`s that don't contain the
+    // item. This is allowed; the item isn't actually getting specialized here.
+ let result = opt_result.unwrap_or(Ok(()));
+
+ if let Err(parent_impl) = result {
+ report_forbidden_specialization(tcx, impl_item, parent_impl);
+ }
+}
+
+fn check_impl_items_against_trait<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ full_impl_span: Span,
+ impl_id: LocalDefId,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+ impl_item_refs: &[hir::ImplItemRef],
+) {
+ // If the trait reference itself is erroneous (so the compilation is going
+ // to fail), skip checking the items here -- the `impl_item` table in `tcx`
+ // isn't populated for such impls.
+ if impl_trait_ref.references_error() {
+ return;
+ }
+
+ // Negative impls are not expected to have any items
+ match tcx.impl_polarity(impl_id) {
+ ty::ImplPolarity::Reservation | ty::ImplPolarity::Positive => {}
+ ty::ImplPolarity::Negative => {
+ if let [first_item_ref, ..] = impl_item_refs {
+ let first_item_span = tcx.hir().impl_item(first_item_ref.id).span;
+ struct_span_err!(
+ tcx.sess,
+ first_item_span,
+ E0749,
+ "negative impls cannot have any items"
+ )
+ .emit();
+ }
+ return;
+ }
+ }
+
+ let trait_def = tcx.trait_def(impl_trait_ref.def_id);
+
+ for impl_item in impl_item_refs {
+ let ty_impl_item = tcx.associated_item(impl_item.id.def_id);
+ let ty_trait_item = if let Some(trait_item_id) = ty_impl_item.trait_item_def_id {
+ tcx.associated_item(trait_item_id)
+ } else {
+ // Checked in `associated_item`.
+ tcx.sess.delay_span_bug(impl_item.span, "missing associated item in trait");
+ continue;
+ };
+ let impl_item_full = tcx.hir().impl_item(impl_item.id);
+ match impl_item_full.kind {
+ hir::ImplItemKind::Const(..) => {
+ // Find associated const definition.
+ compare_const_impl(
+ tcx,
+ &ty_impl_item,
+ impl_item.span,
+ &ty_trait_item,
+ impl_trait_ref,
+ );
+ }
+ hir::ImplItemKind::Fn(..) => {
+ let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id);
+ compare_impl_method(
+ tcx,
+ &ty_impl_item,
+ &ty_trait_item,
+ impl_trait_ref,
+ opt_trait_span,
+ );
+ }
+ hir::ImplItemKind::TyAlias(impl_ty) => {
+ let opt_trait_span = tcx.hir().span_if_local(ty_trait_item.def_id);
+ compare_ty_impl(
+ tcx,
+ &ty_impl_item,
+ impl_ty.span,
+ &ty_trait_item,
+ impl_trait_ref,
+ opt_trait_span,
+ );
+ }
+ }
+
+ check_specialization_validity(
+ tcx,
+ trait_def,
+ &ty_trait_item,
+ impl_id.to_def_id(),
+ impl_item,
+ );
+ }
+
+ if let Ok(ancestors) = trait_def.ancestors(tcx, impl_id.to_def_id()) {
+ // Check for missing items from trait
+ let mut missing_items = Vec::new();
+
+ let mut must_implement_one_of: Option<&[Ident]> =
+ trait_def.must_implement_one_of.as_deref();
+
+ for &trait_item_id in tcx.associated_item_def_ids(impl_trait_ref.def_id) {
+ let is_implemented = ancestors
+ .leaf_def(tcx, trait_item_id)
+ .map_or(false, |node_item| node_item.item.defaultness(tcx).has_value());
+
+ if !is_implemented && tcx.impl_defaultness(impl_id).is_final() {
+ missing_items.push(tcx.associated_item(trait_item_id));
+ }
+
+ if let Some(required_items) = &must_implement_one_of {
+ // true if this item is specifically implemented in this impl
+ let is_implemented_here = ancestors
+ .leaf_def(tcx, trait_item_id)
+ .map_or(false, |node_item| !node_item.defining_node.is_from_trait());
+
+ if is_implemented_here {
+ let trait_item = tcx.associated_item(trait_item_id);
+ if required_items.contains(&trait_item.ident(tcx)) {
+ must_implement_one_of = None;
+ }
+ }
+ }
+ }
+
+ if !missing_items.is_empty() {
+ missing_items_err(tcx, tcx.def_span(impl_id), &missing_items, full_impl_span);
+ }
+
+ if let Some(missing_items) = must_implement_one_of {
+ let attr_span = tcx
+ .get_attr(impl_trait_ref.def_id, sym::rustc_must_implement_one_of)
+ .map(|attr| attr.span);
+
+ missing_items_must_implement_one_of_err(
+ tcx,
+ tcx.def_span(impl_id),
+ missing_items,
+ attr_span,
+ );
+ }
+ }
+}
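+
+// For example (illustrative), the two rejections above fire on code like:
+//
+//     trait Tr { fn f(); }
+//     impl Tr for () {} // E0046: not all trait items implemented, missing `f`
+//
+//     struct S;
+//     impl !Send for S { fn g() {} } // E0749: negative impls cannot have any items
+//                                    // (assumes `#![feature(negative_impls)]`)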
+
+/// Checks whether a type can be represented in memory. In particular, it
+/// identifies types that contain themselves without indirection through a
+/// pointer, which would mean their size is unbounded.
+pub(super) fn check_representable(tcx: TyCtxt<'_>, sp: Span, item_def_id: LocalDefId) -> bool {
+ let rty = tcx.type_of(item_def_id);
+
+ // Check that it is possible to represent this type. This call identifies
+ // (1) types that contain themselves and (2) types that contain a different
+ // recursive type. It is only necessary to throw an error on those that
+ // contain themselves. For case 2, there must be an inner type that will be
+ // caught by case 1.
+ match representability::ty_is_representable(tcx, rty, sp, None) {
+ Representability::SelfRecursive(spans) => {
+ recursive_type_with_infinite_size_error(tcx, item_def_id.to_def_id(), spans);
+ return false;
+ }
+ Representability::Representable | Representability::ContainsRecursive => (),
+ }
+ true
+}
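+
+// E.g. (illustrative):
+//
+//     struct List { next: List } // E0072: recursive type has infinite size
+//     struct List2 { next: Box<List2> } // ok: the `Box` indirection bounds the size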
+
+pub fn check_simd(tcx: TyCtxt<'_>, sp: Span, def_id: LocalDefId) {
+ let t = tcx.type_of(def_id);
+ if let ty::Adt(def, substs) = t.kind()
+ && def.is_struct()
+ {
+ let fields = &def.non_enum_variant().fields;
+ if fields.is_empty() {
+ struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
+ return;
+ }
+ let e = fields[0].ty(tcx, substs);
+ if !fields.iter().all(|f| f.ty(tcx, substs) == e) {
+ struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous")
+ .span_label(sp, "SIMD elements must have the same type")
+ .emit();
+ return;
+ }
+
+ let len = if let ty::Array(_ty, c) = e.kind() {
+ c.try_eval_usize(tcx, tcx.param_env(def.did()))
+ } else {
+ Some(fields.len() as u64)
+ };
+ if let Some(len) = len {
+ if len == 0 {
+ struct_span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty").emit();
+ return;
+ } else if len > MAX_SIMD_LANES {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0075,
+ "SIMD vector cannot have more than {MAX_SIMD_LANES} elements",
+ )
+ .emit();
+ return;
+ }
+ }
+
+ // Check that we use types valid for use in the lanes of a SIMD "vector register"
+ // These are scalar types which directly match a "machine" type
+ // Yes: Integers, floats, "thin" pointers
+ // No: char, "fat" pointers, compound types
+ match e.kind() {
+ ty::Param(_) => (), // pass struct<T>(T, T, T, T) through, let monomorphization catch errors
+ ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_) => (), // struct(u8, u8, u8, u8) is ok
+ ty::Array(t, _) if matches!(t.kind(), ty::Param(_)) => (), // pass struct<T>([T; N]) through, let monomorphization catch errors
+ ty::Array(t, _clen)
+ if matches!(
+ t.kind(),
+ ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::RawPtr(_)
+ ) =>
+ { /* struct([f32; 4]) is ok */ }
+ _ => {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0077,
+ "SIMD vector element type should be a \
+ primitive scalar (integer/float/pointer) type"
+ )
+ .emit();
+ return;
+ }
+ }
+ }
+}
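+
+// Illustrative cases for the checks above (all assume `#![feature(repr_simd)]`):
+//
+//     #[repr(simd)] struct F32x4(f32, f32, f32, f32); // ok: homogeneous machine scalars
+//     #[repr(simd)] struct Empty(); // E0075: SIMD vector cannot be empty
+//     #[repr(simd)] struct Mixed(f32, i32); // E0076: not homogeneous
+//     #[repr(simd)] struct Chars(char, char); // E0077: `char` is not a machine scalar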
+
+pub(super) fn check_packed(tcx: TyCtxt<'_>, sp: Span, def: ty::AdtDef<'_>) {
+ let repr = def.repr();
+ if repr.packed() {
+ for attr in tcx.get_attrs(def.did(), sym::repr) {
+ for r in attr::parse_repr_attr(&tcx.sess, attr) {
+ if let attr::ReprPacked(pack) = r
+ && let Some(repr_pack) = repr.pack
+ && pack as u64 != repr_pack.bytes()
+ {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0634,
+ "type has conflicting packed representation hints"
+ )
+ .emit();
+ }
+ }
+ }
+ if repr.align.is_some() {
+ struct_span_err!(
+ tcx.sess,
+ sp,
+ E0587,
+ "type has conflicting packed and align representation hints"
+ )
+ .emit();
+ } else {
+ if let Some(def_spans) = check_packed_inner(tcx, def.did(), &mut vec![]) {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ sp,
+ E0588,
+ "packed type cannot transitively contain a `#[repr(align)]` type"
+ );
+
+ err.span_note(
+ tcx.def_span(def_spans[0].0),
+ &format!(
+ "`{}` has a `#[repr(align)]` attribute",
+ tcx.item_name(def_spans[0].0)
+ ),
+ );
+
+ if def_spans.len() > 2 {
+ let mut first = true;
+ for (adt_def, span) in def_spans.iter().skip(1).rev() {
+ let ident = tcx.item_name(*adt_def);
+ err.span_note(
+ *span,
+ &if first {
+ format!(
+ "`{}` contains a field of type `{}`",
+ tcx.type_of(def.did()),
+ ident
+ )
+ } else {
+ format!("...which contains a field of type `{ident}`")
+ },
+ );
+ first = false;
+ }
+ }
+
+ err.emit();
+ }
+ }
+ }
+}
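+
+// Sketches of the three rejections above:
+//
+//     #[repr(packed, packed(2))] struct A(i32); // E0634: conflicting packed hints
+//     #[repr(packed, align(8))] struct B(u32); // E0587: conflicting packed and align
+//
+//     #[repr(align(16))] struct Inner(u8);
+//     #[repr(packed)] struct Outer(Inner); // E0588: packed type transitively contains
+//                                          // a `#[repr(align)]` type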
+
+pub(super) fn check_packed_inner(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ stack: &mut Vec<DefId>,
+) -> Option<Vec<(DefId, Span)>> {
+ if let ty::Adt(def, substs) = tcx.type_of(def_id).kind() {
+ if def.is_struct() || def.is_union() {
+ if def.repr().align.is_some() {
+ return Some(vec![(def.did(), DUMMY_SP)]);
+ }
+
+ stack.push(def_id);
+ for field in &def.non_enum_variant().fields {
+ if let ty::Adt(def, _) = field.ty(tcx, substs).kind()
+ && !stack.contains(&def.did())
+ && let Some(mut defs) = check_packed_inner(tcx, def.did(), stack)
+ {
+ defs.push((def.did(), field.ident(tcx).span));
+ return Some(defs);
+ }
+ }
+ stack.pop();
+ }
+ }
+
+ None
+}
+
+pub(super) fn check_transparent<'tcx>(tcx: TyCtxt<'tcx>, sp: Span, adt: ty::AdtDef<'tcx>) {
+ if !adt.repr().transparent() {
+ return;
+ }
+
+ if adt.is_union() && !tcx.features().transparent_unions {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::transparent_unions,
+ sp,
+ "transparent unions are unstable",
+ )
+ .emit();
+ }
+
+ if adt.variants().len() != 1 {
+ bad_variant_count(tcx, adt, sp, adt.did());
+ if adt.variants().is_empty() {
+ // Don't bother checking the fields. No variants (and thus no fields) exist.
+ return;
+ }
+ }
+
+ // For each field, figure out if it's known to be a ZST and align(1), with "known"
+ // respecting #[non_exhaustive] attributes.
+ let field_infos = adt.all_fields().map(|field| {
+ let ty = field.ty(tcx, InternalSubsts::identity_for_item(tcx, field.did));
+ let param_env = tcx.param_env(field.did);
+ let layout = tcx.layout_of(param_env.and(ty));
+ // We are currently checking the type this field came from, so it must be local
+ let span = tcx.hir().span_if_local(field.did).unwrap();
+ let zst = layout.map_or(false, |layout| layout.is_zst());
+ let align1 = layout.map_or(false, |layout| layout.align.abi.bytes() == 1);
+ if !zst {
+ return (span, zst, align1, None);
+ }
+
+ fn check_non_exhaustive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ t: Ty<'tcx>,
+ ) -> ControlFlow<(&'static str, DefId, SubstsRef<'tcx>, bool)> {
+ match t.kind() {
+ ty::Tuple(list) => list.iter().try_for_each(|t| check_non_exhaustive(tcx, t)),
+ ty::Array(ty, _) => check_non_exhaustive(tcx, *ty),
+ ty::Adt(def, subst) => {
+ if !def.did().is_local() {
+ let non_exhaustive = def.is_variant_list_non_exhaustive()
+ || def
+ .variants()
+ .iter()
+ .any(ty::VariantDef::is_field_list_non_exhaustive);
+ let has_priv = def.all_fields().any(|f| !f.vis.is_public());
+ if non_exhaustive || has_priv {
+ return ControlFlow::Break((
+ def.descr(),
+ def.did(),
+ subst,
+ non_exhaustive,
+ ));
+ }
+ }
+ def.all_fields()
+ .map(|field| field.ty(tcx, subst))
+ .try_for_each(|t| check_non_exhaustive(tcx, t))
+ }
+ _ => ControlFlow::Continue(()),
+ }
+ }
+
+ (span, zst, align1, check_non_exhaustive(tcx, ty).break_value())
+ });
+
+ let non_zst_fields = field_infos
+ .clone()
+ .filter_map(|(span, zst, _align1, _non_exhaustive)| if !zst { Some(span) } else { None });
+ let non_zst_count = non_zst_fields.clone().count();
+ if non_zst_count >= 2 {
+ bad_non_zero_sized_fields(tcx, adt, non_zst_count, non_zst_fields, sp);
+ }
+ let incompatible_zst_fields =
+ field_infos.clone().filter(|(_, _, _, opt)| opt.is_some()).count();
+ let incompat = incompatible_zst_fields + non_zst_count >= 2 && non_zst_count < 2;
+ for (span, zst, align1, non_exhaustive) in field_infos {
+ if zst && !align1 {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0691,
+ "zero-sized field in transparent {} has alignment larger than 1",
+ adt.descr(),
+ )
+ .span_label(span, "has alignment larger than 1")
+ .emit();
+ }
+ if incompat && let Some((descr, def_id, substs, non_exhaustive)) = non_exhaustive {
+ tcx.struct_span_lint_hir(
+ REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
+ tcx.hir().local_def_id_to_hir_id(adt.did().expect_local()),
+ span,
+ |lint| {
+ let note = if non_exhaustive {
+ "is marked with `#[non_exhaustive]`"
+ } else {
+ "contains private fields"
+ };
+ let field_ty = tcx.def_path_str_with_substs(def_id, substs);
+ lint.build("zero-sized fields in repr(transparent) cannot contain external non-exhaustive types")
+ .note(format!("this {descr} contains `{field_ty}`, which {note}, \
+ and makes it not a breaking change to become non-zero-sized in the future."))
+ .emit();
+ },
+ )
+ }
+ }
+}
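+
+// For example (illustrative):
+//
+//     #[repr(transparent)] struct Fine(u64, std::marker::PhantomData<u8>); // one non-ZST field
+//     #[repr(transparent)] struct Bad(u64, [u32; 0]); // E0691: `[u32; 0]` is a ZST whose
+//                                                     // alignment (4) is larger than 1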
+
+#[allow(trivial_numeric_casts)]
+fn check_enum<'tcx>(tcx: TyCtxt<'tcx>, vs: &'tcx [hir::Variant<'tcx>], def_id: LocalDefId) {
+ let def = tcx.adt_def(def_id);
+ let sp = tcx.def_span(def_id);
+ def.destructor(tcx); // force the destructor to be evaluated
+
+ if vs.is_empty() {
+ if let Some(attr) = tcx.get_attr(def_id.to_def_id(), sym::repr) {
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0084,
+ "unsupported representation for zero-variant enum"
+ )
+ .span_label(sp, "zero-variant enum")
+ .emit();
+ }
+ }
+
+ let repr_type_ty = def.repr().discr_type().to_ty(tcx);
+ if repr_type_ty == tcx.types.i128 || repr_type_ty == tcx.types.u128 {
+ if !tcx.features().repr128 {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::repr128,
+ sp,
+ "repr with 128-bit type is unstable",
+ )
+ .emit();
+ }
+ }
+
+ for v in vs {
+ if let Some(ref e) = v.disr_expr {
+ tcx.ensure().typeck(tcx.hir().local_def_id(e.hir_id));
+ }
+ }
+
+ if tcx.adt_def(def_id).repr().int.is_none() && tcx.features().arbitrary_enum_discriminant {
+ let is_unit = |var: &hir::Variant<'_>| matches!(var.data, hir::VariantData::Unit(..));
+
+ let has_disr = |var: &hir::Variant<'_>| var.disr_expr.is_some();
+ let has_non_units = vs.iter().any(|var| !is_unit(var));
+ let disr_units = vs.iter().any(|var| is_unit(&var) && has_disr(&var));
+ let disr_non_unit = vs.iter().any(|var| !is_unit(&var) && has_disr(&var));
+
+ if disr_non_unit || (disr_units && has_non_units) {
+ let mut err =
+ struct_span_err!(tcx.sess, sp, E0732, "`#[repr(inttype)]` must be specified");
+ err.emit();
+ }
+ }
+
+ let mut disr_vals: Vec<Discr<'tcx>> = Vec::with_capacity(vs.len());
+ // This tracks the previous variant span (in the loop) in case we need it for diagnostics.
+ let mut prev_variant_span: Span = DUMMY_SP;
+ for ((_, discr), v) in iter::zip(def.discriminants(tcx), vs) {
+ // Check for duplicate discriminant values
+ if let Some(i) = disr_vals.iter().position(|&x| x.val == discr.val) {
+ let variant_did = def.variant(VariantIdx::new(i)).def_id;
+ let variant_i_hir_id = tcx.hir().local_def_id_to_hir_id(variant_did.expect_local());
+ let variant_i = tcx.hir().expect_variant(variant_i_hir_id);
+ let i_span = match variant_i.disr_expr {
+ Some(ref expr) => tcx.hir().span(expr.hir_id),
+ None => tcx.def_span(variant_did),
+ };
+ let span = match v.disr_expr {
+ Some(ref expr) => tcx.hir().span(expr.hir_id),
+ None => v.span,
+ };
+ let display_discr = format_discriminant_overflow(tcx, v, discr);
+ let display_discr_i = format_discriminant_overflow(tcx, variant_i, disr_vals[i]);
+ let no_disr = v.disr_expr.is_none();
+ let mut err = struct_span_err!(
+ tcx.sess,
+ sp,
+ E0081,
+ "discriminant value `{}` assigned more than once",
+ discr,
+ );
+
+ err.span_label(i_span, format!("first assignment of {display_discr_i}"));
+ err.span_label(span, format!("second assignment of {display_discr}"));
+
+ if no_disr {
+ err.span_label(
+ prev_variant_span,
+ format!(
+ "assigned discriminant for `{}` was incremented from this discriminant",
+ v.ident
+ ),
+ );
+ }
+ err.emit();
+ }
+
+ disr_vals.push(discr);
+ prev_variant_span = v.span;
+ }
+
+ check_representable(tcx, sp, def_id);
+ check_transparent(tcx, sp, def);
+}
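+
+// E.g. (illustrative), the duplicate-discriminant check fires on both of these:
+//
+//     enum E { A = 1, B = 1 } // E0081: `1` assigned more than once
+//     enum F { A = 3, B = 2, C } // E0081: `C` is implicitly `3`, colliding with `A`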
+
+/// In the case that a discriminant is both a duplicate and an overflowing literal,
+/// we insert both the assigned discriminant and the literal it overflowed from into the formatted
+/// output. Otherwise we format the discriminant normally.
+fn format_discriminant_overflow<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ variant: &hir::Variant<'_>,
+ dis: Discr<'tcx>,
+) -> String {
+ if let Some(expr) = &variant.disr_expr {
+ let body = &tcx.hir().body(expr.body).value;
+ if let hir::ExprKind::Lit(lit) = &body.kind
+ && let rustc_ast::LitKind::Int(lit_value, _int_kind) = &lit.node
+ && dis.val != *lit_value
+ {
+ return format!("`{dis}` (overflowed from `{lit_value}`)");
+ }
+ }
+
+ format!("`{dis}`")
+}
+
+pub(super) fn check_type_params_are_used<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: &ty::Generics,
+ ty: Ty<'tcx>,
+) {
+ debug!("check_type_params_are_used(generics={:?}, ty={:?})", generics, ty);
+
+ assert_eq!(generics.parent, None);
+
+ if generics.own_counts().types == 0 {
+ return;
+ }
+
+ let mut params_used = BitSet::new_empty(generics.params.len());
+
+ if ty.references_error() {
+ // If there is already another error, do not emit
+ // an error for not using a type parameter.
+ assert!(tcx.sess.has_errors().is_some());
+ return;
+ }
+
+ for leaf in ty.walk() {
+ if let GenericArgKind::Type(leaf_ty) = leaf.unpack()
+ && let ty::Param(param) = leaf_ty.kind()
+ {
+ debug!("found use of ty param {:?}", param);
+ params_used.insert(param.index);
+ }
+ }
+
+ for param in &generics.params {
+ if !params_used.contains(param.index)
+ && let ty::GenericParamDefKind::Type { .. } = param.kind
+ {
+ let span = tcx.def_span(param.def_id);
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0091,
+ "type parameter `{}` is unused",
+ param.name,
+ )
+ .span_label(span, "unused type parameter")
+ .emit();
+ }
+ }
+}
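+
+// A minimal example of what this rejects (illustrative):
+//
+//     type Alias<T> = u32; // E0091: type parameter `T` is unused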
+
+pub(super) fn check_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ let module = tcx.hir_module_items(module_def_id);
+ for id in module.items() {
+ check_item_type(tcx, id);
+ }
+}
+
+fn async_opaque_type_cycle_error(tcx: TyCtxt<'_>, span: Span) -> ErrorGuaranteed {
+ struct_span_err!(tcx.sess, span, E0733, "recursion in an `async fn` requires boxing")
+ .span_label(span, "recursive `async fn`")
+ .note("a recursive `async fn` must be rewritten to return a boxed `dyn Future`")
+ .note(
+ "consider using the `async_recursion` crate: https://crates.io/crates/async_recursion",
+ )
+ .emit()
+}
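+
+// Triggered by directly recursive `async fn`s, e.g. (illustrative):
+//
+//     async fn rec(n: u32) { if n > 0 { rec(n - 1).await } } // E0733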
+
+/// Emit an error for recursive opaque types.
+///
+/// If this is a return `impl Trait`, find the item's return expressions and point at them. For
+/// direct recursion this is enough, but for indirect recursion also point at the last intermediary
+/// `impl Trait`.
+///
+/// If all the return expressions evaluate to `!`, then we explain that the error will go away
+/// once the body returns a concrete type instead. This can happen when a user uses
+/// `panic!()` or similar as a placeholder.
+fn opaque_type_cycle_error(tcx: TyCtxt<'_>, def_id: LocalDefId, span: Span) -> ErrorGuaranteed {
+ let mut err = struct_span_err!(tcx.sess, span, E0720, "cannot resolve opaque type");
+
+ let mut label = false;
+ if let Some((def_id, visitor)) = get_owner_return_paths(tcx, def_id) {
+ let typeck_results = tcx.typeck(def_id);
+ if visitor
+ .returns
+ .iter()
+ .filter_map(|expr| typeck_results.node_type_opt(expr.hir_id))
+ .all(|ty| matches!(ty.kind(), ty::Never))
+ {
+ let spans = visitor
+ .returns
+ .iter()
+ .filter(|expr| typeck_results.node_type_opt(expr.hir_id).is_some())
+ .map(|expr| expr.span)
+ .collect::<Vec<Span>>();
+ let span_len = spans.len();
+ if span_len == 1 {
+ err.span_label(spans[0], "this returned value is of `!` type");
+ } else {
+ let mut multispan: MultiSpan = spans.clone().into();
+ for span in spans {
+ multispan.push_span_label(span, "this returned value is of `!` type");
+ }
+ err.span_note(multispan, "these returned values have a concrete \"never\" type");
+ }
+ err.help("this error will resolve once the item's body returns a concrete type");
+ } else {
+ let mut seen = FxHashSet::default();
+ seen.insert(span);
+ err.span_label(span, "recursive opaque type");
+ label = true;
+ for (sp, ty) in visitor
+ .returns
+ .iter()
+ .filter_map(|e| typeck_results.node_type_opt(e.hir_id).map(|t| (e.span, t)))
+ .filter(|(_, ty)| !matches!(ty.kind(), ty::Never))
+ {
+ struct OpaqueTypeCollector(Vec<DefId>);
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for OpaqueTypeCollector {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *t.kind() {
+ ty::Opaque(def, _) => {
+ self.0.push(def);
+ ControlFlow::CONTINUE
+ }
+ _ => t.super_visit_with(self),
+ }
+ }
+ }
+ let mut visitor = OpaqueTypeCollector(vec![]);
+ ty.visit_with(&mut visitor);
+ for def_id in visitor.0 {
+ let ty_span = tcx.def_span(def_id);
+ if !seen.contains(&ty_span) {
+ err.span_label(ty_span, &format!("returning this opaque type `{ty}`"));
+ seen.insert(ty_span);
+ }
+ err.span_label(sp, &format!("returning here with type `{ty}`"));
+ }
+ }
+ }
+ }
+ if !label {
+ err.span_label(span, "cannot resolve opaque type");
+ }
+ err.emit()
+}
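+
+// For example (illustrative):
+//
+//     fn f() -> impl Sized { f() } // E0720: recursive opaque type
+//     fn g() -> impl Sized { panic!() } // E0720, with the "returns `!`" help above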
diff --git a/compiler/rustc_typeck/src/check/closure.rs b/compiler/rustc_typeck/src/check/closure.rs
new file mode 100644
index 000000000..fee872155
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/closure.rs
@@ -0,0 +1,805 @@
+//! Code for type-checking closure expressions.
+
+use super::{check_fn, Expectation, FnCtxt, GeneratorTypes};
+
+use crate::astconv::AstConv;
+use crate::rustc_middle::ty::subst::Subst;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::LateBoundRegionConversionTime;
+use rustc_infer::infer::{InferOk, InferResult};
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Ty};
+use rustc_span::source_map::Span;
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits::error_reporting::ArgKind;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use std::cmp;
+use std::iter;
+
+/// What signature do we *expect* the closure to have from context?
+#[derive(Debug)]
+struct ExpectedSig<'tcx> {
+ /// Span that gave us this expectation, if we know that.
+ cause_span: Option<Span>,
+ sig: ty::PolyFnSig<'tcx>,
+}
+
+struct ClosureSignatures<'tcx> {
+ bound_sig: ty::PolyFnSig<'tcx>,
+ liberated_sig: ty::FnSig<'tcx>,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ #[instrument(skip(self, expr, _capture, decl, body_id), level = "debug")]
+ pub fn check_expr_closure(
+ &self,
+ expr: &hir::Expr<'_>,
+ _capture: hir::CaptureBy,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ body_id: hir::BodyId,
+ gen: Option<hir::Movability>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ trace!("decl = {:#?}", decl);
+ trace!("expr = {:#?}", expr);
+
+ // It's always helpful for inference if we know the kind of
+ // closure sooner rather than later, so we first examine the expected
+ // type and see if we can glean a closure kind from there.
+ let (expected_sig, expected_kind) = match expected.to_option(self) {
+ Some(ty) => self.deduce_expectations_from_expected_type(ty),
+ None => (None, None),
+ };
+ let body = self.tcx.hir().body(body_id);
+ self.check_closure(expr, expected_kind, decl, body, gen, expected_sig)
+ }
+
+ #[instrument(skip(self, expr, body, decl), level = "debug")]
+ fn check_closure(
+ &self,
+ expr: &hir::Expr<'_>,
+ opt_kind: Option<ty::ClosureKind>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ body: &'tcx hir::Body<'tcx>,
+ gen: Option<hir::Movability>,
+ expected_sig: Option<ExpectedSig<'tcx>>,
+ ) -> Ty<'tcx> {
+ trace!("decl = {:#?}", decl);
+ let expr_def_id = self.tcx.hir().local_def_id(expr.hir_id);
+ debug!(?expr_def_id);
+
+ let ClosureSignatures { bound_sig, liberated_sig } =
+ self.sig_of_closure(expr.hir_id, expr_def_id.to_def_id(), decl, body, expected_sig);
+
+ debug!(?bound_sig, ?liberated_sig);
+
+ let return_type_pre_known = !liberated_sig.output().is_ty_infer();
+
+ let generator_types = check_fn(
+ self,
+ self.param_env.without_const(),
+ liberated_sig,
+ decl,
+ expr.hir_id,
+ body,
+ gen,
+ return_type_pre_known,
+ )
+ .1;
+
+ let parent_substs = InternalSubsts::identity_for_item(
+ self.tcx,
+ self.tcx.typeck_root_def_id(expr_def_id.to_def_id()),
+ );
+
+ let tupled_upvars_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::ClosureSynthetic,
+ span: self.tcx.hir().span(expr.hir_id),
+ });
+
+ if let Some(GeneratorTypes { resume_ty, yield_ty, interior, movability }) = generator_types
+ {
+ let generator_substs = ty::GeneratorSubsts::new(
+ self.tcx,
+ ty::GeneratorSubstsParts {
+ parent_substs,
+ resume_ty,
+ yield_ty,
+ return_ty: liberated_sig.output(),
+ witness: interior,
+ tupled_upvars_ty,
+ },
+ );
+
+ return self.tcx.mk_generator(
+ expr_def_id.to_def_id(),
+ generator_substs.substs,
+ movability,
+ );
+ }
+
+ // Tuple up the arguments and insert the resulting function type into
+ // the `closures` table.
+ let sig = bound_sig.map_bound(|sig| {
+ self.tcx.mk_fn_sig(
+ iter::once(self.tcx.intern_tup(sig.inputs())),
+ sig.output(),
+ sig.c_variadic,
+ sig.unsafety,
+ sig.abi,
+ )
+ });
+
+ debug!(?sig, ?opt_kind);
+
+ let closure_kind_ty = match opt_kind {
+ Some(kind) => kind.to_ty(self.tcx),
+
+ // Create a type variable (for now) to represent the closure kind.
+ // It will be unified during the upvar inference phase (`upvar.rs`)
+ None => self.next_ty_var(TypeVariableOrigin {
+ // FIXME(eddyb) distinguish closure kind inference variables from the rest.
+ kind: TypeVariableOriginKind::ClosureSynthetic,
+ span: expr.span,
+ }),
+ };
+
+ let closure_substs = ty::ClosureSubsts::new(
+ self.tcx,
+ ty::ClosureSubstsParts {
+ parent_substs,
+ closure_kind_ty,
+ closure_sig_as_fn_ptr_ty: self.tcx.mk_fn_ptr(sig),
+ tupled_upvars_ty,
+ },
+ );
+
+ let closure_type = self.tcx.mk_closure(expr_def_id.to_def_id(), closure_substs.substs);
+
+ debug!(?expr.hir_id, ?closure_type);
+
+ closure_type
+ }
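+
+ // For instance (illustrative), a closure `|a: u32, b: f32| -> bool { .. }` is
+ // stored with the tupled "rust-call" signature `fn((u32, f32)) -> bool`
+ // computed above.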
+
+ /// Given the expected type, figures out what it can deduce about this
+ /// closure we are about to type check.
+ #[instrument(skip(self), level = "debug")]
+ fn deduce_expectations_from_expected_type(
+ &self,
+ expected_ty: Ty<'tcx>,
+ ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) {
+ match *expected_ty.kind() {
+ ty::Opaque(def_id, substs) => {
+ let bounds = self.tcx.bound_explicit_item_bounds(def_id);
+ let sig = bounds
+ .transpose_iter()
+ .map(|e| e.map_bound(|e| *e).transpose_tuple2())
+ .find_map(|(pred, span)| match pred.0.kind().skip_binder() {
+ ty::PredicateKind::Projection(proj_predicate) => self
+ .deduce_sig_from_projection(
+ Some(span.0),
+ pred.0
+ .kind()
+ .rebind(pred.rebind(proj_predicate).subst(self.tcx, substs)),
+ ),
+ _ => None,
+ });
+
+ let kind = bounds
+ .transpose_iter()
+ .map(|e| e.map_bound(|e| *e).transpose_tuple2())
+ .filter_map(|(pred, _)| match pred.0.kind().skip_binder() {
+ ty::PredicateKind::Trait(tp) => {
+ self.tcx.fn_trait_kind_from_lang_item(tp.def_id())
+ }
+ _ => None,
+ })
+ .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur))));
+ trace!(?sig, ?kind);
+ (sig, kind)
+ }
+ ty::Dynamic(ref object_type, ..) => {
+ let sig = object_type.projection_bounds().find_map(|pb| {
+ let pb = pb.with_self_ty(self.tcx, self.tcx.types.trait_object_dummy_self);
+ self.deduce_sig_from_projection(None, pb)
+ });
+ let kind = object_type
+ .principal_def_id()
+ .and_then(|did| self.tcx.fn_trait_kind_from_lang_item(did));
+ (sig, kind)
+ }
+ ty::Infer(ty::TyVar(vid)) => self.deduce_expectations_from_obligations(vid),
+ ty::FnPtr(sig) => {
+ let expected_sig = ExpectedSig { cause_span: None, sig };
+ (Some(expected_sig), Some(ty::ClosureKind::Fn))
+ }
+ _ => (None, None),
+ }
+ }
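+
+ // For instance (illustrative), in
+ //
+ // fn take<F: Fn(u32) -> u32>(_: F) {}
+ // take(|x| x + 1);
+ //
+ // the expected type is an inference variable standing in for `F`, so the
+ // `Infer` arm above consults the pending `Fn(u32) -> u32` obligations and
+ // deduces both the signature and `ClosureKind::Fn`.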
+
+ fn deduce_expectations_from_obligations(
+ &self,
+ expected_vid: ty::TyVid,
+ ) -> (Option<ExpectedSig<'tcx>>, Option<ty::ClosureKind>) {
+ let expected_sig =
+ self.obligations_for_self_ty(expected_vid).find_map(|(_, obligation)| {
+ debug!(?obligation.predicate);
+
+ let bound_predicate = obligation.predicate.kind();
+ if let ty::PredicateKind::Projection(proj_predicate) =
+ obligation.predicate.kind().skip_binder()
+ {
+ // Given a Projection predicate, we can potentially infer
+ // the complete signature.
+ self.deduce_sig_from_projection(
+ Some(obligation.cause.span),
+ bound_predicate.rebind(proj_predicate),
+ )
+ } else {
+ None
+ }
+ });
+
+ // Even if we can't infer the full signature, we may be able to
+ // infer the kind. This can occur when we elaborate a predicate
+ // like `F : Fn<A>`. Note that due to subtyping we could encounter
+ // many viable options, so pick the most restrictive.
+ let expected_kind = self
+ .obligations_for_self_ty(expected_vid)
+ .filter_map(|(tr, _)| self.tcx.fn_trait_kind_from_lang_item(tr.def_id()))
+ .fold(None, |best, cur| Some(best.map_or(cur, |best| cmp::min(best, cur))));
+
+ (expected_sig, expected_kind)
+ }
+
+ /// Given a projection like "<F as Fn(X)>::Result == Y", we can deduce
+ /// everything we need to know about a closure or generator.
+ ///
+ /// The `cause_span` should be the span that caused us to
+ /// have this expected signature, or `None` if we can't readily
+ /// know that.
+ #[instrument(level = "debug", skip(self, cause_span))]
+ fn deduce_sig_from_projection(
+ &self,
+ cause_span: Option<Span>,
+ projection: ty::PolyProjectionPredicate<'tcx>,
+ ) -> Option<ExpectedSig<'tcx>> {
+ let tcx = self.tcx;
+
+ let trait_def_id = projection.trait_def_id(tcx);
+
+ let is_fn = tcx.fn_trait_kind_from_lang_item(trait_def_id).is_some();
+ let gen_trait = tcx.require_lang_item(LangItem::Generator, cause_span);
+ let is_gen = gen_trait == trait_def_id;
+ if !is_fn && !is_gen {
+ debug!("not fn or generator");
+ return None;
+ }
+
+ if is_gen {
+ // Check that we deduce the signature from the `<_ as std::ops::Generator>::Return`
+ // associated item and not from `Yield`.
+ let return_assoc_item = self.tcx.associated_item_def_ids(gen_trait)[1];
+ if return_assoc_item != projection.projection_def_id() {
+ debug!("not return assoc item of generator");
+ return None;
+ }
+ }
+
+ let input_tys = if is_fn {
+ let arg_param_ty = projection.skip_binder().projection_ty.substs.type_at(1);
+ let arg_param_ty = self.resolve_vars_if_possible(arg_param_ty);
+ debug!(?arg_param_ty);
+
+ match arg_param_ty.kind() {
+ &ty::Tuple(tys) => tys,
+ _ => return None,
+ }
+ } else {
+ // Generators with a `()` resume type may be defined with 0 or 1 explicit arguments,
+ // else they must have exactly 1 argument. For now though, just give up in this case.
+ return None;
+ };
+
+ // Since this is a return parameter type it is safe to unwrap.
+ let ret_param_ty = projection.skip_binder().term.ty().unwrap();
+ let ret_param_ty = self.resolve_vars_if_possible(ret_param_ty);
+ debug!(?ret_param_ty);
+
+ let sig = projection.rebind(self.tcx.mk_fn_sig(
+ input_tys.iter(),
+ ret_param_ty,
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ ));
+ debug!(?sig);
+
+ Some(ExpectedSig { cause_span, sig })
+ }
+
+ fn sig_of_closure(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sig: Option<ExpectedSig<'tcx>>,
+ ) -> ClosureSignatures<'tcx> {
+ if let Some(e) = expected_sig {
+ self.sig_of_closure_with_expectation(hir_id, expr_def_id, decl, body, e)
+ } else {
+ self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body)
+ }
+ }
+
+ /// If there is no expected signature, then we will convert the
+ /// types that the user gave into a signature.
+ #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")]
+ fn sig_of_closure_no_expectation(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ ) -> ClosureSignatures<'tcx> {
+ let bound_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body);
+
+ self.closure_sigs(expr_def_id, body, bound_sig)
+ }
+
+ /// Invoked to compute the signature of a closure expression. This
+ /// combines any user-provided type annotations (e.g., `|x: u32|
+ /// -> u32 { .. }`) with the expected signature.
+ ///
+ /// The approach is as follows:
+ ///
+ /// - Let `S` be the (higher-ranked) signature that we derive from the user's annotations.
+ /// - Let `E` be the (higher-ranked) signature that we derive from the expectations, if any.
+ /// - If we have no expectation `E`, then the signature of the closure is `S`.
+ /// - Otherwise, the signature of the closure is `E`. Moreover:
+ /// - Skolemize the late-bound regions in `E`, yielding `E'`.
+ /// - Instantiate all the late-bound regions bound in the closure within `S`
+ /// with fresh (existential) variables, yielding `S'`
+ /// - Require that `E' = S'`
+ /// - We could use some kind of subtyping relationship here,
+ /// I imagine, but equality is easier and works fine for
+ /// our purposes.
+ ///
+ /// The key intuition here is that the user's types must be valid
+ /// from "the inside" of the closure, but the expectation
+ /// ultimately drives the overall signature.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (illustrative)
+ /// fn with_closure<F>(_: F)
+ /// where F: Fn(&u32) -> &u32 { .. }
+ ///
+ /// with_closure(|x: &u32| { ... })
+ /// ```
+ ///
+ /// Here:
+ /// - E would be `fn(&u32) -> &u32`.
+ /// - S would be `fn(&u32) -> ?T` (the return type is not yet known)
+ /// - E' is `&'!0 u32 -> &'!0 u32`
+ /// - S' is `&'?0 u32 -> ?T`
+ ///
+ /// S' can be unified with E' with `['?0 = '!0, ?T = &'!0 u32]`.
+ ///
+ /// # Arguments
+ ///
+ /// - `expr_def_id`: the `DefId` of the closure expression
+ /// - `decl`: the HIR declaration of the closure
+ /// - `body`: the body of the closure
+ /// - `expected_sig`: the expected signature (if any). Note that
+ /// this is missing a binder: that is, there may be late-bound
+ /// regions with depth 1, which are bound then by the closure.
+ #[instrument(skip(self, hir_id, expr_def_id, decl, body), level = "debug")]
+ fn sig_of_closure_with_expectation(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sig: ExpectedSig<'tcx>,
+ ) -> ClosureSignatures<'tcx> {
+ // Watch out for some surprises and just ignore the
+ // expectation if things don't seem to match up with what we
+ // expect.
+ if expected_sig.sig.c_variadic() != decl.c_variadic {
+ return self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body);
+ } else if expected_sig.sig.skip_binder().inputs_and_output.len() != decl.inputs.len() + 1 {
+ return self.sig_of_closure_with_mismatched_number_of_arguments(
+ expr_def_id,
+ decl,
+ body,
+ expected_sig,
+ );
+ }
+
+ // Create a `PolyFnSig`. Note the oddity that late bound
+ // regions appearing free in `expected_sig` are now bound up
+ // in this binder we are creating.
+ assert!(!expected_sig.sig.skip_binder().has_vars_bound_above(ty::INNERMOST));
+ let bound_sig = expected_sig.sig.map_bound(|sig| {
+ self.tcx.mk_fn_sig(
+ sig.inputs().iter().cloned(),
+ sig.output(),
+ sig.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ )
+ });
+
+ // `deduce_expectations_from_expected_type` introduces
+ // late-bound lifetimes defined elsewhere, which we now
+ // anonymize away, so as not to confuse the user.
+ let bound_sig = self.tcx.anonymize_late_bound_regions(bound_sig);
+
+ let closure_sigs = self.closure_sigs(expr_def_id, body, bound_sig);
+
+ // Up till this point, we have ignored the annotations that the user
+ // gave. This function will check that they unify successfully.
+ // Along the way, it also writes out entries for types that the user
+ // wrote into our typeck results, which are then later used by the privacy
+ // check.
+ match self.check_supplied_sig_against_expectation(
+ hir_id,
+ expr_def_id,
+ decl,
+ body,
+ &closure_sigs,
+ ) {
+ Ok(infer_ok) => self.register_infer_ok_obligations(infer_ok),
+ Err(_) => return self.sig_of_closure_no_expectation(hir_id, expr_def_id, decl, body),
+ }
+
+ closure_sigs
+ }
+
+ fn sig_of_closure_with_mismatched_number_of_arguments(
+ &self,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sig: ExpectedSig<'tcx>,
+ ) -> ClosureSignatures<'tcx> {
+ let hir = self.tcx.hir();
+ let expr_map_node = hir.get_if_local(expr_def_id).unwrap();
+ let expected_args: Vec<_> = expected_sig
+ .sig
+ .skip_binder()
+ .inputs()
+ .iter()
+ .map(|ty| ArgKind::from_expected_ty(*ty, None))
+ .collect();
+ let (closure_span, found_args) = match self.get_fn_like_arguments(expr_map_node) {
+ Some((sp, args)) => (Some(sp), args),
+ None => (None, Vec::new()),
+ };
+ let expected_span =
+ expected_sig.cause_span.unwrap_or_else(|| hir.span_if_local(expr_def_id).unwrap());
+ self.report_arg_count_mismatch(
+ expected_span,
+ closure_span,
+ expected_args,
+ found_args,
+ true,
+ )
+ .emit();
+
+ let error_sig = self.error_sig_of_closure(decl);
+
+ self.closure_sigs(expr_def_id, body, error_sig)
+ }
+
+ /// Enforce the user's types against the expectation. See
+ /// `sig_of_closure_with_expectation` for details on the overall
+ /// strategy.
+ fn check_supplied_sig_against_expectation(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ expected_sigs: &ClosureSignatures<'tcx>,
+ ) -> InferResult<'tcx, ()> {
+ // Get the signature S that the user gave.
+ //
+ // (See comment on `sig_of_closure_with_expectation` for the
+ // meaning of these letters.)
+ let supplied_sig = self.supplied_sig_of_closure(hir_id, expr_def_id, decl, body);
+
+ debug!("check_supplied_sig_against_expectation: supplied_sig={:?}", supplied_sig);
+
+ // FIXME(#45727): As discussed in [this comment][c1], naively
+ // forcing equality here actually results in suboptimal error
+ // messages in some cases. For now, if there would have been
+ // an obvious error, we fallback to declaring the type of the
+ // closure to be the one the user gave, which allows other
+ // error message code to trigger.
+ //
+ // However, I think [there is potential to do even better
+ // here][c2], since in *this* code we have the precise span of
+ // the type parameter in question in hand when we report the
+ // error.
+ //
+ // [c1]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341089706
+ // [c2]: https://github.com/rust-lang/rust/pull/45072#issuecomment-341096796
+ self.commit_if_ok(|_| {
+ let mut all_obligations = vec![];
+
+ // The liberated version of this signature should be a subtype
+ // of the liberated form of the expectation.
+ for ((hir_ty, &supplied_ty), expected_ty) in iter::zip(
+ iter::zip(
+ decl.inputs,
+ supplied_sig.inputs().skip_binder(), // binder moved to (*) below
+ ),
+ expected_sigs.liberated_sig.inputs(), // `liberated_sig` is E'.
+ ) {
+ // Instantiate (this part of..) S to S', i.e., with fresh variables.
+ let supplied_ty = self.replace_bound_vars_with_fresh_vars(
+ hir_ty.span,
+ LateBoundRegionConversionTime::FnCall,
+ supplied_sig.inputs().rebind(supplied_ty),
+ ); // recreated from (*) above
+
+ // Check that E' = S'.
+ let cause = self.misc(hir_ty.span);
+ let InferOk { value: (), obligations } =
+ self.at(&cause, self.param_env).eq(*expected_ty, supplied_ty)?;
+ all_obligations.extend(obligations);
+ }
+
+ let supplied_output_ty = self.replace_bound_vars_with_fresh_vars(
+ decl.output.span(),
+ LateBoundRegionConversionTime::FnCall,
+ supplied_sig.output(),
+ );
+ let cause = &self.misc(decl.output.span());
+ let InferOk { value: (), obligations } = self
+ .at(cause, self.param_env)
+ .eq(expected_sigs.liberated_sig.output(), supplied_output_ty)?;
+ all_obligations.extend(obligations);
+
+ Ok(InferOk { value: (), obligations: all_obligations })
+ })
+ }
+
+ /// If there is no expected signature, then we will convert the
+ /// types that the user gave into a signature.
+ ///
+ /// Also, record this closure signature for later.
+ #[instrument(skip(self, decl, body), level = "debug")]
+ fn supplied_sig_of_closure(
+ &self,
+ hir_id: hir::HirId,
+ expr_def_id: DefId,
+ decl: &hir::FnDecl<'_>,
+ body: &hir::Body<'_>,
+ ) -> ty::PolyFnSig<'tcx> {
+ let astconv: &dyn AstConv<'_> = self;
+
+ trace!("decl = {:#?}", decl);
+ debug!(?body.generator_kind);
+
+ let bound_vars = self.tcx.late_bound_vars(hir_id);
+
+ // First, convert the types that the user supplied (if any).
+ let supplied_arguments = decl.inputs.iter().map(|a| astconv.ast_ty_to_ty(a));
+ let supplied_return = match decl.output {
+ hir::FnRetTy::Return(ref output) => astconv.ast_ty_to_ty(&output),
+ hir::FnRetTy::DefaultReturn(_) => match body.generator_kind {
+ // In the case of the async block that we create for a function body,
+ // we expect the return type of the block to match that of the enclosing
+ // function.
+ Some(hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Fn)) => {
+ debug!("closure is async fn body");
+ self.deduce_future_output_from_obligations(expr_def_id, body.id().hir_id)
+ .unwrap_or_else(|| {
+ // AFAIK, deducing the future output
+ // always succeeds *except* in error cases
+ // like #65159. I'd like to return Error
+ // here, but I can't because I can't
+ // easily (and locally) prove that we
+ // *have* reported an
+ // error. --nikomatsakis
+ astconv.ty_infer(None, decl.output.span())
+ })
+ }
+
+ _ => astconv.ty_infer(None, decl.output.span()),
+ },
+ };
+
+ let result = ty::Binder::bind_with_vars(
+ self.tcx.mk_fn_sig(
+ supplied_arguments,
+ supplied_return,
+ decl.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ ),
+ bound_vars,
+ );
+
+ debug!(?result);
+
+ let c_result = self.inh.infcx.canonicalize_response(result);
+ self.typeck_results.borrow_mut().user_provided_sigs.insert(expr_def_id, c_result);
+
+ result
+ }
+
+ /// Invoked when we are translating the generator that results
+ /// from desugaring an `async fn`. Returns the "sugared" return
+ /// type of the `async fn` -- that is, the return type that the
+ /// user specified. The "desugared" return type is an `impl
+ /// Future<Output = T>`, so we do this by searching through the
+ /// obligations to extract the `T`.
+ #[instrument(skip(self), level = "debug")]
+ fn deduce_future_output_from_obligations(
+ &self,
+ expr_def_id: DefId,
+ body_id: hir::HirId,
+ ) -> Option<Ty<'tcx>> {
+ let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| {
+ span_bug!(self.tcx.def_span(expr_def_id), "async fn generator outside of a fn")
+ });
+
+ let ret_ty = ret_coercion.borrow().expected_ty();
+ let ret_ty = self.inh.infcx.shallow_resolve(ret_ty);
+
+ let get_future_output = |predicate: ty::Predicate<'tcx>, span| {
+ // Search for a pending obligation like
+ //
+ // `<R as Future>::Output = T`
+ //
+ // where R is the return type we are expecting. This type `T`
+ // will be our output.
+ let bound_predicate = predicate.kind();
+ if let ty::PredicateKind::Projection(proj_predicate) = bound_predicate.skip_binder() {
+ self.deduce_future_output_from_projection(
+ span,
+ bound_predicate.rebind(proj_predicate),
+ )
+ } else {
+ None
+ }
+ };
+
+ let output_ty = match *ret_ty.kind() {
+ ty::Infer(ty::TyVar(ret_vid)) => {
+ self.obligations_for_self_ty(ret_vid).find_map(|(_, obligation)| {
+ get_future_output(obligation.predicate, obligation.cause.span)
+ })?
+ }
+ ty::Opaque(def_id, substs) => self
+ .tcx
+ .bound_explicit_item_bounds(def_id)
+ .transpose_iter()
+ .map(|e| e.map_bound(|e| *e).transpose_tuple2())
+ .find_map(|(p, s)| get_future_output(p.subst(self.tcx, substs), s.0))?,
+ ty::Error(_) => return None,
+ _ => span_bug!(
+ self.tcx.def_span(expr_def_id),
+ "async fn generator return type not an inference variable"
+ ),
+ };
+
+ // `async fn`s that have opaque types in their return type need to redo the
+ // conversion to inference variables, as they fetch the still-opaque version
+ // from the signature.
+ let InferOk { value: output_ty, obligations } = self
+ .replace_opaque_types_with_inference_vars(
+ output_ty,
+ body_id,
+ self.tcx.def_span(expr_def_id),
+ self.param_env,
+ );
+ self.register_predicates(obligations);
+
+ debug!("deduce_future_output_from_obligations: output_ty={:?}", output_ty);
+ Some(output_ty)
+ }
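+
+ // E.g. (illustrative): for `async fn f() -> String { .. }` the body is a
+ // generator whose return type carries a pending obligation
+ // `<_ as Future>::Output = String`, from which the search above recovers
+ // `String` as the "sugared" return type.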
+
+ /// Given a projection like
+ ///
+ /// `<X as Future>::Output = T`
+ ///
+ /// where `X` is some type that has no late-bound regions, returns
+ /// `Some(T)`. If the projection is for some other trait, returns
+ /// `None`.
+ fn deduce_future_output_from_projection(
+ &self,
+ cause_span: Span,
+ predicate: ty::PolyProjectionPredicate<'tcx>,
+ ) -> Option<Ty<'tcx>> {
+ debug!("deduce_future_output_from_projection(predicate={:?})", predicate);
+
+ // We do not expect any bound regions in our predicate, so
+ // skip past the bound vars.
+ let Some(predicate) = predicate.no_bound_vars() else {
+ debug!("deduce_future_output_from_projection: has late-bound regions");
+ return None;
+ };
+
+ // Check that this is a projection from the `Future` trait.
+ let trait_def_id = predicate.projection_ty.trait_def_id(self.tcx);
+ let future_trait = self.tcx.require_lang_item(LangItem::Future, Some(cause_span));
+ if trait_def_id != future_trait {
+ debug!("deduce_future_output_from_projection: not a future");
+ return None;
+ }
+
+ // The `Future` trait has only one associated item, `Output`,
+ // so check that this is what we see.
+ let output_assoc_item = self.tcx.associated_item_def_ids(future_trait)[0];
+ if output_assoc_item != predicate.projection_ty.item_def_id {
+ span_bug!(
+ cause_span,
+ "projecting associated item `{:?}` from future, which is not Output `{:?}`",
+ predicate.projection_ty.item_def_id,
+ output_assoc_item,
+ );
+ }
+
+ // Extract the type from the projection. Note that there can
+ // be no bound variables in this type because the "self type"
+ // does not have any regions in it.
+ let output_ty = self.resolve_vars_if_possible(predicate.term);
+ debug!("deduce_future_output_from_projection: output_ty={:?}", output_ty);
+ // This is a projection on a Fn trait so will always be a type.
+ Some(output_ty.ty().unwrap())
+ }
+
+ /// Converts the types that the user supplied, so that any errors in
+ /// them are reported, but returns a signature where all parameters
+ /// are of type `TyErr`.
+ fn error_sig_of_closure(&self, decl: &hir::FnDecl<'_>) -> ty::PolyFnSig<'tcx> {
+ let astconv: &dyn AstConv<'_> = self;
+
+ let supplied_arguments = decl.inputs.iter().map(|a| {
+ // Convert the types that the user supplied (if any), but ignore them.
+ astconv.ast_ty_to_ty(a);
+ self.tcx.ty_error()
+ });
+
+ if let hir::FnRetTy::Return(ref output) = decl.output {
+ astconv.ast_ty_to_ty(&output);
+ }
+
+ let result = ty::Binder::dummy(self.tcx.mk_fn_sig(
+ supplied_arguments,
+ self.tcx.ty_error(),
+ decl.c_variadic,
+ hir::Unsafety::Normal,
+ Abi::RustCall,
+ ));
+
+ debug!("supplied_sig_of_closure: result={:?}", result);
+
+ result
+ }
+
+ fn closure_sigs(
+ &self,
+ expr_def_id: DefId,
+ body: &hir::Body<'_>,
+ bound_sig: ty::PolyFnSig<'tcx>,
+ ) -> ClosureSignatures<'tcx> {
+ let liberated_sig = self.tcx().liberate_late_bound_regions(expr_def_id, bound_sig);
+ let liberated_sig = self.inh.normalize_associated_types_in(
+ body.value.span,
+ body.value.hir_id,
+ self.param_env,
+ liberated_sig,
+ );
+ ClosureSignatures { bound_sig, liberated_sig }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/coercion.rs b/compiler/rustc_typeck/src/check/coercion.rs
new file mode 100644
index 000000000..2ed5f569b
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/coercion.rs
@@ -0,0 +1,1804 @@
+//! # Type Coercion
+//!
+//! Under certain circumstances we will coerce from one type to another,
+//! for example by auto-borrowing. This occurs in situations where the
+//! compiler has a firm 'expected type' that was supplied from the user,
+//! and where the actual type is similar to that expected type in purpose
+//! but not in representation (so actual subtyping is inappropriate).
+//!
+//! ## Reborrowing
+//!
+//! Note that if we are expecting a reference, we will *reborrow*
+//! even if the argument provided was already a reference. This is
+//! useful for freezing mut things (that is, when the expected type is &T
+//! but you have &mut T) and also for avoiding the linearity
+//! of mut things (when the expected is &mut T and you have &mut T). See
+//! the various `src/test/ui/coerce/*.rs` tests for
+//! examples of where this is useful.
+//!
+//! ## Subtle note
+//!
+//! When inferring the generic arguments of functions, the argument
+//! order is relevant, which can lead to the following edge case:
+//!
+//! ```ignore (illustrative)
+//! fn foo<T>(a: T, b: T) {
+//! // ...
+//! }
+//!
+//! foo(&7i32, &mut 7i32);
+//! // This compiles, as we first infer `T` to be `&i32`,
+//! // and then coerce `&mut 7i32` to `&7i32`.
+//!
+//! foo(&mut 7i32, &7i32);
+//! // This does not compile, as we first infer `T` to be `&mut i32`
+//! // and are then unable to coerce `&7i32` to `&mut i32`.
+//! ```
+
+use crate::astconv::AstConv;
+use crate::check::FnCtxt;
+use rustc_errors::{
+ struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
+};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::{Coercion, InferOk, InferResult};
+use rustc_infer::traits::{Obligation, TraitEngine, TraitEngineExt};
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::ty::adjustment::{
+ Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCast,
+};
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::relate::RelateResult;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, ToPredicate, Ty, TypeAndMut};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::sym;
+use rustc_span::{self, BytePos, DesugaringKind, Span};
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::{self, ObligationCause, ObligationCauseCode};
+
+use smallvec::{smallvec, SmallVec};
+use std::ops::Deref;
+
+struct Coerce<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ use_lub: bool,
+ /// Determines whether or not `allow_two_phase_borrow` is set on any
+ /// autoref adjustments we create while coercing. We don't want to
+ /// allow deref coercions to create two-phase borrows, at least initially,
+ /// but we do need two-phase borrows for function argument reborrows.
+ /// See #47489 and #48598.
+ /// See the docs on the `AllowTwoPhase` type for a more detailed discussion.
+ allow_two_phase: AllowTwoPhase,
+}
+
+impl<'a, 'tcx> Deref for Coerce<'a, 'tcx> {
+ type Target = FnCtxt<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.fcx
+ }
+}
+
+type CoerceResult<'tcx> = InferResult<'tcx, (Vec<Adjustment<'tcx>>, Ty<'tcx>)>;
+
+/// Coercing a mutable reference to an immutable works, while
+/// coercing `&T` to `&mut T` should be forbidden.
+fn coerce_mutbls<'tcx>(
+ from_mutbl: hir::Mutability,
+ to_mutbl: hir::Mutability,
+) -> RelateResult<'tcx, ()> {
+ match (from_mutbl, to_mutbl) {
+ (hir::Mutability::Mut, hir::Mutability::Mut | hir::Mutability::Not)
+ | (hir::Mutability::Not, hir::Mutability::Not) => Ok(()),
+ (hir::Mutability::Not, hir::Mutability::Mut) => Err(TypeError::Mutability),
+ }
+}
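+
+// E.g. (illustrative):
+//
+//     let _: &u32 = &mut 0u32; // ok: `&mut T` coerces to `&T`
+//     let _: &mut u32 = &0u32; // error: `&T` never coerces to `&mut T`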
+
+/// Do not require any adjustments, i.e. coerce `x -> x`.
+fn identity(_: Ty<'_>) -> Vec<Adjustment<'_>> {
+ vec![]
+}
+
+fn simple<'tcx>(kind: Adjust<'tcx>) -> impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>> {
+ move |target| vec![Adjustment { kind, target }]
+}
+
+/// This always returns `Ok(...)`.
+fn success<'tcx>(
+ adj: Vec<Adjustment<'tcx>>,
+ target: Ty<'tcx>,
+ obligations: traits::PredicateObligations<'tcx>,
+) -> CoerceResult<'tcx> {
+ Ok(InferOk { value: (adj, target), obligations })
+}
+
+impl<'f, 'tcx> Coerce<'f, 'tcx> {
+ fn new(
+ fcx: &'f FnCtxt<'f, 'tcx>,
+ cause: ObligationCause<'tcx>,
+ allow_two_phase: AllowTwoPhase,
+ ) -> Self {
+ Coerce { fcx, cause, allow_two_phase, use_lub: false }
+ }
+
+ fn unify(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> InferResult<'tcx, Ty<'tcx>> {
+ debug!("unify(a: {:?}, b: {:?}, use_lub: {})", a, b, self.use_lub);
+ self.commit_if_ok(|_| {
+ if self.use_lub {
+ self.at(&self.cause, self.fcx.param_env).lub(b, a)
+ } else {
+ self.at(&self.cause, self.fcx.param_env)
+ .sup(b, a)
+ .map(|InferOk { value: (), obligations }| InferOk { value: a, obligations })
+ }
+ })
+ }
+
+ /// Unify two types (using sub or lub) and produce a specific coercion.
+ fn unify_and<F>(&self, a: Ty<'tcx>, b: Ty<'tcx>, f: F) -> CoerceResult<'tcx>
+ where
+ F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
+ {
+ self.unify(a, b)
+ .and_then(|InferOk { value: ty, obligations }| success(f(ty), ty, obligations))
+ }
+
+ #[instrument(skip(self))]
+ fn coerce(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
+ // First, remove any resolved type variables (at the top level, at least):
+ let a = self.shallow_resolve(a);
+ let b = self.shallow_resolve(b);
+ debug!("Coerce.tys({:?} => {:?})", a, b);
+
+ // Just ignore error types.
+ if a.references_error() || b.references_error() {
+ return success(vec![], self.fcx.tcx.ty_error(), vec![]);
+ }
+
+ // Coercing from `!` to any type is allowed:
+ if a.is_never() {
+ return success(simple(Adjust::NeverToAny)(b), b, vec![]);
+ }
+
+ // Coercing *from* an unresolved inference variable means that
+ // we have no information about the source type. This will always
+ // ultimately fall back to some form of subtyping.
+ if a.is_ty_var() {
+ return self.coerce_from_inference_variable(a, b, identity);
+ }
+
+ // Consider coercing the subtype to a DST
+ //
+ // NOTE: this is wrapped in a `commit_if_ok` because it creates
+ // a "spurious" type variable, and we don't want to have that
+ // type variable in memory if the coercion fails.
+ let unsize = self.commit_if_ok(|_| self.coerce_unsized(a, b));
+ match unsize {
+ Ok(_) => {
+ debug!("coerce: unsize successful");
+ return unsize;
+ }
+ Err(TypeError::ObjectUnsafeCoercion(did)) => {
+ debug!("coerce: unsize not object safe");
+ return Err(TypeError::ObjectUnsafeCoercion(did));
+ }
+ Err(error) => {
+ debug!(?error, "coerce: unsize failed");
+ }
+ }
+
+ // Examine the supertype and consider auto-borrowing.
+ match *b.kind() {
+ ty::RawPtr(mt_b) => {
+ return self.coerce_unsafe_ptr(a, b, mt_b.mutbl);
+ }
+ ty::Ref(r_b, _, mutbl_b) => {
+ return self.coerce_borrowed_pointer(a, b, r_b, mutbl_b);
+ }
+ _ => {}
+ }
+
+ match *a.kind() {
+ ty::FnDef(..) => {
+ // Function items are coercible to any closure
+ // type; function pointers are not (that would
+ // require double indirection).
+ // Additionally, we permit coercion of function
+ // items to drop the unsafe qualifier.
+ self.coerce_from_fn_item(a, b)
+ }
+ ty::FnPtr(a_f) => {
+ // We permit coercion of fn pointers to drop the
+ // unsafe qualifier.
+ self.coerce_from_fn_pointer(a, a_f, b)
+ }
+ ty::Closure(closure_def_id_a, substs_a) => {
+ // Non-capturing closures are coercible to
+ // function pointers or unsafe function pointers.
+ // It cannot convert closures that require unsafe.
+ self.coerce_closure_to_fn(a, closure_def_id_a, substs_a, b)
+ }
+ _ => {
+ // Otherwise, just use unification rules.
+ self.unify_and(a, b, identity)
+ }
+ }
+ }
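+
+ // The `FnDef`, `FnPtr` and `Closure` arms above allow, e.g. (illustrative):
+ //
+ // fn f(x: u32) -> u32 { x }
+ // let g: fn(u32) -> u32 = f; // function item -> function pointer
+ // let h: unsafe fn(u32) -> u32 = f; // safe fn -> `unsafe fn` pointer
+ // let c: fn(u32) -> u32 = |x| x + 1; // non-capturing closure -> fn pointer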
+
+ /// Coercing *from* an inference variable. In this case, we have no information
+ /// about the source type, so we can't really do a true coercion and we always
+ /// fall back to subtyping (`unify_and`).
+ fn coerce_from_inference_variable(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ make_adjustments: impl FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
+ ) -> CoerceResult<'tcx> {
+ debug!("coerce_from_inference_variable(a={:?}, b={:?})", a, b);
+ assert!(a.is_ty_var() && self.shallow_resolve(a) == a);
+ assert!(self.shallow_resolve(b) == b);
+
+ if b.is_ty_var() {
+ // Two unresolved type variables: create a `Coerce` predicate.
+ let target_ty = if self.use_lub {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::LatticeVariable,
+ span: self.cause.span,
+ })
+ } else {
+ b
+ };
+
+ let mut obligations = Vec::with_capacity(2);
+ for &source_ty in &[a, b] {
+ if source_ty != target_ty {
+ obligations.push(Obligation::new(
+ self.cause.clone(),
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::Coerce(ty::CoercePredicate {
+ a: source_ty,
+ b: target_ty,
+ }))
+ .to_predicate(self.tcx()),
+ ));
+ }
+ }
+
+ debug!(
+ "coerce_from_inference_variable: two inference variables, target_ty={:?}, obligations={:?}",
+ target_ty, obligations
+ );
+ let adjustments = make_adjustments(target_ty);
+ InferResult::Ok(InferOk { value: (adjustments, target_ty), obligations })
+ } else {
+ // One unresolved type variable: just apply subtyping, we may be able
+ // to do something useful.
+ self.unify_and(a, b, make_adjustments)
+ }
+ }
+
+ /// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
+ /// To match `A` with `B`, autoderef will be performed,
+ /// calling `deref`/`deref_mut` where necessary.
+ fn coerce_borrowed_pointer(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ r_b: ty::Region<'tcx>,
+ mutbl_b: hir::Mutability,
+ ) -> CoerceResult<'tcx> {
+ debug!("coerce_borrowed_pointer(a={:?}, b={:?})", a, b);
+
+ // If we have a parameter of type `&M T_a` and the value
+ // provided is `expr`, we will be adding an implicit borrow,
+ // meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore,
+ // to type check, we will construct the type that `&M *expr` would
+ // yield.
+
+ let (r_a, mt_a) = match *a.kind() {
+ ty::Ref(r_a, ty, mutbl) => {
+ let mt_a = ty::TypeAndMut { ty, mutbl };
+ coerce_mutbls(mt_a.mutbl, mutbl_b)?;
+ (r_a, mt_a)
+ }
+ _ => return self.unify_and(a, b, identity),
+ };
+
+ let span = self.cause.span;
+
+ let mut first_error = None;
+ let mut r_borrow_var = None;
+ let mut autoderef = self.autoderef(span, a);
+ let mut found = None;
+
+ for (referent_ty, autoderefs) in autoderef.by_ref() {
+ if autoderefs == 0 {
+ // Don't let this pass, otherwise it would cause
+ // &T to autoref to &&T.
+ continue;
+ }
+
+ // At this point, we have deref'd `a` to `referent_ty`. So
+ // imagine we are coercing from `&'a mut Vec<T>` to `&'b mut [T]`.
+ // In the autoderef loop for `&'a mut Vec<T>`, we would get
+ // three callbacks:
+ //
+ // - `&'a mut Vec<T>` -- 0 derefs, just ignore it
+ // - `Vec<T>` -- 1 deref
+ // - `[T]` -- 2 deref
+ //
+ // At each point after the first callback, we want to
+ // check to see whether this would match our target type
+ // (`&'b mut [T]`) if we autoref'd it. We can't just
+ // compare the referent types, though, because we still
+ // have to consider the mutability. E.g., in the case
+ // we've been considering, we have an `&mut` reference, so
+ // the `T` in `[T]` needs to be unified with equality.
+ //
+ // Therefore, we construct reference types reflecting what
+ // the types will be after we do the final auto-ref and
+ // compare those. Note that this means we use the target
+ // mutability [1], since it may be that we are coercing
+ // from `&mut T` to `&U`.
+ //
+ // One fine point concerns the region that we use. We
+ // choose the region such that the region of the final
+ // type that results from `unify` will be the region we
+ // want for the autoref:
+ //
+ // - if in sub mode, that means we want to use `'b` (the
+ // region from the target reference) for both
+ // pointers [2]. This is because sub mode (somewhat
+ // arbitrarily) returns the subtype region. In the case
+ // where we are coercing to a target type, we know we
+ // want to use that target type region (`'b`) because --
+ // for the program to type-check -- it must be the
+ // smaller of the two.
+ // - One fine point. It may be surprising that we can
+ // use `'b` without relating `'a` and `'b`. The reason
+ // that this is ok is that what we produce is
+ // effectively a `&'b *x` expression (if you could
+ // annotate the region of a borrow), and regionck has
+ // code that adds edges from the region of a borrow
+ // (`'b`, here) into the regions in the borrowed
+ // expression (`*x`, here). (Search for "link".)
+ // - if in lub mode, things can get fairly complicated. The
+ // easiest thing is just to make a fresh
+ // region variable [4], which effectively means we defer
+ // the decision to region inference (and regionck, which will add
+ // some more edges to this variable). However, this can wind up
+ // creating a crippling number of variables in some cases --
+ // e.g., #32278 -- so we optimize one particular case [3].
+ // Let me try to explain with some examples:
+ // - The "running example" above represents the simple case,
+ // where we have one `&` reference at the outer level and
+ // ownership all the rest of the way down. In this case,
+ // we want `LUB('a, 'b)` as the resulting region.
+ // - However, if there are nested borrows, that region is
+ // too strong. Consider a coercion from `&'a &'x Rc<T>` to
+ // `&'b T`. In this case, `'a` is actually irrelevant.
+            //     The pointer we want is `LUB('x, 'b)`. If we choose `LUB('a,'b)`
+ // we get spurious errors (`ui/regions-lub-ref-ref-rc.rs`).
+ // (The errors actually show up in borrowck, typically, because
+ // this extra edge causes the region `'a` to be inferred to something
+ // too big, which then results in borrowck errors.)
+ // - We could track the innermost shared reference, but there is already
+ // code in regionck that has the job of creating links between
+ // the region of a borrow and the regions in the thing being
+ // borrowed (here, `'a` and `'x`), and it knows how to handle
+ // all the various cases. So instead we just make a region variable
+ // and let regionck figure it out.
+ let r = if !self.use_lub {
+ r_b // [2] above
+ } else if autoderefs == 1 {
+ r_a // [3] above
+ } else {
+ if r_borrow_var.is_none() {
+ // create var lazily, at most once
+ let coercion = Coercion(span);
+ let r = self.next_region_var(coercion);
+ r_borrow_var = Some(r); // [4] above
+ }
+ r_borrow_var.unwrap()
+ };
+ let derefd_ty_a = self.tcx.mk_ref(
+ r,
+ TypeAndMut {
+ ty: referent_ty,
+ mutbl: mutbl_b, // [1] above
+ },
+ );
+ match self.unify(derefd_ty_a, b) {
+ Ok(ok) => {
+ found = Some(ok);
+ break;
+ }
+ Err(err) => {
+ if first_error.is_none() {
+ first_error = Some(err);
+ }
+ }
+ }
+ }
+
+ // Extract type or return an error. We return the first error
+ // we got, which should be from relating the "base" type
+        // (e.g., in the example above, the failure from relating `Vec<T>`
+ // to the target type), since that should be the least
+ // confusing.
+ let Some(InferOk { value: ty, mut obligations }) = found else {
+ let err = first_error.expect("coerce_borrowed_pointer had no error");
+ debug!("coerce_borrowed_pointer: failed with err = {:?}", err);
+ return Err(err);
+ };
+
+ if ty == a && mt_a.mutbl == hir::Mutability::Not && autoderef.step_count() == 1 {
+ // As a special case, if we would produce `&'a *x`, that's
+ // a total no-op. We end up with the type `&'a T` just as
+ // we started with. In that case, just skip it
+ // altogether. This is just an optimization.
+ //
+ // Note that for `&mut`, we DO want to reborrow --
+ // otherwise, this would be a move, which might be an
+ // error. For example `foo(self.x)` where `self` and
+            // `self.x` both have `&mut` type would be a move of
+ // `self.x`, but we auto-coerce it to `foo(&mut *self.x)`,
+ // which is a borrow.
+ assert_eq!(mutbl_b, hir::Mutability::Not); // can only coerce &T -> &U
+ return success(vec![], ty, obligations);
+ }
+
+ let InferOk { value: mut adjustments, obligations: o } =
+ self.adjust_steps_as_infer_ok(&autoderef);
+ obligations.extend(o);
+ obligations.extend(autoderef.into_obligations());
+
+ // Now apply the autoref. We have to extract the region out of
+ // the final ref type we got.
+ let ty::Ref(r_borrow, _, _) = ty.kind() else {
+ span_bug!(span, "expected a ref type, got {:?}", ty);
+ };
+ let mutbl = match mutbl_b {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => {
+ AutoBorrowMutability::Mut { allow_two_phase_borrow: self.allow_two_phase }
+ }
+ };
+ adjustments.push(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*r_borrow, mutbl)),
+ target: ty,
+ });
+
+ debug!("coerce_borrowed_pointer: succeeded ty={:?} adjustments={:?}", ty, adjustments);
+
+ success(adjustments, ty, obligations)
+ }
+
+ // &[T; n] or &mut [T; n] -> &[T]
+ // or &mut [T; n] -> &mut [T]
+ // or &Concrete -> &Trait, etc.
+ #[instrument(skip(self), level = "debug")]
+ fn coerce_unsized(&self, mut source: Ty<'tcx>, mut target: Ty<'tcx>) -> CoerceResult<'tcx> {
+ source = self.shallow_resolve(source);
+ target = self.shallow_resolve(target);
+ debug!(?source, ?target);
+
+ // These 'if' statements require some explanation.
+ // The `CoerceUnsized` trait is special - it is only
+ // possible to write `impl CoerceUnsized<B> for A` where
+ // A and B have 'matching' fields. This rules out the following
+ // two types of blanket impls:
+ //
+ // `impl<T> CoerceUnsized<T> for SomeType`
+ // `impl<T> CoerceUnsized<SomeType> for T`
+ //
+ // Both of these trigger a special `CoerceUnsized`-related error (E0376)
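+        //
+        // (By contrast, a *permitted* impl pairs matching fields, like the
+        // standard library's impl for `Box`, which is roughly
+        // `impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Box<U>> for Box<T>`.)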
+ //
+ // We can take advantage of this fact to avoid performing unnecessary work.
+ // If either `source` or `target` is a type variable, then any applicable impl
+ // would need to be generic over the self-type (`impl<T> CoerceUnsized<SomeType> for T`)
+ // or generic over the `CoerceUnsized` type parameter (`impl<T> CoerceUnsized<T> for
+ // SomeType`).
+ //
+ // However, these are exactly the kinds of impls which are forbidden by
+ // the compiler! Therefore, we can be sure that coercion will always fail
+ // when either the source or target type is a type variable. This allows us
+ // to skip performing any trait selection, and immediately bail out.
+ if source.is_ty_var() {
+ debug!("coerce_unsized: source is a TyVar, bailing out");
+ return Err(TypeError::Mismatch);
+ }
+ if target.is_ty_var() {
+ debug!("coerce_unsized: target is a TyVar, bailing out");
+ return Err(TypeError::Mismatch);
+ }
+
+ let traits =
+ (self.tcx.lang_items().unsize_trait(), self.tcx.lang_items().coerce_unsized_trait());
+ let (Some(unsize_did), Some(coerce_unsized_did)) = traits else {
+ debug!("missing Unsize or CoerceUnsized traits");
+ return Err(TypeError::Mismatch);
+ };
+
+ // Note, we want to avoid unnecessary unsizing. We don't want to coerce to
+ // a DST unless we have to. This currently comes out in the wash since
+ // we can't unify [T] with U. But to properly support DST, we need to allow
+ // that, at which point we will need extra checks on the target here.
+
+ // Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
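+        // (Illustrative example: coercing `&mut [u8; 4]` to `&[u8]` first
+        // reborrows the source as a shared `&[u8; 4]`, then unsizes that
+        // to `&[u8]`.)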
+ let reborrow = match (source.kind(), target.kind()) {
+ (&ty::Ref(_, ty_a, mutbl_a), &ty::Ref(_, _, mutbl_b)) => {
+ coerce_mutbls(mutbl_a, mutbl_b)?;
+
+ let coercion = Coercion(self.cause.span);
+ let r_borrow = self.next_region_var(coercion);
+ let mutbl = match mutbl_b {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+                        // We don't allow two-phase borrows here, at least for the initial
+                        // implementation. If this coercion happens to be a function argument,
+                        // the reborrow in `coerce_borrowed_pointer` will pick it up.
+ allow_two_phase_borrow: AllowTwoPhase::No,
+ },
+ };
+ Some((
+ Adjustment { kind: Adjust::Deref(None), target: ty_a },
+ Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(r_borrow, mutbl)),
+ target: self
+ .tcx
+ .mk_ref(r_borrow, ty::TypeAndMut { mutbl: mutbl_b, ty: ty_a }),
+ },
+ ))
+ }
+ (&ty::Ref(_, ty_a, mt_a), &ty::RawPtr(ty::TypeAndMut { mutbl: mt_b, .. })) => {
+ coerce_mutbls(mt_a, mt_b)?;
+
+ Some((
+ Adjustment { kind: Adjust::Deref(None), target: ty_a },
+ Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::RawPtr(mt_b)),
+ target: self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mt_b, ty: ty_a }),
+ },
+ ))
+ }
+ _ => None,
+ };
+ let coerce_source = reborrow.as_ref().map_or(source, |&(_, ref r)| r.target);
+
+        // Set up either a subtyping or a LUB relationship between
+ // the `CoerceUnsized` target type and the expected type.
+ // We only have the latter, so we use an inference variable
+ // for the former and let type inference do the rest.
+ let origin = TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: self.cause.span,
+ };
+ let coerce_target = self.next_ty_var(origin);
+ let mut coercion = self.unify_and(coerce_target, target, |target| {
+ let unsize = Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target };
+ match reborrow {
+ None => vec![unsize],
+ Some((ref deref, ref autoref)) => vec![deref.clone(), autoref.clone(), unsize],
+ }
+ })?;
+
+ let mut selcx = traits::SelectionContext::new(self);
+
+ // Create an obligation for `Source: CoerceUnsized<Target>`.
+ let cause = ObligationCause::new(
+ self.cause.span,
+ self.body_id,
+ ObligationCauseCode::Coercion { source, target },
+ );
+
+ // Use a FIFO queue for this custom fulfillment procedure.
+ //
+ // A Vec (or SmallVec) is not a natural choice for a queue. However,
+ // this code path is hot, and this queue usually has a max length of 1
+ // and almost never more than 3. By using a SmallVec we avoid an
+ // allocation, at the (very small) cost of (occasionally) having to
+ // shift subsequent elements down when removing the front element.
+ let mut queue: SmallVec<[_; 4]> = smallvec![traits::predicate_for_trait_def(
+ self.tcx,
+ self.fcx.param_env,
+ cause,
+ coerce_unsized_did,
+ 0,
+ coerce_source,
+ &[coerce_target.into()]
+ )];
+
+ let mut has_unsized_tuple_coercion = false;
+ let mut has_trait_upcasting_coercion = None;
+
+ // Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
+ // emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
+ // inference might unify those two inner type variables later.
+ let traits = [coerce_unsized_did, unsize_did];
+ while !queue.is_empty() {
+ let obligation = queue.remove(0);
+ debug!("coerce_unsized resolve step: {:?}", obligation);
+ let bound_predicate = obligation.predicate.kind();
+ let trait_pred = match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(trait_pred) if traits.contains(&trait_pred.def_id()) => {
+ if unsize_did == trait_pred.def_id() {
+ let self_ty = trait_pred.self_ty();
+ let unsize_ty = trait_pred.trait_ref.substs[1].expect_ty();
+ if let (ty::Dynamic(ref data_a, ..), ty::Dynamic(ref data_b, ..)) =
+ (self_ty.kind(), unsize_ty.kind())
+ && data_a.principal_def_id() != data_b.principal_def_id()
+ {
+ debug!("coerce_unsized: found trait upcasting coercion");
+ has_trait_upcasting_coercion = Some((self_ty, unsize_ty));
+ }
+ if let ty::Tuple(..) = unsize_ty.kind() {
+ debug!("coerce_unsized: found unsized tuple coercion");
+ has_unsized_tuple_coercion = true;
+ }
+ }
+ bound_predicate.rebind(trait_pred)
+ }
+ _ => {
+ coercion.obligations.push(obligation);
+ continue;
+ }
+ };
+ match selcx.select(&obligation.with(trait_pred)) {
+ // Uncertain or unimplemented.
+ Ok(None) => {
+ if trait_pred.def_id() == unsize_did {
+ let trait_pred = self.resolve_vars_if_possible(trait_pred);
+ let self_ty = trait_pred.skip_binder().self_ty();
+ let unsize_ty = trait_pred.skip_binder().trait_ref.substs[1].expect_ty();
+ debug!("coerce_unsized: ambiguous unsize case for {:?}", trait_pred);
+ match (&self_ty.kind(), &unsize_ty.kind()) {
+ (ty::Infer(ty::TyVar(v)), ty::Dynamic(..))
+ if self.type_var_is_sized(*v) =>
+ {
+ debug!("coerce_unsized: have sized infer {:?}", v);
+ coercion.obligations.push(obligation);
+ // `$0: Unsize<dyn Trait>` where we know that `$0: Sized`, try going
+ // for unsizing.
+ }
+ _ => {
+ // Some other case for `$0: Unsize<Something>`. Note that we
+ // hit this case even if `Something` is a sized type, so just
+ // don't do the coercion.
+ debug!("coerce_unsized: ambiguous unsize");
+ return Err(TypeError::Mismatch);
+ }
+ }
+ } else {
+ debug!("coerce_unsized: early return - ambiguous");
+ return Err(TypeError::Mismatch);
+ }
+ }
+ Err(traits::Unimplemented) => {
+ debug!("coerce_unsized: early return - can't prove obligation");
+ return Err(TypeError::Mismatch);
+ }
+
+ // Object safety violations or miscellaneous.
+ Err(err) => {
+ self.report_selection_error(obligation.clone(), &obligation, &err, false);
+ // Treat this like an obligation and follow through
+ // with the unsizing - the lack of a coercion should
+ // be silent, as it causes a type mismatch later.
+ }
+
+ Ok(Some(impl_source)) => queue.extend(impl_source.nested_obligations()),
+ }
+ }
+
+ if has_unsized_tuple_coercion && !self.tcx.features().unsized_tuple_coercion {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::unsized_tuple_coercion,
+ self.cause.span,
+ "unsized tuple coercion is not stable enough for use and is subject to change",
+ )
+ .emit();
+ }
+
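+        // Trait upcasting means e.g. coercing `&dyn Sub` to `&dyn Super`
+        // where `trait Sub: Super`; it is gated behind the
+        // `trait_upcasting` feature below.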
+ if let Some((sub, sup)) = has_trait_upcasting_coercion
+ && !self.tcx().features().trait_upcasting
+ {
+ // Renders better when we erase regions, since they're not really the point here.
+ let (sub, sup) = self.tcx.erase_regions((sub, sup));
+ let mut err = feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::trait_upcasting,
+ self.cause.span,
+ &format!("cannot cast `{sub}` to `{sup}`, trait upcasting coercion is experimental"),
+ );
+ err.note(&format!("required when coercing `{source}` into `{target}`"));
+ err.emit();
+ }
+
+ Ok(coercion)
+ }
+
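+    /// Shared helper for the fn-pointer coercions below: attempts to
+    /// coerce `a` (whose signature is `fn_ty_a`) to `b`, routing through
+    /// a safe-to-unsafe fn pointer adjustment when `b` is an unsafe fn
+    /// pointer and `a` is safe; `to_unsafe` and `normal` build the
+    /// adjustment lists for the two outcomes.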
+ fn coerce_from_safe_fn<F, G>(
+ &self,
+ a: Ty<'tcx>,
+ fn_ty_a: ty::PolyFnSig<'tcx>,
+ b: Ty<'tcx>,
+ to_unsafe: F,
+ normal: G,
+ ) -> CoerceResult<'tcx>
+ where
+ F: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
+ G: FnOnce(Ty<'tcx>) -> Vec<Adjustment<'tcx>>,
+ {
+ self.commit_if_ok(|snapshot| {
+ let result = if let ty::FnPtr(fn_ty_b) = b.kind()
+ && let (hir::Unsafety::Normal, hir::Unsafety::Unsafe) =
+ (fn_ty_a.unsafety(), fn_ty_b.unsafety())
+ {
+ let unsafe_a = self.tcx.safe_to_unsafe_fn_ty(fn_ty_a);
+ self.unify_and(unsafe_a, b, to_unsafe)
+ } else {
+ self.unify_and(a, b, normal)
+ };
+
+ // FIXME(#73154): This is a hack. Currently LUB can generate
+ // unsolvable constraints. Additionally, it returns `a`
+ // unconditionally, even when the "LUB" is `b`. In the future, we
+ // want the coerced type to be the actual supertype of these two,
+ // but for now, we want to just error to ensure we don't lock
+ // ourselves into a specific behavior with NLL.
+ self.leak_check(false, snapshot)?;
+
+ result
+ })
+ }
+
+ fn coerce_from_fn_pointer(
+ &self,
+ a: Ty<'tcx>,
+ fn_ty_a: ty::PolyFnSig<'tcx>,
+ b: Ty<'tcx>,
+ ) -> CoerceResult<'tcx> {
+        //! Attempts to coerce from one function pointer type to another,
+        //! inserting a safe-to-unsafe fn pointer adjustment if needed
+        //! (e.g., `fn()` to `unsafe fn()`).
+
+ let b = self.shallow_resolve(b);
+ debug!("coerce_from_fn_pointer(a={:?}, b={:?})", a, b);
+
+ self.coerce_from_safe_fn(
+ a,
+ fn_ty_a,
+ b,
+ simple(Adjust::Pointer(PointerCast::UnsafeFnPointer)),
+ identity,
+ )
+ }
+
+ fn coerce_from_fn_item(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
+ //! Attempts to coerce from the type of a Rust function item
+        //! into a function pointer.
+
+ let b = self.shallow_resolve(b);
+ let InferOk { value: b, mut obligations } =
+ self.normalize_associated_types_in_as_infer_ok(self.cause.span, b);
+ debug!("coerce_from_fn_item(a={:?}, b={:?})", a, b);
+
+ match b.kind() {
+ ty::FnPtr(b_sig) => {
+ let a_sig = a.fn_sig(self.tcx);
+ if let ty::FnDef(def_id, _) = *a.kind() {
+ // Intrinsics are not coercible to function pointers
+ if self.tcx.is_intrinsic(def_id) {
+ return Err(TypeError::IntrinsicCast);
+ }
+
+ // Safe `#[target_feature]` functions are not assignable to safe fn pointers (RFC 2396).
+
+ if b_sig.unsafety() == hir::Unsafety::Normal
+ && !self.tcx.codegen_fn_attrs(def_id).target_features.is_empty()
+ {
+ return Err(TypeError::TargetFeatureCast(def_id));
+ }
+ }
+
+ let InferOk { value: a_sig, obligations: o1 } =
+ self.normalize_associated_types_in_as_infer_ok(self.cause.span, a_sig);
+ obligations.extend(o1);
+
+ let a_fn_pointer = self.tcx.mk_fn_ptr(a_sig);
+ let InferOk { value, obligations: o2 } = self.coerce_from_safe_fn(
+ a_fn_pointer,
+ a_sig,
+ b,
+ |unsafe_ty| {
+ vec![
+ Adjustment {
+ kind: Adjust::Pointer(PointerCast::ReifyFnPointer),
+ target: a_fn_pointer,
+ },
+ Adjustment {
+ kind: Adjust::Pointer(PointerCast::UnsafeFnPointer),
+ target: unsafe_ty,
+ },
+ ]
+ },
+ simple(Adjust::Pointer(PointerCast::ReifyFnPointer)),
+ )?;
+
+ obligations.extend(o2);
+ Ok(InferOk { value, obligations })
+ }
+ _ => self.unify_and(a, b, identity),
+ }
+ }
+
+ fn coerce_closure_to_fn(
+ &self,
+ a: Ty<'tcx>,
+ closure_def_id_a: DefId,
+ substs_a: SubstsRef<'tcx>,
+ b: Ty<'tcx>,
+ ) -> CoerceResult<'tcx> {
+ //! Attempts to coerce from the type of a non-capturing closure
+ //! into a function pointer.
+
+ let b = self.shallow_resolve(b);
+
+ match b.kind() {
+ // At this point we haven't done capture analysis, which means
+ // that the ClosureSubsts just contains an inference variable instead
+ // of tuple of captured types.
+ //
+ // All we care here is if any variable is being captured and not the exact paths,
+ // so we check `upvars_mentioned` for root variables being captured.
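+            // (For example, `|x: u8| x + 1` captures nothing and can
+            // coerce to `fn(u8) -> u8`; a closure that captures a local
+            // variable cannot.)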
+ ty::FnPtr(fn_ty)
+ if self
+ .tcx
+ .upvars_mentioned(closure_def_id_a.expect_local())
+ .map_or(true, |u| u.is_empty()) =>
+ {
+ // We coerce the closure, which has fn type
+ // `extern "rust-call" fn((arg0,arg1,...)) -> _`
+ // to
+ // `fn(arg0,arg1,...) -> _`
+ // or
+ // `unsafe fn(arg0,arg1,...) -> _`
+ let closure_sig = substs_a.as_closure().sig();
+ let unsafety = fn_ty.unsafety();
+ let pointer_ty =
+ self.tcx.mk_fn_ptr(self.tcx.signature_unclosure(closure_sig, unsafety));
+ debug!("coerce_closure_to_fn(a={:?}, b={:?}, pty={:?})", a, b, pointer_ty);
+ self.unify_and(
+ pointer_ty,
+ b,
+ simple(Adjust::Pointer(PointerCast::ClosureFnPointer(unsafety))),
+ )
+ }
+ _ => self.unify_and(a, b, identity),
+ }
+ }
+
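+    /// Coerces a reference (`&T`/`&mut T`) or a raw pointer to a raw
+    /// pointer with mutability `mutbl_b`, e.g. `&mut u8` to `*const u8`
+    /// or `*mut u8` to `*const u8`.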
+ fn coerce_unsafe_ptr(
+ &self,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ mutbl_b: hir::Mutability,
+ ) -> CoerceResult<'tcx> {
+ debug!("coerce_unsafe_ptr(a={:?}, b={:?})", a, b);
+
+ let (is_ref, mt_a) = match *a.kind() {
+ ty::Ref(_, ty, mutbl) => (true, ty::TypeAndMut { ty, mutbl }),
+ ty::RawPtr(mt) => (false, mt),
+ _ => return self.unify_and(a, b, identity),
+ };
+ coerce_mutbls(mt_a.mutbl, mutbl_b)?;
+
+ // Check that the types which they point at are compatible.
+ let a_unsafe = self.tcx.mk_ptr(ty::TypeAndMut { mutbl: mutbl_b, ty: mt_a.ty });
+ // Although references and unsafe ptrs have the same
+ // representation, we still register an Adjust::DerefRef so that
+ // regionck knows that the region for `a` must be valid here.
+ if is_ref {
+ self.unify_and(a_unsafe, b, |target| {
+ vec![
+ Adjustment { kind: Adjust::Deref(None), target: mt_a.ty },
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::RawPtr(mutbl_b)), target },
+ ]
+ })
+ } else if mt_a.mutbl != mutbl_b {
+ self.unify_and(a_unsafe, b, simple(Adjust::Pointer(PointerCast::MutToConstPointer)))
+ } else {
+ self.unify_and(a_unsafe, b, identity)
+ }
+ }
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Attempt to coerce an expression to a type, and return the
+ /// adjusted type of the expression, if successful.
+ /// Adjustments are only recorded if the coercion succeeded.
+    /// The expression *must not* have any pre-existing adjustments.
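+    ///
+    /// (Illustrative call, assuming an argument-checking context:
+    /// `fcx.try_coerce(arg_expr, arg_ty, formal_ty, AllowTwoPhase::Yes, None)`.)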
+ pub fn try_coerce(
+ &self,
+ expr: &hir::Expr<'_>,
+ expr_ty: Ty<'tcx>,
+ target: Ty<'tcx>,
+ allow_two_phase: AllowTwoPhase,
+ cause: Option<ObligationCause<'tcx>>,
+ ) -> RelateResult<'tcx, Ty<'tcx>> {
+ let source = self.resolve_vars_with_obligations(expr_ty);
+ debug!("coercion::try({:?}: {:?} -> {:?})", expr, source, target);
+
+ let cause =
+ cause.unwrap_or_else(|| self.cause(expr.span, ObligationCauseCode::ExprAssignable));
+ let coerce = Coerce::new(self, cause, allow_two_phase);
+ let ok = self.commit_if_ok(|_| coerce.coerce(source, target))?;
+
+ let (adjustments, _) = self.register_infer_ok_obligations(ok);
+ self.apply_adjustments(expr, adjustments);
+ Ok(if expr_ty.references_error() { self.tcx.ty_error() } else { target })
+ }
+
+ /// Same as `try_coerce()`, but without side-effects.
+ ///
+ /// Returns false if the coercion creates any obligations that result in
+ /// errors.
+ pub fn can_coerce(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> bool {
+ let source = self.resolve_vars_with_obligations(expr_ty);
+ debug!("coercion::can_with_predicates({:?} -> {:?})", source, target);
+
+ let cause = self.cause(rustc_span::DUMMY_SP, ObligationCauseCode::ExprAssignable);
+ // We don't ever need two-phase here since we throw out the result of the coercion
+ let coerce = Coerce::new(self, cause, AllowTwoPhase::No);
+ self.probe(|_| {
+ let Ok(ok) = coerce.coerce(source, target) else {
+ return false;
+ };
+ let mut fcx = traits::FulfillmentContext::new_in_snapshot();
+ fcx.register_predicate_obligations(self, ok.obligations);
+ fcx.select_where_possible(&self).is_empty()
+ })
+ }
+
+ /// Given a type and a target type, this function will calculate and return
+    /// how many dereference steps are needed to achieve `expr_ty <: target`.
+    /// If that's not possible, returns `None`.
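+    ///
+    /// For example, with `expr_ty = &&u8` and `target = &u8`, a single
+    /// deref step suffices, so this returns `Some(1)`.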
+ pub fn deref_steps(&self, expr_ty: Ty<'tcx>, target: Ty<'tcx>) -> Option<usize> {
+ let cause = self.cause(rustc_span::DUMMY_SP, ObligationCauseCode::ExprAssignable);
+ // We don't ever need two-phase here since we throw out the result of the coercion
+ let coerce = Coerce::new(self, cause, AllowTwoPhase::No);
+ coerce
+ .autoderef(rustc_span::DUMMY_SP, expr_ty)
+ .find_map(|(ty, steps)| self.probe(|_| coerce.unify(ty, target)).ok().map(|_| steps))
+ }
+
+ /// Given a type, this function will calculate and return the type given
+ /// for `<Ty as Deref>::Target` only if `Ty` also implements `DerefMut`.
+ ///
+ /// This function is for diagnostics only, since it does not register
+    /// trait or region sub-obligations. (Presumably we could, but it's not
+    /// particularly important for diagnostics.)
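+    ///
+    /// For example, `Vec<u8>` implements `DerefMut` with `Target = [u8]`,
+    /// so this would return `Some([u8])`.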
+ pub fn deref_once_mutably_for_diagnostic(&self, expr_ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
+ self.autoderef(rustc_span::DUMMY_SP, expr_ty).nth(1).and_then(|(deref_ty, _)| {
+ self.infcx
+ .type_implements_trait(
+ self.tcx.lang_items().deref_mut_trait()?,
+ expr_ty,
+ ty::List::empty(),
+ self.param_env,
+ )
+ .may_apply()
+ .then(|| deref_ty)
+ })
+ }
+
+ /// Given some expressions, their known unified type and another expression,
+ /// tries to unify the types, potentially inserting coercions on any of the
+ /// provided expressions and returns their LUB (aka "common supertype").
+ ///
+ /// This is really an internal helper. From outside the coercion
+ /// module, you should instantiate a `CoerceMany` instance.
+ fn try_find_coercion_lub<E>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ exprs: &[E],
+ prev_ty: Ty<'tcx>,
+ new: &hir::Expr<'_>,
+ new_ty: Ty<'tcx>,
+ ) -> RelateResult<'tcx, Ty<'tcx>>
+ where
+ E: AsCoercionSite,
+ {
+ let prev_ty = self.resolve_vars_with_obligations(prev_ty);
+ let new_ty = self.resolve_vars_with_obligations(new_ty);
+ debug!(
+ "coercion::try_find_coercion_lub({:?}, {:?}, exprs={:?} exprs)",
+ prev_ty,
+ new_ty,
+ exprs.len()
+ );
+
+ // The following check fixes #88097, where the compiler erroneously
+ // attempted to coerce a closure type to itself via a function pointer.
+ if prev_ty == new_ty {
+ return Ok(prev_ty);
+ }
+
+ // Special-case that coercion alone cannot handle:
+ // Function items or non-capturing closures of differing IDs or InternalSubsts.
+ let (a_sig, b_sig) = {
+ #[allow(rustc::usage_of_ty_tykind)]
+ let is_capturing_closure = |ty: &ty::TyKind<'tcx>| {
+ if let &ty::Closure(closure_def_id, _substs) = ty {
+ self.tcx.upvars_mentioned(closure_def_id.expect_local()).is_some()
+ } else {
+ false
+ }
+ };
+ if is_capturing_closure(prev_ty.kind()) || is_capturing_closure(new_ty.kind()) {
+ (None, None)
+ } else {
+ match (prev_ty.kind(), new_ty.kind()) {
+ (ty::FnDef(..), ty::FnDef(..)) => {
+ // Don't reify if the function types have a LUB, i.e., they
+ // are the same function and their parameters have a LUB.
+ match self
+ .commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
+ {
+ // We have a LUB of prev_ty and new_ty, just return it.
+ Ok(ok) => return Ok(self.register_infer_ok_obligations(ok)),
+ Err(_) => {
+ (Some(prev_ty.fn_sig(self.tcx)), Some(new_ty.fn_sig(self.tcx)))
+ }
+ }
+ }
+ (ty::Closure(_, substs), ty::FnDef(..)) => {
+ let b_sig = new_ty.fn_sig(self.tcx);
+ let a_sig = self
+ .tcx
+ .signature_unclosure(substs.as_closure().sig(), b_sig.unsafety());
+ (Some(a_sig), Some(b_sig))
+ }
+ (ty::FnDef(..), ty::Closure(_, substs)) => {
+ let a_sig = prev_ty.fn_sig(self.tcx);
+ let b_sig = self
+ .tcx
+ .signature_unclosure(substs.as_closure().sig(), a_sig.unsafety());
+ (Some(a_sig), Some(b_sig))
+ }
+ (ty::Closure(_, substs_a), ty::Closure(_, substs_b)) => (
+ Some(self.tcx.signature_unclosure(
+ substs_a.as_closure().sig(),
+ hir::Unsafety::Normal,
+ )),
+ Some(self.tcx.signature_unclosure(
+ substs_b.as_closure().sig(),
+ hir::Unsafety::Normal,
+ )),
+ ),
+ _ => (None, None),
+ }
+ }
+ };
+ if let (Some(a_sig), Some(b_sig)) = (a_sig, b_sig) {
+ // Intrinsics are not coercible to function pointers.
+ if a_sig.abi() == Abi::RustIntrinsic
+ || a_sig.abi() == Abi::PlatformIntrinsic
+ || b_sig.abi() == Abi::RustIntrinsic
+ || b_sig.abi() == Abi::PlatformIntrinsic
+ {
+ return Err(TypeError::IntrinsicCast);
+ }
+ // The signature must match.
+ let a_sig = self.normalize_associated_types_in(new.span, a_sig);
+ let b_sig = self.normalize_associated_types_in(new.span, b_sig);
+ let sig = self
+ .at(cause, self.param_env)
+ .trace(prev_ty, new_ty)
+ .lub(a_sig, b_sig)
+ .map(|ok| self.register_infer_ok_obligations(ok))?;
+
+ // Reify both sides and return the reified fn pointer type.
+ let fn_ptr = self.tcx.mk_fn_ptr(sig);
+ let prev_adjustment = match prev_ty.kind() {
+ ty::Closure(..) => Adjust::Pointer(PointerCast::ClosureFnPointer(a_sig.unsafety())),
+ ty::FnDef(..) => Adjust::Pointer(PointerCast::ReifyFnPointer),
+ _ => unreachable!(),
+ };
+ let next_adjustment = match new_ty.kind() {
+ ty::Closure(..) => Adjust::Pointer(PointerCast::ClosureFnPointer(b_sig.unsafety())),
+ ty::FnDef(..) => Adjust::Pointer(PointerCast::ReifyFnPointer),
+ _ => unreachable!(),
+ };
+ for expr in exprs.iter().map(|e| e.as_coercion_site()) {
+ self.apply_adjustments(
+ expr,
+ vec![Adjustment { kind: prev_adjustment.clone(), target: fn_ptr }],
+ );
+ }
+ self.apply_adjustments(new, vec![Adjustment { kind: next_adjustment, target: fn_ptr }]);
+ return Ok(fn_ptr);
+ }
+
+ // Configure a Coerce instance to compute the LUB.
+ // We don't allow two-phase borrows on any autorefs this creates since we
+ // probably aren't processing function arguments here and even if we were,
+ // they're going to get autorefed again anyway and we can apply 2-phase borrows
+ // at that time.
+ let mut coerce = Coerce::new(self, cause.clone(), AllowTwoPhase::No);
+ coerce.use_lub = true;
+
+ // First try to coerce the new expression to the type of the previous ones,
+ // but only if the new expression has no coercion already applied to it.
+ let mut first_error = None;
+ if !self.typeck_results.borrow().adjustments().contains_key(new.hir_id) {
+ let result = self.commit_if_ok(|_| coerce.coerce(new_ty, prev_ty));
+ match result {
+ Ok(ok) => {
+ let (adjustments, target) = self.register_infer_ok_obligations(ok);
+ self.apply_adjustments(new, adjustments);
+ debug!(
+ "coercion::try_find_coercion_lub: was able to coerce from new type {:?} to previous type {:?} ({:?})",
+ new_ty, prev_ty, target
+ );
+ return Ok(target);
+ }
+ Err(e) => first_error = Some(e),
+ }
+ }
+
+ // Then try to coerce the previous expressions to the type of the new one.
+ // This requires ensuring there are no coercions applied to *any* of the
+ // previous expressions, other than noop reborrows (ignoring lifetimes).
+ for expr in exprs {
+ let expr = expr.as_coercion_site();
+ let noop = match self.typeck_results.borrow().expr_adjustments(expr) {
+ &[
+ Adjustment { kind: Adjust::Deref(_), .. },
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(_, mutbl_adj)), .. },
+ ] => {
+ match *self.node_ty(expr.hir_id).kind() {
+ ty::Ref(_, _, mt_orig) => {
+ let mutbl_adj: hir::Mutability = mutbl_adj.into();
+ // Reborrow that we can safely ignore, because
+ // the next adjustment can only be a Deref
+ // which will be merged into it.
+ mutbl_adj == mt_orig
+ }
+ _ => false,
+ }
+ }
+ &[Adjustment { kind: Adjust::NeverToAny, .. }] | &[] => true,
+ _ => false,
+ };
+
+ if !noop {
+ debug!(
+ "coercion::try_find_coercion_lub: older expression {:?} had adjustments, requiring LUB",
+ expr,
+ );
+
+ return self
+ .commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
+ .map(|ok| self.register_infer_ok_obligations(ok));
+ }
+ }
+
+ match self.commit_if_ok(|_| coerce.coerce(prev_ty, new_ty)) {
+ Err(_) => {
+ // Avoid giving strange errors on failed attempts.
+ if let Some(e) = first_error {
+ Err(e)
+ } else {
+ self.commit_if_ok(|_| self.at(cause, self.param_env).lub(prev_ty, new_ty))
+ .map(|ok| self.register_infer_ok_obligations(ok))
+ }
+ }
+ Ok(ok) => {
+ let (adjustments, target) = self.register_infer_ok_obligations(ok);
+ for expr in exprs {
+ let expr = expr.as_coercion_site();
+ self.apply_adjustments(expr, adjustments.clone());
+ }
+ debug!(
+ "coercion::try_find_coercion_lub: was able to coerce previous type {:?} to new type {:?} ({:?})",
+ prev_ty, new_ty, target
+ );
+ Ok(target)
+ }
+ }
+ }
+}
+
+/// CoerceMany encapsulates the pattern you should use when you have
+/// many expressions that are all getting coerced to a common
+/// type. This arises, for example, when you have a match (the result
+/// of each arm is coerced to a common type). It also arises in less
+/// obvious places, such as when you have many `break foo` expressions
+/// that target the same loop, or the various `return` expressions in
+/// a function.
+///
+/// The basic protocol is as follows:
+///
+/// - Instantiate the `CoerceMany` with an initial `expected_ty`.
+/// This will also serve as the "starting LUB". The expectation is
+/// that this type is something which all of the expressions *must*
+/// be coercible to. Use a fresh type variable if needed.
+/// - For each expression whose result is to be coerced, invoke `coerce()` with it.
+/// - In some cases we wish to coerce "non-expressions" whose types are implicitly
+/// unit. This happens for example if you have a `break` with no expression,
+/// or an `if` with no `else`. In that case, invoke `coerce_forced_unit()`.
+/// - `coerce()` and `coerce_forced_unit()` may report errors. They hide this
+/// from you so that you don't have to worry your pretty head about it.
+/// But if an error is reported, the final type will be `err`.
+/// - Invoking `coerce()` may cause us to go and adjust the "adjustments" on
+/// previously coerced expressions.
+/// - When all done, invoke `complete()`. This will return the LUB of
+/// all your expressions.
+/// - WARNING: I don't believe this final type is guaranteed to be
+/// related to your initial `expected_ty` in any particular way,
+/// although it will typically be a subtype, so you should check it.
+/// - Invoking `complete()` may cause us to go and adjust the "adjustments" on
+/// previously coerced expressions.
+///
+/// Example:
+///
+/// ```ignore (illustrative)
+/// let mut coerce = CoerceMany::new(expected_ty);
+/// for expr in exprs {
+/// let expr_ty = fcx.check_expr_with_expectation(expr, expected);
+/// coerce.coerce(fcx, &cause, expr, expr_ty);
+/// }
+/// let final_ty = coerce.complete(fcx);
+/// ```
+pub struct CoerceMany<'tcx, 'exprs, E: AsCoercionSite> {
+ expected_ty: Ty<'tcx>,
+ final_ty: Option<Ty<'tcx>>,
+ expressions: Expressions<'tcx, 'exprs, E>,
+ pushed: usize,
+}
+
+/// The type of a `CoerceMany` that is storing up the expressions into
+/// a buffer. We use this in `check/mod.rs` for things like `break`.
+pub type DynamicCoerceMany<'tcx> = CoerceMany<'tcx, 'tcx, &'tcx hir::Expr<'tcx>>;
+
+enum Expressions<'tcx, 'exprs, E: AsCoercionSite> {
+ Dynamic(Vec<&'tcx hir::Expr<'tcx>>),
+ UpFront(&'exprs [E]),
+}
+
+impl<'tcx, 'exprs, E: AsCoercionSite> CoerceMany<'tcx, 'exprs, E> {
+ /// The usual case; collect the set of expressions dynamically.
+    /// If the full set of coercion sites is known beforehand,
+ /// consider `with_coercion_sites()` instead to avoid allocation.
+ pub fn new(expected_ty: Ty<'tcx>) -> Self {
+ Self::make(expected_ty, Expressions::Dynamic(vec![]))
+ }
+
+ /// As an optimization, you can create a `CoerceMany` with a
+ /// pre-existing slice of expressions. In this case, you are
+ /// expected to pass each element in the slice to `coerce(...)` in
+ /// order. This is used with arrays in particular to avoid
+ /// needlessly cloning the slice.
+ pub fn with_coercion_sites(expected_ty: Ty<'tcx>, coercion_sites: &'exprs [E]) -> Self {
+ Self::make(expected_ty, Expressions::UpFront(coercion_sites))
+ }
+
+ fn make(expected_ty: Ty<'tcx>, expressions: Expressions<'tcx, 'exprs, E>) -> Self {
+ CoerceMany { expected_ty, final_ty: None, expressions, pushed: 0 }
+ }
+
+ /// Returns the "expected type" with which this coercion was
+ /// constructed. This represents the "downward propagated" type
+ /// that was given to us at the start of typing whatever construct
+ /// we are typing (e.g., the match expression).
+ ///
+ /// Typically, this is used as the expected type when
+ /// type-checking each of the alternative expressions whose types
+ /// we are trying to merge.
+ pub fn expected_ty(&self) -> Ty<'tcx> {
+ self.expected_ty
+ }
+
+ /// Returns the current "merged type", representing our best-guess
+ /// at the LUB of the expressions we've seen so far (if any). This
+ /// isn't *final* until you call `self.complete()`, which will return
+ /// the merged type.
+ pub fn merged_ty(&self) -> Ty<'tcx> {
+ self.final_ty.unwrap_or(self.expected_ty)
+ }
+
+ /// Indicates that the value generated by `expression`, which is
+ /// of type `expression_ty`, is one of the possibilities that we
+ /// could coerce from. This will record `expression`, and later
+ /// calls to `coerce` may come back and add adjustments and things
+ /// if necessary.
+ pub fn coerce<'a>(
+ &mut self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ cause: &ObligationCause<'tcx>,
+ expression: &'tcx hir::Expr<'tcx>,
+ expression_ty: Ty<'tcx>,
+ ) {
+ self.coerce_inner(fcx, cause, Some(expression), expression_ty, None, false)
+ }
+
+ /// Indicates that one of the inputs is a "forced unit". This
+ /// occurs in a case like `if foo { ... };`, where the missing else
+ /// generates a "forced unit". Another example is a `loop { break;
+ /// }`, where the `break` has no argument expression. We treat
+ /// these cases slightly differently for error-reporting
+ /// purposes. Note that these tend to correspond to cases where
+ /// the `()` expression is implicit in the source, and hence we do
+ /// not take an expression argument.
+ ///
+    /// The `augment_error` closure gives you a chance to extend the error
+    /// message, should one result (e.g., we use this to suggest
+    /// removing a `;`).
+ pub fn coerce_forced_unit<'a>(
+ &mut self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ cause: &ObligationCause<'tcx>,
+ augment_error: &mut dyn FnMut(&mut Diagnostic),
+ label_unit_as_expected: bool,
+ ) {
+ self.coerce_inner(
+ fcx,
+ cause,
+ None,
+ fcx.tcx.mk_unit(),
+ Some(augment_error),
+ label_unit_as_expected,
+ )
+ }
+
+ /// The inner coercion "engine". If `expression` is `None`, this
+    /// is a forced-unit case, and hence `expression_ty` must be
+    /// the unit type `()`.
+ #[instrument(skip(self, fcx, augment_error, label_expression_as_expected), level = "debug")]
+ pub(crate) fn coerce_inner<'a>(
+ &mut self,
+ fcx: &FnCtxt<'a, 'tcx>,
+ cause: &ObligationCause<'tcx>,
+ expression: Option<&'tcx hir::Expr<'tcx>>,
+ mut expression_ty: Ty<'tcx>,
+ augment_error: Option<&mut dyn FnMut(&mut Diagnostic)>,
+ label_expression_as_expected: bool,
+ ) {
+ // Incorporate whatever type inference information we have
+ // until now; in principle we might also want to process
+ // pending obligations, but doing so should only improve
+ // compatibility (hopefully that is true) by helping us
+ // uncover never types better.
+ if expression_ty.is_ty_var() {
+ expression_ty = fcx.infcx.shallow_resolve(expression_ty);
+ }
+
+ // If we see any error types, just propagate that error
+ // upwards.
+ if expression_ty.references_error() || self.merged_ty().references_error() {
+ self.final_ty = Some(fcx.tcx.ty_error());
+ return;
+ }
+
+ // Handle the actual type unification etc.
+ let result = if let Some(expression) = expression {
+ if self.pushed == 0 {
+ // Special-case the first expression we are coercing.
+ // To be honest, I'm not entirely sure why we do this.
+                // We don't allow two-phase borrows; see the comment in `try_find_coercion_lub` for why.
+ fcx.try_coerce(
+ expression,
+ expression_ty,
+ self.expected_ty,
+ AllowTwoPhase::No,
+ Some(cause.clone()),
+ )
+ } else {
+ match self.expressions {
+ Expressions::Dynamic(ref exprs) => fcx.try_find_coercion_lub(
+ cause,
+ exprs,
+ self.merged_ty(),
+ expression,
+ expression_ty,
+ ),
+ Expressions::UpFront(ref coercion_sites) => fcx.try_find_coercion_lub(
+ cause,
+ &coercion_sites[0..self.pushed],
+ self.merged_ty(),
+ expression,
+ expression_ty,
+ ),
+ }
+ }
+ } else {
+ // this is a hack for cases where we default to `()` because
+ // the expression etc has been omitted from the source. An
+ // example is an `if let` without an else:
+ //
+ // if let Some(x) = ... { }
+ //
+ // we wind up with a second match arm that is like `_ =>
+ // ()`. That is the case we are considering here. We take
+ // a different path to get the right "expected, found"
+ // message and so forth (and because we know that
+ // `expression_ty` will be unit).
+ //
+ // Another example is `break` with no argument expression.
+ assert!(expression_ty.is_unit(), "if let hack without unit type");
+ fcx.at(cause, fcx.param_env)
+ .eq_exp(label_expression_as_expected, expression_ty, self.merged_ty())
+ .map(|infer_ok| {
+ fcx.register_infer_ok_obligations(infer_ok);
+ expression_ty
+ })
+ };
+
+ debug!(?result);
+ match result {
+ Ok(v) => {
+ self.final_ty = Some(v);
+ if let Some(e) = expression {
+ match self.expressions {
+ Expressions::Dynamic(ref mut buffer) => buffer.push(e),
+ Expressions::UpFront(coercion_sites) => {
+ // if the user gave us an array to validate, check that we got
+ // the next expression in the list, as expected
+ assert_eq!(
+ coercion_sites[self.pushed].as_coercion_site().hir_id,
+ e.hir_id
+ );
+ }
+ }
+ self.pushed += 1;
+ }
+ }
+ Err(coercion_error) => {
+ let (expected, found) = if label_expression_as_expected {
+ // In the case where this is a "forced unit", like
+ // `break`, we want to call the `()` "expected"
+ // since it is implied by the syntax.
+                    // (Note: not all force-units work this way.)
+ (expression_ty, self.final_ty.unwrap_or(self.expected_ty))
+ } else {
+ // Otherwise, the "expected" type for error
+ // reporting is the current unification type,
+ // which is basically the LUB of the expressions
+ // we've seen so far (combined with the expected
+ // type)
+ (self.final_ty.unwrap_or(self.expected_ty), expression_ty)
+ };
+
+ let mut err;
+ let mut unsized_return = false;
+ match *cause.code() {
+ ObligationCauseCode::ReturnNoExpression => {
+ err = struct_span_err!(
+ fcx.tcx.sess,
+ cause.span,
+ E0069,
+ "`return;` in a function whose return type is not `()`"
+ );
+ err.span_label(cause.span, "return type is not `()`");
+ }
+ ObligationCauseCode::BlockTailExpression(blk_id) => {
+ let parent_id = fcx.tcx.hir().get_parent_node(blk_id);
+ err = self.report_return_mismatched_types(
+ cause,
+ expected,
+ found,
+ coercion_error.clone(),
+ fcx,
+ parent_id,
+ expression,
+ Some(blk_id),
+ );
+ if !fcx.tcx.features().unsized_locals {
+ unsized_return = self.is_return_ty_unsized(fcx, blk_id);
+ }
+ }
+ ObligationCauseCode::ReturnValue(id) => {
+ err = self.report_return_mismatched_types(
+ cause,
+ expected,
+ found,
+ coercion_error.clone(),
+ fcx,
+ id,
+ expression,
+ None,
+ );
+ if !fcx.tcx.features().unsized_locals {
+ let id = fcx.tcx.hir().get_parent_node(id);
+ unsized_return = self.is_return_ty_unsized(fcx, id);
+ }
+ }
+ _ => {
+ err = fcx.report_mismatched_types(
+ cause,
+ expected,
+ found,
+ coercion_error.clone(),
+ );
+ }
+ }
+
+ if let Some(augment_error) = augment_error {
+ augment_error(&mut err);
+ }
+
+ let is_insufficiently_polymorphic =
+ matches!(coercion_error, TypeError::RegionsInsufficientlyPolymorphic(..));
+
+ if !is_insufficiently_polymorphic && let Some(expr) = expression {
+ fcx.emit_coerce_suggestions(
+ &mut err,
+ expr,
+ found,
+ expected,
+ None,
+ Some(coercion_error),
+ );
+ }
+
+ err.emit_unless(unsized_return);
+
+ self.final_ty = Some(fcx.tcx.ty_error());
+ }
+ }
+ }
+
+ fn report_return_mismatched_types<'a>(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ty_err: TypeError<'tcx>,
+ fcx: &FnCtxt<'a, 'tcx>,
+ id: hir::HirId,
+ expression: Option<&'tcx hir::Expr<'tcx>>,
+ blk_id: Option<hir::HirId>,
+ ) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut err = fcx.report_mismatched_types(cause, expected, found, ty_err);
+
+ let mut pointing_at_return_type = false;
+ let mut fn_output = None;
+
+ let parent_id = fcx.tcx.hir().get_parent_node(id);
+ let parent = fcx.tcx.hir().get(parent_id);
+ if let Some(expr) = expression
+ && let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Closure(&hir::Closure { body, .. }), .. }) = parent
+ && !matches!(fcx.tcx.hir().body(body).value.kind, hir::ExprKind::Block(..))
+ {
+ fcx.suggest_missing_semicolon(&mut err, expr, expected, true);
+ }
+ // Verify that this is a tail expression of a function, otherwise the
+ // label pointing out the cause for the type coercion will be wrong
+ // as prior return coercions would not be relevant (#57664).
+ let fn_decl = if let (Some(expr), Some(blk_id)) = (expression, blk_id) {
+ pointing_at_return_type =
+ fcx.suggest_mismatched_types_on_tail(&mut err, expr, expected, found, blk_id);
+ if let (Some(cond_expr), true, false) = (
+ fcx.tcx.hir().get_if_cause(expr.hir_id),
+ expected.is_unit(),
+ pointing_at_return_type,
+ )
+ // If the block is from an external macro or try (`?`) desugaring, then
+ // do not suggest adding a semicolon, because there's nowhere to put it.
+ // See issues #81943 and #87051.
+ && matches!(
+ cond_expr.span.desugaring_kind(),
+ None | Some(DesugaringKind::WhileLoop)
+ ) && !in_external_macro(fcx.tcx.sess, cond_expr.span)
+ && !matches!(
+ cond_expr.kind,
+ hir::ExprKind::Match(.., hir::MatchSource::TryDesugar)
+ )
+ {
+ err.span_label(cond_expr.span, "expected this to be `()`");
+ if expr.can_have_side_effects() {
+ fcx.suggest_semicolon_at_end(cond_expr.span, &mut err);
+ }
+ }
+ fcx.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
+ } else {
+ fcx.get_fn_decl(parent_id)
+ };
+
+ if let Some((fn_decl, can_suggest)) = fn_decl {
+ if blk_id.is_none() {
+ pointing_at_return_type |= fcx.suggest_missing_return_type(
+ &mut err,
+ &fn_decl,
+ expected,
+ found,
+ can_suggest,
+ fcx.tcx.hir().local_def_id_to_hir_id(fcx.tcx.hir().get_parent_item(id)),
+ );
+ }
+ if !pointing_at_return_type {
+ fn_output = Some(&fn_decl.output); // `impl Trait` return type
+ }
+ }
+
+ let parent_id = fcx.tcx.hir().get_parent_item(id);
+ let parent_item = fcx.tcx.hir().get_by_def_id(parent_id);
+
+ if let (Some(expr), Some(_), Some((fn_decl, _, _))) =
+ (expression, blk_id, fcx.get_node_fn_decl(parent_item))
+ {
+ fcx.suggest_missing_break_or_return_expr(
+ &mut err,
+ expr,
+ fn_decl,
+ expected,
+ found,
+ id,
+ fcx.tcx.hir().local_def_id_to_hir_id(parent_id),
+ );
+ }
+
+ if let (Some(sp), Some(fn_output)) = (fcx.ret_coercion_span.get(), fn_output) {
+ self.add_impl_trait_explanation(&mut err, cause, fcx, expected, sp, fn_output);
+ }
+ err
+ }
+
+ fn add_impl_trait_explanation<'a>(
+ &self,
+ err: &mut Diagnostic,
+ cause: &ObligationCause<'tcx>,
+ fcx: &FnCtxt<'a, 'tcx>,
+ expected: Ty<'tcx>,
+ sp: Span,
+ fn_output: &hir::FnRetTy<'_>,
+ ) {
+ let return_sp = fn_output.span();
+ err.span_label(return_sp, "expected because this return type...");
+ err.span_label(
+ sp,
+ format!("...is found to be `{}` here", fcx.resolve_vars_with_obligations(expected)),
+ );
+ let impl_trait_msg = "for information on `impl Trait`, see \
+ <https://doc.rust-lang.org/book/ch10-02-traits.html\
+ #returning-types-that-implement-traits>";
+ let trait_obj_msg = "for information on trait objects, see \
+ <https://doc.rust-lang.org/book/ch17-02-trait-objects.html\
+ #using-trait-objects-that-allow-for-values-of-different-types>";
+ err.note("to return `impl Trait`, all returned values must be of the same type");
+ err.note(impl_trait_msg);
+ let snippet = fcx
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(return_sp)
+ .unwrap_or_else(|_| "dyn Trait".to_string());
+ let mut snippet_iter = snippet.split_whitespace();
+ let has_impl = snippet_iter.next().map_or(false, |s| s == "impl");
+ // Only suggest `Box<dyn Trait>` if `Trait` in `impl Trait` is object safe.
+ let mut is_object_safe = false;
+ if let hir::FnRetTy::Return(ty) = fn_output
+ // Get the return type.
+ && let hir::TyKind::OpaqueDef(..) = ty.kind
+ {
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(fcx, ty);
+ // Get the `impl Trait`'s `DefId`.
+ if let ty::Opaque(def_id, _) = ty.kind()
+ // Get the `impl Trait`'s `Item` so that we can get its trait bounds and
+ // get the `Trait`'s `DefId`.
+ && let hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, .. }) =
+ fcx.tcx.hir().expect_item(def_id.expect_local()).kind
+ {
+            // Are all of this `impl Trait`'s traits object safe?
+ is_object_safe = bounds.iter().all(|bound| {
+ bound
+ .trait_ref()
+ .and_then(|t| t.trait_def_id())
+ .map_or(false, |def_id| {
+ fcx.tcx.object_safety_violations(def_id).is_empty()
+ })
+ })
+ }
+ };
+ if has_impl {
+ if is_object_safe {
+ err.multipart_suggestion(
+ "you could change the return type to be a boxed trait object",
+ vec![
+ (return_sp.with_hi(return_sp.lo() + BytePos(4)), "Box<dyn".to_string()),
+ (return_sp.shrink_to_hi(), ">".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ let sugg = [sp, cause.span]
+ .into_iter()
+ .flat_map(|sp| {
+ [
+ (sp.shrink_to_lo(), "Box::new(".to_string()),
+ (sp.shrink_to_hi(), ")".to_string()),
+ ]
+ .into_iter()
+ })
+ .collect::<Vec<_>>();
+ err.multipart_suggestion(
+ "if you change the return type to expect trait objects, box the returned \
+ expressions",
+ sugg,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.help(&format!(
+ "if the trait `{}` were object safe, you could return a boxed trait object",
+ &snippet[5..]
+ ));
+ }
+ err.note(trait_obj_msg);
+ }
+ err.help("you could instead create a new `enum` with a variant for each returned type");
+ }
+
+ fn is_return_ty_unsized<'a>(&self, fcx: &FnCtxt<'a, 'tcx>, blk_id: hir::HirId) -> bool {
+ if let Some((fn_decl, _)) = fcx.get_fn_decl(blk_id)
+ && let hir::FnRetTy::Return(ty) = fn_decl.output
+ && let ty = <dyn AstConv<'_>>::ast_ty_to_ty(fcx, ty)
+ && let ty::Dynamic(..) = ty.kind()
+ {
+ return true;
+ }
+ false
+ }
+
+ pub fn complete<'a>(self, fcx: &FnCtxt<'a, 'tcx>) -> Ty<'tcx> {
+ if let Some(final_ty) = self.final_ty {
+ final_ty
+ } else {
+ // If we only had inputs that were of type `!` (or no
+ // inputs at all), then the final type is `!`.
+ assert_eq!(self.pushed, 0);
+ fcx.tcx.types.never
+ }
+ }
+}
+
+/// Something that can be converted into an expression to which we can
+/// apply a coercion.
+pub trait AsCoercionSite {
+ fn as_coercion_site(&self) -> &hir::Expr<'_>;
+}
+
+impl AsCoercionSite for hir::Expr<'_> {
+ fn as_coercion_site(&self) -> &hir::Expr<'_> {
+ self
+ }
+}
+
+impl<'a, T> AsCoercionSite for &'a T
+where
+ T: AsCoercionSite,
+{
+ fn as_coercion_site(&self) -> &hir::Expr<'_> {
+ (**self).as_coercion_site()
+ }
+}
+
+impl AsCoercionSite for ! {
+ fn as_coercion_site(&self) -> &hir::Expr<'_> {
+ unreachable!()
+ }
+}
+
+impl AsCoercionSite for hir::Arm<'_> {
+ fn as_coercion_site(&self) -> &hir::Expr<'_> {
+ &self.body
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/compare_method.rs b/compiler/rustc_typeck/src/check/compare_method.rs
new file mode 100644
index 000000000..666498403
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/compare_method.rs
@@ -0,0 +1,1547 @@
+use super::potentially_plural_count;
+use crate::check::regionck::OutlivesEnvironmentExt;
+use crate::check::wfcheck;
+use crate::errors::LifetimesOrBoundsMismatchOnTrait;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticId, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::intravisit;
+use rustc_hir::{GenericParamKind, ImplItemKind, TraitItemKind};
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::{self, TyCtxtInferExt};
+use rustc_infer::traits::util;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::subst::{InternalSubsts, Subst};
+use rustc_middle::ty::util::ExplicitSelf;
+use rustc_middle::ty::{self, DefIdTree};
+use rustc_middle::ty::{GenericParamDefKind, ToPredicate, TyCtxt};
+use rustc_span::Span;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCauseCode, ObligationCtxt, Reveal,
+};
+use std::iter;
+
+/// Checks that a method from an impl conforms to the signature of
+/// the same method as declared in the trait.
+///
+/// # Parameters
+///
+/// - `impl_m`: the method in the impl that we are checking
+/// - `trait_m`: the method in the trait
+/// - `impl_trait_ref`: the TraitRef corresponding to the trait implementation
+/// - `trait_item_span`: the span of the trait method, if available
+pub(crate) fn compare_impl_method<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+ trait_item_span: Option<Span>,
+) {
+ debug!("compare_impl_method(impl_trait_ref={:?})", impl_trait_ref);
+
+ let impl_m_span = tcx.def_span(impl_m.def_id);
+
+ if let Err(_) = compare_self_type(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref) {
+ return;
+ }
+
+ if let Err(_) = compare_number_of_generics(tcx, impl_m, impl_m_span, trait_m, trait_item_span) {
+ return;
+ }
+
+ if let Err(_) = compare_generic_param_kinds(tcx, impl_m, trait_m) {
+ return;
+ }
+
+ if let Err(_) =
+ compare_number_of_method_arguments(tcx, impl_m, impl_m_span, trait_m, trait_item_span)
+ {
+ return;
+ }
+
+ if let Err(_) = compare_synthetic_generics(tcx, impl_m, trait_m) {
+ return;
+ }
+
+ if let Err(_) = compare_predicate_entailment(tcx, impl_m, impl_m_span, trait_m, impl_trait_ref)
+ {
+ return;
+ }
+}
+
+fn compare_predicate_entailment<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ impl_m_span: Span,
+ trait_m: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_to_impl_substs = impl_trait_ref.substs;
+
+ // This node-id should be used for the `body_id` field on each
+ // `ObligationCause` (and the `FnCtxt`).
+ //
+ // FIXME(@lcnr): remove that after removing `cause.body_id` from
+ // obligations.
+ let impl_m_hir_id = tcx.hir().local_def_id_to_hir_id(impl_m.def_id.expect_local());
+ // We sometimes modify the span further down.
+ let mut cause = ObligationCause::new(
+ impl_m_span,
+ impl_m_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_m.def_id.expect_local(),
+ trait_item_def_id: trait_m.def_id,
+ kind: impl_m.kind,
+ },
+ );
+
+ // This code is best explained by example. Consider a trait:
+ //
+ // trait Trait<'t, T> {
+ // fn method<'a, M>(t: &'t T, m: &'a M) -> Self;
+ // }
+ //
+ // And an impl:
+ //
+ // impl<'i, 'j, U> Trait<'j, &'i U> for Foo {
+ // fn method<'b, N>(t: &'j &'i U, m: &'b N) -> Foo;
+ // }
+ //
+ // We wish to decide if those two method types are compatible.
+ //
+ // We start out with trait_to_impl_substs, that maps the trait
+ // type parameters to impl type parameters. This is taken from the
+ // impl trait reference:
+ //
+ // trait_to_impl_substs = {'t => 'j, T => &'i U, Self => Foo}
+ //
+    // We create a mapping `impl_to_placeholder_substs` that maps from the impl type
+ // parameters to fresh types and regions. For type parameters,
+ // this is the identity transform, but we could as well use any
+ // placeholder types. For regions, we convert from bound to free
+ // regions (Note: but only early-bound regions, i.e., those
+ // declared on the impl or used in type parameter bounds).
+ //
+ // impl_to_placeholder_substs = {'i => 'i0, U => U0, N => N0 }
+ //
+    // Now we can apply `impl_to_placeholder_substs` to the type of the impl method
+ // to yield a new function type in terms of our fresh, placeholder
+ // types:
+ //
+    //     <'b> fn(t: &'i0 U0, m: &'b N0) -> Foo
+ //
+ // We now want to extract and substitute the type of the *trait*
+ // method and compare it. To do so, we must create a compound
+ // substitution by combining trait_to_impl_substs and
+ // impl_to_placeholder_substs, and also adding a mapping for the method
+ // type parameters. We extend the mapping to also include
+ // the method parameters.
+ //
+ // trait_to_placeholder_substs = { T => &'i0 U0, Self => Foo, M => N0 }
+ //
+ // Applying this to the trait method type yields:
+ //
+    //     <'a> fn(t: &'i0 U0, m: &'a N0) -> Foo
+ //
+    // This type is the same except for the name of the bound region ('a
+    // vs 'b). However, the normal subtyping rules on fn types handle
+    // this kind of equivalence just fine.
+ //
+ // We now use these substitutions to ensure that all declared bounds are
+ // satisfied by the implementation's method.
+ //
+ // We do this by creating a parameter environment which contains a
+ // substitution corresponding to impl_to_placeholder_substs. We then build
+ // trait_to_placeholder_substs and use it to convert the predicates contained
+ // in the trait_m.generics to the placeholder form.
+ //
+ // Finally we register each of these predicates as an obligation in
+ // a fresh FulfillmentCtxt, and invoke select_all_or_error.
+
+ // Create mapping from impl to placeholder.
+ let impl_to_placeholder_substs = InternalSubsts::identity_for_item(tcx, impl_m.def_id);
+
+ // Create mapping from trait to placeholder.
+ let trait_to_placeholder_substs =
+ impl_to_placeholder_substs.rebase_onto(tcx, impl_m.container_id(tcx), trait_to_impl_substs);
+ debug!("compare_impl_method: trait_to_placeholder_substs={:?}", trait_to_placeholder_substs);
+
+ let impl_m_generics = tcx.generics_of(impl_m.def_id);
+ let trait_m_generics = tcx.generics_of(trait_m.def_id);
+ let impl_m_predicates = tcx.predicates_of(impl_m.def_id);
+ let trait_m_predicates = tcx.predicates_of(trait_m.def_id);
+
+ // Check region bounds.
+ check_region_bounds_on_impl_item(tcx, impl_m, trait_m, &trait_m_generics, &impl_m_generics)?;
+
+ // Create obligations for each predicate declared by the impl
+ // definition in the context of the trait's parameter
+ // environment. We can't just use `impl_env.caller_bounds`,
+ // however, because we want to replace all late-bound regions with
+ // region variables.
+ let impl_predicates = tcx.predicates_of(impl_m_predicates.parent.unwrap());
+ let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
+
+ debug!("compare_impl_method: impl_bounds={:?}", hybrid_preds);
+
+    // This is the only tricky bit of the new way we check implementation methods.
+ // We need to build a set of predicates where only the method-level bounds
+ // are from the trait and we assume all other bounds from the implementation
+ // to be previously satisfied.
+ //
+ // We then register the obligations from the impl_m and check to see
+ // if all constraints hold.
+ hybrid_preds
+ .predicates
+ .extend(trait_m_predicates.instantiate_own(tcx, trait_to_placeholder_substs).predicates);
+
+ // Construct trait parameter environment and then shift it into the placeholder viewpoint.
+ // The key step here is to update the caller_bounds's predicates to be
+ // the new hybrid bounds we computed.
+ let normalize_cause = traits::ObligationCause::misc(impl_m_span, impl_m_hir_id);
+ let param_env = ty::ParamEnv::new(
+ tcx.intern_predicates(&hybrid_preds.predicates),
+ Reveal::UserFacing,
+ hir::Constness::NotConst,
+ );
+ let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
+
+ tcx.infer_ctxt().enter(|ref infcx| {
+ let ocx = ObligationCtxt::new(infcx);
+
+ debug!("compare_impl_method: caller_bounds={:?}", param_env.caller_bounds());
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ let impl_m_own_bounds = impl_m_predicates.instantiate_own(tcx, impl_to_placeholder_substs);
+ for (predicate, span) in iter::zip(impl_m_own_bounds.predicates, impl_m_own_bounds.spans) {
+ let normalize_cause = traits::ObligationCause::misc(span, impl_m_hir_id);
+ let traits::Normalized { value: predicate, obligations } =
+ traits::normalize(&mut selcx, param_env, normalize_cause, predicate);
+
+ ocx.register_obligations(obligations);
+ let cause = ObligationCause::new(
+ span,
+ impl_m_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_m.def_id.expect_local(),
+ trait_item_def_id: trait_m.def_id,
+ kind: impl_m.kind,
+ },
+ );
+ ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate));
+ }
+
+ // We now need to check that the signature of the impl method is
+ // compatible with that of the trait method. We do this by
+ // checking that `impl_fty <: trait_fty`.
+ //
+ // FIXME. Unfortunately, this doesn't quite work right now because
+ // associated type normalization is not integrated into subtype
+ // checks. For the comparison to be valid, we need to
+ // normalize the associated types in the impl/trait methods
+ // first. However, because function types bind regions, just
+ // calling `normalize_associated_types_in` would have no effect on
+ // any associated types appearing in the fn arguments or return
+ // type.
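+ //
+ // As a hypothetical illustration: for a trait method
+ // `fn f(&self, x: u32) -> String` implemented for `MyTy`, `trait_fty` is
+ // the placeholder function pointer type `fn(&MyTy, u32) -> String`, and we
+ // require the impl's signature, as a function pointer type, to be a
+ // subtype of it.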
+
+ // Compute placeholder form of impl and trait method tys.
+ let tcx = infcx.tcx;
+
+ let mut wf_tys = FxHashSet::default();
+
+ let impl_sig = infcx.replace_bound_vars_with_fresh_vars(
+ impl_m_span,
+ infer::HigherRankedType,
+ tcx.fn_sig(impl_m.def_id),
+ );
+
+ let norm_cause = ObligationCause::misc(impl_m_span, impl_m_hir_id);
+ let impl_sig = ocx.normalize(norm_cause.clone(), param_env, impl_sig);
+ let impl_fty = tcx.mk_fn_ptr(ty::Binder::dummy(impl_sig));
+ debug!("compare_impl_method: impl_fty={:?}", impl_fty);
+
+ let trait_sig = tcx.bound_fn_sig(trait_m.def_id).subst(tcx, trait_to_placeholder_substs);
+ let trait_sig = tcx.liberate_late_bound_regions(impl_m.def_id, trait_sig);
+ let trait_sig = ocx.normalize(norm_cause, param_env, trait_sig);
+ // Add the resulting inputs and output as well-formed.
+ wf_tys.extend(trait_sig.inputs_and_output.iter());
+ let trait_fty = tcx.mk_fn_ptr(ty::Binder::dummy(trait_sig));
+
+ debug!("compare_impl_method: trait_fty={:?}", trait_fty);
+
+ // FIXME: We'd want to keep more accurate spans than "the method signature" when
+ // processing the comparison between the trait and impl fn, but we sadly lose them
+ // and point at the whole signature when a trait bound or specific input or output
+ // type would be more appropriate. In other places we have a `Vec<Span>`
+ // corresponding to their `Vec<Predicate>`, but we don't have that here.
+ // Fixing this would improve the output of test `issue-83765.rs`.
+ let sub_result = infcx
+ .at(&cause, param_env)
+ .sup(trait_fty, impl_fty)
+ .map(|infer_ok| ocx.register_infer_ok_obligations(infer_ok));
+
+ if let Err(terr) = sub_result {
+ debug!("sub_types failed: impl ty {:?}, trait ty {:?}", impl_fty, trait_fty);
+
+ let (impl_err_span, trait_err_span) =
+ extract_spans_for_error_reporting(&infcx, &terr, &cause, impl_m, trait_m);
+
+ cause.span = impl_err_span;
+
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ cause.span(),
+ E0053,
+ "method `{}` has an incompatible type for trait",
+ trait_m.name
+ );
+ match &terr {
+ TypeError::ArgumentMutability(0) | TypeError::ArgumentSorts(_, 0)
+ if trait_m.fn_has_self_parameter =>
+ {
+ let ty = trait_sig.inputs()[0];
+ let sugg = match ExplicitSelf::determine(ty, |_| ty == impl_trait_ref.self_ty())
+ {
+ ExplicitSelf::ByValue => "self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Mut) => {
+ "&mut self".to_owned()
+ }
+ _ => format!("self: {ty}"),
+ };
+
+ // When the `impl` receiver is an arbitrary self type, like `self: Box<Self>`, the
+ // span points only at the type `Box<Self>`, but we want to cover the whole
+ // argument pattern and type.
+ let span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref sig, body) => tcx
+ .hir()
+ .body_param_names(body)
+ .zip(sig.decl.inputs.iter())
+ .map(|(param, ty)| param.span.to(ty.span))
+ .next()
+ .unwrap_or(impl_err_span),
+ _ => bug!("{:?} is not a method", impl_m),
+ };
+
+ diag.span_suggestion(
+ span,
+ "change the self-receiver type to match the trait",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ }
+ TypeError::ArgumentMutability(i) | TypeError::ArgumentSorts(_, i) => {
+ if trait_sig.inputs().len() == *i {
+ // Suggestion to change the output type. We do not make this suggestion in
+ // `async` functions, to avoid complex logic or incorrect output.
+ match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref sig, _)
+ if sig.header.asyncness == hir::IsAsync::NotAsync =>
+ {
+ let msg = "change the output type to match the trait";
+ let ap = Applicability::MachineApplicable;
+ match sig.decl.output {
+ hir::FnRetTy::DefaultReturn(sp) => {
+ let sugg = format!("-> {} ", trait_sig.output());
+ diag.span_suggestion_verbose(sp, msg, sugg, ap);
+ }
+ hir::FnRetTy::Return(hir_ty) => {
+ let sugg = trait_sig.output();
+ diag.span_suggestion(hir_ty.span, msg, sugg, ap);
+ }
+ };
+ }
+ _ => {}
+ };
+ } else if let Some(trait_ty) = trait_sig.inputs().get(*i) {
+ diag.span_suggestion(
+ impl_err_span,
+ "change the parameter type to match the trait",
+ trait_ty,
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ _ => {}
+ }
+
+ infcx.note_type_err(
+ &mut diag,
+ &cause,
+ trait_err_span.map(|sp| (sp, "type in trait".to_owned())),
+ Some(infer::ValuePairs::Terms(ExpectedFound {
+ expected: trait_fty.into(),
+ found: impl_fty.into(),
+ })),
+ &terr,
+ false,
+ false,
+ );
+
+ return Err(diag.emit());
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let mut outlives_environment = OutlivesEnvironment::new(param_env);
+ outlives_environment.add_implied_bounds(infcx, wf_tys, impl_m_hir_id);
+ infcx.check_region_obligations_and_report_errors(
+ impl_m.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ Ok(())
+ })
+}
+
+fn check_region_bounds_on_impl_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+ trait_generics: &ty::Generics,
+ impl_generics: &ty::Generics,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_params = trait_generics.own_counts().lifetimes;
+ let impl_params = impl_generics.own_counts().lifetimes;
+
+ debug!(
+ "check_region_bounds_on_impl_item: \
+ trait_generics={:?} \
+ impl_generics={:?}",
+ trait_generics, impl_generics
+ );
+
+ // Must have same number of early-bound lifetime parameters.
+ // Unfortunately, if the user screws up the bounds, then this
+ // will change classification between early and late. E.g., if
+ // the trait has `<'a, 'b: 'a>` and the impl just has `<'a, 'b>`,
+ // then we have 2 early-bound lifetime parameters in the trait
+ // but 0 in the impl. But if we report "expected 2 but found 0"
+ // it's confusing, because it looks like there are zero. Since I
+ // don't quite know how to phrase things at the moment, give a
+ // kind of vague error message.
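+ //
+ // An illustrative sketch of the mismatch described above:
+ //
+ //     trait Tr { fn f<'a, 'b: 'a>(x: &'a u8, y: &'b u8); }
+ //     impl Tr for () { fn f<'a, 'b>(x: &'a u8, y: &'b u8) {} }
+ //
+ // The `'b: 'a` bound makes both lifetimes early-bound in the trait, while
+ // both remain late-bound in the impl, so the counts are 2 vs. 0 even though
+ // both signatures name two lifetimes.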
+ if trait_params != impl_params {
+ let span = tcx
+ .hir()
+ .get_generics(impl_m.def_id.expect_local())
+ .expect("expected impl item to have generics or else we can't compare them")
+ .span;
+ let generics_span = if let Some(local_def_id) = trait_m.def_id.as_local() {
+ Some(
+ tcx.hir()
+ .get_generics(local_def_id)
+ .expect("expected trait item to have generics or else we can't compare them")
+ .span,
+ )
+ } else {
+ None
+ };
+
+ let reported = tcx.sess.emit_err(LifetimesOrBoundsMismatchOnTrait {
+ span,
+ item_kind: assoc_item_kind_str(impl_m),
+ ident: impl_m.ident(tcx),
+ generics_span,
+ });
+ return Err(reported);
+ }
+
+ Ok(())
+}
+
+#[instrument(level = "debug", skip(infcx))]
+fn extract_spans_for_error_reporting<'a, 'tcx>(
+ infcx: &infer::InferCtxt<'a, 'tcx>,
+ terr: &TypeError<'_>,
+ cause: &ObligationCause<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+) -> (Span, Option<Span>) {
+ let tcx = infcx.tcx;
+ let mut impl_args = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref sig, _) => {
+ sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span()))
+ }
+ _ => bug!("{:?} is not a method", impl_m),
+ };
+ let trait_args =
+ trait_m.def_id.as_local().map(|def_id| match tcx.hir().expect_trait_item(def_id).kind {
+ TraitItemKind::Fn(ref sig, _) => {
+ sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span()))
+ }
+ _ => bug!("{:?} is not a TraitItemKind::Fn", trait_m),
+ });
+
+ match *terr {
+ TypeError::ArgumentMutability(i) => {
+ (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i)))
+ }
+ TypeError::ArgumentSorts(ExpectedFound { .. }, i) => {
+ (impl_args.nth(i).unwrap(), trait_args.and_then(|mut args| args.nth(i)))
+ }
+ _ => (cause.span(), tcx.hir().span_if_local(trait_m.def_id)),
+ }
+}
+
+fn compare_self_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ impl_m_span: Span,
+ trait_m: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ // Try to give more informative error messages about self typing
+ // mismatches. Note that any mismatch will also be detected
+ // below, where we construct a canonical function type that
+ // includes the self parameter as a normal parameter. It's just
+ // that the error messages you get out of this code are a bit more
+ // inscrutable, particularly for cases where one method has no
+ // self.
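+ //
+ // E.g. (illustrative):
+ //
+ //     trait Tr { fn f(); }
+ //     impl Tr for u8 { fn f(&self) {} } // E0185: `&self` not in the trait
+ //
+ // The reverse case, a `self` in the trait but not in the impl, is E0186.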
+
+ let self_string = |method: &ty::AssocItem| {
+ let untransformed_self_ty = match method.container {
+ ty::ImplContainer => impl_trait_ref.self_ty(),
+ ty::TraitContainer => tcx.types.self_param,
+ };
+ let self_arg_ty = tcx.fn_sig(method.def_id).input(0);
+ let param_env = ty::ParamEnv::reveal_all();
+
+ tcx.infer_ctxt().enter(|infcx| {
+ let self_arg_ty = tcx.liberate_late_bound_regions(method.def_id, self_arg_ty);
+ let can_eq_self = |ty| infcx.can_eq(param_env, untransformed_self_ty, ty).is_ok();
+ match ExplicitSelf::determine(self_arg_ty, can_eq_self) {
+ ExplicitSelf::ByValue => "self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Not) => "&self".to_owned(),
+ ExplicitSelf::ByReference(_, hir::Mutability::Mut) => "&mut self".to_owned(),
+ _ => format!("self: {self_arg_ty}"),
+ }
+ })
+ };
+
+ match (trait_m.fn_has_self_parameter, impl_m.fn_has_self_parameter) {
+ (false, false) | (true, true) => {}
+
+ (false, true) => {
+ let self_descr = self_string(impl_m);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_m_span,
+ E0185,
+ "method `{}` has a `{}` declaration in the impl, but not in the trait",
+ trait_m.name,
+ self_descr
+ );
+ err.span_label(impl_m_span, format!("`{self_descr}` used in impl"));
+ if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) {
+ err.span_label(span, format!("trait method declared without `{self_descr}`"));
+ } else {
+ err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
+ }
+ let reported = err.emit();
+ return Err(reported);
+ }
+
+ (true, false) => {
+ let self_descr = self_string(trait_m);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_m_span,
+ E0186,
+ "method `{}` has a `{}` declaration in the trait, but not in the impl",
+ trait_m.name,
+ self_descr
+ );
+ err.span_label(impl_m_span, format!("expected `{self_descr}` in impl"));
+ if let Some(span) = tcx.hir().span_if_local(trait_m.def_id) {
+ err.span_label(span, format!("`{self_descr}` used in trait"));
+ } else {
+ err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
+ }
+ let reported = err.emit();
+ return Err(reported);
+ }
+ }
+
+ Ok(())
+}
+
+/// Checks that the number of generics on a given assoc item in a trait impl is the same
+/// as the number of generics on the respective assoc item in the trait definition.
+///
+/// For example, both errors in the following code are emitted here:
+/// ```
+/// trait Trait {
+/// fn foo();
+/// type Assoc<T>;
+/// }
+///
+/// impl Trait for () {
+/// fn foo<T>() {}
+/// //~^ error
+/// type Assoc = u32;
+/// //~^ error
+/// }
+/// ```
+///
+/// Notably this does not error on `foo<T>` implemented as `foo<const N: u8>` or
+/// `foo<const N: u8>` implemented as `foo<const N: u32>`. This is handled in
+/// [`compare_generic_param_kinds`]. This function also does not handle lifetime parameters.
+fn compare_number_of_generics<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_: &ty::AssocItem,
+ _impl_span: Span,
+ trait_: &ty::AssocItem,
+ trait_span: Option<Span>,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_own_counts = tcx.generics_of(trait_.def_id).own_counts();
+ let impl_own_counts = tcx.generics_of(impl_.def_id).own_counts();
+
+ // This avoids us erroring on `foo<T>` implemented as `foo<const N: u8>` as this is implemented
+ // in `compare_generic_param_kinds` which will give a nicer error message than something like:
+ // "expected 1 type parameter, found 0 type parameters"
+ if (trait_own_counts.types + trait_own_counts.consts)
+ == (impl_own_counts.types + impl_own_counts.consts)
+ {
+ return Ok(());
+ }
+
+ let matchings = [
+ ("type", trait_own_counts.types, impl_own_counts.types),
+ ("const", trait_own_counts.consts, impl_own_counts.consts),
+ ];
+
+ let item_kind = assoc_item_kind_str(impl_);
+
+ let mut err_occurred = None;
+ for (kind, trait_count, impl_count) in matchings {
+ if impl_count != trait_count {
+ let arg_spans = |kind: ty::AssocKind, generics: &hir::Generics<'_>| {
+ let mut spans = generics
+ .params
+ .iter()
+ .filter(|p| match p.kind {
+ hir::GenericParamKind::Lifetime {
+ kind: hir::LifetimeParamKind::Elided,
+ } => {
+ // A fn can have an arbitrary number of extra elided lifetimes for the
+ // same signature.
+ !matches!(kind, ty::AssocKind::Fn)
+ }
+ _ => true,
+ })
+ .map(|p| p.span)
+ .collect::<Vec<Span>>();
+ if spans.is_empty() {
+ spans = vec![generics.span]
+ }
+ spans
+ };
+ let (trait_spans, impl_trait_spans) = if let Some(def_id) = trait_.def_id.as_local() {
+ let trait_item = tcx.hir().expect_trait_item(def_id);
+ let arg_spans: Vec<Span> = arg_spans(trait_.kind, trait_item.generics);
+ let impl_trait_spans: Vec<Span> = trait_item
+ .generics
+ .params
+ .iter()
+ .filter_map(|p| match p.kind {
+ GenericParamKind::Type { synthetic: true, .. } => Some(p.span),
+ _ => None,
+ })
+ .collect();
+ (Some(arg_spans), impl_trait_spans)
+ } else {
+ (trait_span.map(|s| vec![s]), vec![])
+ };
+
+ let impl_item = tcx.hir().expect_impl_item(impl_.def_id.expect_local());
+ let impl_item_impl_trait_spans: Vec<Span> = impl_item
+ .generics
+ .params
+ .iter()
+ .filter_map(|p| match p.kind {
+ GenericParamKind::Type { synthetic: true, .. } => Some(p.span),
+ _ => None,
+ })
+ .collect();
+ let spans = arg_spans(impl_.kind, impl_item.generics);
+ let span = spans.first().copied();
+
+ let mut err = tcx.sess.struct_span_err_with_code(
+ spans,
+ &format!(
+ "{} `{}` has {} {kind} parameter{} but its trait \
+ declaration has {} {kind} parameter{}",
+ item_kind,
+ trait_.name,
+ impl_count,
+ pluralize!(impl_count),
+ trait_count,
+ pluralize!(trait_count),
+ kind = kind,
+ ),
+ DiagnosticId::Error("E0049".into()),
+ );
+
+ let mut suffix = None;
+
+ if let Some(spans) = trait_spans {
+ let mut spans = spans.iter();
+ if let Some(span) = spans.next() {
+ err.span_label(
+ *span,
+ format!(
+ "expected {} {} parameter{}",
+ trait_count,
+ kind,
+ pluralize!(trait_count),
+ ),
+ );
+ }
+ for span in spans {
+ err.span_label(*span, "");
+ }
+ } else {
+ suffix = Some(format!(", expected {trait_count}"));
+ }
+
+ if let Some(span) = span {
+ err.span_label(
+ span,
+ format!(
+ "found {} {} parameter{}{}",
+ impl_count,
+ kind,
+ pluralize!(impl_count),
+ suffix.unwrap_or_else(String::new),
+ ),
+ );
+ }
+
+ for span in impl_trait_spans.iter().chain(impl_item_impl_trait_spans.iter()) {
+ err.span_label(*span, "`impl Trait` introduces an implicit type parameter");
+ }
+
+ let reported = err.emit();
+ err_occurred = Some(reported);
+ }
+ }
+
+ if let Some(reported) = err_occurred { Err(reported) } else { Ok(()) }
+}
+
+fn compare_number_of_method_arguments<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ impl_m_span: Span,
+ trait_m: &ty::AssocItem,
+ trait_item_span: Option<Span>,
+) -> Result<(), ErrorGuaranteed> {
+ let impl_m_fty = tcx.fn_sig(impl_m.def_id);
+ let trait_m_fty = tcx.fn_sig(trait_m.def_id);
+ let trait_number_args = trait_m_fty.inputs().skip_binder().len();
+ let impl_number_args = impl_m_fty.inputs().skip_binder().len();
+ if trait_number_args != impl_number_args {
+ let trait_span = if let Some(def_id) = trait_m.def_id.as_local() {
+ match tcx.hir().expect_trait_item(def_id).kind {
+ TraitItemKind::Fn(ref trait_m_sig, _) => {
+ let pos = if trait_number_args > 0 { trait_number_args - 1 } else { 0 };
+ if let Some(arg) = trait_m_sig.decl.inputs.get(pos) {
+ Some(if pos == 0 {
+ arg.span
+ } else {
+ arg.span.with_lo(trait_m_sig.decl.inputs[0].span.lo())
+ })
+ } else {
+ trait_item_span
+ }
+ }
+ _ => bug!("{:?} is not a method", impl_m),
+ }
+ } else {
+ trait_item_span
+ };
+ let impl_span = match tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind {
+ ImplItemKind::Fn(ref impl_m_sig, _) => {
+ let pos = if impl_number_args > 0 { impl_number_args - 1 } else { 0 };
+ if let Some(arg) = impl_m_sig.decl.inputs.get(pos) {
+ if pos == 0 {
+ arg.span
+ } else {
+ arg.span.with_lo(impl_m_sig.decl.inputs[0].span.lo())
+ }
+ } else {
+ impl_m_span
+ }
+ }
+ _ => bug!("{:?} is not a method", impl_m),
+ };
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0050,
+ "method `{}` has {} but the declaration in trait `{}` has {}",
+ trait_m.name,
+ potentially_plural_count(impl_number_args, "parameter"),
+ tcx.def_path_str(trait_m.def_id),
+ trait_number_args
+ );
+ if let Some(trait_span) = trait_span {
+ err.span_label(
+ trait_span,
+ format!(
+ "trait requires {}",
+ potentially_plural_count(trait_number_args, "parameter")
+ ),
+ );
+ } else {
+ err.note_trait_signature(trait_m.name, trait_m.signature(tcx));
+ }
+ err.span_label(
+ impl_span,
+ format!(
+ "expected {}, found {}",
+ potentially_plural_count(trait_number_args, "parameter"),
+ impl_number_args
+ ),
+ );
+ let reported = err.emit();
+ return Err(reported);
+ }
+
+ Ok(())
+}
+
+fn compare_synthetic_generics<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_m: &ty::AssocItem,
+ trait_m: &ty::AssocItem,
+) -> Result<(), ErrorGuaranteed> {
+ // FIXME(chrisvittal) Clean up this function, list of FIXME items:
+ // 1. Better messages for the span labels
+ // 2. Explanation as to what is going on
+ // If we get here, we already have the same number of generics, so the zip will
+ // be okay.
+ let mut error_found = None;
+ let impl_m_generics = tcx.generics_of(impl_m.def_id);
+ let trait_m_generics = tcx.generics_of(trait_m.def_id);
+ let impl_m_type_params = impl_m_generics.params.iter().filter_map(|param| match param.kind {
+ GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)),
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None,
+ });
+ let trait_m_type_params = trait_m_generics.params.iter().filter_map(|param| match param.kind {
+ GenericParamDefKind::Type { synthetic, .. } => Some((param.def_id, synthetic)),
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => None,
+ });
+ for ((impl_def_id, impl_synthetic), (trait_def_id, trait_synthetic)) in
+ iter::zip(impl_m_type_params, trait_m_type_params)
+ {
+ if impl_synthetic != trait_synthetic {
+ let impl_def_id = impl_def_id.expect_local();
+ let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_def_id);
+ let impl_span = tcx.hir().span(impl_hir_id);
+ let trait_span = tcx.def_span(trait_def_id);
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0643,
+ "method `{}` has incompatible signature for trait",
+ trait_m.name
+ );
+ err.span_label(trait_span, "declaration in trait here");
+ match (impl_synthetic, trait_synthetic) {
+ // The case where the impl method uses `impl Trait` but the trait method uses
+ // explicit generics
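+ // E.g. (illustrative): the trait declares `fn f<T: Debug>(x: T)` but the
+ // impl writes `fn f(x: impl Debug)`; we suggest using an explicit generic
+ // parameter in the impl instead.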
+ (true, false) => {
+ err.span_label(impl_span, "expected generic parameter, found `impl Trait`");
+ (|| {
+ // try taking the name from the trait impl
+ // FIXME: this is obviously suboptimal since the name can already be used
+ // as another generic argument
+ let new_name = tcx.sess.source_map().span_to_snippet(trait_span).ok()?;
+ let trait_m = trait_m.def_id.as_local()?;
+ let trait_m = tcx.hir().trait_item(hir::TraitItemId { def_id: trait_m });
+
+ let impl_m = impl_m.def_id.as_local()?;
+ let impl_m = tcx.hir().impl_item(hir::ImplItemId { def_id: impl_m });
+
+ // in case there are no generics, take the spot between the function name
+ // and the opening paren of the argument list
+ let new_generics_span =
+ tcx.sess.source_map().generate_fn_name_span(impl_span)?.shrink_to_hi();
+ // in case there are generics, just replace them
+ let generics_span =
+ impl_m.generics.span.substitute_dummy(new_generics_span);
+ // replace with the generics from the trait
+ let new_generics =
+ tcx.sess.source_map().span_to_snippet(trait_m.generics.span).ok()?;
+
+ err.multipart_suggestion(
+ "try changing the `impl Trait` argument to a generic parameter",
+ vec![
+ // replace `impl Trait` with `T`
+ (impl_span, new_name),
+ // replace impl method generics with trait method generics
+ // This isn't quite right, as users might have changed the names
+ // of the generics, but it works for the common case
+ (generics_span, new_generics),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ Some(())
+ })();
+ }
+ // The case where the trait method uses `impl Trait`, but the impl method uses
+ // explicit generics.
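+ // E.g. (illustrative): the trait declares `fn f(x: impl Debug)` but the
+ // impl writes `fn f<T: Debug>(x: T)`; we suggest removing the generic
+ // parameter and using `impl Trait`.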
+ (false, true) => {
+ err.span_label(impl_span, "expected `impl Trait`, found generic parameter");
+ (|| {
+ let impl_m = impl_m.def_id.as_local()?;
+ let impl_m = tcx.hir().impl_item(hir::ImplItemId { def_id: impl_m });
+ let input_tys = match impl_m.kind {
+ hir::ImplItemKind::Fn(ref sig, _) => sig.decl.inputs,
+ _ => unreachable!(),
+ };
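+ // Walks the argument types looking for the first use of the generic
+ // parameter (field 1), recording its span in field 0.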
+ struct Visitor(Option<Span>, hir::def_id::LocalDefId);
+ impl<'v> intravisit::Visitor<'v> for Visitor {
+ fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
+ intravisit::walk_ty(self, ty);
+ if let hir::TyKind::Path(hir::QPath::Resolved(None, ref path)) =
+ ty.kind
+ && let Res::Def(DefKind::TyParam, def_id) = path.res
+ && def_id == self.1.to_def_id()
+ {
+ self.0 = Some(ty.span);
+ }
+ }
+ }
+ let mut visitor = Visitor(None, impl_def_id);
+ for ty in input_tys {
+ intravisit::Visitor::visit_ty(&mut visitor, ty);
+ }
+ let span = visitor.0?;
+
+ let bounds = impl_m.generics.bounds_for_param(impl_def_id).next()?.bounds;
+ let bounds = bounds.first()?.span().to(bounds.last()?.span());
+ let bounds = tcx.sess.source_map().span_to_snippet(bounds).ok()?;
+
+ err.multipart_suggestion(
+ "try removing the generic parameter and using `impl Trait` instead",
+ vec![
+ // delete generic parameters
+ (impl_m.generics.span, String::new()),
+ // replace param usage with `impl Trait`
+ (span, format!("impl {bounds}")),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ Some(())
+ })();
+ }
+ _ => unreachable!(),
+ }
+ let reported = err.emit();
+ error_found = Some(reported);
+ }
+ }
+ if let Some(reported) = error_found { Err(reported) } else { Ok(()) }
+}
+
+/// Checks that all parameters in the generics of a given assoc item in a trait impl have
+/// the same kind as the respective generic parameter in the trait def.
+///
+/// For example all 4 errors in the following code are emitted here:
+/// ```
+/// trait Foo {
+/// fn foo<const N: u8>();
+/// type bar<const N: u8>;
+/// fn baz<const N: u32>();
+/// type blah<T>;
+/// }
+///
+/// impl Foo for () {
+/// fn foo<const N: u64>() {}
+/// //~^ error
+/// type bar<const N: u64> = u32;
+/// //~^ error
+/// fn baz<T>() {}
+/// //~^ error
+/// type blah<const N: i64> = u32;
+/// //~^ error
+/// }
+/// ```
+///
+/// This function does not handle lifetime parameters.
+fn compare_generic_param_kinds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_item: &ty::AssocItem,
+ trait_item: &ty::AssocItem,
+) -> Result<(), ErrorGuaranteed> {
+ assert_eq!(impl_item.kind, trait_item.kind);
+
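+ // Gather only the type and const parameters, since this check does not
+ // apply to lifetime parameters (see the doc comment above).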
+ let ty_const_params_of = |def_id| {
+ tcx.generics_of(def_id).params.iter().filter(|param| {
+ matches!(
+ param.kind,
+ GenericParamDefKind::Const { .. } | GenericParamDefKind::Type { .. }
+ )
+ })
+ };
+
+ for (param_impl, param_trait) in
+ iter::zip(ty_const_params_of(impl_item.def_id), ty_const_params_of(trait_item.def_id))
+ {
+ use GenericParamDefKind::*;
+ if match (&param_impl.kind, &param_trait.kind) {
+ (Const { .. }, Const { .. })
+ if tcx.type_of(param_impl.def_id) != tcx.type_of(param_trait.def_id) =>
+ {
+ true
+ }
+ (Const { .. }, Type { .. }) | (Type { .. }, Const { .. }) => true,
+ // this is exhaustive so that anyone adding new generic param kinds knows
+ // to make sure this error is reported for them.
+ (Const { .. }, Const { .. }) | (Type { .. }, Type { .. }) => false,
+ (Lifetime { .. }, _) | (_, Lifetime { .. }) => unreachable!(),
+ } {
+ let param_impl_span = tcx.def_span(param_impl.def_id);
+ let param_trait_span = tcx.def_span(param_trait.def_id);
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ param_impl_span,
+ E0053,
+ "{} `{}` has an incompatible generic parameter for trait `{}`",
+ assoc_item_kind_str(&impl_item),
+ trait_item.name,
+ &tcx.def_path_str(tcx.parent(trait_item.def_id))
+ );
+
+ let make_param_message = |prefix: &str, param: &ty::GenericParamDef| match param.kind {
+ Const { .. } => {
+ format!("{} const parameter of type `{}`", prefix, tcx.type_of(param.def_id))
+ }
+ Type { .. } => format!("{} type parameter", prefix),
+ Lifetime { .. } => unreachable!(),
+ };
+
+ let trait_header_span = tcx.def_ident_span(tcx.parent(trait_item.def_id)).unwrap();
+ err.span_label(trait_header_span, "");
+ err.span_label(param_trait_span, make_param_message("expected", param_trait));
+
+ let impl_header_span = tcx.def_span(tcx.parent(impl_item.def_id));
+ err.span_label(impl_header_span, "");
+ err.span_label(param_impl_span, make_param_message("found", param_impl));
+
+ let reported = err.emit();
+ return Err(reported);
+ }
+ }
+
+ Ok(())
+}
+
+pub(crate) fn compare_const_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_c: &ty::AssocItem,
+ impl_c_span: Span,
+ trait_c: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) {
+ debug!("compare_const_impl(impl_trait_ref={:?})", impl_trait_ref);
+
+ tcx.infer_ctxt().enter(|infcx| {
+ let param_env = tcx.param_env(impl_c.def_id);
+ let ocx = ObligationCtxt::new(&infcx);
+
+ // The below is largely similar to the procedure
+ // for methods above. It is simpler in many respects, especially
+ // because we shouldn't really have to deal with lifetimes or
+ // predicates. In fact some of this should probably be put into
+ // shared functions because of DRY violations...
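+ //
+ // E.g. (illustrative):
+ //
+ //     trait Tr { const C: u8; }
+ //     impl Tr for () { const C: u16 = 0; } // E0326: mismatched type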
+ let trait_to_impl_substs = impl_trait_ref.substs;
+
+ // Create a parameter environment that represents the implementation's
+ // method.
+ let impl_c_hir_id = tcx.hir().local_def_id_to_hir_id(impl_c.def_id.expect_local());
+
+ // Compute placeholder form of impl and trait const tys.
+ let impl_ty = tcx.type_of(impl_c.def_id);
+ let trait_ty = tcx.bound_type_of(trait_c.def_id).subst(tcx, trait_to_impl_substs);
+ let mut cause = ObligationCause::new(
+ impl_c_span,
+ impl_c_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_c.def_id.expect_local(),
+ trait_item_def_id: trait_c.def_id,
+ kind: impl_c.kind,
+ },
+ );
+
+ // There is no "body" here, so just pass dummy id.
+ let impl_ty = ocx.normalize(cause.clone(), param_env, impl_ty);
+
+ debug!("compare_const_impl: impl_ty={:?}", impl_ty);
+
+ let trait_ty = ocx.normalize(cause.clone(), param_env, trait_ty);
+
+ debug!("compare_const_impl: trait_ty={:?}", trait_ty);
+
+ let err = infcx
+ .at(&cause, param_env)
+ .sup(trait_ty, impl_ty)
+ .map(|ok| ocx.register_infer_ok_obligations(ok));
+
+ if let Err(terr) = err {
+ debug!(
+ "checking associated const for compatibility: impl ty {:?}, trait ty {:?}",
+ impl_ty, trait_ty
+ );
+
+ // Locate the Span containing just the type of the offending impl
+ match tcx.hir().expect_impl_item(impl_c.def_id.expect_local()).kind {
+ ImplItemKind::Const(ref ty, _) => cause.span = ty.span,
+ _ => bug!("{:?} is not an impl const", impl_c),
+ }
+
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ cause.span,
+ E0326,
+ "implemented const `{}` has an incompatible type for trait",
+ trait_c.name
+ );
+
+ let trait_c_span = trait_c.def_id.as_local().map(|trait_c_def_id| {
+ // Add a label to the Span containing just the type of the const
+ match tcx.hir().expect_trait_item(trait_c_def_id).kind {
+ TraitItemKind::Const(ref ty, _) => ty.span,
+ _ => bug!("{:?} is not a trait const", trait_c),
+ }
+ });
+
+ infcx.note_type_err(
+ &mut diag,
+ &cause,
+ trait_c_span.map(|span| (span, "type in trait".to_owned())),
+ Some(infer::ValuePairs::Terms(ExpectedFound {
+ expected: trait_ty.into(),
+ found: impl_ty.into(),
+ })),
+ &terr,
+ false,
+ false,
+ );
+ diag.emit();
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.report_fulfillment_errors(&errors, None, false);
+ return;
+ }
+
+ let outlives_environment = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(
+ impl_c.def_id.expect_local(),
+ &outlives_environment,
+ );
+ });
+}
+
+pub(crate) fn compare_ty_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_ty: &ty::AssocItem,
+ impl_ty_span: Span,
+ trait_ty: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+ trait_item_span: Option<Span>,
+) {
+ debug!("compare_impl_type(impl_trait_ref={:?})", impl_trait_ref);
+
+ let _: Result<(), ErrorGuaranteed> = (|| {
+ compare_number_of_generics(tcx, impl_ty, impl_ty_span, trait_ty, trait_item_span)?;
+
+ compare_generic_param_kinds(tcx, impl_ty, trait_ty)?;
+
+ let sp = tcx.def_span(impl_ty.def_id);
+ compare_type_predicate_entailment(tcx, impl_ty, sp, trait_ty, impl_trait_ref)?;
+
+ check_type_bounds(tcx, trait_ty, impl_ty, impl_ty_span, impl_trait_ref)
+ })();
+}
+
+/// The equivalent of [compare_predicate_entailment], but for associated types
+/// instead of associated functions.
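+///
+/// For example (an illustrative sketch using generic associated types), an
+/// impl whose associated type declares stricter bounds on its own generics
+/// than the trait does fails here:
+/// ```ignore (illustrative)
+/// trait Tr { type A<X: Clone>; }
+/// impl Tr for () { type A<X: Copy> = X; } // `X: Copy` is not entailed by `X: Clone`
+/// ```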
+fn compare_type_predicate_entailment<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_ty: &ty::AssocItem,
+ impl_ty_span: Span,
+ trait_ty: &ty::AssocItem,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let impl_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
+ let trait_to_impl_substs =
+ impl_substs.rebase_onto(tcx, impl_ty.container_id(tcx), impl_trait_ref.substs);
+
+ let impl_ty_generics = tcx.generics_of(impl_ty.def_id);
+ let trait_ty_generics = tcx.generics_of(trait_ty.def_id);
+ let impl_ty_predicates = tcx.predicates_of(impl_ty.def_id);
+ let trait_ty_predicates = tcx.predicates_of(trait_ty.def_id);
+
+ check_region_bounds_on_impl_item(
+ tcx,
+ impl_ty,
+ trait_ty,
+ &trait_ty_generics,
+ &impl_ty_generics,
+ )?;
+
+ let impl_ty_own_bounds = impl_ty_predicates.instantiate_own(tcx, impl_substs);
+
+ if impl_ty_own_bounds.is_empty() {
+ // Nothing to check.
+ return Ok(());
+ }
+
+ // This `HirId` should be used for the `body_id` field on each
+ // `ObligationCause` (and the `FnCtxt`). This is what
+ // `regionck_item` expects.
+ let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local());
+ debug!("compare_type_predicate_entailment: trait_to_impl_substs={:?}", trait_to_impl_substs);
+
+ // The predicates declared by the impl definition, the trait and the
+ // associated type in the trait are assumed.
+ let impl_predicates = tcx.predicates_of(impl_ty_predicates.parent.unwrap());
+ let mut hybrid_preds = impl_predicates.instantiate_identity(tcx);
+ hybrid_preds
+ .predicates
+ .extend(trait_ty_predicates.instantiate_own(tcx, trait_to_impl_substs).predicates);
+
+ debug!("compare_type_predicate_entailment: bounds={:?}", hybrid_preds);
+
+ let normalize_cause = traits::ObligationCause::misc(impl_ty_span, impl_ty_hir_id);
+ let param_env = ty::ParamEnv::new(
+ tcx.intern_predicates(&hybrid_preds.predicates),
+ Reveal::UserFacing,
+ hir::Constness::NotConst,
+ );
+ let param_env = traits::normalize_param_env_or_error(tcx, param_env, normalize_cause);
+ tcx.infer_ctxt().enter(|infcx| {
+ let ocx = ObligationCtxt::new(&infcx);
+
+ debug!("compare_type_predicate_entailment: caller_bounds={:?}", param_env.caller_bounds());
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+
+ assert_eq!(impl_ty_own_bounds.predicates.len(), impl_ty_own_bounds.spans.len());
+ for (span, predicate) in
+ std::iter::zip(impl_ty_own_bounds.spans, impl_ty_own_bounds.predicates)
+ {
+ let cause = ObligationCause::misc(span, impl_ty_hir_id);
+ let traits::Normalized { value: predicate, obligations } =
+ traits::normalize(&mut selcx, param_env, cause, predicate);
+
+ let cause = ObligationCause::new(
+ span,
+ impl_ty_hir_id,
+ ObligationCauseCode::CompareImplItemObligation {
+ impl_item_def_id: impl_ty.def_id.expect_local(),
+ trait_item_def_id: trait_ty.def_id,
+ kind: impl_ty.kind,
+ },
+ );
+ ocx.register_obligations(obligations);
+ ocx.register_obligation(traits::Obligation::new(cause, param_env, predicate));
+ }
+
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let outlives_environment = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(
+ impl_ty.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ Ok(())
+ })
+}
+
+/// Validate that `ProjectionCandidate`s created for this associated type will
+/// be valid.
+///
+/// Usually given
+///
+/// trait X { type Y: Copy } impl X for T { type Y = S; }
+///
+/// We are able to normalize `<T as X>::Y` to `S`, and so when we check the
+/// impl is well-formed we have to prove `S: Copy`.
+///
+/// For default associated types the normalization is not possible (the value
+/// from the impl could be overridden). We also can't normalize generic
+/// associated types (yet) because they contain bound parameters.
+#[tracing::instrument(level = "debug", skip(tcx))]
+pub fn check_type_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ty: &ty::AssocItem,
+ impl_ty: &ty::AssocItem,
+ impl_ty_span: Span,
+ impl_trait_ref: ty::TraitRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ // Given
+ //
+ // impl<A, B> Foo<u32> for (A, B) {
+ // type Bar<C> = ...
+ // }
+ //
+ // - `impl_trait_ref` would be `<(A, B) as Foo<u32>>`
+ // - `impl_ty_substs` would be `[A, B, ^0.0]` (`^0.0` here is the bound var with db 0 and index 0)
+ // - `rebased_substs` would be `[(A, B), u32, ^0.0]`, combining the substs from
+ // the *trait* with the generic associated type parameters (as bound vars).
+ //
+ // A note regarding the use of bound vars here:
+ // Imagine as an example
+ // ```
+ // trait Family {
+ // type Member<C: Eq>;
+ // }
+ //
+ // impl Family for VecFamily {
+ // type Member<C: Eq> = i32;
+ // }
+ // ```
+ // Here, we would generate
+ // ```notrust
+ // forall<C> { Normalize(<VecFamily as Family>::Member<C> => i32) }
+ // ```
+ // when we really would like to generate
+ // ```notrust
+ // forall<C> { Normalize(<VecFamily as Family>::Member<C> => i32) :- Implemented(C: Eq) }
+ // ```
+ // But, this is probably fine, because although the first clause can be used with types C that
+ // do not implement Eq, for it to cause some kind of problem, there would have to be a
+ // VecFamily::Member<X> for some type X where !(X: Eq), that appears in the value of type
+ // Member<C: Eq> = .... That type would fail a well-formedness check that we ought to be doing
+ // elsewhere, which would check that any <T as Family>::Member<X> meets the bounds declared in
+ // the trait (notably, that X: Eq and T: Family).
+ let defs: &ty::Generics = tcx.generics_of(impl_ty.def_id);
+ let mut substs = smallvec::SmallVec::with_capacity(defs.count());
+ if let Some(def_id) = defs.parent {
+ let parent_defs = tcx.generics_of(def_id);
+ InternalSubsts::fill_item(&mut substs, tcx, parent_defs, &mut |param, _| {
+ tcx.mk_param_from_def(param)
+ });
+ }
+ let mut bound_vars: smallvec::SmallVec<[ty::BoundVariableKind; 8]> =
+ smallvec::SmallVec::with_capacity(defs.count());
+ InternalSubsts::fill_single(&mut substs, defs, &mut |param, _| match param.kind {
+ GenericParamDefKind::Type { .. } => {
+ let kind = ty::BoundTyKind::Param(param.name);
+ let bound_var = ty::BoundVariableKind::Ty(kind);
+ bound_vars.push(bound_var);
+ tcx.mk_ty(ty::Bound(
+ ty::INNERMOST,
+ ty::BoundTy { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind },
+ ))
+ .into()
+ }
+ GenericParamDefKind::Lifetime => {
+ let kind = ty::BoundRegionKind::BrNamed(param.def_id, param.name);
+ let bound_var = ty::BoundVariableKind::Region(kind);
+ bound_vars.push(bound_var);
+ tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: ty::BoundVar::from_usize(bound_vars.len() - 1), kind },
+ ))
+ .into()
+ }
+ GenericParamDefKind::Const { .. } => {
+ let bound_var = ty::BoundVariableKind::Const;
+ bound_vars.push(bound_var);
+ tcx.mk_const(ty::ConstS {
+ ty: tcx.type_of(param.def_id),
+ kind: ty::ConstKind::Bound(
+ ty::INNERMOST,
+ ty::BoundVar::from_usize(bound_vars.len() - 1),
+ ),
+ })
+ .into()
+ }
+ });
+ let bound_vars = tcx.mk_bound_variable_kinds(bound_vars.into_iter());
+ let impl_ty_substs = tcx.intern_substs(&substs);
+ let container_id = impl_ty.container_id(tcx);
+
+ let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
+ let impl_ty_value = tcx.type_of(impl_ty.def_id);
+
+ let param_env = tcx.param_env(impl_ty.def_id);
+
+ // When checking something like
+ //
+ // trait X { type Y: PartialEq<<Self as X>::Y> }
+ // impl X for T { default type Y = S; }
+ //
+ // We will have to prove the bound S: PartialEq<<T as X>::Y>. In this case
+ // we want <T as X>::Y to normalize to S. This is valid because we are
+ // checking the default value specifically here. Add this equality to the
+ // ParamEnv for normalization specifically.
+ let normalize_param_env = {
+ let mut predicates = param_env.caller_bounds().iter().collect::<Vec<_>>();
+ match impl_ty_value.kind() {
+ ty::Projection(proj)
+ if proj.item_def_id == trait_ty.def_id && proj.substs == rebased_substs =>
+ {
+ // Don't include this predicate if the projected type is
+ // exactly the same as the projection. This can occur in
+ // (somewhat dubious) code like this:
+ //
+ // impl<T> X for T where T: X { type Y = <T as X>::Y; }
+ }
+ _ => predicates.push(
+ ty::Binder::bind_with_vars(
+ ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy {
+ item_def_id: trait_ty.def_id,
+ substs: rebased_substs,
+ },
+ term: impl_ty_value.into(),
+ },
+ bound_vars,
+ )
+ .to_predicate(tcx),
+ ),
+ };
+ ty::ParamEnv::new(
+ tcx.intern_predicates(&predicates),
+ Reveal::UserFacing,
+ param_env.constness(),
+ )
+ };
+ debug!(?normalize_param_env);
+
+ let impl_ty_substs = InternalSubsts::identity_for_item(tcx, impl_ty.def_id);
+ let rebased_substs = impl_ty_substs.rebase_onto(tcx, container_id, impl_trait_ref.substs);
+
+ tcx.infer_ctxt().enter(move |infcx| {
+ let ocx = ObligationCtxt::new(&infcx);
+
+ let mut selcx = traits::SelectionContext::new(&infcx);
+ let impl_ty_hir_id = tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local());
+ let normalize_cause = ObligationCause::new(
+ impl_ty_span,
+ impl_ty_hir_id,
+ ObligationCauseCode::CheckAssociatedTypeBounds {
+ impl_item_def_id: impl_ty.def_id.expect_local(),
+ trait_item_def_id: trait_ty.def_id,
+ },
+ );
+ let mk_cause = |span: Span| {
+ let code = if span.is_dummy() {
+ traits::MiscObligation
+ } else {
+ traits::BindingObligation(trait_ty.def_id, span)
+ };
+ ObligationCause::new(impl_ty_span, impl_ty_hir_id, code)
+ };
+
+ let obligations = tcx
+ .bound_explicit_item_bounds(trait_ty.def_id)
+ .transpose_iter()
+ .map(|e| e.map_bound(|e| *e).transpose_tuple2())
+ .map(|(bound, span)| {
+ debug!(?bound);
+ // This is where the opaque type is found.
+ let concrete_ty_bound = bound.subst(tcx, rebased_substs);
+ debug!("check_type_bounds: concrete_ty_bound = {:?}", concrete_ty_bound);
+
+ traits::Obligation::new(mk_cause(span.0), param_env, concrete_ty_bound)
+ })
+ .collect();
+ debug!("check_type_bounds: item_bounds={:?}", obligations);
+
+ for mut obligation in util::elaborate_obligations(tcx, obligations) {
+ let traits::Normalized { value: normalized_predicate, obligations } = traits::normalize(
+ &mut selcx,
+ normalize_param_env,
+ normalize_cause.clone(),
+ obligation.predicate,
+ );
+ debug!("compare_projection_bounds: normalized predicate = {:?}", normalized_predicate);
+ obligation.predicate = normalized_predicate;
+
+ ocx.register_obligations(obligations);
+ ocx.register_obligation(obligation);
+ }
+ // Check that all obligations are satisfied by the implementation's
+ // version.
+ let errors = ocx.select_all_or_error();
+ if !errors.is_empty() {
+ let reported = infcx.report_fulfillment_errors(&errors, None, false);
+ return Err(reported);
+ }
+
+ // Finally, resolve all regions. This catches wily misuses of
+ // lifetime parameters.
+ let implied_bounds = match impl_ty.container {
+ ty::TraitContainer => FxHashSet::default(),
+ ty::ImplContainer => wfcheck::impl_implied_bounds(
+ tcx,
+ param_env,
+ container_id.expect_local(),
+ impl_ty_span,
+ ),
+ };
+ let mut outlives_environment = OutlivesEnvironment::new(param_env);
+ outlives_environment.add_implied_bounds(&infcx, implied_bounds, impl_ty_hir_id);
+ infcx.check_region_obligations_and_report_errors(
+ impl_ty.def_id.expect_local(),
+ &outlives_environment,
+ );
+
+ let constraints = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+ for (key, value) in constraints {
+ infcx
+ .report_mismatched_types(
+ &ObligationCause::misc(
+ value.hidden_type.span,
+ tcx.hir().local_def_id_to_hir_id(impl_ty.def_id.expect_local()),
+ ),
+ tcx.mk_opaque(key.def_id.to_def_id(), key.substs),
+ value.hidden_type.ty,
+ TypeError::Mismatch,
+ )
+ .emit();
+ }
+
+ Ok(())
+ })
+}
+
+fn assoc_item_kind_str(impl_item: &ty::AssocItem) -> &'static str {
+ match impl_item.kind {
+ ty::AssocKind::Const => "const",
+ ty::AssocKind::Fn => "method",
+ ty::AssocKind::Type => "type",
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/demand.rs b/compiler/rustc_typeck/src/check/demand.rs
new file mode 100644
index 000000000..4de48dc5b
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/demand.rs
@@ -0,0 +1,1442 @@
+use crate::check::FnCtxt;
+use rustc_infer::infer::InferOk;
+use rustc_middle::middle::stability::EvalResult;
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::ObligationCause;
+
+use rustc_ast::util::parser::PREC_POSTFIX;
+use rustc_errors::{Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{is_range_literal, Node};
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::ty::adjustment::AllowTwoPhase;
+use rustc_middle::ty::error::{ExpectedFound, TypeError};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_middle::ty::{self, Article, AssocItem, Ty, TypeAndMut};
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{BytePos, Span};
+
+use super::method::probe;
+
+use std::iter;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn emit_coerce_suggestions(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'tcx>,
+ expr_ty: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ error: Option<TypeError<'tcx>>,
+ ) {
+ self.annotate_expected_due_to_let_ty(err, expr, error);
+ self.suggest_deref_ref_or_into(err, expr, expected, expr_ty, expected_ty_expr);
+ self.suggest_compatible_variants(err, expr, expected, expr_ty);
+ self.suggest_non_zero_new_unwrap(err, expr, expected, expr_ty);
+ if self.suggest_calling_boxed_future_when_appropriate(err, expr, expected, expr_ty) {
+ return;
+ }
+ self.suggest_no_capture_closure(err, expected, expr_ty);
+ self.suggest_boxing_when_appropriate(err, expr, expected, expr_ty);
+ self.suggest_missing_parentheses(err, expr);
+ self.suggest_block_to_brackets_peeling_refs(err, expr, expr_ty, expected);
+ self.note_type_is_not_clone(err, expected, expr_ty, expr);
+ self.note_need_for_fn_pointer(err, expected, expr_ty);
+ self.note_internal_mutation_in_method(err, expr, expected, expr_ty);
+ self.report_closure_inferred_return_type(err, expected);
+ }
+
+ // Requires that the two types unify, and prints an error message if
+ // they don't.
+ pub fn demand_suptype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) {
+ if let Some(mut e) = self.demand_suptype_diag(sp, expected, actual) {
+ e.emit();
+ }
+ }
+
+ pub fn demand_suptype_diag(
+ &self,
+ sp: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ self.demand_suptype_with_origin(&self.misc(sp), expected, actual)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn demand_suptype_with_origin(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ match self.at(cause, self.param_env).sup(expected, actual) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ None
+ }
+ Err(e) => Some(self.report_mismatched_types(&cause, expected, actual, e)),
+ }
+ }
+
+ pub fn demand_eqtype(&self, sp: Span, expected: Ty<'tcx>, actual: Ty<'tcx>) {
+ if let Some(mut err) = self.demand_eqtype_diag(sp, expected, actual) {
+ err.emit();
+ }
+ }
+
+ pub fn demand_eqtype_diag(
+ &self,
+ sp: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ self.demand_eqtype_with_origin(&self.misc(sp), expected, actual)
+ }
+
+ pub fn demand_eqtype_with_origin(
+ &self,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ match self.at(cause, self.param_env).eq(expected, actual) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ None
+ }
+ Err(e) => Some(self.report_mismatched_types(cause, expected, actual, e)),
+ }
+ }
+
+ pub fn demand_coerce(
+ &self,
+ expr: &hir::Expr<'tcx>,
+ checked_ty: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ allow_two_phase: AllowTwoPhase,
+ ) -> Ty<'tcx> {
+ let (ty, err) =
+ self.demand_coerce_diag(expr, checked_ty, expected, expected_ty_expr, allow_two_phase);
+ if let Some(mut err) = err {
+ err.emit();
+ }
+ ty
+ }
+
+ /// Checks that the type of `expr` can be coerced to `expected`.
+ ///
+ /// N.B., this code relies on `self.diverges` to be accurate. In particular, assignments to `!`
+ /// will be permitted if the diverges flag is currently "always".
+ #[tracing::instrument(level = "debug", skip(self, expr, expected_ty_expr, allow_two_phase))]
+ pub fn demand_coerce_diag(
+ &self,
+ expr: &hir::Expr<'tcx>,
+ checked_ty: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ allow_two_phase: AllowTwoPhase,
+ ) -> (Ty<'tcx>, Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>>) {
+ let expected = self.resolve_vars_with_obligations(expected);
+
+ let e = match self.try_coerce(expr, checked_ty, expected, allow_two_phase, None) {
+ Ok(ty) => return (ty, None),
+ Err(e) => e,
+ };
+
+ self.set_tainted_by_errors();
+ let expr = expr.peel_drop_temps();
+ let cause = self.misc(expr.span);
+ let expr_ty = self.resolve_vars_with_obligations(checked_ty);
+ let mut err = self.report_mismatched_types(&cause, expected, expr_ty, e.clone());
+
+ let is_insufficiently_polymorphic =
+ matches!(e, TypeError::RegionsInsufficientlyPolymorphic(..));
+
+ // FIXME(#73154): For now, we do leak check when coercing function
+ // pointers in typeck, instead of only during borrowck. This can lead
+ // to these `RegionsInsufficientlyPolymorphic` errors that aren't helpful.
+ if !is_insufficiently_polymorphic {
+ self.emit_coerce_suggestions(
+ &mut err,
+ expr,
+ expr_ty,
+ expected,
+ expected_ty_expr,
+ Some(e),
+ );
+ }
+
+ (expected, Some(err))
+ }
+
+ fn annotate_expected_due_to_let_ty(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ error: Option<TypeError<'_>>,
+ ) {
+ let parent = self.tcx.hir().get_parent_node(expr.hir_id);
+ match (self.tcx.hir().find(parent), error) {
+ (Some(hir::Node::Local(hir::Local { ty: Some(ty), init: Some(init), .. })), _)
+ if init.hir_id == expr.hir_id =>
+ {
+ // Point at `let` assignment type.
+ err.span_label(ty.span, "expected due to this");
+ }
+ (
+ Some(hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Assign(lhs, rhs, _), ..
+ })),
+ Some(TypeError::Sorts(ExpectedFound { expected, .. })),
+ ) if rhs.hir_id == expr.hir_id && !expected.is_closure() => {
+ // We ignore closures explicitly because we already point at them elsewhere.
+ // Point at the assigned-to binding.
+ let mut primary_span = lhs.span;
+ let mut secondary_span = lhs.span;
+ let mut post_message = "";
+ match lhs.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path {
+ res:
+ hir::def::Res::Def(
+ hir::def::DefKind::Static(_) | hir::def::DefKind::Const,
+ def_id,
+ ),
+ ..
+ },
+ )) => {
+ if let Some(hir::Node::Item(hir::Item {
+ ident,
+ kind: hir::ItemKind::Static(ty, ..) | hir::ItemKind::Const(ty, ..),
+ ..
+ })) = self.tcx.hir().get_if_local(*def_id)
+ {
+ primary_span = ty.span;
+ secondary_span = ident.span;
+ post_message = " type";
+ }
+ }
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path { res: hir::def::Res::Local(hir_id), .. },
+ )) => {
+ if let Some(hir::Node::Pat(pat)) = self.tcx.hir().find(*hir_id) {
+ let parent = self.tcx.hir().get_parent_node(pat.hir_id);
+ primary_span = pat.span;
+ secondary_span = pat.span;
+ match self.tcx.hir().find(parent) {
+ Some(hir::Node::Local(hir::Local { ty: Some(ty), .. })) => {
+ primary_span = ty.span;
+ post_message = " type";
+ }
+ Some(hir::Node::Local(hir::Local { init: Some(init), .. })) => {
+ primary_span = init.span;
+ post_message = " value";
+ }
+ Some(hir::Node::Param(hir::Param { ty_span, .. })) => {
+ primary_span = *ty_span;
+ post_message = " parameter type";
+ }
+ _ => {}
+ }
+ }
+ }
+ _ => {}
+ }
+
+ if primary_span != secondary_span
+ && self
+ .tcx
+ .sess
+ .source_map()
+ .is_multiline(secondary_span.shrink_to_hi().until(primary_span))
+ {
+ // We are pointing at the binding's type or initializer value, but its pattern
+ // is on a different line, so we point at both.
+ err.span_label(secondary_span, "expected due to the type of this binding");
+ err.span_label(primary_span, &format!("expected due to this{post_message}"));
+ } else if post_message.is_empty() {
+ // We are pointing at either the assignment lhs or the binding def pattern.
+ err.span_label(primary_span, "expected due to the type of this binding");
+ } else {
+ // We are pointing at the binding's type or initializer value.
+ err.span_label(primary_span, &format!("expected due to this{post_message}"));
+ }
+
+ if !lhs.is_syntactic_place_expr() {
+ // We already emitted E0070 "invalid left-hand side of assignment", so we
+ // silence this.
+ err.downgrade_to_delayed_bug();
+ }
+ }
+ _ => {}
+ }
+ }
+
+ /// If the expected type is an enum (Issue #55250) with any variants whose
+ /// sole field is of the found type, suggest such variants. (Issue #42764)
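+ ///
+ /// E.g. (illustrative): if the expected type is `Option<String>` and the
+ /// expression has type `String`, we suggest wrapping it in `Some(..)`.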
+ fn suggest_compatible_variants(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ expr_ty: Ty<'tcx>,
+ ) {
+ if let ty::Adt(expected_adt, substs) = expected.kind() {
+ if let hir::ExprKind::Field(base, ident) = expr.kind {
+ let base_ty = self.typeck_results.borrow().expr_ty(base);
+ if self.can_eq(self.param_env, base_ty, expected).is_ok()
+ && let Some(base_span) = base.span.find_ancestor_inside(expr.span)
+ {
+ err.span_suggestion_verbose(
+ expr.span.with_lo(base_span.hi()),
+ format!("consider removing the tuple struct field `{ident}`"),
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ return
+ }
+ }
+
+ // If the expression is of type () and it's the return expression of a block,
+ // we suggest adding a separate return expression instead.
+ // (To avoid things like suggesting `Ok(while .. { .. })`.)
+ if expr_ty.is_unit() {
+ let mut id = expr.hir_id;
+ let mut parent;
+
+ // Unroll desugaring, to make sure this works for `for` loops etc.
+ loop {
+ parent = self.tcx.hir().get_parent_node(id);
+ if let Some(parent_span) = self.tcx.hir().opt_span(parent) {
+ if parent_span.find_ancestor_inside(expr.span).is_some() {
+ // The parent node is part of the same span, so is the result of the
+ // same expansion/desugaring and not the 'real' parent node.
+ id = parent;
+ continue;
+ }
+ }
+ break;
+ }
+
+ if let Some(hir::Node::Block(&hir::Block {
+ span: block_span, expr: Some(e), ..
+ })) = self.tcx.hir().find(parent)
+ {
+ if e.hir_id == id {
+ if let Some(span) = expr.span.find_ancestor_inside(block_span) {
+ let return_suggestions = if self
+ .tcx
+ .is_diagnostic_item(sym::Result, expected_adt.did())
+ {
+ vec!["Ok(())"]
+ } else if self.tcx.is_diagnostic_item(sym::Option, expected_adt.did()) {
+ vec!["None", "Some(())"]
+ } else {
+ return;
+ };
+ if let Some(indent) =
+ self.tcx.sess.source_map().indentation_before(span.shrink_to_lo())
+ {
+ // Add a semicolon, except after `}`.
+ let semicolon =
+ match self.tcx.sess.source_map().span_to_snippet(span) {
+ Ok(s) if s.ends_with('}') => "",
+ _ => ";",
+ };
+ err.span_suggestions(
+ span.shrink_to_hi(),
+ "try adding an expression at the end of the block",
+ return_suggestions
+ .into_iter()
+ .map(|r| format!("{semicolon}\n{indent}{r}")),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ return;
+ }
+ }
+ }
+ }
+
+ let compatible_variants: Vec<(String, _, _, Option<String>)> = expected_adt
+ .variants()
+ .iter()
+ .filter(|variant| {
+ variant.fields.len() == 1
+ })
+ .filter_map(|variant| {
+ let sole_field = &variant.fields[0];
+
+ let field_is_local = sole_field.did.is_local();
+ let field_is_accessible =
+ sole_field.vis.is_accessible_from(expr.hir_id.owner.to_def_id(), self.tcx)
+ // Skip suggestions for unstable public fields (for example `Pin::pointer`)
+ && matches!(self.tcx.eval_stability(sole_field.did, None, expr.span, None), EvalResult::Allow | EvalResult::Unmarked);
+
+ if !field_is_local && !field_is_accessible {
+ return None;
+ }
+
+ let note_about_variant_field_privacy = (field_is_local && !field_is_accessible)
+ .then(|| format!(" (its field is private, but it's local to this crate and its privacy can be changed)"));
+
+ let sole_field_ty = sole_field.ty(self.tcx, substs);
+ if self.can_coerce(expr_ty, sole_field_ty) {
+ let variant_path =
+ with_no_trimmed_paths!(self.tcx.def_path_str(variant.def_id));
+ // FIXME #56861: DRYer prelude filtering
+ if let Some(path) = variant_path.strip_prefix("std::prelude::")
+ && let Some((_, path)) = path.split_once("::")
+ {
+ return Some((path.to_string(), variant.ctor_kind, sole_field.name, note_about_variant_field_privacy));
+ }
+ Some((variant_path, variant.ctor_kind, sole_field.name, note_about_variant_field_privacy))
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ let suggestions_for = |variant: &_, ctor, field_name| {
+ let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) {
+ Some(ident) => format!("{ident}: "),
+ None => String::new(),
+ };
+
+ let (open, close) = match ctor {
+ hir::def::CtorKind::Fn => ("(".to_owned(), ")"),
+ hir::def::CtorKind::Fictive => (format!(" {{ {field_name}: "), " }"),
+
+ // unit variants don't have fields
+ hir::def::CtorKind::Const => unreachable!(),
+ };
+
+ vec![
+ (expr.span.shrink_to_lo(), format!("{prefix}{variant}{open}")),
+ (expr.span.shrink_to_hi(), close.to_owned()),
+ ]
+ };
+
+ match &compatible_variants[..] {
+ [] => { /* No variants to format */ }
+ [(variant, ctor_kind, field_name, note)] => {
+ // Just a single matching variant.
+ err.multipart_suggestion_verbose(
+ &format!(
+ "try wrapping the expression in `{variant}`{note}",
+ note = note.as_deref().unwrap_or("")
+ ),
+ suggestions_for(&**variant, *ctor_kind, *field_name),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {
+ // More than one matching variant.
+ err.multipart_suggestions(
+ &format!(
+ "try wrapping the expression in a variant of `{}`",
+ self.tcx.def_path_str(expected_adt.did())
+ ),
+ compatible_variants.into_iter().map(
+ |(variant, ctor_kind, field_name, _)| {
+ suggestions_for(&variant, ctor_kind, field_name)
+ },
+ ),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+
+ fn suggest_non_zero_new_unwrap(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ expr_ty: Ty<'tcx>,
+ ) {
+ let tcx = self.tcx;
+ let (adt, unwrap) = match expected.kind() {
+ // In case Option<NonZero*> is wanted, but * is provided, suggest calling new
+ ty::Adt(adt, substs) if tcx.is_diagnostic_item(sym::Option, adt.did()) => {
+ // Unwrap option
+ let ty::Adt(adt, _) = substs.type_at(0).kind() else { return };
+
+ (adt, "")
+ }
+ // If a `NonZero*` itself is expected but a bare integer is provided, also add `.unwrap()` to satisfy the types
+ ty::Adt(adt, _) => (adt, ".unwrap()"),
+ _ => return,
+ };
+
+ let map = [
+ (sym::NonZeroU8, tcx.types.u8),
+ (sym::NonZeroU16, tcx.types.u16),
+ (sym::NonZeroU32, tcx.types.u32),
+ (sym::NonZeroU64, tcx.types.u64),
+ (sym::NonZeroU128, tcx.types.u128),
+ (sym::NonZeroI8, tcx.types.i8),
+ (sym::NonZeroI16, tcx.types.i16),
+ (sym::NonZeroI32, tcx.types.i32),
+ (sym::NonZeroI64, tcx.types.i64),
+ (sym::NonZeroI128, tcx.types.i128),
+ ];
+
+ let Some((s, _)) = map
+ .iter()
+ .find(|&&(s, t)| self.tcx.is_diagnostic_item(s, adt.did()) && self.can_coerce(expr_ty, t))
+ else { return };
+
+ let path = self.tcx.def_path_str(adt.non_enum_variant().def_id);
+
+ err.multipart_suggestion(
+ format!("consider calling `{s}::new`"),
+ vec![
+ (expr.span.shrink_to_lo(), format!("{path}::new(")),
+ (expr.span.shrink_to_hi(), format!("){unwrap}")),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ pub fn get_conversion_methods(
+ &self,
+ span: Span,
+ expected: Ty<'tcx>,
+ checked_ty: Ty<'tcx>,
+ hir_id: hir::HirId,
+ ) -> Vec<AssocItem> {
+ let mut methods =
+ self.probe_for_return_type(span, probe::Mode::MethodCall, expected, checked_ty, hir_id);
+ methods.retain(|m| {
+ self.has_only_self_parameter(m)
+ && self
+ .tcx
+ // This special internal attribute is used to permit
+ // "identity-like" conversion methods to be suggested here.
+ //
+ // FIXME (#46459 and #46460): ideally
+ // `std::convert::Into::into` and `std::borrow::ToOwned` would
+ // also be `#[rustc_conversion_suggestion]`, if not for
+ // method-probing false-positives and -negatives (respectively).
+ //
+ // FIXME? Other potential candidate methods: `as_ref` and
+ // `as_mut`?
+ .has_attr(m.def_id, sym::rustc_conversion_suggestion)
+ });
+
+ methods
+ }
+
+ /// This function checks whether the method is not static and accepts no parameters other than `self`.
+ fn has_only_self_parameter(&self, method: &AssocItem) -> bool {
+ match method.kind {
+ ty::AssocKind::Fn => {
+ method.fn_has_self_parameter
+ && self.tcx.fn_sig(method.def_id).inputs().skip_binder().len() == 1
+ }
+ _ => false,
+ }
+ }
+
+ /// Identify some cases where `as_ref()` would be appropriate and suggest it.
+ ///
+ /// Given the following code:
+ /// ```compile_fail,E0308
+ /// struct Foo;
+ /// fn takes_ref(_: &Foo) {}
+ /// let ref opt = Some(Foo);
+ ///
+ /// opt.map(|param| takes_ref(param));
+ /// ```
+ /// Suggest using `opt.as_ref().map(|param| takes_ref(param));` instead.
+ ///
+ /// It only checks for `Option` and `Result` and won't work with
+ /// ```ignore (illustrative)
+ /// opt.map(|param| { takes_ref(param) });
+ /// ```
+ fn can_use_as_ref(&self, expr: &hir::Expr<'_>) -> Option<(Span, &'static str, String)> {
+ let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = expr.kind else {
+ return None;
+ };
+
+ let hir::def::Res::Local(local_id) = path.res else {
+ return None;
+ };
+
+ let local_parent = self.tcx.hir().get_parent_node(local_id);
+ let Some(Node::Param(hir::Param { hir_id: param_hir_id, .. })) = self.tcx.hir().find(local_parent) else {
+ return None;
+ };
+
+ let param_parent = self.tcx.hir().get_parent_node(*param_hir_id);
+ let Some(Node::Expr(hir::Expr {
+ hir_id: expr_hir_id,
+ kind: hir::ExprKind::Closure(hir::Closure { fn_decl: closure_fn_decl, .. }),
+ ..
+ })) = self.tcx.hir().find(param_parent) else {
+ return None;
+ };
+
+ let expr_parent = self.tcx.hir().get_parent_node(*expr_hir_id);
+ let hir = self.tcx.hir().find(expr_parent);
+ let closure_params_len = closure_fn_decl.inputs.len();
+ let (
+ Some(Node::Expr(hir::Expr {
+ kind: hir::ExprKind::MethodCall(method_path, method_expr, _),
+ ..
+ })),
+ 1,
+ ) = (hir, closure_params_len) else {
+ return None;
+ };
+
+ let self_ty = self.typeck_results.borrow().expr_ty(&method_expr[0]);
+ let self_ty = format!("{:?}", self_ty);
+ let name = method_path.ident.name;
+ let is_as_ref_able = (self_ty.starts_with("&std::option::Option")
+ || self_ty.starts_with("&std::result::Result")
+ || self_ty.starts_with("std::option::Option")
+ || self_ty.starts_with("std::result::Result"))
+ && (name == sym::map || name == sym::and_then);
+ match (is_as_ref_able, self.sess().source_map().span_to_snippet(method_path.ident.span)) {
+ (true, Ok(src)) => {
+ let suggestion = format!("as_ref().{}", src);
+ Some((method_path.ident.span, "consider using `as_ref` instead", suggestion))
+ }
+ _ => None,
+ }
+ }
+
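+ /// If `expr` is a path to a local that appears as a shorthand field in a
+ /// struct literal, returns the field name, so that suggestions can expand
+ /// the shorthand, e.g. (illustrative) rewriting the `x` in `Foo { x }` to
+ /// `Foo { x: &x }`.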
+ pub(crate) fn maybe_get_struct_pattern_shorthand_field(
+ &self,
+ expr: &hir::Expr<'_>,
+ ) -> Option<Symbol> {
+ let hir = self.tcx.hir();
+ let local = match expr {
+ hir::Expr {
+ kind:
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path {
+ res: hir::def::Res::Local(_),
+ segments: [hir::PathSegment { ident, .. }],
+ ..
+ },
+ )),
+ ..
+ } => Some(ident),
+ _ => None,
+ }?;
+
+ match hir.find(hir.get_parent_node(expr.hir_id))? {
+ Node::Expr(hir::Expr { kind: hir::ExprKind::Struct(_, fields, ..), .. }) => {
+ for field in *fields {
+ if field.ident.name == local.name && field.is_shorthand {
+ return Some(local.name);
+ }
+ }
+ }
+ _ => {}
+ }
+
+ None
+ }
+
+ /// If the given `HirId` corresponds to a block with a trailing expression, return that expression
+ pub(crate) fn maybe_get_block_expr(
+ &self,
+ expr: &hir::Expr<'tcx>,
+ ) -> Option<&'tcx hir::Expr<'tcx>> {
+ match expr {
+ hir::Expr { kind: hir::ExprKind::Block(block, ..), .. } => block.expr,
+ _ => None,
+ }
+ }
+
+ /// Returns whether the given expression is an `else if`.
+ pub(crate) fn is_else_if_block(&self, expr: &hir::Expr<'_>) -> bool {
+ if let hir::ExprKind::If(..) = expr.kind {
+ let parent_id = self.tcx.hir().get_parent_node(expr.hir_id);
+ if let Some(Node::Expr(hir::Expr {
+ kind: hir::ExprKind::If(_, _, Some(else_expr)),
+ ..
+ })) = self.tcx.hir().find(parent_id)
+ {
+ return else_expr.hir_id == expr.hir_id;
+ }
+ }
+ false
+ }
+
+ /// This function is used to determine potential "simple" improvements or users' errors and
+ /// provide useful help for them. For example:
+ ///
+ /// ```compile_fail,E0308
+ /// fn some_fn(s: &str) {}
+ ///
+ /// let x = "hey!".to_owned();
+ /// some_fn(x); // error
+ /// ```
+ ///
+ /// There is no need to find every potential function that could coerce a
+ /// `String` into a `&str` since a `&` would do the trick!
+ ///
+ /// In addition to that check, this function also compares the mutability of the involved
+ /// references. If the expected type is mutable but the provided one isn't, we suggest
+ /// adding `&mut`, e.g. (illustrative):
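+ ///
+ /// ```ignore (illustrative)
+ /// fn other_fn(s: &mut i32) {}
+ ///
+ /// let x = 0;
+ /// other_fn(x); // error; suggest borrowing: `&mut x`
+ /// ```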
+ pub fn check_ref(
+ &self,
+ expr: &hir::Expr<'tcx>,
+ checked_ty: Ty<'tcx>,
+ expected: Ty<'tcx>,
+ ) -> Option<(Span, String, String, Applicability, bool /* verbose */)> {
+ let sess = self.sess();
+ let sp = expr.span;
+
+ // If the span is from an external macro, there's no suggestion we can make.
+ if in_external_macro(sess, sp) {
+ return None;
+ }
+
+ let sm = sess.source_map();
+
+ let replace_prefix = |s: &str, old: &str, new: &str| {
+ s.strip_prefix(old).map(|stripped| new.to_string() + stripped)
+ };
+
+ // `ExprKind::DropTemps` is semantically irrelevant for these suggestions.
+ let expr = expr.peel_drop_temps();
+
+ match (&expr.kind, expected.kind(), checked_ty.kind()) {
+ (_, &ty::Ref(_, exp, _), &ty::Ref(_, check, _)) => match (exp.kind(), check.kind()) {
+ (&ty::Str, &ty::Array(arr, _) | &ty::Slice(arr)) if arr == self.tcx.types.u8 => {
+ if let hir::ExprKind::Lit(_) = expr.kind
+ && let Ok(src) = sm.span_to_snippet(sp)
+ && replace_prefix(&src, "b\"", "\"").is_some()
+ {
+ let pos = sp.lo() + BytePos(1);
+ return Some((
+ sp.with_hi(pos),
+ "consider removing the leading `b`".to_string(),
+ String::new(),
+ Applicability::MachineApplicable,
+ true,
+ ));
+ }
+ }
+ (&ty::Array(arr, _) | &ty::Slice(arr), &ty::Str) if arr == self.tcx.types.u8 => {
+ if let hir::ExprKind::Lit(_) = expr.kind
+ && let Ok(src) = sm.span_to_snippet(sp)
+ && replace_prefix(&src, "\"", "b\"").is_some()
+ {
+ return Some((
+ sp.shrink_to_lo(),
+ "consider adding a leading `b`".to_string(),
+ "b".to_string(),
+ Applicability::MachineApplicable,
+ true,
+ ));
+ }
+ }
+ _ => {}
+ },
+ (_, &ty::Ref(_, _, mutability), _) => {
+ // Check if it can work when put into a ref. For example:
+ //
+ // ```
+ // fn bar(x: &mut i32) {}
+ //
+ // let x = 0u32;
+ // bar(&x); // error, expected &mut
+ // ```
+ let ref_ty = match mutability {
+ hir::Mutability::Mut => {
+ self.tcx.mk_mut_ref(self.tcx.mk_region(ty::ReStatic), checked_ty)
+ }
+ hir::Mutability::Not => {
+ self.tcx.mk_imm_ref(self.tcx.mk_region(ty::ReStatic), checked_ty)
+ }
+ };
+ if self.can_coerce(ref_ty, expected) {
+ let mut sugg_sp = sp;
+ if let hir::ExprKind::MethodCall(ref segment, ref args, _) = expr.kind {
+ let clone_trait =
+ self.tcx.require_lang_item(LangItem::Clone, Some(segment.ident.span));
+ if let ([arg], Some(true), sym::clone) = (
+ &args[..],
+ self.typeck_results.borrow().type_dependent_def_id(expr.hir_id).map(
+ |did| {
+ let ai = self.tcx.associated_item(did);
+ ai.trait_container(self.tcx) == Some(clone_trait)
+ },
+ ),
+ segment.ident.name,
+ ) {
+ // If this expression had a clone call when suggesting borrowing,
+ // we want to suggest removing it because it'd now be unnecessary.
+ sugg_sp = arg.span;
+ }
+ }
+ if let Ok(src) = sm.span_to_snippet(sugg_sp) {
+ let needs_parens = match expr.kind {
+ // parenthesize if needed (Issue #46756)
+ hir::ExprKind::Cast(_, _) | hir::ExprKind::Binary(_, _, _) => true,
+ // parenthesize borrows of range literals (Issue #54505)
+ _ if is_range_literal(expr) => true,
+ _ => false,
+ };
+ let sugg_expr = if needs_parens { format!("({src})") } else { src };
+
+ if let Some(sugg) = self.can_use_as_ref(expr) {
+ return Some((
+ sugg.0,
+ sugg.1.to_string(),
+ sugg.2,
+ Applicability::MachineApplicable,
+ false,
+ ));
+ }
+
+ let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) {
+ Some(ident) => format!("{ident}: "),
+ None => String::new(),
+ };
+
+ if let Some(hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Assign(..),
+ ..
+ })) = self.tcx.hir().find(self.tcx.hir().get_parent_node(expr.hir_id))
+ {
+ if mutability == hir::Mutability::Mut {
+ // Suppressing this diagnostic, we'll properly print it in `check_expr_assign`
+ return None;
+ }
+ }
+
+ return Some(match mutability {
+ hir::Mutability::Mut => (
+ sp,
+ "consider mutably borrowing here".to_string(),
+ format!("{prefix}&mut {sugg_expr}"),
+ Applicability::MachineApplicable,
+ false,
+ ),
+ hir::Mutability::Not => (
+ sp,
+ "consider borrowing here".to_string(),
+ format!("{prefix}&{sugg_expr}"),
+ Applicability::MachineApplicable,
+ false,
+ ),
+ });
+ }
+ }
+ }
+ (
+ hir::ExprKind::AddrOf(hir::BorrowKind::Ref, _, ref expr),
+ _,
+ &ty::Ref(_, checked, _),
+ ) if self.can_sub(self.param_env, checked, expected).is_ok() => {
+ // We have `&T`, check if what was expected was `T`. If so,
+ // we may want to suggest removing a `&`.
+ if sm.is_imported(expr.span) {
+ // Go through the spans from which this span was expanded,
+ // and find the one that's pointing inside `sp`.
+ //
+ // E.g. for `&format!("")`, where we want the span to point to the
+ // `format!()` invocation instead of its expansion.
+ if let Some(call_span) =
+ iter::successors(Some(expr.span), |s| s.parent_callsite())
+ .find(|&s| sp.contains(s))
+ && sm.is_span_accessible(call_span)
+ {
+ return Some((
+ sp.with_hi(call_span.lo()),
+ "consider removing the borrow".to_string(),
+ String::new(),
+ Applicability::MachineApplicable,
+ true,
+ ));
+ }
+ return None;
+ }
+ if sp.contains(expr.span)
+ && sm.is_span_accessible(expr.span)
+ {
+ return Some((
+ sp.with_hi(expr.span.lo()),
+ "consider removing the borrow".to_string(),
+ String::new(),
+ Applicability::MachineApplicable,
+ true,
+ ));
+ }
+ }
+ (
+ _,
+ &ty::RawPtr(TypeAndMut { ty: ty_b, mutbl: mutbl_b }),
+ &ty::Ref(_, ty_a, mutbl_a),
+ ) => {
+ if let Some(steps) = self.deref_steps(ty_a, ty_b)
+ // Only suggest if dereferencing is actually needed.
+ && steps > 0
+ // The pointer type implements the `Copy` trait, so the suggestion is always valid.
+ && let Ok(src) = sm.span_to_snippet(sp)
+ {
+ let derefs = "*".repeat(steps);
+ if let Some((span, src, applicability)) = match mutbl_b {
+ hir::Mutability::Mut => {
+ let new_prefix = "&mut ".to_owned() + &derefs;
+ match mutbl_a {
+ hir::Mutability::Mut => {
+ replace_prefix(&src, "&mut ", &new_prefix).map(|_| {
+ let pos = sp.lo() + BytePos(5);
+ let sp = sp.with_lo(pos).with_hi(pos);
+ (sp, derefs, Applicability::MachineApplicable)
+ })
+ }
+ hir::Mutability::Not => {
+ replace_prefix(&src, "&", &new_prefix).map(|_| {
+ let pos = sp.lo() + BytePos(1);
+ let sp = sp.with_lo(pos).with_hi(pos);
+ (
+ sp,
+ format!("mut {derefs}"),
+ Applicability::Unspecified,
+ )
+ })
+ }
+ }
+ }
+ hir::Mutability::Not => {
+ let new_prefix = "&".to_owned() + &derefs;
+ match mutbl_a {
+ hir::Mutability::Mut => {
+ replace_prefix(&src, "&mut ", &new_prefix).map(|_| {
+ let lo = sp.lo() + BytePos(1);
+ let hi = sp.lo() + BytePos(5);
+ let sp = sp.with_lo(lo).with_hi(hi);
+ (sp, derefs, Applicability::MachineApplicable)
+ })
+ }
+ hir::Mutability::Not => {
+ replace_prefix(&src, "&", &new_prefix).map(|_| {
+ let pos = sp.lo() + BytePos(1);
+ let sp = sp.with_lo(pos).with_hi(pos);
+ (sp, derefs, Applicability::MachineApplicable)
+ })
+ }
+ }
+ }
+ } {
+ return Some((
+ span,
+ "consider dereferencing".to_string(),
+ src,
+ applicability,
+ true,
+ ));
+ }
+ }
+ }
+ _ if sp == expr.span => {
+ if let Some(mut steps) = self.deref_steps(checked_ty, expected) {
+ let mut expr = expr.peel_blocks();
+ let mut prefix_span = expr.span.shrink_to_lo();
+ let mut remove = String::new();
+
+ // Try peeling off any existing `&` and `&mut` to reach our target type
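+ // e.g. (illustrative): if `i32` is expected and `expr` is `&&x` with
+ // `x: i32`, both `&`s are peeled here and we suggest removing the `&&`.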
+ while steps > 0 {
+ if let hir::ExprKind::AddrOf(_, mutbl, inner) = expr.kind {
+ // If the expression has `&`, removing it would fix the error
+ prefix_span = prefix_span.with_hi(inner.span.lo());
+ expr = inner;
+ remove += match mutbl {
+ hir::Mutability::Not => "&",
+ hir::Mutability::Mut => "&mut ",
+ };
+ steps -= 1;
+ } else {
+ break;
+ }
+ }
+ // If we've reached our target type just by removing `&`s, print the suggestion now.
+ if steps == 0 {
+ return Some((
+ prefix_span,
+ format!("consider removing the `{}`", remove.trim()),
+ String::new(),
+ // Do not remove `&&` to get to bool, because it might be something like
+ // `{ a } && b`, for which we have a separate fixup suggestion that is
+ // more likely correct...
+ if remove.trim() == "&&" && expected == self.tcx.types.bool {
+ Applicability::MaybeIncorrect
+ } else {
+ Applicability::MachineApplicable
+ },
+ true,
+ ));
+ }
+
+ // For this suggestion to make sense, the type would need to be `Copy`,
+ // or we have to be moving out of a `Box<T>`
+ if self.type_is_copy_modulo_regions(self.param_env, expected, sp)
+ // FIXME(compiler-errors): We can actually do this if the checked_ty is
+ // `steps` layers of boxes, not just one, but this is easier and most likely.
+ || (checked_ty.is_box() && steps == 1)
+ {
+ let deref_kind = if checked_ty.is_box() {
+ "unboxing the value"
+ } else if checked_ty.is_region_ptr() {
+ "dereferencing the borrow"
+ } else {
+ "dereferencing the type"
+ };
+
+ // Suggest removing `&` if we have removed any, otherwise suggest just
+ // dereferencing the remaining number of steps.
+ let message = if remove.is_empty() {
+ format!("consider {deref_kind}")
+ } else {
+ format!(
+ "consider removing the `{}` and {} instead",
+ remove.trim(),
+ deref_kind
+ )
+ };
+
+ let prefix = match self.maybe_get_struct_pattern_shorthand_field(expr) {
+ Some(ident) => format!("{ident}: "),
+ None => String::new(),
+ };
+
+ let (span, suggestion) = if self.is_else_if_block(expr) {
+ // Don't suggest nonsense like `else *if`
+ return None;
+ } else if let Some(expr) = self.maybe_get_block_expr(expr) {
+ // the prefix should be empty here.
+ (expr.span.shrink_to_lo(), "*".to_string())
+ } else {
+ (prefix_span, format!("{}{}", prefix, "*".repeat(steps)))
+ };
+
+ return Some((
+ span,
+ message,
+ suggestion,
+ Applicability::MachineApplicable,
+ true,
+ ));
+ }
+ }
+ }
+ _ => {}
+ }
+ None
+ }
+
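+ /// Checks whether a numeric conversion suggestion is appropriate for the type
+ /// mismatch, and emits one if so, e.g. (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// fn takes_u32(x: u32) {}
+ ///
+ /// let y: u64 = 1;
+ /// takes_u32(y); // error; suggest `y.try_into().unwrap()`
+ /// ```
+ ///
+ /// Returns `true` if the mismatch was between numeric (or `char`) types that
+ /// this check knows how to handle.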
+ pub fn check_for_cast(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ checked_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ ) -> bool {
+ if self.tcx.sess.source_map().is_imported(expr.span) {
+ // Ignore if span is from within a macro.
+ return false;
+ }
+
+ let Ok(src) = self.tcx.sess.source_map().span_to_snippet(expr.span) else {
+ return false;
+ };
+
+ // Check whether casting this expression to a given numeric type would be appropriate
+ // in case of a type mismatch.
+ //
+ // We want to minimize the number of casting operations that are suggested, as casting can be a
+ // lossy operation with potentially bad side effects, so we only suggest when encountering
+ // an expression that indicates that the original type couldn't be directly changed.
+ //
+ // For now, don't suggest casting with `as`.
+ let can_cast = false;
+
+ let mut sugg = vec![];
+
+ if let Some(hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Struct(_, fields, _), ..
+ })) = self.tcx.hir().find(self.tcx.hir().get_parent_node(expr.hir_id))
+ {
+ // `expr` is a literal field for a struct; only suggest if appropriate
+ match (*fields)
+ .iter()
+ .find(|field| field.expr.hir_id == expr.hir_id && field.is_shorthand)
+ {
+ // This is a field literal
+ Some(field) => {
+ sugg.push((field.ident.span.shrink_to_lo(), format!("{}: ", field.ident)));
+ }
+ // Likely a field was meant, but this field wasn't found. Do not suggest anything.
+ None => return false,
+ }
+ };
+
+ if let hir::ExprKind::Call(path, args) = &expr.kind
+ && let (hir::ExprKind::Path(hir::QPath::TypeRelative(base_ty, path_segment)), 1) =
+ (&path.kind, args.len())
+ // If `expr` is a conversion like `u32::from(val)`, do not suggest anything (#63697).
+ && let (hir::TyKind::Path(hir::QPath::Resolved(None, base_ty_path)), sym::from) =
+ (&base_ty.kind, path_segment.ident.name)
+ {
+ if let Some(ident) = &base_ty_path.segments.iter().map(|s| s.ident).next() {
+ match ident.name {
+ sym::i128
+ | sym::i64
+ | sym::i32
+ | sym::i16
+ | sym::i8
+ | sym::u128
+ | sym::u64
+ | sym::u32
+ | sym::u16
+ | sym::u8
+ | sym::isize
+ | sym::usize
+ if base_ty_path.segments.len() == 1 =>
+ {
+ return false;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ let msg = format!(
+ "you can convert {} `{}` to {} `{}`",
+ checked_ty.kind().article(),
+ checked_ty,
+ expected_ty.kind().article(),
+ expected_ty,
+ );
+ let cast_msg = format!(
+ "you can cast {} `{}` to {} `{}`",
+ checked_ty.kind().article(),
+ checked_ty,
+ expected_ty.kind().article(),
+ expected_ty,
+ );
+ let lit_msg = format!(
+ "change the type of the numeric literal from `{checked_ty}` to `{expected_ty}`",
+ );
+
+ let close_paren = if expr.precedence().order() < PREC_POSTFIX {
+ sugg.push((expr.span.shrink_to_lo(), "(".to_string()));
+ ")"
+ } else {
+ ""
+ };
+
+ let mut cast_suggestion = sugg.clone();
+ cast_suggestion.push((expr.span.shrink_to_hi(), format!("{close_paren} as {expected_ty}")));
+ let mut into_suggestion = sugg.clone();
+ into_suggestion.push((expr.span.shrink_to_hi(), format!("{close_paren}.into()")));
+ let mut suffix_suggestion = sugg.clone();
+ suffix_suggestion.push((
+ if matches!(
+ (&expected_ty.kind(), &checked_ty.kind()),
+ (ty::Int(_) | ty::Uint(_), ty::Float(_))
+ ) {
+ // Remove fractional part from literal, for example `42.0f32` into `42`
+ let src = src.trim_end_matches(&checked_ty.to_string());
+ let len = src.split('.').next().unwrap().len();
+ expr.span.with_lo(expr.span.lo() + BytePos(len as u32))
+ } else {
+ let len = src.trim_end_matches(&checked_ty.to_string()).len();
+ expr.span.with_lo(expr.span.lo() + BytePos(len as u32))
+ },
+ if expr.precedence().order() < PREC_POSTFIX {
+ // Re-add the `)`
+ format!("{expected_ty})")
+ } else {
+ expected_ty.to_string()
+ },
+ ));
+ let literal_is_ty_suffixed = |expr: &hir::Expr<'_>| {
+ if let hir::ExprKind::Lit(lit) = &expr.kind { lit.node.is_suffixed() } else { false }
+ };
+ let is_negative_int =
+ |expr: &hir::Expr<'_>| matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::Neg, ..));
+ let is_uint = |ty: Ty<'_>| matches!(ty.kind(), ty::Uint(..));
+
+ let in_const_context = self.tcx.hir().is_inside_const_context(expr.hir_id);
+
+ let suggest_fallible_into_or_lhs_from =
+ |err: &mut Diagnostic, exp_to_found_is_fallible: bool| {
+ // If we know the expression the expected type is derived from, we might be able
+ // to suggest a widening conversion rather than a narrowing one (which may
+ // panic). For example, given x: u8 and y: u32, if we know the span of "x",
+ // x > y
+ // can be given the suggestion "u32::from(x) > y" rather than
+ // "x > y.try_into().unwrap()".
+ let lhs_expr_and_src = expected_ty_expr.and_then(|expr| {
+ self.tcx
+ .sess
+ .source_map()
+ .span_to_snippet(expr.span)
+ .ok()
+ .map(|src| (expr, src))
+ });
+ let (msg, suggestion) = if let (Some((lhs_expr, lhs_src)), false) =
+ (lhs_expr_and_src, exp_to_found_is_fallible)
+ {
+ let msg = format!(
+ "you can convert `{lhs_src}` from `{expected_ty}` to `{checked_ty}`, matching the type of `{src}`",
+ );
+ let suggestion = vec![
+ (lhs_expr.span.shrink_to_lo(), format!("{checked_ty}::from(")),
+ (lhs_expr.span.shrink_to_hi(), ")".to_string()),
+ ];
+ (msg, suggestion)
+ } else {
+ let msg = format!("{msg} and panic if the converted value doesn't fit");
+ let mut suggestion = sugg.clone();
+ suggestion.push((
+ expr.span.shrink_to_hi(),
+ format!("{close_paren}.try_into().unwrap()"),
+ ));
+ (msg, suggestion)
+ };
+ err.multipart_suggestion_verbose(
+ &msg,
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ };
+
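+ // e.g. (illustrative): for `let x: u8 = 1u32;` the literal is suffixed, so
+ // the closure below suggests changing the suffix instead: `1u8`.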
+ let suggest_to_change_suffix_or_into =
+ |err: &mut Diagnostic,
+ found_to_exp_is_fallible: bool,
+ exp_to_found_is_fallible: bool| {
+ let exp_is_lhs =
+ expected_ty_expr.map(|e| self.tcx.hir().is_lhs(e.hir_id)).unwrap_or(false);
+
+ if exp_is_lhs {
+ return;
+ }
+
+ let always_fallible = found_to_exp_is_fallible
+ && (exp_to_found_is_fallible || expected_ty_expr.is_none());
+ let msg = if literal_is_ty_suffixed(expr) {
+ &lit_msg
+ } else if always_fallible && (is_negative_int(expr) && is_uint(expected_ty)) {
+ // We now know that converting either the lhs or rhs is fallible. Before we
+ // suggest a fallible conversion, check if the value can never fit in the
+ // expected type.
+ let msg = format!("`{src}` cannot fit into type `{expected_ty}`");
+ err.note(&msg);
+ return;
+ } else if in_const_context {
+ // Do not recommend `into` or `try_into` in const contexts.
+ return;
+ } else if found_to_exp_is_fallible {
+ return suggest_fallible_into_or_lhs_from(err, exp_to_found_is_fallible);
+ } else {
+ &msg
+ };
+ let suggestion = if literal_is_ty_suffixed(expr) {
+ suffix_suggestion.clone()
+ } else {
+ into_suggestion.clone()
+ };
+ err.multipart_suggestion_verbose(msg, suggestion, Applicability::MachineApplicable);
+ };
+
+ match (&expected_ty.kind(), &checked_ty.kind()) {
+ (&ty::Int(ref exp), &ty::Int(ref found)) => {
+ let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width())
+ {
+ (Some(exp), Some(found)) if exp < found => (true, false),
+ (Some(exp), Some(found)) if exp > found => (false, true),
+ (None, Some(8 | 16)) => (false, true),
+ (Some(8 | 16), None) => (true, false),
+ (None, _) | (_, None) => (true, true),
+ _ => (false, false),
+ };
+ suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible);
+ true
+ }
+ (&ty::Uint(ref exp), &ty::Uint(ref found)) => {
+ let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width())
+ {
+ (Some(exp), Some(found)) if exp < found => (true, false),
+ (Some(exp), Some(found)) if exp > found => (false, true),
+ (None, Some(8 | 16)) => (false, true),
+ (Some(8 | 16), None) => (true, false),
+ (None, _) | (_, None) => (true, true),
+ _ => (false, false),
+ };
+ suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible);
+ true
+ }
+ (&ty::Int(exp), &ty::Uint(found)) => {
+ let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width())
+ {
+ (Some(exp), Some(found)) if found < exp => (false, true),
+ (None, Some(8)) => (false, true),
+ _ => (true, true),
+ };
+ suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible);
+ true
+ }
+ (&ty::Uint(exp), &ty::Int(found)) => {
+ let (f2e_is_fallible, e2f_is_fallible) = match (exp.bit_width(), found.bit_width())
+ {
+ (Some(exp), Some(found)) if found > exp => (true, false),
+ (Some(8), None) => (true, false),
+ _ => (true, true),
+ };
+ suggest_to_change_suffix_or_into(err, f2e_is_fallible, e2f_is_fallible);
+ true
+ }
+ (&ty::Float(ref exp), &ty::Float(ref found)) => {
+ if found.bit_width() < exp.bit_width() {
+ suggest_to_change_suffix_or_into(err, false, true);
+ } else if literal_is_ty_suffixed(expr) {
+ err.multipart_suggestion_verbose(
+ &lit_msg,
+ suffix_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if can_cast {
+ // Missing try_into implementation for `f64` to `f32`
+ err.multipart_suggestion_verbose(
+ &format!("{cast_msg}, producing the closest possible value"),
+ cast_suggestion,
+ Applicability::MaybeIncorrect, // lossy conversion
+ );
+ }
+ true
+ }
+ (&ty::Uint(_) | &ty::Int(_), &ty::Float(_)) => {
+ if literal_is_ty_suffixed(expr) {
+ err.multipart_suggestion_verbose(
+ &lit_msg,
+ suffix_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if can_cast {
+ // Missing try_into implementation for `{float}` to `{integer}`
+ err.multipart_suggestion_verbose(
+ &format!("{msg}, rounding the float towards zero"),
+ cast_suggestion,
+ Applicability::MaybeIncorrect, // lossy conversion
+ );
+ }
+ true
+ }
+ (&ty::Float(ref exp), &ty::Uint(ref found)) => {
+ // if `found` is `None` (meaning found is `usize`), don't suggest `.into()`
+ if exp.bit_width() > found.bit_width().unwrap_or(256) {
+ err.multipart_suggestion_verbose(
+ &format!(
+ "{msg}, producing the floating point representation of the integer",
+ ),
+ into_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if literal_is_ty_suffixed(expr) {
+ err.multipart_suggestion_verbose(
+ &lit_msg,
+ suffix_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else {
+ // Missing try_into implementation for `{integer}` to `{float}`
+ err.multipart_suggestion_verbose(
+ &format!(
+ "{cast_msg}, producing the floating point representation of the integer, \
+ rounded if necessary",
+ ),
+ cast_suggestion,
+ Applicability::MaybeIncorrect, // lossy conversion
+ );
+ }
+ true
+ }
+ (&ty::Float(ref exp), &ty::Int(ref found)) => {
+ // if `found` is `None` (meaning found is `isize`), don't suggest `.into()`
+ if exp.bit_width() > found.bit_width().unwrap_or(256) {
+ err.multipart_suggestion_verbose(
+ &format!(
+ "{}, producing the floating point representation of the integer",
+ &msg,
+ ),
+ into_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else if literal_is_ty_suffixed(expr) {
+ err.multipart_suggestion_verbose(
+ &lit_msg,
+ suffix_suggestion,
+ Applicability::MachineApplicable,
+ );
+ } else {
+ // Missing try_into implementation for `{integer}` to `{float}`
+ err.multipart_suggestion_verbose(
+ &format!(
+ "{}, producing the floating point representation of the integer, \
+ rounded if necessary",
+ &msg,
+ ),
+ cast_suggestion,
+ Applicability::MaybeIncorrect, // lossy conversion
+ );
+ }
+ true
+ }
+ (
+ &ty::Uint(ty::UintTy::U32 | ty::UintTy::U64 | ty::UintTy::U128)
+ | &ty::Int(ty::IntTy::I32 | ty::IntTy::I64 | ty::IntTy::I128),
+ &ty::Char,
+ ) => {
+ err.multipart_suggestion_verbose(
+ &format!("{cast_msg}, since a `char` always occupies 4 bytes"),
+ cast_suggestion,
+ Applicability::MachineApplicable,
+ );
+ true
+ }
+ _ => false,
+ }
+ }
+
+ // Report the type inferred by the return statement.
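+ // e.g. (illustrative): in `|| { if c { return 0u8; } 'a' }` with no declared
+ // return type, the note points at the `return 0u8` that fixed the closure's
+ // return type to `u8` when `'a'` later fails to coerce to it.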
+ fn report_closure_inferred_return_type(&self, err: &mut Diagnostic, expected: Ty<'tcx>) {
+ if let Some(sp) = self.ret_coercion_span.get()
+ // If the closure has an explicit return type annotation, or if
+ // the closure's return type has been inferred from outside
+ // requirements (such as an Fn* trait bound), then a type error
+ // may occur at the first return expression we see in the closure
+ // (if it conflicts with the declared return type). Skip adding a
+ // note in this case, since it would be incorrect.
+ && !self.return_type_pre_known
+ {
+ err.span_note(
+ sp,
+ &format!(
+ "return type inferred to be `{}` here",
+ self.resolve_vars_if_possible(expected)
+ ),
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/diverges.rs b/compiler/rustc_typeck/src/check/diverges.rs
new file mode 100644
index 000000000..963a93a95
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/diverges.rs
@@ -0,0 +1,82 @@
+use rustc_span::source_map::DUMMY_SP;
+use rustc_span::{self, Span};
+use std::{cmp, ops};
+
+/// Tracks whether executing a node may exit normally (versus
+/// return/break/panic, which "diverge", leaving dead code in their
+/// wake). Tracked semi-automatically (through type variables marked
+/// as diverging), with some manual adjustments for control-flow
+/// primitives (approximating a CFG).
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub enum Diverges {
+ /// Potentially unknown: some cases converge,
+ /// while others require a CFG to determine them.
+ Maybe,
+
+ /// Definitely known to diverge and therefore
+ /// not reach the next sibling or its parent.
+ Always {
+ /// The `Span` points to the expression
+ /// that caused us to diverge
+ /// (e.g. `return`, `break`, etc).
+ span: Span,
+ /// In some cases (e.g. a `match` expression
+ /// where all arms diverge), we may be
+ /// able to provide a more informative
+ /// message to the user.
+ /// If this is `None`, a default message
+ /// will be generated, which is suitable
+ /// for most cases.
+ custom_note: Option<&'static str>,
+ },
+
+ /// Same as `Always` but with a reachability
+ /// warning already emitted.
+ WarnedAlways,
+}
+
+// Convenience impls for combining `Diverges`.
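+// For intuition (illustrative): since `Maybe < Always { .. } < WarnedAlways`
+// under the derived `Ord`, `a & b` (min) diverges only if *both* operands
+// diverge (e.g. both arms of an `if`), while `a | b` (max) diverges as soon
+// as *either* does (e.g. successive statements).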
+
+impl ops::BitAnd for Diverges {
+ type Output = Self;
+ fn bitand(self, other: Self) -> Self {
+ cmp::min(self, other)
+ }
+}
+
+impl ops::BitOr for Diverges {
+ type Output = Self;
+ fn bitor(self, other: Self) -> Self {
+ cmp::max(self, other)
+ }
+}
+
+impl ops::BitAndAssign for Diverges {
+ fn bitand_assign(&mut self, other: Self) {
+ *self = *self & other;
+ }
+}
+
+impl ops::BitOrAssign for Diverges {
+ fn bitor_assign(&mut self, other: Self) {
+ *self = *self | other;
+ }
+}
+
+impl Diverges {
+ /// Creates a `Diverges::Always` with the provided `span` and the default note message.
+ pub(super) fn always(span: Span) -> Diverges {
+ Diverges::Always { span, custom_note: None }
+ }
+
+ pub(super) fn is_always(self) -> bool {
+ // We only care about the variant
+ // ordering in this comparison, so the
+ // fields are just placeholder values.
+ self >= Diverges::Always { span: DUMMY_SP, custom_note: None }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/dropck.rs b/compiler/rustc_typeck/src/check/dropck.rs
new file mode 100644
index 000000000..321064ec0
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/dropck.rs
@@ -0,0 +1,336 @@
+// FIXME(@lcnr): Move this module out of `rustc_typeck`.
+//
+// We don't do any drop checking during hir typeck.
+use crate::hir::def_id::{DefId, LocalDefId};
+use rustc_errors::{struct_span_err, ErrorGuaranteed};
+use rustc_middle::ty::error::TypeError;
+use rustc_middle::ty::relate::{Relate, RelateResult, TypeRelation};
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::util::IgnoreRegions;
+use rustc_middle::ty::{self, Predicate, Ty, TyCtxt};
+
+/// This function confirms that the `Drop` implementation identified by
+/// `drop_impl_did` is not any more specialized than the type it is
+/// attached to (Issue #8142).
+///
+/// This means:
+///
+/// 1. The self type must be nominal (this is already checked during
+/// coherence),
+///
+/// 2. The generic region/type parameters of the impl's self type must
+/// all be parameters of the Drop impl itself (i.e., no
+/// specialization like `impl Drop for Foo<i32>`), and,
+///
+/// 3. Any bounds on the generic parameters must be reflected in the
+/// struct/enum definition for the nominal type itself (i.e.
+/// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`).
+///
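+/// For example (illustrative), this impl violates rule 2 and is rejected:
+///
+/// ```compile_fail,E0366
+/// struct S<T>(T);
+///
+/// impl Drop for S<i32> { // specialized to `i32`: not allowed
+///     fn drop(&mut self) {}
+/// }
+/// ```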
+pub fn check_drop_impl(tcx: TyCtxt<'_>, drop_impl_did: DefId) -> Result<(), ErrorGuaranteed> {
+ let dtor_self_type = tcx.type_of(drop_impl_did);
+ let dtor_predicates = tcx.predicates_of(drop_impl_did);
+ match dtor_self_type.kind() {
+ ty::Adt(adt_def, self_to_impl_substs) => {
+ ensure_drop_params_and_item_params_correspond(
+ tcx,
+ drop_impl_did.expect_local(),
+ adt_def.did(),
+ self_to_impl_substs,
+ )?;
+
+ ensure_drop_predicates_are_implied_by_item_defn(
+ tcx,
+ dtor_predicates,
+ adt_def.did().expect_local(),
+ self_to_impl_substs,
+ )
+ }
+ _ => {
+ // Destructors only work on nominal types. This was
+ // already checked by coherence, but compilation may
+ // not have been aborted yet.
+ let span = tcx.def_span(drop_impl_did);
+ let reported = tcx.sess.delay_span_bug(
+ span,
+ &format!("should have been rejected by coherence check: {dtor_self_type}"),
+ );
+ Err(reported)
+ }
+ }
+}
+
+fn ensure_drop_params_and_item_params_correspond<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ drop_impl_did: LocalDefId,
+ self_type_did: DefId,
+ drop_impl_substs: SubstsRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let Err(arg) = tcx.uses_unique_generic_params(drop_impl_substs, IgnoreRegions::No) else {
+ return Ok(())
+ };
+
+ let drop_impl_span = tcx.def_span(drop_impl_did);
+ let item_span = tcx.def_span(self_type_did);
+ let self_descr = tcx.def_kind(self_type_did).descr(self_type_did);
+ let mut err =
+ struct_span_err!(tcx.sess, drop_impl_span, E0366, "`Drop` impls cannot be specialized");
+ match arg {
+ ty::util::NotUniqueParam::DuplicateParam(arg) => {
+ err.note(&format!("`{arg}` is mentioned multiple times"))
+ }
+ ty::util::NotUniqueParam::NotParam(arg) => {
+ err.note(&format!("`{arg}` is not a generic parameter"))
+ }
+ };
+ err.span_note(
+ item_span,
+ &format!(
+ "use the same sequence of generic lifetime, type and const parameters \
+ as the {self_descr} definition",
+ ),
+ );
+ Err(err.emit())
+}
+
+/// Confirms that every predicate imposed by `dtor_predicates` is
+/// implied by assuming the predicates attached to `self_type_did`.
+fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ dtor_predicates: ty::GenericPredicates<'tcx>,
+ self_type_did: LocalDefId,
+ self_to_impl_substs: SubstsRef<'tcx>,
+) -> Result<(), ErrorGuaranteed> {
+ let mut result = Ok(());
+
+ // Here is an example, analogous to that from
+ // `compare_impl_method`.
+ //
+ // Consider a struct type:
+ //
+ // struct Type<'c, 'b:'c, 'a> {
+ // x: &'a Contents // (contents are irrelevant;
+ // y: &'c Cell<&'b Contents>, // only the bounds matter for our purposes.)
+ // }
+ //
+ // and a Drop impl:
+ //
+ // impl<'z, 'y:'z, 'x:'y> Drop for Type<'z, 'y, 'x> {
+ // fn drop(&mut self) { self.y.set(self.x); } // (only legal if 'x: 'y)
+ // }
+ //
+ // We start out with self_to_impl_substs, that maps the generic
+ // parameters of Type to that of the Drop impl.
+ //
+ // self_to_impl_substs = {'c => 'z, 'b => 'y, 'a => 'x}
+ //
+ // Applying this to the predicates (i.e., assumptions) provided by the item
+ // definition yields the instantiated assumptions:
+ //
+ // ['y : 'z]
+ //
+ // We then check all of the predicates of the Drop impl:
+ //
+ // ['y:'z, 'x:'y]
+ //
+ // and ensure each is in the list of instantiated
+ // assumptions. Here, `'y:'z` is present, but `'x:'y` is
+ // absent. So we report an error that the Drop impl injected a
+ // predicate that is not present on the struct definition.
+
+ // We can assume the predicates attached to struct/enum definition
+ // hold.
+ let generic_assumptions = tcx.predicates_of(self_type_did);
+
+ let assumptions_in_impl_context = generic_assumptions.instantiate(tcx, &self_to_impl_substs);
+ let assumptions_in_impl_context = assumptions_in_impl_context.predicates;
+
+ let self_param_env = tcx.param_env(self_type_did);
+
+ // An earlier version of this code attempted to do this checking
+ // via the traits::fulfill machinery. However, it ran into trouble
+ // since the fulfill machinery merely turns outlives-predicates
+ // 'a:'b and T:'b into region inference constraints. It is simpler
+ // just to look for all the predicates directly.
+
+ assert_eq!(dtor_predicates.parent, None);
+ for &(predicate, predicate_sp) in dtor_predicates.predicates {
+ // (We do not need to worry about deep analysis of type
+ // expressions etc because the Drop impls are already forced
+ // to take on a structure that is roughly an alpha-renaming of
+ // the generic parameters of the item definition.)
+
+ // This path now just checks *all* predicates via an instantiation of
+ // the `SimpleEqRelation`, which simply forwards to the `relate` machinery
+ // after taking care of anonymizing late bound regions.
+ //
+ // However, it may be more efficient in the future to batch
+ // the analysis together via the fulfill (see comment above regarding
+ // the usage of the fulfill machinery), rather than the
+ // repeated `.iter().any(..)` calls.
+
+ // This closure is a more robust way to check `Predicate` equality
+ // than simple `==` checks (which were the previous implementation).
+ // It relies on `ty::relate` for `TraitPredicate`, `ProjectionPredicate`,
+ // `ConstEvaluatable` and `TypeOutlives` (which implement the `Relate` trait),
+ // while falling back on simple equality for the other `Predicate` kinds.
+ // This implementation solves (Issue #59497) and (Issue #58311).
+ // It is unclear at the moment whether the `relate`-based approach could
+ // easily be extended to the other `Predicate` kinds as well.
+ let predicate_matches_closure = |p: Predicate<'tcx>| {
+ let mut relator: SimpleEqRelation<'tcx> = SimpleEqRelation::new(tcx, self_param_env);
+ let predicate = predicate.kind();
+ let p = p.kind();
+ match (predicate.skip_binder(), p.skip_binder()) {
+ (ty::PredicateKind::Trait(a), ty::PredicateKind::Trait(b)) => {
+ // Since struct predicates cannot have ~const, project the impl predicate
+ // onto one that ignores the constness. This is equivalent to saying that
+ // we match a `Trait` bound on the struct with a `Trait` or `~const Trait`
+ // in the impl.
+ let non_const_a =
+ ty::TraitPredicate { constness: ty::BoundConstness::NotConst, ..a };
+ relator.relate(predicate.rebind(non_const_a), p.rebind(b)).is_ok()
+ }
+ (ty::PredicateKind::Projection(a), ty::PredicateKind::Projection(b)) => {
+ relator.relate(predicate.rebind(a), p.rebind(b)).is_ok()
+ }
+ (
+ ty::PredicateKind::ConstEvaluatable(a),
+ ty::PredicateKind::ConstEvaluatable(b),
+ ) => tcx.try_unify_abstract_consts(self_param_env.and((a, b))),
+ (
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_a, lt_a)),
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_b, lt_b)),
+ ) => {
+ relator.relate(predicate.rebind(ty_a), p.rebind(ty_b)).is_ok()
+ && relator.relate(predicate.rebind(lt_a), p.rebind(lt_b)).is_ok()
+ }
+ (ty::PredicateKind::WellFormed(arg_a), ty::PredicateKind::WellFormed(arg_b)) => {
+ relator.relate(predicate.rebind(arg_a), p.rebind(arg_b)).is_ok()
+ }
+ _ => predicate == p,
+ }
+ };
+
+ if !assumptions_in_impl_context.iter().copied().any(predicate_matches_closure) {
+ let item_span = tcx.def_span(self_type_did);
+ let self_descr = tcx.def_kind(self_type_did).descr(self_type_did.to_def_id());
+ let reported = struct_span_err!(
+ tcx.sess,
+ predicate_sp,
+ E0367,
+ "`Drop` impl requires `{predicate}` but the {self_descr} it is implemented for does not",
+ )
+ .span_note(item_span, "the implementor must specify the same requirement")
+ .emit();
+ result = Err(reported);
+ }
+ }
+
+ result
+}
+
+// This is an implementation of the `TypeRelation` trait with the
+// aim of simply comparing for equality (without side-effects).
+// It is not intended to be used anywhere other than here.
+pub(crate) struct SimpleEqRelation<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> SimpleEqRelation<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> SimpleEqRelation<'tcx> {
+ SimpleEqRelation { tcx, param_env }
+ }
+}
+
+impl<'tcx> TypeRelation<'tcx> for SimpleEqRelation<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+
+ fn tag(&self) -> &'static str {
+ "dropck::SimpleEqRelation"
+ }
+
+ fn a_is_expected(&self) -> bool {
+ true
+ }
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ _: ty::Variance,
+ _info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ // Here we ignore variance because we require the drop impl's types
+ // to be *exactly* the same as the ones in the struct definition.
+ self.relate(a, b)
+ }
+
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ debug!("SimpleEqRelation::tys(a={:?}, b={:?})", a, b);
+ ty::relate::super_relate_tys(self, a, b)
+ }
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ debug!("SimpleEqRelation::regions(a={:?}, b={:?})", a, b);
+
+ // We can just equate the regions because LBRs have been
+ // already anonymized.
+ if a == b {
+ Ok(a)
+ } else {
+ // I'm not sure if this `TypeError` is the right one, but
+ // it should not matter as it won't be checked (the dropck
+ // will emit its own, more informative and higher-level errors
+ // in case anything goes wrong).
+ Err(TypeError::RegionsPlaceholderMismatch)
+ }
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ debug!("SimpleEqRelation::consts(a={:?}, b={:?})", a, b);
+ ty::relate::super_relate_consts(self, a, b)
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ debug!("SimpleEqRelation::binders({:?}: {:?}", a, b);
+
+ // Anonymizing the LBRs is necessary to solve (Issue #59497).
+ // After we do so, it should be totally fine to skip the binders.
+ let anon_a = self.tcx.anonymize_bound_vars(a);
+ let anon_b = self.tcx.anonymize_bound_vars(b);
+ self.relate(anon_a.skip_binder(), anon_b.skip_binder())?;
+
+ Ok(a)
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/expectation.rs b/compiler/rustc_typeck/src/check/expectation.rs
new file mode 100644
index 000000000..e9e810344
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/expectation.rs
@@ -0,0 +1,122 @@
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::ty::{self, Ty};
+use rustc_span::{self, Span};
+
+use super::Expectation::*;
+use super::FnCtxt;
+
+/// When type-checking an expression, we propagate downward
+/// whatever type hint we can, in the form of an `Expectation`.
+#[derive(Copy, Clone, Debug)]
+pub enum Expectation<'tcx> {
+ /// We know nothing about what type this expression should have.
+ NoExpectation,
+
+ /// This expression should have the type given (or some subtype).
+ ExpectHasType(Ty<'tcx>),
+
+ /// This expression will be cast to the `Ty`.
+ ExpectCastableToType(Ty<'tcx>),
+
+ /// This rvalue expression will be wrapped in `&` or `Box` and coerced
+ /// to `&Ty` or `Box<Ty>`, respectively. `Ty` is `[A]` or `Trait`.
+ ExpectRvalueLikeUnsized(Ty<'tcx>),
+
+ IsLast(Span),
+}
+
+impl<'a, 'tcx> Expectation<'tcx> {
+ // Disregard "castable to" expectations because they
+ // can lead us astray. Consider for example `if cond
+ // {22} else {c} as u8` -- if we propagate the
+ // "castable to u8" constraint to 22, it will pick the
+ // type 22u8, which is overly constrained (c might not
+ // be a u8). In effect, the problem is that the
+ // "castable to" expectation is not the tightest thing
+ // we can say, so we want to drop it in this case.
+ // The tightest thing we can say is "must unify with
+ // else branch". Note that in the case of a "has type"
+ // constraint, this limitation does not hold.
+
+ // If the expected type is just a type variable, then don't use
+ // an expected type. Otherwise, we might write parts of the type
+ // when checking the 'then' block which are incompatible with the
+ // 'else' branch.
+ pub(super) fn adjust_for_branches(&self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> {
+ match *self {
+ ExpectHasType(ety) => {
+ let ety = fcx.shallow_resolve(ety);
+ if !ety.is_ty_var() { ExpectHasType(ety) } else { NoExpectation }
+ }
+ ExpectRvalueLikeUnsized(ety) => ExpectRvalueLikeUnsized(ety),
+ _ => NoExpectation,
+ }
+ }
+
+ /// Provides an expectation for an rvalue expression given an *optional*
+ /// hint, which is not required for type safety (the resulting type might
+ /// be checked higher up, as is the case with `&expr` and `box expr`), but
+ /// is useful in determining the concrete type.
+ ///
+ /// The primary use case is where the expected type is a fat pointer,
+ /// like `&[isize]`. For example, consider the following statement:
+ ///
+ /// let x: &[isize] = &[1, 2, 3];
+ ///
+ /// In this case, the expected type for the `&[1, 2, 3]` expression is
+ /// `&[isize]`. If however we were to say that `[1, 2, 3]` has the
+ /// expectation `ExpectHasType([isize])`, that would be too strong --
+ /// `[1, 2, 3]` does not have the type `[isize]` but rather `[isize; 3]`.
+ /// It is only the `&[1, 2, 3]` expression as a whole that can be coerced
+ /// to the type `&[isize]`. Therefore, we propagate this more limited hint,
+ /// which still is useful, because it informs integer literals and the like.
+ /// See the test case `test/ui/coerce-expect-unsized.rs` and #20169
+/// for examples of where this comes up.
+ pub(super) fn rvalue_hint(fcx: &FnCtxt<'a, 'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
+ match fcx.tcx.struct_tail_without_normalization(ty).kind() {
+ ty::Slice(_) | ty::Str | ty::Dynamic(..) => ExpectRvalueLikeUnsized(ty),
+ _ => ExpectHasType(ty),
+ }
+ }
+
+ // Resolves `expected` by a single level if it is a variable. If
+ // there is no expected type or resolution is not possible (e.g.,
+ // no constraints yet present), just returns `self`.
+ fn resolve(self, fcx: &FnCtxt<'a, 'tcx>) -> Expectation<'tcx> {
+ match self {
+ NoExpectation => NoExpectation,
+ ExpectCastableToType(t) => ExpectCastableToType(fcx.resolve_vars_if_possible(t)),
+ ExpectHasType(t) => ExpectHasType(fcx.resolve_vars_if_possible(t)),
+ ExpectRvalueLikeUnsized(t) => ExpectRvalueLikeUnsized(fcx.resolve_vars_if_possible(t)),
+ IsLast(sp) => IsLast(sp),
+ }
+ }
+
+ pub(super) fn to_option(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
+ match self.resolve(fcx) {
+ NoExpectation | IsLast(_) => None,
+ ExpectCastableToType(ty) | ExpectHasType(ty) | ExpectRvalueLikeUnsized(ty) => Some(ty),
+ }
+ }
+
+ /// It sometimes happens that we want to turn an expectation into
+ /// a **hard constraint** (i.e., something that must be satisfied
+ /// for the program to type-check). `only_has_type` will return
+ /// such a constraint, if it exists.
+ pub(super) fn only_has_type(self, fcx: &FnCtxt<'a, 'tcx>) -> Option<Ty<'tcx>> {
+ match self {
+ ExpectHasType(ty) => Some(fcx.resolve_vars_if_possible(ty)),
+ NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) | IsLast(_) => {
+ None
+ }
+ }
+ }
+
+ /// Like `only_has_type`, but instead of returning `None` if no
+ /// hard constraint exists, creates a fresh type variable.
+ pub(super) fn coercion_target_type(self, fcx: &FnCtxt<'a, 'tcx>, span: Span) -> Ty<'tcx> {
+ self.only_has_type(fcx).unwrap_or_else(|| {
+ fcx.next_ty_var(TypeVariableOrigin { kind: TypeVariableOriginKind::MiscVariable, span })
+ })
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/expr.rs b/compiler/rustc_typeck/src/check/expr.rs
new file mode 100644
index 000000000..6e97b0bf2
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/expr.rs
@@ -0,0 +1,2824 @@
+//! Type checking expressions.
+//!
+//! See `mod.rs` for more context on type checking in general.
+
+use crate::astconv::AstConv as _;
+use crate::check::cast;
+use crate::check::coercion::CoerceMany;
+use crate::check::fatally_break_rust;
+use crate::check::method::SelfSource;
+use crate::check::report_unexpected_variant_res;
+use crate::check::BreakableCtxt;
+use crate::check::Diverges;
+use crate::check::DynamicCoerceMany;
+use crate::check::Expectation::{self, ExpectCastableToType, ExpectHasType, NoExpectation};
+use crate::check::FnCtxt;
+use crate::check::Needs;
+use crate::check::TupleArgumentsFlag::DontTupleArguments;
+use crate::errors::{
+ FieldMultiplySpecifiedInInitializer, FunctionalRecordUpdateOnNonStruct,
+ YieldExprOutsideOfGenerator,
+};
+use crate::type_error_struct;
+
+use super::suggest_call_constructor;
+use crate::errors::{AddressOfTemporaryTaken, ReturnStmtOutsideOfFnBody, StructExprNonExhaustive};
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stack::ensure_sufficient_stack;
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, DiagnosticId,
+ EmissionGuarantee, ErrorGuaranteed,
+};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{Closure, ExprKind, HirId, QPath};
+use rustc_infer::infer;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferOk;
+use rustc_infer::traits::ObligationCause;
+use rustc_middle::middle::stability;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase};
+use rustc_middle::ty::error::TypeError::FieldMisMatch;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TypeVisitable};
+use rustc_session::parse::feature_err;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::source_map::{Span, Spanned};
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{BytePos, Pos};
+use rustc_target::spec::abi::Abi::RustIntrinsic;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::{self, ObligationCauseCode};
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ fn check_expr_eq_type(&self, expr: &'tcx hir::Expr<'tcx>, expected: Ty<'tcx>) {
+ let ty = self.check_expr_with_hint(expr, expected);
+ self.demand_eqtype(expr.span, expected, ty);
+ }
+
+ pub fn check_expr_has_type_or_error(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ extend_err: impl FnMut(&mut Diagnostic),
+ ) -> Ty<'tcx> {
+ self.check_expr_meets_expectation_or_error(expr, ExpectHasType(expected), extend_err)
+ }
+
+ fn check_expr_meets_expectation_or_error(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ mut extend_err: impl FnMut(&mut Diagnostic),
+ ) -> Ty<'tcx> {
+ let expected_ty = expected.to_option(&self).unwrap_or(self.tcx.types.bool);
+ let mut ty = self.check_expr_with_expectation(expr, expected);
+
+ // While we don't allow *arbitrary* coercions here, we *do* allow
+ // coercions from ! to `expected`.
+ if ty.is_never() {
+ if let Some(adjustments) = self.typeck_results.borrow().adjustments().get(expr.hir_id) {
+ self.tcx().sess.delay_span_bug(
+ expr.span,
+ "expression with never type wound up being adjusted",
+ );
+ return if let [Adjustment { kind: Adjust::NeverToAny, target }] = &adjustments[..] {
+ target.to_owned()
+ } else {
+ self.tcx().ty_error()
+ };
+ }
+
+ let adj_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::AdjustmentType,
+ span: expr.span,
+ });
+ self.apply_adjustments(
+ expr,
+ vec![Adjustment { kind: Adjust::NeverToAny, target: adj_ty }],
+ );
+ ty = adj_ty;
+ }
+
+ if let Some(mut err) = self.demand_suptype_diag(expr.span, expected_ty, ty) {
+ let expr = expr.peel_drop_temps();
+ self.suggest_deref_ref_or_into(&mut err, expr, expected_ty, ty, None);
+ extend_err(&mut err);
+ err.emit();
+ }
+ ty
+ }
+
+ pub(super) fn check_expr_coercable_to_type(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ ) -> Ty<'tcx> {
+ let ty = self.check_expr_with_hint(expr, expected);
+ // checks don't need two phase
+ self.demand_coerce(expr, ty, expected, expected_ty_expr, AllowTwoPhase::No)
+ }
+
+ pub(super) fn check_expr_with_hint(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ ) -> Ty<'tcx> {
+ self.check_expr_with_expectation(expr, ExpectHasType(expected))
+ }
+
+ fn check_expr_with_expectation_and_needs(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ needs: Needs,
+ ) -> Ty<'tcx> {
+ let ty = self.check_expr_with_expectation(expr, expected);
+
+ // If the expression is used in a place where a mutable place is required,
+ // e.g. the LHS of an assignment, perform the conversion.
+ if let Needs::MutPlace = needs {
+ self.convert_place_derefs_to_mutable(expr);
+ }
+
+ ty
+ }
+
+ pub(super) fn check_expr(&self, expr: &'tcx hir::Expr<'tcx>) -> Ty<'tcx> {
+ self.check_expr_with_expectation(expr, NoExpectation)
+ }
+
+ pub(super) fn check_expr_with_needs(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ needs: Needs,
+ ) -> Ty<'tcx> {
+ self.check_expr_with_expectation_and_needs(expr, NoExpectation, needs)
+ }
+
+ /// Invariant:
+ /// If an expression has any sub-expressions that result in a type error,
+ /// inspecting that expression's type with `ty.references_error()` will return
+ /// true. Likewise, if an expression is known to diverge, inspecting its
+ /// type with `ty::type_is_bot` will return true (n.b.: since Rust is
+ /// strict, _|_ can appear in the type of an expression that does not,
+ /// itself, diverge: for example, fn() -> _|_.)
+ /// Note that inspecting a type's structure *directly* may expose the fact
+ /// that there are actually multiple representations for `Error`, so avoid
+ /// that when err needs to be handled differently.
+ #[instrument(skip(self, expr), level = "debug")]
+ pub(super) fn check_expr_with_expectation(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ self.check_expr_with_expectation_and_args(expr, expected, &[])
+ }
+
+ /// Same as `check_expr_with_expectation`, but allows us to pass in the arguments of a
+ /// `ExprKind::Call` when evaluating its callee when it is an `ExprKind::Path`.
+ pub(super) fn check_expr_with_expectation_and_args(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> Ty<'tcx> {
+ if self.tcx().sess.verbose() {
+ // make this code only run with -Zverbose because it is probably slow
+ if let Ok(lint_str) = self.tcx.sess.source_map().span_to_snippet(expr.span) {
+ if !lint_str.contains('\n') {
+ debug!("expr text: {lint_str}");
+ } else {
+ let mut lines = lint_str.lines();
+ if let Some(line0) = lines.next() {
+ let remaining_lines = lines.count();
+ debug!("expr text: {line0}");
+ debug!("expr text: ...(and {remaining_lines} more lines)");
+ }
+ }
+ }
+ }
+
+ // True if `expr` is a `Try::from_ok(())` that is a result of desugaring a try block
+ // without the final expr (e.g. `try { return; }`). We don't want to generate an
+ // unreachable_code lint for it since warnings for autogenerated code are confusing.
+ let is_try_block_generated_unit_expr = match expr.kind {
+ ExprKind::Call(_, args) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {
+ args.len() == 1 && args[0].span.is_desugaring(DesugaringKind::TryBlock)
+ }
+
+ _ => false,
+ };
+
+ // Warn for expressions after diverging siblings.
+ if !is_try_block_generated_unit_expr {
+ self.warn_if_unreachable(expr.hir_id, expr.span, "expression");
+ }
+
+ // Hide the outer diverging and has_errors flags.
+ let old_diverges = self.diverges.replace(Diverges::Maybe);
+ let old_has_errors = self.has_errors.replace(false);
+
+ let ty = ensure_sufficient_stack(|| match &expr.kind {
+ hir::ExprKind::Path(
+ qpath @ hir::QPath::Resolved(..) | qpath @ hir::QPath::TypeRelative(..),
+ ) => self.check_expr_path(qpath, expr, args),
+ _ => self.check_expr_kind(expr, expected),
+ });
+
+ // Warn for non-block expressions with diverging children.
+ match expr.kind {
+ ExprKind::Block(..)
+ | ExprKind::If(..)
+ | ExprKind::Let(..)
+ | ExprKind::Loop(..)
+ | ExprKind::Match(..) => {}
+ // If `expr` is a result of desugaring the try block and is an ok-wrapped
+ // diverging expression (e.g. it arose from desugaring of `try { return }`),
+ // we skip issuing a warning because it is autogenerated code.
+ ExprKind::Call(..) if expr.span.is_desugaring(DesugaringKind::TryBlock) => {}
+ ExprKind::Call(callee, _) => self.warn_if_unreachable(expr.hir_id, callee.span, "call"),
+ ExprKind::MethodCall(segment, ..) => {
+ self.warn_if_unreachable(expr.hir_id, segment.ident.span, "call")
+ }
+ _ => self.warn_if_unreachable(expr.hir_id, expr.span, "expression"),
+ }
+
+ // Any expression that produces a value of type `!` must have diverged
+ if ty.is_never() {
+ self.diverges.set(self.diverges.get() | Diverges::always(expr.span));
+ }
+
+ // Record the type, which applies its effects.
+ // We need to do this after the warning above, so that
+ // we don't warn for the diverging expression itself.
+ self.write_ty(expr.hir_id, ty);
+
+ // Combine the diverging and has_error flags.
+ self.diverges.set(self.diverges.get() | old_diverges);
+ self.has_errors.set(self.has_errors.get() | old_has_errors);
+
+ debug!("type of {} is...", self.tcx.hir().node_to_string(expr.hir_id));
+ debug!("... {:?}, expected is {:?}", ty, expected);
+
+ ty
+ }
+
+ #[instrument(skip(self, expr), level = "debug")]
+ fn check_expr_kind(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ trace!("expr={:#?}", expr);
+
+ let tcx = self.tcx;
+ match expr.kind {
+ ExprKind::Box(subexpr) => self.check_expr_box(subexpr, expected),
+ ExprKind::Lit(ref lit) => self.check_lit(&lit, expected),
+ ExprKind::Binary(op, lhs, rhs) => self.check_binop(expr, op, lhs, rhs, expected),
+ ExprKind::Assign(lhs, rhs, span) => {
+ self.check_expr_assign(expr, expected, lhs, rhs, span)
+ }
+ ExprKind::AssignOp(op, lhs, rhs) => {
+ self.check_binop_assign(expr, op, lhs, rhs, expected)
+ }
+ ExprKind::Unary(unop, oprnd) => self.check_expr_unary(unop, oprnd, expected, expr),
+ ExprKind::AddrOf(kind, mutbl, oprnd) => {
+ self.check_expr_addr_of(kind, mutbl, oprnd, expected, expr)
+ }
+ ExprKind::Path(QPath::LangItem(lang_item, _, hir_id)) => {
+ self.check_lang_item_path(lang_item, expr, hir_id)
+ }
+ ExprKind::Path(ref qpath) => self.check_expr_path(qpath, expr, &[]),
+ ExprKind::InlineAsm(asm) => {
+ // We defer some asm checks as we may not have resolved the input and output types yet (they may still be infer vars).
+ self.deferred_asm_checks.borrow_mut().push((asm, expr.hir_id));
+ self.check_expr_asm(asm)
+ }
+ ExprKind::Break(destination, ref expr_opt) => {
+ self.check_expr_break(destination, expr_opt.as_deref(), expr)
+ }
+ ExprKind::Continue(destination) => {
+ if destination.target_id.is_ok() {
+ tcx.types.never
+ } else {
+ // There was an error; make type-check fail.
+ tcx.ty_error()
+ }
+ }
+ ExprKind::Ret(ref expr_opt) => self.check_expr_return(expr_opt.as_deref(), expr),
+ ExprKind::Let(let_expr) => self.check_expr_let(let_expr),
+ ExprKind::Loop(body, _, source, _) => {
+ self.check_expr_loop(body, source, expected, expr)
+ }
+ ExprKind::Match(discrim, arms, match_src) => {
+ self.check_match(expr, &discrim, arms, expected, match_src)
+ }
+ ExprKind::Closure(&Closure { capture_clause, fn_decl, body, movability, .. }) => {
+ self.check_expr_closure(expr, capture_clause, &fn_decl, body, movability, expected)
+ }
+ ExprKind::Block(body, _) => self.check_block_with_expected(&body, expected),
+ ExprKind::Call(callee, args) => self.check_call(expr, &callee, args, expected),
+ ExprKind::MethodCall(segment, args, _) => {
+ self.check_method_call(expr, segment, args, expected)
+ }
+ ExprKind::Cast(e, t) => self.check_expr_cast(e, t, expr),
+ ExprKind::Type(e, t) => {
+ let ty = self.to_ty_saving_user_provided_ty(&t);
+ self.check_expr_eq_type(&e, ty);
+ ty
+ }
+ ExprKind::If(cond, then_expr, opt_else_expr) => {
+ self.check_then_else(cond, then_expr, opt_else_expr, expr.span, expected)
+ }
+ ExprKind::DropTemps(e) => self.check_expr_with_expectation(e, expected),
+ ExprKind::Array(args) => self.check_expr_array(args, expected, expr),
+ ExprKind::ConstBlock(ref anon_const) => {
+ self.check_expr_const_block(anon_const, expected, expr)
+ }
+ ExprKind::Repeat(element, ref count) => {
+ self.check_expr_repeat(element, count, expected, expr)
+ }
+ ExprKind::Tup(elts) => self.check_expr_tuple(elts, expected, expr),
+ ExprKind::Struct(qpath, fields, ref base_expr) => {
+ self.check_expr_struct(expr, expected, qpath, fields, base_expr)
+ }
+ ExprKind::Field(base, field) => self.check_field(expr, &base, field),
+ ExprKind::Index(base, idx) => self.check_expr_index(base, idx, expr),
+ ExprKind::Yield(value, ref src) => self.check_expr_yield(value, expr, src),
+ hir::ExprKind::Err => tcx.ty_error(),
+ }
+ }
+
+ fn check_expr_box(&self, expr: &'tcx hir::Expr<'tcx>, expected: Expectation<'tcx>) -> Ty<'tcx> {
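+ // Propagate the expectation inward: e.g. for `let b: Box<u32> = box x;`,
+ // the subexpression `x` is checked with an expectation of `u32`.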
+ let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| match ty.kind() {
+ ty::Adt(def, _) if def.is_box() => Expectation::rvalue_hint(self, ty.boxed_ty()),
+ _ => NoExpectation,
+ });
+ let referent_ty = self.check_expr_with_expectation(expr, expected_inner);
+ self.require_type_is_sized(referent_ty, expr.span, traits::SizedBoxType);
+ self.tcx.mk_box(referent_ty)
+ }
+
+ fn check_expr_unary(
+ &self,
+ unop: hir::UnOp,
+ oprnd: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let expected_inner = match unop {
+ hir::UnOp::Not | hir::UnOp::Neg => expected,
+ hir::UnOp::Deref => NoExpectation,
+ };
+ let mut oprnd_t = self.check_expr_with_expectation(&oprnd, expected_inner);
+
+ if !oprnd_t.references_error() {
+ oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t);
+ match unop {
+ hir::UnOp::Deref => {
+ if let Some(ty) = self.lookup_derefing(expr, oprnd, oprnd_t) {
+ oprnd_t = ty;
+ } else {
+ let mut err = type_error_struct!(
+ tcx.sess,
+ expr.span,
+ oprnd_t,
+ E0614,
+ "type `{oprnd_t}` cannot be dereferenced",
+ );
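+ // A leading block expression can make the parse ambiguous: e.g. in
+ // `if cond { .. } *x`, the `*x` may be parsed as a separate unary
+ // deref statement. If the parser recorded such an ambiguity at this
+ // position, suggest parentheses.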
+ let sp = tcx.sess.source_map().start_point(expr.span);
+ if let Some(sp) =
+ tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp)
+ {
+ tcx.sess.parse_sess.expr_parentheses_needed(&mut err, *sp);
+ }
+ err.emit();
+ oprnd_t = tcx.ty_error();
+ }
+ }
+ hir::UnOp::Not => {
+ let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner);
+ // If it's builtin, we can reuse the type, this helps inference.
+ if !(oprnd_t.is_integral() || *oprnd_t.kind() == ty::Bool) {
+ oprnd_t = result;
+ }
+ }
+ hir::UnOp::Neg => {
+ let result = self.check_user_unop(expr, oprnd_t, unop, expected_inner);
+ // If it's builtin, we can reuse the type, this helps inference.
+ if !oprnd_t.is_numeric() {
+ oprnd_t = result;
+ }
+ }
+ }
+ }
+ oprnd_t
+ }
+
+ fn check_expr_addr_of(
+ &self,
+ kind: hir::BorrowKind,
+ mutbl: hir::Mutability,
+ oprnd: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
+ match ty.kind() {
+ ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
+ if oprnd.is_syntactic_place_expr() {
+ // Places may legitimately have unsized types.
+ // For example, dereferences of a fat pointer and
+ // the last field of a struct can be unsized.
+ ExpectHasType(*ty)
+ } else {
+ Expectation::rvalue_hint(self, *ty)
+ }
+ }
+ _ => NoExpectation,
+ }
+ });
+ let ty =
+ self.check_expr_with_expectation_and_needs(&oprnd, hint, Needs::maybe_mut_place(mutbl));
+
+ let tm = ty::TypeAndMut { ty, mutbl };
+ match kind {
+ _ if tm.ty.references_error() => self.tcx.ty_error(),
+ hir::BorrowKind::Raw => {
+ self.check_named_place_expr(oprnd);
+ self.tcx.mk_ptr(tm)
+ }
+ hir::BorrowKind::Ref => {
+ // Note: at this point, we cannot say what the best lifetime
+ // is to use for resulting pointer. We want to use the
+ // shortest lifetime possible so as to avoid spurious borrowck
+ // errors. Moreover, the longest lifetime will depend on the
+ // precise details of the value whose address is being taken
+ // (and how long it is valid), which we don't know yet until
+ // type inference is complete.
+ //
+ // Therefore, here we simply generate a region variable. The
+ // region inferencer will then select a suitable value.
+ // Finally, borrowck will infer the value of the region again,
+ // this time with enough precision to check that the value
+ // whose address was taken can actually be made to live as long
+ // as it needs to live.
+ let region = self.next_region_var(infer::AddrOfRegion(expr.span));
+ self.tcx.mk_ref(region, tm)
+ }
+ }
+ }
+
+ /// Does this expression refer to a place that either:
+ /// * is based on a local or static, or
+ /// * contains a dereference.
+ ///
+ /// Note that the adjustments for the children of `expr` should already
+ /// have been resolved.
+ fn check_named_place_expr(&self, oprnd: &'tcx hir::Expr<'tcx>) {
+ let is_named = oprnd.is_place_expr(|base| {
+ // Allow raw borrows if there are any deref adjustments.
+ //
+ // const VAL: (i32,) = (0,);
+ // const REF: &(i32,) = &(0,);
+ //
+ // &raw const VAL.0; // ERROR
+ // &raw const REF.0; // OK, same as &raw const (*REF).0;
+ //
+ // This is maybe too permissive, since it allows
+ // `let u = &raw const Box::new((1,)).0`, which creates an
+ // immediately dangling raw pointer.
+ self.typeck_results
+ .borrow()
+ .adjustments()
+ .get(base.hir_id)
+ .map_or(false, |x| x.iter().any(|adj| matches!(adj.kind, Adjust::Deref(_))))
+ });
+ if !is_named {
+ self.tcx.sess.emit_err(AddressOfTemporaryTaken { span: oprnd.span });
+ }
+ }
+
+ fn check_lang_item_path(
+ &self,
+ lang_item: hir::LangItem,
+ expr: &'tcx hir::Expr<'tcx>,
+ hir_id: Option<hir::HirId>,
+ ) -> Ty<'tcx> {
+ self.resolve_lang_item_path(lang_item, expr.span, expr.hir_id, hir_id).1
+ }
+
+ pub(crate) fn check_expr_path(
+ &self,
+ qpath: &'tcx hir::QPath<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let (res, opt_ty, segs) =
+ self.resolve_ty_and_res_fully_qualified_call(qpath, expr.hir_id, expr.span);
+ let ty = match res {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ tcx.ty_error()
+ }
+ Res::Def(DefKind::Ctor(_, CtorKind::Fictive), _) => {
+ report_unexpected_variant_res(tcx, res, qpath, expr.span);
+ tcx.ty_error()
+ }
+ _ => self.instantiate_value_path(segs, opt_ty, res, expr.span, expr.hir_id).0,
+ };
+
+ if let ty::FnDef(did, ..) = *ty.kind() {
+ let fn_sig = ty.fn_sig(tcx);
+ if tcx.fn_sig(did).abi() == RustIntrinsic && tcx.item_name(did) == sym::transmute {
+ let from = fn_sig.inputs().skip_binder()[0];
+ let to = fn_sig.output().skip_binder();
+ // We defer the transmute to the end of typeck, once all inference vars have
+ // been resolved or we errored. This is important as we can only check transmute
+ // on concrete types, but the output type may not be known yet (it would only
+ // be known if explicitly specified via turbofish).
+ self.deferred_transmute_checks.borrow_mut().push((from, to, expr.span));
+ }
+ if !tcx.features().unsized_fn_params {
+ // We want to remove some Sized bounds from std functions,
+ // but don't want to expose the removal to stable Rust.
+ // i.e., we don't want to allow
+ //
+ // ```rust
+ // drop as fn(str);
+ // ```
+ //
+ // to work in stable even if the Sized bound on `drop` is relaxed.
+ for i in 0..fn_sig.inputs().skip_binder().len() {
+ // We just want to check sizedness, so instead of introducing
+ // placeholder lifetimes with probing, we just replace higher-ranked
+ // lifetimes with fresh vars.
+ let span = args.get(i).map(|a| a.span).unwrap_or(expr.span);
+ let input = self.replace_bound_vars_with_fresh_vars(
+ span,
+ infer::LateBoundRegionConversionTime::FnCall,
+ fn_sig.input(i),
+ );
+ self.require_type_is_sized_deferred(
+ input,
+ span,
+ traits::SizedArgumentType(None),
+ );
+ }
+ }
+ // Here we want to prevent struct constructors from returning unsized types.
+ // There were two cases where this happened: `fn` pointer coercion on stable,
+ // and regular function calls in the presence of `unsized_locals`.
+ // Also, as we just want to check sizedness, instead of introducing
+ // placeholder lifetimes with probing, we just replace higher-ranked
+ // lifetimes with fresh vars.
+ let output = self.replace_bound_vars_with_fresh_vars(
+ expr.span,
+ infer::LateBoundRegionConversionTime::FnCall,
+ fn_sig.output(),
+ );
+ self.require_type_is_sized_deferred(output, expr.span, traits::SizedReturnType);
+ }
+
+ // We always require that the type provided as the value for
+ // a type parameter outlives the moment of instantiation.
+ let substs = self.typeck_results.borrow().node_substs(expr.hir_id);
+ self.add_wf_bounds(substs, expr);
+
+ ty
+ }
+
+ fn check_expr_break(
+ &self,
+ destination: hir::Destination,
+ expr_opt: Option<&'tcx hir::Expr<'tcx>>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ if let Ok(target_id) = destination.target_id {
+ let (e_ty, cause);
+ if let Some(e) = expr_opt {
+ // If this is a break with a value, we need to type-check
+ // the expression. Get an expected type from the loop context.
+ let opt_coerce_to = {
+ // We should release `enclosing_breakables` before the `check_expr_with_hint`
+ // below, so can't move this block of code to the enclosing scope and share
+ // `ctxt` with the second `enclosing_breakables` borrow below.
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ match enclosing_breakables.opt_find_breakable(target_id) {
+ Some(ctxt) => ctxt.coerce.as_ref().map(|coerce| coerce.expected_ty()),
+ None => {
+ // Avoid ICE when `break` is inside a closure (#65383).
+ return tcx.ty_error_with_message(
+ expr.span,
+ "break was outside loop, but no error was emitted",
+ );
+ }
+ }
+ };
+
+ // If the loop context is not a `loop { }`, then break with
+ // a value is illegal, and `opt_coerce_to` will be `None`.
+ // Just set expectation to error in that case.
+ let coerce_to = opt_coerce_to.unwrap_or_else(|| tcx.ty_error());
+
+ // Recurse without `enclosing_breakables` borrowed.
+ e_ty = self.check_expr_with_hint(e, coerce_to);
+ cause = self.misc(e.span);
+ } else {
+ // Otherwise, this is a break *without* a value. That's
+ // always legal, and is equivalent to `break ()`.
+ e_ty = tcx.mk_unit();
+ cause = self.misc(expr.span);
+ }
+
+ // Now that we have type-checked `expr_opt`, borrow
+ // the `enclosing_loops` field and let's coerce the
+ // type of `expr_opt` into what is expected.
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ let Some(ctxt) = enclosing_breakables.opt_find_breakable(target_id) else {
+ // Avoid ICE when `break` is inside a closure (#65383).
+ return tcx.ty_error_with_message(
+ expr.span,
+ "break was outside loop, but no error was emitted",
+ );
+ };
+
+ if let Some(ref mut coerce) = ctxt.coerce {
+ if let Some(ref e) = expr_opt {
+ coerce.coerce(self, &cause, e, e_ty);
+ } else {
+ assert!(e_ty.is_unit());
+ let ty = coerce.expected_ty();
+ coerce.coerce_forced_unit(
+ self,
+ &cause,
+ &mut |mut err| {
+ self.suggest_mismatched_types_on_tail(
+ &mut err, expr, ty, e_ty, target_id,
+ );
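+ // If we can produce a sample value of the expected type
+ // (e.g. `42` for `i32`), suggest giving the `break` that value.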
+ if let Some(val) = ty_kind_suggestion(ty) {
+ let label = destination
+ .label
+ .map(|l| format!(" {}", l.ident))
+ .unwrap_or_else(String::new);
+ err.span_suggestion(
+ expr.span,
+ "give it a value of the expected type",
+ format!("break{label} {val}"),
+ Applicability::HasPlaceholders,
+ );
+ }
+ },
+ false,
+ );
+ }
+ } else {
+ // If `ctxt.coerce` is `None`, we can just ignore
+ // the type of the expression. This is because
+ // either this was a break *without* a value, in
+ // which case it is always a legal type (`()`), or
+ // else an error would have been flagged by the
+ // `loops` pass for using break with an expression
+ // where you are not supposed to.
+ assert!(expr_opt.is_none() || self.tcx.sess.has_errors().is_some());
+ }
+
+ // If we encountered a `break`, then (no surprise) it may be possible to break from the
+ // loop... unless the value being returned from the loop diverges itself, e.g.
+ // `break return 5` or `break loop {}`.
+ ctxt.may_break |= !self.diverges.get().is_always();
+
+ // the type of a `break` is always `!`, since it diverges
+ tcx.types.never
+ } else {
+ // Otherwise, we failed to find the enclosing loop;
+ // this can only happen if the `break` was not
+ // inside a loop at all, which is caught by the
+ // loop-checking pass.
+ let err = self.tcx.ty_error_with_message(
+ expr.span,
+ "break was outside loop, but no error was emitted",
+ );
+
+ // We still need to assign a type to the inner expression to
+ // prevent the ICE in #43162.
+ if let Some(e) = expr_opt {
+ self.check_expr_with_hint(e, err);
+
+ // ... except when we try to 'break rust;'.
+ // ICE this expression in particular (see #43162).
+ if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind {
+ if path.segments.len() == 1 && path.segments[0].ident.name == sym::rust {
+ fatally_break_rust(self.tcx.sess);
+ }
+ }
+ }
+
+ // There was an error; make type-check fail.
+ err
+ }
+ }
+
+ fn check_expr_return(
+ &self,
+ expr_opt: Option<&'tcx hir::Expr<'tcx>>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ if self.ret_coercion.is_none() {
+ let mut err = ReturnStmtOutsideOfFnBody {
+ span: expr.span,
+ encl_body_span: None,
+ encl_fn_span: None,
+ };
+
+ let encl_item_id = self.tcx.hir().get_parent_item(expr.hir_id);
+
+ if let Some(hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Fn(..),
+ span: encl_fn_span,
+ ..
+ }))
+ | Some(hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(_, hir::TraitFn::Provided(_)),
+ span: encl_fn_span,
+ ..
+ }))
+ | Some(hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Fn(..),
+ span: encl_fn_span,
+ ..
+ })) = self.tcx.hir().find_by_def_id(encl_item_id)
+ {
+ // We are inside a function body, so reporting "return statement
+ // outside of function body" needs an explanation.
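+ // (E.g. the `return` sits inside a nested body, such as an
+ // anonymous constant, whose owner differs from the enclosing item.)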
+
+ let encl_body_owner_id = self.tcx.hir().enclosing_body_owner(expr.hir_id);
+
+ // If this didn't hold, we would not have to report an error in
+ // the first place.
+ assert_ne!(encl_item_id, encl_body_owner_id);
+
+ let encl_body_id = self.tcx.hir().body_owned_by(encl_body_owner_id);
+ let encl_body = self.tcx.hir().body(encl_body_id);
+
+ err.encl_body_span = Some(encl_body.value.span);
+ err.encl_fn_span = Some(*encl_fn_span);
+ }
+
+ self.tcx.sess.emit_err(err);
+
+ if let Some(e) = expr_opt {
+ // We still have to type-check `e` (issue #86188), but calling
+ // `check_return_expr` only works inside fn bodies.
+ self.check_expr(e);
+ }
+ } else if let Some(e) = expr_opt {
+ if self.ret_coercion_span.get().is_none() {
+ self.ret_coercion_span.set(Some(e.span));
+ }
+ self.check_return_expr(e, true);
+ } else {
+ let mut coercion = self.ret_coercion.as_ref().unwrap().borrow_mut();
+ if self.ret_coercion_span.get().is_none() {
+ self.ret_coercion_span.set(Some(expr.span));
+ }
+ let cause = self.cause(expr.span, ObligationCauseCode::ReturnNoExpression);
+ if let Some((fn_decl, _)) = self.get_fn_decl(expr.hir_id) {
+ coercion.coerce_forced_unit(
+ self,
+ &cause,
+ &mut |db| {
+ let span = fn_decl.output.span();
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span) {
+ db.span_label(
+ span,
+ format!("expected `{snippet}` because of this return type"),
+ );
+ }
+ },
+ true,
+ );
+ } else {
+ coercion.coerce_forced_unit(self, &cause, &mut |_| (), true);
+ }
+ }
+ self.tcx.types.never
+ }
+
+ /// `explicit_return` is `true` if we're checking an explicit `return expr`,
+ /// and `false` if we're checking a trailing expression.
+ pub(super) fn check_return_expr(
+ &self,
+ return_expr: &'tcx hir::Expr<'tcx>,
+ explicit_return: bool,
+ ) {
+ let ret_coercion = self.ret_coercion.as_ref().unwrap_or_else(|| {
+ span_bug!(return_expr.span, "check_return_expr called outside fn body")
+ });
+
+ let ret_ty = ret_coercion.borrow().expected_ty();
+ let return_expr_ty = self.check_expr_with_hint(return_expr, ret_ty);
+ let mut span = return_expr.span;
+ // Use the span of the trailing expression for our cause,
+ // not the span of the entire function
+ if !explicit_return {
+ if let ExprKind::Block(body, _) = return_expr.kind && let Some(last_expr) = body.expr {
+ span = last_expr.span;
+ }
+ }
+ ret_coercion.borrow_mut().coerce(
+ self,
+ &self.cause(span, ObligationCauseCode::ReturnValue(return_expr.hir_id)),
+ return_expr,
+ return_expr_ty,
+ );
+
+ if self.return_type_has_opaque {
+ // Point any obligations that were registered due to opaque type
+ // inference at the return expression.
+ self.select_obligations_where_possible(false, |errors| {
+ self.point_at_return_for_opaque_ty_error(errors, span, return_expr_ty);
+ });
+ }
+ }
+
+ fn point_at_return_for_opaque_ty_error(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ span: Span,
+ return_expr_ty: Ty<'tcx>,
+ ) {
+ // Don't point at the whole block if it's empty
+ if span == self.tcx.hir().span(self.body_id) {
+ return;
+ }
+ for err in errors {
+ let cause = &mut err.obligation.cause;
+ if let ObligationCauseCode::OpaqueReturnType(None) = cause.code() {
+ let new_cause = ObligationCause::new(
+ cause.span,
+ cause.body_id,
+ ObligationCauseCode::OpaqueReturnType(Some((return_expr_ty, span))),
+ );
+ *cause = new_cause;
+ }
+ }
+ }
+
+ pub(crate) fn check_lhs_assignable(
+ &self,
+ lhs: &'tcx hir::Expr<'tcx>,
+ err_code: &'static str,
+ op_span: Span,
+ adjust_err: impl FnOnce(&mut DiagnosticBuilder<'tcx, ErrorGuaranteed>),
+ ) {
+ if lhs.is_syntactic_place_expr() {
+ return;
+ }
+
+ // FIXME: Make this use SessionDiagnostic once error codes can be dynamically set.
+ let mut err = self.tcx.sess.struct_span_err_with_code(
+ op_span,
+ "invalid left-hand side of assignment",
+ DiagnosticId::Error(err_code.into()),
+ );
+ err.span_label(lhs.span, "cannot assign to this expression");
+
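+ // If the LHS is part of a `while` condition, as in
+ // `while Some(x) = it.next() { .. }`, the user probably meant
+ // `while let`, so suggest inserting `let `.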
+ self.comes_from_while_condition(lhs.hir_id, |expr| {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ "you might have meant to use pattern destructuring",
+ "let ",
+ Applicability::MachineApplicable,
+ );
+ });
+
+ adjust_err(&mut err);
+
+ err.emit();
+ }
+
+ // Check if an expression `original_expr_id` comes from the condition of a while loop,
+ // as opposed to the body of a while loop, which we can naively check by iterating
+ // parents until we find a loop...
+ pub(super) fn comes_from_while_condition(
+ &self,
+ original_expr_id: HirId,
+ then: impl FnOnce(&hir::Expr<'_>),
+ ) {
+ let mut parent = self.tcx.hir().get_parent_node(original_expr_id);
+ while let Some(node) = self.tcx.hir().find(parent) {
+ match node {
+ hir::Node::Expr(hir::Expr {
+ kind:
+ hir::ExprKind::Loop(
+ hir::Block {
+ expr:
+ Some(hir::Expr {
+ kind:
+ hir::ExprKind::Match(expr, ..) | hir::ExprKind::If(expr, ..),
+ ..
+ }),
+ ..
+ },
+ _,
+ hir::LoopSource::While,
+ _,
+ ),
+ ..
+ }) => {
+ // Check if our original expression is a child of the condition of a while loop
+ let expr_is_ancestor = std::iter::successors(Some(original_expr_id), |id| {
+ self.tcx.hir().find_parent_node(*id)
+ })
+ .take_while(|id| *id != parent)
+ .any(|id| id == expr.hir_id);
+ // if it is, then we have a situation like `while Some(0) = value.get(0) {`,
+ // where `while let` was more likely intended.
+ if expr_is_ancestor {
+ then(expr);
+ }
+ break;
+ }
+ hir::Node::Item(_)
+ | hir::Node::ImplItem(_)
+ | hir::Node::TraitItem(_)
+ | hir::Node::Crate(_) => break,
+ _ => {
+ parent = self.tcx.hir().get_parent_node(parent);
+ }
+ }
+ }
+ }
+
+ // A generic function for checking the 'then' and 'else' clauses in an 'if'
+ // or 'if-else' expression.
+ fn check_then_else(
+ &self,
+ cond_expr: &'tcx hir::Expr<'tcx>,
+ then_expr: &'tcx hir::Expr<'tcx>,
+ opt_else_expr: Option<&'tcx hir::Expr<'tcx>>,
+ sp: Span,
+ orig_expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let cond_ty = self.check_expr_has_type_or_error(cond_expr, self.tcx.types.bool, |_| {});
+
+ self.warn_if_unreachable(
+ cond_expr.hir_id,
+ then_expr.span,
+ "block in `if` or `while` expression",
+ );
+
+ let cond_diverges = self.diverges.get();
+ self.diverges.set(Diverges::Maybe);
+
+ let expected = orig_expected.adjust_for_branches(self);
+ let then_ty = self.check_expr_with_expectation(then_expr, expected);
+ let then_diverges = self.diverges.get();
+ self.diverges.set(Diverges::Maybe);
+
+ // We've already taken the expected type's preferences
+ // into account when typing the `then` branch. To figure
+ // out the initial shot at a LUB, we thus only consider
+ // `expected` if it represents a *hard* constraint
+ // (`only_has_type`); otherwise, we just go with a
+ // fresh type variable.
+ let coerce_to_ty = expected.coercion_target_type(self, sp);
+ let mut coerce: DynamicCoerceMany<'_> = CoerceMany::new(coerce_to_ty);
+
+ coerce.coerce(self, &self.misc(sp), then_expr, then_ty);
+
+ if let Some(else_expr) = opt_else_expr {
+ let else_ty = self.check_expr_with_expectation(else_expr, expected);
+ let else_diverges = self.diverges.get();
+
+ let opt_suggest_box_span = self.opt_suggest_box_span(else_ty, orig_expected);
+ let if_cause = self.if_cause(
+ sp,
+ cond_expr.span,
+ then_expr,
+ else_expr,
+ then_ty,
+ else_ty,
+ opt_suggest_box_span,
+ );
+
+ coerce.coerce(self, &if_cause, else_expr, else_ty);
+
+ // We won't diverge unless both branches do (or the condition does).
+ self.diverges.set(cond_diverges | then_diverges & else_diverges);
+ } else {
+ self.if_fallback_coercion(sp, then_expr, &mut coerce);
+
+ // If the condition is false we can't diverge.
+ self.diverges.set(cond_diverges);
+ }
+
+ let result_ty = coerce.complete(self);
+ if cond_ty.references_error() { self.tcx.ty_error() } else { result_ty }
+ }
+
+ /// Type check assignment expression `expr` of form `lhs = rhs`.
+ /// The expected type is `()` and is passed to the function for the purposes of diagnostics.
+ fn check_expr_assign(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ lhs: &'tcx hir::Expr<'tcx>,
+ rhs: &'tcx hir::Expr<'tcx>,
+ span: Span,
+ ) -> Ty<'tcx> {
+ let expected_ty = expected.coercion_target_type(self, expr.span);
+ if expected_ty == self.tcx.types.bool {
+ // The expected type is `bool` but this will result in `()` so we can reasonably
+ // say that the user intended to write `lhs == rhs` instead of `lhs = rhs`.
+ // The likely cause of this is `if foo = bar { .. }`.
+ let actual_ty = self.tcx.mk_unit();
+ let mut err = self.demand_suptype_diag(expr.span, expected_ty, actual_ty).unwrap();
+ let lhs_ty = self.check_expr(&lhs);
+ let rhs_ty = self.check_expr(&rhs);
+ let (applicability, eq) = if self.can_coerce(rhs_ty, lhs_ty) {
+ (Applicability::MachineApplicable, true)
+ } else {
+ (Applicability::MaybeIncorrect, false)
+ };
+ if !lhs.is_syntactic_place_expr()
+ && lhs.is_approximately_pattern()
+ && !matches!(lhs.kind, hir::ExprKind::Lit(_))
+ {
+ // Do not suggest `if let x = y` as `==` is way more likely to be the intention.
+ let hir = self.tcx.hir();
+ if let hir::Node::Expr(hir::Expr { kind: ExprKind::If { .. }, .. }) =
+ hir.get(hir.get_parent_node(hir.get_parent_node(expr.hir_id)))
+ {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_lo(),
+ "you might have meant to use pattern matching",
+ "let ",
+ applicability,
+ );
+ };
+ }
+ if eq {
+ err.span_suggestion_verbose(
+ span,
+ "you might have meant to compare for equality",
+ "==",
+ applicability,
+ );
+ }
+
+ // If the assignment expression itself is ill-formed, don't
+ // bother emitting another error
+ if lhs_ty.references_error() || rhs_ty.references_error() {
+ err.delay_as_bug()
+ } else {
+ err.emit();
+ }
+ return self.tcx.ty_error();
+ }
+
+ let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace);
+
+ let suggest_deref_binop = |err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ rhs_ty: Ty<'tcx>| {
+ if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
+ // Can only assign if the type is sized, so if `DerefMut` yields a type that is
+ // unsized, do not suggest dereferencing it.
+ let lhs_deref_ty_is_sized = self
+ .infcx
+ .type_implements_trait(
+ self.tcx.lang_items().sized_trait().unwrap(),
+ lhs_deref_ty,
+ ty::List::empty(),
+ self.param_env,
+ )
+ .may_apply();
+ if lhs_deref_ty_is_sized && self.can_coerce(rhs_ty, lhs_deref_ty) {
+ err.span_suggestion_verbose(
+ lhs.span.shrink_to_lo(),
+ "consider dereferencing here to assign to the mutably borrowed value",
+ "*",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ };
+
+ self.check_lhs_assignable(lhs, "E0070", span, |err| {
+ let rhs_ty = self.check_expr(&rhs);
+ suggest_deref_binop(err, rhs_ty);
+ });
+
+ // This is (basically) inlined `check_expr_coercable_to_type`, but we want
+ // to suggest an additional fixup here in `suggest_deref_binop`.
+ let rhs_ty = self.check_expr_with_hint(&rhs, lhs_ty);
+ if let (_, Some(mut diag)) =
+ self.demand_coerce_diag(rhs, rhs_ty, lhs_ty, Some(lhs), AllowTwoPhase::No)
+ {
+ suggest_deref_binop(&mut diag, rhs_ty);
+ diag.emit();
+ }
+
+ self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized);
+
+ if lhs_ty.references_error() || rhs_ty.references_error() {
+ self.tcx.ty_error()
+ } else {
+ self.tcx.mk_unit()
+ }
+ }
+
+ pub(super) fn check_expr_let(&self, let_expr: &'tcx hir::Let<'tcx>) -> Ty<'tcx> {
+ // For `let` statements, this is done in `check_stmt`.
+ let init = let_expr.init;
+ self.warn_if_unreachable(init.hir_id, init.span, "block in `let` expression");
+ // Otherwise, check exactly as a `let` statement.
+ self.check_decl(let_expr.into());
+ // But return a bool, since this is a boolean expression.
+ self.tcx.types.bool
+ }
+
+ fn check_expr_loop(
+ &self,
+ body: &'tcx hir::Block<'tcx>,
+ source: hir::LoopSource,
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let coerce = match source {
+ // you can only use break with a value from a normal `loop { }`
+ hir::LoopSource::Loop => {
+ let coerce_to = expected.coercion_target_type(self, body.span);
+ Some(CoerceMany::new(coerce_to))
+ }
+
+ hir::LoopSource::While | hir::LoopSource::ForLoop => None,
+ };
+
+ let ctxt = BreakableCtxt {
+ coerce,
+ may_break: false, // Will get updated if/when we find a `break`.
+ };
+
+ let (ctxt, ()) = self.with_breakable_ctxt(expr.hir_id, ctxt, || {
+ self.check_block_no_value(&body);
+ });
+
+ if ctxt.may_break {
+ // No way to know whether it's diverging because
+ // of a `break` or an outer `break` or `return`.
+ self.diverges.set(Diverges::Maybe);
+ }
+
+ // If we permit break with a value, then the result type is
+ // the LUB of the breaks (possibly `!` if none); else, it
+ // is `()`. This makes sense because infinite loops
+ // (which would have type `!`) are only possible iff we
+ // permit break with a value [1].
+ if ctxt.coerce.is_none() && !ctxt.may_break {
+ // [1]
+ self.tcx.sess.delay_span_bug(body.span, "no coercion, but loop may not break");
+ }
+ ctxt.coerce.map(|c| c.complete(self)).unwrap_or_else(|| self.tcx.mk_unit())
+ }
+
+ /// Checks a method call.
+ fn check_method_call(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ args: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let rcvr = &args[0];
+ let rcvr_t = self.check_expr(&rcvr);
+ // No need to check for bottom/error types -- the callee does that.
+ let rcvr_t = self.structurally_resolved_type(args[0].span, rcvr_t);
+ let span = segment.ident.span;
+
+ let method = match self.lookup_method(rcvr_t, segment, span, expr, rcvr, args) {
+ Ok(method) => {
+ // We could add a "consider `foo::<params>`" suggestion here, but I wasn't able to
+ // trigger this codepath in a way that causes `structurally_resolved_type` to emit an error.
+
+ self.write_method_call(expr.hir_id, method);
+ Ok(method)
+ }
+ Err(error) => {
+ if segment.ident.name != kw::Empty {
+ if let Some(mut err) = self.report_method_error(
+ span,
+ rcvr_t,
+ segment.ident,
+ SelfSource::MethodCall(&args[0]),
+ error,
+ Some(args),
+ ) {
+ err.emit();
+ }
+ }
+ Err(())
+ }
+ };
+
+ // Call the generic checker.
+ self.check_method_argument_types(
+ span,
+ expr,
+ method,
+ &args[1..],
+ DontTupleArguments,
+ expected,
+ )
+ }
+
+ fn check_expr_cast(
+ &self,
+ e: &'tcx hir::Expr<'tcx>,
+ t: &'tcx hir::Ty<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ // Find the type of `e`. Supply hints based on the type we are casting to,
+ // if appropriate.
+ let t_cast = self.to_ty_saving_user_provided_ty(t);
+ let t_cast = self.resolve_vars_if_possible(t_cast);
+ let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast));
+ let t_expr = self.resolve_vars_if_possible(t_expr);
+
+ // Eagerly check for some obvious errors.
+ if t_expr.references_error() || t_cast.references_error() {
+ self.tcx.ty_error()
+ } else {
+ // Defer other checks until we're done type checking.
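+ // (E.g. `x as *const _` cannot be validated until `_` has been
+ // inferred.)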
+ let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
+ match cast::CastCheck::new(self, e, t_expr, t_cast, t.span, expr.span) {
+ Ok(cast_check) => {
+ debug!(
+ "check_expr_cast: deferring cast from {:?} to {:?}: {:?}",
+ t_cast, t_expr, cast_check,
+ );
+ deferred_cast_checks.push(cast_check);
+ t_cast
+ }
+ Err(_) => self.tcx.ty_error(),
+ }
+ }
+ }
+
+ fn check_expr_array(
+ &self,
+ args: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let element_ty = if !args.is_empty() {
+ let coerce_to = expected
+ .to_option(self)
+ .and_then(|uty| match *uty.kind() {
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
+ _ => None,
+ })
+ .unwrap_or_else(|| {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: expr.span,
+ })
+ });
+ let mut coerce = CoerceMany::with_coercion_sites(coerce_to, args);
+ assert_eq!(self.diverges.get(), Diverges::Maybe);
+ for e in args {
+ let e_ty = self.check_expr_with_hint(e, coerce_to);
+ let cause = self.misc(e.span);
+ coerce.coerce(self, &cause, e, e_ty);
+ }
+ coerce.complete(self)
+ } else {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: expr.span,
+ })
+ };
+ self.tcx.mk_array(element_ty, args.len() as u64)
+ }
+
+ fn check_expr_const_block(
+ &self,
+ anon_const: &'tcx hir::AnonConst,
+ expected: Expectation<'tcx>,
+ _expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let body = self.tcx.hir().body(anon_const.body);
+
+ // Create a new function context.
+ let fcx = FnCtxt::new(self, self.param_env.with_const(), body.value.hir_id);
+ crate::check::GatherLocalsVisitor::new(&fcx).visit_body(body);
+
+ let ty = fcx.check_expr_with_expectation(&body.value, expected);
+ fcx.require_type_is_sized(ty, body.value.span, traits::ConstSized);
+ fcx.write_ty(anon_const.hir_id, ty);
+ ty
+ }
+
+ fn check_expr_repeat(
+ &self,
+ element: &'tcx hir::Expr<'tcx>,
+ count: &'tcx hir::ArrayLen,
+ expected: Expectation<'tcx>,
+ _expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let count = self.array_length_to_const(count);
+
+ let uty = match expected {
+ ExpectHasType(uty) => match *uty.kind() {
+ ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
+ _ => None,
+ },
+ _ => None,
+ };
+
+ let (element_ty, t) = match uty {
+ Some(uty) => {
+ self.check_expr_coercable_to_type(&element, uty, None);
+ (uty, uty)
+ }
+ None => {
+ let ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: element.span,
+ });
+ let element_ty = self.check_expr_has_type_or_error(&element, ty, |_| {});
+ (element_ty, ty)
+ }
+ };
+
+ if element_ty.references_error() {
+ return tcx.ty_error();
+ }
+
+ self.check_repeat_element_needs_copy_bound(element, count, element_ty);
+
+ tcx.mk_ty(ty::Array(t, count))
+ }
+
+ fn check_repeat_element_needs_copy_bound(
+ &self,
+ element: &hir::Expr<'_>,
+ count: ty::Const<'tcx>,
+ element_ty: Ty<'tcx>,
+ ) {
+ let tcx = self.tcx;
+ // Actual constants as the repeat element get inserted repeatedly instead of getting copied via Copy.
+ match &element.kind {
+ hir::ExprKind::ConstBlock(..) => return,
+ hir::ExprKind::Path(qpath) => {
+ let res = self.typeck_results.borrow().qpath_res(qpath, element.hir_id);
+ if let Res::Def(DefKind::Const | DefKind::AssocConst | DefKind::AnonConst, _) = res
+ {
+ return;
+ }
+ }
+ _ => {}
+ }
+ // If someone calls a const fn, they can extract that call out into a separate constant (or a const
+ // block in the future), so we check for that and mention it in the diagnostic. Does not affect typeck.
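+ // E.g. for `[foo(); 2]` where `foo` is a `const fn`, the user can
+ // write `const X: T = foo();` and use `[X; 2]` instead.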
+ let is_const_fn = match element.kind {
+ hir::ExprKind::Call(func, _args) => match *self.node_ty(func.hir_id).kind() {
+ ty::FnDef(def_id, _) => tcx.is_const_fn(def_id),
+ _ => false,
+ },
+ _ => false,
+ };
+
+ // If the length is 0, we don't create any elements, so we don't copy any. If the length is 1, we
+ // don't copy that one element, we move it. Only check for Copy if the length is larger.
+ if count.try_eval_usize(tcx, self.param_env).map_or(true, |len| len > 1) {
+ let lang_item = self.tcx.require_lang_item(LangItem::Copy, None);
+ let code = traits::ObligationCauseCode::RepeatElementCopy { is_const_fn };
+ self.require_type_meets(element_ty, element.span, code, lang_item);
+ }
+ }
+
+ fn check_expr_tuple(
+ &self,
+ elts: &'tcx [hir::Expr<'tcx>],
+ expected: Expectation<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let flds = expected.only_has_type(self).and_then(|ty| {
+ let ty = self.resolve_vars_with_obligations(ty);
+ match ty.kind() {
+ ty::Tuple(flds) => Some(&flds[..]),
+ _ => None,
+ }
+ });
+
+ let elt_ts_iter = elts.iter().enumerate().map(|(i, e)| match flds {
+ Some(fs) if i < fs.len() => {
+ let ety = fs[i];
+ self.check_expr_coercable_to_type(&e, ety, None);
+ ety
+ }
+ _ => self.check_expr_with_expectation(&e, NoExpectation),
+ });
+ let tuple = self.tcx.mk_tup(elt_ts_iter);
+ if tuple.references_error() {
+ self.tcx.ty_error()
+ } else {
+ self.require_type_is_sized(tuple, expr.span, traits::TupleInitializerSized);
+ tuple
+ }
+ }
+
+ fn check_expr_struct(
+ &self,
+ expr: &hir::Expr<'_>,
+ expected: Expectation<'tcx>,
+ qpath: &QPath<'_>,
+ fields: &'tcx [hir::ExprField<'tcx>],
+ base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
+ ) -> Ty<'tcx> {
+ // Find the relevant variant
+ let Some((variant, adt_ty)) = self.check_struct_path(qpath, expr.hir_id) else {
+ self.check_struct_fields_on_error(fields, base_expr);
+ return self.tcx.ty_error();
+ };
+
+ // Prohibit struct expressions when non-exhaustive flag is set.
+ let adt = adt_ty.ty_adt_def().expect("`check_struct_path` returned non-ADT type");
+ if !adt.did().is_local() && variant.is_field_list_non_exhaustive() {
+ self.tcx
+ .sess
+ .emit_err(StructExprNonExhaustive { span: expr.span, what: adt.variant_descr() });
+ }
+
+ self.check_expr_struct_fields(
+ adt_ty,
+ expected,
+ expr.hir_id,
+ qpath.span(),
+ variant,
+ fields,
+ base_expr,
+ expr.span,
+ );
+
+ self.require_type_is_sized(adt_ty, expr.span, traits::StructInitializerSized);
+ adt_ty
+ }
+
+ fn check_expr_struct_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ expected: Expectation<'tcx>,
+ expr_id: hir::HirId,
+ span: Span,
+ variant: &'tcx ty::VariantDef,
+ ast_fields: &'tcx [hir::ExprField<'tcx>],
+ base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
+ expr_span: Span,
+ ) {
+ let tcx = self.tcx;
+
+ let expected_inputs =
+ self.expected_inputs_for_expected_output(span, expected, adt_ty, &[adt_ty]);
+ let adt_ty_hint = if let Some(expected_inputs) = expected_inputs {
+ expected_inputs.get(0).cloned().unwrap_or(adt_ty)
+ } else {
+ adt_ty
+ };
+ // Re-link the regions that `expected_inputs_for_expected_output` (EIfEO) can erase.
+ self.demand_eqtype(span, adt_ty_hint, adt_ty);
+
+ let ty::Adt(adt, substs) = adt_ty.kind() else {
+ span_bug!(span, "non-ADT passed to check_expr_struct_fields");
+ };
+ let adt_kind = adt.adt_kind();
+
+ let mut remaining_fields = variant
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, field)| (field.ident(tcx).normalize_to_macros_2_0(), (i, field)))
+ .collect::<FxHashMap<_, _>>();
+
+ let mut seen_fields = FxHashMap::default();
+
+ let mut error_happened = false;
+
+ // Type-check each field.
+ for field in ast_fields {
+ let ident = tcx.adjust_ident(field.ident, variant.def_id);
+ let field_type = if let Some((i, v_field)) = remaining_fields.remove(&ident) {
+ seen_fields.insert(ident, field.span);
+ self.write_field_index(field.hir_id, i);
+
+ // We don't look at stability attributes on
+ // struct-like enums (yet...), but it's definitely not
+ // a bug to have constructed one.
+ if adt_kind != AdtKind::Enum {
+ tcx.check_stability(v_field.did, Some(expr_id), field.span, None);
+ }
+
+ self.field_ty(field.span, v_field, substs)
+ } else {
+ error_happened = true;
+ if let Some(prev_span) = seen_fields.get(&ident) {
+ tcx.sess.emit_err(FieldMultiplySpecifiedInInitializer {
+ span: field.ident.span,
+ prev_span: *prev_span,
+ ident,
+ });
+ } else {
+ self.report_unknown_field(
+ adt_ty,
+ variant,
+ field,
+ ast_fields,
+ adt.variant_descr(),
+ expr_span,
+ );
+ }
+
+ tcx.ty_error()
+ };
+
+ // Make sure to give a type to the field even if there's
+ // an error, so we can continue type-checking.
+ self.check_expr_coercable_to_type(&field.expr, field_type, None);
+ }
+
+ // Make sure the programmer specified correct number of fields.
+ if adt_kind == AdtKind::Union {
+ if ast_fields.len() != 1 {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0784,
+ "union expressions should have exactly one field",
+ )
+ .emit();
+ }
+ }
+
+ // If check_expr_struct_fields hit an error, do not attempt to populate
+ // the fields with the base_expr. This could cause us to hit errors later
+ // when certain fields are assumed to exist that in fact do not.
+ if error_happened {
+ return;
+ }
+
+ if let Some(base_expr) = base_expr {
+ // FIXME: We are currently creating two branches here in order to maintain
+ // consistency. But they should be merged as much as possible.
+ let fru_tys = if self.tcx.features().type_changing_struct_update {
+ if adt.is_struct() {
+ // Make some fresh substitutions for our ADT type.
+ let fresh_substs = self.fresh_substs_for_item(base_expr.span, adt.did());
+ // We do subtyping on the FRU fields first, so we can
+ // learn exactly what types we expect the base expr
+ // needs constrained to be compatible with the struct
+ // type we expect from the expectation value.
+ let fru_tys = variant
+ .fields
+ .iter()
+ .map(|f| {
+ let fru_ty = self.normalize_associated_types_in(
+ expr_span,
+ self.field_ty(base_expr.span, f, fresh_substs),
+ );
+ let ident = self.tcx.adjust_ident(f.ident(self.tcx), variant.def_id);
+ if let Some(_) = remaining_fields.remove(&ident) {
+ let target_ty = self.field_ty(base_expr.span, f, substs);
+ let cause = self.misc(base_expr.span);
+ match self.at(&cause, self.param_env).sup(target_ty, fru_ty) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations)
+ }
+ Err(_) => {
+ // This should never happen, since we're just subtyping the
+ // remaining_fields, but it's fine to emit this, I guess.
+ self.report_mismatched_types(
+ &cause,
+ target_ty,
+ fru_ty,
+ FieldMisMatch(variant.name, ident.name),
+ )
+ .emit();
+ }
+ }
+ }
+ self.resolve_vars_if_possible(fru_ty)
+ })
+ .collect();
+ // The use of fresh substs that we have subtyped against
+ // our base ADT type's fields allows us to guide inference
+ // along so that, e.g.
+ // ```
+ // MyStruct<'a, F1, F2, const C: usize> {
+ // f: F1,
+ // // Other fields that reference `'a`, `F2`, and `C`
+ // }
+ //
+ // let x = MyStruct {
+ // f: 1usize,
+ // ..other_struct
+ // };
+ // ```
+ // will have the `other_struct` expression constrained to
+ // `MyStruct<'a, _, F2, C>`, as opposed to just `_`...
+ // This is important to allow coercions to happen in
+ // `other_struct` itself. See `coerce-in-base-expr.rs`.
+ let fresh_base_ty = self.tcx.mk_adt(*adt, fresh_substs);
+ self.check_expr_has_type_or_error(
+ base_expr,
+ self.resolve_vars_if_possible(fresh_base_ty),
+ |_| {},
+ );
+ fru_tys
+ } else {
+ // Check the base_expr, regardless of a bad expected adt_ty, so we can get
+ // type errors on that expression, too.
+ self.check_expr(base_expr);
+ self.tcx
+ .sess
+ .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span });
+ return;
+ }
+ } else {
+ self.check_expr_has_type_or_error(base_expr, adt_ty, |_| {
+ let base_ty = self.typeck_results.borrow().expr_ty(*base_expr);
+ let same_adt = match (adt_ty.kind(), base_ty.kind()) {
+ (ty::Adt(adt, _), ty::Adt(base_adt, _)) if adt == base_adt => true,
+ _ => false,
+ };
+ if self.tcx.sess.is_nightly_build() && same_adt {
+ feature_err(
+ &self.tcx.sess.parse_sess,
+ sym::type_changing_struct_update,
+ base_expr.span,
+ "type changing struct updating is experimental",
+ )
+ .emit();
+ }
+ });
+ match adt_ty.kind() {
+ ty::Adt(adt, substs) if adt.is_struct() => variant
+ .fields
+ .iter()
+ .map(|f| {
+ self.normalize_associated_types_in(expr_span, f.ty(self.tcx, substs))
+ })
+ .collect(),
+ _ => {
+ self.tcx
+ .sess
+ .emit_err(FunctionalRecordUpdateOnNonStruct { span: base_expr.span });
+ return;
+ }
+ }
+ };
+ self.typeck_results.borrow_mut().fru_field_types_mut().insert(expr_id, fru_tys);
+ } else if adt_kind != AdtKind::Union && !remaining_fields.is_empty() {
+ debug!(?remaining_fields);
+ let private_fields: Vec<&ty::FieldDef> = variant
+ .fields
+ .iter()
+ .filter(|field| {
+ !field.vis.is_accessible_from(tcx.parent_module(expr_id).to_def_id(), tcx)
+ })
+ .collect();
+
+ if !private_fields.is_empty() {
+ self.report_private_fields(adt_ty, span, private_fields, ast_fields);
+ } else {
+ self.report_missing_fields(
+ adt_ty,
+ span,
+ remaining_fields,
+ variant,
+ ast_fields,
+ substs,
+ );
+ }
+ }
+ }
+
+ fn check_struct_fields_on_error(
+ &self,
+ fields: &'tcx [hir::ExprField<'tcx>],
+ base_expr: &'tcx Option<&'tcx hir::Expr<'tcx>>,
+ ) {
+ for field in fields {
+ self.check_expr(&field.expr);
+ }
+ if let Some(base) = *base_expr {
+ self.check_expr(&base);
+ }
+ }
+
+ /// Report an error for a struct field expression when there are fields which aren't provided.
+ ///
+ /// ```text
+ /// error: missing field `you_can_use_this_field` in initializer of `foo::Foo`
+ /// --> src/main.rs:8:5
+ /// |
+ /// 8 | foo::Foo {};
+ /// | ^^^^^^^^ missing `you_can_use_this_field`
+ ///
+ /// error: aborting due to previous error
+ /// ```
+ fn report_missing_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ span: Span,
+ remaining_fields: FxHashMap<Ident, (usize, &ty::FieldDef)>,
+ variant: &'tcx ty::VariantDef,
+ ast_fields: &'tcx [hir::ExprField<'tcx>],
+ substs: SubstsRef<'tcx>,
+ ) {
+ let len = remaining_fields.len();
+
+ let mut displayable_field_names: Vec<&str> =
+ remaining_fields.keys().map(|ident| ident.as_str()).collect();
+ // sorting &str primitives here, sort_unstable is ok
+ displayable_field_names.sort_unstable();
+
+ let mut truncated_fields_error = String::new();
+ let remaining_fields_names = match &displayable_field_names[..] {
+ [field1] => format!("`{}`", field1),
+ [field1, field2] => format!("`{field1}` and `{field2}`"),
+ [field1, field2, field3] => format!("`{field1}`, `{field2}` and `{field3}`"),
+ _ => {
+ truncated_fields_error =
+ format!(" and {} other field{}", len - 3, pluralize!(len - 3));
+ displayable_field_names
+ .iter()
+ .take(3)
+ .map(|n| format!("`{n}`"))
+ .collect::<Vec<_>>()
+ .join(", ")
+ }
+ };
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0063,
+ "missing field{} {}{} in initializer of `{}`",
+ pluralize!(len),
+ remaining_fields_names,
+ truncated_fields_error,
+ adt_ty
+ );
+ err.span_label(span, format!("missing {remaining_fields_names}{truncated_fields_error}"));
+
+ // If the last field is a range literal, but it isn't supposed to be, then they probably
+ // meant to use functional update syntax.
+ //
+ // I don't use 'is_range_literal' because only double-sided, half-open ranges count.
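+ // E.g. for `Foo { a: 0, b: 1..Default::default() }` where the user meant
+ // `Foo { a: 0, b: 1, ..Default::default() }`, suggest inserting the comma.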
+ if let Some((
+ last,
+ ExprKind::Struct(
+ QPath::LangItem(LangItem::Range, ..),
+ &[ref range_start, ref range_end],
+ _,
+ ),
+ )) = ast_fields.last().map(|last| (last, &last.expr.kind)) &&
+ let variant_field =
+ variant.fields.iter().find(|field| field.ident(self.tcx) == last.ident) &&
+ let range_def_id = self.tcx.lang_items().range_struct() &&
+ variant_field
+ .and_then(|field| field.ty(self.tcx, substs).ty_adt_def())
+ .map(|adt| adt.did())
+ != range_def_id
+ {
+ let instead = self
+ .tcx
+ .sess
+ .source_map()
+ .span_to_snippet(range_end.expr.span)
+ .map(|s| format!(" from `{s}`"))
+ .unwrap_or_default();
+ err.span_suggestion(
+ range_start.span.shrink_to_hi(),
+ &format!("to set the remaining fields{instead}, separate the last named field with a comma"),
+ ",",
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ err.emit();
+ }
+
+ /// Report an error for a struct field expression when there are invisible fields.
+ ///
+ /// ```text
+ /// error: cannot construct `Foo` with struct literal syntax due to private fields
+ /// --> src/main.rs:8:5
+ /// |
+ /// 8 | foo::Foo {};
+ /// | ^^^^^^^^
+ ///
+ /// error: aborting due to previous error
+ /// ```
+ fn report_private_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ span: Span,
+ private_fields: Vec<&ty::FieldDef>,
+ used_fields: &'tcx [hir::ExprField<'tcx>],
+ ) {
+ let mut err = self.tcx.sess.struct_span_err(
+ span,
+ &format!(
+ "cannot construct `{adt_ty}` with struct literal syntax due to private fields",
+ ),
+ );
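+ // Split the private fields into those the user wrote in the literal
+ // (label their spans) and the rest (summarize them in a note below).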
+ let (used_private_fields, remaining_private_fields): (
+ Vec<(Symbol, Span, bool)>,
+ Vec<(Symbol, Span, bool)>,
+ ) = private_fields
+ .iter()
+ .map(|field| {
+ match used_fields.iter().find(|used_field| field.name == used_field.ident.name) {
+ Some(used_field) => (field.name, used_field.span, true),
+ None => (field.name, self.tcx.def_span(field.did), false),
+ }
+ })
+ .partition(|field| field.2);
+ err.span_labels(used_private_fields.iter().map(|(_, span, _)| *span), "private field");
+ if !remaining_private_fields.is_empty() {
+ let remaining_private_fields_len = remaining_private_fields.len();
+ let names = match &remaining_private_fields
+ .iter()
+ .map(|(name, _, _)| name)
+ .collect::<Vec<_>>()[..]
+ {
+ _ if remaining_private_fields_len > 6 => String::new(),
+ [name] => format!("`{name}` "),
+ [names @ .., last] => {
+ let names = names.iter().map(|name| format!("`{name}`")).collect::<Vec<_>>();
+ format!("{} and `{last}` ", names.join(", "))
+ }
+ [] => unreachable!(),
+ };
+ err.note(format!(
+ "... and other private field{s} {names}that {were} not provided",
+ s = pluralize!(remaining_private_fields_len),
+ were = pluralize!("was", remaining_private_fields_len),
+ ));
+ }
+ err.emit();
+ }
+
+ fn report_unknown_field(
+ &self,
+ ty: Ty<'tcx>,
+ variant: &'tcx ty::VariantDef,
+ field: &hir::ExprField<'_>,
+ skip_fields: &[hir::ExprField<'_>],
+ kind_name: &str,
+ expr_span: Span,
+ ) {
+ if variant.is_recovered() {
+ self.set_tainted_by_errors();
+ return;
+ }
+ let mut err = self.type_error_struct_with_diag(
+ field.ident.span,
+ |actual| match ty.kind() {
+ ty::Adt(adt, ..) if adt.is_enum() => struct_span_err!(
+ self.tcx.sess,
+ field.ident.span,
+ E0559,
+ "{} `{}::{}` has no field named `{}`",
+ kind_name,
+ actual,
+ variant.name,
+ field.ident
+ ),
+ _ => struct_span_err!(
+ self.tcx.sess,
+ field.ident.span,
+ E0560,
+ "{} `{}` has no field named `{}`",
+ kind_name,
+ actual,
+ field.ident
+ ),
+ },
+ ty,
+ );
+
+ let variant_ident_span = self.tcx.def_ident_span(variant.def_id).unwrap();
+ match variant.ctor_kind {
+ CtorKind::Fn => match ty.kind() {
+ ty::Adt(adt, ..) if adt.is_enum() => {
+ err.span_label(
+ variant_ident_span,
+ format!(
+ "`{adt}::{variant}` defined here",
+ adt = ty,
+ variant = variant.name,
+ ),
+ );
+ err.span_label(field.ident.span, "field does not exist");
+ err.span_suggestion_verbose(
+ expr_span,
+ &format!(
+ "`{adt}::{variant}` is a tuple {kind_name}, use the appropriate syntax",
+ adt = ty,
+ variant = variant.name,
+ ),
+ format!(
+ "{adt}::{variant}(/* fields */)",
+ adt = ty,
+ variant = variant.name,
+ ),
+ Applicability::HasPlaceholders,
+ );
+ }
+ _ => {
+ err.span_label(variant_ident_span, format!("`{adt}` defined here", adt = ty));
+ err.span_label(field.ident.span, "field does not exist");
+ err.span_suggestion_verbose(
+ expr_span,
+ &format!(
+ "`{adt}` is a tuple {kind_name}, use the appropriate syntax",
+ adt = ty,
+ kind_name = kind_name,
+ ),
+ format!("{adt}(/* fields */)", adt = ty),
+ Applicability::HasPlaceholders,
+ );
+ }
+ },
+ _ => {
+ // prevent all specified fields from being suggested
+ let skip_fields = skip_fields.iter().map(|x| x.ident.name);
+ if let Some(field_name) = self.suggest_field_name(
+ variant,
+ field.ident.name,
+ skip_fields.collect(),
+ expr_span,
+ ) {
+ err.span_suggestion(
+ field.ident.span,
+ "a field with a similar name exists",
+ field_name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ match ty.kind() {
+ ty::Adt(adt, ..) => {
+ if adt.is_enum() {
+ err.span_label(
+ field.ident.span,
+ format!("`{}::{}` does not have this field", ty, variant.name),
+ );
+ } else {
+ err.span_label(
+ field.ident.span,
+ format!("`{ty}` does not have this field"),
+ );
+ }
+ let available_field_names =
+ self.available_field_names(variant, expr_span);
+ if !available_field_names.is_empty() {
+ err.note(&format!(
+ "available fields are: {}",
+ self.name_series_display(available_field_names)
+ ));
+ }
+ }
+ _ => bug!("non-ADT passed to report_unknown_field"),
+ }
+ };
+ }
+ }
+ err.emit();
+ }
+
+ // Return a hint about the closest match in field names
+ fn suggest_field_name(
+ &self,
+ variant: &'tcx ty::VariantDef,
+ field: Symbol,
+ skip: Vec<Symbol>,
+ // The span where stability will be checked
+ span: Span,
+ ) -> Option<Symbol> {
+ let names = variant
+ .fields
+ .iter()
+ .filter_map(|field| {
+ // Ignore already-set fields, private fields from non-local crates,
+ // and unstable fields.
+ if skip.iter().any(|&x| x == field.name)
+ || (!variant.def_id.is_local() && !field.vis.is_public())
+ || matches!(
+ self.tcx.eval_stability(field.did, None, span, None),
+ stability::EvalResult::Deny { .. }
+ )
+ {
+ None
+ } else {
+ Some(field.name)
+ }
+ })
+ .collect::<Vec<Symbol>>();
+
+ find_best_match_for_name(&names, field, None)
+ }
+
+ fn available_field_names(
+ &self,
+ variant: &'tcx ty::VariantDef,
+ access_span: Span,
+ ) -> Vec<Symbol> {
+ variant
+ .fields
+ .iter()
+ .filter(|field| {
+ let def_scope = self
+ .tcx
+ .adjust_ident_and_get_scope(field.ident(self.tcx), variant.def_id, self.body_id)
+ .1;
+ field.vis.is_accessible_from(def_scope, self.tcx)
+ && !matches!(
+ self.tcx.eval_stability(field.did, None, access_span, None),
+ stability::EvalResult::Deny { .. }
+ )
+ })
+ .filter(|field| !self.tcx.is_doc_hidden(field.did))
+ .map(|field| field.name)
+ .collect()
+ }
+
+ fn name_series_display(&self, names: Vec<Symbol>) -> String {
+ // dynamic limit, to never omit just one field
+ let limit = if names.len() == 6 { 6 } else { 5 };
+ let mut display =
+ names.iter().take(limit).map(|n| format!("`{}`", n)).collect::<Vec<_>>().join(", ");
+ if names.len() > limit {
+ display = format!("{} ... and {} others", display, names.len() - limit);
+ }
+ display
+ }
+
+ // Check field access expressions
+ fn check_field(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ base: &'tcx hir::Expr<'tcx>,
+ field: Ident,
+ ) -> Ty<'tcx> {
+ debug!("check_field(expr: {:?}, base: {:?}, field: {:?})", expr, base, field);
+ let expr_t = self.check_expr(base);
+ let expr_t = self.structurally_resolved_type(base.span, expr_t);
+ let mut private_candidate = None;
+ let mut autoderef = self.autoderef(expr.span, expr_t);
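+ // Walk the autoderef chain of the base type, looking for a struct
+ // or tuple that actually has the named field.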
+ while let Some((base_t, _)) = autoderef.next() {
+ debug!("base_t: {:?}", base_t);
+ match base_t.kind() {
+ ty::Adt(base_def, substs) if !base_def.is_enum() => {
+ debug!("struct named {:?}", base_t);
+ let (ident, def_scope) =
+ self.tcx.adjust_ident_and_get_scope(field, base_def.did(), self.body_id);
+ let fields = &base_def.non_enum_variant().fields;
+ if let Some(index) = fields
+ .iter()
+ .position(|f| f.ident(self.tcx).normalize_to_macros_2_0() == ident)
+ {
+ let field = &fields[index];
+ let field_ty = self.field_ty(expr.span, field, substs);
+ // Save the index of all fields regardless of their visibility in case
+ // of error recovery.
+ self.write_field_index(expr.hir_id, index);
+ let adjustments = self.adjust_steps(&autoderef);
+ if field.vis.is_accessible_from(def_scope, self.tcx) {
+ self.apply_adjustments(base, adjustments);
+ self.register_predicates(autoderef.into_obligations());
+
+ self.tcx.check_stability(field.did, Some(expr.hir_id), expr.span, None);
+ return field_ty;
+ }
+ private_candidate = Some((adjustments, base_def.did(), field_ty));
+ }
+ }
+ ty::Tuple(tys) => {
+ let fstr = field.as_str();
+ if let Ok(index) = fstr.parse::<usize>() {
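+ // Only accept indices that round-trip through `usize`: this rejects
+ // spellings like `tuple.01` or `tuple.+1`, which parse as numbers
+ // but are not valid tuple field accesses.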
+ if fstr == index.to_string() {
+ if let Some(&field_ty) = tys.get(index) {
+ let adjustments = self.adjust_steps(&autoderef);
+ self.apply_adjustments(base, adjustments);
+ self.register_predicates(autoderef.into_obligations());
+
+ self.write_field_index(expr.hir_id, index);
+ return field_ty;
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
+
+ if let Some((adjustments, did, field_ty)) = private_candidate {
+        // (#90483) Apply adjustments to prevent `ExprUseVisitor` from
+        // creating an erroneous projection.
+ self.apply_adjustments(base, adjustments);
+ self.ban_private_field_access(expr, expr_t, field, did);
+ return field_ty;
+ }
+
+ if field.name == kw::Empty {
+ } else if self.method_exists(field, expr_t, expr.hir_id, true) {
+ self.ban_take_value_of_method(expr, expr_t, field);
+ } else if !expr_t.is_primitive_ty() {
+ self.ban_nonexisting_field(field, base, expr, expr_t);
+ } else {
+ let field_name = field.to_string();
+ let mut err = type_error_struct!(
+ self.tcx().sess,
+ field.span,
+ expr_t,
+ E0610,
+ "`{expr_t}` is a primitive type and therefore doesn't have fields",
+ );
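+            // `is_valid_suffix` accepts field names that, written after the dot
+            // of an integer literal, would spell (part of) a float literal: a
+            // bare `f32`/`f64` suffix, or an exponent such as `e5` or `e10f32`.
+            // E.g. `1.e5` is then repaired below by suggesting `1.0e5`.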
+ let is_valid_suffix = |field: String| {
+ if field == "f32" || field == "f64" {
+ return true;
+ }
+ let mut chars = field.chars().peekable();
+ match chars.peek() {
+ Some('e') | Some('E') => {
+ chars.next();
+ if let Some(c) = chars.peek()
+ && !c.is_numeric() && *c != '-' && *c != '+'
+ {
+ return false;
+ }
+ while let Some(c) = chars.peek() {
+ if !c.is_numeric() {
+ break;
+ }
+ chars.next();
+ }
+ }
+ _ => (),
+ }
+ let suffix = chars.collect::<String>();
+ suffix.is_empty() || suffix == "f32" || suffix == "f64"
+ };
+ if let ty::Infer(ty::IntVar(_)) = expr_t.kind()
+ && let ExprKind::Lit(Spanned {
+ node: ast::LitKind::Int(_, ast::LitIntType::Unsuffixed),
+ ..
+ }) = base.kind
+ && !base.span.from_expansion()
+ && is_valid_suffix(field_name)
+ {
+ err.span_suggestion_verbose(
+ field.span.shrink_to_lo(),
+ "If the number is meant to be a floating point number, consider adding a `0` after the period",
+ '0',
+ Applicability::MaybeIncorrect,
+ );
+ }
+ err.emit();
+ }
+
+ self.tcx().ty_error()
+ }
+
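+    /// Invoked when a field is accessed on a tuple struct/variant *constructor*
+    /// rather than on a constructed value, e.g. (illustrative) `Wrapper.0`
+    /// where `Wrapper` is `struct Wrapper(u32);`; calling the constructor
+    /// first is then suggested.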
+ fn check_call_constructor<G: EmissionGuarantee>(
+ &self,
+ err: &mut DiagnosticBuilder<'_, G>,
+ base: &'tcx hir::Expr<'tcx>,
+ def_id: DefId,
+ ) {
+ if let Some(local_id) = def_id.as_local() {
+ let hir_id = self.tcx.hir().local_def_id_to_hir_id(local_id);
+ let node = self.tcx.hir().get(hir_id);
+
+ if let Some(fields) = node.tuple_fields() {
+ let kind = match self.tcx.opt_def_kind(local_id) {
+ Some(DefKind::Ctor(of, _)) => of,
+ _ => return,
+ };
+
+ suggest_call_constructor(base.span, kind, fields.len(), err);
+ }
+ } else {
+            // The logic here isn't smart, but `associated_item_def_ids`
+            // doesn't work nicely on local items.
+ if let DefKind::Ctor(of, _) = self.tcx.def_kind(def_id) {
+ let parent_def_id = self.tcx.parent(def_id);
+ let fields = self.tcx.associated_item_def_ids(parent_def_id);
+ suggest_call_constructor(base.span, of, fields.len(), err);
+ }
+ }
+ }
+
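+    /// Illustrative example (hypothetical names):
+    ///
+    /// ```ignore (illustrative)
+    /// struct Reply { code: u16 }
+    /// async fn fetch() -> Reply { Reply { code: 200 } }
+    /// async fn caller() {
+    ///     let a = fetch().code;       // field not available on `impl Future`
+    ///     let b = fetch().await.code; // suggested: `.await` first
+    /// }
+    /// ```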
+ fn suggest_await_on_field_access(
+ &self,
+ err: &mut Diagnostic,
+ field_ident: Ident,
+ base: &'tcx hir::Expr<'tcx>,
+ ty: Ty<'tcx>,
+ ) {
+ let output_ty = match self.get_impl_future_output_ty(ty) {
+ Some(output_ty) => self.resolve_vars_if_possible(output_ty),
+ _ => return,
+ };
+ let mut add_label = true;
+ if let ty::Adt(def, _) = output_ty.skip_binder().kind() {
+ // no field access on enum type
+ if !def.is_enum() {
+ if def
+ .non_enum_variant()
+ .fields
+ .iter()
+ .any(|field| field.ident(self.tcx) == field_ident)
+ {
+ add_label = false;
+ err.span_label(
+ field_ident.span,
+ "field not available in `impl Future`, but it is available in its `Output`",
+ );
+ err.span_suggestion_verbose(
+ base.span.shrink_to_hi(),
+ "consider `await`ing on the `Future` and access the field of its `Output`",
+ ".await",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ if add_label {
+ err.span_label(field_ident.span, &format!("field not found in `{ty}`"));
+ }
+ }
+
+ fn ban_nonexisting_field(
+ &self,
+ field: Ident,
+ base: &'tcx hir::Expr<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ expr_t: Ty<'tcx>,
+ ) {
+ debug!(
+ "ban_nonexisting_field: field={:?}, base={:?}, expr={:?}, expr_ty={:?}",
+ field, base, expr, expr_t
+ );
+ let mut err = self.no_such_field_err(field, expr_t, base.hir_id);
+
+ match *expr_t.peel_refs().kind() {
+ ty::Array(_, len) => {
+ self.maybe_suggest_array_indexing(&mut err, expr, base, field, len);
+ }
+ ty::RawPtr(..) => {
+ self.suggest_first_deref_field(&mut err, expr, base, field);
+ }
+ ty::Adt(def, _) if !def.is_enum() => {
+ self.suggest_fields_on_recordish(&mut err, def, field, expr.span);
+ }
+ ty::Param(param_ty) => {
+ self.point_at_param_definition(&mut err, param_ty);
+ }
+ ty::Opaque(_, _) => {
+ self.suggest_await_on_field_access(&mut err, field, base, expr_t.peel_refs());
+ }
+ ty::FnDef(def_id, _) => {
+ self.check_call_constructor(&mut err, base, def_id);
+ }
+ _ => {}
+ }
+
+ if field.name == kw::Await {
+            // We know by construction that `<expr>.await` is either on Rust 2015
+            // or results in `ExprKind::Await`. Suggest switching the edition to 2018 or later.
+ err.note("to `.await` a `Future`, switch to Rust 2018 or later");
+ err.help_use_latest_edition();
+ }
+
+ err.emit();
+ }
+
+ fn ban_private_field_access(
+ &self,
+ expr: &hir::Expr<'_>,
+ expr_t: Ty<'tcx>,
+ field: Ident,
+ base_did: DefId,
+ ) {
+ let struct_path = self.tcx().def_path_str(base_did);
+ let kind_name = self.tcx().def_kind(base_did).descr(base_did);
+ let mut err = struct_span_err!(
+ self.tcx().sess,
+ field.span,
+ E0616,
+ "field `{field}` of {kind_name} `{struct_path}` is private",
+ );
+ err.span_label(field.span, "private field");
+ // Also check if an accessible method exists, which is often what is meant.
+ if self.method_exists(field, expr_t, expr.hir_id, false) && !self.expr_in_place(expr.hir_id)
+ {
+ self.suggest_method_call(
+ &mut err,
+ &format!("a method `{field}` also exists, call it with parentheses"),
+ field,
+ expr_t,
+ expr,
+ None,
+ );
+ }
+ err.emit();
+ }
+
+ fn ban_take_value_of_method(&self, expr: &hir::Expr<'_>, expr_t: Ty<'tcx>, field: Ident) {
+ let mut err = type_error_struct!(
+ self.tcx().sess,
+ field.span,
+ expr_t,
+ E0615,
+ "attempted to take value of method `{field}` on type `{expr_t}`",
+ );
+ err.span_label(field.span, "method, not a field");
+ let expr_is_call =
+ if let hir::Node::Expr(hir::Expr { kind: ExprKind::Call(callee, _args), .. }) =
+ self.tcx.hir().get(self.tcx.hir().get_parent_node(expr.hir_id))
+ {
+ expr.hir_id == callee.hir_id
+ } else {
+ false
+ };
+ let expr_snippet =
+ self.tcx.sess.source_map().span_to_snippet(expr.span).unwrap_or_default();
+ let is_wrapped = expr_snippet.starts_with('(') && expr_snippet.ends_with(')');
+ let after_open = expr.span.lo() + rustc_span::BytePos(1);
+ let before_close = expr.span.hi() - rustc_span::BytePos(1);
+
+ if expr_is_call && is_wrapped {
+ err.multipart_suggestion(
+ "remove wrapping parentheses to call the method",
+ vec![
+ (expr.span.with_hi(after_open), String::new()),
+ (expr.span.with_lo(before_close), String::new()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else if !self.expr_in_place(expr.hir_id) {
+ // Suggest call parentheses inside the wrapping parentheses
+ let span = if is_wrapped {
+ expr.span.with_lo(after_open).with_hi(before_close)
+ } else {
+ expr.span
+ };
+ self.suggest_method_call(
+ &mut err,
+ "use parentheses to call the method",
+ field,
+ expr_t,
+ expr,
+ Some(span),
+ );
+ } else {
+ let mut found = false;
+
+ if let ty::RawPtr(ty_and_mut) = expr_t.kind()
+ && let ty::Adt(adt_def, _) = ty_and_mut.ty.kind()
+ {
+ if adt_def.variants().len() == 1
+ && adt_def
+ .variants()
+ .iter()
+ .next()
+ .unwrap()
+ .fields
+ .iter()
+ .any(|f| f.ident(self.tcx) == field)
+ {
+ if let Some(dot_loc) = expr_snippet.rfind('.') {
+ found = true;
+ err.span_suggestion(
+ expr.span.with_hi(expr.span.lo() + BytePos::from_usize(dot_loc)),
+ "to access the field, dereference first",
+ format!("(*{})", &expr_snippet[0..dot_loc]),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ if !found {
+ err.help("methods are immutable and cannot be assigned to");
+ }
+ }
+
+ err.emit();
+ }
+
+ fn point_at_param_definition(&self, err: &mut Diagnostic, param: ty::ParamTy) {
+ let generics = self.tcx.generics_of(self.body_id.owner.to_def_id());
+ let generic_param = generics.type_param(&param, self.tcx);
+ if let ty::GenericParamDefKind::Type { synthetic: true, .. } = generic_param.kind {
+ return;
+ }
+ let param_def_id = generic_param.def_id;
+ let param_hir_id = match param_def_id.as_local() {
+ Some(x) => self.tcx.hir().local_def_id_to_hir_id(x),
+ None => return,
+ };
+ let param_span = self.tcx.hir().span(param_hir_id);
+ let param_name = self.tcx.hir().ty_param_name(param_def_id.expect_local());
+
+ err.span_label(param_span, &format!("type parameter '{param_name}' declared here"));
+ }
+
+ fn suggest_fields_on_recordish(
+ &self,
+ err: &mut Diagnostic,
+ def: ty::AdtDef<'tcx>,
+ field: Ident,
+ access_span: Span,
+ ) {
+ if let Some(suggested_field_name) =
+ self.suggest_field_name(def.non_enum_variant(), field.name, vec![], access_span)
+ {
+ err.span_suggestion(
+ field.span,
+ "a field with a similar name exists",
+ suggested_field_name,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_label(field.span, "unknown field");
+ let struct_variant_def = def.non_enum_variant();
+ let field_names = self.available_field_names(struct_variant_def, access_span);
+ if !field_names.is_empty() {
+ err.note(&format!(
+ "available fields are: {}",
+ self.name_series_display(field_names),
+ ));
+ }
+ }
+ }
+
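+    /// For example (illustrative), given `let a = [1, 2, 3];`, the erroneous
+    /// access `a.0` gets a suggestion to use array indexing: `a[0]`.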
+ fn maybe_suggest_array_indexing(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ base: &hir::Expr<'_>,
+ field: Ident,
+ len: ty::Const<'tcx>,
+ ) {
+ if let (Some(len), Ok(user_index)) =
+ (len.try_eval_usize(self.tcx, self.param_env), field.as_str().parse::<u64>())
+ && let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span)
+ {
+ let help = "instead of using tuple indexing, use array indexing";
+ let suggestion = format!("{base}[{field}]");
+ let applicability = if len < user_index {
+ Applicability::MachineApplicable
+ } else {
+ Applicability::MaybeIncorrect
+ };
+ err.span_suggestion(expr.span, help, suggestion, applicability);
+ }
+ }
+
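+    /// For example (illustrative), given `p: *const Point`, the access `p.x`
+    /// is rejected with a suggestion to dereference first: `(*p).x`.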
+ fn suggest_first_deref_field(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ base: &hir::Expr<'_>,
+ field: Ident,
+ ) {
+ if let Ok(base) = self.tcx.sess.source_map().span_to_snippet(base.span) {
+ let msg = format!("`{base}` is a raw pointer; try dereferencing it");
+ let suggestion = format!("(*{base}).{field}");
+ err.span_suggestion(expr.span, &msg, suggestion, Applicability::MaybeIncorrect);
+ }
+ }
+
+ fn no_such_field_err(
+ &self,
+ field: Ident,
+ expr_t: Ty<'tcx>,
+ id: HirId,
+ ) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let span = field.span;
+ debug!("no_such_field_err(span: {:?}, field: {:?}, expr_t: {:?})", span, field, expr_t);
+
+ let mut err = type_error_struct!(
+ self.tcx().sess,
+ field.span,
+ expr_t,
+ E0609,
+ "no field `{field}` on type `{expr_t}`",
+ );
+
+ // try to add a suggestion in case the field is a nested field of a field of the Adt
+ if let Some((fields, substs)) = self.get_field_candidates(span, expr_t) {
+ for candidate_field in fields.iter() {
+ if let Some(mut field_path) = self.check_for_nested_field_satisfying(
+ span,
+ &|candidate_field, _| candidate_field.ident(self.tcx()) == field,
+ candidate_field,
+ substs,
+ vec![],
+ self.tcx.parent_module(id).to_def_id(),
+ ) {
+ // field_path includes `field` that we're looking for, so pop it.
+ field_path.pop();
+
+ let field_path_str = field_path
+ .iter()
+ .map(|id| id.name.to_ident_string())
+ .collect::<Vec<String>>()
+ .join(".");
+ debug!("field_path_str: {:?}", field_path_str);
+
+ err.span_suggestion_verbose(
+ field.span.shrink_to_lo(),
+ "one of the expressions' fields has a field of the same name",
+ format!("{field_path_str}."),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ err
+ }
+
+ pub(crate) fn get_field_candidates(
+ &self,
+ span: Span,
+ base_t: Ty<'tcx>,
+ ) -> Option<(&[ty::FieldDef], SubstsRef<'tcx>)> {
+ debug!("get_field_candidates(span: {:?}, base_t: {:?}", span, base_t);
+
+ for (base_t, _) in self.autoderef(span, base_t) {
+ match base_t.kind() {
+ ty::Adt(base_def, substs) if !base_def.is_enum() => {
+ let fields = &base_def.non_enum_variant().fields;
+                    // For compile-time reasons, put a limit on the number of fields we search.
+ if fields.len() > 100 {
+ return None;
+ }
+ return Some((fields, substs));
+ }
+ _ => {}
+ }
+ }
+ None
+ }
+
+    /// This method is called after we have encountered a missing field error to recursively
+    /// search for the field.
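+    ///
+    /// For example (illustrative), with `struct Outer { inner: Inner }` and
+    /// `struct Inner { point: Point }`, the erroneous access `outer.point` is
+    /// resolved to the nested path `inner.point`, which `no_such_field_err`
+    /// surfaces as an `inner.` prefix suggestion.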
+ pub(crate) fn check_for_nested_field_satisfying(
+ &self,
+ span: Span,
+ matches: &impl Fn(&ty::FieldDef, Ty<'tcx>) -> bool,
+ candidate_field: &ty::FieldDef,
+ subst: SubstsRef<'tcx>,
+ mut field_path: Vec<Ident>,
+ id: DefId,
+ ) -> Option<Vec<Ident>> {
+ debug!(
+ "check_for_nested_field_satisfying(span: {:?}, candidate_field: {:?}, field_path: {:?}",
+ span, candidate_field, field_path
+ );
+
+ if field_path.len() > 3 {
+            // For compile-time reasons, and to avoid infinite recursion, we only check
+            // for fields up to a depth of three.
+ None
+ } else {
+ // recursively search fields of `candidate_field` if it's a ty::Adt
+ field_path.push(candidate_field.ident(self.tcx).normalize_to_macros_2_0());
+ let field_ty = candidate_field.ty(self.tcx, subst);
+ if let Some((nested_fields, subst)) = self.get_field_candidates(span, field_ty) {
+ for field in nested_fields.iter() {
+ if field.vis.is_accessible_from(id, self.tcx) {
+ if matches(candidate_field, field_ty) {
+ return Some(field_path);
+ } else if let Some(field_path) = self.check_for_nested_field_satisfying(
+ span,
+ matches,
+ field,
+ subst,
+ field_path.clone(),
+ id,
+ ) {
+ return Some(field_path);
+ }
+ }
+ }
+ }
+ None
+ }
+ }
+
+ fn check_expr_index(
+ &self,
+ base: &'tcx hir::Expr<'tcx>,
+ idx: &'tcx hir::Expr<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ let base_t = self.check_expr(&base);
+ let idx_t = self.check_expr(&idx);
+
+ if base_t.references_error() {
+ base_t
+ } else if idx_t.references_error() {
+ idx_t
+ } else {
+ let base_t = self.structurally_resolved_type(base.span, base_t);
+ match self.lookup_indexing(expr, base, base_t, idx, idx_t) {
+ Some((index_ty, element_ty)) => {
+ // two-phase not needed because index_ty is never mutable
+ self.demand_coerce(idx, idx_t, index_ty, None, AllowTwoPhase::No);
+ self.select_obligations_where_possible(false, |errors| {
+ self.point_at_index_if_possible(errors, idx.span)
+ });
+ element_ty
+ }
+ None => {
+ let mut err = type_error_struct!(
+ self.tcx.sess,
+ expr.span,
+ base_t,
+ E0608,
+ "cannot index into a value of type `{base_t}`",
+ );
+ // Try to give some advice about indexing tuples.
+ if let ty::Tuple(..) = base_t.kind() {
+ let mut needs_note = true;
+ // If the index is an integer, we can show the actual
+ // fixed expression:
+ if let ExprKind::Lit(ref lit) = idx.kind {
+ if let ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) = lit.node {
+ let snip = self.tcx.sess.source_map().span_to_snippet(base.span);
+ if let Ok(snip) = snip {
+ err.span_suggestion(
+ expr.span,
+ "to access tuple elements, use",
+ format!("{snip}.{i}"),
+ Applicability::MachineApplicable,
+ );
+ needs_note = false;
+ }
+ }
+ }
+ if needs_note {
+ err.help(
+ "to access tuple elements, use tuple indexing \
+ syntax (e.g., `tuple.0`)",
+ );
+ }
+ }
+ err.emit();
+ self.tcx.ty_error()
+ }
+ }
+ }
+ }
+
+ fn point_at_index_if_possible(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ span: Span,
+ ) {
+ for error in errors {
+ match error.obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(predicate)
+ if self.tcx.is_diagnostic_item(sym::SliceIndex, predicate.trait_ref.def_id) => {
+ }
+ _ => continue,
+ }
+ error.obligation.cause.span = span;
+ }
+ }
+
+ fn check_expr_yield(
+ &self,
+ value: &'tcx hir::Expr<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ src: &'tcx hir::YieldSource,
+ ) -> Ty<'tcx> {
+ match self.resume_yield_tys {
+ Some((resume_ty, yield_ty)) => {
+ self.check_expr_coercable_to_type(&value, yield_ty, None);
+
+ resume_ty
+ }
+ // Given that this `yield` expression was generated as a result of lowering a `.await`,
+ // we know that the yield type must be `()`; however, the context won't contain this
+ // information. Hence, we check the source of the yield expression here and check its
+ // value's type against `()` (this check should always hold).
+ None if src.is_await() => {
+ self.check_expr_coercable_to_type(&value, self.tcx.mk_unit(), None);
+ self.tcx.mk_unit()
+ }
+ _ => {
+ self.tcx.sess.emit_err(YieldExprOutsideOfGenerator { span: expr.span });
+ // Avoid expressions without types during writeback (#78653).
+ self.check_expr(value);
+ self.tcx.mk_unit()
+ }
+ }
+ }
+
+ fn check_expr_asm_operand(&self, expr: &'tcx hir::Expr<'tcx>, is_input: bool) {
+ let needs = if is_input { Needs::None } else { Needs::MutPlace };
+ let ty = self.check_expr_with_needs(expr, needs);
+ self.require_type_is_sized(ty, expr.span, traits::InlineAsmSized);
+
+ if !is_input && !expr.is_syntactic_place_expr() {
+ let mut err = self.tcx.sess.struct_span_err(expr.span, "invalid asm output");
+ err.span_label(expr.span, "cannot assign to this expression");
+ err.emit();
+ }
+
+ // If this is an input value, we require its type to be fully resolved
+ // at this point. This allows us to provide helpful coercions which help
+ // pass the type candidate list in a later pass.
+ //
+ // We don't require output types to be resolved at this point, which
+ // allows them to be inferred based on how they are used later in the
+ // function.
+ if is_input {
+ let ty = self.structurally_resolved_type(expr.span, ty);
+ match *ty.kind() {
+ ty::FnDef(..) => {
+ let fnptr_ty = self.tcx.mk_fn_ptr(ty.fn_sig(self.tcx));
+ self.demand_coerce(expr, ty, fnptr_ty, None, AllowTwoPhase::No);
+ }
+ ty::Ref(_, base_ty, mutbl) => {
+ let ptr_ty = self.tcx.mk_ptr(ty::TypeAndMut { ty: base_ty, mutbl });
+ self.demand_coerce(expr, ty, ptr_ty, None, AllowTwoPhase::No);
+ }
+ _ => {}
+ }
+ }
+ }
+
+ fn check_expr_asm(&self, asm: &'tcx hir::InlineAsm<'tcx>) -> Ty<'tcx> {
+ for (op, _op_sp) in asm.operands {
+ match op {
+ hir::InlineAsmOperand::In { expr, .. } => {
+ self.check_expr_asm_operand(expr, true);
+ }
+ hir::InlineAsmOperand::Out { expr: Some(expr), .. }
+ | hir::InlineAsmOperand::InOut { expr, .. } => {
+ self.check_expr_asm_operand(expr, false);
+ }
+ hir::InlineAsmOperand::Out { expr: None, .. } => {}
+ hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ self.check_expr_asm_operand(in_expr, true);
+ if let Some(out_expr) = out_expr {
+ self.check_expr_asm_operand(out_expr, false);
+ }
+ }
+                // `AnonConst`s have their own body and are type-checked separately.
+                // As they don't flow into the type system, we don't need them to
+                // be well-formed.
+ hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymFn { .. } => {}
+ hir::InlineAsmOperand::SymStatic { .. } => {}
+ }
+ }
+ if asm.options.contains(ast::InlineAsmOptions::NORETURN) {
+ self.tcx.types.never
+ } else {
+ self.tcx.mk_unit()
+ }
+ }
+}
+
+pub(super) fn ty_kind_suggestion(ty: Ty<'_>) -> Option<&'static str> {
+ Some(match ty.kind() {
+ ty::Bool => "true",
+ ty::Char => "'a'",
+ ty::Int(_) | ty::Uint(_) => "42",
+ ty::Float(_) => "3.14159",
+ ty::Error(_) | ty::Never => return None,
+ _ => "value",
+ })
+}
diff --git a/compiler/rustc_typeck/src/check/fallback.rs b/compiler/rustc_typeck/src/check/fallback.rs
new file mode 100644
index 000000000..4059b3403
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fallback.rs
@@ -0,0 +1,398 @@
+use crate::check::FnCtxt;
+use rustc_data_structures::{
+ fx::{FxHashMap, FxHashSet},
+ graph::WithSuccessors,
+ graph::{iterate::DepthFirstSearch, vec_graph::VecGraph},
+};
+use rustc_middle::ty::{self, Ty};
+
+impl<'tcx> FnCtxt<'_, 'tcx> {
+ /// Performs type inference fallback, returning true if any fallback
+ /// occurs.
+ pub(super) fn type_inference_fallback(&self) -> bool {
+ debug!(
+ "type-inference-fallback start obligations: {:#?}",
+ self.fulfillment_cx.borrow_mut().pending_obligations()
+ );
+
+        // All type-checking constraints have been added; try to apply fallback to unsolved variables.
+ self.select_obligations_where_possible(false, |_| {});
+
+ debug!(
+ "type-inference-fallback post selection obligations: {:#?}",
+ self.fulfillment_cx.borrow_mut().pending_obligations()
+ );
+
+ // Check if we have any unsolved variables. If not, no need for fallback.
+ let unsolved_variables = self.unsolved_variables();
+ if unsolved_variables.is_empty() {
+ return false;
+ }
+
+ let diverging_fallback = self.calculate_diverging_fallback(&unsolved_variables);
+
+ let mut fallback_has_occurred = false;
+ // We do fallback in two passes, to try to generate
+ // better error messages.
+ // The first time, we do *not* replace opaque types.
+ for ty in unsolved_variables {
+ debug!("unsolved_variable = {:?}", ty);
+ fallback_has_occurred |= self.fallback_if_possible(ty, &diverging_fallback);
+ }
+
+ // We now see if we can make progress. This might cause us to
+ // unify inference variables for opaque types, since we may
+ // have unified some other type variables during the first
+ // phase of fallback. This means that we only replace
+ // inference variables with their underlying opaque types as a
+ // last resort.
+ //
+ // In code like this:
+ //
+ // ```rust
+ // type MyType = impl Copy;
+ // fn produce() -> MyType { true }
+ // fn bad_produce() -> MyType { panic!() }
+ // ```
+ //
+ // we want to unify the opaque inference variable in `bad_produce`
+ // with the diverging fallback for `panic!` (e.g. `()` or `!`).
+ // This will produce a nice error message about conflicting concrete
+ // types for `MyType`.
+ //
+ // If we had tried to fallback the opaque inference variable to `MyType`,
+ // we will generate a confusing type-check error that does not explicitly
+ // refer to opaque types.
+ self.select_obligations_where_possible(fallback_has_occurred, |_| {});
+
+ fallback_has_occurred
+ }
+
+ // Tries to apply a fallback to `ty` if it is an unsolved variable.
+ //
+ // - Unconstrained ints are replaced with `i32`.
+ //
+    // - Unconstrained floats are replaced with `f64`.
+ //
+ // - Non-numerics may get replaced with `()` or `!`, depending on
+ // how they were categorized by `calculate_diverging_fallback`
+ // (and the setting of `#![feature(never_type_fallback)]`).
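+    //
+    // For example, in `let x = 42;` with no further constraints `x` falls
+    // back to `i32`, and in `let y = 1.0;` `y` falls back to `f64`.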
+ //
+ // Fallback becomes very dubious if we have encountered
+ // type-checking errors. In that case, fallback to Error.
+ //
+ // The return value indicates whether fallback has occurred.
+ fn fallback_if_possible(
+ &self,
+ ty: Ty<'tcx>,
+ diverging_fallback: &FxHashMap<Ty<'tcx>, Ty<'tcx>>,
+ ) -> bool {
+ // Careful: we do NOT shallow-resolve `ty`. We know that `ty`
+ // is an unsolved variable, and we determine its fallback
+ // based solely on how it was created, not what other type
+ // variables it may have been unified with since then.
+ //
+ // The reason this matters is that other attempts at fallback
+ // may (in principle) conflict with this fallback, and we wish
+ // to generate a type error in that case. (However, this
+ // actually isn't true right now, because we're only using the
+ // builtin fallback rules. This would be true if we were using
+ // user-supplied fallbacks. But it's still useful to write the
+ // code to detect bugs.)
+ //
+ // (Note though that if we have a general type variable `?T`
+ // that is then unified with an integer type variable `?I`
+ // that ultimately never gets resolved to a special integral
+ // type, `?T` is not considered unsolved, but `?I` is. The
+ // same is true for float variables.)
+ let fallback = match ty.kind() {
+ _ if self.is_tainted_by_errors() => self.tcx.ty_error(),
+ ty::Infer(ty::IntVar(_)) => self.tcx.types.i32,
+ ty::Infer(ty::FloatVar(_)) => self.tcx.types.f64,
+ _ => match diverging_fallback.get(&ty) {
+ Some(&fallback_ty) => fallback_ty,
+ None => return false,
+ },
+ };
+ debug!("fallback_if_possible(ty={:?}): defaulting to `{:?}`", ty, fallback);
+
+ let span = self
+ .infcx
+ .type_var_origin(ty)
+ .map(|origin| origin.span)
+ .unwrap_or(rustc_span::DUMMY_SP);
+ self.demand_eqtype(span, ty, fallback);
+ true
+ }
+
+ /// The "diverging fallback" system is rather complicated. This is
+ /// a result of our need to balance 'do the right thing' with
+ /// backwards compatibility.
+ ///
+ /// "Diverging" type variables are variables created when we
+ /// coerce a `!` type into an unbound type variable `?X`. If they
+ /// never wind up being constrained, the "right and natural" thing
+ /// is that `?X` should "fallback" to `!`. This means that e.g. an
+ /// expression like `Some(return)` will ultimately wind up with a
+ /// type like `Option<!>` (presuming it is not assigned or
+ /// constrained to have some other type).
+ ///
+ /// However, the fallback used to be `()` (before the `!` type was
+ /// added). Moreover, there are cases where the `!` type 'leaks
+ /// out' from dead code into type variables that affect live
+ /// code. The most common case is something like this:
+ ///
+ /// ```rust
+ /// # fn foo() -> i32 { 4 }
+ /// match foo() {
+ /// 22 => Default::default(), // call this type `?D`
+ /// _ => return, // return has type `!`
+ /// } // call the type of this match `?M`
+ /// ```
+ ///
+ /// Here, coercing the type `!` into `?M` will create a diverging
+ /// type variable `?X` where `?X <: ?M`. We also have that `?D <:
+    /// ?M`. If `?M` winds up unconstrained, then `?X` will
+    /// fall back. If it falls back to `!`, then all the type variables
+    /// will wind up equal to `!` -- this includes the type `?D`
+    /// (since `!` doesn't implement `Default`, we wind up with a "trait
+    /// not implemented" error in code like this). But since the
+ /// original fallback was `()`, this code used to compile with `?D
+ /// = ()`. This is somewhat surprising, since `Default::default()`
+ /// on its own would give an error because the types are
+ /// insufficiently constrained.
+ ///
+ /// Our solution to this dilemma is to modify diverging variables
+ /// so that they can *either* fallback to `!` (the default) or to
+ /// `()` (the backwards compatibility case). We decide which
+ /// fallback to use based on whether there is a coercion pattern
+ /// like this:
+ ///
+ /// ```ignore (not-rust)
+ /// ?Diverging -> ?V
+ /// ?NonDiverging -> ?V
+ /// ?V != ?NonDiverging
+ /// ```
+ ///
+ /// Here `?Diverging` represents some diverging type variable and
+ /// `?NonDiverging` represents some non-diverging type
+ /// variable. `?V` can be any type variable (diverging or not), so
+ /// long as it is not equal to `?NonDiverging`.
+ ///
+ /// Intuitively, what we are looking for is a case where a
+ /// "non-diverging" type variable (like `?M` in our example above)
+ /// is coerced *into* some variable `?V` that would otherwise
+ /// fallback to `!`. In that case, we make `?V` fallback to `!`,
+ /// along with anything that would flow into `?V`.
+ ///
+ /// The algorithm we use:
+ /// * Identify all variables that are coerced *into* by a
+ /// diverging variable. Do this by iterating over each
+ /// diverging, unsolved variable and finding all variables
+ /// reachable from there. Call that set `D`.
+ /// * Walk over all unsolved, non-diverging variables, and find
+ /// any variable that has an edge into `D`.
+ fn calculate_diverging_fallback(
+ &self,
+ unsolved_variables: &[Ty<'tcx>],
+ ) -> FxHashMap<Ty<'tcx>, Ty<'tcx>> {
+ debug!("calculate_diverging_fallback({:?})", unsolved_variables);
+
+ let relationships = self.fulfillment_cx.borrow_mut().relationships().clone();
+
+        // Construct a coercion graph where an edge `A -> B` indicates
+        // that the type variable `A` is coerced into `B`.
+ let coercion_graph = self.create_coercion_graph();
+
+ // Extract the unsolved type inference variable vids; note that some
+ // unsolved variables are integer/float variables and are excluded.
+ let unsolved_vids = unsolved_variables.iter().filter_map(|ty| ty.ty_vid());
+
+ // Compute the diverging root vids D -- that is, the root vid of
+ // those type variables that (a) are the target of a coercion from
+ // a `!` type and (b) have not yet been solved.
+ //
+ // These variables are the ones that are targets for fallback to
+ // either `!` or `()`.
+ let diverging_roots: FxHashSet<ty::TyVid> = self
+ .diverging_type_vars
+ .borrow()
+ .iter()
+ .map(|&ty| self.shallow_resolve(ty))
+ .filter_map(|ty| ty.ty_vid())
+ .map(|vid| self.root_var(vid))
+ .collect();
+ debug!(
+ "calculate_diverging_fallback: diverging_type_vars={:?}",
+ self.diverging_type_vars.borrow()
+ );
+ debug!("calculate_diverging_fallback: diverging_roots={:?}", diverging_roots);
+
+ // Find all type variables that are reachable from a diverging
+ // type variable. These will typically default to `!`, unless
+ // we find later that they are *also* reachable from some
+ // other type variable outside this set.
+ let mut roots_reachable_from_diverging = DepthFirstSearch::new(&coercion_graph);
+ let mut diverging_vids = vec![];
+ let mut non_diverging_vids = vec![];
+ for unsolved_vid in unsolved_vids {
+ let root_vid = self.root_var(unsolved_vid);
+ debug!(
+ "calculate_diverging_fallback: unsolved_vid={:?} root_vid={:?} diverges={:?}",
+ unsolved_vid,
+ root_vid,
+ diverging_roots.contains(&root_vid),
+ );
+ if diverging_roots.contains(&root_vid) {
+ diverging_vids.push(unsolved_vid);
+ roots_reachable_from_diverging.push_start_node(root_vid);
+
+ debug!(
+ "calculate_diverging_fallback: root_vid={:?} reaches {:?}",
+ root_vid,
+ coercion_graph.depth_first_search(root_vid).collect::<Vec<_>>()
+ );
+
+ // drain the iterator to visit all nodes reachable from this node
+ roots_reachable_from_diverging.complete_search();
+ } else {
+ non_diverging_vids.push(unsolved_vid);
+ }
+ }
+
+ debug!(
+ "calculate_diverging_fallback: roots_reachable_from_diverging={:?}",
+ roots_reachable_from_diverging,
+ );
+
+ // Find all type variables N0 that are not reachable from a
+ // diverging variable, and then compute the set reachable from
+ // N0, which we call N. These are the *non-diverging* type
+ // variables. (Note that this set consists of "root variables".)
+ let mut roots_reachable_from_non_diverging = DepthFirstSearch::new(&coercion_graph);
+ for &non_diverging_vid in &non_diverging_vids {
+ let root_vid = self.root_var(non_diverging_vid);
+ if roots_reachable_from_diverging.visited(root_vid) {
+ continue;
+ }
+ roots_reachable_from_non_diverging.push_start_node(root_vid);
+ roots_reachable_from_non_diverging.complete_search();
+ }
+ debug!(
+ "calculate_diverging_fallback: roots_reachable_from_non_diverging={:?}",
+ roots_reachable_from_non_diverging,
+ );
+
+ debug!("inherited: {:#?}", self.inh.fulfillment_cx.borrow_mut().pending_obligations());
+ debug!("obligations: {:#?}", self.fulfillment_cx.borrow_mut().pending_obligations());
+ debug!("relationships: {:#?}", relationships);
+
+ // For each diverging variable, figure out whether it can
+ // reach a member of N. If so, it falls back to `()`. Else
+ // `!`.
+ let mut diverging_fallback = FxHashMap::default();
+ diverging_fallback.reserve(diverging_vids.len());
+ for &diverging_vid in &diverging_vids {
+ let diverging_ty = self.tcx.mk_ty_var(diverging_vid);
+ let root_vid = self.root_var(diverging_vid);
+ let can_reach_non_diverging = coercion_graph
+ .depth_first_search(root_vid)
+ .any(|n| roots_reachable_from_non_diverging.visited(n));
+
+ let mut relationship = ty::FoundRelationships { self_in_trait: false, output: false };
+
+ for (vid, rel) in relationships.iter() {
+ if self.root_var(*vid) == root_vid {
+ relationship.self_in_trait |= rel.self_in_trait;
+ relationship.output |= rel.output;
+ }
+ }
+
+ if relationship.self_in_trait && relationship.output {
+ // This case falls back to () to ensure that the code pattern in
+ // src/test/ui/never_type/fallback-closure-ret.rs continues to
+ // compile when never_type_fallback is enabled.
+ //
+ // This rule is not readily explainable from first principles,
+ // but is rather intended as a patchwork fix to ensure code
+ // which compiles before the stabilization of never type
+ // fallback continues to work.
+ //
+ // Typically this pattern is encountered in a function taking a
+ // closure as a parameter, where the return type of that closure
+ // (checked by `relationship.output`) is expected to implement
+ // some trait (checked by `relationship.self_in_trait`). This
+ // can come up in non-closure cases too, so we do not limit this
+ // rule to specifically `FnOnce`.
+ //
+ // When the closure's body is something like `panic!()`, the
+ // return type would normally be inferred to `!`. However, it
+ // needs to fall back to `()` in order to still compile, as the
+ // trait is specifically implemented for `()` but not `!`.
+ //
+ // For details on the requirements for these relationships to be
+ // set, see the relationship finding module in
+ // compiler/rustc_trait_selection/src/traits/relationships.rs.
+ debug!("fallback to () - found trait and projection: {:?}", diverging_vid);
+ diverging_fallback.insert(diverging_ty, self.tcx.types.unit);
+ } else if can_reach_non_diverging {
+ debug!("fallback to () - reached non-diverging: {:?}", diverging_vid);
+ diverging_fallback.insert(diverging_ty, self.tcx.types.unit);
+ } else {
+ debug!("fallback to ! - all diverging: {:?}", diverging_vid);
+ diverging_fallback.insert(diverging_ty, self.tcx.mk_diverging_default());
+ }
+ }
+
+ diverging_fallback
+ }
+
+ /// Returns a graph whose nodes are (unresolved) inference variables and where
+ /// an edge `?A -> ?B` indicates that the variable `?A` is coerced to `?B`.
+ fn create_coercion_graph(&self) -> VecGraph<ty::TyVid> {
+ let pending_obligations = self.fulfillment_cx.borrow_mut().pending_obligations();
+ debug!("create_coercion_graph: pending_obligations={:?}", pending_obligations);
+ let coercion_edges: Vec<(ty::TyVid, ty::TyVid)> = pending_obligations
+ .into_iter()
+ .filter_map(|obligation| {
+ // The predicates we are looking for look like `Coerce(?A -> ?B)`.
+ // They will have no bound variables.
+ obligation.predicate.kind().no_bound_vars()
+ })
+ .filter_map(|atom| {
+ // We consider both subtyping and coercion to imply 'flow' from
+ // some position in the code `a` to a different position `b`.
+ // This is then used to determine which variables interact with
+ // live code, and as such must fall back to `()` to preserve
+ // soundness.
+ //
+                // In practice, the two ways this currently happens are
+                // coercion and subtyping.
+ let (a, b) = if let ty::PredicateKind::Coerce(ty::CoercePredicate { a, b }) = atom {
+ (a, b)
+ } else if let ty::PredicateKind::Subtype(ty::SubtypePredicate {
+ a_is_expected: _,
+ a,
+ b,
+ }) = atom
+ {
+ (a, b)
+ } else {
+ return None;
+ };
+
+ let a_vid = self.root_vid(a)?;
+ let b_vid = self.root_vid(b)?;
+ Some((a_vid, b_vid))
+ })
+ .collect();
+ debug!("create_coercion_graph: coercion_edges={:?}", coercion_edges);
+ let num_ty_vars = self.num_ty_vars();
+ VecGraph::new(num_ty_vars, coercion_edges)
+ }
+
+ /// If `ty` is an unresolved type variable, returns its root vid.
+ fn root_vid(&self, ty: Ty<'tcx>) -> Option<ty::TyVid> {
+ Some(self.root_var(self.shallow_resolve(ty).ty_vid()?))
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs
new file mode 100644
index 000000000..3a8093345
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/_impl.rs
@@ -0,0 +1,1510 @@
+use crate::astconv::{
+ AstConv, CreateSubstsForGenericArgsCtxt, ExplicitLateBound, GenericArgCountMismatch,
+ GenericArgCountResult, IsMethodCall, PathSeg,
+};
+use crate::check::callee::{self, DeferredCallResolution};
+use crate::check::method::{self, MethodCallee, SelfSource};
+use crate::check::rvalue_scopes;
+use crate::check::{BreakableCtxt, Diverges, Expectation, FnCtxt, LocalTy};
+
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{Applicability, Diagnostic, ErrorGuaranteed, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{ExprKind, GenericArg, Node, QPath};
+use rustc_infer::infer::canonical::{Canonical, OriginalQueryValues, QueryResponse};
+use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
+use rustc_infer::infer::{InferOk, InferResult};
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, AutoBorrow, AutoBorrowMutability};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::{
+ self, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSelfTy, UserSubsts,
+};
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{
+ self, AdtKind, CanonicalUserType, DefIdTree, EarlyBinder, GenericParamDefKind, ToPolyTraitRef,
+ ToPredicate, Ty, UserType,
+};
+use rustc_session::lint;
+use rustc_span::def_id::LocalDefId;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt,
+};
+
+use std::collections::hash_map::Entry;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+    /// Produces a warning on the given node if the current point in the
+    /// function is unreachable and there hasn't been another warning.
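+    /// A typical case is code that lexically follows a `return` or `break`
+    /// expression.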
+ pub(in super::super) fn warn_if_unreachable(&self, id: hir::HirId, span: Span, kind: &str) {
+ // FIXME: Combine these two 'if' expressions into one once
+ // let chains are implemented
+ if let Diverges::Always { span: orig_span, custom_note } = self.diverges.get() {
+ // If span arose from a desugaring of `if` or `while`, then it is the condition itself,
+ // which diverges, that we are about to lint on. This gives suboptimal diagnostics.
+ // Instead, stop here so that the `if`- or `while`-expression's block is linted instead.
+ if !span.is_desugaring(DesugaringKind::CondTemporary)
+ && !span.is_desugaring(DesugaringKind::Async)
+ && !orig_span.is_desugaring(DesugaringKind::Await)
+ {
+ self.diverges.set(Diverges::WarnedAlways);
+
+ debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind);
+
+ self.tcx().struct_span_lint_hir(lint::builtin::UNREACHABLE_CODE, id, span, |lint| {
+ let msg = format!("unreachable {}", kind);
+ lint.build(&msg)
+ .span_label(span, &msg)
+ .span_label(
+ orig_span,
+ custom_note
+ .unwrap_or("any code following this expression is unreachable"),
+ )
+ .emit();
+ })
+ }
+ }
+ }
+
+ /// Resolves type and const variables in `ty` if possible. Unlike the infcx
+ /// version (resolve_vars_if_possible), this version will
+ /// also select obligations if it seems useful, in an effort
+ /// to get more type information.
+ pub(in super::super) fn resolve_vars_with_obligations(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.resolve_vars_with_obligations_and_mutate_fulfillment(ty, |_| {})
+ }
+
+ #[instrument(skip(self, mutate_fulfillment_errors), level = "debug")]
+ pub(in super::super) fn resolve_vars_with_obligations_and_mutate_fulfillment(
+ &self,
+ mut ty: Ty<'tcx>,
+ mutate_fulfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
+ ) -> Ty<'tcx> {
+ // No Infer()? Nothing needs doing.
+ if !ty.has_infer_types_or_consts() {
+ debug!("no inference var, nothing needs doing");
+ return ty;
+ }
+
+ // If `ty` is a type variable, see whether we already know what it is.
+ ty = self.resolve_vars_if_possible(ty);
+ if !ty.has_infer_types_or_consts() {
+ debug!(?ty);
+ return ty;
+ }
+
+ // If not, try resolving pending obligations as much as
+ // possible. This can help substantially when there are
+ // indirect dependencies that don't seem worth tracking
+ // precisely.
+ self.select_obligations_where_possible(false, mutate_fulfillment_errors);
+ ty = self.resolve_vars_if_possible(ty);
+
+ debug!(?ty);
+ ty
+ }
+
+ pub(in super::super) fn record_deferred_call_resolution(
+ &self,
+ closure_def_id: LocalDefId,
+ r: DeferredCallResolution<'tcx>,
+ ) {
+ let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
+ deferred_call_resolutions.entry(closure_def_id).or_default().push(r);
+ }
+
+ pub(in super::super) fn remove_deferred_call_resolutions(
+ &self,
+ closure_def_id: LocalDefId,
+ ) -> Vec<DeferredCallResolution<'tcx>> {
+ let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
+ deferred_call_resolutions.remove(&closure_def_id).unwrap_or_default()
+ }
+
+ pub fn tag(&self) -> String {
+ format!("{:p}", self)
+ }
+
+ pub fn local_ty(&self, span: Span, nid: hir::HirId) -> LocalTy<'tcx> {
+ self.locals.borrow().get(&nid).cloned().unwrap_or_else(|| {
+ span_bug!(span, "no type for local variable {}", self.tcx.hir().node_to_string(nid))
+ })
+ }
+
+ #[inline]
+ pub fn write_ty(&self, id: hir::HirId, ty: Ty<'tcx>) {
+ debug!("write_ty({:?}, {:?}) in fcx {}", id, self.resolve_vars_if_possible(ty), self.tag());
+ self.typeck_results.borrow_mut().node_types_mut().insert(id, ty);
+
+ if ty.references_error() {
+ self.has_errors.set(true);
+ self.set_tainted_by_errors();
+ }
+ }
+
+ pub fn write_field_index(&self, hir_id: hir::HirId, index: usize) {
+ self.typeck_results.borrow_mut().field_indices_mut().insert(hir_id, index);
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub(in super::super) fn write_resolution(
+ &self,
+ hir_id: hir::HirId,
+ r: Result<(DefKind, DefId), ErrorGuaranteed>,
+ ) {
+ self.typeck_results.borrow_mut().type_dependent_defs_mut().insert(hir_id, r);
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub fn write_method_call(&self, hir_id: hir::HirId, method: MethodCallee<'tcx>) {
+ self.write_resolution(hir_id, Ok((DefKind::AssocFn, method.def_id)));
+ self.write_substs(hir_id, method.substs);
+
+ // When the method is confirmed, the `method.substs` includes
+ // parameters from not just the method, but also the impl of
+ // the method -- in particular, the `Self` type will be fully
+ // resolved. However, those are not something that the "user
+ // specified" -- i.e., those types come from the inferred type
+ // of the receiver, not something the user wrote. So when we
+ // create the user-substs, we want to replace those earlier
+ // types with just the types that the user actually wrote --
+ // that is, those that appear on the *method itself*.
+ //
+ // As an example, if the user wrote something like
+ // `foo.bar::<u32>(...)` -- the `Self` type here will be the
+ // type of `foo` (possibly adjusted), but we don't want to
+ // include that. We want just the `[_, u32]` part.
+ if !method.substs.is_empty() {
+ let method_generics = self.tcx.generics_of(method.def_id);
+ if !method_generics.params.is_empty() {
+ let user_type_annotation = self.probe(|_| {
+ let user_substs = UserSubsts {
+ substs: InternalSubsts::for_item(self.tcx, method.def_id, |param, _| {
+ let i = param.index as usize;
+ if i < method_generics.parent_count {
+ self.var_for_def(DUMMY_SP, param)
+ } else {
+ method.substs[i]
+ }
+ }),
+ user_self_ty: None, // not relevant here
+ };
+
+ self.canonicalize_user_type_annotation(UserType::TypeOf(
+ method.def_id,
+ user_substs,
+ ))
+ });
+
+ debug!("write_method_call: user_type_annotation={:?}", user_type_annotation);
+ self.write_user_type_annotation(hir_id, user_type_annotation);
+ }
+ }
+ }
+
+ pub fn write_substs(&self, node_id: hir::HirId, substs: SubstsRef<'tcx>) {
+ if !substs.is_empty() {
+ debug!("write_substs({:?}, {:?}) in fcx {}", node_id, substs, self.tag());
+
+ self.typeck_results.borrow_mut().node_substs_mut().insert(node_id, substs);
+ }
+ }
+
+ /// Given the substs that we just converted from the HIR, try to
+ /// canonicalize them and store them as user-given substitutions
+ /// (i.e., substitutions that must be respected by the NLL check).
+ ///
+ /// This should be invoked **before any unifications have
+ /// occurred**, so that annotations like `Vec<_>` are preserved
+ /// properly.
+ #[instrument(skip(self), level = "debug")]
+ pub fn write_user_type_annotation_from_substs(
+ &self,
+ hir_id: hir::HirId,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ user_self_ty: Option<UserSelfTy<'tcx>>,
+ ) {
+ debug!("fcx {}", self.tag());
+
+ if Self::can_contain_user_lifetime_bounds((substs, user_self_ty)) {
+ let canonicalized = self.canonicalize_user_type_annotation(UserType::TypeOf(
+ def_id,
+ UserSubsts { substs, user_self_ty },
+ ));
+ debug!(?canonicalized);
+ self.write_user_type_annotation(hir_id, canonicalized);
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn write_user_type_annotation(
+ &self,
+ hir_id: hir::HirId,
+ canonical_user_type_annotation: CanonicalUserType<'tcx>,
+ ) {
+ debug!("fcx {}", self.tag());
+
+ if !canonical_user_type_annotation.is_identity() {
+ self.typeck_results
+ .borrow_mut()
+ .user_provided_types_mut()
+ .insert(hir_id, canonical_user_type_annotation);
+ } else {
+ debug!("skipping identity substs");
+ }
+ }
+
+ #[instrument(skip(self, expr), level = "debug")]
+ pub fn apply_adjustments(&self, expr: &hir::Expr<'_>, adj: Vec<Adjustment<'tcx>>) {
+ debug!("expr = {:#?}", expr);
+
+ if adj.is_empty() {
+ return;
+ }
+
+ for a in &adj {
+ if let Adjust::NeverToAny = a.kind {
+ if a.target.is_ty_var() {
+ self.diverging_type_vars.borrow_mut().insert(a.target);
+ debug!("apply_adjustments: adding `{:?}` as diverging type var", a.target);
+ }
+ }
+ }
+
+ let autoborrow_mut = adj.iter().any(|adj| {
+ matches!(
+ adj,
+ &Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(_, AutoBorrowMutability::Mut { .. })),
+ ..
+ }
+ )
+ });
+
+ match self.typeck_results.borrow_mut().adjustments_mut().entry(expr.hir_id) {
+ Entry::Vacant(entry) => {
+ entry.insert(adj);
+ }
+ Entry::Occupied(mut entry) => {
+ debug!(" - composing on top of {:?}", entry.get());
+ match (&entry.get()[..], &adj[..]) {
+ // Applying any adjustment on top of a NeverToAny
+ // is a valid NeverToAny adjustment, because it can't
+ // be reached.
+ (&[Adjustment { kind: Adjust::NeverToAny, .. }], _) => return,
+ (
+ &[
+ Adjustment { kind: Adjust::Deref(_), .. },
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. },
+ ],
+ &[
+ Adjustment { kind: Adjust::Deref(_), .. },
+ .., // Any following adjustments are allowed.
+ ],
+ ) => {
+ // A reborrow has no effect before a dereference.
+ }
+ // FIXME: currently we never try to compose autoderefs
+ // and ReifyFnPointer/UnsafeFnPointer, but we could.
+ _ => {
+ self.tcx.sess.delay_span_bug(
+ expr.span,
+ &format!(
+ "while adjusting {:?}, can't compose {:?} and {:?}",
+ expr,
+ entry.get(),
+ adj
+ ),
+ );
+ }
+ }
+ *entry.get_mut() = adj;
+ }
+ }
+
+        // If there is a mutable auto-borrow, it is equivalent to `&mut <expr>`.
+ // In this case implicit use of `Deref` and `Index` within `<expr>` should
+ // instead be `DerefMut` and `IndexMut`, so fix those up.
+ if autoborrow_mut {
+ self.convert_place_derefs_to_mutable(expr);
+ }
+ }
+
+ /// Basically whenever we are converting from a type scheme into
+ /// the fn body space, we always want to normalize associated
+ /// types as well. This function combines the two.
+ fn instantiate_type_scheme<T>(&self, span: Span, substs: SubstsRef<'tcx>, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!("instantiate_type_scheme(value={:?}, substs={:?})", value, substs);
+ let value = EarlyBinder(value).subst(self.tcx, substs);
+ let result = self.normalize_associated_types_in(span, value);
+ debug!("instantiate_type_scheme = {:?}", result);
+ result
+ }
+
+ /// As `instantiate_type_scheme`, but for the bounds found in a
+ /// generic type scheme.
+ pub(in super::super) fn instantiate_bounds(
+ &self,
+ span: Span,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> (ty::InstantiatedPredicates<'tcx>, Vec<Span>) {
+ let bounds = self.tcx.predicates_of(def_id);
+ let spans: Vec<Span> = bounds.predicates.iter().map(|(_, span)| *span).collect();
+ let result = bounds.instantiate(self.tcx, substs);
+ let result = self.normalize_associated_types_in(span, result);
+ debug!(
+ "instantiate_bounds(bounds={:?}, substs={:?}) = {:?}, {:?}",
+ bounds, substs, result, spans,
+ );
+ (result, spans)
+ }
+
+ pub(in super::super) fn normalize_associated_types_in<T>(&self, span: Span, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.inh.normalize_associated_types_in(span, self.body_id, self.param_env, value)
+ }
+
+ pub(in super::super) fn normalize_associated_types_in_as_infer_ok<T>(
+ &self,
+ span: Span,
+ value: T,
+ ) -> InferOk<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.inh.partially_normalize_associated_types_in(
+ ObligationCause::misc(span, self.body_id),
+ self.param_env,
+ value,
+ )
+ }
+
+ pub(in super::super) fn normalize_op_associated_types_in_as_infer_ok<T>(
+ &self,
+ span: Span,
+ value: T,
+ opt_input_expr: Option<&hir::Expr<'_>>,
+ ) -> InferOk<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.inh.partially_normalize_associated_types_in(
+ ObligationCause::new(
+ span,
+ self.body_id,
+ traits::BinOp {
+ rhs_span: opt_input_expr.map(|expr| expr.span),
+ is_lit: opt_input_expr
+ .map_or(false, |expr| matches!(expr.kind, ExprKind::Lit(_))),
+ output_pred: None,
+ },
+ ),
+ self.param_env,
+ value,
+ )
+ }
+
+ pub fn require_type_meets(
+ &self,
+ ty: Ty<'tcx>,
+ span: Span,
+ code: traits::ObligationCauseCode<'tcx>,
+ def_id: DefId,
+ ) {
+ self.register_bound(ty, def_id, traits::ObligationCause::new(span, self.body_id, code));
+ }
+
+ pub fn require_type_is_sized(
+ &self,
+ ty: Ty<'tcx>,
+ span: Span,
+ code: traits::ObligationCauseCode<'tcx>,
+ ) {
+ if !ty.references_error() {
+ let lang_item = self.tcx.require_lang_item(LangItem::Sized, None);
+ self.require_type_meets(ty, span, code, lang_item);
+ }
+ }
+
+ pub fn require_type_is_sized_deferred(
+ &self,
+ ty: Ty<'tcx>,
+ span: Span,
+ code: traits::ObligationCauseCode<'tcx>,
+ ) {
+ if !ty.references_error() {
+ self.deferred_sized_obligations.borrow_mut().push((ty, span, code));
+ }
+ }
+
+ pub fn register_bound(
+ &self,
+ ty: Ty<'tcx>,
+ def_id: DefId,
+ cause: traits::ObligationCause<'tcx>,
+ ) {
+ if !ty.references_error() {
+ self.fulfillment_cx.borrow_mut().register_bound(
+ self,
+ self.param_env,
+ ty,
+ def_id,
+ cause,
+ );
+ }
+ }
+
+ pub fn to_ty(&self, ast_t: &hir::Ty<'_>) -> Ty<'tcx> {
+ let t = <dyn AstConv<'_>>::ast_ty_to_ty(self, ast_t);
+ self.register_wf_obligation(t.into(), ast_t.span, traits::WellFormed(None));
+ t
+ }
+
+ pub fn to_ty_saving_user_provided_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ let ty = self.to_ty(ast_ty);
+ debug!("to_ty_saving_user_provided_ty: ty={:?}", ty);
+
+ if Self::can_contain_user_lifetime_bounds(ty) {
+ let c_ty = self.canonicalize_response(UserType::Ty(ty));
+ debug!("to_ty_saving_user_provided_ty: c_ty={:?}", c_ty);
+ self.typeck_results.borrow_mut().user_provided_types_mut().insert(ast_ty.hir_id, c_ty);
+ }
+
+ ty
+ }
+
+ pub fn array_length_to_const(&self, length: &hir::ArrayLen) -> ty::Const<'tcx> {
+ match length {
+ &hir::ArrayLen::Infer(_, span) => self.ct_infer(self.tcx.types.usize, None, span),
+ hir::ArrayLen::Body(anon_const) => self.to_const(anon_const),
+ }
+ }
+
+ pub fn to_const(&self, ast_c: &hir::AnonConst) -> ty::Const<'tcx> {
+ let const_def_id = self.tcx.hir().local_def_id(ast_c.hir_id);
+ let c = ty::Const::from_anon_const(self.tcx, const_def_id);
+ self.register_wf_obligation(
+ c.into(),
+ self.tcx.hir().span(ast_c.hir_id),
+ ObligationCauseCode::WellFormed(None),
+ );
+ c
+ }
+
+ pub fn const_arg_to_const(
+ &self,
+ ast_c: &hir::AnonConst,
+ param_def_id: DefId,
+ ) -> ty::Const<'tcx> {
+ let const_def = ty::WithOptConstParam {
+ did: self.tcx.hir().local_def_id(ast_c.hir_id),
+ const_param_did: Some(param_def_id),
+ };
+ let c = ty::Const::from_opt_const_arg_anon_const(self.tcx, const_def);
+ self.register_wf_obligation(
+ c.into(),
+ self.tcx.hir().span(ast_c.hir_id),
+ ObligationCauseCode::WellFormed(None),
+ );
+ c
+ }
+
+ // If the type given by the user has free regions, save it for later, since
+ // NLL would like to enforce those. Also pass in types that involve
+ // projections, since those can resolve to `'static` bounds (modulo #54940,
+ // which hopefully will be fixed by the time you see this comment, dear
+ // reader, although I have my doubts). Also pass in types with inference
+ // types, because they may be repeated. Other sorts of things are already
+ // sufficiently enforced with erased regions. =)
+ fn can_contain_user_lifetime_bounds<T>(t: T) -> bool
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ t.has_free_regions() || t.has_projections() || t.has_infer_types()
+ }
+
+ pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> {
+ match self.typeck_results.borrow().node_types().get(id) {
+ Some(&t) => t,
+ None if self.is_tainted_by_errors() => self.tcx.ty_error(),
+ None => {
+ bug!(
+ "no type for node {}: {} in fcx {}",
+ id,
+ self.tcx.hir().node_to_string(id),
+ self.tag()
+ );
+ }
+ }
+ }
+
+ pub fn node_ty_opt(&self, id: hir::HirId) -> Option<Ty<'tcx>> {
+ match self.typeck_results.borrow().node_types().get(id) {
+ Some(&t) => Some(t),
+ None if self.is_tainted_by_errors() => Some(self.tcx.ty_error()),
+ None => None,
+ }
+ }
+
+ /// Registers an obligation for checking later, during regionck, that `arg` is well-formed.
+ pub fn register_wf_obligation(
+ &self,
+ arg: subst::GenericArg<'tcx>,
+ span: Span,
+ code: traits::ObligationCauseCode<'tcx>,
+ ) {
+ // WF obligations never themselves fail, so no real need to give a detailed cause:
+ let cause = traits::ObligationCause::new(span, self.body_id, code);
+ self.register_predicate(traits::Obligation::new(
+ cause,
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(arg)).to_predicate(self.tcx),
+ ));
+ }
+
+ /// Registers obligations that all `substs` are well-formed.
+ pub fn add_wf_bounds(&self, substs: SubstsRef<'tcx>, expr: &hir::Expr<'_>) {
+ for arg in substs.iter().filter(|arg| {
+ matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..))
+ }) {
+ self.register_wf_obligation(arg, expr.span, traits::WellFormed(None));
+ }
+ }
+
+ // FIXME(arielb1): use this instead of field.ty everywhere
+    // Only for fields! Returns `<none>` for methods.
+ // Indifferent to privacy flags
+ pub fn field_ty(
+ &self,
+ span: Span,
+ field: &'tcx ty::FieldDef,
+ substs: SubstsRef<'tcx>,
+ ) -> Ty<'tcx> {
+ self.normalize_associated_types_in(span, field.ty(self.tcx, substs))
+ }
+
+ pub(in super::super) fn resolve_rvalue_scopes(&self, def_id: DefId) {
+ let scope_tree = self.tcx.region_scope_tree(def_id);
+ let rvalue_scopes = { rvalue_scopes::resolve_rvalue_scopes(self, &scope_tree, def_id) };
+ let mut typeck_results = self.inh.typeck_results.borrow_mut();
+ typeck_results.rvalue_scopes = rvalue_scopes;
+ }
+
+ pub(in super::super) fn resolve_generator_interiors(&self, def_id: DefId) {
+ let mut generators = self.deferred_generator_interiors.borrow_mut();
+ for (body_id, interior, kind) in generators.drain(..) {
+ self.select_obligations_where_possible(false, |_| {});
+ crate::check::generator_interior::resolve_interior(
+ self, def_id, body_id, interior, kind,
+ );
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub(in super::super) fn select_all_obligations_or_error(&self) {
+ let errors = self.fulfillment_cx.borrow_mut().select_all_or_error(&self);
+
+ if !errors.is_empty() {
+ self.report_fulfillment_errors(&errors, self.inh.body_id, false);
+ }
+ }
+
+ /// Select as many obligations as we can at present.
+ pub(in super::super) fn select_obligations_where_possible(
+ &self,
+ fallback_has_occurred: bool,
+ mutate_fulfillment_errors: impl Fn(&mut Vec<traits::FulfillmentError<'tcx>>),
+ ) {
+ let mut result = self.fulfillment_cx.borrow_mut().select_where_possible(self);
+ if !result.is_empty() {
+ mutate_fulfillment_errors(&mut result);
+ self.report_fulfillment_errors(&result, self.inh.body_id, fallback_has_occurred);
+ }
+ }
+
+ /// For the overloaded place expressions (`*x`, `x[3]`), the trait
+ /// returns a type of `&T`, but the actual type we assign to the
+ /// *expression* is `T`. So this function just peels off the return
+ /// type by one layer to yield `T`.
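+    ///
+    /// For example (illustrative), indexing `v[3]` with `v: Vec<u8>` goes
+    /// through `Index::index`, whose signature returns `&u8`, while the
+    /// indexing expression itself is assigned the type `u8`.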
+ pub(in super::super) fn make_overloaded_place_return_type(
+ &self,
+ method: MethodCallee<'tcx>,
+ ) -> ty::TypeAndMut<'tcx> {
+ // extract method return type, which will be &T;
+ let ret_ty = method.sig.output();
+
+        // The method returns `&T`, but the type visible to the user is `T`, so deref.
+ ret_ty.builtin_deref(true).unwrap()
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn self_type_matches_expected_vid(
+ &self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ expected_vid: ty::TyVid,
+ ) -> bool {
+ let self_ty = self.shallow_resolve(trait_ref.skip_binder().self_ty());
+ debug!(?self_ty);
+
+ match *self_ty.kind() {
+ ty::Infer(ty::TyVar(found_vid)) => {
+ // FIXME: consider using `sub_root_var` here so we
+ // can see through subtyping.
+ let found_vid = self.root_var(found_vid);
+ debug!("self_type_matches_expected_vid - found_vid={:?}", found_vid);
+ expected_vid == found_vid
+ }
+ _ => false,
+ }
+ }
+
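+ /// Returns the pending trait and projection obligations whose self type is
+ /// the inference variable `self_ty` (or its unification root), e.g. for
+ /// deducing a closure's signature or `Sized`-ness from `?0: Fn(..)`-style
+ /// obligations.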
+ #[instrument(skip(self), level = "debug")]
+ pub(in super::super) fn obligations_for_self_ty<'b>(
+ &'b self,
+ self_ty: ty::TyVid,
+ ) -> impl Iterator<Item = (ty::PolyTraitRef<'tcx>, traits::PredicateObligation<'tcx>)>
+ + Captures<'tcx>
+ + 'b {
+ // FIXME: consider using `sub_root_var` here so we
+ // can see through subtyping.
+ let ty_var_root = self.root_var(self_ty);
+ trace!("pending_obligations = {:#?}", self.fulfillment_cx.borrow().pending_obligations());
+
+ self.fulfillment_cx
+ .borrow()
+ .pending_obligations()
+ .into_iter()
+ .filter_map(move |obligation| {
+ let bound_predicate = obligation.predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Projection(data) => Some((
+ bound_predicate.rebind(data).required_poly_trait_ref(self.tcx),
+ obligation,
+ )),
+ ty::PredicateKind::Trait(data) => {
+ Some((bound_predicate.rebind(data).to_poly_trait_ref(), obligation))
+ }
+ ty::PredicateKind::Subtype(..) => None,
+ ty::PredicateKind::Coerce(..) => None,
+ ty::PredicateKind::RegionOutlives(..) => None,
+ ty::PredicateKind::TypeOutlives(..) => None,
+ ty::PredicateKind::WellFormed(..) => None,
+ ty::PredicateKind::ObjectSafe(..) => None,
+ ty::PredicateKind::ConstEvaluatable(..) => None,
+ ty::PredicateKind::ConstEquate(..) => None,
+ // N.B., this predicate is created by breaking down a
+ // `ClosureType: FnFoo()` predicate, where
+ // `ClosureType` represents some `Closure`. It can't
+ // possibly be referring to the current closure,
+ // because we haven't produced the `Closure` for
+ // this closure yet; this is exactly why the other
+ // code is looking for a self type of an unresolved
+ // inference variable.
+ ty::PredicateKind::ClosureKind(..) => None,
+ ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ })
+ .filter(move |(tr, _)| self.self_type_matches_expected_vid(*tr, ty_var_root))
+ }
+
+ pub(in super::super) fn type_var_is_sized(&self, self_ty: ty::TyVid) -> bool {
+ self.obligations_for_self_ty(self_ty)
+ .any(|(tr, _)| Some(tr.def_id()) == self.tcx.lang_items().sized_trait())
+ }
+
+ pub(in super::super) fn err_args(&self, len: usize) -> Vec<Ty<'tcx>> {
+ vec![self.tcx.ty_error(); len]
+ }
+
+ /// Unifies the output type with the expected type early, for more coercions
+ /// and forward type information on the input expressions.
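+ ///
+ /// For example (an illustrative sketch; `pick` is hypothetical): given
+ /// `fn pick<T>(v: Vec<T>) -> T` used where an `i32` is expected, unifying
+ /// the formal return type `T` with `i32` lets the argument expression be
+ /// checked against the expectation `Vec<i32>`.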
+ #[instrument(skip(self, call_span), level = "debug")]
+ pub(in super::super) fn expected_inputs_for_expected_output(
+ &self,
+ call_span: Span,
+ expected_ret: Expectation<'tcx>,
+ formal_ret: Ty<'tcx>,
+ formal_args: &[Ty<'tcx>],
+ ) -> Option<Vec<Ty<'tcx>>> {
+ let formal_ret = self.resolve_vars_with_obligations(formal_ret);
+ let ret_ty = expected_ret.only_has_type(self)?;
+
+ // HACK(oli-obk): This is a hack to keep RPIT and TAIT in sync wrt their behaviour.
+ // Without it, the inference
+ // variable will get instantiated with the opaque type. The inference variable often
+ // has various helpful obligations registered for it that help closures figure out their
+ // signature. If we infer the inference var to the opaque type, the closure won't be able
+ // to find those obligations anymore, and it can't necessarily find them from the opaque
+ // type itself. We could be more powerful with inference if we *combined* the obligations
+ // so that we got both the obligations from the opaque type and the ones from the inference
+ // variable. That will accept more code than we do right now, so we need to carefully consider
+ // the implications.
+ // Note: this check is pessimistic, as the inference type could be matched with something other
+ // than the opaque type, but then we need a new `TypeRelation` just for this specific case and
+ // can't re-use `sup` below.
+ // See src/test/ui/impl-trait/hidden-type-is-opaque.rs and
+ // src/test/ui/impl-trait/hidden-type-is-opaque-2.rs for examples that hit this path.
+ if formal_ret.has_infer_types() {
+ for ty in ret_ty.walk() {
+ if let ty::subst::GenericArgKind::Type(ty) = ty.unpack()
+ && let ty::Opaque(def_id, _) = *ty.kind()
+ && let Some(def_id) = def_id.as_local()
+ && self.opaque_type_origin(def_id, DUMMY_SP).is_some() {
+ return None;
+ }
+ }
+ }
+
+ let expect_args = self
+ .fudge_inference_if_ok(|| {
+ // Attempt to apply a subtyping relationship between the formal
+ // return type (likely containing type variables if the function
+ // is polymorphic) and the expected return type.
+ // No argument expectations are produced if unification fails.
+ let origin = self.misc(call_span);
+ let ures = self.at(&origin, self.param_env).sup(ret_ty, formal_ret);
+
+ // FIXME(#27336) can't use `?` here; `Try::from_error` doesn't default
+ // to identity, so the resulting type is not constrained.
+ match ures {
+ Ok(ok) => {
+ // Process any obligations locally as much as
+ // we can. We don't care if some things turn
+ // out unconstrained or ambiguous, as we're
+ // just trying to get hints here.
+ let errors = self.save_and_restore_in_snapshot_flag(|_| {
+ let mut fulfill = <dyn TraitEngine<'_>>::new(self.tcx);
+ for obligation in ok.obligations {
+ fulfill.register_predicate_obligation(self, obligation);
+ }
+ fulfill.select_where_possible(self)
+ });
+
+ if !errors.is_empty() {
+ return Err(());
+ }
+ }
+ Err(_) => return Err(()),
+ }
+
+ // Record all the argument types, with the substitutions
+ // produced from the above subtyping unification.
+ Ok(Some(formal_args.iter().map(|&ty| self.resolve_vars_if_possible(ty)).collect()))
+ })
+ .unwrap_or_default();
+ debug!(?formal_args, ?formal_ret, ?expect_args, ?expected_ret);
+ expect_args
+ }
+
+ pub(in super::super) fn resolve_lang_item_path(
+ &self,
+ lang_item: hir::LangItem,
+ span: Span,
+ hir_id: hir::HirId,
+ expr_hir_id: Option<hir::HirId>,
+ ) -> (Res, Ty<'tcx>) {
+ let def_id = self.tcx.require_lang_item(lang_item, Some(span));
+ let def_kind = self.tcx.def_kind(def_id);
+
+ let item_ty = if let DefKind::Variant = def_kind {
+ self.tcx.bound_type_of(self.tcx.parent(def_id))
+ } else {
+ self.tcx.bound_type_of(def_id)
+ };
+ let substs = self.fresh_substs_for_item(span, def_id);
+ let ty = item_ty.subst(self.tcx, substs);
+
+ self.write_resolution(hir_id, Ok((def_kind, def_id)));
+ self.add_required_obligations_with_code(
+ span,
+ def_id,
+ &substs,
+ match lang_item {
+ hir::LangItem::IntoFutureIntoFuture => {
+ ObligationCauseCode::AwaitableExpr(expr_hir_id)
+ }
+ hir::LangItem::IteratorNext | hir::LangItem::IntoIterIntoIter => {
+ ObligationCauseCode::ForLoopIterator
+ }
+ hir::LangItem::TryTraitFromOutput
+ | hir::LangItem::TryTraitFromResidual
+ | hir::LangItem::TryTraitBranch => ObligationCauseCode::QuestionMark,
+ _ => traits::ItemObligation(def_id),
+ },
+ );
+ (Res::Def(def_kind, def_id), ty)
+ }
+
+ /// Resolves an associated value path into a base type and associated constant, or method
+ /// resolution. The newly resolved definition is written into `type_dependent_defs`.
+ pub fn resolve_ty_and_res_fully_qualified_call(
+ &self,
+ qpath: &'tcx QPath<'tcx>,
+ hir_id: hir::HirId,
+ span: Span,
+ ) -> (Res, Option<Ty<'tcx>>, &'tcx [hir::PathSegment<'tcx>]) {
+ debug!(
+ "resolve_ty_and_res_fully_qualified_call: qpath={:?} hir_id={:?} span={:?}",
+ qpath, hir_id, span
+ );
+ let (ty, qself, item_segment) = match *qpath {
+ QPath::Resolved(ref opt_qself, ref path) => {
+ return (
+ path.res,
+ opt_qself.as_ref().map(|qself| self.to_ty(qself)),
+ path.segments,
+ );
+ }
+ QPath::TypeRelative(ref qself, ref segment) => {
+ // Don't use `self.to_ty`, since this will register a WF obligation.
+ // If we're trying to call a non-existent method on a trait
+ // (e.g. `MyTrait::missing_method`), then resolution will
+ // give us a `QPath::TypeRelative` with a trait object as
+ // `qself`. In that case, we want to avoid registering a WF obligation
+ // for `dyn MyTrait`, since we don't actually need the trait
+ // to be object-safe.
+ // We manually call `register_wf_obligation` in the success path
+ // below.
+ (<dyn AstConv<'_>>::ast_ty_to_ty_in_path(self, qself), qself, segment)
+ }
+ QPath::LangItem(..) => {
+ bug!("`resolve_ty_and_res_fully_qualified_call` called on `LangItem`")
+ }
+ };
+ if let Some(&cached_result) = self.typeck_results.borrow().type_dependent_defs().get(hir_id)
+ {
+ self.register_wf_obligation(ty.into(), qself.span, traits::WellFormed(None));
+ // Return directly on cache hit. This is useful to avoid doubly reporting
+ // errors with default match binding modes. See #44614.
+ let def = cached_result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id));
+ return (def, Some(ty), slice::from_ref(&**item_segment));
+ }
+ let item_name = item_segment.ident;
+ let result = self
+ .resolve_fully_qualified_call(span, item_name, ty, qself.span, hir_id)
+ .or_else(|error| {
+ let result = match error {
+ method::MethodError::PrivateMatch(kind, def_id, _) => Ok((kind, def_id)),
+ _ => Err(ErrorGuaranteed::unchecked_claim_error_was_emitted()),
+ };
+
+ // If we have a path like `MyTrait::missing_method`, then don't register
+ // a WF obligation for `dyn MyTrait` when method lookup fails. Otherwise,
+ // register a WF obligation so that we can detect any additional
+ // errors in the self type.
+ if !(matches!(error, method::MethodError::NoMatch(_)) && ty.is_trait()) {
+ self.register_wf_obligation(ty.into(), qself.span, traits::WellFormed(None));
+ }
+ if item_name.name != kw::Empty {
+ if let Some(mut e) = self.report_method_error(
+ span,
+ ty,
+ item_name,
+ SelfSource::QPath(qself),
+ error,
+ None,
+ ) {
+ e.emit();
+ }
+ }
+ result
+ });
+
+ if result.is_ok() {
+ self.register_wf_obligation(ty.into(), qself.span, traits::WellFormed(None));
+ }
+
+ // Write back the new resolution.
+ self.write_resolution(hir_id, result);
+ (
+ result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)),
+ Some(ty),
+ slice::from_ref(&**item_segment),
+ )
+ }
+
+ /// Given a function `Node`, return its `FnDecl` if it exists, or `None` otherwise.
+ pub(in super::super) fn get_node_fn_decl(
+ &self,
+ node: Node<'tcx>,
+ ) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident, bool)> {
+ match node {
+ Node::Item(&hir::Item { ident, kind: hir::ItemKind::Fn(ref sig, ..), .. }) => {
+ // This is less than ideal; it will not suggest a return type span on any
+ // method called `main`, regardless of whether it is actually the entry point,
+ // but it will still present it as the reason for the expected type.
+ Some((&sig.decl, ident, ident.name != sym::main))
+ }
+ Node::TraitItem(&hir::TraitItem {
+ ident,
+ kind: hir::TraitItemKind::Fn(ref sig, ..),
+ ..
+ }) => Some((&sig.decl, ident, true)),
+ Node::ImplItem(&hir::ImplItem {
+ ident,
+ kind: hir::ImplItemKind::Fn(ref sig, ..),
+ ..
+ }) => Some((&sig.decl, ident, false)),
+ _ => None,
+ }
+ }
+
+ /// Given a `HirId`, return the `FnDecl` of the method it is enclosed by and whether a
+ /// suggestion can be made, `None` otherwise.
+ pub fn get_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, bool)> {
+ // Get enclosing Fn, if it is a function or a trait method, unless there's a `loop` or
+ // `while` before reaching it, as block tail returns are not available in them.
+ self.tcx.hir().get_return_block(blk_id).and_then(|blk_id| {
+ let parent = self.tcx.hir().get(blk_id);
+ self.get_node_fn_decl(parent).map(|(fn_decl, _, is_main)| (fn_decl, is_main))
+ })
+ }
+
+ pub(in super::super) fn note_internal_mutation_in_method(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) {
+ if found != self.tcx.types.unit {
+ return;
+ }
+ if let ExprKind::MethodCall(path_segment, [rcvr, ..], _) = expr.kind {
+ if self
+ .typeck_results
+ .borrow()
+ .expr_ty_adjusted_opt(rcvr)
+ .map_or(true, |ty| expected.peel_refs() != ty.peel_refs())
+ {
+ return;
+ }
+ let mut sp = MultiSpan::from_span(path_segment.ident.span);
+ sp.push_span_label(
+ path_segment.ident.span,
+ format!(
+ "this call modifies {} in-place",
+ match rcvr.kind {
+ ExprKind::Path(QPath::Resolved(
+ None,
+ hir::Path { segments: [segment], .. },
+ )) => format!("`{}`", segment.ident),
+ _ => "its receiver".to_string(),
+ }
+ ),
+ );
+ sp.push_span_label(
+ rcvr.span,
+ "you probably want to use this value after calling the method...",
+ );
+ err.span_note(
+ sp,
+ &format!("method `{}` modifies its receiver in-place", path_segment.ident),
+ );
+ err.note(&format!("...instead of the `()` output of method `{}`", path_segment.ident));
+ }
+ }
+
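+ /// For example (illustrative): `fn a() {}` and `fn b() {}` have distinct
+ /// `FnDef` types even though both have the signature `fn()`, so this adds
+ /// help notes suggesting a cast to the common function pointer type, e.g.
+ /// `a as fn()`.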
+ pub(in super::super) fn note_need_for_fn_pointer(
+ &self,
+ err: &mut Diagnostic,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) {
+ let (sig, did, substs) = match (&expected.kind(), &found.kind()) {
+ (ty::FnDef(did1, substs1), ty::FnDef(did2, substs2)) => {
+ let sig1 = self.tcx.bound_fn_sig(*did1).subst(self.tcx, substs1);
+ let sig2 = self.tcx.bound_fn_sig(*did2).subst(self.tcx, substs2);
+ if sig1 != sig2 {
+ return;
+ }
+ err.note(
+ "different `fn` items always have unique types, even if their signatures are \
+ the same",
+ );
+ (sig1, *did1, substs1)
+ }
+ (ty::FnDef(did, substs), ty::FnPtr(sig2)) => {
+ let sig1 = self.tcx.bound_fn_sig(*did).subst(self.tcx, substs);
+ if sig1 != *sig2 {
+ return;
+ }
+ (sig1, *did, substs)
+ }
+ _ => return,
+ };
+ err.help(&format!("change the expected type to be function pointer `{}`", sig));
+ err.help(&format!(
+ "if the expected type is due to type inference, cast the expected `fn` to a function \
+ pointer: `{} as {}`",
+ self.tcx.def_path_str_with_substs(did, substs),
+ sig
+ ));
+ }
+
+ // Instantiates the given path, which must refer to an item with the given
+ // number of type parameters and the given type.
+ #[instrument(skip(self, span), level = "debug")]
+ pub fn instantiate_value_path(
+ &self,
+ segments: &[hir::PathSegment<'_>],
+ self_ty: Option<Ty<'tcx>>,
+ res: Res,
+ span: Span,
+ hir_id: hir::HirId,
+ ) -> (Ty<'tcx>, Res) {
+ let tcx = self.tcx;
+
+ let path_segs = match res {
+ Res::Local(_) | Res::SelfCtor(_) => vec![],
+ Res::Def(kind, def_id) => <dyn AstConv<'_>>::def_ids_for_value_path_segments(
+ self, segments, self_ty, kind, def_id,
+ ),
+ _ => bug!("instantiate_value_path on {:?}", res),
+ };
+
+ let mut user_self_ty = None;
+ let mut is_alias_variant_ctor = false;
+ match res {
+ Res::Def(DefKind::Ctor(CtorOf::Variant, _), _)
+ if let Some(self_ty) = self_ty =>
+ {
+ let adt_def = self_ty.ty_adt_def().unwrap();
+ user_self_ty = Some(UserSelfTy { impl_def_id: adt_def.did(), self_ty });
+ is_alias_variant_ctor = true;
+ }
+ Res::Def(DefKind::AssocFn | DefKind::AssocConst, def_id) => {
+ let assoc_item = tcx.associated_item(def_id);
+ let container = assoc_item.container;
+ let container_id = assoc_item.container_id(tcx);
+ debug!(?def_id, ?container, ?container_id);
+ match container {
+ ty::TraitContainer => {
+ callee::check_legal_trait_for_method_call(tcx, span, None, span, container_id)
+ }
+ ty::ImplContainer => {
+ if segments.len() == 1 {
+ // `<T>::assoc` will end up here, and so
+ // can `T::assoc`. If this came from an
+ // inherent impl, we need to record the
+ // `T` for posterity (see `UserSelfTy` for
+ // details).
+ let self_ty = self_ty.expect("UFCS sugared assoc missing Self");
+ user_self_ty = Some(UserSelfTy { impl_def_id: container_id, self_ty });
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+
+ // Now that we have categorized what space the parameters for each
+ // segment belong to, let's sort out the parameters that the user
+ // provided (if any) into their appropriate spaces. We'll also report
+ // errors if type parameters are provided in an inappropriate place.
+
+ let generic_segs: FxHashSet<_> = path_segs.iter().map(|PathSeg(_, index)| index).collect();
+ let generics_has_err = <dyn AstConv<'_>>::prohibit_generics(
+ self,
+ segments.iter().enumerate().filter_map(|(index, seg)| {
+ if !generic_segs.contains(&index) || is_alias_variant_ctor {
+ Some(seg)
+ } else {
+ None
+ }
+ }),
+ |_| {},
+ );
+
+ if let Res::Local(hid) = res {
+ let ty = self.local_ty(span, hid).decl_ty;
+ let ty = self.normalize_associated_types_in(span, ty);
+ self.write_ty(hir_id, ty);
+ return (ty, res);
+ }
+
+ if generics_has_err {
+ // Don't try to infer type parameters when prohibited generic arguments were given.
+ user_self_ty = None;
+ }
+
+ // Now we have to compare the types that the user *actually*
+ // provided against the types that were *expected*. If the user
+ // did not provide any types, then we want to substitute inference
+ // variables. If the user provided some types, we may still need
+ // to add defaults. If the user provided *too many* types, that's
+ // a problem.
+
+ let mut infer_args_for_err = FxHashSet::default();
+
+ let mut explicit_late_bound = ExplicitLateBound::No;
+ for &PathSeg(def_id, index) in &path_segs {
+ let seg = &segments[index];
+ let generics = tcx.generics_of(def_id);
+
+ // Argument-position `impl Trait` is treated as a normal generic
+ // parameter internally, but we don't allow users to specify the
+ // parameter's value explicitly, so we have to do some error-
+ // checking here.
+ let arg_count = <dyn AstConv<'_>>::check_generic_arg_count_for_call(
+ tcx,
+ span,
+ def_id,
+ &generics,
+ seg,
+ IsMethodCall::No,
+ );
+
+ if let ExplicitLateBound::Yes = arg_count.explicit_late_bound {
+ explicit_late_bound = ExplicitLateBound::Yes;
+ }
+
+ if let Err(GenericArgCountMismatch { reported: Some(_), .. }) = arg_count.correct {
+ infer_args_for_err.insert(index);
+ self.set_tainted_by_errors(); // See issue #53251.
+ }
+ }
+
+ let has_self = path_segs
+ .last()
+ .map(|PathSeg(def_id, _)| tcx.generics_of(*def_id).has_self)
+ .unwrap_or(false);
+
+ let (res, self_ctor_substs) = if let Res::SelfCtor(impl_def_id) = res {
+ let ty = self.normalize_ty(span, tcx.at(span).type_of(impl_def_id));
+ match *ty.kind() {
+ ty::Adt(adt_def, substs) if adt_def.has_ctor() => {
+ let variant = adt_def.non_enum_variant();
+ let ctor_def_id = variant.ctor_def_id.unwrap();
+ (
+ Res::Def(DefKind::Ctor(CtorOf::Struct, variant.ctor_kind), ctor_def_id),
+ Some(substs),
+ )
+ }
+ _ => {
+ let mut err = tcx.sess.struct_span_err(
+ span,
+ "the `Self` constructor can only be used with tuple or unit structs",
+ );
+ if let Some(adt_def) = ty.ty_adt_def() {
+ match adt_def.adt_kind() {
+ AdtKind::Enum => {
+ err.help("did you mean to use one of the enum's variants?");
+ }
+ AdtKind::Struct | AdtKind::Union => {
+ err.span_suggestion(
+ span,
+ "use curly brackets",
+ "Self { /* fields */ }",
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ }
+ err.emit();
+
+ return (tcx.ty_error(), res);
+ }
+ }
+ } else {
+ (res, None)
+ };
+ let def_id = res.def_id();
+
+ // The things we are substituting into the type should not contain
+ // escaping late-bound regions, nor should the base type scheme.
+ let ty = tcx.type_of(def_id);
+
+ let arg_count = GenericArgCountResult {
+ explicit_late_bound,
+ correct: if infer_args_for_err.is_empty() {
+ Ok(())
+ } else {
+ Err(GenericArgCountMismatch::default())
+ },
+ };
+
+ struct CreateCtorSubstsContext<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ path_segs: &'a [PathSeg],
+ infer_args_for_err: &'a FxHashSet<usize>,
+ segments: &'a [hir::PathSegment<'a>],
+ }
+ impl<'tcx, 'a> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for CreateCtorSubstsContext<'a, 'tcx> {
+ fn args_for_def_id(
+ &mut self,
+ def_id: DefId,
+ ) -> (Option<&'a hir::GenericArgs<'a>>, bool) {
+ if let Some(&PathSeg(_, index)) =
+ self.path_segs.iter().find(|&PathSeg(did, _)| *did == def_id)
+ {
+ // If we've encountered an `impl Trait`-related error, we're just
+ // going to infer the arguments for better error messages.
+ if !self.infer_args_for_err.contains(&index) {
+ // Check whether the user has provided generic arguments.
+ if let Some(ref data) = self.segments[index].args {
+ return (Some(data), self.segments[index].infer_args);
+ }
+ }
+ return (None, self.segments[index].infer_args);
+ }
+
+ (None, true)
+ }
+
+ fn provided_kind(
+ &mut self,
+ param: &ty::GenericParamDef,
+ arg: &GenericArg<'_>,
+ ) -> subst::GenericArg<'tcx> {
+ match (&param.kind, arg) {
+ (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+ <dyn AstConv<'_>>::ast_region_to_region(self.fcx, lt, Some(param)).into()
+ }
+ (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
+ self.fcx.to_ty(ty).into()
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => {
+ self.fcx.const_arg_to_const(&ct.value, param.def_id).into()
+ }
+ (GenericParamDefKind::Type { .. }, GenericArg::Infer(inf)) => {
+ self.fcx.ty_infer(Some(param), inf.span).into()
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Infer(inf)) => {
+ let tcx = self.fcx.tcx();
+ self.fcx.ct_infer(tcx.type_of(param.def_id), Some(param), inf.span).into()
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn inferred_kind(
+ &mut self,
+ substs: Option<&[subst::GenericArg<'tcx>]>,
+ param: &ty::GenericParamDef,
+ infer_args: bool,
+ ) -> subst::GenericArg<'tcx> {
+ let tcx = self.fcx.tcx();
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+ self.fcx.re_infer(Some(param), self.span).unwrap().into()
+ }
+ GenericParamDefKind::Type { has_default, .. } => {
+ if !infer_args && has_default {
+ // If we have a default, then it doesn't matter that we're not
+ // inferring the type arguments: we provide the default where any
+ // is missing.
+ let default = tcx.bound_type_of(param.def_id);
+ self.fcx
+ .normalize_ty(self.span, default.subst(tcx, substs.unwrap()))
+ .into()
+ } else {
+ // If no type arguments were provided, we have to infer them.
+ // This case also occurs as a result of some malformed input, e.g.
+ // a lifetime argument being given instead of a type parameter.
+ // Using inference instead of `Error` gives better error messages.
+ self.fcx.var_for_def(self.span, param)
+ }
+ }
+ GenericParamDefKind::Const { has_default } => {
+ if !infer_args && has_default {
+ tcx.bound_const_param_default(param.def_id)
+ .subst(tcx, substs.unwrap())
+ .into()
+ } else {
+ self.fcx.var_for_def(self.span, param)
+ }
+ }
+ }
+ }
+ }
+
+ let substs = self_ctor_substs.unwrap_or_else(|| {
+ <dyn AstConv<'_>>::create_substs_for_generic_args(
+ tcx,
+ def_id,
+ &[],
+ has_self,
+ self_ty,
+ &arg_count,
+ &mut CreateCtorSubstsContext {
+ fcx: self,
+ span,
+ path_segs: &path_segs,
+ infer_args_for_err: &infer_args_for_err,
+ segments,
+ },
+ )
+ });
+ assert!(!substs.has_escaping_bound_vars());
+ assert!(!ty.has_escaping_bound_vars());
+
+ // First, store the "user substs" for later.
+ self.write_user_type_annotation_from_substs(hir_id, def_id, substs, user_self_ty);
+
+ self.add_required_obligations(span, def_id, &substs);
+
+ // Substitute the values for the type parameters into the type of
+ // the referenced item.
+ let ty_substituted = self.instantiate_type_scheme(span, &substs, ty);
+
+ if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
+ // In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
+ // is inherent, there is no `Self` parameter; instead, the impl needs
+ // type parameters, which we can infer by unifying the provided `Self`
+ // with the substituted impl type.
+ // This also occurs for an enum variant on a type alias.
+ let ty = tcx.type_of(impl_def_id);
+
+ let impl_ty = self.instantiate_type_scheme(span, &substs, ty);
+ match self.at(&self.misc(span), self.param_env).eq(impl_ty, self_ty) {
+ Ok(ok) => self.register_infer_ok_obligations(ok),
+ Err(_) => {
+ self.tcx.sess.delay_span_bug(
+ span,
+ &format!(
+ "instantiate_value_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
+ self_ty,
+ impl_ty,
+ ),
+ );
+ }
+ }
+ }
+
+ debug!("instantiate_value_path: type of {:?} is {:?}", hir_id, ty_substituted);
+ self.write_substs(hir_id, substs);
+
+ (ty_substituted, res)
+ }
+
+ /// Add all the obligations that are required, substituting and normalized appropriately.
+ pub(crate) fn add_required_obligations(
+ &self,
+ span: Span,
+ def_id: DefId,
+ substs: &SubstsRef<'tcx>,
+ ) {
+ self.add_required_obligations_with_code(
+ span,
+ def_id,
+ substs,
+ traits::ItemObligation(def_id),
+ )
+ }
+
+ #[tracing::instrument(level = "debug", skip(self, span, def_id, substs))]
+ fn add_required_obligations_with_code(
+ &self,
+ span: Span,
+ def_id: DefId,
+ substs: &SubstsRef<'tcx>,
+ code: ObligationCauseCode<'tcx>,
+ ) {
+ let (bounds, _) = self.instantiate_bounds(span, def_id, &substs);
+
+ for obligation in traits::predicates_for_generics(
+ traits::ObligationCause::new(span, self.body_id, code),
+ self.param_env,
+ bounds,
+ ) {
+ self.register_predicate(obligation);
+ }
+ }
+
+ /// Resolves `typ` by a single level if `typ` is a type variable.
+ /// If no resolution is possible, then an error is reported.
+ /// Numeric inference variables may be left unresolved.
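+ ///
+ /// For example (illustrative): in `let x = Default::default();` with no
+ /// further constraints, the type variable for `x` cannot be resolved, so
+ /// this reports E0282 ("type annotations needed") and returns the error
+ /// type.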
+ pub fn structurally_resolved_type(&self, sp: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let ty = self.resolve_vars_with_obligations(ty);
+ if !ty.is_ty_var() {
+ ty
+ } else {
+ if !self.is_tainted_by_errors() {
+ self.emit_inference_failure_err((**self).body_id, sp, ty.into(), E0282, true)
+ .emit();
+ }
+ let err = self.tcx.ty_error();
+ self.demand_suptype(sp, err, ty);
+ err
+ }
+ }
+
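+ /// Pushes `ctxt` onto the stack of breakable contexts (the targets that
+ /// `break`/`continue` can refer to) for the duration of `f`, then pops it
+ /// and returns it alongside `f`'s result.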
+ pub(in super::super) fn with_breakable_ctxt<F: FnOnce() -> R, R>(
+ &self,
+ id: hir::HirId,
+ ctxt: BreakableCtxt<'tcx>,
+ f: F,
+ ) -> (BreakableCtxt<'tcx>, R) {
+ let index;
+ {
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ index = enclosing_breakables.stack.len();
+ enclosing_breakables.by_id.insert(id, index);
+ enclosing_breakables.stack.push(ctxt);
+ }
+ let result = f();
+ let ctxt = {
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ debug_assert!(enclosing_breakables.stack.len() == index + 1);
+ enclosing_breakables.by_id.remove(&id).expect("missing breakable context");
+ enclosing_breakables.stack.pop().expect("missing breakable context")
+ };
+ (ctxt, result)
+ }
+
+ /// Instantiate a QueryResponse in a probe context, without a
+ /// good ObligationCause.
+ pub(in super::super) fn probe_instantiate_query_response(
+ &self,
+ span: Span,
+ original_values: &OriginalQueryValues<'tcx>,
+ query_result: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
+ ) -> InferResult<'tcx, Ty<'tcx>> {
+ self.instantiate_query_response_and_region_obligations(
+ &traits::ObligationCause::misc(span, self.body_id),
+ self.param_env,
+ original_values,
+ query_result,
+ )
+ }
+
+ /// Returns `true` if an expression is contained inside the LHS of an assignment expression.
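+ ///
+ /// For example (illustrative): in `arr[i] = v`, this returns `true` for
+ /// `arr[i]` and its subexpressions `arr` and `i`, but `false` for `v`.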
+ pub(in super::super) fn expr_in_place(&self, mut expr_id: hir::HirId) -> bool {
+ let mut contained_in_place = false;
+
+ while let hir::Node::Expr(parent_expr) =
+ self.tcx.hir().get(self.tcx.hir().get_parent_node(expr_id))
+ {
+ match &parent_expr.kind {
+ hir::ExprKind::Assign(lhs, ..) | hir::ExprKind::AssignOp(_, lhs, ..) => {
+ if lhs.hir_id == expr_id {
+ contained_in_place = true;
+ break;
+ }
+ }
+ _ => (),
+ }
+ expr_id = parent_expr.hir_id;
+ }
+
+ contained_in_place
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs b/compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs
new file mode 100644
index 000000000..7602f2550
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/arg_matrix.rs
@@ -0,0 +1,376 @@
+use std::cmp;
+
+use rustc_index::vec::IndexVec;
+use rustc_middle::ty::error::TypeError;
+
+rustc_index::newtype_index! {
+ pub(crate) struct ExpectedIdx {
+ DEBUG_FORMAT = "ExpectedIdx({})",
+ }
+}
+
+rustc_index::newtype_index! {
+ pub(crate) struct ProvidedIdx {
+ DEBUG_FORMAT = "ProvidedIdx({})",
+ }
+}
+
+impl ExpectedIdx {
+ pub fn to_provided_idx(self) -> ProvidedIdx {
+ ProvidedIdx::from_usize(self.as_usize())
+ }
+}
+
+// An issue that might be found in the compatibility matrix
+#[derive(Debug)]
+enum Issue {
+ /// The given argument is of an invalid type for the input
+ Invalid(usize),
+ /// There is a missing input
+ Missing(usize),
+ /// There's a superfluous argument
+ Extra(usize),
+ /// Two arguments should be swapped
+ Swap(usize, usize),
+ /// Several arguments should be reordered
+ Permutation(Vec<Option<usize>>),
+}
+
+#[derive(Clone, Debug)]
+pub(crate) enum Compatibility<'tcx> {
+ Compatible,
+ Incompatible(Option<TypeError<'tcx>>),
+}
+
+/// Similar to `Issue`, but contains some extra information
+#[derive(Debug)]
+pub(crate) enum Error<'tcx> {
+ /// The provided argument is of an invalid type for the expected input
+ Invalid(ProvidedIdx, ExpectedIdx, Compatibility<'tcx>),
+ /// There is a missing input
+ Missing(ExpectedIdx),
+ /// There's a superfluous argument
+ Extra(ProvidedIdx),
+ /// Two arguments should be swapped
+ Swap(ProvidedIdx, ProvidedIdx, ExpectedIdx, ExpectedIdx),
+ /// Several arguments should be reordered
+ Permutation(Vec<(ExpectedIdx, ProvidedIdx)>),
+}
+
+pub(crate) struct ArgMatrix<'tcx> {
+ /// Maps the indices in the `compatibility_matrix` rows to the indices of
+ /// the *user provided* inputs
+ provided_indices: Vec<ProvidedIdx>,
+ /// Maps the indices in the `compatibility_matrix` columns to the indices
+ /// of the *expected* args
+ expected_indices: Vec<ExpectedIdx>,
+ /// The first dimension (rows) are the remaining user provided inputs to
+ /// match and the second dimension (cols) are the remaining expected args
+ /// to match
+ compatibility_matrix: Vec<Vec<Compatibility<'tcx>>>,
+}
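+
+// Worked example (illustrative, not from the original source): for
+// `fn f(a: u32, b: &str)` called as `f("x", 1)`, the matrix is
+//
+// | u32 | &str
+// "x" | Incompatible | Compatible
+// 1 | Compatible | Incompatible
+//
+// Nothing on the diagonal matches, so `find_issue` below reports
+// `Issue::Swap(0, 1)`.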
+
+impl<'tcx> ArgMatrix<'tcx> {
+ pub(crate) fn new<F: FnMut(ProvidedIdx, ExpectedIdx) -> Compatibility<'tcx>>(
+ provided_count: usize,
+ expected_input_count: usize,
+ mut is_compatible: F,
+ ) -> Self {
+ let compatibility_matrix = (0..provided_count)
+ .map(|i| {
+ (0..expected_input_count)
+ .map(|j| is_compatible(ProvidedIdx::from_usize(i), ExpectedIdx::from_usize(j)))
+ .collect()
+ })
+ .collect();
+ ArgMatrix {
+ provided_indices: (0..provided_count).map(ProvidedIdx::from_usize).collect(),
+ expected_indices: (0..expected_input_count).map(ExpectedIdx::from_usize).collect(),
+ compatibility_matrix,
+ }
+ }
+
+ /// Remove a given input from consideration
+ fn eliminate_provided(&mut self, idx: usize) {
+ self.provided_indices.remove(idx);
+ self.compatibility_matrix.remove(idx);
+ }
+
+ /// Remove a given argument from consideration
+ fn eliminate_expected(&mut self, idx: usize) {
+ self.expected_indices.remove(idx);
+ for row in &mut self.compatibility_matrix {
+ row.remove(idx);
+ }
+ }
+
+ /// "satisfy" an input with a given arg, removing both from consideration
+ fn satisfy_input(&mut self, provided_idx: usize, expected_idx: usize) {
+ self.eliminate_provided(provided_idx);
+ self.eliminate_expected(expected_idx);
+ }
+
+ // Returns a `Vec` of (user input, expected arg) of matched arguments. These
+ // are inputs on the remaining diagonal that match.
+ fn eliminate_satisfied(&mut self) -> Vec<(ProvidedIdx, ExpectedIdx)> {
+ let num_args = cmp::min(self.provided_indices.len(), self.expected_indices.len());
+ let mut eliminated = vec![];
+ for i in (0..num_args).rev() {
+ if matches!(self.compatibility_matrix[i][i], Compatibility::Compatible) {
+ eliminated.push((self.provided_indices[i], self.expected_indices[i]));
+ self.satisfy_input(i, i);
+ }
+ }
+ eliminated
+ }
+
+ // Find some issue in the compatibility matrix
+ fn find_issue(&self) -> Option<Issue> {
+ let mat = &self.compatibility_matrix;
+ let ai = &self.expected_indices;
+ let ii = &self.provided_indices;
+
+ for i in 0..cmp::max(ai.len(), ii.len()) {
+ // If we eliminate the last row, any left-over expected args are considered missing
+ if i >= mat.len() {
+ return Some(Issue::Missing(i));
+ }
+ // If we eliminate the last column, any left-over provided inputs are extra
+ if mat[i].is_empty() {
+ return Some(Issue::Extra(i));
+ }
+
+ // Make sure we don't pass the bounds of our matrix
+ let is_arg = i < ai.len();
+ let is_input = i < ii.len();
+ if is_arg && is_input && matches!(mat[i][i], Compatibility::Compatible) {
+ // This is a satisfied input, so move along
+ continue;
+ }
+
+ let mut useless = true;
+ let mut unsatisfiable = true;
+ if is_arg {
+ for j in 0..ii.len() {
+ // If we find at least one input this argument could satisfy,
+ // then this argument isn't unsatisfiable.
+ if matches!(mat[j][i], Compatibility::Compatible) {
+ unsatisfiable = false;
+ break;
+ }
+ }
+ }
+ if is_input {
+ for j in 0..ai.len() {
+ // If we find at least one argument that could satisfy this input,
+ // then this input isn't useless.
+ if matches!(mat[i][j], Compatibility::Compatible) {
+ useless = false;
+ break;
+ }
+ }
+ }
+
+ match (is_input, is_arg, useless, unsatisfiable) {
+ // If an argument is unsatisfiable and the input in its position is useless,
+ // then the most likely explanation is that we just got the types wrong
+ (true, true, true, true) => return Some(Issue::Invalid(i)),
+ // Otherwise, if an input is useless, then indicate that this is an extra argument
+ (true, _, true, _) => return Some(Issue::Extra(i)),
+ // Otherwise, if an argument is unsatisfiable, indicate that it's missing
+ (_, true, _, true) => return Some(Issue::Missing(i)),
+ (true, true, _, _) => {
+ // The input isn't useless, and the argument isn't unsatisfiable,
+ // so look for a parameter we might swap it with
+ // We look for swaps explicitly, instead of just falling back on permutations
+ // so that cases like (A,B,C,D) given (B,A,D,C) show up as two swaps,
+ // instead of a large permutation of 4 elements.
+ for j in 0..cmp::min(ai.len(), ii.len()) {
+ if i == j || matches!(mat[j][j], Compatibility::Compatible) {
+ continue;
+ }
+ if matches!(mat[i][j], Compatibility::Compatible)
+ && matches!(mat[j][i], Compatibility::Compatible)
+ {
+ return Some(Issue::Swap(i, j));
+ }
+ }
+ }
+ _ => {
+ continue;
+ }
+ }
+ }
+
+ // We didn't find any of the individual issues above, but
+ // there might be a larger permutation of parameters, so we now check for that
+ // by looking for cycles.
+ // We use a double option at position i in this vec to represent:
+ // - None: We haven't computed anything about this argument yet
+ // - Some(None): This argument definitely doesn't participate in a cycle
+ // - Some(Some(x)): the i-th argument could permute to the x-th position
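+ //
+ // For example (illustrative): for expected `(A, B, C)` provided as
+ // `(b, c, a)`, each provided argument fits exactly one other slot,
+ // forming the cycle 0 -> 1 -> 2 -> 0, which is reported as a single
+ // `Issue::Permutation` rather than as three separate errors.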
+ let mut permutation: Vec<Option<Option<usize>>> = vec![None; mat.len()];
+ let mut permutation_found = false;
+ for i in 0..mat.len() {
+ if permutation[i].is_some() {
+ // We've already decided whether this argument is or is not in a loop
+ continue;
+ }
+
+ let mut stack = vec![];
+ let mut j = i;
+ let mut last = i;
+ let mut is_cycle = true;
+ loop {
+ stack.push(j);
+ // Look for params this one could slot into
+ let compat: Vec<_> =
+ mat[j]
+ .iter()
+ .enumerate()
+ .filter_map(|(i, c)| {
+ if matches!(c, Compatibility::Compatible) { Some(i) } else { None }
+ })
+ .collect();
+ if compat.len() != 1 {
+ // this could go into multiple slots; don't bother exploring them all
+ is_cycle = false;
+ break;
+ }
+ j = compat[0];
+ if stack.contains(&j) {
+ last = j;
+ break;
+ }
+ }
+ if stack.len() <= 2 {
+ // If we encounter a cycle of 1 or 2 elements, we'll let the
+ // "satisfy" and "swap" code above handle those
+ is_cycle = false;
+ }
+ // We've built up some chain, some of which might be a cycle
+ // ex: [1,2,3,4]; last = 2; j = 2;
+ // So, we want to mark 4, 3, and 2 as part of a permutation
+ permutation_found = is_cycle;
+ while let Some(x) = stack.pop() {
+ if is_cycle {
+ permutation[x] = Some(Some(j));
+ j = x;
+ if j == last {
+ // From here on out, we're a tail leading into a cycle,
+ // not the cycle itself
+ is_cycle = false;
+ }
+ } else {
+ // Some(None) ensures we save time by skipping this argument again
+ permutation[x] = Some(None);
+ }
+ }
+ }
+
+ if permutation_found {
+ // Map unwrap to remove the first layer of Some
+ let final_permutation: Vec<Option<usize>> =
+ permutation.into_iter().map(|x| x.unwrap()).collect();
+ return Some(Issue::Permutation(final_permutation));
+ }
+ return None;
+ }
+
+ // Obviously, detecting exact user intention is impossible, so the goal here is to
+ // come up with as likely a story as we can to be helpful.
+ //
+ // We'll iteratively remove "satisfied" input/argument pairs,
+ // then check for the cases above, until we've eliminated the entire grid
+ //
+ // We'll want to know which arguments and inputs these rows and columns correspond to
+ // even after we delete them.
+ pub(crate) fn find_errors(
+ mut self,
+ ) -> (Vec<Error<'tcx>>, IndexVec<ExpectedIdx, Option<ProvidedIdx>>) {
+ let provided_arg_count = self.provided_indices.len();
+
+ let mut errors: Vec<Error<'tcx>> = vec![];
+ // For each expected argument, the matched *actual* input
+ let mut matched_inputs: IndexVec<ExpectedIdx, Option<ProvidedIdx>> =
+ IndexVec::from_elem_n(None, self.expected_indices.len());
+
+ // Before we start looking for issues, eliminate any arguments that are already satisfied,
+ // so that an argument which is already spoken for by the input it's in doesn't
+ // spill over into another similarly typed input
+ // ex:
+ // fn some_func(_a: i32, _b: i32) {}
+ // some_func(1, "");
+ // Without this elimination, the first argument causes the second argument
+ // to show up as both a missing input and extra argument, rather than
+ // just an invalid type.
+ for (provided, expected) in self.eliminate_satisfied() {
+ matched_inputs[expected] = Some(provided);
+ }
+
+ while !self.provided_indices.is_empty() || !self.expected_indices.is_empty() {
+ match self.find_issue() {
+ Some(Issue::Invalid(idx)) => {
+ let compatibility = self.compatibility_matrix[idx][idx].clone();
+ let input_idx = self.provided_indices[idx];
+ let arg_idx = self.expected_indices[idx];
+ self.satisfy_input(idx, idx);
+ errors.push(Error::Invalid(input_idx, arg_idx, compatibility));
+ }
+ Some(Issue::Extra(idx)) => {
+ let input_idx = self.provided_indices[idx];
+ self.eliminate_provided(idx);
+ errors.push(Error::Extra(input_idx));
+ }
+ Some(Issue::Missing(idx)) => {
+ let arg_idx = self.expected_indices[idx];
+ self.eliminate_expected(idx);
+ errors.push(Error::Missing(arg_idx));
+ }
+ Some(Issue::Swap(idx, other)) => {
+ let input_idx = self.provided_indices[idx];
+ let other_input_idx = self.provided_indices[other];
+ let arg_idx = self.expected_indices[idx];
+ let other_arg_idx = self.expected_indices[other];
+ let (min, max) = (cmp::min(idx, other), cmp::max(idx, other));
+ self.satisfy_input(min, max);
+ // Subtract 1 because we already removed the "min" row
+ self.satisfy_input(max - 1, min);
+ errors.push(Error::Swap(input_idx, other_input_idx, arg_idx, other_arg_idx));
+ matched_inputs[other_arg_idx] = Some(input_idx);
+ matched_inputs[arg_idx] = Some(other_input_idx);
+ }
+ Some(Issue::Permutation(args)) => {
+ let mut idxs: Vec<usize> = args.iter().filter_map(|&a| a).collect();
+
+ let mut real_idxs: IndexVec<ProvidedIdx, Option<(ExpectedIdx, ProvidedIdx)>> =
+ IndexVec::from_elem_n(None, provided_arg_count);
+ for (src, dst) in
+ args.iter().enumerate().filter_map(|(src, dst)| dst.map(|dst| (src, dst)))
+ {
+ let src_input_idx = self.provided_indices[src];
+ let dst_input_idx = self.provided_indices[dst];
+ let dest_arg_idx = self.expected_indices[dst];
+ real_idxs[src_input_idx] = Some((dest_arg_idx, dst_input_idx));
+ matched_inputs[dest_arg_idx] = Some(src_input_idx);
+ }
+ idxs.sort();
+ idxs.reverse();
+ for i in idxs {
+ self.satisfy_input(i, i);
+ }
+ errors.push(Error::Permutation(real_idxs.into_iter().flatten().collect()));
+ }
+ None => {
+ // We didn't find any issues, so we need to push the algorithm forward
+ // First, eliminate any arguments that currently satisfy their inputs
+ for (inp, arg) in self.eliminate_satisfied() {
+ matched_inputs[arg] = Some(inp);
+ }
+ }
+ };
+ }
+
+ return (errors, matched_inputs);
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
new file mode 100644
index 000000000..660e7e4e3
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/checks.rs
@@ -0,0 +1,1900 @@
+use crate::astconv::AstConv;
+use crate::check::coercion::CoerceMany;
+use crate::check::fn_ctxt::arg_matrix::{
+ ArgMatrix, Compatibility, Error, ExpectedIdx, ProvidedIdx,
+};
+use crate::check::gather_locals::Declaration;
+use crate::check::intrinsicck::InlineAsmCtxt;
+use crate::check::method::MethodCallee;
+use crate::check::Expectation::*;
+use crate::check::TupleArgumentsFlag::*;
+use crate::check::{
+ potentially_plural_count, struct_span_err, BreakableCtxt, Diverges, Expectation, FnCtxt,
+ LocalTy, Needs, TupleArgumentsFlag,
+};
+use crate::structured_errors::StructuredDiagnostic;
+
+use rustc_ast as ast;
+use rustc_errors::{pluralize, Applicability, Diagnostic, DiagnosticId, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::{ExprKind, Node, QPath};
+use rustc_index::vec::IndexVec;
+use rustc_infer::infer::error_reporting::{FailureCode, ObligationCauseExt};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferOk;
+use rustc_infer::infer::TypeTrace;
+use rustc_middle::ty::adjustment::AllowTwoPhase;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, DefIdTree, IsSuggestable, Ty};
+use rustc_session::Session;
+use rustc_span::symbol::Ident;
+use rustc_span::{self, Span};
+use rustc_trait_selection::traits::{self, ObligationCauseCode, SelectionContext};
+
+use std::iter;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub(in super::super) fn check_casts(&self) {
+ let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
+ debug!("FnCtxt::check_casts: {} deferred checks", deferred_cast_checks.len());
+ for cast in deferred_cast_checks.drain(..) {
+ cast.check(self);
+ }
+ }
+
+ pub(in super::super) fn check_transmutes(&self) {
+ let mut deferred_transmute_checks = self.deferred_transmute_checks.borrow_mut();
+ debug!("FnCtxt::check_transmutes: {} deferred checks", deferred_transmute_checks.len());
+ for (from, to, span) in deferred_transmute_checks.drain(..) {
+ self.check_transmute(span, from, to);
+ }
+ }
+
+ pub(in super::super) fn check_asms(&self) {
+ let mut deferred_asm_checks = self.deferred_asm_checks.borrow_mut();
+ debug!("FnCtxt::check_asm: {} deferred checks", deferred_asm_checks.len());
+ for (asm, hir_id) in deferred_asm_checks.drain(..) {
+ let enclosing_id = self.tcx.hir().enclosing_body_owner(hir_id);
+ InlineAsmCtxt::new_in_fn(self)
+ .check_asm(asm, self.tcx.hir().local_def_id_to_hir_id(enclosing_id));
+ }
+ }
+
+ pub(in super::super) fn check_method_argument_types(
+ &self,
+ sp: Span,
+ expr: &'tcx hir::Expr<'tcx>,
+ method: Result<MethodCallee<'tcx>, ()>,
+ args_no_rcvr: &'tcx [hir::Expr<'tcx>],
+ tuple_arguments: TupleArgumentsFlag,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let has_error = match method {
+ Ok(method) => method.substs.references_error() || method.sig.references_error(),
+ Err(_) => true,
+ };
+ if has_error {
+ let err_inputs = self.err_args(args_no_rcvr.len());
+
+ let err_inputs = match tuple_arguments {
+ DontTupleArguments => err_inputs,
+ TupleArguments => vec![self.tcx.intern_tup(&err_inputs)],
+ };
+
+ self.check_argument_types(
+ sp,
+ expr,
+ &err_inputs,
+ None,
+ args_no_rcvr,
+ false,
+ tuple_arguments,
+ method.ok().map(|method| method.def_id),
+ );
+ return self.tcx.ty_error();
+ }
+
+ let method = method.unwrap();
+ // HACK(eddyb) ignore self in the definition (see above).
+ let expected_input_tys = self.expected_inputs_for_expected_output(
+ sp,
+ expected,
+ method.sig.output(),
+ &method.sig.inputs()[1..],
+ );
+ self.check_argument_types(
+ sp,
+ expr,
+ &method.sig.inputs()[1..],
+ expected_input_tys,
+ args_no_rcvr,
+ method.sig.c_variadic,
+ tuple_arguments,
+ Some(method.def_id),
+ );
+ method.sig.output()
+ }
+
+ /// Generic function that factors out common logic from function calls,
+ /// method calls and overloaded operators.
+ pub(in super::super) fn check_argument_types(
+ &self,
+ // Span enclosing the call site
+ call_span: Span,
+ // Expression of the call site
+ call_expr: &'tcx hir::Expr<'tcx>,
+ // Types (as defined in the *signature* of the target function)
+ formal_input_tys: &[Ty<'tcx>],
+ // More specific expected types, after unifying with caller output types
+ expected_input_tys: Option<Vec<Ty<'tcx>>>,
+ // The expressions for each provided argument
+ provided_args: &'tcx [hir::Expr<'tcx>],
+ // Whether the function is variadic, for example when imported from C
+ c_variadic: bool,
+ // Whether the arguments have been bundled in a tuple (ex: closures)
+ tuple_arguments: TupleArgumentsFlag,
+ // The DefId for the function being called, for better error messages
+ fn_def_id: Option<DefId>,
+ ) {
+ let tcx = self.tcx;
+
+ // Conceptually, we've got some number of expected inputs, and some number of provided arguments,
+ // and we can form a grid of whether each argument could satisfy a given input:
+ // in1 | in2 | in3 | ...
+ // arg1 ? | | |
+ // arg2 | ? | |
+ // arg3 | | ? |
+ // ...
+ // Initially, we just check the diagonal, because in the case of correct code
+ // these are the only checks that matter
+ // However, in the unhappy path, we'll fill in this whole grid to attempt to provide
+ // better error messages about invalid method calls.
+
+ // All the input types from the fn signature must outlive the call
+ // so as to validate implied bounds.
+ for (&fn_input_ty, arg_expr) in iter::zip(formal_input_tys, provided_args) {
+ self.register_wf_obligation(fn_input_ty.into(), arg_expr.span, traits::MiscObligation);
+ }
+
+ let mut err_code = "E0061";
+
+ // If the arguments should be wrapped in a tuple (ex: closures), unwrap them here
+ let (formal_input_tys, expected_input_tys) = if tuple_arguments == TupleArguments {
+ let tuple_type = self.structurally_resolved_type(call_span, formal_input_tys[0]);
+ match tuple_type.kind() {
+ // We expected a tuple and got a tuple
+ ty::Tuple(arg_types) => {
+ // Argument length differs
+ if arg_types.len() != provided_args.len() {
+ err_code = "E0057";
+ }
+ let expected_input_tys = match expected_input_tys {
+ Some(expected_input_tys) => match expected_input_tys.get(0) {
+ Some(ty) => match ty.kind() {
+ ty::Tuple(tys) => Some(tys.iter().collect()),
+ _ => None,
+ },
+ None => None,
+ },
+ None => None,
+ };
+ (arg_types.iter().collect(), expected_input_tys)
+ }
+ _ => {
+ // Otherwise, there's a mismatch, so clear out what we're expecting, and set
+ // our input types to err_args so we don't blow up the error messages
+ struct_span_err!(
+ tcx.sess,
+ call_span,
+ E0059,
+ "cannot use call notation; the first type parameter \
+ for the function trait is neither a tuple nor unit"
+ )
+ .emit();
+ (self.err_args(provided_args.len()), None)
+ }
+ }
+ } else {
+ (formal_input_tys.to_vec(), expected_input_tys)
+ };
+
+ // If there are no external expectations at the call site, just use the types from the function defn
+ let expected_input_tys = if let Some(expected_input_tys) = expected_input_tys {
+ assert_eq!(expected_input_tys.len(), formal_input_tys.len());
+ expected_input_tys
+ } else {
+ formal_input_tys.clone()
+ };
+
+ let minimum_input_count = expected_input_tys.len();
+ let provided_arg_count = provided_args.len();
+
+ // We introduce a helper function to demand that a given argument satisfy a given input
+ // This is more complicated than just checking type equality, as arguments could be coerced
+ // This version writes those types back so further type checking uses the narrowed types
+ let demand_compatible = |idx| {
+ let formal_input_ty: Ty<'tcx> = formal_input_tys[idx];
+ let expected_input_ty: Ty<'tcx> = expected_input_tys[idx];
+ let provided_arg = &provided_args[idx];
+
+ debug!("checking argument {}: {:?} = {:?}", idx, provided_arg, formal_input_ty);
+
+ // We're on the happy path here, so we'll do a more involved check and write back types
+ // To check compatibility, we'll do 3 things:
+ // 1. Unify the provided argument with the expected type
+ let expectation = Expectation::rvalue_hint(self, expected_input_ty);
+
+ let checked_ty = self.check_expr_with_expectation(provided_arg, expectation);
+
+ // 2. Coerce to the most detailed type that could be coerced
+ // to, which is `expected_ty` if `rvalue_hint` returns an
+ // `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
+ let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty);
+
+ // Make selection errors caused by resolving a single argument point at the
+ // argument and not the call. This lets us customize the span pointed to in the
+ // fulfillment error to be more accurate.
+ let coerced_ty =
+ self.resolve_vars_with_obligations_and_mutate_fulfillment(coerced_ty, |errors| {
+ self.point_at_type_arg_instead_of_call_if_possible(errors, call_expr);
+ self.point_at_arg_instead_of_call_if_possible(
+ errors,
+ call_expr,
+ call_span,
+ provided_args,
+ &expected_input_tys,
+ );
+ });
+
+ let coerce_error = self
+ .try_coerce(provided_arg, checked_ty, coerced_ty, AllowTwoPhase::Yes, None)
+ .err();
+
+ if coerce_error.is_some() {
+ return Compatibility::Incompatible(coerce_error);
+ }
+
+ // 3. Check if the formal type is a supertype of the checked one
+ // and register any such obligations for future type checks
+ let supertype_error = self
+ .at(&self.misc(provided_arg.span), self.param_env)
+ .sup(formal_input_ty, coerced_ty);
+ let subtyping_error = match supertype_error {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ None
+ }
+ Err(err) => Some(err),
+ };
+
+ // If neither check failed, the types are compatible
+ match subtyping_error {
+ None => Compatibility::Compatible,
+ Some(_) => Compatibility::Incompatible(subtyping_error),
+ }
+ };
+
+ // To start, we only care "along the diagonal", where we expect every
+ // provided arg to be in the right spot
+ let mut compatibility_diagonal =
+ vec![Compatibility::Incompatible(None); provided_args.len()];
+
+ // Keep track of whether we *could possibly* be satisfied, i.e. whether we're on the happy path.
+ // If the wrong number of arguments were supplied, we CAN'T be satisfied.
+ // If we're c_variadic, the supplied argument count must be >= the minimum count from the function;
+ // otherwise, the counts need to be identical, because Rust doesn't currently support variadic functions.
+ let mut call_appears_satisfied = if c_variadic {
+ provided_arg_count >= minimum_input_count
+ } else {
+ provided_arg_count == minimum_input_count
+ };
+
+ // Check the arguments.
+ // We do this in a pretty awful way: first we type-check any arguments
+ // that are not closures, then we type-check the closures. This is so
+ // that we have more information about the types of arguments when we
+ // type-check the functions. This isn't really the right way to do this.
+ for check_closures in [false, true] {
+ // More awful hacks: before we check argument types, try to do
+ // an "opportunistic" trait resolution of any trait bounds on
+ // the call. This helps coercions.
+ if check_closures {
+ self.select_obligations_where_possible(false, |errors| {
+ self.point_at_type_arg_instead_of_call_if_possible(errors, call_expr);
+ self.point_at_arg_instead_of_call_if_possible(
+ errors,
+ call_expr,
+ call_span,
+ &provided_args,
+ &expected_input_tys,
+ );
+ })
+ }
+
+ // Check each argument, to satisfy the input it was provided for
+ // Visually, we're traveling down the diagonal of the compatibility matrix
+ for (idx, arg) in provided_args.iter().enumerate() {
+ // Warn only for the first loop (the "no closures" one).
+ // Closure arguments themselves can't be diverging, but
+ // a previous argument can, e.g., `foo(panic!(), || {})`.
+ if !check_closures {
+ self.warn_if_unreachable(arg.hir_id, arg.span, "expression");
+ }
+
+ // For C-variadic functions, we don't have a declared type for all of
+ // the arguments, hence we only do our usual type checking with
+ // the arguments whose types we do know. However, we *can* check
+ // for unreachable expressions (see above).
+ // FIXME: the unreachable warning currently isn't emitted
+ if idx >= minimum_input_count {
+ continue;
+ }
+
+ let is_closure = matches!(arg.kind, ExprKind::Closure { .. });
+ if is_closure != check_closures {
+ continue;
+ }
+
+ let compatible = demand_compatible(idx);
+ let is_compatible = matches!(compatible, Compatibility::Compatible);
+ compatibility_diagonal[idx] = compatible;
+
+ if !is_compatible {
+ call_appears_satisfied = false;
+ }
+ }
+ }
+
+ if c_variadic && provided_arg_count < minimum_input_count {
+ err_code = "E0060";
+ }
+
+ for arg in provided_args.iter().skip(minimum_input_count) {
+ // Make sure we've checked this expr at least once.
+ let arg_ty = self.check_expr(&arg);
+
+ // If the function is c-style variadic, we skipped a bunch of arguments
+ // so we need to check those, and write out the types
+ // Ideally this would be folded into the above, for uniform style
+ // but c-variadic is already a corner case
+ if c_variadic {
+ fn variadic_error<'tcx>(
+ sess: &'tcx Session,
+ span: Span,
+ ty: Ty<'tcx>,
+ cast_ty: &str,
+ ) {
+ use crate::structured_errors::MissingCastForVariadicArg;
+
+ MissingCastForVariadicArg { sess, span, ty, cast_ty }.diagnostic().emit();
+ }
+
+ // There are a few types which get autopromoted when passed via varargs
+ // in C but we just error out instead and require explicit casts.
+ let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
+ match arg_ty.kind() {
+ ty::Float(ty::FloatTy::F32) => {
+ variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
+ }
+ ty::Int(ty::IntTy::I8 | ty::IntTy::I16) | ty::Bool => {
+ variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
+ }
+ ty::Uint(ty::UintTy::U8 | ty::UintTy::U16) => {
+ variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
+ }
+ ty::FnDef(..) => {
+ let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
+ let ptr_ty = self.resolve_vars_if_possible(ptr_ty);
+ variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
+ }
+ _ => {}
+ }
+ }
+ }
+
+ if !call_appears_satisfied {
+ let compatibility_diagonal = IndexVec::from_raw(compatibility_diagonal);
+ let provided_args = IndexVec::from_iter(provided_args.iter().take(if c_variadic {
+ minimum_input_count
+ } else {
+ provided_arg_count
+ }));
+ debug_assert_eq!(
+ formal_input_tys.len(),
+ expected_input_tys.len(),
+ "expected formal_input_tys to be the same size as expected_input_tys"
+ );
+ let formal_and_expected_inputs = IndexVec::from_iter(
+ formal_input_tys
+ .iter()
+ .copied()
+ .zip(expected_input_tys.iter().copied())
+ .map(|vars| self.resolve_vars_if_possible(vars)),
+ );
+
+ self.report_arg_errors(
+ compatibility_diagonal,
+ formal_and_expected_inputs,
+ provided_args,
+ c_variadic,
+ err_code,
+ fn_def_id,
+ call_span,
+ call_expr,
+ );
+ }
+ }
+
+ fn report_arg_errors(
+ &self,
+ compatibility_diagonal: IndexVec<ProvidedIdx, Compatibility<'tcx>>,
+ formal_and_expected_inputs: IndexVec<ExpectedIdx, (Ty<'tcx>, Ty<'tcx>)>,
+ provided_args: IndexVec<ProvidedIdx, &'tcx hir::Expr<'tcx>>,
+ c_variadic: bool,
+ err_code: &str,
+ fn_def_id: Option<DefId>,
+ call_span: Span,
+ call_expr: &hir::Expr<'tcx>,
+ ) {
+ // Next, let's construct the error
+ let (error_span, full_call_span, ctor_of) = match &call_expr.kind {
+ hir::ExprKind::Call(
+ hir::Expr { hir_id, span, kind: hir::ExprKind::Path(qpath), .. },
+ _,
+ ) => {
+ if let Res::Def(DefKind::Ctor(of, _), _) =
+ self.typeck_results.borrow().qpath_res(qpath, *hir_id)
+ {
+ (call_span, *span, Some(of))
+ } else {
+ (call_span, *span, None)
+ }
+ }
+ hir::ExprKind::Call(hir::Expr { span, .. }, _) => (call_span, *span, None),
+ hir::ExprKind::MethodCall(path_segment, _, span) => {
+ let ident_span = path_segment.ident.span;
+ let ident_span = if let Some(args) = path_segment.args {
+ ident_span.with_hi(args.span_ext.hi())
+ } else {
+ ident_span
+ };
+ (
+ *span, ident_span, None, // methods are never ctors
+ )
+ }
+ k => span_bug!(call_span, "checking argument types on a non-call: `{:?}`", k),
+ };
+ let args_span = error_span.trim_start(full_call_span).unwrap_or(error_span);
+ let call_name = match ctor_of {
+ Some(CtorOf::Struct) => "struct",
+ Some(CtorOf::Variant) => "enum variant",
+ None => "function",
+ };
+
+ // Don't print if it has error types or is just plain `_`
+ fn has_error_or_infer<'tcx>(tys: impl IntoIterator<Item = Ty<'tcx>>) -> bool {
+ tys.into_iter().any(|ty| ty.references_error() || ty.is_ty_var())
+ }
+
+ self.set_tainted_by_errors();
+ let tcx = self.tcx;
+
+ // Get the argument span in the context of the call span so that
+ // suggestions and labels are (more) correct when an arg is a
+ // macro invocation.
+ let normalize_span = |span: Span| -> Span {
+ let normalized_span = span.find_ancestor_inside(error_span).unwrap_or(span);
+ // Sometimes macros mess up the spans, so do not normalize the
+ // arg span to equal the error span, because that's less useful
+ // than pointing out the arg expr in the wrong context.
+ if normalized_span.source_equal(error_span) { span } else { normalized_span }
+ };
+
+        // Precompute the provided types and spans, since that's all we typically need below.
+ let provided_arg_tys: IndexVec<ProvidedIdx, (Ty<'tcx>, Span)> = provided_args
+ .iter()
+ .map(|expr| {
+ let ty = self
+ .typeck_results
+ .borrow()
+ .expr_ty_adjusted_opt(*expr)
+ .unwrap_or_else(|| tcx.ty_error());
+ (self.resolve_vars_if_possible(ty), normalize_span(expr.span))
+ })
+ .collect();
+ let callee_expr = match &call_expr.peel_blocks().kind {
+ hir::ExprKind::Call(callee, _) => Some(*callee),
+ hir::ExprKind::MethodCall(_, callee, _) => {
+ if let Some((DefKind::AssocFn, def_id)) =
+ self.typeck_results.borrow().type_dependent_def(call_expr.hir_id)
+ && let Some(assoc) = tcx.opt_associated_item(def_id)
+ && assoc.fn_has_self_parameter
+ {
+ Some(&callee[0])
+ } else {
+ None
+ }
+ }
+ _ => None,
+ };
+ let callee_ty = callee_expr
+ .and_then(|callee_expr| self.typeck_results.borrow().expr_ty_adjusted_opt(callee_expr));
+
+        // A "softer" version of `demand_compatible`, which checks types without persisting them
+        // and treats error types differently.
+        // This will allow us to "probe" for other argument orders that would likely have been correct.
+ let check_compatible = |provided_idx: ProvidedIdx, expected_idx: ExpectedIdx| {
+ if provided_idx.as_usize() == expected_idx.as_usize() {
+ return compatibility_diagonal[provided_idx].clone();
+ }
+
+ let (formal_input_ty, expected_input_ty) = formal_and_expected_inputs[expected_idx];
+ // If either is an error type, we defy the usual convention and consider them to *not* be
+ // coercible. This prevents our error message heuristic from trying to pass errors into
+ // every argument.
+ if (formal_input_ty, expected_input_ty).references_error() {
+ return Compatibility::Incompatible(None);
+ }
+
+ let (arg_ty, arg_span) = provided_arg_tys[provided_idx];
+
+ let expectation = Expectation::rvalue_hint(self, expected_input_ty);
+ let coerced_ty = expectation.only_has_type(self).unwrap_or(formal_input_ty);
+ let can_coerce = self.can_coerce(arg_ty, coerced_ty);
+ if !can_coerce {
+ return Compatibility::Incompatible(None);
+ }
+
+ // Using probe here, since we don't want this subtyping to affect inference.
+ let subtyping_error = self.probe(|_| {
+ self.at(&self.misc(arg_span), self.param_env).sup(formal_input_ty, coerced_ty).err()
+ });
+
+ // Same as above: if either the coerce type or the checked type is an error type,
+ // consider them *not* compatible.
+ let references_error = (coerced_ty, arg_ty).references_error();
+ match (references_error, subtyping_error) {
+ (false, None) => Compatibility::Compatible,
+ (_, subtyping_error) => Compatibility::Incompatible(subtyping_error),
+ }
+ };
+
+        // The algorithm here is inspired by Levenshtein distance and longest common subsequence.
+ // We'll try to detect 4 different types of mistakes:
+ // - An extra parameter has been provided that doesn't satisfy *any* of the other inputs
+ // - An input is missing, which isn't satisfied by *any* of the other arguments
+ // - Some number of arguments have been provided in the wrong order
+ // - A type is straight up invalid
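+        //
+        // For example (an illustrative sketch), given `fn f(a: u32, b: f32)`:
+        //   f(1, 2.0, "x") -> an extra argument was provided
+        //   f(1)           -> an argument is missing
+        //   f(2.0, 1)      -> the two arguments were swapped
+        //   f(1, true)     -> the second argument has an invalid type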
+
+ // First, let's find the errors
+ let (mut errors, matched_inputs) =
+ ArgMatrix::new(provided_args.len(), formal_and_expected_inputs.len(), check_compatible)
+ .find_errors();
+
+        // Next, check if we just need to wrap some arguments in a tuple.
+ if let Some((mismatch_idx, terr)) =
+ compatibility_diagonal.iter().enumerate().find_map(|(i, c)| {
+ if let Compatibility::Incompatible(Some(terr)) = c { Some((i, terr)) } else { None }
+ })
+ {
+ // Is the first bad expected argument a tuple?
+ // Do we have as many extra provided arguments as the tuple's length?
+ // If so, we might have just forgotten to wrap some args in a tuple.
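+            // E.g. (an illustrative sketch): `fn f(t: (u32, u32))` called as
+            // `f(1, 2)` should suggest `f((1, 2))`.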
+ if let Some(ty::Tuple(tys)) =
+ formal_and_expected_inputs.get(mismatch_idx.into()).map(|tys| tys.1.kind())
+ // If the tuple is unit, we're not actually wrapping any arguments.
+ && !tys.is_empty()
+ && provided_arg_tys.len() == formal_and_expected_inputs.len() - 1 + tys.len()
+ {
+ // Wrap up the N provided arguments starting at this position in a tuple.
+ let provided_as_tuple = tcx.mk_tup(
+ provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx).take(tys.len()),
+ );
+
+ let mut satisfied = true;
+ // Check if the newly wrapped tuple + rest of the arguments are compatible.
+ for ((_, expected_ty), provided_ty) in std::iter::zip(
+ formal_and_expected_inputs.iter().skip(mismatch_idx),
+ [provided_as_tuple].into_iter().chain(
+ provided_arg_tys.iter().map(|(ty, _)| *ty).skip(mismatch_idx + tys.len()),
+ ),
+ ) {
+ if !self.can_coerce(provided_ty, *expected_ty) {
+ satisfied = false;
+ break;
+ }
+ }
+
+            // If they're compatible, suggest wrapping the arguments in a tuple, and we're done!
+            // Take some care with spans, so we don't suggest wrapping a macro's
+            // innards in parentheses, for example.
+ if satisfied
+ && let Some((_, lo)) =
+ provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx))
+ && let Some((_, hi)) =
+ provided_arg_tys.get(ProvidedIdx::from_usize(mismatch_idx + tys.len() - 1))
+ {
+ let mut err;
+ if tys.len() == 1 {
+ // A tuple wrap suggestion actually occurs within,
+ // so don't do anything special here.
+ err = self.report_and_explain_type_error(
+ TypeTrace::types(
+ &self.misc(*lo),
+ true,
+ formal_and_expected_inputs[mismatch_idx.into()].1,
+ provided_arg_tys[mismatch_idx.into()].0,
+ ),
+ terr,
+ );
+ err.span_label(
+ full_call_span,
+ format!("arguments to this {} are incorrect", call_name),
+ );
+ } else {
+ err = tcx.sess.struct_span_err_with_code(
+ full_call_span,
+ &format!(
+ "this {} takes {}{} but {} {} supplied",
+ call_name,
+ if c_variadic { "at least " } else { "" },
+ potentially_plural_count(
+ formal_and_expected_inputs.len(),
+ "argument"
+ ),
+ potentially_plural_count(provided_args.len(), "argument"),
+ pluralize!("was", provided_args.len())
+ ),
+ DiagnosticId::Error(err_code.to_owned()),
+ );
+ err.multipart_suggestion_verbose(
+ "wrap these arguments in parentheses to construct a tuple",
+ vec![
+ (lo.shrink_to_lo(), "(".to_string()),
+ (hi.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ };
+ self.label_fn_like(&mut err, fn_def_id, callee_ty);
+ err.emit();
+ return;
+ }
+ }
+ }
+
+        // Okay, so here's where it gets complicated with regard to what errors
+        // we emit and how.
+ // There are 3 different "types" of errors we might encounter.
+ // 1) Missing/extra/swapped arguments
+ // 2) Valid but incorrect arguments
+ // 3) Invalid arguments
+ // - Currently I think this only comes up with `CyclicTy`
+ //
+        // We first need to go through, remove those from (3), and emit those
+        // as their own error, particularly since their error code and
+        // message are special. From what I can tell, we *must* emit these
+ // here (vs somewhere prior to this function) since the arguments
+ // become invalid *because* of how they get used in the function.
+ // It is what it is.
+
+ if errors.is_empty() {
+ if cfg!(debug_assertions) {
+ span_bug!(error_span, "expected errors from argument matrix");
+ } else {
+ tcx.sess
+ .struct_span_err(
+ error_span,
+ "argument type mismatch was detected, \
+ but rustc had trouble determining where",
+ )
+ .note(
+ "we would appreciate a bug report: \
+ https://github.com/rust-lang/rust/issues/new",
+ )
+ .emit();
+ }
+ return;
+ }
+
+ errors.drain_filter(|error| {
+ let Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(error)) = error else { return false };
+ let (provided_ty, provided_span) = provided_arg_tys[*provided_idx];
+ let (expected_ty, _) = formal_and_expected_inputs[*expected_idx];
+ let cause = &self.misc(provided_span);
+ let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
+ if let Some(e) = error {
+ if !matches!(trace.cause.as_failure_code(e), FailureCode::Error0308(_)) {
+ self.report_and_explain_type_error(trace, e).emit();
+ return true;
+ }
+ }
+ false
+ });
+
+ // We're done if we found errors, but we already emitted them.
+ if errors.is_empty() {
+ return;
+ }
+
+        // Okay, now that we've emitted the special errors separately, we
+        // are left with only missing/extra/swapped and mismatched arguments,
+        // both of which can be collated pretty easily if needed.
+
+ // Next special case: if there is only one "Incompatible" error, just emit that
+ if let [
+ Error::Invalid(provided_idx, expected_idx, Compatibility::Incompatible(Some(err))),
+ ] = &errors[..]
+ {
+ let (formal_ty, expected_ty) = formal_and_expected_inputs[*expected_idx];
+ let (provided_ty, provided_arg_span) = provided_arg_tys[*provided_idx];
+ let cause = &self.misc(provided_arg_span);
+ let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
+ let mut err = self.report_and_explain_type_error(trace, err);
+ self.emit_coerce_suggestions(
+ &mut err,
+ &provided_args[*provided_idx],
+ provided_ty,
+ Expectation::rvalue_hint(self, expected_ty)
+ .only_has_type(self)
+ .unwrap_or(formal_ty),
+ None,
+ None,
+ );
+ err.span_label(
+ full_call_span,
+ format!("arguments to this {} are incorrect", call_name),
+ );
+ // Call out where the function is defined
+ self.label_fn_like(&mut err, fn_def_id, callee_ty);
+ err.emit();
+ return;
+ }
+
+ let mut err = if formal_and_expected_inputs.len() == provided_args.len() {
+ struct_span_err!(
+ tcx.sess,
+ full_call_span,
+ E0308,
+ "arguments to this {} are incorrect",
+ call_name,
+ )
+ } else {
+ tcx.sess.struct_span_err_with_code(
+ full_call_span,
+ &format!(
+ "this {} takes {}{} but {} {} supplied",
+ call_name,
+ if c_variadic { "at least " } else { "" },
+ potentially_plural_count(formal_and_expected_inputs.len(), "argument"),
+ potentially_plural_count(provided_args.len(), "argument"),
+ pluralize!("was", provided_args.len())
+ ),
+ DiagnosticId::Error(err_code.to_owned()),
+ )
+ };
+
+ // As we encounter issues, keep track of what we want to provide for the suggestion
+ let mut labels = vec![];
+ // If there is a single error, we give a specific suggestion; otherwise, we change to
+ // "did you mean" with the suggested function call
+ enum SuggestionText {
+ None,
+ Provide(bool),
+ Remove(bool),
+ Swap,
+ Reorder,
+ DidYouMean,
+ }
+ let mut suggestion_text = SuggestionText::None;
+
+ let mut errors = errors.into_iter().peekable();
+ while let Some(error) = errors.next() {
+ match error {
+ Error::Invalid(provided_idx, expected_idx, compatibility) => {
+ let (formal_ty, expected_ty) = formal_and_expected_inputs[expected_idx];
+ let (provided_ty, provided_span) = provided_arg_tys[provided_idx];
+ if let Compatibility::Incompatible(error) = &compatibility {
+ let cause = &self.misc(provided_span);
+ let trace = TypeTrace::types(cause, true, expected_ty, provided_ty);
+ if let Some(e) = error {
+ self.note_type_err(
+ &mut err,
+ &trace.cause,
+ None,
+ Some(trace.values),
+ e,
+ false,
+ true,
+ );
+ }
+ }
+
+ self.emit_coerce_suggestions(
+ &mut err,
+ &provided_args[provided_idx],
+ provided_ty,
+ Expectation::rvalue_hint(self, expected_ty)
+ .only_has_type(self)
+ .unwrap_or(formal_ty),
+ None,
+ None,
+ );
+ }
+ Error::Extra(arg_idx) => {
+ let (provided_ty, provided_span) = provided_arg_tys[arg_idx];
+ let provided_ty_name = if !has_error_or_infer([provided_ty]) {
+ // FIXME: not suggestable, use something else
+ format!(" of type `{}`", provided_ty)
+ } else {
+ "".to_string()
+ };
+ labels
+ .push((provided_span, format!("argument{} unexpected", provided_ty_name)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Remove(false),
+ SuggestionText::Remove(_) => SuggestionText::Remove(true),
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ Error::Missing(expected_idx) => {
+ // If there are multiple missing arguments adjacent to each other,
+ // then we can provide a single error.
+
+ let mut missing_idxs = vec![expected_idx];
+ while let Some(e) = errors.next_if(|e| {
+ matches!(e, Error::Missing(next_expected_idx)
+ if *next_expected_idx == *missing_idxs.last().unwrap() + 1)
+ }) {
+ match e {
+ Error::Missing(expected_idx) => missing_idxs.push(expected_idx),
+ _ => unreachable!(),
+ }
+ }
+
+                    // NOTE: Because we might be re-arranging arguments, might have extra
+                    // arguments, etc., it's hard to *really* know where we should provide
+                    // this error label, so as a heuristic, we point to the provided arg, or
+                    // to the call if the missing inputs are past the provided args.
+ match &missing_idxs[..] {
+ &[expected_idx] => {
+ let (_, input_ty) = formal_and_expected_inputs[expected_idx];
+ let span = if let Some((_, arg_span)) =
+ provided_arg_tys.get(expected_idx.to_provided_idx())
+ {
+ *arg_span
+ } else {
+ args_span
+ };
+ let rendered = if !has_error_or_infer([input_ty]) {
+ format!(" of type `{}`", input_ty)
+ } else {
+ "".to_string()
+ };
+ labels.push((span, format!("an argument{} is missing", rendered)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Provide(false),
+ SuggestionText::Provide(_) => SuggestionText::Provide(true),
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ &[first_idx, second_idx] => {
+ let (_, first_expected_ty) = formal_and_expected_inputs[first_idx];
+ let (_, second_expected_ty) = formal_and_expected_inputs[second_idx];
+ let span = if let (Some((_, first_span)), Some((_, second_span))) = (
+ provided_arg_tys.get(first_idx.to_provided_idx()),
+ provided_arg_tys.get(second_idx.to_provided_idx()),
+ ) {
+ first_span.to(*second_span)
+ } else {
+ args_span
+ };
+ let rendered =
+ if !has_error_or_infer([first_expected_ty, second_expected_ty]) {
+ format!(
+ " of type `{}` and `{}`",
+ first_expected_ty, second_expected_ty
+ )
+ } else {
+ "".to_string()
+ };
+ labels.push((span, format!("two arguments{} are missing", rendered)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None | SuggestionText::Provide(_) => {
+ SuggestionText::Provide(true)
+ }
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ &[first_idx, second_idx, third_idx] => {
+ let (_, first_expected_ty) = formal_and_expected_inputs[first_idx];
+ let (_, second_expected_ty) = formal_and_expected_inputs[second_idx];
+ let (_, third_expected_ty) = formal_and_expected_inputs[third_idx];
+ let span = if let (Some((_, first_span)), Some((_, third_span))) = (
+ provided_arg_tys.get(first_idx.to_provided_idx()),
+ provided_arg_tys.get(third_idx.to_provided_idx()),
+ ) {
+ first_span.to(*third_span)
+ } else {
+ args_span
+ };
+ let rendered = if !has_error_or_infer([
+ first_expected_ty,
+ second_expected_ty,
+ third_expected_ty,
+ ]) {
+ format!(
+ " of type `{}`, `{}`, and `{}`",
+ first_expected_ty, second_expected_ty, third_expected_ty
+ )
+ } else {
+ "".to_string()
+ };
+ labels.push((span, format!("three arguments{} are missing", rendered)));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None | SuggestionText::Provide(_) => {
+ SuggestionText::Provide(true)
+ }
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ missing_idxs => {
+ let first_idx = *missing_idxs.first().unwrap();
+ let last_idx = *missing_idxs.last().unwrap();
+                        // NOTE: Because we might be re-arranging arguments, might have extra arguments, etc.,
+                        // it's hard to *really* know where we should provide this error label, so this is a
+                        // decent heuristic.
+ let span = if let (Some((_, first_span)), Some((_, last_span))) = (
+ provided_arg_tys.get(first_idx.to_provided_idx()),
+ provided_arg_tys.get(last_idx.to_provided_idx()),
+ ) {
+ first_span.to(*last_span)
+ } else {
+ args_span
+ };
+                        labels.push((span, "multiple arguments are missing".to_string()));
+ suggestion_text = match suggestion_text {
+ SuggestionText::None | SuggestionText::Provide(_) => {
+ SuggestionText::Provide(true)
+ }
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ }
+ }
+ Error::Swap(
+ first_provided_idx,
+ second_provided_idx,
+ first_expected_idx,
+ second_expected_idx,
+ ) => {
+ let (first_provided_ty, first_span) = provided_arg_tys[first_provided_idx];
+ let (_, first_expected_ty) = formal_and_expected_inputs[first_expected_idx];
+ let first_provided_ty_name = if !has_error_or_infer([first_provided_ty]) {
+ format!(", found `{}`", first_provided_ty)
+ } else {
+ String::new()
+ };
+ labels.push((
+ first_span,
+ format!("expected `{}`{}", first_expected_ty, first_provided_ty_name),
+ ));
+
+ let (second_provided_ty, second_span) = provided_arg_tys[second_provided_idx];
+ let (_, second_expected_ty) = formal_and_expected_inputs[second_expected_idx];
+ let second_provided_ty_name = if !has_error_or_infer([second_provided_ty]) {
+ format!(", found `{}`", second_provided_ty)
+ } else {
+ String::new()
+ };
+ labels.push((
+ second_span,
+ format!("expected `{}`{}", second_expected_ty, second_provided_ty_name),
+ ));
+
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Swap,
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ Error::Permutation(args) => {
+ for (dst_arg, dest_input) in args {
+ let (_, expected_ty) = formal_and_expected_inputs[dst_arg];
+ let (provided_ty, provided_span) = provided_arg_tys[dest_input];
+ let provided_ty_name = if !has_error_or_infer([provided_ty]) {
+ format!(", found `{}`", provided_ty)
+ } else {
+ String::new()
+ };
+ labels.push((
+ provided_span,
+ format!("expected `{}`{}", expected_ty, provided_ty_name),
+ ));
+ }
+
+ suggestion_text = match suggestion_text {
+ SuggestionText::None => SuggestionText::Reorder,
+ _ => SuggestionText::DidYouMean,
+ };
+ }
+ }
+ }
+
+        // If we have fewer than 5 things to say, it would be useful to call out exactly what's wrong
+ if labels.len() <= 5 {
+ for (span, label) in labels {
+ err.span_label(span, label);
+ }
+ }
+
+ // Call out where the function is defined
+ self.label_fn_like(&mut err, fn_def_id, callee_ty);
+
+ // And add a suggestion block for all of the parameters
+ let suggestion_text = match suggestion_text {
+ SuggestionText::None => None,
+ SuggestionText::Provide(plural) => {
+ Some(format!("provide the argument{}", if plural { "s" } else { "" }))
+ }
+ SuggestionText::Remove(plural) => {
+ Some(format!("remove the extra argument{}", if plural { "s" } else { "" }))
+ }
+ SuggestionText::Swap => Some("swap these arguments".to_string()),
+ SuggestionText::Reorder => Some("reorder these arguments".to_string()),
+ SuggestionText::DidYouMean => Some("did you mean".to_string()),
+ };
+ if let Some(suggestion_text) = suggestion_text {
+ let source_map = self.sess().source_map();
+ let mut suggestion = format!(
+ "{}(",
+ source_map.span_to_snippet(full_call_span).unwrap_or_else(|_| fn_def_id
+ .map_or("".to_string(), |fn_def_id| tcx.item_name(fn_def_id).to_string()))
+ );
+ let mut needs_comma = false;
+ for (expected_idx, provided_idx) in matched_inputs.iter_enumerated() {
+ if needs_comma {
+ suggestion += ", ";
+ } else {
+ needs_comma = true;
+ }
+ let suggestion_text = if let Some(provided_idx) = provided_idx
+ && let (_, provided_span) = provided_arg_tys[*provided_idx]
+ && let Ok(arg_text) =
+ source_map.span_to_snippet(provided_span)
+ {
+ arg_text
+ } else {
+ // Propose a placeholder of the correct type
+ let (_, expected_ty) = formal_and_expected_inputs[expected_idx];
+ if expected_ty.is_unit() {
+ "()".to_string()
+ } else if expected_ty.is_suggestable(tcx, false) {
+ format!("/* {} */", expected_ty)
+ } else {
+ "/* value */".to_string()
+ }
+ };
+ suggestion += &suggestion_text;
+ }
+ suggestion += ")";
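+            // At this point the suggestion might look like, e.g. (an illustrative
+            // sketch), `f(a, /* u32 */)`: real snippets for the matched arguments,
+            // placeholders for the rest.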
+ err.span_suggestion_verbose(
+ error_span,
+ &suggestion_text,
+ suggestion,
+ Applicability::HasPlaceholders,
+ );
+ }
+
+ err.emit();
+ }
+
+ // AST fragment checking
+ pub(in super::super) fn check_lit(
+ &self,
+ lit: &hir::Lit,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ match lit.node {
+ ast::LitKind::Str(..) => tcx.mk_static_str(),
+ ast::LitKind::ByteStr(ref v) => {
+ tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_array(tcx.types.u8, v.len() as u64))
+ }
+ ast::LitKind::Byte(_) => tcx.types.u8,
+ ast::LitKind::Char(_) => tcx.types.char,
+ ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(ty::int_ty(t)),
+ ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(ty::uint_ty(t)),
+ ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
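+                // E.g. (an illustrative sketch): in `let x: u8 = 1;` the literal
+                // adopts the expected type `u8`; with no such constraint we
+                // create a fresh integer inference variable instead.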
+ let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
+ ty::Int(_) | ty::Uint(_) => Some(ty),
+ ty::Char => Some(tcx.types.u8),
+ ty::RawPtr(..) => Some(tcx.types.usize),
+ ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
+ _ => None,
+ });
+ opt_ty.unwrap_or_else(|| self.next_int_var())
+ }
+ ast::LitKind::Float(_, ast::LitFloatType::Suffixed(t)) => {
+ tcx.mk_mach_float(ty::float_ty(t))
+ }
+ ast::LitKind::Float(_, ast::LitFloatType::Unsuffixed) => {
+ let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() {
+ ty::Float(_) => Some(ty),
+ _ => None,
+ });
+ opt_ty.unwrap_or_else(|| self.next_float_var())
+ }
+ ast::LitKind::Bool(_) => tcx.types.bool,
+ ast::LitKind::Err(_) => tcx.ty_error(),
+ }
+ }
+
+ pub fn check_struct_path(
+ &self,
+ qpath: &QPath<'_>,
+ hir_id: hir::HirId,
+ ) -> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> {
+ let path_span = qpath.span();
+ let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, hir_id);
+ let variant = match def {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ return None;
+ }
+ Res::Def(DefKind::Variant, _) => match ty.kind() {
+ ty::Adt(adt, substs) => Some((adt.variant_of_res(def), adt.did(), substs)),
+ _ => bug!("unexpected type: {:?}", ty),
+ },
+ Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
+ | Res::SelfTy { .. } => match ty.kind() {
+ ty::Adt(adt, substs) if !adt.is_enum() => {
+ Some((adt.non_enum_variant(), adt.did(), substs))
+ }
+ _ => None,
+ },
+ _ => bug!("unexpected definition: {:?}", def),
+ };
+
+ if let Some((variant, did, substs)) = variant {
+ debug!("check_struct_path: did={:?} substs={:?}", did, substs);
+ self.write_user_type_annotation_from_substs(hir_id, did, substs, None);
+
+ // Check bounds on type arguments used in the path.
+ self.add_required_obligations(path_span, did, substs);
+
+ Some((variant, ty))
+ } else {
+ match ty.kind() {
+ ty::Error(_) => {
+ // E0071 might be caused by a spelling error, which will have
+ // already caused an error message and probably a suggestion
+ // elsewhere. Refrain from emitting more unhelpful errors here
+ // (issue #88844).
+ }
+ _ => {
+ struct_span_err!(
+ self.tcx.sess,
+ path_span,
+ E0071,
+ "expected struct, variant or union type, found {}",
+ ty.sort_string(self.tcx)
+ )
+ .span_label(path_span, "not a struct")
+ .emit();
+ }
+ }
+ None
+ }
+ }
+
+ pub fn check_decl_initializer(
+ &self,
+ hir_id: hir::HirId,
+ pat: &'tcx hir::Pat<'tcx>,
+ init: &'tcx hir::Expr<'tcx>,
+ ) -> Ty<'tcx> {
+ // FIXME(tschottdorf): `contains_explicit_ref_binding()` must be removed
+ // for #42640 (default match binding modes).
+ //
+ // See #44848.
+ let ref_bindings = pat.contains_explicit_ref_binding();
+
+ let local_ty = self.local_ty(init.span, hir_id).revealed_ty;
+ if let Some(m) = ref_bindings {
+ // Somewhat subtle: if we have a `ref` binding in the pattern,
+ // we want to avoid introducing coercions for the RHS. This is
+ // both because it helps preserve sanity and, in the case of
+ // ref mut, for soundness (issue #23116). In particular, in
+ // the latter case, we need to be clear that the type of the
+ // referent for the reference that results is *equal to* the
+ // type of the place it is referencing, and not some
+ // supertype thereof.
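+            // E.g. (an illustrative sketch): given `let ref mut x = y;`, the
+            // type of `*x` must be exactly `y`'s type, with no coercion of
+            // the initializer allowed.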
+ let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m));
+ self.demand_eqtype(init.span, local_ty, init_ty);
+ init_ty
+ } else {
+ self.check_expr_coercable_to_type(init, local_ty, None)
+ }
+ }
+
+ pub(in super::super) fn check_decl(&self, decl: Declaration<'tcx>) {
+ // Determine and write the type which we'll check the pattern against.
+ let decl_ty = self.local_ty(decl.span, decl.hir_id).decl_ty;
+ self.write_ty(decl.hir_id, decl_ty);
+
+ // Type check the initializer.
+ if let Some(ref init) = decl.init {
+ let init_ty = self.check_decl_initializer(decl.hir_id, decl.pat, &init);
+ self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, init_ty);
+ }
+
+ // Does the expected pattern type originate from an expression and what is the span?
+ let (origin_expr, ty_span) = match (decl.ty, decl.init) {
+ (Some(ty), _) => (false, Some(ty.span)), // Bias towards the explicit user type.
+ (_, Some(init)) => {
+ (true, Some(init.span.find_ancestor_inside(decl.span).unwrap_or(init.span)))
+ } // No explicit type; so use the scrutinee.
+ _ => (false, None), // We have `let $pat;`, so the expected type is unconstrained.
+ };
+
+ // Type check the pattern. Override if necessary to avoid knock-on errors.
+ self.check_pat_top(&decl.pat, decl_ty, ty_span, origin_expr);
+ let pat_ty = self.node_ty(decl.pat.hir_id);
+ self.overwrite_local_ty_if_err(decl.hir_id, decl.pat, decl_ty, pat_ty);
+
+ if let Some(blk) = decl.els {
+ let previous_diverges = self.diverges.get();
+ let else_ty = self.check_block_with_expected(blk, NoExpectation);
+ let cause = self.cause(blk.span, ObligationCauseCode::LetElse);
+ if let Some(mut err) =
+ self.demand_eqtype_with_origin(&cause, self.tcx.types.never, else_ty)
+ {
+ err.emit();
+ }
+ self.diverges.set(previous_diverges);
+ }
+ }
+
+ /// Type check a `let` statement.
+ pub fn check_decl_local(&self, local: &'tcx hir::Local<'tcx>) {
+ self.check_decl(local.into());
+ }
+
+ pub fn check_stmt(&self, stmt: &'tcx hir::Stmt<'tcx>, is_last: bool) {
+ // Don't do all the complex logic below for `DeclItem`.
+ match stmt.kind {
+ hir::StmtKind::Item(..) => return,
+ hir::StmtKind::Local(..) | hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
+ }
+
+ self.warn_if_unreachable(stmt.hir_id, stmt.span, "statement");
+
+ // Hide the outer diverging and `has_errors` flags.
+ let old_diverges = self.diverges.replace(Diverges::Maybe);
+ let old_has_errors = self.has_errors.replace(false);
+
+ match stmt.kind {
+ hir::StmtKind::Local(l) => {
+ self.check_decl_local(l);
+ }
+ // Ignore for now.
+ hir::StmtKind::Item(_) => {}
+ hir::StmtKind::Expr(ref expr) => {
+ // Check with expected type of `()`.
+ self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit(), |err| {
+ if expr.can_have_side_effects() {
+ self.suggest_semicolon_at_end(expr.span, err);
+ }
+ });
+ }
+ hir::StmtKind::Semi(ref expr) => {
+                // All of this is equivalent to calling `check_expr`, but it is inlined here
+                // in order to capture the fact that this `match` is the last statement in its
+                // function. This is done to give better suggestions for removing the `;`.
+ let expectation = match expr.kind {
+ hir::ExprKind::Match(..) if is_last => IsLast(stmt.span),
+ _ => NoExpectation,
+ };
+ self.check_expr_with_expectation(expr, expectation);
+ }
+ }
+
+ // Combine the diverging and `has_error` flags.
+ self.diverges.set(self.diverges.get() | old_diverges);
+ self.has_errors.set(self.has_errors.get() | old_has_errors);
+ }
+
+ pub fn check_block_no_value(&self, blk: &'tcx hir::Block<'tcx>) {
+ let unit = self.tcx.mk_unit();
+ let ty = self.check_block_with_expected(blk, ExpectHasType(unit));
+
+ // if the block produces a `!` value, that can always be
+ // (effectively) coerced to unit.
+ if !ty.is_never() {
+ self.demand_suptype(blk.span, unit, ty);
+ }
+ }
+
+ pub(in super::super) fn check_block_with_expected(
+ &self,
+ blk: &'tcx hir::Block<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let prev = self.ps.replace(self.ps.get().recurse(blk));
+
+ // In some cases, blocks have just one exit, but other blocks
+ // can be targeted by multiple breaks. This can happen both
+ // with labeled blocks as well as when we desugar
+ // a `try { ... }` expression.
+ //
+ // Example 1:
+ //
+ // 'a: { if true { break 'a Err(()); } Ok(()) }
+ //
+ // Here we would wind up with two coercions, one from
+ // `Err(())` and the other from the tail expression
+ // `Ok(())`. If the tail expression is omitted, that's a
+ // "forced unit" -- unless the block diverges, in which
+ // case we can ignore the tail expression (e.g., `'a: {
+ // break 'a 22; }` would not force the type of the block
+ // to be `()`).
+ let tail_expr = blk.expr.as_ref();
+ let coerce_to_ty = expected.coercion_target_type(self, blk.span);
+ let coerce = if blk.targeted_by_break {
+ CoerceMany::new(coerce_to_ty)
+ } else {
+ let tail_expr: &[&hir::Expr<'_>] = match tail_expr {
+ Some(e) => slice::from_ref(e),
+ None => &[],
+ };
+ CoerceMany::with_coercion_sites(coerce_to_ty, tail_expr)
+ };
+
+ let prev_diverges = self.diverges.get();
+ let ctxt = BreakableCtxt { coerce: Some(coerce), may_break: false };
+
+ let (ctxt, ()) = self.with_breakable_ctxt(blk.hir_id, ctxt, || {
+ for (pos, s) in blk.stmts.iter().enumerate() {
+ self.check_stmt(s, blk.stmts.len() - 1 == pos);
+ }
+
+ // check the tail expression **without** holding the
+ // `enclosing_breakables` lock below.
+ let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected));
+
+ let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
+ let ctxt = enclosing_breakables.find_breakable(blk.hir_id);
+ let coerce = ctxt.coerce.as_mut().unwrap();
+ if let Some(tail_expr_ty) = tail_expr_ty {
+ let tail_expr = tail_expr.unwrap();
+ let span = self.get_expr_coercion_span(tail_expr);
+ let cause = self.cause(span, ObligationCauseCode::BlockTailExpression(blk.hir_id));
+ let ty_for_diagnostic = coerce.merged_ty();
+                // We use coerce_inner here because we want to augment the error,
+                // suggesting to wrap the block in square brackets if it might've
+                // been mistaken for array syntax.
+ coerce.coerce_inner(
+ self,
+ &cause,
+ Some(tail_expr),
+ tail_expr_ty,
+ Some(&mut |diag: &mut Diagnostic| {
+ self.suggest_block_to_brackets(diag, blk, tail_expr_ty, ty_for_diagnostic);
+ }),
+ false,
+ );
+ } else {
+ // Subtle: if there is no explicit tail expression,
+ // that is typically equivalent to a tail expression
+ // of `()` -- except if the block diverges. In that
+ // case, there is no value supplied from the tail
+ // expression (assuming there are no other breaks,
+ // this implies that the type of the block will be
+ // `!`).
+ //
+ // #41425 -- label the implicit `()` as being the
+ // "found type" here, rather than the "expected type".
+ if !self.diverges.get().is_always() {
+ // #50009 -- Do not point at the entire fn block span, point at the return type
+ // span, as it is the cause of the requirement, and
+ // `consider_hint_about_removing_semicolon` will point at the last expression
+ // if it were a relevant part of the error. This improves usability in editors
+ // that highlight errors inline.
+ let mut sp = blk.span;
+ let mut fn_span = None;
+ if let Some((decl, ident)) = self.get_parent_fn_decl(blk.hir_id) {
+ let ret_sp = decl.output.span();
+ if let Some(block_sp) = self.parent_item_span(blk.hir_id) {
+                            // HACK: in some cases (`ui/liveness/liveness-issue-2163.rs`) the
+                            // output would otherwise be incorrect and even misleading. Make sure
+                            // the span we're aiming at corresponds to a `fn` body.
+ if block_sp == blk.span {
+ sp = ret_sp;
+ fn_span = Some(ident.span);
+ }
+ }
+ }
+ coerce.coerce_forced_unit(
+ self,
+ &self.misc(sp),
+ &mut |err| {
+ if let Some(expected_ty) = expected.only_has_type(self) {
+ if !self.consider_removing_semicolon(blk, expected_ty, err) {
+ self.consider_returning_binding(blk, expected_ty, err);
+ }
+ if expected_ty == self.tcx.types.bool {
+ // If this is caused by a missing `let` in a `while let`,
+ // silence this redundant error, as we already emit E0070.
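+                                // E.g. (an illustrative sketch):
+                                // `while Some(x) = iter.next() { .. }`, where
+                                // the `let` was forgotten.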
+
+                                // Our block must be an `assign desugar local; assignment`
+ if let Some(hir::Node::Block(hir::Block {
+ stmts:
+ [
+ hir::Stmt {
+ kind:
+ hir::StmtKind::Local(hir::Local {
+ source:
+ hir::LocalSource::AssignDesugar(_),
+ ..
+ }),
+ ..
+ },
+ hir::Stmt {
+ kind:
+ hir::StmtKind::Expr(hir::Expr {
+ kind: hir::ExprKind::Assign(..),
+ ..
+ }),
+ ..
+ },
+ ],
+ ..
+ })) = self.tcx.hir().find(blk.hir_id)
+ {
+ self.comes_from_while_condition(blk.hir_id, |_| {
+ err.downgrade_to_delayed_bug();
+ })
+ }
+ }
+ }
+ if let Some(fn_span) = fn_span {
+ err.span_label(
+ fn_span,
+ "implicitly returns `()` as its body has no tail or `return` \
+ expression",
+ );
+ }
+ },
+ false,
+ );
+ }
+ }
+ });
+
+ if ctxt.may_break {
+ // If we can break from the block, then the block's exit is always reachable
+ // (... as long as the entry is reachable) - regardless of the tail of the block.
+ self.diverges.set(prev_diverges);
+ }
+
+ let mut ty = ctxt.coerce.unwrap().complete(self);
+
+ if self.has_errors.get() || ty.references_error() {
+ ty = self.tcx.ty_error()
+ }
+
+ self.write_ty(blk.hir_id, ty);
+
+ self.ps.set(prev);
+ ty
+ }
+
+ fn parent_item_span(&self, id: hir::HirId) -> Option<Span> {
+ let node = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(id));
+ match node {
+ Node::Item(&hir::Item { kind: hir::ItemKind::Fn(_, _, body_id), .. })
+ | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(_, body_id), .. }) => {
+ let body = self.tcx.hir().body(body_id);
+ if let ExprKind::Block(block, _) = &body.value.kind {
+ return Some(block.span);
+ }
+ }
+ _ => {}
+ }
+ None
+ }
+
+ /// Given a function block's `HirId`, returns its `FnDecl` if it exists, or `None` otherwise.
+ fn get_parent_fn_decl(&self, blk_id: hir::HirId) -> Option<(&'tcx hir::FnDecl<'tcx>, Ident)> {
+ let parent = self.tcx.hir().get_by_def_id(self.tcx.hir().get_parent_item(blk_id));
+ self.get_node_fn_decl(parent).map(|(fn_decl, ident, _)| (fn_decl, ident))
+ }
+
+    /// If `expr` is an `if`/`else` expression whose `else` branch has a known non-`!` type, or a
+    /// `match` expression that has only one non-`!` arm, use that branch's or arm's tail
+    /// expression's `Span`, otherwise return `expr.span`. This is done to give better errors when
+    /// given code like the following:
+ /// ```text
+ /// if false { return 0i32; } else { 1u32 }
+ /// // ^^^^ point at this instead of the whole `if` expression
+ /// ```
+ fn get_expr_coercion_span(&self, expr: &hir::Expr<'_>) -> rustc_span::Span {
+ let check_in_progress = |elem: &hir::Expr<'_>| {
+ self.typeck_results.borrow().node_type_opt(elem.hir_id).filter(|ty| !ty.is_never()).map(
+ |_| match elem.kind {
+ // Point at the tail expression when possible.
+ hir::ExprKind::Block(block, _) => block.expr.map_or(block.span, |e| e.span),
+ _ => elem.span,
+ },
+ )
+ };
+
+ if let hir::ExprKind::If(_, _, Some(el)) = expr.kind {
+ if let Some(rslt) = check_in_progress(el) {
+ return rslt;
+ }
+ }
+
+ if let hir::ExprKind::Match(_, arms, _) = expr.kind {
+ let mut iter = arms.iter().filter_map(|arm| check_in_progress(arm.body));
+ if let Some(span) = iter.next() {
+ if iter.next().is_none() {
+ return span;
+ }
+ }
+ }
+
+ expr.span
+ }
+
+ fn overwrite_local_ty_if_err(
+ &self,
+ hir_id: hir::HirId,
+ pat: &'tcx hir::Pat<'tcx>,
+ decl_ty: Ty<'tcx>,
+ ty: Ty<'tcx>,
+ ) {
+ if ty.references_error() {
+            // Override the types everywhere with `err()` to avoid knock-on errors.
+ self.write_ty(hir_id, ty);
+ self.write_ty(pat.hir_id, ty);
+ let local_ty = LocalTy { decl_ty, revealed_ty: ty };
+ self.locals.borrow_mut().insert(hir_id, local_ty);
+ self.locals.borrow_mut().insert(pat.hir_id, local_ty);
+ }
+ }
+
+ // Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary.
+ // The newly resolved definition is written into `type_dependent_defs`.
+ fn finish_resolving_struct_path(
+ &self,
+ qpath: &QPath<'_>,
+ path_span: Span,
+ hir_id: hir::HirId,
+ ) -> (Res, Ty<'tcx>) {
+ match *qpath {
+ QPath::Resolved(ref maybe_qself, ref path) => {
+ let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
+ let ty = <dyn AstConv<'_>>::res_to_ty(self, self_ty, path, true);
+ (path.res, ty)
+ }
+ QPath::TypeRelative(ref qself, ref segment) => {
+ let ty = self.to_ty(qself);
+
+ let result = <dyn AstConv<'_>>::associated_path_to_ty(
+ self, hir_id, path_span, ty, qself, segment, true,
+ );
+ let ty = result.map(|(ty, _, _)| ty).unwrap_or_else(|_| self.tcx().ty_error());
+ let result = result.map(|(_, kind, def_id)| (kind, def_id));
+
+ // Write back the new resolution.
+ self.write_resolution(hir_id, result);
+
+ (result.map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)), ty)
+ }
+ QPath::LangItem(lang_item, span, id) => {
+ self.resolve_lang_item_path(lang_item, span, hir_id, id)
+ }
+ }
+ }
+
+    /// Given a vec of evaluated `FulfillmentError`s and an `fn` call's argument expressions, we
+    /// walk the checked and coerced types for each argument to see if any of the
+    /// `FulfillmentError`s reference a type argument. The reason we also walk the checked type is
+    /// that the coerced type may not be easily comparable with the predicate type (because of
+    /// coercion). If the types match for either the checked or the coerced type, and there's only
+    /// *one* argument that does, we point at the corresponding argument's expression span instead
+    /// of the `fn` call path span.
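+    ///
+    /// An illustrative sketch (the names are made up):
+    ///
+    /// ```text
+    /// fn foo<T: SomeTrait>(t: T) {}
+    /// foo(0i32);
+    /// //  ^^^^ if `i32: SomeTrait` does not hold, point here rather than at `foo`
+    /// ```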
+ fn point_at_arg_instead_of_call_if_possible(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ expr: &'tcx hir::Expr<'tcx>,
+ call_sp: Span,
+ args: &'tcx [hir::Expr<'tcx>],
+ expected_tys: &[Ty<'tcx>],
+ ) {
+ // We *do not* do this for desugared call spans to keep good diagnostics when involving
+ // the `?` operator.
+ if call_sp.desugaring_kind().is_some() {
+ return;
+ }
+
+ 'outer: for error in errors {
+            // Only if the cause is somewhere inside the expression do we want to try
+            // to point at an arg. Otherwise, the cause is somewhere else and we
+            // should not change anything, because we could break the correct span.
+ if !call_sp.contains(error.obligation.cause.span) {
+ continue;
+ }
+
+ // Peel derived obligation, because it's the type that originally
+ // started this inference chain that matters, not the one we wound
+ // up with at the end.
+ fn unpeel_to_top<'a, 'tcx>(
+ mut code: &'a ObligationCauseCode<'tcx>,
+ ) -> &'a ObligationCauseCode<'tcx> {
+ let mut result_code = code;
+ loop {
+ let parent = match code {
+ ObligationCauseCode::ImplDerivedObligation(c) => &c.derived.parent_code,
+ ObligationCauseCode::BuiltinDerivedObligation(c)
+ | ObligationCauseCode::DerivedObligation(c) => &c.parent_code,
+ _ => break result_code,
+ };
+ (result_code, code) = (code, parent);
+ }
+ }
+ let self_: ty::subst::GenericArg<'_> =
+ match unpeel_to_top(error.obligation.cause.code()) {
+ ObligationCauseCode::BuiltinDerivedObligation(code)
+ | ObligationCauseCode::DerivedObligation(code) => {
+ code.parent_trait_pred.self_ty().skip_binder().into()
+ }
+ ObligationCauseCode::ImplDerivedObligation(code) => {
+ code.derived.parent_trait_pred.self_ty().skip_binder().into()
+ }
+ _ if let ty::PredicateKind::Trait(predicate) =
+ error.obligation.predicate.kind().skip_binder() =>
+ {
+ predicate.self_ty().into()
+ }
+ _ => continue,
+ };
+ let self_ = self.resolve_vars_if_possible(self_);
+ let ty_matches_self = |ty: Ty<'tcx>| ty.walk().any(|arg| arg == self_);
+
+ let typeck_results = self.typeck_results.borrow();
+
+ for (idx, arg) in args.iter().enumerate() {
+ // Don't adjust the span if we already have a more precise span
+ // within one of the args.
+ if arg.span.contains(error.obligation.cause.span) {
+ let references_arg =
+ typeck_results.expr_ty_opt(arg).map_or(false, &ty_matches_self)
+ || expected_tys.get(idx).copied().map_or(false, &ty_matches_self);
+ if references_arg && !arg.span.from_expansion() {
+ error.obligation.cause.map_code(|parent_code| {
+ ObligationCauseCode::FunctionArgumentObligation {
+ arg_hir_id: args[idx].hir_id,
+ call_hir_id: expr.hir_id,
+ parent_code,
+ }
+ })
+ }
+ continue 'outer;
+ }
+ }
+
+ // Collect the argument position for all arguments that could have caused this
+ // `FulfillmentError`.
+ let mut referenced_in: Vec<_> = std::iter::zip(expected_tys, args)
+ .enumerate()
+ .flat_map(|(idx, (expected_ty, arg))| {
+ if let Some(arg_ty) = typeck_results.expr_ty_opt(arg) {
+ vec![(idx, arg_ty), (idx, *expected_ty)]
+ } else {
+ vec![]
+ }
+ })
+ .filter_map(|(i, ty)| {
+ let ty = self.resolve_vars_if_possible(ty);
+ // We walk the argument type because the argument's type could have
+ // been `Option<T>`, but the `FulfillmentError` references `T`.
+ if ty_matches_self(ty) { Some(i) } else { None }
+ })
+ .collect();
+
+ // Both checked and coerced types could have matched, thus we need to remove
+ // duplicates.
+
+            // We're sorting primitive `usize` indices here, so an unstable sort is fine.
+ referenced_in.sort_unstable();
+ referenced_in.dedup();
+
+ if let &[idx] = &referenced_in[..] {
+ // Do not point at the inside of a macro.
+ // That would often result in poor error messages.
+ if args[idx].span.from_expansion() {
+ continue;
+ }
+ // We make sure that only *one* argument matches the obligation failure
+ // and we assign the obligation's span to its expression's.
+ error.obligation.cause.span = args[idx].span;
+ error.obligation.cause.map_code(|parent_code| {
+ ObligationCauseCode::FunctionArgumentObligation {
+ arg_hir_id: args[idx].hir_id,
+ call_hir_id: expr.hir_id,
+ parent_code,
+ }
+ });
+ } else if error.obligation.cause.span == call_sp {
+ // Make function calls point at the callee, not the whole thing.
+ if let hir::ExprKind::Call(callee, _) = expr.kind {
+ error.obligation.cause.span = callee.span;
+ }
+ }
+ }
+ }
+
+ /// Given a vec of evaluated `FulfillmentError`s and an `fn` call expression, we walk the
+ /// `PathSegment`s and resolve their type parameters to see if any of the `FulfillmentError`s
+ /// were caused by them. If they were, we point at the corresponding type argument's span
+ /// instead of the `fn` call path span.
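+    ///
+    /// An illustrative sketch (the names are made up):
+    ///
+    /// ```text
+    /// fn foo<T: SomeTrait>() {}
+    /// foo::<u32>();
+    /// //    ^^^ if `u32: SomeTrait` does not hold, point here rather than at `foo`
+    /// ```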
+ fn point_at_type_arg_instead_of_call_if_possible(
+ &self,
+ errors: &mut Vec<traits::FulfillmentError<'tcx>>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ ) {
+ if let hir::ExprKind::Call(path, _) = &call_expr.kind {
+ if let hir::ExprKind::Path(hir::QPath::Resolved(_, path)) = &path.kind {
+ for error in errors {
+ if let ty::PredicateKind::Trait(predicate) =
+ error.obligation.predicate.kind().skip_binder()
+ {
+ // If any of the type arguments in this path segment caused the
+ // `FulfillmentError`, point at its span (#61860).
+ for arg in path
+ .segments
+ .iter()
+ .filter_map(|seg| seg.args.as_ref())
+ .flat_map(|a| a.args.iter())
+ {
+ if let hir::GenericArg::Type(hir_ty) = &arg
+ && let Some(ty) =
+ self.typeck_results.borrow().node_type_opt(hir_ty.hir_id)
+ && self.resolve_vars_if_possible(ty) == predicate.self_ty()
+ {
+ error.obligation.cause.span = hir_ty.span;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ fn label_fn_like(
+ &self,
+ err: &mut rustc_errors::DiagnosticBuilder<'tcx, rustc_errors::ErrorGuaranteed>,
+ callable_def_id: Option<DefId>,
+ callee_ty: Option<Ty<'tcx>>,
+ ) {
+ let Some(mut def_id) = callable_def_id else {
+ return;
+ };
+
+ if let Some(assoc_item) = self.tcx.opt_associated_item(def_id)
+            // This possibly points at either an impl or a trait item, so try to
+            // get it to point to the trait item, then get the parent.
+ // This parent might be an impl in the case of an inherent function,
+ // but the next check will fail.
+ && let maybe_trait_item_def_id = assoc_item.trait_item_def_id.unwrap_or(def_id)
+ && let maybe_trait_def_id = self.tcx.parent(maybe_trait_item_def_id)
+ // Just an easy way to check "trait_def_id == Fn/FnMut/FnOnce"
+ && let Some(call_kind) = ty::ClosureKind::from_def_id(self.tcx, maybe_trait_def_id)
+ && let Some(callee_ty) = callee_ty
+ {
+ let callee_ty = callee_ty.peel_refs();
+ match *callee_ty.kind() {
+ ty::Param(param) => {
+ let param =
+ self.tcx.generics_of(self.body_id.owner).type_param(&param, self.tcx);
+ if param.kind.is_synthetic() {
+ // if it's `impl Fn() -> ..` then just fall down to the def-id based logic
+ def_id = param.def_id;
+ } else {
+ // Otherwise, find the predicate that makes this generic callable,
+ // and point at that.
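+                        // E.g. (an illustrative sketch): for `fn call_it<F: Fn(u32)>(f: F)`,
+                        // point at the `F: Fn(u32)` bound as the "callable defined here" note.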
+ let instantiated = self
+ .tcx
+ .explicit_predicates_of(self.body_id.owner)
+ .instantiate_identity(self.tcx);
+ // FIXME(compiler-errors): This could be problematic if something has two
+ // fn-like predicates with different args, but callable types really never
+ // do that, so it's OK.
+ for (predicate, span) in
+ std::iter::zip(instantiated.predicates, instantiated.spans)
+ {
+ if let ty::PredicateKind::Trait(pred) = predicate.kind().skip_binder()
+ && pred.self_ty().peel_refs() == callee_ty
+ && ty::ClosureKind::from_def_id(self.tcx, pred.def_id()).is_some()
+ {
+ err.span_note(span, "callable defined here");
+ return;
+ }
+ }
+ }
+ }
+ ty::Opaque(new_def_id, _)
+ | ty::Closure(new_def_id, _)
+ | ty::FnDef(new_def_id, _) => {
+ def_id = new_def_id;
+ }
+ _ => {
+ // Look for a user-provided impl of a `Fn` trait, and point to it.
+ let new_def_id = self.probe(|_| {
+ let trait_ref = ty::TraitRef::new(
+ call_kind.to_def_id(self.tcx),
+ self.tcx.mk_substs([
+ ty::GenericArg::from(callee_ty),
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: rustc_span::DUMMY_SP,
+ })
+ .into(),
+ ].into_iter()),
+ );
+ let obligation = traits::Obligation::new(
+ traits::ObligationCause::dummy(),
+ self.param_env,
+ ty::Binder::dummy(ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Positive,
+ }),
+ );
+ match SelectionContext::new(&self).select(&obligation) {
+ Ok(Some(traits::ImplSource::UserDefined(impl_source))) => {
+ Some(impl_source.impl_def_id)
+ }
+ _ => None
+ }
+ });
+ if let Some(new_def_id) = new_def_id {
+ def_id = new_def_id;
+ } else {
+ return;
+ }
+ }
+ }
+ }
+
+ if let Some(def_span) = self.tcx.def_ident_span(def_id) && !def_span.is_dummy() {
+ let mut spans: MultiSpan = def_span.into();
+
+ let params = self
+ .tcx
+ .hir()
+ .get_if_local(def_id)
+ .and_then(|node| node.body_id())
+ .into_iter()
+ .flat_map(|id| self.tcx.hir().body(id).params);
+
+ for param in params {
+ spans.push_span_label(param.span, "");
+ }
+
+ let def_kind = self.tcx.def_kind(def_id);
+ err.span_note(spans, &format!("{} defined here", def_kind.descr(def_id)));
+ } else {
+ let def_kind = self.tcx.def_kind(def_id);
+ err.span_note(
+ self.tcx.def_span(def_id),
+ &format!("{} defined here", def_kind.descr(def_id)),
+ );
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
new file mode 100644
index 000000000..05bcc710e
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/mod.rs
@@ -0,0 +1,296 @@
+mod _impl;
+mod arg_matrix;
+mod checks;
+mod suggestions;
+
+pub use _impl::*;
+pub use suggestions::*;
+
+use crate::astconv::AstConv;
+use crate::check::coercion::DynamicCoerceMany;
+use crate::check::{Diverges, EnclosingBreakables, Inherited, UnsafetyState};
+
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Const, Ty, TyCtxt};
+use rustc_session::Session;
+use rustc_span::symbol::Ident;
+use rustc_span::{self, Span};
+use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode};
+
+use std::cell::{Cell, RefCell};
+use std::ops::Deref;
+
+pub struct FnCtxt<'a, 'tcx> {
+ pub(super) body_id: hir::HirId,
+
+ /// The parameter environment used for proving trait obligations
+ /// in this function. This can change when we descend into
+ /// closures (as they bring new things into scope), hence it is
+ /// not part of `Inherited` (as of the time of this writing,
+ /// closures do not yet change the environment, but they will
+ /// eventually).
+ pub(super) param_env: ty::ParamEnv<'tcx>,
+
+ /// Number of errors that had been reported when we started
+ /// checking this function. On exit, if we find that *more* errors
+ /// have been reported, we will skip regionck and other work that
+ /// expects the types within the function to be consistent.
+ // FIXME(matthewjasper) This should not exist, and it's not correct
+ // if type checking is run in parallel.
+ err_count_on_creation: usize,
+
+ /// If `Some`, this stores coercion information for returned
+ /// expressions. If `None`, this is in a context where return is
+ /// inappropriate, such as a const expression.
+ ///
+ /// This is a `RefCell<DynamicCoerceMany>`, which means that we
+ /// can track all the return expressions and then use them to
+ /// compute a useful coercion from the set, similar to a match
+ /// expression or other branching context. You can use methods
+ /// like `expected_ty` to access the declared return type (if
+ /// any).
+ pub(super) ret_coercion: Option<RefCell<DynamicCoerceMany<'tcx>>>,
+
+ pub(super) ret_type_span: Option<Span>,
+
+    /// Used exclusively to reduce the cost of the advanced evaluation used for
+    /// more helpful diagnostics.
+ pub(super) in_tail_expr: bool,
+
+ /// First span of a return site that we find. Used in error messages.
+ pub(super) ret_coercion_span: Cell<Option<Span>>,
+
+ pub(super) resume_yield_tys: Option<(Ty<'tcx>, Ty<'tcx>)>,
+
+ pub(super) ps: Cell<UnsafetyState>,
+
+ /// Whether the last checked node generates a divergence (e.g.,
+ /// `return` will set this to `Always`). In general, when entering
+ /// an expression or other node in the tree, the initial value
+ /// indicates whether prior parts of the containing expression may
+ /// have diverged. It is then typically set to `Maybe` (and the
+ /// old value remembered) for processing the subparts of the
+ /// current expression. As each subpart is processed, they may set
+    /// current expression. As each subpart is processed, it may set
+    /// the flag to `Always`, etc. Finally, at the end, we take the
+    /// result and "union" it with the original value, so that when we
+    /// return, the flag indicates if any subpart of the parent
+    /// expression (up to and including this part) has diverged. So,
+    /// if you read it after evaluating a subexpression `X`, the value
+    /// you get indicates whether any subexpression evaluated up to
+    /// and including `X` diverged.
+ /// We currently use this flag only for diagnostic purposes:
+ ///
+ /// - To warn about unreachable code: if, after processing a
+ /// sub-expression but before we have applied the effects of the
+ /// current node, we see that the flag is set to `Always`, we
+ /// can issue a warning. This corresponds to something like
+ /// `foo(return)`; we warn on the `foo()` expression. (We then
+ /// update the flag to `WarnedAlways` to suppress duplicate
+ /// reports.) Similarly, if we traverse to a fresh statement (or
+ /// tail expression) from an `Always` setting, we will issue a
+ /// warning. This corresponds to something like `{return;
+ /// foo();}` or `{return; 22}`, where we would warn on the
+ /// `foo()` or `22`.
+ ///
+ /// An expression represents dead code if, after checking it,
+ /// the diverges flag is set to something other than `Maybe`.
+ pub(super) diverges: Cell<Diverges>,
+
+ /// Whether any child nodes have any type errors.
+ pub(super) has_errors: Cell<bool>,
+
+ pub(super) enclosing_breakables: RefCell<EnclosingBreakables<'tcx>>,
+
+ pub(super) inh: &'a Inherited<'a, 'tcx>,
+
+ /// True if the function or closure's return type is known before
+ /// entering the function/closure, i.e. if the return type is
+ /// either given explicitly or inferred from, say, an `Fn*` trait
+ /// bound. Used for diagnostic purposes only.
+ pub(super) return_type_pre_known: bool,
+
+ /// True if the return type has an Opaque type
+ pub(super) return_type_has_opaque: bool,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn new(
+ inh: &'a Inherited<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ ) -> FnCtxt<'a, 'tcx> {
+ FnCtxt {
+ body_id,
+ param_env,
+ err_count_on_creation: inh.tcx.sess.err_count(),
+ ret_coercion: None,
+ ret_type_span: None,
+ in_tail_expr: false,
+ ret_coercion_span: Cell::new(None),
+ resume_yield_tys: None,
+ ps: Cell::new(UnsafetyState::function(hir::Unsafety::Normal, hir::CRATE_HIR_ID)),
+ diverges: Cell::new(Diverges::Maybe),
+ has_errors: Cell::new(false),
+ enclosing_breakables: RefCell::new(EnclosingBreakables {
+ stack: Vec::new(),
+ by_id: Default::default(),
+ }),
+ inh,
+ return_type_pre_known: true,
+ return_type_has_opaque: false,
+ }
+ }
+
+ pub fn cause(&self, span: Span, code: ObligationCauseCode<'tcx>) -> ObligationCause<'tcx> {
+ ObligationCause::new(span, self.body_id, code)
+ }
+
+ pub fn misc(&self, span: Span) -> ObligationCause<'tcx> {
+ self.cause(span, ObligationCauseCode::MiscObligation)
+ }
+
+ pub fn sess(&self) -> &Session {
+ &self.tcx.sess
+ }
+
+ pub fn errors_reported_since_creation(&self) -> bool {
+ self.tcx.sess.err_count() > self.err_count_on_creation
+ }
+}
+
+impl<'a, 'tcx> Deref for FnCtxt<'a, 'tcx> {
+ type Target = Inherited<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.inh
+ }
+}
+
+impl<'a, 'tcx> AstConv<'tcx> for FnCtxt<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn item_def_id(&self) -> Option<DefId> {
+ None
+ }
+
+ fn get_type_parameter_bounds(
+ &self,
+ _: Span,
+ def_id: DefId,
+ _: Ident,
+ ) -> ty::GenericPredicates<'tcx> {
+ let tcx = self.tcx;
+ let item_def_id = tcx.hir().ty_param_owner(def_id.expect_local());
+ let generics = tcx.generics_of(item_def_id);
+ let index = generics.param_def_id_to_index[&def_id];
+ ty::GenericPredicates {
+ parent: None,
+ predicates: tcx.arena.alloc_from_iter(
+ self.param_env.caller_bounds().iter().filter_map(|predicate| {
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(data) if data.self_ty().is_param(index) => {
+ // HACK(eddyb) should get the original `Span`.
+ let span = tcx.def_span(def_id);
+ Some((predicate, span))
+ }
+ _ => None,
+ }
+ }),
+ ),
+ }
+ }
+
+ fn re_infer(&self, def: Option<&ty::GenericParamDef>, span: Span) -> Option<ty::Region<'tcx>> {
+ let v = match def {
+ Some(def) => infer::EarlyBoundRegion(span, def.name),
+ None => infer::MiscVariable(span),
+ };
+ Some(self.next_region_var(v))
+ }
+
+ fn allow_ty_infer(&self) -> bool {
+ true
+ }
+
+ fn ty_infer(&self, param: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
+ if let Some(param) = param {
+ if let GenericArgKind::Type(ty) = self.var_for_def(span, param).unpack() {
+ return ty;
+ }
+ unreachable!()
+ } else {
+ self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ })
+ }
+ }
+
+ fn ct_infer(
+ &self,
+ ty: Ty<'tcx>,
+ param: Option<&ty::GenericParamDef>,
+ span: Span,
+ ) -> Const<'tcx> {
+ if let Some(param) = param {
+ if let GenericArgKind::Const(ct) = self.var_for_def(span, param).unpack() {
+ return ct;
+ }
+ unreachable!()
+ } else {
+ self.next_const_var(
+ ty,
+ ConstVariableOrigin { kind: ConstVariableOriginKind::ConstInference, span },
+ )
+ }
+ }
+
+ fn projected_ty_from_poly_trait_ref(
+ &self,
+ span: Span,
+ item_def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Ty<'tcx> {
+ let trait_ref = self.replace_bound_vars_with_fresh_vars(
+ span,
+ infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id),
+ poly_trait_ref,
+ );
+
+ let item_substs = <dyn AstConv<'tcx>>::create_substs_for_associated_item(
+ self,
+ self.tcx,
+ span,
+ item_def_id,
+ item_segment,
+ trait_ref.substs,
+ );
+
+ self.tcx().mk_projection(item_def_id, item_substs)
+ }
+
+ fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if ty.has_escaping_bound_vars() {
+ ty // FIXME: normalization and escaping regions
+ } else {
+ self.normalize_associated_types_in(span, ty)
+ }
+ }
+
+ fn set_tainted_by_errors(&self) {
+ self.infcx.set_tainted_by_errors()
+ }
+
+ fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) {
+ self.write_ty(hir_id, ty)
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
new file mode 100644
index 000000000..57771e096
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/fn_ctxt/suggestions.rs
@@ -0,0 +1,912 @@
+use super::FnCtxt;
+use crate::astconv::AstConv;
+use crate::errors::{AddReturnTypeSuggestion, ExpectedReturnTypeLabel};
+
+use rustc_ast::util::parser::ExprPrecedence;
+use rustc_errors::{Applicability, Diagnostic, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{
+ Expr, ExprKind, GenericBound, Node, Path, QPath, Stmt, StmtKind, TyKind, WherePredicate,
+};
+use rustc_infer::infer::{self, TyCtxtInferExt};
+use rustc_infer::traits::{self, StatementAsExpression};
+use rustc_middle::lint::in_external_macro;
+use rustc_middle::ty::{self, Binder, IsSuggestable, Subst, ToPredicate, Ty};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub(in super::super) fn suggest_semicolon_at_end(&self, span: Span, err: &mut Diagnostic) {
+ err.span_suggestion_short(
+ span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";",
+ Applicability::MachineApplicable,
+ );
+ }
+
+ /// On implicit return expressions with mismatched types, provides the following suggestions:
+ ///
+ /// - Points out the method's return type as the reason for the expected type.
+ /// - Possible missing semicolon.
+ /// - Possible missing return type if the return type is the default, and not `fn main()`.
+ pub fn suggest_mismatched_types_on_tail(
+ &self,
+ err: &mut Diagnostic,
+ expr: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ blk_id: hir::HirId,
+ ) -> bool {
+ let expr = expr.peel_drop_temps();
+ self.suggest_missing_semicolon(err, expr, expected, false);
+ let mut pointing_at_return_type = false;
+ if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
+ let fn_id = self.tcx.hir().get_return_block(blk_id).unwrap();
+ pointing_at_return_type = self.suggest_missing_return_type(
+ err,
+ &fn_decl,
+ expected,
+ found,
+ can_suggest,
+ fn_id,
+ );
+ self.suggest_missing_break_or_return_expr(
+ err, expr, &fn_decl, expected, found, blk_id, fn_id,
+ );
+ }
+ pointing_at_return_type
+ }
+
+ /// When encountering an fn-like ctor that needs to unify with a value, check whether calling
+ /// the ctor would successfully solve the type mismatch and if so, suggest it:
+ /// ```compile_fail,E0308
+ /// fn foo(x: usize) -> usize { x }
+ /// let x: usize = foo; // suggest calling the `foo` function: `foo(42)`
+ /// ```
+ fn suggest_fn_call(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) -> bool {
+ let (def_id, output, inputs) = match *found.kind() {
+ ty::FnDef(def_id, _) => {
+ let fn_sig = found.fn_sig(self.tcx);
+ (def_id, fn_sig.output(), fn_sig.inputs().skip_binder().len())
+ }
+ ty::Closure(def_id, substs) => {
+ let fn_sig = substs.as_closure().sig();
+ (def_id, fn_sig.output(), fn_sig.inputs().skip_binder().len() - 1)
+ }
+ ty::Opaque(def_id, substs) => {
+ let sig = self.tcx.bound_item_bounds(def_id).subst(self.tcx, substs).iter().find_map(|pred| {
+ if let ty::PredicateKind::Projection(proj) = pred.kind().skip_binder()
+ && Some(proj.projection_ty.item_def_id) == self.tcx.lang_items().fn_once_output()
+ // args tuple will always be substs[1]
+ && let ty::Tuple(args) = proj.projection_ty.substs.type_at(1).kind()
+ {
+ Some((
+ pred.kind().rebind(proj.term.ty().unwrap()),
+ args.len(),
+ ))
+ } else {
+ None
+ }
+ });
+ if let Some((output, inputs)) = sig {
+ (def_id, output, inputs)
+ } else {
+ return false;
+ }
+ }
+ _ => return false,
+ };
+
+ let output = self.replace_bound_vars_with_fresh_vars(expr.span, infer::FnCall, output);
+ let output = self.normalize_associated_types_in(expr.span, output);
+ if !output.is_ty_var() && self.can_coerce(output, expected) {
+ let (sugg_call, mut applicability) = match inputs {
+ 0 => ("".to_string(), Applicability::MachineApplicable),
+ 1..=4 => (
+ (0..inputs).map(|_| "_").collect::<Vec<_>>().join(", "),
+ Applicability::MachineApplicable,
+ ),
+ _ => ("...".to_string(), Applicability::HasPlaceholders),
+ };
+
+ let msg = match self.tcx.def_kind(def_id) {
+ DefKind::Fn => "call this function",
+ DefKind::Closure | DefKind::OpaqueTy => "call this closure",
+ DefKind::Ctor(CtorOf::Struct, _) => "instantiate this tuple struct",
+ DefKind::Ctor(CtorOf::Variant, _) => "instantiate this tuple variant",
+ _ => "call this function",
+ };
+
+ let sugg = match expr.kind {
+ hir::ExprKind::Call(..)
+ | hir::ExprKind::Path(..)
+ | hir::ExprKind::Index(..)
+ | hir::ExprKind::Lit(..) => {
+ vec![(expr.span.shrink_to_hi(), format!("({sugg_call})"))]
+ }
+ hir::ExprKind::Closure { .. } => {
+ // Might be `{ expr } || { bool }`
+ applicability = Applicability::MaybeIncorrect;
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]
+ }
+ _ => {
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(")({sugg_call})")),
+ ]
+ }
+ };
+
+ err.multipart_suggestion_verbose(
+ format!("use parentheses to {msg}"),
+ sugg,
+ applicability,
+ );
+
+ return true;
+ }
+ false
+ }
+
+ pub fn suggest_deref_ref_or_into(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ expected_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ ) {
+ let expr = expr.peel_blocks();
+ if let Some((sp, msg, suggestion, applicability, verbose)) =
+ self.check_ref(expr, found, expected)
+ {
+ if verbose {
+ err.span_suggestion_verbose(sp, &msg, suggestion, applicability);
+ } else {
+ err.span_suggestion(sp, &msg, suggestion, applicability);
+ }
+ } else if let (ty::FnDef(def_id, ..), true) =
+ (&found.kind(), self.suggest_fn_call(err, expr, expected, found))
+ {
+ if let Some(sp) = self.tcx.hir().span_if_local(*def_id) {
+ err.span_label(sp, format!("{found} defined here"));
+ }
+ } else if !self.check_for_cast(err, expr, found, expected, expected_ty_expr) {
+ let methods = self.get_conversion_methods(expr.span, expected, found, expr.hir_id);
+ if !methods.is_empty() {
+ let mut suggestions = methods.iter()
+ .filter_map(|conversion_method| {
+ let receiver_method_ident = expr.method_ident();
+ if let Some(method_ident) = receiver_method_ident
+ && method_ident.name == conversion_method.name
+ {
+ return None // do not suggest code that is already there (#53348)
+ }
+
+ let method_call_list = [sym::to_vec, sym::to_string];
+ let mut sugg = if let ExprKind::MethodCall(receiver_method, ..) = expr.kind
+ && receiver_method.ident.name == sym::clone
+ && method_call_list.contains(&conversion_method.name)
+ // If receiver is `.clone()` and found type has one of those methods,
+ // we guess that the user wants to convert from a slice type (`&[]` or `&str`)
+ // to an owned type (`Vec` or `String`). These conversions clone internally,
+ // so we remove the user's `clone` call.
+ {
+ vec![(
+ receiver_method.ident.span,
+ conversion_method.name.to_string()
+ )]
+ } else if expr.precedence().order()
+ < ExprPrecedence::MethodCall.order()
+ {
+ vec![
+ (expr.span.shrink_to_lo(), "(".to_string()),
+ (expr.span.shrink_to_hi(), format!(").{}()", conversion_method.name)),
+ ]
+ } else {
+ vec![(expr.span.shrink_to_hi(), format!(".{}()", conversion_method.name))]
+ };
+ let struct_pat_shorthand_field = self.maybe_get_struct_pattern_shorthand_field(expr);
+ if let Some(name) = struct_pat_shorthand_field {
+ sugg.insert(
+ 0,
+ (expr.span.shrink_to_lo(), format!("{}: ", name)),
+ );
+ }
+ Some(sugg)
+ })
+ .peekable();
+ if suggestions.peek().is_some() {
+ err.multipart_suggestions(
+ "try using a conversion method",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ } else if let ty::Adt(found_adt, found_substs) = found.kind()
+ && self.tcx.is_diagnostic_item(sym::Option, found_adt.did())
+ && let ty::Adt(expected_adt, expected_substs) = expected.kind()
+ && self.tcx.is_diagnostic_item(sym::Option, expected_adt.did())
+ && let ty::Ref(_, inner_ty, _) = expected_substs.type_at(0).kind()
+ && inner_ty.is_str()
+ {
+ let ty = found_substs.type_at(0);
+ let mut peeled = ty;
+ let mut ref_cnt = 0;
+ while let ty::Ref(_, inner, _) = peeled.kind() {
+ peeled = *inner;
+ ref_cnt += 1;
+ }
+ if let ty::Adt(adt, _) = peeled.kind()
+ && self.tcx.is_diagnostic_item(sym::String, adt.did())
+ {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ "try converting the passed type into a `&str`",
+ format!(".map(|x| &*{}x)", "*".repeat(ref_cnt)),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+
+    /// When the expected type is a boxed value but the expression is allocated on the stack,
+    /// suggest allocating it on the heap by calling `Box::new()`.
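+    ///
+    /// An illustrative sketch (not from the original source) of code this fires on:
+    ///
+    /// ```compile_fail,E0308
+    /// fn foo() -> Box<u32> {
+    ///     42 // suggestion: `Box::new(42)`
+    /// }
+    /// ```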
+ pub(in super::super) fn suggest_boxing_when_appropriate(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) {
+ if self.tcx.hir().is_inside_const_context(expr.hir_id) {
+ // Do not suggest `Box::new` in const context.
+ return;
+ }
+ if !expected.is_box() || found.is_box() {
+ return;
+ }
+ let boxed_found = self.tcx.mk_box(found);
+ if self.can_coerce(boxed_found, expected) {
+ err.multipart_suggestion(
+ "store this in the heap by calling `Box::new`",
+ vec![
+ (expr.span.shrink_to_lo(), "Box::new(".to_string()),
+ (expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ err.note(
+ "for more on the distinction between the stack and the heap, read \
+ https://doc.rust-lang.org/book/ch15-01-box.html, \
+ https://doc.rust-lang.org/rust-by-example/std/box.html, and \
+ https://doc.rust-lang.org/std/boxed/index.html",
+ );
+ }
+ }
+
+    /// When encountering a closure that captures variables where a `FnPtr` is expected,
+    /// suggest a non-capturing closure.
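+    ///
+    /// An illustrative sketch (not from the original source):
+    ///
+    /// ```compile_fail,E0308
+    /// let x = 1;
+    /// let f: fn() -> i32 = || x; // `x` is captured, so this cannot coerce to `fn() -> i32`
+    /// ```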
+ pub(in super::super) fn suggest_no_capture_closure(
+ &self,
+ err: &mut Diagnostic,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) {
+ if let (ty::FnPtr(_), ty::Closure(def_id, _)) = (expected.kind(), found.kind()) {
+ if let Some(upvars) = self.tcx.upvars_mentioned(*def_id) {
+                // Report up to four upvars being captured to reduce the number of
+                // error messages reported back to the user.
+ let spans_and_labels = upvars
+ .iter()
+ .take(4)
+ .map(|(var_hir_id, upvar)| {
+ let var_name = self.tcx.hir().name(*var_hir_id).to_string();
+ let msg = format!("`{}` captured here", var_name);
+ (upvar.span, msg)
+ })
+ .collect::<Vec<_>>();
+
+ let mut multi_span: MultiSpan =
+ spans_and_labels.iter().map(|(sp, _)| *sp).collect::<Vec<_>>().into();
+ for (sp, label) in spans_and_labels {
+ multi_span.push_span_label(sp, label);
+ }
+ err.span_note(
+ multi_span,
+ "closures can only be coerced to `fn` types if they do not capture any variables"
+ );
+ }
+ }
+ }
+
+ /// When encountering an `impl Future` where `BoxFuture` is expected, suggest `Box::pin`.
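+    ///
+    /// An illustrative sketch (not from the original source) of the shape of code
+    /// this handles:
+    ///
+    /// ```ignore (illustrative)
+    /// fn foo() -> Pin<Box<dyn Future<Output = i32> + Send>> {
+    ///     async { 1 } // suggestion: `Box::pin(async { 1 })`
+    /// }
+    /// ```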
+ #[instrument(skip(self, err))]
+ pub(in super::super) fn suggest_calling_boxed_future_when_appropriate(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ ) -> bool {
+ // Handle #68197.
+
+ if self.tcx.hir().is_inside_const_context(expr.hir_id) {
+ // Do not suggest `Box::new` in const context.
+ return false;
+ }
+ let pin_did = self.tcx.lang_items().pin_type();
+ // This guards the `unwrap` and `mk_box` below.
+ if pin_did.is_none() || self.tcx.lang_items().owned_box().is_none() {
+ return false;
+ }
+ let box_found = self.tcx.mk_box(found);
+ let pin_box_found = self.tcx.mk_lang_item(box_found, LangItem::Pin).unwrap();
+ let pin_found = self.tcx.mk_lang_item(found, LangItem::Pin).unwrap();
+ match expected.kind() {
+ ty::Adt(def, _) if Some(def.did()) == pin_did => {
+ if self.can_coerce(pin_box_found, expected) {
+ debug!("can coerce {:?} to {:?}, suggesting Box::pin", pin_box_found, expected);
+ match found.kind() {
+ ty::Adt(def, _) if def.is_box() => {
+ err.help("use `Box::pin`");
+ }
+ _ => {
+ err.multipart_suggestion(
+ "you need to pin and box this expression",
+ vec![
+ (expr.span.shrink_to_lo(), "Box::pin(".to_string()),
+ (expr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ true
+ } else if self.can_coerce(pin_found, expected) {
+ match found.kind() {
+ ty::Adt(def, _) if def.is_box() => {
+ err.help("use `Box::pin`");
+ true
+ }
+ _ => false,
+ }
+ } else {
+ false
+ }
+ }
+ ty::Adt(def, _) if def.is_box() && self.can_coerce(box_found, expected) => {
+ // Check if the parent expression is a call to Pin::new. If it
+ // is and we were expecting a Box, ergo Pin<Box<expected>>, we
+ // can suggest Box::pin.
+ let parent = self.tcx.hir().get_parent_node(expr.hir_id);
+ let Some(Node::Expr(Expr { kind: ExprKind::Call(fn_name, _), .. })) = self.tcx.hir().find(parent) else {
+ return false;
+ };
+ match fn_name.kind {
+ ExprKind::Path(QPath::TypeRelative(
+ hir::Ty {
+ kind: TyKind::Path(QPath::Resolved(_, Path { res: recv_ty, .. })),
+ ..
+ },
+ method,
+ )) if recv_ty.opt_def_id() == pin_did && method.ident.name == sym::new => {
+ err.span_suggestion(
+ fn_name.span,
+ "use `Box::pin` to pin and box this expression",
+ "Box::pin",
+ Applicability::MachineApplicable,
+ );
+ true
+ }
+ _ => false,
+ }
+ }
+ _ => false,
+ }
+ }
+
+ /// A common error is to forget to add a semicolon at the end of a block, e.g.,
+ ///
+ /// ```compile_fail,E0308
+ /// # fn bar_that_returns_u32() -> u32 { 4 }
+ /// fn foo() {
+ /// bar_that_returns_u32()
+ /// }
+ /// ```
+ ///
+ /// This routine checks if the return expression in a block would make sense on its own as a
+ /// statement and the return type has been left as default or has been specified as `()`. If so,
+ /// it suggests adding a semicolon.
+ ///
+    /// If the expression is the tail expression of a closure without a block (`|| expr`), a
+    /// block also needs to be added (`|| { expr; }`). This is denoted by `needs_block`.
+ pub fn suggest_missing_semicolon(
+ &self,
+ err: &mut Diagnostic,
+ expression: &'tcx hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ needs_block: bool,
+ ) {
+ if expected.is_unit() {
+            // `BlockTailExpression` is only relevant if the tail expr would be
+            // useful on its own.
+ match expression.kind {
+ ExprKind::Call(..)
+ | ExprKind::MethodCall(..)
+ | ExprKind::Loop(..)
+ | ExprKind::If(..)
+ | ExprKind::Match(..)
+ | ExprKind::Block(..)
+ if expression.can_have_side_effects()
+ // If the expression is from an external macro, then do not suggest
+ // adding a semicolon, because there's nowhere to put it.
+ // See issue #81943.
+ && !in_external_macro(self.tcx.sess, expression.span) =>
+ {
+ if needs_block {
+ err.multipart_suggestion(
+ "consider using a semicolon here",
+ vec![
+ (expression.span.shrink_to_lo(), "{ ".to_owned()),
+ (expression.span.shrink_to_hi(), "; }".to_owned()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion(
+ expression.span.shrink_to_hi(),
+ "consider using a semicolon here",
+ ";",
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ _ => (),
+ }
+ }
+ }
+
+ /// A possible error is to forget to add a return type that is needed:
+ ///
+ /// ```compile_fail,E0308
+ /// # fn bar_that_returns_u32() -> u32 { 4 }
+ /// fn foo() {
+ /// bar_that_returns_u32()
+ /// }
+ /// ```
+ ///
+    /// This routine checks if the return type is left as default, the method is not part of an
+    /// `impl` block, and it isn't the `main` method. If so, it suggests setting the return
+    /// type.
+ pub(in super::super) fn suggest_missing_return_type(
+ &self,
+ err: &mut Diagnostic,
+ fn_decl: &hir::FnDecl<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ can_suggest: bool,
+ fn_id: hir::HirId,
+ ) -> bool {
+ let found =
+ self.resolve_numeric_literals_with_default(self.resolve_vars_if_possible(found));
+ // Only suggest changing the return type for methods that
+ // haven't set a return type at all (and aren't `fn main()` or an impl).
+ match (
+ &fn_decl.output,
+ found.is_suggestable(self.tcx, false),
+ can_suggest,
+ expected.is_unit(),
+ ) {
+ (&hir::FnRetTy::DefaultReturn(span), true, true, true) => {
+ err.subdiagnostic(AddReturnTypeSuggestion::Add { span, found });
+ true
+ }
+ (&hir::FnRetTy::DefaultReturn(span), false, true, true) => {
+ // FIXME: if `found` could be `impl Iterator` or `impl Fn*`, we should suggest
+ // that.
+ err.subdiagnostic(AddReturnTypeSuggestion::MissingHere { span });
+ true
+ }
+ (&hir::FnRetTy::DefaultReturn(span), _, false, true) => {
+ // `fn main()` must return `()`, do not suggest changing return type
+ err.subdiagnostic(ExpectedReturnTypeLabel::Unit { span });
+ true
+ }
+ // expectation was caused by something else, not the default return
+ (&hir::FnRetTy::DefaultReturn(_), _, _, false) => false,
+ (&hir::FnRetTy::Return(ref ty), _, _, _) => {
+ // Only point to return type if the expected type is the return type, as if they
+ // are not, the expectation must have been caused by something else.
+ debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.kind);
+ let span = ty.span;
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
+                debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.kind);
+                debug!("suggest_missing_return_type: expected type {:?}", expected);
+ let bound_vars = self.tcx.late_bound_vars(fn_id);
+ let ty = Binder::bind_with_vars(ty, bound_vars);
+ let ty = self.normalize_associated_types_in(span, ty);
+ let ty = self.tcx.erase_late_bound_regions(ty);
+ if self.can_coerce(expected, ty) {
+ err.subdiagnostic(ExpectedReturnTypeLabel::Other { span, expected });
+ self.try_suggest_return_impl_trait(err, expected, ty, fn_id);
+ return true;
+ }
+ false
+ }
+ }
+ }
+
+    /// Checks whether the return type is a generic type with a trait bound, and
+    /// suggests this only if the generic param is not present in the arguments.
+    /// If so, hints the user towards changing the return type to `impl Trait`:
+ /// ```compile_fail,E0308
+ /// fn cant_name_it<T: Fn() -> u32>() -> T {
+ /// || 3
+ /// }
+ /// ```
+ fn try_suggest_return_impl_trait(
+ &self,
+ err: &mut Diagnostic,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ fn_id: hir::HirId,
+ ) {
+ // Only apply the suggestion if:
+ // - the return type is a generic parameter
+ // - the generic param is not used as a fn param
+ // - the generic param has at least one bound
+ // - the generic param doesn't appear in any other bounds where it's not the Self type
+ // Suggest:
+ // - Changing the return type to be `impl <all bounds>`
+
+ debug!("try_suggest_return_impl_trait, expected = {:?}, found = {:?}", expected, found);
+
+ let ty::Param(expected_ty_as_param) = expected.kind() else { return };
+
+ let fn_node = self.tcx.hir().find(fn_id);
+
+ let Some(hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Fn(
+ hir::FnSig { decl: hir::FnDecl { inputs: fn_parameters, output: fn_return, .. }, .. },
+ hir::Generics { params, predicates, .. },
+ _body_id,
+ ),
+ ..
+ })) = fn_node else { return };
+
+ if params.get(expected_ty_as_param.index as usize).is_none() {
+ return;
+ };
+
+        // Get all where-clause `BoundPredicate`s here, because they are used in two cases below.
+ let where_predicates = predicates
+ .iter()
+ .filter_map(|p| match p {
+ WherePredicate::BoundPredicate(hir::WhereBoundPredicate {
+ bounds,
+ bounded_ty,
+ ..
+ }) => {
+ // FIXME: Maybe these calls to `ast_ty_to_ty` can be removed (and the ones below)
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, bounded_ty);
+ Some((ty, bounds))
+ }
+ _ => None,
+ })
+ .map(|(ty, bounds)| match ty.kind() {
+ ty::Param(param_ty) if param_ty == expected_ty_as_param => Ok(Some(bounds)),
+ // check whether there is any predicate that contains our `T`, like `Option<T>: Send`
+ _ => match ty.contains(expected) {
+ true => Err(()),
+ false => Ok(None),
+ },
+ })
+ .collect::<Result<Vec<_>, _>>();
+
+ let Ok(where_predicates) = where_predicates else { return };
+
+ // now get all predicates in the same types as the where bounds, so we can chain them
+ let predicates_from_where =
+ where_predicates.iter().flatten().flat_map(|bounds| bounds.iter());
+
+ // extract all bounds from the source code using their spans
+ let all_matching_bounds_strs = predicates_from_where
+ .filter_map(|bound| match bound {
+ GenericBound::Trait(_, _) => {
+ self.tcx.sess.source_map().span_to_snippet(bound.span()).ok()
+ }
+ _ => None,
+ })
+ .collect::<Vec<String>>();
+
+        if all_matching_bounds_strs.is_empty() {
+ return;
+ }
+
+ let all_bounds_str = all_matching_bounds_strs.join(" + ");
+
+ let ty_param_used_in_fn_params = fn_parameters.iter().any(|param| {
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, param);
+ matches!(ty.kind(), ty::Param(fn_param_ty_param) if expected_ty_as_param == fn_param_ty_param)
+ });
+
+ if ty_param_used_in_fn_params {
+ return;
+ }
+
+ err.span_suggestion(
+ fn_return.span(),
+ "consider using an impl return type",
+ format!("impl {}", all_bounds_str),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ pub(in super::super) fn suggest_missing_break_or_return_expr(
+ &self,
+ err: &mut Diagnostic,
+ expr: &'tcx hir::Expr<'tcx>,
+ fn_decl: &hir::FnDecl<'_>,
+ expected: Ty<'tcx>,
+ found: Ty<'tcx>,
+ id: hir::HirId,
+ fn_id: hir::HirId,
+ ) {
+ if !expected.is_unit() {
+ return;
+ }
+ let found = self.resolve_vars_with_obligations(found);
+
+ let in_loop = self.is_loop(id)
+ || self.tcx.hir().parent_iter(id).any(|(parent_id, _)| self.is_loop(parent_id));
+
+ let in_local_statement = self.is_local_statement(id)
+ || self
+ .tcx
+ .hir()
+ .parent_iter(id)
+ .any(|(parent_id, _)| self.is_local_statement(parent_id));
+
+ if in_loop && in_local_statement {
+ err.multipart_suggestion(
+ "you might have meant to break the loop with this value",
+ vec![
+ (expr.span.shrink_to_lo(), "break ".to_string()),
+ (expr.span.shrink_to_hi(), ";".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ return;
+ }
+
+ if let hir::FnRetTy::Return(ty) = fn_decl.output {
+ let ty = <dyn AstConv<'_>>::ast_ty_to_ty(self, ty);
+ let bound_vars = self.tcx.late_bound_vars(fn_id);
+ let ty = self.tcx.erase_late_bound_regions(Binder::bind_with_vars(ty, bound_vars));
+ let ty = self.normalize_associated_types_in(expr.span, ty);
+ let ty = match self.tcx.asyncness(fn_id.owner) {
+ hir::IsAsync::Async => self
+ .tcx
+ .infer_ctxt()
+ .enter(|infcx| {
+ infcx.get_impl_future_output_ty(ty).unwrap_or_else(|| {
+ span_bug!(
+ fn_decl.output.span(),
+ "failed to get output type of async function"
+ )
+ })
+ })
+ .skip_binder(),
+ hir::IsAsync::NotAsync => ty,
+ };
+ if self.can_coerce(found, ty) {
+ err.multipart_suggestion(
+ "you might have meant to return this value",
+ vec![
+ (expr.span.shrink_to_lo(), "return ".to_string()),
+ (expr.span.shrink_to_hi(), ";".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ pub(in super::super) fn suggest_missing_parentheses(
+ &self,
+ err: &mut Diagnostic,
+ expr: &hir::Expr<'_>,
+ ) {
+ let sp = self.tcx.sess.source_map().start_point(expr.span);
+ if let Some(sp) = self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp) {
+ // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`
+ self.tcx.sess.parse_sess.expr_parentheses_needed(err, *sp);
+ }
+ }
+
+    /// Given an expression type mismatch, peel any `&` expressions until we get to
+    /// a block expression, and then suggest replacing the braces with square brackets
+    /// if it was possibly mistaken array syntax.
+ pub(crate) fn suggest_block_to_brackets_peeling_refs(
+ &self,
+ diag: &mut Diagnostic,
+ mut expr: &hir::Expr<'_>,
+ mut expr_ty: Ty<'tcx>,
+ mut expected_ty: Ty<'tcx>,
+ ) {
+ loop {
+ match (&expr.kind, expr_ty.kind(), expected_ty.kind()) {
+ (
+ hir::ExprKind::AddrOf(_, _, inner_expr),
+ ty::Ref(_, inner_expr_ty, _),
+ ty::Ref(_, inner_expected_ty, _),
+ ) => {
+ expr = *inner_expr;
+ expr_ty = *inner_expr_ty;
+ expected_ty = *inner_expected_ty;
+ }
+ (hir::ExprKind::Block(blk, _), _, _) => {
+ self.suggest_block_to_brackets(diag, *blk, expr_ty, expected_ty);
+ break;
+ }
+ _ => break,
+ }
+ }
+ }
+
+    /// Suggest wrapping the block in square brackets instead of curly braces
+    /// in case the block was mistaken for array syntax, e.g. `{ 1 }` -> `[ 1 ]`.
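+    ///
+    /// An illustrative sketch (not from the original source):
+    ///
+    /// ```compile_fail,E0308
+    /// let x: [i32; 1] = { 1 }; // suggestion: `[ 1 ]`
+    /// ```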
+ pub(crate) fn suggest_block_to_brackets(
+ &self,
+ diag: &mut Diagnostic,
+ blk: &hir::Block<'_>,
+ blk_ty: Ty<'tcx>,
+ expected_ty: Ty<'tcx>,
+ ) {
+ if let ty::Slice(elem_ty) | ty::Array(elem_ty, _) = expected_ty.kind() {
+ if self.can_coerce(blk_ty, *elem_ty)
+ && blk.stmts.is_empty()
+ && blk.rules == hir::BlockCheckMode::DefaultBlock
+ {
+ let source_map = self.tcx.sess.source_map();
+ if let Ok(snippet) = source_map.span_to_snippet(blk.span) {
+ if snippet.starts_with('{') && snippet.ends_with('}') {
+ diag.multipart_suggestion_verbose(
+ "to create an array, use square brackets instead of curly braces",
+ vec![
+ (
+ blk.span
+ .shrink_to_lo()
+ .with_hi(rustc_span::BytePos(blk.span.lo().0 + 1)),
+ "[".to_string(),
+ ),
+ (
+ blk.span
+ .shrink_to_hi()
+ .with_lo(rustc_span::BytePos(blk.span.hi().0 - 1)),
+ "]".to_string(),
+ ),
+ ],
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ }
+ }
+ }
+
+ fn is_loop(&self, id: hir::HirId) -> bool {
+ let node = self.tcx.hir().get(id);
+ matches!(node, Node::Expr(Expr { kind: ExprKind::Loop(..), .. }))
+ }
+
+ fn is_local_statement(&self, id: hir::HirId) -> bool {
+ let node = self.tcx.hir().get(id);
+ matches!(node, Node::Stmt(Stmt { kind: StmtKind::Local(..), .. }))
+ }
+
+ /// Suggest that `&T` was cloned instead of `T` because `T` does not implement `Clone`,
+ /// which is a side-effect of autoref.
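+    ///
+    /// An illustrative sketch (not from the original source):
+    ///
+    /// ```compile_fail,E0308
+    /// struct NotClone;
+    /// fn takes(_: NotClone) {}
+    /// let x = &NotClone;
+    /// takes(x.clone()); // clones the `&NotClone`, so the note points here
+    /// ```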
+ pub(crate) fn note_type_is_not_clone(
+ &self,
+ diag: &mut Diagnostic,
+ expected_ty: Ty<'tcx>,
+ found_ty: Ty<'tcx>,
+ expr: &hir::Expr<'_>,
+ ) {
+ let hir::ExprKind::MethodCall(segment, &[ref callee_expr], _) = expr.kind else { return; };
+ let Some(clone_trait_did) = self.tcx.lang_items().clone_trait() else { return; };
+ let ty::Ref(_, pointee_ty, _) = found_ty.kind() else { return };
+ let results = self.typeck_results.borrow();
+ // First, look for a `Clone::clone` call
+ if segment.ident.name == sym::clone
+ && results.type_dependent_def_id(expr.hir_id).map_or(
+ false,
+ |did| {
+ let assoc_item = self.tcx.associated_item(did);
+ assoc_item.container == ty::AssocItemContainer::TraitContainer
+ && assoc_item.container_id(self.tcx) == clone_trait_did
+ },
+ )
+ // If that clone call hasn't already dereferenced the self type (i.e. don't give this
+ // diagnostic in cases where we have `(&&T).clone()` and we expect `T`).
+ && !results.expr_adjustments(callee_expr).iter().any(|adj| matches!(adj.kind, ty::adjustment::Adjust::Deref(..)))
+ // Check that we're in fact trying to clone into the expected type
+ && self.can_coerce(*pointee_ty, expected_ty)
+ // And the expected type doesn't implement `Clone`
+ && !self.predicate_must_hold_considering_regions(&traits::Obligation {
+ cause: traits::ObligationCause::dummy(),
+ param_env: self.param_env,
+ recursion_depth: 0,
+ predicate: ty::Binder::dummy(ty::TraitRef {
+ def_id: clone_trait_did,
+ substs: self.tcx.mk_substs([expected_ty.into()].iter()),
+ })
+ .without_const()
+ .to_predicate(self.tcx),
+ })
+ {
+ diag.span_note(
+ callee_expr.span,
+ &format!(
+ "`{expected_ty}` does not implement `Clone`, so `{found_ty}` was cloned instead"
+ ),
+ );
+ }
+ }
+
+ /// A common error is to add an extra semicolon:
+ ///
+ /// ```compile_fail,E0308
+ /// fn foo() -> usize {
+ /// 22;
+ /// }
+ /// ```
+ ///
+ /// This routine checks if the final statement in a block is an
+ /// expression with an explicit semicolon whose type is compatible
+ /// with `expected_ty`. If so, it suggests removing the semicolon.
+ pub(crate) fn consider_removing_semicolon(
+ &self,
+ blk: &'tcx hir::Block<'tcx>,
+ expected_ty: Ty<'tcx>,
+ err: &mut Diagnostic,
+ ) -> bool {
+ if let Some((span_semi, boxed)) = self.could_remove_semicolon(blk, expected_ty) {
+ if let StatementAsExpression::NeedsBoxing = boxed {
+ err.span_suggestion_verbose(
+ span_semi,
+ "consider removing this semicolon and boxing the expression",
+ "",
+ Applicability::HasPlaceholders,
+ );
+ } else {
+ err.span_suggestion_short(
+ span_semi,
+ "remove this semicolon",
+ "",
+ Applicability::MachineApplicable,
+ );
+ }
+ true
+ } else {
+ false
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/gather_locals.rs b/compiler/rustc_typeck/src/check/gather_locals.rs
new file mode 100644
index 000000000..8f34a970f
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/gather_locals.rs
@@ -0,0 +1,160 @@
+use crate::check::{FnCtxt, LocalTy, UserType};
+use rustc_hir as hir;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::PatKind;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+use rustc_trait_selection::traits;
+
+/// A declaration is an abstraction of [hir::Local] and [hir::Let].
+///
+/// It must have a hir_id, as this is how we connect gather_locals to the check functions.
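+///
+/// For example (an illustrative sketch, not from the original source), both of
+/// these introduce a `Declaration`:
+///
+/// ```ignore (illustrative)
+/// let x: u8 = 5;            // a `hir::Local`
+/// if let Some(y) = opt { }  // the `hir::Let` inside the `if`
+/// ```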
+pub(super) struct Declaration<'a> {
+ pub hir_id: hir::HirId,
+ pub pat: &'a hir::Pat<'a>,
+ pub ty: Option<&'a hir::Ty<'a>>,
+ pub span: Span,
+ pub init: Option<&'a hir::Expr<'a>>,
+ pub els: Option<&'a hir::Block<'a>>,
+}
+
+impl<'a> From<&'a hir::Local<'a>> for Declaration<'a> {
+ fn from(local: &'a hir::Local<'a>) -> Self {
+ let hir::Local { hir_id, pat, ty, span, init, els, source: _ } = *local;
+ Declaration { hir_id, pat, ty, span, init, els }
+ }
+}
+
+impl<'a> From<&'a hir::Let<'a>> for Declaration<'a> {
+ fn from(let_expr: &'a hir::Let<'a>) -> Self {
+ let hir::Let { hir_id, pat, ty, span, init } = *let_expr;
+ Declaration { hir_id, pat, ty, span, init: Some(init), els: None }
+ }
+}
+
+pub(super) struct GatherLocalsVisitor<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+    // Parameters are special cases of patterns, but we want to handle them as
+    // *distinct* cases, so track when we are hitting a pattern *within* an fn
+    // parameter.
+ outermost_fn_param_pat: Option<Span>,
+}
+
+impl<'a, 'tcx> GatherLocalsVisitor<'a, 'tcx> {
+ pub(super) fn new(fcx: &'a FnCtxt<'a, 'tcx>) -> Self {
+ Self { fcx, outermost_fn_param_pat: None }
+ }
+
+ fn assign(&mut self, span: Span, nid: hir::HirId, ty_opt: Option<LocalTy<'tcx>>) -> Ty<'tcx> {
+ match ty_opt {
+ None => {
+ // Infer the variable's type.
+ let var_ty = self.fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ });
+ self.fcx
+ .locals
+ .borrow_mut()
+ .insert(nid, LocalTy { decl_ty: var_ty, revealed_ty: var_ty });
+ var_ty
+ }
+ Some(typ) => {
+ // Take type that the user specified.
+ self.fcx.locals.borrow_mut().insert(nid, typ);
+ typ.revealed_ty
+ }
+ }
+ }
+
+ /// Allocates a [LocalTy] for a declaration, which may have a type annotation. If it does have
+ /// a type annotation, then the LocalTy stored will be the resolved type. This may be found
+ /// again during type checking by querying [FnCtxt::local_ty] for the same hir_id.
+ fn declare(&mut self, decl: Declaration<'tcx>) {
+ let local_ty = match decl.ty {
+ Some(ref ty) => {
+ let o_ty = self.fcx.to_ty(&ty);
+
+ let c_ty = self.fcx.inh.infcx.canonicalize_user_type_annotation(UserType::Ty(o_ty));
+ debug!("visit_local: ty.hir_id={:?} o_ty={:?} c_ty={:?}", ty.hir_id, o_ty, c_ty);
+ self.fcx
+ .typeck_results
+ .borrow_mut()
+ .user_provided_types_mut()
+ .insert(ty.hir_id, c_ty);
+
+ Some(LocalTy { decl_ty: o_ty, revealed_ty: o_ty })
+ }
+ None => None,
+ };
+ self.assign(decl.span, decl.hir_id, local_ty);
+
+ debug!(
+ "local variable {:?} is assigned type {}",
+ decl.pat,
+ self.fcx.ty_to_string(self.fcx.locals.borrow().get(&decl.hir_id).unwrap().decl_ty)
+ );
+ }
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for GatherLocalsVisitor<'a, 'tcx> {
+ // Add explicitly-declared locals.
+ fn visit_local(&mut self, local: &'tcx hir::Local<'tcx>) {
+ self.declare(local.into());
+ intravisit::walk_local(self, local)
+ }
+
+ fn visit_let_expr(&mut self, let_expr: &'tcx hir::Let<'tcx>) {
+ self.declare(let_expr.into());
+ intravisit::walk_let_expr(self, let_expr);
+ }
+
+ fn visit_param(&mut self, param: &'tcx hir::Param<'tcx>) {
+ let old_outermost_fn_param_pat = self.outermost_fn_param_pat.replace(param.ty_span);
+ intravisit::walk_param(self, param);
+ self.outermost_fn_param_pat = old_outermost_fn_param_pat;
+ }
+
+ // Add pattern bindings.
+ fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) {
+ if let PatKind::Binding(_, _, ident, _) = p.kind {
+ let var_ty = self.assign(p.span, p.hir_id, None);
+
+ if let Some(ty_span) = self.outermost_fn_param_pat {
+ if !self.fcx.tcx.features().unsized_fn_params {
+ self.fcx.require_type_is_sized(
+ var_ty,
+ p.span,
+ traits::SizedArgumentType(Some(ty_span)),
+ );
+ }
+ } else {
+ if !self.fcx.tcx.features().unsized_locals {
+ self.fcx.require_type_is_sized(var_ty, p.span, traits::VariableType(p.hir_id));
+ }
+ }
+
+ debug!(
+ "pattern binding {} is assigned to {} with type {:?}",
+ ident,
+ self.fcx.ty_to_string(self.fcx.locals.borrow().get(&p.hir_id).unwrap().decl_ty),
+ var_ty
+ );
+ }
+ let old_outermost_fn_param_pat = self.outermost_fn_param_pat.take();
+ intravisit::walk_pat(self, p);
+ self.outermost_fn_param_pat = old_outermost_fn_param_pat;
+ }
+
+ // Don't descend into the bodies of nested closures.
+ fn visit_fn(
+ &mut self,
+ _: intravisit::FnKind<'tcx>,
+ _: &'tcx hir::FnDecl<'tcx>,
+ _: hir::BodyId,
+ _: Span,
+ _: hir::HirId,
+ ) {
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/generator_interior.rs b/compiler/rustc_typeck/src/check/generator_interior.rs
new file mode 100644
index 000000000..d4f800149
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/generator_interior.rs
@@ -0,0 +1,632 @@
+//! This calculates the types whose storage lives across a suspension point in a
+//! generator from the perspective of typeck. The actual types used at runtime
+//! are calculated in `rustc_mir_transform::generator` and may be a subset of the
+//! types computed here.
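+//!
+//! For example (an illustrative sketch, not from the original source):
+//!
+//! ```ignore (illustrative)
+//! async fn f() {
+//!     let s = String::new();
+//!     g().await; // `String` has storage live across this suspension point,
+//!     drop(s);   // so it is part of the interior type computed here.
+//! }
+//! ```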
+
+use self::drop_ranges::DropRanges;
+use super::FnCtxt;
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
+use rustc_errors::pluralize;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_hir::hir_id::HirIdSet;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Arm, Expr, ExprKind, Guard, HirId, Pat, PatKind};
+use rustc_middle::middle::region::{self, Scope, ScopeData, YieldData};
+use rustc_middle::ty::{self, RvalueScopes, Ty, TyCtxt, TypeVisitable};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+use tracing::debug;
+
+mod drop_ranges;
+
+struct InteriorVisitor<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ region_scope_tree: &'a region::ScopeTree,
+ types: FxIndexSet<ty::GeneratorInteriorTypeCause<'tcx>>,
+ rvalue_scopes: &'a RvalueScopes,
+ expr_count: usize,
+ kind: hir::GeneratorKind,
+ prev_unresolved_span: Option<Span>,
+ linted_values: HirIdSet,
+ drop_ranges: DropRanges,
+}
+
+impl<'a, 'tcx> InteriorVisitor<'a, 'tcx> {
+ fn record(
+ &mut self,
+ ty: Ty<'tcx>,
+ hir_id: HirId,
+ scope: Option<region::Scope>,
+ expr: Option<&'tcx Expr<'tcx>>,
+ source_span: Span,
+ ) {
+ use rustc_span::DUMMY_SP;
+
+ let ty = self.fcx.resolve_vars_if_possible(ty);
+
+ debug!(
+ "attempting to record type ty={:?}; hir_id={:?}; scope={:?}; expr={:?}; source_span={:?}; expr_count={:?}",
+ ty, hir_id, scope, expr, source_span, self.expr_count,
+ );
+
+ let live_across_yield = scope
+ .map(|s| {
+ self.region_scope_tree.yield_in_scope(s).and_then(|yield_data| {
+ // If we are recording an expression that is the last yield
+ // in the scope, or that has a postorder CFG index larger
+ // than the one of all of the yields, then its value can't
+ // be storage-live (and therefore live) at any of the yields.
+ //
+ // See the mega-comment at `yield_in_scope` for a proof.
+
+ yield_data
+ .iter()
+ .find(|yield_data| {
+ debug!(
+ "comparing counts yield: {} self: {}, source_span = {:?}",
+ yield_data.expr_and_pat_count, self.expr_count, source_span
+ );
+
+ if self.fcx.sess().opts.unstable_opts.drop_tracking
+ && self
+ .drop_ranges
+ .is_dropped_at(hir_id, yield_data.expr_and_pat_count)
+ {
+ debug!("value is dropped at yield point; not recording");
+ return false;
+ }
+
+                        // If it is a borrow happening in the guard, it needs
+                        // to be recorded regardless, because such borrows do
+                        // live across this yield point.
+ yield_data.expr_and_pat_count >= self.expr_count
+ })
+ .cloned()
+ })
+ })
+ .unwrap_or_else(|| {
+ Some(YieldData { span: DUMMY_SP, expr_and_pat_count: 0, source: self.kind.into() })
+ });
+
+ if let Some(yield_data) = live_across_yield {
+ debug!(
+ "type in expr = {:?}, scope = {:?}, type = {:?}, count = {}, yield_span = {:?}",
+ expr, scope, ty, self.expr_count, yield_data.span
+ );
+
+ if let Some((unresolved_type, unresolved_type_span)) =
+ self.fcx.unresolved_type_vars(&ty)
+ {
+                // If the unresolved type isn't a ty var, then `unresolved_type_span` is `None`.
+ let span = self
+ .prev_unresolved_span
+ .unwrap_or_else(|| unresolved_type_span.unwrap_or(source_span));
+
+ // If we encounter an int/float variable, then inference fallback didn't
+ // finish due to some other error. Don't emit spurious additional errors.
+ if let ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) =
+ unresolved_type.kind()
+ {
+ self.fcx
+ .tcx
+ .sess
+ .delay_span_bug(span, &format!("Encountered var {:?}", unresolved_type));
+ } else {
+ let note = format!(
+ "the type is part of the {} because of this {}",
+ self.kind, yield_data.source
+ );
+
+ self.fcx
+ .need_type_info_err_in_generator(self.kind, span, unresolved_type)
+ .span_note(yield_data.span, &*note)
+ .emit();
+ }
+ } else {
+ // Insert the type into the ordered set.
+ let scope_span = scope.map(|s| s.span(self.fcx.tcx, self.region_scope_tree));
+
+ if !self.linted_values.contains(&hir_id) {
+ check_must_not_suspend_ty(
+ self.fcx,
+ ty,
+ hir_id,
+ SuspendCheckData {
+ expr,
+ source_span,
+ yield_span: yield_data.span,
+ plural_len: 1,
+ ..Default::default()
+ },
+ );
+ self.linted_values.insert(hir_id);
+ }
+
+ self.types.insert(ty::GeneratorInteriorTypeCause {
+ span: source_span,
+ ty,
+ scope_span,
+ yield_span: yield_data.span,
+ expr: expr.map(|e| e.hir_id),
+ });
+ }
+ } else {
+ debug!(
+ "no type in expr = {:?}, count = {:?}, span = {:?}",
+ expr,
+ self.expr_count,
+ expr.map(|e| e.span)
+ );
+ if let Some((unresolved_type, unresolved_type_span)) =
+ self.fcx.unresolved_type_vars(&ty)
+ {
+ debug!(
+ "remained unresolved_type = {:?}, unresolved_type_span: {:?}",
+ unresolved_type, unresolved_type_span
+ );
+ self.prev_unresolved_span = unresolved_type_span;
+ }
+ }
+ }
+}
+
+pub fn resolve_interior<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ def_id: DefId,
+ body_id: hir::BodyId,
+ interior: Ty<'tcx>,
+ kind: hir::GeneratorKind,
+) {
+ let body = fcx.tcx.hir().body(body_id);
+ let typeck_results = fcx.inh.typeck_results.borrow();
+ let mut visitor = InteriorVisitor {
+ fcx,
+ types: FxIndexSet::default(),
+ region_scope_tree: fcx.tcx.region_scope_tree(def_id),
+ rvalue_scopes: &typeck_results.rvalue_scopes,
+ expr_count: 0,
+ kind,
+ prev_unresolved_span: None,
+ linted_values: <_>::default(),
+ drop_ranges: drop_ranges::compute_drop_ranges(fcx, def_id, body),
+ };
+ intravisit::walk_body(&mut visitor, body);
+
+    // Check that we visited the same number of expressions as the RegionResolutionVisitor.
+ let region_expr_count = fcx.tcx.region_scope_tree(def_id).body_expr_count(body_id).unwrap();
+ assert_eq!(region_expr_count, visitor.expr_count);
+
+ // The types are already kept in insertion order.
+ let types = visitor.types;
+
+ // The types in the generator interior contain lifetimes local to the generator itself,
+ // which should not be exposed outside of the generator. Therefore, we replace these
+ // lifetimes with existentially-bound lifetimes, which reflect the exact value of the
+ // lifetimes not being known by users.
+ //
+ // These lifetimes are used in auto trait impl checking (for example,
+ // if a Sync generator contains an &'α T, we need to check whether &'α T: Sync),
+ // so knowledge of the exact relationships between them isn't particularly important.
+
+ debug!("types in generator {:?}, span = {:?}", types, body.value.span);
+
+ let mut counter = 0;
+ let mut captured_tys = FxHashSet::default();
+ let type_causes: Vec<_> = types
+ .into_iter()
+ .filter_map(|mut cause| {
+ // Erase regions and canonicalize late-bound regions to deduplicate as many types as we
+ // can.
+ let erased = fcx.tcx.erase_regions(cause.ty);
+ if captured_tys.insert(erased) {
+ // Replace all regions inside the generator interior with late bound regions.
+ // Note that each region slot in the types gets a new fresh late bound region,
+ // which means that none of the regions inside relate to any other, even if
+ // typeck had previously found constraints that would cause them to be related.
+ let folded = fcx.tcx.fold_regions(erased, |_, current_depth| {
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_u32(counter),
+ kind: ty::BrAnon(counter),
+ };
+ let r = fcx.tcx.mk_region(ty::ReLateBound(current_depth, br));
+ counter += 1;
+ r
+ });
+
+ cause.ty = folded;
+ Some(cause)
+ } else {
+ None
+ }
+ })
+ .collect();
+
+ // Extract type components to build the witness type.
+ let type_list = fcx.tcx.mk_type_list(type_causes.iter().map(|cause| cause.ty));
+ let bound_vars = fcx.tcx.mk_bound_variable_kinds(
+ (0..counter).map(|i| ty::BoundVariableKind::Region(ty::BrAnon(i))),
+ );
+ let witness =
+ fcx.tcx.mk_generator_witness(ty::Binder::bind_with_vars(type_list, bound_vars.clone()));
+
+ drop(typeck_results);
+ // Store the generator types and spans into the typeck results for this generator.
+ fcx.inh.typeck_results.borrow_mut().generator_interior_types =
+ ty::Binder::bind_with_vars(type_causes, bound_vars);
+
+ debug!(
+ "types in generator after region replacement {:?}, span = {:?}",
+ witness, body.value.span
+ );
+
+ // Unify the type variable inside the generator with the new witness
+ match fcx.at(&fcx.misc(body.value.span), fcx.param_env).eq(interior, witness) {
+ Ok(ok) => fcx.register_infer_ok_obligations(ok),
+ _ => bug!(),
+ }
+}
+
+// This visitor has to have the same visit_expr calls as RegionResolutionVisitor in
+// librustc_middle/middle/region.rs since `expr_count` is compared against the results
+// there.
+impl<'a, 'tcx> Visitor<'tcx> for InteriorVisitor<'a, 'tcx> {
+ fn visit_arm(&mut self, arm: &'tcx Arm<'tcx>) {
+ let Arm { guard, pat, body, .. } = arm;
+ self.visit_pat(pat);
+ if let Some(ref g) = guard {
+ {
+ // If there is a guard, we need to count all variables bound in the pattern as
+ // borrowed for the entire guard body, regardless of whether they are accessed.
+ // We do this by walking the pattern bindings and recording `&T` for any `x: T`
+ // that is bound.
+
+ struct ArmPatCollector<'a, 'b, 'tcx> {
+ interior_visitor: &'a mut InteriorVisitor<'b, 'tcx>,
+ scope: Scope,
+ }
+
+ impl<'a, 'b, 'tcx> Visitor<'tcx> for ArmPatCollector<'a, 'b, 'tcx> {
+ fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
+ intravisit::walk_pat(self, pat);
+ if let PatKind::Binding(_, id, ident, ..) = pat.kind {
+ let ty =
+ self.interior_visitor.fcx.typeck_results.borrow().node_type(id);
+ let tcx = self.interior_visitor.fcx.tcx;
+ let ty = tcx.mk_ref(
+ // Use `ReErased` as `resolve_interior` is going to replace all the
+ // regions anyway.
+ tcx.mk_region(ty::ReErased),
+ ty::TypeAndMut { ty, mutbl: hir::Mutability::Not },
+ );
+ self.interior_visitor.record(
+ ty,
+ id,
+ Some(self.scope),
+ None,
+ ident.span,
+ );
+ }
+ }
+ }
+
+ ArmPatCollector {
+ interior_visitor: self,
+ scope: Scope { id: g.body().hir_id.local_id, data: ScopeData::Node },
+ }
+ .visit_pat(pat);
+ }
+
+ match g {
+ Guard::If(ref e) => {
+ self.visit_expr(e);
+ }
+ Guard::IfLet(ref l) => {
+ self.visit_let_expr(l);
+ }
+ }
+ }
+ self.visit_expr(body);
+ }
+
+ fn visit_pat(&mut self, pat: &'tcx Pat<'tcx>) {
+ intravisit::walk_pat(self, pat);
+
+ self.expr_count += 1;
+
+ if let PatKind::Binding(..) = pat.kind {
+ let scope = self.region_scope_tree.var_scope(pat.hir_id.local_id).unwrap();
+ let ty = self.fcx.typeck_results.borrow().pat_ty(pat);
+ self.record(ty, pat.hir_id, Some(scope), None, pat.span);
+ }
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ match &expr.kind {
+ ExprKind::Call(callee, args) => match &callee.kind {
+ ExprKind::Path(qpath) => {
+ let res = self.fcx.typeck_results.borrow().qpath_res(qpath, callee.hir_id);
+ match res {
+ // Direct calls never need to keep the callee `ty::FnDef`
+ // ZST in a temporary, so skip its type, just in case it
+ // can significantly complicate the generator type.
+ Res::Def(
+ DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fn),
+ _,
+ ) => {
+ // NOTE(eddyb) this assumes a path expression has
+ // no nested expressions to keep track of.
+ self.expr_count += 1;
+
+ // Record the rest of the call expression normally.
+ for arg in *args {
+ self.visit_expr(arg);
+ }
+ }
+ _ => intravisit::walk_expr(self, expr),
+ }
+ }
+ _ => intravisit::walk_expr(self, expr),
+ },
+ _ => intravisit::walk_expr(self, expr),
+ }
+
+ self.expr_count += 1;
+
+ debug!("is_borrowed_temporary: {:?}", self.drop_ranges.is_borrowed_temporary(expr));
+
+ let ty = self.fcx.typeck_results.borrow().expr_ty_adjusted_opt(expr);
+ let may_need_drop = |ty: Ty<'tcx>| {
+ // Avoid ICEs in needs_drop.
+ let ty = self.fcx.resolve_vars_if_possible(ty);
+ let ty = self.fcx.tcx.erase_regions(ty);
+ if ty.needs_infer() {
+ return true;
+ }
+ ty.needs_drop(self.fcx.tcx, self.fcx.param_env)
+ };
+
+ // Typically, the value produced by an expression is consumed by its parent in some way,
+ // so we only have to check if the parent contains a yield (note that the parent may, for
+ // example, store the value into a local variable, but then we already consider local
+ // variables to be live across their scope).
+ //
+ // However, in the case of temporary values, we are going to store the value into a
+ // temporary on the stack that is live for the current temporary scope and then return a
+ // reference to it. That value may be live across the entire temporary scope.
+ //
+ // There's another subtlety: if the type has an observable drop, it must be dropped after
+ // the yield, even if it's not borrowed or referenced after the yield. Ideally this would
+ // *only* happen for types with observable drop, not all types which wrap them, but that
+ // doesn't match the behavior of MIR borrowck and causes ICEs. See the FIXME comment in
+ // src/test/ui/generator/drop-tracking-parent-expression.rs.
+ let scope = if self.drop_ranges.is_borrowed_temporary(expr)
+ || ty.map_or(true, |ty| {
+ let needs_drop = may_need_drop(ty);
+ debug!(?needs_drop, ?ty);
+ needs_drop
+ }) {
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
+ } else {
+ debug!("parent_node: {:?}", self.fcx.tcx.hir().find_parent_node(expr.hir_id));
+ match self.fcx.tcx.hir().find_parent_node(expr.hir_id) {
+ Some(parent) => Some(Scope { id: parent.local_id, data: ScopeData::Node }),
+ None => {
+ self.rvalue_scopes.temporary_scope(self.region_scope_tree, expr.hir_id.local_id)
+ }
+ }
+ };
+
+ // If there are adjustments, then record the final type --
+ // this is the actual value that is being produced.
+ if let Some(adjusted_ty) = ty {
+ self.record(adjusted_ty, expr.hir_id, scope, Some(expr), expr.span);
+ }
+
+ // Also record the unadjusted type (which is the only type if
+ // there are no adjustments). The reason for this is that the
+ // unadjusted value is sometimes a "temporary" that would wind
+ // up in a MIR temporary.
+ //
+ // As an example, consider an expression like `vec![].push(x)`.
+ // Here, the `vec![]` would wind up MIR stored into a
+ // temporary variable `t` which we can borrow to invoke
+ // `<Vec<_>>::push(&mut t, x)`.
+ //
+ // Note that an expression can have many adjustments, and we
+ // are just ignoring those intermediate types. This is because
+ // those intermediate values are always linearly "consumed" by
+ // the other adjustments, and hence would never be directly
+ // captured in the MIR.
+ //
+ // (Note that this partly relies on the fact that the `Deref`
+ // traits always return references, which means their content
+ // can be reborrowed without needing to spill to a temporary.
+ // If this were not the case, then we could conceivably have
+ // to create intermediate temporaries.)
+ //
+ // The type table might not have information for this expression
+ // if it is in a malformed scope. (#66387)
+ if let Some(ty) = self.fcx.typeck_results.borrow().expr_ty_opt(expr) {
+ self.record(ty, expr.hir_id, scope, Some(expr), expr.span);
+ } else {
+ self.fcx.tcx.sess.delay_span_bug(expr.span, "no type for node");
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct SuspendCheckData<'a, 'tcx> {
+ expr: Option<&'tcx Expr<'tcx>>,
+ source_span: Span,
+ yield_span: Span,
+ descr_pre: &'a str,
+ descr_post: &'a str,
+ plural_len: usize,
+}
+
+// Returns whether it emitted a diagnostic or not.
+// Note that this fn and the following one are based on the code
+// for creating `must_use` diagnostics.
+//
+// Note that this technique was chosen over things like a `Suspend` marker trait
+// as it is simpler and has precedent in the compiler.
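+//
+// An illustrative sketch (not from the original source; the attribute is
+// unstable behind the `must_not_suspend` feature):
+//
+//     #[must_not_suspend = "holding `MyGuard` across a suspend point is a bug"]
+//     struct MyGuard;
+//
+// Holding a `MyGuard` across an `.await` then triggers the `MUST_NOT_SUSPEND`
+// lint emitted below.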
+pub fn check_must_not_suspend_ty<'tcx>(
+ fcx: &FnCtxt<'_, 'tcx>,
+ ty: Ty<'tcx>,
+ hir_id: HirId,
+ data: SuspendCheckData<'_, 'tcx>,
+) -> bool {
+ if ty.is_unit()
+        // FIXME: should this check `is_ty_uninhabited_from`? This query is not available at this
+        // stage of typeck (before ReVar and RePlaceholder are removed), but may remove noise,
+        // like in `must_use`.
+        // || fcx.tcx.is_ty_uninhabited_from(fcx.tcx.parent_module(hir_id).to_def_id(), ty, fcx.param_env)
+ {
+ return false;
+ }
+
+ let plural_suffix = pluralize!(data.plural_len);
+
+ match *ty.kind() {
+ ty::Adt(..) if ty.is_box() => {
+ let boxed_ty = ty.boxed_ty();
+ let descr_pre = &format!("{}boxed ", data.descr_pre);
+ check_must_not_suspend_ty(fcx, boxed_ty, hir_id, SuspendCheckData { descr_pre, ..data })
+ }
+ ty::Adt(def, _) => check_must_not_suspend_def(fcx.tcx, def.did(), hir_id, data),
+ // FIXME: support adding the attribute to TAITs
+ ty::Opaque(def, _) => {
+ let mut has_emitted = false;
+ for &(predicate, _) in fcx.tcx.explicit_item_bounds(def) {
+ // We only look at the `DefId`, so it is safe to skip the binder here.
+ if let ty::PredicateKind::Trait(ref poly_trait_predicate) =
+ predicate.kind().skip_binder()
+ {
+ let def_id = poly_trait_predicate.trait_ref.def_id;
+ let descr_pre = &format!("{}implementer{} of ", data.descr_pre, plural_suffix);
+ if check_must_not_suspend_def(
+ fcx.tcx,
+ def_id,
+ hir_id,
+ SuspendCheckData { descr_pre, ..data },
+ ) {
+ has_emitted = true;
+ break;
+ }
+ }
+ }
+ has_emitted
+ }
+ ty::Dynamic(binder, _) => {
+ let mut has_emitted = false;
+ for predicate in binder.iter() {
+ if let ty::ExistentialPredicate::Trait(ref trait_ref) = predicate.skip_binder() {
+ let def_id = trait_ref.def_id;
+ let descr_post = &format!(" trait object{}{}", plural_suffix, data.descr_post);
+ if check_must_not_suspend_def(
+ fcx.tcx,
+ def_id,
+ hir_id,
+ SuspendCheckData { descr_post, ..data },
+ ) {
+ has_emitted = true;
+ break;
+ }
+ }
+ }
+ has_emitted
+ }
+ ty::Tuple(fields) => {
+ let mut has_emitted = false;
+ let comps = match data.expr.map(|e| &e.kind) {
+ Some(hir::ExprKind::Tup(comps)) => {
+ debug_assert_eq!(comps.len(), fields.len());
+ Some(comps)
+ }
+ _ => None,
+ };
+ for (i, ty) in fields.iter().enumerate() {
+ let descr_post = &format!(" in tuple element {i}");
+ let span = comps.and_then(|c| c.get(i)).map(|e| e.span).unwrap_or(data.source_span);
+ if check_must_not_suspend_ty(
+ fcx,
+ ty,
+ hir_id,
+ SuspendCheckData {
+ descr_post,
+ expr: comps.and_then(|comps| comps.get(i)),
+ source_span: span,
+ ..data
+ },
+ ) {
+ has_emitted = true;
+ }
+ }
+ has_emitted
+ }
+ ty::Array(ty, len) => {
+ let descr_pre = &format!("{}array{} of ", data.descr_pre, plural_suffix);
+ check_must_not_suspend_ty(
+ fcx,
+ ty,
+ hir_id,
+ SuspendCheckData {
+ descr_pre,
+ plural_len: len.try_eval_usize(fcx.tcx, fcx.param_env).unwrap_or(0) as usize
+ + 1,
+ ..data
+ },
+ )
+ }
+ _ => false,
+ }
+}
+
+fn check_must_not_suspend_def(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ hir_id: HirId,
+ data: SuspendCheckData<'_, '_>,
+) -> bool {
+ if let Some(attr) = tcx.get_attr(def_id, sym::must_not_suspend) {
+ tcx.struct_span_lint_hir(
+ rustc_session::lint::builtin::MUST_NOT_SUSPEND,
+ hir_id,
+ data.source_span,
+ |lint| {
+ let msg = format!(
+ "{}`{}`{} held across a suspend point, but should not be",
+ data.descr_pre,
+ tcx.def_path_str(def_id),
+ data.descr_post,
+ );
+ let mut err = lint.build(&msg);
+
+ // add span pointing to the offending yield/await
+ err.span_label(data.yield_span, "the value is held across this suspend point");
+
+ // Add optional reason note
+ if let Some(note) = attr.value_str() {
+ // FIXME(guswynn): consider formatting this better
+ err.span_note(data.source_span, note.as_str());
+ }
+
+ // Add some quick suggestions on what to do
+ // FIXME: can `drop` work as a suggestion here as well?
+ err.span_help(
+ data.source_span,
+ "consider using a block (`{ ... }`) \
+ to shrink the value's scope, ending before the suspend point",
+ );
+
+ err.emit();
+ },
+ );
+
+ true
+ } else {
+ false
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs
new file mode 100644
index 000000000..518cd7342
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges.rs
@@ -0,0 +1,309 @@
+//! Drop range analysis finds the portions of the tree where a value is guaranteed to be dropped
+//! (i.e. moved, uninitialized, etc.). This is used to exclude the types of those values from the
+//! generator type. See `InteriorVisitor::record` for where the results of this analysis are used.
+//!
+//! There are three phases to this analysis:
+//! 1. Use `ExprUseVisitor` to identify the interesting values that are consumed and borrowed.
+//! 2. Use `DropRangeVisitor` to find where the interesting values are dropped or reinitialized,
+//! and also build a control flow graph.
+//! 3. Use `DropRanges::propagate_to_fixpoint` to flow the dropped/reinitialized information through
+//! the CFG and find the exact points where we know a value is definitely dropped.
+//!
+//! The end result is a data structure that maps the post-order index of each node in the HIR tree
+//! to a set of values that are known to be dropped at that location.
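+//!
+//! For example (an illustrative sketch, not from the original source):
+//!
+//! ```ignore (illustrative)
+//! async fn f() {
+//!     let x = make();
+//!     consume(x); // `x` is moved here, before the `.await`, so this analysis
+//!     g().await;  // lets its type be excluded from the generator interior.
+//! }
+//! ```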
+
+use self::cfg_build::build_control_flow_graph;
+use self::record_consumed_borrow::find_consumed_and_borrowed;
+use crate::check::FnCtxt;
+use hir::def_id::DefId;
+use hir::{Body, HirId, HirIdMap, Node};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir as hir;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::IndexVec;
+use rustc_middle::hir::map::Map;
+use rustc_middle::hir::place::{PlaceBase, PlaceWithHirId};
+use rustc_middle::ty;
+use std::collections::BTreeMap;
+use std::fmt::Debug;
+
+mod cfg_build;
+mod cfg_propagate;
+mod cfg_visualize;
+mod record_consumed_borrow;
+
+pub fn compute_drop_ranges<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ def_id: DefId,
+ body: &'tcx Body<'tcx>,
+) -> DropRanges {
+ if fcx.sess().opts.unstable_opts.drop_tracking {
+ let consumed_borrowed_places = find_consumed_and_borrowed(fcx, def_id, body);
+
+ let typeck_results = &fcx.typeck_results.borrow();
+ let num_exprs = fcx.tcx.region_scope_tree(def_id).body_expr_count(body.id()).unwrap_or(0);
+ let (mut drop_ranges, borrowed_temporaries) = build_control_flow_graph(
+ fcx.tcx.hir(),
+ fcx.tcx,
+ typeck_results,
+ consumed_borrowed_places,
+ body,
+ num_exprs,
+ );
+
+ drop_ranges.propagate_to_fixpoint();
+
+ debug!("borrowed_temporaries = {borrowed_temporaries:?}");
+ DropRanges {
+ tracked_value_map: drop_ranges.tracked_value_map,
+ nodes: drop_ranges.nodes,
+ borrowed_temporaries: Some(borrowed_temporaries),
+ }
+ } else {
+ // If drop range tracking is not enabled, skip all the analysis and produce an
+ // empty set of DropRanges.
+ DropRanges {
+ tracked_value_map: FxHashMap::default(),
+ nodes: IndexVec::new(),
+ borrowed_temporaries: None,
+ }
+ }
+}
+
+/// Applies `f` to each consumable node in the HIR subtree pointed to by `place`.
+///
+/// This includes the place itself, and if the place is a reference to a local
+/// variable then `f` is also called on the HIR node for that variable as well.
+///
+/// For example, if `place` points to `foo()`, then `f` is called once for the
+/// result of `foo`. On the other hand, if `place` points to `x` then `f` will
+/// be called both on the `ExprKind::Path` node that represents the expression
+/// as well as the HirId of the local `x` itself.
+fn for_each_consumable<'tcx>(hir: Map<'tcx>, place: TrackedValue, mut f: impl FnMut(TrackedValue)) {
+ f(place);
+ let node = hir.find(place.hir_id());
+ if let Some(Node::Expr(expr)) = node {
+ match expr.kind {
+ hir::ExprKind::Path(hir::QPath::Resolved(
+ _,
+ hir::Path { res: hir::def::Res::Local(hir_id), .. },
+ )) => {
+ f(TrackedValue::Variable(*hir_id));
+ }
+ _ => (),
+ }
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct PostOrderId {
+ DEBUG_FORMAT = "id({})",
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct TrackedValueIndex {
+ DEBUG_FORMAT = "hidx({})",
+ }
+}
+
+/// Identifies a value whose drop state we need to track.
+#[derive(PartialEq, Eq, Hash, Clone, Copy)]
+enum TrackedValue {
+ /// Represents a named variable, such as a let binding, parameter, or upvar.
+ ///
+ /// The HirId points to the variable's definition site.
+ Variable(HirId),
+ /// A value produced as a result of an expression.
+ ///
+ /// The HirId points to the expression that returns this value.
+ Temporary(HirId),
+}
+
+impl Debug for TrackedValue {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ ty::tls::with_opt(|opt_tcx| {
+ if let Some(tcx) = opt_tcx {
+ write!(f, "{}", tcx.hir().node_to_string(self.hir_id()))
+ } else {
+ match self {
+ Self::Variable(hir_id) => write!(f, "Variable({:?})", hir_id),
+ Self::Temporary(hir_id) => write!(f, "Temporary({:?})", hir_id),
+ }
+ }
+ })
+ }
+}
+
+impl TrackedValue {
+ fn hir_id(&self) -> HirId {
+ match self {
+ TrackedValue::Variable(hir_id) | TrackedValue::Temporary(hir_id) => *hir_id,
+ }
+ }
+
+ fn from_place_with_projections_allowed(place_with_id: &PlaceWithHirId<'_>) -> Self {
+ match place_with_id.place.base {
+ PlaceBase::Rvalue | PlaceBase::StaticItem => {
+ TrackedValue::Temporary(place_with_id.hir_id)
+ }
+ PlaceBase::Local(hir_id)
+ | PlaceBase::Upvar(ty::UpvarId { var_path: ty::UpvarPath { hir_id }, .. }) => {
+ TrackedValue::Variable(hir_id)
+ }
+ }
+ }
+}
+
+/// Represents a reason why we might not be able to convert a HirId or Place
+/// into a tracked value.
+#[derive(Debug)]
+enum TrackedValueConversionError {
+    /// Place projections are not currently supported.
+    ///
+    /// The reasoning around projections is subtle, so we choose to be more
+    /// conservative for now. There is no reason in theory we cannot support
+    /// them; we just have not implemented it yet.
+ PlaceProjectionsNotSupported,
+}
+
+impl TryFrom<&PlaceWithHirId<'_>> for TrackedValue {
+ type Error = TrackedValueConversionError;
+
+ fn try_from(place_with_id: &PlaceWithHirId<'_>) -> Result<Self, Self::Error> {
+ if !place_with_id.place.projections.is_empty() {
+ debug!(
+ "TrackedValue from PlaceWithHirId: {:?} has projections, which are not supported.",
+ place_with_id
+ );
+ return Err(TrackedValueConversionError::PlaceProjectionsNotSupported);
+ }
+
+ Ok(TrackedValue::from_place_with_projections_allowed(place_with_id))
+ }
+}
+
+pub struct DropRanges {
+ tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+ nodes: IndexVec<PostOrderId, NodeInfo>,
+ borrowed_temporaries: Option<FxHashSet<HirId>>,
+}
+
+impl DropRanges {
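+    /// Returns whether the value identified by `hir_id` (either a variable or
+    /// a temporary) is definitely dropped at the node whose post-order index
+    /// is `location`.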
+ pub fn is_dropped_at(&self, hir_id: HirId, location: usize) -> bool {
+ self.tracked_value_map
+ .get(&TrackedValue::Temporary(hir_id))
+ .or(self.tracked_value_map.get(&TrackedValue::Variable(hir_id)))
+ .cloned()
+ .map_or(false, |tracked_value_id| {
+ self.expect_node(location.into()).drop_state.contains(tracked_value_id)
+ })
+ }
+
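+    /// Returns whether `expr` is a temporary value that is borrowed at some
+    /// point.
+    ///
+    /// If drop tracking is disabled, we conservatively assume every temporary
+    /// is borrowed.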
+ pub fn is_borrowed_temporary(&self, expr: &hir::Expr<'_>) -> bool {
+ if let Some(b) = &self.borrowed_temporaries { b.contains(&expr.hir_id) } else { true }
+ }
+
+ /// Returns a reference to the NodeInfo for a node, panicking if it does not exist
+ fn expect_node(&self, id: PostOrderId) -> &NodeInfo {
+ &self.nodes[id]
+ }
+}
+
+/// Tracks information needed to compute drop ranges.
+struct DropRangesBuilder {
+ /// The core of DropRangesBuilder is a set of nodes, which each represent
+ /// one expression. We primarily refer to them by their index in a
+ /// post-order traversal of the HIR tree, since this is what
+ /// generator_interior uses to talk about yield positions.
+ ///
+ /// This IndexVec keeps the relevant details for each node. See the
+ /// NodeInfo struct for more details, but this information includes things
+ /// such as the set of control-flow successors, which variables are dropped
+ /// or reinitialized, and whether each variable has been inferred to be
+ /// known-dropped or potentially reinitialized at each point.
+ nodes: IndexVec<PostOrderId, NodeInfo>,
+    /// We refer to values whose drop state we are tracking by the HirId of
+    /// where they are defined. Within a NodeInfo, however, we store the
+    /// drop-state in a bit vector indexed by a TrackedValueIndex
+    /// (see NodeInfo::drop_state). The tracked_value_map field stores the
+    /// mapping from TrackedValues to the TrackedValueIndex that is used to
+    /// represent each value in the bitvector.
+ tracked_value_map: FxHashMap<TrackedValue, TrackedValueIndex>,
+
+ /// When building the control flow graph, we don't always know the
+ /// post-order index of the target node at the point we encounter it.
+ /// For example, this happens with break and continue. In those cases,
+ /// we store a pair of the PostOrderId of the source and the HirId
+ /// of the target. Once we have gathered all of these edges, we make a
+ /// pass over the set of deferred edges (see process_deferred_edges in
+ /// cfg_build.rs), look up the PostOrderId for the target (since now the
+ /// post-order index for all nodes is known), and add missing control flow
+ /// edges.
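+    ///
+    /// For example (illustrative): when visiting the `break` in
+    /// `loop { break; }`, the post-order index of the loop's end is not yet
+    /// known, so we record `(id_of_break, hir_id_of_loop)` here and resolve
+    /// it later in `process_deferred_edges`.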
+ deferred_edges: Vec<(PostOrderId, HirId)>,
+ /// This maps HirIds of expressions to their post-order index. It is
+ /// used in process_deferred_edges to correctly add back-edges.
+ post_order_map: HirIdMap<PostOrderId>,
+}
+
+impl Debug for DropRangesBuilder {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ f.debug_struct("DropRanges")
+ .field("hir_id_map", &self.tracked_value_map)
+ .field("post_order_maps", &self.post_order_map)
+ .field("nodes", &self.nodes.iter_enumerated().collect::<BTreeMap<_, _>>())
+ .finish()
+ }
+}
+
+/// DropRangesBuilder keeps track of what values are definitely dropped at each point in the
+/// code.
+///
+/// Values of interest are defined by the hir_id of their place. Locations in code are identified
+/// by their index in the post-order traversal. At its core, DropRangesBuilder maps
+/// (hir_id, post_order_id) -> bool, where a true value indicates that the value is definitely
+/// dropped at the point of the node identified by post_order_id.
+impl DropRangesBuilder {
+    /// Returns the number of values (TrackedValues) that are tracked.
+ fn num_values(&self) -> usize {
+ self.tracked_value_map.len()
+ }
+
+ fn node_mut(&mut self, id: PostOrderId) -> &mut NodeInfo {
+ let size = self.num_values();
+ self.nodes.ensure_contains_elem(id, || NodeInfo::new(size));
+ &mut self.nodes[id]
+ }
+
+ fn add_control_edge(&mut self, from: PostOrderId, to: PostOrderId) {
+ trace!("adding control edge from {:?} to {:?}", from, to);
+ self.node_mut(from).successors.push(to);
+ }
+}
+
+#[derive(Debug)]
+struct NodeInfo {
+ /// IDs of nodes that can follow this one in the control flow
+ ///
+ /// If the vec is empty, then control proceeds to the next node.
+ successors: Vec<PostOrderId>,
+
+ /// List of hir_ids that are dropped by this node.
+ drops: Vec<TrackedValueIndex>,
+
+ /// List of hir_ids that are reinitialized by this node.
+ reinits: Vec<TrackedValueIndex>,
+
+ /// Set of values that are definitely dropped at this point.
+ drop_state: BitSet<TrackedValueIndex>,
+}
+
+impl NodeInfo {
+ fn new(num_values: usize) -> Self {
+ Self {
+ successors: vec![],
+ drops: vec![],
+ reinits: vec![],
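+            // Start with every value marked as dropped: propagation
+            // intersects this with predecessor states, so the state only
+            // ever shrinks toward the correct set for reachable nodes.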
+ drop_state: BitSet::new_filled(num_values),
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs
new file mode 100644
index 000000000..a2c23db16
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_build.rs
@@ -0,0 +1,560 @@
+use super::{
+ for_each_consumable, record_consumed_borrow::ConsumedAndBorrowedPlaces, DropRangesBuilder,
+ NodeInfo, PostOrderId, TrackedValue, TrackedValueIndex,
+};
+use hir::{
+ intravisit::{self, Visitor},
+ Body, Expr, ExprKind, Guard, HirId, LoopIdError,
+};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir as hir;
+use rustc_index::vec::IndexVec;
+use rustc_middle::{
+ hir::map::Map,
+ ty::{TyCtxt, TypeckResults},
+};
+use std::mem::swap;
+
+/// Traverses the body to build the control flow graph and to find the
+/// locations where the relevant places are dropped or reinitialized.
+///
+/// The resulting structure still needs to be iterated to a fixed point, which
+/// can be done with propagate_to_fixpoint in cfg_propagate.
+pub(super) fn build_control_flow_graph<'tcx>(
+ hir: Map<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &TypeckResults<'tcx>,
+ consumed_borrowed_places: ConsumedAndBorrowedPlaces,
+ body: &'tcx Body<'tcx>,
+ num_exprs: usize,
+) -> (DropRangesBuilder, FxHashSet<HirId>) {
+ let mut drop_range_visitor =
+ DropRangeVisitor::new(hir, tcx, typeck_results, consumed_borrowed_places, num_exprs);
+ intravisit::walk_body(&mut drop_range_visitor, body);
+
+ drop_range_visitor.drop_ranges.process_deferred_edges();
+ if let Some(filename) = &tcx.sess.opts.unstable_opts.dump_drop_tracking_cfg {
+ super::cfg_visualize::write_graph_to_file(&drop_range_visitor.drop_ranges, filename, tcx);
+ }
+
+ (drop_range_visitor.drop_ranges, drop_range_visitor.places.borrowed_temporaries)
+}
+
+/// This struct is used to gather the information for `DropRanges` to determine the regions of the
+/// HIR tree for which a value is dropped.
+///
+/// We are interested in points where a variable is dropped or initialized, and the control flow
+/// of the code. We identify locations in code by their post-order traversal index, so it is
+/// important for this traversal to match that in `RegionResolutionVisitor` and `InteriorVisitor`.
+///
+/// We make several simplifying assumptions, with the goal of being more conservative than
+/// necessary rather than less conservative (since being less conservative is unsound, but more
+/// conservative is still safe). These assumptions are:
+///
+/// 1. Moving a variable `a` counts as a move of the whole variable.
+/// 2. Moving a partial path like `a.b.c` is ignored.
+/// 3. Reinitializing through a field (e.g. `a.b.c = 5`) counts as a reinitialization of all of
+/// `a`.
+///
+/// Some examples:
+///
+/// Rule 1:
+/// ```rust
+/// let mut a = (vec![0], vec![0]);
+/// drop(a);
+/// // `a` is not considered initialized.
+/// ```
+///
+/// Rule 2:
+/// ```rust
+/// let mut a = (vec![0], vec![0]);
+/// drop(a.0);
+/// drop(a.1);
+/// // `a` is still considered initialized.
+/// ```
+///
+/// Rule 3:
+/// ```compile_fail,E0382
+/// let mut a = (vec![0], vec![0]);
+/// drop(a);
+/// a.1 = vec![1];
+/// // all of `a` is considered initialized
+/// ```
+
+struct DropRangeVisitor<'a, 'tcx> {
+ hir: Map<'tcx>,
+ places: ConsumedAndBorrowedPlaces,
+ drop_ranges: DropRangesBuilder,
+ expr_index: PostOrderId,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a TypeckResults<'tcx>,
+ label_stack: Vec<(Option<rustc_ast::Label>, PostOrderId)>,
+}
+
+impl<'a, 'tcx> DropRangeVisitor<'a, 'tcx> {
+ fn new(
+ hir: Map<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ typeck_results: &'a TypeckResults<'tcx>,
+ places: ConsumedAndBorrowedPlaces,
+ num_exprs: usize,
+ ) -> Self {
+ debug!("consumed_places: {:?}", places.consumed);
+ let drop_ranges = DropRangesBuilder::new(
+ places.consumed.iter().flat_map(|(_, places)| places.iter().cloned()),
+ hir,
+ num_exprs,
+ );
+ Self {
+ hir,
+ places,
+ drop_ranges,
+ expr_index: PostOrderId::from_u32(0),
+ typeck_results,
+ tcx,
+ label_stack: vec![],
+ }
+ }
+
+ fn record_drop(&mut self, value: TrackedValue) {
+ if self.places.borrowed.contains(&value) {
+ debug!("not marking {:?} as dropped because it is borrowed at some point", value);
+ } else {
+ debug!("marking {:?} as dropped at {:?}", value, self.expr_index);
+ let count = self.expr_index;
+ self.drop_ranges.drop_at(value, count);
+ }
+ }
+
+    /// ExprUseVisitor's consume callback doesn't go deep enough for our purposes in all
+    /// expressions. This method walks a little deeper into the expression and records
+    /// consumption where needed.
+ fn consume_expr(&mut self, expr: &hir::Expr<'_>) {
+ debug!("consuming expr {:?}, count={:?}", expr.kind, self.expr_index);
+ let places = self
+ .places
+ .consumed
+ .get(&expr.hir_id)
+ .map_or(vec![], |places| places.iter().cloned().collect());
+ for place in places {
+ trace!(?place, "consuming place");
+ for_each_consumable(self.hir, place, |value| self.record_drop(value));
+ }
+ }
+
+ /// Marks an expression as being reinitialized.
+ ///
+    /// Note that we always approximate on the side of things being more
+ /// initialized than they actually are, as opposed to less. In cases such
+ /// as `x.y = ...`, we would consider all of `x` as being initialized
+ /// instead of just the `y` field.
+ ///
+ /// This is because it is always safe to consider something initialized
+ /// even when it is not, but the other way around will cause problems.
+ ///
+ /// In the future, we will hopefully tighten up these rules to be more
+ /// precise.
+ fn reinit_expr(&mut self, expr: &hir::Expr<'_>) {
+ // Walk the expression to find the base. For example, in an expression
+ // like `*a[i].x`, we want to find the `a` and mark that as
+ // reinitialized.
+ match expr.kind {
+ ExprKind::Path(hir::QPath::Resolved(
+ _,
+ hir::Path { res: hir::def::Res::Local(hir_id), .. },
+ )) => {
+ // This is the base case, where we have found an actual named variable.
+
+ let location = self.expr_index;
+ debug!("reinitializing {:?} at {:?}", hir_id, location);
+ self.drop_ranges.reinit_at(TrackedValue::Variable(*hir_id), location);
+ }
+
+ ExprKind::Field(base, _) => self.reinit_expr(base),
+
+ // Most expressions do not refer to something where we need to track
+ // reinitializations.
+ //
+ // Some of these may be interesting in the future
+ ExprKind::Path(..)
+ | ExprKind::Box(..)
+ | ExprKind::ConstBlock(..)
+ | ExprKind::Array(..)
+ | ExprKind::Call(..)
+ | ExprKind::MethodCall(..)
+ | ExprKind::Tup(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Unary(..)
+ | ExprKind::Lit(..)
+ | ExprKind::Cast(..)
+ | ExprKind::Type(..)
+ | ExprKind::DropTemps(..)
+ | ExprKind::Let(..)
+ | ExprKind::If(..)
+ | ExprKind::Loop(..)
+ | ExprKind::Match(..)
+ | ExprKind::Closure { .. }
+ | ExprKind::Block(..)
+ | ExprKind::Assign(..)
+ | ExprKind::AssignOp(..)
+ | ExprKind::Index(..)
+ | ExprKind::AddrOf(..)
+ | ExprKind::Break(..)
+ | ExprKind::Continue(..)
+ | ExprKind::Ret(..)
+ | ExprKind::InlineAsm(..)
+ | ExprKind::Struct(..)
+ | ExprKind::Repeat(..)
+ | ExprKind::Yield(..)
+ | ExprKind::Err => (),
+ }
+ }
+
+    /// For an expression with an uninhabited return type (e.g. a call to a function that
+    /// returns `!`), this adds a self edge to the CFG to model the fact that the function
+    /// does not return.
+ fn handle_uninhabited_return(&mut self, expr: &Expr<'tcx>) {
+ let ty = self.typeck_results.expr_ty(expr);
+ let ty = self.tcx.erase_regions(ty);
+ let m = self.tcx.parent_module(expr.hir_id).to_def_id();
+ let param_env = self.tcx.param_env(m.expect_local());
+ if self.tcx.is_ty_uninhabited_from(m, ty, param_env) {
+ // This function will not return. We model this fact as an infinite loop.
+ self.drop_ranges.add_control_edge(self.expr_index + 1, self.expr_index + 1);
+ }
+ }
+
+ /// Map a Destination to an equivalent expression node
+ ///
+ /// The destination field of a Break or Continue expression can target either an
+ /// expression or a block. The drop range analysis, however, only deals in
+ /// expression nodes, so blocks that might be the destination of a Break or Continue
+ /// will not have a PostOrderId.
+ ///
+ /// If the destination is an expression, this function will simply return that expression's
+    /// hir_id. If the destination is a block, this function will return the hir_id of the last
+    /// expression in the block.
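+    ///
+    /// For example (illustrative): a `break` out of `loop { ...; break; }`
+    /// targets the loop expression itself, while a `break` whose destination
+    /// is a block resolves to that block's last expression (or statement).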
+ fn find_target_expression_from_destination(
+ &self,
+ destination: hir::Destination,
+ ) -> Result<HirId, LoopIdError> {
+ destination.target_id.map(|target| {
+ let node = self.hir.get(target);
+ match node {
+ hir::Node::Expr(_) => target,
+ hir::Node::Block(b) => find_last_block_expression(b),
+ hir::Node::Param(..)
+ | hir::Node::Item(..)
+ | hir::Node::ForeignItem(..)
+ | hir::Node::TraitItem(..)
+ | hir::Node::ImplItem(..)
+ | hir::Node::Variant(..)
+ | hir::Node::Field(..)
+ | hir::Node::AnonConst(..)
+ | hir::Node::Stmt(..)
+ | hir::Node::PathSegment(..)
+ | hir::Node::Ty(..)
+ | hir::Node::TypeBinding(..)
+ | hir::Node::TraitRef(..)
+ | hir::Node::Pat(..)
+ | hir::Node::Arm(..)
+ | hir::Node::Local(..)
+ | hir::Node::Ctor(..)
+ | hir::Node::Lifetime(..)
+ | hir::Node::GenericParam(..)
+ | hir::Node::Crate(..)
+ | hir::Node::Infer(..) => bug!("Unsupported branch target: {:?}", node),
+ }
+ })
+ }
+}
+
+fn find_last_block_expression(block: &hir::Block<'_>) -> HirId {
+ block.expr.map_or_else(
+ // If there is no tail expression, there will be at least one statement in the
+ // block because the block contains a break or continue statement.
+ || block.stmts.last().unwrap().hir_id,
+ |expr| expr.hir_id,
+ )
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for DropRangeVisitor<'a, 'tcx> {
+ fn visit_expr(&mut self, expr: &'tcx Expr<'tcx>) {
+ let mut reinit = None;
+ match expr.kind {
+ ExprKind::Assign(lhs, rhs, _) => {
+ self.visit_expr(lhs);
+ self.visit_expr(rhs);
+
+ reinit = Some(lhs);
+ }
+
+ ExprKind::If(test, if_true, if_false) => {
+ self.visit_expr(test);
+
+ let fork = self.expr_index;
+
+ self.drop_ranges.add_control_edge(fork, self.expr_index + 1);
+ self.visit_expr(if_true);
+ let true_end = self.expr_index;
+
+ self.drop_ranges.add_control_edge(fork, self.expr_index + 1);
+ if let Some(if_false) = if_false {
+ self.visit_expr(if_false);
+ }
+
+ self.drop_ranges.add_control_edge(true_end, self.expr_index + 1);
+ }
+ ExprKind::Match(scrutinee, arms, ..) => {
+ // We walk through the match expression almost like a chain of if expressions.
+ // Here's a diagram to follow along with:
+ //
+ // ┌─┐
+ // match │A│ {
+ // ┌───┴─┘
+ // │
+ // ┌▼┌───►┌─┐ ┌─┐
+ // │B│ if │C│ =>│D│,
+ // └─┘ ├─┴──►└─┴──────┐
+ // ┌──┘ │
+ // ┌──┘ │
+ // │ │
+ // ┌▼┌───►┌─┐ ┌─┐ │
+ // │E│ if │F│ =>│G│, │
+ // └─┘ ├─┴──►└─┴┐ │
+ // │ │ │
+ // } ▼ ▼ │
+ // ┌─┐◄───────────────────┘
+ // │H│
+ // └─┘
+ //
+ // The order we want is that the scrutinee (A) flows into the first pattern (B),
+ // which flows into the guard (C). Then the guard either flows into the arm body
+ // (D) or into the start of the next arm (E). Finally, the body flows to the end
+ // of the match block (H).
+ //
+ // The subsequent arms follow the same ordering. First we go to the pattern, then
+ // the guard (if present, otherwise it flows straight into the body), then into
+ // the body and then to the end of the match expression.
+ //
+ // The comments below show which edge is being added.
+ self.visit_expr(scrutinee);
+
+ let (guard_exit, arm_end_ids) = arms.iter().fold(
+ (self.expr_index, vec![]),
+ |(incoming_edge, mut arm_end_ids), hir::Arm { pat, body, guard, .. }| {
+ // A -> B, or C -> E
+ self.drop_ranges.add_control_edge(incoming_edge, self.expr_index + 1);
+ self.visit_pat(pat);
+ // B -> C and E -> F are added implicitly due to the traversal order.
+ match guard {
+ Some(Guard::If(expr)) => self.visit_expr(expr),
+ Some(Guard::IfLet(let_expr)) => {
+ self.visit_let_expr(let_expr);
+ }
+ None => (),
+ }
+ // Likewise, C -> D and F -> G are added implicitly.
+
+ // Save C, F, so we can add the other outgoing edge.
+ let to_next_arm = self.expr_index;
+
+                    // The default edge does not get added since we also have an explicit edge,
+                    // so we need to add an edge to the next node ourselves.
+ //
+ // This adds C -> D, F -> G
+ self.drop_ranges.add_control_edge(self.expr_index, self.expr_index + 1);
+ self.visit_expr(body);
+
+ // Save the end of the body so we can add the exit edge once we know where
+ // the exit is.
+ arm_end_ids.push(self.expr_index);
+
+ // Pass C to the next iteration, as well as vec![D]
+ //
+ // On the last round through, we pass F and vec![D, G] so that we can
+ // add all the exit edges.
+ (to_next_arm, arm_end_ids)
+ },
+ );
+ // F -> H
+ self.drop_ranges.add_control_edge(guard_exit, self.expr_index + 1);
+
+ arm_end_ids.into_iter().for_each(|arm_end| {
+ // D -> H, G -> H
+ self.drop_ranges.add_control_edge(arm_end, self.expr_index + 1)
+ });
+ }
+
+ ExprKind::Loop(body, label, ..) => {
+ let loop_begin = self.expr_index + 1;
+ self.label_stack.push((label, loop_begin));
+ if body.stmts.is_empty() && body.expr.is_none() {
+ // For empty loops we won't have updated self.expr_index after visiting the
+ // body, meaning we'd get an edge from expr_index to expr_index + 1, but
+ // instead we want an edge from expr_index + 1 to expr_index + 1.
+ self.drop_ranges.add_control_edge(loop_begin, loop_begin);
+ } else {
+ self.visit_block(body);
+ self.drop_ranges.add_control_edge(self.expr_index, loop_begin);
+ }
+ self.label_stack.pop();
+ }
+ // Find the loop entry by searching through the label stack for either the last entry
+ // (if label is none), or the first entry where the label matches this one. The Loop
+ // case maintains this stack mapping labels to the PostOrderId for the loop entry.
+ ExprKind::Continue(hir::Destination { label, .. }, ..) => self
+ .label_stack
+ .iter()
+ .rev()
+ .find(|(loop_label, _)| label.is_none() || *loop_label == label)
+ .map_or((), |(_, target)| {
+ self.drop_ranges.add_control_edge(self.expr_index, *target)
+ }),
+
+ ExprKind::Break(destination, ..) => {
+ // destination either points to an expression or to a block. We use
+ // find_target_expression_from_destination to use the last expression of the block
+ // if destination points to a block.
+ //
+ // We add an edge to the hir_id of the expression/block we are breaking out of, and
+ // then in process_deferred_edges we will map this hir_id to its PostOrderId, which
+ // will refer to the end of the block due to the post order traversal.
+ self.find_target_expression_from_destination(destination).map_or((), |target| {
+ self.drop_ranges.add_control_edge_hir_id(self.expr_index, target)
+ })
+ }
+
+ ExprKind::Call(f, args) => {
+ self.visit_expr(f);
+ for arg in args {
+ self.visit_expr(arg);
+ }
+
+ self.handle_uninhabited_return(expr);
+ }
+ ExprKind::MethodCall(_, exprs, _) => {
+ for expr in exprs {
+ self.visit_expr(expr);
+ }
+
+ self.handle_uninhabited_return(expr);
+ }
+
+ ExprKind::AddrOf(..)
+ | ExprKind::Array(..)
+ | ExprKind::AssignOp(..)
+ | ExprKind::Binary(..)
+ | ExprKind::Block(..)
+ | ExprKind::Box(..)
+ | ExprKind::Cast(..)
+ | ExprKind::Closure { .. }
+ | ExprKind::ConstBlock(..)
+ | ExprKind::DropTemps(..)
+ | ExprKind::Err
+ | ExprKind::Field(..)
+ | ExprKind::Index(..)
+ | ExprKind::InlineAsm(..)
+ | ExprKind::Let(..)
+ | ExprKind::Lit(..)
+ | ExprKind::Path(..)
+ | ExprKind::Repeat(..)
+ | ExprKind::Ret(..)
+ | ExprKind::Struct(..)
+ | ExprKind::Tup(..)
+ | ExprKind::Type(..)
+ | ExprKind::Unary(..)
+ | ExprKind::Yield(..) => intravisit::walk_expr(self, expr),
+ }
+
+ self.expr_index = self.expr_index + 1;
+ self.drop_ranges.add_node_mapping(expr.hir_id, self.expr_index);
+ self.consume_expr(expr);
+ if let Some(expr) = reinit {
+ self.reinit_expr(expr);
+ }
+ }
+
+ fn visit_pat(&mut self, pat: &'tcx hir::Pat<'tcx>) {
+ intravisit::walk_pat(self, pat);
+
+        // Increment expr_index here to match what InteriorVisitor expects.
+ self.expr_index = self.expr_index + 1;
+ }
+}
+
+impl DropRangesBuilder {
+ fn new(
+ tracked_values: impl Iterator<Item = TrackedValue>,
+ hir: Map<'_>,
+ num_exprs: usize,
+ ) -> Self {
+ let mut tracked_value_map = FxHashMap::<_, TrackedValueIndex>::default();
+ let mut next = <_>::from(0u32);
+ for value in tracked_values {
+ for_each_consumable(hir, value, |value| {
+ if !tracked_value_map.contains_key(&value) {
+ tracked_value_map.insert(value, next);
+ next = next + 1;
+ }
+ });
+ }
+ debug!("hir_id_map: {:?}", tracked_value_map);
+ let num_values = tracked_value_map.len();
+ Self {
+ tracked_value_map,
+ nodes: IndexVec::from_fn_n(|_| NodeInfo::new(num_values), num_exprs + 1),
+ deferred_edges: <_>::default(),
+ post_order_map: <_>::default(),
+ }
+ }
+
+ fn tracked_value_index(&self, tracked_value: TrackedValue) -> TrackedValueIndex {
+ *self.tracked_value_map.get(&tracked_value).unwrap()
+ }
+
+ /// Adds an entry in the mapping from HirIds to PostOrderIds
+ ///
+ /// Needed so that `add_control_edge_hir_id` can work.
+ fn add_node_mapping(&mut self, node_hir_id: HirId, post_order_id: PostOrderId) {
+ self.post_order_map.insert(node_hir_id, post_order_id);
+ }
+
+ /// Like add_control_edge, but uses a hir_id as the target.
+ ///
+ /// This can be used for branches where we do not know the PostOrderId of the target yet,
+ /// such as when handling `break` or `continue`.
+ fn add_control_edge_hir_id(&mut self, from: PostOrderId, to: HirId) {
+ self.deferred_edges.push((from, to));
+ }
+
+ fn drop_at(&mut self, value: TrackedValue, location: PostOrderId) {
+ let value = self.tracked_value_index(value);
+ self.node_mut(location).drops.push(value);
+ }
+
+ fn reinit_at(&mut self, value: TrackedValue, location: PostOrderId) {
+ let value = match self.tracked_value_map.get(&value) {
+ Some(value) => *value,
+ // If there's no value, this is never consumed and therefore is never dropped. We can
+ // ignore this.
+ None => return,
+ };
+ self.node_mut(location).reinits.push(value);
+ }
+
+ /// Looks up PostOrderId for any control edges added by HirId and adds a proper edge for them.
+ ///
+ /// Should be called after visiting the HIR but before solving the control flow, otherwise some
+ /// edges will be missed.
+ fn process_deferred_edges(&mut self) {
+ trace!("processing deferred edges. post_order_map={:#?}", self.post_order_map);
+ let mut edges = vec![];
+ swap(&mut edges, &mut self.deferred_edges);
+ edges.into_iter().for_each(|(from, to)| {
+ trace!("Adding deferred edge from {:?} to {:?}", from, to);
+ let to = *self.post_order_map.get(&to).expect("Expression ID not found");
+ trace!("target edge PostOrderId={:?}", to);
+ self.add_control_edge(from, to)
+ });
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs
new file mode 100644
index 000000000..139d17d2e
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_propagate.rs
@@ -0,0 +1,92 @@
+use super::{DropRangesBuilder, PostOrderId};
+use rustc_index::{bit_set::BitSet, vec::IndexVec};
+use std::collections::BTreeMap;
+
+impl DropRangesBuilder {
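+    /// Propagates drop/reinit information through the CFG until a fixed point
+    /// is reached.
+    ///
+    /// This is a forward dataflow pass: a node's drop state is computed as the
+    /// intersection of its predecessors' states (a value is only *definitely*
+    /// dropped if it is dropped along every incoming path), after which the
+    /// node's own drops are added and its reinits removed. We iterate until no
+    /// node's state changes.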
+ pub fn propagate_to_fixpoint(&mut self) {
+ trace!("before fixpoint: {:#?}", self);
+ let preds = self.compute_predecessors();
+
+ trace!("predecessors: {:#?}", preds.iter_enumerated().collect::<BTreeMap<_, _>>());
+
+ let mut new_state = BitSet::new_empty(self.num_values());
+ let mut changed_nodes = BitSet::new_empty(self.nodes.len());
+ let mut unchanged_mask = BitSet::new_filled(self.nodes.len());
+ changed_nodes.insert(0u32.into());
+
+ let mut propagate = || {
+ let mut changed = false;
+ unchanged_mask.insert_all();
+ for id in self.nodes.indices() {
+ trace!("processing {:?}, changed_nodes: {:?}", id, changed_nodes);
+ // Check if any predecessor has changed, and if not then short-circuit.
+ //
+ // We handle the start node specially, since it doesn't have any predecessors,
+ // but we need to start somewhere.
+ if match id.index() {
+ 0 => !changed_nodes.contains(id),
+ _ => !preds[id].iter().any(|pred| changed_nodes.contains(*pred)),
+ } {
+ trace!("short-circuiting because none of {:?} have changed", preds[id]);
+ unchanged_mask.remove(id);
+ continue;
+ }
+
+ if id.index() == 0 {
+ new_state.clear();
+ } else {
+ // If we are not the start node and we have no predecessors, treat
+ // everything as dropped because there's no way to get here anyway.
+ new_state.insert_all();
+ };
+
+ for pred in &preds[id] {
+ new_state.intersect(&self.nodes[*pred].drop_state);
+ }
+
+ for drop in &self.nodes[id].drops {
+ new_state.insert(*drop);
+ }
+
+ for reinit in &self.nodes[id].reinits {
+ new_state.remove(*reinit);
+ }
+
+ if self.nodes[id].drop_state.intersect(&new_state) {
+ changed_nodes.insert(id);
+ changed = true;
+ } else {
+ unchanged_mask.remove(id);
+ }
+ }
+
+ changed_nodes.intersect(&unchanged_mask);
+ changed
+ };
+
+ while propagate() {
+ trace!("drop_state changed, re-running propagation");
+ }
+
+ trace!("after fixpoint: {:#?}", self);
+ }
+
+ fn compute_predecessors(&self) -> IndexVec<PostOrderId, Vec<PostOrderId>> {
+ let mut preds = IndexVec::from_fn_n(|_| vec![], self.nodes.len());
+ for (id, node) in self.nodes.iter_enumerated() {
+            // If the node has no explicit successors, we assume that control
+            // will flow from this node into the next one.
+ //
+ // If there are successors listed, then we assume that all
+ // possible successors are given and we do not include the default.
+            if node.successors.is_empty() && id.index() != self.nodes.len() - 1 {
+ preds[id + 1].push(id);
+ } else {
+ for succ in &node.successors {
+ preds[*succ].push(id);
+ }
+ }
+ }
+ preds
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs
new file mode 100644
index 000000000..c0a0bfe8e
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/cfg_visualize.rs
@@ -0,0 +1,91 @@
+//! Implementation of `GraphWalk` and `Labeller` for `DropRangesBuilder` (via
+//! the `DropRangesGraph` wrapper) so we can visualize the control flow graph
+//! when needed for debugging.
+
+use rustc_graphviz as dot;
+use rustc_middle::ty::TyCtxt;
+
+use super::{DropRangesBuilder, PostOrderId};
+
+/// Writes the CFG for DropRangesBuilder to a .dot file for visualization.
+///
+/// It is not normally called, but is kept around to easily add debugging
+/// code when needed.
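+///
+/// It is reachable through the `dump_drop_tracking_cfg` unstable option (see
+/// the caller in `cfg_build.rs`).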
+pub(super) fn write_graph_to_file(
+ drop_ranges: &DropRangesBuilder,
+ filename: &str,
+ tcx: TyCtxt<'_>,
+) {
+ dot::render(
+ &DropRangesGraph { drop_ranges, tcx },
+ &mut std::fs::File::create(filename).unwrap(),
+ )
+ .unwrap();
+}
+
+struct DropRangesGraph<'a, 'tcx> {
+ drop_ranges: &'a DropRangesBuilder,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'a> dot::GraphWalk<'a> for DropRangesGraph<'_, '_> {
+ type Node = PostOrderId;
+
+ type Edge = (PostOrderId, PostOrderId);
+
+ fn nodes(&'a self) -> dot::Nodes<'a, Self::Node> {
+ self.drop_ranges.nodes.iter_enumerated().map(|(i, _)| i).collect()
+ }
+
+ fn edges(&'a self) -> dot::Edges<'a, Self::Edge> {
+ self.drop_ranges
+ .nodes
+ .iter_enumerated()
+ .flat_map(|(i, node)| {
+                if node.successors.is_empty() {
+ vec![(i, i + 1)]
+ } else {
+ node.successors.iter().map(move |&s| (i, s)).collect()
+ }
+ })
+ .collect()
+ }
+
+ fn source(&'a self, edge: &Self::Edge) -> Self::Node {
+ edge.0
+ }
+
+ fn target(&'a self, edge: &Self::Edge) -> Self::Node {
+ edge.1
+ }
+}
+
+impl<'a> dot::Labeller<'a> for DropRangesGraph<'_, '_> {
+ type Node = PostOrderId;
+
+ type Edge = (PostOrderId, PostOrderId);
+
+ fn graph_id(&'a self) -> dot::Id<'a> {
+ dot::Id::new("drop_ranges").unwrap()
+ }
+
+ fn node_id(&'a self, n: &Self::Node) -> dot::Id<'a> {
+ dot::Id::new(format!("id{}", n.index())).unwrap()
+ }
+
+ fn node_label(&'a self, n: &Self::Node) -> dot::LabelText<'a> {
+ dot::LabelText::LabelStr(
+ format!(
+ "{n:?}: {}",
+ self.drop_ranges
+ .post_order_map
+ .iter()
+ .find(|(_hir_id, &post_order_id)| post_order_id == *n)
+ .map_or("<unknown>".into(), |(hir_id, _)| self
+ .tcx
+ .hir()
+ .node_to_string(*hir_id))
+ )
+ .into(),
+ )
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs
new file mode 100644
index 000000000..ded0888c3
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/generator_interior/drop_ranges/record_consumed_borrow.rs
@@ -0,0 +1,232 @@
+use super::TrackedValue;
+use crate::{
+ check::FnCtxt,
+ expr_use_visitor::{self, ExprUseVisitor},
+};
+use hir::{def_id::DefId, Body, HirId, HirIdMap};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_middle::hir::place::{PlaceBase, Projection, ProjectionKind};
+use rustc_middle::ty::{ParamEnv, TyCtxt};
+
+pub(super) fn find_consumed_and_borrowed<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ def_id: DefId,
+ body: &'tcx Body<'tcx>,
+) -> ConsumedAndBorrowedPlaces {
+    let mut delegate = ExprUseDelegate::new(fcx.tcx, fcx.param_env);
+    delegate.consume_body(fcx, def_id, body);
+    delegate.places
+}
+
+pub(super) struct ConsumedAndBorrowedPlaces {
+ /// Records the variables/expressions that are dropped by a given expression.
+ ///
+    /// The key is the hir-id of the expression, and the value is a set of hir-ids for variables
+    /// or values that are consumed by that expression.
+ ///
+ /// Note that this set excludes "partial drops" -- for example, a statement like `drop(x.y)` is
+ /// not considered a drop of `x`, although it would be a drop of `x.y`.
+ pub(super) consumed: HirIdMap<FxHashSet<TrackedValue>>,
+
+ /// A set of hir-ids of values or variables that are borrowed at some point within the body.
+ pub(super) borrowed: FxHashSet<TrackedValue>,
+
+    /// A set of hir-ids of rvalue expressions (temporaries) that are borrowed at some point
+    /// within the body.
+ pub(super) borrowed_temporaries: FxHashSet<HirId>,
+}
+
+/// Works with ExprUseVisitor to find interesting values for the drop range analysis.
+///
+/// Interesting values are those that are either dropped or borrowed. For dropped values, we also
+/// record the parent expression, which is the point where the drop actually takes place.
+struct ExprUseDelegate<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ places: ConsumedAndBorrowedPlaces,
+}
+
+impl<'tcx> ExprUseDelegate<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self {
+ Self {
+ tcx,
+ param_env,
+ places: ConsumedAndBorrowedPlaces {
+ consumed: <_>::default(),
+ borrowed: <_>::default(),
+ borrowed_temporaries: <_>::default(),
+ },
+ }
+ }
+
+ fn consume_body(&mut self, fcx: &'_ FnCtxt<'_, 'tcx>, def_id: DefId, body: &'tcx Body<'tcx>) {
+ // Run ExprUseVisitor to find where values are consumed.
+ ExprUseVisitor::new(
+ self,
+ &fcx.infcx,
+ def_id.expect_local(),
+ fcx.param_env,
+ &fcx.typeck_results.borrow(),
+ )
+ .consume_body(body);
+ }
+
+ fn mark_consumed(&mut self, consumer: HirId, target: TrackedValue) {
+ self.places.consumed.entry(consumer).or_insert_with(|| <_>::default());
+
+ debug!(?consumer, ?target, "mark_consumed");
+ self.places.consumed.get_mut(&consumer).map(|places| places.insert(target));
+ }
+
+ fn borrow_place(&mut self, place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>) {
+ self.places
+ .borrowed
+ .insert(TrackedValue::from_place_with_projections_allowed(place_with_id));
+
+        // Ordinarily a value is consumed by its parent, but in the special case of a
+ // borrowed RValue, we create a reference that lives as long as the temporary scope
+ // for that expression (typically, the innermost statement, but sometimes the enclosing
+ // block). We record this fact here so that later in generator_interior
+ // we can use the correct scope.
+ //
+ // We special case borrows through a dereference (`&*x`, `&mut *x` where `x` is
+ // some rvalue expression), since these are essentially a copy of a pointer.
+ // In other words, this borrow does not refer to the
+ // temporary (`*x`), but to the referent (whatever `x` is a borrow of).
+ //
+ // We were considering that we might encounter problems down the line if somehow,
+ // some part of the compiler were to look at this result and try to use it to
+ // drive a borrowck-like analysis (this does not currently happen, as of this writing).
+ // But even this should be fine, because the lifetime of the dereferenced reference
+ // found in the rvalue is only significant as an intermediate 'link' to the value we
+ // are producing, and we separately track whether that value is live over a yield.
+ // Example:
+ //
+ // ```notrust
+ // fn identity<T>(x: &mut T) -> &mut T { x }
+ // let a: A = ...;
+ // let y: &'y mut A = &mut *identity(&'a mut a);
+ // ^^^^^^^^^^^^^^^^^^^^^^^^^ the borrow we are talking about
+ // ```
+ //
+ // The expression `*identity(...)` is a deref of an rvalue,
+ // where the `identity(...)` (the rvalue) produces a return type
+ // of `&'rv mut A`, where `'a: 'rv`. We then assign this result to
+ // `'y`, resulting in (transitively) `'a: 'y` (i.e., while `y` is in use,
+ // `a` will be considered borrowed). Other parts of the code will ensure
+ // that if `y` is live over a yield, `&'y mut A` appears in the generator
+ // state. If `'y` is live, then any sound region analysis must conclude
+ // that `'a` is also live. So if this causes a bug, blame some other
+ // part of the code!
+ let is_deref = place_with_id
+ .place
+ .projections
+ .iter()
+ .any(|Projection { kind, .. }| *kind == ProjectionKind::Deref);
+
+ if let (false, PlaceBase::Rvalue) = (is_deref, place_with_id.place.base) {
+ self.places.borrowed_temporaries.insert(place_with_id.hir_id);
+ }
+ }
+}
+
+impl<'tcx> expr_use_visitor::Delegate<'tcx> for ExprUseDelegate<'tcx> {
+ fn consume(
+ &mut self,
+ place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ diag_expr_id: HirId,
+ ) {
+ let hir = self.tcx.hir();
+ let parent = match hir.find_parent_node(place_with_id.hir_id) {
+ Some(parent) => parent,
+ None => place_with_id.hir_id,
+ };
+ debug!(
+ "consume {:?}; diag_expr_id={}, using parent {}",
+ place_with_id,
+ hir.node_to_string(diag_expr_id),
+ hir.node_to_string(parent)
+ );
+ place_with_id
+ .try_into()
+ .map_or((), |tracked_value| self.mark_consumed(parent, tracked_value));
+ }
+
+ fn borrow(
+ &mut self,
+ place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ diag_expr_id: HirId,
+ bk: rustc_middle::ty::BorrowKind,
+ ) {
+ debug!(
+ "borrow: place_with_id = {place_with_id:?}, diag_expr_id={diag_expr_id:?}, \
+ borrow_kind={bk:?}"
+ );
+
+ self.borrow_place(place_with_id);
+ }
+
+ fn copy(
+ &mut self,
+ place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ _diag_expr_id: HirId,
+ ) {
+ debug!("copy: place_with_id = {place_with_id:?}");
+
+ self.places
+ .borrowed
+ .insert(TrackedValue::from_place_with_projections_allowed(place_with_id));
+
+        // For copies we treat this mostly like a borrow except that we don't add the place
+        // to borrowed_temporaries because the copy is consumed.
+ }
+
+ fn mutate(
+ &mut self,
+ assignee_place: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ diag_expr_id: HirId,
+ ) {
+ debug!("mutate {assignee_place:?}; diag_expr_id={diag_expr_id:?}");
+
+ if assignee_place.place.base == PlaceBase::Rvalue
+ && assignee_place.place.projections.is_empty()
+ {
+ // Assigning to an Rvalue is illegal unless done through a dereference. We would have
+ // already gotten a type error, so we will just return here.
+ return;
+ }
+
+        // If the type being assigned needs to be dropped, then the mutation counts as a borrow
+ // since it is essentially doing `Drop::drop(&mut x); x = new_value;`.
+ if assignee_place.place.base_ty.needs_drop(self.tcx, self.param_env) {
+ self.places
+ .borrowed
+ .insert(TrackedValue::from_place_with_projections_allowed(assignee_place));
+ }
+ }
+
+ fn bind(
+ &mut self,
+ binding_place: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ diag_expr_id: HirId,
+ ) {
+ debug!("bind {binding_place:?}; diag_expr_id={diag_expr_id:?}");
+ }
+
+ fn fake_read(
+ &mut self,
+ place_with_id: &expr_use_visitor::PlaceWithHirId<'tcx>,
+ cause: rustc_middle::mir::FakeReadCause,
+ diag_expr_id: HirId,
+ ) {
+ debug!(
+ "fake_read place_with_id={place_with_id:?}; cause={cause:?}; diag_expr_id={diag_expr_id:?}"
+ );
+
+        // Fake reads happen in places like the scrutinee of a match expression. We treat those
+        // as a borrow, much like a copy: the idea is that we are transiently creating a `&T`
+        // reference that we can read from to observe the current value (this `&T` is
+        // immediately dropped afterwards).
+ self.borrow_place(place_with_id);
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/inherited.rs b/compiler/rustc_typeck/src/check/inherited.rs
new file mode 100644
index 000000000..cd152eb97
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/inherited.rs
@@ -0,0 +1,183 @@
+use super::callee::DeferredCallResolution;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::HirIdMap;
+use rustc_infer::infer;
+use rustc_infer::infer::{InferCtxt, InferOk, TyCtxtInferExt};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::def_id::LocalDefIdMap;
+use rustc_span::{self, Span};
+use rustc_trait_selection::infer::InferCtxtExt as _;
+use rustc_trait_selection::traits::{self, ObligationCause, TraitEngine, TraitEngineExt};
+
+use std::cell::RefCell;
+use std::ops::Deref;
+
+/// Closures defined within the function. For example:
+/// ```ignore (illustrative)
+/// fn foo() {
+/// bar(move|| { ... })
+/// }
+/// ```
+/// Here, the function `foo()` and the closure passed to
+/// `bar()` will each have their own `FnCtxt`, but they will
+/// share the inherited fields.
+pub struct Inherited<'a, 'tcx> {
+ pub(super) infcx: InferCtxt<'a, 'tcx>,
+
+ pub(super) typeck_results: &'a RefCell<ty::TypeckResults<'tcx>>,
+
+ pub(super) locals: RefCell<HirIdMap<super::LocalTy<'tcx>>>,
+
+ pub(super) fulfillment_cx: RefCell<Box<dyn TraitEngine<'tcx>>>,
+
+ // Some additional `Sized` obligations badly affect type inference.
+ // These obligations are added in a later stage of typeck.
+ pub(super) deferred_sized_obligations:
+ RefCell<Vec<(Ty<'tcx>, Span, traits::ObligationCauseCode<'tcx>)>>,
+
+ // When we process a call like `c()` where `c` is a closure type,
+ // we may not have decided yet whether `c` is a `Fn`, `FnMut`, or
+ // `FnOnce` closure. In that case, we defer full resolution of the
+ // call until upvar inference can kick in and make the
+ // decision. We keep these deferred resolutions grouped by the
+ // def-id of the closure, so that once we decide, we can easily go
+ // back and process them.
+ pub(super) deferred_call_resolutions: RefCell<LocalDefIdMap<Vec<DeferredCallResolution<'tcx>>>>,
+
+ pub(super) deferred_cast_checks: RefCell<Vec<super::cast::CastCheck<'tcx>>>,
+
+ pub(super) deferred_transmute_checks: RefCell<Vec<(Ty<'tcx>, Ty<'tcx>, Span)>>,
+
+ pub(super) deferred_asm_checks: RefCell<Vec<(&'tcx hir::InlineAsm<'tcx>, hir::HirId)>>,
+
+ pub(super) deferred_generator_interiors:
+ RefCell<Vec<(hir::BodyId, Ty<'tcx>, hir::GeneratorKind)>>,
+
+ pub(super) body_id: Option<hir::BodyId>,
+
+ /// Whenever we introduce an adjustment from `!` into a type variable,
+ /// we record that type variable here. This is later used to inform
+ /// fallback. See the `fallback` module for details.
+ pub(super) diverging_type_vars: RefCell<FxHashSet<Ty<'tcx>>>,
+}
+
+impl<'a, 'tcx> Deref for Inherited<'a, 'tcx> {
+ type Target = InferCtxt<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.infcx
+ }
+}
+
+/// A temporary returned by `Inherited::build(...)`. This is necessary
+/// for multiple `InferCtxt` to share the same `in_progress_typeck_results`
+/// without using `Rc` or something similar.
+pub struct InheritedBuilder<'tcx> {
+ infcx: infer::InferCtxtBuilder<'tcx>,
+ def_id: LocalDefId,
+}
+
+impl<'tcx> Inherited<'_, 'tcx> {
+ pub fn build(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> InheritedBuilder<'tcx> {
+ let hir_owner = tcx.hir().local_def_id_to_hir_id(def_id).owner;
+
+ InheritedBuilder {
+ infcx: tcx
+ .infer_ctxt()
+ .ignoring_regions()
+ .with_fresh_in_progress_typeck_results(hir_owner),
+ def_id,
+ }
+ }
+}
+
+impl<'tcx> InheritedBuilder<'tcx> {
+ pub fn enter<F, R>(&mut self, f: F) -> R
+ where
+ F: for<'a> FnOnce(Inherited<'a, 'tcx>) -> R,
+ {
+ let def_id = self.def_id;
+ self.infcx.enter(|infcx| f(Inherited::new(infcx, def_id)))
+ }
+}
+
+impl<'a, 'tcx> Inherited<'a, 'tcx> {
+ fn new(infcx: InferCtxt<'a, 'tcx>, def_id: LocalDefId) -> Self {
+ let tcx = infcx.tcx;
+ let body_id = tcx.hir().maybe_body_owned_by(def_id);
+ let typeck_results =
+ infcx.in_progress_typeck_results.expect("building `FnCtxt` without typeck results");
+
+ Inherited {
+ typeck_results,
+ infcx,
+ fulfillment_cx: RefCell::new(<dyn TraitEngine<'_>>::new(tcx)),
+ locals: RefCell::new(Default::default()),
+ deferred_sized_obligations: RefCell::new(Vec::new()),
+ deferred_call_resolutions: RefCell::new(Default::default()),
+ deferred_cast_checks: RefCell::new(Vec::new()),
+ deferred_transmute_checks: RefCell::new(Vec::new()),
+ deferred_asm_checks: RefCell::new(Vec::new()),
+ deferred_generator_interiors: RefCell::new(Vec::new()),
+ diverging_type_vars: RefCell::new(Default::default()),
+ body_id,
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub(super) fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) {
+ if obligation.has_escaping_bound_vars() {
+ span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}", obligation);
+ }
+ self.fulfillment_cx.borrow_mut().register_predicate_obligation(self, obligation);
+ }
+
+ pub(super) fn register_predicates<I>(&self, obligations: I)
+ where
+ I: IntoIterator<Item = traits::PredicateObligation<'tcx>>,
+ {
+ for obligation in obligations {
+ self.register_predicate(obligation);
+ }
+ }
+
+ pub(super) fn register_infer_ok_obligations<T>(&self, infer_ok: InferOk<'tcx, T>) -> T {
+ self.register_predicates(infer_ok.obligations);
+ infer_ok.value
+ }
+
+ pub(super) fn normalize_associated_types_in<T>(
+ &self,
+ span: Span,
+ body_id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.normalize_associated_types_in_with_cause(
+ ObligationCause::misc(span, body_id),
+ param_env,
+ value,
+ )
+ }
+
+ pub(super) fn normalize_associated_types_in_with_cause<T>(
+ &self,
+ cause: ObligationCause<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let ok = self.partially_normalize_associated_types_in(cause, param_env, value);
+ debug!(?ok);
+ self.register_infer_ok_obligations(ok)
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/intrinsic.rs b/compiler/rustc_typeck/src/check/intrinsic.rs
new file mode 100644
index 000000000..3f2a0da8d
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/intrinsic.rs
@@ -0,0 +1,517 @@
+//! Type-checking for the rust-intrinsic and platform-intrinsic
+//! intrinsics that the compiler exposes.
+
+use crate::errors::{
+ UnrecognizedAtomicOperation, UnrecognizedIntrinsicFunction,
+ WrongNumberOfGenericArgumentsToIntrinsic,
+};
+use crate::require_same_types;
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_middle::traits::{ObligationCause, ObligationCauseCode};
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::symbol::{kw, sym, Symbol};
+use rustc_target::spec::abi::Abi;
+
+use std::iter;
+
+fn equate_intrinsic_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ it: &hir::ForeignItem<'_>,
+ n_tps: usize,
+ n_lts: usize,
+ sig: ty::PolyFnSig<'tcx>,
+) {
+ let (own_counts, span) = match &it.kind {
+ hir::ForeignItemKind::Fn(.., generics) => {
+ let own_counts = tcx.generics_of(it.def_id.to_def_id()).own_counts();
+ (own_counts, generics.span)
+ }
+ _ => {
+ struct_span_err!(tcx.sess, it.span, E0622, "intrinsic must be a function")
+ .span_label(it.span, "expected a function")
+ .emit();
+ return;
+ }
+ };
+
+ let gen_count_ok = |found: usize, expected: usize, descr: &str| -> bool {
+ if found != expected {
+ tcx.sess.emit_err(WrongNumberOfGenericArgumentsToIntrinsic {
+ span,
+ found,
+ expected,
+ descr,
+ });
+ false
+ } else {
+ true
+ }
+ };
+
+ if gen_count_ok(own_counts.lifetimes, n_lts, "lifetime")
+ && gen_count_ok(own_counts.types, n_tps, "type")
+ && gen_count_ok(own_counts.consts, 0, "const")
+ {
+ let fty = tcx.mk_fn_ptr(sig);
+ let cause = ObligationCause::new(it.span, it.hir_id(), ObligationCauseCode::IntrinsicType);
+ require_same_types(tcx, &cause, tcx.mk_fn_ptr(tcx.fn_sig(it.def_id)), fty);
+ }
+}
+
+/// Returns the unsafety of the given intrinsic.
+pub fn intrinsic_operation_unsafety(intrinsic: Symbol) -> hir::Unsafety {
+ match intrinsic {
+ // When adding a new intrinsic to this list,
+ // it's usually worth updating that intrinsic's documentation
+ // to note that it's safe to call, since
+ // safe extern fns are otherwise unprecedented.
+ sym::abort
+ | sym::size_of
+ | sym::min_align_of
+ | sym::needs_drop
+ | sym::caller_location
+ | sym::add_with_overflow
+ | sym::sub_with_overflow
+ | sym::mul_with_overflow
+ | sym::wrapping_add
+ | sym::wrapping_sub
+ | sym::wrapping_mul
+ | sym::saturating_add
+ | sym::saturating_sub
+ | sym::rotate_left
+ | sym::rotate_right
+ | sym::ctpop
+ | sym::ctlz
+ | sym::cttz
+ | sym::bswap
+ | sym::bitreverse
+ | sym::discriminant_value
+ | sym::type_id
+ | sym::likely
+ | sym::unlikely
+ | sym::ptr_guaranteed_eq
+ | sym::ptr_guaranteed_ne
+ | sym::minnumf32
+ | sym::minnumf64
+ | sym::maxnumf32
+ | sym::rustc_peek
+ | sym::maxnumf64
+ | sym::type_name
+ | sym::forget
+ | sym::black_box
+ | sym::variant_count => hir::Unsafety::Normal,
+ _ => hir::Unsafety::Unsafe,
+ }
+}
+
+/// Remember to add all intrinsics here, in `compiler/rustc_codegen_llvm/src/intrinsic.rs`,
+/// and in `library/core/src/intrinsics.rs`.
+pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
+ let param = |n| tcx.mk_ty_param(n, Symbol::intern(&format!("P{}", n)));
+ let intrinsic_name = tcx.item_name(it.def_id.to_def_id());
+ let name_str = intrinsic_name.as_str();
+
+ let bound_vars = tcx.mk_bound_variable_kinds(
+ [ty::BoundVariableKind::Region(ty::BrAnon(0)), ty::BoundVariableKind::Region(ty::BrEnv)]
+ .iter()
+ .copied(),
+ );
+ let mk_va_list_ty = |mutbl| {
+ tcx.lang_items().va_list().map(|did| {
+ let region = tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(0) },
+ ));
+ let env_region = tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: ty::BoundVar::from_u32(1), kind: ty::BrEnv },
+ ));
+ let va_list_ty = tcx.bound_type_of(did).subst(tcx, &[region.into()]);
+ (tcx.mk_ref(env_region, ty::TypeAndMut { ty: va_list_ty, mutbl }), va_list_ty)
+ })
+ };
+
+ let (n_tps, n_lts, inputs, output, unsafety) = if name_str.starts_with("atomic_") {
+ let split: Vec<&str> = name_str.split('_').collect();
+ assert!(split.len() >= 2, "Atomic intrinsic in an incorrect format");
+
+        // We only care about the operation here (the part after `atomic_`, before any
+        // ordering suffix).
+ let (n_tps, inputs, output) = match split[1] {
+ "cxchg" | "cxchgweak" => (
+ 1,
+ vec![tcx.mk_mut_ptr(param(0)), param(0), param(0)],
+ tcx.intern_tup(&[param(0), tcx.types.bool]),
+ ),
+ "load" => (1, vec![tcx.mk_imm_ptr(param(0))], param(0)),
+ "store" => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], tcx.mk_unit()),
+
+ "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax"
+ | "umin" => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], param(0)),
+ "fence" | "singlethreadfence" => (0, Vec::new(), tcx.mk_unit()),
+ op => {
+ tcx.sess.emit_err(UnrecognizedAtomicOperation { span: it.span, op });
+ return;
+ }
+ };
+ (n_tps, 0, inputs, output, hir::Unsafety::Unsafe)
+ } else {
+ let unsafety = intrinsic_operation_unsafety(intrinsic_name);
+ let (n_tps, inputs, output) = match intrinsic_name {
+ sym::abort => (0, Vec::new(), tcx.types.never),
+ sym::unreachable => (0, Vec::new(), tcx.types.never),
+ sym::breakpoint => (0, Vec::new(), tcx.mk_unit()),
+ sym::size_of | sym::pref_align_of | sym::min_align_of | sym::variant_count => {
+ (1, Vec::new(), tcx.types.usize)
+ }
+ sym::size_of_val | sym::min_align_of_val => {
+ (1, vec![tcx.mk_imm_ptr(param(0))], tcx.types.usize)
+ }
+ sym::rustc_peek => (1, vec![param(0)], param(0)),
+ sym::caller_location => (0, vec![], tcx.caller_location_ty()),
+ sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
+ (1, Vec::new(), tcx.mk_unit())
+ }
+ sym::forget => (1, vec![param(0)], tcx.mk_unit()),
+ sym::transmute => (2, vec![param(0)], param(1)),
+ sym::prefetch_read_data
+ | sym::prefetch_write_data
+ | sym::prefetch_read_instruction
+ | sym::prefetch_write_instruction => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.types.i32,
+ ],
+ tcx.mk_unit(),
+ ),
+ sym::drop_in_place => (1, vec![tcx.mk_mut_ptr(param(0))], tcx.mk_unit()),
+ sym::needs_drop => (1, Vec::new(), tcx.types.bool),
+
+ sym::type_name => (1, Vec::new(), tcx.mk_static_str()),
+ sym::type_id => (1, Vec::new(), tcx.types.u64),
+ sym::offset | sym::arith_offset => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.types.isize,
+ ],
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ ),
+ sym::copy | sym::copy_nonoverlapping => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }),
+ tcx.types.usize,
+ ],
+ tcx.mk_unit(),
+ ),
+ sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }),
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Not }),
+ tcx.types.usize,
+ ],
+ tcx.mk_unit(),
+ ),
+ sym::write_bytes | sym::volatile_set_memory => (
+ 1,
+ vec![
+ tcx.mk_ptr(ty::TypeAndMut { ty: param(0), mutbl: hir::Mutability::Mut }),
+ tcx.types.u8,
+ tcx.types.usize,
+ ],
+ tcx.mk_unit(),
+ ),
+ sym::sqrtf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::sqrtf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::powif32 => (0, vec![tcx.types.f32, tcx.types.i32], tcx.types.f32),
+ sym::powif64 => (0, vec![tcx.types.f64, tcx.types.i32], tcx.types.f64),
+ sym::sinf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::sinf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::cosf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::cosf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::powf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::powf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::expf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::expf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::exp2f32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::exp2f64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::logf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::logf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::log10f32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::log10f64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::log2f32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::log2f64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::fmaf32 => (0, vec![tcx.types.f32, tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::fmaf64 => (0, vec![tcx.types.f64, tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::fabsf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::fabsf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::minnumf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::minnumf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::maxnumf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::maxnumf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::copysignf32 => (0, vec![tcx.types.f32, tcx.types.f32], tcx.types.f32),
+ sym::copysignf64 => (0, vec![tcx.types.f64, tcx.types.f64], tcx.types.f64),
+ sym::floorf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::floorf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::ceilf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::ceilf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::truncf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::truncf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::rintf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::rintf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::nearbyintf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::nearbyintf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+ sym::roundf32 => (0, vec![tcx.types.f32], tcx.types.f32),
+ sym::roundf64 => (0, vec![tcx.types.f64], tcx.types.f64),
+
+ sym::volatile_load | sym::unaligned_volatile_load => {
+ (1, vec![tcx.mk_imm_ptr(param(0))], param(0))
+ }
+ sym::volatile_store | sym::unaligned_volatile_store => {
+ (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], tcx.mk_unit())
+ }
+
+ sym::ctpop
+ | sym::ctlz
+ | sym::ctlz_nonzero
+ | sym::cttz
+ | sym::cttz_nonzero
+ | sym::bswap
+ | sym::bitreverse => (1, vec![param(0)], param(0)),
+
+ sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
+ (1, vec![param(0), param(0)], tcx.intern_tup(&[param(0), tcx.types.bool]))
+ }
+
+ sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
+ (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.bool)
+ }
+
+ sym::const_allocate => {
+ (0, vec![tcx.types.usize, tcx.types.usize], tcx.mk_mut_ptr(tcx.types.u8))
+ }
+ sym::const_deallocate => (
+ 0,
+ vec![tcx.mk_mut_ptr(tcx.types.u8), tcx.types.usize, tcx.types.usize],
+ tcx.mk_unit(),
+ ),
+
+ sym::ptr_offset_from => {
+ (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.isize)
+ }
+ sym::ptr_offset_from_unsigned => {
+ (1, vec![tcx.mk_imm_ptr(param(0)), tcx.mk_imm_ptr(param(0))], tcx.types.usize)
+ }
+ sym::unchecked_div | sym::unchecked_rem | sym::exact_div => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::unchecked_shl | sym::unchecked_shr | sym::rotate_left | sym::rotate_right => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::unchecked_add | sym::unchecked_sub | sym::unchecked_mul => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::wrapping_add | sym::wrapping_sub | sym::wrapping_mul => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::saturating_add | sym::saturating_sub => (1, vec![param(0), param(0)], param(0)),
+ sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
+ (1, vec![param(0), param(0)], param(0))
+ }
+ sym::float_to_int_unchecked => (2, vec![param(0)], param(1)),
+
+ sym::assume => (0, vec![tcx.types.bool], tcx.mk_unit()),
+ sym::likely => (0, vec![tcx.types.bool], tcx.types.bool),
+ sym::unlikely => (0, vec![tcx.types.bool], tcx.types.bool),
+
+ sym::discriminant_value => {
+ let assoc_items = tcx.associated_item_def_ids(
+ tcx.require_lang_item(hir::LangItem::DiscriminantKind, None),
+ );
+ let discriminant_def_id = assoc_items[0];
+
+ let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(0) };
+ (
+ 1,
+ vec![
+ tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)), param(0)),
+ ],
+ tcx.mk_projection(discriminant_def_id, tcx.mk_substs([param(0).into()].iter())),
+ )
+ }
+
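+ // The `try` intrinsic backing `std::panic::catch_unwind`: `try_fn`
+ // runs the body with the data pointer, `catch_fn` handles a caught
+ // unwind, and the returned `i32` is 0 on normal return and 1 if the
+ // body unwound.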
+ kw::Try => {
+ let mut_u8 = tcx.mk_mut_ptr(tcx.types.u8);
+ let try_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
+ iter::once(mut_u8),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ ));
+ let catch_fn_ty = ty::Binder::dummy(tcx.mk_fn_sig(
+ [mut_u8, mut_u8].iter().cloned(),
+ tcx.mk_unit(),
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ ));
+ (
+ 0,
+ vec![tcx.mk_fn_ptr(try_fn_ty), mut_u8, tcx.mk_fn_ptr(catch_fn_ty)],
+ tcx.types.i32,
+ )
+ }
+
+ sym::va_start | sym::va_end => match mk_va_list_ty(hir::Mutability::Mut) {
+ Some((va_list_ref_ty, _)) => (0, vec![va_list_ref_ty], tcx.mk_unit()),
+ None => bug!("`va_list` language item needed for C-variadic intrinsics"),
+ },
+
+ sym::va_copy => match mk_va_list_ty(hir::Mutability::Not) {
+ Some((va_list_ref_ty, va_list_ty)) => {
+ let va_list_ptr_ty = tcx.mk_mut_ptr(va_list_ty);
+ (0, vec![va_list_ptr_ty, va_list_ref_ty], tcx.mk_unit())
+ }
+ None => bug!("`va_list` language item needed for C-variadic intrinsics"),
+ },
+
+ sym::va_arg => match mk_va_list_ty(hir::Mutability::Mut) {
+ Some((va_list_ref_ty, _)) => (1, vec![va_list_ref_ty], param(0)),
+ None => bug!("`va_list` language item needed for C-variadic intrinsics"),
+ },
+
+ sym::nontemporal_store => (1, vec![tcx.mk_mut_ptr(param(0)), param(0)], tcx.mk_unit()),
+
+ sym::raw_eq => {
+ let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind: ty::BrAnon(0) };
+ let param_ty =
+ tcx.mk_imm_ref(tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)), param(0));
+ (1, vec![param_ty; 2], tcx.types.bool)
+ }
+
+ sym::black_box => (1, vec![param(0)], param(0)),
+
+ sym::const_eval_select => (4, vec![param(0), param(1), param(2)], param(3)),
+
+ sym::vtable_size | sym::vtable_align => {
+ (0, vec![tcx.mk_imm_ptr(tcx.mk_unit())], tcx.types.usize)
+ }
+
+ other => {
+ tcx.sess.emit_err(UnrecognizedIntrinsicFunction { span: it.span, name: other });
+ return;
+ }
+ };
+ (n_tps, 0, inputs, output, unsafety)
+ };
+ let sig = tcx.mk_fn_sig(inputs.into_iter(), output, false, unsafety, Abi::RustIntrinsic);
+ let sig = ty::Binder::bind_with_vars(sig, bound_vars);
+ equate_intrinsic_type(tcx, it, n_tps, n_lts, sig)
+}
+
+/// Type-check `extern "platform-intrinsic" { ... }` functions.
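+///
+/// For example, a declaration such as
+/// `extern "platform-intrinsic" { fn simd_add<T>(x: T, y: T) -> T; }`
+/// is checked against the table below: one type parameter, two
+/// arguments of that type, and the same type as the return type.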
+pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
+ let param = |n| {
+ let name = Symbol::intern(&format!("P{}", n));
+ tcx.mk_ty_param(n, name)
+ };
+
+ let name = it.ident.name;
+
+ let (n_tps, inputs, output) = match name {
+ sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
+ (2, vec![param(0), param(0)], param(1))
+ }
+ sym::simd_add
+ | sym::simd_sub
+ | sym::simd_mul
+ | sym::simd_rem
+ | sym::simd_div
+ | sym::simd_shl
+ | sym::simd_shr
+ | sym::simd_and
+ | sym::simd_or
+ | sym::simd_xor
+ | sym::simd_fmin
+ | sym::simd_fmax
+ | sym::simd_fpow
+ | sym::simd_saturating_add
+ | sym::simd_saturating_sub => (1, vec![param(0), param(0)], param(0)),
+ sym::simd_arith_offset => (2, vec![param(0), param(1)], param(0)),
+ sym::simd_neg
+ | sym::simd_fsqrt
+ | sym::simd_fsin
+ | sym::simd_fcos
+ | sym::simd_fexp
+ | sym::simd_fexp2
+ | sym::simd_flog2
+ | sym::simd_flog10
+ | sym::simd_flog
+ | sym::simd_fabs
+ | sym::simd_ceil
+ | sym::simd_floor
+ | sym::simd_round
+ | sym::simd_trunc => (1, vec![param(0)], param(0)),
+ sym::simd_fpowi => (1, vec![param(0), tcx.types.i32], param(0)),
+ sym::simd_fma => (1, vec![param(0), param(0), param(0)], param(0)),
+ sym::simd_gather => (3, vec![param(0), param(1), param(2)], param(0)),
+ sym::simd_scatter => (3, vec![param(0), param(1), param(2)], tcx.mk_unit()),
+ sym::simd_insert => (2, vec![param(0), tcx.types.u32, param(1)], param(0)),
+ sym::simd_extract => (2, vec![param(0), tcx.types.u32], param(1)),
+ sym::simd_cast | sym::simd_as => (2, vec![param(0)], param(1)),
+ sym::simd_bitmask => (2, vec![param(0)], param(1)),
+ sym::simd_select | sym::simd_select_bitmask => {
+ (2, vec![param(0), param(1), param(1)], param(1))
+ }
+ sym::simd_reduce_all | sym::simd_reduce_any => (1, vec![param(0)], tcx.types.bool),
+ sym::simd_reduce_add_ordered | sym::simd_reduce_mul_ordered => {
+ (2, vec![param(0), param(1)], param(1))
+ }
+ sym::simd_reduce_add_unordered
+ | sym::simd_reduce_mul_unordered
+ | sym::simd_reduce_and
+ | sym::simd_reduce_or
+ | sym::simd_reduce_xor
+ | sym::simd_reduce_min
+ | sym::simd_reduce_max
+ | sym::simd_reduce_min_nanless
+ | sym::simd_reduce_max_nanless => (2, vec![param(0)], param(1)),
+ sym::simd_shuffle => (3, vec![param(0), param(0), param(1)], param(2)),
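+ // Legacy suffixed forms such as `simd_shuffle4` encode the lane count
+ // in the name and take the indices as a `[u32; N]` array (here
+ // `[u32; 4]`) rather than as a generic parameter, so the numeric
+ // suffix is parsed out of the name below.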
+ name if name.as_str().starts_with("simd_shuffle") => {
+ match name.as_str()["simd_shuffle".len()..].parse() {
+ Ok(n) => {
+ let params = vec![param(0), param(0), tcx.mk_array(tcx.types.u32, n)];
+ (2, params, param(1))
+ }
+ Err(_) => {
+ let msg =
+ format!("unrecognized platform-specific intrinsic function: `{name}`");
+ tcx.sess.struct_span_err(it.span, &msg).emit();
+ return;
+ }
+ }
+ }
+ _ => {
+ let msg = format!("unrecognized platform-specific intrinsic function: `{name}`");
+ tcx.sess.struct_span_err(it.span, &msg).emit();
+ return;
+ }
+ };
+
+ let sig = tcx.mk_fn_sig(
+ inputs.into_iter(),
+ output,
+ false,
+ hir::Unsafety::Unsafe,
+ Abi::PlatformIntrinsic,
+ );
+ let sig = ty::Binder::dummy(sig);
+ equate_intrinsic_type(tcx, it, n_tps, 0, sig)
+}
diff --git a/compiler/rustc_typeck/src/check/intrinsicck.rs b/compiler/rustc_typeck/src/check/intrinsicck.rs
new file mode 100644
index 000000000..df94abbaf
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/intrinsicck.rs
@@ -0,0 +1,530 @@
+use rustc_ast::InlineAsmTemplatePiece;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_index::vec::Idx;
+use rustc_middle::ty::layout::{LayoutError, SizeSkeleton};
+use rustc_middle::ty::{self, Article, FloatTy, IntTy, Ty, TyCtxt, TypeVisitable, UintTy};
+use rustc_session::lint;
+use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_target::abi::{Pointer, VariantIdx};
+use rustc_target::asm::{InlineAsmReg, InlineAsmRegClass, InlineAsmRegOrRegClass, InlineAsmType};
+use rustc_trait_selection::infer::InferCtxtExt;
+
+use super::FnCtxt;
+
+/// If the type is `Option<T>`, this returns `T`; otherwise it returns
+/// the type itself. Works on most `Option`-like types.
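+///
+/// For example, `Option<fn()>` unpacks to `fn()`: the enum has two
+/// variants, no explicit `repr`, a fieldless `None`, and a single-field
+/// `Some`.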
+fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let ty::Adt(def, substs) = *ty.kind() else { return ty };
+
+ if def.variants().len() == 2 && !def.repr().c() && def.repr().int.is_none() {
+ let data_idx;
+
+ let one = VariantIdx::new(1);
+ let zero = VariantIdx::new(0);
+
+ if def.variant(zero).fields.is_empty() {
+ data_idx = one;
+ } else if def.variant(one).fields.is_empty() {
+ data_idx = zero;
+ } else {
+ return ty;
+ }
+
+ if def.variant(data_idx).fields.len() == 1 {
+ return def.variant(data_idx).fields[0].ty(tcx, substs);
+ }
+ }
+
+ ty
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn check_transmute(&self, span: Span, from: Ty<'tcx>, to: Ty<'tcx>) {
+ let convert = |ty: Ty<'tcx>| {
+ let ty = self.resolve_vars_if_possible(ty);
+ let ty = self.tcx.normalize_erasing_regions(self.param_env, ty);
+ (SizeSkeleton::compute(ty, self.tcx, self.param_env), ty)
+ };
+ let (sk_from, from) = convert(from);
+ let (sk_to, to) = convert(to);
+
+ // Check for same size using the skeletons.
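+ // (Skeletons let transmutes like `&T` to `*const T` pass for a
+ // generic `T`, since both sides reduce to a pointer with the same
+ // tail.)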
+ if let (Ok(sk_from), Ok(sk_to)) = (sk_from, sk_to) {
+ if sk_from.same_size(sk_to) {
+ return;
+ }
+
+ // Special-case transmuting from `typeof(function)` and
+ // `Option<typeof(function)>` to present a clearer error.
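+ // E.g. transmuting a zero-sized `fn` item to the pointer-sized
+ // `usize` lands here and is steered toward an `as` cast instead.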
+ let from = unpack_option_like(self.tcx, from);
+ if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (from.kind(), sk_to) && size_to == Pointer.size(&self.tcx) {
+ struct_span_err!(self.tcx.sess, span, E0591, "can't transmute zero-sized type")
+ .note(&format!("source type: {from}"))
+ .note(&format!("target type: {to}"))
+ .help("cast with `as` to a pointer instead")
+ .emit();
+ return;
+ }
+ }
+
+ // Try to display a sensible error with as much information as possible.
+ let skeleton_string = |ty: Ty<'tcx>, sk| match sk {
+ Ok(SizeSkeleton::Known(size)) => format!("{} bits", size.bits()),
+ Ok(SizeSkeleton::Pointer { tail, .. }) => format!("pointer to `{tail}`"),
+ Err(LayoutError::Unknown(bad)) => {
+ if bad == ty {
+ "this type does not have a fixed size".to_owned()
+ } else {
+ format!("size can vary because of {bad}")
+ }
+ }
+ Err(err) => err.to_string(),
+ };
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0512,
+ "cannot transmute between types of different sizes, \
+ or dependently-sized types"
+ );
+ if from == to {
+ err.note(&format!("`{from}` does not have a fixed size"));
+ } else {
+ err.note(&format!("source type: `{}` ({})", from, skeleton_string(from, sk_from)))
+ .note(&format!("target type: `{}` ({})", to, skeleton_string(to, sk_to)));
+ }
+ err.emit();
+ }
+
+ // FIXME(compiler-errors): This could use `<$ty as Pointee>::Metadata == ()`
+ fn is_thin_ptr_ty(&self, ty: Ty<'tcx>) -> bool {
+ // Type still may have region variables, but `Sized` does not depend
+ // on those, so just erase them before querying.
+ if self.tcx.erase_regions(ty).is_sized(self.tcx.at(DUMMY_SP), self.param_env) {
+ return true;
+ }
+ if let ty::Foreign(..) = ty.kind() {
+ return true;
+ }
+ false
+ }
+}
+
+pub struct InlineAsmCtxt<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ fcx: Option<&'a FnCtxt<'a, 'tcx>>,
+}
+
+impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> {
+ pub fn new_global_asm(tcx: TyCtxt<'tcx>) -> Self {
+ InlineAsmCtxt { tcx, fcx: None }
+ }
+
+ pub fn new_in_fn(fcx: &'a FnCtxt<'a, 'tcx>) -> Self {
+ InlineAsmCtxt { tcx: fcx.tcx, fcx: Some(fcx) }
+ }
+
+ fn check_asm_operand_type(
+ &self,
+ idx: usize,
+ reg: InlineAsmRegOrRegClass,
+ expr: &hir::Expr<'tcx>,
+ template: &[InlineAsmTemplatePiece],
+ is_input: bool,
+ tied_input: Option<(&hir::Expr<'tcx>, Option<InlineAsmType>)>,
+ target_features: &FxHashSet<Symbol>,
+ ) -> Option<InlineAsmType> {
+ let fcx = self.fcx.unwrap_or_else(|| span_bug!(expr.span, "asm operand for global asm"));
+ // Check the type against the allowed types for inline asm.
+ let ty = fcx.typeck_results.borrow().expr_ty_adjusted(expr);
+ let ty = fcx.resolve_vars_if_possible(ty);
+ let asm_ty_isize = match self.tcx.sess.target.pointer_width {
+ 16 => InlineAsmType::I16,
+ 32 => InlineAsmType::I32,
+ 64 => InlineAsmType::I64,
+ _ => unreachable!(),
+ };
+
+ // Expect types to be fully resolved, no const or type variables.
+ if ty.has_infer_types_or_consts() {
+ assert!(fcx.is_tainted_by_errors());
+ return None;
+ }
+
+ let asm_ty = match *ty.kind() {
+ // `!` is allowed for input but not for output (issue #87802)
+ ty::Never if is_input => return None,
+ ty::Error(_) => return None,
+ ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => Some(InlineAsmType::I8),
+ ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => Some(InlineAsmType::I16),
+ ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => Some(InlineAsmType::I32),
+ ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => Some(InlineAsmType::I64),
+ ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => Some(InlineAsmType::I128),
+ ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => Some(asm_ty_isize),
+ ty::Float(FloatTy::F32) => Some(InlineAsmType::F32),
+ ty::Float(FloatTy::F64) => Some(InlineAsmType::F64),
+ ty::FnPtr(_) => Some(asm_ty_isize),
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl: _ }) if fcx.is_thin_ptr_ty(ty) => {
+ Some(asm_ty_isize)
+ }
+ ty::Adt(adt, substs) if adt.repr().simd() => {
+ let fields = &adt.non_enum_variant().fields;
+ let elem_ty = fields[0].ty(self.tcx, substs);
+ match elem_ty.kind() {
+ ty::Never | ty::Error(_) => return None,
+ ty::Int(IntTy::I8) | ty::Uint(UintTy::U8) => {
+ Some(InlineAsmType::VecI8(fields.len() as u64))
+ }
+ ty::Int(IntTy::I16) | ty::Uint(UintTy::U16) => {
+ Some(InlineAsmType::VecI16(fields.len() as u64))
+ }
+ ty::Int(IntTy::I32) | ty::Uint(UintTy::U32) => {
+ Some(InlineAsmType::VecI32(fields.len() as u64))
+ }
+ ty::Int(IntTy::I64) | ty::Uint(UintTy::U64) => {
+ Some(InlineAsmType::VecI64(fields.len() as u64))
+ }
+ ty::Int(IntTy::I128) | ty::Uint(UintTy::U128) => {
+ Some(InlineAsmType::VecI128(fields.len() as u64))
+ }
+ ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => {
+ Some(match self.tcx.sess.target.pointer_width {
+ 16 => InlineAsmType::VecI16(fields.len() as u64),
+ 32 => InlineAsmType::VecI32(fields.len() as u64),
+ 64 => InlineAsmType::VecI64(fields.len() as u64),
+ _ => unreachable!(),
+ })
+ }
+ ty::Float(FloatTy::F32) => Some(InlineAsmType::VecF32(fields.len() as u64)),
+ ty::Float(FloatTy::F64) => Some(InlineAsmType::VecF64(fields.len() as u64)),
+ _ => None,
+ }
+ }
+ ty::Infer(_) => unreachable!(),
+ _ => None,
+ };
+ let Some(asm_ty) = asm_ty else {
+ let msg = &format!("cannot use value of type `{ty}` for inline assembly");
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ err.note(
+ "only integers, floats, SIMD vectors, pointers and function pointers \
+ can be used as arguments for inline assembly",
+ );
+ err.emit();
+ return None;
+ };
+
+ // Check that the type implements Copy. The only case where this can
+ // possibly fail is for SIMD types which don't #[derive(Copy)].
+ if !fcx.infcx.type_is_copy_modulo_regions(fcx.param_env, ty, DUMMY_SP) {
+ let msg = "arguments for inline assembly must be copyable";
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ err.note(&format!("`{ty}` does not implement the Copy trait"));
+ err.emit();
+ }
+
+ // Ideally we wouldn't need to do this, but LLVM's register allocator
+ // really doesn't like it when tied operands have different types.
+ //
+ // This is purely an LLVM limitation, but we have to live with it since
+ // there is no way to hide this with implicit conversions.
+ //
+ // For the purposes of this check we only look at the `InlineAsmType`,
+ // which means that pointers and integers are treated as identical (modulo
+ // size).
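+ // E.g. `inout(reg) x => y` with `x: i32` and `y: f32` is rejected
+ // here even though both are 32 bits wide, because `InlineAsmType::I32`
+ // and `InlineAsmType::F32` differ.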
+ if let Some((in_expr, Some(in_asm_ty))) = tied_input {
+ if in_asm_ty != asm_ty {
+ let msg = "incompatible types for asm inout argument";
+ let mut err = self.tcx.sess.struct_span_err(vec![in_expr.span, expr.span], msg);
+
+ let in_expr_ty = fcx.typeck_results.borrow().expr_ty_adjusted(in_expr);
+ let in_expr_ty = fcx.resolve_vars_if_possible(in_expr_ty);
+ err.span_label(in_expr.span, &format!("type `{in_expr_ty}`"));
+ err.span_label(expr.span, &format!("type `{ty}`"));
+ err.note(
+ "asm inout arguments must have the same type, \
+ unless they are both pointers or integers of the same size",
+ );
+ err.emit();
+ }
+
+ // All of the later checks have already been done on the input, so
+ // let's not emit errors and warnings twice.
+ return Some(asm_ty);
+ }
+
+ // Check the type against the list of types supported by the selected
+ // register class.
+ let asm_arch = self.tcx.sess.asm_arch.unwrap();
+ let reg_class = reg.reg_class();
+ let supported_tys = reg_class.supported_types(asm_arch);
+ let Some((_, feature)) = supported_tys.iter().find(|&&(t, _)| t == asm_ty) else {
+ let msg = &format!("type `{ty}` cannot be used with this register class");
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ let supported_tys: Vec<_> =
+ supported_tys.iter().map(|(t, _)| t.to_string()).collect();
+ err.note(&format!(
+ "register class `{}` supports these types: {}",
+ reg_class.name(),
+ supported_tys.join(", "),
+ ));
+ if let Some(suggest) = reg_class.suggest_class(asm_arch, asm_ty) {
+ err.help(&format!(
+ "consider using the `{}` register class instead",
+ suggest.name()
+ ));
+ }
+ err.emit();
+ return Some(asm_ty);
+ };
+
+ // Check whether the selected type requires a target feature. Note that
+ // this is different from the feature check we did earlier. While the
+ // previous check verified that this register class is usable at all
+ // with the currently enabled features, some types may only be usable
+ // with a register class when a certain feature is enabled. We check
+ // this here since it depends on the results of typeck.
+ //
+ // Also note that this check isn't run when the operand type is never
+ // (!). In that case we still need the earlier check to verify that the
+ // register class is usable at all.
+ if let Some(feature) = feature {
+ if !target_features.contains(&feature) {
+ let msg = &format!("`{}` target feature is not enabled", feature);
+ let mut err = self.tcx.sess.struct_span_err(expr.span, msg);
+ err.note(&format!(
+ "this is required to use type `{}` with register class `{}`",
+ ty,
+ reg_class.name(),
+ ));
+ err.emit();
+ return Some(asm_ty);
+ }
+ }
+
+ // Check whether a modifier is suggested for using this type.
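+ // E.g. on x86-64, `inout(reg) x` with `x: i16` and a bare `{0}`
+ // placeholder lints, suggesting `{0:x}` (formatted as `ax`) over the
+ // default `{0:r}` (`rax`).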
+ if let Some((suggested_modifier, suggested_result)) =
+ reg_class.suggest_modifier(asm_arch, asm_ty)
+ {
+ // Search for any use of this operand without a modifier and emit
+ // the suggestion for them.
+ let mut spans = vec![];
+ for piece in template {
+ if let &InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span } = piece
+ {
+ if operand_idx == idx && modifier.is_none() {
+ spans.push(span);
+ }
+ }
+ }
+ if !spans.is_empty() {
+ let (default_modifier, default_result) =
+ reg_class.default_modifier(asm_arch).unwrap();
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::ASM_SUB_REGISTER,
+ expr.hir_id,
+ spans,
+ |lint| {
+ let msg = "formatting may not be suitable for sub-register argument";
+ let mut err = lint.build(msg);
+ err.span_label(expr.span, "for this argument");
+ err.help(&format!(
+ "use the `{suggested_modifier}` modifier to have the register formatted as `{suggested_result}`",
+ ));
+ err.help(&format!(
+ "or use the `{default_modifier}` modifier to keep the default formatting of `{default_result}`",
+ ));
+ err.emit();
+ },
+ );
+ }
+ }
+
+ Some(asm_ty)
+ }
+
+ pub fn check_asm(&self, asm: &hir::InlineAsm<'tcx>, enclosing_id: hir::HirId) {
+ let hir = self.tcx.hir();
+ let enclosing_def_id = hir.local_def_id(enclosing_id).to_def_id();
+ let target_features = self.tcx.asm_target_features(enclosing_def_id);
+ let Some(asm_arch) = self.tcx.sess.asm_arch else {
+ self.tcx.sess.delay_span_bug(DUMMY_SP, "target architecture does not support asm");
+ return;
+ };
+ for (idx, (op, op_sp)) in asm.operands.iter().enumerate() {
+ // Validate register classes against currently enabled target
+ // features. We check that at least one type is available for
+ // the enabled features.
+ //
+ // We ignore target feature requirements for clobbers: if the
+ // feature is disabled then the compiler doesn't care what we
+ // do with the registers.
+ //
+ // Note that this is only possible for explicit register
+ // operands, which cannot be used in the asm string.
+ if let Some(reg) = op.reg() {
+ // Some explicit registers cannot be used depending on the
+ // target. Reject those here.
+ if let InlineAsmRegOrRegClass::Reg(reg) = reg {
+ if let InlineAsmReg::Err = reg {
+ // `validate` will panic on `Err`, as an error must
+ // already have been reported.
+ continue;
+ }
+ if let Err(msg) = reg.validate(
+ asm_arch,
+ self.tcx.sess.relocation_model(),
+ &target_features,
+ &self.tcx.sess.target,
+ op.is_clobber(),
+ ) {
+ let msg = format!("cannot use register `{}`: {}", reg.name(), msg);
+ self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
+ continue;
+ }
+ }
+
+ if !op.is_clobber() {
+ let mut missing_required_features = vec![];
+ let reg_class = reg.reg_class();
+ if let InlineAsmRegClass::Err = reg_class {
+ continue;
+ }
+ for &(_, feature) in reg_class.supported_types(asm_arch) {
+ match feature {
+ Some(feature) => {
+ if target_features.contains(&feature) {
+ missing_required_features.clear();
+ break;
+ } else {
+ missing_required_features.push(feature);
+ }
+ }
+ None => {
+ missing_required_features.clear();
+ break;
+ }
+ }
+ }
+
+ // We are sorting primitive strs, so an unstable sort is fine here.
+ missing_required_features.sort_unstable();
+ missing_required_features.dedup();
+ match &missing_required_features[..] {
+ [] => {}
+ [feature] => {
+ let msg = format!(
+ "register class `{}` requires the `{}` target feature",
+ reg_class.name(),
+ feature
+ );
+ self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
+ // register isn't enabled, don't do more checks
+ continue;
+ }
+ features => {
+ let msg = format!(
+ "register class `{}` requires at least one of the following target features: {}",
+ reg_class.name(),
+ features
+ .iter()
+ .map(|f| f.as_str())
+ .intersperse(", ")
+ .collect::<String>(),
+ );
+ self.tcx.sess.struct_span_err(*op_sp, &msg).emit();
+ // register isn't enabled, don't do more checks
+ continue;
+ }
+ }
+ }
+ }
+
+ match *op {
+ hir::InlineAsmOperand::In { reg, ref expr } => {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ expr,
+ asm.template,
+ true,
+ None,
+ &target_features,
+ );
+ }
+ hir::InlineAsmOperand::Out { reg, late: _, ref expr } => {
+ if let Some(expr) = expr {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ expr,
+ asm.template,
+ false,
+ None,
+ &target_features,
+ );
+ }
+ }
+ hir::InlineAsmOperand::InOut { reg, late: _, ref expr } => {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ expr,
+ asm.template,
+ false,
+ None,
+ &target_features,
+ );
+ }
+ hir::InlineAsmOperand::SplitInOut { reg, late: _, ref in_expr, ref out_expr } => {
+ let in_ty = self.check_asm_operand_type(
+ idx,
+ reg,
+ in_expr,
+ asm.template,
+ true,
+ None,
+ &target_features,
+ );
+ if let Some(out_expr) = out_expr {
+ self.check_asm_operand_type(
+ idx,
+ reg,
+ out_expr,
+ asm.template,
+ false,
+ Some((in_expr, in_ty)),
+ &target_features,
+ );
+ }
+ }
+ // No special checking is needed for these:
+ // - Typeck has checked that Const operands are integers.
+ // - AST lowering guarantees that SymStatic points to a static.
+ hir::InlineAsmOperand::Const { .. } | hir::InlineAsmOperand::SymStatic { .. } => {}
+ // Check that sym actually points to a function. Later passes
+ // depend on this.
+ hir::InlineAsmOperand::SymFn { anon_const } => {
+ let ty = self.tcx.typeck_body(anon_const.body).node_type(anon_const.hir_id);
+ match ty.kind() {
+ ty::Never | ty::Error(_) => {}
+ ty::FnDef(..) => {}
+ _ => {
+ let mut err =
+ self.tcx.sess.struct_span_err(*op_sp, "invalid `sym` operand");
+ err.span_label(
+ self.tcx.hir().span(anon_const.body.hir_id),
+ &format!("is {} `{}`", ty.kind().article(), ty),
+ );
+ err.help("`sym` operands must refer to either a function or a static");
+ err.emit();
+ }
+ };
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/method/confirm.rs b/compiler/rustc_typeck/src/check/method/confirm.rs
new file mode 100644
index 000000000..2c89b63ae
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/method/confirm.rs
@@ -0,0 +1,582 @@
+use super::{probe, MethodCallee};
+
+use crate::astconv::{AstConv, CreateSubstsForGenericArgsCtxt, IsMethodCall};
+use crate::check::{callee, FnCtxt};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::GenericArg;
+use rustc_infer::infer::{self, InferOk};
+use rustc_middle::traits::{ObligationCauseCode, UnifyReceiverContext};
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast};
+use rustc_middle::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::subst::{self, Subst, SubstsRef};
+use rustc_middle::ty::{self, GenericParamDefKind, Ty};
+use rustc_span::Span;
+use rustc_trait_selection::traits;
+
+use std::iter;
+use std::ops::Deref;
+
+struct ConfirmContext<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+}
+
+impl<'a, 'tcx> Deref for ConfirmContext<'a, 'tcx> {
+ type Target = FnCtxt<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ self.fcx
+ }
+}
+
+#[derive(Debug)]
+pub struct ConfirmResult<'tcx> {
+ pub callee: MethodCallee<'tcx>,
+ pub illegal_sized_bound: Option<Span>,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn confirm_method(
+ &self,
+ span: Span,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ unadjusted_self_ty: Ty<'tcx>,
+ pick: probe::Pick<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ ) -> ConfirmResult<'tcx> {
+ debug!(
+ "confirm(unadjusted_self_ty={:?}, pick={:?}, generic_args={:?})",
+ unadjusted_self_ty, pick, segment.args,
+ );
+
+ let mut confirm_cx = ConfirmContext::new(self, span, self_expr, call_expr);
+ confirm_cx.confirm(unadjusted_self_ty, pick, segment)
+ }
+}
+
+impl<'a, 'tcx> ConfirmContext<'a, 'tcx> {
+ fn new(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ ) -> ConfirmContext<'a, 'tcx> {
+ ConfirmContext { fcx, span, self_expr, call_expr }
+ }
+
+ fn confirm(
+ &mut self,
+ unadjusted_self_ty: Ty<'tcx>,
+ pick: probe::Pick<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ ) -> ConfirmResult<'tcx> {
+ // Adjust the self expression the user provided and obtain the adjusted type.
+ let self_ty = self.adjust_self_ty(unadjusted_self_ty, &pick);
+
+ // Create substitutions for the method's type parameters.
+ let rcvr_substs = self.fresh_receiver_substs(self_ty, &pick);
+ let all_substs = self.instantiate_method_substs(&pick, segment, rcvr_substs);
+
+ debug!("rcvr_substs={rcvr_substs:?}, all_substs={all_substs:?}");
+
+ // Create the final signature for the method, replacing late-bound regions.
+ let (method_sig, method_predicates) = self.instantiate_method_sig(&pick, all_substs);
+
+ // If there is a `Self: Sized` bound and `Self` is a trait object, it is possible that
+ // something which derefs to `Self` actually implements the trait and the caller
+ // wanted to make a static dispatch on it but forgot to import the trait.
+ // See test `src/test/ui/issue-35976.rs`.
+ //
+ // In that case, we'll error anyway, but we'll also re-run the search with all traits
+ // in scope, and if we find another method which can be used, we'll output an
+ // appropriate hint suggesting to import the trait.
+ let filler_substs = rcvr_substs
+ .extend_to(self.tcx, pick.item.def_id, |def, _| self.tcx.mk_param_from_def(def));
+ let illegal_sized_bound = self.predicates_require_illegal_sized_bound(
+ &self.tcx.predicates_of(pick.item.def_id).instantiate(self.tcx, filler_substs),
+ );
+
+ // Unify the (adjusted) self type with what the method expects.
+ //
+ // SUBTLE: because of "guessing" while matching traits, no trait-system
+ // method may be called before this point if we want good error
+ // messages, since it could alter our Self-type. The one exception is
+ // normalizing the receiver from the signature (which is also done
+ // during probing).
+ let method_sig_rcvr = self.normalize_associated_types_in(self.span, method_sig.inputs()[0]);
+ debug!(
+ "confirm: self_ty={:?} method_sig_rcvr={:?} method_sig={:?} method_predicates={:?}",
+ self_ty, method_sig_rcvr, method_sig, method_predicates
+ );
+ self.unify_receivers(self_ty, method_sig_rcvr, &pick, all_substs);
+
+ let (method_sig, method_predicates) =
+ self.normalize_associated_types_in(self.span, (method_sig, method_predicates));
+ let method_sig = ty::Binder::dummy(method_sig);
+
+ // Make sure nobody calls `drop()` explicitly.
+ self.enforce_illegal_method_limitations(&pick);
+
+ // Add any trait/regions obligations specified on the method's type parameters.
+ // We won't add these if we encountered an illegal sized bound, so that we can use
+ // a custom error in that case.
+ if illegal_sized_bound.is_none() {
+ self.add_obligations(
+ self.tcx.mk_fn_ptr(method_sig),
+ all_substs,
+ method_predicates,
+ pick.item.def_id,
+ );
+ }
+
+ // Create the final `MethodCallee`.
+ let callee = MethodCallee {
+ def_id: pick.item.def_id,
+ substs: all_substs,
+ sig: method_sig.skip_binder(),
+ };
+ ConfirmResult { callee, illegal_sized_bound }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // ADJUSTMENTS
+
+ fn adjust_self_ty(
+ &mut self,
+ unadjusted_self_ty: Ty<'tcx>,
+ pick: &probe::Pick<'tcx>,
+ ) -> Ty<'tcx> {
+ // Commit the autoderefs by calling `autoderef` again, but this
+ // time writing the results into the various typeck results.
+ let mut autoderef =
+ self.autoderef_overloaded_span(self.span, unadjusted_self_ty, self.call_expr.span);
+ let Some((ty, n)) = autoderef.nth(pick.autoderefs) else {
+ return self.tcx.ty_error_with_message(
+ rustc_span::DUMMY_SP,
+ &format!("failed autoderef {}", pick.autoderefs),
+ );
+ };
+ assert_eq!(n, pick.autoderefs);
+
+ let mut adjustments = self.adjust_steps(&autoderef);
+ let mut target = self.structurally_resolved_type(autoderef.span(), ty);
+
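+ // Apply any autoref recorded during probing: a `&self`/`&mut self`
+ // method borrows the (derefed) receiver here, and an array receiver
+ // passed to a slice method is additionally unsized to `&[T]`.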
+ match pick.autoref_or_ptr_adjustment {
+ Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl, unsize }) => {
+ let region = self.next_region_var(infer::Autoref(self.span));
+ // Type we're wrapping in a reference, used later for unsizing
+ let base_ty = target;
+
+ target = self.tcx.mk_ref(region, ty::TypeAndMut { mutbl, ty: target });
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // Method call receivers are the primary use case
+ // for two-phase borrows.
+ allow_two_phase_borrow: AllowTwoPhase::Yes,
+ },
+ };
+ adjustments.push(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)),
+ target,
+ });
+
+ if unsize {
+ let unsized_ty = if let ty::Array(elem_ty, _) = base_ty.kind() {
+ self.tcx.mk_slice(*elem_ty)
+ } else {
+ bug!(
+ "AutorefOrPtrAdjustment's unsize flag should only be set for array ty, found {}",
+ base_ty
+ )
+ };
+ target = self
+ .tcx
+ .mk_ref(region, ty::TypeAndMut { mutbl: mutbl.into(), ty: unsized_ty });
+ adjustments
+ .push(Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), target });
+ }
+ }
+ Some(probe::AutorefOrPtrAdjustment::ToConstPtr) => {
+ target = match target.kind() {
+ &ty::RawPtr(ty::TypeAndMut { ty, mutbl }) => {
+ assert_eq!(mutbl, hir::Mutability::Mut);
+ self.tcx.mk_ptr(ty::TypeAndMut { mutbl: hir::Mutability::Not, ty })
+ }
+ other => panic!("Cannot adjust receiver type {:?} to const ptr", other),
+ };
+
+ adjustments.push(Adjustment {
+ kind: Adjust::Pointer(PointerCast::MutToConstPointer),
+ target,
+ });
+ }
+ None => {}
+ }
+
+ self.register_predicates(autoderef.into_obligations());
+
+ // Write out the final adjustments.
+ self.apply_adjustments(self.self_expr, adjustments);
+
+ target
+ }
+
+ /// Returns a set of substitutions for the method *receiver* where all type and region
+ /// parameters are instantiated with fresh variables. This substitution does not include any
+ /// parameters declared on the method itself.
+ ///
+ /// Note that this substitution may include late-bound regions from the impl level. If so,
+ /// these are instantiated later in the `instantiate_method_sig` routine.
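+ ///
+ /// For example, for a call to `m` in `impl<A> Foo<A> { fn m<B>(&self) {} }`,
+ /// this returns fresh substs for `A` only; the method's own `B` is
+ /// supplied later by `instantiate_method_substs`.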
+ fn fresh_receiver_substs(
+ &mut self,
+ self_ty: Ty<'tcx>,
+ pick: &probe::Pick<'tcx>,
+ ) -> SubstsRef<'tcx> {
+ match pick.kind {
+ probe::InherentImplPick => {
+ let impl_def_id = pick.item.container_id(self.tcx);
+ assert!(
+ self.tcx.impl_trait_ref(impl_def_id).is_none(),
+ "impl {:?} is not an inherent impl",
+ impl_def_id
+ );
+ self.fresh_substs_for_item(self.span, impl_def_id)
+ }
+
+ probe::ObjectPick => {
+ let trait_def_id = pick.item.container_id(self.tcx);
+ self.extract_existential_trait_ref(self_ty, |this, object_ty, principal| {
+ // The object data has no entry for the Self
+ // Type. For the purposes of this method call, we
+ // substitute the object type itself. This
+ // wouldn't be a sound substitution in all cases,
+ // since each instance of the object type is a
+ // different existential and hence could match
+ // distinct types (e.g., if `Self` appeared as an
+ // argument type), but those cases have already
+ // been ruled out when we deemed the trait to be
+ // "object safe".
+ let original_poly_trait_ref = principal.with_self_ty(this.tcx, object_ty);
+ let upcast_poly_trait_ref = this.upcast(original_poly_trait_ref, trait_def_id);
+ let upcast_trait_ref =
+ this.replace_bound_vars_with_fresh_vars(upcast_poly_trait_ref);
+ debug!(
+ "original_poly_trait_ref={:?} upcast_trait_ref={:?} target_trait={:?}",
+ original_poly_trait_ref, upcast_trait_ref, trait_def_id
+ );
+ upcast_trait_ref.substs
+ })
+ }
+
+ probe::TraitPick => {
+ let trait_def_id = pick.item.container_id(self.tcx);
+
+ // Make a trait reference `$0 : Trait<$1...$n>`
+ // consisting entirely of type variables. Later on in
+ // the process we will unify the transformed-self-type
+ // of the method with the actual type in order to
+ // unify some of these variables.
+ self.fresh_substs_for_item(self.span, trait_def_id)
+ }
+
+ probe::WhereClausePick(poly_trait_ref) => {
+ // Where clauses can have bound regions in them. We need to instantiate
+ // those to convert from a poly-trait-ref to a trait-ref.
+ self.replace_bound_vars_with_fresh_vars(poly_trait_ref).substs
+ }
+ }
+ }
+
+ fn extract_existential_trait_ref<R, F>(&mut self, self_ty: Ty<'tcx>, mut closure: F) -> R
+ where
+ F: FnMut(&mut ConfirmContext<'a, 'tcx>, Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>) -> R,
+ {
+ // If we specified that this is an object method, then the
+ // self-type ought to be something that can be dereferenced to
+ // yield an object-type (e.g., `&Object` or `Box<Object>`
+ // etc).
+
+ // FIXME: this feels, like, super dubious
+ self.fcx
+ .autoderef(self.span, self_ty)
+ .include_raw_pointers()
+ .find_map(|(ty, _)| match ty.kind() {
+ ty::Dynamic(data, ..) => Some(closure(
+ self,
+ ty,
+ data.principal().unwrap_or_else(|| {
+ span_bug!(self.span, "calling trait method on empty object?")
+ }),
+ )),
+ _ => None,
+ })
+ .unwrap_or_else(|| {
+ span_bug!(
+ self.span,
+ "self-type `{}` for ObjectPick never dereferenced to an object",
+ self_ty
+ )
+ })
+ }
+
+ fn instantiate_method_substs(
+ &mut self,
+ pick: &probe::Pick<'tcx>,
+ seg: &hir::PathSegment<'_>,
+ parent_substs: SubstsRef<'tcx>,
+ ) -> SubstsRef<'tcx> {
+ // Determine the values for the generic parameters of the method.
+ // If they were not explicitly supplied, just construct fresh
+ // variables.
+ let generics = self.tcx.generics_of(pick.item.def_id);
+
+ let arg_count_correct = <dyn AstConv<'_>>::check_generic_arg_count_for_call(
+ self.tcx,
+ self.span,
+ pick.item.def_id,
+ generics,
+ seg,
+ IsMethodCall::Yes,
+ );
+
+ // Create subst for early-bound lifetime parameters, combining
+ // parameters from the type and those from the method.
+ assert_eq!(generics.parent_count, parent_substs.len());
+
+ struct MethodSubstsCtxt<'a, 'tcx> {
+ cfcx: &'a ConfirmContext<'a, 'tcx>,
+ pick: &'a probe::Pick<'tcx>,
+ seg: &'a hir::PathSegment<'a>,
+ }
+ impl<'a, 'tcx> CreateSubstsForGenericArgsCtxt<'a, 'tcx> for MethodSubstsCtxt<'a, 'tcx> {
+ fn args_for_def_id(
+ &mut self,
+ def_id: DefId,
+ ) -> (Option<&'a hir::GenericArgs<'a>>, bool) {
+ if def_id == self.pick.item.def_id {
+ if let Some(data) = self.seg.args {
+ return (Some(data), false);
+ }
+ }
+ (None, false)
+ }
+
+ fn provided_kind(
+ &mut self,
+ param: &ty::GenericParamDef,
+ arg: &GenericArg<'_>,
+ ) -> subst::GenericArg<'tcx> {
+ match (&param.kind, arg) {
+ (GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
+ <dyn AstConv<'_>>::ast_region_to_region(self.cfcx.fcx, lt, Some(param))
+ .into()
+ }
+ (GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
+ self.cfcx.to_ty(ty).into()
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => {
+ self.cfcx.const_arg_to_const(&ct.value, param.def_id).into()
+ }
+ (GenericParamDefKind::Type { .. }, GenericArg::Infer(inf)) => {
+ self.cfcx.ty_infer(Some(param), inf.span).into()
+ }
+ (GenericParamDefKind::Const { .. }, GenericArg::Infer(inf)) => {
+ let tcx = self.cfcx.tcx();
+ self.cfcx.ct_infer(tcx.type_of(param.def_id), Some(param), inf.span).into()
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn inferred_kind(
+ &mut self,
+ _substs: Option<&[subst::GenericArg<'tcx>]>,
+ param: &ty::GenericParamDef,
+ _infer_args: bool,
+ ) -> subst::GenericArg<'tcx> {
+ self.cfcx.var_for_def(self.cfcx.span, param)
+ }
+ }
+ <dyn AstConv<'_>>::create_substs_for_generic_args(
+ self.tcx,
+ pick.item.def_id,
+ parent_substs,
+ false,
+ None,
+ &arg_count_correct,
+ &mut MethodSubstsCtxt { cfcx: self, pick, seg },
+ )
+ }
+
+ fn unify_receivers(
+ &mut self,
+ self_ty: Ty<'tcx>,
+ method_self_ty: Ty<'tcx>,
+ pick: &probe::Pick<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) {
+ debug!(
+ "unify_receivers: self_ty={:?} method_self_ty={:?} span={:?} pick={:?}",
+ self_ty, method_self_ty, self.span, pick
+ );
+ let cause = self.cause(
+ self.span,
+ ObligationCauseCode::UnifyReceiver(Box::new(UnifyReceiverContext {
+ assoc_item: pick.item,
+ param_env: self.param_env,
+ substs,
+ })),
+ );
+ match self.at(&cause, self.param_env).sup(method_self_ty, self_ty) {
+ Ok(InferOk { obligations, value: () }) => {
+ self.register_predicates(obligations);
+ }
+ Err(_) => {
+ span_bug!(
+ self.span,
+ "{} was a subtype of {} but now is not?",
+ self_ty,
+ method_self_ty
+ );
+ }
+ }
+ }
+
+ // NOTE: this returns the *unnormalized* predicates and method sig. Because of
+ // inference guessing, the predicates and method signature can't be normalized
+ // until we unify the `Self` type.
+ fn instantiate_method_sig(
+ &mut self,
+ pick: &probe::Pick<'tcx>,
+ all_substs: SubstsRef<'tcx>,
+ ) -> (ty::FnSig<'tcx>, ty::InstantiatedPredicates<'tcx>) {
+ debug!("instantiate_method_sig(pick={:?}, all_substs={:?})", pick, all_substs);
+
+ // Instantiate the bounds on the method with the
+ // type/early-bound-regions substitutions performed. There can
+ // be no late-bound regions appearing here.
+ let def_id = pick.item.def_id;
+ let method_predicates = self.tcx.predicates_of(def_id).instantiate(self.tcx, all_substs);
+
+ debug!("method_predicates after subst = {:?}", method_predicates);
+
+ let sig = self.tcx.bound_fn_sig(def_id);
+
+ let sig = sig.subst(self.tcx, all_substs);
+ debug!("type scheme substituted, sig={:?}", sig);
+
+ let sig = self.replace_bound_vars_with_fresh_vars(sig);
+ debug!("late-bound lifetimes from method instantiated, sig={:?}", sig);
+
+ (sig, method_predicates)
+ }
+
+ fn add_obligations(
+ &mut self,
+ fty: Ty<'tcx>,
+ all_substs: SubstsRef<'tcx>,
+ method_predicates: ty::InstantiatedPredicates<'tcx>,
+ def_id: DefId,
+ ) {
+ debug!(
+ "add_obligations: fty={:?} all_substs={:?} method_predicates={:?} def_id={:?}",
+ fty, all_substs, method_predicates, def_id
+ );
+
+ // FIXME: could replace with the following, but we already calculated `method_predicates`,
+ // so we just call `predicates_for_generics` directly to avoid redoing work.
+ // `self.add_required_obligations(self.span, def_id, &all_substs);`
+ for obligation in traits::predicates_for_generics(
+ traits::ObligationCause::new(self.span, self.body_id, traits::ItemObligation(def_id)),
+ self.param_env,
+ method_predicates,
+ ) {
+ self.register_predicate(obligation);
+ }
+
+ // this is a projection from a trait reference, so we have to
+ // make sure that the trait reference inputs are well-formed.
+ self.add_wf_bounds(all_substs, self.call_expr);
+
+ // the function type must also be well-formed (this is not
+ // implied by the substs being well-formed because of inherent
+ // impls and late-bound regions - see issue #28609).
+ self.register_wf_obligation(fty.into(), self.span, traits::WellFormed(None));
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // MISCELLANY
+
+ fn predicates_require_illegal_sized_bound(
+ &self,
+ predicates: &ty::InstantiatedPredicates<'tcx>,
+ ) -> Option<Span> {
+ let sized_def_id = self.tcx.lang_items().sized_trait()?;
+
+ traits::elaborate_predicates(self.tcx, predicates.predicates.iter().copied())
+ // We don't care about regions here.
+ .filter_map(|obligation| match obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(trait_pred) if trait_pred.def_id() == sized_def_id => {
+ let span = iter::zip(&predicates.predicates, &predicates.spans)
+ .find_map(
+ |(p, span)| {
+ if *p == obligation.predicate { Some(*span) } else { None }
+ },
+ )
+ .unwrap_or(rustc_span::DUMMY_SP);
+ Some((trait_pred, span))
+ }
+ _ => None,
+ })
+ .find_map(|(trait_pred, span)| match trait_pred.self_ty().kind() {
+ ty::Dynamic(..) => Some(span),
+ _ => None,
+ })
+ }
+
+ fn enforce_illegal_method_limitations(&self, pick: &probe::Pick<'_>) {
+ // Disallow calls to the method `drop` defined in the `Drop` trait.
+ if let Some(trait_def_id) = pick.item.trait_container(self.tcx) {
+ callee::check_legal_trait_for_method_call(
+ self.tcx,
+ self.span,
+ Some(self.self_expr.span),
+ self.call_expr.span,
+ trait_def_id,
+ )
+ }
+ }
+
+ fn upcast(
+ &mut self,
+ source_trait_ref: ty::PolyTraitRef<'tcx>,
+ target_trait_def_id: DefId,
+ ) -> ty::PolyTraitRef<'tcx> {
+ let upcast_trait_refs =
+ traits::upcast_choices(self.tcx, source_trait_ref, target_trait_def_id);
+
+ // There must be exactly one trait ref, or we'd get an ambiguity error.
+ if upcast_trait_refs.len() != 1 {
+ span_bug!(
+ self.span,
+ "cannot uniquely upcast `{:?}` to `{:?}`: `{:?}`",
+ source_trait_ref,
+ target_trait_def_id,
+ upcast_trait_refs
+ );
+ }
+
+ upcast_trait_refs.into_iter().next().unwrap()
+ }
+
+ fn replace_bound_vars_with_fresh_vars<T>(&self, value: ty::Binder<'tcx, T>) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ self.fcx.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, value)
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/method/mod.rs b/compiler/rustc_typeck/src/check/method/mod.rs
new file mode 100644
index 000000000..0e678c41f
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/method/mod.rs
@@ -0,0 +1,658 @@
+//! Method lookup: the secret sauce of Rust. See the [rustc dev guide] for more information.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/method-lookup.html
+
+mod confirm;
+mod prelude2021;
+pub mod probe;
+mod suggest;
+
+pub use self::suggest::SelfSource;
+pub use self::MethodError::*;
+
+use crate::check::{Expectation, FnCtxt};
+use crate::ObligationCause;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Namespace};
+use rustc_hir::def_id::DefId;
+use rustc_infer::infer::{self, InferOk};
+use rustc_middle::ty::subst::Subst;
+use rustc_middle::ty::subst::{InternalSubsts, SubstsRef};
+use rustc_middle::ty::{
+ self, AssocKind, DefIdTree, GenericParamDefKind, ProjectionPredicate, ProjectionTy, Term,
+ ToPredicate, Ty, TypeVisitable,
+};
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+use rustc_trait_selection::traits;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+
+use self::probe::{IsSuggestion, ProbeScope};
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ probe::provide(providers);
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct MethodCallee<'tcx> {
+ /// Impl method ID (for inherent methods), or trait method ID otherwise.
+ pub def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+
+ /// Instantiated method signature, i.e., it has been
+ /// substituted, normalized, and has had late-bound
+ /// lifetimes replaced with inference variables.
+ pub sig: ty::FnSig<'tcx>,
+}
+
+#[derive(Debug)]
+pub enum MethodError<'tcx> {
+ // Did not find an applicable method, but we did find various near-misses that may work.
+ NoMatch(NoMatchData<'tcx>),
+
+ // Multiple methods might apply.
+ Ambiguity(Vec<CandidateSource>),
+
+ // Found an applicable method, but it is not visible. The third argument contains a list of
+ // not-in-scope traits which may work.
+ PrivateMatch(DefKind, DefId, Vec<DefId>),
+
+ // Found a `Self: Sized` bound where `Self` is a trait object; the caller
+ // may also have forgotten to import a trait.
+ IllegalSizedBound(Vec<DefId>, bool, Span),
+
+ // Found a match, but the return type is wrong
+ BadReturnType,
+}
+
+// Contains a list of static methods that may apply, a list of unsatisfied trait predicates which
+// could lead to matches if satisfied, and a list of not-in-scope traits which may work.
+#[derive(Debug)]
+pub struct NoMatchData<'tcx> {
+ pub static_candidates: Vec<CandidateSource>,
+ pub unsatisfied_predicates:
+ Vec<(ty::Predicate<'tcx>, Option<ty::Predicate<'tcx>>, Option<ObligationCause<'tcx>>)>,
+ pub out_of_scope_traits: Vec<DefId>,
+ pub lev_candidate: Option<ty::AssocItem>,
+ pub mode: probe::Mode,
+}
+
+// A pared down enum describing just the places from which a method
+// candidate can arise. Used for error reporting only.
+#[derive(Copy, Clone, Debug, Eq, Ord, PartialEq, PartialOrd)]
+pub enum CandidateSource {
+ Impl(DefId),
+ Trait(DefId /* trait id */),
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Determines whether the type `self_ty` supports a method name `method_name` or not.
+ #[instrument(level = "debug", skip(self))]
+ pub fn method_exists(
+ &self,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ call_expr_id: hir::HirId,
+ allow_private: bool,
+ ) -> bool {
+ let mode = probe::Mode::MethodCall;
+ match self.probe_for_name(
+ method_name.span,
+ mode,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ call_expr_id,
+ ProbeScope::TraitsInScope,
+ ) {
+ Ok(..) => true,
+ Err(NoMatch(..)) => false,
+ Err(Ambiguity(..)) => true,
+ Err(PrivateMatch(..)) => allow_private,
+ Err(IllegalSizedBound(..)) => true,
+ Err(BadReturnType) => bug!("no return type expectations but got BadReturnType"),
+ }
+ }
+
+ /// Adds a suggestion to call the given method to the provided diagnostic.
+ #[instrument(level = "debug", skip(self, err, call_expr))]
+ pub(crate) fn suggest_method_call(
+ &self,
+ err: &mut Diagnostic,
+ msg: &str,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ call_expr: &hir::Expr<'_>,
+ span: Option<Span>,
+ ) {
+ let params = self
+ .probe_for_name(
+ method_name.span,
+ probe::Mode::MethodCall,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ call_expr.hir_id,
+ ProbeScope::TraitsInScope,
+ )
+ .map(|pick| {
+ let sig = self.tcx.fn_sig(pick.item.def_id);
+ sig.inputs().skip_binder().len().saturating_sub(1)
+ })
+ .unwrap_or(0);
+
+ // Account for `foo.bar<T>`;
+ let sugg_span = span.unwrap_or(call_expr.span).shrink_to_hi();
+ let (suggestion, applicability) = (
+ format!("({})", (0..params).map(|_| "_").collect::<Vec<_>>().join(", ")),
+ if params > 0 { Applicability::HasPlaceholders } else { Applicability::MaybeIncorrect },
+ );
+
+ err.span_suggestion_verbose(sugg_span, msg, suggestion, applicability);
+ }
+
+ /// Performs method lookup. If lookup is successful, it will return the callee
+ /// and store an appropriate adjustment for the self-expr. In some cases it may
+ /// report an error (e.g., invoking the `drop` method).
+ ///
+ /// # Arguments
+ ///
+ /// Given a method call like `foo.bar::<T1,...Tn>(a, b + 1, ...)`:
+ ///
+ /// * `self`: the surrounding `FnCtxt` (!)
+ /// * `self_ty`: the (unadjusted) type of the self expression (`foo`)
+ /// * `segment`: the name and generic arguments of the method (`bar::<T1, ...Tn>`)
+ /// * `span`: the span for the method call
+ /// * `call_expr`: the complete method call: (`foo.bar::<T1,...Tn>(...)`)
+ /// * `self_expr`: the self expression (`foo`)
+ /// * `args`: the expressions of the arguments (`a, b + 1, ...`)
+ #[instrument(level = "debug", skip(self, call_expr, self_expr))]
+ pub fn lookup_method(
+ &self,
+ self_ty: Ty<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ span: Span,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) -> Result<MethodCallee<'tcx>, MethodError<'tcx>> {
+ debug!(
+ "lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})",
+ segment.ident, self_ty, call_expr, self_expr
+ );
+
+ let pick =
+ self.lookup_probe(span, segment.ident, self_ty, call_expr, ProbeScope::TraitsInScope)?;
+
+ self.lint_dot_call_from_2018(self_ty, segment, span, call_expr, self_expr, &pick, args);
+
+ for import_id in &pick.import_ids {
+ debug!("used_trait_import: {:?}", import_id);
+ Lrc::get_mut(&mut self.typeck_results.borrow_mut().used_trait_imports)
+ .unwrap()
+ .insert(*import_id);
+ }
+
+ self.tcx.check_stability(pick.item.def_id, Some(call_expr.hir_id), span, None);
+
+ let result =
+ self.confirm_method(span, self_expr, call_expr, self_ty, pick.clone(), segment);
+ debug!("result = {:?}", result);
+
+ if let Some(span) = result.illegal_sized_bound {
+ let mut needs_mut = false;
+ if let ty::Ref(region, t_type, mutability) = self_ty.kind() {
+ let trait_type = self
+ .tcx
+ .mk_ref(*region, ty::TypeAndMut { ty: *t_type, mutbl: mutability.invert() });
+ // We probe again to see if there might be a borrow mutability discrepancy.
+ match self.lookup_probe(
+ span,
+ segment.ident,
+ trait_type,
+ call_expr,
+ ProbeScope::TraitsInScope,
+ ) {
+ Ok(ref new_pick) if *new_pick != pick => {
+ needs_mut = true;
+ }
+ _ => {}
+ }
+ }
+
+ // We probe again, taking all traits into account (not only those in scope).
+ let mut candidates = match self.lookup_probe(
+ span,
+ segment.ident,
+ self_ty,
+ call_expr,
+ ProbeScope::AllTraits,
+ ) {
+ // If we find a different result the caller probably forgot to import a trait.
+ Ok(ref new_pick) if *new_pick != pick => vec![new_pick.item.container_id(self.tcx)],
+ Err(Ambiguity(ref sources)) => sources
+ .iter()
+ .filter_map(|source| {
+ match *source {
+ // Note: this cannot come from an inherent impl,
+ // because the first probing succeeded.
+ CandidateSource::Impl(def) => self.tcx.trait_id_of_impl(def),
+ CandidateSource::Trait(_) => None,
+ }
+ })
+ .collect(),
+ _ => Vec::new(),
+ };
+ candidates.retain(|candidate| *candidate != self.tcx.parent(result.callee.def_id));
+
+ return Err(IllegalSizedBound(candidates, needs_mut, span));
+ }
+
+ Ok(result.callee)
+ }
+
+ #[instrument(level = "debug", skip(self, call_expr))]
+ pub fn lookup_probe(
+ &self,
+ span: Span,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ scope: ProbeScope,
+ ) -> probe::PickResult<'tcx> {
+ let mode = probe::Mode::MethodCall;
+ let self_ty = self.resolve_vars_if_possible(self_ty);
+ self.probe_for_name(
+ span,
+ mode,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ call_expr.hir_id,
+ scope,
+ )
+ }
+
+ pub(super) fn obligation_for_method(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_types: Option<&[Ty<'tcx>]>,
+ ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List<ty::subst::GenericArg<'tcx>>)
+ {
+ // Construct a trait-reference `self_ty : Trait<input_tys>`
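+ // (`Self` sits at index 0; remaining type params take the supplied
+ // input types in order, or fresh inference variables if none were
+ // given.)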
+ let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
+ match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {}
+ GenericParamDefKind::Type { .. } => {
+ if param.index == 0 {
+ return self_ty.into();
+ } else if let Some(input_types) = opt_input_types {
+ return input_types[param.index as usize - 1].into();
+ }
+ }
+ }
+ self.var_for_def(span, param)
+ });
+
+ let trait_ref = ty::TraitRef::new(trait_def_id, substs);
+
+ // Construct an obligation
+ let poly_trait_ref = ty::Binder::dummy(trait_ref);
+ (
+ traits::Obligation::misc(
+ span,
+ self.body_id,
+ self.param_env,
+ poly_trait_ref.without_const().to_predicate(self.tcx),
+ ),
+ substs,
+ )
+ }
+
+ pub(super) fn obligation_for_op_method(
+ &self,
+ span: Span,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_type: Option<Ty<'tcx>>,
+ opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
+ expected: Expectation<'tcx>,
+ ) -> (traits::Obligation<'tcx, ty::Predicate<'tcx>>, &'tcx ty::List<ty::subst::GenericArg<'tcx>>)
+ {
+ // Construct a trait-reference `self_ty : Trait<input_tys>`
+ let substs = InternalSubsts::for_item(self.tcx, trait_def_id, |param, _| {
+ match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => {}
+ GenericParamDefKind::Type { .. } => {
+ if param.index == 0 {
+ return self_ty.into();
+ } else if let Some(input_type) = opt_input_type {
+ return input_type.into();
+ }
+ }
+ }
+ self.var_for_def(span, param)
+ });
+
+ let trait_ref = ty::TraitRef::new(trait_def_id, substs);
+
+ // Construct an obligation
+ let poly_trait_ref = ty::Binder::dummy(trait_ref);
+ let opt_output_ty =
+ expected.only_has_type(self).and_then(|ty| (!ty.needs_infer()).then(|| ty));
+ let opt_output_assoc_item = self.tcx.associated_items(trait_def_id).find_by_name_and_kind(
+ self.tcx,
+ Ident::from_str("Output"),
+ AssocKind::Type,
+ trait_def_id,
+ );
+ let output_pred =
+ opt_output_ty.zip(opt_output_assoc_item).map(|(output_ty, output_assoc_item)| {
+ ty::Binder::dummy(ty::PredicateKind::Projection(ProjectionPredicate {
+ projection_ty: ProjectionTy { substs, item_def_id: output_assoc_item.def_id },
+ term: Term::Ty(output_ty),
+ }))
+ .to_predicate(self.tcx)
+ });
+
+ (
+ traits::Obligation::new(
+ traits::ObligationCause::new(
+ span,
+ self.body_id,
+ traits::BinOp {
+ rhs_span: opt_input_expr.map(|expr| expr.span),
+ is_lit: opt_input_expr
+ .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))),
+ output_pred,
+ },
+ ),
+ self.param_env,
+ poly_trait_ref.without_const().to_predicate(self.tcx),
+ ),
+ substs,
+ )
+ }
+
+ /// `lookup_method_in_trait` is used for overloaded operators.
+ /// It performs only a narrow slice of what the normal probe/confirm path does.
+ /// In particular, it doesn't really do any probing: it simply constructs
+ /// an obligation for a particular trait with the given self type and checks
+ /// whether that trait is implemented.
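+ ///
+ /// Overloaded place operations are one caller: e.g. deref resolves
+ /// `Deref::deref` with the base type as `self_ty` and no extra input
+ /// types.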
+ #[instrument(level = "debug", skip(self, span, opt_input_types))]
+ pub(super) fn lookup_method_in_trait(
+ &self,
+ span: Span,
+ m_name: Ident,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_types: Option<&[Ty<'tcx>]>,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ debug!(
+ "lookup_in_trait_adjusted(self_ty={:?}, m_name={}, trait_def_id={:?}, opt_input_types={:?})",
+ self_ty, m_name, trait_def_id, opt_input_types
+ );
+
+ let (obligation, substs) =
+ self.obligation_for_method(span, trait_def_id, self_ty, opt_input_types);
+ self.construct_obligation_for_trait(
+ span,
+ m_name,
+ trait_def_id,
+ obligation,
+ substs,
+ None,
+ false,
+ )
+ }
+
+ pub(super) fn lookup_op_method_in_trait(
+ &self,
+ span: Span,
+ m_name: Ident,
+ trait_def_id: DefId,
+ self_ty: Ty<'tcx>,
+ opt_input_type: Option<Ty<'tcx>>,
+ opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
+ expected: Expectation<'tcx>,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ let (obligation, substs) = self.obligation_for_op_method(
+ span,
+ trait_def_id,
+ self_ty,
+ opt_input_type,
+ opt_input_expr,
+ expected,
+ );
+ self.construct_obligation_for_trait(
+ span,
+ m_name,
+ trait_def_id,
+ obligation,
+ substs,
+ opt_input_expr,
+ true,
+ )
+ }
+
+ // FIXME(#18741): it seems likely that we can consolidate some of this
+ // code with the other method-lookup code. In particular, the second half
+ // of this method is basically the same as confirmation.
+ fn construct_obligation_for_trait(
+ &self,
+ span: Span,
+ m_name: Ident,
+ trait_def_id: DefId,
+ obligation: traits::PredicateObligation<'tcx>,
+ substs: &'tcx ty::List<ty::subst::GenericArg<'tcx>>,
+ opt_input_expr: Option<&'tcx hir::Expr<'tcx>>,
+ is_op: bool,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ debug!(?obligation);
+
+ // Now we want to know if this can be matched
+ if !self.predicate_may_hold(&obligation) {
+ debug!("--> Cannot match obligation");
+ // Cannot be matched, no such method resolution is possible.
+ return None;
+ }
+
+ // The trait must have a method named `m_name`, and it should not have
+ // type parameters or early-bound regions.
+ let tcx = self.tcx;
+ let Some(method_item) = self.associated_value(trait_def_id, m_name) else {
+ tcx.sess.delay_span_bug(
+ span,
+ "operator trait does not have corresponding operator method",
+ );
+ return None;
+ };
+ let def_id = method_item.def_id;
+ let generics = tcx.generics_of(def_id);
+ assert_eq!(generics.params.len(), 0);
+
+ debug!("lookup_in_trait_adjusted: method_item={:?}", method_item);
+ let mut obligations = vec![];
+
+ // Instantiate late-bound regions and substitute the trait
+ // parameters into the method type to get the actual method type.
+ //
+ // N.B., instantiate late-bound regions first so that
+ // `instantiate_type_scheme` can normalize associated types that
+ // may reference those regions.
+ let fn_sig = tcx.bound_fn_sig(def_id);
+ let fn_sig = fn_sig.subst(self.tcx, substs);
+ let fn_sig = self.replace_bound_vars_with_fresh_vars(span, infer::FnCall, fn_sig);
+
+ let InferOk { value, obligations: o } = if is_op {
+ self.normalize_op_associated_types_in_as_infer_ok(span, fn_sig, opt_input_expr)
+ } else {
+ self.normalize_associated_types_in_as_infer_ok(span, fn_sig)
+ };
+ let fn_sig = {
+ obligations.extend(o);
+ value
+ };
+
+ // Register obligations for the parameters. This will include the
+ // `Self` parameter, which in turn has a bound of the main trait,
+ // so this also effectively registers `obligation` as well. (We
+ // used to register `obligation` explicitly, but that resulted in
+ // double error messages being reported.)
+ //
+ // Note that as the method comes from a trait, it should not have
+ // any late-bound regions appearing in its bounds.
+ let bounds = self.tcx.predicates_of(def_id).instantiate(self.tcx, substs);
+
+ let InferOk { value, obligations: o } = if is_op {
+ self.normalize_op_associated_types_in_as_infer_ok(span, bounds, opt_input_expr)
+ } else {
+ self.normalize_associated_types_in_as_infer_ok(span, bounds)
+ };
+ let bounds = {
+ obligations.extend(o);
+ value
+ };
+
+ assert!(!bounds.has_escaping_bound_vars());
+
+ let cause = if is_op {
+ ObligationCause::new(
+ span,
+ self.body_id,
+ traits::BinOp {
+ rhs_span: opt_input_expr.map(|expr| expr.span),
+ is_lit: opt_input_expr
+ .map_or(false, |expr| matches!(expr.kind, hir::ExprKind::Lit(_))),
+ output_pred: None,
+ },
+ )
+ } else {
+ traits::ObligationCause::misc(span, self.body_id)
+ };
+ obligations.extend(traits::predicates_for_generics(cause.clone(), self.param_env, bounds));
+
+ // Also add an obligation for the method type being well-formed.
+ let method_ty = tcx.mk_fn_ptr(ty::Binder::dummy(fn_sig));
+ debug!(
+ "lookup_in_trait_adjusted: matched method method_ty={:?} obligation={:?}",
+ method_ty, obligation
+ );
+ obligations.push(traits::Obligation::new(
+ cause,
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(method_ty.into())).to_predicate(tcx),
+ ));
+
+ let callee = MethodCallee { def_id, substs, sig: fn_sig };
+
+ debug!("callee = {:?}", callee);
+
+ Some(InferOk { obligations, value: callee })
+ }
+
+ /// Performs a [fully-qualified function call] (formerly "universal function call") lookup. If
+ /// lookup is successful, it will return the kind of the definition ([`DefKind`]) and the
+ /// [`DefId`] of the found function definition.
+ ///
+ /// [fully-qualified function call]: https://doc.rust-lang.org/reference/expressions/call-expr.html#disambiguating-function-calls
+ ///
+ /// # Arguments
+ ///
+ /// Given a function call like `Foo::bar::<T1,...Tn>(...)`:
+ ///
+ /// * `self`: the surrounding `FnCtxt` (!)
+ /// * `span`: the span of the call, excluding arguments (`Foo::bar::<T1, ...Tn>`)
+ /// * `method_name`: the identifier of the function within the container type (`bar`)
+ /// * `self_ty`: the type to search within (`Foo`)
+ /// * `self_ty_span`: the span for the type being searched within (span of `Foo`)
+ /// * `expr_id`: the [`hir::HirId`] of the expression composing the entire call
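+ ///
+ /// For example (illustrative), a call written as `<Foo as Bar>::bar(x)` or
+ /// `Foo::bar(x)` resolves through this path using `probe::Mode::Path`,
+ /// rather than through method-call probing.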
+ #[instrument(level = "debug", skip(self))]
+ pub fn resolve_fully_qualified_call(
+ &self,
+ span: Span,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ self_ty_span: Span,
+ expr_id: hir::HirId,
+ ) -> Result<(DefKind, DefId), MethodError<'tcx>> {
+ debug!(
+ "resolve_fully_qualified_call: method_name={:?} self_ty={:?} expr_id={:?}",
+ method_name, self_ty, expr_id,
+ );
+
+ let tcx = self.tcx;
+
+ // Check if we have an enum variant.
+ if let ty::Adt(adt_def, _) = self_ty.kind() {
+ if adt_def.is_enum() {
+ let variant_def = adt_def
+ .variants()
+ .iter()
+ .find(|vd| tcx.hygienic_eq(method_name, vd.ident(tcx), adt_def.did()));
+ if let Some(variant_def) = variant_def {
+ // Braced variants generate unusable names in value namespace (reserved for
+ // possible future use), so variants resolved as associated items may refer to
+ // them as well. It's ok to use the variant's id as a ctor id since an
+ // error will be reported on any use of such resolution anyway.
+ let ctor_def_id = variant_def.ctor_def_id.unwrap_or(variant_def.def_id);
+ tcx.check_stability(ctor_def_id, Some(expr_id), span, Some(method_name.span));
+ return Ok((
+ DefKind::Ctor(CtorOf::Variant, variant_def.ctor_kind),
+ ctor_def_id,
+ ));
+ }
+ }
+ }
+
+ let pick = self.probe_for_name(
+ span,
+ probe::Mode::Path,
+ method_name,
+ IsSuggestion(false),
+ self_ty,
+ expr_id,
+ ProbeScope::TraitsInScope,
+ )?;
+
+ self.lint_fully_qualified_call_from_2018(
+ span,
+ method_name,
+ self_ty,
+ self_ty_span,
+ expr_id,
+ &pick,
+ );
+
+ debug!("resolve_fully_qualified_call: pick={:?}", pick);
+ {
+ let mut typeck_results = self.typeck_results.borrow_mut();
+ let used_trait_imports = Lrc::get_mut(&mut typeck_results.used_trait_imports).unwrap();
+ for import_id in pick.import_ids {
+ debug!("resolve_fully_qualified_call: used_trait_import: {:?}", import_id);
+ used_trait_imports.insert(import_id);
+ }
+ }
+
+ let def_kind = pick.item.kind.as_def_kind();
+ debug!(
+ "resolve_fully_qualified_call: def_kind={:?}, def_id={:?}",
+ def_kind, pick.item.def_id
+ );
+ tcx.check_stability(pick.item.def_id, Some(expr_id), span, Some(method_name.span));
+ Ok((def_kind, pick.item.def_id))
+ }
+
+ /// Finds the item with name `item_name` defined in the impl/trait `def_id`
+ /// and returns it, or `None` if no such item was defined there.
+ pub fn associated_value(&self, def_id: DefId, item_name: Ident) -> Option<ty::AssocItem> {
+ self.tcx
+ .associated_items(def_id)
+ .find_by_name_and_namespace(self.tcx, item_name, Namespace::ValueNS, def_id)
+ .copied()
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/method/prelude2021.rs b/compiler/rustc_typeck/src/check/method/prelude2021.rs
new file mode 100644
index 000000000..7c68d9304
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/method/prelude2021.rs
@@ -0,0 +1,419 @@
+use hir::def_id::DefId;
+use hir::HirId;
+use hir::ItemKind;
+use rustc_ast::Mutability;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{Adt, Array, Ref, Ty};
+use rustc_session::lint::builtin::RUST_2021_PRELUDE_COLLISIONS;
+use rustc_span::symbol::kw::{Empty, Underscore};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_trait_selection::infer::InferCtxtExt;
+
+use crate::check::{
+ method::probe::{self, Pick},
+ FnCtxt,
+};
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub(super) fn lint_dot_call_from_2018(
+ &self,
+ self_ty: Ty<'tcx>,
+ segment: &hir::PathSegment<'_>,
+ span: Span,
+ call_expr: &'tcx hir::Expr<'tcx>,
+ self_expr: &'tcx hir::Expr<'tcx>,
+ pick: &Pick<'tcx>,
+ args: &'tcx [hir::Expr<'tcx>],
+ ) {
+ debug!(
+ "lookup(method_name={}, self_ty={:?}, call_expr={:?}, self_expr={:?})",
+ segment.ident, self_ty, call_expr, self_expr
+ );
+
+ // Rust 2021 and later is already using the new prelude
+ if span.rust_2021() {
+ return;
+ }
+
+ let prelude_or_array_lint = match segment.ident.name {
+ // `try_into` was added to the prelude in Rust 2021.
+ sym::try_into => RUST_2021_PRELUDE_COLLISIONS,
+ // `into_iter` wasn't added to the prelude,
+ // but `[T; N].into_iter()` doesn't resolve to IntoIterator::into_iter
+ // before Rust 2021, which results in the same problem.
+ // It is only a problem for arrays.
+ sym::into_iter if let Array(..) = self_ty.kind() => {
+ // In this case, it wasn't really a prelude addition that was the problem.
+ // Instead, the problem is that the array-into_iter hack will no longer apply in Rust 2021.
+ rustc_lint::ARRAY_INTO_ITER
+ }
+ _ => return,
+ };
+
+ // No need to lint if method came from std/core, as that will now be in the prelude
+ if matches!(self.tcx.crate_name(pick.item.def_id.krate), sym::std | sym::core) {
+ return;
+ }
+
+ if matches!(pick.kind, probe::PickKind::InherentImplPick | probe::PickKind::ObjectPick) {
+ // avoid repeatedly adding unneeded `&*`s
+ if pick.autoderefs == 1
+ && matches!(
+ pick.autoref_or_ptr_adjustment,
+ Some(probe::AutorefOrPtrAdjustment::Autoref { .. })
+ )
+ && matches!(self_ty.kind(), Ref(..))
+ {
+ return;
+ }
+
+ // if it's an inherent `self` method (not `&self` or `&mut self`), it will take
+ // precedence over the `TryInto` impl, and thus won't break in the 2021 edition
+ if pick.autoderefs == 0 && pick.autoref_or_ptr_adjustment.is_none() {
+ return;
+ }
+
+ // For inherent impls, not relying on autoref and autoderef is enough to
+ // ensure that the trait implementation won't be used
+ self.tcx.struct_span_lint_hir(
+ prelude_or_array_lint,
+ self_expr.hir_id,
+ self_expr.span,
+ |lint| {
+ let sp = self_expr.span;
+
+ let mut lint = lint.build(&format!(
+ "trait method `{}` will become ambiguous in Rust 2021",
+ segment.ident.name
+ ));
+
+ let derefs = "*".repeat(pick.autoderefs);
+
+ let autoref = match pick.autoref_or_ptr_adjustment {
+ Some(probe::AutorefOrPtrAdjustment::Autoref {
+ mutbl: Mutability::Mut,
+ ..
+ }) => "&mut ",
+ Some(probe::AutorefOrPtrAdjustment::Autoref {
+ mutbl: Mutability::Not,
+ ..
+ }) => "&",
+ Some(probe::AutorefOrPtrAdjustment::ToConstPtr) | None => "",
+ };
+ if let Ok(self_expr) = self.sess().source_map().span_to_snippet(self_expr.span)
+ {
+ let self_adjusted = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) =
+ pick.autoref_or_ptr_adjustment
+ {
+ format!("{}{} as *const _", derefs, self_expr)
+ } else {
+ format!("{}{}{}", autoref, derefs, self_expr)
+ };
+
+ lint.span_suggestion(
+ sp,
+ "disambiguate the method call",
+ format!("({})", self_adjusted),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ let self_adjusted = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) =
+ pick.autoref_or_ptr_adjustment
+ {
+ format!("{}(...) as *const _", derefs)
+ } else {
+ format!("{}{}...", autoref, derefs)
+ };
+ lint.span_help(
+ sp,
+ &format!("disambiguate the method call with `({})`", self_adjusted,),
+ );
+ }
+
+ lint.emit();
+ },
+ );
+ } else {
+ // trait implementations require full disambiguation to not clash with the new prelude
+ // additions (i.e. convert from dot-call to fully-qualified call)
+ self.tcx.struct_span_lint_hir(
+ prelude_or_array_lint,
+ call_expr.hir_id,
+ call_expr.span,
+ |lint| {
+ let sp = call_expr.span;
+ let trait_name = self.trait_path_or_bare_name(
+ span,
+ call_expr.hir_id,
+ pick.item.container_id(self.tcx),
+ );
+
+ let mut lint = lint.build(&format!(
+ "trait method `{}` will become ambiguous in Rust 2021",
+ segment.ident.name
+ ));
+
+ let (self_adjusted, precise) = self.adjust_expr(pick, self_expr, sp);
+ if precise {
+ let args = args
+ .iter()
+ .skip(1)
+ .map(|arg| {
+ let span = arg.span.find_ancestor_inside(sp).unwrap_or_default();
+ format!(
+ ", {}",
+ self.sess().source_map().span_to_snippet(span).unwrap()
+ )
+ })
+ .collect::<String>();
+
+ lint.span_suggestion(
+ sp,
+ "disambiguate the associated function",
+ format!(
+ "{}::{}{}({}{})",
+ trait_name,
+ segment.ident.name,
+ if let Some(args) = segment.args.as_ref().and_then(|args| self
+ .sess()
+ .source_map()
+ .span_to_snippet(args.span_ext)
+ .ok())
+ {
+ // Keep turbofish.
+ format!("::{}", args)
+ } else {
+ String::new()
+ },
+ self_adjusted,
+ args,
+ ),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ lint.span_help(
+ sp,
+ &format!(
+ "disambiguate the associated function with `{}::{}(...)`",
+ trait_name, segment.ident,
+ ),
+ );
+ }
+
+ lint.emit();
+ },
+ );
+ }
+ }
+
+ pub(super) fn lint_fully_qualified_call_from_2018(
+ &self,
+ span: Span,
+ method_name: Ident,
+ self_ty: Ty<'tcx>,
+ self_ty_span: Span,
+ expr_id: hir::HirId,
+ pick: &Pick<'tcx>,
+ ) {
+ // Rust 2021 and later is already using the new prelude
+ if span.rust_2021() {
+ return;
+ }
+
+ // These are the fully qualified methods added to the prelude in Rust 2021
+ if !matches!(method_name.name, sym::try_into | sym::try_from | sym::from_iter) {
+ return;
+ }
+
+ // No need to lint if method came from std/core, as that will now be in the prelude
+ if matches!(self.tcx.crate_name(pick.item.def_id.krate), sym::std | sym::core) {
+ return;
+ }
+
+ // For from_iter, check if the type actually implements FromIterator.
+ // If we know it does not, we don't need to warn.
+ if method_name.name == sym::from_iter {
+ if let Some(trait_def_id) = self.tcx.get_diagnostic_item(sym::FromIterator) {
+ if !self
+ .infcx
+ .type_implements_trait(
+ trait_def_id,
+ self_ty,
+ InternalSubsts::empty(),
+ self.param_env,
+ )
+ .may_apply()
+ {
+ return;
+ }
+ }
+ }
+
+ // No need to lint if this is an inherent method called on a specific type, like `Vec::foo(...)`,
+ // since such methods take precedence over trait methods.
+ if matches!(pick.kind, probe::PickKind::InherentImplPick) {
+ return;
+ }
+
+ self.tcx.struct_span_lint_hir(RUST_2021_PRELUDE_COLLISIONS, expr_id, span, |lint| {
+ // "type" refers to either a type or, more likely, a trait from which
+ // the associated function or method is from.
+ let container_id = pick.item.container_id(self.tcx);
+ let trait_path = self.trait_path_or_bare_name(span, expr_id, container_id);
+ let trait_generics = self.tcx.generics_of(container_id);
+
+ let trait_name =
+ if trait_generics.params.len() <= trait_generics.has_self as usize {
+ trait_path
+ } else {
+ let counts = trait_generics.own_counts();
+ format!(
+ "{}<{}>",
+ trait_path,
+ std::iter::repeat("'_")
+ .take(counts.lifetimes)
+ .chain(std::iter::repeat("_").take(
+ counts.types + counts.consts - trait_generics.has_self as usize
+ ))
+ .collect::<Vec<_>>()
+ .join(", ")
+ )
+ };
+
+ let mut lint = lint.build(&format!(
+ "trait-associated function `{}` will become ambiguous in Rust 2021",
+ method_name.name
+ ));
+
+ let mut self_ty_name = self_ty_span
+ .find_ancestor_inside(span)
+ .and_then(|span| self.sess().source_map().span_to_snippet(span).ok())
+ .unwrap_or_else(|| self_ty.to_string());
+
+ // Get the number of generics the self type has (if an Adt), unless we can
+ // determine that the user has already written the self type with generics,
+ // which we (naively) detect by looking for a "<" in `self_ty_name`.
+ if !self_ty_name.contains('<') {
+ if let Adt(def, _) = self_ty.kind() {
+ let generics = self.tcx.generics_of(def.did());
+ if !generics.params.is_empty() {
+ let counts = generics.own_counts();
+ self_ty_name += &format!(
+ "<{}>",
+ std::iter::repeat("'_")
+ .take(counts.lifetimes)
+ .chain(std::iter::repeat("_").take(counts.types + counts.consts))
+ .collect::<Vec<_>>()
+ .join(", ")
+ );
+ }
+ }
+ }
+ lint.span_suggestion(
+ span,
+ "disambiguate the associated function",
+ format!("<{} as {}>::{}", self_ty_name, trait_name, method_name.name,),
+ Applicability::MachineApplicable,
+ );
+
+ lint.emit();
+ });
+ }
+
+ fn trait_path_or_bare_name(
+ &self,
+ span: Span,
+ expr_hir_id: HirId,
+ trait_def_id: DefId,
+ ) -> String {
+ self.trait_path(span, expr_hir_id, trait_def_id).unwrap_or_else(|| {
+ let key = self.tcx.def_key(trait_def_id);
+ format!("{}", key.disambiguated_data.data)
+ })
+ }
+
+ fn trait_path(&self, span: Span, expr_hir_id: HirId, trait_def_id: DefId) -> Option<String> {
+ let applicable_traits = self.tcx.in_scope_traits(expr_hir_id)?;
+ let applicable_trait = applicable_traits.iter().find(|t| t.def_id == trait_def_id)?;
+ if applicable_trait.import_ids.is_empty() {
+ // The trait was declared within the module, we only need to use its name.
+ return None;
+ }
+
+ let import_items: Vec<_> = applicable_trait
+ .import_ids
+ .iter()
+ .map(|&import_id| self.tcx.hir().expect_item(import_id))
+ .collect();
+
+ // Find an identifier with which this trait was imported (note that `_` doesn't count).
+ let any_id = import_items
+ .iter()
+ .filter_map(|item| if item.ident.name != Underscore { Some(item.ident) } else { None })
+ .next();
+ if let Some(any_id) = any_id {
+ if any_id.name == Empty {
+ // Glob import, so just use its name.
+ return None;
+ } else {
+ return Some(format!("{}", any_id));
+ }
+ }
+
+ // All that is left is `_`! We need to use the full path. It doesn't matter which one we pick,
+ // so just take the first one.
+ match import_items[0].kind {
+ ItemKind::Use(path, _) => Some(
+ path.segments
+ .iter()
+ .map(|segment| segment.ident.to_string())
+ .collect::<Vec<_>>()
+ .join("::"),
+ ),
+ _ => {
+ span_bug!(span, "unexpected item kind, expected a use: {:?}", import_items[0].kind);
+ }
+ }
+ }
+
+ /// Creates a string version of the `expr` that includes explicit adjustments.
+ /// Returns the string and also a bool indicating whether this is a *precise*
+ /// suggestion.
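+ /// For example (an illustrative sketch): with one autoderef and a `&mut`
+ /// autoref, an expression `x` is rendered as `&mut *x`; `precise` is `true`
+ /// when the snippet could be recovered from the source map.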
+ fn adjust_expr(
+ &self,
+ pick: &Pick<'tcx>,
+ expr: &hir::Expr<'tcx>,
+ outer: Span,
+ ) -> (String, bool) {
+ let derefs = "*".repeat(pick.autoderefs);
+
+ let autoref = match pick.autoref_or_ptr_adjustment {
+ Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl: Mutability::Mut, .. }) => "&mut ",
+ Some(probe::AutorefOrPtrAdjustment::Autoref { mutbl: Mutability::Not, .. }) => "&",
+ Some(probe::AutorefOrPtrAdjustment::ToConstPtr) | None => "",
+ };
+
+ let (expr_text, precise) = if let Some(expr_text) = expr
+ .span
+ .find_ancestor_inside(outer)
+ .and_then(|span| self.sess().source_map().span_to_snippet(span).ok())
+ {
+ (expr_text, true)
+ } else {
+ ("(..)".to_string(), false)
+ };
+
+ let adjusted_text = if let Some(probe::AutorefOrPtrAdjustment::ToConstPtr) =
+ pick.autoref_or_ptr_adjustment
+ {
+ format!("{}{} as *const _", derefs, expr_text)
+ } else {
+ format!("{}{}{}", autoref, derefs, expr_text)
+ };
+
+ (adjusted_text, precise)
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/method/probe.rs b/compiler/rustc_typeck/src/check/method/probe.rs
new file mode 100644
index 000000000..efe15fec7
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/method/probe.rs
@@ -0,0 +1,1932 @@
+use super::suggest;
+use super::CandidateSource;
+use super::MethodError;
+use super::NoMatchData;
+
+use crate::check::FnCtxt;
+use crate::errors::MethodCallOnUnknownType;
+use crate::hir::def::DefKind;
+use crate::hir::def_id::DefId;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_hir::def::Namespace;
+use rustc_infer::infer::canonical::OriginalQueryValues;
+use rustc_infer::infer::canonical::{Canonical, QueryResponse};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::{self, InferOk, TyCtxtInferExt};
+use rustc_middle::infer::unify_key::{ConstVariableOrigin, ConstVariableOriginKind};
+use rustc_middle::middle::stability;
+use rustc_middle::ty::fast_reject::{simplify_type, TreatParams};
+use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
+use rustc_middle::ty::GenericParamDefKind;
+use rustc_middle::ty::{self, ParamEnvAnd, ToPredicate, Ty, TyCtxt, TypeFoldable, TypeVisitable};
+use rustc_session::lint;
+use rustc_span::def_id::LocalDefId;
+use rustc_span::lev_distance::{
+ find_best_match_for_name_with_substrings, lev_distance_with_substrings,
+};
+use rustc_span::symbol::sym;
+use rustc_span::{symbol::Ident, Span, Symbol, DUMMY_SP};
+use rustc_trait_selection::autoderef::{self, Autoderef};
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt;
+use rustc_trait_selection::traits::query::method_autoderef::MethodAutoderefBadTy;
+use rustc_trait_selection::traits::query::method_autoderef::{
+ CandidateStep, MethodAutoderefStepsResult,
+};
+use rustc_trait_selection::traits::query::CanonicalTyGoal;
+use rustc_trait_selection::traits::{self, ObligationCause};
+use std::cmp::max;
+use std::iter;
+use std::mem;
+use std::ops::Deref;
+
+use smallvec::{smallvec, SmallVec};
+
+use self::CandidateKind::*;
+pub use self::PickKind::*;
+
+/// Boolean flag used to indicate if this search is for a suggestion
+/// or not. If true, we can allow ambiguity and so forth.
+#[derive(Clone, Copy, Debug)]
+pub struct IsSuggestion(pub bool);
+
+struct ProbeContext<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ mode: Mode,
+ method_name: Option<Ident>,
+ return_type: Option<Ty<'tcx>>,
+
+ /// This is the `OriginalQueryValues` for the canonicalized query whose
+ /// results are the autoderef steps stored in `steps`.
+ orig_steps_var_values: OriginalQueryValues<'tcx>,
+ steps: &'tcx [CandidateStep<'tcx>],
+
+ inherent_candidates: Vec<Candidate<'tcx>>,
+ extension_candidates: Vec<Candidate<'tcx>>,
+ impl_dups: FxHashSet<DefId>,
+
+ /// Collects near misses when the candidate functions are missing a `self`
+ /// keyword; this is only used for error reporting.
+ static_candidates: Vec<CandidateSource>,
+
+ /// When probing for names, include names that are close to the
+ /// requested name (by Levenshtein distance)
+ allow_similar_names: bool,
+
+ /// Some(candidate) if there is a private candidate
+ private_candidate: Option<(DefKind, DefId)>,
+
+ /// Collects near misses when trait bounds for type parameters are
+ /// unsatisfied; this is only used for error reporting.
+ unsatisfied_predicates:
+ Vec<(ty::Predicate<'tcx>, Option<ty::Predicate<'tcx>>, Option<ObligationCause<'tcx>>)>,
+
+ is_suggestion: IsSuggestion,
+
+ scope_expr_id: hir::HirId,
+}
+
+impl<'a, 'tcx> Deref for ProbeContext<'a, 'tcx> {
+ type Target = FnCtxt<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ self.fcx
+ }
+}
+
+#[derive(Debug, Clone)]
+struct Candidate<'tcx> {
+ // Candidates are (I'm not quite sure, but they are mostly) basically
+ // some metadata on top of a `ty::AssocItem` (without substs).
+ //
+ // However, method probing wants to be able to evaluate the predicates
+ // for a function with the substs applied - for example, if a function
+ // has `where Self: Sized`, we don't want to consider it unless `Self`
+ // is actually `Sized`, and similarly, return-type suggestions want
+ // to consider the "actual" return type.
+ //
+ // The way this is handled is through `xform_self_ty`. It contains
+ // the receiver type of this candidate, but `xform_self_ty`,
+ // `xform_ret_ty` and `kind` (which contains the predicates) have the
+ // generic parameters of this candidate substituted with the *same set*
+ // of inference variables, which acts as some weird sort of "query".
+ //
+ // When we check out a candidate, we require `xform_self_ty` to be
+ // a subtype of the passed-in self-type, and this equates the type
+ // variables in the rest of the fields.
+ //
+ // For example, if we have this candidate:
+ // ```
+ // trait Foo {
+ // fn foo(&self) where Self: Sized;
+ // }
+ // ```
+ //
+ // Then `xform_self_ty` will be `&'erased ?X` and `kind` will contain
+ // the predicate `?X: Sized`, so if we are evaluating `Foo` for a
+ // the receiver `&T`, we'll do the subtyping which will make `?X`
+ // get the right value, then when we evaluate the predicate we'll check
+ // if `T: Sized`.
+ xform_self_ty: Ty<'tcx>,
+ xform_ret_ty: Option<Ty<'tcx>>,
+ item: ty::AssocItem,
+ kind: CandidateKind<'tcx>,
+ import_ids: SmallVec<[LocalDefId; 1]>,
+}
+
+#[derive(Debug, Clone)]
+enum CandidateKind<'tcx> {
+ InherentImplCandidate(
+ SubstsRef<'tcx>,
+ // Normalize obligations
+ Vec<traits::PredicateObligation<'tcx>>,
+ ),
+ ObjectCandidate,
+ TraitCandidate(ty::TraitRef<'tcx>),
+ WhereClauseCandidate(
+ // Trait
+ ty::PolyTraitRef<'tcx>,
+ ),
+}
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+enum ProbeResult {
+ NoMatch,
+ BadReturnType,
+ Match,
+}
+
+/// When adjusting a receiver we often want to do one of
+///
+/// - Add a `&` (or `&mut`), converting the receiver from `T` to `&T` (or `&mut T`)
+/// - If the receiver has type `*mut T`, convert it to `*const T`
+///
+/// This type tells us which one to do.
+///
+/// Note that in principle we could do both at the same time. For example, when the receiver has
+/// type `T`, we could autoref it to `&T`, then convert to `*const T`. Or, when it has type `*mut
+/// T`, we could convert it to `*const T`, then autoref to `&*const T`. However, currently we do
+/// (at most) one of these. Either the receiver has type `T` and we convert it to `&T` (or with
+/// `mut`), or it has type `*mut T` and we convert it to `*const T`.
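+///
+/// A sketch of the autoref case (illustrative):
+/// ```ignore (illustrative)
+/// let s = String::from("hi");
+/// s.len(); // receiver `String` is autoref'd to `&String` to match `fn len(&self)`
+/// ```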
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum AutorefOrPtrAdjustment {
+ /// Receiver has type `T`; add `&` or `&mut` (the latter if `mutbl` is `mut`), and maybe also "unsize" it.
+ /// Unsizing is used to convert a `[T; N]` to `[T]`, which only makes sense when autorefing.
+ Autoref {
+ mutbl: hir::Mutability,
+
+ /// Indicates that the source expression should be "unsized" to a target type.
+ /// This is special-cased for just arrays unsizing to slices.
+ unsize: bool,
+ },
+ /// Receiver has type `*mut T`, convert to `*const T`
+ ToConstPtr,
+}
+
+impl AutorefOrPtrAdjustment {
+ fn get_unsize(&self) -> bool {
+ match self {
+ AutorefOrPtrAdjustment::Autoref { mutbl: _, unsize } => *unsize,
+ AutorefOrPtrAdjustment::ToConstPtr => false,
+ }
+ }
+}
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct Pick<'tcx> {
+ pub item: ty::AssocItem,
+ pub kind: PickKind<'tcx>,
+ pub import_ids: SmallVec<[LocalDefId; 1]>,
+
+ /// Indicates that the source expression should be autoderef'd N times
+ /// ```ignore (not-rust)
+ /// A = expr | *expr | **expr | ...
+ /// ```
+ pub autoderefs: usize,
+
+ /// Indicates that we want to add an autoref (and maybe also unsize it), or if the receiver is
+ /// `*mut T`, convert it to `*const T`.
+ pub autoref_or_ptr_adjustment: Option<AutorefOrPtrAdjustment>,
+ pub self_ty: Ty<'tcx>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq)]
+pub enum PickKind<'tcx> {
+ InherentImplPick,
+ ObjectPick,
+ TraitPick,
+ WhereClausePick(
+ // Trait
+ ty::PolyTraitRef<'tcx>,
+ ),
+}
+
+pub type PickResult<'tcx> = Result<Pick<'tcx>, MethodError<'tcx>>;
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub enum Mode {
+ // An expression of the form `receiver.method_name(...)`.
+ // Autoderefs are performed on `receiver`, lookup is done based on the
+ // `self` argument of the method, and static methods aren't considered.
+ MethodCall,
+ // An expression of the form `Type::item` or `<T>::item`.
+ // No autoderefs are performed, lookup is done based on the type each
+ // implementation is for, and static methods are included.
+ Path,
+}
+
+#[derive(PartialEq, Eq, Copy, Clone, Debug)]
+pub enum ProbeScope {
+ // Assemble candidates coming only from traits in scope.
+ TraitsInScope,
+
+ // Assemble candidates coming from all traits.
+ AllTraits,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// This is used to offer suggestions to users. It returns methods
+ /// that could have been called which have the desired return
+ /// type. Some effort is made to rule out methods that, if called,
+ /// would result in an error (basically, the same criteria we
+ /// would use to decide if a method is a plausible fit for
+ /// ambiguity purposes).
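+ ///
+ /// For example (illustrative): when a `usize` is expected but the receiver
+ /// is a `Vec<T>`, this can surface candidates such as `len`, whose return
+ /// type matches the expectation.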
+ #[instrument(level = "debug", skip(self, scope_expr_id))]
+ pub fn probe_for_return_type(
+ &self,
+ span: Span,
+ mode: Mode,
+ return_type: Ty<'tcx>,
+ self_ty: Ty<'tcx>,
+ scope_expr_id: hir::HirId,
+ ) -> Vec<ty::AssocItem> {
+ debug!(
+ "probe(self_ty={:?}, return_type={}, scope_expr_id={})",
+ self_ty, return_type, scope_expr_id
+ );
+ let method_names = self
+ .probe_op(
+ span,
+ mode,
+ None,
+ Some(return_type),
+ IsSuggestion(true),
+ self_ty,
+ scope_expr_id,
+ ProbeScope::AllTraits,
+ |probe_cx| Ok(probe_cx.candidate_method_names()),
+ )
+ .unwrap_or_default();
+ method_names
+ .iter()
+ .flat_map(|&method_name| {
+ self.probe_op(
+ span,
+ mode,
+ Some(method_name),
+ Some(return_type),
+ IsSuggestion(true),
+ self_ty,
+ scope_expr_id,
+ ProbeScope::AllTraits,
+ |probe_cx| probe_cx.pick(),
+ )
+ .ok()
+ .map(|pick| pick.item)
+ })
+ .collect()
+ }
+
+ #[instrument(level = "debug", skip(self, scope_expr_id))]
+ pub fn probe_for_name(
+ &self,
+ span: Span,
+ mode: Mode,
+ item_name: Ident,
+ is_suggestion: IsSuggestion,
+ self_ty: Ty<'tcx>,
+ scope_expr_id: hir::HirId,
+ scope: ProbeScope,
+ ) -> PickResult<'tcx> {
+ debug!(
+ "probe(self_ty={:?}, item_name={}, scope_expr_id={})",
+ self_ty, item_name, scope_expr_id
+ );
+ self.probe_op(
+ span,
+ mode,
+ Some(item_name),
+ None,
+ is_suggestion,
+ self_ty,
+ scope_expr_id,
+ scope,
+ |probe_cx| probe_cx.pick(),
+ )
+ }
+
+ fn probe_op<OP, R>(
+ &'a self,
+ span: Span,
+ mode: Mode,
+ method_name: Option<Ident>,
+ return_type: Option<Ty<'tcx>>,
+ is_suggestion: IsSuggestion,
+ self_ty: Ty<'tcx>,
+ scope_expr_id: hir::HirId,
+ scope: ProbeScope,
+ op: OP,
+ ) -> Result<R, MethodError<'tcx>>
+ where
+ OP: FnOnce(ProbeContext<'a, 'tcx>) -> Result<R, MethodError<'tcx>>,
+ {
+ let mut orig_values = OriginalQueryValues::default();
+ let param_env_and_self_ty = self.canonicalize_query(
+ ParamEnvAnd { param_env: self.param_env, value: self_ty },
+ &mut orig_values,
+ );
+
+ let steps = if mode == Mode::MethodCall {
+ self.tcx.method_autoderef_steps(param_env_and_self_ty)
+ } else {
+ self.probe(|_| {
+ // Mode::Path - the deref steps are "trivial". This turns
+ // our CanonicalQuery into a "trivial" QueryResponse. This
+ // is a bit inefficient, but I don't think that writing
+ // special handling for this "trivial case" is a good idea.
+
+ let infcx = &self.infcx;
+ let (ParamEnvAnd { param_env: _, value: self_ty }, canonical_inference_vars) =
+ infcx.instantiate_canonical_with_fresh_inference_vars(
+ span,
+ &param_env_and_self_ty,
+ );
+ debug!(
+ "probe_op: Mode::Path, param_env_and_self_ty={:?} self_ty={:?}",
+ param_env_and_self_ty, self_ty
+ );
+ MethodAutoderefStepsResult {
+ steps: infcx.tcx.arena.alloc_from_iter([CandidateStep {
+ self_ty: self.make_query_response_ignoring_pending_obligations(
+ canonical_inference_vars,
+ self_ty,
+ ),
+ autoderefs: 0,
+ from_unsafe_deref: false,
+ unsize: false,
+ }]),
+ opt_bad_ty: None,
+ reached_recursion_limit: false,
+ }
+ })
+ };
+
+ // If our autoderef loop had reached the recursion limit,
+ // report an overflow error, but continue going on with
+ // the truncated autoderef list.
+ if steps.reached_recursion_limit {
+ self.probe(|_| {
+ let ty = &steps
+ .steps
+ .last()
+ .unwrap_or_else(|| span_bug!(span, "reached the recursion limit in 0 steps?"))
+ .self_ty;
+ let ty = self
+ .probe_instantiate_query_response(span, &orig_values, ty)
+ .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty));
+ autoderef::report_autoderef_recursion_limit_error(self.tcx, span, ty.value);
+ });
+ }
+
+ // If we encountered an `_` type or an error type during autoderef, this is
+ // ambiguous.
+ if let Some(bad_ty) = &steps.opt_bad_ty {
+ if is_suggestion.0 {
+ // Ambiguity was encountered during a suggestion. Just keep going.
+ debug!("ProbeContext: encountered ambiguity in suggestion");
+ } else if bad_ty.reached_raw_pointer && !self.tcx.features().arbitrary_self_types {
+ // this case used to be allowed by the compiler,
+ // so we do a future-compat lint here for the 2015 edition
+ // (see https://github.com/rust-lang/rust/issues/46906)
+ if self.tcx.sess.rust_2018() {
+ self.tcx.sess.emit_err(MethodCallOnUnknownType { span });
+ } else {
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::TYVAR_BEHIND_RAW_POINTER,
+ scope_expr_id,
+ span,
+ |lint| {
+ lint.build("type annotations needed").emit();
+ },
+ );
+ }
+ } else {
+ // Encountered a real ambiguity, so abort the lookup. If `ty` is not
+ // an `Err`, report the right "type annotations needed" error pointing
+ // to it.
+ let ty = &bad_ty.ty;
+ let ty = self
+ .probe_instantiate_query_response(span, &orig_values, ty)
+ .unwrap_or_else(|_| span_bug!(span, "instantiating {:?} failed?", ty));
+ let ty = self.structurally_resolved_type(span, ty.value);
+ assert!(matches!(ty.kind(), ty::Error(_)));
+ return Err(MethodError::NoMatch(NoMatchData {
+ static_candidates: Vec::new(),
+ unsatisfied_predicates: Vec::new(),
+ out_of_scope_traits: Vec::new(),
+ lev_candidate: None,
+ mode,
+ }));
+ }
+ }
+
+ debug!("ProbeContext: steps for self_ty={:?} are {:?}", self_ty, steps);
+
+ // This creates one big transaction so that all the type variables, etc.,
+ // that we create during the probe process are removed later.
+ self.probe(|_| {
+ let mut probe_cx = ProbeContext::new(
+ self,
+ span,
+ mode,
+ method_name,
+ return_type,
+ orig_values,
+ steps.steps,
+ is_suggestion,
+ scope_expr_id,
+ );
+
+ probe_cx.assemble_inherent_candidates();
+ match scope {
+ ProbeScope::TraitsInScope => {
+ probe_cx.assemble_extension_candidates_for_traits_in_scope(scope_expr_id)
+ }
+ ProbeScope::AllTraits => probe_cx.assemble_extension_candidates_for_all_traits(),
+ };
+ op(probe_cx)
+ })
+ }
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ providers.method_autoderef_steps = method_autoderef_steps;
+}
+
+fn method_autoderef_steps<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ goal: CanonicalTyGoal<'tcx>,
+) -> MethodAutoderefStepsResult<'tcx> {
+ debug!("method_autoderef_steps({:?})", goal);
+
+ tcx.infer_ctxt().enter_with_canonical(DUMMY_SP, &goal, |ref infcx, goal, inference_vars| {
+ let ParamEnvAnd { param_env, value: self_ty } = goal;
+
+ let mut autoderef =
+ Autoderef::new(infcx, param_env, hir::CRATE_HIR_ID, DUMMY_SP, self_ty, DUMMY_SP)
+ .include_raw_pointers()
+ .silence_errors();
+ let mut reached_raw_pointer = false;
+ let mut steps: Vec<_> = autoderef
+ .by_ref()
+ .map(|(ty, d)| {
+ let step = CandidateStep {
+ self_ty: infcx.make_query_response_ignoring_pending_obligations(
+ inference_vars.clone(),
+ ty,
+ ),
+ autoderefs: d,
+ from_unsafe_deref: reached_raw_pointer,
+ unsize: false,
+ };
+ if let ty::RawPtr(_) = ty.kind() {
+ // all the subsequent steps will be from_unsafe_deref
+ reached_raw_pointer = true;
+ }
+ step
+ })
+ .collect();
+
+ let final_ty = autoderef.final_ty(true);
+ let opt_bad_ty = match final_ty.kind() {
+ ty::Infer(ty::TyVar(_)) | ty::Error(_) => Some(MethodAutoderefBadTy {
+ reached_raw_pointer,
+ ty: infcx
+ .make_query_response_ignoring_pending_obligations(inference_vars, final_ty),
+ }),
+ ty::Array(elem_ty, _) => {
+ let dereferences = steps.len() - 1;
+
+ steps.push(CandidateStep {
+ self_ty: infcx.make_query_response_ignoring_pending_obligations(
+ inference_vars,
+ infcx.tcx.mk_slice(*elem_ty),
+ ),
+ autoderefs: dereferences,
+ // this could be from an unsafe deref if we had
+ // a *mut/const [T; N]
+ from_unsafe_deref: reached_raw_pointer,
+ unsize: true,
+ });
+
+ None
+ }
+ _ => None,
+ };
+
+ debug!("method_autoderef_steps: steps={:?} opt_bad_ty={:?}", steps, opt_bad_ty);
+
+ MethodAutoderefStepsResult {
+ steps: tcx.arena.alloc_from_iter(steps),
+ opt_bad_ty: opt_bad_ty.map(|ty| &*tcx.arena.alloc(ty)),
+ reached_recursion_limit: autoderef.reached_recursion_limit(),
+ }
+ })
+}
+
+impl<'a, 'tcx> ProbeContext<'a, 'tcx> {
+ fn new(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ span: Span,
+ mode: Mode,
+ method_name: Option<Ident>,
+ return_type: Option<Ty<'tcx>>,
+ orig_steps_var_values: OriginalQueryValues<'tcx>,
+ steps: &'tcx [CandidateStep<'tcx>],
+ is_suggestion: IsSuggestion,
+ scope_expr_id: hir::HirId,
+ ) -> ProbeContext<'a, 'tcx> {
+ ProbeContext {
+ fcx,
+ span,
+ mode,
+ method_name,
+ return_type,
+ inherent_candidates: Vec::new(),
+ extension_candidates: Vec::new(),
+ impl_dups: FxHashSet::default(),
+ orig_steps_var_values,
+ steps,
+ static_candidates: Vec::new(),
+ allow_similar_names: false,
+ private_candidate: None,
+ unsatisfied_predicates: Vec::new(),
+ is_suggestion,
+ scope_expr_id,
+ }
+ }
+
+ fn reset(&mut self) {
+ self.inherent_candidates.clear();
+ self.extension_candidates.clear();
+ self.impl_dups.clear();
+ self.static_candidates.clear();
+ self.private_candidate = None;
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // CANDIDATE ASSEMBLY
+
+ fn push_candidate(&mut self, candidate: Candidate<'tcx>, is_inherent: bool) {
+ let is_accessible = if let Some(name) = self.method_name {
+ let item = candidate.item;
+ let def_scope = self
+ .tcx
+ .adjust_ident_and_get_scope(name, item.container_id(self.tcx), self.body_id)
+ .1;
+ item.visibility(self.tcx).is_accessible_from(def_scope, self.tcx)
+ } else {
+ true
+ };
+ if is_accessible {
+ if is_inherent {
+ self.inherent_candidates.push(candidate);
+ } else {
+ self.extension_candidates.push(candidate);
+ }
+ } else if self.private_candidate.is_none() {
+ self.private_candidate =
+ Some((candidate.item.kind.as_def_kind(), candidate.item.def_id));
+ }
+ }
+
+ fn assemble_inherent_candidates(&mut self) {
+ for step in self.steps.iter() {
+ self.assemble_probe(&step.self_ty);
+ }
+ }
+
+ fn assemble_probe(&mut self, self_ty: &Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>) {
+ debug!("assemble_probe: self_ty={:?}", self_ty);
+ let raw_self_ty = self_ty.value.value;
+ match *raw_self_ty.kind() {
+ ty::Dynamic(data, ..) if let Some(p) = data.principal() => {
+ // Subtle: we can't use `instantiate_query_response` here: using it will
+ // commit to all of the type equalities assumed by inference going through
+ // autoderef (see the `method-probe-no-guessing` test).
+ //
+ // However, in this code, it is OK if we end up with an object type that is
+ // "more general" than the object type that we are evaluating. For *every*
+ // object type `MY_OBJECT`, a function call that goes through a trait-ref
+ // of the form `<MY_OBJECT as SuperTraitOf(MY_OBJECT)>::func` is a valid
+ // `ObjectCandidate`, and it should be discoverable "exactly" through one
+ // of the iterations in the autoderef loop, so there is no problem with it
+ // being discoverable in another one of these iterations.
+ //
+ // Using `instantiate_canonical_with_fresh_inference_vars` on our
+ // `Canonical<QueryResponse<Ty<'tcx>>>` and then *throwing away* the
+ // `CanonicalVarValues` will exactly give us such a generalization - it
+ // will still match the original object type, but it won't pollute our
+ // type variables in any form, so just do that!
+ let (QueryResponse { value: generalized_self_ty, .. }, _ignored_var_values) =
+ self.fcx
+ .instantiate_canonical_with_fresh_inference_vars(self.span, self_ty);
+
+ self.assemble_inherent_candidates_from_object(generalized_self_ty);
+ self.assemble_inherent_impl_candidates_for_type(p.def_id());
+ if self.tcx.has_attr(p.def_id(), sym::rustc_has_incoherent_inherent_impls) {
+ self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty);
+ }
+ }
+ ty::Adt(def, _) => {
+ let def_id = def.did();
+ self.assemble_inherent_impl_candidates_for_type(def_id);
+ if self.tcx.has_attr(def_id, sym::rustc_has_incoherent_inherent_impls) {
+ self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty);
+ }
+ }
+ ty::Foreign(did) => {
+ self.assemble_inherent_impl_candidates_for_type(did);
+ if self.tcx.has_attr(did, sym::rustc_has_incoherent_inherent_impls) {
+ self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty);
+ }
+ }
+ ty::Param(p) => {
+ self.assemble_inherent_candidates_from_param(p);
+ }
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::Never
+ | ty::Tuple(..) => self.assemble_inherent_candidates_for_incoherent_ty(raw_self_ty),
+ _ => {}
+ }
+ }
+
+ fn assemble_inherent_candidates_for_incoherent_ty(&mut self, self_ty: Ty<'tcx>) {
+ let Some(simp) = simplify_type(self.tcx, self_ty, TreatParams::AsInfer) else {
+ bug!("unexpected incoherent type: {:?}", self_ty)
+ };
+ for &impl_def_id in self.tcx.incoherent_impls(simp) {
+ self.assemble_inherent_impl_probe(impl_def_id);
+ }
+ }
+
+ fn assemble_inherent_impl_candidates_for_type(&mut self, def_id: DefId) {
+ let impl_def_ids = self.tcx.at(self.span).inherent_impls(def_id);
+ for &impl_def_id in impl_def_ids.iter() {
+ self.assemble_inherent_impl_probe(impl_def_id);
+ }
+ }
+
+ fn assemble_inherent_impl_probe(&mut self, impl_def_id: DefId) {
+ if !self.impl_dups.insert(impl_def_id) {
+ return; // already visited
+ }
+
+ debug!("assemble_inherent_impl_probe {:?}", impl_def_id);
+
+ for item in self.impl_or_trait_item(impl_def_id) {
+ if !self.has_applicable_self(&item) {
+ // No receiver declared. Not a candidate.
+ self.record_static_candidate(CandidateSource::Impl(impl_def_id));
+ continue;
+ }
+
+ let (impl_ty, impl_substs) = self.impl_ty_and_substs(impl_def_id);
+ let impl_ty = impl_ty.subst(self.tcx, impl_substs);
+
+ debug!("impl_ty: {:?}", impl_ty);
+
+ // Determine the receiver type that the method itself expects.
+ let (xform_self_ty, xform_ret_ty) = self.xform_self_ty(&item, impl_ty, impl_substs);
+ debug!("xform_self_ty: {:?}, xform_ret_ty: {:?}", xform_self_ty, xform_ret_ty);
+
+ // We can't use normalize_associated_types_in as it will pollute the
+ // fcx's fulfillment context after this probe is over.
+ // Note: we only normalize `xform_self_ty` here since the normalization
+ // of the return type can lead to inference results that prohibit
+ // valid candidates from being found, see issue #85671
+ // FIXME Postponing the normalization of the return type likely only hides a deeper bug,
+ // which might be caused by the `param_env` itself. The clauses of the `param_env`
+ // maybe shouldn't include `Param`s, but rather fresh variables or be canonicalized,
+ // see issue #89650
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
+ let selcx = &mut traits::SelectionContext::new(self.fcx);
+ let traits::Normalized { value: xform_self_ty, obligations } =
+ traits::normalize(selcx, self.param_env, cause, xform_self_ty);
+ debug!(
+ "assemble_inherent_impl_probe after normalization: xform_self_ty = {:?}/{:?}",
+ xform_self_ty, xform_ret_ty
+ );
+
+ self.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ kind: InherentImplCandidate(impl_substs, obligations),
+ import_ids: smallvec![],
+ },
+ true,
+ );
+ }
+ }
+
+ fn assemble_inherent_candidates_from_object(&mut self, self_ty: Ty<'tcx>) {
+ debug!("assemble_inherent_candidates_from_object(self_ty={:?})", self_ty);
+
+ let principal = match self_ty.kind() {
+ ty::Dynamic(ref data, ..) => Some(data),
+ _ => None,
+ }
+ .and_then(|data| data.principal())
+ .unwrap_or_else(|| {
+ span_bug!(
+ self.span,
+ "non-object {:?} in assemble_inherent_candidates_from_object",
+ self_ty
+ )
+ });
+
+ // It is illegal to invoke a method on a trait object if the method
+ // references the `Self` type. An [`ObjectSafetyViolation::SupertraitSelf`] error
+ // will be reported by `object_safety.rs` if the method refers to the
+ // `Self` type anywhere other than the receiver. Here, we use a
+ // substitution that replaces `Self` with the object type itself. Hence,
+ // a `&self` method will wind up with an argument type like `&dyn Trait`.
+ let trait_ref = principal.with_self_ty(self.tcx, self_ty);
+ self.elaborate_bounds(iter::once(trait_ref), |this, new_trait_ref, item| {
+ let new_trait_ref = this.erase_late_bound_regions(new_trait_ref);
+
+ let (xform_self_ty, xform_ret_ty) =
+ this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs);
+ this.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ kind: ObjectCandidate,
+ import_ids: smallvec![],
+ },
+ true,
+ );
+ });
+ }
+
+ fn assemble_inherent_candidates_from_param(&mut self, param_ty: ty::ParamTy) {
+ // FIXME: do we want to commit to this behavior for param bounds?
+ debug!("assemble_inherent_candidates_from_param(param_ty={:?})", param_ty);
+
+ let bounds = self.param_env.caller_bounds().iter().filter_map(|predicate| {
+ let bound_predicate = predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(trait_predicate) => {
+ match *trait_predicate.trait_ref.self_ty().kind() {
+ ty::Param(p) if p == param_ty => {
+ Some(bound_predicate.rebind(trait_predicate.trait_ref))
+ }
+ _ => None,
+ }
+ }
+ ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::Projection(..)
+ | ty::PredicateKind::RegionOutlives(..)
+ | ty::PredicateKind::WellFormed(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::TypeOutlives(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ });
+
+ self.elaborate_bounds(bounds, |this, poly_trait_ref, item| {
+ let trait_ref = this.erase_late_bound_regions(poly_trait_ref);
+
+ let (xform_self_ty, xform_ret_ty) =
+ this.xform_self_ty(&item, trait_ref.self_ty(), trait_ref.substs);
+
+ // Because this trait derives from a where-clause, it
+ // should not contain any inference variables or other
+ // artifacts. This means it is safe to put into the
+ // `WhereClauseCandidate` and (eventually) into the
+ // `WhereClausePick`.
+ assert!(!trait_ref.substs.needs_infer());
+
+ this.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ kind: WhereClauseCandidate(poly_trait_ref),
+ import_ids: smallvec![],
+ },
+ true,
+ );
+ });
+ }
+
+ // Do a search through a list of bounds, using a callback to actually
+ // create the candidates.
+ fn elaborate_bounds<F>(
+ &mut self,
+ bounds: impl Iterator<Item = ty::PolyTraitRef<'tcx>>,
+ mut mk_cand: F,
+ ) where
+ F: for<'b> FnMut(&mut ProbeContext<'b, 'tcx>, ty::PolyTraitRef<'tcx>, ty::AssocItem),
+ {
+ let tcx = self.tcx;
+ for bound_trait_ref in traits::transitive_bounds(tcx, bounds) {
+ debug!("elaborate_bounds(bound_trait_ref={:?})", bound_trait_ref);
+ for item in self.impl_or_trait_item(bound_trait_ref.def_id()) {
+ if !self.has_applicable_self(&item) {
+ self.record_static_candidate(CandidateSource::Trait(bound_trait_ref.def_id()));
+ } else {
+ mk_cand(self, bound_trait_ref, item);
+ }
+ }
+ }
+ }
+
+ fn assemble_extension_candidates_for_traits_in_scope(&mut self, expr_hir_id: hir::HirId) {
+ let mut duplicates = FxHashSet::default();
+ let opt_applicable_traits = self.tcx.in_scope_traits(expr_hir_id);
+ if let Some(applicable_traits) = opt_applicable_traits {
+ for trait_candidate in applicable_traits.iter() {
+ let trait_did = trait_candidate.def_id;
+ if duplicates.insert(trait_did) {
+ self.assemble_extension_candidates_for_trait(
+ &trait_candidate.import_ids,
+ trait_did,
+ );
+ }
+ }
+ }
+ }
+
+ fn assemble_extension_candidates_for_all_traits(&mut self) {
+ let mut duplicates = FxHashSet::default();
+ for trait_info in suggest::all_traits(self.tcx) {
+ if duplicates.insert(trait_info.def_id) {
+ self.assemble_extension_candidates_for_trait(&smallvec![], trait_info.def_id);
+ }
+ }
+ }
+
+ pub fn matches_return_type(
+ &self,
+ method: &ty::AssocItem,
+ self_ty: Option<Ty<'tcx>>,
+ expected: Ty<'tcx>,
+ ) -> bool {
+ match method.kind {
+ ty::AssocKind::Fn => {
+ let fty = self.tcx.bound_fn_sig(method.def_id);
+ self.probe(|_| {
+ let substs = self.fresh_substs_for_item(self.span, method.def_id);
+ let fty = fty.subst(self.tcx, substs);
+ let fty =
+ self.replace_bound_vars_with_fresh_vars(self.span, infer::FnCall, fty);
+
+ if let Some(self_ty) = self_ty {
+ if self
+ .at(&ObligationCause::dummy(), self.param_env)
+ .sup(fty.inputs()[0], self_ty)
+ .is_err()
+ {
+ return false;
+ }
+ }
+ self.can_sub(self.param_env, fty.output(), expected).is_ok()
+ })
+ }
+ _ => false,
+ }
+ }
+
+ fn assemble_extension_candidates_for_trait(
+ &mut self,
+ import_ids: &SmallVec<[LocalDefId; 1]>,
+ trait_def_id: DefId,
+ ) {
+ debug!("assemble_extension_candidates_for_trait(trait_def_id={:?})", trait_def_id);
+ let trait_substs = self.fresh_item_substs(trait_def_id);
+ let trait_ref = ty::TraitRef::new(trait_def_id, trait_substs);
+
+ if self.tcx.is_trait_alias(trait_def_id) {
+ // For trait aliases, assume all supertraits are relevant.
+ let bounds = iter::once(ty::Binder::dummy(trait_ref));
+ self.elaborate_bounds(bounds, |this, new_trait_ref, item| {
+ let new_trait_ref = this.erase_late_bound_regions(new_trait_ref);
+
+ let (xform_self_ty, xform_ret_ty) =
+ this.xform_self_ty(&item, new_trait_ref.self_ty(), new_trait_ref.substs);
+ this.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ import_ids: import_ids.clone(),
+ kind: TraitCandidate(new_trait_ref),
+ },
+ false,
+ );
+ });
+ } else {
+ debug_assert!(self.tcx.is_trait(trait_def_id));
+ for item in self.impl_or_trait_item(trait_def_id) {
+ // Check whether `trait_def_id` defines a method with a suitable name.
+ if !self.has_applicable_self(&item) {
+ debug!("method has inapplicable self");
+ self.record_static_candidate(CandidateSource::Trait(trait_def_id));
+ continue;
+ }
+
+ let (xform_self_ty, xform_ret_ty) =
+ self.xform_self_ty(&item, trait_ref.self_ty(), trait_substs);
+ self.push_candidate(
+ Candidate {
+ xform_self_ty,
+ xform_ret_ty,
+ item,
+ import_ids: import_ids.clone(),
+ kind: TraitCandidate(trait_ref),
+ },
+ false,
+ );
+ }
+ }
+ }
+
+ fn candidate_method_names(&self) -> Vec<Ident> {
+ let mut set = FxHashSet::default();
+ let mut names: Vec<_> = self
+ .inherent_candidates
+ .iter()
+ .chain(&self.extension_candidates)
+ .filter(|candidate| {
+ if let Some(return_ty) = self.return_type {
+ self.matches_return_type(&candidate.item, None, return_ty)
+ } else {
+ true
+ }
+ })
+ .map(|candidate| candidate.item.ident(self.tcx))
+ .filter(|&name| set.insert(name))
+ .collect();
+
+ // Sort them by the name so we have a stable result.
+ names.sort_by(|a, b| a.as_str().partial_cmp(b.as_str()).unwrap());
+ names
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // THE ACTUAL SEARCH
+
+ fn pick(mut self) -> PickResult<'tcx> {
+ assert!(self.method_name.is_some());
+
+ if let Some(r) = self.pick_core() {
+ return r;
+ }
+
+ debug!("pick: actual search failed, assemble diagnostics");
+
+ let static_candidates = mem::take(&mut self.static_candidates);
+ let private_candidate = self.private_candidate.take();
+ let unsatisfied_predicates = mem::take(&mut self.unsatisfied_predicates);
+
+ // Things failed, so let's look at all traits, for diagnostic purposes now:
+ self.reset();
+
+ let span = self.span;
+ let tcx = self.tcx;
+
+ self.assemble_extension_candidates_for_all_traits();
+
+ let out_of_scope_traits = match self.pick_core() {
+ Some(Ok(p)) => vec![p.item.container_id(self.tcx)],
+ //Some(Ok(p)) => p.iter().map(|p| p.item.container().id()).collect(),
+ Some(Err(MethodError::Ambiguity(v))) => v
+ .into_iter()
+ .map(|source| match source {
+ CandidateSource::Trait(id) => id,
+ CandidateSource::Impl(impl_id) => match tcx.trait_id_of_impl(impl_id) {
+ Some(id) => id,
+ None => span_bug!(span, "found inherent method when looking at traits"),
+ },
+ })
+ .collect(),
+ Some(Err(MethodError::NoMatch(NoMatchData {
+ out_of_scope_traits: others, ..
+ }))) => {
+ assert!(others.is_empty());
+ vec![]
+ }
+ _ => vec![],
+ };
+
+ if let Some((kind, def_id)) = private_candidate {
+ return Err(MethodError::PrivateMatch(kind, def_id, out_of_scope_traits));
+ }
+ let lev_candidate = self.probe_for_lev_candidate()?;
+
+ Err(MethodError::NoMatch(NoMatchData {
+ static_candidates,
+ unsatisfied_predicates,
+ out_of_scope_traits,
+ lev_candidate,
+ mode: self.mode,
+ }))
+ }
+
+ fn pick_core(&mut self) -> Option<PickResult<'tcx>> {
+ let mut unstable_candidates = Vec::new();
+ let pick = self.pick_all_method(Some(&mut unstable_candidates));
+
+ // In this case unstable picking is done by `pick_method`.
+ if !self.tcx.sess.opts.unstable_opts.pick_stable_methods_before_any_unstable {
+ return pick;
+ }
+
+ match pick {
+ // Emit a lint if there are unstable candidates alongside the stable ones.
+ //
+ // We suppress the warning if we're picking the method only because it is a
+ // suggestion.
+ Some(Ok(ref p)) if !self.is_suggestion.0 && !unstable_candidates.is_empty() => {
+ self.emit_unstable_name_collision_hint(p, &unstable_candidates);
+ pick
+ }
+ Some(_) => pick,
+ None => self.pick_all_method(None),
+ }
+ }
+
+ fn pick_all_method(
+ &mut self,
+ mut unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ let steps = self.steps.clone();
+ steps
+ .iter()
+ .filter(|step| {
+ debug!("pick_all_method: step={:?}", step);
+ // skip types that are from a type error or that would require dereferencing
+ // a raw pointer
+ !step.self_ty.references_error() && !step.from_unsafe_deref
+ })
+ .flat_map(|step| {
+ let InferOk { value: self_ty, obligations: _ } = self
+ .fcx
+ .probe_instantiate_query_response(
+ self.span,
+ &self.orig_steps_var_values,
+ &step.self_ty,
+ )
+ .unwrap_or_else(|_| {
+ span_bug!(self.span, "{:?} was applicable but now isn't?", step.self_ty)
+ });
+ self.pick_by_value_method(step, self_ty, unstable_candidates.as_deref_mut())
+ .or_else(|| {
+ self.pick_autorefd_method(
+ step,
+ self_ty,
+ hir::Mutability::Not,
+ unstable_candidates.as_deref_mut(),
+ )
+ .or_else(|| {
+ self.pick_autorefd_method(
+ step,
+ self_ty,
+ hir::Mutability::Mut,
+ unstable_candidates.as_deref_mut(),
+ )
+ })
+ .or_else(|| {
+ self.pick_const_ptr_method(
+ step,
+ self_ty,
+ unstable_candidates.as_deref_mut(),
+ )
+ })
+ })
+ })
+ .next()
+ }
+
+ /// For each type `T` in the step list, this attempts to find a method where
+ /// the (transformed) self type is exactly `T`. However, we do one
+ /// transformation on the adjustment: if we are passing a region pointer in,
+ /// we will potentially *reborrow* it to a shorter lifetime. This allows us
+ /// to transparently pass `&mut` pointers, in particular, without consuming
+ /// them for their entire lifetime.
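+ ///
+ /// For example (illustrative): for `m: &mut Vec<u8>`, a call `m.push(0)`
+ /// reborrows the receiver as `&mut *m` for the duration of the call
+ /// instead of moving `m` out.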
+ fn pick_by_value_method(
+ &mut self,
+ step: &CandidateStep<'tcx>,
+ self_ty: Ty<'tcx>,
+ unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ if step.unsize {
+ return None;
+ }
+
+ self.pick_method(self_ty, unstable_candidates).map(|r| {
+ r.map(|mut pick| {
+ pick.autoderefs = step.autoderefs;
+
+ // Insert a `&*` or `&mut *` if this is a reference type:
+ if let ty::Ref(_, _, mutbl) = *step.self_ty.value.value.kind() {
+ pick.autoderefs += 1;
+ pick.autoref_or_ptr_adjustment = Some(AutorefOrPtrAdjustment::Autoref {
+ mutbl,
+ unsize: pick.autoref_or_ptr_adjustment.map_or(false, |a| a.get_unsize()),
+ })
+ }
+
+ pick
+ })
+ })
+ }
+
+ fn pick_autorefd_method(
+ &mut self,
+ step: &CandidateStep<'tcx>,
+ self_ty: Ty<'tcx>,
+ mutbl: hir::Mutability,
+ unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ let tcx = self.tcx;
+
+ // In general, during probing we erase regions.
+ let region = tcx.lifetimes.re_erased;
+
+ let autoref_ty = tcx.mk_ref(region, ty::TypeAndMut { ty: self_ty, mutbl });
+ self.pick_method(autoref_ty, unstable_candidates).map(|r| {
+ r.map(|mut pick| {
+ pick.autoderefs = step.autoderefs;
+ pick.autoref_or_ptr_adjustment =
+ Some(AutorefOrPtrAdjustment::Autoref { mutbl, unsize: step.unsize });
+ pick
+ })
+ })
+ }
+
+ /// If `self_ty` is `*mut T` then this picks `*const T` methods. The reason why we have a
+ /// special case for this is because going from `*mut T` to `*const T` with autoderefs and
+ /// autorefs would require dereferencing the pointer, which is not safe.
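+ ///
+ /// For example (illustrative): a method whose receiver is `*const T` can
+ /// still be selected for a `*mut T` receiver through this adjustment,
+ /// without ever dereferencing the raw pointer.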
+ fn pick_const_ptr_method(
+ &mut self,
+ step: &CandidateStep<'tcx>,
+ self_ty: Ty<'tcx>,
+ unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ // Don't convert an unsized reference to ptr
+ if step.unsize {
+ return None;
+ }
+
+ let &ty::RawPtr(ty::TypeAndMut { ty, mutbl: hir::Mutability::Mut }) = self_ty.kind() else {
+ return None;
+ };
+
+ let const_self_ty = ty::TypeAndMut { ty, mutbl: hir::Mutability::Not };
+ let const_ptr_ty = self.tcx.mk_ptr(const_self_ty);
+ self.pick_method(const_ptr_ty, unstable_candidates).map(|r| {
+ r.map(|mut pick| {
+ pick.autoderefs = step.autoderefs;
+ pick.autoref_or_ptr_adjustment = Some(AutorefOrPtrAdjustment::ToConstPtr);
+ pick
+ })
+ })
+ }
+
+ fn pick_method_with_unstable(&mut self, self_ty: Ty<'tcx>) -> Option<PickResult<'tcx>> {
+ debug!("pick_method_with_unstable(self_ty={})", self.ty_to_string(self_ty));
+
+ let mut possibly_unsatisfied_predicates = Vec::new();
+ let mut unstable_candidates = Vec::new();
+
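+        // First pass: consider only stable candidates, collecting the unstable
+        // ones we skip so that we can lint on them or fall back to them below.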
+ for (kind, candidates) in
+ &[("inherent", &self.inherent_candidates), ("extension", &self.extension_candidates)]
+ {
+ debug!("searching {} candidates", kind);
+ let res = self.consider_candidates(
+ self_ty,
+ candidates.iter(),
+ &mut possibly_unsatisfied_predicates,
+ Some(&mut unstable_candidates),
+ );
+ if let Some(pick) = res {
+ if !self.is_suggestion.0 && !unstable_candidates.is_empty() {
+ if let Ok(p) = &pick {
+ // Emit a lint if there are unstable candidates alongside the stable ones.
+ //
+                        // We suppress the warning if we're picking the method only
+                        // to make a suggestion.
+ self.emit_unstable_name_collision_hint(p, &unstable_candidates);
+ }
+ }
+ return Some(pick);
+ }
+ }
+
+ debug!("searching unstable candidates");
+ let res = self.consider_candidates(
+ self_ty,
+ unstable_candidates.iter().map(|(c, _)| c),
+ &mut possibly_unsatisfied_predicates,
+ None,
+ );
+ if res.is_none() {
+ self.unsatisfied_predicates.extend(possibly_unsatisfied_predicates);
+ }
+ res
+ }
+
+ fn pick_method(
+ &mut self,
+ self_ty: Ty<'tcx>,
+ mut unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>> {
+ if !self.tcx.sess.opts.unstable_opts.pick_stable_methods_before_any_unstable {
+ return self.pick_method_with_unstable(self_ty);
+ }
+
+ debug!("pick_method(self_ty={})", self.ty_to_string(self_ty));
+
+ let mut possibly_unsatisfied_predicates = Vec::new();
+
+ for (kind, candidates) in
+ &[("inherent", &self.inherent_candidates), ("extension", &self.extension_candidates)]
+ {
+ debug!("searching {} candidates", kind);
+ let res = self.consider_candidates(
+ self_ty,
+ candidates.iter(),
+ &mut possibly_unsatisfied_predicates,
+ unstable_candidates.as_deref_mut(),
+ );
+ if let Some(pick) = res {
+ return Some(pick);
+ }
+ }
+
+ // `pick_method` may be called twice for the same self_ty if no stable methods
+ // match. Only extend once.
+ if unstable_candidates.is_some() {
+ self.unsatisfied_predicates.extend(possibly_unsatisfied_predicates);
+ }
+ None
+ }
+
+ fn consider_candidates<'b, ProbesIter>(
+ &self,
+ self_ty: Ty<'tcx>,
+ probes: ProbesIter,
+ possibly_unsatisfied_predicates: &mut Vec<(
+ ty::Predicate<'tcx>,
+ Option<ty::Predicate<'tcx>>,
+ Option<ObligationCause<'tcx>>,
+ )>,
+ unstable_candidates: Option<&mut Vec<(Candidate<'tcx>, Symbol)>>,
+ ) -> Option<PickResult<'tcx>>
+ where
+ ProbesIter: Iterator<Item = &'b Candidate<'tcx>> + Clone,
+ 'tcx: 'b,
+ {
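+        // Probe every candidate against `self_ty`, keeping everything that is not
+        // an outright `ProbeResult::NoMatch`.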
+ let mut applicable_candidates: Vec<_> = probes
+ .clone()
+ .map(|probe| {
+ (probe, self.consider_probe(self_ty, probe, possibly_unsatisfied_predicates))
+ })
+ .filter(|&(_, status)| status != ProbeResult::NoMatch)
+ .collect();
+
+ debug!("applicable_candidates: {:?}", applicable_candidates);
+
+ if applicable_candidates.len() > 1 {
+ if let Some(pick) =
+ self.collapse_candidates_to_trait_pick(self_ty, &applicable_candidates)
+ {
+ return Some(Ok(pick));
+ }
+ }
+
+ if let Some(uc) = unstable_candidates {
+ applicable_candidates.retain(|&(p, _)| {
+ if let stability::EvalResult::Deny { feature, .. } =
+ self.tcx.eval_stability(p.item.def_id, None, self.span, None)
+ {
+ uc.push((p.clone(), feature));
+ return false;
+ }
+ true
+ });
+ }
+
+ if applicable_candidates.len() > 1 {
+ let sources = probes.map(|p| self.candidate_source(p, self_ty)).collect();
+ return Some(Err(MethodError::Ambiguity(sources)));
+ }
+
+ applicable_candidates.pop().map(|(probe, status)| {
+ if status == ProbeResult::Match {
+ Ok(probe.to_unadjusted_pick(self_ty))
+ } else {
+ Err(MethodError::BadReturnType)
+ }
+ })
+ }
+
+ fn emit_unstable_name_collision_hint(
+ &self,
+ stable_pick: &Pick<'_>,
+ unstable_candidates: &[(Candidate<'tcx>, Symbol)],
+ ) {
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::UNSTABLE_NAME_COLLISIONS,
+ self.scope_expr_id,
+ self.span,
+ |lint| {
+ let def_kind = stable_pick.item.kind.as_def_kind();
+ let mut diag = lint.build(&format!(
+ "{} {} with this name may be added to the standard library in the future",
+ def_kind.article(),
+ def_kind.descr(stable_pick.item.def_id),
+ ));
+ match (stable_pick.item.kind, stable_pick.item.container) {
+ (ty::AssocKind::Fn, _) => {
+                        // FIXME: This should be a `span_suggestion` instead of `help`.
+                        // However, `self.span` only highlights the method name, so we
+                        // can't use it. Also consider reusing the code from
+                        // `report_method_error()`.
+ diag.help(&format!(
+ "call with fully qualified syntax `{}(...)` to keep using the current \
+ method",
+ self.tcx.def_path_str(stable_pick.item.def_id),
+ ));
+ }
+ (ty::AssocKind::Const, ty::AssocItemContainer::TraitContainer) => {
+ let def_id = stable_pick.item.container_id(self.tcx);
+ diag.span_suggestion(
+ self.span,
+ "use the fully qualified path to the associated const",
+ format!(
+ "<{} as {}>::{}",
+ stable_pick.self_ty,
+ self.tcx.def_path_str(def_id),
+ stable_pick.item.name
+ ),
+ Applicability::MachineApplicable,
+ );
+ }
+ _ => {}
+ }
+ if self.tcx.sess.is_nightly_build() {
+ for (candidate, feature) in unstable_candidates {
+ diag.help(&format!(
+ "add `#![feature({})]` to the crate attributes to enable `{}`",
+ feature,
+ self.tcx.def_path_str(candidate.item.def_id),
+ ));
+ }
+ }
+
+ diag.emit();
+ },
+ );
+ }
+
+ fn select_trait_candidate(
+ &self,
+ trait_ref: ty::TraitRef<'tcx>,
+ ) -> traits::SelectionResult<'tcx, traits::Selection<'tcx>> {
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
+ let predicate = ty::Binder::dummy(trait_ref).to_poly_trait_predicate();
+ let obligation = traits::Obligation::new(cause, self.param_env, predicate);
+ traits::SelectionContext::new(self).select(&obligation)
+ }
+
+ fn candidate_source(&self, candidate: &Candidate<'tcx>, self_ty: Ty<'tcx>) -> CandidateSource {
+ match candidate.kind {
+ InherentImplCandidate(..) => {
+ CandidateSource::Impl(candidate.item.container_id(self.tcx))
+ }
+ ObjectCandidate | WhereClauseCandidate(_) => {
+ CandidateSource::Trait(candidate.item.container_id(self.tcx))
+ }
+ TraitCandidate(trait_ref) => self.probe(|_| {
+ let _ = self
+ .at(&ObligationCause::dummy(), self.param_env)
+ .define_opaque_types(false)
+ .sup(candidate.xform_self_ty, self_ty);
+ match self.select_trait_candidate(trait_ref) {
+ Ok(Some(traits::ImplSource::UserDefined(ref impl_data))) => {
+ // If only a single impl matches, make the error message point
+ // to that impl.
+ CandidateSource::Impl(impl_data.impl_def_id)
+ }
+ _ => CandidateSource::Trait(candidate.item.container_id(self.tcx)),
+ }
+ }),
+ }
+ }
+
+ fn consider_probe(
+ &self,
+ self_ty: Ty<'tcx>,
+ probe: &Candidate<'tcx>,
+ possibly_unsatisfied_predicates: &mut Vec<(
+ ty::Predicate<'tcx>,
+ Option<ty::Predicate<'tcx>>,
+ Option<ObligationCause<'tcx>>,
+ )>,
+ ) -> ProbeResult {
+ debug!("consider_probe: self_ty={:?} probe={:?}", self_ty, probe);
+
+ self.probe(|_| {
+ // First check that the self type can be related.
+ let sub_obligations = match self
+ .at(&ObligationCause::dummy(), self.param_env)
+ .define_opaque_types(false)
+ .sup(probe.xform_self_ty, self_ty)
+ {
+ Ok(InferOk { obligations, value: () }) => obligations,
+ Err(err) => {
+ debug!("--> cannot relate self-types {:?}", err);
+ return ProbeResult::NoMatch;
+ }
+ };
+
+ let mut result = ProbeResult::Match;
+ let mut xform_ret_ty = probe.xform_ret_ty;
+ debug!(?xform_ret_ty);
+
+ let selcx = &mut traits::SelectionContext::new(self);
+ let cause = traits::ObligationCause::misc(self.span, self.body_id);
+
+ let mut parent_pred = None;
+
+            // Depending on the candidate kind, impls may carry other conditions
+            // (e.g., where clauses) that must be considered. Make sure that those
+            // match as well (or at least may match; sometimes we don't have enough
+            // information to fully evaluate).
+ match probe.kind {
+ InherentImplCandidate(ref substs, ref ref_obligations) => {
+ // `xform_ret_ty` hasn't been normalized yet, only `xform_self_ty`,
+ // see the reasons mentioned in the comments in `assemble_inherent_impl_probe`
+ // for why this is necessary
+ let traits::Normalized {
+ value: normalized_xform_ret_ty,
+ obligations: normalization_obligations,
+ } = traits::normalize(selcx, self.param_env, cause.clone(), probe.xform_ret_ty);
+ xform_ret_ty = normalized_xform_ret_ty;
+ debug!("xform_ret_ty after normalization: {:?}", xform_ret_ty);
+
+ // Check whether the impl imposes obligations we have to worry about.
+ let impl_def_id = probe.item.container_id(self.tcx);
+ let impl_bounds = self.tcx.predicates_of(impl_def_id);
+ let impl_bounds = impl_bounds.instantiate(self.tcx, substs);
+ let traits::Normalized { value: impl_bounds, obligations: norm_obligations } =
+ traits::normalize(selcx, self.param_env, cause.clone(), impl_bounds);
+
+ // Convert the bounds into obligations.
+ let impl_obligations =
+ traits::predicates_for_generics(cause, self.param_env, impl_bounds);
+
+ let candidate_obligations = impl_obligations
+ .chain(norm_obligations.into_iter())
+ .chain(ref_obligations.iter().cloned())
+ .chain(normalization_obligations.into_iter());
+
+ // Evaluate those obligations to see if they might possibly hold.
+ for o in candidate_obligations {
+ let o = self.resolve_vars_if_possible(o);
+ if !self.predicate_may_hold(&o) {
+ result = ProbeResult::NoMatch;
+ possibly_unsatisfied_predicates.push((
+ o.predicate,
+ None,
+ Some(o.cause),
+ ));
+ }
+ }
+ }
+
+ ObjectCandidate | WhereClauseCandidate(..) => {
+ // These have no additional conditions to check.
+ }
+
+ TraitCandidate(trait_ref) => {
+ if let Some(method_name) = self.method_name {
+ // Some trait methods are excluded for arrays before 2021.
+ // (`array.into_iter()` wants a slice iterator for compatibility.)
+ if self_ty.is_array() && !method_name.span.rust_2021() {
+ let trait_def = self.tcx.trait_def(trait_ref.def_id);
+ if trait_def.skip_array_during_method_dispatch {
+ return ProbeResult::NoMatch;
+ }
+ }
+ }
+ let predicate =
+ ty::Binder::dummy(trait_ref).without_const().to_predicate(self.tcx);
+ parent_pred = Some(predicate);
+ let obligation = traits::Obligation::new(cause, self.param_env, predicate);
+ if !self.predicate_may_hold(&obligation) {
+ result = ProbeResult::NoMatch;
+ if self.probe(|_| {
+ match self.select_trait_candidate(trait_ref) {
+ Err(_) => return true,
+ Ok(Some(impl_source))
+ if !impl_source.borrow_nested_obligations().is_empty() =>
+ {
+ for obligation in impl_source.borrow_nested_obligations() {
+ // Determine exactly which obligation wasn't met, so
+ // that we can give more context in the error.
+ if !self.predicate_may_hold(obligation) {
+ let nested_predicate =
+ self.resolve_vars_if_possible(obligation.predicate);
+ let predicate =
+ self.resolve_vars_if_possible(predicate);
+ let p = if predicate == nested_predicate {
+ // Avoid "`MyStruct: Foo` which is required by
+ // `MyStruct: Foo`" in E0599.
+ None
+ } else {
+ Some(predicate)
+ };
+ possibly_unsatisfied_predicates.push((
+ nested_predicate,
+ p,
+ Some(obligation.cause.clone()),
+ ));
+ }
+ }
+ }
+ _ => {
+ // Some nested subobligation of this predicate
+ // failed.
+ let predicate = self.resolve_vars_if_possible(predicate);
+ possibly_unsatisfied_predicates.push((predicate, None, None));
+ }
+ }
+ false
+ }) {
+                        // This candidate's primary obligation doesn't even
+                        // select - don't bother registering anything in
+                        // `possibly_unsatisfied_predicates`.
+ return ProbeResult::NoMatch;
+ }
+ }
+ }
+ }
+
+ // Evaluate those obligations to see if they might possibly hold.
+ for o in sub_obligations {
+ let o = self.resolve_vars_if_possible(o);
+ if !self.predicate_may_hold(&o) {
+ result = ProbeResult::NoMatch;
+ possibly_unsatisfied_predicates.push((o.predicate, parent_pred, Some(o.cause)));
+ }
+ }
+
+ if let ProbeResult::Match = result {
+ if let (Some(return_ty), Some(xform_ret_ty)) = (self.return_type, xform_ret_ty) {
+ let xform_ret_ty = self.resolve_vars_if_possible(xform_ret_ty);
+ debug!(
+ "comparing return_ty {:?} with xform ret ty {:?}",
+ return_ty, probe.xform_ret_ty
+ );
+ if self
+ .at(&ObligationCause::dummy(), self.param_env)
+ .define_opaque_types(false)
+ .sup(return_ty, xform_ret_ty)
+ .is_err()
+ {
+ return ProbeResult::BadReturnType;
+ }
+ }
+ }
+
+ result
+ })
+ }
+
+ /// Sometimes we get in a situation where we have multiple probes that are all impls of the
+ /// same trait, but we don't know which impl to use. In this case, since in all cases the
+ /// external interface of the method can be determined from the trait, it's ok not to decide.
+ /// We can basically just collapse all of the probes for various impls into one where-clause
+ /// probe. This will result in a pending obligation so when more type-info is available we can
+ /// make the final decision.
+ ///
+ /// Example (`src/test/ui/method-two-trait-defer-resolution-1.rs`):
+ ///
+ /// ```ignore (illustrative)
+ /// trait Foo { ... }
+ /// impl Foo for Vec<i32> { ... }
+ /// impl Foo for Vec<usize> { ... }
+ /// ```
+ ///
+ /// Now imagine the receiver is `Vec<_>`. It doesn't really matter at this time which impl we
+ /// use, so it's ok to just commit to "using the method from the trait Foo".
+ fn collapse_candidates_to_trait_pick(
+ &self,
+ self_ty: Ty<'tcx>,
+ probes: &[(&Candidate<'tcx>, ProbeResult)],
+ ) -> Option<Pick<'tcx>> {
+ // Do all probes correspond to the same trait?
+ let container = probes[0].0.item.trait_container(self.tcx)?;
+ for (p, _) in &probes[1..] {
+ let p_container = p.item.trait_container(self.tcx)?;
+ if p_container != container {
+ return None;
+ }
+ }
+
+        // All probes correspond to the same trait, so just use it and call it a day.
+        // FIXME: check the return type here somehow.
+ Some(Pick {
+ item: probes[0].0.item,
+ kind: TraitPick,
+ import_ids: probes[0].0.import_ids.clone(),
+ autoderefs: 0,
+ autoref_or_ptr_adjustment: None,
+ self_ty,
+ })
+ }
+
+    /// Similarly to `probe_for_return_type`, this method attempts to find the best matching
+    /// candidate method where the method name may have been misspelled. As with other
+    /// Levenshtein-based suggestions, we provide at most one such suggestion.
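+    ///
+    /// For example (illustrative), given `vec.puhs(1)` this would find `Vec::push`
+    /// as the single close-matching applicable candidate.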
+ fn probe_for_lev_candidate(&mut self) -> Result<Option<ty::AssocItem>, MethodError<'tcx>> {
+ debug!("probing for method names similar to {:?}", self.method_name);
+
+ let steps = self.steps.clone();
+ self.probe(|_| {
+ let mut pcx = ProbeContext::new(
+ self.fcx,
+ self.span,
+ self.mode,
+ self.method_name,
+ self.return_type,
+ self.orig_steps_var_values.clone(),
+ steps,
+ IsSuggestion(true),
+ self.scope_expr_id,
+ );
+ pcx.allow_similar_names = true;
+ pcx.assemble_inherent_candidates();
+
+ let method_names = pcx.candidate_method_names();
+ pcx.allow_similar_names = false;
+ let applicable_close_candidates: Vec<ty::AssocItem> = method_names
+ .iter()
+ .filter_map(|&method_name| {
+ pcx.reset();
+ pcx.method_name = Some(method_name);
+ pcx.assemble_inherent_candidates();
+ pcx.pick_core().and_then(|pick| pick.ok()).map(|pick| pick.item)
+ })
+ .collect();
+
+ if applicable_close_candidates.is_empty() {
+ Ok(None)
+ } else {
+ let best_name = {
+ let names = applicable_close_candidates
+ .iter()
+ .map(|cand| cand.name)
+ .collect::<Vec<Symbol>>();
+ find_best_match_for_name_with_substrings(
+ &names,
+ self.method_name.unwrap().name,
+ None,
+ )
+ }
+ .unwrap();
+ Ok(applicable_close_candidates.into_iter().find(|method| method.name == best_name))
+ }
+ })
+ }
+
+ ///////////////////////////////////////////////////////////////////////////
+ // MISCELLANY
+ fn has_applicable_self(&self, item: &ty::AssocItem) -> bool {
+ // "Fast track" -- check for usage of sugar when in method call
+ // mode.
+ //
+ // In Path mode (i.e., resolving a value like `T::next`), consider any
+ // associated value (i.e., methods, constants) but not types.
+ match self.mode {
+ Mode::MethodCall => item.fn_has_self_parameter,
+ Mode::Path => match item.kind {
+ ty::AssocKind::Type => false,
+ ty::AssocKind::Fn | ty::AssocKind::Const => true,
+ },
+ }
+ // FIXME -- check for types that deref to `Self`,
+ // like `Rc<Self>` and so on.
+ //
+ // Note also that the current code will break if this type
+ // includes any of the type parameters defined on the method
+ // -- but this could be overcome.
+ }
+
+ fn record_static_candidate(&mut self, source: CandidateSource) {
+ self.static_candidates.push(source);
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn xform_self_ty(
+ &self,
+ item: &ty::AssocItem,
+ impl_ty: Ty<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> (Ty<'tcx>, Option<Ty<'tcx>>) {
+ if item.kind == ty::AssocKind::Fn && self.mode == Mode::MethodCall {
+ let sig = self.xform_method_sig(item.def_id, substs);
+ (sig.inputs()[0], Some(sig.output()))
+ } else {
+ (impl_ty, None)
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ fn xform_method_sig(&self, method: DefId, substs: SubstsRef<'tcx>) -> ty::FnSig<'tcx> {
+ let fn_sig = self.tcx.bound_fn_sig(method);
+ debug!(?fn_sig);
+
+ assert!(!substs.has_escaping_bound_vars());
+
+ // It is possible for type parameters or early-bound lifetimes
+ // to appear in the signature of `self`. The substitutions we
+ // are given do not include type/lifetime parameters for the
+ // method yet. So create fresh variables here for those too,
+ // if there are any.
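+        //
+        // E.g. for `impl<A> Foo<A> { fn bar<B>(&self) }` (illustrative), `substs`
+        // covers the impl's `A` but not the method's `B`, which gets a fresh
+        // variable below.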
+ let generics = self.tcx.generics_of(method);
+ assert_eq!(substs.len(), generics.parent_count as usize);
+
+ let xform_fn_sig = if generics.params.is_empty() {
+ fn_sig.subst(self.tcx, substs)
+ } else {
+ let substs = InternalSubsts::for_item(self.tcx, method, |param, _| {
+ let i = param.index as usize;
+ if i < substs.len() {
+ substs[i]
+ } else {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+ // In general, during probe we erase regions.
+ self.tcx.lifetimes.re_erased.into()
+ }
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
+ self.var_for_def(self.span, param)
+ }
+ }
+ }
+ });
+ fn_sig.subst(self.tcx, substs)
+ };
+
+ self.erase_late_bound_regions(xform_fn_sig)
+ }
+
+    /// Gets the type of an impl and generates substitutions with inference vars.
+ fn impl_ty_and_substs(
+ &self,
+ impl_def_id: DefId,
+ ) -> (ty::EarlyBinder<Ty<'tcx>>, SubstsRef<'tcx>) {
+ (self.tcx.bound_type_of(impl_def_id), self.fresh_item_substs(impl_def_id))
+ }
+
+ fn fresh_item_substs(&self, def_id: DefId) -> SubstsRef<'tcx> {
+ InternalSubsts::for_item(self.tcx, def_id, |param, _| match param.kind {
+ GenericParamDefKind::Lifetime => self.tcx.lifetimes.re_erased.into(),
+ GenericParamDefKind::Type { .. } => self
+ .next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::SubstitutionPlaceholder,
+ span: self.tcx.def_span(def_id),
+ })
+ .into(),
+ GenericParamDefKind::Const { .. } => {
+ let span = self.tcx.def_span(def_id);
+ let origin = ConstVariableOrigin {
+ kind: ConstVariableOriginKind::SubstitutionPlaceholder,
+ span,
+ };
+ self.next_const_var(self.tcx.type_of(param.def_id), origin).into()
+ }
+ })
+ }
+
+ /// Replaces late-bound-regions bound by `value` with `'static` using
+ /// `ty::erase_late_bound_regions`.
+ ///
+ /// This is only a reasonable thing to do during the *probe* phase, not the *confirm* phase, of
+ /// method matching. It is reasonable during the probe phase because we don't consider region
+ /// relationships at all. Therefore, we can just replace all the region variables with 'static
+ /// rather than creating fresh region variables. This is nice for two reasons:
+ ///
+ /// 1. Because the numbers of the region variables would otherwise be fairly unique to this
+ /// particular method call, it winds up creating fewer types overall, which helps for memory
+ /// usage. (Admittedly, this is a rather small effect, though measurable.)
+ ///
+ /// 2. It makes it easier to deal with higher-ranked trait bounds, because we can replace any
+ /// late-bound regions with 'static. Otherwise, if we were going to replace late-bound
+ /// regions with actual region variables as is proper, we'd have to ensure that the same
+    ///    region got replaced with the same variable, which requires a bit more
+    ///    coordination and/or tracking the substitutions, and so forth.
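+    ///
+    /// For example (illustrative), `for<'a> fn(&'a u32)` is treated as
+    /// `fn(&'static u32)` for the duration of probing.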
+ fn erase_late_bound_regions<T>(&self, value: ty::Binder<'tcx, T>) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.tcx.erase_late_bound_regions(value)
+ }
+
+ /// Finds the method with the appropriate name (or return type, as the case may be). If
+ /// `allow_similar_names` is set, find methods with close-matching names.
+ // The length of the returned iterator is nearly always 0 or 1 and this
+ // method is fairly hot.
+ fn impl_or_trait_item(&self, def_id: DefId) -> SmallVec<[ty::AssocItem; 1]> {
+ if let Some(name) = self.method_name {
+ if self.allow_similar_names {
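+                // Budget roughly one edit per three characters of the name, with
+                // a minimum allowance of one edit.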
+ let max_dist = max(name.as_str().len(), 3) / 3;
+ self.tcx
+ .associated_items(def_id)
+ .in_definition_order()
+ .filter(|x| {
+ if x.kind.namespace() != Namespace::ValueNS {
+ return false;
+ }
+ match lev_distance_with_substrings(name.as_str(), x.name.as_str(), max_dist)
+ {
+ Some(d) => d > 0,
+ None => false,
+ }
+ })
+ .copied()
+ .collect()
+ } else {
+ self.fcx
+ .associated_value(def_id, name)
+ .map_or_else(SmallVec::new, |x| SmallVec::from_buf([x]))
+ }
+ } else {
+ self.tcx.associated_items(def_id).in_definition_order().copied().collect()
+ }
+ }
+}
+
+impl<'tcx> Candidate<'tcx> {
+ fn to_unadjusted_pick(&self, self_ty: Ty<'tcx>) -> Pick<'tcx> {
+ Pick {
+ item: self.item,
+ kind: match self.kind {
+ InherentImplCandidate(..) => InherentImplPick,
+ ObjectCandidate => ObjectPick,
+ TraitCandidate(_) => TraitPick,
+ WhereClauseCandidate(ref trait_ref) => {
+                    // Only traits derived from where-clauses should
+ // appear here, so they should not contain any
+ // inference variables or other artifacts. This
+ // means they are safe to put into the
+ // `WhereClausePick`.
+ assert!(
+ !trait_ref.skip_binder().substs.needs_infer()
+ && !trait_ref.skip_binder().substs.has_placeholders()
+ );
+
+ WhereClausePick(*trait_ref)
+ }
+ },
+ import_ids: self.import_ids.clone(),
+ autoderefs: 0,
+ autoref_or_ptr_adjustment: None,
+ self_ty,
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/method/suggest.rs b/compiler/rustc_typeck/src/check/method/suggest.rs
new file mode 100644
index 000000000..c92b93cbc
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/method/suggest.rs
@@ -0,0 +1,2286 @@
+//! Give useful errors and suggestions to users when an item can't be
+//! found or is otherwise invalid.
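+//!
+//! For example (illustrative, with a hypothetical method name), a typical error
+//! this module decorates:
+//!
+//! ```ignore (illustrative)
+//! let x = 1u32;
+//! x.frobnicate();
+//! // error[E0599]: no method named `frobnicate` found for type `u32`
+//! //               in the current scope
+//! ```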
+
+use crate::check::FnCtxt;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
+ MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{ExprKind, Node, QPath};
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::traits::util::supertraits;
+use rustc_middle::ty::fast_reject::{simplify_type, TreatParams};
+use rustc_middle::ty::print::with_crate_prefix;
+use rustc_middle::ty::ToPolyTraitRef;
+use rustc_middle::ty::{self, DefIdTree, ToPredicate, Ty, TyCtxt, TypeVisitable};
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::Symbol;
+use rustc_span::{lev_distance, source_map, ExpnKind, FileName, MacroKind, Span};
+use rustc_trait_selection::traits::error_reporting::on_unimplemented::InferCtxtExt as _;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+ FulfillmentError, Obligation, ObligationCause, ObligationCauseCode, OnUnimplementedNote,
+};
+
+use std::cmp::Ordering;
+use std::iter;
+
+use super::probe::{Mode, ProbeScope};
+use super::{super::suggest_call_constructor, CandidateSource, MethodError, NoMatchData};
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ fn is_fn_ty(&self, ty: Ty<'tcx>, span: Span) -> bool {
+ let tcx = self.tcx;
+ match ty.kind() {
+ // Not all of these (e.g., unsafe fns) implement `FnOnce`,
+ // so we look for these beforehand.
+ ty::Closure(..) | ty::FnDef(..) | ty::FnPtr(_) => true,
+ // If it's not a simple function, look for things which implement `FnOnce`.
+ _ => {
+ let Some(fn_once) = tcx.lang_items().fn_once_trait() else {
+ return false;
+ };
+
+                // This conditional prevents us from treating error or unresolved types as callable.
+ // It might seem that we can use `predicate_must_hold_modulo_regions`,
+ // but since a Dummy binder is used to fill in the FnOnce trait's arguments,
+ // type resolution always gives a "maybe" here.
+ if self.autoderef(span, ty).any(|(ty, _)| {
+ info!("check deref {:?} error", ty);
+ matches!(ty.kind(), ty::Error(_) | ty::Infer(_))
+ }) {
+ return false;
+ }
+
+ self.autoderef(span, ty).any(|(ty, _)| {
+ info!("check deref {:?} impl FnOnce", ty);
+ self.probe(|_| {
+ let fn_once_substs = tcx.mk_substs_trait(
+ ty,
+ &[self
+ .next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span,
+ })
+ .into()],
+ );
+ let trait_ref = ty::TraitRef::new(fn_once, fn_once_substs);
+ let poly_trait_ref = ty::Binder::dummy(trait_ref);
+ let obligation = Obligation::misc(
+ span,
+ self.body_id,
+ self.param_env,
+ poly_trait_ref.without_const().to_predicate(tcx),
+ );
+ self.predicate_may_hold(&obligation)
+ })
+ })
+ }
+ }
+ }
+
+ fn is_slice_ty(&self, ty: Ty<'tcx>, span: Span) -> bool {
+ self.autoderef(span, ty).any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..)))
+ }
+
+ pub fn report_method_error(
+ &self,
+ mut span: Span,
+ rcvr_ty: Ty<'tcx>,
+ item_name: Ident,
+ source: SelfSource<'tcx>,
+ error: MethodError<'tcx>,
+ args: Option<&'tcx [hir::Expr<'tcx>]>,
+ ) -> Option<DiagnosticBuilder<'_, ErrorGuaranteed>> {
+ // Avoid suggestions when we don't know what's going on.
+ if rcvr_ty.references_error() {
+ return None;
+ }
+
+ let report_candidates = |span: Span,
+ err: &mut Diagnostic,
+ mut sources: Vec<CandidateSource>,
+ sugg_span: Span| {
+ sources.sort();
+ sources.dedup();
+ // Dynamic limit to avoid hiding just one candidate, which is silly.
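+            // (With exactly 5 sources we show all 5, rather than 4 plus
+            // "and 1 others".)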
+ let limit = if sources.len() == 5 { 5 } else { 4 };
+
+ for (idx, source) in sources.iter().take(limit).enumerate() {
+ match *source {
+ CandidateSource::Impl(impl_did) => {
+ // Provide the best span we can. Use the item, if local to crate, else
+ // the impl, if local to crate (item may be defaulted), else nothing.
+ let Some(item) = self.associated_value(impl_did, item_name).or_else(|| {
+ let impl_trait_ref = self.tcx.impl_trait_ref(impl_did)?;
+ self.associated_value(impl_trait_ref.def_id, item_name)
+ }) else {
+ continue;
+ };
+
+ let note_span = if item.def_id.is_local() {
+ Some(self.tcx.def_span(item.def_id))
+ } else if impl_did.is_local() {
+ Some(self.tcx.def_span(impl_did))
+ } else {
+ None
+ };
+
+ let impl_ty = self.tcx.at(span).type_of(impl_did);
+
+ let insertion = match self.tcx.impl_trait_ref(impl_did) {
+ None => String::new(),
+ Some(trait_ref) => format!(
+ " of the trait `{}`",
+ self.tcx.def_path_str(trait_ref.def_id)
+ ),
+ };
+
+ let (note_str, idx) = if sources.len() > 1 {
+ (
+ format!(
+ "candidate #{} is defined in an impl{} for the type `{}`",
+ idx + 1,
+ insertion,
+ impl_ty,
+ ),
+ Some(idx + 1),
+ )
+ } else {
+ (
+ format!(
+ "the candidate is defined in an impl{} for the type `{}`",
+ insertion, impl_ty,
+ ),
+ None,
+ )
+ };
+ if let Some(note_span) = note_span {
+ // We have a span pointing to the method. Show note with snippet.
+ err.span_note(note_span, &note_str);
+ } else {
+ err.note(&note_str);
+ }
+ if let Some(trait_ref) = self.tcx.impl_trait_ref(impl_did) {
+ let path = self.tcx.def_path_str(trait_ref.def_id);
+
+ let ty = match item.kind {
+ ty::AssocKind::Const | ty::AssocKind::Type => rcvr_ty,
+ ty::AssocKind::Fn => self
+ .tcx
+ .fn_sig(item.def_id)
+ .inputs()
+ .skip_binder()
+ .get(0)
+ .filter(|ty| ty.is_region_ptr() && !rcvr_ty.is_region_ptr())
+ .copied()
+ .unwrap_or(rcvr_ty),
+ };
+ print_disambiguation_help(
+ item_name,
+ args,
+ err,
+ path,
+ ty,
+ item.kind,
+ item.def_id,
+ sugg_span,
+ idx,
+ self.tcx.sess.source_map(),
+ item.fn_has_self_parameter,
+ );
+ }
+ }
+ CandidateSource::Trait(trait_did) => {
+ let Some(item) = self.associated_value(trait_did, item_name) else { continue };
+ let item_span = self.tcx.def_span(item.def_id);
+ let idx = if sources.len() > 1 {
+ let msg = &format!(
+ "candidate #{} is defined in the trait `{}`",
+ idx + 1,
+ self.tcx.def_path_str(trait_did)
+ );
+ err.span_note(item_span, msg);
+ Some(idx + 1)
+ } else {
+ let msg = &format!(
+ "the candidate is defined in the trait `{}`",
+ self.tcx.def_path_str(trait_did)
+ );
+ err.span_note(item_span, msg);
+ None
+ };
+ let path = self.tcx.def_path_str(trait_did);
+ print_disambiguation_help(
+ item_name,
+ args,
+ err,
+ path,
+ rcvr_ty,
+ item.kind,
+ item.def_id,
+ sugg_span,
+ idx,
+ self.tcx.sess.source_map(),
+ item.fn_has_self_parameter,
+ );
+ }
+ }
+ }
+ if sources.len() > limit {
+ err.note(&format!("and {} others", sources.len() - limit));
+ }
+ };
+
+ let sugg_span = if let SelfSource::MethodCall(expr) = source {
+ // Given `foo.bar(baz)`, `expr` is `bar`, but we want to point to the whole thing.
+ self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id)).span
+ } else {
+ span
+ };
+
+ match error {
+ MethodError::NoMatch(NoMatchData {
+ static_candidates: static_sources,
+ unsatisfied_predicates,
+ out_of_scope_traits,
+ lev_candidate,
+ mode,
+ }) => {
+ let tcx = self.tcx;
+
+ let actual = self.resolve_vars_if_possible(rcvr_ty);
+ let ty_str = self.ty_to_string(actual);
+ let is_method = mode == Mode::MethodCall;
+ let item_kind = if is_method {
+ "method"
+ } else if actual.is_enum() {
+ "variant or associated item"
+ } else {
+ match (item_name.as_str().chars().next(), actual.is_fresh_ty()) {
+ (Some(name), false) if name.is_lowercase() => "function or associated item",
+ (Some(_), false) => "associated item",
+ (Some(_), true) | (None, false) => "variant or associated item",
+ (None, true) => "variant",
+ }
+ };
+
+ if self.suggest_constraining_numerical_ty(
+ tcx, actual, source, span, item_kind, item_name, &ty_str,
+ ) {
+ return None;
+ }
+
+ span = item_name.span;
+
+ // Don't show generic arguments when the method can't be found in any implementation (#81576).
+ let mut ty_str_reported = ty_str.clone();
+ if let ty::Adt(_, generics) = actual.kind() {
+ if generics.len() > 0 {
+ let mut autoderef = self.autoderef(span, actual);
+ let candidate_found = autoderef.any(|(ty, _)| {
+ if let ty::Adt(adt_deref, _) = ty.kind() {
+ self.tcx
+ .inherent_impls(adt_deref.did())
+ .iter()
+ .filter_map(|def_id| self.associated_value(*def_id, item_name))
+ .count()
+ >= 1
+ } else {
+ false
+ }
+ });
+ let has_deref = autoderef.step_count() > 0;
+ if !candidate_found && !has_deref && unsatisfied_predicates.is_empty() {
+ if let Some((path_string, _)) = ty_str.split_once('<') {
+ ty_str_reported = path_string.to_string();
+ }
+ }
+ }
+ }
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0599,
+ "no {} named `{}` found for {} `{}` in the current scope",
+ item_kind,
+ item_name,
+ actual.prefix_string(self.tcx),
+ ty_str_reported,
+ );
+ if actual.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
+
+ if let Mode::MethodCall = mode && let SelfSource::MethodCall(cal) = source {
+ self.suggest_await_before_method(
+ &mut err, item_name, actual, cal, span,
+ );
+ }
+ if let Some(span) = tcx.resolutions(()).confused_type_with_std_module.get(&span) {
+ err.span_suggestion(
+ span.shrink_to_lo(),
+ "you are looking for the module in `std`, not the primitive type",
+ "std::",
+ Applicability::MachineApplicable,
+ );
+ }
+ if let ty::RawPtr(_) = &actual.kind() {
+ err.note(
+ "try using `<*const T>::as_ref()` to get a reference to the \
+ type behind the pointer: https://doc.rust-lang.org/std/\
+ primitive.pointer.html#method.as_ref",
+ );
+ err.note(
+ "using `<*const T>::as_ref()` on a pointer which is unaligned or points \
+ to invalid or uninitialized memory is undefined behavior",
+ );
+ }
+
+ let ty_span = match actual.kind() {
+ ty::Param(param_type) => {
+ let generics = self.tcx.generics_of(self.body_id.owner.to_def_id());
+ let type_param = generics.type_param(param_type, self.tcx);
+ Some(self.tcx.def_span(type_param.def_id))
+ }
+ ty::Adt(def, _) if def.did().is_local() => Some(tcx.def_span(def.did())),
+ _ => None,
+ };
+
+ if let Some(span) = ty_span {
+ err.span_label(
+ span,
+ format!(
+ "{item_kind} `{item_name}` not found for this {}",
+ actual.prefix_string(self.tcx)
+ ),
+ );
+ }
+
+ if self.is_fn_ty(rcvr_ty, span) {
+ if let SelfSource::MethodCall(expr) = source {
+ let suggest = if let ty::FnDef(def_id, _) = rcvr_ty.kind() {
+ if let Some(local_id) = def_id.as_local() {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(local_id);
+ let node = tcx.hir().get(hir_id);
+ let fields = node.tuple_fields();
+ if let Some(fields) = fields
+ && let Some(DefKind::Ctor(of, _)) = self.tcx.opt_def_kind(local_id) {
+ Some((fields.len(), of))
+ } else {
+ None
+ }
+ } else {
+ // The logic here isn't smart but `associated_item_def_ids`
+                            // doesn't work nicely on local items.
+ if let DefKind::Ctor(of, _) = tcx.def_kind(def_id) {
+ let parent_def_id = tcx.parent(*def_id);
+ Some((tcx.associated_item_def_ids(parent_def_id).len(), of))
+ } else {
+ None
+ }
+ }
+ } else {
+ None
+ };
+
+ // If the function is a tuple constructor, we recommend that they call it
+ if let Some((fields, kind)) = suggest {
+ suggest_call_constructor(expr.span, kind, fields, &mut err);
+ } else {
+ // General case
+ err.span_label(
+ expr.span,
+ "this is a function, perhaps you wish to call it",
+ );
+ }
+ }
+ }
+
+ let mut custom_span_label = false;
+
+ if !static_sources.is_empty() {
+ err.note(
+ "found the following associated functions; to be used as methods, \
+ functions must have a `self` parameter",
+ );
+ err.span_label(span, "this is an associated function, not a method");
+ custom_span_label = true;
+ }
+ if static_sources.len() == 1 {
+ let ty_str =
+ if let Some(CandidateSource::Impl(impl_did)) = static_sources.get(0) {
+ // When the "method" is resolved through dereferencing, we really want the
+ // original type that has the associated function for accurate suggestions.
+ // (#61411)
+ let ty = tcx.at(span).type_of(*impl_did);
+ match (&ty.peel_refs().kind(), &actual.peel_refs().kind()) {
+ (ty::Adt(def, _), ty::Adt(def_actual, _)) if def == def_actual => {
+ // Use `actual` as it will have more `substs` filled in.
+ self.ty_to_value_string(actual.peel_refs())
+ }
+ _ => self.ty_to_value_string(ty.peel_refs()),
+ }
+ } else {
+ self.ty_to_value_string(actual.peel_refs())
+ };
+ if let SelfSource::MethodCall(expr) = source {
+ err.span_suggestion(
+ expr.span.to(span),
+ "use associated function syntax instead",
+ format!("{}::{}", ty_str, item_name),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.help(&format!("try with `{}::{}`", ty_str, item_name,));
+ }
+
+ report_candidates(span, &mut err, static_sources, sugg_span);
+ } else if static_sources.len() > 1 {
+ report_candidates(span, &mut err, static_sources, sugg_span);
+ }
+
+ let mut bound_spans = vec![];
+ let mut restrict_type_params = false;
+ let mut unsatisfied_bounds = false;
+ if item_name.name == sym::count && self.is_slice_ty(actual, span) {
+ let msg = "consider using `len` instead";
+ if let SelfSource::MethodCall(_expr) = source {
+ err.span_suggestion_short(
+ span,
+ msg,
+ "len",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_label(span, msg);
+ }
+ if let Some(iterator_trait) = self.tcx.get_diagnostic_item(sym::Iterator) {
+ let iterator_trait = self.tcx.def_path_str(iterator_trait);
+ err.note(&format!("`count` is defined on `{iterator_trait}`, which `{actual}` does not implement"));
+ }
+ } else if !unsatisfied_predicates.is_empty() {
+ let mut type_params = FxHashMap::default();
+
+ // Pick out the list of unimplemented traits on the receiver.
+ // This is used for custom error messages with the `#[rustc_on_unimplemented]` attribute.
+ let mut unimplemented_traits = FxHashMap::default();
+ let mut unimplemented_traits_only = true;
+ for (predicate, _parent_pred, cause) in &unsatisfied_predicates {
+ if let (ty::PredicateKind::Trait(p), Some(cause)) =
+ (predicate.kind().skip_binder(), cause.as_ref())
+ {
+ if p.trait_ref.self_ty() != rcvr_ty {
+ // This is necessary, not just to keep the errors clean, but also
+ // because our derived obligations can wind up with a trait ref that
+ // requires a different param_env to be correctly compared.
+ continue;
+ }
+ unimplemented_traits.entry(p.trait_ref.def_id).or_insert((
+ predicate.kind().rebind(p.trait_ref),
+ Obligation {
+ cause: cause.clone(),
+ param_env: self.param_env,
+ predicate: *predicate,
+ recursion_depth: 0,
+ },
+ ));
+ }
+ }
+
+ // Make sure that, if any traits other than the found ones were involved,
+                    // we don't report an unimplemented trait.
+ // We don't want to say that `iter::Cloned` is not an iterator, just
+ // because of some non-Clone item being iterated over.
+ for (predicate, _parent_pred, _cause) in &unsatisfied_predicates {
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(p)
+ if unimplemented_traits.contains_key(&p.trait_ref.def_id) => {}
+ _ => {
+ unimplemented_traits_only = false;
+ break;
+ }
+ }
+ }
+
+ let mut collect_type_param_suggestions =
+ |self_ty: Ty<'tcx>, parent_pred: ty::Predicate<'tcx>, obligation: &str| {
+ // We don't care about regions here, so it's fine to skip the binder here.
+ if let (ty::Param(_), ty::PredicateKind::Trait(p)) =
+ (self_ty.kind(), parent_pred.kind().skip_binder())
+ {
+ let node = match p.trait_ref.self_ty().kind() {
+ ty::Param(_) => {
+ // Account for `fn` items like in `issue-35677.rs` to
+ // suggest restricting its type params.
+ let did = self.tcx.hir().body_owner_def_id(hir::BodyId {
+ hir_id: self.body_id,
+ });
+ Some(
+ self.tcx
+ .hir()
+ .get(self.tcx.hir().local_def_id_to_hir_id(did)),
+ )
+ }
+ ty::Adt(def, _) => def.did().as_local().map(|def_id| {
+ self.tcx
+ .hir()
+ .get(self.tcx.hir().local_def_id_to_hir_id(def_id))
+ }),
+ _ => None,
+ };
+ if let Some(hir::Node::Item(hir::Item { kind, .. })) = node {
+ if let Some(g) = kind.generics() {
+ let key = (
+ g.tail_span_for_predicate_suggestion(),
+ g.add_where_or_trailing_comma(),
+ );
+ type_params
+ .entry(key)
+ .or_insert_with(FxHashSet::default)
+ .insert(obligation.to_owned());
+ }
+ }
+ }
+ };
+ let mut bound_span_label = |self_ty: Ty<'_>, obligation: &str, quiet: &str| {
+ let msg = format!(
+ "doesn't satisfy `{}`",
+ if obligation.len() > 50 { quiet } else { obligation }
+ );
+ match &self_ty.kind() {
+ // Point at the type that couldn't satisfy the bound.
+ ty::Adt(def, _) => {
+ bound_spans.push((self.tcx.def_span(def.did()), msg))
+ }
+ // Point at the trait object that couldn't satisfy the bound.
+ ty::Dynamic(preds, _) => {
+ for pred in preds.iter() {
+ match pred.skip_binder() {
+ ty::ExistentialPredicate::Trait(tr) => bound_spans
+ .push((self.tcx.def_span(tr.def_id), msg.clone())),
+ ty::ExistentialPredicate::Projection(_)
+ | ty::ExistentialPredicate::AutoTrait(_) => {}
+ }
+ }
+ }
+ // Point at the closure that couldn't satisfy the bound.
+ ty::Closure(def_id, _) => bound_spans.push((
+ tcx.def_span(*def_id),
+ format!("doesn't satisfy `{}`", quiet),
+ )),
+ _ => {}
+ }
+ };
+ let mut format_pred = |pred: ty::Predicate<'tcx>| {
+ let bound_predicate = pred.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Projection(pred) => {
+ let pred = bound_predicate.rebind(pred);
+ // `<Foo as Iterator>::Item = String`.
+ let projection_ty = pred.skip_binder().projection_ty;
+
+ let substs_with_infer_self = tcx.mk_substs(
+ iter::once(tcx.mk_ty_var(ty::TyVid::from_u32(0)).into())
+ .chain(projection_ty.substs.iter().skip(1)),
+ );
+
+ let quiet_projection_ty = ty::ProjectionTy {
+ substs: substs_with_infer_self,
+ item_def_id: projection_ty.item_def_id,
+ };
+
+ let term = pred.skip_binder().term;
+
+ let obligation = format!("{} = {}", projection_ty, term);
+ let quiet = format!("{} = {}", quiet_projection_ty, term);
+
+ bound_span_label(projection_ty.self_ty(), &obligation, &quiet);
+ Some((obligation, projection_ty.self_ty()))
+ }
+ ty::PredicateKind::Trait(poly_trait_ref) => {
+ let p = poly_trait_ref.trait_ref;
+ let self_ty = p.self_ty();
+ let path = p.print_only_trait_path();
+ let obligation = format!("{}: {}", self_ty, path);
+ let quiet = format!("_: {}", path);
+ bound_span_label(self_ty, &obligation, &quiet);
+ Some((obligation, self_ty))
+ }
+ _ => None,
+ }
+ };
+
+ // Find all the requirements that come from a local `impl` block.
+ let mut skip_list: FxHashSet<_> = Default::default();
+ let mut spanned_predicates: FxHashMap<MultiSpan, _> = Default::default();
+ for (data, p, parent_p, impl_def_id, cause) in unsatisfied_predicates
+ .iter()
+ .filter_map(|(p, parent, c)| c.as_ref().map(|c| (p, parent, c)))
+ .filter_map(|(p, parent, c)| match c.code() {
+ ObligationCauseCode::ImplDerivedObligation(ref data) => {
+ Some((&data.derived, p, parent, data.impl_def_id, data))
+ }
+ _ => None,
+ })
+ {
+ let parent_trait_ref = data.parent_trait_pred;
+ let path = parent_trait_ref.print_modifiers_and_trait_path();
+ let tr_self_ty = parent_trait_ref.skip_binder().self_ty();
+ let unsatisfied_msg = "unsatisfied trait bound introduced here";
+ let derive_msg =
+ "unsatisfied trait bound introduced in this `derive` macro";
+ match self.tcx.hir().get_if_local(impl_def_id) {
+ // Unmet obligation comes from a `derive` macro, point at it once to
+ // avoid multiple span labels pointing at the same place.
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(..),
+ ident,
+ ..
+ })) if matches!(
+ ident.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) =>
+ {
+ let span = ident.span.ctxt().outer_expn_data().call_site;
+ let mut spans: MultiSpan = span.into();
+ spans.push_span_label(span, derive_msg);
+ let entry = spanned_predicates.entry(spans);
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { of_trait, self_ty, .. }),
+ ..
+ })) if matches!(
+ self_ty.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) || matches!(
+ of_trait.as_ref().map(|t| t
+ .path
+ .span
+ .ctxt()
+ .outer_expn_data()
+ .kind),
+ Some(ExpnKind::Macro(MacroKind::Derive, _))
+ ) =>
+ {
+ let span = self_ty.span.ctxt().outer_expn_data().call_site;
+ let mut spans: MultiSpan = span.into();
+ spans.push_span_label(span, derive_msg);
+ let entry = spanned_predicates.entry(spans);
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+
+ // Unmet obligation coming from a `trait`.
+ Some(Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(..),
+ ident,
+ span: item_span,
+ ..
+ })) if !matches!(
+ ident.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) =>
+ {
+ if let Some(pred) = parent_p {
+ // Done to add the "doesn't satisfy" `span_label`.
+ let _ = format_pred(*pred);
+ }
+ skip_list.insert(p);
+ let mut spans = if cause.span != *item_span {
+ let mut spans: MultiSpan = cause.span.into();
+ spans.push_span_label(cause.span, unsatisfied_msg);
+ spans
+ } else {
+ ident.span.into()
+ };
+ spans.push_span_label(ident.span, "in this trait");
+ let entry = spanned_predicates.entry(spans);
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+
+ // Unmet obligation coming from an `impl`.
+ Some(Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Impl(hir::Impl {
+ of_trait, self_ty, generics, ..
+ }),
+ span: item_span,
+ ..
+ })) if !matches!(
+ self_ty.span.ctxt().outer_expn_data().kind,
+ ExpnKind::Macro(MacroKind::Derive, _)
+ ) && !matches!(
+ of_trait.as_ref().map(|t| t
+ .path
+ .span
+ .ctxt()
+ .outer_expn_data()
+ .kind),
+ Some(ExpnKind::Macro(MacroKind::Derive, _))
+ ) =>
+ {
+ let sized_pred =
+ unsatisfied_predicates.iter().any(|(pred, _, _)| {
+ match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => {
+ Some(pred.def_id())
+ == self.tcx.lang_items().sized_trait()
+ && pred.polarity == ty::ImplPolarity::Positive
+ }
+ _ => false,
+ }
+ });
+ for param in generics.params {
+ if param.span == cause.span && sized_pred {
+ let (sp, sugg) = match param.colon_span {
+ Some(sp) => (sp.shrink_to_hi(), " ?Sized +"),
+ None => (param.span.shrink_to_hi(), ": ?Sized"),
+ };
+ err.span_suggestion_verbose(
+ sp,
+ "consider relaxing the type parameter's implicit \
+ `Sized` bound",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ if let Some(pred) = parent_p {
+ // Done to add the "doesn't satisfy" `span_label`.
+ let _ = format_pred(*pred);
+ }
+ skip_list.insert(p);
+ let mut spans = if cause.span != *item_span {
+ let mut spans: MultiSpan = cause.span.into();
+ spans.push_span_label(cause.span, unsatisfied_msg);
+ spans
+ } else {
+ let mut spans = Vec::with_capacity(2);
+ if let Some(trait_ref) = of_trait {
+ spans.push(trait_ref.path.span);
+ }
+ spans.push(self_ty.span);
+ spans.into()
+ };
+ if let Some(trait_ref) = of_trait {
+ spans.push_span_label(trait_ref.path.span, "");
+ }
+ spans.push_span_label(self_ty.span, "");
+
+ let entry = spanned_predicates.entry(spans);
+ entry.or_insert_with(|| (path, tr_self_ty, Vec::new())).2.push(p);
+ }
+ _ => {}
+ }
+ }
+ let mut spanned_predicates: Vec<_> = spanned_predicates.into_iter().collect();
+ spanned_predicates.sort_by_key(|(span, (_, _, _))| span.primary_span());
+ for (span, (_path, _self_ty, preds)) in spanned_predicates {
+ let mut preds: Vec<_> = preds
+ .into_iter()
+ .filter_map(|pred| format_pred(*pred))
+ .map(|(p, _)| format!("`{}`", p))
+ .collect();
+ preds.sort();
+ preds.dedup();
+ let msg = if let [pred] = &preds[..] {
+ format!("trait bound {} was not satisfied", pred)
+ } else {
+ format!(
+ "the following trait bounds were not satisfied:\n{}",
+ preds.join("\n"),
+ )
+ };
+ err.span_note(span, &msg);
+ unsatisfied_bounds = true;
+ }
+
+ // The requirements that didn't have an `impl` span to show.
+ let mut bound_list = unsatisfied_predicates
+ .iter()
+ .filter_map(|(pred, parent_pred, _cause)| {
+ format_pred(*pred).map(|(p, self_ty)| {
+ collect_type_param_suggestions(self_ty, *pred, &p);
+ (
+ match parent_pred {
+ None => format!("`{}`", &p),
+ Some(parent_pred) => match format_pred(*parent_pred) {
+ None => format!("`{}`", &p),
+ Some((parent_p, _)) => {
+ collect_type_param_suggestions(
+ self_ty,
+ *parent_pred,
+ &p,
+ );
+ format!(
+ "`{}`\nwhich is required by `{}`",
+ p, parent_p
+ )
+ }
+ },
+ },
+ *pred,
+ )
+ })
+ })
+ .filter(|(_, pred)| !skip_list.contains(&pred))
+ .map(|(t, _)| t)
+ .enumerate()
+ .collect::<Vec<(usize, String)>>();
+
+ for ((span, add_where_or_comma), obligations) in type_params.into_iter() {
+ restrict_type_params = true;
+ // #74886: Sort here so that the output is always the same.
+ let mut obligations = obligations.into_iter().collect::<Vec<_>>();
+ obligations.sort();
+ err.span_suggestion_verbose(
+ span,
+ &format!(
+ "consider restricting the type parameter{s} to satisfy the \
+ trait bound{s}",
+ s = pluralize!(obligations.len())
+ ),
+ format!("{} {}", add_where_or_comma, obligations.join(", ")),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ bound_list.sort_by(|(_, a), (_, b)| a.cmp(b)); // Sort alphabetically.
+ bound_list.dedup_by(|(_, a), (_, b)| a == b); // #35677
+ bound_list.sort_by_key(|(pos, _)| *pos); // Keep the original predicate order.
+
+ if !bound_list.is_empty() || !skip_list.is_empty() {
+ let bound_list = bound_list
+ .into_iter()
+ .map(|(_, path)| path)
+ .collect::<Vec<_>>()
+ .join("\n");
+ let actual_prefix = actual.prefix_string(self.tcx);
+ info!("unimplemented_traits.len() == {}", unimplemented_traits.len());
+ let (primary_message, label) =
+ if unimplemented_traits.len() == 1 && unimplemented_traits_only {
+ unimplemented_traits
+ .into_iter()
+ .next()
+ .map(|(_, (trait_ref, obligation))| {
+ if trait_ref.self_ty().references_error()
+ || actual.references_error()
+ {
+ // Avoid crashing.
+ return (None, None);
+ }
+ let OnUnimplementedNote { message, label, .. } =
+ self.on_unimplemented_note(trait_ref, &obligation);
+ (message, label)
+ })
+ .unwrap_or((None, None))
+ } else {
+ (None, None)
+ };
+ let primary_message = primary_message.unwrap_or_else(|| format!(
+ "the {item_kind} `{item_name}` exists for {actual_prefix} `{ty_str}`, but its trait bounds were not satisfied"
+ ));
+ err.set_primary_message(&primary_message);
+ if let Some(label) = label {
+ custom_span_label = true;
+ err.span_label(span, label);
+ }
+ if !bound_list.is_empty() {
+ err.note(&format!(
+ "the following trait bounds were not satisfied:\n{bound_list}"
+ ));
+ }
+ self.suggest_derive(&mut err, &unsatisfied_predicates);
+
+ unsatisfied_bounds = true;
+ }
+ }
+
+ let label_span_not_found = |err: &mut DiagnosticBuilder<'_, _>| {
+ if unsatisfied_predicates.is_empty() {
+ err.span_label(span, format!("{item_kind} not found in `{ty_str}`"));
+ let is_string_or_ref_str = match actual.kind() {
+ ty::Ref(_, ty, _) => {
+ ty.is_str()
+ || matches!(
+ ty.kind(),
+ ty::Adt(adt, _) if self.tcx.is_diagnostic_item(sym::String, adt.did())
+ )
+ }
+ ty::Adt(adt, _) => self.tcx.is_diagnostic_item(sym::String, adt.did()),
+ _ => false,
+ };
+ if is_string_or_ref_str && item_name.name == sym::iter {
+ err.span_suggestion_verbose(
+ item_name.span,
+ "because of the in-memory representation of `&str`, to obtain \
+ an `Iterator` over each of its codepoint use method `chars`",
+ "chars",
+ Applicability::MachineApplicable,
+ );
+ }
+ if let ty::Adt(adt, _) = rcvr_ty.kind() {
+ let mut inherent_impls_candidate = self
+ .tcx
+ .inherent_impls(adt.did())
+ .iter()
+ .copied()
+ .filter(|def_id| {
+ if let Some(assoc) = self.associated_value(*def_id, item_name) {
+                            // Check that the modes agree, so we avoid suggesting an
+                            // incorrect associated item.
+ match (mode, assoc.fn_has_self_parameter, source) {
+ (Mode::MethodCall, true, SelfSource::MethodCall(_)) => {
+                                    // We check that the suggested type is actually
+                                    // different from the receiver type, so we avoid
+                                    // suggesting a method on `Box<Self>`, for instance.
+ self.tcx.at(span).type_of(*def_id) != actual
+ && self.tcx.at(span).type_of(*def_id) != rcvr_ty
+ }
+ (Mode::Path, false, _) => true,
+ _ => false,
+ }
+ } else {
+ false
+ }
+ })
+ .collect::<Vec<_>>();
+ if !inherent_impls_candidate.is_empty() {
+ inherent_impls_candidate.sort();
+ inherent_impls_candidate.dedup();
+
+                        // Maximum number of types to show; with exactly 5 we show
+                        // all of them, to avoid "and 1 more types".
+ let limit = if inherent_impls_candidate.len() == 5 { 5 } else { 4 };
+ let type_candidates = inherent_impls_candidate
+ .iter()
+ .take(limit)
+ .map(|impl_item| {
+ format!("- `{}`", self.tcx.at(span).type_of(*impl_item))
+ })
+ .collect::<Vec<_>>()
+ .join("\n");
+ let additional_types = if inherent_impls_candidate.len() > limit {
+ format!(
+ "\nand {} more types",
+ inherent_impls_candidate.len() - limit
+ )
+ } else {
+ "".to_string()
+ };
+ err.note(&format!(
+ "the {item_kind} was found for\n{}{}",
+ type_candidates, additional_types
+ ));
+ }
+ }
+ } else {
+ err.span_label(span, format!("{item_kind} cannot be called on `{ty_str}` due to unsatisfied trait bounds"));
+ }
+ };
+
+ // If the method name is the name of a field with a function or closure type,
+ // give a helping note that it has to be called as `(x.f)(...)`.
+ if let SelfSource::MethodCall(expr) = source {
+ if !self.suggest_field_call(span, rcvr_ty, expr, item_name, &mut err)
+ && lev_candidate.is_none()
+ && !custom_span_label
+ {
+ label_span_not_found(&mut err);
+ }
+ } else if !custom_span_label {
+ label_span_not_found(&mut err);
+ }
+
+ self.check_for_field_method(&mut err, source, span, actual, item_name);
+
+ self.check_for_unwrap_self(&mut err, source, span, actual, item_name);
+
+ bound_spans.sort();
+ bound_spans.dedup();
+ for (span, msg) in bound_spans.into_iter() {
+ err.span_label(span, &msg);
+ }
+
+                if !(actual.is_numeric() && actual.is_fresh() || restrict_type_params) {
+ self.suggest_traits_to_import(
+ &mut err,
+ span,
+ rcvr_ty,
+ item_name,
+ args.map(|args| args.len()),
+ source,
+ out_of_scope_traits,
+ &unsatisfied_predicates,
+ unsatisfied_bounds,
+ );
+ }
+
+ // Don't emit a suggestion if we found an actual method
+ // that had unsatisfied trait bounds
+ if unsatisfied_predicates.is_empty() && actual.is_enum() {
+ let adt_def = actual.ty_adt_def().expect("enum is not an ADT");
+ if let Some(suggestion) = lev_distance::find_best_match_for_name(
+ &adt_def.variants().iter().map(|s| s.name).collect::<Vec<_>>(),
+ item_name.name,
+ None,
+ ) {
+ err.span_suggestion(
+ span,
+ "there is a variant with a similar name",
+ suggestion,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ if item_name.name == sym::as_str && actual.peel_refs().is_str() {
+ let msg = "remove this method call";
+ let mut fallback_span = true;
+ if let SelfSource::MethodCall(expr) = source {
+ let call_expr =
+ self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id));
+ if let Some(span) = call_expr.span.trim_start(expr.span) {
+ err.span_suggestion(span, msg, "", Applicability::MachineApplicable);
+ fallback_span = false;
+ }
+ }
+ if fallback_span {
+ err.span_label(span, msg);
+ }
+ } else if let Some(lev_candidate) = lev_candidate {
+ // Don't emit a suggestion if we found an actual method
+ // that had unsatisfied trait bounds
+ if unsatisfied_predicates.is_empty() {
+ let def_kind = lev_candidate.kind.as_def_kind();
+ err.span_suggestion(
+ span,
+ &format!(
+ "there is {} {} with a similar name",
+ def_kind.article(),
+ def_kind.descr(lev_candidate.def_id),
+ ),
+ lev_candidate.name,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ return Some(err);
+ }
+
+ MethodError::Ambiguity(sources) => {
+ let mut err = struct_span_err!(
+ self.sess(),
+ item_name.span,
+ E0034,
+ "multiple applicable items in scope"
+ );
+ err.span_label(item_name.span, format!("multiple `{}` found", item_name));
+
+ report_candidates(span, &mut err, sources, sugg_span);
+ err.emit();
+ }
+
+ MethodError::PrivateMatch(kind, def_id, out_of_scope_traits) => {
+ let kind = kind.descr(def_id);
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ item_name.span,
+ E0624,
+ "{} `{}` is private",
+ kind,
+ item_name
+ );
+ err.span_label(item_name.span, &format!("private {}", kind));
+ let sp = self
+ .tcx
+ .hir()
+ .span_if_local(def_id)
+ .unwrap_or_else(|| self.tcx.def_span(def_id));
+ err.span_label(sp, &format!("private {} defined here", kind));
+ self.suggest_valid_traits(&mut err, out_of_scope_traits);
+ err.emit();
+ }
+
+ MethodError::IllegalSizedBound(candidates, needs_mut, bound_span) => {
+ let msg = format!("the `{}` method cannot be invoked on a trait object", item_name);
+ let mut err = self.sess().struct_span_err(span, &msg);
+ err.span_label(bound_span, "this has a `Sized` requirement");
+ if !candidates.is_empty() {
+ let help = format!(
+ "{an}other candidate{s} {were} found in the following trait{s}, perhaps \
+ add a `use` for {one_of_them}:",
+ an = if candidates.len() == 1 { "an" } else { "" },
+ s = pluralize!(candidates.len()),
+ were = pluralize!("was", candidates.len()),
+                    one_of_them = if candidates.len() == 1 { "it" } else { "one of them" },
+ );
+ self.suggest_use_candidates(&mut err, help, candidates);
+ }
+ if let ty::Ref(region, t_type, mutability) = rcvr_ty.kind() {
+ if needs_mut {
+ let trait_type = self.tcx.mk_ref(
+ *region,
+ ty::TypeAndMut { ty: *t_type, mutbl: mutability.invert() },
+ );
+ err.note(&format!("you need `{}` instead of `{}`", trait_type, rcvr_ty));
+ }
+ }
+ err.emit();
+ }
+
+ MethodError::BadReturnType => bug!("no return type expectations but got BadReturnType"),
+ }
+ None
+ }
+
+ fn suggest_field_call(
+ &self,
+ span: Span,
+ rcvr_ty: Ty<'tcx>,
+ expr: &hir::Expr<'_>,
+ item_name: Ident,
+ err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ) -> bool {
+ let tcx = self.tcx;
+ let field_receiver = self.autoderef(span, rcvr_ty).find_map(|(ty, _)| match ty.kind() {
+ ty::Adt(def, substs) if !def.is_enum() => {
+ let variant = &def.non_enum_variant();
+ tcx.find_field_index(item_name, variant).map(|index| {
+ let field = &variant.fields[index];
+ let field_ty = field.ty(tcx, substs);
+ (field, field_ty)
+ })
+ }
+ _ => None,
+ });
+ if let Some((field, field_ty)) = field_receiver {
+ let scope = tcx.parent_module(self.body_id).to_def_id();
+ let is_accessible = field.vis.is_accessible_from(scope, tcx);
+
+ if is_accessible {
+ if self.is_fn_ty(field_ty, span) {
+ let expr_span = expr.span.to(item_name.span);
+ err.multipart_suggestion(
+ &format!(
+ "to call the function stored in `{}`, \
+ surround the field access with parentheses",
+ item_name,
+ ),
+ vec![
+ (expr_span.shrink_to_lo(), '('.to_string()),
+ (expr_span.shrink_to_hi(), ')'.to_string()),
+ ],
+ Applicability::MachineApplicable,
+ );
+ } else {
+ let call_expr = tcx.hir().expect_expr(tcx.hir().get_parent_node(expr.hir_id));
+
+ if let Some(span) = call_expr.span.trim_start(item_name.span) {
+ err.span_suggestion(
+ span,
+ "remove the arguments",
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ let field_kind = if is_accessible { "field" } else { "private field" };
+ err.span_label(item_name.span, format!("{}, not a method", field_kind));
+ return true;
+ }
+ false
+ }
+
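+ /// e.g. `2.0.powi(2)` fails on the ambiguous numeric type `{float}`
+ /// (E0689); we suggest `2.0_f32.powi(2)`, or annotating the binding
+ /// with a concrete type.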
+ fn suggest_constraining_numerical_ty(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ actual: Ty<'tcx>,
+ source: SelfSource<'_>,
+ span: Span,
+ item_kind: &str,
+ item_name: Ident,
+ ty_str: &str,
+ ) -> bool {
+ let found_candidate = all_traits(self.tcx)
+ .into_iter()
+ .any(|info| self.associated_value(info.def_id, item_name).is_some());
+ let found_assoc = |ty: Ty<'tcx>| {
+ simplify_type(tcx, ty, TreatParams::AsInfer)
+ .and_then(|simp| {
+ tcx.incoherent_impls(simp)
+ .iter()
+ .find_map(|&id| self.associated_value(id, item_name))
+ })
+ .is_some()
+ };
+ let found_candidate = found_candidate
+ || found_assoc(tcx.types.i8)
+ || found_assoc(tcx.types.i16)
+ || found_assoc(tcx.types.i32)
+ || found_assoc(tcx.types.i64)
+ || found_assoc(tcx.types.i128)
+ || found_assoc(tcx.types.u8)
+ || found_assoc(tcx.types.u16)
+ || found_assoc(tcx.types.u32)
+ || found_assoc(tcx.types.u64)
+ || found_assoc(tcx.types.u128)
+ || found_assoc(tcx.types.f32)
+ || found_assoc(tcx.types.f64);
+ if found_candidate
+ && actual.is_numeric()
+ && !actual.has_concrete_skeleton()
+ && let SelfSource::MethodCall(expr) = source
+ {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0689,
+ "can't call {} `{}` on ambiguous numeric type `{}`",
+ item_kind,
+ item_name,
+ ty_str
+ );
+ let concrete_type = if actual.is_integral() { "i32" } else { "f32" };
+ match expr.kind {
+ ExprKind::Lit(ref lit) => {
+ // numeric literal
+ let snippet = tcx
+ .sess
+ .source_map()
+ .span_to_snippet(lit.span)
+ .unwrap_or_else(|_| "<numeric literal>".to_owned());
+
+ // If this is a floating point literal that ends with '.',
+ // get rid of it to stop this from becoming a member access.
+ let snippet = snippet.strip_suffix('.').unwrap_or(&snippet);
+
+ err.span_suggestion(
+ lit.span,
+ &format!(
+ "you must specify a concrete type for this numeric value, \
+ like `{}`",
+ concrete_type
+ ),
+ format!("{snippet}_{concrete_type}"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ ExprKind::Path(QPath::Resolved(_, path)) => {
+ // local binding
+ if let hir::def::Res::Local(hir_id) = path.res {
+ let span = tcx.hir().span(hir_id);
+ let snippet = tcx.sess.source_map().span_to_snippet(span);
+ let filename = tcx.sess.source_map().span_to_filename(span);
+
+ let parent_node =
+ self.tcx.hir().get(self.tcx.hir().get_parent_node(hir_id));
+ let msg = format!(
+ "you must specify a type for this binding, like `{}`",
+ concrete_type,
+ );
+
+ match (filename, parent_node, snippet) {
+ (
+ FileName::Real(_),
+ Node::Local(hir::Local {
+ source: hir::LocalSource::Normal,
+ ty,
+ ..
+ }),
+ Ok(ref snippet),
+ ) => {
+ err.span_suggestion(
+ // account for `let x: _ = 42;` (the suggestion span covers `x: _`)
+ span.to(ty.as_ref().map(|ty| ty.span).unwrap_or(span)),
+ &msg,
+ format!("{}: {}", snippet, concrete_type),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {
+ err.span_label(span, msg);
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ err.emit();
+ return true;
+ }
+ false
+ }
+
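+ /// Suggests reaching the method through a (possibly nested) field, e.g.
+ /// for `foo.bar()` where `foo.inner` has a `bar` method, suggest
+ /// `foo.inner.bar()`.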
+ fn check_for_field_method(
+ &self,
+ err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ source: SelfSource<'tcx>,
+ span: Span,
+ actual: Ty<'tcx>,
+ item_name: Ident,
+ ) {
+ if let SelfSource::MethodCall(expr) = source
+ && let Some((fields, substs)) = self.get_field_candidates(span, actual)
+ {
+ let call_expr = self.tcx.hir().expect_expr(self.tcx.hir().get_parent_node(expr.hir_id));
+ for candidate_field in fields.iter() {
+ if let Some(field_path) = self.check_for_nested_field_satisfying(
+ span,
+ &|_, field_ty| {
+ self.lookup_probe(
+ span,
+ item_name,
+ field_ty,
+ call_expr,
+ ProbeScope::AllTraits,
+ )
+ .is_ok()
+ },
+ candidate_field,
+ substs,
+ vec![],
+ self.tcx.parent_module(expr.hir_id).to_def_id(),
+ ) {
+ let field_path_str = field_path
+ .iter()
+ .map(|id| id.name.to_ident_string())
+ .collect::<Vec<String>>()
+ .join(".");
+ debug!("field_path_str: {:?}", field_path_str);
+
+ err.span_suggestion_verbose(
+ item_name.span.shrink_to_lo(),
+ "one of the expressions' fields has a method of the same name",
+ format!("{field_path_str}."),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+
+ fn check_for_unwrap_self(
+ &self,
+ err: &mut DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ source: SelfSource<'tcx>,
+ span: Span,
+ actual: Ty<'tcx>,
+ item_name: Ident,
+ ) {
+ let tcx = self.tcx;
+ let SelfSource::MethodCall(expr) = source else { return; };
+ let call_expr = tcx.hir().expect_expr(tcx.hir().get_parent_node(expr.hir_id));
+
+ let ty::Adt(kind, substs) = actual.kind() else { return; };
+ if !kind.is_enum() {
+ return;
+ }
+
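+ // e.g. given `res: Result<Vec<u8>, E>` and a call `res.len()`, `len`
+ // exists on the `Ok` field type `Vec<u8>`, so we suggest `res?.len()`
+ // or `res.expect("REASON").len()`.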
+ let matching_variants: Vec<_> = kind
+ .variants()
+ .iter()
+ .flat_map(|variant| {
+ let [field] = &variant.fields[..] else { return None; };
+ let field_ty = field.ty(tcx, substs);
+
+ // Skip `_`, since that'll just lead to ambiguity.
+ if self.resolve_vars_if_possible(field_ty).is_ty_var() {
+ return None;
+ }
+
+ self.lookup_probe(span, item_name, field_ty, call_expr, ProbeScope::AllTraits)
+ .ok()
+ .map(|pick| (variant, field, pick))
+ })
+ .collect();
+
+ let ret_ty_matches = |diagnostic_item| {
+ if let Some(ret_ty) = self
+ .ret_coercion
+ .as_ref()
+ .map(|c| self.resolve_vars_if_possible(c.borrow().expected_ty()))
+ && let ty::Adt(kind, _) = ret_ty.kind()
+ && tcx.get_diagnostic_item(diagnostic_item) == Some(kind.did())
+ {
+ true
+ } else {
+ false
+ }
+ };
+
+ match &matching_variants[..] {
+ [(_, field, pick)] => {
+ let self_ty = field.ty(tcx, substs);
+ err.span_note(
+ tcx.def_span(pick.item.def_id),
+ &format!("the method `{item_name}` exists on the type `{self_ty}`"),
+ );
+ let (article, kind, variant, question) =
+ if Some(kind.did()) == tcx.get_diagnostic_item(sym::Result) {
+ ("a", "Result", "Err", ret_ty_matches(sym::Result))
+ } else if Some(kind.did()) == tcx.get_diagnostic_item(sym::Option) {
+ ("an", "Option", "None", ret_ty_matches(sym::Option))
+ } else {
+ return;
+ };
+ if question {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "use the `?` operator to extract the `{self_ty}` value, propagating \
+ {article} `{kind}::{variant}` value to the caller"
+ ),
+ "?",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_suggestion_verbose(
+ expr.span.shrink_to_hi(),
+ format!(
+ "consider using `{kind}::expect` to unwrap the `{self_ty}` value, \
+ panicking if the value is {article} `{kind}::{variant}`"
+ ),
+ ".expect(\"REASON\")",
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ // FIXME(compiler-errors): Support suggestions for other matching enum variants
+ _ => {}
+ }
+ }
+
+ pub(crate) fn note_unmet_impls_on_type(
+ &self,
+ err: &mut Diagnostic,
+ errors: Vec<FulfillmentError<'tcx>>,
+ ) {
+ let all_local_types_needing_impls =
+ errors.iter().all(|e| match e.obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => match pred.self_ty().kind() {
+ ty::Adt(def, _) => def.did().is_local(),
+ _ => false,
+ },
+ _ => false,
+ });
+ let mut preds: Vec<_> = errors
+ .iter()
+ .filter_map(|e| match e.obligation.predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(pred) => Some(pred),
+ _ => None,
+ })
+ .collect();
+ preds.sort_by_key(|pred| (pred.def_id(), pred.self_ty()));
+ let def_ids = preds
+ .iter()
+ .filter_map(|pred| match pred.self_ty().kind() {
+ ty::Adt(def, _) => Some(def.did()),
+ _ => None,
+ })
+ .collect::<FxHashSet<_>>();
+ let mut spans: MultiSpan = def_ids
+ .iter()
+ .filter_map(|def_id| {
+ let span = self.tcx.def_span(*def_id);
+ if span.is_dummy() { None } else { Some(span) }
+ })
+ .collect::<Vec<_>>()
+ .into();
+
+ for pred in &preds {
+ match pred.self_ty().kind() {
+ ty::Adt(def, _) if def.did().is_local() => {
+ spans.push_span_label(
+ self.tcx.def_span(def.did()),
+ format!("must implement `{}`", pred.trait_ref.print_only_trait_path()),
+ );
+ }
+ _ => {}
+ }
+ }
+
+ if all_local_types_needing_impls && spans.primary_span().is_some() {
+ let msg = if preds.len() == 1 {
+ format!(
+ "an implementation of `{}` might be missing for `{}`",
+ preds[0].trait_ref.print_only_trait_path(),
+ preds[0].self_ty()
+ )
+ } else {
+ format!(
+ "the following type{} would have to `impl` {} required trait{} for this \
+ operation to be valid",
+ pluralize!(def_ids.len()),
+ if def_ids.len() == 1 { "its" } else { "their" },
+ pluralize!(preds.len()),
+ )
+ };
+ err.span_note(spans, &msg);
+ }
+
+ let preds: Vec<_> = errors
+ .iter()
+ .map(|e| (e.obligation.predicate, None, Some(e.obligation.cause.clone())))
+ .collect();
+ self.suggest_derive(err, &preds);
+ }
+
+ fn suggest_derive(
+ &self,
+ err: &mut Diagnostic,
+ unsatisfied_predicates: &[(
+ ty::Predicate<'tcx>,
+ Option<ty::Predicate<'tcx>>,
+ Option<ObligationCause<'tcx>>,
+ )],
+ ) {
+ let mut derives = Vec::<(String, Span, Symbol)>::new();
+ let mut traits = Vec::<Span>::new();
+ for (pred, _, _) in unsatisfied_predicates {
+ let ty::PredicateKind::Trait(trait_pred) = pred.kind().skip_binder() else { continue };
+ let adt = match trait_pred.self_ty().ty_adt_def() {
+ Some(adt) if adt.did().is_local() => adt,
+ _ => continue,
+ };
+ if let Some(diagnostic_name) = self.tcx.get_diagnostic_name(trait_pred.def_id()) {
+ let can_derive = match diagnostic_name {
+ sym::Default => !adt.is_enum(),
+ sym::Eq
+ | sym::PartialEq
+ | sym::Ord
+ | sym::PartialOrd
+ | sym::Clone
+ | sym::Copy
+ | sym::Hash
+ | sym::Debug => true,
+ _ => false,
+ };
+ if can_derive {
+ let self_name = trait_pred.self_ty().to_string();
+ let self_span = self.tcx.def_span(adt.did());
+ if let Some(poly_trait_ref) = pred.to_opt_poly_trait_pred() {
+ for super_trait in supertraits(self.tcx, poly_trait_ref.to_poly_trait_ref())
+ {
+ if let Some(parent_diagnostic_name) =
+ self.tcx.get_diagnostic_name(super_trait.def_id())
+ {
+ derives.push((
+ self_name.clone(),
+ self_span,
+ parent_diagnostic_name,
+ ));
+ }
+ }
+ }
+ derives.push((self_name, self_span, diagnostic_name));
+ } else {
+ traits.push(self.tcx.def_span(trait_pred.def_id()));
+ }
+ } else {
+ traits.push(self.tcx.def_span(trait_pred.def_id()));
+ }
+ }
+ traits.sort();
+ traits.dedup();
+
+ derives.sort();
+ derives.dedup();
+
+ let mut derives_grouped = Vec::<(String, Span, String)>::new();
+ for (self_name, self_span, trait_name) in derives.into_iter() {
+ if let Some((last_self_name, _, ref mut last_trait_names)) = derives_grouped.last_mut()
+ {
+ if last_self_name == &self_name {
+ last_trait_names.push_str(format!(", {}", trait_name).as_str());
+ continue;
+ }
+ }
+ derives_grouped.push((self_name, self_span, trait_name.to_string()));
+ }
+
+ let len = traits.len();
+ if len > 0 {
+ let span: MultiSpan = traits.into();
+ err.span_note(
+ span,
+ &format!("the following trait{} must be implemented", pluralize!(len),),
+ );
+ }
+
+ for (self_name, self_span, traits) in &derives_grouped {
+ err.span_suggestion_verbose(
+ self_span.shrink_to_lo(),
+ &format!("consider annotating `{}` with `#[derive({})]`", self_name, traits),
+ format!("#[derive({})]\n", traits),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ /// Print out the type for use in value namespace.
+ fn ty_to_value_string(&self, ty: Ty<'tcx>) -> String {
+ match ty.kind() {
+ ty::Adt(def, substs) => format!("{}", ty::Instance::new(def.did(), substs)),
+ _ => self.ty_to_string(ty),
+ }
+ }
+
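+ /// e.g. for `async fn f() -> Foo`, a call `f().bar()` is missing an
+ /// `.await`; when `bar` exists on `Foo`, suggest `f().await.bar()`.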
+ fn suggest_await_before_method(
+ &self,
+ err: &mut Diagnostic,
+ item_name: Ident,
+ ty: Ty<'tcx>,
+ call: &hir::Expr<'_>,
+ span: Span,
+ ) {
+ let output_ty = match self.get_impl_future_output_ty(ty) {
+ Some(output_ty) => self.resolve_vars_if_possible(output_ty).skip_binder(),
+ _ => return,
+ };
+ let method_exists = self.method_exists(item_name, output_ty, call.hir_id, true);
+ debug!("suggest_await_before_method: is_method_exist={}", method_exists);
+ if method_exists {
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ "consider `await`ing on the `Future` and calling the method on its `Output`",
+ "await.",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+
+ fn suggest_use_candidates(&self, err: &mut Diagnostic, msg: String, candidates: Vec<DefId>) {
+ let parent_map = self.tcx.visible_parent_map(());
+
+ // Separate out candidates that must be imported with a glob, because they are named `_`
+ // and cannot be referred to by their identifier.
+ let (candidates, globs): (Vec<_>, Vec<_>) = candidates.into_iter().partition(|trait_did| {
+ if let Some(parent_did) = parent_map.get(trait_did) {
+ // If the item is re-exported as `_`, we should suggest a glob-import instead.
+ if *parent_did != self.tcx.parent(*trait_did)
+ && self
+ .tcx
+ .module_children(*parent_did)
+ .iter()
+ .filter(|child| child.res.opt_def_id() == Some(*trait_did))
+ .all(|child| child.ident.name == kw::Underscore)
+ {
+ return false;
+ }
+ }
+
+ true
+ });
+
+ let module_did = self.tcx.parent_module(self.body_id);
+ let (module, _, _) = self.tcx.hir().get_module(module_did);
+ let span = module.spans.inject_use_span;
+
+ let path_strings = candidates.iter().map(|trait_did| {
+ format!("use {};\n", with_crate_prefix!(self.tcx.def_path_str(*trait_did)),)
+ });
+
+ let glob_path_strings = globs.iter().map(|trait_did| {
+ let parent_did = parent_map.get(trait_did).unwrap();
+ format!(
+ "use {}::*; // trait {}\n",
+ with_crate_prefix!(self.tcx.def_path_str(*parent_did)),
+ self.tcx.item_name(*trait_did),
+ )
+ });
+
+ err.span_suggestions(
+ span,
+ &msg,
+ path_strings.chain(glob_path_strings),
+ Applicability::MaybeIncorrect,
+ );
+ }
+
+ fn suggest_valid_traits(
+ &self,
+ err: &mut Diagnostic,
+ valid_out_of_scope_traits: Vec<DefId>,
+ ) -> bool {
+ if !valid_out_of_scope_traits.is_empty() {
+ let mut candidates = valid_out_of_scope_traits;
+ candidates.sort();
+ candidates.dedup();
+
+ // Among the traits added to the prelude in Edition 2021, only `TryInto`
+ // has methods (`TryFrom` and `FromIterator` have none), so it is the
+ // only one worth pointing out below.
+ let edition_fix = candidates
+ .iter()
+ .find(|did| self.tcx.is_diagnostic_item(sym::TryInto, **did))
+ .copied();
+
+ err.help("items from traits can only be used if the trait is in scope");
+ let msg = format!(
+ "the following {traits_are} implemented but not in scope; \
+ perhaps add a `use` for {one_of_them}:",
+ traits_are = if candidates.len() == 1 { "trait is" } else { "traits are" },
+ one_of_them = if candidates.len() == 1 { "it" } else { "one of them" },
+ );
+
+ self.suggest_use_candidates(err, msg, candidates);
+ if let Some(did) = edition_fix {
+ err.note(&format!(
+ "'{}' is included in the prelude starting in Edition 2021",
+ with_crate_prefix!(self.tcx.def_path_str(did))
+ ));
+ }
+
+ true
+ } else {
+ false
+ }
+ }
+
+ fn suggest_traits_to_import(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ rcvr_ty: Ty<'tcx>,
+ item_name: Ident,
+ inputs_len: Option<usize>,
+ source: SelfSource<'tcx>,
+ valid_out_of_scope_traits: Vec<DefId>,
+ unsatisfied_predicates: &[(
+ ty::Predicate<'tcx>,
+ Option<ty::Predicate<'tcx>>,
+ Option<ObligationCause<'tcx>>,
+ )],
+ unsatisfied_bounds: bool,
+ ) {
+ let mut alt_rcvr_sugg = false;
+ if let (SelfSource::MethodCall(rcvr), false) = (source, unsatisfied_bounds) {
+ debug!(?span, ?item_name, ?rcvr_ty, ?rcvr);
+ let skippable = [
+ self.tcx.lang_items().clone_trait(),
+ self.tcx.lang_items().deref_trait(),
+ self.tcx.lang_items().deref_mut_trait(),
+ self.tcx.lang_items().drop_trait(),
+ self.tcx.get_diagnostic_item(sym::AsRef),
+ ];
+ // Try alternative arbitrary self types that could fulfill this call.
+ // FIXME: probe for all types that *could* be arbitrary self-types, not
+ // just this list.
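+ // e.g. when `foo.bar()` fails for `foo: Foo`, we also probe `&mut Foo`
+ // and `&Foo`, and, below, wrappers like `Box<Foo>`, `Pin<Foo>`,
+ // `Arc<Foo>`, and `Rc<Foo>`.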
+ for (rcvr_ty, post) in &[
+ (rcvr_ty, ""),
+ (self.tcx.mk_mut_ref(self.tcx.lifetimes.re_erased, rcvr_ty), "&mut "),
+ (self.tcx.mk_imm_ref(self.tcx.lifetimes.re_erased, rcvr_ty), "&"),
+ ] {
+ match self.lookup_probe(span, item_name, *rcvr_ty, rcvr, ProbeScope::AllTraits) {
+ Ok(pick) => {
+ // If the method is defined for the receiver we have, it likely wasn't `use`d.
+ // We point at the method, but we just skip the rest of the check for arbitrary
+ // self types and rely on the suggestion to `use` the trait from
+ // `suggest_valid_traits`.
+ let did = Some(pick.item.container_id(self.tcx));
+ let skip = skippable.contains(&did);
+ if pick.autoderefs == 0 && !skip {
+ err.span_label(
+ pick.item.ident(self.tcx).span,
+ &format!("the method is available for `{}` here", rcvr_ty),
+ );
+ }
+ break;
+ }
+ Err(MethodError::Ambiguity(_)) => {
+ // If the method is defined (but ambiguous) for the receiver we have, it is also
+ // likely we haven't `use`d it. Wrapping the receiver in `Box`/`Pin`/etc.
+ // might disambiguate the method, but such suggestions are generally
+ // misleading (see #94218).
+ break;
+ }
+ _ => {}
+ }
+
+ for (rcvr_ty, pre) in &[
+ (self.tcx.mk_lang_item(*rcvr_ty, LangItem::OwnedBox), "Box::new"),
+ (self.tcx.mk_lang_item(*rcvr_ty, LangItem::Pin), "Pin::new"),
+ (self.tcx.mk_diagnostic_item(*rcvr_ty, sym::Arc), "Arc::new"),
+ (self.tcx.mk_diagnostic_item(*rcvr_ty, sym::Rc), "Rc::new"),
+ ] {
+ if let Some(new_rcvr_t) = *rcvr_ty
+ && let Ok(pick) = self.lookup_probe(
+ span,
+ item_name,
+ new_rcvr_t,
+ rcvr,
+ ProbeScope::AllTraits,
+ )
+ {
+ debug!("try_alt_rcvr: pick candidate {:?}", pick);
+ let did = Some(pick.item.container_id(self.tcx));
+ // We don't want to suggest a container type when the missing
+ // method is `.clone()` or `.deref()` otherwise we'd suggest
+ // `Arc::new(foo).clone()`, which is far from what the user wants.
+ // Explicitly ignore the `Pin::as_ref()` method as `Pin` does not
+ // implement the `AsRef` trait.
+ let skip = skippable.contains(&did)
+ || (("Pin::new" == *pre) && (sym::as_ref == item_name.name))
+ || inputs_len.map_or(false, |inputs_len| pick.item.kind == ty::AssocKind::Fn && self.tcx.fn_sig(pick.item.def_id).skip_binder().inputs().len() != inputs_len);
+ // Make sure the method is defined for the *actual* receiver: we don't
+ // want to treat `Box<Self>` as a receiver if it only works because of
+ // an autoderef to `&self`
+ if pick.autoderefs == 0 && !skip {
+ err.span_label(
+ pick.item.ident(self.tcx).span,
+ &format!("the method is available for `{}` here", new_rcvr_t),
+ );
+ err.multipart_suggestion(
+ "consider wrapping the receiver expression with the \
+ appropriate type",
+ vec![
+ (rcvr.span.shrink_to_lo(), format!("{}({}", pre, post)),
+ (rcvr.span.shrink_to_hi(), ")".to_string()),
+ ],
+ Applicability::MaybeIncorrect,
+ );
+ // We don't care about the other suggestions.
+ alt_rcvr_sugg = true;
+ }
+ }
+ }
+ }
+ }
+ if self.suggest_valid_traits(err, valid_out_of_scope_traits) {
+ return;
+ }
+
+ let type_is_local = self.type_derefs_to_local(span, rcvr_ty, source);
+
+ let mut arbitrary_rcvr = vec![];
+ // There are no traits implemented, so let's suggest some traits to
+ // implement by finding ones that have the item name and are legal to
+ // implement.
+ let mut candidates = all_traits(self.tcx)
+ .into_iter()
+ // Don't issue suggestions for unstable traits since they're
+ // unlikely to be implementable anyway
+ .filter(|info| match self.tcx.lookup_stability(info.def_id) {
+ Some(attr) => attr.level.is_stable(),
+ None => true,
+ })
+ .filter(|info| {
+ // We approximate the coherence rules to only suggest
+ // traits that are legal to implement by requiring that
+ // either the type or trait is local. Multi-dispatch means
+ // this isn't perfect (that is, there are cases when
+ // implementing a trait would be legal but is rejected
+ // here).
+ unsatisfied_predicates.iter().all(|(p, _, _)| {
+ match p.kind().skip_binder() {
+ // Hide traits if they are present in predicates as they can be fixed without
+ // having to implement them.
+ ty::PredicateKind::Trait(t) => t.def_id() == info.def_id,
+ ty::PredicateKind::Projection(p) => {
+ p.projection_ty.item_def_id == info.def_id
+ }
+ _ => false,
+ }
+ }) && (type_is_local || info.def_id.is_local())
+ && self
+ .associated_value(info.def_id, item_name)
+ .filter(|item| {
+ if let ty::AssocKind::Fn = item.kind {
+ let id = item
+ .def_id
+ .as_local()
+ .map(|def_id| self.tcx.hir().local_def_id_to_hir_id(def_id));
+ if let Some(hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Fn(fn_sig, method),
+ ..
+ })) = id.map(|id| self.tcx.hir().get(id))
+ {
+ let self_first_arg = match method {
+ hir::TraitFn::Required([ident, ..]) => {
+ ident.name == kw::SelfLower
+ }
+ hir::TraitFn::Provided(body_id) => {
+ self.tcx.hir().body(*body_id).params.first().map_or(
+ false,
+ |param| {
+ matches!(
+ param.pat.kind,
+ hir::PatKind::Binding(_, _, ident, _)
+ if ident.name == kw::SelfLower
+ )
+ },
+ )
+ }
+ _ => false,
+ };
+
+ if !fn_sig.decl.implicit_self.has_implicit_self()
+ && self_first_arg
+ {
+ if let Some(ty) = fn_sig.decl.inputs.get(0) {
+ arbitrary_rcvr.push(ty.span);
+ }
+ return false;
+ }
+ }
+ }
+ // We only want to suggest public or local traits (#45781).
+ item.visibility(self.tcx).is_public() || info.def_id.is_local()
+ })
+ .is_some()
+ })
+ .collect::<Vec<_>>();
+ for span in &arbitrary_rcvr {
+ err.span_label(
+ *span,
+ "the method might not be found because of this arbitrary self type",
+ );
+ }
+ if alt_rcvr_sugg {
+ return;
+ }
+
+ if !candidates.is_empty() {
+ // Sort from most relevant to least relevant.
+ candidates.sort_by(|a, b| a.cmp(b).reverse());
+ candidates.dedup();
+
+ let param_type = match rcvr_ty.kind() {
+ ty::Param(param) => Some(param),
+ ty::Ref(_, ty, _) => match ty.kind() {
+ ty::Param(param) => Some(param),
+ _ => None,
+ },
+ _ => None,
+ };
+ err.help(if param_type.is_some() {
+ "items from traits can only be used if the type parameter is bounded by the trait"
+ } else {
+ "items from traits can only be used if the trait is implemented and in scope"
+ });
+ let candidates_len = candidates.len();
+ let message = |action| {
+ format!(
+ "the following {traits_define} an item `{name}`, perhaps you need to {action} \
+ {one_of_them}:",
+ traits_define =
+ if candidates_len == 1 { "trait defines" } else { "traits define" },
+ action = action,
+ one_of_them = if candidates_len == 1 { "it" } else { "one of them" },
+ name = item_name,
+ )
+ };
+ // Obtain the span for `param` and use it for a structured suggestion.
+ if let Some(param) = param_type {
+ let generics = self.tcx.generics_of(self.body_id.owner.to_def_id());
+ let type_param = generics.type_param(param, self.tcx);
+ let hir = self.tcx.hir();
+ if let Some(def_id) = type_param.def_id.as_local() {
+ let id = hir.local_def_id_to_hir_id(def_id);
+ // Get the `hir::Param` to verify whether it already has any bounds.
+ // We do this to avoid suggesting code that ends up as `T: FooBar`;
+ // instead, we suggest `T: Foo + Bar` in that case.
+ match hir.get(id) {
+ Node::GenericParam(param) => {
+ enum Introducer {
+ Plus,
+ Colon,
+ Nothing,
+ }
+ let ast_generics = hir.get_generics(id.owner).unwrap();
+ let (sp, mut introducer) = if let Some(span) =
+ ast_generics.bounds_span_for_suggestions(def_id)
+ {
+ (span, Introducer::Plus)
+ } else if let Some(colon_span) = param.colon_span {
+ (colon_span.shrink_to_hi(), Introducer::Nothing)
+ } else {
+ (param.span.shrink_to_hi(), Introducer::Colon)
+ };
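+ // e.g. `fn f<T: Foo>(..)` appends ` + Bar` after `Foo`,
+ // `fn f<T:>(..)` appends `Bar` after the colon, and
+ // `fn f<T>(..)` appends `: Bar` after `T`.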
+ if matches!(
+ param.kind,
+ hir::GenericParamKind::Type { synthetic: true, .. },
+ ) {
+ introducer = Introducer::Plus
+ }
+ let trait_def_ids: FxHashSet<DefId> = ast_generics
+ .bounds_for_param(def_id)
+ .flat_map(|bp| bp.bounds.iter())
+ .filter_map(|bound| bound.trait_ref()?.trait_def_id())
+ .collect();
+ if !candidates.iter().any(|t| trait_def_ids.contains(&t.def_id)) {
+ err.span_suggestions(
+ sp,
+ &message(format!(
+ "restrict type parameter `{}` with",
+ param.name.ident(),
+ )),
+ candidates.iter().map(|t| {
+ format!(
+ "{} {}",
+ match introducer {
+ Introducer::Plus => " +",
+ Introducer::Colon => ":",
+ Introducer::Nothing => "",
+ },
+ self.tcx.def_path_str(t.def_id),
+ )
+ }),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ return;
+ }
+ Node::Item(hir::Item {
+ kind: hir::ItemKind::Trait(.., bounds, _),
+ ident,
+ ..
+ }) => {
+ let (sp, sep, article) = if bounds.is_empty() {
+ (ident.span.shrink_to_hi(), ":", "a")
+ } else {
+ (bounds.last().unwrap().span().shrink_to_hi(), " +", "another")
+ };
+ err.span_suggestions(
+ sp,
+ &message(format!("add {} supertrait for", article)),
+ candidates.iter().map(|t| {
+ format!("{} {}", sep, self.tcx.def_path_str(t.def_id),)
+ }),
+ Applicability::MaybeIncorrect,
+ );
+ return;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ let (potential_candidates, explicitly_negative) = if param_type.is_some() {
+ // FIXME: Even though negative bounds are not implemented, we could maybe handle
+ // cases where a positive bound implies a negative impl.
+ (candidates, Vec::new())
+ } else if let Some(simp_rcvr_ty) =
+ simplify_type(self.tcx, rcvr_ty, TreatParams::AsPlaceholder)
+ {
+ let mut potential_candidates = Vec::new();
+ let mut explicitly_negative = Vec::new();
+ for candidate in candidates {
+ // Check if there's a negative impl of `candidate` for `rcvr_ty`
+ if self
+ .tcx
+ .all_impls(candidate.def_id)
+ .filter(|imp_did| {
+ self.tcx.impl_polarity(*imp_did) == ty::ImplPolarity::Negative
+ })
+ .any(|imp_did| {
+ let imp = self.tcx.impl_trait_ref(imp_did).unwrap();
+ let imp_simp =
+ simplify_type(self.tcx, imp.self_ty(), TreatParams::AsPlaceholder);
+ imp_simp.map_or(false, |s| s == simp_rcvr_ty)
+ })
+ {
+ explicitly_negative.push(candidate);
+ } else {
+ potential_candidates.push(candidate);
+ }
+ }
+ (potential_candidates, explicitly_negative)
+ } else {
+ // We don't know enough about `rcvr_ty` to make proper suggestions.
+ (candidates, Vec::new())
+ };
+
+ let action = if let Some(param) = param_type {
+ format!("restrict type parameter `{}` with", param)
+ } else {
+ // FIXME: it might only need to be imported into scope, not implemented.
+ "implement".to_string()
+ };
+ match &potential_candidates[..] {
+ [] => {}
+ [trait_info] if trait_info.def_id.is_local() => {
+ err.span_note(
+ self.tcx.def_span(trait_info.def_id),
+ &format!(
+ "`{}` defines an item `{}`, perhaps you need to {} it",
+ self.tcx.def_path_str(trait_info.def_id),
+ item_name,
+ action
+ ),
+ );
+ }
+ trait_infos => {
+ let mut msg = message(action);
+ for (i, trait_info) in trait_infos.iter().enumerate() {
+ msg.push_str(&format!(
+ "\ncandidate #{}: `{}`",
+ i + 1,
+ self.tcx.def_path_str(trait_info.def_id),
+ ));
+ }
+ err.note(&msg);
+ }
+ }
+ match &explicitly_negative[..] {
+ [] => {}
+ [trait_info] => {
+ let msg = format!(
+ "the trait `{}` defines an item `{}`, but is explicitly unimplemented",
+ self.tcx.def_path_str(trait_info.def_id),
+ item_name
+ );
+ err.note(&msg);
+ }
+ trait_infos => {
+ let mut msg = format!(
+ "the following traits define an item `{}`, but are explicitly unimplemented:",
+ item_name
+ );
+ for trait_info in trait_infos {
+ msg.push_str(&format!("\n{}", self.tcx.def_path_str(trait_info.def_id)));
+ }
+ err.note(&msg);
+ }
+ }
+ }
+ }
+
+ /// Checks whether there is a local type somewhere in the chain of
+ /// autoderefs of `rcvr_ty`.
+ fn type_derefs_to_local(
+ &self,
+ span: Span,
+ rcvr_ty: Ty<'tcx>,
+ source: SelfSource<'tcx>,
+ ) -> bool {
+ fn is_local(ty: Ty<'_>) -> bool {
+ match ty.kind() {
+ ty::Adt(def, _) => def.did().is_local(),
+ ty::Foreign(did) => did.is_local(),
+ ty::Dynamic(tr, ..) => tr.principal().map_or(false, |d| d.def_id().is_local()),
+ ty::Param(_) => true,
+
+ // Everything else (primitive types, etc.) is effectively
+ // non-local (there are "edge" cases, e.g., `(LocalType,)`, but
+ // the noise from these sorts of types is usually just really
+ // annoying, rather than any sort of help).
+ _ => false,
+ }
+ }
+
+ // This occurs for UFCS desugaring of `T::method`, where there is no
+ // receiver expression for the method call, and thus no autoderef.
+ if let SelfSource::QPath(_) = source {
+ return is_local(self.resolve_vars_with_obligations(rcvr_ty));
+ }
+
+ self.autoderef(span, rcvr_ty).any(|(ty, _)| is_local(ty))
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum SelfSource<'a> {
+ QPath(&'a hir::Ty<'a>),
+ MethodCall(&'a hir::Expr<'a> /* rcvr */),
+}
+
+#[derive(Copy, Clone)]
+pub struct TraitInfo {
+ pub def_id: DefId,
+}
+
+impl PartialEq for TraitInfo {
+ fn eq(&self, other: &TraitInfo) -> bool {
+ self.cmp(other) == Ordering::Equal
+ }
+}
+impl Eq for TraitInfo {}
+impl PartialOrd for TraitInfo {
+ fn partial_cmp(&self, other: &TraitInfo) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+impl Ord for TraitInfo {
+ fn cmp(&self, other: &TraitInfo) -> Ordering {
+ // Local crates are more important than remote ones (local:
+ // `cnum == 0`), and otherwise we throw in the defid for totality.
+
+ let lhs = (other.def_id.krate, other.def_id);
+ let rhs = (self.def_id.krate, self.def_id);
+ lhs.cmp(&rhs)
+ }
+}
+
+/// Retrieves all traits in this crate and any dependent crates,
+/// and wraps them into `TraitInfo` for custom sorting.
+pub fn all_traits(tcx: TyCtxt<'_>) -> Vec<TraitInfo> {
+ tcx.all_traits().map(|def_id| TraitInfo { def_id }).collect()
+}
+
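+/// Emits a structured suggestion that disambiguates the call with a
+/// qualified path, e.g. `Trait::method(receiver, arg)`, or
+/// `<Type as Trait>::method(..)` when the item has no `self` parameter.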
+fn print_disambiguation_help<'tcx>(
+ item_name: Ident,
+ args: Option<&'tcx [hir::Expr<'tcx>]>,
+ err: &mut Diagnostic,
+ trait_name: String,
+ rcvr_ty: Ty<'_>,
+ kind: ty::AssocKind,
+ def_id: DefId,
+ span: Span,
+ candidate: Option<usize>,
+ source_map: &source_map::SourceMap,
+ fn_has_self_parameter: bool,
+) {
+ let mut applicability = Applicability::MachineApplicable;
+ let (span, sugg) = if let (ty::AssocKind::Fn, Some(args)) = (kind, args) {
+ let args = format!(
+ "({}{})",
+ if rcvr_ty.is_region_ptr() {
+ if rcvr_ty.is_mutable_ptr() { "&mut " } else { "&" }
+ } else {
+ ""
+ },
+ args.iter()
+ .map(|arg| source_map.span_to_snippet(arg.span).unwrap_or_else(|_| {
+ applicability = Applicability::HasPlaceholders;
+ "_".to_owned()
+ }))
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+ let trait_name = if !fn_has_self_parameter {
+ format!("<{} as {}>", rcvr_ty, trait_name)
+ } else {
+ trait_name
+ };
+ (span, format!("{}::{}{}", trait_name, item_name, args))
+ } else {
+ (span.with_hi(item_name.span.lo()), format!("<{} as {}>::", rcvr_ty, trait_name))
+ };
+ err.span_suggestion_verbose(
+ span,
+ &format!(
+ "disambiguate the {} for {}",
+ kind.as_def_kind().descr(def_id),
+ if let Some(candidate) = candidate {
+ format!("candidate #{}", candidate)
+ } else {
+ "the candidate".to_string()
+ },
+ ),
+ sugg,
+ applicability,
+ );
+}
diff --git a/compiler/rustc_typeck/src/check/mod.rs b/compiler/rustc_typeck/src/check/mod.rs
new file mode 100644
index 000000000..17c2e4868
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/mod.rs
@@ -0,0 +1,970 @@
+/*!
+
+# typeck: check phase
+
+Within the check phase of type check, we check each item one at a time
+(bodies of function expressions are checked as part of the containing
+function). Inference is used to supply types wherever they are unknown.
+
+By far the most complex case is checking the body of a function. This
+can be broken down into several distinct phases:
+
+- gather: creates type variables to represent the type of each local
+ variable and pattern binding.
+
+- main: the main pass does the lion's share of the work: it
+ determines the types of all expressions, resolves
+ methods, checks for most invalid conditions, and so forth. In
+ some cases, where a type is unknown, it may create a type or region
+ variable and use that as the type of an expression.
+
+ In the process of checking, various constraints will be placed on
+ these type variables through the subtyping relationships requested
+ through the `demand` module. The `infer` module is in charge
+ of resolving those constraints.
+
+- regionck: after main is complete, the regionck pass goes over all
+ types looking for regions and making sure that they did not escape
+ into places where they are not in scope. This may also influence the
+ final assignments of the various region variables if there is some
+ flexibility.
+
+- writeback: writes the final types within a function body, replacing
+ type variables with their final inferred types. These final types
+ are written into the `tcx.node_types` table, which should *never* contain
+ any reference to a type variable.
+
+## Intermediate types
+
+While type checking a function, the intermediate types for the
+expressions, blocks, and so forth contained within the function are
+stored in `fcx.node_types` and `fcx.node_substs`. These types
+may contain unresolved type variables. After type checking is
+complete, the functions in the writeback module are used to take the
+types from this table, resolve them, and then write them into their
+permanent home in the type context `tcx`.
+
+This means that during inference you should use `fcx.write_ty()`
+and `fcx.expr_ty()` / `fcx.node_ty()` to write/obtain the types of
+nodes within the function.
+
+The types of top-level items, which never contain unbound type
+variables, are stored directly into the `tcx` typeck_results.
+
+N.B., a type variable is not the same thing as a type parameter. A
+type variable is an instance of a type parameter. That is,
+given a generic function `fn foo<T>(t: T)`, while checking the
+function `foo`, the type `ty_param(0)` refers to the type `T`, which
+is treated abstractly. However, when `foo()` is called, `T` will be
+substituted for a fresh type variable `N`. This variable will
+eventually be resolved to some concrete type (which might itself be
+a type parameter).
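+
+For example (a sketch of the idea, not part of the compiler itself):
+
+```rust
+fn foo<T>(t: T) -> T { t }
+
+fn main() {
+    // While checking `foo`, `T` is an abstract type parameter.
+    // At this call site, a fresh type variable is created for `T`;
+    // it is eventually resolved to the concrete type `i32`.
+    let _x = foo(1i32);
+}
+```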
+
+*/
+
+pub mod _match;
+mod autoderef;
+mod callee;
+pub mod cast;
+mod check;
+mod closure;
+pub mod coercion;
+mod compare_method;
+pub mod demand;
+mod diverges;
+pub mod dropck;
+mod expectation;
+mod expr;
+mod fallback;
+mod fn_ctxt;
+mod gather_locals;
+mod generator_interior;
+mod inherited;
+pub mod intrinsic;
+mod intrinsicck;
+pub mod method;
+mod op;
+mod pat;
+mod place_op;
+mod region;
+pub mod regionck;
+pub mod rvalue_scopes;
+mod upvar;
+pub mod wfcheck;
+pub mod writeback;
+
+use check::{check_abi, check_fn, check_mod_item_types};
+pub use diverges::Diverges;
+pub use expectation::Expectation;
+pub use fn_ctxt::*;
+use hir::def::CtorOf;
+pub use inherited::{Inherited, InheritedBuilder};
+
+use crate::astconv::AstConv;
+use crate::check::gather_locals::GatherLocalsVisitor;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, DiagnosticBuilder, EmissionGuarantee, MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::{HirIdMap, ImplicitSelfKind, Node};
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::Idx;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::{InternalSubsts, Subst, SubstsRef};
+use rustc_middle::ty::{self, Ty, TyCtxt, UserType};
+use rustc_session::config;
+use rustc_session::parse::feature_err;
+use rustc_session::Session;
+use rustc_span::source_map::DUMMY_SP;
+use rustc_span::symbol::{kw, Ident};
+use rustc_span::{self, BytePos, Span};
+use rustc_target::abi::VariantIdx;
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::traits;
+use rustc_trait_selection::traits::error_reporting::recursive_type_with_infinite_size_error;
+use rustc_trait_selection::traits::error_reporting::suggestions::ReturnsVisitor;
+use std::cell::RefCell;
+
+use crate::require_c_abi_if_c_variadic;
+use crate::util::common::indenter;
+
+use self::coercion::DynamicCoerceMany;
+use self::region::region_scope_tree;
+pub use self::Expectation::*;
+
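+/// Like `struct_span_err!`, but when the given type already references a
+/// type error, the diagnostic is downgraded to a delayed bug to avoid
+/// cascading errors.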
+#[macro_export]
+macro_rules! type_error_struct {
+ ($session:expr, $span:expr, $typ:expr, $code:ident, $($message:tt)*) => ({
+ let mut err = rustc_errors::struct_span_err!($session, $span, $code, $($message)*);
+
+ if $typ.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
+
+ err
+ })
+}
+
+/// The type of a local binding, including the revealed type for anon types.
+#[derive(Copy, Clone, Debug)]
+pub struct LocalTy<'tcx> {
+ decl_ty: Ty<'tcx>,
+ revealed_ty: Ty<'tcx>,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Needs {
+ MutPlace,
+ None,
+}
+
+impl Needs {
+ fn maybe_mut_place(m: hir::Mutability) -> Self {
+ match m {
+ hir::Mutability::Mut => Needs::MutPlace,
+ hir::Mutability::Not => Needs::None,
+ }
+ }
+}
+
+#[derive(Copy, Clone)]
+pub struct UnsafetyState {
+ pub def: hir::HirId,
+ pub unsafety: hir::Unsafety,
+ from_fn: bool,
+}
+
+impl UnsafetyState {
+ pub fn function(unsafety: hir::Unsafety, def: hir::HirId) -> UnsafetyState {
+ UnsafetyState { def, unsafety, from_fn: true }
+ }
+
+ pub fn recurse(self, blk: &hir::Block<'_>) -> UnsafetyState {
+ use hir::BlockCheckMode;
+ match self.unsafety {
+ // If this is unsafe, and the outer function was already marked as
+ // unsafe, we shouldn't attribute the unsafeness to the block. This
+ // way the block can be warned about instead of ignoring this
+ // extraneous block (functions are never warned about).
+ hir::Unsafety::Unsafe if self.from_fn => self,
+
+ unsafety => {
+ let (unsafety, def) = match blk.rules {
+ BlockCheckMode::UnsafeBlock(..) => (hir::Unsafety::Unsafe, blk.hir_id),
+ BlockCheckMode::DefaultBlock => (unsafety, self.def),
+ };
+ UnsafetyState { def, unsafety, from_fn: false }
+ }
+ }
+ }
+}
+
+#[derive(Debug, Copy, Clone)]
+pub enum PlaceOp {
+ Deref,
+ Index,
+}
+
+pub struct BreakableCtxt<'tcx> {
+ may_break: bool,
+
+ // this is `None` for loops where `break` with a value is illegal,
+ // such as `while`, `for`, and `while let`
+ coerce: Option<DynamicCoerceMany<'tcx>>,
+}
+
+pub struct EnclosingBreakables<'tcx> {
+ stack: Vec<BreakableCtxt<'tcx>>,
+ by_id: HirIdMap<usize>,
+}
+
+impl<'tcx> EnclosingBreakables<'tcx> {
+ fn find_breakable(&mut self, target_id: hir::HirId) -> &mut BreakableCtxt<'tcx> {
+ self.opt_find_breakable(target_id).unwrap_or_else(|| {
+ bug!("could not find enclosing breakable with id {}", target_id);
+ })
+ }
+
+ fn opt_find_breakable(&mut self, target_id: hir::HirId) -> Option<&mut BreakableCtxt<'tcx>> {
+ match self.by_id.get(&target_id) {
+ Some(ix) => Some(&mut self.stack[*ix]),
+ None => None,
+ }
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ method::provide(providers);
+ wfcheck::provide(providers);
+ *providers = Providers {
+ typeck_item_bodies,
+ typeck_const_arg,
+ typeck,
+ diagnostic_only_typeck,
+ has_typeck_results,
+ adt_destructor,
+ used_trait_imports,
+ check_mod_item_types,
+ region_scope_tree,
+ ..*providers
+ };
+}
+
+fn adt_destructor(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ty::Destructor> {
+ tcx.calculate_dtor(def_id, dropck::check_drop_impl)
+}
+
+/// If this `DefId` is a "primary tables entry", returns
+/// `Some((body_id, body_ty, fn_sig))`. Otherwise, returns `None`.
+///
+/// If this function returns `Some`, then `typeck_results(def_id)` will
+/// succeed; if it returns `None`, then `typeck_results(def_id)` may or
+/// may not succeed. In some cases where this function returns `None`
+/// (notably closures), `typeck_results(def_id)` would wind up
+/// redirecting to the owning function.
+fn primary_body_of(
+ tcx: TyCtxt<'_>,
+ id: hir::HirId,
+) -> Option<(hir::BodyId, Option<&hir::Ty<'_>>, Option<&hir::FnSig<'_>>)> {
+ match tcx.hir().get(id) {
+ Node::Item(item) => match item.kind {
+ hir::ItemKind::Const(ty, body) | hir::ItemKind::Static(ty, _, body) => {
+ Some((body, Some(ty), None))
+ }
+ hir::ItemKind::Fn(ref sig, .., body) => Some((body, None, Some(sig))),
+ _ => None,
+ },
+ Node::TraitItem(item) => match item.kind {
+ hir::TraitItemKind::Const(ty, Some(body)) => Some((body, Some(ty), None)),
+ hir::TraitItemKind::Fn(ref sig, hir::TraitFn::Provided(body)) => {
+ Some((body, None, Some(sig)))
+ }
+ _ => None,
+ },
+ Node::ImplItem(item) => match item.kind {
+ hir::ImplItemKind::Const(ty, body) => Some((body, Some(ty), None)),
+ hir::ImplItemKind::Fn(ref sig, body) => Some((body, None, Some(sig))),
+ _ => None,
+ },
+ Node::AnonConst(constant) => Some((constant.body, None, None)),
+ _ => None,
+ }
+}
+
+fn has_typeck_results(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ // Closures' typeck results come from their outermost function,
+ // as they are part of the same "inference environment".
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id);
+ if typeck_root_def_id != def_id {
+ return tcx.has_typeck_results(typeck_root_def_id);
+ }
+
+ if let Some(def_id) = def_id.as_local() {
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ primary_body_of(tcx, id).is_some()
+ } else {
+ false
+ }
+}
+
+fn used_trait_imports(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &FxHashSet<LocalDefId> {
+ &*tcx.typeck(def_id).used_trait_imports
+}
+
+fn typeck_const_arg<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (did, param_did): (LocalDefId, DefId),
+) -> &ty::TypeckResults<'tcx> {
+ let fallback = move || tcx.type_of(param_did);
+ typeck_with_fallback(tcx, did, fallback)
+}
+
+fn typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> {
+ if let Some(param_did) = tcx.opt_const_param_of(def_id) {
+ tcx.typeck_const_arg((def_id, param_did))
+ } else {
+ let fallback = move || tcx.type_of(def_id.to_def_id());
+ typeck_with_fallback(tcx, def_id, fallback)
+ }
+}
+
+/// Used only to get `TypeckResults` for type inference during error recovery.
+/// Currently only used for type inference of `static`s and `const`s to avoid type cycle errors.
+fn diagnostic_only_typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &ty::TypeckResults<'tcx> {
+ let fallback = move || {
+ let span = tcx.hir().span(tcx.hir().local_def_id_to_hir_id(def_id));
+ tcx.ty_error_with_message(span, "diagnostic only typeck table used")
+ };
+ typeck_with_fallback(tcx, def_id, fallback)
+}
+
+#[instrument(skip(tcx, fallback))]
+fn typeck_with_fallback<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ fallback: impl Fn() -> Ty<'tcx> + 'tcx,
+) -> &'tcx ty::TypeckResults<'tcx> {
+ // Closures' typeck results come from their outermost function,
+ // as they are part of the same "inference environment".
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id()).expect_local();
+ if typeck_root_def_id != def_id {
+ return tcx.typeck(typeck_root_def_id);
+ }
+
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let span = tcx.hir().span(id);
+
+ // Figure out what primary body this item has.
+ let (body_id, body_ty, fn_sig) = primary_body_of(tcx, id).unwrap_or_else(|| {
+ span_bug!(span, "can't type-check body of {:?}", def_id);
+ });
+ let body = tcx.hir().body(body_id);
+
+ let typeck_results = Inherited::build(tcx, def_id).enter(|inh| {
+ let param_env = tcx.param_env(def_id);
+ let fcx = if let Some(hir::FnSig { header, decl, .. }) = fn_sig {
+ let fn_sig = if crate::collect::get_infer_ret_ty(&decl.output).is_some() {
+ let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
+ <dyn AstConv<'_>>::ty_of_fn(&fcx, id, header.unsafety, header.abi, decl, None, None)
+ } else {
+ tcx.fn_sig(def_id)
+ };
+
+ check_abi(tcx, id, span, fn_sig.abi());
+
+ // Compute the function signature from point of view of inside the fn.
+ let fn_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), fn_sig);
+ let fn_sig = inh.normalize_associated_types_in(
+ body.value.span,
+ body_id.hir_id,
+ param_env,
+ fn_sig,
+ );
+ check_fn(&inh, param_env, fn_sig, decl, id, body, None, true).0
+ } else {
+ let fcx = FnCtxt::new(&inh, param_env, body.value.hir_id);
+ let expected_type = body_ty
+ .and_then(|ty| match ty.kind {
+ hir::TyKind::Infer => Some(<dyn AstConv<'_>>::ast_ty_to_ty(&fcx, ty)),
+ _ => None,
+ })
+ .unwrap_or_else(|| match tcx.hir().get(id) {
+ Node::AnonConst(_) => match tcx.hir().get(tcx.hir().get_parent_node(id)) {
+ Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::ConstBlock(ref anon_const),
+ ..
+ }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ }),
+ Node::Ty(&hir::Ty {
+ kind: hir::TyKind::Typeof(ref anon_const), ..
+ }) if anon_const.hir_id == id => fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span,
+ }),
+ Node::Expr(&hir::Expr { kind: hir::ExprKind::InlineAsm(asm), .. })
+ | Node::Item(&hir::Item { kind: hir::ItemKind::GlobalAsm(asm), .. }) => {
+ let operand_ty = asm
+ .operands
+ .iter()
+ .filter_map(|(op, _op_sp)| match op {
+ hir::InlineAsmOperand::Const { anon_const }
+ if anon_const.hir_id == id =>
+ {
+ // Inline assembly constants must be integers.
+ Some(fcx.next_int_var())
+ }
+ hir::InlineAsmOperand::SymFn { anon_const }
+ if anon_const.hir_id == id =>
+ {
+ Some(fcx.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span,
+ }))
+ }
+ _ => None,
+ })
+ .next();
+ operand_ty.unwrap_or_else(fallback)
+ }
+ _ => fallback(),
+ },
+ _ => fallback(),
+ });
+
+ let expected_type = fcx.normalize_associated_types_in(body.value.span, expected_type);
+ fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized);
+
+ // Gather locals in statics (because of block expressions).
+ GatherLocalsVisitor::new(&fcx).visit_body(body);
+
+ fcx.check_expr_coercable_to_type(&body.value, expected_type, None);
+
+ fcx.write_ty(id, expected_type);
+
+ fcx
+ };
+
+ let fallback_has_occurred = fcx.type_inference_fallback();
+
+ // Even though coercion casts provide type hints, we check casts after fallback for
+ // backwards compatibility. This makes fallback a stronger type hint than a cast coercion.
+ fcx.check_casts();
+ fcx.select_obligations_where_possible(fallback_has_occurred, |_| {});
+
+ // Closure and generator analysis may run after fallback
+ // because they don't constrain other type variables.
+ fcx.closure_analyze(body);
+ assert!(fcx.deferred_call_resolutions.borrow().is_empty());
+ // Before the generator analysis, temporary scopes must be marked to
+ // provide more precise information on the types to be captured.
+ fcx.resolve_rvalue_scopes(def_id.to_def_id());
+ fcx.resolve_generator_interiors(def_id.to_def_id());
+
+ for (ty, span, code) in fcx.deferred_sized_obligations.borrow_mut().drain(..) {
+ let ty = fcx.normalize_ty(span, ty);
+ fcx.require_type_is_sized(ty, span, code);
+ }
+
+ fcx.select_all_obligations_or_error();
+
+ if !fcx.infcx.is_tainted_by_errors() {
+ fcx.check_transmutes();
+ }
+
+ fcx.check_asms();
+
+ fcx.infcx.skip_region_resolution();
+
+ fcx.resolve_type_vars_in_body(body)
+ });
+
+ // Consistency check: make sure our TypeckResults instance can hold all
+ // the ItemLocalIds it will need to hold.
+ assert_eq!(typeck_results.hir_owner, id.owner);
+
+ typeck_results
+}
+
+/// When `check_fn` is invoked on a generator (i.e., a body that
+/// includes `yield`), it returns some information about the yield
+/// points.
+struct GeneratorTypes<'tcx> {
+ /// Type of generator argument / values returned by `yield`.
+ resume_ty: Ty<'tcx>,
+
+ /// Type of value that is yielded.
+ yield_ty: Ty<'tcx>,
+
+ /// Types that are captured (see `GeneratorInterior` for more).
+ interior: Ty<'tcx>,
+
+ /// Indicates if the generator is movable or static (immovable).
+ movability: hir::Movability,
+}
+
+/// Given a `DefId` for an opaque type in return position, find its parent item's return
+/// expressions.
+fn get_owner_return_paths<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+) -> Option<(LocalDefId, ReturnsVisitor<'tcx>)> {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let parent_id = tcx.hir().get_parent_item(hir_id);
+ tcx.hir().find_by_def_id(parent_id).and_then(|node| node.body_id()).map(|body_id| {
+ let body = tcx.hir().body(body_id);
+ let mut visitor = ReturnsVisitor::default();
+ visitor.visit_body(body);
+ (parent_id, visitor)
+ })
+}
+
+// Forbid defining intrinsics in Rust code,
+// as they must always be defined by the compiler.
+fn fn_maybe_err(tcx: TyCtxt<'_>, sp: Span, abi: Abi) {
+ if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = abi {
+ tcx.sess.span_err(sp, "intrinsic must be in `extern \"rust-intrinsic\" { ... }` block");
+ }
+}
+
+fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDefId) {
+ // Only restricted on wasm target for now
+ if !tcx.sess.target.is_like_wasm {
+ return;
+ }
+
+ // If `#[link_section]` is missing, then nothing to verify
+ let attrs = tcx.codegen_fn_attrs(id);
+ if attrs.link_section.is_none() {
+ return;
+ }
+
+ // For the wasm32 target statics with `#[link_section]` are placed into custom
+ // sections of the final output file, but this isn't like custom sections of
+ // other executable formats. Namely, we can only embed a list of bytes,
+ // nothing with pointers to anything else or relocations. If any relocations
+ // show up, reject them here.
+ // `#[link_section]` may contain arbitrary, or even undefined bytes, but it is
+ // the consumer's responsibility to ensure all bytes that have been read
+ // have defined values.
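+ // For example (illustrative only), on wasm a static like
+ // `#[link_section = ".custom"] static FOO: &u8 = &1;` would be rejected,
+ // since its initializer requires a relocation (a pointer).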
+ if let Ok(alloc) = tcx.eval_static_initializer(id.to_def_id())
+ && alloc.inner().relocations().len() != 0
+ {
+ let msg = "statics with a custom `#[link_section]` must be a \
+ simple list of bytes on the wasm target with no \
+ extra levels of indirection such as references";
+ tcx.sess.span_err(tcx.def_span(id), msg);
+ }
+}
+
+fn report_forbidden_specialization(
+ tcx: TyCtxt<'_>,
+ impl_item: &hir::ImplItemRef,
+ parent_impl: DefId,
+) {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_item.span,
+ E0520,
+ "`{}` specializes an item from a parent `impl`, but \
+ that item is not marked `default`",
+ impl_item.ident
+ );
+ err.span_label(impl_item.span, format!("cannot specialize default item `{}`", impl_item.ident));
+
+ match tcx.span_of_impl(parent_impl) {
+ Ok(span) => {
+ err.span_label(span, "parent `impl` is here");
+ err.note(&format!(
+ "to specialize, `{}` in the parent `impl` must be marked `default`",
+ impl_item.ident
+ ));
+ }
+ Err(cname) => {
+ err.note(&format!("parent implementation is in crate `{cname}`"));
+ }
+ }
+
+ err.emit();
+}
+
+fn missing_items_err(
+ tcx: TyCtxt<'_>,
+ impl_span: Span,
+ missing_items: &[&ty::AssocItem],
+ full_impl_span: Span,
+) {
+ let missing_items_msg = missing_items
+ .iter()
+ .map(|trait_item| trait_item.name.to_string())
+ .collect::<Vec<_>>()
+ .join("`, `");
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0046,
+ "not all trait items implemented, missing: `{missing_items_msg}`",
+ );
+ err.span_label(impl_span, format!("missing `{missing_items_msg}` in implementation"));
+
+ // `Span` before impl block closing brace.
+ let hi = full_impl_span.hi() - BytePos(1);
+ // Point at the place right before the closing brace of the relevant `impl` to suggest
+ // adding the associated item at the end of its body.
+ let sugg_sp = full_impl_span.with_lo(hi).with_hi(hi);
+ // Obtain the level of indentation ending in `sugg_sp`.
+ let padding =
+ tcx.sess.source_map().indentation_before(sugg_sp).unwrap_or_else(|| String::new());
+
+ for trait_item in missing_items {
+ let snippet = suggestion_signature(trait_item, tcx);
+ let code = format!("{}{}\n{}", padding, snippet, padding);
+ let msg = format!("implement the missing item: `{snippet}`");
+ let appl = Applicability::HasPlaceholders;
+ if let Some(span) = tcx.hir().span_if_local(trait_item.def_id) {
+ err.span_label(span, format!("`{}` from trait", trait_item.name));
+ err.tool_only_span_suggestion(sugg_sp, &msg, code, appl);
+ } else {
+ err.span_suggestion_hidden(sugg_sp, &msg, code, appl);
+ }
+ }
+ err.emit();
+}
+
+fn missing_items_must_implement_one_of_err(
+ tcx: TyCtxt<'_>,
+ impl_span: Span,
+ missing_items: &[Ident],
+ annotation_span: Option<Span>,
+) {
+ let missing_items_msg =
+ missing_items.iter().map(Ident::to_string).collect::<Vec<_>>().join("`, `");
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ impl_span,
+ E0046,
+ "not all trait items implemented, missing one of: `{missing_items_msg}`",
+ );
+ err.span_label(impl_span, format!("missing one of `{missing_items_msg}` in implementation"));
+
+ if let Some(annotation_span) = annotation_span {
+ err.span_note(annotation_span, "required because of this annotation");
+ }
+
+ err.emit();
+}
+
+/// Re-sugar `ty::GenericPredicates` in a way suitable to be used in structured suggestions.
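+/// For example (a rough sketch; the exact paths depend on how the trait is
+/// printed), the predicates of `fn f<T: Clone>()` yield roughly the pair
+/// `("<T>", " where T: Clone")`.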
+fn bounds_from_generic_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicates: ty::GenericPredicates<'tcx>,
+) -> (String, String) {
+ let mut types: FxHashMap<Ty<'tcx>, Vec<DefId>> = FxHashMap::default();
+ let mut projections = vec![];
+ for (predicate, _) in predicates.predicates {
+ debug!("predicate {:?}", predicate);
+ let bound_predicate = predicate.kind();
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(trait_predicate) => {
+ let entry = types.entry(trait_predicate.self_ty()).or_default();
+ let def_id = trait_predicate.def_id();
+ if Some(def_id) != tcx.lang_items().sized_trait() {
+ // Type params are `Sized` by default, so do not add that restriction to
+ // the list if it is a positive requirement.
+ entry.push(trait_predicate.def_id());
+ }
+ }
+ ty::PredicateKind::Projection(projection_pred) => {
+ projections.push(bound_predicate.rebind(projection_pred));
+ }
+ _ => {}
+ }
+ }
+ let generics = if types.is_empty() {
+ "".to_string()
+ } else {
+ format!(
+ "<{}>",
+ types
+ .keys()
+ .filter_map(|t| match t.kind() {
+ ty::Param(_) => Some(t.to_string()),
+ // Avoid suggesting the following:
+ // fn foo<T, <T as Trait>::Bar>(_: T) where T: Trait, <T as Trait>::Bar: Other {}
+ _ => None,
+ })
+ .collect::<Vec<_>>()
+ .join(", ")
+ )
+ };
+ let mut where_clauses = vec![];
+ for (ty, bounds) in types {
+ where_clauses
+ .extend(bounds.into_iter().map(|bound| format!("{}: {}", ty, tcx.def_path_str(bound))));
+ }
+ for projection in &projections {
+ let p = projection.skip_binder();
+ // FIXME: this is not currently supported syntax, we should be looking at the `types` and
+ // insert the associated types where they correspond, but for now let's be "lazy" and
+ // propose this instead of the following valid resugaring:
+ // `T: Trait, Trait::Assoc = K` → `T: Trait<Assoc = K>`
+ where_clauses.push(format!(
+ "{} = {}",
+ tcx.def_path_str(p.projection_ty.item_def_id),
+ p.term,
+ ));
+ }
+ let where_clauses = if where_clauses.is_empty() {
+ String::new()
+ } else {
+ format!(" where {}", where_clauses.join(", "))
+ };
+ (generics, where_clauses)
+}
+
+/// Return placeholder code for the given function.
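+/// For example, a missing trait method may be rendered roughly as
+/// `fn foo<T>(&self, _: T) -> T where T: Clone { todo!() }` (a sketch;
+/// the exact output depends on the signature and its predicates).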
+fn fn_sig_suggestion<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sig: ty::FnSig<'tcx>,
+ ident: Ident,
+ predicates: ty::GenericPredicates<'tcx>,
+ assoc: &ty::AssocItem,
+) -> String {
+ let args = sig
+ .inputs()
+ .iter()
+ .enumerate()
+ .map(|(i, ty)| {
+ Some(match ty.kind() {
+ ty::Param(_) if assoc.fn_has_self_parameter && i == 0 => "self".to_string(),
+ ty::Ref(reg, ref_ty, mutability) if i == 0 => {
+ let reg = format!("{reg} ");
+ let reg = match &reg[..] {
+ "'_ " | " " => "",
+ reg => reg,
+ };
+ if assoc.fn_has_self_parameter {
+ match ref_ty.kind() {
+ ty::Param(param) if param.name == kw::SelfUpper => {
+ format!("&{}{}self", reg, mutability.prefix_str())
+ }
+
+ _ => format!("self: {ty}"),
+ }
+ } else {
+ format!("_: {ty}")
+ }
+ }
+ _ => {
+ if assoc.fn_has_self_parameter && i == 0 {
+ format!("self: {ty}")
+ } else {
+ format!("_: {ty}")
+ }
+ }
+ })
+ })
+ .chain(std::iter::once(if sig.c_variadic { Some("...".to_string()) } else { None }))
+ .flatten()
+ .collect::<Vec<String>>()
+ .join(", ");
+ let output = sig.output();
+ let output = if !output.is_unit() { format!(" -> {output}") } else { String::new() };
+
+ let unsafety = sig.unsafety.prefix_str();
+ let (generics, where_clauses) = bounds_from_generic_predicates(tcx, predicates);
+
+ // FIXME: this is not entirely correct, as the lifetimes from borrowed params will
+ // not be present in the `fn` definition, nor will we account for renamed
+ // lifetimes between the `impl` and the `trait`, but this should be good enough to
+ // fill in a significant portion of the missing code, and other subsequent
+ // suggestions can help the user fix the code.
+ format!("{unsafety}fn {ident}{generics}({args}){output}{where_clauses} {{ todo!() }}")
+}
+
+/// Return placeholder code for the given associated item.
+/// Similar to `ty::AssocItem::suggestion`, but appropriate for use as the code snippet of a
+/// structured suggestion.
+fn suggestion_signature(assoc: &ty::AssocItem, tcx: TyCtxt<'_>) -> String {
+ match assoc.kind {
+ ty::AssocKind::Fn => {
+ // We skip the binder here because the binder would deanonymize all
+ // late-bound regions, and we don't want method signatures to show up
+ // as `for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound
+ // regions just fine, showing `fn(&MyType)`.
+ fn_sig_suggestion(
+ tcx,
+ tcx.fn_sig(assoc.def_id).skip_binder(),
+ assoc.ident(tcx),
+ tcx.predicates_of(assoc.def_id),
+ assoc,
+ )
+ }
+ ty::AssocKind::Type => format!("type {} = Type;", assoc.name),
+ ty::AssocKind::Const => {
+ let ty = tcx.type_of(assoc.def_id);
+ let val = expr::ty_kind_suggestion(ty).unwrap_or("value");
+ format!("const {}: {} = {};", assoc.name, ty, val)
+ }
+ }
+}
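+
+// Illustrative outputs: an associated fn becomes a full `fn ... { todo!() }` stub as
+// above, an associated type becomes `type Assoc = Type;`, and an associated const
+// becomes e.g. `const LEN: usize = 42;` (the literal comes from `ty_kind_suggestion`,
+// falling back to the placeholder `value`).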
+
+/// Emit an error when encountering two or more variants in a transparent enum.
+fn bad_variant_count<'tcx>(tcx: TyCtxt<'tcx>, adt: ty::AdtDef<'tcx>, sp: Span, did: DefId) {
+ let variant_spans: Vec<_> = adt
+ .variants()
+ .iter()
+ .map(|variant| tcx.hir().span_if_local(variant.def_id).unwrap())
+ .collect();
+ let msg = format!("needs exactly one variant, but has {}", adt.variants().len());
+ let mut err = struct_span_err!(tcx.sess, sp, E0731, "transparent enum {msg}");
+ err.span_label(sp, &msg);
+ if let [start @ .., end] = &*variant_spans {
+ for variant_span in start {
+ err.span_label(*variant_span, "");
+ }
+ err.span_label(*end, &format!("too many variants in `{}`", tcx.def_path_str(did)));
+ }
+ err.emit();
+}
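+
+// A hypothetical input that reaches this error:
+//
+// #[repr(transparent)]
+// enum E { A(u32), B(u32) } // E0731: needs exactly one variant, but has 2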
+
+/// Emit an error when encountering two or more non-zero-sized fields in a transparent
+/// enum.
+fn bad_non_zero_sized_fields<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ adt: ty::AdtDef<'tcx>,
+ field_count: usize,
+ field_spans: impl Iterator<Item = Span>,
+ sp: Span,
+) {
+ let msg = format!("needs at most one non-zero-sized field, but has {field_count}");
+ let mut err = struct_span_err!(
+ tcx.sess,
+ sp,
+ E0690,
+ "{}transparent {} {}",
+ if adt.is_enum() { "the variant of a " } else { "" },
+ adt.descr(),
+ msg,
+ );
+ err.span_label(sp, &msg);
+ for sp in field_spans {
+ err.span_label(sp, "this field is non-zero-sized");
+ }
+ err.emit();
+}
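+
+// A hypothetical input that reaches this error:
+//
+// #[repr(transparent)]
+// struct Pair(u32, u64); // E0690: needs at most one non-zero-sized field, but has 2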
+
+fn report_unexpected_variant_res(tcx: TyCtxt<'_>, res: Res, qpath: &hir::QPath<'_>, span: Span) {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0533,
+ "expected unit struct, unit variant or constant, found {} `{}`",
+ res.descr(),
+ rustc_hir_pretty::qpath_to_string(qpath),
+ )
+ .emit();
+}
+
+/// Controls whether the arguments are tupled. This is used for the call
+/// operator.
+///
+/// Tupling means that all call-side arguments are packed into a tuple and
+/// passed as a single parameter. For example, if tupling is enabled, this
+/// function:
+/// ```
+/// fn f(x: (isize, isize)) {}
+/// ```
+/// Can be called as:
+/// ```ignore UNSOLVED (can this be done in user code?)
+/// # fn f(x: (isize, isize)) {}
+/// f(1, 2);
+/// ```
+/// Instead of:
+/// ```
+/// # fn f(x: (isize, isize)) {}
+/// f((1, 2));
+/// ```
+#[derive(Clone, Eq, PartialEq)]
+enum TupleArgumentsFlag {
+ DontTupleArguments,
+ TupleArguments,
+}
+
+fn typeck_item_bodies(tcx: TyCtxt<'_>, (): ()) {
+ tcx.hir().par_body_owners(|body_owner_def_id| tcx.ensure().typeck(body_owner_def_id));
+}
+
+fn fatally_break_rust(sess: &Session) {
+ let handler = sess.diagnostic();
+ handler.span_bug_no_panic(
+ MultiSpan::new(),
+ "It looks like you're trying to break rust; would you like some ICE?",
+ );
+ handler.note_without_error("the compiler expectedly panicked. this is a feature.");
+ handler.note_without_error(
+ "we would appreciate a joke overview: \
+ https://github.com/rust-lang/rust/issues/43162#issuecomment-320764675",
+ );
+ handler.note_without_error(&format!(
+ "rustc {} running on {}",
+ option_env!("CFG_VERSION").unwrap_or("unknown_version"),
+ config::host_triple(),
+ ));
+}
+
+fn potentially_plural_count(count: usize, word: &str) -> String {
+ format!("{} {}{}", count, word, pluralize!(count))
+}
+
+fn has_expected_num_generic_args<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_did: Option<DefId>,
+ expected: usize,
+) -> bool {
+ trait_did.map_or(true, |trait_did| {
+ let generics = tcx.generics_of(trait_did);
+ generics.count() == expected + if generics.has_self { 1 } else { 0 }
+ })
+}
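+
+// E.g. `trait Add<Rhs = Self>` has a generics count of 2 (`Self` plus `Rhs`), so a
+// lookup expecting 1 explicit argument passes, while a malformed `#[lang = "add"]`
+// item declared with extra parameters fails here instead of causing an out-of-bounds
+// index later. (Counts shown are illustrative.)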
+
+/// Suggests calling the constructor of a tuple struct or enum variant
+///
+/// * `snippet` - The snippet of code that references the constructor
+/// * `span` - The span of the snippet
+/// * `params` - The number of parameters the constructor accepts
+/// * `err` - A mutable diagnostic builder to add the suggestion to
+fn suggest_call_constructor<G: EmissionGuarantee>(
+ span: Span,
+ kind: CtorOf,
+ params: usize,
+ err: &mut DiagnosticBuilder<'_, G>,
+) {
+ // Note: tuple-structs don't have named fields, so just use placeholders
+ let args = vec!["_"; params].join(", ");
+ let applicable = if params > 0 {
+ Applicability::HasPlaceholders
+ } else {
+ // When `params` is 0, it's an empty-tuple struct/enum variant
+ // so we trivially know how to construct it
+ Applicability::MachineApplicable
+ };
+ let kind = match kind {
+ CtorOf::Struct => "a struct",
+ CtorOf::Variant => "an enum variant",
+ };
+ err.span_label(span, &format!("this is the constructor of {kind}"));
+ err.multipart_suggestion(
+ "call the constructor",
+ vec![(span.shrink_to_lo(), "(".to_string()), (span.shrink_to_hi(), format!(")({args})"))],
+ applicable,
+ );
+}
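+
+// Sketch of the resulting suggestion: for a hypothetical `struct Foo(u8, u8);`, a
+// bare `Foo` in value position becomes `(Foo)(_, _)` -- the span is wrapped in
+// parentheses and called with one `_` placeholder per constructor parameter.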
diff --git a/compiler/rustc_typeck/src/check/op.rs b/compiler/rustc_typeck/src/check/op.rs
new file mode 100644
index 000000000..920b3e688
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/op.rs
@@ -0,0 +1,1076 @@
+//! Code related to processing overloaded binary and unary operators.
+
+use super::method::MethodCallee;
+use super::{has_expected_num_generic_args, FnCtxt};
+use crate::check::Expectation;
+use rustc_ast as ast;
+use rustc_errors::{self, struct_span_err, Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::traits::ObligationCauseCode;
+use rustc_middle::ty::adjustment::{
+ Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability,
+};
+use rustc_middle::ty::{
+ self, Ty, TyCtxt, TypeFolder, TypeSuperFoldable, TypeSuperVisitable, TypeVisitable, TypeVisitor,
+};
+use rustc_span::source_map::Spanned;
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::error_reporting::suggestions::InferCtxtExt as _;
+use rustc_trait_selection::traits::{FulfillmentError, TraitEngine, TraitEngineExt};
+use rustc_type_ir::sty::TyKind::*;
+
+use std::ops::ControlFlow;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Checks an `a <op>= b` assignment expression.
+ pub fn check_binop_assign(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ lhs: &'tcx hir::Expr<'tcx>,
+ rhs: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let (lhs_ty, rhs_ty, return_ty) =
+ self.check_overloaded_binop(expr, lhs, rhs, op, IsAssign::Yes, expected);
+
+ let ty =
+ if !lhs_ty.is_ty_var() && !rhs_ty.is_ty_var() && is_builtin_binop(lhs_ty, rhs_ty, op) {
+ self.enforce_builtin_binop_types(lhs.span, lhs_ty, rhs.span, rhs_ty, op);
+ self.tcx.mk_unit()
+ } else {
+ return_ty
+ };
+
+ self.check_lhs_assignable(lhs, "E0067", op.span, |err| {
+ if let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
+ if self
+ .lookup_op_method(
+ lhs_deref_ty,
+ Some(rhs_ty),
+ Some(rhs),
+ Op::Binary(op, IsAssign::Yes),
+ expected,
+ )
+ .is_ok()
+ {
+ // Suppress this error, since we already emitted
+ // a deref suggestion in check_overloaded_binop
+ err.delay_as_bug();
+ }
+ }
+ });
+
+ ty
+ }
+
+ /// Checks a potentially overloaded binary operator.
+ pub fn check_binop(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ debug!(
+ "check_binop(expr.hir_id={}, expr={:?}, op={:?}, lhs_expr={:?}, rhs_expr={:?})",
+ expr.hir_id, expr, op, lhs_expr, rhs_expr
+ );
+
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => {
+ // && and || are a simple case.
+ self.check_expr_coercable_to_type(lhs_expr, tcx.types.bool, None);
+ let lhs_diverges = self.diverges.get();
+ self.check_expr_coercable_to_type(rhs_expr, tcx.types.bool, None);
+
+ // Depending on the LHS' value, the RHS may never execute.
+ self.diverges.set(lhs_diverges);
+
+ tcx.types.bool
+ }
+ _ => {
+ // Otherwise, we always treat operators as if they are
+ // overloaded. This is the way to be most flexible w/r/t
+ // types that get inferred.
+ let (lhs_ty, rhs_ty, return_ty) = self.check_overloaded_binop(
+ expr,
+ lhs_expr,
+ rhs_expr,
+ op,
+ IsAssign::No,
+ expected,
+ );
+
+ // Supply type inference hints if relevant. Probably these
+ // hints should be enforced during select as part of the
+ // `consider_unification_despite_ambiguity` routine, but this is
+ // more convenient for now.
+ //
+ // The basic idea is to help type inference by taking
+ // advantage of things we know about how the impls for
+ // scalar types are arranged. This is important in a
+ // scenario like `1_u32 << 2`, because it lets us quickly
+ // deduce that the result type should be `u32`, even
+ // though we don't know yet what type 2 has and hence
+ // can't pin this down to a specific impl.
+ if !lhs_ty.is_ty_var()
+ && !rhs_ty.is_ty_var()
+ && is_builtin_binop(lhs_ty, rhs_ty, op)
+ {
+ let builtin_return_ty = self.enforce_builtin_binop_types(
+ lhs_expr.span,
+ lhs_ty,
+ rhs_expr.span,
+ rhs_ty,
+ op,
+ );
+ self.demand_suptype(expr.span, builtin_return_ty, return_ty);
+ }
+
+ return_ty
+ }
+ }
+ }
+
+ fn enforce_builtin_binop_types(
+ &self,
+ lhs_span: Span,
+ lhs_ty: Ty<'tcx>,
+ rhs_span: Span,
+ rhs_ty: Ty<'tcx>,
+ op: hir::BinOp,
+ ) -> Ty<'tcx> {
+ debug_assert!(is_builtin_binop(lhs_ty, rhs_ty, op));
+
+ // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work.
+ // (See https://github.com/rust-lang/rust/issues/57447.)
+ let (lhs_ty, rhs_ty) = (deref_ty_if_possible(lhs_ty), deref_ty_if_possible(rhs_ty));
+
+ let tcx = self.tcx;
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => {
+ self.demand_suptype(lhs_span, tcx.types.bool, lhs_ty);
+ self.demand_suptype(rhs_span, tcx.types.bool, rhs_ty);
+ tcx.types.bool
+ }
+
+ BinOpCategory::Shift => {
+ // result type is same as LHS always
+ lhs_ty
+ }
+
+ BinOpCategory::Math | BinOpCategory::Bitwise => {
+ // the LHS, RHS, and result all have the same type
+ self.demand_suptype(rhs_span, lhs_ty, rhs_ty);
+ lhs_ty
+ }
+
+ BinOpCategory::Comparison => {
+ // the LHS and RHS have the same type, and the result is `bool`
+ self.demand_suptype(rhs_span, lhs_ty, rhs_ty);
+ tcx.types.bool
+ }
+ }
+ }
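+
+ // Concretely (illustrative): `1u8 << 2i32` type-checks as `u8` (Shift keeps the
+ // LHS type), `1u8 + 2u8` as `u8` (Math unifies both sides), and `1u8 < 2u8` as
+ // `bool` (Comparison unifies the operands but returns `bool`).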
+
+ fn check_overloaded_binop(
+ &self,
+ expr: &'tcx hir::Expr<'tcx>,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ is_assign: IsAssign,
+ expected: Expectation<'tcx>,
+ ) -> (Ty<'tcx>, Ty<'tcx>, Ty<'tcx>) {
+ debug!(
+ "check_overloaded_binop(expr.hir_id={}, op={:?}, is_assign={:?})",
+ expr.hir_id, op, is_assign
+ );
+
+ let lhs_ty = match is_assign {
+ IsAssign::No => {
+ // Find a suitable supertype of the LHS expression's type, by coercing to
+ // a type variable, to pass as the `Self` to the trait, avoiding invariant
+ // trait matching creating lifetime constraints that are too strict.
+ // e.g., adding `&'a T` and `&'b T`, given `&'x T: Add<&'x T>`, will result
+ // in `&'a T <: &'x T` and `&'b T <: &'x T`, instead of `'a = 'b = 'x`.
+ let lhs_ty = self.check_expr(lhs_expr);
+ let fresh_var = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: lhs_expr.span,
+ });
+ self.demand_coerce(lhs_expr, lhs_ty, fresh_var, Some(rhs_expr), AllowTwoPhase::No)
+ }
+ IsAssign::Yes => {
+ // rust-lang/rust#52126: We have to use strict
+ // equivalence on the LHS of an assign-op like `+=`;
+ // overwritten or mutably-borrowed places cannot be
+ // coerced to a supertype.
+ self.check_expr(lhs_expr)
+ }
+ };
+ let lhs_ty = self.resolve_vars_with_obligations(lhs_ty);
+
+ // N.B., as we have not yet type-checked the RHS, we don't have the
+ // type at hand. Make a variable to represent it. The whole reason
+ // for this indirection is so that, below, we can check the expr
+ // using this variable as the expected type, which sometimes lets
+ // us do better coercions than we would be able to do otherwise,
+ // particularly for things like `String + &String`.
+ let rhs_ty_var = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: rhs_expr.span,
+ });
+
+ let result = self.lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty_var),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ );
+
+ // see `N.B.` above
+ let rhs_ty = self.check_expr_coercable_to_type(rhs_expr, rhs_ty_var, Some(lhs_expr));
+ let rhs_ty = self.resolve_vars_with_obligations(rhs_ty);
+
+ let return_ty = match result {
+ Ok(method) => {
+ let by_ref_binop = !op.node.is_by_value();
+ if is_assign == IsAssign::Yes || by_ref_binop {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].kind() {
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // Allow two-phase borrows for binops in initial deployment
+ // since they desugar to methods
+ allow_two_phase_borrow: AllowTwoPhase::Yes,
+ },
+ };
+ let autoref = Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ target: method.sig.inputs()[0],
+ };
+ self.apply_adjustments(lhs_expr, vec![autoref]);
+ }
+ }
+ if by_ref_binop {
+ if let ty::Ref(region, _, mutbl) = method.sig.inputs()[1].kind() {
+ let mutbl = match mutbl {
+ hir::Mutability::Not => AutoBorrowMutability::Not,
+ hir::Mutability::Mut => AutoBorrowMutability::Mut {
+ // Allow two-phase borrows for binops in initial deployment
+ // since they desugar to methods
+ allow_two_phase_borrow: AllowTwoPhase::Yes,
+ },
+ };
+ let autoref = Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, mutbl)),
+ target: method.sig.inputs()[1],
+ };
+ // HACK(eddyb) Bypass checks due to reborrows being in
+ // some cases applied on the RHS, on top of which we need
+ // to autoref, which is not allowed by apply_adjustments.
+ // self.apply_adjustments(rhs_expr, vec![autoref]);
+ self.typeck_results
+ .borrow_mut()
+ .adjustments_mut()
+ .entry(rhs_expr.hir_id)
+ .or_default()
+ .push(autoref);
+ }
+ }
+ self.write_method_call(expr.hir_id, method);
+
+ method.sig.output()
+ }
+ // error types are considered "builtin"
+ Err(_) if lhs_ty.references_error() || rhs_ty.references_error() => self.tcx.ty_error(),
+ Err(errors) => {
+ let source_map = self.tcx.sess.source_map();
+ let (mut err, missing_trait, use_output) = match is_assign {
+ IsAssign::Yes => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ expr.span,
+ E0368,
+ "binary assignment operation `{}=` cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty,
+ );
+ err.span_label(
+ lhs_expr.span,
+ format!("cannot use `{}=` on type `{}`", op.node.as_str(), lhs_ty),
+ );
+ let missing_trait = match op.node {
+ hir::BinOpKind::Add => Some("std::ops::AddAssign"),
+ hir::BinOpKind::Sub => Some("std::ops::SubAssign"),
+ hir::BinOpKind::Mul => Some("std::ops::MulAssign"),
+ hir::BinOpKind::Div => Some("std::ops::DivAssign"),
+ hir::BinOpKind::Rem => Some("std::ops::RemAssign"),
+ hir::BinOpKind::BitAnd => Some("std::ops::BitAndAssign"),
+ hir::BinOpKind::BitXor => Some("std::ops::BitXorAssign"),
+ hir::BinOpKind::BitOr => Some("std::ops::BitOrAssign"),
+ hir::BinOpKind::Shl => Some("std::ops::ShlAssign"),
+ hir::BinOpKind::Shr => Some("std::ops::ShrAssign"),
+ _ => None,
+ };
+ self.note_unmet_impls_on_type(&mut err, errors);
+ (err, missing_trait, false)
+ }
+ IsAssign::No => {
+ let (message, missing_trait, use_output) = match op.node {
+ hir::BinOpKind::Add => (
+ format!("cannot add `{rhs_ty}` to `{lhs_ty}`"),
+ Some("std::ops::Add"),
+ true,
+ ),
+ hir::BinOpKind::Sub => (
+ format!("cannot subtract `{rhs_ty}` from `{lhs_ty}`"),
+ Some("std::ops::Sub"),
+ true,
+ ),
+ hir::BinOpKind::Mul => (
+ format!("cannot multiply `{lhs_ty}` by `{rhs_ty}`"),
+ Some("std::ops::Mul"),
+ true,
+ ),
+ hir::BinOpKind::Div => (
+ format!("cannot divide `{lhs_ty}` by `{rhs_ty}`"),
+ Some("std::ops::Div"),
+ true,
+ ),
+ hir::BinOpKind::Rem => (
+ format!("cannot mod `{lhs_ty}` by `{rhs_ty}`"),
+ Some("std::ops::Rem"),
+ true,
+ ),
+ hir::BinOpKind::BitAnd => (
+ format!("no implementation for `{lhs_ty} & {rhs_ty}`"),
+ Some("std::ops::BitAnd"),
+ true,
+ ),
+ hir::BinOpKind::BitXor => (
+ format!("no implementation for `{lhs_ty} ^ {rhs_ty}`"),
+ Some("std::ops::BitXor"),
+ true,
+ ),
+ hir::BinOpKind::BitOr => (
+ format!("no implementation for `{lhs_ty} | {rhs_ty}`"),
+ Some("std::ops::BitOr"),
+ true,
+ ),
+ hir::BinOpKind::Shl => (
+ format!("no implementation for `{lhs_ty} << {rhs_ty}`"),
+ Some("std::ops::Shl"),
+ true,
+ ),
+ hir::BinOpKind::Shr => (
+ format!("no implementation for `{lhs_ty} >> {rhs_ty}`"),
+ Some("std::ops::Shr"),
+ true,
+ ),
+ hir::BinOpKind::Eq | hir::BinOpKind::Ne => (
+ format!(
+ "binary operation `{}` cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty
+ ),
+ Some("std::cmp::PartialEq"),
+ false,
+ ),
+ hir::BinOpKind::Lt
+ | hir::BinOpKind::Le
+ | hir::BinOpKind::Gt
+ | hir::BinOpKind::Ge => (
+ format!(
+ "binary operation `{}` cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty
+ ),
+ Some("std::cmp::PartialOrd"),
+ false,
+ ),
+ _ => (
+ format!(
+ "binary operation `{}` cannot be applied to type `{}`",
+ op.node.as_str(),
+ lhs_ty
+ ),
+ None,
+ false,
+ ),
+ };
+ let mut err = struct_span_err!(self.tcx.sess, op.span, E0369, "{message}");
+ if !lhs_expr.span.eq(&rhs_expr.span) {
+ self.add_type_neq_err_label(
+ &mut err,
+ lhs_expr.span,
+ lhs_ty,
+ rhs_ty,
+ rhs_expr,
+ op,
+ is_assign,
+ expected,
+ );
+ self.add_type_neq_err_label(
+ &mut err,
+ rhs_expr.span,
+ rhs_ty,
+ lhs_ty,
+ lhs_expr,
+ op,
+ is_assign,
+ expected,
+ );
+ }
+ self.note_unmet_impls_on_type(&mut err, errors);
+ (err, missing_trait, use_output)
+ }
+ };
+
+ let mut suggest_deref_binop = |lhs_deref_ty: Ty<'tcx>| {
+ if self
+ .lookup_op_method(
+ lhs_deref_ty,
+ Some(rhs_ty),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .is_ok()
+ {
+ if let Ok(lstring) = source_map.span_to_snippet(lhs_expr.span) {
+ let msg = &format!(
+ "`{}{}` can be used on `{}`, you can dereference `{}`",
+ op.node.as_str(),
+ match is_assign {
+ IsAssign::Yes => "=",
+ IsAssign::No => "",
+ },
+ lhs_deref_ty.peel_refs(),
+ lstring,
+ );
+ err.span_suggestion_verbose(
+ lhs_expr.span.shrink_to_lo(),
+ msg,
+ "*",
+ rustc_errors::Applicability::MachineApplicable,
+ );
+ }
+ }
+ };
+
+ // We should suggest `a + b` => `*a + b` if `a` is copy, and suggest
+ // `a += b` => `*a += b` if `a` is a mut ref.
+ if is_assign == IsAssign::Yes
+ && let Some(lhs_deref_ty) = self.deref_once_mutably_for_diagnostic(lhs_ty) {
+ suggest_deref_binop(lhs_deref_ty);
+ } else if is_assign == IsAssign::No
+ && let Ref(_, lhs_deref_ty, _) = lhs_ty.kind() {
+ if self.type_is_copy_modulo_regions(self.param_env, *lhs_deref_ty, lhs_expr.span) {
+ suggest_deref_binop(*lhs_deref_ty);
+ }
+ }
+ if let Some(missing_trait) = missing_trait {
+ let mut visitor = TypeParamVisitor(vec![]);
+ visitor.visit_ty(lhs_ty);
+
+ if op.node == hir::BinOpKind::Add
+ && self.check_str_addition(
+ lhs_expr, rhs_expr, lhs_ty, rhs_ty, &mut err, is_assign, op,
+ )
+ {
+ // Nothing to do here: we already emitted a string-concatenation
+ // diagnostic (e.g., "Hello " + "World!"), so we don't want
+ // the note in the else clause below to be emitted.
+ } else if let [ty] = &visitor.0[..] {
+ // Look for a TraitPredicate in the Fulfillment errors,
+ // and use it to generate a suggestion.
+ //
+ // Note that lookup_op_method must be called again but
+ // with a specific rhs_ty instead of a placeholder so
+ // the resulting predicate generates a more specific
+ // suggestion for the user.
+ let errors = self
+ .lookup_op_method(
+ lhs_ty,
+ Some(rhs_ty),
+ Some(rhs_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .unwrap_err();
+ if !errors.is_empty() {
+ for error in errors {
+ if let Some(trait_pred) =
+ error.obligation.predicate.to_opt_poly_trait_pred()
+ {
+ let proj_pred = match error.obligation.cause.code() {
+ ObligationCauseCode::BinOp {
+ output_pred: Some(output_pred),
+ ..
+ } if use_output => {
+ output_pred.to_opt_poly_projection_pred()
+ }
+ _ => None,
+ };
+
+ self.suggest_restricting_param_bound(
+ &mut err,
+ trait_pred,
+ proj_pred,
+ self.body_id,
+ );
+ }
+ }
+ } else if *ty != lhs_ty {
+ // When we know that a missing bound is responsible, we don't show
+ // this note as it is redundant.
+ err.note(&format!(
+ "the trait `{missing_trait}` is not implemented for `{lhs_ty}`"
+ ));
+ }
+ }
+ }
+ err.emit();
+ self.tcx.ty_error()
+ }
+ };
+
+ (lhs_ty, rhs_ty, return_ty)
+ }
+
+ /// If one of the types is an uncalled function and calling it would yield the other type,
+ /// suggest calling the function. Returns `true` if a call suggestion was made.
+ fn add_type_neq_err_label(
+ &self,
+ err: &mut Diagnostic,
+ span: Span,
+ ty: Ty<'tcx>,
+ other_ty: Ty<'tcx>,
+ other_expr: &'tcx hir::Expr<'tcx>,
+ op: hir::BinOp,
+ is_assign: IsAssign,
+ expected: Expectation<'tcx>,
+ ) -> bool /* did we suggest to call a function because of missing parentheses? */ {
+ err.span_label(span, ty.to_string());
+ if let FnDef(def_id, _) = *ty.kind() {
+ if !self.tcx.has_typeck_results(def_id) {
+ return false;
+ }
+ // FIXME: Instead of exiting early when encountering bound vars in
+ // the function signature, consider keeping the binder here and
+ // propagating it downwards.
+ let Some(fn_sig) = self.tcx.fn_sig(def_id).no_bound_vars() else {
+ return false;
+ };
+
+ let other_ty = if let FnDef(def_id, _) = *other_ty.kind() {
+ if !self.tcx.has_typeck_results(def_id) {
+ return false;
+ }
+ // We're emitting a suggestion, so we can just ignore regions
+ self.tcx.fn_sig(def_id).skip_binder().output()
+ } else {
+ other_ty
+ };
+
+ if self
+ .lookup_op_method(
+ fn_sig.output(),
+ Some(other_ty),
+ Some(other_expr),
+ Op::Binary(op, is_assign),
+ expected,
+ )
+ .is_ok()
+ {
+ let (variable_snippet, applicability) = if !fn_sig.inputs().is_empty() {
+ ("( /* arguments */ )", Applicability::HasPlaceholders)
+ } else {
+ ("()", Applicability::MaybeIncorrect)
+ };
+
+ err.span_suggestion_verbose(
+ span.shrink_to_hi(),
+ "you might have forgotten to call this function",
+ variable_snippet,
+ applicability,
+ );
+ return true;
+ }
+ }
+ false
+ }
+
+ /// Provide actionable suggestions when trying to add two strings with incorrect types,
+ /// like `&str + &str`, `String + String` and `&str + &String`.
+ ///
+ /// If this function returns `true` it means a note was printed, so we don't need
+ /// to print the normal "implementation of `std::ops::Add` might be missing" note.
+ fn check_str_addition(
+ &self,
+ lhs_expr: &'tcx hir::Expr<'tcx>,
+ rhs_expr: &'tcx hir::Expr<'tcx>,
+ lhs_ty: Ty<'tcx>,
+ rhs_ty: Ty<'tcx>,
+ err: &mut Diagnostic,
+ is_assign: IsAssign,
+ op: hir::BinOp,
+ ) -> bool {
+ let str_concat_note = "string concatenation requires an owned `String` on the left";
+ let rm_borrow_msg = "remove the borrow to obtain an owned `String`";
+ let to_owned_msg = "create an owned `String` from a string reference";
+
+ let is_std_string = |ty: Ty<'tcx>| {
+ ty.ty_adt_def()
+ .map_or(false, |ty_def| self.tcx.is_diagnostic_item(sym::String, ty_def.did()))
+ };
+
+ match (lhs_ty.kind(), rhs_ty.kind()) {
+ (&Ref(_, l_ty, _), &Ref(_, r_ty, _)) // &str or &String + &str, &String or &&str
+ if (*l_ty.kind() == Str || is_std_string(l_ty))
+ && (*r_ty.kind() == Str
+ || is_std_string(r_ty)
+ || matches!(
+ r_ty.kind(), Ref(_, inner_ty, _) if *inner_ty.kind() == Str
+ )) =>
+ {
+ if let IsAssign::No = is_assign { // Do not supply this message if `&str += &str`
+ err.span_label(op.span, "`+` cannot be used to concatenate two `&str` strings");
+ err.note(str_concat_note);
+ if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind {
+ err.span_suggestion_verbose(
+ lhs_expr.span.until(lhs_inner_expr.span),
+ rm_borrow_msg,
+ "",
+ Applicability::MachineApplicable
+ );
+ } else {
+ err.span_suggestion_verbose(
+ lhs_expr.span.shrink_to_hi(),
+ to_owned_msg,
+ ".to_owned()",
+ Applicability::MachineApplicable
+ );
+ }
+ }
+ true
+ }
+ (&Ref(_, l_ty, _), &Adt(..)) // Handle `&str` or `&String` + `String`
+ if (*l_ty.kind() == Str || is_std_string(l_ty)) && is_std_string(rhs_ty) =>
+ {
+ err.span_label(
+ op.span,
+ "`+` cannot be used to concatenate a `&str` with a `String`",
+ );
+ match is_assign {
+ IsAssign::No => {
+ let sugg_msg;
+ let lhs_sugg = if let hir::ExprKind::AddrOf(_, _, lhs_inner_expr) = lhs_expr.kind {
+ sugg_msg = "remove the borrow on the left and add one on the right";
+ (lhs_expr.span.until(lhs_inner_expr.span), "".to_owned())
+ } else {
+ sugg_msg = "create an owned `String` on the left and add a borrow on the right";
+ (lhs_expr.span.shrink_to_hi(), ".to_owned()".to_owned())
+ };
+ let suggestions = vec![
+ lhs_sugg,
+ (rhs_expr.span.shrink_to_lo(), "&".to_owned()),
+ ];
+ err.multipart_suggestion_verbose(
+ sugg_msg,
+ suggestions,
+ Applicability::MachineApplicable,
+ );
+ }
+ IsAssign::Yes => {
+ err.note(str_concat_note);
+ }
+ }
+ true
+ }
+ _ => false,
+ }
+ }
+
+ pub fn check_user_unop(
+ &self,
+ ex: &'tcx hir::Expr<'tcx>,
+ operand_ty: Ty<'tcx>,
+ op: hir::UnOp,
+ expected: Expectation<'tcx>,
+ ) -> Ty<'tcx> {
+ assert!(op.is_by_value());
+ match self.lookup_op_method(operand_ty, None, None, Op::Unary(op, ex.span), expected) {
+ Ok(method) => {
+ self.write_method_call(ex.hir_id, method);
+ method.sig.output()
+ }
+ Err(errors) => {
+ let actual = self.resolve_vars_if_possible(operand_ty);
+ if !actual.references_error() {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ ex.span,
+ E0600,
+ "cannot apply unary operator `{}` to type `{}`",
+ op.as_str(),
+ actual
+ );
+ err.span_label(
+ ex.span,
+ format!("cannot apply unary operator `{}`", op.as_str()),
+ );
+
+ let mut visitor = TypeParamVisitor(vec![]);
+ visitor.visit_ty(operand_ty);
+ if let [_] = &visitor.0[..] && let ty::Param(_) = *operand_ty.kind() {
+ let predicates = errors
+ .iter()
+ .filter_map(|error| {
+ error.obligation.predicate.to_opt_poly_trait_pred()
+ });
+ for pred in predicates {
+ self.suggest_restricting_param_bound(
+ &mut err,
+ pred,
+ None,
+ self.body_id,
+ );
+ }
+ }
+
+ let sp = self.tcx.sess.source_map().start_point(ex.span);
+ if let Some(sp) =
+ self.tcx.sess.parse_sess.ambiguous_block_expr_parse.borrow().get(&sp)
+ {
+ // If the previous expression was a block expression, suggest parentheses
+ // (turning this into a binary subtraction operation instead).
+ // For example, `{2} - 2` -> `({2}) - 2` (see src/test/ui/parser/expr-as-stmt.rs).
+ self.tcx.sess.parse_sess.expr_parentheses_needed(&mut err, *sp);
+ } else {
+ match actual.kind() {
+ Uint(_) if op == hir::UnOp::Neg => {
+ err.note("unsigned values cannot be negated");
+
+ if let hir::ExprKind::Unary(
+ _,
+ hir::Expr {
+ kind:
+ hir::ExprKind::Lit(Spanned {
+ node: ast::LitKind::Int(1, _),
+ ..
+ }),
+ ..
+ },
+ ) = ex.kind
+ {
+ err.span_suggestion(
+ ex.span,
+ &format!(
+ "you may have meant the maximum value of `{actual}`",
+ ),
+ format!("{actual}::MAX"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ Str | Never | Char | Tuple(_) | Array(_, _) => {}
+ Ref(_, lty, _) if *lty.kind() == Str => {}
+ _ => {
+ self.note_unmet_impls_on_type(&mut err, errors);
+ }
+ }
+ }
+ err.emit();
+ }
+ self.tcx.ty_error()
+ }
+ }
+ }
+
+ fn lookup_op_method(
+ &self,
+ lhs_ty: Ty<'tcx>,
+ other_ty: Option<Ty<'tcx>>,
+ other_ty_expr: Option<&'tcx hir::Expr<'tcx>>,
+ op: Op,
+ expected: Expectation<'tcx>,
+ ) -> Result<MethodCallee<'tcx>, Vec<FulfillmentError<'tcx>>> {
+ let lang = self.tcx.lang_items();
+
+ let span = match op {
+ Op::Binary(op, _) => op.span,
+ Op::Unary(_, span) => span,
+ };
+ let (opname, trait_did) = if let Op::Binary(op, IsAssign::Yes) = op {
+ match op.node {
+ hir::BinOpKind::Add => (sym::add_assign, lang.add_assign_trait()),
+ hir::BinOpKind::Sub => (sym::sub_assign, lang.sub_assign_trait()),
+ hir::BinOpKind::Mul => (sym::mul_assign, lang.mul_assign_trait()),
+ hir::BinOpKind::Div => (sym::div_assign, lang.div_assign_trait()),
+ hir::BinOpKind::Rem => (sym::rem_assign, lang.rem_assign_trait()),
+ hir::BinOpKind::BitXor => (sym::bitxor_assign, lang.bitxor_assign_trait()),
+ hir::BinOpKind::BitAnd => (sym::bitand_assign, lang.bitand_assign_trait()),
+ hir::BinOpKind::BitOr => (sym::bitor_assign, lang.bitor_assign_trait()),
+ hir::BinOpKind::Shl => (sym::shl_assign, lang.shl_assign_trait()),
+ hir::BinOpKind::Shr => (sym::shr_assign, lang.shr_assign_trait()),
+ hir::BinOpKind::Lt
+ | hir::BinOpKind::Le
+ | hir::BinOpKind::Ge
+ | hir::BinOpKind::Gt
+ | hir::BinOpKind::Eq
+ | hir::BinOpKind::Ne
+ | hir::BinOpKind::And
+ | hir::BinOpKind::Or => {
+ span_bug!(span, "impossible assignment operation: {}=", op.node.as_str())
+ }
+ }
+ } else if let Op::Binary(op, IsAssign::No) = op {
+ match op.node {
+ hir::BinOpKind::Add => (sym::add, lang.add_trait()),
+ hir::BinOpKind::Sub => (sym::sub, lang.sub_trait()),
+ hir::BinOpKind::Mul => (sym::mul, lang.mul_trait()),
+ hir::BinOpKind::Div => (sym::div, lang.div_trait()),
+ hir::BinOpKind::Rem => (sym::rem, lang.rem_trait()),
+ hir::BinOpKind::BitXor => (sym::bitxor, lang.bitxor_trait()),
+ hir::BinOpKind::BitAnd => (sym::bitand, lang.bitand_trait()),
+ hir::BinOpKind::BitOr => (sym::bitor, lang.bitor_trait()),
+ hir::BinOpKind::Shl => (sym::shl, lang.shl_trait()),
+ hir::BinOpKind::Shr => (sym::shr, lang.shr_trait()),
+ hir::BinOpKind::Lt => (sym::lt, lang.partial_ord_trait()),
+ hir::BinOpKind::Le => (sym::le, lang.partial_ord_trait()),
+ hir::BinOpKind::Ge => (sym::ge, lang.partial_ord_trait()),
+ hir::BinOpKind::Gt => (sym::gt, lang.partial_ord_trait()),
+ hir::BinOpKind::Eq => (sym::eq, lang.eq_trait()),
+ hir::BinOpKind::Ne => (sym::ne, lang.eq_trait()),
+ hir::BinOpKind::And | hir::BinOpKind::Or => {
+ span_bug!(span, "&& and || are not overloadable")
+ }
+ }
+ } else if let Op::Unary(hir::UnOp::Not, _) = op {
+ (sym::not, lang.not_trait())
+ } else if let Op::Unary(hir::UnOp::Neg, _) = op {
+ (sym::neg, lang.neg_trait())
+ } else {
+ bug!("lookup_op_method: op not supported: {:?}", op)
+ };
+
+ debug!(
+ "lookup_op_method(lhs_ty={:?}, op={:?}, opname={:?}, trait_did={:?})",
+ lhs_ty, op, opname, trait_did
+ );
+
+ // Catches cases like #83893, where a lang item is declared with the
+ // wrong number of generic arguments. Should have yielded an error
+ // elsewhere by now, but we have to catch it here so that we do not
+ // index `other_tys` out of bounds (if the lang item has too many
+ // generic arguments, `other_tys` is too short).
+ if !has_expected_num_generic_args(
+ self.tcx,
+ trait_did,
+ match op {
+ // Binary ops have a generic right-hand side, unary ops don't
+ Op::Binary(..) => 1,
+ Op::Unary(..) => 0,
+ },
+ ) {
+ return Err(vec![]);
+ }
+
+ let opname = Ident::with_dummy_span(opname);
+ let method = trait_did.and_then(|trait_did| {
+ self.lookup_op_method_in_trait(
+ span,
+ opname,
+ trait_did,
+ lhs_ty,
+ other_ty,
+ other_ty_expr,
+ expected,
+ )
+ });
+
+ match (method, trait_did) {
+ (Some(ok), _) => {
+ let method = self.register_infer_ok_obligations(ok);
+ self.select_obligations_where_possible(false, |_| {});
+ Ok(method)
+ }
+ (None, None) => Err(vec![]),
+ (None, Some(trait_did)) => {
+ let (obligation, _) = self.obligation_for_op_method(
+ span,
+ trait_did,
+ lhs_ty,
+ other_ty,
+ other_ty_expr,
+ expected,
+ );
+ let mut fulfill = <dyn TraitEngine<'_>>::new(self.tcx);
+ fulfill.register_predicate_obligation(self, obligation);
+ Err(fulfill.select_where_possible(&self.infcx))
+ }
+ }
+ }
+}
+
+// Binary operator categories. These categories summarize the behavior
+// with respect to the builtin operations supported.
+enum BinOpCategory {
+ /// &&, || -- cannot be overridden
+ Shortcircuit,
+
+ /// <<, >> -- when shifting a single integer, rhs can be any
+ /// integer type. For simd, types must match.
+ Shift,
+
+ /// +, -, etc -- takes equal types, produces same type as input,
+ /// applicable to ints/floats/simd
+ Math,
+
+ /// &, |, ^ -- takes equal types, produces same type as input,
+ /// applicable to ints/floats/simd/bool
+ Bitwise,
+
+ /// ==, !=, etc -- takes equal types, produces bools, except for simd,
+ /// which produce the input type
+ Comparison,
+}
+
+impl BinOpCategory {
+ fn from(op: hir::BinOp) -> BinOpCategory {
+ match op.node {
+ hir::BinOpKind::Shl | hir::BinOpKind::Shr => BinOpCategory::Shift,
+
+ hir::BinOpKind::Add
+ | hir::BinOpKind::Sub
+ | hir::BinOpKind::Mul
+ | hir::BinOpKind::Div
+ | hir::BinOpKind::Rem => BinOpCategory::Math,
+
+ hir::BinOpKind::BitXor | hir::BinOpKind::BitAnd | hir::BinOpKind::BitOr => {
+ BinOpCategory::Bitwise
+ }
+
+ hir::BinOpKind::Eq
+ | hir::BinOpKind::Ne
+ | hir::BinOpKind::Lt
+ | hir::BinOpKind::Le
+ | hir::BinOpKind::Ge
+ | hir::BinOpKind::Gt => BinOpCategory::Comparison,
+
+ hir::BinOpKind::And | hir::BinOpKind::Or => BinOpCategory::Shortcircuit,
+ }
+ }
+}
+
+/// Whether the binary operation is an assignment (`a += b`), or not (`a + b`)
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum IsAssign {
+ No,
+ Yes,
+}
+
+#[derive(Clone, Copy, Debug)]
+enum Op {
+ Binary(hir::BinOp, IsAssign),
+ Unary(hir::UnOp, Span),
+}
+
+/// Dereferences a single level of immutable referencing.
+fn deref_ty_if_possible<'tcx>(ty: Ty<'tcx>) -> Ty<'tcx> {
+ match ty.kind() {
+ ty::Ref(_, ty, hir::Mutability::Not) => *ty,
+ _ => ty,
+ }
+}
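+
+// E.g. `&6.0f32` peels to `f32`, while `&mut 6.0f32` and non-reference types are
+// returned unchanged -- only a single immutable layer is removed.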
+
+/// Returns `true` if this is a built-in arithmetic operation (e.g., u32
+/// + u32, i16x4 == i16x4) and false if these types would have to be
+/// overloaded to be legal. There are two reasons that we distinguish
+/// builtin operations from overloaded ones (vs trying to drive
+/// everything uniformly through the trait system and intrinsics or
+/// something like that):
+///
+/// 1. Builtin operations can trivially be evaluated in constants.
+/// 2. For comparison operators applied to SIMD types the result is
+/// not of type `bool`. For example, `i16x4 == i16x4` yields a
+/// type like `i16x4`. This means that the overloaded trait
+/// `PartialEq` is not applicable.
+///
+/// Reason #2 is the killer. I tried for a while to always use
+/// overloaded logic and just check the types in constants/codegen after
+/// the fact, and it worked fine, except for SIMD types. -nmatsakis
+fn is_builtin_binop<'tcx>(lhs: Ty<'tcx>, rhs: Ty<'tcx>, op: hir::BinOp) -> bool {
+ // Special-case a single layer of referencing, so that things like `5.0 + &6.0f32` work.
+ // (See https://github.com/rust-lang/rust/issues/57447.)
+ let (lhs, rhs) = (deref_ty_if_possible(lhs), deref_ty_if_possible(rhs));
+
+ match BinOpCategory::from(op) {
+ BinOpCategory::Shortcircuit => true,
+
+ BinOpCategory::Shift => {
+ lhs.references_error()
+ || rhs.references_error()
+ || lhs.is_integral() && rhs.is_integral()
+ }
+
+ BinOpCategory::Math => {
+ lhs.references_error()
+ || rhs.references_error()
+ || lhs.is_integral() && rhs.is_integral()
+ || lhs.is_floating_point() && rhs.is_floating_point()
+ }
+
+ BinOpCategory::Bitwise => {
+ lhs.references_error()
+ || rhs.references_error()
+ || lhs.is_integral() && rhs.is_integral()
+ || lhs.is_floating_point() && rhs.is_floating_point()
+ || lhs.is_bool() && rhs.is_bool()
+ }
+
+ BinOpCategory::Comparison => {
+ lhs.references_error() || rhs.references_error() || lhs.is_scalar() && rhs.is_scalar()
+ }
+ }
+}
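+
+// E.g. (illustrative) `1u32 + 2u32` and `1.0f64 < 2.0f64` are builtin and evaluated
+// directly, while `String + &str` is not and must go through the `Add` impl.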
+
+struct TypeParamVisitor<'tcx>(Vec<Ty<'tcx>>);
+
+impl<'tcx> TypeVisitor<'tcx> for TypeParamVisitor<'tcx> {
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::Param(_) = ty.kind() {
+ self.0.push(ty);
+ }
+ ty.super_visit_with(self)
+ }
+}
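+
+// E.g. (illustrative): visiting `Vec<(T, u8)>` where `T` is a type parameter collects
+// `[T]`; a fully concrete type collects nothing.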
+
+struct TypeParamEraser<'a, 'tcx>(&'a FnCtxt<'a, 'tcx>, Span);
+
+impl<'tcx> TypeFolder<'tcx> for TypeParamEraser<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.0.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match ty.kind() {
+ ty::Param(_) => self.0.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::MiscVariable,
+ span: self.1,
+ }),
+ _ => ty.super_fold_with(self),
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/pat.rs b/compiler/rustc_typeck/src/check/pat.rs
new file mode 100644
index 000000000..837c32355
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/pat.rs
@@ -0,0 +1,2142 @@
+use crate::check::FnCtxt;
+use rustc_ast as ast;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{
+ pluralize, struct_span_err, Applicability, Diagnostic, DiagnosticBuilder, ErrorGuaranteed,
+ MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::pat_util::EnumerateAndAdjustIterator;
+use rustc_hir::{HirId, Pat, PatKind};
+use rustc_infer::infer;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_middle::middle::stability::EvalResult;
+use rustc_middle::ty::{self, Adt, BindingMode, Ty, TypeVisitable};
+use rustc_session::lint::builtin::NON_EXHAUSTIVE_OMITTED_PATTERNS;
+use rustc_span::hygiene::DesugaringKind;
+use rustc_span::lev_distance::find_best_match_for_name;
+use rustc_span::source_map::{Span, Spanned};
+use rustc_span::symbol::{kw, sym, Ident};
+use rustc_span::{BytePos, DUMMY_SP};
+use rustc_trait_selection::autoderef::Autoderef;
+use rustc_trait_selection::traits::{ObligationCause, Pattern};
+use ty::VariantDef;
+
+use std::cmp;
+use std::collections::hash_map::Entry::{Occupied, Vacant};
+
+use super::report_unexpected_variant_res;
+
+const CANNOT_IMPLICITLY_DEREF_POINTER_TRAIT_OBJ: &str = "\
+This error indicates that a pointer to a trait type cannot be implicitly dereferenced by a \
+pattern. Every trait defines a type, but because the size of trait implementors isn't fixed, \
+this type has no compile-time size. Therefore, all accesses to trait types must be through \
+pointers. If you encounter this error you should try to avoid dereferencing the pointer.
+
+You can read more about trait objects in the Trait Objects section of the Reference: \
+https://doc.rust-lang.org/reference/types.html#trait-objects";
+
+/// Information about the expected type at the top level of type checking a pattern.
+///
+/// **NOTE:** This is only for use by diagnostics. Do NOT use for type checking logic!
+#[derive(Copy, Clone)]
+struct TopInfo<'tcx> {
+ /// The `expected` type at the top level of type checking a pattern.
+ expected: Ty<'tcx>,
+ /// Was the origin of the `span` from a scrutinee expression?
+ ///
+ /// Otherwise there is no scrutinee and it could be e.g. from the type of a formal parameter.
+ origin_expr: bool,
+ /// The span giving rise to the `expected` type, if one could be provided.
+ ///
+ /// If `origin_expr` is `true`, then this is the span of the scrutinee as in:
+ ///
+ /// - `match scrutinee { ... }`
+ /// - `let _ = scrutinee;`
+ ///
+ /// This is used to add context in type errors.
+ /// In the following example, `span` corresponds to the `a + b` expression:
+ ///
+ /// ```text
+ /// error[E0308]: mismatched types
+ /// --> src/main.rs:L:C
+ /// |
+ /// L | let temp: usize = match a + b {
+ /// | ----- this expression has type `usize`
+ /// L | Ok(num) => num,
+ /// | ^^^^^^^ expected `usize`, found enum `std::result::Result`
+ /// |
+ /// = note: expected type `usize`
+ /// found type `std::result::Result<_, _>`
+ /// ```
+ span: Option<Span>,
+}
+
+impl<'tcx> FnCtxt<'_, 'tcx> {
+ fn pattern_cause(&self, ti: TopInfo<'tcx>, cause_span: Span) -> ObligationCause<'tcx> {
+ let code = Pattern { span: ti.span, root_ty: ti.expected, origin_expr: ti.origin_expr };
+ self.cause(cause_span, code)
+ }
+
+ fn demand_eqtype_pat_diag(
+ &self,
+ cause_span: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ self.demand_eqtype_with_origin(&self.pattern_cause(ti, cause_span), expected, actual)
+ }
+
+ fn demand_eqtype_pat(
+ &self,
+ cause_span: Span,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) {
+ if let Some(mut err) = self.demand_eqtype_pat_diag(cause_span, expected, actual, ti) {
+ err.emit();
+ }
+ }
+}
+
+const INITIAL_BM: BindingMode = BindingMode::BindByValue(hir::Mutability::Not);
+
+/// Mode for adjusting the expected type and binding mode.
+enum AdjustMode {
+ /// Peel off all immediate reference types.
+ Peel,
+ /// Reset binding mode to the initial mode.
+ Reset,
+ /// Pass on the input binding mode and expected type.
+ Pass,
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Type check the given top level pattern against the `expected` type.
+ ///
+ /// If a `Some(span)` is provided and `origin_expr` holds,
+ /// then the `span` represents the scrutinee's span.
+ /// The scrutinee is found in e.g. `match scrutinee { ... }` and `let pat = scrutinee;`.
+ ///
+ /// Otherwise, `Some(span)` represents the span of a type expression
+ /// which originated the `expected` type.
+ pub fn check_pat_top(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ span: Option<Span>,
+ origin_expr: bool,
+ ) {
+ let info = TopInfo { expected, origin_expr, span };
+ self.check_pat(pat, expected, INITIAL_BM, info);
+ }
+
+ /// Type check the given `pat` against the `expected` type
+ /// with the provided `def_bm` (default binding mode).
+ ///
+ /// Outside of this module, `check_pat_top` should always be used.
+ /// Conversely, inside this module, `check_pat_top` should never be used.
+ #[instrument(level = "debug", skip(self, ti))]
+ fn check_pat(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) {
+ let path_res = match &pat.kind {
+ PatKind::Path(qpath) => {
+ Some(self.resolve_ty_and_res_fully_qualified_call(qpath, pat.hir_id, pat.span))
+ }
+ _ => None,
+ };
+ let adjust_mode = self.calc_adjust_mode(pat, path_res.map(|(res, ..)| res));
+ let (expected, def_bm) = self.calc_default_binding_mode(pat, expected, def_bm, adjust_mode);
+
+ let ty = match pat.kind {
+ PatKind::Wild => expected,
+ PatKind::Lit(lt) => self.check_pat_lit(pat.span, lt, expected, ti),
+ PatKind::Range(lhs, rhs, _) => self.check_pat_range(pat.span, lhs, rhs, expected, ti),
+ PatKind::Binding(ba, var_id, _, sub) => {
+ self.check_pat_ident(pat, ba, var_id, sub, expected, def_bm, ti)
+ }
+ PatKind::TupleStruct(ref qpath, subpats, ddpos) => {
+ self.check_pat_tuple_struct(pat, qpath, subpats, ddpos, expected, def_bm, ti)
+ }
+ PatKind::Path(ref qpath) => {
+ self.check_pat_path(pat, qpath, path_res.unwrap(), expected, ti)
+ }
+ PatKind::Struct(ref qpath, fields, has_rest_pat) => {
+ self.check_pat_struct(pat, qpath, fields, has_rest_pat, expected, def_bm, ti)
+ }
+ PatKind::Or(pats) => {
+ for pat in pats {
+ self.check_pat(pat, expected, def_bm, ti);
+ }
+ expected
+ }
+ PatKind::Tuple(elements, ddpos) => {
+ self.check_pat_tuple(pat.span, elements, ddpos, expected, def_bm, ti)
+ }
+ PatKind::Box(inner) => self.check_pat_box(pat.span, inner, expected, def_bm, ti),
+ PatKind::Ref(inner, mutbl) => {
+ self.check_pat_ref(pat, inner, mutbl, expected, def_bm, ti)
+ }
+ PatKind::Slice(before, slice, after) => {
+ self.check_pat_slice(pat.span, before, slice, after, expected, def_bm, ti)
+ }
+ };
+
+ self.write_ty(pat.hir_id, ty);
+
+ // (note_1): In most of the cases where (note_1) is referenced
+ // (literals and constants being the exception), we relate types
+ // using strict equality, even though subtyping would be sufficient.
+ // There are a few reasons for this, some of which are fairly subtle
+ // and which cost me (nmatsakis) an hour or two debugging to remember,
+ // so I thought I'd write them down this time.
+ //
+ // 1. There is no loss of expressiveness here, though it does
+ // cause some inconvenience. What we are saying is that the type
+ // of `x` becomes *exactly* what is expected. This can cause unnecessary
+ // errors in some cases, such as this one:
+ //
+ // ```
+ // fn foo<'x>(x: &'x i32) {
+ // let a = 1;
+ // let mut z = x;
+ // z = &a;
+ // }
+ // ```
+ //
+ // The reason we might get an error is that `z` might be
+ // assigned a type like `&'x i32`, and then we would have
+ // a problem when we try to assign `&a` to `z`, because
+ // the lifetime of `&a` (i.e., the enclosing block) is
+ // shorter than `'x`.
+ //
+ // HOWEVER, this code works fine. The reason is that the
+ // expected type here is whatever type the user wrote, not
+ // the initializer's type. In this case the user wrote
+ // nothing, so we are going to create a type variable `Z`.
+ // Then we will assign the type of the initializer (`&'x i32`)
+ // as a subtype of `Z`: `&'x i32 <: Z`. And hence we
+ // will instantiate `Z` as a type `&'0 i32` where `'0` is
+ // a fresh region variable, with the constraint that `'x : '0`.
+ // So basically we're all set.
+ //
+ // Note that there are two tests to check that this remains true
+ // (`regions-reassign-{match,let}-bound-pointer.rs`).
+ //
+ // 2. Things go horribly wrong if we use subtype. The reason for
+ // THIS is a fairly subtle case involving bound regions. See the
+ // `givens` field in `region_constraints`, as well as the test
+ // `regions-relate-bound-regions-on-closures-to-inference-variables.rs`,
+ // for details. Short version is that we must sometimes detect
+ // relationships between specific region variables and regions
+ // bound in a closure signature, and that detection gets thrown
+ // off when we substitute fresh region variables here to enable
+ // subtyping.
+ }
+
+ /// Compute the new expected type and default binding mode from the old ones
+ /// as well as the pattern form we are currently checking.
+ fn calc_default_binding_mode(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ adjust_mode: AdjustMode,
+ ) -> (Ty<'tcx>, BindingMode) {
+ match adjust_mode {
+ AdjustMode::Pass => (expected, def_bm),
+ AdjustMode::Reset => (expected, INITIAL_BM),
+ AdjustMode::Peel => self.peel_off_references(pat, expected, def_bm),
+ }
+ }
+
+ /// How should the binding mode and expected type be adjusted?
+ ///
+ /// When the pattern is a path pattern, `opt_path_res` must be `Some(res)`.
+ fn calc_adjust_mode(&self, pat: &'tcx Pat<'tcx>, opt_path_res: Option<Res>) -> AdjustMode {
+ // When we perform destructuring assignment, we disable default match bindings, which are
+ // unintuitive in this context.
+ if !pat.default_binding_modes {
+ return AdjustMode::Reset;
+ }
+ match &pat.kind {
+ // Type checking these product-like types successfully always requires
+ // that the expected type be of those types and not reference types.
+ PatKind::Struct(..)
+ | PatKind::TupleStruct(..)
+ | PatKind::Tuple(..)
+ | PatKind::Box(_)
+ | PatKind::Range(..)
+ | PatKind::Slice(..) => AdjustMode::Peel,
+ // String and byte-string literals result in types `&str` and `&[u8]` respectively.
+ // All other literals result in non-reference types.
+ // As a result, we allow `if let 0 = &&0 {}` but not `if let "foo" = &&"foo" {}`.
+ //
+ // Call `resolve_vars_if_possible` here for inline const blocks.
+ PatKind::Lit(lt) => match self.resolve_vars_if_possible(self.check_expr(lt)).kind() {
+ ty::Ref(..) => AdjustMode::Pass,
+ _ => AdjustMode::Peel,
+ },
+ PatKind::Path(_) => match opt_path_res.unwrap() {
+ // These constants can be of a reference type, e.g. `const X: &u8 = &0;`.
+ // Peeling the reference types too early will cause type checking failures.
+ // Although it would be possible to *also* peel the types of the constants too.
+ Res::Def(DefKind::Const | DefKind::AssocConst, _) => AdjustMode::Pass,
+ // In the `ValueNS`, we have `SelfCtor(..) | Def(DefKind::Ctor(_, Const), _)`
+ // remaining, which could successfully compile. The former being `Self`
+ // requires a unit struct.
+ // In either case, and unlike constants, the pattern itself cannot be
+ // a reference type wherefore peeling doesn't give up any expressiveness.
+ _ => AdjustMode::Peel,
+ },
+ // When encountering a `& mut? pat` pattern, reset to "by value".
+ // This is so that `x` and `y` here are by value, as they appear to be:
+ //
+ // ```
+ // match &(&22, &44) {
+ // (&x, &y) => ...
+ // }
+ // ```
+ //
+ // See issue #46688.
+ PatKind::Ref(..) => AdjustMode::Reset,
+ // A `_` pattern works with any expected type, so there's no need to do anything.
+ PatKind::Wild
+ // Bindings also work with whatever the expected type is,
+ // and moreover if we peel references off, that will give us the wrong binding type.
+ // Also, we can have a subpattern `binding @ pat`.
+ // Each side of the `@` should be treated independently (like with OR-patterns).
+ | PatKind::Binding(..)
+ // An OR-pattern just propagates to each individual alternative.
+ // This is maximally flexible, allowing e.g., `Some(mut x) | &Some(mut x)`.
+ // In that example, `Some(mut x)` results in `Peel` whereas `&Some(mut x)` in `Reset`.
+ | PatKind::Or(_) => AdjustMode::Pass,
+ }
+ }
+
+ /// Peel off as many immediately nested `& mut?` from the expected type as possible
+ /// and return the new expected type and default binding mode.
+ /// The adjustments vector, if non-empty, is stored in a table.
+ fn peel_off_references(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ mut def_bm: BindingMode,
+ ) -> (Ty<'tcx>, BindingMode) {
+ let mut expected = self.resolve_vars_with_obligations(expected);
+
+ // Peel off as many `&` or `&mut` from the scrutinee type as possible. For example,
+ // for `match &&&mut Some(5)` the loop runs three times, aborting when it reaches
+ // the `Some(5)` which is not of type Ref.
+ //
+ // For each ampersand peeled off, update the binding mode and push the original
+ // type into the adjustments vector.
+ //
+ // See the examples in `ui/match-defbm*.rs`.
+ let mut pat_adjustments = vec![];
+ while let ty::Ref(_, inner_ty, inner_mutability) = *expected.kind() {
+ debug!("inspecting {:?}", expected);
+
+ debug!("current discriminant is Ref, inserting implicit deref");
+ // Preserve the reference type. We'll need it later during THIR lowering.
+ pat_adjustments.push(expected);
+
+ expected = inner_ty;
+ def_bm = ty::BindByReference(match def_bm {
+ // If default binding mode is by value, make it `ref` or `ref mut`
+ // (depending on whether we observe `&` or `&mut`).
+ ty::BindByValue(_) |
+ // When `ref mut`, stay a `ref mut` (on `&mut`) or downgrade to `ref` (on `&`).
+ ty::BindByReference(hir::Mutability::Mut) => inner_mutability,
+ // Once a `ref`, always a `ref`.
+ // This is because a `& &mut` cannot mutate the underlying value.
+ ty::BindByReference(m @ hir::Mutability::Not) => m,
+ });
+ }
+
+ if !pat_adjustments.is_empty() {
+ debug!("default binding mode is now {:?}", def_bm);
+ self.inh
+ .typeck_results
+ .borrow_mut()
+ .pat_adjustments_mut()
+ .insert(pat.hir_id, pat_adjustments);
+ }
+
+ (expected, def_bm)
+ }
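+
+ // E.g. (illustrative): matching a binding pattern against a scrutinee of type
+ // `&&&mut Option<i32>` peels three references, stores them as pattern
+ // adjustments, and ends with a by-`ref` default binding mode, since the outer
+ // `&` layers prevent mutation through the inner `&mut`.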
+
+ fn check_pat_lit(
+ &self,
+ span: Span,
+ lt: &hir::Expr<'tcx>,
+ expected: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ // We've already computed the type above (when checking for a non-ref pat),
+ // so avoid computing it again.
+ let ty = self.node_ty(lt.hir_id);
+
+ // Byte string patterns behave the same way as array patterns.
+ // They can denote both statically and dynamically-sized byte arrays.
+ let mut pat_ty = ty;
+ if let hir::ExprKind::Lit(Spanned { node: ast::LitKind::ByteStr(_), .. }) = lt.kind {
+ let expected = self.structurally_resolved_type(span, expected);
+ if let ty::Ref(_, inner_ty, _) = expected.kind()
+ && matches!(inner_ty.kind(), ty::Slice(_))
+ {
+ let tcx = self.tcx;
+ trace!(?lt.hir_id.local_id, "polymorphic byte string lit");
+ self.typeck_results
+ .borrow_mut()
+ .treat_byte_string_as_slice
+ .insert(lt.hir_id.local_id);
+ pat_ty = tcx.mk_imm_ref(tcx.lifetimes.re_static, tcx.mk_slice(tcx.types.u8));
+ }
+ }
+
+ // Somewhat surprising: in this case, the subtyping relation goes the
+ // opposite way from the other cases. Actually what we really want is not
+ // a subtyping relation at all but rather that there exists a LUB
+ // (so that they can be compared). However, in practice, constants are
+ // always scalars or strings. For scalars subtyping is irrelevant,
+ // and for strings the type `ty` is `&'static str`, so if we say that
+ //
+ // &'static str <: expected
+ //
+ // then that's equivalent to there existing a LUB.
+ let cause = self.pattern_cause(ti, span);
+ if let Some(mut err) = self.demand_suptype_with_origin(&cause, expected, pat_ty) {
+ err.emit_unless(
+ ti.span
+ .filter(|&s| {
+ // In the case of `if`- and `while`-expressions we've already checked
+ // that `scrutinee: bool`. We know that the pattern is `true`,
+ // so an error here would be a duplicate and from the wrong POV.
+ s.is_desugaring(DesugaringKind::CondTemporary)
+ })
+ .is_some(),
+ );
+ }
+
+ pat_ty
+ }
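+
+ // E.g. (illustrative): matching the literal `b"ab"` against a scrutinee of type
+ // `&[u8]` takes the byte-string branch above, so the pattern's type becomes
+ // `&'static [u8]` rather than the literal's own `&'static [u8; 2]`.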
+
+ fn check_pat_range(
+ &self,
+ span: Span,
+ lhs: Option<&'tcx hir::Expr<'tcx>>,
+ rhs: Option<&'tcx hir::Expr<'tcx>>,
+ expected: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let calc_side = |opt_expr: Option<&'tcx hir::Expr<'tcx>>| match opt_expr {
+ None => None,
+ Some(expr) => {
+ let ty = self.check_expr(expr);
+ // Check that the end-point is possibly of numeric or char type.
+ // The early check here is not for correctness, but rather for better
+ // diagnostics (e.g. when `&str` is being matched, `expected` will
+ // be peeled to `str` while ty here is still `&str`, if we don't
+ // err early here, a rather confusing unification error will be
+ // emitted instead).
+ let fail =
+ !(ty.is_numeric() || ty.is_char() || ty.is_ty_var() || ty.references_error());
+ Some((fail, ty, expr.span))
+ }
+ };
+ let mut lhs = calc_side(lhs);
+ let mut rhs = calc_side(rhs);
+
+ if let (Some((true, ..)), _) | (_, Some((true, ..))) = (lhs, rhs) {
+ // There exists a side that didn't meet our criteria that the end-point
+ // be of a numeric or char type, as checked in `calc_side` above.
+ self.emit_err_pat_range(span, lhs, rhs);
+ return self.tcx.ty_error();
+ }
+
+ // Unify each side with `expected`.
+ // Subtyping doesn't matter here, as the value is some kind of scalar.
+ let demand_eqtype = |x: &mut _, y| {
+ if let Some((ref mut fail, x_ty, x_span)) = *x
+ && let Some(mut err) = self.demand_eqtype_pat_diag(x_span, expected, x_ty, ti)
+ {
+ if let Some((_, y_ty, y_span)) = y {
+ self.endpoint_has_type(&mut err, y_span, y_ty);
+ }
+ err.emit();
+ *fail = true;
+ }
+ };
+ demand_eqtype(&mut lhs, rhs);
+ demand_eqtype(&mut rhs, lhs);
+
+ if let (Some((true, ..)), _) | (_, Some((true, ..))) = (lhs, rhs) {
+ return self.tcx.ty_error();
+ }
+
+ // Find the unified type and check if it's of numeric or char type again.
+ // This check is needed if both sides are inference variables.
+ // We require types to be resolved here so that we emit inference failure
+ // rather than "_ is not a char or numeric".
+ let ty = self.structurally_resolved_type(span, expected);
+ if !(ty.is_numeric() || ty.is_char() || ty.references_error()) {
+ if let Some((ref mut fail, _, _)) = lhs {
+ *fail = true;
+ }
+ if let Some((ref mut fail, _, _)) = rhs {
+ *fail = true;
+ }
+ self.emit_err_pat_range(span, lhs, rhs);
+ return self.tcx.ty_error();
+ }
+ ty
+ }
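+
+ // E.g. (illustrative): `0..=9` unifies both endpoints with the expected integer
+ // type, while `"a"..="z"` fails the numeric/char check above and reports E0029.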
+
+ fn endpoint_has_type(&self, err: &mut Diagnostic, span: Span, ty: Ty<'_>) {
+ if !ty.references_error() {
+ err.span_label(span, &format!("this is of type `{}`", ty));
+ }
+ }
+
+ fn emit_err_pat_range(
+ &self,
+ span: Span,
+ lhs: Option<(bool, Ty<'tcx>, Span)>,
+ rhs: Option<(bool, Ty<'tcx>, Span)>,
+ ) {
+ let span = match (lhs, rhs) {
+ (Some((true, ..)), Some((true, ..))) => span,
+ (Some((true, _, sp)), _) => sp,
+ (_, Some((true, _, sp))) => sp,
+ _ => span_bug!(span, "emit_err_pat_range: no side failed or exists but still error?"),
+ };
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0029,
+ "only `char` and numeric types are allowed in range patterns"
+ );
+ let msg = |ty| {
+ let ty = self.resolve_vars_if_possible(ty);
+ format!("this is of type `{}` but it should be `char` or numeric", ty)
+ };
+ let mut one_side_err = |first_span, first_ty, second: Option<(bool, Ty<'tcx>, Span)>| {
+ err.span_label(first_span, &msg(first_ty));
+ if let Some((_, ty, sp)) = second {
+ let ty = self.resolve_vars_if_possible(ty);
+ self.endpoint_has_type(&mut err, sp, ty);
+ }
+ };
+ match (lhs, rhs) {
+ (Some((true, lhs_ty, lhs_sp)), Some((true, rhs_ty, rhs_sp))) => {
+ err.span_label(lhs_sp, &msg(lhs_ty));
+ err.span_label(rhs_sp, &msg(rhs_ty));
+ }
+ (Some((true, lhs_ty, lhs_sp)), rhs) => one_side_err(lhs_sp, lhs_ty, rhs),
+ (lhs, Some((true, rhs_ty, rhs_sp))) => one_side_err(rhs_sp, rhs_ty, lhs),
+ _ => span_bug!(span, "Impossible, verified above."),
+ }
+ if self.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "In a match expression, only numbers and characters can be matched \
+ against a range. This is because the compiler checks that the range \
+ is non-empty at compile-time, and is unable to evaluate arbitrary \
+ comparison functions. If you want to capture values of an orderable \
+ type between two end-points, you can use a guard.",
+ );
+ }
+ err.emit();
+ }
+
+ fn check_pat_ident(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ ba: hir::BindingAnnotation,
+ var_id: HirId,
+ sub: Option<&'tcx Pat<'tcx>>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ // Determine the binding mode...
+ let bm = match ba {
+ hir::BindingAnnotation::Unannotated => def_bm,
+ _ => BindingMode::convert(ba),
+ };
+ // ...and store it in a side table:
+ self.inh.typeck_results.borrow_mut().pat_binding_modes_mut().insert(pat.hir_id, bm);
+
+ debug!("check_pat_ident: pat.hir_id={:?} bm={:?}", pat.hir_id, bm);
+
+ let local_ty = self.local_ty(pat.span, pat.hir_id).decl_ty;
+ let eq_ty = match bm {
+ ty::BindByReference(mutbl) => {
+ // If the binding is like `ref x | ref mut x`,
+ // then `x` is assigned a value of type `&M T` where M is the
+ // mutability and T is the expected type.
+ //
+ // `x` is assigned a value of type `&M T`, hence `&M T <: typeof(x)`
+ // is required. However, we use equality, which is stronger.
+ // See (note_1) for an explanation.
+ self.new_ref_ty(pat.span, mutbl, expected)
+ }
+ // Otherwise, the type of x is the expected type `T`.
+ ty::BindByValue(_) => {
+ // As above, `T <: typeof(x)` is required, but we use equality, see (note_1).
+ expected
+ }
+ };
+ self.demand_eqtype_pat(pat.span, eq_ty, local_ty, ti);
+
+ // If there are multiple arms, make sure they all agree on
+ // what the type of the binding `x` ought to be.
+ if var_id != pat.hir_id {
+ self.check_binding_alt_eq_ty(pat.span, var_id, local_ty, ti);
+ }
+
+ if let Some(p) = sub {
+ self.check_pat(p, expected, def_bm, ti);
+ }
+
+ local_ty
+ }
+
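+ // Illustration (sketch) of the two binding modes resolved above:
+ //
+ //     fn main() {
+ //         let pair = (String::from("hi"), 2);
+ //         match pair {
+ //             // `ref s` binds by reference: `s: &String` (`BindByReference`);
+ //             // `n` binds by value: `n: i32` (`BindByValue`).
+ //             (ref s, n) => println!("{s} {n}"),
+ //         }
+ //     }
+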
+ fn check_binding_alt_eq_ty(&self, span: Span, var_id: HirId, ty: Ty<'tcx>, ti: TopInfo<'tcx>) {
+ let var_ty = self.local_ty(span, var_id).decl_ty;
+ if let Some(mut err) = self.demand_eqtype_pat_diag(span, var_ty, ty, ti) {
+ let hir = self.tcx.hir();
+ let var_ty = self.resolve_vars_with_obligations(var_ty);
+ let msg = format!("first introduced with type `{var_ty}` here");
+ err.span_label(hir.span(var_id), msg);
+ let in_match = hir.parent_iter(var_id).any(|(_, n)| {
+ matches!(
+ n,
+ hir::Node::Expr(hir::Expr {
+ kind: hir::ExprKind::Match(.., hir::MatchSource::Normal),
+ ..
+ })
+ )
+ });
+ let pre = if in_match { "in the same arm, " } else { "" };
+ err.note(&format!("{}a binding must have the same type in all alternatives", pre));
+ // FIXME: check if `var_ty` and `ty` can be made the same type by adding or removing
+ // `ref` or `&` to the pattern.
+ err.emit();
+ }
+ }
+
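+ // Illustration (sketch): a binding must have one type across all
+ // alternatives of an or-pattern.
+ //
+ //     fn main() {
+ //         let r: Result<i32, i32> = Ok(1);
+ //         let (Ok(x) | Err(x)) = r; // OK: `x: i32` in both alternatives
+ //         println!("{x}");
+ //     }
+ //
+ // With `Result<i32, String>` instead, the same pattern is rejected: `x`
+ // would be `i32` in one alternative and `String` in the other.
+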
+ // Precondition: pat is a Ref(_) pattern
+ fn borrow_pat_suggestion(&self, err: &mut Diagnostic, pat: &Pat<'_>) {
+ let tcx = self.tcx;
+ if let PatKind::Ref(inner, mutbl) = pat.kind
+ && let PatKind::Binding(_, _, binding, ..) = inner.kind {
+ let binding_parent_id = tcx.hir().get_parent_node(pat.hir_id);
+ let binding_parent = tcx.hir().get(binding_parent_id);
+ debug!(?inner, ?pat, ?binding_parent);
+
+ let mutability = match mutbl {
+ ast::Mutability::Mut => "mut",
+ ast::Mutability::Not => "",
+ };
+
+ let mut_var_suggestion = 'block: {
+ if !matches!(mutbl, ast::Mutability::Mut) {
+ break 'block None;
+ }
+
+ let ident_kind = match binding_parent {
+ hir::Node::Param(_) => "parameter",
+ hir::Node::Local(_) => "variable",
+ hir::Node::Arm(_) => "binding",
+
+ // Provide diagnostics only if the parent pattern is struct-like,
+ // i.e. where `mut binding` makes sense
+ hir::Node::Pat(Pat { kind, .. }) => match kind {
+ PatKind::Struct(..)
+ | PatKind::TupleStruct(..)
+ | PatKind::Or(..)
+ | PatKind::Tuple(..)
+ | PatKind::Slice(..) => "binding",
+
+ PatKind::Wild
+ | PatKind::Binding(..)
+ | PatKind::Path(..)
+ | PatKind::Box(..)
+ | PatKind::Ref(..)
+ | PatKind::Lit(..)
+ | PatKind::Range(..) => break 'block None,
+ },
+
+ // Don't provide suggestions in other cases
+ _ => break 'block None,
+ };
+
+ Some((
+ pat.span,
+ format!("to declare a mutable {ident_kind} use"),
+ format!("mut {binding}"),
+ ))
+ };
+
+ match binding_parent {
+ // Check that there is an explicit type (i.e., this is not a closure param with an
+ // inferred type), so we don't suggest moving something to a type annotation that does not exist
+ hir::Node::Param(hir::Param { ty_span, .. }) if binding.span != *ty_span => {
+ err.multipart_suggestion_verbose(
+ format!("to take parameter `{binding}` by reference, move `&{mutability}` to the type"),
+ vec![
+ (pat.span.until(inner.span), "".to_owned()),
+ (ty_span.shrink_to_lo(), format!("&{}", mutbl.prefix_str())),
+ ],
+ Applicability::MachineApplicable
+ );
+
+ if let Some((sp, msg, sugg)) = mut_var_suggestion {
+ err.span_note(sp, format!("{msg}: `{sugg}`"));
+ }
+ }
+ hir::Node::Param(_) | hir::Node::Arm(_) | hir::Node::Pat(_) => {
+ // rely on match ergonomics or it might be nested `&&pat`
+ err.span_suggestion_verbose(
+ pat.span.until(inner.span),
+ format!("consider removing `&{mutability}` from the pattern"),
+ "",
+ Applicability::MaybeIncorrect,
+ );
+
+ if let Some((sp, msg, sugg)) = mut_var_suggestion {
+ err.span_note(sp, format!("{msg}: `{sugg}`"));
+ }
+ }
+ _ if let Some((sp, msg, sugg)) = mut_var_suggestion => {
+ err.span_suggestion(sp, msg, sugg, Applicability::MachineApplicable);
+ }
+ _ => {} // don't provide suggestions in other cases #55175
+ }
+ }
+ }
+
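+ // Illustration (sketch): the rewrite proposed for a misplaced `&` in a
+ // parameter pattern with an explicit type:
+ //
+ //     fn take(&x: u32) {} // error: mismatched types
+ //     fn take(x: &u32) {} // suggestion: "to take parameter `x` by
+ //                         //  reference, move `&` to the type"
+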
+ pub fn check_dereferenceable(&self, span: Span, expected: Ty<'tcx>, inner: &Pat<'_>) -> bool {
+ if let PatKind::Binding(..) = inner.kind
+ && let Some(mt) = self.shallow_resolve(expected).builtin_deref(true)
+ && let ty::Dynamic(..) = mt.ty.kind()
+ {
+ // This is "x = SomeTrait" being reduced from
+ // "let &x = &SomeTrait" or "let box x = Box<SomeTrait>", an error.
+ let type_str = self.ty_to_string(expected);
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0033,
+ "type `{}` cannot be dereferenced",
+ type_str
+ );
+ err.span_label(span, format!("type `{type_str}` cannot be dereferenced"));
+ if self.tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(CANNOT_IMPLICITLY_DEREF_POINTER_TRAIT_OBJ);
+ }
+ err.emit();
+ return false;
+ }
+ true
+ }
+
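+ // Illustration (sketch) of the E0033 case above:
+ //
+ //     use std::fmt::Debug;
+ //     fn f(r: &dyn Debug) {
+ //         let &x = r; // error[E0033]: type `&dyn Debug` cannot be dereferenced
+ //     }
+ //
+ // Binding `x` by value would move the unsized trait object out from behind
+ // the reference, so the pattern is rejected.
+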
+ fn check_pat_struct(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ qpath: &hir::QPath<'_>,
+ fields: &'tcx [hir::PatField<'tcx>],
+ has_rest_pat: bool,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ // Resolve the path and check the definition for errors.
+ let Some((variant, pat_ty)) = self.check_struct_path(qpath, pat.hir_id) else {
+ let err = self.tcx.ty_error();
+ for field in fields {
+ self.check_pat(field.pat, err, def_bm, ti);
+ }
+ return err;
+ };
+
+ // Type-check the path.
+ self.demand_eqtype_pat(pat.span, expected, pat_ty, ti);
+
+ // Type-check subpatterns.
+ if self.check_struct_pat_fields(pat_ty, &pat, variant, fields, has_rest_pat, def_bm, ti) {
+ pat_ty
+ } else {
+ self.tcx.ty_error()
+ }
+ }
+
+ fn check_pat_path(
+ &self,
+ pat: &Pat<'tcx>,
+ qpath: &hir::QPath<'_>,
+ path_resolution: (Res, Option<Ty<'tcx>>, &'tcx [hir::PathSegment<'tcx>]),
+ expected: Ty<'tcx>,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+
+ // We have already resolved the path.
+ let (res, opt_ty, segments) = path_resolution;
+ match res {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ return tcx.ty_error();
+ }
+ Res::Def(DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fictive | CtorKind::Fn), _) => {
+ report_unexpected_variant_res(tcx, res, qpath, pat.span);
+ return tcx.ty_error();
+ }
+ Res::SelfCtor(..)
+ | Res::Def(
+ DefKind::Ctor(_, CtorKind::Const)
+ | DefKind::Const
+ | DefKind::AssocConst
+ | DefKind::ConstParam,
+ _,
+ ) => {} // OK
+ _ => bug!("unexpected pattern resolution: {:?}", res),
+ }
+
+ // Type-check the path.
+ let (pat_ty, pat_res) =
+ self.instantiate_value_path(segments, opt_ty, res, pat.span, pat.hir_id);
+ if let Some(err) =
+ self.demand_suptype_with_origin(&self.pattern_cause(ti, pat.span), expected, pat_ty)
+ {
+ self.emit_bad_pat_path(err, pat, res, pat_res, pat_ty, segments);
+ }
+ pat_ty
+ }
+
+ fn maybe_suggest_range_literal(
+ &self,
+ e: &mut Diagnostic,
+ opt_def_id: Option<hir::def_id::DefId>,
+ ident: Ident,
+ ) -> bool {
+ match opt_def_id {
+ Some(def_id) => match self.tcx.hir().get_if_local(def_id) {
+ Some(hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Const(_, body_id), ..
+ })) => match self.tcx.hir().get(body_id.hir_id) {
+ hir::Node::Expr(expr) => {
+ if hir::is_range_literal(expr) {
+ let span = self.tcx.hir().span(body_id.hir_id);
+ if let Ok(snip) = self.tcx.sess.source_map().span_to_snippet(span) {
+ e.span_suggestion_verbose(
+ ident.span,
+ "you may want to move the range into the match block",
+ snip,
+ Applicability::MachineApplicable,
+ );
+ return true;
+ }
+ }
+ }
+ _ => (),
+ },
+ _ => (),
+ },
+ _ => (),
+ }
+ false
+ }
+
+ fn emit_bad_pat_path(
+ &self,
+ mut e: DiagnosticBuilder<'_, ErrorGuaranteed>,
+ pat: &hir::Pat<'tcx>,
+ res: Res,
+ pat_res: Res,
+ pat_ty: Ty<'tcx>,
+ segments: &'tcx [hir::PathSegment<'tcx>],
+ ) {
+ let pat_span = pat.span;
+ if let Some(span) = self.tcx.hir().res_span(pat_res) {
+ e.span_label(span, &format!("{} defined here", res.descr()));
+ if let [hir::PathSegment { ident, .. }] = &*segments {
+ e.span_label(
+ pat_span,
+ &format!(
+ "`{}` is interpreted as {} {}, not a new binding",
+ ident,
+ res.article(),
+ res.descr(),
+ ),
+ );
+ match self.tcx.hir().get(self.tcx.hir().get_parent_node(pat.hir_id)) {
+ hir::Node::Pat(Pat { kind: hir::PatKind::Struct(..), .. }) => {
+ e.span_suggestion_verbose(
+ ident.span.shrink_to_hi(),
+ "bind the struct field to a different name instead",
+ format!(": other_{}", ident.as_str().to_lowercase()),
+ Applicability::HasPlaceholders,
+ );
+ }
+ _ => {
+ let (type_def_id, item_def_id) = match pat_ty.kind() {
+ Adt(def, _) => match res {
+ Res::Def(DefKind::Const, def_id) => (Some(def.did()), Some(def_id)),
+ _ => (None, None),
+ },
+ _ => (None, None),
+ };
+
+ let ranges = &[
+ self.tcx.lang_items().range_struct(),
+ self.tcx.lang_items().range_from_struct(),
+ self.tcx.lang_items().range_to_struct(),
+ self.tcx.lang_items().range_full_struct(),
+ self.tcx.lang_items().range_inclusive_struct(),
+ self.tcx.lang_items().range_to_inclusive_struct(),
+ ];
+ if type_def_id.is_some() && ranges.contains(&type_def_id) {
+ if !self.maybe_suggest_range_literal(&mut e, item_def_id, *ident) {
+ let msg = "constants only support matching by type, \
+ if you meant to match against a range of values, \
+ consider using a range pattern like `min ..= max` in the match block";
+ e.note(msg);
+ }
+ } else {
+ let msg = "introduce a new binding instead";
+ let sugg = format!("other_{}", ident.as_str().to_lowercase());
+ e.span_suggestion(
+ ident.span,
+ msg,
+ sugg,
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+ };
+ }
+ }
+ e.emit();
+ }
+
+ fn check_pat_tuple_struct(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ qpath: &'tcx hir::QPath<'tcx>,
+ subpats: &'tcx [Pat<'tcx>],
+ ddpos: Option<usize>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let on_error = || {
+ for pat in subpats {
+ self.check_pat(pat, tcx.ty_error(), def_bm, ti);
+ }
+ };
+ let report_unexpected_res = |res: Res| {
+ let sm = tcx.sess.source_map();
+ let path_str = sm
+ .span_to_snippet(sm.span_until_char(pat.span, '('))
+ .map_or_else(|_| String::new(), |s| format!(" `{}`", s.trim_end()));
+ let msg = format!(
+ "expected tuple struct or tuple variant, found {}{}",
+ res.descr(),
+ path_str
+ );
+
+ let mut err = struct_span_err!(tcx.sess, pat.span, E0164, "{msg}");
+ match res {
+ Res::Def(DefKind::Fn | DefKind::AssocFn, _) => {
+ err.span_label(pat.span, "`fn` calls are not allowed in patterns");
+ err.help(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch18-00-patterns.html",
+ );
+ }
+ _ => {
+ err.span_label(pat.span, "not a tuple variant or struct");
+ }
+ }
+ err.emit();
+ on_error();
+ };
+
+ // Resolve the path and check the definition for errors.
+ let (res, opt_ty, segments) =
+ self.resolve_ty_and_res_fully_qualified_call(qpath, pat.hir_id, pat.span);
+ if res == Res::Err {
+ self.set_tainted_by_errors();
+ on_error();
+ return self.tcx.ty_error();
+ }
+
+ // Type-check the path.
+ let (pat_ty, res) =
+ self.instantiate_value_path(segments, opt_ty, res, pat.span, pat.hir_id);
+ if !pat_ty.is_fn() {
+ report_unexpected_res(res);
+ return tcx.ty_error();
+ }
+
+ let variant = match res {
+ Res::Err => {
+ self.set_tainted_by_errors();
+ on_error();
+ return tcx.ty_error();
+ }
+ Res::Def(DefKind::AssocConst | DefKind::AssocFn, _) => {
+ report_unexpected_res(res);
+ return tcx.ty_error();
+ }
+ Res::Def(DefKind::Ctor(_, CtorKind::Fn), _) => tcx.expect_variant_res(res),
+ _ => bug!("unexpected pattern resolution: {:?}", res),
+ };
+
+ // Replace constructor type with constructed type for tuple struct patterns.
+ let pat_ty = pat_ty.fn_sig(tcx).output();
+ let pat_ty = pat_ty.no_bound_vars().expect("expected fn type");
+
+ // Type-check the tuple struct pattern against the expected type.
+ let diag = self.demand_eqtype_pat_diag(pat.span, expected, pat_ty, ti);
+ let had_err = if let Some(mut err) = diag {
+ err.emit();
+ true
+ } else {
+ false
+ };
+
+ // Type-check subpatterns.
+ if subpats.len() == variant.fields.len()
+ || subpats.len() < variant.fields.len() && ddpos.is_some()
+ {
+ let ty::Adt(_, substs) = pat_ty.kind() else {
+ bug!("unexpected pattern type {:?}", pat_ty);
+ };
+ for (i, subpat) in subpats.iter().enumerate_and_adjust(variant.fields.len(), ddpos) {
+ let field_ty = self.field_ty(subpat.span, &variant.fields[i], substs);
+ self.check_pat(subpat, field_ty, def_bm, ti);
+
+ self.tcx.check_stability(
+ variant.fields[i].did,
+ Some(pat.hir_id),
+ subpat.span,
+ None,
+ );
+ }
+ } else {
+ // Pattern has wrong number of fields.
+ self.e0023(pat.span, res, qpath, subpats, &variant.fields, expected, had_err);
+ on_error();
+ return tcx.ty_error();
+ }
+ pat_ty
+ }
+
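+ // Illustration (sketch) of a successful check: the constructor's function
+ // type is replaced by the constructed type, and each subpattern is checked
+ // against the corresponding field type.
+ //
+ //     struct Point(i32, i32);
+ //
+ //     fn main() {
+ //         let p = Point(1, 2);
+ //         let Point(x, y) = p; // `x: i32`, `y: i32` from the field types
+ //         println!("{x} {y}");
+ //     }
+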
+ fn e0023(
+ &self,
+ pat_span: Span,
+ res: Res,
+ qpath: &hir::QPath<'_>,
+ subpats: &'tcx [Pat<'tcx>],
+ fields: &'tcx [ty::FieldDef],
+ expected: Ty<'tcx>,
+ had_err: bool,
+ ) {
+ let subpats_ending = pluralize!(subpats.len());
+ let fields_ending = pluralize!(fields.len());
+
+ let subpat_spans = if subpats.is_empty() {
+ vec![pat_span]
+ } else {
+ subpats.iter().map(|p| p.span).collect()
+ };
+ let last_subpat_span = *subpat_spans.last().unwrap();
+ let res_span = self.tcx.def_span(res.def_id());
+ let def_ident_span = self.tcx.def_ident_span(res.def_id()).unwrap_or(res_span);
+ let field_def_spans = if fields.is_empty() {
+ vec![res_span]
+ } else {
+ fields.iter().map(|f| f.ident(self.tcx).span).collect()
+ };
+ let last_field_def_span = *field_def_spans.last().unwrap();
+
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ MultiSpan::from_spans(subpat_spans),
+ E0023,
+ "this pattern has {} field{}, but the corresponding {} has {} field{}",
+ subpats.len(),
+ subpats_ending,
+ res.descr(),
+ fields.len(),
+ fields_ending,
+ );
+ err.span_label(
+ last_subpat_span,
+ &format!("expected {} field{}, found {}", fields.len(), fields_ending, subpats.len()),
+ );
+ if self.tcx.sess.source_map().is_multiline(qpath.span().between(last_subpat_span)) {
+ err.span_label(qpath.span(), "");
+ }
+ if self.tcx.sess.source_map().is_multiline(def_ident_span.between(last_field_def_span)) {
+ err.span_label(def_ident_span, format!("{} defined here", res.descr()));
+ }
+ for span in &field_def_spans[..field_def_spans.len() - 1] {
+ err.span_label(*span, "");
+ }
+ err.span_label(
+ last_field_def_span,
+ &format!("{} has {} field{}", res.descr(), fields.len(), fields_ending),
+ );
+
+ // Identify the case `Some(x, y)` where the expected type is e.g. `Option<(T, U)>`.
+ // More generally, the expected type wants a tuple variant with one field of an
+ // N-arity-tuple, e.g., `V_i((p_0, .., p_N))`. Meanwhile, the user supplied a pattern
+ // with the subpatterns directly in the tuple variant pattern, e.g., `V_i(p_0, .., p_N)`.
+ let missing_parentheses = match (&expected.kind(), fields, had_err) {
+ // #67037: only do this if we could successfully type-check the expected type against
+ // the tuple struct pattern. Otherwise the substs could get out of range on e.g.,
+ // `let P() = U;` where `P != U` with `struct P<T>(T);`.
+ (ty::Adt(_, substs), [field], false) => {
+ let field_ty = self.field_ty(pat_span, field, substs);
+ match field_ty.kind() {
+ ty::Tuple(fields) => fields.len() == subpats.len(),
+ _ => false,
+ }
+ }
+ _ => false,
+ };
+ if missing_parentheses {
+ let (left, right) = match subpats {
+ // This is the zero case; we aim to get the "hi" part of the `QPath`'s
+ // span as the "lo" and then the "hi" part of the pattern's span as the "hi".
+ // This looks like:
+ //
+ // help: missing parentheses
+ // |
+ // L | let A(()) = A(());
+ // | ^ ^
+ [] => (qpath.span().shrink_to_hi(), pat_span),
+ // Easy case. Just take the "lo" of the first sub-pattern and the "hi" of the
+ // last sub-pattern. In the case of `A(x)` the first and last may coincide.
+ // This looks like:
+ //
+ // help: missing parentheses
+ // |
+ // L | let A((x, y)) = A((1, 2));
+ // | ^ ^
+ [first, ..] => (first.span.shrink_to_lo(), subpats.last().unwrap().span),
+ };
+ err.multipart_suggestion(
+ "missing parentheses",
+ vec![(left, "(".to_string()), (right.shrink_to_hi(), ")".to_string())],
+ Applicability::MachineApplicable,
+ );
+ } else if fields.len() > subpats.len() && pat_span != DUMMY_SP {
+ let after_fields_span = pat_span.with_hi(pat_span.hi() - BytePos(1)).shrink_to_hi();
+ let all_fields_span = match subpats {
+ [] => after_fields_span,
+ [field] => field.span,
+ [first, .., last] => first.span.to(last.span),
+ };
+
+ // Check if all the fields in the pattern are wildcards.
+ let all_wildcards = subpats.iter().all(|pat| matches!(pat.kind, PatKind::Wild));
+ let first_tail_wildcard =
+ subpats.iter().enumerate().fold(None, |acc, (pos, pat)| match (acc, &pat.kind) {
+ (None, PatKind::Wild) => Some(pos),
+ (Some(_), PatKind::Wild) => acc,
+ _ => None,
+ });
+ let tail_span = match first_tail_wildcard {
+ None => after_fields_span,
+ Some(0) => subpats[0].span.to(after_fields_span),
+ Some(pos) => subpats[pos - 1].span.shrink_to_hi().to(after_fields_span),
+ };
+
+ // FIXME: heuristic-based suggestion to check current types for where to add `_`.
+ let mut wildcard_sugg = vec!["_"; fields.len() - subpats.len()].join(", ");
+ if !subpats.is_empty() {
+ wildcard_sugg = String::from(", ") + &wildcard_sugg;
+ }
+
+ err.span_suggestion_verbose(
+ after_fields_span,
+ "use `_` to explicitly ignore each field",
+ wildcard_sugg,
+ Applicability::MaybeIncorrect,
+ );
+
+ // Only suggest `..` if more than one field is missing
+ // or the pattern consists of all wildcards.
+ if fields.len() - subpats.len() > 1 || all_wildcards {
+ if subpats.is_empty() || all_wildcards {
+ err.span_suggestion_verbose(
+ all_fields_span,
+ "use `..` to ignore all fields",
+ "..",
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ err.span_suggestion_verbose(
+ tail_span,
+ "use `..` to ignore the rest of the fields",
+ ", ..",
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+
+ err.emit();
+ }
+
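+ // Illustration (sketch) of the E0023 suggestions above, with a hypothetical
+ // enum `E`:
+ //
+ //     enum E { V(i32, i32, i32) }
+ //
+ //     match e {
+ //         E::V(a) => {} // error[E0023]: this pattern has 1 field, but the
+ //                       // corresponding tuple variant has 3 fields;
+ //                       // suggestions: `E::V(a, _, _)` or `E::V(a, ..)`
+ //     }
+ //
+ // The missing-parentheses case: against `Option<(i32, i32)>`, the pattern
+ // `Some(a, b)` is rewritten to `Some((a, b))`.
+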
+ fn check_pat_tuple(
+ &self,
+ span: Span,
+ elements: &'tcx [Pat<'tcx>],
+ ddpos: Option<usize>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let mut expected_len = elements.len();
+ if ddpos.is_some() {
+ // Require known type only when `..` is present.
+ if let ty::Tuple(tys) = self.structurally_resolved_type(span, expected).kind() {
+ expected_len = tys.len();
+ }
+ }
+ let max_len = cmp::max(expected_len, elements.len());
+
+ let element_tys_iter = (0..max_len).map(|_| {
+ self.next_ty_var(
+ // FIXME: a generic `TypeInference` origin for now -- obtaining the span
+ // and name information from all tuple elements isn't trivial.
+ TypeVariableOrigin { kind: TypeVariableOriginKind::TypeInference, span },
+ )
+ });
+ let element_tys = tcx.mk_type_list(element_tys_iter);
+ let pat_ty = tcx.mk_ty(ty::Tuple(element_tys));
+ if let Some(mut err) = self.demand_eqtype_pat_diag(span, expected, pat_ty, ti) {
+ err.emit();
+ // Walk subpatterns with an expected type of `err` in this case to silence
+ // further errors being emitted when using the bindings. #50333
+ let element_tys_iter = (0..max_len).map(|_| tcx.ty_error());
+ for (_, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
+ self.check_pat(elem, tcx.ty_error(), def_bm, ti);
+ }
+ tcx.mk_tup(element_tys_iter)
+ } else {
+ for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
+ self.check_pat(elem, element_tys[i], def_bm, ti);
+ }
+ pat_ty
+ }
+ }
+
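+ // Illustration (sketch): with `..` present, the expected type supplies the
+ // arity, so middle elements can be elided.
+ //
+ //     fn main() {
+ //         let t = (1, "two", 3.0);
+ //         let (a, .., c) = t; // `a: i32`, `c: f64`
+ //         println!("{a} {c}");
+ //     }
+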
+ fn check_struct_pat_fields(
+ &self,
+ adt_ty: Ty<'tcx>,
+ pat: &'tcx Pat<'tcx>,
+ variant: &'tcx ty::VariantDef,
+ fields: &'tcx [hir::PatField<'tcx>],
+ has_rest_pat: bool,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> bool {
+ let tcx = self.tcx;
+
+ let ty::Adt(adt, substs) = adt_ty.kind() else {
+ span_bug!(pat.span, "struct pattern is not an ADT");
+ };
+
+ // Index the struct fields' types.
+ let field_map = variant
+ .fields
+ .iter()
+ .enumerate()
+ .map(|(i, field)| (field.ident(self.tcx).normalize_to_macros_2_0(), (i, field)))
+ .collect::<FxHashMap<_, _>>();
+
+ // Keep track of which fields have already appeared in the pattern.
+ let mut used_fields = FxHashMap::default();
+ let mut no_field_errors = true;
+
+ let mut inexistent_fields = vec![];
+ // Typecheck each field.
+ for field in fields {
+ let span = field.span;
+ let ident = tcx.adjust_ident(field.ident, variant.def_id);
+ let field_ty = match used_fields.entry(ident) {
+ Occupied(occupied) => {
+ self.error_field_already_bound(span, field.ident, *occupied.get());
+ no_field_errors = false;
+ tcx.ty_error()
+ }
+ Vacant(vacant) => {
+ vacant.insert(span);
+ field_map
+ .get(&ident)
+ .map(|(i, f)| {
+ self.write_field_index(field.hir_id, *i);
+ self.tcx.check_stability(f.did, Some(pat.hir_id), span, None);
+ self.field_ty(span, f, substs)
+ })
+ .unwrap_or_else(|| {
+ inexistent_fields.push(field);
+ no_field_errors = false;
+ tcx.ty_error()
+ })
+ }
+ };
+
+ self.check_pat(field.pat, field_ty, def_bm, ti);
+ }
+
+ let mut unmentioned_fields = variant
+ .fields
+ .iter()
+ .map(|field| (field, field.ident(self.tcx).normalize_to_macros_2_0()))
+ .filter(|(_, ident)| !used_fields.contains_key(ident))
+ .collect::<Vec<_>>();
+
+ let inexistent_fields_err = if !(inexistent_fields.is_empty() || variant.is_recovered())
+ && !inexistent_fields.iter().any(|field| field.ident.name == kw::Underscore)
+ {
+ Some(self.error_inexistent_fields(
+ adt.variant_descr(),
+ &inexistent_fields,
+ &mut unmentioned_fields,
+ variant,
+ substs,
+ ))
+ } else {
+ None
+ };
+
+ // Require `..` if struct has non_exhaustive attribute.
+ let non_exhaustive = variant.is_field_list_non_exhaustive() && !adt.did().is_local();
+ if non_exhaustive && !has_rest_pat {
+ self.error_foreign_non_exhaustive_spat(pat, adt.variant_descr(), fields.is_empty());
+ }
+
+ let mut unmentioned_err = None;
+ // Report an error if an incorrect number of fields was specified.
+ if adt.is_union() {
+ if fields.len() != 1 {
+ tcx.sess
+ .struct_span_err(pat.span, "union patterns should have exactly one field")
+ .emit();
+ }
+ if has_rest_pat {
+ tcx.sess.struct_span_err(pat.span, "`..` cannot be used in union patterns").emit();
+ }
+ } else if !unmentioned_fields.is_empty() {
+ let accessible_unmentioned_fields: Vec<_> = unmentioned_fields
+ .iter()
+ .copied()
+ .filter(|(field, _)| {
+ field.vis.is_accessible_from(tcx.parent_module(pat.hir_id).to_def_id(), tcx)
+ && !matches!(
+ tcx.eval_stability(field.did, None, DUMMY_SP, None),
+ EvalResult::Deny { .. }
+ )
+ // Exclude fields that are `doc(hidden)` and defined in another crate
+ && !(tcx.is_doc_hidden(field.did) && !field.did.is_local())
+ })
+ .collect();
+
+ if !has_rest_pat {
+ if accessible_unmentioned_fields.is_empty() {
+ unmentioned_err = Some(self.error_no_accessible_fields(pat, fields));
+ } else {
+ unmentioned_err = Some(self.error_unmentioned_fields(
+ pat,
+ &accessible_unmentioned_fields,
+ accessible_unmentioned_fields.len() != unmentioned_fields.len(),
+ fields,
+ ));
+ }
+ } else if non_exhaustive && !accessible_unmentioned_fields.is_empty() {
+ self.lint_non_exhaustive_omitted_patterns(
+ pat,
+ &accessible_unmentioned_fields,
+ adt_ty,
+ )
+ }
+ }
+ match (inexistent_fields_err, unmentioned_err) {
+ (Some(mut i), Some(mut u)) => {
+ if let Some(mut e) = self.error_tuple_variant_as_struct_pat(pat, fields, variant) {
+ // We don't want to show the nonexistent-fields error when the pattern was
+ // written as `Foo { a, b }` but should have been `Foo(a, b)`.
+ i.delay_as_bug();
+ u.delay_as_bug();
+ e.emit();
+ } else {
+ i.emit();
+ u.emit();
+ }
+ }
+ (None, Some(mut u)) => {
+ if let Some(mut e) = self.error_tuple_variant_as_struct_pat(pat, fields, variant) {
+ u.delay_as_bug();
+ e.emit();
+ } else {
+ u.emit();
+ }
+ }
+ (Some(mut err), None) => {
+ err.emit();
+ }
+ (None, None) if let Some(mut err) =
+ self.error_tuple_variant_index_shorthand(variant, pat, fields) =>
+ {
+ err.emit();
+ }
+ (None, None) => {}
+ }
+ no_field_errors
+ }
+
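+ // Illustration (sketch): `..` marks the remaining fields as intentionally
+ // unmentioned, avoiding the unmentioned-fields error above.
+ //
+ //     struct S { a: i32, b: i32, c: i32 }
+ //
+ //     fn main() {
+ //         let s = S { a: 1, b: 2, c: 3 };
+ //         let S { a, .. } = s; // `b` and `c` are ignored via `..`
+ //         println!("{a}");
+ //     }
+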
+ fn error_tuple_variant_index_shorthand(
+ &self,
+ variant: &VariantDef,
+ pat: &'_ Pat<'_>,
+ fields: &[hir::PatField<'_>],
+ ) -> Option<DiagnosticBuilder<'_, ErrorGuaranteed>> {
+ // If this is a tuple struct, then all field names will be numbers,
+ // so if any fields in a struct pattern use shorthand syntax, they will
+ // be invalid identifiers (for example, `Foo { 0, 1 }`).
+ if let (CtorKind::Fn, PatKind::Struct(qpath, field_patterns, ..)) =
+ (variant.ctor_kind, &pat.kind)
+ {
+ let has_shorthand_field_name = field_patterns.iter().any(|field| field.is_shorthand);
+ if has_shorthand_field_name {
+ let path = rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| {
+ s.print_qpath(qpath, false)
+ });
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ pat.span,
+ E0769,
+ "tuple variant `{path}` written as struct variant",
+ );
+ err.span_suggestion_verbose(
+ qpath.span().shrink_to_hi().to(pat.span.shrink_to_hi()),
+ "use the tuple variant pattern syntax instead",
+ format!("({})", self.get_suggested_tuple_struct_pattern(fields, variant)),
+ Applicability::MaybeIncorrect,
+ );
+ return Some(err);
+ }
+ }
+ None
+ }
+
+ fn error_foreign_non_exhaustive_spat(&self, pat: &Pat<'_>, descr: &str, no_fields: bool) {
+ let sess = self.tcx.sess;
+ let sm = sess.source_map();
+ let sp_brace = sm.end_point(pat.span);
+ let sp_comma = sm.end_point(pat.span.with_hi(sp_brace.hi()));
+ let sugg = if no_fields || sp_brace != sp_comma { ".. }" } else { ", .. }" };
+
+ let mut err = struct_span_err!(
+ sess,
+ pat.span,
+ E0638,
+ "`..` required with {descr} marked as non-exhaustive",
+ );
+ err.span_suggestion_verbose(
+ sp_comma,
+ "add `..` at the end of the field list to ignore all other fields",
+ sugg,
+ Applicability::MachineApplicable,
+ );
+ err.emit();
+ }
+
+ fn error_field_already_bound(&self, span: Span, ident: Ident, other_field: Span) {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0025,
+ "field `{}` bound multiple times in the pattern",
+ ident
+ )
+ .span_label(span, format!("multiple uses of `{ident}` in pattern"))
+ .span_label(other_field, format!("first use of `{ident}`"))
+ .emit();
+ }
+
+ fn error_inexistent_fields(
+ &self,
+ kind_name: &str,
+ inexistent_fields: &[&hir::PatField<'tcx>],
+ unmentioned_fields: &mut Vec<(&'tcx ty::FieldDef, Ident)>,
+ variant: &ty::VariantDef,
+ substs: &'tcx ty::List<ty::subst::GenericArg<'tcx>>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let tcx = self.tcx;
+ let (field_names, t, plural) = if inexistent_fields.len() == 1 {
+ (format!("a field named `{}`", inexistent_fields[0].ident), "this", "")
+ } else {
+ (
+ format!(
+ "fields named {}",
+ inexistent_fields
+ .iter()
+ .map(|field| format!("`{}`", field.ident))
+ .collect::<Vec<String>>()
+ .join(", ")
+ ),
+ "these",
+ "s",
+ )
+ };
+ let spans = inexistent_fields.iter().map(|field| field.ident.span).collect::<Vec<_>>();
+ let mut err = struct_span_err!(
+ tcx.sess,
+ spans,
+ E0026,
+ "{} `{}` does not have {}",
+ kind_name,
+ tcx.def_path_str(variant.def_id),
+ field_names
+ );
+ if let Some(pat_field) = inexistent_fields.last() {
+ err.span_label(
+ pat_field.ident.span,
+ format!(
+ "{} `{}` does not have {} field{}",
+ kind_name,
+ tcx.def_path_str(variant.def_id),
+ t,
+ plural
+ ),
+ );
+
+ if unmentioned_fields.len() == 1 {
+ let input =
+ unmentioned_fields.iter().map(|(_, field)| field.name).collect::<Vec<_>>();
+ let suggested_name = find_best_match_for_name(&input, pat_field.ident.name, None);
+ if let Some(suggested_name) = suggested_name {
+ err.span_suggestion(
+ pat_field.ident.span,
+ "a field with a similar name exists",
+ suggested_name,
+ Applicability::MaybeIncorrect,
+ );
+
+ // When a tuple struct is used with struct syntax we don't want to suggest
+ // using the (valid) struct syntax with numeric field names. Instead we want
+ // to suggest the expected syntax. We infer that this is the case by trying
+ // to parse the `Ident` as an unsigned integer (`usize`). The suggestion will
+ // be emitted elsewhere in `smart_resolve_context_dependent_help`.
+ if suggested_name.to_ident_string().parse::<usize>().is_err() {
+ // We don't want to throw `E0027` in case we have thrown `E0026` for them.
+ unmentioned_fields.retain(|&(_, x)| x.name != suggested_name);
+ }
+ } else if inexistent_fields.len() == 1 {
+ match pat_field.pat.kind {
+ PatKind::Lit(expr)
+ if !self.can_coerce(
+ self.typeck_results.borrow().expr_ty(expr),
+ self.field_ty(
+ unmentioned_fields[0].1.span,
+ unmentioned_fields[0].0,
+ substs,
+ ),
+ ) => {}
+ _ => {
+ let unmentioned_field = unmentioned_fields[0].1.name;
+ err.span_suggestion_short(
+ pat_field.ident.span,
+ &format!(
+ "`{}` has a field named `{}`",
+ tcx.def_path_str(variant.def_id),
+ unmentioned_field
+ ),
+ unmentioned_field.to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ }
+ }
+ }
+ }
+ if tcx.sess.teach(&err.get_code().unwrap()) {
+ err.note(
+ "This error indicates that a struct pattern attempted to \
+ extract a non-existent field from a struct. Struct fields \
+ are identified by the name used before the colon (`:`), so struct \
+ patterns should resemble the declaration of the struct type \
+ being matched.\n\n\
+ If you are using shorthand field patterns but want to refer \
+ to the struct field by a different name, you should rename \
+ it explicitly.",
+ );
+ }
+ err
+ }
+
+ fn error_tuple_variant_as_struct_pat(
+ &self,
+ pat: &Pat<'_>,
+ fields: &'tcx [hir::PatField<'tcx>],
+ variant: &ty::VariantDef,
+ ) -> Option<DiagnosticBuilder<'tcx, ErrorGuaranteed>> {
+ if let (CtorKind::Fn, PatKind::Struct(qpath, ..)) = (variant.ctor_kind, &pat.kind) {
+ let path = rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| {
+ s.print_qpath(qpath, false)
+ });
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ pat.span,
+ E0769,
+ "tuple variant `{}` written as struct variant",
+ path
+ );
+ let (sugg, appl) = if fields.len() == variant.fields.len() {
+ (
+ self.get_suggested_tuple_struct_pattern(fields, variant),
+ Applicability::MachineApplicable,
+ )
+ } else {
+ (
+ variant.fields.iter().map(|_| "_").collect::<Vec<&str>>().join(", "),
+ Applicability::MaybeIncorrect,
+ )
+ };
+ err.span_suggestion_verbose(
+ qpath.span().shrink_to_hi().to(pat.span.shrink_to_hi()),
+ "use the tuple variant pattern syntax instead",
+ format!("({})", sugg),
+ appl,
+ );
+ return Some(err);
+ }
+ None
+ }
+
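+ // Illustration (sketch) of E0769 and its suggested rewrite, with a
+ // hypothetical enum `E`:
+ //
+ //     enum E { V(i32, i32) }
+ //
+ //     match e {
+ //         E::V { a, b } => {} // error[E0769]: tuple variant `E::V` written
+ //                             // as struct variant;
+ //                             // suggestion: `E::V(a, b)`
+ //     }
+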
+ fn get_suggested_tuple_struct_pattern(
+ &self,
+ fields: &[hir::PatField<'_>],
+ variant: &VariantDef,
+ ) -> String {
+ let variant_field_idents =
+ variant.fields.iter().map(|f| f.ident(self.tcx)).collect::<Vec<Ident>>();
+ fields
+ .iter()
+ .map(|field| {
+ match self.tcx.sess.source_map().span_to_snippet(field.pat.span) {
+ Ok(f) => {
+ // Field names are numbers, but numbers
+ // are not valid identifiers
+ if variant_field_idents.contains(&field.ident) {
+ String::from("_")
+ } else {
+ f
+ }
+ }
+ Err(_) => rustc_hir_pretty::to_string(rustc_hir_pretty::NO_ANN, |s| {
+ s.print_pat(field.pat)
+ }),
+ }
+ })
+ .collect::<Vec<String>>()
+ .join(", ")
+ }
+
+ /// Returns a diagnostic reporting a struct pattern which is missing an `..` due to
+ /// inaccessible fields.
+ ///
+ /// ```text
+ /// error: pattern requires `..` due to inaccessible fields
+ /// --> src/main.rs:10:9
+ /// |
+ /// LL | let foo::Foo {} = foo::Foo::default();
+ /// | ^^^^^^^^^^^
+ /// |
+ /// help: add a `..`
+ /// |
+ /// LL | let foo::Foo { .. } = foo::Foo::default();
+ /// | ^^^^^^
+ /// ```
+ fn error_no_accessible_fields(
+ &self,
+ pat: &Pat<'_>,
+ fields: &'tcx [hir::PatField<'tcx>],
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = self
+ .tcx
+ .sess
+ .struct_span_err(pat.span, "pattern requires `..` due to inaccessible fields");
+
+ if let Some(field) = fields.last() {
+ err.span_suggestion_verbose(
+ field.span.shrink_to_hi(),
+ "ignore the inaccessible and unused fields",
+ ", ..",
+ Applicability::MachineApplicable,
+ );
+ } else {
+ let qpath_span = if let PatKind::Struct(qpath, ..) = &pat.kind {
+ qpath.span()
+ } else {
+ bug!("`error_no_accessible_fields` called on non-struct pattern");
+ };
+
+ // Shrink the span to exclude the `foo::Foo` in `foo::Foo { }`.
+ let span = pat.span.with_lo(qpath_span.shrink_to_hi().hi());
+ err.span_suggestion_verbose(
+ span,
+ "ignore the inaccessible and unused fields",
+ " { .. }",
+ Applicability::MachineApplicable,
+ );
+ }
+ err
+ }
+
+ /// Report that a pattern for a `#[non_exhaustive]` struct marked with `non_exhaustive_omitted_patterns`
+ /// is not exhaustive enough.
+ ///
+ /// Nb: the partner lint for enums lives in `compiler/rustc_mir_build/src/thir/pattern/usefulness.rs`.
+ fn lint_non_exhaustive_omitted_patterns(
+ &self,
+ pat: &Pat<'_>,
+ unmentioned_fields: &[(&ty::FieldDef, Ident)],
+ ty: Ty<'tcx>,
+ ) {
+ fn joined_uncovered_patterns(witnesses: &[&Ident]) -> String {
+ const LIMIT: usize = 3;
+ match witnesses {
+ [] => bug!(),
+ [witness] => format!("`{}`", witness),
+ [head @ .., tail] if head.len() < LIMIT => {
+ let head: Vec<_> = head.iter().map(<_>::to_string).collect();
+ format!("`{}` and `{}`", head.join("`, `"), tail)
+ }
+ _ => {
+ let (head, tail) = witnesses.split_at(LIMIT);
+ let head: Vec<_> = head.iter().map(<_>::to_string).collect();
+ format!("`{}` and {} more", head.join("`, `"), tail.len())
+ }
+ }
+ }
+ let joined_patterns = joined_uncovered_patterns(
+ &unmentioned_fields.iter().map(|(_, i)| i).collect::<Vec<_>>(),
+ );
+
+ self.tcx.struct_span_lint_hir(NON_EXHAUSTIVE_OMITTED_PATTERNS, pat.hir_id, pat.span, |build| {
+ let mut lint = build.build("some fields are not explicitly listed");
+ lint.span_label(pat.span, format!("field{} {} not listed", rustc_errors::pluralize!(unmentioned_fields.len()), joined_patterns));
+
+ lint.help(
+ "ensure that all fields are mentioned explicitly by adding the suggested fields",
+ );
+ lint.note(&format!(
+ "the pattern is of type `{}` and the `non_exhaustive_omitted_patterns` attribute was found",
+ ty,
+ ));
+ lint.emit();
+ });
+ }
+
+ /// Returns a diagnostic reporting a struct pattern which does not mention some fields.
+ ///
+ /// ```text
+ /// error[E0027]: pattern does not mention field `bar`
+ /// --> src/main.rs:15:9
+ /// |
+ /// LL | let foo::Foo {} = foo::Foo::new();
+ /// | ^^^^^^^^^^^ missing field `bar`
+ /// ```
+ fn error_unmentioned_fields(
+ &self,
+ pat: &Pat<'_>,
+ unmentioned_fields: &[(&ty::FieldDef, Ident)],
+ have_inaccessible_fields: bool,
+ fields: &'tcx [hir::PatField<'tcx>],
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let inaccessible = if have_inaccessible_fields { " and inaccessible fields" } else { "" };
+ let field_names = if unmentioned_fields.len() == 1 {
+ format!("field `{}`{}", unmentioned_fields[0].1, inaccessible)
+ } else {
+ let fields = unmentioned_fields
+ .iter()
+ .map(|(_, name)| format!("`{}`", name))
+ .collect::<Vec<String>>()
+ .join(", ");
+ format!("fields {}{}", fields, inaccessible)
+ };
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ pat.span,
+ E0027,
+ "pattern does not mention {}",
+ field_names
+ );
+ err.span_label(pat.span, format!("missing {}", field_names));
+ let len = unmentioned_fields.len();
+ let (prefix, postfix, sp) = match fields {
+ [] => match &pat.kind {
+ PatKind::Struct(path, [], false) => {
+ (" { ", " }", path.span().shrink_to_hi().until(pat.span.shrink_to_hi()))
+ }
+ _ => return err,
+ },
+ [.., field] => {
+ // Account for last field having a trailing comma or parse recovery at the tail of
+ // the pattern to avoid invalid suggestion (#78511).
+ let tail = field.span.shrink_to_hi().with_hi(pat.span.hi());
+ match &pat.kind {
+ PatKind::Struct(..) => (", ", " }", tail),
+ _ => return err,
+ }
+ }
+ };
+ err.span_suggestion(
+ sp,
+ &format!(
+ "include the missing field{} in the pattern{}",
+ pluralize!(len),
+ if have_inaccessible_fields { " and ignore the inaccessible fields" } else { "" }
+ ),
+ format!(
+ "{}{}{}{}",
+ prefix,
+ unmentioned_fields
+ .iter()
+ .map(|(_, name)| name.to_string())
+ .collect::<Vec<_>>()
+ .join(", "),
+ if have_inaccessible_fields { ", .." } else { "" },
+ postfix,
+ ),
+ Applicability::MachineApplicable,
+ );
+ err.span_suggestion(
+ sp,
+ &format!(
+ "if you don't care about {these} missing field{s}, you can explicitly ignore {them}",
+ these = pluralize!("this", len),
+ s = pluralize!(len),
+ them = if len == 1 { "it" } else { "them" },
+ ),
+ format!("{}..{}", prefix, postfix),
+ Applicability::MachineApplicable,
+ );
+ err
+ }
+
+ fn check_pat_box(
+ &self,
+ span: Span,
+ inner: &'tcx Pat<'tcx>,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let (box_ty, inner_ty) = if self.check_dereferenceable(span, expected, inner) {
+ // Here, `demand::subtype` is good enough, but I don't
+ // think any errors can be introduced by using `demand::eqtype`.
+ let inner_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: inner.span,
+ });
+ let box_ty = tcx.mk_box(inner_ty);
+ self.demand_eqtype_pat(span, expected, box_ty, ti);
+ (box_ty, inner_ty)
+ } else {
+ let err = tcx.ty_error();
+ (err, err)
+ };
+ self.check_pat(inner, inner_ty, def_bm, ti);
+ box_ty
+ }
+
+ // Precondition: `pat` is a `Ref(inner)` pattern.
+ fn check_pat_ref(
+ &self,
+ pat: &'tcx Pat<'tcx>,
+ inner: &'tcx Pat<'tcx>,
+ mutbl: hir::Mutability,
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self.tcx;
+ let expected = self.shallow_resolve(expected);
+ let (rptr_ty, inner_ty) = if self.check_dereferenceable(pat.span, expected, inner) {
+ // `demand::subtype` would be good enough, but using `eqtype` turns
+ // out to be equally general. See (note_1) for details.
+
+ // Take region, inner-type from expected type if we can,
+ // to avoid creating needless variables. This also helps with
+ // the bad interactions of the given hack detailed in (note_1).
+ debug!("check_pat_ref: expected={:?}", expected);
+ match *expected.kind() {
+ ty::Ref(_, r_ty, r_mutbl) if r_mutbl == mutbl => (expected, r_ty),
+ _ => {
+ let inner_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::TypeInference,
+ span: inner.span,
+ });
+ let rptr_ty = self.new_ref_ty(pat.span, mutbl, inner_ty);
+ debug!("check_pat_ref: demanding {:?} = {:?}", expected, rptr_ty);
+ let err = self.demand_eqtype_pat_diag(pat.span, expected, rptr_ty, ti);
+
+ // Look for a case like `fn foo(&foo: u32)` and suggest
+ // `fn foo(foo: &u32)`
+ if let Some(mut err) = err {
+ self.borrow_pat_suggestion(&mut err, pat);
+ err.emit();
+ }
+ (rptr_ty, inner_ty)
+ }
+ }
+ } else {
+ let err = tcx.ty_error();
+ (err, err)
+ };
+ self.check_pat(inner, inner_ty, def_bm, ti);
+ rptr_ty
+ }
+
+ /// Create a reference type with a fresh region variable.
+ fn new_ref_ty(&self, span: Span, mutbl: hir::Mutability, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let region = self.next_region_var(infer::PatternRegion(span));
+ let mt = ty::TypeAndMut { ty, mutbl };
+ self.tcx.mk_ref(region, mt)
+ }
+
+ /// Type check a slice pattern.
+ ///
+ /// Syntactically, these look like `[pat_0, ..., pat_n]`.
+ /// Semantically, we are type checking a pattern with structure:
+ /// ```ignore (not-rust)
+ /// [before_0, ..., before_n, (slice, after_0, ... after_n)?]
+ /// ```
+ /// The type of `slice`, if it is present, depends on the `expected` type.
+ /// If `slice` is missing, then so is `after_i`.
+ /// If `slice` is present, it can still represent 0 elements.
+ fn check_pat_slice(
+ &self,
+ span: Span,
+ before: &'tcx [Pat<'tcx>],
+ slice: Option<&'tcx Pat<'tcx>>,
+ after: &'tcx [Pat<'tcx>],
+ expected: Ty<'tcx>,
+ def_bm: BindingMode,
+ ti: TopInfo<'tcx>,
+ ) -> Ty<'tcx> {
+ let expected = self.structurally_resolved_type(span, expected);
+ let (element_ty, opt_slice_ty, inferred) = match *expected.kind() {
+ // An array, so we might have something like `let [a, b, c] = [0, 1, 2];`.
+ ty::Array(element_ty, len) => {
+ let min = before.len() as u64 + after.len() as u64;
+ let (opt_slice_ty, expected) =
+ self.check_array_pat_len(span, element_ty, expected, slice, len, min);
+ // `opt_slice_ty.is_none()` => `slice.is_none()`.
+ // Note, though, that opt_slice_ty could be `Some(error_ty)`.
+ assert!(opt_slice_ty.is_some() || slice.is_none());
+ (element_ty, opt_slice_ty, expected)
+ }
+ ty::Slice(element_ty) => (element_ty, Some(expected), expected),
+ // The expected type must be an array or slice, but was neither, so error.
+ _ => {
+ if !expected.references_error() {
+ self.error_expected_array_or_slice(span, expected, ti);
+ }
+ let err = self.tcx.ty_error();
+ (err, Some(err), err)
+ }
+ };
+
+ // Type check all the patterns before `slice`.
+ for elt in before {
+ self.check_pat(elt, element_ty, def_bm, ti);
+ }
+ // Type check the `slice`, if present, against its expected type.
+ if let Some(slice) = slice {
+ self.check_pat(slice, opt_slice_ty.unwrap(), def_bm, ti);
+ }
+ // Type check the elements after `slice`, if present.
+ for elt in after {
+ self.check_pat(elt, element_ty, def_bm, ti);
+ }
+ inferred
+ }
+
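+ // Illustration (sketch) of the three pattern parts against an array type:
+ //
+ //     fn main() {
+ //         let arr = [1, 2, 3, 4, 5];
+ //         // before = `[first]`, slice = `rest @ ..`, after = `[last]`
+ //         let [first, rest @ .., last] = arr;
+ //         assert_eq!(rest, [2, 3, 4]); // `rest: [i32; 3]`, see `check_array_pat_len`
+ //         println!("{first} {last}");
+ //     }
+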
+ /// Type check the length of an array pattern.
+ ///
+ /// Returns both the type of the variable length pattern (or `None`), and the potentially
+ /// inferred array type. We only return `None` for the slice type if `slice.is_none()`.
+ fn check_array_pat_len(
+ &self,
+ span: Span,
+ element_ty: Ty<'tcx>,
+ arr_ty: Ty<'tcx>,
+ slice: Option<&'tcx Pat<'tcx>>,
+ len: ty::Const<'tcx>,
+ min_len: u64,
+ ) -> (Option<Ty<'tcx>>, Ty<'tcx>) {
+ if let Some(len) = len.try_eval_usize(self.tcx, self.param_env) {
+ // Now we know the length...
+ if slice.is_none() {
+ // ...and since there is no variable-length pattern,
+ // we require an exact match between the number of elements
+ // in the array pattern and as provided by the matched type.
+ if min_len == len {
+ return (None, arr_ty);
+ }
+
+ self.error_scrutinee_inconsistent_length(span, min_len, len);
+ } else if let Some(pat_len) = len.checked_sub(min_len) {
+ // The variable-length pattern was there,
+ // so it has an array type with the remaining elements left as its size...
+ return (Some(self.tcx.mk_array(element_ty, pat_len)), arr_ty);
+ } else {
+ // ...however, in this case, there were no remaining elements.
+ // That is, the slice pattern requires more than the array type offers.
+ self.error_scrutinee_with_rest_inconsistent_length(span, min_len, len);
+ }
+ } else if slice.is_none() {
+ // We have a pattern with a fixed length,
+ // which we can use to infer the length of the array.
+ let updated_arr_ty = self.tcx.mk_array(element_ty, min_len);
+ self.demand_eqtype(span, updated_arr_ty, arr_ty);
+ return (None, updated_arr_ty);
+ } else {
+ // We have a variable-length pattern and don't know the array length.
+ // This happens if we have e.g.,
+ // `let [a, b, ..] = arr` where `arr: [T; N]` where `const N: usize`.
+ self.error_scrutinee_unfixed_length(span);
+ }
+
+ // If we get here, we must have emitted an error.
+ (Some(self.tcx.ty_error()), arr_ty)
+ }
+
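+ // Illustration (sketches) of the three length errors reported below:
+ //
+ //     match [1, 2, 3] {
+ //         [a, b] => {} // error[E0527]: pattern requires 2 elements but
+ //     }                // array has 3
+ //
+ //     match [1, 2] {
+ //         [a, b, c, d, ..] => {} // error[E0528]: pattern requires at least
+ //     }                          // 4 elements but array has 2
+ //
+ //     fn f<const N: usize>(x: [u8; N]) {
+ //         let [first, rest @ ..] = x; // error[E0730]: cannot pattern-match
+ //     }                               // on an array without a fixed length
+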
+ fn error_scrutinee_inconsistent_length(&self, span: Span, min_len: u64, size: u64) {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0527,
+ "pattern requires {} element{} but array has {}",
+ min_len,
+ pluralize!(min_len),
+ size,
+ )
+ .span_label(span, format!("expected {} element{}", size, pluralize!(size)))
+ .emit();
+ }
+
+ fn error_scrutinee_with_rest_inconsistent_length(&self, span: Span, min_len: u64, size: u64) {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0528,
+ "pattern requires at least {} element{} but array has {}",
+ min_len,
+ pluralize!(min_len),
+ size,
+ )
+ .span_label(
+ span,
+ format!("pattern cannot match array of {} element{}", size, pluralize!(size),),
+ )
+ .emit();
+ }
+
+ fn error_scrutinee_unfixed_length(&self, span: Span) {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0730,
+ "cannot pattern-match on an array without a fixed length",
+ )
+ .emit();
+ }
+
+ fn error_expected_array_or_slice(&self, span: Span, expected_ty: Ty<'tcx>, ti: TopInfo<'tcx>) {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0529,
+ "expected an array or slice, found `{expected_ty}`"
+ );
+ if let ty::Ref(_, ty, _) = expected_ty.kind()
+ && let ty::Array(..) | ty::Slice(..) = ty.kind()
+ {
+ err.help("the semantics of slice patterns changed recently; see issue #62254");
+ } else if Autoderef::new(&self.infcx, self.param_env, self.body_id, span, expected_ty, span)
+ .any(|(ty, _)| matches!(ty.kind(), ty::Slice(..) | ty::Array(..)))
+ && let (Some(span), true) = (ti.span, ti.origin_expr)
+ && let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(span)
+ {
+ let ty = self.resolve_vars_if_possible(ti.expected);
+ let (is_slice_or_array_or_vector, resolved_ty) =
+ self.is_slice_or_array_or_vector(&mut err, snippet.clone(), ty);
+ match resolved_ty.kind() {
+ ty::Adt(adt_def, _)
+ if self.tcx.is_diagnostic_item(sym::Option, adt_def.did())
+ || self.tcx.is_diagnostic_item(sym::Result, adt_def.did()) =>
+ {
+ // Slicing won't work here, but `.as_deref()` might (issue #91328).
+ err.span_suggestion(
+ span,
+ "consider using `as_deref` here",
+ format!("{snippet}.as_deref()"),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => ()
+ }
+ if is_slice_or_array_or_vector {
+ err.span_suggestion(
+ span,
+ "consider slicing here",
+ format!("{snippet}[..]"),
+ Applicability::MachineApplicable,
+ );
+ }
+ }
+ err.span_label(span, format!("pattern cannot match with input type `{expected_ty}`"));
+ err.emit();
+ }
+
+ fn is_slice_or_array_or_vector(
+ &self,
+ err: &mut Diagnostic,
+ snippet: String,
+ ty: Ty<'tcx>,
+ ) -> (bool, Ty<'tcx>) {
+ match ty.kind() {
+ ty::Adt(adt_def, _) if self.tcx.is_diagnostic_item(sym::Vec, adt_def.did()) => {
+ (true, ty)
+ }
+ ty::Ref(_, ty, _) => self.is_slice_or_array_or_vector(err, snippet, *ty),
+ ty::Slice(..) | ty::Array(..) => (true, ty),
+ _ => (false, ty),
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/place_op.rs b/compiler/rustc_typeck/src/check/place_op.rs
new file mode 100644
index 000000000..2e0f37eba
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/place_op.rs
@@ -0,0 +1,451 @@
+use crate::check::method::MethodCallee;
+use crate::check::{has_expected_num_generic_args, FnCtxt, PlaceOp};
+use rustc_ast as ast;
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_infer::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind};
+use rustc_infer::infer::InferOk;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, OverloadedDeref, PointerCast};
+use rustc_middle::ty::adjustment::{AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
+use rustc_middle::ty::{self, Ty};
+use rustc_span::symbol::{sym, Ident};
+use rustc_span::Span;
+use rustc_trait_selection::autoderef::Autoderef;
+use std::slice;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Type-check `*oprnd_expr` with `oprnd_expr` type-checked already.
+ pub(super) fn lookup_derefing(
+ &self,
+ expr: &hir::Expr<'_>,
+ oprnd_expr: &'tcx hir::Expr<'tcx>,
+ oprnd_ty: Ty<'tcx>,
+ ) -> Option<Ty<'tcx>> {
+ if let Some(mt) = oprnd_ty.builtin_deref(true) {
+ return Some(mt.ty);
+ }
+
+ let ok = self.try_overloaded_deref(expr.span, oprnd_ty)?;
+ let method = self.register_infer_ok_obligations(ok);
+ if let ty::Ref(region, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() {
+ self.apply_adjustments(
+ oprnd_expr,
+ vec![Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, AutoBorrowMutability::Not)),
+ target: method.sig.inputs()[0],
+ }],
+ );
+ } else {
+ span_bug!(expr.span, "input to deref is not a ref?");
+ }
+ let ty = self.make_overloaded_place_return_type(method).ty;
+ self.write_method_call(expr.hir_id, method);
+ Some(ty)
+ }
+
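+ // Illustration (sketch): a user type whose `*` goes through this
+ // overloaded-deref path (`Wrapper` is a hypothetical example type):
+ //
+ //     use std::ops::Deref;
+ //
+ //     struct Wrapper(String);
+ //     impl Deref for Wrapper {
+ //         type Target = String;
+ //         fn deref(&self) -> &String { &self.0 }
+ //     }
+ //
+ //     fn main() {
+ //         let w = Wrapper(String::from("hi"));
+ //         // `*w` resolves to `<Wrapper as Deref>::deref`; the operand is
+ //         // adjusted to `&Wrapper` and the place type is `String`.
+ //         println!("{}", (*w).len());
+ //     }
+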
+ /// Type-check `*base_expr[index_expr]` with `base_expr` and `index_expr` type-checked already.
+ pub(super) fn lookup_indexing(
+ &self,
+ expr: &hir::Expr<'_>,
+ base_expr: &'tcx hir::Expr<'tcx>,
+ base_ty: Ty<'tcx>,
+ index_expr: &'tcx hir::Expr<'tcx>,
+ idx_ty: Ty<'tcx>,
+ ) -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> {
+ // FIXME(#18741) -- this is almost but not quite the same as the
+ // autoderef that normal method probing does. They could likely be
+ // consolidated.
+
+ let mut autoderef = self.autoderef(base_expr.span, base_ty);
+ let mut result = None;
+ while result.is_none() && autoderef.next().is_some() {
+ result = self.try_index_step(expr, base_expr, &autoderef, idx_ty, index_expr);
+ }
+ self.register_predicates(autoderef.into_obligations());
+ result
+ }
+
+ fn negative_index(
+ &self,
+ ty: Ty<'tcx>,
+ span: Span,
+ base_expr: &hir::Expr<'_>,
+ ) -> Option<(Ty<'tcx>, Ty<'tcx>)> {
+ let ty = self.resolve_vars_if_possible(ty);
+ let mut err = self.tcx.sess.struct_span_err(
+ span,
+ &format!("negative integers cannot be used to index on a `{ty}`"),
+ );
+ err.span_label(span, &format!("cannot use a negative integer for indexing on `{ty}`"));
+ if let (hir::ExprKind::Path(..), Ok(snippet)) =
+ (&base_expr.kind, self.tcx.sess.source_map().span_to_snippet(base_expr.span))
+ {
+ // `foo[-1]` to `foo[foo.len() - 1]`
+ err.span_suggestion_verbose(
+ span.shrink_to_lo(),
+ &format!(
+ "to access an element starting from the end of the `{ty}`, compute the index",
+ ),
+ format!("{snippet}.len() "),
+ Applicability::MachineApplicable,
+ );
+ }
+ err.emit();
+ Some((self.tcx.ty_error(), self.tcx.ty_error()))
+ }
+
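+ // Illustration (sketch) of the diagnostic and its suggestion:
+ //
+ //     let v = vec![1, 2, 3];
+ //     let x = v[-1];          // error: negative integers cannot be used to
+ //                             // index on a `Vec<_>`
+ //     let x = v[v.len() - 1]; // suggested rewrite: compute the index from
+ //                             // the end
+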
+ /// To type-check `base_expr[index_expr]`, we progressively autoderef
+ /// (and otherwise adjust) `base_expr`, looking for a type which either
+ /// supports builtin indexing or overloaded indexing.
+ /// This loop implements one step in that search; the autoderef loop
+ /// is implemented by `lookup_indexing`.
+ fn try_index_step(
+ &self,
+ expr: &hir::Expr<'_>,
+ base_expr: &hir::Expr<'_>,
+ autoderef: &Autoderef<'a, 'tcx>,
+ index_ty: Ty<'tcx>,
+ index_expr: &hir::Expr<'_>,
+ ) -> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)> {
+ let adjusted_ty =
+ self.structurally_resolved_type(autoderef.span(), autoderef.final_ty(false));
+ debug!(
+ "try_index_step(expr={:?}, base_expr={:?}, adjusted_ty={:?}, \
+ index_ty={:?})",
+ expr, base_expr, adjusted_ty, index_ty
+ );
+
+ if let hir::ExprKind::Unary(
+ hir::UnOp::Neg,
+ hir::Expr {
+ kind: hir::ExprKind::Lit(hir::Lit { node: ast::LitKind::Int(..), .. }),
+ ..
+ },
+ ) = index_expr.kind
+ {
+ match adjusted_ty.kind() {
+ ty::Adt(def, _) if self.tcx.is_diagnostic_item(sym::Vec, def.did()) => {
+ return self.negative_index(adjusted_ty, index_expr.span, base_expr);
+ }
+ ty::Slice(_) | ty::Array(_, _) => {
+ return self.negative_index(adjusted_ty, index_expr.span, base_expr);
+ }
+ _ => {}
+ }
+ }
+
+ for unsize in [false, true] {
+ let mut self_ty = adjusted_ty;
+ if unsize {
+ // We only unsize arrays here.
+ if let ty::Array(element_ty, _) = adjusted_ty.kind() {
+ self_ty = self.tcx.mk_slice(*element_ty);
+ } else {
+ continue;
+ }
+ }
+
+ // If the lookup succeeds, install the method in the table and extract the
+ // index/element types from the method signature.
+ let input_ty = self.next_ty_var(TypeVariableOrigin {
+ kind: TypeVariableOriginKind::AutoDeref,
+ span: base_expr.span,
+ });
+ let method =
+ self.try_overloaded_place_op(expr.span, self_ty, &[input_ty], PlaceOp::Index);
+
+ if let Some(result) = method {
+ debug!("try_index_step: success, using overloaded indexing");
+ let method = self.register_infer_ok_obligations(result);
+
+ let mut adjustments = self.adjust_steps(autoderef);
+ if let ty::Ref(region, _, hir::Mutability::Not) = method.sig.inputs()[0].kind() {
+ adjustments.push(Adjustment {
+ kind: Adjust::Borrow(AutoBorrow::Ref(*region, AutoBorrowMutability::Not)),
+ target: self.tcx.mk_ref(
+ *region,
+ ty::TypeAndMut { mutbl: hir::Mutability::Not, ty: adjusted_ty },
+ ),
+ });
+ } else {
+ span_bug!(expr.span, "input to index is not a ref?");
+ }
+ if unsize {
+ adjustments.push(Adjustment {
+ kind: Adjust::Pointer(PointerCast::Unsize),
+ target: method.sig.inputs()[0],
+ });
+ }
+ self.apply_adjustments(base_expr, adjustments);
+
+ self.write_method_call(expr.hir_id, method);
+
+ return Some((input_ty, self.make_overloaded_place_return_type(method).ty));
+ }
+ }
+
+ None
+ }
+
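+ // Illustration (sketch): an index step that succeeds through the
+ // overloaded-indexing arm (`Grid` is a hypothetical example type):
+ //
+ //     use std::ops::Index;
+ //
+ //     struct Grid { cells: Vec<i32>, width: usize }
+ //     impl Index<(usize, usize)> for Grid {
+ //         type Output = i32;
+ //         fn index(&self, (x, y): (usize, usize)) -> &i32 {
+ //             &self.cells[y * self.width + x]
+ //         }
+ //     }
+ //
+ //     fn main() {
+ //         let g = Grid { cells: vec![0; 6], width: 3 };
+ //         // `g[(1, 0)]` resolves via `try_overloaded_place_op(.., PlaceOp::Index)`;
+ //         // `g` is autoref'd to `&Grid` and the element type is `i32`.
+ //         println!("{}", g[(1, 0)]);
+ //     }
+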
+ /// Try to resolve an overloaded place op. We only deal with the immutable
+ /// variant here (Deref/Index). In some contexts we would need the mutable
+ /// variant (DerefMut/IndexMut); those would be later converted by
+ /// `convert_place_derefs_to_mutable`.
+ pub(super) fn try_overloaded_place_op(
+ &self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ arg_tys: &[Ty<'tcx>],
+ op: PlaceOp,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ debug!("try_overloaded_place_op({:?},{:?},{:?})", span, base_ty, op);
+
+ let (imm_tr, imm_op) = match op {
+ PlaceOp::Deref => (self.tcx.lang_items().deref_trait(), sym::deref),
+ PlaceOp::Index => (self.tcx.lang_items().index_trait(), sym::index),
+ };
+
+ // If the lang item was declared incorrectly, stop here so that we don't
+ // run into an ICE (#83893). The error is reported where the lang item is
+ // declared.
+ if !has_expected_num_generic_args(
+ self.tcx,
+ imm_tr,
+ match op {
+ PlaceOp::Deref => 0,
+ PlaceOp::Index => 1,
+ },
+ ) {
+ return None;
+ }
+
+ imm_tr.and_then(|trait_did| {
+ self.lookup_method_in_trait(
+ span,
+ Ident::with_dummy_span(imm_op),
+ trait_did,
+ base_ty,
+ Some(arg_tys),
+ )
+ })
+ }
+
+ fn try_mutable_overloaded_place_op(
+ &self,
+ span: Span,
+ base_ty: Ty<'tcx>,
+ arg_tys: &[Ty<'tcx>],
+ op: PlaceOp,
+ ) -> Option<InferOk<'tcx, MethodCallee<'tcx>>> {
+ debug!("try_mutable_overloaded_place_op({:?},{:?},{:?})", span, base_ty, op);
+
+ let (mut_tr, mut_op) = match op {
+ PlaceOp::Deref => (self.tcx.lang_items().deref_mut_trait(), sym::deref_mut),
+ PlaceOp::Index => (self.tcx.lang_items().index_mut_trait(), sym::index_mut),
+ };
+
+ // If the lang item was declared incorrectly, stop here so that we don't
+ // run into an ICE (#83893). The error is reported where the lang item is
+ // declared.
+ if !has_expected_num_generic_args(
+ self.tcx,
+ mut_tr,
+ match op {
+ PlaceOp::Deref => 0,
+ PlaceOp::Index => 1,
+ },
+ ) {
+ return None;
+ }
+
+ mut_tr.and_then(|trait_did| {
+ self.lookup_method_in_trait(
+ span,
+ Ident::with_dummy_span(mut_op),
+ trait_did,
+ base_ty,
+ Some(arg_tys),
+ )
+ })
+ }
+
+ /// Convert auto-derefs, indices, etc of an expression from `Deref` and `Index`
+ /// into `DerefMut` and `IndexMut` respectively.
+ ///
+ /// This is a second pass of typechecking derefs/indices. We need this because we do not
+ /// always know whether a place needs to be mutable or not in the first pass.
+ /// This happens, for example, when there is an implicit mutable reborrow
+ /// because the place is used as the receiver of a method call.
+ pub fn convert_place_derefs_to_mutable(&self, expr: &hir::Expr<'_>) {
+ // Gather up expressions we want to munge.
+ let mut exprs = vec![expr];
+
+ while let hir::ExprKind::Field(ref expr, _)
+ | hir::ExprKind::Index(ref expr, _)
+ | hir::ExprKind::Unary(hir::UnOp::Deref, ref expr) = exprs.last().unwrap().kind
+ {
+ exprs.push(expr);
+ }
+
+ debug!("convert_place_derefs_to_mutable: exprs={:?}", exprs);
+
+ // Fix up autoderefs and derefs.
+ let mut inside_union = false;
+ for (i, &expr) in exprs.iter().rev().enumerate() {
+ debug!("convert_place_derefs_to_mutable: i={} expr={:?}", i, expr);
+
+ let mut source = self.node_ty(expr.hir_id);
+ if matches!(expr.kind, hir::ExprKind::Unary(hir::UnOp::Deref, _)) {
+ // Clear previous flag; after a pointer indirection it does not apply any more.
+ inside_union = false;
+ }
+ if source.is_union() {
+ inside_union = true;
+ }
+ // Fix up the autoderefs. Autorefs can only occur immediately preceding
+ // overloaded place ops, and will be fixed by them in order to get
+ // the correct region.
+ // Do not mutate adjustments in place, but rather take them,
+ // and replace them after mutating them, to avoid having the
+ // typeck results borrowed during (`deref_mut`) method resolution.
+ let previous_adjustments =
+ self.typeck_results.borrow_mut().adjustments_mut().remove(expr.hir_id);
+ if let Some(mut adjustments) = previous_adjustments {
+ for adjustment in &mut adjustments {
+ if let Adjust::Deref(Some(ref mut deref)) = adjustment.kind
+ && let Some(ok) = self.try_mutable_overloaded_place_op(
+ expr.span,
+ source,
+ &[],
+ PlaceOp::Deref,
+ )
+ {
+ let method = self.register_infer_ok_obligations(ok);
+ if let ty::Ref(region, _, mutbl) = *method.sig.output().kind() {
+ *deref = OverloadedDeref { region, mutbl, span: deref.span };
+ }
+ // If this is a union field, also throw an error for `DerefMut` of `ManuallyDrop` (see RFC 2514).
+ // This helps avoid accidental drops.
+ if inside_union
+ && source.ty_adt_def().map_or(false, |adt| adt.is_manually_drop())
+ {
+ let mut err = self.tcx.sess.struct_span_err(
+ expr.span,
+ "not automatically applying `DerefMut` on `ManuallyDrop` union field",
+ );
+ err.help(
+ "writing to this reference calls the destructor for the old value",
+ );
+ err.help("add an explicit `*` if that is desired, or call `ptr::write` to not run the destructor");
+ err.emit();
+ }
+ }
+ source = adjustment.target;
+ }
+ self.typeck_results.borrow_mut().adjustments_mut().insert(expr.hir_id, adjustments);
+ }
+
+ match expr.kind {
+ hir::ExprKind::Index(base_expr, ..) => {
+ self.convert_place_op_to_mutable(PlaceOp::Index, expr, base_expr);
+ }
+ hir::ExprKind::Unary(hir::UnOp::Deref, base_expr) => {
+ self.convert_place_op_to_mutable(PlaceOp::Deref, expr, base_expr);
+ }
+ _ => {}
+ }
+ }
+ }
+
+ fn convert_place_op_to_mutable(
+ &self,
+ op: PlaceOp,
+ expr: &hir::Expr<'_>,
+ base_expr: &hir::Expr<'_>,
+ ) {
+ debug!("convert_place_op_to_mutable({:?}, {:?}, {:?})", op, expr, base_expr);
+ if !self.typeck_results.borrow().is_method_call(expr) {
+ debug!("convert_place_op_to_mutable - builtin, nothing to do");
+ return;
+ }
+
+ // Need to deref because overloaded place ops take self by-reference.
+ let base_ty = self
+ .typeck_results
+ .borrow()
+ .expr_ty_adjusted(base_expr)
+ .builtin_deref(false)
+ .expect("place op takes something that is not a ref")
+ .ty;
+
+ let arg_ty = match op {
+ PlaceOp::Deref => None,
+ PlaceOp::Index => {
+ // We would need to recover the `T` used when we resolve `<_ as Index<T>>::index`
+ // in try_index_step. This is the subst at index 1.
+ //
+ // Note: we should *not* use `expr_ty` of index_expr here because autoderef
+ // during coercions can cause type of index_expr to differ from `T` (#72002).
+ // We also could not use `expr_ty_adjusted` of index_expr because reborrowing
+ // during coercions can also cause type of index_expr to differ from `T`,
+ // which can potentially cause regionck failure (#74933).
+ Some(self.typeck_results.borrow().node_substs(expr.hir_id).type_at(1))
+ }
+ };
+ let arg_tys = match arg_ty {
+ None => &[],
+ Some(ref ty) => slice::from_ref(ty),
+ };
+
+ let method = self.try_mutable_overloaded_place_op(expr.span, base_ty, arg_tys, op);
+ let method = match method {
+ Some(ok) => self.register_infer_ok_obligations(ok),
+ // Couldn't find the mutable variant of the place op, keep the
+ // current, immutable version.
+ None => return,
+ };
+ debug!("convert_place_op_to_mutable: method={:?}", method);
+ self.write_method_call(expr.hir_id, method);
+
+ let ty::Ref(region, _, hir::Mutability::Mut) = method.sig.inputs()[0].kind() else {
+ span_bug!(expr.span, "input to mutable place op is not a mut ref?");
+ };
+
+ // Convert the autoref in the base expr to mutable with the correct
+ // region and mutability.
+ let base_expr_ty = self.node_ty(base_expr.hir_id);
+ if let Some(adjustments) =
+ self.typeck_results.borrow_mut().adjustments_mut().get_mut(base_expr.hir_id)
+ {
+ let mut source = base_expr_ty;
+ for adjustment in &mut adjustments[..] {
+ if let Adjust::Borrow(AutoBorrow::Ref(..)) = adjustment.kind {
+ debug!("convert_place_op_to_mutable: converting autoref {:?}", adjustment);
+ let mutbl = AutoBorrowMutability::Mut {
+ // Deref/indexing can be desugared to a method call,
+ // so maybe we could use two-phase here.
+ // See the documentation of AllowTwoPhase for why that's
+ // not the case today.
+ allow_two_phase_borrow: AllowTwoPhase::No,
+ };
+ adjustment.kind = Adjust::Borrow(AutoBorrow::Ref(*region, mutbl));
+ adjustment.target = self
+ .tcx
+ .mk_ref(*region, ty::TypeAndMut { ty: source, mutbl: mutbl.into() });
+ }
+ source = adjustment.target;
+ }
+
+ // If we have an autoref followed by unsizing at the end, fix the unsize target.
+ if let [
+ ..,
+ Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. },
+ Adjustment { kind: Adjust::Pointer(PointerCast::Unsize), ref mut target },
+ ] = adjustments[..]
+ {
+ *target = method.sig.inputs()[0];
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/region.rs b/compiler/rustc_typeck/src/check/region.rs
new file mode 100644
index 000000000..0081e9049
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/region.rs
@@ -0,0 +1,837 @@
+//! This file builds up the `ScopeTree`, which describes
+//! the parent links in the region hierarchy.
+//!
+//! For more information about how MIR-based region-checking works,
+//! see the [rustc dev guide].
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
+
+use rustc_ast::walk_list;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{Arm, Block, Expr, Local, Pat, PatKind, Stmt};
+use rustc_index::vec::Idx;
+use rustc_middle::middle::region::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::source_map;
+use rustc_span::Span;
+
+use std::mem;
+
+#[derive(Debug, Copy, Clone)]
+pub struct Context {
+ /// The scope that contains any new variables declared, plus its depth in
+ /// the scope tree.
+ var_parent: Option<(Scope, ScopeDepth)>,
+
+ /// Region parent of expressions, etc., plus its depth in the scope tree.
+ parent: Option<(Scope, ScopeDepth)>,
+}
+
+struct RegionResolutionVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ // The number of expressions and patterns visited in the current body.
+ expr_and_pat_count: usize,
+ // When this is `true`, we record the `Scopes` we encounter
+ // when processing a Yield expression. This allows us to fix
+ // up their indices.
+ pessimistic_yield: bool,
+ // Stores scopes when `pessimistic_yield` is `true`.
+ fixup_scopes: Vec<Scope>,
+ // The generated scope tree.
+ scope_tree: ScopeTree,
+
+ cx: Context,
+
+ /// `terminating_scopes` is a set containing the ids of each
+ /// statement, or conditional/repeating expression. These scopes
+    /// are called "terminating scopes" because, when attempting to
+ /// find the scope of a temporary, by default we search up the
+ /// enclosing scopes until we encounter the terminating scope. A
+ /// conditional/repeating expression is one which is not
+ /// guaranteed to execute exactly once upon entering the parent
+ /// scope. This could be because the expression only executes
+ /// conditionally, such as the expression `b` in `a && b`, or
+ /// because the expression may execute many times, such as a loop
+ /// body. The reason that we distinguish such expressions is that,
+ /// upon exiting the parent scope, we cannot statically know how
+ /// many times the expression executed, and thus if the expression
+ /// creates temporaries we cannot know statically how many such
+ /// temporaries we would have to cleanup. Therefore, we ensure that
+ /// the temporaries never outlast the conditional/repeating
+ /// expression, preventing the need for dynamic checks and/or
+ /// arbitrary amounts of stack space. Terminating scopes end
+ /// up being contained in a DestructionScope that contains the
+ /// destructor's execution.
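+    ///
+    /// A minimal illustration (with hypothetical `cond`, `consume` and
+    /// `make_temp` functions):
+    ///
+    /// ```ignore (illustrative)
+    /// while cond() {
+    ///     consume(&make_temp());
+    ///     // ^ the statement is a terminating scope, so the `make_temp()`
+    ///     // temporary is dropped here on every iteration rather than
+    ///     // accumulating for an unknown number of iterations
+    /// }
+    /// ```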
+ terminating_scopes: FxHashSet<hir::ItemLocalId>,
+}
+
+/// Records the lifetime of a local variable as `cx.var_parent`
+fn record_var_lifetime(
+ visitor: &mut RegionResolutionVisitor<'_>,
+ var_id: hir::ItemLocalId,
+ _sp: Span,
+) {
+ match visitor.cx.var_parent {
+ None => {
+ // this can happen in extern fn declarations like
+ //
+ // extern fn isalnum(c: c_int) -> c_int
+ }
+ Some((parent_scope, _)) => visitor.scope_tree.record_var_scope(var_id, parent_scope),
+ }
+}
+
+fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx hir::Block<'tcx>) {
+ debug!("resolve_block(blk.hir_id={:?})", blk.hir_id);
+
+ let prev_cx = visitor.cx;
+
+ // We treat the tail expression in the block (if any) somewhat
+ // differently from the statements. The issue has to do with
+ // temporary lifetimes. Consider the following:
+ //
+ // quux({
+ // let inner = ... (&bar()) ...;
+ //
+ // (... (&foo()) ...) // (the tail expression)
+ // }, other_argument());
+ //
+ // Each of the statements within the block is a terminating
+ // scope, and thus a temporary (e.g., the result of calling
+ // `bar()` in the initializer expression for `let inner = ...;`)
+ // will be cleaned up immediately after its corresponding
+ // statement (i.e., `let inner = ...;`) executes.
+ //
+ // On the other hand, temporaries associated with evaluating the
+ // tail expression for the block are assigned lifetimes so that
+ // they will be cleaned up as part of the terminating scope
+ // *surrounding* the block expression. Here, the terminating
+ // scope for the block expression is the `quux(..)` call; so
+ // those temporaries will only be cleaned up *after* both
+ // `other_argument()` has run and also the call to `quux(..)`
+ // itself has returned.
+
+ visitor.enter_node_scope_with_dtor(blk.hir_id.local_id);
+ visitor.cx.var_parent = visitor.cx.parent;
+
+ {
+ // This block should be kept approximately in sync with
+ // `intravisit::walk_block`. (We manually walk the block, rather
+ // than call `walk_block`, in order to maintain precise
+ // index information.)
+
+ for (i, statement) in blk.stmts.iter().enumerate() {
+ match statement.kind {
+ hir::StmtKind::Local(..) | hir::StmtKind::Item(..) => {
+ // Each declaration introduces a subscope for bindings
+ // introduced by the declaration; this subscope covers a
+ // suffix of the block. Each subscope in a block has the
+ // previous subscope in the block as a parent, except for
+ // the first such subscope, which has the block itself as a
+ // parent.
+ visitor.enter_scope(Scope {
+ id: blk.hir_id.local_id,
+ data: ScopeData::Remainder(FirstStatementIndex::new(i)),
+ });
+ visitor.cx.var_parent = visitor.cx.parent;
+ }
+ hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
+ }
+ visitor.visit_stmt(statement)
+ }
+ walk_list!(visitor, visit_expr, &blk.expr);
+ }
+
+ visitor.cx = prev_cx;
+}
+
+fn resolve_arm<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, arm: &'tcx hir::Arm<'tcx>) {
+ let prev_cx = visitor.cx;
+
+ visitor.enter_scope(Scope { id: arm.hir_id.local_id, data: ScopeData::Node });
+ visitor.cx.var_parent = visitor.cx.parent;
+
+ visitor.terminating_scopes.insert(arm.body.hir_id.local_id);
+
+ if let Some(hir::Guard::If(ref expr)) = arm.guard {
+ visitor.terminating_scopes.insert(expr.hir_id.local_id);
+ }
+
+ intravisit::walk_arm(visitor, arm);
+
+ visitor.cx = prev_cx;
+}
+
+fn resolve_pat<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, pat: &'tcx hir::Pat<'tcx>) {
+ visitor.record_child_scope(Scope { id: pat.hir_id.local_id, data: ScopeData::Node });
+
+ // If this is a binding then record the lifetime of that binding.
+ if let PatKind::Binding(..) = pat.kind {
+ record_var_lifetime(visitor, pat.hir_id.local_id, pat.span);
+ }
+
+ debug!("resolve_pat - pre-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);
+
+ intravisit::walk_pat(visitor, pat);
+
+ visitor.expr_and_pat_count += 1;
+
+ debug!("resolve_pat - post-increment {} pat = {:?}", visitor.expr_and_pat_count, pat);
+}
+
+fn resolve_stmt<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, stmt: &'tcx hir::Stmt<'tcx>) {
+ let stmt_id = stmt.hir_id.local_id;
+ debug!("resolve_stmt(stmt.id={:?})", stmt_id);
+
+ // Every statement will clean up the temporaries created during
+ // execution of that statement. Therefore each statement has an
+ // associated destruction scope that represents the scope of the
+ // statement plus its destructors, and thus the scope for which
+ // regions referenced by the destructors need to survive.
+ visitor.terminating_scopes.insert(stmt_id);
+
+ let prev_parent = visitor.cx.parent;
+ visitor.enter_node_scope_with_dtor(stmt_id);
+
+ intravisit::walk_stmt(visitor, stmt);
+
+ visitor.cx.parent = prev_parent;
+}
+
+fn resolve_expr<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, expr: &'tcx hir::Expr<'tcx>) {
+ debug!("resolve_expr - pre-increment {} expr = {:?}", visitor.expr_and_pat_count, expr);
+
+ let prev_cx = visitor.cx;
+ visitor.enter_node_scope_with_dtor(expr.hir_id.local_id);
+
+ {
+ let terminating_scopes = &mut visitor.terminating_scopes;
+ let mut terminating = |id: hir::ItemLocalId| {
+ terminating_scopes.insert(id);
+ };
+ match expr.kind {
+ // Conditional or repeating scopes are always terminating
+ // scopes, meaning that temporaries cannot outlive them.
+ // This ensures fixed size stacks.
+ hir::ExprKind::Binary(
+ source_map::Spanned { node: hir::BinOpKind::And, .. },
+ _,
+ ref r,
+ )
+ | hir::ExprKind::Binary(
+ source_map::Spanned { node: hir::BinOpKind::Or, .. },
+ _,
+ ref r,
+ ) => {
+ // For shortcircuiting operators, mark the RHS as a terminating
+ // scope since it only executes conditionally.
+ terminating(r.hir_id.local_id);
+ }
+
+ hir::ExprKind::If(_, ref then, Some(ref otherwise)) => {
+ terminating(then.hir_id.local_id);
+ terminating(otherwise.hir_id.local_id);
+ }
+
+ hir::ExprKind::If(_, ref then, None) => {
+ terminating(then.hir_id.local_id);
+ }
+
+ hir::ExprKind::Loop(ref body, _, _, _) => {
+ terminating(body.hir_id.local_id);
+ }
+
+ hir::ExprKind::DropTemps(ref expr) => {
+ // `DropTemps(expr)` does not denote a conditional scope.
+ // Rather, we want to achieve the same behavior as `{ let _t = expr; _t }`.
+ terminating(expr.hir_id.local_id);
+ }
+
+ hir::ExprKind::AssignOp(..)
+ | hir::ExprKind::Index(..)
+ | hir::ExprKind::Unary(..)
+ | hir::ExprKind::Call(..)
+ | hir::ExprKind::MethodCall(..) => {
+ // FIXME(https://github.com/rust-lang/rfcs/issues/811) Nested method calls
+ //
+ // The lifetimes for a call or method call look as follows:
+ //
+ // call.id
+ // - arg0.id
+ // - ...
+ // - argN.id
+ // - call.callee_id
+ //
+ // The idea is that call.callee_id represents *the time when
+ // the invoked function is actually running* and call.id
+ // represents *the time to prepare the arguments and make the
+            //   call*. See the section "Borrows in Calls" in borrowck/README.md
+ // for an extended explanation of why this distinction is
+ // important.
+ //
+ // record_superlifetime(new_cx, expr.callee_id);
+ }
+
+ _ => {}
+ }
+ }
+
+ let prev_pessimistic = visitor.pessimistic_yield;
+
+ // Ordinarily, we can rely on the visit order of HIR intravisit
+ // to correspond to the actual execution order of statements.
+ // However, there's a weird corner case with compound assignment
+ // operators (e.g. `a += b`). The evaluation order depends on whether
+ // or not the operator is overloaded (e.g. whether or not a trait
+ // like AddAssign is implemented).
+
+ // For primitive types (which, despite having a trait impl, don't actually
+ // end up calling it), the evaluation order is right-to-left. For example,
+ // the following code snippet:
+ //
+ // let y = &mut 0;
+ // *{println!("LHS!"); y} += {println!("RHS!"); 1};
+ //
+ // will print:
+ //
+ // RHS!
+ // LHS!
+ //
+ // However, if the operator is used on a non-primitive type,
+ // the evaluation order will be left-to-right, since the operator
+    // actually gets desugared to a method call. For example, this
+ // nearly identical code snippet:
+ //
+ // let y = &mut String::new();
+ // *{println!("LHS String"); y} += {println!("RHS String"); "hi"};
+ //
+ // will print:
+ // LHS String
+ // RHS String
+ //
+ // To determine the actual execution order, we need to perform
+ // trait resolution. Unfortunately, we need to be able to compute
+ // yield_in_scope before type checking is even done, as it gets
+ // used by AST borrowcheck.
+ //
+ // Fortunately, we don't need to know the actual execution order.
+ // It suffices to know the 'worst case' order with respect to yields.
+ // Specifically, we need to know the highest 'expr_and_pat_count'
+ // that we could assign to the yield expression. To do this,
+ // we pick the greater of the two values from the left-hand
+ // and right-hand expressions. This makes us overly conservative
+ // about what types could possibly live across yield points,
+ // but we will never fail to detect that a type does actually
+ // live across a yield point. The latter part is critical -
+ // we're already overly conservative about what types will live
+ // across yield points, as the generated MIR will determine
+ // when things are actually live. However, for typecheck to work
+ // properly, we can't miss any types.
+
+ match expr.kind {
+ // Manually recurse over closures and inline consts, because they are the only
+ // case of nested bodies that share the parent environment.
+ hir::ExprKind::Closure(&hir::Closure { body, .. })
+ | hir::ExprKind::ConstBlock(hir::AnonConst { body, .. }) => {
+ let body = visitor.tcx.hir().body(body);
+ visitor.visit_body(body);
+ }
+ hir::ExprKind::AssignOp(_, ref left_expr, ref right_expr) => {
+ debug!(
+ "resolve_expr - enabling pessimistic_yield, was previously {}",
+ prev_pessimistic
+ );
+
+ let start_point = visitor.fixup_scopes.len();
+ visitor.pessimistic_yield = true;
+
+ // If the actual execution order turns out to be right-to-left,
+ // then we're fine. However, if the actual execution order is left-to-right,
+ // then we'll assign too low a count to any `yield` expressions
+ // we encounter in 'right_expression' - they should really occur after all of the
+ // expressions in 'left_expression'.
+ visitor.visit_expr(&right_expr);
+ visitor.pessimistic_yield = prev_pessimistic;
+
+ debug!("resolve_expr - restoring pessimistic_yield to {}", prev_pessimistic);
+ visitor.visit_expr(&left_expr);
+ debug!("resolve_expr - fixing up counts to {}", visitor.expr_and_pat_count);
+
+ // Remove and process any scopes pushed by the visitor
+ let target_scopes = visitor.fixup_scopes.drain(start_point..);
+
+ for scope in target_scopes {
+ let mut yield_data =
+ visitor.scope_tree.yield_in_scope.get_mut(&scope).unwrap().last_mut().unwrap();
+ let count = yield_data.expr_and_pat_count;
+ let span = yield_data.span;
+
+ // expr_and_pat_count never decreases. Since we recorded counts in yield_in_scope
+ // before walking the left-hand side, it should be impossible for the recorded
+ // count to be greater than the left-hand side count.
+ if count > visitor.expr_and_pat_count {
+ bug!(
+ "Encountered greater count {} at span {:?} - expected no greater than {}",
+ count,
+ span,
+ visitor.expr_and_pat_count
+ );
+ }
+ let new_count = visitor.expr_and_pat_count;
+ debug!(
+ "resolve_expr - increasing count for scope {:?} from {} to {} at span {:?}",
+ scope, count, new_count, span
+ );
+
+ yield_data.expr_and_pat_count = new_count;
+ }
+ }
+
+ hir::ExprKind::If(ref cond, ref then, Some(ref otherwise)) => {
+ let expr_cx = visitor.cx;
+ visitor.enter_scope(Scope { id: then.hir_id.local_id, data: ScopeData::IfThen });
+ visitor.cx.var_parent = visitor.cx.parent;
+ visitor.visit_expr(cond);
+ visitor.visit_expr(then);
+ visitor.cx = expr_cx;
+ visitor.visit_expr(otherwise);
+ }
+
+ hir::ExprKind::If(ref cond, ref then, None) => {
+ let expr_cx = visitor.cx;
+ visitor.enter_scope(Scope { id: then.hir_id.local_id, data: ScopeData::IfThen });
+ visitor.cx.var_parent = visitor.cx.parent;
+ visitor.visit_expr(cond);
+ visitor.visit_expr(then);
+ visitor.cx = expr_cx;
+ }
+
+ _ => intravisit::walk_expr(visitor, expr),
+ }
+
+ visitor.expr_and_pat_count += 1;
+
+ debug!("resolve_expr post-increment {}, expr = {:?}", visitor.expr_and_pat_count, expr);
+
+ if let hir::ExprKind::Yield(_, source) = &expr.kind {
+ // Mark this expr's scope and all parent scopes as containing `yield`.
+ let mut scope = Scope { id: expr.hir_id.local_id, data: ScopeData::Node };
+ loop {
+ let span = match expr.kind {
+ hir::ExprKind::Yield(expr, hir::YieldSource::Await { .. }) => {
+ expr.span.shrink_to_hi().to(expr.span)
+ }
+ _ => expr.span,
+ };
+ let data =
+ YieldData { span, expr_and_pat_count: visitor.expr_and_pat_count, source: *source };
+ match visitor.scope_tree.yield_in_scope.get_mut(&scope) {
+ Some(yields) => yields.push(data),
+ None => {
+ visitor.scope_tree.yield_in_scope.insert(scope, vec![data]);
+ }
+ }
+
+ if visitor.pessimistic_yield {
+ debug!("resolve_expr in pessimistic_yield - marking scope {:?} for fixup", scope);
+ visitor.fixup_scopes.push(scope);
+ }
+
+ // Keep traversing up while we can.
+ match visitor.scope_tree.parent_map.get(&scope) {
+ // Don't cross from closure bodies to their parent.
+ Some(&(superscope, _)) => match superscope.data {
+ ScopeData::CallSite => break,
+ _ => scope = superscope,
+ },
+ None => break,
+ }
+ }
+ }
+
+ visitor.cx = prev_cx;
+}
+
+fn resolve_local<'tcx>(
+ visitor: &mut RegionResolutionVisitor<'tcx>,
+ pat: Option<&'tcx hir::Pat<'tcx>>,
+ init: Option<&'tcx hir::Expr<'tcx>>,
+ els: Option<&'tcx hir::Block<'tcx>>,
+) {
+ debug!("resolve_local(pat={:?}, init={:?})", pat, init);
+
+ let blk_scope = visitor.cx.var_parent.map(|(p, _)| p);
+
+ // As an exception to the normal rules governing temporary
+ // lifetimes, initializers in a let have a temporary lifetime
+ // of the enclosing block. This means that e.g., a program
+ // like the following is legal:
+ //
+ // let ref x = HashMap::new();
+ //
+ // Because the hash map will be freed in the enclosing block.
+ //
+ // We express the rules more formally based on 3 grammars (defined
+ // fully in the helpers below that implement them):
+ //
+ // 1. `E&`, which matches expressions like `&<rvalue>` that
+ // own a pointer into the stack.
+ //
+ // 2. `P&`, which matches patterns like `ref x` or `(ref x, ref
+ // y)` that produce ref bindings into the value they are
+ // matched against or something (at least partially) owned by
+ // the value they are matched against. (By partially owned,
+ // I mean that creating a binding into a ref-counted or managed value
+ // would still count.)
+ //
+ // 3. `ET`, which matches both rvalues like `foo()` as well as places
+ // based on rvalues like `foo().x[2].y`.
+ //
+ // A subexpression `<rvalue>` that appears in a let initializer
+ // `let pat [: ty] = expr` has an extended temporary lifetime if
+ // any of the following conditions are met:
+ //
+ // A. `pat` matches `P&` and `expr` matches `ET`
+ // (covers cases where `pat` creates ref bindings into an rvalue
+ // produced by `expr`)
+ // B. `ty` is a borrowed pointer and `expr` matches `ET`
+ // (covers cases where coercion creates a borrow)
+ // C. `expr` matches `E&`
+    //        (covers cases where `expr` borrows an rvalue that is then assigned
+ // to memory (at least partially) owned by the binding)
+ //
+ // Here are some examples hopefully giving an intuition where each
+ // rule comes into play and why:
+ //
+    //     Rule A. `let (ref x, ref y) = (foo().x, 44)`. The rvalue `(foo().x, 44)`
+    //     would have an extended lifetime, but not `foo()`.
+ //
+    //     Rule B. `let x = &foo().x`. The rvalue `foo()` would have an extended
+    //     lifetime.
+ //
+ // In some cases, multiple rules may apply (though not to the same
+ // rvalue). For example:
+ //
+ // let ref x = [&a(), &b()];
+ //
+ // Here, the expression `[...]` has an extended lifetime due to rule
+ // A, but the inner rvalues `a()` and `b()` have an extended lifetime
+ // due to rule C.
+
+ if let Some(expr) = init {
+ record_rvalue_scope_if_borrow_expr(visitor, &expr, blk_scope);
+
+ if let Some(pat) = pat {
+ if is_binding_pat(pat) {
+ visitor.scope_tree.record_rvalue_candidate(
+ expr.hir_id,
+ RvalueCandidateType::Pattern {
+ target: expr.hir_id.local_id,
+ lifetime: blk_scope,
+ },
+ );
+ }
+ }
+ }
+
+ // Make sure we visit the initializer first, so expr_and_pat_count remains correct.
+ // The correct order, as shared between generator_interior, drop_ranges and intravisitor,
+    // is to walk the initializer, followed by the pattern bindings, and finally the `else` block.
+ if let Some(expr) = init {
+ visitor.visit_expr(expr);
+ }
+ if let Some(pat) = pat {
+ visitor.visit_pat(pat);
+ }
+ if let Some(els) = els {
+ visitor.visit_block(els);
+ }
+
+    /// Returns `true` if `pat` matches the `P&` non-terminal.
+ ///
+ /// ```text
+ /// P& = ref X
+ /// | StructName { ..., P&, ... }
+ /// | VariantName(..., P&, ...)
+ /// | [ ..., P&, ... ]
+ /// | ( ..., P&, ... )
+ /// | ... "|" P& "|" ...
+ /// | box P&
+ /// ```
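+    ///
+    /// For instance (illustrative only), the pattern below matches `P&`
+    /// via the tuple and `ref` productions:
+    ///
+    /// ```ignore (illustrative)
+    /// let (ref x, _) = (String::new(), 0); // `( ..., P&, ... )` with `P& = ref X`
+    /// ```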
+ fn is_binding_pat(pat: &hir::Pat<'_>) -> bool {
+ // Note that the code below looks for *explicit* refs only, that is, it won't
+ // know about *implicit* refs as introduced in #42640.
+ //
+ // This is not a problem. For example, consider
+ //
+ // let (ref x, ref y) = (Foo { .. }, Bar { .. });
+ //
+ // Due to the explicit refs on the left hand side, the below code would signal
+ // that the temporary value on the right hand side should live until the end of
+ // the enclosing block (as opposed to being dropped after the let is complete).
+ //
+ // To create an implicit ref, however, you must have a borrowed value on the RHS
+ // already, as in this example (which won't compile before #42640):
+ //
+ // let Foo { x, .. } = &Foo { x: ..., ... };
+ //
+ // in place of
+ //
+ // let Foo { ref x, .. } = Foo { ... };
+ //
+ // In the former case (the implicit ref version), the temporary is created by the
+ // & expression, and its lifetime would be extended to the end of the block (due
+ // to a different rule, not the below code).
+ match pat.kind {
+ PatKind::Binding(hir::BindingAnnotation::Ref, ..)
+ | PatKind::Binding(hir::BindingAnnotation::RefMut, ..) => true,
+
+ PatKind::Struct(_, ref field_pats, _) => {
+ field_pats.iter().any(|fp| is_binding_pat(&fp.pat))
+ }
+
+ PatKind::Slice(ref pats1, ref pats2, ref pats3) => {
+ pats1.iter().any(|p| is_binding_pat(&p))
+ || pats2.iter().any(|p| is_binding_pat(&p))
+ || pats3.iter().any(|p| is_binding_pat(&p))
+ }
+
+ PatKind::Or(ref subpats)
+ | PatKind::TupleStruct(_, ref subpats, _)
+ | PatKind::Tuple(ref subpats, _) => subpats.iter().any(|p| is_binding_pat(&p)),
+
+ PatKind::Box(ref subpat) => is_binding_pat(&subpat),
+
+ PatKind::Ref(_, _)
+ | PatKind::Binding(
+ hir::BindingAnnotation::Unannotated | hir::BindingAnnotation::Mutable,
+ ..,
+ )
+ | PatKind::Wild
+ | PatKind::Path(_)
+ | PatKind::Lit(_)
+ | PatKind::Range(_, _, _) => false,
+ }
+ }
+
+ /// If `expr` matches the `E&` grammar, then records an extended rvalue scope as appropriate:
+ ///
+ /// ```text
+ /// E& = & ET
+ /// | StructName { ..., f: E&, ... }
+ /// | [ ..., E&, ... ]
+ /// | ( ..., E&, ... )
+ /// | {...; E&}
+ /// | box E&
+ /// | E& as ...
+ /// | ( E& )
+ /// ```
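+    ///
+    /// For example (with a hypothetical `make_temp()`), the borrow below
+    /// matches `( ..., E&, ... )`, so the `make_temp()` rvalue gets an
+    /// extended scope:
+    ///
+    /// ```ignore (illustrative)
+    /// let pair = (0, &make_temp());
+    /// ```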
+ fn record_rvalue_scope_if_borrow_expr<'tcx>(
+ visitor: &mut RegionResolutionVisitor<'tcx>,
+ expr: &hir::Expr<'_>,
+ blk_id: Option<Scope>,
+ ) {
+ match expr.kind {
+ hir::ExprKind::AddrOf(_, _, subexpr) => {
+ record_rvalue_scope_if_borrow_expr(visitor, subexpr, blk_id);
+ visitor.scope_tree.record_rvalue_candidate(
+ subexpr.hir_id,
+ RvalueCandidateType::Borrow {
+ target: subexpr.hir_id.local_id,
+ lifetime: blk_id,
+ },
+ );
+ }
+ hir::ExprKind::Struct(_, fields, _) => {
+ for field in fields {
+ record_rvalue_scope_if_borrow_expr(visitor, &field.expr, blk_id);
+ }
+ }
+ hir::ExprKind::Array(subexprs) | hir::ExprKind::Tup(subexprs) => {
+ for subexpr in subexprs {
+ record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id);
+ }
+ }
+ hir::ExprKind::Cast(ref subexpr, _) => {
+ record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id)
+ }
+ hir::ExprKind::Block(ref block, _) => {
+ if let Some(ref subexpr) = block.expr {
+ record_rvalue_scope_if_borrow_expr(visitor, &subexpr, blk_id);
+ }
+ }
+ hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) => {
+ // FIXME(@dingxiangfei2009): choose call arguments here
+ // for candidacy for extended parameter rule application
+ }
+ hir::ExprKind::Index(..) => {
+ // FIXME(@dingxiangfei2009): select the indices
+ // as candidate for rvalue scope rules
+ }
+ _ => {}
+ }
+ }
+}
+
+impl<'tcx> RegionResolutionVisitor<'tcx> {
+ /// Records the current parent (if any) as the parent of `child_scope`.
+ /// Returns the depth of `child_scope`.
+ fn record_child_scope(&mut self, child_scope: Scope) -> ScopeDepth {
+ let parent = self.cx.parent;
+ self.scope_tree.record_scope_parent(child_scope, parent);
+ // If `child_scope` has no parent, it must be the root node, and so has
+ // a depth of 1. Otherwise, its depth is one more than its parent's.
+ parent.map_or(1, |(_p, d)| d + 1)
+ }
+
+ /// Records the current parent (if any) as the parent of `child_scope`,
+ /// and sets `child_scope` as the new current parent.
+ fn enter_scope(&mut self, child_scope: Scope) {
+ let child_depth = self.record_child_scope(child_scope);
+ self.cx.parent = Some((child_scope, child_depth));
+ }
+
+ fn enter_node_scope_with_dtor(&mut self, id: hir::ItemLocalId) {
+        // If the node was previously marked as a terminating scope during the
+ // recursive visit of its parent node in the AST, then we need to
+ // account for the destruction scope representing the scope of
+ // the destructors that run immediately after it completes.
+ if self.terminating_scopes.contains(&id) {
+ self.enter_scope(Scope { id, data: ScopeData::Destruction });
+ }
+ self.enter_scope(Scope { id, data: ScopeData::Node });
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for RegionResolutionVisitor<'tcx> {
+ fn visit_block(&mut self, b: &'tcx Block<'tcx>) {
+ resolve_block(self, b);
+ }
+
+ fn visit_body(&mut self, body: &'tcx hir::Body<'tcx>) {
+ let body_id = body.id();
+ let owner_id = self.tcx.hir().body_owner_def_id(body_id);
+
+ debug!(
+ "visit_body(id={:?}, span={:?}, body.id={:?}, cx.parent={:?})",
+ owner_id,
+ self.tcx.sess.source_map().span_to_diagnostic_string(body.value.span),
+ body_id,
+ self.cx.parent
+ );
+
+        // Save all state that is specific to the outer function
+        // body. These will be restored down below, once we've
+        // visited the body.
+ let outer_ec = mem::replace(&mut self.expr_and_pat_count, 0);
+ let outer_cx = self.cx;
+ let outer_ts = mem::take(&mut self.terminating_scopes);
+ // The 'pessimistic yield' flag is set to true when we are
+ // processing a `+=` statement and have to make pessimistic
+ // control flow assumptions. This doesn't apply to nested
+ // bodies within the `+=` statements. See #69307.
+ let outer_pessimistic_yield = mem::replace(&mut self.pessimistic_yield, false);
+ self.terminating_scopes.insert(body.value.hir_id.local_id);
+
+ self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::CallSite });
+ self.enter_scope(Scope { id: body.value.hir_id.local_id, data: ScopeData::Arguments });
+
+ // The arguments and `self` are parented to the fn.
+ self.cx.var_parent = self.cx.parent.take();
+ for param in body.params {
+ self.visit_pat(&param.pat);
+ }
+
+        // The body of every fn is a root scope.
+ self.cx.parent = self.cx.var_parent;
+ if self.tcx.hir().body_owner_kind(owner_id).is_fn_or_closure() {
+ self.visit_expr(&body.value)
+ } else {
+ // Only functions have an outer terminating (drop) scope, while
+ // temporaries in constant initializers may be 'static, but only
+ // according to rvalue lifetime semantics, using the same
+ // syntactical rules used for let initializers.
+ //
+ // e.g., in `let x = &f();`, the temporary holding the result from
+ // the `f()` call lives for the entirety of the surrounding block.
+ //
+ // Similarly, `const X: ... = &f();` would have the result of `f()`
+ // live for `'static`, implying (if Drop restrictions on constants
+ // ever get lifted) that the value *could* have a destructor, but
+ // it'd get leaked instead of the destructor running during the
+ // evaluation of `X` (if at all allowed by CTFE).
+ //
+ // However, `const Y: ... = g(&f());`, like `let y = g(&f());`,
+ // would *not* let the `f()` temporary escape into an outer scope
+ // (i.e., `'static`), which means that after `g` returns, it drops,
+ // and all the associated destruction scope rules apply.
+ self.cx.var_parent = None;
+ resolve_local(self, None, Some(&body.value), None);
+ }
+
+ if body.generator_kind.is_some() {
+ self.scope_tree.body_expr_count.insert(body_id, self.expr_and_pat_count);
+ }
+
+ // Restore context we had at the start.
+ self.expr_and_pat_count = outer_ec;
+ self.cx = outer_cx;
+ self.terminating_scopes = outer_ts;
+ self.pessimistic_yield = outer_pessimistic_yield;
+ }
+
+ fn visit_arm(&mut self, a: &'tcx Arm<'tcx>) {
+ resolve_arm(self, a);
+ }
+ fn visit_pat(&mut self, p: &'tcx Pat<'tcx>) {
+ resolve_pat(self, p);
+ }
+ fn visit_stmt(&mut self, s: &'tcx Stmt<'tcx>) {
+ resolve_stmt(self, s);
+ }
+ fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
+ resolve_expr(self, ex);
+ }
+ fn visit_local(&mut self, l: &'tcx Local<'tcx>) {
+ resolve_local(self, Some(&l.pat), l.init, l.els)
+ }
+}
+
+/// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body;
+/// in the case of closures, this will be redirected to the enclosing function.
+///
+/// Performance: This is a query rather than a simple function to enable
+/// re-use in incremental scenarios. We may sometimes need to rerun the
+/// type checker even when the HIR hasn't changed, and in those cases
+/// we can avoid reconstructing the region scope tree.
+pub fn region_scope_tree(tcx: TyCtxt<'_>, def_id: DefId) -> &ScopeTree {
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id);
+ if typeck_root_def_id != def_id {
+ return tcx.region_scope_tree(typeck_root_def_id);
+ }
+
+ let scope_tree = if let Some(body_id) = tcx.hir().maybe_body_owned_by(def_id.expect_local()) {
+ let mut visitor = RegionResolutionVisitor {
+ tcx,
+ scope_tree: ScopeTree::default(),
+ expr_and_pat_count: 0,
+ cx: Context { parent: None, var_parent: None },
+ terminating_scopes: Default::default(),
+ pessimistic_yield: false,
+ fixup_scopes: vec![],
+ };
+
+ let body = tcx.hir().body(body_id);
+ visitor.scope_tree.root_body = Some(body.value.hir_id);
+ visitor.visit_body(body);
+ visitor.scope_tree
+ } else {
+ ScopeTree::default()
+ };
+
+ tcx.arena.alloc(scope_tree)
+}
diff --git a/compiler/rustc_typeck/src/check/regionck.rs b/compiler/rustc_typeck/src/check/regionck.rs
new file mode 100644
index 000000000..d49a6138f
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/regionck.rs
@@ -0,0 +1,47 @@
+use crate::outlives::outlives_bounds::InferCtxtExt as _;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir as hir;
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::ty::Ty;
+
+pub(crate) trait OutlivesEnvironmentExt<'tcx> {
+ fn add_implied_bounds(
+ &mut self,
+ infcx: &InferCtxt<'_, 'tcx>,
+ fn_sig_tys: FxHashSet<Ty<'tcx>>,
+ body_id: hir::HirId,
+ );
+}
+
+impl<'tcx> OutlivesEnvironmentExt<'tcx> for OutlivesEnvironment<'tcx> {
+ /// This method adds "implied bounds" into the outlives environment.
+ /// Implied bounds are outlives relationships that we can deduce
+ /// on the basis that certain types must be well-formed -- these are
+ /// either the types that appear in the function signature or else
+ /// the input types to an impl. For example, if you have a function
+ /// like
+ ///
+ /// ```
+ /// fn foo<'a, 'b, T>(x: &'a &'b [T]) { }
+ /// ```
+ ///
+ /// we can assume in the caller's body that `'b: 'a` and that `T:
+ /// 'b` (and hence, transitively, that `T: 'a`). This method would
+ /// add those assumptions into the outlives-environment.
+ ///
+ /// Tests: `src/test/ui/regions/regions-free-region-ordering-*.rs`
+ #[instrument(level = "debug", skip(self, infcx))]
+ fn add_implied_bounds<'a>(
+ &mut self,
+ infcx: &InferCtxt<'a, 'tcx>,
+ fn_sig_tys: FxHashSet<Ty<'tcx>>,
+ body_id: hir::HirId,
+ ) {
+ for ty in fn_sig_tys {
+ let ty = infcx.resolve_vars_if_possible(ty);
+ let implied_bounds = infcx.implied_outlives_bounds(self.param_env, body_id, ty);
+ self.add_outlives_bounds(Some(infcx), implied_bounds)
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/check/rvalue_scopes.rs b/compiler/rustc_typeck/src/check/rvalue_scopes.rs
new file mode 100644
index 000000000..22c9e7961
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/rvalue_scopes.rs
@@ -0,0 +1,83 @@
+use super::FnCtxt;
+use hir::def_id::DefId;
+use hir::Node;
+use rustc_hir as hir;
+use rustc_middle::middle::region::{RvalueCandidateType, Scope, ScopeTree};
+use rustc_middle::ty::RvalueScopes;
+
+/// Applied to an expression `expr` if `expr` -- or something owned or partially owned by
+/// `expr` -- is going to be indirectly referenced by a variable in a let statement. In that
+/// case, the "temporary lifetime" of `expr` is extended to be the block enclosing the `let`
+/// statement.
+///
+/// More formally, if `expr` matches the grammar `ET`, record the rvalue scope of the matching
+/// `<rvalue>` as `blk_id`:
+///
+/// ```text
+/// ET = *ET
+/// | ET[...]
+/// | ET.f
+/// | (ET)
+/// | <rvalue>
+/// ```
+///
+/// Note: ET is intended to match "rvalues or places based on rvalues".
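+///
+/// For example (with a hypothetical `make()`), every expression in the chain
+/// below matches `ET`, so the `make()` rvalue receives the extended scope:
+///
+/// ```ignore (illustrative)
+/// let r = &make().field[0]; // `ET.f` and `ET[...]` wrapped around `<rvalue>`
+/// ```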
+fn record_rvalue_scope_rec(
+ rvalue_scopes: &mut RvalueScopes,
+ mut expr: &hir::Expr<'_>,
+ lifetime: Option<Scope>,
+) {
+ loop {
+        // Note: give all the expressions matching `ET` the
+ // extended temporary lifetime, not just the innermost rvalue,
+ // because in codegen if we must compile e.g., `*rvalue()`
+ // into a temporary, we request the temporary scope of the
+ // outer expression.
+
+ rvalue_scopes.record_rvalue_scope(expr.hir_id.local_id, lifetime);
+
+ match expr.kind {
+ hir::ExprKind::AddrOf(_, _, subexpr)
+ | hir::ExprKind::Unary(hir::UnOp::Deref, subexpr)
+ | hir::ExprKind::Field(subexpr, _)
+ | hir::ExprKind::Index(subexpr, _) => {
+ expr = subexpr;
+ }
+ _ => {
+ return;
+ }
+ }
+ }
+}
+
+fn record_rvalue_scope(
+ rvalue_scopes: &mut RvalueScopes,
+ expr: &hir::Expr<'_>,
+ candidate: &RvalueCandidateType,
+) {
+ debug!("resolve_rvalue_scope(expr={expr:?}, candidate={candidate:?})");
+ match candidate {
+ RvalueCandidateType::Borrow { lifetime, .. }
+ | RvalueCandidateType::Pattern { lifetime, .. } => {
+ record_rvalue_scope_rec(rvalue_scopes, expr, *lifetime)
+ } // FIXME(@dingxiangfei2009): handle the candidates in the function call arguments
+ }
+}
+
+pub fn resolve_rvalue_scopes<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ scope_tree: &'a ScopeTree,
+ def_id: DefId,
+) -> RvalueScopes {
+ let tcx = &fcx.tcx;
+ let hir_map = tcx.hir();
+ let mut rvalue_scopes = RvalueScopes::new();
+ debug!("start resolving rvalue scopes, def_id={def_id:?}");
+ debug!("rvalue_scope: rvalue_candidates={:?}", scope_tree.rvalue_candidates);
+ for (&hir_id, candidate) in &scope_tree.rvalue_candidates {
+ let Some(Node::Expr(expr)) = hir_map.find(hir_id) else {
+ bug!("hir node does not exist")
+ };
+ record_rvalue_scope(&mut rvalue_scopes, expr, candidate);
+ }
+ rvalue_scopes
+}
diff --git a/compiler/rustc_typeck/src/check/upvar.rs b/compiler/rustc_typeck/src/check/upvar.rs
new file mode 100644
index 000000000..dd8f943b9
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/upvar.rs
@@ -0,0 +1,2272 @@
+//! ### Inferring borrow kinds for upvars
+//!
+//! Whenever there is a closure expression, we need to determine how each
+//! upvar is used. We do this by initially assigning each upvar an
+//! immutable "borrow kind" (see `ty::BorrowKind` for details) and then
+//! "escalating" the kind as needed. The borrow kind proceeds according to
+//! the following lattice:
+//! ```ignore (not-rust)
+//! ty::ImmBorrow -> ty::UniqueImmBorrow -> ty::MutBorrow
+//! ```
+//! So, for example, if we see an assignment `x = 5` to an upvar `x`, we
+//! will promote its borrow kind to mutable borrow. If we see an `&mut x`
+//! we'll do the same. Naturally, this applies not just to the upvar, but
+//! to everything owned by `x`, so the result is the same for something
+//! like `x.f = 5` and so on (presuming `x` is not a borrowed pointer to a
+//! struct). These adjustments are performed in
+//! `adjust_upvar_borrow_kind()` (you can trace backwards through the code
+//! from there).
+//!
+//! The fact that we are inferring borrow kinds as we go results in a
+//! semi-hacky interaction with mem-categorization. In particular,
+//! mem-categorization will query the current borrow kind as it
+//! categorizes, and we'll return the *current* value, but this may get
+//! adjusted later. Therefore, in this module, we generally ignore the
+//! borrow kind (and derived mutabilities) that are returned from
+//! mem-categorization, since they may be inaccurate. (Another option
+//! would be to use a unification scheme, where instead of returning a
+//! concrete borrow kind like `ty::ImmBorrow`, we return a
+//! `ty::InferBorrow(upvar_id)` or something like that, but this would
+//! then mean that all later passes would have to check for these figments
+//! and report an error, and it just seems like more mess in the end.)
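+//!
+//! A minimal illustration of the escalation (names are ours, not from the
+//! compiler):
+//!
+//! ```ignore (illustrative)
+//! let mut x = 0;
+//! let mut c = || x += 1; // `x` starts at `ImmBorrow` and is
+//! c();                   // escalated to `MutBorrow` by the assignment
+//! ```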
+
+use super::FnCtxt;
+
+use crate::expr_use_visitor as euv;
+use rustc_errors::{Applicability, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_infer::infer::UpvarRegion;
+use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection, ProjectionKind};
+use rustc_middle::mir::FakeReadCause;
+use rustc_middle::ty::{
+ self, ClosureSizeProfileData, Ty, TyCtxt, TypeckResults, UpvarCapture, UpvarSubsts,
+};
+use rustc_session::lint;
+use rustc_span::sym;
+use rustc_span::{BytePos, Pos, Span, Symbol};
+use rustc_trait_selection::infer::InferCtxtExt;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_index::vec::Idx;
+use rustc_target::abi::VariantIdx;
+
+use std::iter;
+
+/// Describes the relationship between the paths of two places,
+/// e.g.:
+/// - `foo` is an ancestor of `foo.bar.baz`
+/// - `foo.bar.baz` is a descendant of `foo.bar`
+/// - `foo.bar` and `foo.baz` are divergent
+enum PlaceAncestryRelation {
+ Ancestor,
+ Descendant,
+ SamePlace,
+ Divergent,
+}
+
+/// Intermediate format to store a captured `Place` and associated `ty::CaptureInfo`
+/// during capture analysis. Information in this map feeds into the minimum capture
+/// analysis pass.
+type InferredCaptureInformation<'tcx> = Vec<(Place<'tcx>, ty::CaptureInfo)>;
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn closure_analyze(&self, body: &'tcx hir::Body<'tcx>) {
+ InferBorrowKindVisitor { fcx: self }.visit_body(body);
+
+ // it's our job to process these.
+ assert!(self.deferred_call_resolutions.borrow().is_empty());
+ }
+}
+
+/// Intermediate format to store the hir_id pointing to the use that resulted in the
+/// corresponding place being captured, and a `String` containing the captured value's
+/// name (e.g., `a.b.c`)
+#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+enum UpvarMigrationInfo {
+ /// We previously captured all of `x`, but now we capture some sub-path.
+ CapturingPrecise { source_expr: Option<hir::HirId>, var_name: String },
+ CapturingNothing {
+ // where the variable appears in the closure (but is not captured)
+ use_span: Span,
+ },
+}
+
+/// Reasons that we might issue a migration warning.
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
+struct MigrationWarningReason {
+ /// When we used to capture `x` in its entirety, we implemented the auto-trait(s)
+ /// in this vec, but now we don't.
+ auto_traits: Vec<&'static str>,
+
+ /// When we used to capture `x` in its entirety, we would execute some destructors
+ /// at a different time.
+ drop_order: bool,
+}
+
+impl MigrationWarningReason {
+ fn migration_message(&self) -> String {
+ let base = "changes to closure capture in Rust 2021 will affect";
+ if !self.auto_traits.is_empty() && self.drop_order {
+ format!("{} drop order and which traits the closure implements", base)
+ } else if self.drop_order {
+ format!("{} drop order", base)
+ } else {
+ format!("{} which traits the closure implements", base)
+ }
+ }
+}
+
+/// Intermediate format to store information needed to generate a note in the migration lint.
+struct MigrationLintNote {
+ captures_info: UpvarMigrationInfo,
+
+ /// reasons why migration is needed for this capture
+ reason: MigrationWarningReason,
+}
+
+/// Intermediate format to store the hir id of the root variable and a HashSet containing
+/// information on why the root variable should be fully captured
+struct NeededMigration {
+ var_hir_id: hir::HirId,
+ diagnostics_info: Vec<MigrationLintNote>,
+}
+
+struct InferBorrowKindVisitor<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+}
+
+impl<'a, 'tcx> Visitor<'tcx> for InferBorrowKindVisitor<'a, 'tcx> {
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ match expr.kind {
+ hir::ExprKind::Closure(&hir::Closure { capture_clause, body: body_id, .. }) => {
+ let body = self.fcx.tcx.hir().body(body_id);
+ self.visit_body(body);
+ self.fcx.analyze_closure(expr.hir_id, expr.span, body_id, body, capture_clause);
+ }
+ hir::ExprKind::ConstBlock(anon_const) => {
+ let body = self.fcx.tcx.hir().body(anon_const.body);
+ self.visit_body(body);
+ }
+ _ => {}
+ }
+
+ intravisit::walk_expr(self, expr);
+ }
+}
+
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ /// Analysis starting point.
+ #[instrument(skip(self, body), level = "debug")]
+ fn analyze_closure(
+ &self,
+ closure_hir_id: hir::HirId,
+ span: Span,
+ body_id: hir::BodyId,
+ body: &'tcx hir::Body<'tcx>,
+ capture_clause: hir::CaptureBy,
+ ) {
+ // Extract the type of the closure.
+ let ty = self.node_ty(closure_hir_id);
+ let (closure_def_id, substs) = match *ty.kind() {
+ ty::Closure(def_id, substs) => (def_id, UpvarSubsts::Closure(substs)),
+ ty::Generator(def_id, substs, _) => (def_id, UpvarSubsts::Generator(substs)),
+ ty::Error(_) => {
+ // #51714: skip analysis when we have already encountered type errors
+ return;
+ }
+ _ => {
+ span_bug!(
+ span,
+ "type of closure expr {:?} is not a closure {:?}",
+ closure_hir_id,
+ ty
+ );
+ }
+ };
+ let closure_def_id = closure_def_id.expect_local();
+
+ let infer_kind = if let UpvarSubsts::Closure(closure_substs) = substs {
+ self.closure_kind(closure_substs).is_none().then_some(closure_substs)
+ } else {
+ None
+ };
+
+ assert_eq!(self.tcx.hir().body_owner_def_id(body.id()), closure_def_id);
+ let mut delegate = InferBorrowKind {
+ fcx: self,
+ closure_def_id,
+ capture_information: Default::default(),
+ fake_reads: Default::default(),
+ };
+ euv::ExprUseVisitor::new(
+ &mut delegate,
+ &self.infcx,
+ closure_def_id,
+ self.param_env,
+ &self.typeck_results.borrow(),
+ )
+ .consume_body(body);
+
+ debug!(
+ "For closure={:?}, capture_information={:#?}",
+ closure_def_id, delegate.capture_information
+ );
+
+ self.log_capture_analysis_first_pass(closure_def_id, &delegate.capture_information, span);
+
+ let (capture_information, closure_kind, origin) = self
+ .process_collected_capture_information(capture_clause, delegate.capture_information);
+
+ self.compute_min_captures(closure_def_id, capture_information, span);
+
+ let closure_hir_id = self.tcx.hir().local_def_id_to_hir_id(closure_def_id);
+
+ if should_do_rust_2021_incompatible_closure_captures_analysis(self.tcx, closure_hir_id) {
+ self.perform_2229_migration_anaysis(closure_def_id, body_id, capture_clause, span);
+ }
+
+ let after_feature_tys = self.final_upvar_tys(closure_def_id);
+
+        // We now fake capture information for all variables that are mentioned within the closure.
+        // We do this after handling migrations so that min_captures is computed before the fake
+        // information is added.
+ if !enable_precise_capture(self.tcx, span) {
+ let mut capture_information: InferredCaptureInformation<'tcx> = Default::default();
+
+ if let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) {
+ for var_hir_id in upvars.keys() {
+ let place = self.place_for_root_variable(closure_def_id, *var_hir_id);
+
+ debug!("seed place {:?}", place);
+
+ let capture_kind = self.init_capture_kind_for_place(&place, capture_clause);
+ let fake_info = ty::CaptureInfo {
+ capture_kind_expr_id: None,
+ path_expr_id: None,
+ capture_kind,
+ };
+
+ capture_information.push((place, fake_info));
+ }
+ }
+
+ // This will update the min captures based on this new fake information.
+ self.compute_min_captures(closure_def_id, capture_information, span);
+ }
+
+ let before_feature_tys = self.final_upvar_tys(closure_def_id);
+
+ if let Some(closure_substs) = infer_kind {
+ // Unify the (as yet unbound) type variable in the closure
+ // substs with the kind we inferred.
+ let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ self.demand_eqtype(span, closure_kind.to_ty(self.tcx), closure_kind_ty);
+
+ // If we have an origin, store it.
+ if let Some(origin) = origin {
+ let origin = if enable_precise_capture(self.tcx, span) {
+ (origin.0, origin.1)
+ } else {
+ (origin.0, Place { projections: vec![], ..origin.1 })
+ };
+
+ self.typeck_results
+ .borrow_mut()
+ .closure_kind_origins_mut()
+ .insert(closure_hir_id, origin);
+ }
+ }
+
+ self.log_closure_min_capture_info(closure_def_id, span);
+
+ // Now that we've analyzed the closure, we know how each
+ // variable is borrowed, and we know what traits the closure
+ // implements (Fn vs FnMut etc). We now have some updates to do
+ // with that information.
+ //
+ // Note that no closure type C may have an upvar of type C
+ // (though it may reference itself via a trait object). This
+ // results from the desugaring of closures to a struct like
+ // `Foo<..., UV0...UVn>`. If one of those upvars referenced
+ // C, then the type would have infinite size (and the
+ // inference algorithm will reject it).
+
+ // Equate the type variables for the upvars with the actual types.
+ let final_upvar_tys = self.final_upvar_tys(closure_def_id);
+ debug!(
+ "analyze_closure: id={:?} substs={:?} final_upvar_tys={:?}",
+ closure_hir_id, substs, final_upvar_tys
+ );
+
+ // Build a tuple (U0..Un) of the final upvar types U0..Un
+ // and unify the upvar tuple type in the closure with it:
+ let final_tupled_upvars_type = self.tcx.mk_tup(final_upvar_tys.iter());
+ self.demand_suptype(span, substs.tupled_upvars_ty(), final_tupled_upvars_type);
+
+ let fake_reads = delegate
+ .fake_reads
+ .into_iter()
+ .map(|(place, cause, hir_id)| (place, cause, hir_id))
+ .collect();
+ self.typeck_results.borrow_mut().closure_fake_reads.insert(closure_def_id, fake_reads);
+
+ if self.tcx.sess.opts.unstable_opts.profile_closures {
+ self.typeck_results.borrow_mut().closure_size_eval.insert(
+ closure_def_id,
+ ClosureSizeProfileData {
+ before_feature_tys: self.tcx.mk_tup(before_feature_tys.into_iter()),
+ after_feature_tys: self.tcx.mk_tup(after_feature_tys.into_iter()),
+ },
+ );
+ }
+
+        // If we also inferred the closure kind here,
+        // process any deferred call resolutions.
+ let deferred_call_resolutions = self.remove_deferred_call_resolutions(closure_def_id);
+ for deferred_call_resolution in deferred_call_resolutions {
+ deferred_call_resolution.resolve(self);
+ }
+ }
+
+ // Returns a list of `Ty`s for each upvar.
+ fn final_upvar_tys(&self, closure_id: LocalDefId) -> Vec<Ty<'tcx>> {
+ self.typeck_results
+ .borrow()
+ .closure_min_captures_flattened(closure_id)
+ .map(|captured_place| {
+ let upvar_ty = captured_place.place.ty();
+ let capture = captured_place.info.capture_kind;
+
+ debug!(
+ "final_upvar_tys: place={:?} upvar_ty={:?} capture={:?}, mutability={:?}",
+ captured_place.place, upvar_ty, capture, captured_place.mutability,
+ );
+
+ apply_capture_kind_on_capture_ty(self.tcx, upvar_ty, capture, captured_place.region)
+ })
+ .collect()
+ }
+
+ /// Adjusts the closure capture information to ensure that the operations aren't unsafe,
+    /// and that the path can be captured with the required capture kind (depending on use in the
+    /// closure, move closure, etc.)
+    ///
+    /// Returns the set of adjusted information along with the inferred closure kind and the span
+    /// associated with the closure kind inference.
+ ///
+ /// Note that we *always* infer a minimal kind, even if
+ /// we don't always *use* that in the final result (i.e., sometimes
+ /// we've taken the closure kind from the expectations instead, and
+    /// for generators we don't even really implement the closure
+    /// traits).
+ ///
+    /// If we inferred that the closure needs to be FnMut/FnOnce, the last element of the
+    /// returned tuple contains a `Some()` with the `Place` that caused us to do so.
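+    ///
+    /// A small illustration of the kind inference (not an exhaustive spec):
+    ///
+    /// ```ignore (illustrative)
+    /// let s = String::new();
+    /// let c = || drop(s); // `s` is captured by value, forcing `FnOnce`
+    /// ```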
+ fn process_collected_capture_information(
+ &self,
+ capture_clause: hir::CaptureBy,
+ capture_information: InferredCaptureInformation<'tcx>,
+ ) -> (InferredCaptureInformation<'tcx>, ty::ClosureKind, Option<(Span, Place<'tcx>)>) {
+ let mut closure_kind = ty::ClosureKind::LATTICE_BOTTOM;
+ let mut origin: Option<(Span, Place<'tcx>)> = None;
+
+ let processed = capture_information
+ .into_iter()
+ .map(|(place, mut capture_info)| {
+ // Apply rules for safety before inferring closure kind
+ let (place, capture_kind) =
+ restrict_capture_precision(place, capture_info.capture_kind);
+
+ let (place, capture_kind) = truncate_capture_for_optimization(place, capture_kind);
+
+ let usage_span = if let Some(usage_expr) = capture_info.path_expr_id {
+ self.tcx.hir().span(usage_expr)
+ } else {
+ unreachable!()
+ };
+
+ let updated = match capture_kind {
+ ty::UpvarCapture::ByValue => match closure_kind {
+ ty::ClosureKind::Fn | ty::ClosureKind::FnMut => {
+ (ty::ClosureKind::FnOnce, Some((usage_span, place.clone())))
+ }
+ // If closure is already FnOnce, don't update
+ ty::ClosureKind::FnOnce => (closure_kind, origin.take()),
+ },
+
+ ty::UpvarCapture::ByRef(
+ ty::BorrowKind::MutBorrow | ty::BorrowKind::UniqueImmBorrow,
+ ) => {
+ match closure_kind {
+ ty::ClosureKind::Fn => {
+ (ty::ClosureKind::FnMut, Some((usage_span, place.clone())))
+ }
+ // Don't update the origin
+ ty::ClosureKind::FnMut | ty::ClosureKind::FnOnce => {
+ (closure_kind, origin.take())
+ }
+ }
+ }
+
+ _ => (closure_kind, origin.take()),
+ };
+
+ closure_kind = updated.0;
+ origin = updated.1;
+
+ let (place, capture_kind) = match capture_clause {
+ hir::CaptureBy::Value => adjust_for_move_closure(place, capture_kind),
+ hir::CaptureBy::Ref => adjust_for_non_move_closure(place, capture_kind),
+ };
+
+ // This restriction needs to be applied after we have handled adjustments for `move`
+ // closures. We want to make sure any adjustment that might make us move the place into
+ // the closure gets handled.
+ let (place, capture_kind) =
+ restrict_precision_for_drop_types(self, place, capture_kind, usage_span);
+
+ capture_info.capture_kind = capture_kind;
+ (place, capture_info)
+ })
+ .collect();
+
+ (processed, closure_kind, origin)
+ }
+
+ /// Analyzes the information collected by `InferBorrowKind` to compute the min number of
+ /// Places (and corresponding capture kind) that we need to keep track of to support all
+ /// the required captured paths.
+    ///
+ /// Note: If this function is called multiple times for the same closure, it will update
+ /// the existing min_capture map that is stored in TypeckResults.
+ ///
+    /// E.g.:
+ /// ```
+ /// #[derive(Debug)]
+ /// struct Point { x: i32, y: i32 }
+ ///
+ /// let s = String::from("s"); // hir_id_s
+    /// let mut p = Point { x: 2, y: -2 }; // hir_id_p
+ /// let c = || {
+ /// println!("{s:?}"); // L1
+ /// p.x += 10; // L2
+ /// println!("{}" , p.y); // L3
+ /// println!("{p:?}"); // L4
+ /// drop(s); // L5
+ /// };
+ /// ```
+ /// and let hir_id_L1..5 be the expressions pointing to use of a captured variable on
+ /// the lines L1..5 respectively.
+ ///
+ /// InferBorrowKind results in a structure like this:
+ ///
+ /// ```ignore (illustrative)
+ /// {
+ /// Place(base: hir_id_s, projections: [], ....) -> {
+ /// capture_kind_expr: hir_id_L5,
+ /// path_expr_id: hir_id_L5,
+ /// capture_kind: ByValue
+ /// },
+ /// Place(base: hir_id_p, projections: [Field(0, 0)], ...) -> {
+ /// capture_kind_expr: hir_id_L2,
+ /// path_expr_id: hir_id_L2,
+ /// capture_kind: ByValue
+ /// },
+ /// Place(base: hir_id_p, projections: [Field(1, 0)], ...) -> {
+ /// capture_kind_expr: hir_id_L3,
+ /// path_expr_id: hir_id_L3,
+ /// capture_kind: ByValue
+ /// },
+ /// Place(base: hir_id_p, projections: [], ...) -> {
+ /// capture_kind_expr: hir_id_L4,
+ /// path_expr_id: hir_id_L4,
+ /// capture_kind: ByValue
+ /// },
+ /// }
+ /// ```
+ ///
+ /// After the min capture analysis, we get:
+ /// ```ignore (illustrative)
+ /// {
+ /// hir_id_s -> [
+ /// Place(base: hir_id_s, projections: [], ....) -> {
+ /// capture_kind_expr: hir_id_L5,
+ /// path_expr_id: hir_id_L5,
+ /// capture_kind: ByValue
+ /// },
+ /// ],
+ /// hir_id_p -> [
+ /// Place(base: hir_id_p, projections: [], ...) -> {
+ /// capture_kind_expr: hir_id_L2,
+ /// path_expr_id: hir_id_L4,
+ /// capture_kind: ByValue
+ /// },
+ /// ],
+ /// }
+ /// ```
+ fn compute_min_captures(
+ &self,
+ closure_def_id: LocalDefId,
+ capture_information: InferredCaptureInformation<'tcx>,
+ closure_span: Span,
+ ) {
+ if capture_information.is_empty() {
+ return;
+ }
+
+ let mut typeck_results = self.typeck_results.borrow_mut();
+
+ let mut root_var_min_capture_list =
+ typeck_results.closure_min_captures.remove(&closure_def_id).unwrap_or_default();
+
+ for (mut place, capture_info) in capture_information.into_iter() {
+ let var_hir_id = match place.base {
+ PlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected upvar, found={:?}", base),
+ };
+
+ let Some(min_cap_list) = root_var_min_capture_list.get_mut(&var_hir_id) else {
+ let mutability = self.determine_capture_mutability(&typeck_results, &place);
+ let min_cap_list = vec![ty::CapturedPlace {
+ place,
+ info: capture_info,
+ mutability,
+ region: None,
+ }];
+ root_var_min_capture_list.insert(var_hir_id, min_cap_list);
+ continue;
+ };
+
+ // Go through each entry in the current list of min_captures
+ // - if an ancestor is found, update its capture kind to account for the current
+ // place's capture information.
+ //
+ // - if a descendant is found, remove it from the list, and update the current place's
+ // capture information to account for the descendant's capture kind.
+ //
+ // We can never be in a case where the list contains both an ancestor and a descendant
+ // of the current place. Also, there can be at most one ancestor, but there might be
+ // multiple descendants.
+
+ let mut descendant_found = false;
+ let mut updated_capture_info = capture_info;
+ min_cap_list.retain(|possible_descendant| {
+ match determine_place_ancestry_relation(&place, &possible_descendant.place) {
+ // current place is ancestor of possible_descendant
+ PlaceAncestryRelation::Ancestor => {
+ descendant_found = true;
+
+ let mut possible_descendant = possible_descendant.clone();
+ let backup_path_expr_id = updated_capture_info.path_expr_id;
+
+ // Truncate the descendant (already in min_captures) to be same as the ancestor to handle any
+ // possible change in capture mode.
+ truncate_place_to_len_and_update_capture_kind(
+ &mut possible_descendant.place,
+ &mut possible_descendant.info.capture_kind,
+ place.projections.len(),
+ );
+
+ updated_capture_info =
+ determine_capture_info(updated_capture_info, possible_descendant.info);
+
+ // we need to keep the ancestor's `path_expr_id`
+ updated_capture_info.path_expr_id = backup_path_expr_id;
+ false
+ }
+
+ _ => true,
+ }
+ });
+
+ let mut ancestor_found = false;
+ if !descendant_found {
+ for possible_ancestor in min_cap_list.iter_mut() {
+ match determine_place_ancestry_relation(&place, &possible_ancestor.place) {
+ PlaceAncestryRelation::SamePlace => {
+ ancestor_found = true;
+ possible_ancestor.info = determine_capture_info(
+ possible_ancestor.info,
+ updated_capture_info,
+ );
+
+ // Only one related place will be in the list.
+ break;
+ }
+ // current place is descendant of possible_ancestor
+ PlaceAncestryRelation::Descendant => {
+ ancestor_found = true;
+ let backup_path_expr_id = possible_ancestor.info.path_expr_id;
+
+ // Truncate the descendant (current place) to be same as the ancestor to handle any
+ // possible change in capture mode.
+ truncate_place_to_len_and_update_capture_kind(
+ &mut place,
+ &mut updated_capture_info.capture_kind,
+ possible_ancestor.place.projections.len(),
+ );
+
+ possible_ancestor.info = determine_capture_info(
+ possible_ancestor.info,
+ updated_capture_info,
+ );
+
+ // we need to keep the ancestor's `path_expr_id`
+ possible_ancestor.info.path_expr_id = backup_path_expr_id;
+
+ // Only one related place will be in the list.
+ break;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ // Only need to insert when we don't have an ancestor in the existing min capture list
+ if !ancestor_found {
+ let mutability = self.determine_capture_mutability(&typeck_results, &place);
+ let captured_place = ty::CapturedPlace {
+ place,
+ info: updated_capture_info,
+ mutability,
+ region: None,
+ };
+ min_cap_list.push(captured_place);
+ }
+ }
+
+ // For each capture that is determined to be captured by ref, add region info.
+ for (_, captures) in &mut root_var_min_capture_list {
+ for capture in captures {
+ match capture.info.capture_kind {
+ ty::UpvarCapture::ByRef(_) => {
+ let PlaceBase::Upvar(upvar_id) = capture.place.base else { bug!("expected upvar") };
+ let origin = UpvarRegion(upvar_id, closure_span);
+ let upvar_region = self.next_region_var(origin);
+ capture.region = Some(upvar_region);
+ }
+ _ => (),
+ }
+ }
+ }
+
+ debug!(
+ "For closure={:?}, min_captures before sorting={:?}",
+ closure_def_id, root_var_min_capture_list
+ );
+
+ // Now that we have the minimized list of captures, sort the captures by field id.
+ // This causes the closure to capture the upvars in the same order as the fields are
+ // declared which is also the drop order. Thus, in situations where we capture all the
+ // fields of some type, the observable drop order will remain the same as it previously
+ // was even though we're dropping each capture individually.
+ // See https://github.com/rust-lang/project-rfc-2229/issues/42 and
+ // `src/test/ui/closures/2229_closure_analysis/preserve_field_drop_order.rs`.
+ for (_, captures) in &mut root_var_min_capture_list {
+ captures.sort_by(|capture1, capture2| {
+ for (p1, p2) in capture1.place.projections.iter().zip(&capture2.place.projections) {
+ // We do not need to look at the `Projection.ty` fields here because at each
+ // step of the iteration, the projections will either be the same and therefore
+ // the types must be as well or the current projection will be different and
+ // we will return the result of comparing the field indexes.
+ match (p1.kind, p2.kind) {
+ // Paths are the same, continue to next loop.
+ (ProjectionKind::Deref, ProjectionKind::Deref) => {}
+ (ProjectionKind::Field(i1, _), ProjectionKind::Field(i2, _))
+ if i1 == i2 => {}
+
+ // Fields are different, compare them.
+ (ProjectionKind::Field(i1, _), ProjectionKind::Field(i2, _)) => {
+ return i1.cmp(&i2);
+ }
+
+ // We should have either a pair of `Deref`s or a pair of `Field`s.
+ // Anything else is a bug.
+ (
+ l @ (ProjectionKind::Deref | ProjectionKind::Field(..)),
+ r @ (ProjectionKind::Deref | ProjectionKind::Field(..)),
+ ) => bug!(
+ "ProjectionKinds Deref and Field were mismatched: ({:?}, {:?})",
+ l,
+ r
+ ),
+ (
+ l @ (ProjectionKind::Index
+ | ProjectionKind::Subslice
+ | ProjectionKind::Deref
+ | ProjectionKind::Field(..)),
+ r @ (ProjectionKind::Index
+ | ProjectionKind::Subslice
+ | ProjectionKind::Deref
+ | ProjectionKind::Field(..)),
+ ) => bug!(
+ "ProjectionKinds Index or Subslice were unexpected: ({:?}, {:?})",
+ l,
+ r
+ ),
+ }
+ }
+
+ unreachable!(
+ "we captured two identical projections: capture1 = {:?}, capture2 = {:?}",
+ capture1, capture2
+ );
+ });
+ }
+
+ debug!(
+ "For closure={:?}, min_captures after sorting={:#?}",
+ closure_def_id, root_var_min_capture_list
+ );
+ typeck_results.closure_min_captures.insert(closure_def_id, root_var_min_capture_list);
+ }
+
+ /// Perform the migration analysis for RFC 2229, and emit the
+ /// `rust_2021_incompatible_closure_captures` lint if needed.
+ fn perform_2229_migration_anaysis(
+ &self,
+ closure_def_id: LocalDefId,
+ body_id: hir::BodyId,
+ capture_clause: hir::CaptureBy,
+ span: Span,
+ ) {
+ let (need_migrations, reasons) = self.compute_2229_migrations(
+ closure_def_id,
+ span,
+ capture_clause,
+ self.typeck_results.borrow().closure_min_captures.get(&closure_def_id),
+ );
+
+ if !need_migrations.is_empty() {
+ let (migration_string, migrated_variables_concat) =
+ migration_suggestion_for_2229(self.tcx, &need_migrations);
+
+ let closure_hir_id = self.tcx.hir().local_def_id_to_hir_id(closure_def_id);
+ let closure_head_span = self.tcx.def_span(closure_def_id);
+ self.tcx.struct_span_lint_hir(
+ lint::builtin::RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES,
+ closure_hir_id,
+ closure_head_span,
+ |lint| {
+ let mut diagnostics_builder = lint.build(
+ &reasons.migration_message(),
+ );
+ for NeededMigration { var_hir_id, diagnostics_info } in &need_migrations {
+ // Label all the usages of the captured variable and explain why they are
+ // responsible for the migration being needed
+ for lint_note in diagnostics_info.iter() {
+ match &lint_note.captures_info {
+ UpvarMigrationInfo::CapturingPrecise { source_expr: Some(capture_expr_id), var_name: captured_name } => {
+ let cause_span = self.tcx.hir().span(*capture_expr_id);
+ diagnostics_builder.span_label(cause_span, format!("in Rust 2018, this closure captures all of `{}`, but in Rust 2021, it will only capture `{}`",
+ self.tcx.hir().name(*var_hir_id),
+ captured_name,
+ ));
+ }
+ UpvarMigrationInfo::CapturingNothing { use_span } => {
+ diagnostics_builder.span_label(*use_span, format!("in Rust 2018, this causes the closure to capture `{}`, but in Rust 2021, it has no effect",
+ self.tcx.hir().name(*var_hir_id),
+ ));
+ }
+
+ _ => { }
+ }
+
+ // Add a label pointing to where a captured variable affected by drop order
+ // is dropped
+ if lint_note.reason.drop_order {
+ let drop_location_span = drop_location_span(self.tcx, closure_hir_id);
+
+ match &lint_note.captures_info {
+ UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. } => {
+ diagnostics_builder.span_label(drop_location_span, format!("in Rust 2018, `{}` is dropped here, but in Rust 2021, only `{}` will be dropped here as part of the closure",
+ self.tcx.hir().name(*var_hir_id),
+ captured_name,
+ ));
+ }
+ UpvarMigrationInfo::CapturingNothing { use_span: _ } => {
+ diagnostics_builder.span_label(drop_location_span, format!("in Rust 2018, `{v}` is dropped here along with the closure, but in Rust 2021 `{v}` is not part of the closure",
+ v = self.tcx.hir().name(*var_hir_id),
+ ));
+ }
+ }
+ }
+
+ // Add a label explaining why a closure no longer implements a trait
+ for &missing_trait in &lint_note.reason.auto_traits {
+ // not capturing something anymore cannot cause a trait to fail to be implemented:
+ match &lint_note.captures_info {
+ UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. } => {
+ let var_name = self.tcx.hir().name(*var_hir_id);
+ diagnostics_builder.span_label(closure_head_span, format!("\
+ in Rust 2018, this closure implements {missing_trait} \
+ as `{var_name}` implements {missing_trait}, but in Rust 2021, \
+ this closure will no longer implement {missing_trait} \
+ because `{var_name}` is not fully captured \
+ and `{captured_name}` does not implement {missing_trait}"));
+ }
+
+ // Cannot happen: if we don't capture a variable, we impl strictly more traits
+ UpvarMigrationInfo::CapturingNothing { use_span } => span_bug!(*use_span, "missing trait from not capturing something"),
+ }
+ }
+ }
+ }
+ diagnostics_builder.note("for more information, see <https://doc.rust-lang.org/nightly/edition-guide/rust-2021/disjoint-capture-in-closures.html>");
+
+ let diagnostic_msg = format!(
+ "add a dummy let to cause {} to be fully captured",
+ migrated_variables_concat
+ );
+
+ let closure_span = self.tcx.hir().span_with_body(closure_hir_id);
+ let mut closure_body_span = {
+ // If the body was entirely expanded from a macro
+ // invocation, i.e. the body is not contained inside the
+ // closure span, then we walk up the expansion until we
+ // find the span before the expansion.
+ let s = self.tcx.hir().span_with_body(body_id.hir_id);
+ s.find_ancestor_inside(closure_span).unwrap_or(s)
+ };
+
+ if let Ok(mut s) = self.tcx.sess.source_map().span_to_snippet(closure_body_span) {
+ if s.starts_with('$') {
+ // Looks like a macro fragment. Try to find the real block.
+ if let Some(hir::Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::Block(block, ..), ..
+ })) = self.tcx.hir().find(body_id.hir_id) {
+ // If the body is a block (with `{..}`), we use the span of that block.
+ // E.g. with a `|| $body` expanded from a `m!({ .. })`, we use `{ .. }`, and not `$body`.
+ // Since we know it's a block, we know we can insert the `let _ = ..` without
+ // breaking the macro syntax.
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(block.span) {
+ closure_body_span = block.span;
+ s = snippet;
+ }
+ }
+ }
+
+ let mut lines = s.lines();
+ let line1 = lines.next().unwrap_or_default();
+
+ if line1.trim_end() == "{" {
+ // This is a multi-line closure with just a `{` on the first line,
+ // so we put the `let` on its own line.
+ // We take the indentation from the next non-empty line.
+ let line2 = lines.find(|line| !line.is_empty()).unwrap_or_default();
+ let indent = line2.split_once(|c: char| !c.is_whitespace()).unwrap_or_default().0;
+ diagnostics_builder.span_suggestion(
+ closure_body_span.with_lo(closure_body_span.lo() + BytePos::from_usize(line1.len())).shrink_to_lo(),
+ &diagnostic_msg,
+ format!("\n{indent}{migration_string};"),
+ Applicability::MachineApplicable,
+ );
+ } else if line1.starts_with('{') {
+ // This is a closure with its body wrapped in
+ // braces, but with more than just the opening
+ // brace on the first line. We put the `let`
+ // directly after the `{`.
+ diagnostics_builder.span_suggestion(
+ closure_body_span.with_lo(closure_body_span.lo() + BytePos(1)).shrink_to_lo(),
+ &diagnostic_msg,
+ format!(" {migration_string};"),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ // This is a closure without braces around the body.
+ // We add braces to add the `let` before the body.
+ diagnostics_builder.multipart_suggestion(
+ &diagnostic_msg,
+ vec![
+ (closure_body_span.shrink_to_lo(), format!("{{ {migration_string}; ")),
+ (closure_body_span.shrink_to_hi(), " }".to_string()),
+ ],
+ Applicability::MachineApplicable
+ );
+ }
+ } else {
+ diagnostics_builder.span_suggestion(
+ closure_span,
+ &diagnostic_msg,
+ migration_string,
+ Applicability::HasPlaceholders
+ );
+ }
+
+ diagnostics_builder.emit();
+ },
+ );
+ }
+ }
+
+ /// Combines all the reasons for 2229 migrations
+ fn compute_2229_migrations_reasons(
+ &self,
+ auto_trait_reasons: FxHashSet<&'static str>,
+ drop_order: bool,
+ ) -> MigrationWarningReason {
+ let mut reasons = MigrationWarningReason::default();
+
+ reasons.auto_traits.extend(auto_trait_reasons);
+ reasons.drop_order = drop_order;
+
+ // `auto_trait_reasons` are in hashset order, so sort them to put the
+ // diagnostics we emit later in a cross-platform-consistent order.
+ reasons.auto_traits.sort_unstable();
+
+ reasons
+ }
+
+ /// Figures out the list of root variables (and their types) that aren't completely
+ /// captured by the closure when `capture_disjoint_fields` is enabled and auto-traits
+ /// differ between the root variable and the captured paths.
+ ///
+ /// Returns a HashMap mapping each problematic UpvarMigrationInfo to a HashSet of trait
+ /// names if migration is needed for traits for the provided var_hir_id; otherwise
+ /// returns None.
+ fn compute_2229_migrations_for_trait(
+ &self,
+ min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
+ var_hir_id: hir::HirId,
+ closure_clause: hir::CaptureBy,
+ ) -> Option<FxHashMap<UpvarMigrationInfo, FxHashSet<&'static str>>> {
+ let auto_traits_def_id = vec![
+ self.tcx.lang_items().clone_trait(),
+ self.tcx.lang_items().sync_trait(),
+ self.tcx.get_diagnostic_item(sym::Send),
+ self.tcx.lang_items().unpin_trait(),
+ self.tcx.get_diagnostic_item(sym::unwind_safe_trait),
+ self.tcx.get_diagnostic_item(sym::ref_unwind_safe_trait),
+ ];
+ const AUTO_TRAITS: [&str; 6] =
+ ["`Clone`", "`Sync`", "`Send`", "`Unpin`", "`UnwindSafe`", "`RefUnwindSafe`"];
+
+ let root_var_min_capture_list = min_captures.and_then(|m| m.get(&var_hir_id))?;
+
+ let ty = self.resolve_vars_if_possible(self.node_ty(var_hir_id));
+
+ let ty = match closure_clause {
+ hir::CaptureBy::Value => ty, // For a move closure the capture kind should be by value
+ hir::CaptureBy::Ref => {
+ // For a non-move closure the capture kind is the max capture kind of all captures
+ // according to the ordering ImmBorrow < UniqueImmBorrow < MutBorrow < ByValue
+ let mut max_capture_info = root_var_min_capture_list.first().unwrap().info;
+ for capture in root_var_min_capture_list.iter() {
+ max_capture_info = determine_capture_info(max_capture_info, capture.info);
+ }
+
+ apply_capture_kind_on_capture_ty(
+ self.tcx,
+ ty,
+ max_capture_info.capture_kind,
+ Some(self.tcx.lifetimes.re_erased),
+ )
+ }
+ };
+
+ let mut obligations_should_hold = Vec::new();
+ // Checks if a root variable implements any of the auto traits
+ for check_trait in auto_traits_def_id.iter() {
+ obligations_should_hold.push(
+ check_trait
+ .map(|check_trait| {
+ self.infcx
+ .type_implements_trait(
+ check_trait,
+ ty,
+ self.tcx.mk_substs_trait(ty, &[]),
+ self.param_env,
+ )
+ .must_apply_modulo_regions()
+ })
+ .unwrap_or(false),
+ );
+ }
+
+ let mut problematic_captures = FxHashMap::default();
+ // Check whether captured fields also implement the trait
+ for capture in root_var_min_capture_list.iter() {
+ let ty = apply_capture_kind_on_capture_ty(
+ self.tcx,
+ capture.place.ty(),
+ capture.info.capture_kind,
+ Some(self.tcx.lifetimes.re_erased),
+ );
+
+ // Checks if a capture implements any of the auto traits
+ let mut obligations_holds_for_capture = Vec::new();
+ for check_trait in auto_traits_def_id.iter() {
+ obligations_holds_for_capture.push(
+ check_trait
+ .map(|check_trait| {
+ self.infcx
+ .type_implements_trait(
+ check_trait,
+ ty,
+ self.tcx.mk_substs_trait(ty, &[]),
+ self.param_env,
+ )
+ .must_apply_modulo_regions()
+ })
+ .unwrap_or(false),
+ );
+ }
+
+ let mut capture_problems = FxHashSet::default();
+
+ // Checks whether, for any of the auto traits, the trait is implemented
+ // by the root variable but not by the capture
+ for (idx, _) in obligations_should_hold.iter().enumerate() {
+ if !obligations_holds_for_capture[idx] && obligations_should_hold[idx] {
+ capture_problems.insert(AUTO_TRAITS[idx]);
+ }
+ }
+
+ if !capture_problems.is_empty() {
+ problematic_captures.insert(
+ UpvarMigrationInfo::CapturingPrecise {
+ source_expr: capture.info.path_expr_id,
+ var_name: capture.to_string(self.tcx),
+ },
+ capture_problems,
+ );
+ }
+ }
+ if !problematic_captures.is_empty() {
+ return Some(problematic_captures);
+ }
+ None
+ }
+
+ /// Figures out the list of root variables (and their types) that aren't completely
+ /// captured by the closure when `capture_disjoint_fields` is enabled and drop order of
+ /// some path starting at that root variable **might** be affected.
+ ///
+ /// The output list would include a root variable if:
+ /// - It would have been moved into the closure when `capture_disjoint_fields` wasn't
+ /// enabled, **and**
+ /// - It wasn't completely captured by the closure, **and**
+ /// - One of the paths starting at this root variable that is not captured needs Drop.
+ ///
+ /// This function only returns a HashSet of UpvarMigrationInfo for significant drops. If
+ /// there are no significant drops, then None is returned.
+ #[instrument(level = "debug", skip(self))]
+ fn compute_2229_migrations_for_drop(
+ &self,
+ closure_def_id: LocalDefId,
+ closure_span: Span,
+ min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
+ closure_clause: hir::CaptureBy,
+ var_hir_id: hir::HirId,
+ ) -> Option<FxHashSet<UpvarMigrationInfo>> {
+ let ty = self.resolve_vars_if_possible(self.node_ty(var_hir_id));
+
+ if !ty.has_significant_drop(self.tcx, self.tcx.param_env(closure_def_id)) {
+ debug!("does not have significant drop");
+ return None;
+ }
+
+ let Some(root_var_min_capture_list) = min_captures.and_then(|m| m.get(&var_hir_id)) else {
+ // The upvar is mentioned within the closure but no path starting from it is
+ // used. This occurs when you have (e.g.)
+ //
+ // ```
+ // let x = move || {
+ // let _ = y;
+ // };
+ // ```
+ debug!("no path starting from it is used");
+
+ match closure_clause {
+ // Only migrate if closure is a move closure
+ hir::CaptureBy::Value => {
+ let mut diagnostics_info = FxHashSet::default();
+ let upvars = self.tcx.upvars_mentioned(closure_def_id).expect("must be an upvar");
+ let upvar = upvars[&var_hir_id];
+ diagnostics_info.insert(UpvarMigrationInfo::CapturingNothing { use_span: upvar.span });
+ return Some(diagnostics_info);
+ }
+ hir::CaptureBy::Ref => {}
+ }
+
+ return None;
+ };
+ debug!(?root_var_min_capture_list);
+
+ let mut projections_list = Vec::new();
+ let mut diagnostics_info = FxHashSet::default();
+
+ for captured_place in root_var_min_capture_list.iter() {
+ match captured_place.info.capture_kind {
+ // Only care about captures that are moved into the closure
+ ty::UpvarCapture::ByValue => {
+ projections_list.push(captured_place.place.projections.as_slice());
+ diagnostics_info.insert(UpvarMigrationInfo::CapturingPrecise {
+ source_expr: captured_place.info.path_expr_id,
+ var_name: captured_place.to_string(self.tcx),
+ });
+ }
+ ty::UpvarCapture::ByRef(..) => {}
+ }
+ }
+
+ debug!(?projections_list);
+ debug!(?diagnostics_info);
+
+ let is_moved = !projections_list.is_empty();
+ debug!(?is_moved);
+
+ let is_not_completely_captured =
+ root_var_min_capture_list.iter().any(|capture| !capture.place.projections.is_empty());
+ debug!(?is_not_completely_captured);
+
+ if is_moved
+ && is_not_completely_captured
+ && self.has_significant_drop_outside_of_captures(
+ closure_def_id,
+ closure_span,
+ ty,
+ projections_list,
+ )
+ {
+ return Some(diagnostics_info);
+ }
+
+ None
+ }
+
+ /// Figures out the list of root variables (and their types) that aren't completely
+ /// captured by the closure when `capture_disjoint_fields` is enabled and either drop
+ /// order of some path starting at that root variable **might** be affected or auto-traits
+ /// differ between the root variable and the captured paths.
+ ///
+ /// The output list would include a root variable if:
+ /// - It would have been moved into the closure when `capture_disjoint_fields` wasn't
+ /// enabled, **and**
+ /// - It wasn't completely captured by the closure, **and**
+ /// - One of the paths starting at this root variable, that is not captured needs Drop **or**
+ /// - One of the paths captured does not implement all the auto-traits its root variable
+ /// implements.
+ ///
+ /// Returns a tuple containing a vector of NeededMigration, as well as a
+ /// MigrationWarningReason explaining why the root variables whose HirIds are contained
+ /// in the vector should be fully captured.
+ #[instrument(level = "debug", skip(self))]
+ fn compute_2229_migrations(
+ &self,
+ closure_def_id: LocalDefId,
+ closure_span: Span,
+ closure_clause: hir::CaptureBy,
+ min_captures: Option<&ty::RootVariableMinCaptureList<'tcx>>,
+ ) -> (Vec<NeededMigration>, MigrationWarningReason) {
+ let Some(upvars) = self.tcx.upvars_mentioned(closure_def_id) else {
+ return (Vec::new(), MigrationWarningReason::default());
+ };
+
+ let mut need_migrations = Vec::new();
+ let mut auto_trait_migration_reasons = FxHashSet::default();
+ let mut drop_migration_needed = false;
+
+ // Perform auto-trait analysis
+ for (&var_hir_id, _) in upvars.iter() {
+ let mut diagnostics_info = Vec::new();
+
+ let auto_trait_diagnostic = if let Some(diagnostics_info) =
+ self.compute_2229_migrations_for_trait(min_captures, var_hir_id, closure_clause)
+ {
+ diagnostics_info
+ } else {
+ FxHashMap::default()
+ };
+
+ let drop_reorder_diagnostic = if let Some(diagnostics_info) = self
+ .compute_2229_migrations_for_drop(
+ closure_def_id,
+ closure_span,
+ min_captures,
+ closure_clause,
+ var_hir_id,
+ ) {
+ drop_migration_needed = true;
+ diagnostics_info
+ } else {
+ FxHashSet::default()
+ };
+
+ // Combine all the captures responsible for needing migrations into one HashSet
+ let mut capture_diagnostic = drop_reorder_diagnostic.clone();
+ for key in auto_trait_diagnostic.keys() {
+ capture_diagnostic.insert(key.clone());
+ }
+
+ let mut capture_diagnostic = capture_diagnostic.into_iter().collect::<Vec<_>>();
+ capture_diagnostic.sort();
+ for captures_info in capture_diagnostic {
+ // Get the auto trait reasons of why migration is needed because of that capture, if there are any
+ let capture_trait_reasons =
+ if let Some(reasons) = auto_trait_diagnostic.get(&captures_info) {
+ reasons.clone()
+ } else {
+ FxHashSet::default()
+ };
+
+ // Check if migration is needed because of drop reorder as a result of that capture
+ let capture_drop_reorder_reason = drop_reorder_diagnostic.contains(&captures_info);
+
+ // Combine all the reasons of why the root variable should be captured as a result of
+ // auto trait implementation issues
+ auto_trait_migration_reasons.extend(capture_trait_reasons.clone());
+
+ diagnostics_info.push(MigrationLintNote {
+ captures_info,
+ reason: self.compute_2229_migrations_reasons(
+ capture_trait_reasons,
+ capture_drop_reorder_reason,
+ ),
+ });
+ }
+
+ if !diagnostics_info.is_empty() {
+ need_migrations.push(NeededMigration { var_hir_id, diagnostics_info });
+ }
+ }
+ (
+ need_migrations,
+ self.compute_2229_migrations_reasons(
+ auto_trait_migration_reasons,
+ drop_migration_needed,
+ ),
+ )
+ }
+
+ /// This is a helper function to `compute_2229_migrations_precise_pass`. Provided the type
+ /// of a root variable and a list of captured paths starting at this root variable (expressed
+ /// using list of `Projection` slices), it returns true if there is a path that is not
+ /// captured starting at this root variable that implements Drop.
+ ///
+ /// At a given call, this function looks at the type `base_path_ty` of some base
+ /// path, say P, and a list of projection slices which represent the different captures
+ /// moved into the closure starting off of P.
+ ///
+ /// This will make more sense with an example:
+ ///
+ /// ```rust
+ /// #![feature(capture_disjoint_fields)]
+ ///
+ /// struct FancyInteger(i32); // This implements Drop
+ ///
+ /// struct Point { x: FancyInteger, y: FancyInteger }
+ /// struct Color;
+ ///
+ /// struct Wrapper { p: Point, c: Color }
+ ///
+ /// fn f(w: Wrapper) {
+ /// let c = || {
+ /// // Closure captures w.p.x and w.c by move.
+ /// };
+ ///
+ /// c();
+ /// }
+ /// ```
+ ///
+ /// If `capture_disjoint_fields` wasn't enabled, the closure would've moved `w` instead of the
+ /// precise paths. If we look closely, `w.p.y` isn't captured, and it implements Drop;
+ /// therefore the drop ordering would change, and we want this function to return true.
+ ///
+ /// Call stack to figure out if we need to migrate for `w` would look as follows:
+ ///
+ /// Our initial base path is just `w`, and the paths captured from it are `w[p, x]` and
+ /// `w[c]`.
+ /// Notation:
+ /// - Ty(place): Type of place
+ /// - `(a, b)`: Represents the function parameters `base_path_ty` and `captured_by_move_projs`
+ /// respectively.
+ /// ```ignore (illustrative)
+ /// (Ty(w), [ &[p, x], &[c] ])
+ /// // |
+ /// // ----------------------------
+ /// // | |
+ /// // v v
+ /// (Ty(w.p), [ &[x] ]) (Ty(w.c), [ &[] ]) // IMP 1
+ /// // | |
+ /// // v v
+ /// (Ty(w.p), [ &[x] ]) false
+ /// // |
+ /// // |
+ /// // -------------------------------
+ /// // | |
+ /// // v v
+ /// (Ty((w.p).x), [ &[] ]) (Ty((w.p).y), []) // IMP 2
+ /// // | |
+ /// // v v
+ /// false NeedsSignificantDrop(Ty(w.p.y))
+ /// // |
+ /// // v
+ /// true
+ /// ```
+ ///
+ /// IMP 1 `(Ty(w.c), [ &[] ])`: Notice the single empty slice inside `captured_projs`.
+ /// This implies that the `w.c` is completely captured by the closure.
+ /// Since drop for this path will be called when the closure is
+ /// dropped we don't need to migrate for it.
+ ///
+ /// IMP 2 `(Ty((w.p).y), [])`: Notice that `captured_projs` is empty. This implies that this
+ /// path wasn't captured by the closure. Also note that even
+ /// though we didn't capture this path, the function visits it,
+ /// which is kind of the point of this function. We then return
+ /// if the type of `w.p.y` implements Drop, which in this case is
+ /// true.
+ ///
+ /// Consider another example:
+ ///
+ /// ```ignore (pseudo-rust)
+ /// struct X;
+ /// impl Drop for X {}
+ ///
+ /// struct Y(X);
+ /// impl Drop for Y {}
+ ///
+ /// fn foo() {
+ /// let y = Y(X);
+ /// let c = || move(y.0);
+ /// }
+ /// ```
+ ///
+ /// Note that `y.0` is captured by the closure. When this function is called for `y`, it will
+ /// return true, because even though all paths starting at `y` are captured, `y` itself
+ /// implements Drop which will be affected since `y` isn't completely captured.
+ fn has_significant_drop_outside_of_captures(
+ &self,
+ closure_def_id: LocalDefId,
+ closure_span: Span,
+ base_path_ty: Ty<'tcx>,
+ captured_by_move_projs: Vec<&[Projection<'tcx>]>,
+ ) -> bool {
+ let needs_drop =
+ |ty: Ty<'tcx>| ty.has_significant_drop(self.tcx, self.tcx.param_env(closure_def_id));
+
+ let is_drop_defined_for_ty = |ty: Ty<'tcx>| {
+ let drop_trait = self.tcx.require_lang_item(hir::LangItem::Drop, Some(closure_span));
+ let ty_params = self.tcx.mk_substs_trait(base_path_ty, &[]);
+ self.infcx
+ .type_implements_trait(
+ drop_trait,
+ ty,
+ ty_params,
+ self.tcx.param_env(closure_def_id),
+ )
+ .must_apply_modulo_regions()
+ };
+
+ let is_drop_defined_for_ty = is_drop_defined_for_ty(base_path_ty);
+
+ // If there is a capture where no projection is applied on top of the current place,
+ // then there must be exactly one such capture. Note that this represents the case
+ // of the path being completely captured by the variable.
+ //
+ // e.g. If `a.b` is captured and we are processing `a.b`, then we can't have the closure also
+ // capture `a.b.c`, because that violates min capture.
+ let is_completely_captured = captured_by_move_projs.iter().any(|projs| projs.is_empty());
+
+ assert!(!is_completely_captured || (captured_by_move_projs.len() == 1));
+
+ if is_completely_captured {
+ // The place is captured entirely, so it doesn't matter whether it needs a dtor;
+ // it will be dropped when the closure is dropped.
+ return false;
+ }
+
+ if captured_by_move_projs.is_empty() {
+ return needs_drop(base_path_ty);
+ }
+
+ if is_drop_defined_for_ty {
+ // If drop is implemented for this type then we need it to be fully captured,
+ // and we know it is not completely captured because of the previous checks.
+
+ // Note that this is a bug in the user code that will be reported by the
+ // borrow checker, since we can't move out of drop types.
+
+ // The bug exists in the user's code pre-migration, and we don't migrate here.
+ return false;
+ }
+
+ match base_path_ty.kind() {
+ // Observations:
+ // - `captured_by_move_projs` is not empty. Therefore we can call
+ // `captured_by_move_projs.first().unwrap()` safely.
+ // - All entries in `captured_by_move_projs` have at least one projection.
+ // Therefore we can call `captured_by_move_projs.first().unwrap().first().unwrap()` safely.
+
+ // We don't capture derefs in the case of move captures, which would have to be
+ // applied to access any further paths.
+ ty::Adt(def, _) if def.is_box() => unreachable!(),
+ ty::Ref(..) => unreachable!(),
+ ty::RawPtr(..) => unreachable!(),
+
+ ty::Adt(def, substs) => {
+ // Multi-variant enums are captured in their entirety,
+ // which would've been handled by the single-empty-slice case of `captured_by_move_projs` above.
+ assert_eq!(def.variants().len(), 1);
+
+ // Only Field projections can be applied to a non-box Adt.
+ assert!(
+ captured_by_move_projs.iter().all(|projs| matches!(
+ projs.first().unwrap().kind,
+ ProjectionKind::Field(..)
+ ))
+ );
+ def.variants().get(VariantIdx::new(0)).unwrap().fields.iter().enumerate().any(
+ |(i, field)| {
+ let paths_using_field = captured_by_move_projs
+ .iter()
+ .filter_map(|projs| {
+ if let ProjectionKind::Field(field_idx, _) =
+ projs.first().unwrap().kind
+ {
+ if (field_idx as usize) == i { Some(&projs[1..]) } else { None }
+ } else {
+ unreachable!();
+ }
+ })
+ .collect();
+
+ let after_field_ty = field.ty(self.tcx, substs);
+ self.has_significant_drop_outside_of_captures(
+ closure_def_id,
+ closure_span,
+ after_field_ty,
+ paths_using_field,
+ )
+ },
+ )
+ }
+
+ ty::Tuple(fields) => {
+ // Only Field projections can be applied to a tuple.
+ assert!(
+ captured_by_move_projs.iter().all(|projs| matches!(
+ projs.first().unwrap().kind,
+ ProjectionKind::Field(..)
+ ))
+ );
+
+ fields.iter().enumerate().any(|(i, element_ty)| {
+ let paths_using_field = captured_by_move_projs
+ .iter()
+ .filter_map(|projs| {
+ if let ProjectionKind::Field(field_idx, _) = projs.first().unwrap().kind
+ {
+ if (field_idx as usize) == i { Some(&projs[1..]) } else { None }
+ } else {
+ unreachable!();
+ }
+ })
+ .collect();
+
+ self.has_significant_drop_outside_of_captures(
+ closure_def_id,
+ closure_span,
+ element_ty,
+ paths_using_field,
+ )
+ })
+ }
+
+ // Anything else would be completely captured and therefore handled already.
+ _ => unreachable!(),
+ }
+ }
+
+ fn init_capture_kind_for_place(
+ &self,
+ place: &Place<'tcx>,
+ capture_clause: hir::CaptureBy,
+ ) -> ty::UpvarCapture {
+ match capture_clause {
+ // In the case of a move closure, if the data is accessed through a reference
+ // we want to capture it by ref to allow precise capture using reborrows.
+ //
+ // If the data will be moved out of this place, then the place will be truncated
+ // at the first Deref in `adjust_upvar_borrow_kind_for_consume` and then moved into
+ // the closure.
+ hir::CaptureBy::Value if !place.deref_tys().any(Ty::is_ref) => {
+ ty::UpvarCapture::ByValue
+ }
+ hir::CaptureBy::Value | hir::CaptureBy::Ref => ty::UpvarCapture::ByRef(ty::ImmBorrow),
+ }
+ }
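+
+ // A minimal illustrative sketch of the rule above, assuming `s: String` and
+ // `r: &String` are locals of the enclosing function:
+ //
+ // ```ignore (illustrative)
+ // let c = move || {
+ // let _n = s.len(); // no deref on the path: `s` starts as ByValue
+ // let _m = r.len(); // the path derefs `r: &String`, so the initial kind
+ // // is ByRef(ImmBorrow), keeping a precise reborrow
+ // // rather than forcing a move of `*r`
+ // };
+ // ```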
+
+ fn place_for_root_variable(
+ &self,
+ closure_def_id: LocalDefId,
+ var_hir_id: hir::HirId,
+ ) -> Place<'tcx> {
+ let upvar_id = ty::UpvarId::new(var_hir_id, closure_def_id);
+
+ Place {
+ base_ty: self.node_ty(var_hir_id),
+ base: PlaceBase::Upvar(upvar_id),
+ projections: Default::default(),
+ }
+ }
+
+ fn should_log_capture_analysis(&self, closure_def_id: LocalDefId) -> bool {
+ self.tcx.has_attr(closure_def_id.to_def_id(), sym::rustc_capture_analysis)
+ }
+
+ fn log_capture_analysis_first_pass(
+ &self,
+ closure_def_id: LocalDefId,
+ capture_information: &InferredCaptureInformation<'tcx>,
+ closure_span: Span,
+ ) {
+ if self.should_log_capture_analysis(closure_def_id) {
+ let mut diag =
+ self.tcx.sess.struct_span_err(closure_span, "First Pass analysis includes:");
+ for (place, capture_info) in capture_information {
+ let capture_str = construct_capture_info_string(self.tcx, place, capture_info);
+ let output_str = format!("Capturing {capture_str}");
+
+ let span =
+ capture_info.path_expr_id.map_or(closure_span, |e| self.tcx.hir().span(e));
+ diag.span_note(span, &output_str);
+ }
+ diag.emit();
+ }
+ }
+
+ fn log_closure_min_capture_info(&self, closure_def_id: LocalDefId, closure_span: Span) {
+ if self.should_log_capture_analysis(closure_def_id) {
+ if let Some(min_captures) =
+ self.typeck_results.borrow().closure_min_captures.get(&closure_def_id)
+ {
+ let mut diag =
+ self.tcx.sess.struct_span_err(closure_span, "Min Capture analysis includes:");
+
+ for (_, min_captures_for_var) in min_captures {
+ for capture in min_captures_for_var {
+ let place = &capture.place;
+ let capture_info = &capture.info;
+
+ let capture_str =
+ construct_capture_info_string(self.tcx, place, capture_info);
+ let output_str = format!("Min Capture {capture_str}");
+
+ if capture.info.path_expr_id != capture.info.capture_kind_expr_id {
+ let path_span = capture_info
+ .path_expr_id
+ .map_or(closure_span, |e| self.tcx.hir().span(e));
+ let capture_kind_span = capture_info
+ .capture_kind_expr_id
+ .map_or(closure_span, |e| self.tcx.hir().span(e));
+
+ let mut multi_span: MultiSpan =
+ MultiSpan::from_spans(vec![path_span, capture_kind_span]);
+
+ let capture_kind_label =
+ construct_capture_kind_reason_string(self.tcx, place, capture_info);
+ let path_label = construct_path_string(self.tcx, place);
+
+ multi_span.push_span_label(path_span, path_label);
+ multi_span.push_span_label(capture_kind_span, capture_kind_label);
+
+ diag.span_note(multi_span, &output_str);
+ } else {
+ let span = capture_info
+ .path_expr_id
+ .map_or(closure_span, |e| self.tcx.hir().span(e));
+
+ diag.span_note(span, &output_str);
+ };
+ }
+ }
+ diag.emit();
+ }
+ }
+ }
+
+ /// A captured place is mutable if
+ /// 1. Projections don't include a Deref of an immut-borrow, **and**
+ /// 2. PlaceBase is mut or projections include a Deref of a mut-borrow.
+ fn determine_capture_mutability(
+ &self,
+ typeck_results: &'a TypeckResults<'tcx>,
+ place: &Place<'tcx>,
+ ) -> hir::Mutability {
+ let var_hir_id = match place.base {
+ PlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ _ => unreachable!(),
+ };
+
+ let bm = *typeck_results.pat_binding_modes().get(var_hir_id).expect("missing binding mode");
+
+ let mut is_mutbl = match bm {
+ ty::BindByValue(mutability) => mutability,
+ ty::BindByReference(_) => hir::Mutability::Not,
+ };
+
+ for pointer_ty in place.deref_tys() {
+ match pointer_ty.kind() {
+ // We don't capture derefs of raw ptrs
+ ty::RawPtr(_) => unreachable!(),
+
+ // Dereferencing a mut-ref allows us to mutate the Place, as long as we
+ // don't deref an immut-ref on top of it afterwards.
+ ty::Ref(.., hir::Mutability::Mut) => is_mutbl = hir::Mutability::Mut,
+
+ // The place isn't mutable once we dereference an immutable reference.
+ ty::Ref(.., hir::Mutability::Not) => return hir::Mutability::Not,
+
+ // Dereferencing a box doesn't change mutability
+ ty::Adt(def, ..) if def.is_box() => {}
+
+ unexpected_ty => bug!("deref of unexpected pointer type {:?}", unexpected_ty),
+ }
+ }
+
+ is_mutbl
+ }
+}
+
+/// Truncate the capture so that the place being borrowed is in accordance with RFC 1240,
+/// which states that it's unsafe to take a reference into a struct marked `repr(packed)`.
+fn restrict_repr_packed_field_ref_capture<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ mut place: Place<'tcx>,
+ mut curr_borrow_kind: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let pos = place.projections.iter().enumerate().position(|(i, p)| {
+ let ty = place.ty_before_projection(i);
+
+ // Return true for fields of packed structs, unless those fields have alignment 1.
+ match p.kind {
+ ProjectionKind::Field(..) => match ty.kind() {
+ ty::Adt(def, _) if def.repr().packed() => {
+ // We erase regions here because they cannot be hashed
+ match tcx.layout_of(param_env.and(tcx.erase_regions(p.ty))) {
+ Ok(layout) if layout.align.abi.bytes() == 1 => {
+ // if the alignment is 1, the type can't be further
+ // misaligned.
+ debug!(
+ "restrict_repr_packed_field_ref_capture: ({:?}) - align = 1",
+ place
+ );
+ false
+ }
+ _ => {
+ debug!("restrict_repr_packed_field_ref_capture: ({:?}) - true", place);
+ true
+ }
+ }
+ }
+
+ _ => false,
+ },
+ _ => false,
+ }
+ });
+
+ if let Some(pos) = pos {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_borrow_kind, pos);
+ }
+
+ (place, curr_borrow_kind)
+}
+
+/// Returns a Ty that applies the specified capture kind on the provided capture Ty
+fn apply_capture_kind_on_capture_ty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ capture_kind: UpvarCapture,
+ region: Option<ty::Region<'tcx>>,
+) -> Ty<'tcx> {
+ match capture_kind {
+ ty::UpvarCapture::ByValue => ty,
+ ty::UpvarCapture::ByRef(kind) => {
+ tcx.mk_ref(region.unwrap(), ty::TypeAndMut { ty, mutbl: kind.to_mutbl_lossy() })
+ }
+ }
+}
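+
+// A minimal illustrative sketch of the helper above, assuming `tcx`, `ty` and
+// `region` are already in scope:
+//
+// ```ignore (illustrative)
+// // ByValue leaves the capture type untouched:
+// let t1 = apply_capture_kind_on_capture_ty(tcx, ty, ty::UpvarCapture::ByValue, None);
+// assert_eq!(t1, ty);
+// // ByRef wraps it in a reference with the provided region; e.g. a
+// // ByRef(MutBorrow) capture of a `String` yields `&'region mut String`:
+// let t2 = apply_capture_kind_on_capture_ty(
+// tcx, ty, ty::UpvarCapture::ByRef(ty::BorrowKind::MutBorrow), Some(region));
+// ```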
+
+/// Returns the Span of where the value with the provided HirId would be dropped
+fn drop_location_span<'tcx>(tcx: TyCtxt<'tcx>, hir_id: hir::HirId) -> Span {
+ let owner_id = tcx.hir().get_enclosing_scope(hir_id).unwrap();
+
+ let owner_node = tcx.hir().get(owner_id);
+ let owner_span = match owner_node {
+ hir::Node::Item(item) => match item.kind {
+ hir::ItemKind::Fn(_, _, owner_id) => tcx.hir().span(owner_id.hir_id),
+ _ => {
+ bug!("Drop location span error: need to handle more ItemKind '{:?}'", item.kind);
+ }
+ },
+ hir::Node::Block(block) => tcx.hir().span(block.hir_id),
+ hir::Node::TraitItem(item) => tcx.hir().span(item.hir_id()),
+ hir::Node::ImplItem(item) => tcx.hir().span(item.hir_id()),
+ _ => {
+ bug!("Drop location span error: need to handle more Node '{:?}'", owner_node);
+ }
+ };
+ tcx.sess.source_map().end_point(owner_span)
+}
+
+struct InferBorrowKind<'a, 'tcx> {
+ fcx: &'a FnCtxt<'a, 'tcx>,
+
+ // The def-id of the closure whose kind and upvar accesses are being inferred.
+ closure_def_id: LocalDefId,
+
+ /// For each Place that is captured by the closure, we track the minimal kind of
+ /// access we need (ref, ref mut, move, etc) and the expression that resulted in such access.
+ ///
+ /// Consider a closure where `s.str1` is captured via an ImmutableBorrow and
+ /// `s.str2` via a MutableBorrow:
+ ///
+ /// ```rust,no_run
+ /// struct SomeStruct { str1: String, str2: String };
+ ///
+ /// // Assume that the HirId for the variable definition is `V1`
+ /// let mut s = SomeStruct { str1: format!("s1"), str2: format!("s2") };
+ ///
+ /// let fix_s = |new_s2| {
+ /// // Assume that the HirId for the expression `s.str1` is `E1`
+ /// println!("Updating SomeStruct with str1={0}", s.str1);
+ /// // Assume that the HirId for the expression `*s.str2` is `E2`
+ /// s.str2 = new_s2;
+ /// };
+ /// ```
+ ///
+ /// For closure `fix_s`, (at a high level) the map contains
+ ///
+ /// ```ignore (illustrative)
+ /// Place { V1, [ProjectionKind::Field(Index=0, Variant=0)] } : CaptureKind { E1, ImmutableBorrow }
+ /// Place { V1, [ProjectionKind::Field(Index=1, Variant=0)] } : CaptureKind { E2, MutableBorrow }
+ /// ```
+ capture_information: InferredCaptureInformation<'tcx>,
+ fake_reads: Vec<(Place<'tcx>, FakeReadCause, hir::HirId)>,
+}
+
+impl<'a, 'tcx> euv::Delegate<'tcx> for InferBorrowKind<'a, 'tcx> {
+ fn fake_read(
+ &mut self,
+ place: &PlaceWithHirId<'tcx>,
+ cause: FakeReadCause,
+ diag_expr_id: hir::HirId,
+ ) {
+ let PlaceBase::Upvar(_) = place.place.base else { return };
+
+ // We need to restrict Fake Read precision to avoid fake reading unsafe code,
+ // such as deref of a raw pointer.
+ let dummy_capture_kind = ty::UpvarCapture::ByRef(ty::BorrowKind::ImmBorrow);
+
+ let (place, _) = restrict_capture_precision(place.place.clone(), dummy_capture_kind);
+
+ let (place, _) = restrict_repr_packed_field_ref_capture(
+ self.fcx.tcx,
+ self.fcx.param_env,
+ place,
+ dummy_capture_kind,
+ );
+ self.fake_reads.push((place, cause, diag_expr_id));
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ let PlaceBase::Upvar(upvar_id) = place_with_id.place.base else { return };
+ assert_eq!(self.closure_def_id, upvar_id.closure_expr_id);
+
+ self.capture_information.push((
+ place_with_id.place.clone(),
+ ty::CaptureInfo {
+ capture_kind_expr_id: Some(diag_expr_id),
+ path_expr_id: Some(diag_expr_id),
+ capture_kind: ty::UpvarCapture::ByValue,
+ },
+ ));
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn borrow(
+ &mut self,
+ place_with_id: &PlaceWithHirId<'tcx>,
+ diag_expr_id: hir::HirId,
+ bk: ty::BorrowKind,
+ ) {
+ let PlaceBase::Upvar(upvar_id) = place_with_id.place.base else { return };
+ assert_eq!(self.closure_def_id, upvar_id.closure_expr_id);
+
+ // The region here will get discarded/ignored
+ let capture_kind = ty::UpvarCapture::ByRef(bk);
+
+ // We only want repr packed restriction to be applied to reading references into a packed
+ // struct, and not when the data is being moved. Therefore we call this method here instead
+ // of in `restrict_capture_precision`.
+ let (place, mut capture_kind) = restrict_repr_packed_field_ref_capture(
+ self.fcx.tcx,
+ self.fcx.param_env,
+ place_with_id.place.clone(),
+ capture_kind,
+ );
+
+ // Raw pointers don't inherit mutability
+ if place_with_id.place.deref_tys().any(Ty::is_unsafe_ptr) {
+ capture_kind = ty::UpvarCapture::ByRef(ty::BorrowKind::ImmBorrow);
+ }
+
+ self.capture_information.push((
+ place,
+ ty::CaptureInfo {
+ capture_kind_expr_id: Some(diag_expr_id),
+ path_expr_id: Some(diag_expr_id),
+ capture_kind,
+ },
+ ));
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ self.borrow(assignee_place, diag_expr_id, ty::BorrowKind::MutBorrow);
+ }
+}
+
+/// Rust doesn't permit moving fields out of a type that implements `Drop`.
+fn restrict_precision_for_drop_types<'a, 'tcx>(
+ fcx: &'a FnCtxt<'a, 'tcx>,
+ mut place: Place<'tcx>,
+ mut curr_mode: ty::UpvarCapture,
+ span: Span,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let is_copy_type = fcx.infcx.type_is_copy_modulo_regions(fcx.param_env, place.ty(), span);
+
+ if let (false, UpvarCapture::ByValue) = (is_copy_type, curr_mode) {
+ for i in 0..place.projections.len() {
+ match place.ty_before_projection(i).kind() {
+ ty::Adt(def, _) if def.destructor(fcx.tcx).is_some() => {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i);
+ break;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ (place, curr_mode)
+}
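+
+// An illustrative sketch of the restriction above, assuming a user type with a
+// destructor:
+//
+// ```ignore (illustrative)
+// struct WithDrop(String);
+// impl Drop for WithDrop { fn drop(&mut self) {} }
+//
+// let w = WithDrop(String::from("s"));
+// let c = move || {
+// println!("{}", w.0); // only `w.0` is mentioned...
+// };
+// // ...but since fields cannot be moved out of `WithDrop` (it has a
+// // destructor), the captured place is truncated back to `w`, and `w` is
+// // moved into the closure as a whole.
+// ```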
+
+/// Truncate `place` so that an `unsafe` block isn't required to capture it.
+/// - No projections are applied to raw pointers, since these require unsafe blocks. We capture
+/// them completely.
+/// - No projections are applied on top of Union ADTs, since these require unsafe blocks.
+fn restrict_precision_for_unsafe<'tcx>(
+ mut place: Place<'tcx>,
+ mut curr_mode: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ if place.base_ty.is_unsafe_ptr() {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, 0);
+ }
+
+ if place.base_ty.is_union() {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, 0);
+ }
+
+ for (i, proj) in place.projections.iter().enumerate() {
+ if proj.ty.is_unsafe_ptr() {
+ // Don't apply any projections on top of an unsafe ptr.
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i + 1);
+ break;
+ }
+
+ if proj.ty.is_union() {
+ // Don't capture precise fields of a union.
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i + 1);
+ break;
+ }
+ }
+
+ (place, curr_mode)
+}
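+
+// An illustrative sketch of the rules above, assuming `data` is some local
+// value we take a raw pointer to:
+//
+// ```ignore (illustrative)
+// let ptr: *const (u8, u8) = &data;
+// let c = move || unsafe {
+// // The closure mentions `(*ptr).0`, but no projection is kept on top of a
+// // raw pointer: the captured place is truncated to just `ptr`, and the
+// // deref stays inside the (unsafe) closure body.
+// println!("{}", (*ptr).0);
+// };
+// ```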
+
+/// Truncate projections so that the following rules are obeyed by the captured `place`:
+/// - No Index projections are captured, since arrays are captured completely.
+/// - No unsafe block is required to capture `place`.
+///
+/// Returns the truncated place and updated capture mode.
+fn restrict_capture_precision<'tcx>(
+ place: Place<'tcx>,
+ curr_mode: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let (mut place, mut curr_mode) = restrict_precision_for_unsafe(place, curr_mode);
+
+ if place.projections.is_empty() {
+ // Nothing to do here
+ return (place, curr_mode);
+ }
+
+ for (i, proj) in place.projections.iter().enumerate() {
+ match proj.kind {
+ ProjectionKind::Index => {
+ // Arrays are completely captured, so we drop Index projections
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, i);
+ return (place, curr_mode);
+ }
+ ProjectionKind::Deref => {}
+ ProjectionKind::Field(..) => {} // ignore
+ ProjectionKind::Subslice => {} // We never capture this
+ }
+ }
+
+ (place, curr_mode)
+}
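+
+// An illustrative sketch of the Index rule above:
+//
+// ```ignore (illustrative)
+// let arr = [String::new(), String::new()];
+// let idx = 0;
+// let c = move || {
+// // The closure mentions `arr[idx]`, but Index projections are never
+// // captured precisely: the whole array `arr` is captured instead.
+// let _s = &arr[idx];
+// };
+// ```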
+
+/// Truncate the place at its first `Deref` projection, and capture the result by value.
+fn adjust_for_move_closure<'tcx>(
+ mut place: Place<'tcx>,
+ mut kind: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let first_deref = place.projections.iter().position(|proj| proj.kind == ProjectionKind::Deref);
+
+ if let Some(idx) = first_deref {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut kind, idx);
+ }
+
+ (place, ty::UpvarCapture::ByValue)
+}
+
+/// Adjust the closure capture so that, when taking ownership of data, we only move
+/// data owned by the enclosing stack frame (never data reached through a deref).
+fn adjust_for_non_move_closure<'tcx>(
+ mut place: Place<'tcx>,
+ mut kind: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let contains_deref =
+ place.projections.iter().position(|proj| proj.kind == ProjectionKind::Deref);
+
+ match kind {
+ ty::UpvarCapture::ByValue => {
+ if let Some(idx) = contains_deref {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut kind, idx);
+ }
+ }
+
+ ty::UpvarCapture::ByRef(..) => {}
+ }
+
+ (place, kind)
+}
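+
+// An illustrative contrast of the two adjustments above, for a use of
+// `*ref_var` where `ref_var: &String` (the associated expressions are elided):
+//
+// ```ignore (illustrative)
+// // move closure: the place is truncated at the first deref and then
+// // captured by value, whatever the inferred kind was:
+// // (*ref_var, ByRef(ImmBorrow)) ==> (ref_var, ByValue)
+// //
+// // non-move closure: only a ByValue capture reaching through a deref is
+// // truncated (so data is never moved out of the enclosing frame), while
+// // ByRef captures are left untouched:
+// // (*ref_var, ByValue) ==> (ref_var, ByValue)
+// // (*ref_var, ByRef(ImmBorrow)) ==> (*ref_var, ByRef(ImmBorrow))
+// ```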
+
+fn construct_place_string<'tcx>(tcx: TyCtxt<'_>, place: &Place<'tcx>) -> String {
+ let variable_name = match place.base {
+ PlaceBase::Upvar(upvar_id) => var_name(tcx, upvar_id.var_path.hir_id).to_string(),
+ _ => bug!("Capture_information should only contain upvars"),
+ };
+
+ let mut projections_str = String::new();
+ for (i, item) in place.projections.iter().enumerate() {
+ let proj = match item.kind {
+ ProjectionKind::Field(a, b) => format!("({:?}, {:?})", a, b),
+ ProjectionKind::Deref => String::from("Deref"),
+ ProjectionKind::Index => String::from("Index"),
+ ProjectionKind::Subslice => String::from("Subslice"),
+ };
+ if i != 0 {
+ projections_str.push(',');
+ }
+ projections_str.push_str(proj.as_str());
+ }
+
+ format!("{variable_name}[{projections_str}]")
+}
+
+fn construct_capture_kind_reason_string<'tcx>(
+ tcx: TyCtxt<'_>,
+ place: &Place<'tcx>,
+ capture_info: &ty::CaptureInfo,
+) -> String {
+ let place_str = construct_place_string(tcx, place);
+
+ let capture_kind_str = match capture_info.capture_kind {
+ ty::UpvarCapture::ByValue => "ByValue".into(),
+ ty::UpvarCapture::ByRef(kind) => format!("{:?}", kind),
+ };
+
+ format!("{place_str} captured as {capture_kind_str} here")
+}
+
+fn construct_path_string<'tcx>(tcx: TyCtxt<'_>, place: &Place<'tcx>) -> String {
+ let place_str = construct_place_string(tcx, place);
+
+ format!("{place_str} used here")
+}
+
+fn construct_capture_info_string<'tcx>(
+ tcx: TyCtxt<'_>,
+ place: &Place<'tcx>,
+ capture_info: &ty::CaptureInfo,
+) -> String {
+ let place_str = construct_place_string(tcx, place);
+
+ let capture_kind_str = match capture_info.capture_kind {
+ ty::UpvarCapture::ByValue => "ByValue".into(),
+ ty::UpvarCapture::ByRef(kind) => format!("{:?}", kind),
+ };
+ format!("{place_str} -> {capture_kind_str}")
+}
+
+fn var_name(tcx: TyCtxt<'_>, var_hir_id: hir::HirId) -> Symbol {
+ tcx.hir().name(var_hir_id)
+}
+
+#[instrument(level = "debug", skip(tcx))]
+fn should_do_rust_2021_incompatible_closure_captures_analysis(
+ tcx: TyCtxt<'_>,
+ closure_id: hir::HirId,
+) -> bool {
+ let (level, _) =
+ tcx.lint_level_at_node(lint::builtin::RUST_2021_INCOMPATIBLE_CLOSURE_CAPTURES, closure_id);
+
+ !matches!(level, lint::Level::Allow)
+}
+
+/// Returns a tuple of two strings (s1, s2):
+/// - s1: Line of code that is needed for the migration: eg: `let _ = (&x, ...)`.
+/// - s2: Comma separated names of the variables being migrated.
+fn migration_suggestion_for_2229(
+ tcx: TyCtxt<'_>,
+ need_migrations: &[NeededMigration],
+) -> (String, String) {
+ let need_migrations_variables = need_migrations
+ .iter()
+ .map(|NeededMigration { var_hir_id: v, .. }| var_name(tcx, *v))
+ .collect::<Vec<_>>();
+
+ let migration_ref_concat =
+ need_migrations_variables.iter().map(|v| format!("&{v}")).collect::<Vec<_>>().join(", ");
+
+ let migration_string = if need_migrations.len() == 1 {
+ format!("let _ = {migration_ref_concat}")
+ } else {
+ format!("let _ = ({migration_ref_concat})")
+ };
+
+ let migrated_variables_concat =
+ need_migrations_variables.iter().map(|v| format!("`{v}`")).collect::<Vec<_>>().join(", ");
+
+ (migration_string, migrated_variables_concat)
+}
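+
+// An illustrative sketch of the two returned strings, assuming two variables
+// named `x` and `y` need migration:
+//
+// ```ignore (illustrative)
+// // s1: "let _ = (&x, &y)" -- the dummy let statement to suggest
+// // s2: "`x`, `y`" -- spliced into the diagnostic message
+// ```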
+
+/// Helper function that determines whether we need to escalate the CaptureKind from
+/// CaptureInfo A to B, and returns the escalated CaptureInfo.
+/// (Note: CaptureInfo contains a CaptureKind and the expression that led to capturing it in that way.)
+///
+/// If both `CaptureKind`s are considered equivalent, then the CaptureInfo is selected based
+/// on the `CaptureInfo` containing an associated `capture_kind_expr_id`.
+///
+/// It is the caller's duty to figure out which path_expr_id to use.
+///
+/// If both the CaptureKind and Expression are considered to be equivalent,
+/// then `CaptureInfo` A is preferred. This can be useful in cases where we want to prioritize
+/// expressions reported back to the user as part of diagnostics based on which appears earlier
+/// in the closure. This can be achieved simply by calling
+/// `determine_capture_info(existing_info, current_info)`. This works out because the
+/// expressions that occur earlier in the closure body are processed before the current expression.
+/// Consider the following example
+/// ```rust,no_run
+/// struct Point { x: i32, y: i32 }
+/// let mut p = Point { x: 10, y: 10 };
+///
+/// let c = || {
+/// p.x += 10;
+/// // ^ E1 ^
+/// // ...
+/// // More code
+/// // ...
+/// p.x += 10; // E2
+/// // ^ E2 ^
+/// };
+/// ```
+/// `CaptureKind` associated with both `E1` and `E2` will be ByRef(MutBorrow),
+/// and both have an expression associated, however for diagnostics we prefer reporting
+/// `E1` since it appears earlier in the closure body. When `E2` is being processed we
+/// would've already handled `E1`, and have an existing capture_information for it.
+/// Calling `determine_capture_info(existing_info_e1, current_info_e2)` will return
+/// `existing_info_e1` in this case, allowing us to point to `E1` in case of diagnostics.
+fn determine_capture_info(
+ capture_info_a: ty::CaptureInfo,
+ capture_info_b: ty::CaptureInfo,
+) -> ty::CaptureInfo {
+ // If the capture kinds are equivalent, then we don't need to escalate and can compare
+ // the expressions.
+ let eq_capture_kind = match (capture_info_a.capture_kind, capture_info_b.capture_kind) {
+ (ty::UpvarCapture::ByValue, ty::UpvarCapture::ByValue) => true,
+ (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => ref_a == ref_b,
+ (ty::UpvarCapture::ByValue, _) | (ty::UpvarCapture::ByRef(_), _) => false,
+ };
+
+ if eq_capture_kind {
+ match (capture_info_a.capture_kind_expr_id, capture_info_b.capture_kind_expr_id) {
+ (Some(_), _) | (None, None) => capture_info_a,
+ (None, Some(_)) => capture_info_b,
+ }
+ } else {
+ // We select the CaptureKind which ranks higher based on the following priority order:
+ // ByValue > MutBorrow > UniqueImmBorrow > ImmBorrow
+ match (capture_info_a.capture_kind, capture_info_b.capture_kind) {
+ (ty::UpvarCapture::ByValue, _) => capture_info_a,
+ (_, ty::UpvarCapture::ByValue) => capture_info_b,
+ (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => {
+ match (ref_a, ref_b) {
+ // Take LHS:
+ (ty::UniqueImmBorrow | ty::MutBorrow, ty::ImmBorrow)
+ | (ty::MutBorrow, ty::UniqueImmBorrow) => capture_info_a,
+
+ // Take RHS:
+ (ty::ImmBorrow, ty::UniqueImmBorrow | ty::MutBorrow)
+ | (ty::UniqueImmBorrow, ty::MutBorrow) => capture_info_b,
+
+ (ty::ImmBorrow, ty::ImmBorrow)
+ | (ty::UniqueImmBorrow, ty::UniqueImmBorrow)
+ | (ty::MutBorrow, ty::MutBorrow) => {
+ bug!("Expected unequal capture kinds");
+ }
+ }
+ }
+ }
+ }
+}
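+
+// An illustrative sketch of the escalation order above, writing only the
+// capture kinds (the associated expressions are elided):
+//
+// ```ignore (illustrative)
+// // ByValue > MutBorrow > UniqueImmBorrow > ImmBorrow
+// determine_capture_info(ImmBorrow, MutBorrow) // -> MutBorrow
+// determine_capture_info(MutBorrow, ByValue) // -> ByValue
+// determine_capture_info(ImmBorrow, ImmBorrow) // -> the first one, so the
+// // earlier expression is kept
+// ```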
+
+/// Truncates `place` to have up to `len` projections.
+/// `curr_mode` is the current required capture kind for the place.
+/// Returns the truncated `place` and the updated required capture kind.
+///
+/// Note: Capture kind changes from `MutBorrow` to `UniqueImmBorrow` if the truncated part of the `place`
+/// contained `Deref` of `&mut`.
+fn truncate_place_to_len_and_update_capture_kind<'tcx>(
+ place: &mut Place<'tcx>,
+ curr_mode: &mut ty::UpvarCapture,
+ len: usize,
+) {
+ let is_mut_ref = |ty: Ty<'_>| matches!(ty.kind(), ty::Ref(.., hir::Mutability::Mut));
+
+ // If the truncated part of the place contains `Deref` of a `&mut` then convert MutBorrow ->
+ // UniqueImmBorrow
+ // Note that if the place contained a Deref of a raw pointer it wouldn't have been
+ // MutBorrow, so we don't need to worry about that case here.
+ match curr_mode {
+ ty::UpvarCapture::ByRef(ty::BorrowKind::MutBorrow) => {
+ for i in len..place.projections.len() {
+ if place.projections[i].kind == ProjectionKind::Deref
+ && is_mut_ref(place.ty_before_projection(i))
+ {
+ *curr_mode = ty::UpvarCapture::ByRef(ty::BorrowKind::UniqueImmBorrow);
+ break;
+ }
+ }
+ }
+
+ ty::UpvarCapture::ByRef(..) => {}
+ ty::UpvarCapture::ByValue => {}
+ }
+
+ place.projections.truncate(len);
+}
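+
+// An illustrative sketch of the MutBorrow -> UniqueImmBorrow downgrade above:
+// suppose the captured place is `(*x.f).g` where `x.f: &mut Data`, i.e. the
+// projections are [Field(f), Deref, Field(g)], and we truncate to length 1:
+//
+// ```ignore (illustrative)
+// // before: place = (*x.f).g, mode = ByRef(MutBorrow)
+// truncate_place_to_len_and_update_capture_kind(&mut place, &mut mode, 1);
+// // after: place = x.f, mode = ByRef(UniqueImmBorrow), because the
+// // truncated part contained a deref of a `&mut`.
+// ```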
+
+/// Determines the Ancestry relationship of Place A relative to Place B
+///
+/// `PlaceAncestryRelation::Ancestor` implies Place A is ancestor of Place B
+/// `PlaceAncestryRelation::Descendant` implies Place A is descendant of Place B
+/// `PlaceAncestryRelation::Divergent` implies neither of them is the ancestor of the other.
+fn determine_place_ancestry_relation<'tcx>(
+ place_a: &Place<'tcx>,
+ place_b: &Place<'tcx>,
+) -> PlaceAncestryRelation {
+ // If Place A and Place B don't start off from the same root variable, they are divergent.
+ if place_a.base != place_b.base {
+ return PlaceAncestryRelation::Divergent;
+ }
+
+ // Assume the length of projections_a is n
+ let projections_a = &place_a.projections;
+
+ // Assume the length of projections_b is m
+ let projections_b = &place_b.projections;
+
+ let same_initial_projections =
+ iter::zip(projections_a, projections_b).all(|(proj_a, proj_b)| proj_a.kind == proj_b.kind);
+
+ if same_initial_projections {
+ use std::cmp::Ordering;
+
+ // First min(n, m) projections are the same
+ // Select Ancestor/Descendant
+ match projections_b.len().cmp(&projections_a.len()) {
+ Ordering::Greater => PlaceAncestryRelation::Ancestor,
+ Ordering::Equal => PlaceAncestryRelation::SamePlace,
+ Ordering::Less => PlaceAncestryRelation::Descendant,
+ }
+ } else {
+ PlaceAncestryRelation::Divergent
+ }
+}
+
+/// Reduces the precision of the captured place when the precision doesn't yield any benefit from
+/// a borrow-checking perspective, allowing us to save on the size of the capture.
+///
+/// Fields that are read through a shared reference will always be read via a shared ref or a copy,
+/// and therefore capturing precise paths yields no benefit. This optimization truncates the
+/// rightmost deref of the capture if the deref is applied to a shared ref.
+///
+/// The reason we only drop the last deref is the following edge case:
+///
+/// ```
+/// # struct A { field_of_a: Box<i32> }
+/// # struct B {}
+/// # struct C<'a>(&'a i32);
+/// struct MyStruct<'a> {
+/// a: &'static A,
+/// b: B,
+/// c: C<'a>,
+/// }
+///
+/// fn foo<'a, 'b>(m: &'a MyStruct<'b>) -> impl FnMut() + 'static {
+/// || drop(&*m.a.field_of_a)
+/// // Here we really do want to capture `*m.a` because that outlives `'static`
+///
+/// // If we captured `m` instead, the closure would no longer outlive `'static`;
+/// // it would be constrained to `'a`.
+/// }
+/// ```
+fn truncate_capture_for_optimization<'tcx>(
+ mut place: Place<'tcx>,
+ mut curr_mode: ty::UpvarCapture,
+) -> (Place<'tcx>, ty::UpvarCapture) {
+ let is_shared_ref = |ty: Ty<'_>| matches!(ty.kind(), ty::Ref(.., hir::Mutability::Not));
+
+ // Find the right-most deref (if any). All the projections that come after this
+ // are fields or other "in-place pointer adjustments"; these therefore refer to
+ // data owned by whatever pointer is being dereferenced here.
+ let idx = place.projections.iter().rposition(|proj| ProjectionKind::Deref == proj.kind);
+
+ match idx {
+ // If that pointer is a shared reference, then we don't need those fields.
+ Some(idx) if is_shared_ref(place.ty_before_projection(idx)) => {
+ truncate_place_to_len_and_update_capture_kind(&mut place, &mut curr_mode, idx + 1)
+ }
+ None | Some(_) => {}
+ }
+
+ (place, curr_mode)
+}
+
+/// Precise capture is enabled if the feature gate `capture_disjoint_fields` is enabled or if
+/// the user is using Rust Edition 2021 or later.
+///
+/// `span` is the span of the closure.
+fn enable_precise_capture(tcx: TyCtxt<'_>, span: Span) -> bool {
+ // We use the closure's span here so that, if the closure was generated by a macro with a
+ // different edition, that macro's edition (not the crate's) decides the capture behavior.
+ tcx.features().capture_disjoint_fields || span.rust_2021()
+}
diff --git a/compiler/rustc_typeck/src/check/wfcheck.rs b/compiler/rustc_typeck/src/check/wfcheck.rs
new file mode 100644
index 000000000..d0334cd0d
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/wfcheck.rs
@@ -0,0 +1,1973 @@
+use crate::check::regionck::OutlivesEnvironmentExt;
+use crate::constrained_generic_params::{identify_constrained_generic_params, Parameter};
+use rustc_ast as ast;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::{pluralize, struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::ItemKind;
+use rustc_infer::infer::outlives::env::{OutlivesEnvironment, RegionBoundPairs};
+use rustc_infer::infer::outlives::obligations::TypeOutlives;
+use rustc_infer::infer::{self, InferCtxt, TyCtxtInferExt};
+use rustc_infer::traits::Normalized;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::{GenericArgKind, InternalSubsts, Subst};
+use rustc_middle::ty::trait_def::TraitSpecializationKind;
+use rustc_middle::ty::{
+ self, AdtKind, DefIdTree, GenericParamDefKind, ToPredicate, Ty, TyCtxt, TypeFoldable,
+ TypeSuperVisitable, TypeVisitable, TypeVisitor,
+};
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::{sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_trait_selection::autoderef::Autoderef;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
+use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _;
+use rustc_trait_selection::traits::query::normalize::AtExt;
+use rustc_trait_selection::traits::query::NoSolution;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCauseCode, ObligationCtxt, WellFormedLoc,
+};
+
+use std::cell::LazyCell;
+use std::convert::TryInto;
+use std::iter;
+use std::ops::{ControlFlow, Deref};
+
+pub(super) struct WfCheckingCtxt<'a, 'tcx> {
+ pub(super) ocx: ObligationCtxt<'a, 'tcx>,
+ span: Span,
+ body_id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+}
+impl<'a, 'tcx> Deref for WfCheckingCtxt<'a, 'tcx> {
+ type Target = ObligationCtxt<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.ocx
+ }
+}
+
+impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.ocx.infcx.tcx
+ }
+
+ fn normalize<T>(&self, span: Span, loc: Option<WellFormedLoc>, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.ocx.normalize(
+ ObligationCause::new(span, self.body_id, ObligationCauseCode::WellFormed(loc)),
+ self.param_env,
+ value,
+ )
+ }
+
+ fn register_wf_obligation(
+ &self,
+ span: Span,
+ loc: Option<WellFormedLoc>,
+ arg: ty::GenericArg<'tcx>,
+ ) {
+ let cause =
+ traits::ObligationCause::new(span, self.body_id, ObligationCauseCode::WellFormed(loc));
+ self.ocx.register_obligation(traits::Obligation::new(
+ cause,
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(arg)).to_predicate(self.tcx()),
+ ));
+ }
+}
+
+pub(super) fn enter_wf_checking_ctxt<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ body_def_id: LocalDefId,
+ f: F,
+) where
+ F: for<'a> FnOnce(&WfCheckingCtxt<'a, 'tcx>) -> FxHashSet<Ty<'tcx>>,
+{
+ let param_env = tcx.param_env(body_def_id);
+ let body_id = tcx.hir().local_def_id_to_hir_id(body_def_id);
+ tcx.infer_ctxt().enter(|ref infcx| {
+ let ocx = ObligationCtxt::new(infcx);
+ let mut wfcx = WfCheckingCtxt { ocx, span, body_id, param_env };
+
+ if !tcx.features().trivial_bounds {
+ wfcx.check_false_global_bounds()
+ }
+ let wf_tys = f(&mut wfcx);
+ let errors = wfcx.select_all_or_error();
+ if !errors.is_empty() {
+ infcx.report_fulfillment_errors(&errors, None, false);
+ return;
+ }
+
+ let mut outlives_environment = OutlivesEnvironment::new(param_env);
+ outlives_environment.add_implied_bounds(infcx, wf_tys, body_id);
+ infcx.check_region_obligations_and_report_errors(body_def_id, &outlives_environment);
+ })
+}
+
+fn check_well_formed(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ let node = tcx.hir().expect_owner(def_id);
+ match node {
+ hir::OwnerNode::Crate(_) => {}
+ hir::OwnerNode::Item(item) => check_item(tcx, item),
+ hir::OwnerNode::TraitItem(item) => check_trait_item(tcx, item),
+ hir::OwnerNode::ImplItem(item) => check_impl_item(tcx, item),
+ hir::OwnerNode::ForeignItem(item) => check_foreign_item(tcx, item),
+ }
+
+ if let Some(generics) = node.generics() {
+ for param in generics.params {
+ check_param_wf(tcx, param)
+ }
+ }
+}
+
+/// Checks that the field types (in a struct def'n) or argument types (in an enum def'n) are
+/// well-formed, meaning that they do not require any constraints not declared in the struct
+/// definition itself. For example, this definition would be illegal:
+///
+/// ```rust
+/// struct Ref<'a, T> { x: &'a T }
+/// ```
+///
+/// because the type did not declare that `T: 'a`.
+///
+/// We do this check as a pre-pass before checking fn bodies because if these constraints are
+/// not included it frequently leads to confusing errors in fn bodies. So it's better to check
+/// the types first.
+#[instrument(skip(tcx), level = "debug")]
+fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) {
+ let def_id = item.def_id;
+
+ debug!(
+ ?item.def_id,
+ item.name = ? tcx.def_path_str(def_id.to_def_id())
+ );
+
+ match item.kind {
+ // Right now we check that every default trait implementation
+ // has an implementation of itself. Basically, a case like:
+ //
+ // impl Trait for T {}
+ //
+ // has a requirement of `T: Trait` which was required for default
+ // method implementations. Although this could be improved now that
+ // there's a better infrastructure in place for this, it's being left
+ // for follow-up work.
+ //
+ // Since there's such a requirement, we need to check *just* positive
+ // implementations, otherwise things like:
+ //
+ // impl !Send for T {}
+ //
+ // won't be allowed unless there's an *explicit* implementation of `Send`
+ // for `T`
+ hir::ItemKind::Impl(ref impl_) => {
+ let is_auto = tcx
+ .impl_trait_ref(item.def_id)
+ .map_or(false, |trait_ref| tcx.trait_is_auto(trait_ref.def_id));
+ if let (hir::Defaultness::Default { .. }, true) = (impl_.defaultness, is_auto) {
+ let sp = impl_.of_trait.as_ref().map_or(item.span, |t| t.path.span);
+ let mut err =
+ tcx.sess.struct_span_err(sp, "impls of auto traits cannot be default");
+ err.span_labels(impl_.defaultness_span, "default because of this");
+ err.span_label(sp, "auto trait");
+ err.emit();
+ }
+ // We match on both `ty::ImplPolarity` and `ast::ImplPolarity` just to get the `!` span.
+ match (tcx.impl_polarity(def_id), impl_.polarity) {
+ (ty::ImplPolarity::Positive, _) => {
+ check_impl(tcx, item, impl_.self_ty, &impl_.of_trait, impl_.constness);
+ }
+ (ty::ImplPolarity::Negative, ast::ImplPolarity::Negative(span)) => {
+ // FIXME(#27579): what amount of WF checking do we need for neg impls?
+ if let hir::Defaultness::Default { .. } = impl_.defaultness {
+ let mut spans = vec![span];
+ spans.extend(impl_.defaultness_span);
+ struct_span_err!(
+ tcx.sess,
+ spans,
+ E0750,
+ "negative impls cannot be default impls"
+ )
+ .emit();
+ }
+ }
+ (ty::ImplPolarity::Reservation, _) => {
+ // FIXME: what amount of WF checking do we need for reservation impls?
+ }
+ _ => unreachable!(),
+ }
+ }
+ hir::ItemKind::Fn(ref sig, ..) => {
+ check_item_fn(tcx, item.def_id, item.ident, item.span, sig.decl);
+ }
+ hir::ItemKind::Static(ty, ..) => {
+ check_item_type(tcx, item.def_id, ty.span, false);
+ }
+ hir::ItemKind::Const(ty, ..) => {
+ check_item_type(tcx, item.def_id, ty.span, false);
+ }
+ hir::ItemKind::Struct(ref struct_def, ref ast_generics) => {
+ check_type_defn(tcx, item, false, |wfcx| vec![wfcx.non_enum_variant(struct_def)]);
+
+ check_variances_for_type_defn(tcx, item, ast_generics);
+ }
+ hir::ItemKind::Union(ref struct_def, ref ast_generics) => {
+ check_type_defn(tcx, item, true, |wfcx| vec![wfcx.non_enum_variant(struct_def)]);
+
+ check_variances_for_type_defn(tcx, item, ast_generics);
+ }
+ hir::ItemKind::Enum(ref enum_def, ref ast_generics) => {
+ check_type_defn(tcx, item, true, |wfcx| wfcx.enum_variants(enum_def));
+
+ check_variances_for_type_defn(tcx, item, ast_generics);
+ }
+ hir::ItemKind::Trait(..) => {
+ check_trait(tcx, item);
+ }
+ hir::ItemKind::TraitAlias(..) => {
+ check_trait(tcx, item);
+ }
+ // `ForeignItem`s are handled separately.
+ hir::ItemKind::ForeignMod { .. } => {}
+ _ => {}
+ }
+}
+
+fn check_foreign_item(tcx: TyCtxt<'_>, item: &hir::ForeignItem<'_>) {
+ let def_id = item.def_id;
+
+ debug!(
+ ?item.def_id,
+ item.name = ? tcx.def_path_str(def_id.to_def_id())
+ );
+
+ match item.kind {
+ hir::ForeignItemKind::Fn(decl, ..) => {
+ check_item_fn(tcx, item.def_id, item.ident, item.span, decl)
+ }
+ hir::ForeignItemKind::Static(ty, ..) => check_item_type(tcx, item.def_id, ty.span, true),
+ hir::ForeignItemKind::Type => (),
+ }
+}
+
+fn check_trait_item(tcx: TyCtxt<'_>, trait_item: &hir::TraitItem<'_>) {
+ let def_id = trait_item.def_id;
+
+ let (method_sig, span) = match trait_item.kind {
+ hir::TraitItemKind::Fn(ref sig, _) => (Some(sig), trait_item.span),
+ hir::TraitItemKind::Type(_bounds, Some(ty)) => (None, ty.span),
+ _ => (None, trait_item.span),
+ };
+ check_object_unsafe_self_trait_by_name(tcx, trait_item);
+ check_associated_item(tcx, trait_item.def_id, span, method_sig);
+
+ let encl_trait_def_id = tcx.local_parent(def_id);
+ let encl_trait = tcx.hir().expect_item(encl_trait_def_id);
+ let encl_trait_def_id = encl_trait.def_id.to_def_id();
+ let fn_lang_item_name = if Some(encl_trait_def_id) == tcx.lang_items().fn_trait() {
+ Some("fn")
+ } else if Some(encl_trait_def_id) == tcx.lang_items().fn_mut_trait() {
+ Some("fn_mut")
+ } else {
+ None
+ };
+
+ if let (Some(fn_lang_item_name), "call") =
+ (fn_lang_item_name, trait_item.ident.name.to_ident_string().as_str())
+ {
+ // We are looking at the `call` function of the `fn` or `fn_mut` lang item.
+ // Do some rudimentary sanity checking to avoid an ICE later (issue #83471).
+ if let Some(hir::FnSig { decl, span, .. }) = method_sig {
+ if let [self_ty, _] = decl.inputs {
+ if !matches!(self_ty.kind, hir::TyKind::Rptr(_, _)) {
+ tcx.sess
+ .struct_span_err(
+ self_ty.span,
+ &format!(
+ "first argument of `call` in `{fn_lang_item_name}` lang item must be a reference",
+ ),
+ )
+ .emit();
+ }
+ } else {
+ tcx.sess
+ .struct_span_err(
+ *span,
+ &format!(
+ "`call` function in `{fn_lang_item_name}` lang item takes exactly two arguments",
+ ),
+ )
+ .emit();
+ }
+ } else {
+ tcx.sess
+ .struct_span_err(
+ trait_item.span,
+ &format!(
+ "`call` trait item in `{fn_lang_item_name}` lang item must be a function",
+ ),
+ )
+ .emit();
+ }
+ }
+}
+
+/// Require that the user writes where clauses on GATs for the implicit
+/// outlives bounds involving trait parameters in trait functions and
+/// lifetimes passed as GAT substs. See `self-outlives-lint` test.
+///
+/// We use the following trait as an example throughout this function:
+/// ```rust,ignore (this code fails due to this lint)
+/// trait IntoIter {
+/// type Iter<'a>: Iterator<Item = Self::Item<'a>>;
+/// type Item<'a>;
+/// fn into_iter<'a>(&'a self) -> Self::Iter<'a>;
+/// }
+/// ```
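+///
+/// For this example, the suggested fix amounts to adding `where Self: 'a` to
+/// both GATs (a sketch of the post-fix trait, not emitted verbatim by the lint):
+/// ```rust,ignore (illustrative)
+/// trait IntoIter {
+///     type Iter<'a>: Iterator<Item = Self::Item<'a>> where Self: 'a;
+///     type Item<'a> where Self: 'a;
+///     fn into_iter<'a>(&'a self) -> Self::Iter<'a>;
+/// }
+/// ```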
+fn check_gat_where_clauses(tcx: TyCtxt<'_>, associated_items: &[hir::TraitItemRef]) {
+ // Associates every GAT's def_id with a list of possibly missing bounds detected by this lint.
+ let mut required_bounds_by_item = FxHashMap::default();
+
+ // Loop over all GATs together, because if this lint suggests adding a where-clause bound
+ // to one GAT, it might then require us to add an additional bound on another GAT.
+ // In our `IntoIter` example, we discover a missing `Self: 'a` bound on `Iter<'a>`, which
+ // then in a second loop adds a `Self: 'a` bound to `Item` due to the relationship between
+ // those GATs.
+ loop {
+ let mut should_continue = false;
+ for gat_item in associated_items {
+ let gat_def_id = gat_item.id.def_id;
+ let gat_item = tcx.associated_item(gat_def_id);
+ // If this item is not an assoc ty, or has no substs, then it's not a GAT
+ if gat_item.kind != ty::AssocKind::Type {
+ continue;
+ }
+ let gat_generics = tcx.generics_of(gat_def_id);
+ // FIXME(jackh726): we can also warn in the more general case
+ if gat_generics.params.is_empty() {
+ continue;
+ }
+
+ // Gather the bounds with which all other items inside of this trait constrain the GAT.
+ // This is calculated by taking the intersection of the bounds that each item
+ // constrains the GAT with individually.
+ let mut new_required_bounds: Option<FxHashSet<ty::Predicate<'_>>> = None;
+ for item in associated_items {
+ let item_def_id = item.id.def_id;
+ // Skip our own GAT, since it does not constrain itself at all.
+ if item_def_id == gat_def_id {
+ continue;
+ }
+
+ let item_hir_id = item.id.hir_id();
+ let param_env = tcx.param_env(item_def_id);
+
+ let item_required_bounds = match item.kind {
+ // In our example, this corresponds to `into_iter` method
+ hir::AssocItemKind::Fn { .. } => {
+ // For methods, we check the function signature's return type for any GATs
+ // to constrain. In the `into_iter` case, we see that the return type
+ // `Self::Iter<'a>` is a GAT we want to gather any potential missing bounds from.
+ let sig: ty::FnSig<'_> = tcx.liberate_late_bound_regions(
+ item_def_id.to_def_id(),
+ tcx.fn_sig(item_def_id),
+ );
+ gather_gat_bounds(
+ tcx,
+ param_env,
+ item_hir_id,
+ sig.output(),
+ // We also assume that all of the function signature's parameter types
+ // are well formed.
+ &sig.inputs().iter().copied().collect(),
+ gat_def_id,
+ gat_generics,
+ )
+ }
+ // In our example, this corresponds to the `Iter` and `Item` associated types
+ hir::AssocItemKind::Type => {
+ // If our associated item is a GAT with missing bounds, add them to
+ // the param-env here. This allows this GAT to propagate missing bounds
+ // to other GATs.
+ let param_env = augment_param_env(
+ tcx,
+ param_env,
+ required_bounds_by_item.get(&item_def_id),
+ );
+ gather_gat_bounds(
+ tcx,
+ param_env,
+ item_hir_id,
+ tcx.explicit_item_bounds(item_def_id)
+ .iter()
+ .copied()
+ .collect::<Vec<_>>(),
+ &FxHashSet::default(),
+ gat_def_id,
+ gat_generics,
+ )
+ }
+ hir::AssocItemKind::Const => None,
+ };
+
+ if let Some(item_required_bounds) = item_required_bounds {
+ // Take the intersection of the required bounds for this GAT, and
+ // the item_required_bounds which are the ones implied by just
+ // this item alone.
+ // This is why we use an Option<_>, since we need to distinguish
+ // the empty set of bounds from the _uninitialized_ set of bounds.
+ if let Some(new_required_bounds) = &mut new_required_bounds {
+ new_required_bounds.retain(|b| item_required_bounds.contains(b));
+ } else {
+ new_required_bounds = Some(item_required_bounds);
+ }
+ }
+ }
+
+ if let Some(new_required_bounds) = new_required_bounds {
+ let required_bounds = required_bounds_by_item.entry(gat_def_id).or_default();
+ if new_required_bounds.into_iter().any(|p| required_bounds.insert(p)) {
+ // Iterate until our required_bounds no longer change
+ // Since they changed here, we should continue the loop
+ should_continue = true;
+ }
+ }
+ }
+ // We know that this loop will eventually halt, since we only set `should_continue` if the
+ // `required_bounds` for this item grows. Since we are not creating any new region or type
+ // variables, the set of all region and type bounds that we could ever insert is limited
+ // by the number of unique types and regions we observe in a given item.
+ if !should_continue {
+ break;
+ }
+ }
+
+ for (gat_def_id, required_bounds) in required_bounds_by_item {
+ let gat_item_hir = tcx.hir().expect_trait_item(gat_def_id);
+ debug!(?required_bounds);
+ let param_env = tcx.param_env(gat_def_id);
+ let gat_hir = gat_item_hir.hir_id();
+
+ let mut unsatisfied_bounds: Vec<_> = required_bounds
+ .into_iter()
+ .filter(|clause| match clause.kind().skip_binder() {
+ ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(a, b)) => {
+ !region_known_to_outlive(tcx, gat_hir, param_env, &FxHashSet::default(), a, b)
+ }
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(a, b)) => {
+ !ty_known_to_outlive(tcx, gat_hir, param_env, &FxHashSet::default(), a, b)
+ }
+ _ => bug!("Unexpected PredicateKind"),
+ })
+ .map(|clause| clause.to_string())
+ .collect();
+
+ // We sort so that order is predictable
+ unsatisfied_bounds.sort();
+
+ if !unsatisfied_bounds.is_empty() {
+ let plural = pluralize!(unsatisfied_bounds.len());
+ let mut err = tcx.sess.struct_span_err(
+ gat_item_hir.span,
+ &format!("missing required bound{} on `{}`", plural, gat_item_hir.ident),
+ );
+
+ let suggestion = format!(
+ "{} {}",
+ gat_item_hir.generics.add_where_or_trailing_comma(),
+ unsatisfied_bounds.join(", "),
+ );
+ err.span_suggestion(
+ gat_item_hir.generics.tail_span_for_predicate_suggestion(),
+ &format!("add the required where clause{plural}"),
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+
+ let bound =
+ if unsatisfied_bounds.len() > 1 { "these bounds are" } else { "this bound is" };
+ err.note(&format!(
+ "{} currently required to ensure that impls have maximum flexibility",
+ bound
+ ));
+ err.note(
+ "we are soliciting feedback, see issue #87479 \
+ <https://github.com/rust-lang/rust/issues/87479> \
+ for more information",
+ );
+
+ err.emit();
+ }
+ }
+}
+
+/// Add a new set of predicates to the caller_bounds of an existing param_env.
+fn augment_param_env<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ new_predicates: Option<&FxHashSet<ty::Predicate<'tcx>>>,
+) -> ty::ParamEnv<'tcx> {
+ let Some(new_predicates) = new_predicates else {
+ return param_env;
+ };
+
+ if new_predicates.is_empty() {
+ return param_env;
+ }
+
+ let bounds =
+ tcx.mk_predicates(param_env.caller_bounds().iter().chain(new_predicates.iter().cloned()));
+ // FIXME(compiler-errors): Perhaps there is a case where we need to normalize this
+ // i.e. traits::normalize_param_env_or_error
+ ty::ParamEnv::new(bounds, param_env.reveal(), param_env.constness())
+}
+
+/// We use the following trait as an example throughout this function.
+/// Specifically, let's assume that `to_check` here is the return type
+/// of `into_iter`, and the GAT we are checking this for is `Iter`.
+/// ```rust,ignore (this code fails due to this lint)
+/// trait IntoIter {
+/// type Iter<'a>: Iterator<Item = Self::Item<'a>>;
+/// type Item<'a>;
+/// fn into_iter<'a>(&'a self) -> Self::Iter<'a>;
+/// }
+/// ```
+fn gather_gat_bounds<'tcx, T: TypeFoldable<'tcx>>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ item_hir: hir::HirId,
+ to_check: T,
+ wf_tys: &FxHashSet<Ty<'tcx>>,
+ gat_def_id: LocalDefId,
+ gat_generics: &'tcx ty::Generics,
+) -> Option<FxHashSet<ty::Predicate<'tcx>>> {
+ // The bounds that we would require from `to_check`
+ let mut bounds = FxHashSet::default();
+
+ let (regions, types) = GATSubstCollector::visit(gat_def_id.to_def_id(), to_check);
+
+ // If both regions and types are empty, then this GAT isn't in the
+ // set of types we are checking, and we shouldn't try to do clause analysis
+ // (particularly, doing so would end up with an empty set of clauses,
+ // since the current method would require none, and we take the
+ // intersection of requirements of all methods)
+ if types.is_empty() && regions.is_empty() {
+ return None;
+ }
+
+ for (region_a, region_a_idx) in &regions {
+ // Ignore `'static` lifetimes for the purpose of this lint: we know `'static`
+ // outlives everything, and so it doesn't give meaningful
+ // clues
+ if let ty::ReStatic = **region_a {
+ continue;
+ }
+ // For each region argument (e.g., `'a` in our example), check for a
+ // relationship to the type arguments (e.g., `Self`). If there is an
+ // outlives relationship (`Self: 'a`), then we want to ensure that is
+ // reflected in a where clause on the GAT itself.
+ for (ty, ty_idx) in &types {
+ // In our example, requires that `Self: 'a`
+ if ty_known_to_outlive(tcx, item_hir, param_env, &wf_tys, *ty, *region_a) {
+ debug!(?ty_idx, ?region_a_idx);
+ debug!("required clause: {ty} must outlive {region_a}");
+ // Translate into the generic parameters of the GAT. In
+ // our example, the type was `Self`, which will also be
+ // `Self` in the GAT.
+ let ty_param = gat_generics.param_at(*ty_idx, tcx);
+ let ty_param = tcx
+ .mk_ty(ty::Param(ty::ParamTy { index: ty_param.index, name: ty_param.name }));
+ // Same for the region. In our example, the method's `'a`
+ // corresponds to the GAT's own `'a` parameter.
+ let region_param = gat_generics.param_at(*region_a_idx, tcx);
+ let region_param =
+ tcx.mk_region(ty::RegionKind::ReEarlyBound(ty::EarlyBoundRegion {
+ def_id: region_param.def_id,
+ index: region_param.index,
+ name: region_param.name,
+ }));
+ // The predicate we expect to see. (In our example,
+ // `Self: 'a`.)
+ let clause =
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty_param, region_param));
+ let clause = tcx.mk_predicate(ty::Binder::dummy(clause));
+ bounds.insert(clause);
+ }
+ }
+
+ // For each region argument (e.g., `'a` in our example), also check for a
+ // relationship to the other region arguments. If there is an outlives
+ // relationship, then we want to ensure that is reflected in the where clause
+ // on the GAT itself.
+ for (region_b, region_b_idx) in &regions {
+ // Again, skip `'static` because it outlives everything. Also, we trivially
+ // know that a region outlives itself.
+ if ty::ReStatic == **region_b || region_a == region_b {
+ continue;
+ }
+ if region_known_to_outlive(tcx, item_hir, param_env, &wf_tys, *region_a, *region_b) {
+ debug!(?region_a_idx, ?region_b_idx);
+ debug!("required clause: {region_a} must outlive {region_b}");
+ // Translate into the generic parameters of the GAT.
+ let region_a_param = gat_generics.param_at(*region_a_idx, tcx);
+ let region_a_param =
+ tcx.mk_region(ty::RegionKind::ReEarlyBound(ty::EarlyBoundRegion {
+ def_id: region_a_param.def_id,
+ index: region_a_param.index,
+ name: region_a_param.name,
+ }));
+ // Same for the other region.
+ let region_b_param = gat_generics.param_at(*region_b_idx, tcx);
+ let region_b_param =
+ tcx.mk_region(ty::RegionKind::ReEarlyBound(ty::EarlyBoundRegion {
+ def_id: region_b_param.def_id,
+ index: region_b_param.index,
+ name: region_b_param.name,
+ }));
+ // The predicate we expect to see.
+ let clause = ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(
+ region_a_param,
+ region_b_param,
+ ));
+ let clause = tcx.mk_predicate(ty::Binder::dummy(clause));
+ bounds.insert(clause);
+ }
+ }
+ }
+
+ Some(bounds)
+}
+
+/// Given a known `param_env` and a set of well-formed types, can we prove that
+/// `ty` outlives `region`?
+fn ty_known_to_outlive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ wf_tys: &FxHashSet<Ty<'tcx>>,
+ ty: Ty<'tcx>,
+ region: ty::Region<'tcx>,
+) -> bool {
+ resolve_regions_with_wf_tys(tcx, id, param_env, &wf_tys, |infcx, region_bound_pairs| {
+ let origin = infer::RelateParamBound(DUMMY_SP, ty, None);
+ let outlives = &mut TypeOutlives::new(infcx, tcx, region_bound_pairs, None, param_env);
+ outlives.type_must_outlive(origin, ty, region);
+ })
+}
+
+/// Given a known `param_env` and a set of well-formed types, can we prove that
+/// `region_a` outlives `region_b`?
+fn region_known_to_outlive<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ wf_tys: &FxHashSet<Ty<'tcx>>,
+ region_a: ty::Region<'tcx>,
+ region_b: ty::Region<'tcx>,
+) -> bool {
+ resolve_regions_with_wf_tys(tcx, id, param_env, &wf_tys, |mut infcx, _| {
+ use rustc_infer::infer::outlives::obligations::TypeOutlivesDelegate;
+ let origin = infer::RelateRegionParamBound(DUMMY_SP);
+ // `region_a: region_b` -> `region_b <= region_a`
+ infcx.push_sub_region_constraint(origin, region_b, region_a);
+ })
+}
+
+/// Given a known `param_env` and a set of well-formed types, set up an
+/// `InferCtxt`, call the passed function (to e.g. set up region constraints
+/// to be tested), then resolve regions and return whether any errors occurred.
+fn resolve_regions_with_wf_tys<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ id: hir::HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ wf_tys: &FxHashSet<Ty<'tcx>>,
+ add_constraints: impl for<'a> FnOnce(&'a InferCtxt<'a, 'tcx>, &'a RegionBoundPairs<'tcx>),
+) -> bool {
+ // Unfortunately, we have to use a new `InferCtxt` each call, because
+ // region constraints get added and solved there and we need to test each
+ // call individually.
+ tcx.infer_ctxt().enter(|infcx| {
+ let mut outlives_environment = OutlivesEnvironment::new(param_env);
+ outlives_environment.add_implied_bounds(&infcx, wf_tys.clone(), id);
+ let region_bound_pairs = outlives_environment.region_bound_pairs();
+
+ add_constraints(&infcx, region_bound_pairs);
+
+ let errors = infcx.resolve_regions(&outlives_environment);
+
+ debug!(?errors, "errors");
+
+ // If we were able to prove that the type outlives the region without
+ // an error, it must be because of the implied or explicit bounds...
+ errors.is_empty()
+ })
+}
+
+/// TypeVisitor that looks for uses of GATs like
+/// `<P0 as Trait<P1..Pn>>::GAT<Pn..Pm>` and adds the arguments `P0..Pm` into
+/// the two vectors, `regions` and `types` (depending on their kind). For each
+/// parameter `Pi`, the index `i` is also tracked.
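+///
+/// For example (sketch): visiting `<Self as IntoIter>::Iter<'a>` records
+/// `(Self, 0)` in `types` and `('a, 1)` in `regions`.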
+struct GATSubstCollector<'tcx> {
+ gat: DefId,
+ // Which regions appear and at which parameter index each is substituted
+ regions: FxHashSet<(ty::Region<'tcx>, usize)>,
+ // Which type params appear and at which parameter index each is substituted
+ types: FxHashSet<(Ty<'tcx>, usize)>,
+}
+
+impl<'tcx> GATSubstCollector<'tcx> {
+ fn visit<T: TypeFoldable<'tcx>>(
+ gat: DefId,
+ t: T,
+ ) -> (FxHashSet<(ty::Region<'tcx>, usize)>, FxHashSet<(Ty<'tcx>, usize)>) {
+ let mut visitor =
+ GATSubstCollector { gat, regions: FxHashSet::default(), types: FxHashSet::default() };
+ t.visit_with(&mut visitor);
+ (visitor.regions, visitor.types)
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for GATSubstCollector<'tcx> {
+ type BreakTy = !;
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match t.kind() {
+ ty::Projection(p) if p.item_def_id == self.gat => {
+ for (idx, subst) in p.substs.iter().enumerate() {
+ match subst.unpack() {
+ GenericArgKind::Lifetime(lt) if !lt.is_late_bound() => {
+ self.regions.insert((lt, idx));
+ }
+ GenericArgKind::Type(t) => {
+ self.types.insert((t, idx));
+ }
+ _ => {}
+ }
+ }
+ }
+ _ => {}
+ }
+ t.super_visit_with(self)
+ }
+}
+
+fn could_be_self(trait_def_id: LocalDefId, ty: &hir::Ty<'_>) -> bool {
+ match ty.kind {
+ hir::TyKind::TraitObject([trait_ref], ..) => match trait_ref.trait_ref.path.segments {
+ [s] => s.res.and_then(|r| r.opt_def_id()) == Some(trait_def_id.to_def_id()),
+ _ => false,
+ },
+ _ => false,
+ }
+}
+
+/// Detect when an object-unsafe trait is referring to itself in one of its associated items.
+/// When this is done, suggest using `Self` instead.
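+///
+/// A sketch (edition 2015, where a bare trait name in return position is an
+/// unboxed trait object):
+/// ```rust,ignore (illustrative)
+/// trait Trait {
+///     fn baz(&self) -> Self;  // returning `Self` makes `Trait` object-unsafe
+///     fn bat(&self) -> Trait; // error: associated item referring to unboxed
+///                             // trait object; suggestion: return `Self`
+/// }
+/// ```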
+fn check_object_unsafe_self_trait_by_name(tcx: TyCtxt<'_>, item: &hir::TraitItem<'_>) {
+ let (trait_name, trait_def_id) =
+ match tcx.hir().get_by_def_id(tcx.hir().get_parent_item(item.hir_id())) {
+ hir::Node::Item(item) => match item.kind {
+ hir::ItemKind::Trait(..) => (item.ident, item.def_id),
+ _ => return,
+ },
+ _ => return,
+ };
+ let mut trait_should_be_self = vec![];
+ match &item.kind {
+ hir::TraitItemKind::Const(ty, _) | hir::TraitItemKind::Type(_, Some(ty))
+ if could_be_self(trait_def_id, ty) =>
+ {
+ trait_should_be_self.push(ty.span)
+ }
+ hir::TraitItemKind::Fn(sig, _) => {
+ for ty in sig.decl.inputs {
+ if could_be_self(trait_def_id, ty) {
+ trait_should_be_self.push(ty.span);
+ }
+ }
+ match sig.decl.output {
+ hir::FnRetTy::Return(ty) if could_be_self(trait_def_id, ty) => {
+ trait_should_be_self.push(ty.span);
+ }
+ _ => {}
+ }
+ }
+ _ => {}
+ }
+ if !trait_should_be_self.is_empty() {
+ if tcx.object_safety_violations(trait_def_id).is_empty() {
+ return;
+ }
+ let sugg = trait_should_be_self.iter().map(|span| (*span, "Self".to_string())).collect();
+ tcx.sess
+ .struct_span_err(
+ trait_should_be_self,
+ "associated item referring to unboxed trait object for its own trait",
+ )
+ .span_label(trait_name.span, "in this trait")
+ .multipart_suggestion(
+ "you might have meant to use `Self` to refer to the implementing type",
+ sugg,
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ }
+}
+
+fn check_impl_item(tcx: TyCtxt<'_>, impl_item: &hir::ImplItem<'_>) {
+ let def_id = impl_item.def_id;
+
+ let (method_sig, span) = match impl_item.kind {
+ hir::ImplItemKind::Fn(ref sig, _) => (Some(sig), impl_item.span),
+ // Constrain binding and overflow error spans to `<Ty>` in `type foo = <Ty>`.
+ hir::ImplItemKind::TyAlias(ty) if ty.span != DUMMY_SP => (None, ty.span),
+ _ => (None, impl_item.span),
+ };
+
+ check_associated_item(tcx, def_id, span, method_sig);
+}
+
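+/// Checks that a const generic parameter has a type we support. A sketch of
+/// what is accepted and rejected without `feature(adt_const_params)`:
+/// ```rust,ignore (illustrative)
+/// struct Ok<const N: usize>; // integers, `bool` and `char` are supported
+/// struct Bad<const F: f32>;  // error: `f32` is forbidden as the type of a
+///                            // const generic parameter
+/// ```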
+fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) {
+ match param.kind {
+ // We currently only check wf of const params here.
+ hir::GenericParamKind::Lifetime { .. } | hir::GenericParamKind::Type { .. } => (),
+
+ // Const parameters are well formed if their type is structural match.
+ hir::GenericParamKind::Const { ty: hir_ty, default: _ } => {
+ let ty = tcx.type_of(tcx.hir().local_def_id(param.hir_id));
+
+ if tcx.features().adt_const_params {
+ if let Some(non_structural_match_ty) =
+ traits::search_for_adt_const_param_violation(param.span, tcx, ty)
+ {
+ // We use the same error code in both branches, because this is really the same
+ // issue: we just special-case the message for type parameters to make it
+ // clearer.
+ match non_structural_match_ty.kind() {
+ ty::Param(_) => {
+ // Const parameters may not have type parameters as their types,
+ // because we cannot be sure that the type parameter derives `PartialEq`
+ // and `Eq` (just implementing them is not enough for `structural_match`).
+ struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "`{ty}` is not guaranteed to `#[derive(PartialEq, Eq)]`, so may not be \
+ used as the type of a const parameter",
+ )
+ .span_label(
+ hir_ty.span,
+ format!("`{ty}` may not derive both `PartialEq` and `Eq`"),
+ )
+ .note(
+ "it is not currently possible to use a type parameter as the type of a \
+ const parameter",
+ )
+ .emit();
+ }
+ ty::Float(_) => {
+ struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "`{ty}` is forbidden as the type of a const generic parameter",
+ )
+ .note("floats do not derive `Eq` or `Ord`, which are required for const parameters")
+ .emit();
+ }
+ ty::FnPtr(_) => {
+ struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "using function pointers as const generic parameters is forbidden",
+ )
+ .emit();
+ }
+ ty::RawPtr(_) => {
+ struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "using raw pointers as const generic parameters is forbidden",
+ )
+ .emit();
+ }
+ _ => {
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ hir_ty.span,
+ E0741,
+ "`{}` must be annotated with `#[derive(PartialEq, Eq)]` to be used as \
+ the type of a const parameter",
+ non_structural_match_ty,
+ );
+
+ if ty == non_structural_match_ty {
+ diag.span_label(
+ hir_ty.span,
+ format!("`{ty}` doesn't derive both `PartialEq` and `Eq`"),
+ );
+ }
+
+ diag.emit();
+ }
+ }
+ }
+ } else {
+ let err_ty_str;
+ let mut is_ptr = true;
+
+ let err = match ty.kind() {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Error(_) => None,
+ ty::FnPtr(_) => Some("function pointers"),
+ ty::RawPtr(_) => Some("raw pointers"),
+ _ => {
+ is_ptr = false;
+ err_ty_str = format!("`{ty}`");
+ Some(err_ty_str.as_str())
+ }
+ };
+
+ if let Some(unsupported_type) = err {
+ if is_ptr {
+ tcx.sess.span_err(
+ hir_ty.span,
+ &format!(
+ "using {unsupported_type} as const generic parameters is forbidden",
+ ),
+ );
+ } else {
+ let mut err = tcx.sess.struct_span_err(
+ hir_ty.span,
+ &format!(
+ "{unsupported_type} is forbidden as the type of a const generic parameter",
+ ),
+ );
+ err.note("the only supported types are integers, `bool` and `char`");
+ if tcx.sess.is_nightly_build() {
+ err.help(
+ "more complex types are supported with `#![feature(adt_const_params)]`",
+ );
+ }
+ err.emit();
+ }
+ }
+ }
+ }
+ }
+}
+
+#[tracing::instrument(level = "debug", skip(tcx, span, sig_if_method))]
+fn check_associated_item(
+ tcx: TyCtxt<'_>,
+ item_id: LocalDefId,
+ span: Span,
+ sig_if_method: Option<&hir::FnSig<'_>>,
+) {
+ let loc = Some(WellFormedLoc::Ty(item_id));
+ enter_wf_checking_ctxt(tcx, span, item_id, |wfcx| {
+ let item = tcx.associated_item(item_id);
+
+ let (mut implied_bounds, self_ty) = match item.container {
+ ty::TraitContainer => (FxHashSet::default(), tcx.types.self_param),
+ ty::ImplContainer => {
+ let def_id = item.container_id(tcx);
+ (
+ impl_implied_bounds(tcx, wfcx.param_env, def_id.expect_local(), span),
+ tcx.type_of(def_id),
+ )
+ }
+ };
+
+ match item.kind {
+ ty::AssocKind::Const => {
+ let ty = tcx.type_of(item.def_id);
+ let ty = wfcx.normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
+ wfcx.register_wf_obligation(span, loc, ty.into());
+ }
+ ty::AssocKind::Fn => {
+ let sig = tcx.fn_sig(item.def_id);
+ let hir_sig = sig_if_method.expect("bad signature for method");
+ check_fn_or_method(
+ wfcx,
+ item.ident(tcx).span,
+ sig,
+ hir_sig.decl,
+ item.def_id.expect_local(),
+ &mut implied_bounds,
+ );
+ check_method_receiver(wfcx, hir_sig, item, self_ty);
+ }
+ ty::AssocKind::Type => {
+ if let ty::AssocItemContainer::TraitContainer = item.container {
+ check_associated_type_bounds(wfcx, item, span)
+ }
+ if item.defaultness(tcx).has_value() {
+ let ty = tcx.type_of(item.def_id);
+ let ty = wfcx.normalize(span, Some(WellFormedLoc::Ty(item_id)), ty);
+ wfcx.register_wf_obligation(span, loc, ty.into());
+ }
+ }
+ }
+
+ implied_bounds
+ })
+}
+
+fn item_adt_kind(kind: &ItemKind<'_>) -> Option<AdtKind> {
+ match kind {
+ ItemKind::Struct(..) => Some(AdtKind::Struct),
+ ItemKind::Union(..) => Some(AdtKind::Union),
+ ItemKind::Enum(..) => Some(AdtKind::Enum),
+ _ => None,
+ }
+}
+
+/// In a type definition, we check that the types of the fields are well-formed.
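+///
+/// For example (sketch): only the last field of a struct may be unsized, so the
+/// first definition below is rejected while the second is accepted:
+/// ```rust,ignore (illustrative)
+/// struct Bad { head: [u8], tail: u32 }  // error: `[u8]` must be the last field
+/// struct Fine { tail: u32, head: [u8] } // ok: the unsized field comes last
+/// ```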
+fn check_type_defn<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ item: &hir::Item<'tcx>,
+ all_sized: bool,
+ mut lookup_fields: F,
+) where
+ F: FnMut(&WfCheckingCtxt<'_, 'tcx>) -> Vec<AdtVariant<'tcx>>,
+{
+ enter_wf_checking_ctxt(tcx, item.span, item.def_id, |wfcx| {
+ let variants = lookup_fields(wfcx);
+ let packed = tcx.adt_def(item.def_id).repr().packed();
+
+ for variant in &variants {
+ // All field types must be well-formed.
+ for field in &variant.fields {
+ wfcx.register_wf_obligation(
+ field.span,
+ Some(WellFormedLoc::Ty(field.def_id)),
+ field.ty.into(),
+ )
+ }
+
+ // For DST, or when drop needs to copy things around, all
+ // intermediate types must be sized.
+ let needs_drop_copy = || {
+ packed && {
+ let ty = variant.fields.last().unwrap().ty;
+ let ty = tcx.erase_regions(ty);
+ if ty.needs_infer() {
+ tcx.sess
+ .delay_span_bug(item.span, &format!("inference variables in {:?}", ty));
+ // Just treat unresolved type expressions as if they need drop.
+ true
+ } else {
+ ty.needs_drop(tcx, tcx.param_env(item.def_id))
+ }
+ }
+ };
+ // All fields (except for possibly the last) should be sized.
+ let all_sized = all_sized || variant.fields.is_empty() || needs_drop_copy();
+ let unsized_len = if all_sized { 0 } else { 1 };
+ for (idx, field) in
+ variant.fields[..variant.fields.len() - unsized_len].iter().enumerate()
+ {
+ let last = idx == variant.fields.len() - 1;
+ wfcx.register_bound(
+ traits::ObligationCause::new(
+ field.span,
+ wfcx.body_id,
+ traits::FieldSized {
+ adt_kind: match item_adt_kind(&item.kind) {
+ Some(i) => i,
+ None => bug!(),
+ },
+ span: field.span,
+ last,
+ },
+ ),
+ wfcx.param_env,
+ field.ty,
+ tcx.require_lang_item(LangItem::Sized, None),
+ );
+ }
+
+ // Explicit `enum` discriminant values must const-evaluate successfully.
+ if let Some(discr_def_id) = variant.explicit_discr {
+ let discr_substs = InternalSubsts::identity_for_item(tcx, discr_def_id.to_def_id());
+
+ let cause = traits::ObligationCause::new(
+ tcx.def_span(discr_def_id),
+ wfcx.body_id,
+ traits::MiscObligation,
+ );
+ wfcx.register_obligation(traits::Obligation::new(
+ cause,
+ wfcx.param_env,
+ ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(ty::Unevaluated::new(
+ ty::WithOptConstParam::unknown(discr_def_id.to_def_id()),
+ discr_substs,
+ )))
+ .to_predicate(tcx),
+ ));
+ }
+ }
+
+ check_where_clauses(wfcx, item.span, item.def_id);
+
+ // No implied bounds in a struct definition.
+ FxHashSet::default()
+ });
+}
+
+#[instrument(skip(tcx, item))]
+fn check_trait(tcx: TyCtxt<'_>, item: &hir::Item<'_>) {
+ debug!(?item.def_id);
+
+ let trait_def = tcx.trait_def(item.def_id);
+ if trait_def.is_marker
+ || matches!(trait_def.specialization_kind, TraitSpecializationKind::Marker)
+ {
+ for associated_def_id in &*tcx.associated_item_def_ids(item.def_id) {
+ struct_span_err!(
+ tcx.sess,
+ tcx.def_span(*associated_def_id),
+ E0714,
+ "marker traits cannot have associated items",
+ )
+ .emit();
+ }
+ }
+
+ enter_wf_checking_ctxt(tcx, item.span, item.def_id, |wfcx| {
+ check_where_clauses(wfcx, item.span, item.def_id);
+
+ FxHashSet::default()
+ });
+
+ // Only check traits, don't check trait aliases
+ if let hir::ItemKind::Trait(_, _, _, _, items) = item.kind {
+ check_gat_where_clauses(tcx, items);
+ }
+}
+
+/// Checks all associated type defaults of trait `trait_def_id`.
+///
+/// Assuming the defaults are used, check that all predicates (bounds on the
+/// assoc type and where clauses on the trait) hold.
+fn check_associated_type_bounds(wfcx: &WfCheckingCtxt<'_, '_>, item: &ty::AssocItem, span: Span) {
+ let bounds = wfcx.tcx().explicit_item_bounds(item.def_id);
+
+ debug!("check_associated_type_bounds: bounds={:?}", bounds);
+ let wf_obligations = bounds.iter().flat_map(|&(bound, bound_span)| {
+ let normalized_bound = wfcx.normalize(span, None, bound);
+ traits::wf::predicate_obligations(
+ wfcx.infcx,
+ wfcx.param_env,
+ wfcx.body_id,
+ normalized_bound,
+ bound_span,
+ )
+ });
+
+ wfcx.register_obligations(wf_obligations);
+}
+
+fn check_item_fn(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+ ident: Ident,
+ span: Span,
+ decl: &hir::FnDecl<'_>,
+) {
+ enter_wf_checking_ctxt(tcx, span, def_id, |wfcx| {
+ let sig = tcx.fn_sig(def_id);
+ let mut implied_bounds = FxHashSet::default();
+ check_fn_or_method(wfcx, ident.span, sig, decl, def_id, &mut implied_bounds);
+ implied_bounds
+ })
+}
+
+fn check_item_type(tcx: TyCtxt<'_>, item_id: LocalDefId, ty_span: Span, allow_foreign_ty: bool) {
+ debug!("check_item_type: {:?}", item_id);
+
+ enter_wf_checking_ctxt(tcx, ty_span, item_id, |wfcx| {
+ let ty = tcx.type_of(item_id);
+ let item_ty = wfcx.normalize(ty_span, Some(WellFormedLoc::Ty(item_id)), ty);
+
+ let mut forbid_unsized = true;
+ if allow_foreign_ty {
+ let tail = tcx.struct_tail_erasing_lifetimes(item_ty, wfcx.param_env);
+ if let ty::Foreign(_) = tail.kind() {
+ forbid_unsized = false;
+ }
+ }
+
+ wfcx.register_wf_obligation(ty_span, Some(WellFormedLoc::Ty(item_id)), item_ty.into());
+ if forbid_unsized {
+ wfcx.register_bound(
+ traits::ObligationCause::new(ty_span, wfcx.body_id, traits::WellFormed(None)),
+ wfcx.param_env,
+ item_ty,
+ tcx.require_lang_item(LangItem::Sized, None),
+ );
+ }
+
+ // Ensure that the end result is `Sync` in a non-thread local `static`.
+ let should_check_for_sync = tcx.static_mutability(item_id.to_def_id())
+ == Some(hir::Mutability::Not)
+ && !tcx.is_foreign_item(item_id.to_def_id())
+ && !tcx.is_thread_local_static(item_id.to_def_id());
+
+ if should_check_for_sync {
+ wfcx.register_bound(
+ traits::ObligationCause::new(ty_span, wfcx.body_id, traits::SharedStatic),
+ wfcx.param_env,
+ item_ty,
+ tcx.require_lang_item(LangItem::Sync, Some(ty_span)),
+ );
+ }
+
+ // No implied bounds in a const, etc.
+ FxHashSet::default()
+ });
+}
+
+#[tracing::instrument(level = "debug", skip(tcx, ast_self_ty, ast_trait_ref))]
+fn check_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ item: &'tcx hir::Item<'tcx>,
+ ast_self_ty: &hir::Ty<'_>,
+ ast_trait_ref: &Option<hir::TraitRef<'_>>,
+ constness: hir::Constness,
+) {
+ enter_wf_checking_ctxt(tcx, item.span, item.def_id, |wfcx| {
+ match *ast_trait_ref {
+ Some(ref ast_trait_ref) => {
+ // `#[rustc_reservation_impl]` impls are not real impls and
+ // therefore don't need to be WF (the trait's `Self: Trait` predicate
+ // won't hold).
+ let trait_ref = tcx.impl_trait_ref(item.def_id).unwrap();
+ let trait_ref = wfcx.normalize(ast_trait_ref.path.span, None, trait_ref);
+ let trait_pred = ty::TraitPredicate {
+ trait_ref,
+ constness: match constness {
+ hir::Constness::Const => ty::BoundConstness::ConstIfConst,
+ hir::Constness::NotConst => ty::BoundConstness::NotConst,
+ },
+ polarity: ty::ImplPolarity::Positive,
+ };
+ let obligations = traits::wf::trait_obligations(
+ wfcx.infcx,
+ wfcx.param_env,
+ wfcx.body_id,
+ &trait_pred,
+ ast_trait_ref.path.span,
+ item,
+ );
+ debug!(?obligations);
+ wfcx.register_obligations(obligations);
+ }
+ None => {
+ let self_ty = tcx.type_of(item.def_id);
+ let self_ty = wfcx.normalize(item.span, None, self_ty);
+ wfcx.register_wf_obligation(
+ ast_self_ty.span,
+ Some(WellFormedLoc::Ty(item.hir_id().expect_owner())),
+ self_ty.into(),
+ );
+ }
+ }
+
+ check_where_clauses(wfcx, item.span, item.def_id);
+
+ impl_implied_bounds(tcx, wfcx.param_env, item.def_id, item.span)
+ });
+}
+
+/// Checks where-clauses and inline bounds that are declared on `def_id`.
+#[instrument(level = "debug", skip(wfcx))]
+fn check_where_clauses<'tcx>(wfcx: &WfCheckingCtxt<'_, 'tcx>, span: Span, def_id: LocalDefId) {
+ let infcx = wfcx.infcx;
+ let tcx = wfcx.tcx();
+
+ let predicates = tcx.bound_predicates_of(def_id.to_def_id());
+ let generics = tcx.generics_of(def_id);
+
+ let is_our_default = |def: &ty::GenericParamDef| match def.kind {
+ GenericParamDefKind::Type { has_default, .. }
+ | GenericParamDefKind::Const { has_default } => {
+ has_default && def.index >= generics.parent_count as u32
+ }
+ GenericParamDefKind::Lifetime => unreachable!(),
+ };
+
+ // Check that concrete defaults are well-formed. See test `type-check-defaults.rs`.
+ // For example, this forbids the declaration:
+ //
+ // struct Foo<T = Vec<[u32]>> { .. }
+ //
+ // Here, the default `Vec<[u32]>` is not WF because `[u32]: Sized` does not hold.
+ for param in &generics.params {
+ match param.kind {
+ GenericParamDefKind::Type { .. } => {
+ if is_our_default(param) {
+ let ty = tcx.type_of(param.def_id);
+ // Ignore dependent defaults -- that is, where the default of one type
+ // parameter includes another (e.g., `<T, U = T>`). In those cases, we can't
+ // be sure if it will error or not, as the user might always specify the other.
+ if !ty.needs_subst() {
+ wfcx.register_wf_obligation(tcx.def_span(param.def_id), None, ty.into());
+ }
+ }
+ }
+ GenericParamDefKind::Const { .. } => {
+ if is_our_default(param) {
+ // FIXME(const_generics_defaults): This
+ // is incorrect when dealing with unused substs, for example
+ // for `struct Foo<const N: usize, const M: usize = { 1 - 2 }>`
+ // we should eagerly error.
+ let default_ct = tcx.const_param_default(param.def_id);
+ if !default_ct.needs_subst() {
+ wfcx.register_wf_obligation(
+ tcx.def_span(param.def_id),
+ None,
+ default_ct.into(),
+ );
+ }
+ }
+ }
+ // Doesn't have defaults.
+ GenericParamDefKind::Lifetime => {}
+ }
+ }
+
+ // Check that trait predicates are WF when params are substituted by their defaults.
+ // We don't want to overly constrain the predicates that may be written, but we want to
+ // catch cases where a default may never be applied, such as `struct Foo<T: Copy = String>`.
+ // Therefore we check if a predicate which contains a single type param
+ // with a concrete default is WF with that default substituted.
+ // For more examples see tests `defaults-well-formedness.rs` and `type-check-defaults.rs`.
+ //
+ // First we build the defaulted substitution.
+ let substs = InternalSubsts::for_item(tcx, def_id.to_def_id(), |param, _| {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+ // All regions are identity.
+ tcx.mk_param_from_def(param)
+ }
+
+ GenericParamDefKind::Type { .. } => {
+ // If the param has a default, ...
+ if is_our_default(param) {
+ let default_ty = tcx.type_of(param.def_id);
+ // ... and it's not a dependent default, ...
+ if !default_ty.needs_subst() {
+ // ... then substitute it with the default.
+ return default_ty.into();
+ }
+ }
+
+ tcx.mk_param_from_def(param)
+ }
+ GenericParamDefKind::Const { .. } => {
+ // If the param has a default, ...
+ if is_our_default(param) {
+ let default_ct = tcx.const_param_default(param.def_id);
+ // ... and it's not a dependent default, ...
+ if !default_ct.needs_subst() {
+ // ... then substitute it with the default.
+ return default_ct.into();
+ }
+ }
+
+ tcx.mk_param_from_def(param)
+ }
+ }
+ });
+
+ // Now we build the substituted predicates.
+ let default_obligations = predicates
+ .0
+ .predicates
+ .iter()
+ .flat_map(|&(pred, sp)| {
+ #[derive(Default)]
+ struct CountParams {
+ params: FxHashSet<u32>,
+ }
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for CountParams {
+ type BreakTy = ();
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::Param(param) = t.kind() {
+ self.params.insert(param.index);
+ }
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, _: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ ControlFlow::BREAK
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ConstKind::Param(param) = c.kind() {
+ self.params.insert(param.index);
+ }
+ c.super_visit_with(self)
+ }
+ }
+ let mut param_count = CountParams::default();
+ let has_region = pred.visit_with(&mut param_count).is_break();
+ let substituted_pred = predicates.rebind(pred).subst(tcx, substs);
+ // Don't check non-defaulted params, dependent defaults (including lifetimes)
+ // or preds with multiple params.
+ if substituted_pred.has_param_types_or_consts()
+ || param_count.params.len() > 1
+ || has_region
+ {
+ None
+ } else if predicates.0.predicates.iter().any(|&(p, _)| p == substituted_pred) {
+ // Avoid duplication of predicates that contain no parameters, for example.
+ None
+ } else {
+ Some((substituted_pred, sp))
+ }
+ })
+ .map(|(pred, sp)| {
+ // Convert each of those into an obligation. So if you have
+ // something like `struct Foo<T: Copy = String>`, we would
+ // take that predicate `T: Copy`, substitute to `String: Copy`
+ // (actually that happens in the previous `flat_map` call),
+ // and then try to prove it (in this case, we'll fail).
+ //
+ // Note the subtle difference from how we handle `predicates`
+ // below: there, we are not trying to prove those predicates
+ // to be *true* but merely *well-formed*.
+ let pred = wfcx.normalize(sp, None, pred);
+ let cause = traits::ObligationCause::new(
+ sp,
+ wfcx.body_id,
+ traits::ItemObligation(def_id.to_def_id()),
+ );
+ traits::Obligation::new(cause, wfcx.param_env, pred)
+ });
+
+ let predicates = predicates.0.instantiate_identity(tcx);
+
+ let predicates = wfcx.normalize(span, None, predicates);
+
+ debug!(?predicates.predicates);
+ assert_eq!(predicates.predicates.len(), predicates.spans.len());
+ let wf_obligations =
+ iter::zip(&predicates.predicates, &predicates.spans).flat_map(|(&p, &sp)| {
+ traits::wf::predicate_obligations(infcx, wfcx.param_env, wfcx.body_id, p, sp)
+ });
+
+ let obligations: Vec<_> = wf_obligations.chain(default_obligations).collect();
+ wfcx.register_obligations(obligations);
+}
+
+#[tracing::instrument(level = "debug", skip(wfcx, span, hir_decl))]
+fn check_fn_or_method<'tcx>(
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ span: Span,
+ sig: ty::PolyFnSig<'tcx>,
+ hir_decl: &hir::FnDecl<'_>,
+ def_id: LocalDefId,
+ implied_bounds: &mut FxHashSet<Ty<'tcx>>,
+) {
+ let tcx = wfcx.tcx();
+ let sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), sig);
+
+ // Normalize the input and output types one at a time, using a different
+ // `WellFormedLoc` for each. We cannot call `normalize_associated_types`
+ // on the entire `FnSig`, since this would use the same `WellFormedLoc`
+ // for each type, preventing the HIR wf check from generating
+ // a nice error message.
+ let ty::FnSig { mut inputs_and_output, c_variadic, unsafety, abi } = sig;
+ inputs_and_output = tcx.mk_type_list(inputs_and_output.iter().enumerate().map(|(i, ty)| {
+ wfcx.normalize(
+ span,
+ Some(WellFormedLoc::Param {
+ function: def_id,
+ // Note that the `param_idx` of the output type is
+ // one greater than the index of the last input type.
+ param_idx: i.try_into().unwrap(),
+ }),
+ ty,
+ )
+ }));
+ // Manually call `normalize_associated_types_in` on the other types
+ // in `FnSig`. This ensures that if the types of these fields
+ // ever change to include projections, we will start normalizing
+ // them automatically.
+ let sig = ty::FnSig {
+ inputs_and_output,
+ c_variadic: wfcx.normalize(span, None, c_variadic),
+ unsafety: wfcx.normalize(span, None, unsafety),
+ abi: wfcx.normalize(span, None, abi),
+ };
+
+ for (i, (&input_ty, ty)) in iter::zip(sig.inputs(), hir_decl.inputs).enumerate() {
+ wfcx.register_wf_obligation(
+ ty.span,
+ Some(WellFormedLoc::Param { function: def_id, param_idx: i.try_into().unwrap() }),
+ input_ty.into(),
+ );
+ }
+
+ implied_bounds.extend(sig.inputs());
+
+ wfcx.register_wf_obligation(hir_decl.output.span(), None, sig.output().into());
+
+ // FIXME(#27579) return types should not be implied bounds
+ implied_bounds.insert(sig.output());
+
+ debug!(?implied_bounds);
+
+ check_where_clauses(wfcx, span, def_id);
+}
+
+const HELP_FOR_SELF_TYPE: &str = "consider changing to `self`, `&self`, `&mut self`, `self: Box<Self>`, \
+ `self: Rc<Self>`, `self: Arc<Self>`, or `self: Pin<P>` (where P is one \
+ of the previous types except `Self`)";
+
+#[tracing::instrument(level = "debug", skip(wfcx))]
+fn check_method_receiver<'tcx>(
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ fn_sig: &hir::FnSig<'_>,
+ method: &ty::AssocItem,
+ self_ty: Ty<'tcx>,
+) {
+ let tcx = wfcx.tcx();
+
+ if !method.fn_has_self_parameter {
+ return;
+ }
+
+ let span = fn_sig.decl.inputs[0].span;
+
+ let sig = tcx.fn_sig(method.def_id);
+ let sig = tcx.liberate_late_bound_regions(method.def_id, sig);
+ let sig = wfcx.normalize(span, None, sig);
+
+ debug!("check_method_receiver: sig={:?}", sig);
+
+ let self_ty = wfcx.normalize(span, None, self_ty);
+
+ let receiver_ty = sig.inputs()[0];
+ let receiver_ty = wfcx.normalize(span, None, receiver_ty);
+
+ if tcx.features().arbitrary_self_types {
+ if !receiver_is_valid(wfcx, span, receiver_ty, self_ty, true) {
+ // Report error; `arbitrary_self_types` was enabled.
+ e0307(tcx, span, receiver_ty);
+ }
+ } else {
+ if !receiver_is_valid(wfcx, span, receiver_ty, self_ty, false) {
+ if receiver_is_valid(wfcx, span, receiver_ty, self_ty, true) {
+ // Report error; would have worked with `arbitrary_self_types`.
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::arbitrary_self_types,
+ span,
+ &format!(
+ "`{receiver_ty}` cannot be used as the type of `self` without \
+ the `arbitrary_self_types` feature",
+ ),
+ )
+ .help(HELP_FOR_SELF_TYPE)
+ .emit();
+ } else {
+ // Report error; would not have worked with `arbitrary_self_types`.
+ e0307(tcx, span, receiver_ty);
+ }
+ }
+ }
+}
+
+fn e0307<'tcx>(tcx: TyCtxt<'tcx>, span: Span, receiver_ty: Ty<'_>) {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ span,
+ E0307,
+ "invalid `self` parameter type: {receiver_ty}"
+ )
+ .note("type of `self` must be `Self` or a type that dereferences to it")
+ .help(HELP_FOR_SELF_TYPE)
+ .emit();
+}
+
+/// Returns whether `receiver_ty` would be considered a valid receiver type for `self_ty`. If
+/// `arbitrary_self_types` is enabled, `receiver_ty` must transitively deref to `self_ty`, possibly
+/// through a `*const/mut T` raw pointer. If the feature is not enabled, the requirements are more
+/// strict: `receiver_ty` must implement `Receiver` and directly implement
+/// `Deref<Target = self_ty>`.
+///
+/// N.B., there are cases where this function returns `true` but causes an error to be emitted,
+/// particularly when `receiver_ty` derefs to a type that is the same as `self_ty` but has the
+/// wrong lifetime. Be careful of this if you are calling this function speculatively.
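+///
+/// A sketch for a hypothetical `self_ty = Foo`:
+/// ```rust,ignore (illustrative)
+/// struct Foo;
+/// impl Foo {
+///     fn a(self: &Foo) {}     // ok: `&Foo` derefs to `Foo` and implements `Receiver`
+///     fn b(self: Box<Foo>) {} // ok: likewise via `Box`'s `Deref` and `Receiver` impls
+///     // fn c(self: *const Foo) {} // rejected without `arbitrary_self_types`
+/// }
+/// ```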
+fn receiver_is_valid<'tcx>(
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ span: Span,
+ receiver_ty: Ty<'tcx>,
+ self_ty: Ty<'tcx>,
+ arbitrary_self_types_enabled: bool,
+) -> bool {
+ let infcx = wfcx.infcx;
+ let tcx = wfcx.tcx();
+ let cause =
+ ObligationCause::new(span, wfcx.body_id, traits::ObligationCauseCode::MethodReceiver);
+
+ let can_eq_self = |ty| infcx.can_eq(wfcx.param_env, self_ty, ty).is_ok();
+
+ // `self: Self` is always valid.
+ if can_eq_self(receiver_ty) {
+ if let Err(err) = wfcx.equate_types(&cause, wfcx.param_env, self_ty, receiver_ty) {
+ infcx.report_mismatched_types(&cause, self_ty, receiver_ty, err).emit();
+ }
+ return true;
+ }
+
+ let mut autoderef =
+ Autoderef::new(infcx, wfcx.param_env, wfcx.body_id, span, receiver_ty, span);
+
+ // The `arbitrary_self_types` feature allows raw pointer receivers like `self: *const Self`.
+ if arbitrary_self_types_enabled {
+ autoderef = autoderef.include_raw_pointers();
+ }
+
+ // The first type is `receiver_ty`, which we know is not equal to `self_ty`; skip it.
+ autoderef.next();
+
+ let receiver_trait_def_id = tcx.require_lang_item(LangItem::Receiver, None);
+
+ // Keep dereferencing `receiver_ty` until we get to `self_ty`.
+ loop {
+ if let Some((potential_self_ty, _)) = autoderef.next() {
+ debug!(
+ "receiver_is_valid: potential self type `{:?}` to match `{:?}`",
+ potential_self_ty, self_ty
+ );
+
+ if can_eq_self(potential_self_ty) {
+ wfcx.register_obligations(autoderef.into_obligations());
+
+ if let Err(err) =
+ wfcx.equate_types(&cause, wfcx.param_env, self_ty, potential_self_ty)
+ {
+ infcx.report_mismatched_types(&cause, self_ty, potential_self_ty, err).emit();
+ }
+
+ break;
+ } else {
+                // Without `feature(arbitrary_self_types)`, we require that each step in the
+                // deref chain implements `Receiver`.
+ if !arbitrary_self_types_enabled
+ && !receiver_is_implemented(
+ wfcx,
+ receiver_trait_def_id,
+ cause.clone(),
+ potential_self_ty,
+ )
+ {
+ return false;
+ }
+ }
+ } else {
+ debug!("receiver_is_valid: type `{:?}` does not deref to `{:?}`", receiver_ty, self_ty);
+ // If the receiver already has errors reported due to it, consider it valid to avoid
+ // unnecessary errors (#58712).
+ return receiver_ty.references_error();
+ }
+ }
+
+ // Without `feature(arbitrary_self_types)`, we require that `receiver_ty` implements `Receiver`.
+ if !arbitrary_self_types_enabled
+ && !receiver_is_implemented(wfcx, receiver_trait_def_id, cause.clone(), receiver_ty)
+ {
+ return false;
+ }
+
+ true
+}
+
+fn receiver_is_implemented<'tcx>(
+ wfcx: &WfCheckingCtxt<'_, 'tcx>,
+ receiver_trait_def_id: DefId,
+ cause: ObligationCause<'tcx>,
+ receiver_ty: Ty<'tcx>,
+) -> bool {
+ let tcx = wfcx.tcx();
+ let trait_ref = ty::Binder::dummy(ty::TraitRef {
+ def_id: receiver_trait_def_id,
+ substs: tcx.mk_substs_trait(receiver_ty, &[]),
+ });
+
+ let obligation =
+ traits::Obligation::new(cause, wfcx.param_env, trait_ref.without_const().to_predicate(tcx));
+
+ if wfcx.infcx.predicate_must_hold_modulo_regions(&obligation) {
+ true
+ } else {
+ debug!(
+ "receiver_is_implemented: type `{:?}` does not implement `Receiver` trait",
+ receiver_ty
+ );
+ false
+ }
+}
+
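+/// Checks that every generic parameter of a type definition is actually used.
+/// As an informal example (names ours): `struct S<T>(u32);` never mentions `T`,
+/// so `T` is inferred bivariant and `report_bivariance` below rejects it with
+/// E0392.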
+fn check_variances_for_type_defn<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ item: &hir::Item<'tcx>,
+ hir_generics: &hir::Generics<'_>,
+) {
+ let ty = tcx.type_of(item.def_id);
+ if tcx.has_error_field(ty) {
+ return;
+ }
+
+ let ty_predicates = tcx.predicates_of(item.def_id);
+ assert_eq!(ty_predicates.parent, None);
+ let variances = tcx.variances_of(item.def_id);
+
+ let mut constrained_parameters: FxHashSet<_> = variances
+ .iter()
+ .enumerate()
+ .filter(|&(_, &variance)| variance != ty::Bivariant)
+ .map(|(index, _)| Parameter(index as u32))
+ .collect();
+
+ identify_constrained_generic_params(tcx, ty_predicates, None, &mut constrained_parameters);
+
+ // Lazily calculated because it is only needed in case of an error.
+ let explicitly_bounded_params = LazyCell::new(|| {
+ let icx = crate::collect::ItemCtxt::new(tcx, item.def_id.to_def_id());
+ hir_generics
+ .predicates
+ .iter()
+ .filter_map(|predicate| match predicate {
+ hir::WherePredicate::BoundPredicate(predicate) => {
+ match icx.to_ty(predicate.bounded_ty).kind() {
+ ty::Param(data) => Some(Parameter(data.index)),
+ _ => None,
+ }
+ }
+ _ => None,
+ })
+ .collect::<FxHashSet<_>>()
+ });
+
+ for (index, _) in variances.iter().enumerate() {
+ let parameter = Parameter(index as u32);
+
+ if constrained_parameters.contains(&parameter) {
+ continue;
+ }
+
+ let param = &hir_generics.params[index];
+
+ match param.name {
+ hir::ParamName::Error => {}
+ _ => {
+ let has_explicit_bounds = explicitly_bounded_params.contains(&parameter);
+ report_bivariance(tcx, param, has_explicit_bounds);
+ }
+ }
+ }
+}
+
+fn report_bivariance(
+ tcx: TyCtxt<'_>,
+ param: &rustc_hir::GenericParam<'_>,
+ has_explicit_bounds: bool,
+) -> ErrorGuaranteed {
+ let span = param.span;
+ let param_name = param.name.ident().name;
+ let mut err = error_392(tcx, span, param_name);
+
+ let suggested_marker_id = tcx.lang_items().phantom_data();
+    // Help is available only in the presence of the `PhantomData` lang item.
+ let msg = if let Some(def_id) = suggested_marker_id {
+ format!(
+ "consider removing `{}`, referring to it in a field, or using a marker such as `{}`",
+ param_name,
+ tcx.def_path_str(def_id),
+ )
+ } else {
+ format!("consider removing `{param_name}` or referring to it in a field")
+ };
+ err.help(&msg);
+
+ if matches!(param.kind, hir::GenericParamKind::Type { .. }) && !has_explicit_bounds {
+ err.help(&format!(
+ "if you intended `{0}` to be a const parameter, use `const {0}: usize` instead",
+ param_name
+ ));
+ }
+ err.emit()
+}
+
+impl<'tcx> WfCheckingCtxt<'_, 'tcx> {
+ /// Feature gates RFC 2056 -- trivial bounds, checking for global bounds that
+ /// aren't true.
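+    ///
+    /// Informally (example ours, not from the original source): a bound like
+    /// `String: Copy` in `fn f() where String: Copy {}` mentions no generic
+    /// parameters, so it either always holds or never holds; re-checking it
+    /// against an empty `ParamEnv` below surfaces the never-holds case.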
+ fn check_false_global_bounds(&mut self) {
+ let tcx = self.ocx.infcx.tcx;
+ let mut span = self.span;
+ let empty_env = ty::ParamEnv::empty();
+
+ let def_id = tcx.hir().local_def_id(self.body_id);
+ let predicates_with_span = tcx.predicates_of(def_id).predicates.iter().copied();
+ // Check elaborated bounds.
+ let implied_obligations = traits::elaborate_predicates_with_span(tcx, predicates_with_span);
+
+ for obligation in implied_obligations {
+ // We lower empty bounds like `Vec<dyn Copy>:` as
+ // `WellFormed(Vec<dyn Copy>)`, which will later get checked by
+ // regular WF checking
+ if let ty::PredicateKind::WellFormed(..) = obligation.predicate.kind().skip_binder() {
+ continue;
+ }
+ let pred = obligation.predicate;
+ // Match the existing behavior.
+ if pred.is_global() && !pred.has_late_bound_regions() {
+ let pred = self.normalize(span, None, pred);
+ let hir_node = tcx.hir().find(self.body_id);
+
+            // Only use the span of the predicate clause (#90869).
+
+ if let Some(hir::Generics { predicates, .. }) =
+ hir_node.and_then(|node| node.generics())
+ {
+ let obligation_span = obligation.cause.span();
+
+ span = predicates
+ .iter()
+ // There seems to be no better way to find out which predicate we are in
+ .find(|pred| pred.span().contains(obligation_span))
+ .map(|pred| pred.span())
+ .unwrap_or(obligation_span);
+ }
+
+ let obligation = traits::Obligation::new(
+ traits::ObligationCause::new(span, self.body_id, traits::TrivialBound),
+ empty_env,
+ pred,
+ );
+ self.ocx.register_obligation(obligation);
+ }
+ }
+ }
+}
+
+fn check_mod_type_wf(tcx: TyCtxt<'_>, module: LocalDefId) {
+ let items = tcx.hir_module_items(module);
+ items.par_items(|item| tcx.ensure().check_well_formed(item.def_id));
+ items.par_impl_items(|item| tcx.ensure().check_well_formed(item.def_id));
+ items.par_trait_items(|item| tcx.ensure().check_well_formed(item.def_id));
+ items.par_foreign_items(|item| tcx.ensure().check_well_formed(item.def_id));
+}
+
+///////////////////////////////////////////////////////////////////////////
+// ADT
+
+// FIXME(eddyb) replace this with getting fields/discriminants through `ty::AdtDef`.
+struct AdtVariant<'tcx> {
+ /// Types of fields in the variant, that must be well-formed.
+ fields: Vec<AdtField<'tcx>>,
+
+ /// Explicit discriminant of this variant (e.g. `A = 123`),
+ /// that must evaluate to a constant value.
+ explicit_discr: Option<LocalDefId>,
+}
+
+struct AdtField<'tcx> {
+ ty: Ty<'tcx>,
+ def_id: LocalDefId,
+ span: Span,
+}
+
+impl<'a, 'tcx> WfCheckingCtxt<'a, 'tcx> {
+ // FIXME(eddyb) replace this with getting fields through `ty::AdtDef`.
+ fn non_enum_variant(&self, struct_def: &hir::VariantData<'_>) -> AdtVariant<'tcx> {
+ let fields = struct_def
+ .fields()
+ .iter()
+ .map(|field| {
+ let def_id = self.tcx().hir().local_def_id(field.hir_id);
+ let field_ty = self.tcx().type_of(def_id);
+ let field_ty = self.normalize(field.ty.span, None, field_ty);
+ debug!("non_enum_variant: type of field {:?} is {:?}", field, field_ty);
+ AdtField { ty: field_ty, span: field.ty.span, def_id }
+ })
+ .collect();
+ AdtVariant { fields, explicit_discr: None }
+ }
+
+ fn enum_variants(&self, enum_def: &hir::EnumDef<'_>) -> Vec<AdtVariant<'tcx>> {
+ enum_def
+ .variants
+ .iter()
+ .map(|variant| AdtVariant {
+ fields: self.non_enum_variant(&variant.data).fields,
+ explicit_discr: variant
+ .disr_expr
+ .map(|explicit_discr| self.tcx().hir().local_def_id(explicit_discr.hir_id)),
+ })
+ .collect()
+ }
+}
+
+pub fn impl_implied_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ impl_def_id: LocalDefId,
+ span: Span,
+) -> FxHashSet<Ty<'tcx>> {
+ // We completely ignore any obligations caused by normalizing the types
+ // we assume to be well formed. Considering that the user of the implied
+ // bounds will also normalize them, we leave it to them to emit errors
+ // which should result in better causes and spans.
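+    //
+    // Informally (example ours): for `impl<'a> Trait<&'a u32> for Box<u8>`, the
+    // returned set is `{Box<u8>, &'a u32}`, i.e. every type appearing in the
+    // trait reference; for an inherent `impl Box<u8>`, it is just `{Box<u8>}`.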
+ tcx.infer_ctxt().enter(|infcx| {
+ let cause = ObligationCause::misc(span, tcx.hir().local_def_id_to_hir_id(impl_def_id));
+ match tcx.impl_trait_ref(impl_def_id) {
+ Some(trait_ref) => {
+ // Trait impl: take implied bounds from all types that
+ // appear in the trait reference.
+ match infcx.at(&cause, param_env).normalize(trait_ref) {
+ Ok(Normalized { value, obligations: _ }) => value.substs.types().collect(),
+ Err(NoSolution) => FxHashSet::default(),
+ }
+ }
+
+ None => {
+ // Inherent impl: take implied bounds from the `self` type.
+ let self_ty = tcx.type_of(impl_def_id);
+ match infcx.at(&cause, param_env).normalize(self_ty) {
+ Ok(Normalized { value, obligations: _ }) => FxHashSet::from_iter([value]),
+ Err(NoSolution) => FxHashSet::default(),
+ }
+ }
+ }
+ })
+}
+
+fn error_392(
+ tcx: TyCtxt<'_>,
+ span: Span,
+ param_name: Symbol,
+) -> DiagnosticBuilder<'_, ErrorGuaranteed> {
+ let mut err = struct_span_err!(tcx.sess, span, E0392, "parameter `{param_name}` is never used");
+ err.span_label(span, "unused parameter");
+ err
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { check_mod_type_wf, check_well_formed, ..*providers };
+}
diff --git a/compiler/rustc_typeck/src/check/writeback.rs b/compiler/rustc_typeck/src/check/writeback.rs
new file mode 100644
index 000000000..f549807c3
--- /dev/null
+++ b/compiler/rustc_typeck/src/check/writeback.rs
@@ -0,0 +1,783 @@
+// Type resolution: the phase that finds all the types in the AST with
+// unresolved type variables and replaces "ty_var" types with their
+// substitutions.
+
+use crate::check::FnCtxt;
+
+use hir::def_id::LocalDefId;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_infer::infer::error_reporting::TypeAnnotationNeeded::E0282;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::hir::place::Place as HirPlace;
+use rustc_middle::mir::FakeReadCause;
+use rustc_middle::ty::adjustment::{Adjust, Adjustment, PointerCast};
+use rustc_middle::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable};
+use rustc_middle::ty::{self, ClosureSizeProfileData, Ty, TyCtxt};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+use std::mem;
+use std::ops::ControlFlow;
+
+///////////////////////////////////////////////////////////////////////////
+// Entry point
+
+// During type inference, partially inferred types are
+// represented using type variables (`ty::Infer`). These don't appear in
+// the final TypeckResults since all of the types should have been
+// inferred once typeck is done.
+// While type inference is running, however, having to update the
+// typeck results every time a new type is inferred would be unreasonably slow,
+// so instead all of the replacement happens at the end in
+// resolve_type_vars_in_body, which creates a new TypeckResults that
+// doesn't contain any inference types.
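+//
+// Informally (example ours): while checking `let v = Vec::new();` the element
+// type is an inference variable `?T`; a later `v.push(1u32)` constrains it,
+// and writeback is the step that records `Vec<u32>` rather than `Vec<?T>` in
+// the final results.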
+impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
+ pub fn resolve_type_vars_in_body(
+ &self,
+ body: &'tcx hir::Body<'tcx>,
+ ) -> &'tcx ty::TypeckResults<'tcx> {
+ let item_id = self.tcx.hir().body_owner(body.id());
+ let item_def_id = self.tcx.hir().local_def_id(item_id);
+
+ // This attribute causes us to dump some writeback information
+ // in the form of errors, which is used for unit tests.
+ let rustc_dump_user_substs =
+ self.tcx.has_attr(item_def_id.to_def_id(), sym::rustc_dump_user_substs);
+
+ let mut wbcx = WritebackCx::new(self, body, rustc_dump_user_substs);
+ for param in body.params {
+ wbcx.visit_node_id(param.pat.span, param.hir_id);
+ }
+        // A type only exists for constants and statics, not functions.
+ match self.tcx.hir().body_owner_kind(item_def_id) {
+ hir::BodyOwnerKind::Const | hir::BodyOwnerKind::Static(_) => {
+ wbcx.visit_node_id(body.value.span, item_id);
+ }
+ hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => (),
+ }
+ wbcx.visit_body(body);
+ wbcx.visit_min_capture_map();
+ wbcx.eval_closure_size();
+ wbcx.visit_fake_reads_map();
+ wbcx.visit_closures();
+ wbcx.visit_liberated_fn_sigs();
+ wbcx.visit_fru_field_types();
+ wbcx.visit_opaque_types();
+ wbcx.visit_coercion_casts();
+ wbcx.visit_user_provided_tys();
+ wbcx.visit_user_provided_sigs();
+ wbcx.visit_generator_interior_types();
+
+ wbcx.typeck_results.rvalue_scopes =
+ mem::take(&mut self.typeck_results.borrow_mut().rvalue_scopes);
+
+ let used_trait_imports =
+ mem::take(&mut self.typeck_results.borrow_mut().used_trait_imports);
+ debug!("used_trait_imports({:?}) = {:?}", item_def_id, used_trait_imports);
+ wbcx.typeck_results.used_trait_imports = used_trait_imports;
+
+ wbcx.typeck_results.treat_byte_string_as_slice =
+ mem::take(&mut self.typeck_results.borrow_mut().treat_byte_string_as_slice);
+
+ if self.is_tainted_by_errors() {
+ // FIXME(eddyb) keep track of `ErrorGuaranteed` from where the error was emitted.
+ wbcx.typeck_results.tainted_by_errors =
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
+ }
+
+ debug!("writeback: typeck results for {:?} are {:#?}", item_def_id, wbcx.typeck_results);
+
+ self.tcx.arena.alloc(wbcx.typeck_results)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// The Writeback context. This visitor walks the HIR, checking the
+// fn-specific typeck results to find references to types or regions. It
+// resolves those regions to remove inference variables and writes the
+// final result back into the master typeck results in the tcx. Here and
+// there, it applies a few ad-hoc checks that were not convenient to
+// do elsewhere.
+
+struct WritebackCx<'cx, 'tcx> {
+ fcx: &'cx FnCtxt<'cx, 'tcx>,
+
+ typeck_results: ty::TypeckResults<'tcx>,
+
+ body: &'tcx hir::Body<'tcx>,
+
+ rustc_dump_user_substs: bool,
+}
+
+impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
+ fn new(
+ fcx: &'cx FnCtxt<'cx, 'tcx>,
+ body: &'tcx hir::Body<'tcx>,
+ rustc_dump_user_substs: bool,
+ ) -> WritebackCx<'cx, 'tcx> {
+ let owner = body.id().hir_id.owner;
+
+ WritebackCx {
+ fcx,
+ typeck_results: ty::TypeckResults::new(owner),
+ body,
+ rustc_dump_user_substs,
+ }
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.fcx.tcx
+ }
+
+ fn write_ty_to_typeck_results(&mut self, hir_id: hir::HirId, ty: Ty<'tcx>) {
+ debug!("write_ty_to_typeck_results({:?}, {:?})", hir_id, ty);
+ assert!(!ty.needs_infer() && !ty.has_placeholders() && !ty.has_free_regions());
+ self.typeck_results.node_types_mut().insert(hir_id, ty);
+ }
+
+ // Hacky hack: During type-checking, we treat *all* operators
+ // as potentially overloaded. But then, during writeback, if
+ // we observe that something like `a+b` is (known to be)
+ // operating on scalars, we clear the overload.
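+    //
+    // Informally (example ours): `1u32 + 2u32` is initially bookkept as a call
+    // to `Add::add`; since both operands are scalars, we drop the method-call
+    // entries so later phases treat it as a builtin addition.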
+ fn fix_scalar_builtin_expr(&mut self, e: &hir::Expr<'_>) {
+ match e.kind {
+ hir::ExprKind::Unary(hir::UnOp::Neg | hir::UnOp::Not, inner) => {
+ let inner_ty = self.fcx.node_ty(inner.hir_id);
+ let inner_ty = self.fcx.resolve_vars_if_possible(inner_ty);
+
+ if inner_ty.is_scalar() {
+ let mut typeck_results = self.fcx.typeck_results.borrow_mut();
+ typeck_results.type_dependent_defs_mut().remove(e.hir_id);
+ typeck_results.node_substs_mut().remove(e.hir_id);
+ }
+ }
+ hir::ExprKind::Binary(ref op, lhs, rhs) | hir::ExprKind::AssignOp(ref op, lhs, rhs) => {
+ let lhs_ty = self.fcx.node_ty(lhs.hir_id);
+ let lhs_ty = self.fcx.resolve_vars_if_possible(lhs_ty);
+
+ let rhs_ty = self.fcx.node_ty(rhs.hir_id);
+ let rhs_ty = self.fcx.resolve_vars_if_possible(rhs_ty);
+
+ if lhs_ty.is_scalar() && rhs_ty.is_scalar() {
+ let mut typeck_results = self.fcx.typeck_results.borrow_mut();
+ typeck_results.type_dependent_defs_mut().remove(e.hir_id);
+ typeck_results.node_substs_mut().remove(e.hir_id);
+
+ match e.kind {
+ hir::ExprKind::Binary(..) => {
+ if !op.node.is_by_value() {
+ let mut adjustments = typeck_results.adjustments_mut();
+ if let Some(a) = adjustments.get_mut(lhs.hir_id) {
+ a.pop();
+ }
+ if let Some(a) = adjustments.get_mut(rhs.hir_id) {
+ a.pop();
+ }
+ }
+ }
+ hir::ExprKind::AssignOp(..)
+ if let Some(a) = typeck_results.adjustments_mut().get_mut(lhs.hir_id) =>
+ {
+ a.pop();
+ }
+ _ => {}
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+
+    // Similar to operators, indexing is always assumed to be overloaded.
+    // Here, we correct cases where an indexing expression can be simplified
+    // to use builtin indexing because the index type is known to be
+    // usize-ish.
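+    //
+    // Informally (example ours): for `b[i]` with `b: &[u8]` and `i: usize`, the
+    // `Index::index` bookkeeping recorded during type-checking is removed below
+    // so that it becomes a builtin slice index.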
+ fn fix_index_builtin_expr(&mut self, e: &hir::Expr<'_>) {
+ if let hir::ExprKind::Index(ref base, ref index) = e.kind {
+ let mut typeck_results = self.fcx.typeck_results.borrow_mut();
+
+            // All valid indexing looks like this; we might encounter invalid indexes at this point.
+ let base_ty = typeck_results
+ .expr_ty_adjusted_opt(base)
+ .map(|t| self.fcx.resolve_vars_if_possible(t).kind());
+ if base_ty.is_none() {
+                // When encountering `return [0][0]` outside of a `fn` body, the base may not
+                // be in the type table. We assume more relevant errors have already been
+                // emitted, so we delay an ICE if none have. (#64638)
+ self.tcx().sess.delay_span_bug(e.span, &format!("bad base: `{:?}`", base));
+ }
+ if let Some(ty::Ref(_, base_ty, _)) = base_ty {
+ let index_ty = typeck_results.expr_ty_adjusted_opt(index).unwrap_or_else(|| {
+ // When encountering `return [0][0]` outside of a `fn` body we would attempt
+                // to access a nonexistent index. We assume that more relevant errors will
+ // already have been emitted, so we only gate on this with an ICE if no
+ // error has been emitted. (#64638)
+ self.fcx.tcx.ty_error_with_message(
+ e.span,
+ &format!("bad index {:?} for base: `{:?}`", index, base),
+ )
+ });
+ let index_ty = self.fcx.resolve_vars_if_possible(index_ty);
+
+ if base_ty.builtin_index().is_some() && index_ty == self.fcx.tcx.types.usize {
+ // Remove the method call record
+ typeck_results.type_dependent_defs_mut().remove(e.hir_id);
+ typeck_results.node_substs_mut().remove(e.hir_id);
+
+ if let Some(a) = typeck_results.adjustments_mut().get_mut(base.hir_id) {
+ // Discard the need for a mutable borrow
+
+ // Extra adjustment made when indexing causes a drop
+ // of size information - we need to get rid of it
+ // Since this is "after" the other adjustment to be
+ // discarded, we do an extra `pop()`
+ if let Some(Adjustment {
+ kind: Adjust::Pointer(PointerCast::Unsize), ..
+ }) = a.pop()
+ {
+ // So the borrow discard actually happens here
+ a.pop();
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Impl of Visitor for WritebackCx
+//
+// This is the master code which walks the HIR. It delegates most of
+// the heavy lifting to the generic visit and resolve functions
+// below. In general, a function is made into a `visitor` if it must
+// traffic in node-ids or update typeck results in the type context etc.
+
+impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> {
+ fn visit_expr(&mut self, e: &'tcx hir::Expr<'tcx>) {
+ self.fix_scalar_builtin_expr(e);
+ self.fix_index_builtin_expr(e);
+
+ match e.kind {
+ hir::ExprKind::Closure(&hir::Closure { body, .. }) => {
+ let body = self.fcx.tcx.hir().body(body);
+ for param in body.params {
+ self.visit_node_id(e.span, param.hir_id);
+ }
+
+ self.visit_body(body);
+ }
+ hir::ExprKind::Struct(_, fields, _) => {
+ for field in fields {
+ self.visit_field_id(field.hir_id);
+ }
+ }
+ hir::ExprKind::Field(..) => {
+ self.visit_field_id(e.hir_id);
+ }
+ hir::ExprKind::ConstBlock(anon_const) => {
+ self.visit_node_id(e.span, anon_const.hir_id);
+
+ let body = self.tcx().hir().body(anon_const.body);
+ self.visit_body(body);
+ }
+ _ => {}
+ }
+
+ self.visit_node_id(e.span, e.hir_id);
+ intravisit::walk_expr(self, e);
+ }
+
+ fn visit_block(&mut self, b: &'tcx hir::Block<'tcx>) {
+ self.visit_node_id(b.span, b.hir_id);
+ intravisit::walk_block(self, b);
+ }
+
+ fn visit_pat(&mut self, p: &'tcx hir::Pat<'tcx>) {
+ match p.kind {
+ hir::PatKind::Binding(..) => {
+ let typeck_results = self.fcx.typeck_results.borrow();
+ if let Some(bm) =
+ typeck_results.extract_binding_mode(self.tcx().sess, p.hir_id, p.span)
+ {
+ self.typeck_results.pat_binding_modes_mut().insert(p.hir_id, bm);
+ }
+ }
+ hir::PatKind::Struct(_, fields, _) => {
+ for field in fields {
+ self.visit_field_id(field.hir_id);
+ }
+ }
+ _ => {}
+ };
+
+ self.visit_pat_adjustments(p.span, p.hir_id);
+
+ self.visit_node_id(p.span, p.hir_id);
+ intravisit::walk_pat(self, p);
+ }
+
+ fn visit_local(&mut self, l: &'tcx hir::Local<'tcx>) {
+ intravisit::walk_local(self, l);
+ let var_ty = self.fcx.local_ty(l.span, l.hir_id).decl_ty;
+ let var_ty = self.resolve(var_ty, &l.span);
+ self.write_ty_to_typeck_results(l.hir_id, var_ty);
+ }
+
+ fn visit_ty(&mut self, hir_ty: &'tcx hir::Ty<'tcx>) {
+ intravisit::walk_ty(self, hir_ty);
+ let ty = self.fcx.node_ty(hir_ty.hir_id);
+ let ty = self.resolve(ty, &hir_ty.span);
+ self.write_ty_to_typeck_results(hir_ty.hir_id, ty);
+ }
+
+ fn visit_infer(&mut self, inf: &'tcx hir::InferArg) {
+ intravisit::walk_inf(self, inf);
+ // Ignore cases where the inference is a const.
+ if let Some(ty) = self.fcx.node_ty_opt(inf.hir_id) {
+ let ty = self.resolve(ty, &inf.span);
+ self.write_ty_to_typeck_results(inf.hir_id, ty);
+ }
+ }
+}
+
+impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> {
+ fn eval_closure_size(&mut self) {
+ let mut res: FxHashMap<LocalDefId, ClosureSizeProfileData<'tcx>> = Default::default();
+ for (&closure_def_id, data) in self.fcx.typeck_results.borrow().closure_size_eval.iter() {
+ let closure_hir_id = self.tcx().hir().local_def_id_to_hir_id(closure_def_id);
+
+ let data = self.resolve(*data, &closure_hir_id);
+
+ res.insert(closure_def_id, data);
+ }
+
+ self.typeck_results.closure_size_eval = res;
+ }
+ fn visit_min_capture_map(&mut self) {
+ let mut min_captures_wb = ty::MinCaptureInformationMap::with_capacity_and_hasher(
+ self.fcx.typeck_results.borrow().closure_min_captures.len(),
+ Default::default(),
+ );
+ for (&closure_def_id, root_min_captures) in
+ self.fcx.typeck_results.borrow().closure_min_captures.iter()
+ {
+ let mut root_var_map_wb = ty::RootVariableMinCaptureList::with_capacity_and_hasher(
+ root_min_captures.len(),
+ Default::default(),
+ );
+ for (var_hir_id, min_list) in root_min_captures.iter() {
+ let min_list_wb = min_list
+ .iter()
+ .map(|captured_place| {
+ let locatable = captured_place.info.path_expr_id.unwrap_or_else(|| {
+ self.tcx().hir().local_def_id_to_hir_id(closure_def_id)
+ });
+
+ self.resolve(captured_place.clone(), &locatable)
+ })
+ .collect();
+ root_var_map_wb.insert(*var_hir_id, min_list_wb);
+ }
+ min_captures_wb.insert(closure_def_id, root_var_map_wb);
+ }
+
+ self.typeck_results.closure_min_captures = min_captures_wb;
+ }
+
+ fn visit_fake_reads_map(&mut self) {
+ let mut resolved_closure_fake_reads: FxHashMap<
+ LocalDefId,
+ Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>,
+ > = Default::default();
+ for (&closure_def_id, fake_reads) in
+ self.fcx.typeck_results.borrow().closure_fake_reads.iter()
+ {
+ let mut resolved_fake_reads = Vec::<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>::new();
+ for (place, cause, hir_id) in fake_reads.iter() {
+ let locatable = self.tcx().hir().local_def_id_to_hir_id(closure_def_id);
+
+ let resolved_fake_read = self.resolve(place.clone(), &locatable);
+ resolved_fake_reads.push((resolved_fake_read, *cause, *hir_id));
+ }
+ resolved_closure_fake_reads.insert(closure_def_id, resolved_fake_reads);
+ }
+ self.typeck_results.closure_fake_reads = resolved_closure_fake_reads;
+ }
+
+ fn visit_closures(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ let common_hir_owner = fcx_typeck_results.hir_owner;
+
+ for (id, origin) in fcx_typeck_results.closure_kind_origins().iter() {
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id: *id };
+ let place_span = origin.0;
+ let place = self.resolve(origin.1.clone(), &place_span);
+ self.typeck_results.closure_kind_origins_mut().insert(hir_id, (place_span, place));
+ }
+ }
+
+ fn visit_coercion_casts(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ let fcx_coercion_casts = fcx_typeck_results.coercion_casts();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+
+ for local_id in fcx_coercion_casts {
+ self.typeck_results.set_coercion_cast(*local_id);
+ }
+ }
+
+ fn visit_user_provided_tys(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ let common_hir_owner = fcx_typeck_results.hir_owner;
+
+ let mut errors_buffer = Vec::new();
+ for (&local_id, c_ty) in fcx_typeck_results.user_provided_types().iter() {
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id };
+
+ if cfg!(debug_assertions) && c_ty.needs_infer() {
+ span_bug!(
+ hir_id.to_span(self.fcx.tcx),
+ "writeback: `{:?}` has inference variables",
+ c_ty
+ );
+ };
+
+ self.typeck_results.user_provided_types_mut().insert(hir_id, *c_ty);
+
+ if let ty::UserType::TypeOf(_, user_substs) = c_ty.value {
+ if self.rustc_dump_user_substs {
+ // This is a unit-testing mechanism.
+ let span = self.tcx().hir().span(hir_id);
+ // We need to buffer the errors in order to guarantee a consistent
+ // order when emitting them.
+ let err = self
+ .tcx()
+ .sess
+ .struct_span_err(span, &format!("user substs: {:?}", user_substs));
+ err.buffer(&mut errors_buffer);
+ }
+ }
+ }
+
+ if !errors_buffer.is_empty() {
+ errors_buffer.sort_by_key(|diag| diag.span.primary_span());
+ for mut diag in errors_buffer.drain(..) {
+ self.tcx().sess.diagnostic().emit_diagnostic(&mut diag);
+ }
+ }
+ }
+
+ fn visit_user_provided_sigs(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+
+ for (&def_id, c_sig) in fcx_typeck_results.user_provided_sigs.iter() {
+ if cfg!(debug_assertions) && c_sig.needs_infer() {
+ span_bug!(
+ self.fcx.tcx.hir().span_if_local(def_id).unwrap(),
+ "writeback: `{:?}` has inference variables",
+ c_sig
+ );
+ };
+
+ self.typeck_results.user_provided_sigs.insert(def_id, *c_sig);
+ }
+ }
+
+ fn visit_generator_interior_types(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ self.typeck_results.generator_interior_types =
+ fcx_typeck_results.generator_interior_types.clone();
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn visit_opaque_types(&mut self) {
+ let opaque_types =
+ self.fcx.infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
+ for (opaque_type_key, decl) in opaque_types {
+ let hidden_type = match decl.origin {
+ hir::OpaqueTyOrigin::FnReturn(_) | hir::OpaqueTyOrigin::AsyncFn(_) => {
+ let ty = self.resolve(decl.hidden_type.ty, &decl.hidden_type.span);
+ struct RecursionChecker {
+ def_id: LocalDefId,
+ }
+ impl<'tcx> ty::TypeVisitor<'tcx> for RecursionChecker {
+ type BreakTy = ();
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::Opaque(def_id, _) = *t.kind() {
+ if def_id == self.def_id.to_def_id() {
+ return ControlFlow::Break(());
+ }
+ }
+ t.super_visit_with(self)
+ }
+ }
+ if ty
+ .visit_with(&mut RecursionChecker { def_id: opaque_type_key.def_id })
+ .is_break()
+ {
+ return;
+ }
+ Some(ty)
+ }
+ hir::OpaqueTyOrigin::TyAlias => None,
+ };
+ self.typeck_results.concrete_opaque_types.insert(opaque_type_key.def_id, hidden_type);
+ }
+ }
+
+ fn visit_field_id(&mut self, hir_id: hir::HirId) {
+ if let Some(index) = self.fcx.typeck_results.borrow_mut().field_indices_mut().remove(hir_id)
+ {
+ self.typeck_results.field_indices_mut().insert(hir_id, index);
+ }
+ }
+
+ #[instrument(skip(self, span), level = "debug")]
+ fn visit_node_id(&mut self, span: Span, hir_id: hir::HirId) {
+ // Export associated path extensions and method resolutions.
+ if let Some(def) =
+ self.fcx.typeck_results.borrow_mut().type_dependent_defs_mut().remove(hir_id)
+ {
+ self.typeck_results.type_dependent_defs_mut().insert(hir_id, def);
+ }
+
+ // Resolve any borrowings for the node with id `node_id`
+ self.visit_adjustments(span, hir_id);
+
+ // Resolve the type of the node with id `node_id`
+ let n_ty = self.fcx.node_ty(hir_id);
+ let n_ty = self.resolve(n_ty, &span);
+ self.write_ty_to_typeck_results(hir_id, n_ty);
+ debug!(?n_ty);
+
+ // Resolve any substitutions
+ if let Some(substs) = self.fcx.typeck_results.borrow().node_substs_opt(hir_id) {
+ let substs = self.resolve(substs, &span);
+ debug!("write_substs_to_tcx({:?}, {:?})", hir_id, substs);
+ assert!(!substs.needs_infer() && !substs.has_placeholders());
+ self.typeck_results.node_substs_mut().insert(hir_id, substs);
+ }
+ }
+
+ #[instrument(skip(self, span), level = "debug")]
+ fn visit_adjustments(&mut self, span: Span, hir_id: hir::HirId) {
+ let adjustment = self.fcx.typeck_results.borrow_mut().adjustments_mut().remove(hir_id);
+ match adjustment {
+ None => {
+ debug!("no adjustments for node");
+ }
+
+ Some(adjustment) => {
+ let resolved_adjustment = self.resolve(adjustment, &span);
+ debug!(?resolved_adjustment);
+ self.typeck_results.adjustments_mut().insert(hir_id, resolved_adjustment);
+ }
+ }
+ }
+
+ #[instrument(skip(self, span), level = "debug")]
+ fn visit_pat_adjustments(&mut self, span: Span, hir_id: hir::HirId) {
+ let adjustment = self.fcx.typeck_results.borrow_mut().pat_adjustments_mut().remove(hir_id);
+ match adjustment {
+ None => {
+ debug!("no pat_adjustments for node");
+ }
+
+ Some(adjustment) => {
+ let resolved_adjustment = self.resolve(adjustment, &span);
+ debug!(?resolved_adjustment);
+ self.typeck_results.pat_adjustments_mut().insert(hir_id, resolved_adjustment);
+ }
+ }
+ }
+
+ fn visit_liberated_fn_sigs(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ let common_hir_owner = fcx_typeck_results.hir_owner;
+
+ for (&local_id, &fn_sig) in fcx_typeck_results.liberated_fn_sigs().iter() {
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id };
+ let fn_sig = self.resolve(fn_sig, &hir_id);
+ self.typeck_results.liberated_fn_sigs_mut().insert(hir_id, fn_sig);
+ }
+ }
+
+ fn visit_fru_field_types(&mut self) {
+ let fcx_typeck_results = self.fcx.typeck_results.borrow();
+ assert_eq!(fcx_typeck_results.hir_owner, self.typeck_results.hir_owner);
+ let common_hir_owner = fcx_typeck_results.hir_owner;
+
+ for (&local_id, ftys) in fcx_typeck_results.fru_field_types().iter() {
+ let hir_id = hir::HirId { owner: common_hir_owner, local_id };
+ let ftys = self.resolve(ftys.clone(), &hir_id);
+ self.typeck_results.fru_field_types_mut().insert(hir_id, ftys);
+ }
+ }
+
+ fn resolve<T>(&mut self, x: T, span: &dyn Locatable) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let mut resolver = Resolver::new(self.fcx, span, self.body);
+ let x = x.fold_with(&mut resolver);
+ if cfg!(debug_assertions) && x.needs_infer() {
+ span_bug!(span.to_span(self.fcx.tcx), "writeback: `{:?}` has inference variables", x);
+ }
+
+        // We may have introduced e.g. `ty::Error` if inference failed; make sure
+        // to mark the `TypeckResults` as tainted in that case, so that downstream
+ // users of the typeck results don't produce extra errors, or worse, ICEs.
+ if resolver.replaced_with_error {
+ // FIXME(eddyb) keep track of `ErrorGuaranteed` from where the error was emitted.
+ self.typeck_results.tainted_by_errors =
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted());
+ }
+
+ x
+ }
+}
+
+pub(crate) trait Locatable {
+ fn to_span(&self, tcx: TyCtxt<'_>) -> Span;
+}
+
+impl Locatable for Span {
+ fn to_span(&self, _: TyCtxt<'_>) -> Span {
+ *self
+ }
+}
+
+impl Locatable for hir::HirId {
+ fn to_span(&self, tcx: TyCtxt<'_>) -> Span {
+ tcx.hir().span(*self)
+ }
+}
+
+/// The Resolver. This is the type folding engine that fully resolves inference
+/// variables, replacing any type or const that cannot be resolved with an error.
+struct Resolver<'cx, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ infcx: &'cx InferCtxt<'cx, 'tcx>,
+ span: &'cx dyn Locatable,
+ body: &'tcx hir::Body<'tcx>,
+
+ /// Set to `true` if any `Ty` or `ty::Const` had to be replaced with an `Error`.
+ replaced_with_error: bool,
+}
+
+impl<'cx, 'tcx> Resolver<'cx, 'tcx> {
+ fn new(
+ fcx: &'cx FnCtxt<'cx, 'tcx>,
+ span: &'cx dyn Locatable,
+ body: &'tcx hir::Body<'tcx>,
+ ) -> Resolver<'cx, 'tcx> {
+ Resolver { tcx: fcx.tcx, infcx: fcx, span, body, replaced_with_error: false }
+ }
+
+ fn report_type_error(&self, t: Ty<'tcx>) {
+        if self.tcx.sess.has_errors().is_none() {
+ self.infcx
+ .emit_inference_failure_err(
+ Some(self.body.id()),
+ self.span.to_span(self.tcx),
+ t.into(),
+ E0282,
+ false,
+ )
+ .emit();
+ }
+ }
+
+ fn report_const_error(&self, c: ty::Const<'tcx>) {
+ if self.tcx.sess.has_errors().is_none() {
+ self.infcx
+ .emit_inference_failure_err(
+ Some(self.body.id()),
+ self.span.to_span(self.tcx),
+ c.into(),
+ E0282,
+ false,
+ )
+ .emit();
+ }
+ }
+}
+
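+/// Folder that erases every region that is not late-bound, replacing it with
+/// `'erased`. Informally (example ours): `&'a u32` becomes `&'erased u32`,
+/// while the binder in `for<'b> fn(&'b u32)` is kept so diagnostics can still
+/// name `'b`.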
+struct EraseEarlyRegions<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> TypeFolder<'tcx> for EraseEarlyRegions<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if ty.has_type_flags(ty::TypeFlags::HAS_FREE_REGIONS) {
+ ty.super_fold_with(self)
+ } else {
+ ty
+ }
+ }
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ if r.is_late_bound() { r } else { self.tcx.lifetimes.re_erased }
+ }
+}
+
+impl<'cx, 'tcx> TypeFolder<'tcx> for Resolver<'cx, 'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match self.infcx.fully_resolve(t) {
+ Ok(t) => {
+ // Do not anonymize late-bound regions
+ // (e.g. keep `for<'a>` named `for<'a>`).
+ // This allows NLL to generate error messages that
+ // refer to the higher-ranked lifetime names written by the user.
+ EraseEarlyRegions { tcx: self.tcx }.fold_ty(t)
+ }
+ Err(_) => {
+ debug!("Resolver::fold_ty: input type `{:?}` not fully resolvable", t);
+ self.report_type_error(t);
+ self.replaced_with_error = true;
+ self.tcx().ty_error()
+ }
+ }
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ debug_assert!(!r.is_late_bound(), "Should not be resolving bound region.");
+ self.tcx.lifetimes.re_erased
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ match self.infcx.fully_resolve(ct) {
+ Ok(ct) => self.tcx.erase_regions(ct),
+ Err(_) => {
+ debug!("Resolver::fold_const: input const `{:?}` not fully resolvable", ct);
+ self.report_const_error(ct);
+ self.replaced_with_error = true;
+ self.tcx().const_error(ct.ty())
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// During type checking, we store promises with the result of trait
+// lookup rather than the actual results (because the results are not
+// necessarily available immediately). These routines unwind the
+// promises. It is expected that we will have already reported any
+// errors that may be encountered, so if the promises store an error,
+// a dummy result is returned.
diff --git a/compiler/rustc_typeck/src/check_unused.rs b/compiler/rustc_typeck/src/check_unused.rs
new file mode 100644
index 000000000..4a3cfa1ca
--- /dev/null
+++ b/compiler/rustc_typeck/src/check_unused.rs
@@ -0,0 +1,196 @@
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::Applicability;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::ty::TyCtxt;
+use rustc_session::lint;
+use rustc_span::{Span, Symbol};
+
+pub fn check_crate(tcx: TyCtxt<'_>) {
+ let mut used_trait_imports: FxHashSet<LocalDefId> = FxHashSet::default();
+
+ for item_def_id in tcx.hir().body_owners() {
+ let imports = tcx.used_trait_imports(item_def_id);
+ debug!("GatherVisitor: item_def_id={:?} with imports {:#?}", item_def_id, imports);
+ used_trait_imports.extend(imports.iter());
+ }
+
+ for &id in tcx.maybe_unused_trait_imports(()) {
+ debug_assert_eq!(tcx.def_kind(id), DefKind::Use);
+ if tcx.visibility(id).is_public() {
+ continue;
+ }
+ if used_trait_imports.contains(&id) {
+ continue;
+ }
+ let item = tcx.hir().expect_item(id);
+ if item.span.is_dummy() {
+ continue;
+ }
+ let hir::ItemKind::Use(path, _) = item.kind else { unreachable!() };
+ tcx.struct_span_lint_hir(lint::builtin::UNUSED_IMPORTS, item.hir_id(), path.span, |lint| {
+ let msg = if let Ok(snippet) = tcx.sess.source_map().span_to_snippet(path.span) {
+ format!("unused import: `{}`", snippet)
+ } else {
+ "unused import".to_owned()
+ };
+ lint.build(&msg).emit();
+ });
+ }
+
+ unused_crates_lint(tcx);
+}
+
+fn unused_crates_lint(tcx: TyCtxt<'_>) {
+ let lint = lint::builtin::UNUSED_EXTERN_CRATES;
+
+    // First, collect the crates that are completely unused. These we
+    // can always suggest removing (no matter which edition we are
+    // in).
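+    //
+    // Informally (crate name ours): `extern crate serde;` with no other mention
+    // of `serde` anywhere in the crate body would end up in this map.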
+ let unused_extern_crates: FxHashMap<LocalDefId, Span> = tcx
+ .maybe_unused_extern_crates(())
+ .iter()
+ .filter(|&&(def_id, _)| {
+ // The `def_id` here actually was calculated during resolution (at least
+ // at the time of this writing) and is being shipped to us via a side
+ // channel of the tcx. There may have been extra expansion phases,
+ // however, which ended up removing the `def_id` *after* expansion.
+ //
+ // As a result we need to verify that `def_id` is indeed still valid for
+ // our AST and actually present in the HIR map. If it's not there then
+ // there's safely nothing to warn about, and otherwise we carry on with
+ // our execution.
+ //
+ // Note that if we carry through to the `extern_mod_stmt_cnum` query
+ // below it'll cause a panic because `def_id` is actually bogus at this
+ // point in time otherwise.
+ if tcx.hir().find(tcx.hir().local_def_id_to_hir_id(def_id)).is_none() {
+ return false;
+ }
+ true
+ })
+ .filter(|&&(def_id, _)| {
+ tcx.extern_mod_stmt_cnum(def_id).map_or(true, |cnum| {
+ !tcx.is_compiler_builtins(cnum)
+ && !tcx.is_panic_runtime(cnum)
+ && !tcx.has_global_allocator(cnum)
+ && !tcx.has_panic_handler(cnum)
+ })
+ })
+ .cloned()
+ .collect();
+
+ // Collect all the extern crates (in a reliable order).
+ let mut crates_to_lint = vec![];
+
+ for id in tcx.hir().items() {
+ if matches!(tcx.def_kind(id.def_id), DefKind::ExternCrate) {
+ let item = tcx.hir().item(id);
+ if let hir::ItemKind::ExternCrate(orig_name) = item.kind {
+ crates_to_lint.push(ExternCrateToLint {
+ def_id: item.def_id.to_def_id(),
+ span: item.span,
+ orig_name,
+ warn_if_unused: !item.ident.as_str().starts_with('_'),
+ });
+ }
+ }
+ }
+
+ let extern_prelude = &tcx.resolutions(()).extern_prelude;
+
+ for extern_crate in &crates_to_lint {
+ let def_id = extern_crate.def_id.expect_local();
+ let item = tcx.hir().expect_item(def_id);
+
+ // If the crate is fully unused, we suggest removing it altogether.
+ // We do this in any edition.
+ if extern_crate.warn_if_unused {
+ if let Some(&span) = unused_extern_crates.get(&def_id) {
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ tcx.struct_span_lint_hir(lint, id, span, |lint| {
+ // Removal suggestion span needs to include attributes (Issue #54400)
+ let span_with_attrs = tcx
+ .hir()
+ .attrs(id)
+ .iter()
+ .map(|attr| attr.span)
+ .fold(span, |acc, attr_span| acc.to(attr_span));
+
+ lint.build("unused extern crate")
+ .span_suggestion_short(
+ span_with_attrs,
+ "remove it",
+ "",
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ });
+ continue;
+ }
+ }
+
+    // If we are not in the Rust 2018 edition, then we don't make any further
+ // suggestions.
+ if !tcx.sess.rust_2018() {
+ continue;
+ }
+
+ // If the extern crate isn't in the extern prelude,
+ // there is no way it can be written as a `use`.
+ let orig_name = extern_crate.orig_name.unwrap_or(item.ident.name);
+ if !extern_prelude.get(&orig_name).map_or(false, |from_item| !from_item) {
+ continue;
+ }
+
+ // If the extern crate is renamed, then we cannot suggest replacing it with a use as this
+ // would not insert the new name into the prelude, where other imports in the crate may be
+ // expecting it.
+ if extern_crate.orig_name.is_some() {
+ continue;
+ }
+
+ let id = tcx.hir().local_def_id_to_hir_id(def_id);
+ // If the extern crate has any attributes, they may have funky
+ // semantics we can't faithfully represent using `use` (most
+ // notably `#[macro_use]`). Ignore it.
+ if !tcx.hir().attrs(id).is_empty() {
+ continue;
+ }
+ tcx.struct_span_lint_hir(lint, id, extern_crate.span, |lint| {
+ // Otherwise, we can convert it into a `use` of some kind.
+ let base_replacement = match extern_crate.orig_name {
+ Some(orig_name) => format!("use {} as {};", orig_name, item.ident.name),
+ None => format!("use {};", item.ident.name),
+ };
+ let vis = tcx.sess.source_map().span_to_snippet(item.vis_span).unwrap_or_default();
+ let add_vis = |to| if vis.is_empty() { to } else { format!("{} {}", vis, to) };
+ lint.build("`extern crate` is not idiomatic in the new edition")
+ .span_suggestion_short(
+ extern_crate.span,
+ &format!("convert it to a `{}`", add_vis("use".to_string())),
+ add_vis(base_replacement),
+ Applicability::MachineApplicable,
+ )
+ .emit();
+ })
+ }
+}
+
+struct ExternCrateToLint {
+ /// `DefId` of the extern crate
+ def_id: DefId,
+
+ /// span from the item
+ span: Span,
+
+ /// if `Some`, then this is renamed (`extern crate orig_name as
+ /// crate_name`), and -- perhaps surprisingly -- this stores the
+ /// *original* name (`item.name` will contain the new name)
+ orig_name: Option<Symbol>,
+
+ /// if `false`, the original name started with `_`, so we shouldn't lint
+ /// about it going unused (but we should still emit idiom lints).
+ warn_if_unused: bool,
+}
diff --git a/compiler/rustc_typeck/src/coherence/builtin.rs b/compiler/rustc_typeck/src/coherence/builtin.rs
new file mode 100644
index 000000000..50946cc1d
--- /dev/null
+++ b/compiler/rustc_typeck/src/coherence/builtin.rs
@@ -0,0 +1,603 @@
+//! Check properties that are required by built-in traits and set
+//! up data structures required by type-checking/codegen.
+
+use crate::errors::{CopyImplOnNonAdt, CopyImplOnTypeWithDtor, DropImplOnWrongItem};
+use rustc_errors::{struct_span_err, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::ItemKind;
+use rustc_infer::infer;
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::ty::adjustment::CoerceUnsizedInfo;
+use rustc_middle::ty::{self, suggest_constraining_type_params, Ty, TyCtxt, TypeVisitable};
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt;
+use rustc_trait_selection::traits::misc::{can_type_implement_copy, CopyImplementationError};
+use rustc_trait_selection::traits::predicate_for_trait_def;
+use rustc_trait_selection::traits::{self, ObligationCause, TraitEngine, TraitEngineExt};
+use std::collections::BTreeMap;
+
+pub fn check_trait(tcx: TyCtxt<'_>, trait_def_id: DefId) {
+ let lang_items = tcx.lang_items();
+ Checker { tcx, trait_def_id }
+ .check(lang_items.drop_trait(), visit_implementation_of_drop)
+ .check(lang_items.copy_trait(), visit_implementation_of_copy)
+ .check(lang_items.coerce_unsized_trait(), visit_implementation_of_coerce_unsized)
+ .check(lang_items.dispatch_from_dyn_trait(), visit_implementation_of_dispatch_from_dyn);
+}
+
+struct Checker<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+}
+
+impl<'tcx> Checker<'tcx> {
+ fn check<F>(&self, trait_def_id: Option<DefId>, mut f: F) -> &Self
+ where
+ F: FnMut(TyCtxt<'tcx>, LocalDefId),
+ {
+ if Some(self.trait_def_id) == trait_def_id {
+ for &impl_def_id in self.tcx.hir().trait_impls(self.trait_def_id) {
+ f(self.tcx, impl_def_id);
+ }
+ }
+ self
+ }
+}
+
+fn visit_implementation_of_drop(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
+ // Destructors only work on nominal types.
+ if let ty::Adt(..) | ty::Error(_) = tcx.type_of(impl_did).kind() {
+ return;
+ }
+
+ let sp = match tcx.hir().expect_item(impl_did).kind {
+ ItemKind::Impl(ref impl_) => impl_.self_ty.span,
+ _ => bug!("expected Drop impl item"),
+ };
+
+ tcx.sess.emit_err(DropImplOnWrongItem { span: sp });
+}
+
+fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) {
+ debug!("visit_implementation_of_copy: impl_did={:?}", impl_did);
+
+ let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did);
+
+ let self_type = tcx.type_of(impl_did);
+ debug!("visit_implementation_of_copy: self_type={:?} (bound)", self_type);
+
+ let span = tcx.hir().span(impl_hir_id);
+ let param_env = tcx.param_env(impl_did);
+ assert!(!self_type.has_escaping_bound_vars());
+
+ debug!("visit_implementation_of_copy: self_type={:?} (free)", self_type);
+
+ let cause = traits::ObligationCause::misc(span, impl_hir_id);
+ match can_type_implement_copy(tcx, param_env, self_type, cause) {
+ Ok(()) => {}
+ Err(CopyImplementationError::InfrigingFields(fields)) => {
+ let item = tcx.hir().expect_item(impl_did);
+ let span = if let ItemKind::Impl(hir::Impl { of_trait: Some(ref tr), .. }) = item.kind {
+ tr.path.span
+ } else {
+ span
+ };
+
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0204,
+ "the trait `Copy` may not be implemented for this type"
+ );
+
+ // We'll try to suggest constraining type parameters to fulfill the requirements of
+ // their `Copy` implementation.
+ let mut errors: BTreeMap<_, Vec<_>> = Default::default();
+ let mut bounds = vec![];
+
+ for (field, ty) in fields {
+ let field_span = tcx.def_span(field.did);
+ let field_ty_span = match tcx.hir().get_if_local(field.did) {
+ Some(hir::Node::Field(field_def)) => field_def.ty.span,
+ _ => field_span,
+ };
+ err.span_label(field_span, "this field does not implement `Copy`");
+ // Spin up a new FulfillmentContext, so we can get the _precise_ reason
+ // why this field does not implement Copy. This is useful because sometimes
+ // it is not immediately clear why Copy is not implemented for a field, since
+ // all we point at is the field itself.
+ tcx.infer_ctxt().ignoring_regions().enter(|infcx| {
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(tcx);
+ fulfill_cx.register_bound(
+ &infcx,
+ param_env,
+ ty,
+ tcx.lang_items().copy_trait().unwrap(),
+ traits::ObligationCause::dummy_with_span(field_ty_span),
+ );
+ for error in fulfill_cx.select_all_or_error(&infcx) {
+ let error_predicate = error.obligation.predicate;
+ // Only note if it's not the root obligation, otherwise it's trivial and
+ // should be self-explanatory (i.e. a field literally doesn't implement Copy).
+
+ // FIXME: This error could be more descriptive, especially if the error_predicate
+ // contains a foreign type or if it's a deeply nested type...
+ if error_predicate != error.root_obligation.predicate {
+ errors
+ .entry((ty.to_string(), error_predicate.to_string()))
+ .or_default()
+ .push(error.obligation.cause.span);
+ }
+ if let ty::PredicateKind::Trait(ty::TraitPredicate {
+ trait_ref,
+ polarity: ty::ImplPolarity::Positive,
+ ..
+ }) = error_predicate.kind().skip_binder()
+ {
+ let ty = trait_ref.self_ty();
+ if let ty::Param(_) = ty.kind() {
+ bounds.push((
+ format!("{ty}"),
+ trait_ref.print_only_trait_path().to_string(),
+ Some(trait_ref.def_id),
+ ));
+ }
+ }
+ }
+ });
+ }
+ for ((ty, error_predicate), spans) in errors {
+ let span: MultiSpan = spans.into();
+ err.span_note(
+ span,
+ &format!("the `Copy` impl for `{}` requires that `{}`", ty, error_predicate),
+ );
+ }
+ suggest_constraining_type_params(
+ tcx,
+ tcx.hir().get_generics(impl_did).expect("impls always have generics"),
+ &mut err,
+ bounds.iter().map(|(param, constraint, def_id)| {
+ (param.as_str(), constraint.as_str(), *def_id)
+ }),
+ );
+ err.emit();
+ }
+ Err(CopyImplementationError::NotAnAdt) => {
+ let item = tcx.hir().expect_item(impl_did);
+ let span =
+ if let ItemKind::Impl(ref impl_) = item.kind { impl_.self_ty.span } else { span };
+
+ tcx.sess.emit_err(CopyImplOnNonAdt { span });
+ }
+ Err(CopyImplementationError::HasDestructor) => {
+ tcx.sess.emit_err(CopyImplOnTypeWithDtor { span });
+ }
+ }
+}
+
+fn visit_implementation_of_coerce_unsized<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) {
+ debug!("visit_implementation_of_coerce_unsized: impl_did={:?}", impl_did);
+
+    // Just compute this for its side effects, in particular reporting
+    // errors; other parts of the code may demand it for the info, of
+    // course.
+ let span = tcx.def_span(impl_did);
+ tcx.at(span).coerce_unsized_info(impl_did);
+}
+
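+/// Checks a `DispatchFromDyn` impl, which must be a coercion between two
+/// structs that differ in exactly one field (1-byte-aligned ZST fields are
+/// ignored). Informally (sketch ours, not from the original source), a custom
+/// pointer type follows the pattern
+///
+/// ```ignore (illustrative)
+/// struct P<T: ?Sized>(*const T);
+/// impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<P<U>> for P<T> {}
+/// ```
+///
+/// where only the `*const T` field changes type under the coercion.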
+fn visit_implementation_of_dispatch_from_dyn<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) {
+ debug!("visit_implementation_of_dispatch_from_dyn: impl_did={:?}", impl_did);
+
+ let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did);
+ let span = tcx.hir().span(impl_hir_id);
+
+ let dispatch_from_dyn_trait = tcx.require_lang_item(LangItem::DispatchFromDyn, Some(span));
+
+ let source = tcx.type_of(impl_did);
+ assert!(!source.has_escaping_bound_vars());
+ let target = {
+ let trait_ref = tcx.impl_trait_ref(impl_did).unwrap();
+ assert_eq!(trait_ref.def_id, dispatch_from_dyn_trait);
+
+ trait_ref.substs.type_at(1)
+ };
+
+ debug!("visit_implementation_of_dispatch_from_dyn: {:?} -> {:?}", source, target);
+
+ let param_env = tcx.param_env(impl_did);
+
+ let create_err = |msg: &str| struct_span_err!(tcx.sess, span, E0378, "{}", msg);
+
+ tcx.infer_ctxt().enter(|infcx| {
+ let cause = ObligationCause::misc(span, impl_hir_id);
+
+ use rustc_type_ir::sty::TyKind::*;
+ match (source.kind(), target.kind()) {
+ (&Ref(r_a, _, mutbl_a), Ref(r_b, _, mutbl_b))
+ if infcx.at(&cause, param_env).eq(r_a, *r_b).is_ok() && mutbl_a == *mutbl_b => {}
+ (&RawPtr(tm_a), &RawPtr(tm_b)) if tm_a.mutbl == tm_b.mutbl => (),
+ (&Adt(def_a, substs_a), &Adt(def_b, substs_b))
+ if def_a.is_struct() && def_b.is_struct() =>
+ {
+ if def_a != def_b {
+ let source_path = tcx.def_path_str(def_a.did());
+ let target_path = tcx.def_path_str(def_b.did());
+
+ create_err(&format!(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures with the same \
+ definition; expected `{}`, found `{}`",
+ source_path, target_path,
+ ))
+ .emit();
+
+ return;
+ }
+
+ if def_a.repr().c() || def_a.repr().packed() {
+ create_err(
+ "structs implementing `DispatchFromDyn` may not have \
+ `#[repr(packed)]` or `#[repr(C)]`",
+ )
+ .emit();
+ }
+
+ let fields = &def_a.non_enum_variant().fields;
+
+ let coerced_fields = fields
+ .iter()
+ .filter(|field| {
+ let ty_a = field.ty(tcx, substs_a);
+ let ty_b = field.ty(tcx, substs_b);
+
+ if let Ok(layout) = tcx.layout_of(param_env.and(ty_a)) {
+ if layout.is_zst() && layout.align.abi.bytes() == 1 {
+ // ignore ZST fields with alignment of 1 byte
+ return false;
+ }
+ }
+
+ if let Ok(ok) = infcx.at(&cause, param_env).eq(ty_a, ty_b) {
+ if ok.obligations.is_empty() {
+ create_err(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for structs containing the field being coerced, \
+ ZST fields with 1 byte alignment, and nothing else",
+ )
+ .note(&format!(
+ "extra field `{}` of type `{}` is not allowed",
+ field.name, ty_a,
+ ))
+ .emit();
+
+ return false;
+ }
+ }
+
+ return true;
+ })
+ .collect::<Vec<_>>();
+
+ if coerced_fields.is_empty() {
+ create_err(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures with a single field \
+ being coerced, none found",
+ )
+ .emit();
+ } else if coerced_fields.len() > 1 {
+ create_err(
+ "implementing the `DispatchFromDyn` trait requires multiple coercions",
+ )
+ .note(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures with a single field \
+ being coerced",
+ )
+ .note(&format!(
+ "currently, {} fields need coercions: {}",
+ coerced_fields.len(),
+ coerced_fields
+ .iter()
+ .map(|field| {
+ format!(
+ "`{}` (`{}` to `{}`)",
+ field.name,
+ field.ty(tcx, substs_a),
+ field.ty(tcx, substs_b),
+ )
+ })
+ .collect::<Vec<_>>()
+ .join(", ")
+ ))
+ .emit();
+ } else {
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+
+ for field in coerced_fields {
+ let predicate = predicate_for_trait_def(
+ tcx,
+ param_env,
+ cause.clone(),
+ dispatch_from_dyn_trait,
+ 0,
+ field.ty(tcx, substs_a),
+ &[field.ty(tcx, substs_b).into()],
+ );
+
+ fulfill_cx.register_predicate_obligation(&infcx, predicate);
+ }
+
+ // Check that all transitive obligations are satisfied.
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+ if !errors.is_empty() {
+ infcx.report_fulfillment_errors(&errors, None, false);
+ }
+
+ // Finally, resolve all regions.
+ let outlives_env = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(impl_did, &outlives_env);
+ }
+ }
+ _ => {
+ create_err(
+ "the trait `DispatchFromDyn` may only be implemented \
+ for a coercion between structures",
+ )
+ .emit();
+ }
+ }
+ })
+}
+
+pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: DefId) -> CoerceUnsizedInfo {
+ debug!("compute_coerce_unsized_info(impl_did={:?})", impl_did);
+
+ // this provider should only get invoked for local def-ids
+ let impl_did = impl_did.expect_local();
+ let span = tcx.def_span(impl_did);
+
+ let coerce_unsized_trait = tcx.require_lang_item(LangItem::CoerceUnsized, Some(span));
+
+ let unsize_trait = tcx.lang_items().require(LangItem::Unsize).unwrap_or_else(|err| {
+ tcx.sess.fatal(&format!("`CoerceUnsized` implementation {}", err));
+ });
+
+ let source = tcx.type_of(impl_did);
+ let trait_ref = tcx.impl_trait_ref(impl_did).unwrap();
+ assert_eq!(trait_ref.def_id, coerce_unsized_trait);
+ let target = trait_ref.substs.type_at(1);
+ debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (bound)", source, target);
+
+ let param_env = tcx.param_env(impl_did);
+ assert!(!source.has_escaping_bound_vars());
+
+ let err_info = CoerceUnsizedInfo { custom_kind: None };
+
+ debug!("visit_implementation_of_coerce_unsized: {:?} -> {:?} (free)", source, target);
+
+ tcx.infer_ctxt().enter(|infcx| {
+ let impl_hir_id = tcx.hir().local_def_id_to_hir_id(impl_did);
+ let cause = ObligationCause::misc(span, impl_hir_id);
+ let check_mutbl = |mt_a: ty::TypeAndMut<'tcx>,
+ mt_b: ty::TypeAndMut<'tcx>,
+ mk_ptr: &dyn Fn(Ty<'tcx>) -> Ty<'tcx>| {
+ if (mt_a.mutbl, mt_b.mutbl) == (hir::Mutability::Not, hir::Mutability::Mut) {
+ infcx
+ .report_mismatched_types(
+ &cause,
+ mk_ptr(mt_b.ty),
+ target,
+ ty::error::TypeError::Mutability,
+ )
+ .emit();
+ }
+ (mt_a.ty, mt_b.ty, unsize_trait, None)
+ };
+ let (source, target, trait_def_id, kind) = match (source.kind(), target.kind()) {
+ (&ty::Ref(r_a, ty_a, mutbl_a), &ty::Ref(r_b, ty_b, mutbl_b)) => {
+ infcx.sub_regions(infer::RelateObjectBound(span), r_b, r_a);
+ let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
+ let mt_b = ty::TypeAndMut { ty: ty_b, mutbl: mutbl_b };
+ check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ref(r_b, ty))
+ }
+
+ (&ty::Ref(_, ty_a, mutbl_a), &ty::RawPtr(mt_b)) => {
+ let mt_a = ty::TypeAndMut { ty: ty_a, mutbl: mutbl_a };
+ check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty))
+ }
+
+ (&ty::RawPtr(mt_a), &ty::RawPtr(mt_b)) => {
+ check_mutbl(mt_a, mt_b, &|ty| tcx.mk_imm_ptr(ty))
+ }
+
+ (&ty::Adt(def_a, substs_a), &ty::Adt(def_b, substs_b))
+ if def_a.is_struct() && def_b.is_struct() =>
+ {
+ if def_a != def_b {
+ let source_path = tcx.def_path_str(def_a.did());
+ let target_path = tcx.def_path_str(def_b.did());
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0377,
+ "the trait `CoerceUnsized` may only be implemented \
+ for a coercion between structures with the same \
+ definition; expected `{}`, found `{}`",
+ source_path,
+ target_path
+ )
+ .emit();
+ return err_info;
+ }
+
+ // Here we are considering a case of converting
+            // `S<P0...Pn>` to `S<Q0...Qn>`. As an example, let's imagine a struct `Foo<T, U>`,
+ // which acts like a pointer to `U`, but carries along some extra data of type `T`:
+ //
+ // struct Foo<T, U> {
+ // extra: T,
+ // ptr: *mut U,
+ // }
+ //
+ // We might have an impl that allows (e.g.) `Foo<T, [i32; 3]>` to be unsized
+ // to `Foo<T, [i32]>`. That impl would look like:
+ //
+ // impl<T, U: Unsize<V>, V> CoerceUnsized<Foo<T, V>> for Foo<T, U> {}
+ //
+ // Here `U = [i32; 3]` and `V = [i32]`. At runtime,
+ // when this coercion occurs, we would be changing the
+ // field `ptr` from a thin pointer of type `*mut [i32;
+ // 3]` to a fat pointer of type `*mut [i32]` (with
+ // extra data `3`). **The purpose of this check is to
+ // make sure that we know how to do this conversion.**
+ //
+ // To check if this impl is legal, we would walk down
+ // the fields of `Foo` and consider their types with
+ // both substitutes. We are looking to find that
+ // exactly one (non-phantom) field has changed its
+ // type, which we will expect to be the pointer that
+ // is becoming fat (we could probably generalize this
+ // to multiple thin pointers of the same type becoming
+ // fat, but we don't). In this case:
+ //
+ // - `extra` has type `T` before and type `T` after
+ // - `ptr` has type `*mut U` before and type `*mut V` after
+ //
+ // Since just one field changed, we would then check
+ // that `*mut U: CoerceUnsized<*mut V>` is implemented
+ // (in other words, that we know how to do this
+ // conversion). This will work out because `U:
+ // Unsize<V>`, and we have a builtin rule that `*mut
+ // U` can be coerced to `*mut V` if `U: Unsize<V>`.
+ let fields = &def_a.non_enum_variant().fields;
+ let diff_fields = fields
+ .iter()
+ .enumerate()
+ .filter_map(|(i, f)| {
+ let (a, b) = (f.ty(tcx, substs_a), f.ty(tcx, substs_b));
+
+ if tcx.type_of(f.did).is_phantom_data() {
+ // Ignore PhantomData fields
+ return None;
+ }
+
+ // Ignore fields that aren't changed; it may
+ // be that we could get away with subtyping or
+ // something more accepting, but we use
+ // equality because we want to be able to
+ // perform this check without computing
+ // variance where possible. (This is because
+ // we may have to evaluate constraint
+ // expressions in the course of execution.)
+ // See e.g., #41936.
+ if let Ok(ok) = infcx.at(&cause, param_env).eq(a, b) {
+ if ok.obligations.is_empty() {
+ return None;
+ }
+ }
+
+ // Collect up all fields that were significantly changed
+ // i.e., those that contain T in coerce_unsized T -> U
+ Some((i, a, b))
+ })
+ .collect::<Vec<_>>();
+
+ if diff_fields.is_empty() {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0374,
+ "the trait `CoerceUnsized` may only be implemented \
+ for a coercion between structures with one field \
+ being coerced, none found"
+ )
+ .emit();
+ return err_info;
+ } else if diff_fields.len() > 1 {
+ let item = tcx.hir().expect_item(impl_did);
+ let span = if let ItemKind::Impl(hir::Impl { of_trait: Some(ref t), .. }) =
+ item.kind
+ {
+ t.path.span
+ } else {
+ tcx.def_span(impl_did)
+ };
+
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0375,
+ "implementing the trait \
+ `CoerceUnsized` requires multiple \
+ coercions"
+ )
+ .note(
+ "`CoerceUnsized` may only be implemented for \
+ a coercion between structures with one field being coerced",
+ )
+ .note(&format!(
+ "currently, {} fields need coercions: {}",
+ diff_fields.len(),
+ diff_fields
+ .iter()
+ .map(|&(i, a, b)| {
+ format!("`{}` (`{}` to `{}`)", fields[i].name, a, b)
+ })
+ .collect::<Vec<_>>()
+ .join(", ")
+ ))
+ .span_label(span, "requires multiple coercions")
+ .emit();
+ return err_info;
+ }
+
+ let (i, a, b) = diff_fields[0];
+ let kind = ty::adjustment::CustomCoerceUnsized::Struct(i);
+ (a, b, coerce_unsized_trait, Some(kind))
+ }
+
+ _ => {
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0376,
+ "the trait `CoerceUnsized` may only be implemented \
+ for a coercion between structures"
+ )
+ .emit();
+ return err_info;
+ }
+ };
+
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+
+ // Register an obligation for `A: Trait<B>`.
+ let cause = traits::ObligationCause::misc(span, impl_hir_id);
+ let predicate = predicate_for_trait_def(
+ tcx,
+ param_env,
+ cause,
+ trait_def_id,
+ 0,
+ source,
+ &[target.into()],
+ );
+ fulfill_cx.register_predicate_obligation(&infcx, predicate);
+
+ // Check that all transitive obligations are satisfied.
+ let errors = fulfill_cx.select_all_or_error(&infcx);
+ if !errors.is_empty() {
+ infcx.report_fulfillment_errors(&errors, None, false);
+ }
+
+ // Finally, resolve all regions.
+ let outlives_env = OutlivesEnvironment::new(param_env);
+ infcx.check_region_obligations_and_report_errors(impl_did, &outlives_env);
+
+ CoerceUnsizedInfo { custom_kind: kind }
+ })
+}
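+
+// A user-level sketch of the impl shape this provider accepts (`MyBox` is a
+// hypothetical smart pointer; the `unsize`/`coerce_unsized` feature gates are
+// needed because both traits are unstable): exactly one non-`PhantomData`
+// field changes type, and its old type must unsize to the new one.
+//
+//     #![feature(unsize, coerce_unsized)]
+//     use std::marker::Unsize;
+//     use std::ops::CoerceUnsized;
+//
+//     struct MyBox<T: ?Sized>(*mut T);
+//
+//     impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<MyBox<U>> for MyBox<T> {}
+//
+//     // `MyBox<[i32; 3]>` then coerces to `MyBox<[i32]>`; the field walk above
+//     // reduces this to checking `*mut [i32; 3]: CoerceUnsized<*mut [i32]>`.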
diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls.rs b/compiler/rustc_typeck/src/coherence/inherent_impls.rs
new file mode 100644
index 000000000..52aad636f
--- /dev/null
+++ b/compiler/rustc_typeck/src/coherence/inherent_impls.rs
@@ -0,0 +1,249 @@
+//! The code in this module gathers up all of the inherent impls in
+//! the current crate and organizes them in a map. It winds up
+//! touching the whole crate and thus must be recomputed completely
+//! for any change, but it is very cheap to compute. In practice, most
+//! code in the compiler never *directly* requests this map. Instead,
+//! it requests the inherent impls specific to some type (via
+//! `tcx.inherent_impls(def_id)`). That value, however,
+//! is computed by selecting an entry from this table.
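+//!
+//! For example, if a crate defines a (hypothetical) local `struct Widget;`
+//! with two separate `impl Widget { .. }` blocks, this map records both impl
+//! `DefId`s under `Widget`'s `DefId`, and `tcx.inherent_impls(widget_def_id)`
+//! returns both of them.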
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
+use rustc_middle::ty::fast_reject::{simplify_type, SimplifiedType, TreatParams};
+use rustc_middle::ty::{self, CrateInherentImpls, Ty, TyCtxt};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+/// On-demand query: yields a map containing all types mapped to their inherent impls.
+pub fn crate_inherent_impls(tcx: TyCtxt<'_>, (): ()) -> CrateInherentImpls {
+ let mut collect = InherentCollect { tcx, impls_map: Default::default() };
+ for id in tcx.hir().items() {
+ collect.check_item(id);
+ }
+ collect.impls_map
+}
+
+pub fn crate_incoherent_impls(tcx: TyCtxt<'_>, (_, simp): (CrateNum, SimplifiedType)) -> &[DefId] {
+ let crate_map = tcx.crate_inherent_impls(());
+ tcx.arena.alloc_from_iter(
+ crate_map.incoherent_impls.get(&simp).unwrap_or(&Vec::new()).iter().map(|d| d.to_def_id()),
+ )
+}
+
+/// On-demand query: yields a vector of the inherent impls for a specific type.
+pub fn inherent_impls(tcx: TyCtxt<'_>, ty_def_id: DefId) -> &[DefId] {
+ let ty_def_id = ty_def_id.expect_local();
+
+ let crate_map = tcx.crate_inherent_impls(());
+ match crate_map.inherent_impls.get(&ty_def_id) {
+ Some(v) => &v[..],
+ None => &[],
+ }
+}
+
+struct InherentCollect<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ impls_map: CrateInherentImpls,
+}
+
+const INTO_CORE: &str = "consider moving this inherent impl into `core` if possible";
+const INTO_DEFINING_CRATE: &str =
+ "consider moving this inherent impl into the crate defining the type if possible";
+const ADD_ATTR_TO_TY: &str = "alternatively add `#[rustc_has_incoherent_inherent_impls]` to the type \
+ and `#[rustc_allow_incoherent_impl]` to the relevant impl items";
+const ADD_ATTR: &str =
+ "alternatively add `#[rustc_allow_incoherent_impl]` to the relevant impl items";
+
+impl<'tcx> InherentCollect<'tcx> {
+ fn check_def_id(&mut self, item: &hir::Item<'_>, self_ty: Ty<'tcx>, def_id: DefId) {
+ let impl_def_id = item.def_id;
+ if let Some(def_id) = def_id.as_local() {
+            // Add the implementation to the mapping from base type def ID
+            // to implementations, if there is a base type for this
+            // implementation and the implementation does not implement a trait.
+ let vec = self.impls_map.inherent_impls.entry(def_id).or_default();
+ vec.push(impl_def_id.to_def_id());
+ return;
+ }
+
+ if self.tcx.features().rustc_attrs {
+ let hir::ItemKind::Impl(&hir::Impl { items, .. }) = item.kind else {
+ bug!("expected `impl` item: {:?}", item);
+ };
+
+ if !self.tcx.has_attr(def_id, sym::rustc_has_incoherent_inherent_impls) {
+ struct_span_err!(
+ self.tcx.sess,
+ item.span,
+ E0390,
+ "cannot define inherent `impl` for a type outside of the crate where the type is defined",
+ )
+ .help(INTO_DEFINING_CRATE)
+ .span_help(item.span, ADD_ATTR_TO_TY)
+ .emit();
+ return;
+ }
+
+ for impl_item in items {
+ if !self
+ .tcx
+ .has_attr(impl_item.id.def_id.to_def_id(), sym::rustc_allow_incoherent_impl)
+ {
+ struct_span_err!(
+ self.tcx.sess,
+ item.span,
+ E0390,
+ "cannot define inherent `impl` for a type outside of the crate where the type is defined",
+ )
+ .help(INTO_DEFINING_CRATE)
+ .span_help(impl_item.span, ADD_ATTR)
+ .emit();
+ return;
+ }
+ }
+
+ if let Some(simp) = simplify_type(self.tcx, self_ty, TreatParams::AsInfer) {
+ self.impls_map.incoherent_impls.entry(simp).or_default().push(impl_def_id);
+ } else {
+ bug!("unexpected self type: {:?}", self_ty);
+ }
+ } else {
+ struct_span_err!(
+ self.tcx.sess,
+ item.span,
+ E0116,
+ "cannot define inherent `impl` for a type outside of the crate \
+ where the type is defined"
+ )
+ .span_label(item.span, "impl for type defined outside of crate.")
+ .note("define and implement a trait or new type instead")
+ .emit();
+ }
+ }
+
+ fn check_primitive_impl(
+ &mut self,
+ impl_def_id: LocalDefId,
+ ty: Ty<'tcx>,
+ items: &[hir::ImplItemRef],
+ span: Span,
+ ) {
+ if !self.tcx.hir().rustc_coherence_is_core() {
+ if self.tcx.features().rustc_attrs {
+ for item in items {
+ if !self
+ .tcx
+ .has_attr(item.id.def_id.to_def_id(), sym::rustc_allow_incoherent_impl)
+ {
+ struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0390,
+ "cannot define inherent `impl` for primitive types outside of `core`",
+ )
+ .help(INTO_CORE)
+ .span_help(item.span, ADD_ATTR)
+ .emit();
+ return;
+ }
+ }
+ } else {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ span,
+ E0390,
+ "cannot define inherent `impl` for primitive types",
+ );
+ err.help("consider using an extension trait instead");
+ if let ty::Ref(_, subty, _) = ty.kind() {
+ err.note(&format!(
+ "you could also try moving the reference to \
+ uses of `{}` (such as `self`) within the implementation",
+ subty
+ ));
+ }
+ err.emit();
+ return;
+ }
+ }
+
+ if let Some(simp) = simplify_type(self.tcx, ty, TreatParams::AsInfer) {
+ self.impls_map.incoherent_impls.entry(simp).or_default().push(impl_def_id);
+ } else {
+ bug!("unexpected primitive type: {:?}", ty);
+ }
+ }
+
+ fn check_item(&mut self, id: hir::ItemId) {
+ if !matches!(self.tcx.def_kind(id.def_id), DefKind::Impl) {
+ return;
+ }
+
+ let item = self.tcx.hir().item(id);
+ let hir::ItemKind::Impl(hir::Impl { of_trait: None, self_ty: ty, ref items, .. }) = item.kind else {
+ return;
+ };
+
+ let self_ty = self.tcx.type_of(item.def_id);
+ match *self_ty.kind() {
+ ty::Adt(def, _) => {
+ self.check_def_id(item, self_ty, def.did());
+ }
+ ty::Foreign(did) => {
+ self.check_def_id(item, self_ty, did);
+ }
+ ty::Dynamic(data, ..) if data.principal_def_id().is_some() => {
+ self.check_def_id(item, self_ty, data.principal_def_id().unwrap());
+ }
+ ty::Dynamic(..) => {
+ struct_span_err!(
+ self.tcx.sess,
+ ty.span,
+ E0785,
+ "cannot define inherent `impl` for a dyn auto trait"
+ )
+ .span_label(ty.span, "impl requires at least one non-auto trait")
+ .note("define and implement a new trait or type instead")
+ .emit();
+ }
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::Never
+ | ty::FnPtr(_)
+ | ty::Tuple(..) => self.check_primitive_impl(item.def_id, self_ty, items, ty.span),
+ ty::Projection(..) | ty::Opaque(..) | ty::Param(_) => {
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ ty.span,
+ E0118,
+ "no nominal type found for inherent implementation"
+ );
+
+ err.span_label(ty.span, "impl requires a nominal type")
+ .note("either implement a trait on it or create a newtype to wrap it instead");
+
+ err.emit();
+ }
+ ty::FnDef(..)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Bound(..)
+ | ty::Placeholder(_)
+ | ty::Infer(_) => {
+ bug!("unexpected impl self type of impl: {:?} {:?}", item.def_id, self_ty);
+ }
+ ty::Error(_) => {}
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
new file mode 100644
index 000000000..03e076bf5
--- /dev/null
+++ b/compiler/rustc_typeck/src/coherence/inherent_impls_overlap.rs
@@ -0,0 +1,307 @@
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::IndexVec;
+use rustc_middle::traits::specialization_graph::OverlapMode;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_span::Symbol;
+use rustc_trait_selection::traits::{self, SkipLeakCheck};
+use smallvec::SmallVec;
+use std::collections::hash_map::Entry;
+
+pub fn crate_inherent_impls_overlap_check(tcx: TyCtxt<'_>, (): ()) {
+ let mut inherent_overlap_checker = InherentOverlapChecker { tcx };
+ for id in tcx.hir().items() {
+ inherent_overlap_checker.check_item(id);
+ }
+}
+
+struct InherentOverlapChecker<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> InherentOverlapChecker<'tcx> {
+ /// Checks whether any associated items in impls 1 and 2 share the same identifier and
+ /// namespace.
+ fn impls_have_common_items(
+ &self,
+ impl_items1: &ty::AssocItems<'_>,
+ impl_items2: &ty::AssocItems<'_>,
+ ) -> bool {
+ let mut impl_items1 = &impl_items1;
+ let mut impl_items2 = &impl_items2;
+
+ // Performance optimization: iterate over the smaller list
+ if impl_items1.len() > impl_items2.len() {
+ std::mem::swap(&mut impl_items1, &mut impl_items2);
+ }
+
+ for item1 in impl_items1.in_definition_order() {
+ let collision = impl_items2
+ .filter_by_name_unhygienic(item1.name)
+ .any(|item2| self.compare_hygienically(item1, item2));
+
+ if collision {
+ return true;
+ }
+ }
+
+ false
+ }
+
+ fn compare_hygienically(&self, item1: &ty::AssocItem, item2: &ty::AssocItem) -> bool {
+ // Symbols and namespace match, compare hygienically.
+ item1.kind.namespace() == item2.kind.namespace()
+ && item1.ident(self.tcx).normalize_to_macros_2_0()
+ == item2.ident(self.tcx).normalize_to_macros_2_0()
+ }
+
+ fn check_for_common_items_in_impls(
+ &self,
+ impl1: DefId,
+ impl2: DefId,
+ overlap: traits::OverlapResult<'_>,
+ ) {
+ let impl_items1 = self.tcx.associated_items(impl1);
+ let impl_items2 = self.tcx.associated_items(impl2);
+
+ for item1 in impl_items1.in_definition_order() {
+ let collision = impl_items2
+ .filter_by_name_unhygienic(item1.name)
+ .find(|item2| self.compare_hygienically(item1, item2));
+
+ if let Some(item2) = collision {
+ let name = item1.ident(self.tcx).normalize_to_macros_2_0();
+ let mut err = struct_span_err!(
+ self.tcx.sess,
+ self.tcx.def_span(item1.def_id),
+ E0592,
+ "duplicate definitions with name `{}`",
+ name
+ );
+ err.span_label(
+ self.tcx.def_span(item1.def_id),
+ format!("duplicate definitions for `{}`", name),
+ );
+ err.span_label(
+ self.tcx.def_span(item2.def_id),
+ format!("other definition for `{}`", name),
+ );
+
+ for cause in &overlap.intercrate_ambiguity_causes {
+ cause.add_intercrate_ambiguity_hint(&mut err);
+ }
+
+ if overlap.involves_placeholder {
+ traits::add_placeholder_note(&mut err);
+ }
+
+ err.emit();
+ }
+ }
+ }
+
+ fn check_for_overlapping_inherent_impls(
+ &self,
+ overlap_mode: OverlapMode,
+ impl1_def_id: DefId,
+ impl2_def_id: DefId,
+ ) {
+ traits::overlapping_impls(
+ self.tcx,
+ impl1_def_id,
+ impl2_def_id,
+ // We go ahead and just skip the leak check for
+ // inherent impls without warning.
+ SkipLeakCheck::Yes,
+ overlap_mode,
+ |overlap| {
+ self.check_for_common_items_in_impls(impl1_def_id, impl2_def_id, overlap);
+ false
+ },
+ || true,
+ );
+ }
+
+ fn check_item(&mut self, id: hir::ItemId) {
+ let def_kind = self.tcx.def_kind(id.def_id);
+ if !matches!(def_kind, DefKind::Enum | DefKind::Struct | DefKind::Trait | DefKind::Union) {
+ return;
+ }
+
+ let impls = self.tcx.inherent_impls(id.def_id);
+
+        // If there is only one inherent impl block,
+        // there is nothing to check it against for overlap.
+ if impls.len() <= 1 {
+ return;
+ }
+
+ let overlap_mode = OverlapMode::get(self.tcx, id.def_id.to_def_id());
+
+ let impls_items = impls
+ .iter()
+ .map(|impl_def_id| (impl_def_id, self.tcx.associated_items(*impl_def_id)))
+ .collect::<SmallVec<[_; 8]>>();
+
+        // Perform an O(n^2) algorithm for small n,
+ // otherwise switch to an allocating algorithm with
+ // faster asymptotic runtime.
+ const ALLOCATING_ALGO_THRESHOLD: usize = 500;
+ if impls.len() < ALLOCATING_ALGO_THRESHOLD {
+ for (i, &(&impl1_def_id, impl_items1)) in impls_items.iter().enumerate() {
+ for &(&impl2_def_id, impl_items2) in &impls_items[(i + 1)..] {
+ if self.impls_have_common_items(impl_items1, impl_items2) {
+ self.check_for_overlapping_inherent_impls(
+ overlap_mode,
+ impl1_def_id,
+ impl2_def_id,
+ );
+ }
+ }
+ }
+ } else {
+ // Build a set of connected regions of impl blocks.
+ // Two impl blocks are regarded as connected if they share
+ // an item with the same unhygienic identifier.
+ // After we have assembled the connected regions,
+ // run the O(n^2) algorithm on each connected region.
+            // This is faster than running the algorithm over the
+            // entire graph when there are many connected regions.
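+            //
+            // As a small worked example: if three impl blocks define items
+            // named {a}, {a, b}, and {c}, the first two share `a` and merge
+            // into one region while {c} stays alone, so the quadratic pass
+            // runs on regions of size 2 and 1 instead of on all 3 blocks.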
+
+ rustc_index::newtype_index! {
+ pub struct RegionId {
+ ENCODABLE = custom
+ }
+ }
+ struct ConnectedRegion {
+ idents: SmallVec<[Symbol; 8]>,
+ impl_blocks: FxHashSet<usize>,
+ }
+ let mut connected_regions: IndexVec<RegionId, _> = Default::default();
+ // Reverse map from the Symbol to the connected region id.
+ let mut connected_region_ids = FxHashMap::default();
+
+ for (i, &(&_impl_def_id, impl_items)) in impls_items.iter().enumerate() {
+ if impl_items.len() == 0 {
+ continue;
+ }
+ // First obtain a list of existing connected region ids
+ let mut idents_to_add = SmallVec::<[Symbol; 8]>::new();
+ let mut ids = impl_items
+ .in_definition_order()
+ .filter_map(|item| {
+ let entry = connected_region_ids.entry(item.name);
+ if let Entry::Occupied(e) = &entry {
+ Some(*e.get())
+ } else {
+ idents_to_add.push(item.name);
+ None
+ }
+ })
+ .collect::<SmallVec<[RegionId; 8]>>();
+ // Sort the id list so that the algorithm is deterministic
+ ids.sort_unstable();
+ ids.dedup();
+ let ids = ids;
+ match &ids[..] {
+ // Create a new connected region
+ [] => {
+ let id_to_set = connected_regions.next_index();
+ // Update the connected region ids
+ for ident in &idents_to_add {
+ connected_region_ids.insert(*ident, id_to_set);
+ }
+ connected_regions.insert(
+ id_to_set,
+ ConnectedRegion {
+ idents: idents_to_add,
+ impl_blocks: std::iter::once(i).collect(),
+ },
+ );
+ }
+ // Take the only id inside the list
+ &[id_to_set] => {
+ let region = connected_regions[id_to_set].as_mut().unwrap();
+ region.impl_blocks.insert(i);
+ region.idents.extend_from_slice(&idents_to_add);
+ // Update the connected region ids
+ for ident in &idents_to_add {
+ connected_region_ids.insert(*ident, id_to_set);
+ }
+ }
+ // We have multiple connected regions to merge.
+ // In the worst case this might add impl blocks
+ // one by one and can thus be O(n^2) in the size
+ // of the resulting final connected region, but
+ // this is no issue as the final step to check
+ // for overlaps runs in O(n^2) as well.
+ &[id_to_set, ..] => {
+ let mut region = connected_regions.remove(id_to_set).unwrap();
+ region.impl_blocks.insert(i);
+ region.idents.extend_from_slice(&idents_to_add);
+ // Update the connected region ids
+ for ident in &idents_to_add {
+ connected_region_ids.insert(*ident, id_to_set);
+ }
+
+ // Remove other regions from ids.
+ for &id in ids.iter() {
+ if id == id_to_set {
+ continue;
+ }
+ let r = connected_regions.remove(id).unwrap();
+ for ident in r.idents.iter() {
+ connected_region_ids.insert(*ident, id_to_set);
+ }
+ region.idents.extend_from_slice(&r.idents);
+ region.impl_blocks.extend(r.impl_blocks);
+ }
+
+ connected_regions.insert(id_to_set, region);
+ }
+ }
+ }
+
+ debug!(
+ "churning through {} components (sum={}, avg={}, var={}, max={})",
+ connected_regions.len(),
+ impls.len(),
+ impls.len() / connected_regions.len(),
+ {
+ let avg = impls.len() / connected_regions.len();
+ let s = connected_regions
+ .iter()
+ .flatten()
+ .map(|r| r.impl_blocks.len() as isize - avg as isize)
+ .map(|v| v.abs() as usize)
+ .sum::<usize>();
+ s / connected_regions.len()
+ },
+ connected_regions.iter().flatten().map(|r| r.impl_blocks.len()).max().unwrap()
+ );
+ // List of connected regions is built. Now, run the overlap check
+ // for each pair of impl blocks in the same connected region.
+ for region in connected_regions.into_iter().flatten() {
+ let mut impl_blocks =
+ region.impl_blocks.into_iter().collect::<SmallVec<[usize; 8]>>();
+ impl_blocks.sort_unstable();
+ for (i, &impl1_items_idx) in impl_blocks.iter().enumerate() {
+ let &(&impl1_def_id, impl_items1) = &impls_items[impl1_items_idx];
+ for &impl2_items_idx in impl_blocks[(i + 1)..].iter() {
+ let &(&impl2_def_id, impl_items2) = &impls_items[impl2_items_idx];
+ if self.impls_have_common_items(impl_items1, impl_items2) {
+ self.check_for_overlapping_inherent_impls(
+ overlap_mode,
+ impl1_def_id,
+ impl2_def_id,
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/coherence/mod.rs b/compiler/rustc_typeck/src/coherence/mod.rs
new file mode 100644
index 000000000..ae9ebe590
--- /dev/null
+++ b/compiler/rustc_typeck/src/coherence/mod.rs
@@ -0,0 +1,237 @@
+// Coherence phase
+//
+// The job of the coherence phase of typechecking is to ensure that
+// each trait has at most one implementation for each type. This is
+// done by the orphan and overlap modules. Then we build up various
+// mappings. That mapping code resides here.
+
+use rustc_errors::struct_span_err;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+use rustc_trait_selection::traits;
+
+mod builtin;
+mod inherent_impls;
+mod inherent_impls_overlap;
+mod orphan;
+mod unsafety;
+
+fn check_impl(tcx: TyCtxt<'_>, impl_def_id: LocalDefId, trait_ref: ty::TraitRef<'_>) {
+ debug!(
+ "(checking implementation) adding impl for trait '{:?}', item '{}'",
+ trait_ref,
+ tcx.def_path_str(impl_def_id.to_def_id())
+ );
+
+    // Skip impls where one of the involved types is an error type.
+ // This occurs with e.g., resolve failures (#30589).
+ if trait_ref.references_error() {
+ return;
+ }
+
+ enforce_trait_manually_implementable(tcx, impl_def_id, trait_ref.def_id);
+ enforce_empty_impls_for_marker_traits(tcx, impl_def_id, trait_ref.def_id);
+}
+
+fn enforce_trait_manually_implementable(
+ tcx: TyCtxt<'_>,
+ impl_def_id: LocalDefId,
+ trait_def_id: DefId,
+) {
+ let did = Some(trait_def_id);
+ let li = tcx.lang_items();
+ let impl_header_span = tcx.def_span(impl_def_id);
+
+ // Disallow *all* explicit impls of `Pointee`, `DiscriminantKind`, `Sized` and `Unsize` for now.
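+    // (For instance, `impl Sized for MyType {}` is rejected with E0322 no
+    // matter what `MyType` is.)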
+ if did == li.pointee_trait() {
+ struct_span_err!(
+ tcx.sess,
+ impl_header_span,
+ E0322,
+ "explicit impls for the `Pointee` trait are not permitted"
+ )
+ .span_label(impl_header_span, "impl of `Pointee` not allowed")
+ .emit();
+ return;
+ }
+
+ if did == li.discriminant_kind_trait() {
+ struct_span_err!(
+ tcx.sess,
+ impl_header_span,
+ E0322,
+ "explicit impls for the `DiscriminantKind` trait are not permitted"
+ )
+ .span_label(impl_header_span, "impl of `DiscriminantKind` not allowed")
+ .emit();
+ return;
+ }
+
+ if did == li.sized_trait() {
+ struct_span_err!(
+ tcx.sess,
+ impl_header_span,
+ E0322,
+ "explicit impls for the `Sized` trait are not permitted"
+ )
+ .span_label(impl_header_span, "impl of `Sized` not allowed")
+ .emit();
+ return;
+ }
+
+ if did == li.unsize_trait() {
+ struct_span_err!(
+ tcx.sess,
+ impl_header_span,
+ E0328,
+ "explicit impls for the `Unsize` trait are not permitted"
+ )
+ .span_label(impl_header_span, "impl of `Unsize` not allowed")
+ .emit();
+ return;
+ }
+
+ if tcx.features().unboxed_closures {
+ // the feature gate allows all Fn traits
+ return;
+ }
+
+ if let ty::trait_def::TraitSpecializationKind::AlwaysApplicable =
+ tcx.trait_def(trait_def_id).specialization_kind
+ {
+ if !tcx.features().specialization && !tcx.features().min_specialization {
+ tcx.sess
+ .struct_span_err(
+ impl_header_span,
+ "implementing `rustc_specialization_trait` traits is unstable",
+ )
+ .help("add `#![feature(min_specialization)]` to the crate attributes to enable")
+ .emit();
+ return;
+ }
+ }
+}
+
+/// We allow impls of marker traits to overlap, so they must not contain items;
+/// otherwise it would be ambiguous which impl's associated item to use.
+fn enforce_empty_impls_for_marker_traits(
+ tcx: TyCtxt<'_>,
+ impl_def_id: LocalDefId,
+ trait_def_id: DefId,
+) {
+ if !tcx.trait_def(trait_def_id).is_marker {
+ return;
+ }
+
+ if tcx.associated_item_def_ids(trait_def_id).is_empty() {
+ return;
+ }
+
+ struct_span_err!(
+ tcx.sess,
+ tcx.def_span(impl_def_id),
+ E0715,
+ "impls for marker traits cannot contain items"
+ )
+ .emit();
+}
+
+pub fn provide(providers: &mut Providers) {
+ use self::builtin::coerce_unsized_info;
+ use self::inherent_impls::{crate_incoherent_impls, crate_inherent_impls, inherent_impls};
+ use self::inherent_impls_overlap::crate_inherent_impls_overlap_check;
+ use self::orphan::orphan_check_impl;
+
+ *providers = Providers {
+ coherent_trait,
+ crate_inherent_impls,
+ crate_incoherent_impls,
+ inherent_impls,
+ crate_inherent_impls_overlap_check,
+ coerce_unsized_info,
+ orphan_check_impl,
+ ..*providers
+ };
+}
+
+fn coherent_trait(tcx: TyCtxt<'_>, def_id: DefId) {
+ // Trigger building the specialization graph for the trait. This will detect and report any
+ // overlap errors.
+ tcx.ensure().specialization_graph_of(def_id);
+
+ let impls = tcx.hir().trait_impls(def_id);
+ for &impl_def_id in impls {
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+
+ check_impl(tcx, impl_def_id, trait_ref);
+ check_object_overlap(tcx, impl_def_id, trait_ref);
+
+ tcx.sess.time("unsafety_checking", || unsafety::check_item(tcx, impl_def_id));
+ tcx.sess.time("orphan_checking", || tcx.ensure().orphan_check_impl(impl_def_id));
+ }
+
+ builtin::check_trait(tcx, def_id);
+}
+
+/// Checks whether an impl overlaps with the automatic `impl Trait for dyn Trait`.
+fn check_object_overlap<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_def_id: LocalDefId,
+ trait_ref: ty::TraitRef<'tcx>,
+) {
+ let trait_def_id = trait_ref.def_id;
+
+ if trait_ref.references_error() {
+ debug!("coherence: skipping impl {:?} with error {:?}", impl_def_id, trait_ref);
+ return;
+ }
+
+ // check for overlap with the automatic `impl Trait for dyn Trait`
+ if let ty::Dynamic(data, ..) = trait_ref.self_ty().kind() {
+        // This is something like `impl Trait1 for dyn Trait2`, which is illegal
+        // if `Trait1` is a supertrait of `Trait2` or if `Trait2` is not object safe.
+
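+        // For example, the following impl is rejected with E0371 because
+        // `Foo` is a supertrait of `Bar`, so `dyn Bar` already implements it:
+        //
+        //     trait Foo {}
+        //     trait Bar: Foo {}
+        //     impl Foo for dyn Bar {}
+        //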
+ let component_def_ids = data.iter().flat_map(|predicate| {
+ match predicate.skip_binder() {
+ ty::ExistentialPredicate::Trait(tr) => Some(tr.def_id),
+ ty::ExistentialPredicate::AutoTrait(def_id) => Some(def_id),
+ // An associated type projection necessarily comes with
+ // an additional `Trait` requirement.
+ ty::ExistentialPredicate::Projection(..) => None,
+ }
+ });
+
+ for component_def_id in component_def_ids {
+ if !tcx.is_object_safe(component_def_id) {
+ // Without the 'object_safe_for_dispatch' feature this is an error
+ // which will be reported by wfcheck. Ignore it here.
+ // This is tested by `coherence-impl-trait-for-trait-object-safe.rs`.
+ // With the feature enabled, the trait is not implemented automatically,
+ // so this is valid.
+ } else {
+ let mut supertrait_def_ids = traits::supertrait_def_ids(tcx, component_def_id);
+ if supertrait_def_ids.any(|d| d == trait_def_id) {
+ let span = tcx.def_span(impl_def_id);
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0371,
+ "the object type `{}` automatically implements the trait `{}`",
+ trait_ref.self_ty(),
+ tcx.def_path_str(trait_def_id)
+ )
+ .span_label(
+ span,
+ format!(
+ "`{}` automatically implements trait `{}`",
+ trait_ref.self_ty(),
+ tcx.def_path_str(trait_def_id)
+ ),
+ )
+ .emit();
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/coherence/orphan.rs b/compiler/rustc_typeck/src/coherence/orphan.rs
new file mode 100644
index 000000000..1608550aa
--- /dev/null
+++ b/compiler/rustc_typeck/src/coherence/orphan.rs
@@ -0,0 +1,507 @@
+//! Orphan checker: every impl either implements a trait defined in this
+//! crate or pertains to a type defined in this crate.
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::struct_span_err;
+use rustc_errors::{Diagnostic, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::util::IgnoreRegions;
+use rustc_middle::ty::{
+ self, ImplPolarity, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor,
+};
+use rustc_session::lint;
+use rustc_span::def_id::{DefId, LocalDefId};
+use rustc_span::Span;
+use rustc_trait_selection::traits;
+use std::ops::ControlFlow;
+
+#[instrument(skip(tcx), level = "debug")]
+pub(crate) fn orphan_check_impl(
+ tcx: TyCtxt<'_>,
+ impl_def_id: LocalDefId,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_ref = tcx.impl_trait_ref(impl_def_id).unwrap();
+ if let Some(err) = trait_ref.error_reported() {
+ return Err(err);
+ }
+
+ let ret = do_orphan_check_impl(tcx, trait_ref, impl_def_id);
+ if tcx.trait_is_auto(trait_ref.def_id) {
+ lint_auto_trait_impl(tcx, trait_ref, impl_def_id);
+ }
+
+ ret
+}
+
+fn do_orphan_check_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+ def_id: LocalDefId,
+) -> Result<(), ErrorGuaranteed> {
+ let trait_def_id = trait_ref.def_id;
+
+ let item = tcx.hir().item(hir::ItemId { def_id });
+ let hir::ItemKind::Impl(ref impl_) = item.kind else {
+ bug!("{:?} is not an impl: {:?}", def_id, item);
+ };
+ let sp = tcx.def_span(def_id);
+ let tr = impl_.of_trait.as_ref().unwrap();
+
+ // Ensure no opaque types are present in this impl header. See issues #76202 and #86411 for examples,
+ // and #84660 where it would otherwise allow unsoundness.
+ if trait_ref.has_opaque_types() {
+ trace!("{:#?}", item);
+ // First we find the opaque type in question.
+ for ty in trait_ref.substs {
+ for ty in ty.walk() {
+ let ty::subst::GenericArgKind::Type(ty) = ty.unpack() else { continue };
+ let ty::Opaque(def_id, _) = *ty.kind() else { continue };
+ trace!(?def_id);
+
+ // Then we search for mentions of the opaque type's type alias in the HIR
+ struct SpanFinder<'tcx> {
+ sp: Span,
+ def_id: DefId,
+ tcx: TyCtxt<'tcx>,
+ }
+ impl<'v, 'tcx> hir::intravisit::Visitor<'v> for SpanFinder<'tcx> {
+ #[instrument(level = "trace", skip(self, _id))]
+ fn visit_path(&mut self, path: &'v hir::Path<'v>, _id: hir::HirId) {
+ // You can't mention an opaque type directly, so we look for type aliases
+ if let hir::def::Res::Def(hir::def::DefKind::TyAlias, def_id) = path.res {
+ // And check if that type alias's type contains the opaque type we're looking for
+ for arg in self.tcx.type_of(def_id).walk() {
+ if let GenericArgKind::Type(ty) = arg.unpack() {
+ if let ty::Opaque(def_id, _) = *ty.kind() {
+ if def_id == self.def_id {
+ // Finally we update the span to the mention of the type alias
+ self.sp = path.span;
+ return;
+ }
+ }
+ }
+ }
+ }
+ hir::intravisit::walk_path(self, path)
+ }
+ }
+
+ let mut visitor = SpanFinder { sp, def_id, tcx };
+ hir::intravisit::walk_item(&mut visitor, item);
+ let reported = tcx
+ .sess
+ .struct_span_err(visitor.sp, "cannot implement trait on type alias impl trait")
+ .span_note(tcx.def_span(def_id), "type alias impl trait defined here")
+ .emit();
+ return Err(reported);
+ }
+ }
+ span_bug!(sp, "opaque type not found, but `has_opaque_types` is set")
+ }
+
+ match traits::orphan_check(tcx, item.def_id.to_def_id()) {
+ Ok(()) => {}
+ Err(err) => emit_orphan_check_error(
+ tcx,
+ sp,
+ item.span,
+ tr.path.span,
+ trait_ref.self_ty(),
+ impl_.self_ty.span,
+ &impl_.generics,
+ err,
+ )?,
+ }
+
+ // In addition to the above rules, we restrict impls of auto traits
+ // so that they can only be implemented on nominal types, such as structs,
+ // enums or foreign types. To see why this restriction exists, consider the
+ // following example (#22978). Imagine that crate A defines an auto trait
+ // `Foo` and a fn that operates on pairs of types:
+ //
+ // ```
+ // // Crate A
+ // auto trait Foo { }
+ // fn two_foos<A:Foo,B:Foo>(..) {
+ // one_foo::<(A,B)>(..)
+ // }
+ // fn one_foo<T:Foo>(..) { .. }
+ // ```
+ //
+ // This type-checks fine; in particular the fn
+ // `two_foos` is able to conclude that `(A,B):Foo`
+ // because `A:Foo` and `B:Foo`.
+ //
+ // Now imagine that crate B comes along and does the following:
+ //
+ // ```
+ // struct A { }
+ // struct B { }
+ // impl Foo for A { }
+ // impl Foo for B { }
+ // impl !Send for (A, B) { }
+ // ```
+ //
+ // This final impl is legal according to the orphan
+ // rules, but it invalidates the reasoning from
+ // `two_foos` above.
+ debug!(
+ "trait_ref={:?} trait_def_id={:?} trait_is_auto={}",
+ trait_ref,
+ trait_def_id,
+ tcx.trait_is_auto(trait_def_id)
+ );
+
+ if tcx.trait_is_auto(trait_def_id) && !trait_def_id.is_local() {
+ let self_ty = trait_ref.self_ty();
+ let opt_self_def_id = match *self_ty.kind() {
+ ty::Adt(self_def, _) => Some(self_def.did()),
+ ty::Foreign(did) => Some(did),
+ _ => None,
+ };
+
+ let msg = match opt_self_def_id {
+ // We only want to permit nominal types, but not *all* nominal types.
+ // They must be local to the current crate, so that people
+ // can't do `unsafe impl Send for Rc<SomethingLocal>` or
+ // `impl !Send for Box<SomethingLocalAndSend>`.
+ Some(self_def_id) => {
+ if self_def_id.is_local() {
+ None
+ } else {
+ Some((
+ format!(
+ "cross-crate traits with a default impl, like `{}`, \
+ can only be implemented for a struct/enum type \
+ defined in the current crate",
+ tcx.def_path_str(trait_def_id)
+ ),
+ "can't implement cross-crate trait for type in another crate",
+ ))
+ }
+ }
+ _ => Some((
+ format!(
+ "cross-crate traits with a default impl, like `{}`, can \
+ only be implemented for a struct/enum type, not `{}`",
+ tcx.def_path_str(trait_def_id),
+ self_ty
+ ),
+ "can't implement cross-crate trait with a default impl for \
+ non-struct/enum type",
+ )),
+ };
+
+ if let Some((msg, label)) = msg {
+ let reported =
+ struct_span_err!(tcx.sess, sp, E0321, "{}", msg).span_label(sp, label).emit();
+ return Err(reported);
+ }
+ }
+
+ Ok(())
+}
+
+fn emit_orphan_check_error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sp: Span,
+ full_impl_span: Span,
+ trait_span: Span,
+ self_ty: Ty<'tcx>,
+ self_ty_span: Span,
+ generics: &hir::Generics<'tcx>,
+ err: traits::OrphanCheckErr<'tcx>,
+) -> Result<!, ErrorGuaranteed> {
+ Err(match err {
+ traits::OrphanCheckErr::NonLocalInputType(tys) => {
+ let msg = match self_ty.kind() {
+ ty::Adt(..) => "can be implemented for types defined outside of the crate",
+ _ if self_ty.is_primitive() => "can be implemented for primitive types",
+ _ => "can be implemented for arbitrary types",
+ };
+ let mut err = struct_span_err!(
+ tcx.sess,
+ sp,
+ E0117,
+ "only traits defined in the current crate {msg}"
+ );
+ err.span_label(sp, "impl doesn't use only types from inside the current crate");
+ for (ty, is_target_ty) in &tys {
+ let mut ty = *ty;
+ tcx.infer_ctxt().enter(|infcx| {
+ // Remove the lifetimes unnecessary for this error.
+ ty = infcx.freshen(ty);
+ });
+ ty = match ty.kind() {
+ // Remove the type arguments from the output, as they are not relevant.
+ // You can think of this as the reverse of `resolve_vars_if_possible`.
+ // That way if we had `Vec<MyType>`, we will properly attribute the
+ // problem to `Vec<T>` and avoid confusing the user if they were to see
+ // `MyType` in the error.
+ ty::Adt(def, _) => tcx.mk_adt(*def, ty::List::empty()),
+ _ => ty,
+ };
+ let this = "this".to_string();
+ let (ty, postfix) = match &ty.kind() {
+ ty::Slice(_) => (this, " because slices are always foreign"),
+ ty::Array(..) => (this, " because arrays are always foreign"),
+ ty::Tuple(..) => (this, " because tuples are always foreign"),
+ ty::RawPtr(ptr_ty) => {
+ emit_newtype_suggestion_for_raw_ptr(
+ full_impl_span,
+ self_ty,
+ self_ty_span,
+ ptr_ty,
+ &mut err,
+ );
+
+ (format!("`{}`", ty), " because raw pointers are always foreign")
+ }
+ _ => (format!("`{}`", ty), ""),
+ };
+
+ let msg = format!("{} is not defined in the current crate{}", ty, postfix);
+ if *is_target_ty {
+                // Point at `D<A>` in `impl<A, B> C<B> for D<A>`
+ err.span_label(self_ty_span, &msg);
+ } else {
+                // Point at `C<B>` in `impl<A, B> C<B> for D<A>`
+ err.span_label(trait_span, &msg);
+ }
+ }
+ err.note("define and implement a trait or new type instead");
+ err.emit()
+ }
+ traits::OrphanCheckErr::UncoveredTy(param_ty, local_type) => {
+ let mut sp = sp;
+ for param in generics.params {
+ if param.name.ident().to_string() == param_ty.to_string() {
+ sp = param.span;
+ }
+ }
+
+ match local_type {
+ Some(local_type) => struct_span_err!(
+ tcx.sess,
+ sp,
+ E0210,
+ "type parameter `{}` must be covered by another type \
+ when it appears before the first local type (`{}`)",
+ param_ty,
+ local_type
+ )
+ .span_label(
+ sp,
+ format!(
+ "type parameter `{}` must be covered by another type \
+ when it appears before the first local type (`{}`)",
+ param_ty, local_type
+ ),
+ )
+ .note(
+ "implementing a foreign trait is only possible if at \
+ least one of the types for which it is implemented is local, \
+ and no uncovered type parameters appear before that first \
+ local type",
+ )
+ .note(
+ "in this case, 'before' refers to the following order: \
+ `impl<..> ForeignTrait<T1, ..., Tn> for T0`, \
+ where `T0` is the first and `Tn` is the last",
+ )
+ .emit(),
+ None => struct_span_err!(
+ tcx.sess,
+ sp,
+ E0210,
+ "type parameter `{}` must be used as the type parameter for some \
+ local type (e.g., `MyStruct<{}>`)",
+ param_ty,
+ param_ty
+ )
+ .span_label(
+ sp,
+ format!(
+ "type parameter `{}` must be used as the type parameter for some \
+ local type",
+ param_ty,
+ ),
+ )
+ .note(
+ "implementing a foreign trait is only possible if at \
+ least one of the types for which it is implemented is local",
+ )
+ .note(
+ "only traits defined in the current crate can be \
+ implemented for a type parameter",
+ )
+ .emit(),
+ }
+ }
+ })
+}
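+
+// For instance, `impl<T> ForeignTrait for T {}` (with `ForeignTrait` standing
+// in for any non-local trait) lands in the `UncoveredTy` arm above with no
+// local type, producing E0210: `T` must be used as the type parameter for
+// some local type.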
+
+fn emit_newtype_suggestion_for_raw_ptr(
+ full_impl_span: Span,
+ self_ty: Ty<'_>,
+ self_ty_span: Span,
+ ptr_ty: &ty::TypeAndMut<'_>,
+ diag: &mut Diagnostic,
+) {
+ if !self_ty.needs_subst() {
+ let mut_key = if ptr_ty.mutbl == rustc_middle::mir::Mutability::Mut { "mut " } else { "" };
+ let msg_sugg = "consider introducing a new wrapper type".to_owned();
+ let sugg = vec![
+ (
+ full_impl_span.shrink_to_lo(),
+ format!("struct WrapperType(*{}{});\n\n", mut_key, ptr_ty.ty),
+ ),
+ (self_ty_span, "WrapperType".to_owned()),
+ ];
+ diag.multipart_suggestion(msg_sugg, sugg, rustc_errors::Applicability::MaybeIncorrect);
+ }
+}
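+
+// For example, for an orphan-rule-violating `impl ForeignTrait for *mut
+// LocalType` (names hypothetical), this inserts
+// `struct WrapperType(*mut LocalType);` in front of the impl and replaces
+// the self type with `WrapperType`.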
+
+/// Lint impls of auto traits if they are likely to have
+/// unsound or surprising effects on auto impls.
+fn lint_auto_trait_impl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+ impl_def_id: LocalDefId,
+) {
+ if tcx.impl_polarity(impl_def_id) != ImplPolarity::Positive {
+ return;
+ }
+
+ assert_eq!(trait_ref.substs.len(), 1);
+ let self_ty = trait_ref.self_ty();
+ let (self_type_did, substs) = match self_ty.kind() {
+ ty::Adt(def, substs) => (def.did(), substs),
+ _ => {
+ // FIXME: should also lint for stuff like `&i32` but
+ // considering that auto traits are unstable, that
+ // isn't too important for now as this only affects
+ // crates using `nightly`, and std.
+ return;
+ }
+ };
+
+ // Impls which completely cover a given root type are fine as they
+ // disable auto impls entirely. So only lint if the substs
+ // are not a permutation of the identity substs.
+ let Err(arg) = tcx.uses_unique_generic_params(substs, IgnoreRegions::Yes) else {
+ // ok
+ return;
+ };
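+
+    // As an illustration (with a hypothetical foreign auto trait `AutoTr`
+    // and local struct `Foo<T, U>`): `impl<T> AutoTr for Foo<T, T> {}` hits
+    // `DuplicateParam` and `impl AutoTr for Foo<u8, u16> {}` hits `NotParam`,
+    // while the covering `impl<T, U> AutoTr for Foo<T, U> {}` returns `Ok`
+    // above and is never linted.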
+
+ // Ideally:
+ //
+ // - compute the requirements for the auto impl candidate
+ // - check whether these are implied by the non covering impls
+ // - if not, emit the lint
+ //
+ // What we do here is a bit simpler:
+ //
+ // - badly check if an auto impl candidate definitely does not apply
+ // for the given simplified type
+ // - if so, do not lint
+ if fast_reject_auto_impl(tcx, trait_ref.def_id, self_ty) {
+ // ok
+ return;
+ }
+
+ tcx.struct_span_lint_hir(
+ lint::builtin::SUSPICIOUS_AUTO_TRAIT_IMPLS,
+ tcx.hir().local_def_id_to_hir_id(impl_def_id),
+ tcx.def_span(impl_def_id),
+ |err| {
+ let item_span = tcx.def_span(self_type_did);
+ let self_descr = tcx.def_kind(self_type_did).descr(self_type_did);
+ let mut err = err.build(&format!(
+ "cross-crate traits with a default impl, like `{}`, \
+ should not be specialized",
+ tcx.def_path_str(trait_ref.def_id),
+ ));
+ match arg {
+ ty::util::NotUniqueParam::DuplicateParam(arg) => {
+ err.note(&format!("`{}` is mentioned multiple times", arg));
+ }
+ ty::util::NotUniqueParam::NotParam(arg) => {
+ err.note(&format!("`{}` is not a generic parameter", arg));
+ }
+ }
+ err.span_note(
+ item_span,
+ &format!(
+ "try using the same sequence of generic parameters as the {} definition",
+ self_descr,
+ ),
+ );
+ err.emit();
+ },
+ );
+}
+
+fn fast_reject_auto_impl<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, self_ty: Ty<'tcx>) -> bool {
+ struct DisableAutoTraitVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ self_ty_root: Ty<'tcx>,
+ seen: FxHashSet<DefId>,
+ }
+
+ impl<'tcx> TypeVisitor<'tcx> for DisableAutoTraitVisitor<'tcx> {
+ type BreakTy = ();
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let tcx = self.tcx;
+ if t != self.self_ty_root {
+ for impl_def_id in tcx.non_blanket_impls_for_ty(self.trait_def_id, t) {
+ match tcx.impl_polarity(impl_def_id) {
+ ImplPolarity::Negative => return ControlFlow::BREAK,
+ ImplPolarity::Reservation => {}
+ // FIXME(@lcnr): That's probably not good enough, idk
+ //
+ // We might just want to take the rustdoc code and somehow avoid
+ // explicit impls for `Self`.
+ ImplPolarity::Positive => return ControlFlow::CONTINUE,
+ }
+ }
+ }
+
+ match t.kind() {
+ ty::Adt(def, substs) if def.is_phantom_data() => substs.visit_with(self),
+ ty::Adt(def, substs) => {
+ // @lcnr: This is the only place where cycles can happen. We avoid this
+ // by only visiting each `DefId` once.
+ //
+            // This will be incorrect in subtle cases, but I don't care :)
+ if self.seen.insert(def.did()) {
+ for ty in def.all_fields().map(|field| field.ty(tcx, substs)) {
+ ty.visit_with(self)?;
+ }
+ }
+
+ ControlFlow::CONTINUE
+ }
+ _ => t.super_visit_with(self),
+ }
+ }
+ }
+
+ let self_ty_root = match self_ty.kind() {
+ ty::Adt(def, _) => tcx.mk_adt(*def, InternalSubsts::identity_for_item(tcx, def.did())),
+ _ => unimplemented!("unexpected self ty {:?}", self_ty),
+ };
+
+ self_ty_root
+ .visit_with(&mut DisableAutoTraitVisitor {
+ tcx,
+ self_ty_root,
+ trait_def_id,
+ seen: FxHashSet::default(),
+ })
+ .is_break()
+}
diff --git a/compiler/rustc_typeck/src/coherence/unsafety.rs b/compiler/rustc_typeck/src/coherence/unsafety.rs
new file mode 100644
index 000000000..e45fb5fe4
--- /dev/null
+++ b/compiler/rustc_typeck/src/coherence/unsafety.rs
@@ -0,0 +1,66 @@
+//! Unsafety checker: verifies that `unsafe impl` is used exactly where it is
+//! required, i.e. for impls of unsafe traits and for impls that rely on the
+//! `#[may_dangle]` attribute.
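+//!
+//! For example (trait names hypothetical):
+//!
+//!     unsafe trait Dangerous {}
+//!     trait Safe {}
+//!
+//!     impl Dangerous for () {}   // E0200: requires an `unsafe impl`
+//!     unsafe impl Safe for () {} // E0199: `Safe` is not an unsafe trait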
+
+use rustc_errors::struct_span_err;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::Unsafety;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::def_id::LocalDefId;
+
+pub(super) fn check_item(tcx: TyCtxt<'_>, def_id: LocalDefId) {
+ debug_assert!(matches!(tcx.def_kind(def_id), DefKind::Impl));
+ let item = tcx.hir().expect_item(def_id);
+ let hir::ItemKind::Impl(ref impl_) = item.kind else { bug!() };
+
+ if let Some(trait_ref) = tcx.impl_trait_ref(item.def_id) {
+ let trait_def = tcx.trait_def(trait_ref.def_id);
+ let unsafe_attr =
+ impl_.generics.params.iter().find(|p| p.pure_wrt_drop).map(|_| "may_dangle");
+ match (trait_def.unsafety, unsafe_attr, impl_.unsafety, impl_.polarity) {
+ (Unsafety::Normal, None, Unsafety::Unsafe, hir::ImplPolarity::Positive) => {
+ struct_span_err!(
+ tcx.sess,
+ item.span,
+ E0199,
+ "implementing the trait `{}` is not unsafe",
+ trait_ref.print_only_trait_path()
+ )
+ .emit();
+ }
+
+ (Unsafety::Unsafe, _, Unsafety::Normal, hir::ImplPolarity::Positive) => {
+ struct_span_err!(
+ tcx.sess,
+ item.span,
+ E0200,
+ "the trait `{}` requires an `unsafe impl` declaration",
+ trait_ref.print_only_trait_path()
+ )
+ .emit();
+ }
+
+ (Unsafety::Normal, Some(attr_name), Unsafety::Normal, hir::ImplPolarity::Positive) => {
+ struct_span_err!(
+ tcx.sess,
+ item.span,
+ E0569,
+ "requires an `unsafe impl` declaration due to `#[{}]` attribute",
+ attr_name
+ )
+ .emit();
+ }
+
+ (_, _, Unsafety::Unsafe, hir::ImplPolarity::Negative(_)) => {
+ // Reported in AST validation
+ tcx.sess.delay_span_bug(item.span, "unsafe negative impl");
+ }
+ (_, _, Unsafety::Normal, hir::ImplPolarity::Negative(_))
+ | (Unsafety::Unsafe, _, Unsafety::Unsafe, hir::ImplPolarity::Positive)
+ | (Unsafety::Normal, Some(_), Unsafety::Unsafe, hir::ImplPolarity::Positive)
+ | (Unsafety::Normal, None, Unsafety::Normal, _) => {
+ // OK
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/collect.rs b/compiler/rustc_typeck/src/collect.rs
new file mode 100644
index 000000000..99996e80c
--- /dev/null
+++ b/compiler/rustc_typeck/src/collect.rs
@@ -0,0 +1,3361 @@
+//! "Collection" is the process of determining the type and other external
+//! details of each item in Rust. Collection is specifically concerned
+//! with *inter-procedural* things -- for example, for a function
+//! definition, collection will figure out the type and signature of the
+//! function, but it will not visit the *body* of the function in any way,
+//! nor examine type annotations on local variables (that's the job of
+//! type *checking*).
+//!
+//! Collecting is ultimately defined by a bundle of queries that
+//! inquire after various facts about the items in the crate (e.g.,
+//! `type_of`, `generics_of`, `predicates_of`, etc). See the `provide` function
+//! for the full set.
+//!
+//! At present, however, we do run collection across all items in the
+//! crate as a kind of pass. This should eventually be factored away.
+
+use crate::astconv::AstConv;
+use crate::bounds::Bounds;
+use crate::check::intrinsic::intrinsic_operation_unsafety;
+use crate::constrained_generic_params as cgp;
+use crate::errors;
+use crate::middle::resolve_lifetime as rl;
+use rustc_ast as ast;
+use rustc_ast::{MetaItemKind, NestedMetaItem};
+use rustc_attr::{list_contains_name, InlineAttr, InstructionSetAttr, OptimizeAttr};
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet};
+use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind};
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::weak_lang_items;
+use rustc_hir::{GenericParamKind, HirId, Node};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::mono::Linkage;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::util::Discr;
+use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::{self, AdtKind, Const, DefIdTree, IsSuggestable, Ty, TyCtxt};
+use rustc_middle::ty::{ReprOptions, ToPredicate};
+use rustc_session::lint;
+use rustc_session::parse::feature_err;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::spec::{abi, SanitizerSet};
+use rustc_trait_selection::traits::error_reporting::suggestions::NextTypeParamName;
+use std::iter;
+
+mod item_bounds;
+mod type_of;
+
+#[derive(Debug)]
+struct OnlySelfBounds(bool);
+
+///////////////////////////////////////////////////////////////////////////
+// Main entry point
+
+fn collect_mod_item_types(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ tcx.hir().visit_item_likes_in_module(module_def_id, &mut CollectItemTypesVisitor { tcx });
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers {
+ opt_const_param_of: type_of::opt_const_param_of,
+ type_of: type_of::type_of,
+ item_bounds: item_bounds::item_bounds,
+ explicit_item_bounds: item_bounds::explicit_item_bounds,
+ generics_of,
+ predicates_of,
+ predicates_defined_on,
+ explicit_predicates_of,
+ super_predicates_of,
+ super_predicates_that_define_assoc_type,
+ trait_explicit_predicates_and_bounds,
+ type_param_predicates,
+ trait_def,
+ adt_def,
+ fn_sig,
+ impl_trait_ref,
+ impl_polarity,
+ is_foreign_item,
+ generator_kind,
+ codegen_fn_attrs,
+ asm_target_features,
+ collect_mod_item_types,
+ should_inherit_track_caller,
+ ..*providers
+ };
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+/// Context specific to some particular item. This is what implements
+/// `AstConv`. It has information about the predicates that are defined
+/// on the trait. Unfortunately, this predicate information is
+/// available in various different forms at various points in the
+/// process. So we can't just store a pointer to e.g., the AST or the
+/// parsed ty form, we have to be more flexible. To this end, the
+/// `ItemCtxt` is parameterized by a `DefId` that it uses to satisfy
+/// `get_type_parameter_bounds` requests, drawing the information from
+/// the AST (`hir::Generics`), recursively.
+pub struct ItemCtxt<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ item_def_id: DefId,
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+#[derive(Default)]
+pub(crate) struct HirPlaceholderCollector(pub(crate) Vec<Span>);
+
+impl<'v> Visitor<'v> for HirPlaceholderCollector {
+ fn visit_ty(&mut self, t: &'v hir::Ty<'v>) {
+ if let hir::TyKind::Infer = t.kind {
+ self.0.push(t.span);
+ }
+ intravisit::walk_ty(self, t)
+ }
+ fn visit_generic_arg(&mut self, generic_arg: &'v hir::GenericArg<'v>) {
+ match generic_arg {
+ hir::GenericArg::Infer(inf) => {
+ self.0.push(inf.span);
+ intravisit::walk_inf(self, inf);
+ }
+ hir::GenericArg::Type(t) => self.visit_ty(t),
+ _ => {}
+ }
+ }
+ fn visit_array_length(&mut self, length: &'v hir::ArrayLen) {
+ if let &hir::ArrayLen::Infer(_, span) = length {
+ self.0.push(span);
+ }
+ intravisit::walk_array_len(self, length)
+ }
+}
+
+struct CollectItemTypesVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+/// If there are any placeholder types (`_`), emit an error explaining that this is not allowed
+/// and suggest adding type parameters in the appropriate place, taking into consideration any and
+/// all already existing generic type parameters to avoid suggesting a name that is already in use.
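+///
+/// For example, `struct S(_);` is rejected with E0121 and the suggestion is
+/// along the lines of `struct S<T>(T);`.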
+pub(crate) fn placeholder_type_error<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: Option<&hir::Generics<'_>>,
+ placeholder_types: Vec<Span>,
+ suggest: bool,
+ hir_ty: Option<&hir::Ty<'_>>,
+ kind: &'static str,
+) {
+ if placeholder_types.is_empty() {
+ return;
+ }
+
+ placeholder_type_error_diag(tcx, generics, placeholder_types, vec![], suggest, hir_ty, kind)
+ .emit();
+}
+
+pub(crate) fn placeholder_type_error_diag<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: Option<&hir::Generics<'_>>,
+ placeholder_types: Vec<Span>,
+ additional_spans: Vec<Span>,
+ suggest: bool,
+ hir_ty: Option<&hir::Ty<'_>>,
+ kind: &'static str,
+) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ if placeholder_types.is_empty() {
+ return bad_placeholder(tcx, additional_spans, kind);
+ }
+
+ let params = generics.map(|g| g.params).unwrap_or_default();
+ let type_name = params.next_type_param_name(None);
+ let mut sugg: Vec<_> =
+ placeholder_types.iter().map(|sp| (*sp, (*type_name).to_string())).collect();
+
+ if let Some(generics) = generics {
+ if let Some(arg) = params.iter().find(|arg| {
+ matches!(arg.name, hir::ParamName::Plain(Ident { name: kw::Underscore, .. }))
+ }) {
+ // Account for `_` already present in cases like `struct S<_>(_);` and suggest
+ // `struct S<T>(T);` instead of `struct S<_, T>(T);`.
+ sugg.push((arg.span, (*type_name).to_string()));
+ } else if let Some(span) = generics.span_for_param_suggestion() {
+ // Account for bounds, we want `fn foo<T: E, K>(_: K)` not `fn foo<T, K: E>(_: K)`.
+ sugg.push((span, format!(", {}", type_name)));
+ } else {
+ sugg.push((generics.span, format!("<{}>", type_name)));
+ }
+ }
+
+ let mut err =
+ bad_placeholder(tcx, placeholder_types.into_iter().chain(additional_spans).collect(), kind);
+
+ // Suggest, but only if it is not a function in const or static
+ if suggest {
+ let mut is_fn = false;
+ let mut is_const_or_static = false;
+
+ if let Some(hir_ty) = hir_ty && let hir::TyKind::BareFn(_) = hir_ty.kind {
+ is_fn = true;
+
+ // Check if parent is const or static
+ let parent_id = tcx.hir().get_parent_node(hir_ty.hir_id);
+ let parent_node = tcx.hir().get(parent_id);
+
+ is_const_or_static = matches!(
+ parent_node,
+ Node::Item(&hir::Item {
+ kind: hir::ItemKind::Const(..) | hir::ItemKind::Static(..),
+ ..
+ }) | Node::TraitItem(&hir::TraitItem {
+ kind: hir::TraitItemKind::Const(..),
+ ..
+ }) | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Const(..), .. })
+ );
+ }
+
+        // If the bare function type appears inside a const or static item,
+        // don't show the suggestion.
+ if !(is_fn && is_const_or_static) {
+ err.multipart_suggestion(
+ "use type parameters instead",
+ sugg,
+ Applicability::HasPlaceholders,
+ );
+ }
+ }
+
+ err
+}
+
+fn reject_placeholder_type_signatures_in_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ item: &'tcx hir::Item<'tcx>,
+) {
+ let (generics, suggest) = match &item.kind {
+ hir::ItemKind::Union(_, generics)
+ | hir::ItemKind::Enum(_, generics)
+ | hir::ItemKind::TraitAlias(generics, _)
+ | hir::ItemKind::Trait(_, _, generics, ..)
+ | hir::ItemKind::Impl(hir::Impl { generics, .. })
+ | hir::ItemKind::Struct(_, generics) => (generics, true),
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy { generics, .. })
+ | hir::ItemKind::TyAlias(_, generics) => (generics, false),
+ // `static`, `fn` and `const` are handled elsewhere to suggest appropriate type.
+ _ => return,
+ };
+
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_item(item);
+
+ placeholder_type_error(tcx, Some(generics), visitor.0, suggest, None, item.kind.descr());
+}
+
+impl<'tcx> Visitor<'tcx> for CollectItemTypesVisitor<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'tcx hir::Item<'tcx>) {
+ convert_item(self.tcx, item.item_id());
+ reject_placeholder_type_signatures_in_item(self.tcx, item);
+ intravisit::walk_item(self, item);
+ }
+
+ fn visit_generics(&mut self, generics: &'tcx hir::Generics<'tcx>) {
+ for param in generics.params {
+ match param.kind {
+ hir::GenericParamKind::Lifetime { .. } => {}
+ hir::GenericParamKind::Type { default: Some(_), .. } => {
+ let def_id = self.tcx.hir().local_def_id(param.hir_id);
+ self.tcx.ensure().type_of(def_id);
+ }
+ hir::GenericParamKind::Type { .. } => {}
+ hir::GenericParamKind::Const { default, .. } => {
+ let def_id = self.tcx.hir().local_def_id(param.hir_id);
+ self.tcx.ensure().type_of(def_id);
+ if let Some(default) = default {
+ let default_def_id = self.tcx.hir().local_def_id(default.hir_id);
+ // need to store default and type of default
+ self.tcx.ensure().type_of(default_def_id);
+ self.tcx.ensure().const_param_default(def_id);
+ }
+ }
+ }
+ }
+ intravisit::walk_generics(self, generics);
+ }
+
+ fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) {
+ if let hir::ExprKind::Closure { .. } = expr.kind {
+ let def_id = self.tcx.hir().local_def_id(expr.hir_id);
+ self.tcx.ensure().generics_of(def_id);
+ // We do not call `type_of` for closures here as that
+ // depends on typecheck and would therefore hide
+ // any further errors in case one typeck fails.
+ }
+ intravisit::walk_expr(self, expr);
+ }
+
+ fn visit_trait_item(&mut self, trait_item: &'tcx hir::TraitItem<'tcx>) {
+ convert_trait_item(self.tcx, trait_item.trait_item_id());
+ intravisit::walk_trait_item(self, trait_item);
+ }
+
+ fn visit_impl_item(&mut self, impl_item: &'tcx hir::ImplItem<'tcx>) {
+ convert_impl_item(self.tcx, impl_item.impl_item_id());
+ intravisit::walk_impl_item(self, impl_item);
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Utility types and common code for the above passes.
+
+fn bad_placeholder<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mut spans: Vec<Span>,
+ kind: &'static str,
+) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let kind = if kind.ends_with('s') { format!("{}es", kind) } else { format!("{}s", kind) };
+
+ spans.sort();
+ let mut err = struct_span_err!(
+ tcx.sess,
+ spans.clone(),
+ E0121,
+ "the placeholder `_` is not allowed within types on item signatures for {}",
+ kind
+ );
+ for span in spans {
+ err.span_label(span, "not allowed in type signatures");
+ }
+ err
+}
+
+impl<'tcx> ItemCtxt<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, item_def_id: DefId) -> ItemCtxt<'tcx> {
+ ItemCtxt { tcx, item_def_id }
+ }
+
+ pub fn to_ty(&self, ast_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ <dyn AstConv<'_>>::ast_ty_to_ty(self, ast_ty)
+ }
+
+ pub fn hir_id(&self) -> hir::HirId {
+ self.tcx.hir().local_def_id_to_hir_id(self.item_def_id.expect_local())
+ }
+
+ pub fn node(&self) -> hir::Node<'tcx> {
+ self.tcx.hir().get(self.hir_id())
+ }
+}
+
+impl<'tcx> AstConv<'tcx> for ItemCtxt<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn item_def_id(&self) -> Option<DefId> {
+ Some(self.item_def_id)
+ }
+
+ fn get_type_parameter_bounds(
+ &self,
+ span: Span,
+ def_id: DefId,
+ assoc_name: Ident,
+ ) -> ty::GenericPredicates<'tcx> {
+ self.tcx.at(span).type_param_predicates((
+ self.item_def_id,
+ def_id.expect_local(),
+ assoc_name,
+ ))
+ }
+
+ fn re_infer(&self, _: Option<&ty::GenericParamDef>, _: Span) -> Option<ty::Region<'tcx>> {
+ None
+ }
+
+ fn allow_ty_infer(&self) -> bool {
+ false
+ }
+
+ fn ty_infer(&self, _: Option<&ty::GenericParamDef>, span: Span) -> Ty<'tcx> {
+ self.tcx().ty_error_with_message(span, "bad placeholder type")
+ }
+
+ fn ct_infer(&self, ty: Ty<'tcx>, _: Option<&ty::GenericParamDef>, span: Span) -> Const<'tcx> {
+ let ty = self.tcx.fold_regions(ty, |r, _| match *r {
+ ty::ReErased => self.tcx.lifetimes.re_static,
+ _ => r,
+ });
+ self.tcx().const_error_with_message(ty, span, "bad placeholder constant")
+ }
+
+ fn projected_ty_from_poly_trait_ref(
+ &self,
+ span: Span,
+ item_def_id: DefId,
+ item_segment: &hir::PathSegment<'_>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ ) -> Ty<'tcx> {
+ if let Some(trait_ref) = poly_trait_ref.no_bound_vars() {
+ let item_substs = <dyn AstConv<'tcx>>::create_substs_for_associated_item(
+ self,
+ self.tcx,
+ span,
+ item_def_id,
+ item_segment,
+ trait_ref.substs,
+ );
+ self.tcx().mk_projection(item_def_id, item_substs)
+ } else {
+ // The trait ref has late-bound regions, so we can't simply skip the binder here; report an error instead.
+ let mut err = struct_span_err!(
+ self.tcx().sess,
+ span,
+ E0212,
+ "cannot use the associated type of a trait \
+ with uninferred generic parameters"
+ );
+
+ match self.node() {
+ hir::Node::Field(_) | hir::Node::Ctor(_) | hir::Node::Variant(_) => {
+ let item =
+ self.tcx.hir().expect_item(self.tcx.hir().get_parent_item(self.hir_id()));
+ match &item.kind {
+ hir::ItemKind::Enum(_, generics)
+ | hir::ItemKind::Struct(_, generics)
+ | hir::ItemKind::Union(_, generics) => {
+ let lt_name = get_new_lifetime_name(self.tcx, poly_trait_ref, generics);
+ let (lt_sp, sugg) = match generics.params {
+ [] => (generics.span, format!("<{}>", lt_name)),
+ [bound, ..] => {
+ (bound.span.shrink_to_lo(), format!("{}, ", lt_name))
+ }
+ };
+ let suggestions = vec![
+ (lt_sp, sugg),
+ (
+ span.with_hi(item_segment.ident.span.lo()),
+ format!(
+ "{}::",
+ // Replace the existing lifetimes with a new named lifetime.
+ self.tcx.replace_late_bound_regions_uncached(
+ poly_trait_ref,
+ |_| {
+ self.tcx.mk_region(ty::ReEarlyBound(
+ ty::EarlyBoundRegion {
+ def_id: item_def_id,
+ index: 0,
+ name: Symbol::intern(&lt_name),
+ },
+ ))
+ }
+ ),
+ ),
+ ),
+ ];
+ err.multipart_suggestion(
+ "use a fully qualified path with explicit lifetimes",
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ }
+ }
+ hir::Node::Item(hir::Item {
+ kind:
+ hir::ItemKind::Struct(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Union(..),
+ ..
+ }) => {}
+ hir::Node::Item(_)
+ | hir::Node::ForeignItem(_)
+ | hir::Node::TraitItem(_)
+ | hir::Node::ImplItem(_) => {
+ err.span_suggestion_verbose(
+ span.with_hi(item_segment.ident.span.lo()),
+ "use a fully qualified path with inferred lifetimes",
+ format!(
+ "{}::",
+ // Erase named lifetimes: we want `<A as B<'_>>::C`, not `<A as B<'a>>::C`.
+ self.tcx.anonymize_late_bound_regions(poly_trait_ref).skip_binder(),
+ ),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ _ => {}
+ }
+ err.emit();
+ self.tcx().ty_error()
+ }
+ }
+
+ fn normalize_ty(&self, _span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
+ // Types in item signatures are not normalized to avoid undue dependencies.
+ ty
+ }
+
+ fn set_tainted_by_errors(&self) {
+ // There's no obvious place to track this, so just let it go.
+ }
+
+ fn record_ty(&self, _hir_id: hir::HirId, _ty: Ty<'tcx>, _span: Span) {
+ // There's no place to record types from signatures?
+ }
+}
+
+/// Synthesize a new lifetime name that doesn't clash with any of the lifetimes already present.
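+/// For example, if `'a` and `'b` are already used, this returns `"'c"`; once
+/// `'a` through `'z` are all taken, it moves on to `'aa`, `'bb`, and so on.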
+fn get_new_lifetime_name<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ poly_trait_ref: ty::PolyTraitRef<'tcx>,
+ generics: &hir::Generics<'tcx>,
+) -> String {
+ let existing_lifetimes = tcx
+ .collect_referenced_late_bound_regions(&poly_trait_ref)
+ .into_iter()
+ .filter_map(|lt| {
+ if let ty::BoundRegionKind::BrNamed(_, name) = lt {
+ Some(name.as_str().to_string())
+ } else {
+ None
+ }
+ })
+ .chain(generics.params.iter().filter_map(|param| {
+ if let hir::GenericParamKind::Lifetime { .. } = &param.kind {
+ Some(param.name.ident().as_str().to_string())
+ } else {
+ None
+ }
+ }))
+ .collect::<FxHashSet<String>>();
+
+ let a_to_z_repeat_n = |n| {
+ (b'a'..=b'z').map(move |c| {
+ let mut s = '\''.to_string();
+ s.extend(std::iter::repeat(char::from(c)).take(n));
+ s
+ })
+ };
+
+ // If all single char lifetime names are present, we wrap around and double the chars.
+ (1..).flat_map(a_to_z_repeat_n).find(|lt| !existing_lifetimes.contains(lt.as_str())).unwrap()
+}
+
+/// Returns the predicates defined on `item_def_id` of the form
+/// `X: Foo` where `X` is the type parameter `def_id`.
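+///
+/// For example, for `fn f<T: Clone>() where T: Default`, the bounds on `T`
+/// come both from the inline `T: Clone` and the where-clause `T: Default`
+/// (further filtered here by `assoc_name`).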
+fn type_param_predicates(
+ tcx: TyCtxt<'_>,
+ (item_def_id, def_id, assoc_name): (DefId, LocalDefId, Ident),
+) -> ty::GenericPredicates<'_> {
+ use rustc_hir::*;
+
+ // In the AST, bounds can derive from two places. Either
+ // written inline like `<T: Foo>` or in a where-clause like
+ // `where T: Foo`.
+
+ let param_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let param_owner = tcx.hir().ty_param_owner(def_id);
+ let generics = tcx.generics_of(param_owner);
+ let index = generics.param_def_id_to_index[&def_id.to_def_id()];
+ let ty = tcx.mk_ty_param(index, tcx.hir().ty_param_name(def_id));
+
+ // Don't look for bounds where the type parameter isn't in scope.
+ let parent = if item_def_id == param_owner.to_def_id() {
+ None
+ } else {
+ tcx.generics_of(item_def_id).parent
+ };
+
+ let mut result = parent
+ .map(|parent| {
+ let icx = ItemCtxt::new(tcx, parent);
+ icx.get_type_parameter_bounds(DUMMY_SP, def_id.to_def_id(), assoc_name)
+ })
+ .unwrap_or_default();
+ let mut extend = None;
+
+ let item_hir_id = tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local());
+ let ast_generics = match tcx.hir().get(item_hir_id) {
+ Node::TraitItem(item) => &item.generics,
+
+ Node::ImplItem(item) => &item.generics,
+
+ Node::Item(item) => {
+ match item.kind {
+ ItemKind::Fn(.., ref generics, _)
+ | ItemKind::Impl(hir::Impl { ref generics, .. })
+ | ItemKind::TyAlias(_, ref generics)
+ | ItemKind::OpaqueTy(OpaqueTy {
+ ref generics,
+ origin: hir::OpaqueTyOrigin::TyAlias,
+ ..
+ })
+ | ItemKind::Enum(_, ref generics)
+ | ItemKind::Struct(_, ref generics)
+ | ItemKind::Union(_, ref generics) => generics,
+ ItemKind::Trait(_, _, ref generics, ..) => {
+ // Implied `Self: Trait` and supertrait bounds.
+ if param_id == item_hir_id {
+ let identity_trait_ref = ty::TraitRef::identity(tcx, item_def_id);
+ extend =
+ Some((identity_trait_ref.without_const().to_predicate(tcx), item.span));
+ }
+ generics
+ }
+ _ => return result,
+ }
+ }
+
+ Node::ForeignItem(item) => match item.kind {
+ ForeignItemKind::Fn(_, _, ref generics) => generics,
+ _ => return result,
+ },
+
+ _ => return result,
+ };
+
+ let icx = ItemCtxt::new(tcx, item_def_id);
+ let extra_predicates = extend.into_iter().chain(
+ icx.type_parameter_bounds_in_generics(
+ ast_generics,
+ param_id,
+ ty,
+ OnlySelfBounds(true),
+ Some(assoc_name),
+ )
+ .into_iter()
+ .filter(|(predicate, _)| match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(data) => data.self_ty().is_param(index),
+ _ => false,
+ }),
+ );
+ result.predicates =
+ tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(extra_predicates));
+ result
+}
+
+impl<'tcx> ItemCtxt<'tcx> {
+ /// Finds bounds from `hir::Generics`. This requires scanning through the
+ /// AST. We do this to avoid having to convert *all* the bounds, which
+ /// would create artificial cycles. Instead, we only convert the
+ /// bounds for a type parameter `X` when `X::Foo` is actually used.
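+ /// For example, resolving `X::Item` in `fn f<X: Iterator, Y: Clone>() -> X::Item`
+ /// only needs the bounds on `X`; the bounds on `Y` are never converted here.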
+ #[instrument(level = "trace", skip(self, ast_generics))]
+ fn type_parameter_bounds_in_generics(
+ &self,
+ ast_generics: &'tcx hir::Generics<'tcx>,
+ param_id: hir::HirId,
+ ty: Ty<'tcx>,
+ only_self_bounds: OnlySelfBounds,
+ assoc_name: Option<Ident>,
+ ) -> Vec<(ty::Predicate<'tcx>, Span)> {
+ let param_def_id = self.tcx.hir().local_def_id(param_id).to_def_id();
+ debug!(?param_def_id);
+ ast_generics
+ .predicates
+ .iter()
+ .filter_map(|wp| match *wp {
+ hir::WherePredicate::BoundPredicate(ref bp) => Some(bp),
+ _ => None,
+ })
+ .flat_map(|bp| {
+ let bt = if bp.is_param_bound(param_def_id) {
+ Some(ty)
+ } else if !only_self_bounds.0 {
+ Some(self.to_ty(bp.bounded_ty))
+ } else {
+ None
+ };
+ let bvars = self.tcx.late_bound_vars(bp.bounded_ty.hir_id);
+
+ bp.bounds.iter().filter_map(move |b| bt.map(|bt| (bt, b, bvars))).filter(
+ |(_, b, _)| match assoc_name {
+ Some(assoc_name) => self.bound_defines_assoc_item(b, assoc_name),
+ None => true,
+ },
+ )
+ })
+ .flat_map(|(bt, b, bvars)| predicates_from_bound(self, bt, b, bvars))
+ .collect()
+ }
+
+ fn bound_defines_assoc_item(&self, b: &hir::GenericBound<'_>, assoc_name: Ident) -> bool {
+ debug!("bound_defines_assoc_item(b={:?}, assoc_name={:?})", b, assoc_name);
+
+ match b {
+ hir::GenericBound::Trait(poly_trait_ref, _) => {
+ let trait_ref = &poly_trait_ref.trait_ref;
+ if let Some(trait_did) = trait_ref.trait_def_id() {
+ self.tcx.trait_may_define_assoc_type(trait_did, assoc_name)
+ } else {
+ false
+ }
+ }
+ _ => false,
+ }
+ }
+}
+
+fn convert_item(tcx: TyCtxt<'_>, item_id: hir::ItemId) {
+ let it = tcx.hir().item(item_id);
+ debug!("convert: item {} with id {}", it.ident, it.hir_id());
+ let def_id = item_id.def_id;
+
+ match it.kind {
+ // These don't define types.
+ hir::ItemKind::ExternCrate(_)
+ | hir::ItemKind::Use(..)
+ | hir::ItemKind::Macro(..)
+ | hir::ItemKind::Mod(_)
+ | hir::ItemKind::GlobalAsm(_) => {}
+ hir::ItemKind::ForeignMod { items, .. } => {
+ for item in items {
+ let item = tcx.hir().foreign_item(item.id);
+ tcx.ensure().generics_of(item.def_id);
+ tcx.ensure().type_of(item.def_id);
+ tcx.ensure().predicates_of(item.def_id);
+ match item.kind {
+ hir::ForeignItemKind::Fn(..) => tcx.ensure().fn_sig(item.def_id),
+ hir::ForeignItemKind::Static(..) => {
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_foreign_item(item);
+ placeholder_type_error(
+ tcx,
+ None,
+ visitor.0,
+ false,
+ None,
+ "static variable",
+ );
+ }
+ _ => (),
+ }
+ }
+ }
+ hir::ItemKind::Enum(ref enum_definition, _) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ convert_enum_variant_types(tcx, def_id.to_def_id(), enum_definition.variants);
+ }
+ hir::ItemKind::Impl { .. } => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().impl_trait_ref(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+ hir::ItemKind::Trait(..) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().trait_def(def_id);
+ tcx.at(it.span).super_predicates_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+ hir::ItemKind::TraitAlias(..) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.at(it.span).super_predicates_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+ hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+
+ for f in struct_def.fields() {
+ let def_id = tcx.hir().local_def_id(f.hir_id);
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+
+ if let Some(ctor_hir_id) = struct_def.ctor_hir_id() {
+ convert_variant_ctor(tcx, ctor_hir_id);
+ }
+ }
+
+ // Desugared from `impl Trait`, so it is visited via the function's return type.
+ hir::ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin: hir::OpaqueTyOrigin::FnReturn(..) | hir::OpaqueTyOrigin::AsyncFn(..),
+ ..
+ }) => {}
+
+ // Don't call `type_of` on opaque types, since that depends on type
+ // checking function bodies. `check_item_type` ensures that it's called
+ // instead.
+ hir::ItemKind::OpaqueTy(..) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ tcx.ensure().explicit_item_bounds(def_id);
+ }
+ hir::ItemKind::TyAlias(..)
+ | hir::ItemKind::Static(..)
+ | hir::ItemKind::Const(..)
+ | hir::ItemKind::Fn(..) => {
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ match it.kind {
+ hir::ItemKind::Fn(..) => tcx.ensure().fn_sig(def_id),
+ hir::ItemKind::OpaqueTy(..) => tcx.ensure().item_bounds(def_id),
+ hir::ItemKind::Const(ty, ..) | hir::ItemKind::Static(ty, ..) => {
+ if !is_suggestable_infer_ty(ty) {
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_item(it);
+ placeholder_type_error(tcx, None, visitor.0, false, None, it.kind.descr());
+ }
+ }
+ _ => (),
+ }
+ }
+ }
+}
+
+fn convert_trait_item(tcx: TyCtxt<'_>, trait_item_id: hir::TraitItemId) {
+ let trait_item = tcx.hir().trait_item(trait_item_id);
+ tcx.ensure().generics_of(trait_item_id.def_id);
+
+ match trait_item.kind {
+ hir::TraitItemKind::Fn(..) => {
+ tcx.ensure().type_of(trait_item_id.def_id);
+ tcx.ensure().fn_sig(trait_item_id.def_id);
+ }
+
+ hir::TraitItemKind::Const(.., Some(_)) => {
+ tcx.ensure().type_of(trait_item_id.def_id);
+ }
+
+ hir::TraitItemKind::Const(..) => {
+ tcx.ensure().type_of(trait_item_id.def_id);
+ // Account for `const C: _;`.
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_trait_item(trait_item);
+ placeholder_type_error(tcx, None, visitor.0, false, None, "constant");
+ }
+
+ hir::TraitItemKind::Type(_, Some(_)) => {
+ tcx.ensure().item_bounds(trait_item_id.def_id);
+ tcx.ensure().type_of(trait_item_id.def_id);
+ // Account for `type T = _;`.
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_trait_item(trait_item);
+ placeholder_type_error(tcx, None, visitor.0, false, None, "associated type");
+ }
+
+ hir::TraitItemKind::Type(_, None) => {
+ tcx.ensure().item_bounds(trait_item_id.def_id);
+ // #74612: Visit and try to find bad placeholders
+ // even if there is no concrete type.
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_trait_item(trait_item);
+
+ placeholder_type_error(tcx, None, visitor.0, false, None, "associated type");
+ }
+ };
+
+ tcx.ensure().predicates_of(trait_item_id.def_id);
+}
+
+fn convert_impl_item(tcx: TyCtxt<'_>, impl_item_id: hir::ImplItemId) {
+ let def_id = impl_item_id.def_id;
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ let impl_item = tcx.hir().impl_item(impl_item_id);
+ match impl_item.kind {
+ hir::ImplItemKind::Fn(..) => {
+ tcx.ensure().fn_sig(def_id);
+ }
+ hir::ImplItemKind::TyAlias(_) => {
+ // Account for `type T = _;`
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_impl_item(impl_item);
+
+ placeholder_type_error(tcx, None, visitor.0, false, None, "associated type");
+ }
+ hir::ImplItemKind::Const(..) => {}
+ }
+}
+
+fn convert_variant_ctor(tcx: TyCtxt<'_>, ctor_id: hir::HirId) {
+ let def_id = tcx.hir().local_def_id(ctor_id);
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+}
+
+fn convert_enum_variant_types(tcx: TyCtxt<'_>, def_id: DefId, variants: &[hir::Variant<'_>]) {
+ let def = tcx.adt_def(def_id);
+ let repr_type = def.repr().discr_type();
+ let initial = repr_type.initial_discriminant(tcx);
+ let mut prev_discr = None::<Discr<'_>>;
+
+ // Fill in the discriminant values and field types.
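+ // E.g. for `enum E { A, B = 10, C }`: `A` gets the initial value 0, `B` the
+ // explicit 10, and `C` the wrapped increment 11.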
+ for variant in variants {
+ let wrapped_discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
+ prev_discr = Some(
+ if let Some(ref e) = variant.disr_expr {
+ let expr_did = tcx.hir().local_def_id(e.hir_id);
+ def.eval_explicit_discr(tcx, expr_did.to_def_id())
+ } else if let Some(discr) = repr_type.disr_incr(tcx, prev_discr) {
+ Some(discr)
+ } else {
+ struct_span_err!(tcx.sess, variant.span, E0370, "enum discriminant overflowed")
+ .span_label(
+ variant.span,
+ format!("overflowed on value after {}", prev_discr.unwrap()),
+ )
+ .note(&format!(
+ "explicitly set `{} = {}` if that is desired outcome",
+ variant.ident, wrapped_discr
+ ))
+ .emit();
+ None
+ }
+ .unwrap_or(wrapped_discr),
+ );
+
+ for f in variant.data.fields() {
+ let def_id = tcx.hir().local_def_id(f.hir_id);
+ tcx.ensure().generics_of(def_id);
+ tcx.ensure().type_of(def_id);
+ tcx.ensure().predicates_of(def_id);
+ }
+
+ // Convert the ctor, if any. This also registers the variant as
+ // an item.
+ if let Some(ctor_hir_id) = variant.data.ctor_hir_id() {
+ convert_variant_ctor(tcx, ctor_hir_id);
+ }
+ }
+}
+
+fn convert_variant(
+ tcx: TyCtxt<'_>,
+ variant_did: Option<LocalDefId>,
+ ctor_did: Option<LocalDefId>,
+ ident: Ident,
+ discr: ty::VariantDiscr,
+ def: &hir::VariantData<'_>,
+ adt_kind: ty::AdtKind,
+ parent_did: LocalDefId,
+) -> ty::VariantDef {
+ let mut seen_fields: FxHashMap<Ident, Span> = Default::default();
+ let fields = def
+ .fields()
+ .iter()
+ .map(|f| {
+ let fid = tcx.hir().local_def_id(f.hir_id);
+ let dup_span = seen_fields.get(&f.ident.normalize_to_macros_2_0()).cloned();
+ if let Some(prev_span) = dup_span {
+ tcx.sess.emit_err(errors::FieldAlreadyDeclared {
+ field_name: f.ident,
+ span: f.span,
+ prev_span,
+ });
+ } else {
+ seen_fields.insert(f.ident.normalize_to_macros_2_0(), f.span);
+ }
+
+ ty::FieldDef { did: fid.to_def_id(), name: f.ident.name, vis: tcx.visibility(fid) }
+ })
+ .collect();
+ let recovered = match def {
+ hir::VariantData::Struct(_, r) => *r,
+ _ => false,
+ };
+ ty::VariantDef::new(
+ ident.name,
+ variant_did.map(LocalDefId::to_def_id),
+ ctor_did.map(LocalDefId::to_def_id),
+ discr,
+ fields,
+ CtorKind::from_hir(def),
+ adt_kind,
+ parent_did.to_def_id(),
+ recovered,
+ adt_kind == AdtKind::Struct && tcx.has_attr(parent_did.to_def_id(), sym::non_exhaustive)
+ || variant_did.map_or(false, |variant_did| {
+ tcx.has_attr(variant_did.to_def_id(), sym::non_exhaustive)
+ }),
+ )
+}
+
+fn adt_def<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::AdtDef<'tcx> {
+ use rustc_hir::*;
+
+ let def_id = def_id.expect_local();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let Node::Item(item) = tcx.hir().get(hir_id) else {
+ bug!();
+ };
+
+ let repr = ReprOptions::new(tcx, def_id.to_def_id());
+ let (kind, variants) = match item.kind {
+ ItemKind::Enum(ref def, _) => {
+ let mut distance_from_explicit = 0;
+ let variants = def
+ .variants
+ .iter()
+ .map(|v| {
+ let variant_did = Some(tcx.hir().local_def_id(v.id));
+ let ctor_did =
+ v.data.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id));
+
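+ // E.g. in `enum E { A, B = 5, C }`, `A` is `Relative(0)`, `B` is
+ // `Explicit(..)`, and `C` is `Relative(1)`, counting from the last
+ // explicit discriminant.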
+ let discr = if let Some(ref e) = v.disr_expr {
+ distance_from_explicit = 0;
+ ty::VariantDiscr::Explicit(tcx.hir().local_def_id(e.hir_id).to_def_id())
+ } else {
+ ty::VariantDiscr::Relative(distance_from_explicit)
+ };
+ distance_from_explicit += 1;
+
+ convert_variant(
+ tcx,
+ variant_did,
+ ctor_did,
+ v.ident,
+ discr,
+ &v.data,
+ AdtKind::Enum,
+ def_id,
+ )
+ })
+ .collect();
+
+ (AdtKind::Enum, variants)
+ }
+ ItemKind::Struct(ref def, _) => {
+ let variant_did = None::<LocalDefId>;
+ let ctor_did = def.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id));
+
+ let variants = std::iter::once(convert_variant(
+ tcx,
+ variant_did,
+ ctor_did,
+ item.ident,
+ ty::VariantDiscr::Relative(0),
+ def,
+ AdtKind::Struct,
+ def_id,
+ ))
+ .collect();
+
+ (AdtKind::Struct, variants)
+ }
+ ItemKind::Union(ref def, _) => {
+ let variant_did = None;
+ let ctor_did = def.ctor_hir_id().map(|hir_id| tcx.hir().local_def_id(hir_id));
+
+ let variants = std::iter::once(convert_variant(
+ tcx,
+ variant_did,
+ ctor_did,
+ item.ident,
+ ty::VariantDiscr::Relative(0),
+ def,
+ AdtKind::Union,
+ def_id,
+ ))
+ .collect();
+
+ (AdtKind::Union, variants)
+ }
+ _ => bug!(),
+ };
+ tcx.alloc_adt_def(def_id.to_def_id(), kind, variants, repr)
+}
+
+/// Ensures that the super-predicates of the trait with a `DefId`
+/// of `trait_def_id` are converted and stored. This also ensures that
+/// the transitive super-predicates are converted.
+fn super_predicates_of(tcx: TyCtxt<'_>, trait_def_id: DefId) -> ty::GenericPredicates<'_> {
+ debug!("super_predicates(trait_def_id={:?})", trait_def_id);
+ tcx.super_predicates_that_define_assoc_type((trait_def_id, None))
+}
+
+/// Like `super_predicates_of`, but when `assoc_name` is `Some` the result is
+/// restricted to the super-predicates that may define the named associated
+/// type. This also ensures that the transitive super-predicates are converted.
+fn super_predicates_that_define_assoc_type(
+ tcx: TyCtxt<'_>,
+ (trait_def_id, assoc_name): (DefId, Option<Ident>),
+) -> ty::GenericPredicates<'_> {
+ debug!(
+ "super_predicates_that_define_assoc_type(trait_def_id={:?}, assoc_name={:?})",
+ trait_def_id, assoc_name
+ );
+ if trait_def_id.is_local() {
+ debug!("super_predicates_that_define_assoc_type: local trait_def_id={:?}", trait_def_id);
+ let trait_hir_id = tcx.hir().local_def_id_to_hir_id(trait_def_id.expect_local());
+
+ let Node::Item(item) = tcx.hir().get(trait_hir_id) else {
+ bug!("trait_node_id {} is not an item", trait_hir_id);
+ };
+
+ let (generics, bounds) = match item.kind {
+ hir::ItemKind::Trait(.., ref generics, ref supertraits, _) => (generics, supertraits),
+ hir::ItemKind::TraitAlias(ref generics, ref supertraits) => (generics, supertraits),
+ _ => span_bug!(item.span, "super_predicates invoked on non-trait"),
+ };
+
+ let icx = ItemCtxt::new(tcx, trait_def_id);
+
+ // Convert the bounds that follow the colon, e.g., `Bar + Zed` in `trait Foo: Bar + Zed`.
+ let self_param_ty = tcx.types.self_param;
+ let superbounds1 = if let Some(assoc_name) = assoc_name {
+ <dyn AstConv<'_>>::compute_bounds_that_match_assoc_type(
+ &icx,
+ self_param_ty,
+ bounds,
+ assoc_name,
+ )
+ } else {
+ <dyn AstConv<'_>>::compute_bounds(&icx, self_param_ty, bounds)
+ };
+
+ let superbounds1 = superbounds1.predicates(tcx, self_param_ty);
+
+ // Convert any explicit superbounds in the where-clause,
+ // e.g., `trait Foo where Self: Bar`.
+ // In the case of trait aliases, however, we include all bounds in the where-clause,
+ // so e.g., `trait Foo = where u32: PartialEq<Self>` would include `u32: PartialEq<Self>`
+ // as one of its "superpredicates".
+ let is_trait_alias = tcx.is_trait_alias(trait_def_id);
+ let superbounds2 = icx.type_parameter_bounds_in_generics(
+ generics,
+ item.hir_id(),
+ self_param_ty,
+ OnlySelfBounds(!is_trait_alias),
+ assoc_name,
+ );
+
+ // Combine the two lists to form the complete set of superbounds:
+ let superbounds = &*tcx.arena.alloc_from_iter(superbounds1.into_iter().chain(superbounds2));
+ debug!(?superbounds);
+
+ // Now require that immediate supertraits are converted,
+ // which will, in turn, reach indirect supertraits.
+ if assoc_name.is_none() {
+ for &(pred, span) in superbounds {
+ debug!("superbound: {:?}", pred);
+ if let ty::PredicateKind::Trait(bound) = pred.kind().skip_binder() {
+ tcx.at(span).super_predicates_of(bound.def_id());
+ }
+ }
+ }
+
+ ty::GenericPredicates { parent: None, predicates: superbounds }
+ } else {
+ // If `assoc_name` were `None`, the query would have been redirected to an
+ // external provider, so it must be `Some` here.
+ assert!(assoc_name.is_some());
+ tcx.super_predicates_of(trait_def_id)
+ }
+}
+
+fn trait_def(tcx: TyCtxt<'_>, def_id: DefId) -> ty::TraitDef {
+ let item = tcx.hir().expect_item(def_id.expect_local());
+
+ let (is_auto, unsafety, items) = match item.kind {
+ hir::ItemKind::Trait(is_auto, unsafety, .., items) => {
+ (is_auto == hir::IsAuto::Yes, unsafety, items)
+ }
+ hir::ItemKind::TraitAlias(..) => (false, hir::Unsafety::Normal, &[][..]),
+ _ => span_bug!(item.span, "trait_def_of_item invoked on non-trait"),
+ };
+
+ let paren_sugar = tcx.has_attr(def_id, sym::rustc_paren_sugar);
+ if paren_sugar && !tcx.features().unboxed_closures {
+ tcx.sess
+ .struct_span_err(
+ item.span,
+ "the `#[rustc_paren_sugar]` attribute is a temporary means of controlling \
+ which traits can use parenthetical notation",
+ )
+ .help("add `#![feature(unboxed_closures)]` to the crate attributes to use it")
+ .emit();
+ }
+
+ let is_marker = tcx.has_attr(def_id, sym::marker);
+ let skip_array_during_method_dispatch =
+ tcx.has_attr(def_id, sym::rustc_skip_array_during_method_dispatch);
+ let spec_kind = if tcx.has_attr(def_id, sym::rustc_unsafe_specialization_marker) {
+ ty::trait_def::TraitSpecializationKind::Marker
+ } else if tcx.has_attr(def_id, sym::rustc_specialization_trait) {
+ ty::trait_def::TraitSpecializationKind::AlwaysApplicable
+ } else {
+ ty::trait_def::TraitSpecializationKind::None
+ };
+ let must_implement_one_of = tcx
+ .get_attr(def_id, sym::rustc_must_implement_one_of)
+ // Check that there are at least 2 arguments of `#[rustc_must_implement_one_of]`
+ // and that they are all identifiers
+ .and_then(|attr| match attr.meta_item_list() {
+ Some(items) if items.len() < 2 => {
+ tcx.sess
+ .struct_span_err(
+ attr.span,
+ "the `#[rustc_must_implement_one_of]` attribute must be \
+ used with at least 2 args",
+ )
+ .emit();
+
+ None
+ }
+ Some(items) => items
+ .into_iter()
+ .map(|item| item.ident().ok_or(item.span()))
+ .collect::<Result<Box<[_]>, _>>()
+ .map_err(|span| {
+ tcx.sess
+ .struct_span_err(span, "must be a name of an associated function")
+ .emit();
+ })
+ .ok()
+ .zip(Some(attr.span)),
+ // Error is reported by `rustc_attr!`
+ None => None,
+ })
+ // Check that all arguments of `#[rustc_must_implement_one_of]` reference
+ // functions in the trait with default implementations
+ .and_then(|(list, attr_span)| {
+ let errors = list.iter().filter_map(|ident| {
+ let item = items.iter().find(|item| item.ident == *ident);
+
+ match item {
+ Some(item) if matches!(item.kind, hir::AssocItemKind::Fn { .. }) => {
+ if !tcx.impl_defaultness(item.id.def_id).has_value() {
+ tcx.sess
+ .struct_span_err(
+ item.span,
+ "This function doesn't have a default implementation",
+ )
+ .span_note(attr_span, "required by this annotation")
+ .emit();
+
+ return Some(());
+ }
+
+ return None;
+ }
+ Some(item) => {
+ tcx.sess
+ .struct_span_err(item.span, "not a function")
+ .span_note(attr_span, "required by this annotation")
+ .note(
+ "All `#[rustc_must_implement_one_of]` arguments \
+ must be associated function names",
+ )
+ .emit();
+ }
+ None => {
+ tcx.sess
+ .struct_span_err(ident.span, "function not found in this trait")
+ .emit();
+ }
+ }
+
+ Some(())
+ });
+
+ (errors.count() == 0).then_some(list)
+ })
+ // Check for duplicates
+ .and_then(|list| {
+ let mut set: FxHashMap<Symbol, Span> = FxHashMap::default();
+ let mut no_dups = true;
+
+ for ident in &*list {
+ if let Some(dup) = set.insert(ident.name, ident.span) {
+ tcx.sess
+ .struct_span_err(vec![dup, ident.span], "function names are duplicated")
+ .note(
+ "All `#[rustc_must_implement_one_of]` arguments \
+ must be unique",
+ )
+ .emit();
+
+ no_dups = false;
+ }
+ }
+
+ no_dups.then_some(list)
+ });
+
+ ty::TraitDef::new(
+ def_id,
+ unsafety,
+ paren_sugar,
+ is_auto,
+ is_marker,
+ skip_array_during_method_dispatch,
+ spec_kind,
+ must_implement_one_of,
+ )
+}
+
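+/// Returns the span of a late-bound region in `node`'s signature, if any.
+/// E.g. `fn f<'a>(x: &'a u32)` has the late-bound `'a` and yields `Some(..)`,
+/// while a lifetime constrained in a where-clause is early-bound and ignored.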
+fn has_late_bound_regions<'tcx>(tcx: TyCtxt<'tcx>, node: Node<'tcx>) -> Option<Span> {
+ struct LateBoundRegionsDetector<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ outer_index: ty::DebruijnIndex,
+ has_late_bound_regions: Option<Span>,
+ }
+
+ impl<'tcx> Visitor<'tcx> for LateBoundRegionsDetector<'tcx> {
+ fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
+ if self.has_late_bound_regions.is_some() {
+ return;
+ }
+ match ty.kind {
+ hir::TyKind::BareFn(..) => {
+ self.outer_index.shift_in(1);
+ intravisit::walk_ty(self, ty);
+ self.outer_index.shift_out(1);
+ }
+ _ => intravisit::walk_ty(self, ty),
+ }
+ }
+
+ fn visit_poly_trait_ref(
+ &mut self,
+ tr: &'tcx hir::PolyTraitRef<'tcx>,
+ m: hir::TraitBoundModifier,
+ ) {
+ if self.has_late_bound_regions.is_some() {
+ return;
+ }
+ self.outer_index.shift_in(1);
+ intravisit::walk_poly_trait_ref(self, tr, m);
+ self.outer_index.shift_out(1);
+ }
+
+ fn visit_lifetime(&mut self, lt: &'tcx hir::Lifetime) {
+ if self.has_late_bound_regions.is_some() {
+ return;
+ }
+
+ match self.tcx.named_region(lt.hir_id) {
+ Some(rl::Region::Static | rl::Region::EarlyBound(..)) => {}
+ Some(rl::Region::LateBound(debruijn, _, _)) if debruijn < self.outer_index => {}
+ Some(rl::Region::LateBound(..) | rl::Region::Free(..)) | None => {
+ self.has_late_bound_regions = Some(lt.span);
+ }
+ }
+ }
+ }
+
+ fn has_late_bound_regions<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: &'tcx hir::Generics<'tcx>,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ ) -> Option<Span> {
+ let mut visitor = LateBoundRegionsDetector {
+ tcx,
+ outer_index: ty::INNERMOST,
+ has_late_bound_regions: None,
+ };
+ for param in generics.params {
+ if let GenericParamKind::Lifetime { .. } = param.kind {
+ if tcx.is_late_bound(param.hir_id) {
+ return Some(param.span);
+ }
+ }
+ }
+ visitor.visit_fn_decl(decl);
+ visitor.has_late_bound_regions
+ }
+
+ match node {
+ Node::TraitItem(item) => match item.kind {
+ hir::TraitItemKind::Fn(ref sig, _) => {
+ has_late_bound_regions(tcx, &item.generics, sig.decl)
+ }
+ _ => None,
+ },
+ Node::ImplItem(item) => match item.kind {
+ hir::ImplItemKind::Fn(ref sig, _) => {
+ has_late_bound_regions(tcx, &item.generics, sig.decl)
+ }
+ _ => None,
+ },
+ Node::ForeignItem(item) => match item.kind {
+ hir::ForeignItemKind::Fn(fn_decl, _, ref generics) => {
+ has_late_bound_regions(tcx, generics, fn_decl)
+ }
+ _ => None,
+ },
+ Node::Item(item) => match item.kind {
+ hir::ItemKind::Fn(ref sig, .., ref generics, _) => {
+ has_late_bound_regions(tcx, generics, sig.decl)
+ }
+ _ => None,
+ },
+ _ => None,
+ }
+}
+
+struct AnonConstInParamTyDetector {
+ in_param_ty: bool,
+ found_anon_const_in_param_ty: bool,
+ ct: HirId,
+}
+
+impl<'v> Visitor<'v> for AnonConstInParamTyDetector {
+ fn visit_generic_param(&mut self, p: &'v hir::GenericParam<'v>) {
+ if let GenericParamKind::Const { ty, default: _ } = p.kind {
+ let prev = self.in_param_ty;
+ self.in_param_ty = true;
+ self.visit_ty(ty);
+ self.in_param_ty = prev;
+ }
+ }
+
+ fn visit_anon_const(&mut self, c: &'v hir::AnonConst) {
+ if self.in_param_ty && self.ct == c.hir_id {
+ self.found_anon_const_in_param_ty = true;
+ } else {
+ intravisit::walk_anon_const(self, c)
+ }
+ }
+}
+
+fn generics_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::Generics {
+ use rustc_hir::*;
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+
+ let node = tcx.hir().get(hir_id);
+ let parent_def_id = match node {
+ Node::ImplItem(_)
+ | Node::TraitItem(_)
+ | Node::Variant(_)
+ | Node::Ctor(..)
+ | Node::Field(_) => {
+ let parent_id = tcx.hir().get_parent_item(hir_id);
+ Some(parent_id.to_def_id())
+ }
+ // FIXME(#43408) always enable this once `lazy_normalization` is
+ // stable enough and does not need a feature gate anymore.
+ Node::AnonConst(_) => {
+ let parent_def_id = tcx.hir().get_parent_item(hir_id);
+
+ let mut in_param_ty = false;
+ for (_parent, node) in tcx.hir().parent_iter(hir_id) {
+ if let Some(generics) = node.generics() {
+ let mut visitor = AnonConstInParamTyDetector {
+ in_param_ty: false,
+ found_anon_const_in_param_ty: false,
+ ct: hir_id,
+ };
+
+ visitor.visit_generics(generics);
+ in_param_ty = visitor.found_anon_const_in_param_ty;
+ break;
+ }
+ }
+
+ if in_param_ty {
+ // We do not allow generic parameters in anon consts if we are inside
+ // of a const parameter type, e.g. `struct Foo<const N: usize, const M: [u8; N]>` is not allowed.
+ None
+ } else if tcx.lazy_normalization() {
+ if let Some(param_id) = tcx.hir().opt_const_param_default_param_hir_id(hir_id) {
+ // If the def_id we are calling generics_of on is an anon ct default i.e:
+ //
+ // struct Foo<const N: usize = { .. }>;
+ // ^^^ ^ ^^^^^^ def id of this anon const
+ // ^ ^ param_id
+ // ^ parent_def_id
+ //
+ // then we only want to return generics for params to the left of `N`. If we don't do that we
+ // end up with that const looking like: `ty::ConstKind::Unevaluated(def_id, substs: [N#0])`.
+ //
+ // This causes ICEs (#86580) when building the substs for `Foo` in `fn foo() -> Foo { .. }`, as
+ // we substitute the defaults using the partially built substs. Subst'ing
+ // the `N#0` on the unevaluated const indexes into the empty substs we're in the process of building.
+ //
+ // We fix this by having this function return the parent's generics ourselves and truncating the
+ // generics to only include non-forward declared params (with the exception of the `Self` ty)
+ //
+ // For the above code example that means we want `substs: []`
+ // For the following struct def we want `substs: [N#0]` when generics_of is called on
+ // the def id of the `{ N + 1 }` anon const
+ // struct Foo<const N: usize, const M: usize = { N + 1 }>;
+ //
+ // This has some implications for how we get the predicates available to the anon const
+ // see `explicit_predicates_of` for more information on this
+ let generics = tcx.generics_of(parent_def_id.to_def_id());
+ let param_def = tcx.hir().local_def_id(param_id).to_def_id();
+ let param_def_idx = generics.param_def_id_to_index[&param_def];
+ // In the above example this would be .params[..N#0]
+ let params = generics.params[..param_def_idx as usize].to_owned();
+ let param_def_id_to_index =
+ params.iter().map(|param| (param.def_id, param.index)).collect();
+
+ return ty::Generics {
+ // We set the parent of these generics to our parent's parent so that we
+ // don't end up with substs `[N, M, N]` for the const default on a struct like this:
+ // struct Foo<const N: usize, const M: usize = { ... }>;
+ parent: generics.parent,
+ parent_count: generics.parent_count,
+ params,
+ param_def_id_to_index,
+ has_self: generics.has_self,
+ has_late_bound_regions: generics.has_late_bound_regions,
+ };
+ }
+
+ // HACK(eddyb) this provides the correct generics when
+ // `feature(generic_const_expressions)` is enabled, so that const expressions
+ // used with const generics, e.g. `Foo<{N+1}>`, can work at all.
+ //
+ // Note that we do not supply the parent generics when using
+ // `min_const_generics`.
+ Some(parent_def_id.to_def_id())
+ } else {
+ let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id));
+ match parent_node {
+ // HACK(eddyb) this provides the correct generics for repeat
+ // expressions' count (i.e. `N` in `[x; N]`), and explicit
+ // `enum` discriminants (i.e. `D` in `enum Foo { Bar = D }`),
+ // as they shouldn't be able to cause query cycle errors.
+ Node::Expr(&Expr { kind: ExprKind::Repeat(_, ref constant), .. })
+ if constant.hir_id() == hir_id =>
+ {
+ Some(parent_def_id.to_def_id())
+ }
+ Node::Variant(Variant { disr_expr: Some(ref constant), .. })
+ if constant.hir_id == hir_id =>
+ {
+ Some(parent_def_id.to_def_id())
+ }
+ Node::Expr(&Expr { kind: ExprKind::ConstBlock(_), .. }) => {
+ Some(tcx.typeck_root_def_id(def_id))
+ }
+ // Exclude `GlobalAsm` here which cannot have generics.
+ Node::Expr(&Expr { kind: ExprKind::InlineAsm(asm), .. })
+ if asm.operands.iter().any(|(op, _op_sp)| match op {
+ hir::InlineAsmOperand::Const { anon_const }
+ | hir::InlineAsmOperand::SymFn { anon_const } => {
+ anon_const.hir_id == hir_id
+ }
+ _ => false,
+ }) =>
+ {
+ Some(parent_def_id.to_def_id())
+ }
+ _ => None,
+ }
+ }
+ }
+ Node::Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => {
+ Some(tcx.typeck_root_def_id(def_id))
+ }
+ Node::Item(item) => match item.kind {
+ ItemKind::OpaqueTy(hir::OpaqueTy {
+ origin:
+ hir::OpaqueTyOrigin::FnReturn(fn_def_id) | hir::OpaqueTyOrigin::AsyncFn(fn_def_id),
+ ..
+ }) => Some(fn_def_id.to_def_id()),
+ ItemKind::OpaqueTy(hir::OpaqueTy { origin: hir::OpaqueTyOrigin::TyAlias, .. }) => {
+ let parent_id = tcx.hir().get_parent_item(hir_id);
+ assert_ne!(parent_id, CRATE_DEF_ID);
+ debug!("generics_of: parent of opaque ty {:?} is {:?}", def_id, parent_id);
+ // Opaque types are always nested within another item, and
+ // inherit the generics of the item.
+ Some(parent_id.to_def_id())
+ }
+ _ => None,
+ },
+ _ => None,
+ };
+
+ let no_generics = hir::Generics::empty();
+ let ast_generics = node.generics().unwrap_or(&no_generics);
+ let (opt_self, allow_defaults) = match node {
+ Node::Item(item) => {
+ match item.kind {
+ ItemKind::Trait(..) | ItemKind::TraitAlias(..) => {
+ // Add in the self type parameter.
+ //
+ // Something of a hack: we reuse the trait's node id as
+ // the node id for the `Self` type parameter.
+ let opt_self = Some(ty::GenericParamDef {
+ index: 0,
+ name: kw::SelfUpper,
+ def_id,
+ pure_wrt_drop: false,
+ kind: ty::GenericParamDefKind::Type {
+ has_default: false,
+ object_lifetime_default: rl::Set1::Empty,
+ synthetic: false,
+ },
+ });
+
+ (opt_self, true)
+ }
+ ItemKind::TyAlias(..)
+ | ItemKind::Enum(..)
+ | ItemKind::Struct(..)
+ | ItemKind::OpaqueTy(..)
+ | ItemKind::Union(..) => (None, true),
+ _ => (None, false),
+ }
+ }
+ _ => (None, false),
+ };
+
+ let has_self = opt_self.is_some();
+ let mut parent_has_self = false;
+ let mut own_start = has_self as u32;
+ let parent_count = parent_def_id.map_or(0, |def_id| {
+ let generics = tcx.generics_of(def_id);
+ assert!(!has_self);
+ parent_has_self = generics.has_self;
+ own_start = generics.count() as u32;
+ generics.parent_count + generics.params.len()
+ });
+
+ let mut params: Vec<_> = Vec::with_capacity(ast_generics.params.len() + has_self as usize);
+
+ if let Some(opt_self) = opt_self {
+ params.push(opt_self);
+ }
+
+ let early_lifetimes = early_bound_lifetimes_from_generics(tcx, ast_generics);
+ params.extend(early_lifetimes.enumerate().map(|(i, param)| ty::GenericParamDef {
+ name: param.name.ident().name,
+ index: own_start + i as u32,
+ def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(),
+ pure_wrt_drop: param.pure_wrt_drop,
+ kind: ty::GenericParamDefKind::Lifetime,
+ }));
+
+ let object_lifetime_defaults = tcx.object_lifetime_defaults(hir_id.owner);
+
+ // Now create the real type and const parameters.
+ let type_start = own_start - has_self as u32 + params.len() as u32;
+ let mut i = 0;
+
+ params.extend(ast_generics.params.iter().filter_map(|param| match param.kind {
+ GenericParamKind::Lifetime { .. } => None,
+ GenericParamKind::Type { ref default, synthetic, .. } => {
+ if !allow_defaults && default.is_some() {
+ if !tcx.features().default_type_parameter_fallback {
+ tcx.struct_span_lint_hir(
+ lint::builtin::INVALID_TYPE_PARAM_DEFAULT,
+ param.hir_id,
+ param.span,
+ |lint| {
+ lint.build(
+ "defaults for type parameters are only allowed in \
+ `struct`, `enum`, `type`, or `trait` definitions",
+ )
+ .emit();
+ },
+ );
+ }
+ }
+
+ let kind = ty::GenericParamDefKind::Type {
+ has_default: default.is_some(),
+ object_lifetime_default: object_lifetime_defaults
+ .as_ref()
+ .map_or(rl::Set1::Empty, |o| o[i]),
+ synthetic,
+ };
+
+ let param_def = ty::GenericParamDef {
+ index: type_start + i as u32,
+ name: param.name.ident().name,
+ def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(),
+ pure_wrt_drop: param.pure_wrt_drop,
+ kind,
+ };
+ i += 1;
+ Some(param_def)
+ }
+ GenericParamKind::Const { default, .. } => {
+ if !allow_defaults && default.is_some() {
+ tcx.sess.span_err(
+ param.span,
+ "defaults for const parameters are only allowed in \
+ `struct`, `enum`, `type`, or `trait` definitions",
+ );
+ }
+
+ let param_def = ty::GenericParamDef {
+ index: type_start + i as u32,
+ name: param.name.ident().name,
+ def_id: tcx.hir().local_def_id(param.hir_id).to_def_id(),
+ pure_wrt_drop: param.pure_wrt_drop,
+ kind: ty::GenericParamDefKind::Const { has_default: default.is_some() },
+ };
+ i += 1;
+ Some(param_def)
+ }
+ }));
+
+ // Provide junk type parameter defs: the only place that
+ // cares about anything but the length is instantiation,
+ // and we don't do that for closures.
+ if let Node::Expr(&hir::Expr {
+ kind: hir::ExprKind::Closure(hir::Closure { movability: gen, .. }),
+ ..
+ }) = node
+ {
+ let dummy_args = if gen.is_some() {
+ &["<resume_ty>", "<yield_ty>", "<return_ty>", "<witness>", "<upvars>"][..]
+ } else {
+ &["<closure_kind>", "<closure_signature>", "<upvars>"][..]
+ };
+
+ params.extend(dummy_args.iter().enumerate().map(|(i, &arg)| ty::GenericParamDef {
+ index: type_start + i as u32,
+ name: Symbol::intern(arg),
+ def_id,
+ pure_wrt_drop: false,
+ kind: ty::GenericParamDefKind::Type {
+ has_default: false,
+ object_lifetime_default: rl::Set1::Empty,
+ synthetic: false,
+ },
+ }));
+ }
+
+ // Provide junk type parameter defs for const blocks.
+ if let Node::AnonConst(_) = node {
+ let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id));
+ if let Node::Expr(&Expr { kind: ExprKind::ConstBlock(_), .. }) = parent_node {
+ params.push(ty::GenericParamDef {
+ index: type_start,
+ name: Symbol::intern("<const_ty>"),
+ def_id,
+ pure_wrt_drop: false,
+ kind: ty::GenericParamDefKind::Type {
+ has_default: false,
+ object_lifetime_default: rl::Set1::Empty,
+ synthetic: false,
+ },
+ });
+ }
+ }
+
+ let param_def_id_to_index = params.iter().map(|param| (param.def_id, param.index)).collect();
+
+ ty::Generics {
+ parent: parent_def_id,
+ parent_count,
+ params,
+ param_def_id_to_index,
+ has_self: has_self || parent_has_self,
+ has_late_bound_regions: has_late_bound_regions(tcx, node),
+ }
+}
+
+fn are_suggestable_generic_args(generic_args: &[hir::GenericArg<'_>]) -> bool {
+ generic_args.iter().any(|arg| match arg {
+ hir::GenericArg::Type(ty) => is_suggestable_infer_ty(ty),
+ hir::GenericArg::Infer(_) => true,
+ _ => false,
+ })
+}
+
+/// Whether `ty` is a type with `_` placeholders that can be inferred. Used only in
+/// diagnostics, where inference can supply a suggestion for the appropriate type.
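+/// Returns `true` for e.g. `_`, `[_]`, `[u8; _]`, `(u8, _)`, and `*const _`.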
+fn is_suggestable_infer_ty(ty: &hir::Ty<'_>) -> bool {
+ debug!(?ty);
+ use hir::TyKind::*;
+ match &ty.kind {
+ Infer => true,
+ Slice(ty) => is_suggestable_infer_ty(ty),
+ Array(ty, length) => {
+ is_suggestable_infer_ty(ty) || matches!(length, hir::ArrayLen::Infer(_, _))
+ }
+ Tup(tys) => tys.iter().any(is_suggestable_infer_ty),
+ Ptr(mut_ty) | Rptr(_, mut_ty) => is_suggestable_infer_ty(mut_ty.ty),
+ OpaqueDef(_, generic_args) => are_suggestable_generic_args(generic_args),
+ Path(hir::QPath::TypeRelative(ty, segment)) => {
+ is_suggestable_infer_ty(ty) || are_suggestable_generic_args(segment.args().args)
+ }
+ Path(hir::QPath::Resolved(ty_opt, hir::Path { segments, .. })) => {
+ ty_opt.map_or(false, is_suggestable_infer_ty)
+ || segments.iter().any(|segment| are_suggestable_generic_args(segment.args().args))
+ }
+ _ => false,
+ }
+}
+
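+/// Returns the written return type if it contains a suggestable `_`
+/// placeholder, e.g. `Some(..)` for `fn f() -> _ { 5 }`; callers such as
+/// `infer_return_ty_for_fn_sig` below then report E0121 and suggest the
+/// type typeck actually found (`i32` here).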
+pub fn get_infer_ret_ty<'hir>(output: &'hir hir::FnRetTy<'hir>) -> Option<&'hir hir::Ty<'hir>> {
+ if let hir::FnRetTy::Return(ty) = output {
+ if is_suggestable_infer_ty(ty) {
+ return Some(&*ty);
+ }
+ }
+ None
+}
+
+fn fn_sig(tcx: TyCtxt<'_>, def_id: DefId) -> ty::PolyFnSig<'_> {
+ use rustc_hir::Node::*;
+ use rustc_hir::*;
+
+ let def_id = def_id.expect_local();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ let icx = ItemCtxt::new(tcx, def_id.to_def_id());
+
+ match tcx.hir().get(hir_id) {
+ TraitItem(hir::TraitItem {
+ kind: TraitItemKind::Fn(sig, TraitFn::Provided(_)),
+ generics,
+ ..
+ })
+ | Item(hir::Item { kind: ItemKind::Fn(sig, generics, _), .. }) => {
+ infer_return_ty_for_fn_sig(tcx, sig, generics, def_id, &icx)
+ }
+
+ ImplItem(hir::ImplItem { kind: ImplItemKind::Fn(sig, _), generics, .. }) => {
+ // Do not try to infer the return type for an impl method that comes from a trait.
+ if let Item(hir::Item { kind: ItemKind::Impl(i), .. }) =
+ tcx.hir().get(tcx.hir().get_parent_node(hir_id))
+ && i.of_trait.is_some()
+ {
+ <dyn AstConv<'_>>::ty_of_fn(
+ &icx,
+ hir_id,
+ sig.header.unsafety,
+ sig.header.abi,
+ sig.decl,
+ Some(generics),
+ None,
+ )
+ } else {
+ infer_return_ty_for_fn_sig(tcx, sig, generics, def_id, &icx)
+ }
+ }
+
+ TraitItem(hir::TraitItem {
+ kind: TraitItemKind::Fn(FnSig { header, decl, span: _ }, _),
+ generics,
+ ..
+ }) => <dyn AstConv<'_>>::ty_of_fn(
+ &icx,
+ hir_id,
+ header.unsafety,
+ header.abi,
+ decl,
+ Some(generics),
+ None,
+ ),
+
+ ForeignItem(&hir::ForeignItem { kind: ForeignItemKind::Fn(fn_decl, _, _), .. }) => {
+ let abi = tcx.hir().get_foreign_abi(hir_id);
+ compute_sig_of_foreign_fn_decl(tcx, def_id.to_def_id(), fn_decl, abi)
+ }
+
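+ // E.g. the ctor of `struct S(u8, bool);` gets the signature
+ // `fn(u8, bool) -> S`.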
+ Ctor(data) | Variant(hir::Variant { data, .. }) if data.ctor_hir_id().is_some() => {
+ let ty = tcx.type_of(tcx.hir().get_parent_item(hir_id));
+ let inputs =
+ data.fields().iter().map(|f| tcx.type_of(tcx.hir().local_def_id(f.hir_id)));
+ ty::Binder::dummy(tcx.mk_fn_sig(
+ inputs,
+ ty,
+ false,
+ hir::Unsafety::Normal,
+ abi::Abi::Rust,
+ ))
+ }
+
+ Expr(&hir::Expr { kind: hir::ExprKind::Closure { .. }, .. }) => {
+ // Closure signatures are not like other function
+ // signatures and cannot be accessed through `fn_sig`. For
+ // example, a closure signature excludes the `self`
+ // argument. In any case they are embedded within the
+ // closure type as part of the `ClosureSubsts`.
+ //
+ // To get the signature of a closure, you should use the
+ // `sig` method on the `ClosureSubsts`:
+ //
+ // substs.as_closure().sig(def_id, tcx)
+ bug!(
+ "to get the signature of a closure, use `substs.as_closure().sig()` not `fn_sig()`",
+ );
+ }
+
+ x => {
+ bug!("unexpected sort of node in fn_sig(): {:?}", x);
+ }
+ }
+}
+
+fn infer_return_ty_for_fn_sig<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ sig: &hir::FnSig<'_>,
+ generics: &hir::Generics<'_>,
+ def_id: LocalDefId,
+ icx: &ItemCtxt<'tcx>,
+) -> ty::PolyFnSig<'tcx> {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ match get_infer_ret_ty(&sig.decl.output) {
+ Some(ty) => {
+ let fn_sig = tcx.typeck(def_id).liberated_fn_sigs()[hir_id];
+ // Typeck doesn't expect erased regions to be returned from `type_of`.
+ let fn_sig = tcx.fold_regions(fn_sig, |r, _| match *r {
+ ty::ReErased => tcx.lifetimes.re_static,
+ _ => r,
+ });
+ let fn_sig = ty::Binder::dummy(fn_sig);
+
+ let mut visitor = HirPlaceholderCollector::default();
+ visitor.visit_ty(ty);
+ let mut diag = bad_placeholder(tcx, visitor.0, "return type");
+ let ret_ty = fn_sig.skip_binder().output();
+ if ret_ty.is_suggestable(tcx, false) {
+ diag.span_suggestion(
+ ty.span,
+ "replace with the correct return type",
+ ret_ty,
+ Applicability::MachineApplicable,
+ );
+ } else if matches!(ret_ty.kind(), ty::FnDef(..)) {
+ let fn_sig = ret_ty.fn_sig(tcx);
+ if fn_sig
+ .skip_binder()
+ .inputs_and_output
+ .iter()
+ .all(|t| t.is_suggestable(tcx, false))
+ {
+ diag.span_suggestion(
+ ty.span,
+ "replace with the correct return type",
+ fn_sig,
+ Applicability::MachineApplicable,
+ );
+ }
+ } else if ret_ty.is_closure() {
+ // We're dealing with a closure, so we should suggest using `impl Fn` or trait bounds
+ // to prevent the user from getting a papercut while trying to use the unique closure
+ // syntax (e.g. `[closure@src/lib.rs:2:5: 2:9]`).
+ diag.help("consider using an `Fn`, `FnMut`, or `FnOnce` trait bound");
+ diag.note("for more information on `Fn` traits and closure types, see https://doc.rust-lang.org/book/ch13-01-closures.html");
+ }
+ diag.emit();
+
+ fn_sig
+ }
+ None => <dyn AstConv<'_>>::ty_of_fn(
+ icx,
+ hir_id,
+ sig.header.unsafety,
+ sig.header.abi,
+ sig.decl,
+ Some(generics),
+ None,
+ ),
+ }
+}
+
+fn impl_trait_ref(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ty::TraitRef<'_>> {
+ let icx = ItemCtxt::new(tcx, def_id);
+ match tcx.hir().expect_item(def_id.expect_local()).kind {
+ hir::ItemKind::Impl(ref impl_) => impl_.of_trait.as_ref().map(|ast_trait_ref| {
+ let selfty = tcx.type_of(def_id);
+ <dyn AstConv<'_>>::instantiate_mono_trait_ref(&icx, ast_trait_ref, selfty)
+ }),
+ _ => bug!(),
+ }
+}
+
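+/// E.g. `impl !Send for MyType {}` is `Negative`, a `#[rustc_reservation_impl]`
+/// trait impl becomes `Reservation`, and everything else here is `Positive`.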
+fn impl_polarity(tcx: TyCtxt<'_>, def_id: DefId) -> ty::ImplPolarity {
+ let is_rustc_reservation = tcx.has_attr(def_id, sym::rustc_reservation_impl);
+ let item = tcx.hir().expect_item(def_id.expect_local());
+ match &item.kind {
+ hir::ItemKind::Impl(hir::Impl {
+ polarity: hir::ImplPolarity::Negative(span),
+ of_trait,
+ ..
+ }) => {
+ if is_rustc_reservation {
+ let span = span.to(of_trait.as_ref().map_or(*span, |t| t.path.span));
+ tcx.sess.span_err(span, "reservation impls can't be negative");
+ }
+ ty::ImplPolarity::Negative
+ }
+ hir::ItemKind::Impl(hir::Impl {
+ polarity: hir::ImplPolarity::Positive,
+ of_trait: None,
+ ..
+ }) => {
+ if is_rustc_reservation {
+ tcx.sess.span_err(item.span, "reservation impls can't be inherent");
+ }
+ ty::ImplPolarity::Positive
+ }
+ hir::ItemKind::Impl(hir::Impl {
+ polarity: hir::ImplPolarity::Positive,
+ of_trait: Some(_),
+ ..
+ }) => {
+ if is_rustc_reservation {
+ ty::ImplPolarity::Reservation
+ } else {
+ ty::ImplPolarity::Positive
+ }
+ }
+ item => bug!("impl_polarity: {:?} not an impl", item),
+ }
+}
+
+/// Returns the early-bound lifetimes declared in this generics
+/// listing. For anything other than fns/methods, this is just all
+/// the lifetimes that are declared. For fns or methods, we have to
+/// screen out those that do not appear in any where-clauses, etc.,
+/// using `resolve_lifetime::early_bound_lifetimes`.
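+///
+/// Roughly: in `fn f<'a, 'b>(x: &'a u32) where 'b: 'static`, `'b` appears in a
+/// where-clause and so is early-bound, while `'a`, used only in the argument
+/// type, stays late-bound and is screened out here.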
+fn early_bound_lifetimes_from_generics<'a, 'tcx: 'a>(
+ tcx: TyCtxt<'tcx>,
+ generics: &'a hir::Generics<'a>,
+) -> impl Iterator<Item = &'a hir::GenericParam<'a>> + Captures<'tcx> {
+ generics.params.iter().filter(move |param| match param.kind {
+ GenericParamKind::Lifetime { .. } => !tcx.is_late_bound(param.hir_id),
+ _ => false,
+ })
+}
+
+/// Returns a list of type predicates for the definition with ID `def_id`, including inferred
+/// lifetime constraints. This includes all predicates returned by `explicit_predicates_of`, plus
+/// inferred constraints concerning which regions outlive other regions.
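+///
+/// For example, `struct Ref<'a, T>(&'a T)` picks up the inferred predicate
+/// `T: 'a` here even though the user never wrote it.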
+fn predicates_defined_on(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
+ debug!("predicates_defined_on({:?})", def_id);
+ let mut result = tcx.explicit_predicates_of(def_id);
+ debug!("predicates_defined_on: explicit_predicates_of({:?}) = {:?}", def_id, result,);
+ let inferred_outlives = tcx.inferred_outlives_of(def_id);
+ if !inferred_outlives.is_empty() {
+ debug!(
+ "predicates_defined_on: inferred_outlives_of({:?}) = {:?}",
+ def_id, inferred_outlives,
+ );
+ if result.predicates.is_empty() {
+ result.predicates = inferred_outlives;
+ } else {
+ result.predicates = tcx
+ .arena
+ .alloc_from_iter(result.predicates.iter().chain(inferred_outlives).copied());
+ }
+ }
+
+ debug!("predicates_defined_on({:?}) = {:?}", def_id, result);
+ result
+}
+
+/// Returns a list of all type predicates (explicit and implicit) for the definition with
+/// ID `def_id`. This includes all predicates returned by `predicates_defined_on`, plus
+/// `Self: Trait` predicates for traits.
+fn predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
+ let mut result = tcx.predicates_defined_on(def_id);
+
+ if tcx.is_trait(def_id) {
+ // For traits, add `Self: Trait` predicate. This is
+ // not part of the predicates that a user writes, but it
+ // is something that one must prove in order to invoke a
+ // method or project an associated type.
+ //
+ // In the chalk setup, this predicate is not part of the
+ // "predicates" for a trait item. But it is useful in
+ // rustc because if you directly (e.g.) invoke a trait
+ // method like `Trait::method(...)`, you must naturally
+ // prove that the trait applies to the types that were
+ // used, and adding the predicate into this list ensures
+ // that this is done.
+ //
+ // We use a DUMMY_SP here to signal that trait bounds coming
+ // from the trait itself *shouldn't* be shown as the source of
+ // an obligation and should instead be skipped. Otherwise we'd use
+ // `tcx.def_span(def_id)`.
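+ //
+ // Concretely, for `trait Ord: Eq`, the final list contains both the
+ // supertrait predicate `Self: Eq` and the `Self: Ord` added here.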
+
+ let constness = if tcx.has_attr(def_id, sym::const_trait) {
+ ty::BoundConstness::ConstIfConst
+ } else {
+ ty::BoundConstness::NotConst
+ };
+
+ let span = rustc_span::DUMMY_SP;
+ result.predicates =
+ tcx.arena.alloc_from_iter(result.predicates.iter().copied().chain(std::iter::once((
+ ty::TraitRef::identity(tcx, def_id).with_constness(constness).to_predicate(tcx),
+ span,
+ ))));
+ }
+ debug!("predicates_of(def_id={:?}) = {:?}", def_id, result);
+ result
+}
+
+/// Returns a list of user-specified type predicates for the definition with ID `def_id`.
+/// N.B., this does not include any implied/inferred constraints.
+fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: DefId) -> ty::GenericPredicates<'_> {
+ use rustc_hir::*;
+
+ debug!("explicit_predicates_of(def_id={:?})", def_id);
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let node = tcx.hir().get(hir_id);
+
+ let mut is_trait = None;
+ let mut is_default_impl_trait = None;
+
+ let icx = ItemCtxt::new(tcx, def_id);
+
+ const NO_GENERICS: &hir::Generics<'_> = hir::Generics::empty();
+
+ // We use an `IndexSet` to preserve insertion order.
+ // Preserving insertion order is important here so as not to break UI tests.
+ let mut predicates: FxIndexSet<(ty::Predicate<'_>, Span)> = FxIndexSet::default();
+
+ let ast_generics = match node {
+ Node::TraitItem(item) => item.generics,
+
+ Node::ImplItem(item) => item.generics,
+
+ Node::Item(item) => {
+ match item.kind {
+ ItemKind::Impl(ref impl_) => {
+ if impl_.defaultness.is_default() {
+ is_default_impl_trait = tcx.impl_trait_ref(def_id).map(ty::Binder::dummy);
+ }
+ &impl_.generics
+ }
+ ItemKind::Fn(.., ref generics, _)
+ | ItemKind::TyAlias(_, ref generics)
+ | ItemKind::Enum(_, ref generics)
+ | ItemKind::Struct(_, ref generics)
+ | ItemKind::Union(_, ref generics) => *generics,
+
+ ItemKind::Trait(_, _, ref generics, ..) => {
+ is_trait = Some(ty::TraitRef::identity(tcx, def_id));
+ *generics
+ }
+ ItemKind::TraitAlias(ref generics, _) => {
+ is_trait = Some(ty::TraitRef::identity(tcx, def_id));
+ *generics
+ }
+ ItemKind::OpaqueTy(OpaqueTy {
+ origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..),
+ ..
+ }) => {
+ // return-position impl trait
+ //
+ // We don't inherit predicates from the parent here:
+ // If we have, say `fn f<'a, T: 'a>() -> impl Sized {}`
+ // then the return type is `f::<'static, T>::{{opaque}}`.
+ //
+ // If we inherited the predicates of `f` then we would
+ // require that `T: 'static` to show that the return
+ // type is well-formed.
+ //
+ // The only way to have something with this opaque type
+ // is from the return type of the containing function,
+ // which will ensure that the function's predicates
+ // hold.
+ return ty::GenericPredicates { parent: None, predicates: &[] };
+ }
+ ItemKind::OpaqueTy(OpaqueTy {
+ ref generics,
+ origin: hir::OpaqueTyOrigin::TyAlias,
+ ..
+ }) => {
+ // type-alias impl trait
+ generics
+ }
+
+ _ => NO_GENERICS,
+ }
+ }
+
+ Node::ForeignItem(item) => match item.kind {
+ ForeignItemKind::Static(..) => NO_GENERICS,
+ ForeignItemKind::Fn(_, _, ref generics) => *generics,
+ ForeignItemKind::Type => NO_GENERICS,
+ },
+
+ _ => NO_GENERICS,
+ };
+
+ let generics = tcx.generics_of(def_id);
+ let parent_count = generics.parent_count as u32;
+ let has_own_self = generics.has_self && parent_count == 0;
+
+ // Below we'll consider the bounds on the type parameters (including `Self`)
+ // and the explicit where-clauses, but to get the full set of predicates
+ // on a trait we need to add in the supertrait bounds and bounds found on
+ // associated types.
+ if let Some(_trait_ref) = is_trait {
+ predicates.extend(tcx.super_predicates_of(def_id).predicates.iter().cloned());
+ }
+
+ // In default impls, we can assume that the self type implements
+ // the trait. So in:
+ //
+ // default impl Foo for Bar { .. }
+ //
+ // we add a default where clause `Foo: Bar`. We do a similar thing for traits
+ // (see below). Recall that a default impl is not itself an impl, but rather a
+ // set of defaults that can be incorporated into another impl.
+ if let Some(trait_ref) = is_default_impl_trait {
+ predicates.insert((trait_ref.without_const().to_predicate(tcx), tcx.def_span(def_id)));
+ }
+
+ // Collect the region predicates that were declared inline as
+ // well. In the case of parameters declared on a fn or method, we
+ // have to be careful to only iterate over early-bound regions.
+ let mut index = parent_count
+ + has_own_self as u32
+ + early_bound_lifetimes_from_generics(tcx, ast_generics).count() as u32;
+
+ // Collect the predicates that were written inline by the user on each
+ // type parameter (e.g., `<T: Foo>`).
+ for param in ast_generics.params {
+ match param.kind {
+ // We already dealt with early bound lifetimes above.
+ GenericParamKind::Lifetime { .. } => (),
+ GenericParamKind::Type { .. } => {
+ let name = param.name.ident().name;
+ let param_ty = ty::ParamTy::new(index, name).to_ty(tcx);
+ index += 1;
+
+ let mut bounds = Bounds::default();
+ // Params are implicitly sized unless a `?Sized` bound is found
+ <dyn AstConv<'_>>::add_implicitly_sized(
+ &icx,
+ &mut bounds,
+ &[],
+ Some((param.hir_id, ast_generics.predicates)),
+ param.span,
+ );
+ predicates.extend(bounds.predicates(tcx, param_ty));
+ }
+ GenericParamKind::Const { .. } => {
+ // Bounds on const parameters are currently not possible.
+ index += 1;
+ }
+ }
+ }
+
+ // Add in the bounds that appear in the where-clause.
+ for predicate in ast_generics.predicates {
+ match predicate {
+ hir::WherePredicate::BoundPredicate(bound_pred) => {
+ let ty = icx.to_ty(bound_pred.bounded_ty);
+ let bound_vars = icx.tcx.late_bound_vars(bound_pred.bounded_ty.hir_id);
+
+ // Keep the type around in a dummy predicate, in case of no bounds.
+ // That way, `where Ty:` is not a complete noop (see #53696) and `Ty`
+ // is still checked for WF.
+ if bound_pred.bounds.is_empty() {
+ if let ty::Param(_) = ty.kind() {
+ // This is a `where T:`, which can appear in the HIR from the
+ // transformation that moves `?Sized` to `T`'s declaration.
+ // We can skip the predicate because type parameters are
+ // trivially WF, but also we *should* skip it, to avoid exposing
+ // users who never wrote `where Type:,` themselves to
+ // compiler/tooling bugs from not handling WF predicates.
+ } else {
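+ // Illustrative example (hypothetical code): for
+ // `struct S<T>(T) where Vec<T>:;`, we keep a `WellFormed(Vec<T>)`
+ // predicate so that `Vec<T>` is still checked for well-formedness.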
+ let span = bound_pred.bounded_ty.span;
+ let predicate = ty::Binder::bind_with_vars(
+ ty::PredicateKind::WellFormed(ty.into()),
+ bound_vars,
+ );
+ predicates.insert((predicate.to_predicate(tcx), span));
+ }
+ }
+
+ let mut bounds = Bounds::default();
+ <dyn AstConv<'_>>::add_bounds(
+ &icx,
+ ty,
+ bound_pred.bounds.iter(),
+ &mut bounds,
+ bound_vars,
+ );
+ predicates.extend(bounds.predicates(tcx, ty));
+ }
+
+ hir::WherePredicate::RegionPredicate(region_pred) => {
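+ // E.g. (illustrative): a user-written `where 'a: 'b` clause becomes
+ // an outlives predicate `OutlivesPredicate('a, 'b)` below.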
+ let r1 = <dyn AstConv<'_>>::ast_region_to_region(&icx, &region_pred.lifetime, None);
+ predicates.extend(region_pred.bounds.iter().map(|bound| {
+ let (r2, span) = match bound {
+ hir::GenericBound::Outlives(lt) => {
+ (<dyn AstConv<'_>>::ast_region_to_region(&icx, lt, None), lt.span)
+ }
+ _ => bug!(),
+ };
+ let pred = ty::Binder::dummy(ty::PredicateKind::RegionOutlives(
+ ty::OutlivesPredicate(r1, r2),
+ ))
+ .to_predicate(icx.tcx);
+
+ (pred, span)
+ }))
+ }
+
+ hir::WherePredicate::EqPredicate(..) => {
+ // FIXME(#20041)
+ }
+ }
+ }
+
+ if tcx.features().generic_const_exprs {
+ predicates.extend(const_evaluatable_predicates_of(tcx, def_id.expect_local()));
+ }
+
+ let mut predicates: Vec<_> = predicates.into_iter().collect();
+
+ // Subtle: before we store the predicates into the tcx, we
+ // sort them so that predicates like `T: Foo<Item=U>` come
+ // before uses of `U`. This avoids false ambiguity errors
+ // in trait checking. See `setup_constraining_predicates`
+ // for details.
+ if let Node::Item(&Item { kind: ItemKind::Impl { .. }, .. }) = node {
+ let self_ty = tcx.type_of(def_id);
+ let trait_ref = tcx.impl_trait_ref(def_id);
+ cgp::setup_constraining_predicates(
+ tcx,
+ &mut predicates,
+ trait_ref,
+ &mut cgp::parameters_for_impl(self_ty, trait_ref),
+ );
+ }
+
+ let result = ty::GenericPredicates {
+ parent: generics.parent,
+ predicates: tcx.arena.alloc_from_iter(predicates),
+ };
+ debug!("explicit_predicates_of(def_id={:?}) = {:?}", def_id, result);
+ result
+}
+
+fn const_evaluatable_predicates_of<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+) -> FxIndexSet<(ty::Predicate<'tcx>, Span)> {
+ struct ConstCollector<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ preds: FxIndexSet<(ty::Predicate<'tcx>, Span)>,
+ }
+
+ impl<'tcx> intravisit::Visitor<'tcx> for ConstCollector<'tcx> {
+ fn visit_anon_const(&mut self, c: &'tcx hir::AnonConst) {
+ let def_id = self.tcx.hir().local_def_id(c.hir_id);
+ let ct = ty::Const::from_anon_const(self.tcx, def_id);
+ if let ty::ConstKind::Unevaluated(uv) = ct.kind() {
+ assert_eq!(uv.promoted, None);
+ let span = self.tcx.hir().span(c.hir_id);
+ self.preds.insert((
+ ty::Binder::dummy(ty::PredicateKind::ConstEvaluatable(uv.shrink()))
+ .to_predicate(self.tcx),
+ span,
+ ));
+ }
+ }
+
+ fn visit_const_param_default(&mut self, _param: HirId, _ct: &'tcx hir::AnonConst) {
+ // Do not look into const param defaults,
+ // these get checked when they are actually instantiated.
+ //
+ // We do not want the following to error:
+ //
+ // struct Foo<const N: usize, const M: usize = { N + 1 }>;
+ // struct Bar<const N: usize>(Foo<N, 3>);
+ }
+ }
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let node = tcx.hir().get(hir_id);
+
+ let mut collector = ConstCollector { tcx, preds: FxIndexSet::default() };
+ if let hir::Node::Item(item) = node && let hir::ItemKind::Impl(ref impl_) = item.kind {
+ if let Some(of_trait) = &impl_.of_trait {
+ debug!("const_evaluatable_predicates_of({:?}): visit impl trait_ref", def_id);
+ collector.visit_trait_ref(of_trait);
+ }
+
+ debug!("const_evaluatable_predicates_of({:?}): visit_self_ty", def_id);
+ collector.visit_ty(impl_.self_ty);
+ }
+
+ if let Some(generics) = node.generics() {
+ debug!("const_evaluatable_predicates_of({:?}): visit_generics", def_id);
+ collector.visit_generics(generics);
+ }
+
+ if let Some(fn_sig) = tcx.hir().fn_sig_by_hir_id(hir_id) {
+ debug!("const_evaluatable_predicates_of({:?}): visit_fn_decl", def_id);
+ collector.visit_fn_decl(fn_sig.decl);
+ }
+ debug!("const_evaluatable_predicates_of({:?}) = {:?}", def_id, collector.preds);
+
+ collector.preds
+}
+
+fn trait_explicit_predicates_and_bounds(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+) -> ty::GenericPredicates<'_> {
+ assert_eq!(tcx.def_kind(def_id), DefKind::Trait);
+ gather_explicit_predicates_of(tcx, def_id.to_def_id())
+}
+
+fn explicit_predicates_of<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::GenericPredicates<'tcx> {
+ let def_kind = tcx.def_kind(def_id);
+ if let DefKind::Trait = def_kind {
+ // Remove bounds on associated types from the predicates, they will be
+ // returned by `explicit_item_bounds`.
+ let predicates_and_bounds = tcx.trait_explicit_predicates_and_bounds(def_id.expect_local());
+ let trait_identity_substs = InternalSubsts::identity_for_item(tcx, def_id);
+
+ let is_assoc_item_ty = |ty: Ty<'tcx>| {
+ // For a predicate from a where clause to become a bound on an
+ // associated type:
+ // * It must use the identity substs of the item.
+ // * Since any generic parameters on the item are not in scope,
+ // this means that the item is not a GAT, and its identity
+ // substs are the same as the trait's.
+ // * It must be an associated type for this trait (*not* a
+ // supertrait).
+ if let ty::Projection(projection) = ty.kind() {
+ projection.substs == trait_identity_substs
+ && tcx.associated_item(projection.item_def_id).container_id(tcx) == def_id
+ } else {
+ false
+ }
+ };
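+
+ // Illustrative example (hypothetical trait): in
+ // `trait Tr where Self::A: Clone { type A; }`, the `Self::A: Clone`
+ // predicate projects an associated type of `Tr` with the identity
+ // substs, so it is filtered out here and instead surfaces through
+ // `explicit_item_bounds`.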
+
+ let predicates: Vec<_> = predicates_and_bounds
+ .predicates
+ .iter()
+ .copied()
+ .filter(|(pred, _)| match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(tr) => !is_assoc_item_ty(tr.self_ty()),
+ ty::PredicateKind::Projection(proj) => {
+ !is_assoc_item_ty(proj.projection_ty.self_ty())
+ }
+ ty::PredicateKind::TypeOutlives(outlives) => !is_assoc_item_ty(outlives.0),
+ _ => true,
+ })
+ .collect();
+ if predicates.len() == predicates_and_bounds.predicates.len() {
+ predicates_and_bounds
+ } else {
+ ty::GenericPredicates {
+ parent: predicates_and_bounds.parent,
+ predicates: tcx.arena.alloc_slice(&predicates),
+ }
+ }
+ } else {
+ if matches!(def_kind, DefKind::AnonConst) && tcx.lazy_normalization() {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ if tcx.hir().opt_const_param_default_param_hir_id(hir_id).is_some() {
+ // In `generics_of` we set the generics' parent to be our parent's parent, which means that
+ // we lose out on the predicates of our actual parent if we don't return those predicates here.
+ // (See the comment in `generics_of` for more information on why the parent shenanigans are necessary.)
+ //
+ // struct Foo<T, const N: usize = { <T as Trait>::ASSOC }>(T) where T: Trait;
+ // ^^^ ^^^^^^^^^^^^^^^^^^^^^^^ the def id we are calling
+ // ^^^ explicit_predicates_of on
+ // parent item we don't have set as the
+ // parent of generics returned by `generics_of`
+ //
+ // In the above code we want the anon const to have predicates in its param env for `T: Trait`
+ let item_def_id = tcx.hir().get_parent_item(hir_id);
+ // In the above code example we would be calling `explicit_predicates_of(Foo)` here
+ return tcx.explicit_predicates_of(item_def_id);
+ }
+ }
+ gather_explicit_predicates_of(tcx, def_id)
+ }
+}
+
+/// Converts a specific `GenericBound` from the AST into a set of
+/// predicates that apply to the self type. A vector is returned
+/// because this can be anywhere from zero predicates (`T: ?Sized` adds no
+/// predicates) to one (`T: Foo`) to many (`T: Bar<X = i32>` adds `T: Bar`
+/// and `<T as Bar>::X == i32`).
+fn predicates_from_bound<'tcx>(
+ astconv: &dyn AstConv<'tcx>,
+ param_ty: Ty<'tcx>,
+ bound: &'tcx hir::GenericBound<'tcx>,
+ bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
+) -> Vec<(ty::Predicate<'tcx>, Span)> {
+ let mut bounds = Bounds::default();
+ astconv.add_bounds(param_ty, [bound].into_iter(), &mut bounds, bound_vars);
+ bounds.predicates(astconv.tcx(), param_ty).collect()
+}
+
+fn compute_sig_of_foreign_fn_decl<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ decl: &'tcx hir::FnDecl<'tcx>,
+ abi: abi::Abi,
+) -> ty::PolyFnSig<'tcx> {
+ let unsafety = if abi == abi::Abi::RustIntrinsic {
+ intrinsic_operation_unsafety(tcx.item_name(def_id))
+ } else {
+ hir::Unsafety::Unsafe
+ };
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let fty = <dyn AstConv<'_>>::ty_of_fn(
+ &ItemCtxt::new(tcx, def_id),
+ hir_id,
+ unsafety,
+ abi,
+ decl,
+ None,
+ None,
+ );
+
+ // Feature gate SIMD types in FFI, since I am not sure that the
+ // ABIs are handled at all correctly. -huonw
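+ //
+ // E.g. (hypothetical): `extern "C" { fn f(x: Simd); }`, where `Simd`
+ // is a `#[repr(simd)]` type, hits the error below unless
+ // `#![feature(simd_ffi)]` is enabled.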
+ if abi != abi::Abi::RustIntrinsic
+ && abi != abi::Abi::PlatformIntrinsic
+ && !tcx.features().simd_ffi
+ {
+ let check = |ast_ty: &hir::Ty<'_>, ty: Ty<'_>| {
+ if ty.is_simd() {
+ let snip = tcx
+ .sess
+ .source_map()
+ .span_to_snippet(ast_ty.span)
+ .map_or_else(|_| String::new(), |s| format!(" `{}`", s));
+ tcx.sess
+ .struct_span_err(
+ ast_ty.span,
+ &format!(
+ "use of SIMD type{} in FFI is highly experimental and \
+ may result in invalid code",
+ snip
+ ),
+ )
+ .help("add `#![feature(simd_ffi)]` to the crate attributes to enable")
+ .emit();
+ }
+ };
+ for (input, ty) in iter::zip(decl.inputs, fty.inputs().skip_binder()) {
+ check(input, *ty)
+ }
+ if let hir::FnRetTy::Return(ref ty) = decl.output {
+ check(ty, fty.output().skip_binder())
+ }
+ }
+
+ fty
+}
+
+fn is_foreign_item(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ match tcx.hir().get_if_local(def_id) {
+ Some(Node::ForeignItem(..)) => true,
+ Some(_) => false,
+ _ => bug!("is_foreign_item applied to non-local def-id {:?}", def_id),
+ }
+}
+
+fn generator_kind(tcx: TyCtxt<'_>, def_id: DefId) -> Option<hir::GeneratorKind> {
+ match tcx.hir().get_if_local(def_id) {
+ Some(Node::Expr(&rustc_hir::Expr {
+ kind: rustc_hir::ExprKind::Closure(&rustc_hir::Closure { body, .. }),
+ ..
+ })) => tcx.hir().body(body).generator_kind(),
+ Some(_) => None,
+ _ => bug!("generator_kind applied to non-local def-id {:?}", def_id),
+ }
+}
+
+fn from_target_feature(
+ tcx: TyCtxt<'_>,
+ attr: &ast::Attribute,
+ supported_target_features: &FxHashMap<String, Option<Symbol>>,
+ target_features: &mut Vec<Symbol>,
+) {
+ let Some(list) = attr.meta_item_list() else { return };
+ let bad_item = |span| {
+ let msg = "malformed `target_feature` attribute input";
+ let code = "enable = \"..\"";
+ tcx.sess
+ .struct_span_err(span, msg)
+ .span_suggestion(span, "must be of the form", code, Applicability::HasPlaceholders)
+ .emit();
+ };
+ let rust_features = tcx.features();
+ for item in list {
+ // Only `enable = ...` is accepted in the meta-item list.
+ if !item.has_name(sym::enable) {
+ bad_item(item.span());
+ continue;
+ }
+
+ // Must be of the form `enable = "..."` (a string).
+ let Some(value) = item.value_str() else {
+ bad_item(item.span());
+ continue;
+ };
+
+ // We allow comma separation to enable multiple features.
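+ // E.g. `#[target_feature(enable = "avx,avx2")]` enables both features.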
+ target_features.extend(value.as_str().split(',').filter_map(|feature| {
+ let Some(feature_gate) = supported_target_features.get(feature) else {
+ let msg =
+ format!("the feature named `{}` is not valid for this target", feature);
+ let mut err = tcx.sess.struct_span_err(item.span(), &msg);
+ err.span_label(
+ item.span(),
+ format!("`{}` is not valid for this target", feature),
+ );
+ if let Some(stripped) = feature.strip_prefix('+') {
+ let valid = supported_target_features.contains_key(stripped);
+ if valid {
+ err.help("consider removing the leading `+` in the feature name");
+ }
+ }
+ err.emit();
+ return None;
+ };
+
+ // Only allow features whose feature gates have been enabled.
+ let allowed = match feature_gate.as_ref().copied() {
+ Some(sym::arm_target_feature) => rust_features.arm_target_feature,
+ Some(sym::hexagon_target_feature) => rust_features.hexagon_target_feature,
+ Some(sym::powerpc_target_feature) => rust_features.powerpc_target_feature,
+ Some(sym::mips_target_feature) => rust_features.mips_target_feature,
+ Some(sym::riscv_target_feature) => rust_features.riscv_target_feature,
+ Some(sym::avx512_target_feature) => rust_features.avx512_target_feature,
+ Some(sym::sse4a_target_feature) => rust_features.sse4a_target_feature,
+ Some(sym::tbm_target_feature) => rust_features.tbm_target_feature,
+ Some(sym::wasm_target_feature) => rust_features.wasm_target_feature,
+ Some(sym::cmpxchg16b_target_feature) => rust_features.cmpxchg16b_target_feature,
+ Some(sym::movbe_target_feature) => rust_features.movbe_target_feature,
+ Some(sym::rtm_target_feature) => rust_features.rtm_target_feature,
+ Some(sym::f16c_target_feature) => rust_features.f16c_target_feature,
+ Some(sym::ermsb_target_feature) => rust_features.ermsb_target_feature,
+ Some(sym::bpf_target_feature) => rust_features.bpf_target_feature,
+ Some(sym::aarch64_ver_target_feature) => rust_features.aarch64_ver_target_feature,
+ Some(name) => bug!("unknown target feature gate {}", name),
+ None => true,
+ };
+ if !allowed {
+ feature_err(
+ &tcx.sess.parse_sess,
+ feature_gate.unwrap(),
+ item.span(),
+ &format!("the target feature `{}` is currently unstable", feature),
+ )
+ .emit();
+ }
+ Some(Symbol::intern(feature))
+ }));
+ }
+}
+
+fn linkage_by_name(tcx: TyCtxt<'_>, def_id: LocalDefId, name: &str) -> Linkage {
+ use rustc_middle::mir::mono::Linkage::*;
+
+ // Use the names from src/llvm/docs/LangRef.rst here. Most types are only
+ // applicable to variable declarations and may not really make sense for
+ // Rust code in the first place, but allow them anyway and trust that the
+ // user knows what they're doing. Who knows, unanticipated use cases may pop
+ // up in the future.
+ //
+ // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
+ // and don't have to be; LLVM treats them as no-ops.
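+ //
+ // E.g. (hypothetical): `#[linkage = "extern_weak"]` maps to
+ // `ExternalWeak` below.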
+ match name {
+ "appending" => Appending,
+ "available_externally" => AvailableExternally,
+ "common" => Common,
+ "extern_weak" => ExternalWeak,
+ "external" => External,
+ "internal" => Internal,
+ "linkonce" => LinkOnceAny,
+ "linkonce_odr" => LinkOnceODR,
+ "private" => Private,
+ "weak" => WeakAny,
+ "weak_odr" => WeakODR,
+ _ => tcx.sess.span_fatal(tcx.def_span(def_id), "invalid linkage specified"),
+ }
+}
+
+fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: DefId) -> CodegenFnAttrs {
+ if cfg!(debug_assertions) {
+ let def_kind = tcx.def_kind(did);
+ assert!(
+ def_kind.has_codegen_attrs(),
+ "unexpected `def_kind` in `codegen_fn_attrs`: {def_kind:?}",
+ );
+ }
+
+ let did = did.expect_local();
+ let attrs = tcx.hir().attrs(tcx.hir().local_def_id_to_hir_id(did));
+ let mut codegen_fn_attrs = CodegenFnAttrs::new();
+ if tcx.should_inherit_track_caller(did) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER;
+ }
+
+ // The `panic_no_unwind` function called by `TerminatorKind::Abort` will never
+ // unwind. If the panic handler that it invokes unwinds, then it will simply
+ // call the panic handler again.
+ if Some(did.to_def_id()) == tcx.lang_items().panic_no_unwind() {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
+ }
+
+ let supported_target_features = tcx.supported_target_features(LOCAL_CRATE);
+
+ let mut inline_span = None;
+ let mut link_ordinal_span = None;
+ let mut no_sanitize_span = None;
+ for attr in attrs.iter() {
+ if attr.has_name(sym::cold) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::COLD;
+ } else if attr.has_name(sym::rustc_allocator) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR;
+ } else if attr.has_name(sym::ffi_returns_twice) {
+ if tcx.is_foreign_item(did) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_RETURNS_TWICE;
+ } else {
+ // `#[ffi_returns_twice]` is only allowed on `extern fn`s.
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0724,
+ "`#[ffi_returns_twice]` may only be used on foreign functions"
+ )
+ .emit();
+ }
+ } else if attr.has_name(sym::ffi_pure) {
+ if tcx.is_foreign_item(did) {
+ if attrs.iter().any(|a| a.has_name(sym::ffi_const)) {
+ // `#[ffi_const]` functions cannot be `#[ffi_pure]`
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0757,
+ "`#[ffi_const]` function cannot be `#[ffi_pure]`"
+ )
+ .emit();
+ } else {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_PURE;
+ }
+ } else {
+ // `#[ffi_pure]` is only allowed on foreign functions
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0755,
+ "`#[ffi_pure]` may only be used on foreign functions"
+ )
+ .emit();
+ }
+ } else if attr.has_name(sym::ffi_const) {
+ if tcx.is_foreign_item(did) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::FFI_CONST;
+ } else {
+ // `#[ffi_const]` is only allowed on foreign functions
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0756,
+ "`#[ffi_const]` may only be used on foreign functions"
+ )
+ .emit();
+ }
+ } else if attr.has_name(sym::rustc_allocator_nounwind) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
+ } else if attr.has_name(sym::rustc_reallocator) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::REALLOCATOR;
+ } else if attr.has_name(sym::rustc_deallocator) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::DEALLOCATOR;
+ } else if attr.has_name(sym::rustc_allocator_zeroed) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::ALLOCATOR_ZEROED;
+ } else if attr.has_name(sym::naked) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NAKED;
+ } else if attr.has_name(sym::no_mangle) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
+ } else if attr.has_name(sym::no_coverage) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_COVERAGE;
+ } else if attr.has_name(sym::rustc_std_internal_symbol) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
+ } else if attr.has_name(sym::used) {
+ let inner = attr.meta_item_list();
+ match inner.as_deref() {
+ Some([item]) if item.has_name(sym::linker) => {
+ if !tcx.features().used_with_arg {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::used_with_arg,
+ attr.span,
+ "`#[used(linker)]` is currently unstable",
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED_LINKER;
+ }
+ Some([item]) if item.has_name(sym::compiler) => {
+ if !tcx.features().used_with_arg {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::used_with_arg,
+ attr.span,
+ "`#[used(compiler)]` is currently unstable",
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::USED;
+ }
+ Some(_) => {
+ tcx.sess
+ .struct_span_err(
+ attr.span,
+ "expected `used`, `used(compiler)` or `used(linker)`",
+ )
+ .emit();
+ }
+ None => {
+ // Unfortunately, unconditionally using `llvm.used` causes
+ // issues in handling `.init_array` with the gold linker,
+ // but using `llvm.compiler.used` caused a nontrivial amount
+ // of unintentional ecosystem breakage -- particularly on
+ // Mach-O targets.
+ //
+ // As a result, we emit `llvm.compiler.used` only on ELF
+ // targets. This is somewhat ad-hoc, but actually follows
+ // our pre-LLVM 13 behavior (prior to the ecosystem
+ // breakage), and seems to match `clang`'s behavior as well
+ // (both before and after LLVM 13), possibly because they
+ // have similar compatibility concerns to us. See
+ // https://github.com/rust-lang/rust/issues/47384#issuecomment-1019080146
+ // and following comments for some discussion of this, as
+ // well as the comments in `rustc_codegen_llvm` where these
+ // flags are handled.
+ //
+ // Anyway, to be clear: this is still up in the air
+ // somewhat, and is subject to change in the future (which
+ // is a good thing, because this would ideally be a bit
+ // more firmed up).
+ let is_like_elf = !(tcx.sess.target.is_like_osx
+ || tcx.sess.target.is_like_windows
+ || tcx.sess.target.is_like_wasm);
+ codegen_fn_attrs.flags |= if is_like_elf {
+ CodegenFnAttrFlags::USED
+ } else {
+ CodegenFnAttrFlags::USED_LINKER
+ };
+ }
+ }
+ } else if attr.has_name(sym::cmse_nonsecure_entry) {
+ if !matches!(tcx.fn_sig(did).abi(), abi::Abi::C { .. }) {
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0776,
+ "`#[cmse_nonsecure_entry]` requires C ABI"
+ )
+ .emit();
+ }
+ if !tcx.sess.target.llvm_target.contains("thumbv8m") {
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0775,
+ "`#[cmse_nonsecure_entry]` is only valid for targets with the TrustZone-M extension"
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::CMSE_NONSECURE_ENTRY;
+ } else if attr.has_name(sym::thread_local) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::THREAD_LOCAL;
+ } else if attr.has_name(sym::track_caller) {
+ if !tcx.is_closure(did.to_def_id()) && tcx.fn_sig(did).abi() != abi::Abi::Rust {
+ struct_span_err!(tcx.sess, attr.span, E0737, "`#[track_caller]` requires Rust ABI")
+ .emit();
+ }
+ if tcx.is_closure(did.to_def_id()) && !tcx.features().closure_track_caller {
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::closure_track_caller,
+ attr.span,
+ "`#[track_caller]` on closures is currently unstable",
+ )
+ .emit();
+ }
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER;
+ } else if attr.has_name(sym::export_name) {
+ if let Some(s) = attr.value_str() {
+ if s.as_str().contains('\0') {
+ // `#[export_name = ...]` will be converted to a null-terminated string,
+ // so it may not contain any null characters.
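+ // E.g. (hypothetical): `#[export_name = "foo\0bar"]` is rejected here.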
+ struct_span_err!(
+ tcx.sess,
+ attr.span,
+ E0648,
+ "`export_name` may not contain null characters"
+ )
+ .emit();
+ }
+ codegen_fn_attrs.export_name = Some(s);
+ }
+ } else if attr.has_name(sym::target_feature) {
+ if !tcx.is_closure(did.to_def_id())
+ && tcx.fn_sig(did).unsafety() == hir::Unsafety::Normal
+ {
+ if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc {
+ // The `#[target_feature]` attribute is allowed on
+ // WebAssembly targets on all functions, including safe
+ // ones. Other targets require that `#[target_feature]` is
+ // only applied to unsafe functions (pending the
+ // `target_feature_11` feature) because on most targets
+ // execution of instructions that are not supported is
+ // considered undefined behavior. For WebAssembly, which is a
+ // 100% safe target at execution time, it's not possible to
+ // execute undefined instructions, and even if a future
+ // feature were added in some form for this, it would be a
+ // deterministic trap. There is no undefined behavior when
+ // executing WebAssembly, so `#[target_feature]` is allowed
+ // on safe functions (but again, only for WebAssembly).
+ //
+ // Note that this is also allowed if `actually_rustdoc` so
+ // if a target is documenting some wasm-specific code then
+ // it's not spuriously denied.
+ } else if !tcx.features().target_feature_11 {
+ let mut err = feature_err(
+ &tcx.sess.parse_sess,
+ sym::target_feature_11,
+ attr.span,
+ "`#[target_feature(..)]` can only be applied to `unsafe` functions",
+ );
+ err.span_label(tcx.def_span(did), "not an `unsafe` function");
+ err.emit();
+ } else {
+ check_target_feature_trait_unsafe(tcx, did, attr.span);
+ }
+ }
+ from_target_feature(
+ tcx,
+ attr,
+ supported_target_features,
+ &mut codegen_fn_attrs.target_features,
+ );
+ } else if attr.has_name(sym::linkage) {
+ if let Some(val) = attr.value_str() {
+ codegen_fn_attrs.linkage = Some(linkage_by_name(tcx, did, val.as_str()));
+ }
+ } else if attr.has_name(sym::link_section) {
+ if let Some(val) = attr.value_str() {
+ if val.as_str().bytes().any(|b| b == 0) {
+ let msg = format!(
+ "illegal null byte in link_section \
+ value: `{}`",
+ &val
+ );
+ tcx.sess.span_err(attr.span, &msg);
+ } else {
+ codegen_fn_attrs.link_section = Some(val);
+ }
+ }
+ } else if attr.has_name(sym::link_name) {
+ codegen_fn_attrs.link_name = attr.value_str();
+ } else if attr.has_name(sym::link_ordinal) {
+ link_ordinal_span = Some(attr.span);
+ if let ordinal @ Some(_) = check_link_ordinal(tcx, attr) {
+ codegen_fn_attrs.link_ordinal = ordinal;
+ }
+ } else if attr.has_name(sym::no_sanitize) {
+ no_sanitize_span = Some(attr.span);
+ if let Some(list) = attr.meta_item_list() {
+ for item in list.iter() {
+ if item.has_name(sym::address) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::ADDRESS;
+ } else if item.has_name(sym::cfi) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::CFI;
+ } else if item.has_name(sym::memory) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMORY;
+ } else if item.has_name(sym::memtag) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::MEMTAG;
+ } else if item.has_name(sym::shadow_call_stack) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::SHADOWCALLSTACK;
+ } else if item.has_name(sym::thread) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::THREAD;
+ } else if item.has_name(sym::hwaddress) {
+ codegen_fn_attrs.no_sanitize |= SanitizerSet::HWADDRESS;
+ } else {
+ tcx.sess
+ .struct_span_err(item.span(), "invalid argument for `no_sanitize`")
+ .note("expected one of: `address`, `cfi`, `hwaddress`, `memory`, `memtag`, `shadow-call-stack`, or `thread`")
+ .emit();
+ }
+ }
+ }
+ } else if attr.has_name(sym::instruction_set) {
+ codegen_fn_attrs.instruction_set = match attr.meta_kind() {
+ Some(MetaItemKind::List(ref items)) => match items.as_slice() {
+ [NestedMetaItem::MetaItem(set)] => {
+ let segments =
+ set.path.segments.iter().map(|x| x.ident.name).collect::<Vec<_>>();
+ match segments.as_slice() {
+ [sym::arm, sym::a32] | [sym::arm, sym::t32] => {
+ if !tcx.sess.target.has_thumb_interworking {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0779,
+ "target does not support `#[instruction_set]`"
+ )
+ .emit();
+ None
+ } else if segments[1] == sym::a32 {
+ Some(InstructionSetAttr::ArmA32)
+ } else if segments[1] == sym::t32 {
+ Some(InstructionSetAttr::ArmT32)
+ } else {
+ unreachable!()
+ }
+ }
+ _ => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0779,
+ "invalid instruction set specified",
+ )
+ .emit();
+ None
+ }
+ }
+ }
+ [] => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0778,
+ "`#[instruction_set]` requires an argument"
+ )
+ .emit();
+ None
+ }
+ _ => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0779,
+ "cannot specify more than one instruction set"
+ )
+ .emit();
+ None
+ }
+ },
+ _ => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0778,
+ "must specify an instruction set"
+ )
+ .emit();
+ None
+ }
+ };
+ } else if attr.has_name(sym::repr) {
+ codegen_fn_attrs.alignment = match attr.meta_item_list() {
+ Some(items) => match items.as_slice() {
+ [item] => match item.name_value_literal() {
+ Some((sym::align, literal)) => {
+ let alignment = rustc_attr::parse_alignment(&literal.kind);
+
+ match alignment {
+ Ok(align) => Some(align),
+ Err(msg) => {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0589,
+ "invalid `repr(align)` attribute: {}",
+ msg
+ )
+ .emit();
+
+ None
+ }
+ }
+ }
+ _ => None,
+ },
+ [] => None,
+ _ => None,
+ },
+ None => None,
+ };
+ }
+ }
+
+ codegen_fn_attrs.inline = attrs.iter().fold(InlineAttr::None, |ia, attr| {
+ if !attr.has_name(sym::inline) {
+ return ia;
+ }
+ match attr.meta_kind() {
+ Some(MetaItemKind::Word) => InlineAttr::Hint,
+ Some(MetaItemKind::List(ref items)) => {
+ inline_span = Some(attr.span);
+ if items.len() != 1 {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ attr.span,
+ E0534,
+ "expected one argument"
+ )
+ .emit();
+ InlineAttr::None
+ } else if list_contains_name(&items, sym::always) {
+ InlineAttr::Always
+ } else if list_contains_name(&items, sym::never) {
+ InlineAttr::Never
+ } else {
+ struct_span_err!(
+ tcx.sess.diagnostic(),
+ items[0].span(),
+ E0535,
+ "invalid argument"
+ )
+ .emit();
+
+ InlineAttr::None
+ }
+ }
+ Some(MetaItemKind::NameValue(_)) => ia,
+ None => ia,
+ }
+ });
+
+ codegen_fn_attrs.optimize = attrs.iter().fold(OptimizeAttr::None, |ia, attr| {
+ if !attr.has_name(sym::optimize) {
+ return ia;
+ }
+ let err = |sp, s| struct_span_err!(tcx.sess.diagnostic(), sp, E0722, "{}", s).emit();
+ match attr.meta_kind() {
+ Some(MetaItemKind::Word) => {
+ err(attr.span, "expected one argument");
+ ia
+ }
+ Some(MetaItemKind::List(ref items)) => {
+ inline_span = Some(attr.span);
+ if items.len() != 1 {
+ err(attr.span, "expected one argument");
+ OptimizeAttr::None
+ } else if list_contains_name(&items, sym::size) {
+ OptimizeAttr::Size
+ } else if list_contains_name(&items, sym::speed) {
+ OptimizeAttr::Speed
+ } else {
+ err(items[0].span(), "invalid argument");
+ OptimizeAttr::None
+ }
+ }
+ Some(MetaItemKind::NameValue(_)) => ia,
+ None => ia,
+ }
+ });
+
+ // #73631: closures inherit `#[target_feature]` annotations
+ if tcx.features().target_feature_11 && tcx.is_closure(did.to_def_id()) {
+ let owner_id = tcx.parent(did.to_def_id());
+ if tcx.def_kind(owner_id).has_codegen_attrs() {
+ codegen_fn_attrs
+ .target_features
+ .extend(tcx.codegen_fn_attrs(owner_id).target_features.iter().copied());
+ }
+ }
+
+ // If a function uses #[target_feature] it can't be inlined into general
+ // purpose functions as they wouldn't have the right target features
+ // enabled. For that reason we also forbid #[inline(always)] as it can't be
+ // respected.
+ if !codegen_fn_attrs.target_features.is_empty() {
+ if codegen_fn_attrs.inline == InlineAttr::Always {
+ if let Some(span) = inline_span {
+ tcx.sess.span_err(
+ span,
+ "cannot use `#[inline(always)]` with \
+ `#[target_feature]`",
+ );
+ }
+ }
+ }
+
+ if !codegen_fn_attrs.no_sanitize.is_empty() {
+ if codegen_fn_attrs.inline == InlineAttr::Always {
+ if let (Some(no_sanitize_span), Some(inline_span)) = (no_sanitize_span, inline_span) {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(did);
+ tcx.struct_span_lint_hir(
+ lint::builtin::INLINE_NO_SANITIZE,
+ hir_id,
+ no_sanitize_span,
+ |lint| {
+ lint.build("`no_sanitize` will have no effect after inlining")
+ .span_note(inline_span, "inlining requested here")
+ .emit();
+ },
+ )
+ }
+ }
+ }
+
+ // Weak lang items have the same semantics as "std internal" symbols in the
+ // sense that they're preserved through all our LTO passes and only
+ // strippable by the linker.
+ //
+ // Additionally weak lang items have predetermined symbol names.
+ if tcx.is_weak_lang_item(did.to_def_id()) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL;
+ }
+ if let Some(name) = weak_lang_items::link_name(attrs) {
+ codegen_fn_attrs.export_name = Some(name);
+ codegen_fn_attrs.link_name = Some(name);
+ }
+ check_link_name_xor_ordinal(tcx, &codegen_fn_attrs, link_ordinal_span);
+
+ // Symbols internal to the standard library all have `no_mangle` semantics, in
+ // that they have defined symbol names present in the function name. This
+ // also applies to weak symbols, where they all have known symbol names.
+ if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_MANGLE;
+ }
+
+ // Any linkage to LLVM intrinsics for now forcibly marks them all as never
+ // unwinding, since LLVM sometimes can't handle codegen which `invoke`s
+ // intrinsic functions.
+ if let Some(name) = &codegen_fn_attrs.link_name {
+ if name.as_str().starts_with("llvm.") {
+ codegen_fn_attrs.flags |= CodegenFnAttrFlags::NEVER_UNWIND;
+ }
+ }
+
+ codegen_fn_attrs
+}
+
+/// Computes the set of target features used in a function for the purposes of
+/// inline assembly.
+fn asm_target_features<'tcx>(tcx: TyCtxt<'tcx>, did: DefId) -> &'tcx FxHashSet<Symbol> {
+ let mut target_features = tcx.sess.unstable_target_features.clone();
+ if tcx.def_kind(did).has_codegen_attrs() {
+ let attrs = tcx.codegen_fn_attrs(did);
+ target_features.extend(&attrs.target_features);
+ match attrs.instruction_set {
+ None => {}
+ Some(InstructionSetAttr::ArmA32) => {
+ target_features.remove(&sym::thumb_mode);
+ }
+ Some(InstructionSetAttr::ArmT32) => {
+ target_features.insert(sym::thumb_mode);
+ }
+ }
+ }
+
+ tcx.arena.alloc(target_features)
+}
+
+ /// Checks if the provided `DefId` is a method in a trait impl for a trait which has
+ /// `#[track_caller]` applied to the method prototype.
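+ ///
+ /// E.g. (illustrative): given `trait T { #[track_caller] fn f(&self); }`,
+ /// the `fn f` in an `impl T for S` body inherits `TRACK_CALLER` here.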
+fn should_inherit_track_caller(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ if let Some(impl_item) = tcx.opt_associated_item(def_id)
+ && let ty::AssocItemContainer::ImplContainer = impl_item.container
+ && let Some(trait_item) = impl_item.trait_item_def_id
+ {
+ return tcx
+ .codegen_fn_attrs(trait_item)
+ .flags
+ .intersects(CodegenFnAttrFlags::TRACK_CALLER);
+ }
+
+ false
+}
+
+fn check_link_ordinal(tcx: TyCtxt<'_>, attr: &ast::Attribute) -> Option<u16> {
+ use rustc_ast::{Lit, LitIntType, LitKind};
+ let meta_item_list = attr.meta_item_list();
+ let meta_item_list: Option<&[ast::NestedMetaItem]> = meta_item_list.as_ref().map(Vec::as_ref);
+ let sole_meta_list = match meta_item_list {
+ Some([item]) => item.literal(),
+ Some(_) => {
+ tcx.sess
+ .struct_span_err(attr.span, "incorrect number of arguments to `#[link_ordinal]`")
+ .note("the attribute requires exactly one argument")
+ .emit();
+ return None;
+ }
+ _ => None,
+ };
+ if let Some(Lit { kind: LitKind::Int(ordinal, LitIntType::Unsuffixed), .. }) = sole_meta_list {
+ // According to the table at https://docs.microsoft.com/en-us/windows/win32/debug/pe-format#import-header,
+ // the ordinal must fit into 16 bits. Similarly, the Ordinal field in COFFShortExport (defined
+ // in llvm/include/llvm/Object/COFFImportFile.h), which we use to communicate import information
+ // to LLVM for `#[link(kind = "raw-dylib")]`, is also defined to be uint16_t.
+ //
+ // FIXME: should we allow an ordinal of 0? The MSVC toolchain has inconsistent support for this:
+ // both LINK.EXE and LIB.EXE signal errors and abort when given a .DEF file that specifies
+ // a zero ordinal. However, llvm-dlltool is perfectly happy to generate an import library
+ // for such a .DEF file, and MSVC's LINK.EXE is also perfectly happy to consume an import
+ // library produced by LLVM with an ordinal of 0, and it generates an .EXE. (I don't know yet
+ // if the resulting EXE runs, as I haven't yet built the necessary DLL -- see earlier comment
+ // about LINK.EXE failing.)
+ if *ordinal <= u16::MAX as u128 {
+ Some(*ordinal as u16)
+ } else {
+ let msg = format!("ordinal value in `link_ordinal` is too large: `{}`", &ordinal);
+ tcx.sess
+ .struct_span_err(attr.span, &msg)
+ .note("the value may not exceed `u16::MAX`")
+ .emit();
+ None
+ }
+ } else {
+ tcx.sess
+ .struct_span_err(attr.span, "illegal ordinal format in `link_ordinal`")
+ .note("an unsuffixed integer value, e.g., `1`, is expected")
+ .emit();
+ None
+ }
+}
+
+fn check_link_name_xor_ordinal(
+ tcx: TyCtxt<'_>,
+ codegen_fn_attrs: &CodegenFnAttrs,
+ link_ordinal_span: Option<Span>,
+) {
+ if codegen_fn_attrs.link_name.is_none() || codegen_fn_attrs.link_ordinal.is_none() {
+ return;
+ }
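+ // E.g. (hypothetical): combining `#[link_name = "sym"]` and
+ // `#[link_ordinal(7)]` on the same foreign function reaches the
+ // error below.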
+ let msg = "cannot use `#[link_name]` with `#[link_ordinal]`";
+ if let Some(span) = link_ordinal_span {
+ tcx.sess.span_err(span, msg);
+ } else {
+ tcx.sess.err(msg);
+ }
+}
+
+ /// Checks that the function annotated with `#[target_feature]` is not a safe
+/// trait method implementation, reporting an error if it is.
+fn check_target_feature_trait_unsafe(tcx: TyCtxt<'_>, id: LocalDefId, attr_span: Span) {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(id);
+ let node = tcx.hir().get(hir_id);
+ if let Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }) = node {
+ let parent_id = tcx.hir().get_parent_item(hir_id);
+ let parent_item = tcx.hir().expect_item(parent_id);
+ if let hir::ItemKind::Impl(hir::Impl { of_trait: Some(_), .. }) = parent_item.kind {
+ tcx.sess
+ .struct_span_err(
+ attr_span,
+ "`#[target_feature(..)]` cannot be applied to safe trait method",
+ )
+ .span_label(attr_span, "cannot be applied to safe trait method")
+ .span_label(tcx.def_span(id), "not an `unsafe` function")
+ .emit();
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/collect/item_bounds.rs b/compiler/rustc_typeck/src/collect/item_bounds.rs
new file mode 100644
index 000000000..0d2b75d33
--- /dev/null
+++ b/compiler/rustc_typeck/src/collect/item_bounds.rs
@@ -0,0 +1,102 @@
+use super::ItemCtxt;
+use crate::astconv::AstConv;
+use rustc_hir as hir;
+use rustc_infer::traits::util;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::{self, DefIdTree, TyCtxt};
+use rustc_span::def_id::DefId;
+use rustc_span::Span;
+
+/// For associated types we include both bounds written on the type
+/// (`type X: Trait`) and predicates from the trait: `where Self::X: Trait`.
+///
+ /// Note that this filtering is done with the item's identity substs to
+/// simplify checking that these bounds are met in impls. This means that
+/// a bound such as `for<'b> <Self as X<'b>>::U: Clone` can't be used, as in
+/// `hr-associated-type-bound-1.rs`.
+fn associated_type_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ assoc_item_def_id: DefId,
+ ast_bounds: &'tcx [hir::GenericBound<'tcx>],
+ span: Span,
+) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+ let item_ty = tcx.mk_projection(
+ assoc_item_def_id,
+ InternalSubsts::identity_for_item(tcx, assoc_item_def_id),
+ );
+
+ let icx = ItemCtxt::new(tcx, assoc_item_def_id);
+ let mut bounds = <dyn AstConv<'_>>::compute_bounds(&icx, item_ty, ast_bounds);
+ // Associated types are implicitly sized unless a `?Sized` bound is found
+ <dyn AstConv<'_>>::add_implicitly_sized(&icx, &mut bounds, ast_bounds, None, span);
+
+ let trait_def_id = tcx.parent(assoc_item_def_id);
+ let trait_predicates = tcx.trait_explicit_predicates_and_bounds(trait_def_id.expect_local());
+
+ let bounds_from_parent = trait_predicates.predicates.iter().copied().filter(|(pred, _)| {
+ match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(tr) => tr.self_ty() == item_ty,
+ ty::PredicateKind::Projection(proj) => proj.projection_ty.self_ty() == item_ty,
+ ty::PredicateKind::TypeOutlives(outlives) => outlives.0 == item_ty,
+ _ => false,
+ }
+ });
+
+ let all_bounds = tcx
+ .arena
+ .alloc_from_iter(bounds.predicates(tcx, item_ty).into_iter().chain(bounds_from_parent));
+ debug!("associated_type_bounds({}) = {:?}", tcx.def_path_str(assoc_item_def_id), all_bounds);
+ all_bounds
+}
+
+/// Opaque types don't inherit bounds from their parent: for return position
+/// impl trait it isn't possible to write a suitable predicate on the
+ /// containing function, and for type-alias impl trait we don't have a backwards
+/// compatibility issue.
+fn opaque_type_bounds<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ opaque_def_id: DefId,
+ ast_bounds: &'tcx [hir::GenericBound<'tcx>],
+ span: Span,
+) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+ ty::print::with_no_queries!({
+ let item_ty =
+ tcx.mk_opaque(opaque_def_id, InternalSubsts::identity_for_item(tcx, opaque_def_id));
+
+ let icx = ItemCtxt::new(tcx, opaque_def_id);
+ let mut bounds = <dyn AstConv<'_>>::compute_bounds(&icx, item_ty, ast_bounds);
+ // Opaque types are implicitly sized unless a `?Sized` bound is found
+ <dyn AstConv<'_>>::add_implicitly_sized(&icx, &mut bounds, ast_bounds, None, span);
+ tcx.arena.alloc_from_iter(bounds.predicates(tcx, item_ty))
+ })
+}
+
+pub(super) fn explicit_item_bounds(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+) -> &'_ [(ty::Predicate<'_>, Span)] {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().get(hir_id) {
+ hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Type(bounds, _),
+ span,
+ ..
+ }) => associated_type_bounds(tcx, def_id, bounds, *span),
+ hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::OpaqueTy(hir::OpaqueTy { bounds, .. }),
+ span,
+ ..
+ }) => opaque_type_bounds(tcx, def_id, bounds, *span),
+ _ => bug!("item_bounds called on {:?}", def_id),
+ }
+}
+
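+ /// Elaborates the explicit item bounds. E.g. (illustrative): for
+ /// `type X: Eq;`, the explicit bound `X: Eq` is expanded to also include
+ /// the supertrait bound `X: PartialEq`.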
+pub(super) fn item_bounds(tcx: TyCtxt<'_>, def_id: DefId) -> &'_ ty::List<ty::Predicate<'_>> {
+ tcx.mk_predicates(
+ util::elaborate_predicates(
+ tcx,
+ tcx.explicit_item_bounds(def_id).iter().map(|&(bound, _span)| bound),
+ )
+ .map(|obligation| obligation.predicate),
+ )
+}
diff --git a/compiler/rustc_typeck/src/collect/type_of.rs b/compiler/rustc_typeck/src/collect/type_of.rs
new file mode 100644
index 000000000..534ddfa95
--- /dev/null
+++ b/compiler/rustc_typeck/src/collect/type_of.rs
@@ -0,0 +1,877 @@
+use rustc_errors::{Applicability, StashKey};
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::intravisit;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::{HirId, Node};
+use rustc_middle::hir::nested_filter;
+use rustc_middle::ty::subst::InternalSubsts;
+use rustc_middle::ty::util::IntTypeExt;
+use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt, TypeFolder, TypeSuperFoldable, TypeVisitable};
+use rustc_span::symbol::Ident;
+use rustc_span::{Span, DUMMY_SP};
+
+use super::ItemCtxt;
+use super::{bad_placeholder, is_suggestable_infer_ty};
+use crate::errors::UnconstrainedOpaqueType;
+
+/// Computes the relevant generic parameter for a potential generic const argument.
+///
+/// This should be called using the query `tcx.opt_const_param_of`.
+#[instrument(level = "debug", skip(tcx))]
+pub(super) fn opt_const_param_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<DefId> {
+ use hir::*;
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ match tcx.hir().get(hir_id) {
+ Node::AnonConst(_) => (),
+ _ => return None,
+ };
+
+ let parent_node_id = tcx.hir().get_parent_node(hir_id);
+ let parent_node = tcx.hir().get(parent_node_id);
+
+ let (generics, arg_idx) = match parent_node {
+ // This match arm is for when the def_id appears in a GAT whose
+ // path can't be resolved without typechecking e.g.
+ //
+ // trait Foo {
+ // type Assoc<const N: usize>;
+ // fn foo() -> Self::Assoc<3>;
+ // }
+ //
+ // In the above code we would call this query with the def_id of 3 and
+ // the parent_node we match on would be the hir node for Self::Assoc<3>
+ //
+ // `Self::Assoc<3>` can't be resolved without typechecking here, as we
+ // didn't write `<Self as Foo>::Assoc<3>`. If we had, then another match
+ // arm would handle this.
+ //
+ // I believe this match arm is only needed for GATs, but I am not 100% sure - BoxyUwU
+ Node::Ty(hir_ty @ Ty { kind: TyKind::Path(QPath::TypeRelative(_, segment)), .. }) => {
+ // Find the Item containing the associated type so we can create an ItemCtxt.
+ // Using the ItemCtxt convert the HIR for the unresolved assoc type into a
+ // ty which is a fully resolved projection.
+ // For the code example above, this would mean converting Self::Assoc<3>
+ // into a ty::Projection(<Self as Foo>::Assoc<3>)
+ let item_hir_id = tcx
+ .hir()
+ .parent_iter(hir_id)
+ .filter(|(_, node)| matches!(node, Node::Item(_)))
+ .map(|(id, _)| id)
+ .next()
+ .unwrap();
+ let item_did = tcx.hir().local_def_id(item_hir_id).to_def_id();
+ let item_ctxt = &ItemCtxt::new(tcx, item_did) as &dyn crate::astconv::AstConv<'_>;
+ let ty = item_ctxt.ast_ty_to_ty(hir_ty);
+
+ // Iterate through the generics of the projection to find the one that corresponds to
+ // the def_id that this query was called with. We filter to only const args here as a
+ // precaution in case it's ever allowed to elide lifetimes in GATs. It currently isn't,
+ // but it can't hurt to be safe ^^
+ if let ty::Projection(projection) = ty.kind() {
+ let generics = tcx.generics_of(projection.item_def_id);
+
+ let arg_index = segment
+ .args
+ .and_then(|args| {
+ args.args
+ .iter()
+ .filter(|arg| arg.is_ty_or_const())
+ .position(|arg| arg.id() == hir_id)
+ })
+ .unwrap_or_else(|| {
+ bug!("no arg matching AnonConst in segment");
+ });
+
+ (generics, arg_index)
+ } else {
+ // I don't think it's possible to reach this, but I'm not 100% sure - BoxyUwU
+ tcx.sess.delay_span_bug(
+ tcx.def_span(def_id),
+ "unexpected non-GAT usage of an anon const",
+ );
+ return None;
+ }
+ }
+ Node::Expr(&Expr {
+ kind:
+ ExprKind::MethodCall(segment, ..) | ExprKind::Path(QPath::TypeRelative(_, segment)),
+ ..
+ }) => {
+ let body_owner = tcx.hir().enclosing_body_owner(hir_id);
+ let tables = tcx.typeck(body_owner);
+ // This may fail in case the method/path does not actually exist.
+ // As there is no relevant param for `def_id`, we simply return
+ // `None` here.
+ let type_dependent_def = tables.type_dependent_def_id(parent_node_id)?;
+ let idx = segment
+ .args
+ .and_then(|args| {
+ args.args
+ .iter()
+ .filter(|arg| arg.is_ty_or_const())
+ .position(|arg| arg.id() == hir_id)
+ })
+ .unwrap_or_else(|| {
+ bug!("no arg matching AnonConst in segment");
+ });
+
+ (tcx.generics_of(type_dependent_def), idx)
+ }
+
+ Node::Ty(&Ty { kind: TyKind::Path(_), .. })
+ | Node::Expr(&Expr { kind: ExprKind::Path(_) | ExprKind::Struct(..), .. })
+ | Node::TraitRef(..)
+ | Node::Pat(_) => {
+ let path = match parent_node {
+ Node::Ty(&Ty { kind: TyKind::Path(QPath::Resolved(_, path)), .. })
+ | Node::TraitRef(&TraitRef { path, .. }) => &*path,
+ Node::Expr(&Expr {
+ kind:
+ ExprKind::Path(QPath::Resolved(_, path))
+ | ExprKind::Struct(&QPath::Resolved(_, path), ..),
+ ..
+ }) => {
+ let body_owner = tcx.hir().enclosing_body_owner(hir_id);
+ let _tables = tcx.typeck(body_owner);
+ &*path
+ }
+ Node::Pat(pat) => {
+ if let Some(path) = get_path_containing_arg_in_pat(pat, hir_id) {
+ path
+ } else {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(def_id),
+ &format!("unable to find const parent for {} in pat {:?}", hir_id, pat),
+ );
+ return None;
+ }
+ }
+ _ => {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(def_id),
+ &format!("unexpected const parent path {:?}", parent_node),
+ );
+ return None;
+ }
+ };
+
+ // We've encountered an `AnonConst` in some path, so we need to
+ // figure out which generic parameter it corresponds to and return
+ // the relevant type.
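+ //
+ // E.g. (hypothetical): for `foo::<{ N + 1 }>()`, we locate the
+ // position of `{ N + 1 }` among the ty/const arguments of the `foo`
+ // path segment.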
+ let Some((arg_index, segment)) = path.segments.iter().find_map(|seg| {
+ let args = seg.args?;
+ args.args
+ .iter()
+ .filter(|arg| arg.is_ty_or_const())
+ .position(|arg| arg.id() == hir_id)
+ .map(|index| (index, seg))
+ .or_else(|| {
+ args.bindings
+ .iter()
+ .filter_map(TypeBinding::opt_const)
+ .position(|ct| ct.hir_id == hir_id)
+ .map(|idx| (idx, seg))
+ })
+ }) else {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(def_id),
+ "no arg matching AnonConst in path",
+ );
+ return None;
+ };
+
+ // Try to use the segment resolution if it is valid, otherwise we
+ // default to the path resolution.
+ let res = segment.res.filter(|&r| r != Res::Err).unwrap_or(path.res);
+ let generics = match tcx.res_generics_def_id(res) {
+ Some(def_id) => tcx.generics_of(def_id),
+ None => {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(def_id),
+ &format!("unexpected anon const res {:?} in path: {:?}", res, path),
+ );
+ return None;
+ }
+ };
+
+ (generics, arg_index)
+ }
+ _ => return None,
+ };
+
+ debug!(?parent_node);
+ debug!(?generics, ?arg_idx);
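+ // When an own `Self` parameter is in scope, it occupies generic-param
+ // index 0 but never appears among the path arguments, so user-written
+ // arguments are shifted past it by one.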
+ generics
+ .params
+ .iter()
+ .filter(|param| param.kind.is_ty_or_const())
+ .nth(match generics.has_self && generics.parent.is_none() {
+ true => arg_idx + 1,
+ false => arg_idx,
+ })
+ .and_then(|param| match param.kind {
+ ty::GenericParamDefKind::Const { .. } => {
+ debug!(?param);
+ Some(param.def_id)
+ }
+ _ => None,
+ })
+}
+
+fn get_path_containing_arg_in_pat<'hir>(
+ pat: &'hir hir::Pat<'hir>,
+ arg_id: HirId,
+) -> Option<&'hir hir::Path<'hir>> {
+ use hir::*;
+
+ let is_arg_in_path = |p: &hir::Path<'_>| {
+ p.segments
+ .iter()
+ .filter_map(|seg| seg.args)
+ .flat_map(|args| args.args)
+ .any(|arg| arg.id() == arg_id)
+ };
+ let mut arg_path = None;
+ pat.walk(|pat| match pat.kind {
+ PatKind::Struct(QPath::Resolved(_, path), _, _)
+ | PatKind::TupleStruct(QPath::Resolved(_, path), _, _)
+ | PatKind::Path(QPath::Resolved(_, path))
+ if is_arg_in_path(path) =>
+ {
+ arg_path = Some(path);
+ false
+ }
+ _ => true,
+ });
+ arg_path
+}
+
+pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: DefId) -> Ty<'_> {
+ let def_id = def_id.expect_local();
+ use rustc_hir::*;
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ let icx = ItemCtxt::new(tcx, def_id.to_def_id());
+
+ match tcx.hir().get(hir_id) {
+ Node::TraitItem(item) => match item.kind {
+ TraitItemKind::Fn(..) => {
+ let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ tcx.mk_fn_def(def_id.to_def_id(), substs)
+ }
+ TraitItemKind::Const(ty, body_id) => body_id
+ .and_then(|body_id| {
+ if is_suggestable_infer_ty(ty) {
+ Some(infer_placeholder_type(
+ tcx, def_id, body_id, ty.span, item.ident, "constant",
+ ))
+ } else {
+ None
+ }
+ })
+ .unwrap_or_else(|| icx.to_ty(ty)),
+ TraitItemKind::Type(_, Some(ty)) => icx.to_ty(ty),
+ TraitItemKind::Type(_, None) => {
+ span_bug!(item.span, "associated type missing default");
+ }
+ },
+
+ Node::ImplItem(item) => match item.kind {
+ ImplItemKind::Fn(..) => {
+ let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ tcx.mk_fn_def(def_id.to_def_id(), substs)
+ }
+ ImplItemKind::Const(ty, body_id) => {
+ if is_suggestable_infer_ty(ty) {
+ infer_placeholder_type(tcx, def_id, body_id, ty.span, item.ident, "constant")
+ } else {
+ icx.to_ty(ty)
+ }
+ }
+ ImplItemKind::TyAlias(ty) => {
+ if tcx.impl_trait_ref(tcx.hir().get_parent_item(hir_id)).is_none() {
+ check_feature_inherent_assoc_ty(tcx, item.span);
+ }
+
+ icx.to_ty(ty)
+ }
+ },
+
+ Node::Item(item) => {
+ match item.kind {
+ ItemKind::Static(ty, .., body_id) => {
+ if is_suggestable_infer_ty(ty) {
+ infer_placeholder_type(
+ tcx,
+ def_id,
+ body_id,
+ ty.span,
+ item.ident,
+ "static variable",
+ )
+ } else {
+ icx.to_ty(ty)
+ }
+ }
+ ItemKind::Const(ty, body_id) => {
+ if is_suggestable_infer_ty(ty) {
+ infer_placeholder_type(
+ tcx, def_id, body_id, ty.span, item.ident, "constant",
+ )
+ } else {
+ icx.to_ty(ty)
+ }
+ }
+ ItemKind::TyAlias(self_ty, _) => icx.to_ty(self_ty),
+ ItemKind::Impl(hir::Impl { self_ty, .. }) => icx.to_ty(*self_ty),
+ ItemKind::Fn(..) => {
+ let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ tcx.mk_fn_def(def_id.to_def_id(), substs)
+ }
+ ItemKind::Enum(..) | ItemKind::Struct(..) | ItemKind::Union(..) => {
+ let def = tcx.adt_def(def_id);
+ let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ tcx.mk_adt(def, substs)
+ }
+ ItemKind::OpaqueTy(OpaqueTy { origin: hir::OpaqueTyOrigin::TyAlias, .. }) => {
+ find_opaque_ty_constraints_for_tait(tcx, def_id)
+ }
+ // Opaque types desugared from `impl Trait`.
+ ItemKind::OpaqueTy(OpaqueTy { origin: hir::OpaqueTyOrigin::FnReturn(owner) | hir::OpaqueTyOrigin::AsyncFn(owner), .. }) => {
+ find_opaque_ty_constraints_for_rpit(tcx, def_id, owner)
+ }
+ ItemKind::Trait(..)
+ | ItemKind::TraitAlias(..)
+ | ItemKind::Macro(..)
+ | ItemKind::Mod(..)
+ | ItemKind::ForeignMod { .. }
+ | ItemKind::GlobalAsm(..)
+ | ItemKind::ExternCrate(..)
+ | ItemKind::Use(..) => {
+ span_bug!(
+ item.span,
+ "compute_type_of_item: unexpected item type: {:?}",
+ item.kind
+ );
+ }
+ }
+ }
+
+ Node::ForeignItem(foreign_item) => match foreign_item.kind {
+ ForeignItemKind::Fn(..) => {
+ let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ tcx.mk_fn_def(def_id.to_def_id(), substs)
+ }
+ ForeignItemKind::Static(t, _) => icx.to_ty(t),
+ ForeignItemKind::Type => tcx.mk_foreign(def_id.to_def_id()),
+ },
+
+ Node::Ctor(&ref def) | Node::Variant(Variant { data: ref def, .. }) => match *def {
+ VariantData::Unit(..) | VariantData::Struct(..) => {
+ tcx.type_of(tcx.hir().get_parent_item(hir_id))
+ }
+ VariantData::Tuple(..) => {
+ let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ tcx.mk_fn_def(def_id.to_def_id(), substs)
+ }
+ },
+
+ Node::Field(field) => icx.to_ty(field.ty),
+
+ Node::Expr(&Expr { kind: ExprKind::Closure { .. }, .. }) => tcx.typeck(def_id).node_type(hir_id),
+
+ Node::AnonConst(_) if let Some(param) = tcx.opt_const_param_of(def_id) => {
+ // We defer to `type_of` of the corresponding parameter
+ // for generic arguments.
+ tcx.type_of(param)
+ }
+
+ Node::AnonConst(_) => {
+ let parent_node = tcx.hir().get(tcx.hir().get_parent_node(hir_id));
+ match parent_node {
+ Node::Ty(&Ty { kind: TyKind::Array(_, ref constant), .. })
+ | Node::Expr(&Expr { kind: ExprKind::Repeat(_, ref constant), .. })
+ if constant.hir_id() == hir_id =>
+ {
+ tcx.types.usize
+ }
+ Node::Ty(&Ty { kind: TyKind::Typeof(ref e), .. }) if e.hir_id == hir_id => {
+ tcx.typeck(def_id).node_type(e.hir_id)
+ }
+
+ Node::Expr(&Expr { kind: ExprKind::ConstBlock(ref anon_const), .. })
+ if anon_const.hir_id == hir_id =>
+ {
+ let substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ substs.as_inline_const().ty()
+ }
+
+ Node::Expr(&Expr { kind: ExprKind::InlineAsm(asm), .. })
+ | Node::Item(&Item { kind: ItemKind::GlobalAsm(asm), .. })
+ if asm.operands.iter().any(|(op, _op_sp)| match op {
+ hir::InlineAsmOperand::Const { anon_const }
+ | hir::InlineAsmOperand::SymFn { anon_const } => anon_const.hir_id == hir_id,
+ _ => false,
+ }) =>
+ {
+ tcx.typeck(def_id).node_type(hir_id)
+ }
+
+ Node::Variant(Variant { disr_expr: Some(ref e), .. }) if e.hir_id == hir_id => tcx
+ .adt_def(tcx.hir().get_parent_item(hir_id))
+ .repr()
+ .discr_type()
+ .to_ty(tcx),
+
+ Node::TypeBinding(binding @ &TypeBinding { hir_id: binding_id, .. })
+ if let Node::TraitRef(trait_ref) = tcx.hir().get(
+ tcx.hir().get_parent_node(binding_id)
+ ) =>
+ {
+ let Some(trait_def_id) = trait_ref.trait_def_id() else {
+ return tcx.ty_error_with_message(DUMMY_SP, "Could not find trait");
+ };
+ let assoc_items = tcx.associated_items(trait_def_id);
+ let assoc_item = assoc_items.find_by_name_and_kind(
+ tcx, binding.ident, ty::AssocKind::Const, def_id.to_def_id(),
+ );
+ if let Some(assoc_item) = assoc_item {
+ tcx.type_of(assoc_item.def_id)
+ } else {
+ // FIXME(associated_const_equality): add a useful error message here.
+ tcx.ty_error_with_message(
+ DUMMY_SP,
+ "Could not find associated const on trait",
+ )
+ }
+ }
+
+ Node::GenericParam(&GenericParam {
+ hir_id: param_hir_id,
+ kind: GenericParamKind::Const { default: Some(ct), .. },
+ ..
+ }) if ct.hir_id == hir_id => tcx.type_of(tcx.hir().local_def_id(param_hir_id)),
+
+ x =>
+ tcx.ty_error_with_message(
+ DUMMY_SP,
+ &format!("unexpected const parent in type_of(): {x:?}"),
+ ),
+ }
+ }
+
+ Node::GenericParam(param) => match &param.kind {
+ GenericParamKind::Type { default: Some(ty), .. }
+ | GenericParamKind::Const { ty, .. } => icx.to_ty(ty),
+ x => bug!("unexpected non-type Node::GenericParam: {:?}", x),
+ },
+
+ x => {
+ bug!("unexpected sort of node in type_of(): {:?}", x);
+ }
+ }
+}
+
+#[instrument(skip(tcx), level = "debug")]
+/// Checks "defining uses" of opaque `impl Trait` types to ensure that they meet the restrictions
+ /// laid out for "higher-order pattern unification".
+/// This ensures that inference is tractable.
+/// In particular, definitions of opaque types can only use other generics as arguments,
+/// and they cannot repeat an argument. Example:
+///
+/// ```ignore (illustrative)
+/// type Foo<A, B> = impl Bar<A, B>;
+///
+/// // Okay -- `Foo` is applied to two distinct, generic types.
+/// fn a<T, U>() -> Foo<T, U> { .. }
+///
+/// // Not okay -- `Foo` is applied to `T` twice.
+/// fn b<T>() -> Foo<T, T> { .. }
+///
+/// // Not okay -- `Foo` is applied to a non-generic type.
+ /// fn c<T>() -> Foo<T, u32> { .. }
+/// ```
+///
+fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Ty<'_> {
+ use rustc_hir::{Expr, ImplItem, Item, TraitItem};
+
+ struct ConstraintLocator<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ /// def_id of the opaque type whose defining uses are being checked
+ def_id: LocalDefId,
+
+ /// As we walk the defining uses, we are checking that all of them
+ /// define the same hidden type. This variable is set to `Some`
+ /// with the first type that we find, and then later types are
+ /// checked against it (we also carry the span of that first
+ /// type).
+ found: Option<ty::OpaqueHiddenType<'tcx>>,
+ }
+
+ impl ConstraintLocator<'_> {
+ #[instrument(skip(self), level = "debug")]
+ fn check(&mut self, item_def_id: LocalDefId) {
+ // Don't try to check items that cannot possibly constrain the type.
+ if !self.tcx.has_typeck_results(item_def_id) {
+ debug!("no constraint: no typeck results");
+ return;
+ }
+ // Calling `mir_borrowck` can lead to cycle errors through
+ // const-checking, avoid calling it if we don't have to.
+ // ```rust
+ // type Foo = impl Fn() -> usize; // when computing type for this
+ // const fn bar() -> Foo {
+ // || 0usize
+ // }
+ // const BAZR: Foo = bar(); // we would mir-borrowck this, causing cycles
+ // // because we again need to reveal `Foo` so we can check whether the
+ // // constant does not contain interior mutability.
+ // ```
+ let tables = self.tcx.typeck(item_def_id);
+        if tables.tainted_by_errors.is_some() {
+ self.found = Some(ty::OpaqueHiddenType { span: DUMMY_SP, ty: self.tcx.ty_error() });
+ return;
+ }
+ if !tables.concrete_opaque_types.contains_key(&self.def_id) {
+ debug!("no constraints in typeck results");
+ return;
+ }
+ // Use borrowck to get the type with unerased regions.
+ let concrete_opaque_types = &self.tcx.mir_borrowck(item_def_id).concrete_opaque_types;
+ debug!(?concrete_opaque_types);
+ if let Some(&concrete_type) = concrete_opaque_types.get(&self.def_id) {
+ debug!(?concrete_type, "found constraint");
+ if let Some(prev) = self.found {
+ if concrete_type.ty != prev.ty && !(concrete_type, prev).references_error() {
+ prev.report_mismatch(&concrete_type, self.tcx);
+ }
+ } else {
+ self.found = Some(concrete_type);
+ }
+ }
+ }
+ }
+
+ impl<'tcx> intravisit::Visitor<'tcx> for ConstraintLocator<'tcx> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+ fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
+ if let hir::ExprKind::Closure { .. } = ex.kind {
+ let def_id = self.tcx.hir().local_def_id(ex.hir_id);
+ self.check(def_id);
+ }
+ intravisit::walk_expr(self, ex);
+ }
+ fn visit_item(&mut self, it: &'tcx Item<'tcx>) {
+ trace!(?it.def_id);
+ // The opaque type itself or its children are not within its reveal scope.
+ if it.def_id != self.def_id {
+ self.check(it.def_id);
+ intravisit::walk_item(self, it);
+ }
+ }
+ fn visit_impl_item(&mut self, it: &'tcx ImplItem<'tcx>) {
+ trace!(?it.def_id);
+ // The opaque type itself or its children are not within its reveal scope.
+ if it.def_id != self.def_id {
+ self.check(it.def_id);
+ intravisit::walk_impl_item(self, it);
+ }
+ }
+ fn visit_trait_item(&mut self, it: &'tcx TraitItem<'tcx>) {
+ trace!(?it.def_id);
+ self.check(it.def_id);
+ intravisit::walk_trait_item(self, it);
+ }
+ }
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let scope = tcx.hir().get_defining_scope(hir_id);
+    let mut locator = ConstraintLocator { def_id, tcx, found: None };
+
+ debug!(?scope);
+
+ if scope == hir::CRATE_HIR_ID {
+ tcx.hir().walk_toplevel_module(&mut locator);
+ } else {
+ trace!("scope={:#?}", tcx.hir().get(scope));
+ match tcx.hir().get(scope) {
+            // We explicitly call `visit_*` methods, instead of using `intravisit::walk_*` methods.
+ // This allows our visitor to process the defining item itself, causing
+ // it to pick up any 'sibling' defining uses.
+ //
+ // For example, this code:
+ // ```
+ // fn foo() {
+ // type Blah = impl Debug;
+ // let my_closure = || -> Blah { true };
+ // }
+ // ```
+ //
+ // requires us to explicitly process `foo()` in order
+ // to notice the defining usage of `Blah`.
+ Node::Item(it) => locator.visit_item(it),
+ Node::ImplItem(it) => locator.visit_impl_item(it),
+ Node::TraitItem(it) => locator.visit_trait_item(it),
+ other => bug!("{:?} is not a valid scope for an opaque type item", other),
+ }
+ }
+
+ match locator.found {
+ Some(hidden) => hidden.ty,
+ None => {
+ tcx.sess.emit_err(UnconstrainedOpaqueType {
+ span: tcx.def_span(def_id),
+ name: tcx.item_name(tcx.local_parent(def_id).to_def_id()),
+ });
+ tcx.ty_error()
+ }
+ }
+}
+
+fn find_opaque_ty_constraints_for_rpit(
+ tcx: TyCtxt<'_>,
+ def_id: LocalDefId,
+ owner_def_id: LocalDefId,
+) -> Ty<'_> {
+ use rustc_hir::{Expr, ImplItem, Item, TraitItem};
+
+ struct ConstraintChecker<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ /// def_id of the opaque type whose defining uses are being checked
+ def_id: LocalDefId,
+
+ found: ty::OpaqueHiddenType<'tcx>,
+ }
+
+ impl ConstraintChecker<'_> {
+ #[instrument(skip(self), level = "debug")]
+ fn check(&self, def_id: LocalDefId) {
+ // Use borrowck to get the type with unerased regions.
+ let concrete_opaque_types = &self.tcx.mir_borrowck(def_id).concrete_opaque_types;
+ debug!(?concrete_opaque_types);
+ for &(def_id, concrete_type) in concrete_opaque_types {
+ if def_id != self.def_id {
+ // Ignore constraints for other opaque types.
+ continue;
+ }
+
+ debug!(?concrete_type, "found constraint");
+
+ if concrete_type.ty != self.found.ty
+ && !(concrete_type, self.found).references_error()
+ {
+ self.found.report_mismatch(&concrete_type, self.tcx);
+ }
+ }
+ }
+ }
+
+ impl<'tcx> intravisit::Visitor<'tcx> for ConstraintChecker<'tcx> {
+ type NestedFilter = nested_filter::OnlyBodies;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+ fn visit_expr(&mut self, ex: &'tcx Expr<'tcx>) {
+ if let hir::ExprKind::Closure { .. } = ex.kind {
+ let def_id = self.tcx.hir().local_def_id(ex.hir_id);
+ self.check(def_id);
+ }
+ intravisit::walk_expr(self, ex);
+ }
+ fn visit_item(&mut self, it: &'tcx Item<'tcx>) {
+ trace!(?it.def_id);
+ // The opaque type itself or its children are not within its reveal scope.
+ if it.def_id != self.def_id {
+ self.check(it.def_id);
+ intravisit::walk_item(self, it);
+ }
+ }
+ fn visit_impl_item(&mut self, it: &'tcx ImplItem<'tcx>) {
+ trace!(?it.def_id);
+ // The opaque type itself or its children are not within its reveal scope.
+ if it.def_id != self.def_id {
+ self.check(it.def_id);
+ intravisit::walk_impl_item(self, it);
+ }
+ }
+ fn visit_trait_item(&mut self, it: &'tcx TraitItem<'tcx>) {
+ trace!(?it.def_id);
+ self.check(it.def_id);
+ intravisit::walk_trait_item(self, it);
+ }
+ }
+
+ let concrete = tcx.mir_borrowck(owner_def_id).concrete_opaque_types.get(&def_id).copied();
+
+ if let Some(concrete) = concrete {
+ let scope = tcx.hir().local_def_id_to_hir_id(owner_def_id);
+ debug!(?scope);
+        let mut locator = ConstraintChecker { def_id, tcx, found: concrete };
+
+ match tcx.hir().get(scope) {
+ Node::Item(it) => intravisit::walk_item(&mut locator, it),
+ Node::ImplItem(it) => intravisit::walk_impl_item(&mut locator, it),
+ Node::TraitItem(it) => intravisit::walk_trait_item(&mut locator, it),
+ other => bug!("{:?} is not a valid scope for an opaque type item", other),
+ }
+ }
+
+ concrete.map(|concrete| concrete.ty).unwrap_or_else(|| {
+ let table = tcx.typeck(owner_def_id);
+        if table.tainted_by_errors.is_some() {
+            // Some error in the owner fn prevented us from
+            // populating the `concrete_opaque_types` table.
+ tcx.ty_error()
+ } else {
+ table
+ .concrete_opaque_types
+ .get(&def_id)
+ .copied()
+ .unwrap_or_else(|| {
+ // We failed to resolve the opaque type or it
+                // resolves to itself. We interpret this as
+ // no values of the hidden type ever being constructed,
+ // so we can just make the hidden type be `!`.
+ // For backwards compatibility reasons, we fall back to
+                // `()` until the diverging default is changed.
+ Some(tcx.mk_diverging_default())
+ })
+ .expect("RPIT always have a hidden type from typeck")
+ }
+ })
+}
+
+fn infer_placeholder_type<'a>(
+ tcx: TyCtxt<'a>,
+ def_id: LocalDefId,
+ body_id: hir::BodyId,
+ span: Span,
+ item_ident: Ident,
+ kind: &'static str,
+) -> Ty<'a> {
+ // Attempts to make the type nameable by turning FnDefs into FnPtrs.
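+    //
+    // Illustrative example (hypothetical names): for `const A = make_adder;`
+    // where `fn make_adder(n: u8) -> u8` is a free function, the folder below
+    // turns the unnameable zero-sized `FnDef(make_adder)` type into the
+    // nameable function pointer type `fn(u8) -> u8`, which we can suggest.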
+ struct MakeNameable<'tcx> {
+ success: bool,
+ tcx: TyCtxt<'tcx>,
+ }
+
+ impl<'tcx> MakeNameable<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>) -> Self {
+ MakeNameable { success: true, tcx }
+ }
+ }
+
+ impl<'tcx> TypeFolder<'tcx> for MakeNameable<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if !self.success {
+ return ty;
+ }
+
+ match ty.kind() {
+ ty::FnDef(def_id, _) => self.tcx.mk_fn_ptr(self.tcx.fn_sig(*def_id)),
+ // FIXME: non-capturing closures should also suggest a function pointer
+ ty::Closure(..) | ty::Generator(..) => {
+ self.success = false;
+ ty
+ }
+ _ => ty.super_fold_with(self),
+ }
+ }
+ }
+
+ let ty = tcx.diagnostic_only_typeck(def_id).node_type(body_id.hir_id);
+
+ // If this came from a free `const` or `static mut?` item,
+ // then the user may have written e.g. `const A = 42;`.
+ // In this case, the parser has stashed a diagnostic for
+ // us to improve in typeck so we do that now.
+ match tcx.sess.diagnostic().steal_diagnostic(span, StashKey::ItemNoType) {
+ Some(mut err) => {
+ if !ty.references_error() {
+ // The parser provided a sub-optimal `HasPlaceholders` suggestion for the type.
+                // We are in typeck and have the real type, so remove the placeholder suggestion and suggest the actual type.
+ // FIXME(eddyb) this looks like it should be functionality on `Diagnostic`.
+ if let Ok(suggestions) = &mut err.suggestions {
+ suggestions.clear();
+ }
+
+ // Suggesting unnameable types won't help.
+ let mut mk_nameable = MakeNameable::new(tcx);
+ let ty = mk_nameable.fold_ty(ty);
+ let sugg_ty = if mk_nameable.success { Some(ty) } else { None };
+ if let Some(sugg_ty) = sugg_ty {
+ err.span_suggestion(
+ span,
+ &format!("provide a type for the {item}", item = kind),
+ format!("{}: {}", item_ident, sugg_ty),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.span_note(
+ tcx.hir().body(body_id).value.span,
+ &format!("however, the inferred type `{}` cannot be named", ty),
+ );
+ }
+ }
+
+ err.emit();
+ }
+ None => {
+ let mut diag = bad_placeholder(tcx, vec![span], kind);
+
+ if !ty.references_error() {
+ let mut mk_nameable = MakeNameable::new(tcx);
+ let ty = mk_nameable.fold_ty(ty);
+ let sugg_ty = if mk_nameable.success { Some(ty) } else { None };
+ if let Some(sugg_ty) = sugg_ty {
+ diag.span_suggestion(
+ span,
+ "replace with the correct type",
+ sugg_ty,
+ Applicability::MaybeIncorrect,
+ );
+ } else {
+ diag.span_note(
+ tcx.hir().body(body_id).value.span,
+ &format!("however, the inferred type `{}` cannot be named", ty),
+ );
+ }
+ }
+
+ diag.emit();
+ }
+ }
+
+ // Typeck doesn't expect erased regions to be returned from `type_of`.
+ tcx.fold_regions(ty, |r, _| match *r {
+ ty::ReErased => tcx.lifetimes.re_static,
+ _ => r,
+ })
+}
+
+fn check_feature_inherent_assoc_ty(tcx: TyCtxt<'_>, span: Span) {
+ if !tcx.features().inherent_associated_types {
+ use rustc_session::parse::feature_err;
+ use rustc_span::symbol::sym;
+ feature_err(
+ &tcx.sess.parse_sess,
+ sym::inherent_associated_types,
+ span,
+ "inherent associated types are unstable",
+ )
+ .emit();
+ }
+}
diff --git a/compiler/rustc_typeck/src/constrained_generic_params.rs b/compiler/rustc_typeck/src/constrained_generic_params.rs
new file mode 100644
index 000000000..8428e4664
--- /dev/null
+++ b/compiler/rustc_typeck/src/constrained_generic_params.rs
@@ -0,0 +1,221 @@
+use rustc_data_structures::fx::FxHashSet;
+use rustc_middle::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_span::source_map::Span;
+use std::ops::ControlFlow;
+
+#[derive(Clone, PartialEq, Eq, Hash, Debug)]
+pub struct Parameter(pub u32);
+
+impl From<ty::ParamTy> for Parameter {
+ fn from(param: ty::ParamTy) -> Self {
+ Parameter(param.index)
+ }
+}
+
+impl From<ty::EarlyBoundRegion> for Parameter {
+ fn from(param: ty::EarlyBoundRegion) -> Self {
+ Parameter(param.index)
+ }
+}
+
+impl From<ty::ParamConst> for Parameter {
+ fn from(param: ty::ParamConst) -> Self {
+ Parameter(param.index)
+ }
+}
+
+/// Returns the set of parameters constrained by the impl header.
+pub fn parameters_for_impl<'tcx>(
+ impl_self_ty: Ty<'tcx>,
+ impl_trait_ref: Option<ty::TraitRef<'tcx>>,
+) -> FxHashSet<Parameter> {
+ let vec = match impl_trait_ref {
+ Some(tr) => parameters_for(&tr, false),
+ None => parameters_for(&impl_self_ty, false),
+ };
+ vec.into_iter().collect()
+}
+
+/// If `include_nonconstraining` is false, returns the list of parameters that are
+/// constrained by `t` - i.e., the value of each parameter in the list is
+/// uniquely determined by `t` (see RFC 447). If it is true, return the list
+/// of parameters whose values are needed in order to constrain `t` - these
+/// differ, with the latter being a superset, in the presence of projections.
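+///
+/// Illustrative example: for the type `(T, <U as Iterator>::Item)`, passing
+/// `include_nonconstraining = false` yields only `T`, since the projection is
+/// skipped (projections are not injective), while passing `true` also yields
+/// `U`, because the projection's value cannot be computed without knowing `U`.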
+pub fn parameters_for<'tcx>(
+ t: &impl TypeVisitable<'tcx>,
+ include_nonconstraining: bool,
+) -> Vec<Parameter> {
+ let mut collector = ParameterCollector { parameters: vec![], include_nonconstraining };
+ t.visit_with(&mut collector);
+ collector.parameters
+}
+
+struct ParameterCollector {
+ parameters: Vec<Parameter>,
+ include_nonconstraining: bool,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for ParameterCollector {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *t.kind() {
+ ty::Projection(..) if !self.include_nonconstraining => {
+ // projections are not injective
+ return ControlFlow::CONTINUE;
+ }
+ ty::Param(data) => {
+ self.parameters.push(Parameter::from(data));
+ }
+ _ => {}
+ }
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ReEarlyBound(data) = *r {
+ self.parameters.push(Parameter::from(data));
+ }
+ ControlFlow::CONTINUE
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match c.kind() {
+ ty::ConstKind::Unevaluated(..) if !self.include_nonconstraining => {
+ // Constant expressions are not injective
+ return c.ty().visit_with(self);
+ }
+ ty::ConstKind::Param(data) => {
+ self.parameters.push(Parameter::from(data));
+ }
+ _ => {}
+ }
+
+ c.super_visit_with(self)
+ }
+}
+
+pub fn identify_constrained_generic_params<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicates: ty::GenericPredicates<'tcx>,
+ impl_trait_ref: Option<ty::TraitRef<'tcx>>,
+ input_parameters: &mut FxHashSet<Parameter>,
+) {
+ let mut predicates = predicates.predicates.to_vec();
+ setup_constraining_predicates(tcx, &mut predicates, impl_trait_ref, input_parameters);
+}
+
+/// Order the predicates in `predicates` such that each parameter is
+/// constrained before it is used, if that is possible, and add the
+/// parameters so constrained to `input_parameters`. For example,
+/// imagine the following impl:
+/// ```ignore (illustrative)
+/// impl<T: Debug, U: Iterator<Item = T>> Trait for U
+/// ```
+/// The impl's predicates are collected from left to right. Ignoring
+/// the implicit `Sized` bounds, these are
+/// * T: Debug
+/// * U: Iterator
+/// * <U as Iterator>::Item = T -- a desugared ProjectionPredicate
+///
+/// When we, for example, try to go over the trait-reference
+/// `IntoIter<u32> as Trait`, we substitute the impl parameters with fresh
+/// variables and match them with the impl trait-ref, so we know that
+/// `$U = IntoIter<u32>`.
+///
+/// However, in order to process the `$T: Debug` predicate, we must first
+/// know the value of `$T` - which is only given by processing the
+/// projection. As we occasionally want to process predicates in a single
+/// pass, we want the projection to come first. In fact, as projections
+/// can (acyclically) depend on one another - see RFC447 for details - we
+/// need to topologically sort them.
+///
+/// We *do* have to be somewhat careful when projection targets contain
+/// projections themselves, for example in
+///     impl<S, U, V, W> Trait for U where
+///     /* 0 */ S: Iterator<Item = U>,
+///     /* - */ U: Iterator,
+///     /* 1 */ <U as Iterator>::Item: ToOwned<Owned = (W, <V as Iterator>::Item)>,
+///     /* 2 */ W: Iterator<Item = V>,
+///     /* 3 */ V: Debug,
+/// we have to evaluate the projections in the order I wrote them:
+/// `V: Debug` requires `V` to be evaluated. The only projection that
+/// *determines* `V` is 2 (1 contains it, but *does not determine it*,
+/// as it is only contained within a projection), but that requires `W`
+/// which is determined by 1, which requires `U`, that is determined
+/// by 0. I should probably pick a less tangled example, but I can't
+/// think of any.
+pub fn setup_constraining_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicates: &mut [(ty::Predicate<'tcx>, Span)],
+ impl_trait_ref: Option<ty::TraitRef<'tcx>>,
+ input_parameters: &mut FxHashSet<Parameter>,
+) {
+ // The canonical way of doing the needed topological sort
+ // would be a DFS, but getting the graph and its ownership
+ // right is annoying, so I am using an in-place fixed-point iteration,
+ // which is `O(nt)` where `t` is the depth of type-parameter constraints,
+ // remembering that `t` should be less than 7 in practice.
+ //
+ // Basically, I iterate over all projections and swap every
+ // "ready" projection to the start of the list, such that
+ // all of the projections before `i` are topologically sorted
+ // and constrain all the parameters in `input_parameters`.
+ //
+ // In the example, `input_parameters` starts by containing `U` - which
+ // is constrained by the trait-ref - and so on the first pass we
+ // observe that `<U as Iterator>::Item = T` is a "ready" projection that
+ // constrains `T` and swap it to front. As it is the sole projection,
+ // no more swaps can take place afterwards, with the result being
+ // * <U as Iterator>::Item = T
+ // * T: Debug
+ // * U: Iterator
+ debug!(
+ "setup_constraining_predicates: predicates={:?} \
+ impl_trait_ref={:?} input_parameters={:?}",
+ predicates, impl_trait_ref, input_parameters
+ );
+ let mut i = 0;
+ let mut changed = true;
+ while changed {
+ changed = false;
+
+ for j in i..predicates.len() {
+ // Note that we don't have to care about binders here,
+ // as the impl trait ref never contains any late-bound regions.
+ if let ty::PredicateKind::Projection(projection) = predicates[j].0.kind().skip_binder()
+ {
+ // Special case: watch out for some kind of sneaky attempt
+ // to project out an associated type defined by this very
+ // trait.
+ let unbound_trait_ref = projection.projection_ty.trait_ref(tcx);
+ if Some(unbound_trait_ref) == impl_trait_ref {
+ continue;
+ }
+
+ // A projection depends on its input types and determines its output
+ // type. For example, if we have
+ // `<<T as Bar>::Baz as Iterator>::Output = <U as Iterator>::Output`
+ // Then the projection only applies if `T` is known, but it still
+ // does not determine `U`.
+ let inputs = parameters_for(&projection.projection_ty, true);
+ let relies_only_on_inputs = inputs.iter().all(|p| input_parameters.contains(p));
+ if !relies_only_on_inputs {
+ continue;
+ }
+ input_parameters.extend(parameters_for(&projection.term, false));
+ } else {
+ continue;
+ }
+ // fancy control flow to bypass borrow checker
+ predicates.swap(i, j);
+ i += 1;
+ changed = true;
+ }
+ debug!(
+ "setup_constraining_predicates: predicates={:?} \
+ i={} impl_trait_ref={:?} input_parameters={:?}",
+ predicates, i, impl_trait_ref, input_parameters
+ );
+ }
+}
diff --git a/compiler/rustc_typeck/src/errors.rs b/compiler/rustc_typeck/src/errors.rs
new file mode 100644
index 000000000..0438ac02e
--- /dev/null
+++ b/compiler/rustc_typeck/src/errors.rs
@@ -0,0 +1,326 @@
+//! Errors emitted by typeck.
+use rustc_errors::{error_code, Applicability, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_macros::{SessionDiagnostic, SessionSubdiagnostic};
+use rustc_middle::ty::Ty;
+use rustc_session::{parse::ParseSess, SessionDiagnostic};
+use rustc_span::{symbol::Ident, Span, Symbol};
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::field_multiply_specified_in_initializer, code = "E0062")]
+pub struct FieldMultiplySpecifiedInInitializer {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(typeck::previous_use_label)]
+ pub prev_span: Span,
+ pub ident: Ident,
+}
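+
+// Illustrative usage, assuming `span`, `prev_span`, and `ident` are in scope
+// (compare the `emit_err` calls elsewhere in this crate):
+//
+//     tcx.sess.emit_err(FieldMultiplySpecifiedInInitializer { span, prev_span, ident });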
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::unrecognized_atomic_operation, code = "E0092")]
+pub struct UnrecognizedAtomicOperation<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub op: &'a str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::wrong_number_of_generic_arguments_to_intrinsic, code = "E0094")]
+pub struct WrongNumberOfGenericArgumentsToIntrinsic<'a> {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub found: usize,
+ pub expected: usize,
+ pub descr: &'a str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::unrecognized_intrinsic_function, code = "E0093")]
+pub struct UnrecognizedIntrinsicFunction {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::lifetimes_or_bounds_mismatch_on_trait, code = "E0195")]
+pub struct LifetimesOrBoundsMismatchOnTrait {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(typeck::generics_label)]
+ pub generics_span: Option<Span>,
+ pub item_kind: &'static str,
+ pub ident: Ident,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::drop_impl_on_wrong_item, code = "E0120")]
+pub struct DropImplOnWrongItem {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::field_already_declared, code = "E0124")]
+pub struct FieldAlreadyDeclared {
+ pub field_name: Ident,
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(typeck::previous_decl_label)]
+ pub prev_span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::copy_impl_on_type_with_dtor, code = "E0184")]
+pub struct CopyImplOnTypeWithDtor {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::multiple_relaxed_default_bounds, code = "E0203")]
+pub struct MultipleRelaxedDefaultBounds {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::copy_impl_on_non_adt, code = "E0206")]
+pub struct CopyImplOnNonAdt {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::trait_object_declared_with_no_traits, code = "E0224")]
+pub struct TraitObjectDeclaredWithNoTraits {
+ #[primary_span]
+ pub span: Span,
+ #[label(typeck::alias_span)]
+ pub trait_alias_span: Option<Span>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::ambiguous_lifetime_bound, code = "E0227")]
+pub struct AmbiguousLifetimeBound {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::assoc_type_binding_not_allowed, code = "E0229")]
+pub struct AssocTypeBindingNotAllowed {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::functional_record_update_on_non_struct, code = "E0436")]
+pub struct FunctionalRecordUpdateOnNonStruct {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::typeof_reserved_keyword_used, code = "E0516")]
+pub struct TypeofReservedKeywordUsed<'tcx> {
+ pub ty: Ty<'tcx>,
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[suggestion_verbose(code = "{ty}")]
+ pub opt_sugg: Option<(Span, Applicability)>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::return_stmt_outside_of_fn_body, code = "E0572")]
+pub struct ReturnStmtOutsideOfFnBody {
+ #[primary_span]
+ pub span: Span,
+ #[label(typeck::encl_body_label)]
+ pub encl_body_span: Option<Span>,
+ #[label(typeck::encl_fn_label)]
+ pub encl_fn_span: Option<Span>,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::yield_expr_outside_of_generator, code = "E0627")]
+pub struct YieldExprOutsideOfGenerator {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::struct_expr_non_exhaustive, code = "E0639")]
+pub struct StructExprNonExhaustive {
+ #[primary_span]
+ pub span: Span,
+ pub what: &'static str,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::method_call_on_unknown_type, code = "E0699")]
+pub struct MethodCallOnUnknownType {
+ #[primary_span]
+ pub span: Span,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::value_of_associated_struct_already_specified, code = "E0719")]
+pub struct ValueOfAssociatedStructAlreadySpecified {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ #[label(typeck::previous_bound_label)]
+ pub prev_span: Span,
+ pub item_name: Ident,
+ pub def_path: String,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::address_of_temporary_taken, code = "E0745")]
+pub struct AddressOfTemporaryTaken {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+}
+
+#[derive(SessionSubdiagnostic)]
+pub enum AddReturnTypeSuggestion<'tcx> {
+ #[suggestion(
+ typeck::add_return_type_add,
+ code = "-> {found} ",
+ applicability = "machine-applicable"
+ )]
+ Add {
+ #[primary_span]
+ span: Span,
+ found: Ty<'tcx>,
+ },
+ #[suggestion(
+ typeck::add_return_type_missing_here,
+ code = "-> _ ",
+ applicability = "has-placeholders"
+ )]
+ MissingHere {
+ #[primary_span]
+ span: Span,
+ },
+}
+
+#[derive(SessionSubdiagnostic)]
+pub enum ExpectedReturnTypeLabel<'tcx> {
+ #[label(typeck::expected_default_return_type)]
+ Unit {
+ #[primary_span]
+ span: Span,
+ },
+ #[label(typeck::expected_return_type)]
+ Other {
+ #[primary_span]
+ span: Span,
+ expected: Ty<'tcx>,
+ },
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::unconstrained_opaque_type)]
+#[note]
+pub struct UnconstrainedOpaqueType {
+ #[primary_span]
+ pub span: Span,
+ pub name: Symbol,
+}
+
+pub struct MissingTypeParams {
+ pub span: Span,
+ pub def_span: Span,
+ pub missing_type_params: Vec<Symbol>,
+ pub empty_generic_args: bool,
+}
+
+// Manual implementation of `SessionDiagnostic` to be able to call `span_to_snippet`.
+impl<'a> SessionDiagnostic<'a> for MissingTypeParams {
+ fn into_diagnostic(self, sess: &'a ParseSess) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ let mut err = sess.span_diagnostic.struct_span_err_with_code(
+ self.span,
+ rustc_errors::fluent::typeck::missing_type_params,
+ error_code!(E0393),
+ );
+ err.set_arg("parameterCount", self.missing_type_params.len());
+ err.set_arg(
+ "parameters",
+ self.missing_type_params
+ .iter()
+ .map(|n| format!("`{}`", n))
+ .collect::<Vec<_>>()
+ .join(", "),
+ );
+
+ err.span_label(self.def_span, rustc_errors::fluent::typeck::label);
+
+ let mut suggested = false;
+ if let (Ok(snippet), true) = (
+ sess.source_map().span_to_snippet(self.span),
+ // Don't suggest setting the type params if there are some already: the order is
+ // tricky to get right and the user will already know what the syntax is.
+ self.empty_generic_args,
+ ) {
+ if snippet.ends_with('>') {
+ // The user wrote `Trait<'a, T>` or similar. To provide an accurate suggestion
+ // we would have to preserve the right order. For now, as clearly the user is
+ // aware of the syntax, we do nothing.
+ } else {
+ // The user wrote `Iterator`, so we don't have a type we can suggest, but at
+ // least we can clue them to the correct syntax `Iterator<Type>`.
+ err.span_suggestion(
+ self.span,
+ rustc_errors::fluent::typeck::suggestion,
+ format!(
+ "{}<{}>",
+ snippet,
+ self.missing_type_params
+ .iter()
+ .map(|n| n.to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
+ ),
+ Applicability::HasPlaceholders,
+ );
+ suggested = true;
+ }
+ }
+ if !suggested {
+ err.span_label(self.span, rustc_errors::fluent::typeck::no_suggestion_label);
+ }
+
+ err.note(rustc_errors::fluent::typeck::note);
+ err
+ }
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::manual_implementation, code = "E0183")]
+#[help]
+pub struct ManualImplementation {
+ #[primary_span]
+ #[label]
+ pub span: Span,
+ pub trait_name: String,
+}
+
+#[derive(SessionDiagnostic)]
+#[error(typeck::substs_on_overridden_impl)]
+pub struct SubstsOnOverriddenImpl {
+ #[primary_span]
+ pub span: Span,
+}
diff --git a/compiler/rustc_typeck/src/expr_use_visitor.rs b/compiler/rustc_typeck/src/expr_use_visitor.rs
new file mode 100644
index 000000000..74a5b6e42
--- /dev/null
+++ b/compiler/rustc_typeck/src/expr_use_visitor.rs
@@ -0,0 +1,914 @@
+//! A different sort of visitor for walking fn bodies. Unlike the
+//! normal visitor, which just walks the entire body in one shot, the
+//! `ExprUseVisitor` determines how expressions are being used.
+
+use std::slice::from_ref;
+
+use hir::def::DefKind;
+use hir::Expr;
+// Export these here so that Clippy can use them.
+pub use rustc_middle::hir::place::{Place, PlaceBase, PlaceWithHirId, Projection};
+
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_hir as hir;
+use rustc_hir::def::Res;
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::PatKind;
+use rustc_index::vec::Idx;
+use rustc_infer::infer::InferCtxt;
+use rustc_middle::hir::place::ProjectionKind;
+use rustc_middle::mir::FakeReadCause;
+use rustc_middle::ty::{self, adjustment, AdtKind, Ty, TyCtxt};
+use rustc_target::abi::VariantIdx;
+use ty::BorrowKind::ImmBorrow;
+
+use crate::mem_categorization as mc;
+
+/// This trait defines the callbacks you can expect to receive when
+/// employing the ExprUseVisitor.
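+///
+/// An illustrative skeleton implementation that merely logs each callback
+/// (relying on the default `copy` and `bind` methods) might look like:
+///
+/// ```ignore (illustrative)
+/// struct LoggingDelegate;
+///
+/// impl<'tcx> Delegate<'tcx> for LoggingDelegate {
+///     fn consume(&mut self, place: &PlaceWithHirId<'tcx>, id: hir::HirId) {
+///         debug!("consume {:?} (diagnostics id {:?})", place, id);
+///     }
+///     fn borrow(&mut self, place: &PlaceWithHirId<'tcx>, id: hir::HirId, bk: ty::BorrowKind) {
+///         debug!("borrow {:?} as {:?} (diagnostics id {:?})", place, bk, id);
+///     }
+///     fn mutate(&mut self, place: &PlaceWithHirId<'tcx>, id: hir::HirId) {
+///         debug!("mutate {:?} (diagnostics id {:?})", place, id);
+///     }
+///     fn fake_read(&mut self, place: &PlaceWithHirId<'tcx>, cause: FakeReadCause, id: hir::HirId) {
+///         debug!("fake_read {:?} because {:?}", place, cause);
+///     }
+/// }
+/// ```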
+pub trait Delegate<'tcx> {
+    /// The value found at `place` is moved out of;
+    /// `diag_expr_id` is the id used for diagnostics for `place`.
+ ///
+ /// Use of a `Copy` type in a ByValue context is considered a use
+ /// by `ImmBorrow` and `borrow` is called instead. This is because
+ /// a shared borrow is the "minimum access" that would be needed
+ /// to perform a copy.
+    ///
+ /// The parameter `diag_expr_id` indicates the HIR id that ought to be used for
+ /// diagnostics. Around pattern matching such as `let pat = expr`, the diagnostic
+ /// id will be the id of the expression `expr` but the place itself will have
+ /// the id of the binding in the pattern `pat`.
+ fn consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId);
+
+ /// The value found at `place` is being borrowed with kind `bk`.
+ /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
+ fn borrow(
+ &mut self,
+ place_with_id: &PlaceWithHirId<'tcx>,
+ diag_expr_id: hir::HirId,
+ bk: ty::BorrowKind,
+ );
+
+ /// The value found at `place` is being copied.
+ /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
+ fn copy(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ // In most cases, copying data from `x` is equivalent to doing `*&x`, so by default
+ // we treat a copy of `x` as a borrow of `x`.
+ self.borrow(place_with_id, diag_expr_id, ty::BorrowKind::ImmBorrow)
+ }
+
+ /// The path at `assignee_place` is being assigned to.
+ /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details).
+ fn mutate(&mut self, assignee_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId);
+
+ /// The path at `binding_place` is a binding that is being initialized.
+ ///
+ /// This covers cases such as `let x = 42;`
+ fn bind(&mut self, binding_place: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ // Bindings can normally be treated as a regular assignment, so by default we
+ // forward this to the mutate callback.
+ self.mutate(binding_place, diag_expr_id)
+ }
+
+ /// The `place` should be a fake read because of specified `cause`.
+ fn fake_read(
+ &mut self,
+ place_with_id: &PlaceWithHirId<'tcx>,
+ cause: FakeReadCause,
+ diag_expr_id: hir::HirId,
+ );
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+enum ConsumeMode {
+ /// reference to x where x has a type that copies
+ Copy,
+ /// reference to x where x has a type that moves
+ Move,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug)]
+pub enum MutateMode {
+ Init,
+ /// Example: `x = y`
+ JustWrite,
+ /// Example: `x += y`
+ WriteAndRead,
+}
+
+/// The ExprUseVisitor type
+///
+/// This is the code that actually walks the tree.
+pub struct ExprUseVisitor<'a, 'tcx> {
+ mc: mc::MemCategorizationContext<'a, 'tcx>,
+ body_owner: LocalDefId,
+ delegate: &'a mut dyn Delegate<'tcx>,
+}
+
+/// If the MC results in an error, it's because the type check
+/// failed (or will fail, when the error is uncovered and reported
+/// during writeback). In this case, we just ignore this part of the
+/// code.
+///
+/// Note that this macro appears similar to try!(), but, unlike try!(),
+/// it does not propagate the error.
+macro_rules! return_if_err {
+ ($inp: expr) => {
+ match $inp {
+ Ok(v) => v,
+ Err(()) => {
+ debug!("mc reported err");
+ return;
+ }
+ }
+ };
+}
+
+impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
+ /// Creates the ExprUseVisitor, configuring it with the various options provided:
+ ///
+ /// - `delegate` -- who receives the callbacks
+    /// - `param_env` -- parameter environment for trait lookups (esp. pertaining to `Copy`)
+    /// - `typeck_results` -- typeck results for the code being analyzed
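+    ///
+    /// A typical (illustrative) use builds the visitor and walks a fn body,
+    /// where `MyDelegate` is some `Delegate` implementation:
+    ///
+    /// ```ignore (illustrative)
+    /// let mut delegate = MyDelegate::default();
+    /// ExprUseVisitor::new(&mut delegate, &infcx, body_owner_def_id, param_env, typeck_results)
+    ///     .consume_body(tcx.hir().body(body_id));
+    /// ```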
+ pub fn new(
+ delegate: &'a mut (dyn Delegate<'tcx> + 'a),
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ body_owner: LocalDefId,
+ param_env: ty::ParamEnv<'tcx>,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> Self {
+ ExprUseVisitor {
+ mc: mc::MemCategorizationContext::new(infcx, param_env, body_owner, typeck_results),
+ body_owner,
+ delegate,
+ }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ pub fn consume_body(&mut self, body: &hir::Body<'_>) {
+ for param in body.params {
+ let param_ty = return_if_err!(self.mc.pat_ty_adjusted(param.pat));
+ debug!("consume_body: param_ty = {:?}", param_ty);
+
+ let param_place = self.mc.cat_rvalue(param.hir_id, param.pat.span, param_ty);
+
+ self.walk_irrefutable_pat(&param_place, param.pat);
+ }
+
+ self.consume_expr(&body.value);
+ }
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.mc.tcx()
+ }
+
+ fn delegate_consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: hir::HirId) {
+ delegate_consume(&self.mc, self.delegate, place_with_id, diag_expr_id)
+ }
+
+ fn consume_exprs(&mut self, exprs: &[hir::Expr<'_>]) {
+ for expr in exprs {
+ self.consume_expr(expr);
+ }
+ }
+
+ pub fn consume_expr(&mut self, expr: &hir::Expr<'_>) {
+ debug!("consume_expr(expr={:?})", expr);
+
+ let place_with_id = return_if_err!(self.mc.cat_expr(expr));
+ self.delegate_consume(&place_with_id, place_with_id.hir_id);
+ self.walk_expr(expr);
+ }
+
+ fn mutate_expr(&mut self, expr: &hir::Expr<'_>) {
+ let place_with_id = return_if_err!(self.mc.cat_expr(expr));
+ self.delegate.mutate(&place_with_id, place_with_id.hir_id);
+ self.walk_expr(expr);
+ }
+
+ fn borrow_expr(&mut self, expr: &hir::Expr<'_>, bk: ty::BorrowKind) {
+ debug!("borrow_expr(expr={:?}, bk={:?})", expr, bk);
+
+ let place_with_id = return_if_err!(self.mc.cat_expr(expr));
+ self.delegate.borrow(&place_with_id, place_with_id.hir_id, bk);
+
+ self.walk_expr(expr)
+ }
+
+ fn select_from_expr(&mut self, expr: &hir::Expr<'_>) {
+ self.walk_expr(expr)
+ }
+
+ pub fn walk_expr(&mut self, expr: &hir::Expr<'_>) {
+ debug!("walk_expr(expr={:?})", expr);
+
+ self.walk_adjustment(expr);
+
+ match expr.kind {
+ hir::ExprKind::Path(_) => {}
+
+ hir::ExprKind::Type(subexpr, _) => self.walk_expr(subexpr),
+
+ hir::ExprKind::Unary(hir::UnOp::Deref, base) => {
+ // *base
+ self.select_from_expr(base);
+ }
+
+ hir::ExprKind::Field(base, _) => {
+ // base.f
+ self.select_from_expr(base);
+ }
+
+ hir::ExprKind::Index(lhs, rhs) => {
+ // lhs[rhs]
+ self.select_from_expr(lhs);
+ self.consume_expr(rhs);
+ }
+
+ hir::ExprKind::Call(callee, args) => {
+ // callee(args)
+ self.consume_expr(callee);
+ self.consume_exprs(args);
+ }
+
+ hir::ExprKind::MethodCall(.., args, _) => {
+ // callee.m(args)
+ self.consume_exprs(args);
+ }
+
+ hir::ExprKind::Struct(_, fields, ref opt_with) => {
+ self.walk_struct_expr(fields, opt_with);
+ }
+
+ hir::ExprKind::Tup(exprs) => {
+ self.consume_exprs(exprs);
+ }
+
+ hir::ExprKind::If(ref cond_expr, ref then_expr, ref opt_else_expr) => {
+ self.consume_expr(cond_expr);
+ self.consume_expr(then_expr);
+ if let Some(ref else_expr) = *opt_else_expr {
+ self.consume_expr(else_expr);
+ }
+ }
+
+ hir::ExprKind::Let(hir::Let { pat, init, .. }) => {
+ self.walk_local(init, pat, None, |t| t.borrow_expr(init, ty::ImmBorrow))
+ }
+
+ hir::ExprKind::Match(ref discr, arms, _) => {
+ let discr_place = return_if_err!(self.mc.cat_expr(discr));
+ self.maybe_read_scrutinee(
+ discr,
+ discr_place.clone(),
+ arms.iter().map(|arm| arm.pat),
+ );
+
+ // treatment of the discriminant is handled while walking the arms.
+ for arm in arms {
+ self.walk_arm(&discr_place, arm);
+ }
+ }
+
+ hir::ExprKind::Array(exprs) => {
+ self.consume_exprs(exprs);
+ }
+
+ hir::ExprKind::AddrOf(_, m, ref base) => {
+ // &base
+ // make sure that the thing we are pointing out stays valid
+ // for the lifetime `scope_r` of the resulting ptr:
+ let bk = ty::BorrowKind::from_mutbl(m);
+ self.borrow_expr(base, bk);
+ }
+
+ hir::ExprKind::InlineAsm(asm) => {
+ for (op, _op_sp) in asm.operands {
+ match op {
+ hir::InlineAsmOperand::In { expr, .. } => self.consume_expr(expr),
+ hir::InlineAsmOperand::Out { expr: Some(expr), .. }
+ | hir::InlineAsmOperand::InOut { expr, .. } => {
+ self.mutate_expr(expr);
+ }
+ hir::InlineAsmOperand::SplitInOut { in_expr, out_expr, .. } => {
+ self.consume_expr(in_expr);
+ if let Some(out_expr) = out_expr {
+ self.mutate_expr(out_expr);
+ }
+ }
+ hir::InlineAsmOperand::Out { expr: None, .. }
+ | hir::InlineAsmOperand::Const { .. }
+ | hir::InlineAsmOperand::SymFn { .. }
+ | hir::InlineAsmOperand::SymStatic { .. } => {}
+ }
+ }
+ }
+
+ hir::ExprKind::Continue(..)
+ | hir::ExprKind::Lit(..)
+ | hir::ExprKind::ConstBlock(..)
+ | hir::ExprKind::Err => {}
+
+ hir::ExprKind::Loop(blk, ..) => {
+ self.walk_block(blk);
+ }
+
+ hir::ExprKind::Unary(_, lhs) => {
+ self.consume_expr(lhs);
+ }
+
+ hir::ExprKind::Binary(_, lhs, rhs) => {
+ self.consume_expr(lhs);
+ self.consume_expr(rhs);
+ }
+
+ hir::ExprKind::Block(blk, _) => {
+ self.walk_block(blk);
+ }
+
+ hir::ExprKind::Break(_, ref opt_expr) | hir::ExprKind::Ret(ref opt_expr) => {
+ if let Some(expr) = *opt_expr {
+ self.consume_expr(expr);
+ }
+ }
+
+ hir::ExprKind::Assign(lhs, rhs, _) => {
+ self.mutate_expr(lhs);
+ self.consume_expr(rhs);
+ }
+
+ hir::ExprKind::Cast(base, _) => {
+ self.consume_expr(base);
+ }
+
+ hir::ExprKind::DropTemps(expr) => {
+ self.consume_expr(expr);
+ }
+
+ hir::ExprKind::AssignOp(_, lhs, rhs) => {
+ if self.mc.typeck_results.is_method_call(expr) {
+ self.consume_expr(lhs);
+ } else {
+ self.mutate_expr(lhs);
+ }
+ self.consume_expr(rhs);
+ }
+
+ hir::ExprKind::Repeat(base, _) => {
+ self.consume_expr(base);
+ }
+
+ hir::ExprKind::Closure { .. } => {
+ self.walk_captures(expr);
+ }
+
+ hir::ExprKind::Box(ref base) => {
+ self.consume_expr(base);
+ }
+
+ hir::ExprKind::Yield(value, _) => {
+ self.consume_expr(value);
+ }
+ }
+ }
+
+ fn walk_stmt(&mut self, stmt: &hir::Stmt<'_>) {
+ match stmt.kind {
+ hir::StmtKind::Local(hir::Local { pat, init: Some(expr), els, .. }) => {
+ self.walk_local(expr, pat, *els, |_| {})
+ }
+
+ hir::StmtKind::Local(_) => {}
+
+ hir::StmtKind::Item(_) => {
+ // We don't visit nested items in this visitor,
+ // only the fn body we were given.
+ }
+
+ hir::StmtKind::Expr(ref expr) | hir::StmtKind::Semi(ref expr) => {
+ self.consume_expr(expr);
+ }
+ }
+ }
+
+ fn maybe_read_scrutinee<'t>(
+ &mut self,
+ discr: &Expr<'_>,
+ discr_place: PlaceWithHirId<'tcx>,
+ pats: impl Iterator<Item = &'t hir::Pat<'t>>,
+ ) {
+ // Matching should not always be considered a use of the place, hence
+ // discr does not necessarily need to be borrowed.
+        // We only want to borrow discr if the pattern contains something other
+ // than wildcards.
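+        //
+        // Illustrative example: in `match x { _ => {} }` the arms never
+        // inspect `x`, so `x` is only fake-read, while in
+        // `match x { 0 => {}, _ => {} }` the literal arm must be compared
+        // against `x`, so `x` is borrowed.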
+ let ExprUseVisitor { ref mc, body_owner: _, delegate: _ } = *self;
+ let mut needs_to_be_read = false;
+ for pat in pats {
+ return_if_err!(mc.cat_pattern(discr_place.clone(), pat, |place, pat| {
+ match &pat.kind {
+ PatKind::Binding(.., opt_sub_pat) => {
+                        // If the opt_sub_pat is None, then the binding does not count as
+ // a wildcard for the purpose of borrowing discr.
+ if opt_sub_pat.is_none() {
+ needs_to_be_read = true;
+ }
+ }
+ PatKind::Path(qpath) => {
+ // A `Path` pattern is just a name like `Foo`. This is either a
+ // named constant or else it refers to an ADT variant
+
+ let res = self.mc.typeck_results.qpath_res(qpath, pat.hir_id);
+ match res {
+ Res::Def(DefKind::Const, _) | Res::Def(DefKind::AssocConst, _) => {
+ // Named constants have to be equated with the value
+ // being matched, so that's a read of the value being matched.
+ //
+                                // FIXME: We don't actually need reads for ZSTs.
+ needs_to_be_read = true;
+ }
+ _ => {
+ // Otherwise, this is a struct/enum variant, and so it's
+ // only a read if we need to read the discriminant.
+ needs_to_be_read |= is_multivariant_adt(place.place.ty());
+ }
+ }
+ }
+ PatKind::TupleStruct(..) | PatKind::Struct(..) | PatKind::Tuple(..) => {
+ // For `Foo(..)`, `Foo { ... }` and `(...)` patterns, check if we are matching
+ // against a multivariant enum or struct. In that case, we have to read
+ // the discriminant. Otherwise this kind of pattern doesn't actually
+ // read anything (we'll get invoked for the `...`, which may indeed
+ // perform some reads).
+
+ let place_ty = place.place.ty();
+ needs_to_be_read |= is_multivariant_adt(place_ty);
+ }
+ PatKind::Lit(_) | PatKind::Range(..) => {
+ // If the PatKind is a Lit or a Range then we want
+ // to borrow discr.
+ needs_to_be_read = true;
+ }
+ PatKind::Or(_)
+ | PatKind::Box(_)
+ | PatKind::Slice(..)
+ | PatKind::Ref(..)
+ | PatKind::Wild => {
+ // If the PatKind is Or, Box, Slice or Ref, the decision is made later
+                        // as these patterns contain subpatterns.
+ // If the PatKind is Wild, the decision is made based on the other patterns being
+ // examined
+ }
+ }
+ }));
+ }
+
+ if needs_to_be_read {
+ self.borrow_expr(discr, ty::ImmBorrow);
+ } else {
+ let closure_def_id = match discr_place.place.base {
+ PlaceBase::Upvar(upvar_id) => Some(upvar_id.closure_expr_id),
+ _ => None,
+ };
+
+ self.delegate.fake_read(
+ &discr_place,
+ FakeReadCause::ForMatchedPlace(closure_def_id),
+ discr_place.hir_id,
+ );
+
+ // We always want to walk the discriminant. We want to make sure, for instance,
+ // that the discriminant has been initialized.
+ self.walk_expr(discr);
+ }
+ }
+
+ fn walk_local<F>(
+ &mut self,
+ expr: &hir::Expr<'_>,
+ pat: &hir::Pat<'_>,
+ els: Option<&hir::Block<'_>>,
+ mut f: F,
+ ) where
+ F: FnMut(&mut Self),
+ {
+ self.walk_expr(expr);
+ let expr_place = return_if_err!(self.mc.cat_expr(expr));
+ f(self);
+ if let Some(els) = els {
+            // borrowing because we need to test the discriminant
+ self.maybe_read_scrutinee(expr, expr_place.clone(), from_ref(pat).iter());
+ self.walk_block(els)
+ }
+ self.walk_irrefutable_pat(&expr_place, &pat);
+ }
+
+ /// Indicates that the value of `blk` will be consumed, meaning either copied or moved
+ /// depending on its type.
+ fn walk_block(&mut self, blk: &hir::Block<'_>) {
+ debug!("walk_block(blk.hir_id={})", blk.hir_id);
+
+ for stmt in blk.stmts {
+ self.walk_stmt(stmt);
+ }
+
+ if let Some(ref tail_expr) = blk.expr {
+ self.consume_expr(tail_expr);
+ }
+ }
+
+ fn walk_struct_expr<'hir>(
+ &mut self,
+ fields: &[hir::ExprField<'_>],
+ opt_with: &Option<&'hir hir::Expr<'_>>,
+ ) {
+ // Consume the expressions supplying values for each field.
+ for field in fields {
+ self.consume_expr(field.expr);
+ }
+
+ let with_expr = match *opt_with {
+ Some(w) => &*w,
+ None => {
+ return;
+ }
+ };
+
+ let with_place = return_if_err!(self.mc.cat_expr(with_expr));
+
+ // Select just those fields of the `with`
+ // expression that will actually be used
+ match with_place.place.ty().kind() {
+ ty::Adt(adt, substs) if adt.is_struct() => {
+ // Consume those fields of the with expression that are needed.
+ for (f_index, with_field) in adt.non_enum_variant().fields.iter().enumerate() {
+ let is_mentioned = fields.iter().any(|f| {
+ self.tcx().field_index(f.hir_id, self.mc.typeck_results) == f_index
+ });
+ if !is_mentioned {
+ let field_place = self.mc.cat_projection(
+ &*with_expr,
+ with_place.clone(),
+ with_field.ty(self.tcx(), substs),
+ ProjectionKind::Field(f_index as u32, VariantIdx::new(0)),
+ );
+ self.delegate_consume(&field_place, field_place.hir_id);
+ }
+ }
+ }
+ _ => {
+ // the base expression should always evaluate to a
+ // struct; however, when EUV is run during typeck, it
+ // may not. This will generate an error earlier in typeck,
+ // so we can just ignore it.
+                if self.tcx().sess.has_errors().is_none() {
+ span_bug!(with_expr.span, "with expression doesn't evaluate to a struct");
+ }
+ }
+ }
+
+ // walk the with expression so that complex expressions
+ // are properly handled.
+ self.walk_expr(with_expr);
+ }
+
+ /// Invoke the appropriate delegate calls for anything that gets
+ /// consumed or borrowed as part of the automatic adjustment
+ /// process.
+ fn walk_adjustment(&mut self, expr: &hir::Expr<'_>) {
+ let adjustments = self.mc.typeck_results.expr_adjustments(expr);
+ let mut place_with_id = return_if_err!(self.mc.cat_expr_unadjusted(expr));
+ for adjustment in adjustments {
+ debug!("walk_adjustment expr={:?} adj={:?}", expr, adjustment);
+ match adjustment.kind {
+ adjustment::Adjust::NeverToAny | adjustment::Adjust::Pointer(_) => {
+ // Creating a closure/fn-pointer or unsizing consumes
+ // the input and stores it into the resulting rvalue.
+ self.delegate_consume(&place_with_id, place_with_id.hir_id);
+ }
+
+ adjustment::Adjust::Deref(None) => {}
+
+ // Autoderefs for overloaded Deref calls in fact reference
+ // their receiver. That is, if we have `(*x)` where `x`
+ // is of type `Rc<T>`, then this in fact is equivalent to
+ // `x.deref()`. Since `deref()` is declared with `&self`,
+ // this is an autoref of `x`.
+ adjustment::Adjust::Deref(Some(ref deref)) => {
+ let bk = ty::BorrowKind::from_mutbl(deref.mutbl);
+ self.delegate.borrow(&place_with_id, place_with_id.hir_id, bk);
+ }
+
+ adjustment::Adjust::Borrow(ref autoref) => {
+ self.walk_autoref(expr, &place_with_id, autoref);
+ }
+ }
+ place_with_id =
+ return_if_err!(self.mc.cat_expr_adjusted(expr, place_with_id, adjustment));
+ }
+ }
+
+ /// Walks the autoref `autoref` applied to the autoderef'd
+ /// `expr`. `base_place` is the mem-categorized form of `expr`
+ /// after all relevant autoderefs have occurred.
+ fn walk_autoref(
+ &mut self,
+ expr: &hir::Expr<'_>,
+ base_place: &PlaceWithHirId<'tcx>,
+ autoref: &adjustment::AutoBorrow<'tcx>,
+ ) {
+ debug!(
+ "walk_autoref(expr.hir_id={} base_place={:?} autoref={:?})",
+ expr.hir_id, base_place, autoref
+ );
+
+ match *autoref {
+ adjustment::AutoBorrow::Ref(_, m) => {
+ self.delegate.borrow(
+ base_place,
+ base_place.hir_id,
+ ty::BorrowKind::from_mutbl(m.into()),
+ );
+ }
+
+ adjustment::AutoBorrow::RawPtr(m) => {
+ debug!("walk_autoref: expr.hir_id={} base_place={:?}", expr.hir_id, base_place);
+
+ self.delegate.borrow(base_place, base_place.hir_id, ty::BorrowKind::from_mutbl(m));
+ }
+ }
+ }
+
+ fn walk_arm(&mut self, discr_place: &PlaceWithHirId<'tcx>, arm: &hir::Arm<'_>) {
+ let closure_def_id = match discr_place.place.base {
+ PlaceBase::Upvar(upvar_id) => Some(upvar_id.closure_expr_id),
+ _ => None,
+ };
+
+ self.delegate.fake_read(
+ discr_place,
+ FakeReadCause::ForMatchedPlace(closure_def_id),
+ discr_place.hir_id,
+ );
+ self.walk_pat(discr_place, arm.pat, arm.guard.is_some());
+
+ if let Some(hir::Guard::If(e)) = arm.guard {
+ self.consume_expr(e)
+ } else if let Some(hir::Guard::IfLet(ref l)) = arm.guard {
+ self.consume_expr(l.init)
+ }
+
+ self.consume_expr(arm.body);
+ }
+
+ /// Walks a pat that occurs in isolation (i.e., top-level of fn argument or
+ /// let binding, and *not* a match arm or nested pat.)
+ fn walk_irrefutable_pat(&mut self, discr_place: &PlaceWithHirId<'tcx>, pat: &hir::Pat<'_>) {
+ let closure_def_id = match discr_place.place.base {
+ PlaceBase::Upvar(upvar_id) => Some(upvar_id.closure_expr_id),
+ _ => None,
+ };
+
+ self.delegate.fake_read(
+ discr_place,
+ FakeReadCause::ForLet(closure_def_id),
+ discr_place.hir_id,
+ );
+ self.walk_pat(discr_place, pat, false);
+ }
+
+ /// The core driver for walking a pattern
+ fn walk_pat(
+ &mut self,
+ discr_place: &PlaceWithHirId<'tcx>,
+ pat: &hir::Pat<'_>,
+ has_guard: bool,
+ ) {
+ debug!("walk_pat(discr_place={:?}, pat={:?}, has_guard={:?})", discr_place, pat, has_guard);
+
+ let tcx = self.tcx();
+ let ExprUseVisitor { ref mc, body_owner: _, ref mut delegate } = *self;
+ return_if_err!(mc.cat_pattern(discr_place.clone(), pat, |place, pat| {
+ if let PatKind::Binding(_, canonical_id, ..) = pat.kind {
+ debug!("walk_pat: binding place={:?} pat={:?}", place, pat);
+ if let Some(bm) =
+ mc.typeck_results.extract_binding_mode(tcx.sess, pat.hir_id, pat.span)
+ {
+ debug!("walk_pat: pat.hir_id={:?} bm={:?}", pat.hir_id, bm);
+
+ // pat_ty: the type of the binding being produced.
+ let pat_ty = return_if_err!(mc.node_ty(pat.hir_id));
+ debug!("walk_pat: pat_ty={:?}", pat_ty);
+
+ let def = Res::Local(canonical_id);
+ if let Ok(ref binding_place) = mc.cat_res(pat.hir_id, pat.span, pat_ty, def) {
+ delegate.bind(binding_place, binding_place.hir_id);
+ }
+
+ // Subtle: MIR desugaring introduces immutable borrows for each pattern
+ // binding when lowering pattern guards to ensure that the guard does not
+ // modify the scrutinee.
+ if has_guard {
+ delegate.borrow(place, discr_place.hir_id, ImmBorrow);
+ }
+
+ // It is also a borrow or copy/move of the value being matched.
+                    // In the case of a pattern like `let pat = upvar`, don't use the span
+                    // of the pattern, as that just looks confusing; instead use the span
+ // of the discriminant.
+ match bm {
+ ty::BindByReference(m) => {
+ let bk = ty::BorrowKind::from_mutbl(m);
+ delegate.borrow(place, discr_place.hir_id, bk);
+ }
+ ty::BindByValue(..) => {
+ debug!("walk_pat binding consuming pat");
+ delegate_consume(mc, *delegate, place, discr_place.hir_id);
+ }
+ }
+ }
+ }
+ }));
+ }
+
+ /// Handle the case where the current body contains a closure.
+ ///
+ /// When the current body being handled is a closure, then we must make sure that
+ /// - The parent closure only captures Places from the nested closure that are not local to it.
+ ///
+    /// In the following example the closure `c` only captures `p.x` even though `incr`
+ /// is a capture of the nested closure
+ ///
+ /// ```
+ /// struct P { x: i32 }
+ /// let mut p = P { x: 4 };
+ /// let c = || {
+    ///     let incr = 10;
+    ///     let nested = || p.x += incr;
+ /// };
+ /// ```
+ ///
+ /// - When reporting the Place back to the Delegate, ensure that the UpvarId uses the enclosing
+ /// closure as the DefId.
+ fn walk_captures(&mut self, closure_expr: &hir::Expr<'_>) {
+ fn upvar_is_local_variable<'tcx>(
+ upvars: Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>>,
+ upvar_id: hir::HirId,
+ body_owner_is_closure: bool,
+ ) -> bool {
+ upvars.map(|upvars| !upvars.contains_key(&upvar_id)).unwrap_or(body_owner_is_closure)
+ }
+
+ debug!("walk_captures({:?})", closure_expr);
+
+ let tcx = self.tcx();
+ let closure_def_id = tcx.hir().local_def_id(closure_expr.hir_id);
+ let upvars = tcx.upvars_mentioned(self.body_owner);
+
+        // For purposes of this function, generators and closures are equivalent.
+ let body_owner_is_closure =
+ matches!(tcx.hir().body_owner_kind(self.body_owner), hir::BodyOwnerKind::Closure,);
+
+ // If we have a nested closure, we want to include the fake reads present in the nested closure.
+ if let Some(fake_reads) = self.mc.typeck_results.closure_fake_reads.get(&closure_def_id) {
+ for (fake_read, cause, hir_id) in fake_reads.iter() {
+ match fake_read.base {
+ PlaceBase::Upvar(upvar_id) => {
+ if upvar_is_local_variable(
+ upvars,
+ upvar_id.var_path.hir_id,
+ body_owner_is_closure,
+ ) {
+ // The nested closure might be fake reading the current (enclosing) closure's local variables.
+ // The only places we want to fake read before creating the parent closure are the ones that
+                            // are not local to it / defined by it.
+ //
+ // ```rust,ignore(cannot-test-this-because-pseudo-code)
+ // let v1 = (0, 1);
+ // let c = || { // fake reads: v1
+ // let v2 = (0, 1);
+ // let e = || { // fake reads: v1, v2
+ // let (_, t1) = v1;
+ // let (_, t2) = v2;
+ // }
+ // }
+ // ```
+ // This check is performed when visiting the body of the outermost closure (`c`) and ensures
+ // that we don't add a fake read of v2 in c.
+ continue;
+ }
+ }
+ _ => {
+ bug!(
+ "Do not know how to get HirId out of Rvalue and StaticItem {:?}",
+ fake_read.base
+ );
+ }
+ };
+ self.delegate.fake_read(
+ &PlaceWithHirId { place: fake_read.clone(), hir_id: *hir_id },
+ *cause,
+ *hir_id,
+ );
+ }
+ }
+
+ if let Some(min_captures) = self.mc.typeck_results.closure_min_captures.get(&closure_def_id)
+ {
+ for (var_hir_id, min_list) in min_captures.iter() {
+ if upvars.map_or(body_owner_is_closure, |upvars| !upvars.contains_key(var_hir_id)) {
+ // The nested closure might be capturing the current (enclosing) closure's local variables.
+ // We check if the root variable is ever mentioned within the enclosing closure, if not
+ // then for the current body (if it's a closure) these aren't captures, we will ignore them.
+ continue;
+ }
+ for captured_place in min_list {
+ let place = &captured_place.place;
+ let capture_info = captured_place.info;
+
+ let place_base = if body_owner_is_closure {
+ // Mark the place to be captured by the enclosing closure
+ PlaceBase::Upvar(ty::UpvarId::new(*var_hir_id, self.body_owner))
+ } else {
+ // If the body owner isn't a closure then the variable must
+ // be a local variable
+ PlaceBase::Local(*var_hir_id)
+ };
+ let place_with_id = PlaceWithHirId::new(
+ capture_info.path_expr_id.unwrap_or(
+ capture_info.capture_kind_expr_id.unwrap_or(closure_expr.hir_id),
+ ),
+ place.base_ty,
+ place_base,
+ place.projections.clone(),
+ );
+
+ match capture_info.capture_kind {
+ ty::UpvarCapture::ByValue => {
+ self.delegate_consume(&place_with_id, place_with_id.hir_id);
+ }
+ ty::UpvarCapture::ByRef(upvar_borrow) => {
+ self.delegate.borrow(
+ &place_with_id,
+ place_with_id.hir_id,
+ upvar_borrow,
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+fn copy_or_move<'a, 'tcx>(
+ mc: &mc::MemCategorizationContext<'a, 'tcx>,
+ place_with_id: &PlaceWithHirId<'tcx>,
+) -> ConsumeMode {
+ if !mc.type_is_copy_modulo_regions(
+ place_with_id.place.ty(),
+ mc.tcx().hir().span(place_with_id.hir_id),
+ ) {
+ ConsumeMode::Move
+ } else {
+ ConsumeMode::Copy
+ }
+}
+
+// - If a place is used in a `ByValue` context then move it if it's not a `Copy` type.
+// - If the place is a `Copy` type, consider it an `ImmBorrow`.
+fn delegate_consume<'a, 'tcx>(
+ mc: &mc::MemCategorizationContext<'a, 'tcx>,
+ delegate: &mut (dyn Delegate<'tcx> + 'a),
+ place_with_id: &PlaceWithHirId<'tcx>,
+ diag_expr_id: hir::HirId,
+) {
+ debug!("delegate_consume(place_with_id={:?})", place_with_id);
+
+ let mode = copy_or_move(mc, place_with_id);
+
+ match mode {
+ ConsumeMode::Move => delegate.consume(place_with_id, diag_expr_id),
+ ConsumeMode::Copy => delegate.copy(place_with_id, diag_expr_id),
+ }
+}
+
+fn is_multivariant_adt(ty: Ty<'_>) -> bool {
+ if let ty::Adt(def, _) = ty.kind() {
+ // Note that if a non-exhaustive SingleVariant is defined in another crate, we need
+        // to assume that more cases will be added to the variant in the future. This means
+ // that we should handle non-exhaustive SingleVariant the same way we would handle
+ // a MultiVariant.
+ // If the variant is not local it must be defined in another crate.
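+        //
+        // Illustrative example: a `#[non_exhaustive] pub struct S { .. }`
+        // defined in another crate is treated here just like a multi-variant
+        // enum, since downstream code cannot assume it knows the complete
+        // definition.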
+ let is_non_exhaustive = match def.adt_kind() {
+ AdtKind::Struct | AdtKind::Union => {
+ def.non_enum_variant().is_field_list_non_exhaustive()
+ }
+ AdtKind::Enum => def.is_variant_list_non_exhaustive(),
+ };
+ def.variants().len() > 1 || (!def.did().is_local() && is_non_exhaustive)
+ } else {
+ false
+ }
+}
diff --git a/compiler/rustc_typeck/src/hir_wf_check.rs b/compiler/rustc_typeck/src/hir_wf_check.rs
new file mode 100644
index 000000000..55c7a15f9
--- /dev/null
+++ b/compiler/rustc_typeck/src/hir_wf_check.rs
@@ -0,0 +1,188 @@
+use crate::collect::ItemCtxt;
+use rustc_hir as hir;
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::{ForeignItem, ForeignItemKind, HirId};
+use rustc_infer::infer::TyCtxtInferExt;
+use rustc_infer::traits::TraitEngine;
+use rustc_infer::traits::{ObligationCause, WellFormedLoc};
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Region, ToPredicate, TyCtxt, TypeFoldable, TypeFolder};
+use rustc_trait_selection::traits::{self, TraitEngineExt};
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { diagnostic_hir_wf_check, ..*providers };
+}
+
+// Ideally, this would be in `rustc_trait_selection`, but we
+// need access to `ItemCtxt`
+fn diagnostic_hir_wf_check<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ (predicate, loc): (ty::Predicate<'tcx>, WellFormedLoc),
+) -> Option<ObligationCause<'tcx>> {
+ let hir = tcx.hir();
+
+ let def_id = match loc {
+ WellFormedLoc::Ty(def_id) => def_id,
+ WellFormedLoc::Param { function, param_idx: _ } => function,
+ };
+ let hir_id = hir.local_def_id_to_hir_id(def_id);
+
+ // HIR wfcheck should only ever happen as part of improving an existing error
+ tcx.sess
+ .delay_span_bug(tcx.def_span(def_id), "Performed HIR wfcheck without an existing error!");
+
+ let icx = ItemCtxt::new(tcx, def_id.to_def_id());
+
+ // To perform HIR-based WF checking, we iterate over all HIR types
+ // that occur 'inside' the item we're checking. For example,
+ // given the type `Option<MyStruct<u8>>`, we will check
+ // `Option<MyStruct<u8>>`, `MyStruct<u8>`, and `u8`.
+ // For each type, we perform a well-formed check, and see if we get
+ // an error that matches our expected predicate. We save
+ // the `ObligationCause` corresponding to the *innermost* type,
+ // which is the most specific type that we can point to.
+ // In general, the different components of an `hir::Ty` may have
+ // completely different spans due to macro invocations. Pointing
+ // to the most accurate part of the type can be the difference
+ // between a useless span (e.g. the macro invocation site)
+ // and a useful span (e.g. a user-provided type passed into the macro).
+ //
+ // This approach is quite inefficient - we redo a lot of work done
+ // by the normal WF checker. However, this code is run at most once
+ // per reported error - it will have no impact when compilation succeeds,
+ // and should only have an impact if a very large number of errors is
+ // displayed to the user.
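+    //
+    // A sketch of the scenario this improves (hypothetical user code):
+    //
+    //     struct RequiresDisplay<T: std::fmt::Display>(T);
+    //     fn f(_: Vec<RequiresDisplay<Vec<u8>>>) {}
+    //
+    // The failing predicate is `Vec<u8>: Display`; re-checking each nested
+    // HIR type lets the diagnostic point at `RequiresDisplay<Vec<u8>>`
+    // rather than at the whole parameter type.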
+ struct HirWfCheck<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ predicate: ty::Predicate<'tcx>,
+ cause: Option<ObligationCause<'tcx>>,
+ cause_depth: usize,
+ icx: ItemCtxt<'tcx>,
+ hir_id: HirId,
+ param_env: ty::ParamEnv<'tcx>,
+ depth: usize,
+ }
+
+ impl<'tcx> Visitor<'tcx> for HirWfCheck<'tcx> {
+ fn visit_ty(&mut self, ty: &'tcx hir::Ty<'tcx>) {
+ self.tcx.infer_ctxt().enter(|infcx| {
+ let mut fulfill = <dyn TraitEngine<'tcx>>::new(self.tcx);
+ let tcx_ty =
+ self.icx.to_ty(ty).fold_with(&mut EraseAllBoundRegions { tcx: self.tcx });
+ let cause = traits::ObligationCause::new(
+ ty.span,
+ self.hir_id,
+ traits::ObligationCauseCode::WellFormed(None),
+ );
+ fulfill.register_predicate_obligation(
+ &infcx,
+ traits::Obligation::new(
+ cause,
+ self.param_env,
+ ty::Binder::dummy(ty::PredicateKind::WellFormed(tcx_ty.into()))
+ .to_predicate(self.tcx),
+ ),
+ );
+
+ let errors = fulfill.select_all_or_error(&infcx);
+ if !errors.is_empty() {
+ debug!("Wf-check got errors for {:?}: {:?}", ty, errors);
+ for error in errors {
+ if error.obligation.predicate == self.predicate {
+ // Save the cause from the greatest depth - this corresponds
+ // to picking more-specific types (e.g. `MyStruct<u8>`)
+ // over less-specific types (e.g. `Option<MyStruct<u8>>`)
+ if self.depth >= self.cause_depth {
+ self.cause = Some(error.obligation.cause);
+ self.cause_depth = self.depth
+ }
+ }
+ }
+ }
+ });
+ self.depth += 1;
+ intravisit::walk_ty(self, ty);
+ self.depth -= 1;
+ }
+ }
+
+ let mut visitor = HirWfCheck {
+ tcx,
+ predicate,
+ cause: None,
+ cause_depth: 0,
+ icx,
+ hir_id,
+ param_env: tcx.param_env(def_id.to_def_id()),
+ depth: 0,
+ };
+
+ // Get the starting `hir::Ty` using our `WellFormedLoc`.
+ // We will walk 'into' this type to try to find
+ // a more precise span for our predicate.
+ let ty = match loc {
+ WellFormedLoc::Ty(_) => match hir.get(hir_id) {
+ hir::Node::ImplItem(item) => match item.kind {
+ hir::ImplItemKind::TyAlias(ty) => Some(ty),
+ hir::ImplItemKind::Const(ty, _) => Some(ty),
+ ref item => bug!("Unexpected ImplItem {:?}", item),
+ },
+ hir::Node::TraitItem(item) => match item.kind {
+ hir::TraitItemKind::Type(_, ty) => ty,
+ hir::TraitItemKind::Const(ty, _) => Some(ty),
+ ref item => bug!("Unexpected TraitItem {:?}", item),
+ },
+ hir::Node::Item(item) => match item.kind {
+ hir::ItemKind::Static(ty, _, _) | hir::ItemKind::Const(ty, _) => Some(ty),
+ hir::ItemKind::Impl(ref impl_) => {
+ assert!(impl_.of_trait.is_none(), "Unexpected trait impl: {:?}", impl_);
+ Some(impl_.self_ty)
+ }
+ ref item => bug!("Unexpected item {:?}", item),
+ },
+ hir::Node::Field(field) => Some(field.ty),
+ hir::Node::ForeignItem(ForeignItem {
+ kind: ForeignItemKind::Static(ty, _), ..
+ }) => Some(*ty),
+ ref node => bug!("Unexpected node {:?}", node),
+ },
+ WellFormedLoc::Param { function: _, param_idx } => {
+ let fn_decl = hir.fn_decl_by_hir_id(hir_id).unwrap();
+ // Get return type
+ if param_idx as usize == fn_decl.inputs.len() {
+ match fn_decl.output {
+ hir::FnRetTy::Return(ty) => Some(ty),
+ // The unit type `()` is always well-formed
+ hir::FnRetTy::DefaultReturn(_span) => None,
+ }
+ } else {
+ Some(&fn_decl.inputs[param_idx as usize])
+ }
+ }
+ };
+ if let Some(ty) = ty {
+ visitor.visit_ty(ty);
+ }
+ visitor.cause
+}
+
+struct EraseAllBoundRegions<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+// Higher ranked regions are complicated.
+// To make matters worse, the HIR WF check can instantiate them
+// outside of a `Binder`, due to the way we (ab)use
+// `ItemCtxt::to_ty`. To make things simpler, we just erase all
+// of them, regardless of depth. At worst, this will give
+// us an inaccurate span for an error message, but cannot
+// lead to unsoundness (we call `delay_span_bug` at the start
+// of `diagnostic_hir_wf_check`).
+impl<'tcx> TypeFolder<'tcx> for EraseAllBoundRegions<'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn fold_region(&mut self, r: Region<'tcx>) -> Region<'tcx> {
+ if r.is_late_bound() { self.tcx.lifetimes.re_erased } else { r }
+ }
+}
diff --git a/compiler/rustc_typeck/src/impl_wf_check.rs b/compiler/rustc_typeck/src/impl_wf_check.rs
new file mode 100644
index 000000000..9fee1eaae
--- /dev/null
+++ b/compiler/rustc_typeck/src/impl_wf_check.rs
@@ -0,0 +1,228 @@
+//! This pass enforces various "well-formedness constraints" on impls.
+//! Logically, it is part of wfcheck -- but we do it early so that we
+//! can stop compilation afterwards, since part of the trait matching
+//! infrastructure gets very grumpy if these conditions don't hold. In
+//! particular, if there are type parameters that are not part of the
+//! impl, then coherence will report strange inference ambiguity
+//! errors; if impls have duplicate items, we get misleading
+//! specialization errors. These things can (and probably should) be
+//! fixed, but for the moment it's easier to do these checks early.
+
+use crate::constrained_generic_params as cgp;
+use min_specialization::check_min_specialization;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_errors::struct_span_err;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::LocalDefId;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+use rustc_span::{Span, Symbol};
+
+use std::collections::hash_map::Entry::{Occupied, Vacant};
+
+mod min_specialization;
+
+/// Checks that all the type/lifetime parameters on an impl also
+/// appear in the trait ref or self type (or are constrained by a
+/// where-clause). These rules are needed to ensure that, given a
+/// trait ref like `<T as Trait<U>>`, we can derive the values of all
+/// parameters on the impl (which is needed to make specialization
+/// possible).
+///
+/// However, in the case of lifetimes, we only enforce these rules if
+/// the lifetime parameter is used in an associated type. This is a
+/// concession to backwards compatibility; see comment at the end of
+/// the fn for details.
+///
+/// Example:
+///
+/// ```rust,ignore (pseudo-Rust)
+/// impl<T> Trait<Foo> for Bar { ... }
+/// // ^ T does not appear in `Foo` or `Bar`, error!
+///
+/// impl<T> Trait<Foo<T>> for Bar { ... }
+/// // ^ T appears in `Foo<T>`, ok.
+///
+/// impl<T> Trait<Foo> for Bar where Bar: Iterator<Item = T> { ... }
+/// // ^ T is bound to `<Bar as Iterator>::Item`, ok.
+///
+/// impl<'a> Trait<Foo> for Bar { }
+/// // ^ 'a is unused, but for back-compat we allow it
+///
+/// impl<'a> Trait<Foo> for Bar { type X = &'a i32; }
+/// // ^ 'a is unused and appears in assoc type, error
+/// ```
+fn check_mod_impl_wf(tcx: TyCtxt<'_>, module_def_id: LocalDefId) {
+ let min_specialization = tcx.features().min_specialization;
+ let module = tcx.hir_module_items(module_def_id);
+ for id in module.items() {
+ if matches!(tcx.def_kind(id.def_id), DefKind::Impl) {
+ enforce_impl_params_are_constrained(tcx, id.def_id);
+ enforce_impl_items_are_distinct(tcx, id.def_id);
+ if min_specialization {
+ check_min_specialization(tcx, id.def_id);
+ }
+ }
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { check_mod_impl_wf, ..*providers };
+}
+
+fn enforce_impl_params_are_constrained(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) {
+ // Every lifetime used in an associated type must be constrained.
+ let impl_self_ty = tcx.type_of(impl_def_id);
+ if impl_self_ty.references_error() {
+ // Don't complain about unconstrained type params when self ty isn't known due to errors.
+ // (#36836)
+ tcx.sess.delay_span_bug(
+ tcx.def_span(impl_def_id),
+ &format!(
+ "potentially unconstrained type parameters weren't evaluated: {:?}",
+ impl_self_ty,
+ ),
+ );
+ return;
+ }
+ let impl_generics = tcx.generics_of(impl_def_id);
+ let impl_predicates = tcx.predicates_of(impl_def_id);
+ let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
+
+ let mut input_parameters = cgp::parameters_for_impl(impl_self_ty, impl_trait_ref);
+ cgp::identify_constrained_generic_params(
+ tcx,
+ impl_predicates,
+ impl_trait_ref,
+ &mut input_parameters,
+ );
+
+ // Disallow unconstrained lifetimes, but only if they appear in assoc types.
+ let lifetimes_in_associated_types: FxHashSet<_> = tcx
+ .associated_item_def_ids(impl_def_id)
+ .iter()
+ .flat_map(|def_id| {
+ let item = tcx.associated_item(def_id);
+ match item.kind {
+ ty::AssocKind::Type => {
+ if item.defaultness(tcx).has_value() {
+ cgp::parameters_for(&tcx.type_of(def_id), true)
+ } else {
+ Vec::new()
+ }
+ }
+ ty::AssocKind::Fn | ty::AssocKind::Const => Vec::new(),
+ }
+ })
+ .collect();
+
+ for param in &impl_generics.params {
+ match param.kind {
+ // Disallow ANY unconstrained type parameters.
+ ty::GenericParamDefKind::Type { .. } => {
+ let param_ty = ty::ParamTy::for_def(param);
+ if !input_parameters.contains(&cgp::Parameter::from(param_ty)) {
+ report_unused_parameter(tcx, tcx.def_span(param.def_id), "type", param_ty.name);
+ }
+ }
+ ty::GenericParamDefKind::Lifetime => {
+ let param_lt = cgp::Parameter::from(param.to_early_bound_region_data());
+ if lifetimes_in_associated_types.contains(&param_lt) && // (*)
+ !input_parameters.contains(&param_lt)
+ {
+ report_unused_parameter(
+ tcx,
+ tcx.def_span(param.def_id),
+ "lifetime",
+ param.name,
+ );
+ }
+ }
+ ty::GenericParamDefKind::Const { .. } => {
+ let param_ct = ty::ParamConst::for_def(param);
+ if !input_parameters.contains(&cgp::Parameter::from(param_ct)) {
+ report_unused_parameter(
+ tcx,
+ tcx.def_span(param.def_id),
+ "const",
+ param_ct.name,
+ );
+ }
+ }
+ }
+ }
+
+ // (*) This is a horrible concession to reality. I think it'd be
+ // better to just ban unconstrained lifetimes outright, but in
+ // practice people do non-hygienic macros like:
+ //
+ // ```
+ // macro_rules! __impl_slice_eq1 {
+ // ($Lhs: ty, $Rhs: ty, $Bound: ident) => {
+ // impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq<B> {
+ // ....
+ // }
+ // }
+ // }
+ // ```
+ //
+ // In a concession to backwards compatibility, we continue to
+ // permit those, so long as the lifetimes aren't used in
+ // associated types. I believe this is sound, because lifetimes
+ // used elsewhere are not projected back out.
+}
+
+fn report_unused_parameter(tcx: TyCtxt<'_>, span: Span, kind: &str, name: Symbol) {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0207,
+ "the {} parameter `{}` is not constrained by the \
+ impl trait, self type, or predicates",
+ kind,
+ name
+ );
+ err.span_label(span, format!("unconstrained {} parameter", kind));
+ if kind == "const" {
+ err.note(
+ "expressions using a const parameter must map each value to a distinct output value",
+ );
+ err.note(
+ "proving the result of expressions other than the parameter are unique is not supported",
+ );
+ }
+ err.emit();
+}
+
+/// Enforce that we do not have two items in an impl with the same name.
+fn enforce_impl_items_are_distinct(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) {
+ let mut seen_type_items = FxHashMap::default();
+ let mut seen_value_items = FxHashMap::default();
+ for &impl_item_ref in tcx.associated_item_def_ids(impl_def_id) {
+ let impl_item = tcx.associated_item(impl_item_ref);
+ let seen_items = match impl_item.kind {
+ ty::AssocKind::Type => &mut seen_type_items,
+ _ => &mut seen_value_items,
+ };
+ let span = tcx.def_span(impl_item_ref);
+ let ident = impl_item.ident(tcx);
+ match seen_items.entry(ident.normalize_to_macros_2_0()) {
+ Occupied(entry) => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0201,
+ "duplicate definitions with name `{}`:",
+ ident
+ );
+ err.span_label(*entry.get(), format!("previous definition of `{}` here", ident));
+ err.span_label(span, "duplicate definition");
+ err.emit();
+ }
+ Vacant(entry) => {
+ entry.insert(span);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs b/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs
new file mode 100644
index 000000000..74abb71a1
--- /dev/null
+++ b/compiler/rustc_typeck/src/impl_wf_check/min_specialization.rs
@@ -0,0 +1,439 @@
+//! # Minimal Specialization
+//!
+//! This module contains the checks for sound specialization used when the
+//! `min_specialization` feature is enabled. This requires that the impl is
+//! *always applicable*.
+//!
+//! If `impl1` specializes `impl2`, then `impl1` is always applicable if the
+//! following holds: whenever all the bounds of `impl2` are satisfied, and all
+//! the bounds of `impl1` are satisfied for some choice of lifetimes, then
+//! `impl1` applies for any choice of lifetimes.
+//!
+//! ## Basic approach
+//!
+//! To enforce this requirement on specializations we take the following
+//! approach:
+//!
+//! 1. Match up the substs for `impl2` so that the implemented trait and
+//! self-type match those for `impl1`.
+//! 2. Check for any direct use of `'static` in the substs of `impl2`.
+//! 3. Check that all of the generic parameters of `impl1` occur at most once
+//! in the *unconstrained* substs for `impl2`. A parameter is constrained if
+//! its value is completely determined by an associated type projection
+//! predicate.
+//! 4. Check that all predicates on `impl1` either exist on `impl2` (after
+//! matching substs), or are well-formed predicates for the trait's type
+//! arguments.
+//!
+//! ## Example
+//!
+//! Suppose we have the following always applicable impl:
+//!
+//! ```ignore (illustrative)
+//! impl<T> SpecExtend<T> for std::vec::IntoIter<T> { /* specialized impl */ }
+//! impl<T, I: Iterator<Item=T>> SpecExtend<T> for I { /* default impl */ }
+//! ```
+//!
+//! We get that the substs for `impl2` are `[T, std::vec::IntoIter<T>]`. `T` is
+//! constrained to be `<I as Iterator>::Item`, so we check only
+//! `std::vec::IntoIter<T>` for repeated parameters, which it doesn't have. The
+//! predicates of `impl1` are only `T: Sized`, which is also a predicate of
+//! `impl2`. So this specialization is sound.
+//!
+//! ## Extensions
+//!
+//! Unfortunately not all specializations in the standard library are allowed
+//! by this. So there are two extensions to these rules that allow specializing
+//! on some traits: that is, using them as bounds on the specializing impl,
+//! even when they don't occur in the base impl.
+//!
+//! ### rustc_specialization_trait
+//!
+//! If a trait is always applicable, then it's sound to specialize on it. We
+//! check that a trait is always applicable in the same way as impls, except that step
+//! 4 is now "all predicates on `impl1` are always applicable". We require that
+//! `specialization` or `min_specialization` is enabled to implement these
+//! traits.
+//!
+//! ### rustc_unsafe_specialization_marker
+//!
+//! There are also some specializations on traits with no methods, including
+//! the stable `FusedIterator` trait. We allow marking marker traits with an
+//! unstable attribute that means we ignore them in point 3 of the checks
+//! above. This is unsound, in the sense that the specialized impl may be used
+//! when it doesn't apply, but we allow it in the short term since it can't
+//! cause use-after-frees with purely safe code in the same way as specializing
+//! on traits with methods can.
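+//!
+//! A sketch of the marker pattern (illustrative; these attributes are
+//! internal to the compiler and standard library):
+//!
+//! ```ignore (illustrative)
+//! #[rustc_unsafe_specialization_marker]
+//! trait Marker {}
+//!
+//! impl<T, I: Iterator<Item = T>> SpecExtend<T> for I { /* default impl */ }
+//! impl<T, I: Iterator<Item = T> + Marker> SpecExtend<T> for I { /* specialized impl */ }
+//! ```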
+
+use crate::check::regionck::OutlivesEnvironmentExt;
+use crate::check::wfcheck::impl_implied_bounds;
+use crate::constrained_generic_params as cgp;
+use crate::errors::SubstsOnOverriddenImpl;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_infer::infer::outlives::env::OutlivesEnvironment;
+use rustc_infer::infer::{InferCtxt, TyCtxtInferExt};
+use rustc_infer::traits::specialization_graph::Node;
+use rustc_middle::ty::subst::{GenericArg, InternalSubsts, SubstsRef};
+use rustc_middle::ty::trait_def::TraitSpecializationKind;
+use rustc_middle::ty::{self, TyCtxt, TypeVisitable};
+use rustc_span::Span;
+use rustc_trait_selection::traits::{self, translate_substs, wf};
+
+pub(super) fn check_min_specialization(tcx: TyCtxt<'_>, impl_def_id: LocalDefId) {
+ if let Some(node) = parent_specialization_node(tcx, impl_def_id) {
+ tcx.infer_ctxt().enter(|infcx| {
+ check_always_applicable(&infcx, impl_def_id, node);
+ });
+ }
+}
+
+fn parent_specialization_node(tcx: TyCtxt<'_>, impl1_def_id: LocalDefId) -> Option<Node> {
+ let trait_ref = tcx.impl_trait_ref(impl1_def_id)?;
+ let trait_def = tcx.trait_def(trait_ref.def_id);
+
+ let impl2_node = trait_def.ancestors(tcx, impl1_def_id.to_def_id()).ok()?.nth(1)?;
+
+ let always_applicable_trait =
+ matches!(trait_def.specialization_kind, TraitSpecializationKind::AlwaysApplicable);
+ if impl2_node.is_from_trait() && !always_applicable_trait {
+ // Implementing a normal trait isn't a specialization.
+ return None;
+ }
+ Some(impl2_node)
+}
+
+/// Check that `impl1` is a sound specialization
+fn check_always_applicable(infcx: &InferCtxt<'_, '_>, impl1_def_id: LocalDefId, impl2_node: Node) {
+ if let Some((impl1_substs, impl2_substs)) = get_impl_substs(infcx, impl1_def_id, impl2_node) {
+ let impl2_def_id = impl2_node.def_id();
+ debug!(
+ "check_always_applicable(\nimpl1_def_id={:?},\nimpl2_def_id={:?},\nimpl2_substs={:?}\n)",
+ impl1_def_id, impl2_def_id, impl2_substs
+ );
+
+ let tcx = infcx.tcx;
+
+ let parent_substs = if impl2_node.is_from_trait() {
+ impl2_substs.to_vec()
+ } else {
+ unconstrained_parent_impl_substs(tcx, impl2_def_id, impl2_substs)
+ };
+
+ let span = tcx.def_span(impl1_def_id);
+ check_static_lifetimes(tcx, &parent_substs, span);
+ check_duplicate_params(tcx, impl1_substs, &parent_substs, span);
+ check_predicates(infcx, impl1_def_id, impl1_substs, impl2_node, impl2_substs, span);
+ }
+}
+
+/// Given a specializing impl `impl1`, and the base impl `impl2`, returns two
+/// substitutions `(S1, S2)` that equate their trait references. The returned
+/// types are expressed in terms of the generics of `impl1`.
+///
+/// Example
+///
+/// ```ignore (illustrative)
+/// impl<A, B> Foo<A> for B { /* impl2 */ }
+/// impl<C> Foo<Vec<C>> for C { /* impl1 */ }
+/// ```
+///
+/// Would return `S1 = [C]` and `S2 = [Vec<C>, C]`.
+fn get_impl_substs<'tcx>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ impl1_def_id: LocalDefId,
+ impl2_node: Node,
+) -> Option<(SubstsRef<'tcx>, SubstsRef<'tcx>)> {
+ let tcx = infcx.tcx;
+ let param_env = tcx.param_env(impl1_def_id);
+
+ let impl1_substs = InternalSubsts::identity_for_item(tcx, impl1_def_id.to_def_id());
+ let impl2_substs =
+ translate_substs(infcx, param_env, impl1_def_id.to_def_id(), impl1_substs, impl2_node);
+
+ let mut outlives_env = OutlivesEnvironment::new(param_env);
+ let implied_bounds =
+ impl_implied_bounds(infcx.tcx, param_env, impl1_def_id, tcx.def_span(impl1_def_id));
+ outlives_env.add_implied_bounds(
+ infcx,
+ implied_bounds,
+ tcx.hir().local_def_id_to_hir_id(impl1_def_id),
+ );
+ infcx.check_region_obligations_and_report_errors(impl1_def_id, &outlives_env);
+ let Ok(impl2_substs) = infcx.fully_resolve(impl2_substs) else {
+ let span = tcx.def_span(impl1_def_id);
+ tcx.sess.emit_err(SubstsOnOverriddenImpl { span });
+ return None;
+ };
+ Some((impl1_substs, impl2_substs))
+}
+
+/// Returns a list of all of the unconstrained substs of the given impl.
+///
+/// For example, given the impl:
+///
+/// ```ignore (illustrative)
+/// impl<'a, T, I> ... where &'a I: IntoIterator<Item=&'a T>
+/// ```
+///
+/// This would return the substs corresponding to `['a, I]`, because knowing
+/// `'a` and `I` determines the value of `T`.
+fn unconstrained_parent_impl_substs<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl_def_id: DefId,
+ impl_substs: SubstsRef<'tcx>,
+) -> Vec<GenericArg<'tcx>> {
+ let impl_generic_predicates = tcx.predicates_of(impl_def_id);
+ let mut unconstrained_parameters = FxHashSet::default();
+ let mut constrained_params = FxHashSet::default();
+ let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
+
+ // Unfortunately the functions in `constrained_generic_parameters` don't do
+ // what we want here. We want only a list of constrained parameters while
+ // the functions in `cgp` add the constrained parameters to a list of
+ // unconstrained parameters.
+ for (predicate, _) in impl_generic_predicates.predicates.iter() {
+ if let ty::PredicateKind::Projection(proj) = predicate.kind().skip_binder() {
+ let projection_ty = proj.projection_ty;
+ let projected_ty = proj.term;
+
+ let unbound_trait_ref = projection_ty.trait_ref(tcx);
+ if Some(unbound_trait_ref) == impl_trait_ref {
+ continue;
+ }
+
+ unconstrained_parameters.extend(cgp::parameters_for(&projection_ty, true));
+
+ for param in cgp::parameters_for(&projected_ty, false) {
+ if !unconstrained_parameters.contains(&param) {
+ constrained_params.insert(param.0);
+ }
+ }
+
+ unconstrained_parameters.extend(cgp::parameters_for(&projected_ty, true));
+ }
+ }
+
+ impl_substs
+ .iter()
+ .enumerate()
+ .filter(|&(idx, _)| !constrained_params.contains(&(idx as u32)))
+ .map(|(_, arg)| arg)
+ .collect()
+}
+
+/// Check that parameters of the derived impl don't occur more than once in the
+/// equated substs of the base impl.
+///
+/// For example, forbid the following:
+///
+/// ```ignore (illustrative)
+/// impl<A> Tr for A { }
+/// impl<B> Tr for (B, B) { }
+/// ```
+///
+/// Note that we only consider the unconstrained parameters of the base impl:
+///
+/// ```ignore (illustrative)
+/// impl<S, I: IntoIterator<Item = S>> Tr<S> for I { }
+/// impl<T> Tr<T> for Vec<T> { }
+/// ```
+///
+/// The substs for the parent impl here are `[T, Vec<T>]`, which repeats `T`,
+/// but `S` is constrained in the parent impl, so `parent_substs` is only
+/// `[Vec<T>]`. This means we allow this impl.
+fn check_duplicate_params<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ impl1_substs: SubstsRef<'tcx>,
+ parent_substs: &Vec<GenericArg<'tcx>>,
+ span: Span,
+) {
+ let mut base_params = cgp::parameters_for(parent_substs, true);
+ base_params.sort_by_key(|param| param.0);
+ if let (_, [duplicate, ..]) = base_params.partition_dedup() {
+ let param = impl1_substs[duplicate.0 as usize];
+ tcx.sess
+ .struct_span_err(span, &format!("specializing impl repeats parameter `{}`", param))
+ .emit();
+ }
+}
+
+/// Check that `'static` lifetimes are not introduced by the specializing impl.
+///
+/// For example, forbid the following:
+///
+/// ```ignore (illustrative)
+/// impl<A> Tr for A { }
+/// impl Tr for &'static i32 { }
+/// ```
+fn check_static_lifetimes<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ parent_substs: &Vec<GenericArg<'tcx>>,
+ span: Span,
+) {
+ if tcx.any_free_region_meets(parent_substs, |r| r.is_static()) {
+ tcx.sess.struct_span_err(span, "cannot specialize on `'static` lifetime").emit();
+ }
+}
+
+/// Check whether predicates on the specializing impl (`impl1`) are allowed.
+///
+/// Each predicate `P` must be one of:
+///
+/// * global (does not reference any parameters)
+/// * a `T: Tr` predicate where `Tr` is an always-applicable trait
+/// * present on the base impl `impl2`
+///   * currently this check is done using syntactic equality, which is
+///     conservative but generally sufficient
+/// * a well-formed predicate of a type argument of the trait being implemented,
+///   including the `Self`-type (a rejected example is sketched below)
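+///
+/// For example (illustrative), this specializing impl is rejected because
+/// `Debug` is neither always-applicable nor a marker trait:
+///
+/// ```ignore (illustrative)
+/// impl<T> Tr for T { }
+/// impl<T: std::fmt::Debug> Tr for T { } // error: cannot specialize on trait `Debug`
+/// ```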
+fn check_predicates<'tcx>(
+ infcx: &InferCtxt<'_, 'tcx>,
+ impl1_def_id: LocalDefId,
+ impl1_substs: SubstsRef<'tcx>,
+ impl2_node: Node,
+ impl2_substs: SubstsRef<'tcx>,
+ span: Span,
+) {
+ let tcx = infcx.tcx;
+ let instantiated = tcx.predicates_of(impl1_def_id).instantiate(tcx, impl1_substs);
+ let impl1_predicates: Vec<_> = traits::elaborate_predicates_with_span(
+ tcx,
+ std::iter::zip(
+ instantiated.predicates,
+ // Don't drop predicates (unsound!) because `spans` is too short
+ instantiated.spans.into_iter().chain(std::iter::repeat(span)),
+ ),
+ )
+ .map(|obligation| (obligation.predicate, obligation.cause.span))
+ .collect();
+
+ let mut impl2_predicates = if impl2_node.is_from_trait() {
+ // Always applicable traits have to be always applicable without any
+ // assumptions.
+ Vec::new()
+ } else {
+ traits::elaborate_predicates(
+ tcx,
+ tcx.predicates_of(impl2_node.def_id())
+ .instantiate(tcx, impl2_substs)
+ .predicates
+ .into_iter(),
+ )
+ .map(|obligation| obligation.predicate)
+ .collect()
+ };
+ debug!(
+ "check_always_applicable(\nimpl1_predicates={:?},\nimpl2_predicates={:?}\n)",
+ impl1_predicates, impl2_predicates,
+ );
+
+    // Since impls of always-applicable traits don't get to assume anything,
+    // their supertrait bounds must hold for every implementor, so we can also
+    // allow specializing on their supertraits.
+ //
+ // For example, we allow:
+ //
+ // #[rustc_specialization_trait]
+ // trait AlwaysApplicable: Debug { }
+ //
+ // impl<T> Tr for T { }
+ // impl<T: AlwaysApplicable> Tr for T { }
+ //
+ // Specializing on `AlwaysApplicable` allows also specializing on `Debug`
+ // which is sound because we forbid impls like the following
+ //
+ // impl<D: Debug> AlwaysApplicable for D { }
+ let always_applicable_traits = impl1_predicates.iter().copied().filter(|&(predicate, _)| {
+ matches!(
+ trait_predicate_kind(tcx, predicate),
+ Some(TraitSpecializationKind::AlwaysApplicable)
+ )
+ });
+
+ // Include the well-formed predicates of the type parameters of the impl.
+ for arg in tcx.impl_trait_ref(impl1_def_id).unwrap().substs {
+ if let Some(obligations) = wf::obligations(
+ infcx,
+ tcx.param_env(impl1_def_id),
+ tcx.hir().local_def_id_to_hir_id(impl1_def_id),
+ 0,
+ arg,
+ span,
+ ) {
+ impl2_predicates.extend(
+ traits::elaborate_obligations(tcx, obligations)
+ .map(|obligation| obligation.predicate),
+ )
+ }
+ }
+ impl2_predicates.extend(
+ traits::elaborate_predicates_with_span(tcx, always_applicable_traits)
+ .map(|obligation| obligation.predicate),
+ );
+
+ for (predicate, span) in impl1_predicates {
+ if !impl2_predicates.contains(&predicate) {
+ check_specialization_on(tcx, predicate, span)
+ }
+ }
+}
+
+fn check_specialization_on<'tcx>(tcx: TyCtxt<'tcx>, predicate: ty::Predicate<'tcx>, span: Span) {
+ debug!("can_specialize_on(predicate = {:?})", predicate);
+ match predicate.kind().skip_binder() {
+        // Global predicates are either always true or always false, so they
+        // are fine to specialize on.
+ _ if predicate.is_global() => (),
+ // We allow specializing on explicitly marked traits with no associated
+ // items.
+ ty::PredicateKind::Trait(ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: _,
+ }) => {
+ if !matches!(
+ trait_predicate_kind(tcx, predicate),
+ Some(TraitSpecializationKind::Marker)
+ ) {
+ tcx.sess
+ .struct_span_err(
+ span,
+ &format!(
+ "cannot specialize on trait `{}`",
+ tcx.def_path_str(trait_ref.def_id),
+ ),
+ )
+ .emit();
+ }
+ }
+ ty::PredicateKind::Projection(ty::ProjectionPredicate { projection_ty, term }) => {
+ tcx.sess
+ .struct_span_err(
+ span,
+ &format!("cannot specialize on associated type `{projection_ty} == {term}`",),
+ )
+ .emit();
+ }
+ _ => {
+ tcx.sess
+ .struct_span_err(span, &format!("cannot specialize on predicate `{}`", predicate))
+ .emit();
+ }
+ }
+}
+
+fn trait_predicate_kind<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ predicate: ty::Predicate<'tcx>,
+) -> Option<TraitSpecializationKind> {
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::Trait(ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: _,
+ }) => Some(tcx.trait_def(trait_ref.def_id).specialization_kind),
+ ty::PredicateKind::Trait(_)
+ | ty::PredicateKind::RegionOutlives(_)
+ | ty::PredicateKind::TypeOutlives(_)
+ | ty::PredicateKind::Projection(_)
+ | ty::PredicateKind::WellFormed(_)
+ | ty::PredicateKind::Subtype(_)
+ | ty::PredicateKind::Coerce(_)
+ | ty::PredicateKind::ObjectSafe(_)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+}
diff --git a/compiler/rustc_typeck/src/lib.rs b/compiler/rustc_typeck/src/lib.rs
new file mode 100644
index 000000000..f98ae46c5
--- /dev/null
+++ b/compiler/rustc_typeck/src/lib.rs
@@ -0,0 +1,579 @@
+/*!
+
+# typeck
+
+The type checker is responsible for:
+
+1. Determining the type of each expression.
+2. Resolving methods and traits.
+3. Guaranteeing that most type rules are met. ("Most?", you say, "why most?"
+ Well, dear reader, read on.)
+
+The main entry point is [`check_crate()`]. Type checking operates in
+several major phases:
+
+1. The collect phase first passes over all items and determines their
+ type, without examining their "innards".
+
+2. Variance inference then runs to compute the variance of each parameter.
+
+3. Coherence checks for overlapping or orphaned impls.
+
+4. Finally, the check phase then checks function bodies and so forth.
+ Within the check phase, we check each function body one at a time
+ (bodies of function expressions are checked as part of the
+ containing function). Inference is used to supply types wherever
+ they are unknown. The actual checking of a function itself has
+ several phases (check, regionck, writeback), as discussed in the
+ documentation for the [`check`] module.
+
+The type checker is divided into various submodules which are documented
+independently:
+
+- astconv: converts the AST representation of types
+ into the `ty` representation.
+
+- collect: computes the types of each top-level item and enters them into
+ the `tcx.types` table for later use.
+
+- coherence: enforces coherence rules, builds some tables.
+
+- variance: variance inference
+
+- outlives: outlives inference
+
+- check: walks over function bodies and type checks them, inferring types for
+ local variables, type parameters, etc as necessary.
+
+- infer: finds the types to use for each type variable such that
+ all subtyping and assignment constraints are met. In essence, the check
+ module specifies the constraints, and the infer module solves them.
+
+## Note
+
+This API is completely unstable and subject to change.
+
+*/
+
+#![allow(rustc::potential_query_instability)]
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(box_patterns)]
+#![feature(control_flow_enum)]
+#![feature(drain_filter)]
+#![feature(hash_drain_filter)]
+#![feature(if_let_guard)]
+#![feature(is_sorted)]
+#![feature(iter_intersperse)]
+#![feature(label_break_value)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(once_cell)]
+#![feature(slice_partition_dedup)]
+#![feature(try_blocks)]
+#![feature(is_some_with)]
+#![recursion_limit = "256"]
+
+#[macro_use]
+extern crate tracing;
+
+#[macro_use]
+extern crate rustc_middle;
+
+// These are used by Clippy.
+pub mod check;
+pub mod expr_use_visitor;
+
+mod astconv;
+mod bounds;
+mod check_unused;
+mod coherence;
+mod collect;
+mod constrained_generic_params;
+mod errors;
+pub mod hir_wf_check;
+mod impl_wf_check;
+mod mem_categorization;
+mod outlives;
+mod structured_errors;
+mod variance;
+
+use rustc_errors::{struct_span_err, ErrorGuaranteed};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::{Node, CRATE_HIR_ID};
+use rustc_infer::infer::{InferOk, TyCtxtInferExt};
+use rustc_infer::traits::TraitEngineExt as _;
+use rustc_middle::middle;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+use rustc_middle::util;
+use rustc_session::config::EntryFnType;
+use rustc_span::{symbol::sym, Span, DUMMY_SP};
+use rustc_target::spec::abi::Abi;
+use rustc_trait_selection::infer::InferCtxtExt;
+use rustc_trait_selection::traits::error_reporting::InferCtxtExt as _;
+use rustc_trait_selection::traits::{
+ self, ObligationCause, ObligationCauseCode, TraitEngine, TraitEngineExt as _,
+};
+
+use std::iter;
+
+use astconv::AstConv;
+use bounds::Bounds;
+
+fn require_c_abi_if_c_variadic(tcx: TyCtxt<'_>, decl: &hir::FnDecl<'_>, abi: Abi, span: Span) {
+ match (decl.c_variadic, abi) {
+ // The function has the correct calling convention, or isn't a "C-variadic" function.
+ (false, _) | (true, Abi::C { .. }) | (true, Abi::Cdecl { .. }) => {}
+ // The function is a "C-variadic" function with an incorrect calling convention.
+ (true, _) => {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0045,
+ "C-variadic function must have C or cdecl calling convention"
+ );
+ err.span_label(span, "C-variadics require C or cdecl calling convention").emit();
+ }
+ }
+}
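+
+// For example (illustrative): `fn printf(fmt: *const u8, ...) -> i32;` is
+// accepted inside an `extern "C" { ... }` block, but the same declaration
+// inside an `extern "Rust" { ... }` block triggers E0045.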
+
+fn require_same_types<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ expected: Ty<'tcx>,
+ actual: Ty<'tcx>,
+) -> bool {
+ tcx.infer_ctxt().enter(|ref infcx| {
+ let param_env = ty::ParamEnv::empty();
+ let mut fulfill_cx = <dyn TraitEngine<'_>>::new(infcx.tcx);
+ match infcx.at(cause, param_env).eq(expected, actual) {
+ Ok(InferOk { obligations, .. }) => {
+ fulfill_cx.register_predicate_obligations(infcx, obligations);
+ }
+ Err(err) => {
+ infcx.report_mismatched_types(cause, expected, actual, err).emit();
+ return false;
+ }
+ }
+
+ match fulfill_cx.select_all_or_error(infcx).as_slice() {
+ [] => true,
+ errors => {
+ infcx.report_fulfillment_errors(errors, None, false);
+ false
+ }
+ }
+ })
+}
+
+fn check_main_fn_ty(tcx: TyCtxt<'_>, main_def_id: DefId) {
+ let main_fnsig = tcx.fn_sig(main_def_id);
+ let main_span = tcx.def_span(main_def_id);
+
+ fn main_fn_diagnostics_hir_id(tcx: TyCtxt<'_>, def_id: DefId, sp: Span) -> hir::HirId {
+ if let Some(local_def_id) = def_id.as_local() {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(local_def_id);
+ let hir_type = tcx.type_of(local_def_id);
+ if !matches!(hir_type.kind(), ty::FnDef(..)) {
+ span_bug!(sp, "main has a non-function type: found `{}`", hir_type);
+ }
+ hir_id
+ } else {
+ CRATE_HIR_ID
+ }
+ }
+
+ fn main_fn_generics_params_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().find(hir_id) {
+ Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, ref generics, _), .. })) => {
+ if !generics.params.is_empty() {
+ Some(generics.span)
+ } else {
+ None
+ }
+ }
+ _ => {
+ span_bug!(tcx.def_span(def_id), "main has a non-function type");
+ }
+ }
+ }
+
+ fn main_fn_where_clauses_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().find(hir_id) {
+ Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(_, ref generics, _), .. })) => {
+ Some(generics.where_clause_span)
+ }
+ _ => {
+ span_bug!(tcx.def_span(def_id), "main has a non-function type");
+ }
+ }
+ }
+
+ fn main_fn_asyncness_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ Some(tcx.def_span(def_id))
+ }
+
+ fn main_fn_return_type_span(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Span> {
+ if !def_id.is_local() {
+ return None;
+ }
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ match tcx.hir().find(hir_id) {
+ Some(Node::Item(hir::Item { kind: hir::ItemKind::Fn(ref fn_sig, _, _), .. })) => {
+ Some(fn_sig.decl.output.span())
+ }
+ _ => {
+ span_bug!(tcx.def_span(def_id), "main has a non-function type");
+ }
+ }
+ }
+
+ let mut error = false;
+ let main_diagnostics_hir_id = main_fn_diagnostics_hir_id(tcx, main_def_id, main_span);
+ let main_fn_generics = tcx.generics_of(main_def_id);
+ let main_fn_predicates = tcx.predicates_of(main_def_id);
+ if main_fn_generics.count() != 0 || !main_fnsig.bound_vars().is_empty() {
+ let generics_param_span = main_fn_generics_params_span(tcx, main_def_id);
+ let msg = "`main` function is not allowed to have generic \
+ parameters";
+ let mut diag =
+ struct_span_err!(tcx.sess, generics_param_span.unwrap_or(main_span), E0131, "{}", msg);
+ if let Some(generics_param_span) = generics_param_span {
+ let label = "`main` cannot have generic parameters";
+ diag.span_label(generics_param_span, label);
+ }
+ diag.emit();
+ error = true;
+ } else if !main_fn_predicates.predicates.is_empty() {
+ // generics may bring in implicit predicates, so we skip this check if generics is present.
+ let generics_where_clauses_span = main_fn_where_clauses_span(tcx, main_def_id);
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ generics_where_clauses_span.unwrap_or(main_span),
+ E0646,
+ "`main` function is not allowed to have a `where` clause"
+ );
+ if let Some(generics_where_clauses_span) = generics_where_clauses_span {
+ diag.span_label(generics_where_clauses_span, "`main` cannot have a `where` clause");
+ }
+ diag.emit();
+ error = true;
+ }
+
+ let main_asyncness = tcx.asyncness(main_def_id);
+ if let hir::IsAsync::Async = main_asyncness {
+ let mut diag = struct_span_err!(
+ tcx.sess,
+ main_span,
+ E0752,
+ "`main` function is not allowed to be `async`"
+ );
+ let asyncness_span = main_fn_asyncness_span(tcx, main_def_id);
+ if let Some(asyncness_span) = asyncness_span {
+ diag.span_label(asyncness_span, "`main` function is not allowed to be `async`");
+ }
+ diag.emit();
+ error = true;
+ }
+
+ for attr in tcx.get_attrs(main_def_id, sym::track_caller) {
+ tcx.sess
+ .struct_span_err(attr.span, "`main` function is not allowed to be `#[track_caller]`")
+ .span_label(main_span, "`main` function is not allowed to be `#[track_caller]`")
+ .emit();
+ error = true;
+ }
+
+ if error {
+ return;
+ }
+
+ let expected_return_type;
+ if let Some(term_id) = tcx.lang_items().termination() {
+ let return_ty = main_fnsig.output();
+ let return_ty_span = main_fn_return_type_span(tcx, main_def_id).unwrap_or(main_span);
+ if !return_ty.bound_vars().is_empty() {
+ let msg = "`main` function return type is not allowed to have generic \
+ parameters";
+ struct_span_err!(tcx.sess, return_ty_span, E0131, "{}", msg).emit();
+ error = true;
+ }
+ let return_ty = return_ty.skip_binder();
+ tcx.infer_ctxt().enter(|infcx| {
+ let cause = traits::ObligationCause::new(
+ return_ty_span,
+ main_diagnostics_hir_id,
+ ObligationCauseCode::MainFunctionType,
+ );
+ let mut fulfillment_cx = traits::FulfillmentContext::new();
+ // normalize any potential projections in the return type, then add
+ // any possible obligations to the fulfillment context.
+ // HACK(ThePuzzlemaker) this feels symptomatic of a problem within
+ // checking trait fulfillment, not this here. I'm not sure why it
+ // works in the example in `fn test()` given in #88609? This also
+ // probably isn't the best way to do this.
+ let InferOk { value: norm_return_ty, obligations } = infcx
+ .partially_normalize_associated_types_in(
+ cause.clone(),
+ ty::ParamEnv::empty(),
+ return_ty,
+ );
+ fulfillment_cx.register_predicate_obligations(&infcx, obligations);
+ fulfillment_cx.register_bound(
+ &infcx,
+ ty::ParamEnv::empty(),
+ norm_return_ty,
+ term_id,
+ cause,
+ );
+ let errors = fulfillment_cx.select_all_or_error(&infcx);
+ if !errors.is_empty() {
+ infcx.report_fulfillment_errors(&errors, None, false);
+ error = true;
+ }
+ });
+ // now we can take the return type of the given main function
+ expected_return_type = main_fnsig.output();
+ } else {
+ // standard () main return type
+ expected_return_type = ty::Binder::dummy(tcx.mk_unit());
+ }
+
+ if error {
+ return;
+ }
+
+ let se_ty = tcx.mk_fn_ptr(expected_return_type.map_bound(|expected_return_type| {
+ tcx.mk_fn_sig(iter::empty(), expected_return_type, false, hir::Unsafety::Normal, Abi::Rust)
+ }));
+
+ require_same_types(
+ tcx,
+ &ObligationCause::new(
+ main_span,
+ main_diagnostics_hir_id,
+ ObligationCauseCode::MainFunctionType,
+ ),
+ se_ty,
+ tcx.mk_fn_ptr(main_fnsig),
+ );
+}
+
+fn check_start_fn_ty(tcx: TyCtxt<'_>, start_def_id: DefId) {
+ let start_def_id = start_def_id.expect_local();
+ let start_id = tcx.hir().local_def_id_to_hir_id(start_def_id);
+ let start_span = tcx.def_span(start_def_id);
+ let start_t = tcx.type_of(start_def_id);
+ match start_t.kind() {
+ ty::FnDef(..) => {
+ if let Some(Node::Item(it)) = tcx.hir().find(start_id) {
+ if let hir::ItemKind::Fn(ref sig, ref generics, _) = it.kind {
+ let mut error = false;
+ if !generics.params.is_empty() {
+ struct_span_err!(
+ tcx.sess,
+ generics.span,
+ E0132,
+ "start function is not allowed to have type parameters"
+ )
+ .span_label(generics.span, "start function cannot have type parameters")
+ .emit();
+ error = true;
+ }
+ if generics.has_where_clause_predicates {
+ struct_span_err!(
+ tcx.sess,
+ generics.where_clause_span,
+ E0647,
+ "start function is not allowed to have a `where` clause"
+ )
+ .span_label(
+ generics.where_clause_span,
+ "start function cannot have a `where` clause",
+ )
+ .emit();
+ error = true;
+ }
+ if let hir::IsAsync::Async = sig.header.asyncness {
+ let span = tcx.def_span(it.def_id);
+ struct_span_err!(
+ tcx.sess,
+ span,
+ E0752,
+ "`start` is not allowed to be `async`"
+ )
+ .span_label(span, "`start` is not allowed to be `async`")
+ .emit();
+ error = true;
+ }
+
+ let attrs = tcx.hir().attrs(start_id);
+ for attr in attrs {
+ if attr.has_name(sym::track_caller) {
+ tcx.sess
+ .struct_span_err(
+ attr.span,
+ "`start` is not allowed to be `#[track_caller]`",
+ )
+ .span_label(
+ start_span,
+ "`start` is not allowed to be `#[track_caller]`",
+ )
+ .emit();
+ error = true;
+ }
+ }
+
+ if error {
+ return;
+ }
+ }
+ }
+
+ let se_ty = tcx.mk_fn_ptr(ty::Binder::dummy(tcx.mk_fn_sig(
+ [tcx.types.isize, tcx.mk_imm_ptr(tcx.mk_imm_ptr(tcx.types.u8))].iter().cloned(),
+ tcx.types.isize,
+ false,
+ hir::Unsafety::Normal,
+ Abi::Rust,
+ )));
+
+ require_same_types(
+ tcx,
+ &ObligationCause::new(start_span, start_id, ObligationCauseCode::StartFunctionType),
+ se_ty,
+ tcx.mk_fn_ptr(tcx.fn_sig(start_def_id)),
+ );
+ }
+ _ => {
+ span_bug!(start_span, "start has a non-function type: found `{}`", start_t);
+ }
+ }
+}
+
+fn check_for_entry_fn(tcx: TyCtxt<'_>) {
+ match tcx.entry_fn(()) {
+ Some((def_id, EntryFnType::Main)) => check_main_fn_ty(tcx, def_id),
+ Some((def_id, EntryFnType::Start)) => check_start_fn_ty(tcx, def_id),
+ _ => {}
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ collect::provide(providers);
+ coherence::provide(providers);
+ check::provide(providers);
+ variance::provide(providers);
+ outlives::provide(providers);
+ impl_wf_check::provide(providers);
+ hir_wf_check::provide(providers);
+}
+
+pub fn check_crate(tcx: TyCtxt<'_>) -> Result<(), ErrorGuaranteed> {
+ let _prof_timer = tcx.sess.timer("type_check_crate");
+
+ // this ensures that later parts of type checking can assume that items
+ // have valid types and not error
+ // FIXME(matthewjasper) We shouldn't need to use `track_errors`.
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("type_collecting", || {
+ tcx.hir().for_each_module(|module| tcx.ensure().collect_mod_item_types(module))
+ });
+ })?;
+
+ if tcx.features().rustc_attrs {
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("outlives_testing", || outlives::test::test_inferred_outlives(tcx));
+ })?;
+ }
+
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("impl_wf_inference", || {
+ tcx.hir().for_each_module(|module| tcx.ensure().check_mod_impl_wf(module))
+ });
+ })?;
+
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("coherence_checking", || {
+ for &trait_def_id in tcx.all_local_trait_impls(()).keys() {
+ tcx.ensure().coherent_trait(trait_def_id);
+ }
+
+ // these queries are executed for side-effects (error reporting):
+ tcx.ensure().crate_inherent_impls(());
+ tcx.ensure().crate_inherent_impls_overlap_check(());
+ });
+ })?;
+
+ if tcx.features().rustc_attrs {
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("variance_testing", || variance::test::test_variance(tcx));
+ })?;
+ }
+
+ tcx.sess.track_errors(|| {
+ tcx.sess.time("wf_checking", || {
+ tcx.hir().par_for_each_module(|module| tcx.ensure().check_mod_type_wf(module))
+ });
+ })?;
+
+ // NOTE: This is copy/pasted in librustdoc/core.rs and should be kept in sync.
+ tcx.sess.time("item_types_checking", || {
+ tcx.hir().for_each_module(|module| tcx.ensure().check_mod_item_types(module))
+ });
+
+ tcx.sess.time("item_bodies_checking", || tcx.typeck_item_bodies(()));
+
+ check_unused::check_crate(tcx);
+ check_for_entry_fn(tcx);
+
+ if let Some(reported) = tcx.sess.has_errors() { Err(reported) } else { Ok(()) }
+}
+
+/// A quasi-deprecated helper used in rustdoc and clippy to get
+/// the type from a HIR node.
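+///
+/// A usage sketch (illustrative; `cx` is a hypothetical lint context):
+///
+/// ```ignore (illustrative)
+/// let ty: Ty<'_> = hir_ty_to_ty(cx.tcx, hir_ty);
+/// ```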
+pub fn hir_ty_to_ty<'tcx>(tcx: TyCtxt<'tcx>, hir_ty: &hir::Ty<'_>) -> Ty<'tcx> {
+ // In case there are any projections, etc., find the "environment"
+ // def-ID that will be used to determine the traits/predicates in
+ // scope. This is derived from the enclosing item-like thing.
+ let env_def_id = tcx.hir().get_parent_item(hir_ty.hir_id);
+ let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id());
+ <dyn AstConv<'_>>::ast_ty_to_ty(&item_cx, hir_ty)
+}
+
+pub fn hir_trait_to_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ hir_trait: &hir::TraitRef<'_>,
+ self_ty: Ty<'tcx>,
+) -> Bounds<'tcx> {
+ // In case there are any projections, etc., find the "environment"
+ // def-ID that will be used to determine the traits/predicates in
+ // scope. This is derived from the enclosing item-like thing.
+ let env_def_id = tcx.hir().get_parent_item(hir_trait.hir_ref_id);
+ let item_cx = self::collect::ItemCtxt::new(tcx, env_def_id.to_def_id());
+ let mut bounds = Bounds::default();
+ let _ = <dyn AstConv<'_>>::instantiate_poly_trait_ref(
+ &item_cx,
+ hir_trait,
+ DUMMY_SP,
+ ty::BoundConstness::NotConst,
+ self_ty,
+ &mut bounds,
+ true,
+ );
+
+ bounds
+}
diff --git a/compiler/rustc_typeck/src/mem_categorization.rs b/compiler/rustc_typeck/src/mem_categorization.rs
new file mode 100644
index 000000000..ced919f66
--- /dev/null
+++ b/compiler/rustc_typeck/src/mem_categorization.rs
@@ -0,0 +1,786 @@
+//! # Categorization
+//!
+//! The job of the categorization module is to analyze an expression to
+//! determine what kind of memory is used in evaluating it (for example,
+//! where dereferences occur and what kind of pointer is dereferenced;
+//! whether the memory is mutable, etc.).
+//!
+//! Categorization effectively transforms all of our expressions into
+//! expressions of the following forms (the actual enum has many more
+//! possibilities, naturally, but they are all variants of these base
+//! forms):
+//! ```ignore (not-rust)
+//! E = rvalue // some computed rvalue
+//! | x // address of a local variable or argument
+//! | *E // deref of a ptr
+//! | E.comp // access to an interior component
+//! ```
+//! Imagine a routine `ToAddr(Expr)` that evaluates an expression and returns an
+//! address where the result is to be found. If `Expr` is a place, then this
+//! is the address of the place. If `Expr` is an rvalue, this is the address of
+//! some temporary spot in memory where the result is stored.
+//!
+//! Now, `cat_expr()` classifies the expression `Expr` and the address `A = ToAddr(Expr)`
+//! as follows:
+//!
+//! - `cat`: what kind of expression was this? This is a subset of the
+//! full expression forms which only includes those that we care about
+//! for the purpose of the analysis.
+//! - `mutbl`: mutability of the address `A`.
+//! - `ty`: the type of data found at the address `A`.
+//!
+//! The resulting categorization tree differs somewhat from the expressions
+//! themselves. For example, auto-derefs are explicit. Also, an index `a[b]` is
+//! decomposed into two operations: a dereference to reach the array data and
+//! then an index to jump forward to the relevant item.
+//!
+//! ## By-reference upvars
+//!
+//! One part of the codegen which may be non-obvious is that we translate
+//! closure upvars into the dereference of a borrowed pointer; this more closely
+//! resembles the runtime codegen. So, for example, if we had:
+//!
+//! ```ignore (illustrative)
+//! let mut x = 3;
+//! let y = 5;
+//! let inc = || x += y;
+//! ```
+//!
+//! Then when we categorize `x` (*within* the closure) we would yield a
+//! result of `*x'`, effectively, where `x'` is a `Categorization::Upvar` reference
+//! tied to `x`. The type of `x'` will be a borrowed pointer.
+
+use rustc_middle::hir::place::*;
+use rustc_middle::ty::adjustment;
+use rustc_middle::ty::fold::TypeFoldable;
+use rustc_middle::ty::visit::TypeVisitable;
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::LocalDefId;
+use rustc_hir::pat_util::EnumerateAndAdjustIterator;
+use rustc_hir::PatKind;
+use rustc_index::vec::Idx;
+use rustc_infer::infer::InferCtxt;
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+use rustc_trait_selection::infer::InferCtxtExt;
+
+pub(crate) trait HirNode {
+ fn hir_id(&self) -> hir::HirId;
+ fn span(&self) -> Span;
+}
+
+impl HirNode for hir::Expr<'_> {
+ fn hir_id(&self) -> hir::HirId {
+ self.hir_id
+ }
+ fn span(&self) -> Span {
+ self.span
+ }
+}
+
+impl HirNode for hir::Pat<'_> {
+ fn hir_id(&self) -> hir::HirId {
+ self.hir_id
+ }
+ fn span(&self) -> Span {
+ self.span
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct MemCategorizationContext<'a, 'tcx> {
+ pub(crate) typeck_results: &'a ty::TypeckResults<'tcx>,
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_owner: LocalDefId,
+ upvars: Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>>,
+}
+
+pub(crate) type McResult<T> = Result<T, ()>;
+
+impl<'a, 'tcx> MemCategorizationContext<'a, 'tcx> {
+ /// Creates a `MemCategorizationContext`.
+ pub(crate) fn new(
+ infcx: &'a InferCtxt<'a, 'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ body_owner: LocalDefId,
+ typeck_results: &'a ty::TypeckResults<'tcx>,
+ ) -> MemCategorizationContext<'a, 'tcx> {
+ MemCategorizationContext {
+ typeck_results,
+ infcx,
+ param_env,
+ body_owner,
+ upvars: infcx.tcx.upvars_mentioned(body_owner),
+ }
+ }
+
+ pub(crate) fn tcx(&self) -> TyCtxt<'tcx> {
+ self.infcx.tcx
+ }
+
+ pub(crate) fn type_is_copy_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool {
+ self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span)
+ }
+
+ fn resolve_vars_if_possible<T>(&self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.infcx.resolve_vars_if_possible(value)
+ }
+
+ fn is_tainted_by_errors(&self) -> bool {
+ self.infcx.is_tainted_by_errors()
+ }
+
+ fn resolve_type_vars_or_error(
+ &self,
+ id: hir::HirId,
+ ty: Option<Ty<'tcx>>,
+ ) -> McResult<Ty<'tcx>> {
+ match ty {
+ Some(ty) => {
+ let ty = self.resolve_vars_if_possible(ty);
+ if ty.references_error() || ty.is_ty_var() {
+ debug!("resolve_type_vars_or_error: error from {:?}", ty);
+ Err(())
+ } else {
+ Ok(ty)
+ }
+ }
+ // FIXME
+ None if self.is_tainted_by_errors() => Err(()),
+ None => {
+ bug!(
+ "no type for node {}: {} in mem_categorization",
+ id,
+ self.tcx().hir().node_to_string(id)
+ );
+ }
+ }
+ }
+
+ pub(crate) fn node_ty(&self, hir_id: hir::HirId) -> McResult<Ty<'tcx>> {
+ self.resolve_type_vars_or_error(hir_id, self.typeck_results.node_type_opt(hir_id))
+ }
+
+ fn expr_ty(&self, expr: &hir::Expr<'_>) -> McResult<Ty<'tcx>> {
+ self.resolve_type_vars_or_error(expr.hir_id, self.typeck_results.expr_ty_opt(expr))
+ }
+
+ pub(crate) fn expr_ty_adjusted(&self, expr: &hir::Expr<'_>) -> McResult<Ty<'tcx>> {
+ self.resolve_type_vars_or_error(expr.hir_id, self.typeck_results.expr_ty_adjusted_opt(expr))
+ }
+
+ /// Returns the type of value that this pattern matches against.
+ /// Some non-obvious cases:
+ ///
+ /// - a `ref x` binding matches against a value of type `T` and gives
+ /// `x` the type `&T`; we return `T`.
+ /// - a pattern with implicit derefs (thanks to default binding
+ /// modes #42640) may look like `Some(x)` but in fact have
+ /// implicit deref patterns attached (e.g., it is really
+ /// `&Some(x)`). In that case, we return the "outermost" type
+    /// (e.g., `&Option<T>`).
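+    ///
+    /// A sketch (illustrative):
+    ///
+    /// ```ignore (illustrative)
+    /// let opt: &Option<u8> = &Some(1);
+    /// // `Some(x)` matches via an implicit deref pattern, so its adjusted
+    /// // type here is `&Option<u8>`, not `Option<u8>`.
+    /// if let Some(x) = opt { let _: &u8 = x; }
+    /// ```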
+ pub(crate) fn pat_ty_adjusted(&self, pat: &hir::Pat<'_>) -> McResult<Ty<'tcx>> {
+ // Check for implicit `&` types wrapping the pattern; note
+ // that these are never attached to binding patterns, so
+ // actually this is somewhat "disjoint" from the code below
+ // that aims to account for `ref x`.
+ if let Some(vec) = self.typeck_results.pat_adjustments().get(pat.hir_id) {
+ if let Some(first_ty) = vec.first() {
+ debug!("pat_ty(pat={:?}) found adjusted ty `{:?}`", pat, first_ty);
+ return Ok(*first_ty);
+ }
+ }
+
+ self.pat_ty_unadjusted(pat)
+ }
+
+ /// Like `pat_ty`, but ignores implicit `&` patterns.
+ fn pat_ty_unadjusted(&self, pat: &hir::Pat<'_>) -> McResult<Ty<'tcx>> {
+ let base_ty = self.node_ty(pat.hir_id)?;
+ debug!("pat_ty(pat={:?}) base_ty={:?}", pat, base_ty);
+
+ // This code detects whether we are looking at a `ref x`,
+ // and if so, figures out what the type *being borrowed* is.
+ let ret_ty = match pat.kind {
+ PatKind::Binding(..) => {
+ let bm = *self
+ .typeck_results
+ .pat_binding_modes()
+ .get(pat.hir_id)
+ .expect("missing binding mode");
+
+ if let ty::BindByReference(_) = bm {
+ // a bind-by-ref means that the base_ty will be the type of the ident itself,
+ // but what we want here is the type of the underlying value being borrowed.
+ // So peel off one-level, turning the &T into T.
+ match base_ty.builtin_deref(false) {
+ Some(t) => t.ty,
+ None => {
+ debug!("By-ref binding of non-derefable type {:?}", base_ty);
+ return Err(());
+ }
+ }
+ } else {
+ base_ty
+ }
+ }
+ _ => base_ty,
+ };
+ debug!("pat_ty(pat={:?}) ret_ty={:?}", pat, ret_ty);
+
+ Ok(ret_ty)
+ }
+
+ pub(crate) fn cat_expr(&self, expr: &hir::Expr<'_>) -> McResult<PlaceWithHirId<'tcx>> {
+ // This recursion helper avoids going through *too many*
+ // adjustments, since *only* non-overloaded deref recurses.
+ fn helper<'a, 'tcx>(
+ mc: &MemCategorizationContext<'a, 'tcx>,
+ expr: &hir::Expr<'_>,
+ adjustments: &[adjustment::Adjustment<'tcx>],
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ match adjustments.split_last() {
+ None => mc.cat_expr_unadjusted(expr),
+ Some((adjustment, previous)) => {
+ mc.cat_expr_adjusted_with(expr, || helper(mc, expr, previous), adjustment)
+ }
+ }
+ }
+
+ helper(self, expr, self.typeck_results.expr_adjustments(expr))
+ }
+
+ pub(crate) fn cat_expr_adjusted(
+ &self,
+ expr: &hir::Expr<'_>,
+ previous: PlaceWithHirId<'tcx>,
+ adjustment: &adjustment::Adjustment<'tcx>,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ self.cat_expr_adjusted_with(expr, || Ok(previous), adjustment)
+ }
+
+ fn cat_expr_adjusted_with<F>(
+ &self,
+ expr: &hir::Expr<'_>,
+ previous: F,
+ adjustment: &adjustment::Adjustment<'tcx>,
+ ) -> McResult<PlaceWithHirId<'tcx>>
+ where
+ F: FnOnce() -> McResult<PlaceWithHirId<'tcx>>,
+ {
+ debug!("cat_expr_adjusted_with({:?}): {:?}", adjustment, expr);
+ let target = self.resolve_vars_if_possible(adjustment.target);
+ match adjustment.kind {
+ adjustment::Adjust::Deref(overloaded) => {
+ // Equivalent to *expr or something similar.
+ let base = if let Some(deref) = overloaded {
+ let ref_ty = self
+ .tcx()
+ .mk_ref(deref.region, ty::TypeAndMut { ty: target, mutbl: deref.mutbl });
+ self.cat_rvalue(expr.hir_id, expr.span, ref_ty)
+ } else {
+ previous()?
+ };
+ self.cat_deref(expr, base)
+ }
+
+ adjustment::Adjust::NeverToAny
+ | adjustment::Adjust::Pointer(_)
+ | adjustment::Adjust::Borrow(_) => {
+ // Result is an rvalue.
+ Ok(self.cat_rvalue(expr.hir_id, expr.span, target))
+ }
+ }
+ }
+
+ pub(crate) fn cat_expr_unadjusted(
+ &self,
+ expr: &hir::Expr<'_>,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ debug!("cat_expr: id={} expr={:?}", expr.hir_id, expr);
+
+ let expr_ty = self.expr_ty(expr)?;
+ match expr.kind {
+ hir::ExprKind::Unary(hir::UnOp::Deref, ref e_base) => {
+ if self.typeck_results.is_method_call(expr) {
+ self.cat_overloaded_place(expr, e_base)
+ } else {
+ let base = self.cat_expr(e_base)?;
+ self.cat_deref(expr, base)
+ }
+ }
+
+ hir::ExprKind::Field(ref base, _) => {
+ let base = self.cat_expr(base)?;
+ debug!("cat_expr(cat_field): id={} expr={:?} base={:?}", expr.hir_id, expr, base);
+
+ let field_idx = self
+ .typeck_results
+ .field_indices()
+ .get(expr.hir_id)
+ .cloned()
+ .expect("Field index not found");
+
+ Ok(self.cat_projection(
+ expr,
+ base,
+ expr_ty,
+ ProjectionKind::Field(field_idx as u32, VariantIdx::new(0)),
+ ))
+ }
+
+ hir::ExprKind::Index(ref base, _) => {
+ if self.typeck_results.is_method_call(expr) {
+ // If this is an index implemented by a method call, then it
+ // will include an implicit deref of the result.
+ // The call to index() returns a `&T` value, which
+ // is an rvalue. That is what we will be
+ // dereferencing.
+ self.cat_overloaded_place(expr, base)
+ } else {
+ let base = self.cat_expr(base)?;
+ Ok(self.cat_projection(expr, base, expr_ty, ProjectionKind::Index))
+ }
+ }
+
+ hir::ExprKind::Path(ref qpath) => {
+ let res = self.typeck_results.qpath_res(qpath, expr.hir_id);
+ self.cat_res(expr.hir_id, expr.span, expr_ty, res)
+ }
+
+ hir::ExprKind::Type(ref e, _) => self.cat_expr(e),
+
+ hir::ExprKind::AddrOf(..)
+ | hir::ExprKind::Call(..)
+ | hir::ExprKind::Assign(..)
+ | hir::ExprKind::AssignOp(..)
+ | hir::ExprKind::Closure { .. }
+ | hir::ExprKind::Ret(..)
+ | hir::ExprKind::Unary(..)
+ | hir::ExprKind::Yield(..)
+ | hir::ExprKind::MethodCall(..)
+ | hir::ExprKind::Cast(..)
+ | hir::ExprKind::DropTemps(..)
+ | hir::ExprKind::Array(..)
+ | hir::ExprKind::If(..)
+ | hir::ExprKind::Tup(..)
+ | hir::ExprKind::Binary(..)
+ | hir::ExprKind::Block(..)
+ | hir::ExprKind::Let(..)
+ | hir::ExprKind::Loop(..)
+ | hir::ExprKind::Match(..)
+ | hir::ExprKind::Lit(..)
+ | hir::ExprKind::ConstBlock(..)
+ | hir::ExprKind::Break(..)
+ | hir::ExprKind::Continue(..)
+ | hir::ExprKind::Struct(..)
+ | hir::ExprKind::Repeat(..)
+ | hir::ExprKind::InlineAsm(..)
+ | hir::ExprKind::Box(..)
+ | hir::ExprKind::Err => Ok(self.cat_rvalue(expr.hir_id, expr.span, expr_ty)),
+ }
+ }
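+
+    // Quick intuition (illustrative): given `struct Point { x: u8 }` and a
+    // local `p: Point`, the expression `p.x` categorizes as a `Field`
+    // projection of the local place `p`, and `*ptr` as a deref of the place
+    // for `ptr`, while `1 + 2`, calls, and the other kinds in the final arm
+    // all categorize as plain rvalues.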
+
+ pub(crate) fn cat_res(
+ &self,
+ hir_id: hir::HirId,
+ span: Span,
+ expr_ty: Ty<'tcx>,
+ res: Res,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ debug!("cat_res: id={:?} expr={:?} def={:?}", hir_id, expr_ty, res);
+
+ match res {
+ Res::Def(
+ DefKind::Ctor(..)
+ | DefKind::Const
+ | DefKind::ConstParam
+ | DefKind::AssocConst
+ | DefKind::Fn
+ | DefKind::AssocFn,
+ _,
+ )
+ | Res::SelfCtor(..) => Ok(self.cat_rvalue(hir_id, span, expr_ty)),
+
+ Res::Def(DefKind::Static(_), _) => {
+ Ok(PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::StaticItem, Vec::new()))
+ }
+
+ Res::Local(var_id) => {
+ if self.upvars.map_or(false, |upvars| upvars.contains_key(&var_id)) {
+ self.cat_upvar(hir_id, var_id)
+ } else {
+ Ok(PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::Local(var_id), Vec::new()))
+ }
+ }
+
+ def => span_bug!(span, "unexpected definition in memory categorization: {:?}", def),
+ }
+ }
+
+ /// Categorize an upvar.
+ ///
+ /// Note: the actual upvar access contains invisible derefs of closure
+ /// environment and upvar reference as appropriate. Only regionck cares
+ /// about these dereferences, so we let it compute them as needed.
+ fn cat_upvar(&self, hir_id: hir::HirId, var_id: hir::HirId) -> McResult<PlaceWithHirId<'tcx>> {
+ let closure_expr_def_id = self.body_owner;
+
+ let upvar_id = ty::UpvarId {
+ var_path: ty::UpvarPath { hir_id: var_id },
+ closure_expr_id: closure_expr_def_id,
+ };
+ let var_ty = self.node_ty(var_id)?;
+
+ let ret = PlaceWithHirId::new(hir_id, var_ty, PlaceBase::Upvar(upvar_id), Vec::new());
+
+ debug!("cat_upvar ret={:?}", ret);
+ Ok(ret)
+ }
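+
+    // Example (illustrative):
+    //
+    // ```
+    // let x = 5u32;
+    // let c = || x + 1; // within the closure body, `x` resolves to
+    //                   // `Res::Local`, but it is an upvar of `c`, so it is
+    //                   // categorized with `PlaceBase::Upvar`
+    // ```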
+
+ pub(crate) fn cat_rvalue(
+ &self,
+ hir_id: hir::HirId,
+ span: Span,
+ expr_ty: Ty<'tcx>,
+ ) -> PlaceWithHirId<'tcx> {
+ debug!("cat_rvalue hir_id={:?}, expr_ty={:?}, span={:?}", hir_id, expr_ty, span);
+ let ret = PlaceWithHirId::new(hir_id, expr_ty, PlaceBase::Rvalue, Vec::new());
+ debug!("cat_rvalue ret={:?}", ret);
+ ret
+ }
+
+ pub(crate) fn cat_projection<N: HirNode>(
+ &self,
+ node: &N,
+ base_place: PlaceWithHirId<'tcx>,
+ ty: Ty<'tcx>,
+ kind: ProjectionKind,
+ ) -> PlaceWithHirId<'tcx> {
+ let mut projections = base_place.place.projections;
+ projections.push(Projection { kind, ty });
+ let ret = PlaceWithHirId::new(
+ node.hir_id(),
+ base_place.place.base_ty,
+ base_place.place.base,
+ projections,
+ );
+ debug!("cat_field ret {:?}", ret);
+ ret
+ }
+
+ fn cat_overloaded_place(
+ &self,
+ expr: &hir::Expr<'_>,
+ base: &hir::Expr<'_>,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ debug!("cat_overloaded_place(expr={:?}, base={:?})", expr, base);
+
+ // Reconstruct the output assuming it's a reference with the
+ // same region and mutability as the receiver. This holds for
+        // `Deref(Mut)::deref(_mut)` and `Index(Mut)::index(_mut)`.
+ let place_ty = self.expr_ty(expr)?;
+ let base_ty = self.expr_ty_adjusted(base)?;
+
+ let ty::Ref(region, _, mutbl) = *base_ty.kind() else {
+ span_bug!(expr.span, "cat_overloaded_place: base is not a reference");
+ };
+ let ref_ty = self.tcx().mk_ref(region, ty::TypeAndMut { ty: place_ty, mutbl });
+
+ let base = self.cat_rvalue(expr.hir_id, expr.span, ref_ty);
+ self.cat_deref(expr, base)
+ }
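+
+    // Example (illustrative): for
+    //
+    // ```
+    // let v = vec![1u8];
+    // let b = v[0]; // overloaded index: `Index::index(&v, 0)` returns `&u8`
+    // ```
+    //
+    // the adjusted receiver type is `&Vec<u8>`, so the output is
+    // reconstructed as a `&u8` rvalue with the same region and mutability,
+    // which `cat_deref` then dereferences to obtain the element place.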
+
+ fn cat_deref(
+ &self,
+ node: &impl HirNode,
+ base_place: PlaceWithHirId<'tcx>,
+ ) -> McResult<PlaceWithHirId<'tcx>> {
+ debug!("cat_deref: base_place={:?}", base_place);
+
+ let base_curr_ty = base_place.place.ty();
+ let deref_ty = match base_curr_ty.builtin_deref(true) {
+ Some(mt) => mt.ty,
+ None => {
+ debug!("explicit deref of non-derefable type: {:?}", base_curr_ty);
+ return Err(());
+ }
+ };
+ let mut projections = base_place.place.projections;
+ projections.push(Projection { kind: ProjectionKind::Deref, ty: deref_ty });
+
+ let ret = PlaceWithHirId::new(
+ node.hir_id(),
+ base_place.place.base_ty,
+ base_place.place.base,
+ projections,
+ );
+ debug!("cat_deref ret {:?}", ret);
+ Ok(ret)
+ }
+
+ pub(crate) fn cat_pattern<F>(
+ &self,
+ place: PlaceWithHirId<'tcx>,
+ pat: &hir::Pat<'_>,
+ mut op: F,
+ ) -> McResult<()>
+ where
+ F: FnMut(&PlaceWithHirId<'tcx>, &hir::Pat<'_>),
+ {
+ self.cat_pattern_(place, pat, &mut op)
+ }
+
+    /// Returns the variant index for an ADT used within a Struct or TupleStruct pattern.
+ /// Here `pat_hir_id` is the HirId of the pattern itself.
+ fn variant_index_for_adt(
+ &self,
+ qpath: &hir::QPath<'_>,
+ pat_hir_id: hir::HirId,
+ span: Span,
+ ) -> McResult<VariantIdx> {
+ let res = self.typeck_results.qpath_res(qpath, pat_hir_id);
+ let ty = self.typeck_results.node_type(pat_hir_id);
+ let ty::Adt(adt_def, _) = ty.kind() else {
+ self.tcx()
+ .sess
+ .delay_span_bug(span, "struct or tuple struct pattern not applied to an ADT");
+ return Err(());
+ };
+
+ match res {
+ Res::Def(DefKind::Variant, variant_id) => Ok(adt_def.variant_index_with_id(variant_id)),
+ Res::Def(DefKind::Ctor(CtorOf::Variant, ..), variant_ctor_id) => {
+ Ok(adt_def.variant_index_with_ctor_id(variant_ctor_id))
+ }
+ Res::Def(DefKind::Ctor(CtorOf::Struct, ..), _)
+ | Res::Def(DefKind::Struct | DefKind::Union | DefKind::TyAlias | DefKind::AssocTy, _)
+ | Res::SelfCtor(..)
+ | Res::SelfTy { .. } => {
+                // Structs and unions only have one variant.
+ Ok(VariantIdx::new(0))
+ }
+ _ => bug!("expected ADT path, found={:?}", res),
+ }
+ }
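+
+    // Example (illustrative): for
+    //
+    // ```
+    // enum E { A, B(u8) }
+    // match E::B(0) {
+    //     E::B(_) => {} // resolves to the ctor of `B`, the second variant,
+    //     _ => {}       // so the returned index is `VariantIdx::new(1)`
+    // }
+    // ```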
+
+ /// Returns the total number of fields in an ADT variant used within a pattern.
+ /// Here `pat_hir_id` is the HirId of the pattern itself.
+ fn total_fields_in_adt_variant(
+ &self,
+ pat_hir_id: hir::HirId,
+ variant_index: VariantIdx,
+ span: Span,
+ ) -> McResult<usize> {
+ let ty = self.typeck_results.node_type(pat_hir_id);
+ match ty.kind() {
+ ty::Adt(adt_def, _) => Ok(adt_def.variant(variant_index).fields.len()),
+ _ => {
+ self.tcx()
+ .sess
+ .delay_span_bug(span, "struct or tuple struct pattern not applied to an ADT");
+ Err(())
+ }
+ }
+ }
+
+ /// Returns the total number of fields in a tuple used within a Tuple pattern.
+ /// Here `pat_hir_id` is the HirId of the pattern itself.
+ fn total_fields_in_tuple(&self, pat_hir_id: hir::HirId, span: Span) -> McResult<usize> {
+ let ty = self.typeck_results.node_type(pat_hir_id);
+ match ty.kind() {
+ ty::Tuple(substs) => Ok(substs.len()),
+ _ => {
+ self.tcx().sess.delay_span_bug(span, "tuple pattern not applied to a tuple");
+ Err(())
+ }
+ }
+ }
+
+ // FIXME(#19596) This is a workaround, but there should be a better way to do this
+ fn cat_pattern_<F>(
+ &self,
+ mut place_with_id: PlaceWithHirId<'tcx>,
+ pat: &hir::Pat<'_>,
+ op: &mut F,
+ ) -> McResult<()>
+ where
+ F: FnMut(&PlaceWithHirId<'tcx>, &hir::Pat<'_>),
+ {
+ // Here, `place` is the `PlaceWithHirId` being matched and pat is the pattern it
+ // is being matched against.
+ //
+ // In general, the way that this works is that we walk down the pattern,
+ // constructing a `PlaceWithHirId` that represents the path that will be taken
+ // to reach the value being matched.
+
+ debug!("cat_pattern(pat={:?}, place_with_id={:?})", pat, place_with_id);
+
+ // If (pattern) adjustments are active for this pattern, adjust the `PlaceWithHirId` correspondingly.
+ // `PlaceWithHirId`s are constructed differently from patterns. For example, in
+ //
+ // ```
+ // match foo {
+        //     &&Some(x,) => { ... },
+ // _ => { ... },
+ // }
+ // ```
+ //
+ // the pattern `&&Some(x,)` is represented as `Ref { Ref { TupleStruct }}`. To build the
+ // corresponding `PlaceWithHirId` we start with the `PlaceWithHirId` for `foo`, and then, by traversing the
+ // pattern, try to answer the question: given the address of `foo`, how is `x` reached?
+ //
+ // `&&Some(x,)` `place_foo`
+ // `&Some(x,)` `deref { place_foo}`
+ // `Some(x,)` `deref { deref { place_foo }}`
+        //            `(x,)` `field0 { deref { deref { place_foo }}}` <- resulting place
+ //
+ // The above example has no adjustments. If the code were instead the (after adjustments,
+ // equivalent) version
+ //
+ // ```
+ // match foo {
+        //     Some(x,) => { ... },
+ // _ => { ... },
+ // }
+ // ```
+ //
+ // Then we see that to get the same result, we must start with
+ // `deref { deref { place_foo }}` instead of `place_foo` since the pattern is now `Some(x,)`
+ // and not `&&Some(x,)`, even though its assigned type is that of `&&Some(x,)`.
+ for _ in 0..self.typeck_results.pat_adjustments().get(pat.hir_id).map_or(0, |v| v.len()) {
+ debug!("cat_pattern: applying adjustment to place_with_id={:?}", place_with_id);
+ place_with_id = self.cat_deref(pat, place_with_id)?;
+ }
+ let place_with_id = place_with_id; // lose mutability
+ debug!("cat_pattern: applied adjustment derefs to get place_with_id={:?}", place_with_id);
+
+        // Invoke the callback, but only now, after the `place_with_id` has been adjusted.
+ //
+ // To see that this makes sense, consider `match &Some(3) { Some(x) => { ... }}`. In that
+ // case, the initial `place_with_id` will be that for `&Some(3)` and the pattern is `Some(x)`. We
+ // don't want to call `op` with these incompatible values. As written, what happens instead
+ // is that `op` is called with the adjusted place (that for `*&Some(3)`) and the pattern
+ // `Some(x)` (which matches). Recursing once more, `*&Some(3)` and the pattern `Some(x)`
+ // result in the place `Downcast<Some>(*&Some(3)).0` associated to `x` and invoke `op` with
+ // that (where the `ref` on `x` is implied).
+ op(&place_with_id, pat);
+
+ match pat.kind {
+ PatKind::Tuple(subpats, dots_pos) => {
+ // (p1, ..., pN)
+ let total_fields = self.total_fields_in_tuple(pat.hir_id, pat.span)?;
+
+ for (i, subpat) in subpats.iter().enumerate_and_adjust(total_fields, dots_pos) {
+ let subpat_ty = self.pat_ty_adjusted(subpat)?;
+ let projection_kind = ProjectionKind::Field(i as u32, VariantIdx::new(0));
+ let sub_place =
+ self.cat_projection(pat, place_with_id.clone(), subpat_ty, projection_kind);
+ self.cat_pattern_(sub_place, subpat, op)?;
+ }
+ }
+
+ PatKind::TupleStruct(ref qpath, subpats, dots_pos) => {
+ // S(p1, ..., pN)
+ let variant_index = self.variant_index_for_adt(qpath, pat.hir_id, pat.span)?;
+ let total_fields =
+ self.total_fields_in_adt_variant(pat.hir_id, variant_index, pat.span)?;
+
+ for (i, subpat) in subpats.iter().enumerate_and_adjust(total_fields, dots_pos) {
+ let subpat_ty = self.pat_ty_adjusted(subpat)?;
+ let projection_kind = ProjectionKind::Field(i as u32, variant_index);
+ let sub_place =
+ self.cat_projection(pat, place_with_id.clone(), subpat_ty, projection_kind);
+ self.cat_pattern_(sub_place, subpat, op)?;
+ }
+ }
+
+ PatKind::Struct(ref qpath, field_pats, _) => {
+ // S { f1: p1, ..., fN: pN }
+
+ let variant_index = self.variant_index_for_adt(qpath, pat.hir_id, pat.span)?;
+
+ for fp in field_pats {
+ let field_ty = self.pat_ty_adjusted(fp.pat)?;
+ let field_index = self
+ .typeck_results
+ .field_indices()
+ .get(fp.hir_id)
+ .cloned()
+ .expect("no index for a field");
+
+ let field_place = self.cat_projection(
+ pat,
+ place_with_id.clone(),
+ field_ty,
+ ProjectionKind::Field(field_index as u32, variant_index),
+ );
+ self.cat_pattern_(field_place, fp.pat, op)?;
+ }
+ }
+
+ PatKind::Or(pats) => {
+ for pat in pats {
+ self.cat_pattern_(place_with_id.clone(), pat, op)?;
+ }
+ }
+
+ PatKind::Binding(.., Some(ref subpat)) => {
+ self.cat_pattern_(place_with_id, subpat, op)?;
+ }
+
+ PatKind::Box(ref subpat) | PatKind::Ref(ref subpat, _) => {
+                // box p1, &p1, &mut p1. We can ignore the mutability of
+ // PatKind::Ref since that information is already contained
+ // in the type.
+ let subplace = self.cat_deref(pat, place_with_id)?;
+ self.cat_pattern_(subplace, subpat, op)?;
+ }
+
+ PatKind::Slice(before, ref slice, after) => {
+ let Some(element_ty) = place_with_id.place.ty().builtin_index() else {
+ debug!("explicit index of non-indexable type {:?}", place_with_id);
+ return Err(());
+ };
+ let elt_place = self.cat_projection(
+ pat,
+ place_with_id.clone(),
+ element_ty,
+ ProjectionKind::Index,
+ );
+ for before_pat in before {
+ self.cat_pattern_(elt_place.clone(), before_pat, op)?;
+ }
+ if let Some(ref slice_pat) = *slice {
+ let slice_pat_ty = self.pat_ty_adjusted(slice_pat)?;
+ let slice_place = self.cat_projection(
+ pat,
+ place_with_id,
+ slice_pat_ty,
+ ProjectionKind::Subslice,
+ );
+ self.cat_pattern_(slice_place, slice_pat, op)?;
+ }
+ for after_pat in after {
+ self.cat_pattern_(elt_place.clone(), after_pat, op)?;
+ }
+ }
+
+ PatKind::Path(_)
+ | PatKind::Binding(.., None)
+ | PatKind::Lit(..)
+ | PatKind::Range(..)
+ | PatKind::Wild => {
+ // always ok
+ }
+ }
+
+ Ok(())
+ }
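+
+    // For illustration (surface-level Rust): matching
+    //
+    // ```
+    // let arr = [1u8, 2, 3, 4];
+    // if let [first, middle @ .., last] = arr {}
+    // ```
+    //
+    // invokes `op` with an `Index` projection of the place for `arr` for
+    // `first` and `last`, and a `Subslice` projection for `middle`.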
+}
diff --git a/compiler/rustc_typeck/src/outlives/explicit.rs b/compiler/rustc_typeck/src/outlives/explicit.rs
new file mode 100644
index 000000000..7534482cc
--- /dev/null
+++ b/compiler/rustc_typeck/src/outlives/explicit.rs
@@ -0,0 +1,69 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::{self, OutlivesPredicate, TyCtxt};
+
+use super::utils::*;
+
+#[derive(Debug)]
+pub struct ExplicitPredicatesMap<'tcx> {
+ map: FxHashMap<DefId, ty::EarlyBinder<RequiredPredicates<'tcx>>>,
+}
+
+impl<'tcx> ExplicitPredicatesMap<'tcx> {
+ pub fn new() -> ExplicitPredicatesMap<'tcx> {
+ ExplicitPredicatesMap { map: FxHashMap::default() }
+ }
+
+ pub(crate) fn explicit_predicates_of(
+ &mut self,
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ ) -> &ty::EarlyBinder<RequiredPredicates<'tcx>> {
+ self.map.entry(def_id).or_insert_with(|| {
+ let predicates = if def_id.is_local() {
+ tcx.explicit_predicates_of(def_id)
+ } else {
+ tcx.predicates_of(def_id)
+ };
+ let mut required_predicates = RequiredPredicates::default();
+
+ // process predicates and convert to `RequiredPredicates` entry, see below
+ for &(predicate, span) in predicates.predicates {
+ match predicate.kind().skip_binder() {
+ ty::PredicateKind::TypeOutlives(OutlivesPredicate(ty, reg)) => {
+ insert_outlives_predicate(
+ tcx,
+ ty.into(),
+ reg,
+ span,
+ &mut required_predicates,
+ )
+ }
+
+ ty::PredicateKind::RegionOutlives(OutlivesPredicate(reg1, reg2)) => {
+ insert_outlives_predicate(
+ tcx,
+ reg1.into(),
+ reg2,
+ span,
+ &mut required_predicates,
+ )
+ }
+
+ ty::PredicateKind::Trait(..)
+ | ty::PredicateKind::Projection(..)
+ | ty::PredicateKind::WellFormed(..)
+ | ty::PredicateKind::ObjectSafe(..)
+ | ty::PredicateKind::ClosureKind(..)
+ | ty::PredicateKind::Subtype(..)
+ | ty::PredicateKind::Coerce(..)
+ | ty::PredicateKind::ConstEvaluatable(..)
+ | ty::PredicateKind::ConstEquate(..)
+ | ty::PredicateKind::TypeWellFormedFromEnv(..) => (),
+ }
+ }
+
+ ty::EarlyBinder(required_predicates)
+ })
+ }
+}
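+
+// Example (illustrative): for
+//
+// ```
+// struct Foo<'a, T>
+// where
+//     T: 'a,
+// {
+//     field: &'a T,
+// }
+// ```
+//
+// `explicit_predicates_of(Foo)` records the user-written `T: 'a` as an
+// entry in `RequiredPredicates`, while non-outlives predicates (trait
+// bounds, projections, and so on) are filtered out by the match above.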
diff --git a/compiler/rustc_typeck/src/outlives/implicit_infer.rs b/compiler/rustc_typeck/src/outlives/implicit_infer.rs
new file mode 100644
index 000000000..3b779280e
--- /dev/null
+++ b/compiler/rustc_typeck/src/outlives/implicit_infer.rs
@@ -0,0 +1,300 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind, Subst};
+use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt};
+use rustc_span::Span;
+
+use super::explicit::ExplicitPredicatesMap;
+use super::utils::*;
+
+/// Infer predicates for the items in the crate.
+///
+/// `global_inferred_outlives`: the returned map starts out empty and is
+/// filled with inferred predicates by repeatedly walking the items in the
+/// crate until a fixed point is reached.
+pub(super) fn infer_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+) -> FxHashMap<DefId, ty::EarlyBinder<RequiredPredicates<'tcx>>> {
+ debug!("infer_predicates");
+
+ let mut explicit_map = ExplicitPredicatesMap::new();
+
+ let mut global_inferred_outlives = FxHashMap::default();
+
+    // If new predicates were added, we need to re-calculate
+    // all items, since there could be new implied predicates.
+ 'outer: loop {
+ let mut predicates_added = false;
+
+        // Visit all the items and infer predicates
+ for id in tcx.hir().items() {
+ let item_did = id.def_id;
+
+ debug!("InferVisitor::visit_item(item={:?})", item_did);
+
+ let mut item_required_predicates = RequiredPredicates::default();
+ match tcx.def_kind(item_did) {
+ DefKind::Union | DefKind::Enum | DefKind::Struct => {
+ let adt_def = tcx.adt_def(item_did.to_def_id());
+
+ // Iterate over all fields in item_did
+ for field_def in adt_def.all_fields() {
+                        // Calculate the predicate requirements necessary
+                        // for item_did.
+                        //
+                        // For a field of type `&'a T` (reference) or `Adt`
+                        // (struct/enum/union) there will be outlives
+                        // requirements for adt_def.
+ let field_ty = tcx.type_of(field_def.did);
+ let field_span = tcx.def_span(field_def.did);
+ insert_required_predicates_to_be_wf(
+ tcx,
+ field_ty,
+ field_span,
+ &global_inferred_outlives,
+ &mut item_required_predicates,
+ &mut explicit_map,
+ );
+ }
+ }
+
+ _ => {}
+ };
+
+            // If new predicates were added (`item_required_predicates` has more
+            // predicates than `global_inferred_outlives` recorded for this item),
+            // the new predicates might result in implied predicates for their
+            // parent types. Therefore mark `predicates_added` as true, which
+            // ensures we walk the items again and re-calculate predicates for
+            // all of them.
+ let item_predicates_len: usize =
+ global_inferred_outlives.get(&item_did.to_def_id()).map_or(0, |p| p.0.len());
+ if item_required_predicates.len() > item_predicates_len {
+ predicates_added = true;
+ global_inferred_outlives
+ .insert(item_did.to_def_id(), ty::EarlyBinder(item_required_predicates));
+ }
+ }
+
+ if !predicates_added {
+ break 'outer;
+ }
+ }
+
+ global_inferred_outlives
+}
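+
+// Example of why iteration is needed (illustrative):
+//
+// ```
+// struct Outer<'a, T> { inner: Inner<'a, T> }
+// struct Inner<'b, U> { r: &'b U }
+// ```
+//
+// A first pass infers `U: 'b` for `Inner`; only a subsequent pass can then
+// propagate the substituted requirement `T: 'a` to `Outer`, so we loop
+// until no new predicates are added.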
+
+fn insert_required_predicates_to_be_wf<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ field_ty: Ty<'tcx>,
+ field_span: Span,
+ global_inferred_outlives: &FxHashMap<DefId, ty::EarlyBinder<RequiredPredicates<'tcx>>>,
+ required_predicates: &mut RequiredPredicates<'tcx>,
+ explicit_map: &mut ExplicitPredicatesMap<'tcx>,
+) {
+ for arg in field_ty.walk() {
+ let ty = match arg.unpack() {
+ GenericArgKind::Type(ty) => ty,
+
+ // No predicates from lifetimes or constants, except potentially
+ // constants' types, but `walk` will get to them as well.
+ GenericArgKind::Lifetime(_) | GenericArgKind::Const(_) => continue,
+ };
+
+ match *ty.kind() {
+            // The field is of type `&'a T`, which means that we will have
+            // a predicate requirement of `T: 'a` (`T` outlives `'a`).
+            //
+            // We also want to calculate potential predicates for `T` itself.
+ ty::Ref(region, rty, _) => {
+ debug!("Ref");
+ insert_outlives_predicate(tcx, rty.into(), region, field_span, required_predicates);
+ }
+
+ // For each Adt (struct/enum/union) type `Foo<'a, T>`, we
+ // can load the current set of inferred and explicit
+ // predicates from `global_inferred_outlives` and filter the
+ // ones that are TypeOutlives.
+ ty::Adt(def, substs) => {
+ // First check the inferred predicates
+ //
+ // Example 1:
+ //
+ // struct Foo<'a, T> {
+ // field1: Bar<'a, T>
+ // }
+ //
+ // struct Bar<'b, U> {
+ // field2: &'b U
+ // }
+ //
+ // Here, when processing the type of `field1`, we would
+ // request the set of implicit predicates computed for `Bar`
+ // thus far. This will initially come back empty, but in next
+ // round we will get `U: 'b`. We then apply the substitution
+ // `['b => 'a, U => T]` and thus get the requirement that `T:
+ // 'a` holds for `Foo`.
+ debug!("Adt");
+ if let Some(unsubstituted_predicates) = global_inferred_outlives.get(&def.did()) {
+ for (unsubstituted_predicate, &span) in &unsubstituted_predicates.0 {
+ // `unsubstituted_predicate` is `U: 'b` in the
+ // example above. So apply the substitution to
+ // get `T: 'a` (or `predicate`):
+ let predicate = unsubstituted_predicates
+ .rebind(*unsubstituted_predicate)
+ .subst(tcx, substs);
+ insert_outlives_predicate(
+ tcx,
+ predicate.0,
+ predicate.1,
+ span,
+ required_predicates,
+ );
+ }
+ }
+
+ // Check if the type has any explicit predicates that need
+ // to be added to `required_predicates`
+ check_explicit_predicates(
+ tcx,
+ def.did(),
+ substs,
+ required_predicates,
+ explicit_map,
+ None,
+ );
+ }
+
+ ty::Dynamic(obj, ..) => {
+ // This corresponds to `dyn Trait<..>`. In this case, we should
+ // use the explicit predicates as well.
+
+ debug!("Dynamic");
+ debug!("field_ty = {}", &field_ty);
+ debug!("ty in field = {}", &ty);
+ if let Some(ex_trait_ref) = obj.principal() {
+ // Here, we are passing the type `usize` as a
+ // placeholder value with the function
+ // `with_self_ty`, since there is no concrete type
+ // `Self` for a `dyn Trait` at this
+ // stage. Therefore when checking explicit
+ // predicates in `check_explicit_predicates` we
+ // need to ignore checking the explicit_map for
+ // Self type.
+ let substs =
+ ex_trait_ref.with_self_ty(tcx, tcx.types.usize).skip_binder().substs;
+ check_explicit_predicates(
+ tcx,
+ ex_trait_ref.skip_binder().def_id,
+ substs,
+ required_predicates,
+ explicit_map,
+ Some(tcx.types.self_param),
+ );
+ }
+ }
+
+ ty::Projection(obj) => {
+ // This corresponds to `<T as Foo<'a>>::Bar`. In this case, we should use the
+ // explicit predicates as well.
+ debug!("Projection");
+ check_explicit_predicates(
+ tcx,
+ tcx.parent(obj.item_def_id),
+ obj.substs,
+ required_predicates,
+ explicit_map,
+ None,
+ );
+ }
+
+ _ => {}
+ }
+ }
+}
+
+/// We also have to check the explicit predicates
+/// declared on the type.
+/// ```ignore (illustrative)
+/// struct Foo<'a, T> {
+/// field1: Bar<T>
+/// }
+///
+/// struct Bar<U> where U: 'static, U: Foo {
+/// ...
+/// }
+/// ```
+/// Here, we should fetch the explicit predicates, which
+/// will give us `U: 'static` and `U: Foo`. The latter we
+/// can ignore, but we will want to process `U: 'static`,
+/// applying the substitution as above.
+fn check_explicit_predicates<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ substs: &[GenericArg<'tcx>],
+ required_predicates: &mut RequiredPredicates<'tcx>,
+ explicit_map: &mut ExplicitPredicatesMap<'tcx>,
+ ignored_self_ty: Option<Ty<'tcx>>,
+) {
+ debug!(
+ "check_explicit_predicates(def_id={:?}, \
+ substs={:?}, \
+ explicit_map={:?}, \
+ required_predicates={:?}, \
+ ignored_self_ty={:?})",
+ def_id, substs, explicit_map, required_predicates, ignored_self_ty,
+ );
+ let explicit_predicates = explicit_map.explicit_predicates_of(tcx, def_id);
+
+ for (outlives_predicate, &span) in &explicit_predicates.0 {
+ debug!("outlives_predicate = {:?}", &outlives_predicate);
+
+ // Careful: If we are inferring the effects of a `dyn Trait<..>`
+ // type, then when we look up the predicates for `Trait`,
+ // we may find some that reference `Self`. e.g., perhaps the
+ // definition of `Trait` was:
+ //
+ // ```
+ // trait Trait<'a, T> where Self: 'a { .. }
+ // ```
+ //
+ // we want to ignore such predicates here, because
+ // there is no type parameter for them to affect. Consider
+ // a struct containing `dyn Trait`:
+ //
+ // ```
+ // struct MyStruct<'x, X> { field: Box<dyn Trait<'x, X>> }
+ // ```
+ //
+ // The `where Self: 'a` predicate refers to the *existential, hidden type*
+ // that is represented by the `dyn Trait`, not to the `X` type parameter
+ // (or any other generic parameter) declared on `MyStruct`.
+ //
+ // Note that we do this check for self **before** applying `substs`. In the
+ // case that `substs` come from a `dyn Trait` type, our caller will have
+ // included `Self = usize` as the value for `Self`. If we were
+ // to apply the substs, and not filter this predicate, we might then falsely
+ // conclude that e.g., `X: 'x` was a reasonable inferred requirement.
+ //
+ // Another similar case is where we have an inferred
+ // requirement like `<Self as Trait>::Foo: 'b`. We presently
+ // ignore such requirements as well (cc #54467)-- though
+ // conceivably it might be better if we could extract the `Foo
+ // = X` binding from the object type (there must be such a
+ // binding) and thus infer an outlives requirement that `X:
+ // 'b`.
+ if let Some(self_ty) = ignored_self_ty
+ && let GenericArgKind::Type(ty) = outlives_predicate.0.unpack()
+ && ty.walk().any(|arg| arg == self_ty.into())
+ {
+ debug!("skipping self ty = {:?}", &ty);
+ continue;
+ }
+
+ let predicate = explicit_predicates.rebind(*outlives_predicate).subst(tcx, substs);
+ debug!("predicate = {:?}", &predicate);
+ insert_outlives_predicate(tcx, predicate.0, predicate.1, span, required_predicates);
+ }
+}
diff --git a/compiler/rustc_typeck/src/outlives/mod.rs b/compiler/rustc_typeck/src/outlives/mod.rs
new file mode 100644
index 000000000..8fa65d51e
--- /dev/null
+++ b/compiler/rustc_typeck/src/outlives/mod.rs
@@ -0,0 +1,130 @@
+use hir::Node;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::subst::GenericArgKind;
+use rustc_middle::ty::{self, CratePredicatesMap, ToPredicate, TyCtxt};
+use rustc_span::symbol::sym;
+use rustc_span::Span;
+
+mod explicit;
+mod implicit_infer;
+pub(crate) mod outlives_bounds;
+/// Code to write unit tests for outlives.
+pub mod test;
+mod utils;
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { inferred_outlives_of, inferred_outlives_crate, ..*providers };
+}
+
+fn inferred_outlives_of(tcx: TyCtxt<'_>, item_def_id: DefId) -> &[(ty::Predicate<'_>, Span)] {
+ let id = tcx.hir().local_def_id_to_hir_id(item_def_id.expect_local());
+
+ if matches!(tcx.def_kind(item_def_id), hir::def::DefKind::AnonConst) && tcx.lazy_normalization()
+ {
+ if tcx.hir().opt_const_param_default_param_hir_id(id).is_some() {
+            // In `generics_of` we set the generics' parent to be our parent's parent, which means that
+            // we lose out on the predicates of our actual parent if we don't return those predicates here.
+            // (See the comment in `generics_of` for more information on why the parent shenanigans are necessary.)
+ //
+ // struct Foo<'a, 'b, const N: usize = { ... }>(&'a &'b ());
+ // ^^^ ^^^^^^^ the def id we are calling
+ // ^^^ inferred_outlives_of on
+            //          parent item we don't have set as the
+ // parent of generics returned by `generics_of`
+ //
+ // In the above code we want the anon const to have predicates in its param env for `'b: 'a`
+ let item_def_id = tcx.hir().get_parent_item(id);
+ // In the above code example we would be calling `inferred_outlives_of(Foo)` here
+ return tcx.inferred_outlives_of(item_def_id);
+ }
+ }
+
+ match tcx.hir().get(id) {
+ Node::Item(item) => match item.kind {
+ hir::ItemKind::Struct(..) | hir::ItemKind::Enum(..) | hir::ItemKind::Union(..) => {
+ let crate_map = tcx.inferred_outlives_crate(());
+
+ let predicates = crate_map.predicates.get(&item_def_id).copied().unwrap_or(&[]);
+
+ if tcx.has_attr(item_def_id, sym::rustc_outlives) {
+ let mut pred: Vec<String> = predicates
+ .iter()
+ .map(|(out_pred, _)| match out_pred.kind().skip_binder() {
+ ty::PredicateKind::RegionOutlives(p) => p.to_string(),
+ ty::PredicateKind::TypeOutlives(p) => p.to_string(),
+ err => bug!("unexpected predicate {:?}", err),
+ })
+ .collect();
+ pred.sort();
+
+ let span = tcx.def_span(item_def_id);
+ let mut err = tcx.sess.struct_span_err(span, "rustc_outlives");
+ for p in &pred {
+ err.note(p);
+ }
+ err.emit();
+ }
+
+ debug!("inferred_outlives_of({:?}) = {:?}", item_def_id, predicates);
+
+ predicates
+ }
+
+ _ => &[],
+ },
+
+ _ => &[],
+ }
+}
+
+fn inferred_outlives_crate(tcx: TyCtxt<'_>, (): ()) -> CratePredicatesMap<'_> {
+ // Compute a map from each struct/enum/union S to the **explicit**
+ // outlives predicates (`T: 'a`, `'a: 'b`) that the user wrote.
+ // Typically there won't be many of these, except in older code where
+ // they were mandatory. Nonetheless, we have to ensure that every such
+ // predicate is satisfied, so they form a kind of base set of requirements
+ // for the type.
+
+ // Compute the inferred predicates
+ let global_inferred_outlives = implicit_infer::infer_predicates(tcx);
+
+ // Convert the inferred predicates into the "collected" form the
+ // global data structure expects.
+ //
+ // FIXME -- consider correcting impedance mismatch in some way,
+ // probably by updating the global data structure.
+ let predicates = global_inferred_outlives
+ .iter()
+ .map(|(&def_id, set)| {
+ let predicates = &*tcx.arena.alloc_from_iter(set.0.iter().filter_map(
+ |(ty::OutlivesPredicate(kind1, region2), &span)| {
+ match kind1.unpack() {
+ GenericArgKind::Type(ty1) => Some((
+ ty::Binder::dummy(ty::PredicateKind::TypeOutlives(
+ ty::OutlivesPredicate(ty1, *region2),
+ ))
+ .to_predicate(tcx),
+ span,
+ )),
+ GenericArgKind::Lifetime(region1) => Some((
+ ty::Binder::dummy(ty::PredicateKind::RegionOutlives(
+ ty::OutlivesPredicate(region1, *region2),
+ ))
+ .to_predicate(tcx),
+ span,
+ )),
+ GenericArgKind::Const(_) => {
+ // Generic consts don't impose any constraints.
+ None
+ }
+ }
+ },
+ ));
+ (def_id, predicates)
+ })
+ .collect();
+
+ ty::CratePredicatesMap { predicates }
+}
diff --git a/compiler/rustc_typeck/src/outlives/outlives_bounds.rs b/compiler/rustc_typeck/src/outlives/outlives_bounds.rs
new file mode 100644
index 000000000..229a64650
--- /dev/null
+++ b/compiler/rustc_typeck/src/outlives/outlives_bounds.rs
@@ -0,0 +1,90 @@
+use rustc_hir as hir;
+use rustc_middle::ty::{self, Ty};
+use rustc_trait_selection::infer::InferCtxt;
+use rustc_trait_selection::traits::query::type_op::{self, TypeOp, TypeOpOutput};
+use rustc_trait_selection::traits::query::NoSolution;
+use rustc_trait_selection::traits::{ObligationCause, TraitEngine, TraitEngineExt};
+
+pub use rustc_middle::traits::query::OutlivesBound;
+
+pub trait InferCtxtExt<'tcx> {
+ fn implied_outlives_bounds(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ ty: Ty<'tcx>,
+ ) -> Vec<OutlivesBound<'tcx>>;
+}
+
+impl<'cx, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'cx, 'tcx> {
+ /// Implied bounds are region relationships that we deduce
+ /// automatically. The idea is that (e.g.) a caller must check that a
+ /// function's argument types are well-formed immediately before
+ /// calling that fn, and hence the *callee* can assume that its
+ /// argument types are well-formed. This may imply certain relationships
+ /// between generic parameters. For example:
+ /// ```
+ /// fn foo<'a,T>(x: &'a T) {}
+ /// ```
+ /// can only be called with a `'a` and `T` such that `&'a T` is WF.
+ /// For `&'a T` to be WF, `T: 'a` must hold. So we can assume `T: 'a`.
+ ///
+ /// # Parameters
+ ///
+ /// - `param_env`, the where-clauses in scope
+ /// - `body_id`, the body-id to use when normalizing assoc types.
+ /// Note that this may cause outlives obligations to be injected
+ /// into the inference context with this body-id.
+ /// - `ty`, the type that we are supposed to assume is WF.
+ #[instrument(level = "debug", skip(self, param_env, body_id))]
+ fn implied_outlives_bounds(
+ &self,
+ param_env: ty::ParamEnv<'tcx>,
+ body_id: hir::HirId,
+ ty: Ty<'tcx>,
+ ) -> Vec<OutlivesBound<'tcx>> {
+ let span = self.tcx.hir().span(body_id);
+ let result = param_env
+ .and(type_op::implied_outlives_bounds::ImpliedOutlivesBounds { ty })
+ .fully_perform(self);
+ let result = match result {
+ Ok(r) => r,
+ Err(NoSolution) => {
+ self.tcx.sess.delay_span_bug(
+ span,
+ "implied_outlives_bounds failed to solve all obligations",
+ );
+ return vec![];
+ }
+ };
+
+ let TypeOpOutput { output, constraints, .. } = result;
+
+ if let Some(constraints) = constraints {
+ // Instantiation may have produced new inference variables and constraints on those
+ // variables. Process these constraints.
+ let mut fulfill_cx = <dyn TraitEngine<'tcx>>::new(self.tcx);
+ let cause = ObligationCause::misc(span, body_id);
+ for &constraint in &constraints.outlives {
+ let obligation = self.query_outlives_constraint_to_obligation(
+ constraint,
+ cause.clone(),
+ param_env,
+ );
+ fulfill_cx.register_predicate_obligation(self, obligation);
+ }
+ if !constraints.member_constraints.is_empty() {
+ span_bug!(span, "{:#?}", constraints.member_constraints);
+ }
+ let errors = fulfill_cx.select_all_or_error(self);
+ if !errors.is_empty() {
+ self.tcx.sess.delay_span_bug(
+ span,
+ "implied_outlives_bounds failed to solve obligations from instantiation",
+ );
+ }
+ };
+
+ output
+ }
+}
diff --git a/compiler/rustc_typeck/src/outlives/test.rs b/compiler/rustc_typeck/src/outlives/test.rs
new file mode 100644
index 000000000..eb0e12034
--- /dev/null
+++ b/compiler/rustc_typeck/src/outlives/test.rs
@@ -0,0 +1,21 @@
+use rustc_errors::struct_span_err;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
+
+pub fn test_inferred_outlives(tcx: TyCtxt<'_>) {
+ for id in tcx.hir().items() {
+        // For unit testing: check for a special "rustc_outlives"
+        // attribute and, if found, report the inferred predicates in an error.
+ if tcx.has_attr(id.def_id.to_def_id(), sym::rustc_outlives) {
+ let inferred_outlives_of = tcx.inferred_outlives_of(id.def_id);
+ struct_span_err!(
+ tcx.sess,
+ tcx.def_span(id.def_id),
+ E0640,
+ "{:?}",
+ inferred_outlives_of
+ )
+ .emit();
+ }
+ }
+}
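+
+// Example (nightly-only, illustrative):
+//
+// ```
+// #![feature(rustc_attrs)]
+//
+// #[rustc_outlives]
+// struct Foo<'a, T> { // the inferred `T: 'a` is reported as an error here,
+//     field: &'a T,   // making the inference result visible to UI tests
+// }
+// ```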
diff --git a/compiler/rustc_typeck/src/outlives/utils.rs b/compiler/rustc_typeck/src/outlives/utils.rs
new file mode 100644
index 000000000..b718ca942
--- /dev/null
+++ b/compiler/rustc_typeck/src/outlives/utils.rs
@@ -0,0 +1,175 @@
+use rustc_infer::infer::outlives::components::{push_outlives_components, Component};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
+use rustc_middle::ty::{self, Region, Ty, TyCtxt};
+use rustc_span::Span;
+use smallvec::smallvec;
+use std::collections::BTreeMap;
+
+/// Tracks the `T: 'a` or `'a: 'b` predicates that we have inferred
+/// must be added to the struct header.
+pub(crate) type RequiredPredicates<'tcx> =
+ BTreeMap<ty::OutlivesPredicate<GenericArg<'tcx>, ty::Region<'tcx>>, Span>;
+
+/// Given a requirement `T: 'a` or `'b: 'a`, deduce the
+/// outlives components and add them to `required_predicates`.
+pub(crate) fn insert_outlives_predicate<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ kind: GenericArg<'tcx>,
+ outlived_region: Region<'tcx>,
+ span: Span,
+ required_predicates: &mut RequiredPredicates<'tcx>,
+) {
+ // If the `'a` region is bound within the field type itself, we
+ // don't want to propagate this constraint to the header.
+ if !is_free_region(outlived_region) {
+ return;
+ }
+
+ match kind.unpack() {
+ GenericArgKind::Type(ty) => {
+ // `T: 'outlived_region` for some type `T`
+ // But T could be a lot of things:
+ // e.g., if `T = &'b u32`, then `'b: 'outlived_region` is
+ // what we want to add.
+ //
+ // Or if within `struct Foo<U>` you had `T = Vec<U>`, then
+ // we would want to add `U: 'outlived_region`
+ let mut components = smallvec![];
+ push_outlives_components(tcx, ty, &mut components);
+ for component in components {
+ match component {
+ Component::Region(r) => {
+ // This would arise from something like:
+ //
+ // ```
+ // struct Foo<'a, 'b> {
+ // x: &'a &'b u32
+ // }
+ // ```
+ //
+ // Here `outlived_region = 'a` and `kind = &'b
+ // u32`. Decomposing `&'b u32` into
+ // components would yield `'b`, and we add the
+ // where clause that `'b: 'a`.
+ insert_outlives_predicate(
+ tcx,
+ r.into(),
+ outlived_region,
+ span,
+ required_predicates,
+ );
+ }
+
+ Component::Param(param_ty) => {
+ // param_ty: ty::ParamTy
+ // This would arise from something like:
+ //
+ // ```
+ // struct Foo<'a, U> {
+ // x: &'a Vec<U>
+ // }
+ // ```
+ //
+ // Here `outlived_region = 'a` and `kind =
+ // Vec<U>`. Decomposing `Vec<U>` into
+ // components would yield `U`, and we add the
+ // where clause that `U: 'a`.
+ let ty: Ty<'tcx> = param_ty.to_ty(tcx);
+ required_predicates
+ .entry(ty::OutlivesPredicate(ty.into(), outlived_region))
+ .or_insert(span);
+ }
+
+ Component::Projection(proj_ty) => {
+ // This would arise from something like:
+ //
+ // ```
+ // struct Foo<'a, T: Iterator> {
+ // x: &'a <T as Iterator>::Item
+ // }
+ // ```
+ //
+ // Here we want to add an explicit `where <T as Iterator>::Item: 'a`.
+ let ty: Ty<'tcx> = tcx.mk_projection(proj_ty.item_def_id, proj_ty.substs);
+ required_predicates
+ .entry(ty::OutlivesPredicate(ty.into(), outlived_region))
+ .or_insert(span);
+ }
+
+ Component::EscapingProjection(_) => {
+ // As above, but the projection involves
+ // late-bound regions. Therefore, the WF
+ // requirement is not checked in type definition
+ // but at fn call site, so ignore it.
+ //
+ // ```
+ // struct Foo<'a, T: Iterator> {
+ // x: for<'b> fn(<&'b T as Iterator>::Item)
+ // // ^^^^^^^^^^^^^^^^^^^^^^^^^
+ // }
+ // ```
+ //
+                        // Since `'b` is not in scope on `Foo`, we can't
+                        // do anything here, so we ignore it.
+ }
+
+ Component::UnresolvedInferenceVariable(_) => bug!("not using infcx"),
+ }
+ }
+ }
+
+ GenericArgKind::Lifetime(r) => {
+ if !is_free_region(r) {
+ return;
+ }
+ required_predicates.entry(ty::OutlivesPredicate(kind, outlived_region)).or_insert(span);
+ }
+
+ GenericArgKind::Const(_) => {
+ // Generic consts don't impose any constraints.
+ }
+ }
+}
+
+fn is_free_region(region: Region<'_>) -> bool {
+ // First, screen for regions that might appear in a type header.
+ match *region {
+ // These correspond to `T: 'a` relationships:
+ //
+ // struct Foo<'a, T> {
+ // field: &'a T, // this would generate a ReEarlyBound referencing `'a`
+ // }
+ //
+ // We care about these, so fall through.
+ ty::ReEarlyBound(_) => true,
+
+ // These correspond to `T: 'static` relationships which can be
+ // rather surprising.
+ //
+ // struct Foo<'a, T> {
+ // field: &'static T, // this would generate a ReStatic
+ // }
+ ty::ReStatic => false,
+
+ // Late-bound regions can appear in `fn` types:
+ //
+ // struct Foo<T> {
+ // field: for<'b> fn(&'b T) // e.g., 'b here
+ // }
+ //
+ // The type above might generate a `T: 'b` bound, but we can
+ // ignore it. We can't put it on the struct header anyway.
+ ty::ReLateBound(..) => false,
+
+ // This can appear in `where Self: ` bounds (#64855):
+ //
+ // struct Bar<T>(<Self as Foo>::Type) where Self: ;
+ // struct Baz<'a>(&'a Self) where Self: ;
+ ty::ReEmpty(_) => false,
+
+ // These regions don't appear in types from type declarations:
+ ty::ReErased | ty::ReVar(..) | ty::RePlaceholder(..) | ty::ReFree(..) => {
+ bug!("unexpected region in outlives inference: {:?}", region);
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/structured_errors.rs b/compiler/rustc_typeck/src/structured_errors.rs
new file mode 100644
index 000000000..0b46fce17
--- /dev/null
+++ b/compiler/rustc_typeck/src/structured_errors.rs
@@ -0,0 +1,42 @@
+mod missing_cast_for_variadic_arg;
+mod sized_unsized_cast;
+mod wrong_number_of_generic_args;
+
+pub use self::{
+ missing_cast_for_variadic_arg::*, sized_unsized_cast::*, wrong_number_of_generic_args::*,
+};
+
+use rustc_errors::{DiagnosticBuilder, DiagnosticId, ErrorGuaranteed};
+use rustc_session::Session;
+
+pub trait StructuredDiagnostic<'tcx> {
+ fn session(&self) -> &Session;
+
+ fn code(&self) -> DiagnosticId;
+
+ fn diagnostic(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let err = self.diagnostic_common();
+
+ if self.session().teach(&self.code()) {
+ self.diagnostic_extended(err)
+ } else {
+ self.diagnostic_regular(err)
+ }
+ }
+
+ fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed>;
+
+ fn diagnostic_regular(
+ &self,
+ err: DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ err
+ }
+
+ fn diagnostic_extended(
+ &self,
+ err: DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ err
+ }
+}
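+
+// A minimal sketch of an implementor (hypothetical, reusing the imports
+// above; `MyError` and `E0999` are made up for illustration):
+//
+// ```
+// use rustc_span::Span;
+//
+// struct MyError<'tcx> {
+//     sess: &'tcx Session,
+//     span: Span,
+// }
+//
+// impl<'tcx> StructuredDiagnostic<'tcx> for MyError<'tcx> {
+//     fn session(&self) -> &Session { self.sess }
+//
+//     fn code(&self) -> DiagnosticId { rustc_errors::error_code!(E0999) }
+//
+//     fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+//         self.sess.struct_span_err_with_code(self.span, "my error", self.code())
+//     }
+// }
+// ```
+//
+// `diagnostic()` then appends the `diagnostic_extended` output only when the
+// session runs in teaching mode.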
diff --git a/compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs b/compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs
new file mode 100644
index 000000000..324df313e
--- /dev/null
+++ b/compiler/rustc_typeck/src/structured_errors/missing_cast_for_variadic_arg.rs
@@ -0,0 +1,61 @@
+use crate::structured_errors::StructuredDiagnostic;
+use rustc_errors::{Applicability, DiagnosticBuilder, DiagnosticId, ErrorGuaranteed};
+use rustc_middle::ty::{Ty, TypeVisitable};
+use rustc_session::Session;
+use rustc_span::Span;
+
+pub struct MissingCastForVariadicArg<'tcx, 's> {
+ pub sess: &'tcx Session,
+ pub span: Span,
+ pub ty: Ty<'tcx>,
+ pub cast_ty: &'s str,
+}
+
+impl<'tcx> StructuredDiagnostic<'tcx> for MissingCastForVariadicArg<'tcx, '_> {
+ fn session(&self) -> &Session {
+ self.sess
+ }
+
+ fn code(&self) -> DiagnosticId {
+ rustc_errors::error_code!(E0617)
+ }
+
+ fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = self.sess.struct_span_err_with_code(
+ self.span,
+ &format!("can't pass `{}` to variadic function", self.ty),
+ self.code(),
+ );
+
+ if self.ty.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
+
+ if let Ok(snippet) = self.sess.source_map().span_to_snippet(self.span) {
+ err.span_suggestion(
+ self.span,
+ &format!("cast the value to `{}`", self.cast_ty),
+ format!("{} as {}", snippet, self.cast_ty),
+ Applicability::MachineApplicable,
+ );
+ } else {
+ err.help(&format!("cast the value to `{}`", self.cast_ty));
+ }
+
+ err
+ }
+
+ fn diagnostic_extended(
+ &self,
+ mut err: DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ err.note(&format!(
+ "certain types, like `{}`, must be casted before passing them to a \
+ variadic function, because of arcane ABI rules dictated by the C \
+ standard",
+ self.ty
+ ));
+
+ err
+ }
+}
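+
+// Example of the error this produces (illustrative; exact rendering may
+// differ):
+//
+// ```
+// extern "C" {
+//     fn printf(fmt: *const i8, ...) -> i32;
+// }
+//
+// fn main() {
+//     unsafe {
+//         // error[E0617]: can't pass `f32` to variadic function
+//         // help: cast the value to `c_double`: `0.5f32 as c_double`
+//         printf(b"%f\0".as_ptr() as *const i8, 0.5f32);
+//     }
+// }
+// ```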
diff --git a/compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs b/compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs
new file mode 100644
index 000000000..bb6088054
--- /dev/null
+++ b/compiler/rustc_typeck/src/structured_errors/sized_unsized_cast.rs
@@ -0,0 +1,62 @@
+use crate::structured_errors::StructuredDiagnostic;
+use rustc_errors::{DiagnosticBuilder, DiagnosticId, ErrorGuaranteed};
+use rustc_middle::ty::{Ty, TypeVisitable};
+use rustc_session::Session;
+use rustc_span::Span;
+
+pub struct SizedUnsizedCast<'tcx> {
+ pub sess: &'tcx Session,
+ pub span: Span,
+ pub expr_ty: Ty<'tcx>,
+ pub cast_ty: String,
+}
+
+impl<'tcx> StructuredDiagnostic<'tcx> for SizedUnsizedCast<'tcx> {
+ fn session(&self) -> &Session {
+ self.sess
+ }
+
+ fn code(&self) -> DiagnosticId {
+ rustc_errors::error_code!(E0607)
+ }
+
+ fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = self.sess.struct_span_err_with_code(
+ self.span,
+ &format!(
+ "cannot cast thin pointer `{}` to fat pointer `{}`",
+ self.expr_ty, self.cast_ty
+ ),
+ self.code(),
+ );
+
+ if self.expr_ty.references_error() {
+ err.downgrade_to_delayed_bug();
+ }
+
+ err
+ }
+
+ fn diagnostic_extended(
+ &self,
+ mut err: DiagnosticBuilder<'tcx, ErrorGuaranteed>,
+ ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ err.help(
+ "Thin pointers are \"simple\" pointers: they are purely a reference to a
+memory address.
+
+Fat pointers are pointers referencing \"Dynamically Sized Types\" (also
+called DSTs). DSTs don't have a statically known size, therefore they can
+only exist behind some kind of pointer that carries additional
+information. Slices and trait objects are DSTs. In the case of slices,
+the additional information the fat pointer holds is their length.
+
+To fix this error, don't try to cast directly between thin and fat
+pointers.
+
+For more information about casts, take a look at The Book:
+https://doc.rust-lang.org/reference/expressions/operator-expr.html#type-cast-expressions",
+ );
+ err
+ }
+}
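+
+// Example of the error this produces (illustrative):
+//
+// ```
+// fn main() {
+//     let p = 0usize as *const u8;
+//     // error[E0607]: cannot cast thin pointer `*const u8` to fat pointer
+//     // `*const [u8]`
+//     let q = p as *const [u8];
+// }
+// ```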
diff --git a/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs b/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs
new file mode 100644
index 000000000..99729391e
--- /dev/null
+++ b/compiler/rustc_typeck/src/structured_errors/wrong_number_of_generic_args.rs
@@ -0,0 +1,890 @@
+use crate::structured_errors::StructuredDiagnostic;
+use rustc_errors::{
+ pluralize, Applicability, Diagnostic, DiagnosticBuilder, DiagnosticId, ErrorGuaranteed,
+ MultiSpan,
+};
+use rustc_hir as hir;
+use rustc_middle::hir::map::fn_sig;
+use rustc_middle::ty::{self as ty, AssocItems, AssocKind, TyCtxt};
+use rustc_session::Session;
+use rustc_span::def_id::DefId;
+use std::iter;
+
+use GenericArgsInfo::*;
+
+/// Handles the `wrong number of type / lifetime / ... arguments` family of error messages.
+pub struct WrongNumberOfGenericArgs<'a, 'tcx> {
+ pub(crate) tcx: TyCtxt<'tcx>,
+
+ pub(crate) angle_brackets: AngleBrackets,
+
+ pub(crate) gen_args_info: GenericArgsInfo,
+
+ /// Offending path segment
+ pub(crate) path_segment: &'a hir::PathSegment<'a>,
+
+ /// Generic parameters as expected by type or trait
+ pub(crate) gen_params: &'a ty::Generics,
+
+    /// Index offset into parameters. Depends on whether `Self` is included and on
+    /// the number of lifetime parameters when we're processing missing or redundant
+    /// type or constant arguments.
+ pub(crate) params_offset: usize,
+
+ /// Generic arguments as provided by user
+ pub(crate) gen_args: &'a hir::GenericArgs<'a>,
+
+ /// DefId of the generic type
+ pub(crate) def_id: DefId,
+}
+
+// Provides information about the angle brackets (if any) that were written
+// for the `PathSegment` for which wrong generic arguments were detected.
+#[derive(Debug)]
+pub(crate) enum AngleBrackets {
+ // No angle brackets were provided, but generic arguments exist in elided form
+ Implied,
+
+ // No angle brackets were provided
+ Missing,
+
+ // Angle brackets are available, but missing some generic arguments
+ Available,
+}
+
+// Information about the kind of arguments that are either missing or are unexpected
+#[derive(Debug)]
+pub enum GenericArgsInfo {
+ MissingLifetimes {
+ num_missing_args: usize,
+ },
+ ExcessLifetimes {
+ num_redundant_args: usize,
+ },
+ MissingTypesOrConsts {
+ num_missing_args: usize,
+
+ // type or const generic arguments can have default values
+ num_default_params: usize,
+
+        // lifetime arguments precede type and const parameters, so this
+        // field gives the number of generic lifetime arguments, letting
+ // us infer the position of type and const generic arguments
+ // in the angle brackets
+ args_offset: usize,
+ },
+
+ ExcessTypesOrConsts {
+ num_redundant_args: usize,
+
+ // type or const generic arguments can have default values
+ num_default_params: usize,
+
+        // lifetime arguments precede type and const parameters, so this
+        // field gives the number of generic lifetime arguments, letting
+ // us infer the position of type and const generic arguments
+ // in the angle brackets
+ args_offset: usize,
+
+        // whether synthetic type arguments (e.g. `impl Trait`) were specified
+ synth_provided: bool,
+ },
+}
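+
+// Example (illustrative): `type Map = HashMap<String>;` would be described
+// as `MissingTypesOrConsts { num_missing_args: 1, num_default_params: 1,
+// args_offset: 0 }`, since `HashMap<K, V, S = RandomState>` still needs `V`
+// and has one defaulted parameter.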
+
+impl<'a, 'tcx> WrongNumberOfGenericArgs<'a, 'tcx> {
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ gen_args_info: GenericArgsInfo,
+ path_segment: &'a hir::PathSegment<'_>,
+ gen_params: &'a ty::Generics,
+ params_offset: usize,
+ gen_args: &'a hir::GenericArgs<'a>,
+ def_id: DefId,
+ ) -> Self {
+ let angle_brackets = if gen_args.span_ext().is_none() {
+ if gen_args.is_empty() { AngleBrackets::Missing } else { AngleBrackets::Implied }
+ } else {
+ AngleBrackets::Available
+ };
+
+ Self {
+ tcx,
+ angle_brackets,
+ gen_args_info,
+ path_segment,
+ gen_params,
+ params_offset,
+ gen_args,
+ def_id,
+ }
+ }
+
+ fn missing_lifetimes(&self) -> bool {
+ match self.gen_args_info {
+ MissingLifetimes { .. } | ExcessLifetimes { .. } => true,
+ MissingTypesOrConsts { .. } | ExcessTypesOrConsts { .. } => false,
+ }
+ }
+
+ fn kind(&self) -> &str {
+ if self.missing_lifetimes() { "lifetime" } else { "generic" }
+ }
+
+ fn num_provided_args(&self) -> usize {
+ if self.missing_lifetimes() {
+ self.num_provided_lifetime_args()
+ } else {
+ self.num_provided_type_or_const_args()
+ }
+ }
+
+ fn num_provided_lifetime_args(&self) -> usize {
+ match self.angle_brackets {
+ AngleBrackets::Missing => 0,
+ // Only lifetime arguments can be implied
+ AngleBrackets::Implied => self.gen_args.args.len(),
+ AngleBrackets::Available => self.gen_args.num_lifetime_params(),
+ }
+ }
+
+ fn num_provided_type_or_const_args(&self) -> usize {
+ match self.angle_brackets {
+ AngleBrackets::Missing => 0,
+ // Only lifetime arguments can be implied
+ AngleBrackets::Implied => 0,
+ AngleBrackets::Available => self.gen_args.num_generic_params(),
+ }
+ }
+
+ fn num_expected_lifetime_args(&self) -> usize {
+ let num_provided_args = self.num_provided_lifetime_args();
+ match self.gen_args_info {
+ MissingLifetimes { num_missing_args } => num_provided_args + num_missing_args,
+ ExcessLifetimes { num_redundant_args } => num_provided_args - num_redundant_args,
+ _ => 0,
+ }
+ }
+
+ fn num_expected_type_or_const_args(&self) -> usize {
+ let num_provided_args = self.num_provided_type_or_const_args();
+ match self.gen_args_info {
+ MissingTypesOrConsts { num_missing_args, .. } => num_provided_args + num_missing_args,
+ ExcessTypesOrConsts { num_redundant_args, .. } => {
+ num_provided_args - num_redundant_args
+ }
+ _ => 0,
+ }
+ }
+
+ // Gives the number of expected arguments taking into account default arguments
+ fn num_expected_type_or_const_args_including_defaults(&self) -> usize {
+ let provided_args = self.num_provided_type_or_const_args();
+ match self.gen_args_info {
+ MissingTypesOrConsts { num_missing_args, num_default_params, .. } => {
+ provided_args + num_missing_args - num_default_params
+ }
+ ExcessTypesOrConsts { num_redundant_args, num_default_params, .. } => {
+ provided_args - num_redundant_args - num_default_params
+ }
+ _ => 0,
+ }
+ }
+
+ fn num_missing_lifetime_args(&self) -> usize {
+ let missing_args = self.num_expected_lifetime_args() - self.num_provided_lifetime_args();
+ assert!(missing_args > 0);
+ missing_args
+ }
+
+ fn num_missing_type_or_const_args(&self) -> usize {
+ let missing_args = self.num_expected_type_or_const_args_including_defaults()
+ - self.num_provided_type_or_const_args();
+ assert!(missing_args > 0);
+ missing_args
+ }
+
+ fn num_excess_lifetime_args(&self) -> usize {
+ match self.gen_args_info {
+ ExcessLifetimes { num_redundant_args } => num_redundant_args,
+ _ => 0,
+ }
+ }
+
+ fn num_excess_type_or_const_args(&self) -> usize {
+ match self.gen_args_info {
+ ExcessTypesOrConsts { num_redundant_args, .. } => num_redundant_args,
+ _ => 0,
+ }
+ }
+
+ fn too_many_args_provided(&self) -> bool {
+ match self.gen_args_info {
+ MissingLifetimes { .. } | MissingTypesOrConsts { .. } => false,
+ ExcessLifetimes { num_redundant_args }
+ | ExcessTypesOrConsts { num_redundant_args, .. } => {
+ assert!(num_redundant_args > 0);
+ true
+ }
+ }
+ }
+
+ fn not_enough_args_provided(&self) -> bool {
+ match self.gen_args_info {
+ MissingLifetimes { num_missing_args }
+ | MissingTypesOrConsts { num_missing_args, .. } => {
+ assert!(num_missing_args > 0);
+ true
+ }
+ ExcessLifetimes { .. } | ExcessTypesOrConsts { .. } => false,
+ }
+ }
+
+ // Helper method to get the index offset in angle brackets, at which type or const arguments
+ // start appearing
+ fn get_lifetime_args_offset(&self) -> usize {
+ match self.gen_args_info {
+ MissingLifetimes { .. } | ExcessLifetimes { .. } => 0,
+ MissingTypesOrConsts { args_offset, .. } | ExcessTypesOrConsts { args_offset, .. } => {
+ args_offset
+ }
+ }
+ }
+
+ fn get_num_default_params(&self) -> usize {
+ match self.gen_args_info {
+ MissingTypesOrConsts { num_default_params, .. }
+ | ExcessTypesOrConsts { num_default_params, .. } => num_default_params,
+ _ => 0,
+ }
+ }
+
+ fn is_synth_provided(&self) -> bool {
+ match self.gen_args_info {
+ ExcessTypesOrConsts { synth_provided, .. } => synth_provided,
+ _ => false,
+ }
+ }
+
+ // Helper function to choose a quantifier word for the number of expected arguments
+ // and to give a bound for the number of expected arguments
+ fn get_quantifier_and_bound(&self) -> (&'static str, usize) {
+ if self.get_num_default_params() == 0 {
+ match self.gen_args_info {
+ MissingLifetimes { .. } | ExcessLifetimes { .. } => {
+ ("", self.num_expected_lifetime_args())
+ }
+ MissingTypesOrConsts { .. } | ExcessTypesOrConsts { .. } => {
+ ("", self.num_expected_type_or_const_args())
+ }
+ }
+ } else {
+ match self.gen_args_info {
+ MissingLifetimes { .. } => ("at least ", self.num_expected_lifetime_args()),
+ MissingTypesOrConsts { .. } => {
+ ("at least ", self.num_expected_type_or_const_args_including_defaults())
+ }
+ ExcessLifetimes { .. } => ("at most ", self.num_expected_lifetime_args()),
+ ExcessTypesOrConsts { .. } => ("at most ", self.num_expected_type_or_const_args()),
+ }
+ }
+ }
+
+ // Creates lifetime name suggestions from the lifetime parameter names
+ fn get_lifetime_args_suggestions_from_param_names(
+ &self,
+ path_hir_id: Option<hir::HirId>,
+ num_params_to_take: usize,
+ ) -> String {
+ debug!(?path_hir_id);
+
+ if let Some(path_hir_id) = path_hir_id {
+ let mut ret = Vec::new();
+ for (id, node) in self.tcx.hir().parent_iter(path_hir_id) {
+ debug!(?id);
+ let params = if let Some(generics) = node.generics() {
+ generics.params
+ } else if let hir::Node::Ty(ty) = node
+ && let hir::TyKind::BareFn(bare_fn) = ty.kind
+ {
+ bare_fn.generic_params
+ } else {
+ &[]
+ };
+ ret.extend(params.iter().filter_map(|p| {
+ let hir::GenericParamKind::Lifetime { kind: hir::LifetimeParamKind::Explicit }
+ = p.kind
+ else { return None };
+ let hir::ParamName::Plain(name) = p.name else { return None };
+ Some(name.to_string())
+ }));
+            // Suggest `'static` when in a const/static item-like context.
+ if let hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Static { .. } | hir::ItemKind::Const { .. },
+ ..
+ })
+ | hir::Node::TraitItem(hir::TraitItem {
+ kind: hir::TraitItemKind::Const { .. },
+ ..
+ })
+ | hir::Node::ImplItem(hir::ImplItem {
+ kind: hir::ImplItemKind::Const { .. },
+ ..
+ })
+ | hir::Node::ForeignItem(hir::ForeignItem {
+ kind: hir::ForeignItemKind::Static { .. },
+ ..
+ })
+ | hir::Node::AnonConst(..) = node
+ {
+ ret.extend(
+ std::iter::repeat("'static".to_owned())
+ .take(num_params_to_take.saturating_sub(ret.len())),
+ );
+ }
+ if ret.len() >= num_params_to_take {
+ return ret[..num_params_to_take].join(", ");
+ }
+ // We cannot refer to lifetimes defined in an outer function.
+ if let hir::Node::Item(_) = node {
+ break;
+ }
+ }
+ }
+
+ // We could not gather enough lifetime parameters in the scope.
+ // We use the parameter names from the target type's definition instead.
+ self.gen_params
+ .params
+ .iter()
+ .skip(self.params_offset + self.num_provided_lifetime_args())
+ .take(num_params_to_take)
+ .map(|param| param.name.to_string())
+ .collect::<Vec<_>>()
+ .join(", ")
+ }
+
+ // Creates type or constant name suggestions from the provided parameter names
+ fn get_type_or_const_args_suggestions_from_param_names(
+ &self,
+ num_params_to_take: usize,
+ ) -> String {
+ let fn_sig = self.tcx.hir().get_if_local(self.def_id).and_then(fn_sig);
+ let is_used_in_input = |def_id| {
+ fn_sig.map_or(false, |fn_sig| {
+ fn_sig.decl.inputs.iter().any(|ty| match ty.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(
+ None,
+ hir::Path { res: hir::def::Res::Def(_, id), .. },
+ )) => *id == def_id,
+ _ => false,
+ })
+ })
+ };
+ self.gen_params
+ .params
+ .iter()
+ .skip(self.params_offset + self.num_provided_type_or_const_args())
+ .take(num_params_to_take)
+ .map(|param| match param.kind {
+ // This is being inferred from the item's inputs, no need to set it.
+ ty::GenericParamDefKind::Type { .. } if is_used_in_input(param.def_id) => {
+ "_".to_string()
+ }
+ _ => param.name.to_string(),
+ })
+ .collect::<Vec<_>>()
+ .join(", ")
+ }
+
+ fn get_unbound_associated_types(&self) -> Vec<String> {
+ if self.tcx.is_trait(self.def_id) {
+ let items: &AssocItems<'_> = self.tcx.associated_items(self.def_id);
+ items
+ .in_definition_order()
+ .filter(|item| item.kind == AssocKind::Type)
+ .filter(|item| {
+ !self.gen_args.bindings.iter().any(|binding| binding.ident.name == item.name)
+ })
+ .map(|item| item.name.to_ident_string())
+ .collect()
+ } else {
+ Vec::default()
+ }
+ }
+
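+ // Illustrative outputs, derived from the format strings below:
+ // "this struct takes at least 2 generic arguments but 1 generic argument
+ // was supplied", or, when the path has no angle brackets at all,
+ // "missing generics for struct `Foo`".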
+ fn create_error_message(&self) -> String {
+ let def_path = self.tcx.def_path_str(self.def_id);
+ let def_kind = self.tcx.def_kind(self.def_id).descr(self.def_id);
+ let (quantifier, bound) = self.get_quantifier_and_bound();
+ let kind = self.kind();
+ let provided_lt_args = self.num_provided_lifetime_args();
+ let provided_type_or_const_args = self.num_provided_type_or_const_args();
+
+ let (provided_args_str, verb) = match self.gen_args_info {
+ MissingLifetimes { .. } | ExcessLifetimes { .. } => (
+ format!("{} lifetime argument{}", provided_lt_args, pluralize!(provided_lt_args)),
+ pluralize!("was", provided_lt_args),
+ ),
+ MissingTypesOrConsts { .. } | ExcessTypesOrConsts { .. } => (
+ format!(
+ "{} generic argument{}",
+ provided_type_or_const_args,
+ pluralize!(provided_type_or_const_args)
+ ),
+ pluralize!("was", provided_type_or_const_args),
+ ),
+ };
+
+ if self.gen_args.span_ext().is_some() {
+ format!(
+ "this {} takes {}{} {} argument{} but {} {} supplied",
+ def_kind,
+ quantifier,
+ bound,
+ kind,
+ pluralize!(bound),
+ provided_args_str.as_str(),
+ verb
+ )
+ } else {
+ format!("missing generics for {} `{}`", def_kind, def_path)
+ }
+ }
+
+ fn start_diagnostics(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let span = self.path_segment.ident.span;
+ let msg = self.create_error_message();
+
+ self.tcx.sess.struct_span_err_with_code(span, &msg, self.code())
+ }
+
+ /// Builds the `expected 1 type argument / supplied 2 type arguments` message.
+ fn notify(&self, err: &mut Diagnostic) {
+ let (quantifier, bound) = self.get_quantifier_and_bound();
+ let provided_args = self.num_provided_args();
+
+ err.span_label(
+ self.path_segment.ident.span,
+ format!(
+ "expected {}{} {} argument{}",
+ quantifier,
+ bound,
+ self.kind(),
+ pluralize!(bound),
+ ),
+ );
+
+ // When too many arguments were provided, we don't highlight each of them, because it
+ // would overlap with the suggestion to remove them:
+ //
+ // ```
+ // type Foo = Bar<usize, usize>;
+ // ----- ----- supplied 2 type arguments
+ // ^^^^^^^ remove this type argument
+ // ```
+ if self.too_many_args_provided() {
+ return;
+ }
+
+ let args = self
+ .gen_args
+ .args
+ .iter()
+ .skip(self.get_lifetime_args_offset())
+ .take(provided_args)
+ .enumerate();
+
+ for (i, arg) in args {
+ err.span_label(
+ arg.span(),
+ if i + 1 == provided_args {
+ format!(
+ "supplied {} {} argument{}",
+ provided_args,
+ self.kind(),
+ pluralize!(provided_args)
+ )
+ } else {
+ String::new()
+ },
+ );
+ }
+ }
+
+ fn suggest(&self, err: &mut Diagnostic) {
+ debug!(
+ "suggest(self.provided {:?}, self.gen_args.span(): {:?})",
+ self.num_provided_args(),
+ self.gen_args.span(),
+ );
+
+ match self.angle_brackets {
+ AngleBrackets::Missing | AngleBrackets::Implied => self.suggest_adding_args(err),
+ AngleBrackets::Available => {
+ if self.not_enough_args_provided() {
+ self.suggest_adding_args(err);
+ } else if self.too_many_args_provided() {
+ self.suggest_removing_args_or_generics(err);
+ } else {
+ unreachable!();
+ }
+ }
+ }
+ }
+
+ /// Suggests adding the missing argument(s) when the current invocation site
+ /// already contains some generics:
+ ///
+ /// ```text
+ /// type Map = HashMap<String>;
+ /// ```
+ fn suggest_adding_args(&self, err: &mut Diagnostic) {
+ if self.gen_args.parenthesized {
+ return;
+ }
+
+ match self.gen_args_info {
+ MissingLifetimes { .. } => {
+ self.suggest_adding_lifetime_args(err);
+ }
+ MissingTypesOrConsts { .. } => {
+ self.suggest_adding_type_and_const_args(err);
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn suggest_adding_lifetime_args(&self, err: &mut Diagnostic) {
+ debug!("suggest_adding_lifetime_args(path_segment: {:?})", self.path_segment);
+ let num_missing_args = self.num_missing_lifetime_args();
+ let num_params_to_take = num_missing_args;
+ let msg = format!("add missing {} argument{}", self.kind(), pluralize!(num_missing_args));
+
+ let suggested_args = self.get_lifetime_args_suggestions_from_param_names(
+ self.path_segment.hir_id,
+ num_params_to_take,
+ );
+ debug!("suggested_args: {:?}", &suggested_args);
+
+ match self.angle_brackets {
+ AngleBrackets::Missing => {
+ let span = self.path_segment.ident.span;
+
+ // insert a suggestion of the form "Y<'a, 'b>"
+ let ident = self.path_segment.ident.name.to_ident_string();
+ let sugg = format!("{}<{}>", ident, suggested_args);
+ debug!("sugg: {:?}", sugg);
+
+ err.span_suggestion_verbose(span, &msg, sugg, Applicability::HasPlaceholders);
+ }
+
+ AngleBrackets::Available => {
+ let (sugg_span, is_first) = if self.num_provided_lifetime_args() == 0 {
+ (self.gen_args.span().unwrap().shrink_to_lo(), true)
+ } else {
+ let last_lt = &self.gen_args.args[self.num_provided_lifetime_args() - 1];
+ (last_lt.span().shrink_to_hi(), false)
+ };
+ let has_non_lt_args = self.num_provided_type_or_const_args() != 0;
+ let has_bindings = !self.gen_args.bindings.is_empty();
+
+ let sugg_prefix = if is_first { "" } else { ", " };
+ let sugg_suffix =
+ if is_first && (has_non_lt_args || has_bindings) { ", " } else { "" };
+
+ let sugg = format!("{}{}{}", sugg_prefix, suggested_args, sugg_suffix);
+ debug!("sugg: {:?}", sugg);
+
+ err.span_suggestion_verbose(sugg_span, &msg, sugg, Applicability::HasPlaceholders);
+ }
+ AngleBrackets::Implied => {
+ // We never encounter missing lifetimes in situations where lifetimes are elided.
+ unreachable!();
+ }
+ }
+ }
+
+ fn suggest_adding_type_and_const_args(&self, err: &mut Diagnostic) {
+ let num_missing_args = self.num_missing_type_or_const_args();
+ let msg = format!("add missing {} argument{}", self.kind(), pluralize!(num_missing_args));
+
+ let suggested_args =
+ self.get_type_or_const_args_suggestions_from_param_names(num_missing_args);
+ debug!("suggested_args: {:?}", suggested_args);
+
+ match self.angle_brackets {
+ AngleBrackets::Missing | AngleBrackets::Implied => {
+ let span = self.path_segment.ident.span;
+
+ // insert a suggestion of the form "Y<T, U>"
+ let ident = self.path_segment.ident.name.to_ident_string();
+ let sugg = format!("{}<{}>", ident, suggested_args);
+ debug!("sugg: {:?}", sugg);
+
+ err.span_suggestion_verbose(span, &msg, sugg, Applicability::HasPlaceholders);
+ }
+ AngleBrackets::Available => {
+ let gen_args_span = self.gen_args.span().unwrap();
+ let sugg_offset =
+ self.get_lifetime_args_offset() + self.num_provided_type_or_const_args();
+
+ let (sugg_span, is_first) = if sugg_offset == 0 {
+ (gen_args_span.shrink_to_lo(), true)
+ } else {
+ let arg_span = self.gen_args.args[sugg_offset - 1].span();
+ // If we came here, then the spans of any inferred lifetimes can only
+ // point to either the opening bracket or to the space right after it.
+ // Both of these spans have an `hi` lower than or equal to the span
+ // of the generics excluding the brackets.
+ // This allows us to check whether `arg_span` is the artificial span of
+ // an inferred lifetime, in which case the generic we're suggesting to
+ // add will be the first visible one, even if it isn't the actual first generic.
+ (arg_span.shrink_to_hi(), arg_span.hi() <= gen_args_span.lo())
+ };
+
+ let sugg_prefix = if is_first { "" } else { ", " };
+ let sugg_suffix =
+ if is_first && !self.gen_args.bindings.is_empty() { ", " } else { "" };
+
+ let sugg = format!("{}{}{}", sugg_prefix, suggested_args, sugg_suffix);
+ debug!("sugg: {:?}", sugg);
+
+ err.span_suggestion_verbose(sugg_span, &msg, sugg, Applicability::HasPlaceholders);
+ }
+ }
+ }
+
+ /// Suggests removing the redundant argument(s):
+ ///
+ /// ```text
+ /// type Map = HashMap<String, String, String, String>;
+ /// ```
+ fn suggest_removing_args_or_generics(&self, err: &mut Diagnostic) {
+ let num_provided_lt_args = self.num_provided_lifetime_args();
+ let num_provided_type_const_args = self.num_provided_type_or_const_args();
+ let unbound_types = self.get_unbound_associated_types();
+ let num_provided_args = num_provided_lt_args + num_provided_type_const_args;
+ assert!(num_provided_args > 0);
+
+ let num_redundant_lt_args = self.num_excess_lifetime_args();
+ let num_redundant_type_or_const_args = self.num_excess_type_or_const_args();
+ let num_redundant_args = num_redundant_lt_args + num_redundant_type_or_const_args;
+
+ let redundant_lifetime_args = num_redundant_lt_args > 0;
+ let redundant_type_or_const_args = num_redundant_type_or_const_args > 0;
+
+ let remove_entire_generics = num_redundant_args >= self.gen_args.args.len();
+ let provided_args_matches_unbound_traits =
+ unbound_types.len() == num_redundant_type_or_const_args;
+
+ let remove_lifetime_args = |err: &mut Diagnostic| {
+ let mut lt_arg_spans = Vec::new();
+ let mut found_redundant = false;
+ for arg in self.gen_args.args {
+ if let hir::GenericArg::Lifetime(_) = arg {
+ lt_arg_spans.push(arg.span());
+ if lt_arg_spans.len() > self.num_expected_lifetime_args() {
+ found_redundant = true;
+ }
+ } else if found_redundant {
+ // A redundant argument that is separated from the others, like `'c`
+ // below, is not included, to avoid pulling `Bar` into the span.
+ // ```
+ // type Foo<'a, T> = &'a T;
+ // let _: Foo<'a, 'b, Bar, 'c>;
+ // ```
+ break;
+ }
+ }
+
+ let span_lo_redundant_lt_args = lt_arg_spans[self.num_expected_lifetime_args()];
+ let span_hi_redundant_lt_args = lt_arg_spans[lt_arg_spans.len() - 1];
+
+ let span_redundant_lt_args = span_lo_redundant_lt_args.to(span_hi_redundant_lt_args);
+ debug!("span_redundant_lt_args: {:?}", span_redundant_lt_args);
+
+ let num_redundant_lt_args = lt_arg_spans.len() - self.num_expected_lifetime_args();
+ let msg_lifetimes = format!(
+ "remove {these} lifetime argument{s}",
+ these = pluralize!("this", num_redundant_lt_args),
+ s = pluralize!(num_redundant_lt_args),
+ );
+
+ err.span_suggestion(
+ span_redundant_lt_args,
+ &msg_lifetimes,
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ };
+
+ let remove_type_or_const_args = |err: &mut Diagnostic| {
+ let mut gen_arg_spans = Vec::new();
+ let mut found_redundant = false;
+ for arg in self.gen_args.args {
+ match arg {
+ hir::GenericArg::Type(_)
+ | hir::GenericArg::Const(_)
+ | hir::GenericArg::Infer(_) => {
+ gen_arg_spans.push(arg.span());
+ if gen_arg_spans.len() > self.num_expected_type_or_const_args() {
+ found_redundant = true;
+ }
+ }
+ _ if found_redundant => break,
+ _ => {}
+ }
+ }
+
+ let span_lo_redundant_type_or_const_args =
+ gen_arg_spans[self.num_expected_type_or_const_args()];
+ let span_hi_redundant_type_or_const_args = gen_arg_spans[gen_arg_spans.len() - 1];
+
+ let span_redundant_type_or_const_args =
+ span_lo_redundant_type_or_const_args.to(span_hi_redundant_type_or_const_args);
+ debug!("span_redundant_type_or_const_args: {:?}", span_redundant_type_or_const_args);
+
+ let num_redundant_gen_args =
+ gen_arg_spans.len() - self.num_expected_type_or_const_args();
+ let msg_types_or_consts = format!(
+ "remove {these} generic argument{s}",
+ these = pluralize!("this", num_redundant_gen_args),
+ s = pluralize!(num_redundant_gen_args),
+ );
+
+ err.span_suggestion(
+ span_redundant_type_or_const_args,
+ &msg_types_or_consts,
+ "",
+ Applicability::MaybeIncorrect,
+ );
+ };
+
+ // If there is a single unbound associated type and a single excess generic param,
+ // suggest replacing the generic param with the associated type bound.
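+ // For example (illustrative), `Box<dyn Iterator<u32>>` becomes
+ // `Box<dyn Iterator<Item = u32>>` when `Item` is the only unbound
+ // associated type.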
+ if provided_args_matches_unbound_traits && !unbound_types.is_empty() {
+ let mut suggestions = vec![];
+ let unused_generics = &self.gen_args.args[self.num_expected_type_or_const_args()..];
+ for (potential, name) in iter::zip(unused_generics, &unbound_types) {
+ if let Ok(snippet) = self.tcx.sess.source_map().span_to_snippet(potential.span()) {
+ suggestions.push((potential.span(), format!("{} = {}", name, snippet)));
+ }
+ }
+
+ if !suggestions.is_empty() {
+ err.multipart_suggestion(
+ &format!(
+ "replace the generic bound{s} with the associated type{s}",
+ s = pluralize!(unbound_types.len())
+ ),
+ suggestions,
+ Applicability::MaybeIncorrect,
+ );
+ }
+ } else if remove_entire_generics {
+ let span = self
+ .path_segment
+ .args
+ .unwrap()
+ .span_ext()
+ .unwrap()
+ .with_lo(self.path_segment.ident.span.hi());
+
+ let msg = format!(
+ "remove these {}generics",
+ if self.gen_args.parenthesized { "parenthetical " } else { "" },
+ );
+
+ err.span_suggestion(span, &msg, "", Applicability::MaybeIncorrect);
+ } else if redundant_lifetime_args && redundant_type_or_const_args {
+ remove_lifetime_args(err);
+ remove_type_or_const_args(err);
+ } else if redundant_lifetime_args {
+ remove_lifetime_args(err);
+ } else {
+ assert!(redundant_type_or_const_args);
+ remove_type_or_const_args(err);
+ }
+ }
+
+ /// Builds the `type defined here` message.
+ fn show_definition(&self, err: &mut Diagnostic) {
+ let mut spans: MultiSpan = if let Some(def_span) = self.tcx.def_ident_span(self.def_id) {
+ if self.tcx.sess.source_map().is_span_accessible(def_span) {
+ def_span.into()
+ } else {
+ return;
+ }
+ } else {
+ return;
+ };
+
+ let msg = {
+ let def_kind = self.tcx.def_kind(self.def_id).descr(self.def_id);
+ let (quantifier, bound) = self.get_quantifier_and_bound();
+
+ let params = if bound == 0 {
+ String::new()
+ } else {
+ let params = self
+ .gen_params
+ .params
+ .iter()
+ .skip(self.params_offset)
+ .take(bound)
+ .map(|param| {
+ let span = self.tcx.def_span(param.def_id);
+ spans.push_span_label(span, "");
+ param
+ })
+ .map(|param| format!("`{}`", param.name))
+ .collect::<Vec<_>>()
+ .join(", ");
+
+ format!(": {}", params)
+ };
+
+ format!(
+ "{} defined here, with {}{} {} parameter{}{}",
+ def_kind,
+ quantifier,
+ bound,
+ self.kind(),
+ pluralize!(bound),
+ params,
+ )
+ };
+
+ err.span_note(spans, &msg);
+ }
+
+ /// Add note if `impl Trait` is explicitly specified.
+ fn note_synth_provided(&self, err: &mut Diagnostic) {
+ if !self.is_synth_provided() {
+ return;
+ }
+
+ err.note("`impl Trait` cannot be explicitly specified as a generic argument");
+ }
+}
+
+impl<'tcx> StructuredDiagnostic<'tcx> for WrongNumberOfGenericArgs<'_, 'tcx> {
+ fn session(&self) -> &Session {
+ self.tcx.sess
+ }
+
+ fn code(&self) -> DiagnosticId {
+ rustc_errors::error_code!(E0107)
+ }
+
+ fn diagnostic_common(&self) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ let mut err = self.start_diagnostics();
+
+ self.notify(&mut err);
+ self.suggest(&mut err);
+ self.show_definition(&mut err);
+ self.note_synth_provided(&mut err);
+
+ err
+ }
+}
diff --git a/compiler/rustc_typeck/src/variance/constraints.rs b/compiler/rustc_typeck/src/variance/constraints.rs
new file mode 100644
index 000000000..d79450e1a
--- /dev/null
+++ b/compiler/rustc_typeck/src/variance/constraints.rs
@@ -0,0 +1,449 @@
+//! Constraint construction and representation
+//!
+//! The second pass over the AST determines the set of constraints.
+//! We walk the set of items and, for each member, generate new constraints.
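+//!
+//! For example (an illustrative sketch, matching the `Ref` and `FnPtr`
+//! arms below), given
+//!
+//! ```ignore (illustrative)
+//! struct RefFn<'a, T>(&'a T, fn(T));
+//! ```
+//!
+//! visiting the first field records a contravariant term for `'a` and a
+//! covariant term for `T`, while the `fn(T)` field records a second,
+//! contravariant term for `T`; the solver later combines these (here `T`
+//! ends up invariant).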
+
+use hir::def_id::{DefId, LocalDefId};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
+use rustc_middle::ty::{self, Ty, TyCtxt};
+
+use super::terms::VarianceTerm::*;
+use super::terms::*;
+
+pub struct ConstraintContext<'a, 'tcx> {
+ pub terms_cx: TermsContext<'a, 'tcx>,
+
+ // These are pointers to common `ConstantTerm` instances
+ covariant: VarianceTermPtr<'a>,
+ contravariant: VarianceTermPtr<'a>,
+ invariant: VarianceTermPtr<'a>,
+ bivariant: VarianceTermPtr<'a>,
+
+ pub constraints: Vec<Constraint<'a>>,
+}
+
+/// Declares that the variable with inferred index `inferred` appears in a
+/// location with variance `variance`.
+#[derive(Copy, Clone)]
+pub struct Constraint<'a> {
+ pub inferred: InferredIndex,
+ pub variance: &'a VarianceTerm<'a>,
+}
+
+/// To build constraints, we visit one item (type, trait) at a time
+/// and look at its contents. So e.g., if we have
+/// ```ignore (illustrative)
+/// struct Foo<T> {
+/// b: Bar<T>
+/// }
+/// ```
+/// then while we are visiting `Bar<T>`, the `CurrentItem` would have
+/// the `DefId` and the start of `Foo`'s inferreds.
+pub struct CurrentItem {
+ inferred_start: InferredIndex,
+}
+
+pub fn add_constraints_from_crate<'a, 'tcx>(
+ terms_cx: TermsContext<'a, 'tcx>,
+) -> ConstraintContext<'a, 'tcx> {
+ let tcx = terms_cx.tcx;
+ let covariant = terms_cx.arena.alloc(ConstantTerm(ty::Covariant));
+ let contravariant = terms_cx.arena.alloc(ConstantTerm(ty::Contravariant));
+ let invariant = terms_cx.arena.alloc(ConstantTerm(ty::Invariant));
+ let bivariant = terms_cx.arena.alloc(ConstantTerm(ty::Bivariant));
+ let mut constraint_cx = ConstraintContext {
+ terms_cx,
+ covariant,
+ contravariant,
+ invariant,
+ bivariant,
+ constraints: Vec::new(),
+ };
+
+ let crate_items = tcx.hir_crate_items(());
+
+ for def_id in crate_items.definitions() {
+ let def_kind = tcx.def_kind(def_id);
+ match def_kind {
+ DefKind::Struct | DefKind::Union | DefKind::Enum => {
+ constraint_cx.build_constraints_for_item(def_id);
+
+ let adt = tcx.adt_def(def_id);
+ for variant in adt.variants() {
+ if let Some(ctor) = variant.ctor_def_id {
+ constraint_cx.build_constraints_for_item(ctor.expect_local());
+ }
+ }
+ }
+ DefKind::Fn | DefKind::AssocFn => constraint_cx.build_constraints_for_item(def_id),
+ _ => {}
+ }
+ }
+
+ constraint_cx
+}
+
+impl<'a, 'tcx> ConstraintContext<'a, 'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.terms_cx.tcx
+ }
+
+ fn build_constraints_for_item(&mut self, def_id: LocalDefId) {
+ let tcx = self.tcx();
+ debug!("build_constraints_for_item({})", tcx.def_path_str(def_id.to_def_id()));
+
+ // Skip items with no generics - there's nothing to infer in them.
+ if tcx.generics_of(def_id).count() == 0 {
+ return;
+ }
+
+ let inferred_start = self.terms_cx.inferred_starts[&def_id];
+ let current_item = &CurrentItem { inferred_start };
+ match tcx.type_of(def_id).kind() {
+ ty::Adt(def, _) => {
+ // Not entirely obvious: constraints on structs/enums do not
+ // affect the variance of their type parameters. See discussion
+ // in comment at top of module.
+ //
+ // self.add_constraints_from_generics(generics);
+
+ for field in def.all_fields() {
+ self.add_constraints_from_ty(
+ current_item,
+ tcx.type_of(field.did),
+ self.covariant,
+ );
+ }
+ }
+
+ ty::FnDef(..) => {
+ self.add_constraints_from_sig(current_item, tcx.fn_sig(def_id), self.covariant);
+ }
+
+ ty::Error(_) => {}
+ _ => {
+ span_bug!(
+ tcx.def_span(def_id),
+ "`build_constraints_for_item` unsupported for this item"
+ );
+ }
+ }
+ }
+
+ fn add_constraint(&mut self, current: &CurrentItem, index: u32, variance: VarianceTermPtr<'a>) {
+ debug!("add_constraint(index={}, variance={:?})", index, variance);
+ self.constraints.push(Constraint {
+ inferred: InferredIndex(current.inferred_start.0 + index as usize),
+ variance,
+ });
+ }
+
+ fn contravariant(&mut self, variance: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> {
+ self.xform(variance, self.contravariant)
+ }
+
+ fn invariant(&mut self, variance: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> {
+ self.xform(variance, self.invariant)
+ }
+
+ fn constant_term(&self, v: ty::Variance) -> VarianceTermPtr<'a> {
+ match v {
+ ty::Covariant => self.covariant,
+ ty::Invariant => self.invariant,
+ ty::Contravariant => self.contravariant,
+ ty::Bivariant => self.bivariant,
+ }
+ }
+
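+ // Transform composition, e.g. (illustrative): a fn-pointer input in a
+ // covariant position is visited with xform(+, -) = -, and a `&'a` region
+ // inside that input with xform(-, -) = +, so such an `'a` ends up with a
+ // covariant term.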
+ fn xform(&mut self, v1: VarianceTermPtr<'a>, v2: VarianceTermPtr<'a>) -> VarianceTermPtr<'a> {
+ match (*v1, *v2) {
+ (_, ConstantTerm(ty::Covariant)) => {
+ // Applying a "covariant" transform is always a no-op
+ v1
+ }
+
+ (ConstantTerm(c1), ConstantTerm(c2)) => self.constant_term(c1.xform(c2)),
+
+ _ => &*self.terms_cx.arena.alloc(TransformTerm(v1, v2)),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self, current))]
+ fn add_constraints_from_invariant_substs(
+ &mut self,
+ current: &CurrentItem,
+ substs: SubstsRef<'tcx>,
+ variance: VarianceTermPtr<'a>,
+ ) {
+ // Traits are always invariant, so we can take advantage of that.
+ let variance_i = self.invariant(variance);
+
+ for k in substs {
+ match k.unpack() {
+ GenericArgKind::Lifetime(lt) => {
+ self.add_constraints_from_region(current, lt, variance_i)
+ }
+ GenericArgKind::Type(ty) => self.add_constraints_from_ty(current, ty, variance_i),
+ GenericArgKind::Const(val) => {
+ self.add_constraints_from_const(current, val, variance_i)
+ }
+ }
+ }
+ }
+
+ /// Adds constraints appropriate for an instance of `ty` appearing
+ /// in a context with ambient variance `variance`.
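+ /// For example (illustrative): for `&'a mut T` in a covariant position,
+ /// `'a` receives a contravariant term while `T` receives an invariant
+ /// one, since the referent is mutable.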
+ fn add_constraints_from_ty(
+ &mut self,
+ current: &CurrentItem,
+ ty: Ty<'tcx>,
+ variance: VarianceTermPtr<'a>,
+ ) {
+ debug!("add_constraints_from_ty(ty={:?}, variance={:?})", ty, variance);
+
+ match *ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Never
+ | ty::Foreign(..) => {
+ // leaf type -- noop
+ }
+
+ ty::FnDef(..) | ty::Generator(..) | ty::Closure(..) => {
+ bug!("Unexpected closure type in variance computation");
+ }
+
+ ty::Ref(region, ty, mutbl) => {
+ let contra = self.contravariant(variance);
+ self.add_constraints_from_region(current, region, contra);
+ self.add_constraints_from_mt(current, &ty::TypeAndMut { ty, mutbl }, variance);
+ }
+
+ ty::Array(typ, len) => {
+ self.add_constraints_from_const(current, len, variance);
+ self.add_constraints_from_ty(current, typ, variance);
+ }
+
+ ty::Slice(typ) => {
+ self.add_constraints_from_ty(current, typ, variance);
+ }
+
+ ty::RawPtr(ref mt) => {
+ self.add_constraints_from_mt(current, mt, variance);
+ }
+
+ ty::Tuple(subtys) => {
+ for subty in subtys {
+ self.add_constraints_from_ty(current, subty, variance);
+ }
+ }
+
+ ty::Adt(def, substs) => {
+ self.add_constraints_from_substs(current, def.did(), substs, variance);
+ }
+
+ ty::Projection(ref data) => {
+ self.add_constraints_from_invariant_substs(current, data.substs, variance);
+ }
+
+ ty::Opaque(_, substs) => {
+ self.add_constraints_from_invariant_substs(current, substs, variance);
+ }
+
+ ty::Dynamic(data, r) => {
+ // The type `Foo<T + 'a>` is contravariant with respect to `'a`:
+ let contra = self.contravariant(variance);
+ self.add_constraints_from_region(current, r, contra);
+
+ if let Some(poly_trait_ref) = data.principal() {
+ self.add_constraints_from_invariant_substs(
+ current,
+ poly_trait_ref.skip_binder().substs,
+ variance,
+ );
+ }
+
+ for projection in data.projection_bounds() {
+ match projection.skip_binder().term {
+ ty::Term::Ty(ty) => {
+ self.add_constraints_from_ty(current, ty, self.invariant);
+ }
+ ty::Term::Const(c) => {
+ self.add_constraints_from_const(current, c, self.invariant)
+ }
+ }
+ }
+ }
+
+ ty::Param(ref data) => {
+ self.add_constraint(current, data.index, variance);
+ }
+
+ ty::FnPtr(sig) => {
+ self.add_constraints_from_sig(current, sig, variance);
+ }
+
+ ty::Error(_) => {
+ // We encounter this when walking the trait references for object
+ // types, where we use `Error` as the `Self` type.
+ }
+
+ ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Bound(..) | ty::Infer(..) => {
+ bug!(
+ "unexpected type encountered in \
+ variance inference: {}",
+ ty
+ );
+ }
+ }
+ }
+
+ /// Adds constraints appropriate for a nominal type (enum, struct,
+ /// object, etc) appearing in a context with ambient variance `variance`
+ fn add_constraints_from_substs(
+ &mut self,
+ current: &CurrentItem,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ variance: VarianceTermPtr<'a>,
+ ) {
+ debug!(
+ "add_constraints_from_substs(def_id={:?}, substs={:?}, variance={:?})",
+ def_id, substs, variance
+ );
+
+ // We don't record `inferred_starts` entries for empty generics.
+ if substs.is_empty() {
+ return;
+ }
+
+ let (local, remote) = if let Some(def_id) = def_id.as_local() {
+ (Some(self.terms_cx.inferred_starts[&def_id]), None)
+ } else {
+ (None, Some(self.tcx().variances_of(def_id)))
+ };
+
+ for (i, k) in substs.iter().enumerate() {
+ let variance_decl = if let Some(InferredIndex(start)) = local {
+ // Parameter on an item defined within current crate:
+ // variance not yet inferred, so return a symbolic
+ // variance.
+ self.terms_cx.inferred_terms[start + i]
+ } else {
+ // Parameter on an item defined within another crate:
+ // variance already inferred, just look it up.
+ self.constant_term(remote.as_ref().unwrap()[i])
+ };
+ let variance_i = self.xform(variance, variance_decl);
+ debug!(
+ "add_constraints_from_substs: variance_decl={:?} variance_i={:?}",
+ variance_decl, variance_i
+ );
+ match k.unpack() {
+ GenericArgKind::Lifetime(lt) => {
+ self.add_constraints_from_region(current, lt, variance_i)
+ }
+ GenericArgKind::Type(ty) => self.add_constraints_from_ty(current, ty, variance_i),
+ GenericArgKind::Const(val) => {
+ self.add_constraints_from_const(current, val, variance)
+ }
+ }
+ }
+ }
+
+ /// Adds constraints appropriate for a const expression `c`
+ /// in a context with ambient variance `variance`.
+ fn add_constraints_from_const(
+ &mut self,
+ current: &CurrentItem,
+ c: ty::Const<'tcx>,
+ variance: VarianceTermPtr<'a>,
+ ) {
+ debug!("add_constraints_from_const(c={:?}, variance={:?})", c, variance);
+
+ match &c.kind() {
+ ty::ConstKind::Unevaluated(uv) => {
+ self.add_constraints_from_invariant_substs(current, uv.substs, variance);
+ }
+ _ => {}
+ }
+ }
+
+ /// Adds constraints appropriate for a function with signature
+ /// `sig` appearing in a context with ambient variance `variance`
+ fn add_constraints_from_sig(
+ &mut self,
+ current: &CurrentItem,
+ sig: ty::PolyFnSig<'tcx>,
+ variance: VarianceTermPtr<'a>,
+ ) {
+ let contra = self.contravariant(variance);
+ for &input in sig.skip_binder().inputs() {
+ self.add_constraints_from_ty(current, input, contra);
+ }
+ self.add_constraints_from_ty(current, sig.skip_binder().output(), variance);
+ }
+
+ /// Adds constraints appropriate for a region appearing in a
+ /// context with ambient variance `variance`
+ fn add_constraints_from_region(
+ &mut self,
+ current: &CurrentItem,
+ region: ty::Region<'tcx>,
+ variance: VarianceTermPtr<'a>,
+ ) {
+ match *region {
+ ty::ReEarlyBound(ref data) => {
+ self.add_constraint(current, data.index, variance);
+ }
+
+ ty::ReStatic => {}
+
+ ty::ReLateBound(..) => {
+ // Late-bound regions do not get substituted the same
+ // way early-bound regions do, so we skip them here.
+ }
+
+ ty::ReFree(..)
+ | ty::ReVar(..)
+ | ty::RePlaceholder(..)
+ | ty::ReEmpty(_)
+ | ty::ReErased => {
+ // We don't expect to see anything but 'static or bound
+ // regions when visiting member types or method types.
+ bug!(
+ "unexpected region encountered in variance \
+ inference: {:?}",
+ region
+ );
+ }
+ }
+ }
+
+ /// Adds constraints appropriate for a mutability-type pair
+ /// appearing in a context with ambient variance `variance`
+ fn add_constraints_from_mt(
+ &mut self,
+ current: &CurrentItem,
+ mt: &ty::TypeAndMut<'tcx>,
+ variance: VarianceTermPtr<'a>,
+ ) {
+ match mt.mutbl {
+ hir::Mutability::Mut => {
+ let invar = self.invariant(variance);
+ self.add_constraints_from_ty(current, mt.ty, invar);
+ }
+
+ hir::Mutability::Not => {
+ self.add_constraints_from_ty(current, mt.ty, variance);
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/variance/mod.rs b/compiler/rustc_typeck/src/variance/mod.rs
new file mode 100644
index 000000000..82103c5a0
--- /dev/null
+++ b/compiler/rustc_typeck/src/variance/mod.rs
@@ -0,0 +1,63 @@
+//! Module for inferring the variance of type and lifetime parameters. See the [rustc dev guide]
+//! chapter for more info.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/variance.html
+
+use rustc_arena::DroplessArena;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::{self, CrateVariancesMap, TyCtxt};
+
+/// Defines the `TermsContext`, which basically houses an arena where we
+/// can allocate terms.
+mod terms;
+
+/// Code to gather up constraints.
+mod constraints;
+
+/// Code to solve constraints and write out the results.
+mod solve;
+
+/// Code to write unit tests of variance.
+pub mod test;
+
+/// Code for transforming variances.
+mod xform;
+
+pub fn provide(providers: &mut Providers) {
+ *providers = Providers { variances_of, crate_variances, ..*providers };
+}
+
+fn crate_variances(tcx: TyCtxt<'_>, (): ()) -> CrateVariancesMap<'_> {
+ let arena = DroplessArena::default();
+ let terms_cx = terms::determine_parameters_to_be_inferred(tcx, &arena);
+ let constraints_cx = constraints::add_constraints_from_crate(terms_cx);
+ solve::solve_constraints(constraints_cx)
+}
+
+fn variances_of(tcx: TyCtxt<'_>, item_def_id: DefId) -> &[ty::Variance] {
+ // Skip items with no generics - there's nothing to infer in them.
+ if tcx.generics_of(item_def_id).count() == 0 {
+ return &[];
+ }
+
+ match tcx.def_kind(item_def_id) {
+ DefKind::Fn
+ | DefKind::AssocFn
+ | DefKind::Enum
+ | DefKind::Struct
+ | DefKind::Union
+ | DefKind::Variant
+ | DefKind::Ctor(..) => {}
+ _ => {
+ // Variance not relevant.
+ span_bug!(tcx.def_span(item_def_id), "asked to compute variance for wrong kind of item")
+ }
+ }
+
+ // Everything else must be inferred.
+
+ let crate_map = tcx.crate_variances(());
+ crate_map.variances.get(&item_def_id).copied().unwrap_or(&[])
+}
diff --git a/compiler/rustc_typeck/src/variance/solve.rs b/compiler/rustc_typeck/src/variance/solve.rs
new file mode 100644
index 000000000..97aca621a
--- /dev/null
+++ b/compiler/rustc_typeck/src/variance/solve.rs
@@ -0,0 +1,135 @@
+//! Constraint solving
+//!
+//! The final phase iterates over the constraints, refining the variance
+//! for each inferred until a fixed point is reached. This will be the
+//! optimal solution to the constraints. The final variance for each
+//! inferred is then written into the `CrateVariancesMap` returned by the
+//! `crate_variances` query.
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_middle::ty;
+
+use super::constraints::*;
+use super::terms::VarianceTerm::*;
+use super::terms::*;
+use super::xform::*;
+
+struct SolveContext<'a, 'tcx> {
+ terms_cx: TermsContext<'a, 'tcx>,
+ constraints: Vec<Constraint<'a>>,
+
+ // Maps from an InferredIndex to the inferred value for that variable.
+ solutions: Vec<ty::Variance>,
+}
+
+pub fn solve_constraints<'tcx>(
+ constraints_cx: ConstraintContext<'_, 'tcx>,
+) -> ty::CrateVariancesMap<'tcx> {
+ let ConstraintContext { terms_cx, constraints, .. } = constraints_cx;
+
+ let mut solutions = vec![ty::Bivariant; terms_cx.inferred_terms.len()];
+ for &(id, ref variances) in &terms_cx.lang_items {
+ let InferredIndex(start) = terms_cx.inferred_starts[&id];
+ for (i, &variance) in variances.iter().enumerate() {
+ solutions[start + i] = variance;
+ }
+ }
+
+ let mut solutions_cx = SolveContext { terms_cx, constraints, solutions };
+ solutions_cx.solve();
+ let variances = solutions_cx.create_map();
+
+ ty::CrateVariancesMap { variances }
+}
+
+impl<'a, 'tcx> SolveContext<'a, 'tcx> {
+ fn solve(&mut self) {
+ // Propagate constraints until a fixed point is reached. Note
+ // that the maximum number of iterations is 2C where C is the
+ // number of constraints (each variable can change values at most
+ // twice). Since the number of constraints is linear in the size of
+ // the input, so is the inference process.
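+ //
+ // Worked example (illustrative): an inferred with the two constraint
+ // terms `+` and `-` starts at `*` (Bivariant), becomes glb(+, *) = +
+ // when the first constraint is visited, then glb(-, +) = o; a final
+ // sweep changes nothing, so iteration stops at Invariant.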
+ let mut changed = true;
+ while changed {
+ changed = false;
+
+ for constraint in &self.constraints {
+ let Constraint { inferred, variance: term } = *constraint;
+ let InferredIndex(inferred) = inferred;
+ let variance = self.evaluate(term);
+ let old_value = self.solutions[inferred];
+ let new_value = glb(variance, old_value);
+ if old_value != new_value {
+ debug!(
+ "updating inferred {} \
+ from {:?} to {:?} due to {:?}",
+ inferred, old_value, new_value, term
+ );
+
+ self.solutions[inferred] = new_value;
+ changed = true;
+ }
+ }
+ }
+ }
+
+ fn enforce_const_invariance(&self, generics: &ty::Generics, variances: &mut [ty::Variance]) {
+ let tcx = self.terms_cx.tcx;
+
+ // Make all const parameters invariant.
+ for param in generics.params.iter() {
+ if let ty::GenericParamDefKind::Const { .. } = param.kind {
+ variances[param.index as usize] = ty::Invariant;
+ }
+ }
+
+ // Make all the const parameters in the parent invariant (recursively).
+ if let Some(def_id) = generics.parent {
+ self.enforce_const_invariance(tcx.generics_of(def_id), variances);
+ }
+ }
+
+ fn create_map(&self) -> FxHashMap<DefId, &'tcx [ty::Variance]> {
+ let tcx = self.terms_cx.tcx;
+
+ let solutions = &self.solutions;
+ self.terms_cx
+ .inferred_starts
+ .iter()
+ .map(|(&def_id, &InferredIndex(start))| {
+ let generics = tcx.generics_of(def_id);
+ let count = generics.count();
+
+ let variances = tcx.arena.alloc_slice(&solutions[start..(start + count)]);
+
+ // Const parameters are always invariant.
+ self.enforce_const_invariance(generics, variances);
+
+ // Functions are permitted to have unused generic parameters: make those invariant.
+ if let ty::FnDef(..) = tcx.type_of(def_id).kind() {
+ for variance in variances.iter_mut() {
+ if *variance == ty::Bivariant {
+ *variance = ty::Invariant;
+ }
+ }
+ }
+
+ (def_id.to_def_id(), &*variances)
+ })
+ .collect()
+ }
+
+ fn evaluate(&self, term: VarianceTermPtr<'a>) -> ty::Variance {
+ match *term {
+ ConstantTerm(v) => v,
+
+ TransformTerm(t1, t2) => {
+ let v1 = self.evaluate(t1);
+ let v2 = self.evaluate(t2);
+ v1.xform(v2)
+ }
+
+ InferredTerm(InferredIndex(index)) => self.solutions[index],
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/variance/terms.rs b/compiler/rustc_typeck/src/variance/terms.rs
new file mode 100644
index 000000000..1f763011e
--- /dev/null
+++ b/compiler/rustc_typeck/src/variance/terms.rs
@@ -0,0 +1,145 @@
+// Representing terms
+//
+// Terms are structured as a straightforward tree. Rather than rely on
+// GC, we allocate terms out of a bounded arena (the lifetime of this
+// arena is the lifetime 'a that is threaded around).
+//
+// We assign a unique index to each type/region parameter whose variance
+// is to be inferred. We refer to such variables as "inferreds". An
+// `InferredIndex` is a newtype'd int representing the index of such
+// a variable.
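+//
+// For example (illustrative): if `struct A<'a, T>` is assigned start index
+// 0, its inferreds are [0] for `'a` and [1] for `T`, and the next item with
+// generics then starts at index 2; the indices for one item are always
+// contiguous.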
+
+use rustc_arena::DroplessArena;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{LocalDefId, LocalDefIdMap};
+use rustc_middle::ty::{self, TyCtxt};
+use std::fmt;
+
+use self::VarianceTerm::*;
+
+pub type VarianceTermPtr<'a> = &'a VarianceTerm<'a>;
+
+#[derive(Copy, Clone, Debug)]
+pub struct InferredIndex(pub usize);
+
+#[derive(Copy, Clone)]
+pub enum VarianceTerm<'a> {
+ ConstantTerm(ty::Variance),
+ TransformTerm(VarianceTermPtr<'a>, VarianceTermPtr<'a>),
+ InferredTerm(InferredIndex),
+}
+
+impl<'a> fmt::Debug for VarianceTerm<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ ConstantTerm(c1) => write!(f, "{:?}", c1),
+ TransformTerm(v1, v2) => write!(f, "({:?} \u{00D7} {:?})", v1, v2),
+ InferredTerm(id) => write!(f, "[{}]", {
+ let InferredIndex(i) = id;
+ i
+ }),
+ }
+ }
+}
+
+// The first pass over the crate simply builds up the set of inferreds.
+
+pub struct TermsContext<'a, 'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub arena: &'a DroplessArena,
+
+ // For marker types, UnsafeCell, and other lang items where
+ // variance is hardcoded, records the item-id and the hardcoded
+ // variance.
+ pub lang_items: Vec<(LocalDefId, Vec<ty::Variance>)>,
+
+ // Maps from the `LocalDefId` of an item to the first inferred index
+ // used for its type & region parameters.
+ pub inferred_starts: LocalDefIdMap<InferredIndex>,
+
+ // Maps from an InferredIndex to the term for that variable.
+ pub inferred_terms: Vec<VarianceTermPtr<'a>>,
+}
+
+pub fn determine_parameters_to_be_inferred<'a, 'tcx>(
+ tcx: TyCtxt<'tcx>,
+ arena: &'a DroplessArena,
+) -> TermsContext<'a, 'tcx> {
+ let mut terms_cx = TermsContext {
+ tcx,
+ arena,
+ inferred_starts: Default::default(),
+ inferred_terms: vec![],
+
+ lang_items: lang_items(tcx),
+ };
+
+ // See the following for a discussion on dep-graph management.
+ //
+ // - https://rustc-dev-guide.rust-lang.org/query.html
+ // - https://rustc-dev-guide.rust-lang.org/variance.html
+ let crate_items = tcx.hir_crate_items(());
+
+ for def_id in crate_items.definitions() {
+ debug!("add_inferreds for item {:?}", def_id);
+
+ let def_kind = tcx.def_kind(def_id);
+
+ match def_kind {
+ DefKind::Struct | DefKind::Union | DefKind::Enum => {
+ terms_cx.add_inferreds_for_item(def_id);
+
+ let adt = tcx.adt_def(def_id);
+ for variant in adt.variants() {
+ if let Some(ctor) = variant.ctor_def_id {
+ terms_cx.add_inferreds_for_item(ctor.expect_local());
+ }
+ }
+ }
+ DefKind::Fn | DefKind::AssocFn => terms_cx.add_inferreds_for_item(def_id),
+ _ => {}
+ }
+ }
+
+ terms_cx
+}
+
+fn lang_items(tcx: TyCtxt<'_>) -> Vec<(LocalDefId, Vec<ty::Variance>)> {
+ let lang_items = tcx.lang_items();
+ let all = [
+ (lang_items.phantom_data(), vec![ty::Covariant]),
+ (lang_items.unsafe_cell_type(), vec![ty::Invariant]),
+ ];
+
+ all.into_iter() // iterating over (Option<DefId>, Vec<ty::Variance>)
+ .filter_map(|(d, v)| {
+ let def_id = d?.as_local()?; // LocalDefId
+ Some((def_id, v))
+ })
+ .collect()
+}
+
+impl<'a, 'tcx> TermsContext<'a, 'tcx> {
+ fn add_inferreds_for_item(&mut self, def_id: LocalDefId) {
+ let tcx = self.tcx;
+ let count = tcx.generics_of(def_id).count();
+
+ if count == 0 {
+ return;
+ }
+
+ // Record the start of this item's inferreds.
+ let start = self.inferred_terms.len();
+ let newly_added = self.inferred_starts.insert(def_id, InferredIndex(start)).is_none();
+ assert!(newly_added);
+
+ // N.B., the code that writes the results back into the
+ // `CrateVariancesMap` relies on the fact that all inferreds
+ // for a particular item are assigned contiguous indices.
+
+ let arena = self.arena;
+ self.inferred_terms.extend(
+ (start..(start + count)).map(|i| &*arena.alloc(InferredTerm(InferredIndex(i)))),
+ );
+ }
+}
diff --git a/compiler/rustc_typeck/src/variance/test.rs b/compiler/rustc_typeck/src/variance/test.rs
new file mode 100644
index 000000000..2ba87db88
--- /dev/null
+++ b/compiler/rustc_typeck/src/variance/test.rs
@@ -0,0 +1,14 @@
+use rustc_errors::struct_span_err;
+use rustc_middle::ty::TyCtxt;
+use rustc_span::symbol::sym;
+
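+// Illustrative usage from a UI test (not part of this file), assuming the
+// `+`/`-`/`o`/`*` debug rendering of `ty::Variance`:
+//
+//     #[rustc_variance]
+//     struct Contra<T>(fn(T)); //~ ERROR [-]
+//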
+pub fn test_variance(tcx: TyCtxt<'_>) {
+ // For unit testing: check for a special `rustc_variance`
+ // attribute and, if found, report an error carrying the computed variances.
+ for id in tcx.hir().items() {
+ if tcx.has_attr(id.def_id.to_def_id(), sym::rustc_variance) {
+ let variances_of = tcx.variances_of(id.def_id);
+ struct_span_err!(tcx.sess, tcx.def_span(id.def_id), E0208, "{:?}", variances_of).emit();
+ }
+ }
+}
diff --git a/compiler/rustc_typeck/src/variance/xform.rs b/compiler/rustc_typeck/src/variance/xform.rs
new file mode 100644
index 000000000..027f0859f
--- /dev/null
+++ b/compiler/rustc_typeck/src/variance/xform.rs
@@ -0,0 +1,22 @@
+use rustc_middle::ty;
+
+pub fn glb(v1: ty::Variance, v2: ty::Variance) -> ty::Variance {
+ // Greatest lower bound of the variance lattice as
+ // defined in The Paper:
+ //
+ // *
+ // - +
+ // o
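+ //
+ // For instance, glb(Covariant, Bivariant) = Covariant, since `*` is the
+ // top element, while glb(Covariant, Contravariant) = Invariant, since
+ // `o` is the bottom element.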
+ match (v1, v2) {
+ (ty::Invariant, _) | (_, ty::Invariant) => ty::Invariant,
+
+ (ty::Covariant, ty::Contravariant) => ty::Invariant,
+ (ty::Contravariant, ty::Covariant) => ty::Invariant,
+
+ (ty::Covariant, ty::Covariant) => ty::Covariant,
+
+ (ty::Contravariant, ty::Contravariant) => ty::Contravariant,
+
+ (x, ty::Bivariant) | (ty::Bivariant, x) => x,
+ }
+}